Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--drivers/net/ethernet/3com/3c501.c896
-rw-r--r--drivers/net/ethernet/3com/3c501.h91
-rw-r--r--drivers/net/ethernet/3com/3c509.c1594
-rw-r--r--drivers/net/ethernet/3com/3c515.c1584
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c1181
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c943
-rw-r--r--drivers/net/ethernet/3com/3c59x.c3327
-rw-r--r--drivers/net/ethernet/3com/Kconfig122
-rw-r--r--drivers/net/ethernet/3com/Makefile11
-rw-r--r--drivers/net/ethernet/3com/typhoon.c2573
-rw-r--r--drivers/net/ethernet/3com/typhoon.h624
-rw-r--r--drivers/net/ethernet/8390/3c503.c778
-rw-r--r--drivers/net/ethernet/8390/3c503.h91
-rw-r--r--drivers/net/ethernet/8390/8390.c103
-rw-r--r--drivers/net/ethernet/8390/8390.h232
-rw-r--r--drivers/net/ethernet/8390/8390p.c105
-rw-r--r--drivers/net/ethernet/8390/Kconfig337
-rw-r--r--drivers/net/ethernet/8390/Makefile29
-rw-r--r--drivers/net/ethernet/8390/ac3200.c432
-rw-r--r--drivers/net/ethernet/8390/apne.c620
-rw-r--r--drivers/net/ethernet/8390/ax88796.c1010
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c1725
-rw-r--r--drivers/net/ethernet/8390/e2100.c490
-rw-r--r--drivers/net/ethernet/8390/es3210.c446
-rw-r--r--drivers/net/ethernet/8390/etherh.c866
-rw-r--r--drivers/net/ethernet/8390/hp-plus.c506
-rw-r--r--drivers/net/ethernet/8390/hp.c439
-rw-r--r--drivers/net/ethernet/8390/hydra.c273
-rw-r--r--drivers/net/ethernet/8390/lib8390.c1080
-rw-r--r--drivers/net/ethernet/8390/lne390.c434
-rw-r--r--drivers/net/ethernet/8390/mac8390.c874
-rw-r--r--drivers/net/ethernet/8390/ne-h8300.c685
-rw-r--r--drivers/net/ethernet/8390/ne.c1008
-rw-r--r--drivers/net/ethernet/8390/ne2.c799
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c726
-rw-r--r--drivers/net/ethernet/8390/ne3210.c347
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c1710
-rw-r--r--drivers/net/ethernet/8390/smc-mca.c576
-rw-r--r--drivers/net/ethernet/8390/smc-ultra.c622
-rw-r--r--drivers/net/ethernet/8390/smc-ultra32.c464
-rw-r--r--drivers/net/ethernet/8390/stnic.c294
-rw-r--r--drivers/net/ethernet/8390/wd.c567
-rw-r--r--drivers/net/ethernet/8390/zorro8390.c452
-rw-r--r--drivers/net/ethernet/Kconfig177
-rw-r--r--drivers/net/ethernet/Makefile74
-rw-r--r--drivers/net/ethernet/adaptec/Kconfig36
-rw-r--r--drivers/net/ethernet/adaptec/Makefile5
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c2091
-rw-r--r--drivers/net/ethernet/adi/Kconfig69
-rw-r--r--drivers/net/ethernet/adi/Makefile5
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c1767
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.h106
-rw-r--r--drivers/net/ethernet/aeroflex/Kconfig11
-rw-r--r--drivers/net/ethernet/aeroflex/Makefile5
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c1641
-rw-r--r--drivers/net/ethernet/aeroflex/greth.h142
-rw-r--r--drivers/net/ethernet/alteon/Kconfig48
-rw-r--r--drivers/net/ethernet/alteon/Makefile5
-rw-r--r--drivers/net/ethernet/alteon/acenic.c3206
-rw-r--r--drivers/net/ethernet/alteon/acenic.h790
-rw-r--r--drivers/net/ethernet/amd/7990.c662
-rw-r--r--drivers/net/ethernet/amd/7990.h254
-rw-r--r--drivers/net/ethernet/amd/Kconfig195
-rw-r--r--drivers/net/ethernet/amd/Makefile20
-rw-r--r--drivers/net/ethernet/amd/a2065.c781
-rw-r--r--drivers/net/ethernet/amd/a2065.h173
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c770
-rw-r--r--drivers/net/ethernet/amd/am79c961a.h145
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c1992
-rw-r--r--drivers/net/ethernet/amd/amd8111e.h816
-rw-r--r--drivers/net/ethernet/amd/ariadne.c793
-rw-r--r--drivers/net/ethernet/amd/ariadne.h415
-rw-r--r--drivers/net/ethernet/amd/atarilance.c1176
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c1332
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.h134
-rw-r--r--drivers/net/ethernet/amd/declance.c1381
-rw-r--r--drivers/net/ethernet/amd/depca.c2111
-rw-r--r--drivers/net/ethernet/amd/depca.h183
-rw-r--r--drivers/net/ethernet/amd/hplance.c242
-rw-r--r--drivers/net/ethernet/amd/hplance.h26
-rw-r--r--drivers/net/ethernet/amd/lance.c1313
-rw-r--r--drivers/net/ethernet/amd/mvme147.c205
-rw-r--r--drivers/net/ethernet/amd/ni65.c1254
-rw-r--r--drivers/net/ethernet/amd/ni65.h121
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c1525
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c2937
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c961
-rw-r--r--drivers/net/ethernet/amd/sunlance.c1556
-rw-r--r--drivers/net/ethernet/apple/Kconfig97
-rw-r--r--drivers/net/ethernet/apple/Makefile9
-rw-r--r--drivers/net/ethernet/apple/bmac.c1685
-rw-r--r--drivers/net/ethernet/apple/bmac.h164
-rw-r--r--drivers/net/ethernet/apple/cs89x0.c1913
-rw-r--r--drivers/net/ethernet/apple/cs89x0.h465
-rw-r--r--drivers/net/ethernet/apple/mac89x0.c634
-rw-r--r--drivers/net/ethernet/apple/mace.c1031
-rw-r--r--drivers/net/ethernet/apple/mace.h173
-rw-r--r--drivers/net/ethernet/apple/macmace.c792
-rw-r--r--drivers/net/ethernet/atheros/Kconfig70
-rw-r--r--drivers/net/ethernet/atheros/Makefile8
-rw-r--r--drivers/net/ethernet/atheros/atl1c/Makefile2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c.h636
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c311
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.c662
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.h868
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c2933
-rw-r--r--drivers/net/ethernet/atheros/atl1e/Makefile2
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e.h509
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c390
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_hw.c651
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_hw.h690
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c2557
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_param.c268
-rw-r--r--drivers/net/ethernet/atheros/atlx/Makefile3
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c3674
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.h792
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c3119
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.h525
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c269
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.h503
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig122
-rw-r--r--drivers/net/ethernet/broadcom/Makefile11
-rw-r--r--drivers/net/ethernet/broadcom/b44.c2374
-rw-r--r--drivers/net/ethernet/broadcom/b44.h401
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c1966
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.h302
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c8624
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h7388
-rw-r--r--drivers/net/ethernet/broadcom/bnx2_fw.h88
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/Makefile7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h2069
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c3598
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h1491
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c2511
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h203
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h1156
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c2392
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h410
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h38
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h5133
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h567
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h912
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c12480
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h493
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c11617
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h7177
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c5689
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h1297
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c1599
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h381
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c5492
-rw-r--r--drivers/net/ethernet/broadcom/cnic.h480
-rw-r--r--drivers/net/ethernet/broadcom/cnic_defs.h5485
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h340
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c2690
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c15953
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h3186
-rw-r--r--drivers/net/ethernet/brocade/Kconfig23
-rw-r--r--drivers/net/ethernet/brocade/Makefile5
-rw-r--r--drivers/net/ethernet/brocade/bna/Kconfig17
-rw-r--r--drivers/net/ethernet/brocade/bna/Makefile12
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.c299
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.h63
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cs.h140
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs.h296
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_cna.h229
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h171
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_status.h216
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c2473
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h325
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c878
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_msgq.c669
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_msgq.h130
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi.h481
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_cna.h199
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_enet.h901
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_reg.h452
-rw-r--r--drivers/net/ethernet/brocade/bna/bna.h575
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c2144
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_hw_defs.h422
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c3798
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h987
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c3510
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h380
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c962
-rw-r--r--drivers/net/ethernet/brocade/bna/cna.h107
-rw-r--r--drivers/net/ethernet/brocade/bna/cna_fwimg.c92
-rw-r--r--drivers/net/ethernet/cadence/Kconfig45
-rw-r--r--drivers/net/ethernet/cadence/Makefile6
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c1254
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.h109
-rw-r--r--drivers/net/ethernet/cadence/macb.c1366
-rw-r--r--drivers/net/ethernet/cadence/macb.h394
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig107
-rw-r--r--drivers/net/ethernet/chelsio/Makefile8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/Makefile9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/common.h353
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cphy.h175
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h639
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c1379
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/elmer0.h158
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/espi.c373
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/espi.h68
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/fpga_defs.h232
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/gmac.h142
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c397
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h127
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/mv88x201x.c260
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/my3126.c209
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/pm3393.c796
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/regs.h2168
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c2139
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.h94
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/subr.c1130
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h1643
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/tp.c171
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/tp.h72
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/vsc7326.c730
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h297
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/Makefile8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/adapter.h334
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/ael1002.c941
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/aq100x.c354
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/common.h775
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h189
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h114
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h177
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c3448
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c1431
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h209
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h177
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c454
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h149
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/mc5.c438
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/regs.h2598
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c3303
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge_defs.h255
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h1495
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c3785
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3cdev.h70
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/version.h44
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/vsc8211.c416
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/xgmac.c657
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h722
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c3812
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h239
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c597
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.h107
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2442
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2856
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h140
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h678
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h885
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h1623
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/Makefile7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h534
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c2949
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c2465
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h274
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h121
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c1387
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig30
-rw-r--r--drivers/net/ethernet/cirrus/Makefile5
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c904
-rw-r--r--drivers/net/ethernet/cisco/Kconfig23
-rw-r--r--drivers/net/ethernet/cisco/Makefile5
-rw-r--r--drivers/net/ethernet/cisco/enic/Kconfig9
-rw-r--r--drivers/net/ethernet/cisco/enic/Makefile5
-rw-r--r--drivers/net/ethernet/cisco/enic/cq_desc.h80
-rw-r--r--drivers/net/ethernet/cisco/enic/cq_enet_desc.h185
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h135
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.c286
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.h64
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c2591
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_pp.c368
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_pp.h36
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.c384
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.h148
-rw-r--r--drivers/net/ethernet/cisco/enic/rq_enet_desc.h60
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_cq.c91
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_cq.h114
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c1021
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.h135
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_devcmd.h450
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_enet.h57
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_intr.c68
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_intr.h111
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_nic.h72
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_resource.h76
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c221
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.h209
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rss.h40
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_stats.h70
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_vic.c79
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_vic.h83
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.c200
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.h165
-rw-r--r--drivers/net/ethernet/cisco/enic/wq_enet_desc.h98
-rw-r--r--drivers/net/ethernet/davicom/Kconfig24
-rw-r--r--drivers/net/ethernet/davicom/Makefile5
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c1710
-rw-r--r--drivers/net/ethernet/davicom/dm9000.h171
-rw-r--r--drivers/net/ethernet/dec/Kconfig37
-rw-r--r--drivers/net/ethernet/dec/Makefile6
-rw-r--r--drivers/net/ethernet/dec/ewrk3.c1959
-rw-r--r--drivers/net/ethernet/dec/ewrk3.h322
-rw-r--r--drivers/net/ethernet/dec/tulip/21142.c260
-rw-r--r--drivers/net/ethernet/dec/tulip/Kconfig172
-rw-r--r--drivers/net/ethernet/dec/tulip/Makefile19
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c2215
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c5599
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.h1017
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c2253
-rw-r--r--drivers/net/ethernet/dec/tulip/eeprom.c385
-rw-r--r--drivers/net/ethernet/dec/tulip/interrupt.c811
-rw-r--r--drivers/net/ethernet/dec/tulip/media.c556
-rw-r--r--drivers/net/ethernet/dec/tulip/pnic.c173
-rw-r--r--drivers/net/ethernet/dec/tulip/pnic2.c406
-rw-r--r--drivers/net/ethernet/dec/tulip/timer.c179
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip.h573
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c2011
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c1850
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c1670
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c1154
-rw-r--r--drivers/net/ethernet/dlink/Kconfig86
-rw-r--r--drivers/net/ethernet/dlink/Makefile8
-rw-r--r--drivers/net/ethernet/dlink/de600.c530
-rw-r--r--drivers/net/ethernet/dlink/de600.h168
-rw-r--r--drivers/net/ethernet/dlink/de620.c988
-rw-r--r--drivers/net/ethernet/dlink/de620.h117
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c1821
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h448
-rw-r--r--drivers/net/ethernet/dlink/sundance.c1940
-rw-r--r--drivers/net/ethernet/dnet.c996
-rw-r--r--drivers/net/ethernet/dnet.h225
-rw-r--r--drivers/net/ethernet/emulex/Kconfig23
-rw-r--r--drivers/net/ethernet/emulex/Makefile5
-rw-r--r--drivers/net/ethernet/emulex/benet/Kconfig6
-rw-r--r--drivers/net/ethernet/emulex/benet/Makefile7
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h532
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c2384
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h1503
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c724
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h510
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c3636
-rw-r--r--drivers/net/ethernet/ethoc.c1203
-rw-r--r--drivers/net/ethernet/faraday/Kconfig40
-rw-r--r--drivers/net/ethernet/faraday/Makefile6
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c1365
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.h246
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c1198
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.h180
-rw-r--r--drivers/net/ethernet/fealnx.c1976
-rw-r--r--drivers/net/ethernet/freescale/Kconfig88
-rw-r--r--drivers/net/ethernet/freescale/Makefile18
-rw-r--r--drivers/net/ethernet/freescale/fec.c1728
-rw-r--r--drivers/net/ethernet/freescale/fec.h148
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c1104
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.h294
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx_phy.c160
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/Kconfig35
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/Makefile14
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fec.h42
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c1196
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet.h244
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fcc.c584
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c498
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-scc.c484
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c246
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c251
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c494
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.h52
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c3296
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h1216
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c1764
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c583
-rw-r--r--drivers/net/ethernet/freescale/gianfar_sysfs.c341
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c4026
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h1240
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c423
-rw-r--r--drivers/net/ethernet/fujitsu/Kconfig54
-rw-r--r--drivers/net/ethernet/fujitsu/Makefile7
-rw-r--r--drivers/net/ethernet/fujitsu/at1700.c900
-rw-r--r--drivers/net/ethernet/fujitsu/eth16i.c1484
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c1187
-rw-r--r--drivers/net/ethernet/hp/Kconfig32
-rw-r--r--drivers/net/ethernet/hp/Makefile5
-rw-r--r--drivers/net/ethernet/hp/hp100.c3068
-rw-r--r--drivers/net/ethernet/hp/hp100.h615
-rw-r--r--drivers/net/ethernet/i825xx/3c505.c1673
-rw-r--r--drivers/net/ethernet/i825xx/3c505.h292
-rw-r--r--drivers/net/ethernet/i825xx/3c507.c939
-rw-r--r--drivers/net/ethernet/i825xx/3c523.c1312
-rw-r--r--drivers/net/ethernet/i825xx/3c523.h355
-rw-r--r--drivers/net/ethernet/i825xx/3c527.c1661
-rw-r--r--drivers/net/ethernet/i825xx/3c527.h81
-rw-r--r--drivers/net/ethernet/i825xx/82596.c1632
-rw-r--r--drivers/net/ethernet/i825xx/Kconfig183
-rw-r--r--drivers/net/ethernet/i825xx/Makefile20
-rw-r--r--drivers/net/ethernet/i825xx/eepro.c1822
-rw-r--r--drivers/net/ethernet/i825xx/eexpress.c1720
-rw-r--r--drivers/net/ethernet/i825xx/eexpress.h179
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c1094
-rw-r--r--drivers/net/ethernet/i825xx/ether1.h280
-rw-r--r--drivers/net/ethernet/i825xx/lasi_82596.c238
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c1412
-rw-r--r--drivers/net/ethernet/i825xx/lp486e.c1339
-rw-r--r--drivers/net/ethernet/i825xx/ni52.c1346
-rw-r--r--drivers/net/ethernet/i825xx/ni52.h310
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c186
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c1213
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.h318
-rw-r--r--drivers/net/ethernet/i825xx/znet.c924
-rw-r--r--drivers/net/ethernet/ibm/Kconfig48
-rw-r--r--drivers/net/ethernet/ibm/Makefile8
-rw-r--r--drivers/net/ethernet/ibm/ehea/Makefile6
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea.h505
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_ethtool.c295
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_hw.h292
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c3781
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_phyp.c626
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_phyp.h467
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c1031
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.h404
-rw-r--r--drivers/net/ethernet/ibm/emac/Kconfig76
-rw-r--r--drivers/net/ethernet/ibm/emac/Makefile11
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c3074
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h462
-rw-r--r--drivers/net/ethernet/ibm/emac/debug.c270
-rw-r--r--drivers/net/ethernet/ibm/emac/debug.h83
-rw-r--r--drivers/net/ethernet/ibm/emac/emac.h312
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c809
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.h316
-rw-r--r--drivers/net/ethernet/ibm/emac/phy.c541
-rw-r--r--drivers/net/ethernet/ibm/emac/phy.h87
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c338
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.h82
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.c185
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.h95
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c332
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.h78
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c1638
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h195
-rw-r--r--drivers/net/ethernet/ibm/iseries_veth.c1710
-rw-r--r--drivers/net/ethernet/icplus/Kconfig14
-rw-r--r--drivers/net/ethernet/icplus/Makefile5
-rw-r--r--drivers/net/ethernet/icplus/ipg.c2324
-rw-r--r--drivers/net/ethernet/icplus/ipg.h749
-rw-r--r--drivers/net/ethernet/intel/Kconfig222
-rw-r--r--drivers/net/ethernet/intel/Makefile12
-rw-r--r--drivers/net/ethernet/intel/e100.c3109
-rw-r--r--drivers/net/ethernet/intel/e1000/Makefile35
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h363
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c1863
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c5830
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.h3103
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c4971
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_osdep.h109
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_param.c755
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c1515
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c2112
-rw-r--r--drivers/net/ethernet/intel/e1000e/Makefile37
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h844
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h741
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c1995
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h984
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c4152
-rw-r--r--drivers/net/ethernet/intel/e1000e/lib.c2693
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c6440
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c478
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c3377
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile37
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c2084
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h260
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h834
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h529
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c1426
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h90
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c446
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h77
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c713
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h43
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c2347
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h136
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h354
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h426
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c2202
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c6927
-rw-r--r--drivers/net/ethernet/intel/igbvf/Makefile38
-rw-r--r--drivers/net/ethernet/intel/igbvf/defines.h125
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c534
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h326
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.c350
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.h75
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2856
-rw-r--r--drivers/net/ethernet/intel/igbvf/regs.h108
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c402
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.h266
-rw-r--r--drivers/net/ethernet/intel/ixgb/Makefile35
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb.h219
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ee.c607
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ee.h104
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c662
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.c1262
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.h799
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ids.h53
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c2378
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_osdep.h64
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_param.c469
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile42
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h626
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c1334
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c2176
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c3526
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h145
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c366
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h168
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c296
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h97
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c376
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h123
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c797
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c2549
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c835
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h81
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c7860
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c471
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h93
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c1714
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h131
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c892
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h51
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h2941
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c883
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/Makefile38
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h297
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c696
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h320
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c3574
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c341
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h99
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/regs.h85
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c426
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h174
-rw-r--r--drivers/net/ethernet/jme.c3236
-rw-r--r--drivers/net/ethernet/jme.h1260
-rw-r--r--drivers/net/ethernet/korina.c1250
-rw-r--r--drivers/net/ethernet/lantiq_etop.c805
-rw-r--r--drivers/net/ethernet/marvell/Kconfig111
-rw-r--r--drivers/net/ethernet/marvell/Makefile8
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c3022
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c1663
-rw-r--r--drivers/net/ethernet/marvell/skge.c4165
-rw-r--r--drivers/net/ethernet/marvell/skge.h2584
-rw-r--r--drivers/net/ethernet/marvell/sky2.c5162
-rw-r--r--drivers/net/ethernet/marvell/sky2.h2427
-rw-r--r--drivers/net/ethernet/mellanox/Kconfig23
-rw-r--r--drivers/net/ethernet/mellanox/Makefile5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig27
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Makefile9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c414
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c156
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c443
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c319
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c178
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c477
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c315
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c1166
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c278
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.h594
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c102
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c918
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c180
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c828
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c842
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c944
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h182
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c430
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.h134
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c184
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c1529
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c928
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h459
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h607
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c667
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c210
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c487
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c238
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c380
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/reset.c185
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/sense.c156
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c257
-rw-r--r--drivers/net/ethernet/micrel/Kconfig69
-rw-r--r--drivers/net/ethernet/micrel/Makefile9
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c1656
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.h107
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c1284
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c1737
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h309
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c1680
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c7288
-rw-r--r--drivers/net/ethernet/microchip/Kconfig38
-rw-r--r--drivers/net/ethernet/microchip/Makefile5
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c1667
-rw-r--r--drivers/net/ethernet/microchip/enc28j60_hw.h309
-rw-r--r--drivers/net/ethernet/mipsnet.c345
-rw-r--r--drivers/net/ethernet/myricom/Kconfig47
-rw-r--r--drivers/net/ethernet/myricom/Makefile5
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/Makefile5
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c4236
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h435
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h60
-rw-r--r--drivers/net/ethernet/natsemi/Kconfig83
-rw-r--r--drivers/net/ethernet/natsemi/Makefile10
-rw-r--r--drivers/net/ethernet/natsemi/ibmlana.c1075
-rw-r--r--drivers/net/ethernet/natsemi/ibmlana.h278
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c308
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c657
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c3370
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c2311
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c742
-rw-r--r--drivers/net/ethernet/natsemi/sonic.h450
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c333
-rw-r--r--drivers/net/ethernet/neterion/Kconfig55
-rw-r--r--drivers/net/ethernet/neterion/Makefile6
-rw-r--r--drivers/net/ethernet/neterion/s2io-regs.h958
-rw-r--r--drivers/net/ethernet/neterion/s2io.c8674
-rw-r--r--drivers/net/ethernet/neterion/s2io.h1148
-rw-r--r--drivers/net/ethernet/neterion/vxge/Makefile7
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c5122
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h2111
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.c1132
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.h67
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c4858
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h519
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-reg.h4636
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c2514
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.h2298
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-version.h49
-rw-r--r--drivers/net/ethernet/netx-eth.c515
-rw-r--r--drivers/net/ethernet/nuvoton/Kconfig31
-rw-r--r--drivers/net/ethernet/nuvoton/Makefile5
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c1123
-rw-r--r--drivers/net/ethernet/nvidia/Kconfig32
-rw-r--r--drivers/net/ethernet/nvidia/Makefile5
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c6019
-rw-r--r--drivers/net/ethernet/octeon/Kconfig14
-rw-r--r--drivers/net/ethernet/octeon/Makefile5
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c1168
-rw-r--r--drivers/net/ethernet/oki-semi/Kconfig23
-rw-r--r--drivers/net/ethernet/oki-semi/Makefile5
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/Kconfig22
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/Makefile4
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h663
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c245
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h36
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c517
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c2604
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c503
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c274
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h37
-rw-r--r--drivers/net/ethernet/packetengines/Kconfig47
-rw-r--r--drivers/net/ethernet/packetengines/Makefile6
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c1952
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c1430
-rw-r--r--drivers/net/ethernet/pasemi/Kconfig30
-rw-r--r--drivers/net/ethernet/pasemi/Makefile5
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c1909
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.h216
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c160
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig54
-rw-r--r--drivers/net/ethernet/qlogic/Makefile8
-rw-r--r--drivers/net/ethernet/qlogic/netxen/Makefile29
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h1441
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c793
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c835
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h1050
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c1976
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h287
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c1949
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c3161
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c3968
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.h1189
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/Makefile8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h1575
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c1127
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c1247
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h1025
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c1787
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c1904
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c4509
-rw-r--r--drivers/net/ethernet/qlogic/qlge/Makefile7
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h2334
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c2044
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c688
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c4981
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_mpi.c1284
-rw-r--r--drivers/net/ethernet/racal/Kconfig33
-rw-r--r--drivers/net/ethernet/racal/Makefile5
-rw-r--r--drivers/net/ethernet/racal/ni5010.c771
-rw-r--r--drivers/net/ethernet/racal/ni5010.h144
-rw-r--r--drivers/net/ethernet/rdc/Kconfig35
-rw-r--r--drivers/net/ethernet/rdc/Makefile5
-rw-r--r--drivers/net/ethernet/rdc/r6040.c1276
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c2063
-rw-r--r--drivers/net/ethernet/realtek/8139too.c2622
-rw-r--r--drivers/net/ethernet/realtek/Kconfig130
-rw-r--r--drivers/net/ethernet/realtek/Makefile9
-rw-r--r--drivers/net/ethernet/realtek/atp.c940
-rw-r--r--drivers/net/ethernet/realtek/atp.h259
-rw-r--r--drivers/net/ethernet/realtek/r8169.c6219
-rw-r--r--drivers/net/ethernet/realtek/sc92031.c1609
-rw-r--r--drivers/net/ethernet/renesas/Kconfig19
-rw-r--r--drivers/net/ethernet/renesas/Makefile5
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c1975
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h830
-rw-r--r--drivers/net/ethernet/s6gmac.c1072
-rw-r--r--drivers/net/ethernet/seeq/Kconfig47
-rw-r--r--drivers/net/ethernet/seeq/Makefile7
-rw-r--r--drivers/net/ethernet/seeq/ether3.c918
-rw-r--r--drivers/net/ethernet/seeq/ether3.h176
-rw-r--r--drivers/net/ethernet/seeq/seeq8005.c752
-rw-r--r--drivers/net/ethernet/seeq/seeq8005.h156
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c858
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.h103
-rw-r--r--drivers/net/ethernet/sfc/Kconfig21
-rw-r--r--drivers/net/ethernet/sfc/Makefile8
-rw-r--r--drivers/net/ethernet/sfc/bitfield.h538
-rw-r--r--drivers/net/ethernet/sfc/efx.c2732
-rw-r--r--drivers/net/ethernet/sfc/efx.h150
-rw-r--r--drivers/net/ethernet/sfc/enum.h167
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c1029
-rw-r--r--drivers/net/ethernet/sfc/falcon.c1843
-rw-r--r--drivers/net/ethernet/sfc/falcon_boards.c776
-rw-r--r--drivers/net/ethernet/sfc/falcon_xmac.c369
-rw-r--r--drivers/net/ethernet/sfc/filter.c727
-rw-r--r--drivers/net/ethernet/sfc/filter.h112
-rw-r--r--drivers/net/ethernet/sfc/io.h293
-rw-r--r--drivers/net/ethernet/sfc/mac.h21
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c1191
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h130
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mac.c145
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h1775
-rw-r--r--drivers/net/ethernet/sfc/mcdi_phy.c754
-rw-r--r--drivers/net/ethernet/sfc/mdio_10g.c323
-rw-r--r--drivers/net/ethernet/sfc/mdio_10g.h112
-rw-r--r--drivers/net/ethernet/sfc/mtd.c693
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h1060
-rw-r--r--drivers/net/ethernet/sfc/nic.c1962
-rw-r--r--drivers/net/ethernet/sfc/nic.h272
-rw-r--r--drivers/net/ethernet/sfc/phy.h67
-rw-r--r--drivers/net/ethernet/sfc/qt202x_phy.c462
-rw-r--r--drivers/net/ethernet/sfc/regs.h3188
-rw-r--r--drivers/net/ethernet/sfc/rx.c749
-rw-r--r--drivers/net/ethernet/sfc/selftest.c761
-rw-r--r--drivers/net/ethernet/sfc/selftest.h53
-rw-r--r--drivers/net/ethernet/sfc/siena.c661
-rw-r--r--drivers/net/ethernet/sfc/spi.h99
-rw-r--r--drivers/net/ethernet/sfc/tenxpress.c494
-rw-r--r--drivers/net/ethernet/sfc/tx.c1207
-rw-r--r--drivers/net/ethernet/sfc/txc43128_phy.c560
-rw-r--r--drivers/net/ethernet/sfc/workarounds.h59
-rw-r--r--drivers/net/ethernet/sgi/Kconfig36
-rw-r--r--drivers/net/ethernet/sgi/Makefile6
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c1684
-rw-r--r--drivers/net/ethernet/sgi/meth.c855
-rw-r--r--drivers/net/ethernet/sgi/meth.h243
-rw-r--r--drivers/net/ethernet/sis/Kconfig53
-rw-r--r--drivers/net/ethernet/sis/Makefile6
-rw-r--r--drivers/net/ethernet/sis/sis190.c1956
-rw-r--r--drivers/net/ethernet/sis/sis900.c2494
-rw-r--r--drivers/net/ethernet/sis/sis900.h329
-rw-r--r--drivers/net/ethernet/smsc/Kconfig137
-rw-r--r--drivers/net/ethernet/smsc/Makefile11
-rw-r--r--drivers/net/ethernet/smsc/epic100.c1609
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c2210
-rw-r--r--drivers/net/ethernet/smsc/smc911x.h924
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c1589
-rw-r--r--drivers/net/ethernet/smsc/smc9194.h241
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c2070
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c2431
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h1180
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c2406
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.h404
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c1763
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.h276
-rw-r--r--drivers/net/ethernet/stmicro/Kconfig23
-rw-r--r--drivers/net/ethernet/stmicro/Makefile5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig66
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h276
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h163
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100.h121
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h208
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c244
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c153
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c194
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c143
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h109
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c258
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c337
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h131
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c265
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c221
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h88
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c473
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2172
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c247
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c134
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h42
-rw-r--r--drivers/net/ethernet/sun/Kconfig88
-rw-r--r--drivers/net/ethernet/sun/Makefile11
-rw-r--r--drivers/net/ethernet/sun/cassini.c5303
-rw-r--r--drivers/net/ethernet/sun/cassini.h2914
-rw-r--r--drivers/net/ethernet/sun/niu.c10265
-rw-r--r--drivers/net/ethernet/sun/niu.h3306
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c1307
-rw-r--r--drivers/net/ethernet/sun/sunbmac.h338
-rw-r--r--drivers/net/ethernet/sun/sungem.c3047
-rw-r--r--drivers/net/ethernet/sun/sungem.h1027
-rw-r--r--drivers/net/ethernet/sun/sunhme.c3359
-rw-r--r--drivers/net/ethernet/sun/sunhme.h512
-rw-r--r--drivers/net/ethernet/sun/sunqe.c1007
-rw-r--r--drivers/net/ethernet/sun/sunqe.h350
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c1284
-rw-r--r--drivers/net/ethernet/sun/sunvnet.h83
-rw-r--r--drivers/net/ethernet/tehuti/Kconfig27
-rw-r--r--drivers/net/ethernet/tehuti/Makefile5
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2470
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.h561
-rw-r--r--drivers/net/ethernet/ti/Kconfig77
-rw-r--r--drivers/net/ethernet/ti/Makefile9
-rw-r--r--drivers/net/ethernet/ti/cpmac.c1305
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c970
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.h109
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c2047
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c475
-rw-r--r--drivers/net/ethernet/ti/tlan.c3258
-rw-r--r--drivers/net/ethernet/ti/tlan.h546
-rw-r--r--drivers/net/ethernet/tile/Kconfig15
-rw-r--r--drivers/net/ethernet/tile/Makefile10
-rw-r--r--drivers/net/ethernet/tile/tilepro.c2465
-rw-r--r--drivers/net/ethernet/toshiba/Kconfig57
-rw-r--r--drivers/net/ethernet/toshiba/Makefile10
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c1896
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h380
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c2681
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.h326
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2603
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.h489
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c179
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c2228
-rw-r--r--drivers/net/ethernet/tundra/Kconfig29
-rw-r--r--drivers/net/ethernet/tundra/Makefile5
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c1728
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.h356
-rw-r--r--drivers/net/ethernet/via/Kconfig59
-rw-r--r--drivers/net/ethernet/via/Makefile6
-rw-r--r--drivers/net/ethernet/via/via-rhine.c2340
-rw-r--r--drivers/net/ethernet/via/via-velocity.c3589
-rw-r--r--drivers/net/ethernet/via/via-velocity.h1579
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig36
-rw-r--r--drivers/net/ethernet/xilinx/Makefile7
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac.h385
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c1153
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_mdio.c122
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c1330
-rw-r--r--drivers/net/ethernet/xircom/Kconfig31
-rw-r--r--drivers/net/ethernet/xircom/Makefile5
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c1813
-rw-r--r--drivers/net/ethernet/xscale/Kconfig32
-rw-r--r--drivers/net/ethernet/xscale/Makefile6
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/Kconfig6
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/Makefile3
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/caleb.c136
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/caleb.h22
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/enp2611.c232
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c212
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h115
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc408
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode130
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc272
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode98
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixpdev.c440
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixpdev.h29
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h57
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/pm3386.c351
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/pm3386.h29
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c1489
890 files changed, 852230 insertions, 0 deletions
diff --git a/drivers/net/ethernet/3com/3c501.c b/drivers/net/ethernet/3com/3c501.c
new file mode 100644
index 000000000000..68da81d476f3
--- /dev/null
+++ b/drivers/net/ethernet/3com/3c501.c
@@ -0,0 +1,896 @@
+/* 3c501.c: A 3Com 3c501 Ethernet driver for Linux. */
+/*
+ Written 1992,1993,1994 Donald Becker
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+
+ This is a device driver for the 3Com Etherlink 3c501.
+	Do not purchase this card, even as a joke. Its performance is horrible,
+ and it breaks in many ways.
+
+ The original author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Fixed (again!) the missing interrupt locking on TX/RX shifting.
+ Alan Cox <alan@lxorguk.ukuu.org.uk>
+
+ Removed calls to init_etherdev since they are no longer needed, and
+ cleaned up modularization just a bit. The driver still allows only
+ the default address for cards when loaded as a module, but that's
+ really less braindead than anyone using a 3c501 board. :)
+ 19950208 (invid@msen.com)
+
+ Added traps for interrupts hitting the window as we clear and TX load
+ the board. Now getting 150K/second FTP with a 3c501 card. Still playing
+	with a TX-TX optimisation to see if we can touch 180-200K/second, which
+	seems to be the theoretical maximum.
+ 19950402 Alan Cox <alan@lxorguk.ukuu.org.uk>
+
+ Cleaned up for 2.3.x because we broke SMP now.
+ 20000208 Alan Cox <alan@lxorguk.ukuu.org.uk>
+
+	Check-up pass for 2.5. Nothing significant changed
+ 20021009 Alan Cox <alan@lxorguk.ukuu.org.uk>
+
+ Fixed zero fill corner case
+ 20030104 Alan Cox <alan@lxorguk.ukuu.org.uk>
+
+
+ For the avoidance of doubt the "preferred form" of this code is one which
+ is in an open non patent encumbered format. Where cryptographic key signing
+ forms part of the process of creating an executable the information
+ including keys needed to generate an equivalently functional executable
+ are deemed to be part of the source code.
+
+*/
+
+
+/**
+ * DOC: 3c501 Card Notes
+ *
+ * Some notes on this thing if you have to hack it. [Alan]
+ *
+ * Some documentation is available from 3Com. Due to the board's age,
+ * standard responses when you ask for it will range from 'be serious'
+ * to 'give it to a museum'. The documentation is incomplete and mostly
+ * of historical interest anyway.
+ *
+ * The basic system is a single buffer which can be used to receive or
+ * transmit a packet. A third command mode exists when you are setting
+ * things up.
+ *
+ * If it's transmitting, it's not receiving, and vice versa. In fact the
+ * time to get the board back into a useful state after an operation is
+ * quite large.
+ *
+ * The driver works by keeping the board in receive mode waiting for a
+ * packet to arrive. When one arrives it is copied out of the buffer
+ * and delivered to the kernel. The card is reloaded and off we go.
+ *
+ * When transmitting, lp->txing is set and the card is reset (from
+ * receive mode) [possibly losing a packet just received] to command
+ * mode. A packet is loaded and transmit mode triggered. The interrupt
+ * handler runs different code for transmit interrupts and can handle
+ * returning to receive mode or retransmissions (yes you have to help
+ * out with those too).
+ *
+ * DOC: Problems
+ *
+ * There are a wide variety of undocumented error returns from the card
+ * and you basically have to kick the board and pray if they turn up. Most
+ * only occur under extreme load or if you do something the board doesn't
+ * like (e.g. touching a register at the wrong time).
+ *
+ * The driver is less efficient than it could be. It switches through
+ * receive mode even if more transmits are queued. If this worries you buy
+ * a real Ethernet card.
+ *
+ * The combination of slow receive restart and no real multicast
+ * filter makes the board unusable with a kernel compiled for IP
+ * multicasting in a real multicast environment. That's down to the board,
+ * but even with no multicast programs running, a multicast IP kernel is
+ * in group 224.0.0.1 and you will therefore be listening to all multicasts.
+ * One nv conference running over that Ethernet and you can give up.
+ *
+ */
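
To make the single-buffer state machine described above concrete, here is
an editor's illustrative sketch in plain C. It is not part of the patch,
and the names (el_state, el_model_start_tx) are invented; the real driver
keeps the equivalent state in lp->txing:

	/* The card is in exactly one of three states at any time. */
	enum el_state { EL_COMMAND, EL_RECEIVING, EL_TRANSMITTING };

	static enum el_state state = EL_RECEIVING; /* idle == waiting for RX */

	static void el_model_start_tx(void)
	{
		/* Leaving RX mode may drop a packet that is mid-receive. */
		state = EL_COMMAND;      /* reset the board to command mode */
		/* ... load the packet into the single on-board buffer ... */
		state = EL_TRANSMITTING; /* trigger the transmit */
		/* The interrupt handler later returns us to EL_RECEIVING. */
	}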
+
+#define DRV_NAME "3c501"
+#define DRV_VERSION "2002/10/09"
+
+
+static const char version[] =
+ DRV_NAME ".c: " DRV_VERSION " Alan Cox (alan@lxorguk.ukuu.org.uk).\n";
+
+/*
+ * Braindamage remaining:
+ * The 3c501 board.
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/fcntl.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+
+#include "3c501.h"
+
+/*
+ * The boilerplate probe code.
+ */
+
+static int io = 0x280;
+static int irq = 5;
+static int mem_start;
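
In the full driver these statics are conventionally exposed as module
parameters so io= and irq= can be overridden at load time. The actual
declarations are not visible in this excerpt; a typical sketch (editor's
illustration, not quoted from the patch) would be:

	module_param(io, int, 0);
	module_param(irq, int, 0);
	module_param(mem_start, int, 0);  /* low bits double as debug level */
	MODULE_PARM_DESC(io, "EtherLink I/O base address");
	MODULE_PARM_DESC(irq, "EtherLink IRQ number");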
+
+/**
+ * el1_probe: - probe for a 3c501
+ * @unit: Unit number of the interface to set up (eth%d); negative when
+ * the driver is loaded as a module.
+ *
+ * This can be called from two places. The network layer will probe using
+ * a device structure passed in with the probe information completed. For a
+ * modular driver we use #init_module to fill in our own structure and probe
+ * for it.
+ *
+ * Returns the new network device on success. Returns an ERR_PTR encoding
+ * -ENXIO if asked not to probe, or -ENODEV if a probe was requested and
+ * nothing was found.
+ */
+
+struct net_device * __init el1_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
+ static const unsigned ports[] = { 0x280, 0x300, 0};
+ const unsigned *port;
+ int err = 0;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ io = dev->base_addr;
+ irq = dev->irq;
+ mem_start = dev->mem_start & 7;
+ }
+
+ if (io > 0x1ff) { /* Check a single specified location. */
+ err = el1_probe1(dev, io);
+ } else if (io != 0) {
+ err = -ENXIO; /* Don't probe at all. */
+ } else {
+ for (port = ports; *port && el1_probe1(dev, *port); port++)
+ ;
+ if (!*port)
+ err = -ENODEV;
+ }
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ release_region(dev->base_addr, EL1_IO_EXTENT);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
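
Note the ERR_PTR convention above: callers must test the result with
IS_ERR() rather than comparing against NULL. A minimal, hypothetical caller
(editor's sketch, not from the patch; the function name is invented):

	static int el1_init_sketch(void)
	{
		struct net_device *dev = el1_probe(-1); /* module-style probe */

		if (IS_ERR(dev))
			return PTR_ERR(dev); /* -ENOMEM, -ENXIO or -ENODEV */
		/* success: the device has already been registered */
		return 0;
	}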
+
+static const struct net_device_ops el_netdev_ops = {
+ .ndo_open = el_open,
+ .ndo_stop = el1_close,
+ .ndo_start_xmit = el_start_xmit,
+ .ndo_tx_timeout = el_timeout,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/**
+ * el1_probe1:
+ * @dev: The device structure to use
+ * @ioaddr: An I/O address to probe at.
+ *
+ * The actual probe. This is iterated over by #el1_probe in order to
+ * check all the applicable device locations.
+ *
+ * Returns 0 on success, in which case the device is activated,
+ * -EAGAIN if the IRQ is in use by another driver, and -ENODEV if the
+ * board cannot be found.
+ */
+
+static int __init el1_probe1(struct net_device *dev, int ioaddr)
+{
+ struct net_local *lp;
+ const char *mname; /* Vendor name */
+ unsigned char station_addr[6];
+ int autoirq = 0;
+ int i;
+
+ /*
+ * Reserve I/O resource for exclusive use by this driver
+ */
+
+ if (!request_region(ioaddr, EL1_IO_EXTENT, DRV_NAME))
+ return -ENODEV;
+
+ /*
+ * Read the station address PROM data from the special port.
+ */
+
+ for (i = 0; i < 6; i++) {
+ outw(i, ioaddr + EL1_DATAPTR);
+ station_addr[i] = inb(ioaddr + EL1_SAPROM);
+ }
+ /*
+ * Check the first three octets of the S.A. for 3Com's prefix, or
+ * for the Sager NP943 prefix.
+ */
+
+ if (station_addr[0] == 0x02 && station_addr[1] == 0x60 &&
+ station_addr[2] == 0x8c)
+ mname = "3c501";
+ else if (station_addr[0] == 0x00 && station_addr[1] == 0x80 &&
+ station_addr[2] == 0xC8)
+ mname = "NP943";
+ else {
+ release_region(ioaddr, EL1_IO_EXTENT);
+ return -ENODEV;
+ }
+
+ /*
+ * We auto-IRQ by shutting off the interrupt line and letting it
+ * float high.
+ */
+
+ dev->irq = irq;
+
+ if (dev->irq < 2) {
+ unsigned long irq_mask;
+
+ irq_mask = probe_irq_on();
+ inb(RX_STATUS); /* Clear pending interrupts. */
+ inb(TX_STATUS);
+ outb(AX_LOOP + 1, AX_CMD);
+
+ outb(0x00, AX_CMD);
+
+ mdelay(20);
+ autoirq = probe_irq_off(irq_mask);
+
+ if (autoirq == 0) {
+ pr_warning("%s probe at %#x failed to detect IRQ line.\n",
+ mname, ioaddr);
+ release_region(ioaddr, EL1_IO_EXTENT);
+ return -EAGAIN;
+ }
+ }
+
+ outb(AX_RESET+AX_LOOP, AX_CMD); /* Loopback mode. */
+ dev->base_addr = ioaddr;
+ memcpy(dev->dev_addr, station_addr, ETH_ALEN);
+
+ if (mem_start & 0xf)
+ el_debug = mem_start & 0x7;
+ if (autoirq)
+ dev->irq = autoirq;
+
+ pr_info("%s: %s EtherLink at %#lx, using %sIRQ %d.\n",
+ dev->name, mname, dev->base_addr,
+ autoirq ? "auto":"assigned ", dev->irq);
+
+#ifdef CONFIG_IP_MULTICAST
+ pr_warning("WARNING: Use of the 3c501 in a multicast kernel is NOT recommended.\n");
+#endif
+
+ if (el_debug)
+ pr_debug("%s", version);
+
+ lp = netdev_priv(dev);
+ memset(lp, 0, sizeof(struct net_local));
+ spin_lock_init(&lp->lock);
+
+ /*
+ * The EL1-specific entries in the device structure.
+ */
+
+ dev->netdev_ops = &el_netdev_ops;
+ dev->watchdog_timeo = HZ;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+ return 0;
+}
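+
+/*
+ * Illustrative sketch, not part of the driver: the generic ISA auto-IRQ
+ * dance performed by el1_probe1() above. probe_irq_on(), probe_irq_off()
+ * and mdelay() are the real kernel helpers; example_trigger_irq() is a
+ * hypothetical stand-in for the board-specific poke that raises the line.
+ */
+#if 0
+static int example_autoirq(void)
+{
+ unsigned long mask = probe_irq_on(); /* watch the free IRQ lines */
+
+ example_trigger_irq(); /* hypothetical: make the card interrupt */
+ mdelay(20); /* give the line time to latch */
+ return probe_irq_off(mask); /* >0 IRQ, 0 none found, <0 several */
+}
+#endif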
+
+/**
+ * el_open:
+ * @dev: device that is being opened
+ *
+ * When an ifconfig is issued which changes the device flags to include
+ * IFF_UP this function is called. It is only called when the change
+ * occurs, not when the interface remains up. #el1_close will be called
+ * when it goes down.
+ *
+ * Returns 0 for a successful open, or the error from request_irq() if
+ * someone has run off with our interrupt line.
+ */
+
+static int el_open(struct net_device *dev)
+{
+ int retval;
+ int ioaddr = dev->base_addr;
+ struct net_local *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ if (el_debug > 2)
+ pr_debug("%s: Doing el_open()...\n", dev->name);
+
+ retval = request_irq(dev->irq, el_interrupt, 0, dev->name, dev);
+ if (retval)
+ return retval;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ el_reset(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ lp->txing = 0; /* Board in RX mode */
+ outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */
+ netif_start_queue(dev);
+ return 0;
+}
+
+/**
+ * el_timeout:
+ * @dev: The 3c501 card that has timed out
+ *
+ * Attempt to restart the board. This is basically a mixture of extreme
+ * violence and prayer
+ *
+ */
+
+static void el_timeout(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ if (el_debug)
+ pr_debug("%s: transmit timed out, txsr %#2x axsr=%02x rxsr=%02x.\n",
+ dev->name, inb(TX_STATUS),
+ inb(AX_STATUS), inb(RX_STATUS));
+ dev->stats.tx_errors++;
+ outb(TX_NORM, TX_CMD);
+ outb(RX_NORM, RX_CMD);
+ outb(AX_OFF, AX_CMD); /* Just trigger a false interrupt. */
+ outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */
+ lp->txing = 0; /* Ripped back in to RX */
+ netif_wake_queue(dev);
+}
+
+
+/**
+ * el_start_xmit:
+ * @skb: The packet that is queued to be sent
+ * @dev: The 3c501 card we want to throw it down
+ *
+ * Attempt to send a packet to a 3c501 card. There are some interesting
+ * catches here because the 3c501 is an extremely old and therefore
+ * stupid piece of technology.
+ *
+ * If we are handling an interrupt on the other CPU we cannot load a packet
+ * as we may still be attempting to retrieve the last RX packet buffer.
+ *
+ * When a transmit times out we dump the card into control mode and just
+ * start again. It happens enough that it isn't worth logging.
+ *
+ * We avoid holding the spin locks when doing the packet load to the board.
+ * The device is very slow, and its DMA mode is even slower. If we held the
+ * lock while loading 1500 bytes onto the controller we would drop a lot of
+ * serial port characters. This requires we do extra locking, but we have
+ * no real choice.
+ */
+
+static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ /*
+ * Avoid incoming interrupts between us flipping txing and flipping
+ * mode as the driver assumes txing is a faithful indicator of card
+ * state
+ */
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ /*
+ * Avoid timer-based retransmission conflicts.
+ */
+
+ netif_stop_queue(dev);
+
+ do {
+ int len = skb->len;
+ int pad = 0;
+ int gp_start;
+ unsigned char *buf = skb->data;
+
+ if (len < ETH_ZLEN)
+ pad = ETH_ZLEN - len;
+
+ gp_start = 0x800 - (len + pad);
+
+ lp->tx_pkt_start = gp_start;
+ lp->collisions = 0;
+
+ dev->stats.tx_bytes += skb->len;
+
+ /*
+ * Command mode with status cleared should [in theory]
+ * mean no more interrupts can be pending on the card.
+ */
+
+ outb_p(AX_SYS, AX_CMD);
+ inb_p(RX_STATUS);
+ inb_p(TX_STATUS);
+
+ lp->loading = 1;
+ lp->txing = 1;
+
+ /*
+ * Turn interrupts back on while we spend a pleasant
+ * afternoon loading bytes into the board
+ */
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ /* Set rx packet area to 0. */
+ outw(0x00, RX_BUF_CLR);
+ /* aim - packet will be loaded into buffer start */
+ outw(gp_start, GP_LOW);
+ /* load buffer (usual thing each byte increments the pointer) */
+ outsb(DATAPORT, buf, len);
+ if (pad) {
+ while (pad--) /* Zero fill buffer tail */
+ outb(0, DATAPORT);
+ }
+ /* the board reuses the same register */
+ outw(gp_start, GP_LOW);
+
+ if (lp->loading != 2) {
+ /* fire ... Trigger xmit. */
+ outb(AX_XMIT, AX_CMD);
+ lp->loading = 0;
+ if (el_debug > 2)
+ pr_debug(" queued xmit.\n");
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ /* A receive upset our load, despite our best efforts */
+ if (el_debug > 2)
+ pr_debug("%s: burped during tx load.\n", dev->name);
+ spin_lock_irqsave(&lp->lock, flags);
+ } while (1);
+}
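+
+/*
+ * Illustrative sketch, not part of the driver: the handshake between
+ * el_start_xmit() and el_interrupt() over lp->loading, with the lock
+ * dropped for the slow PIO copy.
+ */
+#if 0
+ lp->loading = 1; /* xmit: copy begins, lock released */
+ /* ...el_interrupt() on a receive sets lp->loading = 2 and bails... */
+ if (lp->loading != 2) { /* nobody burped: fire the transmit */
+ outb(AX_XMIT, AX_CMD);
+ lp->loading = 0;
+ }
+ /* else: retake the lock and reload the whole packet */
+#endif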
+
+/**
+ * el_interrupt:
+ * @irq: Interrupt number
+ * @dev_id: The 3c501 that burped
+ *
+ * Handle the ether interface interrupts. The 3c501 needs a lot more
+ * hand holding than most cards. In particular we get a transmit interrupt
+ * with a collision error because the board firmware isn't capable of rewinding
+ * its own transmit buffer pointers. It can however count to 16 for us.
+ *
+ * On the receive side the card is also very dumb. It has no buffering to
+ * speak of. We simply pull the packet out of its PIO buffer (which is slow)
+ * and queue it for the kernel. Then we reset the card for the next packet.
+ *
+ * We sometimes get surprise interrupts late both because the SMP IRQ delivery
+ * is message passing and because the card sometimes seems to deliver late. I
+ * think if it is part way through a receive and the mode is changed it carries
+ * on receiving and sends us an interrupt. We have to band-aid all these cases
+ * to get a sensible 150kBytes/second performance. Even then you want a small
+ * TCP window.
+ */
+
+static irqreturn_t el_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct net_local *lp;
+ int ioaddr;
+ int axsr; /* Aux. status reg. */
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ spin_lock(&lp->lock);
+
+ /*
+ * What happened ?
+ */
+
+ axsr = inb(AX_STATUS);
+
+ /*
+ * Log it
+ */
+
+ if (el_debug > 3)
+ pr_debug("%s: el_interrupt() aux=%#02x\n", dev->name, axsr);
+
+ if (lp->loading == 1 && !lp->txing)
+ pr_warning("%s: Inconsistent state loading while not in tx\n",
+ dev->name);
+
+ if (lp->txing) {
+ /*
+ * Board in transmit mode. May be loading. If we are
+ * loading we shouldn't have got this.
+ */
+ int txsr = inb(TX_STATUS);
+
+ if (lp->loading == 1) {
+ if (el_debug > 2)
+ pr_debug("%s: Interrupt while loading [txsr=%02x gp=%04x rp=%04x]\n",
+ dev->name, txsr, inw(GP_LOW), inw(RX_LOW));
+
+ /* Force a reload */
+ lp->loading = 2;
+ spin_unlock(&lp->lock);
+ goto out;
+ }
+ if (el_debug > 6)
+ pr_debug("%s: txsr=%02x gp=%04x rp=%04x\n", dev->name,
+ txsr, inw(GP_LOW), inw(RX_LOW));
+
+ if ((axsr & 0x80) && (txsr & TX_READY) == 0) {
+ /*
+ * FIXME: is there a logic to whether to keep
+ * on trying or reset immediately ?
+ */
+ if (el_debug > 1)
+ pr_debug("%s: Unusual interrupt during Tx, txsr=%02x axsr=%02x gp=%03x rp=%03x.\n",
+ dev->name, txsr, axsr,
+ inw(ioaddr + EL1_DATAPTR),
+ inw(ioaddr + EL1_RXPTR));
+ lp->txing = 0;
+ netif_wake_queue(dev);
+ } else if (txsr & TX_16COLLISIONS) {
+ /*
+ * Timed out
+ */
+ if (el_debug)
+ pr_debug("%s: Transmit failed 16 times, Ethernet jammed?\n", dev->name);
+ outb(AX_SYS, AX_CMD);
+ lp->txing = 0;
+ dev->stats.tx_aborted_errors++;
+ netif_wake_queue(dev);
+ } else if (txsr & TX_COLLISION) {
+ /*
+ * Retrigger xmit.
+ */
+
+ if (el_debug > 6)
+ pr_debug("%s: retransmitting after a collision.\n", dev->name);
+ /*
+ * Poor little chip can't reset its own start
+ * pointer
+ */
+
+ outb(AX_SYS, AX_CMD);
+ outw(lp->tx_pkt_start, GP_LOW);
+ outb(AX_XMIT, AX_CMD);
+ dev->stats.collisions++;
+ spin_unlock(&lp->lock);
+ goto out;
+ } else {
+ /*
+ * It worked; we will now fall through and receive
+ */
+ dev->stats.tx_packets++;
+ if (el_debug > 6)
+ pr_debug("%s: Tx succeeded %s\n", dev->name,
+ (txsr & TX_RDY) ? "." : "but tx is busy!");
+ /*
+ * This is safe as the interrupt is atomic with respect to itself.
+ */
+ lp->txing = 0;
+ /* In case more to transmit */
+ netif_wake_queue(dev);
+ }
+ } else {
+ /*
+ * In receive mode.
+ */
+
+ int rxsr = inb(RX_STATUS);
+ if (el_debug > 5)
+ pr_debug("%s: rxsr=%02x txsr=%02x rp=%04x\n",
+ dev->name, rxsr, inb(TX_STATUS), inw(RX_LOW));
+ /*
+ * Just reading rx_status fixes most errors.
+ */
+ if (rxsr & RX_MISSED)
+ dev->stats.rx_missed_errors++;
+ else if (rxsr & RX_RUNT) {
+ /* Handled to avoid board lock-up. */
+ dev->stats.rx_length_errors++;
+ if (el_debug > 5)
+ pr_debug("%s: runt.\n", dev->name);
+ } else if (rxsr & RX_GOOD) {
+ /*
+ * Receive worked.
+ */
+ el_receive(dev);
+ } else {
+ /*
+ * Nothing? Something is broken!
+ */
+ if (el_debug > 2)
+ pr_debug("%s: No packet seen, rxsr=%02x **resetting 3c501***\n",
+ dev->name, rxsr);
+ el_reset(dev);
+ }
+ }
+
+ /*
+ * Move into receive mode
+ */
+
+ outb(AX_RX, AX_CMD);
+ outw(0x00, RX_BUF_CLR);
+ inb(RX_STATUS); /* Be certain that interrupts are cleared. */
+ inb(TX_STATUS);
+ spin_unlock(&lp->lock);
+out:
+ return IRQ_HANDLED;
+}
+
+
+/**
+ * el_receive:
+ * @dev: Device to pull the packets from
+ *
+ * We have a good packet. Well, not really "good", just mostly not broken.
+ * We must check everything to see if it is good. In particular we occasionally
+ * get wild packet sizes from the card. If the packet seems sane we PIO it
+ * off the card and queue it for the protocol layers.
+ */
+
+static void el_receive(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ int pkt_len;
+ struct sk_buff *skb;
+
+ pkt_len = inw(RX_LOW);
+
+ if (el_debug > 4)
+ pr_debug(" el_receive %d.\n", pkt_len);
+
+ if (pkt_len < 60 || pkt_len > 1536) {
+ if (el_debug)
+ pr_debug("%s: bogus packet, length=%d\n",
+ dev->name, pkt_len);
+ dev->stats.rx_over_errors++;
+ return;
+ }
+
+ /*
+ * Command mode so we can empty the buffer
+ */
+
+ outb(AX_SYS, AX_CMD);
+ skb = dev_alloc_skb(pkt_len+2);
+
+ /*
+ * Start of frame
+ */
+
+ outw(0x00, GP_LOW);
+ if (skb == NULL) {
+ pr_info("%s: Memory squeeze, dropping packet.\n", dev->name);
+ dev->stats.rx_dropped++;
+ return;
+ } else {
+ skb_reserve(skb, 2); /* Force 16 byte alignment */
+ /*
+ * The read increments through the bytes. The interrupt
+ * handler will fix the pointer when it returns to
+ * receive mode.
+ */
+ insb(DATAPORT, skb_put(skb, pkt_len), pkt_len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+}
+
+/**
+ * el_reset: Reset a 3c501 card
+ * @dev: The 3c501 card about to get zapped
+ *
+ * Even resetting a 3c501 isn't simple. When you activate reset it loses all
+ * its configuration. You must hold the lock when doing this. The function
+ * cannot take the lock itself as it is callable from the irq handler.
+ */
+
+static void el_reset(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ if (el_debug > 2)
+ pr_info("3c501 reset...\n");
+ outb(AX_RESET, AX_CMD); /* Reset the chip */
+ /* Aux control, irq and loopback enabled */
+ outb(AX_LOOP, AX_CMD);
+ {
+ int i;
+ for (i = 0; i < 6; i++) /* Set the station address. */
+ outb(dev->dev_addr[i], ioaddr + i);
+ }
+
+ outw(0, RX_BUF_CLR); /* Set rx packet area to 0. */
+ outb(TX_NORM, TX_CMD); /* tx irq on done, collision */
+ outb(RX_NORM, RX_CMD); /* Set Rx commands. */
+ inb(RX_STATUS); /* Clear status. */
+ inb(TX_STATUS);
+ lp->txing = 0;
+}
+
+/**
+ * el1_close:
+ * @dev: 3c501 card to shut down
+ *
+ * Close a 3c501 card. The IFF_UP flag has been cleared by the user via
+ * the SIOCSIFFLAGS ioctl. We stop any further transmissions being queued,
+ * and then disable the interrupts. Finally we reset the chip. Anything
+ * else is cleaned up on the next #el1_open. Always returns 0, indicating
+ * success.
+ */
+
+static int el1_close(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (el_debug > 2)
+ pr_info("%s: Shutting down Ethernet card at %#x.\n",
+ dev->name, ioaddr);
+
+ netif_stop_queue(dev);
+
+ /*
+ * Free and disable the IRQ.
+ */
+
+ free_irq(dev->irq, dev);
+ outb(AX_RESET, AX_CMD); /* Reset the chip */
+
+ return 0;
+}
+
+/**
+ * set_multicast_list:
+ * @dev: The device to adjust
+ *
+ * Set or clear the multicast filter for this adaptor to use the best-effort
+ * filtering supported. The 3c501 supports only three modes of filtering.
+ * It always receives broadcasts and packets for itself. You can choose to
+ * optionally receive all packets, or all multicast packets on top of this.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (dev->flags & IFF_PROMISC) {
+ outb(RX_PROM, RX_CMD);
+ inb(RX_STATUS);
+ } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
+ /* Multicast or all multicast is the same */
+ outb(RX_MULT, RX_CMD);
+ inb(RX_STATUS); /* Clear status. */
+ } else {
+ outb(RX_NORM, RX_CMD);
+ inb(RX_STATUS);
+ }
+}
+
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
+}
+
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 level)
+{
+ debug = level;
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+};
+
+#ifdef MODULE
+
+static struct net_device *dev_3c501;
+
+module_param(io, int, 0);
+module_param(irq, int, 0);
+MODULE_PARM_DESC(io, "EtherLink I/O base address");
+MODULE_PARM_DESC(irq, "EtherLink IRQ number");
+
+/**
+ * init_module:
+ *
+ * When the driver is loaded as a module this function is called. We fake up
+ * a device structure with the base I/O and interrupt set as if it were being
+ * called from Space.c. This minimises the extra code that would otherwise
+ * be required.
+ *
+ * Returns 0 for success, or the negative errno from the probe if no card
+ * is found. Returning an error here also causes the module to be unloaded.
+ */
+
+int __init init_module(void)
+{
+ dev_3c501 = el1_probe(-1);
+ if (IS_ERR(dev_3c501))
+ return PTR_ERR(dev_3c501);
+ return 0;
+}
+
+/**
+ * cleanup_module:
+ *
+ * The module is being unloaded. We unhook our network device from the system
+ * and then free up the resources we took when the card was found.
+ */
+
+void __exit cleanup_module(void)
+{
+ struct net_device *dev = dev_3c501;
+ unregister_netdev(dev);
+ release_region(dev->base_addr, EL1_IO_EXTENT);
+ free_netdev(dev);
+}
+
+#endif /* MODULE */
+
+MODULE_AUTHOR("Donald Becker, Alan Cox");
+MODULE_DESCRIPTION("Support for the ancient 3Com 3c501 ethernet card");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/net/ethernet/3com/3c501.h b/drivers/net/ethernet/3com/3c501.h
new file mode 100644
index 000000000000..183fd55f03cb
--- /dev/null
+++ b/drivers/net/ethernet/3com/3c501.h
@@ -0,0 +1,91 @@
+
+/*
+ * Index to functions.
+ */
+
+static int el1_probe1(struct net_device *dev, int ioaddr);
+static int el_open(struct net_device *dev);
+static void el_timeout(struct net_device *dev);
+static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t el_interrupt(int irq, void *dev_id);
+static void el_receive(struct net_device *dev);
+static void el_reset(struct net_device *dev);
+static int el1_close(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static const struct ethtool_ops netdev_ethtool_ops;
+
+#define EL1_IO_EXTENT 16
+
+#ifndef EL_DEBUG
+#define EL_DEBUG 0 /* use 0 for production, 1 for devel., >2 for debug */
+#endif /* Anything above 5 is wordy death! */
+#define debug el_debug
+static int el_debug = EL_DEBUG;
+
+/*
+ * Board-specific info in netdev_priv(dev).
+ */
+
+struct net_local
+{
+ int tx_pkt_start; /* The start of the current Tx packet in the buffer */
+ int collisions; /* Tx collisions this packet */
+ int loading; /* Spot buffer load collisions */
+ int txing; /* True if card is in TX mode */
+ spinlock_t lock; /* Serializing lock */
+};
+
+
+#define RX_STATUS (ioaddr + 0x06)
+#define RX_CMD RX_STATUS
+#define TX_STATUS (ioaddr + 0x07)
+#define TX_CMD TX_STATUS
+#define GP_LOW (ioaddr + 0x08)
+#define GP_HIGH (ioaddr + 0x09)
+#define RX_BUF_CLR (ioaddr + 0x0A)
+#define RX_LOW (ioaddr + 0x0A)
+#define RX_HIGH (ioaddr + 0x0B)
+#define SAPROM (ioaddr + 0x0C)
+#define AX_STATUS (ioaddr + 0x0E)
+#define AX_CMD AX_STATUS
+#define DATAPORT (ioaddr + 0x0F)
+#define TX_RDY 0x08 /* In TX_STATUS */
+
+#define EL1_DATAPTR 0x08
+#define EL1_RXPTR 0x0A
+#define EL1_SAPROM 0x0C
+#define EL1_DATAPORT 0x0f
+
+/*
+ * Writes to the ax command register.
+ */
+
+#define AX_OFF 0x00 /* Irq off, buffer access on */
+#define AX_SYS 0x40 /* Load the buffer */
+#define AX_XMIT 0x44 /* Transmit a packet */
+#define AX_RX 0x48 /* Receive a packet */
+#define AX_LOOP 0x0C /* Loopback mode */
+#define AX_RESET 0x80
+
+/*
+ * Normal receive mode written to RX_STATUS. We must intr on short packets
+ * to avoid bogus rx lockups.
+ */
+
+#define RX_NORM 0xA8 /* 0x68 == all addrs, 0xA8 only to me. */
+#define RX_PROM 0x68 /* Senior Prom, uhmm promiscuous mode. */
+#define RX_MULT 0xE8 /* Accept multicast packets. */
+#define TX_NORM 0x0A /* Interrupt on everything that might hang the chip */
+
+/*
+ * TX_STATUS register.
+ */
+
+#define TX_COLLISION 0x02
+#define TX_16COLLISIONS 0x04
+#define TX_READY 0x08
+
+#define RX_RUNT 0x08
+#define RX_MISSED 0x01 /* Missed a packet due to 3c501 braindamage. */
+#define RX_GOOD 0x30 /* Good packet 0x20, or simple overflow 0x10. */
+
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
new file mode 100644
index 000000000000..92053e6fc980
--- /dev/null
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -0,0 +1,1594 @@
+/* 3c509.c: A 3c509 EtherLink3 ethernet driver for linux. */
+/*
+ Written 1993-2000 by Donald Becker.
+
+ Copyright 1994-2000 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+
+ This driver is for the 3Com EtherLinkIII series.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Known limitations:
+ Because of the way 3c509 ISA detection works it's difficult to predict
+ a priori which of several ISA-mode cards will be detected first.
+
+ This driver does not use predictive interrupt mode, resulting in higher
+ packet latency but lower overhead. If interrupts are disabled for an
+ unusually long time it could also result in missed packets, but in
+ practice this rarely happens.
+
+
+ FIXES:
+ Alan Cox: Removed the 'Unexpected interrupt' bug.
+ Michael Meskes: Upgraded to Donald Becker's version 1.07.
+ Alan Cox: Increased the eeprom delay. Regardless of
+ what the docs say some people definitely
+ get problems with lower (but in card spec)
+ delays
+ v1.10 4/21/97 Fixed module code so that multiple cards may be detected,
+ other cleanups. -djb
+ Andrea Arcangeli: Upgraded to Donald Becker's version 1.12.
+ Rick Payne: Fixed SMP race condition
+ v1.13 9/8/97 Made 'max_interrupt_work' an insmod-settable variable -djb
+ v1.14 10/15/97 Avoided waiting..discard message for fast machines -djb
+ v1.15 1/31/98 Faster recovery for Tx errors. -djb
+ v1.16 2/3/98 Different ID port handling to avoid sound cards. -djb
+ v1.18 12Mar2001 Andrew Morton
+ - Avoid bogus detect of 3c590's (Andrzej Krzysztofowicz)
+ - Reviewed against 1.18 from scyld.com
+ v1.18a 17Nov2001 Jeff Garzik <jgarzik@pobox.com>
+ - ethtool support
+ v1.18b 1Mar2002 Zwane Mwaikambo <zwane@commfireservices.com>
+ - Power Management support
+ v1.18c 1Mar2002 David Ruggiero <jdr@farfalle.com>
+ - Full duplex support
+ v1.19 16Oct2002 Zwane Mwaikambo <zwane@linuxpower.ca>
+ - Additional ethtool features
+ v1.19a 28Oct2002 David Ruggiero <jdr@farfalle.com>
+ - Increase *read_eeprom udelay to workaround oops with 2 cards.
+ v1.19b 08Nov2002 Marc Zyngier <maz@wild-wind.fr.eu.org>
+ - Introduce driver model for EISA cards.
+ v1.20 04Feb2008 Ondrej Zary <linux@rainbow-software.org>
+ - convert to isa_driver and pnp_driver and some cleanups
+*/
+
+#define DRV_NAME "3c509"
+#define DRV_VERSION "1.20"
+#define DRV_RELDATE "04Feb2008"
+
+/* A few values that may be tweaked. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (400*HZ/1000)
+
+#include <linux/module.h>
+#include <linux/mca.h>
+#include <linux/isa.h>
+#include <linux/pnp.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/pm.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h> /* for udelay() */
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/device.h>
+#include <linux/eisa.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+static char version[] __devinitdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
+
+#ifdef EL3_DEBUG
+static int el3_debug = EL3_DEBUG;
+#else
+static int el3_debug = 2;
+#endif
+
+/* Global count of all the cards in the system. Must be a global
+ * variable so that the mca/eisa probe routines can increment it. */
+static int el3_cards = 0;
+#define EL3_MAX_CARDS 8
+
+/* To minimize the size of the driver source I only define operating
+ constants if they are used several times. You'll need the manual
+ anyway if you want to understand driver details. */
+/* Offsets from base I/O address. */
+#define EL3_DATA 0x00
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+#define EEPROM_READ 0x80
+
+#define EL3_IO_EXTENT 16
+
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable. */
+enum c509cmd {
+ TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+ RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11,
+ TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+ FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11,
+ StatsDisable = 22<<11, StopCoax = 23<<11, PowerUp = 27<<11,
+ PowerDown = 28<<11, PowerAuto = 29<<11};
+
+enum c509status {
+ IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
+ TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+ IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000, };
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
+
+/* Register window 1 offsets, the window used in normal operation. */
+#define TX_FIFO 0x00
+#define RX_FIFO 0x00
+#define RX_STATUS 0x08
+#define TX_STATUS 0x0B
+#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */
+
+#define WN0_CONF_CTRL 0x04 /* Window 0: Configuration control register */
+#define WN0_ADDR_CONF 0x06 /* Window 0: Address configuration register */
+#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */
+#define WN4_MEDIA 0x0A /* Window 4: Various transcvr/media bits. */
+#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */
+#define WN4_NETDIAG 0x06 /* Window 4: Net diagnostic */
+#define FD_ENABLE 0x8000 /* Enable full-duplex ("external loopback") */
+
+/*
+ * Must be a power of two (we use a binary AND to wrap the
+ * circular queue index)
+ */
+#define SKB_QUEUE_SIZE 64
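+
+/*
+ * Illustrative sketch, not part of the driver: with a power-of-two size
+ * the queue index wraps with a binary AND rather than a modulo; the
+ * head/size fields of struct el3_private would advance like this.
+ */
+#if 0
+ head = (head + 1) & (SKB_QUEUE_SIZE - 1); /* cheap wrap-around */
+#endif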
+
+enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_MCA, EL3_EISA };
+
+struct el3_private {
+ spinlock_t lock;
+ /* skb send-queue */
+ int head, size;
+ struct sk_buff *queue[SKB_QUEUE_SIZE];
+ enum el3_cardtype type;
+};
+static int id_port;
+static int current_tag;
+static struct net_device *el3_devs[EL3_MAX_CARDS];
+
+/* Parameters that may be passed into the module. */
+static int debug = -1;
+static int irq[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 10;
+#ifdef CONFIG_PNP
+static int nopnp;
+#endif
+
+static int __devinit el3_common_init(struct net_device *dev);
+static void el3_common_remove(struct net_device *dev);
+static ushort id_read_eeprom(int index);
+static ushort read_eeprom(int ioaddr, int index);
+static int el3_open(struct net_device *dev);
+static netdev_tx_t el3_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t el3_interrupt(int irq, void *dev_id);
+static void update_stats(struct net_device *dev);
+static struct net_device_stats *el3_get_stats(struct net_device *dev);
+static int el3_rx(struct net_device *dev);
+static int el3_close(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static void el3_tx_timeout (struct net_device *dev);
+static void el3_down(struct net_device *dev);
+static void el3_up(struct net_device *dev);
+static const struct ethtool_ops ethtool_ops;
+#ifdef CONFIG_PM
+static int el3_suspend(struct device *, pm_message_t);
+static int el3_resume(struct device *);
+#else
+#define el3_suspend NULL
+#define el3_resume NULL
+#endif
+
+
+/* generic device remove for all device types */
+static int el3_device_remove (struct device *device);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void el3_poll_controller(struct net_device *dev);
+#endif
+
+/* Return 0 on success, 1 on error, 2 when an already-detected PnP card is found */
+static int el3_isa_id_sequence(__be16 *phys_addr)
+{
+ short lrs_state = 0xff;
+ int i;
+
+ /* ISA boards are detected by sending the ID sequence to the
+ ID_PORT. We find cards past the first by setting the 'current_tag'
+ on cards as they are found. Cards with their tag set will not
+ respond to subsequent ID sequences. */
+
+ outb(0x00, id_port);
+ outb(0x00, id_port);
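+ /* The 255 writes below step an 8-bit linear-feedback shift register
+ (feedback mask 0xcf); the cards recognise the resulting stream as
+ the ID sequence. */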
+ for (i = 0; i < 255; i++) {
+ outb(lrs_state, id_port);
+ lrs_state <<= 1;
+ lrs_state = lrs_state & 0x100 ? lrs_state ^ 0xcf : lrs_state;
+ }
+ /* For the first probe, clear all board's tag registers. */
+ if (current_tag == 0)
+ outb(0xd0, id_port);
+ else /* Otherwise kill off already-found boards. */
+ outb(0xd8, id_port);
+ if (id_read_eeprom(7) != 0x6d50)
+ return 1;
+ /* Read in EEPROM data, which does contention-select.
+ Only the lowest address board will stay "on-line".
+ 3Com got the byte order backwards. */
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(id_read_eeprom(i));
+#ifdef CONFIG_PNP
+ if (!nopnp) {
+ /* The ISA PnP 3c509 cards respond to the ID sequence too.
+ This check is needed in order not to register them twice. */
+ for (i = 0; i < el3_cards; i++) {
+ struct el3_private *lp = netdev_priv(el3_devs[i]);
+ if (lp->type == EL3_PNP &&
+ !memcmp(phys_addr, el3_devs[i]->dev_addr,
+ ETH_ALEN)) {
+ if (el3_debug > 3)
+ pr_debug("3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n",
+ phys_addr[0] & 0xff, phys_addr[0] >> 8,
+ phys_addr[1] & 0xff, phys_addr[1] >> 8,
+ phys_addr[2] & 0xff, phys_addr[2] >> 8);
+ /* Set the adaptor tag so that the next card can be found. */
+ outb(0xd0 + ++current_tag, id_port);
+ return 2;
+ }
+ }
+ }
+#endif /* CONFIG_PNP */
+ return 0;
+}
+
+static void __devinit el3_dev_fill(struct net_device *dev, __be16 *phys_addr,
+ int ioaddr, int irq, int if_port,
+ enum el3_cardtype type)
+{
+ struct el3_private *lp = netdev_priv(dev);
+
+ memcpy(dev->dev_addr, phys_addr, ETH_ALEN);
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ dev->if_port = if_port;
+ lp->type = type;
+}
+
+static int __devinit el3_isa_match(struct device *pdev,
+ unsigned int ndev)
+{
+ struct net_device *dev;
+ int ioaddr, isa_irq, if_port, err;
+ unsigned int iobase;
+ __be16 phys_addr[3];
+
+ while ((err = el3_isa_id_sequence(phys_addr)) == 2)
+ ; /* Skip to next card when PnP card found */
+ if (err == 1)
+ return 0;
+
+ iobase = id_read_eeprom(8);
+ if_port = iobase >> 14;
+ ioaddr = 0x200 + ((iobase & 0x1f) << 4);
+ if (irq[el3_cards] > 1 && irq[el3_cards] < 16)
+ isa_irq = irq[el3_cards];
+ else
+ isa_irq = id_read_eeprom(9) >> 12;
+
+ dev = alloc_etherdev(sizeof(struct el3_private));
+ if (!dev)
+ return -ENOMEM;
+
+ netdev_boot_setup_check(dev);
+
+ if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) {
+ free_netdev(dev);
+ return 0;
+ }
+
+ /* Set the adaptor tag so that the next card can be found. */
+ outb(0xd0 + ++current_tag, id_port);
+
+ /* Activate the adaptor at the EEPROM location. */
+ outb((ioaddr >> 4) | 0xe0, id_port);
+
+ EL3WINDOW(0);
+ if (inw(ioaddr) != 0x6d50) {
+ free_netdev(dev);
+ return 0;
+ }
+
+ /* Free the interrupt so that some other card can use it. */
+ outw(0x0f00, ioaddr + WN0_IRQ);
+
+ el3_dev_fill(dev, phys_addr, ioaddr, isa_irq, if_port, EL3_ISA);
+ dev_set_drvdata(pdev, dev);
+ if (el3_common_init(dev)) {
+ free_netdev(dev);
+ return 0;
+ }
+
+ el3_devs[el3_cards++] = dev;
+ return 1;
+}
+
+static int __devexit el3_isa_remove(struct device *pdev,
+ unsigned int ndev)
+{
+ el3_device_remove(pdev);
+ dev_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int el3_isa_suspend(struct device *dev, unsigned int n,
+ pm_message_t state)
+{
+ current_tag = 0;
+ return el3_suspend(dev, state);
+}
+
+static int el3_isa_resume(struct device *dev, unsigned int n)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ int ioaddr = ndev->base_addr, err;
+ __be16 phys_addr[3];
+
+ while ((err = el3_isa_id_sequence(phys_addr)) == 2)
+ ; /* Skip to next card when PnP card found */
+ if (err == 1)
+ return 0;
+ /* Set the adaptor tag so that the next card can be found. */
+ outb(0xd0 + ++current_tag, id_port);
+ /* Enable the card */
+ outb((ioaddr >> 4) | 0xe0, id_port);
+ EL3WINDOW(0);
+ if (inw(ioaddr) != 0x6d50)
+ return 1;
+ /* Free the interrupt so that some other card can use it. */
+ outw(0x0f00, ioaddr + WN0_IRQ);
+ return el3_resume(dev);
+}
+#endif
+
+static struct isa_driver el3_isa_driver = {
+ .match = el3_isa_match,
+ .remove = __devexit_p(el3_isa_remove),
+#ifdef CONFIG_PM
+ .suspend = el3_isa_suspend,
+ .resume = el3_isa_resume,
+#endif
+ .driver = {
+ .name = "3c509"
+ },
+};
+static int isa_registered;
+
+#ifdef CONFIG_PNP
+static struct pnp_device_id el3_pnp_ids[] = {
+ { .id = "TCM5090" }, /* 3Com Etherlink III (TP) */
+ { .id = "TCM5091" }, /* 3Com Etherlink III */
+ { .id = "TCM5094" }, /* 3Com Etherlink III (combo) */
+ { .id = "TCM5095" }, /* 3Com Etherlink III (TPO) */
+ { .id = "TCM5098" }, /* 3Com Etherlink III (TPC) */
+ { .id = "PNP80f7" }, /* 3Com Etherlink III compatible */
+ { .id = "PNP80f8" }, /* 3Com Etherlink III compatible */
+ { .id = "" }
+};
+MODULE_DEVICE_TABLE(pnp, el3_pnp_ids);
+
+static int __devinit el3_pnp_probe(struct pnp_dev *pdev,
+ const struct pnp_device_id *id)
+{
+ short i;
+ int ioaddr, irq, if_port;
+ __be16 phys_addr[3];
+ struct net_device *dev = NULL;
+ int err;
+
+ ioaddr = pnp_port_start(pdev, 0);
+ if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-pnp"))
+ return -EBUSY;
+ irq = pnp_irq(pdev, 0);
+ EL3WINDOW(0);
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(read_eeprom(ioaddr, i));
+ if_port = read_eeprom(ioaddr, 8) >> 14;
+ dev = alloc_etherdev(sizeof(struct el3_private));
+ if (!dev) {
+ release_region(ioaddr, EL3_IO_EXTENT);
+ return -ENOMEM;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ netdev_boot_setup_check(dev);
+
+ el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_PNP);
+ pnp_set_drvdata(pdev, dev);
+ err = el3_common_init(dev);
+
+ if (err) {
+ pnp_set_drvdata(pdev, NULL);
+ free_netdev(dev);
+ return err;
+ }
+
+ el3_devs[el3_cards++] = dev;
+ return 0;
+}
+
+static void __devexit el3_pnp_remove(struct pnp_dev *pdev)
+{
+ el3_common_remove(pnp_get_drvdata(pdev));
+ pnp_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM
+static int el3_pnp_suspend(struct pnp_dev *pdev, pm_message_t state)
+{
+ return el3_suspend(&pdev->dev, state);
+}
+
+static int el3_pnp_resume(struct pnp_dev *pdev)
+{
+ return el3_resume(&pdev->dev);
+}
+#endif
+
+static struct pnp_driver el3_pnp_driver = {
+ .name = "3c509",
+ .id_table = el3_pnp_ids,
+ .probe = el3_pnp_probe,
+ .remove = __devexit_p(el3_pnp_remove),
+#ifdef CONFIG_PM
+ .suspend = el3_pnp_suspend,
+ .resume = el3_pnp_resume,
+#endif
+};
+static int pnp_registered;
+#endif /* CONFIG_PNP */
+
+#ifdef CONFIG_EISA
+static struct eisa_device_id el3_eisa_ids[] = {
+ { "TCM5090" },
+ { "TCM5091" },
+ { "TCM5092" },
+ { "TCM5093" },
+ { "TCM5094" },
+ { "TCM5095" },
+ { "TCM5098" },
+ { "" }
+};
+MODULE_DEVICE_TABLE(eisa, el3_eisa_ids);
+
+static int el3_eisa_probe (struct device *device);
+
+static struct eisa_driver el3_eisa_driver = {
+ .id_table = el3_eisa_ids,
+ .driver = {
+ .name = "3c579",
+ .probe = el3_eisa_probe,
+ .remove = __devexit_p (el3_device_remove),
+ .suspend = el3_suspend,
+ .resume = el3_resume,
+ }
+};
+static int eisa_registered;
+#endif
+
+#ifdef CONFIG_MCA
+static int el3_mca_probe(struct device *dev);
+
+static short el3_mca_adapter_ids[] __initdata = {
+ 0x627c,
+ 0x627d,
+ 0x62db,
+ 0x62f6,
+ 0x62f7,
+ 0x0000
+};
+
+static char *el3_mca_adapter_names[] __initdata = {
+ "3Com 3c529 EtherLink III (10base2)",
+ "3Com 3c529 EtherLink III (10baseT)",
+ "3Com 3c529 EtherLink III (test mode)",
+ "3Com 3c529 EtherLink III (TP or coax)",
+ "3Com 3c529 EtherLink III (TP)",
+ NULL
+};
+
+static struct mca_driver el3_mca_driver = {
+ .id_table = el3_mca_adapter_ids,
+ .driver = {
+ .name = "3c529",
+ .bus = &mca_bus_type,
+ .probe = el3_mca_probe,
+ .remove = __devexit_p(el3_device_remove),
+ .suspend = el3_suspend,
+ .resume = el3_resume,
+ },
+};
+static int mca_registered;
+#endif /* CONFIG_MCA */
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = el3_open,
+ .ndo_stop = el3_close,
+ .ndo_start_xmit = el3_start_xmit,
+ .ndo_get_stats = el3_get_stats,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_tx_timeout = el3_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = el3_poll_controller,
+#endif
+};
+
+static int __devinit el3_common_init(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ int err;
+ const char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"};
+
+ spin_lock_init(&lp->lock);
+
+ if (dev->mem_start & 0x05) { /* xcvr codes 1/3/4/12 */
+ dev->if_port = (dev->mem_start & 0x0f);
+ } else { /* xcvr codes 0/8 */
+ /* use eeprom value, but save user's full-duplex selection */
+ dev->if_port |= (dev->mem_start & 0x08);
+ }
+
+ /* The EL3-specific entries in the device structure. */
+ dev->netdev_ops = &netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ SET_ETHTOOL_OPS(dev, &ethtool_ops);
+
+ err = register_netdev(dev);
+ if (err) {
+ pr_err("Failed to register 3c5x9 at %#3.3lx, IRQ %d.\n",
+ dev->base_addr, dev->irq);
+ release_region(dev->base_addr, EL3_IO_EXTENT);
+ return err;
+ }
+
+ pr_info("%s: 3c5x9 found at %#3.3lx, %s port, address %pM, IRQ %d.\n",
+ dev->name, dev->base_addr, if_names[(dev->if_port & 0x03)],
+ dev->dev_addr, dev->irq);
+
+ if (el3_debug > 0)
+ pr_info("%s", version);
+ return 0;
+}
+
+static void el3_common_remove (struct net_device *dev)
+{
+ unregister_netdev (dev);
+ release_region(dev->base_addr, EL3_IO_EXTENT);
+ free_netdev (dev);
+}
+
+#ifdef CONFIG_MCA
+static int __init el3_mca_probe(struct device *device)
+{
+ /* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch,
+ * heavily modified by Chris Beauregard
+ * (cpbeaure@csclub.uwaterloo.ca) to support standard MCA
+ * probing.
+ *
+ * redone for multi-card detection by ZP Gu (zpg@castle.net)
+ * now works as a module */
+
+ short i;
+ int ioaddr, irq, if_port;
+ __be16 phys_addr[3];
+ struct net_device *dev = NULL;
+ u_char pos4, pos5;
+ struct mca_device *mdev = to_mca_device(device);
+ int slot = mdev->slot;
+ int err;
+
+ pos4 = mca_device_read_stored_pos(mdev, 4);
+ pos5 = mca_device_read_stored_pos(mdev, 5);
+
+ ioaddr = ((short)((pos4&0xfc)|0x02)) << 8;
+ irq = pos5 & 0x0f;
+
+ pr_info("3c529: found %s at slot %d\n",
+ el3_mca_adapter_names[mdev->index], slot + 1);
+
+ /* claim the slot */
+ strncpy(mdev->name, el3_mca_adapter_names[mdev->index],
+ sizeof(mdev->name));
+ mca_device_set_claim(mdev, 1);
+
+ if_port = pos4 & 0x03;
+
+ irq = mca_device_transform_irq(mdev, irq);
+ ioaddr = mca_device_transform_ioport(mdev, ioaddr);
+ if (el3_debug > 2) {
+ pr_debug("3c529: irq %d ioaddr 0x%x ifport %d\n", irq, ioaddr, if_port);
+ }
+ EL3WINDOW(0);
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(read_eeprom(ioaddr, i));
+
+ dev = alloc_etherdev(sizeof (struct el3_private));
+ if (dev == NULL) {
+ release_region(ioaddr, EL3_IO_EXTENT);
+ return -ENOMEM;
+ }
+
+ netdev_boot_setup_check(dev);
+
+ el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_MCA);
+ dev_set_drvdata(device, dev);
+ err = el3_common_init(dev);
+
+ if (err) {
+ dev_set_drvdata(device, NULL);
+ free_netdev(dev);
+ return err;
+ }
+
+ el3_devs[el3_cards++] = dev;
+ return 0;
+}
+
+#endif /* CONFIG_MCA */
+
+#ifdef CONFIG_EISA
+static int __init el3_eisa_probe (struct device *device)
+{
+ short i;
+ int ioaddr, irq, if_port;
+ __be16 phys_addr[3];
+ struct net_device *dev = NULL;
+ struct eisa_device *edev;
+ int err;
+
+ /* Yippee, the driver framework is calling us! */
+ edev = to_eisa_device (device);
+ ioaddr = edev->base_addr;
+
+ if (!request_region(ioaddr, EL3_IO_EXTENT, "3c579-eisa"))
+ return -EBUSY;
+
+ /* Change the register set to the configuration window 0. */
+ outw(SelectWindow | 0, ioaddr + 0xC80 + EL3_CMD);
+
+ irq = inw(ioaddr + WN0_IRQ) >> 12;
+ if_port = inw(ioaddr + 6)>>14;
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(read_eeprom(ioaddr, i));
+
+ /* Restore the "Product ID" to the EEPROM read register. */
+ read_eeprom(ioaddr, 3);
+
+ dev = alloc_etherdev(sizeof (struct el3_private));
+ if (dev == NULL) {
+ release_region(ioaddr, EL3_IO_EXTENT);
+ return -ENOMEM;
+ }
+
+ netdev_boot_setup_check(dev);
+
+ el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA);
+ eisa_set_drvdata (edev, dev);
+ err = el3_common_init(dev);
+
+ if (err) {
+ eisa_set_drvdata (edev, NULL);
+ free_netdev(dev);
+ return err;
+ }
+
+ el3_devs[el3_cards++] = dev;
+ return 0;
+}
+#endif
+
+/* This remove works for all device types.
+ *
+ * The net dev must be stored in the driver data field */
+static int __devexit el3_device_remove (struct device *device)
+{
+ struct net_device *dev;
+
+ dev = dev_get_drvdata(device);
+
+ el3_common_remove (dev);
+ return 0;
+}
+
+/* Read a word from the EEPROM using the regular EEPROM access register.
+ Assume that we are in register window zero.
+ */
+static ushort read_eeprom(int ioaddr, int index)
+{
+ outw(EEPROM_READ + index, ioaddr + 10);
+ /* Pause for at least 162 us for the read to take place.
+ Some chips seem to require much longer. */
+ mdelay(2);
+ return inw(ioaddr + 12);
+}
+
+/* Read a word from the EEPROM when in the ISA ID probe state. */
+static ushort id_read_eeprom(int index)
+{
+ int bit, word = 0;
+
+ /* Issue the read command, and pause for at least 162 us for it to
+ complete. Assume an extra-fast 16 MHz bus. */
+ outb(EEPROM_READ + index, id_port);
+
+ /* Pause for at least 162 us. for the read to take place. */
+ /* Some chips seem to require much longer */
+ mdelay(4);
+
+ for (bit = 15; bit >= 0; bit--)
+ word = (word << 1) + (inb(id_port) & 0x01);
+
+ if (el3_debug > 3)
+ pr_debug(" 3c509 EEPROM word %d %#4.4x.\n", index, word);
+
+ return word;
+}
+
+
+static int
+el3_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ int i;
+
+ outw(TxReset, ioaddr + EL3_CMD);
+ outw(RxReset, ioaddr + EL3_CMD);
+ outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+
+ i = request_irq(dev->irq, el3_interrupt, 0, dev->name, dev);
+ if (i)
+ return i;
+
+ EL3WINDOW(0);
+ if (el3_debug > 3)
+ pr_debug("%s: Opening, IRQ %d status@%x %4.4x.\n", dev->name,
+ dev->irq, ioaddr + EL3_STATUS, inw(ioaddr + EL3_STATUS));
+
+ el3_up(dev);
+
+ if (el3_debug > 3)
+ pr_debug("%s: Opened 3c509 IRQ %d status %4.4x.\n",
+ dev->name, dev->irq, inw(ioaddr + EL3_STATUS));
+
+ return 0;
+}
+
+static void
+el3_tx_timeout (struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ /* Transmitter timeout, serious problems. */
+ pr_warning("%s: transmit timed out, Tx_status %2.2x status %4.4x Tx FIFO room %d.\n",
+ dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
+ inw(ioaddr + TX_FREE));
+ dev->stats.tx_errors++;
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ /* Issue TX_RESET and TX_START commands. */
+ outw(TxReset, ioaddr + EL3_CMD);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+}
+
+
+static netdev_tx_t
+el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ netif_stop_queue (dev);
+
+ dev->stats.tx_bytes += skb->len;
+
+ if (el3_debug > 4) {
+ pr_debug("%s: el3_start_xmit(length = %u) called, status %4.4x.\n",
+ dev->name, skb->len, inw(ioaddr + EL3_STATUS));
+ }
+#if 0
+#ifndef final_version
+ { /* Error-checking code, delete someday. */
+ ushort status = inw(ioaddr + EL3_STATUS);
+ if (status & 0x0001 && /* IRQ line active, missed one. */
+ inw(ioaddr + EL3_STATUS) & 1) { /* Make sure. */
+ pr_debug("%s: Missed interrupt, status then %04x now %04x"
+ " Tx %2.2x Rx %4.4x.\n", dev->name, status,
+ inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS),
+ inw(ioaddr + RX_STATUS));
+ /* Fake interrupt trigger by masking, acknowledge interrupts. */
+ outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+ }
+ }
+#endif
+#endif
+ /*
+ * We lock the driver against other processors. Note
+ * we don't need to lock versus the IRQ as we suspended
+ * that. This means that we lose the ability to take
+ * an RX during a TX upload. That sucks a bit with SMP
+ * on an original 3c509 (2K buffer)
+ *
+ * Using disable_irq stops us crapping on other
+ * time sensitive devices.
+ */
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ /* Put out the doubleword header... */
+ outw(skb->len, ioaddr + TX_FIFO);
+ outw(0x00, ioaddr + TX_FIFO);
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+
+ if (inw(ioaddr + TX_FREE) > 1536)
+ netif_start_queue(dev);
+ else
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ dev_kfree_skb (skb);
+
+ /* Clear the Tx status stack. */
+ {
+ short tx_status;
+ int i = 4;
+
+ while (--i > 0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
+ if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
+ if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
+ if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
+ outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
+ }
+ }
+ return NETDEV_TX_OK;
+}
+
+/* The EL3 interrupt handler. */
+static irqreturn_t
+el3_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct el3_private *lp;
+ int ioaddr, status;
+ int i = max_interrupt_work;
+
+ lp = netdev_priv(dev);
+ spin_lock(&lp->lock);
+
+ ioaddr = dev->base_addr;
+
+ if (el3_debug > 4) {
+ status = inw(ioaddr + EL3_STATUS);
+ pr_debug("%s: interrupt, status %4.4x.\n", dev->name, status);
+ }
+
+ while ((status = inw(ioaddr + EL3_STATUS)) &
+ (IntLatch | RxComplete | StatsFull)) {
+
+ if (status & RxComplete)
+ el3_rx(dev);
+
+ if (status & TxAvailable) {
+ if (el3_debug > 5)
+ pr_debug(" TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue (dev);
+ }
+ if (status & (AdapterFailure | RxEarly | StatsFull | TxComplete)) {
+ /* Handle all uncommon interrupts. */
+ if (status & StatsFull) /* Empty statistics. */
+ update_stats(dev);
+ if (status & RxEarly) { /* Rx early is unused. */
+ el3_rx(dev);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & TxComplete) { /* Really Tx error. */
+ short tx_status;
+ int i = 4;
+
+ while (--i>0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
+ if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
+ if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
+ if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
+ outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
+ }
+ }
+ if (status & AdapterFailure) {
+ /* Adapter failure requires Rx reset and reinit. */
+ outw(RxReset, ioaddr + EL3_CMD);
+ /* Set the Rx filter to the current state. */
+ outw(SetRxFilter | RxStation | RxBroadcast
+ | (dev->flags & IFF_ALLMULTI ? RxMulticast : 0)
+ | (dev->flags & IFF_PROMISC ? RxProm : 0),
+ ioaddr + EL3_CMD);
+ outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
+ outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
+ }
+ }
+
+ if (--i < 0) {
+ pr_err("%s: Infinite loop in interrupt, status %4.4x.\n",
+ dev->name, status);
+ /* Clear all interrupts. */
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); /* Ack IRQ */
+ }
+
+ if (el3_debug > 4) {
+ pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name,
+ inw(ioaddr + EL3_STATUS));
+ }
+ spin_unlock(&lp->lock);
+ return IRQ_HANDLED;
+}
+
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void el3_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ el3_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static struct net_device_stats *
+el3_get_stats(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ /*
+ * This is fast enough not to bother with disable IRQ
+ * stuff.
+ */
+
+ spin_lock_irqsave(&lp->lock, flags);
+ update_stats(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return &dev->stats;
+}
+
+/* Update statistics. We change to register window 6, so this should be run
+ single-threaded if the device is active. This is expected to be a rare
+ operation, and it's simpler for the rest of the driver to assume that
+ window 1 is always valid rather than use a special window-state variable.
+ */
+static void update_stats(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (el3_debug > 5)
+ pr_debug(" Updating the statistics.\n");
+ /* Turn off statistics updates while reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ dev->stats.tx_carrier_errors += inb(ioaddr + 0);
+ dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */ inb(ioaddr + 2);
+ dev->stats.collisions += inb(ioaddr + 3);
+ dev->stats.tx_window_errors += inb(ioaddr + 4);
+ dev->stats.rx_fifo_errors += inb(ioaddr + 5);
+ dev->stats.tx_packets += inb(ioaddr + 6);
+ /* Rx packets */ inb(ioaddr + 7);
+ /* Tx deferrals */ inb(ioaddr + 8);
+ inw(ioaddr + 10); /* Total Rx and Tx octets. */
+ inw(ioaddr + 12);
+
+ /* Back to window 1, and turn statistics back on. */
+ EL3WINDOW(1);
+ outw(StatsEnable, ioaddr + EL3_CMD);
+}
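+
+/*
+ * Illustrative sketch, not part of the driver: every excursion out of
+ * window 1 follows the same pattern update_stats() uses above; see
+ * el3_link_ok() below for a minimal real example.
+ */
+#if 0
+ EL3WINDOW(4); /* borrow another register window */
+ tmp = inw(ioaddr + WN4_MEDIA); /* ...windowed access... */
+ EL3WINDOW(1); /* restore the operating window */
+#endif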
+
+static int
+el3_rx(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ short rx_status;
+
+ if (el3_debug > 5)
+ pr_debug(" In rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
+ while ((rx_status = inw(ioaddr + RX_STATUS)) > 0) {
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ short error = rx_status & 0x3800;
+
+ outw(RxDiscard, ioaddr + EL3_CMD);
+ dev->stats.rx_errors++;
+ switch (error) {
+ case 0x0000: dev->stats.rx_over_errors++; break;
+ case 0x0800: dev->stats.rx_length_errors++; break;
+ case 0x1000: dev->stats.rx_frame_errors++; break;
+ case 0x1800: dev->stats.rx_length_errors++; break;
+ case 0x2000: dev->stats.rx_frame_errors++; break;
+ case 0x2800: dev->stats.rx_crc_errors++; break;
+ }
+ } else {
+ short pkt_len = rx_status & 0x7ff;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+5);
+ if (el3_debug > 4)
+ pr_debug("Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundary */
+
+ /* 'skb->data' points to the start of sk_buff data area. */
+ insl(ioaddr + RX_FIFO, skb_put(skb,pkt_len),
+ (pkt_len + 3) >> 2);
+
+ outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
+ skb->protocol = eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->stats.rx_bytes += pkt_len;
+ dev->stats.rx_packets++;
+ continue;
+ }
+ outw(RxDiscard, ioaddr + EL3_CMD);
+ dev->stats.rx_dropped++;
+ if (el3_debug)
+ pr_debug("%s: Couldn't allocate a sk_buff of size %d.\n",
+ dev->name, pkt_len);
+ }
+ inw(ioaddr + EL3_STATUS); /* Delay. */
+ while (inw(ioaddr + EL3_STATUS) & 0x1000)
+ pr_debug(" Waiting for 3c509 to discard packet, status %x.\n",
+ inw(ioaddr + EL3_STATUS) );
+ }
+
+ return 0;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+static void
+set_multicast_list(struct net_device *dev)
+{
+ unsigned long flags;
+ struct el3_private *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int mc_count = netdev_mc_count(dev);
+
+ if (el3_debug > 1) {
+ static int old;
+ if (old != mc_count) {
+ old = mc_count;
+ pr_debug("%s: Setting Rx mode to %d addresses.\n",
+ dev->name, mc_count);
+ }
+ }
+ spin_lock_irqsave(&lp->lock, flags);
+ if (dev->flags&IFF_PROMISC) {
+ outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
+ ioaddr + EL3_CMD);
+ }
+ else if (mc_count || (dev->flags&IFF_ALLMULTI)) {
+ outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast, ioaddr + EL3_CMD);
+ }
+ else
+ outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+static int
+el3_close(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct el3_private *lp = netdev_priv(dev);
+
+ if (el3_debug > 2)
+ pr_debug("%s: Shutting down ethercard.\n", dev->name);
+
+ el3_down(dev);
+
+ free_irq(dev->irq, dev);
+ /* Switching back to window 0 disables the IRQ. */
+ EL3WINDOW(0);
+ if (lp->type != EL3_EISA) {
+ /* But we explicitly zero the IRQ line select anyway. Don't do
+ * it on EISA cards, it prevents the module from getting an
+ * IRQ after unload+reload... */
+ outw(0x0f00, ioaddr + WN0_IRQ);
+ }
+
+ return 0;
+}
+
+static int
+el3_link_ok(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ u16 tmp;
+
+ EL3WINDOW(4);
+ tmp = inw(ioaddr + WN4_MEDIA);
+ EL3WINDOW(1);
+ return tmp & (1<<11);
+}
+
+static int
+el3_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ u16 tmp;
+ int ioaddr = dev->base_addr;
+
+ EL3WINDOW(0);
+ /* obtain current transceiver via WN4_MEDIA? */
+ tmp = inw(ioaddr + WN0_ADDR_CONF);
+ ecmd->transceiver = XCVR_INTERNAL;
+ switch (tmp >> 14) {
+ case 0:
+ ecmd->port = PORT_TP;
+ break;
+ case 1:
+ ecmd->port = PORT_AUI;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ case 3:
+ ecmd->port = PORT_BNC;
+ default:
+ break;
+ }
+
+ ecmd->duplex = DUPLEX_HALF;
+ ecmd->supported = 0;
+ tmp = inw(ioaddr + WN0_CONF_CTRL);
+ if (tmp & (1<<13))
+ ecmd->supported |= SUPPORTED_AUI;
+ if (tmp & (1<<12))
+ ecmd->supported |= SUPPORTED_BNC;
+ if (tmp & (1<<9)) {
+ ecmd->supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full; /* hmm... */
+ EL3WINDOW(4);
+ tmp = inw(ioaddr + WN4_NETDIAG);
+ if (tmp & FD_ENABLE)
+ ecmd->duplex = DUPLEX_FULL;
+ }
+
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
+ EL3WINDOW(1);
+ return 0;
+}
+
+static int
+el3_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ u16 tmp;
+ int ioaddr = dev->base_addr;
+
+ if (ecmd->speed != SPEED_10)
+ return -EINVAL;
+ if ((ecmd->duplex != DUPLEX_HALF) && (ecmd->duplex != DUPLEX_FULL))
+ return -EINVAL;
+ if ((ecmd->transceiver != XCVR_INTERNAL) && (ecmd->transceiver != XCVR_EXTERNAL))
+ return -EINVAL;
+
+ /* change XCVR type */
+ EL3WINDOW(0);
+ tmp = inw(ioaddr + WN0_ADDR_CONF);
+ switch (ecmd->port) {
+ case PORT_TP:
+ tmp &= ~(3<<14);
+ dev->if_port = 0;
+ break;
+ case PORT_AUI:
+ tmp |= (1<<14);
+ dev->if_port = 1;
+ break;
+ case PORT_BNC:
+ tmp |= (3<<14);
+ dev->if_port = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ outw(tmp, ioaddr + WN0_ADDR_CONF);
+ if (dev->if_port == 3) {
+ /* fire up the DC-DC converter if BNC gets enabled */
+ tmp = inw(ioaddr + WN0_ADDR_CONF);
+ if (tmp & (3 << 14)) {
+ outw(StartCoax, ioaddr + EL3_CMD);
+ udelay(800);
+ } else
+ return -EIO;
+ }
+
+ EL3WINDOW(4);
+ tmp = inw(ioaddr + WN4_NETDIAG);
+ if (ecmd->duplex == DUPLEX_FULL)
+ tmp |= FD_ENABLE;
+ else
+ tmp &= ~FD_ENABLE;
+ outw(tmp, ioaddr + WN4_NETDIAG);
+ EL3WINDOW(1);
+
+ return 0;
+}
+
+static void el3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+}
+
+static int el3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ int ret;
+
+ spin_lock_irq(&lp->lock);
+ ret = el3_netdev_get_ecmd(dev, ecmd);
+ spin_unlock_irq(&lp->lock);
+ return ret;
+}
+
+static int el3_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ int ret;
+
+ spin_lock_irq(&lp->lock);
+ ret = el3_netdev_set_ecmd(dev, ecmd);
+ spin_unlock_irq(&lp->lock);
+ return ret;
+}
+
+static u32 el3_get_link(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ u32 ret;
+
+ spin_lock_irq(&lp->lock);
+ ret = el3_link_ok(dev);
+ spin_unlock_irq(&lp->lock);
+ return ret;
+}
+
+static u32 el3_get_msglevel(struct net_device *dev)
+{
+ return el3_debug;
+}
+
+static void el3_set_msglevel(struct net_device *dev, u32 v)
+{
+ el3_debug = v;
+}
+
+static const struct ethtool_ops ethtool_ops = {
+ .get_drvinfo = el3_get_drvinfo,
+ .get_settings = el3_get_settings,
+ .set_settings = el3_set_settings,
+ .get_link = el3_get_link,
+ .get_msglevel = el3_get_msglevel,
+ .set_msglevel = el3_set_msglevel,
+};
+
+static void
+el3_down(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ netif_stop_queue(dev);
+
+ /* Turn off statistics ASAP. We update dev->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ if (dev->if_port == 3)
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+ else if (dev->if_port == 0) {
+ /* Disable link beat and jabber; if_port may change before the next open(). */
+ EL3WINDOW(4);
+ outw(inw(ioaddr + WN4_MEDIA) & ~MEDIA_TP, ioaddr + WN4_MEDIA);
+ }
+
+ outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
+
+ update_stats(dev);
+}
+
+static void
+el3_up(struct net_device *dev)
+{
+ int i, sw_info, net_diag;
+ int ioaddr = dev->base_addr;
+
+ /* Activating the board is required and does no harm otherwise */
+ outw(0x0001, ioaddr + 4);
+
+ /* Set the IRQ line. */
+ outw((dev->irq << 12) | 0x0f00, ioaddr + WN0_IRQ);
+
+ /* Set the station address in window 2 each time opened. */
+ EL3WINDOW(2);
+
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+
+ if ((dev->if_port & 0x03) == 3) /* BNC interface */
+ /* Start the thinnet transceiver. We should really wait 50ms...*/
+ outw(StartCoax, ioaddr + EL3_CMD);
+ else if ((dev->if_port & 0x03) == 0) { /* 10baseT interface */
+ /* Combine secondary sw_info word (the adapter level) and primary
+ sw_info word (duplex setting plus other useless bits) */
+ EL3WINDOW(0);
+ sw_info = (read_eeprom(ioaddr, 0x14) & 0x400f) |
+ (read_eeprom(ioaddr, 0x0d) & 0xBff0);
+
+ EL3WINDOW(4);
+ net_diag = inw(ioaddr + WN4_NETDIAG);
+ net_diag = (net_diag | FD_ENABLE); /* temporarily assume full-duplex will be set */
+ pr_info("%s: ", dev->name);
+ switch (dev->if_port & 0x0c) {
+ case 12:
+ /* force full-duplex mode if 3c5x9b */
+ if (sw_info & 0x000f) {
+ pr_cont("Forcing 3c5x9b full-duplex mode");
+ break;
+ }
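+ /* not a 3c5x9b: fall through */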
+ case 8:
+ /* set full-duplex mode based on eeprom config setting */
+ if ((sw_info & 0x000f) && (sw_info & 0x8000)) {
+ pr_cont("Setting 3c5x9b full-duplex mode (from EEPROM configuration bit)");
+ break;
+ }
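+ /* fall through */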
+ default:
+ /* xcvr=(0 || 4) OR user has an old 3c5x9 non "B" model */
+ pr_cont("Setting 3c5x9/3c5x9B half-duplex mode");
+ net_diag = (net_diag & ~FD_ENABLE); /* disable full duplex */
+ }
+
+ outw(net_diag, ioaddr + WN4_NETDIAG);
+ pr_cont(" if_port: %d, sw_info: %4.4x\n", dev->if_port, sw_info);
+ if (el3_debug > 3)
+ pr_debug("%s: 3c5x9 net diag word is now: %4.4x.\n", dev->name, net_diag);
+ /* Enable link beat and jabber check. */
+ outw(inw(ioaddr + WN4_MEDIA) | MEDIA_TP, ioaddr + WN4_MEDIA);
+ }
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 9; i++)
+ inb(ioaddr + i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+
+ /* Switch to register set 1 for normal use. */
+ EL3WINDOW(1);
+
+ /* Accept broadcast and station (physical) address frames only. */
+ outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(SetIntrEnb | IntLatch|TxAvailable|TxComplete|RxComplete|StatsFull,
+ ioaddr + EL3_CMD);
+
+ netif_start_queue(dev);
+}
+
+/* Power Management support functions */
+#ifdef CONFIG_PM
+
+static int
+el3_suspend(struct device *pdev, pm_message_t state)
+{
+ unsigned long flags;
+ struct net_device *dev;
+ struct el3_private *lp;
+ int ioaddr;
+
+ dev = dev_get_drvdata(pdev);
+ lp = netdev_priv(dev);
+ ioaddr = dev->base_addr;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ if (netif_running(dev))
+ netif_device_detach(dev);
+
+ el3_down(dev);
+ outw(PowerDown, ioaddr + EL3_CMD);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return 0;
+}
+
+static int
+el3_resume(struct device *pdev)
+{
+ unsigned long flags;
+ struct net_device *dev;
+ struct el3_private *lp;
+ int ioaddr;
+
+ dev = dev_get_drvdata(pdev);
+ lp = netdev_priv(dev);
+ ioaddr = dev->base_addr;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ outw(PowerUp, ioaddr + EL3_CMD);
+ EL3WINDOW(0);
+ el3_up(dev);
+
+ if (netif_running(dev))
+ netif_device_attach(dev);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+module_param(debug, int, 0);
+module_param_array(irq, int, NULL, 0);
+module_param(max_interrupt_work, int, 0);
+MODULE_PARM_DESC(debug, "debug level (0-6)");
+MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
+MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
+#ifdef CONFIG_PNP
+module_param(nopnp, int, 0);
+MODULE_PARM_DESC(nopnp, "disable ISA PnP support (0-1)");
+#endif /* CONFIG_PNP */
+MODULE_DESCRIPTION("3Com Etherlink III (3c509, 3c509B, 3c529, 3c579) ethernet driver");
+MODULE_LICENSE("GPL");
+
+static int __init el3_init_module(void)
+{
+ int ret = 0;
+
+ if (debug >= 0)
+ el3_debug = debug;
+
+#ifdef CONFIG_PNP
+ if (!nopnp) {
+ ret = pnp_register_driver(&el3_pnp_driver);
+ if (!ret)
+ pnp_registered = 1;
+ }
+#endif
+ /* Select an open I/O location at 0x1*0 to do ISA contention select. */
+ /* Start with 0x110 to avoid some sound cards.*/
+ for (id_port = 0x110 ; id_port < 0x200; id_port += 0x10) {
+ if (!request_region(id_port, 1, "3c509-control"))
+ continue;
+ outb(0x00, id_port);
+ outb(0xff, id_port);
+ if (inb(id_port) & 0x01)
+ break;
+ else
+ release_region(id_port, 1);
+ }
+ if (id_port >= 0x200) {
+ id_port = 0;
+ pr_err("No I/O port available for 3c509 activation.\n");
+ } else {
+ ret = isa_register_driver(&el3_isa_driver, EL3_MAX_CARDS);
+ if (!ret)
+ isa_registered = 1;
+ }
+#ifdef CONFIG_EISA
+ ret = eisa_driver_register(&el3_eisa_driver);
+ if (!ret)
+ eisa_registered = 1;
+#endif
+#ifdef CONFIG_MCA
+ ret = mca_register_driver(&el3_mca_driver);
+ if (!ret)
+ mca_registered = 1;
+#endif
+
+#ifdef CONFIG_PNP
+ if (pnp_registered)
+ ret = 0;
+#endif
+ if (isa_registered)
+ ret = 0;
+#ifdef CONFIG_EISA
+ if (eisa_registered)
+ ret = 0;
+#endif
+#ifdef CONFIG_MCA
+ if (mca_registered)
+ ret = 0;
+#endif
+ return ret;
+}
+
+static void __exit el3_cleanup_module(void)
+{
+#ifdef CONFIG_PNP
+ if (pnp_registered)
+ pnp_unregister_driver(&el3_pnp_driver);
+#endif
+ if (isa_registered)
+ isa_unregister_driver(&el3_isa_driver);
+ if (id_port)
+ release_region(id_port, 1);
+#ifdef CONFIG_EISA
+ if (eisa_registered)
+ eisa_driver_unregister(&el3_eisa_driver);
+#endif
+#ifdef CONFIG_MCA
+ if (mca_registered)
+ mca_unregister_driver(&el3_mca_driver);
+#endif
+}
+
+module_init(el3_init_module);
+module_exit(el3_cleanup_module);
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
new file mode 100644
index 000000000000..f67a5d3a200c
--- /dev/null
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -0,0 +1,1584 @@
+/*
+ Written 1997-1998 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ This driver is for the 3Com ISA EtherLink XL "Corkscrew" 3c515 ethercard.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+
+ 2000/2/2- Added support for kernel-level ISAPnP
+ by Stephen Frost <sfrost@snowman.net> and Alessandro Zummo
+ Cleaned up for 2.3.x/softnet by Jeff Garzik and Alan Cox.
+
+ 2001/11/17 - Added ethtool support (jgarzik)
+
+ 2002/10/28 - Locking updates for 2.5 (alan@lxorguk.ukuu.org.uk)
+
+*/
+
+#define DRV_NAME "3c515"
+#define DRV_VERSION "0.99t-ac"
+#define DRV_RELDATE "28-Oct-2002"
+
+static char *version =
+DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " becker@scyld.com and others\n";
+
+#define CORKSCREW 1
+
+/* "Knobs" that adjust features and parameters. */
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1512 effectively disables this feature. */
+static int rx_copybreak = 200;
+
+/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
+static const int mtu = 1500;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Enable the automatic media selection code -- usually set. */
+#define AUTOMEDIA 1
+
+/* Allow the use of fragment bus master transfers instead of only
+ programmed-I/O for Vortex cards. Full-bus-master transfers are always
+ enabled by default on Boomerang cards. If VORTEX_BUS_MASTER is defined,
+ the feature may be turned on using 'options'. */
+#define VORTEX_BUS_MASTER
+
+/* A few values that may be tweaked. */
+/* Keep the ring sizes a power of two for efficiency. */
+#define TX_RING_SIZE 16
+#define RX_RING_SIZE 16
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
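+
+/* Aside (sketch): with power-of-two ring sizes, the "cur % RING_SIZE"
+ index arithmetic used throughout compiles down to a simple mask, e.g.
+
+	entry = vp->cur_tx & (TX_RING_SIZE - 1);	// same as % TX_RING_SIZE
+
+ which is why the comment above asks that these stay powers of two. */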
+
+#include <linux/module.h>
+#include <linux/isapnp.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#define NEW_MULTICAST
+#include <linux/delay.h>
+
+#define MAX_UNITS 8
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("3Com 3c515 Corkscrew driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/* "Knobs" for adjusting internal parameters. */
+/* Put out somewhat more debugging messages. (0 - no msg, 1 minimal msgs). */
+#define DRIVER_DEBUG 1
+/* Some values here only for performance evaluation and path-coverage
+ debugging. */
+static int rx_nocopy, rx_copy, queued_packet;
+
+/* Number of times to check to see if the Tx FIFO has space, used in some
+ limited cases. */
+#define WAIT_TX_AVAIL 200
+
+/* Operational parameters that usually are not changed. */
+#define TX_TIMEOUT ((4*HZ)/10) /* Time in jiffies before concluding Tx hung */
+
+/* The size here is somewhat misleading: the Corkscrew also uses the ISA
+ aliased registers at <base>+0x400.
+ */
+#define CORKSCREW_TOTAL_SIZE 0x20
+
+#ifdef DRIVER_DEBUG
+static int corkscrew_debug = DRIVER_DEBUG;
+#else
+static int corkscrew_debug = 1;
+#endif
+
+#define CORKSCREW_ID 10
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the 3Com 3c515 ISA Fast EtherLink XL,
+3Com's ISA bus adapter for Fast Ethernet. Due to the unique I/O port layout,
+it's not practical to integrate this driver with the other EtherLink drivers.
+
+II. Board-specific settings
+
+The Corkscrew has an EEPROM for configuration, but no special settings are
+needed for Linux.
+
+III. Driver operation
+
+The 3c515 series use an interface that's very similar to the 3c900 "Boomerang"
+PCI cards, with the bus master interface extensively modified to work with
+the ISA bus.
+
+The card is capable of full-bus-master transfers with separate
+lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet,
+DEC Tulip and Intel Speedo3.
+
+This driver uses a "RX_COPYBREAK" scheme rather than a fixed intermediate
+receive buffer. This scheme allocates full-sized skbuffs as receive
+buffers. The value RX_COPYBREAK is used as the copying breakpoint: it is
+chosen to trade-off the memory wasted by passing the full-sized skbuff to
+the queue layer for all frames vs. the copying cost of copying a frame to a
+correctly-sized skbuff.
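+
+Sketch of that decision (illustrative; the real code is in boomerang_rx()
+below):
+
+	if (pkt_len < rx_copybreak &&
+	    (skb = dev_alloc_skb(pkt_len + 4)) != NULL) {
+		skb_reserve(skb, 2);			// align IP header
+		memcpy(skb_put(skb, pkt_len), ring_buf, pkt_len);
+	} else {
+		skb = vp->rx_skbuff[entry];		// pass ring skb up, refill slot
+	}
+
+where ring_buf stands for the ring entry's buffer address (hypothetical
+name; see boomerang_rx() for the real lookup).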
+
+
+IIIC. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, whose single-threaded use is enforced by the
+netif layer. The other is the interrupt handler, which is single-threaded
+by the hardware and by the kernel's interrupt handling.
+
+IV. Notes
+
+Thanks to Terry Murphy of 3Com for providing documentation and a development
+board.
+
+The names "Vortex", "Boomerang" and "Corkscrew" are the internal 3Com
+project names. I use these names to eliminate confusion -- 3Com product
+numbers and names are very similar and often confused.
+
+The new chips support both ethernet (1.5K) and FDDI (4.5K) frame sizes!
+This driver only supports ethernet frames because of the 1.5K MTU limit,
+but the changes needed to support 4.5K frames are minimal.
+*/
+
+/* Operational definitions.
+ These are not used by other compilation units and thus are not
+ exported in a ".h" file.
+
+ First the windows. There are eight register windows, with the command
+ and status registers available in each.
+ */
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable.
+ Note that 11 parameter bits were fine for ethernet, but the new chips
+ can handle FDDI-length frames (~4500 octets), and parameters now count
+ 32-bit 'Dwords' rather than octets. */
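+
+/* For example (sketch), a command is built by OR-ing the opcode in the top
+ five bits with its argument in the low eleven:
+
+	outw(SelectWindow + 3, ioaddr + EL3_CMD);	// what EL3WINDOW(3) expands to
+	outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+ */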
+
+enum corkscrew_cmd {
+ TotalReset = 0 << 11, SelectWindow = 1 << 11, StartCoax = 2 << 11,
+ RxDisable = 3 << 11, RxEnable = 4 << 11, RxReset = 5 << 11,
+ UpStall = 6 << 11, UpUnstall = (6 << 11) + 1, DownStall = (6 << 11) + 2,
+ DownUnstall = (6 << 11) + 3, RxDiscard = 8 << 11, TxEnable = 9 << 11,
+ TxDisable = 10 << 11, TxReset = 11 << 11, FakeIntr = 12 << 11,
+ AckIntr = 13 << 11, SetIntrEnb = 14 << 11, SetStatusEnb = 15 << 11,
+ SetRxFilter = 16 << 11, SetRxThreshold = 17 << 11,
+ SetTxThreshold = 18 << 11, SetTxStart = 19 << 11, StartDMAUp = 20 << 11,
+ StartDMADown = (20 << 11) + 1, StatsEnable = 21 << 11,
+ StatsDisable = 22 << 11, StopCoax = 23 << 11,
+};
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8
+};
+
+/* Bits in the general status register. */
+enum corkscrew_status {
+ IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
+ TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+ IntReq = 0x0040, StatsFull = 0x0080,
+ DMADone = 1 << 8, DownComplete = 1 << 9, UpComplete = 1 << 10,
+ DMAInProgress = 1 << 11, /* DMA controller is still busy. */
+ CmdInProgress = 1 << 12, /* EL3_CMD is still busy. */
+};
+
+/* Register window 1 offsets, the window used in normal operation.
+ On the Corkscrew this window is always mapped at offsets 0x10-0x1f. */
+enum Window1 {
+ TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
+ RxStatus = 0x18, Timer = 0x1A, TxStatus = 0x1B,
+ TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */
+};
+enum Window0 {
+ Wn0IRQ = 0x08,
+#if defined(CORKSCREW)
+ Wn0EepromCmd = 0x200A, /* Corkscrew EEPROM command register. */
+ Wn0EepromData = 0x200C, /* Corkscrew EEPROM results register. */
+#else
+ Wn0EepromCmd = 10, /* Window 0: EEPROM command register. */
+ Wn0EepromData = 12, /* Window 0: EEPROM results register. */
+#endif
+};
+enum Win0_EEPROM_bits {
+ EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
+ EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */
+ EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */
+};
+
+/* EEPROM locations. */
+enum eeprom_offset {
+ PhysAddr01 = 0, PhysAddr23 = 1, PhysAddr45 = 2, ModelID = 3,
+ EtherLink3ID = 7,
+};
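+
+/* Sketch of the EEPROM read sequence these constants drive; the driver
+ open-codes this polling loop in check_device() and corkscrew_setup()
+ below, and the helper name here is hypothetical:
+
+	static int eeprom_word(int ioaddr, int offset)
+	{
+		int timer;
+		outw(EEPROM_Read + offset, ioaddr + Wn0EepromCmd);
+		for (timer = 4; timer >= 0; timer--) {	// poll up to 5 * 162 us
+			udelay(162);
+			if (!(inw(ioaddr + Wn0EepromCmd) & 0x0200))	// busy cleared
+				break;
+		}
+		return inw(ioaddr + Wn0EepromData);
+	}
+ */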
+
+enum Window3 { /* Window 3: MAC/config bits. */
+ Wn3_Config = 0, Wn3_MAC_Ctrl = 6, Wn3_Options = 8,
+};
+enum wn3_config {
+ Ram_size = 7,
+ Ram_width = 8,
+ Ram_speed = 0x30,
+ Rom_size = 0xc0,
+ Ram_split_shift = 16,
+ Ram_split = 3 << Ram_split_shift,
+ Xcvr_shift = 20,
+ Xcvr = 7 << Xcvr_shift,
+ Autoselect = 0x1000000,
+};
+
+enum Window4 {
+ Wn4_NetDiag = 6, Wn4_Media = 10, /* Window 4: Xcvr/media bits. */
+};
+enum Win4_Media_bits {
+ Media_SQE = 0x0008, /* Enable SQE error counting for AUI. */
+ Media_10TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */
+ Media_Lnk = 0x0080, /* Enable just link beat for 100TX/100FX. */
+ Media_LnkBeat = 0x0800,
+};
+enum Window7 { /* Window 7: Bus Master control. */
+ Wn7_MasterAddr = 0, Wn7_MasterLen = 6, Wn7_MasterStatus = 12,
+};
+
+/* Boomerang-style bus master control registers. Note ISA aliases! */
+enum MasterCtrl {
+ PktStatus = 0x400, DownListPtr = 0x404, FragAddr = 0x408,
+ FragLen = 0x40c, TxFreeThreshold = 0x40f, UpPktStatus = 0x410,
+ UpListPtr = 0x418,
+};
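+
+/* Outline (sketch) of a Vortex-style fragment bus-master transmit using
+ the registers above; the authoritative sequence is in
+ corkscrew_start_xmit() below:
+
+	outl((int) skb->data, ioaddr + Wn7_MasterAddr);
+	outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
+	outw(StartDMADown, ioaddr + EL3_CMD);
+	// the DMADone interrupt later frees the skb and wakes the queue
+ */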
+
+/* The Rx and Tx descriptor lists.
+ Caution Alpha hackers: these types are 32 bits! Note also the 8 byte
+ alignment constraint on tx_ring[] and rx_ring[]. */
+struct boom_rx_desc {
+ u32 next;
+ s32 status;
+ u32 addr;
+ s32 length;
+};
+
+/* Values for the Rx status entry. */
+enum rx_desc_status {
+ RxDComplete = 0x00008000, RxDError = 0x4000,
+ /* See boomerang_rx() for actual error bits */
+};
+
+struct boom_tx_desc {
+ u32 next;
+ s32 status;
+ u32 addr;
+ s32 length;
+};
+
+struct corkscrew_private {
+ const char *product_name;
+ struct list_head list;
+ struct net_device *our_dev;
+ /* The Rx and Tx rings are here to keep them quad-word-aligned. */
+ struct boom_rx_desc rx_ring[RX_RING_SIZE];
+ struct boom_tx_desc tx_ring[TX_RING_SIZE];
+ /* The addresses of transmit- and receive-in-place skbuffs. */
+ struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+ unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int dirty_rx, dirty_tx;/* The ring entries to be free()ed. */
+ struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
+ struct timer_list timer; /* Media selection timer. */
+ int capabilities ; /* Adapter capabilities word. */
+ int options; /* User-settable misc. driver options. */
+ int last_rx_packets; /* For media autoselection. */
+ unsigned int available_media:8, /* From Wn3_Options */
+ media_override:3, /* Passed-in media type. */
+ default_media:3, /* Read from the EEPROM. */
+ full_duplex:1, autoselect:1, bus_master:1, /* Vortex can only do fragment bus-mastering. */
+ full_bus_master_tx:1, full_bus_master_rx:1, /* Boomerang */
+ tx_full:1;
+ spinlock_t lock;
+ struct device *dev;
+};
+
+/* The action to take with a media selection timer tick.
+ Note that we deviate from the 3Com order by checking 10base2 before AUI.
+ */
+enum xcvr_types {
+ XCVR_10baseT = 0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
+ XCVR_100baseFx, XCVR_MII = 6, XCVR_Default = 8,
+};
+
+static struct media_table {
+ char *name;
+ unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */
+ mask:8, /* The transceiver-present bit in Wn3_Config. */
+ next:8; /* The media type to try next. */
+ short wait; /* Time before we check media status. */
+} media_tbl[] = {
+ { "10baseT", Media_10TP, 0x08, XCVR_10base2, (14 * HZ) / 10 },
+ { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1 * HZ) / 10},
+ { "undefined", 0, 0x80, XCVR_10baseT, 10000},
+ { "10base2", 0, 0x10, XCVR_AUI, (1 * HZ) / 10},
+ { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14 * HZ) / 10},
+ { "100baseFX", Media_Lnk, 0x04, XCVR_MII, (14 * HZ) / 10},
+ { "MII", 0, 0x40, XCVR_10baseT, 3 * HZ},
+ { "undefined", 0, 0x01, XCVR_10baseT, 10000},
+ { "Default", 0, 0xFF, XCVR_10baseT, 10000},
+};
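+
+/* The .next fields above chain the table into a search list; autoselection
+ (see corkscrew_open() and corkscrew_timer() below) walks it roughly so:
+
+	dev->if_port = XCVR_100baseTx;	// start from the fastest medium
+	while (!(vp->available_media & media_tbl[dev->if_port].mask))
+		dev->if_port = media_tbl[dev->if_port].next;
+ */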
+
+#ifdef __ISAPNP__
+static struct isapnp_device_id corkscrew_isapnp_adapters[] = {
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5051),
+ (long) "3Com Fast EtherLink ISA" },
+ { } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(isapnp, corkscrew_isapnp_adapters);
+
+static int nopnp;
+#endif /* __ISAPNP__ */
+
+static struct net_device *corkscrew_scan(int unit);
+static int corkscrew_setup(struct net_device *dev, int ioaddr,
+ struct pnp_dev *idev, int card_number);
+static int corkscrew_open(struct net_device *dev);
+static void corkscrew_timer(unsigned long arg);
+static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static int corkscrew_rx(struct net_device *dev);
+static void corkscrew_timeout(struct net_device *dev);
+static int boomerang_rx(struct net_device *dev);
+static irqreturn_t corkscrew_interrupt(int irq, void *dev_id);
+static int corkscrew_close(struct net_device *dev);
+static void update_stats(int addr, struct net_device *dev);
+static struct net_device_stats *corkscrew_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static const struct ethtool_ops netdev_ethtool_ops;
+
+
+/*
+ Unfortunately maximizing the shared code between the integrated and
+ module version of the driver results in a complicated set of initialization
+ procedures.
+ init_module() -- modules / tc515_probe() -- built-in
+ The wrappers for corkscrew_scan()
+ corkscrew_scan() The common routine that scans for ISA and ISAPnP cards
+ corkscrew_setup() Fill in and register the device structure once a
+ card has been found.
+*/
+/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
+/* Note: this is the only limit on the number of cards supported!! */
+static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1, };
+
+#ifdef MODULE
+static int debug = -1;
+
+module_param(debug, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param(rx_copybreak, int, 0);
+module_param(max_interrupt_work, int, 0);
+MODULE_PARM_DESC(debug, "3c515 debug level (0-6)");
+MODULE_PARM_DESC(options, "3c515: Bits 0-2: media type, bit 3: full duplex, bit 4: bus mastering");
+MODULE_PARM_DESC(rx_copybreak, "3c515 copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(max_interrupt_work, "3c515 maximum events handled per interrupt");
+
+/* A list of all installed Vortex devices, for removing the driver module. */
+/* we will need locking (and refcounting) if we ever use it for more */
+static LIST_HEAD(root_corkscrew_dev);
+
+int init_module(void)
+{
+ int found = 0;
+ if (debug >= 0)
+ corkscrew_debug = debug;
+ if (corkscrew_debug)
+ pr_debug("%s", version);
+ while (corkscrew_scan(-1))
+ found++;
+ return found ? 0 : -ENODEV;
+}
+
+#else
+struct net_device *tc515_probe(int unit)
+{
+ struct net_device *dev = corkscrew_scan(unit);
+ static int printed;
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ if (corkscrew_debug > 0 && !printed) {
+ printed = 1;
+ pr_debug("%s", version);
+ }
+
+ return dev;
+}
+#endif /* not MODULE */
+
+static int check_device(unsigned ioaddr)
+{
+ int timer;
+
+ if (!request_region(ioaddr, CORKSCREW_TOTAL_SIZE, "3c515"))
+ return 0;
+ /* Check the resource configuration for a matching ioaddr. */
+ if ((inw(ioaddr + 0x2002) & 0x1f0) != (ioaddr & 0x1f0)) {
+ release_region(ioaddr, CORKSCREW_TOTAL_SIZE);
+ return 0;
+ }
+ /* Verify by reading the device ID from the EEPROM. */
+ outw(EEPROM_Read + 7, ioaddr + Wn0EepromCmd);
+ /* Pause for at least 162 us for the read to take place. */
+ for (timer = 4; timer >= 0; timer--) {
+ udelay(162);
+ if ((inw(ioaddr + Wn0EepromCmd) & 0x0200) == 0)
+ break;
+ }
+ if (inw(ioaddr + Wn0EepromData) != 0x6d50) {
+ release_region(ioaddr, CORKSCREW_TOTAL_SIZE);
+ return 0;
+ }
+ return 1;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ struct corkscrew_private *vp = netdev_priv(dev);
+ list_del_init(&vp->list);
+ if (dev->dma)
+ free_dma(dev->dma);
+ outw(TotalReset, dev->base_addr + EL3_CMD);
+ release_region(dev->base_addr, CORKSCREW_TOTAL_SIZE);
+ if (vp->dev)
+ pnp_device_detach(to_pnp_dev(vp->dev));
+}
+
+static struct net_device *corkscrew_scan(int unit)
+{
+ struct net_device *dev;
+ static int cards_found = 0;
+ static int ioaddr;
+ int err;
+#ifdef __ISAPNP__
+ short i;
+ static int pnp_cards;
+#endif
+
+ dev = alloc_etherdev(sizeof(struct corkscrew_private));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+#ifdef __ISAPNP__
+ if (nopnp == 1)
+ goto no_pnp;
+ for (i = 0; corkscrew_isapnp_adapters[i].vendor != 0; i++) {
+ struct pnp_dev *idev = NULL;
+ int irq;
+ while ((idev = pnp_find_dev(NULL,
+ corkscrew_isapnp_adapters[i].vendor,
+ corkscrew_isapnp_adapters[i].function,
+ idev))) {
+
+ if (pnp_device_attach(idev) < 0)
+ continue;
+ if (pnp_activate_dev(idev) < 0) {
+ pr_warning("pnp activate failed (out of resources?)\n");
+ pnp_device_detach(idev);
+ continue;
+ }
+ if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0)) {
+ pnp_device_detach(idev);
+ continue;
+ }
+ ioaddr = pnp_port_start(idev, 0);
+ irq = pnp_irq(idev, 0);
+ if (!check_device(ioaddr)) {
+ pnp_device_detach(idev);
+ continue;
+ }
+ if (corkscrew_debug)
+ pr_debug("ISAPNP reports %s at i/o 0x%x, irq %d\n",
+ (char *) corkscrew_isapnp_adapters[i].driver_data, ioaddr, irq);
+ pr_info("3c515 Resource configuration register %#4.4x, DCR %4.4x.\n",
+ inl(ioaddr + 0x2002), inw(ioaddr + 0x2000));
+ /* irq = inw(ioaddr + 0x2002) & 15; */ /* Use the irq from isapnp */
+ SET_NETDEV_DEV(dev, &idev->dev);
+ pnp_cards++;
+ err = corkscrew_setup(dev, ioaddr, idev, cards_found++);
+ if (!err)
+ return dev;
+ cleanup_card(dev);
+ }
+ }
+no_pnp:
+#endif /* __ISAPNP__ */
+
+ /* Check all locations on the ISA bus -- evil! */
+ for (ioaddr = 0x100; ioaddr < 0x400; ioaddr += 0x20) {
+ if (!check_device(ioaddr))
+ continue;
+
+ pr_info("3c515 Resource configuration register %#4.4x, DCR %4.4x.\n",
+ inl(ioaddr + 0x2002), inw(ioaddr + 0x2000));
+ err = corkscrew_setup(dev, ioaddr, NULL, cards_found++);
+ if (!err)
+ return dev;
+ cleanup_card(dev);
+ }
+ free_netdev(dev);
+ return NULL;
+}
+
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = corkscrew_open,
+ .ndo_stop = corkscrew_close,
+ .ndo_start_xmit = corkscrew_start_xmit,
+ .ndo_tx_timeout = corkscrew_timeout,
+ .ndo_get_stats = corkscrew_get_stats,
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+
+static int corkscrew_setup(struct net_device *dev, int ioaddr,
+ struct pnp_dev *idev, int card_number)
+{
+ struct corkscrew_private *vp = netdev_priv(dev);
+ unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
+ int i;
+ int irq;
+
+#ifdef __ISAPNP__
+ if (idev) {
+ irq = pnp_irq(idev, 0);
+ vp->dev = &idev->dev;
+ } else {
+ irq = inw(ioaddr + 0x2002) & 15;
+ }
+#else
+ irq = inw(ioaddr + 0x2002) & 15;
+#endif
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ dev->dma = inw(ioaddr + 0x2000) & 7;
+ vp->product_name = "3c515";
+ vp->options = dev->mem_start;
+ vp->our_dev = dev;
+
+ if (!vp->options) {
+ if (card_number >= MAX_UNITS)
+ vp->options = -1;
+ else
+ vp->options = options[card_number];
+ }
+
+ if (vp->options >= 0) {
+ vp->media_override = vp->options & 7;
+ if (vp->media_override == 2)
+ vp->media_override = 0;
+ vp->full_duplex = (vp->options & 8) ? 1 : 0;
+ vp->bus_master = (vp->options & 16) ? 1 : 0;
+ } else {
+ vp->media_override = 7;
+ vp->full_duplex = 0;
+ vp->bus_master = 0;
+ }
+#ifdef MODULE
+ list_add(&vp->list, &root_corkscrew_dev);
+#endif
+
+ pr_info("%s: 3Com %s at %#3x,", dev->name, vp->product_name, ioaddr);
+
+ spin_lock_init(&vp->lock);
+
+ /* Read the station address from the EEPROM. */
+ EL3WINDOW(0);
+ for (i = 0; i < 0x18; i++) {
+ __be16 *phys_addr = (__be16 *) dev->dev_addr;
+ int timer;
+ outw(EEPROM_Read + i, ioaddr + Wn0EepromCmd);
+ /* Pause for at least 162 us for the read to take place. */
+ for (timer = 4; timer >= 0; timer--) {
+ udelay(162);
+ if ((inw(ioaddr + Wn0EepromCmd) & 0x0200) == 0)
+ break;
+ }
+ eeprom[i] = inw(ioaddr + Wn0EepromData);
+ checksum ^= eeprom[i];
+ if (i < 3)
+ phys_addr[i] = htons(eeprom[i]);
+ }
+ checksum = (checksum ^ (checksum >> 8)) & 0xff;
+ if (checksum != 0x00)
+ pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
+ pr_cont(" %pM", dev->dev_addr);
+ if (eeprom[16] == 0x11c7) { /* Corkscrew */
+ if (request_dma(dev->dma, "3c515")) {
+ pr_cont(", DMA %d allocation failed", dev->dma);
+ dev->dma = 0;
+ } else
+ pr_cont(", DMA %d", dev->dma);
+ }
+ pr_cont(", IRQ %d\n", dev->irq);
+ /* Tell them about an invalid IRQ. */
+ if (corkscrew_debug && (dev->irq <= 0 || dev->irq > 15))
+ pr_warning(" *** Warning: this IRQ is unlikely to work! ***\n");
+
+ {
+ static const char * const ram_split[] = {
+ "5:3", "3:1", "1:1", "3:5"
+ };
+ __u32 config;
+ EL3WINDOW(3);
+ vp->available_media = inw(ioaddr + Wn3_Options);
+ config = inl(ioaddr + Wn3_Config);
+ if (corkscrew_debug > 1)
+ pr_info(" Internal config register is %4.4x, transceivers %#x.\n",
+ config, inw(ioaddr + Wn3_Options));
+ pr_info(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
+ 8 << (config & Ram_size), /* parenthesized: '<<' binds tighter than '&' */
+ config & Ram_width ? "word" : "byte",
+ ram_split[(config & Ram_split) >> Ram_split_shift],
+ config & Autoselect ? "autoselect/" : "",
+ media_tbl[(config & Xcvr) >> Xcvr_shift].name);
+ vp->default_media = (config & Xcvr) >> Xcvr_shift;
+ vp->autoselect = config & Autoselect ? 1 : 0;
+ dev->if_port = vp->default_media;
+ }
+ if (vp->media_override != 7) {
+ pr_info(" Media override to transceiver type %d (%s).\n",
+ vp->media_override,
+ media_tbl[vp->media_override].name);
+ dev->if_port = vp->media_override;
+ }
+
+ vp->capabilities = eeprom[16];
+ vp->full_bus_master_tx = (vp->capabilities & 0x20) ? 1 : 0;
+ /* Rx at 10mbps was once reported broken and unconditionally disabled
+ here; it is now enabled whenever the capabilities bit is set. */
+ vp->full_bus_master_rx = (vp->capabilities & 0x20) ? 1 : 0;
+
+ /* The 3c51x-specific entries in the device structure. */
+ dev->netdev_ops = &netdev_ops;
+ dev->watchdog_timeo = (400 * HZ) / 1000;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+
+ return register_netdev(dev);
+}
+
+
+static int corkscrew_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct corkscrew_private *vp = netdev_priv(dev);
+ __u32 config;
+ int i;
+
+ /* Before initializing select the active media port. */
+ EL3WINDOW(3);
+ if (vp->full_duplex)
+ outb(0x20, ioaddr + Wn3_MAC_Ctrl); /* Set the full-duplex bit. */
+ config = inl(ioaddr + Wn3_Config);
+
+ if (vp->media_override != 7) {
+ if (corkscrew_debug > 1)
+ pr_info("%s: Media override to transceiver %d (%s).\n",
+ dev->name, vp->media_override,
+ media_tbl[vp->media_override].name);
+ dev->if_port = vp->media_override;
+ } else if (vp->autoselect) {
+ /* Find first available media type, starting with 100baseTx. */
+ dev->if_port = 4;
+ while (!(vp->available_media & media_tbl[dev->if_port].mask))
+ dev->if_port = media_tbl[dev->if_port].next;
+
+ if (corkscrew_debug > 1)
+ pr_debug("%s: Initial media type %s.\n",
+ dev->name, media_tbl[dev->if_port].name);
+
+ init_timer(&vp->timer);
+ vp->timer.expires = jiffies + media_tbl[dev->if_port].wait;
+ vp->timer.data = (unsigned long) dev;
+ vp->timer.function = corkscrew_timer; /* timer handler */
+ add_timer(&vp->timer);
+ } else
+ dev->if_port = vp->default_media;
+
+ config = (config & ~Xcvr) | (dev->if_port << Xcvr_shift);
+ outl(config, ioaddr + Wn3_Config);
+
+ if (corkscrew_debug > 1) {
+ pr_debug("%s: corkscrew_open() InternalConfig %8.8x.\n",
+ dev->name, config);
+ }
+
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (i = 20; i >= 0; i--)
+ if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+
+ outw(RxReset, ioaddr + EL3_CMD);
+ /* Wait a few ticks for the RxReset command to complete. */
+ for (i = 20; i >= 0; i--)
+ if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+
+ outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+
+ /* Use the now-standard shared IRQ implementation. */
+ if (vp->capabilities == 0x11c7) {
+ /* Corkscrew: Cannot share ISA resources. */
+ if (dev->irq == 0 ||
+ dev->dma == 0 ||
+ request_irq(dev->irq, corkscrew_interrupt, 0,
+ vp->product_name, dev))
+ return -EAGAIN;
+ enable_dma(dev->dma);
+ set_dma_mode(dev->dma, DMA_MODE_CASCADE);
+ } else if (request_irq(dev->irq, corkscrew_interrupt, IRQF_SHARED,
+ vp->product_name, dev)) {
+ return -EAGAIN;
+ }
+
+ if (corkscrew_debug > 1) {
+ EL3WINDOW(4);
+ pr_debug("%s: corkscrew_open() irq %d media status %4.4x.\n",
+ dev->name, dev->irq, inw(ioaddr + Wn4_Media));
+ }
+
+ /* Set the station address and mask in window 2 each time opened. */
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+ for (; i < 12; i += 2)
+ outw(0, ioaddr + i);
+
+ if (dev->if_port == 3)
+ /* Start the thinnet transceiver. We should really wait 50ms... */
+ outw(StartCoax, ioaddr + EL3_CMD);
+ EL3WINDOW(4);
+ outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP | Media_SQE)) |
+ media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 10; i++)
+ inb(ioaddr + i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+ /* New: On the Vortex we must also clear the BadSSD counter. */
+ EL3WINDOW(4);
+ inb(ioaddr + 12);
+ /* ..and on the Boomerang we enable the extra statistics bits. */
+ outw(0x0040, ioaddr + Wn4_NetDiag);
+
+ /* Switch to register set 7 for normal use. */
+ EL3WINDOW(7);
+
+ if (vp->full_bus_master_rx) { /* Boomerang bus master. */
+ vp->cur_rx = vp->dirty_rx = 0;
+ if (corkscrew_debug > 2)
+ pr_debug("%s: Filling in the Rx ring.\n", dev->name);
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+ if (i < (RX_RING_SIZE - 1))
+ vp->rx_ring[i].next =
+ isa_virt_to_bus(&vp->rx_ring[i + 1]);
+ else
+ vp->rx_ring[i].next = 0;
+ vp->rx_ring[i].status = 0; /* Clear complete bit. */
+ vp->rx_ring[i].length = PKT_BUF_SZ | 0x80000000;
+ skb = dev_alloc_skb(PKT_BUF_SZ);
+ vp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break; /* Bad news! */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ vp->rx_ring[i].addr = isa_virt_to_bus(skb->data);
+ }
+ if (i != 0)
+ vp->rx_ring[i - 1].next =
+ isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */
+ outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr);
+ }
+ if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */
+ vp->cur_tx = vp->dirty_tx = 0;
+ outb(PKT_BUF_SZ >> 8, ioaddr + TxFreeThreshold); /* Room for a packet. */
+ /* Clear the Tx ring. */
+ for (i = 0; i < TX_RING_SIZE; i++)
+ vp->tx_skbuff[i] = NULL;
+ outl(0, ioaddr + DownListPtr);
+ }
+ /* Set receiver mode: presumably accept broadcast and station address only. */
+ set_rx_mode(dev);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+
+ netif_start_queue(dev);
+
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(SetStatusEnb | AdapterFailure | IntReq | StatsFull |
+ (vp->full_bus_master_tx ? DownComplete : TxAvailable) |
+ (vp->full_bus_master_rx ? UpComplete : RxComplete) |
+ (vp->bus_master ? DMADone : 0), ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
+ | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete,
+ ioaddr + EL3_CMD);
+
+ return 0;
+}
+
+static void corkscrew_timer(unsigned long data)
+{
+#ifdef AUTOMEDIA
+ struct net_device *dev = (struct net_device *) data;
+ struct corkscrew_private *vp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+ int ok = 0;
+
+ if (corkscrew_debug > 1)
+ pr_debug("%s: Media selection timer tick happened, %s.\n",
+ dev->name, media_tbl[dev->if_port].name);
+
+ spin_lock_irqsave(&vp->lock, flags);
+
+ {
+ int old_window = inw(ioaddr + EL3_CMD) >> 13;
+ int media_status;
+ EL3WINDOW(4);
+ media_status = inw(ioaddr + Wn4_Media);
+ switch (dev->if_port) {
+ case 0:
+ case 4:
+ case 5: /* 10baseT, 100baseTX, 100baseFX */
+ if (media_status & Media_LnkBeat) {
+ ok = 1;
+ if (corkscrew_debug > 1)
+ pr_debug("%s: Media %s has link beat, %x.\n",
+ dev->name,
+ media_tbl[dev->if_port].name,
+ media_status);
+ } else if (corkscrew_debug > 1)
+ pr_debug("%s: Media %s is has no link beat, %x.\n",
+ dev->name,
+ media_tbl[dev->if_port].name,
+ media_status);
+
+ break;
+ default: /* Other media types handled by Tx timeouts. */
+ if (corkscrew_debug > 1)
+ pr_debug("%s: Media %s is has no indication, %x.\n",
+ dev->name,
+ media_tbl[dev->if_port].name,
+ media_status);
+ ok = 1;
+ }
+ if (!ok) {
+ __u32 config;
+
+ do {
+ dev->if_port = media_tbl[dev->if_port].next;
+ } while (!(vp->available_media & media_tbl[dev->if_port].mask));
+
+ if (dev->if_port == 8) { /* Go back to default. */
+ dev->if_port = vp->default_media;
+ if (corkscrew_debug > 1)
+ pr_debug("%s: Media selection failing, using default %s port.\n",
+ dev->name,
+ media_tbl[dev->if_port].name);
+ } else {
+ if (corkscrew_debug > 1)
+ pr_debug("%s: Media selection failed, now trying %s port.\n",
+ dev->name,
+ media_tbl[dev->if_port].name);
+ vp->timer.expires = jiffies + media_tbl[dev->if_port].wait;
+ add_timer(&vp->timer);
+ }
+ outw((media_status & ~(Media_10TP | Media_SQE)) |
+ media_tbl[dev->if_port].media_bits,
+ ioaddr + Wn4_Media);
+
+ EL3WINDOW(3);
+ config = inl(ioaddr + Wn3_Config);
+ config = (config & ~Xcvr) | (dev->if_port << Xcvr_shift);
+ outl(config, ioaddr + Wn3_Config);
+
+ outw(dev->if_port == 3 ? StartCoax : StopCoax,
+ ioaddr + EL3_CMD);
+ }
+ EL3WINDOW(old_window);
+ }
+
+ spin_unlock_irqrestore(&vp->lock, flags);
+ if (corkscrew_debug > 1)
+ pr_debug("%s: Media selection timer finished, %s.\n",
+ dev->name, media_tbl[dev->if_port].name);
+
+#endif /* AUTOMEDIA */
+}
+
+static void corkscrew_timeout(struct net_device *dev)
+{
+ int i;
+ struct corkscrew_private *vp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ pr_warning("%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
+ dev->name, inb(ioaddr + TxStatus),
+ inw(ioaddr + EL3_STATUS));
+ /* Slight code bloat to be user friendly. */
+ if ((inb(ioaddr + TxStatus) & 0x88) == 0x88)
+ pr_warning("%s: Transmitter encountered 16 collisions --"
+ " network cable problem?\n", dev->name);
+#ifndef final_version
+ pr_debug(" Flags; bus-master %d, full %d; dirty %d current %d.\n",
+ vp->full_bus_master_tx, vp->tx_full, vp->dirty_tx,
+ vp->cur_tx);
+ pr_debug(" Down list %8.8x vs. %p.\n", inl(ioaddr + DownListPtr),
+ &vp->tx_ring[0]);
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ pr_debug(" %d: %p length %8.8x status %8.8x\n", i,
+ &vp->tx_ring[i],
+ vp->tx_ring[i].length, vp->tx_ring[i].status);
+ }
+#endif
+ /* Issue TX_RESET and TX_START commands. */
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (i = 20; i >= 0; i--)
+ if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct corkscrew_private *vp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ /* Block a timer-based transmit from overlapping. */
+
+ netif_stop_queue(dev);
+
+ if (vp->full_bus_master_tx) { /* BOOMERANG bus-master */
+ /* Calculate the next Tx descriptor entry. */
+ int entry = vp->cur_tx % TX_RING_SIZE;
+ struct boom_tx_desc *prev_entry;
+ unsigned long flags;
+ int i;
+
+ if (vp->tx_full) /* No room in the ring to transmit. */
+ return NETDEV_TX_BUSY;
+ if (vp->cur_tx != 0)
+ prev_entry = &vp->tx_ring[(vp->cur_tx - 1) % TX_RING_SIZE];
+ else
+ prev_entry = NULL;
+ if (corkscrew_debug > 3)
+ pr_debug("%s: Trying to send a packet, Tx index %d.\n",
+ dev->name, vp->cur_tx);
+ /* vp->tx_full = 1; */
+ vp->tx_skbuff[entry] = skb;
+ vp->tx_ring[entry].next = 0;
+ vp->tx_ring[entry].addr = isa_virt_to_bus(skb->data);
+ vp->tx_ring[entry].length = skb->len | 0x80000000;
+ vp->tx_ring[entry].status = skb->len | 0x80000000;
+
+ spin_lock_irqsave(&vp->lock, flags);
+ outw(DownStall, ioaddr + EL3_CMD);
+ /* Wait for the stall to complete. */
+ for (i = 20; i >= 0; i--)
+ if ((inw(ioaddr + EL3_STATUS) & CmdInProgress) == 0)
+ break;
+ if (prev_entry)
+ prev_entry->next = isa_virt_to_bus(&vp->tx_ring[entry]);
+ if (inl(ioaddr + DownListPtr) == 0) {
+ outl(isa_virt_to_bus(&vp->tx_ring[entry]),
+ ioaddr + DownListPtr);
+ queued_packet++;
+ }
+ outw(DownUnstall, ioaddr + EL3_CMD);
+ spin_unlock_irqrestore(&vp->lock, flags);
+
+ vp->cur_tx++;
+ if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1)
+ vp->tx_full = 1;
+ else { /* Clear previous interrupt enable. */
+ if (prev_entry)
+ prev_entry->status &= ~0x80000000;
+ netif_wake_queue(dev);
+ }
+ return NETDEV_TX_OK;
+ }
+ /* Put out the doubleword header... */
+ outl(skb->len, ioaddr + TX_FIFO);
+ dev->stats.tx_bytes += skb->len;
+#ifdef VORTEX_BUS_MASTER
+ if (vp->bus_master) {
+ /* Set the bus-master controller to transfer the packet. */
+ outl((int) (skb->data), ioaddr + Wn7_MasterAddr);
+ outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
+ vp->tx_skb = skb;
+ outw(StartDMADown, ioaddr + EL3_CMD);
+ /* queue will be woken at the DMADone interrupt. */
+ } else {
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+ dev_kfree_skb(skb);
+ if (inw(ioaddr + TxFree) > 1536) {
+ netif_wake_queue(dev);
+ } else
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + (1536 >> 2),
+ ioaddr + EL3_CMD);
+ }
+#else
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+ dev_kfree_skb(skb);
+ if (inw(ioaddr + TxFree) > 1536) {
+ netif_wake_queue(dev);
+ } else
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + (1536 >> 2), ioaddr + EL3_CMD);
+#endif /* bus master */
+
+
+ /* Clear the Tx status stack. */
+ {
+ short tx_status;
+ int i = 4;
+
+ while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) {
+ if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */
+ if (corkscrew_debug > 2)
+ pr_debug("%s: Tx error, status %2.2x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x04)
+ dev->stats.tx_fifo_errors++;
+ if (tx_status & 0x38)
+ dev->stats.tx_aborted_errors++;
+ if (tx_status & 0x30) {
+ int j;
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (j = 20; j >= 0; j--)
+ if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ }
+ outw(TxEnable, ioaddr + EL3_CMD);
+ }
+ outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
+ }
+ }
+ return NETDEV_TX_OK;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+
+static irqreturn_t corkscrew_interrupt(int irq, void *dev_id)
+{
+ /* Use the now-standard shared IRQ implementation. */
+ struct net_device *dev = dev_id;
+ struct corkscrew_private *lp = netdev_priv(dev);
+ int ioaddr, status;
+ int latency;
+ int i = max_interrupt_work;
+
+ ioaddr = dev->base_addr;
+ latency = inb(ioaddr + Timer);
+
+ spin_lock(&lp->lock);
+
+ status = inw(ioaddr + EL3_STATUS);
+
+ if (corkscrew_debug > 4)
+ pr_debug("%s: interrupt, status %4.4x, timer %d.\n",
+ dev->name, status, latency);
+ if ((status & 0xE000) != 0xE000) {
+ static int donedidthis;
+ /* Some interrupt controllers store a bogus interrupt from boot-time.
+ Ignore a single early interrupt, but don't hang the machine for
+ other interrupt problems. */
+ if (donedidthis++ > 100) {
+ pr_err("%s: Bogus interrupt, bailing. Status %4.4x, start=%d.\n",
+ dev->name, status, netif_running(dev));
+ free_irq(dev->irq, dev);
+ dev->irq = -1;
+ }
+ }
+
+ do {
+ if (corkscrew_debug > 5)
+ pr_debug("%s: In interrupt loop, status %4.4x.\n",
+ dev->name, status);
+ if (status & RxComplete)
+ corkscrew_rx(dev);
+
+ if (status & TxAvailable) {
+ if (corkscrew_debug > 5)
+ pr_debug(" TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+ }
+ if (status & DownComplete) {
+ unsigned int dirty_tx = lp->dirty_tx;
+
+ while (lp->cur_tx - dirty_tx > 0) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ if (inl(ioaddr + DownListPtr) == isa_virt_to_bus(&lp->tx_ring[entry]))
+ break; /* It still hasn't been processed. */
+ if (lp->tx_skbuff[entry]) {
+ dev_kfree_skb_irq(lp->tx_skbuff[entry]);
+ lp->tx_skbuff[entry] = NULL;
+ }
+ dirty_tx++;
+ }
+ lp->dirty_tx = dirty_tx;
+ outw(AckIntr | DownComplete, ioaddr + EL3_CMD);
+ if (lp->tx_full && (lp->cur_tx - dirty_tx <= TX_RING_SIZE - 1)) {
+ lp->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+ }
+#ifdef VORTEX_BUS_MASTER
+ if (status & DMADone) {
+ outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
+ dev_kfree_skb_irq(lp->tx_skb); /* Release the transferred buffer */
+ netif_wake_queue(dev);
+ }
+#endif
+ if (status & UpComplete) {
+ boomerang_rx(dev);
+ outw(AckIntr | UpComplete, ioaddr + EL3_CMD);
+ }
+ if (status & (AdapterFailure | RxEarly | StatsFull)) {
+ /* Handle all uncommon interrupts at once. */
+ if (status & RxEarly) { /* Rx early is unused. */
+ corkscrew_rx(dev);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & StatsFull) { /* Empty statistics. */
+ static int DoneDidThat;
+ if (corkscrew_debug > 4)
+ pr_debug("%s: Updating stats.\n", dev->name);
+ update_stats(ioaddr, dev);
+ /* DEBUG HACK: Disable statistics as an interrupt source. */
+ /* This occurs when we have the wrong media type! */
+ if (DoneDidThat == 0 && inw(ioaddr + EL3_STATUS) & StatsFull) {
+ int win, reg;
+ pr_notice("%s: Updating stats failed, disabling stats as an interrupt source.\n",
+ dev->name);
+ for (win = 0; win < 8; win++) {
+ EL3WINDOW(win);
+ pr_notice("Vortex window %d:", win);
+ for (reg = 0; reg < 16; reg++)
+ pr_cont(" %2.2x", inb(ioaddr + reg));
+ pr_cont("\n");
+ }
+ EL3WINDOW(7);
+ outw(SetIntrEnb | TxAvailable |
+ RxComplete | AdapterFailure |
+ UpComplete | DownComplete |
+ TxComplete, ioaddr + EL3_CMD);
+ DoneDidThat++;
+ }
+ }
+ if (status & AdapterFailure) {
+ /* Adapter failure requires Rx reset and reinit. */
+ outw(RxReset, ioaddr + EL3_CMD);
+ /* Set the Rx filter to the current state. */
+ set_rx_mode(dev);
+ outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
+ outw(AckIntr | AdapterFailure,
+ ioaddr + EL3_CMD);
+ }
+ }
+
+ if (--i < 0) {
+ pr_err("%s: Too much work in interrupt, status %4.4x. Disabling functions (%4.4x).\n",
+ dev->name, status, SetStatusEnb | ((~status) & 0x7FE));
+ /* Disable all pending interrupts. */
+ outw(SetStatusEnb | ((~status) & 0x7FE), ioaddr + EL3_CMD);
+ outw(AckIntr | 0x7FF, ioaddr + EL3_CMD);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+
+ } while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
+
+ spin_unlock(&lp->lock);
+
+ if (corkscrew_debug > 4)
+ pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name, status);
+ return IRQ_HANDLED;
+}
+
+static int corkscrew_rx(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ int i;
+ short rx_status;
+
+ if (corkscrew_debug > 5)
+ pr_debug(" In rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr + EL3_STATUS), inw(ioaddr + RxStatus));
+ while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ unsigned char rx_error = inb(ioaddr + RxErrors);
+ if (corkscrew_debug > 2)
+ pr_debug(" Rx error: status %2.2x.\n",
+ rx_error);
+ dev->stats.rx_errors++;
+ if (rx_error & 0x01)
+ dev->stats.rx_over_errors++;
+ if (rx_error & 0x02)
+ dev->stats.rx_length_errors++;
+ if (rx_error & 0x04)
+ dev->stats.rx_frame_errors++;
+ if (rx_error & 0x08)
+ dev->stats.rx_crc_errors++;
+ if (rx_error & 0x10)
+ dev->stats.rx_length_errors++;
+ } else {
+ /* The packet length: up to 4.5K! */
+ short pkt_len = rx_status & 0x1fff;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len + 5 + 2);
+ if (corkscrew_debug > 4)
+ pr_debug("Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ insl(ioaddr + RX_FIFO,
+ skb_put(skb, pkt_len),
+ (pkt_len + 3) >> 2);
+ outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ /* Wait a limited time to go to next packet. */
+ for (i = 200; i >= 0; i--)
+ if (! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ continue;
+ } else if (corkscrew_debug)
+ pr_debug("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, pkt_len);
+ }
+ outw(RxDiscard, ioaddr + EL3_CMD);
+ dev->stats.rx_dropped++;
+ /* Wait a limited time to skip this packet. */
+ for (i = 200; i >= 0; i--)
+ if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ }
+ return 0;
+}
+
+static int boomerang_rx(struct net_device *dev)
+{
+ struct corkscrew_private *vp = netdev_priv(dev);
+ int entry = vp->cur_rx % RX_RING_SIZE;
+ int ioaddr = dev->base_addr;
+ int rx_status;
+
+ if (corkscrew_debug > 5)
+ pr_debug(" In boomerang_rx(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr + EL3_STATUS), inw(ioaddr + RxStatus));
+ while ((rx_status = vp->rx_ring[entry].status) & RxDComplete) {
+ if (rx_status & RxDError) { /* Error, update stats. */
+ unsigned char rx_error = rx_status >> 16;
+ if (corkscrew_debug > 2)
+ pr_debug(" Rx error: status %2.2x.\n",
+ rx_error);
+ dev->stats.rx_errors++;
+ if (rx_error & 0x01)
+ dev->stats.rx_over_errors++;
+ if (rx_error & 0x02)
+ dev->stats.rx_length_errors++;
+ if (rx_error & 0x04)
+ dev->stats.rx_frame_errors++;
+ if (rx_error & 0x08)
+ dev->stats.rx_crc_errors++;
+ if (rx_error & 0x10)
+ dev->stats.rx_length_errors++;
+ } else {
+ /* The packet length: up to 4.5K! */
+ short pkt_len = rx_status & 0x1fff;
+ struct sk_buff *skb;
+
+ dev->stats.rx_bytes += pkt_len;
+ if (corkscrew_debug > 4)
+ pr_debug("Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+
+ /* Check if the packet is long enough to just accept without
+ copying to a properly sized skbuff. */
+ if (pkt_len < rx_copybreak &&
+ (skb = dev_alloc_skb(pkt_len + 4)) != NULL) {
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ memcpy(skb_put(skb, pkt_len),
+ isa_bus_to_virt(vp->rx_ring[entry].addr),
+ pkt_len);
+ rx_copy++;
+ } else {
+ void *temp;
+ /* Pass up the skbuff already on the Rx ring. */
+ skb = vp->rx_skbuff[entry];
+ vp->rx_skbuff[entry] = NULL;
+ temp = skb_put(skb, pkt_len);
+ /* Remove this checking code for final release. */
+ if (isa_bus_to_virt(vp->rx_ring[entry].addr) != temp)
+ pr_warning("%s: Warning -- the skbuff addresses do not match"
+ " in boomerang_rx: %p vs. %p / %p.\n",
+ dev->name,
+ isa_bus_to_virt(vp->rx_ring[entry].addr),
+ skb->head, temp);
+ rx_nocopy++;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ }
+ entry = (++vp->cur_rx) % RX_RING_SIZE;
+ }
+ /* Refill the Rx ring buffers. */
+ for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = vp->dirty_rx % RX_RING_SIZE;
+ if (vp->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(PKT_BUF_SZ);
+ if (skb == NULL)
+ break; /* Bad news! */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ vp->rx_ring[entry].addr = isa_virt_to_bus(skb->data);
+ vp->rx_skbuff[entry] = skb;
+ }
+ vp->rx_ring[entry].status = 0; /* Clear complete bit. */
+ }
+ return 0;
+}
+
+static int corkscrew_close(struct net_device *dev)
+{
+ struct corkscrew_private *vp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int i;
+
+ netif_stop_queue(dev);
+
+ if (corkscrew_debug > 1) {
+ pr_debug("%s: corkscrew_close() status %4.4x, Tx status %2.2x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS),
+ inb(ioaddr + TxStatus));
+ pr_debug("%s: corkscrew close stats: rx_nocopy %d rx_copy %d tx_queued %d.\n",
+ dev->name, rx_nocopy, rx_copy, queued_packet);
+ }
+
+ del_timer(&vp->timer);
+
+ /* Turn off statistics ASAP. We update lp->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ if (dev->if_port == XCVR_10base2)
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+
+ free_irq(dev->irq, dev);
+
+ outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
+
+ update_stats(ioaddr, dev);
+ if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
+ outl(0, ioaddr + UpListPtr);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ if (vp->rx_skbuff[i]) {
+ dev_kfree_skb(vp->rx_skbuff[i]);
+ vp->rx_skbuff[i] = NULL;
+ }
+ }
+ if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
+ outl(0, ioaddr + DownListPtr);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ if (vp->tx_skbuff[i]) {
+ dev_kfree_skb(vp->tx_skbuff[i]);
+ vp->tx_skbuff[i] = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static struct net_device_stats *corkscrew_get_stats(struct net_device *dev)
+{
+ struct corkscrew_private *vp = netdev_priv(dev);
+ unsigned long flags;
+
+ if (netif_running(dev)) {
+ spin_lock_irqsave(&vp->lock, flags);
+ update_stats(dev->base_addr, dev);
+ spin_unlock_irqrestore(&vp->lock, flags);
+ }
+ return &dev->stats;
+}
+
+/* Update statistics.
+ Unlike with the EL3 we need not worry about interrupts changing
+ the window setting from underneath us, but we must still guard
+ against a race condition with a StatsUpdate interrupt updating the
+ table. This is done by checking that the ASM (!) code generated uses
+ atomic updates with '+='.
+ */
+static void update_stats(int ioaddr, struct net_device *dev)
+{
+ /* Unlike the 3c5x9 we need not turn off stats updates while reading. */
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ dev->stats.tx_carrier_errors += inb(ioaddr + 0);
+ dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */ inb(ioaddr + 2);
+ dev->stats.collisions += inb(ioaddr + 3);
+ dev->stats.tx_window_errors += inb(ioaddr + 4);
+ dev->stats.rx_fifo_errors += inb(ioaddr + 5);
+ dev->stats.tx_packets += inb(ioaddr + 6);
+ dev->stats.tx_packets += (inb(ioaddr + 9) & 0x30) << 4;
+ /* Rx packets */ inb(ioaddr + 7);
+ /* Must read to clear */
+ /* Tx deferrals */ inb(ioaddr + 8);
+ /* Don't bother with register 9, an extension of registers 6&7.
+ If we do use the 6&7 values the atomic update assumption above
+ is invalid. */
+ inw(ioaddr + 10); /* Total Rx and Tx octets. */
+ inw(ioaddr + 12);
+ /* New: On the Vortex we must also clear the BadSSD counter. */
+ EL3WINDOW(4);
+ inb(ioaddr + 12);
+
+ /* We change back to window 7 (not 1) with the Vortex. */
+ EL3WINDOW(7);
+}
+
+/* This new version of set_rx_mode() supports v1.4 kernels.
+ The Vortex chip has no documented multicast filter, so the only
+ multicast setting is to receive all multicast frames. At least
+ the chip has a very clean way to set the mode, unlike many others. */
+static void set_rx_mode(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ short new_mode;
+
+ if (dev->flags & IFF_PROMISC) {
+ if (corkscrew_debug > 3)
+ pr_debug("%s: Setting promiscuous mode.\n",
+ dev->name);
+ new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm;
+ } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
+ new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast;
+ } else
+ new_mode = SetRxFilter | RxStation | RxBroadcast;
+
+ outw(new_mode, ioaddr + EL3_CMD);
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
+}
+
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return corkscrew_debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 level)
+{
+ corkscrew_debug = level;
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+};
+
+
+#ifdef MODULE
+void cleanup_module(void)
+{
+ while (!list_empty(&root_corkscrew_dev)) {
+ struct net_device *dev;
+ struct corkscrew_private *vp;
+
+ vp = list_entry(root_corkscrew_dev.next,
+ struct corkscrew_private, list);
+ dev = vp->our_dev;
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
new file mode 100644
index 000000000000..9c01bc9235b3
--- /dev/null
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -0,0 +1,1181 @@
+/* 3c574.c: A PCMCIA ethernet driver for the 3com 3c574 "RoadRunner".
+
+ Written 1993-1998 by
+ Donald Becker, becker@scyld.com, (driver core) and
+ David Hinds, dahinds@users.sourceforge.net (from his PC card code).
+ Locking fixes (C) Copyright 2003 Red Hat Inc
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License, incorporated herein by reference.
+
+ This driver derives from Donald Becker's 3c509 core, which has the
+ following copyright:
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+
+*/
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the 3Com 3c574 PC card Fast Ethernet
+Adapter.
+
+II. Board-specific settings
+
+None -- PC cards are autoconfigured.
+
+III. Driver operation
+
+The 3c574 uses a Boomerang-style interface, without the bus-master capability.
+See the Boomerang driver and documentation for most details.
+
+IV. Notes and chip documentation.
+
+Two added registers are used to enhance PIO performance, RunnerRdCtrl and
+RunnerWrCtrl. These are 11 bit down-counters that are preloaded with the
+count of word (16 bits) reads or writes the driver is about to do to the Rx
+or Tx FIFO. The chip is then able to hide the internal-PCI-bus to PC-card
+translation latency by buffering the I/O operations with an 8 word FIFO.
+Note: No other chip accesses are permitted when this buffer is used.
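+
+A hypothetical preload for a burst read would look like this (sketch;
+RunnerRdCtrl's register offset is defined further down in this file):
+
+	outw((pkt_len + 1) >> 1, ioaddr + RunnerRdCtrl);	// words about to be read
+	insw(ioaddr + EL3_DATA, skb_put(skb, pkt_len), (pkt_len + 1) >> 1);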
+
+A second enhancement is that both attribute and common memory space
+0x0800-0x0fff can be translated to the PIO FIFO. Thus memory operations (faster
+with *some* PCcard bridges) may be used instead of I/O operations.
+This is enabled by setting the 0x10 bit in the PCMCIA LAN COR.
+
+Some slow PC card bridges work better if they never see a WAIT signal.
+This is configured by setting the 0x20 bit in the PCMCIA LAN COR.
+Only do this after testing that it is reliable and improves performance.
+
+The upper five bits of RunnerRdCtrl are used to window into PCcard
+configuration space registers. Window 0 is the regular Boomerang/Odie
+register set, 1-5 are various PC card control registers, and 16-31 are
+the (reversed!) CIS table.
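+
+For example, the probe code below briefly selects card-control window 2
+with outw(2<<11, ioaddr + RunnerRdCtrl) to read the ASIC revision byte,
+then restores window 0 with outw(0<<11, ioaddr + RunnerRdCtrl).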
+
+A final note: writing the InternalConfig register in window 3 with an
+invalid ramWidth is Very Bad.
+
+V. References
+
+http://www.scyld.com/expert/NWay.html
+http://www.national.com/opf/DP/DP83840A.html
+
+Thanks to Terry Murphy of 3Com for providing development information for
+earlier 3Com products.
+
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/bitops.h>
+#include <linux/mii.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("3Com 3c574 series PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
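+/* For instance, INT_MODULE_PARM(max_interrupt_work, 32) expands to
+ *	static int max_interrupt_work = 32;
+ *	module_param(max_interrupt_work, int, 0);
+ */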
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+INT_MODULE_PARM(max_interrupt_work, 32);
+
+/* Force full duplex modes? */
+INT_MODULE_PARM(full_duplex, 0);
+
+/* Autodetect link polarity reversal? */
+INT_MODULE_PARM(auto_polarity, 1);
+
+
+/*====================================================================*/
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT ((800*HZ)/1000)
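+/* i.e. 800 ms expressed in jiffies: 80 ticks at HZ=100, 200 at HZ=250. */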
+
+/* To minimize the size of the driver source and make the driver more
+   readable, not all constants are symbolically defined.
+ You'll need the manual if you want to understand driver details anyway. */
+/* Offsets from base I/O address. */
+#define EL3_DATA 0x00
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable. */
+enum el3_cmds {
+ TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+ RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11,
+ TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+ FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11,
+ StatsDisable = 22<<11, StopCoax = 23<<11,
+};
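+
+/* For instance (illustration, mirroring el3_start_xmit() below), a command
+ * with a parameter is formed by adding the two fields:
+ *	outw(SetTxThreshold + (1536 >> 2), ioaddr + EL3_CMD);
+ * i.e. command 18 in bits 15-11 and the dword count in bits 10-0.
+ */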
+
+enum elxl_status {
+ IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
+ TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+ IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000 };
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8
+};
+
+enum Window0 {
+ Wn0EepromCmd = 10, Wn0EepromData = 12, /* EEPROM command/address, data. */
+ IntrStatus=0x0E, /* Valid in all windows. */
+};
+/* These assume the larger EEPROM. */
+enum Win0_EEPROM_cmds {
+ EEPROM_Read = 0x200, EEPROM_WRITE = 0x100, EEPROM_ERASE = 0x300,
+ EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */
+ EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */
+};
+
+/* Register window 1 offsets, the window used in normal operation.
+ On the "Odie" this window is always mapped at offsets 0x10-0x1f.
+ Except for TxFree, which is overlapped by RunnerWrCtrl. */
+enum Window1 {
+ TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
+ RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B,
+ TxFree = 0x0C, /* Remaining free bytes in Tx buffer. */
+ RunnerRdCtrl = 0x16, RunnerWrCtrl = 0x1c,
+};
+
+enum Window3 { /* Window 3: MAC/config bits. */
+ Wn3_Config=0, Wn3_MAC_Ctrl=6, Wn3_Options=8,
+};
+enum wn3_config {
+ Ram_size = 7,
+ Ram_width = 8,
+ Ram_speed = 0x30,
+ Rom_size = 0xc0,
+ Ram_split_shift = 16,
+ Ram_split = 3 << Ram_split_shift,
+ Xcvr_shift = 20,
+ Xcvr = 7 << Xcvr_shift,
+ Autoselect = 0x1000000,
+};
+
+enum Window4 { /* Window 4: Xcvr/media bits. */
+ Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
+};
+
+#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */
+
+struct el3_private {
+ struct pcmcia_device *p_dev;
+ u16 advertising, partner; /* NWay media advertisement */
+ unsigned char phys; /* MII device address */
+ unsigned int autoselect:1, default_media:3; /* Read from the EEPROM/Wn3_Config. */
+ /* for transceiver monitoring */
+ struct timer_list media;
+ unsigned short media_status;
+ unsigned short fast_poll;
+ unsigned long last_irq;
+ spinlock_t window_lock; /* Guards the Window selection */
+};
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+   This is only set with the original DP83840 on older 3c905 boards, so the extra
+ code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required = 0;
+
+/* Index of functions. */
+
+static int tc574_config(struct pcmcia_device *link);
+static void tc574_release(struct pcmcia_device *link);
+
+static void mdio_sync(unsigned int ioaddr, int bits);
+static int mdio_read(unsigned int ioaddr, int phy_id, int location);
+static void mdio_write(unsigned int ioaddr, int phy_id, int location,
+ int value);
+static unsigned short read_eeprom(unsigned int ioaddr, int index);
+static void tc574_wait_for_completion(struct net_device *dev, int cmd);
+
+static void tc574_reset(struct net_device *dev);
+static void media_check(unsigned long arg);
+static int el3_open(struct net_device *dev);
+static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t el3_interrupt(int irq, void *dev_id);
+static void update_stats(struct net_device *dev);
+static struct net_device_stats *el3_get_stats(struct net_device *dev);
+static int el3_rx(struct net_device *dev, int worklimit);
+static int el3_close(struct net_device *dev);
+static void el3_tx_timeout(struct net_device *dev);
+static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void set_rx_mode(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+
+static void tc574_detach(struct pcmcia_device *p_dev);
+
+/*
+ tc574_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+*/
+static const struct net_device_ops el3_netdev_ops = {
+ .ndo_open = el3_open,
+ .ndo_stop = el3_close,
+ .ndo_start_xmit = el3_start_xmit,
+ .ndo_tx_timeout = el3_tx_timeout,
+ .ndo_get_stats = el3_get_stats,
+ .ndo_do_ioctl = el3_ioctl,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int tc574_probe(struct pcmcia_device *link)
+{
+ struct el3_private *lp;
+ struct net_device *dev;
+
+ dev_dbg(&link->dev, "3c574_attach()\n");
+
+ /* Create the PC card device object. */
+ dev = alloc_etherdev(sizeof(struct el3_private));
+ if (!dev)
+ return -ENOMEM;
+ lp = netdev_priv(dev);
+ link->priv = dev;
+ lp->p_dev = link;
+
+ spin_lock_init(&lp->window_lock);
+ link->resource[0]->end = 32;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
+ link->config_flags |= CONF_ENABLE_IRQ;
+ link->config_index = 1;
+
+ dev->netdev_ops = &el3_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ return tc574_config(link);
+}
+
+static void tc574_detach(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ dev_dbg(&link->dev, "3c574_detach()\n");
+
+ unregister_netdev(dev);
+
+ tc574_release(link);
+
+ free_netdev(dev);
+} /* tc574_detach */
+
+static const char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+
+static int tc574_config(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ struct el3_private *lp = netdev_priv(dev);
+ int ret, i, j;
+ unsigned int ioaddr;
+ __be16 *phys_addr;
+ char *cardname;
+ __u32 config;
+ u8 *buf;
+ size_t len;
+
+ phys_addr = (__be16 *)dev->dev_addr;
+
+ dev_dbg(&link->dev, "3c574_config()\n");
+
+ link->io_lines = 16;
+
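+	/* Scan candidate I/O bases 0x20 bytes apart; XORing with 0x300
+	 * makes the scan start at the traditional 3Com base 0x300 and then
+	 * wrap through the rest of 0x000-0x3ff. */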
+ for (i = j = 0; j < 0x400; j += 0x20) {
+ link->resource[0]->start = j ^ 0x300;
+ i = pcmcia_request_io(link);
+ if (i == 0)
+ break;
+ }
+ if (i != 0)
+ goto failed;
+
+ ret = pcmcia_request_irq(link, el3_interrupt);
+ if (ret)
+ goto failed;
+
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto failed;
+
+ dev->irq = link->irq;
+ dev->base_addr = link->resource[0]->start;
+
+ ioaddr = dev->base_addr;
+
+ /* The 3c574 normally uses an EEPROM for configuration info, including
+	   the hardware address. Future products may include a modem chip
+ and put the address in the CIS. */
+
+ len = pcmcia_get_tuple(link, 0x88, &buf);
+ if (buf && len >= 6) {
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(le16_to_cpu(buf[i * 2]));
+ kfree(buf);
+ } else {
+ kfree(buf); /* 0 < len < 6 */
+ EL3WINDOW(0);
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(read_eeprom(ioaddr, i + 10));
+ if (phys_addr[0] == htons(0x6060)) {
+ pr_notice("IO port conflict at 0x%03lx-0x%03lx\n",
+ dev->base_addr, dev->base_addr+15);
+ goto failed;
+ }
+ }
+ if (link->prod_id[1])
+ cardname = link->prod_id[1];
+ else
+ cardname = "3Com 3c574";
+
+ {
+ u_char mcr;
+ outw(2<<11, ioaddr + RunnerRdCtrl);
+ mcr = inb(ioaddr + 2);
+ outw(0<<11, ioaddr + RunnerRdCtrl);
+ pr_info(" ASIC rev %d,", mcr>>3);
+ EL3WINDOW(3);
+ config = inl(ioaddr + Wn3_Config);
+ lp->default_media = (config & Xcvr) >> Xcvr_shift;
+ lp->autoselect = config & Autoselect ? 1 : 0;
+ }
+
+ init_timer(&lp->media);
+
+ {
+ int phy;
+
+ /* Roadrunner only: Turn on the MII transceiver */
+ outw(0x8040, ioaddr + Wn3_Options);
+ mdelay(1);
+ outw(0xc040, ioaddr + Wn3_Options);
+ tc574_wait_for_completion(dev, TxReset);
+ tc574_wait_for_completion(dev, RxReset);
+ mdelay(1);
+ outw(0x8040, ioaddr + Wn3_Options);
+
+ EL3WINDOW(4);
+ for (phy = 1; phy <= 32; phy++) {
+ int mii_status;
+ mdio_sync(ioaddr, 32);
+ mii_status = mdio_read(ioaddr, phy & 0x1f, 1);
+ if (mii_status != 0xffff) {
+ lp->phys = phy & 0x1f;
+ dev_dbg(&link->dev, " MII transceiver at "
+ "index %d, status %x.\n",
+ phy, mii_status);
+ if ((mii_status & 0x0040) == 0)
+ mii_preamble_required = 1;
+ break;
+ }
+ }
+ if (phy > 32) {
+ pr_notice(" No MII transceivers found!\n");
+ goto failed;
+ }
+ i = mdio_read(ioaddr, lp->phys, 16) | 0x40;
+ mdio_write(ioaddr, lp->phys, 16, i);
+ lp->advertising = mdio_read(ioaddr, lp->phys, 4);
+ if (full_duplex) {
+ /* Only advertise the FD media types. */
+ lp->advertising &= ~0x02a0;
+ mdio_write(ioaddr, lp->phys, 4, lp->advertising);
+ }
+ }
+
+ SET_NETDEV_DEV(dev, &link->dev);
+
+ if (register_netdev(dev) != 0) {
+ pr_notice("register_netdev() failed\n");
+ goto failed;
+ }
+
+ netdev_info(dev, "%s at io %#3lx, irq %d, hw_addr %pM\n",
+ cardname, dev->base_addr, dev->irq, dev->dev_addr);
+ netdev_info(dev, " %dK FIFO split %s Rx:Tx, %sMII interface.\n",
+		    8 << (config & Ram_size),
+ ram_split[(config & Ram_split) >> Ram_split_shift],
+ config & Autoselect ? "autoselect " : "");
+
+ return 0;
+
+failed:
+ tc574_release(link);
+ return -ENODEV;
+
+} /* tc574_config */
+
+static void tc574_release(struct pcmcia_device *link)
+{
+ pcmcia_disable_device(link);
+}
+
+static int tc574_suspend(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ if (link->open)
+ netif_device_detach(dev);
+
+ return 0;
+}
+
+static int tc574_resume(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ if (link->open) {
+ tc574_reset(dev);
+ netif_device_attach(dev);
+ }
+
+ return 0;
+}
+
+static void dump_status(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ EL3WINDOW(1);
+ netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x, tx free %04x\n",
+ inw(ioaddr+EL3_STATUS),
+ inw(ioaddr+RxStatus), inb(ioaddr+TxStatus),
+ inw(ioaddr+TxFree));
+ EL3WINDOW(4);
+ netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
+ inw(ioaddr+0x04), inw(ioaddr+0x06),
+ inw(ioaddr+0x08), inw(ioaddr+0x0a));
+ EL3WINDOW(1);
+}
+
+/*
+ Use this for commands that may take time to finish
+*/
+static void tc574_wait_for_completion(struct net_device *dev, int cmd)
+{
+ int i = 1500;
+ outw(cmd, dev->base_addr + EL3_CMD);
+ while (--i > 0)
+ if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
+ if (i == 0)
+ netdev_notice(dev, "command 0x%04x did not complete!\n", cmd);
+}
+
+/* Read a word from the EEPROM using the regular EEPROM access register.
+ Assume that we are in register window zero.
+ */
+static unsigned short read_eeprom(unsigned int ioaddr, int index)
+{
+ int timer;
+ outw(EEPROM_Read + index, ioaddr + Wn0EepromCmd);
+ /* Pause for at least 162 usec for the read to take place. */
+ for (timer = 1620; timer >= 0; timer--) {
+ if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
+ break;
+ }
+ return inw(ioaddr + Wn0EepromData);
+}
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details.
+   The maximum data clock rate is 2.5 MHz. The timing is easily met by the
+ slow PC card interface. */
+
+#define MDIO_SHIFT_CLK 0x01
+#define MDIO_DIR_WRITE 0x04
+#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
+#define MDIO_DATA_READ 0x02
+#define MDIO_ENB_IN 0x00
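+
+/* Usage sketch (illustrative): the probe loop in tc574_config() reads the
+ * MII Basic Mode Status Register (register 1) of each candidate PHY:
+ *	mii_status = mdio_read(ioaddr, phy & 0x1f, 1);
+ * and treats any value other than 0xffff as a live transceiver.
+ */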
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+static void mdio_sync(unsigned int ioaddr, int bits)
+{
+ unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ while (-- bits >= 0) {
+ outw(MDIO_DATA_WRITE1, mdio_addr);
+ outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ }
+}
+
+static int mdio_read(unsigned int ioaddr, int phy_id, int location)
+{
+ int i;
+ int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ unsigned int retval = 0;
+ unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+ if (mii_preamble_required)
+ mdio_sync(ioaddr, 32);
+
+ /* Shift the read command bits out. */
+ for (i = 14; i >= 0; i--) {
+ int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outw(dataval, mdio_addr);
+ outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+ }
+	/* Read the two transition bits, 16 data bits, and one wire-idle bit. */
+ for (i = 19; i > 0; i--) {
+ outw(MDIO_ENB_IN, mdio_addr);
+ retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+ outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(unsigned int ioaddr, int phy_id, int location, int value)
+{
+ int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
+ unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+ int i;
+
+ if (mii_preamble_required)
+ mdio_sync(ioaddr, 32);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outw(dataval, mdio_addr);
+ outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+ }
+ /* Leave the interface idle. */
+ for (i = 1; i >= 0; i--) {
+ outw(MDIO_ENB_IN, mdio_addr);
+ outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ }
+}
+
+/* Reset and restore all of the 3c574 registers. */
+static void tc574_reset(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ int i;
+ unsigned int ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ tc574_wait_for_completion(dev, TotalReset|0x10);
+
+ spin_lock_irqsave(&lp->window_lock, flags);
+ /* Clear any transactions in progress. */
+ outw(0, ioaddr + RunnerWrCtrl);
+ outw(0, ioaddr + RunnerRdCtrl);
+
+ /* Set the station address and mask. */
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+ for (; i < 12; i+=2)
+ outw(0, ioaddr + i);
+
+ /* Reset config options */
+ EL3WINDOW(3);
+ outb((dev->mtu > 1500 ? 0x40 : 0), ioaddr + Wn3_MAC_Ctrl);
+ outl((lp->autoselect ? 0x01000000 : 0) | 0x0062001b,
+ ioaddr + Wn3_Config);
+ /* Roadrunner only: Turn on the MII transceiver. */
+ outw(0x8040, ioaddr + Wn3_Options);
+ mdelay(1);
+ outw(0xc040, ioaddr + Wn3_Options);
+ EL3WINDOW(1);
+ spin_unlock_irqrestore(&lp->window_lock, flags);
+
+ tc574_wait_for_completion(dev, TxReset);
+ tc574_wait_for_completion(dev, RxReset);
+ mdelay(1);
+ spin_lock_irqsave(&lp->window_lock, flags);
+ EL3WINDOW(3);
+ outw(0x8040, ioaddr + Wn3_Options);
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 10; i++)
+ inb(ioaddr + i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+ EL3WINDOW(4);
+ inb(ioaddr + 12);
+ inb(ioaddr + 13);
+
+ /* .. enable any extra statistics bits.. */
+ outw(0x0040, ioaddr + Wn4_NetDiag);
+
+ EL3WINDOW(1);
+ spin_unlock_irqrestore(&lp->window_lock, flags);
+
+ /* .. re-sync MII and re-fill what NWay is advertising. */
+ mdio_sync(ioaddr, 32);
+ mdio_write(ioaddr, lp->phys, 4, lp->advertising);
+ if (!auto_polarity) {
+ /* works for TDK 78Q2120 series MII's */
+ i = mdio_read(ioaddr, lp->phys, 16) | 0x20;
+ mdio_write(ioaddr, lp->phys, 16, i);
+ }
+
+ spin_lock_irqsave(&lp->window_lock, flags);
+ /* Switch to register set 1 for normal use, just for TxFree. */
+ set_rx_mode(dev);
+ spin_unlock_irqrestore(&lp->window_lock, flags);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
+ | AdapterFailure | RxEarly, ioaddr + EL3_CMD);
+}
+
+static int el3_open(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+
+ if (!pcmcia_dev_present(link))
+ return -ENODEV;
+
+ link->open++;
+ netif_start_queue(dev);
+
+ tc574_reset(dev);
+ lp->media.function = media_check;
+ lp->media.data = (unsigned long) dev;
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
+
+ dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
+ dev->name, inw(dev->base_addr + EL3_STATUS));
+
+ return 0;
+}
+
+static void el3_tx_timeout(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+
+ netdev_notice(dev, "Transmit timed out!\n");
+ dump_status(dev);
+ dev->stats.tx_errors++;
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ /* Issue TX_RESET and TX_START commands. */
+ tc574_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+}
+
+static void pop_tx_status(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ int i;
+
+ /* Clear the Tx status stack. */
+ for (i = 32; i > 0; i--) {
+ u_char tx_status = inb(ioaddr + TxStatus);
+ if (!(tx_status & 0x84))
+ break;
+ /* reset transmitter on jabber error or underrun */
+ if (tx_status & 0x30)
+ tc574_wait_for_completion(dev, TxReset);
+ if (tx_status & 0x38) {
+ pr_debug("%s: transmit error: status 0x%02x\n",
+ dev->name, tx_status);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->stats.tx_aborted_errors++;
+ }
+ outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
+ }
+}
+
+static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ pr_debug("%s: el3_start_xmit(length = %ld) called, "
+ "status %4.4x.\n", dev->name, (long)skb->len,
+ inw(ioaddr + EL3_STATUS));
+
+ spin_lock_irqsave(&lp->window_lock, flags);
+
+ dev->stats.tx_bytes += skb->len;
+
+ /* Put out the doubleword header... */
+ outw(skb->len, ioaddr + TX_FIFO);
+ outw(0, ioaddr + TX_FIFO);
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2);
+
+ /* TxFree appears only in Window 1, not offset 0x1c. */
+ if (inw(ioaddr + TxFree) <= 1536) {
+ netif_stop_queue(dev);
+ /* Interrupt us when the FIFO has room for max-sized packet.
+ The threshold is in units of dwords. */
+ outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ }
+
+ pop_tx_status(dev);
+ spin_unlock_irqrestore(&lp->window_lock, flags);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+/* The EL3 interrupt handler. */
+static irqreturn_t el3_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned int ioaddr;
+ unsigned status;
+ int work_budget = max_interrupt_work;
+ int handled = 0;
+
+ if (!netif_device_present(dev))
+ return IRQ_NONE;
+ ioaddr = dev->base_addr;
+
+ pr_debug("%s: interrupt, status %4.4x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS));
+
+ spin_lock(&lp->window_lock);
+
+ while ((status = inw(ioaddr + EL3_STATUS)) &
+ (IntLatch | RxComplete | RxEarly | StatsFull)) {
+ if (!netif_device_present(dev) ||
+ ((status & 0xe000) != 0x2000)) {
+ pr_debug("%s: Interrupt from dead card\n", dev->name);
+ break;
+ }
+
+ handled = 1;
+
+ if (status & RxComplete)
+ work_budget = el3_rx(dev, work_budget);
+
+ if (status & TxAvailable) {
+ pr_debug(" TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+ }
+
+ if (status & TxComplete)
+ pop_tx_status(dev);
+
+ if (status & (AdapterFailure | RxEarly | StatsFull)) {
+ /* Handle all uncommon interrupts. */
+ if (status & StatsFull)
+ update_stats(dev);
+ if (status & RxEarly) {
+ work_budget = el3_rx(dev, work_budget);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & AdapterFailure) {
+ u16 fifo_diag;
+ EL3WINDOW(4);
+ fifo_diag = inw(ioaddr + Wn4_FIFODiag);
+ EL3WINDOW(1);
+ netdev_notice(dev, "adapter failure, FIFO diagnostic register %04x\n",
+ fifo_diag);
+ if (fifo_diag & 0x0400) {
+ /* Tx overrun */
+ tc574_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ }
+ if (fifo_diag & 0x2000) {
+ /* Rx underrun */
+ tc574_wait_for_completion(dev, RxReset);
+ set_rx_mode(dev);
+ outw(RxEnable, ioaddr + EL3_CMD);
+ }
+ outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
+ }
+ }
+
+ if (--work_budget < 0) {
+ pr_debug("%s: Too much work in interrupt, "
+ "status %4.4x.\n", dev->name, status);
+ /* Clear all interrupts */
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+ }
+
+ pr_debug("%s: exiting interrupt, status %4.4x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS));
+
+ spin_unlock(&lp->window_lock);
+ return IRQ_RETVAL(handled);
+}
+
+/*
+ This timer serves two purposes: to check for missed interrupts
+ (and as a last resort, poll the NIC for events), and to monitor
+ the MII, reporting changes in cable status.
+*/
+static void media_check(unsigned long arg)
+{
+ struct net_device *dev = (struct net_device *) arg;
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ unsigned long flags;
+ unsigned short /* cable, */ media, partner;
+
+ if (!netif_device_present(dev))
+ goto reschedule;
+
+ /* Check for pending interrupt with expired latency timer: with
+ this, we can limp along even if the interrupt is blocked */
+ if ((inw(ioaddr + EL3_STATUS) & IntLatch) && (inb(ioaddr + Timer) == 0xff)) {
+ if (!lp->fast_poll)
+ netdev_info(dev, "interrupt(s) dropped!\n");
+
+ local_irq_save(flags);
+ el3_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+
+ lp->fast_poll = HZ;
+ }
+ if (lp->fast_poll) {
+ lp->fast_poll--;
+ lp->media.expires = jiffies + 2*HZ/100;
+ add_timer(&lp->media);
+ return;
+ }
+
+ spin_lock_irqsave(&lp->window_lock, flags);
+ EL3WINDOW(4);
+ media = mdio_read(ioaddr, lp->phys, 1);
+ partner = mdio_read(ioaddr, lp->phys, 5);
+ EL3WINDOW(1);
+
+ if (media != lp->media_status) {
+ if ((media ^ lp->media_status) & 0x0004)
+ netdev_info(dev, "%s link beat\n",
+ (lp->media_status & 0x0004) ? "lost" : "found");
+ if ((media ^ lp->media_status) & 0x0020) {
+ lp->partner = 0;
+ if (lp->media_status & 0x0020) {
+ netdev_info(dev, "autonegotiation restarted\n");
+ } else if (partner) {
+ partner &= lp->advertising;
+ lp->partner = partner;
+ netdev_info(dev, "autonegotiation complete: "
+ "%dbaseT-%cD selected\n",
+ (partner & 0x0180) ? 100 : 10,
+ (partner & 0x0140) ? 'F' : 'H');
+ } else {
+ netdev_info(dev, "link partner did not autonegotiate\n");
+ }
+
+ EL3WINDOW(3);
+ outb((partner & 0x0140 ? 0x20 : 0) |
+ (dev->mtu > 1500 ? 0x40 : 0), ioaddr + Wn3_MAC_Ctrl);
+ EL3WINDOW(1);
+
+ }
+ if (media & 0x0010)
+ netdev_info(dev, "remote fault detected\n");
+ if (media & 0x0002)
+ netdev_info(dev, "jabber detected\n");
+ lp->media_status = media;
+ }
+ spin_unlock_irqrestore(&lp->window_lock, flags);
+
+reschedule:
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
+}
+
+static struct net_device_stats *el3_get_stats(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+
+ if (netif_device_present(dev)) {
+ unsigned long flags;
+ spin_lock_irqsave(&lp->window_lock, flags);
+ update_stats(dev);
+ spin_unlock_irqrestore(&lp->window_lock, flags);
+ }
+ return &dev->stats;
+}
+
+/* Update statistics.
+ Surprisingly this need not be run single-threaded, but it effectively is.
+ The counters clear when read, so the adds must merely be atomic.
+ */
+static void update_stats(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ u8 rx, tx, up;
+
+ pr_debug("%s: updating the statistics.\n", dev->name);
+
+ if (inw(ioaddr+EL3_STATUS) == 0xffff) /* No card. */
+ return;
+
+ /* Unlike the 3c509 we need not turn off stats updates while reading. */
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ dev->stats.tx_carrier_errors += inb(ioaddr + 0);
+ dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */ inb(ioaddr + 2);
+ dev->stats.collisions += inb(ioaddr + 3);
+ dev->stats.tx_window_errors += inb(ioaddr + 4);
+ dev->stats.rx_fifo_errors += inb(ioaddr + 5);
+ dev->stats.tx_packets += inb(ioaddr + 6);
+ up = inb(ioaddr + 9);
+ dev->stats.tx_packets += (up&0x30) << 4;
+ /* Rx packets */ inb(ioaddr + 7);
+ /* Tx deferrals */ inb(ioaddr + 8);
+ rx = inw(ioaddr + 10);
+ tx = inw(ioaddr + 12);
+
+ EL3WINDOW(4);
+ /* BadSSD */ inb(ioaddr + 12);
+ up = inb(ioaddr + 13);
+
+ EL3WINDOW(1);
+}
+
+static int el3_rx(struct net_device *dev, int worklimit)
+{
+ unsigned int ioaddr = dev->base_addr;
+ short rx_status;
+
+ pr_debug("%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
+ while (!((rx_status = inw(ioaddr + RxStatus)) & 0x8000) &&
+ worklimit > 0) {
+ worklimit--;
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ short error = rx_status & 0x3800;
+ dev->stats.rx_errors++;
+ switch (error) {
+ case 0x0000: dev->stats.rx_over_errors++; break;
+ case 0x0800: dev->stats.rx_length_errors++; break;
+ case 0x1000: dev->stats.rx_frame_errors++; break;
+ case 0x1800: dev->stats.rx_length_errors++; break;
+ case 0x2000: dev->stats.rx_frame_errors++; break;
+ case 0x2800: dev->stats.rx_crc_errors++; break;
+ }
+ } else {
+ short pkt_len = rx_status & 0x7ff;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+5);
+
+ pr_debug(" Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb_reserve(skb, 2);
+ insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
+ ((pkt_len+3)>>2));
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ } else {
+ pr_debug("%s: couldn't allocate a sk_buff of"
+ " size %d.\n", dev->name, pkt_len);
+ dev->stats.rx_dropped++;
+ }
+ }
+ tc574_wait_for_completion(dev, RxDiscard);
+ }
+
+ return worklimit;
+}
+
+/* Provide ioctl() calls to examine the MII xcvr state. */
+static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ struct mii_ioctl_data *data = if_mii(rq);
+ int phy = lp->phys & 0x1f;
+
+ pr_debug("%s: In ioct(%-.6s, %#4.4x) %4.4x %4.4x %4.4x %4.4x.\n",
+ dev->name, rq->ifr_ifrn.ifrn_name, cmd,
+ data->phy_id, data->reg_num, data->val_in, data->val_out);
+
+ switch(cmd) {
+ case SIOCGMIIPHY: /* Get the address of the PHY in use. */
+ data->phy_id = phy;
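+		/* Fall through and read the chosen register. */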
+ case SIOCGMIIREG: /* Read the specified MII register. */
+ {
+ int saved_window;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->window_lock, flags);
+ saved_window = inw(ioaddr + EL3_CMD) >> 13;
+ EL3WINDOW(4);
+ data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f,
+ data->reg_num & 0x1f);
+ EL3WINDOW(saved_window);
+ spin_unlock_irqrestore(&lp->window_lock, flags);
+ return 0;
+ }
+ case SIOCSMIIREG: /* Write the specified MII register */
+ {
+ int saved_window;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->window_lock, flags);
+ saved_window = inw(ioaddr + EL3_CMD) >> 13;
+ EL3WINDOW(4);
+ mdio_write(ioaddr, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, data->val_in);
+ EL3WINDOW(saved_window);
+ spin_unlock_irqrestore(&lp->window_lock, flags);
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* The Odie chip has a 64 bin multicast filter, but the bit layout is not
+   documented. Until it is, we revert to receiving all multicast frames when
+ any multicast reception is desired.
+ Note: My other drivers emit a log message whenever promiscuous mode is
+ entered to help detect password sniffers. This is less desirable on
+ typical PC card machines, so we omit the message.
+ */
+
+static void set_rx_mode(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+
+ if (dev->flags & IFF_PROMISC)
+ outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
+ ioaddr + EL3_CMD);
+ else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
+ outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
+ else
+ outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->window_lock, flags);
+ set_rx_mode(dev);
+ spin_unlock_irqrestore(&lp->window_lock, flags);
+}
+
+static int el3_close(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ struct el3_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+
+ dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
+
+ if (pcmcia_dev_present(link)) {
+ unsigned long flags;
+
+ /* Turn off statistics ASAP. We update lp->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ /* Note: Switching to window 0 may disable the IRQ. */
+ EL3WINDOW(0);
+ spin_lock_irqsave(&lp->window_lock, flags);
+ update_stats(dev);
+ spin_unlock_irqrestore(&lp->window_lock, flags);
+
+ /* force interrupts off */
+ outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
+ }
+
+ link->open--;
+ netif_stop_queue(dev);
+ del_timer_sync(&lp->media);
+
+ return 0;
+}
+
+static const struct pcmcia_device_id tc574_ids[] = {
+ PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0574),
+ PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x0556, "cis/3CCFEM556.cis"),
+ PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, tc574_ids);
+
+static struct pcmcia_driver tc574_driver = {
+ .owner = THIS_MODULE,
+ .name = "3c574_cs",
+ .probe = tc574_probe,
+ .remove = tc574_detach,
+ .id_table = tc574_ids,
+ .suspend = tc574_suspend,
+ .resume = tc574_resume,
+};
+
+static int __init init_tc574(void)
+{
+ return pcmcia_register_driver(&tc574_driver);
+}
+
+static void __exit exit_tc574(void)
+{
+ pcmcia_unregister_driver(&tc574_driver);
+}
+
+module_init(init_tc574);
+module_exit(exit_tc574);
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
new file mode 100644
index 000000000000..972f80ecc510
--- /dev/null
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -0,0 +1,943 @@
+/*======================================================================
+
+ A PCMCIA ethernet driver for the 3com 3c589 card.
+
+ Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
+
+ 3c589_cs.c 1.162 2001/10/13 00:08:50
+
+ The network driver code is based on Donald Becker's 3c589 code:
+
+ Written 1994 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+ Donald Becker may be reached at becker@scyld.com
+
+ Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk>
+
+======================================================================*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME "3c589_cs"
+#define DRV_VERSION "1.162-ac"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+/* To minimize the size of the driver source I only define operating
+ constants if they are used several times. You'll need the manual
+ if you want to understand driver details. */
+/* Offsets from base I/O address. */
+#define EL3_DATA 0x00
+#define EL3_TIMER 0x0a
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+
+#define EEPROM_READ 0x0080
+#define EEPROM_BUSY 0x8000
+
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable. */
+enum c509cmd {
+ TotalReset = 0<<11,
+ SelectWindow = 1<<11,
+ StartCoax = 2<<11,
+ RxDisable = 3<<11,
+ RxEnable = 4<<11,
+ RxReset = 5<<11,
+ RxDiscard = 8<<11,
+ TxEnable = 9<<11,
+ TxDisable = 10<<11,
+ TxReset = 11<<11,
+ FakeIntr = 12<<11,
+ AckIntr = 13<<11,
+ SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11,
+ SetRxFilter = 16<<11,
+ SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11,
+ SetTxStart = 19<<11,
+ StatsEnable = 21<<11,
+ StatsDisable = 22<<11,
+ StopCoax = 23<<11
+};
+
+enum c509status {
+ IntLatch = 0x0001,
+ AdapterFailure = 0x0002,
+ TxComplete = 0x0004,
+ TxAvailable = 0x0008,
+ RxComplete = 0x0010,
+ RxEarly = 0x0020,
+ IntReq = 0x0040,
+ StatsFull = 0x0080,
+ CmdBusy = 0x1000
+};
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1,
+ RxMulticast = 2,
+ RxBroadcast = 4,
+ RxProm = 8
+};
+
+/* Register window 1 offsets, the window used in normal operation. */
+#define TX_FIFO 0x00
+#define RX_FIFO 0x00
+#define RX_STATUS 0x08
+#define TX_STATUS 0x0B
+#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */
+
+#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */
+#define WN4_MEDIA 0x0A /* Window 4: Various transcvr/media bits. */
+#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */
+#define MEDIA_LED 0x0001 /* Enable link light on 3C589E cards. */
+
+/* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+struct el3_private {
+ struct pcmcia_device *p_dev;
+ /* For transceiver monitoring */
+ struct timer_list media;
+ u16 media_status;
+ u16 fast_poll;
+ unsigned long last_irq;
+ spinlock_t lock;
+};
+
+static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("3Com 3c589 series PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
+
+/* Special hook for setting if_port when module is loaded */
+INT_MODULE_PARM(if_port, 0);
+
+
+/*====================================================================*/
+
+static int tc589_config(struct pcmcia_device *link);
+static void tc589_release(struct pcmcia_device *link);
+
+static u16 read_eeprom(unsigned int ioaddr, int index);
+static void tc589_reset(struct net_device *dev);
+static void media_check(unsigned long arg);
+static int el3_config(struct net_device *dev, struct ifmap *map);
+static int el3_open(struct net_device *dev);
+static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t el3_interrupt(int irq, void *dev_id);
+static void update_stats(struct net_device *dev);
+static struct net_device_stats *el3_get_stats(struct net_device *dev);
+static int el3_rx(struct net_device *dev);
+static int el3_close(struct net_device *dev);
+static void el3_tx_timeout(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static const struct ethtool_ops netdev_ethtool_ops;
+
+static void tc589_detach(struct pcmcia_device *p_dev);
+
+static const struct net_device_ops el3_netdev_ops = {
+ .ndo_open = el3_open,
+ .ndo_stop = el3_close,
+ .ndo_start_xmit = el3_start_xmit,
+ .ndo_tx_timeout = el3_tx_timeout,
+ .ndo_set_config = el3_config,
+ .ndo_get_stats = el3_get_stats,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int tc589_probe(struct pcmcia_device *link)
+{
+ struct el3_private *lp;
+ struct net_device *dev;
+
+ dev_dbg(&link->dev, "3c589_attach()\n");
+
+ /* Create new ethernet device */
+ dev = alloc_etherdev(sizeof(struct el3_private));
+ if (!dev)
+ return -ENOMEM;
+ lp = netdev_priv(dev);
+ link->priv = dev;
+ lp->p_dev = link;
+
+ spin_lock_init(&lp->lock);
+ link->resource[0]->end = 16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
+
+ link->config_flags |= CONF_ENABLE_IRQ;
+ link->config_index = 1;
+
+ dev->netdev_ops = &el3_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+
+ return tc589_config(link);
+}
+
+static void tc589_detach(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ dev_dbg(&link->dev, "3c589_detach\n");
+
+ unregister_netdev(dev);
+
+ tc589_release(link);
+
+ free_netdev(dev);
+} /* tc589_detach */
+
+static int tc589_config(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ __be16 *phys_addr;
+ int ret, i, j, multi = 0, fifo;
+ unsigned int ioaddr;
+ static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+ u8 *buf;
+ size_t len;
+
+ dev_dbg(&link->dev, "3c589_config\n");
+
+ phys_addr = (__be16 *)dev->dev_addr;
+ /* Is this a 3c562? */
+ if (link->manf_id != MANFID_3COM)
+ dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
+ multi = (link->card_id == PRODID_3COM_3C562);
+
+ link->io_lines = 16;
+
+ /* For the 3c562, the base address must be xx00-xx7f */
+ for (i = j = 0; j < 0x400; j += 0x10) {
+ if (multi && (j & 0x80)) continue;
+ link->resource[0]->start = j ^ 0x300;
+ i = pcmcia_request_io(link);
+ if (i == 0)
+ break;
+ }
+ if (i != 0)
+ goto failed;
+
+ ret = pcmcia_request_irq(link, el3_interrupt);
+ if (ret)
+ goto failed;
+
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto failed;
+
+ dev->irq = link->irq;
+ dev->base_addr = link->resource[0]->start;
+ ioaddr = dev->base_addr;
+ EL3WINDOW(0);
+
+ /* The 3c589 has an extra EEPROM for configuration info, including
+ the hardware address. The 3c562 puts the address in the CIS. */
+ len = pcmcia_get_tuple(link, 0x88, &buf);
+ if (buf && len >= 6) {
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
+ kfree(buf);
+ } else {
+ kfree(buf); /* 0 < len < 6 */
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(read_eeprom(ioaddr, i));
+ if (phys_addr[0] == htons(0x6060)) {
+ dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
+ dev->base_addr, dev->base_addr+15);
+ goto failed;
+ }
+ }
+
+ /* The address and resource configuration register aren't loaded from
+ the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version. */
+ outw(0x3f00, ioaddr + 8);
+ fifo = inl(ioaddr);
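+	/* As reported at the end of probe: the low three bits of this word
+	 * encode the FIFO size and bits 17:16 index the Rx:Tx ram_split
+	 * table. */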
+
+ /* The if_port symbol can be set when the module is loaded */
+ if ((if_port >= 0) && (if_port <= 3))
+ dev->if_port = if_port;
+ else
+ dev_err(&link->dev, "invalid if_port requested\n");
+
+ SET_NETDEV_DEV(dev, &link->dev);
+
+ if (register_netdev(dev) != 0) {
+ dev_err(&link->dev, "register_netdev() failed\n");
+ goto failed;
+ }
+
+ netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n",
+ (multi ? "562" : "589"), dev->base_addr, dev->irq,
+ dev->dev_addr);
+ netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n",
+ (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
+ if_names[dev->if_port]);
+ return 0;
+
+failed:
+ tc589_release(link);
+ return -ENODEV;
+} /* tc589_config */
+
+static void tc589_release(struct pcmcia_device *link)
+{
+ pcmcia_disable_device(link);
+}
+
+static int tc589_suspend(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ if (link->open)
+ netif_device_detach(dev);
+
+ return 0;
+}
+
+static int tc589_resume(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ if (link->open) {
+ tc589_reset(dev);
+ netif_device_attach(dev);
+ }
+
+ return 0;
+}
+
+/*====================================================================*/
+
+/*
+ Use this for commands that may take time to finish
+*/
+static void tc589_wait_for_completion(struct net_device *dev, int cmd)
+{
+ int i = 100;
+ outw(cmd, dev->base_addr + EL3_CMD);
+ while (--i > 0)
+ if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
+ if (i == 0)
+ netdev_warn(dev, "command 0x%04x did not complete!\n", cmd);
+}
+
+/*
+ Read a word from the EEPROM using the regular EEPROM access register.
+ Assume that we are in register window zero.
+*/
+static u16 read_eeprom(unsigned int ioaddr, int index)
+{
+ int i;
+ outw(EEPROM_READ + index, ioaddr + 10);
+ /* Reading the eeprom takes 162 us */
+ for (i = 1620; i >= 0; i--)
+ if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0)
+ break;
+ return inw(ioaddr + 12);
+}
+
+/*
+ Set transceiver type, perhaps to something other than what the user
+ specified in dev->if_port.
+*/
+static void tc589_set_xcvr(struct net_device *dev, int if_port)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+
+ EL3WINDOW(0);
+ switch (if_port) {
+ case 0: case 1: outw(0, ioaddr + 6); break;
+ case 2: outw(3<<14, ioaddr + 6); break;
+ case 3: outw(1<<14, ioaddr + 6); break;
+ }
+ /* On PCMCIA, this just turns on the LED */
+ outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD);
+ /* 10baseT interface, enable link beat and jabber check. */
+ EL3WINDOW(4);
+ outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA);
+ EL3WINDOW(1);
+ if (if_port == 2)
+ lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000);
+ else
+ lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800);
+}
+
+static void dump_status(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ EL3WINDOW(1);
+ netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS),
+ inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE));
+ EL3WINDOW(4);
+ netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
+ inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08),
+ inw(ioaddr+0x0a));
+ EL3WINDOW(1);
+}
+
+/* Reset and restore all of the 3c589 registers. */
+static void tc589_reset(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ int i;
+
+ EL3WINDOW(0);
+ outw(0x0001, ioaddr + 4); /* Activate board. */
+ outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */
+
+ /* Set the station address in window 2. */
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+
+ tc589_set_xcvr(dev, dev->if_port);
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 9; i++)
+ inb(ioaddr+i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+
+ /* Switch to register set 1 for normal use. */
+ EL3WINDOW(1);
+
+ set_rx_mode(dev);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
+ | AdapterFailure, ioaddr + EL3_CMD);
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+};
+
+static int el3_config(struct net_device *dev, struct ifmap *map)
+{
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (map->port <= 3) {
+ dev->if_port = map->port;
+ netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
+ tc589_set_xcvr(dev, dev->if_port);
+ } else
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int el3_open(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+
+ if (!pcmcia_dev_present(link))
+ return -ENODEV;
+
+ link->open++;
+ netif_start_queue(dev);
+
+ tc589_reset(dev);
+ init_timer(&lp->media);
+ lp->media.function = media_check;
+ lp->media.data = (unsigned long) dev;
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
+
+ dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
+ dev->name, inw(dev->base_addr + EL3_STATUS));
+
+ return 0;
+}
+
+static void el3_tx_timeout(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+
+ netdev_warn(dev, "Transmit timed out!\n");
+ dump_status(dev);
+ dev->stats.tx_errors++;
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ /* Issue TX_RESET and TX_START commands. */
+ tc589_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+}
+
+static void pop_tx_status(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ int i;
+
+ /* Clear the Tx status stack. */
+ for (i = 32; i > 0; i--) {
+ u_char tx_status = inb(ioaddr + TX_STATUS);
+ if (!(tx_status & 0x84)) break;
+ /* reset transmitter on jabber error or underrun */
+ if (tx_status & 0x30)
+ tc589_wait_for_completion(dev, TxReset);
+ if (tx_status & 0x38) {
+ netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->stats.tx_aborted_errors++;
+ }
+ outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
+ }
+}
+
+static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ struct el3_private *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n",
+ (long)skb->len, inw(ioaddr + EL3_STATUS));
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ dev->stats.tx_bytes += skb->len;
+
+ /* Put out the doubleword header... */
+ outw(skb->len, ioaddr + TX_FIFO);
+ outw(0x00, ioaddr + TX_FIFO);
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+
+ if (inw(ioaddr + TX_FREE) <= 1536) {
+ netif_stop_queue(dev);
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
+ }
+
+ pop_tx_status(dev);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/* The EL3 interrupt handler. */
+static irqreturn_t el3_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned int ioaddr;
+ __u16 status;
+ int i = 0, handled = 1;
+
+ if (!netif_device_present(dev))
+ return IRQ_NONE;
+
+ ioaddr = dev->base_addr;
+
+ netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS));
+
+ spin_lock(&lp->lock);
+ while ((status = inw(ioaddr + EL3_STATUS)) &
+ (IntLatch | RxComplete | StatsFull)) {
+ if ((status & 0xe000) != 0x2000) {
+ netdev_dbg(dev, "interrupt from dead card\n");
+ handled = 0;
+ break;
+ }
+ if (status & RxComplete)
+ el3_rx(dev);
+ if (status & TxAvailable) {
+ netdev_dbg(dev, " TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+ }
+ if (status & TxComplete)
+ pop_tx_status(dev);
+ if (status & (AdapterFailure | RxEarly | StatsFull)) {
+ /* Handle all uncommon interrupts. */
+ if (status & StatsFull) /* Empty statistics. */
+ update_stats(dev);
+ if (status & RxEarly) { /* Rx early is unused. */
+ el3_rx(dev);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & AdapterFailure) {
+ u16 fifo_diag;
+ EL3WINDOW(4);
+ fifo_diag = inw(ioaddr + 4);
+ EL3WINDOW(1);
+ netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
+ fifo_diag);
+ if (fifo_diag & 0x0400) {
+ /* Tx overrun */
+ tc589_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ }
+ if (fifo_diag & 0x2000) {
+ /* Rx underrun */
+ tc589_wait_for_completion(dev, RxReset);
+ set_rx_mode(dev);
+ outw(RxEnable, ioaddr + EL3_CMD);
+ }
+ outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
+ }
+ }
+ if (++i > 10) {
+ netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n",
+ status);
+ /* Clear all interrupts */
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+ }
+ lp->last_irq = jiffies;
+ spin_unlock(&lp->lock);
+ netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
+ inw(ioaddr + EL3_STATUS));
+ return IRQ_RETVAL(handled);
+}
+
+static void media_check(unsigned long arg)
+{
+ struct net_device *dev = (struct net_device *)(arg);
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ u16 media, errs;
+ unsigned long flags;
+
+ if (!netif_device_present(dev)) goto reschedule;
+
+ /* Check for pending interrupt with expired latency timer: with
+ this, we can limp along even if the interrupt is blocked */
+ if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
+ (inb(ioaddr + EL3_TIMER) == 0xff)) {
+ if (!lp->fast_poll)
+ netdev_warn(dev, "interrupt(s) dropped!\n");
+
+ local_irq_save(flags);
+ el3_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+
+ lp->fast_poll = HZ;
+ }
+ if (lp->fast_poll) {
+ lp->fast_poll--;
+ lp->media.expires = jiffies + HZ/100;
+ add_timer(&lp->media);
+ return;
+ }
+
+ /* lp->lock guards the EL3 window. Window should always be 1 except
+ when the lock is held */
+ spin_lock_irqsave(&lp->lock, flags);
+ EL3WINDOW(4);
+ media = inw(ioaddr+WN4_MEDIA) & 0xc810;
+
+ /* Ignore collisions unless we've had no irq's recently */
+ if (time_before(jiffies, lp->last_irq + HZ)) {
+ media &= ~0x0010;
+ } else {
+ /* Try harder to detect carrier errors */
+ EL3WINDOW(6);
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ errs = inb(ioaddr + 0);
+ outw(StatsEnable, ioaddr + EL3_CMD);
+ dev->stats.tx_carrier_errors += errs;
+ if (errs || (lp->media_status & 0x0010)) media |= 0x0010;
+ }
+
+ if (media != lp->media_status) {
+ if ((media & lp->media_status & 0x8000) &&
+ ((lp->media_status ^ media) & 0x0800))
+ netdev_info(dev, "%s link beat\n",
+ (lp->media_status & 0x0800 ? "lost" : "found"));
+ else if ((media & lp->media_status & 0x4000) &&
+ ((lp->media_status ^ media) & 0x0010))
+ netdev_info(dev, "coax cable %s\n",
+ (lp->media_status & 0x0010 ? "ok" : "problem"));
+ if (dev->if_port == 0) {
+ if (media & 0x8000) {
+ if (media & 0x0800)
+ netdev_info(dev, "flipped to 10baseT\n");
+ else
+ tc589_set_xcvr(dev, 2);
+ } else if (media & 0x4000) {
+ if (media & 0x0010)
+ tc589_set_xcvr(dev, 1);
+ else
+ netdev_info(dev, "flipped to 10base2\n");
+ }
+ }
+ lp->media_status = media;
+ }
+
+ EL3WINDOW(1);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+reschedule:
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
+}
+
+static struct net_device_stats *el3_get_stats(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ struct pcmcia_device *link = lp->p_dev;
+
+ if (pcmcia_dev_present(link)) {
+ spin_lock_irqsave(&lp->lock, flags);
+ update_stats(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return &dev->stats;
+}
+
+/*
+ Update statistics. We change to register window 6, so this should be run
+ single-threaded if the device is active. This is expected to be a rare
+ operation, and it's simpler for the rest of the driver to assume that
+ window 1 is always valid rather than use a special window-state variable.
+
+ Caller must hold the lock for this
+*/
+static void update_stats(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+
+ netdev_dbg(dev, "updating the statistics.\n");
+ /* Turn off statistics updates while reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ dev->stats.tx_carrier_errors += inb(ioaddr + 0);
+ dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */ inb(ioaddr + 2);
+ dev->stats.collisions += inb(ioaddr + 3);
+ dev->stats.tx_window_errors += inb(ioaddr + 4);
+ dev->stats.rx_fifo_errors += inb(ioaddr + 5);
+ dev->stats.tx_packets += inb(ioaddr + 6);
+ /* Rx packets */ inb(ioaddr + 7);
+ /* Tx deferrals */ inb(ioaddr + 8);
+ /* Rx octets */ inw(ioaddr + 10);
+ /* Tx octets */ inw(ioaddr + 12);
+
+ /* Back to window 1, and turn statistics back on. */
+ EL3WINDOW(1);
+ outw(StatsEnable, ioaddr + EL3_CMD);
+}
+
+static int el3_rx(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ int worklimit = 32;
+ short rx_status;
+
+ netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
+ while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
+ worklimit > 0) {
+ worklimit--;
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ short error = rx_status & 0x3800;
+ dev->stats.rx_errors++;
+ switch (error) {
+ case 0x0000: dev->stats.rx_over_errors++; break;
+ case 0x0800: dev->stats.rx_length_errors++; break;
+ case 0x1000: dev->stats.rx_frame_errors++; break;
+ case 0x1800: dev->stats.rx_length_errors++; break;
+ case 0x2000: dev->stats.rx_frame_errors++; break;
+ case 0x2800: dev->stats.rx_crc_errors++; break;
+ }
+ } else {
+ short pkt_len = rx_status & 0x7ff;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+5);
+
+ netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb_reserve(skb, 2);
+ insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
+ (pkt_len+3)>>2);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ } else {
+ netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
+ pkt_len);
+ dev->stats.rx_dropped++;
+ }
+ }
+ /* Pop the top of the Rx FIFO */
+ tc589_wait_for_completion(dev, RxDiscard);
+ }
+ if (worklimit == 0)
+ netdev_warn(dev, "too much work in el3_rx!\n");
+ return 0;
+}
+
+static void set_rx_mode(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ u16 opts = SetRxFilter | RxStation | RxBroadcast;
+
+ if (dev->flags & IFF_PROMISC)
+ opts |= RxMulticast | RxProm;
+ else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
+ opts |= RxMulticast;
+ outw(opts, ioaddr + EL3_CMD);
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct el3_private *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ set_rx_mode(dev);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int el3_close(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+ unsigned int ioaddr = dev->base_addr;
+
+ dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
+
+ if (pcmcia_dev_present(link)) {
+ /* Turn off statistics ASAP. We update dev->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ if (dev->if_port == 2)
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+ else if (dev->if_port == 1) {
+ /* Disable link beat and jabber */
+ EL3WINDOW(4);
+ outw(0, ioaddr + WN4_MEDIA);
+ }
+
+ /* Switching back to window 0 disables the IRQ. */
+ EL3WINDOW(0);
+ /* But we explicitly zero the IRQ line select anyway. */
+ outw(0x0f00, ioaddr + WN0_IRQ);
+
+ /* Check if the card still exists */
+ if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
+ update_stats(dev);
+ }
+
+ link->open--;
+ netif_stop_queue(dev);
+ del_timer_sync(&lp->media);
+
+ return 0;
+}
+
+static const struct pcmcia_device_id tc589_ids[] = {
+ PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0101, 0x0562),
+ PCMCIA_MFC_DEVICE_PROD_ID1(0, "Motorola MARQUIS", 0xf03e4e77),
+ PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0589),
+ PCMCIA_DEVICE_PROD_ID12("Farallon", "ENet", 0x58d93fc4, 0x992c2202),
+ PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x0035, "cis/3CXEM556.cis"),
+ PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x003d, "cis/3CXEM556.cis"),
+ PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, tc589_ids);
+
+static struct pcmcia_driver tc589_driver = {
+ .owner = THIS_MODULE,
+ .name = "3c589_cs",
+ .probe = tc589_probe,
+ .remove = tc589_detach,
+ .id_table = tc589_ids,
+ .suspend = tc589_suspend,
+ .resume = tc589_resume,
+};
+
+static int __init init_tc589(void)
+{
+ return pcmcia_register_driver(&tc589_driver);
+}
+
+static void __exit exit_tc589(void)
+{
+ pcmcia_unregister_driver(&tc589_driver);
+}
+
+module_init(init_tc589);
+module_exit(exit_tc589);
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
new file mode 100644
index 000000000000..9ca45dcba755
--- /dev/null
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -0,0 +1,3327 @@
+/* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */
+/*
+ Written 1996-1999 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ This driver is for the 3Com "Vortex" and "Boomerang" series ethercards.
+ Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597
+ and the EtherLink XL 3c900 and 3c905 cards.
+
+ Problem reports and questions should be directed to
+ vortex@scyld.com
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+*/
+
+/*
+ * FIXME: This driver _could_ support MTU changing, but doesn't. See Don's hamachi.c implementation
+ * as well as other drivers.
+ *
+ * NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k
+ * due to dead code elimination. There will be some performance benefits from this due to
+ * elimination of all the tests and reduced cache footprint.
+ */
+
+
+#define DRV_NAME "3c59x"
+
+
+
+/* A few values that may be tweaked. */
+/* Keep the ring sizes a power of two for efficiency. */
+#define TX_RING_SIZE 16
+#define RX_RING_SIZE 32
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+/* "Knobs" that adjust features and parameters. */
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1512 effectively disables this feature. */
+#ifndef __arm__
+static int rx_copybreak = 200;
+#else
+/* ARM systems perform better by disregarding the bus-master
+ transfer capability of these cards. -- rmk */
+static int rx_copybreak = 1513;
+#endif
+/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
+static const int mtu = 1500;
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 32;
+/* Tx timeout interval (millisecs) */
+static int watchdog = 5000;
+
+/* Allow aggregation of Tx interrupts. Saves CPU load at the cost
+ * of possible Tx stalls if the system is blocking interrupts
+ * somewhere else. Undefine this to disable.
+ */
+#define tx_interrupt_mitigation 1
+
+/* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */
+#define vortex_debug debug
+#ifdef VORTEX_DEBUG
+static int vortex_debug = VORTEX_DEBUG;
+#else
+static int vortex_debug = 1;
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/mii.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/highmem.h>
+#include <linux/eisa.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+#include <linux/gfp.h>
+#include <asm/irq.h> /* For nr_irqs only. */
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+/* Kernel compatibility defines, some common to David Hinds' PCMCIA package.
+ This is only in the support-all-kernels source code. */
+
+#define RUN_AT(x) (jiffies + (x))
+
+#include <linux/delay.h>
+
+
+static const char version[] __devinitconst =
+ DRV_NAME ": Donald Becker and others.\n";
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver");
+MODULE_LICENSE("GPL");
+
+
+/* Operational parameters that are not usually changed. */
+
+/* The Vortex size is twice that of the original EtherLinkIII series: the
+ runtime register window, window 1, is now always mapped in.
+ The Boomerang size is twice as large as the Vortex -- it has additional
+ bus master control registers. */
+#define VORTEX_TOTAL_SIZE 0x20
+#define BOOMERANG_TOTAL_SIZE 0x40
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+ This is only set with the original DP83840 on older 3c905 boards, so the extra
+ code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required;
+
+#define PFX DRV_NAME ": "
+
+
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the 3Com FastEtherLink and FastEtherLink
+XL, 3Com's PCI to 10/100baseT adapters. It also works with the 10Mbps
+versions of the FastEtherLink cards. The supported product IDs are
+ 3c590, 3c592, 3c595, 3c597, 3c900, 3c905
+
+The related ISA 3c515 is supported with a separate driver, 3c515.c, included
+with the kernel source or available from
+ cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS should be set to assign the
+PCI INTA signal to an otherwise unused system IRQ line.
+
+The EEPROM settings for media type and forced-full-duplex are observed.
+The EEPROM media type should be left at the default "autoselect" unless using
+10base2 or AUI connections which cannot be reliably detected.
+
+III. Driver operation
+
+The 3c59x series uses an interface that's very similar to the previous 3c5x9
+series. The primary interface is two programmed-I/O FIFOs, with an
+alternate single-contiguous-region bus-master transfer (see next).
+
+The 3c900 "Boomerang" series uses a full-bus-master interface with separate
+lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet,
+DEC Tulip and Intel Speedo3. The first chip version retains a compatible
+programmed-I/O interface that has been removed in 'B' and subsequent board
+revisions.
+
+One extension that is advertised in a very large font is that the adapters
+are capable of being bus masters. On the Vortex chip this capability was
+only for a single contiguous region making it far less useful than the full
+bus master capability. There is a significant performance impact from taking
+an extra interrupt or polling for the completion of each transfer, as well
+as difficulty sharing the single transfer engine between the transmit and
+receive threads. Using DMA transfers is a win only with large blocks or
+with the flawed versions of the Intel Orion motherboard PCI controller.
+
+The Boomerang chip's full-bus-master interface is useful, and has the
+currently-unused advantages over other similar chips that queued transmit
+packets may be reordered and receive buffer groups are associated with a
+single frame.
+
+With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme.
+Rather than a fixed intermediate receive buffer, this scheme allocates
+full-sized skbuffs as receive buffers. The value RX_COPYBREAK is used as
+the copying breakpoint: it is chosen to trade off the memory wasted by
+passing full-sized skbuffs to the queue layer for all frames against the
+cost of copying small frames into correctly-sized skbuffs.
+
+IIIC. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is
+single-threaded by the hardware and other software.
+
+IV. Notes
+
+Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development
+3c590, 3c595, and 3c900 boards.
+The name "Vortex" is the internal 3Com project name for the PCI ASIC, and
+the EISA version is called "Demon". According to Terry these names come
+from rides at the local amusement park.
+
+The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes!
+This driver only supports ethernet packets because of the skbuff allocation
+limit of 4K.
+*/
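+
+/* A minimal sketch (illustrative only -- the real receive paths are
+ vortex_rx() and boomerang_rx() below, which also handle DMA mapping and
+ ring refill) of the rx_copybreak scheme described above: frames shorter
+ than rx_copybreak are copied into a fresh, correctly-sized skbuff so the
+ full-sized ring buffer can stay in place. */
+static inline struct sk_buff *rx_copybreak_copy(const struct sk_buff *ring_skb,
+ int pkt_len)
+{
+ struct sk_buff *skb;
+
+ if (pkt_len >= rx_copybreak)
+ return NULL; /* pass the full-sized ring skbuff up instead */
+ skb = dev_alloc_skb(pkt_len + 2);
+ if (skb == NULL)
+ return NULL; /* caller falls back to the ring skbuff */
+ skb_reserve(skb, 2); /* 16-byte align the IP header */
+ skb_copy_from_linear_data(ring_skb, skb_put(skb, pkt_len), pkt_len);
+ return skb;
+}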
+
+/* This table drives the PCI probe routines. It's mostly boilerplate in all
+ of the drivers, and will likely be provided by some future kernel.
+*/
+enum pci_flags_bit {
+ PCI_USES_MASTER=4,
+};
+
+enum { IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8,
+ EEPROM_8BIT=0x10, /* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */
+ HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100,
+ INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800,
+ EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000, WNO_XCVR_PWR=0x4000,
+ EXTRA_PREAMBLE=0x8000, EEPROM_RESET=0x10000, };
+
+enum vortex_chips {
+ CH_3C590 = 0,
+ CH_3C592,
+ CH_3C597,
+ CH_3C595_1,
+ CH_3C595_2,
+
+ CH_3C595_3,
+ CH_3C900_1,
+ CH_3C900_2,
+ CH_3C900_3,
+ CH_3C900_4,
+
+ CH_3C900_5,
+ CH_3C900B_FL,
+ CH_3C905_1,
+ CH_3C905_2,
+ CH_3C905B_TX,
+ CH_3C905B_1,
+
+ CH_3C905B_2,
+ CH_3C905B_FX,
+ CH_3C905C,
+ CH_3C9202,
+ CH_3C980,
+ CH_3C9805,
+
+ CH_3CSOHO100_TX,
+ CH_3C555,
+ CH_3C556,
+ CH_3C556B,
+ CH_3C575,
+
+ CH_3C575_1,
+ CH_3CCFE575,
+ CH_3CCFE575CT,
+ CH_3CCFE656,
+ CH_3CCFEM656,
+
+ CH_3CCFEM656_1,
+ CH_3C450,
+ CH_3C920,
+ CH_3C982A,
+ CH_3C982B,
+
+ CH_905BT4,
+ CH_920B_EMB_WNM,
+};
+
+
+/* note: this array is directly indexed by the above enums and MUST
+ * be kept in sync with both the enums above and the PCI device
+ * table below
+ */
+static struct vortex_chip_info {
+ const char *name;
+ int flags;
+ int drv_flags;
+ int io_size;
+} vortex_info_tbl[] __devinitdata = {
+ {"3c590 Vortex 10Mbps",
+ PCI_USES_MASTER, IS_VORTEX, 32, },
+ {"3c592 EISA 10Mbps Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */
+ PCI_USES_MASTER, IS_VORTEX, 32, },
+ {"3c597 EISA Fast Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */
+ PCI_USES_MASTER, IS_VORTEX, 32, },
+ {"3c595 Vortex 100baseTx",
+ PCI_USES_MASTER, IS_VORTEX, 32, },
+ {"3c595 Vortex 100baseT4",
+ PCI_USES_MASTER, IS_VORTEX, 32, },
+
+ {"3c595 Vortex 100base-MII",
+ PCI_USES_MASTER, IS_VORTEX, 32, },
+ {"3c900 Boomerang 10baseT",
+ PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
+ {"3c900 Boomerang 10Mbps Combo",
+ PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
+ {"3c900 Cyclone 10Mbps TPO", /* AKPM: from Don's 0.99M */
+ PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+ {"3c900 Cyclone 10Mbps Combo",
+ PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+
+ {"3c900 Cyclone 10Mbps TPC", /* AKPM: from Don's 0.99M */
+ PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+ {"3c900B-FL Cyclone 10base-FL",
+ PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+ {"3c905 Boomerang 100baseTx",
+ PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
+ {"3c905 Boomerang 100baseT4",
+ PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
+ {"3C905B-TX Fast Etherlink XL PCI",
+ PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
+ {"3c905B Cyclone 100baseTx",
+ PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
+
+ {"3c905B Cyclone 10/100/BNC",
+ PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
+ {"3c905B-FX Cyclone 100baseFx",
+ PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+ {"3c905C Tornado",
+ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
+ {"3c920B-EMB-WNM (ATI Radeon 9100 IGP)",
+ PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, },
+ {"3c980 Cyclone",
+ PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
+
+ {"3c980C Python-T",
+ PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
+ {"3cSOHO100-TX Hurricane",
+ PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
+ {"3c555 Laptop Hurricane",
+ PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, },
+ {"3c556 Laptop Tornado",
+ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR|
+ HAS_HWCKSM, 128, },
+ {"3c556B Laptop Hurricane",
+ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR|
+ WNO_XCVR_PWR|HAS_HWCKSM, 128, },
+
+ {"3c575 [Megahertz] 10/100 LAN CardBus",
+ PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
+ {"3c575 Boomerang CardBus",
+ PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
+ {"3CCFE575BT Cyclone CardBus",
+ PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|
+ INVERT_LED_PWR|HAS_HWCKSM, 128, },
+ {"3CCFE575CT Tornado CardBus",
+ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+ MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
+ {"3CCFE656 Cyclone CardBus",
+ PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+ INVERT_LED_PWR|HAS_HWCKSM, 128, },
+
+ {"3CCFEM656B Cyclone+Winmodem CardBus",
+ PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+ INVERT_LED_PWR|HAS_HWCKSM, 128, },
+ {"3CXFEM656C Tornado+Winmodem CardBus", /* From pcmcia-cs-3.1.5 */
+ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+ MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
+ {"3c450 HomePNA Tornado", /* AKPM: from Don's 0.99Q */
+ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
+ {"3c920 Tornado",
+ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
+ {"3c982 Hydra Dual Port A",
+ PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },
+
+ {"3c982 Hydra Dual Port B",
+ PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },
+ {"3c905B-T4",
+ PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
+ {"3c920B-EMB-WNM Tornado",
+ PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
+
+ {NULL,}, /* NULL terminated list. */
+};
+
+
+static DEFINE_PCI_DEVICE_TABLE(vortex_pci_tbl) = {
+ { 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
+ { 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
+ { 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
+ { 0x10B7, 0x5950, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_1 },
+ { 0x10B7, 0x5951, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_2 },
+
+ { 0x10B7, 0x5952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_3 },
+ { 0x10B7, 0x9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_1 },
+ { 0x10B7, 0x9001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_2 },
+ { 0x10B7, 0x9004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_3 },
+ { 0x10B7, 0x9005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_4 },
+
+ { 0x10B7, 0x9006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_5 },
+ { 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL },
+ { 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 },
+ { 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 },
+ { 0x10B7, 0x9054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_TX },
+ { 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 },
+
+ { 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 },
+ { 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX },
+ { 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C },
+ { 0x10B7, 0x9202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9202 },
+ { 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 },
+ { 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 },
+
+ { 0x10B7, 0x7646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CSOHO100_TX },
+ { 0x10B7, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C555 },
+ { 0x10B7, 0x6055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556 },
+ { 0x10B7, 0x6056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556B },
+ { 0x10B7, 0x5b57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575 },
+
+ { 0x10B7, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575_1 },
+ { 0x10B7, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575 },
+ { 0x10B7, 0x5257, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575CT },
+ { 0x10B7, 0x6560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE656 },
+ { 0x10B7, 0x6562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656 },
+
+ { 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 },
+ { 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C450 },
+ { 0x10B7, 0x9201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C920 },
+ { 0x10B7, 0x1201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982A },
+ { 0x10B7, 0x1202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982B },
+
+ { 0x10B7, 0x9056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_905BT4 },
+ { 0x10B7, 0x9210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_920B_EMB_WNM },
+
+ {0,} /* 0 terminated list. */
+};
+MODULE_DEVICE_TABLE(pci, vortex_pci_tbl);
+
+
+/* Operational definitions.
+ These are not used by other compilation units and thus are not
+ exported in a ".h" file.
+
+ First the windows. There are eight register windows, with the command
+ and status registers available in each.
+ */
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable.
+ Note that 11 parameter bits were fine for ethernet, but the newer chips
+ can handle FDDI-length frames (~4500 octets), so parameters now count
+ 32-bit 'Dwords' rather than octets. */
+
+enum vortex_cmd {
+ TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+ RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11,
+ UpStall = 6<<11, UpUnstall = (6<<11)+1,
+ DownStall = (6<<11)+2, DownUnstall = (6<<11)+3,
+ RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+ FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11, SetTxStart = 19<<11,
+ StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11,
+ StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,};
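+
+/* Illustrative helper (not used by the driver itself, which writes the
+ sum directly, e.g. "SetRxThreshold + (1536>>2)"): a write to EL3_CMD is
+ just the command in the top five bits OR'd with its argument in the low
+ eleven. */
+static inline u16 el3_encode_cmd(u16 cmd, u16 arg)
+{
+ return cmd | (arg & 0x07ff);
+}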
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
+
+/* Bits in the general status register. */
+enum vortex_status {
+ IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004,
+ TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+ IntReq = 0x0040, StatsFull = 0x0080,
+ DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10,
+ DMAInProgress = 1<<11, /* DMA controller is still busy.*/
+ CmdInProgress = 1<<12, /* EL3_CMD is still busy.*/
+};
+
+/* Register window 1 offsets, the window used in normal operation.
+ On the Vortex this window is always mapped at offsets 0x10-0x1f. */
+enum Window1 {
+ TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
+ RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B,
+ TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */
+};
+enum Window0 {
+ Wn0EepromCmd = 10, /* Window 0: EEPROM command register. */
+ Wn0EepromData = 12, /* Window 0: EEPROM results register. */
+ IntrStatus=0x0E, /* Valid in all windows. */
+};
+enum Win0_EEPROM_bits {
+ EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
+ EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */
+ EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */
+};
+/* EEPROM locations. */
+enum eeprom_offset {
+ PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3,
+ EtherLink3ID=7, IFXcvrIO=8, IRQLine=9,
+ NodeAddr01=10, NodeAddr23=11, NodeAddr45=12,
+ DriverTune=13, Checksum=15};
+
+enum Window2 { /* Window 2. */
+ Wn2_ResetOptions=12,
+};
+enum Window3 { /* Window 3: MAC/config bits. */
+ Wn3_Config=0, Wn3_MaxPktSize=4, Wn3_MAC_Ctrl=6, Wn3_Options=8,
+};
+
+#define BFEXT(value, offset, bitcount) \
+ ((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1))
+
+#define BFINS(lhs, rhs, offset, bitcount) \
+ (((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) | \
+ (((rhs) & ((1 << (bitcount)) - 1)) << (offset)))
+
+#define RAM_SIZE(v) BFEXT(v, 0, 3)
+#define RAM_WIDTH(v) BFEXT(v, 3, 1)
+#define RAM_SPEED(v) BFEXT(v, 4, 2)
+#define ROM_SIZE(v) BFEXT(v, 6, 2)
+#define RAM_SPLIT(v) BFEXT(v, 16, 2)
+#define XCVR(v) BFEXT(v, 20, 4)
+#define AUTOSELECT(v) BFEXT(v, 24, 1)
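+
+/* Example use of the accessors above (a sketch; vortex_probe1() and
+ vortex_up() below are the real callers): the transceiver type occupies
+ bits 20-23 of Wn3_Config, so it is read with XCVR() and rewritten in
+ place with BFINS(). */
+static inline u32 config_set_xcvr(u32 config, unsigned int xcvr)
+{
+ return BFINS(config, xcvr, 20, 4); /* XCVR() of the result is xcvr */
+}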
+
+enum Window4 { /* Window 4: Xcvr/media bits. */
+ Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
+};
+enum Win4_Media_bits {
+ Media_SQE = 0x0008, /* Enable SQE error counting for AUI. */
+ Media_10TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */
+ Media_Lnk = 0x0080, /* Enable just link beat for 100TX/100FX. */
+ Media_LnkBeat = 0x0800,
+};
+enum Window7 { /* Window 7: Bus Master control. */
+ Wn7_MasterAddr = 0, Wn7_VlanEtherType=4, Wn7_MasterLen = 6,
+ Wn7_MasterStatus = 12,
+};
+/* Boomerang bus master control registers. */
+enum MasterCtrl {
+ PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c,
+ TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38,
+};
+
+/* The Rx and Tx descriptor lists.
+ Caution Alpha hackers: these types are 32 bits! Note also the 8-byte
+ alignment constraint on tx_ring[] and rx_ring[]. */
+#define LAST_FRAG 0x80000000 /* Last Addr/Len pair in descriptor. */
+#define DN_COMPLETE 0x00010000 /* This packet has been downloaded */
+struct boom_rx_desc {
+ __le32 next; /* Last entry points to 0. */
+ __le32 status;
+ __le32 addr; /* Up to 63 addr/len pairs possible. */
+ __le32 length; /* Set LAST_FRAG to indicate last pair. */
+};
+/* Values for the Rx status entry. */
+enum rx_desc_status {
+ RxDComplete=0x00008000, RxDError=0x4000,
+ /* See boomerang_rx() for actual error bits */
+ IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27,
+ IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31,
+};
+
+#ifdef MAX_SKB_FRAGS
+#define DO_ZEROCOPY 1
+#else
+#define DO_ZEROCOPY 0
+#endif
+
+struct boom_tx_desc {
+ __le32 next; /* Last entry points to 0. */
+ __le32 status; /* bits 0:12 length, others see below. */
+#if DO_ZEROCOPY
+ struct {
+ __le32 addr;
+ __le32 length;
+ } frag[1+MAX_SKB_FRAGS];
+#else
+ __le32 addr;
+ __le32 length;
+#endif
+};
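+
+/* Sketch (simplified: single fragment, no DMA mapping) of how a Tx
+ descriptor's addr/len pair is formed -- the length word of the final
+ pair carries LAST_FRAG so the NIC knows where the fragment list ends.
+ boomerang_start_xmit() below builds the real descriptors. */
+static inline void boom_tx_fill_one_frag(struct boom_tx_desc *d,
+ dma_addr_t addr, int len)
+{
+#if DO_ZEROCOPY
+ d->frag[0].addr = cpu_to_le32(addr);
+ d->frag[0].length = cpu_to_le32(len | LAST_FRAG);
+#else
+ d->addr = cpu_to_le32(addr);
+ d->length = cpu_to_le32(len | LAST_FRAG);
+#endif
+}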
+
+/* Values for the Tx status entry. */
+enum tx_desc_status {
+ CRCDisable=0x2000, TxDComplete=0x8000,
+ AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000,
+ TxIntrUploaded=0x80000000, /* IRQ when in FIFO, but maybe not sent. */
+};
+
+/* Chip features we care about in vp->capabilities, read from the EEPROM. */
+enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 };
+
+struct vortex_extra_stats {
+ unsigned long tx_deferred;
+ unsigned long tx_max_collisions;
+ unsigned long tx_multiple_collisions;
+ unsigned long tx_single_collisions;
+ unsigned long rx_bad_ssd;
+};
+
+struct vortex_private {
+ /* The Rx and Tx rings should be quad-word-aligned. */
+ struct boom_rx_desc* rx_ring;
+ struct boom_tx_desc* tx_ring;
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+ /* The addresses of transmit- and receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ struct vortex_extra_stats xstats; /* NIC-specific extra stats */
+ struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
+ dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */
+
+ /* PCI configuration space information. */
+ struct device *gendev;
+ void __iomem *ioaddr; /* IO address space */
+ void __iomem *cb_fn_base; /* CardBus function status addr space. */
+
+ /* Some values here only for performance evaluation and path-coverage */
+ int rx_nocopy, rx_copy, queued_packet, rx_csumhits;
+ int card_idx;
+
+ /* The remainder are related to chip state, mostly media selection. */
+ struct timer_list timer; /* Media selection timer. */
+ struct timer_list rx_oom_timer; /* Rx skb allocation retry timer */
+ int options; /* User-settable misc. driver options. */
+ unsigned int media_override:4, /* Passed-in media type. */
+ default_media:4, /* Read from the EEPROM/Wn3_Config. */
+ full_duplex:1, autoselect:1,
+ bus_master:1, /* Vortex can only do a fragment bus-master. */
+ full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang */
+ flow_ctrl:1, /* Use 802.3x flow control (PAUSE only) */
+ partner_flow_ctrl:1, /* Partner supports flow control */
+ has_nway:1,
+ enable_wol:1, /* Wake-on-LAN is enabled */
+ pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */
+ open:1,
+ medialock:1,
+ must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
+ large_frames:1, /* accept large frames */
+ handling_irq:1; /* private in_irq indicator */
+ /* {get|set}_wol operations are already serialized by rtnl.
+ * no additional locking is required for the enable_wol and acpi_set_WOL()
+ */
+ int drv_flags;
+ u16 status_enable;
+ u16 intr_enable;
+ u16 available_media; /* From Wn3_Options. */
+ u16 capabilities, info1, info2; /* Various, from EEPROM. */
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[2]; /* MII device addresses. */
+ u16 deferred; /* Resend these interrupts when we
+ * bail from the ISR */
+ u16 io_size; /* Size of PCI region (for release_region) */
+
+ /* Serialises access to hardware other than MII and variables below.
+ * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */
+ spinlock_t lock;
+
+ spinlock_t mii_lock; /* Serialises access to MII */
+ struct mii_if_info mii; /* MII lib hooks/info */
+ spinlock_t window_lock; /* Serialises access to windowed regs */
+ int window; /* Register window */
+};
+
+static void window_set(struct vortex_private *vp, int window)
+{
+ if (window != vp->window) {
+ iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD);
+ vp->window = window;
+ }
+}
+
+#define DEFINE_WINDOW_IO(size) \
+static u ## size \
+window_read ## size(struct vortex_private *vp, int window, int addr) \
+{ \
+ unsigned long flags; \
+ u ## size ret; \
+ spin_lock_irqsave(&vp->window_lock, flags); \
+ window_set(vp, window); \
+ ret = ioread ## size(vp->ioaddr + addr); \
+ spin_unlock_irqrestore(&vp->window_lock, flags); \
+ return ret; \
+} \
+static void \
+window_write ## size(struct vortex_private *vp, u ## size value, \
+ int window, int addr) \
+{ \
+ unsigned long flags; \
+ spin_lock_irqsave(&vp->window_lock, flags); \
+ window_set(vp, window); \
+ iowrite ## size(value, vp->ioaddr + addr); \
+ spin_unlock_irqrestore(&vp->window_lock, flags); \
+}
+DEFINE_WINDOW_IO(8)
+DEFINE_WINDOW_IO(16)
+DEFINE_WINDOW_IO(32)
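+
+/* Usage sketch for the accessors generated above: each call takes
+ window_lock, selects the register window only if it changed, and does a
+ single I/O access -- replacing the bare EL3WINDOW()/inw() pairs used by
+ the older ISA drivers. */
+static inline u16 example_read_media_status(struct vortex_private *vp)
+{
+ return window_read16(vp, 4, Wn4_Media); /* window 4, media register */
+}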
+
+#ifdef CONFIG_PCI
+#define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL)
+#else
+#define DEVICE_PCI(dev) NULL
+#endif
+
+#define VORTEX_PCI(vp) \
+ ((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL))
+
+#ifdef CONFIG_EISA
+#define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL)
+#else
+#define DEVICE_EISA(dev) NULL
+#endif
+
+#define VORTEX_EISA(vp) \
+ ((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL))
+
+/* The action to take with a media selection timer tick.
+ Note that we deviate from the 3Com order by checking 10base2 before AUI.
+ */
+enum xcvr_types {
+ XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
+ XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10,
+};
+
+static const struct media_table {
+ char *name;
+ unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */
+ mask:8, /* The transceiver-present bit in Wn3_Config.*/
+ next:8; /* The media type to try next. */
+ int wait; /* Time before we check media status. */
+} media_tbl[] = {
+ { "10baseT", Media_10TP,0x08, XCVR_10base2, (14*HZ)/10},
+ { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10},
+ { "undefined", 0, 0x80, XCVR_10baseT, 10000},
+ { "10base2", 0, 0x10, XCVR_AUI, (1*HZ)/10},
+ { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10},
+ { "100baseFX", Media_Lnk, 0x04, XCVR_MII, (14*HZ)/10},
+ { "MII", 0, 0x41, XCVR_10baseT, 3*HZ },
+ { "undefined", 0, 0x01, XCVR_10baseT, 10000},
+ { "Autonegotiate", 0, 0x41, XCVR_10baseT, 3*HZ},
+ { "MII-External", 0, 0x41, XCVR_10baseT, 3*HZ },
+ { "Default", 0, 0xFF, XCVR_10baseT, 10000},
+};
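+
+/* Sketch of how the "next" chain above is walked (vortex_up() below does
+ this inline when autoselecting): start at 100baseTx and skip any medium
+ the adapter does not advertise in Wn3_Options. Like the driver's own
+ loop, this assumes at least one advertised medium terminates the walk. */
+static inline int first_available_medium(u16 available_media)
+{
+ int port = XCVR_100baseTx;
+
+ while (!(available_media & media_tbl[port].mask))
+ port = media_tbl[port].next;
+ return port;
+}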
+
+static struct {
+ const char str[ETH_GSTRING_LEN];
+} ethtool_stats_keys[] = {
+ { "tx_deferred" },
+ { "tx_max_collisions" },
+ { "tx_multiple_collisions" },
+ { "tx_single_collisions" },
+ { "rx_bad_ssd" },
+};
+
+/* number of ETHTOOL_GSTATS u64's */
+#define VORTEX_NUM_STATS 5
+
+static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
+ int chip_idx, int card_idx);
+static int vortex_up(struct net_device *dev);
+static void vortex_down(struct net_device *dev, int final);
+static int vortex_open(struct net_device *dev);
+static void mdio_sync(struct vortex_private *vp, int bits);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
+static void vortex_timer(unsigned long arg);
+static void rx_oom_timer(unsigned long arg);
+static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static int vortex_rx(struct net_device *dev);
+static int boomerang_rx(struct net_device *dev);
+static irqreturn_t vortex_interrupt(int irq, void *dev_id);
+static irqreturn_t boomerang_interrupt(int irq, void *dev_id);
+static int vortex_close(struct net_device *dev);
+static void dump_tx_ring(struct net_device *dev);
+static void update_stats(void __iomem *ioaddr, struct net_device *dev);
+static struct net_device_stats *vortex_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+#ifdef CONFIG_PCI
+static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+#endif
+static void vortex_tx_timeout(struct net_device *dev);
+static void acpi_set_WOL(struct net_device *dev);
+static const struct ethtool_ops vortex_ethtool_ops;
+static void set_8021q_mode(struct net_device *dev, int enable);
+
+/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
+/* Option count limit only -- unlimited interfaces are supported. */
+#define MAX_UNITS 8
+static int options[MAX_UNITS] = { [0 ... MAX_UNITS-1] = -1 };
+static int full_duplex[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
+static int hw_checksums[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
+static int flow_ctrl[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
+static int enable_wol[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
+static int use_mmio[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
+static int global_options = -1;
+static int global_full_duplex = -1;
+static int global_enable_wol = -1;
+static int global_use_mmio = -1;
+
+/* Variables to work-around the Compaq PCI BIOS32 problem. */
+static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900;
+static struct net_device *compaq_net_device;
+
+static int vortex_cards_found;
+
+module_param(debug, int, 0);
+module_param(global_options, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param(global_full_duplex, int, 0);
+module_param_array(full_duplex, int, NULL, 0);
+module_param_array(hw_checksums, int, NULL, 0);
+module_param_array(flow_ctrl, int, NULL, 0);
+module_param(global_enable_wol, int, 0);
+module_param_array(enable_wol, int, NULL, 0);
+module_param(rx_copybreak, int, 0);
+module_param(max_interrupt_work, int, 0);
+module_param(compaq_ioaddr, int, 0);
+module_param(compaq_irq, int, 0);
+module_param(compaq_device_id, int, 0);
+module_param(watchdog, int, 0);
+module_param(global_use_mmio, int, 0);
+module_param_array(use_mmio, int, NULL, 0);
+MODULE_PARM_DESC(debug, "3c59x debug level (0-6)");
+MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex");
+MODULE_PARM_DESC(global_options, "3c59x: same as options, but applies to all NICs if options is unset");
+MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)");
+MODULE_PARM_DESC(global_full_duplex, "3c59x: same as full_duplex, but applies to all NICs if full_duplex is unset");
+MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)");
+MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)");
+MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)");
+MODULE_PARM_DESC(global_enable_wol, "3c59x: same as enable_wol, but applies to all NICs if enable_wol is unset");
+MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt");
+MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)");
+MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)");
+MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)");
+MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds");
+MODULE_PARM_DESC(global_use_mmio, "3c59x: same as use_mmio, but applies to all NICs if use_mmio is unset");
+MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)");
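+
+/* Example invocation (illustrative values only): the per-NIC parameters
+ * are comma-separated arrays and -1 leaves a slot at its default, so a
+ * two-card system forcing 100baseTx media and full duplex on the second
+ * card could be loaded as:
+ *
+ * modprobe 3c59x options=-1,4 full_duplex=-1,1
+ */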
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void poll_vortex(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ unsigned long flags;
+ local_irq_save(flags);
+ (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
+ local_irq_restore(flags);
+}
+#endif
+
+#ifdef CONFIG_PM
+
+static int vortex_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *ndev = pci_get_drvdata(pdev);
+
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+ netif_device_detach(ndev);
+ vortex_down(ndev, 1);
+
+ return 0;
+}
+
+static int vortex_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ int err;
+
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+ err = vortex_up(ndev);
+ if (err)
+ return err;
+
+ netif_device_attach(ndev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops vortex_pm_ops = {
+ .suspend = vortex_suspend,
+ .resume = vortex_resume,
+ .freeze = vortex_suspend,
+ .thaw = vortex_resume,
+ .poweroff = vortex_suspend,
+ .restore = vortex_resume,
+};
+
+#define VORTEX_PM_OPS (&vortex_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define VORTEX_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
+
+#ifdef CONFIG_EISA
+static struct eisa_device_id vortex_eisa_ids[] = {
+ { "TCM5920", CH_3C592 },
+ { "TCM5970", CH_3C597 },
+ { "" }
+};
+MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);
+
+static int __init vortex_eisa_probe(struct device *device)
+{
+ void __iomem *ioaddr;
+ struct eisa_device *edev;
+
+ edev = to_eisa_device(device);
+
+ if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME))
+ return -EBUSY;
+
+ ioaddr = ioport_map(edev->base_addr, VORTEX_TOTAL_SIZE);
+
+ if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12,
+ edev->id.driver_data, vortex_cards_found)) {
+ release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
+ return -ENODEV;
+ }
+
+ vortex_cards_found++;
+
+ return 0;
+}
+
+static int __devexit vortex_eisa_remove(struct device *device)
+{
+ struct eisa_device *edev;
+ struct net_device *dev;
+ struct vortex_private *vp;
+ void __iomem *ioaddr;
+
+ edev = to_eisa_device(device);
+ dev = eisa_get_drvdata(edev);
+
+ if (!dev) {
+ pr_err("vortex_eisa_remove called for Compaq device!\n");
+ BUG();
+ }
+
+ vp = netdev_priv(dev);
+ ioaddr = vp->ioaddr;
+
+ unregister_netdev(dev);
+ iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
+ release_region(dev->base_addr, VORTEX_TOTAL_SIZE);
+
+ free_netdev(dev);
+ return 0;
+}
+
+static struct eisa_driver vortex_eisa_driver = {
+ .id_table = vortex_eisa_ids,
+ .driver = {
+ .name = "3c59x",
+ .probe = vortex_eisa_probe,
+ .remove = __devexit_p(vortex_eisa_remove)
+ }
+};
+
+#endif /* CONFIG_EISA */
+
+/* returns count found (>= 0), or negative on error */
+static int __init vortex_eisa_init(void)
+{
+ int eisa_found = 0;
+ int orig_cards_found = vortex_cards_found;
+
+#ifdef CONFIG_EISA
+ int err;
+
+ err = eisa_driver_register (&vortex_eisa_driver);
+ if (!err) {
+ /*
+ * Because of the way the EISA bus is probed, we cannot assume
+ * any device has been found when we exit from
+ * eisa_driver_register (the bus root driver may not be
+ * initialized yet). So we blindly assume something was
+ * found, and let the sysfs magic happen...
+ */
+ eisa_found = 1;
+ }
+#endif
+
+ /* Special code to work-around the Compaq PCI BIOS32 problem. */
+ if (compaq_ioaddr) {
+ vortex_probe1(NULL, ioport_map(compaq_ioaddr, VORTEX_TOTAL_SIZE),
+ compaq_irq, compaq_device_id, vortex_cards_found++);
+ }
+
+ return vortex_cards_found - orig_cards_found + eisa_found;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit vortex_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rc, unit, pci_bar;
+ struct vortex_chip_info *vci;
+ void __iomem *ioaddr;
+
+ /* wake up and enable device */
+ rc = pci_enable_device(pdev);
+ if (rc < 0)
+ goto out;
+
+ unit = vortex_cards_found;
+
+ if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) {
+ /* Determine the default if the user didn't override us */
+ vci = &vortex_info_tbl[ent->driver_data];
+ pci_bar = vci->drv_flags & (IS_CYCLONE | IS_TORNADO) ? 1 : 0;
+ } else if (unit < MAX_UNITS && use_mmio[unit] >= 0)
+ pci_bar = use_mmio[unit] ? 1 : 0;
+ else
+ pci_bar = global_use_mmio ? 1 : 0;
+
+ ioaddr = pci_iomap(pdev, pci_bar, 0);
+ if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
+ ioaddr = pci_iomap(pdev, 0, 0);
+ if (!ioaddr) {
+ pci_disable_device(pdev);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq,
+ ent->driver_data, unit);
+ if (rc < 0) {
+ pci_iounmap(pdev, ioaddr);
+ pci_disable_device(pdev);
+ goto out;
+ }
+
+ vortex_cards_found++;
+
+out:
+ return rc;
+}
+
+static const struct net_device_ops boomerang_netdev_ops = {
+ .ndo_open = vortex_open,
+ .ndo_stop = vortex_close,
+ .ndo_start_xmit = boomerang_start_xmit,
+ .ndo_tx_timeout = vortex_tx_timeout,
+ .ndo_get_stats = vortex_get_stats,
+#ifdef CONFIG_PCI
+ .ndo_do_ioctl = vortex_ioctl,
+#endif
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = poll_vortex,
+#endif
+};
+
+static const struct net_device_ops vortex_netdev_ops = {
+ .ndo_open = vortex_open,
+ .ndo_stop = vortex_close,
+ .ndo_start_xmit = vortex_start_xmit,
+ .ndo_tx_timeout = vortex_tx_timeout,
+ .ndo_get_stats = vortex_get_stats,
+#ifdef CONFIG_PCI
+ .ndo_do_ioctl = vortex_ioctl,
+#endif
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = poll_vortex,
+#endif
+};
+
+/*
+ * Start up the PCI/EISA device which is described by *gendev.
+ * Return 0 on success.
+ *
+ * NOTE: pdev can be NULL, for the case of a Compaq device
+ */
+static int __devinit vortex_probe1(struct device *gendev,
+ void __iomem *ioaddr, int irq,
+ int chip_idx, int card_idx)
+{
+ struct vortex_private *vp;
+ int option;
+ unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
+ int i, step;
+ struct net_device *dev;
+ static int printed_version;
+ int retval, print_info;
+ struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx];
+ const char *print_name = "3c59x";
+ struct pci_dev *pdev = NULL;
+ struct eisa_device *edev = NULL;
+
+ if (!printed_version) {
+ pr_info("%s", version);
+ printed_version = 1;
+ }
+
+ if (gendev) {
+ if ((pdev = DEVICE_PCI(gendev))) {
+ print_name = pci_name(pdev);
+ }
+
+ if ((edev = DEVICE_EISA(gendev))) {
+ print_name = dev_name(&edev->dev);
+ }
+ }
+
+ dev = alloc_etherdev(sizeof(*vp));
+ retval = -ENOMEM;
+ if (!dev) {
+ pr_err(PFX "unable to allocate etherdev, aborting\n");
+ goto out;
+ }
+ SET_NETDEV_DEV(dev, gendev);
+ vp = netdev_priv(dev);
+
+ option = global_options;
+
+ /* The lower four bits are the media type. */
+ if (dev->mem_start) {
+ /*
+ * The 'options' param is passed in as the third arg to the
+ * LILO 'ether=' argument for non-modular use
+ */
+ option = dev->mem_start;
+ }
+ else if (card_idx < MAX_UNITS) {
+ if (options[card_idx] >= 0)
+ option = options[card_idx];
+ }
+
+ if (option > 0) {
+ if (option & 0x8000)
+ vortex_debug = 7;
+ if (option & 0x4000)
+ vortex_debug = 2;
+ if (option & 0x0400)
+ vp->enable_wol = 1;
+ }
+
+ print_info = (vortex_debug > 1);
+ if (print_info)
+ pr_info("See Documentation/networking/vortex.txt\n");
+
+ pr_info("%s: 3Com %s %s at %p.\n",
+ print_name,
+ pdev ? "PCI" : "EISA",
+ vci->name,
+ ioaddr);
+
+ dev->base_addr = (unsigned long)ioaddr;
+ dev->irq = irq;
+ dev->mtu = mtu;
+ vp->ioaddr = ioaddr;
+ vp->large_frames = mtu > 1500;
+ vp->drv_flags = vci->drv_flags;
+ vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
+ vp->io_size = vci->io_size;
+ vp->card_idx = card_idx;
+ vp->window = -1;
+
+ /* module list only for Compaq device */
+ if (gendev == NULL) {
+ compaq_net_device = dev;
+ }
+
+ /* PCI-only startup logic */
+ if (pdev) {
+ /* EISA resources already marked, so only PCI needs to do this here */
+ /* Ignore return value, because Cardbus drivers already allocate for us */
+ if (request_region(dev->base_addr, vci->io_size, print_name) != NULL)
+ vp->must_free_region = 1;
+
+ /* enable bus-mastering if necessary */
+ if (vci->flags & PCI_USES_MASTER)
+ pci_set_master(pdev);
+
+ if (vci->drv_flags & IS_VORTEX) {
+ u8 pci_latency;
+ u8 new_latency = 248;
+
+ /* Check the PCI latency value. On the 3c590 series the latency timer
+ must be set to the maximum value to avoid data corruption that occurs
+ when the timer expires during a transfer. This bug exists in the Vortex
+ chips only. */
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
+ if (pci_latency < new_latency) {
+ pr_info("%s: Overriding PCI latency timer (CFLT) setting of %d, new value is %d.\n",
+ print_name, pci_latency, new_latency);
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
+ }
+ }
+ }
+
+ spin_lock_init(&vp->lock);
+ spin_lock_init(&vp->mii_lock);
+ spin_lock_init(&vp->window_lock);
+ vp->gendev = gendev;
+ vp->mii.dev = dev;
+ vp->mii.mdio_read = mdio_read;
+ vp->mii.mdio_write = mdio_write;
+ vp->mii.phy_id_mask = 0x1f;
+ vp->mii.reg_num_mask = 0x1f;
+
+ /* Makes sure the rings are at least 16-byte aligned. */
+ vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
+ + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+ &vp->rx_ring_dma);
+ retval = -ENOMEM;
+ if (!vp->rx_ring)
+ goto free_region;
+
+ vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
+ vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
+
+ /* if we are a PCI driver, we store info in pdev->driver_data
+ * instead of a module list */
+ if (pdev)
+ pci_set_drvdata(pdev, dev);
+ if (edev)
+ eisa_set_drvdata(edev, dev);
+
+ vp->media_override = 7;
+ if (option >= 0) {
+ vp->media_override = ((option & 7) == 2) ? 0 : option & 15;
+ if (vp->media_override != 7)
+ vp->medialock = 1;
+ vp->full_duplex = (option & 0x200) ? 1 : 0;
+ vp->bus_master = (option & 16) ? 1 : 0;
+ }
+
+ if (global_full_duplex > 0)
+ vp->full_duplex = 1;
+ if (global_enable_wol > 0)
+ vp->enable_wol = 1;
+
+ if (card_idx < MAX_UNITS) {
+ if (full_duplex[card_idx] > 0)
+ vp->full_duplex = 1;
+ if (flow_ctrl[card_idx] > 0)
+ vp->flow_ctrl = 1;
+ if (enable_wol[card_idx] > 0)
+ vp->enable_wol = 1;
+ }
+
+ vp->mii.force_media = vp->full_duplex;
+ vp->options = option;
+ /* Read the station address from the EEPROM. */
+ {
+ int base;
+
+ if (vci->drv_flags & EEPROM_8BIT)
+ base = 0x230;
+ else if (vci->drv_flags & EEPROM_OFFSET)
+ base = EEPROM_Read + 0x30;
+ else
+ base = EEPROM_Read;
+
+ for (i = 0; i < 0x40; i++) {
+ int timer;
+ window_write16(vp, base + i, 0, Wn0EepromCmd);
+ /* Pause for at least 162 us for the read to take place. */
+ for (timer = 10; timer >= 0; timer--) {
+ udelay(162);
+ if ((window_read16(vp, 0, Wn0EepromCmd) &
+ 0x8000) == 0)
+ break;
+ }
+ eeprom[i] = window_read16(vp, 0, Wn0EepromData);
+ }
+ }
+ for (i = 0; i < 0x18; i++)
+ checksum ^= eeprom[i];
+ checksum = (checksum ^ (checksum >> 8)) & 0xff;
+ if (checksum != 0x00) { /* Grrr, needless incompatible change, 3Com. */
+ while (i < 0x21)
+ checksum ^= eeprom[i++];
+ checksum = (checksum ^ (checksum >> 8)) & 0xff;
+ }
+ if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO))
+ pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
+ for (i = 0; i < 3; i++)
+ ((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+ if (print_info)
+ pr_cont(" %pM", dev->dev_addr);
+ /* Unfortunately an all-zero EEPROM passes the checksum, and this
+ is found in the wild in failure cases. Crypto is hard 8) */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ retval = -EINVAL;
+ pr_err("*** EEPROM MAC address is invalid.\n");
+ goto free_ring; /* With every pack */
+ }
+ for (i = 0; i < 6; i++)
+ window_write8(vp, dev->dev_addr[i], 2, i);
+
+ if (print_info)
+ pr_cont(", IRQ %d\n", dev->irq);
+ /* Tell them about an invalid IRQ. */
+ if (dev->irq <= 0 || dev->irq >= nr_irqs)
+ pr_warning(" *** Warning: IRQ %d is unlikely to work! ***\n",
+ dev->irq);
+
+ step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1;
+ if (print_info) {
+ pr_info(" product code %02x%02x rev %02x.%d date %02d-%02d-%02d\n",
+ eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
+ step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9);
+ }
+
+
+ if (pdev && vci->drv_flags & HAS_CB_FNS) {
+ unsigned short n;
+
+ vp->cb_fn_base = pci_iomap(pdev, 2, 0);
+ if (!vp->cb_fn_base) {
+ retval = -ENOMEM;
+ goto free_ring;
+ }
+
+ if (print_info) {
+ pr_info("%s: CardBus functions mapped %16.16llx->%p\n",
+ print_name,
+ (unsigned long long)pci_resource_start(pdev, 2),
+ vp->cb_fn_base);
+ }
+
+ n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
+ if (vp->drv_flags & INVERT_LED_PWR)
+ n |= 0x10;
+ if (vp->drv_flags & INVERT_MII_PWR)
+ n |= 0x4000;
+ window_write16(vp, n, 2, Wn2_ResetOptions);
+ if (vp->drv_flags & WNO_XCVR_PWR) {
+ window_write16(vp, 0x0800, 0, 0);
+ }
+ }
+
+ /* Extract our information from the EEPROM data. */
+ vp->info1 = eeprom[13];
+ vp->info2 = eeprom[15];
+ vp->capabilities = eeprom[16];
+
+ if (vp->info1 & 0x8000) {
+ vp->full_duplex = 1;
+ if (print_info)
+ pr_info("Full duplex capable\n");
+ }
+
+ {
+ static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+ unsigned int config;
+ vp->available_media = window_read16(vp, 3, Wn3_Options);
+ if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */
+ vp->available_media = 0x40;
+ config = window_read32(vp, 3, Wn3_Config);
+ if (print_info) {
+ pr_debug(" Internal config register is %4.4x, transceivers %#x.\n",
+ config, window_read16(vp, 3, Wn3_Options));
+ pr_info(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
+ 8 << RAM_SIZE(config),
+ RAM_WIDTH(config) ? "word" : "byte",
+ ram_split[RAM_SPLIT(config)],
+ AUTOSELECT(config) ? "autoselect/" : "",
+ XCVR(config) > XCVR_ExtMII ? "<invalid transceiver>" :
+ media_tbl[XCVR(config)].name);
+ }
+ vp->default_media = XCVR(config);
+ if (vp->default_media == XCVR_NWAY)
+ vp->has_nway = 1;
+ vp->autoselect = AUTOSELECT(config);
+ }
+
+ if (vp->media_override != 7) {
+ pr_info("%s: Media override to transceiver type %d (%s).\n",
+ print_name, vp->media_override,
+ media_tbl[vp->media_override].name);
+ dev->if_port = vp->media_override;
+ } else
+ dev->if_port = vp->default_media;
+
+ if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) ||
+ dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
+ int phy, phy_idx = 0;
+ mii_preamble_required++;
+ if (vp->drv_flags & EXTRA_PREAMBLE)
+ mii_preamble_required++;
+ mdio_sync(vp, 32);
+ mdio_read(dev, 24, MII_BMSR);
+ for (phy = 0; phy < 32 && phy_idx < 1; phy++) {
+ int mii_status, phyx;
+
+ /*
+ * For the 3c905CX we look at index 24 first, because it bogusly
+ * reports an external PHY at all indices
+ */
+ if (phy == 0)
+ phyx = 24;
+ else if (phy <= 24)
+ phyx = phy - 1;
+ else
+ phyx = phy;
+ mii_status = mdio_read(dev, phyx, MII_BMSR);
+ if (mii_status && mii_status != 0xffff) {
+ vp->phys[phy_idx++] = phyx;
+ if (print_info) {
+ pr_info(" MII transceiver found at address %d, status %4x.\n",
+ phyx, mii_status);
+ }
+ if ((mii_status & 0x0040) == 0)
+ mii_preamble_required++;
+ }
+ }
+ mii_preamble_required--;
+ if (phy_idx == 0) {
+ pr_warning(" ***WARNING*** No MII transceivers found!\n");
+ vp->phys[0] = 24;
+ } else {
+ vp->advertising = mdio_read(dev, vp->phys[0], MII_ADVERTISE);
+ if (vp->full_duplex) {
+ /* Only advertise the FD media types. */
+ vp->advertising &= ~0x02A0;
+ mdio_write(dev, vp->phys[0], 4, vp->advertising);
+ }
+ }
+ vp->mii.phy_id = vp->phys[0];
+ }
+
+ if (vp->capabilities & CapBusMaster) {
+ vp->full_bus_master_tx = 1;
+ if (print_info) {
+ pr_info(" Enabling bus-master transmits and %s receives.\n",
+ (vp->info2 & 1) ? "early" : "whole-frame" );
+ }
+ vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
+ vp->bus_master = 0; /* AKPM: vortex only */
+ }
+
+ /* The 3c59x-specific entries in the device structure. */
+ if (vp->full_bus_master_tx) {
+ dev->netdev_ops = &boomerang_netdev_ops;
+ /* Actually, it still should work with iommu. */
+ if (card_idx < MAX_UNITS &&
+ ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) ||
+ hw_checksums[card_idx] == 1)) {
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+ }
+ } else
+ dev->netdev_ops = &vortex_netdev_ops;
+
+ if (print_info) {
+ pr_info("%s: scatter/gather %sabled. h/w checksums %sabled\n",
+ print_name,
+ (dev->features & NETIF_F_SG) ? "en":"dis",
+ (dev->features & NETIF_F_IP_CSUM) ? "en":"dis");
+ }
+
+ dev->ethtool_ops = &vortex_ethtool_ops;
+ dev->watchdog_timeo = (watchdog * HZ) / 1000;
+
+ if (pdev) {
+ vp->pm_state_valid = 1;
+ pci_save_state(VORTEX_PCI(vp));
+ acpi_set_WOL(dev);
+ }
+ retval = register_netdev(dev);
+ if (retval == 0)
+ return 0;
+
+free_ring:
+ pci_free_consistent(pdev,
+ sizeof(struct boom_rx_desc) * RX_RING_SIZE
+ + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+ vp->rx_ring,
+ vp->rx_ring_dma);
+free_region:
+ if (vp->must_free_region)
+ release_region(dev->base_addr, vci->io_size);
+ free_netdev(dev);
+ pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
+out:
+ return retval;
+}
+
+static void
+issue_and_wait(struct net_device *dev, int cmd)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
+ int i;
+
+ iowrite16(cmd, ioaddr + EL3_CMD);
+ for (i = 0; i < 2000; i++) {
+ if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
+ return;
+ }
+
+ /* OK, that didn't work. Do it the slow way. One second */
+ for (i = 0; i < 100000; i++) {
+ if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) {
+ if (vortex_debug > 1)
+ pr_info("%s: command 0x%04x took %d usecs\n",
+ dev->name, cmd, i * 10);
+ return;
+ }
+ udelay(10);
+ }
+ pr_err("%s: command 0x%04x did not complete! Status=0x%x\n",
+ dev->name, cmd, ioread16(ioaddr + EL3_STATUS));
+}
+
+static void
+vortex_set_duplex(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+
+ pr_info("%s: setting %s-duplex.\n",
+ dev->name, (vp->full_duplex) ? "full" : "half");
+
+ /* Set the full-duplex bit. */
+ window_write16(vp,
+ ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
+ (vp->large_frames ? 0x40 : 0) |
+ ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
+ 0x100 : 0),
+ 3, Wn3_MAC_Ctrl);
+}
+
+static void vortex_check_media(struct net_device *dev, unsigned int init)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ unsigned int ok_to_print = 0;
+
+ if (vortex_debug > 3)
+ ok_to_print = 1;
+
+ if (mii_check_media(&vp->mii, ok_to_print, init)) {
+ vp->full_duplex = vp->mii.full_duplex;
+ vortex_set_duplex(dev);
+ } else if (init) {
+ vortex_set_duplex(dev);
+ }
+}
+
+static int
+vortex_up(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
+ unsigned int config;
+ int i, mii_reg1, mii_reg5, err = 0;
+
+ if (VORTEX_PCI(vp)) {
+ pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */
+ if (vp->pm_state_valid)
+ pci_restore_state(VORTEX_PCI(vp));
+ err = pci_enable_device(VORTEX_PCI(vp));
+ if (err) {
+ pr_warning("%s: Could not enable device\n",
+ dev->name);
+ goto err_out;
+ }
+ }
+
+ /* Before initializing select the active media port. */
+ config = window_read32(vp, 3, Wn3_Config);
+
+ if (vp->media_override != 7) {
+ pr_info("%s: Media override to transceiver %d (%s).\n",
+ dev->name, vp->media_override,
+ media_tbl[vp->media_override].name);
+ dev->if_port = vp->media_override;
+ } else if (vp->autoselect) {
+ if (vp->has_nway) {
+ if (vortex_debug > 1)
+ pr_info("%s: using NWAY device table, not %d\n",
+ dev->name, dev->if_port);
+ dev->if_port = XCVR_NWAY;
+ } else {
+ /* Find first available media type, starting with 100baseTx. */
+ dev->if_port = XCVR_100baseTx;
+ while (! (vp->available_media & media_tbl[dev->if_port].mask))
+ dev->if_port = media_tbl[dev->if_port].next;
+ if (vortex_debug > 1)
+ pr_info("%s: first available media type: %s\n",
+ dev->name, media_tbl[dev->if_port].name);
+ }
+ } else {
+ dev->if_port = vp->default_media;
+ if (vortex_debug > 1)
+ pr_info("%s: using default media %s\n",
+ dev->name, media_tbl[dev->if_port].name);
+ }
+
+ init_timer(&vp->timer);
+ vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait);
+ vp->timer.data = (unsigned long)dev;
+ vp->timer.function = vortex_timer; /* timer handler */
+ add_timer(&vp->timer);
+
+ init_timer(&vp->rx_oom_timer);
+ vp->rx_oom_timer.data = (unsigned long)dev;
+ vp->rx_oom_timer.function = rx_oom_timer;
+
+ if (vortex_debug > 1)
+ pr_debug("%s: Initial media type %s.\n",
+ dev->name, media_tbl[dev->if_port].name);
+
+ vp->full_duplex = vp->mii.force_media;
+ config = BFINS(config, dev->if_port, 20, 4);
+ if (vortex_debug > 6)
+ pr_debug("vortex_up(): writing 0x%x to InternalConfig\n", config);
+ window_write32(vp, config, 3, Wn3_Config);
+
+ if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
+ mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR);
+ mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
+ vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
+ vp->mii.full_duplex = vp->full_duplex;
+
+ vortex_check_media(dev, 1);
+ }
+ else
+ vortex_set_duplex(dev);
+
+ issue_and_wait(dev, TxReset);
+ /*
+ * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
+ */
+ issue_and_wait(dev, RxReset|0x04);
+
+
+ iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+
+ if (vortex_debug > 1) {
+ pr_debug("%s: vortex_up() irq %d media status %4.4x.\n",
+ dev->name, dev->irq, window_read16(vp, 4, Wn4_Media));
+ }
+
+ /* Set the station address and mask in window 2 each time opened. */
+ for (i = 0; i < 6; i++)
+ window_write8(vp, dev->dev_addr[i], 2, i);
+ for (; i < 12; i+=2)
+ window_write16(vp, 0, 2, i);
+
+ if (vp->cb_fn_base) {
+ unsigned short n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
+ if (vp->drv_flags & INVERT_LED_PWR)
+ n |= 0x10;
+ if (vp->drv_flags & INVERT_MII_PWR)
+ n |= 0x4000;
+ window_write16(vp, n, 2, Wn2_ResetOptions);
+ }
+
+ if (dev->if_port == XCVR_10base2)
+ /* Start the thinnet transceiver. We should really wait 50ms...*/
+ iowrite16(StartCoax, ioaddr + EL3_CMD);
+ if (dev->if_port != XCVR_NWAY) {
+ window_write16(vp,
+ (window_read16(vp, 4, Wn4_Media) &
+ ~(Media_10TP|Media_SQE)) |
+ media_tbl[dev->if_port].media_bits,
+ 4, Wn4_Media);
+ }
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ iowrite16(StatsDisable, ioaddr + EL3_CMD);
+ for (i = 0; i < 10; i++)
+ window_read8(vp, 6, i);
+ window_read16(vp, 6, 10);
+ window_read16(vp, 6, 12);
+ /* New: On the Vortex we must also clear the BadSSD counter. */
+ window_read8(vp, 4, 12);
+ /* ..and on the Boomerang we enable the extra statistics bits. */
+ window_write16(vp, 0x0040, 4, Wn4_NetDiag);
+
+ if (vp->full_bus_master_rx) { /* Boomerang bus master. */
+ vp->cur_rx = vp->dirty_rx = 0;
+ /* Initialize the RxEarly register as recommended. */
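+		/* Threshold arguments are apparently in dword units, hence
+		 * the >>2 on the 1536-byte max packet size. */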
+ iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ iowrite32(0x0020, ioaddr + PktStatus);
+ iowrite32(vp->rx_ring_dma, ioaddr + UpListPtr);
+ }
+ if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */
+ vp->cur_tx = vp->dirty_tx = 0;
+ if (vp->drv_flags & IS_BOOMERANG)
+ iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
+ /* Clear the Rx, Tx rings. */
+ for (i = 0; i < RX_RING_SIZE; i++) /* AKPM: this is done in vortex_open, too */
+ vp->rx_ring[i].status = 0;
+ for (i = 0; i < TX_RING_SIZE; i++)
+ vp->tx_skbuff[i] = NULL;
+ iowrite32(0, ioaddr + DownListPtr);
+ }
+	/* Set receiver mode: presumably accept broadcast and physical address only. */
+ set_rx_mode(dev);
+ /* enable 802.1q tagged frames */
+ set_8021q_mode(dev, 1);
+ iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+
+ iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
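+	/* Two masks are programmed: SetStatusEnb picks which events latch
+	 * into the status register at all, SetIntrEnb picks which of those
+	 * actually raise an interrupt. */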
+ vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
+ (vp->full_bus_master_tx ? DownComplete : TxAvailable) |
+ (vp->full_bus_master_rx ? UpComplete : RxComplete) |
+ (vp->bus_master ? DMADone : 0);
+ vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable |
+ (vp->full_bus_master_rx ? 0 : RxComplete) |
+ StatsFull | HostError | TxComplete | IntReq
+ | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
+ iowrite16(vp->status_enable, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ iowrite16(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
+ if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
+ iowrite32(0x8000, vp->cb_fn_base + 4);
+ netif_start_queue (dev);
+err_out:
+ return err;
+}
+
+static int
+vortex_open(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ int i;
+ int retval;
+
+ /* Use the now-standard shared IRQ implementation. */
+ if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
+ boomerang_interrupt : vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
+ pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
+ goto err;
+ }
+
+ if (vp->full_bus_master_rx) { /* Boomerang bus master. */
+ if (vortex_debug > 2)
+ pr_debug("%s: Filling in the Rx ring.\n", dev->name);
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+ vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
+ vp->rx_ring[i].status = 0; /* Clear complete bit. */
+ vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
+
+ skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN,
+ GFP_KERNEL);
+ vp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break; /* Bad news! */
+
+ skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
+ vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+ }
+ if (i != RX_RING_SIZE) {
+ int j;
+ pr_emerg("%s: no memory for rx ring\n", dev->name);
+ for (j = 0; j < i; j++) {
+ if (vp->rx_skbuff[j]) {
+ dev_kfree_skb(vp->rx_skbuff[j]);
+ vp->rx_skbuff[j] = NULL;
+ }
+ }
+ retval = -ENOMEM;
+ goto err_free_irq;
+ }
+ /* Wrap the ring. */
+ vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
+ }
+
+ retval = vortex_up(dev);
+ if (!retval)
+ goto out;
+
+err_free_irq:
+ free_irq(dev->irq, dev);
+err:
+ if (vortex_debug > 1)
+ pr_err("%s: vortex_open() fails: returning %d\n", dev->name, retval);
+out:
+ return retval;
+}
+
+static void
+vortex_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
+ int next_tick = 60*HZ;
+ int ok = 0;
+ int media_status;
+
+ if (vortex_debug > 2) {
+ pr_debug("%s: Media selection timer tick happened, %s.\n",
+ dev->name, media_tbl[dev->if_port].name);
+ pr_debug("dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
+ }
+
+ media_status = window_read16(vp, 4, Wn4_Media);
+ switch (dev->if_port) {
+ case XCVR_10baseT: case XCVR_100baseTx: case XCVR_100baseFx:
+ if (media_status & Media_LnkBeat) {
+ netif_carrier_on(dev);
+ ok = 1;
+ if (vortex_debug > 1)
+ pr_debug("%s: Media %s has link beat, %x.\n",
+ dev->name, media_tbl[dev->if_port].name, media_status);
+ } else {
+ netif_carrier_off(dev);
+ if (vortex_debug > 1) {
+ pr_debug("%s: Media %s has no link beat, %x.\n",
+ dev->name, media_tbl[dev->if_port].name, media_status);
+ }
+ }
+ break;
+ case XCVR_MII: case XCVR_NWAY:
+ {
+ ok = 1;
+ vortex_check_media(dev, 0);
+ }
+ break;
+ default: /* Other media types handled by Tx timeouts. */
+ if (vortex_debug > 1)
+ pr_debug("%s: Media %s has no indication, %x.\n",
+ dev->name, media_tbl[dev->if_port].name, media_status);
+ ok = 1;
+ }
+
+ if (!netif_carrier_ok(dev))
+ next_tick = 5*HZ;
+
+ if (vp->medialock)
+ goto leave_media_alone;
+
+ if (!ok) {
+ unsigned int config;
+
+ spin_lock_irq(&vp->lock);
+
+ do {
+ dev->if_port = media_tbl[dev->if_port].next;
+ } while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
+ if (dev->if_port == XCVR_Default) { /* Go back to default. */
+ dev->if_port = vp->default_media;
+ if (vortex_debug > 1)
+ pr_debug("%s: Media selection failing, using default %s port.\n",
+ dev->name, media_tbl[dev->if_port].name);
+ } else {
+ if (vortex_debug > 1)
+ pr_debug("%s: Media selection failed, now trying %s port.\n",
+ dev->name, media_tbl[dev->if_port].name);
+ next_tick = media_tbl[dev->if_port].wait;
+ }
+ window_write16(vp,
+ (media_status & ~(Media_10TP|Media_SQE)) |
+ media_tbl[dev->if_port].media_bits,
+ 4, Wn4_Media);
+
+ config = window_read32(vp, 3, Wn3_Config);
+ config = BFINS(config, dev->if_port, 20, 4);
+ window_write32(vp, config, 3, Wn3_Config);
+
+ iowrite16(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
+ ioaddr + EL3_CMD);
+ if (vortex_debug > 1)
+ pr_debug("wrote 0x%08x to Wn3_Config\n", config);
+ /* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */
+
+ spin_unlock_irq(&vp->lock);
+ }
+
+leave_media_alone:
+ if (vortex_debug > 2)
+ pr_debug("%s: Media selection timer finished, %s.\n",
+ dev->name, media_tbl[dev->if_port].name);
+
+ mod_timer(&vp->timer, RUN_AT(next_tick));
+ if (vp->deferred)
+ iowrite16(FakeIntr, ioaddr + EL3_CMD);
+}
+
+static void vortex_tx_timeout(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
+
+ pr_err("%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
+ dev->name, ioread8(ioaddr + TxStatus),
+ ioread16(ioaddr + EL3_STATUS));
+ pr_err(" diagnostics: net %04x media %04x dma %08x fifo %04x\n",
+ window_read16(vp, 4, Wn4_NetDiag),
+ window_read16(vp, 4, Wn4_Media),
+ ioread32(ioaddr + PktStatus),
+ window_read16(vp, 4, Wn4_FIFODiag));
+ /* Slight code bloat to be user friendly. */
+ if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88)
+ pr_err("%s: Transmitter encountered 16 collisions --"
+ " network cable problem?\n", dev->name);
+ if (ioread16(ioaddr + EL3_STATUS) & IntLatch) {
+ pr_err("%s: Interrupt posted but not delivered --"
+ " IRQ blocked by another device?\n", dev->name);
+		/* Bad idea here, but we might as well handle a few events. */
+ {
+ /*
+ * Block interrupts because vortex_interrupt does a bare spin_lock()
+ */
+ unsigned long flags;
+ local_irq_save(flags);
+ if (vp->full_bus_master_tx)
+ boomerang_interrupt(dev->irq, dev);
+ else
+ vortex_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+ }
+ }
+
+ if (vortex_debug > 0)
+ dump_tx_ring(dev);
+
+ issue_and_wait(dev, TxReset);
+
+ dev->stats.tx_errors++;
+ if (vp->full_bus_master_tx) {
+ pr_debug("%s: Resetting the Tx ring pointer.\n", dev->name);
+ if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0)
+ iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
+ ioaddr + DownListPtr);
+ if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
+ netif_wake_queue (dev);
+ if (vp->drv_flags & IS_BOOMERANG)
+ iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
+ iowrite16(DownUnstall, ioaddr + EL3_CMD);
+ } else {
+ dev->stats.tx_dropped++;
+ netif_wake_queue(dev);
+ }
+
+ /* Issue Tx Enable */
+ iowrite16(TxEnable, ioaddr + EL3_CMD);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+}
+
+/*
+ * Handle uncommon interrupt sources. This is a separate routine to minimize
+ * the cache impact.
+ */
+static void
+vortex_error(struct net_device *dev, int status)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
+ int do_tx_reset = 0, reset_mask = 0;
+ unsigned char tx_status = 0;
+
+ if (vortex_debug > 2) {
+ pr_err("%s: vortex_error(), status=0x%x\n", dev->name, status);
+ }
+
+ if (status & TxComplete) { /* Really "TxError" for us. */
+ tx_status = ioread8(ioaddr + TxStatus);
+ /* Presumably a tx-timeout. We must merely re-enable. */
+ if (vortex_debug > 2 ||
+ (tx_status != 0x88 && vortex_debug > 0)) {
+ pr_err("%s: Transmit error, Tx status register %2.2x.\n",
+ dev->name, tx_status);
+ if (tx_status == 0x82) {
+ pr_err("Probably a duplex mismatch. See "
+ "Documentation/networking/vortex.txt\n");
+ }
+ dump_tx_ring(dev);
+ }
+ if (tx_status & 0x14) dev->stats.tx_fifo_errors++;
+ if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
+ if (tx_status & 0x08) vp->xstats.tx_max_collisions++;
+ iowrite8(0, ioaddr + TxStatus);
+ if (tx_status & 0x30) { /* txJabber or txUnderrun */
+ do_tx_reset = 1;
+ } else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) { /* maxCollisions */
+ do_tx_reset = 1;
+ reset_mask = 0x0108; /* Reset interface logic, but not download logic */
+ } else { /* Merely re-enable the transmitter. */
+ iowrite16(TxEnable, ioaddr + EL3_CMD);
+ }
+ }
+
+ if (status & RxEarly) /* Rx early is unused. */
+ iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);
+
+ if (status & StatsFull) { /* Empty statistics. */
+ static int DoneDidThat;
+ if (vortex_debug > 4)
+ pr_debug("%s: Updating stats.\n", dev->name);
+ update_stats(ioaddr, dev);
+ /* HACK: Disable statistics as an interrupt source. */
+ /* This occurs when we have the wrong media type! */
+ if (DoneDidThat == 0 &&
+ ioread16(ioaddr + EL3_STATUS) & StatsFull) {
+ pr_warning("%s: Updating statistics failed, disabling "
+ "stats as an interrupt source.\n", dev->name);
+ iowrite16(SetIntrEnb |
+ (window_read16(vp, 5, 10) & ~StatsFull),
+ ioaddr + EL3_CMD);
+ vp->intr_enable &= ~StatsFull;
+ DoneDidThat++;
+ }
+ }
+ if (status & IntReq) { /* Restore all interrupt sources. */
+ iowrite16(vp->status_enable, ioaddr + EL3_CMD);
+ iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
+ }
+ if (status & HostError) {
+ u16 fifo_diag;
+ fifo_diag = window_read16(vp, 4, Wn4_FIFODiag);
+ pr_err("%s: Host error, FIFO diagnostic register %4.4x.\n",
+ dev->name, fifo_diag);
+ /* Adapter failure requires Tx/Rx reset and reinit. */
+ if (vp->full_bus_master_tx) {
+ int bus_status = ioread32(ioaddr + PktStatus);
+ /* 0x80000000 PCI master abort. */
+ /* 0x40000000 PCI target abort. */
+ if (vortex_debug)
+ pr_err("%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status);
+
+ /* In this case, blow the card away */
+ /* Must not enter D3 or we can't legally issue the reset! */
+ vortex_down(dev, 0);
+ issue_and_wait(dev, TotalReset | 0xff);
+ vortex_up(dev); /* AKPM: bug. vortex_up() assumes that the rx ring is full. It may not be. */
+ } else if (fifo_diag & 0x0400)
+ do_tx_reset = 1;
+ if (fifo_diag & 0x3000) {
+ /* Reset Rx fifo and upload logic */
+ issue_and_wait(dev, RxReset|0x07);
+ /* Set the Rx filter to the current state. */
+ set_rx_mode(dev);
+ /* enable 802.1q VLAN tagged frames */
+ set_8021q_mode(dev, 1);
+ iowrite16(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
+ iowrite16(AckIntr | HostError, ioaddr + EL3_CMD);
+ }
+ }
+
+ if (do_tx_reset) {
+ issue_and_wait(dev, TxReset|reset_mask);
+ iowrite16(TxEnable, ioaddr + EL3_CMD);
+ if (!vp->full_bus_master_tx)
+ netif_wake_queue(dev);
+ }
+}
+
+static netdev_tx_t
+vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
+
+ /* Put out the doubleword header... */
+ iowrite32(skb->len, ioaddr + TX_FIFO);
+ if (vp->bus_master) {
+ /* Set the bus-master controller to transfer the packet. */
+ int len = (skb->len + 3) & ~3;
+ vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
+ PCI_DMA_TODEVICE);
+ spin_lock_irq(&vp->window_lock);
+ window_set(vp, 7);
+ iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
+ iowrite16(len, ioaddr + Wn7_MasterLen);
+ spin_unlock_irq(&vp->window_lock);
+ vp->tx_skb = skb;
+ iowrite16(StartDMADown, ioaddr + EL3_CMD);
+ /* netif_wake_queue() will be called at the DMADone interrupt. */
+ } else {
+ /* ... and the packet rounded to a doubleword. */
+ iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+ dev_kfree_skb (skb);
+ if (ioread16(ioaddr + TxFree) > 1536) {
+ netif_start_queue (dev); /* AKPM: redundant? */
+ } else {
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ netif_stop_queue(dev);
+ iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ }
+ }
+
+ /* Clear the Tx status stack. */
+ {
+ int tx_status;
+ int i = 32;
+
+ while (--i > 0 && (tx_status = ioread8(ioaddr + TxStatus)) > 0) {
+ if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */
+ if (vortex_debug > 2)
+ pr_debug("%s: Tx error, status %2.2x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x04) dev->stats.tx_fifo_errors++;
+ if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
+ if (tx_status & 0x30) {
+ issue_and_wait(dev, TxReset);
+ }
+ iowrite16(TxEnable, ioaddr + EL3_CMD);
+ }
+ iowrite8(0x00, ioaddr + TxStatus); /* Pop the status stack. */
+ }
+ }
+ return NETDEV_TX_OK;
+}
+
+static netdev_tx_t
+boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
+ /* Calculate the next Tx descriptor entry. */
+ int entry = vp->cur_tx % TX_RING_SIZE;
+ struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
+ unsigned long flags;
+
+ if (vortex_debug > 6) {
+ pr_debug("boomerang_start_xmit()\n");
+ pr_debug("%s: Trying to send a packet, Tx index %d.\n",
+ dev->name, vp->cur_tx);
+ }
+
+ /*
+ * We can't allow a recursion from our interrupt handler back into the
+ * tx routine, as they take the same spin lock, and that causes
+ * deadlock. Just return NETDEV_TX_BUSY and let the stack try again in
+	 * a bit.
+ */
+ if (vp->handling_irq)
+ return NETDEV_TX_BUSY;
+
+ if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
+ if (vortex_debug > 0)
+ pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
+ dev->name);
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ vp->tx_skbuff[entry] = skb;
+
+ vp->tx_ring[entry].next = 0;
+#if DO_ZEROCOPY
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
+ else
+ vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
+
+ if (!skb_shinfo(skb)->nr_frags) {
+ vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
+ skb->len, PCI_DMA_TODEVICE));
+ vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
+ } else {
+ int i;
+
+ vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
+ skb_headlen(skb), PCI_DMA_TODEVICE));
+ vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ vp->tx_ring[entry].frag[i+1].addr =
+ cpu_to_le32(pci_map_single(
+ VORTEX_PCI(vp),
+ (void *)skb_frag_address(frag),
+ frag->size, PCI_DMA_TODEVICE));
+
+ if (i == skb_shinfo(skb)->nr_frags-1)
+ vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
+ else
+ vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size);
+ }
+ }
+#else
+ vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
+ vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
+ vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
+#endif
+
+ spin_lock_irqsave(&vp->lock, flags);
+ /* Wait for the stall to complete. */
+ issue_and_wait(dev, DownStall);
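+	/* With the download engine stalled the NIC cannot walk the ring, so
+	 * it is safe to link in the new descriptor; if the engine had gone
+	 * idle (DownListPtr == 0) we hand it the list head again below. */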
+ prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
+ if (ioread32(ioaddr + DownListPtr) == 0) {
+ iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
+ vp->queued_packet++;
+ }
+
+ vp->cur_tx++;
+ if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
+ netif_stop_queue (dev);
+ } else { /* Clear previous interrupt enable. */
+#if defined(tx_interrupt_mitigation)
+		/* Dubious. If the "faster" cyclone-only ifdef in
+		 * boomerang_interrupt were selected, this would corrupt
+		 * DN_COMPLETE. No?
+ */
+ prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
+#endif
+ }
+ iowrite16(DownUnstall, ioaddr + EL3_CMD);
+ spin_unlock_irqrestore(&vp->lock, flags);
+ return NETDEV_TX_OK;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+
+/*
+ * This is the ISR for the vortex series chips.
+ * full_bus_master_tx == 0 && full_bus_master_rx == 0
+ */
+
+static irqreturn_t
+vortex_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr;
+ int status;
+ int work_done = max_interrupt_work;
+ int handled = 0;
+
+ ioaddr = vp->ioaddr;
+ spin_lock(&vp->lock);
+
+ status = ioread16(ioaddr + EL3_STATUS);
+
+ if (vortex_debug > 6)
+ pr_debug("vortex_interrupt(). status=0x%4x\n", status);
+
+ if ((status & IntLatch) == 0)
+ goto handler_exit; /* No interrupt: shared IRQs cause this */
+ handled = 1;
+
+ if (status & IntReq) {
+ status |= vp->deferred;
+ vp->deferred = 0;
+ }
+
+ if (status == 0xffff) /* h/w no longer present (hotplug)? */
+ goto handler_exit;
+
+ if (vortex_debug > 4)
+ pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
+ dev->name, status, ioread8(ioaddr + Timer));
+
+ spin_lock(&vp->window_lock);
+ window_set(vp, 7);
+
+ do {
+ if (vortex_debug > 5)
+ pr_debug("%s: In interrupt loop, status %4.4x.\n",
+ dev->name, status);
+ if (status & RxComplete)
+ vortex_rx(dev);
+
+ if (status & TxAvailable) {
+ if (vortex_debug > 5)
+ pr_debug(" TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ iowrite16(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue (dev);
+ }
+
+ if (status & DMADone) {
+ if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
+ iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
+ pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
+ if (ioread16(ioaddr + TxFree) > 1536) {
+ /*
+ * AKPM: FIXME: I don't think we need this. If the queue was stopped due to
+ * insufficient FIFO room, the TxAvailable test will succeed and call
+ * netif_wake_queue()
+ */
+ netif_wake_queue(dev);
+ } else { /* Interrupt when FIFO has room for max-sized packet. */
+ iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ netif_stop_queue(dev);
+ }
+ }
+ }
+ /* Check for all uncommon interrupts at once. */
+ if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
+ if (status == 0xffff)
+ break;
+ if (status & RxEarly)
+ vortex_rx(dev);
+ spin_unlock(&vp->window_lock);
+ vortex_error(dev, status);
+ spin_lock(&vp->window_lock);
+ window_set(vp, 7);
+ }
+
+ if (--work_done < 0) {
+ pr_warning("%s: Too much work in interrupt, status %4.4x.\n",
+ dev->name, status);
+ /* Disable all pending interrupts. */
+ do {
+ vp->deferred |= status;
+ iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
+ ioaddr + EL3_CMD);
+ iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
+ } while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
+ /* The timer will reenable interrupts. */
+ mod_timer(&vp->timer, jiffies + 1*HZ);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+ } while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
+
+ spin_unlock(&vp->window_lock);
+
+ if (vortex_debug > 4)
+ pr_debug("%s: exiting interrupt, status %4.4x.\n",
+ dev->name, status);
+handler_exit:
+ spin_unlock(&vp->lock);
+ return IRQ_RETVAL(handled);
+}
+
+/*
+ * This is the ISR for the boomerang series chips.
+ * full_bus_master_tx == 1 && full_bus_master_rx == 1
+ */
+
+static irqreturn_t
+boomerang_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr;
+ int status;
+ int work_done = max_interrupt_work;
+
+ ioaddr = vp->ioaddr;
+
+	/*
+	 * It seems dopey to take the spinlock this early, but we could race
+	 * against vortex_tx_timeout and boomerang_start_xmit.
+	 */
+ spin_lock(&vp->lock);
+ vp->handling_irq = 1;
+
+ status = ioread16(ioaddr + EL3_STATUS);
+
+ if (vortex_debug > 6)
+ pr_debug("boomerang_interrupt. status=0x%4x\n", status);
+
+ if ((status & IntLatch) == 0)
+ goto handler_exit; /* No interrupt: shared IRQs can cause this */
+
+ if (status == 0xffff) { /* h/w no longer present (hotplug)? */
+ if (vortex_debug > 1)
+ pr_debug("boomerang_interrupt(1): status = 0xffff\n");
+ goto handler_exit;
+ }
+
+ if (status & IntReq) {
+ status |= vp->deferred;
+ vp->deferred = 0;
+ }
+
+ if (vortex_debug > 4)
+ pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
+ dev->name, status, ioread8(ioaddr + Timer));
+ do {
+ if (vortex_debug > 5)
+ pr_debug("%s: In interrupt loop, status %4.4x.\n",
+ dev->name, status);
+ if (status & UpComplete) {
+ iowrite16(AckIntr | UpComplete, ioaddr + EL3_CMD);
+ if (vortex_debug > 5)
+ pr_debug("boomerang_interrupt->boomerang_rx\n");
+ boomerang_rx(dev);
+ }
+
+ if (status & DownComplete) {
+ unsigned int dirty_tx = vp->dirty_tx;
+
+ iowrite16(AckIntr | DownComplete, ioaddr + EL3_CMD);
+ while (vp->cur_tx - dirty_tx > 0) {
+ int entry = dirty_tx % TX_RING_SIZE;
+#if 1 /* AKPM: the latter is faster, but cyclone-only */
+ if (ioread32(ioaddr + DownListPtr) ==
+ vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
+ break; /* It still hasn't been processed. */
+#else
+ if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
+ break; /* It still hasn't been processed. */
+#endif
+
+ if (vp->tx_skbuff[entry]) {
+ struct sk_buff *skb = vp->tx_skbuff[entry];
+#if DO_ZEROCOPY
+ int i;
+ for (i=0; i<=skb_shinfo(skb)->nr_frags; i++)
+ pci_unmap_single(VORTEX_PCI(vp),
+ le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
+ le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
+ PCI_DMA_TODEVICE);
+#else
+ pci_unmap_single(VORTEX_PCI(vp),
+ le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
+#endif
+ dev_kfree_skb_irq(skb);
+ vp->tx_skbuff[entry] = NULL;
+ } else {
+ pr_debug("boomerang_interrupt: no skb!\n");
+ }
+ /* dev->stats.tx_packets++; Counted below. */
+ dirty_tx++;
+ }
+ vp->dirty_tx = dirty_tx;
+ if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
+ if (vortex_debug > 6)
+ pr_debug("boomerang_interrupt: wake queue\n");
+ netif_wake_queue (dev);
+ }
+ }
+
+ /* Check for all uncommon interrupts at once. */
+ if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq))
+ vortex_error(dev, status);
+
+ if (--work_done < 0) {
+ pr_warning("%s: Too much work in interrupt, status %4.4x.\n",
+ dev->name, status);
+ /* Disable all pending interrupts. */
+ do {
+ vp->deferred |= status;
+ iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
+ ioaddr + EL3_CMD);
+ iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
+ } while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
+ /* The timer will reenable interrupts. */
+ mod_timer(&vp->timer, jiffies + 1*HZ);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+ if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
+ iowrite32(0x8000, vp->cb_fn_base + 4);
+
+ } while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);
+
+ if (vortex_debug > 4)
+ pr_debug("%s: exiting interrupt, status %4.4x.\n",
+ dev->name, status);
+handler_exit:
+ vp->handling_irq = 0;
+ spin_unlock(&vp->lock);
+ return IRQ_HANDLED;
+}
+
+static int vortex_rx(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
+ int i;
+ short rx_status;
+
+ if (vortex_debug > 5)
+ pr_debug("vortex_rx(): status %4.4x, rx_status %4.4x.\n",
+ ioread16(ioaddr+EL3_STATUS), ioread16(ioaddr+RxStatus));
+ while ((rx_status = ioread16(ioaddr + RxStatus)) > 0) {
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ unsigned char rx_error = ioread8(ioaddr + RxErrors);
+ if (vortex_debug > 2)
+ pr_debug(" Rx error: status %2.2x.\n", rx_error);
+ dev->stats.rx_errors++;
+ if (rx_error & 0x01) dev->stats.rx_over_errors++;
+ if (rx_error & 0x02) dev->stats.rx_length_errors++;
+ if (rx_error & 0x04) dev->stats.rx_frame_errors++;
+ if (rx_error & 0x08) dev->stats.rx_crc_errors++;
+ if (rx_error & 0x10) dev->stats.rx_length_errors++;
+ } else {
+			/* The packet length: up to 4.5K! */
+ int pkt_len = rx_status & 0x1fff;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len + 5);
+ if (vortex_debug > 4)
+ pr_debug("Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ if (vp->bus_master &&
+ ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
+ dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
+ pkt_len, PCI_DMA_FROMDEVICE);
+ iowrite32(dma, ioaddr + Wn7_MasterAddr);
+ iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
+ iowrite16(StartDMAUp, ioaddr + EL3_CMD);
+ while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
+ ;
+ pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
+ } else {
+ ioread32_rep(ioaddr + RX_FIFO,
+ skb_put(skb, pkt_len),
+ (pkt_len + 3) >> 2);
+ }
+ iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ /* Wait a limited time to go to next packet. */
+ for (i = 200; i >= 0; i--)
+ if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ continue;
+ } else if (vortex_debug > 0)
+ pr_notice("%s: No memory to allocate a sk_buff of size %d.\n",
+ dev->name, pkt_len);
+ dev->stats.rx_dropped++;
+ }
+ issue_and_wait(dev, RxDiscard);
+ }
+
+ return 0;
+}
+
+static int
+boomerang_rx(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ int entry = vp->cur_rx % RX_RING_SIZE;
+ void __iomem *ioaddr = vp->ioaddr;
+ int rx_status;
+ int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
+
+ if (vortex_debug > 5)
+ pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
+
+ while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
+ if (--rx_work_limit < 0)
+ break;
+ if (rx_status & RxDError) { /* Error, update stats. */
+ unsigned char rx_error = rx_status >> 16;
+ if (vortex_debug > 2)
+ pr_debug(" Rx error: status %2.2x.\n", rx_error);
+ dev->stats.rx_errors++;
+ if (rx_error & 0x01) dev->stats.rx_over_errors++;
+ if (rx_error & 0x02) dev->stats.rx_length_errors++;
+ if (rx_error & 0x04) dev->stats.rx_frame_errors++;
+ if (rx_error & 0x08) dev->stats.rx_crc_errors++;
+ if (rx_error & 0x10) dev->stats.rx_length_errors++;
+ } else {
+			/* The packet length: up to 4.5K! */
+ int pkt_len = rx_status & 0x1fff;
+ struct sk_buff *skb;
+ dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
+
+ if (vortex_debug > 4)
+ pr_debug("Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+
+ /* Check if the packet is long enough to just accept without
+ copying to a properly sized skbuff. */
+ if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ memcpy(skb_put(skb, pkt_len),
+ vp->rx_skbuff[entry]->data,
+ pkt_len);
+ pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ vp->rx_copy++;
+ } else {
+ /* Pass up the skbuff already on the Rx ring. */
+ skb = vp->rx_skbuff[entry];
+ vp->rx_skbuff[entry] = NULL;
+ skb_put(skb, pkt_len);
+ pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ vp->rx_nocopy++;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ { /* Use hardware checksum info. */
+ int csum_bits = rx_status & 0xee000000;
+ if (csum_bits &&
+ (csum_bits == (IPChksumValid | TCPChksumValid) ||
+ csum_bits == (IPChksumValid | UDPChksumValid))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ vp->rx_csumhits++;
+ }
+ }
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ }
+ entry = (++vp->cur_rx) % RX_RING_SIZE;
+ }
+ /* Refill the Rx ring buffers. */
+ for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = vp->dirty_rx % RX_RING_SIZE;
+ if (vp->rx_skbuff[entry] == NULL) {
+ skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
+ if (skb == NULL) {
+ static unsigned long last_jif;
+ if (time_after(jiffies, last_jif + 10 * HZ)) {
+ pr_warning("%s: memory shortage\n", dev->name);
+ last_jif = jiffies;
+ }
+ if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
+ mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
+ break; /* Bad news! */
+ }
+
+ vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+ vp->rx_skbuff[entry] = skb;
+ }
+ vp->rx_ring[entry].status = 0; /* Clear complete bit. */
+ iowrite16(UpUnstall, ioaddr + EL3_CMD);
+ }
+ return 0;
+}
+
+/*
+ * If we've hit a total OOM refilling the Rx ring, we poll once a second
+ * for some memory. Otherwise there is no way to restart the rx process.
+ */
+static void
+rx_oom_timer(unsigned long arg)
+{
+ struct net_device *dev = (struct net_device *)arg;
+ struct vortex_private *vp = netdev_priv(dev);
+
+ spin_lock_irq(&vp->lock);
+ if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
+ boomerang_rx(dev);
+ if (vortex_debug > 1) {
+ pr_debug("%s: rx_oom_timer %s\n", dev->name,
+ ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
+ }
+ spin_unlock_irq(&vp->lock);
+}
+
+static void
+vortex_down(struct net_device *dev, int final_down)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
+
+ netif_stop_queue (dev);
+
+ del_timer_sync(&vp->rx_oom_timer);
+ del_timer_sync(&vp->timer);
+
+ /* Turn off statistics ASAP. We update dev->stats below. */
+ iowrite16(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ iowrite16(RxDisable, ioaddr + EL3_CMD);
+ iowrite16(TxDisable, ioaddr + EL3_CMD);
+
+ /* Disable receiving 802.1q tagged frames */
+ set_8021q_mode(dev, 0);
+
+ if (dev->if_port == XCVR_10base2)
+ /* Turn off thinnet power. Green! */
+ iowrite16(StopCoax, ioaddr + EL3_CMD);
+
+ iowrite16(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
+
+ update_stats(ioaddr, dev);
+ if (vp->full_bus_master_rx)
+ iowrite32(0, ioaddr + UpListPtr);
+ if (vp->full_bus_master_tx)
+ iowrite32(0, ioaddr + DownListPtr);
+
+ if (final_down && VORTEX_PCI(vp)) {
+ vp->pm_state_valid = 1;
+ pci_save_state(VORTEX_PCI(vp));
+ acpi_set_WOL(dev);
+ }
+}
+
+static int
+vortex_close(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
+ int i;
+
+ if (netif_device_present(dev))
+ vortex_down(dev, 1);
+
+ if (vortex_debug > 1) {
+ pr_debug("%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
+ dev->name, ioread16(ioaddr + EL3_STATUS), ioread8(ioaddr + TxStatus));
+ pr_debug("%s: vortex close stats: rx_nocopy %d rx_copy %d"
+ " tx_queued %d Rx pre-checksummed %d.\n",
+ dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
+ }
+
+#if DO_ZEROCOPY
+ if (vp->rx_csumhits &&
+ (vp->drv_flags & HAS_HWCKSM) == 0 &&
+ (vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) {
+ pr_warning("%s supports hardware checksums, and we're not using them!\n", dev->name);
+ }
+#endif
+
+ free_irq(dev->irq, dev);
+
+ if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
+ for (i = 0; i < RX_RING_SIZE; i++)
+ if (vp->rx_skbuff[i]) {
+ pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(vp->rx_skbuff[i]);
+ vp->rx_skbuff[i] = NULL;
+ }
+ }
+ if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (vp->tx_skbuff[i]) {
+ struct sk_buff *skb = vp->tx_skbuff[i];
+#if DO_ZEROCOPY
+ int k;
+
+ for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
+ pci_unmap_single(VORTEX_PCI(vp),
+ le32_to_cpu(vp->tx_ring[i].frag[k].addr),
+ le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
+ PCI_DMA_TODEVICE);
+#else
+ pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
+#endif
+ dev_kfree_skb(skb);
+ vp->tx_skbuff[i] = NULL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void
+dump_tx_ring(struct net_device *dev)
+{
+ if (vortex_debug > 0) {
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
+
+ if (vp->full_bus_master_tx) {
+ int i;
+			int stalled = ioread32(ioaddr + PktStatus) & 0x04;	/* Possibly racy, but it's only debug stuff */
+
+ pr_err(" Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
+ vp->full_bus_master_tx,
+ vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
+ vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
+ pr_err(" Transmit list %8.8x vs. %p.\n",
+ ioread32(ioaddr + DownListPtr),
+ &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
+ issue_and_wait(dev, DownStall);
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ unsigned int length;
+
+#if DO_ZEROCOPY
+ length = le32_to_cpu(vp->tx_ring[i].frag[0].length);
+#else
+ length = le32_to_cpu(vp->tx_ring[i].length);
+#endif
+ pr_err(" %d: @%p length %8.8x status %8.8x\n",
+ i, &vp->tx_ring[i], length,
+ le32_to_cpu(vp->tx_ring[i].status));
+ }
+ if (!stalled)
+ iowrite16(DownUnstall, ioaddr + EL3_CMD);
+ }
+ }
+}
+
+static struct net_device_stats *vortex_get_stats(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
+ unsigned long flags;
+
+ if (netif_device_present(dev)) { /* AKPM: Used to be netif_running */
+ spin_lock_irqsave (&vp->lock, flags);
+ update_stats(ioaddr, dev);
+ spin_unlock_irqrestore (&vp->lock, flags);
+ }
+ return &dev->stats;
+}
+
+/* Update statistics.
+ Unlike with the EL3 we need not worry about interrupts changing
+ the window setting from underneath us, but we must still guard
+ against a race condition with a StatsUpdate interrupt updating the
+ table. This is done by checking that the ASM (!) code generated uses
+ atomic updates with '+='.
+ */
+static void update_stats(void __iomem *ioaddr, struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+
+ /* Unlike the 3c5x9 we need not turn off stats updates while reading. */
+ /* Switch to the stats window, and read everything. */
+ dev->stats.tx_carrier_errors += window_read8(vp, 6, 0);
+ dev->stats.tx_heartbeat_errors += window_read8(vp, 6, 1);
+ dev->stats.tx_window_errors += window_read8(vp, 6, 4);
+ dev->stats.rx_fifo_errors += window_read8(vp, 6, 5);
+ dev->stats.tx_packets += window_read8(vp, 6, 6);
+ dev->stats.tx_packets += (window_read8(vp, 6, 9) &
+ 0x30) << 4;
+ /* Rx packets */ window_read8(vp, 6, 7); /* Must read to clear */
+ /* Don't bother with register 9, an extension of registers 6&7.
+ If we do use the 6&7 values the atomic update assumption above
+ is invalid. */
+ dev->stats.rx_bytes += window_read16(vp, 6, 10);
+ dev->stats.tx_bytes += window_read16(vp, 6, 12);
+ /* Extra stats for get_ethtool_stats() */
+ vp->xstats.tx_multiple_collisions += window_read8(vp, 6, 2);
+ vp->xstats.tx_single_collisions += window_read8(vp, 6, 3);
+ vp->xstats.tx_deferred += window_read8(vp, 6, 8);
+ vp->xstats.rx_bad_ssd += window_read8(vp, 4, 12);
+
+ dev->stats.collisions = vp->xstats.tx_multiple_collisions
+ + vp->xstats.tx_single_collisions
+ + vp->xstats.tx_max_collisions;
+
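+	/* Window 4, register 13 extends the 16-bit byte counters: its low
+	 * nibble supplies bits 19:16 of rx_bytes, the high nibble bits
+	 * 19:16 of tx_bytes. */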
+ {
+ u8 up = window_read8(vp, 4, 13);
+ dev->stats.rx_bytes += (up & 0x0f) << 16;
+ dev->stats.tx_bytes += (up & 0xf0) << 12;
+ }
+}
+
+static int vortex_nway_reset(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+
+ return mii_nway_restart(&vp->mii);
+}
+
+static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+
+ return mii_ethtool_gset(&vp->mii, cmd);
+}
+
+static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+
+ return mii_ethtool_sset(&vp->mii, cmd);
+}
+
+static u32 vortex_get_msglevel(struct net_device *dev)
+{
+ return vortex_debug;
+}
+
+static void vortex_set_msglevel(struct net_device *dev, u32 dbg)
+{
+ vortex_debug = dbg;
+}
+
+static int vortex_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return VORTEX_NUM_STATS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void vortex_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vp->lock, flags);
+ update_stats(ioaddr, dev);
+ spin_unlock_irqrestore(&vp->lock, flags);
+
+ data[0] = vp->xstats.tx_deferred;
+ data[1] = vp->xstats.tx_max_collisions;
+ data[2] = vp->xstats.tx_multiple_collisions;
+ data[3] = vp->xstats.tx_single_collisions;
+ data[4] = vp->xstats.rx_bad_ssd;
+}
+
+
+static void vortex_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static void vortex_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_NAME);
+ if (VORTEX_PCI(vp)) {
+ strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
+ } else {
+ if (VORTEX_EISA(vp))
+ strcpy(info->bus_info, dev_name(vp->gendev));
+ else
+ sprintf(info->bus_info, "EISA 0x%lx %d",
+ dev->base_addr, dev->irq);
+ }
+}
+
+static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+
+ if (!VORTEX_PCI(vp))
+ return;
+
+ wol->supported = WAKE_MAGIC;
+
+ wol->wolopts = 0;
+ if (vp->enable_wol)
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+
+ if (!VORTEX_PCI(vp))
+ return -EOPNOTSUPP;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ vp->enable_wol = 1;
+ else
+ vp->enable_wol = 0;
+ acpi_set_WOL(dev);
+
+ return 0;
+}
+
+static const struct ethtool_ops vortex_ethtool_ops = {
+ .get_drvinfo = vortex_get_drvinfo,
+ .get_strings = vortex_get_strings,
+ .get_msglevel = vortex_get_msglevel,
+ .set_msglevel = vortex_set_msglevel,
+ .get_ethtool_stats = vortex_get_ethtool_stats,
+ .get_sset_count = vortex_get_sset_count,
+ .get_settings = vortex_get_settings,
+ .set_settings = vortex_set_settings,
+ .get_link = ethtool_op_get_link,
+ .nway_reset = vortex_nway_reset,
+ .get_wol = vortex_get_wol,
+ .set_wol = vortex_set_wol,
+};
+
+#ifdef CONFIG_PCI
+/*
+ * Must power the device up to do MDIO operations
+ */
+static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ int err;
+ struct vortex_private *vp = netdev_priv(dev);
+ pci_power_t state = 0;
+
+ if(VORTEX_PCI(vp))
+ state = VORTEX_PCI(vp)->current_state;
+
+ /* The kernel core really should have pci_get_power_state() */
+
+ if(state != 0)
+ pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
+ err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
+ if(state != 0)
+ pci_set_power_state(VORTEX_PCI(vp), state);
+
+ return err;
+}
+#endif
+
+
+/* Pre-Cyclone chips have no documented multicast filter, so the only
+ multicast setting is to receive all multicast frames. At least
+ the chip has a very clean way to set the mode, unlike many others. */
+static void set_rx_mode(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
+ int new_mode;
+
+ if (dev->flags & IFF_PROMISC) {
+ if (vortex_debug > 3)
+ pr_notice("%s: Setting promiscuous mode.\n", dev->name);
+ new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
+ } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
+ new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
+ } else
+ new_mode = SetRxFilter | RxStation | RxBroadcast;
+
+ iowrite16(new_mode, ioaddr + EL3_CMD);
+}
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+/* Set up the card so that it can receive frames with an 802.1q VLAN tag.
+   Note that this must be done after each RxReset due to some backwards
+   compatibility logic in the Cyclone and Tornado ASICs. */
+
+/* The Ethernet Type used for 802.1q tagged frames */
+#define VLAN_ETHER_TYPE 0x8100
+
+static void set_8021q_mode(struct net_device *dev, int enable)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ int mac_ctrl;
+
+ if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) {
+ /* cyclone and tornado chipsets can recognize 802.1q
+ * tagged frames and treat them correctly */
+
+ int max_pkt_size = dev->mtu+14; /* MTU+Ethernet header */
+ if (enable)
+ max_pkt_size += 4; /* 802.1Q VLAN tag */
+
+ window_write16(vp, max_pkt_size, 3, Wn3_MaxPktSize);
+
+ /* set VlanEtherType to let the hardware checksumming
+ treat tagged frames correctly */
+ window_write16(vp, VLAN_ETHER_TYPE, 7, Wn7_VlanEtherType);
+ } else {
+ /* on older cards we have to enable large frames */
+
+ vp->large_frames = dev->mtu > 1500 || enable;
+
+ mac_ctrl = window_read16(vp, 3, Wn3_MAC_Ctrl);
+ if (vp->large_frames)
+ mac_ctrl |= 0x40;
+ else
+ mac_ctrl &= ~0x40;
+ window_write16(vp, mac_ctrl, 3, Wn3_MAC_Ctrl);
+ }
+}
+#else
+
+static void set_8021q_mode(struct net_device *dev, int enable)
+{
+}
+
+
+#endif
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details. */
+
+/* The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
+ met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+ "overclocking" issues. */
+static void mdio_delay(struct vortex_private *vp)
+{
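+	/* The dummy register read is itself the delay: one PCI read round
+	 * trip is comfortably longer than the minimum MDC half-period. */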
+ window_read32(vp, 4, Wn4_PhysicalMgmt);
+}
+
+#define MDIO_SHIFT_CLK 0x01
+#define MDIO_DIR_WRITE 0x04
+#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
+#define MDIO_DATA_READ 0x02
+#define MDIO_ENB_IN 0x00
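+
+/* For reference, a clause-22 MII management frame: a preamble of 32 ones,
+ * a start field (01), an opcode (10 = read, 01 = write), a 5-bit PHY
+ * address, a 5-bit register address, a two-bit turnaround, then 16 data
+ * bits.  read_cmd below covers the frame up to the turnaround (the data
+ * is clocked back in); write_cmd carries the whole 32-bit frame. */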
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+static void mdio_sync(struct vortex_private *vp, int bits)
+{
+ /* Establish sync by sending at least 32 logic ones. */
+ while (-- bits >= 0) {
+ window_write16(vp, MDIO_DATA_WRITE1, 4, Wn4_PhysicalMgmt);
+ mdio_delay(vp);
+ window_write16(vp, MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK,
+ 4, Wn4_PhysicalMgmt);
+ mdio_delay(vp);
+ }
+}
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ int i;
+ struct vortex_private *vp = netdev_priv(dev);
+ int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ unsigned int retval = 0;
+
+ spin_lock_bh(&vp->mii_lock);
+
+ if (mii_preamble_required)
+ mdio_sync(vp, 32);
+
+ /* Shift the read command bits out. */
+ for (i = 14; i >= 0; i--) {
+ int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
+ mdio_delay(vp);
+ window_write16(vp, dataval | MDIO_SHIFT_CLK,
+ 4, Wn4_PhysicalMgmt);
+ mdio_delay(vp);
+ }
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
+ mdio_delay(vp);
+ retval = (retval << 1) |
+ ((window_read16(vp, 4, Wn4_PhysicalMgmt) &
+ MDIO_DATA_READ) ? 1 : 0);
+ window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
+ 4, Wn4_PhysicalMgmt);
+ mdio_delay(vp);
+ }
+
+ spin_unlock_bh(&vp->mii_lock);
+
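+	/* Bit 17 is the second turnaround cycle; a present PHY drives it
+	 * low, so a 1 there means no PHY answered.  The 16 data bits sit
+	 * above a trailing idle bit, hence the >>1. */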
+ return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
+ int i;
+
+ spin_lock_bh(&vp->mii_lock);
+
+ if (mii_preamble_required)
+ mdio_sync(vp, 32);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
+ mdio_delay(vp);
+ window_write16(vp, dataval | MDIO_SHIFT_CLK,
+ 4, Wn4_PhysicalMgmt);
+ mdio_delay(vp);
+ }
+ /* Leave the interface idle. */
+ for (i = 1; i >= 0; i--) {
+ window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
+ mdio_delay(vp);
+ window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
+ 4, Wn4_PhysicalMgmt);
+ mdio_delay(vp);
+ }
+
+ spin_unlock_bh(&vp->mii_lock);
+}
+
+/* ACPI: Advanced Configuration and Power Interface. */
+/* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */
+static void acpi_set_WOL(struct net_device *dev)
+{
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
+
+ device_set_wakeup_enable(vp->gendev, vp->enable_wol);
+
+ if (vp->enable_wol) {
+ /* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
+ window_write16(vp, 2, 7, 0x0c);
+ /* The RxFilter must accept the WOL frames. */
+ iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
+ iowrite16(RxEnable, ioaddr + EL3_CMD);
+
+ if (pci_enable_wake(VORTEX_PCI(vp), PCI_D3hot, 1)) {
+ pr_info("%s: WOL not supported.\n", pci_name(VORTEX_PCI(vp)));
+
+ vp->enable_wol = 0;
+ return;
+ }
+
+ if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
+ return;
+
+ /* Change the power state to D3; RxEnable doesn't take effect. */
+ pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
+ }
+}
+
+
+static void __devexit vortex_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct vortex_private *vp;
+
+ if (!dev) {
+ pr_err("vortex_remove_one called for Compaq device!\n");
+ BUG();
+ }
+
+ vp = netdev_priv(dev);
+
+ if (vp->cb_fn_base)
+ pci_iounmap(VORTEX_PCI(vp), vp->cb_fn_base);
+
+ unregister_netdev(dev);
+
+ if (VORTEX_PCI(vp)) {
+ pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */
+ if (vp->pm_state_valid)
+ pci_restore_state(VORTEX_PCI(vp));
+ pci_disable_device(VORTEX_PCI(vp));
+ }
+ /* Should really use issue_and_wait() here */
+ iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14),
+ vp->ioaddr + EL3_CMD);
+
+ pci_iounmap(VORTEX_PCI(vp), vp->ioaddr);
+
+ pci_free_consistent(pdev,
+ sizeof(struct boom_rx_desc) * RX_RING_SIZE
+ + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+ vp->rx_ring,
+ vp->rx_ring_dma);
+ if (vp->must_free_region)
+ release_region(dev->base_addr, vp->io_size);
+ free_netdev(dev);
+}
+
+
+static struct pci_driver vortex_driver = {
+ .name = "3c59x",
+ .probe = vortex_init_one,
+ .remove = __devexit_p(vortex_remove_one),
+ .id_table = vortex_pci_tbl,
+ .driver.pm = VORTEX_PM_OPS,
+};
+
+
+static int vortex_have_pci;
+static int vortex_have_eisa;
+
+
+static int __init vortex_init(void)
+{
+ int pci_rc, eisa_rc;
+
+ pci_rc = pci_register_driver(&vortex_driver);
+ eisa_rc = vortex_eisa_init();
+
+ if (pci_rc == 0)
+ vortex_have_pci = 1;
+ if (eisa_rc > 0)
+ vortex_have_eisa = 1;
+
+ return (vortex_have_pci + vortex_have_eisa) ? 0 : -ENODEV;
+}
+
+
+static void __exit vortex_eisa_cleanup(void)
+{
+ struct vortex_private *vp;
+ void __iomem *ioaddr;
+
+#ifdef CONFIG_EISA
+ /* Take care of the EISA devices */
+ eisa_driver_unregister(&vortex_eisa_driver);
+#endif
+
+ if (compaq_net_device) {
+ vp = netdev_priv(compaq_net_device);
+ ioaddr = ioport_map(compaq_net_device->base_addr,
+ VORTEX_TOTAL_SIZE);
+
+ unregister_netdev(compaq_net_device);
+ iowrite16(TotalReset, ioaddr + EL3_CMD);
+ release_region(compaq_net_device->base_addr,
+ VORTEX_TOTAL_SIZE);
+
+ free_netdev(compaq_net_device);
+ }
+}
+
+
+static void __exit vortex_cleanup(void)
+{
+ if (vortex_have_pci)
+ pci_unregister_driver(&vortex_driver);
+ if (vortex_have_eisa)
+ vortex_eisa_cleanup();
+}
+
+
+module_init(vortex_init);
+module_exit(vortex_cleanup);
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
new file mode 100644
index 000000000000..a8bb30cf512d
--- /dev/null
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -0,0 +1,122 @@
+#
+# 3Com Ethernet device configuration
+#
+
+config NET_VENDOR_3COM
+ bool "3Com devices"
+ default y
+ depends on ISA || EISA || MCA || PCI || PCMCIA
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about 3Com cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_3COM
+
+config EL1
+ tristate "3c501 \"EtherLink\" support"
+ depends on ISA
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Also, consider buying a
+ new card, since the 3c501 is slow, broken, and obsolete: you will
+	  have problems. Some people suggest pinging ("man ping") a nearby
+ machine every minute ("man cron") when using this card.
+
+ To compile this driver as a module, choose M here. The module
+ will be called 3c501.
+
+config EL3
+ tristate "3c509/3c529 (MCA)/3c579 \"EtherLink III\" support"
+ depends on (ISA || EISA || MCA)
+ ---help---
+ If you have a network (Ethernet) card belonging to the 3Com
+ EtherLinkIII series, say Y and read the Ethernet-HOWTO, available
+ from <http://www.tldp.org/docs.html#howto>.
+
+ If your card is not working you may need to use the DOS
+ setup disk to disable Plug & Play mode, and to select the default
+ media type.
+
+ To compile this driver as a module, choose M here. The module
+ will be called 3c509.
+
+config 3C515
+ tristate "3c515 ISA \"Fast EtherLink\""
+ depends on (ISA || EISA) && ISA_DMA_API
+ ---help---
+ If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
+ network card, say Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called 3c515.
+
+config PCMCIA_3C574
+ tristate "3Com 3c574 PCMCIA support"
+ depends on PCMCIA
+ ---help---
+ Say Y here if you intend to attach a 3Com 3c574 or compatible PCMCIA
+ (PC-card) Fast Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called 3c574_cs. If unsure, say N.
+
+config PCMCIA_3C589
+ tristate "3Com 3c589 PCMCIA support"
+ depends on PCMCIA
+ ---help---
+ Say Y here if you intend to attach a 3Com 3c589 or compatible PCMCIA
+ (PC-card) Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called 3c589_cs. If unsure, say N.
+
+config VORTEX
+ tristate "3c590/3c900 series (592/595/597) \"Vortex/Boomerang\" support"
+ depends on (PCI || EISA)
+ select NET_CORE
+ select MII
+ ---help---
+ This option enables driver support for a large number of 10Mbps and
+ 10/100Mbps EISA, PCI and PCMCIA 3Com network cards:
+
+ "Vortex" (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI
+ "Boomerang" (EtherLink XL 3c900 or 3c905) PCI
+ "Cyclone" (3c540/3c900/3c905/3c980/3c575/3c656) PCI and Cardbus
+ "Tornado" (3c905) PCI
+ "Hurricane" (3c555/3cSOHO) PCI
+
+ If you have such a card, say Y and read the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>. More
+ specific information is in
+ <file:Documentation/networking/vortex.txt> and in the comments at
+ the beginning of <file:drivers/net/3c59x.c>.
+
+ To compile this support as a module, choose M here.
+
+config TYPHOON
+ tristate "3cr990 series \"Typhoon\" support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This option enables driver support for the 3cr990 series of cards:
+
+ 3C990-TX, 3CR990-TX-95, 3CR990-TX-97, 3CR990-FX-95, 3CR990-FX-97,
+ 3CR990SVR, 3CR990SVR95, 3CR990SVR97, 3CR990-FX-95 Server,
+ 3CR990-FX-97 Server, 3C990B-TX-M, 3C990BSVR
+
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called typhoon.
+
+endif # NET_VENDOR_3COM
diff --git a/drivers/net/ethernet/3com/Makefile b/drivers/net/ethernet/3com/Makefile
new file mode 100644
index 000000000000..1e5382a30ead
--- /dev/null
+++ b/drivers/net/ethernet/3com/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the 3Com Ethernet device drivers
+#
+
+obj-$(CONFIG_EL1) += 3c501.o
+obj-$(CONFIG_EL3) += 3c509.o
+obj-$(CONFIG_3C515) += 3c515.o
+obj-$(CONFIG_PCMCIA_3C589) += 3c589_cs.o
+obj-$(CONFIG_PCMCIA_3C574) += 3c574_cs.o
+obj-$(CONFIG_VORTEX) += 3c59x.o
+obj-$(CONFIG_TYPHOON) += typhoon.o
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
new file mode 100644
index 000000000000..607c09e3dc80
--- /dev/null
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -0,0 +1,2573 @@
+/* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
+/*
+ Written 2002-2004 by David Dillow <dave@thedillows.org>
+ Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
+ Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This software is available on a public web site. It may enable
+ cryptographic capabilities of the 3Com hardware, and may be
+ exported from the United States under License Exception "TSU"
+ pursuant to 15 C.F.R. Section 740.13(e).
+
+ This work was funded by the National Library of Medicine under
+ the Department of Energy project number 0274DD06D1 and NLM project
+ number Y1-LM-2015-01.
+
+ This driver is designed for the 3Com 3CR990 Family of cards with the
+ 3XP Processor. It has been tested on x86 and sparc64.
+
+ KNOWN ISSUES:
+	*) Cannot DMA Rx packets to a 2 byte aligned address. Also a firmware
+ issue. Hopefully 3Com will fix it.
+ *) Waiting for a command response takes 8ms due to non-preemptable
+ polling. Only significant for getting stats and creating
+ SAs, but an ugly wart never the less.
+
+ TODO:
+ *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
+ *) Add more support for ethtool (especially for NIC stats)
+ *) Allow disabling of RX checksum offloading
+ *) Fix MAC changing to work while the interface is up
+ (Need to put commands on the TX ring, which changes
+ the locking)
+ *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
+ http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
+*/
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ * Setting to > 1518 effectively disables this feature.
+ */
+static int rx_copybreak = 200;
+
+/* Should we use MMIO or Port IO?
+ * 0: Port IO
+ * 1: MMIO
+ * 2: Try MMIO, fallback to Port IO
+ */
+static unsigned int use_mmio = 2;
+
+/* end user-configurable values */
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ */
+static const int multicast_filter_limit = 32;
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ * Making the Tx ring too large decreases the effectiveness of channel
+ * bonding and packet priority.
+ * There are no ill effects from too-large receive rings.
+ *
+ * We don't currently use the Hi Tx ring, so don't make it very big.
+ *
+ * Beware that if we start using the Hi Tx ring, we will need to change
+ * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
+ */
+#define TXHI_ENTRIES 2
+#define TXLO_ENTRIES 128
+#define RX_ENTRIES 32
+#define COMMAND_ENTRIES 16
+#define RESPONSE_ENTRIES 32
+
+#define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc))
+#define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc))
+
+/* The 3XP will preload and remove 64 entries from the free buffer
+ * list, and we need one entry to keep the ring from wrapping, so
+ * to keep this a power of two, we use 128 entries.
+ */
+#define RXFREE_ENTRIES 128
+#define RXENT_ENTRIES (RXFREE_ENTRIES - 1)
+
+/* Operational parameters that usually are not changed. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2*HZ)
+
+#define PKT_BUF_SZ 1536
+#define FIRMWARE_NAME "3com/typhoon.bin"
+
+#define pr_fmt(fmt) KBUILD_MODNAME " " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <linux/in6.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+
+#include "typhoon.h"
+
+MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE_NAME);
+MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
+MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
+ "the buffer given back to the NIC. Default "
+ "is 200.");
+MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO (0) to access the NIC. "
+ "Default is to try MMIO and fallback to PIO.");
+module_param(rx_copybreak, int, 0);
+module_param(use_mmio, int, 0);
+
+#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
+#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
+#undef NETIF_F_TSO
+#endif
+
+#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
+#error TX ring too small!
+#endif
+
+struct typhoon_card_info {
+ const char *name;
+ const int capabilities;
+};
+
+#define TYPHOON_CRYPTO_NONE 0x00
+#define TYPHOON_CRYPTO_DES 0x01
+#define TYPHOON_CRYPTO_3DES 0x02
+#define TYPHOON_CRYPTO_VARIABLE 0x04
+#define TYPHOON_FIBER 0x08
+#define TYPHOON_WAKEUP_NEEDS_RESET 0x10
+
+enum typhoon_cards {
+ TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
+ TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
+ TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
+ TYPHOON_FXM,
+};
+
+/* directly indexed by enum typhoon_cards, above */
+static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
+ { "3Com Typhoon (3C990-TX)",
+ TYPHOON_CRYPTO_NONE},
+ { "3Com Typhoon (3CR990-TX-95)",
+ TYPHOON_CRYPTO_DES},
+ { "3Com Typhoon (3CR990-TX-97)",
+ TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
+ { "3Com Typhoon (3C990SVR)",
+ TYPHOON_CRYPTO_NONE},
+ { "3Com Typhoon (3CR990SVR95)",
+ TYPHOON_CRYPTO_DES},
+ { "3Com Typhoon (3CR990SVR97)",
+ TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
+ { "3Com Typhoon2 (3C990B-TX-M)",
+ TYPHOON_CRYPTO_VARIABLE},
+ { "3Com Typhoon2 (3C990BSVR)",
+ TYPHOON_CRYPTO_VARIABLE},
+ { "3Com Typhoon (3CR990-FX-95)",
+ TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
+ { "3Com Typhoon (3CR990-FX-97)",
+ TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
+ { "3Com Typhoon (3CR990-FX-95 Server)",
+ TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
+ { "3Com Typhoon (3CR990-FX-97 Server)",
+ TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
+ { "3Com Typhoon2 (3C990B-FX-97)",
+ TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
+};
+
+/* Notes on the new subsystem numbering scheme:
+ * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
+ * bit 4 indicates if this card has secured firmware (we don't support it)
+ * bit 8 indicates if this is a (0) copper or (1) fiber card
+ * bits 12-16 indicate card type: (0) client or (1) server
+ */
+static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
+ PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
+ PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
+ PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
+ PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
+ PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
+ PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
+ PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
+
+/* Define the shared memory area
+ * Align everything the 3XP will normally be using.
+ * We'll need to move/align txHi if we start using that ring.
+ */
+#define __3xp_aligned ____cacheline_aligned
+struct typhoon_shared {
+ struct typhoon_interface iface;
+ struct typhoon_indexes indexes __3xp_aligned;
+ struct tx_desc txLo[TXLO_ENTRIES] __3xp_aligned;
+ struct rx_desc rxLo[RX_ENTRIES] __3xp_aligned;
+ struct rx_desc rxHi[RX_ENTRIES] __3xp_aligned;
+ struct cmd_desc cmd[COMMAND_ENTRIES] __3xp_aligned;
+ struct resp_desc resp[RESPONSE_ENTRIES] __3xp_aligned;
+ struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned;
+ u32 zeroWord;
+ struct tx_desc txHi[TXHI_ENTRIES];
+} __packed;
+
+struct rxbuff_ent {
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+};
+
+struct typhoon {
+ /* Tx cache line section */
+ struct transmit_ring txLoRing ____cacheline_aligned;
+ struct pci_dev * tx_pdev;
+ void __iomem *tx_ioaddr;
+ u32 txlo_dma_addr;
+
+ /* Irq/Rx cache line section */
+ void __iomem *ioaddr ____cacheline_aligned;
+ struct typhoon_indexes *indexes;
+ u8 awaiting_resp;
+ u8 duplex;
+ u8 speed;
+ u8 card_state;
+ struct basic_ring rxLoRing;
+ struct pci_dev * pdev;
+ struct net_device * dev;
+ struct napi_struct napi;
+ struct basic_ring rxHiRing;
+ struct basic_ring rxBuffRing;
+ struct rxbuff_ent rxbuffers[RXENT_ENTRIES];
+
+ /* general section */
+ spinlock_t command_lock ____cacheline_aligned;
+ struct basic_ring cmdRing;
+ struct basic_ring respRing;
+ struct net_device_stats stats;
+ struct net_device_stats stats_saved;
+ struct typhoon_shared * shared;
+ dma_addr_t shared_dma;
+ __le16 xcvr_select;
+ __le16 wol_events;
+ __le32 offload;
+
+ /* unused stuff (future use) */
+ int capabilities;
+ struct transmit_ring txHiRing;
+};
+
+enum completion_wait_values {
+ NoWait = 0, WaitNoSleep, WaitSleep,
+};
+
+/* These are the values for the typhoon.card_state variable.
+ * These determine where the statistics will come from in get_stats().
+ * The sleep image does not support the statistics we need.
+ */
+enum state_values {
+ Sleeping = 0, Running,
+};
+
+/* PCI writes are not guaranteed to be posted in order, but outstanding writes
+ * cannot pass a read, so this forces current writes to post.
+ */
+#define typhoon_post_pci_writes(x) \
+ do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
+
+/* We'll wait up to six seconds for a reset, and half a second normally.
+ */
+#define TYPHOON_UDELAY 50
+#define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ)
+#define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY)
+#define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY)
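+/* With TYPHOON_UDELAY == 50, TYPHOON_WAIT_TIMEOUT works out to 10,000
+ * polls of 50us each (the half second above), and
+ * TYPHOON_RESET_TIMEOUT_NOSLEEP to 120,000 polls (the full six seconds).
+ */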
+
+#if defined(NETIF_F_TSO)
+#define skb_tso_size(x) (skb_shinfo(x)->gso_size)
+#define TSO_NUM_DESCRIPTORS 2
+#define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT
+#else
+#define NETIF_F_TSO 0
+#define skb_tso_size(x) 0
+#define TSO_NUM_DESCRIPTORS 0
+#define TSO_OFFLOAD_ON 0
+#endif
+
+static inline void
+typhoon_inc_index(u32 *index, const int count, const int num_entries)
+{
+ /* Increment a ring index -- we can use this for all rings except
+ * the Rx rings, as they use different-size descriptors;
+ * otherwise, everything is the same size as a cmd_desc.
+ */
+ *index += count * sizeof(struct cmd_desc);
+ *index %= num_entries * sizeof(struct cmd_desc);
+}
+
+static inline void
+typhoon_inc_cmd_index(u32 *index, const int count)
+{
+ typhoon_inc_index(index, count, COMMAND_ENTRIES);
+}
+
+static inline void
+typhoon_inc_resp_index(u32 *index, const int count)
+{
+ typhoon_inc_index(index, count, RESPONSE_ENTRIES);
+}
+
+static inline void
+typhoon_inc_rxfree_index(u32 *index, const int count)
+{
+ typhoon_inc_index(index, count, RXFREE_ENTRIES);
+}
+
+static inline void
+typhoon_inc_tx_index(u32 *index, const int count)
+{
+ /* if we start using the Hi Tx ring, this needs updating */
+ typhoon_inc_index(index, count, TXLO_ENTRIES);
+}
+
+static inline void
+typhoon_inc_rx_index(u32 *index, const int count)
+{
+ /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
+ *index += count * sizeof(struct rx_desc);
+ *index %= RX_ENTRIES * sizeof(struct rx_desc);
+}
+
+static int
+typhoon_reset(void __iomem *ioaddr, int wait_type)
+{
+ int i, err = 0;
+ int timeout;
+
+ if(wait_type == WaitNoSleep)
+ timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
+ else
+ timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
+
+ iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
+ iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
+
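+ /* Pulse the soft reset: assert every reset bit, force the write to
+ * post, hold for 1us, then deassert.
+ */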
+ iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
+ typhoon_post_pci_writes(ioaddr);
+ udelay(1);
+ iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
+
+ if(wait_type != NoWait) {
+ for(i = 0; i < timeout; i++) {
+ if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
+ TYPHOON_STATUS_WAITING_FOR_HOST)
+ goto out;
+
+ if(wait_type == WaitSleep)
+ schedule_timeout_uninterruptible(1);
+ else
+ udelay(TYPHOON_UDELAY);
+ }
+
+ err = -ETIMEDOUT;
+ }
+
+out:
+ iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
+ iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
+
+ /* The 3XP seems to need a little extra time to complete the load
+ * of the sleep image before we can reliably boot it. Failure to
+ * do this occasionally results in a hung adapter after boot in
+ * typhoon_init_one() while trying to read the MAC address or
+ * putting the card to sleep. 3Com's driver waits 5ms, but
+ * that seems to be overkill. However, if we can sleep, we might
+ * as well give it that much time. Otherwise, we'll give it 500us,
+ * which should be enough (I've seen it work well at 100us, but still
+ * saw occasional problems.)
+ */
+ if(wait_type == WaitSleep)
+ msleep(5);
+ else
+ udelay(500);
+ return err;
+}
+
+static int
+typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
+{
+ int i, err = 0;
+
+ for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
+ if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
+ goto out;
+ udelay(TYPHOON_UDELAY);
+ }
+
+ err = -ETIMEDOUT;
+
+out:
+ return err;
+}
+
+static inline void
+typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
+{
+ if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
+ netif_carrier_off(dev);
+ else
+ netif_carrier_on(dev);
+}
+
+static inline void
+typhoon_hello(struct typhoon *tp)
+{
+ struct basic_ring *ring = &tp->cmdRing;
+ struct cmd_desc *cmd;
+
+ /* We only get a hello request if we've not sent anything to the
+ * card in a long while. If the lock is held, then we're in the
+ * process of issuing a command, so we don't need to respond.
+ */
+ if(spin_trylock(&tp->command_lock)) {
+ cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
+ typhoon_inc_cmd_index(&ring->lastWrite, 1);
+
+ INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
+ wmb();
+ iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
+ spin_unlock(&tp->command_lock);
+ }
+}
+
+static int
+typhoon_process_response(struct typhoon *tp, int resp_size,
+ struct resp_desc *resp_save)
+{
+ struct typhoon_indexes *indexes = tp->indexes;
+ struct resp_desc *resp;
+ u8 *base = tp->respRing.ringBase;
+ int count, len, wrap_len;
+ u32 cleared;
+ u32 ready;
+
+ cleared = le32_to_cpu(indexes->respCleared);
+ ready = le32_to_cpu(indexes->respReady);
+ while(cleared != ready) {
+ resp = (struct resp_desc *)(base + cleared);
+ count = resp->numDesc + 1;
+ if(resp_save && resp->seqNo) {
+ if(count > resp_size) {
+ resp_save->flags = TYPHOON_RESP_ERROR;
+ goto cleanup;
+ }
+
+ wrap_len = 0;
+ len = count * sizeof(*resp);
+ if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
+ wrap_len = cleared + len - RESPONSE_RING_SIZE;
+ len = RESPONSE_RING_SIZE - cleared;
+ }
+
+ memcpy(resp_save, resp, len);
+ if(unlikely(wrap_len)) {
+ resp_save += len / sizeof(*resp);
+ memcpy(resp_save, base, wrap_len);
+ }
+
+ resp_save = NULL;
+ } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
+ typhoon_media_status(tp->dev, resp);
+ } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
+ typhoon_hello(tp);
+ } else {
+ netdev_err(tp->dev,
+ "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
+ le16_to_cpu(resp->cmd),
+ resp->numDesc, resp->flags,
+ le16_to_cpu(resp->parm1),
+ le32_to_cpu(resp->parm2),
+ le32_to_cpu(resp->parm3));
+ }
+
+cleanup:
+ typhoon_inc_resp_index(&cleared, count);
+ }
+
+ indexes->respCleared = cpu_to_le32(cleared);
+ wmb();
+ return resp_save == NULL;
+}
+
+static inline int
+typhoon_num_free(int lastWrite, int lastRead, int ringSize)
+{
+ /* this works for all descriptors but rx_desc, as they are a
+ * different size than the cmd_desc -- everyone else is the same
+ */
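+ /* The indices are byte offsets into the ring, so scale them down to
+ * descriptor counts first; the -1 keeps one slot unused so a full
+ * ring is distinguishable from an empty one.
+ */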
+ lastWrite /= sizeof(struct cmd_desc);
+ lastRead /= sizeof(struct cmd_desc);
+ return (ringSize + lastRead - lastWrite - 1) % ringSize;
+}
+
+static inline int
+typhoon_num_free_cmd(struct typhoon *tp)
+{
+ int lastWrite = tp->cmdRing.lastWrite;
+ int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
+
+ return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
+}
+
+static inline int
+typhoon_num_free_resp(struct typhoon *tp)
+{
+ int respReady = le32_to_cpu(tp->indexes->respReady);
+ int respCleared = le32_to_cpu(tp->indexes->respCleared);
+
+ return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
+}
+
+static inline int
+typhoon_num_free_tx(struct transmit_ring *ring)
+{
+ /* if we start using the Hi Tx ring, this needs updating */
+ return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
+}
+
+static int
+typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
+ int num_resp, struct resp_desc *resp)
+{
+ struct typhoon_indexes *indexes = tp->indexes;
+ struct basic_ring *ring = &tp->cmdRing;
+ struct resp_desc local_resp;
+ int i, err = 0;
+ int got_resp;
+ int freeCmd, freeResp;
+ int len, wrap_len;
+
+ spin_lock(&tp->command_lock);
+
+ freeCmd = typhoon_num_free_cmd(tp);
+ freeResp = typhoon_num_free_resp(tp);
+
+ if(freeCmd < num_cmd || freeResp < num_resp) {
+ netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
+ freeCmd, num_cmd, freeResp, num_resp);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if(cmd->flags & TYPHOON_CMD_RESPOND) {
+ /* If we're expecting a response, but the caller hasn't given
+ * us a place to put it, we'll provide one.
+ */
+ tp->awaiting_resp = 1;
+ if(resp == NULL) {
+ resp = &local_resp;
+ num_resp = 1;
+ }
+ }
+
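+ /* Copy the command(s) into the ring, splitting the copy in two if
+ * the block wraps past the end of the ring.
+ */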
+ wrap_len = 0;
+ len = num_cmd * sizeof(*cmd);
+ if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
+ wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
+ len = COMMAND_RING_SIZE - ring->lastWrite;
+ }
+
+ memcpy(ring->ringBase + ring->lastWrite, cmd, len);
+ if(unlikely(wrap_len)) {
+ struct cmd_desc *wrap_ptr = cmd;
+ wrap_ptr += len / sizeof(*cmd);
+ memcpy(ring->ringBase, wrap_ptr, wrap_len);
+ }
+
+ typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
+
+ /* "I feel a presence... another warrior is on the mesa."
+ */
+ wmb();
+ iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
+ typhoon_post_pci_writes(tp->ioaddr);
+
+ if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
+ goto out;
+
+ /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
+ * preempt or do anything other than take interrupts. So, don't
+ * wait for a response unless you have to.
+ *
+ * I've thought about trying to sleep here, but we're called
+ * from many contexts that don't allow that. Also, given the way
+ * 3Com has implemented irq coalescing, we would likely time out --
+ * this has been observed in real life!
+ *
+ * The big killer is we have to wait to get stats from the card,
+ * though we could go to a periodic refresh of those if we don't
+ * mind them getting somewhat stale. The rest of the waiting
+ * commands occur during open/close/suspend/resume, so they aren't
+ * time critical. Creating SAs in the future will also have to
+ * wait here.
+ */
+ got_resp = 0;
+ for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
+ if(indexes->respCleared != indexes->respReady)
+ got_resp = typhoon_process_response(tp, num_resp,
+ resp);
+ udelay(TYPHOON_UDELAY);
+ }
+
+ if(!got_resp) {
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* Collect the error response even if we don't care about the
+ * rest of the response
+ */
+ if(resp->flags & TYPHOON_RESP_ERROR)
+ err = -EIO;
+
+out:
+ if(tp->awaiting_resp) {
+ tp->awaiting_resp = 0;
+ smp_wmb();
+
+ /* Ugh. If a response was added to the ring between
+ * the call to typhoon_process_response() and the clearing
+ * of tp->awaiting_resp, we could have missed the interrupt
+ * and it could hang in the ring an indeterminate amount of
+ * time. So, check for it, and interrupt ourselves if this
+ * is the case.
+ */
+ if(indexes->respCleared != indexes->respReady)
+ iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
+ }
+
+ spin_unlock(&tp->command_lock);
+ return err;
+}
+
+static inline void
+typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
+ u32 ring_dma)
+{
+ struct tcpopt_desc *tcpd;
+ u32 tcpd_offset = ring_dma;
+
+ tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
+ tcpd_offset += txRing->lastWrite;
+ tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
+ typhoon_inc_tx_index(&txRing->lastWrite, 1);
+
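+ /* tcpd_offset is now the bus address of this descriptor's bytesTx
+ * field, which we hand to the NIC as the response address below.
+ */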
+ tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
+ tcpd->numDesc = 1;
+ tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
+ tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
+ tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
+ tcpd->bytesTx = cpu_to_le32(skb->len);
+ tcpd->status = 0;
+}
+
+static netdev_tx_t
+typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct typhoon *tp = netdev_priv(dev);
+ struct transmit_ring *txRing;
+ struct tx_desc *txd, *first_txd;
+ dma_addr_t skb_dma;
+ int numDesc;
+
+ /* we have two rings to choose from, but we only use txLo for now
+ * If we start using the Hi ring as well, we'll need to update
+ * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
+ * and TXHI_ENTRIES to match, as well as update the TSO code below
+ * to get the right DMA address
+ */
+ txRing = &tp->txLoRing;
+
+ /* We need one descriptor for each fragment of the sk_buff, plus the
+ * one for the ->data area of it.
+ *
+ * The docs say a maximum of 16 fragment descriptors per TCP option
+ * descriptor, then make a new packet descriptor and option descriptor
+ * for the next 16 fragments. The engineers say just an option
+ * descriptor is needed. I've tested up to 26 fragments with a single
+ * packet descriptor/option descriptor combo, so I use that for now.
+ *
+ * If problems develop with TSO, check this first.
+ */
+ numDesc = skb_shinfo(skb)->nr_frags + 1;
+ if (skb_is_gso(skb))
+ numDesc++;
+
+ /* When checking for free space in the ring, we need to also
+ * account for the initial Tx descriptor, and we always must leave
+ * at least one descriptor unused in the ring so that it doesn't
+ * wrap and look empty.
+ *
+ * The only time we should loop here is when we hit the race
+ * between marking the queue awake and updating the cleared index.
+ * Just loop and it will appear. This comes from the acenic driver.
+ */
+ while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
+ smp_rmb();
+
+ first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
+ typhoon_inc_tx_index(&txRing->lastWrite, 1);
+
+ first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
+ first_txd->numDesc = 0;
+ first_txd->len = 0;
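+ /* Stash the skb pointer in tx_addr; typhoon_clean_tx() recovers it
+ * from there to free the skb on completion.
+ */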
+ first_txd->tx_addr = (u64)((unsigned long) skb);
+ first_txd->processFlags = 0;
+
+ if(skb->ip_summed == CHECKSUM_PARTIAL) {
+ /* The 3XP will figure out if this is UDP/TCP */
+ first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
+ first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
+ first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
+ }
+
+ if(vlan_tx_tag_present(skb)) {
+ first_txd->processFlags |=
+ TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
+ first_txd->processFlags |=
+ cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
+ TYPHOON_TX_PF_VLAN_TAG_SHIFT);
+ }
+
+ if (skb_is_gso(skb)) {
+ first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
+ first_txd->numDesc++;
+
+ typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
+ }
+
+ txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
+ typhoon_inc_tx_index(&txRing->lastWrite, 1);
+
+ /* No need to worry about padding the packet -- the firmware pads
+ * it with zeros to ETH_ZLEN for us.
+ */
+ if(skb_shinfo(skb)->nr_frags == 0) {
+ skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
+ txd->len = cpu_to_le16(skb->len);
+ txd->frag.addr = cpu_to_le32(skb_dma);
+ txd->frag.addrHi = 0;
+ first_txd->numDesc++;
+ } else {
+ int i, len;
+
+ len = skb_headlen(skb);
+ skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
+ PCI_DMA_TODEVICE);
+ txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
+ txd->len = cpu_to_le16(len);
+ txd->frag.addr = cpu_to_le32(skb_dma);
+ txd->frag.addrHi = 0;
+ first_txd->numDesc++;
+
+ for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ void *frag_addr;
+
+ txd = (struct tx_desc *) (txRing->ringBase +
+ txRing->lastWrite);
+ typhoon_inc_tx_index(&txRing->lastWrite, 1);
+
+ len = frag->size;
+ frag_addr = skb_frag_address(frag);
+ skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
+ PCI_DMA_TODEVICE);
+ txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
+ txd->len = cpu_to_le16(len);
+ txd->frag.addr = cpu_to_le32(skb_dma);
+ txd->frag.addrHi = 0;
+ first_txd->numDesc++;
+ }
+ }
+
+ /* Kick the 3XP
+ */
+ wmb();
+ iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
+
+ /* If we don't have room to put the worst case packet on the
+ * queue, then we must stop the queue. We need 2 extra
+ * descriptors -- one to prevent ring wrap, and one for the
+ * Tx header.
+ */
+ numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;
+
+ if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
+ netif_stop_queue(dev);
+
+ /* A Tx complete IRQ could have gotten in between, making
+ * the ring free again. Only need to recheck here, since
+ * Tx is serialized.
+ */
+ if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
+ netif_wake_queue(dev);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static void
+typhoon_set_rx_mode(struct net_device *dev)
+{
+ struct typhoon *tp = netdev_priv(dev);
+ struct cmd_desc xp_cmd;
+ u32 mc_filter[2];
+ __le16 filter;
+
+ filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
+ if(dev->flags & IFF_PROMISC) {
+ filter |= TYPHOON_RX_FILTER_PROMISCOUS;
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ filter |= TYPHOON_RX_FILTER_ALL_MCAST;
+ } else if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
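+ /* Hash each address down to one of 64 filter bits: the low six
+ * bits of the Ethernet CRC pick a bit in the two filter words.
+ */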
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
+ mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
+ }
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd,
+ TYPHOON_CMD_SET_MULTICAST_HASH);
+ xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
+ xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
+ xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
+ typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+
+ filter |= TYPHOON_RX_FILTER_MCAST_HASH;
+ }
+
+ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
+ xp_cmd.parm1 = filter;
+ typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+}
+
+static int
+typhoon_do_get_stats(struct typhoon *tp)
+{
+ struct net_device_stats *stats = &tp->stats;
+ struct net_device_stats *saved = &tp->stats_saved;
+ struct cmd_desc xp_cmd;
+ struct resp_desc xp_resp[7];
+ struct stats_resp *s = (struct stats_resp *) xp_resp;
+ int err;
+
+ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
+ if(err < 0)
+ return err;
+
+ /* 3Com's Linux driver uses txMultipleCollisions as its
+ * collisions value, but there is some other collision info as well...
+ *
+ * The extra status reported would be a good candidate for
+ * ethtool_ops->get_{strings,stats}()
+ */
+ stats->tx_packets = le32_to_cpu(s->txPackets) +
+ saved->tx_packets;
+ stats->tx_bytes = le64_to_cpu(s->txBytes) +
+ saved->tx_bytes;
+ stats->tx_errors = le32_to_cpu(s->txCarrierLost) +
+ saved->tx_errors;
+ stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost) +
+ saved->tx_carrier_errors;
+ stats->collisions = le32_to_cpu(s->txMultipleCollisions) +
+ saved->collisions;
+ stats->rx_packets = le32_to_cpu(s->rxPacketsGood) +
+ saved->rx_packets;
+ stats->rx_bytes = le64_to_cpu(s->rxBytesGood) +
+ saved->rx_bytes;
+ stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns) +
+ saved->rx_fifo_errors;
+ stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
+ le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors) +
+ saved->rx_errors;
+ stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors) +
+ saved->rx_crc_errors;
+ stats->rx_length_errors = le32_to_cpu(s->rxOversized) +
+ saved->rx_length_errors;
+ tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
+ SPEED_100 : SPEED_10;
+ tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+
+ return 0;
+}
+
+static struct net_device_stats *
+typhoon_get_stats(struct net_device *dev)
+{
+ struct typhoon *tp = netdev_priv(dev);
+ struct net_device_stats *stats = &tp->stats;
+ struct net_device_stats *saved = &tp->stats_saved;
+
+ smp_rmb();
+ if(tp->card_state == Sleeping)
+ return saved;
+
+ if(typhoon_do_get_stats(tp) < 0) {
+ netdev_err(dev, "error getting stats\n");
+ return saved;
+ }
+
+ return stats;
+}
+
+static int
+typhoon_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *saddr = (struct sockaddr *) addr;
+
+ if(netif_running(dev))
+ return -EBUSY;
+
+ memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
+ return 0;
+}
+
+static void
+typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct typhoon *tp = netdev_priv(dev);
+ struct pci_dev *pci_dev = tp->pdev;
+ struct cmd_desc xp_cmd;
+ struct resp_desc xp_resp[3];
+
+ smp_rmb();
+ if(tp->card_state == Sleeping) {
+ strcpy(info->fw_version, "Sleep image");
+ } else {
+ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
+ if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
+ strcpy(info->fw_version, "Unknown runtime");
+ } else {
+ u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
+ snprintf(info->fw_version, 32, "%02x.%03x.%03x",
+ sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
+ sleep_ver & 0xfff);
+ }
+ }
+
+ strcpy(info->driver, KBUILD_MODNAME);
+ strcpy(info->bus_info, pci_name(pci_dev));
+}
+
+static int
+typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct typhoon *tp = netdev_priv(dev);
+
+ cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg;
+
+ switch (tp->xcvr_select) {
+ case TYPHOON_XCVR_10HALF:
+ cmd->advertising = ADVERTISED_10baseT_Half;
+ break;
+ case TYPHOON_XCVR_10FULL:
+ cmd->advertising = ADVERTISED_10baseT_Full;
+ break;
+ case TYPHOON_XCVR_100HALF:
+ cmd->advertising = ADVERTISED_100baseT_Half;
+ break;
+ case TYPHOON_XCVR_100FULL:
+ cmd->advertising = ADVERTISED_100baseT_Full;
+ break;
+ case TYPHOON_XCVR_AUTONEG:
+ cmd->advertising = ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_Autoneg;
+ break;
+ }
+
+ if(tp->capabilities & TYPHOON_FIBER) {
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_FIBRE;
+ cmd->port = PORT_FIBRE;
+ } else {
+ cmd->supported |= SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_TP;
+ cmd->advertising |= ADVERTISED_TP;
+ cmd->port = PORT_TP;
+ }
+
+ /* need to get stats to make these link speed/duplex valid */
+ typhoon_do_get_stats(tp);
+ ethtool_cmd_speed_set(cmd, tp->speed);
+ cmd->duplex = tp->duplex;
+ cmd->phy_address = 0;
+ cmd->transceiver = XCVR_INTERNAL;
+ if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
+ cmd->autoneg = AUTONEG_ENABLE;
+ else
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->maxtxpkt = 1;
+ cmd->maxrxpkt = 1;
+
+ return 0;
+}
+
+static int
+typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct typhoon *tp = netdev_priv(dev);
+ u32 speed = ethtool_cmd_speed(cmd);
+ struct cmd_desc xp_cmd;
+ __le16 xcvr;
+ int err;
+
+ err = -EINVAL;
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ xcvr = TYPHOON_XCVR_AUTONEG;
+ } else {
+ if (cmd->duplex == DUPLEX_HALF) {
+ if (speed == SPEED_10)
+ xcvr = TYPHOON_XCVR_10HALF;
+ else if (speed == SPEED_100)
+ xcvr = TYPHOON_XCVR_100HALF;
+ else
+ goto out;
+ } else if (cmd->duplex == DUPLEX_FULL) {
+ if (speed == SPEED_10)
+ xcvr = TYPHOON_XCVR_10FULL;
+ else if (speed == SPEED_100)
+ xcvr = TYPHOON_XCVR_100FULL;
+ else
+ goto out;
+ } else
+ goto out;
+ }
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
+ xp_cmd.parm1 = xcvr;
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0)
+ goto out;
+
+ tp->xcvr_select = xcvr;
+ if(cmd->autoneg == AUTONEG_ENABLE) {
+ tp->speed = 0xff; /* invalid */
+ tp->duplex = 0xff; /* invalid */
+ } else {
+ tp->speed = speed;
+ tp->duplex = cmd->duplex;
+ }
+
+out:
+ return err;
+}
+
+static void
+typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct typhoon *tp = netdev_priv(dev);
+
+ wol->supported = WAKE_PHY | WAKE_MAGIC;
+ wol->wolopts = 0;
+ if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
+ wol->wolopts |= WAKE_PHY;
+ if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
+ wol->wolopts |= WAKE_MAGIC;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int
+typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct typhoon *tp = netdev_priv(dev);
+
+ if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
+
+ tp->wol_events = 0;
+ if(wol->wolopts & WAKE_PHY)
+ tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
+ if(wol->wolopts & WAKE_MAGIC)
+ tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
+
+ return 0;
+}
+
+static void
+typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+ ering->rx_max_pending = RXENT_ENTRIES;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = 0;
+ ering->tx_max_pending = TXLO_ENTRIES - 1;
+
+ ering->rx_pending = RXENT_ENTRIES;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = 0;
+ ering->tx_pending = TXLO_ENTRIES - 1;
+}
+
+static const struct ethtool_ops typhoon_ethtool_ops = {
+ .get_settings = typhoon_get_settings,
+ .set_settings = typhoon_set_settings,
+ .get_drvinfo = typhoon_get_drvinfo,
+ .get_wol = typhoon_get_wol,
+ .set_wol = typhoon_set_wol,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = typhoon_get_ringparam,
+};
+
+static int
+typhoon_wait_interrupt(void __iomem *ioaddr)
+{
+ int i, err = 0;
+
+ for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
+ if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
+ TYPHOON_INTR_BOOTCMD)
+ goto out;
+ udelay(TYPHOON_UDELAY);
+ }
+
+ err = -ETIMEDOUT;
+
+out:
+ iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
+ return err;
+}
+
+#define shared_offset(x) offsetof(struct typhoon_shared, x)
+
+static void
+typhoon_init_interface(struct typhoon *tp)
+{
+ struct typhoon_interface *iface = &tp->shared->iface;
+ dma_addr_t shared_dma;
+
+ memset(tp->shared, 0, sizeof(struct typhoon_shared));
+
+ /* The *Hi members of iface are all init'd to zero by the memset().
+ */
+ shared_dma = tp->shared_dma + shared_offset(indexes);
+ iface->ringIndex = cpu_to_le32(shared_dma);
+
+ shared_dma = tp->shared_dma + shared_offset(txLo);
+ iface->txLoAddr = cpu_to_le32(shared_dma);
+ iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));
+
+ shared_dma = tp->shared_dma + shared_offset(txHi);
+ iface->txHiAddr = cpu_to_le32(shared_dma);
+ iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));
+
+ shared_dma = tp->shared_dma + shared_offset(rxBuff);
+ iface->rxBuffAddr = cpu_to_le32(shared_dma);
+ iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
+ sizeof(struct rx_free));
+
+ shared_dma = tp->shared_dma + shared_offset(rxLo);
+ iface->rxLoAddr = cpu_to_le32(shared_dma);
+ iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
+
+ shared_dma = tp->shared_dma + shared_offset(rxHi);
+ iface->rxHiAddr = cpu_to_le32(shared_dma);
+ iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
+
+ shared_dma = tp->shared_dma + shared_offset(cmd);
+ iface->cmdAddr = cpu_to_le32(shared_dma);
+ iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);
+
+ shared_dma = tp->shared_dma + shared_offset(resp);
+ iface->respAddr = cpu_to_le32(shared_dma);
+ iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);
+
+ shared_dma = tp->shared_dma + shared_offset(zeroWord);
+ iface->zeroAddr = cpu_to_le32(shared_dma);
+
+ tp->indexes = &tp->shared->indexes;
+ tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
+ tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
+ tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
+ tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
+ tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
+ tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
+ tp->respRing.ringBase = (u8 *) tp->shared->resp;
+
+ tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
+ tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;
+
+ tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
+ tp->card_state = Sleeping;
+
+ tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
+ tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
+ tp->offload |= TYPHOON_OFFLOAD_VLAN;
+
+ spin_lock_init(&tp->command_lock);
+
+ /* Force the writes to the shared memory area out before continuing. */
+ wmb();
+}
+
+static void
+typhoon_init_rings(struct typhoon *tp)
+{
+ memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
+
+ tp->txLoRing.lastWrite = 0;
+ tp->txHiRing.lastWrite = 0;
+ tp->rxLoRing.lastWrite = 0;
+ tp->rxHiRing.lastWrite = 0;
+ tp->rxBuffRing.lastWrite = 0;
+ tp->cmdRing.lastWrite = 0;
+ tp->respRing.lastWrite = 0;
+
+ tp->txLoRing.lastRead = 0;
+ tp->txHiRing.lastRead = 0;
+}
+
+static const struct firmware *typhoon_fw;
+
+static int
+typhoon_request_firmware(struct typhoon *tp)
+{
+ const struct typhoon_file_header *fHdr;
+ const struct typhoon_section_header *sHdr;
+ const u8 *image_data;
+ u32 numSections;
+ u32 section_len;
+ u32 remaining;
+ int err;
+
+ if (typhoon_fw)
+ return 0;
+
+ err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
+ if (err) {
+ netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
+ FIRMWARE_NAME);
+ return err;
+ }
+
+ image_data = (u8 *) typhoon_fw->data;
+ remaining = typhoon_fw->size;
+ if (remaining < sizeof(struct typhoon_file_header))
+ goto invalid_fw;
+
+ fHdr = (struct typhoon_file_header *) image_data;
+ if (memcmp(fHdr->tag, "TYPHOON", 8))
+ goto invalid_fw;
+
+ numSections = le32_to_cpu(fHdr->numSections);
+ image_data += sizeof(struct typhoon_file_header);
+ remaining -= sizeof(struct typhoon_file_header);
+
+ while (numSections--) {
+ if (remaining < sizeof(struct typhoon_section_header))
+ goto invalid_fw;
+
+ sHdr = (struct typhoon_section_header *) image_data;
+ image_data += sizeof(struct typhoon_section_header);
+ section_len = le32_to_cpu(sHdr->len);
+
+ if (remaining < section_len)
+ goto invalid_fw;
+
+ image_data += section_len;
+ remaining -= section_len;
+ }
+
+ return 0;
+
+invalid_fw:
+ netdev_err(tp->dev, "Invalid firmware image\n");
+ release_firmware(typhoon_fw);
+ typhoon_fw = NULL;
+ return -EINVAL;
+}
+
+static int
+typhoon_download_firmware(struct typhoon *tp)
+{
+ void __iomem *ioaddr = tp->ioaddr;
+ struct pci_dev *pdev = tp->pdev;
+ const struct typhoon_file_header *fHdr;
+ const struct typhoon_section_header *sHdr;
+ const u8 *image_data;
+ void *dpage;
+ dma_addr_t dpage_dma;
+ __sum16 csum;
+ u32 irqEnabled;
+ u32 irqMasked;
+ u32 numSections;
+ u32 section_len;
+ u32 len;
+ u32 load_addr;
+ u32 hmac;
+ int i;
+ int err;
+
+ image_data = (u8 *) typhoon_fw->data;
+ fHdr = (struct typhoon_file_header *) image_data;
+
+ /* Cannot just map the firmware image using pci_map_single() as
+ * the firmware is vmalloc()'d and may not be physically contiguous,
+ * so we allocate some consistent memory to copy the sections into.
+ */
+ err = -ENOMEM;
+ dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
+ if(!dpage) {
+ netdev_err(tp->dev, "no DMA mem for firmware\n");
+ goto err_out;
+ }
+
+ irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
+ iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
+ ioaddr + TYPHOON_REG_INTR_ENABLE);
+ irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
+ iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
+ ioaddr + TYPHOON_REG_INTR_MASK);
+
+ err = -ETIMEDOUT;
+ if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
+ netdev_err(tp->dev, "card ready timeout\n");
+ goto err_out_irq;
+ }
+
+ numSections = le32_to_cpu(fHdr->numSections);
+ load_addr = le32_to_cpu(fHdr->startAddr);
+
+ iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
+ iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
+ hmac = le32_to_cpu(fHdr->hmacDigest[0]);
+ iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
+ hmac = le32_to_cpu(fHdr->hmacDigest[1]);
+ iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
+ hmac = le32_to_cpu(fHdr->hmacDigest[2]);
+ iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
+ hmac = le32_to_cpu(fHdr->hmacDigest[3]);
+ iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
+ hmac = le32_to_cpu(fHdr->hmacDigest[4]);
+ iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
+ typhoon_post_pci_writes(ioaddr);
+ iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);
+
+ image_data += sizeof(struct typhoon_file_header);
+
+ /* The ioread32() in typhoon_wait_interrupt() will force the
+ * last write to the command register to post, so
+ * we don't need a typhoon_post_pci_writes() after it.
+ */
+ for(i = 0; i < numSections; i++) {
+ sHdr = (struct typhoon_section_header *) image_data;
+ image_data += sizeof(struct typhoon_section_header);
+ load_addr = le32_to_cpu(sHdr->startAddr);
+ section_len = le32_to_cpu(sHdr->len);
+
+ while(section_len) {
+ len = min_t(u32, section_len, PAGE_SIZE);
+
+ if(typhoon_wait_interrupt(ioaddr) < 0 ||
+ ioread32(ioaddr + TYPHOON_REG_STATUS) !=
+ TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
+ netdev_err(tp->dev, "segment ready timeout\n");
+ goto err_out_irq;
+ }
+
+ /* Do a pseudo IPv4 checksum on the data -- first
+ * need to convert each u16 to cpu order before
+ * summing. Fortunately, due to the properties of
+ * the checksum, we can do this once, at the end.
+ */
+ csum = csum_fold(csum_partial_copy_nocheck(image_data,
+ dpage, len,
+ 0));
+
+ iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
+ iowrite32(le16_to_cpu((__force __le16)csum),
+ ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
+ iowrite32(load_addr,
+ ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
+ iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
+ iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
+ typhoon_post_pci_writes(ioaddr);
+ iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
+ ioaddr + TYPHOON_REG_COMMAND);
+
+ image_data += len;
+ load_addr += len;
+ section_len -= len;
+ }
+ }
+
+ if(typhoon_wait_interrupt(ioaddr) < 0 ||
+ ioread32(ioaddr + TYPHOON_REG_STATUS) !=
+ TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
+ netdev_err(tp->dev, "final segment ready timeout\n");
+ goto err_out_irq;
+ }
+
+ iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);
+
+ if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
+ netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
+ ioread32(ioaddr + TYPHOON_REG_STATUS));
+ goto err_out_irq;
+ }
+
+ err = 0;
+
+err_out_irq:
+ iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
+ iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);
+
+ pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);
+
+err_out:
+ return err;
+}
+
+static int
+typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
+{
+ void __iomem *ioaddr = tp->ioaddr;
+
+ if(typhoon_wait_status(ioaddr, initial_status) < 0) {
+ netdev_err(tp->dev, "boot ready timeout\n");
+ goto out_timeout;
+ }
+
+ iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
+ iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
+ typhoon_post_pci_writes(ioaddr);
+ iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
+ ioaddr + TYPHOON_REG_COMMAND);
+
+ if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
+ netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
+ ioread32(ioaddr + TYPHOON_REG_STATUS));
+ goto out_timeout;
+ }
+
+ /* Clear the Transmit and Command ready registers
+ */
+ iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
+ iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
+ iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
+ typhoon_post_pci_writes(ioaddr);
+ iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);
+
+ return 0;
+
+out_timeout:
+ return -ETIMEDOUT;
+}
+
+static u32
+typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
+ volatile __le32 * index)
+{
+ u32 lastRead = txRing->lastRead;
+ struct tx_desc *tx;
+ dma_addr_t skb_dma;
+ int dma_len;
+ int type;
+
+ while(lastRead != le32_to_cpu(*index)) {
+ tx = (struct tx_desc *) (txRing->ringBase + lastRead);
+ type = tx->flags & TYPHOON_TYPE_MASK;
+
+ if(type == TYPHOON_TX_DESC) {
+ /* This tx_desc describes a packet.
+ */
+ unsigned long ptr = tx->tx_addr;
+ struct sk_buff *skb = (struct sk_buff *) ptr;
+ dev_kfree_skb_irq(skb);
+ } else if(type == TYPHOON_FRAG_DESC) {
+ /* This tx_desc describes a memory mapping. Free it.
+ */
+ skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
+ dma_len = le16_to_cpu(tx->len);
+ pci_unmap_single(tp->pdev, skb_dma, dma_len,
+ PCI_DMA_TODEVICE);
+ }
+
+ tx->flags = 0;
+ typhoon_inc_tx_index(&lastRead, 1);
+ }
+
+ return lastRead;
+}
+
+static void
+typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
+ volatile __le32 * index)
+{
+ u32 lastRead;
+ int numDesc = MAX_SKB_FRAGS + 1;
+
+ /* This will need changing if we start to use the Hi Tx ring. */
+ lastRead = typhoon_clean_tx(tp, txRing, index);
+ if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
+ lastRead, TXLO_ENTRIES) > (numDesc + 2))
+ netif_wake_queue(tp->dev);
+
+ txRing->lastRead = lastRead;
+ smp_wmb();
+}
+
+static void
+typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
+{
+ struct typhoon_indexes *indexes = tp->indexes;
+ struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
+ struct basic_ring *ring = &tp->rxBuffRing;
+ struct rx_free *r;
+
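+ /* The free ring is full if advancing lastWrite by one entry would
+ * land on the card's cleared index.
+ */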
+ if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
+ le32_to_cpu(indexes->rxBuffCleared)) {
+ /* no room in ring, just drop the skb
+ */
+ dev_kfree_skb_any(rxb->skb);
+ rxb->skb = NULL;
+ return;
+ }
+
+ r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
+ typhoon_inc_rxfree_index(&ring->lastWrite, 1);
+ r->virtAddr = idx;
+ r->physAddr = cpu_to_le32(rxb->dma_addr);
+
+ /* Tell the card about it */
+ wmb();
+ indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
+}
+
+static int
+typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
+{
+ struct typhoon_indexes *indexes = tp->indexes;
+ struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
+ struct basic_ring *ring = &tp->rxBuffRing;
+ struct rx_free *r;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ rxb->skb = NULL;
+
+ if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
+ le32_to_cpu(indexes->rxBuffCleared))
+ return -ENOMEM;
+
+ skb = dev_alloc_skb(PKT_BUF_SZ);
+ if(!skb)
+ return -ENOMEM;
+
+#if 0
+ /* Please, 3com, fix the firmware to allow DMA to an unaligned
+ * address! Pretty please?
+ */
+ skb_reserve(skb, 2);
+#endif
+
+ skb->dev = tp->dev;
+ dma_addr = pci_map_single(tp->pdev, skb->data,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+
+ /* Since no card does 64 bit DAC, the high bits will never
+ * change from zero.
+ */
+ r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
+ typhoon_inc_rxfree_index(&ring->lastWrite, 1);
+ r->virtAddr = idx;
+ r->physAddr = cpu_to_le32(dma_addr);
+ rxb->skb = skb;
+ rxb->dma_addr = dma_addr;
+
+ /* Tell the card about it */
+ wmb();
+ indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
+ return 0;
+}
+
+static int
+typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
+ volatile __le32 * cleared, int budget)
+{
+ struct rx_desc *rx;
+ struct sk_buff *skb, *new_skb;
+ struct rxbuff_ent *rxb;
+ dma_addr_t dma_addr;
+ u32 local_ready;
+ u32 rxaddr;
+ int pkt_len;
+ u32 idx;
+ __le32 csum_bits;
+ int received;
+
+ received = 0;
+ local_ready = le32_to_cpu(*ready);
+ rxaddr = le32_to_cpu(*cleared);
+ while(rxaddr != local_ready && budget > 0) {
+ rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
+ idx = rx->addr;
+ rxb = &tp->rxbuffers[idx];
+ skb = rxb->skb;
+ dma_addr = rxb->dma_addr;
+
+ typhoon_inc_rx_index(&rxaddr, 1);
+
+ if(rx->flags & TYPHOON_RX_ERROR) {
+ typhoon_recycle_rx_skb(tp, idx);
+ continue;
+ }
+
+ pkt_len = le16_to_cpu(rx->frameLen);
+
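+ /* Small frames get copied into a fresh skb (reserving 2 bytes to
+ * align the IP header) so the original buffer can be recycled;
+ * larger frames are passed up whole and a new buffer is allocated.
+ */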
+ if(pkt_len < rx_copybreak &&
+ (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb_reserve(new_skb, 2);
+ pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
+ PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+ skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
+ pci_dma_sync_single_for_device(tp->pdev, dma_addr,
+ PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+ skb_put(new_skb, pkt_len);
+ typhoon_recycle_rx_skb(tp, idx);
+ } else {
+ new_skb = skb;
+ skb_put(new_skb, pkt_len);
+ pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+ typhoon_alloc_rx_skb(tp, idx);
+ }
+ new_skb->protocol = eth_type_trans(new_skb, tp->dev);
+ csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
+ TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
+ if(csum_bits ==
+ (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
+ csum_bits ==
+ (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
+ new_skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else
+ skb_checksum_none_assert(new_skb);
+
+ if (rx->rxStatus & TYPHOON_RX_VLAN)
+ __vlan_hwaccel_put_tag(new_skb,
+ ntohl(rx->vlanTag) & 0xffff);
+ netif_receive_skb(new_skb);
+
+ received++;
+ budget--;
+ }
+ *cleared = cpu_to_le32(rxaddr);
+
+ return received;
+}
+
+static void
+typhoon_fill_free_ring(struct typhoon *tp)
+{
+ u32 i;
+
+ for(i = 0; i < RXENT_ENTRIES; i++) {
+ struct rxbuff_ent *rxb = &tp->rxbuffers[i];
+ if(rxb->skb)
+ continue;
+ if(typhoon_alloc_rx_skb(tp, i) < 0)
+ break;
+ }
+}
+
+static int
+typhoon_poll(struct napi_struct *napi, int budget)
+{
+ struct typhoon *tp = container_of(napi, struct typhoon, napi);
+ struct typhoon_indexes *indexes = tp->indexes;
+ int work_done;
+
+ rmb();
+ if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
+ typhoon_process_response(tp, 0, NULL);
+
+ if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
+ typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);
+
+ work_done = 0;
+
+ if(indexes->rxHiCleared != indexes->rxHiReady) {
+ work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
+ &indexes->rxHiCleared, budget);
+ }
+
+ if(indexes->rxLoCleared != indexes->rxLoReady) {
+ work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
+ &indexes->rxLoCleared, budget - work_done);
+ }
+
+ if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
+ /* rxBuff ring is empty, try to fill it. */
+ typhoon_fill_free_ring(tp);
+ }
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ iowrite32(TYPHOON_INTR_NONE,
+ tp->ioaddr + TYPHOON_REG_INTR_MASK);
+ typhoon_post_pci_writes(tp->ioaddr);
+ }
+
+ return work_done;
+}
+
+static irqreturn_t
+typhoon_interrupt(int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct typhoon *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->ioaddr;
+ u32 intr_status;
+
+ intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
+ if(!(intr_status & TYPHOON_INTR_HOST_INT))
+ return IRQ_NONE;
+
+ iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
+
+ if (napi_schedule_prep(&tp->napi)) {
+ iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
+ typhoon_post_pci_writes(ioaddr);
+ __napi_schedule(&tp->napi);
+ } else {
+ netdev_err(dev, "Error, poll already scheduled\n");
+ }
+ return IRQ_HANDLED;
+}
+
+static void
+typhoon_free_rx_rings(struct typhoon *tp)
+{
+ u32 i;
+
+ for(i = 0; i < RXENT_ENTRIES; i++) {
+ struct rxbuff_ent *rxb = &tp->rxbuffers[i];
+ if(rxb->skb) {
+ pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(rxb->skb);
+ rxb->skb = NULL;
+ }
+ }
+}
+
+static int
+typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
+{
+ struct pci_dev *pdev = tp->pdev;
+ void __iomem *ioaddr = tp->ioaddr;
+ struct cmd_desc xp_cmd;
+ int err;
+
+ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
+ xp_cmd.parm1 = events;
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0) {
+ netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
+ err);
+ return err;
+ }
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0) {
+ netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
+ return err;
+ }
+
+ if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
+ return -ETIMEDOUT;
+
+ /* Since we cannot monitor the status of the link while sleeping,
+ * tell the world it went away.
+ */
+ netif_carrier_off(tp->dev);
+
+ pci_enable_wake(tp->pdev, state, 1);
+ pci_disable_device(pdev);
+ return pci_set_power_state(pdev, state);
+}
+
+static int
+typhoon_wakeup(struct typhoon *tp, int wait_type)
+{
+ struct pci_dev *pdev = tp->pdev;
+ void __iomem *ioaddr = tp->ioaddr;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ /* Post 2.x.x versions of the Sleep Image require a reset before
+ * we can download the Runtime Image. But let's not make users of
+ * the old firmware pay for the reset.
+ */
+ iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
+ if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
+ (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
+ return typhoon_reset(ioaddr, wait_type);
+
+ return 0;
+}
+
+static int
+typhoon_start_runtime(struct typhoon *tp)
+{
+ struct net_device *dev = tp->dev;
+ void __iomem *ioaddr = tp->ioaddr;
+ struct cmd_desc xp_cmd;
+ int err;
+
+ typhoon_init_rings(tp);
+ typhoon_fill_free_ring(tp);
+
+ err = typhoon_download_firmware(tp);
+ if(err < 0) {
+ netdev_err(tp->dev, "cannot load runtime on 3XP\n");
+ goto error_out;
+ }
+
+ if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
+ netdev_err(tp->dev, "cannot boot 3XP\n");
+ err = -EIO;
+ goto error_out;
+ }
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
+ xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0)
+ goto error_out;
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
+ xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
+ xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0)
+ goto error_out;
+
+ /* Disable IRQ coalescing -- we can reenable it when 3Com gives
+ * us some more information on how to control it.
+ */
+ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
+ xp_cmd.parm1 = 0;
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0)
+ goto error_out;
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
+ xp_cmd.parm1 = tp->xcvr_select;
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0)
+ goto error_out;
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
+ xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0)
+ goto error_out;
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
+ xp_cmd.parm2 = tp->offload;
+ xp_cmd.parm3 = tp->offload;
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0)
+ goto error_out;
+
+ typhoon_set_rx_mode(dev);
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0)
+ goto error_out;
+
+ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
+ err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ if(err < 0)
+ goto error_out;
+
+ tp->card_state = Running;
+ smp_wmb();
+
+ iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
+ iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
+ typhoon_post_pci_writes(ioaddr);
+
+ return 0;
+
+error_out:
+ typhoon_reset(ioaddr, WaitNoSleep);
+ typhoon_free_rx_rings(tp);
+ typhoon_init_rings(tp);
+ return err;
+}
+
+static int
+typhoon_stop_runtime(struct typhoon *tp, int wait_type)
+{
+ struct typhoon_indexes *indexes = tp->indexes;
+ struct transmit_ring *txLo = &tp->txLoRing;
+ void __iomem *ioaddr = tp->ioaddr;
+ struct cmd_desc xp_cmd;
+ int i;
+
+ /* Disable interrupts early, since we can't schedule a poll
+ * when called with !netif_running(). This will be posted
+ * when we force the posting of the command.
+ */
+ iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
+ typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+
+ /* Wait 1/2 sec for any outstanding transmits to complete.
+ * We'll clean up after the reset if this times out.
+ */
+ for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
+ if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
+ break;
+ udelay(TYPHOON_UDELAY);
+ }
+
+ if(i == TYPHOON_WAIT_TIMEOUT)
+ netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
+ typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+
+ /* save the statistics so when we bring the interface up again,
+ * the values reported to userspace are correct.
+ */
+ tp->card_state = Sleeping;
+ smp_wmb();
+ typhoon_do_get_stats(tp);
+ memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
+ typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+
+ if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
+ netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");
+
+ if(typhoon_reset(ioaddr, wait_type) < 0) {
+ netdev_err(tp->dev, "unable to reset 3XP\n");
+ return -ETIMEDOUT;
+ }
+
+ /* cleanup any outstanding Tx packets */
+ if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
+ indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
+ typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
+ }
+
+ return 0;
+}
+
+static void
+typhoon_tx_timeout(struct net_device *dev)
+{
+ struct typhoon *tp = netdev_priv(dev);
+
+ if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
+ netdev_warn(dev, "could not reset in tx timeout\n");
+ goto truly_dead;
+ }
+
+ /* If we ever start using the Hi ring, it will need cleaning too */
+ typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
+ typhoon_free_rx_rings(tp);
+
+ if(typhoon_start_runtime(tp) < 0) {
+ netdev_err(dev, "could not start runtime in tx timeout\n");
+ goto truly_dead;
+ }
+
+ netif_wake_queue(dev);
+ return;
+
+truly_dead:
+ /* Reset the hardware, and turn off carrier to avoid more timeouts */
+ typhoon_reset(tp->ioaddr, NoWait);
+ netif_carrier_off(dev);
+}
+
+static int
+typhoon_open(struct net_device *dev)
+{
+ struct typhoon *tp = netdev_priv(dev);
+ int err;
+
+ err = typhoon_request_firmware(tp);
+ if (err)
+ goto out;
+
+ err = typhoon_wakeup(tp, WaitSleep);
+ if(err < 0) {
+ netdev_err(dev, "unable to wakeup device\n");
+ goto out_sleep;
+ }
+
+ err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
+ dev->name, dev);
+ if(err < 0)
+ goto out_sleep;
+
+ napi_enable(&tp->napi);
+
+ err = typhoon_start_runtime(tp);
+ if(err < 0) {
+ napi_disable(&tp->napi);
+ goto out_irq;
+ }
+
+ netif_start_queue(dev);
+ return 0;
+
+out_irq:
+ free_irq(dev->irq, dev);
+
+out_sleep:
+ if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
+ netdev_err(dev, "unable to reboot into sleep img\n");
+ typhoon_reset(tp->ioaddr, NoWait);
+ goto out;
+ }
+
+ if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
+ netdev_err(dev, "unable to go back to sleep\n");
+
+out:
+ return err;
+}
+
+static int
+typhoon_close(struct net_device *dev)
+{
+ struct typhoon *tp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ napi_disable(&tp->napi);
+
+ if(typhoon_stop_runtime(tp, WaitSleep) < 0)
+ netdev_err(dev, "unable to stop runtime\n");
+
+ /* Make sure there is no irq handler running on a different CPU. */
+ free_irq(dev->irq, dev);
+
+ typhoon_free_rx_rings(tp);
+ typhoon_init_rings(tp);
+
+ if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
+ netdev_err(dev, "unable to boot sleep image\n");
+
+ if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
+ netdev_err(dev, "unable to put card to sleep\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int
+typhoon_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct typhoon *tp = netdev_priv(dev);
+
+	/* If we're down, resume when the interface is brought back up.
+	 */
+ if(!netif_running(dev))
+ return 0;
+
+ if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
+ netdev_err(dev, "critical: could not wake up in resume\n");
+ goto reset;
+ }
+
+ if(typhoon_start_runtime(tp) < 0) {
+ netdev_err(dev, "critical: could not start runtime in resume\n");
+ goto reset;
+ }
+
+ netif_device_attach(dev);
+ return 0;
+
+reset:
+ typhoon_reset(tp->ioaddr, NoWait);
+ return -EBUSY;
+}
+
+static int
+typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct typhoon *tp = netdev_priv(dev);
+ struct cmd_desc xp_cmd;
+
+ /* If we're down, we're already suspended.
+ */
+ if(!netif_running(dev))
+ return 0;
+
+ /* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */
+ if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
+ netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");
+
+ netif_device_detach(dev);
+
+ if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
+ netdev_err(dev, "unable to stop runtime\n");
+ goto need_resume;
+ }
+
+ typhoon_free_rx_rings(tp);
+ typhoon_init_rings(tp);
+
+ if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
+ netdev_err(dev, "unable to boot sleep image\n");
+ goto need_resume;
+ }
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
+ xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
+ xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
+ if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
+ netdev_err(dev, "unable to set mac address in suspend\n");
+ goto need_resume;
+ }
+
+ INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
+ xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
+ if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
+ netdev_err(dev, "unable to set rx filter in suspend\n");
+ goto need_resume;
+ }
+
+ if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
+ netdev_err(dev, "unable to put card to sleep\n");
+ goto need_resume;
+ }
+
+ return 0;
+
+need_resume:
+ typhoon_resume(pdev);
+ return -EBUSY;
+}
+#endif
+
+static int __devinit
+typhoon_test_mmio(struct pci_dev *pdev)
+{
+ void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
+ int mode = 0;
+ u32 val;
+
+ if(!ioaddr)
+ goto out;
+
+ if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
+ TYPHOON_STATUS_WAITING_FOR_HOST)
+ goto out_unmap;
+
+ iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
+ iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
+ iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
+
+ /* Ok, see if we can change our interrupt status register by
+ * sending ourselves an interrupt. If so, then MMIO works.
+ * The 50usec delay is arbitrary -- it could probably be smaller.
+ */
+ val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
+ if((val & TYPHOON_INTR_SELF) == 0) {
+ iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
+ ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
+ udelay(50);
+ val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
+ if(val & TYPHOON_INTR_SELF)
+ mode = 1;
+ }
+
+ iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
+ iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
+ iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
+ ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
+
+out_unmap:
+ pci_iounmap(pdev, ioaddr);
+
+out:
+ if(!mode)
+ pr_info("%s: falling back to port IO\n", pci_name(pdev));
+ return mode;
+}
+
+static const struct net_device_ops typhoon_netdev_ops = {
+ .ndo_open = typhoon_open,
+ .ndo_stop = typhoon_close,
+ .ndo_start_xmit = typhoon_start_tx,
+ .ndo_set_rx_mode = typhoon_set_rx_mode,
+ .ndo_tx_timeout = typhoon_tx_timeout,
+ .ndo_get_stats = typhoon_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = typhoon_set_mac_address,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static int __devinit
+typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct typhoon *tp;
+ int card_id = (int) ent->driver_data;
+ void __iomem *ioaddr;
+ void *shared;
+ dma_addr_t shared_dma;
+ struct cmd_desc xp_cmd;
+ struct resp_desc xp_resp[3];
+ int err = 0;
+ const char *err_msg;
+
+ dev = alloc_etherdev(sizeof(*tp));
+ if(dev == NULL) {
+ err_msg = "unable to alloc new net device";
+ err = -ENOMEM;
+ goto error_out;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = pci_enable_device(pdev);
+ if(err < 0) {
+ err_msg = "unable to enable device";
+ goto error_out_dev;
+ }
+
+ err = pci_set_mwi(pdev);
+ if(err < 0) {
+ err_msg = "unable to set MWI";
+ goto error_out_disable;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if(err < 0) {
+ err_msg = "No usable DMA configuration";
+ goto error_out_mwi;
+ }
+
+ /* sanity checks on IO and MMIO BARs
+ */
+ if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
+		err_msg = "region #0 not a PCI IO resource, aborting";
+ err = -ENODEV;
+ goto error_out_mwi;
+ }
+ if(pci_resource_len(pdev, 0) < 128) {
+ err_msg = "Invalid PCI IO region size, aborting";
+ err = -ENODEV;
+ goto error_out_mwi;
+ }
+ if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+ err_msg = "region #1 not a PCI MMIO resource, aborting";
+ err = -ENODEV;
+ goto error_out_mwi;
+ }
+ if(pci_resource_len(pdev, 1) < 128) {
+ err_msg = "Invalid PCI MMIO region size, aborting";
+ err = -ENODEV;
+ goto error_out_mwi;
+ }
+
+ err = pci_request_regions(pdev, KBUILD_MODNAME);
+ if(err < 0) {
+ err_msg = "could not request regions";
+ goto error_out_mwi;
+ }
+
+ /* map our registers
+ */
+ if(use_mmio != 0 && use_mmio != 1)
+ use_mmio = typhoon_test_mmio(pdev);
+
+ ioaddr = pci_iomap(pdev, use_mmio, 128);
+ if (!ioaddr) {
+ err_msg = "cannot remap registers, aborting";
+ err = -EIO;
+ goto error_out_regions;
+ }
+
+ /* allocate pci dma space for rx and tx descriptor rings
+ */
+ shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
+ &shared_dma);
+ if(!shared) {
+ err_msg = "could not allocate DMA memory";
+ err = -ENOMEM;
+ goto error_out_remap;
+ }
+
+ dev->irq = pdev->irq;
+ tp = netdev_priv(dev);
+ tp->shared = shared;
+ tp->shared_dma = shared_dma;
+ tp->pdev = pdev;
+ tp->tx_pdev = pdev;
+ tp->ioaddr = ioaddr;
+ tp->tx_ioaddr = ioaddr;
+ tp->dev = dev;
+
+ /* Init sequence:
+ * 1) Reset the adapter to clear any bad juju
+ * 2) Reload the sleep image
+ * 3) Boot the sleep image
+ * 4) Get the hardware address.
+ * 5) Put the card to sleep.
+ */
+ if (typhoon_reset(ioaddr, WaitSleep) < 0) {
+ err_msg = "could not reset 3XP";
+ err = -EIO;
+ goto error_out_dma;
+ }
+
+ /* Now that we've reset the 3XP and are sure it's not going to
+ * write all over memory, enable bus mastering, and save our
+ * state for resuming after a suspend.
+ */
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+
+ typhoon_init_interface(tp);
+ typhoon_init_rings(tp);
+
+ if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
+ err_msg = "cannot boot 3XP sleep image";
+ err = -EIO;
+ goto error_out_reset;
+ }
+
+ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
+ if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
+ err_msg = "cannot read MAC address";
+ err = -EIO;
+ goto error_out_reset;
+ }
+
+ *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
+ *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
+
+	if(!is_valid_ether_addr(dev->dev_addr)) {
+		err_msg = "Could not obtain valid ethernet address, aborting";
+		err = -EIO;
+		goto error_out_reset;
+	}
+
+ /* Read the Sleep Image version last, so the response is valid
+ * later when we print out the version reported.
+ */
+ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
+	if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
+		err_msg = "Could not get Sleep Image version";
+		err = -EIO;
+		goto error_out_reset;
+	}
+
+ tp->capabilities = typhoon_card_info[card_id].capabilities;
+ tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
+
+ /* Typhoon 1.0 Sleep Images return one response descriptor to the
+ * READ_VERSIONS command. Those versions are OK after waking up
+ * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
+ * seem to need a little extra help to get started. Since we don't
+ * know how to nudge it along, just kick it.
+ */
+ if(xp_resp[0].numDesc != 0)
+ tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
+
+ if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
+ err_msg = "cannot put adapter to sleep";
+ err = -EIO;
+ goto error_out_reset;
+ }
+
+ /* The chip-specific entries in the device structure. */
+ dev->netdev_ops = &typhoon_netdev_ops;
+ netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
+
+ /* We can handle scatter gather, up to 16 entries, and
+ * we can do IP checksumming (only version 4, doh...)
+ *
+ * There's no way to turn off the RX VLAN offloading and stripping
+ * on the current 3XP firmware -- it does not respect the offload
+ * settings -- so we only allow the user to toggle the TX processing.
+ */
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_HW_VLAN_TX;
+ dev->features = dev->hw_features |
+ NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM;
+
+	err = register_netdev(dev);
+	if(err < 0) {
+		err_msg = "unable to register netdev";
+		goto error_out_reset;
+	}
+
+ pci_set_drvdata(pdev, dev);
+
+ netdev_info(dev, "%s at %s 0x%llx, %pM\n",
+ typhoon_card_info[card_id].name,
+ use_mmio ? "MMIO" : "IO",
+ (unsigned long long)pci_resource_start(pdev, use_mmio),
+ dev->dev_addr);
+
+ /* xp_resp still contains the response to the READ_VERSIONS command.
+ * For debugging, let the user know what version he has.
+ */
+ if(xp_resp[0].numDesc == 0) {
+ /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
+ * of version is Month/Day of build.
+ */
+ u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
+ netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
+ monthday >> 8, monthday & 0xff);
+ } else if(xp_resp[0].numDesc == 2) {
+ /* This is the Typhoon 1.1+ type Sleep Image
+ */
+ u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
+ u8 *ver_string = (u8 *) &xp_resp[1];
+ ver_string[25] = 0;
+ netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
+ sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
+ sleep_ver & 0xfff, ver_string);
+ } else {
+ netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
+ xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
+ }
+
+ return 0;
+
+error_out_reset:
+ typhoon_reset(ioaddr, NoWait);
+
+error_out_dma:
+ pci_free_consistent(pdev, sizeof(struct typhoon_shared),
+ shared, shared_dma);
+error_out_remap:
+ pci_iounmap(pdev, ioaddr);
+error_out_regions:
+ pci_release_regions(pdev);
+error_out_mwi:
+ pci_clear_mwi(pdev);
+error_out_disable:
+ pci_disable_device(pdev);
+error_out_dev:
+ free_netdev(dev);
+error_out:
+ pr_err("%s: %s\n", pci_name(pdev), err_msg);
+ return err;
+}
+
+static void __devexit
+typhoon_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct typhoon *tp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ typhoon_reset(tp->ioaddr, NoWait);
+ pci_iounmap(pdev, tp->ioaddr);
+ pci_free_consistent(pdev, sizeof(struct typhoon_shared),
+ tp->shared, tp->shared_dma);
+ pci_release_regions(pdev);
+ pci_clear_mwi(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(dev);
+}
+
+static struct pci_driver typhoon_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = typhoon_pci_tbl,
+ .probe = typhoon_init_one,
+ .remove = __devexit_p(typhoon_remove_one),
+#ifdef CONFIG_PM
+ .suspend = typhoon_suspend,
+ .resume = typhoon_resume,
+#endif
+};
+
+static int __init
+typhoon_init(void)
+{
+ return pci_register_driver(&typhoon_driver);
+}
+
+static void __exit
+typhoon_cleanup(void)
+{
+ if (typhoon_fw)
+ release_firmware(typhoon_fw);
+ pci_unregister_driver(&typhoon_driver);
+}
+
+module_init(typhoon_init);
+module_exit(typhoon_cleanup);
diff --git a/drivers/net/ethernet/3com/typhoon.h b/drivers/net/ethernet/3com/typhoon.h
new file mode 100644
index 000000000000..88187fc84aa3
--- /dev/null
+++ b/drivers/net/ethernet/3com/typhoon.h
@@ -0,0 +1,624 @@
+/* typhoon.h: chip info for the 3Com 3CR990 family of controllers */
+/*
+ Written 2002-2003 by David Dillow <dave@thedillows.org>
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This software is available on a public web site. It may enable
+ cryptographic capabilities of the 3Com hardware, and may be
+ exported from the United States under License Exception "TSU"
+ pursuant to 15 C.F.R. Section 740.13(e).
+
+ This work was funded by the National Library of Medicine under
+ the Department of Energy project number 0274DD06D1 and NLM project
+ number Y1-LM-2015-01.
+*/
+
+/* All Typhoon ring positions are specified in bytes, and point to the
+ * first "clean" entry in the ring -- ie the next entry we use for whatever
+ * purpose.
+ */
+
+/* The Typhoon basic ring
+ * ringBase: where this ring lives (our virtual address)
+ * lastWrite: the next entry we'll use
+ */
+struct basic_ring {
+ u8 *ringBase;
+ u32 lastWrite;
+};
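+
+/* A worked example of the byte-offset convention: advancing a command
+ * ring past one descriptor adds the descriptor size and wraps at the
+ * ring size in bytes. A minimal sketch (hypothetical helper, shown only
+ * to illustrate the convention):
+ *
+ *	static void ring_advance(struct basic_ring *ring, u32 ring_size)
+ *	{
+ *		ring->lastWrite += sizeof(struct cmd_desc);
+ *		if (ring->lastWrite == ring_size)
+ *			ring->lastWrite = 0;
+ *	}
+ */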
+
+/* The Typhoon transmit ring -- same as a basic ring, plus:
+ * lastRead: where we're at in regard to cleaning up the ring
+ * writeRegister: register to use for writing (different for Hi & Lo rings)
+ */
+struct transmit_ring {
+ u8 *ringBase;
+ u32 lastWrite;
+ u32 lastRead;
+ int writeRegister;
+};
+
+/* The host<->Typhoon ring index structure
+ * This indicates the current positions in the rings
+ *
+ * All values must be in little endian format for the 3XP
+ *
+ * rxHiCleared: entry we've cleared to in the Hi receive ring
+ * rxLoCleared: entry we've cleared to in the Lo receive ring
+ * rxBuffReady: next entry we'll put a free buffer in
+ * respCleared: entry we've cleared to in the response ring
+ *
+ * txLoCleared: entry the NIC has cleared to in the Lo transmit ring
+ * txHiCleared: entry the NIC has cleared to in the Hi transmit ring
+ * rxLoReady: entry the NIC has filled to in the Lo receive ring
+ * rxBuffCleared: entry the NIC has cleared in the free buffer ring
+ * cmdCleared: entry the NIC has cleared in the command ring
+ * respReady: entry the NIC has filled to in the response ring
+ * rxHiReady: entry the NIC has filled to in the Hi receive ring
+ */
+struct typhoon_indexes {
+ /* The first four are written by the host, and read by the NIC */
+ volatile __le32 rxHiCleared;
+ volatile __le32 rxLoCleared;
+ volatile __le32 rxBuffReady;
+ volatile __le32 respCleared;
+
+ /* The remaining are written by the NIC, and read by the host */
+ volatile __le32 txLoCleared;
+ volatile __le32 txHiCleared;
+ volatile __le32 rxLoReady;
+ volatile __le32 rxBuffCleared;
+ volatile __le32 cmdCleared;
+ volatile __le32 respReady;
+ volatile __le32 rxHiReady;
+} __packed;
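+
+/* Since producer and consumer each publish a byte offset, free space in a
+ * ring the host produces into can be computed with modular arithmetic. A
+ * sketch (hypothetical helper; it keeps one slot empty so a full ring is
+ * distinguishable from an empty one):
+ *
+ *	static int ring_bytes_free(u32 lastWrite, u32 lastRead, u32 ringSize)
+ *	{
+ *		return (ringSize + lastRead - lastWrite - 1) % ringSize;
+ *	}
+ */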
+
+/* The host<->Typhoon interface
+ * Our means of communicating where things are
+ *
+ * All values must be in little endian format for the 3XP
+ *
+ * ringIndex: 64 bit bus address of the index structure
+ * txLoAddr: 64 bit bus address of the Lo transmit ring
+ * txLoSize: size (in bytes) of the Lo transmit ring
+ * txHi*: as above for the Hi priority transmit ring
+ * rxLo*: as above for the Lo priority receive ring
+ * rxBuff*: as above for the free buffer ring
+ * cmd*: as above for the command ring
+ * resp*: as above for the response ring
+ * zeroAddr: 64 bit bus address of a zero word (for DMA)
+ * rxHi*: as above for the Hi Priority receive ring
+ *
+ * While there is room for 64 bit addresses, current versions of the 3XP
+ * only do 32 bit addresses, so the *Hi for each of the above will always
+ * be zero.
+ */
+struct typhoon_interface {
+ __le32 ringIndex;
+ __le32 ringIndexHi;
+ __le32 txLoAddr;
+ __le32 txLoAddrHi;
+ __le32 txLoSize;
+ __le32 txHiAddr;
+ __le32 txHiAddrHi;
+ __le32 txHiSize;
+ __le32 rxLoAddr;
+ __le32 rxLoAddrHi;
+ __le32 rxLoSize;
+ __le32 rxBuffAddr;
+ __le32 rxBuffAddrHi;
+ __le32 rxBuffSize;
+ __le32 cmdAddr;
+ __le32 cmdAddrHi;
+ __le32 cmdSize;
+ __le32 respAddr;
+ __le32 respAddrHi;
+ __le32 respSize;
+ __le32 zeroAddr;
+ __le32 zeroAddrHi;
+ __le32 rxHiAddr;
+ __le32 rxHiAddrHi;
+ __le32 rxHiSize;
+} __packed;
+
+/* The Typhoon transmit/fragment descriptor
+ *
+ * A packet is described by a packet descriptor, followed by option descriptors,
+ * if any, then one or more fragment descriptors.
+ *
+ * Packet descriptor:
+ * flags: Descriptor type
+ *	len:		zero, or length of this packet
+ * addr*: 8 bytes of opaque data to the firmware -- for skb pointer
+ * processFlags: Determine offload tasks to perform on this packet.
+ *
+ * Fragment descriptor:
+ * flags: Descriptor type
+ *	len:		length of this fragment
+ * addr: low bytes of DMA address for this part of the packet
+ * addrHi: hi bytes of DMA address for this part of the packet
+ * processFlags: must be zero
+ *
+ * TYPHOON_DESC_VALID is not mentioned in their docs, but their Linux
+ * driver uses it.
+ */
+struct tx_desc {
+ u8 flags;
+#define TYPHOON_TYPE_MASK 0x07
+#define TYPHOON_FRAG_DESC 0x00
+#define TYPHOON_TX_DESC 0x01
+#define TYPHOON_CMD_DESC 0x02
+#define TYPHOON_OPT_DESC 0x03
+#define TYPHOON_RX_DESC 0x04
+#define TYPHOON_RESP_DESC 0x05
+#define TYPHOON_OPT_TYPE_MASK 0xf0
+#define TYPHOON_OPT_IPSEC 0x00
+#define TYPHOON_OPT_TCP_SEG 0x10
+#define TYPHOON_CMD_RESPOND 0x40
+#define TYPHOON_RESP_ERROR 0x40
+#define TYPHOON_RX_ERROR 0x40
+#define TYPHOON_DESC_VALID 0x80
+ u8 numDesc;
+ __le16 len;
+ union {
+ struct {
+ __le32 addr;
+ __le32 addrHi;
+ } frag;
+ u64 tx_addr; /* opaque for hardware, for TX_DESC */
+ };
+ __le32 processFlags;
+#define TYPHOON_TX_PF_NO_CRC cpu_to_le32(0x00000001)
+#define TYPHOON_TX_PF_IP_CHKSUM cpu_to_le32(0x00000002)
+#define TYPHOON_TX_PF_TCP_CHKSUM cpu_to_le32(0x00000004)
+#define TYPHOON_TX_PF_TCP_SEGMENT cpu_to_le32(0x00000008)
+#define TYPHOON_TX_PF_INSERT_VLAN cpu_to_le32(0x00000010)
+#define TYPHOON_TX_PF_IPSEC cpu_to_le32(0x00000020)
+#define TYPHOON_TX_PF_VLAN_PRIORITY cpu_to_le32(0x00000040)
+#define TYPHOON_TX_PF_UDP_CHKSUM cpu_to_le32(0x00000080)
+#define TYPHOON_TX_PF_PAD_FRAME cpu_to_le32(0x00000100)
+#define TYPHOON_TX_PF_RESERVED cpu_to_le32(0x00000e00)
+#define TYPHOON_TX_PF_VLAN_MASK cpu_to_le32(0x0ffff000)
+#define TYPHOON_TX_PF_INTERNAL cpu_to_le32(0xf0000000)
+#define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12
+} __packed;
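+
+/* For illustration, filling one fragment descriptor for a DMA mapping
+ * might look like the sketch below ("mapping" and "frag_len" are
+ * placeholders; the real driver also writes the preceding packet
+ * descriptor and handles ring wrap):
+ *
+ *	txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
+ *	txd->numDesc = 0;
+ *	txd->len = cpu_to_le16(frag_len);
+ *	txd->frag.addr = cpu_to_le32(mapping);	// low 32 DMA bits
+ *	txd->frag.addrHi = 0;			// 3XP only does 32 bit
+ *	txd->processFlags = 0;			// must be zero for frags
+ */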
+
+/* The TCP Segmentation offload option descriptor
+ *
+ * flags: descriptor type
+ * numDesc: must be 1
+ * mss_flags: bits 0-11 (little endian) are MSS, 12 is first TSO descriptor
+ *		13 is last TSO descriptor, set both if only one TSO
+ * respAddrLo: low bytes of address of the bytesTx field of this descriptor
+ * bytesTx: total number of bytes in this TSO request
+ * status: 0 on completion
+ */
+struct tcpopt_desc {
+ u8 flags;
+ u8 numDesc;
+ __le16 mss_flags;
+#define TYPHOON_TSO_FIRST cpu_to_le16(0x1000)
+#define TYPHOON_TSO_LAST cpu_to_le16(0x2000)
+ __le32 respAddrLo;
+ __le32 bytesTx;
+ __le32 status;
+} __packed;
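+
+/* A single-descriptor TSO request sets both FIRST and LAST along with the
+ * MSS in mss_flags. Sketch ("mss" and "total_len" are placeholders):
+ *
+ *	tso->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
+ *	tso->numDesc = 1;
+ *	tso->mss_flags = cpu_to_le16(mss) | TYPHOON_TSO_FIRST |
+ *				TYPHOON_TSO_LAST;
+ *	tso->bytesTx = cpu_to_le32(total_len);
+ *	tso->status = 0;
+ */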
+
+/* The IPSEC Offload descriptor
+ *
+ * flags: descriptor type
+ * numDesc: must be 1
+ * ipsecFlags: bit 0: 0 -- generate IV, 1 -- use supplied IV
+ * sa1, sa2: Security Association IDs for this packet
+ * reserved: set to 0
+ */
+struct ipsec_desc {
+ u8 flags;
+ u8 numDesc;
+ __le16 ipsecFlags;
+#define TYPHOON_IPSEC_GEN_IV cpu_to_le16(0x0000)
+#define TYPHOON_IPSEC_USE_IV cpu_to_le16(0x0001)
+ __le32 sa1;
+ __le32 sa2;
+ __le32 reserved;
+} __packed;
+
+/* The Typhoon receive descriptor (Updated by NIC)
+ *
+ * flags: Descriptor type, error indication
+ * numDesc: Always zero
+ * frameLen: the size of the packet received
+ * addr: low 32 bits of the virtual addr passed in for this buffer
+ * addrHi: high 32 bits of the virtual addr passed in for this buffer
+ * rxStatus: Error if set in flags, otherwise result of offload processing
+ * filterResults: results of filtering on packet, not used
+ * ipsecResults: Results of IPSEC processing
+ * vlanTag: the 802.1q TCI from the packet
+ */
+struct rx_desc {
+ u8 flags;
+ u8 numDesc;
+ __le16 frameLen;
+ u32 addr; /* opaque, comes from virtAddr */
+ u32 addrHi; /* opaque, comes from virtAddrHi */
+ __le32 rxStatus;
+#define TYPHOON_RX_ERR_INTERNAL cpu_to_le32(0x00000000)
+#define TYPHOON_RX_ERR_FIFO_UNDERRUN cpu_to_le32(0x00000001)
+#define TYPHOON_RX_ERR_BAD_SSD cpu_to_le32(0x00000002)
+#define TYPHOON_RX_ERR_RUNT cpu_to_le32(0x00000003)
+#define TYPHOON_RX_ERR_CRC cpu_to_le32(0x00000004)
+#define TYPHOON_RX_ERR_OVERSIZE cpu_to_le32(0x00000005)
+#define TYPHOON_RX_ERR_ALIGN cpu_to_le32(0x00000006)
+#define TYPHOON_RX_ERR_DRIBBLE cpu_to_le32(0x00000007)
+#define TYPHOON_RX_PROTO_MASK cpu_to_le32(0x00000003)
+#define TYPHOON_RX_PROTO_UNKNOWN cpu_to_le32(0x00000000)
+#define TYPHOON_RX_PROTO_IP cpu_to_le32(0x00000001)
+#define TYPHOON_RX_PROTO_IPX cpu_to_le32(0x00000002)
+#define TYPHOON_RX_VLAN cpu_to_le32(0x00000004)
+#define TYPHOON_RX_IP_FRAG cpu_to_le32(0x00000008)
+#define TYPHOON_RX_IPSEC cpu_to_le32(0x00000010)
+#define TYPHOON_RX_IP_CHK_FAIL cpu_to_le32(0x00000020)
+#define TYPHOON_RX_TCP_CHK_FAIL cpu_to_le32(0x00000040)
+#define TYPHOON_RX_UDP_CHK_FAIL cpu_to_le32(0x00000080)
+#define TYPHOON_RX_IP_CHK_GOOD cpu_to_le32(0x00000100)
+#define TYPHOON_RX_TCP_CHK_GOOD cpu_to_le32(0x00000200)
+#define TYPHOON_RX_UDP_CHK_GOOD cpu_to_le32(0x00000400)
+ __le16 filterResults;
+#define TYPHOON_RX_FILTER_MASK cpu_to_le16(0x7fff)
+#define TYPHOON_RX_FILTERED cpu_to_le16(0x8000)
+ __le16 ipsecResults;
+#define TYPHOON_RX_OUTER_AH_GOOD cpu_to_le16(0x0001)
+#define TYPHOON_RX_OUTER_ESP_GOOD cpu_to_le16(0x0002)
+#define TYPHOON_RX_INNER_AH_GOOD cpu_to_le16(0x0004)
+#define TYPHOON_RX_INNER_ESP_GOOD cpu_to_le16(0x0008)
+#define TYPHOON_RX_OUTER_AH_FAIL cpu_to_le16(0x0010)
+#define TYPHOON_RX_OUTER_ESP_FAIL cpu_to_le16(0x0020)
+#define TYPHOON_RX_INNER_AH_FAIL cpu_to_le16(0x0040)
+#define TYPHOON_RX_INNER_ESP_FAIL cpu_to_le16(0x0080)
+#define TYPHOON_RX_UNKNOWN_SA cpu_to_le16(0x0100)
+#define TYPHOON_RX_ESP_FORMAT_ERR cpu_to_le16(0x0200)
+ __be32 vlanTag;
+} __packed;
+
+/* The Typhoon free buffer descriptor, used to give a buffer to the NIC
+ *
+ * physAddr: low 32 bits of the bus address of the buffer
+ * physAddrHi: high 32 bits of the bus address of the buffer, always zero
+ * virtAddr: low 32 bits of the skb address
+ * virtAddrHi: high 32 bits of the skb address, always zero
+ *
+ * the virt* address is basically two 32 bit cookies, just passed back
+ * from the NIC
+ */
+struct rx_free {
+ __le32 physAddr;
+ __le32 physAddrHi;
+ u32 virtAddr;
+ u32 virtAddrHi;
+} __packed;
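+
+/* Handing the NIC a buffer is then a matter of filling the next rx_free
+ * slot and advancing rxBuffReady. Sketch (what to stash in the virtAddr
+ * cookies is entirely up to the host driver):
+ *
+ *	r->physAddr = cpu_to_le32(dma_addr);	// low 32 bits of mapping
+ *	r->physAddrHi = 0;			// 3XP only does 32 bit
+ *	r->virtAddr = cookie_lo;		// opaque to the NIC
+ *	r->virtAddrHi = cookie_hi;
+ */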
+
+/* The Typhoon command descriptor, used for commands and responses
+ *
+ * flags: descriptor type
+ * numDesc: number of descriptors following in this command/response,
+ * ie, zero for a one descriptor command
+ * cmd: the command
+ * seqNo: sequence number (unused)
+ * parm1: use varies by command
+ * parm2: use varies by command
+ * parm3: use varies by command
+ */
+struct cmd_desc {
+ u8 flags;
+ u8 numDesc;
+ __le16 cmd;
+#define TYPHOON_CMD_TX_ENABLE cpu_to_le16(0x0001)
+#define TYPHOON_CMD_TX_DISABLE cpu_to_le16(0x0002)
+#define TYPHOON_CMD_RX_ENABLE cpu_to_le16(0x0003)
+#define TYPHOON_CMD_RX_DISABLE cpu_to_le16(0x0004)
+#define TYPHOON_CMD_SET_RX_FILTER cpu_to_le16(0x0005)
+#define TYPHOON_CMD_READ_STATS cpu_to_le16(0x0007)
+#define TYPHOON_CMD_XCVR_SELECT cpu_to_le16(0x0013)
+#define TYPHOON_CMD_SET_MAX_PKT_SIZE cpu_to_le16(0x001a)
+#define TYPHOON_CMD_READ_MEDIA_STATUS cpu_to_le16(0x001b)
+#define TYPHOON_CMD_GOTO_SLEEP cpu_to_le16(0x0023)
+#define TYPHOON_CMD_SET_MULTICAST_HASH cpu_to_le16(0x0025)
+#define TYPHOON_CMD_SET_MAC_ADDRESS cpu_to_le16(0x0026)
+#define TYPHOON_CMD_READ_MAC_ADDRESS cpu_to_le16(0x0027)
+#define TYPHOON_CMD_VLAN_TYPE_WRITE cpu_to_le16(0x002b)
+#define TYPHOON_CMD_CREATE_SA cpu_to_le16(0x0034)
+#define TYPHOON_CMD_DELETE_SA cpu_to_le16(0x0035)
+#define TYPHOON_CMD_READ_VERSIONS cpu_to_le16(0x0043)
+#define TYPHOON_CMD_IRQ_COALESCE_CTRL cpu_to_le16(0x0045)
+#define TYPHOON_CMD_ENABLE_WAKE_EVENTS cpu_to_le16(0x0049)
+#define TYPHOON_CMD_SET_OFFLOAD_TASKS cpu_to_le16(0x004f)
+#define TYPHOON_CMD_HELLO_RESP cpu_to_le16(0x0057)
+#define TYPHOON_CMD_HALT cpu_to_le16(0x005d)
+#define TYPHOON_CMD_READ_IPSEC_INFO cpu_to_le16(0x005e)
+#define TYPHOON_CMD_GET_IPSEC_ENABLE cpu_to_le16(0x0067)
+#define TYPHOON_CMD_GET_CMD_LVL cpu_to_le16(0x0069)
+ u16 seqNo;
+ __le16 parm1;
+ __le32 parm2;
+ __le32 parm3;
+} __packed;
+
+/* The Typhoon response descriptor, see command descriptor for details
+ */
+struct resp_desc {
+ u8 flags;
+ u8 numDesc;
+ __le16 cmd;
+ __le16 seqNo;
+ __le16 parm1;
+ __le32 parm2;
+ __le32 parm3;
+} __packed;
+
+#define INIT_COMMAND_NO_RESPONSE(x, command) \
+ do { struct cmd_desc *_ptr = (x); \
+ memset(_ptr, 0, sizeof(struct cmd_desc)); \
+ _ptr->flags = TYPHOON_CMD_DESC | TYPHOON_DESC_VALID; \
+ _ptr->cmd = command; \
+ } while(0)
+
+/* We set seqNo to 1 if we're expecting a response from this command */
+#define INIT_COMMAND_WITH_RESPONSE(x, command) \
+ do { struct cmd_desc *_ptr = (x); \
+ memset(_ptr, 0, sizeof(struct cmd_desc)); \
+ _ptr->flags = TYPHOON_CMD_RESPOND | TYPHOON_CMD_DESC; \
+ _ptr->flags |= TYPHOON_DESC_VALID; \
+ _ptr->cmd = command; \
+ _ptr->seqNo = 1; \
+ } while(0)
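+
+/* Typical use, as in typhoon.c: build the descriptor on the stack, then
+ * pass it to typhoon_issue_command() along with room for any responses.
+ * For example (sketch):
+ *
+ *	struct cmd_desc xp_cmd;
+ *	struct resp_desc xp_resp;
+ *
+ *	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
+ *	if (typhoon_issue_command(tp, 1, &xp_cmd, 1, &xp_resp) < 0)
+ *		return -EIO;
+ */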
+
+/* TYPHOON_CMD_SET_RX_FILTER filter bits (cmd.parm1)
+ */
+#define TYPHOON_RX_FILTER_DIRECTED cpu_to_le16(0x0001)
+#define TYPHOON_RX_FILTER_ALL_MCAST cpu_to_le16(0x0002)
+#define TYPHOON_RX_FILTER_BROADCAST cpu_to_le16(0x0004)
+#define TYPHOON_RX_FILTER_PROMISCOUS cpu_to_le16(0x0008)
+#define TYPHOON_RX_FILTER_MCAST_HASH cpu_to_le16(0x0010)
+
+/* TYPHOON_CMD_READ_STATS response format
+ */
+struct stats_resp {
+ u8 flags;
+ u8 numDesc;
+ __le16 cmd;
+ __le16 seqNo;
+ __le16 unused;
+ __le32 txPackets;
+ __le64 txBytes;
+ __le32 txDeferred;
+ __le32 txLateCollisions;
+ __le32 txCollisions;
+ __le32 txCarrierLost;
+ __le32 txMultipleCollisions;
+ __le32 txExcessiveCollisions;
+ __le32 txFifoUnderruns;
+ __le32 txMulticastTxOverflows;
+ __le32 txFiltered;
+ __le32 rxPacketsGood;
+ __le64 rxBytesGood;
+ __le32 rxFifoOverruns;
+ __le32 BadSSD;
+ __le32 rxCrcErrors;
+ __le32 rxOversized;
+ __le32 rxBroadcast;
+ __le32 rxMulticast;
+ __le32 rxOverflow;
+ __le32 rxFiltered;
+ __le32 linkStatus;
+#define TYPHOON_LINK_STAT_MASK cpu_to_le32(0x00000001)
+#define TYPHOON_LINK_GOOD cpu_to_le32(0x00000001)
+#define TYPHOON_LINK_BAD cpu_to_le32(0x00000000)
+#define TYPHOON_LINK_SPEED_MASK cpu_to_le32(0x00000002)
+#define TYPHOON_LINK_100MBPS cpu_to_le32(0x00000002)
+#define TYPHOON_LINK_10MBPS cpu_to_le32(0x00000000)
+#define TYPHOON_LINK_DUPLEX_MASK cpu_to_le32(0x00000004)
+#define TYPHOON_LINK_FULL_DUPLEX cpu_to_le32(0x00000004)
+#define TYPHOON_LINK_HALF_DUPLEX cpu_to_le32(0x00000000)
+ __le32 unused2;
+ __le32 unused3;
+} __packed;
+
+/* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1)
+ */
+#define TYPHOON_XCVR_10HALF cpu_to_le16(0x0000)
+#define TYPHOON_XCVR_10FULL cpu_to_le16(0x0001)
+#define TYPHOON_XCVR_100HALF cpu_to_le16(0x0002)
+#define TYPHOON_XCVR_100FULL cpu_to_le16(0x0003)
+#define TYPHOON_XCVR_AUTONEG cpu_to_le16(0x0004)
+
+/* TYPHOON_CMD_READ_MEDIA_STATUS (resp.parm1)
+ */
+#define TYPHOON_MEDIA_STAT_CRC_STRIP_DISABLE cpu_to_le16(0x0004)
+#define TYPHOON_MEDIA_STAT_COLLISION_DETECT cpu_to_le16(0x0010)
+#define TYPHOON_MEDIA_STAT_CARRIER_SENSE cpu_to_le16(0x0020)
+#define TYPHOON_MEDIA_STAT_POLARITY_REV cpu_to_le16(0x0400)
+#define TYPHOON_MEDIA_STAT_NO_LINK cpu_to_le16(0x0800)
+
+/* TYPHOON_CMD_SET_MULTICAST_HASH enable values (cmd.parm1)
+ */
+#define TYPHOON_MCAST_HASH_DISABLE cpu_to_le16(0x0000)
+#define TYPHOON_MCAST_HASH_ENABLE cpu_to_le16(0x0001)
+#define TYPHOON_MCAST_HASH_SET cpu_to_le16(0x0002)
+
+/* TYPHOON_CMD_CREATE_SA descriptor and settings
+ */
+struct sa_descriptor {
+ u8 flags;
+ u8 numDesc;
+ u16 cmd;
+ u16 seqNo;
+ u16 mode;
+#define TYPHOON_SA_MODE_NULL cpu_to_le16(0x0000)
+#define TYPHOON_SA_MODE_AH cpu_to_le16(0x0001)
+#define TYPHOON_SA_MODE_ESP cpu_to_le16(0x0002)
+ u8 hashFlags;
+#define TYPHOON_SA_HASH_ENABLE 0x01
+#define TYPHOON_SA_HASH_SHA1 0x02
+#define TYPHOON_SA_HASH_MD5 0x04
+ u8 direction;
+#define TYPHOON_SA_DIR_RX 0x00
+#define TYPHOON_SA_DIR_TX 0x01
+ u8 encryptionFlags;
+#define TYPHOON_SA_ENCRYPT_ENABLE 0x01
+#define TYPHOON_SA_ENCRYPT_DES 0x02
+#define TYPHOON_SA_ENCRYPT_3DES 0x00
+#define TYPHOON_SA_ENCRYPT_3DES_2KEY 0x00
+#define TYPHOON_SA_ENCRYPT_3DES_3KEY 0x04
+#define TYPHOON_SA_ENCRYPT_CBC 0x08
+#define TYPHOON_SA_ENCRYPT_ECB 0x00
+ u8 specifyIndex;
+#define TYPHOON_SA_SPECIFY_INDEX 0x01
+#define TYPHOON_SA_GENERATE_INDEX 0x00
+ u32 SPI;
+ u32 destAddr;
+ u32 destMask;
+ u8 integKey[20];
+ u8 confKey[24];
+ u32 index;
+ u32 unused;
+ u32 unused2;
+} __packed;
+
+/* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx))
+ * This is all for IPv4.
+ */
+#define TYPHOON_OFFLOAD_TCP_CHKSUM cpu_to_le32(0x00000002)
+#define TYPHOON_OFFLOAD_UDP_CHKSUM cpu_to_le32(0x00000004)
+#define TYPHOON_OFFLOAD_IP_CHKSUM cpu_to_le32(0x00000008)
+#define TYPHOON_OFFLOAD_IPSEC cpu_to_le32(0x00000010)
+#define TYPHOON_OFFLOAD_BCAST_THROTTLE cpu_to_le32(0x00000020)
+#define TYPHOON_OFFLOAD_DHCP_PREVENT cpu_to_le32(0x00000040)
+#define TYPHOON_OFFLOAD_VLAN cpu_to_le32(0x00000080)
+#define TYPHOON_OFFLOAD_FILTERING cpu_to_le32(0x00000100)
+#define TYPHOON_OFFLOAD_TCP_SEGMENT cpu_to_le32(0x00000200)
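+
+/* The Tx mask goes in cmd.parm2 and the Rx mask in cmd.parm3. A sketch of
+ * enabling checksum and VLAN assistance (illustrative mask only; the mask
+ * the driver actually uses depends on the enabled netdev features):
+ *
+ *	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
+ *	xp_cmd.parm2 = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM |
+ *			TYPHOON_OFFLOAD_UDP_CHKSUM | TYPHOON_OFFLOAD_VLAN;
+ *	xp_cmd.parm3 = xp_cmd.parm2;
+ *	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
+ */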
+
+/* TYPHOON_CMD_ENABLE_WAKE_EVENTS bits (cmd.parm1)
+ */
+#define TYPHOON_WAKE_MAGIC_PKT cpu_to_le16(0x01)
+#define TYPHOON_WAKE_LINK_EVENT cpu_to_le16(0x02)
+#define TYPHOON_WAKE_ICMP_ECHO cpu_to_le16(0x04)
+#define TYPHOON_WAKE_ARP cpu_to_le16(0x08)
+
+/* These are used to load the firmware image on the NIC
+ */
+struct typhoon_file_header {
+ u8 tag[8];
+ __le32 version;
+ __le32 numSections;
+ __le32 startAddr;
+ __le32 hmacDigest[5];
+} __packed;
+
+struct typhoon_section_header {
+ __le32 len;
+ u16 checksum;
+ u16 reserved;
+ __le32 startAddr;
+} __packed;
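+
+/* An image is a typhoon_file_header followed by numSections sections, each
+ * a typhoon_section_header plus payload. Walking it might look like this
+ * sketch (bounds and checksum handling omitted; "image" points at the
+ * loaded firmware blob):
+ *
+ *	const struct typhoon_file_header *fhdr = (const void *) image;
+ *	const u8 *ptr = image + sizeof(*fhdr);
+ *	u32 i;
+ *
+ *	for (i = 0; i < le32_to_cpu(fhdr->numSections); i++) {
+ *		const struct typhoon_section_header *shdr = (const void *) ptr;
+ *
+ *		// send le32_to_cpu(shdr->len) payload bytes to the 3XP
+ *		// at load address le32_to_cpu(shdr->startAddr)
+ *		ptr += sizeof(*shdr) + le32_to_cpu(shdr->len);
+ *	}
+ */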
+
+/* The Typhoon Register offsets
+ */
+#define TYPHOON_REG_SOFT_RESET 0x00
+#define TYPHOON_REG_INTR_STATUS 0x04
+#define TYPHOON_REG_INTR_ENABLE 0x08
+#define TYPHOON_REG_INTR_MASK 0x0c
+#define TYPHOON_REG_SELF_INTERRUPT 0x10
+#define TYPHOON_REG_HOST2ARM7 0x14
+#define TYPHOON_REG_HOST2ARM6 0x18
+#define TYPHOON_REG_HOST2ARM5 0x1c
+#define TYPHOON_REG_HOST2ARM4 0x20
+#define TYPHOON_REG_HOST2ARM3 0x24
+#define TYPHOON_REG_HOST2ARM2 0x28
+#define TYPHOON_REG_HOST2ARM1 0x2c
+#define TYPHOON_REG_HOST2ARM0 0x30
+#define TYPHOON_REG_ARM2HOST3 0x34
+#define TYPHOON_REG_ARM2HOST2 0x38
+#define TYPHOON_REG_ARM2HOST1 0x3c
+#define TYPHOON_REG_ARM2HOST0 0x40
+
+#define TYPHOON_REG_BOOT_DATA_LO TYPHOON_REG_HOST2ARM5
+#define TYPHOON_REG_BOOT_DATA_HI TYPHOON_REG_HOST2ARM4
+#define TYPHOON_REG_BOOT_DEST_ADDR TYPHOON_REG_HOST2ARM3
+#define TYPHOON_REG_BOOT_CHECKSUM TYPHOON_REG_HOST2ARM2
+#define TYPHOON_REG_BOOT_LENGTH TYPHOON_REG_HOST2ARM1
+
+#define TYPHOON_REG_DOWNLOAD_BOOT_ADDR TYPHOON_REG_HOST2ARM1
+#define TYPHOON_REG_DOWNLOAD_HMAC_0 TYPHOON_REG_HOST2ARM2
+#define TYPHOON_REG_DOWNLOAD_HMAC_1 TYPHOON_REG_HOST2ARM3
+#define TYPHOON_REG_DOWNLOAD_HMAC_2 TYPHOON_REG_HOST2ARM4
+#define TYPHOON_REG_DOWNLOAD_HMAC_3 TYPHOON_REG_HOST2ARM5
+#define TYPHOON_REG_DOWNLOAD_HMAC_4 TYPHOON_REG_HOST2ARM6
+
+#define TYPHOON_REG_BOOT_RECORD_ADDR_HI TYPHOON_REG_HOST2ARM2
+#define TYPHOON_REG_BOOT_RECORD_ADDR_LO TYPHOON_REG_HOST2ARM1
+
+#define TYPHOON_REG_TX_LO_READY TYPHOON_REG_HOST2ARM3
+#define TYPHOON_REG_CMD_READY TYPHOON_REG_HOST2ARM2
+#define TYPHOON_REG_TX_HI_READY TYPHOON_REG_HOST2ARM1
+
+#define TYPHOON_REG_COMMAND TYPHOON_REG_HOST2ARM0
+#define TYPHOON_REG_HEARTBEAT TYPHOON_REG_ARM2HOST3
+#define TYPHOON_REG_STATUS TYPHOON_REG_ARM2HOST0
+
+/* 3XP Reset values (TYPHOON_REG_SOFT_RESET)
+ */
+#define TYPHOON_RESET_ALL 0x7f
+#define TYPHOON_RESET_NONE 0x00
+
+/* 3XP irq bits (TYPHOON_REG_INTR{STATUS,ENABLE,MASK})
+ *
+ * Some of these came from OpenBSD, as the 3Com docs have it wrong
+ * (INTR_SELF) or don't list it at all (INTR_*_ABORT)
+ *
+ * Enabling irqs on the Heartbeat reg (ArmToHost3) gets you an irq
+ * about every 8ms, so don't do it.
+ */
+#define TYPHOON_INTR_HOST_INT 0x00000001
+#define TYPHOON_INTR_ARM2HOST0 0x00000002
+#define TYPHOON_INTR_ARM2HOST1 0x00000004
+#define TYPHOON_INTR_ARM2HOST2 0x00000008
+#define TYPHOON_INTR_ARM2HOST3 0x00000010
+#define TYPHOON_INTR_DMA0 0x00000020
+#define TYPHOON_INTR_DMA1 0x00000040
+#define TYPHOON_INTR_DMA2 0x00000080
+#define TYPHOON_INTR_DMA3 0x00000100
+#define TYPHOON_INTR_MASTER_ABORT 0x00000200
+#define TYPHOON_INTR_TARGET_ABORT 0x00000400
+#define TYPHOON_INTR_SELF 0x00000800
+#define TYPHOON_INTR_RESERVED 0xfffff000
+
+#define TYPHOON_INTR_BOOTCMD TYPHOON_INTR_ARM2HOST0
+
+#define TYPHOON_INTR_ENABLE_ALL 0xffffffef
+#define TYPHOON_INTR_ALL 0xffffffff
+#define TYPHOON_INTR_NONE 0x00000000
+
+/* The commands for the 3XP chip (TYPHOON_REG_COMMAND)
+ */
+#define TYPHOON_BOOTCMD_BOOT 0x00
+#define TYPHOON_BOOTCMD_WAKEUP 0xfa
+#define TYPHOON_BOOTCMD_DNLD_COMPLETE 0xfb
+#define TYPHOON_BOOTCMD_SEG_AVAILABLE 0xfc
+#define TYPHOON_BOOTCMD_RUNTIME_IMAGE 0xfd
+#define TYPHOON_BOOTCMD_REG_BOOT_RECORD 0xff
+
+/* 3XP Status values (TYPHOON_REG_STATUS)
+ */
+#define TYPHOON_STATUS_WAITING_FOR_BOOT 0x07
+#define TYPHOON_STATUS_SECOND_INIT 0x08
+#define TYPHOON_STATUS_RUNNING 0x09
+#define TYPHOON_STATUS_WAITING_FOR_HOST 0x0d
+#define TYPHOON_STATUS_WAITING_FOR_SEGMENT 0x10
+#define TYPHOON_STATUS_SLEEPING 0x11
+#define TYPHOON_STATUS_HALTED 0x14
diff --git a/drivers/net/ethernet/8390/3c503.c b/drivers/net/ethernet/8390/3c503.c
new file mode 100644
index 000000000000..fbab1367505f
--- /dev/null
+++ b/drivers/net/ethernet/8390/3c503.c
@@ -0,0 +1,778 @@
+/* 3c503.c: A shared-memory NS8390 ethernet driver for linux. */
+/*
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+
+ This driver should work with the 3c503 and 3c503/16. It should be used
+ in shared memory mode for best performance, although it may also work
+ in programmed-I/O mode.
+
+ Sources:
+ EtherLink II Technical Reference Manual,
+ EtherLink II/16 Technical Reference Manual Supplement,
+ 3Com Corporation, 5400 Bayfront Plaza, Santa Clara CA 95052-8145
+
+ The Crynwr 3c503 packet driver.
+
+ Changelog:
+
+ Paul Gortmaker : add support for the 2nd 8kB of RAM on 16 bit cards.
+ Paul Gortmaker : multiple card support for module users.
+ rjohnson@analogic.com : Fix up PIO interface for efficient operation.
+ Jeff Garzik : ethtool support
+
+*/
+
+#define DRV_NAME "3c503"
+#define DRV_VERSION "1.10a"
+#define DRV_RELDATE "11/17/2001"
+
+
+static const char version[] =
+ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Donald Becker (becker@scyld.com)\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ethtool.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/byteorder.h>
+
+#include "8390.h"
+#include "3c503.h"
+#define WRD_COUNT 4
+
+static int el2_pio_probe(struct net_device *dev);
+static int el2_probe1(struct net_device *dev, int ioaddr);
+
+/* A zero-terminated list of I/O addresses to be probed in PIO mode. */
+static unsigned int netcard_portlist[] __initdata =
+ { 0x300,0x310,0x330,0x350,0x250,0x280,0x2a0,0x2e0,0};
+
+#define EL2_IO_EXTENT 16
+
+static int el2_open(struct net_device *dev);
+static int el2_close(struct net_device *dev);
+static void el2_reset_8390(struct net_device *dev);
+static void el2_init_card(struct net_device *dev);
+static void el2_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+static void el2_block_input(struct net_device *dev, int count, struct sk_buff *skb,
+ int ring_offset);
+static void el2_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static const struct ethtool_ops netdev_ethtool_ops;
+
+
+/* This routine probes for a memory-mapped 3c503 board by looking for
+ the "location register" at the end of the jumpered boot PROM space.
+ This works even if a PROM isn't there.
+
+   If the ethercard isn't found there, an optional probe checks for an
+   ethercard jumpered to programmed-I/O mode.
+ */
+static int __init do_el2_probe(struct net_device *dev)
+{
+ int *addr, addrs[] = { 0xddffe, 0xd9ffe, 0xcdffe, 0xc9ffe, 0};
+ int base_addr = dev->base_addr;
+ int irq = dev->irq;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return el2_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ for (addr = addrs; *addr; addr++) {
+ void __iomem *p = ioremap(*addr, 1);
+ unsigned base_bits;
+ int i;
+
+ if (!p)
+ continue;
+ base_bits = readb(p);
+ iounmap(p);
+ i = ffs(base_bits) - 1;
+ if (i == -1 || base_bits != (1 << i))
+ continue;
+ if (el2_probe1(dev, netcard_portlist[i]) == 0)
+ return 0;
+ dev->irq = irq;
+ }
+#if ! defined(no_probe_nonshared_memory)
+ return el2_pio_probe(dev);
+#else
+ return -ENODEV;
+#endif
+}
+
+/* Try all of the locations that aren't obviously empty. This touches
+ a lot of locations, and is much riskier than the code above. */
+static int __init
+el2_pio_probe(struct net_device *dev)
+{
+ int i;
+ int base_addr = dev->base_addr;
+ int irq = dev->irq;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return el2_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ for (i = 0; netcard_portlist[i]; i++) {
+ if (el2_probe1(dev, netcard_portlist[i]) == 0)
+ return 0;
+ dev->irq = irq;
+ }
+
+ return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init el2_probe(int unit)
+{
+ struct net_device *dev = alloc_eip_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_el2_probe(dev);
+ if (err)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static const struct net_device_ops el2_netdev_ops = {
+ .ndo_open = el2_open,
+ .ndo_stop = el2_close,
+
+ .ndo_start_xmit = eip_start_xmit,
+ .ndo_tx_timeout = eip_tx_timeout,
+ .ndo_get_stats = eip_get_stats,
+ .ndo_set_rx_mode = eip_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = eip_poll,
+#endif
+};
+
+/* Probe for the Etherlink II card at I/O port base IOADDR,
+ returning non-zero on success. If found, set the station
+ address and memory parameters in DEVICE. */
+static int __init
+el2_probe1(struct net_device *dev, int ioaddr)
+{
+ int i, iobase_reg, membase_reg, saved_406, wordlength, retval;
+ static unsigned version_printed;
+ unsigned long vendor_id;
+
+ if (!request_region(ioaddr, EL2_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ if (!request_region(ioaddr + 0x400, 8, DRV_NAME)) {
+ retval = -EBUSY;
+ goto out;
+ }
+
+ /* Reset and/or avoid any lurking NE2000 */
+ if (inb(ioaddr + 0x408) == 0xff) {
+ mdelay(1);
+ retval = -ENODEV;
+ goto out1;
+ }
+
+ /* We verify that it's a 3C503 board by checking the first three octets
+ of its ethernet address. */
+ iobase_reg = inb(ioaddr+0x403);
+ membase_reg = inb(ioaddr+0x404);
+ /* ASIC location registers should be 0 or have only a single bit set. */
+ if ((iobase_reg & (iobase_reg - 1)) ||
+ (membase_reg & (membase_reg - 1))) {
+ retval = -ENODEV;
+ goto out1;
+ }
+ saved_406 = inb_p(ioaddr + 0x406);
+ outb_p(ECNTRL_RESET|ECNTRL_THIN, ioaddr + 0x406); /* Reset it... */
+ outb_p(ECNTRL_THIN, ioaddr + 0x406);
+ /* Map the station addr PROM into the lower I/O ports. We now check
+ for both the old and new 3Com prefix */
+ outb(ECNTRL_SAPROM|ECNTRL_THIN, ioaddr + 0x406);
+ vendor_id = inb(ioaddr)*0x10000 + inb(ioaddr + 1)*0x100 + inb(ioaddr + 2);
+ if ((vendor_id != OLD_3COM_ID) && (vendor_id != NEW_3COM_ID)) {
+ /* Restore the register we frobbed. */
+ outb(saved_406, ioaddr + 0x406);
+ retval = -ENODEV;
+ goto out1;
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ pr_debug("%s", version);
+
+ dev->base_addr = ioaddr;
+
+ pr_info("%s: 3c503 at i/o base %#3x, node ", dev->name, ioaddr);
+
+ /* Retrieve and print the ethernet address. */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + i);
+ pr_cont("%pM", dev->dev_addr);
+
+ /* Map the 8390 back into the window. */
+ outb(ECNTRL_THIN, ioaddr + 0x406);
+
+ /* Check for EL2/16 as described in tech. man. */
+ outb_p(E8390_PAGE0, ioaddr + E8390_CMD);
+ outb_p(0, ioaddr + EN0_DCFG);
+ outb_p(E8390_PAGE2, ioaddr + E8390_CMD);
+ wordlength = inb_p(ioaddr + EN0_DCFG) & ENDCFG_WTS;
+ outb_p(E8390_PAGE0, ioaddr + E8390_CMD);
+
+ /* Probe for, turn on and clear the board's shared memory. */
+ if (ei_debug > 2)
+ pr_cont(" memory jumpers %2.2x ", membase_reg);
+ outb(EGACFR_NORM, ioaddr + 0x405); /* Enable RAM */
+
+ /* This should be probed for (or set via an ioctl()) at run-time.
+ Right now we use a sleazy hack to pass in the interface number
+   at boot-time via the low bits of the mem_end field.  That value is
+   unused, and the low bits would be discarded even if it were used. */
+#if defined(EI8390_THICK) || defined(EL2_AUI)
+ ei_status.interface_num = 1;
+#else
+ ei_status.interface_num = dev->mem_end & 0xf;
+#endif
+ pr_cont(", using %sternal xcvr.\n", ei_status.interface_num == 0 ? "in" : "ex");
+
+ if ((membase_reg & 0xf0) == 0) {
+ dev->mem_start = 0;
+ ei_status.name = "3c503-PIO";
+ ei_status.mem = NULL;
+ } else {
+ dev->mem_start = ((membase_reg & 0xc0) ? 0xD8000 : 0xC8000) +
+ ((membase_reg & 0xA0) ? 0x4000 : 0);
+#define EL2_MEMSIZE ((EL2_MB1_STOP_PG - EL2_MB1_START_PG)*256)
+ ei_status.mem = ioremap(dev->mem_start, EL2_MEMSIZE);
+
+#ifdef EL2MEMTEST
+ /* This has never found an error, but someone might care.
+ Note that it only tests the 2nd 8kB on 16kB 3c503/16
+ cards between card addr. 0x2000 and 0x3fff. */
+ { /* Check the card's memory. */
+ void __iomem *mem_base = ei_status.mem;
+ unsigned int test_val = 0xbbadf00d;
+ writel(0xba5eba5e, mem_base);
+ for (i = sizeof(test_val); i < EL2_MEMSIZE; i+=sizeof(test_val)) {
+ writel(test_val, mem_base + i);
+ if (readl(mem_base) != 0xba5eba5e ||
+ readl(mem_base + i) != test_val) {
+ pr_warning("3c503: memory failure or memory address conflict.\n");
+ dev->mem_start = 0;
+ ei_status.name = "3c503-PIO";
+ iounmap(mem_base);
+ ei_status.mem = NULL;
+ break;
+ }
+ test_val += 0x55555555;
+ writel(0, mem_base + i);
+ }
+ }
+#endif /* EL2MEMTEST */
+
+ if (dev->mem_start)
+ dev->mem_end = dev->mem_start + EL2_MEMSIZE;
+
+ if (wordlength) { /* No Tx pages to skip over to get to Rx */
+ ei_status.priv = 0;
+ ei_status.name = "3c503/16";
+ } else {
+ ei_status.priv = TX_PAGES * 256;
+ ei_status.name = "3c503";
+ }
+ }
+
+ /*
+ Divide up the memory on the card. This is the same regardless of
+ whether shared-mem or PIO is used. For 16 bit cards (16kB RAM),
+ we use the entire 8k of bank1 for an Rx ring. We only use 3k
+ of the bank0 for 2 full size Tx packet slots. For 8 bit cards,
+ (8kB RAM) we use 3kB of bank1 for two Tx slots, and the remaining
+ 5kB for an Rx ring. */
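+
+    /* A sketch of the arithmetic, assuming 256-byte 8390 pages and
+       TX_PAGES = 12 (two 6-page transmit slots, as in 8390.h): the Tx
+       slots take 12 * 256 = 3kB, and an 8kB bank holds 32 pages, which
+       leaves 32 - 12 = 20 pages (5kB) of Rx ring on an 8 bit card. */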
+
+ if (wordlength) {
+ ei_status.tx_start_page = EL2_MB0_START_PG;
+ ei_status.rx_start_page = EL2_MB1_START_PG;
+ } else {
+ ei_status.tx_start_page = EL2_MB1_START_PG;
+ ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES;
+ }
+
+ /* Finish setting the board's parameters. */
+ ei_status.stop_page = EL2_MB1_STOP_PG;
+ ei_status.word16 = wordlength;
+ ei_status.reset_8390 = el2_reset_8390;
+ ei_status.get_8390_hdr = el2_get_8390_hdr;
+ ei_status.block_input = el2_block_input;
+ ei_status.block_output = el2_block_output;
+
+ if (dev->irq == 2)
+ dev->irq = 9;
+ else if (dev->irq > 5 && dev->irq != 9) {
+ pr_warning("3c503: configured interrupt %d invalid, will use autoIRQ.\n",
+ dev->irq);
+ dev->irq = 0;
+ }
+
+ ei_status.saved_irq = dev->irq;
+
+ dev->netdev_ops = &el2_netdev_ops;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+
+ retval = register_netdev(dev);
+ if (retval)
+ goto out1;
+
+ if (dev->mem_start)
+ pr_info("%s: %s - %dkB RAM, 8kB shared mem window at %#6lx-%#6lx.\n",
+ dev->name, ei_status.name, (wordlength+1)<<3,
+ dev->mem_start, dev->mem_end-1);
+
+ else
+ {
+ ei_status.tx_start_page = EL2_MB1_START_PG;
+ ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES;
+ pr_info("%s: %s, %dkB RAM, using programmed I/O (REJUMPER for SHARED MEMORY).\n",
+ dev->name, ei_status.name, (wordlength+1)<<3);
+ }
+ release_region(ioaddr + 0x400, 8);
+ return 0;
+out1:
+ release_region(ioaddr + 0x400, 8);
+out:
+ release_region(ioaddr, EL2_IO_EXTENT);
+ return retval;
+}
+
+static irqreturn_t el2_probe_interrupt(int irq, void *seen)
+{
+ *(bool *)seen = true;
+ return IRQ_HANDLED;
+}
+
+static int
+el2_open(struct net_device *dev)
+{
+ int retval;
+
+ if (dev->irq < 2) {
+ static const int irqlist[] = {5, 9, 3, 4, 0};
+ const int *irqp = irqlist;
+
+ outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */
+ do {
+ bool seen;
+
+ retval = request_irq(*irqp, el2_probe_interrupt, 0,
+ dev->name, &seen);
+ if (retval == -EBUSY)
+ continue;
+ if (retval < 0)
+ goto err_disable;
+
+ /* Twinkle the interrupt, and check if it's seen. */
+ seen = false;
+ smp_wmb();
+ outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
+ outb_p(0x00, E33G_IDCFR);
+ msleep(1);
+ free_irq(*irqp, &seen);
+ if (!seen)
+ continue;
+
+ retval = request_irq(dev->irq = *irqp, eip_interrupt, 0,
+ dev->name, dev);
+ if (retval == -EBUSY)
+ continue;
+ if (retval < 0)
+ goto err_disable;
+ break;
+ } while (*++irqp);
+
+ if (*irqp == 0) {
+ err_disable:
+ outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
+ return -EAGAIN;
+ }
+ } else {
+ if ((retval = request_irq(dev->irq, eip_interrupt, 0, dev->name, dev))) {
+ return retval;
+ }
+ }
+
+ el2_init_card(dev);
+ eip_open(dev);
+ return 0;
+}
+
+static int
+el2_close(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+ dev->irq = ei_status.saved_irq;
+ outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
+
+ eip_close(dev);
+ return 0;
+}
+
+/* This is called whenever we have an unrecoverable failure:
+ transmit timeout
+ Bad ring buffer packet header
+ */
+static void
+el2_reset_8390(struct net_device *dev)
+{
+ if (ei_debug > 1) {
+ pr_debug("%s: Resetting the 3c503 board...", dev->name);
+ pr_cont(" %#lx=%#02x %#lx=%#02x %#lx=%#02x...", E33G_IDCFR, inb(E33G_IDCFR),
+ E33G_CNTRL, inb(E33G_CNTRL), E33G_GACFR, inb(E33G_GACFR));
+ }
+ outb_p(ECNTRL_RESET|ECNTRL_THIN, E33G_CNTRL);
+ ei_status.txing = 0;
+ outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+ el2_init_card(dev);
+ if (ei_debug > 1)
+ pr_cont("done\n");
+}
+
+/* Initialize the 3c503 GA registers after a reset. */
+static void
+el2_init_card(struct net_device *dev)
+{
+ /* Unmap the station PROM and select the DIX or BNC connector. */
+ outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+
+ /* Set ASIC copy of rx's first and last+1 buffer pages */
+ /* These must be the same as in the 8390. */
+ outb(ei_status.rx_start_page, E33G_STARTPG);
+ outb(ei_status.stop_page, E33G_STOPPG);
+
+ /* Point the vector pointer registers somewhere ?harmless?. */
+ outb(0xff, E33G_VP2); /* Point at the ROM restart location 0xffff0 */
+ outb(0xff, E33G_VP1);
+ outb(0x00, E33G_VP0);
+ /* Turn off all interrupts until we're opened. */
+ outb_p(0x00, dev->base_addr + EN0_IMR);
+ /* Enable IRQs iff started. */
+ outb(EGACFR_NORM, E33G_GACFR);
+
+ /* Set the interrupt line. */
+ outb_p((0x04 << (dev->irq == 9 ? 2 : dev->irq)), E33G_IDCFR);
+ outb_p((WRD_COUNT << 1), E33G_DRQCNT); /* Set burst size to 8 */
+ outb_p(0x20, E33G_DMAAH); /* Put a valid addr in the GA DMA */
+ outb_p(0x00, E33G_DMAAL);
+ return; /* We always succeed */
+}
+
+/*
+ * Either use the shared memory (if enabled on the board) or put the packet
+ * out through the ASIC FIFO.
+ */
+static void
+el2_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ unsigned short int *wrd;
+ int boguscount; /* timeout counter */
+ unsigned short word; /* temporary for better machine code */
+ void __iomem *base = ei_status.mem;
+
+ if (ei_status.word16) /* Tx packets go into bank 0 on EL2/16 card */
+ outb(EGACFR_RSEL|EGACFR_TCM, E33G_GACFR);
+ else
+ outb(EGACFR_NORM, E33G_GACFR);
+
+ if (base) { /* Shared memory transfer */
+ memcpy_toio(base + ((start_page - ei_status.tx_start_page) << 8),
+ buf, count);
+		outb(EGACFR_NORM, E33G_GACFR);	/* Back to bank1 in case we were on bank0 */
+ return;
+ }
+
+/*
+ * No shared memory, put the packet out the other way.
+ * Set up then start the internal memory transfer to Tx Start Page
+ */
+
+ word = (unsigned short)start_page;
+ outb(word&0xFF, E33G_DMAAH);
+ outb(word>>8, E33G_DMAAL);
+
+ outb_p((ei_status.interface_num ? ECNTRL_AUI : ECNTRL_THIN ) | ECNTRL_OUTPUT
+ | ECNTRL_START, E33G_CNTRL);
+
+/*
+ * Here I am going to write data to the FIFO as quickly as possible.
+ * Note that E33G_FIFOH is defined incorrectly. It is really
+ * E33G_FIFOL, the lowest port address for both the byte and
+ * word write. Variable 'count' is NOT checked. Caller must supply a
+ * valid count. Note that I may write a harmless extra byte to the
+ * 8390 if the byte-count was not even.
+ */
+ wrd = (unsigned short int *) buf;
+ count = (count + 1) >> 1;
+ for(;;)
+ {
+ boguscount = 0x1000;
+ while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
+ {
+ if(!boguscount--)
+ {
+ pr_notice("%s: FIFO blocked in el2_block_output.\n", dev->name);
+ el2_reset_8390(dev);
+ goto blocked;
+ }
+ }
+ if(count > WRD_COUNT)
+ {
+ outsw(E33G_FIFOH, wrd, WRD_COUNT);
+ wrd += WRD_COUNT;
+ count -= WRD_COUNT;
+ }
+ else
+ {
+ outsw(E33G_FIFOH, wrd, count);
+ break;
+ }
+ }
+ blocked:;
+ outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+}
+
+/* Read the 4 byte, page aligned 8390 specific header. */
+static void
+el2_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int boguscount;
+ void __iomem *base = ei_status.mem;
+ unsigned short word;
+
+ if (base) { /* Use the shared memory. */
+ void __iomem *hdr_start = base + ((ring_page - EL2_MB1_START_PG)<<8);
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+ hdr->count = le16_to_cpu(hdr->count);
+ return;
+ }
+
+/*
+ * No shared memory, use programmed I/O.
+ */
+
+ word = (unsigned short)ring_page;
+ outb(word&0xFF, E33G_DMAAH);
+ outb(word>>8, E33G_DMAAL);
+
+ outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT
+ | ECNTRL_START, E33G_CNTRL);
+ boguscount = 0x1000;
+ while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
+ {
+ if(!boguscount--)
+ {
+ pr_notice("%s: FIFO blocked in el2_get_8390_hdr.\n", dev->name);
+ memset(hdr, 0x00, sizeof(struct e8390_pkt_hdr));
+ el2_reset_8390(dev);
+ goto blocked;
+ }
+ }
+ insw(E33G_FIFOH, hdr, (sizeof(struct e8390_pkt_hdr))>> 1);
+ blocked:;
+ outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+}
+
+
+static void
+el2_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int boguscount = 0;
+ void __iomem *base = ei_status.mem;
+ unsigned short int *buf;
+ unsigned short word;
+
+    /* Maybe enable shared memory just to be safe... nahh.*/
+ if (base) { /* Use the shared memory. */
+ ring_offset -= (EL2_MB1_START_PG<<8);
+ if (ring_offset + count > EL2_MEMSIZE) {
+ /* We must wrap the input move. */
+ int semi_count = EL2_MEMSIZE - ring_offset;
+ memcpy_fromio(skb->data, base + ring_offset, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, base + ei_status.priv, count);
+ } else {
+ memcpy_fromio(skb->data, base + ring_offset, count);
+ }
+ return;
+ }
+
+/*
+ * No shared memory, use programmed I/O.
+ */
+ word = (unsigned short) ring_offset;
+ outb(word>>8, E33G_DMAAH);
+ outb(word&0xFF, E33G_DMAAL);
+
+ outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT
+ | ECNTRL_START, E33G_CNTRL);
+
+/*
+ * Here I also try to get data as fast as possible. I am betting that I
+ * can read one extra byte without clobbering anything in the kernel because
+ * this would only occur on an odd byte-count and allocation of skb->data
+ * is word-aligned. Variable 'count' is NOT checked. Caller must check
+ * for a valid count.
+ * [This is currently quite safe.... but if one day the 3c503 explodes
+ * you know where to come looking ;)]
+ */
+
+ buf = (unsigned short int *) skb->data;
+ count = (count + 1) >> 1;
+ for(;;)
+ {
+ boguscount = 0x1000;
+ while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
+ {
+ if(!boguscount--)
+ {
+ pr_notice("%s: FIFO blocked in el2_block_input.\n", dev->name);
+ el2_reset_8390(dev);
+ goto blocked;
+ }
+ }
+ if(count > WRD_COUNT)
+ {
+ insw(E33G_FIFOH, buf, WRD_COUNT);
+ buf += WRD_COUNT;
+ count -= WRD_COUNT;
+ }
+ else
+ {
+ insw(E33G_FIFOH, buf, count);
+ break;
+ }
+ }
+ blocked:;
+ outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+}
+
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+};
+
+#ifdef MODULE
+#define MAX_EL2_CARDS 4 /* Max number of EL2 cards per module */
+
+static struct net_device *dev_el2[MAX_EL2_CARDS];
+static int io[MAX_EL2_CARDS];
+static int irq[MAX_EL2_CARDS];
+static int xcvr[MAX_EL2_CARDS]; /* choose int. or ext. xcvr */
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(xcvr, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
+MODULE_PARM_DESC(xcvr, "transceiver(s) (0=internal, 1=external)");
+MODULE_DESCRIPTION("3Com ISA EtherLink II, II/16 (3c503, 3c503/16) driver");
+MODULE_LICENSE("GPL");
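+
+/*
+ * Illustrative usage (hypothetical values): loading the module for two
+ * cards, e.g. "modprobe 3c503 io=0x300,0x320 irq=5,9 xcvr=0,1".
+ */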
+
+/* This is set up so that only a single autoprobe takes place per call.
+   ISA device autoprobes on a running machine are not recommended. */
+int __init
+init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) {
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ pr_notice("3c503.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ dev = alloc_eip_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */
+ if (do_el2_probe(dev) == 0) {
+ dev_el2[found++] = dev;
+ continue;
+ }
+ free_netdev(dev);
+ pr_warning("3c503.c: No 3c503 card found (i/o = 0x%x).\n", io[this_dev]);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ /* NB: el2_close() handles free_irq */
+ release_region(dev->base_addr, EL2_IO_EXTENT);
+ if (ei_status.mem)
+ iounmap(ei_status.mem);
+}
+
+void __exit
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) {
+ struct net_device *dev = dev_el2[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/3c503.h b/drivers/net/ethernet/8390/3c503.h
new file mode 100644
index 000000000000..e2367b82a2ec
--- /dev/null
+++ b/drivers/net/ethernet/8390/3c503.h
@@ -0,0 +1,91 @@
+/* Definitions for the 3Com 3c503 Etherlink 2. */
+/* This file is distributed under the GPL.
+ Many of these names and comments are directly from the Crynwr packet
+ drivers, which are released under the GPL. */
+
+#define EL2H (dev->base_addr + 0x400)
+#define EL2L (dev->base_addr)
+
+/* Vendor unique hardware addr. prefix. 3Com has 2 because they ran
+ out of available addresses on the first one... */
+
+#define OLD_3COM_ID 0x02608c
+#define NEW_3COM_ID 0x0020af
+
+/* Shared memory management parameters. NB: The 8 bit cards have only
+ one bank (MB1) which serves both Tx and Rx packet space. The 16bit
+ cards have 2 banks, MB0 for Tx packets, and MB1 for Rx packets.
+ You choose which bank appears in the sh. mem window with EGACFR_MBSn */
+
+#define EL2_MB0_START_PG (0x00) /* EL2/16 Tx packets go in bank 0 */
+#define EL2_MB1_START_PG (0x20) /* First page of bank 1 */
+#define EL2_MB1_STOP_PG (0x40) /* Last page +1 of bank 1 */
+
+/* 3Com 3c503 ASIC registers */
+#define E33G_STARTPG (EL2H+0) /* Start page, matching EN0_STARTPG */
+#define E33G_STOPPG (EL2H+1) /* Stop page, must match EN0_STOPPG */
+#define E33G_DRQCNT (EL2H+2) /* DMA burst count */
+#define E33G_IOBASE (EL2H+3) /* Read of I/O base jumpers. */
+ /* (non-useful, but it also appears at the end of EPROM space) */
+#define E33G_ROMBASE (EL2H+4) /* Read of memory base jumpers. */
+#define E33G_GACFR (EL2H+5) /* Config/setup bits for the ASIC GA */
+#define E33G_CNTRL (EL2H+6) /* Board's main control register */
+#define E33G_STATUS (EL2H+7) /* Status on completions. */
+#define E33G_IDCFR (EL2H+8) /* Interrupt/DMA config register */
+ /* (Which IRQ to assert, DMA chan to use) */
+#define E33G_DMAAH (EL2H+9) /* High byte of DMA address reg */
+#define E33G_DMAAL (EL2H+10) /* Low byte of DMA address reg */
+/* "Vector pointer" - if this address matches a read, the EPROM (rather than
+ shared RAM) is mapped into memory space. */
+#define E33G_VP2 (EL2H+11)
+#define E33G_VP1 (EL2H+12)
+#define E33G_VP0 (EL2H+13)
+#define E33G_FIFOH (EL2H+14) /* FIFO for programmed I/O moves */
+#define E33G_FIFOL (EL2H+15) /* ... low byte of above. */
+
+/* Bits in E33G_CNTRL register: */
+
+#define ECNTRL_RESET (0x01) /* Software reset of the ASIC and 8390 */
+#define ECNTRL_THIN (0x02) /* Onboard xcvr enable, AUI disable */
+#define ECNTRL_AUI (0x00) /* Onboard xcvr disable, AUI enable */
+#define ECNTRL_SAPROM (0x04) /* Map the station address prom */
+#define ECNTRL_DBLBFR (0x20) /* FIFO configuration bit */
+#define ECNTRL_OUTPUT (0x40) /* PC-to-3C503 direction if 1 */
+#define ECNTRL_INPUT (0x00) /* 3C503-to-PC direction if 0 */
+#define ECNTRL_START (0x80) /* Start the DMA logic */
+
+/* Bits in E33G_STATUS register: */
+
+#define ESTAT_DPRDY (0x80) /* Data port (of FIFO) ready */
+#define ESTAT_UFLW (0x40) /* Tried to read FIFO when it was empty */
+#define ESTAT_OFLW (0x20) /* Tried to write FIFO when it was full */
+#define ESTAT_DTC (0x10) /* Terminal Count from PC bus DMA logic */
+#define ESTAT_DIP (0x08) /* DMA In Progress */
+
+/* Bits in E33G_GACFR register: */
+
+#define EGACFR_NIM (0x80) /* NIC interrupt mask */
+#define EGACFR_TCM (0x40) /* DMA term. count interrupt mask */
+#define EGACFR_RSEL (0x08) /* Map a bank of card mem into system mem */
+#define EGACFR_MBS2 (0x04) /* Memory bank select, bit 2. */
+#define EGACFR_MBS1 (0x02) /* Memory bank select, bit 1. */
+#define EGACFR_MBS0 (0x01) /* Memory bank select, bit 0. */
+
+#define EGACFR_NORM (0x49) /* TCM | RSEL | MBS0 */
+#define EGACFR_IRQOFF (0xc9) /* TCM | RSEL | MBS0 | NIM */
+
+/*
+ MBS2 MBS1 MBS0 Sh. mem windows card mem at:
+ ---- ---- ---- -----------------------------
+ 0 0 0 0x0000 -- bank 0
+ 0 0 1 0x2000 -- bank 1 (only choice for 8bit card)
+ 0 1 0 0x4000 -- bank 2, not used
+ 0 1 1 0x6000 -- bank 3, not used
+
+There was going to be a 32k card that used bank 2 and 3, but it
+never got produced.
+
+*/
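+/*
+ * Illustrative sketch, not part of the original header: mapping the
+ * receive bank into the shared-memory window with the GACFR bits above.
+ * Assumes the including .c file provides <asm/io.h> and the usual
+ * struct net_device *dev, since EL2H expands to dev->base_addr + 0x400.
+ */
+static inline void el2_map_rx_bank(struct net_device *dev)
+{
+ /* EGACFR_NORM = TCM | RSEL | MBS0: bank 1 (card address 0x2000) */
+ outb_p(EGACFR_NORM, E33G_GACFR);
+}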
+
+
+/* End of 3C503 parameter definitions */
diff --git a/drivers/net/ethernet/8390/8390.c b/drivers/net/ethernet/8390/8390.c
new file mode 100644
index 000000000000..5db1f55abef4
--- /dev/null
+++ b/drivers/net/ethernet/8390/8390.c
@@ -0,0 +1,103 @@
+/* 8390 core for usual drivers */
+
+static const char version[] =
+ "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include "lib8390.c"
+
+int ei_open(struct net_device *dev)
+{
+ return __ei_open(dev);
+}
+EXPORT_SYMBOL(ei_open);
+
+int ei_close(struct net_device *dev)
+{
+ return __ei_close(dev);
+}
+EXPORT_SYMBOL(ei_close);
+
+netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ return __ei_start_xmit(skb, dev);
+}
+EXPORT_SYMBOL(ei_start_xmit);
+
+struct net_device_stats *ei_get_stats(struct net_device *dev)
+{
+ return __ei_get_stats(dev);
+}
+EXPORT_SYMBOL(ei_get_stats);
+
+void ei_set_multicast_list(struct net_device *dev)
+{
+ __ei_set_multicast_list(dev);
+}
+EXPORT_SYMBOL(ei_set_multicast_list);
+
+void ei_tx_timeout(struct net_device *dev)
+{
+ __ei_tx_timeout(dev);
+}
+EXPORT_SYMBOL(ei_tx_timeout);
+
+irqreturn_t ei_interrupt(int irq, void *dev_id)
+{
+ return __ei_interrupt(irq, dev_id);
+}
+EXPORT_SYMBOL(ei_interrupt);
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+void ei_poll(struct net_device *dev)
+{
+ __ei_poll(dev);
+}
+EXPORT_SYMBOL(ei_poll);
+#endif
+
+const struct net_device_ops ei_netdev_ops = {
+ .ndo_open = ei_open,
+ .ndo_stop = ei_close,
+ .ndo_start_xmit = ei_start_xmit,
+ .ndo_tx_timeout = ei_tx_timeout,
+ .ndo_get_stats = ei_get_stats,
+ .ndo_set_rx_mode = ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ei_poll,
+#endif
+};
+EXPORT_SYMBOL(ei_netdev_ops);
+
+struct net_device *__alloc_ei_netdev(int size)
+{
+ struct net_device *dev = ____alloc_ei_netdev(size);
+ if (dev)
+ dev->netdev_ops = &ei_netdev_ops;
+ return dev;
+}
+EXPORT_SYMBOL(__alloc_ei_netdev);
+
+void NS8390_init(struct net_device *dev, int startp)
+{
+ __NS8390_init(dev, startp);
+}
+EXPORT_SYMBOL(NS8390_init);
+
+#if defined(MODULE)
+
+static int __init ns8390_module_init(void)
+{
+ return 0;
+}
+
+static void __exit ns8390_module_exit(void)
+{
+}
+
+module_init(ns8390_module_init);
+module_exit(ns8390_module_exit);
+#endif /* MODULE */
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
new file mode 100644
index 000000000000..58a12e4c78f9
--- /dev/null
+++ b/drivers/net/ethernet/8390/8390.h
@@ -0,0 +1,232 @@
+/* Generic NS8390 register definitions. */
+/* This file is part of Donald Becker's 8390 drivers, and is distributed
+ under the same license. Auto-loading of 8390.o only in v2.2 - Paul G.
+ Some of these names and comments originated from the Crynwr
+ packet drivers, which are distributed under the GPL. */
+
+#ifndef _8390_h
+#define _8390_h
+
+#include <linux/if_ether.h>
+#include <linux/ioport.h>
+#include <linux/irqreturn.h>
+#include <linux/skbuff.h>
+
+#define TX_PAGES 12 /* Two Tx slots of 6 pages (1536 bytes) each */
+
+#define ETHER_ADDR_LEN 6
+
+/* The 8390 specific per-packet-header format. */
+struct e8390_pkt_hdr {
+ unsigned char status; /* status */
+ unsigned char next; /* pointer to next packet. */
+ unsigned short count; /* header + packet length in bytes */
+};
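+
+/*
+ * Illustrative sketch, not part of the original header: since 'count'
+ * includes the 4-byte header itself, the payload length a driver hands
+ * up the stack is count minus the header size. The helper name is
+ * hypothetical.
+ */
+static inline int e8390_pkt_data_len(const struct e8390_pkt_hdr *hdr)
+{
+ return hdr->count - sizeof(struct e8390_pkt_hdr);
+}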
+
+#ifdef notdef
+extern int ei_debug;
+#else
+#define ei_debug 1
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+extern void ei_poll(struct net_device *dev);
+extern void eip_poll(struct net_device *dev);
+#endif
+
+
+/* Without I/O delay - non ISA or later chips */
+extern void NS8390_init(struct net_device *dev, int startp);
+extern int ei_open(struct net_device *dev);
+extern int ei_close(struct net_device *dev);
+extern irqreturn_t ei_interrupt(int irq, void *dev_id);
+extern void ei_tx_timeout(struct net_device *dev);
+extern netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
+extern void ei_set_multicast_list(struct net_device *dev);
+extern struct net_device_stats *ei_get_stats(struct net_device *dev);
+
+extern const struct net_device_ops ei_netdev_ops;
+
+extern struct net_device *__alloc_ei_netdev(int size);
+static inline struct net_device *alloc_ei_netdev(void)
+{
+ return __alloc_ei_netdev(0);
+}
+
+/* With I/O delay form */
+extern void NS8390p_init(struct net_device *dev, int startp);
+extern int eip_open(struct net_device *dev);
+extern int eip_close(struct net_device *dev);
+extern irqreturn_t eip_interrupt(int irq, void *dev_id);
+extern void eip_tx_timeout(struct net_device *dev);
+extern netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
+extern void eip_set_multicast_list(struct net_device *dev);
+extern struct net_device_stats *eip_get_stats(struct net_device *dev);
+
+extern const struct net_device_ops eip_netdev_ops;
+
+extern struct net_device *__alloc_eip_netdev(int size);
+static inline struct net_device *alloc_eip_netdev(void)
+{
+ return __alloc_eip_netdev(0);
+}
+
+/* You have one of these per-board */
+struct ei_device {
+ const char *name;
+ void (*reset_8390)(struct net_device *);
+ void (*get_8390_hdr)(struct net_device *, struct e8390_pkt_hdr *, int);
+ void (*block_output)(struct net_device *, int, const unsigned char *, int);
+ void (*block_input)(struct net_device *, int, struct sk_buff *, int);
+ unsigned long rmem_start;
+ unsigned long rmem_end;
+ void __iomem *mem;
+ unsigned char mcfilter[8];
+ unsigned open:1;
+ unsigned word16:1; /* We have the 16-bit (vs 8-bit) version of the card. */
+ unsigned bigendian:1; /* 16-bit big endian mode. Do NOT */
+ /* set this on random 8390 clones! */
+ unsigned txing:1; /* Transmit Active */
+ unsigned irqlock:1; /* 8390's intrs disabled when '1'. */
+ unsigned dmaing:1; /* Remote DMA Active */
+ unsigned char tx_start_page, rx_start_page, stop_page;
+ unsigned char current_page; /* Read pointer in buffer */
+ unsigned char interface_num; /* Net port (AUI, 10bT.) to use. */
+ unsigned char txqueue; /* Tx Packet buffer queue length. */
+ short tx1, tx2; /* Packet lengths for ping-pong tx. */
+ short lasttx; /* Alpha version consistency check. */
+ unsigned char reg0; /* Register '0' in a WD8013 */
+ unsigned char reg5; /* Register '5' in a WD8013 */
+ unsigned char saved_irq; /* Original dev->irq value. */
+ u32 *reg_offset; /* Register mapping table */
+ spinlock_t page_lock; /* Page register locks */
+ unsigned long priv; /* Private field to store bus IDs etc. */
+#ifdef AX88796_PLATFORM
+ unsigned char rxcr_base; /* default value for RXCR */
+#endif
+};
+
+/* The maximum number of 8390 interrupt service routines called per IRQ. */
+#define MAX_SERVICE 12
+
+/* The maximum time waited (in jiffies) before assuming a Tx failed. (20ms) */
+#define TX_TIMEOUT (20*HZ/100)
+
+#define ei_status (*(struct ei_device *)netdev_priv(dev))
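+
+/*
+ * Usage sketch (illustrative): ei_status hides the netdev_priv() cast,
+ * so any function with 'dev' in scope can read the shared 8390 state,
+ * e.g. "if (ei_status.word16)" to choose 16-bit transfers.
+ */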
+
+/* Some generic ethernet register configurations. */
+#define E8390_TX_IRQ_MASK 0xa /* For register EN0_ISR */
+#define E8390_RX_IRQ_MASK 0x5
+
+#ifdef AX88796_PLATFORM
+#define E8390_RXCONFIG (ei_status.rxcr_base | 0x04)
+#define E8390_RXOFF (ei_status.rxcr_base | 0x20)
+#else
+#define E8390_RXCONFIG 0x4 /* EN0_RXCR: broadcasts, no multicast,errors */
+#define E8390_RXOFF 0x20 /* EN0_RXCR: Accept no packets */
+#endif
+
+#define E8390_TXCONFIG 0x00 /* EN0_TXCR: Normal transmit mode */
+#define E8390_TXOFF 0x02 /* EN0_TXCR: Transmitter off */
+
+
+/* Register accessed at EN_CMD, the 8390 base addr. */
+#define E8390_STOP 0x01 /* Stop and reset the chip */
+#define E8390_START 0x02 /* Start the chip, clear reset */
+#define E8390_TRANS 0x04 /* Transmit a frame */
+#define E8390_RREAD 0x08 /* Remote read */
+#define E8390_RWRITE 0x10 /* Remote write */
+#define E8390_NODMA 0x20 /* Remote DMA */
+#define E8390_PAGE0 0x00 /* Select page chip registers */
+#define E8390_PAGE1 0x40 /* using the two high-order bits */
+#define E8390_PAGE2 0x80 /* Page 3 is invalid. */
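+/*
+ * Illustrative usage: the page select is OR'd into the command byte,
+ * e.g. outb(E8390_NODMA + E8390_PAGE0 + E8390_START, dev->base_addr + E8390_CMD);
+ */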
+
+/*
+ * Only generate indirect loads given a machine that needs them.
+ * - removed AMIGA_PCMCIA from this list, handled as ISA I/O now
+ * - the _p forms generate no delay by default; 8390p.c overrides this.
+ */
+
+#ifndef ei_inb
+#define ei_inb(_p) inb(_p)
+#define ei_outb(_v,_p) outb(_v,_p)
+#define ei_inb_p(_p) inb(_p)
+#define ei_outb_p(_v,_p) outb(_v,_p)
+#endif
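+
+/*
+ * Illustrative note: a build needing delayed or remapped accessors
+ * defines ei_inb()/ei_outb() (and the _p forms) before lib8390.c is
+ * included; 8390p.c does exactly this with inb_p()/outb_p().
+ */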
+
+#ifndef EI_SHIFT
+#define EI_SHIFT(x) (x)
+#endif
+
+#define E8390_CMD EI_SHIFT(0x00) /* The command register (for all pages) */
+/* Page 0 register offsets. */
+#define EN0_CLDALO EI_SHIFT(0x01) /* Low byte of current local dma addr RD */
+#define EN0_STARTPG EI_SHIFT(0x01) /* Starting page of ring bfr WR */
+#define EN0_CLDAHI EI_SHIFT(0x02) /* High byte of current local dma addr RD */
+#define EN0_STOPPG EI_SHIFT(0x02) /* Ending page +1 of ring bfr WR */
+#define EN0_BOUNDARY EI_SHIFT(0x03) /* Boundary page of ring bfr RD WR */
+#define EN0_TSR EI_SHIFT(0x04) /* Transmit status reg RD */
+#define EN0_TPSR EI_SHIFT(0x04) /* Transmit starting page WR */
+#define EN0_NCR EI_SHIFT(0x05) /* Number of collision reg RD */
+#define EN0_TCNTLO EI_SHIFT(0x05) /* Low byte of tx byte count WR */
+#define EN0_FIFO EI_SHIFT(0x06) /* FIFO RD */
+#define EN0_TCNTHI EI_SHIFT(0x06) /* High byte of tx byte count WR */
+#define EN0_ISR EI_SHIFT(0x07) /* Interrupt status reg RD WR */
+#define EN0_CRDALO EI_SHIFT(0x08) /* low byte of current remote dma address RD */
+#define EN0_RSARLO EI_SHIFT(0x08) /* Remote start address reg 0 */
+#define EN0_CRDAHI EI_SHIFT(0x09) /* high byte, current remote dma address RD */
+#define EN0_RSARHI EI_SHIFT(0x09) /* Remote start address reg 1 */
+#define EN0_RCNTLO EI_SHIFT(0x0a) /* Remote byte count reg WR */
+#define EN0_RCNTHI EI_SHIFT(0x0b) /* Remote byte count reg WR */
+#define EN0_RSR EI_SHIFT(0x0c) /* rx status reg RD */
+#define EN0_RXCR EI_SHIFT(0x0c) /* RX configuration reg WR */
+#define EN0_TXCR EI_SHIFT(0x0d) /* TX configuration reg WR */
+#define EN0_COUNTER0 EI_SHIFT(0x0d) /* Rcv alignment error counter RD */
+#define EN0_DCFG EI_SHIFT(0x0e) /* Data configuration reg WR */
+#define EN0_COUNTER1 EI_SHIFT(0x0e) /* Rcv CRC error counter RD */
+#define EN0_IMR EI_SHIFT(0x0f) /* Interrupt mask reg WR */
+#define EN0_COUNTER2 EI_SHIFT(0x0f) /* Rcv missed frame error counter RD */
+
+/* Bits in EN0_ISR - Interrupt status register */
+#define ENISR_RX 0x01 /* Receiver, no error */
+#define ENISR_TX 0x02 /* Transmitter, no error */
+#define ENISR_RX_ERR 0x04 /* Receiver, with error */
+#define ENISR_TX_ERR 0x08 /* Transmitter, with error */
+#define ENISR_OVER 0x10 /* Receiver overwrote the ring */
+#define ENISR_COUNTERS 0x20 /* Counters need emptying */
+#define ENISR_RDC 0x40 /* remote dma complete */
+#define ENISR_RESET 0x80 /* Reset completed */
+#define ENISR_ALL 0x3f /* Interrupts we will enable */
+
+/* Bits in EN0_DCFG - Data config register */
+#define ENDCFG_WTS 0x01 /* word transfer mode selection */
+#define ENDCFG_BOS 0x02 /* byte order selection */
+
+/* Page 1 register offsets. */
+#define EN1_PHYS EI_SHIFT(0x01) /* This board's physical enet addr RD WR */
+#define EN1_PHYS_SHIFT(i) EI_SHIFT((i)+1) /* Get and set mac address */
+#define EN1_CURPAG EI_SHIFT(0x07) /* Current memory page RD WR */
+#define EN1_MULT EI_SHIFT(0x08) /* Multicast filter mask array (8 bytes) RD WR */
+#define EN1_MULT_SHIFT(i) EI_SHIFT(8+(i)) /* Get and set multicast filter */
+
+/* Bits in received packet status byte and EN0_RSR*/
+#define ENRSR_RXOK 0x01 /* Received a good packet */
+#define ENRSR_CRC 0x02 /* CRC error */
+#define ENRSR_FAE 0x04 /* frame alignment error */
+#define ENRSR_FO 0x08 /* FIFO overrun */
+#define ENRSR_MPA 0x10 /* missed pkt */
+#define ENRSR_PHY 0x20 /* physical/multicast address */
+#define ENRSR_DIS 0x40 /* receiver disable. set in monitor mode */
+#define ENRSR_DEF 0x80 /* deferring */
+
+/* Transmitted packet status, EN0_TSR. */
+#define ENTSR_PTX 0x01 /* Packet transmitted without error */
+#define ENTSR_ND 0x02 /* The transmit wasn't deferred. */
+#define ENTSR_COL 0x04 /* The transmit collided at least once. */
+#define ENTSR_ABT 0x08 /* The transmit collided 16 times and was aborted. */
+#define ENTSR_CRS 0x10 /* The carrier sense was lost. */
+#define ENTSR_FU 0x20 /* A "FIFO underrun" occurred during transmit. */
+#define ENTSR_CDH 0x40 /* The collision detect "heartbeat" signal was lost. */
+#define ENTSR_OWC 0x80 /* There was an out-of-window collision. */
+
+#endif /* _8390_h */
diff --git a/drivers/net/ethernet/8390/8390p.c b/drivers/net/ethernet/8390/8390p.c
new file mode 100644
index 000000000000..e8fc2e87e840
--- /dev/null
+++ b/drivers/net/ethernet/8390/8390p.c
@@ -0,0 +1,105 @@
+/* 8390 core for ISA devices needing bus delays */
+
+static const char version[] =
+ "8390p.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#define ei_inb(_p) inb(_p)
+#define ei_outb(_v, _p) outb(_v, _p)
+#define ei_inb_p(_p) inb_p(_p)
+#define ei_outb_p(_v, _p) outb_p(_v, _p)
+
+#include "lib8390.c"
+
+int eip_open(struct net_device *dev)
+{
+ return __ei_open(dev);
+}
+EXPORT_SYMBOL(eip_open);
+
+int eip_close(struct net_device *dev)
+{
+ return __ei_close(dev);
+}
+EXPORT_SYMBOL(eip_close);
+
+netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ return __ei_start_xmit(skb, dev);
+}
+EXPORT_SYMBOL(eip_start_xmit);
+
+struct net_device_stats *eip_get_stats(struct net_device *dev)
+{
+ return __ei_get_stats(dev);
+}
+EXPORT_SYMBOL(eip_get_stats);
+
+void eip_set_multicast_list(struct net_device *dev)
+{
+ __ei_set_multicast_list(dev);
+}
+EXPORT_SYMBOL(eip_set_multicast_list);
+
+void eip_tx_timeout(struct net_device *dev)
+{
+ __ei_tx_timeout(dev);
+}
+EXPORT_SYMBOL(eip_tx_timeout);
+
+irqreturn_t eip_interrupt(int irq, void *dev_id)
+{
+ return __ei_interrupt(irq, dev_id);
+}
+EXPORT_SYMBOL(eip_interrupt);
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+void eip_poll(struct net_device *dev)
+{
+ __ei_poll(dev);
+}
+EXPORT_SYMBOL(eip_poll);
+#endif
+
+const struct net_device_ops eip_netdev_ops = {
+ .ndo_open = eip_open,
+ .ndo_stop = eip_close,
+ .ndo_start_xmit = eip_start_xmit,
+ .ndo_tx_timeout = eip_tx_timeout,
+ .ndo_get_stats = eip_get_stats,
+ .ndo_set_rx_mode = eip_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = eip_poll,
+#endif
+};
+EXPORT_SYMBOL(eip_netdev_ops);
+
+struct net_device *__alloc_eip_netdev(int size)
+{
+ struct net_device *dev = ____alloc_ei_netdev(size);
+ if (dev)
+ dev->netdev_ops = &eip_netdev_ops;
+ return dev;
+}
+EXPORT_SYMBOL(__alloc_eip_netdev);
+
+void NS8390p_init(struct net_device *dev, int startp)
+{
+ __NS8390_init(dev, startp);
+}
+EXPORT_SYMBOL(NS8390p_init);
+
+static int __init NS8390p_init_module(void)
+{
+ return 0;
+}
+
+static void __exit NS8390p_cleanup_module(void)
+{
+}
+
+module_init(NS8390p_init_module);
+module_exit(NS8390p_cleanup_module);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
new file mode 100644
index 000000000000..e04ade444247
--- /dev/null
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -0,0 +1,337 @@
+#
+# 8390 device configuration
+#
+
+config NET_VENDOR_8390
+ bool "National Semi-conductor 8390 devices"
+ default y
+ depends on NET_VENDOR_NATSEMI && (AMIGA_PCMCIA || PCI || SUPERH || \
+ ISA || MCA || EISA || MAC || M32R || MACH_TX49XX || \
+ MCA_LEGACY || H8300 || ARM || MIPS || ZORRO || PCMCIA || \
+ EXPERIMENTAL)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about 8390-based cards. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+if NET_VENDOR_8390
+
+config EL2
+ tristate "3c503 \"EtherLink II\" support"
+ depends on ISA
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called 3c503.
+
+config AC3200
+ tristate "Ansel Communications EISA 3200 support (EXPERIMENTAL)"
+ depends on PCI && (ISA || EISA) && EXPERIMENTAL
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ac3200.
+
+config PCMCIA_AXNET
+ tristate "Asix AX88190 PCMCIA support"
+ depends on PCMCIA
+ ---help---
+ Say Y here if you intend to attach an Asix AX88190-based PCMCIA
+ (PC-card) Fast Ethernet card to your computer. These cards are
+ nearly NE2000 compatible but need a separate driver due to a few
+ misfeatures.
+
+ To compile this driver as a module, choose M here: the module will be
+ called axnet_cs. If unsure, say N.
+
+config AX88796
+ tristate "ASIX AX88796 NE2000 clone support"
+ depends on (ARM || MIPS || SUPERH)
+ select PHYLIB
+ select MDIO_BITBANG
+ ---help---
+ AX88796 driver, using the platform bus to provide
+ chip detection and resources.
+
+config AX88796_93CX6
+ bool "ASIX AX88796 external 93CX6 eeprom support"
+ depends on AX88796
+ select EEPROM_93CX6
+ ---help---
+ Select this if your platform comes with an external 93CX6 eeprom.
+
+config E2100
+ tristate "Cabletron E21xx support"
+ depends on ISA
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called e2100.
+
+config ES3210
+ tristate "Racal-Interlan EISA ES3210 support (EXPERIMENTAL)"
+ depends on PCI && EISA && EXPERIMENTAL
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called es3210.
+
+config HPLAN_PLUS
+ tristate "HP PCLAN+ (27247B and 27252A) support"
+ depends on ISA
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called hp-plus.
+
+config HPLAN
+ tristate "HP PCLAN (27245 and other 27xxx series) support"
+ depends on ISA
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called hp.
+
+config HYDRA
+ tristate "Hydra support"
+ depends on ZORRO
+ select CRC32
+ ---help---
+ If you have a Hydra Ethernet adapter, say Y. Otherwise, say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hydra.
+
+config ARM_ETHERH
+ tristate "I-cubed EtherH/ANT EtherM support"
+ depends on ARM && ARCH_ACORN
+ select CRC32
+ ---help---
+ If you have an Acorn system with one of these network cards, you
+ should say Y to this option if you wish to use it with Linux.
+
+config LNE390
+ tristate "Mylex EISA LNE390A/B support (EXPERIMENTAL)"
+ depends on PCI && EISA && EXPERIMENTAL
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called lne390.
+
+config MAC8390
+ bool "Macintosh NS 8390 based ethernet cards"
+ depends on MAC
+ select CRC32
+ ---help---
+ If you want to include a driver to support Nubus or LC-PDS
+ Ethernet cards using an NS8390 chipset or its equivalent, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+config NE2000
+ tristate "NE2000/NE1000 support"
+ depends on (ISA || (Q40 && m) || M32R || MACH_TX49XX)
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Many Ethernet cards
+ without a specific driver are compatible with NE2000.
+
+ If you have a PCI NE2000 card however, say N here and Y to "PCI
+ NE2000 and clone support" under "EISA, VLB, PCI and on board
+ controllers" below. If you have a NE2000 card and are running on
+ an MCA system (a bus system used on some IBM PS/2 computers and
+ laptops), say N here and Y to "NE/2 (ne2000 MCA version) support",
+ below.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ne.
+
+config NE2_MCA
+ tristate "NE/2 (ne2000 MCA version) support"
+ depends on MCA_LEGACY
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ne2.
+
+config NE2K_PCI
+ tristate "PCI NE2000 and clones support (see help)"
+ depends on PCI
+ select CRC32
+ ---help---
+ This driver is for NE2000 compatible PCI cards. It will not work
+ with ISA NE2000 cards (they have their own driver, "NE2000/NE1000
+ support" below). If you have a PCI NE2000 network (Ethernet) card,
+ say Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ This driver also works for the following NE2000 clone cards:
+ RealTek RTL-8029, Winbond 89C940, Compex RL2000, KTI ET32P2,
+ NetVin NV5000SC, Via 86C926, SureCom NE34, Winbond,
+ Holtek HT80232 and Holtek HT80229.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ne2k-pci.
+
+config APNE
+ tristate "PCMCIA NE2000 support"
+ depends on AMIGA_PCMCIA
+ select CRC32
+ ---help---
+ If you have a PCMCIA NE2000 compatible adapter, say Y. Otherwise,
+ say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called apne.
+
+config NE3210
+ tristate "Novell/Eagle/Microdyne NE3210 EISA support (EXPERIMENTAL)"
+ depends on PCI && EISA && EXPERIMENTAL
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Note that this driver
+ will NOT WORK for NE3200 cards as they are completely different.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ne3210.
+
+config PCMCIA_PCNET
+ tristate "NE2000 compatible PCMCIA support"
+ depends on PCMCIA
+ select CRC32
+ ---help---
+ Say Y here if you intend to attach an NE2000 compatible PCMCIA
+ (PC-card) Ethernet or Fast Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called pcnet_cs. If unsure, say N.
+
+config NE_H8300
+ tristate "NE2000 compatible support for H8/300"
+ depends on H8300
+ ---help---
+ Say Y here if you want to use the NE2000 compatible
+ controller on the Renesas H8/300 processor.
+
+config STNIC
+ tristate "National DP83902AV support"
+ depends on SUPERH
+ select CRC32
+ ---help---
+ Support for cards based on the National Semiconductor DP83902AV
+ ST-NIC Serial Network Interface Controller for Twisted Pair. This
+ is a 10Mbit/sec Ethernet controller. Product overview and specs at
+ <http://www.national.com/pf/DP/DP83902A.html>.
+
+ If unsure, say N.
+
+config ULTRAMCA
+ tristate "SMC Ultra MCA support"
+ depends on MCA
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type and are running
+ an MCA based system (PS/2), say Y and read the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called smc-mca.
+
+config ULTRA
+ tristate "SMC Ultra support"
+ depends on ISA
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Important: There have been many reports that, on some motherboards,
+ mixing an SMC Ultra and an Adaptec AHA154x SCSI card (or a compatible
+ card, such as some BusLogic models) causes corruption problems with
+ many operating systems. The Linux smc-ultra driver has a work-around
+ for this, but keep it in mind if you have such a SCSI card and run
+ into problems.
+
+ To compile this driver as a module, choose M here. The module
+ will be called smc-ultra.
+
+config ULTRA32
+ tristate "SMC Ultra32 EISA support"
+ depends on EISA
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called smc-ultra32.
+
+config WD80x3
+ tristate "WD80*3 support"
+ depends on ISA
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called wd.
+
+config ZORRO8390
+ tristate "Zorro NS8390-based Ethernet support"
+ depends on ZORRO
+ select CRC32
+ ---help---
+ This driver is for Zorro Ethernet cards using an NS8390-compatible
+ chipset, like the Village Tronic Ariadne II and the Individual
+ Computers X-Surf Ethernet cards. If you have such a card, say Y.
+ Otherwise, say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called zorro8390.
+
+endif # NET_VENDOR_8390
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
new file mode 100644
index 000000000000..3337d7fb4344
--- /dev/null
+++ b/drivers/net/ethernet/8390/Makefile
@@ -0,0 +1,29 @@
+#
+# Makefile for the 8390 network device drivers.
+#
+
+obj-$(CONFIG_MAC8390) += mac8390.o
+obj-$(CONFIG_AC3200) += ac3200.o 8390.o
+obj-$(CONFIG_APNE) += apne.o 8390.o
+obj-$(CONFIG_ARM_ETHERH) += etherh.o
+obj-$(CONFIG_AX88796) += ax88796.o
+obj-$(CONFIG_E2100) += e2100.o 8390.o
+obj-$(CONFIG_EL2) += 3c503.o 8390p.o
+obj-$(CONFIG_ES3210) += es3210.o 8390.o
+obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o
+obj-$(CONFIG_HPLAN) += hp.o 8390p.o
+obj-$(CONFIG_HYDRA) += hydra.o 8390.o
+obj-$(CONFIG_LNE390) += lne390.o 8390.o
+obj-$(CONFIG_NE2000) += ne.o 8390p.o
+obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o
+obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o
+obj-$(CONFIG_NE3210) += ne3210.o 8390.o
+obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
+obj-$(CONFIG_PCMCIA_AXNET) += axnet_cs.o 8390.o
+obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o
+obj-$(CONFIG_STNIC) += stnic.o 8390.o
+obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o
+obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o
+obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o
+obj-$(CONFIG_WD80x3) += wd.o 8390.o
+obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o
diff --git a/drivers/net/ethernet/8390/ac3200.c b/drivers/net/ethernet/8390/ac3200.c
new file mode 100644
index 000000000000..5337dd0a59b0
--- /dev/null
+++ b/drivers/net/ethernet/8390/ac3200.c
@@ -0,0 +1,432 @@
+/* ac3200.c: A driver for the Ansel Communications EISA ethernet adaptor. */
+/*
+ Written 1993, 1994 by Donald Becker.
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. This software may only be used and distributed
+ according to the terms of the GNU General Public License as modified by SRC,
+ incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ This is the driver for the Ansel Communications Model 3200 EISA Ethernet LAN
+ Adapter. The programming information is from the user's manual, as related
+ by glee@ardnassak.math.clemson.edu.
+
+ Changelog:
+
+ Paul Gortmaker 05/98 : add support for shared mem above 1MB.
+
+ */
+
+static const char version[] =
+ "ac3200.c:v1.01 7/1/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+#include <linux/eisa.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "8390.h"
+
+#define DRV_NAME "ac3200"
+
+/* Offsets from the base address. */
+#define AC_NIC_BASE 0x00
+#define AC_SA_PROM 0x16 /* The station address PROM. */
+#define AC_ADDR0 0x00 /* Prefix station address values. */
+#define AC_ADDR1 0x40
+#define AC_ADDR2 0x90
+#define AC_ID_PORT 0xC80
+#define AC_EISA_ID 0x0110d305
+#define AC_RESET_PORT 0xC84
+#define AC_RESET 0x00
+#define AC_ENABLE 0x01
+#define AC_CONFIG 0xC90 /* The configuration port. */
+
+#define AC_IO_EXTENT 0x20
+ /* Actually accessed is:
+ * AC_NIC_BASE (0-15)
+ * AC_SA_PROM (0-5)
+ * AC_ID_PORT (0-3)
+ * AC_RESET_PORT
+ * AC_CONFIG
+ */
+
+/* Decoding of the configuration register. */
+static unsigned char config2irqmap[8] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
+static int addrmap[8] =
+{0xFF0000, 0xFE0000, 0xFD0000, 0xFFF0000, 0xFFE0000, 0xFFC0000, 0xD0000, 0 };
+static const char *port_name[4] = { "10baseT", "invalid", "AUI", "10base2"};
+
+#define config2irq(configval) config2irqmap[((configval) >> 3) & 7]
+#define config2mem(configval) addrmap[(configval) & 7]
+#define config2name(configval) port_name[((configval) >> 6) & 3]
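+
+/*
+ * Worked example (hypothetical config byte): 0xCA decodes as
+ * config2irq(0xCA) = config2irqmap[1] = IRQ 12,
+ * config2mem(0xCA) = addrmap[2] = 0xFD0000, and
+ * config2name(0xCA) = port_name[3] = "10base2".
+ */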
+
+/* First and last 8390 pages. */
+#define AC_START_PG 0x00 /* First page of 8390 TX buffer */
+#define AC_STOP_PG 0x80 /* Last page +1 of the 8390 RX ring */
+
+static int ac_probe1(int ioaddr, struct net_device *dev);
+
+static int ac_open(struct net_device *dev);
+static void ac_reset_8390(struct net_device *dev);
+static void ac_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ac_block_output(struct net_device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+static void ac_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+
+static int ac_close_card(struct net_device *dev);
+
+
+/* Probe for the AC3200.
+
+ The AC3200 can be identified by either the EISA configuration registers,
+ or the unique value in the station address PROM.
+ */
+
+static int __init do_ac3200_probe(struct net_device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+ int irq = dev->irq;
+ int mem_start = dev->mem_start;
+
+ if (ioaddr > 0x1ff) /* Check a single specified location. */
+ return ac_probe1(ioaddr, dev);
+ else if (ioaddr > 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ if ( ! EISA_bus)
+ return -ENXIO;
+
+ for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
+ if (ac_probe1(ioaddr, dev) == 0)
+ return 0;
+ dev->irq = irq;
+ dev->mem_start = mem_start;
+ }
+
+ return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init ac3200_probe(int unit)
+{
+ struct net_device *dev = alloc_ei_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_ac3200_probe(dev);
+ if (err)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static const struct net_device_ops ac_netdev_ops = {
+ .ndo_open = ac_open,
+ .ndo_stop = ac_close_card,
+
+ .ndo_start_xmit = ei_start_xmit,
+ .ndo_tx_timeout = ei_tx_timeout,
+ .ndo_get_stats = ei_get_stats,
+ .ndo_set_rx_mode = ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ei_poll,
+#endif
+};
+
+static int __init ac_probe1(int ioaddr, struct net_device *dev)
+{
+ int i, retval;
+
+ if (!request_region(ioaddr, AC_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ if (inb_p(ioaddr + AC_ID_PORT) == 0xff) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ if (inl(ioaddr + AC_ID_PORT) != AC_EISA_ID) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+#ifndef final_version
+ printk(KERN_DEBUG "AC3200 ethercard configuration register is %#02x,"
+ " EISA ID %02x %02x %02x %02x.\n", inb(ioaddr + AC_CONFIG),
+ inb(ioaddr + AC_ID_PORT + 0), inb(ioaddr + AC_ID_PORT + 1),
+ inb(ioaddr + AC_ID_PORT + 2), inb(ioaddr + AC_ID_PORT + 3));
+#endif
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + AC_SA_PROM + i);
+
+ printk(KERN_DEBUG "AC3200 in EISA slot %d, node %pM",
+ ioaddr/0x1000, dev->dev_addr);
+#if 0
+ /* Check the vendor ID/prefix. Redundant after checking the EISA ID */
+ if (inb(ioaddr + AC_SA_PROM + 0) != AC_ADDR0
+ || inb(ioaddr + AC_SA_PROM + 1) != AC_ADDR1
+ || inb(ioaddr + AC_SA_PROM + 2) != AC_ADDR2 ) {
+ printk(", not found (invalid prefix).\n");
+ retval = -ENODEV;
+ goto out;
+ }
+#endif
+
+ /* Assign and allocate the interrupt now. */
+ if (dev->irq == 0) {
+ dev->irq = config2irq(inb(ioaddr + AC_CONFIG));
+ printk(", using");
+ } else {
+ dev->irq = irq_canonicalize(dev->irq);
+ printk(", assigning");
+ }
+
+ retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev);
+ if (retval) {
+ printk (" nothing! Unable to get IRQ %d.\n", dev->irq);
+ goto out;
+ }
+
+ printk(" IRQ %d, %s port\n", dev->irq, port_name[dev->if_port]);
+
+ dev->base_addr = ioaddr;
+
+#ifdef notyet
+ if (dev->mem_start) { /* Override the value from the board. */
+ for (i = 0; i < 7; i++)
+ if (addrmap[i] == dev->mem_start)
+ break;
+ if (i >= 7)
+ i = 0;
+ outb((inb(ioaddr + AC_CONFIG) & ~7) | i, ioaddr + AC_CONFIG);
+ }
+#endif
+
+ dev->if_port = inb(ioaddr + AC_CONFIG) >> 6;
+ dev->mem_start = config2mem(inb(ioaddr + AC_CONFIG));
+
+ printk("%s: AC3200 at %#3x with %dkB memory at physical address %#lx.\n",
+ dev->name, ioaddr, AC_STOP_PG/4, dev->mem_start);
+
+ /*
+ * BEWARE!! Some dain-bramaged EISA SCUs will allow you to put
+ * the card mem within the region covered by `normal' RAM !!!
+ *
+ * ioremap() will fail in that case.
+ */
+ ei_status.mem = ioremap(dev->mem_start, AC_STOP_PG*0x100);
+ if (!ei_status.mem) {
+ printk(KERN_ERR "ac3200.c: Unable to remap card memory above 1MB !!\n");
+ printk(KERN_ERR "ac3200.c: Try using EISA SCU to set memory below 1MB.\n");
+ printk(KERN_ERR "ac3200.c: Driver NOT installed.\n");
+ retval = -EINVAL;
+ goto out1;
+ }
+ printk("ac3200.c: remapped %dkB card memory to virtual address %p\n",
+ AC_STOP_PG/4, ei_status.mem);
+
+ dev->mem_start = (unsigned long)ei_status.mem;
+ dev->mem_end = dev->mem_start + (AC_STOP_PG - AC_START_PG)*256;
+
+ ei_status.name = "AC3200";
+ ei_status.tx_start_page = AC_START_PG;
+ ei_status.rx_start_page = AC_START_PG + TX_PAGES;
+ ei_status.stop_page = AC_STOP_PG;
+ ei_status.word16 = 1;
+
+ if (ei_debug > 0)
+ printk(version);
+
+ ei_status.reset_8390 = &ac_reset_8390;
+ ei_status.block_input = &ac_block_input;
+ ei_status.block_output = &ac_block_output;
+ ei_status.get_8390_hdr = &ac_get_8390_hdr;
+
+ dev->netdev_ops = &ac_netdev_ops;
+ NS8390_init(dev, 0);
+
+ retval = register_netdev(dev);
+ if (retval)
+ goto out2;
+ return 0;
+out2:
+ if (ei_status.reg0)
+ iounmap(ei_status.mem);
+out1:
+ free_irq(dev->irq, dev);
+out:
+ release_region(ioaddr, AC_IO_EXTENT);
+ return retval;
+}
+
+static int ac_open(struct net_device *dev)
+{
+#ifdef notyet
+ /* Someday we may enable the IRQ and shared memory here. */
+ int ioaddr = dev->base_addr;
+#endif
+
+ ei_open(dev);
+ return 0;
+}
+
+static void ac_reset_8390(struct net_device *dev)
+{
+ ushort ioaddr = dev->base_addr;
+
+ outb(AC_RESET, ioaddr + AC_RESET_PORT);
+ if (ei_debug > 1) printk("resetting AC3200, t=%ld...", jiffies);
+
+ ei_status.txing = 0;
+ outb(AC_ENABLE, ioaddr + AC_RESET_PORT);
+ if (ei_debug > 1) printk("reset done\n");
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void
+ac_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ void __iomem *hdr_start = ei_status.mem + ((ring_page - AC_START_PG)<<8);
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+}
+
+/* Block input and output are easy on shared memory ethercards, the only
+ complication is when the ring buffer wraps. */
+
+static void ac_block_input(struct net_device *dev, int count, struct sk_buff *skb,
+ int ring_offset)
+{
+ void __iomem *start = ei_status.mem + ring_offset - AC_START_PG*256;
+
+ if (ring_offset + count > AC_STOP_PG*256) {
+ /* We must wrap the input move. */
+ int semi_count = AC_STOP_PG*256 - ring_offset;
+ memcpy_fromio(skb->data, start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count,
+ ei_status.mem + TX_PAGES*256, count);
+ } else {
+ memcpy_fromio(skb->data, start, count);
+ }
+}
+
+static void ac_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ void __iomem *shmem = ei_status.mem + ((start_page - AC_START_PG)<<8);
+
+ memcpy_toio(shmem, buf, count);
+}
+
+static int ac_close_card(struct net_device *dev)
+{
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+#ifdef notyet
+ /* We should someday disable shared memory and interrupts. */
+ outb(0x00, ioaddr + 6); /* Disable interrupts. */
+ free_irq(dev->irq, dev);
+#endif
+
+ ei_close(dev);
+ return 0;
+}
+
+#ifdef MODULE
+#define MAX_AC32_CARDS 4 /* Max number of AC32 cards per module */
+static struct net_device *dev_ac32[MAX_AC32_CARDS];
+static int io[MAX_AC32_CARDS];
+static int irq[MAX_AC32_CARDS];
+static int mem[MAX_AC32_CARDS];
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(mem, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s)");
+MODULE_PARM_DESC(mem, "Memory base address(es)");
+MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver");
+MODULE_LICENSE("GPL");
+
+static int __init ac3200_module_init(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) {
+ if (io[this_dev] == 0 && this_dev != 0)
+ break;
+ dev = alloc_ei_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_start = mem[this_dev]; /* Currently ignored by driver */
+ if (do_ac3200_probe(dev) == 0) {
+ dev_ac32[found++] = dev;
+ continue;
+ }
+ free_netdev(dev);
+ printk(KERN_WARNING "ac3200.c: No ac3200 card found (i/o = 0x%x).\n", io[this_dev]);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ /* Someday free_irq may be in ac_close_card() */
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, AC_IO_EXTENT);
+ iounmap(ei_status.mem);
+}
+
+static void __exit ac3200_module_exit(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) {
+ struct net_device *dev = dev_ac32[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+module_init(ac3200_module_init);
+module_exit(ac3200_module_exit);
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
new file mode 100644
index 000000000000..547737340cbb
--- /dev/null
+++ b/drivers/net/ethernet/8390/apne.c
@@ -0,0 +1,620 @@
+/*
+ * Amiga Linux/68k 8390 based PCMCIA Ethernet Driver for the Amiga 1200
+ *
+ * (C) Copyright 1997 Alain Malek
+ * (Alain.Malek@cryogen.com)
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is based on
+ *
+ * ne.c: A general non-shared-memory NS8390 ethernet driver for linux
+ * Written 1992-94 by Donald Becker.
+ *
+ * 8390.c: A general NS8390 ethernet driver core for linux.
+ * Written 1992-94 by Donald Becker.
+ *
+ * cnetdevice: A Sana-II ethernet driver for AmigaOS
+ * Written by Bruce Abbott (bhabbott@inhb.co.nz)
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/setup.h>
+#include <asm/amigaints.h>
+#include <asm/amigahw.h>
+#include <asm/amigayle.h>
+#include <asm/amipcmcia.h>
+
+#include "8390.h"
+
+/* ---- No user-serviceable parts below ---- */
+
+#define DRV_NAME "apne"
+
+#define NE_BASE (dev->base_addr)
+#define NE_CMD 0x00
+#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */
+#define NE_IO_EXTENT 0x20
+
+#define NE_EN0_ISR 0x07
+#define NE_EN0_DCFG 0x0e
+
+#define NE_EN0_RSARLO 0x08
+#define NE_EN0_RSARHI 0x09
+#define NE_EN0_RCNTLO 0x0a
+#define NE_EN0_RXCR 0x0c
+#define NE_EN0_TXCR 0x0d
+#define NE_EN0_RCNTHI 0x0b
+#define NE_EN0_IMR 0x0f
+
+#define NE1SM_START_PG 0x20 /* First page of TX buffer */
+#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+
+struct net_device * __init apne_probe(int unit);
+static int apne_probe1(struct net_device *dev, int ioaddr);
+
+static void apne_reset_8390(struct net_device *dev);
+static void apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void apne_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void apne_block_output(struct net_device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+static irqreturn_t apne_interrupt(int irq, void *dev_id);
+
+static int init_pcmcia(void);
+
+/* I/O base address used for the NIC */
+
+#define IOBASE 0x300
+
+/*
+ Use MANUAL_CONFIG and MANUAL_OFFSET to enable I/O by hand.
+ You can find the values to use by looking at the cnet.device
+ config file example (the default values are for the CNET40BC card).
+*/
+
+/*
+#define MANUAL_CONFIG 0x20
+#define MANUAL_OFFSET 0x3f8
+
+#define MANUAL_HWADDR0 0x00
+#define MANUAL_HWADDR1 0x12
+#define MANUAL_HWADDR2 0x34
+#define MANUAL_HWADDR3 0x56
+#define MANUAL_HWADDR4 0x78
+#define MANUAL_HWADDR5 0x9a
+*/
+
+static const char version[] =
+ "apne.c:v1.1 7/10/98 Alain Malek (Alain.Malek@cryogen.ch)\n";
+
+static int apne_owned; /* signal if card already owned */
+
+struct net_device * __init apne_probe(int unit)
+{
+ struct net_device *dev;
+#ifndef MANUAL_CONFIG
+ char tuple[8];
+#endif
+ int err;
+
+ if (!MACH_IS_AMIGA)
+ return ERR_PTR(-ENODEV);
+
+ if (apne_owned)
+ return ERR_PTR(-ENODEV);
+
+ if ( !(AMIGAHW_PRESENT(PCMCIA)) )
+ return ERR_PTR(-ENODEV);
+
+ printk("Looking for PCMCIA ethernet card : ");
+
+ /* check if a card is inserted */
+ if (!(PCMCIA_INSERTED)) {
+ printk("NO PCMCIA card inserted\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ dev = alloc_ei_netdev();
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ /* disable pcmcia irq for readtuple */
+ pcmcia_disable_irq();
+
+#ifndef MANUAL_CONFIG
+ if ((pcmcia_copy_tuple(CISTPL_FUNCID, tuple, 8) < 3) ||
+ (tuple[2] != CISTPL_FUNCID_NETWORK)) {
+ printk("not an ethernet card\n");
+ /* XXX: shouldn't we re-enable irq here? */
+ free_netdev(dev);
+ return ERR_PTR(-ENODEV);
+ }
+#endif
+
+ printk("ethernet PCMCIA card inserted\n");
+
+ if (!init_pcmcia()) {
+ /* XXX: shouldn't we re-enable irq here? */
+ free_netdev(dev);
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (!request_region(IOBASE, 0x20, DRV_NAME)) {
+ free_netdev(dev);
+ return ERR_PTR(-EBUSY);
+ }
+
+ err = apne_probe1(dev, IOBASE);
+ if (err) {
+ release_region(IOBASE, 0x20);
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+ err = register_netdev(dev);
+ if (!err)
+ return dev;
+
+ pcmcia_disable_irq();
+ free_irq(IRQ_AMIGA_PORTS, dev);
+ pcmcia_reset();
+ release_region(IOBASE, 0x20);
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static int __init apne_probe1(struct net_device *dev, int ioaddr)
+{
+ int i;
+ unsigned char SA_prom[32];
+ int wordlength = 2;
+ const char *name = NULL;
+ int start_page, stop_page;
+#ifndef MANUAL_HWADDR0
+ int neX000, ctron;
+#endif
+ static unsigned version_printed;
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ printk("PCMCIA NE*000 ethercard probe");
+
+ /* Reset card. Who knows what dain-bramaged state it was left in. */
+ { unsigned long reset_start_time = jiffies;
+
+ outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET);
+
+ while ((inb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0)
+ if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
+ printk(" not found (no reset ack).\n");
+ return -ENODEV;
+ }
+
+ outb(0xff, ioaddr + NE_EN0_ISR); /* Ack all intr. */
+ }
+
+#ifndef MANUAL_HWADDR0
+
+ /* Read the 16 bytes of station address PROM.
+ We must first initialize registers, similar to NS8390_init(eifdev, 0).
+ We can't reliably read the SAPROM address without this.
+ (I learned the hard way!). */
+ {
+ struct {unsigned long value, offset; } program_seq[] = {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, NE_CMD}, /* Select page 0*/
+ {0x48, NE_EN0_DCFG}, /* Set byte-wide (0x48) access. */
+ {0x00, NE_EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, NE_EN0_RCNTHI},
+ {0x00, NE_EN0_IMR}, /* Mask completion irq. */
+ {0xFF, NE_EN0_ISR},
+ {E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode. */
+ {32, NE_EN0_RCNTLO},
+ {0x00, NE_EN0_RCNTHI},
+ {0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000. */
+ {0x00, NE_EN0_RSARHI},
+ {E8390_RREAD+E8390_START, NE_CMD},
+ };
+ for (i = 0; i < ARRAY_SIZE(program_seq); i++) {
+ outb(program_seq[i].value, ioaddr + program_seq[i].offset);
+ }
+
+ }
+ for(i = 0; i < 32 /*sizeof(SA_prom)*/; i+=2) {
+ SA_prom[i] = inb(ioaddr + NE_DATAPORT);
+ SA_prom[i+1] = inb(ioaddr + NE_DATAPORT);
+ if (SA_prom[i] != SA_prom[i+1])
+ wordlength = 1;
+ }
+
+ /* At this point, wordlength *only* tells us if the SA_prom is doubled
+ up or not because some broken PCI cards don't respect the byte-wide
+ request in program_seq above, and hence don't have doubled up values.
+ These broken cards would otherwise be detected as an ne1000. */
+
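+ /* Word-wide reads doubled each PROM byte; keeping the even offsets
+ (i+i == 2*i) de-doubles the station address. */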
+ if (wordlength == 2)
+ for (i = 0; i < 16; i++)
+ SA_prom[i] = SA_prom[i+i];
+
+ if (wordlength == 2) {
+ /* We must set the 8390 for word mode. */
+ outb(0x49, ioaddr + NE_EN0_DCFG);
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+ } else {
+ start_page = NE1SM_START_PG;
+ stop_page = NE1SM_STOP_PG;
+ }
+
+ neX000 = (SA_prom[14] == 0x57 && SA_prom[15] == 0x57);
+ ctron = (SA_prom[0] == 0x00 && SA_prom[1] == 0x00 && SA_prom[2] == 0x1d);
+
+ /* Set up the rest of the parameters. */
+ if (neX000) {
+ name = (wordlength == 2) ? "NE2000" : "NE1000";
+ } else if (ctron) {
+ name = (wordlength == 2) ? "Ctron-8" : "Ctron-16";
+ start_page = 0x01;
+ stop_page = (wordlength == 2) ? 0x40 : 0x20;
+ } else {
+ printk(" not found.\n");
+ return -ENXIO;
+
+ }
+
+#else
+ wordlength = 2;
+ /* We must set the 8390 for word mode. */
+ outb(0x49, ioaddr + NE_EN0_DCFG);
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+
+ SA_prom[0] = MANUAL_HWADDR0;
+ SA_prom[1] = MANUAL_HWADDR1;
+ SA_prom[2] = MANUAL_HWADDR2;
+ SA_prom[3] = MANUAL_HWADDR3;
+ SA_prom[4] = MANUAL_HWADDR4;
+ SA_prom[5] = MANUAL_HWADDR5;
+ name = "NE2000";
+#endif
+
+ dev->base_addr = ioaddr;
+ dev->irq = IRQ_AMIGA_PORTS;
+ dev->netdev_ops = &ei_netdev_ops;
+
+ /* Install the Interrupt handler */
+ i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev);
+ if (i) return i;
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++)
+ dev->dev_addr[i] = SA_prom[i];
+
+ printk(" %pM\n", dev->dev_addr);
+
+ printk("%s: %s found.\n", dev->name, name);
+
+ ei_status.name = name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+ ei_status.word16 = (wordlength == 2);
+
+ ei_status.rx_start_page = start_page + TX_PAGES;
+
+ ei_status.reset_8390 = &apne_reset_8390;
+ ei_status.block_input = &apne_block_input;
+ ei_status.block_output = &apne_block_output;
+ ei_status.get_8390_hdr = &apne_get_8390_hdr;
+
+ NS8390_init(dev, 0);
+
+ pcmcia_ack_int(pcmcia_get_intreq()); /* ack PCMCIA int req */
+ pcmcia_enable_irq();
+
+ apne_owned = 1;
+
+ return 0;
+}
+
+/* Hard reset the card. This used to pause for the same period that a
+ 8390 reset command required, but that shouldn't be necessary. */
+static void
+apne_reset_8390(struct net_device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+
+ init_pcmcia();
+
+ if (ei_debug > 1) printk("resetting the 8390 t=%ld...", jiffies);
+
+ outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((inb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0)
+ if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
+ printk("%s: ne_reset_8390() did not complete.\n", dev->name);
+ break;
+ }
+ outb(ENISR_RESET, NE_BASE + NE_EN0_ISR); /* Ack intr. */
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void
+apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+
+ int nic_base = dev->base_addr;
+ int cnt;
+ char *ptrc;
+ short *ptrs;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb(ENISR_RDC, nic_base + NE_EN0_ISR);
+ outb(sizeof(struct e8390_pkt_hdr), nic_base + NE_EN0_RCNTLO);
+ outb(0, nic_base + NE_EN0_RCNTHI);
+ outb(0, nic_base + NE_EN0_RSARLO); /* On page boundary */
+ outb(ring_page, nic_base + NE_EN0_RSARHI);
+ outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ if (ei_status.word16) {
+ ptrs = (short*)hdr;
+ for (cnt = 0; cnt < (sizeof(struct e8390_pkt_hdr)>>1); cnt++)
+ *ptrs++ = inw(NE_BASE + NE_DATAPORT);
+ } else {
+ ptrc = (char*)hdr;
+ for (cnt = 0; cnt < sizeof(struct e8390_pkt_hdr); cnt++)
+ *ptrc++ = inb(NE_BASE + NE_DATAPORT);
+ }
+
+ outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+
+ le16_to_cpus(&hdr->count);
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you
+ are porting to a new ethercard, look at the packet driver source for hints.
+ The NEx000 doesn't share the on-board packet memory -- you have to put
+ the packet out through the "remote DMA" dataport using outb. */
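+/* Sketch of the remote-DMA handshake both helpers below follow: program
+ RCNT with the byte count and RSAR with the ring address, issue
+ E8390_RREAD (or E8390_RWRITE) together with E8390_START, move the data
+ through the dataport one inb/inw (or outb/outw) at a time, then ack
+ ENISR_RDC. */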
+
+static void
+apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int nic_base = dev->base_addr;
+ char *buf = skb->data;
+ char *ptrc;
+ short *ptrs;
+ int cnt;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_block_input "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb(ENISR_RDC, nic_base + NE_EN0_ISR);
+ outb(count & 0xff, nic_base + NE_EN0_RCNTLO);
+ outb(count >> 8, nic_base + NE_EN0_RCNTHI);
+ outb(ring_offset & 0xff, nic_base + NE_EN0_RSARLO);
+ outb(ring_offset >> 8, nic_base + NE_EN0_RSARHI);
+ outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ if (ei_status.word16) {
+ ptrs = (short*)buf;
+ for (cnt = 0; cnt < (count>>1); cnt++)
+ *ptrs++ = inw(NE_BASE + NE_DATAPORT);
+ if (count & 0x01) {
+ buf[count-1] = inb(NE_BASE + NE_DATAPORT);
+ }
+ } else {
+ ptrc = (char*)buf;
+ for (cnt = 0; cnt < count; cnt++)
+ *ptrc++ = inb(NE_BASE + NE_DATAPORT);
+ }
+
+ outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+static void
+apne_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ int nic_base = NE_BASE;
+ unsigned long dma_start;
+ char *ptrc;
+ short *ptrs;
+ int cnt;
+
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
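+ /* (A plausible answer, not verified here: in word mode the remote-DMA
+ engine moves whole 16-bit words, so an odd count would strand the
+ final byte; padding by one is harmless because the true frame length
+ is programmed separately when the transmit is triggered.) */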
+ if (ei_status.word16 && (count & 0x01))
+ count++;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_block_output."
+ "[DMAstat:%d][irqlock:%d][intr:%d]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock, dev->irq);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
+
+ outb(ENISR_RDC, nic_base + NE_EN0_ISR);
+
+ /* Now the normal output. */
+ outb(count & 0xff, nic_base + NE_EN0_RCNTLO);
+ outb(count >> 8, nic_base + NE_EN0_RCNTHI);
+ outb(0x00, nic_base + NE_EN0_RSARLO);
+ outb(start_page, nic_base + NE_EN0_RSARHI);
+
+ outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
+ if (ei_status.word16) {
+ ptrs = (short*)buf;
+ for (cnt = 0; cnt < count>>1; cnt++)
+ outw(*ptrs++, NE_BASE+NE_DATAPORT);
+ } else {
+ ptrc = (char*)buf;
+ for (cnt = 0; cnt < count; cnt++)
+ outb(*ptrc++, NE_BASE + NE_DATAPORT);
+ }
+
+ dma_start = jiffies;
+
+ while ((inb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
+ if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
+ printk("%s: timeout waiting for Tx RDC.\n", dev->name);
+ apne_reset_8390(dev);
+ NS8390_init(dev,1);
+ break;
+ }
+
+ outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+static irqreturn_t apne_interrupt(int irq, void *dev_id)
+{
+ unsigned char pcmcia_intreq;
+
+ if (!(gayle.inten & GAYLE_IRQ_IRQ))
+ return IRQ_NONE;
+
+ pcmcia_intreq = pcmcia_get_intreq();
+
+ if (!(pcmcia_intreq & GAYLE_IRQ_IRQ)) {
+ pcmcia_ack_int(pcmcia_intreq);
+ return IRQ_NONE;
+ }
+ if (ei_debug > 3)
+ printk("pcmcia intreq = %x\n", pcmcia_intreq);
+ pcmcia_disable_irq(); /* to get rid of the sti() within ei_interrupt */
+ ei_interrupt(irq, dev_id);
+ pcmcia_ack_int(pcmcia_get_intreq());
+ pcmcia_enable_irq();
+ return IRQ_HANDLED;
+}
+
+#ifdef MODULE
+static struct net_device *apne_dev;
+
+static int __init apne_module_init(void)
+{
+ apne_dev = apne_probe(-1);
+ if (IS_ERR(apne_dev))
+ return PTR_ERR(apne_dev);
+ return 0;
+}
+
+static void __exit apne_module_exit(void)
+{
+ unregister_netdev(apne_dev);
+
+ pcmcia_disable_irq();
+
+ free_irq(IRQ_AMIGA_PORTS, apne_dev);
+
+ pcmcia_reset();
+
+ release_region(IOBASE, 0x20);
+
+ free_netdev(apne_dev);
+}
+module_init(apne_module_init);
+module_exit(apne_module_exit);
+#endif
+
+static int init_pcmcia(void)
+{
+ u_char config;
+#ifndef MANUAL_CONFIG
+ u_char tuple[32];
+ int offset_len;
+#endif
+ u_long offset;
+
+ pcmcia_reset();
+ pcmcia_program_voltage(PCMCIA_0V);
+ pcmcia_access_speed(PCMCIA_SPEED_250NS);
+ pcmcia_write_enable();
+
+#ifdef MANUAL_CONFIG
+ config = MANUAL_CONFIG;
+#else
+ /* get and write config byte to enable IO port */
+
+ if (pcmcia_copy_tuple(CISTPL_CFTABLE_ENTRY, tuple, 32) < 3)
+ return 0;
+
+ config = tuple[2] & 0x3f;
+#endif
+#ifdef MANUAL_OFFSET
+ offset = MANUAL_OFFSET;
+#else
+ if (pcmcia_copy_tuple(CISTPL_CONFIG, tuple, 32) < 6)
+ return 0;
+
+ offset_len = (tuple[2] & 0x3) + 1;
+ offset = 0;
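+ /* the base address is stored LSB-first in the tuple, so walk
+ the bytes backwards while accumulating */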
+ while (offset_len--) {
+ offset = (offset << 8) | tuple[4+offset_len];
+ }
+#endif
+
+ out_8(GAYLE_ATTRIBUTE+offset, config);
+
+ return 1;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
new file mode 100644
index 000000000000..e9f8432f55b4
--- /dev/null
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -0,0 +1,1010 @@
+/* drivers/net/ax88796.c
+ *
+ * Copyright 2005,2007 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * Asix AX88796 10/100 Ethernet controller support
+ * Based on ne.c, by Donald Becker, et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/isapnp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/phy.h>
+#include <linux/eeprom_93cx6.h>
+#include <linux/slab.h>
+
+#include <net/ax88796.h>
+
+#include <asm/system.h>
+
+/* Rename the lib8390.c functions to show that they are in this driver */
+#define __ei_open ax_ei_open
+#define __ei_close ax_ei_close
+#define __ei_poll ax_ei_poll
+#define __ei_start_xmit ax_ei_start_xmit
+#define __ei_tx_timeout ax_ei_tx_timeout
+#define __ei_get_stats ax_ei_get_stats
+#define __ei_set_multicast_list ax_ei_set_multicast_list
+#define __ei_interrupt ax_ei_interrupt
+#define ____alloc_ei_netdev ax__alloc_ei_netdev
+#define __NS8390_init ax_NS8390_init
+
+/* force unsigned long back to 'void __iomem *' */
+#define ax_convert_addr(_a) ((void __force __iomem *)(_a))
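+/* (the 8390 core passes addresses around as unsigned long, so this cast
+ restores the __iomem annotation before handing them to readb/writeb) */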
+
+#define ei_inb(_a) readb(ax_convert_addr(_a))
+#define ei_outb(_v, _a) writeb(_v, ax_convert_addr(_a))
+
+#define ei_inb_p(_a) ei_inb(_a)
+#define ei_outb_p(_v, _a) ei_outb(_v, _a)
+
+/* define EI_SHIFT() to take into account our register offsets */
+#define EI_SHIFT(x) (ei_local->reg_offset[(x)])
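+/* e.g. NE_CMD below expands to ei_local->reg_offset[0x00]; the table is
+ filled in by ax_probe(), either from platform data or spread evenly
+ across the memory resource. */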
+
+/* Ensure we have our RCR base value */
+#define AX88796_PLATFORM
+
+static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electronics\n";
+
+#include "lib8390.c"
+
+#define DRV_NAME "ax88796"
+#define DRV_VERSION "1.00"
+
+/* from ne.c */
+#define NE_CMD EI_SHIFT(0x00)
+#define NE_RESET EI_SHIFT(0x1f)
+#define NE_DATAPORT EI_SHIFT(0x10)
+
+#define NE1SM_START_PG 0x20 /* First page of TX buffer */
+#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+#define AX_GPOC_PPDSET BIT(6)
+
+/* device private data */
+
+struct ax_device {
+ struct mii_bus *mii_bus;
+ struct mdiobb_ctrl bb_ctrl;
+ struct phy_device *phy_dev;
+ void __iomem *addr_memr;
+ u8 reg_memr;
+ int link;
+ int speed;
+ int duplex;
+
+ void __iomem *map2;
+ const struct ax_plat_data *plat;
+
+ unsigned char running;
+ unsigned char resume_open;
+ unsigned int irqflags;
+
+ u32 reg_offsets[0x20];
+};
+
+static inline struct ax_device *to_ax_dev(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ return (struct ax_device *)(ei_local + 1);
+}
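+
+/* (the ax_device lives directly behind the ei_device in the private area;
+ ax__alloc_ei_netdev(sizeof(struct ax_device)) in ax_probe() reserves
+ the extra space) */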
+
+/*
+ * ax_initial_check
+ *
+ * do an initial probe for the card to check whether it exists
+ * and is functional
+ */
+static int ax_initial_check(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ void __iomem *ioaddr = ei_local->mem;
+ int reg0;
+ int regd;
+
+ reg0 = ei_inb(ioaddr);
+ if (reg0 == 0xFF)
+ return -ENODEV;
+
+ ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD);
+ regd = ei_inb(ioaddr + 0x0d);
+ ei_outb(0xff, ioaddr + 0x0d);
+ ei_outb(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD);
+ ei_inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
+ if (ei_inb(ioaddr + EN0_COUNTER0) != 0) {
+ ei_outb(reg0, ioaddr);
+ ei_outb(regd, ioaddr + 0x0d); /* Restore the old values. */
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/*
+ * Hard reset the card. This used to pause for the same period that an
+ * 8390 reset command required, but that shouldn't be necessary.
+ */
+static void ax_reset_8390(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ unsigned long reset_start_time = jiffies;
+ void __iomem *addr = (void __iomem *)dev->base_addr;
+
+ if (ei_debug > 1)
+ netdev_dbg(dev, "resetting the 8390 t=%ld\n", jiffies);
+
+ ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
+
+ ei_local->txing = 0;
+ ei_local->dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
+ if (jiffies - reset_start_time > 2 * HZ / 100) {
+ netdev_warn(dev, "%s: did not complete.\n", __func__);
+ break;
+ }
+ }
+
+ ei_outb(ENISR_RESET, addr + EN0_ISR); /* Ack intr. */
+}
+
+
+static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ void __iomem *nic_base = ei_local->mem;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_local->dmaing) {
+ netdev_err(dev, "DMAing conflict in %s "
+ "[DMAstat:%d][irqlock:%d].\n",
+ __func__,
+ ei_local->dmaing, ei_local->irqlock);
+ return;
+ }
+
+ ei_local->dmaing |= 0x01;
+ ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
+ ei_outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+ ei_outb(0, nic_base + EN0_RCNTHI);
+ ei_outb(0, nic_base + EN0_RSARLO); /* On page boundary */
+ ei_outb(ring_page, nic_base + EN0_RSARHI);
+ ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ if (ei_local->word16)
+ readsw(nic_base + NE_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr) >> 1);
+ else
+ readsb(nic_base + NE_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr));
+
+ ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_local->dmaing &= ~0x01;
+
+ le16_to_cpus(&hdr->count);
+}
+
+
+/*
+ * Block input and output, similar to the Crynwr packet driver. If
+ * you are porting to a new ethercard, look at the packet driver
+ * source for hints. The NEx000 doesn't share the on-board packet
+ * memory -- you have to put the packet out through the "remote DMA"
+ * dataport using ei_outb.
+ */
+static void ax_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ void __iomem *nic_base = ei_local->mem;
+ char *buf = skb->data;
+
+ if (ei_local->dmaing) {
+ netdev_err(dev,
+ "DMAing conflict in %s "
+ "[DMAstat:%d][irqlock:%d].\n",
+ __func__,
+ ei_local->dmaing, ei_local->irqlock);
+ return;
+ }
+
+ ei_local->dmaing |= 0x01;
+
+ ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + NE_CMD);
+ ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
+ ei_outb(count >> 8, nic_base + EN0_RCNTHI);
+ ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI);
+ ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ if (ei_local->word16) {
+ readsw(nic_base + NE_DATAPORT, buf, count >> 1);
+ if (count & 0x01)
+ buf[count-1] = ei_inb(nic_base + NE_DATAPORT);
+
+ } else {
+ readsb(nic_base + NE_DATAPORT, buf, count);
+ }
+
+ ei_local->dmaing &= ~1;
+}
+
+static void ax_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ void __iomem *nic_base = ei_local->mem;
+ unsigned long dma_start;
+
+ /*
+ * Round the count up for word writes. Do we need to do this?
+ * What effect will an odd byte count have on the 8390? I
+ * should check someday.
+ */
+ if (ei_local->word16 && (count & 0x01))
+ count++;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_local->dmaing) {
+ netdev_err(dev, "DMAing conflict in %s."
+ "[DMAstat:%d][irqlock:%d]\n",
+ __func__,
+ ei_local->dmaing, ei_local->irqlock);
+ return;
+ }
+
+ ei_local->dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ ei_outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
+
+ ei_outb(ENISR_RDC, nic_base + EN0_ISR);
+
+ /* Now the normal output. */
+ ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
+ ei_outb(count >> 8, nic_base + EN0_RCNTHI);
+ ei_outb(0x00, nic_base + EN0_RSARLO);
+ ei_outb(start_page, nic_base + EN0_RSARHI);
+
+ ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
+ if (ei_local->word16)
+ writesw(nic_base + NE_DATAPORT, buf, count >> 1);
+ else
+ writesb(nic_base + NE_DATAPORT, buf, count);
+
+ dma_start = jiffies;
+
+ while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
+ if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
+ ax_reset_8390(dev);
+ ax_NS8390_init(dev, 1);
+ break;
+ }
+ }
+
+ ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_local->dmaing &= ~0x01;
+}
+
+/* definitions for accessing MII/EEPROM interface */
+
+#define AX_MEMR EI_SHIFT(0x14)
+#define AX_MEMR_MDC BIT(0)
+#define AX_MEMR_MDIR BIT(1)
+#define AX_MEMR_MDI BIT(2)
+#define AX_MEMR_MDO BIT(3)
+#define AX_MEMR_EECS BIT(4)
+#define AX_MEMR_EEI BIT(5)
+#define AX_MEMR_EEO BIT(6)
+#define AX_MEMR_EECLK BIT(7)
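+
+/* MEMR multiplexes the bit-banged MDIO lines (MDC/MDIR/MDI/MDO) with the
+ 93CX6 EEPROM lines (EECS/EEI/EEO/EECLK); the helpers below toggle these
+ bits individually. */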
+
+static void ax_handle_link_change(struct net_device *dev)
+{
+ struct ax_device *ax = to_ax_dev(dev);
+ struct phy_device *phy_dev = ax->phy_dev;
+ int status_change = 0;
+
+ if (phy_dev->link && ((ax->speed != phy_dev->speed) ||
+ (ax->duplex != phy_dev->duplex))) {
+
+ ax->speed = phy_dev->speed;
+ ax->duplex = phy_dev->duplex;
+ status_change = 1;
+ }
+
+ if (phy_dev->link != ax->link) {
+ if (!phy_dev->link) {
+ ax->speed = 0;
+ ax->duplex = -1;
+ }
+ ax->link = phy_dev->link;
+
+ status_change = 1;
+ }
+
+ if (status_change)
+ phy_print_status(phy_dev);
+}
+
+static int ax_mii_probe(struct net_device *dev)
+{
+ struct ax_device *ax = to_ax_dev(dev);
+ struct phy_device *phy_dev = NULL;
+ int ret;
+
+ /* find the first phy */
+ phy_dev = phy_find_first(ax->mii_bus);
+ if (!phy_dev) {
+ netdev_err(dev, "no PHY found\n");
+ return -ENODEV;
+ }
+
+ ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change, 0,
+ PHY_INTERFACE_MODE_MII);
+ if (ret) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ return ret;
+ }
+
+ /* mask with MAC supported features */
+ phy_dev->supported &= PHY_BASIC_FEATURES;
+ phy_dev->advertising = phy_dev->supported;
+
+ ax->phy_dev = phy_dev;
+
+ netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ phy_dev->drv->name, dev_name(&phy_dev->dev), phy_dev->irq);
+
+ return 0;
+}
+
+static void ax_phy_switch(struct net_device *dev, int on)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
+
+ u8 reg_gpoc = ax->plat->gpoc_val;
+
+ if (on)
+ reg_gpoc &= ~AX_GPOC_PPDSET;
+ else
+ reg_gpoc |= AX_GPOC_PPDSET;
+
+ ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17));
+}
+
+static int ax_open(struct net_device *dev)
+{
+ struct ax_device *ax = to_ax_dev(dev);
+ int ret;
+
+ netdev_dbg(dev, "open\n");
+
+ ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags,
+ dev->name, dev);
+ if (ret)
+ goto failed_request_irq;
+
+ /* turn the phy on (if turned off) */
+ ax_phy_switch(dev, 1);
+
+ ret = ax_mii_probe(dev);
+ if (ret)
+ goto failed_mii_probe;
+ phy_start(ax->phy_dev);
+
+ ret = ax_ei_open(dev);
+ if (ret)
+ goto failed_ax_ei_open;
+
+ ax->running = 1;
+
+ return 0;
+
+ failed_ax_ei_open:
+ phy_disconnect(ax->phy_dev);
+ failed_mii_probe:
+ ax_phy_switch(dev, 0);
+ free_irq(dev->irq, dev);
+ failed_request_irq:
+ return ret;
+}
+
+static int ax_close(struct net_device *dev)
+{
+ struct ax_device *ax = to_ax_dev(dev);
+
+ netdev_dbg(dev, "close\n");
+
+ ax->running = 0;
+ wmb();
+
+ ax_ei_close(dev);
+
+ /* turn the phy off */
+ ax_phy_switch(dev, 0);
+ phy_disconnect(ax->phy_dev);
+
+ free_irq(dev->irq, dev);
+ return 0;
+}
+
+static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ struct ax_device *ax = to_ax_dev(dev);
+ struct phy_device *phy_dev = ax->phy_dev;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!phy_dev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(phy_dev, req, cmd);
+}
+
+/* ethtool ops */
+
+static void ax_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct platform_device *pdev = to_platform_device(dev->dev.parent);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pdev->name);
+}
+
+static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ax_device *ax = to_ax_dev(dev);
+ struct phy_device *phy_dev = ax->phy_dev;
+
+ if (!phy_dev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(phy_dev, cmd);
+}
+
+static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ax_device *ax = to_ax_dev(dev);
+ struct phy_device *phy_dev = ax->phy_dev;
+
+ if (!phy_dev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phy_dev, cmd);
+}
+
+static const struct ethtool_ops ax_ethtool_ops = {
+ .get_drvinfo = ax_get_drvinfo,
+ .get_settings = ax_get_settings,
+ .set_settings = ax_set_settings,
+ .get_link = ethtool_op_get_link,
+};
+
+#ifdef CONFIG_AX88796_93CX6
+static void ax_eeprom_register_read(struct eeprom_93cx6 *eeprom)
+{
+ struct ei_device *ei_local = eeprom->data;
+ u8 reg = ei_inb(ei_local->mem + AX_MEMR);
+
+ eeprom->reg_data_in = reg & AX_MEMR_EEI;
+ eeprom->reg_data_out = reg & AX_MEMR_EEO; /* Input pin */
+ eeprom->reg_data_clock = reg & AX_MEMR_EECLK;
+ eeprom->reg_chip_select = reg & AX_MEMR_EECS;
+}
+
+static void ax_eeprom_register_write(struct eeprom_93cx6 *eeprom)
+{
+ struct ei_device *ei_local = eeprom->data;
+ u8 reg = ei_inb(ei_local->mem + AX_MEMR);
+
+ reg &= ~(AX_MEMR_EEI | AX_MEMR_EECLK | AX_MEMR_EECS);
+
+ if (eeprom->reg_data_in)
+ reg |= AX_MEMR_EEI;
+ if (eeprom->reg_data_clock)
+ reg |= AX_MEMR_EECLK;
+ if (eeprom->reg_chip_select)
+ reg |= AX_MEMR_EECS;
+
+ ei_outb(reg, ei_local->mem + AX_MEMR);
+ udelay(10);
+}
+#endif
+
+static const struct net_device_ops ax_netdev_ops = {
+ .ndo_open = ax_open,
+ .ndo_stop = ax_close,
+ .ndo_do_ioctl = ax_ioctl,
+
+ .ndo_start_xmit = ax_ei_start_xmit,
+ .ndo_tx_timeout = ax_ei_tx_timeout,
+ .ndo_get_stats = ax_ei_get_stats,
+ .ndo_set_rx_mode = ax_ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ax_ei_poll,
+#endif
+};
+
+static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level)
+{
+ struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+ if (level)
+ ax->reg_memr |= AX_MEMR_MDC;
+ else
+ ax->reg_memr &= ~AX_MEMR_MDC;
+
+ ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+ struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+ if (output)
+ ax->reg_memr &= ~AX_MEMR_MDIR;
+ else
+ ax->reg_memr |= AX_MEMR_MDIR;
+
+ ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value)
+{
+ struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+ if (value)
+ ax->reg_memr |= AX_MEMR_MDO;
+ else
+ ax->reg_memr &= ~AX_MEMR_MDO;
+
+ ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static int ax_bb_get_data(struct mdiobb_ctrl *ctrl)
+{
+ struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+ int reg_memr = ei_inb(ax->addr_memr);
+
+ return reg_memr & AX_MEMR_MDI ? 1 : 0;
+}
+
+static struct mdiobb_ops bb_ops = {
+ .owner = THIS_MODULE,
+ .set_mdc = ax_bb_mdc,
+ .set_mdio_dir = ax_bb_dir,
+ .set_mdio_data = ax_bb_set_data,
+ .get_mdio_data = ax_bb_get_data,
+};
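+
+/* alloc_mdio_bitbang() wraps these four primitives into a full MII bus
+ (see ax_mii_init() below), clocking MDIO frames out in software through
+ the MEMR register. */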
+
+/* setup code */
+
+static int ax_mii_init(struct net_device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->dev.parent);
+ struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
+ int err, i;
+
+ ax->bb_ctrl.ops = &bb_ops;
+ ax->addr_memr = ei_local->mem + AX_MEMR;
+ ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl);
+ if (!ax->mii_bus) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ax->mii_bus->name = "ax88796_mii_bus";
+ ax->mii_bus->parent = dev->dev.parent;
+ snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+
+ ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!ax->mii_bus->irq) {
+ err = -ENOMEM;
+ goto out_free_mdio_bitbang;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ ax->mii_bus->irq[i] = PHY_POLL;
+
+ err = mdiobus_register(ax->mii_bus);
+ if (err)
+ goto out_free_irq;
+
+ return 0;
+
+ out_free_irq:
+ kfree(ax->mii_bus->irq);
+ out_free_mdio_bitbang:
+ free_mdio_bitbang(ax->mii_bus);
+ out:
+ return err;
+}
+
+static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local)
+{
+ void __iomem *ioaddr = ei_local->mem;
+ struct ax_device *ax = to_ax_dev(dev);
+
+ /* Select page 0 */
+ ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_STOP, ioaddr + E8390_CMD);
+
+ /* set to byte access */
+ ei_outb(ax->plat->dcr_val & ~1, ioaddr + EN0_DCFG);
+ ei_outb(ax->plat->gpoc_val, ioaddr + EI_SHIFT(0x17));
+}
+
+/*
+ * ax_init_dev
+ *
+ * initialise the specified device, taking care to note any MAC
+ * address it may already have (if configured), and ensure the
+ * device is ready to be used by lib8390.c and registered with
+ * the network layer.
+ */
+static int ax_init_dev(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
+ void __iomem *ioaddr = ei_local->mem;
+ unsigned int start_page;
+ unsigned int stop_page;
+ int ret;
+ int i;
+
+ ret = ax_initial_check(dev);
+ if (ret)
+ goto err_out;
+
+ /* setup goes here */
+
+ ax_initial_setup(dev, ei_local);
+
+ /* read the mac from the card prom if we need it */
+
+ if (ax->plat->flags & AXFLG_HAS_EEPROM) {
+ unsigned char SA_prom[32];
+
+ for (i = 0; i < sizeof(SA_prom); i += 2) {
+ SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT);
+ SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT);
+ }
+
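+ /* 16-bit cards return each PROM byte doubled, so keep every other
+ byte (SA_prom[i+i] == SA_prom[2*i]) */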
+ if (ax->plat->wordlength == 2)
+ for (i = 0; i < 16; i++)
+ SA_prom[i] = SA_prom[i+i];
+
+ memcpy(dev->dev_addr, SA_prom, 6);
+ }
+
+#ifdef CONFIG_AX88796_93CX6
+ if (ax->plat->flags & AXFLG_HAS_93CX6) {
+ unsigned char mac_addr[6];
+ struct eeprom_93cx6 eeprom;
+
+ eeprom.data = ei_local;
+ eeprom.register_read = ax_eeprom_register_read;
+ eeprom.register_write = ax_eeprom_register_write;
+ eeprom.width = PCI_EEPROM_WIDTH_93C56;
+
+ eeprom_93cx6_multiread(&eeprom, 0,
+ (__le16 __force *)mac_addr,
+ sizeof(mac_addr) >> 1);
+
+ memcpy(dev->dev_addr, mac_addr, 6);
+ }
+#endif
+ if (ax->plat->wordlength == 2) {
+ /* We must set the 8390 for word mode. */
+ ei_outb(ax->plat->dcr_val, ei_local->mem + EN0_DCFG);
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+ } else {
+ start_page = NE1SM_START_PG;
+ stop_page = NE1SM_STOP_PG;
+ }
+
+ /* load the mac-address from the device */
+ if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
+ ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
+ ei_local->mem + E8390_CMD); /* 0x61 */
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ dev->dev_addr[i] =
+ ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
+ }
+
+ if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
+ ax->plat->mac_addr)
+ memcpy(dev->dev_addr, ax->plat->mac_addr,
+ ETHER_ADDR_LEN);
+
+ ax_reset_8390(dev);
+
+ ei_local->name = "AX88796";
+ ei_local->tx_start_page = start_page;
+ ei_local->stop_page = stop_page;
+ ei_local->word16 = (ax->plat->wordlength == 2);
+ ei_local->rx_start_page = start_page + TX_PAGES;
+
+#ifdef PACKETBUF_MEMSIZE
+ /* Allow the packet buffer size to be overridden by know-it-alls. */
+ ei_local->stop_page = ei_local->tx_start_page + PACKETBUF_MEMSIZE;
+#endif
+
+ ei_local->reset_8390 = &ax_reset_8390;
+ ei_local->block_input = &ax_block_input;
+ ei_local->block_output = &ax_block_output;
+ ei_local->get_8390_hdr = &ax_get_8390_hdr;
+ ei_local->priv = 0;
+
+ dev->netdev_ops = &ax_netdev_ops;
+ dev->ethtool_ops = &ax_ethtool_ops;
+
+ ret = ax_mii_init(dev);
+ if (ret)
+ goto out_irq;
+
+ ax_NS8390_init(dev, 0);
+
+ ret = register_netdev(dev);
+ if (ret)
+ goto out_irq;
+
+ netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n",
+ ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr,
+ dev->dev_addr);
+
+ return 0;
+
+ out_irq:
+ /* cleanup irq */
+ free_irq(dev->irq, dev);
+ err_out:
+ return ret;
+}
+
+static int ax_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
+ struct resource *mem;
+
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+
+ iounmap(ei_local->mem);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+
+ if (ax->map2) {
+ iounmap(ax->map2);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ release_mem_region(mem->start, resource_size(mem));
+ }
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+/*
+ * ax_probe
+ *
+ * This is the entry point the platform device system uses to
+ * notify us of a new device to attach to. Allocate memory, find the
+ * resources and information passed, and map the necessary registers.
+ */
+static int ax_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct ei_device *ei_local;
+ struct ax_device *ax;
+ struct resource *irq, *mem, *mem2;
+ resource_size_t mem_size, mem2_size = 0;
+ int ret = 0;
+
+ dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
+ if (dev == NULL)
+ return -ENOMEM;
+
+ /* ok, let's setup our device */
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ ei_local = netdev_priv(dev);
+ ax = to_ax_dev(dev);
+
+ ax->plat = pdev->dev.platform_data;
+ platform_set_drvdata(pdev, dev);
+
+ ei_local->rxcr_base = ax->plat->rcr_val;
+
+ /* find the platform resources */
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "no IRQ specified\n");
+ ret = -ENXIO;
+ goto exit_mem;
+ }
+
+ dev->irq = irq->start;
+ ax->irqflags = irq->flags & IRQF_TRIGGER_MASK;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "no MEM specified\n");
+ ret = -ENXIO;
+ goto exit_mem;
+ }
+
+ mem_size = resource_size(mem);
+
+ /*
+ * setup the register offsets from either the platform data or
+ * by using the size of the resource provided
+ */
+ if (ax->plat->reg_offsets)
+ ei_local->reg_offset = ax->plat->reg_offsets;
+ else {
+ ei_local->reg_offset = ax->reg_offsets;
+ for (ret = 0; ret < 0x18; ret++)
+ ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
+ }
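+ /* (illustration: a 0x18-byte window gives byte-spaced registers, a
+ 0x30-byte window spaces them two bytes apart, and so on) */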
+
+ if (!request_mem_region(mem->start, mem_size, pdev->name)) {
+ dev_err(&pdev->dev, "cannot reserve registers\n");
+ ret = -ENXIO;
+ goto exit_mem;
+ }
+
+ ei_local->mem = ioremap(mem->start, mem_size);
+ dev->base_addr = (unsigned long)ei_local->mem;
+
+ if (ei_local->mem == NULL) {
+ dev_err(&pdev->dev, "Cannot ioremap area %pR\n", mem);
+
+ ret = -ENXIO;
+ goto exit_req;
+ }
+
+ /* look for reset area */
+ mem2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!mem2) {
+ if (!ax->plat->reg_offsets) {
+ for (ret = 0; ret < 0x20; ret++)
+ ax->reg_offsets[ret] = (mem_size / 0x20) * ret;
+ }
+ } else {
+ mem2_size = resource_size(mem2);
+
+ if (!request_mem_region(mem2->start, mem2_size, pdev->name)) {
+ dev_err(&pdev->dev, "cannot reserve registers\n");
+ ret = -ENXIO;
+ goto exit_mem1;
+ }
+
+ ax->map2 = ioremap(mem2->start, mem2_size);
+ if (!ax->map2) {
+ dev_err(&pdev->dev, "cannot map reset register\n");
+ ret = -ENXIO;
+ goto exit_mem2;
+ }
+
+ ei_local->reg_offset[0x1f] = ax->map2 - ei_local->mem;
+ }
+
+ /* got resources, now initialise and register device */
+ ret = ax_init_dev(dev);
+ if (!ret)
+ return 0;
+
+ if (!ax->map2)
+ goto exit_mem1;
+
+ iounmap(ax->map2);
+
+ exit_mem2:
+ release_mem_region(mem2->start, mem2_size);
+
+ exit_mem1:
+ iounmap(ei_local->mem);
+
+ exit_req:
+ release_mem_region(mem->start, mem_size);
+
+ exit_mem:
+ free_netdev(dev);
+
+ return ret;
+}
+
+/* suspend and resume */
+
+#ifdef CONFIG_PM
+static int ax_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct net_device *ndev = platform_get_drvdata(dev);
+ struct ax_device *ax = to_ax_dev(ndev);
+
+ ax->resume_open = ax->running;
+
+ netif_device_detach(ndev);
+ ax_close(ndev);
+
+ return 0;
+}
+
+static int ax_resume(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ax_device *ax = to_ax_dev(ndev);
+
+ ax_initial_setup(ndev, netdev_priv(ndev));
+ ax_NS8390_init(ndev, ax->resume_open);
+ netif_device_attach(ndev);
+
+ if (ax->resume_open)
+ ax_open(ndev);
+
+ return 0;
+}
+
+#else
+#define ax_suspend NULL
+#define ax_resume NULL
+#endif
+
+static struct platform_driver axdrv = {
+ .driver = {
+ .name = "ax88796",
+ .owner = THIS_MODULE,
+ },
+ .probe = ax_probe,
+ .remove = ax_remove,
+ .suspend = ax_suspend,
+ .resume = ax_resume,
+};
+
+static int __init axdrv_init(void)
+{
+ return platform_driver_register(&axdrv);
+}
+
+static void __exit axdrv_exit(void)
+{
+ platform_driver_unregister(&axdrv);
+}
+
+module_init(axdrv_init);
+module_exit(axdrv_exit);
+
+MODULE_DESCRIPTION("AX88796 10/100 Ethernet platform driver");
+MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ax88796");
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
new file mode 100644
index 000000000000..bba51cdc74a1
--- /dev/null
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -0,0 +1,1725 @@
+/*======================================================================
+
+ A PCMCIA ethernet driver for Asix AX88190-based cards
+
+ The Asix AX88190 is a NS8390-derived chipset with a few nasty
+ idiosyncrasies that make it very inconvenient to support with a
+ standard 8390 driver. This driver is based on pcnet_cs, with the
+ tweaked 8390 code grafted on the end. Much of what I did was to
+ clean up and update a similar driver supplied by Asix, which was
+ adapted by William Lee, william@asix.com.tw.
+
+ Copyright (C) 2001 David A. Hinds -- dahinds@users.sourceforge.net
+
+ axnet_cs.c 1.28 2002/06/29 06:27:37
+
+ The network driver code is based on Donald Becker's NE2000 code:
+
+ Written 1992,1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+ Donald Becker may be reached at becker@scyld.com
+
+======================================================================*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include "8390.h"
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/cisreg.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#define AXNET_CMD 0x00
+#define AXNET_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define AXNET_RESET 0x1f /* Issue a read to reset, a write to clear. */
+#define AXNET_MII_EEP 0x14 /* Offset of MII access port */
+#define AXNET_TEST 0x15 /* Offset of TEST Register port */
+#define AXNET_GPIO 0x17 /* Offset of General Purpose Register Port */
+
+#define AXNET_START_PG 0x40 /* First page of TX buffer */
+#define AXNET_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+#define AXNET_RDC_TIMEOUT 0x02 /* Max wait in jiffies for Tx RDC */
+
+#define IS_AX88190 0x0001
+#define IS_AX88790 0x0002
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("Asix AX88190 PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+
+/*====================================================================*/
+
+static int axnet_config(struct pcmcia_device *link);
+static void axnet_release(struct pcmcia_device *link);
+static int axnet_open(struct net_device *dev);
+static int axnet_close(struct net_device *dev);
+static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static void axnet_tx_timeout(struct net_device *dev);
+static irqreturn_t ei_irq_wrapper(int irq, void *dev_id);
+static void ei_watchdog(u_long arg);
+static void axnet_reset_8390(struct net_device *dev);
+
+static int mdio_read(unsigned int addr, int phy_id, int loc);
+static void mdio_write(unsigned int addr, int phy_id, int loc, int value);
+
+static void get_8390_hdr(struct net_device *,
+ struct e8390_pkt_hdr *, int);
+static void block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void block_output(struct net_device *dev, int count,
+ const u_char *buf, const int start_page);
+
+static void axnet_detach(struct pcmcia_device *p_dev);
+
+static void AX88190_init(struct net_device *dev, int startp);
+static int ax_open(struct net_device *dev);
+static int ax_close(struct net_device *dev);
+static irqreturn_t ax_interrupt(int irq, void *dev_id);
+
+/*====================================================================*/
+
+typedef struct axnet_dev_t {
+ struct pcmcia_device *p_dev;
+ caddr_t base;
+ struct timer_list watchdog;
+ int stale, fast_poll;
+ u_short link_status;
+ u_char duplex_flag;
+ int phy_id;
+ int flags;
+ int active_low;
+} axnet_dev_t;
+
+static inline axnet_dev_t *PRIV(struct net_device *dev)
+{
+ void *p = (char *)netdev_priv(dev) + sizeof(struct ei_device);
+ return p;
+}
+
+static const struct net_device_ops axnet_netdev_ops = {
+ .ndo_open = axnet_open,
+ .ndo_stop = axnet_close,
+ .ndo_do_ioctl = axnet_ioctl,
+ .ndo_start_xmit = axnet_start_xmit,
+ .ndo_tx_timeout = axnet_tx_timeout,
+ .ndo_get_stats = get_stats,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int axnet_probe(struct pcmcia_device *link)
+{
+ axnet_dev_t *info;
+ struct net_device *dev;
+ struct ei_device *ei_local;
+
+ dev_dbg(&link->dev, "axnet_attach()\n");
+
+ dev = alloc_etherdev(sizeof(struct ei_device) + sizeof(axnet_dev_t));
+ if (!dev)
+ return -ENOMEM;
+
+ ei_local = netdev_priv(dev);
+ spin_lock_init(&ei_local->page_lock);
+
+ info = PRIV(dev);
+ info->p_dev = link;
+ link->priv = dev;
+ link->config_flags |= CONF_ENABLE_IRQ;
+
+ dev->netdev_ops = &axnet_netdev_ops;
+
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ return axnet_config(link);
+} /* axnet_attach */
+
+static void axnet_detach(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ dev_dbg(&link->dev, "axnet_detach(0x%p)\n", link);
+
+ unregister_netdev(dev);
+
+ axnet_release(link);
+
+ free_netdev(dev);
+} /* axnet_detach */
+
+/*======================================================================
+
+ This probes for a card's hardware address by reading the PROM.
+
+======================================================================*/
+
+static int get_prom(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ unsigned int ioaddr = dev->base_addr;
+ int i, j;
+
+ /* This is based on drivers/net/ne.c */
+ struct {
+ u_char value, offset;
+ } program_seq[] = {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
+ {0x01, EN0_DCFG}, /* Set word-wide access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF|0x40, EN0_RXCR}, /* 0x60 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {0x10, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0400. */
+ {0x04, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+
+ /* Not much of a test, but the alternatives are messy */
+ if (link->config_base != 0x03c0)
+ return 0;
+
+ axnet_reset_8390(dev);
+ mdelay(10);
+
+ for (i = 0; i < ARRAY_SIZE(program_seq); i++)
+ outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
+
+ for (i = 0; i < 6; i += 2) {
+ j = inw(ioaddr + AXNET_DATAPORT);
+ dev->dev_addr[i] = j & 0xff;
+ dev->dev_addr[i+1] = j >> 8;
+ }
+ return 1;
+} /* get_prom */
+
+static int try_io_port(struct pcmcia_device *link)
+{
+ int j, ret;
+ link->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ link->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
+ if (link->resource[0]->end == 32) {
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+ /* for master/slave multifunction cards */
+ if (link->resource[1]->end > 0)
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
+ } else {
+ /* This should be two 16-port windows */
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_16;
+ }
+ if (link->resource[0]->start == 0) {
+ for (j = 0; j < 0x400; j += 0x20) {
+ link->resource[0]->start = j ^ 0x300;
+ link->resource[1]->start = (j ^ 0x300) + 0x10;
+ link->io_lines = 16;
+ ret = pcmcia_request_io(link);
+ if (ret == 0)
+ return ret;
+ }
+ return ret;
+ } else {
+ return pcmcia_request_io(link);
+ }
+}
+
+static int axnet_configcheck(struct pcmcia_device *p_dev, void *priv_data)
+{
+ if (p_dev->config_index == 0)
+ return -EINVAL;
+
+ p_dev->config_index = 0x05;
+ if (p_dev->resource[0]->end + p_dev->resource[1]->end < 32)
+ return -ENODEV;
+
+ return try_io_port(p_dev);
+}
+
+static int axnet_config(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ axnet_dev_t *info = PRIV(dev);
+ int i, j, j2, ret;
+
+ dev_dbg(&link->dev, "axnet_config(0x%p)\n", link);
+
+ /* don't trust the CIS on this; Linksys got it wrong */
+ link->config_regs = 0x63;
+ link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+ ret = pcmcia_loop_config(link, axnet_configcheck, NULL);
+ if (ret != 0)
+ goto failed;
+
+ if (!link->irq)
+ goto failed;
+
+ if (resource_size(link->resource[1]) == 8)
+ link->config_flags |= CONF_ENABLE_SPKR;
+
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto failed;
+
+ dev->irq = link->irq;
+ dev->base_addr = link->resource[0]->start;
+
+ if (!get_prom(link)) {
+ pr_notice("this is not an AX88190 card!\n");
+ pr_notice("use pcnet_cs instead.\n");
+ goto failed;
+ }
+
+ ei_status.name = "AX88190";
+ ei_status.word16 = 1;
+ ei_status.tx_start_page = AXNET_START_PG;
+ ei_status.rx_start_page = AXNET_START_PG + TX_PAGES;
+ ei_status.stop_page = AXNET_STOP_PG;
+ ei_status.reset_8390 = axnet_reset_8390;
+ ei_status.get_8390_hdr = get_8390_hdr;
+ ei_status.block_input = block_input;
+ ei_status.block_output = block_output;
+
+ if (inb(dev->base_addr + AXNET_TEST) != 0)
+ info->flags |= IS_AX88790;
+ else
+ info->flags |= IS_AX88190;
+
+ if (info->flags & IS_AX88790)
+ outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */
+
+ info->active_low = 0;
+
+ for (i = 0; i < 32; i++) {
+ j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
+ j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
+ if (j == j2) continue;
+ if ((j != 0) && (j != 0xffff)) break;
+ }
+
+ if (i == 32) {
+ /* Maybe PHY is in power down mode. (PPD_SET = 1)
+ Bit 2 of CCSR is active low. */
+ pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
+ for (i = 0; i < 32; i++) {
+ j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
+ j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
+ if (j == j2) continue;
+ if ((j != 0) && (j != 0xffff)) {
+ info->active_low = 1;
+ break;
+ }
+ }
+ }
+
+ info->phy_id = (i < 32) ? i : -1;
+ SET_NETDEV_DEV(dev, &link->dev);
+
+ if (register_netdev(dev) != 0) {
+ pr_notice("register_netdev() failed\n");
+ goto failed;
+ }
+
+ netdev_info(dev, "Asix AX88%d90: io %#3lx, irq %d, hw_addr %pM\n",
+ ((info->flags & IS_AX88790) ? 7 : 1),
+ dev->base_addr, dev->irq, dev->dev_addr);
+ if (info->phy_id != -1) {
+ netdev_dbg(dev, " MII transceiver at index %d, status %x\n",
+ info->phy_id, j);
+ } else {
+ netdev_notice(dev, " No MII transceivers found!\n");
+ }
+ return 0;
+
+failed:
+ axnet_release(link);
+ return -ENODEV;
+} /* axnet_config */
+
+static void axnet_release(struct pcmcia_device *link)
+{
+ pcmcia_disable_device(link);
+}
+
+static int axnet_suspend(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ if (link->open)
+ netif_device_detach(dev);
+
+ return 0;
+}
+
+static int axnet_resume(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ axnet_dev_t *info = PRIV(dev);
+
+ if (link->open) {
+ if (info->active_low == 1)
+ pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
+
+ axnet_reset_8390(dev);
+ AX88190_init(dev, 1);
+ netif_device_attach(dev);
+ }
+
+ return 0;
+}
+
+
+/*======================================================================
+
+ MII interface support
+
+======================================================================*/
+
+#define MDIO_SHIFT_CLK 0x01
+#define MDIO_DATA_WRITE0 0x00
+#define MDIO_DATA_WRITE1 0x08
+#define MDIO_DATA_READ 0x04
+#define MDIO_MASK 0x0f
+#define MDIO_ENB_IN 0x02
+
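+/* A plausible reading of the frames built below: mdio_sync() clocks out a
+ 32-bit preamble of ones; mdio_read() then shifts out the start and
+ opcode bits followed by the 5-bit PHY and register addresses, turns the
+ bus around, and clocks 16 data bits back in. */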
+static void mdio_sync(unsigned int addr)
+{
+ int bits;
+ for (bits = 0; bits < 32; bits++) {
+ outb_p(MDIO_DATA_WRITE1, addr);
+ outb_p(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr);
+ }
+}
+
+static int mdio_read(unsigned int addr, int phy_id, int loc)
+{
+ u_int cmd = (0xf6<<10)|(phy_id<<5)|loc;
+ int i, retval = 0;
+
+ mdio_sync(addr);
+ for (i = 14; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb_p(dat, addr);
+ outb_p(dat | MDIO_SHIFT_CLK, addr);
+ }
+ for (i = 19; i > 0; i--) {
+ outb_p(MDIO_ENB_IN, addr);
+ retval = (retval << 1) | ((inb_p(addr) & MDIO_DATA_READ) != 0);
+ outb_p(MDIO_ENB_IN | MDIO_SHIFT_CLK, addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(unsigned int addr, int phy_id, int loc, int value)
+{
+ u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
+ int i;
+
+ mdio_sync(addr);
+ for (i = 31; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb_p(dat, addr);
+ outb_p(dat | MDIO_SHIFT_CLK, addr);
+ }
+ for (i = 1; i >= 0; i--) {
+ outb_p(MDIO_ENB_IN, addr);
+ outb_p(MDIO_ENB_IN | MDIO_SHIFT_CLK, addr);
+ }
+}
+
+/*====================================================================*/
+
+static int axnet_open(struct net_device *dev)
+{
+ int ret;
+ axnet_dev_t *info = PRIV(dev);
+ struct pcmcia_device *link = info->p_dev;
+ unsigned int nic_base = dev->base_addr;
+
+ dev_dbg(&link->dev, "axnet_open('%s')\n", dev->name);
+
+ if (!pcmcia_dev_present(link))
+ return -ENODEV;
+
+ outb_p(0xFF, nic_base + EN0_ISR); /* Clear bogus intr. */
+ ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, "axnet_cs", dev);
+ if (ret)
+ return ret;
+
+ link->open++;
+
+ info->link_status = 0x00;
+ init_timer(&info->watchdog);
+ info->watchdog.function = ei_watchdog;
+ info->watchdog.data = (u_long)dev;
+ info->watchdog.expires = jiffies + HZ;
+ add_timer(&info->watchdog);
+
+ return ax_open(dev);
+} /* axnet_open */
+
+/*====================================================================*/
+
+static int axnet_close(struct net_device *dev)
+{
+ axnet_dev_t *info = PRIV(dev);
+ struct pcmcia_device *link = info->p_dev;
+
+ dev_dbg(&link->dev, "axnet_close('%s')\n", dev->name);
+
+ ax_close(dev);
+ free_irq(dev->irq, dev);
+
+ link->open--;
+ netif_stop_queue(dev);
+ del_timer_sync(&info->watchdog);
+
+ return 0;
+} /* axnet_close */
+
+/*======================================================================
+
+ Hard reset the card. This used to pause for the same period that
+ an 8390 reset command required, but that shouldn't be necessary.
+
+======================================================================*/
+
+static void axnet_reset_8390(struct net_device *dev)
+{
+ unsigned int nic_base = dev->base_addr;
+ int i;
+
+ ei_status.txing = ei_status.dmaing = 0;
+
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, nic_base + E8390_CMD);
+
+ outb(inb(nic_base + AXNET_RESET), nic_base + AXNET_RESET);
+
+ for (i = 0; i < 100; i++) {
+ if ((inb_p(nic_base+EN0_ISR) & ENISR_RESET) != 0)
+ break;
+ udelay(100);
+ }
+ outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
+
+ if (i == 100)
+ netdev_err(dev, "axnet_reset_8390() did not complete\n");
+
+} /* axnet_reset_8390 */
+
+/*====================================================================*/
+
+static irqreturn_t ei_irq_wrapper(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ PRIV(dev)->stale = 0;
+ return ax_interrupt(irq, dev_id);
+}
+
+static void ei_watchdog(u_long arg)
+{
+ struct net_device *dev = (struct net_device *)(arg);
+ axnet_dev_t *info = PRIV(dev);
+ unsigned int nic_base = dev->base_addr;
+ unsigned int mii_addr = nic_base + AXNET_MII_EEP;
+ u_short link;
+
+ if (!netif_device_present(dev)) goto reschedule;
+
+ /* Check for pending interrupt with expired latency timer: with
+ this, we can limp along even if the interrupt is blocked */
+ if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
+ if (!info->fast_poll)
+ netdev_info(dev, "interrupt(s) dropped!\n");
+ ei_irq_wrapper(dev->irq, dev);
+ info->fast_poll = HZ;
+ }
+ if (info->fast_poll) {
+ info->fast_poll--;
+ info->watchdog.expires = jiffies + 1;
+ add_timer(&info->watchdog);
+ return;
+ }
+
+ if (info->phy_id < 0)
+ goto reschedule;
+ link = mdio_read(mii_addr, info->phy_id, 1);
+ if (!link || (link == 0xffff)) {
+ netdev_info(dev, "MII is missing!\n");
+ info->phy_id = -1;
+ goto reschedule;
+ }
+
+ link &= 0x0004;
+ if (link != info->link_status) {
+ u_short p = mdio_read(mii_addr, info->phy_id, 5);
+ netdev_info(dev, "%s link beat\n", link ? "found" : "lost");
+ if (link) {
+ info->duplex_flag = (p & 0x0140) ? 0x80 : 0x00;
+ if (p)
+ netdev_info(dev, "autonegotiation complete: %dbaseT-%cD selected\n",
+ (p & 0x0180) ? 100 : 10, (p & 0x0140) ? 'F' : 'H');
+ else
+ netdev_info(dev, "link partner did not autonegotiate\n");
+ AX88190_init(dev, 1);
+ }
+ info->link_status = link;
+ }
+
+reschedule:
+ info->watchdog.expires = jiffies + HZ;
+ add_timer(&info->watchdog);
+}
+
+/*====================================================================*/
+
+static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ axnet_dev_t *info = PRIV(dev);
+ struct mii_ioctl_data *data = if_mii(rq);
+ unsigned int mii_addr = dev->base_addr + AXNET_MII_EEP;
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = info->phy_id;
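+ /* fall through */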
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f);
+ return 0;
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ mdio_write(mii_addr, data->phy_id, data->reg_num & 0x1f, data->val_in);
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+/*====================================================================*/
+
+static void get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ unsigned int nic_base = dev->base_addr;
+
+ outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
+
+ insw(nic_base + AXNET_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr)>>1);
+ /* Fix for big endian systems */
+ hdr->count = le16_to_cpu(hdr->count);
+
+}
+
+/*====================================================================*/
+
+static void block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ unsigned int nic_base = dev->base_addr;
+ int xfer_count = count;
+ char *buf = skb->data;
+
+ if ((ei_debug > 4) && (count != 4))
+ pr_debug("%s: [bi=%d]\n", dev->name, count+4);
+ outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
+
+ insw(nic_base + AXNET_DATAPORT, buf, count >> 1);
+ if (count & 0x01) {
+ buf[count-1] = inb(nic_base + AXNET_DATAPORT);
+ xfer_count++;
+ }
+
+}
+
+/*====================================================================*/
+
+static void block_output(struct net_device *dev, int count,
+ const u_char *buf, const int start_page)
+{
+ unsigned int nic_base = dev->base_addr;
+
+ pr_debug("%s: [bo=%d]\n", dev->name, count);
+
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+ if (count & 0x01)
+ count++;
+
+ outb_p(0x00, nic_base + EN0_RSARLO);
+ outb_p(start_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RWRITE+E8390_START, nic_base + AXNET_CMD);
+ outsw(nic_base + AXNET_DATAPORT, buf, count>>1);
+}
+
+static const struct pcmcia_device_id axnet_ids[] = {
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x016c, 0x0081),
+ PCMCIA_DEVICE_MANF_CARD(0x018a, 0x0301),
+ PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328),
+ PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0301),
+ PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0303),
+ PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309),
+ PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1106),
+ PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab),
+ PCMCIA_DEVICE_MANF_CARD(0x021b, 0x0202),
+ PCMCIA_DEVICE_MANF_CARD(0xffff, 0x1090),
+ PCMCIA_DEVICE_PROD_ID12("AmbiCom,Inc.", "Fast Ethernet PC Card(AMB8110)", 0x49b020a7, 0x119cc9fc),
+ PCMCIA_DEVICE_PROD_ID124("Fast Ethernet", "16-bit PC Card", "AX88190", 0xb4be14e3, 0x9a12eb6a, 0xab9be5ef),
+ PCMCIA_DEVICE_PROD_ID12("ASIX", "AX88190", 0x0959823b, 0xab9be5ef),
+ PCMCIA_DEVICE_PROD_ID12("Billionton", "LNA-100B", 0x552ab682, 0xbc3b87e1),
+ PCMCIA_DEVICE_PROD_ID12("CHEETAH ETHERCARD", "EN2228", 0x00fa7bc8, 0x00e990cc),
+ PCMCIA_DEVICE_PROD_ID12("CNet", "CNF301", 0xbc477dde, 0x78c5f40b),
+ PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXD", 0x5261440f, 0x436768c5),
+ PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEtherII PCC-TXD", 0x5261440f, 0x730df72e),
+ PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXM", 0x5261440f, 0x3abbd061),
+ PCMCIA_DEVICE_PROD_ID12("Dynalink", "L100C16", 0x55632fd5, 0x66bc2a90),
+ PCMCIA_DEVICE_PROD_ID12("IO DATA", "ETXPCM", 0x547e66dc, 0x233adac2),
+ PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8),
+ PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC3-TX", 0x481e0094, 0xf91af609),
+ PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA411", 0x9aa79dc3, 0x40fad875),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", "100BASE", 0x281f1c5d, 0x7c2add04),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEtherCard", 0x281f1c5d, 0x7ef26116),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FEP501", 0x281f1c5d, 0x2e272058),
+ PCMCIA_DEVICE_PROD_ID14("Network Everywhere", "AX88190", 0x820a67b6, 0xab9be5ef),
+ PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, axnet_ids);
+
+static struct pcmcia_driver axnet_cs_driver = {
+ .owner = THIS_MODULE,
+ .name = "axnet_cs",
+ .probe = axnet_probe,
+ .remove = axnet_detach,
+ .id_table = axnet_ids,
+ .suspend = axnet_suspend,
+ .resume = axnet_resume,
+};
+
+static int __init init_axnet_cs(void)
+{
+ return pcmcia_register_driver(&axnet_cs_driver);
+}
+
+static void __exit exit_axnet_cs(void)
+{
+ pcmcia_unregister_driver(&axnet_cs_driver);
+}
+
+module_init(init_axnet_cs);
+module_exit(exit_axnet_cs);
+
+/*====================================================================*/
+
+/* 8390.c: A general NS8390 ethernet driver core for linux. */
+/*
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ This is the chip-specific code for many 8390-based ethernet adaptors.
+ This is not a complete driver, it must be combined with board-specific
+ code such as ne.c, wd.c, 3c503.c, etc.
+
+ Seeing how at least eight drivers use this code (not counting the
+ PCMCIA ones), it is easy to break some card by what seems like
+ a simple innocent change. Please contact me or Donald if you think
+ you have found something that needs changing. -- PG
+
+ Changelog:
+
+ Paul Gortmaker : remove set_bit lock, other cleanups.
+ Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to
+ ei_block_input() for eth_io_copy_and_sum().
+ Paul Gortmaker : exchange static int ei_pingpong for a #define,
+ also add better Tx error handling.
+ Paul Gortmaker : rewrite Rx overrun handling as per NS specs.
+ Alexey Kuznetsov : use the 8390's six bit hash multicast filter.
+ Paul Gortmaker : tweak ANK's above multicast changes a bit.
+ Paul Gortmaker : update packet statistics for v2.1.x
+ Alan Cox : support arbitrary stupid port mappings on the
+ 68K Macintosh. Support >16bit I/O spaces
+ Paul Gortmaker : add kmod support for auto-loading of the 8390
+ module by all drivers that require it.
+ Alan Cox : Spinlocking work, added 'BUG_83C690'
+ Paul Gortmaker : Separate out Tx timeout code from Tx path.
+
+ Sources:
+ The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
+
+ */
+
+#include <linux/bitops.h>
+#include <asm/irq.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+
+#define BUG_83C690
+
+/* These are the operational function interfaces to board-specific
+ routines.
+ void reset_8390(struct net_device *dev)
+ Resets the board associated with DEV, including a hardware reset of
+ the 8390. This is only called when there is a transmit timeout, and
+ it is always followed by 8390_init().
+ void block_output(struct net_device *dev, int count, const unsigned char *buf,
+ int start_page)
+ Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The
+ "page" value uses the 8390's 256-byte pages.
+ void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page)
+ Read the 4 byte, page aligned 8390 header. *If* there is a
+ subsequent read, it will be of the rest of the packet.
+ void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+ Read COUNT bytes from the packet buffer into the skb data area. Start
+ reading from RING_OFFSET, the address as the 8390 sees it. This will always
+ follow the read of the 8390 header.
+*/
+#define ei_reset_8390 (ei_local->reset_8390)
+#define ei_block_output (ei_local->block_output)
+#define ei_block_input (ei_local->block_input)
+#define ei_get_8390_hdr (ei_local->get_8390_hdr)
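+
+/*
+ * Illustrative sketch (not part of this file): a board driver is
+ * expected to fill in these hooks before handing the device to this
+ * core, along the lines of:
+ *
+ *    struct ei_device *ei_local = netdev_priv(dev);
+ *
+ *    ei_local->reset_8390   = myboard_reset_8390;
+ *    ei_local->block_input  = myboard_block_input;
+ *    ei_local->block_output = myboard_block_output;
+ *    ei_local->get_8390_hdr = myboard_get_8390_hdr;
+ *
+ * The myboard_* names are hypothetical; see e21_probe1() later in this
+ * patch, or ne.c, wd.c and 3c503.c, for real assignments.
+ */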
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef ei_debug
+int ei_debug = 1;
+#endif
+
+/* Index to functions. */
+static void ei_tx_intr(struct net_device *dev);
+static void ei_tx_err(struct net_device *dev);
+static void ei_receive(struct net_device *dev);
+static void ei_rx_overrun(struct net_device *dev);
+
+/* Routines generic to NS8390-based boards. */
+static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
+ int start_page);
+static void do_set_multicast_list(struct net_device *dev);
+
+/*
+ * SMP and the 8390 setup.
+ *
+ * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is
+ * a page register that controls bank and packet buffer access. We guard
+ * this with ei_local->page_lock. Nobody should assume or set the page other
+ * than zero when the lock is not held. Lock holders must restore page 0
+ * before unlocking. Even pure readers must take the lock, so that the
+ * chip is guaranteed to stay in page 0.
+ *
+ * To make life difficult the chip can also be very slow. We therefore can't
+ * just use spinlocks. For the longer lockups we disable the irq the device
+ * sits on and hold the lock. We must hold the lock because there is a dual
+ * processor case other than interrupts (get stats/set multicast list in
+ * parallel with each other and transmit).
+ *
+ * Note: in theory we could just disable the irq on the card _but_ there is
+ * a latency on SMP irq delivery. So we can easily go "disable irq", "sync
+ * irqs", enter lock, then take the queued irq. So we waddle instead of flying.
+ *
+ * Finally, by special arrangement for the purpose of being generally
+ * annoying, the transmit function is called bh-atomic. That places
+ * restrictions on the user context callers, as disable_irq won't save
+ * them.
+ */
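+
+/*
+ * A minimal sketch of the slow-path pattern described above. This is an
+ * illustration of the comment, not code used by this driver:
+ *
+ *    disable_irq(dev->irq);
+ *    spin_lock_irqsave(&ei_local->page_lock, flags);
+ *    ... slow 8390 register work; restore page 0 before unlocking ...
+ *    spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ *    enable_irq(dev->irq);
+ */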
+
+/**
+ * ax_open - Open/initialize the board.
+ * @dev: network device to initialize
+ *
+ * This routine goes all-out, setting everything
+ * up anew at each open, even though many of these registers should only
+ * need to be set once at boot.
+ */
+static int ax_open(struct net_device *dev)
+{
+ unsigned long flags;
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ /*
+ * Grab the page lock so we own the register set, then call
+ * the init function.
+ */
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ AX88190_init(dev, 1);
+ /* Set the flag before we drop the lock. That way the IRQ arrives
+ after it's set and we get no silly warnings */
+ netif_start_queue(dev);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ ei_local->irqlock = 0;
+ return 0;
+}
+
+#define dev_lock(dev) (((struct ei_device *)netdev_priv(dev))->page_lock)
+
+/**
+ * ax_close - shut down network device
+ * @dev: network device to close
+ *
+ * Opposite of ax_open(). Only used when "ifconfig <devname> down" is done.
+ */
+static int ax_close(struct net_device *dev)
+{
+ unsigned long flags;
+
+ /*
+ * Hold the page lock during close
+ */
+
+ spin_lock_irqsave(&dev_lock(dev), flags);
+ AX88190_init(dev, 0);
+ spin_unlock_irqrestore(&dev_lock(dev), flags);
+ netif_stop_queue(dev);
+ return 0;
+}
+
+/**
+ * axnet_tx_timeout - handle transmit time out condition
+ * @dev: network device which has apparently fallen asleep
+ *
+ * Called by kernel when device never acknowledges a transmit has
+ * completed (or failed) - i.e. never posted a Tx related interrupt.
+ */
+
+static void axnet_tx_timeout(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = netdev_priv(dev);
+ int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
+ unsigned long flags;
+
+ dev->stats.tx_errors++;
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ txsr = inb(e8390_base+EN0_TSR);
+ isr = inb(e8390_base+EN0_ISR);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+ netdev_printk(KERN_DEBUG, dev,
+ "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
+ (txsr & ENTSR_ABT) ? "excess collisions." :
+ (isr) ? "lost interrupt?" : "cable problem?",
+ txsr, isr, tickssofar);
+
+ if (!isr && !dev->stats.tx_packets)
+ {
+ /* The 8390 probably hasn't gotten on the cable yet. */
+ ei_local->interface_num ^= 1; /* Try a different xcvr. */
+ }
+
+ /* Ugly but a reset can be slow, yet must be protected */
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+
+ /* Try to restart the card. Perhaps the user has fixed something. */
+ ei_reset_8390(dev);
+ AX88190_init(dev, 1);
+
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ netif_wake_queue(dev);
+}
+
+/**
+ * axnet_start_xmit - begin packet transmission
+ * @skb: packet to be sent
+ * @dev: network device to which packet is sent
+ *
+ * Sends a packet to an 8390 network device.
+ */
+
+static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = netdev_priv(dev);
+ int length, send_length, output_page;
+ unsigned long flags;
+ u8 packet[ETH_ZLEN];
+
+ netif_stop_queue(dev);
+
+ length = skb->len;
+
+ /* Mask interrupts from the ethercard.
+ SMP: We have to grab the lock here, otherwise the IRQ handler
+ on another CPU can flip the window and race the IRQ mask set. We'd
+ end up trashing the mcast filter, not disabling irqs, if we didn't lock */
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ outb_p(0x00, e8390_base + EN0_IMR);
+
+ /*
+ * Slow phase with lock held.
+ */
+
+ ei_local->irqlock = 1;
+
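+ /* ETH_ZLEN (60) is the minimum ethernet frame length; shorter
+ frames are padded out via the stack buffer below. */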
+ send_length = max(length, ETH_ZLEN);
+
+ /*
+ * We have two Tx slots available for use. Find the first free
+ * slot, and then perform some sanity checks. With two Tx bufs,
+ * you get very close to transmitting back-to-back packets. With
+ * only one Tx buf, the transmitter sits idle while you reload the
+ * card, leaving a substantial gap between each transmitted packet.
+ */
+
+ if (ei_local->tx1 == 0)
+ {
+ output_page = ei_local->tx_start_page;
+ ei_local->tx1 = send_length;
+ if (ei_debug && ei_local->tx2 > 0)
+ netdev_printk(KERN_DEBUG, dev,
+ "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
+ ei_local->tx2, ei_local->lasttx,
+ ei_local->txing);
+ }
+ else if (ei_local->tx2 == 0)
+ {
+ output_page = ei_local->tx_start_page + TX_PAGES/2;
+ ei_local->tx2 = send_length;
+ if (ei_debug && ei_local->tx1 > 0)
+ netdev_printk(KERN_DEBUG, dev,
+ "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
+ ei_local->tx1, ei_local->lasttx,
+ ei_local->txing);
+ }
+ else
+ { /* We should never get here. */
+ if (ei_debug)
+ netdev_printk(KERN_DEBUG, dev,
+ "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+ ei_local->tx1, ei_local->tx2,
+ ei_local->lasttx);
+ ei_local->irqlock = 0;
+ netif_stop_queue(dev);
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ dev->stats.tx_errors++;
+ return NETDEV_TX_BUSY;
+ }
+
+ /*
+ * Okay, now upload the packet and trigger a send if the transmitter
+ * isn't already sending. If it is busy, the interrupt handler will
+ * trigger the send later, upon receiving a Tx done interrupt.
+ */
+
+ if (length == skb->len)
+ ei_block_output(dev, length, skb->data, output_page);
+ else {
+ memset(packet, 0, ETH_ZLEN);
+ skb_copy_from_linear_data(skb, packet, skb->len);
+ ei_block_output(dev, length, packet, output_page);
+ }
+
+ if (!ei_local->txing)
+ {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, send_length, output_page);
+ dev->trans_start = jiffies;
+ if (output_page == ei_local->tx_start_page)
+ {
+ ei_local->tx1 = -1;
+ ei_local->lasttx = -1;
+ }
+ else
+ {
+ ei_local->tx2 = -1;
+ ei_local->lasttx = -2;
+ }
+ }
+ else ei_local->txqueue++;
+
+ if (ei_local->tx1 && ei_local->tx2)
+ netif_stop_queue(dev);
+ else
+ netif_start_queue(dev);
+
+ /* Turn 8390 interrupts back on. */
+ ei_local->irqlock = 0;
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+ dev_kfree_skb(skb);
+ dev->stats.tx_bytes += send_length;
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ax_interrupt - handle the interrupts from an 8390
+ * @irq: interrupt number
+ * @dev_id: a pointer to the net_device
+ *
+ * Handle the ether interface interrupts. We pull packets from
+ * the 8390 via the card specific functions and fire them at the networking
+ * stack. We also handle transmit completions, wake the transmit path if
+ * necessary, update the counters, and do other housekeeping as needed.
+ */
+
+static irqreturn_t ax_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ long e8390_base;
+ int interrupts, nr_serviced = 0, i;
+ struct ei_device *ei_local;
+ int handled = 0;
+ unsigned long flags;
+
+ e8390_base = dev->base_addr;
+ ei_local = netdev_priv(dev);
+
+ /*
+ * Protect the irq test too.
+ */
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+
+ if (ei_local->irqlock) {
+#if 1 /* This might just be an interrupt for a PCI device sharing this line */
+ const char *msg;
+ /* The "irqlock" check is only for testing. */
+ if (ei_local->irqlock)
+ msg = "Interrupted while interrupts are masked!";
+ else
+ msg = "Reentering the interrupt handler!";
+ netdev_info(dev, "%s, isr=%#2x imr=%#2x\n",
+ msg,
+ inb_p(e8390_base + EN0_ISR),
+ inb_p(e8390_base + EN0_IMR));
+#endif
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ return IRQ_NONE;
+ }
+
+ if (ei_debug > 3)
+ netdev_printk(KERN_DEBUG, dev, "interrupt(isr=%#2.2x)\n",
+ inb_p(e8390_base + EN0_ISR));
+
+ outb_p(0x00, e8390_base + EN0_ISR);
+ ei_local->irqlock = 1;
+
+ /* !!Assumption!! -- we stay in page 0. Don't break this. */
+ while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0 &&
+ ++nr_serviced < MAX_SERVICE)
+ {
+ if (!netif_running(dev) || (interrupts == 0xff)) {
+ if (ei_debug > 1)
+ netdev_warn(dev,
+ "interrupt from stopped card\n");
+ outb_p(interrupts, e8390_base + EN0_ISR);
+ interrupts = 0;
+ break;
+ }
+ handled = 1;
+
+ /* AX88190 bug fix. */
+ outb_p(interrupts, e8390_base + EN0_ISR);
+ for (i = 0; i < 10; i++) {
+ if (!(inb(e8390_base + EN0_ISR) & interrupts))
+ break;
+ outb_p(0, e8390_base + EN0_ISR);
+ outb_p(interrupts, e8390_base + EN0_ISR);
+ }
+ if (interrupts & ENISR_OVER)
+ ei_rx_overrun(dev);
+ else if (interrupts & (ENISR_RX+ENISR_RX_ERR))
+ {
+ /* Got a good (?) packet. */
+ ei_receive(dev);
+ }
+ /* Push the next to-transmit packet through. */
+ if (interrupts & ENISR_TX)
+ ei_tx_intr(dev);
+ else if (interrupts & ENISR_TX_ERR)
+ ei_tx_err(dev);
+
+ if (interrupts & ENISR_COUNTERS)
+ {
+ dev->stats.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0);
+ dev->stats.rx_crc_errors += inb_p(e8390_base + EN0_COUNTER1);
+ dev->stats.rx_missed_errors += inb_p(e8390_base + EN0_COUNTER2);
+ }
+ }
+
+ if (interrupts && ei_debug > 3)
+ {
+ handled = 1;
+ if (nr_serviced >= MAX_SERVICE)
+ {
+ /* 0xFF is valid for a card removal */
+ if (interrupts != 0xFF)
+ netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
+ interrupts);
+ outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
+ } else {
+ netdev_warn(dev, "unknown interrupt %#2x\n",
+ interrupts);
+ outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
+ }
+ }
+
+ /* Turn 8390 interrupts back on. */
+ ei_local->irqlock = 0;
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ return IRQ_RETVAL(handled);
+}
+
+/**
+ * ei_tx_err - handle transmitter error
+ * @dev: network device which threw the exception
+ *
+ * A transmitter error has happened. Most likely excess collisions (which
+ * is a fairly normal condition). If the error is one where the Tx will
+ * have been aborted, we try and send another one right away, instead of
+ * letting the failed packet sit and collect dust in the Tx buffer. This
+ * is a much better solution as it avoids kernel based Tx timeouts, and
+ * an unnecessary card reset.
+ *
+ * Called with lock held.
+ */
+
+static void ei_tx_err(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ unsigned char txsr = inb_p(e8390_base+EN0_TSR);
+ unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
+
+#ifdef VERBOSE_ERROR_DUMP
+ netdev_printk(KERN_DEBUG, dev,
+ "transmitter error (%#2x):", txsr);
+ if (txsr & ENTSR_ABT)
+ pr_cont(" excess-collisions");
+ if (txsr & ENTSR_ND)
+ pr_cont(" non-deferral");
+ if (txsr & ENTSR_CRS)
+ pr_cont(" lost-carrier");
+ if (txsr & ENTSR_FU)
+ pr_cont(" FIFO-underrun");
+ if (txsr & ENTSR_CDH)
+ pr_cont(" lost-heartbeat");
+ pr_cont("\n");
+#endif
+
+ if (tx_was_aborted)
+ ei_tx_intr(dev);
+ else
+ {
+ dev->stats.tx_errors++;
+ if (txsr & ENTSR_CRS) dev->stats.tx_carrier_errors++;
+ if (txsr & ENTSR_CDH) dev->stats.tx_heartbeat_errors++;
+ if (txsr & ENTSR_OWC) dev->stats.tx_window_errors++;
+ }
+}
+
+/**
+ * ei_tx_intr - transmit interrupt handler
+ * @dev: network device for which tx intr is handled
+ *
+ * We have finished a transmit: check for errors and then trigger the next
+ * packet to be sent. Called with lock held.
+ */
+
+static void ei_tx_intr(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = netdev_priv(dev);
+ int status = inb(e8390_base + EN0_TSR);
+
+ /*
+ * There are two Tx buffers, see which one finished, and trigger
+ * the send of another one if it exists.
+ */
+ ei_local->txqueue--;
+
+ if (ei_local->tx1 < 0)
+ {
+ if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
+ netdev_err(dev, "%s: bogus last_tx_buffer %d, tx1=%d\n",
+ ei_local->name, ei_local->lasttx,
+ ei_local->tx1);
+ ei_local->tx1 = 0;
+ if (ei_local->tx2 > 0)
+ {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
+ dev->trans_start = jiffies;
+ ei_local->tx2 = -1;
+ ei_local->lasttx = 2;
+ }
+ else ei_local->lasttx = 20, ei_local->txing = 0;
+ }
+ else if (ei_local->tx2 < 0)
+ {
+ if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
+ netdev_info(dev, "%s: bogus last_tx_buffer %d, tx2=%d\n",
+ ei_local->name, ei_local->lasttx,
+ ei_local->tx2);
+ ei_local->tx2 = 0;
+ if (ei_local->tx1 > 0)
+ {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
+ dev->trans_start = jiffies;
+ ei_local->tx1 = -1;
+ ei_local->lasttx = 1;
+ }
+ else
+ ei_local->lasttx = 10, ei_local->txing = 0;
+ }
+// else
+// netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
+// ei_local->lasttx);
+
+ /* Minimize Tx latency: update the statistics after we restart TXing. */
+ if (status & ENTSR_COL)
+ dev->stats.collisions++;
+ if (status & ENTSR_PTX)
+ dev->stats.tx_packets++;
+ else
+ {
+ dev->stats.tx_errors++;
+ if (status & ENTSR_ABT)
+ {
+ dev->stats.tx_aborted_errors++;
+ dev->stats.collisions += 16;
+ }
+ if (status & ENTSR_CRS)
+ dev->stats.tx_carrier_errors++;
+ if (status & ENTSR_FU)
+ dev->stats.tx_fifo_errors++;
+ if (status & ENTSR_CDH)
+ dev->stats.tx_heartbeat_errors++;
+ if (status & ENTSR_OWC)
+ dev->stats.tx_window_errors++;
+ }
+ netif_wake_queue(dev);
+}
+
+/**
+ * ei_receive - receive some packets
+ * @dev: network device with which receive will be run
+ *
+ * We have a good packet(s), get it/them out of the buffers.
+ * Called with lock held.
+ */
+
+static void ei_receive(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = netdev_priv(dev);
+ unsigned char rxing_page, this_frame, next_frame;
+ unsigned short current_offset;
+ int rx_pkt_count = 0;
+ struct e8390_pkt_hdr rx_frame;
+
+ while (++rx_pkt_count < 10)
+ {
+ int pkt_len, pkt_stat;
+
+ /* Get the rx page (incoming packet pointer). */
+ rxing_page = inb_p(e8390_base + EN1_CURPAG -1);
+
+ /* Remove one frame from the ring. Boundary is always a page behind. */
+ this_frame = inb_p(e8390_base + EN0_BOUNDARY) + 1;
+ if (this_frame >= ei_local->stop_page)
+ this_frame = ei_local->rx_start_page;
+
+ /* Someday we'll omit the previous check, iff we never get this message.
+ (There is at least one clone claimed to have a problem.)
+
+ Keep quiet if it looks like a card removal. One problem here
+ is that some clones crash in roughly the same way.
+ */
+ if (ei_debug > 0 && this_frame != ei_local->current_page &&
+ (this_frame != 0x0 || rxing_page != 0xFF))
+ netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
+ this_frame, ei_local->current_page);
+
+ if (this_frame == rxing_page) /* Read all the frames? */
+ break; /* Done for now */
+
+ current_offset = this_frame << 8;
+ ei_get_8390_hdr(dev, &rx_frame, this_frame);
+
+ pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
+ pkt_stat = rx_frame.status;
+
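+ /* Where the next frame should start: 4 byte header plus data,
+ rounded up to 256 byte pages. */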
+ next_frame = this_frame + 1 + ((pkt_len+4)>>8);
+
+ if (pkt_len < 60 || pkt_len > 1518)
+ {
+ if (ei_debug)
+ netdev_printk(KERN_DEBUG, dev,
+ "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
+ rx_frame.count, rx_frame.status,
+ rx_frame.next);
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
+ }
+ else if ((pkt_stat & 0x0F) == ENRSR_RXOK)
+ {
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL)
+ {
+ if (ei_debug > 1)
+ netdev_printk(KERN_DEBUG, dev,
+ "Couldn't allocate a sk_buff of size %d\n",
+ pkt_len);
+ dev->stats.rx_dropped++;
+ break;
+ }
+ else
+ {
+ skb_reserve(skb, 2); /* IP headers on 16 byte boundaries */
+ skb_put(skb, pkt_len); /* Make room */
+ ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ if (pkt_stat & ENRSR_PHY)
+ dev->stats.multicast++;
+ }
+ }
+ else
+ {
+ if (ei_debug)
+ netdev_printk(KERN_DEBUG, dev,
+ "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ rx_frame.status, rx_frame.next,
+ rx_frame.count);
+ dev->stats.rx_errors++;
+ /* NB: The NIC counts CRC, frame and missed errors. */
+ if (pkt_stat & ENRSR_FO)
+ dev->stats.rx_fifo_errors++;
+ }
+ next_frame = rx_frame.next;
+
+ /* This _should_ never happen: it's here for avoiding bad clones. */
+ if (next_frame >= ei_local->stop_page) {
+ netdev_info(dev, "next frame inconsistency, %#2x\n",
+ next_frame);
+ next_frame = ei_local->rx_start_page;
+ }
+ ei_local->current_page = next_frame;
+ outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
+ }
+}
+
+/**
+ * ei_rx_overrun - handle receiver overrun
+ * @dev: network device which threw exception
+ *
+ * We have a receiver overrun: we have to kick the 8390 to get it started
+ * again. Problem is that you have to kick it exactly as NS prescribes in
+ * the updated datasheets, or "the NIC may act in an unpredictable manner."
+ * This includes causing "the NIC to defer indefinitely when it is stopped
+ * on a busy network." Ugh.
+ * Called with lock held. Don't call this with the interrupts off or your
+ * computer will hate you - it takes 10ms or so.
+ */
+
+static void ei_rx_overrun(struct net_device *dev)
+{
+ axnet_dev_t *info = PRIV(dev);
+ long e8390_base = dev->base_addr;
+ unsigned char was_txing, must_resend = 0;
+
+ /*
+ * Record whether a Tx was in progress and then issue the
+ * stop command.
+ */
+ was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
+
+ if (ei_debug > 1)
+ netdev_printk(KERN_DEBUG, dev, "Receiver overrun\n");
+ dev->stats.rx_over_errors++;
+
+ /*
+ * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
+ * We wait at least 2ms.
+ */
+
+ mdelay(2);
+
+ /*
+ * Reset RBCR[01] back to zero as per magic incantation.
+ */
+ outb_p(0x00, e8390_base+EN0_RCNTLO);
+ outb_p(0x00, e8390_base+EN0_RCNTHI);
+
+ /*
+ * See if any Tx was interrupted or not. According to NS, this
+ * step is vital, and skipping it will cause no end of havoc.
+ */
+
+ if (was_txing)
+ {
+ unsigned char tx_completed = inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
+ if (!tx_completed)
+ must_resend = 1;
+ }
+
+ /*
+ * Have to enter loopback mode and then restart the NIC before
+ * you are allowed to slurp packets up off the ring.
+ */
+ outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
+ outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
+
+ /*
+ * Clear the Rx ring of all the debris, and ack the interrupt.
+ */
+ ei_receive(dev);
+
+ /*
+ * Leave loopback mode, and resend any packet that got stopped.
+ */
+ outb_p(E8390_TXCONFIG | info->duplex_flag, e8390_base + EN0_TXCR);
+ if (must_resend)
+ outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
+}
+
+/*
+ * Collect the stats. This is called unlocked and from several contexts.
+ */
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct ei_device *ei_local = netdev_priv(dev);
+ unsigned long flags;
+
+ /* If the card is stopped, just return the present stats. */
+ if (!netif_running(dev))
+ return &dev->stats;
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ /* Read the counter registers, assuming we are in page 0. */
+ dev->stats.rx_frame_errors += inb_p(ioaddr + EN0_COUNTER0);
+ dev->stats.rx_crc_errors += inb_p(ioaddr + EN0_COUNTER1);
+ dev->stats.rx_missed_errors += inb_p(ioaddr + EN0_COUNTER2);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+ return &dev->stats;
+}
+
+/*
+ * Form the 64 bit 8390 multicast table from the linked list of addresses
+ * associated with this dev structure.
+ */
+
+static inline void make_mc_bits(u8 *bits, struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ u32 crc;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc(ETH_ALEN, ha->addr);
+ /*
+ * The 8390 uses the 6 most significant bits of the
+ * CRC to index the multicast table.
+ */
+ bits[crc>>29] |= (1<<((crc>>26)&7));
+ }
+}
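+
+/*
+ * Worked example with a hypothetical CRC value: if ether_crc() returned
+ * 0xE0000000, then crc>>29 == 7 selects bits[7] and (crc>>26)&7 == 0
+ * selects bit 0, so bit 0 of the last filter byte would be set.
+ */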
+
+/**
+ * do_set_multicast_list - set/clear multicast filter
+ * @dev: net device for which multicast filter is adjusted
+ *
+ * Set or clear the multicast filter for this adaptor.
+ * Must be called with lock held.
+ */
+
+static void do_set_multicast_list(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ int i;
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
+ memset(ei_local->mcfilter, 0, 8);
+ if (!netdev_mc_empty(dev))
+ make_mc_bits(ei_local->mcfilter, dev);
+ } else {
+ /* set to accept-all */
+ memset(ei_local->mcfilter, 0xFF, 8);
+ }
+
+ outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
+ for (i = 0; i < 8; i++)
+ {
+ outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
+ }
+ outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);
+
+ if (dev->flags & IFF_PROMISC)
+ outb_p(E8390_RXCONFIG | 0x58, e8390_base + EN0_RXCR);
+ else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
+ outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR);
+ else
+ outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR);
+
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
+}
+
+/*
+ * Called without the lock held. This is invoked from user context and may
+ * run in parallel with just about everything else. It's also fairly quick
+ * and not called too often. We must protect against both bh and irq users.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_lock(dev), flags);
+ do_set_multicast_list(dev);
+ spin_unlock_irqrestore(&dev_lock(dev), flags);
+}
+
+/* This page of functions should be 8390 generic */
+/* Follow National Semi's recommendations for initializing the "NIC". */
+
+/**
+ * AX88190_init - initialize 8390 hardware
+ * @dev: network device to initialize
+ * @startp: boolean. non-zero value to initiate chip processing
+ *
+ * Must be called with lock held.
+ */
+
+static void AX88190_init(struct net_device *dev, int startp)
+{
+ axnet_dev_t *info = PRIV(dev);
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = netdev_priv(dev);
+ int i;
+ int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48;
+
+ if (sizeof(struct e8390_pkt_hdr) != 4)
+ panic("8390.c: header struct mispacked\n");
+ /* Follow National Semi's recommendations for initializing the DP83902. */
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
+ outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */
+ /* Clear the remote byte count registers. */
+ outb_p(0x00, e8390_base + EN0_RCNTLO);
+ outb_p(0x00, e8390_base + EN0_RCNTHI);
+ /* Set to monitor and loopback mode -- this is vital! */
+ outb_p(E8390_RXOFF|0x40, e8390_base + EN0_RXCR); /* 0x60 */
+ outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
+ /* Set the transmit page and receive ring. */
+ outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
+ ei_local->tx1 = ei_local->tx2 = 0;
+ outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
+ outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f, NS says 0x26 */
+ ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */
+ outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
+ /* Clear the pending interrupts and mask. */
+ outb_p(0xFF, e8390_base + EN0_ISR);
+ outb_p(0x00, e8390_base + EN0_IMR);
+
+ /* Copy the station address into the DS8390 registers. */
+
+ outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
+ for (i = 0; i < 6; i++)
+ {
+ outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
+ if (inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
+ netdev_err(dev, "Hw. address read/write mismatch %d\n", i);
+ }
+
+ outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
+
+ netif_start_queue(dev);
+ ei_local->tx1 = ei_local->tx2 = 0;
+ ei_local->txing = 0;
+
+ if (info->flags & IS_AX88790) /* select Internal PHY */
+ outb(0x10, e8390_base + AXNET_GPIO);
+
+ if (startp)
+ {
+ outb_p(0xff, e8390_base + EN0_ISR);
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
+ outb_p(E8390_TXCONFIG | info->duplex_flag,
+ e8390_base + EN0_TXCR); /* xmit on. */
+ /* 3c503 TechMan says rxconfig only after the NIC is started. */
+ outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR); /* rx on, */
+ do_set_multicast_list(dev); /* (re)load the mcast table */
+ }
+}
+
+/* Trigger a transmit start, assuming the length is valid.
+ Always called with the page lock held */
+
+static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
+ int start_page)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local __attribute__((unused)) = netdev_priv(dev);
+
+ if (inb_p(e8390_base) & E8390_TRANS)
+ {
+ netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
+ return;
+ }
+ outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
+ outb_p(length >> 8, e8390_base + EN0_TCNTHI);
+ outb_p(start_page, e8390_base + EN0_TPSR);
+ outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
+}
diff --git a/drivers/net/ethernet/8390/e2100.c b/drivers/net/ethernet/8390/e2100.c
new file mode 100644
index 000000000000..d16dc53c1813
--- /dev/null
+++ b/drivers/net/ethernet/8390/e2100.c
@@ -0,0 +1,490 @@
+/* e2100.c: A Cabletron E2100 series ethernet driver for linux. */
+/*
+ Written 1993-1994 by Donald Becker.
+
+ Copyright 1994 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+
+ This is a driver for the Cabletron E2100 series ethercards.
+
+ The Author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ The E2100 series ethercard is a fairly generic shared memory 8390
+ implementation. The only unusual aspect is the way the shared memory
+ registers are set: first you do an inb() in what is normally the
+ station address region, and the low three bits of the next outb() *address*
+ are used as the write value for that register. Either someone wasn't
+ too used to dem bit en bites, or they were trying to obfuscate the
+ programming interface.
+
+ There is an additional complication when setting the window on the packet
+ buffer. You must first do a read into the packet buffer region with the
+ low 8 address bits the address setting the page for the start of the packet
+ buffer window, and then do the above operation. See mem_on() for details.
+
+ One bug on the chip is that even a hard reset won't disable the memory
+ window, usually resulting in a hung machine if mem_off() isn't called.
+ If this happens, you must power down the machine for about 30 seconds.
+*/
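+
+/*
+ * Illustrative example of the scheme above, inferred from mem_on() and
+ * mem_off() below rather than quoted from a datasheet: to write the
+ * value 5 into the memory enable register at offset 0x10 you would do
+ *
+ *    inb(port + 0x10);            ...the read selects the register...
+ *    outb(0, port + 0x10 + 5);    ...address bits 2-0 carry the value...
+ *
+ * The data byte written appears to be ignored; only the address counts.
+ */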
+
+static const char version[] =
+ "e2100.c:v1.01 7/21/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "8390.h"
+
+#define DRV_NAME "e2100"
+
+static int e21_probe_list[] = {0x300, 0x280, 0x380, 0x220, 0};
+
+/* Offsets from the base_addr.
+ Read from the ASIC register, and the low three bits of the next outb()
+ address are used to set the corresponding register. */
+#define E21_NIC_OFFSET 0 /* Offset to the 8390 NIC. */
+#define E21_ASIC 0x10
+#define E21_MEM_ENABLE 0x10
+#define E21_MEM_ON 0x05 /* Enable memory in 16 bit mode. */
+#define E21_MEM_ON_8 0x07 /* Enable memory in 8 bit mode. */
+#define E21_MEM_BASE 0x11
+#define E21_IRQ_LOW 0x12 /* The low three bits of the IRQ number. */
+#define E21_IRQ_HIGH 0x14 /* The high IRQ bit and media select ... */
+#define E21_MEDIA 0x14 /* (alias). */
+#define E21_ALT_IFPORT 0x02 /* Set to use the other (BNC,AUI) port. */
+#define E21_BIG_MEM 0x04 /* Use a bigger (64K) buffer (we don't) */
+#define E21_SAPROM 0x10 /* Offset to station address data. */
+#define E21_IO_EXTENT 0x20
+
+static inline void mem_on(short port, volatile char __iomem *mem_base,
+ unsigned char start_page )
+{
+ /* This is a little weird: set the shared memory window by doing a
+ read. The low address bits specify the starting page. */
+ readb(mem_base+start_page);
+ inb(port + E21_MEM_ENABLE);
+ outb(E21_MEM_ON, port + E21_MEM_ENABLE + E21_MEM_ON);
+}
+
+static inline void mem_off(short port)
+{
+ inb(port + E21_MEM_ENABLE);
+ outb(0x00, port + E21_MEM_ENABLE);
+}
+
+/* In other drivers I put the TX pages first, but the E2100 window circuitry
+ is designed to have a 4K Tx region last. The windowing circuitry wraps the
+ window at 0x2fff->0x0000 so that the packets at e.g. 0x2f00 in the RX ring
+ appear contiguously in the window. */
+#define E21_RX_START_PG 0x00 /* First page of RX buffer */
+#define E21_RX_STOP_PG 0x30 /* Last page +1 of RX ring */
+#define E21_BIG_RX_STOP_PG 0xF0 /* Last page +1 of RX ring */
+#define E21_TX_START_PG E21_RX_STOP_PG /* First page of TX buffer */
+
+static int e21_probe1(struct net_device *dev, int ioaddr);
+
+static int e21_open(struct net_device *dev);
+static void e21_reset_8390(struct net_device *dev);
+static void e21_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void e21_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+static void e21_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static int e21_open(struct net_device *dev);
+static int e21_close(struct net_device *dev);
+
+
+/* Probe for the E2100 series ethercards. These cards have an 8390 at the
+ base address and the station address at both offset 0x10 and 0x18. I read
+ the station address from offset 0x18 to avoid the dataport of NE2000
+ ethercards, and look for Ctron's unique ID (first three octets of the
+ station address).
+ */
+
+static int __init do_e2100_probe(struct net_device *dev)
+{
+ int *port;
+ int base_addr = dev->base_addr;
+ int irq = dev->irq;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return e21_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ for (port = e21_probe_list; *port; port++) {
+ dev->irq = irq;
+ if (e21_probe1(dev, *port) == 0)
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init e2100_probe(int unit)
+{
+ struct net_device *dev = alloc_ei_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_e2100_probe(dev);
+ if (err)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static const struct net_device_ops e21_netdev_ops = {
+ .ndo_open = e21_open,
+ .ndo_stop = e21_close,
+
+ .ndo_start_xmit = ei_start_xmit,
+ .ndo_tx_timeout = ei_tx_timeout,
+ .ndo_get_stats = ei_get_stats,
+ .ndo_set_rx_mode = ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ei_poll,
+#endif
+};
+
+static int __init e21_probe1(struct net_device *dev, int ioaddr)
+{
+ int i, status, retval;
+ unsigned char *station_addr = dev->dev_addr;
+ static unsigned version_printed;
+
+ if (!request_region(ioaddr, E21_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ /* First check the station address for the Ctron prefix. */
+ if (inb(ioaddr + E21_SAPROM + 0) != 0x00 ||
+ inb(ioaddr + E21_SAPROM + 1) != 0x00 ||
+ inb(ioaddr + E21_SAPROM + 2) != 0x1d) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ /* Verify by making certain that there is an 8390 there. */
+ outb(E8390_NODMA + E8390_STOP, ioaddr);
+ udelay(1); /* we want to delay one I/O cycle - at 2MHz that is 500ns */
+ status = inb(ioaddr);
+ if (status != 0x21 && status != 0x23) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ /* Read the station address PROM. */
+ for (i = 0; i < 6; i++)
+ station_addr[i] = inb(ioaddr + E21_SAPROM + i);
+
+ inb(ioaddr + E21_MEDIA); /* Point to media selection. */
+ outb(0, ioaddr + E21_ASIC); /* and disable the secondary interface. */
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ printk("%s: E21** at %#3x,", dev->name, ioaddr);
+ for (i = 0; i < 6; i++)
+ printk(" %02X", station_addr[i]);
+
+ if (dev->irq < 2) {
+ static const int irqlist[] = {15, 11, 10, 12, 5, 9, 3, 4};
+ for (i = 0; i < ARRAY_SIZE(irqlist); i++)
+ if (request_irq (irqlist[i], NULL, 0, "bogus", NULL) != -EBUSY) {
+ dev->irq = irqlist[i];
+ break;
+ }
+ if (i >= ARRAY_SIZE(irqlist)) {
+ printk(" unable to get IRQ %d.\n", dev->irq);
+ retval = -EAGAIN;
+ goto out;
+ }
+ } else if (dev->irq == 2) /* Fixup luser bogosity: IRQ2 is really IRQ9 */
+ dev->irq = 9;
+
+ /* The 8390 is at the base address. */
+ dev->base_addr = ioaddr;
+
+ ei_status.name = "E2100";
+ ei_status.word16 = 1;
+ ei_status.tx_start_page = E21_TX_START_PG;
+ ei_status.rx_start_page = E21_RX_START_PG;
+ ei_status.stop_page = E21_RX_STOP_PG;
+ ei_status.saved_irq = dev->irq;
+
+ /* Check the media port used. The port can be passed in on the
+ low mem_end bits. */
+ if (dev->mem_end & 15)
+ dev->if_port = dev->mem_end & 7;
+ else {
+ dev->if_port = 0;
+ inb(ioaddr + E21_MEDIA); /* Turn automatic media detection on. */
+ for(i = 0; i < 6; i++)
+ if (station_addr[i] != inb(ioaddr + E21_SAPROM + 8 + i)) {
+ dev->if_port = 1;
+ break;
+ }
+ }
+
+ /* Never map in the E21 shared memory unless you are actively using it.
+ Also, the shared memory effectively has only one setting -- spread all
+ over the 128K region! */
+ if (dev->mem_start == 0)
+ dev->mem_start = 0xd0000;
+
+ ei_status.mem = ioremap(dev->mem_start, 2*1024);
+ if (!ei_status.mem) {
+ printk("unable to remap memory\n");
+ retval = -EAGAIN;
+ goto out;
+ }
+
+#ifdef notdef
+ /* These values are unused. The E2100 has a 2K window into the packet
+ buffer. The window can be set to start on any page boundary. */
+ ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
+ dev->mem_end = ei_status.rmem_end = dev->mem_start + 2*1024;
+#endif
+
+ printk(", IRQ %d, %s media, memory @ %#lx.\n", dev->irq,
+ dev->if_port ? "secondary" : "primary", dev->mem_start);
+
+ ei_status.reset_8390 = &e21_reset_8390;
+ ei_status.block_input = &e21_block_input;
+ ei_status.block_output = &e21_block_output;
+ ei_status.get_8390_hdr = &e21_get_8390_hdr;
+
+ dev->netdev_ops = &e21_netdev_ops;
+ NS8390_init(dev, 0);
+
+ retval = register_netdev(dev);
+ if (retval)
+ goto out;
+ return 0;
+out:
+ release_region(ioaddr, E21_IO_EXTENT);
+ return retval;
+}
+
+static int
+e21_open(struct net_device *dev)
+{
+ short ioaddr = dev->base_addr;
+ int retval;
+
+ if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev)))
+ return retval;
+
+ /* Set the interrupt line and memory base on the hardware. */
+ inb(ioaddr + E21_IRQ_LOW);
+ outb(0, ioaddr + E21_ASIC + (dev->irq & 7));
+ inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */
+ outb(0, ioaddr + E21_ASIC + (dev->irq > 7 ? 1:0)
+ + (dev->if_port ? E21_ALT_IFPORT : 0));
+ inb(ioaddr + E21_MEM_BASE);
+ outb(0, ioaddr + E21_ASIC + ((dev->mem_start >> 17) & 7));
+
+ ei_open(dev);
+ return 0;
+}
+
+static void
+e21_reset_8390(struct net_device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ outb(0x01, ioaddr);
+ if (ei_debug > 1) printk("resetting the E2180x3 t=%ld...", jiffies);
+ ei_status.txing = 0;
+
+ /* Set up the ASIC registers, just in case something changed them. */
+
+ if (ei_debug > 1) printk("reset done\n");
+}
+
+/* Grab the 8390 specific header. We position the 2k window so the header page
+ appears at the start of the shared memory. */
+
+static void
+e21_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+
+ short ioaddr = dev->base_addr;
+ char __iomem *shared_mem = ei_status.mem;
+
+ mem_on(ioaddr, shared_mem, ring_page);
+
+#ifdef notdef
+ /* Officially this is what we are doing, but the readl() is faster */
+ memcpy_fromio(hdr, shared_mem, sizeof(struct e8390_pkt_hdr));
+#else
+ ((unsigned int*)hdr)[0] = readl(shared_mem);
+#endif
+
+ /* Turn off memory access: we would need to reprogram the window anyway. */
+ mem_off(ioaddr);
+
+}
+
+/* Block input and output are easy on shared memory ethercards.
+ The E21xx makes block_input() especially easy by wrapping the top
+ ring buffer to the bottom automatically. */
+static void
+e21_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ short ioaddr = dev->base_addr;
+ char __iomem *shared_mem = ei_status.mem;
+
+ mem_on(ioaddr, shared_mem, (ring_offset>>8));
+
+ memcpy_fromio(skb->data, ei_status.mem + (ring_offset & 0xff), count);
+
+ mem_off(ioaddr);
+}
+
+static void
+e21_block_output(struct net_device *dev, int count, const unsigned char *buf,
+ int start_page)
+{
+ short ioaddr = dev->base_addr;
+ volatile char __iomem *shared_mem = ei_status.mem;
+
+ /* Set the shared memory window start by doing a read, with the low address
+ bits specifying the starting page. */
+ readb(shared_mem + start_page);
+ mem_on(ioaddr, shared_mem, start_page);
+
+ memcpy_toio(shared_mem, buf, count);
+ mem_off(ioaddr);
+}
+
+static int
+e21_close(struct net_device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ free_irq(dev->irq, dev);
+ dev->irq = ei_status.saved_irq;
+
+ /* Shut off the interrupt line and secondary interface. */
+ inb(ioaddr + E21_IRQ_LOW);
+ outb(0, ioaddr + E21_ASIC);
+ inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */
+ outb(0, ioaddr + E21_ASIC);
+
+ ei_close(dev);
+
+ /* Double-check that the memory has been turned off, because really
+ really bad things happen if it isn't. */
+ mem_off(ioaddr);
+
+ return 0;
+}
+
+
+#ifdef MODULE
+#define MAX_E21_CARDS 4 /* Max number of E21 cards per module */
+static struct net_device *dev_e21[MAX_E21_CARDS];
+static int io[MAX_E21_CARDS];
+static int irq[MAX_E21_CARDS];
+static int mem[MAX_E21_CARDS];
+static int xcvr[MAX_E21_CARDS]; /* choose int. or ext. xcvr */
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(mem, int, NULL, 0);
+module_param_array(xcvr, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s)");
+MODULE_PARM_DESC(mem, " memory base address(es)");
+MODULE_PARM_DESC(xcvr, "transceiver(s) (0=internal, 1=external)");
+MODULE_DESCRIPTION("Cabletron E2100 ISA ethernet driver");
+MODULE_LICENSE("GPL");
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+
+int __init init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) {
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "e2100.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ dev = alloc_ei_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_start = mem[this_dev];
+ dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */
+ if (do_e2100_probe(dev) == 0) {
+ dev_e21[found++] = dev;
+ continue;
+ }
+ free_netdev(dev);
+ printk(KERN_WARNING "e2100.c: No E2100 card found (i/o = 0x%x).\n", io[this_dev]);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ /* NB: e21_close() handles free_irq */
+ iounmap(ei_status.mem);
+ release_region(dev->base_addr, E21_IO_EXTENT);
+}
+
+void __exit
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) {
+ struct net_device *dev = dev_e21[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/es3210.c b/drivers/net/ethernet/8390/es3210.c
new file mode 100644
index 000000000000..7a09575ecff0
--- /dev/null
+++ b/drivers/net/ethernet/8390/es3210.c
@@ -0,0 +1,446 @@
+/*
+ es3210.c
+
+ Linux driver for Racal-Interlan ES3210 EISA Network Adapter
+
+ Copyright (C) 1996, Paul Gortmaker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Information and Code Sources:
+
+ 1) The existing myriad of Linux 8390 drivers written by Donald Becker.
+
+ 2) Once again Russ Nelson's asm packet driver provided additional info.
+
+ 3) Info for getting IRQ and sh-mem gleaned from the EISA cfg files.
+ Too bad it doesn't work -- see below.
+
+ The ES3210 is an EISA shared memory NS8390 implementation. Note
+ that all memory copies to/from the board must be 32bit transfers,
+ which rules out using eth_io_copy_and_sum() in this driver.
+
+ Apparently there are two slightly different revisions of the
+ card, since there are two distinct EISA cfg files (!rii0101.cfg
+ and !rii0102.cfg). One has media select in the cfg file and the
+ other doesn't. Hopefully this will work with either.
+
+ That is about all I can tell you about it, having never actually
+ even seen one of these cards. :) Try http://www.interlan.com
+ if you want more info.
+
+ Thanks go to Mark Salazar for testing v0.02 of this driver.
+
+ Bugs, to-fix, etc:
+
+ 1) The EISA cfg ports that are *supposed* to have the IRQ and shared
+ mem values just read 0xff all the time. Hrrmpf. Apparently the
+ same happens with the packet driver as the code for reading
+ these registers is disabled there. In the meantime, boot with:
+ ether=<IRQ>,0,0x<shared_mem_addr>,eth0 to override the IRQ and
+ shared memory detection. (The i/o port detection is okay.)
+
+ 2) Module support currently untested. Probably works though.
+
+*/
+
+static const char version[] =
+ "es3210.c: Driver revision v0.03, 14/09/96\n";
+
+#include <linux/module.h>
+#include <linux/eisa.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "8390.h"
+
+static int es_probe1(struct net_device *dev, int ioaddr);
+
+static void es_reset_8390(struct net_device *dev);
+
+static void es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page);
+static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset);
+static void es_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page);
+
+#define ES_START_PG 0x00 /* First page of TX buffer */
+#define ES_STOP_PG 0x40 /* Last page +1 of RX ring */
+
+#define ES_IO_EXTENT 0x37 /* The cfg file says 0xc90 -> 0xcc7 */
+#define ES_ID_PORT 0xc80 /* Same for all EISA cards */
+#define ES_SA_PROM 0xc90 /* Start of e'net addr. */
+#define ES_RESET_PORT 0xc84 /* From the packet driver source */
+#define ES_NIC_OFFSET 0xca0 /* Hello, the 8390 is *here* */
+
+#define ES_ADDR0 0x02 /* 3 byte vendor prefix */
+#define ES_ADDR1 0x07
+#define ES_ADDR2 0x01
+
+/*
+ * Two card revisions. EISA ID's are always rev. minor, rev. major, and
+ * then the three vendor letters stored in 5 bits each, with an "a" = 1.
+ * For eg: "rii" = 10010 01001 01001 = 0x4929, which is how the EISA
+ * config utility determines automagically what config file(s) to use.
+ */
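+
+/*
+ * A minimal sketch of that packing, for illustration only (the macros
+ * below are the authoritative IDs):
+ *
+ *    unsigned short vendor = ((('r' - 'a' + 1) << 10) |
+ *                             (('i' - 'a' + 1) << 5) |
+ *                              ('i' - 'a' + 1));
+ *
+ * which evaluates to the 0x4929 quoted above.
+ */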
+#define ES_EISA_ID1 0x01012949 /* !rii0101.cfg */
+#define ES_EISA_ID2 0x02012949 /* !rii0102.cfg */
+
+#define ES_CFG1 0xcc0 /* IOPORT(1) --> IOPORT(6) in cfg file */
+#define ES_CFG2 0xcc1
+#define ES_CFG3 0xcc2
+#define ES_CFG4 0xcc3
+#define ES_CFG5 0xcc4
+#define ES_CFG6 0xc84 /* NB: 0xc84 is also "reset" port. */
+
+/*
+ * You can OR any of the following bits together and assign it
+ * to ES_DEBUG to get verbose driver info during operation.
+ * Some of these don't do anything yet.
+ */
+
+#define ES_D_PROBE 0x01
+#define ES_D_RX_PKT 0x02
+#define ES_D_TX_PKT 0x04
+#define ES_D_IRQ 0x08
+
+#define ES_DEBUG 0
+
+static unsigned char lo_irq_map[] __initdata = {3, 4, 5, 6, 7, 9, 10};
+static unsigned char hi_irq_map[] __initdata = {11, 12, 0, 14, 0, 0, 0, 15};
+
+/*
+ * Probe for the card. The best way is to read the EISA ID if it
+ * is known. Then we check the prefix of the station address
+ * PROM for a match against the Racal-Interlan assigned value.
+ */
+
+static int __init do_es_probe(struct net_device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+ int irq = dev->irq;
+ int mem_start = dev->mem_start;
+
+ if (ioaddr > 0x1ff) /* Check a single specified location. */
+ return es_probe1(dev, ioaddr);
+ else if (ioaddr > 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ if (!EISA_bus) {
+#if ES_DEBUG & ES_D_PROBE
+ printk("es3210.c: Not EISA bus. Not probing high ports.\n");
+#endif
+ return -ENXIO;
+ }
+
+ /* EISA spec allows for up to 16 slots, but 8 is typical. */
+ for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
+ if (es_probe1(dev, ioaddr) == 0)
+ return 0;
+ dev->irq = irq;
+ dev->mem_start = mem_start;
+ }
+
+ return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init es_probe(int unit)
+{
+ struct net_device *dev = alloc_ei_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_es_probe(dev);
+ if (err)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static int __init es_probe1(struct net_device *dev, int ioaddr)
+{
+ int i, retval;
+ unsigned long eisa_id;
+
+ if (!request_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT, "es3210"))
+ return -ENODEV;
+
+#if ES_DEBUG & ES_D_PROBE
+ printk("es3210.c: probe at %#x, ID %#8x\n", ioaddr, inl(ioaddr + ES_ID_PORT));
+ printk("es3210.c: config regs: %#x %#x %#x %#x %#x %#x\n",
+ inb(ioaddr + ES_CFG1), inb(ioaddr + ES_CFG2), inb(ioaddr + ES_CFG3),
+ inb(ioaddr + ES_CFG4), inb(ioaddr + ES_CFG5), inb(ioaddr + ES_CFG6));
+#endif
+
+/* Check the EISA ID of the card. */
+ eisa_id = inl(ioaddr + ES_ID_PORT);
+ if ((eisa_id != ES_EISA_ID1) && (eisa_id != ES_EISA_ID2)) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ for (i = 0; i < ETHER_ADDR_LEN ; i++)
+ dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i);
+
+/* Check the Racal vendor ID as well. */
+ if (dev->dev_addr[0] != ES_ADDR0 ||
+ dev->dev_addr[1] != ES_ADDR1 ||
+ dev->dev_addr[2] != ES_ADDR2) {
+ printk("es3210.c: card not found %pM (invalid_prefix).\n",
+ dev->dev_addr);
+ retval = -ENODEV;
+ goto out;
+ }
+
+ printk("es3210.c: ES3210 rev. %ld at %#x, node %pM",
+ eisa_id>>24, ioaddr, dev->dev_addr);
+
+ /* Snarf the interrupt now. */
+ if (dev->irq == 0) {
+ unsigned char hi_irq = inb(ioaddr + ES_CFG2) & 0x07;
+ unsigned char lo_irq = inb(ioaddr + ES_CFG1) & 0xfe;
+
+ if (hi_irq != 0) {
+ dev->irq = hi_irq_map[hi_irq - 1];
+ } else {
+ int i = 0;
+ while (lo_irq > (1<<i)) i++;
+ dev->irq = lo_irq_map[i];
+ }
+ printk(" using IRQ %d", dev->irq);
+#if ES_DEBUG & ES_D_PROBE
+ printk("es3210.c: hi_irq %#x, lo_irq %#x, dev->irq = %d\n",
+ hi_irq, lo_irq, dev->irq);
+#endif
+ } else {
+ if (dev->irq == 2)
+ dev->irq = 9; /* Doh! */
+ printk(" assigning IRQ %d", dev->irq);
+ }
+
+ if (request_irq(dev->irq, ei_interrupt, 0, "es3210", dev)) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ retval = -EAGAIN;
+ goto out;
+ }
+
+ if (dev->mem_start == 0) {
+ unsigned char mem_enabled = inb(ioaddr + ES_CFG2) & 0xc0;
+ unsigned char mem_bits = inb(ioaddr + ES_CFG3) & 0x07;
+
+ if (mem_enabled != 0x80) {
+ printk(" shared mem disabled - giving up\n");
+ retval = -ENXIO;
+ goto out1;
+ }
+ dev->mem_start = 0xC0000 + mem_bits*0x4000;
+ printk(" using ");
+ } else {
+ printk(" assigning ");
+ }
+
+ ei_status.mem = ioremap(dev->mem_start, (ES_STOP_PG - ES_START_PG)*256);
+ if (!ei_status.mem) {
+ printk("ioremap failed - giving up\n");
+ retval = -ENXIO;
+ goto out1;
+ }
+
+ dev->mem_end = dev->mem_start + (ES_STOP_PG - ES_START_PG)*256;
+
+ printk("mem %#lx-%#lx\n", dev->mem_start, dev->mem_end-1);
+
+#if ES_DEBUG & ES_D_PROBE
+ if (inb(ioaddr + ES_CFG5))
+ printk("es3210: Warning - DMA channel enabled, but not used here.\n");
+#endif
+ /* Note, point at the 8390, and not the card... */
+ dev->base_addr = ioaddr + ES_NIC_OFFSET;
+
+ ei_status.name = "ES3210";
+ ei_status.tx_start_page = ES_START_PG;
+ ei_status.rx_start_page = ES_START_PG + TX_PAGES;
+ ei_status.stop_page = ES_STOP_PG;
+ ei_status.word16 = 1;
+
+ if (ei_debug > 0)
+ printk(version);
+
+ ei_status.reset_8390 = &es_reset_8390;
+ ei_status.block_input = &es_block_input;
+ ei_status.block_output = &es_block_output;
+ ei_status.get_8390_hdr = &es_get_8390_hdr;
+
+ dev->netdev_ops = &ei_netdev_ops;
+ NS8390_init(dev, 0);
+
+ retval = register_netdev(dev);
+ if (retval)
+ goto out1;
+ return 0;
+out1:
+ free_irq(dev->irq, dev);
+out:
+ release_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT);
+ return retval;
+}
+
+/*
+ * Reset as per the packet driver method. Judging by the EISA cfg
+ * file, this just toggles the "Board Enable" bits (bits 2 and 0).
+ */
+
+static void es_reset_8390(struct net_device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+ unsigned long end;
+
+ outb(0x04, ioaddr + ES_RESET_PORT);
+ if (ei_debug > 1) printk("%s: resetting the ES3210...", dev->name);
+
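+ /* Busy-wait roughly 20ms (2*HZ/100 jiffies) while the board resets. */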
+ end = jiffies + 2*HZ/100;
+ while ((signed)(end - jiffies) > 0) continue;
+
+ ei_status.txing = 0;
+ outb(0x01, ioaddr + ES_RESET_PORT);
+ if (ei_debug > 1) printk("reset done\n");
+}
+
+/*
+ * Note: In the following three functions is the implicit assumption
+ * that the associated memcpy will only use "rep; movsl" as long as
+ * we keep the counts as some multiple of doublewords. This is a
+ * requirement of the hardware, and also prevents us from using
+ * eth_io_copy_and_sum() since we can't guarantee it will limit
+ * itself to doubleword access.
+ */
+
+/*
+ * Grab the 8390 specific header. Similar to the block_input routine, but
+ * we don't need to be concerned with ring wrap as the header will be at
+ * the start of a page, so we optimize accordingly. (A single doubleword.)
+ */
+
+static void
+es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ void __iomem *hdr_start = ei_status.mem + ((ring_page - ES_START_PG)<<8);
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+ hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */
+}
+
+/*
+ * Block input and output are easy on shared memory ethercards, the only
+ * complication is when the ring buffer wraps. The count will already
+ * be rounded up to a doubleword value via es_get_8390_hdr() above.
+ */
+
+static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb,
+ int ring_offset)
+{
+ void __iomem *xfer_start = ei_status.mem + ring_offset - ES_START_PG*256;
+
+ if (ring_offset + count > ES_STOP_PG*256) {
+ /* Packet wraps over end of ring buffer. */
+ int semi_count = ES_STOP_PG*256 - ring_offset;
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, ei_status.mem, count);
+ } else {
+ /* Packet is in one chunk. */
+ memcpy_fromio(skb->data, xfer_start, count);
+ }
+}
+
+static void es_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ void __iomem *shmem = ei_status.mem + ((start_page - ES_START_PG)<<8);
+
+ count = (count + 3) & ~3; /* Round up to doubleword */
+ memcpy_toio(shmem, buf, count);
+}
+
+#ifdef MODULE
+#define MAX_ES_CARDS 4 /* Max number of ES3210 cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static struct net_device *dev_es3210[MAX_ES_CARDS];
+static int io[MAX_ES_CARDS];
+static int irq[MAX_ES_CARDS];
+static int mem[MAX_ES_CARDS];
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(mem, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s)");
+MODULE_PARM_DESC(mem, "memory base address(es)");
+MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver");
+MODULE_LICENSE("GPL");
+
+int __init init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_ES_CARDS; this_dev++) {
+ if (io[this_dev] == 0 && this_dev != 0)
+ break;
+ dev = alloc_ei_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_start = mem[this_dev];
+ if (do_es_probe(dev) == 0) {
+ dev_es3210[found++] = dev;
+ continue;
+ }
+ free_netdev(dev);
+ printk(KERN_WARNING "es3210.c: No es3210 card found (i/o = 0x%x).\n", io[this_dev]);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, ES_IO_EXTENT);
+ iounmap(ei_status.mem);
+}
+
+void __exit
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_ES_CARDS; this_dev++) {
+ struct net_device *dev = dev_es3210[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
new file mode 100644
index 000000000000..48c4948750d1
--- /dev/null
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -0,0 +1,866 @@
+/*
+ * linux/drivers/acorn/net/etherh.c
+ *
+ * Copyright (C) 2000-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * NS8390 I-cubed EtherH and ANT EtherM specific driver
+ * Thanks to I-Cubed for information on their cards.
+ * EtherM conversion (C) 1999 Chris Kemp and Tim Watterton
+ * EtherM integration (C) 2000 Aleph One Ltd (Tak-Shing Chan)
+ * EtherM integration re-engineered by Russell King.
+ *
+ * Changelog:
+ * 08-12-1996 RMK 1.00 Created
+ * RMK 1.03 Added support for EtherLan500 cards
+ * 23-11-1997 RMK 1.04 Added media autodetection
+ * 16-04-1998 RMK 1.05 Improved media autodetection
+ * 10-02-2000 RMK 1.06 Updated for 2.3.43
+ * 13-05-2000 RMK 1.07 Updated for 2.3.99-pre8
+ * 12-10-1999 CK/TEW EtherM driver first release
+ * 21-12-2000 TTC EtherH/EtherM integration
+ * 25-12-2000 RMK 1.08 Clean integration of EtherM into this driver.
+ * 03-01-2002 RMK 1.09 Always enable IRQs if we're in the nic slot.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+
+#include <asm/system.h>
+#include <asm/ecard.h>
+#include <asm/io.h>
+
+#define EI_SHIFT(x) (ei_local->reg_offset[x])
+
+#define ei_inb(_p) readb((void __iomem *)_p)
+#define ei_outb(_v,_p) writeb(_v,(void __iomem *)_p)
+#define ei_inb_p(_p) readb((void __iomem *)_p)
+#define ei_outb_p(_v,_p) writeb(_v,(void __iomem *)_p)
+
+#define NET_DEBUG 0
+#define DEBUG_INIT 2
+
+#define DRV_NAME "etherh"
+#define DRV_VERSION "1.11"
+
+static char version[] __initdata =
+ "EtherH/EtherM Driver (c) 2002-2004 Russell King " DRV_VERSION "\n";
+
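+/*
+ * lib8390.c is #included rather than linked so that the 8390 core is
+ * compiled against the EI_SHIFT/ei_inb/ei_outb overrides above; the
+ * resulting __ei_* entry points (used in etherh_netdev_ops below) then
+ * access the chip through the ioremapped registers.
+ */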
+#include "lib8390.c"
+
+static unsigned int net_debug = NET_DEBUG;
+
+struct etherh_priv {
+ void __iomem *ioc_fast;
+ void __iomem *memc;
+ void __iomem *dma_base;
+ unsigned int id;
+ void __iomem *ctrl_port;
+ unsigned char ctrl;
+ u32 supported;
+};
+
+struct etherh_data {
+ unsigned long ns8390_offset;
+ unsigned long dataport_offset;
+ unsigned long ctrlport_offset;
+ int ctrl_ioc;
+ const char name[16];
+ u32 supported;
+ unsigned char tx_start_page;
+ unsigned char stop_page;
+};
+
+MODULE_AUTHOR("Russell King");
+MODULE_DESCRIPTION("EtherH/EtherM driver");
+MODULE_LICENSE("GPL");
+
+#define ETHERH500_DATAPORT 0x800 /* MEMC */
+#define ETHERH500_NS8390 0x000 /* MEMC */
+#define ETHERH500_CTRLPORT 0x800 /* IOC */
+
+#define ETHERH600_DATAPORT 0x040 /* MEMC */
+#define ETHERH600_NS8390 0x800 /* MEMC */
+#define ETHERH600_CTRLPORT 0x200 /* MEMC */
+
+#define ETHERH_CP_IE 1
+#define ETHERH_CP_IF 2
+#define ETHERH_CP_HEARTBEAT 2
+
+#define ETHERH_TX_START_PAGE 1
+#define ETHERH_STOP_PAGE 127
+
+/*
+ * These came from CK/TEW
+ */
+#define ETHERM_DATAPORT 0x200 /* MEMC */
+#define ETHERM_NS8390 0x800 /* MEMC */
+#define ETHERM_CTRLPORT 0x23c /* MEMC */
+
+#define ETHERM_TX_START_PAGE 64
+#define ETHERM_STOP_PAGE 127
+
+/* ------------------------------------------------------------------------ */
+
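+/*
+ * ____alloc_ei_netdev(sizeof(struct etherh_priv)) in etherh_probe()
+ * lays out the private area as a struct ei_device followed by our
+ * struct etherh_priv; this macro skips over the ei_device part.
+ */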
+#define etherh_priv(dev) \
+ ((struct etherh_priv *)(((char *)netdev_priv(dev)) + sizeof(struct ei_device)))
+
+static inline void etherh_set_ctrl(struct etherh_priv *eh, unsigned char mask)
+{
+ unsigned char ctrl = eh->ctrl | mask;
+ eh->ctrl = ctrl;
+ writeb(ctrl, eh->ctrl_port);
+}
+
+static inline void etherh_clr_ctrl(struct etherh_priv *eh, unsigned char mask)
+{
+ unsigned char ctrl = eh->ctrl & ~mask;
+ eh->ctrl = ctrl;
+ writeb(ctrl, eh->ctrl_port);
+}
+
+static inline unsigned int etherh_get_stat(struct etherh_priv *eh)
+{
+ return readb(eh->ctrl_port);
+}
+
+
+static void etherh_irq_enable(ecard_t *ec, int irqnr)
+{
+ struct etherh_priv *eh = ec->irq_data;
+
+ etherh_set_ctrl(eh, ETHERH_CP_IE);
+}
+
+static void etherh_irq_disable(ecard_t *ec, int irqnr)
+{
+ struct etherh_priv *eh = ec->irq_data;
+
+ etherh_clr_ctrl(eh, ETHERH_CP_IE);
+}
+
+static expansioncard_ops_t etherh_ops = {
+ .irqenable = etherh_irq_enable,
+ .irqdisable = etherh_irq_disable,
+};
+
+
+static void
+etherh_setif(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ unsigned long flags;
+ void __iomem *addr;
+
+ local_irq_save(flags);
+
+ /* set the interface type */
+ switch (etherh_priv(dev)->id) {
+ case PROD_I3_ETHERLAN600:
+ case PROD_I3_ETHERLAN600A:
+ addr = (void __iomem *)dev->base_addr + EN0_RCNTHI;
+
+ switch (dev->if_port) {
+ case IF_PORT_10BASE2:
+ writeb((readb(addr) & 0xf8) | 1, addr);
+ break;
+ case IF_PORT_10BASET:
+ writeb((readb(addr) & 0xf8), addr);
+ break;
+ }
+ break;
+
+ case PROD_I3_ETHERLAN500:
+ switch (dev->if_port) {
+ case IF_PORT_10BASE2:
+ etherh_clr_ctrl(etherh_priv(dev), ETHERH_CP_IF);
+ break;
+
+ case IF_PORT_10BASET:
+ etherh_set_ctrl(etherh_priv(dev), ETHERH_CP_IF);
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ local_irq_restore(flags);
+}
+
+static int
+etherh_getifstat(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ void __iomem *addr;
+ int stat = 0;
+
+ switch (etherh_priv(dev)->id) {
+ case PROD_I3_ETHERLAN600:
+ case PROD_I3_ETHERLAN600A:
+ addr = (void __iomem *)dev->base_addr + EN0_RCNTHI;
+ switch (dev->if_port) {
+ case IF_PORT_10BASE2:
+ stat = 1;
+ break;
+ case IF_PORT_10BASET:
+ stat = readb(addr) & 4;
+ break;
+ }
+ break;
+
+ case PROD_I3_ETHERLAN500:
+ switch (dev->if_port) {
+ case IF_PORT_10BASE2:
+ stat = 1;
+ break;
+ case IF_PORT_10BASET:
+ stat = etherh_get_stat(etherh_priv(dev)) & ETHERH_CP_HEARTBEAT;
+ break;
+ }
+ break;
+
+ default:
+ stat = 0;
+ break;
+ }
+
+ return stat != 0;
+}
+
+/*
+ * Configure the interface. Note that we ignore the other
+ * parts of ifmap, since it's mostly meaningless for this driver.
+ */
+static int etherh_set_config(struct net_device *dev, struct ifmap *map)
+{
+ switch (map->port) {
+ case IF_PORT_10BASE2:
+ case IF_PORT_10BASET:
+ /*
+ * If the user explicitly sets the interface
+ * media type, turn off automedia detection.
+ */
+ dev->flags &= ~IFF_AUTOMEDIA;
+ dev->if_port = map->port;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ etherh_setif(dev);
+
+ return 0;
+}
+
+/*
+ * Reset the 8390 (hard reset). Note that the hardware gives us no way
+ * to do one; all we can do is stop the chip and rerun automedia selection.
+ */
+static void
+etherh_reset(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ void __iomem *addr = (void __iomem *)dev->base_addr;
+
+ writeb(E8390_NODMA+E8390_PAGE0+E8390_STOP, addr);
+
+ /*
+ * See if we need to change the interface type.
+ * Note that we use 'interface_num' as a flag
+ * to indicate that we need to change the media.
+ */
+ if (dev->flags & IFF_AUTOMEDIA && ei_local->interface_num) {
+ ei_local->interface_num = 0;
+
+ if (dev->if_port == IF_PORT_10BASET)
+ dev->if_port = IF_PORT_10BASE2;
+ else
+ dev->if_port = IF_PORT_10BASET;
+
+ etherh_setif(dev);
+ }
+}
+
+/*
+ * Write a block of data out to the 8390
+ */
+static void
+etherh_block_output (struct net_device *dev, int count, const unsigned char *buf, int start_page)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ unsigned long dma_start;
+ void __iomem *dma_base, *addr;
+
+ if (ei_local->dmaing) {
+ printk(KERN_ERR "%s: DMAing conflict in etherh_block_output: "
+ "DMAstat %d irqlock %d\n", dev->name,
+ ei_local->dmaing, ei_local->irqlock);
+ return;
+ }
+
+ /*
+ * Make sure we have a round number of bytes if we're in word mode.
+ */
+ if (count & 1 && ei_local->word16)
+ count++;
+
+ ei_local->dmaing = 1;
+
+ addr = (void __iomem *)dev->base_addr;
+ dma_base = etherh_priv(dev)->dma_base;
+
+ count = (count + 1) & ~1;
+ writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD);
+
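+ /* Dummy remote read before the write -- the usual 8390
+ read-before-write workaround (compare the NE8390_RW_BUGFIX
+ sequence in hp.c). */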
+ writeb (0x42, addr + EN0_RCNTLO);
+ writeb (0x00, addr + EN0_RCNTHI);
+ writeb (0x42, addr + EN0_RSARLO);
+ writeb (0x00, addr + EN0_RSARHI);
+ writeb (E8390_RREAD | E8390_START, addr + E8390_CMD);
+
+ udelay (1);
+
+ writeb (ENISR_RDC, addr + EN0_ISR);
+ writeb (count, addr + EN0_RCNTLO);
+ writeb (count >> 8, addr + EN0_RCNTHI);
+ writeb (0, addr + EN0_RSARLO);
+ writeb (start_page, addr + EN0_RSARHI);
+ writeb (E8390_RWRITE | E8390_START, addr + E8390_CMD);
+
+ if (ei_local->word16)
+ writesw (dma_base, buf, count >> 1);
+ else
+ writesb (dma_base, buf, count);
+
+ dma_start = jiffies;
+
+ while ((readb (addr + EN0_ISR) & ENISR_RDC) == 0)
+ if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
+ printk(KERN_ERR "%s: timeout waiting for TX RDC\n",
+ dev->name);
+ etherh_reset (dev);
+ __NS8390_init (dev, 1);
+ break;
+ }
+
+ writeb (ENISR_RDC, addr + EN0_ISR);
+ ei_local->dmaing = 0;
+}
+
+/*
+ * Read a block of data from the 8390
+ */
+static void
+etherh_block_input (struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ unsigned char *buf;
+ void __iomem *dma_base, *addr;
+
+ if (ei_local->dmaing) {
+ printk(KERN_ERR "%s: DMAing conflict in etherh_block_input: "
+ "DMAstat %d irqlock %d\n", dev->name,
+ ei_local->dmaing, ei_local->irqlock);
+ return;
+ }
+
+ ei_local->dmaing = 1;
+
+ addr = (void __iomem *)dev->base_addr;
+ dma_base = etherh_priv(dev)->dma_base;
+
+ buf = skb->data;
+ writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD);
+ writeb (count, addr + EN0_RCNTLO);
+ writeb (count >> 8, addr + EN0_RCNTHI);
+ writeb (ring_offset, addr + EN0_RSARLO);
+ writeb (ring_offset >> 8, addr + EN0_RSARHI);
+ writeb (E8390_RREAD | E8390_START, addr + E8390_CMD);
+
+ if (ei_local->word16) {
+ readsw (dma_base, buf, count >> 1);
+ if (count & 1)
+ buf[count - 1] = readb (dma_base);
+ } else
+ readsb (dma_base, buf, count);
+
+ writeb (ENISR_RDC, addr + EN0_ISR);
+ ei_local->dmaing = 0;
+}
+
+/*
+ * Read a header from the 8390
+ */
+static void
+etherh_get_header (struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ void __iomem *dma_base, *addr;
+
+ if (ei_local->dmaing) {
+ printk(KERN_ERR "%s: DMAing conflict in etherh_get_header: "
+ "DMAstat %d irqlock %d\n", dev->name,
+ ei_local->dmaing, ei_local->irqlock);
+ return;
+ }
+
+ ei_local->dmaing = 1;
+
+ addr = (void __iomem *)dev->base_addr;
+ dma_base = etherh_priv(dev)->dma_base;
+
+ writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD);
+ writeb (sizeof (*hdr), addr + EN0_RCNTLO);
+ writeb (0, addr + EN0_RCNTHI);
+ writeb (0, addr + EN0_RSARLO);
+ writeb (ring_page, addr + EN0_RSARHI);
+ writeb (E8390_RREAD | E8390_START, addr + E8390_CMD);
+
+ if (ei_local->word16)
+ readsw (dma_base, hdr, sizeof (*hdr) >> 1);
+ else
+ readsb (dma_base, hdr, sizeof (*hdr));
+
+ writeb (ENISR_RDC, addr + EN0_ISR);
+ ei_local->dmaing = 0;
+}
+
+/*
+ * Open/initialize the board. This is called (in the current kernel)
+ * sometime after booting when the 'ifconfig' program is run.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+static int
+etherh_open(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
+ dev->name);
+ return -EINVAL;
+ }
+
+ if (request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev))
+ return -EAGAIN;
+
+ /*
+ * Make sure that we aren't going to change the
+ * media type on the next reset - we are about to
+ * do automedia manually now.
+ */
+ ei_local->interface_num = 0;
+
+ /*
+ * If we are doing automedia detection, do it now.
+ * This is more reliable than the 8390's detection.
+ */
+ if (dev->flags & IFF_AUTOMEDIA) {
+ dev->if_port = IF_PORT_10BASET;
+ etherh_setif(dev);
+ mdelay(1);
+ if (!etherh_getifstat(dev)) {
+ dev->if_port = IF_PORT_10BASE2;
+ etherh_setif(dev);
+ }
+ } else
+ etherh_setif(dev);
+
+ etherh_reset(dev);
+ __ei_open(dev);
+
+ return 0;
+}
+
+/*
+ * The inverse routine to etherh_open().
+ */
+static int
+etherh_close(struct net_device *dev)
+{
+ __ei_close (dev);
+ free_irq (dev->irq, dev);
+ return 0;
+}
+
+/*
+ * Initialisation
+ */
+
+static void __init etherh_banner(void)
+{
+ static int version_printed;
+
+ if (net_debug && version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+}
+
+/*
+ * Read the ethernet address string from the on-board ROM.
+ * This is an ASCII string...
+ */
+static int __devinit etherh_addr(char *addr, struct expansion_card *ec)
+{
+ struct in_chunk_dir cd;
+ char *s;
+
+ if (!ecard_readchunk(&cd, ec, 0xf5, 0)) {
+ printk(KERN_ERR "%s: unable to read podule description string\n",
+ dev_name(&ec->dev));
+ goto no_addr;
+ }
+
+ s = strchr(cd.d.string, '(');
+ if (s) {
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ addr[i] = simple_strtoul(s + 1, &s, 0x10);
+ if (*s != (i == 5? ')' : ':'))
+ break;
+ }
+
+ if (i == 6)
+ return 0;
+ }
+
+ printk(KERN_ERR "%s: unable to parse MAC address: %s\n",
+ dev_name(&ec->dev), cd.d.string);
+
+ no_addr:
+ return -ENODEV;
+}
+
+/*
+ * Create an ethernet address from the system serial number.
+ */
+static int __init etherm_addr(char *addr)
+{
+ unsigned int serial;
+
+ if (system_serial_low == 0 && system_serial_high == 0)
+ return -ENODEV;
+
+ serial = system_serial_low | system_serial_high;
+
+ addr[0] = 0;
+ addr[1] = 0;
+ addr[2] = 0xa4;
+ addr[3] = 0x10 + (serial >> 24);
+ addr[4] = serial >> 16;
+ addr[5] = serial >> 8;
+ return 0;
+}
+
+static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ sizeof(info->bus_info));
+}
+
+static int etherh_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ cmd->supported = etherh_priv(dev)->supported;
+ ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->duplex = DUPLEX_HALF;
+ cmd->port = dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC;
+ cmd->autoneg = (dev->flags & IFF_AUTOMEDIA ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE);
+ return 0;
+}
+
+static int etherh_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ switch (cmd->autoneg) {
+ case AUTONEG_ENABLE:
+ dev->flags |= IFF_AUTOMEDIA;
+ break;
+
+ case AUTONEG_DISABLE:
+ switch (cmd->port) {
+ case PORT_TP:
+ dev->if_port = IF_PORT_10BASET;
+ break;
+
+ case PORT_BNC:
+ dev->if_port = IF_PORT_10BASE2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ dev->flags &= ~IFF_AUTOMEDIA;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ etherh_setif(dev);
+
+ return 0;
+}
+
+static const struct ethtool_ops etherh_ethtool_ops = {
+ .get_settings = etherh_get_settings,
+ .set_settings = etherh_set_settings,
+ .get_drvinfo = etherh_get_drvinfo,
+};
+
+static const struct net_device_ops etherh_netdev_ops = {
+ .ndo_open = etherh_open,
+ .ndo_stop = etherh_close,
+ .ndo_set_config = etherh_set_config,
+ .ndo_start_xmit = __ei_start_xmit,
+ .ndo_tx_timeout = __ei_tx_timeout,
+ .ndo_get_stats = __ei_get_stats,
+ .ndo_set_rx_mode = __ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = __ei_poll,
+#endif
+};
+
+static u32 etherh_regoffsets[16];
+static u32 etherm_regoffsets[16];
+
+static int __devinit
+etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
+{
+ const struct etherh_data *data = id->data;
+ struct ei_device *ei_local;
+ struct net_device *dev;
+ struct etherh_priv *eh;
+ int ret;
+
+ etherh_banner();
+
+ ret = ecard_request_resources(ec);
+ if (ret)
+ goto out;
+
+ dev = ____alloc_ei_netdev(sizeof(struct etherh_priv));
+ if (!dev) {
+ ret = -ENOMEM;
+ goto release;
+ }
+
+ SET_NETDEV_DEV(dev, &ec->dev);
+
+ dev->netdev_ops = &etherh_netdev_ops;
+ dev->irq = ec->irq;
+ dev->ethtool_ops = &etherh_ethtool_ops;
+
+ if (data->supported & SUPPORTED_Autoneg)
+ dev->flags |= IFF_AUTOMEDIA;
+ if (data->supported & SUPPORTED_TP) {
+ dev->flags |= IFF_PORTSEL;
+ dev->if_port = IF_PORT_10BASET;
+ } else if (data->supported & SUPPORTED_BNC) {
+ dev->flags |= IFF_PORTSEL;
+ dev->if_port = IF_PORT_10BASE2;
+ } else
+ dev->if_port = IF_PORT_UNKNOWN;
+
+ eh = etherh_priv(dev);
+ eh->supported = data->supported;
+ eh->ctrl = 0;
+ eh->id = ec->cid.product;
+ eh->memc = ecardm_iomap(ec, ECARD_RES_MEMC, 0, PAGE_SIZE);
+ if (!eh->memc) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ eh->ctrl_port = eh->memc;
+ if (data->ctrl_ioc) {
+ eh->ioc_fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, PAGE_SIZE);
+ if (!eh->ioc_fast) {
+ ret = -ENOMEM;
+ goto free;
+ }
+ eh->ctrl_port = eh->ioc_fast;
+ }
+
+ dev->base_addr = (unsigned long)eh->memc + data->ns8390_offset;
+ eh->dma_base = eh->memc + data->dataport_offset;
+ eh->ctrl_port += data->ctrlport_offset;
+
+ /*
+ * IRQ and control port handling - only for non-NIC slot cards.
+ */
+ if (ec->slot_no != 8) {
+ ecard_setirq(ec, &etherh_ops, eh);
+ } else {
+ /*
+ * If we're in the NIC slot, make sure the IRQ is enabled
+ */
+ etherh_set_ctrl(eh, ETHERH_CP_IE);
+ }
+
+ ei_local = netdev_priv(dev);
+ spin_lock_init(&ei_local->page_lock);
+
+ if (ec->cid.product == PROD_ANT_ETHERM) {
+ etherm_addr(dev->dev_addr);
+ ei_local->reg_offset = etherm_regoffsets;
+ } else {
+ etherh_addr(dev->dev_addr, ec);
+ ei_local->reg_offset = etherh_regoffsets;
+ }
+
+ ei_local->name = dev->name;
+ ei_local->word16 = 1;
+ ei_local->tx_start_page = data->tx_start_page;
+ ei_local->rx_start_page = ei_local->tx_start_page + TX_PAGES;
+ ei_local->stop_page = data->stop_page;
+ ei_local->reset_8390 = etherh_reset;
+ ei_local->block_input = etherh_block_input;
+ ei_local->block_output = etherh_block_output;
+ ei_local->get_8390_hdr = etherh_get_header;
+ ei_local->interface_num = 0;
+
+ etherh_reset(dev);
+ __NS8390_init(dev, 0);
+
+ ret = register_netdev(dev);
+ if (ret)
+ goto free;
+
+ printk(KERN_INFO "%s: %s in slot %d, %pM\n",
+ dev->name, data->name, ec->slot_no, dev->dev_addr);
+
+ ecard_set_drvdata(ec, dev);
+
+ return 0;
+
+ free:
+ free_netdev(dev);
+ release:
+ ecard_release_resources(ec);
+ out:
+ return ret;
+}
+
+static void __devexit etherh_remove(struct expansion_card *ec)
+{
+ struct net_device *dev = ecard_get_drvdata(ec);
+
+ ecard_set_drvdata(ec, NULL);
+
+ unregister_netdev(dev);
+
+ free_netdev(dev);
+
+ ecard_release_resources(ec);
+}
+
+static struct etherh_data etherm_data = {
+ .ns8390_offset = ETHERM_NS8390,
+ .dataport_offset = ETHERM_NS8390 + ETHERM_DATAPORT,
+ .ctrlport_offset = ETHERM_NS8390 + ETHERM_CTRLPORT,
+ .name = "ANT EtherM",
+ .supported = SUPPORTED_10baseT_Half,
+ .tx_start_page = ETHERM_TX_START_PAGE,
+ .stop_page = ETHERM_STOP_PAGE,
+};
+
+static struct etherh_data etherlan500_data = {
+ .ns8390_offset = ETHERH500_NS8390,
+ .dataport_offset = ETHERH500_NS8390 + ETHERH500_DATAPORT,
+ .ctrlport_offset = ETHERH500_CTRLPORT,
+ .ctrl_ioc = 1,
+ .name = "i3 EtherH 500",
+ .supported = SUPPORTED_10baseT_Half,
+ .tx_start_page = ETHERH_TX_START_PAGE,
+ .stop_page = ETHERH_STOP_PAGE,
+};
+
+static struct etherh_data etherlan600_data = {
+ .ns8390_offset = ETHERH600_NS8390,
+ .dataport_offset = ETHERH600_NS8390 + ETHERH600_DATAPORT,
+ .ctrlport_offset = ETHERH600_NS8390 + ETHERH600_CTRLPORT,
+ .name = "i3 EtherH 600",
+ .supported = SUPPORTED_10baseT_Half | SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_Autoneg,
+ .tx_start_page = ETHERH_TX_START_PAGE,
+ .stop_page = ETHERH_STOP_PAGE,
+};
+
+static struct etherh_data etherlan600a_data = {
+ .ns8390_offset = ETHERH600_NS8390,
+ .dataport_offset = ETHERH600_NS8390 + ETHERH600_DATAPORT,
+ .ctrlport_offset = ETHERH600_NS8390 + ETHERH600_CTRLPORT,
+ .name = "i3 EtherH 600A",
+ .supported = SUPPORTED_10baseT_Half | SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_Autoneg,
+ .tx_start_page = ETHERH_TX_START_PAGE,
+ .stop_page = ETHERH_STOP_PAGE,
+};
+
+static const struct ecard_id etherh_ids[] = {
+ { MANU_ANT, PROD_ANT_ETHERM, &etherm_data },
+ { MANU_I3, PROD_I3_ETHERLAN500, &etherlan500_data },
+ { MANU_I3, PROD_I3_ETHERLAN600, &etherlan600_data },
+ { MANU_I3, PROD_I3_ETHERLAN600A, &etherlan600a_data },
+ { 0xffff, 0xffff }
+};
+
+static struct ecard_driver etherh_driver = {
+ .probe = etherh_probe,
+ .remove = __devexit_p(etherh_remove),
+ .id_table = etherh_ids,
+ .drv = {
+ .name = DRV_NAME,
+ },
+};
+
+static int __init etherh_init(void)
+{
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ etherh_regoffsets[i] = i << 2;
+ etherm_regoffsets[i] = i << 5;
+ }
+
+ return ecard_register_driver(&etherh_driver);
+}
+
+static void __exit etherh_exit(void)
+{
+ ecard_remove_driver(&etherh_driver);
+}
+
+module_init(etherh_init);
+module_exit(etherh_exit);
diff --git a/drivers/net/ethernet/8390/hp-plus.c b/drivers/net/ethernet/8390/hp-plus.c
new file mode 100644
index 000000000000..eeac843dcd2d
--- /dev/null
+++ b/drivers/net/ethernet/8390/hp-plus.c
@@ -0,0 +1,506 @@
+/* hp-plus.c: A HP PCLAN/plus ethernet driver for linux. */
+/*
+ Written 1994 by Donald Becker.
+
+ This driver is for the Hewlett Packard PC LAN (27***) plus ethercards.
+ These cards are sold under several model numbers, usually 2724*.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ As is often the case, a great deal of credit is owed to Russ Nelson.
+ The Crynwr packet driver was my primary source of HP-specific
+ programming information.
+*/
+
+static const char version[] =
+"hp-plus.c:v1.10 9/24/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/string.h> /* Important -- this inlines word moves. */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include "8390.h"
+
+#define DRV_NAME "hp-plus"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int hpplus_portlist[] __initdata =
+{0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0};
+
+/*
+ The HP EtherTwist chip implementation is a fairly routine DP8390
+ implementation. It allows both shared memory and programmed-I/O buffer
+ access, using a custom interface for both. The programmed-I/O mode is
+ entirely implemented in the HP EtherTwist chip, bypassing the problem
+ ridden built-in 8390 facilities used on NE2000 designs. The shared
+ memory mode is likewise special, with an offset register used to make
+ packets appear at the shared memory base. Both modes use a base and bounds
+ page register to hide the Rx ring buffer wrap -- a packet that spans the
+ end of physical buffer memory appears contiguous to the driver. (cf. the
+ 3c503 and Cabletron E2100)
+
+ A special note: the internal buffer of the board is only 8 bits wide.
+ This lays several nasty traps for the unaware:
+ - the 8390 must be programmed for byte-wide operations
+ - all I/O and memory operations must work on whole words (the access
+ latches are serially preloaded and have no byte-swapping ability).
+
+ This board is laid out in I/O space much like the earlier HP boards:
+ the first 16 locations are for the board registers, and the second 16 are
+ for the 8390. The board is easy to identify, with both a dedicated 16 bit
+ ID register and a constant 0x530* value in the upper bits of the paging
+ register.
+*/
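+
+/* A minimal identification sketch (it mirrors hpp_probe1() below): the
+ board is present iff the ID register reads 0x4850 ("HP") and the
+ paging register carries the 0x530* signature in its upper bits:
+
+ if (inw(ioaddr + HP_ID) == 0x4850 &&
+ (inw(ioaddr + HP_PAGING) & 0xfff0) == 0x5300)
+ ... an HP PC LAN+ responds at ioaddr ...
+*/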
+
+#define HP_ID 0x00 /* ID register, always 0x4850. */
+#define HP_PAGING 0x02 /* Registers visible @ 8-f, see PageName. */
+#define HPP_OPTION 0x04 /* Bitmapped options, see HP_Option. */
+#define HPP_OUT_ADDR 0x08 /* I/O output location in Perf_Page. */
+#define HPP_IN_ADDR 0x0A /* I/O input location in Perf_Page. */
+#define HP_DATAPORT 0x0c /* I/O data transfer in Perf_Page. */
+#define NIC_OFFSET 0x10 /* Offset to the 8390 registers. */
+#define HP_IO_EXTENT 32
+
+#define HP_START_PG 0x00 /* First page of TX buffer */
+#define HP_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+/* The register set selected in HP_PAGING. */
+enum PageName {
+ Perf_Page = 0, /* Normal operation. */
+ MAC_Page = 1, /* The ethernet address (+checksum). */
+ HW_Page = 2, /* EEPROM-loaded hardware parameters. */
+ LAN_Page = 4, /* Transceiver selection, testing, etc. */
+ ID_Page = 6 };
+
+/* The bit definitions for the HPP_OPTION register. */
+enum HP_Option {
+ NICReset = 1, ChipReset = 2, /* Active low, really UNreset. */
+ EnableIRQ = 4, FakeIntr = 8, BootROMEnb = 0x10, IOEnb = 0x20,
+ MemEnable = 0x40, ZeroWait = 0x80, MemDisable = 0x1000, };
+
+static int hpp_probe1(struct net_device *dev, int ioaddr);
+
+static void hpp_reset_8390(struct net_device *dev);
+static int hpp_open(struct net_device *dev);
+static int hpp_close(struct net_device *dev);
+static void hpp_mem_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void hpp_mem_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+static void hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void hpp_io_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void hpp_io_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+static void hpp_io_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+
+
+/* Probe a list of addresses for an HP LAN+ adaptor.
+ This routine is almost boilerplate. */
+
+static int __init do_hpp_probe(struct net_device *dev)
+{
+ int i;
+ int base_addr = dev->base_addr;
+ int irq = dev->irq;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return hpp_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ for (i = 0; hpplus_portlist[i]; i++) {
+ if (hpp_probe1(dev, hpplus_portlist[i]) == 0)
+ return 0;
+ dev->irq = irq;
+ }
+
+ return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init hp_plus_probe(int unit)
+{
+ struct net_device *dev = alloc_eip_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_hpp_probe(dev);
+ if (err)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static const struct net_device_ops hpp_netdev_ops = {
+ .ndo_open = hpp_open,
+ .ndo_stop = hpp_close,
+ .ndo_start_xmit = eip_start_xmit,
+ .ndo_tx_timeout = eip_tx_timeout,
+ .ndo_get_stats = eip_get_stats,
+ .ndo_set_rx_mode = eip_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = eip_poll,
+#endif
+};
+
+
+/* Do the interesting part of the probe at a single address. */
+static int __init hpp_probe1(struct net_device *dev, int ioaddr)
+{
+ int i, retval;
+ unsigned char checksum = 0;
+ static const char name[] = "HP-PC-LAN+"; /* ei_status.name keeps a pointer to this */
+ int mem_start;
+ static unsigned version_printed;
+
+ if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ /* Check for the HP+ signature, 50 48 0x 53. */
+ if (inw(ioaddr + HP_ID) != 0x4850 ||
+ (inw(ioaddr + HP_PAGING) & 0xfff0) != 0x5300) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ printk("%s: %s at %#3x, ", dev->name, name, ioaddr);
+
+ /* Retrieve and checksum the station address. */
+ outw(MAC_Page, ioaddr + HP_PAGING);
+
+ for (i = 0; i < ETHER_ADDR_LEN; i++) {
+ unsigned char inval = inb(ioaddr + 8 + i);
+ dev->dev_addr[i] = inval;
+ checksum += inval;
+ }
+ checksum += inb(ioaddr + 14);
+
+ printk("%pM", dev->dev_addr);
+
+ if (checksum != 0xff) {
+ printk(" bad checksum %2.2x.\n", checksum);
+ retval = -ENODEV;
+ goto out;
+ } else {
+ /* Point at the Software Configuration Flags. */
+ outw(ID_Page, ioaddr + HP_PAGING);
+ printk(" ID %4.4x", inw(ioaddr + 12));
+ }
+
+ /* Read the IRQ line. */
+ outw(HW_Page, ioaddr + HP_PAGING);
+ {
+ int irq = inb(ioaddr + 13) & 0x0f;
+ int option = inw(ioaddr + HPP_OPTION);
+
+ dev->irq = irq;
+ if (option & MemEnable) {
+ mem_start = inw(ioaddr + 9) << 8;
+ printk(", IRQ %d, memory address %#x.\n", irq, mem_start);
+ } else {
+ mem_start = 0;
+ printk(", IRQ %d, programmed-I/O mode.\n", irq);
+ }
+ }
+
+ /* Set the wrap registers for string I/O reads. */
+ outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
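+ /* Low byte: first Rx ring page; high byte: last valid page -- the
+ base/bounds pair described in the comments above (inferred from
+ the encoding). */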
+
+ /* Set the base address to point to the NIC, not the "real" base! */
+ dev->base_addr = ioaddr + NIC_OFFSET;
+
+ dev->netdev_ops = &hpp_netdev_ops;
+
+ ei_status.name = name;
+ ei_status.word16 = 0; /* Agggghhhhh! Debug time: 2 days! */
+ ei_status.tx_start_page = HP_START_PG;
+ ei_status.rx_start_page = HP_START_PG + TX_PAGES/2;
+ ei_status.stop_page = HP_STOP_PG;
+
+ ei_status.reset_8390 = &hpp_reset_8390;
+ ei_status.block_input = &hpp_io_block_input;
+ ei_status.block_output = &hpp_io_block_output;
+ ei_status.get_8390_hdr = &hpp_io_get_8390_hdr;
+
+ /* Check if the memory_enable flag is set in the option register. */
+ if (mem_start) {
+ ei_status.block_input = &hpp_mem_block_input;
+ ei_status.block_output = &hpp_mem_block_output;
+ ei_status.get_8390_hdr = &hpp_mem_get_8390_hdr;
+ dev->mem_start = mem_start;
+ ei_status.mem = ioremap(mem_start,
+ (HP_STOP_PG - HP_START_PG)*256);
+ if (!ei_status.mem) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ ei_status.rmem_start = dev->mem_start + TX_PAGES/2*256;
+ dev->mem_end = ei_status.rmem_end
+ = dev->mem_start + (HP_STOP_PG - HP_START_PG)*256;
+ }
+
+ outw(Perf_Page, ioaddr + HP_PAGING);
+ NS8390p_init(dev, 0);
+ /* Leave the 8390 and HP chip reset. */
+ outw(inw(ioaddr + HPP_OPTION) & ~EnableIRQ, ioaddr + HPP_OPTION);
+
+ retval = register_netdev(dev);
+ if (retval)
+ goto out1;
+ return 0;
+out1:
+ iounmap(ei_status.mem);
+out:
+ release_region(ioaddr, HP_IO_EXTENT);
+ return retval;
+}
+
+static int
+hpp_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg;
+ int retval;
+
+ if ((retval = request_irq(dev->irq, eip_interrupt, 0, dev->name, dev))) {
+ return retval;
+ }
+
+ /* Reset the 8390 and HP chip. */
+ option_reg = inw(ioaddr + HPP_OPTION);
+ outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION);
+ udelay(5);
+ /* Unreset the board and enable interrupts. */
+ outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION);
+
+ /* Set the wrap registers for programmed-I/O operation. */
+ outw(HW_Page, ioaddr + HP_PAGING);
+ outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
+
+ /* Select the operational page. */
+ outw(Perf_Page, ioaddr + HP_PAGING);
+
+ return eip_open(dev);
+}
+
+static int
+hpp_close(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ free_irq(dev->irq, dev);
+ eip_close(dev);
+ outw((option_reg & ~EnableIRQ) | MemDisable | NICReset | ChipReset,
+ ioaddr + HPP_OPTION);
+
+ return 0;
+}
+
+static void
+hpp_reset_8390(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies);
+
+ outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION);
+ /* Pause a few cycles for the hardware reset to take place. */
+ udelay(5);
+ ei_status.txing = 0;
+ outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION);
+
+ udelay(5);
+
+ if ((inb_p(ioaddr+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0)
+ printk("%s: hp_reset_8390() did not complete.\n", dev->name);
+
+ if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
+}
+
+/* The programmed-I/O version of reading the 4 byte 8390 specific header.
+ Note that transfer with the EtherTwist+ must be on word boundaries. */
+
+static void
+hpp_io_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+
+ outw((ring_page<<8), ioaddr + HPP_IN_ADDR);
+ insw(ioaddr + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+}
+
+/* Block input and output, similar to the Crynwr packet driver. */
+
+static void
+hpp_io_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ char *buf = skb->data;
+
+ outw(ring_offset, ioaddr + HPP_IN_ADDR);
+ insw(ioaddr + HP_DATAPORT, buf, count>>1);
+ if (count & 0x01)
+ buf[count-1] = inw(ioaddr + HP_DATAPORT);
+}
+
+/* The corresponding shared memory versions of the above 2 functions. */
+
+static void
+hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ outw((ring_page<<8), ioaddr + HPP_IN_ADDR);
+ outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
+ memcpy_fromio(hdr, ei_status.mem, sizeof(struct e8390_pkt_hdr));
+ outw(option_reg, ioaddr + HPP_OPTION);
+ hdr->count = (le16_to_cpu(hdr->count) + 3) & ~3; /* Round up allocation. */
+}
+
+static void
+hpp_mem_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ outw(ring_offset, ioaddr + HPP_IN_ADDR);
+
+ outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
+
+ /* Caution: this relies on get_8390_hdr() rounding up count!
+ Also note that we *can't* use eth_io_copy_and_sum() because
+ it will not always copy "count" bytes (e.g. padded IP). */
+
+ memcpy_fromio(skb->data, ei_status.mem, count);
+ outw(option_reg, ioaddr + HPP_OPTION);
+}
+
+/* A special note: we *must* always transfer >=16 bit words.
+ It's always safe to round up, so we do. */
+static void
+hpp_io_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
+ outsl(ioaddr + HP_DATAPORT, buf, (count+3)>>2);
+}
+
+static void
+hpp_mem_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
+ outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
+ memcpy_toio(ei_status.mem, buf, (count + 3) & ~3);
+ outw(option_reg, ioaddr + HPP_OPTION);
+}
+
+
+#ifdef MODULE
+#define MAX_HPP_CARDS 4 /* Max number of HPP cards per module */
+static struct net_device *dev_hpp[MAX_HPP_CARDS];
+static int io[MAX_HPP_CARDS];
+static int irq[MAX_HPP_CARDS];
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O port address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s); ignored if properly detected");
+MODULE_DESCRIPTION("HP PC-LAN+ ISA ethernet driver");
+MODULE_LICENSE("GPL");
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int __init
+init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) {
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "hp-plus.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ dev = alloc_eip_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ if (do_hpp_probe(dev) == 0) {
+ dev_hpp[found++] = dev;
+ continue;
+ }
+ free_netdev(dev);
+ printk(KERN_WARNING "hp-plus.c: No HP-Plus card found (i/o = 0x%x).\n", io[this_dev]);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ /* NB: hpp_close() handles free_irq */
+ iounmap(ei_status.mem);
+ release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
+}
+
+void __exit
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) {
+ struct net_device *dev = dev_hpp[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/hp.c b/drivers/net/ethernet/8390/hp.c
new file mode 100644
index 000000000000..18564d4a7c04
--- /dev/null
+++ b/drivers/net/ethernet/8390/hp.c
@@ -0,0 +1,439 @@
+/* hp.c: A HP LAN ethernet driver for linux. */
+/*
+ Written 1993-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ This is a driver for the HP PC-LAN adaptors.
+
+ Sources:
+ The Crynwr packet driver.
+*/
+
+static const char version[] =
+ "hp.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include "8390.h"
+
+#define DRV_NAME "hp"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int hppclan_portlist[] __initdata =
+{ 0x300, 0x320, 0x340, 0x280, 0x2C0, 0x200, 0x240, 0};
+
+#define HP_IO_EXTENT 32
+
+#define HP_DATAPORT 0x0c /* "Remote DMA" data port. */
+#define HP_ID 0x07
+#define HP_CONFIGURE 0x08 /* Configuration register. */
+#define HP_RUN 0x01 /* 1 == Run, 0 == reset. */
+#define HP_IRQ 0x0E /* Mask for software-configured IRQ line. */
+#define HP_DATAON 0x10 /* Turn on dataport */
+#define NIC_OFFSET 0x10 /* Offset to the 8390 registers. */
+
+#define HP_START_PG 0x00 /* First page of TX buffer */
+#define HP_8BSTOP_PG 0x80 /* Last page +1 of RX ring */
+#define HP_16BSTOP_PG 0xFF /* Same, for 16 bit cards. */
+
+static int hp_probe1(struct net_device *dev, int ioaddr);
+
+static void hp_reset_8390(struct net_device *dev);
+static void hp_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void hp_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb , int ring_offset);
+static void hp_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+
+static void hp_init_card(struct net_device *dev);
+
+/* The map from IRQ number to HP_CONFIGURE register setting. */
+/* My default is IRQ5 0 1 2 3 4 5 6 7 8 9 10 11 */
+static char irqmap[16] __initdata = { 0, 0, 4, 6, 8,10, 0,14, 0, 4, 2,12,0,0,0,0};
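+/* For example, IRQ 3 is encoded as irqmap[3] == 6; the zero entries
+ (other than IRQ 0) appear to mark IRQ lines the board cannot use. */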
+
+
+/* Probe for an HP LAN adaptor.
+ Also initialize the card and fill in STATION_ADDR with the station
+ address. */
+
+static int __init do_hp_probe(struct net_device *dev)
+{
+ int i;
+ int base_addr = dev->base_addr;
+ int irq = dev->irq;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return hp_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ for (i = 0; hppclan_portlist[i]; i++) {
+ if (hp_probe1(dev, hppclan_portlist[i]) == 0)
+ return 0;
+ dev->irq = irq;
+ }
+
+ return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init hp_probe(int unit)
+{
+ struct net_device *dev = alloc_eip_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_hp_probe(dev);
+ if (err)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static int __init hp_probe1(struct net_device *dev, int ioaddr)
+{
+ int i, retval, board_id, wordmode;
+ const char *name;
+ static unsigned version_printed;
+
+ if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ /* Check for the HP physical address, 08 00 09 xx xx xx. */
+ /* This really isn't good enough: we may pick up HP LANCE boards
+ also! Avoid the lance 0x5757 signature. */
+ if (inb(ioaddr) != 0x08
+ || inb(ioaddr+1) != 0x00
+ || inb(ioaddr+2) != 0x09
+ || inb(ioaddr+14) == 0x57) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ /* Set up the parameters based on the board ID.
+ If you have additional mappings, please mail them to me -djb. */
+ if ((board_id = inb(ioaddr + HP_ID)) & 0x80) {
+ name = "HP27247";
+ wordmode = 1;
+ } else {
+ name = "HP27250";
+ wordmode = 0;
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr);
+
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ dev->dev_addr[i] = inb(ioaddr + i);
+
+ printk(" %pM", dev->dev_addr);
+
+ /* Snarf the interrupt now. Someday this could be moved to open(). */
+ if (dev->irq < 2) {
+ static const int irq_16list[] = { 11, 10, 5, 3, 4, 7, 9, 0};
+ static const int irq_8list[] = { 7, 5, 3, 4, 9, 0};
+ const int *irqp = wordmode ? irq_16list : irq_8list;
+ do {
+ int irq = *irqp;
+ if (request_irq (irq, NULL, 0, "bogus", NULL) != -EBUSY) {
+ unsigned long cookie = probe_irq_on();
+ /* Twinkle the interrupt, and check if it's seen. */
+ outb_p(irqmap[irq] | HP_RUN, ioaddr + HP_CONFIGURE);
+ outb_p( 0x00 | HP_RUN, ioaddr + HP_CONFIGURE);
+ if (irq == probe_irq_off(cookie) /* It's a good IRQ line! */
+ && request_irq (irq, eip_interrupt, 0, DRV_NAME, dev) == 0) {
+ printk(" selecting IRQ %d.\n", irq);
+ dev->irq = *irqp;
+ break;
+ }
+ }
+ } while (*++irqp);
+ if (*irqp == 0) {
+ printk(" no free IRQ lines.\n");
+ retval = -EBUSY;
+ goto out;
+ }
+ } else {
+ if (dev->irq == 2)
+ dev->irq = 9;
+ if ((retval = request_irq(dev->irq, eip_interrupt, 0, DRV_NAME, dev))) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ goto out;
+ }
+ }
+
+ /* Set the base address to point to the NIC, not the "real" base! */
+ dev->base_addr = ioaddr + NIC_OFFSET;
+ dev->netdev_ops = &eip_netdev_ops;
+
+ ei_status.name = name;
+ ei_status.word16 = wordmode;
+ ei_status.tx_start_page = HP_START_PG;
+ ei_status.rx_start_page = HP_START_PG + TX_PAGES;
+ ei_status.stop_page = wordmode ? HP_16BSTOP_PG : HP_8BSTOP_PG;
+
+ ei_status.reset_8390 = hp_reset_8390;
+ ei_status.get_8390_hdr = hp_get_8390_hdr;
+ ei_status.block_input = hp_block_input;
+ ei_status.block_output = hp_block_output;
+ hp_init_card(dev);
+
+ retval = register_netdev(dev);
+ if (retval)
+ goto out1;
+ return 0;
+out1:
+ free_irq(dev->irq, dev);
+out:
+ release_region(ioaddr, HP_IO_EXTENT);
+ return retval;
+}
+
+static void
+hp_reset_8390(struct net_device *dev)
+{
+ int hp_base = dev->base_addr - NIC_OFFSET;
+ int saved_config = inb_p(hp_base + HP_CONFIGURE);
+
+ if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies);
+ outb_p(0x00, hp_base + HP_CONFIGURE);
+ ei_status.txing = 0;
+ /* Pause just a few cycles for the hardware reset to take place. */
+ udelay(5);
+
+ outb_p(saved_config, hp_base + HP_CONFIGURE);
+ udelay(5);
+
+ if ((inb_p(hp_base+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0)
+ printk("%s: hp_reset_8390() did not complete.\n", dev->name);
+
+ if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
+}
+
+static void
+hp_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int nic_base = dev->base_addr;
+ int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
+
+ outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base);
+ outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+ outb_p(0, nic_base + EN0_RCNTHI);
+ outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base);
+
+ if (ei_status.word16)
+ insw(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+ else
+ insb(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
+
+ outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you are
+ porting to a new ethercard look at the packet driver source for hints.
+ The HP LAN doesn't use shared memory -- we put the packet
+ out through the "remote DMA" dataport. */
+
+static void
+hp_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int nic_base = dev->base_addr;
+ int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
+ int xfer_count = count;
+ char *buf = skb->data;
+
+ outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base);
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base);
+ if (ei_status.word16) {
+ insw(nic_base - NIC_OFFSET + HP_DATAPORT,buf,count>>1);
+ if (count & 0x01) {
+ buf[count-1] = inb(nic_base - NIC_OFFSET + HP_DATAPORT);
+ xfer_count++;
+ }
+ } else {
+ insb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count);
+ }
+ /* This is for the ALPHA version only, remove for later releases. */
+ if (ei_debug > 0) { /* DMA termination address check... */
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ int addr = (high << 8) + low;
+ /* Check only the lower 8 bits so we can ignore ring wrap. */
+ if (((ring_offset + xfer_count) & 0xff) != (addr & 0xff))
+ printk("%s: RX transfer address mismatch, %#4.4x vs. %#4.4x (actual).\n",
+ dev->name, ring_offset + xfer_count, addr);
+ }
+ outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
+}
+
+static void
+hp_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ int nic_base = dev->base_addr;
+ int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
+
+ outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+ if (ei_status.word16 && (count & 0x01))
+ count++;
+ /* We should already be in page 0, but to be safe... */
+ outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base);
+
+#ifdef NE8390_RW_BUGFIX
+ /* Handle the read-before-write bug the same way as the
+ Crynwr packet driver -- the NatSemi method doesn't work. */
+ outb_p(0x42, nic_base + EN0_RCNTLO);
+ outb_p(0, nic_base + EN0_RCNTHI);
+ outb_p(0xff, nic_base + EN0_RSARLO);
+ outb_p(0x00, nic_base + EN0_RSARHI);
+#define NE_CMD 0x00
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ /* Make certain that the dummy read has occurred. */
+ inb_p(0x61);
+ inb_p(0x61);
+#endif
+
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(0x00, nic_base + EN0_RSARLO);
+ outb_p(start_page, nic_base + EN0_RSARHI);
+
+ outb_p(E8390_RWRITE+E8390_START, nic_base);
+ if (ei_status.word16) {
+ /* Use the 'rep' sequence for 16 bit boards. */
+ outsw(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count>>1);
+ } else {
+ outsb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count);
+ }
+
+ /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here -- it's broken! */
+
+ /* This is for the ALPHA version only, remove for later releases. */
+ if (ei_debug > 0) { /* DMA termination address check... */
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ int addr = (high << 8) + low;
+ if ((start_page << 8) + count != addr)
+ printk("%s: TX Transfer address mismatch, %#4.4x vs. %#4.4x.\n",
+ dev->name, (start_page << 8) + count, addr);
+ }
+ outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
+}
+
+/* This function resets the ethercard if something screws up. */
+static void __init
+hp_init_card(struct net_device *dev)
+{
+ int irq = dev->irq;
+ NS8390p_init(dev, 0);
+ outb_p(irqmap[irq&0x0f] | HP_RUN,
+ dev->base_addr - NIC_OFFSET + HP_CONFIGURE);
+}
+
+#ifdef MODULE
+#define MAX_HP_CARDS 4 /* Max number of HP cards per module */
+static struct net_device *dev_hp[MAX_HP_CARDS];
+static int io[MAX_HP_CARDS];
+static int irq[MAX_HP_CARDS];
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
+MODULE_DESCRIPTION("HP PC-LAN ISA ethernet driver");
+MODULE_LICENSE("GPL");
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int __init
+init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) {
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "hp.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ dev = alloc_eip_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ if (do_hp_probe(dev) == 0) {
+ dev_hp[found++] = dev;
+ continue;
+ }
+ free_netdev(dev);
+ printk(KERN_WARNING "hp.c: No HP card found (i/o = 0x%x).\n", io[this_dev]);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
+}
+
+void __exit
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) {
+ struct net_device *dev = dev_hp[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
new file mode 100644
index 000000000000..3dac937a67c4
--- /dev/null
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -0,0 +1,273 @@
+/* New Hydra driver using generic 8390 core */
+/* Based on old hydra driver by Topi Kanerva (topi@susanna.oulu.fi) */
+
+/* This file is subject to the terms and conditions of the GNU General */
+/* Public License. See the file COPYING in the main directory of the */
+/* Linux distribution for more details. */
+
+/* Peter De Schrijver (p2@mind.be) */
+/* Oldenburg 2000 */
+
+/* The Amiganet is a Zorro-II board made by Hydra Systems. It contains a */
+/* NS8390 NIC (network interface controller) clone, 16 or 64K on-board RAM */
+/* and 10BASE-2 (thin coax) and AUI connectors. */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/amigaints.h>
+#include <asm/amigahw.h>
+#include <linux/zorro.h>
+
+#define EI_SHIFT(x) (ei_local->reg_offset[x])
+#define ei_inb(port) in_8(port)
+#define ei_outb(val,port) out_8(port,val)
+#define ei_inb_p(port) in_8(port)
+#define ei_outb_p(val,port) out_8(port,val)
+
+static const char version[] =
+ "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include "lib8390.c"
+
+#define NE_EN0_DCFG (0x0e*2)
+
+#define NESM_START_PG 0x0 /* First page of TX buffer */
+#define NESM_STOP_PG 0x40 /* Last page +1 of RX ring */
+
+#define HYDRA_NIC_BASE 0xffe1
+#define HYDRA_ADDRPROM 0xffc0
+#define HYDRA_VERSION "v3.0alpha"
+
+#define WORDSWAP(a) ((((a)>>8)&0xff) | ((a)<<8))
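+/* The NIC's 16-bit values arrive byte-swapped over the big-endian
+ Zorro bus (hence ei_status.bigendian = 1 below), so e.g.
+ WORDSWAP(0x1234) == 0x3412 is used to fix up the packet header
+ fields in hydra_get_8390_hdr(). */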
+
+
+static int __devinit hydra_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent);
+static int __devinit hydra_init(struct zorro_dev *z);
+static int hydra_open(struct net_device *dev);
+static int hydra_close(struct net_device *dev);
+static void hydra_reset_8390(struct net_device *dev);
+static void hydra_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page);
+static void hydra_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void hydra_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+static void __devexit hydra_remove_one(struct zorro_dev *z);
+
+static struct zorro_device_id hydra_zorro_tbl[] __devinitdata = {
+ { ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(zorro, hydra_zorro_tbl);
+
+static struct zorro_driver hydra_driver = {
+ .name = "hydra",
+ .id_table = hydra_zorro_tbl,
+ .probe = hydra_init_one,
+ .remove = __devexit_p(hydra_remove_one),
+};
+
+static int __devinit hydra_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
+{
+ int err;
+
+ if (!request_mem_region(z->resource.start, 0x10000, "Hydra"))
+ return -EBUSY;
+ if ((err = hydra_init(z))) {
+ release_mem_region(z->resource.start, 0x10000);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static const struct net_device_ops hydra_netdev_ops = {
+ .ndo_open = hydra_open,
+ .ndo_stop = hydra_close,
+
+ .ndo_start_xmit = __ei_start_xmit,
+ .ndo_tx_timeout = __ei_tx_timeout,
+ .ndo_get_stats = __ei_get_stats,
+ .ndo_set_rx_mode = __ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = __ei_poll,
+#endif
+};
+
+static int __devinit hydra_init(struct zorro_dev *z)
+{
+ struct net_device *dev;
+ unsigned long board = ZTWO_VADDR(z->resource.start);
+ unsigned long ioaddr = board+HYDRA_NIC_BASE;
+ static const char name[] = "NE2000"; /* ei_status.name keeps a pointer to this */
+ int start_page, stop_page;
+ int j;
+ int err;
+
+ static u32 hydra_offsets[16] = {
+ 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
+ };
+
+ dev = ____alloc_ei_netdev(0);
+ if (!dev)
+ return -ENOMEM;
+
+ for (j = 0; j < ETHER_ADDR_LEN; j++)
+ dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));
+
+ /* We must set the 8390 for word mode. */
+ z_writeb(0x4b, ioaddr + NE_EN0_DCFG);
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+
+ dev->base_addr = ioaddr;
+ dev->irq = IRQ_AMIGA_PORTS;
+
+ /* Install the Interrupt handler */
+ if (request_irq(IRQ_AMIGA_PORTS, __ei_interrupt, IRQF_SHARED, "Hydra Ethernet",
+ dev)) {
+ free_netdev(dev);
+ return -EAGAIN;
+ }
+
+ ei_status.name = name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+ ei_status.word16 = 1;
+ ei_status.bigendian = 1;
+
+ ei_status.rx_start_page = start_page + TX_PAGES;
+
+ ei_status.reset_8390 = hydra_reset_8390;
+ ei_status.block_input = hydra_block_input;
+ ei_status.block_output = hydra_block_output;
+ ei_status.get_8390_hdr = hydra_get_8390_hdr;
+ ei_status.reg_offset = hydra_offsets;
+
+ dev->netdev_ops = &hydra_netdev_ops;
+ __NS8390_init(dev, 0);
+
+ err = register_netdev(dev);
+ if (err) {
+ free_irq(IRQ_AMIGA_PORTS, dev);
+ free_netdev(dev);
+ return err;
+ }
+
+ zorro_set_drvdata(z, dev);
+
+ pr_info("%s: Hydra at %pR, address %pM (hydra.c " HYDRA_VERSION ")\n",
+ dev->name, &z->resource, dev->dev_addr);
+
+ return 0;
+}
+
+static int hydra_open(struct net_device *dev)
+{
+ __ei_open(dev);
+ return 0;
+}
+
+static int hydra_close(struct net_device *dev)
+{
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
+ __ei_close(dev);
+ return 0;
+}
+
+static void hydra_reset_8390(struct net_device *dev)
+{
+ printk(KERN_INFO "%s: Hydra hardware reset not available\n", dev->name);
+}
+
+static void hydra_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int nic_base = dev->base_addr;
+ short *ptrs;
+ unsigned long hdr_start= (nic_base-HYDRA_NIC_BASE) +
+ ((ring_page - NESM_START_PG)<<8);
+ ptrs = (short *)hdr;
+
+ *(ptrs++) = z_readw(hdr_start);
+ *((short *)hdr) = WORDSWAP(*((short *)hdr));
+ hdr_start += 2;
+ *(ptrs++) = z_readw(hdr_start);
+ *((short *)hdr+1) = WORDSWAP(*((short *)hdr+1));
+}
+
+static void hydra_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ unsigned long nic_base = dev->base_addr;
+ unsigned long mem_base = nic_base - HYDRA_NIC_BASE;
+ unsigned long xfer_start = mem_base + ring_offset - (NESM_START_PG<<8);
+
+ if (count&1)
+ count++;
+
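+ /* If the request runs past the end of the receive ring, copy the
+ tail of the ring first and wrap to the ring base for the rest. */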
+ if (xfer_start + count > mem_base + (NESM_STOP_PG<<8)) {
+ int semi_count = (mem_base + (NESM_STOP_PG<<8)) - xfer_start;
+
+ z_memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ z_memcpy_fromio(skb->data + semi_count, mem_base, count);
+ } else
+ z_memcpy_fromio(skb->data, xfer_start, count);
+
+}
+
+static void hydra_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ unsigned long nic_base = dev->base_addr;
+ unsigned long mem_base = nic_base - HYDRA_NIC_BASE;
+
+ if (count&1)
+ count++;
+
+ z_memcpy_toio(mem_base+((start_page - NESM_START_PG)<<8), buf, count);
+}
+
+static void __devexit hydra_remove_one(struct zorro_dev *z)
+{
+ struct net_device *dev = zorro_get_drvdata(z);
+
+ unregister_netdev(dev);
+ free_irq(IRQ_AMIGA_PORTS, dev);
+ release_mem_region(ZTWO_PADDR(dev->base_addr)-HYDRA_NIC_BASE, 0x10000);
+ free_netdev(dev);
+}
+
+static int __init hydra_init_module(void)
+{
+ return zorro_register_driver(&hydra_driver);
+}
+
+static void __exit hydra_cleanup_module(void)
+{
+ zorro_unregister_driver(&hydra_driver);
+}
+
+module_init(hydra_init_module);
+module_exit(hydra_cleanup_module);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
new file mode 100644
index 000000000000..05ae21435bfd
--- /dev/null
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -0,0 +1,1080 @@
+/* 8390.c: A general NS8390 ethernet driver core for linux. */
+/*
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+
+ This is the chip-specific code for many 8390-based ethernet adaptors.
+ This is not a complete driver, it must be combined with board-specific
+ code such as ne.c, wd.c, 3c503.c, etc.
+
+  Seeing how at least eight drivers use this code (not counting the
+  PCMCIA ones), it is easy to break some card by what seems like
+ a simple innocent change. Please contact me or Donald if you think
+ you have found something that needs changing. -- PG
+
+
+ Changelog:
+
+ Paul Gortmaker : remove set_bit lock, other cleanups.
+ Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to
+ ei_block_input() for eth_io_copy_and_sum().
+ Paul Gortmaker : exchange static int ei_pingpong for a #define,
+ also add better Tx error handling.
+ Paul Gortmaker : rewrite Rx overrun handling as per NS specs.
+ Alexey Kuznetsov : use the 8390's six bit hash multicast filter.
+ Paul Gortmaker : tweak ANK's above multicast changes a bit.
+ Paul Gortmaker : update packet statistics for v2.1.x
+ Alan Cox : support arbitrary stupid port mappings on the
+ 68K Macintosh. Support >16bit I/O spaces
+ Paul Gortmaker : add kmod support for auto-loading of the 8390
+ module by all drivers that require it.
+ Alan Cox : Spinlocking work, added 'BUG_83C690'
+ Paul Gortmaker : Separate out Tx timeout code from Tx path.
+ Paul Gortmaker : Remove old unused single Tx buffer code.
+ Hayato Fujiwara : Add m32r support.
+ Paul Gortmaker : use skb_padto() instead of stack scratch area
+
+ Sources:
+ The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
+
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <asm/system.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#define NS8390_CORE
+#include "8390.h"
+
+#define BUG_83C690
+
+/* These are the operational function interfaces to board-specific
+ routines.
+ void reset_8390(struct net_device *dev)
+ Resets the board associated with DEV, including a hardware reset of
+ the 8390. This is only called when there is a transmit timeout, and
+ it is always followed by 8390_init().
+ void block_output(struct net_device *dev, int count, const unsigned char *buf,
+ int start_page)
+ Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The
+ "page" value uses the 8390's 256-byte pages.
+  void get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+ Read the 4 byte, page aligned 8390 header. *If* there is a
+ subsequent read, it will be of the rest of the packet.
+ void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+ Read COUNT bytes from the packet buffer into the skb data area. Start
+ reading from RING_OFFSET, the address as the 8390 sees it. This will always
+ follow the read of the 8390 header.
+*/
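+
+/*
+ * Illustrative only: a board driver typically wires these four hooks
+ * into ei_status and then calls the init routine, roughly like this
+ * (the board_* names are hypothetical; see hydra.c or lne390.c for
+ * concrete examples):
+ *
+ *     ei_status.reset_8390 = board_reset_8390;
+ *     ei_status.block_input = board_block_input;
+ *     ei_status.block_output = board_block_output;
+ *     ei_status.get_8390_hdr = board_get_8390_hdr;
+ *     dev->netdev_ops = &board_netdev_ops;
+ *     __NS8390_init(dev, 0);
+ */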
+#define ei_reset_8390 (ei_local->reset_8390)
+#define ei_block_output (ei_local->block_output)
+#define ei_block_input (ei_local->block_input)
+#define ei_get_8390_hdr (ei_local->get_8390_hdr)
+
+/* use 0 for production, 1 for verification, >=2 for debug */
+#ifndef ei_debug
+int ei_debug = 1;
+#endif
+
+/* Index to functions. */
+static void ei_tx_intr(struct net_device *dev);
+static void ei_tx_err(struct net_device *dev);
+static void ei_receive(struct net_device *dev);
+static void ei_rx_overrun(struct net_device *dev);
+
+/* Routines generic to NS8390-based boards. */
+static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
+ int start_page);
+static void do_set_multicast_list(struct net_device *dev);
+static void __NS8390_init(struct net_device *dev, int startp);
+
+/*
+ * SMP and the 8390 setup.
+ *
+ * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is
+ * a page register that controls bank and packet buffer access. We guard
+ * this with ei_local->page_lock. Nobody should assume or set the page other
+ * than zero when the lock is not held. Lock holders must restore page 0
+ * before unlocking. Even pure readers must take the lock so they are
+ * guaranteed to be looking at page 0.
+ *
+ * To make life difficult the chip can also be very slow. We therefore can't
+ * just use spinlocks. For the longer lockups we disable the irq the device
+ * sits on and hold the lock. We must hold the lock because there is a dual
+ * processor case other than interrupts (get stats/set multicast list in
+ * parallel with each other and transmit).
+ *
+ * Note: in theory we could just disable the irq on the card _but_ there is
+ * a latency on SMP irq delivery. So we can easily go "disable irq", "sync
+ * irqs", enter the lock, and still take the queued irq. So we waddle
+ * instead of flying.
+ *
+ * Finally, by special arrangement for the purpose of being generally
+ * annoying, the transmit function is called bh-atomic. That places
+ * restrictions on the user context callers, as disable_irq won't save
+ * them.
+ *
+ * Additional explanation of problems with locking by Alan Cox:
+ *
+ * "The author (me) didn't use spin_lock_irqsave because the slowness of the
+ * card means that approach caused horrible problems like losing serial data
+ * at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA
+ * chips with FPGA front ends.
+ *
+ * Ok the logic behind the 8390 is very simple:
+ *
+ * Things to know
+ * - IRQ delivery is asynchronous to the PCI bus
+ * - Blocking the local CPU IRQ via spin locks was too slow
+ * - The chip has register windows needing locking work
+ *
+ * So the path was once (I say once as people appear to have changed it
+ * in the mean time and it now looks rather bogus if the changes to use
+ * disable_irq_nosync_irqsave are disabling the local IRQ)
+ *
+ *
+ * Take the page lock
+ * Mask the IRQ on chip
+ *	Disable the IRQ (but not mask locally; someone seems to have
+ * broken this with the lock validator stuff)
+ * [This must be _nosync as the page lock may otherwise
+ * deadlock us]
+ * Drop the page lock and turn IRQs back on
+ *
+ * At this point an existing IRQ may still be running but we can't
+ * get a new one
+ *
+ * Take the lock (so we know the IRQ has terminated) but don't mask
+ * the IRQs on the processor
+ * Set irqlock [for debug]
+ *
+ * Transmit (slow as ****)
+ *
+ * re-enable the IRQ
+ *
+ *
+ * We have to use disable_irq because otherwise you will get delayed
+ * interrupts on the APIC bus deadlocking the transmit path.
+ *
+ * Quite hairy but the chip simply wasn't designed for SMP and you can't
+ * even ACK an interrupt without risking corrupting other parallel
+ * activities on the chip." [lkml, 25 Jul 2007]
+ */
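+
+/*
+ * A condensed sketch of the transmit-path sequence described above
+ * (summary only -- the authoritative code is __ei_start_xmit() below):
+ *
+ *     spin_lock_irqsave(&ei_local->page_lock, flags);
+ *     ei_outb_p(0x00, e8390_base + EN0_IMR);       -- mask chip IRQs
+ *     spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ *     disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);
+ *     spin_lock(&ei_local->page_lock);             -- any running IRQ is done
+ *     ... slow work with the register window ...
+ *     ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);  -- unmask chip IRQs
+ *     spin_unlock(&ei_local->page_lock);
+ *     enable_irq_lockdep_irqrestore(dev->irq, &flags);
+ */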
+
+
+
+/**
+ * ei_open - Open/initialize the board.
+ * @dev: network device to initialize
+ *
+ * This routine goes all-out, setting everything
+ * up anew at each open, even though many of these registers should only
+ * need to be set once at boot.
+ */
+static int __ei_open(struct net_device *dev)
+{
+ unsigned long flags;
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ if (dev->watchdog_timeo <= 0)
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ /*
+ * Grab the page lock so we own the register set, then call
+ * the init function.
+ */
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ __NS8390_init(dev, 1);
+	/* Set the flag before we drop the lock. That way the IRQ arrives
+	   after it's set and we get no silly warnings */
+ netif_start_queue(dev);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ ei_local->irqlock = 0;
+ return 0;
+}
+
+/**
+ * ei_close - shut down network device
+ * @dev: network device to close
+ *
+ * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done.
+ */
+static int __ei_close(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ unsigned long flags;
+
+ /*
+ * Hold the page lock during close
+ */
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ __NS8390_init(dev, 0);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ netif_stop_queue(dev);
+ return 0;
+}
+
+/**
+ * ei_tx_timeout - handle transmit time out condition
+ * @dev: network device which has apparently fallen asleep
+ *
+ * Called by kernel when device never acknowledges a transmit has
+ * completed (or failed) - i.e. never posted a Tx related interrupt.
+ */
+
+static void __ei_tx_timeout(struct net_device *dev)
+{
+ unsigned long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = netdev_priv(dev);
+ int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
+ unsigned long flags;
+
+ dev->stats.tx_errors++;
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ txsr = ei_inb(e8390_base+EN0_TSR);
+ isr = ei_inb(e8390_base+EN0_ISR);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+ netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d\n",
+ (txsr & ENTSR_ABT) ? "excess collisions." :
+ (isr) ? "lost interrupt?" : "cable problem?",
+ txsr, isr, tickssofar);
+
+ if (!isr && !dev->stats.tx_packets) {
+ /* The 8390 probably hasn't gotten on the cable yet. */
+ ei_local->interface_num ^= 1; /* Try a different xcvr. */
+ }
+
+ /* Ugly but a reset can be slow, yet must be protected */
+
+ disable_irq_nosync_lockdep(dev->irq);
+ spin_lock(&ei_local->page_lock);
+
+ /* Try to restart the card. Perhaps the user has fixed something. */
+ ei_reset_8390(dev);
+ __NS8390_init(dev, 1);
+
+ spin_unlock(&ei_local->page_lock);
+ enable_irq_lockdep(dev->irq);
+ netif_wake_queue(dev);
+}
+
+/**
+ * ei_start_xmit - begin packet transmission
+ * @skb: packet to be sent
+ * @dev: network device to which packet is sent
+ *
+ * Sends a packet to an 8390 network device.
+ */
+
+static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ unsigned long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = netdev_priv(dev);
+ int send_length = skb->len, output_page;
+ unsigned long flags;
+ char buf[ETH_ZLEN];
+ char *data = skb->data;
+
+ if (skb->len < ETH_ZLEN) {
+ memset(buf, 0, ETH_ZLEN); /* more efficient than doing just the needed bits */
+ memcpy(buf, data, skb->len);
+ send_length = ETH_ZLEN;
+ data = buf;
+ }
+
+	/* Mask interrupts from the ethercard.
+	   SMP: We have to grab the lock here, otherwise the IRQ handler on
+	   another CPU can flip the register window and race the IRQ mask set.
+	   We'd end up trashing the mcast filter, not disabling irqs, if we
+	   didn't lock */
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ ei_outb_p(0x00, e8390_base + EN0_IMR);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+
+ /*
+ * Slow phase with lock held.
+ */
+
+ disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);
+
+ spin_lock(&ei_local->page_lock);
+
+ ei_local->irqlock = 1;
+
+ /*
+ * We have two Tx slots available for use. Find the first free
+ * slot, and then perform some sanity checks. With two Tx bufs,
+ * you get very close to transmitting back-to-back packets. With
+ * only one Tx buf, the transmitter sits idle while you reload the
+ * card, leaving a substantial gap between each transmitted packet.
+ */
+
+ if (ei_local->tx1 == 0) {
+ output_page = ei_local->tx_start_page;
+ ei_local->tx1 = send_length;
+ if (ei_debug && ei_local->tx2 > 0)
+ netdev_dbg(dev, "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
+ ei_local->tx2, ei_local->lasttx, ei_local->txing);
+ } else if (ei_local->tx2 == 0) {
+ output_page = ei_local->tx_start_page + TX_PAGES/2;
+ ei_local->tx2 = send_length;
+ if (ei_debug && ei_local->tx1 > 0)
+ netdev_dbg(dev, "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
+ ei_local->tx1, ei_local->lasttx, ei_local->txing);
+ } else { /* We should never get here. */
+ if (ei_debug)
+ netdev_dbg(dev, "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+ ei_local->tx1, ei_local->tx2, ei_local->lasttx);
+ ei_local->irqlock = 0;
+ netif_stop_queue(dev);
+ ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ spin_unlock(&ei_local->page_lock);
+ enable_irq_lockdep_irqrestore(dev->irq, &flags);
+ dev->stats.tx_errors++;
+ return NETDEV_TX_BUSY;
+ }
+
+ /*
+ * Okay, now upload the packet and trigger a send if the transmitter
+ * isn't already sending. If it is busy, the interrupt handler will
+ * trigger the send later, upon receiving a Tx done interrupt.
+ */
+
+ ei_block_output(dev, send_length, data, output_page);
+
+ if (!ei_local->txing) {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, send_length, output_page);
+ if (output_page == ei_local->tx_start_page) {
+ ei_local->tx1 = -1;
+ ei_local->lasttx = -1;
+ } else {
+ ei_local->tx2 = -1;
+ ei_local->lasttx = -2;
+ }
+ } else
+ ei_local->txqueue++;
+
+ if (ei_local->tx1 && ei_local->tx2)
+ netif_stop_queue(dev);
+ else
+ netif_start_queue(dev);
+
+ /* Turn 8390 interrupts back on. */
+ ei_local->irqlock = 0;
+ ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+
+ spin_unlock(&ei_local->page_lock);
+ enable_irq_lockdep_irqrestore(dev->irq, &flags);
+ skb_tx_timestamp(skb);
+ dev_kfree_skb(skb);
+ dev->stats.tx_bytes += send_length;
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ei_interrupt - handle the interrupts from an 8390
+ * @irq: interrupt number
+ * @dev_id: a pointer to the net_device
+ *
+ * Handle the ether interface interrupts. We pull packets from
+ * the 8390 via the card specific functions and fire them at the networking
+ * stack. We also handle transmit completions and wake the transmit path if
+ * necessary. We also update the counters and do other housekeeping as
+ * needed.
+ */
+
+static irqreturn_t __ei_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ unsigned long e8390_base = dev->base_addr;
+ int interrupts, nr_serviced = 0;
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ /*
+ * Protect the irq test too.
+ */
+
+ spin_lock(&ei_local->page_lock);
+
+ if (ei_local->irqlock) {
+ /*
+ * This might just be an interrupt for a PCI device sharing
+ * this line
+ */
+ netdev_err(dev, "Interrupted while interrupts are masked! isr=%#2x imr=%#2x\n",
+ ei_inb_p(e8390_base + EN0_ISR),
+ ei_inb_p(e8390_base + EN0_IMR));
+ spin_unlock(&ei_local->page_lock);
+ return IRQ_NONE;
+ }
+
+ /* Change to page 0 and read the intr status reg. */
+ ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
+ if (ei_debug > 3)
+ netdev_dbg(dev, "interrupt(isr=%#2.2x)\n",
+ ei_inb_p(e8390_base + EN0_ISR));
+
+ /* !!Assumption!! -- we stay in page 0. Don't break this. */
+ while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
+ ++nr_serviced < MAX_SERVICE) {
+ if (!netif_running(dev)) {
+ netdev_warn(dev, "interrupt from stopped card\n");
+ /* rmk - acknowledge the interrupts */
+ ei_outb_p(interrupts, e8390_base + EN0_ISR);
+ interrupts = 0;
+ break;
+ }
+ if (interrupts & ENISR_OVER)
+ ei_rx_overrun(dev);
+ else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) {
+ /* Got a good (?) packet. */
+ ei_receive(dev);
+ }
+ /* Push the next to-transmit packet through. */
+ if (interrupts & ENISR_TX)
+ ei_tx_intr(dev);
+ else if (interrupts & ENISR_TX_ERR)
+ ei_tx_err(dev);
+
+ if (interrupts & ENISR_COUNTERS) {
+ dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
+ dev->stats.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1);
+ dev->stats.rx_missed_errors += ei_inb_p(e8390_base + EN0_COUNTER2);
+ ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
+ }
+
+ /* Ignore any RDC interrupts that make it back to here. */
+ if (interrupts & ENISR_RDC)
+ ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR);
+
+ ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
+ }
+
+ if (interrupts && ei_debug) {
+ ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
+ if (nr_serviced >= MAX_SERVICE) {
+ /* 0xFF is valid for a card removal */
+ if (interrupts != 0xFF)
+ netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
+ interrupts);
+ ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
+ } else {
+ netdev_warn(dev, "unknown interrupt %#2x\n", interrupts);
+ ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
+ }
+ }
+ spin_unlock(&ei_local->page_lock);
+ return IRQ_RETVAL(nr_serviced > 0);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void __ei_poll(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ __ei_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+/**
+ * ei_tx_err - handle transmitter error
+ * @dev: network device which threw the exception
+ *
+ * A transmitter error has happened. Most likely excess collisions (which
+ * is a fairly normal condition). If the error is one where the Tx will
+ * have been aborted, we try and send another one right away, instead of
+ * letting the failed packet sit and collect dust in the Tx buffer. This
+ * is a much better solution as it avoids kernel based Tx timeouts, and
+ * an unnecessary card reset.
+ *
+ * Called with lock held.
+ */
+
+static void ei_tx_err(struct net_device *dev)
+{
+ unsigned long e8390_base = dev->base_addr;
+ /* ei_local is used on some platforms via the EI_SHIFT macro */
+ struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
+ unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
+ unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
+
+#ifdef VERBOSE_ERROR_DUMP
+ netdev_dbg(dev, "transmitter error (%#2x):", txsr);
+ if (txsr & ENTSR_ABT)
+ pr_cont(" excess-collisions ");
+ if (txsr & ENTSR_ND)
+ pr_cont(" non-deferral ");
+ if (txsr & ENTSR_CRS)
+ pr_cont(" lost-carrier ");
+ if (txsr & ENTSR_FU)
+ pr_cont(" FIFO-underrun ");
+ if (txsr & ENTSR_CDH)
+ pr_cont(" lost-heartbeat ");
+ pr_cont("\n");
+#endif
+
+ ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */
+
+ if (tx_was_aborted)
+ ei_tx_intr(dev);
+ else {
+ dev->stats.tx_errors++;
+ if (txsr & ENTSR_CRS)
+ dev->stats.tx_carrier_errors++;
+ if (txsr & ENTSR_CDH)
+ dev->stats.tx_heartbeat_errors++;
+ if (txsr & ENTSR_OWC)
+ dev->stats.tx_window_errors++;
+ }
+}
+
+/**
+ * ei_tx_intr - transmit interrupt handler
+ * @dev: network device for which tx intr is handled
+ *
+ * We have finished a transmit: check for errors and then trigger the next
+ * packet to be sent. Called with lock held.
+ */
+
+static void ei_tx_intr(struct net_device *dev)
+{
+ unsigned long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = netdev_priv(dev);
+ int status = ei_inb(e8390_base + EN0_TSR);
+
+ ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
+
+ /*
+ * There are two Tx buffers, see which one finished, and trigger
+ * the send of another one if it exists.
+ */
+ ei_local->txqueue--;
+
+ if (ei_local->tx1 < 0) {
+ if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
+ pr_err("%s: bogus last_tx_buffer %d, tx1=%d\n",
+ ei_local->name, ei_local->lasttx, ei_local->tx1);
+ ei_local->tx1 = 0;
+ if (ei_local->tx2 > 0) {
+ ei_local->txing = 1;
+			NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + TX_PAGES/2);
+ dev->trans_start = jiffies;
+			ei_local->tx2 = -1;
+ ei_local->lasttx = 2;
+ } else
+ ei_local->lasttx = 20, ei_local->txing = 0;
+ } else if (ei_local->tx2 < 0) {
+ if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
+ pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n",
+ ei_local->name, ei_local->lasttx, ei_local->tx2);
+ ei_local->tx2 = 0;
+ if (ei_local->tx1 > 0) {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
+ dev->trans_start = jiffies;
+ ei_local->tx1 = -1;
+ ei_local->lasttx = 1;
+ } else
+ ei_local->lasttx = 10, ei_local->txing = 0;
+ } /* else
+ netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
+ ei_local->lasttx);
+*/
+
+ /* Minimize Tx latency: update the statistics after we restart TXing. */
+ if (status & ENTSR_COL)
+ dev->stats.collisions++;
+ if (status & ENTSR_PTX)
+ dev->stats.tx_packets++;
+ else {
+ dev->stats.tx_errors++;
+ if (status & ENTSR_ABT) {
+ dev->stats.tx_aborted_errors++;
+ dev->stats.collisions += 16;
+ }
+ if (status & ENTSR_CRS)
+ dev->stats.tx_carrier_errors++;
+ if (status & ENTSR_FU)
+ dev->stats.tx_fifo_errors++;
+ if (status & ENTSR_CDH)
+ dev->stats.tx_heartbeat_errors++;
+ if (status & ENTSR_OWC)
+ dev->stats.tx_window_errors++;
+ }
+ netif_wake_queue(dev);
+}
+
+/**
+ * ei_receive - receive some packets
+ * @dev: network device with which receive will be run
+ *
+ * We have a good packet(s), get it/them out of the buffers.
+ * Called with lock held.
+ */
+
+static void ei_receive(struct net_device *dev)
+{
+ unsigned long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = netdev_priv(dev);
+ unsigned char rxing_page, this_frame, next_frame;
+ unsigned short current_offset;
+ int rx_pkt_count = 0;
+ struct e8390_pkt_hdr rx_frame;
+ int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;
+
+ while (++rx_pkt_count < 10) {
+ int pkt_len, pkt_stat;
+
+ /* Get the rx page (incoming packet pointer). */
+ ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
+ rxing_page = ei_inb_p(e8390_base + EN1_CURPAG);
+ ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
+
+ /* Remove one frame from the ring. Boundary is always a page behind. */
+ this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1;
+ if (this_frame >= ei_local->stop_page)
+ this_frame = ei_local->rx_start_page;
+
+ /* Someday we'll omit the previous, iff we never get this message.
+ (There is at least one clone claimed to have a problem.)
+
+ Keep quiet if it looks like a card removal. One problem here
+ is that some clones crash in roughly the same way.
+ */
+ if (ei_debug > 0 &&
+ this_frame != ei_local->current_page &&
+ (this_frame != 0x0 || rxing_page != 0xFF))
+ netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
+ this_frame, ei_local->current_page);
+
+ if (this_frame == rxing_page) /* Read all the frames? */
+ break; /* Done for now */
+
+ current_offset = this_frame << 8;
+ ei_get_8390_hdr(dev, &rx_frame, this_frame);
+
+ pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
+ pkt_stat = rx_frame.status;
+
+ next_frame = this_frame + 1 + ((pkt_len+4)>>8);
+
+ /* Check for bogosity warned by 3c503 book: the status byte is never
+ written. This happened a lot during testing! This code should be
+ cleaned up someday. */
+ if (rx_frame.next != next_frame &&
+ rx_frame.next != next_frame + 1 &&
+ rx_frame.next != next_frame - num_rx_pages &&
+ rx_frame.next != next_frame + 1 - num_rx_pages) {
+ ei_local->current_page = rxing_page;
+ ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
+ dev->stats.rx_errors++;
+ continue;
+ }
+
+ if (pkt_len < 60 || pkt_len > 1518) {
+ if (ei_debug)
+ netdev_dbg(dev, "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
+ rx_frame.count, rx_frame.status,
+ rx_frame.next);
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
+ } else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) {
+ if (ei_debug > 1)
+ netdev_dbg(dev, "Couldn't allocate a sk_buff of size %d\n",
+ pkt_len);
+ dev->stats.rx_dropped++;
+ break;
+ } else {
+ skb_reserve(skb, 2); /* IP headers on 16 byte boundaries */
+ skb_put(skb, pkt_len); /* Make room */
+ ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
+ skb->protocol = eth_type_trans(skb, dev);
+ if (!skb_defer_rx_timestamp(skb))
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ if (pkt_stat & ENRSR_PHY)
+ dev->stats.multicast++;
+ }
+ } else {
+ if (ei_debug)
+ netdev_dbg(dev, "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ rx_frame.status, rx_frame.next,
+ rx_frame.count);
+ dev->stats.rx_errors++;
+ /* NB: The NIC counts CRC, frame and missed errors. */
+ if (pkt_stat & ENRSR_FO)
+ dev->stats.rx_fifo_errors++;
+ }
+ next_frame = rx_frame.next;
+
+		/* This _should_ never happen: it's here to avoid bad clones. */
+ if (next_frame >= ei_local->stop_page) {
+ netdev_notice(dev, "next frame inconsistency, %#2x\n",
+ next_frame);
+ next_frame = ei_local->rx_start_page;
+ }
+ ei_local->current_page = next_frame;
+ ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
+ }
+
+ /* We used to also ack ENISR_OVER here, but that would sometimes mask
+ a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
+ ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
+}
+
+/**
+ * ei_rx_overrun - handle receiver overrun
+ * @dev: network device which threw exception
+ *
+ * We have a receiver overrun: we have to kick the 8390 to get it started
+ * again. Problem is that you have to kick it exactly as NS prescribes in
+ * the updated datasheets, or "the NIC may act in an unpredictable manner."
+ * This includes causing "the NIC to defer indefinitely when it is stopped
+ * on a busy network." Ugh.
+ * Called with lock held. Don't call this with the interrupts off or your
+ * computer will hate you - it takes 10ms or so.
+ */
+
+static void ei_rx_overrun(struct net_device *dev)
+{
+ unsigned long e8390_base = dev->base_addr;
+ unsigned char was_txing, must_resend = 0;
+ /* ei_local is used on some platforms via the EI_SHIFT macro */
+ struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
+
+ /*
+ * Record whether a Tx was in progress and then issue the
+ * stop command.
+ */
+ was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
+ ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
+
+ if (ei_debug > 1)
+ netdev_dbg(dev, "Receiver overrun\n");
+ dev->stats.rx_over_errors++;
+
+ /*
+ * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
+ * Early datasheets said to poll the reset bit, but now they say that
+ * it "is not a reliable indicator and subsequently should be ignored."
+ * We wait at least 10ms.
+ */
+
+ mdelay(10);
+
+ /*
+ * Reset RBCR[01] back to zero as per magic incantation.
+ */
+ ei_outb_p(0x00, e8390_base+EN0_RCNTLO);
+ ei_outb_p(0x00, e8390_base+EN0_RCNTHI);
+
+ /*
+ * See if any Tx was interrupted or not. According to NS, this
+ * step is vital, and skipping it will cause no end of havoc.
+ */
+
+ if (was_txing) {
+ unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
+ if (!tx_completed)
+ must_resend = 1;
+ }
+
+ /*
+ * Have to enter loopback mode and then restart the NIC before
+ * you are allowed to slurp packets up off the ring.
+ */
+ ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
+ ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
+
+ /*
+ * Clear the Rx ring of all the debris, and ack the interrupt.
+ */
+ ei_receive(dev);
+ ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR);
+
+ /*
+ * Leave loopback mode, and resend any packet that got stopped.
+ */
+ ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
+ if (must_resend)
+ ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
+}
+
+/*
+ * Collect the stats. This is called unlocked and from several contexts.
+ */
+
+static struct net_device_stats *__ei_get_stats(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ struct ei_device *ei_local = netdev_priv(dev);
+ unsigned long flags;
+
+ /* If the card is stopped, just return the present stats. */
+ if (!netif_running(dev))
+ return &dev->stats;
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ /* Read the counter registers, assuming we are in page 0. */
+ dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0);
+ dev->stats.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1);
+ dev->stats.rx_missed_errors += ei_inb_p(ioaddr + EN0_COUNTER2);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+ return &dev->stats;
+}
+
+/*
+ * Form the 64 bit 8390 multicast table from the linked list of addresses
+ * associated with this dev structure.
+ */
+
+static inline void make_mc_bits(u8 *bits, struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 crc = ether_crc(ETH_ALEN, ha->addr);
+ /*
+ * The 8390 uses the 6 most significant bits of the
+ * CRC to index the multicast table.
+ */
+ bits[crc>>29] |= (1<<((crc>>26)&7));
+ }
+}
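+
+/*
+ * Worked example (CRC value chosen for illustration only): if
+ * ether_crc() returns 0xD4000000, then crc>>29 = 6 selects filter
+ * byte 6 and (crc>>26)&7 = 5 selects bit 5, i.e. bits[6] |= 0x20.
+ */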
+
+/**
+ * do_set_multicast_list - set/clear multicast filter
+ * @dev: net device for which multicast filter is adjusted
+ *
+ * Set or clear the multicast filter for this adaptor. May be called
+ * from a BH in 2.1.x. Must be called with lock held.
+ */
+
+static void do_set_multicast_list(struct net_device *dev)
+{
+ unsigned long e8390_base = dev->base_addr;
+ int i;
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
+ memset(ei_local->mcfilter, 0, 8);
+ if (!netdev_mc_empty(dev))
+ make_mc_bits(ei_local->mcfilter, dev);
+ } else
+ memset(ei_local->mcfilter, 0xFF, 8); /* mcast set to accept-all */
+
+ /*
+ * DP8390 manuals don't specify any magic sequence for altering
+ * the multicast regs on an already running card. To be safe, we
+ * ensure multicast mode is off prior to loading up the new hash
+ * table. If this proves to be not enough, we can always resort
+ * to stopping the NIC, loading the table and then restarting.
+ *
+ * Bug Alert! The MC regs on the SMC 83C690 (SMC Elite and SMC
+ * Elite16) appear to be write-only. The NS 8390 data sheet lists
+ * them as r/w so this is a bug. The SMC 83C790 (SMC Ultra and
+ * Ultra32 EISA) appears to have this bug fixed.
+ */
+
+ if (netif_running(dev))
+ ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
+ ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
+ for (i = 0; i < 8; i++) {
+ ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
+#ifndef BUG_83C690
+ if (ei_inb_p(e8390_base + EN1_MULT_SHIFT(i)) != ei_local->mcfilter[i])
+ netdev_err(dev, "Multicast filter read/write mismap %d\n",
+ i);
+#endif
+ }
+ ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);
+
+ if (dev->flags&IFF_PROMISC)
+ ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
+ else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
+ ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
+ else
+ ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
+}
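+
+/*
+ * For reference: in the DP8390 receive configuration register, 0x08
+ * is "accept multicast" and 0x10 is "promiscuous physical" (per the
+ * National databook bit assignments), so the RXCONFIG | 0x18 write
+ * above enables both, while RXCONFIG | 0x08 enables multicast only.
+ */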
+
+/*
+ *	Called without lock held. This is invoked from user context and may
+ *	run in parallel with just about everything else. It's also fairly
+ *	quick and not called too often. We must protect against both bh and
+ *	irq users.
+ */
+
+static void __ei_set_multicast_list(struct net_device *dev)
+{
+ unsigned long flags;
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ do_set_multicast_list(dev);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+}
+
+/**
+ * ethdev_setup - init rest of 8390 device struct
+ * @dev: network device structure to init
+ *
+ * Initialize the rest of the 8390 device structure. Do NOT __init
+ * this, as it is used by 8390 based modular drivers too.
+ */
+
+static void ethdev_setup(struct net_device *dev)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ if (ei_debug > 1)
+ printk(version);
+
+ ether_setup(dev);
+
+ spin_lock_init(&ei_local->page_lock);
+}
+
+/**
+ * alloc_ei_netdev - alloc_etherdev counterpart for 8390
+ * @size: extra bytes to allocate
+ *
+ * Allocate 8390-specific net_device.
+ */
+static struct net_device *____alloc_ei_netdev(int size)
+{
+ return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
+ ethdev_setup);
+}
+
+
+
+
+/* This page of functions should be 8390 generic */
+/* Follow National Semi's recommendations for initializing the "NIC". */
+
+/**
+ * NS8390_init - initialize 8390 hardware
+ * @dev: network device to initialize
+ * @startp: boolean. non-zero value to initiate chip processing
+ *
+ * Must be called with lock held.
+ */
+
+static void __NS8390_init(struct net_device *dev, int startp)
+{
+ unsigned long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = netdev_priv(dev);
+ int i;
+ int endcfg = ei_local->word16
+ ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
+ : 0x48;
+
+ if (sizeof(struct e8390_pkt_hdr) != 4)
+ panic("8390.c: header struct mispacked\n");
+ /* Follow National Semi's recommendations for initing the DP83902. */
+ ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
+ ei_outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */
+ /* Clear the remote byte count registers. */
+ ei_outb_p(0x00, e8390_base + EN0_RCNTLO);
+ ei_outb_p(0x00, e8390_base + EN0_RCNTHI);
+	/* Set to monitor and loopback mode -- this is vital! */
+ ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
+ ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
+ /* Set the transmit page and receive ring. */
+ ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
+ ei_local->tx1 = ei_local->tx2 = 0;
+ ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
+	ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3F, NS says 0x26 */
+ ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */
+ ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
+ /* Clear the pending interrupts and mask. */
+ ei_outb_p(0xFF, e8390_base + EN0_ISR);
+ ei_outb_p(0x00, e8390_base + EN0_IMR);
+
+ /* Copy the station address into the DS8390 registers. */
+
+ ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
+ for (i = 0; i < 6; i++) {
+ ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
+ if (ei_debug > 1 &&
+ ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
+ netdev_err(dev, "Hw. address read/write mismap %d\n", i);
+ }
+
+ ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
+ ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
+
+ ei_local->tx1 = ei_local->tx2 = 0;
+ ei_local->txing = 0;
+
+ if (startp) {
+ ei_outb_p(0xff, e8390_base + EN0_ISR);
+ ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
+ ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
+ /* 3c503 TechMan says rxconfig only after the NIC is started. */
+ ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on, */
+ do_set_multicast_list(dev); /* (re)load the mcast table */
+ }
+}
+
+/* Trigger a transmit start, assuming the length is valid.
+ Always called with the page lock held */
+
+static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
+ int start_page)
+{
+ unsigned long e8390_base = dev->base_addr;
+	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
+
+ ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
+
+ if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) {
+ netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
+ return;
+ }
+ ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
+ ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI);
+ ei_outb_p(start_page, e8390_base + EN0_TPSR);
+ ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
+}
diff --git a/drivers/net/ethernet/8390/lne390.c b/drivers/net/ethernet/8390/lne390.c
new file mode 100644
index 000000000000..f9888d20177b
--- /dev/null
+++ b/drivers/net/ethernet/8390/lne390.c
@@ -0,0 +1,434 @@
+/*
+ lne390.c
+
+ Linux driver for Mylex LNE390 EISA Network Adapter
+
+ Copyright (C) 1996-1998, Paul Gortmaker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Information and Code Sources:
+
+ 1) Based upon framework of es3210 driver.
+ 2) The existing myriad of other Linux 8390 drivers by Donald Becker.
+ 3) Russ Nelson's asm packet driver provided additional info.
+ 4) Info for getting IRQ and sh-mem gleaned from the EISA cfg files.
+
+ The LNE390 is an EISA shared memory NS8390 implementation. Note
+ that all memory copies to/from the board must be 32bit transfers.
+ There are two versions of the card: the lne390a and the lne390b.
+ Going by the EISA cfg files, the "a" has jumpers to select between
+ BNC/AUI, but the "b" also has RJ-45 and selection is via the SCU.
+ The shared memory address selection is also slightly different.
+	Note that shared memory addresses above 1MB are supported by this driver.
+
+ You can try <http://www.mylex.com> if you want more info, as I've
+ never even seen one of these cards. :)
+
+ Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 2000/09/01
+ - get rid of check_region
+ - no need to check if dev == NULL in lne390_probe1
+*/
+
+static const char *version =
+ "lne390.c: Driver revision v0.99.1, 01/09/2000\n";
+
+#include <linux/module.h>
+#include <linux/eisa.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "8390.h"
+
+#define DRV_NAME "lne390"
+
+static int lne390_probe1(struct net_device *dev, int ioaddr);
+
+static void lne390_reset_8390(struct net_device *dev);
+
+static void lne390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page);
+static void lne390_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset);
+static void lne390_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page);
+
+#define LNE390_START_PG 0x00 /* First page of TX buffer */
+#define LNE390_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+#define LNE390_ID_PORT 0xc80 /* Same for all EISA cards */
+#define LNE390_IO_EXTENT 0x20
+#define LNE390_SA_PROM 0x16 /* Start of e'net addr. */
+#define LNE390_RESET_PORT 0xc84 /* From the pkt driver source */
+#define LNE390_NIC_OFFSET 0x00 /* Hello, the 8390 is *here* */
+
+#define LNE390_ADDR0 0x00 /* 3 byte vendor prefix */
+#define LNE390_ADDR1 0x80
+#define LNE390_ADDR2 0xe5
+
+#define LNE390_ID0 0x10009835 /* 0x3598 = 01101 01100 11000 = mlx */
+#define LNE390_ID1 0x11009835 /* above is the 390A, this is 390B */
+
+#define LNE390_CFG1 0xc84 /* NB: 0xc84 is also "reset" port. */
+#define LNE390_CFG2 0xc90
+
+/*
+ * You can OR any of the following bits together and assign it
+ * to LNE390_DEBUG to get verbose driver info during operation.
+ * Currently only the probe one is implemented.
+ */
+
+#define LNE390_D_PROBE 0x01
+#define LNE390_D_RX_PKT 0x02
+#define LNE390_D_TX_PKT 0x04
+#define LNE390_D_IRQ 0x08
+
+#define LNE390_DEBUG 0
+
+static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
+static unsigned int shmem_mapA[] __initdata = {0xff, 0xfe, 0xfd, 0xfff, 0xffe, 0xffc, 0x0d, 0x0};
+static unsigned int shmem_mapB[] __initdata = {0xff, 0xfe, 0x0e, 0xfff, 0xffe, 0xffc, 0x0d, 0x0};
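+
+/*
+ * Example decode (illustrative): if the three memory config bits read
+ * back as 0, shmem_mapA[0] = 0xff yields a shared memory base of
+ * 0xff * 0x10000 = 0xff0000, i.e. just under 16MB.
+ */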
+
+/*
+ * Probe for the card. The best way is to read the EISA ID if it
+ * is known. Then we can check the prefix of the station address
+ * PROM for a match against the value assigned to Mylex.
+ */
+
+static int __init do_lne390_probe(struct net_device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+ int irq = dev->irq;
+ int mem_start = dev->mem_start;
+ int ret;
+
+ if (ioaddr > 0x1ff) { /* Check a single specified location. */
+ if (!request_region(ioaddr, LNE390_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+ ret = lne390_probe1(dev, ioaddr);
+ if (ret)
+ release_region(ioaddr, LNE390_IO_EXTENT);
+ return ret;
+ }
+ else if (ioaddr > 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ if (!EISA_bus) {
+#if LNE390_DEBUG & LNE390_D_PROBE
+ printk("lne390-debug: Not an EISA bus. Not probing high ports.\n");
+#endif
+ return -ENXIO;
+ }
+
+ /* EISA spec allows for up to 16 slots, but 8 is typical. */
+ for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
+ if (!request_region(ioaddr, LNE390_IO_EXTENT, DRV_NAME))
+ continue;
+ if (lne390_probe1(dev, ioaddr) == 0)
+ return 0;
+ release_region(ioaddr, LNE390_IO_EXTENT);
+ dev->irq = irq;
+ dev->mem_start = mem_start;
+ }
+
+ return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init lne390_probe(int unit)
+{
+ struct net_device *dev = alloc_ei_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_lne390_probe(dev);
+ if (err)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static int __init lne390_probe1(struct net_device *dev, int ioaddr)
+{
+ int i, revision, ret;
+ unsigned long eisa_id;
+
+ if (inb_p(ioaddr + LNE390_ID_PORT) == 0xff) return -ENODEV;
+
+#if LNE390_DEBUG & LNE390_D_PROBE
+ printk("lne390-debug: probe at %#x, ID %#8x\n", ioaddr, inl(ioaddr + LNE390_ID_PORT));
+ printk("lne390-debug: config regs: %#x %#x\n",
+ inb(ioaddr + LNE390_CFG1), inb(ioaddr + LNE390_CFG2));
+#endif
+
+
+/* Check the EISA ID of the card. */
+ eisa_id = inl(ioaddr + LNE390_ID_PORT);
+ if ((eisa_id != LNE390_ID0) && (eisa_id != LNE390_ID1)) {
+ return -ENODEV;
+ }
+
+	revision = (eisa_id >> 24) & 0x01;	/* 0 = rev A, 1 = rev B */
+
+#if 0
+/* Check the Mylex vendor ID as well. Not really required. */
+ if (inb(ioaddr + LNE390_SA_PROM + 0) != LNE390_ADDR0
+ || inb(ioaddr + LNE390_SA_PROM + 1) != LNE390_ADDR1
+ || inb(ioaddr + LNE390_SA_PROM + 2) != LNE390_ADDR2 ) {
+ printk("lne390.c: card not found");
+ for(i = 0; i < ETHER_ADDR_LEN; i++)
+ printk(" %02x", inb(ioaddr + LNE390_SA_PROM + i));
+ printk(" (invalid prefix).\n");
+ return -ENODEV;
+ }
+#endif
+
+	for (i = 0; i < ETHER_ADDR_LEN; i++)
+ dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i);
+ printk("lne390.c: LNE390%X in EISA slot %d, address %pM.\n",
+ 0xa+revision, ioaddr/0x1000, dev->dev_addr);
+
+ printk("lne390.c: ");
+
+ /* Snarf the interrupt now. CFG file has them all listed as `edge' with share=NO */
+ if (dev->irq == 0) {
+ unsigned char irq_reg = inb(ioaddr + LNE390_CFG2) >> 3;
+ dev->irq = irq_map[irq_reg & 0x07];
+ printk("using");
+ } else {
+ /* This is useless unless we reprogram the card here too */
+ if (dev->irq == 2) dev->irq = 9; /* Doh! */
+ printk("assigning");
+ }
+ printk(" IRQ %d,", dev->irq);
+
+ if ((ret = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev))) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ return ret;
+ }
+
+ if (dev->mem_start == 0) {
+ unsigned char mem_reg = inb(ioaddr + LNE390_CFG2) & 0x07;
+
+ if (revision) /* LNE390B */
+ dev->mem_start = shmem_mapB[mem_reg] * 0x10000;
+ else /* LNE390A */
+ dev->mem_start = shmem_mapA[mem_reg] * 0x10000;
+ printk(" using ");
+ } else {
+ /* Should check for value in shmem_map and reprogram the card to use it */
+ dev->mem_start &= 0xfff0000;
+ printk(" assigning ");
+ }
+
+ printk("%dkB memory at physical address %#lx\n",
+ LNE390_STOP_PG/4, dev->mem_start);
+
+ /*
+ BEWARE!! Some dain-bramaged EISA SCUs will allow you to put
+ the card mem within the region covered by `normal' RAM !!!
+
+ ioremap() will fail in that case.
+ */
+ ei_status.mem = ioremap(dev->mem_start, LNE390_STOP_PG*0x100);
+ if (!ei_status.mem) {
+ printk(KERN_ERR "lne390.c: Unable to remap card memory above 1MB !!\n");
+ printk(KERN_ERR "lne390.c: Try using EISA SCU to set memory below 1MB.\n");
+ printk(KERN_ERR "lne390.c: Driver NOT installed.\n");
+ ret = -EAGAIN;
+ goto cleanup;
+ }
+ printk("lne390.c: remapped %dkB card memory to virtual address %p\n",
+ LNE390_STOP_PG/4, ei_status.mem);
+
+ dev->mem_start = (unsigned long)ei_status.mem;
+ dev->mem_end = dev->mem_start + (LNE390_STOP_PG - LNE390_START_PG)*256;
+
+ /* The 8390 offset is zero for the LNE390 */
+ dev->base_addr = ioaddr;
+
+ ei_status.name = "LNE390";
+ ei_status.tx_start_page = LNE390_START_PG;
+ ei_status.rx_start_page = LNE390_START_PG + TX_PAGES;
+ ei_status.stop_page = LNE390_STOP_PG;
+ ei_status.word16 = 1;
+
+ if (ei_debug > 0)
+ printk(version);
+
+ ei_status.reset_8390 = &lne390_reset_8390;
+ ei_status.block_input = &lne390_block_input;
+ ei_status.block_output = &lne390_block_output;
+ ei_status.get_8390_hdr = &lne390_get_8390_hdr;
+
+ dev->netdev_ops = &ei_netdev_ops;
+ NS8390_init(dev, 0);
+
+ ret = register_netdev(dev);
+ if (ret)
+ goto unmap;
+ return 0;
+unmap:
+ if (ei_status.reg0)
+ iounmap(ei_status.mem);
+cleanup:
+ free_irq(dev->irq, dev);
+ return ret;
+}
+
+/*
+ * Reset as per the packet driver method. Judging by the EISA cfg
+ * file, this just toggles the "Board Enable" bits (bit 2 and 0).
+ */
+
+static void lne390_reset_8390(struct net_device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+
+ outb(0x04, ioaddr + LNE390_RESET_PORT);
+ if (ei_debug > 1) printk("%s: resetting the LNE390...", dev->name);
+
+ mdelay(2);
+
+ ei_status.txing = 0;
+ outb(0x01, ioaddr + LNE390_RESET_PORT);
+ if (ei_debug > 1) printk("reset done\n");
+}
+
+/*
+ * Note: the following three functions make the implicit assumption
+ * that the associated memcpy will only use "rep; movsl" as long as
+ * we keep the counts as some multiple of doublewords. This is a
+ * requirement of the hardware, and also prevents us from using
+ * eth_io_copy_and_sum() since we can't guarantee it will limit
+ * itself to doubleword access.
+ */
+
+/*
+ * Grab the 8390 specific header. Similar to the block_input routine, but
+ * we don't need to be concerned with ring wrap as the header will be at
+ * the start of a page, so we optimize accordingly. (A single doubleword.)
+ */
+
+static void
+lne390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ void __iomem *hdr_start = ei_status.mem + ((ring_page - LNE390_START_PG)<<8);
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+ hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */
+}
+
+/*
+ * Block input and output are easy on shared memory ethercards, the only
+ * complication is when the ring buffer wraps. The count will already
+ * be rounded up to a doubleword value via lne390_get_8390_hdr() above.
+ */
+
+static void lne390_block_input(struct net_device *dev, int count, struct sk_buff *skb,
+ int ring_offset)
+{
+ void __iomem *xfer_start = ei_status.mem + ring_offset - (LNE390_START_PG<<8);
+
+ if (ring_offset + count > (LNE390_STOP_PG<<8)) {
+ /* Packet wraps over end of ring buffer. */
+ int semi_count = (LNE390_STOP_PG<<8) - ring_offset;
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count,
+ ei_status.mem + (TX_PAGES<<8), count);
+ } else {
+ /* Packet is in one chunk. */
+ memcpy_fromio(skb->data, xfer_start, count);
+ }
+}
+
+static void lne390_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ void __iomem *shmem = ei_status.mem + ((start_page - LNE390_START_PG)<<8);
+
+ count = (count + 3) & ~3; /* Round up to doubleword */
+ memcpy_toio(shmem, buf, count);
+}
+
+
+#ifdef MODULE
+#define MAX_LNE_CARDS 4 /* Max number of LNE390 cards per module */
+static struct net_device *dev_lne[MAX_LNE_CARDS];
+static int io[MAX_LNE_CARDS];
+static int irq[MAX_LNE_CARDS];
+static int mem[MAX_LNE_CARDS];
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(mem, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s)");
+MODULE_PARM_DESC(mem, "memory base address(es)");
+MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver");
+MODULE_LICENSE("GPL");
+
+int __init init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_LNE_CARDS; this_dev++) {
+ if (io[this_dev] == 0 && this_dev != 0)
+ break;
+ dev = alloc_ei_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_start = mem[this_dev];
+ if (do_lne390_probe(dev) == 0) {
+ dev_lne[found++] = dev;
+ continue;
+ }
+ free_netdev(dev);
+ printk(KERN_WARNING "lne390.c: No LNE390 card found (i/o = 0x%x).\n", io[this_dev]);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, LNE390_IO_EXTENT);
+ iounmap(ei_status.mem);
+}
+
+void __exit cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_LNE_CARDS; this_dev++) {
+ struct net_device *dev = dev_lne[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
new file mode 100644
index 000000000000..af5d9822cad9
--- /dev/null
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -0,0 +1,874 @@
+/* mac8390.c: New driver for 8390-based Nubus (or Nubus-alike)
+ Ethernet cards on Linux */
+/* Based on the former daynaport.c driver, by Alan Cox. Some code
+ taken from or inspired by skeleton.c by Donald Becker, acenic.c by
+ Jes Sorensen, and ne2k-pci.c by Donald Becker and Paul Gortmaker.
+
+ This software may be used and distributed according to the terms of
+ the GNU Public License, incorporated herein by reference. */
+
+/* 2000-02-28: support added for Dayna and Kinetics cards by
+ A.G.deWijn@phys.uu.nl */
+/* 2000-04-04: support added for Dayna2 by bart@etpmod.phys.tue.nl */
+/* 2001-04-18: support for DaynaPort E/LC-M by rayk@knightsmanor.org */
+/* 2001-05-15: support for Cabletron ported from old daynaport driver
+ * and fixed access to Sonic Sys card which masquerades as a Farallon
+ * by rayk@knightsmanor.org */
+/* 2002-12-30: Try to support more cards, some clues from NetBSD driver */
+/* 2003-12-26: Make sure Asante cards always work. */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/nubus.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/hwtest.h>
+#include <asm/macints.h>
+
+static char version[] =
+ "v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n";
+
+#define EI_SHIFT(x) (ei_local->reg_offset[x])
+#define ei_inb(port) in_8(port)
+#define ei_outb(val, port) out_8(port, val)
+#define ei_inb_p(port) in_8(port)
+#define ei_outb_p(val, port) out_8(port, val)
+
+#include "lib8390.c"
+
+#define WD_START_PG 0x00 /* First page of TX buffer */
+#define CABLETRON_RX_START_PG 0x00 /* First page of RX buffer */
+#define CABLETRON_RX_STOP_PG 0x30 /* Last page +1 of RX ring */
+#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG
+ /* First page of TX buffer */
+
+/*
+ * Unfortunately it seems we have to hardcode these for the moment
+ * Shouldn't the card know about this?
+ * Does anyone know where to read it off the card?
+ * Do we trust the data provided by the card?
+ */
+
+#define DAYNA_8390_BASE 0x80000
+#define DAYNA_8390_MEM 0x00000
+
+#define CABLETRON_8390_BASE 0x90000
+#define CABLETRON_8390_MEM 0x00000
+
+#define INTERLAN_8390_BASE 0xE0000
+#define INTERLAN_8390_MEM 0xD0000
+
+enum mac8390_type {
+ MAC8390_NONE = -1,
+ MAC8390_APPLE,
+ MAC8390_ASANTE,
+ MAC8390_FARALLON,
+ MAC8390_CABLETRON,
+ MAC8390_DAYNA,
+ MAC8390_INTERLAN,
+ MAC8390_KINETICS,
+};
+
+static const char *cardname[] = {
+ "apple",
+ "asante",
+ "farallon",
+ "cabletron",
+ "dayna",
+ "interlan",
+ "kinetics",
+};
+
+static const int word16[] = {
+ 1, /* apple */
+ 1, /* asante */
+ 1, /* farallon */
+ 1, /* cabletron */
+ 0, /* dayna */
+ 1, /* interlan */
+ 0, /* kinetics */
+};
+
+/* on which cards do we use NuBus resources? */
+static const int useresources[] = {
+ 1, /* apple */
+ 1, /* asante */
+ 1, /* farallon */
+ 0, /* cabletron */
+ 0, /* dayna */
+ 0, /* interlan */
+ 0, /* kinetics */
+};
+
+enum mac8390_access {
+ ACCESS_UNKNOWN = 0,
+ ACCESS_32,
+ ACCESS_16,
+};
+
+extern int mac8390_memtest(struct net_device *dev);
+static int mac8390_initdev(struct net_device *dev, struct nubus_dev *ndev,
+ enum mac8390_type type);
+
+static int mac8390_open(struct net_device *dev);
+static int mac8390_close(struct net_device *dev);
+static void mac8390_no_reset(struct net_device *dev);
+static void interlan_reset(struct net_device *dev);
+
+/* Sane (32-bit chunk memory read/write) - some Farallon and Apple cards do this */
+static void sane_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page);
+static void sane_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void sane_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page);
+
+/* dayna_memcpy to and from card */
+static void dayna_memcpy_fromcard(struct net_device *dev, void *to,
+ int from, int count);
+static void dayna_memcpy_tocard(struct net_device *dev, int to,
+ const void *from, int count);
+
+/* Dayna - Dayna/Kinetics use this */
+static void dayna_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page);
+static void dayna_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void dayna_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+
+#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
+#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
+
+#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
+
+/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
+static void slow_sane_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page);
+static void slow_sane_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void slow_sane_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+static void word_memcpy_tocard(unsigned long tp, const void *fp, int count);
+static void word_memcpy_fromcard(void *tp, unsigned long fp, int count);
+
+static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
+{
+	switch (dev->dr_sw) {
+	case NUBUS_DRSW_3COM:
+		switch (dev->dr_hw) {
+		case NUBUS_DRHW_APPLE_SONIC_NB:
+		case NUBUS_DRHW_APPLE_SONIC_LC:
+		case NUBUS_DRHW_SONNET:
+			return MAC8390_NONE;
+		default:
+			return MAC8390_APPLE;
+		}
+
+	case NUBUS_DRSW_APPLE:
+		switch (dev->dr_hw) {
+		case NUBUS_DRHW_ASANTE_LC:
+			return MAC8390_NONE;
+		case NUBUS_DRHW_CABLETRON:
+			return MAC8390_CABLETRON;
+		default:
+			return MAC8390_APPLE;
+		}
+
+	case NUBUS_DRSW_ASANTE:
+		return MAC8390_ASANTE;
+
+	case NUBUS_DRSW_TECHWORKS:
+	case NUBUS_DRSW_DAYNA2:
+	case NUBUS_DRSW_DAYNA_LC:
+		if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
+			return MAC8390_CABLETRON;
+		else
+			return MAC8390_APPLE;
+
+	case NUBUS_DRSW_FARALLON:
+		return MAC8390_FARALLON;
+
+	case NUBUS_DRSW_KINETICS:
+		switch (dev->dr_hw) {
+		case NUBUS_DRHW_INTERLAN:
+			return MAC8390_INTERLAN;
+		default:
+			return MAC8390_KINETICS;
+		}
+
+	case NUBUS_DRSW_DAYNA:
+		/*
+		 * These correspond to Dayna Sonic cards
+		 * which use the macsonic driver
+		 */
+		if (dev->dr_hw == NUBUS_DRHW_SMC9194 ||
+		    dev->dr_hw == NUBUS_DRHW_INTERLAN)
+			return MAC8390_NONE;
+		else
+			return MAC8390_DAYNA;
+	}
+	return MAC8390_NONE;
+}
+
+static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
+{
+ unsigned long outdata = 0xA5A0B5B0;
+ unsigned long indata = 0x00000000;
+ /* Try writing 32 bits */
+ memcpy_toio(membase, &outdata, 4);
+ /* Now compare them */
+ if (memcmp_withio(&outdata, membase, 4) == 0)
+ return ACCESS_32;
+ /* Write 16 bit output */
+ word_memcpy_tocard(membase, &outdata, 4);
+ /* Now read it back */
+ word_memcpy_fromcard(&indata, membase, 4);
+ if (outdata == indata)
+ return ACCESS_16;
+ return ACCESS_UNKNOWN;
+}
+
+static int __init mac8390_memsize(unsigned long membase)
+{
+ unsigned long flags;
+ int i, j;
+
+ local_irq_save(flags);
+ /* Check up to 32K in 4K increments */
+ for (i = 0; i < 8; i++) {
+ volatile unsigned short *m = (unsigned short *)(membase + (i * 0x1000));
+
+		/* Unwriteable: we have a fully decoded card and have
+		   located the end of RAM */
+ if (hwreg_present(m) == 0)
+ break;
+
+ /* write a distinctive byte */
+ *m = 0xA5A0 | i;
+ /* check that we read back what we wrote */
+ if (*m != (0xA5A0 | i))
+ break;
+
+ /* check for partial decode and wrap */
+ for (j = 0; j < i; j++) {
+ volatile unsigned short *p = (unsigned short *)(membase + (j * 0x1000));
+ if (*p != (0xA5A0 | j))
+ break;
+ }
+ /* a lower tag was clobbered: block i aliases an earlier
+ block, so the real RAM ends here */
+ if (j < i)
+ break;
+ }
+ local_irq_restore(flags);
+ /*
+ * in any case, we stopped once we tried one block too many,
+ * or once we reached 32K
+ */
+ return i * 0x1000;
+}
+
+static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev,
+ enum mac8390_type cardtype)
+{
+ struct nubus_dir dir;
+ struct nubus_dirent ent;
+ int offset;
+ volatile unsigned short *i;
+
+ printk_once(KERN_INFO pr_fmt("%s"), version);
+
+ dev->irq = SLOT2IRQ(ndev->board->slot);
+ /* This is getting to be a habit */
+ dev->base_addr = (ndev->board->slot_addr |
+ ((ndev->board->slot & 0xf) << 20));
+
+ /*
+ * Get some Nubus info - we will trust the card's idea
+ * of where its memory and registers are.
+ */
+
+ if (nubus_get_func_dir(ndev, &dir) == -1) {
+ pr_err("%s: Unable to get Nubus functional directory for slot %X!\n",
+ dev->name, ndev->board->slot);
+ return false;
+ }
+
+ /* Get the MAC address */
+ if (nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent) == -1) {
+ pr_info("%s: Couldn't get MAC address!\n", dev->name);
+ return false;
+ }
+
+ nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
+
+ if (useresources[cardtype] == 1) {
+ nubus_rewinddir(&dir);
+ if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS,
+ &ent) == -1) {
+ pr_err("%s: Memory offset resource for slot %X not found!\n",
+ dev->name, ndev->board->slot);
+ return false;
+ }
+ nubus_get_rsrc_mem(&offset, &ent, 4);
+ dev->mem_start = dev->base_addr + offset;
+ /* yes, this is how the Apple driver does it */
+ dev->base_addr = dev->mem_start + 0x10000;
+ nubus_rewinddir(&dir);
+ if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH,
+ &ent) == -1) {
+ pr_info("%s: Memory length resource for slot %X not found, probing\n",
+ dev->name, ndev->board->slot);
+ offset = mac8390_memsize(dev->mem_start);
+ } else {
+ nubus_get_rsrc_mem(&offset, &ent, 4);
+ }
+ dev->mem_end = dev->mem_start + offset;
+ } else {
+ switch (cardtype) {
+ case MAC8390_KINETICS:
+ case MAC8390_DAYNA: /* it's the same */
+ dev->base_addr = (int)(ndev->board->slot_addr +
+ DAYNA_8390_BASE);
+ dev->mem_start = (int)(ndev->board->slot_addr +
+ DAYNA_8390_MEM);
+ dev->mem_end = dev->mem_start +
+ mac8390_memsize(dev->mem_start);
+ break;
+ case MAC8390_INTERLAN:
+ dev->base_addr = (int)(ndev->board->slot_addr +
+ INTERLAN_8390_BASE);
+ dev->mem_start = (int)(ndev->board->slot_addr +
+ INTERLAN_8390_MEM);
+ dev->mem_end = dev->mem_start +
+ mac8390_memsize(dev->mem_start);
+ break;
+ case MAC8390_CABLETRON:
+ dev->base_addr = (int)(ndev->board->slot_addr +
+ CABLETRON_8390_BASE);
+ dev->mem_start = (int)(ndev->board->slot_addr +
+ CABLETRON_8390_MEM);
+ /* The base address is unreadable if 0x00
+ * has been written to the command register.
+ * Reset the chip by writing E8390_NODMA +
+ * E8390_PAGE0 + E8390_STOP (0x21) just to
+ * be sure.
+ */
+ i = (void *)dev->base_addr;
+ *i = E8390_NODMA + E8390_PAGE0 + E8390_STOP;
+ dev->mem_end = dev->mem_start +
+ mac8390_memsize(dev->mem_start);
+ break;
+
+ default:
+ pr_err("Card type %s is unsupported, sorry\n",
+ ndev->board->name);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+struct net_device * __init mac8390_probe(int unit)
+{
+ struct net_device *dev;
+ struct nubus_dev *ndev = NULL;
+ int err = -ENODEV;
+
+ static unsigned int slots;
+
+ enum mac8390_type cardtype;
+
+ /* probably should check for Nubus instead */
+
+ if (!MACH_IS_MAC)
+ return ERR_PTR(-ENODEV);
+
+ dev = ____alloc_ei_netdev(0);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0)
+ sprintf(dev->name, "eth%d", unit);
+
+ while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET,
+ ndev))) {
+ /* Have we seen it already? */
+ if (slots & (1 << ndev->board->slot))
+ continue;
+ slots |= 1 << ndev->board->slot;
+
+ cardtype = mac8390_ident(ndev);
+ if (cardtype == MAC8390_NONE)
+ continue;
+
+ if (!mac8390_init(dev, ndev, cardtype))
+ continue;
+
+ /* Do the nasty 8390 stuff */
+ if (!mac8390_initdev(dev, ndev, cardtype))
+ break;
+ }
+
+ if (!ndev)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out;
+ return dev;
+
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+#ifdef MODULE
+MODULE_AUTHOR("David Huggins-Daines <dhd@debian.org> and others");
+MODULE_DESCRIPTION("Macintosh NS8390-based Nubus Ethernet driver");
+MODULE_LICENSE("GPL");
+
+/* overkill, of course */
+static struct net_device *dev_mac8390[15];
+int init_module(void)
+{
+ int i;
+ for (i = 0; i < 15; i++) {
+ struct net_device *dev = mac8390_probe(-1);
+ if (IS_ERR(dev))
+ break;
+ dev_mac8390[i] = dev;
+ }
+ if (!i) {
+ pr_notice("No usable cards found, driver NOT installed.\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ int i;
+ for (i = 0; i < 15; i++) {
+ struct net_device *dev = dev_mac8390[i];
+ if (dev) {
+ unregister_netdev(dev);
+ free_netdev(dev);
+ }
+ }
+}
+
+#endif /* MODULE */
+
+static const struct net_device_ops mac8390_netdev_ops = {
+ .ndo_open = mac8390_open,
+ .ndo_stop = mac8390_close,
+ .ndo_start_xmit = __ei_start_xmit,
+ .ndo_tx_timeout = __ei_tx_timeout,
+ .ndo_get_stats = __ei_get_stats,
+ .ndo_set_rx_mode = __ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = __ei_poll,
+#endif
+};
+
+static int __init mac8390_initdev(struct net_device *dev,
+ struct nubus_dev *ndev,
+ enum mac8390_type type)
+{
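+ /*
+ * The 8390's sixteen registers appear in slot space with a
+ * card-dependent stride and direction: 4 bytes apart ascending
+ * (fwrd4), 4 bytes apart descending (back4), or 2 bytes apart
+ * ascending (fwrd2).
+ */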
+ static u32 fwrd4_offsets[16] = {
+ 0, 4, 8, 12,
+ 16, 20, 24, 28,
+ 32, 36, 40, 44,
+ 48, 52, 56, 60
+ };
+ static u32 back4_offsets[16] = {
+ 60, 56, 52, 48,
+ 44, 40, 36, 32,
+ 28, 24, 20, 16,
+ 12, 8, 4, 0
+ };
+ static u32 fwrd2_offsets[16] = {
+ 0, 2, 4, 6,
+ 8, 10, 12, 14,
+ 16, 18, 20, 22,
+ 24, 26, 28, 30
+ };
+
+ int access_bitmode = 0;
+
+ /* Now fill in our stuff */
+ dev->netdev_ops = &mac8390_netdev_ops;
+
+ /* GAR, ei_status is actually a macro even though it looks global */
+ ei_status.name = cardname[type];
+ ei_status.word16 = word16[type];
+
+ /* Cabletron's TX/RX buffers are backwards */
+ if (type == MAC8390_CABLETRON) {
+ ei_status.tx_start_page = CABLETRON_TX_START_PG;
+ ei_status.rx_start_page = CABLETRON_RX_START_PG;
+ ei_status.stop_page = CABLETRON_RX_STOP_PG;
+ ei_status.rmem_start = dev->mem_start;
+ ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256;
+ } else {
+ ei_status.tx_start_page = WD_START_PG;
+ ei_status.rx_start_page = WD_START_PG + TX_PAGES;
+ ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
+ ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
+ ei_status.rmem_end = dev->mem_end;
+ }
+
+ /* Fill in model-specific information and functions */
+ switch (type) {
+ case MAC8390_FARALLON:
+ case MAC8390_APPLE:
+ switch (mac8390_testio(dev->mem_start)) {
+ case ACCESS_UNKNOWN:
+ pr_err("Don't know how to access card memory!\n");
+ return -ENODEV;
+
+ case ACCESS_16:
+ /* 16 bit card, register map is reversed */
+ ei_status.reset_8390 = mac8390_no_reset;
+ ei_status.block_input = slow_sane_block_input;
+ ei_status.block_output = slow_sane_block_output;
+ ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
+ ei_status.reg_offset = back4_offsets;
+ break;
+
+ case ACCESS_32:
+ /* 32 bit card, register map is reversed */
+ ei_status.reset_8390 = mac8390_no_reset;
+ ei_status.block_input = sane_block_input;
+ ei_status.block_output = sane_block_output;
+ ei_status.get_8390_hdr = sane_get_8390_hdr;
+ ei_status.reg_offset = back4_offsets;
+ access_bitmode = 1;
+ break;
+ }
+ break;
+
+ case MAC8390_ASANTE:
+ /* Some Asante cards pass the 32 bit test
+ * but overwrite system memory when run at 32 bit,
+ * so we run them all at 16 bit.
+ */
+ ei_status.reset_8390 = mac8390_no_reset;
+ ei_status.block_input = slow_sane_block_input;
+ ei_status.block_output = slow_sane_block_output;
+ ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
+ ei_status.reg_offset = back4_offsets;
+ break;
+
+ case MAC8390_CABLETRON:
+ /* 16 bit card, register map is short forward */
+ ei_status.reset_8390 = mac8390_no_reset;
+ ei_status.block_input = slow_sane_block_input;
+ ei_status.block_output = slow_sane_block_output;
+ ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
+ ei_status.reg_offset = fwrd2_offsets;
+ break;
+
+ case MAC8390_DAYNA:
+ case MAC8390_KINETICS:
+ /* 16 bit memory, register map is forward */
+ /* dayna and similar */
+ ei_status.reset_8390 = mac8390_no_reset;
+ ei_status.block_input = dayna_block_input;
+ ei_status.block_output = dayna_block_output;
+ ei_status.get_8390_hdr = dayna_get_8390_hdr;
+ ei_status.reg_offset = fwrd4_offsets;
+ break;
+
+ case MAC8390_INTERLAN:
+ /* 16 bit memory, register map is forward */
+ ei_status.reset_8390 = interlan_reset;
+ ei_status.block_input = slow_sane_block_input;
+ ei_status.block_output = slow_sane_block_output;
+ ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
+ ei_status.reg_offset = fwrd4_offsets;
+ break;
+
+ default:
+ pr_err("Card type %s is unsupported, sorry\n",
+ ndev->board->name);
+ return -ENODEV;
+ }
+
+ __NS8390_init(dev, 0);
+
+ /* Good, done, now spit out some messages */
+ pr_info("%s: %s in slot %X (type %s)\n",
+ dev->name, ndev->board->name, ndev->board->slot,
+ cardname[type]);
+ pr_info("MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n",
+ dev->dev_addr, dev->irq,
+ (unsigned int)(dev->mem_end - dev->mem_start) >> 10,
+ dev->mem_start, access_bitmode ? 32 : 16);
+ return 0;
+}
+
+static int mac8390_open(struct net_device *dev)
+{
+ int err;
+
+ __ei_open(dev);
+ err = request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev);
+ if (err)
+ pr_err("%s: unable to get IRQ %d\n", dev->name, dev->irq);
+ return err;
+}
+
+static int mac8390_close(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+ __ei_close(dev);
+ return 0;
+}
+
+static void mac8390_no_reset(struct net_device *dev)
+{
+ ei_status.txing = 0;
+ if (ei_debug > 1)
+ pr_info("reset not supported\n");
+}
+
+static void interlan_reset(struct net_device *dev)
+{
+ unsigned char *target = nubus_slot_addr(IRQ2SLOT(dev->irq));
+ if (ei_debug > 1)
+ pr_info("Need to reset the NS8390 t=%lu...", jiffies);
+ ei_status.txing = 0;
+ target[0xC0000] = 0;
+ if (ei_debug > 1)
+ pr_cont("reset complete\n");
+}
+
+/* dayna_memcpy_fromio/dayna_memcpy_toio */
+/* directly from daynaport.c by Alan Cox */
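+/*
+ * Dayna cards present each 16-bit word of packet memory in its own
+ * 32-bit longword window: card offsets are doubled and every other
+ * word skipped while copying. Byte-wide writes stomp the neighbouring
+ * byte, hence the read-modify-write of odd leading/trailing bytes.
+ */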
+static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from,
+ int count)
+{
+ volatile unsigned char *ptr;
+ unsigned char *target = to;
+ from <<= 1; /* word, skip overhead */
+ ptr = (unsigned char *)(dev->mem_start+from);
+ /* Leading byte? */
+ if (from & 2) {
+ *target++ = ptr[-1];
+ ptr += 2;
+ count--;
+ }
+ while (count >= 2) {
+ *(unsigned short *)target = *(unsigned short volatile *)ptr;
+ ptr += 4; /* skip cruft */
+ target += 2;
+ count -= 2;
+ }
+ /* Trailing byte? */
+ if (count)
+ *target = *ptr;
+}
+
+static void dayna_memcpy_tocard(struct net_device *dev, int to,
+ const void *from, int count)
+{
+ volatile unsigned short *ptr;
+ const unsigned char *src = from;
+ to <<= 1; /* word, skip overhead */
+ ptr = (unsigned short *)(dev->mem_start+to);
+ /* Leading byte? */
+ if (to & 2) { /* avoid a byte write (stomps on other data) */
+ ptr[-1] = (ptr[-1]&0xFF00)|*src++;
+ ptr++;
+ count--;
+ }
+ while (count >= 2) {
+ *ptr++ = *(unsigned short *)src; /* Copy and */
+ ptr++; /* skip cruft */
+ src += 2;
+ count -= 2;
+ }
+ /* Trailing byte? */
+ if (count) {
+ /* card doesn't like byte writes */
+ *ptr = (*ptr & 0x00FF) | (*src << 8);
+ }
+}
+
+/* sane block input/output */
+static void sane_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
+ memcpy_fromio(hdr, dev->mem_start + hdr_start, 4);
+ /* Fix endianness */
+ hdr->count = swab16(hdr->count);
+}
+
+static void sane_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
+ unsigned long xfer_start = xfer_base + dev->mem_start;
+
+ if (xfer_start + count > ei_status.rmem_end) {
+ /* We must wrap the input move. */
+ int semi_count = ei_status.rmem_end - xfer_start;
+ memcpy_fromio(skb->data, dev->mem_start + xfer_base,
+ semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, ei_status.rmem_start,
+ count);
+ } else {
+ memcpy_fromio(skb->data, dev->mem_start + xfer_base, count);
+ }
+}
+
+static void sane_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ long shmem = (start_page - WD_START_PG)<<8;
+
+ memcpy_toio(dev->mem_start + shmem, buf, count);
+}
+
+/* dayna block input/output */
+static void dayna_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
+
+ dayna_memcpy_fromcard(dev, hdr, hdr_start, 4);
+ /* Fix endianness */
+ hdr->count = (hdr->count & 0xFF) << 8 | (hdr->count >> 8);
+}
+
+static void dayna_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
+ unsigned long xfer_start = xfer_base+dev->mem_start;
+
+ /* Note the offset math is done in card address space, where
+ each 16-bit word occupies one 32-bit long in our space. */
+
+ if (xfer_start + count > ei_status.rmem_end) {
+ /* We must wrap the input move. */
+ int semi_count = ei_status.rmem_end - xfer_start;
+ dayna_memcpy_fromcard(dev, skb->data, xfer_base, semi_count);
+ count -= semi_count;
+ dayna_memcpy_fromcard(dev, skb->data + semi_count,
+ ei_status.rmem_start - dev->mem_start,
+ count);
+ } else {
+ dayna_memcpy_fromcard(dev, skb->data, xfer_base, count);
+ }
+}
+
+static void dayna_block_output(struct net_device *dev, int count,
+ const unsigned char *buf,
+ int start_page)
+{
+ long shmem = (start_page - WD_START_PG)<<8;
+
+ dayna_memcpy_tocard(dev, shmem, buf, count);
+}
+
+/* Cabletron block I/O */
+static void slow_sane_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
+ word_memcpy_fromcard(hdr, dev->mem_start + hdr_start, 4);
+ /* Register endianness - fix here rather than in 8390.c */
+ hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8);
+}
+
+static void slow_sane_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
+ unsigned long xfer_start = xfer_base+dev->mem_start;
+
+ if (xfer_start + count > ei_status.rmem_end) {
+ /* We must wrap the input move. */
+ int semi_count = ei_status.rmem_end - xfer_start;
+ word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base,
+ semi_count);
+ count -= semi_count;
+ word_memcpy_fromcard(skb->data + semi_count,
+ ei_status.rmem_start, count);
+ } else {
+ word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base,
+ count);
+ }
+}
+
+static void slow_sane_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ long shmem = (start_page - WD_START_PG)<<8;
+
+ word_memcpy_tocard(dev->mem_start + shmem, buf, count);
+}
+
+static void word_memcpy_tocard(unsigned long tp, const void *fp, int count)
+{
+ volatile unsigned short *to = (void *)tp;
+ const unsigned short *from = fp;
+
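+ /* round the byte count up to whole 16-bit words */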
+ count++;
+ count /= 2;
+
+ while (count--)
+ *to++ = *from++;
+}
+
+static void word_memcpy_fromcard(void *tp, unsigned long fp, int count)
+{
+ unsigned short *to = tp;
+ const volatile unsigned short *from = (const void *)fp;
+
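+ /* round the byte count up to whole 16-bit words */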
+ count++;
+ count /= 2;
+
+ while (count--)
+ *to++ = *from++;
+}
+
+
diff --git a/drivers/net/ethernet/8390/ne-h8300.c b/drivers/net/ethernet/8390/ne-h8300.c
new file mode 100644
index 000000000000..cd36a6a5f408
--- /dev/null
+++ b/drivers/net/ethernet/8390/ne-h8300.c
@@ -0,0 +1,685 @@
+/* ne-h8300.c: A NE2000 clone on H8/300 driver for linux. */
+/*
+ original ne.c
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
+
+ H8/300 modified
+ Yoshinori Sato <ysato@users.sourceforge.jp>
+*/
+
+static const char version1[] =
+"ne-h8300.c:v1.00 2004/04/11 ysato\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/jiffies.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#define EI_SHIFT(x) (ei_local->reg_offset[x])
+
+#include "8390.h"
+
+#define DRV_NAME "ne-h8300"
+
+/* Some defines that people can play with if so inclined. */
+
+/* Do we perform extra sanity checks on stuff ? */
+/* #define NE_SANITY_CHECK */
+
+/* Do we implement the read before write bugfix ? */
+/* #define NE_RW_BUGFIX */
+
+/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
+/* #define PACKETBUF_MEMSIZE 0x40 */
+
+/* A zero-terminated list of I/O addresses to be probed at boot. */
+
+/* ---- No user-serviceable parts below ---- */
+
+static const char version[] =
+ "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include "lib8390.c"
+
+#define NE_BASE (dev->base_addr)
+#define NE_CMD 0x00
+#define NE_DATAPORT (ei_status.word16?0x20:0x10) /* NatSemi-defined port window offset. */
+#define NE_RESET (ei_status.word16?0x3f:0x1f) /* Issue a read to reset, a write to clear. */
+#define NE_IO_EXTENT (ei_status.word16?0x40:0x20)
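+/* On a 16-bit bus area the register file is spread over twice the
+ * address space, so the data port, reset port and I/O extent double. */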
+
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+static int ne_probe1(struct net_device *dev, int ioaddr);
+
+static int ne_open(struct net_device *dev);
+static int ne_close(struct net_device *dev);
+
+static void ne_reset_8390(struct net_device *dev);
+static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ne_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ne_block_output(struct net_device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+
+
+static u32 reg_offset[16];
+
+static int __init init_reg_offset(struct net_device *dev, unsigned long base_addr)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ int i;
+ unsigned char bus_width;
+
+ bus_width = *(volatile unsigned char *)ABWCR;
+ bus_width &= 1 << ((base_addr >> 21) & 7);
+
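+ /* A clear ABWCR bit selects a 16-bit bus area; the 8390 registers
+ then sit on the odd byte of each 16-bit word (offset i * 2 + 1).
+ On an 8-bit area they are simply consecutive. */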
+ for (i = 0; i < ARRAY_SIZE(reg_offset); i++)
+ if (bus_width == 0)
+ reg_offset[i] = i * 2 + 1;
+ else
+ reg_offset[i] = i;
+
+ ei_local->reg_offset = reg_offset;
+ return 0;
+}
+
+static int __initdata h8300_ne_count = 0;
+#ifdef CONFIG_H8300H_H8MAX
+static unsigned long __initdata h8300_ne_base[] = { 0x800600 };
+static int h8300_ne_irq[] = {EXT_IRQ4};
+#endif
+#ifdef CONFIG_H8300H_AKI3068NET
+static unsigned long __initdata h8300_ne_base[] = { 0x200000 };
+static int h8300_ne_irq[] = {EXT_IRQ5};
+#endif
+
+static inline int init_dev(struct net_device *dev)
+{
+ if (h8300_ne_count < ARRAY_SIZE(h8300_ne_base)) {
+ dev->base_addr = h8300_ne_base[h8300_ne_count];
+ dev->irq = h8300_ne_irq[h8300_ne_count];
+ h8300_ne_count++;
+ return 0;
+ } else
+ return -ENODEV;
+}
+
+/* Probe for various non-shared-memory ethercards.
+
+ NEx000-clone boards have a Station Address PROM (SAPROM) in the packet
+ buffer memory space. NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of
+ the SAPROM, while other supposed NE2000 clones must be detected by their
+ SA prefix.
+
+ Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
+ mode results in doubled values, which can be detected and compensated for.
+
+ The probe is also responsible for initializing the card and filling
+ in the 'dev' and 'ei_status' structures.
+
+ We use the minimum memory size for some ethercard product lines, iff we can't
+ distinguish models. You can increase the packet buffer size by setting
+ PACKETBUF_MEMSIZE. Reported Cabletron packet buffer locations are:
+ E1010 starts at 0x100 and ends at 0x2000.
+ E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory")
+ E2010 starts at 0x100 and ends at 0x4000.
+ E2010-x starts at 0x100 and ends at 0xffff. */
+
+static int __init do_ne_probe(struct net_device *dev)
+{
+ unsigned int base_addr = dev->base_addr;
+
+ /* First check any supplied i/o locations. User knows best. <cough> */
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return ne_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ return -ENODEV;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, NE_IO_EXTENT);
+}
+
+#ifndef MODULE
+struct net_device * __init ne_probe(int unit)
+{
+ struct net_device *dev = ____alloc_ei_netdev(0);
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (init_dev(dev)) {
+ free_netdev(dev);
+ return ERR_PTR(-ENODEV);
+ }
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = init_reg_offset(dev, dev->base_addr);
+ if (err)
+ goto out;
+
+ err = do_ne_probe(dev);
+ if (err)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static const struct net_device_ops ne_netdev_ops = {
+ .ndo_open = ne_open,
+ .ndo_stop = ne_close,
+ .ndo_start_xmit = __ei_start_xmit,
+ .ndo_tx_timeout = __ei_tx_timeout,
+ .ndo_get_stats = __ei_get_stats,
+ .ndo_set_rx_mode = __ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = __ei_poll,
+#endif
+};
+
+static int __init ne_probe1(struct net_device *dev, int ioaddr)
+{
+ int i;
+ unsigned char SA_prom[16];
+ int wordlength = 2;
+ const char *name = NULL;
+ int start_page, stop_page;
+ int reg0, ret;
+ static unsigned version_printed;
+ struct ei_device *ei_local = netdev_priv(dev);
+ unsigned char bus_width;
+
+ if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ reg0 = inb_p(ioaddr);
+ if (reg0 == 0xFF) {
+ ret = -ENODEV;
+ goto err_out;
+ }
+
+ /* Do a preliminary verification that we have an 8390. */
+ {
+ int regd;
+ outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
+ regd = inb_p(ioaddr + EI_SHIFT(0x0d));
+ outb_p(0xff, ioaddr + EI_SHIFT(0x0d));
+ outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
+ inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
+ if (inb_p(ioaddr + EN0_COUNTER0) != 0) {
+ outb_p(reg0, ioaddr + EI_SHIFT(0));
+ outb_p(regd, ioaddr + EI_SHIFT(0x0d)); /* Restore the old values. */
+ ret = -ENODEV;
+ goto err_out;
+ }
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(KERN_INFO "%s", version1);
+
+ printk(KERN_INFO "NE*000 ethercard probe at %08x:", ioaddr);
+
+ /* Read the 16 bytes of station address PROM.
+ We must first initialize registers, similar to NS8390_init(eifdev, 0).
+ We can't reliably read the SAPROM address without this.
+ (I learned the hard way!). */
+ {
+ struct {unsigned char value, offset; } program_seq[] =
+ {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
+ {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {32, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
+ {0x00, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(program_seq); i++)
+ outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
+
+ }
+ bus_width = *(volatile unsigned char *)ABWCR;
+ bus_width &= 1 << ((ioaddr >> 21) & 7);
+ ei_status.word16 = (bus_width == 0); /* temporary setting */
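+ /* A word-wide card read in byte-wide mode returns each PROM byte
+ twice; the dummy read below presumably discards the duplicate. */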
+ for(i = 0; i < 16 /*sizeof(SA_prom)*/; i++) {
+ SA_prom[i] = inb_p(ioaddr + NE_DATAPORT);
+ inb_p(ioaddr + NE_DATAPORT); /* dummy read */
+ }
+
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+
+ if (bus_width)
+ wordlength = 1;
+ else
+ outb_p(0x49, ioaddr + EN0_DCFG);
+
+ /* Set up the rest of the parameters. */
+ name = (wordlength == 2) ? "NE2000" : "NE1000";
+
+ if (! dev->irq) {
+ printk(" failed to detect IRQ line.\n");
+ ret = -EAGAIN;
+ goto err_out;
+ }
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+ ret = request_irq(dev->irq, __ei_interrupt, 0, name, dev);
+ if (ret) {
+ printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
+ goto err_out;
+ }
+
+ dev->base_addr = ioaddr;
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++)
+ dev->dev_addr[i] = SA_prom[i];
+ printk(" %pM\n", dev->dev_addr);
+
+ printk("%s: %s found at %#x, using IRQ %d.\n",
+ dev->name, name, ioaddr, dev->irq);
+
+ ei_status.name = name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+ ei_status.word16 = (wordlength == 2);
+
+ ei_status.rx_start_page = start_page + TX_PAGES;
+#ifdef PACKETBUF_MEMSIZE
+ /* Allow the packet buffer size to be overridden by know-it-alls. */
+ ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+#endif
+
+ ei_status.reset_8390 = &ne_reset_8390;
+ ei_status.block_input = &ne_block_input;
+ ei_status.block_output = &ne_block_output;
+ ei_status.get_8390_hdr = &ne_get_8390_hdr;
+ ei_status.priv = 0;
+
+ dev->netdev_ops = &ne_netdev_ops;
+
+ __NS8390_init(dev, 0);
+
+ ret = register_netdev(dev);
+ if (ret)
+ goto out_irq;
+ return 0;
+out_irq:
+ free_irq(dev->irq, dev);
+err_out:
+ release_region(ioaddr, NE_IO_EXTENT);
+ return ret;
+}
+
+static int ne_open(struct net_device *dev)
+{
+ __ei_open(dev);
+ return 0;
+}
+
+static int ne_close(struct net_device *dev)
+{
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
+ __ei_close(dev);
+ return 0;
+}
+
+/* Hard reset the card. This used to pause for the same period that a
+ 8390 reset command required, but that shouldn't be necessary. */
+
+static void ne_reset_8390(struct net_device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+ struct ei_device *ei_local = netdev_priv(dev);
+
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
+
+ /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
+ outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
+ if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
+ printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name);
+ break;
+ }
+ outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+
+ if (ei_status.dmaing)
+ {
+ printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD);
+ outb_p(sizeof(struct e8390_pkt_hdr), NE_BASE + EN0_RCNTLO);
+ outb_p(0, NE_BASE + EN0_RCNTHI);
+ outb_p(0, NE_BASE + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, NE_BASE + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
+
+ if (ei_status.word16) {
+ int len;
+ unsigned short *p = (unsigned short *)hdr;
+ for (len = sizeof(struct e8390_pkt_hdr)>>1; len > 0; len--)
+ *p++ = inw(NE_BASE + NE_DATAPORT);
+ } else
+ insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
+
+ outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+
+ le16_to_cpus(&hdr->count);
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you
+ are porting to a new ethercard, look at the packet driver source for hints.
+ The NEx000 doesn't share the on-board packet memory -- you have to put
+ the packet out through the "remote DMA" dataport using outb. */
+
+static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+#ifdef NE_SANITY_CHECK
+ int xfer_count = count;
+#endif
+ char *buf = skb->data;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing)
+ {
+ printk(KERN_EMERG "%s: DMAing conflict in ne_block_input "
+ "[DMAstat:%d][irqlock:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD);
+ outb_p(count & 0xff, NE_BASE + EN0_RCNTLO);
+ outb_p(count >> 8, NE_BASE + EN0_RCNTHI);
+ outb_p(ring_offset & 0xff, NE_BASE + EN0_RSARLO);
+ outb_p(ring_offset >> 8, NE_BASE + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
+ if (ei_status.word16)
+ {
+ int len;
+ unsigned short *p = (unsigned short *)buf;
+ for (len = count>>1; len > 0; len--)
+ *p++ = inw(NE_BASE + NE_DATAPORT);
+ if (count & 0x01)
+ {
+ buf[count-1] = inb(NE_BASE + NE_DATAPORT);
+#ifdef NE_SANITY_CHECK
+ xfer_count++;
+#endif
+ }
+ } else {
+ insb(NE_BASE + NE_DATAPORT, buf, count);
+ }
+
+#ifdef NE_SANITY_CHECK
+ /* This was for the ALPHA version only, but enough people have
+ been encountering problems so it is still here. If you see
+ this message you either 1) have a slightly incompatible clone
+ or 2) have noise/speed problems with your bus. */
+
+ if (ei_debug > 1)
+ {
+ /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
+ -- it's broken for Rx on some cards! */
+ int high = inb_p(NE_BASE + EN0_RSARHI);
+ int low = inb_p(NE_BASE + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if (((ring_offset + xfer_count) & 0xff) == low)
+ break;
+ } while (--tries > 0);
+ if (tries <= 0)
+ printk(KERN_WARNING "%s: RX transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, ring_offset + xfer_count, addr);
+ }
+#endif
+ outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+static void ne_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ unsigned long dma_start;
+#ifdef NE_SANITY_CHECK
+ int retries = 0;
+#endif
+
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+
+ if (ei_status.word16 && (count & 0x01))
+ count++;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing)
+ {
+ printk(KERN_EMERG "%s: DMAing conflict in ne_block_output."
+ "[DMAstat:%d][irqlock:%d]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, NE_BASE + NE_CMD);
+
+#ifdef NE_SANITY_CHECK
+retry:
+#endif
+
+#ifdef NE8390_RW_BUGFIX
+ /* Handle the read-before-write bug the same way as the
+ Crynwr packet driver -- the NatSemi method doesn't work.
+ Actually this doesn't always work either, but if you have
+ problems with your NEx000 this is better than nothing! */
+
+ outb_p(0x42, NE_BASE + EN0_RCNTLO);
+ outb_p(0x00, NE_BASE + EN0_RCNTHI);
+ outb_p(0x42, NE_BASE + EN0_RSARLO);
+ outb_p(0x00, NE_BASE + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
+ /* Make certain that the dummy read has occurred. */
+ udelay(6);
+#endif
+
+ outb_p(ENISR_RDC, NE_BASE + EN0_ISR);
+
+ /* Now the normal output. */
+ outb_p(count & 0xff, NE_BASE + EN0_RCNTLO);
+ outb_p(count >> 8, NE_BASE + EN0_RCNTHI);
+ outb_p(0x00, NE_BASE + EN0_RSARLO);
+ outb_p(start_page, NE_BASE + EN0_RSARHI);
+
+ outb_p(E8390_RWRITE+E8390_START, NE_BASE + NE_CMD);
+ if (ei_status.word16) {
+ int len;
+ unsigned short *p = (unsigned short *)buf;
+ for (len = count>>1; len > 0; len--)
+ outw(*p++, NE_BASE + NE_DATAPORT);
+ } else {
+ outsb(NE_BASE + NE_DATAPORT, buf, count);
+ }
+
+ dma_start = jiffies;
+
+#ifdef NE_SANITY_CHECK
+ /* This was for the ALPHA version only, but enough people have
+ been encountering problems so it is still here. */
+
+ if (ei_debug > 1)
+ {
+ /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ int high = inb_p(NE_BASE + EN0_RSARHI);
+ int low = inb_p(NE_BASE + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if ((start_page << 8) + count == addr)
+ break;
+ } while (--tries > 0);
+
+ if (tries <= 0)
+ {
+ printk(KERN_WARNING "%s: Tx packet transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, (start_page << 8) + count, addr);
+ if (retries++ == 0)
+ goto retry;
+ }
+ }
+#endif
+
+ while ((inb_p(NE_BASE + EN0_ISR) & ENISR_RDC) == 0)
+ if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
+ printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
+ ne_reset_8390(dev);
+ __NS8390_init(dev,1);
+ break;
+ }
+
+ outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+
+#ifdef MODULE
+#define MAX_NE_CARDS 1 /* Max number of NE cards per module */
+static struct net_device *dev_ne[MAX_NE_CARDS];
+static int io[MAX_NE_CARDS];
+static int irq[MAX_NE_CARDS];
+static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(bad, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s)");
+MODULE_DESCRIPTION("H8/300 NE2000 Ethernet driver");
+MODULE_LICENSE("GPL");
+
+/* This is set up so that no ISA autoprobe takes place. We can't guarantee
+that the ne2k probe is the last 8390 based probe to take place (as it
+is at boot) and so the probe will get confused by any other 8390 cards.
+ISA device autoprobes on a running machine are not recommended anyway. */
+
+int init_module(void)
+{
+ int this_dev, found = 0;
+ int err;
+
+ for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+ struct net_device *dev = ____alloc_ei_netdev(0);
+ if (!dev)
+ break;
+ if (io[this_dev]) {
+ dev->irq = irq[this_dev];
+ dev->mem_end = bad[this_dev];
+ dev->base_addr = io[this_dev];
+ } else {
+ dev->base_addr = h8300_ne_base[this_dev];
+ dev->irq = h8300_ne_irq[this_dev];
+ }
+ err = init_reg_offset(dev, dev->base_addr);
+ if (!err) {
+ if (do_ne_probe(dev) == 0) {
+ dev_ne[found++] = dev;
+ continue;
+ }
+ }
+ free_netdev(dev);
+ if (found)
+ break;
+ if (io[this_dev] != 0)
+ printk(KERN_WARNING "ne.c: No NE*000 card found at i/o = %#x\n", dev->base_addr);
+ else
+ printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\" value(s) for ISA cards.\n");
+ return -ENXIO;
+ }
+ if (found)
+ return 0;
+ return -ENODEV;
+}
+
+void cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+ struct net_device *dev = dev_ne[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
new file mode 100644
index 000000000000..1063093b3afc
--- /dev/null
+++ b/drivers/net/ethernet/8390/ne.c
@@ -0,0 +1,1008 @@
+/* ne.c: A general non-shared-memory NS8390 ethernet driver for linux. */
+/*
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
+
+ This driver should work with many programmed-I/O 8390-based ethernet
+ boards. Currently it supports the NE1000, NE2000, many clones,
+ and some Cabletron products.
+
+ Changelog:
+
+ Paul Gortmaker : use ENISR_RDC to monitor Tx PIO uploads, made
+ sanity checks and bad clone support optional.
+ Paul Gortmaker : new reset code, reset card after probe at boot.
+ Paul Gortmaker : multiple card support for module users.
+ Paul Gortmaker : Support for PCI ne2k clones, similar to lance.c
+ Paul Gortmaker : Allow users with bad cards to avoid full probe.
+ Paul Gortmaker : PCI probe changes, more PCI cards supported.
+ rjohnson@analogic.com : Changed init order so an interrupt will only
+ occur after memory is allocated for dev->priv. Deallocated memory
+ last in cleanup_module()
+ Richard Guenther : Added support for ISAPnP cards
+ Paul Gortmaker : Discontinued PCI support - use ne2k-pci.c instead.
+ Hayato Fujiwara : Add m32r support.
+
+*/
+
+/* Routines for the NatSemi-based designs (NE[12]000). */
+
+static const char version1[] =
+"ne.c:v1.10 9/23/94 Donald Becker (becker@scyld.com)\n";
+static const char version2[] =
+"Last modified Nov 1, 2000 by Paul Gortmaker\n";
+
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/isapnp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include "8390.h"
+
+#define DRV_NAME "ne"
+
+/* Some defines that people can play with if so inclined. */
+
+/* Do we support clones that don't adhere to 14,15 of the SAprom ? */
+#define SUPPORT_NE_BAD_CLONES
+/* 0xbad = bad sig or no reset ack */
+#define BAD 0xbad
+
+#define MAX_NE_CARDS 4 /* Max number of NE cards per module */
+static struct platform_device *pdev_ne[MAX_NE_CARDS];
+static int io[MAX_NE_CARDS];
+static int irq[MAX_NE_CARDS];
+static int bad[MAX_NE_CARDS];
+
+#ifdef MODULE
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(bad, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es), required");
+MODULE_PARM_DESC(irq, "IRQ number(s)");
+MODULE_PARM_DESC(bad, "Accept card(s) with bad signatures");
+MODULE_DESCRIPTION("NE1000/NE2000 ISA/PnP Ethernet driver");
+MODULE_LICENSE("GPL");
+#endif /* MODULE */
+
+/* Do we perform extra sanity checks on stuff ? */
+/* #define NE_SANITY_CHECK */
+
+/* Do we implement the read before write bugfix ? */
+/* #define NE_RW_BUGFIX */
+
+/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
+/* #define PACKETBUF_MEMSIZE 0x40 */
+
+/* This is set up so that no ISA autoprobe takes place. We can't guarantee
+that the ne2k probe is the last 8390 based probe to take place (as it
+is at boot) and so the probe will get confused by any other 8390 cards.
+ISA device autoprobes on a running machine are not recommended anyway. */
+#if !defined(MODULE) && (defined(CONFIG_ISA) || defined(CONFIG_M32R))
+/* Do we need a portlist for the ISA auto-probe ? */
+#define NEEDS_PORTLIST
+#endif
+
+/* A zero-terminated list of I/O addresses to be probed at boot. */
+#ifdef NEEDS_PORTLIST
+static unsigned int netcard_portlist[] __initdata = {
+ 0x300, 0x280, 0x320, 0x340, 0x360, 0x380, 0
+};
+#endif
+
+static struct isapnp_device_id isapnp_clone_list[] __initdata = {
+ { ISAPNP_CARD_ID('A','X','E',0x2011),
+ ISAPNP_VENDOR('A','X','E'), ISAPNP_FUNCTION(0x2011),
+ (long) "NetGear EA201" },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('E','D','I'), ISAPNP_FUNCTION(0x0216),
+ (long) "NN NE2000" },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('P','N','P'), ISAPNP_FUNCTION(0x80d6),
+ (long) "Generic PNP" },
+ { } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(isapnp, isapnp_clone_list);
+
+#ifdef SUPPORT_NE_BAD_CLONES
+/* A list of bad clones that we nonetheless recognize. */
+static struct { const char *name8, *name16; unsigned char SAprefix[4];}
+bad_clone_list[] __initdata = {
+ {"DE100", "DE200", {0x00, 0xDE, 0x01,}},
+ {"DE120", "DE220", {0x00, 0x80, 0xc8,}},
+ {"DFI1000", "DFI2000", {'D', 'F', 'I',}}, /* Original, eh? */
+ {"EtherNext UTP8", "EtherNext UTP16", {0x00, 0x00, 0x79}},
+ {"NE1000","NE2000-invalid", {0x00, 0x00, 0xd8}}, /* Ancient real NE1000. */
+ {"NN1000", "NN2000", {0x08, 0x03, 0x08}}, /* Outlaw no-name clone. */
+ {"4-DIM8","4-DIM16", {0x00,0x00,0x4d,}}, /* Outlaw 4-Dimension cards. */
+ {"Con-Intl_8", "Con-Intl_16", {0x00, 0x00, 0x24}}, /* Connect Int'nl */
+ {"ET-100","ET-200", {0x00, 0x45, 0x54}}, /* YANG and YA clone */
+ {"COMPEX","COMPEX16",{0x00,0x80,0x48}}, /* Broken ISA Compex cards */
+ {"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */
+ {"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */
+ {"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */
+#ifdef CONFIG_MACH_TX49XX
+ {"RBHMA4X00-RTL8019", "RBHMA4X00-RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */
+#endif
+ {"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */
+ {NULL,}
+};
+#endif
+
+/* ---- No user-serviceable parts below ---- */
+
+#define NE_BASE (dev->base_addr)
+#define NE_CMD 0x00
+#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */
+#define NE_IO_EXTENT 0x20
+
+#define NE1SM_START_PG 0x20 /* First page of TX buffer */
+#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+#if defined(CONFIG_PLAT_MAPPI)
+# define DCR_VAL 0x4b
+#elif defined(CONFIG_PLAT_OAKS32R) || \
+ defined(CONFIG_MACH_TX49XX)
+# define DCR_VAL 0x48 /* 8-bit mode */
+#else
+# define DCR_VAL 0x49
+#endif
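+/* Bit 0 (WTS) of the 8390 data configuration register selects
+ * word-wide DMA transfers, so 0x48 is byte-wide and 0x49 word-wide;
+ * 0x4b additionally sets the byte-order bit for the Mappi platform. */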
+
+static int ne_probe1(struct net_device *dev, unsigned long ioaddr);
+static int ne_probe_isapnp(struct net_device *dev);
+
+static void ne_reset_8390(struct net_device *dev);
+static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ne_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ne_block_output(struct net_device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+
+
+/* Probe for various non-shared-memory ethercards.
+
+ NEx000-clone boards have a Station Address PROM (SAPROM) in the packet
+ buffer memory space. NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of
+ the SAPROM, while other supposed NE2000 clones must be detected by their
+ SA prefix.
+
+ Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
+ mode results in doubled values, which can be detected and compensated for.
+
+ The probe is also responsible for initializing the card and filling
+ in the 'dev' and 'ei_status' structures.
+
+ We use the minimum memory size for some ethercard product lines, iff we can't
+ distinguish models. You can increase the packet buffer size by setting
+ PACKETBUF_MEMSIZE. Reported Cabletron packet buffer locations are:
+ E1010 starts at 0x100 and ends at 0x2000.
+ E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory")
+ E2010 starts at 0x100 and ends at 0x4000.
+ E2010-x starts at 0x100 and ends at 0xffff. */
+
+static int __init do_ne_probe(struct net_device *dev)
+{
+ unsigned long base_addr = dev->base_addr;
+#ifdef NEEDS_PORTLIST
+ int orig_irq = dev->irq;
+#endif
+
+ /* First check any supplied i/o locations. User knows best. <cough> */
+ if (base_addr > 0x1ff) { /* Check a single specified location. */
+ int ret = ne_probe1(dev, base_addr);
+ if (ret)
+ printk(KERN_WARNING "ne.c: No NE*000 card found at "
+ "i/o = %#lx\n", base_addr);
+ return ret;
+ }
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ /* Then look for any installed ISAPnP clones */
+ if (isapnp_present() && (ne_probe_isapnp(dev) == 0))
+ return 0;
+
+#ifdef NEEDS_PORTLIST
+ /* Last resort. The semi-risky ISA auto-probe. */
+ for (base_addr = 0; netcard_portlist[base_addr] != 0; base_addr++) {
+ int ioaddr = netcard_portlist[base_addr];
+ dev->irq = orig_irq;
+ if (ne_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+#endif
+
+ return -ENODEV;
+}
+
+static int __init ne_probe_isapnp(struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; isapnp_clone_list[i].vendor != 0; i++) {
+ struct pnp_dev *idev = NULL;
+
+ while ((idev = pnp_find_dev(NULL,
+ isapnp_clone_list[i].vendor,
+ isapnp_clone_list[i].function,
+ idev))) {
+ /* Avoid already found cards from previous calls */
+ if (pnp_device_attach(idev) < 0)
+ continue;
+ if (pnp_activate_dev(idev) < 0) {
+ pnp_device_detach(idev);
+ continue;
+ }
+ /* if no io and irq, search for next */
+ if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0)) {
+ pnp_device_detach(idev);
+ continue;
+ }
+ /* found it */
+ dev->base_addr = pnp_port_start(idev, 0);
+ dev->irq = pnp_irq(idev, 0);
+ printk(KERN_INFO "ne.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
+ (char *) isapnp_clone_list[i].driver_data,
+ dev->base_addr, dev->irq);
+ if (ne_probe1(dev, dev->base_addr) != 0) { /* Shouldn't happen. */
+ printk(KERN_ERR "ne.c: Probe of ISAPnP card at %#lx failed.\n", dev->base_addr);
+ pnp_device_detach(idev);
+ return -ENXIO;
+ }
+ ei_status.priv = (unsigned long)idev;
+ break;
+ }
+ if (!idev)
+ continue;
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
+{
+ int i;
+ unsigned char SA_prom[32];
+ int wordlength = 2;
+ const char *name = NULL;
+ int start_page, stop_page;
+ int neX000, ctron, copam, bad_card;
+ int reg0, ret;
+ static unsigned version_printed;
+
+ if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ reg0 = inb_p(ioaddr);
+ if (reg0 == 0xFF) {
+ ret = -ENODEV;
+ goto err_out;
+ }
+
+ /* Do a preliminary verification that we have an 8390. */
+ {
+ int regd;
+ outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
+ regd = inb_p(ioaddr + 0x0d);
+ outb_p(0xff, ioaddr + 0x0d);
+ outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
+ inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
+ if (inb_p(ioaddr + EN0_COUNTER0) != 0) {
+ outb_p(reg0, ioaddr);
+ outb_p(regd, ioaddr + 0x0d); /* Restore the old values. */
+ ret = -ENODEV;
+ goto err_out;
+ }
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(KERN_INFO "%s%s", version1, version2);
+
+ printk(KERN_INFO "NE*000 ethercard probe at %#3lx:", ioaddr);
+
+ /* A user with a poor card that fails to ack the reset, or that
+ does not have a valid 0x57,0x57 signature can still use this
+ without having to recompile. Specifying an i/o address along
+ with an otherwise unused dev->mem_end value of "0xBAD" will
+ cause the driver to skip these parts of the probe. */
+
+ bad_card = ((dev->base_addr != 0) && (dev->mem_end == BAD));
+
+ /* Reset card. Who knows what dain-bramaged state it was left in. */
+
+ {
+ unsigned long reset_start_time = jiffies;
+
+ /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
+ outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET);
+
+ while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
+ if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
+ if (bad_card) {
+ printk(" (warning: no reset ack)");
+ break;
+ } else {
+ printk(" not found (no reset ack).\n");
+ ret = -ENODEV;
+ goto err_out;
+ }
+ }
+
+ outb_p(0xff, ioaddr + EN0_ISR); /* Ack all intr. */
+ }
+
+ /* Read the 16 bytes of station address PROM.
+ We must first initialize registers, similar to NS8390p_init(eifdev, 0).
+ We can't reliably read the SAPROM address without this.
+ (I learned the hard way!). */
+ {
+ struct {unsigned char value, offset; } program_seq[] =
+ {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
+ {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {32, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
+ {0x00, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(program_seq); i++)
+ outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
+
+ }
+ for(i = 0; i < 32 /*sizeof(SA_prom)*/; i+=2) {
+ SA_prom[i] = inb(ioaddr + NE_DATAPORT);
+ SA_prom[i+1] = inb(ioaddr + NE_DATAPORT);
+ if (SA_prom[i] != SA_prom[i+1])
+ wordlength = 1;
+ }
+
+ if (wordlength == 2)
+ {
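+ /* Byte-wide reads doubled every PROM byte; keep one copy of
+ each doubled pair. */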
+ for (i = 0; i < 16; i++)
+ SA_prom[i] = SA_prom[i+i];
+ /* We must set the 8390 for word mode. */
+ outb_p(DCR_VAL, ioaddr + EN0_DCFG);
+ start_page = NESM_START_PG;
+
+ /*
+ * Realtek RTL8019AS datasheet says that the PSTOP register
+ * shouldn't exceed 0x60 in 8-bit mode.
+ * This chip can be identified by reading the signature from
+ * the remote byte count registers (otherwise write-only)...
+ */
+ if ((DCR_VAL & 0x01) == 0 && /* 8-bit mode */
+ inb(ioaddr + EN0_RCNTLO) == 0x50 &&
+ inb(ioaddr + EN0_RCNTHI) == 0x70)
+ stop_page = 0x60;
+ else
+ stop_page = NESM_STOP_PG;
+ } else {
+ start_page = NE1SM_START_PG;
+ stop_page = NE1SM_STOP_PG;
+ }
+
+#if defined(CONFIG_PLAT_MAPPI) || defined(CONFIG_PLAT_OAKS32R)
+ neX000 = ((SA_prom[14] == 0x57 && SA_prom[15] == 0x57)
+ || (SA_prom[14] == 0x42 && SA_prom[15] == 0x42));
+#else
+ neX000 = (SA_prom[14] == 0x57 && SA_prom[15] == 0x57);
+#endif
+ ctron = (SA_prom[0] == 0x00 && SA_prom[1] == 0x00 && SA_prom[2] == 0x1d);
+ copam = (SA_prom[14] == 0x49 && SA_prom[15] == 0x00);
+
+ /* Set up the rest of the parameters. */
+ if (neX000 || bad_card || copam) {
+ name = (wordlength == 2) ? "NE2000" : "NE1000";
+ }
+ else if (ctron)
+ {
+ name = (wordlength == 2) ? "Ctron-8" : "Ctron-16";
+ start_page = 0x01;
+ stop_page = (wordlength == 2) ? 0x40 : 0x20;
+ }
+ else
+ {
+#ifdef SUPPORT_NE_BAD_CLONES
+ /* Ack! Well, there might be a *bad* NE*000 clone there.
+ Check for total bogus addresses. */
+ for (i = 0; bad_clone_list[i].name8; i++)
+ {
+ if (SA_prom[0] == bad_clone_list[i].SAprefix[0] &&
+ SA_prom[1] == bad_clone_list[i].SAprefix[1] &&
+ SA_prom[2] == bad_clone_list[i].SAprefix[2])
+ {
+ if (wordlength == 2)
+ {
+ name = bad_clone_list[i].name16;
+ } else {
+ name = bad_clone_list[i].name8;
+ }
+ break;
+ }
+ }
+ if (bad_clone_list[i].name8 == NULL)
+ {
+ printk(" not found (invalid signature %2.2x %2.2x).\n",
+ SA_prom[14], SA_prom[15]);
+ ret = -ENXIO;
+ goto err_out;
+ }
+#else
+ printk(" not found.\n");
+ ret = -ENXIO;
+ goto err_out;
+#endif
+ }
+
+ if (dev->irq < 2)
+ {
+ unsigned long cookie = probe_irq_on();
+ outb_p(0x50, ioaddr + EN0_IMR); /* Enable one interrupt. */
+ outb_p(0x00, ioaddr + EN0_RCNTLO);
+ outb_p(0x00, ioaddr + EN0_RCNTHI);
+ outb_p(E8390_RREAD+E8390_START, ioaddr); /* Trigger it... */
+ mdelay(10); /* wait 10ms for interrupt to propagate */
+ outb_p(0x00, ioaddr + EN0_IMR); /* Mask it again. */
+ dev->irq = probe_irq_off(cookie);
+ if (ei_debug > 2)
+ printk(" autoirq is %d\n", dev->irq);
+ } else if (dev->irq == 2)
+ /* Fixup for users that don't know that IRQ 2 is really IRQ 9,
+ or don't know which one to set. */
+ dev->irq = 9;
+
+ if (! dev->irq) {
+ printk(" failed to detect IRQ line.\n");
+ ret = -EAGAIN;
+ goto err_out;
+ }
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+ ret = request_irq(dev->irq, eip_interrupt, 0, name, dev);
+ if (ret) {
+ printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
+ goto err_out;
+ }
+
+ dev->base_addr = ioaddr;
+
+#ifdef CONFIG_PLAT_MAPPI
+ outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
+ ioaddr + E8390_CMD); /* 0x61 */
+ for (i = 0 ; i < ETHER_ADDR_LEN ; i++) {
+ dev->dev_addr[i] = SA_prom[i]
+ = inb_p(ioaddr + EN1_PHYS_SHIFT(i));
+ }
+#else
+ for(i = 0; i < ETHER_ADDR_LEN; i++) {
+ dev->dev_addr[i] = SA_prom[i];
+ }
+#endif
+
+ printk("%pM\n", dev->dev_addr);
+
+ ei_status.name = name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+
+ /* Use 16-bit mode only if this wasn't overridden by DCR_VAL */
+ ei_status.word16 = (wordlength == 2 && (DCR_VAL & 0x01));
+
+ ei_status.rx_start_page = start_page + TX_PAGES;
+#ifdef PACKETBUF_MEMSIZE
+ /* Allow the packet buffer size to be overridden by know-it-alls. */
+ ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+#endif
+
+ ei_status.reset_8390 = &ne_reset_8390;
+ ei_status.block_input = &ne_block_input;
+ ei_status.block_output = &ne_block_output;
+ ei_status.get_8390_hdr = &ne_get_8390_hdr;
+ ei_status.priv = 0;
+
+ dev->netdev_ops = &eip_netdev_ops;
+ NS8390p_init(dev, 0);
+
+ ret = register_netdev(dev);
+ if (ret)
+ goto out_irq;
+ printk(KERN_INFO "%s: %s found at %#lx, using IRQ %d.\n",
+ dev->name, name, ioaddr, dev->irq);
+ return 0;
+
+out_irq:
+ free_irq(dev->irq, dev);
+err_out:
+ release_region(ioaddr, NE_IO_EXTENT);
+ return ret;
+}
+
+/* Hard reset the card. This used to pause for the same period that a
+ 8390 reset command required, but that shouldn't be necessary. */
+
+static void ne_reset_8390(struct net_device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
+
+ /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
+ outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
+ if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
+ printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name);
+ break;
+ }
+ outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int nic_base = dev->base_addr;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+
+ if (ei_status.dmaing)
+ {
+ printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+ outb_p(0, nic_base + EN0_RCNTHI);
+ outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ if (ei_status.word16)
+ insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+ else
+ insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+
+ le16_to_cpus(&hdr->count);
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you
+ are porting to a new ethercard, look at the packet driver source for hints.
+ The NEx000 doesn't share the on-board packet memory -- you have to put
+ the packet out through the "remote DMA" dataport using outb. */
+
+static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+#ifdef NE_SANITY_CHECK
+ int xfer_count = count;
+#endif
+ int nic_base = dev->base_addr;
+ char *buf = skb->data;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing)
+ {
+ printk(KERN_EMERG "%s: DMAing conflict in ne_block_input "
+ "[DMAstat:%d][irqlock:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ if (ei_status.word16) {
+ insw(NE_BASE + NE_DATAPORT, buf, count >> 1);
+ if (count & 0x01) {
+ buf[count-1] = inb(NE_BASE + NE_DATAPORT);
+#ifdef NE_SANITY_CHECK
+ xfer_count++;
+#endif
+ }
+ } else {
+ insb(NE_BASE + NE_DATAPORT, buf, count);
+ }
+
+#ifdef NE_SANITY_CHECK
+ /* This was for the ALPHA version only, but enough people have
+ been encountering problems so it is still here. If you see
+ this message you either 1) have a slightly incompatible clone
+ or 2) have noise/speed problems with your bus. */
+
+ if (ei_debug > 1) {
+ /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
+ -- it's broken for Rx on some cards! */
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if (((ring_offset + xfer_count) & 0xff) == low)
+ break;
+ } while (--tries > 0);
+ if (tries <= 0)
+ printk(KERN_WARNING "%s: RX transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, ring_offset + xfer_count, addr);
+ }
+#endif
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+static void ne_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ int nic_base = NE_BASE;
+ unsigned long dma_start;
+#ifdef NE_SANITY_CHECK
+ int retries = 0;
+#endif
+
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+
+ if (ei_status.word16 && (count & 0x01))
+ count++;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk(KERN_EMERG "%s: DMAing conflict in ne_block_output "
+ "[DMAstat:%d][irqlock:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
+
+#ifdef NE_SANITY_CHECK
+retry:
+#endif
+
+#ifdef NE8390_RW_BUGFIX
+ /* Handle the read-before-write bug the same way as the
+ Crynwr packet driver -- the NatSemi method doesn't work.
+ Actually this doesn't always work either, but if you have
+ problems with your NEx000 this is better than nothing! */
+
+ outb_p(0x42, nic_base + EN0_RCNTLO);
+ outb_p(0x00, nic_base + EN0_RCNTHI);
+ outb_p(0x42, nic_base + EN0_RSARLO);
+ outb_p(0x00, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ /* Make certain that the dummy read has occurred. */
+ udelay(6);
+#endif
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR);
+
+ /* Now the normal output. */
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(0x00, nic_base + EN0_RSARLO);
+ outb_p(start_page, nic_base + EN0_RSARHI);
+
+ outb_p(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
+ if (ei_status.word16) {
+ outsw(NE_BASE + NE_DATAPORT, buf, count>>1);
+ } else {
+ outsb(NE_BASE + NE_DATAPORT, buf, count);
+ }
+
+ dma_start = jiffies;
+
+#ifdef NE_SANITY_CHECK
+ /* This was for the ALPHA version only, but enough people have
+ been encountering problems so it is still here. */
+
+ if (ei_debug > 1) {
+ /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if ((start_page << 8) + count == addr)
+ break;
+ } while (--tries > 0);
+
+ if (tries <= 0) {
+ printk(KERN_WARNING "%s: Tx packet transfer address mismatch, "
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, (start_page << 8) + count, addr);
+ if (retries++ == 0)
+ goto retry;
+ }
+ }
+#endif
+
+ while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
+ if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
+ printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
+ ne_reset_8390(dev);
+ NS8390p_init(dev, 1);
+ break;
+ }
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
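+
+/* For context (a sketch of the caller, not code in this file): the
+ generic 8390 core in lib8390.c drives this hook from its transmit
+ path roughly as
+
+ ei_local->block_output(dev, send_length, data, output_page);
+
+ and then programs TPSR/TBCR and issues E8390_TRANS, so the packet
+ must be fully copied into card memory by the time we return. */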
+
+static int __init ne_drv_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ int err, this_dev = pdev->id;
+ struct resource *res;
+
+ dev = alloc_eip_netdev();
+ if (!dev)
+ return -ENOMEM;
+
+ /* ne.c doesn't populate resources in platform_device, but
+ * rbtx4927_ne_init and rbtx4938_ne_init do register devices
+ * with resources.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (res) {
+ dev->base_addr = res->start;
+ dev->irq = platform_get_irq(pdev, 0);
+ } else {
+ if (this_dev < 0 || this_dev >= MAX_NE_CARDS) {
+ free_netdev(dev);
+ return -EINVAL;
+ }
+ dev->base_addr = io[this_dev];
+ dev->irq = irq[this_dev];
+ dev->mem_end = bad[this_dev];
+ }
+ err = do_ne_probe(dev);
+ if (err) {
+ free_netdev(dev);
+ return err;
+ }
+ platform_set_drvdata(pdev, dev);
+
+ /* Update with any values found by probing, don't update if
+ * resources were specified.
+ */
+ if (!res) {
+ io[this_dev] = dev->base_addr;
+ irq[this_dev] = dev->irq;
+ }
+ return 0;
+}
+
+static int ne_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+
+ if (dev) {
+ struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
+ netif_device_detach(dev);
+ unregister_netdev(dev);
+ if (idev)
+ pnp_device_detach(idev);
+ /* Careful: ne_drv_remove() can be called twice, once from
+ * the platform_driver.remove hook and again when the
+ * platform_device itself is being removed.
+ */
+ ei_status.priv = 0;
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, NE_IO_EXTENT);
+ free_netdev(dev);
+ platform_set_drvdata(pdev, NULL);
+ }
+ return 0;
+}
+
+/* Remove unused devices, or all of them if 'all' is set. */
+static void ne_loop_rm_unreg(int all)
+{
+ int this_dev;
+ struct platform_device *pdev;
+ for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+ pdev = pdev_ne[this_dev];
+ /* No network device == unused */
+ if (pdev && (!platform_get_drvdata(pdev) || all)) {
+ ne_drv_remove(pdev);
+ platform_device_unregister(pdev);
+ pdev_ne[this_dev] = NULL;
+ }
+ }
+}
+
+#ifdef CONFIG_PM
+static int ne_drv_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+
+ if (netif_running(dev)) {
+ struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
+ netif_device_detach(dev);
+ if (idev)
+ pnp_stop_dev(idev);
+ }
+ return 0;
+}
+
+static int ne_drv_resume(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+
+ if (netif_running(dev)) {
+ struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
+ if (idev)
+ pnp_start_dev(idev);
+ ne_reset_8390(dev);
+ NS8390p_init(dev, 1);
+ netif_device_attach(dev);
+ }
+ return 0;
+}
+#else
+#define ne_drv_suspend NULL
+#define ne_drv_resume NULL
+#endif
+
+static struct platform_driver ne_driver = {
+ .remove = ne_drv_remove,
+ .suspend = ne_drv_suspend,
+ .resume = ne_drv_resume,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static void __init ne_add_devices(void)
+{
+ int this_dev;
+ struct platform_device *pdev;
+
+ for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+ if (pdev_ne[this_dev])
+ continue;
+ pdev = platform_device_register_simple(
+ DRV_NAME, this_dev, NULL, 0);
+ if (IS_ERR(pdev))
+ continue;
+ pdev_ne[this_dev] = pdev;
+ }
+}
+
+#ifdef MODULE
+int __init init_module(void)
+{
+ int retval;
+ ne_add_devices();
+ retval = platform_driver_probe(&ne_driver, ne_drv_probe);
+ if (retval) {
+ if (io[0] == 0)
+ printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\""
+ " value(s) for ISA cards.\n");
+ ne_loop_rm_unreg(1);
+ return retval;
+ }
+
+ /* Unregister unused platform_devices. */
+ ne_loop_rm_unreg(0);
+ return retval;
+}
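+
+/* The pattern above: ne_add_devices() registers a platform_device for
+ each possible card, platform_driver_probe() runs ne_drv_probe() on
+ them, and ne_loop_rm_unreg(0) then unregisters whichever devices no
+ probe claimed (those left without a netdev in their drvdata). */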
+#else /* MODULE */
+static int __init ne_init(void)
+{
+ int retval = platform_driver_probe(&ne_driver, ne_drv_probe);
+
+ /* Unregister unused platform_devices. */
+ ne_loop_rm_unreg(0);
+ return retval;
+}
+module_init(ne_init);
+
+struct net_device * __init ne_probe(int unit)
+{
+ int this_dev;
+ struct net_device *dev;
+
+ /* Find an empty slot: one with no net_device bound and a zero I/O port. */
+ this_dev = 0;
+ while ((pdev_ne[this_dev] && platform_get_drvdata(pdev_ne[this_dev])) ||
+ io[this_dev]) {
+ if (++this_dev == MAX_NE_CARDS)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Get irq, io from kernel command line */
+ dev = alloc_eip_netdev();
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ io[this_dev] = dev->base_addr;
+ irq[this_dev] = dev->irq;
+ bad[this_dev] = dev->mem_end;
+
+ free_netdev(dev);
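+ /* The net_device above existed only so netdev_boot_setup_check()
+ could parse any "ether=" boot parameters into io[]/irq[]/bad[];
+ ne_drv_probe() allocates the real one. */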
+
+ ne_add_devices();
+
+ /* return the first device found */
+ for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+ if (pdev_ne[this_dev]) {
+ dev = platform_get_drvdata(pdev_ne[this_dev]);
+ if (dev)
+ return dev;
+ }
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+#endif /* MODULE */
+
+static void __exit ne_exit(void)
+{
+ platform_driver_unregister(&ne_driver);
+ ne_loop_rm_unreg(1);
+}
+module_exit(ne_exit);
diff --git a/drivers/net/ethernet/8390/ne2.c b/drivers/net/ethernet/8390/ne2.c
new file mode 100644
index 000000000000..70cdc6996342
--- /dev/null
+++ b/drivers/net/ethernet/8390/ne2.c
@@ -0,0 +1,799 @@
+/* ne2.c: A NE/2 Ethernet Driver for Linux. */
+/*
+ Based on the NE2000 driver written by Donald Becker (1992-94).
+ Modified by Wim Dumon (Apr 1996)
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as wimpie@linux.cc.kuleuven.ac.be
+
+ Currently supported: NE/2
+ This patch was never tested on other MCA-ethernet adapters, but it
+ might work. Just give it a try and let me know if you have problems.
+ Also mail me if it really works, please!
+
+ Changelog:
+ Mon Feb 3 16:26:02 MET 1997
+ - adapted the driver to work with the 2.1.25 kernel
+ - multiple ne2 support (untested)
+ - module support (untested)
+
+ Fri Aug 28 00:18:36 CET 1998 (David Weinehall)
+ - fixed a few minor typos
+ - made the MODULE_PARM conditional (it only works with the v2.1.x kernels)
+ - fixed the module support (Now it's working...)
+
+ Mon Sep 7 19:01:44 CET 1998 (David Weinehall)
+ - added support for Arco Electronics AE/2-card (experimental)
+
+ Mon Sep 14 09:53:42 CET 1998 (David Weinehall)
+ - added support for Compex ENET-16MC/P (experimental)
+
+ Tue Sep 15 16:21:12 CET 1998 (David Weinehall, Magnus Jonsson, Tomas Ogren)
+ - Miscellaneous bugfixes
+
+ Tue Sep 19 16:21:12 CET 1998 (Magnus Jonsson)
+ - Cleanup
+
+ Wed Sep 23 14:33:34 CET 1998 (David Weinehall)
+ - Restructuring and rewriting for v2.1.x compliance
+
+ Wed Oct 14 17:19:21 CET 1998 (David Weinehall)
+ - Added code that unregisters irq and proc-info
+ - Version# bump
+
+ Mon Nov 16 15:28:23 CET 1998 (Wim Dumon)
+ - pass 'dev' as last parameter of request_irq instead of 'NULL'
+
+ Wed Feb 7 21:24:00 CET 2001 (Alfred Arnold)
+ - added support for the D-Link DE-320CT
+
+ * WARNING
+ -------
+ This is alpha-test software. It is not guaranteed to work. As a
+ matter of fact, I'm quite sure there are *LOTS* of bugs in here. I
+ would like to hear from you if you use this driver, even if it works.
+ If it doesn't work, be sure to send me a mail describing the problems!
+*/
+
+static const char *version = "ne2.c:v0.91 Nov 16 1998 Wim Dumon <wimpie@kotnet.org>\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/mca-legacy.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include "8390.h"
+
+#define DRV_NAME "ne2"
+
+/* Some defines that people can play with if so inclined. */
+
+/* Do we perform extra sanity checks on stuff? */
+/* #define NE_SANITY_CHECK */
+
+/* Do we implement the read-before-write bugfix? */
+/* #define NE_RW_BUGFIX */
+
+/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
+/* #define PACKETBUF_MEMSIZE 0x40 */
+
+
+/* ---- No user-serviceable parts below ---- */
+
+#define NE_BASE (dev->base_addr)
+#define NE_CMD 0x00
+#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define NE_RESET 0x20 /* Issue a read to reset, a write to clear. */
+#define NE_IO_EXTENT 0x30
+
+#define NE1SM_START_PG 0x20 /* First page of TX buffer */
+#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
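+
+/* These are 256-byte buffer pages, so the 16-bit (NESM) layout spans
+ pages 0x40-0x7f: 16kB of on-card RAM, with the transmit buffer at
+ the start and the receive ring occupying the remainder. */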
+
+/* From the .ADF file: */
+static unsigned int addresses[7] __initdata =
+ {0x1000, 0x2020, 0x8020, 0xa0a0, 0xb0b0, 0xc0c0, 0xc3d0};
+static int irqs[4] __initdata = {3, 4, 5, 9};
+
+/* From the D-Link ADF file: */
+static unsigned int dlink_addresses[4] __initdata =
+ {0x300, 0x320, 0x340, 0x360};
+static int dlink_irqs[8] __initdata = {3, 4, 5, 9, 10, 11, 14, 15};
+
+struct ne2_adapters_t {
+ unsigned int id;
+ char *name;
+};
+
+static struct ne2_adapters_t ne2_adapters[] __initdata = {
+ { 0x6354, "Arco Ethernet Adapter AE/2" },
+ { 0x70DE, "Compex ENET-16 MC/P" },
+ { 0x7154, "Novell Ethernet Adapter NE/2" },
+ { 0x56ea, "D-Link DE-320CT" },
+ { 0x0000, NULL }
+};
+
+extern int netcard_probe(struct net_device *dev);
+
+static int ne2_probe1(struct net_device *dev, int slot);
+
+static void ne_reset_8390(struct net_device *dev);
+static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ne_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ne_block_output(struct net_device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+
+
+/*
+ * special code to read the DE-320's MAC address EEPROM. In contrast to a
+ * standard NE design, this is a serial EEPROM (93C46) that has to be read
+ * bit by bit. The EEPROM control port at base + 0x1e has the following
+ * layout:
+ *
+ * Bit 0 = Data out (read from EEPROM)
+ * Bit 1 = Data in (write to EEPROM)
+ * Bit 2 = Clock
+ * Bit 3 = Chip Select
+ * Bit 7 = ~50 kHz clock for defined delays
+ *
+ */
+
+static void __init dlink_put_eeprom(unsigned char value, unsigned int addr)
+{
+ int z;
+ unsigned char v1, v2;
+
+ /* write the value to the NIC EEPROM register */
+
+ outb(value, addr + 0x1e);
+
+ /* now wait for the clock line to toggle twice; effectively, we are
+ waiting (at least) for one full clock cycle */
+
+ for (z = 0; z < 2; z++) {
+ do {
+ v1 = inb(addr + 0x1e);
+ v2 = inb(addr + 0x1e);
+ }
+ while (!((v1 ^ v2) & 0x80));
+ }
+}
+
+static void __init dlink_send_eeprom_bit(unsigned int bit, unsigned int addr)
+{
+ /* shift data bit into correct position */
+
+ bit = bit << 1;
+
+ /* write value, keep clock line high for two cycles */
+
+ dlink_put_eeprom(0x09 | bit, addr);
+ dlink_put_eeprom(0x0d | bit, addr);
+ dlink_put_eeprom(0x0d | bit, addr);
+ dlink_put_eeprom(0x09 | bit, addr);
+}
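+
+/* In terms of the port layout above: 0x09 and 0x0d both keep chip select
+ (bit 3) asserted and differ only in the clock (bit 2), so each call
+ above drives the clock low, high, high, low, with the data bit held
+ in bit 1 throughout. */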
+
+static void __init dlink_send_eeprom_word(unsigned int value, unsigned int len, unsigned int addr)
+{
+ int z;
+
+ /* adjust bits so that they are left-aligned in a 16-bit-word */
+
+ value = value << (16 - len);
+
+ /* shift bits out to the EEPROM */
+
+ for (z = 0; z < len; z++) {
+ dlink_send_eeprom_bit((value & 0x8000) >> 15, addr);
+ value = value << 1;
+ }
+}
+
+static unsigned int __init dlink_get_eeprom(unsigned int eeaddr, unsigned int addr)
+{
+ int z;
+ unsigned int value = 0;
+
+ /* pull the CS line low for a moment. This resets the EEPROM-
+ internal logic, and makes it ready for a new command. */
+
+ dlink_put_eeprom(0x01, addr);
+ dlink_put_eeprom(0x09, addr);
+
+ /* send one start bit, read command (1 - 0), plus the address to
+ the EEPROM */
+
+ dlink_send_eeprom_word(0x0180 | (eeaddr & 0x3f), 9, addr);
+
+ /* get the data word. We clock by sending 0s to the EEPROM, which
+ get ignored during the read process */
+
+ for (z = 0; z < 16; z++) {
+ dlink_send_eeprom_bit(0, addr);
+ value = (value << 1) | (inb(addr + 0x1e) & 0x01);
+ }
+
+ return value;
+}
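+
+/* Putting the helpers together, a full 93C46 READ transaction as
+ implemented above is: pulse CS low and back high, clock out the 9-bit
+ command 1-10-aaaaaa (start bit, read opcode, 6-bit address: the
+ 0x0180 | (eeaddr & 0x3f) word), then sample bit 0 during 16 dummy
+ bit-writes to shift in the data word. */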
+
+/*
+ * Note that at boot, this probe only picks up one card at a time.
+ */
+
+static int __init do_ne2_probe(struct net_device *dev)
+{
+ static int current_mca_slot = -1;
+ int i;
+ int adapter_found = 0;
+
+ /* Do not check any supplied i/o locations.
+ POS registers usually don't fail :) */
+
+ /* MCA cards have POS registers.
+ Autodetecting MCA cards is extremely simple.
+ Just search for the card. */
+
+ for(i = 0; (ne2_adapters[i].name != NULL) && !adapter_found; i++) {
+ current_mca_slot =
+ mca_find_unused_adapter(ne2_adapters[i].id, 0);
+
+ if((current_mca_slot != MCA_NOTFOUND) && !adapter_found) {
+ int res;
+ mca_set_adapter_name(current_mca_slot,
+ ne2_adapters[i].name);
+ mca_mark_as_used(current_mca_slot);
+
+ res = ne2_probe1(dev, current_mca_slot);
+ if (res)
+ mca_mark_as_unused(current_mca_slot);
+ return res;
+ }
+ }
+ return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init ne2_probe(int unit)
+{
+ struct net_device *dev = alloc_eip_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_ne2_probe(dev);
+ if (err)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static int ne2_procinfo(char *buf, int slot, struct net_device *dev)
+{
+ int len=0;
+
+ len += sprintf(buf+len, "The NE/2 Ethernet Adapter\n" );
+ len += sprintf(buf+len, "Driver written by Wim Dumon ");
+ len += sprintf(buf+len, "<wimpie@kotnet.org>\n");
+ len += sprintf(buf+len, "Modified by ");
+ len += sprintf(buf+len, "David Weinehall <tao@acc.umu.se>\n");
+ len += sprintf(buf+len, "and by Magnus Jonsson <bigfoot@acc.umu.se>\n");
+ len += sprintf(buf+len, "Based on the original NE2000 drivers\n" );
+ len += sprintf(buf+len, "Base IO: %#x\n", (unsigned int)dev->base_addr);
+ len += sprintf(buf+len, "IRQ : %d\n", dev->irq);
+ len += sprintf(buf+len, "HW addr : %pM\n", dev->dev_addr);
+
+ return len;
+}
+
+static int __init ne2_probe1(struct net_device *dev, int slot)
+{
+ int i, base_addr, irq, retval;
+ unsigned char POS;
+ unsigned char SA_prom[32];
+ const char *name = "NE/2";
+ int start_page, stop_page;
+ static unsigned version_printed;
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ printk("NE/2 ethercard found in slot %d:", slot);
+
+ /* Read base IO and IRQ from the POS-registers */
+ POS = mca_read_stored_pos(slot, 2);
+ if(!(POS % 2)) {
+ printk(" disabled.\n");
+ return -ENODEV;
+ }
+
+ /* handle different POS register structure for D-Link card */
+
+ if (mca_read_stored_pos(slot, 0) == 0xea) {
+ base_addr = dlink_addresses[(POS >> 5) & 0x03];
+ irq = dlink_irqs[(POS >> 2) & 0x07];
+ } else {
+ i = (POS & 0xE) >> 1;
+ /* printk("Hallelujah sdog, if there is a 1 after the arrow then"
+ " 1 - 1 == 0 and it should work -> %d\n", i);
+ The above line was for remote testing, thanks to sdog ... */
+ base_addr = addresses[i - 1];
+ irq = irqs[(POS & 0x60)>>5];
+ }
+
+ if (!request_region(base_addr, NE_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+#ifdef DEBUG
+ printk("POS info : pos 2 = %#x ; base = %#x ; irq = %ld\n", POS,
+ base_addr, irq);
+#endif
+
+#ifndef CRYNWR_WAY
+ /* Reset the card the way they do it in the Crynwr packet driver */
+ for (i=0; i<8; i++)
+ outb(0x0, base_addr + NE_RESET);
+ inb(base_addr + NE_RESET);
+ outb(0x21, base_addr + NE_CMD);
+ if (inb(base_addr + NE_CMD) != 0x21) {
+ printk("NE/2 adapter not responding\n");
+ retval = -ENODEV;
+ goto out;
+ }
+
+ /* The Crynwr sources do a RAM test here; I skip it. If your card's
+ memory were broken the test would fail and you couldn't use the
+ card, but without the test you couldn't use a broken card either,
+ so the test wouldn't help you. */
+
+#else /* _I_ never tested it this way .. Go ahead and try ...*/
+ /* Reset card. Who knows what dain-bramaged state it was left in. */
+ {
+ unsigned long reset_start_time = jiffies;
+
+ /* DON'T change these to inb_p/outb_p or reset will fail on
+ clones.. */
+ outb(inb(base_addr + NE_RESET), base_addr + NE_RESET);
+
+ while ((inb_p(base_addr + EN0_ISR) & ENISR_RESET) == 0)
+ if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
+ printk(" not found (no reset ack).\n");
+ retval = -ENODEV;
+ goto out;
+ }
+
+ outb_p(0xff, base_addr + EN0_ISR); /* Ack all intr. */
+ }
+#endif
+
+
+ /* Read the 16 bytes of station address PROM.
+ We must first initialize registers, similar to
+ NS8390p_init(eifdev, 0).
+ We can't reliably read the SAPROM address without this.
+ (I learned the hard way!). */
+ {
+ struct {
+ unsigned char value, offset;
+ } program_seq[] = {
+ /* Select page 0 */
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD},
+ {0x49, EN0_DCFG}, /* Set WORD-wide (0x49) access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {32, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
+ {0x00, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(program_seq); i++)
+ outb_p(program_seq[i].value, base_addr +
+ program_seq[i].offset);
+
+ }
+ for (i = 0; i < 6 /*sizeof(SA_prom)*/; i++) {
+ SA_prom[i] = inb(base_addr + NE_DATAPORT);
+ }
+
+ /* I don't know whether the previous sequence includes the general
+ board reset procedure, so better don't omit it and just overwrite
+ the garbage read from a DE-320 with correct stuff. */
+
+ if (mca_read_stored_pos(slot, 0) == 0xea) {
+ unsigned int v;
+
+ for (i = 0; i < 3; i++) {
+ v = dlink_get_eeprom(i, base_addr);
+ SA_prom[(i << 1) ] = v & 0xff;
+ SA_prom[(i << 1) + 1] = (v >> 8) & 0xff;
+ }
+ }
+
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+
+ dev->irq=irq;
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+ retval = request_irq(dev->irq, eip_interrupt, 0, DRV_NAME, dev);
+ if (retval) {
+ printk (" unable to get IRQ %d (irqval=%d).\n",
+ dev->irq, retval);
+ goto out;
+ }
+
+ dev->base_addr = base_addr;
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++)
+ dev->dev_addr[i] = SA_prom[i];
+
+ printk(" %pM\n", dev->dev_addr);
+
+ printk("%s: %s found at %#x, using IRQ %d.\n",
+ dev->name, name, base_addr, dev->irq);
+
+ mca_set_adapter_procfn(slot, (MCA_ProcFn) ne2_procinfo, dev);
+
+ ei_status.name = name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+ ei_status.word16 = 1; /* the NE/2 is always a 16-bit card */
+
+ ei_status.rx_start_page = start_page + TX_PAGES;
+#ifdef PACKETBUF_MEMSIZE
+ /* Allow the packet buffer size to be overridden by know-it-alls. */
+ ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+#endif
+
+ ei_status.reset_8390 = &ne_reset_8390;
+ ei_status.block_input = &ne_block_input;
+ ei_status.block_output = &ne_block_output;
+ ei_status.get_8390_hdr = &ne_get_8390_hdr;
+
+ ei_status.priv = slot;
+
+ dev->netdev_ops = &eip_netdev_ops;
+ NS8390p_init(dev, 0);
+
+ retval = register_netdev(dev);
+ if (retval)
+ goto out1;
+ return 0;
+out1:
+ mca_set_adapter_procfn(ei_status.priv, NULL, NULL);
+ free_irq(dev->irq, dev);
+out:
+ release_region(base_addr, NE_IO_EXTENT);
+ return retval;
+}
+
+/* Hard reset the card. This used to pause for the same period that an
+ 8390 reset command required, but that shouldn't be necessary. */
+static void ne_reset_8390(struct net_device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+
+ if (ei_debug > 1)
+ printk("resetting the 8390 t=%ld...", jiffies);
+
+ /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
+ outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
+ if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
+ printk("%s: ne_reset_8390() did not complete.\n",
+ dev->name);
+ break;
+ }
+ outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+
+ int nic_base = dev->base_addr;
+
+ /* This *shouldn't* happen.
+ If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + NE_CMD);
+ outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+ outb_p(0, nic_base + EN0_RCNTHI);
+ outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ if (ei_status.word16)
+ insw(NE_BASE + NE_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr)>>1);
+ else
+ insb(NE_BASE + NE_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr));
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you
+ are porting to a new ethercard, look at the packet driver source for
+ hints. The NEx000 doesn't share the on-board packet memory -- you have
+ to put the packet out through the "remote DMA" dataport using outb. */
+
+static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb,
+ int ring_offset)
+{
+#ifdef NE_SANITY_CHECK
+ int xfer_count = count;
+#endif
+ int nic_base = dev->base_addr;
+ char *buf = skb->data;
+
+ /* This *shouldn't* happen.
+ If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_block_input "
+ "[DMAstat:%d][irqlock:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + NE_CMD);
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ if (ei_status.word16) {
+ insw(NE_BASE + NE_DATAPORT, buf, count >> 1);
+ if (count & 0x01) {
+ buf[count-1] = inb(NE_BASE + NE_DATAPORT);
+#ifdef NE_SANITY_CHECK
+ xfer_count++;
+#endif
+ }
+ } else {
+ insb(NE_BASE + NE_DATAPORT, buf, count);
+ }
+
+#ifdef NE_SANITY_CHECK
+ /* This was for the ALPHA version only, but enough people have
+ been encountering problems so it is still here. If you see
+ this message you either 1) have a slightly incompatible clone
+ or 2) have noise/speed problems with your bus. */
+ if (ei_debug > 1) { /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
+ -- it's broken for Rx on some cards! */
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if (((ring_offset + xfer_count) & 0xff) == low)
+ break;
+ } while (--tries > 0);
+ if (tries <= 0)
+ printk("%s: RX transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, ring_offset + xfer_count, addr);
+ }
+#endif
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+static void ne_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ int nic_base = NE_BASE;
+ unsigned long dma_start;
+#ifdef NE_SANITY_CHECK
+ int retries = 0;
+#endif
+
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+ if (ei_status.word16 && (count & 0x01))
+ count++;
+
+ /* This *shouldn't* happen.
+ If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_block_output."
+ "[DMAstat:%d][irqlock:%d]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
+
+#ifdef NE_SANITY_CHECK
+retry:
+#endif
+
+#ifdef NE8390_RW_BUGFIX
+ /* Handle the read-before-write bug the same way as the
+ Crynwr packet driver -- the NatSemi method doesn't work.
+ Actually this doesn't always work either, but if you have
+ problems with your NEx000 this is better than nothing! */
+ outb_p(0x42, nic_base + EN0_RCNTLO);
+ outb_p(0x00, nic_base + EN0_RCNTHI);
+ outb_p(0x42, nic_base + EN0_RSARLO);
+ outb_p(0x00, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ /* Make certain that the dummy read has occurred. */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+#endif
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR);
+
+ /* Now the normal output. */
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(0x00, nic_base + EN0_RSARLO);
+ outb_p(start_page, nic_base + EN0_RSARHI);
+
+ outb_p(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
+ if (ei_status.word16) {
+ outsw(NE_BASE + NE_DATAPORT, buf, count>>1);
+ } else {
+ outsb(NE_BASE + NE_DATAPORT, buf, count);
+ }
+
+ dma_start = jiffies;
+
+#ifdef NE_SANITY_CHECK
+ /* This was for the ALPHA version only, but enough people have
+ been encountering problems so it is still here. */
+
+ if (ei_debug > 1) { /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if ((start_page << 8) + count == addr)
+ break;
+ } while (--tries > 0);
+ if (tries <= 0) {
+ printk("%s: Tx packet transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, (start_page << 8) + count, addr);
+ if (retries++ == 0)
+ goto retry;
+ }
+ }
+#endif
+
+ while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
+ if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
+ printk("%s: timeout waiting for Tx RDC.\n", dev->name);
+ ne_reset_8390(dev);
+ NS8390p_init(dev, 1);
+ break;
+ }
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+
+#ifdef MODULE
+#define MAX_NE_CARDS 4 /* Max number of NE cards per module */
+static struct net_device *dev_ne[MAX_NE_CARDS];
+static int io[MAX_NE_CARDS];
+static int irq[MAX_NE_CARDS];
+static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */
+MODULE_LICENSE("GPL");
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(bad, int, NULL, 0);
+MODULE_PARM_DESC(io, "(ignored)");
+MODULE_PARM_DESC(irq, "(ignored)");
+MODULE_PARM_DESC(bad, "(ignored)");
+
+/* Module code fixed by David Weinehall */
+
+int __init init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+ dev = alloc_eip_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->mem_end = bad[this_dev];
+ dev->base_addr = io[this_dev];
+ if (do_ne2_probe(dev) == 0) {
+ dev_ne[found++] = dev;
+ continue;
+ }
+ free_netdev(dev);
+ break;
+ }
+ if (found)
+ return 0;
+ printk(KERN_WARNING "ne2.c: No NE/2 card found\n");
+ return -ENXIO;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ mca_mark_as_unused(ei_status.priv);
+ mca_set_adapter_procfn(ei_status.priv, NULL, NULL);
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, NE_IO_EXTENT);
+}
+
+void __exit cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+ struct net_device *dev = dev_ne[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
new file mode 100644
index 000000000000..39923425ba25
--- /dev/null
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -0,0 +1,726 @@
+/* ne2k-pci.c: A NE2000 clone on PCI bus driver for Linux. */
+/*
+ A Linux device driver for PCI NE2000 clones.
+
+ Authors and other copyright holders:
+ 1992-2000 by Donald Becker, NE2000 core and various modifications.
+ 1995-1998 by Paul Gortmaker, core modifications and PCI support.
+ Copyright 1993 assigned to the United States Government as represented
+ by the Director, National Security Agency.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Issues remaining:
+ People are making PCI ne2000 clones! Oh the horror, the horror...
+ Limited full-duplex support.
+*/
+
+#define DRV_NAME "ne2k-pci"
+#define DRV_VERSION "1.03"
+#define DRV_RELDATE "9/22/2003"
+
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+/* Used to pass the full-duplex flag, etc. */
+static int full_duplex[MAX_UNITS];
+static int options[MAX_UNITS];
+
+/* Force a non std. amount of memory. Units are 256 byte pages. */
+/* #define PACKETBUF_MEMSIZE 0x40 */
+
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "8390.h"
+
+/* These identify the driver base version and may not be removed. */
+static const char version[] __devinitconst =
+ KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
+ " D. Becker/P. Gortmaker\n";
+
+#if defined(__powerpc__)
+#define inl_le(addr) le32_to_cpu(inl(addr))
+#define inw_le(addr) le16_to_cpu(inw(addr))
+#endif
+
+#define PFX DRV_NAME ": "
+
+MODULE_AUTHOR("Donald Becker / Paul Gortmaker");
+MODULE_DESCRIPTION("PCI NE2000 clone driver");
+MODULE_LICENSE("GPL");
+
+module_param(debug, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+MODULE_PARM_DESC(debug, "debug level (1-2)");
+MODULE_PARM_DESC(options, "Bit 5: full duplex");
+MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
+
+/* Some defines that people can play with if so inclined. */
+
+/* Use 32 bit data-movement operations instead of 16 bit. */
+#define USE_LONGIO
+
+/* Do we implement the read-before-write bugfix? */
+/* #define NE_RW_BUGFIX */
+
+/* Flags. We rename an existing ei_status field to store flags! */
+/* Thus only the low 8 bits are usable for non-init-time flags. */
+#define ne2k_flags reg0
+enum {
+ ONLY_16BIT_IO=8, ONLY_32BIT_IO=4, /* Chip can do only 16/32-bit xfers. */
+ FORCE_FDX=0x20, /* User override. */
+ REALTEK_FDX=0x40, HOLTEK_FDX=0x80,
+ STOP_PG_0x60=0x100,
+};
+
+enum ne2k_pci_chipsets {
+ CH_RealTek_RTL_8029 = 0,
+ CH_Winbond_89C940,
+ CH_Compex_RL2000,
+ CH_KTI_ET32P2,
+ CH_NetVin_NV5000SC,
+ CH_Via_86C926,
+ CH_SureCom_NE34,
+ CH_Winbond_W89C940F,
+ CH_Holtek_HT80232,
+ CH_Holtek_HT80229,
+ CH_Winbond_89C940_8c4a,
+};
+
+
+static struct {
+ char *name;
+ int flags;
+} pci_clone_list[] __devinitdata = {
+ {"RealTek RTL-8029", REALTEK_FDX},
+ {"Winbond 89C940", 0},
+ {"Compex RL2000", 0},
+ {"KTI ET32P2", 0},
+ {"NetVin NV5000SC", 0},
+ {"Via 86C926", ONLY_16BIT_IO},
+ {"SureCom NE34", 0},
+ {"Winbond W89C940F", 0},
+ {"Holtek HT80232", ONLY_16BIT_IO | HOLTEK_FDX},
+ {"Holtek HT80229", ONLY_32BIT_IO | HOLTEK_FDX | STOP_PG_0x60 },
+ {"Winbond W89C940(misprogrammed)", 0},
+ {NULL,}
+};
+
+
+static DEFINE_PCI_DEVICE_TABLE(ne2k_pci_tbl) = {
+ { 0x10ec, 0x8029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_RealTek_RTL_8029 },
+ { 0x1050, 0x0940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940 },
+ { 0x11f6, 0x1401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Compex_RL2000 },
+ { 0x8e2e, 0x3000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_KTI_ET32P2 },
+ { 0x4a14, 0x5000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_NetVin_NV5000SC },
+ { 0x1106, 0x0926, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Via_86C926 },
+ { 0x10bd, 0x0e34, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_SureCom_NE34 },
+ { 0x1050, 0x5a5a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_W89C940F },
+ { 0x12c3, 0x0058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Holtek_HT80232 },
+ { 0x12c3, 0x5598, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Holtek_HT80229 },
+ { 0x8c4a, 0x1980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940_8c4a },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ne2k_pci_tbl);
+
+
+/* ---- No user-serviceable parts below ---- */
+
+#define NE_BASE (dev->base_addr)
+#define NE_CMD 0x00
+#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */
+#define NE_IO_EXTENT 0x20
+
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+
+static int ne2k_pci_open(struct net_device *dev);
+static int ne2k_pci_close(struct net_device *dev);
+
+static void ne2k_pci_reset_8390(struct net_device *dev);
+static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ne2k_pci_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ne2k_pci_block_output(struct net_device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+static const struct ethtool_ops ne2k_pci_ethtool_ops;
+
+
+
+/* There is no room in the standard 8390 structure for extra info we need,
+ so we build a meta/outer-wrapper structure. */
+struct ne2k_pci_card {
+ struct net_device *dev;
+ struct pci_dev *pci_dev;
+};
+
+
+
+/*
+ NEx000-clone boards have a Station Address (SA) PROM (SAPROM) in the packet
+ buffer memory space. By-the-spec NE2000 clones have 0x57,0x57 in bytes
+ 0x0e,0x0f of the SAPROM, while other supposed NE2000 clones must be
+ detected by their SA prefix.
+
+ Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
+ mode results in doubled values, which can be detected and compensated for.
+
+ The probe is also responsible for initializing the card and filling
+ in the 'dev' and 'ei_status' structures.
+*/
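+
+/* A sketch of that compensation, roughly as the ISA ne.c probe does it
+ (this PCI driver always runs the chip word-wide, so the probe below
+ simply reads the PROM directly instead): read the PROM as 32 bytes;
+ if every pair comes back doubled (SA_prom[i] == SA_prom[i+1] for all
+ even i) it is a word-wide card, and the real contents are recovered
+ with SA_prom[i] = SA_prom[i+i]. */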
+
+static const struct net_device_ops ne2k_netdev_ops = {
+ .ndo_open = ne2k_pci_open,
+ .ndo_stop = ne2k_pci_close,
+ .ndo_start_xmit = ei_start_xmit,
+ .ndo_tx_timeout = ei_tx_timeout,
+ .ndo_get_stats = ei_get_stats,
+ .ndo_set_rx_mode = ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ei_poll,
+#endif
+};
+
+static int __devinit ne2k_pci_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ int i;
+ unsigned char SA_prom[32];
+ int start_page, stop_page;
+ int irq, reg0, chip_idx = ent->driver_data;
+ static unsigned int fnd_cnt;
+ long ioaddr;
+ int flags = pci_clone_list[chip_idx].flags;
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(version);
+#endif
+
+ fnd_cnt++;
+
+ i = pci_enable_device (pdev);
+ if (i)
+ return i;
+
+ ioaddr = pci_resource_start (pdev, 0);
+ irq = pdev->irq;
+
+ if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) {
+ dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n");
+ return -ENODEV;
+ }
+
+ if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) {
+ dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n",
+ NE_IO_EXTENT, ioaddr);
+ return -EBUSY;
+ }
+
+ reg0 = inb(ioaddr);
+ if (reg0 == 0xFF)
+ goto err_out_free_res;
+
+ /* Do a preliminary verification that we have a 8390. */
+ {
+ int regd;
+ outb(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
+ regd = inb(ioaddr + 0x0d);
+ outb(0xff, ioaddr + 0x0d);
+ outb(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
+ inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
+ if (inb(ioaddr + EN0_COUNTER0) != 0) {
+ outb(reg0, ioaddr);
+ outb(regd, ioaddr + 0x0d); /* Restore the old values. */
+ goto err_out_free_res;
+ }
+ }
+
+ /* Allocate net_device, dev->priv; fill in 8390 specific dev fields. */
+ dev = alloc_ei_netdev();
+ if (!dev) {
+ dev_err(&pdev->dev, "cannot allocate ethernet device\n");
+ goto err_out_free_res;
+ }
+ dev->netdev_ops = &ne2k_netdev_ops;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* Reset card. Who knows what dain-bramaged state it was left in. */
+ {
+ unsigned long reset_start_time = jiffies;
+
+ outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET);
+
+ /* This looks like a horrible timing loop, but it should never take
+ more than a few cycles.
+ */
+ while ((inb(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
+ /* Limit wait: '2' avoids jiffy roll-over. */
+ if (jiffies - reset_start_time > 2) {
+ dev_err(&pdev->dev,
+ "Card failure (no reset ack).\n");
+ goto err_out_free_netdev;
+ }
+
+ outb(0xff, ioaddr + EN0_ISR); /* Ack all intr. */
+ }
+
+ /* Read the 16 bytes of station address PROM.
+ We must first initialize registers, similar to NS8390_init(eifdev, 0).
+ We can't reliably read the SAPROM address without this.
+ (I learned the hard way!). */
+ {
+ struct {unsigned char value, offset; } program_seq[] = {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
+ {0x49, EN0_DCFG}, /* Set word-wide access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {32, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
+ {0x00, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+ for (i = 0; i < ARRAY_SIZE(program_seq); i++)
+ outb(program_seq[i].value, ioaddr + program_seq[i].offset);
+
+ }
+
+ /* Note: all PCI cards have at least 16 bit access, so we don't have
+ to check for 8 bit cards. Most cards permit 32 bit access. */
+ if (flags & ONLY_32BIT_IO) {
+ for (i = 0; i < 4 ; i++)
+ ((u32 *)SA_prom)[i] = le32_to_cpu(inl(ioaddr + NE_DATAPORT));
+ } else
+ for(i = 0; i < 32 /*sizeof(SA_prom)*/; i++)
+ SA_prom[i] = inb(ioaddr + NE_DATAPORT);
+
+ /* We always set the 8390 registers for word mode. */
+ outb(0x49, ioaddr + EN0_DCFG);
+ start_page = NESM_START_PG;
+
+ stop_page = flags & STOP_PG_0x60 ? 0x60 : NESM_STOP_PG;
+
+ /* Set up the rest of the parameters. */
+ dev->irq = irq;
+ dev->base_addr = ioaddr;
+ pci_set_drvdata(pdev, dev);
+
+ ei_status.name = pci_clone_list[chip_idx].name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+ ei_status.word16 = 1;
+ ei_status.ne2k_flags = flags;
+ if (fnd_cnt < MAX_UNITS) {
+ if (full_duplex[fnd_cnt] > 0 || (options[fnd_cnt] & FORCE_FDX))
+ ei_status.ne2k_flags |= FORCE_FDX;
+ }
+
+ ei_status.rx_start_page = start_page + TX_PAGES;
+#ifdef PACKETBUF_MEMSIZE
+ /* Allow the packet buffer size to be overridden by know-it-alls. */
+ ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+#endif
+
+ ei_status.reset_8390 = &ne2k_pci_reset_8390;
+ ei_status.block_input = &ne2k_pci_block_input;
+ ei_status.block_output = &ne2k_pci_block_output;
+ ei_status.get_8390_hdr = &ne2k_pci_get_8390_hdr;
+ ei_status.priv = (unsigned long) pdev;
+
+ dev->ethtool_ops = &ne2k_pci_ethtool_ops;
+ NS8390_init(dev, 0);
+
+ memcpy(dev->dev_addr, SA_prom, dev->addr_len);
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ i = register_netdev(dev);
+ if (i)
+ goto err_out_free_netdev;
+
+ printk("%s: %s found at %#lx, IRQ %d, %pM.\n",
+ dev->name, pci_clone_list[chip_idx].name, ioaddr, dev->irq,
+ dev->dev_addr);
+
+ return 0;
+
+err_out_free_netdev:
+ free_netdev (dev);
+err_out_free_res:
+ release_region (ioaddr, NE_IO_EXTENT);
+ pci_set_drvdata (pdev, NULL);
+ return -ENODEV;
+
+}
+
+/*
+ * Magic incantation sequence for full duplex on the supported cards.
+ */
+static inline int set_realtek_fdx(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+
+ outb(0xC0 + E8390_NODMA, ioaddr + NE_CMD); /* Page 3 */
+ outb(0xC0, ioaddr + 0x01); /* Enable writes to CONFIG3 */
+ outb(0x40, ioaddr + 0x06); /* Enable full duplex */
+ outb(0x00, ioaddr + 0x01); /* Disable writes to CONFIG3 */
+ outb(E8390_PAGE0 + E8390_NODMA, ioaddr + NE_CMD); /* Page 0 */
+ return 0;
+}
+
+static inline int set_holtek_fdx(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+
+ outb(inb(ioaddr + 0x20) | 0x80, ioaddr + 0x20);
+ return 0;
+}
+
+static int ne2k_pci_set_fdx(struct net_device *dev)
+{
+ if (ei_status.ne2k_flags & REALTEK_FDX)
+ return set_realtek_fdx(dev);
+ else if (ei_status.ne2k_flags & HOLTEK_FDX)
+ return set_holtek_fdx(dev);
+
+ return -EOPNOTSUPP;
+}
+
+static int ne2k_pci_open(struct net_device *dev)
+{
+ int ret = request_irq(dev->irq, ei_interrupt, IRQF_SHARED, dev->name, dev);
+ if (ret)
+ return ret;
+
+ if (ei_status.ne2k_flags & FORCE_FDX)
+ ne2k_pci_set_fdx(dev);
+
+ ei_open(dev);
+ return 0;
+}
+
+static int ne2k_pci_close(struct net_device *dev)
+{
+ ei_close(dev);
+ free_irq(dev->irq, dev);
+ return 0;
+}
+
+/* Hard reset the card. This used to pause for the same period that an
+ 8390 reset command required, but that shouldn't be necessary. */
+static void ne2k_pci_reset_8390(struct net_device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+
+ if (debug > 1)
+ printk("%s: Resetting the 8390 t=%ld...",
+ dev->name, jiffies);
+
+ outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((inb(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2) {
+ printk("%s: ne2k_pci_reset_8390() did not complete.\n", dev->name);
+ break;
+ }
+ outb(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+
+ long nic_base = dev->base_addr;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne2k_pci_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + NE_CMD);
+ outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+ outb(0, nic_base + EN0_RCNTHI);
+ outb(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb(ring_page, nic_base + EN0_RSARHI);
+ outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
+ insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+ } else {
+ *(u32*)hdr = le32_to_cpu(inl(NE_BASE + NE_DATAPORT));
+ le16_to_cpus(&hdr->count);
+ }
+
+ outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you
+ are porting to a new ethercard, look at the packet driver source for hints.
+ The NEx000 doesn't share the on-board packet memory -- you have to put
+ the packet out through the "remote DMA" dataport using outb. */
+
+static void ne2k_pci_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ long nic_base = dev->base_addr;
+ char *buf = skb->data;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne2k_pci_block_input "
+ "[DMAstat:%d][irqlock:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ if (ei_status.ne2k_flags & ONLY_32BIT_IO)
+ count = (count + 3) & 0xFFFC;
+ outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + NE_CMD);
+ outb(count & 0xff, nic_base + EN0_RCNTLO);
+ outb(count >> 8, nic_base + EN0_RCNTHI);
+ outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
+ insw(NE_BASE + NE_DATAPORT, buf, count >> 1);
+ if (count & 0x01) {
+ buf[count-1] = inb(NE_BASE + NE_DATAPORT);
+ }
+ } else {
+ insl(NE_BASE + NE_DATAPORT, buf, count>>2);
+ if (count & 3) {
+ buf += count & ~3;
+ if (count & 2) {
+ __le16 *b = (__le16 *)buf;
+
+ *b++ = cpu_to_le16(inw(NE_BASE + NE_DATAPORT));
+ buf = (char *)b;
+ }
+ if (count & 1)
+ *buf = inb(NE_BASE + NE_DATAPORT);
+ }
+ }
+
+ outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+static void ne2k_pci_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ long nic_base = NE_BASE;
+ unsigned long dma_start;
+
+ /* On little-endian it's always safe to round the count up for
+ word writes. */
+ if (ei_status.ne2k_flags & ONLY_32BIT_IO)
+ count = (count + 3) & 0xFFFC;
+ else
+ if (count & 0x01)
+ count++;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne2k_pci_block_output."
+ "[DMAstat:%d][irqlock:%d]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
+
+#ifdef NE8390_RW_BUGFIX
+ /* Handle the read-before-write bug the same way as the
+ Crynwr packet driver -- the NatSemi method doesn't work.
+ Actually this doesn't always work either, but if you have
+ problems with your NEx000 this is better than nothing! */
+ outb(0x42, nic_base + EN0_RCNTLO);
+ outb(0x00, nic_base + EN0_RCNTHI);
+ outb(0x42, nic_base + EN0_RSARLO);
+ outb(0x00, nic_base + EN0_RSARHI);
+ outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+#endif
+ outb(ENISR_RDC, nic_base + EN0_ISR);
+
+ /* Now the normal output. */
+ outb(count & 0xff, nic_base + EN0_RCNTLO);
+ outb(count >> 8, nic_base + EN0_RCNTHI);
+ outb(0x00, nic_base + EN0_RSARLO);
+ outb(start_page, nic_base + EN0_RSARHI);
+ outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
+ if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
+ outsw(NE_BASE + NE_DATAPORT, buf, count>>1);
+ } else {
+ outsl(NE_BASE + NE_DATAPORT, buf, count>>2);
+ if (count & 3) {
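+ /* Unlike block_input there is no trailing single-byte case
+ here: count was already rounded up to a multiple of two
+ (or of four for ONLY_32BIT_IO) at the top of the function. */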
+ buf += count & ~3;
+ if (count & 2) {
+ __le16 *b = (__le16 *)buf;
+
+ outw(le16_to_cpu(*b++), NE_BASE + NE_DATAPORT);
+ buf = (char *)b;
+ }
+ }
+ }
+
+ dma_start = jiffies;
+
+ while ((inb(nic_base + EN0_ISR) & ENISR_RDC) == 0)
+ if (jiffies - dma_start > 2) { /* Avoid clock roll-over. */
+ printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
+ ne2k_pci_reset_8390(dev);
+ NS8390_init(dev,1);
+ break;
+ }
+
+ outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+static void ne2k_pci_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct ei_device *ei = netdev_priv(dev);
+ struct pci_dev *pci_dev = (struct pci_dev *) ei->priv;
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(pci_dev));
+}
+
+static const struct ethtool_ops ne2k_pci_ethtool_ops = {
+ .get_drvinfo = ne2k_pci_get_drvinfo,
+};
+
+static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ BUG_ON(!dev);
+ unregister_netdev(dev);
+ release_region(dev->base_addr, NE_IO_EXTENT);
+ free_netdev(dev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM
+static int ne2k_pci_suspend (struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+
+ netif_device_detach(dev);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int ne2k_pci_resume (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ int rc;
+
+ pci_set_power_state(pdev, 0);
+ pci_restore_state(pdev);
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ NS8390_init(dev, 1);
+ netif_device_attach(dev);
+
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+
+static struct pci_driver ne2k_driver = {
+ .name = DRV_NAME,
+ .probe = ne2k_pci_init_one,
+ .remove = __devexit_p(ne2k_pci_remove_one),
+ .id_table = ne2k_pci_tbl,
+#ifdef CONFIG_PM
+ .suspend = ne2k_pci_suspend,
+ .resume = ne2k_pci_resume,
+#endif /* CONFIG_PM */
+
+};
+
+
+static int __init ne2k_pci_init(void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk(version);
+#endif
+ return pci_register_driver(&ne2k_driver);
+}
+
+
+static void __exit ne2k_pci_cleanup(void)
+{
+ pci_unregister_driver (&ne2k_driver);
+}
+
+module_init(ne2k_pci_init);
+module_exit(ne2k_pci_cleanup);
diff --git a/drivers/net/ethernet/8390/ne3210.c b/drivers/net/ethernet/8390/ne3210.c
new file mode 100644
index 000000000000..243ed2aee88e
--- /dev/null
+++ b/drivers/net/ethernet/8390/ne3210.c
@@ -0,0 +1,347 @@
+/*
+ ne3210.c
+
+ Linux driver for Novell NE3210 EISA Network Adapter
+
+ Copyright (C) 1998, Paul Gortmaker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Information and Code Sources:
+
+ 1) Based upon my other EISA 8390 drivers (lne390, es3210, smc-ultra32)
+ 2) The existing myriad of other Linux 8390 drivers by Donald Becker.
+ 3) Info for getting IRQ and sh-mem gleaned from the EISA cfg file
+
+ The NE3210 is an EISA shared memory NS8390 implementation. Shared
+ memory address > 1MB should work with this driver.
+
+ Note that the .cfg file (3/11/93, v1.0) has AUI and BNC switched
+ around (or perhaps there are some defective/backwards cards ???)
+
+ This driver WILL NOT WORK FOR THE NE3200 - it is completely different
+ and does not use an 8390 at all.
+
+ Updated to EISA probing API 5/2003 by Marc Zyngier.
+*/
+
+#include <linux/module.h>
+#include <linux/eisa.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "8390.h"
+
+#define DRV_NAME "ne3210"
+
+static void ne3210_reset_8390(struct net_device *dev);
+
+static void ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page);
+static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset);
+static void ne3210_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page);
+
+#define NE3210_START_PG 0x00 /* First page of TX buffer */
+#define NE3210_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+#define NE3210_IO_EXTENT 0x20
+#define NE3210_SA_PROM 0x16 /* Start of e'net addr. */
+#define NE3210_RESET_PORT 0xc84
+#define NE3210_NIC_OFFSET 0x00 /* Hello, the 8390 is *here* */
+
+#define NE3210_ADDR0 0x00 /* 3 byte vendor prefix */
+#define NE3210_ADDR1 0x00
+#define NE3210_ADDR2 0x1b
+
+#define NE3210_CFG1 0xc84 /* NB: 0xc84 is also "reset" port. */
+#define NE3210_CFG2 0xc90
+#define NE3210_CFG_EXTENT (NE3210_CFG2 - NE3210_CFG1 + 1)
+
+/*
+ * You can OR any of the following bits together and assign it
+ * to NE3210_DEBUG to get verbose driver info during operation.
+ * Currently only the probe one is implemented.
+ */
+
+#define NE3210_D_PROBE 0x01
+#define NE3210_D_RX_PKT 0x02
+#define NE3210_D_TX_PKT 0x04
+#define NE3210_D_IRQ 0x08
+
+#define NE3210_DEBUG 0x0
+
+static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
+static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0};
+static const char *ifmap[] __initdata = {"UTP", "?", "BNC", "AUI"};
+static int ifmap_val[] __initdata = {
+ IF_PORT_10BASET,
+ IF_PORT_UNKNOWN,
+ IF_PORT_10BASE2,
+ IF_PORT_AUI,
+};
+
+static int __init ne3210_eisa_probe (struct device *device)
+{
+ unsigned long ioaddr, phys_mem;
+ int i, retval, port_index;
+ struct eisa_device *edev = to_eisa_device (device);
+ struct net_device *dev;
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (!(dev = alloc_ei_netdev ())) {
+ printk ("ne3210.c: unable to allocate memory for dev!\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(dev, device);
+ dev_set_drvdata(device, dev);
+ ioaddr = edev->base_addr;
+
+ if (!request_region(ioaddr, NE3210_IO_EXTENT, DRV_NAME)) {
+ retval = -EBUSY;
+ goto out;
+ }
+
+ if (!request_region(ioaddr + NE3210_CFG1,
+ NE3210_CFG_EXTENT, DRV_NAME)) {
+ retval = -EBUSY;
+ goto out1;
+ }
+
+#if NE3210_DEBUG & NE3210_D_PROBE
+ printk("ne3210-debug: probe at %#x, ID %s\n", ioaddr, edev->id.sig);
+ printk("ne3210-debug: config regs: %#x %#x\n",
+ inb(ioaddr + NE3210_CFG1), inb(ioaddr + NE3210_CFG2));
+#endif
+
+ port_index = inb(ioaddr + NE3210_CFG2) >> 6;
+ for(i = 0; i < ETHER_ADDR_LEN; i++)
+ dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i);
+ printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %pM.\n",
+ edev->slot, ifmap[port_index], dev->dev_addr);
+
+ /* Snarf the interrupt now. CFG file has them all listed as `edge' with share=NO */
+ dev->irq = irq_map[(inb(ioaddr + NE3210_CFG2) >> 3) & 0x07];
+ printk("ne3210.c: using IRQ %d, ", dev->irq);
+
+ retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev);
+ if (retval) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ goto out2;
+ }
+
+ phys_mem = shmem_map[inb(ioaddr + NE3210_CFG2) & 0x07] * 0x1000;
+
+ /*
+ BEWARE!! Some dain-bramaged EISA SCUs will allow you to put
+ the card mem within the region covered by `normal' RAM !!!
+ */
+ if (phys_mem > 1024*1024) { /* phys addr > 1MB */
+ if (phys_mem < virt_to_phys(high_memory)) {
+ printk(KERN_CRIT "ne3210.c: Card RAM overlaps with normal memory!!!\n");
+ printk(KERN_CRIT "ne3210.c: Use EISA SCU to set card memory below 1MB,\n");
+ printk(KERN_CRIT "ne3210.c: or to an address above 0x%llx.\n",
+ (u64)virt_to_phys(high_memory));
+ printk(KERN_CRIT "ne3210.c: Driver NOT installed.\n");
+ retval = -EINVAL;
+ goto out3;
+ }
+ }
+
+ if (!request_mem_region (phys_mem, NE3210_STOP_PG*0x100, DRV_NAME)) {
+ printk ("ne3210.c: Unable to request shared memory at physical address %#lx\n",
+ phys_mem);
+ retval = -EBUSY;
+ goto out3;
+ }
+
+ printk("%dkB memory at physical address %#lx\n",
+ NE3210_STOP_PG/4, phys_mem);
+
+ ei_status.mem = ioremap(phys_mem, NE3210_STOP_PG*0x100);
+ if (!ei_status.mem) {
+ printk(KERN_ERR "ne3210.c: Unable to remap card memory !!\n");
+ printk(KERN_ERR "ne3210.c: Driver NOT installed.\n");
+ retval = -EAGAIN;
+ goto out4;
+ }
+ printk("ne3210.c: remapped %dkB card memory to virtual address %p\n",
+ NE3210_STOP_PG/4, ei_status.mem);
+ dev->mem_start = (unsigned long)ei_status.mem;
+ dev->mem_end = dev->mem_start + (NE3210_STOP_PG - NE3210_START_PG)*256;
+
+ /* The 8390 offset is zero for the NE3210 */
+ dev->base_addr = ioaddr;
+
+ ei_status.name = "NE3210";
+ ei_status.tx_start_page = NE3210_START_PG;
+ ei_status.rx_start_page = NE3210_START_PG + TX_PAGES;
+ ei_status.stop_page = NE3210_STOP_PG;
+ ei_status.word16 = 1;
+ ei_status.priv = phys_mem;
+
+ if (ei_debug > 0)
+ printk("ne3210 loaded.\n");
+
+ ei_status.reset_8390 = &ne3210_reset_8390;
+ ei_status.block_input = &ne3210_block_input;
+ ei_status.block_output = &ne3210_block_output;
+ ei_status.get_8390_hdr = &ne3210_get_8390_hdr;
+
+ dev->netdev_ops = &ei_netdev_ops;
+
+ dev->if_port = ifmap_val[port_index];
+
+ if ((retval = register_netdev (dev)))
+ goto out5;
+
+ NS8390_init(dev, 0);
+ return 0;
+
+ out5:
+ iounmap(ei_status.mem);
+ out4:
+ release_mem_region (phys_mem, NE3210_STOP_PG*0x100);
+ out3:
+ free_irq (dev->irq, dev);
+ out2:
+ release_region (ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT);
+ out1:
+ release_region (ioaddr, NE3210_IO_EXTENT);
+ out:
+ free_netdev (dev);
+
+ return retval;
+}
+
+static int __devexit ne3210_eisa_remove (struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+ unsigned long ioaddr = to_eisa_device (device)->base_addr;
+
+ unregister_netdev (dev);
+ iounmap(ei_status.mem);
+ release_mem_region (ei_status.priv, NE3210_STOP_PG*0x100);
+ free_irq (dev->irq, dev);
+ release_region (ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT);
+ release_region (ioaddr, NE3210_IO_EXTENT);
+ free_netdev (dev);
+
+ return 0;
+}
+
+/*
+ * Reset by toggling the "Board Enable" bits (bit 2 and 0).
+ */
+
+static void ne3210_reset_8390(struct net_device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+
+ outb(0x04, ioaddr + NE3210_RESET_PORT);
+ if (ei_debug > 1) printk("%s: resetting the NE3210...", dev->name);
+
+ mdelay(2);
+
+ ei_status.txing = 0;
+ outb(0x01, ioaddr + NE3210_RESET_PORT);
+ if (ei_debug > 1) printk("reset done\n");
+}
+
+/*
+ * Note: In the following three functions is the implicit assumption
+ * that the associated memcpy will only use "rep; movsl" as long as
+ * we keep the counts as some multiple of doublewords. This is a
+ * requirement of the hardware, and also prevents us from using
+ * eth_io_copy_and_sum() since we can't guarantee it will limit
+ * itself to doubleword access.
+ */
+
+/*
+ * Grab the 8390 specific header. Similar to the block_input routine, but
+ * we don't need to be concerned with ring wrap as the header will be at
+ * the start of a page, so we optimize accordingly. (A single doubleword.)
+ */
+
+static void
+ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ void __iomem *hdr_start = ei_status.mem + ((ring_page - NE3210_START_PG)<<8);
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+ hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */
+}
+
+/*
+ * Block input and output are easy on shared memory ethercards, the only
+ * complication is when the ring buffer wraps. The count will already
+ * be rounded up to a doubleword value via ne3210_get_8390_hdr() above.
+ */
+
+static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb,
+ int ring_offset)
+{
+ void __iomem *start = ei_status.mem + ring_offset - NE3210_START_PG*256;
+
+ if (ring_offset + count > NE3210_STOP_PG*256) {
+ /* Packet wraps over end of ring buffer. */
+ int semi_count = NE3210_STOP_PG*256 - ring_offset;
+ memcpy_fromio(skb->data, start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count,
+ ei_status.mem + TX_PAGES*256, count);
+ } else {
+ /* Packet is in one chunk. */
+ memcpy_fromio(skb->data, start, count);
+ }
+}
+
+static void ne3210_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ void __iomem *shmem = ei_status.mem + ((start_page - NE3210_START_PG)<<8);
+
+ count = (count + 3) & ~3; /* Round up to doubleword */
+ memcpy_toio(shmem, buf, count);
+}
+
+static struct eisa_device_id ne3210_ids[] = {
+ { "EGL0101" },
+ { "NVL1801" },
+ { "" },
+};
+MODULE_DEVICE_TABLE(eisa, ne3210_ids);
+
+static struct eisa_driver ne3210_eisa_driver = {
+ .id_table = ne3210_ids,
+ .driver = {
+ .name = "ne3210",
+ .probe = ne3210_eisa_probe,
+ .remove = __devexit_p (ne3210_eisa_remove),
+ },
+};
+
+MODULE_DESCRIPTION("NE3210 EISA Ethernet driver");
+MODULE_LICENSE("GPL");
+
+static int ne3210_init(void)
+{
+ return eisa_driver_register (&ne3210_eisa_driver);
+}
+
+static void ne3210_cleanup(void)
+{
+ eisa_driver_unregister (&ne3210_eisa_driver);
+}
+
+module_init (ne3210_init);
+module_exit (ne3210_cleanup);
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
new file mode 100644
index 000000000000..053b2551a72d
--- /dev/null
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -0,0 +1,1710 @@
+/*======================================================================
+
+ A PCMCIA ethernet driver for NS8390-based cards
+
+ This driver supports the D-Link DE-650 and Linksys EthernetCard
+ cards, the newer D-Link and Linksys combo cards, Accton EN2212
+ cards, the RPTI EP400, and the PreMax PE-200 in non-shared-memory
+ mode, and the IBM Credit Card Adapter, the NE4100, the Thomas
+ Conrad ethernet card, and the Kingston KNE-PCM/x in shared-memory
+ mode. It will also handle the Socket EA card in either mode.
+
+ Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
+
+ pcnet_cs.c 1.153 2003/11/09 18:53:09
+
+ The network driver code is based on Donald Becker's NE2000 code:
+
+ Written 1992,1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+ Donald Becker may be reached at becker@scyld.com
+
+ Based also on Keith Moore's changes to Don Becker's code, for IBM
+ CCAE support. Drivers merged back together, and shared-memory
+ Socket EA support added, by Ken Raeburn, September 1995.
+
+======================================================================*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/log2.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include "8390.h"
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/cisreg.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#define PCNET_CMD 0x00
+#define PCNET_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define PCNET_RESET 0x1f /* Issue a read to reset, a write to clear. */
+#define PCNET_MISC 0x18 /* For IBM CCAE and Socket EA cards */
+
+#define PCNET_START_PG 0x40 /* First page of TX buffer */
+#define PCNET_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+/* Socket EA cards have a larger packet buffer */
+#define SOCKET_START_PG 0x01
+#define SOCKET_STOP_PG 0xff
+
+#define PCNET_RDC_TIMEOUT (2*HZ/100) /* Max wait in jiffies for Tx RDC */
+
+static const char *if_names[] = { "auto", "10baseT", "10base2"};
+
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("NE2000 compatible PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
+
+INT_MODULE_PARM(if_port, 1); /* Transceiver type */
+INT_MODULE_PARM(use_big_buf, 1); /* use 64K packet buffer? */
+INT_MODULE_PARM(mem_speed, 0); /* shared mem speed, in ns */
+INT_MODULE_PARM(delay_output, 0); /* pause after xmit? */
+INT_MODULE_PARM(delay_time, 4); /* in usec */
+INT_MODULE_PARM(use_shmem, -1); /* use shared memory? */
+INT_MODULE_PARM(full_duplex, 0); /* full duplex? */
+
+/* Ugh! Let the user hardwire the hardware address for queer cards */
+static int hw_addr[6] = { 0, /* ... */ };
+module_param_array(hw_addr, int, NULL, 0);
+
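+/*
+ * For example (the values here are purely illustrative), a station
+ * address can be forced at load time with something like:
+ *   modprobe pcnet_cs hw_addr=0x00,0x80,0xc8,0x12,0x34,0x56
+ */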
+/*====================================================================*/
+
+static void mii_phy_probe(struct net_device *dev);
+static int pcnet_config(struct pcmcia_device *link);
+static void pcnet_release(struct pcmcia_device *link);
+static int pcnet_open(struct net_device *dev);
+static int pcnet_close(struct net_device *dev);
+static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static irqreturn_t ei_irq_wrapper(int irq, void *dev_id);
+static void ei_watchdog(u_long arg);
+static void pcnet_reset_8390(struct net_device *dev);
+static int set_config(struct net_device *dev, struct ifmap *map);
+static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
+ int stop_pg, int cm_offset);
+static int setup_dma_config(struct pcmcia_device *link, int start_pg,
+ int stop_pg);
+
+static void pcnet_detach(struct pcmcia_device *p_dev);
+
+/*====================================================================*/
+
+typedef struct hw_info_t {
+ u_int offset;
+ u_char a0, a1, a2;
+ u_int flags;
+} hw_info_t;
+
+#define DELAY_OUTPUT 0x01
+#define HAS_MISC_REG 0x02
+#define USE_BIG_BUF 0x04
+#define HAS_IBM_MISC 0x08
+#define IS_DL10019 0x10
+#define IS_DL10022 0x20
+#define HAS_MII 0x40
+#define USE_SHMEM 0x80 /* autodetected */
+
+#define AM79C9XX_HOME_PHY 0x00006B90 /* HomePNA PHY */
+#define AM79C9XX_ETH_PHY 0x00006B70 /* 10baseT PHY */
+#define MII_PHYID_REV_MASK 0xfffffff0
+#define MII_PHYID_REG1 0x02
+#define MII_PHYID_REG2 0x03
+
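+/*
+ * Each entry gives the offset of the station address in the card's
+ * attribute memory (or PROM) and the first three bytes of that
+ * address, which serve as a signature; get_hwinfo() and get_prom()
+ * below compare the bytes found at even offsets against a0/a1/a2.
+ */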
+static hw_info_t hw_info[] = {
+ { /* Accton EN2212 */ 0x0ff0, 0x00, 0x00, 0xe8, DELAY_OUTPUT },
+ { /* Allied Telesis LA-PCM */ 0x0ff0, 0x00, 0x00, 0xf4, 0 },
+ { /* APEX MultiCard */ 0x03f4, 0x00, 0x20, 0xe5, 0 },
+ { /* ASANTE FriendlyNet */ 0x4910, 0x00, 0x00, 0x94,
+ DELAY_OUTPUT | HAS_IBM_MISC },
+ { /* Danpex EN-6200P2 */ 0x0110, 0x00, 0x40, 0xc7, 0 },
+ { /* DataTrek NetCard */ 0x0ff0, 0x00, 0x20, 0xe8, 0 },
+ { /* Dayna CommuniCard E */ 0x0110, 0x00, 0x80, 0x19, 0 },
+ { /* D-Link DE-650 */ 0x0040, 0x00, 0x80, 0xc8, 0 },
+ { /* EP-210 Ethernet */ 0x0110, 0x00, 0x40, 0x33, 0 },
+ { /* EP4000 Ethernet */ 0x01c0, 0x00, 0x00, 0xb4, 0 },
+ { /* Epson EEN10B */ 0x0ff0, 0x00, 0x00, 0x48,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* ELECOM Laneed LD-CDWA */ 0xb8, 0x08, 0x00, 0x42, 0 },
+ { /* Hypertec Ethernet */ 0x01c0, 0x00, 0x40, 0x4c, 0 },
+ { /* IBM CCAE */ 0x0ff0, 0x08, 0x00, 0x5a,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* IBM CCAE */ 0x0ff0, 0x00, 0x04, 0xac,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* IBM CCAE */ 0x0ff0, 0x00, 0x06, 0x29,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* IBM FME */ 0x0374, 0x08, 0x00, 0x5a,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* IBM FME */ 0x0374, 0x00, 0x04, 0xac,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* Kansai KLA-PCM/T */ 0x0ff0, 0x00, 0x60, 0x87,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* NSC DP83903 */ 0x0374, 0x08, 0x00, 0x17,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* NSC DP83903 */ 0x0374, 0x00, 0xc0, 0xa8,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* NSC DP83903 */ 0x0374, 0x00, 0xa0, 0xb0,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* NSC DP83903 */ 0x0198, 0x00, 0x20, 0xe0,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* I-O DATA PCLA/T */ 0x0ff0, 0x00, 0xa0, 0xb0, 0 },
+ { /* Katron PE-520 */ 0x0110, 0x00, 0x40, 0xf6, 0 },
+ { /* Kingston KNE-PCM/x */ 0x0ff0, 0x00, 0xc0, 0xf0,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* Kingston KNE-PCM/x */ 0x0ff0, 0xe2, 0x0c, 0x0f,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* Kingston KNE-PC2 */ 0x0180, 0x00, 0xc0, 0xf0, 0 },
+ { /* Maxtech PCN2000 */ 0x5000, 0x00, 0x00, 0xe8, 0 },
+ { /* NDC Instant-Link */ 0x003a, 0x00, 0x80, 0xc6, 0 },
+ { /* NE2000 Compatible */ 0x0ff0, 0x00, 0xa0, 0x0c, 0 },
+ { /* Network General Sniffer */ 0x0ff0, 0x00, 0x00, 0x65,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* Panasonic VEL211 */ 0x0ff0, 0x00, 0x80, 0x45,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* PreMax PE-200 */ 0x07f0, 0x00, 0x20, 0xe0, 0 },
+ { /* RPTI EP400 */ 0x0110, 0x00, 0x40, 0x95, 0 },
+ { /* SCM Ethernet */ 0x0ff0, 0x00, 0x20, 0xcb, 0 },
+ { /* Socket EA */ 0x4000, 0x00, 0xc0, 0x1b,
+ DELAY_OUTPUT | HAS_MISC_REG | USE_BIG_BUF },
+ { /* Socket LP-E CF+ */ 0x01c0, 0x00, 0xc0, 0x1b, 0 },
+ { /* SuperSocket RE450T */ 0x0110, 0x00, 0xe0, 0x98, 0 },
+ { /* Volktek NPL-402CT */ 0x0060, 0x00, 0x40, 0x05, 0 },
+ { /* NEC PC-9801N-J12 */ 0x0ff0, 0x00, 0x00, 0x4c, 0 },
+ { /* PCMCIA Technology OEM */ 0x01c8, 0x00, 0xa0, 0x0c, 0 }
+};
+
+#define NR_INFO ARRAY_SIZE(hw_info)
+
+static hw_info_t default_info = { 0, 0, 0, 0, 0 };
+static hw_info_t dl10019_info = { 0, 0, 0, 0, IS_DL10019|HAS_MII };
+static hw_info_t dl10022_info = { 0, 0, 0, 0, IS_DL10022|HAS_MII };
+
+typedef struct pcnet_dev_t {
+ struct pcmcia_device *p_dev;
+ u_int flags;
+ void __iomem *base;
+ struct timer_list watchdog;
+ int stale, fast_poll;
+ u_char phy_id;
+ u_char eth_phy, pna_phy;
+ u_short link_status;
+ u_long mii_reset;
+} pcnet_dev_t;
+
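+/*
+ * __alloc_ei_netdev(sizeof(pcnet_dev_t)) reserves room for both the
+ * 8390 state and our private data, so pcnet_dev_t sits immediately
+ * after struct ei_device in the netdev private area.
+ */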
+static inline pcnet_dev_t *PRIV(struct net_device *dev)
+{
+ char *p = netdev_priv(dev);
+ return (pcnet_dev_t *)(p + sizeof(struct ei_device));
+}
+
+static const struct net_device_ops pcnet_netdev_ops = {
+ .ndo_open = pcnet_open,
+ .ndo_stop = pcnet_close,
+ .ndo_set_config = set_config,
+ .ndo_start_xmit = ei_start_xmit,
+ .ndo_get_stats = ei_get_stats,
+ .ndo_do_ioctl = ei_ioctl,
+ .ndo_set_rx_mode = ei_set_multicast_list,
+ .ndo_tx_timeout = ei_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ei_poll,
+#endif
+};
+
+static int pcnet_probe(struct pcmcia_device *link)
+{
+ pcnet_dev_t *info;
+ struct net_device *dev;
+
+ dev_dbg(&link->dev, "pcnet_attach()\n");
+
+ /* Create new ethernet device */
+ dev = __alloc_ei_netdev(sizeof(pcnet_dev_t));
+ if (!dev) return -ENOMEM;
+ info = PRIV(dev);
+ info->p_dev = link;
+ link->priv = dev;
+
+ link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+
+ dev->netdev_ops = &pcnet_netdev_ops;
+
+ return pcnet_config(link);
+} /* pcnet_attach */
+
+static void pcnet_detach(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ dev_dbg(&link->dev, "pcnet_detach\n");
+
+ unregister_netdev(dev);
+
+ pcnet_release(link);
+
+ free_netdev(dev);
+} /* pcnet_detach */
+
+/*======================================================================
+
+ This probes for a card's hardware address, for card types that
+ encode this information in their CIS.
+
+======================================================================*/
+
+static hw_info_t *get_hwinfo(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ u_char __iomem *base, *virt;
+ int i, j;
+
+ /* Allocate a small memory window */
+ link->resource[2]->flags |= WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
+ link->resource[2]->start = 0; link->resource[2]->end = 0;
+ i = pcmcia_request_window(link, link->resource[2], 0);
+ if (i != 0)
+ return NULL;
+
+ virt = ioremap(link->resource[2]->start,
+ resource_size(link->resource[2]));
+ for (i = 0; i < NR_INFO; i++) {
+ pcmcia_map_mem_page(link, link->resource[2],
+ hw_info[i].offset & ~(resource_size(link->resource[2])-1));
+ base = &virt[hw_info[i].offset & (resource_size(link->resource[2])-1)];
+ if ((readb(base+0) == hw_info[i].a0) &&
+ (readb(base+2) == hw_info[i].a1) &&
+ (readb(base+4) == hw_info[i].a2)) {
+ for (j = 0; j < 6; j++)
+ dev->dev_addr[j] = readb(base + (j<<1));
+ break;
+ }
+ }
+
+ iounmap(virt);
+ j = pcmcia_release_window(link, link->resource[2]);
+ return (i < NR_INFO) ? hw_info+i : NULL;
+} /* get_hwinfo */
+
+/*======================================================================
+
+ This probes for a card's hardware address by reading the PROM.
+ It checks the address against a list of known types, then falls
+ back to a simple NE2000 clone signature check.
+
+======================================================================*/
+
+static hw_info_t *get_prom(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ unsigned int ioaddr = dev->base_addr;
+ u_char prom[32];
+ int i, j;
+
+ /* This is lifted straight from drivers/net/ne.c */
+ struct {
+ u_char value, offset;
+ } program_seq[] = {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
+ {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {32, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
+ {0x00, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+
+ pcnet_reset_8390(dev);
+ mdelay(10);
+
+ for (i = 0; i < ARRAY_SIZE(program_seq); i++)
+ outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
+
+ for (i = 0; i < 32; i++)
+ prom[i] = inb(ioaddr + PCNET_DATAPORT);
+ for (i = 0; i < NR_INFO; i++) {
+ if ((prom[0] == hw_info[i].a0) &&
+ (prom[2] == hw_info[i].a1) &&
+ (prom[4] == hw_info[i].a2))
+ break;
+ }
+ if ((i < NR_INFO) || ((prom[28] == 0x57) && (prom[30] == 0x57))) {
+ for (j = 0; j < 6; j++)
+ dev->dev_addr[j] = prom[j<<1];
+ return (i < NR_INFO) ? hw_info+i : &default_info;
+ }
+ return NULL;
+} /* get_prom */
+
+/*======================================================================
+
+ For DL10019 based cards, like the Linksys EtherFast
+
+======================================================================*/
+
+static hw_info_t *get_dl10019(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ int i;
+ u_char sum;
+
+ for (sum = 0, i = 0x14; i < 0x1c; i++)
+ sum += inb_p(dev->base_addr + i);
+ if (sum != 0xff)
+ return NULL;
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb_p(dev->base_addr + 0x14 + i);
+ i = inb(dev->base_addr + 0x1f);
+ return ((i == 0x91)||(i == 0x99)) ? &dl10022_info : &dl10019_info;
+}
+
+/*======================================================================
+
+ For Asix AX88190 based cards
+
+======================================================================*/
+
+static hw_info_t *get_ax88190(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ unsigned int ioaddr = dev->base_addr;
+ int i, j;
+
+ /* Not much of a test, but the alternatives are messy */
+ if (link->config_base != 0x03c0)
+ return NULL;
+
+ outb_p(0x01, ioaddr + EN0_DCFG); /* Set word-wide access. */
+ outb_p(0x00, ioaddr + EN0_RSARLO); /* DMA starting at 0x0400. */
+ outb_p(0x04, ioaddr + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, ioaddr + E8390_CMD);
+
+ for (i = 0; i < 6; i += 2) {
+ j = inw(ioaddr + PCNET_DATAPORT);
+ dev->dev_addr[i] = j & 0xff;
+ dev->dev_addr[i+1] = j >> 8;
+ }
+ return NULL;
+}
+
+/*======================================================================
+
+ This should be totally unnecessary... but when we can't figure
+ out the hardware address any other way, we'll let the user hard
+ wire it when the module is initialized.
+
+======================================================================*/
+
+static hw_info_t *get_hwired(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ if (hw_addr[i] != 0) break;
+ if (i == 6)
+ return NULL;
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = hw_addr[i];
+
+ return &default_info;
+} /* get_hwired */
+
+static int try_io_port(struct pcmcia_device *link)
+{
+ int j, ret;
+ link->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ link->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
+ if (link->resource[0]->end == 32) {
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+ if (link->resource[1]->end > 0) {
+ /* for master/slave multifunction cards */
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
+ }
+ } else {
+ /* This should be two 16-port windows */
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_16;
+ }
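+ /*
+ * No base given: scan the traditional NE2000 range. The XOR
+ * with 0x300 makes the probe start at 0x300 and work through
+ * 0x3e0, then fall back to the 0x200, 0x100 and 0x000 ranges.
+ */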
+ if (link->resource[0]->start == 0) {
+ for (j = 0; j < 0x400; j += 0x20) {
+ link->resource[0]->start = j ^ 0x300;
+ link->resource[1]->start = (j ^ 0x300) + 0x10;
+ link->io_lines = 16;
+ ret = pcmcia_request_io(link);
+ if (ret == 0)
+ return ret;
+ }
+ return ret;
+ } else {
+ return pcmcia_request_io(link);
+ }
+}
+
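+/*
+ * priv_data is a small bit mask shared with pcnet_try_config(): bit 0
+ * is the "second try" flag (force 16 I/O lines) and bit 4 is returned
+ * to the caller as the has_shmem hint, i.e. whether the configuration
+ * offers a memory window of at least 16kB.
+ */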
+static int pcnet_confcheck(struct pcmcia_device *p_dev, void *priv_data)
+{
+ int *priv = priv_data;
+ int try = (*priv & 0x1);
+
+ *priv &= (p_dev->resource[2]->end >= 0x4000) ? 0x10 : ~0x10;
+
+ if (p_dev->config_index == 0)
+ return -EINVAL;
+
+ if (p_dev->resource[0]->end + p_dev->resource[1]->end < 32)
+ return -EINVAL;
+
+ if (try)
+ p_dev->io_lines = 16;
+ return try_io_port(p_dev);
+}
+
+static hw_info_t *pcnet_try_config(struct pcmcia_device *link,
+ int *has_shmem, int try)
+{
+ struct net_device *dev = link->priv;
+ hw_info_t *local_hw_info;
+ pcnet_dev_t *info = PRIV(dev);
+ int priv = try;
+ int ret;
+
+ ret = pcmcia_loop_config(link, pcnet_confcheck, &priv);
+ if (ret) {
+ dev_warn(&link->dev, "no useable port range found\n");
+ return NULL;
+ }
+ *has_shmem = (priv & 0x10);
+
+ if (!link->irq)
+ return NULL;
+
+ if (resource_size(link->resource[1]) == 8)
+ link->config_flags |= CONF_ENABLE_SPKR;
+
+ if ((link->manf_id == MANFID_IBM) &&
+ (link->card_id == PRODID_IBM_HOME_AND_AWAY))
+ link->config_index |= 0x10;
+
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ return NULL;
+
+ dev->irq = link->irq;
+ dev->base_addr = link->resource[0]->start;
+
+ if (info->flags & HAS_MISC_REG) {
+ if ((if_port == 1) || (if_port == 2))
+ dev->if_port = if_port;
+ else
+ dev_notice(&link->dev, "invalid if_port requested\n");
+ } else
+ dev->if_port = 0;
+
+ if ((link->config_base == 0x03c0) &&
+ (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
+ dev_info(&link->dev,
+ "this is an AX88190 card - use axnet_cs instead.\n");
+ return NULL;
+ }
+
+ local_hw_info = get_hwinfo(link);
+ if (!local_hw_info)
+ local_hw_info = get_prom(link);
+ if (!local_hw_info)
+ local_hw_info = get_dl10019(link);
+ if (!local_hw_info)
+ local_hw_info = get_ax88190(link);
+ if (!local_hw_info)
+ local_hw_info = get_hwired(link);
+
+ return local_hw_info;
+}
+
+static int pcnet_config(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ pcnet_dev_t *info = PRIV(dev);
+ int start_pg, stop_pg, cm_offset;
+ int has_shmem = 0;
+ hw_info_t *local_hw_info;
+
+ dev_dbg(&link->dev, "pcnet_config\n");
+
+ local_hw_info = pcnet_try_config(link, &has_shmem, 0);
+ if (!local_hw_info) {
+ /* check whether forcing io_lines to 16 helps... */
+ pcmcia_disable_device(link);
+ local_hw_info = pcnet_try_config(link, &has_shmem, 1);
+ if (local_hw_info == NULL) {
+ dev_notice(&link->dev, "unable to read hardware net"
+ " address for io base %#3lx\n", dev->base_addr);
+ goto failed;
+ }
+ }
+
+ info->flags = local_hw_info->flags;
+ /* Check for user overrides */
+ info->flags |= (delay_output) ? DELAY_OUTPUT : 0;
+ if ((link->manf_id == MANFID_SOCKET) &&
+ ((link->card_id == PRODID_SOCKET_LPE) ||
+ (link->card_id == PRODID_SOCKET_LPE_CF) ||
+ (link->card_id == PRODID_SOCKET_EIO)))
+ info->flags &= ~USE_BIG_BUF;
+ if (!use_big_buf)
+ info->flags &= ~USE_BIG_BUF;
+
+ if (info->flags & USE_BIG_BUF) {
+ start_pg = SOCKET_START_PG;
+ stop_pg = SOCKET_STOP_PG;
+ cm_offset = 0x10000;
+ } else {
+ start_pg = PCNET_START_PG;
+ stop_pg = PCNET_STOP_PG;
+ cm_offset = 0;
+ }
+
+ /* has_shmem is ignored if use_shmem != -1 */
+ if ((use_shmem == 0) || (!has_shmem && (use_shmem == -1)) ||
+ (setup_shmem_window(link, start_pg, stop_pg, cm_offset) != 0))
+ setup_dma_config(link, start_pg, stop_pg);
+
+ ei_status.name = "NE2000";
+ ei_status.word16 = 1;
+ ei_status.reset_8390 = pcnet_reset_8390;
+
+ if (info->flags & (IS_DL10019|IS_DL10022))
+ mii_phy_probe(dev);
+
+ SET_NETDEV_DEV(dev, &link->dev);
+
+ if (register_netdev(dev) != 0) {
+ pr_notice("register_netdev() failed\n");
+ goto failed;
+ }
+
+ if (info->flags & (IS_DL10019|IS_DL10022)) {
+ u_char id = inb(dev->base_addr + 0x1a);
+ netdev_info(dev, "NE2000 (DL100%d rev %02x): ",
+ (info->flags & IS_DL10022) ? 22 : 19, id);
+ if (info->pna_phy)
+ pr_cont("PNA, ");
+ } else {
+ netdev_info(dev, "NE2000 Compatible: ");
+ }
+ pr_cont("io %#3lx, irq %d,", dev->base_addr, dev->irq);
+ if (info->flags & USE_SHMEM)
+ pr_cont(" mem %#5lx,", dev->mem_start);
+ if (info->flags & HAS_MISC_REG)
+ pr_cont(" %s xcvr,", if_names[dev->if_port]);
+ pr_cont(" hw_addr %pM\n", dev->dev_addr);
+ return 0;
+
+failed:
+ pcnet_release(link);
+ return -ENODEV;
+} /* pcnet_config */
+
+static void pcnet_release(struct pcmcia_device *link)
+{
+ pcnet_dev_t *info = PRIV(link->priv);
+
+ dev_dbg(&link->dev, "pcnet_release\n");
+
+ if (info->flags & USE_SHMEM)
+ iounmap(info->base);
+
+ pcmcia_disable_device(link);
+}
+
+static int pcnet_suspend(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ if (link->open)
+ netif_device_detach(dev);
+
+ return 0;
+}
+
+static int pcnet_resume(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ if (link->open) {
+ pcnet_reset_8390(dev);
+ NS8390_init(dev, 1);
+ netif_device_attach(dev);
+ }
+
+ return 0;
+}
+
+
+/*======================================================================
+
+ MII interface support for DL10019 and DL10022 based cards
+
+ On the DL10019, the MII IO direction bit is 0x10; on the DL10022
+ it is 0x20. Setting both bits seems to work on both card types.
+
+======================================================================*/
+
+#define DLINK_GPIO 0x1c
+#define DLINK_DIAG 0x1d
+#define DLINK_EEPROM 0x1e
+
+#define MDIO_SHIFT_CLK 0x80
+#define MDIO_DATA_OUT 0x40
+#define MDIO_DIR_WRITE 0x30
+#define MDIO_DATA_WRITE0 (MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (MDIO_DIR_WRITE | MDIO_DATA_OUT)
+#define MDIO_DATA_READ 0x10
+#define MDIO_MASK 0x0f
+
+static void mdio_sync(unsigned int addr)
+{
+ int bits, mask = inb(addr) & MDIO_MASK;
+ for (bits = 0; bits < 32; bits++) {
+ outb(mask | MDIO_DATA_WRITE1, addr);
+ outb(mask | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr);
+ }
+}
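+
+/*
+ * Bit-banged MII management frame: mdio_read() clocks out the 14 high
+ * bits of a read frame (start 01, opcode 10, 5-bit PHY address, 5-bit
+ * register) and then samples 19 bits back; the final shift keeps bits
+ * 16..1, which hold the 16-bit register value after the turnaround.
+ */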
+
+static int mdio_read(unsigned int addr, int phy_id, int loc)
+{
+ u_int cmd = (0x06<<10)|(phy_id<<5)|loc;
+ int i, retval = 0, mask = inb(addr) & MDIO_MASK;
+
+ mdio_sync(addr);
+ for (i = 13; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb(mask | dat, addr);
+ outb(mask | dat | MDIO_SHIFT_CLK, addr);
+ }
+ for (i = 19; i > 0; i--) {
+ outb(mask, addr);
+ retval = (retval << 1) | ((inb(addr) & MDIO_DATA_READ) != 0);
+ outb(mask | MDIO_SHIFT_CLK, addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(unsigned int addr, int phy_id, int loc, int value)
+{
+ u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
+ int i, mask = inb(addr) & MDIO_MASK;
+
+ mdio_sync(addr);
+ for (i = 31; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb(mask | dat, addr);
+ outb(mask | dat | MDIO_SHIFT_CLK, addr);
+ }
+ for (i = 1; i >= 0; i--) {
+ outb(mask, addr);
+ outb(mask | MDIO_SHIFT_CLK, addr);
+ }
+}
+
+/*======================================================================
+
+ EEPROM access routines for DL10019 and DL10022 based cards
+
+======================================================================*/
+
+#define EE_EEP 0x40
+#define EE_ASIC 0x10
+#define EE_CS 0x08
+#define EE_CK 0x04
+#define EE_DO 0x02
+#define EE_DI 0x01
+#define EE_ADOT 0x01 /* DataOut for ASIC */
+#define EE_READ_CMD 0x06
+
+#define DL19FDUPLX 0x0400 /* DL10019 Full duplex mode */
+
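+/*
+ * Serial EEPROM access, bit-banged through DLINK_EEPROM. The timing
+ * and the 0x06 read opcode match the common 93C46-style parts, though
+ * the exact EEPROM type is not identified here.
+ */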
+static int read_eeprom(unsigned int ioaddr, int location)
+{
+ int i, retval = 0;
+ unsigned int ee_addr = ioaddr + DLINK_EEPROM;
+ int read_cmd = location | (EE_READ_CMD << 8);
+
+ outb(0, ee_addr);
+ outb(EE_EEP|EE_CS, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 10; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_DO : 0;
+ outb_p(EE_EEP|EE_CS|dataval, ee_addr);
+ outb_p(EE_EEP|EE_CS|dataval|EE_CK, ee_addr);
+ }
+ outb(EE_EEP|EE_CS, ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ outb_p(EE_EEP|EE_CS | EE_CK, ee_addr);
+ retval = (retval << 1) | ((inb(ee_addr) & EE_DI) ? 1 : 0);
+ outb_p(EE_EEP|EE_CS, ee_addr);
+ }
+
+ /* Terminate the EEPROM access. */
+ outb(0, ee_addr);
+ return retval;
+}
+
+/*
+ The internal ASIC registers can be changed by EEPROM READ access
+ with EE_ASIC bit set.
+ In ASIC mode, EE_ADOT is used to output the data to the ASIC.
+*/
+
+static void write_asic(unsigned int ioaddr, int location, short asic_data)
+{
+ int i;
+ unsigned int ee_addr = ioaddr + DLINK_EEPROM;
+ short dataval;
+ int read_cmd = location | (EE_READ_CMD << 8);
+
+ asic_data |= read_eeprom(ioaddr, location);
+
+ outb(0, ee_addr);
+ outb(EE_ASIC|EE_CS|EE_DI, ee_addr);
+
+ read_cmd = read_cmd >> 1;
+
+ /* Shift the read command bits out. */
+ for (i = 9; i >= 0; i--) {
+ dataval = (read_cmd & (1 << i)) ? EE_DO : 0;
+ outb_p(EE_ASIC|EE_CS|EE_DI|dataval, ee_addr);
+ outb_p(EE_ASIC|EE_CS|EE_DI|dataval|EE_CK, ee_addr);
+ outb_p(EE_ASIC|EE_CS|EE_DI|dataval, ee_addr);
+ }
+ /* sync */
+ outb(EE_ASIC|EE_CS, ee_addr);
+ outb(EE_ASIC|EE_CS|EE_CK, ee_addr);
+ outb(EE_ASIC|EE_CS, ee_addr);
+
+ for (i = 15; i >= 0; i--) {
+ dataval = (asic_data & (1 << i)) ? EE_ADOT : 0;
+ outb_p(EE_ASIC|EE_CS|dataval, ee_addr);
+ outb_p(EE_ASIC|EE_CS|dataval|EE_CK, ee_addr);
+ outb_p(EE_ASIC|EE_CS|dataval, ee_addr);
+ }
+
+ /* Terminate the ASIC access. */
+ outb(EE_ASIC|EE_DI, ee_addr);
+ outb(EE_ASIC|EE_DI| EE_CK, ee_addr);
+ outb(EE_ASIC|EE_DI, ee_addr);
+
+ outb(0, ee_addr);
+}
+
+/*====================================================================*/
+
+static void set_misc_reg(struct net_device *dev)
+{
+ unsigned int nic_base = dev->base_addr;
+ pcnet_dev_t *info = PRIV(dev);
+ u_char tmp;
+
+ if (info->flags & HAS_MISC_REG) {
+ tmp = inb_p(nic_base + PCNET_MISC) & ~3;
+ if (dev->if_port == 2)
+ tmp |= 1;
+ if (info->flags & USE_BIG_BUF)
+ tmp |= 2;
+ if (info->flags & HAS_IBM_MISC)
+ tmp |= 8;
+ outb_p(tmp, nic_base + PCNET_MISC);
+ }
+ if (info->flags & IS_DL10022) {
+ if (info->flags & HAS_MII) {
+ /* Advertise 100F, 100H, 10F, 10H */
+ mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 4, 0x01e1);
+ /* Restart MII autonegotiation */
+ mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x0000);
+ mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x1200);
+ info->mii_reset = jiffies;
+ } else {
+ outb(full_duplex ? 4 : 0, nic_base + DLINK_DIAG);
+ }
+ } else if (info->flags & IS_DL10019) {
+ /* Advertise 100F, 100H, 10F, 10H */
+ mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 4, 0x01e1);
+ /* Restart MII autonegotiation */
+ mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x0000);
+ mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x1200);
+ }
+}
+
+/*====================================================================*/
+
+static void mii_phy_probe(struct net_device *dev)
+{
+ pcnet_dev_t *info = PRIV(dev);
+ unsigned int mii_addr = dev->base_addr + DLINK_GPIO;
+ int i;
+ u_int tmp, phyid;
+
+ for (i = 31; i >= 0; i--) {
+ tmp = mdio_read(mii_addr, i, 1);
+ if ((tmp == 0) || (tmp == 0xffff))
+ continue;
+ tmp = mdio_read(mii_addr, i, MII_PHYID_REG1);
+ phyid = tmp << 16;
+ phyid |= mdio_read(mii_addr, i, MII_PHYID_REG2);
+ phyid &= MII_PHYID_REV_MASK;
+ netdev_dbg(dev, "MII at %d is 0x%08x\n", i, phyid);
+ if (phyid == AM79C9XX_HOME_PHY) {
+ info->pna_phy = i;
+ } else if (phyid != AM79C9XX_ETH_PHY) {
+ info->eth_phy = i;
+ }
+ }
+}
+
+static int pcnet_open(struct net_device *dev)
+{
+ int ret;
+ pcnet_dev_t *info = PRIV(dev);
+ struct pcmcia_device *link = info->p_dev;
+ unsigned int nic_base = dev->base_addr;
+
+ dev_dbg(&link->dev, "pcnet_open('%s')\n", dev->name);
+
+ if (!pcmcia_dev_present(link))
+ return -ENODEV;
+
+ set_misc_reg(dev);
+
+ outb_p(0xFF, nic_base + EN0_ISR); /* Clear bogus intr. */
+ ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, dev->name, dev);
+ if (ret)
+ return ret;
+
+ link->open++;
+
+ info->phy_id = info->eth_phy;
+ info->link_status = 0x00;
+ init_timer(&info->watchdog);
+ info->watchdog.function = ei_watchdog;
+ info->watchdog.data = (u_long)dev;
+ info->watchdog.expires = jiffies + HZ;
+ add_timer(&info->watchdog);
+
+ return ei_open(dev);
+} /* pcnet_open */
+
+/*====================================================================*/
+
+static int pcnet_close(struct net_device *dev)
+{
+ pcnet_dev_t *info = PRIV(dev);
+ struct pcmcia_device *link = info->p_dev;
+
+ dev_dbg(&link->dev, "pcnet_close('%s')\n", dev->name);
+
+ ei_close(dev);
+ free_irq(dev->irq, dev);
+
+ link->open--;
+ netif_stop_queue(dev);
+ del_timer_sync(&info->watchdog);
+
+ return 0;
+} /* pcnet_close */
+
+/*======================================================================
+
+ Hard reset the card. This used to pause for the same period that
+ an 8390 reset command required, but that shouldn't be necessary.
+
+======================================================================*/
+
+static void pcnet_reset_8390(struct net_device *dev)
+{
+ unsigned int nic_base = dev->base_addr;
+ int i;
+
+ ei_status.txing = ei_status.dmaing = 0;
+
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, nic_base + E8390_CMD);
+
+ outb(inb(nic_base + PCNET_RESET), nic_base + PCNET_RESET);
+
+ for (i = 0; i < 100; i++) {
+ if ((inb_p(nic_base+EN0_ISR) & ENISR_RESET) != 0)
+ break;
+ udelay(100);
+ }
+ outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
+
+ if (i == 100)
+ netdev_err(dev, "pcnet_reset_8390() did not complete.\n");
+
+ set_misc_reg(dev);
+
+} /* pcnet_reset_8390 */
+
+/*====================================================================*/
+
+static int set_config(struct net_device *dev, struct ifmap *map)
+{
+ pcnet_dev_t *info = PRIV(dev);
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (!(info->flags & HAS_MISC_REG))
+ return -EOPNOTSUPP;
+ else if ((map->port < 1) || (map->port > 2))
+ return -EINVAL;
+ dev->if_port = map->port;
+ netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
+ NS8390_init(dev, 1);
+ }
+ return 0;
+}
+
+/*====================================================================*/
+
+static irqreturn_t ei_irq_wrapper(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ pcnet_dev_t *info;
+ irqreturn_t ret = ei_interrupt(irq, dev_id);
+
+ if (ret == IRQ_HANDLED) {
+ info = PRIV(dev);
+ info->stale = 0;
+ }
+ return ret;
+}
+
+static void ei_watchdog(u_long arg)
+{
+ struct net_device *dev = (struct net_device *)arg;
+ pcnet_dev_t *info = PRIV(dev);
+ unsigned int nic_base = dev->base_addr;
+ unsigned int mii_addr = nic_base + DLINK_GPIO;
+ u_short link;
+
+ if (!netif_device_present(dev))
+ goto reschedule;
+
+ /* Check for pending interrupt with expired latency timer: with
+ this, we can limp along even if the interrupt is blocked */
+ if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
+ if (!info->fast_poll)
+ netdev_info(dev, "interrupt(s) dropped!\n");
+ ei_irq_wrapper(dev->irq, dev);
+ info->fast_poll = HZ;
+ }
+ if (info->fast_poll) {
+ info->fast_poll--;
+ info->watchdog.expires = jiffies + 1;
+ add_timer(&info->watchdog);
+ return;
+ }
+
+ if (!(info->flags & HAS_MII))
+ goto reschedule;
+
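+ /*
+ * Read the status register twice: the link-status bit (0x0004)
+ * is latched low, so the first read clears any stale failure
+ * indication and the second reflects the current state.
+ */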
+ mdio_read(mii_addr, info->phy_id, 1);
+ link = mdio_read(mii_addr, info->phy_id, 1);
+ if (!link || (link == 0xffff)) {
+ if (info->eth_phy) {
+ info->phy_id = info->eth_phy = 0;
+ } else {
+ netdev_info(dev, "MII is missing!\n");
+ info->flags &= ~HAS_MII;
+ }
+ goto reschedule;
+ }
+
+ link &= 0x0004;
+ if (link != info->link_status) {
+ u_short p = mdio_read(mii_addr, info->phy_id, 5);
+ netdev_info(dev, "%s link beat\n", link ? "found" : "lost");
+ if (link && (info->flags & IS_DL10022)) {
+ /* Disable collision detection on full duplex links */
+ outb((p & 0x0140) ? 4 : 0, nic_base + DLINK_DIAG);
+ } else if (link && (info->flags & IS_DL10019)) {
+ /* Disable collision detection on full duplex links */
+ write_asic(dev->base_addr, 4, (p & 0x140) ? DL19FDUPLX : 0);
+ }
+ if (link) {
+ if (info->phy_id == info->eth_phy) {
+ if (p)
+ netdev_info(dev, "autonegotiation complete: "
+ "%sbaseT-%cD selected\n",
+ ((p & 0x0180) ? "100" : "10"),
+ ((p & 0x0140) ? 'F' : 'H'));
+ else
+ netdev_info(dev, "link partner did not autonegotiate\n");
+ }
+ NS8390_init(dev, 1);
+ }
+ info->link_status = link;
+ }
+ if (info->pna_phy && time_after(jiffies, info->mii_reset + 6*HZ)) {
+ link = mdio_read(mii_addr, info->eth_phy, 1) & 0x0004;
+ if (((info->phy_id == info->pna_phy) && link) ||
+ ((info->phy_id != info->pna_phy) && !link)) {
+ /* isolate this MII and try flipping to the other one */
+ mdio_write(mii_addr, info->phy_id, 0, 0x0400);
+ info->phy_id ^= info->pna_phy ^ info->eth_phy;
+ netdev_info(dev, "switched to %s transceiver\n",
+ (info->phy_id == info->eth_phy) ? "ethernet" : "PNA");
+ mdio_write(mii_addr, info->phy_id, 0,
+ (info->phy_id == info->eth_phy) ? 0x1000 : 0);
+ info->link_status = 0;
+ info->mii_reset = jiffies;
+ }
+ }
+
+reschedule:
+ info->watchdog.expires = jiffies + HZ;
+ add_timer(&info->watchdog);
+}
+
+/*====================================================================*/
+
+
+static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ pcnet_dev_t *info = PRIV(dev);
+ struct mii_ioctl_data *data = if_mii(rq);
+ unsigned int mii_addr = dev->base_addr + DLINK_GPIO;
+
+ if (!(info->flags & (IS_DL10019|IS_DL10022)))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = info->phy_id;
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f);
+ return 0;
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ mdio_write(mii_addr, data->phy_id, data->reg_num & 0x1f, data->val_in);
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+/*====================================================================*/
+
+static void dma_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ unsigned int nic_base = dev->base_addr;
+
+ if (ei_status.dmaing) {
+ netdev_notice(dev, "DMAing conflict in dma_block_input."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD);
+ outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+ outb_p(0, nic_base + EN0_RCNTHI);
+ outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD);
+
+ insw(nic_base + PCNET_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr)>>1);
+ /* Fix for big endian systems */
+ hdr->count = le16_to_cpu(hdr->count);
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+/*====================================================================*/
+
+static void dma_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ unsigned int nic_base = dev->base_addr;
+ int xfer_count = count;
+ char *buf = skb->data;
+
+ if ((ei_debug > 4) && (count != 4))
+ netdev_dbg(dev, "[bi=%d]\n", count+4);
+ if (ei_status.dmaing) {
+ netdev_notice(dev, "DMAing conflict in dma_block_input."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD);
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD);
+
+ insw(nic_base + PCNET_DATAPORT,buf,count>>1);
+ if (count & 0x01)
+ buf[count-1] = inb(nic_base + PCNET_DATAPORT), xfer_count++;
+
+ /* This was for the ALPHA version only, but enough people have been
+ encountering problems that it is still here. */
+#ifdef PCMCIA_DEBUG
+ if (ei_debug > 4) { /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
+ -- it's broken for Rx on some cards! */
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if (((ring_offset + xfer_count) & 0xff) == (addr & 0xff))
+ break;
+ } while (--tries > 0);
+ if (tries <= 0)
+ netdev_notice(dev, "RX transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ ring_offset + xfer_count, addr);
+ }
+#endif
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+} /* dma_block_input */
+
+/*====================================================================*/
+
+static void dma_block_output(struct net_device *dev, int count,
+ const u_char *buf, const int start_page)
+{
+ unsigned int nic_base = dev->base_addr;
+ pcnet_dev_t *info = PRIV(dev);
+#ifdef PCMCIA_DEBUG
+ int retries = 0;
+#endif
+ u_long dma_start;
+
+#ifdef PCMCIA_DEBUG
+ if (ei_debug > 4)
+ netdev_dbg(dev, "[bo=%d]\n", count);
+#endif
+
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+ if (count & 0x01)
+ count++;
+ if (ei_status.dmaing) {
+ netdev_notice(dev, "DMAing conflict in dma_block_output."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base+PCNET_CMD);
+
+#ifdef PCMCIA_DEBUG
+ retry:
+#endif
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR);
+
+ /* Now the normal output. */
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(0x00, nic_base + EN0_RSARLO);
+ outb_p(start_page, nic_base + EN0_RSARHI);
+
+ outb_p(E8390_RWRITE+E8390_START, nic_base + PCNET_CMD);
+ outsw(nic_base + PCNET_DATAPORT, buf, count>>1);
+
+ dma_start = jiffies;
+
+#ifdef PCMCIA_DEBUG
+ /* This was for the ALPHA version only, but enough people have been
+ encountering problems that it is still here. */
+ if (ei_debug > 4) { /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if ((start_page << 8) + count == addr)
+ break;
+ } while (--tries > 0);
+ if (tries <= 0) {
+ netdev_notice(dev, "Tx packet transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ (start_page << 8) + count, addr);
+ if (retries++ == 0)
+ goto retry;
+ }
+ }
+#endif
+
+ while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
+ if (time_after(jiffies, dma_start + PCNET_RDC_TIMEOUT)) {
+ netdev_notice(dev, "timeout waiting for Tx RDC.\n");
+ pcnet_reset_8390(dev);
+ NS8390_init(dev, 1);
+ break;
+ }
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ if (info->flags & DELAY_OUTPUT)
+ udelay((long)delay_time);
+ ei_status.dmaing &= ~0x01;
+}
+
+/*====================================================================*/
+
+static int setup_dma_config(struct pcmcia_device *link, int start_pg,
+ int stop_pg)
+{
+ struct net_device *dev = link->priv;
+
+ ei_status.tx_start_page = start_pg;
+ ei_status.rx_start_page = start_pg + TX_PAGES;
+ ei_status.stop_page = stop_pg;
+
+ /* set up block i/o functions */
+ ei_status.get_8390_hdr = dma_get_8390_hdr;
+ ei_status.block_input = dma_block_input;
+ ei_status.block_output = dma_block_output;
+
+ return 0;
+}
+
+/*====================================================================*/
+
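+/*
+ * The shared memory window is requested with WIN_DATA_WIDTH_16 in
+ * setup_shmem_window(), so copyin()/copyout() stick to 16-bit
+ * accesses and handle a trailing odd byte with a word read (and a
+ * read-modify-write on output).
+ */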
+static void copyin(void *dest, void __iomem *src, int c)
+{
+ u_short *d = dest;
+ u_short __iomem *s = src;
+ int odd;
+
+ if (c <= 0)
+ return;
+ odd = (c & 1); c >>= 1;
+
+ if (c) {
+ do { *d++ = __raw_readw(s++); } while (--c);
+ }
+ /* get last byte by fetching a word and masking */
+ if (odd)
+ *((u_char *)d) = readw(s) & 0xff;
+}
+
+static void copyout(void __iomem *dest, const void *src, int c)
+{
+ u_short __iomem *d = dest;
+ const u_short *s = src;
+ int odd;
+
+ if (c <= 0)
+ return;
+ odd = (c & 1); c >>= 1;
+
+ if (c) {
+ do { __raw_writew(*s++, d++); } while (--c);
+ }
+ /* copy last byte doing a read-modify-write */
+ if (odd)
+ writew((readw(d) & 0xff00) | *(u_char *)s, d);
+}
+
+/*====================================================================*/
+
+static void shmem_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ void __iomem *xfer_start = ei_status.mem + (TX_PAGES<<8)
+ + (ring_page << 8)
+ - (ei_status.rx_start_page << 8);
+
+ copyin(hdr, xfer_start, sizeof(struct e8390_pkt_hdr));
+ /* Fix for big endian systems */
+ hdr->count = le16_to_cpu(hdr->count);
+}
+
+/*====================================================================*/
+
+static void shmem_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ void __iomem *base = ei_status.mem;
+ unsigned long offset = (TX_PAGES<<8) + ring_offset
+ - (ei_status.rx_start_page << 8);
+ char *buf = skb->data;
+
+ if (offset + count > ei_status.priv) {
+ /* We must wrap the input move. */
+ int semi_count = ei_status.priv - offset;
+ copyin(buf, base + offset, semi_count);
+ buf += semi_count;
+ offset = TX_PAGES<<8;
+ count -= semi_count;
+ }
+ copyin(buf, base + offset, count);
+}
+
+/*====================================================================*/
+
+static void shmem_block_output(struct net_device *dev, int count,
+ const u_char *buf, const int start_page)
+{
+ void __iomem *shmem = ei_status.mem + (start_page << 8);
+ shmem -= ei_status.tx_start_page << 8;
+ copyout(shmem, buf, count);
+}
+
+/*====================================================================*/
+
+static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
+ int stop_pg, int cm_offset)
+{
+ struct net_device *dev = link->priv;
+ pcnet_dev_t *info = PRIV(dev);
+ int i, window_size, offset, ret;
+
+ window_size = (stop_pg - start_pg) << 8;
+ if (window_size > 32 * 1024)
+ window_size = 32 * 1024;
+
+ /* Make sure it's a power of two. */
+ window_size = roundup_pow_of_two(window_size);
+
+ /* Allocate a memory window */
+ link->resource[3]->flags |= WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE;
+ link->resource[3]->flags |= WIN_USE_WAIT;
+ link->resource[3]->start = 0; link->resource[3]->end = window_size;
+ ret = pcmcia_request_window(link, link->resource[3], mem_speed);
+ if (ret)
+ goto failed;
+
+ offset = (start_pg << 8) + cm_offset;
+ offset -= offset % window_size;
+ ret = pcmcia_map_mem_page(link, link->resource[3], offset);
+ if (ret)
+ goto failed;
+
+ /* Try scribbling on the buffer */
+ info->base = ioremap(link->resource[3]->start,
+ resource_size(link->resource[3]));
+ for (i = 0; i < (TX_PAGES<<8); i += 2)
+ __raw_writew((i>>1), info->base+offset+i);
+ udelay(100);
+ for (i = 0; i < (TX_PAGES<<8); i += 2)
+ if (__raw_readw(info->base+offset+i) != (i>>1)) break;
+ pcnet_reset_8390(dev);
+ if (i != (TX_PAGES<<8)) {
+ iounmap(info->base);
+ pcmcia_release_window(link, link->resource[3]);
+ info->base = NULL;
+ goto failed;
+ }
+
+ ei_status.mem = info->base + offset;
+ ei_status.priv = resource_size(link->resource[3]);
+ dev->mem_start = (u_long)ei_status.mem;
+ dev->mem_end = dev->mem_start + resource_size(link->resource[3]);
+
+ ei_status.tx_start_page = start_pg;
+ ei_status.rx_start_page = start_pg + TX_PAGES;
+ ei_status.stop_page = start_pg + (
+ (resource_size(link->resource[3]) - offset) >> 8);
+
+ /* set up block i/o functions */
+ ei_status.get_8390_hdr = shmem_get_8390_hdr;
+ ei_status.block_input = shmem_block_input;
+ ei_status.block_output = shmem_block_output;
+
+ info->flags |= USE_SHMEM;
+ return 0;
+
+failed:
+ return 1;
+}
+
+/*====================================================================*/
+
+static const struct pcmcia_device_id pcnet_ids[] = {
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0057, 0x0021),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0104, 0x000a),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0xea15),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0143, 0x3341),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0143, 0xc0ab),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x021b, 0x0101),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x08a1, 0xc0ab),
+ PCMCIA_PFC_DEVICE_PROD_ID12(0, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4),
+ PCMCIA_PFC_DEVICE_PROD_ID12(0, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e),
+ PCMCIA_PFC_DEVICE_PROD_ID12(0, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff),
+ PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae),
+ PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033),
+ PCMCIA_PFC_DEVICE_PROD_ID12(0, "LINKSYS", "PCMLM336", 0xf7cb0b07, 0x7a821b58),
+ PCMCIA_PFC_DEVICE_PROD_ID12(0, "MICRO RESEARCH", "COMBO-L/M-336", 0xb2ced065, 0x3ced0555),
+ PCMCIA_PFC_DEVICE_PROD_ID12(0, "PCMCIAs", "ComboCard", 0xdcfe12d3, 0xcd8906cc),
+ PCMCIA_PFC_DEVICE_PROD_ID12(0, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f),
+ PCMCIA_MFC_DEVICE_PROD_ID12(0, "IBM", "Home and Away 28.8 PC Card ", 0xb569a6e5, 0x5bd4ff2c),
+ PCMCIA_MFC_DEVICE_PROD_ID12(0, "IBM", "Home and Away Credit Card Adapter", 0xb569a6e5, 0x4bdf15c3),
+ PCMCIA_MFC_DEVICE_PROD_ID12(0, "IBM", "w95 Home and Away Credit Card ", 0xb569a6e5, 0xae911c15),
+ PCMCIA_MFC_DEVICE_PROD_ID123(0, "APEX DATA", "MULTICARD", "ETHERNET-MODEM", 0x11c2da09, 0x7289dc5d, 0xaad95e1f),
+ PCMCIA_MFC_DEVICE_PROD_ID2(0, "FAX/Modem/Ethernet Combo Card ", 0x1ed59302),
+ PCMCIA_DEVICE_MANF_CARD(0x0057, 0x1004),
+ PCMCIA_DEVICE_MANF_CARD(0x0104, 0x000d),
+ PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0075),
+ PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0145),
+ PCMCIA_DEVICE_MANF_CARD(0x0149, 0x0230),
+ PCMCIA_DEVICE_MANF_CARD(0x0149, 0x4530),
+ PCMCIA_DEVICE_MANF_CARD(0x0149, 0xc1ab),
+ PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0110),
+ PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x8041),
+ PCMCIA_DEVICE_MANF_CARD(0x0213, 0x2452),
+ PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0300),
+ PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0307),
+ PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a),
+ PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1103),
+ PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1121),
+ PCMCIA_DEVICE_PROD_ID12("2408LAN", "Ethernet", 0x352fff7f, 0x00b2e941),
+ PCMCIA_DEVICE_PROD_ID1234("Socket", "CF 10/100 Ethernet Card", "Revision B", "05/11/06", 0xb38bcc2e, 0x4de88352, 0xeaca6c8d, 0x7e57c22e),
+ PCMCIA_DEVICE_PROD_ID123("Cardwell", "PCMCIA", "ETHERNET", 0x9533672e, 0x281f1c5d, 0x3ff7175b),
+ PCMCIA_DEVICE_PROD_ID123("CNet ", "CN30BC", "ETHERNET", 0x9fe55d3d, 0x85601198, 0x3ff7175b),
+ PCMCIA_DEVICE_PROD_ID123("Digital", "Ethernet", "Adapter", 0x9999ab35, 0x00b2e941, 0x4b0d829e),
+ PCMCIA_DEVICE_PROD_ID123("Edimax Technology Inc.", "PCMCIA", "Ethernet Card", 0x738a0019, 0x281f1c5d, 0x5e9d92c0),
+ PCMCIA_DEVICE_PROD_ID123("EFA ", "EFA207", "ETHERNET", 0x3d294be4, 0xeb9aab6c, 0x3ff7175b),
+ PCMCIA_DEVICE_PROD_ID123("I-O DATA", "PCLA", "ETHERNET", 0x1d55d7ec, 0xe4c64d34, 0x3ff7175b),
+ PCMCIA_DEVICE_PROD_ID123("IO DATA", "PCLATE", "ETHERNET", 0x547e66dc, 0x6b260753, 0x3ff7175b),
+ PCMCIA_DEVICE_PROD_ID123("KingMax Technology Inc.", "EN10-T2", "PCMCIA Ethernet Card", 0x932b7189, 0x699e4436, 0x6f6652e0),
+ PCMCIA_DEVICE_PROD_ID123("PCMCIA", "PCMCIA-ETHERNET-CARD", "UE2216", 0x281f1c5d, 0xd4cd2f20, 0xb87add82),
+ PCMCIA_DEVICE_PROD_ID123("PCMCIA", "PCMCIA-ETHERNET-CARD", "UE2620", 0x281f1c5d, 0xd4cd2f20, 0x7d3d83a8),
+ PCMCIA_DEVICE_PROD_ID1("2412LAN", 0x67f236ab),
+ PCMCIA_DEVICE_PROD_ID12("ACCTON", "EN2212", 0xdfc6b5b2, 0xcb112a11),
+ PCMCIA_DEVICE_PROD_ID12("ACCTON", "EN2216-PCMCIA-ETHERNET", 0xdfc6b5b2, 0x5542bfff),
+ PCMCIA_DEVICE_PROD_ID12("Allied Telesis, K.K.", "CentreCOM LA100-PCM-T V2 100/10M LAN PC Card", 0xbb7fbdd7, 0xcd91cc68),
+ PCMCIA_DEVICE_PROD_ID12("Allied Telesis K.K.", "LA100-PCM V2", 0x36634a66, 0xc6d05997),
+ PCMCIA_DEVICE_PROD_ID12("Allied Telesis, K.K.", "CentreCOM LA-PCM_V2", 0xbb7fBdd7, 0x28e299f8),
+ PCMCIA_DEVICE_PROD_ID12("Allied Telesis K.K.", "LA-PCM V3", 0x36634a66, 0x62241d96),
+ PCMCIA_DEVICE_PROD_ID12("AmbiCom", "AMB8010", 0x5070a7f9, 0x82f96e96),
+ PCMCIA_DEVICE_PROD_ID12("AmbiCom", "AMB8610", 0x5070a7f9, 0x86741224),
+ PCMCIA_DEVICE_PROD_ID12("AmbiCom Inc", "AMB8002", 0x93b15570, 0x75ec3efb),
+ PCMCIA_DEVICE_PROD_ID12("AmbiCom Inc", "AMB8002T", 0x93b15570, 0x461c5247),
+ PCMCIA_DEVICE_PROD_ID12("AmbiCom Inc", "AMB8010", 0x93b15570, 0x82f96e96),
+ PCMCIA_DEVICE_PROD_ID12("AnyCom", "ECO Ethernet", 0x578ba6e7, 0x0a9888c1),
+ PCMCIA_DEVICE_PROD_ID12("AnyCom", "ECO Ethernet 10/100", 0x578ba6e7, 0x939fedbd),
+ PCMCIA_DEVICE_PROD_ID12("AROWANA", "PCMCIA Ethernet LAN Card", 0x313adbc8, 0x08d9f190),
+ PCMCIA_DEVICE_PROD_ID12("ASANTE", "FriendlyNet PC Card", 0x3a7ade0f, 0x41c64504),
+ PCMCIA_DEVICE_PROD_ID12("Billionton", "LNT-10TB", 0x552ab682, 0xeeb1ba6a),
+ PCMCIA_DEVICE_PROD_ID12("CF", "10Base-Ethernet", 0x44ebf863, 0x93ae4d79),
+ PCMCIA_DEVICE_PROD_ID12("CNet", "CN40BC Ethernet", 0xbc477dde, 0xfba775a7),
+ PCMCIA_DEVICE_PROD_ID12("COMPU-SHACK", "BASEline PCMCIA 10 MBit Ethernetadapter", 0xfa2e424d, 0xe9190d8a),
+ PCMCIA_DEVICE_PROD_ID12("COMPU-SHACK", "FASTline PCMCIA 10/100 Fast-Ethernet", 0xfa2e424d, 0x3953d9b9),
+ PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722),
+ PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2),
+ PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a),
+ PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether CF-TD LAN Card", 0x5261440f, 0x8797663b),
+ PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd),
+ PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
+ PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
+ PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether PCC-T", 0x5261440f, 0x6705fcaa),
+ PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether PCC-TD", 0x5261440f, 0x47d5ca83),
+ PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FastEther PCC-TX", 0x5261440f, 0x485e85d9),
+ PCMCIA_DEVICE_PROD_ID12("Corega,K.K.", "Ethernet LAN Card", 0x110d26d9, 0x9fd2f0a2),
+ PCMCIA_DEVICE_PROD_ID12("corega,K.K.", "Ethernet LAN Card", 0x9791a90e, 0x9fd2f0a2),
+ PCMCIA_DEVICE_PROD_ID12("corega K.K.", "(CG-LAPCCTXD)", 0x5261440f, 0x73ec0d88),
+ PCMCIA_DEVICE_PROD_ID12("CouplerlessPCMCIA", "100BASE", 0xee5af0ad, 0x7c2add04),
+ PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-010", 0x77008979, 0x9d8d445d),
+ PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-110E 10/100M LAN Card", 0x77008979, 0xfd184814),
+ PCMCIA_DEVICE_PROD_ID12("DataTrek.", "NetCard ", 0x5cd66d9d, 0x84697ce0),
+ PCMCIA_DEVICE_PROD_ID12("Dayna Communications, Inc.", "CommuniCard E", 0x0c629325, 0xb4e7dbaf),
+ PCMCIA_DEVICE_PROD_ID12("Digicom", "Palladio LAN 10/100", 0x697403d8, 0xe160b995),
+ PCMCIA_DEVICE_PROD_ID12("Digicom", "Palladio LAN 10/100 Dongless", 0x697403d8, 0xa6d3b233),
+ PCMCIA_DEVICE_PROD_ID12("DIGITAL", "DEPCM-XX", 0x69616cb3, 0xe600e76e),
+ PCMCIA_DEVICE_PROD_ID12("D-Link", "DE-650", 0x1a424a1c, 0xf28c8398),
+ PCMCIA_DEVICE_PROD_ID12("D-Link", "DE-660", 0x1a424a1c, 0xd9a1d05b),
+ PCMCIA_DEVICE_PROD_ID12("D-Link", "DE-660+", 0x1a424a1c, 0x50dcd0ec),
+ PCMCIA_DEVICE_PROD_ID12("D-Link", "DFE-650", 0x1a424a1c, 0x0f0073f9),
+ PCMCIA_DEVICE_PROD_ID12("Dual Speed", "10/100 PC Card", 0x725b842d, 0xf1efee84),
+ PCMCIA_DEVICE_PROD_ID12("Dual Speed", "10/100 Port Attached PC Card", 0x725b842d, 0x2db1f8e9),
+ PCMCIA_DEVICE_PROD_ID12("Dynalink", "L10BC", 0x55632fd5, 0xdc65f2b1),
+ PCMCIA_DEVICE_PROD_ID12("DYNALINK", "L10BC", 0x6a26d1cf, 0xdc65f2b1),
+ PCMCIA_DEVICE_PROD_ID12("DYNALINK", "L10C", 0x6a26d1cf, 0xc4f84efb),
+ PCMCIA_DEVICE_PROD_ID12("E-CARD", "E-CARD", 0x6701da11, 0x6701da11),
+ PCMCIA_DEVICE_PROD_ID12("EIGER Labs Inc.", "Ethernet 10BaseT card", 0x53c864c6, 0xedd059f6),
+ PCMCIA_DEVICE_PROD_ID12("EIGER Labs Inc.", "Ethernet Combo card", 0x53c864c6, 0x929c486c),
+ PCMCIA_DEVICE_PROD_ID12("Ethernet", "Adapter", 0x00b2e941, 0x4b0d829e),
+ PCMCIA_DEVICE_PROD_ID12("Ethernet Adapter", "E2000 PCMCIA Ethernet", 0x96767301, 0x71fbbc61),
+ PCMCIA_DEVICE_PROD_ID12("Ethernet PCMCIA adapter", "EP-210", 0x8dd86181, 0xf2b52517),
+ PCMCIA_DEVICE_PROD_ID12("Fast Ethernet", "Adapter", 0xb4be14e3, 0x4b0d829e),
+ PCMCIA_DEVICE_PROD_ID12("Grey Cell", "GCS2000", 0x2a151fac, 0xf00555cb),
+ PCMCIA_DEVICE_PROD_ID12("Grey Cell", "GCS2220", 0x2a151fac, 0xc1b7e327),
+ PCMCIA_DEVICE_PROD_ID12("GVC", "NIC-2000p", 0x76e171bd, 0x6eb1c947),
+ PCMCIA_DEVICE_PROD_ID12("IBM Corp.", "Ethernet", 0xe3736c88, 0x00b2e941),
+ PCMCIA_DEVICE_PROD_ID12("IC-CARD", "IC-CARD", 0x60cb09a6, 0x60cb09a6),
+ PCMCIA_DEVICE_PROD_ID12("IC-CARD+", "IC-CARD+", 0x93693494, 0x93693494),
+ PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b),
+ PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0),
+ PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956),
+ PCMCIA_DEVICE_PROD_ID12("KENTRONICS", "KEP-230", 0xaf8144c9, 0x868f6616),
+ PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64),
+ PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5),
+ PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3),
+ PCMCIA_DEVICE_PROD_ID12("Kingston Technology Corp.", "EtheRx PC Card Ethernet Adapter", 0x313c7be3, 0x0afb54a2),
+ PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-10/100CD", 0x1b7827b2, 0xcda71d1c),
+ PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDF", 0x1b7827b2, 0xfec71e40),
+ PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDL/T", 0x1b7827b2, 0x79fba4f7),
+ PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDS", 0x1b7827b2, 0x931afaab),
+ PCMCIA_DEVICE_PROD_ID12("LEMEL", "LM-N89TX PRO", 0xbbefb52f, 0xd2897a97),
+ PCMCIA_DEVICE_PROD_ID12("Linksys", "Combo PCMCIA EthernetCard (EC2T)", 0x0733cc81, 0x32ee8c78),
+ PCMCIA_DEVICE_PROD_ID12("LINKSYS", "E-CARD", 0xf7cb0b07, 0x6701da11),
+ PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 Integrated PC Card (PCM100)", 0x0733cc81, 0x453c3f9d),
+ PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100)", 0x0733cc81, 0x66c5a389),
+ PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V2)", 0x0733cc81, 0x3a3b28e9),
+ PCMCIA_DEVICE_PROD_ID12("Linksys", "HomeLink Phoneline + 10/100 Network PC Card (PCM100H1)", 0x733cc81, 0x7a3e5c3a),
+ PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TX", 0x88fcdeda, 0x6d772737),
+ PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TE", 0x88fcdeda, 0x0e714bee),
+ PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN20T", 0x88fcdeda, 0x81090922),
+ PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN10TE", 0x88fcdeda, 0xc1e2521c),
+ PCMCIA_DEVICE_PROD_ID12("LONGSHINE", "PCMCIA Ethernet Card", 0xf866b0b0, 0x6f6652e0),
+ PCMCIA_DEVICE_PROD_ID12("MACNICA", "ME1-JEIDA", 0x20841b68, 0xaf8a3578),
+ PCMCIA_DEVICE_PROD_ID12("Macsense", "MPC-10", 0xd830297f, 0xd265c307),
+ PCMCIA_DEVICE_PROD_ID12("Matsushita Electric Industrial Co.,LTD.", "CF-VEL211", 0x44445376, 0x8ded41d4),
+ PCMCIA_DEVICE_PROD_ID12("MAXTECH", "PCN2000", 0x78d64bc0, 0xca0ca4b8),
+ PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC2-T", 0x481e0094, 0xa2eb0cf3),
+ PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC2-TX", 0x481e0094, 0x41a6916c),
+ PCMCIA_DEVICE_PROD_ID12("Microcom C.E.", "Travel Card LAN 10/100", 0x4b91cec7, 0xe70220d6),
+ PCMCIA_DEVICE_PROD_ID12("Microdyne", "NE4200", 0x2e6da59b, 0x0478e472),
+ PCMCIA_DEVICE_PROD_ID12("MIDORI ELEC.", "LT-PCMT", 0x648d55c1, 0xbde526c7),
+ PCMCIA_DEVICE_PROD_ID12("National Semiconductor", "InfoMover 4100", 0x36e1191f, 0x60c229b9),
+ PCMCIA_DEVICE_PROD_ID12("National Semiconductor", "InfoMover NE4100", 0x36e1191f, 0xa6617ec8),
+ PCMCIA_DEVICE_PROD_ID12("NEC", "PC-9801N-J12", 0x18df0ba0, 0xbc912d76),
+ PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA410TX", 0x9aa79dc3, 0x60e5bc0e),
+ PCMCIA_DEVICE_PROD_ID12("Network Everywhere", "Fast Ethernet 10/100 PC Card", 0x820a67b6, 0x31ed1a5f),
+ PCMCIA_DEVICE_PROD_ID12("NextCom K.K.", "Next Hawk", 0xaedaec74, 0xad050ef1),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", "10/100Mbps Ethernet Card", 0x281f1c5d, 0x6e41773b),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Ethernet", 0x281f1c5d, 0x00b2e941),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", "ETHERNET", 0x281f1c5d, 0x3ff7175b),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Ethernet 10BaseT Card", 0x281f1c5d, 0x4de2f6c8),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Ethernet Card", 0x281f1c5d, 0x5e9d92c0),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Ethernet Combo card", 0x281f1c5d, 0x929c486c),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", "ETHERNET V1.0", 0x281f1c5d, 0x4d8817c8),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEthernet", 0x281f1c5d, 0xfe871eeb),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Fast-Ethernet", 0x281f1c5d, 0x45f1f3b4),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FAST ETHERNET CARD", 0x281f1c5d, 0xec5dbca7),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA LAN", "Ethernet", 0x7500e246, 0x00b2e941),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", "LNT-10TN", 0x281f1c5d, 0xe707f641),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIAs", "ComboCard", 0xdcfe12d3, 0xcd8906cc),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", "UE2212", 0x281f1c5d, 0xbf17199b),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", " Ethernet NE2000 Compatible", 0x281f1c5d, 0x42d5d7e1),
+ PCMCIA_DEVICE_PROD_ID12("PRETEC", "Ethernet CompactLAN 10baseT 3.3V", 0xebf91155, 0x30074c80),
+ PCMCIA_DEVICE_PROD_ID12("PRETEC", "Ethernet CompactLAN 10BaseT 3.3V", 0xebf91155, 0x7f5a4f50),
+ PCMCIA_DEVICE_PROD_ID12("Psion Dacom", "Gold Card Ethernet", 0xf5f025c2, 0x3a30e110),
+ PCMCIA_DEVICE_PROD_ID12("=RELIA==", "Ethernet", 0xcdd0644a, 0x00b2e941),
+ PCMCIA_DEVICE_PROD_ID12("RIOS Systems Co.", "PC CARD3 ETHERNET", 0x7dd33481, 0x10b41826),
+ PCMCIA_DEVICE_PROD_ID12("RP", "1625B Ethernet NE2000 Compatible", 0xe3e66e22, 0xb96150df),
+ PCMCIA_DEVICE_PROD_ID12("RPTI", "EP400 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4a7e2ae0),
+ PCMCIA_DEVICE_PROD_ID12("RPTI", "EP401 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4bcbd7fd),
+ PCMCIA_DEVICE_PROD_ID12("RPTI LTD.", "EP400", 0xc53ac515, 0x81e39388),
+ PCMCIA_DEVICE_PROD_ID12("SCM", "Ethernet Combo card", 0xbdc3b102, 0x929c486c),
+ PCMCIA_DEVICE_PROD_ID12("Seiko Epson Corp.", "Ethernet", 0x09928730, 0x00b2e941),
+ PCMCIA_DEVICE_PROD_ID12("SMC", "EZCard-10-PCMCIA", 0xc4f8b18b, 0xfb21d265),
+ PCMCIA_DEVICE_PROD_ID12("Socket Communications Inc", "Socket EA PCMCIA LAN Adapter Revision D", 0xc70a4760, 0x2ade483e),
+ PCMCIA_DEVICE_PROD_ID12("Socket Communications Inc", "Socket EA PCMCIA LAN Adapter Revision E", 0xc70a4760, 0x5dd978a8),
+ PCMCIA_DEVICE_PROD_ID12("TDK", "LAK-CD031 for PCMCIA", 0x1eae9475, 0x0ed386fa),
+ PCMCIA_DEVICE_PROD_ID12("Telecom Device K.K.", "SuperSocket RE450T", 0x466b05f0, 0x8b74bc4f),
+ PCMCIA_DEVICE_PROD_ID12("Telecom Device K.K.", "SuperSocket RE550T", 0x466b05f0, 0x33c8db2a),
+ PCMCIA_DEVICE_PROD_ID13("Hypertec", "EP401", 0x8787bec7, 0xf6e4a31e),
+ PCMCIA_DEVICE_PROD_ID13("KingMax Technology Inc.", "Ethernet Card", 0x932b7189, 0x5e9d92c0),
+ PCMCIA_DEVICE_PROD_ID13("LONGSHINE", "EP401", 0xf866b0b0, 0xf6e4a31e),
+ PCMCIA_DEVICE_PROD_ID13("Xircom", "CFE-10", 0x2e3ee845, 0x22a49f89),
+ PCMCIA_DEVICE_PROD_ID1("CyQ've 10 Base-T LAN CARD", 0x94faf360),
+ PCMCIA_DEVICE_PROD_ID1("EP-210 PCMCIA LAN CARD.", 0x8850b4de),
+ PCMCIA_DEVICE_PROD_ID1("ETHER-C16", 0x06a8514f),
+ PCMCIA_DEVICE_PROD_ID1("NE2000 Compatible", 0x75b8ad5a),
+ PCMCIA_DEVICE_PROD_ID2("EN-6200P2", 0xa996d078),
+ /* too generic! */
+ /* PCMCIA_DEVICE_PROD_ID12("PCMCIA", "10/100 Ethernet Card", 0x281f1c5d, 0x11b0ffc0), */
+ PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "PCMCIA", "EN2218-LAN/MODEM", 0x281f1c5d, 0x570f348e, "cis/PCMLM28.cis"),
+ PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "PCMCIA", "UE2218-LAN/MODEM", 0x281f1c5d, 0x6fdcacee, "cis/PCMLM28.cis"),
+ PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"),
+ PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"),
+ PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"),
+ PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "TOSHIBA", "Modem/LAN Card", 0xb4585a1a, 0x53f922f8, "cis/PCMLM28.cis"),
+ PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"),
+ PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
+ PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"),
+ PCMCIA_DEVICE_CIS_PROD_ID12("Allied Telesis,K.K", "Ethernet LAN Card", 0x2ad62f3c, 0x9fd2f0a2, "cis/LA-PCM.cis"),
+ PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"),
+ PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
+ PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"),
+ PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"),
+ PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b),
+ PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0",
+ 0xb4be14e3, 0x43ac239b, 0x0877b627),
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, pcnet_ids);
+MODULE_FIRMWARE("cis/PCMLM28.cis");
+MODULE_FIRMWARE("cis/DP83903.cis");
+MODULE_FIRMWARE("cis/LA-PCM.cis");
+MODULE_FIRMWARE("cis/PE520.cis");
+MODULE_FIRMWARE("cis/NE2K.cis");
+MODULE_FIRMWARE("cis/PE-200.cis");
+MODULE_FIRMWARE("cis/tamarack.cis");
+
+static struct pcmcia_driver pcnet_driver = {
+ .name = "pcnet_cs",
+ .probe = pcnet_probe,
+ .remove = pcnet_detach,
+ .owner = THIS_MODULE,
+ .id_table = pcnet_ids,
+ .suspend = pcnet_suspend,
+ .resume = pcnet_resume,
+};
+
+static int __init init_pcnet_cs(void)
+{
+ return pcmcia_register_driver(&pcnet_driver);
+}
+
+static void __exit exit_pcnet_cs(void)
+{
+ pcmcia_unregister_driver(&pcnet_driver);
+}
+
+module_init(init_pcnet_cs);
+module_exit(exit_pcnet_cs);
diff --git a/drivers/net/ethernet/8390/smc-mca.c b/drivers/net/ethernet/8390/smc-mca.c
new file mode 100644
index 000000000000..77efec44fea0
--- /dev/null
+++ b/drivers/net/ethernet/8390/smc-mca.c
@@ -0,0 +1,576 @@
+/* smc-mca.c: An SMC Ultra ethernet driver for Linux. */
+/*
+ Most of this driver, except for ultramca_probe, is taken nearly
+ verbatim from smc-ultra.c by Donald Becker. The rest was
+ written and is copyright 1996 by David Weis, weisd3458@uni.edu
+
+ This is a driver for the SMC Ultra and SMC EtherEZ ethercards.
+
+ This driver uses the cards in the 8390-compatible, shared memory mode.
+ Most of the run-time complexity is handled by the generic code in
+ 8390.c.
+
+ This driver enables the shared memory only when doing the actual data
+ transfers to avoid a bug in early versions of the card that corrupted
+ data transferred by an AHA1542.
+
+ This driver does not support the programmed-I/O data transfer mode of
+ the EtherEZ. That support (if available) is in smc-ez.c. Nor does it
+ use the non-8390-compatible "Altego" mode. (No support currently planned.)
+
+ Changelog:
+
+ Paul Gortmaker : multiple card support for module users.
+ David Weis : Micro Channel-ized it.
+ Tom Sightler : Added support for IBM PS/2 Ethernet Adapter/A
+ Christopher Turcksin : Changed MCA-probe so that multiple adapters are
+ found correctly (Jul 16, 1997)
+ Chris Beauregard : Tried to merge the two changes above (Dec 15, 1997)
+ Tom Sightler : Fixed minor detection bug caused by above merge
+ Tom Sightler : Added support for three more Western Digital
+ MCA-adapters
+ Tom Sightler : Added support for 2.2.x mca_find_unused_adapter
+ Hartmut Schmidt : - Modified parameter detection to handle each
+ card differently depending on a switch-list
+ - 'card_ver' removed from the adapter list
+ - Some minor bug fixes
+*/
+
+#include <linux/mca.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "8390.h"
+
+#define DRV_NAME "smc-mca"
+
+static int ultramca_open(struct net_device *dev);
+static void ultramca_reset_8390(struct net_device *dev);
+static void ultramca_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ultramca_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb,
+ int ring_offset);
+static void ultramca_block_output(struct net_device *dev, int count,
+ const unsigned char *buf,
+ const int start_page);
+static int ultramca_close_card(struct net_device *dev);
+
+#define START_PG 0x00 /* First page of TX buffer */
+
+#define ULTRA_CMDREG 0 /* Offset to ASIC command register. */
+#define ULTRA_RESET 0x80 /* Board reset, in ULTRA_CMDREG. */
+#define ULTRA_MEMENB 0x40 /* Enable the shared memory. */
+#define ULTRA_NIC_OFFSET 16 /* NIC register offset from the base_addr. */
+#define ULTRA_IO_EXTENT 32
+#define EN0_ERWCNT 0x08 /* Early receive warning count. */
+
+#define _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A 0
+#define _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A 1
+#define _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A 2
+#define _6fc1_WD_Starcard_PLUS_A_WD8003ST_A 3
+#define _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A 4
+#define _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A 5
+#define _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A 6
+#define _efe5_IBM_PS2_Adapter_A_for_Ethernet 7
+
+struct smc_mca_adapters_t {
+ unsigned int id;
+ char *name;
+};
+
+#define MAX_ULTRAMCA_CARDS 4 /* Max number of Ultra cards per module */
+
+static int ultra_io[MAX_ULTRAMCA_CARDS];
+static int ultra_irq[MAX_ULTRAMCA_CARDS];
+MODULE_LICENSE("GPL");
+
+module_param_array(ultra_io, int, NULL, 0);
+module_param_array(ultra_irq, int, NULL, 0);
+MODULE_PARM_DESC(ultra_io, "SMC Ultra/EtherEZ MCA I/O base address(es)");
+MODULE_PARM_DESC(ultra_irq, "SMC Ultra/EtherEZ MCA IRQ number(s)");
+
+static const struct {
+ unsigned int base_addr;
+} addr_table[] = {
+ { 0x0800 },
+ { 0x1800 },
+ { 0x2800 },
+ { 0x3800 },
+ { 0x4800 },
+ { 0x5800 },
+ { 0x6800 },
+ { 0x7800 },
+ { 0x8800 },
+ { 0x9800 },
+ { 0xa800 },
+ { 0xb800 },
+ { 0xc800 },
+ { 0xd800 },
+ { 0xe800 },
+ { 0xf800 }
+};
+
+#define MEM_MASK 64
+
+static const struct {
+ unsigned char mem_index;
+ unsigned long mem_start;
+ unsigned char num_pages;
+} mem_table[] = {
+ { 16, 0x0c0000, 40 },
+ { 18, 0x0c4000, 40 },
+ { 20, 0x0c8000, 40 },
+ { 22, 0x0cc000, 40 },
+ { 24, 0x0d0000, 40 },
+ { 26, 0x0d4000, 40 },
+ { 28, 0x0d8000, 40 },
+ { 30, 0x0dc000, 40 },
+ {144, 0xfc0000, 40 },
+ {148, 0xfc8000, 40 },
+ {154, 0xfd0000, 40 },
+ {156, 0xfd8000, 40 },
+ { 0, 0x0c0000, 20 },
+ { 1, 0x0c2000, 20 },
+ { 2, 0x0c4000, 20 },
+ { 3, 0x0c6000, 20 }
+};
+
+#define IRQ_MASK 243
+static const struct {
+ unsigned char new_irq;
+ unsigned char old_irq;
+} irq_table[] = {
+ { 3, 3 },
+ { 4, 4 },
+ { 10, 10 },
+ { 14, 15 }
+};
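+
+/*
+ * A hedged sketch of how the POS registers index the tables above,
+ * mirroring the switch in ultramca_probe() below; decode_elite_pos() is
+ * hypothetical and only illustrates the bit-slicing used for the
+ * Elite/A style boards:
+ *
+ *	static void decode_elite_pos(unsigned char pos2, unsigned char pos5,
+ *				     int *tbase, int *tirq)
+ *	{
+ *		// pos2 bits 7-4 pick the I/O base, pos5 bits 3-2 the IRQ
+ *		*tbase = addr_table[(pos2 & 0xf0) >> 4].base_addr;
+ *		*tirq = irq_table[(pos5 & 0x0c) >> 2].new_irq;
+ *	}
+ */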
+
+static short smc_mca_adapter_ids[] __initdata = {
+ 0x61c8,
+ 0x61c9,
+ 0x6fc0,
+ 0x6fc1,
+ 0x6fc2,
+ 0xefd4,
+ 0xefd5,
+ 0xefe5,
+ 0x0000
+};
+
+static char *smc_mca_adapter_names[] __initdata = {
+ "SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)",
+ "SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)",
+ "WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)",
+ "WD Starcard PLUS/A (WD8003ST/A)",
+ "WD Ethercard PLUS 10T/A (WD8003W/A)",
+ "IBM PS/2 Adapter/A for Ethernet UTP/AUI (WD8013WP/A)",
+ "IBM PS/2 Adapter/A for Ethernet BNC/AUI (WD8013EP/A)",
+ "IBM PS/2 Adapter/A for Ethernet",
+ NULL
+};
+
+static int ultra_found = 0;
+
+
+static const struct net_device_ops ultramca_netdev_ops = {
+ .ndo_open = ultramca_open,
+ .ndo_stop = ultramca_close_card,
+
+ .ndo_start_xmit = ei_start_xmit,
+ .ndo_tx_timeout = ei_tx_timeout,
+ .ndo_get_stats = ei_get_stats,
+ .ndo_set_rx_mode = ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ei_poll,
+#endif
+};
+
+static int __init ultramca_probe(struct device *gen_dev)
+{
+ unsigned short ioaddr;
+ struct net_device *dev;
+ unsigned char reg4, num_pages;
+ struct mca_device *mca_dev = to_mca_device(gen_dev);
+ char slot = mca_dev->slot;
+ unsigned char pos2 = 0xff, pos3 = 0xff, pos4 = 0xff, pos5 = 0xff;
+ int i, rc;
+ int adapter = mca_dev->index;
+ int tbase = 0;
+ int tirq = 0;
+ int base_addr = ultra_io[ultra_found];
+ int irq = ultra_irq[ultra_found];
+
+ if (base_addr || irq) {
+ printk(KERN_INFO "Probing for SMC MCA adapter");
+ if (base_addr) {
+ printk(KERN_INFO " at I/O address 0x%04x%c",
+ base_addr, irq ? ' ' : '\n');
+ }
+ if (irq) {
+ printk(KERN_INFO "using irq %d\n", irq);
+ }
+ }
+
+ tirq = 0;
+ tbase = 0;
+
+ /* If we're trying to match a specified irq or io address,
+ * we'll reject the adapter found unless it's the one we're
+ * looking for. */
+
+ pos2 = mca_device_read_stored_pos(mca_dev, 2); /* io_addr */
+ pos3 = mca_device_read_stored_pos(mca_dev, 3); /* shared mem */
+ pos4 = mca_device_read_stored_pos(mca_dev, 4); /* ROM bios addr range */
+ pos5 = mca_device_read_stored_pos(mca_dev, 5); /* irq, media and RIPL */
+
+ /* Test the following conditions:
+ * - If an irq parameter is supplied, compare it
+ * with the irq of the adapter we found
+ * - If a base_addr parameter is given, compare it
+ * with the base_addr of the adapter we found
+ * - Check that the irq and the base_addr of the
+ * adapter we found are not already in use by
+ * this driver
+ */
+
+ switch (mca_dev->index) {
+ case _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A:
+ case _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A:
+ case _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A:
+ case _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A:
+ {
+ tbase = addr_table[(pos2 & 0xf0) >> 4].base_addr;
+ tirq = irq_table[(pos5 & 0xc) >> 2].new_irq;
+ break;
+ }
+ case _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A:
+ case _6fc1_WD_Starcard_PLUS_A_WD8003ST_A:
+ case _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A:
+ case _efe5_IBM_PS2_Adapter_A_for_Ethernet:
+ {
+ tbase = ((pos2 & 0x0fe) * 0x10);
+ tirq = irq_table[(pos5 & 3)].old_irq;
+ break;
+ }
+ }
+
+ if (!tirq || !tbase ||
+ (irq && irq != tirq) ||
+ (base_addr && tbase != base_addr))
+ /* FIXME: we're trying to force the ordering of the
+ * devices here; there should be a way of getting this
+ * to happen */
+ return -ENXIO;
+
+ /* Adapter found. */
+ dev = alloc_ei_netdev();
+ if (!dev)
+ return -ENODEV;
+
+ SET_NETDEV_DEV(dev, gen_dev);
+ mca_device_set_name(mca_dev, smc_mca_adapter_names[adapter]);
+ mca_device_set_claim(mca_dev, 1);
+
+ printk(KERN_INFO "smc_mca: %s found in slot %d\n",
+ smc_mca_adapter_names[adapter], slot + 1);
+
+ ultra_found++;
+
+ dev->base_addr = ioaddr = mca_device_transform_ioport(mca_dev, tbase);
+ dev->irq = mca_device_transform_irq(mca_dev, tirq);
+ dev->mem_start = 0;
+ num_pages = 40;
+
+ switch (adapter) { /* card-# in const array above [hs] */
+ case _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A:
+ case _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A:
+ {
+ for (i = 0; i < 16; i++) { /* scan all 16 entries,
+ * indices 0 to 15 [hs] */
+ if (mem_table[i].mem_index == (pos3 & ~MEM_MASK)) {
+ dev->mem_start = (unsigned long)
+ mca_device_transform_memory(mca_dev, (void *)mem_table[i].mem_start);
+ num_pages = mem_table[i].num_pages;
+ }
+ }
+ break;
+ }
+ case _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A:
+ case _6fc1_WD_Starcard_PLUS_A_WD8003ST_A:
+ case _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A:
+ case _efe5_IBM_PS2_Adapter_A_for_Ethernet:
+ {
+ dev->mem_start = (unsigned long)
+ mca_device_transform_memory(mca_dev, (void *)((pos3 & 0xfc) * 0x1000));
+ num_pages = 0x40;
+ break;
+ }
+ case _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A:
+ case _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A:
+ {
+ /* courtesy of gamera@quartz.ocn.ne.jp, pos3 indicates
+ * the index of the 0x2000 step.
+ * beware different number of pages [hs]
+ */
+ dev->mem_start = (unsigned long)
+ mca_device_transform_memory(mca_dev, (void *)(0xc0000 + (0x2000 * (pos3 & 0xf))));
+ num_pages = 0x20 + (2 * (pos3 & 0x10));
+ break;
+ }
+ }
+
+ /* sanity check, shouldn't happen */
+ if (dev->mem_start == 0) {
+ rc = -ENODEV;
+ goto err_unclaim;
+ }
+
+ if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME)) {
+ rc = -ENODEV;
+ goto err_unclaim;
+ }
+
+ reg4 = inb(ioaddr + 4) & 0x7f;
+ outb(reg4, ioaddr + 4);
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + 8 + i);
+
+ printk(KERN_INFO "smc_mca[%d]: Parameters: %#3x, %pM",
+ slot + 1, ioaddr, dev->dev_addr);
+
+ /* Switch from the station address to the alternate register set
+ * and read the useful registers there.
+ */
+
+ outb(0x80 | reg4, ioaddr + 4);
+
+ /* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot.
+ */
+
+ outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c);
+
+ /* Switch back to the station address register set so that
+ * the MS-DOS driver can find the card after a warm boot.
+ */
+
+ outb(reg4, ioaddr + 4);
+
+ dev_set_drvdata(gen_dev, dev);
+
+ /* The 8390 isn't at the base address, so fake the offset
+ */
+
+ dev->base_addr = ioaddr + ULTRA_NIC_OFFSET;
+
+ ei_status.name = "SMC Ultra MCA";
+ ei_status.word16 = 1;
+ ei_status.tx_start_page = START_PG;
+ ei_status.rx_start_page = START_PG + TX_PAGES;
+ ei_status.stop_page = num_pages;
+
+ ei_status.mem = ioremap(dev->mem_start, (ei_status.stop_page - START_PG) * 256);
+ if (!ei_status.mem) {
+ rc = -ENOMEM;
+ goto err_release_region;
+ }
+
+ dev->mem_end = dev->mem_start + (ei_status.stop_page - START_PG) * 256;
+
+ printk(", IRQ %d memory %#lx-%#lx.\n",
+ dev->irq, dev->mem_start, dev->mem_end - 1);
+
+ ei_status.reset_8390 = &ultramca_reset_8390;
+ ei_status.block_input = &ultramca_block_input;
+ ei_status.block_output = &ultramca_block_output;
+ ei_status.get_8390_hdr = &ultramca_get_8390_hdr;
+
+ ei_status.priv = slot;
+
+ dev->netdev_ops = &ultramca_netdev_ops;
+
+ NS8390_init(dev, 0);
+
+ rc = register_netdev(dev);
+ if (rc)
+ goto err_unmap;
+
+ return 0;
+
+err_unmap:
+ iounmap(ei_status.mem);
+err_release_region:
+ release_region(ioaddr, ULTRA_IO_EXTENT);
+err_unclaim:
+ mca_device_set_claim(mca_dev, 0);
+ free_netdev(dev);
+ return rc;
+}
+
+static int ultramca_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+ int retval;
+
+ if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev)))
+ return retval;
+
+ outb(ULTRA_MEMENB, ioaddr); /* Enable memory */
+ outb(0x80, ioaddr + 5); /* ??? */
+ outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */
+ outb(0x04, ioaddr + 5); /* ??? */
+
+ /* Set the early receive warning level in window 0 high enough not
+ * to receive ERW interrupts.
+ */
+
+ /* outb_p(E8390_NODMA + E8390_PAGE0, dev->base_addr);
+ * outb(0xff, dev->base_addr + EN0_ERWCNT);
+ */
+
+ ei_open(dev);
+ return 0;
+}
+
+static void ultramca_reset_8390(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+
+ outb(ULTRA_RESET, ioaddr);
+ if (ei_debug > 1)
+ printk("resetting Ultra, t=%ld...", jiffies);
+ ei_status.txing = 0;
+
+ outb(0x80, ioaddr + 5); /* ??? */
+ outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */
+
+ if (ei_debug > 1)
+ printk("reset done\n");
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ * we don't need to be concerned with ring wrap as the header will be at
+ * the start of a page, so we optimize accordingly.
+ */
+
+static void ultramca_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ void __iomem *hdr_start = ei_status.mem + ((ring_page - START_PG) << 8);
+
+#ifdef notdef
+ /* Officially this is what we are doing, but the readl() is faster */
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+#else
+ ((unsigned int*)hdr)[0] = readl(hdr_start);
+#endif
+}
+
+/* Block input and output are easy on shared memory ethercards, the only
+ * complication is when the ring buffer wraps.
+ */
+
+static void ultramca_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ void __iomem *xfer_start = ei_status.mem + ring_offset - START_PG * 256;
+
+ if (ring_offset + count > ei_status.stop_page * 256) {
+ /* We must wrap the input move. */
+ int semi_count = ei_status.stop_page * 256 - ring_offset;
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count);
+ } else {
+ memcpy_fromio(skb->data, xfer_start, count);
+ }
+
+}
+
+static void ultramca_block_output(struct net_device *dev, int count, const unsigned char *buf,
+ int start_page)
+{
+ void __iomem *shmem = ei_status.mem + ((start_page - START_PG) << 8);
+
+ memcpy_toio(shmem, buf, count);
+}
+
+static int ultramca_close_card(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+
+ netif_stop_queue(dev);
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ outb(0x00, ioaddr + 6); /* Disable interrupts. */
+ free_irq(dev->irq, dev);
+
+ NS8390_init(dev, 0);
+ /* We should someday disable shared memory and change to 8-bit mode
+ * "just in case"...
+ */
+
+ return 0;
+}
+
+static int ultramca_remove(struct device *gen_dev)
+{
+ struct mca_device *mca_dev = to_mca_device(gen_dev);
+ struct net_device *dev = dev_get_drvdata(gen_dev);
+
+ if (dev) {
+ /* NB: ultra_close_card() does free_irq */
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET;
+
+ unregister_netdev(dev);
+ mca_device_set_claim(mca_dev, 0);
+ release_region(ioaddr, ULTRA_IO_EXTENT);
+ iounmap(ei_status.mem);
+ free_netdev(dev);
+ }
+ return 0;
+}
+
+
+static struct mca_driver ultra_driver = {
+ .id_table = smc_mca_adapter_ids,
+ .driver = {
+ .name = "smc-mca",
+ .bus = &mca_bus_type,
+ .probe = ultramca_probe,
+ .remove = ultramca_remove,
+ }
+};
+
+static int __init ultramca_init_module(void)
+{
+ if (!MCA_bus)
+ return -ENXIO;
+
+ mca_register_driver(&ultra_driver);
+
+ return ultra_found ? 0 : -ENXIO;
+}
+
+static void __exit ultramca_cleanup_module(void)
+{
+ mca_unregister_driver(&ultra_driver);
+}
+module_init(ultramca_init_module);
+module_exit(ultramca_cleanup_module);
+
diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c
new file mode 100644
index 000000000000..1cc306a83ff7
--- /dev/null
+++ b/drivers/net/ethernet/8390/smc-ultra.c
@@ -0,0 +1,622 @@
+/* smc-ultra.c: An SMC Ultra ethernet driver for Linux. */
+/*
+ This is a driver for the SMC Ultra and SMC EtherEZ ISA ethercards.
+
+ Written 1993-1998 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ This driver uses the cards in the 8390-compatible mode.
+ Most of the run-time complexity is handled by the generic code in
+ 8390.c. The code in this file is responsible for
+
+ ultra_probe() Detecting and initializing the card.
+ ultra_probe1()
+ ultra_probe_isapnp()
+
+ ultra_open() The card-specific details of starting, stopping
+ ultra_reset_8390() and resetting the 8390 NIC core.
+ ultra_close()
+
+ ultra_block_input() Routines for reading and writing blocks of
+ ultra_block_output() packet buffer memory.
+ ultra_pio_input()
+ ultra_pio_output()
+
+ This driver enables the shared memory only when doing the actual data
+ transfers to avoid a bug in early versions of the card that corrupted
+ data transferred by an AHA1542.
+
+ This driver now supports the programmed-I/O (PIO) data transfer mode of
+ the EtherEZ. It does not use the non-8390-compatible "Altego" mode.
+ That support (if available) is in smc-ez.c.
+
+ Changelog:
+
+ Paul Gortmaker : multiple card support for module users.
+ Donald Becker : 4/17/96 PIO support, minor potential problems avoided.
+ Donald Becker : 6/6/96 correctly set auto-wrap bit.
+ Alexander Sotirov : 1/20/01 Added support for ISAPnP cards
+
+ Note about the ISA PnP support:
+
+ This driver cannot autoprobe for more than one SMC EtherEZ PnP card.
+ You have to configure the second card manually through the /proc/isapnp
+ interface and then load the module with an explicit io=0x___ option.
+*/
+
+static const char version[] =
+ "smc-ultra.c:v2.02 2/3/98 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/isapnp.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+
+#include "8390.h"
+
+#define DRV_NAME "smc-ultra"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int ultra_portlist[] __initdata =
+{0x200, 0x220, 0x240, 0x280, 0x300, 0x340, 0x380, 0};
+
+static int ultra_probe1(struct net_device *dev, int ioaddr);
+
+#ifdef __ISAPNP__
+static int ultra_probe_isapnp(struct net_device *dev);
+#endif
+
+static int ultra_open(struct net_device *dev);
+static void ultra_reset_8390(struct net_device *dev);
+static void ultra_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ultra_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ultra_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page);
+static void ultra_pio_get_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ultra_pio_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ultra_pio_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page);
+static int ultra_close_card(struct net_device *dev);
+
+#ifdef __ISAPNP__
+static struct isapnp_device_id ultra_device_ids[] __initdata = {
+ { ISAPNP_VENDOR('S','M','C'), ISAPNP_FUNCTION(0x8416),
+ ISAPNP_VENDOR('S','M','C'), ISAPNP_FUNCTION(0x8416),
+ (long) "SMC EtherEZ (8416)" },
+ { } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(isapnp, ultra_device_ids);
+#endif
+
+
+#define START_PG 0x00 /* First page of TX buffer */
+
+#define ULTRA_CMDREG 0 /* Offset to ASIC command register. */
+#define ULTRA_RESET 0x80 /* Board reset, in ULTRA_CMDREG. */
+#define ULTRA_MEMENB 0x40 /* Enable the shared memory. */
+#define IOPD 0x02 /* I/O Pipe Data (16 bits), PIO operation. */
+#define IOPA 0x07 /* I/O Pipe Address for PIO operation. */
+#define ULTRA_NIC_OFFSET 16 /* NIC register offset from the base_addr. */
+#define ULTRA_IO_EXTENT 32
+#define EN0_ERWCNT 0x08 /* Early receive warning count. */
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ultra_poll(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ ei_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+/* Probe for the Ultra. This looks like an 8013 with the station
+ address PROM at I/O ports <base>+8 to <base>+13, with a checksum
+ following.
+*/
+
+static int __init do_ultra_probe(struct net_device *dev)
+{
+ int i;
+ int base_addr = dev->base_addr;
+ int irq = dev->irq;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return ultra_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+#ifdef __ISAPNP__
+ /* Look for any installed ISAPnP cards */
+ if (isapnp_present() && (ultra_probe_isapnp(dev) == 0))
+ return 0;
+#endif
+
+ for (i = 0; ultra_portlist[i]; i++) {
+ dev->irq = irq;
+ if (ultra_probe1(dev, ultra_portlist[i]) == 0)
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init ultra_probe(int unit)
+{
+ struct net_device *dev = alloc_ei_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_ultra_probe(dev);
+ if (err)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static const struct net_device_ops ultra_netdev_ops = {
+ .ndo_open = ultra_open,
+ .ndo_stop = ultra_close_card,
+
+ .ndo_start_xmit = ei_start_xmit,
+ .ndo_tx_timeout = ei_tx_timeout,
+ .ndo_get_stats = ei_get_stats,
+ .ndo_set_rx_mode = ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ultra_poll,
+#endif
+};
+
+static int __init ultra_probe1(struct net_device *dev, int ioaddr)
+{
+ int i, retval;
+ int checksum = 0;
+ const char *model_name;
+ unsigned char eeprom_irq = 0;
+ static unsigned version_printed;
+ /* Values from various config regs. */
+ unsigned char num_pages, irqreg, addr, piomode;
+ unsigned char idreg = inb(ioaddr + 7);
+ unsigned char reg4 = inb(ioaddr + 4) & 0x7f;
+
+ if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ /* Check the ID nibble. */
+ if ((idreg & 0xF0) != 0x20 /* SMC Ultra */
+ && (idreg & 0xF0) != 0x40) { /* SMC EtherEZ */
+ retval = -ENODEV;
+ goto out;
+ }
+
+ /* Select the station address register set. */
+ outb(reg4, ioaddr + 4);
+
+ for (i = 0; i < 8; i++)
+ checksum += inb(ioaddr + 8 + i);
+ if ((checksum & 0xff) != 0xFF) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ";
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + 8 + i);
+
+ printk("%s: %s at %#3x, %pM", dev->name, model_name,
+ ioaddr, dev->dev_addr);
+
+ /* Switch from the station address to the alternate register set and
+ read the useful registers there. */
+ outb(0x80 | reg4, ioaddr + 4);
+
+ /* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot. */
+ outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c);
+ piomode = inb(ioaddr + 0x8);
+ addr = inb(ioaddr + 0xb);
+ irqreg = inb(ioaddr + 0xd);
+
+ /* Switch back to the station address register set so that the MS-DOS driver
+ can find the card after a warm boot. */
+ outb(reg4, ioaddr + 4);
+
+ if (dev->irq < 2) {
+ unsigned char irqmap[] = {0, 9, 3, 5, 7, 10, 11, 15};
+ int irq;
+
+ /* The IRQ bits are split. */
+ irq = irqmap[((irqreg & 0x40) >> 4) + ((irqreg & 0x0c) >> 2)];
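+ /* e.g. irqreg 0x48: (0x40 >> 4) + (0x08 >> 2) = 6, irqmap[6] = IRQ 11 */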
+
+ if (irq == 0) {
+ printk(", failed to detect IRQ line.\n");
+ retval = -EAGAIN;
+ goto out;
+ }
+ dev->irq = irq;
+ eeprom_irq = 1;
+ }
+
+ /* The 8390 isn't at the base address, so fake the offset */
+ dev->base_addr = ioaddr+ULTRA_NIC_OFFSET;
+
+ {
+ static const int addr_tbl[4] = {
+ 0x0C0000, 0x0E0000, 0xFC0000, 0xFE0000
+ };
+ static const short num_pages_tbl[4] = {
+ 0x20, 0x40, 0x80, 0xff
+ };
+
+ dev->mem_start = ((addr & 0x0f) << 13) + addr_tbl[(addr >> 6) & 3] ;
+ num_pages = num_pages_tbl[(addr >> 4) & 3];
+ }
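+ /* e.g. addr 0x5b: (0x0b << 13) + addr_tbl[1] = 0x16000 + 0xe0000 =
+ * 0xf6000, and num_pages_tbl[(0x5b >> 4) & 3] = 0x40 pages */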
+
+ ei_status.name = model_name;
+ ei_status.word16 = 1;
+ ei_status.tx_start_page = START_PG;
+ ei_status.rx_start_page = START_PG + TX_PAGES;
+ ei_status.stop_page = num_pages;
+
+ ei_status.mem = ioremap(dev->mem_start, (ei_status.stop_page - START_PG)*256);
+ if (!ei_status.mem) {
+ printk(", failed to ioremap.\n");
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ dev->mem_end = dev->mem_start + (ei_status.stop_page - START_PG)*256;
+
+ if (piomode) {
+ printk(",%s IRQ %d programmed-I/O mode.\n",
+ eeprom_irq ? "EEPROM" : "assigned ", dev->irq);
+ ei_status.block_input = &ultra_pio_input;
+ ei_status.block_output = &ultra_pio_output;
+ ei_status.get_8390_hdr = &ultra_pio_get_hdr;
+ } else {
+ printk(",%s IRQ %d memory %#lx-%#lx.\n", eeprom_irq ? "" : "assigned ",
+ dev->irq, dev->mem_start, dev->mem_end-1);
+ ei_status.block_input = &ultra_block_input;
+ ei_status.block_output = &ultra_block_output;
+ ei_status.get_8390_hdr = &ultra_get_8390_hdr;
+ }
+ ei_status.reset_8390 = &ultra_reset_8390;
+
+ dev->netdev_ops = &ultra_netdev_ops;
+ NS8390_init(dev, 0);
+
+ retval = register_netdev(dev);
+ if (retval)
+ goto out;
+ return 0;
+out:
+ release_region(ioaddr, ULTRA_IO_EXTENT);
+ return retval;
+}
+
+#ifdef __ISAPNP__
+static int __init ultra_probe_isapnp(struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; ultra_device_ids[i].vendor != 0; i++) {
+ struct pnp_dev *idev = NULL;
+
+ while ((idev = pnp_find_dev(NULL,
+ ultra_device_ids[i].vendor,
+ ultra_device_ids[i].function,
+ idev))) {
+ /* Avoid already found cards from previous calls */
+ if (pnp_device_attach(idev) < 0)
+ continue;
+ if (pnp_activate_dev(idev) < 0) {
+ __again:
+ pnp_device_detach(idev);
+ continue;
+ }
+ /* if no io and irq, search for next */
+ if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0))
+ goto __again;
+ /* found it */
+ dev->base_addr = pnp_port_start(idev, 0);
+ dev->irq = pnp_irq(idev, 0);
+ printk(KERN_INFO "smc-ultra.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
+ (char *) ultra_device_ids[i].driver_data,
+ dev->base_addr, dev->irq);
+ if (ultra_probe1(dev, dev->base_addr) != 0) { /* Shouldn't happen. */
+ printk(KERN_ERR "smc-ultra.c: Probe of ISAPnP card at %#lx failed.\n", dev->base_addr);
+ pnp_device_detach(idev);
+ return -ENXIO;
+ }
+ ei_status.priv = (unsigned long)idev;
+ break;
+ }
+ if (!idev)
+ continue;
+ return 0;
+ }
+
+ return -ENODEV;
+}
+#endif
+
+static int
+ultra_open(struct net_device *dev)
+{
+ int retval;
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+ unsigned char irq2reg[] = {0, 0, 0x04, 0x08, 0, 0x0C, 0, 0x40,
+ 0, 0x04, 0x44, 0x48, 0, 0, 0, 0x4C, };
+
+ retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev);
+ if (retval)
+ return retval;
+
+ outb(0x00, ioaddr); /* Disable shared memory for safety. */
+ outb(0x80, ioaddr + 5);
+ /* Set the IRQ line. */
+ outb(inb(ioaddr + 4) | 0x80, ioaddr + 4);
+ outb((inb(ioaddr + 13) & ~0x4C) | irq2reg[dev->irq], ioaddr + 13);
+ outb(inb(ioaddr + 4) & 0x7f, ioaddr + 4);
+
+ if (ei_status.block_input == &ultra_pio_input) {
+ outb(0x11, ioaddr + 6); /* Enable interrupts and PIO. */
+ outb(0x01, ioaddr + 0x19); /* Enable ring read auto-wrap. */
+ } else
+ outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */
+ /* Set the early receive warning level in window 0 high enough not
+ to receive ERW interrupts. */
+ outb_p(E8390_NODMA+E8390_PAGE0, dev->base_addr);
+ outb(0xff, dev->base_addr + EN0_ERWCNT);
+ ei_open(dev);
+ return 0;
+}
+
+static void
+ultra_reset_8390(struct net_device *dev)
+{
+ int cmd_port = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC base addr */
+
+ outb(ULTRA_RESET, cmd_port);
+ if (ei_debug > 1) printk("resetting Ultra, t=%ld...", jiffies);
+ ei_status.txing = 0;
+
+ outb(0x00, cmd_port); /* Disable shared memory for safety. */
+ outb(0x80, cmd_port + 5);
+ if (ei_status.block_input == &ultra_pio_input)
+ outb(0x11, cmd_port + 6); /* Enable interrupts and PIO. */
+ else
+ outb(0x01, cmd_port + 6); /* Enable interrupts and memory. */
+
+ if (ei_debug > 1) printk("reset done\n");
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void
+ultra_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ void __iomem *hdr_start = ei_status.mem + ((ring_page - START_PG)<<8);
+
+ outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET); /* shmem on */
+#ifdef __BIG_ENDIAN
+ /* Officially this is what we are doing, but the readl() is faster */
+ /* unfortunately it isn't endian aware of the struct */
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+ hdr->count = le16_to_cpu(hdr->count);
+#else
+ ((unsigned int*)hdr)[0] = readl(hdr_start);
+#endif
+ outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* shmem off */
+}
+
+/* Block input and output are easy on shared memory ethercards, the only
+ complication is when the ring buffer wraps. */
+
+static void
+ultra_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ void __iomem *xfer_start = ei_status.mem + ring_offset - (START_PG<<8);
+
+ /* Enable shared memory. */
+ outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET);
+
+ if (ring_offset + count > ei_status.stop_page*256) {
+ /* We must wrap the input move. */
+ int semi_count = ei_status.stop_page*256 - ring_offset;
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count);
+ } else {
+ memcpy_fromio(skb->data, xfer_start, count);
+ }
+
+ outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* Disable memory. */
+}
+
+static void
+ultra_block_output(struct net_device *dev, int count, const unsigned char *buf,
+ int start_page)
+{
+ void __iomem *shmem = ei_status.mem + ((start_page - START_PG)<<8);
+
+ /* Enable shared memory. */
+ outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET);
+
+ memcpy_toio(shmem, buf, count);
+
+ outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* Disable memory. */
+}
+
+/* The identical operations for programmed I/O cards.
+ The PIO model is trivial to use: the 16 bit start address is written
+ byte-sequentially to IOPA, with no intervening I/O operations, and the
+ data is read or written to the IOPD data port.
+ The only potential complication is that the address register is shared
+ and must always be rewritten between each read/write direction change.
+ This is no problem for us, as the 8390 code ensures that we are single
+ threaded. */
+static void ultra_pio_get_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+ outb(0x00, ioaddr + IOPA); /* Set the address, LSB first. */
+ outb(ring_page, ioaddr + IOPA);
+ insw(ioaddr + IOPD, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+}
+
+static void ultra_pio_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+ char *buf = skb->data;
+
+ /* For now set the address again, although it should already be correct. */
+ outb(ring_offset, ioaddr + IOPA); /* Set the address, LSB first. */
+ outb(ring_offset >> 8, ioaddr + IOPA);
+ /* We know skbuffs are padded to at least word alignment. */
+ insw(ioaddr + IOPD, buf, (count+1)>>1);
+}
+
+static void ultra_pio_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+ outb(0x00, ioaddr + IOPA); /* Set the address, LSB first. */
+ outb(start_page, ioaddr + IOPA);
+ /* An extra odd byte is OK here as well. */
+ outsw(ioaddr + IOPD, buf, (count+1)>>1);
+}
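+
+/* The same PIO handshake in isolation, as a hedged sketch; pio_read() is
+ * hypothetical and not part of this driver. Write the 16-bit buffer
+ * address LSB-first to IOPA, then stream 16-bit words through IOPD:
+ *
+ *	static void pio_read(int ioaddr, unsigned short addr, void *buf, int len)
+ *	{
+ *		outb(addr & 0xff, ioaddr + IOPA);   // address LSB first
+ *		outb(addr >> 8, ioaddr + IOPA);     // then the MSB
+ *		insw(ioaddr + IOPD, buf, (len + 1) >> 1);
+ *	}
+ */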
+
+static int
+ultra_close_card(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* CMDREG */
+
+ netif_stop_queue(dev);
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ outb(0x00, ioaddr + 6); /* Disable interrupts. */
+ free_irq(dev->irq, dev);
+
+ NS8390_init(dev, 0);
+
+ /* We should someday disable shared memory and change to 8-bit mode
+ "just in case"... */
+
+ return 0;
+}
+
+
+#ifdef MODULE
+#define MAX_ULTRA_CARDS 4 /* Max number of Ultra cards per module */
+static struct net_device *dev_ultra[MAX_ULTRA_CARDS];
+static int io[MAX_ULTRA_CARDS];
+static int irq[MAX_ULTRA_CARDS];
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
+MODULE_DESCRIPTION("SMC Ultra/EtherEZ ISA/PnP Ethernet driver");
+MODULE_LICENSE("GPL");
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int __init
+init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_ULTRA_CARDS; this_dev++) {
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "smc-ultra.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ dev = alloc_ei_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ if (do_ultra_probe(dev) == 0) {
+ dev_ultra[found++] = dev;
+ continue;
+ }
+ free_netdev(dev);
+ printk(KERN_WARNING "smc-ultra.c: No SMC Ultra card found (i/o = 0x%x).\n", io[this_dev]);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ /* NB: ultra_close_card() does free_irq */
+#ifdef __ISAPNP__
+ struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
+ if (idev)
+ pnp_device_detach(idev);
+#endif
+ release_region(dev->base_addr - ULTRA_NIC_OFFSET, ULTRA_IO_EXTENT);
+ iounmap(ei_status.mem);
+}
+
+void __exit
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_ULTRA_CARDS; this_dev++) {
+ struct net_device *dev = dev_ultra[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/smc-ultra32.c b/drivers/net/ethernet/8390/smc-ultra32.c
new file mode 100644
index 000000000000..bb87053eb3da
--- /dev/null
+++ b/drivers/net/ethernet/8390/smc-ultra32.c
@@ -0,0 +1,464 @@
+/* smc-ultra32.c: An SMC Ultra32 EISA ethernet driver for linux.
+
+Sources:
+
+ This driver is based on (cloned from) the ISA SMC Ultra driver
+ written by Donald Becker. Modifications to support the EISA
+ version of the card by Paul Gortmaker and Leonard N. Zubkoff.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+Theory of Operation:
+
+ The SMC Ultra32C card uses the SMC 83c790 chip which is also
+ found on the ISA SMC Ultra cards. It has a shared memory mode of
+ operation that makes it similar to the ISA version of the card.
+ The main difference is that the EISA card has 32KB of RAM, but
+ only an 8KB window into that memory. The EISA card also can be
+ set for a bus-mastering mode of operation via the ECU, but that
+ is not (and probably will never be) supported by this driver.
+ The ECU should be run to enable shared memory and to disable the
+ bus-mastering feature for use with Linux.
+
+ By programming the 8390 to use only 8KB RAM, the modifications
+ to the ISA driver can be limited to the probe and initialization
+ code. This allows easy integration of EISA support into the ISA
+ driver. However, the driver development kit from SMC provided the
+ register information for sliding the 8KB window, and hence the 8390
+ is programmed to use the full 32KB RAM.
+
+ Unfortunately this required code changes outside the probe/init
+ routines, and thus we decided to separate the EISA driver from
+ the ISA one. In this way, ISA users don't end up with a larger
+ driver due to the EISA code, and EISA users don't end up with a
+ larger driver due to the ISA EtherEZ PIO code. The driver is
+ similar to the 3c503/16 driver, in that the window must be set
+ back to the 1st 8KB of space for access to the two 8390 Tx slots.
+
+ In testing, using only 8KB RAM (3 Tx / 5 Rx) didn't appear to
+ be a limiting factor, since the EISA bus could get packets off
+ the card fast enough, but having lots of RAM available as Rx
+ space is extra insurance if interrupt latencies become excessive.
+
+*/
+
+static const char *version = "smc-ultra32.c: 06/97 v1.00\n";
+
+
+#include <linux/module.h>
+#include <linux/eisa.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "8390.h"
+
+#define DRV_NAME "smc-ultra32"
+
+static int ultra32_probe1(struct net_device *dev, int ioaddr);
+static int ultra32_open(struct net_device *dev);
+static void ultra32_reset_8390(struct net_device *dev);
+static void ultra32_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ultra32_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ultra32_block_output(struct net_device *dev, int count,
+ const unsigned char *buf,
+ const int start_page);
+static int ultra32_close(struct net_device *dev);
+
+#define ULTRA32_CMDREG 0 /* Offset to ASIC command register. */
+#define ULTRA32_RESET 0x80 /* Board reset, in ULTRA32_CMDREG. */
+#define ULTRA32_MEMENB 0x40 /* Enable the shared memory. */
+#define ULTRA32_NIC_OFFSET 16 /* NIC register offset from the base_addr. */
+#define ULTRA32_IO_EXTENT 32
+#define EN0_ERWCNT 0x08 /* Early receive warning count. */
+
+/*
+ * Defines that apply only to the Ultra32 EISA card. Note that
+ * "smc" = 10011 01101 00011 = 0x4da3, and hence !smc8010.cfg translates
+ * into an EISA ID of 0x1080A34D
+ */
+#define ULTRA32_BASE 0xca0
+#define ULTRA32_ID 0x1080a34d
+#define ULTRA32_IDPORT (-0x20) /* 0xc80 */
+/* Config regs 1->7 from the EISA !SMC8010.CFG file. */
+#define ULTRA32_CFG1 0x04 /* 0xca4 */
+#define ULTRA32_CFG2 0x05 /* 0xca5 */
+#define ULTRA32_CFG3 (-0x18) /* 0xc88 */
+#define ULTRA32_CFG4 (-0x17) /* 0xc89 */
+#define ULTRA32_CFG5 (-0x16) /* 0xc8a */
+#define ULTRA32_CFG6 (-0x15) /* 0xc8b */
+#define ULTRA32_CFG7 0x0d /* 0xcad */
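+
+/*
+ * A sketch of where ULTRA32_ID comes from, assuming the standard EISA
+ * compressed-ASCII vendor encoding (three 5-bit letters, 'A' = 1):
+ * "SMC" packs to 0x4da3, the product ID is 0x8010 (!SMC8010.CFG), and a
+ * little-endian inl() of the four ID bytes 0x4d 0xa3 0x80 0x10 reads
+ * back as 0x1080a34d:
+ *
+ *	unsigned short vendor = (('S' - 'A' + 1) << 10) |
+ *				(('M' - 'A' + 1) << 5) |
+ *				('C' - 'A' + 1);	// = 0x4da3
+ */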
+
+static void cleanup_card(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET;
+ /* NB: ultra32_close_card() does free_irq */
+ release_region(ioaddr, ULTRA32_IO_EXTENT);
+ iounmap(ei_status.mem);
+}
+
+/* Probe for the Ultra32. This looks like an 8013 with the station
+ address PROM at I/O ports <base>+8 to <base>+13, with a checksum
+ following.
+*/
+
+struct net_device * __init ultra32_probe(int unit)
+{
+ struct net_device *dev;
+ int base;
+ int irq;
+ int err = -ENODEV;
+
+ if (!EISA_bus)
+ return ERR_PTR(-ENODEV);
+
+ dev = alloc_ei_netdev();
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ irq = dev->irq;
+
+ /* EISA spec allows for up to 16 slots, but 8 is typical. */
+ for (base = 0x1000 + ULTRA32_BASE; base < 0x9000; base += 0x1000) {
+ if (ultra32_probe1(dev, base) == 0)
+ break;
+ dev->irq = irq;
+ }
+ if (base >= 0x9000)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+
+static const struct net_device_ops ultra32_netdev_ops = {
+ .ndo_open = ultra32_open,
+ .ndo_stop = ultra32_close,
+ .ndo_start_xmit = ei_start_xmit,
+ .ndo_tx_timeout = ei_tx_timeout,
+ .ndo_get_stats = ei_get_stats,
+ .ndo_set_rx_mode = ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ei_poll,
+#endif
+};
+
+static int __init ultra32_probe1(struct net_device *dev, int ioaddr)
+{
+ int i, edge, media, retval;
+ int checksum = 0;
+ const char *model_name;
+ static unsigned version_printed;
+ /* Values from various config regs. */
+ unsigned char idreg;
+ unsigned char reg4;
+ const char *ifmap[] = {"UTP No Link", "", "UTP/AUI", "UTP/BNC"};
+
+ if (!request_region(ioaddr, ULTRA32_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ if (inb(ioaddr + ULTRA32_IDPORT) == 0xff ||
+ inl(ioaddr + ULTRA32_IDPORT) != ULTRA32_ID) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ media = inb(ioaddr + ULTRA32_CFG7) & 0x03;
+ edge = inb(ioaddr + ULTRA32_CFG5) & 0x08;
+ printk("SMC Ultra32 in EISA Slot %d, Media: %s, %s IRQs.\n",
+ ioaddr >> 12, ifmap[media],
+ (edge ? "Edge Triggered" : "Level Sensitive"));
+
+ idreg = inb(ioaddr + 7);
+ reg4 = inb(ioaddr + 4) & 0x7f;
+
+ /* Check the ID nibble. */
+ if ((idreg & 0xf0) != 0x20) { /* SMC Ultra */
+ retval = -ENODEV;
+ goto out;
+ }
+
+ /* Select the station address register set. */
+ outb(reg4, ioaddr + 4);
+
+ for (i = 0; i < 8; i++)
+ checksum += inb(ioaddr + 8 + i);
+ if ((checksum & 0xff) != 0xff) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ model_name = "SMC Ultra32";
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + 8 + i);
+
+ printk("%s: %s at 0x%X, %pM",
+ dev->name, model_name, ioaddr, dev->dev_addr);
+
+ /* Switch from the station address to the alternate register set and
+ read the useful registers there. */
+ outb(0x80 | reg4, ioaddr + 4);
+
+ /* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot. */
+ outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c);
+
+ /* Reset RAM addr. */
+ outb(0x00, ioaddr + 0x0b);
+
+ /* Switch back to the station address register set so that the
+ MS-DOS driver can find the card after a warm boot. */
+ outb(reg4, ioaddr + 4);
+
+ if ((inb(ioaddr + ULTRA32_CFG5) & 0x40) == 0) {
+ printk("\nsmc-ultra32: Card RAM is disabled! "
+ "Run EISA config utility.\n");
+ retval = -ENODEV;
+ goto out;
+ }
+ if ((inb(ioaddr + ULTRA32_CFG2) & 0x04) == 0)
+ printk("\nsmc-ultra32: Ignoring Bus-Master enable bit. "
+ "Run EISA config utility.\n");
+
+ if (dev->irq < 2) {
+ unsigned char irqmap[] = {0, 9, 3, 5, 7, 10, 11, 15};
+ int irq = irqmap[inb(ioaddr + ULTRA32_CFG5) & 0x07];
+ if (irq == 0) {
+ printk(", failed to detect IRQ line.\n");
+ retval = -EAGAIN;
+ goto out;
+ }
+ dev->irq = irq;
+ }
+
+ /* The 8390 isn't at the base address, so fake the offset */
+ dev->base_addr = ioaddr + ULTRA32_NIC_OFFSET;
+
+ /* Save RAM address in the unused reg0 to avoid excess inb's. */
+ ei_status.reg0 = inb(ioaddr + ULTRA32_CFG3) & 0xfc;
+
+ dev->mem_start = 0xc0000 + ((ei_status.reg0 & 0x7c) << 11);
+
+ ei_status.name = model_name;
+ ei_status.word16 = 1;
+ ei_status.tx_start_page = 0;
+ ei_status.rx_start_page = TX_PAGES;
+ /* All Ultra32 cards have 32KB memory with an 8KB window. */
+ ei_status.stop_page = 128;
+
+ ei_status.mem = ioremap(dev->mem_start, 0x2000);
+ if (!ei_status.mem) {
+ printk(", failed to ioremap.\n");
+ retval = -ENOMEM;
+ goto out;
+ }
+ dev->mem_end = dev->mem_start + 0x1fff;
+
+ printk(", IRQ %d, 32KB memory, 8KB window at 0x%lx-0x%lx.\n",
+ dev->irq, dev->mem_start, dev->mem_end);
+ ei_status.block_input = &ultra32_block_input;
+ ei_status.block_output = &ultra32_block_output;
+ ei_status.get_8390_hdr = &ultra32_get_8390_hdr;
+ ei_status.reset_8390 = &ultra32_reset_8390;
+
+ dev->netdev_ops = &ultra32_netdev_ops;
+ NS8390_init(dev, 0);
+
+ return 0;
+out:
+ release_region(ioaddr, ULTRA32_IO_EXTENT);
+ return retval;
+}
+
+static int ultra32_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC addr */
+ int irq_flags = (inb(ioaddr + ULTRA32_CFG5) & 0x08) ? 0 : IRQF_SHARED;
+ int retval;
+
+ retval = request_irq(dev->irq, ei_interrupt, irq_flags, dev->name, dev);
+ if (retval)
+ return retval;
+
+ outb(ULTRA32_MEMENB, ioaddr); /* Enable Shared Memory. */
+ outb(0x80, ioaddr + ULTRA32_CFG6); /* Enable Interrupts. */
+ outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */
+ outb(0x01, ioaddr + 6); /* Enable Interrupts. */
+ /* Set the early receive warning level in window 0 high enough not
+ to receive ERW interrupts. */
+ outb_p(E8390_NODMA+E8390_PAGE0, dev->base_addr);
+ outb(0xff, dev->base_addr + EN0_ERWCNT);
+ ei_open(dev);
+ return 0;
+}
+
+static int ultra32_close(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* CMDREG */
+
+ netif_stop_queue(dev);
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ outb(0x00, ioaddr + ULTRA32_CFG6); /* Disable Interrupts. */
+ outb(0x00, ioaddr + 6); /* Disable interrupts. */
+ free_irq(dev->irq, dev);
+
+ NS8390_init(dev, 0);
+
+ return 0;
+}
+
+static void ultra32_reset_8390(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC base addr */
+
+ outb(ULTRA32_RESET, ioaddr);
+ if (ei_debug > 1) printk("resetting Ultra32, t=%ld...", jiffies);
+ ei_status.txing = 0;
+
+ outb(ULTRA32_MEMENB, ioaddr); /* Enable Shared Memory. */
+ outb(0x80, ioaddr + ULTRA32_CFG6); /* Enable Interrupts. */
+ outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */
+ outb(0x01, ioaddr + 6); /* Enable Interrupts. */
+ if (ei_debug > 1) printk("reset done\n");
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void ultra32_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ void __iomem *hdr_start = ei_status.mem + ((ring_page & 0x1f) << 8);
+ unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
+
+ /* Select correct 8KB Window. */
+ outb(ei_status.reg0 | ((ring_page & 0x60) >> 5), RamReg);
+
+#ifdef __BIG_ENDIAN
+	/* Officially this copy is what we should always do; the readl()
+	   in the other branch is faster, but it is not endian-aware of
+	   the struct, so big-endian hosts must copy and byte-swap. */
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+ hdr->count = le16_to_cpu(hdr->count);
+#else
+ ((unsigned int*)hdr)[0] = readl(hdr_start);
+#endif
+}
+
+/* Block input and output are easy on shared memory ethercards, the only
+ complication is when the ring buffer wraps, or in this case, when a
+ packet spans an 8KB boundary. Note that the current 8KB segment is
+ already set by the get_8390_hdr routine. */
+
+static void ultra32_block_input(struct net_device *dev,
+ int count,
+ struct sk_buff *skb,
+ int ring_offset)
+{
+ void __iomem *xfer_start = ei_status.mem + (ring_offset & 0x1fff);
+ unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
+
+ if ((ring_offset & ~0x1fff) != ((ring_offset + count - 1) & ~0x1fff)) {
+ int semi_count = 8192 - (ring_offset & 0x1FFF);
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ if (ring_offset < 96*256) {
+ /* Select next 8KB Window. */
+ ring_offset += semi_count;
+ outb(ei_status.reg0 | ((ring_offset & 0x6000) >> 13), RamReg);
+ memcpy_fromio(skb->data + semi_count, ei_status.mem, count);
+ } else {
+ /* Select first 8KB Window. */
+ outb(ei_status.reg0, RamReg);
+ memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count);
+ }
+ } else {
+ memcpy_fromio(skb->data, xfer_start, count);
+ }
+}
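+
+/* Editor's illustration (a standalone sketch, not part of the driver):
+   the Ultra32 maps its 32KB of packet RAM through a single 8KB window
+   selected via CFG3, so a ring offset splits into a window index and an
+   in-window offset:
+
+	static unsigned int win_index(unsigned int ring_offset)
+	{
+		return (ring_offset & 0x6000) >> 13;	// which 8KB window
+	}
+	static unsigned int win_offset(unsigned int ring_offset)
+	{
+		return ring_offset & 0x1fff;		// offset inside it
+	}
+*/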
+
+static void ultra32_block_output(struct net_device *dev,
+ int count,
+ const unsigned char *buf,
+ int start_page)
+{
+ void __iomem *xfer_start = ei_status.mem + (start_page<<8);
+ unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
+
+ /* Select first 8KB Window. */
+ outb(ei_status.reg0, RamReg);
+
+ memcpy_toio(xfer_start, buf, count);
+}
+
+#ifdef MODULE
+#define MAX_ULTRA32_CARDS 4 /* Max number of Ultra cards per module */
+static struct net_device *dev_ultra[MAX_ULTRA32_CARDS];
+
+MODULE_DESCRIPTION("SMC Ultra32 EISA ethernet driver");
+MODULE_LICENSE("GPL");
+
+int __init init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_ULTRA32_CARDS; this_dev++) {
+ struct net_device *dev = ultra32_probe(-1);
+ if (IS_ERR(dev))
+ break;
+ dev_ultra[found++] = dev;
+ }
+ if (found)
+ return 0;
+ printk(KERN_WARNING "smc-ultra32.c: No SMC Ultra32 found.\n");
+ return -ENXIO;
+}
+
+void __exit cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_ULTRA32_CARDS; this_dev++) {
+ struct net_device *dev = dev_ultra[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
new file mode 100644
index 000000000000..d85f0a84bc7b
--- /dev/null
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -0,0 +1,294 @@
+/* stnic.c : A SH7750 specific part of driver for NS DP83902A ST-NIC.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999 kaz Kojima
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <mach-se/mach/se.h>
+#include <asm/machvec.h>
+#ifdef CONFIG_SH_STANDARD_BIOS
+#include <asm/sh_bios.h>
+#endif
+
+#include "8390.h"
+
+#define DRV_NAME "stnic"
+
+#define byte unsigned char
+#define half unsigned short
+#define word unsigned int
+#define vbyte volatile unsigned char
+#define vhalf volatile unsigned short
+#define vword volatile unsigned int
+
+#define STNIC_RUN 0x01 /* 1 == Run, 0 == reset. */
+
+#define START_PG 0 /* First page of TX buffer */
+#define STOP_PG 128 /* Last page +1 of RX ring */
+
+/* Alias */
+#define STNIC_CR E8390_CMD
+#define PG0_RSAR0 EN0_RSARLO
+#define PG0_RSAR1 EN0_RSARHI
+#define PG0_RBCR0 EN0_RCNTLO
+#define PG0_RBCR1 EN0_RCNTHI
+
+#define CR_RRD E8390_RREAD
+#define CR_RWR E8390_RWRITE
+#define CR_PG0 E8390_PAGE0
+#define CR_STA E8390_START
+#define CR_RDMA E8390_NODMA
+
+/* FIXME! YOU MUST SET YOUR OWN ETHER ADDRESS. */
+static byte stnic_eadr[6] =
+{0x00, 0xc0, 0x6e, 0x00, 0x00, 0x07};
+
+static struct net_device *stnic_dev;
+
+static void stnic_reset (struct net_device *dev);
+static void stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void stnic_block_input (struct net_device *dev, int count,
+ struct sk_buff *skb , int ring_offset);
+static void stnic_block_output (struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+
+static void stnic_init (struct net_device *dev);
+
+/* SH7750 specific read/write io. */
+static inline void
+STNIC_DELAY (void)
+{
+ vword trash;
+ trash = *(vword *) 0xa0000000;
+ trash = *(vword *) 0xa0000000;
+ trash = *(vword *) 0xa0000000;
+}
+
+static inline byte
+STNIC_READ (int reg)
+{
+ byte val;
+
+ val = (*(vhalf *) (PA_83902 + ((reg) << 1)) >> 8) & 0xff;
+ STNIC_DELAY ();
+ return val;
+}
+
+static inline void
+STNIC_WRITE (int reg, byte val)
+{
+ *(vhalf *) (PA_83902 + ((reg) << 1)) = ((half) (val) << 8);
+ STNIC_DELAY ();
+}
+
+static int __init stnic_probe(void)
+{
+ struct net_device *dev;
+ int i, err;
+
+ /* If we are not running on a SolutionEngine, give up now */
+ if (! MACH_SE)
+ return -ENODEV;
+
+ /* New style probing API */
+ dev = alloc_ei_netdev();
+ if (!dev)
+ return -ENOMEM;
+
+#ifdef CONFIG_SH_STANDARD_BIOS
+ sh_bios_get_node_addr (stnic_eadr);
+#endif
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ dev->dev_addr[i] = stnic_eadr[i];
+
+ /* Set the base address to point to the NIC, not the "real" base! */
+ dev->base_addr = 0x1000;
+ dev->irq = IRQ_STNIC;
+ dev->netdev_ops = &ei_netdev_ops;
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+ err = request_irq (dev->irq, ei_interrupt, 0, DRV_NAME, dev);
+ if (err) {
+ printk (KERN_EMERG " unable to get IRQ %d.\n", dev->irq);
+ free_netdev(dev);
+ return err;
+ }
+
+ ei_status.name = dev->name;
+ ei_status.word16 = 1;
+#ifdef __LITTLE_ENDIAN__
+ ei_status.bigendian = 0;
+#else
+ ei_status.bigendian = 1;
+#endif
+ ei_status.tx_start_page = START_PG;
+ ei_status.rx_start_page = START_PG + TX_PAGES;
+ ei_status.stop_page = STOP_PG;
+
+ ei_status.reset_8390 = &stnic_reset;
+ ei_status.get_8390_hdr = &stnic_get_hdr;
+ ei_status.block_input = &stnic_block_input;
+ ei_status.block_output = &stnic_block_output;
+
+ stnic_init (dev);
+
+ err = register_netdev(dev);
+ if (err) {
+ free_irq(dev->irq, dev);
+ free_netdev(dev);
+ return err;
+ }
+ stnic_dev = dev;
+
+ printk (KERN_INFO "NS ST-NIC 83902A\n");
+
+ return 0;
+}
+
+static void
+stnic_reset (struct net_device *dev)
+{
+ *(vhalf *) PA_83902_RST = 0;
+ udelay (5);
+ if (ei_debug > 1)
+ printk (KERN_WARNING "8390 reset done (%ld).\n", jiffies);
+ *(vhalf *) PA_83902_RST = ~0;
+ udelay (5);
+}
+
+static void
+stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ half buf[2];
+
+ STNIC_WRITE (PG0_RSAR0, 0);
+ STNIC_WRITE (PG0_RSAR1, ring_page);
+ STNIC_WRITE (PG0_RBCR0, 4);
+ STNIC_WRITE (PG0_RBCR1, 0);
+ STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA);
+
+ buf[0] = *(vhalf *) PA_83902_IF;
+ STNIC_DELAY ();
+ buf[1] = *(vhalf *) PA_83902_IF;
+ STNIC_DELAY ();
+ hdr->next = buf[0] >> 8;
+ hdr->status = buf[0] & 0xff;
+#ifdef __LITTLE_ENDIAN__
+ hdr->count = buf[1];
+#else
+ hdr->count = ((buf[1] >> 8) & 0xff) | (buf[1] << 8);
+#endif
+
+ if (ei_debug > 1)
+ printk (KERN_DEBUG "ring %x status %02x next %02x count %04x.\n",
+ ring_page, hdr->status, hdr->next, hdr->count);
+
+ STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA);
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you are
+ porting to a new ethercard look at the packet driver source for hints.
+   The ST-NIC doesn't use shared memory -- we put the packet
+ out through the "remote DMA" dataport. */
+
+static void
+stnic_block_input (struct net_device *dev, int length, struct sk_buff *skb,
+ int offset)
+{
+ char *buf = skb->data;
+ half val;
+
+ STNIC_WRITE (PG0_RSAR0, offset & 0xff);
+ STNIC_WRITE (PG0_RSAR1, offset >> 8);
+ STNIC_WRITE (PG0_RBCR0, length & 0xff);
+ STNIC_WRITE (PG0_RBCR1, length >> 8);
+ STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA);
+
+ if (length & 1)
+ length++;
+
+ while (length > 0)
+ {
+ val = *(vhalf *) PA_83902_IF;
+#ifdef __LITTLE_ENDIAN__
+ *buf++ = val & 0xff;
+ *buf++ = val >> 8;
+#else
+ *buf++ = val >> 8;
+ *buf++ = val & 0xff;
+#endif
+ STNIC_DELAY ();
+ length -= sizeof (half);
+ }
+
+ STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA);
+}
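+
+/* Editor's illustration (a standalone sketch, not part of the driver):
+   each 16-bit dataport read carries two packet bytes, and the #ifdef
+   above simply stores them in on-the-wire order on either endianness:
+
+	static void unpack_word(unsigned short val, unsigned char *buf,
+				int little_endian)
+	{
+		buf[0] = little_endian ? (val & 0xff) : (val >> 8);
+		buf[1] = little_endian ? (val >> 8) : (val & 0xff);
+	}
+*/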
+
+static void
+stnic_block_output (struct net_device *dev, int length,
+ const unsigned char *buf, int output_page)
+{
+ STNIC_WRITE (PG0_RBCR0, 1); /* Write non-zero value */
+ STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA);
+ STNIC_DELAY ();
+
+ STNIC_WRITE (PG0_RBCR0, length & 0xff);
+ STNIC_WRITE (PG0_RBCR1, length >> 8);
+ STNIC_WRITE (PG0_RSAR0, 0);
+ STNIC_WRITE (PG0_RSAR1, output_page);
+ STNIC_WRITE (STNIC_CR, CR_RWR | CR_PG0 | CR_STA);
+
+ if (length & 1)
+ length++;
+
+ while (length > 0)
+ {
+#ifdef __LITTLE_ENDIAN__
+ *(vhalf *) PA_83902_IF = ((half) buf[1] << 8) | buf[0];
+#else
+ *(vhalf *) PA_83902_IF = ((half) buf[0] << 8) | buf[1];
+#endif
+ STNIC_DELAY ();
+ buf += sizeof (half);
+ length -= sizeof (half);
+ }
+
+ STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA);
+}
+
+/* This function resets the STNIC if something screws up. */
+static void
+stnic_init (struct net_device *dev)
+{
+ stnic_reset (dev);
+ NS8390_init (dev, 0);
+}
+
+static void __exit stnic_cleanup(void)
+{
+ unregister_netdev(stnic_dev);
+ free_irq(stnic_dev->irq, stnic_dev);
+ free_netdev(stnic_dev);
+}
+
+module_init(stnic_probe);
+module_exit(stnic_cleanup);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c
new file mode 100644
index 000000000000..c175fadb597b
--- /dev/null
+++ b/drivers/net/ethernet/8390/wd.c
@@ -0,0 +1,567 @@
+/* wd.c: A WD80x3 ethernet driver for linux. */
+/*
+ Written 1993-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ This is a driver for WD8003 and WD8013 "compatible" ethercards.
+
+ Thanks to Russ Nelson (nelson@crnwyr.com) for loaning me a WD8013.
+
+ Changelog:
+
+ Paul Gortmaker : multiple card support for module users, support
+ for non-standard memory sizes.
+
+
+*/
+
+static const char version[] =
+ "wd.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "8390.h"
+
+#define DRV_NAME "wd"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int wd_portlist[] __initdata =
+{0x300, 0x280, 0x380, 0x240, 0};
+
+static int wd_probe1(struct net_device *dev, int ioaddr);
+
+static int wd_open(struct net_device *dev);
+static void wd_reset_8390(struct net_device *dev);
+static void wd_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void wd_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void wd_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page);
+static int wd_close(struct net_device *dev);
+
+
+#define WD_START_PG 0x00 /* First page of TX buffer */
+#define WD03_STOP_PG 0x20 /* Last page +1 of RX ring */
+#define WD13_STOP_PG 0x40 /* Last page +1 of RX ring */
+
+#define WD_CMDREG 0 /* Offset to ASIC command register. */
+#define WD_RESET 0x80 /* Board reset, in WD_CMDREG. */
+#define WD_MEMENB 0x40 /* Enable the shared memory. */
+#define WD_CMDREG5 5 /* Offset to 16-bit-only ASIC register 5. */
+#define ISA16 0x80 /* Enable 16 bit access from the ISA bus. */
+#define NIC16 0x40 /* Enable 16 bit access from the 8390. */
+#define WD_NIC_OFFSET 16 /* Offset to the 8390 from the base_addr. */
+#define WD_IO_EXTENT 32
+
+
+/* Probe for the WD8003 and WD8013. These cards have the station
+ address PROM at I/O ports <base>+8 to <base>+13, with a checksum
+   following. A Soundblaster can have the same checksum as a WD
+   ethercard, so we have an extra exclusionary check for it.
+
+ The wd_probe1() routine initializes the card and fills the
+ station address field. */
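+
+/* Editor's sketch of the checksum rule used below (assuming the inb()
+   accessor used throughout this file): the eight PROM bytes at
+   <base>+8..<base>+15 must sum to 0xff modulo 256:
+
+	int sum = 0, i;
+	for (i = 0; i < 8; i++)
+		sum += inb(ioaddr + 8 + i);
+	if ((sum & 0xff) != 0xff)
+		return -ENODEV;		// not a WD80x3 address PROM
+*/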
+
+static int __init do_wd_probe(struct net_device *dev)
+{
+ int i;
+ struct resource *r;
+ int base_addr = dev->base_addr;
+ int irq = dev->irq;
+ int mem_start = dev->mem_start;
+ int mem_end = dev->mem_end;
+
+ if (base_addr > 0x1ff) { /* Check a user specified location. */
+ r = request_region(base_addr, WD_IO_EXTENT, "wd-probe");
+ if ( r == NULL)
+ return -EBUSY;
+ i = wd_probe1(dev, base_addr);
+ if (i != 0)
+ release_region(base_addr, WD_IO_EXTENT);
+ else
+ r->name = dev->name;
+ return i;
+ }
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ for (i = 0; wd_portlist[i]; i++) {
+ int ioaddr = wd_portlist[i];
+ r = request_region(ioaddr, WD_IO_EXTENT, "wd-probe");
+ if (r == NULL)
+ continue;
+ if (wd_probe1(dev, ioaddr) == 0) {
+ r->name = dev->name;
+ return 0;
+ }
+ release_region(ioaddr, WD_IO_EXTENT);
+ dev->irq = irq;
+ dev->mem_start = mem_start;
+ dev->mem_end = mem_end;
+ }
+
+ return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init wd_probe(int unit)
+{
+ struct net_device *dev = alloc_ei_netdev();
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_wd_probe(dev);
+ if (err)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static const struct net_device_ops wd_netdev_ops = {
+ .ndo_open = wd_open,
+ .ndo_stop = wd_close,
+ .ndo_start_xmit = ei_start_xmit,
+ .ndo_tx_timeout = ei_tx_timeout,
+ .ndo_get_stats = ei_get_stats,
+ .ndo_set_rx_mode = ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ei_poll,
+#endif
+};
+
+static int __init wd_probe1(struct net_device *dev, int ioaddr)
+{
+ int i;
+ int err;
+ int checksum = 0;
+ int ancient = 0; /* An old card without config registers. */
+ int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */
+ const char *model_name;
+ static unsigned version_printed;
+
+ for (i = 0; i < 8; i++)
+ checksum += inb(ioaddr + 8 + i);
+ if (inb(ioaddr + 8) == 0xff /* Extra check to avoid soundcard. */
+ || inb(ioaddr + 9) == 0xff
+ || (checksum & 0xff) != 0xFF)
+ return -ENODEV;
+
+ /* Check for semi-valid mem_start/end values if supplied. */
+ if ((dev->mem_start % 0x2000) || (dev->mem_end % 0x2000)) {
+ printk(KERN_WARNING "wd.c: user supplied mem_start or mem_end not on 8kB boundary - ignored.\n");
+ dev->mem_start = 0;
+ dev->mem_end = 0;
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk(version);
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + 8 + i);
+
+ printk("%s: WD80x3 at %#3x, %pM",
+ dev->name, ioaddr, dev->dev_addr);
+
+ /* The following PureData probe code was contributed by
+ Mike Jagdis <jaggy@purplet.demon.co.uk>. Puredata does software
+ configuration differently from others so we have to check for them.
+ This detects an 8 bit, 16 bit or dumb (Toshiba, jumpered) card.
+ */
+ if (inb(ioaddr+0) == 'P' && inb(ioaddr+1) == 'D') {
+ unsigned char reg5 = inb(ioaddr+5);
+
+ switch (inb(ioaddr+2)) {
+ case 0x03: word16 = 0; model_name = "PDI8023-8"; break;
+ case 0x05: word16 = 0; model_name = "PDUC8023"; break;
+ case 0x0a: word16 = 1; model_name = "PDI8023-16"; break;
+ /* Either 0x01 (dumb) or they've released a new version. */
+ default: word16 = 0; model_name = "PDI8023"; break;
+ }
+ dev->mem_start = ((reg5 & 0x1c) + 0xc0) << 12;
+ dev->irq = (reg5 & 0xe0) == 0xe0 ? 10 : (reg5 >> 5) + 1;
+ } else { /* End of PureData probe */
+ /* This method of checking for a 16-bit board is borrowed from the
+ we.c driver. A simpler method is just to look in ASIC reg. 0x03.
+	   I'm comparing the two methods in alpha test to make certain they
+ return the same result. */
+ /* Check for the old 8 bit board - it has register 0/8 aliasing.
+ Do NOT check i>=6 here -- it hangs the old 8003 boards! */
+ for (i = 0; i < 6; i++)
+ if (inb(ioaddr+i) != inb(ioaddr+8+i))
+ break;
+ if (i >= 6) {
+ ancient = 1;
+ model_name = "WD8003-old";
+ word16 = 0;
+ } else {
+ int tmp = inb(ioaddr+1); /* fiddle with 16bit bit */
+ outb( tmp ^ 0x01, ioaddr+1 ); /* attempt to clear 16bit bit */
+ if (((inb( ioaddr+1) & 0x01) == 0x01) /* A 16 bit card */
+ && (tmp & 0x01) == 0x01 ) { /* In a 16 slot. */
+ int asic_reg5 = inb(ioaddr+WD_CMDREG5);
+ /* Magic to set ASIC to word-wide mode. */
+ outb( NIC16 | (asic_reg5&0x1f), ioaddr+WD_CMDREG5);
+ outb(tmp, ioaddr+1);
+ model_name = "WD8013";
+ word16 = 1; /* We have a 16bit board here! */
+ } else {
+ model_name = "WD8003";
+ word16 = 0;
+ }
+ outb(tmp, ioaddr+1); /* Restore original reg1 value. */
+ }
+#ifndef final_version
+ if ( !ancient && (inb(ioaddr+1) & 0x01) != (word16 & 0x01))
+ printk("\nWD80?3: Bus width conflict, %d (probe) != %d (reg report).",
+ word16 ? 16 : 8, (inb(ioaddr+1) & 0x01) ? 16 : 8);
+#endif
+ }
+
+#if defined(WD_SHMEM) && WD_SHMEM > 0x80000
+ /* Allow a compile-time override. */
+ dev->mem_start = WD_SHMEM;
+#else
+ if (dev->mem_start == 0) {
+ /* Sanity and old 8003 check */
+ int reg0 = inb(ioaddr);
+ if (reg0 == 0xff || reg0 == 0) {
+ /* Future plan: this could check a few likely locations first. */
+ dev->mem_start = 0xd0000;
+ printk(" assigning address %#lx", dev->mem_start);
+ } else {
+ int high_addr_bits = inb(ioaddr+WD_CMDREG5) & 0x1f;
+ /* Some boards don't have the register 5 -- it returns 0xff. */
+ if (high_addr_bits == 0x1f || word16 == 0)
+ high_addr_bits = 0x01;
+ dev->mem_start = ((reg0&0x3f) << 13) + (high_addr_bits << 19);
+ }
+ }
+#endif
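+
+/* Editor's worked example (a sketch): with reg0 = 0x28 and
+   high_addr_bits = 0x01, the computation above yields
+   (0x28 << 13) + (0x01 << 19) = 0x50000 + 0x80000 = 0xd0000,
+   the classic default shared-memory address. */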
+
+ /* The 8390 isn't at the base address -- the ASIC regs are there! */
+ dev->base_addr = ioaddr+WD_NIC_OFFSET;
+
+ if (dev->irq < 2) {
+ static const int irqmap[] = {9, 3, 5, 7, 10, 11, 15, 4};
+ int reg1 = inb(ioaddr+1);
+ int reg4 = inb(ioaddr+4);
+ if (ancient || reg1 == 0xff) { /* Ack!! No way to read the IRQ! */
+ short nic_addr = ioaddr+WD_NIC_OFFSET;
+ unsigned long irq_mask;
+
+ /* We have an old-style ethercard that doesn't report its IRQ
+ line. Do autoirq to find the IRQ line. Note that this IS NOT
+ a reliable way to trigger an interrupt. */
+ outb_p(E8390_NODMA + E8390_STOP, nic_addr);
+ outb(0x00, nic_addr+EN0_IMR); /* Disable all intrs. */
+
+ irq_mask = probe_irq_on();
+ outb_p(0xff, nic_addr + EN0_IMR); /* Enable all interrupts. */
+ outb_p(0x00, nic_addr + EN0_RCNTLO);
+ outb_p(0x00, nic_addr + EN0_RCNTHI);
+ outb(E8390_RREAD+E8390_START, nic_addr); /* Trigger it... */
+ mdelay(20);
+ dev->irq = probe_irq_off(irq_mask);
+
+ outb_p(0x00, nic_addr+EN0_IMR); /* Mask all intrs. again. */
+
+ if (ei_debug > 2)
+ printk(" autoirq is %d", dev->irq);
+ if (dev->irq < 2)
+ dev->irq = word16 ? 10 : 5;
+ } else
+ dev->irq = irqmap[((reg4 >> 5) & 0x03) + (reg1 & 0x04)];
+ } else if (dev->irq == 2) /* Fixup bogosity: IRQ2 is really IRQ9 */
+ dev->irq = 9;
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+ i = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev);
+ if (i) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ return i;
+ }
+
+	/* OK, we are certain this is going to work. Set up the device. */
+ ei_status.name = model_name;
+ ei_status.word16 = word16;
+ ei_status.tx_start_page = WD_START_PG;
+ ei_status.rx_start_page = WD_START_PG + TX_PAGES;
+
+ /* Don't map in the shared memory until the board is actually opened. */
+
+ /* Some cards (eg WD8003EBT) can be jumpered for more (32k!) memory. */
+ if (dev->mem_end != 0) {
+ ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
+ ei_status.priv = dev->mem_end - dev->mem_start;
+ } else {
+ ei_status.stop_page = word16 ? WD13_STOP_PG : WD03_STOP_PG;
+ dev->mem_end = dev->mem_start + (ei_status.stop_page - WD_START_PG)*256;
+ ei_status.priv = (ei_status.stop_page - WD_START_PG)*256;
+ }
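+
+	/* Editor's worked example (a sketch): a stock 16-bit card gets
+	   stop_page 0x40, i.e. 0x40 * 256 = 16KB of shared memory; a
+	   32KB-jumpered board passes mem_end - mem_start = 0x8000 and
+	   gets stop_page 0x8000/256 = 128. */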
+
+ ei_status.mem = ioremap(dev->mem_start, ei_status.priv);
+ if (!ei_status.mem) {
+ free_irq(dev->irq, dev);
+ return -ENOMEM;
+ }
+
+ printk(" %s, IRQ %d, shared memory at %#lx-%#lx.\n",
+ model_name, dev->irq, dev->mem_start, dev->mem_end-1);
+
+ ei_status.reset_8390 = wd_reset_8390;
+ ei_status.block_input = wd_block_input;
+ ei_status.block_output = wd_block_output;
+ ei_status.get_8390_hdr = wd_get_8390_hdr;
+
+ dev->netdev_ops = &wd_netdev_ops;
+ NS8390_init(dev, 0);
+
+#if 1
+ /* Enable interrupt generation on softconfig cards -- M.U */
+ /* .. but possibly potentially unsafe - Donald */
+ if (inb(ioaddr+14) & 0x20)
+ outb(inb(ioaddr+4)|0x80, ioaddr+4);
+#endif
+
+ err = register_netdev(dev);
+ if (err) {
+ free_irq(dev->irq, dev);
+ iounmap(ei_status.mem);
+ }
+ return err;
+}
+
+static int
+wd_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+
+ /* Map in the shared memory. Always set register 0 last to remain
+ compatible with very old boards. */
+ ei_status.reg0 = ((dev->mem_start>>13) & 0x3f) | WD_MEMENB;
+ ei_status.reg5 = ((dev->mem_start>>19) & 0x1f) | NIC16;
+
+ if (ei_status.word16)
+ outb(ei_status.reg5, ioaddr+WD_CMDREG5);
+ outb(ei_status.reg0, ioaddr); /* WD_CMDREG */
+
+ return ei_open(dev);
+}
+
+static void
+wd_reset_8390(struct net_device *dev)
+{
+ int wd_cmd_port = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+
+ outb(WD_RESET, wd_cmd_port);
+ if (ei_debug > 1) printk("resetting the WD80x3 t=%lu...", jiffies);
+ ei_status.txing = 0;
+
+ /* Set up the ASIC registers, just in case something changed them. */
+ outb((((dev->mem_start>>13) & 0x3f)|WD_MEMENB), wd_cmd_port);
+ if (ei_status.word16)
+ outb(NIC16 | ((dev->mem_start>>19) & 0x1f), wd_cmd_port+WD_CMDREG5);
+
+ if (ei_debug > 1) printk("reset done\n");
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void
+wd_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+
+ int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+ void __iomem *hdr_start = ei_status.mem + ((ring_page - WD_START_PG)<<8);
+
+ /* We'll always get a 4 byte header read followed by a packet read, so
+ we enable 16 bit mode before the header, and disable after the body. */
+ if (ei_status.word16)
+ outb(ISA16 | ei_status.reg5, wd_cmdreg+WD_CMDREG5);
+
+#ifdef __BIG_ENDIAN
+	/* Officially this copy is what we should always do; the readl()
+	   in the other branch is faster, but it is not endian-aware of
+	   the struct, so big-endian hosts must copy and byte-swap. */
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+ hdr->count = le16_to_cpu(hdr->count);
+#else
+ ((unsigned int*)hdr)[0] = readl(hdr_start);
+#endif
+}
+
+/* Block input and output are easy on shared memory ethercards, and trivial
+   on the Western Digital card where there is no choice of how to do it.
+   The only complications are that the ring buffer wraps and that we need
+   to switch between 8- and 16-bit modes. */
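+
+/* Editor's worked example (a sketch, using ei_status.priv and TX_PAGES
+   as in the code below): with priv = 16384 bytes of shared memory, a
+   1000-byte packet starting at offset 16000 is copied in two pieces:
+   semi_count = 16384 - 16000 = 384 bytes up to the end of the ring,
+   then the remaining 616 bytes from the start of the RX area at
+   mem + TX_PAGES * 256. */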
+
+static void
+wd_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+ unsigned long offset = ring_offset - (WD_START_PG<<8);
+ void __iomem *xfer_start = ei_status.mem + offset;
+
+ if (offset + count > ei_status.priv) {
+ /* We must wrap the input move. */
+ int semi_count = ei_status.priv - offset;
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count);
+ } else {
+ /* Packet is in one chunk -- we can copy + cksum. */
+ memcpy_fromio(skb->data, xfer_start, count);
+ }
+
+ /* Turn off 16 bit access so that reboot works. ISA brain-damage */
+ if (ei_status.word16)
+ outb(ei_status.reg5, wd_cmdreg+WD_CMDREG5);
+}
+
+static void
+wd_block_output(struct net_device *dev, int count, const unsigned char *buf,
+ int start_page)
+{
+ int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+ void __iomem *shmem = ei_status.mem + ((start_page - WD_START_PG)<<8);
+
+
+ if (ei_status.word16) {
+ /* Turn on and off 16 bit access so that reboot works. */
+ outb(ISA16 | ei_status.reg5, wd_cmdreg+WD_CMDREG5);
+ memcpy_toio(shmem, buf, count);
+ outb(ei_status.reg5, wd_cmdreg+WD_CMDREG5);
+ } else
+ memcpy_toio(shmem, buf, count);
+}
+
+
+static int
+wd_close(struct net_device *dev)
+{
+ int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+ ei_close(dev);
+
+ /* Change from 16-bit to 8-bit shared memory so reboot works. */
+ if (ei_status.word16)
+ outb(ei_status.reg5, wd_cmdreg + WD_CMDREG5 );
+
+ /* And disable the shared memory. */
+ outb(ei_status.reg0 & ~WD_MEMENB, wd_cmdreg);
+
+ return 0;
+}
+
+
+#ifdef MODULE
+#define MAX_WD_CARDS 4 /* Max number of wd cards per module */
+static struct net_device *dev_wd[MAX_WD_CARDS];
+static int io[MAX_WD_CARDS];
+static int irq[MAX_WD_CARDS];
+static int mem[MAX_WD_CARDS];
+static int mem_end[MAX_WD_CARDS]; /* for non std. mem size */
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(mem, int, NULL, 0);
+module_param_array(mem_end, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s) (ignored for PureData boards)");
+MODULE_PARM_DESC(mem, "memory base address(es) (ignored for PureData boards)");
+MODULE_PARM_DESC(mem_end, "memory end address(es)");
+MODULE_DESCRIPTION("ISA Western Digital wd8003/wd8013; SMC Elite, Elite16 ethernet driver");
+MODULE_LICENSE("GPL");
+
+/* This is set up so that only a single autoprobe takes place per call.
+   ISA device autoprobes on a running machine are not recommended. */
+
+int __init init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_WD_CARDS; this_dev++) {
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "wd.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ dev = alloc_ei_netdev();
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_start = mem[this_dev];
+ dev->mem_end = mem_end[this_dev];
+ if (do_wd_probe(dev) == 0) {
+ dev_wd[found++] = dev;
+ continue;
+ }
+ free_netdev(dev);
+ printk(KERN_WARNING "wd.c: No wd80x3 card found (i/o = 0x%x).\n", io[this_dev]);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr - WD_NIC_OFFSET, WD_IO_EXTENT);
+ iounmap(ei_status.mem);
+}
+
+void __exit
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_WD_CARDS; this_dev++) {
+ struct net_device *dev = dev_wd[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
new file mode 100644
index 000000000000..3aa9fe9999b5
--- /dev/null
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -0,0 +1,452 @@
+/*
+ * Amiga Linux/m68k and Linux/PPC Zorro NS8390 Ethernet Driver
+ *
+ * (C) Copyright 1998-2000 by some Elitist 680x0 Users(TM)
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This program is based on all the other NE2000 drivers for Linux
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * The Ariadne II and X-Surf are Zorro-II boards containing Realtek RTL8019AS
+ * Ethernet Controllers.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/zorro.h>
+#include <linux/jiffies.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/amigaints.h>
+#include <asm/amigahw.h>
+
+#define EI_SHIFT(x) (ei_local->reg_offset[x])
+#define ei_inb(port) in_8(port)
+#define ei_outb(val, port) out_8(port, val)
+#define ei_inb_p(port) in_8(port)
+#define ei_outb_p(val, port) out_8(port, val)
+
+static const char version[] =
+ "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include "lib8390.c"
+
+#define DRV_NAME "zorro8390"
+
+#define NE_BASE (dev->base_addr)
+#define NE_CMD (0x00 * 2)
+#define NE_DATAPORT (0x10 * 2) /* NatSemi-defined port window offset */
+#define NE_RESET (0x1f * 2) /* Issue a read to reset,
+ * a write to clear. */
+#define NE_IO_EXTENT (0x20 * 2)
+
+#define NE_EN0_ISR (0x07 * 2)
+#define NE_EN0_DCFG (0x0e * 2)
+
+#define NE_EN0_RSARLO (0x08 * 2)
+#define NE_EN0_RSARHI (0x09 * 2)
+#define NE_EN0_RCNTLO (0x0a * 2)
+#define NE_EN0_RXCR (0x0c * 2)
+#define NE_EN0_TXCR (0x0d * 2)
+#define NE_EN0_RCNTHI (0x0b * 2)
+#define NE_EN0_IMR (0x0f * 2)
+
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+#define WORDSWAP(a) ((((a) >> 8) & 0xff) | ((a) << 8))
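+/* Editor's note (a sketch): WORDSWAP(0x1234) evaluates to 0x123412 as an
+   int; assigning it to the 16-bit hdr->count below truncates to 0x3412. */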
+
+static struct card_info {
+ zorro_id id;
+ const char *name;
+ unsigned int offset;
+} cards[] __devinitdata = {
+ { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, "Ariadne II", 0x0600 },
+ { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, "X-Surf", 0x8600 },
+};
+
+/* Hard reset the card. This used to pause for the same period that a
+ * 8390 reset command required, but that shouldn't be necessary.
+ */
+static void zorro8390_reset_8390(struct net_device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+
+ if (ei_debug > 1)
+ netdev_dbg(dev, "resetting - t=%ld...\n", jiffies);
+
+ z_writeb(z_readb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RESET) == 0)
+ if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) {
+ netdev_warn(dev, "%s: did not complete\n", __func__);
+ break;
+ }
+ z_writeb(ENISR_RESET, NE_BASE + NE_EN0_ISR); /* Ack intr */
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ * we don't need to be concerned with ring wrap as the header will be at
+ * the start of a page, so we optimize accordingly.
+ */
+static void zorro8390_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int nic_base = dev->base_addr;
+ int cnt;
+ short *ptrs;
+
+ /* This *shouldn't* happen.
+ * If it does, it's the last thing you'll see
+ */
+ if (ei_status.dmaing) {
+ netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
+ __func__, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ z_writeb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
+ z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR);
+ z_writeb(sizeof(struct e8390_pkt_hdr), nic_base + NE_EN0_RCNTLO);
+ z_writeb(0, nic_base + NE_EN0_RCNTHI);
+ z_writeb(0, nic_base + NE_EN0_RSARLO); /* On page boundary */
+ z_writeb(ring_page, nic_base + NE_EN0_RSARHI);
+ z_writeb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ ptrs = (short *)hdr;
+ for (cnt = 0; cnt < sizeof(struct e8390_pkt_hdr) >> 1; cnt++)
+ *ptrs++ = z_readw(NE_BASE + NE_DATAPORT);
+
+ z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */
+
+ hdr->count = WORDSWAP(hdr->count);
+
+ ei_status.dmaing &= ~0x01;
+}
+
+/* Block input and output, similar to the Crynwr packet driver.
+ * If you are porting to a new ethercard, look at the packet driver source
+ * for hints. The NEx000 doesn't share the on-board packet memory --
+ * you have to put the packet out through the "remote DMA" dataport
+ * using z_writeb.
+ */
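+/* Editor's summary (a sketch; register names as #defined above) of the
+ * remote-DMA read sequence performed below:
+ *   1. write the byte count to NE_EN0_RCNTLO/HI
+ *   2. write the ring address to NE_EN0_RSARLO/HI
+ *   3. issue E8390_RREAD + E8390_START to NE_CMD
+ *   4. read the payload as 16-bit words from NE_DATAPORT
+ *   5. ack remote-DMA completion by writing ENISR_RDC to NE_EN0_ISR
+ */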
+static void zorro8390_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ int nic_base = dev->base_addr;
+ char *buf = skb->data;
+ short *ptrs;
+ int cnt;
+
+ /* This *shouldn't* happen.
+ * If it does, it's the last thing you'll see
+ */
+ if (ei_status.dmaing) {
+ netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
+ __func__, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ z_writeb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
+ z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR);
+ z_writeb(count & 0xff, nic_base + NE_EN0_RCNTLO);
+ z_writeb(count >> 8, nic_base + NE_EN0_RCNTHI);
+ z_writeb(ring_offset & 0xff, nic_base + NE_EN0_RSARLO);
+ z_writeb(ring_offset >> 8, nic_base + NE_EN0_RSARHI);
+ z_writeb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ ptrs = (short *)buf;
+ for (cnt = 0; cnt < count >> 1; cnt++)
+ *ptrs++ = z_readw(NE_BASE + NE_DATAPORT);
+ if (count & 0x01)
+ buf[count - 1] = z_readb(NE_BASE + NE_DATAPORT);
+
+ z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */
+ ei_status.dmaing &= ~0x01;
+}
+
+static void zorro8390_block_output(struct net_device *dev, int count,
+ const unsigned char *buf,
+ const int start_page)
+{
+ int nic_base = NE_BASE;
+ unsigned long dma_start;
+ short *ptrs;
+ int cnt;
+
+ /* Round the count up for word writes. Do we need to do this?
+ * What effect will an odd byte count have on the 8390?
+ * I should check someday.
+ */
+ if (count & 0x01)
+ count++;
+
+ /* This *shouldn't* happen.
+ * If it does, it's the last thing you'll see
+ */
+ if (ei_status.dmaing) {
+ netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
+ __func__, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ z_writeb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
+
+ z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR);
+
+ /* Now the normal output. */
+ z_writeb(count & 0xff, nic_base + NE_EN0_RCNTLO);
+ z_writeb(count >> 8, nic_base + NE_EN0_RCNTHI);
+ z_writeb(0x00, nic_base + NE_EN0_RSARLO);
+ z_writeb(start_page, nic_base + NE_EN0_RSARHI);
+
+ z_writeb(E8390_RWRITE + E8390_START, nic_base + NE_CMD);
+ ptrs = (short *)buf;
+ for (cnt = 0; cnt < count >> 1; cnt++)
+ z_writew(*ptrs++, NE_BASE + NE_DATAPORT);
+
+ dma_start = jiffies;
+
+ while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
+ if (time_after(jiffies, dma_start + 2 * HZ / 100)) {
+ /* 20ms */
+ netdev_err(dev, "timeout waiting for Tx RDC\n");
+ zorro8390_reset_8390(dev);
+ __NS8390_init(dev, 1);
+ break;
+ }
+
+ z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */
+ ei_status.dmaing &= ~0x01;
+}
+
+static int zorro8390_open(struct net_device *dev)
+{
+ __ei_open(dev);
+ return 0;
+}
+
+static int zorro8390_close(struct net_device *dev)
+{
+ if (ei_debug > 1)
+ netdev_dbg(dev, "Shutting down ethercard\n");
+ __ei_close(dev);
+ return 0;
+}
+
+static void __devexit zorro8390_remove_one(struct zorro_dev *z)
+{
+ struct net_device *dev = zorro_get_drvdata(z);
+
+ unregister_netdev(dev);
+ free_irq(IRQ_AMIGA_PORTS, dev);
+ release_mem_region(ZTWO_PADDR(dev->base_addr), NE_IO_EXTENT * 2);
+ free_netdev(dev);
+}
+
+static struct zorro_device_id zorro8390_zorro_tbl[] __devinitdata = {
+ { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, },
+ { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(zorro, zorro8390_zorro_tbl);
+
+static const struct net_device_ops zorro8390_netdev_ops = {
+ .ndo_open = zorro8390_open,
+ .ndo_stop = zorro8390_close,
+ .ndo_start_xmit = __ei_start_xmit,
+ .ndo_tx_timeout = __ei_tx_timeout,
+ .ndo_get_stats = __ei_get_stats,
+ .ndo_set_rx_mode = __ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = __ei_poll,
+#endif
+};
+
+static int __devinit zorro8390_init(struct net_device *dev,
+ unsigned long board, const char *name,
+ unsigned long ioaddr)
+{
+ int i;
+ int err;
+ unsigned char SA_prom[32];
+ int start_page, stop_page;
+ static u32 zorro8390_offsets[16] = {
+ 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
+ };
+
+ /* Reset card. Who knows what dain-bramaged state it was left in. */
+ {
+ unsigned long reset_start_time = jiffies;
+
+ z_writeb(z_readb(ioaddr + NE_RESET), ioaddr + NE_RESET);
+
+ while ((z_readb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0)
+ if (time_after(jiffies,
+ reset_start_time + 2 * HZ / 100)) {
+ netdev_warn(dev, "not found (no reset ack)\n");
+ return -ENODEV;
+ }
+
+ z_writeb(0xff, ioaddr + NE_EN0_ISR); /* Ack all intr. */
+ }
+
+ /* Read the 16 bytes of station address PROM.
+ * We must first initialize registers,
+ * similar to NS8390_init(eifdev, 0).
+ * We can't reliably read the SAPROM address without this.
+ * (I learned the hard way!).
+ */
+ {
+ static const struct {
+ u32 value;
+ u32 offset;
+ } program_seq[] = {
+ {E8390_NODMA + E8390_PAGE0 + E8390_STOP, NE_CMD},
+ /* Select page 0 */
+ {0x48, NE_EN0_DCFG}, /* 0x48: Set byte-wide access */
+ {0x00, NE_EN0_RCNTLO}, /* Clear the count regs */
+ {0x00, NE_EN0_RCNTHI},
+ {0x00, NE_EN0_IMR}, /* Mask completion irq */
+ {0xFF, NE_EN0_ISR},
+ {E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode */
+ {32, NE_EN0_RCNTLO},
+ {0x00, NE_EN0_RCNTHI},
+ {0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000 */
+ {0x00, NE_EN0_RSARHI},
+ {E8390_RREAD + E8390_START, NE_CMD},
+ };
+ for (i = 0; i < ARRAY_SIZE(program_seq); i++)
+ z_writeb(program_seq[i].value,
+ ioaddr + program_seq[i].offset);
+ }
+ for (i = 0; i < 16; i++) {
+ SA_prom[i] = z_readb(ioaddr + NE_DATAPORT);
+ (void)z_readb(ioaddr + NE_DATAPORT);
+ }
+
+ /* We must set the 8390 for word mode. */
+ z_writeb(0x49, ioaddr + NE_EN0_DCFG);
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+
+ dev->base_addr = ioaddr;
+ dev->irq = IRQ_AMIGA_PORTS;
+
+ /* Install the Interrupt handler */
+ i = request_irq(IRQ_AMIGA_PORTS, __ei_interrupt,
+ IRQF_SHARED, DRV_NAME, dev);
+ if (i)
+ return i;
+
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ dev->dev_addr[i] = SA_prom[i];
+
+ pr_debug("Found ethernet address: %pM\n", dev->dev_addr);
+
+ ei_status.name = name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+ ei_status.word16 = 1;
+
+ ei_status.rx_start_page = start_page + TX_PAGES;
+
+ ei_status.reset_8390 = zorro8390_reset_8390;
+ ei_status.block_input = zorro8390_block_input;
+ ei_status.block_output = zorro8390_block_output;
+ ei_status.get_8390_hdr = zorro8390_get_8390_hdr;
+ ei_status.reg_offset = zorro8390_offsets;
+
+ dev->netdev_ops = &zorro8390_netdev_ops;
+ __NS8390_init(dev, 0);
+ err = register_netdev(dev);
+ if (err) {
+ free_irq(IRQ_AMIGA_PORTS, dev);
+ return err;
+ }
+
+ netdev_info(dev, "%s at 0x%08lx, Ethernet Address %pM\n",
+ name, board, dev->dev_addr);
+
+ return 0;
+}
+
+static int __devinit zorro8390_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
+{
+ struct net_device *dev;
+ unsigned long board, ioaddr;
+ int err, i;
+
+ for (i = ARRAY_SIZE(cards) - 1; i >= 0; i--)
+ if (z->id == cards[i].id)
+ break;
+ if (i < 0)
+ return -ENODEV;
+
+ board = z->resource.start;
+ ioaddr = board + cards[i].offset;
+ dev = ____alloc_ei_netdev(0);
+ if (!dev)
+ return -ENOMEM;
+ if (!request_mem_region(ioaddr, NE_IO_EXTENT * 2, DRV_NAME)) {
+ free_netdev(dev);
+ return -EBUSY;
+ }
+ err = zorro8390_init(dev, board, cards[i].name, ZTWO_VADDR(ioaddr));
+ if (err) {
+ release_mem_region(ioaddr, NE_IO_EXTENT * 2);
+ free_netdev(dev);
+ return err;
+ }
+ zorro_set_drvdata(z, dev);
+ return 0;
+}
+
+static struct zorro_driver zorro8390_driver = {
+ .name = "zorro8390",
+ .id_table = zorro8390_zorro_tbl,
+ .probe = zorro8390_init_one,
+ .remove = __devexit_p(zorro8390_remove_one),
+};
+
+static int __init zorro8390_init_module(void)
+{
+ return zorro_register_driver(&zorro8390_driver);
+}
+
+static void __exit zorro8390_cleanup_module(void)
+{
+ zorro_unregister_driver(&zorro8390_driver);
+}
+
+module_init(zorro8390_init_module);
+module_exit(zorro8390_cleanup_module);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
new file mode 100644
index 000000000000..6dff5a0e733f
--- /dev/null
+++ b/drivers/net/ethernet/Kconfig
@@ -0,0 +1,177 @@
+#
+# Ethernet LAN device configuration
+#
+
+menuconfig ETHERNET
+ bool "Ethernet driver support"
+ depends on NET
+ default y
+ ---help---
+ This section contains all the Ethernet device drivers.
+
+if ETHERNET
+
+config MDIO
+ tristate
+
+config SUNGEM_PHY
+ tristate
+
+source "drivers/net/ethernet/3com/Kconfig"
+source "drivers/net/ethernet/adaptec/Kconfig"
+source "drivers/net/ethernet/aeroflex/Kconfig"
+source "drivers/net/ethernet/alteon/Kconfig"
+source "drivers/net/ethernet/amd/Kconfig"
+source "drivers/net/ethernet/apple/Kconfig"
+source "drivers/net/ethernet/atheros/Kconfig"
+source "drivers/net/ethernet/cadence/Kconfig"
+source "drivers/net/ethernet/adi/Kconfig"
+source "drivers/net/ethernet/broadcom/Kconfig"
+source "drivers/net/ethernet/brocade/Kconfig"
+source "drivers/net/ethernet/chelsio/Kconfig"
+source "drivers/net/ethernet/cirrus/Kconfig"
+source "drivers/net/ethernet/cisco/Kconfig"
+source "drivers/net/ethernet/davicom/Kconfig"
+
+config DNET
+ tristate "Dave ethernet support (DNET)"
+ depends on HAS_IOMEM
+ select PHYLIB
+ ---help---
+ The Dave ethernet interface (DNET) is found on Qong Board FPGA.
+ Say Y to include support for the DNET chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called dnet.
+
+source "drivers/net/ethernet/dec/Kconfig"
+source "drivers/net/ethernet/dlink/Kconfig"
+source "drivers/net/ethernet/emulex/Kconfig"
+source "drivers/net/ethernet/neterion/Kconfig"
+source "drivers/net/ethernet/faraday/Kconfig"
+source "drivers/net/ethernet/freescale/Kconfig"
+source "drivers/net/ethernet/fujitsu/Kconfig"
+source "drivers/net/ethernet/hp/Kconfig"
+source "drivers/net/ethernet/ibm/Kconfig"
+source "drivers/net/ethernet/intel/Kconfig"
+source "drivers/net/ethernet/i825xx/Kconfig"
+source "drivers/net/ethernet/xscale/Kconfig"
+source "drivers/net/ethernet/icplus/Kconfig"
+
+config JME
+ tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ This driver supports the PCI-Express gigabit ethernet adapters
+ based on JMicron JMC250 chipset.
+
+ To compile this driver as a module, choose M here. The module
+ will be called jme.
+
+config KORINA
+ tristate "Korina (IDT RC32434) Ethernet support"
+ depends on MIKROTIK_RB532
+ ---help---
+ If you have a Mikrotik RouterBoard 500 or IDT RC32434
+ based system say Y. Otherwise say N.
+
+config LANTIQ_ETOP
+ tristate "Lantiq SoC ETOP driver"
+ depends on SOC_TYPE_XWAY
+ ---help---
+ Support for the MII0 inside the Lantiq SoC
+
+source "drivers/net/ethernet/marvell/Kconfig"
+source "drivers/net/ethernet/mellanox/Kconfig"
+source "drivers/net/ethernet/micrel/Kconfig"
+source "drivers/net/ethernet/microchip/Kconfig"
+
+config MIPS_SIM_NET
+ tristate "MIPS simulator Network device"
+ depends on MIPS_SIM
+ ---help---
+ The MIPSNET device is a simple Ethernet network device which is
+ emulated by the MIPS Simulator.
+ If you are not using a MIPSsim or are unsure, say N.
+
+source "drivers/net/ethernet/myricom/Kconfig"
+
+config FEALNX
+ tristate "Myson MTD-8xx PCI Ethernet support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
+ cards. <http://www.myson.com.tw/>
+
+source "drivers/net/ethernet/natsemi/Kconfig"
+source "drivers/net/ethernet/8390/Kconfig"
+
+config NET_NETX
+ tristate "NetX Ethernet support"
+ select NET_CORE
+ select MII
+ depends on ARCH_NETX
+ ---help---
+ This is support for the Hilscher netX builtin Ethernet ports
+
+ To compile this driver as a module, choose M here. The module
+ will be called netx-eth.
+
+source "drivers/net/ethernet/nuvoton/Kconfig"
+source "drivers/net/ethernet/nvidia/Kconfig"
+source "drivers/net/ethernet/octeon/Kconfig"
+source "drivers/net/ethernet/oki-semi/Kconfig"
+
+config ETHOC
+ tristate "OpenCores 10/100 Mbps Ethernet MAC support"
+ depends on HAS_IOMEM && HAS_DMA
+ select NET_CORE
+ select MII
+ select PHYLIB
+ select CRC32
+ select BITREVERSE
+ ---help---
+ Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
+
+source "drivers/net/ethernet/packetengines/Kconfig"
+source "drivers/net/ethernet/pasemi/Kconfig"
+source "drivers/net/ethernet/qlogic/Kconfig"
+source "drivers/net/ethernet/racal/Kconfig"
+source "drivers/net/ethernet/realtek/Kconfig"
+source "drivers/net/ethernet/renesas/Kconfig"
+source "drivers/net/ethernet/rdc/Kconfig"
+
+config S6GMAC
+ tristate "S6105 GMAC ethernet support"
+ depends on XTENSA_VARIANT_S6000
+ select PHYLIB
+ ---help---
+ This driver supports the on chip ethernet device on the
+ S6105 xtensa processor.
+
+ To compile this driver as a module, choose M here. The module
+ will be called s6gmac.
+
+source "drivers/net/ethernet/seeq/Kconfig"
+source "drivers/net/ethernet/sis/Kconfig"
+source "drivers/net/ethernet/sfc/Kconfig"
+source "drivers/net/ethernet/sgi/Kconfig"
+source "drivers/net/ethernet/smsc/Kconfig"
+source "drivers/net/ethernet/stmicro/Kconfig"
+source "drivers/net/ethernet/sun/Kconfig"
+source "drivers/net/ethernet/tehuti/Kconfig"
+source "drivers/net/ethernet/ti/Kconfig"
+source "drivers/net/ethernet/tile/Kconfig"
+source "drivers/net/ethernet/toshiba/Kconfig"
+source "drivers/net/ethernet/tundra/Kconfig"
+source "drivers/net/ethernet/via/Kconfig"
+source "drivers/net/ethernet/xilinx/Kconfig"
+source "drivers/net/ethernet/xircom/Kconfig"
+
+endif # ETHERNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
new file mode 100644
index 000000000000..c53ad3afc991
--- /dev/null
+++ b/drivers/net/ethernet/Makefile
@@ -0,0 +1,74 @@
+#
+# Makefile for the Linux network Ethernet device drivers.
+#
+
+obj-$(CONFIG_NET_VENDOR_3COM) += 3com/
+obj-$(CONFIG_NET_VENDOR_8390) += 8390/
+obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
+obj-$(CONFIG_GRETH) += aeroflex/
+obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
+obj-$(CONFIG_NET_VENDOR_AMD) += amd/
+obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
+obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
+obj-$(CONFIG_NET_ATMEL) += cadence/
+obj-$(CONFIG_NET_BFIN) += adi/
+obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
+obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
+obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
+obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
+obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
+obj-$(CONFIG_DM9000) += davicom/
+obj-$(CONFIG_DNET) += dnet.o
+obj-$(CONFIG_NET_VENDOR_DEC) += dec/
+obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/
+obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
+obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/
+obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
+obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
+obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
+obj-$(CONFIG_NET_VENDOR_HP) += hp/
+obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
+obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
+obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/
+obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/
+obj-$(CONFIG_IP1000) += icplus/
+obj-$(CONFIG_JME) += jme.o
+obj-$(CONFIG_KORINA) += korina.o
+obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
+obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
+obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
+obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
+obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
+obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
+obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
+obj-$(CONFIG_FEALNX) += fealnx.o
+obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
+obj-$(CONFIG_NET_NETX) += netx-eth.o
+obj-$(CONFIG_NET_VENDOR_NUVOTON) += nuvoton/
+obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/
+obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
+obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/
+obj-$(CONFIG_ETHOC) += ethoc.o
+obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/
+obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/
+obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
+obj-$(CONFIG_NET_VENDOR_RACAL) += racal/
+obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
+obj-$(CONFIG_SH_ETH) += renesas/
+obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
+obj-$(CONFIG_S6GMAC) += s6gmac.o
+obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
+obj-$(CONFIG_NET_VENDOR_SIS) += sis/
+obj-$(CONFIG_SFC) += sfc/
+obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
+obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
+obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
+obj-$(CONFIG_NET_VENDOR_SUN) += sun/
+obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
+obj-$(CONFIG_NET_VENDOR_TI) += ti/
+obj-$(CONFIG_TILE_NET) += tile/
+obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/
+obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/
+obj-$(CONFIG_NET_VENDOR_VIA) += via/
+obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
+obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
diff --git a/drivers/net/ethernet/adaptec/Kconfig b/drivers/net/ethernet/adaptec/Kconfig
new file mode 100644
index 000000000000..0bff571b1bb3
--- /dev/null
+++ b/drivers/net/ethernet/adaptec/Kconfig
@@ -0,0 +1,36 @@
+#
+# Adaptec network device configuration
+#
+
+config NET_VENDOR_ADAPTEC
+ bool "Adaptec devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Adaptec cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_ADAPTEC
+
+config ADAPTEC_STARFIRE
+ tristate "Adaptec Starfire/DuraLAN support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ Say Y here if you have an Adaptec Starfire (or DuraLAN) PCI network
+ adapter. The DuraLAN chip is used on the 64 bit PCI boards from
+ Adaptec e.g. the ANA-6922A. The older 32 bit boards use the tulip
+ driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called starfire. This is recommended.
+
+endif # NET_VENDOR_ADAPTEC
diff --git a/drivers/net/ethernet/adaptec/Makefile b/drivers/net/ethernet/adaptec/Makefile
new file mode 100644
index 000000000000..6c07b758ac0a
--- /dev/null
+++ b/drivers/net/ethernet/adaptec/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Adaptec network device drivers.
+#
+
+obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
new file mode 100644
index 000000000000..d6b015598569
--- /dev/null
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -0,0 +1,2091 @@
+/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
+/*
+ Written 1998-2000 by Donald Becker.
+
+ Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
+ send all bug reports to me, and not to Donald Becker, as this code
+ has been heavily modified from Donald's original version.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The information below comes from Donald Becker's original driver:
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/starfire.html
+ [link no longer provides useful info -jgarzik]
+
+*/
+
+#define DRV_NAME "starfire"
+#define DRV_VERSION "2.1"
+#define DRV_RELDATE "July 6, 2008"
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/mm.h>
+#include <linux/firmware.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+/*
+ * The current frame processor firmware fails to checksum a fragment
+ * of length 1. If and when this is fixed, the #define below can be removed.
+ */
+#define HAS_BROKEN_FIRMWARE
+
+/*
+ * If using the broken firmware, data must be padded to the next 32-bit boundary.
+ */
+#ifdef HAS_BROKEN_FIRMWARE
+#define PADDING_MASK 3
+#endif
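+/*
+ * For example (illustrative): with PADDING_MASK 3, a 61-byte frame is
+ * padded by start_tx() to (61 + 3) & ~3 = 64 bytes via skb_padto().
+ */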
+
+/*
+ * Define this if using the driver with the zero-copy patch
+ */
+#define ZEROCOPY
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define VLAN_SUPPORT
+#endif
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Used for tuning interrupt latency vs. overhead. */
+static int intr_latency;
+static int small_frames;
+
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+static int max_interrupt_work = 20;
+static int mtu;
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ The Starfire has a 512 element hash table based on the Ethernet CRC. */
+static const int multicast_filter_limit = 512;
+/* Whether to do TCP/UDP checksums in hardware */
+static int enable_hw_cksum = 1;
+
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
+/*
+ * Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ * Setting to > 1518 effectively disables this feature.
+ *
+ * NOTE:
+ * The ia64 doesn't allow unaligned loads, even of integers misaligned
+ * on a 2-byte boundary. Thus always force copying of
+ * packets as the starfire doesn't allow for misaligned DMAs ;-(
+ * 23/10/2000 - Jes
+ *
+ * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
+ * at least, having unaligned frames leads to a rather serious performance
+ * penalty. -Ion
+ */
+#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
+static int rx_copybreak = PKT_BUF_SZ;
+#else
+static int rx_copybreak /* = 0 */;
+#endif
+
+/* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
+#ifdef __sparc__
+#define DMA_BURST_SIZE 64
+#else
+#define DMA_BURST_SIZE 128
+#endif
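+/* The burst size is programmed into the Rx/Tx DMA control registers
+   below as DMA_BURST_SIZE / 32, i.e. in units of 32 bytes. */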
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
+ The media type is usually passed in 'options[]'.
+ These variables are deprecated, use ethtool instead. -Ion
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {0, };
+static int full_duplex[MAX_UNITS] = {0, };
+
+/* Operational parameters that are set at compile time. */
+
+/* The "native" ring sizes are either 256 or 2048.
+ However in some modes a descriptor may be marked to wrap the ring earlier.
+*/
+#define RX_RING_SIZE 256
+#define TX_RING_SIZE 32
+/* The completion queues are fixed at 1024 entries, i.e. 4KB or 8KB. */
+#define DONE_Q_SIZE 1024
+/* All queues must be aligned on a 256-byte boundary */
+#define QUEUE_ALIGN 256
+
+#if RX_RING_SIZE > 256
+#define RX_Q_ENTRIES Rx2048QEntries
+#else
+#define RX_Q_ENTRIES Rx256QEntries
+#endif
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2 * HZ)
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+/* 64-bit dma_addr_t */
+#define ADDR_64BITS /* This chip uses 64 bit addresses. */
+#define netdrv_addr_t __le64
+#define cpu_to_dma(x) cpu_to_le64(x)
+#define dma_to_cpu(x) le64_to_cpu(x)
+#define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
+#define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
+#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
+#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
+#define RX_DESC_ADDR_SIZE RxDescAddr64bit
+#else /* 32-bit dma_addr_t */
+#define netdrv_addr_t __le32
+#define cpu_to_dma(x) cpu_to_le32(x)
+#define dma_to_cpu(x) le32_to_cpu(x)
+#define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
+#define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
+#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
+#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
+#define RX_DESC_ADDR_SIZE RxDescAddr32bit
+#endif
+
+#define skb_first_frag_len(skb) skb_headlen(skb)
+#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
+
+/* Firmware names */
+#define FIRMWARE_RX "adaptec/starfire_rx.bin"
+#define FIRMWARE_TX "adaptec/starfire_tx.bin"
+
+/* These identify the driver base version and may not be removed. */
+static const char version[] __devinitconst =
+KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
+" (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_FIRMWARE(FIRMWARE_RX);
+MODULE_FIRMWARE(FIRMWARE_TX);
+
+module_param(max_interrupt_work, int, 0);
+module_param(mtu, int, 0);
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param(intr_latency, int, 0);
+module_param(small_frames, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+module_param(enable_hw_cksum, int, 0);
+MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
+MODULE_PARM_DESC(mtu, "MTU (all boards)");
+MODULE_PARM_DESC(debug, "Debug level (0-7)");
+MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
+MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
+MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 9: full duplex");
+MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)");
+MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
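+
+/*
+ * Typical invocation (illustrative values only):
+ *	modprobe starfire intr_latency=100 small_frames=256 enable_hw_cksum=1
+ */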
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.
+
+II. Board-specific settings
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
+ring sizes are set fixed by the hardware, but may optionally be wrapped
+earlier by the END bit in the descriptor.
+This driver uses that hardware queue size for the Rx ring, where a large
+number of entries has no ill effect beyond increasing the potential backlog.
+The Tx ring is wrapped with the END bit, since a large hardware Tx queue
+disables the queue layer priority ordering and we have no mechanism to
+utilize the hardware two-level priority queue. When modifying the
+RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
+levels.
+
+IIIb/c. Transmit/Receive Structure
+
+See the Adaptec manual for the many possible structures, and options for
+each structure. There are far too many to document all of them here.
+
+For transmit this driver uses type 0/1 transmit descriptors (depending
+on the 32/64 bitness of the architecture), and relies on automatic
+minimum-length padding. It does not use the completion queue
+consumer index, but instead checks for non-zero status entries.
+
+For receive this driver uses type 2/3 receive descriptors. The driver
+allocates full frame size skbuffs for the Rx ring buffers, so all frames
+should fit in a single descriptor. The driver does not use the completion
+queue consumer index, but instead checks for non-zero status entries.
+
+When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
+is allocated and the frame is copied to the new skbuff. When the incoming
+frame is larger, the skbuff is passed directly up the protocol stack.
+Buffers consumed this way are replaced by newly allocated skbuffs in a later
+phase of receive.
+
+A notable aspect of operation is that unaligned buffers are not permitted by
+the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame
+isn't longword aligned, which may cause problems on some machines,
+e.g. Alphas and IA64. For these architectures, the driver is forced to copy
+the frame into a new skbuff unconditionally. Copied frames are put into the
+skbuff at an offset of "+2", thus 16-byte aligning the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and the netif_queue
+status. If the number of free Tx slots in the ring falls below a certain number
+(currently hardcoded to 4), it signals the upper layer to stop the queue.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
+number of free Tx slots is above the threshold, it signals the upper layer to
+restart the queue.
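+
+For illustration, the free-slot check on the send side reduces to a single
+unsigned subtraction (this is the test used in start_tx()):
+
+	if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
+		netif_stop_queue(dev);
+
+which remains correct even after the 32-bit counters wrap around.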
+
+IV. Notes
+
+IVb. References
+
+The Adaptec Starfire manuals, available only from Adaptec.
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+
+IVc. Errata
+
+- StopOnPerr is broken, don't enable
+- Hardware ethernet padding exposes random data, perform software padding
+ instead (unverified -- works correctly for all the hardware I have)
+
+*/
+
+
+
+enum chip_capability_flags {CanHaveMII=1, };
+
+enum chipset {
+ CH_6915 = 0,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(starfire_pci_tbl) = {
+ { PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
+
+/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
+static const struct chip_info {
+ const char *name;
+ int drv_flags;
+} netdrv_tbl[] __devinitdata = {
+ { "Adaptec Starfire 6915", CanHaveMII },
+};
+
+
+/* Offsets to the device registers.
+ Unlike software-only systems, device drivers interact with complex hardware.
+ It's not useful to define symbolic names for every register bit in the
+ device. The name can only partially document the semantics and make
+ the driver longer and more difficult to read.
+ In general, only the important configuration values or bits changed
+ multiple times should be defined symbolically.
+*/
+enum register_offsets {
+ PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
+ IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
+ MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
+ GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
+ TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
+ TxRingHiAddr=0x5009C, /* 64 bit address extension. */
+ TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
+ TxThreshold=0x500B0,
+ CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
+ RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
+ CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
+ RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
+ RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
+ TxMode=0x55000, VlanType=0x55064,
+ PerfFilterTable=0x56000, HashTable=0x56100,
+ TxGfpMem=0x58000, RxGfpMem=0x5a000,
+};
+
+/*
+ * Bits in the interrupt status/mask registers.
+ * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
+ * enables all the interrupt sources that are or'ed into those status bits.
+ */
+enum intr_status_bits {
+ IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
+ IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
+ IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
+ IntrTxComplQLow=0x200000, IntrPCI=0x100000,
+ IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
+ IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
+ IntrNormalSummary=0x8000, IntrTxDone=0x4000,
+ IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
+ IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
+ IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
+ IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
+ IntrNoTxCsum=0x20, IntrTxBadID=0x10,
+ IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
+ IntrTxGfp=0x02, IntrPCIPad=0x01,
+ /* not quite bits */
+ IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
+ IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
+ IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
+};
+
+/* Bits in the RxFilterMode register. */
+enum rx_mode_bits {
+ AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
+ AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
+ PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
+ WakeupOnGFP=0x0800,
+};
+
+/* Bits in the TxMode register */
+enum tx_mode_bits {
+ MiiSoftReset=0x8000, MIILoopback=0x4000,
+ TxFlowEnable=0x0800, RxFlowEnable=0x0400,
+ PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
+};
+
+/* Bits in the TxDescCtrl register. */
+enum tx_ctrl_bits {
+ TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
+ TxDescSpace128=0x30, TxDescSpace256=0x40,
+ TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
+ TxDescType3=0x03, TxDescType4=0x04,
+ TxNoDMACompletion=0x08,
+ TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
+ TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
+ TxDMABurstSizeShift=8,
+};
+
+/* Bits in the RxDescQCtrl register. */
+enum rx_ctrl_bits {
+ RxBufferLenShift=16, RxMinDescrThreshShift=0,
+ RxPrefetchMode=0x8000, RxVariableQ=0x2000,
+ Rx2048QEntries=0x4000, Rx256QEntries=0,
+ RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
+ RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
+ RxDescSpace4=0x000, RxDescSpace8=0x100,
+ RxDescSpace16=0x200, RxDescSpace32=0x300,
+ RxDescSpace64=0x400, RxDescSpace128=0x500,
+ RxConsumerWrEn=0x80,
+};
+
+/* Bits in the RxDMACtrl register. */
+enum rx_dmactrl_bits {
+ RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
+ RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
+ RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
+ RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
+ RxChecksumRejectTCPOnly=0x01000000,
+ RxCompletionQ2Enable=0x800000,
+ RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
+ RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
+ RxDMAQ2NonIP=0x400000,
+ RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
+ RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
+ RxBurstSizeShift=0,
+};
+
+/* Bits in the RxCompletionAddr register */
+enum rx_compl_bits {
+ RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
+ RxComplProducerWrEn=0x40,
+ RxComplType0=0x00, RxComplType1=0x10,
+ RxComplType2=0x20, RxComplType3=0x30,
+ RxComplThreshShift=0,
+};
+
+/* Bits in the TxCompletionAddr register */
+enum tx_compl_bits {
+ TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
+ TxComplProducerWrEn=0x40,
+ TxComplIntrStatus=0x20,
+ CommonQueueMode=0x10,
+ TxComplThreshShift=0,
+};
+
+/* Bits in the GenCtrl register */
+enum gen_ctrl_bits {
+ RxEnable=0x05, TxEnable=0x0a,
+ RxGFPEnable=0x10, TxGFPEnable=0x20,
+};
+
+/* Bits in the IntrTimerCtrl register */
+enum intr_ctrl_bits {
+ Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
+ SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
+ IntrLatencyMask=0x1f,
+};
+
+/* The Rx and Tx buffer descriptors. */
+struct starfire_rx_desc {
+ netdrv_addr_t rxaddr;
+};
+enum rx_desc_bits {
+ RxDescValid=1, RxDescEndRing=2,
+};
+
+/* Completion queue entry. */
+struct short_rx_done_desc {
+ __le32 status; /* Low 16 bits is length. */
+};
+struct basic_rx_done_desc {
+ __le32 status; /* Low 16 bits is length. */
+ __le16 vlanid;
+ __le16 status2;
+};
+struct csum_rx_done_desc {
+ __le32 status; /* Low 16 bits is length. */
+ __le16 csum; /* Partial checksum */
+ __le16 status2;
+};
+struct full_rx_done_desc {
+ __le32 status; /* Low 16 bits is length. */
+ __le16 status3;
+ __le16 status2;
+ __le16 vlanid;
+ __le16 csum; /* partial checksum */
+ __le32 timestamp;
+};
+/* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
+#ifdef VLAN_SUPPORT
+typedef struct full_rx_done_desc rx_done_desc;
+#define RxComplType RxComplType3
+#else /* not VLAN_SUPPORT */
+typedef struct csum_rx_done_desc rx_done_desc;
+#define RxComplType RxComplType2
+#endif /* not VLAN_SUPPORT */
+
+enum rx_done_bits {
+ RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
+};
+
+/* Type 1 Tx descriptor. */
+struct starfire_tx_desc_1 {
+ __le32 status; /* Upper bits are status, lower 16 length. */
+ __le32 addr;
+};
+
+/* Type 2 Tx descriptor. */
+struct starfire_tx_desc_2 {
+ __le32 status; /* Upper bits are status, lower 16 length. */
+ __le32 reserved;
+ __le64 addr;
+};
+
+#ifdef ADDR_64BITS
+typedef struct starfire_tx_desc_2 starfire_tx_desc;
+#define TX_DESC_TYPE TxDescType2
+#else /* not ADDR_64BITS */
+typedef struct starfire_tx_desc_1 starfire_tx_desc;
+#define TX_DESC_TYPE TxDescType1
+#endif /* not ADDR_64BITS */
+#define TX_DESC_SPACING TxDescSpaceUnlim
+
+enum tx_desc_bits {
+ TxDescID=0xB0000000,
+ TxCRCEn=0x01000000, TxDescIntr=0x08000000,
+ TxRingWrap=0x04000000, TxCalTCP=0x02000000,
+};
+struct tx_done_desc {
+ __le32 status; /* timestamp, index. */
+#if 0
+ __le32 intrstatus; /* interrupt status */
+#endif
+};
+
+struct rx_ring_info {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+};
+struct tx_ring_info {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ unsigned int used_slots;
+};
+
+#define PHY_CNT 2
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct starfire_rx_desc *rx_ring;
+ starfire_tx_desc *tx_ring;
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+ /* The addresses of rx/tx-in-place skbuffs. */
+ struct rx_ring_info rx_info[RX_RING_SIZE];
+ struct tx_ring_info tx_info[TX_RING_SIZE];
+ /* Pointers to completion queues (full pages). */
+ rx_done_desc *rx_done_q;
+ dma_addr_t rx_done_q_dma;
+ unsigned int rx_done;
+ struct tx_done_desc *tx_done_q;
+ dma_addr_t tx_done_q_dma;
+ unsigned int tx_done;
+ struct napi_struct napi;
+ struct net_device *dev;
+ struct pci_dev *pci_dev;
+#ifdef VLAN_SUPPORT
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+#endif
+ void *queue_mem;
+ dma_addr_t queue_mem_dma;
+ size_t queue_mem_size;
+
+ /* Frequently used values: keep some adjacent for cache effect. */
+ spinlock_t lock;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int cur_tx, dirty_tx, reap_tx;
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ /* These values keep track of the transceiver/media in use. */
+ int speed100; /* Set if speed == 100MBit. */
+ u32 tx_mode;
+ u32 intr_timer_ctrl;
+ u8 tx_threshold;
+ /* MII transceiver section. */
+ struct mii_if_info mii_if; /* MII lib hooks/info */
+ int phy_cnt; /* MII device addresses. */
+ unsigned char phys[PHY_CNT]; /* MII device addresses. */
+ void __iomem *base;
+};
+
+
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int netdev_open(struct net_device *dev);
+static void check_duplex(struct net_device *dev);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t intr_handler(int irq, void *dev_instance);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int __netdev_rx(struct net_device *dev, int *quota);
+static int netdev_poll(struct napi_struct *napi, int budget);
+static void refill_rx_ring(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+static void netdev_media_change(struct net_device *dev);
+static const struct ethtool_ops ethtool_ops;
+
+
+#ifdef VLAN_SUPPORT
+static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ spin_lock(&np->lock);
+ if (debug > 1)
+ printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
+ set_bit(vid, np->active_vlans);
+ set_rx_mode(dev);
+ spin_unlock(&np->lock);
+}
+
+static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ spin_lock(&np->lock);
+ if (debug > 1)
+ printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
+ clear_bit(vid, np->active_vlans);
+ set_rx_mode(dev);
+ spin_unlock(&np->lock);
+}
+#endif /* VLAN_SUPPORT */
+
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = netdev_open,
+ .ndo_stop = netdev_close,
+ .ndo_start_xmit = start_tx,
+ .ndo_tx_timeout = tx_timeout,
+ .ndo_get_stats = get_stats,
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_do_ioctl = netdev_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef VLAN_SUPPORT
+ .ndo_vlan_rx_add_vid = netdev_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = netdev_vlan_rx_kill_vid,
+#endif
+};
+
+static int __devinit starfire_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct netdev_private *np;
+ int i, irq, option, chip_idx = ent->driver_data;
+ struct net_device *dev;
+ static int card_idx = -1;
+ long ioaddr;
+ void __iomem *base;
+ int drv_flags, io_size;
+ int boguscnt;
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(version);
+#endif
+
+ card_idx++;
+
+ if (pci_enable_device (pdev))
+ return -EIO;
+
+ ioaddr = pci_resource_start(pdev, 0);
+ io_size = pci_resource_len(pdev, 0);
+ if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
+ printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
+ return -ENODEV;
+ }
+
+ dev = alloc_etherdev(sizeof(*np));
+ if (!dev) {
+ printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx);
+ return -ENOMEM;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ irq = pdev->irq;
+
+ if (pci_request_regions (pdev, DRV_NAME)) {
+ printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
+ goto err_out_free_netdev;
+ }
+
+ base = ioremap(ioaddr, io_size);
+ if (!base) {
+ printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n",
+ card_idx, io_size, ioaddr);
+ goto err_out_free_res;
+ }
+
+ pci_set_master(pdev);
+
+ /* enable MWI -- it vastly improves Rx performance on sparc64 */
+ pci_try_set_mwi(pdev);
+
+#ifdef ZEROCOPY
+ /* Starfire can do TCP/UDP checksumming */
+ if (enable_hw_cksum)
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+#endif /* ZEROCOPY */
+
+#ifdef VLAN_SUPPORT
+ dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+#endif /* VLAN_SUPPORT */
+#ifdef ADDR_64BITS
+ dev->features |= NETIF_F_HIGHDMA;
+#endif /* ADDR_64BITS */
+
+ /* Serial EEPROM reads are hidden by the hardware. */
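+	/* The six address bytes live at EEPROM window offsets 15..20 with the
+	   most significant byte last, hence the reverse 20 - i indexing. */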
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);
+
+#if ! defined(final_version) /* Dump the EEPROM contents during development. */
+ if (debug > 4)
+ for (i = 0; i < 0x20; i++)
+ printk("%2.2x%s",
+ (unsigned int)readb(base + EEPROMCtrl + i),
+ i % 16 != 15 ? " " : "\n");
+#endif
+
+ /* Issue soft reset */
+ writel(MiiSoftReset, base + TxMode);
+ udelay(1000);
+ writel(0, base + TxMode);
+
+ /* Reset the chip to erase previous misconfiguration. */
+ writel(1, base + PCIDeviceConfig);
+ boguscnt = 1000;
+ while (--boguscnt > 0) {
+ udelay(10);
+ if ((readl(base + PCIDeviceConfig) & 1) == 0)
+ break;
+ }
+ if (boguscnt == 0)
+ printk("%s: chipset reset never completed!\n", dev->name);
+ /* wait a little longer */
+ udelay(1000);
+
+ dev->base_addr = (unsigned long)base;
+ dev->irq = irq;
+
+ np = netdev_priv(dev);
+ np->dev = dev;
+ np->base = base;
+ spin_lock_init(&np->lock);
+ pci_set_drvdata(pdev, dev);
+
+ np->pci_dev = pdev;
+
+ np->mii_if.dev = dev;
+ np->mii_if.mdio_read = mdio_read;
+ np->mii_if.mdio_write = mdio_write;
+ np->mii_if.phy_id_mask = 0x1f;
+ np->mii_if.reg_num_mask = 0x1f;
+
+ drv_flags = netdrv_tbl[chip_idx].drv_flags;
+
+ option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option & 0x200)
+ np->mii_if.full_duplex = 1;
+
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->mii_if.full_duplex = 1;
+
+ if (np->mii_if.full_duplex)
+ np->mii_if.force_media = 1;
+ else
+ np->mii_if.force_media = 0;
+ np->speed100 = 1;
+
+ /* timer resolution is 128 * 0.8us */
+ np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
+ Timer10X | EnableIntrMasking;
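+	/* One timer tick is 128 * 0.8 = 102.4 us; (intr_latency * 10) / 1024
+	   converts microseconds to ticks in integer arithmetic. */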
+
+ if (small_frames > 0) {
+ np->intr_timer_ctrl |= SmallFrameBypass;
+ switch (small_frames) {
+ case 1 ... 64:
+ np->intr_timer_ctrl |= SmallFrame64;
+ break;
+ case 65 ... 128:
+ np->intr_timer_ctrl |= SmallFrame128;
+ break;
+ case 129 ... 256:
+ np->intr_timer_ctrl |= SmallFrame256;
+ break;
+ default:
+ np->intr_timer_ctrl |= SmallFrame512;
+ if (small_frames > 512)
+ printk("Adjusting small_frames down to 512\n");
+ break;
+ }
+ }
+
+ dev->netdev_ops = &netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ SET_ETHTOOL_OPS(dev, &ethtool_ops);
+
+ netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
+
+ if (mtu)
+ dev->mtu = mtu;
+
+ if (register_netdev(dev))
+ goto err_out_cleardev;
+
+ printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
+ dev->name, netdrv_tbl[chip_idx].name, base,
+ dev->dev_addr, irq);
+
+ if (drv_flags & CanHaveMII) {
+ int phy, phy_idx = 0;
+ int mii_status;
+ for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
+ mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
+ mdelay(100);
+ boguscnt = 1000;
+ while (--boguscnt > 0)
+ if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
+ break;
+ if (boguscnt == 0) {
+ printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
+ continue;
+ }
+ mii_status = mdio_read(dev, phy, MII_BMSR);
+ if (mii_status != 0) {
+ np->phys[phy_idx++] = phy;
+ np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "%#4.4x advertising %#4.4x.\n",
+ dev->name, phy, mii_status, np->mii_if.advertising);
+ /* there can be only one PHY on-board */
+ break;
+ }
+ }
+ np->phy_cnt = phy_idx;
+ if (np->phy_cnt > 0)
+ np->mii_if.phy_id = np->phys[0];
+ else
+ memset(&np->mii_if, 0, sizeof(np->mii_if));
+ }
+
+ printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
+ dev->name, enable_hw_cksum ? "enabled" : "disabled");
+ return 0;
+
+err_out_cleardev:
+ pci_set_drvdata(pdev, NULL);
+ iounmap(base);
+err_out_free_res:
+ pci_release_regions (pdev);
+err_out_free_netdev:
+ free_netdev(dev);
+ return -ENODEV;
+}
+
+
+/* Read the MII Management Data I/O (MDIO) interfaces. */
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
+ int result, boguscnt=1000;
+ /* ??? Should we add a busy-wait here? */
+ do {
+ result = readl(mdio_addr);
+ } while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
+ if (boguscnt == 0)
+ return 0;
+ if ((result & 0xffff) == 0xffff)
+ return 0;
+ return result & 0xffff;
+}
+
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
+ writel(value, mdio_addr);
+ /* The busy-wait will occur before a read. */
+}
+
+
+static int netdev_open(struct net_device *dev)
+{
+ const struct firmware *fw_rx, *fw_tx;
+ const __be32 *fw_rx_data, *fw_tx_data;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int i, retval;
+ size_t tx_size, rx_size;
+ size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;
+
+ /* Do we ever need to reset the chip??? */
+
+ retval = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
+ if (retval)
+ return retval;
+
+ /* Disable the Rx and Tx, and reset the chip. */
+ writel(0, ioaddr + GenCtrl);
+ writel(1, ioaddr + PCIDeviceConfig);
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+ dev->name, dev->irq);
+
+ /* Allocate the various queues. */
+ if (!np->queue_mem) {
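+		/* Round each queue size up to a multiple of QUEUE_ALIGN so that
+		   every region carved out of the single allocation below keeps
+		   the required 256-byte alignment. */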
+ tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
+ rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
+ tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
+ rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
+ np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
+ np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
+ if (np->queue_mem == NULL) {
+ free_irq(dev->irq, dev);
+ return -ENOMEM;
+ }
+
+ np->tx_done_q = np->queue_mem;
+ np->tx_done_q_dma = np->queue_mem_dma;
+ np->rx_done_q = (void *) np->tx_done_q + tx_done_q_size;
+ np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
+ np->tx_ring = (void *) np->rx_done_q + rx_done_q_size;
+ np->tx_ring_dma = np->rx_done_q_dma + rx_done_q_size;
+ np->rx_ring = (void *) np->tx_ring + tx_ring_size;
+ np->rx_ring_dma = np->tx_ring_dma + tx_ring_size;
+ }
+
+ /* Start with no carrier, it gets adjusted later */
+ netif_carrier_off(dev);
+ init_ring(dev);
+ /* Set the size of the Rx buffers. */
+ writel((np->rx_buf_sz << RxBufferLenShift) |
+ (0 << RxMinDescrThreshShift) |
+ RxPrefetchMode | RxVariableQ |
+ RX_Q_ENTRIES |
+ RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
+ RxDescSpace4,
+ ioaddr + RxDescQCtrl);
+
+ /* Set up the Rx DMA controller. */
+ writel(RxChecksumIgnore |
+ (0 << RxEarlyIntThreshShift) |
+ (6 << RxHighPrioThreshShift) |
+ ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
+ ioaddr + RxDMACtrl);
+
+ /* Set Tx descriptor */
+ writel((2 << TxHiPriFIFOThreshShift) |
+ (0 << TxPadLenShift) |
+ ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
+ TX_DESC_Q_ADDR_SIZE |
+ TX_DESC_SPACING | TX_DESC_TYPE,
+ ioaddr + TxDescCtrl);
+
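+	/* (x >> 16) >> 16 extracts the high 32 bits without a shift by 32,
+	   which would be undefined when dma_addr_t is only 32 bits wide;
+	   the result is then simply 0. */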
+ writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
+ writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
+ writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
+ writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
+ writel(np->tx_ring_dma, ioaddr + TxRingPtr);
+
+ writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
+ writel(np->rx_done_q_dma |
+ RxComplType |
+ (0 << RxComplThreshShift),
+ ioaddr + RxCompletionAddr);
+
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
+
+ /* Fill both the Tx SA register and the Rx perfect filter. */
+ for (i = 0; i < 6; i++)
+ writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
+ /* The first entry is special because it bypasses the VLAN filter.
+ Don't use it. */
+ writew(0, ioaddr + PerfFilterTable);
+ writew(0, ioaddr + PerfFilterTable + 4);
+ writew(0, ioaddr + PerfFilterTable + 8);
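+	/* The station address is written as three 16-bit words, most
+	   significant word last. */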
+ for (i = 1; i < 16; i++) {
+ __be16 *eaddrs = (__be16 *)dev->dev_addr;
+ void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
+ writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
+ writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
+ writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8;
+ }
+
+ /* Initialize other registers. */
+ /* Configure the PCI bus bursts and FIFO thresholds. */
+ np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable; /* modified when link is up. */
+ writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
+ udelay(1000);
+ writel(np->tx_mode, ioaddr + TxMode);
+ np->tx_threshold = 4;
+ writel(np->tx_threshold, ioaddr + TxThreshold);
+
+ writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);
+
+ napi_enable(&np->napi);
+
+ netif_start_queue(dev);
+
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
+ set_rx_mode(dev);
+
+ np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
+ check_duplex(dev);
+
+ /* Enable GPIO interrupts on link change */
+ writel(0x0f00ff00, ioaddr + GPIOCtrl);
+
+ /* Set the interrupt mask */
+ writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
+ IntrTxDMADone | IntrStatsMax | IntrLinkChange |
+ IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
+ ioaddr + IntrEnable);
+ /* Enable PCI interrupts. */
+ writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
+ ioaddr + PCIDeviceConfig);
+
+#ifdef VLAN_SUPPORT
+ /* Set VLAN type to 802.1q */
+ writel(ETH_P_8021Q, ioaddr + VlanType);
+#endif /* VLAN_SUPPORT */
+
+ retval = request_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev);
+ if (retval) {
+ printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
+ FIRMWARE_RX);
+ goto out_init;
+ }
+ if (fw_rx->size % 4) {
+ printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
+ fw_rx->size, FIRMWARE_RX);
+ retval = -EINVAL;
+ goto out_rx;
+ }
+ retval = request_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev);
+ if (retval) {
+ printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
+ FIRMWARE_TX);
+ goto out_rx;
+ }
+ if (fw_tx->size % 4) {
+ printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
+ fw_tx->size, FIRMWARE_TX);
+ retval = -EINVAL;
+ goto out_tx;
+ }
+ fw_rx_data = (const __be32 *)&fw_rx->data[0];
+ fw_tx_data = (const __be32 *)&fw_tx->data[0];
+ rx_size = fw_rx->size / 4;
+ tx_size = fw_tx->size / 4;
+
+ /* Load Rx/Tx firmware into the frame processors */
+ for (i = 0; i < rx_size; i++)
+ writel(be32_to_cpup(&fw_rx_data[i]), ioaddr + RxGfpMem + i * 4);
+ for (i = 0; i < tx_size; i++)
+ writel(be32_to_cpup(&fw_tx_data[i]), ioaddr + TxGfpMem + i * 4);
+ if (enable_hw_cksum)
+ /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
+ writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
+ else
+ /* Enable the Rx and Tx units only. */
+ writel(TxEnable|RxEnable, ioaddr + GenCtrl);
+
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: Done netdev_open().\n",
+ dev->name);
+
+out_tx:
+ release_firmware(fw_tx);
+out_rx:
+ release_firmware(fw_rx);
+out_init:
+ if (retval)
+ netdev_close(dev);
+ return retval;
+}
+
+
+static void check_duplex(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ u16 reg0;
+ int silly_count = 1000;
+
+ mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
+ mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
+ udelay(500);
+ while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
+ /* do nothing */;
+ if (!silly_count) {
+ printk("%s: MII reset failed!\n", dev->name);
+ return;
+ }
+
+ reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
+
+ if (!np->mii_if.force_media) {
+ reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
+ } else {
+ reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
+ if (np->speed100)
+ reg0 |= BMCR_SPEED100;
+ if (np->mii_if.full_duplex)
+ reg0 |= BMCR_FULLDPLX;
+ printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
+ dev->name,
+ np->speed100 ? "100" : "10",
+ np->mii_if.full_duplex ? "full" : "half");
+ }
+ mdio_write(dev, np->phys[0], MII_BMCR, reg0);
+}
+
+
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int old_debug;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
+ "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));
+
+ /* Perhaps we should reinitialize the hardware here. */
+
+ /*
+ * Stop and restart the interface.
+ * Cheat and increase the debug level temporarily.
+ */
+ old_debug = debug;
+ debug = 2;
+ netdev_close(dev);
+ netdev_open(dev);
+ debug = old_debug;
+
+ /* Trigger an immediate transmit demand. */
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ dev->stats.tx_errors++;
+ netif_wake_queue(dev);
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ np->cur_rx = np->cur_tx = np->reap_tx = 0;
+ np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;
+
+ np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_info[i].skb = skb;
+ if (skb == NULL)
+ break;
+ np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ skb->dev = dev; /* Mark as being used by this device. */
+ /* Grrr, we cannot offset to correctly align the IP header. */
+ np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
+ }
+ writew(i - 1, np->base + RxDescQIdx);
+ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ /* Clear the remainder of the Rx buffer ring. */
+ for ( ; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].rxaddr = 0;
+ np->rx_info[i].skb = NULL;
+ np->rx_info[i].mapping = 0;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);
+
+ /* Clear the completion rings. */
+ for (i = 0; i < DONE_Q_SIZE; i++) {
+ np->rx_done_q[i].status = 0;
+ np->tx_done_q[i].status = 0;
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++)
+ memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
+}
+
+
+static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ unsigned int entry;
+ u32 status;
+ int i;
+
+ /*
+ * be cautious here, wrapping the queue has weird semantics
+ * and we may not have enough slots even when it seems we do.
+ */
+ if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
+ return NETDEV_TX_OK;
+ }
+#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
+
+ entry = np->cur_tx % TX_RING_SIZE;
+ for (i = 0; i < skb_num_frags(skb); i++) {
+ int wrap_ring = 0;
+ status = TxDescID;
+
+ if (i == 0) {
+ np->tx_info[entry].skb = skb;
+ status |= TxCRCEn;
+ if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
+ status |= TxRingWrap;
+ wrap_ring = 1;
+ }
+ if (np->reap_tx) {
+ status |= TxDescIntr;
+ np->reap_tx = 0;
+ }
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ status |= TxCalTCP;
+ dev->stats.tx_compressed++;
+ }
+ status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
+
+ np->tx_info[entry].mapping =
+ pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
+ } else {
+ skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
+ status |= this_frag->size;
+ np->tx_info[entry].mapping =
+ pci_map_single(np->pci_dev,
+ skb_frag_address(this_frag),
+ this_frag->size,
+ PCI_DMA_TODEVICE);
+ }
+
+ np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
+ np->tx_ring[entry].status = cpu_to_le32(status);
+ if (debug > 3)
+ printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
+ dev->name, np->cur_tx, np->dirty_tx,
+ entry, status);
+ if (wrap_ring) {
+ np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
+ np->cur_tx += np->tx_info[entry].used_slots;
+ entry = 0;
+ } else {
+ np->tx_info[entry].used_slots = 1;
+ np->cur_tx += np->tx_info[entry].used_slots;
+ entry++;
+ }
+ /* scavenge the tx descriptors twice per TX_RING_SIZE */
+ if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
+ np->reap_tx = 1;
+ }
+
+ /* Non-x86: explicitly flush descriptor cache lines here. */
+ /* Ensure all descriptors are written back before the transmit is
+ initiated. - Jes */
+ wmb();
+
+ /* Update the producer index. */
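+	/* The hardware index counts 8-byte units, so scale by
+	   sizeof(starfire_tx_desc) / 8: 1 for type-1, 2 for type-2 descriptors. */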
+ writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);
+
+ /* 4 is arbitrary, but should be ok */
+ if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
+ netif_stop_queue(dev);
+
+ return NETDEV_TX_OK;
+}
+
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t intr_handler(int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int boguscnt = max_interrupt_work;
+ int consumer;
+ int tx_status;
+ int handled = 0;
+
+ do {
+ u32 intr_status = readl(ioaddr + IntrClear);
+
+ if (debug > 4)
+ printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
+ dev->name, intr_status);
+
+ if (intr_status == 0 || intr_status == (u32) -1)
+ break;
+
+ handled = 1;
+
+ if (intr_status & (IntrRxDone | IntrRxEmpty)) {
+ u32 enable;
+
+ if (likely(napi_schedule_prep(&np->napi))) {
+ __napi_schedule(&np->napi);
+ enable = readl(ioaddr + IntrEnable);
+ enable &= ~(IntrRxDone | IntrRxEmpty);
+ writel(enable, ioaddr + IntrEnable);
+ /* flush PCI posting buffers */
+ readl(ioaddr + IntrEnable);
+ } else {
+ /* Paranoia check */
+ enable = readl(ioaddr + IntrEnable);
+ if (enable & (IntrRxDone | IntrRxEmpty)) {
+ printk(KERN_INFO
+ "%s: interrupt while in poll!\n",
+ dev->name);
+ enable &= ~(IntrRxDone | IntrRxEmpty);
+ writel(enable, ioaddr + IntrEnable);
+ }
+ }
+ }
+
+ /* Scavenge the skbuff list based on the Tx-done queue.
+ There are redundant checks here that may be cleaned up
+ after the driver has proven to be reliable. */
+ consumer = readl(ioaddr + TxConsumerIdx);
+ if (debug > 3)
+ printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
+ dev->name, consumer);
+
+ while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
+ if (debug > 3)
+ printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
+ dev->name, np->dirty_tx, np->tx_done, tx_status);
+ if ((tx_status & 0xe0000000) == 0xa0000000) {
+ dev->stats.tx_packets++;
+ } else if ((tx_status & 0xe0000000) == 0x80000000) {
+ u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
+ struct sk_buff *skb = np->tx_info[entry].skb;
+ np->tx_info[entry].skb = NULL;
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[entry].mapping,
+ skb_first_frag_len(skb),
+ PCI_DMA_TODEVICE);
+ np->tx_info[entry].mapping = 0;
+ np->dirty_tx += np->tx_info[entry].used_slots;
+ entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
+ {
+ int i;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[entry].mapping,
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_TODEVICE);
+ np->dirty_tx++;
+ entry++;
+ }
+ }
+
+ dev_kfree_skb_irq(skb);
+ }
+ np->tx_done_q[np->tx_done].status = 0;
+ np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
+ }
+ writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
+
+ if (netif_queue_stopped(dev) &&
+ (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
+ /* The ring is no longer full, wake the queue. */
+ netif_wake_queue(dev);
+ }
+
+ /* Stats overflow */
+ if (intr_status & IntrStatsMax)
+ get_stats(dev);
+
+ /* Media change interrupt. */
+ if (intr_status & IntrLinkChange)
+ netdev_media_change(dev);
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & IntrAbnormalSummary)
+ netdev_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ if (debug > 1)
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=%#8.8x.\n",
+ dev->name, intr_status);
+ break;
+ }
+ } while (1);
+
+ if (debug > 4)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
+ dev->name, (int) readl(ioaddr + IntrStatus));
+ return IRQ_RETVAL(handled);
+}
+
+
+/*
+ * This routine is logically part of the interrupt/poll handler, but separated
+ * for clarity and better register allocation.
+ */
+static int __netdev_rx(struct net_device *dev, int *quota)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ u32 desc_status;
+ int retcode = 0;
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
+ struct sk_buff *skb;
+ u16 pkt_len;
+ int entry;
+ rx_done_desc *desc = &np->rx_done_q[np->rx_done];
+
+ if (debug > 4)
+ printk(KERN_DEBUG " netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
+ if (!(desc_status & RxOK)) {
+ /* There was an error. */
+ if (debug > 2)
+ printk(KERN_DEBUG " netdev_rx() Rx error was %#8.8x.\n", desc_status);
+ dev->stats.rx_errors++;
+ if (desc_status & RxFIFOErr)
+ dev->stats.rx_fifo_errors++;
+ goto next_rx;
+ }
+
+ if (*quota <= 0) { /* out of rx quota */
+ retcode = 1;
+ goto out;
+ }
+ (*quota)--;
+
+		pkt_len = desc_status;	/* implicitly truncated to the low 16 bits */
+ entry = (desc_status >> 16) & 0x7ff;
+
+ if (debug > 4)
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak &&
+ (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ pci_dma_sync_single_for_cpu(np->pci_dev,
+ np->rx_info[entry].mapping,
+ pkt_len, PCI_DMA_FROMDEVICE);
+ skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
+ pci_dma_sync_single_for_device(np->pci_dev,
+ np->rx_info[entry].mapping,
+ pkt_len, PCI_DMA_FROMDEVICE);
+ skb_put(skb, pkt_len);
+ } else {
+ pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ skb = np->rx_info[entry].skb;
+ skb_put(skb, pkt_len);
+ np->rx_info[entry].skb = NULL;
+ np->rx_info[entry].mapping = 0;
+ }
+#ifndef final_version /* Remove after testing. */
+ /* You will want this info for the initial debug. */
+ if (debug > 5) {
+ printk(KERN_DEBUG " Rx data %pM %pM %2.2x%2.2x.\n",
+ skb->data, skb->data + 6,
+ skb->data[12], skb->data[13]);
+ }
+#endif
+
+ skb->protocol = eth_type_trans(skb, dev);
+#ifdef VLAN_SUPPORT
+ if (debug > 4)
+ printk(KERN_DEBUG " netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
+#endif
+ if (le16_to_cpu(desc->status2) & 0x0100) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ dev->stats.rx_compressed++;
+ }
+ /*
+ * This feature doesn't seem to be working, at least
+ * with the two firmware versions I have. If the GFP sees
+ * an IP fragment, it either ignores it completely, or reports
+ * "bad checksum" on it.
+ *
+ * Maybe I missed something -- corrections are welcome.
+ * Until then, the printk stays. :-) -Ion
+ */
+ else if (le16_to_cpu(desc->status2) & 0x0040) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = le16_to_cpu(desc->csum);
+ printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
+ }
+#ifdef VLAN_SUPPORT
+ if (le16_to_cpu(desc->status2) & 0x0200) {
+ u16 vlid = le16_to_cpu(desc->vlanid);
+
+ if (debug > 4) {
+ printk(KERN_DEBUG " netdev_rx() vlanid = %d\n",
+ vlid);
+ }
+ __vlan_hwaccel_put_tag(skb, vlid);
+ }
+#endif /* VLAN_SUPPORT */
+ netif_receive_skb(skb);
+ dev->stats.rx_packets++;
+
+ next_rx:
+ np->cur_rx++;
+ desc->status = 0;
+ np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
+ }
+
+ if (*quota == 0) { /* out of rx quota */
+ retcode = 1;
+ goto out;
+ }
+ writew(np->rx_done, np->base + CompletionQConsumerIdx);
+
+ out:
+ refill_rx_ring(dev);
+ if (debug > 5)
+ printk(KERN_DEBUG " exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
+ retcode, np->rx_done, desc_status);
+ return retcode;
+}
+
+static int netdev_poll(struct napi_struct *napi, int budget)
+{
+ struct netdev_private *np = container_of(napi, struct netdev_private, napi);
+ struct net_device *dev = np->dev;
+ u32 intr_status;
+ void __iomem *ioaddr = np->base;
+ int quota = budget;
+
+ do {
+ writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);
+
+ if (__netdev_rx(dev, &quota))
+ goto out;
+
+ intr_status = readl(ioaddr + IntrStatus);
+ } while (intr_status & (IntrRxDone | IntrRxEmpty));
+
+ napi_complete(napi);
+ intr_status = readl(ioaddr + IntrEnable);
+ intr_status |= IntrRxDone | IntrRxEmpty;
+ writel(intr_status, ioaddr + IntrEnable);
+
+ out:
+ if (debug > 5)
+ printk(KERN_DEBUG " exiting netdev_poll(): %d.\n",
+ budget - quota);
+
+ /* Restart Rx engine if stopped. */
+ return budget - quota;
+}
+
+static void refill_rx_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ struct sk_buff *skb;
+ int entry = -1;
+
+ /* Refill the Rx ring buffers. */
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_info[entry].skb == NULL) {
+ skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_info[entry].skb = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ np->rx_info[entry].mapping =
+ pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_ring[entry].rxaddr =
+ cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
+ }
+ if (entry == RX_RING_SIZE - 1)
+ np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
+ }
+ if (entry >= 0)
+ writew(entry, np->base + RxDescQIdx);
+}
+
+
+static void netdev_media_change(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ u16 reg0, reg1, reg4, reg5;
+ u32 new_tx_mode;
+ u32 new_intr_timer_ctrl;
+
+ /* reset status first */
+ mdio_read(dev, np->phys[0], MII_BMCR);
+ mdio_read(dev, np->phys[0], MII_BMSR);
+
+ reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
+ reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
+
+ if (reg1 & BMSR_LSTATUS) {
+ /* link is up */
+ if (reg0 & BMCR_ANENABLE) {
+ /* autonegotiation is enabled */
+ reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
+ reg5 = mdio_read(dev, np->phys[0], MII_LPA);
+ if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
+ np->speed100 = 1;
+ np->mii_if.full_duplex = 1;
+ } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
+ np->speed100 = 1;
+ np->mii_if.full_duplex = 0;
+ } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
+ np->speed100 = 0;
+ np->mii_if.full_duplex = 1;
+ } else {
+ np->speed100 = 0;
+ np->mii_if.full_duplex = 0;
+ }
+ } else {
+ /* autonegotiation is disabled */
+ if (reg0 & BMCR_SPEED100)
+ np->speed100 = 1;
+ else
+ np->speed100 = 0;
+ if (reg0 & BMCR_FULLDPLX)
+ np->mii_if.full_duplex = 1;
+ else
+ np->mii_if.full_duplex = 0;
+ }
+ netif_carrier_on(dev);
+ printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
+ dev->name,
+ np->speed100 ? "100" : "10",
+ np->mii_if.full_duplex ? "full" : "half");
+
+ new_tx_mode = np->tx_mode & ~FullDuplex; /* duplex setting */
+ if (np->mii_if.full_duplex)
+ new_tx_mode |= FullDuplex;
+ if (np->tx_mode != new_tx_mode) {
+ np->tx_mode = new_tx_mode;
+ writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
+ udelay(1000);
+ writel(np->tx_mode, ioaddr + TxMode);
+ }
+
+ new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
+ if (np->speed100)
+ new_intr_timer_ctrl |= Timer10X;
+ if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
+ np->intr_timer_ctrl = new_intr_timer_ctrl;
+ writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
+ }
+ } else {
+ netif_carrier_off(dev);
+ printk(KERN_DEBUG "%s: Link is down\n", dev->name);
+ }
+}
+
+
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ /* Came close to underrunning the Tx FIFO, increase threshold. */
+ if (intr_status & IntrTxDataLow) {
+ if (np->tx_threshold <= PKT_BUF_SZ / 16) {
+ writel(++np->tx_threshold, np->base + TxThreshold);
+ printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
+ dev->name, np->tx_threshold * 16);
+ } else
+ printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
+ }
+ if (intr_status & IntrRxGFPDead) {
+ dev->stats.rx_fifo_errors++;
+ dev->stats.rx_errors++;
+ }
+ if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
+ dev->stats.tx_fifo_errors++;
+ dev->stats.tx_errors++;
+ }
+ if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
+ printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
+ dev->name, intr_status);
+}
+
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+
+ /* This adapter architecture needs no SMP locks. */
+ dev->stats.tx_bytes = readl(ioaddr + 0x57010);
+ dev->stats.rx_bytes = readl(ioaddr + 0x57044);
+ dev->stats.tx_packets = readl(ioaddr + 0x57000);
+ dev->stats.tx_aborted_errors =
+ readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
+ dev->stats.tx_window_errors = readl(ioaddr + 0x57018);
+ dev->stats.collisions =
+ readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
+
+	/* The chip only needs to report frames it silently dropped. */
+ dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
+ writew(0, ioaddr + RxDMAStatus);
+ dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
+ dev->stats.rx_frame_errors = readl(ioaddr + 0x57040);
+ dev->stats.rx_length_errors = readl(ioaddr + 0x57058);
+ dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
+
+ return &dev->stats;
+}
+
+#ifdef VLAN_SUPPORT
+static u32 set_vlan_mode(struct netdev_private *np)
+{
+ u32 ret = VlanMode;
+ u16 vid;
+ void __iomem *filter_addr = np->base + HashTable + 8;
+ int vlan_count = 0;
+
+ for_each_set_bit(vid, np->active_vlans, VLAN_N_VID) {
+ if (vlan_count == 32)
+ break;
+ writew(vid, filter_addr);
+ filter_addr += 16;
+ vlan_count++;
+ }
+	/* All active VLANs fit in the table: use the perfect VLAN filter
+	   and zero out the unused entries. */
+	if (vlan_count < 32) {
+ ret |= PerfectFilterVlan;
+ while (vlan_count < 32) {
+ writew(0, filter_addr);
+ filter_addr += 16;
+ vlan_count++;
+ }
+ }
+ return ret;
+}
+#endif /* VLAN_SUPPORT */
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ u32 rx_mode = MinVLANPrio;
+ struct netdev_hw_addr *ha;
+ int i;
+
+#ifdef VLAN_SUPPORT
+ rx_mode |= set_vlan_mode(np);
+#endif /* VLAN_SUPPORT */
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ rx_mode |= AcceptAll;
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
+ } else if (netdev_mc_count(dev) <= 14) {
+ /* Use the 16 element perfect filter, skip first two entries. */
+ void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
+ __be16 *eaddrs;
+ netdev_for_each_mc_addr(ha, dev) {
+ eaddrs = (__be16 *) ha->addr;
+ writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
+ writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
+ writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
+ }
+ eaddrs = (__be16 *)dev->dev_addr;
+ i = netdev_mc_count(dev) + 2;
+ while (i++ < 16) {
+ writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
+ writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
+ writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
+ }
+ rx_mode |= AcceptBroadcast|PerfectFilter;
+ } else {
+ /* Must use a multicast hash table. */
+ void __iomem *filter_addr;
+ __be16 *eaddrs;
+ __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ /* The chip uses the upper 9 CRC bits
+ as index into the hash table */
+ int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
+ __le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];
+
+ *fptr |= cpu_to_le32(1 << (bit_nr & 31));
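+			/* bit_nr is 0..511: the upper bits select one of
+			   sixteen 32-bit words, the low five bits the bit. */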
+ }
+ /* Clear the perfect filter list, skip first two entries. */
+ filter_addr = ioaddr + PerfFilterTable + 2 * 16;
+ eaddrs = (__be16 *)dev->dev_addr;
+ for (i = 2; i < 16; i++) {
+ writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
+ writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
+ writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
+ }
+	for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr += 16, i++)
+ writew(mc_filter[i], filter_addr);
+ rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
+ }
+ writel(rx_mode, ioaddr + RxFilterMode);
+}
+
+static int check_if_running(struct net_device *dev)
+{
+ if (!netif_running(dev))
+ return -EINVAL;
+ return 0;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(np->pci_dev));
+}
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ spin_lock_irq(&np->lock);
+ mii_ethtool_gset(&np->mii_if, ecmd);
+ spin_unlock_irq(&np->lock);
+ return 0;
+}
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int res;
+ spin_lock_irq(&np->lock);
+ res = mii_ethtool_sset(&np->mii_if, ecmd);
+ spin_unlock_irq(&np->lock);
+ check_duplex(dev);
+ return res;
+}
+
+static int nway_reset(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_nway_restart(&np->mii_if);
+}
+
+static u32 get_link(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_link_ok(&np->mii_if);
+}
+
+static u32 get_msglevel(struct net_device *dev)
+{
+ return debug;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+ debug = val;
+}
+
+static const struct ethtool_ops ethtool_ops = {
+ .begin = check_if_running,
+ .get_drvinfo = get_drvinfo,
+ .get_settings = get_settings,
+ .set_settings = set_settings,
+ .nway_reset = nway_reset,
+ .get_link = get_link,
+ .get_msglevel = get_msglevel,
+ .set_msglevel = set_msglevel,
+};
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(rq);
+ int rc;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irq(&np->lock);
+ rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
+ spin_unlock_irq(&np->lock);
+
+ if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
+ check_duplex(dev);
+
+ return rc;
+}
+
+static int netdev_close(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int i;
+
+ netif_stop_queue(dev);
+
+ napi_disable(&np->napi);
+
+ if (debug > 1) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
+ dev->name, (int) readl(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx,
+ np->cur_rx, np->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ writel(0, ioaddr + IntrEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
+ writel(0, ioaddr + GenCtrl);
+ readl(ioaddr + GenCtrl);
+
+ if (debug > 5) {
+ printk(KERN_DEBUG" Tx ring at %#llx:\n",
+ (long long) np->tx_ring_dma);
+ for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
+ printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
+ i, le32_to_cpu(np->tx_ring[i].status),
+ (long long) dma_to_cpu(np->tx_ring[i].addr),
+ le32_to_cpu(np->tx_done_q[i].status));
+ printk(KERN_DEBUG " Rx ring at %#llx -> %p:\n",
+ (long long) np->rx_ring_dma, np->rx_done_q);
+ if (np->rx_done_q)
+ for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
+ printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
+ i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
+ }
+ }
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
+ if (np->rx_info[i].skb != NULL) {
+ pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(np->rx_info[i].skb);
+ }
+ np->rx_info[i].skb = NULL;
+ np->rx_info[i].mapping = 0;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ struct sk_buff *skb = np->tx_info[i].skb;
+ if (skb == NULL)
+ continue;
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[i].mapping,
+ skb_first_frag_len(skb), PCI_DMA_TODEVICE);
+ np->tx_info[i].mapping = 0;
+ dev_kfree_skb(skb);
+ np->tx_info[i].skb = NULL;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ netdev_close(dev);
+ }
+
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev,state));
+
+ return 0;
+}
+
+static int starfire_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ if (netif_running(dev)) {
+ netdev_open(dev);
+ netif_device_attach(dev);
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+
+static void __devexit starfire_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct netdev_private *np = netdev_priv(dev);
+
+ BUG_ON(!dev);
+
+ unregister_netdev(dev);
+
+ if (np->queue_mem)
+ pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);
+
+
+ /* XXX: add wakeup code -- requires firmware for MagicPacket */
+ pci_set_power_state(pdev, PCI_D3hot); /* go to sleep in D3 mode */
+ pci_disable_device(pdev);
+
+ iounmap(np->base);
+ pci_release_regions(pdev);
+
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(dev); /* Will also free np!! */
+}
+
+
+static struct pci_driver starfire_driver = {
+ .name = DRV_NAME,
+ .probe = starfire_init_one,
+ .remove = __devexit_p(starfire_remove_one),
+#ifdef CONFIG_PM
+ .suspend = starfire_suspend,
+ .resume = starfire_resume,
+#endif /* CONFIG_PM */
+ .id_table = starfire_pci_tbl,
+};
+
+
+static int __init starfire_init (void)
+{
+/* when built as a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk(version);
+
+ printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
+#endif
+
+ BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t));
+
+ return pci_register_driver(&starfire_driver);
+}
+
+
+static void __exit starfire_cleanup (void)
+{
+ pci_unregister_driver (&starfire_driver);
+}
+
+
+module_init(starfire_init);
+module_exit(starfire_cleanup);
+
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
new file mode 100644
index 000000000000..49a30d37ae4a
--- /dev/null
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -0,0 +1,69 @@
+#
+# Blackfin device configuration
+#
+
+config NET_BFIN
+ bool "Blackfin devices"
+ depends on BF516 || BF518 || BF526 || BF527 || BF536 || BF537
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y.
+ Make sure you know the name of your card. Read the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ If unsure, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the remaining Blackfin card questions. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+if NET_BFIN
+
+config BFIN_MAC
+ tristate "Blackfin on-chip MAC support"
+ depends on (BF516 || BF518 || BF526 || BF527 || BF536 || BF537)
+ select CRC32
+ select NET_CORE
+ select MII
+ select PHYLIB
+ select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE
+ ---help---
+ This is the driver for the Blackfin on-chip MAC device. Say Y if you want
+ it compiled into the kernel. This driver is also available as a
+ module ( = code which can be inserted in and removed from the running
+ kernel whenever you want). The module will be called bfin_mac.
+
+config BFIN_MAC_USE_L1
+ bool "Use L1 memory for rx/tx packets"
+ depends on BFIN_MAC && (BF527 || BF537)
+ default y
+ ---help---
+ To get maximum network performance, you should use L1 memory as rx/tx
+ buffers. Say N here if you want to reserve L1 memory for other uses.
+
+config BFIN_TX_DESC_NUM
+ int "Number of transmit buffer packets"
+ depends on BFIN_MAC
+ range 6 10 if BFIN_MAC_USE_L1
+ range 10 100
+ default "10"
+ ---help---
+ Set the number of buffer packets used in driver.
+
+config BFIN_RX_DESC_NUM
+ int "Number of receive buffer packets"
+ depends on BFIN_MAC
+ range 20 100 if BFIN_MAC_USE_L1
+ range 20 800
+ default "20"
+ ---help---
+ Set the number of buffer packets used in driver.
+
+config BFIN_MAC_USE_HWSTAMP
+ bool "Use IEEE 1588 hwstamp"
+ depends on BFIN_MAC && BF518
+ default y
+ ---help---
+ To support the IEEE 1588 Precision Time Protocol (PTP), select Y here.
+
+endif # NET_BFIN
diff --git a/drivers/net/ethernet/adi/Makefile b/drivers/net/ethernet/adi/Makefile
new file mode 100644
index 000000000000..b1fbe195d0e8
--- /dev/null
+++ b/drivers/net/ethernet/adi/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Blackfin device drivers.
+#
+
+obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
new file mode 100644
index 000000000000..b6d69c91db96
--- /dev/null
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -0,0 +1,1767 @@
+/*
+ * Blackfin On-Chip MAC Driver
+ *
+ * Copyright 2004-2010 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#define DRV_VERSION "1.1"
+#define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/platform_device.h>
+
+#include <asm/dma.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/div64.h>
+#include <asm/dpmc.h>
+#include <asm/blackfin.h>
+#include <asm/cacheflush.h>
+#include <asm/portmux.h>
+#include <mach/pll.h>
+
+#include "bfin_mac.h"
+
+MODULE_AUTHOR("Bryan Wu, Luke Yang");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_ALIAS("platform:bfin_mac");
+
+#if defined(CONFIG_BFIN_MAC_USE_L1)
+# define bfin_mac_alloc(dma_handle, size, num) l1_data_sram_zalloc(size*num)
+# define bfin_mac_free(dma_handle, ptr, num) l1_data_sram_free(ptr)
+#else
+# define bfin_mac_alloc(dma_handle, size, num) \
+ dma_alloc_coherent(NULL, size*num, dma_handle, GFP_KERNEL)
+# define bfin_mac_free(dma_handle, ptr, num) \
+ dma_free_coherent(NULL, sizeof(*ptr)*num, ptr, dma_handle)
+#endif
+
+#define PKT_BUF_SZ 1580
+
+#define MAX_TIMEOUT_CNT 500
+
+/* pointers to maintain transmit list */
+static struct net_dma_desc_tx *tx_list_head;
+static struct net_dma_desc_tx *tx_list_tail;
+static struct net_dma_desc_rx *rx_list_head;
+static struct net_dma_desc_rx *rx_list_tail;
+static struct net_dma_desc_rx *current_rx_ptr;
+static struct net_dma_desc_tx *current_tx_ptr;
+static struct net_dma_desc_tx *tx_desc;
+static struct net_dma_desc_rx *rx_desc;
+
+static void desc_list_free(void)
+{
+ struct net_dma_desc_rx *r;
+ struct net_dma_desc_tx *t;
+ int i;
+#if !defined(CONFIG_BFIN_MAC_USE_L1)
+ dma_addr_t dma_handle = 0;
+#endif
+
+ if (tx_desc) {
+ t = tx_list_head;
+ for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
+ if (t) {
+ if (t->skb) {
+ dev_kfree_skb(t->skb);
+ t->skb = NULL;
+ }
+ t = t->next;
+ }
+ }
+ bfin_mac_free(dma_handle, tx_desc, CONFIG_BFIN_TX_DESC_NUM);
+ }
+
+ if (rx_desc) {
+ r = rx_list_head;
+ for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
+ if (r) {
+ if (r->skb) {
+ dev_kfree_skb(r->skb);
+ r->skb = NULL;
+ }
+ r = r->next;
+ }
+ }
+ bfin_mac_free(dma_handle, rx_desc, CONFIG_BFIN_RX_DESC_NUM);
+ }
+}
+
+static int desc_list_init(void)
+{
+ int i;
+ struct sk_buff *new_skb;
+#if !defined(CONFIG_BFIN_MAC_USE_L1)
+ /*
+ * This dma_handle is useless in Blackfin dma_alloc_coherent().
+ * The real dma handle is the return value of dma_alloc_coherent().
+ */
+ dma_addr_t dma_handle;
+#endif
+
+ tx_desc = bfin_mac_alloc(&dma_handle,
+ sizeof(struct net_dma_desc_tx),
+ CONFIG_BFIN_TX_DESC_NUM);
+ if (tx_desc == NULL)
+ goto init_error;
+
+ rx_desc = bfin_mac_alloc(&dma_handle,
+ sizeof(struct net_dma_desc_rx),
+ CONFIG_BFIN_RX_DESC_NUM);
+ if (rx_desc == NULL)
+ goto init_error;
+
+ /* init tx_list */
+ tx_list_head = tx_list_tail = tx_desc;
+
+ for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
+ struct net_dma_desc_tx *t = tx_desc + i;
+ struct dma_descriptor *a = &(t->desc_a);
+ struct dma_descriptor *b = &(t->desc_b);
+
+ /*
+ * disable DMA
+ * read from memory WNR = 0
+ * wordsize is 32 bits
+ * 6 half words is desc size
+ * large desc flow
+ */
+ a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
+ a->start_addr = (unsigned long)t->packet;
+ a->x_count = 0;
+ a->next_dma_desc = b;
+
+ /*
+ * enabled DMA
+ * write to memory WNR = 1
+ * wordsize is 32 bits
+ * disable interrupt
+ * 6 half words is desc size
+ * large desc flow
+ */
+ b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
+ b->start_addr = (unsigned long)(&(t->status));
+ b->x_count = 0;
+
+ t->skb = NULL;
+ tx_list_tail->desc_b.next_dma_desc = a;
+ tx_list_tail->next = t;
+ tx_list_tail = t;
+ }
+ tx_list_tail->next = tx_list_head; /* tx_list is a circle */
+ tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a);
+ current_tx_ptr = tx_list_head;
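+ /* The ring now alternates payload and status descriptors:
+ * desc_a[0] -> desc_b[0] -> desc_a[1] -> ... -> desc_b[N-1] -> desc_a[0].
+ * The DMA engine fetches a packet's payload descriptor, writes the
+ * status word via desc_b, then stalls on the next desc_a, which
+ * stays disabled until xmit sets DMAEN on it. */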
+
+ /* init rx_list */
+ rx_list_head = rx_list_tail = rx_desc;
+
+ for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
+ struct net_dma_desc_rx *r = rx_desc + i;
+ struct dma_descriptor *a = &(r->desc_a);
+ struct dma_descriptor *b = &(r->desc_b);
+
+ /* allocate a new skb for next time receive */
+ new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
+ if (!new_skb) {
+ pr_notice("init: low on mem - packet dropped\n");
+ goto init_error;
+ }
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ /* Invalidate the data cache over the skb->data range when write-back
+ * caching is used, so stale lines cannot overwrite the new data from DMA
+ */
+ blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
+ (unsigned long)new_skb->end);
+ r->skb = new_skb;
+
+ /*
+ * enabled DMA
+ * write to memory WNR = 1
+ * wordsize is 32 bits
+ * disable interrupt
+ * 6 half words is desc size
+ * large desc flow
+ */
+ a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
+ /* since RXDWA is enabled */
+ a->start_addr = (unsigned long)new_skb->data - 2;
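+ /* With RXDWA set in EMAC_SYSCTL the MAC prepends a 2-byte dummy
+ * word, so starting DMA at skb->data - 2 drops the dummy into the
+ * NET_IP_ALIGN headroom reserved above and leaves the IP header
+ * 32-bit aligned at skb->data + ETH_HLEN. */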
+ a->x_count = 0;
+ a->next_dma_desc = b;
+
+ /*
+ * enabled DMA
+ * write to memory WNR = 1
+ * wordsize is 32 bits
+ * enable interrupt
+ * 6 half words is desc size
+ * large desc flow
+ */
+ b->config = DMAEN | WNR | WDSIZE_32 | DI_EN |
+ NDSIZE_6 | DMAFLOW_LARGE;
+ b->start_addr = (unsigned long)(&(r->status));
+ b->x_count = 0;
+
+ rx_list_tail->desc_b.next_dma_desc = a;
+ rx_list_tail->next = r;
+ rx_list_tail = r;
+ }
+ rx_list_tail->next = rx_list_head; /* rx_list is a circle */
+ rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a);
+ current_rx_ptr = rx_list_head;
+
+ return 0;
+
+init_error:
+ desc_list_free();
+ pr_err("kmalloc failed\n");
+ return -ENOMEM;
+}
+
+
+/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
+
+/*
+ * MII operations
+ */
+/* Wait until the previous MDC/MDIO transaction has completed */
+static int bfin_mdio_poll(void)
+{
+ int timeout_cnt = MAX_TIMEOUT_CNT;
+
+ /* poll the STABUSY bit */
+ while ((bfin_read_EMAC_STAADD()) & STABUSY) {
+ udelay(1);
+ if (timeout_cnt-- < 0) {
+ pr_err("wait MDC/MDIO transaction to complete timeout\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+/* Read an off-chip register in a PHY through the MDC/MDIO port */
+static int bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ int ret;
+
+ ret = bfin_mdio_poll();
+ if (ret)
+ return ret;
+
+ /* read mode */
+ bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
+ SET_REGAD((u16) regnum) |
+ STABUSY);
+
+ ret = bfin_mdio_poll();
+ if (ret)
+ return ret;
+
+ return (int) bfin_read_EMAC_STADAT();
+}
+
+/* Write an off-chip register in a PHY through the MDC/MDIO port */
+static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
+ u16 value)
+{
+ int ret;
+
+ ret = bfin_mdio_poll();
+ if (ret)
+ return ret;
+
+ bfin_write_EMAC_STADAT((u32) value);
+
+ /* write mode */
+ bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
+ SET_REGAD((u16) regnum) |
+ STAOP |
+ STABUSY);
+
+ return bfin_mdio_poll();
+}
+
+static int bfin_mdiobus_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+static void bfin_mac_adjust_link(struct net_device *dev)
+{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+ struct phy_device *phydev = lp->phydev;
+ unsigned long flags;
+ int new_state = 0;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ if (phydev->link) {
+ /* Now we make sure that we can be in full duplex mode.
+ * If not, we operate in half-duplex mode. */
+ if (phydev->duplex != lp->old_duplex) {
+ u32 opmode = bfin_read_EMAC_OPMODE();
+ new_state = 1;
+
+ if (phydev->duplex)
+ opmode |= FDMODE;
+ else
+ opmode &= ~(FDMODE);
+
+ bfin_write_EMAC_OPMODE(opmode);
+ lp->old_duplex = phydev->duplex;
+ }
+
+ if (phydev->speed != lp->old_speed) {
+ if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
+ u32 opmode = bfin_read_EMAC_OPMODE();
+ switch (phydev->speed) {
+ case 10:
+ opmode |= RMII_10;
+ break;
+ case 100:
+ opmode &= ~RMII_10;
+ break;
+ default:
+ netdev_warn(dev,
+ "Ack! Speed (%d) is not 10/100!\n",
+ phydev->speed);
+ break;
+ }
+ bfin_write_EMAC_OPMODE(opmode);
+ }
+
+ new_state = 1;
+ lp->old_speed = phydev->speed;
+ }
+
+ if (!lp->old_link) {
+ new_state = 1;
+ lp->old_link = 1;
+ }
+ } else if (lp->old_link) {
+ new_state = 1;
+ lp->old_link = 0;
+ lp->old_speed = 0;
+ lp->old_duplex = -1;
+ }
+
+ if (new_state) {
+ u32 opmode = bfin_read_EMAC_OPMODE();
+ phy_print_status(phydev);
+ pr_debug("EMAC_OPMODE = 0x%08x\n", opmode);
+ }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+/* MDC = 2.5 MHz */
+#define MDC_CLK 2500000
+
+static int mii_probe(struct net_device *dev, int phy_mode)
+{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
+ unsigned short sysctl;
+ int i;
+ u32 sclk, mdc_div;
+
+ /* Enable PHY output early */
+ if (!(bfin_read_VR_CTL() & CLKBUFOE))
+ bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);
+
+ sclk = get_sclk();
+ mdc_div = ((sclk / MDC_CLK) / 2) - 1;
+
+ sysctl = bfin_read_EMAC_SYSCTL();
+ sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div);
+ bfin_write_EMAC_SYSCTL(sysctl);
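+ /* e.g. assuming a 100 MHz system clock (board dependent):
+ * mdc_div = ((100000000 / 2500000) / 2) - 1 = 19, giving an MDC
+ * rate of sclk / ((mdc_div + 1) * 2) = 2.5 MHz. */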
+
+ /* search for connected PHY device */
+ for (i = 0; i < PHY_MAX_ADDR; ++i) {
+ struct phy_device *const tmp_phydev = lp->mii_bus->phy_map[i];
+
+ if (!tmp_phydev)
+ continue; /* no PHY here... */
+
+ phydev = tmp_phydev;
+ break; /* found it */
+ }
+
+ /* now we are supposed to have a proper phydev to attach to... */
+ if (!phydev) {
+ netdev_err(dev, "no phy device found\n");
+ return -ENODEV;
+ }
+
+ if (phy_mode != PHY_INTERFACE_MODE_RMII &&
+ phy_mode != PHY_INTERFACE_MODE_MII) {
+ netdev_err(dev, "invalid phy interface mode\n");
+ return -EINVAL;
+ }
+
+ phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link,
+ 0, phy_mode);
+
+ if (IS_ERR(phydev)) {
+ netdev_err(dev, "could not attach PHY\n");
+ return PTR_ERR(phydev);
+ }
+
+ /* mask with MAC supported features */
+ phydev->supported &= (SUPPORTED_10baseT_Half
+ | SUPPORTED_10baseT_Full
+ | SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full
+ | SUPPORTED_Autoneg
+ | SUPPORTED_Pause | SUPPORTED_Asym_Pause
+ | SUPPORTED_MII
+ | SUPPORTED_TP);
+
+ phydev->advertising = phydev->supported;
+
+ lp->old_link = 0;
+ lp->old_speed = 0;
+ lp->old_duplex = -1;
+ lp->phydev = phydev;
+
+ pr_info("attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
+ phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
+ MDC_CLK, mdc_div, sclk/1000000);
+
+ return 0;
+}
+
+/*
+ * Ethtool support
+ */
+
+/*
+ * interrupt routine for magic packet wakeup
+ */
+static irqreturn_t bfin_mac_wake_interrupt(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
+static int
+bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+
+ if (lp->phydev)
+ return phy_ethtool_gset(lp->phydev, cmd);
+
+ return -EINVAL;
+}
+
+static int
+bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (lp->phydev)
+ return phy_ethtool_sset(lp->phydev, cmd);
+
+ return -EINVAL;
+}
+
+static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, KBUILD_MODNAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->fw_version, "N/A");
+ strcpy(info->bus_info, dev_name(&dev->dev));
+}
+
+static void bfin_mac_ethtool_getwol(struct net_device *dev,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+
+ wolinfo->supported = WAKE_MAGIC;
+ wolinfo->wolopts = lp->wol;
+}
+
+static int bfin_mac_ethtool_setwol(struct net_device *dev,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+ int rc;
+
+ if (wolinfo->wolopts & (WAKE_MAGICSECURE |
+ WAKE_UCAST |
+ WAKE_MCAST |
+ WAKE_BCAST |
+ WAKE_ARP))
+ return -EOPNOTSUPP;
+
+ lp->wol = wolinfo->wolopts;
+
+ if (lp->wol && !lp->irq_wake_requested) {
+ /* register wake irq handler */
+ rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt,
+ IRQF_DISABLED, "EMAC_WAKE", dev);
+ if (rc)
+ return rc;
+ lp->irq_wake_requested = true;
+ }
+
+ if (!lp->wol && lp->irq_wake_requested) {
+ free_irq(IRQ_MAC_WAKEDET, dev);
+ lp->irq_wake_requested = false;
+ }
+
+ /* Make sure the PHY driver doesn't suspend */
+ device_init_wakeup(&dev->dev, lp->wol);
+
+ return 0;
+}
+
+static const struct ethtool_ops bfin_mac_ethtool_ops = {
+ .get_settings = bfin_mac_ethtool_getsettings,
+ .set_settings = bfin_mac_ethtool_setsettings,
+ .get_link = ethtool_op_get_link,
+ .get_drvinfo = bfin_mac_ethtool_getdrvinfo,
+ .get_wol = bfin_mac_ethtool_getwol,
+ .set_wol = bfin_mac_ethtool_setwol,
+};
+
+/**************************************************************************/
+static void setup_system_regs(struct net_device *dev)
+{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+ int i;
+ unsigned short sysctl;
+
+ /*
+ * Odd word alignment for Receive Frame DMA word
+ * Configure checksum support and receive frame word alignment
+ */
+ sysctl = bfin_read_EMAC_SYSCTL();
+ /*
+ * check if interrupt is requested for any PHY,
+ * enable PHY interrupt only if needed
+ */
+ for (i = 0; i < PHY_MAX_ADDR; ++i)
+ if (lp->mii_bus->irq[i] != PHY_POLL)
+ break;
+ if (i < PHY_MAX_ADDR)
+ sysctl |= PHYIE;
+ sysctl |= RXDWA;
+#if defined(BFIN_MAC_CSUM_OFFLOAD)
+ sysctl |= RXCKS;
+#else
+ sysctl &= ~RXCKS;
+#endif
+ bfin_write_EMAC_SYSCTL(sysctl);
+
+ bfin_write_EMAC_MMC_CTL(RSTC | CROLL);
+
+ /* Set vlan regs to let 1522 bytes long packets pass through */
+ bfin_write_EMAC_VLAN1(lp->vlan1_mask);
+ bfin_write_EMAC_VLAN2(lp->vlan2_mask);
+
+ /* Initialize the TX DMA channel registers */
+ bfin_write_DMA2_X_COUNT(0);
+ bfin_write_DMA2_X_MODIFY(4);
+ bfin_write_DMA2_Y_COUNT(0);
+ bfin_write_DMA2_Y_MODIFY(0);
+
+ /* Initialize the RX DMA channel registers */
+ bfin_write_DMA1_X_COUNT(0);
+ bfin_write_DMA1_X_MODIFY(4);
+ bfin_write_DMA1_Y_COUNT(0);
+ bfin_write_DMA1_Y_MODIFY(0);
+}
+
+static void setup_mac_addr(u8 *mac_addr)
+{
+ u32 addr_low = le32_to_cpu(*(__le32 *) & mac_addr[0]);
+ u16 addr_hi = le16_to_cpu(*(__le16 *) & mac_addr[4]);
+
+ /* this depends on a little-endian machine */
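+ /* e.g. for 00:11:22:33:44:55 this yields addr_low = 0x33221100
+ * and addr_hi = 0x5544 */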
+ bfin_write_EMAC_ADDRLO(addr_low);
+ bfin_write_EMAC_ADDRHI(addr_hi);
+}
+
+static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+ if (netif_running(dev))
+ return -EBUSY;
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ setup_mac_addr(dev->dev_addr);
+ return 0;
+}
+
+#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
+#define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE)
+
+static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config config;
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+ u16 ptpctl;
+ u32 ptpfv1, ptpfv2, ptpfv3, ptpfoff;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ pr_debug("%s config flag:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
+ __func__, config.flags, config.tx_type, config.rx_filter);
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ if ((config.tx_type != HWTSTAMP_TX_OFF) &&
+ (config.tx_type != HWTSTAMP_TX_ON))
+ return -ERANGE;
+
+ ptpctl = bfin_read_EMAC_PTP_CTL();
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ /*
+ * Don't allow any timestamping
+ */
+ ptpfv3 = 0xFFFFFFFF;
+ bfin_write_EMAC_PTP_FV3(ptpfv3);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ /*
+ * Clear the five comparison mask bits (bits[12:8]) in EMAC_PTP_CTL
+ * to enable all the field matches.
+ */
+ ptpctl &= ~0x1F00;
+ bfin_write_EMAC_PTP_CTL(ptpctl);
+ /*
+ * Keep the default values of the EMAC_PTP_FOFF register.
+ */
+ ptpfoff = 0x4A24170C;
+ bfin_write_EMAC_PTP_FOFF(ptpfoff);
+ /*
+ * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
+ * registers.
+ */
+ ptpfv1 = 0x11040800;
+ bfin_write_EMAC_PTP_FV1(ptpfv1);
+ ptpfv2 = 0x0140013F;
+ bfin_write_EMAC_PTP_FV2(ptpfv2);
+ /*
+ * The default value (0xFFFC) allows the timestamping of both
+ * received Sync messages and Delay_Req messages.
+ */
+ ptpfv3 = 0xFFFFFFFC;
+ bfin_write_EMAC_PTP_FV3(ptpfv3);
+
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ /* Clear all five comparison mask bits (bits[12:8]) in the
+ * EMAC_PTP_CTL register to enable all the field matches.
+ */
+ ptpctl &= ~0x1F00;
+ bfin_write_EMAC_PTP_CTL(ptpctl);
+ /*
+ * Keep the default values of the EMAC_PTP_FOFF register, except set
+ * the PTPCOF field to 0x2A.
+ */
+ ptpfoff = 0x2A24170C;
+ bfin_write_EMAC_PTP_FOFF(ptpfoff);
+ /*
+ * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
+ * registers.
+ */
+ ptpfv1 = 0x11040800;
+ bfin_write_EMAC_PTP_FV1(ptpfv1);
+ ptpfv2 = 0x0140013F;
+ bfin_write_EMAC_PTP_FV2(ptpfv2);
+ /*
+ * To allow the timestamping of Pdelay_Req and Pdelay_Resp, set
+ * the value to 0xFFF0.
+ */
+ ptpfv3 = 0xFFFFFFF0;
+ bfin_write_EMAC_PTP_FV3(ptpfv3);
+
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ /*
+ * Clear bits 8 and 12 of the EMAC_PTP_CTL register to enable only the
+ * EFTM and PTPCM field comparison.
+ */
+ ptpctl &= ~0x1100;
+ bfin_write_EMAC_PTP_CTL(ptpctl);
+ /*
+ * Keep the default values of all the fields of the EMAC_PTP_FOFF
+ * register, except set the PTPCOF field to 0x0E.
+ */
+ ptpfoff = 0x0E24170C;
+ bfin_write_EMAC_PTP_FOFF(ptpfoff);
+ /*
+ * Program bits [15:0] of the EMAC_PTP_FV1 register to 0x88F7, which
+ * corresponds to PTP messages on the MAC layer.
+ */
+ ptpfv1 = 0x110488F7;
+ bfin_write_EMAC_PTP_FV1(ptpfv1);
+ ptpfv2 = 0x0140013F;
+ bfin_write_EMAC_PTP_FV2(ptpfv2);
+ /*
+ * To allow the timestamping of Pdelay_Req and Pdelay_Resp
+ * messages, set the value to 0xFFF0.
+ */
+ ptpfv3 = 0xFFFFFFF0;
+ bfin_write_EMAC_PTP_FV3(ptpfv3);
+
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (config.tx_type == HWTSTAMP_TX_OFF &&
+ bfin_mac_hwtstamp_is_none(config.rx_filter)) {
+ ptpctl &= ~PTP_EN;
+ bfin_write_EMAC_PTP_CTL(ptpctl);
+
+ SSYNC();
+ } else {
+ ptpctl |= PTP_EN;
+ bfin_write_EMAC_PTP_CTL(ptpctl);
+
+ /*
+ * clear any existing timestamp
+ */
+ bfin_read_EMAC_PTP_RXSNAPLO();
+ bfin_read_EMAC_PTP_RXSNAPHI();
+
+ bfin_read_EMAC_PTP_TXSNAPLO();
+ bfin_read_EMAC_PTP_TXSNAPHI();
+
+ /*
+ * Set registers so that rollover occurs soon to test this.
+ */
+ bfin_write_EMAC_PTP_TIMELO(0x00000000);
+ bfin_write_EMAC_PTP_TIMEHI(0xFF800000);
+
+ SSYNC();
+
+ lp->compare.last_update = 0;
+ timecounter_init(&lp->clock,
+ &lp->cycles,
+ ktime_to_ns(ktime_get_real()));
+ timecompare_update(&lp->compare, 0);
+ }
+
+ lp->stamp_cfg = config;
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static void bfin_dump_hwtamp(char *s, ktime_t *hw, ktime_t *ts, struct timecompare *cmp)
+{
+ ktime_t sys = ktime_get_real();
+
+ pr_debug("%s %s hardware:%d,%d transform system:%d,%d system:%d,%d, cmp:%lld, %lld\n",
+ __func__, s, hw->tv.sec, hw->tv.nsec, ts->tv.sec, ts->tv.nsec, sys.tv.sec,
+ sys.tv.nsec, cmp->offset, cmp->skew);
+}
+
+static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
+{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ int timeout_cnt = MAX_TIMEOUT_CNT;
+
+ /* When doing time stamping, keep the connection to the socket
+ * a while longer
+ */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ /*
+ * The timestamping is done at the EMAC module's MII/RMII interface
+ * when the module sees the Start of Frame of an event message packet. This
+ * interface is the closest possible place to the physical Ethernet transmission
+ * medium, providing the best timing accuracy.
+ */
+ while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
+ udelay(1);
+ if (timeout_cnt == 0)
+ netdev_err(netdev, "timestamp the TX packet failed\n");
+ else {
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 ns;
+ u64 regval;
+
+ regval = bfin_read_EMAC_PTP_TXSNAPLO();
+ regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ ns = timecounter_cyc2time(&lp->clock,
+ regval);
+ timecompare_update(&lp->compare, ns);
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ shhwtstamps.syststamp =
+ timecompare_transform(&lp->compare, ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+
+ bfin_dump_hwtamp("TX", &shhwtstamps.hwtstamp, &shhwtstamps.syststamp, &lp->compare);
+ }
+ }
+}
+
+static void bfin_rx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
+{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+ u32 valid;
+ u64 regval, ns;
+ struct skb_shared_hwtstamps *shhwtstamps;
+
+ if (bfin_mac_hwtstamp_is_none(lp->stamp_cfg.rx_filter))
+ return;
+
+ valid = bfin_read_EMAC_PTP_ISTAT() & RXEL;
+ if (!valid)
+ return;
+
+ shhwtstamps = skb_hwtstamps(skb);
+
+ regval = bfin_read_EMAC_PTP_RXSNAPLO();
+ regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32;
+ ns = timecounter_cyc2time(&lp->clock, regval);
+ timecompare_update(&lp->compare, ns);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ shhwtstamps->syststamp = timecompare_transform(&lp->compare, ns);
+
+ bfin_dump_hwtamp("RX", &shhwtstamps->hwtstamp, &shhwtstamps->syststamp, &lp->compare);
+}
+
+/*
+ * bfin_read_clock - read raw cycle counter (to be used by time counter)
+ */
+static cycle_t bfin_read_clock(const struct cyclecounter *tc)
+{
+ u64 stamp;
+
+ stamp = bfin_read_EMAC_PTP_TIMELO();
+ stamp |= (u64)bfin_read_EMAC_PTP_TIMEHI() << 32ULL;
+
+ return stamp;
+}
+
+#define PTP_CLK 25000000
+
+static void bfin_mac_hwtstamp_init(struct net_device *netdev)
+{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+ u64 append;
+
+ /* Initialize hardware timer */
+ append = PTP_CLK * (1ULL << 32);
+ do_div(append, get_sclk());
+ bfin_write_EMAC_PTP_ADDEND((u32)append);
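+ /* e.g. assuming a 100 MHz sclk (board dependent):
+ * append = 25 MHz * 2^32 / 100 MHz = 0x40000000, so the addend
+ * accumulator overflows -- and the PTP counter ticks -- once
+ * every four system clocks. */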
+
+ memset(&lp->cycles, 0, sizeof(lp->cycles));
+ lp->cycles.read = bfin_read_clock;
+ lp->cycles.mask = CLOCKSOURCE_MASK(64);
+ lp->cycles.mult = 1000000000 / PTP_CLK;
+ lp->cycles.shift = 0;
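+ /* 10^9 / 25000000 = 40: each counter tick is exactly 40 ns,
+ * so no fractional shift is needed. */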
+
+ /* Synchronize our NIC clock against system wall clock */
+ memset(&lp->compare, 0, sizeof(lp->compare));
+ lp->compare.source = &lp->clock;
+ lp->compare.target = ktime_get_real;
+ lp->compare.num_samples = 10;
+
+ /* Initialize hwstamp config */
+ lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
+}
+
+#else
+# define bfin_mac_hwtstamp_is_none(cfg) 0
+# define bfin_mac_hwtstamp_init(dev)
+# define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP)
+# define bfin_rx_hwtstamp(dev, skb)
+# define bfin_tx_hwtstamp(dev, skb)
+#endif
+
+static inline void _tx_reclaim_skb(void)
+{
+ do {
+ tx_list_head->desc_a.config &= ~DMAEN;
+ tx_list_head->status.status_word = 0;
+ if (tx_list_head->skb) {
+ dev_kfree_skb(tx_list_head->skb);
+ tx_list_head->skb = NULL;
+ }
+ tx_list_head = tx_list_head->next;
+
+ } while (tx_list_head->status.status_word != 0);
+}
+
+static void tx_reclaim_skb(struct bfin_mac_local *lp)
+{
+ int timeout_cnt = MAX_TIMEOUT_CNT;
+
+ if (tx_list_head->status.status_word != 0)
+ _tx_reclaim_skb();
+
+ if (current_tx_ptr->next == tx_list_head) {
+ while (tx_list_head->status.status_word == 0) {
+ /* slow down polling to avoid stopping the queue too often. */
+ udelay(10);
+ /* reclaim skb if DMA is not running. */
+ if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN))
+ break;
+ if (timeout_cnt-- < 0)
+ break;
+ }
+
+ if (timeout_cnt >= 0)
+ _tx_reclaim_skb();
+ else
+ netif_stop_queue(lp->ndev);
+ }
+
+ if (current_tx_ptr->next != tx_list_head &&
+ netif_queue_stopped(lp->ndev))
+ netif_wake_queue(lp->ndev);
+
+ if (tx_list_head != current_tx_ptr) {
+ /* shorten the timer interval if tx queue is stopped */
+ if (netif_queue_stopped(lp->ndev))
+ lp->tx_reclaim_timer.expires =
+ jiffies + (TX_RECLAIM_JIFFIES >> 4);
+ else
+ lp->tx_reclaim_timer.expires =
+ jiffies + TX_RECLAIM_JIFFIES;
+
+ mod_timer(&lp->tx_reclaim_timer,
+ lp->tx_reclaim_timer.expires);
+ }
+
+ return;
+}
+
+static void tx_reclaim_skb_timeout(unsigned long lp)
+{
+ tx_reclaim_skb((struct bfin_mac_local *)lp);
+}
+
+static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+ u16 *data;
+ u32 data_align = (unsigned long)(skb->data) & 0x3;
+
+ current_tx_ptr->skb = skb;
+
+ if (data_align == 0x2) {
+ /* move skb->data to current_tx_ptr payload */
+ data = (u16 *)(skb->data) - 1;
+ *data = (u16)(skb->len);
+ /*
+ * When transmitting an Ethernet packet, the PTP_TSYNC module requires
+ * a DMA_Length_Word field associated with the packet. The lower 12 bits
+ * of this field are the length of the packet payload in bytes and the higher
+ * 4 bits are the timestamping enable field.
+ */
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ *data |= 0x1000;
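+ /* e.g. a 60-byte frame with timestamping requested yields a
+ * DMA_Length_Word of 0x1000 | 60 = 0x103C. */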
+
+ current_tx_ptr->desc_a.start_addr = (u32)data;
+ /* this is important! */
+ blackfin_dcache_flush_range((u32)data,
+ (u32)((u8 *)data + skb->len + 4));
+ } else {
+ *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
+ /* enable timestamping for the sent packet */
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ *((u16 *)(current_tx_ptr->packet)) |= 0x1000;
+ memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
+ skb->len);
+ current_tx_ptr->desc_a.start_addr =
+ (u32)current_tx_ptr->packet;
+ blackfin_dcache_flush_range(
+ (u32)current_tx_ptr->packet,
+ (u32)(current_tx_ptr->packet + skb->len + 2));
+ }
+
+ /* make sure the internal data buffers in the core are drained
+ * so that the DMA descriptors are completely written when the
+ * DMA engine goes to fetch them below
+ */
+ SSYNC();
+
+ /* always clear the status buffer before starting tx dma */
+ current_tx_ptr->status.status_word = 0;
+
+ /* enable this packet's dma */
+ current_tx_ptr->desc_a.config |= DMAEN;
+
+ /* tx dma is running, just return */
+ if (bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)
+ goto out;
+
+ /* tx dma is not running */
+ bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr->desc_a));
+ /* dma enabled, read from memory, size is 6 */
+ bfin_write_DMA2_CONFIG(current_tx_ptr->desc_a.config);
+ /* Turn on the EMAC tx */
+ bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
+
+out:
+ bfin_tx_hwtstamp(dev, skb);
+
+ current_tx_ptr = current_tx_ptr->next;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += (skb->len);
+
+ tx_reclaim_skb(lp);
+
+ return NETDEV_TX_OK;
+}
+
+#define IP_HEADER_OFF 0
+#define RX_ERROR_MASK (RX_LONG | RX_ALIGN | RX_CRC | RX_LEN | \
+ RX_FRAG | RX_ADDR | RX_DMAO | RX_PHY | RX_LATE | RX_RANGE)
+
+static void bfin_mac_rx(struct net_device *dev)
+{
+ struct sk_buff *skb, *new_skb;
+ unsigned short len;
+ struct bfin_mac_local *lp __maybe_unused = netdev_priv(dev);
+#if defined(BFIN_MAC_CSUM_OFFLOAD)
+ unsigned int i;
+ unsigned char fcs[ETH_FCS_LEN + 1];
+#endif
+
+ /* check if frame status word reports an error condition
+ * in which case we simply drop the packet
+ */
+ if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
+ netdev_notice(dev, "rx: receive error - packet dropped\n");
+ dev->stats.rx_dropped++;
+ goto out;
+ }
+
+ /* allocate a new skb for next time receive */
+ skb = current_rx_ptr->skb;
+
+ new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
+ if (!new_skb) {
+ netdev_notice(dev, "rx: low on mem - packet dropped\n");
+ dev->stats.rx_dropped++;
+ goto out;
+ }
+ /* reserve 2 bytes for RXDWA padding */
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ /* Invalidate the data cache over the skb->data range when write-back
+ * caching is used, so stale lines cannot overwrite the new data from DMA
+ */
+ blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
+ (unsigned long)new_skb->end);
+
+ current_rx_ptr->skb = new_skb;
+ current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
+
+ len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
+ /* Strip the Ethernet FCS length from the reported frame length */
+ len -= ETH_FCS_LEN;
+ skb_put(skb, len);
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ bfin_rx_hwtstamp(dev, skb);
+
+#if defined(BFIN_MAC_CSUM_OFFLOAD)
+ /* Checksum offloading only works for IPv4 packets with the standard IP header
+ * length of 20 bytes, because the blackfin MAC checksum calculation is
+ * based on that assumption. We must NOT use the calculated checksum if our
+ * IP version or header length breaks that assumption.
+ */
+ if (skb->data[IP_HEADER_OFF] == 0x45) {
+ skb->csum = current_rx_ptr->status.ip_payload_csum;
+ /*
+ * Deduce Ethernet FCS from hardware generated IP payload checksum.
+ * IP checksum is based on 16-bit one's complement algorithm.
+ * Removing a value from the checksum is done by adding its inversion.
+ * If the IP payload len is odd, the inverted FCS should also
+ * begin from odd address and leave first byte zero.
+ */
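+ /* In 16-bit one's complement arithmetic x + ~x = 0xFFFF, the
+ * "negative zero", so adding the inverted FCS bytes cancels
+ * their contribution to the hardware checksum. */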
+ if (skb->len % 2) {
+ fcs[0] = 0;
+ for (i = 0; i < ETH_FCS_LEN; i++)
+ fcs[i + 1] = ~skb->data[skb->len + i];
+ skb->csum = csum_partial(fcs, ETH_FCS_LEN + 1, skb->csum);
+ } else {
+ for (i = 0; i < ETH_FCS_LEN; i++)
+ fcs[i] = ~skb->data[skb->len + i];
+ skb->csum = csum_partial(fcs, ETH_FCS_LEN, skb->csum);
+ }
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+#endif
+
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+out:
+ current_rx_ptr->status.status_word = 0x00000000;
+ current_rx_ptr = current_rx_ptr->next;
+}
+
+/* interrupt routine to handle rx and error signal */
+static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ int number = 0;
+
+get_one_packet:
+ if (current_rx_ptr->status.status_word == 0) {
+ /* no new packets received */
+ if (number == 0) {
+ if (current_rx_ptr->next->status.status_word != 0) {
+ current_rx_ptr = current_rx_ptr->next;
+ goto real_rx;
+ }
+ }
+ bfin_write_DMA1_IRQ_STATUS(bfin_read_DMA1_IRQ_STATUS() |
+ DMA_DONE | DMA_ERR);
+ return IRQ_HANDLED;
+ }
+
+real_rx:
+ bfin_mac_rx(dev);
+ number++;
+ goto get_one_packet;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bfin_mac_poll(struct net_device *dev)
+{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+
+ disable_irq(IRQ_MAC_RX);
+ bfin_mac_interrupt(IRQ_MAC_RX, dev);
+ tx_reclaim_skb(lp);
+ enable_irq(IRQ_MAC_RX);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+static void bfin_mac_disable(void)
+{
+ unsigned int opmode;
+
+ opmode = bfin_read_EMAC_OPMODE();
+ opmode &= (~RE);
+ opmode &= (~TE);
+ /* Turn off the EMAC */
+ bfin_write_EMAC_OPMODE(opmode);
+}
+
+/*
+ * Enable Interrupts, Receive, and Transmit
+ */
+static int bfin_mac_enable(struct phy_device *phydev)
+{
+ int ret;
+ u32 opmode;
+
+ pr_debug("%s\n", __func__);
+
+ /* Set RX DMA */
+ bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
+ bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config);
+
+ /* Wait MII done */
+ ret = bfin_mdio_poll();
+ if (ret)
+ return ret;
+
+ /* We enable only RX here */
+ /* ASTP : Enable Automatic Pad Stripping
+ PR : Promiscuous Mode for test
+ PSF : Receive frames with total length less than 64 bytes.
+ FDMODE : Full Duplex Mode
+ LB : Internal Loopback for test
+ RE : Receiver Enable */
+ opmode = bfin_read_EMAC_OPMODE();
+ if (opmode & FDMODE)
+ opmode |= PSF;
+ else
+ opmode |= DRO | DC | PSF;
+ opmode |= RE;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
+ opmode |= RMII; /* for now only 100 Mbit is supported */
+#if defined(CONFIG_BF537) || defined(CONFIG_BF536)
+ if (__SILICON_REVISION__ < 3) {
+ /*
+ * This isn't publicly documented (fun times!), but in
+ * silicon <=0.2, the RX and TX pins are clocked together.
+ * So in order to recv, we must enable the transmit side
+ * as well. This will cause a spurious TX interrupt too,
+ * but we can easily consume that.
+ */
+ opmode |= TE;
+ }
+#endif
+ }
+
+ /* Turn on the EMAC rx */
+ bfin_write_EMAC_OPMODE(opmode);
+
+ return 0;
+}
+
+/* Our watchdog timed out. Called by the networking layer */
+static void bfin_mac_timeout(struct net_device *dev)
+{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+
+ pr_debug("%s: %s\n", dev->name, __func__);
+
+ bfin_mac_disable();
+
+ del_timer(&lp->tx_reclaim_timer);
+
+ /* reset tx queue and free skb */
+ while (tx_list_head != current_tx_ptr) {
+ tx_list_head->desc_a.config &= ~DMAEN;
+ tx_list_head->status.status_word = 0;
+ if (tx_list_head->skb) {
+ dev_kfree_skb(tx_list_head->skb);
+ tx_list_head->skb = NULL;
+ }
+ tx_list_head = tx_list_head->next;
+ }
+
+ if (netif_queue_stopped(lp->ndev))
+ netif_wake_queue(lp->ndev);
+
+ bfin_mac_enable(lp->phydev);
+
+ /* We can accept TX packets again */
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+static void bfin_mac_multicast_hash(struct net_device *dev)
+{
+ u32 emac_hashhi, emac_hashlo;
+ struct netdev_hw_addr *ha;
+ u32 crc;
+
+ emac_hashhi = emac_hashlo = 0;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc(ETH_ALEN, ha->addr);
+ crc >>= 26;
+
+ if (crc & 0x20)
+ emac_hashhi |= 1 << (crc & 0x1f);
+ else
+ emac_hashlo |= 1 << (crc & 0x1f);
+ }
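+ /* Example: if the top six CRC bits are 45 (0b101101), bit 5 is
+ * set, so HASHHI gets bit 45 & 0x1f = 13; a value below 32 would
+ * set the corresponding bit in HASHLO instead. */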
+
+ bfin_write_EMAC_HASHHI(emac_hashhi);
+ bfin_write_EMAC_HASHLO(emac_hashlo);
+}
+
+/*
+ * This routine will, depending on the values passed to it,
+ * either make it accept multicast packets, go into
+ * promiscuous mode (for TCPDUMP and cousins) or accept
+ * a select set of multicast packets
+ */
+static void bfin_mac_set_multicast_list(struct net_device *dev)
+{
+ u32 sysctl;
+
+ if (dev->flags & IFF_PROMISC) {
+ netdev_info(dev, "set promisc mode\n");
+ sysctl = bfin_read_EMAC_OPMODE();
+ sysctl |= PR;
+ bfin_write_EMAC_OPMODE(sysctl);
+ } else if (dev->flags & IFF_ALLMULTI) {
+ /* accept all multicast */
+ sysctl = bfin_read_EMAC_OPMODE();
+ sysctl |= PAM;
+ bfin_write_EMAC_OPMODE(sysctl);
+ } else if (!netdev_mc_empty(dev)) {
+ /* set up multicast hash table */
+ sysctl = bfin_read_EMAC_OPMODE();
+ sysctl |= HM;
+ bfin_write_EMAC_OPMODE(sysctl);
+ bfin_mac_multicast_hash(dev);
+ } else {
+ /* clear promisc or multicast mode */
+ sysctl = bfin_read_EMAC_OPMODE();
+ sysctl &= ~(RAF | PAM);
+ bfin_write_EMAC_OPMODE(sysctl);
+ }
+}
+
+static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return bfin_mac_hwtstamp_ioctl(netdev, ifr, cmd);
+ default:
+ if (lp->phydev)
+ return phy_mii_ioctl(lp->phydev, ifr, cmd);
+ else
+ return -EOPNOTSUPP;
+ }
+}
+
+/*
+ * this puts the device in an inactive state
+ */
+static void bfin_mac_shutdown(struct net_device *dev)
+{
+ /* Turn off the EMAC */
+ bfin_write_EMAC_OPMODE(0x00000000);
+ /* Turn off the EMAC RX DMA */
+ bfin_write_DMA1_CONFIG(0x0000);
+ bfin_write_DMA2_CONFIG(0x0000);
+}
+
+/*
+ * Open and Initialize the interface
+ *
+ * Set up everything, reset the card, etc..
+ */
+static int bfin_mac_open(struct net_device *dev)
+{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+ int ret;
+ pr_debug("%s: %s\n", dev->name, __func__);
+
+ /*
+ * Check that the address is valid. If it's not, refuse
+ * to bring the device up. The user must specify an
+ * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
+ */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ netdev_warn(dev, "no valid ethernet hw addr\n");
+ return -EINVAL;
+ }
+
+ /* initial rx and tx list */
+ ret = desc_list_init();
+ if (ret)
+ return ret;
+
+ phy_start(lp->phydev);
+ phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
+ setup_system_regs(dev);
+ setup_mac_addr(dev->dev_addr);
+
+ bfin_mac_disable();
+ ret = bfin_mac_enable(lp->phydev);
+ if (ret)
+ return ret;
+ pr_debug("hardware init finished\n");
+
+ netif_start_queue(dev);
+ netif_carrier_on(dev);
+
+ return 0;
+}
+
+/*
+ * this makes the board clean up everything that it can
+ * and not talk to the outside world. Caused by
+ * an 'ifconfig ethX down'
+ */
+static int bfin_mac_close(struct net_device *dev)
+{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+ pr_debug("%s: %s\n", dev->name, __func__);
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ phy_stop(lp->phydev);
+ phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);
+
+ /* clear everything */
+ bfin_mac_shutdown(dev);
+
+ /* free the rx/tx buffers */
+ desc_list_free();
+
+ return 0;
+}
+
+static const struct net_device_ops bfin_mac_netdev_ops = {
+ .ndo_open = bfin_mac_open,
+ .ndo_stop = bfin_mac_close,
+ .ndo_start_xmit = bfin_mac_hard_start_xmit,
+ .ndo_set_mac_address = bfin_mac_set_mac_address,
+ .ndo_tx_timeout = bfin_mac_timeout,
+ .ndo_set_rx_mode = bfin_mac_set_multicast_list,
+ .ndo_do_ioctl = bfin_mac_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = bfin_mac_poll,
+#endif
+};
+
+static int __devinit bfin_mac_probe(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+ struct bfin_mac_local *lp;
+ struct platform_device *pd;
+ struct bfin_mii_bus_platform_data *mii_bus_data;
+ int rc;
+
+ ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
+ if (!ndev) {
+ dev_err(&pdev->dev, "Cannot allocate net device!\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ platform_set_drvdata(pdev, ndev);
+ lp = netdev_priv(ndev);
+ lp->ndev = ndev;
+
+ /* Grab the MAC address in the MAC */
+ *(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
+ *(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());
+
+ /* probe mac */
+ /* TODO: how to probe? which register holds the revision? */
+ bfin_write_EMAC_ADDRLO(0x12345678);
+ if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
+ dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n");
+ rc = -ENODEV;
+ goto out_err_probe_mac;
+ }
+
+
+ /*
+ * Is it valid? (Did the bootloader initialize it?) If not, grab
+ * the MAC from the board; this is done in
+ * arch/blackfin/mach-bfxxx/boards/eth_mac.c
+ */
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ bfin_get_ether_addr(ndev->dev_addr);
+
+ /* If still not valid, get a random one */
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ random_ether_addr(ndev->dev_addr);
+
+ setup_mac_addr(ndev->dev_addr);
+
+ if (!pdev->dev.platform_data) {
+ dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n");
+ rc = -ENODEV;
+ goto out_err_probe_mac;
+ }
+ pd = pdev->dev.platform_data;
+ lp->mii_bus = platform_get_drvdata(pd);
+ if (!lp->mii_bus) {
+ dev_err(&pdev->dev, "Cannot get mii_bus!\n");
+ rc = -ENODEV;
+ goto out_err_probe_mac;
+ }
+ lp->mii_bus->priv = ndev;
+ mii_bus_data = pd->dev.platform_data;
+
+ rc = mii_probe(ndev, mii_bus_data->phy_mode);
+ if (rc) {
+ dev_err(&pdev->dev, "MII Probe failed!\n");
+ goto out_err_mii_probe;
+ }
+
+ lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask;
+ lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask;
+
+ /* Fill in the fields of the device structure with ethernet values. */
+ ether_setup(ndev);
+
+ ndev->netdev_ops = &bfin_mac_netdev_ops;
+ ndev->ethtool_ops = &bfin_mac_ethtool_ops;
+
+ init_timer(&lp->tx_reclaim_timer);
+ lp->tx_reclaim_timer.data = (unsigned long)lp;
+ lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout;
+
+ spin_lock_init(&lp->lock);
+
+ /* now, enable interrupts */
+ /* register irq handler */
+ rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
+ IRQF_DISABLED, "EMAC_RX", ndev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
+ rc = -EBUSY;
+ goto out_err_request_irq;
+ }
+
+ rc = register_netdev(ndev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot register net device!\n");
+ goto out_err_reg_ndev;
+ }
+
+ bfin_mac_hwtstamp_init(ndev);
+
+ /* now, print out the card info, in a short format.. */
+ netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
+
+ return 0;
+
+out_err_reg_ndev:
+ free_irq(IRQ_MAC_RX, ndev);
+out_err_request_irq:
+out_err_mii_probe:
+ mdiobus_unregister(lp->mii_bus);
+ mdiobus_free(lp->mii_bus);
+out_err_probe_mac:
+ platform_set_drvdata(pdev, NULL);
+ free_netdev(ndev);
+
+ return rc;
+}
+
+static int __devexit bfin_mac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct bfin_mac_local *lp = netdev_priv(ndev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ lp->mii_bus->priv = NULL;
+
+ unregister_netdev(ndev);
+
+ free_irq(IRQ_MAC_RX, ndev);
+
+ free_netdev(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ struct net_device *net_dev = platform_get_drvdata(pdev);
+ struct bfin_mac_local *lp = netdev_priv(net_dev);
+
+ if (lp->wol) {
+ bfin_write_EMAC_OPMODE((bfin_read_EMAC_OPMODE() & ~TE) | RE);
+ bfin_write_EMAC_WKUP_CTL(MPKE);
+ enable_irq_wake(IRQ_MAC_WAKEDET);
+ } else {
+ if (netif_running(net_dev))
+ bfin_mac_close(net_dev);
+ }
+
+ return 0;
+}
+
+static int bfin_mac_resume(struct platform_device *pdev)
+{
+ struct net_device *net_dev = platform_get_drvdata(pdev);
+ struct bfin_mac_local *lp = netdev_priv(net_dev);
+
+ if (lp->wol) {
+ bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
+ bfin_write_EMAC_WKUP_CTL(0);
+ disable_irq_wake(IRQ_MAC_WAKEDET);
+ } else {
+ if (netif_running(net_dev))
+ bfin_mac_open(net_dev);
+ }
+
+ return 0;
+}
+#else
+#define bfin_mac_suspend NULL
+#define bfin_mac_resume NULL
+#endif /* CONFIG_PM */
+
+static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
+{
+ struct mii_bus *miibus;
+ struct bfin_mii_bus_platform_data *mii_bus_pd;
+ const unsigned short *pin_req;
+ int rc, i;
+
+ mii_bus_pd = dev_get_platdata(&pdev->dev);
+ if (!mii_bus_pd) {
+ dev_err(&pdev->dev, "No peripherals in platform data!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * We are setting up a network card,
+ * so set the GPIO pins to Ethernet mode
+ */
+ pin_req = mii_bus_pd->mac_peripherals;
+ rc = peripheral_request_list(pin_req, KBUILD_MODNAME);
+ if (rc) {
+ dev_err(&pdev->dev, "Requesting peripherals failed!\n");
+ return rc;
+ }
+
+ rc = -ENOMEM;
+ miibus = mdiobus_alloc();
+ if (miibus == NULL)
+ goto out_err_alloc;
+ miibus->read = bfin_mdiobus_read;
+ miibus->write = bfin_mdiobus_write;
+ miibus->reset = bfin_mdiobus_reset;
+
+ miibus->parent = &pdev->dev;
+ miibus->name = "bfin_mii_bus";
+ miibus->phy_mask = mii_bus_pd->phy_mask;
+
+ snprintf(miibus->id, MII_BUS_ID_SIZE, "0");
+ miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ if (!miibus->irq)
+ goto out_err_irq_alloc;
+
+ for (i = 0; i < PHY_MAX_ADDR; ++i)
+ miibus->irq[i] = PHY_POLL;
+
+ rc = clamp(mii_bus_pd->phydev_number, 0, PHY_MAX_ADDR);
+ if (rc != mii_bus_pd->phydev_number)
+ dev_err(&pdev->dev, "Invalid number (%i) of phydevs\n",
+ mii_bus_pd->phydev_number);
+ for (i = 0; i < rc; ++i) {
+ unsigned short phyaddr = mii_bus_pd->phydev_data[i].addr;
+ if (phyaddr < PHY_MAX_ADDR)
+ miibus->irq[phyaddr] = mii_bus_pd->phydev_data[i].irq;
+ else
+ dev_err(&pdev->dev,
+ "Invalid PHY address %i for phydev %i\n",
+ phyaddr, i);
+ }
+
+ rc = mdiobus_register(miibus);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
+ goto out_err_mdiobus_register;
+ }
+
+ platform_set_drvdata(pdev, miibus);
+ return 0;
+
+out_err_mdiobus_register:
+ kfree(miibus->irq);
+out_err_irq_alloc:
+ mdiobus_free(miibus);
+out_err_alloc:
+ peripheral_free_list(pin_req);
+
+ return rc;
+}
+
+static int __devexit bfin_mii_bus_remove(struct platform_device *pdev)
+{
+ struct mii_bus *miibus = platform_get_drvdata(pdev);
+ struct bfin_mii_bus_platform_data *mii_bus_pd =
+ dev_get_platdata(&pdev->dev);
+
+ platform_set_drvdata(pdev, NULL);
+ mdiobus_unregister(miibus);
+ kfree(miibus->irq);
+ mdiobus_free(miibus);
+ peripheral_free_list(mii_bus_pd->mac_peripherals);
+
+ return 0;
+}
+
+static struct platform_driver bfin_mii_bus_driver = {
+ .probe = bfin_mii_bus_probe,
+ .remove = __devexit_p(bfin_mii_bus_remove),
+ .driver = {
+ .name = "bfin_mii_bus",
+ .owner = THIS_MODULE,
+ },
+};
+
+static struct platform_driver bfin_mac_driver = {
+ .probe = bfin_mac_probe,
+ .remove = __devexit_p(bfin_mac_remove),
+ .resume = bfin_mac_resume,
+ .suspend = bfin_mac_suspend,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init bfin_mac_init(void)
+{
+ int ret;
+ ret = platform_driver_register(&bfin_mii_bus_driver);
+ if (!ret)
+ return platform_driver_register(&bfin_mac_driver);
+ return ret;
+}
+
+module_init(bfin_mac_init);
+
+static void __exit bfin_mac_cleanup(void)
+{
+ platform_driver_unregister(&bfin_mac_driver);
+ platform_driver_unregister(&bfin_mii_bus_driver);
+}
+
+module_exit(bfin_mac_cleanup);
+
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
new file mode 100644
index 000000000000..f8559ac9a403
--- /dev/null
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -0,0 +1,106 @@
+/*
+ * Blackfin On-Chip MAC Driver
+ *
+ * Copyright 2004-2007 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+#ifndef _BFIN_MAC_H_
+#define _BFIN_MAC_H_
+
+#include <linux/net_tstamp.h>
+#include <linux/clocksource.h>
+#include <linux/timecompare.h>
+#include <linux/timer.h>
+#include <linux/etherdevice.h>
+#include <linux/bfin_mac.h>
+
+/*
+ * Disable hardware checksum for bug #5600 if writeback cache is
+ * enabled. Otherwise, corrupted RX packets would be sent up the stack
+ * without any error mark.
+ */
+#ifndef CONFIG_BFIN_EXTMEM_WRITEBACK
+#define BFIN_MAC_CSUM_OFFLOAD
+#endif
+
+#define TX_RECLAIM_JIFFIES (HZ / 5)
+
+struct dma_descriptor {
+ struct dma_descriptor *next_dma_desc;
+ unsigned long start_addr;
+ unsigned short config;
+ unsigned short x_count;
+};
+
+struct status_area_rx {
+#if defined(BFIN_MAC_CSUM_OFFLOAD)
+ unsigned short ip_hdr_csum; /* ip header checksum */
+ /* ip payload(udp or tcp or others) checksum */
+ unsigned short ip_payload_csum;
+#endif
+ unsigned long status_word; /* the frame status word */
+};
+
+struct status_area_tx {
+ unsigned long status_word; /* the frame status word */
+};
+
+/* use two descriptors for a packet */
+struct net_dma_desc_rx {
+ struct net_dma_desc_rx *next;
+ struct sk_buff *skb;
+ struct dma_descriptor desc_a;
+ struct dma_descriptor desc_b;
+ struct status_area_rx status;
+};
+
+/* use two descriptors for a packet */
+struct net_dma_desc_tx {
+ struct net_dma_desc_tx *next;
+ struct sk_buff *skb;
+ struct dma_descriptor desc_a;
+ struct dma_descriptor desc_b;
+ unsigned char packet[1560];
+ struct status_area_tx status;
+};
+
+struct bfin_mac_local {
+ /*
+ * these are things that the kernel wants me to keep, so users
+ * can find out semi-useless statistics of how well the card is
+ * performing
+ */
+ struct net_device_stats stats;
+
+ spinlock_t lock;
+
+ int wol; /* Wake On Lan */
+ int irq_wake_requested;
+ struct timer_list tx_reclaim_timer;
+ struct net_device *ndev;
+
+ /* Data for EMAC_VLAN1 regs */
+ u16 vlan1_mask, vlan2_mask;
+
+ /* MII and PHY stuffs */
+ int old_link; /* used by bf537_adjust_link */
+ int old_speed;
+ int old_duplex;
+
+ struct phy_device *phydev;
+ struct mii_bus *mii_bus;
+
+#if defined(CONFIG_BFIN_MAC_USE_HWSTAMP)
+ struct cyclecounter cycles;
+ struct timecounter clock;
+ struct timecompare compare;
+ struct hwtstamp_config stamp_cfg;
+#endif
+};
+
+extern void bfin_get_ether_addr(char *addr);
+
+#endif
diff --git a/drivers/net/ethernet/aeroflex/Kconfig b/drivers/net/ethernet/aeroflex/Kconfig
new file mode 100644
index 000000000000..4f4a8d78fd54
--- /dev/null
+++ b/drivers/net/ethernet/aeroflex/Kconfig
@@ -0,0 +1,11 @@
+#
+# Aeroflex Gaisler network device configuration
+#
+
+config GRETH
+ tristate "Aeroflex Gaisler GRETH Ethernet MAC support"
+ depends on SPARC
+ select PHYLIB
+ select CRC32
+ ---help---
+ Say Y here if you want to use the Aeroflex Gaisler GRETH Ethernet MAC.
diff --git a/drivers/net/ethernet/aeroflex/Makefile b/drivers/net/ethernet/aeroflex/Makefile
new file mode 100644
index 000000000000..6e62a679282f
--- /dev/null
+++ b/drivers/net/ethernet/aeroflex/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Aeroflex Gaisler network device drivers.
+#
+
+obj-$(CONFIG_GRETH) += greth.o
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
new file mode 100644
index 000000000000..6715bf54f04e
--- /dev/null
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -0,0 +1,1641 @@
+/*
+ * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
+ *
+ * 2005-2010 (c) Aeroflex Gaisler AB
+ *
+ * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
+ * available in the GRLIB VHDL IP core library.
+ *
+ * Full documentation of both cores can be found here:
+ * http://www.gaisler.com/products/grlib/grip.pdf
+ *
+ * The Gigabit version supports scatter/gather DMA, any alignment of
+ * buffers and checksum offloading.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Contributors: Kristoffer Glembo
+ * Daniel Hellstrom
+ * Marko Isomaki
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/io.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+#include <asm/byteorder.h>
+
+#ifdef CONFIG_SPARC
+#include <asm/idprom.h>
+#endif
+
+#include "greth.h"
+
+#define GRETH_DEF_MSG_ENABLE \
+ (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+static int greth_debug = -1; /* -1 == use GRETH_DEF_MSG_ENABLE as value */
+module_param(greth_debug, int, 0);
+MODULE_PARM_DESC(greth_debug, "GRETH bitmapped debugging message enable value");
+
+/* Accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
+static int macaddr[6];
+module_param_array(macaddr, int, NULL, 0);
+MODULE_PARM_DESC(macaddr, "GRETH Ethernet MAC address");
+
+static int greth_edcl = 1;
+module_param(greth_edcl, int, 0);
+MODULE_PARM_DESC(greth_edcl, "GRETH EDCL usage indicator. Set to 1 if EDCL is used.");
+
+static int greth_open(struct net_device *dev);
+static netdev_tx_t greth_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb,
+ struct net_device *dev);
+static int greth_rx(struct net_device *dev, int limit);
+static int greth_rx_gbit(struct net_device *dev, int limit);
+static void greth_clean_tx(struct net_device *dev);
+static void greth_clean_tx_gbit(struct net_device *dev);
+static irqreturn_t greth_interrupt(int irq, void *dev_id);
+static int greth_close(struct net_device *dev);
+static int greth_set_mac_add(struct net_device *dev, void *p);
+static void greth_set_multicast_list(struct net_device *dev);
+
+#define GRETH_REGLOAD(a) (be32_to_cpu(__raw_readl(&(a))))
+#define GRETH_REGSAVE(a, v) (__raw_writel(cpu_to_be32(v), &(a)))
+#define GRETH_REGORIN(a, v) (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) | (v))))
+#define GRETH_REGANDIN(a, v) (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) & (v))))
+
+#define NEXT_TX(N) (((N) + 1) & GRETH_TXBD_NUM_MASK)
+#define SKIP_TX(N, C) (((N) + C) & GRETH_TXBD_NUM_MASK)
+#define NEXT_RX(N) (((N) + 1) & GRETH_RXBD_NUM_MASK)
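+
+/* The rings are sized as powers of two (128 descriptors each, see
+ * greth.h), so the masks above implement the ring wrap-around: for
+ * instance NEXT_TX(127) == 0 and SKIP_TX(126, 3) == 1.
+ */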
+
+static void greth_print_rx_packet(void *addr, int len)
+{
+ print_hex_dump(KERN_DEBUG, "RX: ", DUMP_PREFIX_OFFSET, 16, 1,
+ addr, len, true);
+}
+
+static void greth_print_tx_packet(struct sk_buff *skb)
+{
+ int i;
+ int length;
+
+ if (skb_shinfo(skb)->nr_frags == 0)
+ length = skb->len;
+ else
+ length = skb_headlen(skb);
+
+ print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, length, true);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+
+ print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb_frag_address(&skb_shinfo(skb)->frags[i]),
+ skb_shinfo(skb)->frags[i].size, true);
+ }
+}
+
+static inline void greth_enable_tx(struct greth_private *greth)
+{
+ wmb();
+ GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
+}
+
+static inline void greth_disable_tx(struct greth_private *greth)
+{
+ GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
+}
+
+static inline void greth_enable_rx(struct greth_private *greth)
+{
+ wmb();
+ GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
+}
+
+static inline void greth_disable_rx(struct greth_private *greth)
+{
+ GRETH_REGANDIN(greth->regs->control, ~GRETH_RXEN);
+}
+
+static inline void greth_enable_irqs(struct greth_private *greth)
+{
+ GRETH_REGORIN(greth->regs->control, GRETH_RXI | GRETH_TXI);
+}
+
+static inline void greth_disable_irqs(struct greth_private *greth)
+{
+ GRETH_REGANDIN(greth->regs->control, ~(GRETH_RXI|GRETH_TXI));
+}
+
+static inline void greth_write_bd(u32 *bd, u32 val)
+{
+ __raw_writel(cpu_to_be32(val), bd);
+}
+
+static inline u32 greth_read_bd(u32 *bd)
+{
+ return be32_to_cpu(__raw_readl(bd));
+}
+
+static void greth_clean_rings(struct greth_private *greth)
+{
+ int i;
+ struct greth_bd *rx_bdp = greth->rx_bd_base;
+ struct greth_bd *tx_bdp = greth->tx_bd_base;
+
+ if (greth->gbit_mac) {
+
+ /* Free and unmap RX buffers */
+ for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
+ if (greth->rx_skbuff[i] != NULL) {
+ dev_kfree_skb(greth->rx_skbuff[i]);
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&rx_bdp->addr),
+ MAX_FRAME_SIZE+NET_IP_ALIGN,
+ DMA_FROM_DEVICE);
+ }
+ }
+
+ /* TX buffers */
+ while (greth->tx_free < GRETH_TXBD_NUM) {
+
+ struct sk_buff *skb = greth->tx_skbuff[greth->tx_last];
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ tx_bdp = greth->tx_bd_base + greth->tx_last;
+ greth->tx_last = NEXT_TX(greth->tx_last);
+
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&tx_bdp->addr),
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
+
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ tx_bdp = greth->tx_bd_base + greth->tx_last;
+
+ dma_unmap_page(greth->dev,
+ greth_read_bd(&tx_bdp->addr),
+ frag->size,
+ DMA_TO_DEVICE);
+
+ greth->tx_last = NEXT_TX(greth->tx_last);
+ }
+ greth->tx_free += nr_frags+1;
+ dev_kfree_skb(skb);
+ }
+
+
+ } else { /* 10/100 Mbps MAC */
+
+ for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
+ kfree(greth->rx_bufs[i]);
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&rx_bdp->addr),
+ MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ }
+ for (i = 0; i < GRETH_TXBD_NUM; i++, tx_bdp++) {
+ kfree(greth->tx_bufs[i]);
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&tx_bdp->addr),
+ MAX_FRAME_SIZE,
+ DMA_TO_DEVICE);
+ }
+ }
+}
+
+static int greth_init_rings(struct greth_private *greth)
+{
+ struct sk_buff *skb;
+ struct greth_bd *rx_bd, *tx_bd;
+ u32 dma_addr;
+ int i;
+
+ rx_bd = greth->rx_bd_base;
+ tx_bd = greth->tx_bd_base;
+
+ /* Initialize descriptor rings and buffers */
+ if (greth->gbit_mac) {
+
+ for (i = 0; i < GRETH_RXBD_NUM; i++) {
+ skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE+NET_IP_ALIGN);
+ if (skb == NULL) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Error allocating DMA ring.\n");
+ goto cleanup;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ dma_addr = dma_map_single(greth->dev,
+ skb->data,
+ MAX_FRAME_SIZE+NET_IP_ALIGN,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(greth->dev, dma_addr)) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Could not create initial DMA mapping\n");
+ goto cleanup;
+ }
+ greth->rx_skbuff[i] = skb;
+ greth_write_bd(&rx_bd[i].addr, dma_addr);
+ greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
+ }
+
+ } else {
+
+ /* 10/100 MAC uses a fixed set of buffers and copy to/from SKBs */
+ for (i = 0; i < GRETH_RXBD_NUM; i++) {
+
+ greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
+
+ if (greth->rx_bufs[i] == NULL) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Error allocating DMA ring.\n");
+ goto cleanup;
+ }
+
+ dma_addr = dma_map_single(greth->dev,
+ greth->rx_bufs[i],
+ MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(greth->dev, dma_addr)) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Could not create initial DMA mapping\n");
+ goto cleanup;
+ }
+ greth_write_bd(&rx_bd[i].addr, dma_addr);
+ greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
+ }
+ for (i = 0; i < GRETH_TXBD_NUM; i++) {
+
+ greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
+
+ if (greth->tx_bufs[i] == NULL) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Error allocating DMA ring.\n");
+ goto cleanup;
+ }
+
+ dma_addr = dma_map_single(greth->dev,
+ greth->tx_bufs[i],
+ MAX_FRAME_SIZE,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(greth->dev, dma_addr)) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Could not create initial DMA mapping\n");
+ goto cleanup;
+ }
+ greth_write_bd(&tx_bd[i].addr, dma_addr);
+ greth_write_bd(&tx_bd[i].stat, 0);
+ }
+ }
+ greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
+ greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);
+
+ /* Initialize pointers. */
+ greth->rx_cur = 0;
+ greth->tx_next = 0;
+ greth->tx_last = 0;
+ greth->tx_free = GRETH_TXBD_NUM;
+
+ /* Initialize descriptor base address */
+ GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
+ GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);
+
+ return 0;
+
+cleanup:
+ greth_clean_rings(greth);
+ return -ENOMEM;
+}
+
+static int greth_open(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ int err;
+
+ err = greth_init_rings(greth);
+ if (err) {
+ if (netif_msg_ifup(greth))
+ dev_err(&dev->dev, "Could not allocate memory for DMA rings\n");
+ return err;
+ }
+
+ err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev);
+ if (err) {
+ if (netif_msg_ifup(greth))
+ dev_err(&dev->dev, "Could not allocate interrupt %d\n", dev->irq);
+ greth_clean_rings(greth);
+ return err;
+ }
+
+ if (netif_msg_ifup(greth))
+ dev_dbg(&dev->dev, " starting queue\n");
+ netif_start_queue(dev);
+
+ GRETH_REGSAVE(greth->regs->status, 0xFF);
+
+ napi_enable(&greth->napi);
+
+ greth_enable_irqs(greth);
+ greth_enable_tx(greth);
+ greth_enable_rx(greth);
+ return 0;
+
+}
+
+static int greth_close(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+
+ napi_disable(&greth->napi);
+
+ greth_disable_irqs(greth);
+ greth_disable_tx(greth);
+ greth_disable_rx(greth);
+
+ netif_stop_queue(dev);
+
+ free_irq(greth->irq, (void *) dev);
+
+ greth_clean_rings(greth);
+
+ return 0;
+}
+
+static netdev_tx_t
+greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct greth_bd *bdp;
+ int err = NETDEV_TX_OK;
+ u32 status, dma_addr, ctrl;
+ unsigned long flags;
+
+ /* Clean TX Ring */
+ greth_clean_tx(greth->netdev);
+
+ if (unlikely(greth->tx_free <= 0)) {
+ spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
+ ctrl = GRETH_REGLOAD(greth->regs->control);
+ /* Enable TX IRQ only if not already in poll() routine */
+ if (ctrl & GRETH_RXI)
+ GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&greth->devlock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (netif_msg_pktdata(greth))
+ greth_print_tx_packet(skb);
+
+
+ if (unlikely(skb->len > MAX_FRAME_SIZE)) {
+ dev->stats.tx_errors++;
+ goto out;
+ }
+
+ bdp = greth->tx_bd_base + greth->tx_next;
+ dma_addr = greth_read_bd(&bdp->addr);
+
+ memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
+
+ dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
+
+ status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
+ greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;
+
+ /* Wrap around descriptor ring */
+ if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
+ status |= GRETH_BD_WR;
+ }
+
+ greth->tx_next = NEXT_TX(greth->tx_next);
+ greth->tx_free--;
+
+ /* Write descriptor control word and enable transmission */
+ greth_write_bd(&bdp->stat, status);
+ spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
+ greth_enable_tx(greth);
+ spin_unlock_irqrestore(&greth->devlock, flags);
+
+out:
+ dev_kfree_skb(skb);
+ return err;
+}
+
+
+static netdev_tx_t
+greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct greth_bd *bdp;
+ u32 status = 0, dma_addr, ctrl;
+ int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
+ unsigned long flags;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ /* Clean TX Ring */
+ greth_clean_tx_gbit(dev);
+
+ if (greth->tx_free < nr_frags + 1) {
+ spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
+ ctrl = GRETH_REGLOAD(greth->regs->control);
+ /* Enable TX IRQ only if not already in poll() routine */
+ if (ctrl & GRETH_RXI)
+ GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&greth->devlock, flags);
+ err = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ if (netif_msg_pktdata(greth))
+ greth_print_tx_packet(skb);
+
+ if (unlikely(skb->len > MAX_FRAME_SIZE)) {
+ dev->stats.tx_errors++;
+ goto out;
+ }
+
+ /* Save skb pointer. */
+ greth->tx_skbuff[greth->tx_next] = skb;
+
+ /* Linear buf */
+ if (nr_frags != 0)
+ status = GRETH_TXBD_MORE;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ status |= GRETH_TXBD_CSALL;
+ status |= skb_headlen(skb) & GRETH_BD_LEN;
+ if (greth->tx_next == GRETH_TXBD_NUM_MASK)
+ status |= GRETH_BD_WR;
+
+
+ bdp = greth->tx_bd_base + greth->tx_next;
+ greth_write_bd(&bdp->stat, status);
+ dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
+ goto map_error;
+
+ greth_write_bd(&bdp->addr, dma_addr);
+
+ curr_tx = NEXT_TX(greth->tx_next);
+
+ /* Frags */
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ greth->tx_skbuff[curr_tx] = NULL;
+ bdp = greth->tx_bd_base + curr_tx;
+
+ status = GRETH_BD_EN;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ status |= GRETH_TXBD_CSALL;
+ status |= frag->size & GRETH_BD_LEN;
+
+ /* Wrap around descriptor ring */
+ if (curr_tx == GRETH_TXBD_NUM_MASK)
+ status |= GRETH_BD_WR;
+
+ /* More fragments left */
+ if (i < nr_frags - 1)
+ status |= GRETH_TXBD_MORE;
+ else
+ status |= GRETH_BD_IE; /* enable IRQ on last fragment */
+
+ greth_write_bd(&bdp->stat, status);
+
+ dma_addr = skb_frag_dma_map(greth->dev, frag, 0, frag->size,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
+ goto frag_map_error;
+
+ greth_write_bd(&bdp->addr, dma_addr);
+
+ curr_tx = NEXT_TX(curr_tx);
+ }
+
+ wmb();
+
+ /* Enable the descriptor chain by enabling the first descriptor */
+ bdp = greth->tx_bd_base + greth->tx_next;
+ greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
+ greth->tx_next = curr_tx;
+ greth->tx_free -= nr_frags + 1;
+
+ wmb();
+
+ spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
+ greth_enable_tx(greth);
+ spin_unlock_irqrestore(&greth->devlock, flags);
+
+ return NETDEV_TX_OK;
+
+frag_map_error:
+ /* Unmap SKB mappings that succeeded and disable descriptor */
+ for (i = greth->tx_next; i != curr_tx; i = NEXT_TX(i)) {
+ bdp = greth->tx_bd_base + i;
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&bdp->addr),
+ greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
+ DMA_TO_DEVICE);
+ greth_write_bd(&bdp->stat, 0);
+ }
+map_error:
+ if (net_ratelimit())
+ dev_warn(greth->dev, "Could not create TX DMA mapping\n");
+ dev_kfree_skb(skb);
+out:
+ return err;
+}
+
+static irqreturn_t greth_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct greth_private *greth;
+ u32 status, ctrl;
+ irqreturn_t retval = IRQ_NONE;
+
+ greth = netdev_priv(dev);
+
+ spin_lock(&greth->devlock);
+
+ /* Get the interrupt events that caused us to be here. */
+ status = GRETH_REGLOAD(greth->regs->status);
+
+ /* Must also check whether interrupts are enabled: the INT_TX|INT_RX
+ * flags may be set regardless of whether the IRQ is enabled. This is
+ * especially important with a shared IRQ.
+ */
+ ctrl = GRETH_REGLOAD(greth->regs->control);
+
+ /* Handle rx and tx interrupts through poll */
+ if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
+ ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
+ retval = IRQ_HANDLED;
+
+ /* Disable interrupts and schedule poll() */
+ greth_disable_irqs(greth);
+ napi_schedule(&greth->napi);
+ }
+
+ mmiowb();
+ spin_unlock(&greth->devlock);
+
+ return retval;
+}
+
+static void greth_clean_tx(struct net_device *dev)
+{
+ struct greth_private *greth;
+ struct greth_bd *bdp;
+ u32 stat;
+
+ greth = netdev_priv(dev);
+
+ while (1) {
+ bdp = greth->tx_bd_base + greth->tx_last;
+ GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+ mb();
+ stat = greth_read_bd(&bdp->stat);
+
+ if (unlikely(stat & GRETH_BD_EN))
+ break;
+
+ if (greth->tx_free == GRETH_TXBD_NUM)
+ break;
+
+ /* Check status for errors */
+ if (unlikely(stat & GRETH_TXBD_STATUS)) {
+ dev->stats.tx_errors++;
+ if (stat & GRETH_TXBD_ERR_AL)
+ dev->stats.tx_aborted_errors++;
+ if (stat & GRETH_TXBD_ERR_UE)
+ dev->stats.tx_fifo_errors++;
+ }
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];
+ greth->tx_last = NEXT_TX(greth->tx_last);
+ greth->tx_free++;
+ }
+
+ if (greth->tx_free > 0) {
+ netif_wake_queue(dev);
+ }
+
+}
+
+static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
+{
+ /* Check status for errors */
+ if (unlikely(stat & GRETH_TXBD_STATUS)) {
+ dev->stats.tx_errors++;
+ if (stat & GRETH_TXBD_ERR_AL)
+ dev->stats.tx_aborted_errors++;
+ if (stat & GRETH_TXBD_ERR_UE)
+ dev->stats.tx_fifo_errors++;
+ if (stat & GRETH_TXBD_ERR_LC)
+ dev->stats.tx_aborted_errors++;
+ }
+ dev->stats.tx_packets++;
+}
+
+static void greth_clean_tx_gbit(struct net_device *dev)
+{
+ struct greth_private *greth;
+ struct greth_bd *bdp, *bdp_last_frag;
+ struct sk_buff *skb;
+ u32 stat;
+ int nr_frags, i;
+
+ greth = netdev_priv(dev);
+
+ while (greth->tx_free < GRETH_TXBD_NUM) {
+
+ skb = greth->tx_skbuff[greth->tx_last];
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ /* We only clean fully completed SKBs */
+ bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
+
+ GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+ mb();
+ stat = greth_read_bd(&bdp_last_frag->stat);
+
+ if (stat & GRETH_BD_EN)
+ break;
+
+ greth->tx_skbuff[greth->tx_last] = NULL;
+
+ greth_update_tx_stats(dev, stat);
+ dev->stats.tx_bytes += skb->len;
+
+ bdp = greth->tx_bd_base + greth->tx_last;
+
+ greth->tx_last = NEXT_TX(greth->tx_last);
+
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&bdp->addr),
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
+
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ bdp = greth->tx_bd_base + greth->tx_last;
+
+ dma_unmap_page(greth->dev,
+ greth_read_bd(&bdp->addr),
+ frag->size,
+ DMA_TO_DEVICE);
+
+ greth->tx_last = NEXT_TX(greth->tx_last);
+ }
+ greth->tx_free += nr_frags+1;
+ dev_kfree_skb(skb);
+ }
+
+ if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
+ netif_wake_queue(dev);
+}
+
+static int greth_rx(struct net_device *dev, int limit)
+{
+ struct greth_private *greth;
+ struct greth_bd *bdp;
+ struct sk_buff *skb;
+ int pkt_len;
+ int bad, count;
+ u32 status, dma_addr;
+ unsigned long flags;
+
+ greth = netdev_priv(dev);
+
+ for (count = 0; count < limit; ++count) {
+
+ bdp = greth->rx_bd_base + greth->rx_cur;
+ GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+ mb();
+ status = greth_read_bd(&bdp->stat);
+
+ if (unlikely(status & GRETH_BD_EN)) {
+ break;
+ }
+
+ dma_addr = greth_read_bd(&bdp->addr);
+ bad = 0;
+
+ /* Check status for errors. */
+ if (unlikely(status & GRETH_RXBD_STATUS)) {
+ if (status & GRETH_RXBD_ERR_FT) {
+ dev->stats.rx_length_errors++;
+ bad = 1;
+ }
+ if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
+ dev->stats.rx_frame_errors++;
+ bad = 1;
+ }
+ if (status & GRETH_RXBD_ERR_CRC) {
+ dev->stats.rx_crc_errors++;
+ bad = 1;
+ }
+ }
+ if (unlikely(bad)) {
+ dev->stats.rx_errors++;
+
+ } else {
+
+ pkt_len = status & GRETH_BD_LEN;
+
+ skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
+
+ if (unlikely(skb == NULL)) {
+
+ if (net_ratelimit())
+ dev_warn(&dev->dev, "low on memory - " "packet dropped\n");
+
+ dev->stats.rx_dropped++;
+
+ } else {
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb->dev = dev;
+
+ dma_sync_single_for_cpu(greth->dev,
+ dma_addr,
+ pkt_len,
+ DMA_FROM_DEVICE);
+
+ if (netif_msg_pktdata(greth))
+ greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);
+
+ memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ dev->stats.rx_bytes += pkt_len;
+ dev->stats.rx_packets++;
+ netif_receive_skb(skb);
+ }
+ }
+
+ status = GRETH_BD_EN | GRETH_BD_IE;
+ if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
+ status |= GRETH_BD_WR;
+ }
+
+ wmb();
+ greth_write_bd(&bdp->stat, status);
+
+ dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
+
+ spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
+ greth_enable_rx(greth);
+ spin_unlock_irqrestore(&greth->devlock, flags);
+
+ greth->rx_cur = NEXT_RX(greth->rx_cur);
+ }
+
+ return count;
+}
+
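+/* A frame is reported as hardware checksummed unless it is an IP
+ * fragment or the MAC flagged a checksum error for a recognized
+ * IP, UDP or TCP header.
+ */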
+static inline int hw_checksummed(u32 status)
+{
+
+ if (status & GRETH_RXBD_IP_FRAG)
+ return 0;
+
+ if (status & GRETH_RXBD_IP && status & GRETH_RXBD_IP_CSERR)
+ return 0;
+
+ if (status & GRETH_RXBD_UDP && status & GRETH_RXBD_UDP_CSERR)
+ return 0;
+
+ if (status & GRETH_RXBD_TCP && status & GRETH_RXBD_TCP_CSERR)
+ return 0;
+
+ return 1;
+}
+
+static int greth_rx_gbit(struct net_device *dev, int limit)
+{
+ struct greth_private *greth;
+ struct greth_bd *bdp;
+ struct sk_buff *skb, *newskb;
+ int pkt_len;
+ int bad, count = 0;
+ u32 status, dma_addr;
+ unsigned long flags;
+
+ greth = netdev_priv(dev);
+
+ for (count = 0; count < limit; ++count) {
+
+ bdp = greth->rx_bd_base + greth->rx_cur;
+ skb = greth->rx_skbuff[greth->rx_cur];
+ GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+ mb();
+ status = greth_read_bd(&bdp->stat);
+ bad = 0;
+
+ if (status & GRETH_BD_EN)
+ break;
+
+ /* Check status for errors. */
+ if (unlikely(status & GRETH_RXBD_STATUS)) {
+
+ if (status & GRETH_RXBD_ERR_FT) {
+ dev->stats.rx_length_errors++;
+ bad = 1;
+ } else if (status &
+ (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
+ dev->stats.rx_frame_errors++;
+ bad = 1;
+ } else if (status & GRETH_RXBD_ERR_CRC) {
+ dev->stats.rx_crc_errors++;
+ bad = 1;
+ }
+ }
+
+ /* Allocate a new skb to replace the current one; not needed
+ * if the current skb can be reused */
+ if (!bad && (newskb=netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
+ skb_reserve(newskb, NET_IP_ALIGN);
+
+ dma_addr = dma_map_single(greth->dev,
+ newskb->data,
+ MAX_FRAME_SIZE + NET_IP_ALIGN,
+ DMA_FROM_DEVICE);
+
+ if (!dma_mapping_error(greth->dev, dma_addr)) {
+ /* Process the incoming frame. */
+ pkt_len = status & GRETH_BD_LEN;
+
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&bdp->addr),
+ MAX_FRAME_SIZE + NET_IP_ALIGN,
+ DMA_FROM_DEVICE);
+
+ if (netif_msg_pktdata(greth))
+ greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);
+
+ skb_put(skb, pkt_len);
+
+ if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ netif_receive_skb(skb);
+
+ greth->rx_skbuff[greth->rx_cur] = newskb;
+ greth_write_bd(&bdp->addr, dma_addr);
+ } else {
+ if (net_ratelimit())
+ dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
+ dev_kfree_skb(newskb);
+ /* reusing current skb, so it is a drop */
+ dev->stats.rx_dropped++;
+ }
+ } else if (bad) {
+ /* Bad Frame transfer, the skb is reused */
+ dev->stats.rx_dropped++;
+ } else {
+ /* Failed to allocate a new skb. This is rather stupid,
+ * but the current "filled" skb is reused, as if the
+ * transfer had failed. One could argue that RX descriptor
+ * table handling should be divided into cleaning and
+ * filling stages, as in the TX part of the driver.
+ */
+ if (net_ratelimit())
+ dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
+ /* reusing current skb, so it is a drop */
+ dev->stats.rx_dropped++;
+ }
+
+ status = GRETH_BD_EN | GRETH_BD_IE;
+ if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
+ status |= GRETH_BD_WR;
+ }
+
+ wmb();
+ greth_write_bd(&bdp->stat, status);
+ spin_lock_irqsave(&greth->devlock, flags);
+ greth_enable_rx(greth);
+ spin_unlock_irqrestore(&greth->devlock, flags);
+ greth->rx_cur = NEXT_RX(greth->rx_cur);
+ }
+
+ return count;
+
+}
+
+static int greth_poll(struct napi_struct *napi, int budget)
+{
+ struct greth_private *greth;
+ int work_done = 0;
+ unsigned long flags;
+ u32 mask, ctrl;
+ greth = container_of(napi, struct greth_private, napi);
+
+restart_txrx_poll:
+ if (netif_queue_stopped(greth->netdev)) {
+ if (greth->gbit_mac)
+ greth_clean_tx_gbit(greth->netdev);
+ else
+ greth_clean_tx(greth->netdev);
+ }
+
+ if (greth->gbit_mac) {
+ work_done += greth_rx_gbit(greth->netdev, budget - work_done);
+ } else {
+ work_done += greth_rx(greth->netdev, budget - work_done);
+ }
+
+ if (work_done < budget) {
+
+ spin_lock_irqsave(&greth->devlock, flags);
+
+ ctrl = GRETH_REGLOAD(greth->regs->control);
+ if (netif_queue_stopped(greth->netdev)) {
+ GRETH_REGSAVE(greth->regs->control,
+ ctrl | GRETH_TXI | GRETH_RXI);
+ mask = GRETH_INT_RX | GRETH_INT_RE |
+ GRETH_INT_TX | GRETH_INT_TE;
+ } else {
+ GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
+ mask = GRETH_INT_RX | GRETH_INT_RE;
+ }
+
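+ /* Re-check the status register after re-enabling interrupts:
+ * an event may have slipped in before the enable took effect,
+ * in which case interrupts are disabled again and another
+ * poll round is done.
+ */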
+ if (GRETH_REGLOAD(greth->regs->status) & mask) {
+ GRETH_REGSAVE(greth->regs->control, ctrl);
+ spin_unlock_irqrestore(&greth->devlock, flags);
+ goto restart_txrx_poll;
+ } else {
+ __napi_complete(napi);
+ spin_unlock_irqrestore(&greth->devlock, flags);
+ }
+ }
+
+ return work_done;
+}
+
+static int greth_set_mac_add(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct greth_private *greth;
+ struct greth_regs *regs;
+
+ greth = netdev_priv(dev);
+ regs = (struct greth_regs *) greth->regs;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
+ GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
+ dev->dev_addr[4] << 8 | dev->dev_addr[5]);
+
+ return 0;
+}
+
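+/* The multicast filter is a 64-bit hash spread over the hash_msb and
+ * hash_lsb registers; the bit index is the low six bits of the CRC-32
+ * of the station address.
+ */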
+static u32 greth_hash_get_index(__u8 *addr)
+{
+ return (ether_crc(6, addr)) & 0x3F;
+}
+
+static void greth_set_hash_filter(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ struct greth_private *greth = netdev_priv(dev);
+ struct greth_regs *regs = (struct greth_regs *) greth->regs;
+ u32 mc_filter[2];
+ unsigned int bitnr;
+
+ mc_filter[0] = mc_filter[1] = 0;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ bitnr = greth_hash_get_index(ha->addr);
+ mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
+ }
+
+ GRETH_REGSAVE(regs->hash_msb, mc_filter[1]);
+ GRETH_REGSAVE(regs->hash_lsb, mc_filter[0]);
+}
+
+static void greth_set_multicast_list(struct net_device *dev)
+{
+ int cfg;
+ struct greth_private *greth = netdev_priv(dev);
+ struct greth_regs *regs = (struct greth_regs *) greth->regs;
+
+ cfg = GRETH_REGLOAD(regs->control);
+ if (dev->flags & IFF_PROMISC)
+ cfg |= GRETH_CTRL_PR;
+ else
+ cfg &= ~GRETH_CTRL_PR;
+
+ if (greth->multicast) {
+ if (dev->flags & IFF_ALLMULTI) {
+ GRETH_REGSAVE(regs->hash_msb, -1);
+ GRETH_REGSAVE(regs->hash_lsb, -1);
+ cfg |= GRETH_CTRL_MCEN;
+ GRETH_REGSAVE(regs->control, cfg);
+ return;
+ }
+
+ if (netdev_mc_empty(dev)) {
+ cfg &= ~GRETH_CTRL_MCEN;
+ GRETH_REGSAVE(regs->control, cfg);
+ return;
+ }
+
+ /* Setup multicast filter */
+ greth_set_hash_filter(dev);
+ cfg |= GRETH_CTRL_MCEN;
+ }
+ GRETH_REGSAVE(regs->control, cfg);
+}
+
+static u32 greth_get_msglevel(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ return greth->msg_enable;
+}
+
+static void greth_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ greth->msg_enable = value;
+}
+
+static int greth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct phy_device *phy = greth->phy;
+
+ if (!phy)
+ return -ENODEV;
+
+ return phy_ethtool_gset(phy, cmd);
+}
+
+static int greth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct phy_device *phy = greth->phy;
+
+ if (!phy)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phy, cmd);
+}
+
+static int greth_get_regs_len(struct net_device *dev)
+{
+ return sizeof(struct greth_regs);
+}
+
+static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct greth_private *greth = netdev_priv(dev);
+
+ strncpy(info->driver, dev_driver_string(greth->dev), 32);
+ strncpy(info->version, "revision: 1.0", 32);
+ strncpy(info->bus_info, greth->dev->bus->name, 32);
+ strncpy(info->fw_version, "N/A", 32);
+ info->eedump_len = 0;
+ info->regdump_len = sizeof(struct greth_regs);
+}
+
+static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+ int i;
+ struct greth_private *greth = netdev_priv(dev);
+ u32 __iomem *greth_regs = (u32 __iomem *) greth->regs;
+ u32 *buff = p;
+
+ for (i = 0; i < sizeof(struct greth_regs) / sizeof(u32); i++)
+ buff[i] = greth_read_bd(&greth_regs[i]);
+}
+
+static const struct ethtool_ops greth_ethtool_ops = {
+ .get_msglevel = greth_get_msglevel,
+ .set_msglevel = greth_set_msglevel,
+ .get_settings = greth_get_settings,
+ .set_settings = greth_set_settings,
+ .get_drvinfo = greth_get_drvinfo,
+ .get_regs_len = greth_get_regs_len,
+ .get_regs = greth_get_regs,
+ .get_link = ethtool_op_get_link,
+};
+
+static struct net_device_ops greth_netdev_ops = {
+ .ndo_open = greth_open,
+ .ndo_stop = greth_close,
+ .ndo_start_xmit = greth_start_xmit,
+ .ndo_set_mac_address = greth_set_mac_add,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static inline int wait_for_mdio(struct greth_private *greth)
+{
+ unsigned long timeout = jiffies + 4*HZ/100;
+ while (GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_BUSY) {
+ if (time_after(jiffies, timeout))
+ return 0;
+ }
+ return 1;
+}
+
+static int greth_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct greth_private *greth = bus->priv;
+ int data;
+
+ if (!wait_for_mdio(greth))
+ return -EBUSY;
+
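+ /* MDIO register layout, as implied by the masks used here: read
+ * data in bits 31:16, PHY address in bits 15:11, register address
+ * in bits 10:6, bit 1 triggers a read and bit 0 a write.
+ */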
+ GRETH_REGSAVE(greth->regs->mdio, ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2);
+
+ if (!wait_for_mdio(greth))
+ return -EBUSY;
+
+ if (!(GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_NVALID)) {
+ data = (GRETH_REGLOAD(greth->regs->mdio) >> 16) & 0xFFFF;
+ return data;
+
+ } else {
+ return -1;
+ }
+}
+
+static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct greth_private *greth = bus->priv;
+
+ if (!wait_for_mdio(greth))
+ return -EBUSY;
+
+ GRETH_REGSAVE(greth->regs->mdio,
+ ((val & 0xFFFF) << 16) | ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 1);
+
+ if (!wait_for_mdio(greth))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int greth_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+static void greth_link_change(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct phy_device *phydev = greth->phy;
+ unsigned long flags;
+ int status_change = 0;
+ u32 ctrl;
+
+ spin_lock_irqsave(&greth->devlock, flags);
+
+ if (phydev->link) {
+
+ if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
+ ctrl = GRETH_REGLOAD(greth->regs->control) &
+ ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);
+
+ if (phydev->duplex)
+ ctrl |= GRETH_CTRL_FD;
+
+ if (phydev->speed == SPEED_100)
+ ctrl |= GRETH_CTRL_SP;
+ else if (phydev->speed == SPEED_1000)
+ ctrl |= GRETH_CTRL_GB;
+
+ GRETH_REGSAVE(greth->regs->control, ctrl);
+ greth->speed = phydev->speed;
+ greth->duplex = phydev->duplex;
+ status_change = 1;
+ }
+ }
+
+ if (phydev->link != greth->link) {
+ if (!phydev->link) {
+ greth->speed = 0;
+ greth->duplex = -1;
+ }
+ greth->link = phydev->link;
+
+ status_change = 1;
+ }
+
+ spin_unlock_irqrestore(&greth->devlock, flags);
+
+ if (status_change) {
+ if (phydev->link)
+ pr_debug("%s: link up (%d/%s)\n",
+ dev->name, phydev->speed,
+ DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
+ else
+ pr_debug("%s: link down\n", dev->name);
+ }
+}
+
+static int greth_mdio_probe(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct phy_device *phy = NULL;
+ int ret;
+
+ /* Find the first PHY */
+ phy = phy_find_first(greth->mdio);
+
+ if (!phy) {
+ if (netif_msg_probe(greth))
+ dev_err(&dev->dev, "no PHY found\n");
+ return -ENXIO;
+ }
+
+ ret = phy_connect_direct(dev, phy, &greth_link_change,
+ 0, greth->gbit_mac ?
+ PHY_INTERFACE_MODE_GMII :
+ PHY_INTERFACE_MODE_MII);
+ if (ret) {
+ if (netif_msg_ifup(greth))
+ dev_err(&dev->dev, "could not attach to PHY\n");
+ return ret;
+ }
+
+ if (greth->gbit_mac)
+ phy->supported &= PHY_GBIT_FEATURES;
+ else
+ phy->supported &= PHY_BASIC_FEATURES;
+
+ phy->advertising = phy->supported;
+
+ greth->link = 0;
+ greth->speed = 0;
+ greth->duplex = -1;
+ greth->phy = phy;
+
+ return 0;
+}
+
+static inline int phy_aneg_done(struct phy_device *phydev)
+{
+ int retval;
+
+ retval = phy_read(phydev, MII_BMSR);
+
+ return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
+}
+
+static int greth_mdio_init(struct greth_private *greth)
+{
+ int ret, phy;
+ unsigned long timeout;
+
+ greth->mdio = mdiobus_alloc();
+ if (!greth->mdio) {
+ return -ENOMEM;
+ }
+
+ greth->mdio->name = "greth-mdio";
+ snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
+ greth->mdio->read = greth_mdio_read;
+ greth->mdio->write = greth_mdio_write;
+ greth->mdio->reset = greth_mdio_reset;
+ greth->mdio->priv = greth;
+
+ greth->mdio->irq = greth->mdio_irqs;
+
+ for (phy = 0; phy < PHY_MAX_ADDR; phy++)
+ greth->mdio->irq[phy] = PHY_POLL;
+
+ ret = mdiobus_register(greth->mdio);
+ if (ret) {
+ goto error;
+ }
+
+ ret = greth_mdio_probe(greth->netdev);
+ if (ret) {
+ if (netif_msg_probe(greth))
+ dev_err(&greth->netdev->dev, "failed to probe MDIO bus\n");
+ goto unreg_mdio;
+ }
+
+ phy_start(greth->phy);
+
+ /* If the Ethernet debug link (EDCL) is used, make autoneg happen right away */
+ if (greth->edcl && greth_edcl == 1) {
+ phy_start_aneg(greth->phy);
+ timeout = jiffies + 6*HZ;
+ while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) {
+ }
+ genphy_read_status(greth->phy);
+ greth_link_change(greth->netdev);
+ }
+
+ return 0;
+
+unreg_mdio:
+ mdiobus_unregister(greth->mdio);
+error:
+ mdiobus_free(greth->mdio);
+ return ret;
+}
+
+/* Initialize the GRETH MAC */
+static int __devinit greth_of_probe(struct platform_device *ofdev)
+{
+ struct net_device *dev;
+ struct greth_private *greth;
+ struct greth_regs *regs;
+
+ int i;
+ int err;
+ int tmp;
+ unsigned long timeout;
+
+ dev = alloc_etherdev(sizeof(struct greth_private));
+
+ if (dev == NULL)
+ return -ENOMEM;
+
+ greth = netdev_priv(dev);
+ greth->netdev = dev;
+ greth->dev = &ofdev->dev;
+
+ if (greth_debug > 0)
+ greth->msg_enable = greth_debug;
+ else
+ greth->msg_enable = GRETH_DEF_MSG_ENABLE;
+
+ spin_lock_init(&greth->devlock);
+
+ greth->regs = of_ioremap(&ofdev->resource[0], 0,
+ resource_size(&ofdev->resource[0]),
+ "grlib-greth regs");
+
+ if (greth->regs == NULL) {
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "ioremap failure.\n");
+ err = -EIO;
+ goto error1;
+ }
+
+ regs = (struct greth_regs *) greth->regs;
+ greth->irq = ofdev->archdata.irqs[0];
+
+ dev_set_drvdata(greth->dev, dev);
+ SET_NETDEV_DEV(dev, greth->dev);
+
+ if (netif_msg_probe(greth))
+ dev_dbg(greth->dev, "reseting controller.\n");
+
+ /* Reset the controller. */
+ GRETH_REGSAVE(regs->control, GRETH_RESET);
+
+ /* Wait for MAC to reset itself */
+ timeout = jiffies + HZ/100;
+ while (GRETH_REGLOAD(regs->control) & GRETH_RESET) {
+ if (time_after(jiffies, timeout)) {
+ err = -EIO;
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "timeout when waiting for reset.\n");
+ goto error2;
+ }
+ }
+
+ /* Get default PHY address */
+ greth->phyaddr = (GRETH_REGLOAD(regs->mdio) >> 11) & 0x1F;
+
+ /* Check if we have GBIT capable MAC */
+ tmp = GRETH_REGLOAD(regs->control);
+ greth->gbit_mac = (tmp >> 27) & 1;
+
+ /* Check for multicast capability */
+ greth->multicast = (tmp >> 25) & 1;
+
+ greth->edcl = (tmp >> 31) & 1;
+
+ /* If we have EDCL we disable the EDCL speed-duplex FSM so
+ * it doesn't interfere with the software */
+ if (greth->edcl != 0)
+ GRETH_REGORIN(regs->control, GRETH_CTRL_DISDUPLEX);
+
+ /* Check if MAC can handle MDIO interrupts */
+ greth->mdio_int_en = (tmp >> 26) & 1;
+
+ err = greth_mdio_init(greth);
+ if (err) {
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "failed to register MDIO bus\n");
+ goto error2;
+ }
+
+ /* Allocate TX descriptor ring in coherent memory */
+ greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
+ 1024,
+ &greth->tx_bd_base_phys,
+ GFP_KERNEL);
+
+ if (!greth->tx_bd_base) {
+ if (netif_msg_probe(greth))
+ dev_err(&dev->dev, "could not allocate descriptor memory.\n");
+ err = -ENOMEM;
+ goto error3;
+ }
+
+ memset(greth->tx_bd_base, 0, 1024);
+
+ /* Allocate RX descriptor ring in coherent memory */
+ greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
+ 1024,
+ &greth->rx_bd_base_phys,
+ GFP_KERNEL);
+
+ if (!greth->rx_bd_base) {
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "could not allocate descriptor memory.\n");
+ err = -ENOMEM;
+ goto error4;
+ }
+
+ memset(greth->rx_bd_base, 0, 1024);
+
+ /* Get MAC address from: module param, OF property or ID prom */
+ for (i = 0; i < 6; i++) {
+ if (macaddr[i] != 0)
+ break;
+ }
+ if (i == 6) {
+ const unsigned char *addr;
+ int len;
+ addr = of_get_property(ofdev->dev.of_node, "local-mac-address",
+ &len);
+ if (addr != NULL && len == 6) {
+ for (i = 0; i < 6; i++)
+ macaddr[i] = (unsigned int) addr[i];
+ } else {
+#ifdef CONFIG_SPARC
+ for (i = 0; i < 6; i++)
+ macaddr[i] = (unsigned int) idprom->id_ethaddr[i];
+#endif
+ }
+ }
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = macaddr[i];
+
+ macaddr[5]++;
+
+ if (!is_valid_ether_addr(&dev->dev_addr[0])) {
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "no valid ethernet address, aborting.\n");
+ err = -EINVAL;
+ goto error5;
+ }
+
+ GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
+ GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
+ dev->dev_addr[4] << 8 | dev->dev_addr[5]);
+
+ /* Clear all pending interrupts except PHY irq */
+ GRETH_REGSAVE(regs->status, 0xFF);
+
+ if (greth->gbit_mac) {
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_RXCSUM;
+ dev->features = dev->hw_features | NETIF_F_HIGHDMA;
+ greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit;
+ }
+
+ if (greth->multicast) {
+ greth_netdev_ops.ndo_set_rx_mode = greth_set_multicast_list;
+ dev->flags |= IFF_MULTICAST;
+ } else {
+ dev->flags &= ~IFF_MULTICAST;
+ }
+
+ dev->netdev_ops = &greth_netdev_ops;
+ dev->ethtool_ops = &greth_ethtool_ops;
+
+ err = register_netdev(dev);
+ if (err) {
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "netdevice registration failed.\n");
+ goto error5;
+ }
+
+ /* setup NAPI */
+ netif_napi_add(dev, &greth->napi, greth_poll, 64);
+
+ return 0;
+
+error5:
+ dma_free_coherent(greth->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
+error4:
+ dma_free_coherent(greth->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
+error3:
+ mdiobus_unregister(greth->mdio);
+error2:
+ of_iounmap(&ofdev->resource[0], greth->regs, resource_size(&ofdev->resource[0]));
+error1:
+ free_netdev(dev);
+ return err;
+}
+
+static int __devexit greth_of_remove(struct platform_device *of_dev)
+{
+ struct net_device *ndev = dev_get_drvdata(&of_dev->dev);
+ struct greth_private *greth = netdev_priv(ndev);
+
+ /* Free descriptor areas */
+ dma_free_coherent(&of_dev->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
+
+ dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
+
+ dev_set_drvdata(&of_dev->dev, NULL);
+
+ if (greth->phy)
+ phy_stop(greth->phy);
+ mdiobus_unregister(greth->mdio);
+
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+
+ of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));
+
+ return 0;
+}
+
+static struct of_device_id greth_of_match[] = {
+ {
+ .name = "GAISLER_ETHMAC",
+ },
+ {
+ .name = "01_01d",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, greth_of_match);
+
+static struct platform_driver greth_of_driver = {
+ .driver = {
+ .name = "grlib-greth",
+ .owner = THIS_MODULE,
+ .of_match_table = greth_of_match,
+ },
+ .probe = greth_of_probe,
+ .remove = __devexit_p(greth_of_remove),
+};
+
+static int __init greth_init(void)
+{
+ return platform_driver_register(&greth_of_driver);
+}
+
+static void __exit greth_cleanup(void)
+{
+ platform_driver_unregister(&greth_of_driver);
+}
+
+module_init(greth_init);
+module_exit(greth_cleanup);
+
+MODULE_AUTHOR("Aeroflex Gaisler AB.");
+MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h
new file mode 100644
index 000000000000..232a622a85b7
--- /dev/null
+++ b/drivers/net/ethernet/aeroflex/greth.h
@@ -0,0 +1,142 @@
+#ifndef GRETH_H
+#define GRETH_H
+
+#include <linux/phy.h>
+
+/* Register bits and masks */
+#define GRETH_RESET 0x40
+#define GRETH_MII_BUSY 0x8
+#define GRETH_MII_NVALID 0x10
+
+#define GRETH_CTRL_FD 0x10
+#define GRETH_CTRL_PR 0x20
+#define GRETH_CTRL_SP 0x80
+#define GRETH_CTRL_GB 0x100
+#define GRETH_CTRL_PSTATIEN 0x400
+#define GRETH_CTRL_MCEN 0x800
+#define GRETH_CTRL_DISDUPLEX 0x1000
+#define GRETH_STATUS_PHYSTAT 0x100
+
+#define GRETH_BD_EN 0x800
+#define GRETH_BD_WR 0x1000
+#define GRETH_BD_IE 0x2000
+#define GRETH_BD_LEN 0x7FF
+
+#define GRETH_TXEN 0x1
+#define GRETH_INT_TE 0x2
+#define GRETH_INT_TX 0x8
+#define GRETH_TXI 0x4
+#define GRETH_TXBD_STATUS 0x0001C000
+#define GRETH_TXBD_MORE 0x20000
+#define GRETH_TXBD_IPCS 0x40000
+#define GRETH_TXBD_TCPCS 0x80000
+#define GRETH_TXBD_UDPCS 0x100000
+#define GRETH_TXBD_CSALL (GRETH_TXBD_IPCS | GRETH_TXBD_TCPCS | GRETH_TXBD_UDPCS)
+#define GRETH_TXBD_ERR_LC 0x10000
+#define GRETH_TXBD_ERR_UE 0x4000
+#define GRETH_TXBD_ERR_AL 0x8000
+
+#define GRETH_INT_RE 0x1
+#define GRETH_INT_RX 0x4
+#define GRETH_RXEN 0x2
+#define GRETH_RXI 0x8
+#define GRETH_RXBD_STATUS 0xFFFFC000
+#define GRETH_RXBD_ERR_AE 0x4000
+#define GRETH_RXBD_ERR_FT 0x8000
+#define GRETH_RXBD_ERR_CRC 0x10000
+#define GRETH_RXBD_ERR_OE 0x20000
+#define GRETH_RXBD_ERR_LE 0x40000
+#define GRETH_RXBD_IP 0x80000
+#define GRETH_RXBD_IP_CSERR 0x100000
+#define GRETH_RXBD_UDP 0x200000
+#define GRETH_RXBD_UDP_CSERR 0x400000
+#define GRETH_RXBD_TCP 0x800000
+#define GRETH_RXBD_TCP_CSERR 0x1000000
+#define GRETH_RXBD_IP_FRAG 0x2000000
+#define GRETH_RXBD_MCAST 0x4000000
+
+/* Descriptor parameters */
+#define GRETH_TXBD_NUM 128
+#define GRETH_TXBD_NUM_MASK (GRETH_TXBD_NUM-1)
+#define GRETH_TX_BUF_SIZE 2048
+#define GRETH_RXBD_NUM 128
+#define GRETH_RXBD_NUM_MASK (GRETH_RXBD_NUM-1)
+#define GRETH_RX_BUF_SIZE 2048
+
+/* Buffers per page */
+#define GRETH_RX_BUF_PPGAE (PAGE_SIZE/GRETH_RX_BUF_SIZE)
+#define GRETH_TX_BUF_PPGAE (PAGE_SIZE/GRETH_TX_BUF_SIZE)
+
+/* How many pages are needed for buffers */
+#define GRETH_RX_BUF_PAGE_NUM (GRETH_RXBD_NUM/GRETH_RX_BUF_PPGAE)
+#define GRETH_TX_BUF_PAGE_NUM (GRETH_TXBD_NUM/GRETH_TX_BUF_PPGAE)
+
+/* Buffer size.
+ * The Gbit MAC uses the tagged maximum frame size, which is 1518 bytes
+ * excluding CRC. Set to 1520 to keep all buffers word aligned for the
+ * non-Gbit MAC.
+ */
+#define MAX_FRAME_SIZE 1520
+
+/* GRETH APB registers */
+struct greth_regs {
+ u32 control;
+ u32 status;
+ u32 esa_msb;
+ u32 esa_lsb;
+ u32 mdio;
+ u32 tx_desc_p;
+ u32 rx_desc_p;
+ u32 edclip;
+ u32 hash_msb;
+ u32 hash_lsb;
+};
+
+/* GRETH buffer descriptor */
+struct greth_bd {
+ u32 stat;
+ u32 addr;
+};
+
+struct greth_private {
+ struct sk_buff *rx_skbuff[GRETH_RXBD_NUM];
+ struct sk_buff *tx_skbuff[GRETH_TXBD_NUM];
+
+ unsigned char *tx_bufs[GRETH_TXBD_NUM];
+ unsigned char *rx_bufs[GRETH_RXBD_NUM];
+ u16 tx_bufs_length[GRETH_TXBD_NUM];
+
+ u16 tx_next;
+ u16 tx_last;
+ u16 tx_free;
+ u16 rx_cur;
+
+ struct greth_regs *regs; /* Address of controller registers. */
+ struct greth_bd *rx_bd_base; /* Address of Rx BDs. */
+ struct greth_bd *tx_bd_base; /* Address of Tx BDs. */
+ dma_addr_t rx_bd_base_phys;
+ dma_addr_t tx_bd_base_phys;
+
+ int irq;
+
+ struct device *dev; /* Pointer to platform_device->dev */
+ struct net_device *netdev;
+ struct napi_struct napi;
+ spinlock_t devlock;
+
+ struct phy_device *phy;
+ struct mii_bus *mdio;
+ int mdio_irqs[PHY_MAX_ADDR];
+ unsigned int link;
+ unsigned int speed;
+ unsigned int duplex;
+
+ u32 msg_enable;
+
+ u8 phyaddr;
+ u8 multicast;
+ u8 gbit_mac;
+ u8 mdio_int_en;
+ u8 edcl;
+};
+
+#endif
diff --git a/drivers/net/ethernet/alteon/Kconfig b/drivers/net/ethernet/alteon/Kconfig
new file mode 100644
index 000000000000..799a85282070
--- /dev/null
+++ b/drivers/net/ethernet/alteon/Kconfig
@@ -0,0 +1,48 @@
+#
+# Alteon network device configuration
+#
+
+config NET_VENDOR_ALTEON
+ bool "Alteon devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Alteon cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_ALTEON
+
+config ACENIC
+ tristate "Alteon AceNIC/3Com 3C985/NetGear GA620 Gigabit support"
+ depends on PCI
+ ---help---
+ Say Y here if you have an Alteon AceNIC, 3Com 3C985(B), NetGear
+ GA620, SGI Gigabit or Farallon PN9000-SX PCI Gigabit Ethernet
+ adapter. The driver allows for using the Jumbo Frame option (9000
+ bytes/frame) however it requires that your switches can handle this
+ as well. To enable Jumbo Frames, add `mtu 9000' to your ifconfig
+ line.
+
+ To compile this driver as a module, choose M here: the
+ module will be called acenic.
+
+config ACENIC_OMIT_TIGON_I
+ bool "Omit support for old Tigon I based AceNICs"
+ depends on ACENIC
+ ---help---
+ Say Y here if you only have Tigon II based AceNICs and want to leave
+ out support for the older Tigon I based cards which are no longer
+ being sold (ie. the original Alteon AceNIC and 3Com 3C985 (non B
+ version)). This will reduce the size of the driver object by
+ approx. 100KB. If you are not sure whether your card is a Tigon I or a
+ Tigon II, say N here.
+
+ The safe and default value for this is N.
+
+endif # NET_VENDOR_ALTEON
diff --git a/drivers/net/ethernet/alteon/Makefile b/drivers/net/ethernet/alteon/Makefile
new file mode 100644
index 000000000000..a2ca173f2a50
--- /dev/null
+++ b/drivers/net/ethernet/alteon/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Alteon network device drivers.
+#
+
+obj-$(CONFIG_ACENIC) += acenic.o
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
new file mode 100644
index 000000000000..b1a4e8204437
--- /dev/null
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -0,0 +1,3206 @@
+/*
+ * acenic.c: Linux driver for the Alteon AceNIC Gigabit Ethernet card
+ * and other Tigon based cards.
+ *
+ * Copyright 1998-2002 by Jes Sorensen, <jes@trained-monkey.org>.
+ *
+ * Thanks to Alteon and 3Com for providing hardware and documentation
+ * enabling me to write this driver.
+ *
+ * A mailing list for discussing the use of this driver has been
+ * set up; please subscribe to the list if you have any questions
+ * about the driver. Send mail to linux-acenic-help@sunsite.auc.dk to
+ * see how to subscribe.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Additional credits:
+ * Pete Wyckoff <wyckoff@ca.sandia.gov>: Initial Linux/Alpha and trace
+ * dump support. The trace dump support has not been
+ * integrated yet however.
+ * Troy Benjegerdes: Big Endian (PPC) patches.
+ * Nate Stahl: Better out of memory handling and stats support.
+ * Aman Singla: Nasty race between interrupt handler and tx code dealing
+ * with 'testing the tx_ret_csm and setting tx_full'
+ * David S. Miller <davem@redhat.com>: conversion to new PCI dma mapping
+ * infrastructure and Sparc support
+ * Pierrick Pinasseau (CERN): For lending me an Ultra 5 to test the
+ * driver under Linux/Sparc64
+ * Matt Domsch <Matt_Domsch@dell.com>: Detect Alteon 1000baseT cards
+ * ETHTOOL_GDRVINFO support
+ * Chip Salzenberg <chip@valinux.com>: Fix race condition between tx
+ * handler and close() cleanup.
+ * Ken Aaker <kdaaker@rchland.vnet.ibm.com>: Correct check for whether
+ * memory mapped IO is enabled to
+ * make the driver work on RS/6000.
+ * Takayoshi Kouchi <kouchi@hpc.bs1.fc.nec.co.jp>: Identifying problem
+ * where the driver would disable
+ * bus master mode if it had to disable
+ * write and invalidate.
+ * Stephen Hack <stephen_hack@hp.com>: Fixed ace_set_mac_addr for little
+ * endian systems.
+ * Val Henson <vhenson@esscom.com>: Reset Jumbo skb producer and
+ * rx producer index when
+ * flushing the Jumbo ring.
+ * Hans Grobler <grobh@sun.ac.za>: Memory leak fixes in the
+ * driver init path.
+ * Grant Grundler <grundler@cup.hp.com>: PCI write posting fixes.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/sockios.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+#include <linux/if_vlan.h>
+
+#ifdef SIOCETHTOOL
+#include <linux/ethtool.h>
+#endif
+
+#include <net/sock.h>
+#include <net/ip.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+
+#define DRV_NAME "acenic"
+
+#undef INDEX_DEBUG
+
+#ifdef CONFIG_ACENIC_OMIT_TIGON_I
+#define ACE_IS_TIGON_I(ap) 0
+#define ACE_TX_RING_ENTRIES(ap) MAX_TX_RING_ENTRIES
+#else
+#define ACE_IS_TIGON_I(ap) (ap->version == 1)
+#define ACE_TX_RING_ENTRIES(ap) ap->tx_ring_entries
+#endif
+
+#ifndef PCI_VENDOR_ID_ALTEON
+#define PCI_VENDOR_ID_ALTEON 0x12ae
+#endif
+#ifndef PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE
+#define PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE 0x0001
+#define PCI_DEVICE_ID_ALTEON_ACENIC_COPPER 0x0002
+#endif
+#ifndef PCI_DEVICE_ID_3COM_3C985
+#define PCI_DEVICE_ID_3COM_3C985 0x0001
+#endif
+#ifndef PCI_VENDOR_ID_NETGEAR
+#define PCI_VENDOR_ID_NETGEAR 0x1385
+#define PCI_DEVICE_ID_NETGEAR_GA620 0x620a
+#endif
+#ifndef PCI_DEVICE_ID_NETGEAR_GA620T
+#define PCI_DEVICE_ID_NETGEAR_GA620T 0x630a
+#endif
+
+
+/*
+ * Farallon used the DEC vendor ID by mistake and they seem not
+ * to care - stinky!
+ */
+#ifndef PCI_DEVICE_ID_FARALLON_PN9000SX
+#define PCI_DEVICE_ID_FARALLON_PN9000SX 0x1a
+#endif
+#ifndef PCI_DEVICE_ID_FARALLON_PN9100T
+#define PCI_DEVICE_ID_FARALLON_PN9100T 0xfa
+#endif
+#ifndef PCI_VENDOR_ID_SGI
+#define PCI_VENDOR_ID_SGI 0x10a9
+#endif
+#ifndef PCI_DEVICE_ID_SGI_ACENIC
+#define PCI_DEVICE_ID_SGI_ACENIC 0x0009
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(acenic_pci_tbl) = {
+ { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+ { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+ { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C985,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+ { PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+ { PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620T,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+ /*
+ * Farallon used the DEC vendor ID on their cards incorrectly,
+ * then later Alteon's ID.
+ */
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_FARALLON_PN9000SX,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+ { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_FARALLON_PN9100T,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+ { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_ACENIC,
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);
+
+#define ace_sync_irq(irq) synchronize_irq(irq)
+
+#ifndef offset_in_page
+#define offset_in_page(ptr) ((unsigned long)(ptr) & ~PAGE_MASK)
+#endif
+
+#define ACE_MAX_MOD_PARMS 8
+#define BOARD_IDX_STATIC 0
+#define BOARD_IDX_OVERFLOW -1
+
+#include "acenic.h"
+
+/*
+ * These must be defined before the firmware is included.
+ */
+#define MAX_TEXT_LEN 96*1024
+#define MAX_RODATA_LEN 8*1024
+#define MAX_DATA_LEN 2*1024
+
+#ifndef tigon2FwReleaseLocal
+#define tigon2FwReleaseLocal 0
+#endif
+
+/*
+ * This driver currently supports Tigon I and Tigon II based cards
+ * including the Alteon AceNIC, the 3Com 3C985[B] and NetGear
+ * GA620. The driver should also work on the SGI, DEC and Farallon
+ * versions of the card, however I have not been able to test that
+ * myself.
+ *
+ * This card is really neat, it supports receive hardware checksumming
+ * and jumbo frames (up to 9000 bytes) and does a lot of work in the
+ * firmware. Also the programming interface is quite neat, except for
+ * the parts dealing with the i2c eeprom on the card ;-)
+ *
+ * Using jumbo frames:
+ *
+ * To enable jumbo frames, simply specify an mtu between 1500 and 9000
+ * bytes to ifconfig. Jumbo frames can be enabled or disabled at any time
+ * by running `ifconfig eth<X> mtu <MTU>' with <X> being the Ethernet
+ * interface number and <MTU> being the MTU value.
+ *
+ * Module parameters:
+ *
+ * When compiled as a loadable module, the driver allows for a number
+ * of module parameters to be specified. The driver supports the
+ * following module parameters:
+ *
+ * trace=<val> - Firmware trace level. This requires special traced
+ * firmware to replace the firmware supplied with
+ * the driver - for debugging purposes only.
+ *
+ * link=<val> - Link state. Normally you want to use the default link
+ * parameters set by the driver. This can be used to
+ * override these in case your switch doesn't negotiate
+ * the link properly. Valid values are:
+ * 0x0001 - Force half duplex link.
+ * 0x0002 - Do not negotiate line speed with the other end.
+ * 0x0010 - 10Mbit/sec link.
+ * 0x0020 - 100Mbit/sec link.
+ * 0x0040 - 1000Mbit/sec link.
+ * 0x0100 - Do not negotiate flow control.
+ * 0x0200 - Enable RX flow control Y
+ * 0x0400 - Enable TX flow control Y (Tigon II NICs only).
+ * The default value is 0x0270, ie. enable link and flow
+ * control negotiation, negotiating the highest
+ * possible link speed with RX flow control enabled.
+ *
+ * When disabling link speed negotiation, only one link
+ * speed is allowed to be specified!
+ *
+ * tx_coal_tick=<val> - number of coalescing clock ticks (us) allowed
+ * to wait for more packets to arrive before
+ * interrupting the host, from the time the first
+ * packet arrives.
+ *
+ * rx_coal_tick=<val> - number of coalescing clock ticks (us) allowed
+ * to wait for more packets to arrive in the receive ring,
+ * before interrupting the host, after receiving the
+ * first packet in the ring.
+ *
+ * max_tx_desc=<val> - maximum number of transmit descriptors
+ * (packets) transmitted before interrupting the host.
+ *
+ * max_rx_desc=<val> - maximum number of receive descriptors
+ * (packets) received before interrupting the host.
+ *
+ * tx_ratio=<val> - 6 bit value (0 - 63) specifying the split in 64th
+ * increments of the NIC's on-board memory to be used for
+ * transmit and receive buffers. For the 1MB NIC approx. 800KB
+ * is available; on the 1/2MB NIC approx. 300KB is available.
+ * 68KB will always be available as a minimum for both
+ * directions. The default value is a 50/50 split.
+ * dis_pci_mem_inval=<val> - disable PCI memory write and invalidate
+ * operations, default (1) is to always disable this as
+ * that is what Alteon does on NT. I have not been able
+ * to measure any real performance differences with
+ * this on my systems. Set <val>=0 if you want to
+ * enable these operations.
+ *
+ * If you use more than one NIC, separate the parameters for the
+ * individual NICs with commas, e.g. trace=0,0x00001fff,0 if you want to
+ * run tracing on NIC #2 but not on NICs #1 and #3.
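+ *
+ * As another illustrative example, link=0x0040,0x0270 would restrict
+ * NIC #1 to a 1000Mbit/sec link while leaving NIC #2 at the default
+ * link and flow control negotiation settings.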
+ *
+ * TODO:
+ *
+ * - Proper multicast support.
+ * - NIC dump support.
+ * - More tuning parameters.
+ *
+ * The mini ring is used on Tigon II NICs only (note the ap->version >= 2
+ * checks below); it holds small buffers for frames such as TCP ACKs.
+ *
+ * New interrupt handler strategy:
+ *
+ * The old interrupt handler worked using the traditional method of
+ * replacing an skbuff with a new one when a packet arrives. However
+ * the rx rings do not need to contain a static number of buffer
+ * descriptors, thus it makes sense to move the memory allocation out
+ * of the main interrupt handler and do it in a bottom half handler
+ * and only allocate new buffers when the number of buffers in the
+ * ring is below a certain threshold. In order to avoid starving the
+ * NIC under heavy load it is however necessary to force allocation
+ * when hitting a minimum threshold. The strategy for allocation is as
+ * follows:
+ *
+ * RX_LOW_BUF_THRES - allocate buffers in the bottom half
+ * RX_PANIC_LOW_THRES - we are very low on buffers, allocate
+ * the buffers in the interrupt handler
+ * RX_RING_THRES - maximum number of buffers in the rx ring
+ * RX_MINI_THRES - maximum number of buffers in the mini ring
+ * RX_JUMBO_THRES - maximum number of buffers in the jumbo ring
+ *
+ * One advantageous side effect of this allocation approach is that the
+ * entire rx processing can be done without holding any spin lock
+ * since the rx rings and registers are totally independent of the tx
+ * ring and its registers. This of course includes the kmalloc's of
+ * new skb's. Thus start_xmit can run in parallel with rx processing
+ * and the memory allocation on SMP systems.
+ *
+ * Note that running the skb reallocation in a bottom half opens up
+ * another can of races which needs to be handled properly. In
+ * particular it can happen that the interrupt handler tries to run
+ * the reallocation while the bottom half is either running on another
+ * CPU or was interrupted on the same CPU. To get around this the
+ * driver uses bitops to prevent the reallocation routines from being
+ * reentered.
+ *
+ * TX handling can also be done without holding any spin lock (wheee,
+ * this is fun!) since tx_ret_csm is only written to by the interrupt
+ * handler. The case to be aware of is when shutting down the device
+ * and cleaning up where it is necessary to make sure that
+ * start_xmit() is not running while this is happening. Well DaveM
+ * informs me that this case is already protected against ... bye bye
+ * Mr. Spin Lock, it was nice to know you.
+ *
+ * TX interrupts are now partly disabled so the NIC will only generate
+ * TX interrupts for the number of coal ticks, not for the number of
+ * TX packets in the queue. This should reduce the number of TX-only
+ * interrupts seen, i.e. interrupts taken when no RX processing is done.
+ */
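+
+/*
+ * A minimal sketch (kept inside #if 0, never compiled) of the bitops
+ * guard described above, mirroring how the refill paths below are
+ * invoked from ace_interrupt()/ace_tasklet(); example_refill() is a
+ * made-up name, the other symbols are the driver's own.
+ */
+#if 0
+static void example_refill(struct net_device *dev)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ int cur_size = atomic_read(&ap->cur_rx_bufs);
+
+ /* Bit 0 of std_refill_busy serializes refills - only the context
+ * that wins the test_and_set_bit() may touch the ring. */
+ if (cur_size < RX_LOW_STD_THRES &&
+ !test_and_set_bit(0, &ap->std_refill_busy))
+ ace_load_std_rx_ring(dev, RX_RING_SIZE - cur_size);
+ /* ace_load_std_rx_ring() drops the guard via clear_bit(0, ...) */
+}
+#endif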
+
+/*
+ * Threshold values for RX buffer allocation - the low water marks for
+ * when to start refilling the rings are set to 75% of the ring
+ * sizes. It seems to make sense to refill the rings entirely from the
+ * interrupt handler once it gets below the panic threshold; that way
+ * we don't risk the refill being moved to another CPU when the
+ * one running the interrupt handler just got the slab code hot in its
+ * cache.
+ */
+#define RX_RING_SIZE 72
+#define RX_MINI_SIZE 64
+#define RX_JUMBO_SIZE 48
+
+#define RX_PANIC_STD_THRES 16
+#define RX_PANIC_STD_REFILL ((3*RX_PANIC_STD_THRES)/2)
+#define RX_LOW_STD_THRES ((3*RX_RING_SIZE)/4)
+#define RX_PANIC_MINI_THRES 12
+#define RX_PANIC_MINI_REFILL ((3*RX_PANIC_MINI_THRES)/2)
+#define RX_LOW_MINI_THRES ((3*RX_MINI_SIZE)/4)
+#define RX_PANIC_JUMBO_THRES 6
+#define RX_PANIC_JUMBO_REFILL ((3*RX_PANIC_JUMBO_THRES)/2)
+#define RX_LOW_JUMBO_THRES ((3*RX_JUMBO_SIZE)/4)
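+
+/*
+ * With the ring sizes above, the low water marks work out to 54 (std),
+ * 48 (mini) and 36 (jumbo) buffers, and the panic refills to 24, 18
+ * and 9 buffers respectively.
+ */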
+
+
+/*
+ * Size of the mini ring entries; basically these just need to be big
+ * enough to hold TCP ACKs.
+ */
+#define ACE_MINI_SIZE 100
+
+#define ACE_MINI_BUFSIZE ACE_MINI_SIZE
+#define ACE_STD_BUFSIZE (ACE_STD_MTU + ETH_HLEN + 4)
+#define ACE_JUMBO_BUFSIZE (ACE_JUMBO_MTU + ETH_HLEN + 4)
+
+/*
+ * There seems to be a magic difference in the effect between 995 and 996
+ * but little difference between 900 and 995 ... no idea why.
+ *
+ * There is now a default set of tuning parameters which is set, depending
+ * on whether or not the user enables Jumbo frames. It's assumed that if
+ * Jumbo frames are enabled, the user wants optimal tuning for that case.
+ */
+#define DEF_TX_COAL 400 /* 996 */
+#define DEF_TX_MAX_DESC 60 /* was 40 */
+#define DEF_RX_COAL 120 /* 1000 */
+#define DEF_RX_MAX_DESC 25
+#define DEF_TX_RATIO 21 /* 24 */
+
+#define DEF_JUMBO_TX_COAL 20
+#define DEF_JUMBO_TX_MAX_DESC 60
+#define DEF_JUMBO_RX_COAL 30
+#define DEF_JUMBO_RX_MAX_DESC 6
+#define DEF_JUMBO_TX_RATIO 21
+
+#if tigon2FwReleaseLocal < 20001118
+/*
+ * Standard firmware and early modifications duplicate
+ * IRQ load without this flag (coal timer is never reset).
+ * Note that with this flag tx_coal should be less than the
+ * time needed to xmit a full tx ring.
+ * 400usec is not so bad for tx ring size of 128.
+ */
+#define TX_COAL_INTS_ONLY 1 /* worth it */
+#else
+/*
+ * With modified firmware, this is not necessary, but still useful.
+ */
+#define TX_COAL_INTS_ONLY 1
+#endif
+
+#define DEF_TRACE 0
+#define DEF_STAT (2 * TICKS_PER_SEC)
+
+
+static int link_state[ACE_MAX_MOD_PARMS];
+static int trace[ACE_MAX_MOD_PARMS];
+static int tx_coal_tick[ACE_MAX_MOD_PARMS];
+static int rx_coal_tick[ACE_MAX_MOD_PARMS];
+static int max_tx_desc[ACE_MAX_MOD_PARMS];
+static int max_rx_desc[ACE_MAX_MOD_PARMS];
+static int tx_ratio[ACE_MAX_MOD_PARMS];
+static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};
+
+MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
+#ifndef CONFIG_ACENIC_OMIT_TIGON_I
+MODULE_FIRMWARE("acenic/tg1.bin");
+#endif
+MODULE_FIRMWARE("acenic/tg2.bin");
+
+module_param_array_named(link, link_state, int, NULL, 0);
+module_param_array(trace, int, NULL, 0);
+module_param_array(tx_coal_tick, int, NULL, 0);
+module_param_array(max_tx_desc, int, NULL, 0);
+module_param_array(rx_coal_tick, int, NULL, 0);
+module_param_array(max_rx_desc, int, NULL, 0);
+module_param_array(tx_ratio, int, NULL, 0);
+MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state");
+MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level");
+MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait after the first tx descriptor arrives");
+MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait for");
+MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait after the first rx descriptor arrives");
+MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait for");
+MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");
+
+
+static const char version[] __devinitconst =
+ "acenic.c: v0.92 08/05/2002 Jes Sorensen, linux-acenic@SunSITE.dk\n"
+ " http://home.cern.ch/~jes/gige/acenic.html\n";
+
+static int ace_get_settings(struct net_device *, struct ethtool_cmd *);
+static int ace_set_settings(struct net_device *, struct ethtool_cmd *);
+static void ace_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
+
+static const struct ethtool_ops ace_ethtool_ops = {
+ .get_settings = ace_get_settings,
+ .set_settings = ace_set_settings,
+ .get_drvinfo = ace_get_drvinfo,
+};
+
+static void ace_watchdog(struct net_device *dev);
+
+static const struct net_device_ops ace_netdev_ops = {
+ .ndo_open = ace_open,
+ .ndo_stop = ace_close,
+ .ndo_tx_timeout = ace_watchdog,
+ .ndo_get_stats = ace_get_stats,
+ .ndo_start_xmit = ace_start_xmit,
+ .ndo_set_rx_mode = ace_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = ace_set_mac_addr,
+ .ndo_change_mtu = ace_change_mtu,
+};
+
+static int __devinit acenic_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct net_device *dev;
+ struct ace_private *ap;
+ static int boards_found;
+
+ dev = alloc_etherdev(sizeof(struct ace_private));
+ if (dev == NULL) {
+ printk(KERN_ERR "acenic: Unable to allocate "
+ "net_device structure!\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ ap = netdev_priv(dev);
+ ap->pdev = pdev;
+ ap->name = pci_name(pdev);
+
+ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+
+ dev->watchdog_timeo = 5*HZ;
+
+ dev->netdev_ops = &ace_netdev_ops;
+ SET_ETHTOOL_OPS(dev, &ace_ethtool_ops);
+
+ /* we only display this string ONCE */
+ if (!boards_found)
+ printk(version);
+
+ if (pci_enable_device(pdev))
+ goto fail_free_netdev;
+
+ /*
+ * Enable master mode before we start playing with the
+ * pci_command word since pci_set_master() will modify
+ * it.
+ */
+ pci_set_master(pdev);
+
+ pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command);
+
+ /* OpenFirmware on Macs does not set this - DOH.. */
+ if (!(ap->pci_command & PCI_COMMAND_MEMORY)) {
+ printk(KERN_INFO "%s: Enabling PCI Memory Mapped "
+ "access - was not enabled by BIOS/Firmware\n",
+ ap->name);
+ ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY;
+ pci_write_config_word(ap->pdev, PCI_COMMAND,
+ ap->pci_command);
+ wmb();
+ }
+
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ap->pci_latency);
+ if (ap->pci_latency <= 0x40) {
+ ap->pci_latency = 0x40;
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ap->pci_latency);
+ }
+
+ /*
+ * Remap the regs into kernel space - this is abuse of
+ * dev->base_addr since it was meant for I/O port
+ * addresses but who gives a damn.
+ */
+ dev->base_addr = pci_resource_start(pdev, 0);
+ ap->regs = ioremap(dev->base_addr, 0x4000);
+ if (!ap->regs) {
+ printk(KERN_ERR "%s: Unable to map I/O register, "
+ "AceNIC %i will be disabled.\n",
+ ap->name, boards_found);
+ goto fail_free_netdev;
+ }
+
+ switch(pdev->vendor) {
+ case PCI_VENDOR_ID_ALTEON:
+ if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T) {
+ printk(KERN_INFO "%s: Farallon PN9100-T ",
+ ap->name);
+ } else {
+ printk(KERN_INFO "%s: Alteon AceNIC ",
+ ap->name);
+ }
+ break;
+ case PCI_VENDOR_ID_3COM:
+ printk(KERN_INFO "%s: 3Com 3C985 ", ap->name);
+ break;
+ case PCI_VENDOR_ID_NETGEAR:
+ printk(KERN_INFO "%s: NetGear GA620 ", ap->name);
+ break;
+ case PCI_VENDOR_ID_DEC:
+ if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) {
+ printk(KERN_INFO "%s: Farallon PN9000-SX ",
+ ap->name);
+ break;
+ }
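+ /* Fall through: plain DEC boards are reported as SGI AceNICs */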
+ case PCI_VENDOR_ID_SGI:
+ printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
+ break;
+ default:
+ printk(KERN_INFO "%s: Unknown AceNIC ", ap->name);
+ break;
+ }
+
+ printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr);
+ printk("irq %d\n", pdev->irq);
+
+#ifdef CONFIG_ACENIC_OMIT_TIGON_I
+ if ((readl(&ap->regs->HostCtrl) >> 28) == 4) {
+ printk(KERN_ERR "%s: Driver compiled without Tigon I"
+ " support - NIC disabled\n", dev->name);
+ goto fail_uninit;
+ }
+#endif
+
+ if (ace_allocate_descriptors(dev))
+ goto fail_free_netdev;
+
+#ifdef MODULE
+ if (boards_found >= ACE_MAX_MOD_PARMS)
+ ap->board_idx = BOARD_IDX_OVERFLOW;
+ else
+ ap->board_idx = boards_found;
+#else
+ ap->board_idx = BOARD_IDX_STATIC;
+#endif
+
+ if (ace_init(dev))
+ goto fail_free_netdev;
+
+ if (register_netdev(dev)) {
+ printk(KERN_ERR "acenic: device registration failed\n");
+ goto fail_uninit;
+ }
+ ap->name = dev->name;
+
+ if (ap->pci_using_dac)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ pci_set_drvdata(pdev, dev);
+
+ boards_found++;
+ return 0;
+
+ fail_uninit:
+ ace_init_cleanup(dev);
+ fail_free_netdev:
+ free_netdev(dev);
+ return -ENODEV;
+}
+
+static void __devexit acenic_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ short i;
+
+ unregister_netdev(dev);
+
+ writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
+ if (ap->version >= 2)
+ writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
+
+ /*
+ * This clears any pending interrupts
+ */
+ writel(1, &regs->Mb0Lo);
+ readl(&regs->CpuCtrl); /* flush */
+
+ /*
+ * Make sure no other CPUs are processing interrupts
+ * on the card before the buffers are being released.
+ * Otherwise one might experience some `interesting'
+ * effects.
+ *
+ * Then release the RX buffers - jumbo buffers were
+ * already released in ace_close().
+ */
+ ace_sync_irq(dev->irq);
+
+ for (i = 0; i < RX_STD_RING_ENTRIES; i++) {
+ struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;
+
+ if (skb) {
+ struct ring_info *ringp;
+ dma_addr_t mapping;
+
+ ringp = &ap->skb->rx_std_skbuff[i];
+ mapping = dma_unmap_addr(ringp, mapping);
+ pci_unmap_page(ap->pdev, mapping,
+ ACE_STD_BUFSIZE,
+ PCI_DMA_FROMDEVICE);
+
+ ap->rx_std_ring[i].size = 0;
+ ap->skb->rx_std_skbuff[i].skb = NULL;
+ dev_kfree_skb(skb);
+ }
+ }
+
+ if (ap->version >= 2) {
+ for (i = 0; i < RX_MINI_RING_ENTRIES; i++) {
+ struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;
+
+ if (skb) {
+ struct ring_info *ringp;
+ dma_addr_t mapping;
+
+ ringp = &ap->skb->rx_mini_skbuff[i];
+ mapping = dma_unmap_addr(ringp,mapping);
+ pci_unmap_page(ap->pdev, mapping,
+ ACE_MINI_BUFSIZE,
+ PCI_DMA_FROMDEVICE);
+
+ ap->rx_mini_ring[i].size = 0;
+ ap->skb->rx_mini_skbuff[i].skb = NULL;
+ dev_kfree_skb(skb);
+ }
+ }
+ }
+
+ for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
+ struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
+ if (skb) {
+ struct ring_info *ringp;
+ dma_addr_t mapping;
+
+ ringp = &ap->skb->rx_jumbo_skbuff[i];
+ mapping = dma_unmap_addr(ringp, mapping);
+ pci_unmap_page(ap->pdev, mapping,
+ ACE_JUMBO_BUFSIZE,
+ PCI_DMA_FROMDEVICE);
+
+ ap->rx_jumbo_ring[i].size = 0;
+ ap->skb->rx_jumbo_skbuff[i].skb = NULL;
+ dev_kfree_skb(skb);
+ }
+ }
+
+ ace_init_cleanup(dev);
+ free_netdev(dev);
+}
+
+static struct pci_driver acenic_pci_driver = {
+ .name = "acenic",
+ .id_table = acenic_pci_tbl,
+ .probe = acenic_probe_one,
+ .remove = __devexit_p(acenic_remove_one),
+};
+
+static int __init acenic_init(void)
+{
+ return pci_register_driver(&acenic_pci_driver);
+}
+
+static void __exit acenic_exit(void)
+{
+ pci_unregister_driver(&acenic_pci_driver);
+}
+
+module_init(acenic_init);
+module_exit(acenic_exit);
+
+static void ace_free_descriptors(struct net_device *dev)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ int size;
+
+ if (ap->rx_std_ring != NULL) {
+ size = (sizeof(struct rx_desc) *
+ (RX_STD_RING_ENTRIES +
+ RX_JUMBO_RING_ENTRIES +
+ RX_MINI_RING_ENTRIES +
+ RX_RETURN_RING_ENTRIES));
+ pci_free_consistent(ap->pdev, size, ap->rx_std_ring,
+ ap->rx_ring_base_dma);
+ ap->rx_std_ring = NULL;
+ ap->rx_jumbo_ring = NULL;
+ ap->rx_mini_ring = NULL;
+ ap->rx_return_ring = NULL;
+ }
+ if (ap->evt_ring != NULL) {
+ size = (sizeof(struct event) * EVT_RING_ENTRIES);
+ pci_free_consistent(ap->pdev, size, ap->evt_ring,
+ ap->evt_ring_dma);
+ ap->evt_ring = NULL;
+ }
+ if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) {
+ size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
+ pci_free_consistent(ap->pdev, size, ap->tx_ring,
+ ap->tx_ring_dma);
+ }
+ ap->tx_ring = NULL;
+
+ if (ap->evt_prd != NULL) {
+ pci_free_consistent(ap->pdev, sizeof(u32),
+ (void *)ap->evt_prd, ap->evt_prd_dma);
+ ap->evt_prd = NULL;
+ }
+ if (ap->rx_ret_prd != NULL) {
+ pci_free_consistent(ap->pdev, sizeof(u32),
+ (void *)ap->rx_ret_prd,
+ ap->rx_ret_prd_dma);
+ ap->rx_ret_prd = NULL;
+ }
+ if (ap->tx_csm != NULL) {
+ pci_free_consistent(ap->pdev, sizeof(u32),
+ (void *)ap->tx_csm, ap->tx_csm_dma);
+ ap->tx_csm = NULL;
+ }
+}
+
+
+static int ace_allocate_descriptors(struct net_device *dev)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ int size;
+
+ size = (sizeof(struct rx_desc) *
+ (RX_STD_RING_ENTRIES +
+ RX_JUMBO_RING_ENTRIES +
+ RX_MINI_RING_ENTRIES +
+ RX_RETURN_RING_ENTRIES));
+
+ ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size,
+ &ap->rx_ring_base_dma);
+ if (ap->rx_std_ring == NULL)
+ goto fail;
+
+ ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES;
+ ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES;
+ ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES;
+
+ size = (sizeof(struct event) * EVT_RING_ENTRIES);
+
+ ap->evt_ring = pci_alloc_consistent(ap->pdev, size, &ap->evt_ring_dma);
+
+ if (ap->evt_ring == NULL)
+ goto fail;
+
+ /*
+ * Only allocate a host TX ring for the Tigon II; the Tigon I
+ * has to use PCI registers for this ;-(
+ */
+ if (!ACE_IS_TIGON_I(ap)) {
+ size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
+
+ ap->tx_ring = pci_alloc_consistent(ap->pdev, size,
+ &ap->tx_ring_dma);
+
+ if (ap->tx_ring == NULL)
+ goto fail;
+ }
+
+ ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
+ &ap->evt_prd_dma);
+ if (ap->evt_prd == NULL)
+ goto fail;
+
+ ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
+ &ap->rx_ret_prd_dma);
+ if (ap->rx_ret_prd == NULL)
+ goto fail;
+
+ ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32),
+ &ap->tx_csm_dma);
+ if (ap->tx_csm == NULL)
+ goto fail;
+
+ return 0;
+
+fail:
+ /* Clean up. */
+ ace_init_cleanup(dev);
+ return 1;
+}
+
+
+/*
+ * Generic cleanup handler for data allocated during init. Used when the
+ * module is unloaded or if an error occurs during initialization
+ */
+static void ace_init_cleanup(struct net_device *dev)
+{
+ struct ace_private *ap;
+
+ ap = netdev_priv(dev);
+
+ ace_free_descriptors(dev);
+
+ if (ap->info)
+ pci_free_consistent(ap->pdev, sizeof(struct ace_info),
+ ap->info, ap->info_dma);
+ kfree(ap->skb);
+ kfree(ap->trace_buf);
+
+ if (dev->irq)
+ free_irq(dev->irq, dev);
+
+ iounmap(ap->regs);
+}
+
+
+/*
+ * Commands are considered to be slow.
+ */
+static inline void ace_issue_cmd(struct ace_regs __iomem *regs, struct cmd *cmd)
+{
+ u32 idx;
+
+ idx = readl(&regs->CmdPrd);
+
+ writel(*(u32 *)(cmd), &regs->CmdRng[idx]);
+ idx = (idx + 1) % CMD_RING_ENTRIES;
+
+ writel(idx, &regs->CmdPrd);
+}
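+
+/*
+ * A minimal usage sketch (kept inside #if 0, never compiled): commands
+ * are issued by filling in a struct cmd and posting it with
+ * ace_issue_cmd(), exactly as the Tigon I ring-refill paths below do;
+ * example_set_rx_prd() is a made-up name.
+ */
+#if 0
+static void example_set_rx_prd(struct ace_regs __iomem *regs, u16 prd_idx)
+{
+ struct cmd cmd;
+
+ cmd.evt = C_SET_RX_PRD_IDX; /* command type */
+ cmd.code = 0; /* this command takes no sub-code */
+ cmd.idx = prd_idx; /* new standard rx producer index */
+ ace_issue_cmd(regs, &cmd); /* post it on the command ring */
+}
+#endif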
+
+
+static int __devinit ace_init(struct net_device *dev)
+{
+ struct ace_private *ap;
+ struct ace_regs __iomem *regs;
+ struct ace_info *info = NULL;
+ struct pci_dev *pdev;
+ unsigned long myjif;
+ u64 tmp_ptr;
+ u32 tig_ver, mac1, mac2, tmp, pci_state;
+ int board_idx, ecode = 0;
+ short i;
+ unsigned char cache_size;
+
+ ap = netdev_priv(dev);
+ regs = ap->regs;
+
+ board_idx = ap->board_idx;
+
+ /*
+ * aman@sgi.com - it's useful to do a NIC reset here to
+ * address the `Firmware not running' problem subsequent
+ * to any crashes involving the NIC
+ */
+ writel(HW_RESET | (HW_RESET << 24), &regs->HostCtrl);
+ readl(&regs->HostCtrl); /* PCI write posting */
+ udelay(5);
+
+ /*
+ * Don't access any other registers before this point!
+ */
+#ifdef __BIG_ENDIAN
+ /*
+ * This will most likely need BYTE_SWAP once we switch
+ * to using __raw_writel()
+ */
+ writel((WORD_SWAP | CLR_INT | ((WORD_SWAP | CLR_INT) << 24)),
+ &regs->HostCtrl);
+#else
+ writel((CLR_INT | WORD_SWAP | ((CLR_INT | WORD_SWAP) << 24)),
+ &regs->HostCtrl);
+#endif
+ readl(&regs->HostCtrl); /* PCI write posting */
+
+ /*
+ * Stop the NIC CPU and clear pending interrupts
+ */
+ writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
+ readl(&regs->CpuCtrl); /* PCI write posting */
+ writel(0, &regs->Mb0Lo);
+
+ tig_ver = readl(&regs->HostCtrl) >> 28;
+
+ switch(tig_ver){
+#ifndef CONFIG_ACENIC_OMIT_TIGON_I
+ case 4:
+ case 5:
+ printk(KERN_INFO " Tigon I (Rev. %i), Firmware: %i.%i.%i, ",
+ tig_ver, ap->firmware_major, ap->firmware_minor,
+ ap->firmware_fix);
+ writel(0, &regs->LocalCtrl);
+ ap->version = 1;
+ ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES;
+ break;
+#endif
+ case 6:
+ printk(KERN_INFO " Tigon II (Rev. %i), Firmware: %i.%i.%i, ",
+ tig_ver, ap->firmware_major, ap->firmware_minor,
+ ap->firmware_fix);
+ writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
+ readl(&regs->CpuBCtrl); /* PCI write posting */
+ /*
+ * The SRAM bank size does _not_ indicate the amount
+ * of memory on the card, it controls the _bank_ size!
+ * I.e. a 1MB AceNIC will have two banks of 512KB.
+ */
+ writel(SRAM_BANK_512K, &regs->LocalCtrl);
+ writel(SYNC_SRAM_TIMING, &regs->MiscCfg);
+ ap->version = 2;
+ ap->tx_ring_entries = MAX_TX_RING_ENTRIES;
+ break;
+ default:
+ printk(KERN_WARNING " Unsupported Tigon version detected "
+ "(%i)\n", tig_ver);
+ ecode = -ENODEV;
+ goto init_error;
+ }
+
+ /*
+ * ModeStat _must_ be set after the SRAM settings as this change
+ * seems to corrupt the ModeStat and possibly other registers.
+ * The SRAM settings survive resets and setting it to the same
+ * value a second time works as well. This is what caused the
+ * `Firmware not running' problem on the Tigon II.
+ */
+#ifdef __BIG_ENDIAN
+ writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP_BD |
+ ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
+#else
+ writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL |
+ ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
+#endif
+ readl(&regs->ModeStat); /* PCI write posting */
+
+ mac1 = 0;
+ for(i = 0; i < 4; i++) {
+ int t;
+
+ mac1 = mac1 << 8;
+ t = read_eeprom_byte(dev, 0x8c+i);
+ if (t < 0) {
+ ecode = -EIO;
+ goto init_error;
+ } else
+ mac1 |= (t & 0xff);
+ }
+ mac2 = 0;
+ for(i = 4; i < 8; i++) {
+ int t;
+
+ mac2 = mac2 << 8;
+ t = read_eeprom_byte(dev, 0x8c+i);
+ if (t < 0) {
+ ecode = -EIO;
+ goto init_error;
+ } else
+ mac2 |= (t & 0xff);
+ }
+
+ writel(mac1, &regs->MacAddrHi);
+ writel(mac2, &regs->MacAddrLo);
+
+ dev->dev_addr[0] = (mac1 >> 8) & 0xff;
+ dev->dev_addr[1] = mac1 & 0xff;
+ dev->dev_addr[2] = (mac2 >> 24) & 0xff;
+ dev->dev_addr[3] = (mac2 >> 16) & 0xff;
+ dev->dev_addr[4] = (mac2 >> 8) & 0xff;
+ dev->dev_addr[5] = mac2 & 0xff;
+
+ printk("MAC: %pM\n", dev->dev_addr);
+
+ /*
+ * Looks like this is something we have to deal with on all
+ * architectures; even this %$#%$# N440BX Intel based thing doesn't
+ * get it right. I.e. with two NICs in the machine, one will have the
+ * cache line size set at boot time and the other will not.
+ */
+ pdev = ap->pdev;
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_size);
+ cache_size <<= 2;
+ if (cache_size != SMP_CACHE_BYTES) {
+ printk(KERN_INFO " PCI cache line size set incorrectly "
+ "(%i bytes) by BIOS/FW, ", cache_size);
+ if (cache_size > SMP_CACHE_BYTES)
+ printk("expecting %i\n", SMP_CACHE_BYTES);
+ else {
+ printk("correcting to %i\n", SMP_CACHE_BYTES);
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+ SMP_CACHE_BYTES >> 2);
+ }
+ }
+
+ pci_state = readl(&regs->PciState);
+ printk(KERN_INFO " PCI bus width: %i bits, speed: %iMHz, "
+ "latency: %i clks\n",
+ (pci_state & PCI_32BIT) ? 32 : 64,
+ (pci_state & PCI_66MHZ) ? 66 : 33,
+ ap->pci_latency);
+
+ /*
+ * Set the max DMA transfer size. It seems that for most systems
+ * the performance is better when no MAX parameter is
+ * set. However for systems enabling PCI write and invalidate,
+ * DMA writes must be set to the L1 cache line size to get
+ * optimal performance.
+ *
+ * The default is now to turn the PCI write and invalidate off
+ * - that is what Alteon does for NT.
+ */
+ tmp = READ_CMD_MEM | WRITE_CMD_MEM;
+ if (ap->version >= 2) {
+ tmp |= (MEM_READ_MULTIPLE | (pci_state & PCI_66MHZ));
+ /*
+ * Tuning parameters are only supported for up to 8 cards
+ */
+ if (board_idx == BOARD_IDX_OVERFLOW ||
+ dis_pci_mem_inval[board_idx]) {
+ if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
+ ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
+ pci_write_config_word(pdev, PCI_COMMAND,
+ ap->pci_command);
+ printk(KERN_INFO " Disabling PCI memory "
+ "write and invalidate\n");
+ }
+ } else if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
+ printk(KERN_INFO " PCI memory write & invalidate "
+ "enabled by BIOS, enabling counter measures\n");
+
+ switch(SMP_CACHE_BYTES) {
+ case 16:
+ tmp |= DMA_WRITE_MAX_16;
+ break;
+ case 32:
+ tmp |= DMA_WRITE_MAX_32;
+ break;
+ case 64:
+ tmp |= DMA_WRITE_MAX_64;
+ break;
+ case 128:
+ tmp |= DMA_WRITE_MAX_128;
+ break;
+ default:
+ printk(KERN_INFO " Cache line size %i not "
+ "supported, PCI write and invalidate "
+ "disabled\n", SMP_CACHE_BYTES);
+ ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
+ pci_write_config_word(pdev, PCI_COMMAND,
+ ap->pci_command);
+ }
+ }
+ }
+
+#ifdef __sparc__
+ /*
+ * On this platform, we know what the best dma settings
+ * are. We use 64-byte maximum bursts, because if we
+ * burst larger than the cache line size (or even cross
+ * a 64byte boundary in a single burst) the UltraSparc
+ * PCI controller will disconnect at 64-byte multiples.
+ *
+ * Read-multiple will be properly enabled above, and when
+ * set will give the PCI controller proper hints about
+ * prefetching.
+ */
+ tmp &= ~DMA_READ_WRITE_MASK;
+ tmp |= DMA_READ_MAX_64;
+ tmp |= DMA_WRITE_MAX_64;
+#endif
+#ifdef __alpha__
+ tmp &= ~DMA_READ_WRITE_MASK;
+ tmp |= DMA_READ_MAX_128;
+ /*
+ * All the docs say MUST NOT. Well, I did.
+ * Nothing terrible happens if we load the wrong size.
+ * But w&i still works better!
+ */
+ tmp |= DMA_WRITE_MAX_128;
+#endif
+ writel(tmp, &regs->PciState);
+
+#if 0
+ /*
+ * The Host PCI bus controller driver has to set FBB.
+ * If all devices on that PCI bus support FBB, then the controller
+ * can enable FBB support in the Host PCI Bus controller (or on
+ * the PCI-PCI bridge if that applies).
+ * -ggg
+ */
+ /*
+ * I have received reports from people having problems when this
+ * bit is enabled.
+ */
+ if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) {
+ printk(KERN_INFO " Enabling PCI Fast Back to Back\n");
+ ap->pci_command |= PCI_COMMAND_FAST_BACK;
+ pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command);
+ }
+#endif
+
+ /*
+ * Configure DMA attributes.
+ */
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ ap->pci_using_dac = 1;
+ } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ ap->pci_using_dac = 0;
+ } else {
+ ecode = -ENODEV;
+ goto init_error;
+ }
+
+ /*
+ * Initialize the generic info block and the command+event rings
+ * and the control blocks for the transmit and receive rings
+ * as they need to be set up once and for all.
+ */
+ if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info),
+ &ap->info_dma))) {
+ ecode = -EAGAIN;
+ goto init_error;
+ }
+ ap->info = info;
+
+ /*
+ * Get the memory for the skb rings.
+ */
+ if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
+ ecode = -EAGAIN;
+ goto init_error;
+ }
+
+ ecode = request_irq(pdev->irq, ace_interrupt, IRQF_SHARED,
+ DRV_NAME, dev);
+ if (ecode) {
+ printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
+ DRV_NAME, pdev->irq);
+ goto init_error;
+ } else
+ dev->irq = pdev->irq;
+
+#ifdef INDEX_DEBUG
+ spin_lock_init(&ap->debug_lock);
+ ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1;
+ ap->last_std_rx = 0;
+ ap->last_mini_rx = 0;
+#endif
+
+ memset(ap->info, 0, sizeof(struct ace_info));
+ memset(ap->skb, 0, sizeof(struct ace_skb));
+
+ ecode = ace_load_firmware(dev);
+ if (ecode)
+ goto init_error;
+
+ ap->fw_running = 0;
+
+ tmp_ptr = ap->info_dma;
+ writel(tmp_ptr >> 32, &regs->InfoPtrHi);
+ writel(tmp_ptr & 0xffffffff, &regs->InfoPtrLo);
+
+ memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event));
+
+ set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma);
+ info->evt_ctrl.flags = 0;
+
+ *(ap->evt_prd) = 0;
+ wmb();
+ set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma);
+ writel(0, &regs->EvtCsm);
+
+ set_aceaddr(&info->cmd_ctrl.rngptr, 0x100);
+ info->cmd_ctrl.flags = 0;
+ info->cmd_ctrl.max_len = 0;
+
+ for (i = 0; i < CMD_RING_ENTRIES; i++)
+ writel(0, &regs->CmdRng[i]);
+
+ writel(0, &regs->CmdPrd);
+ writel(0, &regs->CmdCsm);
+
+ tmp_ptr = ap->info_dma;
+ tmp_ptr += (unsigned long) &(((struct ace_info *)0)->s.stats);
+ set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);
+
+ set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
+ info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE;
+ info->rx_std_ctrl.flags =
+ RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
+
+ memset(ap->rx_std_ring, 0,
+ RX_STD_RING_ENTRIES * sizeof(struct rx_desc));
+
+ for (i = 0; i < RX_STD_RING_ENTRIES; i++)
+ ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM;
+
+ ap->rx_std_skbprd = 0;
+ atomic_set(&ap->cur_rx_bufs, 0);
+
+ set_aceaddr(&info->rx_jumbo_ctrl.rngptr,
+ (ap->rx_ring_base_dma +
+ (sizeof(struct rx_desc) * RX_STD_RING_ENTRIES)));
+ info->rx_jumbo_ctrl.max_len = 0;
+ info->rx_jumbo_ctrl.flags =
+ RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
+
+ memset(ap->rx_jumbo_ring, 0,
+ RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc));
+
+ for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++)
+ ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO;
+
+ ap->rx_jumbo_skbprd = 0;
+ atomic_set(&ap->cur_jumbo_bufs, 0);
+
+ memset(ap->rx_mini_ring, 0,
+ RX_MINI_RING_ENTRIES * sizeof(struct rx_desc));
+
+ if (ap->version >= 2) {
+ set_aceaddr(&info->rx_mini_ctrl.rngptr,
+ (ap->rx_ring_base_dma +
+ (sizeof(struct rx_desc) *
+ (RX_STD_RING_ENTRIES +
+ RX_JUMBO_RING_ENTRIES))));
+ info->rx_mini_ctrl.max_len = ACE_MINI_SIZE;
+ info->rx_mini_ctrl.flags =
+ RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|RCB_FLG_VLAN_ASSIST;
+
+ for (i = 0; i < RX_MINI_RING_ENTRIES; i++)
+ ap->rx_mini_ring[i].flags =
+ BD_FLG_TCP_UDP_SUM | BD_FLG_MINI;
+ } else {
+ set_aceaddr(&info->rx_mini_ctrl.rngptr, 0);
+ info->rx_mini_ctrl.flags = RCB_FLG_RNG_DISABLE;
+ info->rx_mini_ctrl.max_len = 0;
+ }
+
+ ap->rx_mini_skbprd = 0;
+ atomic_set(&ap->cur_mini_bufs, 0);
+
+ set_aceaddr(&info->rx_return_ctrl.rngptr,
+ (ap->rx_ring_base_dma +
+ (sizeof(struct rx_desc) *
+ (RX_STD_RING_ENTRIES +
+ RX_JUMBO_RING_ENTRIES +
+ RX_MINI_RING_ENTRIES))));
+ info->rx_return_ctrl.flags = 0;
+ info->rx_return_ctrl.max_len = RX_RETURN_RING_ENTRIES;
+
+ memset(ap->rx_return_ring, 0,
+ RX_RETURN_RING_ENTRIES * sizeof(struct rx_desc));
+
+ set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma);
+ *(ap->rx_ret_prd) = 0;
+
+ writel(TX_RING_BASE, &regs->WinBase);
+
+ if (ACE_IS_TIGON_I(ap)) {
+ ap->tx_ring = (__force struct tx_desc *) regs->Window;
+ for (i = 0; i < (TIGON_I_TX_RING_ENTRIES
+ * sizeof(struct tx_desc)) / sizeof(u32); i++)
+ writel(0, (__force void __iomem *)ap->tx_ring + i * 4);
+
+ set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE);
+ } else {
+ memset(ap->tx_ring, 0,
+ MAX_TX_RING_ENTRIES * sizeof(struct tx_desc));
+
+ set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma);
+ }
+
+ info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap);
+ tmp = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
+
+ /*
+ * The Tigon I does not like having the TX ring in host memory ;-(
+ */
+ if (!ACE_IS_TIGON_I(ap))
+ tmp |= RCB_FLG_TX_HOST_RING;
+#if TX_COAL_INTS_ONLY
+ tmp |= RCB_FLG_COAL_INT_ONLY;
+#endif
+ info->tx_ctrl.flags = tmp;
+
+ set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma);
+
+ /*
+ * Potential item for tuning parameter
+ */
+#if 0 /* NO */
+ writel(DMA_THRESH_16W, &regs->DmaReadCfg);
+ writel(DMA_THRESH_16W, &regs->DmaWriteCfg);
+#else
+ writel(DMA_THRESH_8W, &regs->DmaReadCfg);
+ writel(DMA_THRESH_8W, &regs->DmaWriteCfg);
+#endif
+
+ writel(0, &regs->MaskInt);
+ writel(1, &regs->IfIdx);
+#if 0
+ /*
+ * McKinley boxes do not like us fiddling with AssistState
+ * this early
+ */
+ writel(1, &regs->AssistState);
+#endif
+
+ writel(DEF_STAT, &regs->TuneStatTicks);
+ writel(DEF_TRACE, &regs->TuneTrace);
+
+ ace_set_rxtx_parms(dev, 0);
+
+ if (board_idx == BOARD_IDX_OVERFLOW) {
+ printk(KERN_WARNING "%s: more than %i NICs detected, "
+ "ignoring module parameters!\n",
+ ap->name, ACE_MAX_MOD_PARMS);
+ } else if (board_idx >= 0) {
+ if (tx_coal_tick[board_idx])
+ writel(tx_coal_tick[board_idx],
+ &regs->TuneTxCoalTicks);
+ if (max_tx_desc[board_idx])
+ writel(max_tx_desc[board_idx], &regs->TuneMaxTxDesc);
+
+ if (rx_coal_tick[board_idx])
+ writel(rx_coal_tick[board_idx],
+ &regs->TuneRxCoalTicks);
+ if (max_rx_desc[board_idx])
+ writel(max_rx_desc[board_idx], &regs->TuneMaxRxDesc);
+
+ if (trace[board_idx])
+ writel(trace[board_idx], &regs->TuneTrace);
+
+ if ((tx_ratio[board_idx] > 0) && (tx_ratio[board_idx] < 64))
+ writel(tx_ratio[board_idx], &regs->TxBufRat);
+ }
+
+ /*
+ * Default link parameters
+ */
+ tmp = LNK_ENABLE | LNK_FULL_DUPLEX | LNK_1000MB | LNK_100MB |
+ LNK_10MB | LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL | LNK_NEGOTIATE;
+ if(ap->version >= 2)
+ tmp |= LNK_TX_FLOW_CTL_Y;
+
+ /*
+ * Override link default parameters
+ */
+ if ((board_idx >= 0) && link_state[board_idx]) {
+ int option = link_state[board_idx];
+
+ tmp = LNK_ENABLE;
+
+ if (option & 0x01) {
+ printk(KERN_INFO "%s: Setting half duplex link\n",
+ ap->name);
+ tmp &= ~LNK_FULL_DUPLEX;
+ }
+ if (option & 0x02)
+ tmp &= ~LNK_NEGOTIATE;
+ if (option & 0x10)
+ tmp |= LNK_10MB;
+ if (option & 0x20)
+ tmp |= LNK_100MB;
+ if (option & 0x40)
+ tmp |= LNK_1000MB;
+ if ((option & 0x70) == 0) {
+ printk(KERN_WARNING "%s: No media speed specified, "
+ "forcing auto negotiation\n", ap->name);
+ tmp |= LNK_NEGOTIATE | LNK_1000MB |
+ LNK_100MB | LNK_10MB;
+ }
+ if ((option & 0x100) == 0)
+ tmp |= LNK_NEG_FCTL;
+ else
+ printk(KERN_INFO "%s: Disabling flow control "
+ "negotiation\n", ap->name);
+ if (option & 0x200)
+ tmp |= LNK_RX_FLOW_CTL_Y;
+ if ((option & 0x400) && (ap->version >= 2)) {
+ printk(KERN_INFO "%s: Enabling TX flow control\n",
+ ap->name);
+ tmp |= LNK_TX_FLOW_CTL_Y;
+ }
+ }
+
+ ap->link = tmp;
+ writel(tmp, &regs->TuneLink);
+ if (ap->version >= 2)
+ writel(tmp, &regs->TuneFastLink);
+
+ writel(ap->firmware_start, &regs->Pc);
+
+ writel(0, &regs->Mb0Lo);
+
+ /*
+ * Set tx_csm before we start receiving interrupts, otherwise
+ * the interrupt handler might think it is supposed to process
+ * tx ints before we are up and running, which may cause a null
+ * pointer access in the int handler.
+ */
+ ap->cur_rx = 0;
+ ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0;
+
+ wmb();
+ ace_set_txprd(regs, ap, 0);
+ writel(0, &regs->RxRetCsm);
+
+ /*
+ * Enable DMA engine now.
+ * If we do this sooner, the McKinley box pukes.
+ * I assume it's because the Tigon II DMA engine wants to check
+ * *something* even before the CPU is started.
+ */
+ writel(1, &regs->AssistState); /* enable DMA */
+
+ /*
+ * Start the NIC CPU
+ */
+ writel(readl(&regs->CpuCtrl) & ~(CPU_HALT|CPU_TRACE), &regs->CpuCtrl);
+ readl(&regs->CpuCtrl);
+
+ /*
+ * Wait for the firmware to spin up - max 3 seconds.
+ */
+ myjif = jiffies + 3 * HZ;
+ while (time_before(jiffies, myjif) && !ap->fw_running)
+ cpu_relax();
+
+ if (!ap->fw_running) {
+ printk(KERN_ERR "%s: Firmware NOT running!\n", ap->name);
+
+ ace_dump_trace(ap);
+ writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
+ readl(&regs->CpuCtrl);
+
+ /* aman@sgi.com - account for badly behaving firmware/NIC:
+ * - have observed that the NIC may continue to generate
+ * interrupts for some reason; attempt to stop it - halt
+ * second CPU for Tigon II cards, and also clear Mb0
+ * - if we're a module, we'll fail to load if this was
+ * the only GbE card in the system => if the kernel does
+ * see an interrupt from the NIC, code to handle it is
+ * gone and OOps! - so free_irq also
+ */
+ if (ap->version >= 2)
+ writel(readl(&regs->CpuBCtrl) | CPU_HALT,
+ &regs->CpuBCtrl);
+ writel(0, &regs->Mb0Lo);
+ readl(&regs->Mb0Lo);
+
+ ecode = -EBUSY;
+ goto init_error;
+ }
+
+ /*
+ * We load the ring here as there seems to be no way to tell the
+ * firmware to wipe the ring without re-initializing it.
+ */
+ if (!test_and_set_bit(0, &ap->std_refill_busy))
+ ace_load_std_rx_ring(dev, RX_RING_SIZE);
+ else
+ printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n",
+ ap->name);
+ if (ap->version >= 2) {
+ if (!test_and_set_bit(0, &ap->mini_refill_busy))
+ ace_load_mini_rx_ring(dev, RX_MINI_SIZE);
+ else
+ printk(KERN_ERR "%s: Someone is busy refilling "
+ "the RX mini ring\n", ap->name);
+ }
+ return 0;
+
+ init_error:
+ ace_init_cleanup(dev);
+ return ecode;
+}
+
+
+static void ace_set_rxtx_parms(struct net_device *dev, int jumbo)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ int board_idx = ap->board_idx;
+
+ if (board_idx >= 0) {
+ if (!jumbo) {
+ if (!tx_coal_tick[board_idx])
+ writel(DEF_TX_COAL, &regs->TuneTxCoalTicks);
+ if (!max_tx_desc[board_idx])
+ writel(DEF_TX_MAX_DESC, &regs->TuneMaxTxDesc);
+ if (!rx_coal_tick[board_idx])
+ writel(DEF_RX_COAL, &regs->TuneRxCoalTicks);
+ if (!max_rx_desc[board_idx])
+ writel(DEF_RX_MAX_DESC, &regs->TuneMaxRxDesc);
+ if (!tx_ratio[board_idx])
+ writel(DEF_TX_RATIO, &regs->TxBufRat);
+ } else {
+ if (!tx_coal_tick[board_idx])
+ writel(DEF_JUMBO_TX_COAL,
+ &regs->TuneTxCoalTicks);
+ if (!max_tx_desc[board_idx])
+ writel(DEF_JUMBO_TX_MAX_DESC,
+ &regs->TuneMaxTxDesc);
+ if (!rx_coal_tick[board_idx])
+ writel(DEF_JUMBO_RX_COAL,
+ &regs->TuneRxCoalTicks);
+ if (!max_rx_desc[board_idx])
+ writel(DEF_JUMBO_RX_MAX_DESC,
+ &regs->TuneMaxRxDesc);
+ if (!tx_ratio[board_idx])
+ writel(DEF_JUMBO_TX_RATIO, &regs->TxBufRat);
+ }
+ }
+}
+
+
+static void ace_watchdog(struct net_device *data)
+{
+ struct net_device *dev = data;
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+
+ /*
+ * We haven't received a stats update event for more than 2.5
+ * seconds and there is data in the transmit queue, thus we
+ * assume the card is stuck.
+ */
+ if (*ap->tx_csm != ap->tx_ret_csm) {
+ printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n",
+ dev->name, (unsigned int)readl(&regs->HostCtrl));
+ /* This can happen due to IEEE flow control. */
+ } else {
+ printk(KERN_DEBUG "%s: BUG... transmitter died. Kicking it.\n",
+ dev->name);
+#if 0
+ netif_wake_queue(dev);
+#endif
+ }
+}
+
+
+static void ace_tasklet(unsigned long arg)
+{
+ struct net_device *dev = (struct net_device *) arg;
+ struct ace_private *ap = netdev_priv(dev);
+ int cur_size;
+
+ cur_size = atomic_read(&ap->cur_rx_bufs);
+ if ((cur_size < RX_LOW_STD_THRES) &&
+ !test_and_set_bit(0, &ap->std_refill_busy)) {
+#ifdef DEBUG
+ printk("refilling buffers (current %i)\n", cur_size);
+#endif
+ ace_load_std_rx_ring(dev, RX_RING_SIZE - cur_size);
+ }
+
+ if (ap->version >= 2) {
+ cur_size = atomic_read(&ap->cur_mini_bufs);
+ if ((cur_size < RX_LOW_MINI_THRES) &&
+ !test_and_set_bit(0, &ap->mini_refill_busy)) {
+#ifdef DEBUG
+ printk("refilling mini buffers (current %i)\n",
+ cur_size);
+#endif
+ ace_load_mini_rx_ring(dev, RX_MINI_SIZE - cur_size);
+ }
+ }
+
+ cur_size = atomic_read(&ap->cur_jumbo_bufs);
+ if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) &&
+ !test_and_set_bit(0, &ap->jumbo_refill_busy)) {
+#ifdef DEBUG
+ printk("refilling jumbo buffers (current %i)\n", cur_size);
+#endif
+ ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size);
+ }
+ ap->tasklet_pending = 0;
+}
+
+
+/*
+ * Copy the contents of the NIC's trace buffer to kernel memory.
+ */
+static void ace_dump_trace(struct ace_private *ap)
+{
+#if 0
+ if (!ap->trace_buf)
+ if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL)))
+ return;
+#endif
+}
+
+
+/*
+ * Load the standard rx ring.
+ *
+ * Loading the rings is safe without holding the spin lock, since this
+ * is done only before the device is enabled, thus no interrupts are
+ * generated. Later refills are performed by the interrupt handler and
+ * the tasklet handler, serialized by the refill_busy bits.
+ */
+static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ short i, idx;
+
+ prefetchw(&ap->cur_rx_bufs);
+
+ idx = ap->rx_std_skbprd;
+
+ for (i = 0; i < nr_bufs; i++) {
+ struct sk_buff *skb;
+ struct rx_desc *rd;
+ dma_addr_t mapping;
+
+ skb = netdev_alloc_skb_ip_align(dev, ACE_STD_BUFSIZE);
+ if (!skb)
+ break;
+
+ mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
+ offset_in_page(skb->data),
+ ACE_STD_BUFSIZE,
+ PCI_DMA_FROMDEVICE);
+ ap->skb->rx_std_skbuff[idx].skb = skb;
+ dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
+ mapping, mapping);
+
+ rd = &ap->rx_std_ring[idx];
+ set_aceaddr(&rd->addr, mapping);
+ rd->size = ACE_STD_BUFSIZE;
+ rd->idx = idx;
+ idx = (idx + 1) % RX_STD_RING_ENTRIES;
+ }
+
+ if (!i)
+ goto error_out;
+
+ atomic_add(i, &ap->cur_rx_bufs);
+ ap->rx_std_skbprd = idx;
+
+ if (ACE_IS_TIGON_I(ap)) {
+ struct cmd cmd;
+ cmd.evt = C_SET_RX_PRD_IDX;
+ cmd.code = 0;
+ cmd.idx = ap->rx_std_skbprd;
+ ace_issue_cmd(regs, &cmd);
+ } else {
+ writel(idx, &regs->RxStdPrd);
+ wmb();
+ }
+
+ out:
+ clear_bit(0, &ap->std_refill_busy);
+ return;
+
+ error_out:
+ printk(KERN_INFO "Out of memory when allocating "
+ "standard receive buffers\n");
+ goto out;
+}
+
+
+static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ short i, idx;
+
+ prefetchw(&ap->cur_mini_bufs);
+
+ idx = ap->rx_mini_skbprd;
+ for (i = 0; i < nr_bufs; i++) {
+ struct sk_buff *skb;
+ struct rx_desc *rd;
+ dma_addr_t mapping;
+
+ skb = netdev_alloc_skb_ip_align(dev, ACE_MINI_BUFSIZE);
+ if (!skb)
+ break;
+
+ mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
+ offset_in_page(skb->data),
+ ACE_MINI_BUFSIZE,
+ PCI_DMA_FROMDEVICE);
+ ap->skb->rx_mini_skbuff[idx].skb = skb;
+ dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
+ mapping, mapping);
+
+ rd = &ap->rx_mini_ring[idx];
+ set_aceaddr(&rd->addr, mapping);
+ rd->size = ACE_MINI_BUFSIZE;
+ rd->idx = idx;
+ idx = (idx + 1) % RX_MINI_RING_ENTRIES;
+ }
+
+ if (!i)
+ goto error_out;
+
+ atomic_add(i, &ap->cur_mini_bufs);
+
+ ap->rx_mini_skbprd = idx;
+
+ writel(idx, &regs->RxMiniPrd);
+ wmb();
+
+ out:
+ clear_bit(0, &ap->mini_refill_busy);
+ return;
+ error_out:
+ printk(KERN_INFO "Out of memory when allocating "
+ "mini receive buffers\n");
+ goto out;
+}
+
+
+/*
+ * Load the jumbo rx ring; this may happen at any time if the MTU
+ * is changed to a value > 1500.
+ */
+static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ short i, idx;
+
+ idx = ap->rx_jumbo_skbprd;
+
+ for (i = 0; i < nr_bufs; i++) {
+ struct sk_buff *skb;
+ struct rx_desc *rd;
+ dma_addr_t mapping;
+
+ skb = netdev_alloc_skb_ip_align(dev, ACE_JUMBO_BUFSIZE);
+ if (!skb)
+ break;
+
+ mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
+ offset_in_page(skb->data),
+ ACE_JUMBO_BUFSIZE,
+ PCI_DMA_FROMDEVICE);
+ ap->skb->rx_jumbo_skbuff[idx].skb = skb;
+ dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
+ mapping, mapping);
+
+ rd = &ap->rx_jumbo_ring[idx];
+ set_aceaddr(&rd->addr, mapping);
+ rd->size = ACE_JUMBO_BUFSIZE;
+ rd->idx = idx;
+ idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
+ }
+
+ if (!i)
+ goto error_out;
+
+ atomic_add(i, &ap->cur_jumbo_bufs);
+ ap->rx_jumbo_skbprd = idx;
+
+ if (ACE_IS_TIGON_I(ap)) {
+ struct cmd cmd;
+ cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
+ cmd.code = 0;
+ cmd.idx = ap->rx_jumbo_skbprd;
+ ace_issue_cmd(regs, &cmd);
+ } else {
+ writel(idx, &regs->RxJumboPrd);
+ wmb();
+ }
+
+ out:
+ clear_bit(0, &ap->jumbo_refill_busy);
+ return;
+ error_out:
+ if (net_ratelimit())
+ printk(KERN_INFO "Out of memory when allocating "
+ "jumbo receive buffers\n");
+ goto out;
+}
+
+
+/*
+ * All events are considered to be slow (RX/TX ints do not generate
+ * events) and are handled here, outside the main interrupt handler,
+ * to reduce the size of the handler.
+ */
+static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
+{
+ struct ace_private *ap;
+
+ ap = netdev_priv(dev);
+
+ while (evtcsm != evtprd) {
+ switch (ap->evt_ring[evtcsm].evt) {
+ case E_FW_RUNNING:
+ printk(KERN_INFO "%s: Firmware up and running\n",
+ ap->name);
+ ap->fw_running = 1;
+ wmb();
+ break;
+ case E_STATS_UPDATED:
+ break;
+ case E_LNK_STATE:
+ {
+ u16 code = ap->evt_ring[evtcsm].code;
+ switch (code) {
+ case E_C_LINK_UP:
+ {
+ u32 state = readl(&ap->regs->GigLnkState);
+ printk(KERN_WARNING "%s: Optical link UP "
+ "(%s Duplex, Flow Control: %s%s)\n",
+ ap->name,
+ state & LNK_FULL_DUPLEX ? "Full":"Half",
+ state & LNK_TX_FLOW_CTL_Y ? "TX " : "",
+ state & LNK_RX_FLOW_CTL_Y ? "RX" : "");
+ break;
+ }
+ case E_C_LINK_DOWN:
+ printk(KERN_WARNING "%s: Optical link DOWN\n",
+ ap->name);
+ break;
+ case E_C_LINK_10_100:
+ printk(KERN_WARNING "%s: 10/100BaseT link "
+ "UP\n", ap->name);
+ break;
+ default:
+ printk(KERN_ERR "%s: Unknown optical link "
+ "state %02x\n", ap->name, code);
+ }
+ break;
+ }
+ case E_ERROR:
+ switch(ap->evt_ring[evtcsm].code) {
+ case E_C_ERR_INVAL_CMD:
+ printk(KERN_ERR "%s: invalid command error\n",
+ ap->name);
+ break;
+ case E_C_ERR_UNIMP_CMD:
+ printk(KERN_ERR "%s: unimplemented command "
+ "error\n", ap->name);
+ break;
+ case E_C_ERR_BAD_CFG:
+ printk(KERN_ERR "%s: bad config error\n",
+ ap->name);
+ break;
+ default:
+ printk(KERN_ERR "%s: unknown error %02x\n",
+ ap->name, ap->evt_ring[evtcsm].code);
+ }
+ break;
+ case E_RESET_JUMBO_RNG:
+ {
+ int i;
+ for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
+ if (ap->skb->rx_jumbo_skbuff[i].skb) {
+ ap->rx_jumbo_ring[i].size = 0;
+ set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0);
+ dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb);
+ ap->skb->rx_jumbo_skbuff[i].skb = NULL;
+ }
+ }
+
+ if (ACE_IS_TIGON_I(ap)) {
+ struct cmd cmd;
+ cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
+ cmd.code = 0;
+ cmd.idx = 0;
+ ace_issue_cmd(ap->regs, &cmd);
+ } else {
+ writel(0, &((ap->regs)->RxJumboPrd));
+ wmb();
+ }
+
+ ap->jumbo = 0;
+ ap->rx_jumbo_skbprd = 0;
+ printk(KERN_INFO "%s: Jumbo ring flushed\n",
+ ap->name);
+ clear_bit(0, &ap->jumbo_refill_busy);
+ break;
+ }
+ default:
+ printk(KERN_ERR "%s: Unhandled event 0x%02x\n",
+ ap->name, ap->evt_ring[evtcsm].evt);
+ }
+ evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
+ }
+
+ return evtcsm;
+}
+
+
+static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ u32 idx;
+ int mini_count = 0, std_count = 0;
+
+ idx = rxretcsm;
+
+ prefetchw(&ap->cur_rx_bufs);
+ prefetchw(&ap->cur_mini_bufs);
+
+ while (idx != rxretprd) {
+ struct ring_info *rip;
+ struct sk_buff *skb;
+ struct rx_desc *rxdesc, *retdesc;
+ u32 skbidx;
+ int bd_flags, desc_type, mapsize;
+ u16 csum;
+
+
+ /* make sure the rx descriptor isn't read before rxretprd */
+ if (idx == rxretcsm)
+ rmb();
+
+ retdesc = &ap->rx_return_ring[idx];
+ skbidx = retdesc->idx;
+ bd_flags = retdesc->flags;
+ desc_type = bd_flags & (BD_FLG_JUMBO | BD_FLG_MINI);
+
+ switch(desc_type) {
+ /*
+ * Normal frames do not have any flags set
+ *
+ * Mini and normal frames arrive frequently,
+ * so use a local counter to avoid doing
+ * atomic operations for each packet arriving.
+ */
+ case 0:
+ rip = &ap->skb->rx_std_skbuff[skbidx];
+ mapsize = ACE_STD_BUFSIZE;
+ rxdesc = &ap->rx_std_ring[skbidx];
+ std_count++;
+ break;
+ case BD_FLG_JUMBO:
+ rip = &ap->skb->rx_jumbo_skbuff[skbidx];
+ mapsize = ACE_JUMBO_BUFSIZE;
+ rxdesc = &ap->rx_jumbo_ring[skbidx];
+ atomic_dec(&ap->cur_jumbo_bufs);
+ break;
+ case BD_FLG_MINI:
+ rip = &ap->skb->rx_mini_skbuff[skbidx];
+ mapsize = ACE_MINI_BUFSIZE;
+ rxdesc = &ap->rx_mini_ring[skbidx];
+ mini_count++;
+ break;
+ default:
+ printk(KERN_INFO "%s: unknown frame type (0x%02x) "
+ "returned by NIC\n", dev->name,
+ retdesc->flags);
+ goto error;
+ }
+
+ skb = rip->skb;
+ rip->skb = NULL;
+ pci_unmap_page(ap->pdev,
+ dma_unmap_addr(rip, mapping),
+ mapsize,
+ PCI_DMA_FROMDEVICE);
+ skb_put(skb, retdesc->size);
+
+ /*
+ * Fly baby, fly!
+ */
+ csum = retdesc->tcp_udp_csum;
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /*
+ * Instead of forcing the poor Tigon MIPS CPU to calculate the
+ * pseudo hdr checksum, we do this ourselves.
+ */
+ if (bd_flags & BD_FLG_TCP_UDP_SUM) {
+ skb->csum = htons(csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ } else {
+ skb_checksum_none_assert(skb);
+ }
+
+ /* send it up */
+ if ((bd_flags & BD_FLG_VLAN_TAG))
+ __vlan_hwaccel_put_tag(skb, retdesc->vlan);
+ netif_rx(skb);
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += retdesc->size;
+
+ idx = (idx + 1) % RX_RETURN_RING_ENTRIES;
+ }
+
+ atomic_sub(std_count, &ap->cur_rx_bufs);
+ if (!ACE_IS_TIGON_I(ap))
+ atomic_sub(mini_count, &ap->cur_mini_bufs);
+
+ out:
+ /*
+ * According to the documentation RxRetCsm is obsolete with
+ * the 12.3.x Firmware - my Tigon I NICs seem to disagree!
+ */
+ if (ACE_IS_TIGON_I(ap)) {
+ writel(idx, &ap->regs->RxRetCsm);
+ }
+ ap->cur_rx = idx;
+
+ return;
+ error:
+ idx = rxretprd;
+ goto out;
+}
+
+
+static inline void ace_tx_int(struct net_device *dev,
+ u32 txcsm, u32 idx)
+{
+ struct ace_private *ap = netdev_priv(dev);
+
+ do {
+ struct sk_buff *skb;
+ struct tx_ring_info *info;
+
+ info = ap->skb->tx_skbuff + idx;
+ skb = info->skb;
+
+ if (dma_unmap_len(info, maplen)) {
+ pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
+ dma_unmap_len(info, maplen),
+ PCI_DMA_TODEVICE);
+ dma_unmap_len_set(info, maplen, 0);
+ }
+
+ if (skb) {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ dev_kfree_skb_irq(skb);
+ info->skb = NULL;
+ }
+
+ idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
+ } while (idx != txcsm);
+
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+
+ wmb();
+ ap->tx_ret_csm = txcsm;
+
+ /* So... tx_ret_csm is advanced _after_ check for device wakeup.
+ *
+ * We could try to make it before. In this case we would get
+ * the following race condition: hard_start_xmit on other cpu
+ * enters after we advanced tx_ret_csm and fills space,
+ * which we have just freed, so that we make illegal device wakeup.
+ * There is no good way to work around this (a check at entry
+ * to ace_start_xmit detects this condition and prevents
+ * ring corruption, but it is not a good workaround.)
+ *
+ * When tx_ret_csm is advanced after, we wake up the device _only_
+ * if we really have some space in the ring (though the core doing
+ * hard_start_xmit can see a full ring for some period and has to
+ * synchronize.) Superb.
+ * BUT! We get another subtle race condition. hard_start_xmit
+ * may think that the ring is full between wakeup and advancing
+ * tx_ret_csm and will stop the device instantly! It is not so bad.
+ * We are guaranteed that there is something in ring, so that
+ * the next irq will resume transmission. To speed this up we could
+ * mark descriptor, which closes ring with BD_FLG_COAL_NOW
+ * (see ace_start_xmit).
+ *
+ * Well, this dilemma exists in all lock-free devices.
+ * We, following the scheme used in drivers by Donald Becker,
+ * select the least dangerous.
+ * --ANK
+ */
+}
+
+
+static irqreturn_t ace_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ u32 idx;
+ u32 txcsm, rxretcsm, rxretprd;
+ u32 evtcsm, evtprd;
+
+ /*
+ * In case of PCI shared interrupts or spurious interrupts,
+ * we want to make sure it is actually our interrupt before
+ * spending any time in here.
+ */
+ if (!(readl(&regs->HostCtrl) & IN_INT))
+ return IRQ_NONE;
+
+ /*
+ * ACK intr now. Otherwise we will lose updates to rx_ret_prd,
+ * which happened _after_ rxretprd = *ap->rx_ret_prd; but before
+ * writel(0, &regs->Mb0Lo).
+ *
+ * "IRQ avoidance" recommended in docs applies to IRQs served
+ * threads and it is wrong even for that case.
+ */
+ writel(0, &regs->Mb0Lo);
+ readl(&regs->Mb0Lo);
+
+ /*
+ * There is no conflict between transmit handling in
+ * start_xmit and receive processing, thus there is no reason
+ * to take a spin lock for RX handling. Wait until we start
+ * working on the other stuff - hey we don't need a spin lock
+ * anymore.
+ */
+ rxretprd = *ap->rx_ret_prd;
+ rxretcsm = ap->cur_rx;
+
+ if (rxretprd != rxretcsm)
+ ace_rx_int(dev, rxretprd, rxretcsm);
+
+ txcsm = *ap->tx_csm;
+ idx = ap->tx_ret_csm;
+
+ if (txcsm != idx) {
+ /*
+ * If each skb takes only one descriptor this check degenerates
+ * to identity, because new space has just been opened.
+ * But if skbs are fragmented we must check that this index
+ * update releases enough space; otherwise we just
+ * wait for the device to do more work.
+ */
+ if (!tx_ring_full(ap, txcsm, ap->tx_prd))
+ ace_tx_int(dev, txcsm, idx);
+ }
+
+ evtcsm = readl(&regs->EvtCsm);
+ evtprd = *ap->evt_prd;
+
+ if (evtcsm != evtprd) {
+ evtcsm = ace_handle_event(dev, evtcsm, evtprd);
+ writel(evtcsm, &regs->EvtCsm);
+ }
+
+ /*
+ * This has to go last in the interrupt handler and run with
+ * the spin lock released ... what lock?
+ */
+ if (netif_running(dev)) {
+ int cur_size;
+ int run_tasklet = 0;
+
+ cur_size = atomic_read(&ap->cur_rx_bufs);
+ if (cur_size < RX_LOW_STD_THRES) {
+ if ((cur_size < RX_PANIC_STD_THRES) &&
+ !test_and_set_bit(0, &ap->std_refill_busy)) {
+#ifdef DEBUG
+ printk("low on std buffers %i\n", cur_size);
+#endif
+ ace_load_std_rx_ring(dev,
+ RX_RING_SIZE - cur_size);
+ } else
+ run_tasklet = 1;
+ }
+
+ if (!ACE_IS_TIGON_I(ap)) {
+ cur_size = atomic_read(&ap->cur_mini_bufs);
+ if (cur_size < RX_LOW_MINI_THRES) {
+ if ((cur_size < RX_PANIC_MINI_THRES) &&
+ !test_and_set_bit(0,
+ &ap->mini_refill_busy)) {
+#ifdef DEBUG
+ printk("low on mini buffers %i\n",
+ cur_size);
+#endif
+ ace_load_mini_rx_ring(dev,
+ RX_MINI_SIZE - cur_size);
+ } else
+ run_tasklet = 1;
+ }
+ }
+
+ if (ap->jumbo) {
+ cur_size = atomic_read(&ap->cur_jumbo_bufs);
+ if (cur_size < RX_LOW_JUMBO_THRES) {
+ if ((cur_size < RX_PANIC_JUMBO_THRES) &&
+ !test_and_set_bit(0,
+ &ap->jumbo_refill_busy)){
+#ifdef DEBUG
+ printk("low on jumbo buffers %i\n",
+ cur_size);
+#endif
+ ace_load_jumbo_rx_ring(dev,
+ RX_JUMBO_SIZE - cur_size);
+ } else
+ run_tasklet = 1;
+ }
+ }
+ if (run_tasklet && !ap->tasklet_pending) {
+ ap->tasklet_pending = 1;
+ tasklet_schedule(&ap->ace_tasklet);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ace_open(struct net_device *dev)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ struct cmd cmd;
+
+ if (!(ap->fw_running)) {
+ printk(KERN_WARNING "%s: Firmware not running!\n", dev->name);
+ return -EBUSY;
+ }
+
+ writel(dev->mtu + ETH_HLEN + 4, &regs->IfMtu);
+
+ cmd.evt = C_CLEAR_STATS;
+ cmd.code = 0;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+
+ cmd.evt = C_HOST_STATE;
+ cmd.code = C_C_STACK_UP;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+
+ if (ap->jumbo &&
+ !test_and_set_bit(0, &ap->jumbo_refill_busy))
+ ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
+
+ if (dev->flags & IFF_PROMISC) {
+ cmd.evt = C_SET_PROMISC_MODE;
+ cmd.code = C_C_PROMISC_ENABLE;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+
+ ap->promisc = 1;
+ } else
+ ap->promisc = 0;
+ ap->mcast_all = 0;
+
+#if 0
+ cmd.evt = C_LNK_NEGOTIATION;
+ cmd.code = 0;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+#endif
+
+ netif_start_queue(dev);
+
+ /*
+ * Setup the bottom half rx ring refill handler
+ */
+ tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev);
+ return 0;
+}
+
+
+static int ace_close(struct net_device *dev)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ struct cmd cmd;
+ unsigned long flags;
+ short i;
+
+ /*
+ * Without (or before) releasing irq and stopping hardware, this
+ * is absolute nonsense, by the way. It will be reset instantly
+ * by the first irq.
+ */
+ netif_stop_queue(dev);
+
+
+ if (ap->promisc) {
+ cmd.evt = C_SET_PROMISC_MODE;
+ cmd.code = C_C_PROMISC_DISABLE;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+ ap->promisc = 0;
+ }
+
+ cmd.evt = C_HOST_STATE;
+ cmd.code = C_C_STACK_DOWN;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+
+ tasklet_kill(&ap->ace_tasklet);
+
+ /*
+ * Make sure one CPU is not processing packets while
+ * buffers are being released by another.
+ */
+
+ local_irq_save(flags);
+ ace_mask_irq(dev);
+
+ for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
+ struct sk_buff *skb;
+ struct tx_ring_info *info;
+
+ info = ap->skb->tx_skbuff + i;
+ skb = info->skb;
+
+ if (dma_unmap_len(info, maplen)) {
+ if (ACE_IS_TIGON_I(ap)) {
+ /* NB: TIGON_1 is special, tx_ring is in io space */
+ struct tx_desc __iomem *tx;
+ tx = (__force struct tx_desc __iomem *) &ap->tx_ring[i];
+ writel(0, &tx->addr.addrhi);
+ writel(0, &tx->addr.addrlo);
+ writel(0, &tx->flagsize);
+ } else
+ memset(ap->tx_ring + i, 0,
+ sizeof(struct tx_desc));
+ pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
+ dma_unmap_len(info, maplen),
+ PCI_DMA_TODEVICE);
+ dma_unmap_len_set(info, maplen, 0);
+ }
+ if (skb) {
+ dev_kfree_skb(skb);
+ info->skb = NULL;
+ }
+ }
+
+ if (ap->jumbo) {
+ cmd.evt = C_RESET_JUMBO_RNG;
+ cmd.code = 0;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+ }
+
+ ace_unmask_irq(dev);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+
+static inline dma_addr_t
+ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
+ struct sk_buff *tail, u32 idx)
+{
+ dma_addr_t mapping;
+ struct tx_ring_info *info;
+
+ mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
+ offset_in_page(skb->data),
+ skb->len, PCI_DMA_TODEVICE);
+
+ info = ap->skb->tx_skbuff + idx;
+ info->skb = tail;
+ dma_unmap_addr_set(info, mapping, mapping);
+ dma_unmap_len_set(info, maplen, skb->len);
+ return mapping;
+}
+
+
+static inline void
+ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr,
+ u32 flagsize, u32 vlan_tag)
+{
+#if !USE_TX_COAL_NOW
+ flagsize &= ~BD_FLG_COAL_NOW;
+#endif
+
+ if (ACE_IS_TIGON_I(ap)) {
+ struct tx_desc __iomem *io = (__force struct tx_desc __iomem *) desc;
+ writel(addr >> 32, &io->addr.addrhi);
+ writel(addr & 0xffffffff, &io->addr.addrlo);
+ writel(flagsize, &io->flagsize);
+ writel(vlan_tag, &io->vlanres);
+ } else {
+ desc->addr.addrhi = addr >> 32;
+ desc->addr.addrlo = addr;
+ desc->flagsize = flagsize;
+ desc->vlanres = vlan_tag;
+ }
+}
+
+
+static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ struct tx_desc *desc;
+ u32 idx, flagsize;
+ unsigned long maxjiff = jiffies + 3*HZ;
+
+restart:
+ idx = ap->tx_prd;
+
+ if (tx_ring_full(ap, ap->tx_ret_csm, idx))
+ goto overflow;
+
+ if (!skb_shinfo(skb)->nr_frags) {
+ dma_addr_t mapping;
+ u32 vlan_tag = 0;
+
+ mapping = ace_map_tx_skb(ap, skb, skb, idx);
+ flagsize = (skb->len << 16) | (BD_FLG_END);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ flagsize |= BD_FLG_TCP_UDP_SUM;
+ if (vlan_tx_tag_present(skb)) {
+ flagsize |= BD_FLG_VLAN_TAG;
+ vlan_tag = vlan_tx_tag_get(skb);
+ }
+ desc = ap->tx_ring + idx;
+ idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
+
+ /* Look at ace_tx_int for explanations. */
+ if (tx_ring_full(ap, ap->tx_ret_csm, idx))
+ flagsize |= BD_FLG_COAL_NOW;
+
+ ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
+ } else {
+ dma_addr_t mapping;
+ u32 vlan_tag = 0;
+ int i, len = 0;
+
+ mapping = ace_map_tx_skb(ap, skb, NULL, idx);
+ flagsize = (skb_headlen(skb) << 16);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ flagsize |= BD_FLG_TCP_UDP_SUM;
+ if (vlan_tx_tag_present(skb)) {
+ flagsize |= BD_FLG_VLAN_TAG;
+ vlan_tag = vlan_tx_tag_get(skb);
+ }
+
+ ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);
+
+ idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ struct tx_ring_info *info;
+
+ len += frag->size;
+ info = ap->skb->tx_skbuff + idx;
+ desc = ap->tx_ring + idx;
+
+ mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0,
+ frag->size,
+ DMA_TO_DEVICE);
+
+ flagsize = (frag->size << 16);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ flagsize |= BD_FLG_TCP_UDP_SUM;
+ idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
+
+ if (i == skb_shinfo(skb)->nr_frags - 1) {
+ flagsize |= BD_FLG_END;
+ if (tx_ring_full(ap, ap->tx_ret_csm, idx))
+ flagsize |= BD_FLG_COAL_NOW;
+
+ /*
+ * Only the last fragment frees
+ * the skb!
+ */
+ info->skb = skb;
+ } else {
+ info->skb = NULL;
+ }
+ dma_unmap_addr_set(info, mapping, mapping);
+ dma_unmap_len_set(info, maplen, frag->size);
+ ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
+ }
+ }
+
+ wmb();
+ ap->tx_prd = idx;
+ ace_set_txprd(regs, ap, idx);
+
+ if (flagsize & BD_FLG_COAL_NOW) {
+ netif_stop_queue(dev);
+
+ /*
+ * A TX-descriptor producer (an IRQ) might have gotten
+ * between, making the ring free again. Since xmit is
+ * serialized, this is the only situation we have to
+ * re-test.
+ */
+ if (!tx_ring_full(ap, ap->tx_ret_csm, idx))
+ netif_wake_queue(dev);
+ }
+
+ return NETDEV_TX_OK;
+
+overflow:
+ /*
+	 * This race condition is unavoidable with lock-free drivers.
+	 * The queue is woken up _before_ tx_prd is advanced, so we can
+	 * enter hard_start_xmit too early, while the tx ring still looks
+	 * closed. This happens only ~1-4 times per 100000 packets, so we
+	 * can afford to loop here, syncing with the other CPU. An
+	 * additional wmb() in ace_tx_intr would probably help as well.
+	 *
+	 * Note that the race is relieved by reserving one more entry in
+	 * the tx ring than is strictly necessary (see the original non-SG
+	 * driver). With SG, however, we would need to reserve
+	 * 2*MAX_SKB_FRAGS+1 entries, which is already overkill.
+	 *
+	 * The alternative is to return NETDEV_TX_BUSY without throttling
+	 * the queue; that only makes the loop longer with no added benefit.
+ */
+ if (time_before(jiffies, maxjiff)) {
+ barrier();
+ cpu_relax();
+ goto restart;
+ }
+
+ /* The ring is stuck full. */
+ printk(KERN_WARNING "%s: Transmit ring stuck full\n", dev->name);
+ return NETDEV_TX_BUSY;
+}
+
+
+static int ace_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+
+ if (new_mtu > ACE_JUMBO_MTU)
+ return -EINVAL;
+
+ writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu);
+ dev->mtu = new_mtu;
+
+ if (new_mtu > ACE_STD_MTU) {
+ if (!(ap->jumbo)) {
+ printk(KERN_INFO "%s: Enabling Jumbo frame "
+ "support\n", dev->name);
+ ap->jumbo = 1;
+ if (!test_and_set_bit(0, &ap->jumbo_refill_busy))
+ ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
+ ace_set_rxtx_parms(dev, 1);
+ }
+ } else {
+ while (test_and_set_bit(0, &ap->jumbo_refill_busy));
+ ace_sync_irq(dev->irq);
+ ace_set_rxtx_parms(dev, 0);
+ if (ap->jumbo) {
+ struct cmd cmd;
+
+ cmd.evt = C_RESET_JUMBO_RNG;
+ cmd.code = 0;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+ }
+ }
+
+ return 0;
+}
+
+static int ace_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ u32 link;
+
+ memset(ecmd, 0, sizeof(struct ethtool_cmd));
+ ecmd->supported =
+ (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg | SUPPORTED_FIBRE);
+
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ link = readl(&regs->GigLnkState);
+ if (link & LNK_1000MB)
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ else {
+ link = readl(&regs->FastLnkState);
+ if (link & LNK_100MB)
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
+ else if (link & LNK_10MB)
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
+ else
+ ethtool_cmd_speed_set(ecmd, 0);
+ }
+ if (link & LNK_FULL_DUPLEX)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+
+ if (link & LNK_NEGOTIATE)
+ ecmd->autoneg = AUTONEG_ENABLE;
+ else
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+#if 0
+ /*
+ * Current struct ethtool_cmd is insufficient
+ */
+ ecmd->trace = readl(&regs->TuneTrace);
+
+ ecmd->txcoal = readl(&regs->TuneTxCoalTicks);
+ ecmd->rxcoal = readl(&regs->TuneRxCoalTicks);
+#endif
+ ecmd->maxtxpkt = readl(&regs->TuneMaxTxDesc);
+ ecmd->maxrxpkt = readl(&regs->TuneMaxRxDesc);
+
+ return 0;
+}
+
+static int ace_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ u32 link, speed;
+
+ link = readl(&regs->GigLnkState);
+ if (link & LNK_1000MB)
+ speed = SPEED_1000;
+ else {
+ link = readl(&regs->FastLnkState);
+ if (link & LNK_100MB)
+ speed = SPEED_100;
+ else if (link & LNK_10MB)
+ speed = SPEED_10;
+ else
+ speed = SPEED_100;
+ }
+
+ link = LNK_ENABLE | LNK_1000MB | LNK_100MB | LNK_10MB |
+ LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL;
+ if (!ACE_IS_TIGON_I(ap))
+ link |= LNK_TX_FLOW_CTL_Y;
+ if (ecmd->autoneg == AUTONEG_ENABLE)
+ link |= LNK_NEGOTIATE;
+ if (ethtool_cmd_speed(ecmd) != speed) {
+ link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB);
+ switch (ethtool_cmd_speed(ecmd)) {
+ case SPEED_1000:
+ link |= LNK_1000MB;
+ break;
+ case SPEED_100:
+ link |= LNK_100MB;
+ break;
+ case SPEED_10:
+ link |= LNK_10MB;
+ break;
+ }
+ }
+
+ if (ecmd->duplex == DUPLEX_FULL)
+ link |= LNK_FULL_DUPLEX;
+
+ if (link != ap->link) {
+ struct cmd cmd;
+ printk(KERN_INFO "%s: Renegotiating link state\n",
+ dev->name);
+
+ ap->link = link;
+ writel(link, &regs->TuneLink);
+ if (!ACE_IS_TIGON_I(ap))
+ writel(link, &regs->TuneFastLink);
+ wmb();
+
+ cmd.evt = C_LNK_NEGOTIATION;
+ cmd.code = 0;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+ }
+ return 0;
+}
+
+static void ace_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct ace_private *ap = netdev_priv(dev);
+
+ strlcpy(info->driver, "acenic", sizeof(info->driver));
+ snprintf(info->version, sizeof(info->version), "%i.%i.%i",
+ ap->firmware_major, ap->firmware_minor,
+ ap->firmware_fix);
+
+ if (ap->pdev)
+ strlcpy(info->bus_info, pci_name(ap->pdev),
+ sizeof(info->bus_info));
+}
+
+/*
+ * Set the hardware MAC address.
+ */
+static int ace_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+	struct sockaddr *addr = p;
+ u8 *da;
+ struct cmd cmd;
+
+	if (netif_running(dev))
+ return -EBUSY;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ da = (u8 *)dev->dev_addr;
+
+ writel(da[0] << 8 | da[1], &regs->MacAddrHi);
+ writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5],
+ &regs->MacAddrLo);
+
+ cmd.evt = C_SET_MAC_ADDR;
+ cmd.code = 0;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+
+ return 0;
+}
+
+
+static void ace_set_multicast_list(struct net_device *dev)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ struct cmd cmd;
+
+ if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) {
+ cmd.evt = C_SET_MULTICAST_MODE;
+ cmd.code = C_C_MCAST_ENABLE;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+ ap->mcast_all = 1;
+ } else if (ap->mcast_all) {
+ cmd.evt = C_SET_MULTICAST_MODE;
+ cmd.code = C_C_MCAST_DISABLE;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+ ap->mcast_all = 0;
+ }
+
+ if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) {
+ cmd.evt = C_SET_PROMISC_MODE;
+ cmd.code = C_C_PROMISC_ENABLE;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+ ap->promisc = 1;
+	} else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) {
+ cmd.evt = C_SET_PROMISC_MODE;
+ cmd.code = C_C_PROMISC_DISABLE;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+ ap->promisc = 0;
+ }
+
+ /*
+	 * For the time being multicast relies on the upper layers
+	 * filtering it properly. The firmware does not allow one to
+	 * set the entire multicast list at once and keeping track of
+	 * it here is going to be messy.
+ */
+ if (!netdev_mc_empty(dev) && !ap->mcast_all) {
+ cmd.evt = C_SET_MULTICAST_MODE;
+ cmd.code = C_C_MCAST_ENABLE;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+	} else if (!ap->mcast_all) {
+ cmd.evt = C_SET_MULTICAST_MODE;
+ cmd.code = C_C_MCAST_DISABLE;
+ cmd.idx = 0;
+ ace_issue_cmd(regs, &cmd);
+ }
+}
+
+
+static struct net_device_stats *ace_get_stats(struct net_device *dev)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_mac_stats __iomem *mac_stats =
+ (struct ace_mac_stats __iomem *)ap->regs->Stats;
+
+ dev->stats.rx_missed_errors = readl(&mac_stats->drop_space);
+ dev->stats.multicast = readl(&mac_stats->kept_mc);
+ dev->stats.collisions = readl(&mac_stats->coll);
+
+ return &dev->stats;
+}
+
+
+static void __devinit ace_copy(struct ace_regs __iomem *regs, const __be32 *src,
+ u32 dest, int size)
+{
+ void __iomem *tdest;
+ short tsize, i;
+
+ if (size <= 0)
+ return;
+
+ while (size > 0) {
+ tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
+ min_t(u32, size, ACE_WINDOW_SIZE));
+ tdest = (void __iomem *) &regs->Window +
+ (dest & (ACE_WINDOW_SIZE - 1));
+ writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
+ for (i = 0; i < (tsize / 4); i++) {
+ /* Firmware is big-endian */
+ writel(be32_to_cpup(src), tdest);
+ src++;
+ tdest += 4;
+ dest += 4;
+ size -= 4;
+ }
+ }
+}
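+
+/*
+ * Worked example for the window arithmetic above (an illustration, not
+ * extra driver logic): with ACE_WINDOW_SIZE == 0x800, a chunk starting
+ * at dest == 0x47f8 gives tsize == (~0x47f8 & 0x7ff) + 1 == 8, so only
+ * 8 bytes are copied before WinBase is rewritten to select the next
+ * 2KiB window at 0x4800; the inner min_t() against `size' shortens the
+ * final chunk.
+ */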
+
+
+static void __devinit ace_clear(struct ace_regs __iomem *regs, u32 dest, int size)
+{
+ void __iomem *tdest;
+ short tsize = 0, i;
+
+ if (size <= 0)
+ return;
+
+ while (size > 0) {
+ tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
+ min_t(u32, size, ACE_WINDOW_SIZE));
+ tdest = (void __iomem *) &regs->Window +
+ (dest & (ACE_WINDOW_SIZE - 1));
+ writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
+
+ for (i = 0; i < (tsize / 4); i++) {
+ writel(0, tdest + i*4);
+ }
+
+ dest += tsize;
+ size -= tsize;
+ }
+}
+
+
+/*
+ * Download the firmware into the SRAM on the NIC
+ *
+ * This operation requires the NIC to be halted and is performed with
+ * interrupts disabled and with the spinlock held.
+ */
+static int __devinit ace_load_firmware(struct net_device *dev)
+{
+ const struct firmware *fw;
+ const char *fw_name = "acenic/tg2.bin";
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ const __be32 *fw_data;
+ u32 load_addr;
+ int ret;
+
+ if (!(readl(&regs->CpuCtrl) & CPU_HALTED)) {
+ printk(KERN_ERR "%s: trying to download firmware while the "
+ "CPU is running!\n", ap->name);
+ return -EFAULT;
+ }
+
+ if (ACE_IS_TIGON_I(ap))
+ fw_name = "acenic/tg1.bin";
+
+ ret = request_firmware(&fw, fw_name, &ap->pdev->dev);
+ if (ret) {
+ printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
+ ap->name, fw_name);
+ return ret;
+ }
+
+ fw_data = (void *)fw->data;
+
+ /* Firmware blob starts with version numbers, followed by
+ load and start address. Remainder is the blob to be loaded
+ contiguously from load address. We don't bother to represent
+ the BSS/SBSS sections any more, since we were clearing the
+ whole thing anyway. */
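+	/*
+	 * Layout as consumed below (byte offsets into fw->data; a sketch
+	 * derived from the parsing code, not a formal spec):
+	 *
+	 *   0x00  version bytes: major, minor, fix
+	 *   0x04  __be32 firmware start address
+	 *   0x08  __be32 load address
+	 *   0x0c  image, copied contiguously to the load address
+	 */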
+ ap->firmware_major = fw->data[0];
+ ap->firmware_minor = fw->data[1];
+ ap->firmware_fix = fw->data[2];
+
+ ap->firmware_start = be32_to_cpu(fw_data[1]);
+ if (ap->firmware_start < 0x4000 || ap->firmware_start >= 0x80000) {
+ printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n",
+ ap->name, ap->firmware_start, fw_name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ load_addr = be32_to_cpu(fw_data[2]);
+ if (load_addr < 0x4000 || load_addr >= 0x80000) {
+ printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n",
+ ap->name, load_addr, fw_name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Do not try to clear more than 512KiB or we end up seeing
+ * funny things on NICs with only 512KiB SRAM
+ */
+ ace_clear(regs, 0x2000, 0x80000-0x2000);
+ ace_copy(regs, &fw_data[3], load_addr, fw->size-12);
+ out:
+ release_firmware(fw);
+ return ret;
+}
+
+
+/*
+ * The eeprom on the AceNIC is an Atmel i2c EEPROM.
+ *
+ * Accessing the EEPROM is `interesting' to say the least - don't read
+ * this code right after dinner.
+ *
+ * This is all about black magic and bit-banging the device .... I
+ * wonder in what hospital they have put the guy who designed the i2c
+ * specs.
+ *
+ * Oh yes, this is only the beginning!
+ *
+ * Thanks to Stevarino Webinski for helping track down the bugs in
+ * the i2c readout code by beta testing all my hacks.
+ */
+static void __devinit eeprom_start(struct ace_regs __iomem *regs)
+{
+ u32 local;
+
+ readl(&regs->LocalCtrl);
+ udelay(ACE_SHORT_DELAY);
+ local = readl(&regs->LocalCtrl);
+ local |= EEPROM_DATA_OUT | EEPROM_WRITE_ENABLE;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ local |= EEPROM_CLK_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ local &= ~EEPROM_DATA_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ local &= ~EEPROM_CLK_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+}
+
+
+static void __devinit eeprom_prep(struct ace_regs __iomem *regs, u8 magic)
+{
+ short i;
+ u32 local;
+
+ udelay(ACE_SHORT_DELAY);
+ local = readl(&regs->LocalCtrl);
+ local &= ~EEPROM_DATA_OUT;
+ local |= EEPROM_WRITE_ENABLE;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+
+ for (i = 0; i < 8; i++, magic <<= 1) {
+ udelay(ACE_SHORT_DELAY);
+ if (magic & 0x80)
+ local |= EEPROM_DATA_OUT;
+ else
+ local &= ~EEPROM_DATA_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+
+ udelay(ACE_SHORT_DELAY);
+ local |= EEPROM_CLK_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ local &= ~(EEPROM_CLK_OUT | EEPROM_DATA_OUT);
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ }
+}
+
+
+static int __devinit eeprom_check_ack(struct ace_regs __iomem *regs)
+{
+ int state;
+ u32 local;
+
+ local = readl(&regs->LocalCtrl);
+ local &= ~EEPROM_WRITE_ENABLE;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_LONG_DELAY);
+ local |= EEPROM_CLK_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ /* sample data in middle of high clk */
+ state = (readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0;
+ udelay(ACE_SHORT_DELAY);
+ mb();
+ writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+
+ return state;
+}
+
+
+static void __devinit eeprom_stop(struct ace_regs __iomem *regs)
+{
+ u32 local;
+
+ udelay(ACE_SHORT_DELAY);
+ local = readl(&regs->LocalCtrl);
+ local |= EEPROM_WRITE_ENABLE;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ local &= ~EEPROM_DATA_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ local |= EEPROM_CLK_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ local |= EEPROM_DATA_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_LONG_DELAY);
+ local &= ~EEPROM_CLK_OUT;
+ writel(local, &regs->LocalCtrl);
+ mb();
+}
+
+
+/*
+ * Read a whole byte from the EEPROM.
+ */
+static int __devinit read_eeprom_byte(struct net_device *dev,
+ unsigned long offset)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+ unsigned long flags;
+ u32 local;
+ int result = 0;
+ short i;
+
+ /*
+	 * Don't take interrupts on this CPU while bit banging
+ * the %#%#@$ I2C device
+ */
+ local_irq_save(flags);
+
+ eeprom_start(regs);
+
+ eeprom_prep(regs, EEPROM_WRITE_SELECT);
+ if (eeprom_check_ack(regs)) {
+ local_irq_restore(flags);
+ printk(KERN_ERR "%s: Unable to sync eeprom\n", ap->name);
+ result = -EIO;
+ goto eeprom_read_error;
+ }
+
+ eeprom_prep(regs, (offset >> 8) & 0xff);
+ if (eeprom_check_ack(regs)) {
+ local_irq_restore(flags);
+ printk(KERN_ERR "%s: Unable to set address byte 0\n",
+ ap->name);
+ result = -EIO;
+ goto eeprom_read_error;
+ }
+
+ eeprom_prep(regs, offset & 0xff);
+ if (eeprom_check_ack(regs)) {
+ local_irq_restore(flags);
+ printk(KERN_ERR "%s: Unable to set address byte 1\n",
+ ap->name);
+ result = -EIO;
+ goto eeprom_read_error;
+ }
+
+ eeprom_start(regs);
+ eeprom_prep(regs, EEPROM_READ_SELECT);
+ if (eeprom_check_ack(regs)) {
+ local_irq_restore(flags);
+ printk(KERN_ERR "%s: Unable to set READ_SELECT\n",
+ ap->name);
+ result = -EIO;
+ goto eeprom_read_error;
+ }
+
+ for (i = 0; i < 8; i++) {
+ local = readl(&regs->LocalCtrl);
+ local &= ~EEPROM_WRITE_ENABLE;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ udelay(ACE_LONG_DELAY);
+ mb();
+ local |= EEPROM_CLK_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ /* sample data mid high clk */
+ result = (result << 1) |
+ ((readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0);
+ udelay(ACE_SHORT_DELAY);
+ mb();
+ local = readl(&regs->LocalCtrl);
+ local &= ~EEPROM_CLK_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ udelay(ACE_SHORT_DELAY);
+ mb();
+ if (i == 7) {
+ local |= EEPROM_WRITE_ENABLE;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ }
+ }
+
+ local |= EEPROM_DATA_OUT;
+ writel(local, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ writel(readl(&regs->LocalCtrl) | EEPROM_CLK_OUT, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ udelay(ACE_LONG_DELAY);
+ writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
+ readl(&regs->LocalCtrl);
+ mb();
+ udelay(ACE_SHORT_DELAY);
+ eeprom_stop(regs);
+
+ local_irq_restore(flags);
+ out:
+ return result;
+
+ eeprom_read_error:
+ printk(KERN_ERR "%s: Unable to read eeprom byte 0x%02lx\n",
+ ap->name, offset);
+ goto out;
+}
diff --git a/drivers/net/ethernet/alteon/acenic.h b/drivers/net/ethernet/alteon/acenic.h
new file mode 100644
index 000000000000..51c486cfbb8c
--- /dev/null
+++ b/drivers/net/ethernet/alteon/acenic.h
@@ -0,0 +1,790 @@
+#ifndef _ACENIC_H_
+#define _ACENIC_H_
+#include <linux/interrupt.h>
+
+
+/*
+ * Generate a TX index update each time the TX ring is closed.
+ * Normally this is not useful, because it results in more dma (and irqs
+ * without TX_COAL_INTS_ONLY).
+ */
+#define USE_TX_COAL_NOW 0
+
+/*
+ * Addressing:
+ *
+ * The Tigon uses 64-bit host addresses, regardless of their actual
+ * length, and it expects a big-endian format. For 32 bit systems the
+ * upper 32 bits of the address are simply ignored (zero), however for
+ * little endian 64 bit systems (Alpha) this looks strange with the
+ * two parts of the address word being swapped.
+ *
+ * The addresses are split in two 32 bit words for all architectures
+ * as some of them are in PCI shared memory and it is necessary to use
+ * readl/writel to access them.
+ *
+ * The addressing code is derived from Pete Wyckoff's work, but
+ * modified to deal properly with readl/writel usage.
+ */
+
+struct ace_regs {
+ u32 pad0[16]; /* PCI control registers */
+
+ u32 HostCtrl; /* 0x40 */
+ u32 LocalCtrl;
+
+ u32 pad1[2];
+
+ u32 MiscCfg; /* 0x50 */
+
+ u32 pad2[2];
+
+ u32 PciState;
+
+ u32 pad3[2]; /* 0x60 */
+
+ u32 WinBase;
+ u32 WinData;
+
+ u32 pad4[12]; /* 0x70 */
+
+ u32 DmaWriteState; /* 0xa0 */
+ u32 pad5[3];
+ u32 DmaReadState; /* 0xb0 */
+
+ u32 pad6[26];
+
+ u32 AssistState;
+
+ u32 pad7[8]; /* 0x120 */
+
+ u32 CpuCtrl; /* 0x140 */
+ u32 Pc;
+
+ u32 pad8[3];
+
+ u32 SramAddr; /* 0x154 */
+ u32 SramData;
+
+ u32 pad9[49];
+
+ u32 MacRxState; /* 0x220 */
+
+ u32 pad10[7];
+
+ u32 CpuBCtrl; /* 0x240 */
+ u32 PcB;
+
+ u32 pad11[3];
+
+ u32 SramBAddr; /* 0x254 */
+ u32 SramBData;
+
+ u32 pad12[105];
+
+ u32 pad13[32]; /* 0x400 */
+ u32 Stats[32];
+
+ u32 Mb0Hi; /* 0x500 */
+ u32 Mb0Lo;
+ u32 Mb1Hi;
+ u32 CmdPrd;
+ u32 Mb2Hi;
+ u32 TxPrd;
+ u32 Mb3Hi;
+ u32 RxStdPrd;
+ u32 Mb4Hi;
+ u32 RxJumboPrd;
+ u32 Mb5Hi;
+ u32 RxMiniPrd;
+ u32 Mb6Hi;
+ u32 Mb6Lo;
+ u32 Mb7Hi;
+ u32 Mb7Lo;
+ u32 Mb8Hi;
+ u32 Mb8Lo;
+ u32 Mb9Hi;
+ u32 Mb9Lo;
+ u32 MbAHi;
+ u32 MbALo;
+ u32 MbBHi;
+ u32 MbBLo;
+ u32 MbCHi;
+ u32 MbCLo;
+ u32 MbDHi;
+ u32 MbDLo;
+ u32 MbEHi;
+ u32 MbELo;
+ u32 MbFHi;
+ u32 MbFLo;
+
+ u32 pad14[32];
+
+ u32 MacAddrHi; /* 0x600 */
+ u32 MacAddrLo;
+ u32 InfoPtrHi;
+ u32 InfoPtrLo;
+ u32 MultiCastHi; /* 0x610 */
+ u32 MultiCastLo;
+ u32 ModeStat;
+ u32 DmaReadCfg;
+ u32 DmaWriteCfg; /* 0x620 */
+ u32 TxBufRat;
+ u32 EvtCsm;
+ u32 CmdCsm;
+ u32 TuneRxCoalTicks;/* 0x630 */
+ u32 TuneTxCoalTicks;
+ u32 TuneStatTicks;
+ u32 TuneMaxTxDesc;
+ u32 TuneMaxRxDesc; /* 0x640 */
+ u32 TuneTrace;
+ u32 TuneLink;
+ u32 TuneFastLink;
+ u32 TracePtr; /* 0x650 */
+ u32 TraceStrt;
+ u32 TraceLen;
+ u32 IfIdx;
+ u32 IfMtu; /* 0x660 */
+ u32 MaskInt;
+ u32 GigLnkState;
+ u32 FastLnkState;
+ u32 pad16[4]; /* 0x670 */
+ u32 RxRetCsm; /* 0x680 */
+
+ u32 pad17[31];
+
+ u32 CmdRng[64]; /* 0x700 */
+ u32 Window[0x200];
+};
+
+
+typedef struct {
+ u32 addrhi;
+ u32 addrlo;
+} aceaddr;
+
+
+#define ACE_WINDOW_SIZE 0x800
+
+#define ACE_JUMBO_MTU 9000
+#define ACE_STD_MTU 1500
+
+#define ACE_TRACE_SIZE 0x8000
+
+/*
+ * Host control register bits.
+ */
+
+#define IN_INT 0x01
+#define CLR_INT 0x02
+#define HW_RESET 0x08
+#define BYTE_SWAP 0x10
+#define WORD_SWAP 0x20
+#define MASK_INTS 0x40
+
+/*
+ * Local control register bits.
+ */
+
+#define EEPROM_DATA_IN 0x800000
+#define EEPROM_DATA_OUT 0x400000
+#define EEPROM_WRITE_ENABLE 0x200000
+#define EEPROM_CLK_OUT 0x100000
+
+#define EEPROM_BASE 0xa0000000
+
+#define EEPROM_WRITE_SELECT 0xa0
+#define EEPROM_READ_SELECT 0xa1
+
+#define SRAM_BANK_512K 0x200
+
+
+/*
+ * udelay() values for when clocking the eeprom
+ */
+#define ACE_SHORT_DELAY 2
+#define ACE_LONG_DELAY 4
+
+
+/*
+ * Misc Config bits
+ */
+
+#define SYNC_SRAM_TIMING 0x100000
+
+
+/*
+ * CPU state bits.
+ */
+
+#define CPU_RESET 0x01
+#define CPU_TRACE 0x02
+#define CPU_PROM_FAILED 0x10
+#define CPU_HALT 0x00010000
+#define CPU_HALTED 0xffff0000
+
+
+/*
+ * PCI State bits.
+ */
+
+#define DMA_READ_MAX_4 0x04
+#define DMA_READ_MAX_16 0x08
+#define DMA_READ_MAX_32 0x0c
+#define DMA_READ_MAX_64 0x10
+#define DMA_READ_MAX_128 0x14
+#define DMA_READ_MAX_256 0x18
+#define DMA_READ_MAX_1K 0x1c
+#define DMA_WRITE_MAX_4 0x20
+#define DMA_WRITE_MAX_16 0x40
+#define DMA_WRITE_MAX_32 0x60
+#define DMA_WRITE_MAX_64 0x80
+#define DMA_WRITE_MAX_128 0xa0
+#define DMA_WRITE_MAX_256 0xc0
+#define DMA_WRITE_MAX_1K 0xe0
+#define DMA_READ_WRITE_MASK 0xfc
+#define MEM_READ_MULTIPLE 0x00020000
+#define PCI_66MHZ 0x00080000
+#define PCI_32BIT 0x00100000
+#define DMA_WRITE_ALL_ALIGN 0x00800000
+#define READ_CMD_MEM 0x06000000
+#define WRITE_CMD_MEM 0x70000000
+
+
+/*
+ * Mode status
+ */
+
+#define ACE_BYTE_SWAP_BD 0x02
+#define ACE_WORD_SWAP_BD 0x04 /* not actually used */
+#define ACE_WARN 0x08
+#define ACE_BYTE_SWAP_DMA 0x10
+#define ACE_NO_JUMBO_FRAG 0x200
+#define ACE_FATAL 0x40000000
+
+
+/*
+ * DMA config
+ */
+
+#define DMA_THRESH_1W 0x10
+#define DMA_THRESH_2W 0x20
+#define DMA_THRESH_4W 0x40
+#define DMA_THRESH_8W 0x80
+#define DMA_THRESH_16W 0x100
+#define DMA_THRESH_32W 0x0 /* not described in doc, but exists. */
+
+
+/*
+ * Tuning parameters
+ */
+
+#define TICKS_PER_SEC 1000000
+
+
+/*
+ * Link bits
+ */
+
+#define LNK_PREF 0x00008000
+#define LNK_10MB 0x00010000
+#define LNK_100MB 0x00020000
+#define LNK_1000MB 0x00040000
+#define LNK_FULL_DUPLEX 0x00080000
+#define LNK_HALF_DUPLEX 0x00100000
+#define LNK_TX_FLOW_CTL_Y 0x00200000
+#define LNK_NEG_ADVANCED 0x00400000
+#define LNK_RX_FLOW_CTL_Y 0x00800000
+#define LNK_NIC 0x01000000
+#define LNK_JAM 0x02000000
+#define LNK_JUMBO 0x04000000
+#define LNK_ALTEON 0x08000000
+#define LNK_NEG_FCTL 0x10000000
+#define LNK_NEGOTIATE 0x20000000
+#define LNK_ENABLE 0x40000000
+#define LNK_UP 0x80000000
+
+
+/*
+ * Event definitions
+ */
+
+#define EVT_RING_ENTRIES 256
+#define EVT_RING_SIZE (EVT_RING_ENTRIES * sizeof(struct event))
+
+struct event {
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ u32 idx:12;
+ u32 code:12;
+ u32 evt:8;
+#else
+ u32 evt:8;
+ u32 code:12;
+ u32 idx:12;
+#endif
+ u32 pad;
+};
+
+
+/*
+ * Events
+ */
+
+#define E_FW_RUNNING 0x01
+#define E_STATS_UPDATED 0x04
+
+#define E_STATS_UPDATE 0x04
+
+#define E_LNK_STATE 0x06
+#define E_C_LINK_UP 0x01
+#define E_C_LINK_DOWN 0x02
+#define E_C_LINK_10_100 0x03
+
+#define E_ERROR 0x07
+#define E_C_ERR_INVAL_CMD 0x01
+#define E_C_ERR_UNIMP_CMD 0x02
+#define E_C_ERR_BAD_CFG 0x03
+
+#define E_MCAST_LIST 0x08
+#define E_C_MCAST_ADDR_ADD 0x01
+#define E_C_MCAST_ADDR_DEL 0x02
+
+#define E_RESET_JUMBO_RNG 0x09
+
+
+/*
+ * Commands
+ */
+
+#define CMD_RING_ENTRIES 64
+
+struct cmd {
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ u32 idx:12;
+ u32 code:12;
+ u32 evt:8;
+#else
+ u32 evt:8;
+ u32 code:12;
+ u32 idx:12;
+#endif
+};
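+
+/*
+ * A command is thus a single 32 bit word. As an illustration (not an
+ * additional interface), the "stack up" command issued in ace_open(),
+ *
+ *   cmd.evt = C_HOST_STATE; cmd.code = C_C_STACK_UP; cmd.idx = 0;
+ *
+ * packs on a little endian host to (evt << 24) | (code << 12) | idx,
+ * i.e. 0x01001000.
+ */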
+
+
+#define C_HOST_STATE 0x01
+#define C_C_STACK_UP 0x01
+#define C_C_STACK_DOWN 0x02
+
+#define C_FDR_FILTERING 0x02
+#define C_C_FDR_FILT_ENABLE 0x01
+#define C_C_FDR_FILT_DISABLE 0x02
+
+#define C_SET_RX_PRD_IDX 0x03
+#define C_UPDATE_STATS 0x04
+#define C_RESET_JUMBO_RNG 0x05
+#define C_ADD_MULTICAST_ADDR 0x08
+#define C_DEL_MULTICAST_ADDR 0x09
+
+#define C_SET_PROMISC_MODE 0x0a
+#define C_C_PROMISC_ENABLE 0x01
+#define C_C_PROMISC_DISABLE 0x02
+
+#define C_LNK_NEGOTIATION 0x0b
+#define C_C_NEGOTIATE_BOTH 0x00
+#define C_C_NEGOTIATE_GIG 0x01
+#define C_C_NEGOTIATE_10_100 0x02
+
+#define C_SET_MAC_ADDR 0x0c
+#define C_CLEAR_PROFILE 0x0d
+
+#define C_SET_MULTICAST_MODE 0x0e
+#define C_C_MCAST_ENABLE 0x01
+#define C_C_MCAST_DISABLE 0x02
+
+#define C_CLEAR_STATS 0x0f
+#define C_SET_RX_JUMBO_PRD_IDX 0x10
+#define C_REFRESH_STATS 0x11
+
+
+/*
+ * Descriptor flags
+ */
+#define BD_FLG_TCP_UDP_SUM 0x01
+#define BD_FLG_IP_SUM 0x02
+#define BD_FLG_END 0x04
+#define BD_FLG_MORE 0x08
+#define BD_FLG_JUMBO 0x10
+#define BD_FLG_UCAST 0x20
+#define BD_FLG_MCAST 0x40
+#define BD_FLG_BCAST 0x60
+#define BD_FLG_TYP_MASK 0x60
+#define BD_FLG_IP_FRAG 0x80
+#define BD_FLG_IP_FRAG_END 0x100
+#define BD_FLG_VLAN_TAG 0x200
+#define BD_FLG_FRAME_ERROR 0x400
+#define BD_FLG_COAL_NOW 0x800
+#define BD_FLG_MINI 0x1000
+
+
+/*
+ * Ring Control block flags
+ */
+#define RCB_FLG_TCP_UDP_SUM 0x01
+#define RCB_FLG_IP_SUM 0x02
+#define RCB_FLG_NO_PSEUDO_HDR 0x08
+#define RCB_FLG_VLAN_ASSIST 0x10
+#define RCB_FLG_COAL_INT_ONLY 0x20
+#define RCB_FLG_TX_HOST_RING 0x40
+#define RCB_FLG_IEEE_SNAP_SUM 0x80
+#define RCB_FLG_EXT_RX_BD 0x100
+#define RCB_FLG_RNG_DISABLE 0x200
+
+
+/*
+ * TX ring - maximum TX ring entries for Tigon I's is 128
+ */
+#define MAX_TX_RING_ENTRIES 256
+#define TIGON_I_TX_RING_ENTRIES 128
+#define TX_RING_SIZE (MAX_TX_RING_ENTRIES * sizeof(struct tx_desc))
+#define TX_RING_BASE 0x3800
+
+struct tx_desc {
+ aceaddr addr;
+ u32 flagsize;
+#if 0
+/*
+ * This is in PCI shared mem and must be accessed with readl/writel
+ * real layout is:
+ */
+#if __LITTLE_ENDIAN
+ u16 flags;
+ u16 size;
+ u16 vlan;
+ u16 reserved;
+#else
+ u16 size;
+ u16 flags;
+ u16 reserved;
+ u16 vlan;
+#endif
+#endif
+ u32 vlanres;
+};
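+
+/*
+ * Example encoding (illustrative only): a 60 byte single fragment
+ * frame as built in ace_start_xmit() has
+ *
+ *   flagsize = (60 << 16) | BD_FLG_END == 0x003c0004
+ *
+ * i.e. the length sits in the upper 16 bits and the BD_FLG_* bits in
+ * the lower 16, matching the commented-out layout above.
+ */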
+
+
+#define RX_STD_RING_ENTRIES 512
+#define RX_STD_RING_SIZE (RX_STD_RING_ENTRIES * sizeof(struct rx_desc))
+
+#define RX_JUMBO_RING_ENTRIES 256
+#define RX_JUMBO_RING_SIZE (RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc))
+
+#define RX_MINI_RING_ENTRIES 1024
+#define RX_MINI_RING_SIZE (RX_MINI_RING_ENTRIES * sizeof(struct rx_desc))
+
+#define RX_RETURN_RING_ENTRIES 2048
+#define RX_RETURN_RING_SIZE (RX_RETURN_RING_ENTRIES * \
+ sizeof(struct rx_desc))
+
+struct rx_desc {
+ aceaddr addr;
+#ifdef __LITTLE_ENDIAN
+ u16 size;
+ u16 idx;
+#else
+ u16 idx;
+ u16 size;
+#endif
+#ifdef __LITTLE_ENDIAN
+ u16 flags;
+ u16 type;
+#else
+ u16 type;
+ u16 flags;
+#endif
+#ifdef __LITTLE_ENDIAN
+ u16 tcp_udp_csum;
+ u16 ip_csum;
+#else
+ u16 ip_csum;
+ u16 tcp_udp_csum;
+#endif
+#ifdef __LITTLE_ENDIAN
+ u16 vlan;
+ u16 err_flags;
+#else
+ u16 err_flags;
+ u16 vlan;
+#endif
+ u32 reserved;
+	u32 opaque;
+};
+
+
+/*
+ * This struct is shared with the NIC firmware.
+ */
+struct ring_ctrl {
+ aceaddr rngptr;
+#ifdef __LITTLE_ENDIAN
+ u16 flags;
+ u16 max_len;
+#else
+ u16 max_len;
+ u16 flags;
+#endif
+ u32 pad;
+};
+
+
+struct ace_mac_stats {
+ u32 excess_colls;
+ u32 coll_1;
+ u32 coll_2;
+ u32 coll_3;
+ u32 coll_4;
+ u32 coll_5;
+ u32 coll_6;
+ u32 coll_7;
+ u32 coll_8;
+ u32 coll_9;
+ u32 coll_10;
+ u32 coll_11;
+ u32 coll_12;
+ u32 coll_13;
+ u32 coll_14;
+ u32 coll_15;
+ u32 late_coll;
+ u32 defers;
+ u32 crc_err;
+ u32 underrun;
+ u32 crs_err;
+ u32 pad[3];
+ u32 drop_ula;
+ u32 drop_mc;
+ u32 drop_fc;
+ u32 drop_space;
+ u32 coll;
+ u32 kept_bc;
+ u32 kept_mc;
+ u32 kept_uc;
+};
+
+
+struct ace_info {
+ union {
+ u32 stats[256];
+ } s;
+ struct ring_ctrl evt_ctrl;
+ struct ring_ctrl cmd_ctrl;
+ struct ring_ctrl tx_ctrl;
+ struct ring_ctrl rx_std_ctrl;
+ struct ring_ctrl rx_jumbo_ctrl;
+ struct ring_ctrl rx_mini_ctrl;
+ struct ring_ctrl rx_return_ctrl;
+ aceaddr evt_prd_ptr;
+ aceaddr rx_ret_prd_ptr;
+ aceaddr tx_csm_ptr;
+ aceaddr stats2_ptr;
+};
+
+
+struct ring_info {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+
+/*
+ * Funny... As soon as we add maplen on alpha, it starts to work
+ * much slower. Hmm... is it because the struct no longer fits in one
+ * cacheline? Hence tx_ring_info is kept separate from ring_info.
+ */
+struct tx_ring_info {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_LEN(maplen);
+};
+
+
+/*
+ * struct ace_skb holding the rings of skb's. This is an awful lot of
+ * pointers, but I don't see any smarter way to do this efficiently ;-(
+ */
+struct ace_skb
+{
+ struct tx_ring_info tx_skbuff[MAX_TX_RING_ENTRIES];
+ struct ring_info rx_std_skbuff[RX_STD_RING_ENTRIES];
+ struct ring_info rx_mini_skbuff[RX_MINI_RING_ENTRIES];
+ struct ring_info rx_jumbo_skbuff[RX_JUMBO_RING_ENTRIES];
+};
+
+
+/*
+ * Struct private for the AceNIC.
+ *
+ * Elements are grouped so variables used by the tx handling goes
+ * together, and will go into the same cache lines etc. in order to
+ * avoid cache line contention between the rx and tx handling on SMP.
+ *
+ * Frequently accessed variables are put at the beginning of the
+ * struct to help the compiler generate better/shorter code.
+ */
+struct ace_private
+{
+ struct ace_info *info;
+ struct ace_regs __iomem *regs; /* register base */
+ struct ace_skb *skb;
+ dma_addr_t info_dma; /* 32/64 bit */
+
+ int version, link;
+ int promisc, mcast_all;
+
+ /*
+ * TX elements
+ */
+ struct tx_desc *tx_ring;
+ u32 tx_prd;
+ volatile u32 tx_ret_csm;
+ int tx_ring_entries;
+
+ /*
+ * RX elements
+ */
+ unsigned long std_refill_busy
+ __attribute__ ((aligned (SMP_CACHE_BYTES)));
+ unsigned long mini_refill_busy, jumbo_refill_busy;
+ atomic_t cur_rx_bufs;
+ atomic_t cur_mini_bufs;
+ atomic_t cur_jumbo_bufs;
+ u32 rx_std_skbprd, rx_mini_skbprd, rx_jumbo_skbprd;
+ u32 cur_rx;
+
+ struct rx_desc *rx_std_ring;
+ struct rx_desc *rx_jumbo_ring;
+ struct rx_desc *rx_mini_ring;
+ struct rx_desc *rx_return_ring;
+
+ int tasklet_pending, jumbo;
+ struct tasklet_struct ace_tasklet;
+
+ struct event *evt_ring;
+
+ volatile u32 *evt_prd, *rx_ret_prd, *tx_csm;
+
+ dma_addr_t tx_ring_dma; /* 32/64 bit */
+ dma_addr_t rx_ring_base_dma;
+ dma_addr_t evt_ring_dma;
+ dma_addr_t evt_prd_dma, rx_ret_prd_dma, tx_csm_dma;
+
+ unsigned char *trace_buf;
+ struct pci_dev *pdev;
+ struct net_device *next;
+ volatile int fw_running;
+ int board_idx;
+ u16 pci_command;
+ u8 pci_latency;
+ const char *name;
+#ifdef INDEX_DEBUG
+ spinlock_t debug_lock
+ __attribute__ ((aligned (SMP_CACHE_BYTES)));
+ u32 last_tx, last_std_rx, last_mini_rx;
+#endif
+ int pci_using_dac;
+ u8 firmware_major;
+ u8 firmware_minor;
+ u8 firmware_fix;
+ u32 firmware_start;
+};
+
+
+#define TX_RESERVED MAX_SKB_FRAGS
+
+static inline int tx_space (struct ace_private *ap, u32 csm, u32 prd)
+{
+ return (csm - prd - 1) & (ACE_TX_RING_ENTRIES(ap) - 1);
+}
+
+#define tx_free(ap) tx_space(ap, (ap)->tx_ret_csm, (ap)->tx_prd)
+#define tx_ring_full(ap, csm, prd) (tx_space(ap, csm, prd) <= TX_RESERVED)
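+
+/*
+ * Worked example (illustrative): with ACE_TX_RING_ENTRIES(ap) == 256,
+ * csm == prd means an empty ring and tx_space() == 255 -- one slot is
+ * sacrificed so that a completely full ring (tx_space() == 0) can be
+ * told apart from an empty one. tx_ring_full() reports full once no
+ * more than TX_RESERVED == MAX_SKB_FRAGS slots remain, so a worst case
+ * skb (head plus MAX_SKB_FRAGS fragments) always fits when the ring is
+ * not reported full.
+ */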
+
+static inline void set_aceaddr(aceaddr *aa, dma_addr_t addr)
+{
+ u64 baddr = (u64) addr;
+ aa->addrlo = baddr & 0xffffffff;
+ aa->addrhi = baddr >> 32;
+ wmb();
+}
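+
+/*
+ * For example (illustrative values), a dma_addr_t of 0x0000000212345678
+ * is stored as addrhi == 0x2 and addrlo == 0x12345678; on a 32 bit
+ * platform addrhi simply ends up zero, as described in the addressing
+ * comment above.
+ */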
+
+
+static inline void ace_set_txprd(struct ace_regs __iomem *regs,
+ struct ace_private *ap, u32 value)
+{
+#ifdef INDEX_DEBUG
+ unsigned long flags;
+ spin_lock_irqsave(&ap->debug_lock, flags);
+ writel(value, &regs->TxPrd);
+ if (value == ap->last_tx)
+ printk(KERN_ERR "AceNIC RACE ALERT! writing identical value "
+ "to tx producer (%i)\n", value);
+ ap->last_tx = value;
+ spin_unlock_irqrestore(&ap->debug_lock, flags);
+#else
+ writel(value, &regs->TxPrd);
+#endif
+ wmb();
+}
+
+
+static inline void ace_mask_irq(struct net_device *dev)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+
+ if (ACE_IS_TIGON_I(ap))
+ writel(1, &regs->MaskInt);
+ else
+ writel(readl(&regs->HostCtrl) | MASK_INTS, &regs->HostCtrl);
+
+ ace_sync_irq(dev->irq);
+}
+
+
+static inline void ace_unmask_irq(struct net_device *dev)
+{
+ struct ace_private *ap = netdev_priv(dev);
+ struct ace_regs __iomem *regs = ap->regs;
+
+ if (ACE_IS_TIGON_I(ap))
+ writel(0, &regs->MaskInt);
+ else
+ writel(readl(&regs->HostCtrl) & ~MASK_INTS, &regs->HostCtrl);
+}
+
+
+/*
+ * Prototypes
+ */
+static int ace_init(struct net_device *dev);
+static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs);
+static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs);
+static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs);
+static irqreturn_t ace_interrupt(int irq, void *dev_id);
+static int ace_load_firmware(struct net_device *dev);
+static int ace_open(struct net_device *dev);
+static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static int ace_close(struct net_device *dev);
+static void ace_tasklet(unsigned long dev);
+static void ace_dump_trace(struct ace_private *ap);
+static void ace_set_multicast_list(struct net_device *dev);
+static int ace_change_mtu(struct net_device *dev, int new_mtu);
+static int ace_set_mac_addr(struct net_device *dev, void *p);
+static void ace_set_rxtx_parms(struct net_device *dev, int jumbo);
+static int ace_allocate_descriptors(struct net_device *dev);
+static void ace_free_descriptors(struct net_device *dev);
+static void ace_init_cleanup(struct net_device *dev);
+static struct net_device_stats *ace_get_stats(struct net_device *dev);
+static int read_eeprom_byte(struct net_device *dev, unsigned long offset);
+
+#endif /* _ACENIC_H_ */
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
new file mode 100644
index 000000000000..60b35fb5f524
--- /dev/null
+++ b/drivers/net/ethernet/amd/7990.c
@@ -0,0 +1,662 @@
+/*
+ * 7990.c -- LANCE ethernet IC generic routines.
+ * This is an attempt to separate out the bits of various ethernet
+ * drivers that are common because they all use the AMD 7990 LANCE
+ * (Local Area Network Controller for Ethernet) chip.
+ *
+ * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
+ *
+ * Most of this stuff was obtained by looking at other LANCE drivers,
+ * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful.
+ * NB: this was made easy by the fact that Jes Sorensen had cleaned up
+ * most of a2065 and sunlance with the aim of merging them, so the
+ * common code was pretty obvious.
+ */
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/route.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <asm/irq.h>
+/* Used for the temporary inet entries and routing */
+#include <linux/socket.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/pgtable.h>
+#ifdef CONFIG_HP300
+#include <asm/blinken.h>
+#endif
+
+#include "7990.h"
+
+#define WRITERAP(lp,x) out_be16(lp->base + LANCE_RAP, (x))
+#define WRITERDP(lp,x) out_be16(lp->base + LANCE_RDP, (x))
+#define READRDP(lp) in_be16(lp->base + LANCE_RDP)
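+
+/*
+ * The LANCE is programmed through just these two ports: write a CSR
+ * number to RAP to select it, then move data through RDP. E.g. (as in
+ * lance_reset() below):
+ *
+ *   WRITERAP(lp, LE_CSR0);
+ *   WRITERDP(lp, LE_C0_STOP);
+ */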
+
+#if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE)
+#include "hplance.h"
+
+#undef WRITERAP
+#undef WRITERDP
+#undef READRDP
+
+#if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE)
+
+/* Lossage Factor Nine, Mr Sulu. */
+#define WRITERAP(lp,x) (lp->writerap(lp,x))
+#define WRITERDP(lp,x) (lp->writerdp(lp,x))
+#define READRDP(lp) (lp->readrdp(lp))
+
+#else
+
+/* These inlines can be used if only CONFIG_HPLANCE is defined */
+static inline void WRITERAP(struct lance_private *lp, __u16 value)
+{
+ do {
+ out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+}
+
+static inline void WRITERDP(struct lance_private *lp, __u16 value)
+{
+ do {
+ out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+}
+
+static inline __u16 READRDP(struct lance_private *lp)
+{
+ __u16 value;
+ do {
+ value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+ return value;
+}
+
+#endif
+#endif /* CONFIG_HPLANCE || CONFIG_HPLANCE_MODULE */
+
+/* debugging output macros, various flavours */
+/* #define TEST_HITS */
+#ifdef UNDEF
+#define PRINT_RINGS() \
+do { \
+ int t; \
+ for (t=0; t < RX_RING_SIZE; t++) { \
+ printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n",\
+ t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0,\
+ ib->brx_ring[t].length,\
+ ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits);\
+ }\
+ for (t=0; t < TX_RING_SIZE; t++) { \
+ printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n",\
+ t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0,\
+ ib->btx_ring[t].length,\
+ ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits);\
+ }\
+} while (0)
+#else
+#define PRINT_RINGS()
+#endif
+
+/* Load the CSR registers. The LANCE has to be STOPped when we do this! */
+static void load_csrs (struct lance_private *lp)
+{
+ volatile struct lance_init_block *aib = lp->lance_init_block;
+ int leptr;
+
+ leptr = LANCE_ADDR (aib);
+
+ WRITERAP(lp, LE_CSR1); /* load address of init block */
+ WRITERDP(lp, leptr & 0xFFFF);
+ WRITERAP(lp, LE_CSR2);
+ WRITERDP(lp, leptr >> 16);
+ WRITERAP(lp, LE_CSR3);
+ WRITERDP(lp, lp->busmaster_regval); /* set byteswap/ALEctrl/byte ctrl */
+
+ /* Point back to csr0 */
+ WRITERAP(lp, LE_CSR0);
+}
+
+/* #define to 0 or 1 appropriately */
+#define DEBUG_IRING 0
+/* Set up the Lance Rx and Tx rings and the init block */
+static void lance_init_ring (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
+ int leptr;
+ int i;
+
+ aib = lp->lance_init_block;
+
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+	ib->mode = LE_MO_PROM; /* promiscuous mode; Tx & Rx enabled */
+
+ /* Copy the ethernet address to the lance init block
+ * Notice that we do a byteswap if we're big endian.
+ * [I think this is the right criterion; at least, sunlance,
+ * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
+ * However, the datasheet says that the BSWAP bit doesn't affect
+ * the init block, so surely it should be low byte first for
+ * everybody? Um.]
+ * We could define the ib->physaddr as three 16bit values and
+ * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
+ */
+#ifdef __BIG_ENDIAN
+ ib->phys_addr [0] = dev->dev_addr [1];
+ ib->phys_addr [1] = dev->dev_addr [0];
+ ib->phys_addr [2] = dev->dev_addr [3];
+ ib->phys_addr [3] = dev->dev_addr [2];
+ ib->phys_addr [4] = dev->dev_addr [5];
+ ib->phys_addr [5] = dev->dev_addr [4];
+#else
+ for (i=0; i<6; i++)
+ ib->phys_addr[i] = dev->dev_addr[i];
+#endif
+
+ if (DEBUG_IRING)
+ printk ("TX rings:\n");
+
+ lp->tx_full = 0;
+ /* Setup the Tx ring entries */
+ for (i = 0; i < (1<<lp->lance_log_tx_bufs); i++) {
+ leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
+ ib->btx_ring [i].tmd0 = leptr;
+ ib->btx_ring [i].tmd1_hadr = leptr >> 16;
+ ib->btx_ring [i].tmd1_bits = 0;
+ ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
+ ib->btx_ring [i].misc = 0;
+ if (DEBUG_IRING)
+ printk ("%d: 0x%8.8x\n", i, leptr);
+ }
+
+ /* Setup the Rx ring entries */
+ if (DEBUG_IRING)
+ printk ("RX rings:\n");
+ for (i = 0; i < (1<<lp->lance_log_rx_bufs); i++) {
+ leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
+
+ ib->brx_ring [i].rmd0 = leptr;
+ ib->brx_ring [i].rmd1_hadr = leptr >> 16;
+ ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
+ /* 0xf000 == bits that must be one (reserved, presumably) */
+ ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
+ ib->brx_ring [i].mblength = 0;
+ if (DEBUG_IRING)
+ printk ("%d: 0x%8.8x\n", i, leptr);
+ }
+
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->brx_ring);
+ ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
+ ib->rx_ptr = leptr;
+ if (DEBUG_IRING)
+ printk ("RX ptr: %8.8x\n", leptr);
+
+ /* Setup tx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->btx_ring);
+ ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
+ ib->tx_ptr = leptr;
+ if (DEBUG_IRING)
+ printk ("TX ptr: %8.8x\n", leptr);
+
+ /* Clear the multicast filter */
+ ib->filter [0] = 0;
+ ib->filter [1] = 0;
+ PRINT_RINGS();
+}
+
+/* LANCE must be STOPped before we do this, too... */
+static int init_restart_lance (struct lance_private *lp)
+{
+ int i;
+
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_INIT);
+
+ /* Need a hook here for sunlance ledma stuff */
+
+ /* Wait for the lance to complete initialization */
+ for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
+ barrier();
+ if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
+ printk ("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
+ return -1;
+ }
+
+ /* Clear IDON by writing a "1", enable interrupts and start lance */
+ WRITERDP(lp, LE_C0_IDON);
+ WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);
+
+ return 0;
+}
+
+static int lance_reset (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int status;
+
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+
+ load_csrs (lp);
+ lance_init_ring (dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ status = init_restart_lance (lp);
+#ifdef DEBUG_DRIVER
+ printk ("Lance restart=%d\n", status);
+#endif
+ return status;
+}
+
+static int lance_rx (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_rx_desc *rd;
+ unsigned char bits;
+#ifdef TEST_HITS
+ int i;
+#endif
+
+#ifdef TEST_HITS
+ printk ("[");
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (i == lp->rx_new)
+ printk ("%s",
+ ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X");
+ else
+ printk ("%s",
+ ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1");
+ }
+ printk ("]");
+#endif
+#ifdef CONFIG_HP300
+ blinken_leds(0x40, 0);
+#endif
+ WRITERDP(lp, LE_C0_RINT | LE_C0_INEA); /* ack Rx int, reenable ints */
+	for (rd = &ib->brx_ring [lp->rx_new]; /* For each Rx ring entry we own... */
+ !((bits = rd->rmd1_bits) & LE_R1_OWN);
+ rd = &ib->brx_ring [lp->rx_new]) {
+
+		/* Did we get an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ continue;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP) dev->stats.rx_errors++;
+ } else {
+ int len = (rd->mblength & 0xfff) - 4;
+ struct sk_buff *skb = dev_alloc_skb (len+2);
+
+ if (!skb) {
+ printk ("%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+ dev->stats.rx_dropped++;
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ return 0;
+ }
+
+ skb_reserve (skb, 2); /* 16 byte align */
+ skb_put (skb, len); /* make room */
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
+ len);
+ skb->protocol = eth_type_trans (skb, dev);
+ netif_rx (skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ }
+
+ /* Return the packet to the pool */
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ }
+ return 0;
+}
+
+static int lance_tx (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_tx_desc *td;
+ int i, j;
+ int status;
+
+#ifdef CONFIG_HP300
+ blinken_leds(0x80, 0);
+#endif
+ /* csr0 is 2f3 */
+ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
+ /* csr0 is 73 */
+
+ j = lp->tx_old;
+ for (i = j; i != lp->tx_new; i = j) {
+ td = &ib->btx_ring [i];
+
+ /* If we hit a packet not owned by us, stop */
+ if (td->tmd1_bits & LE_T1_OWN)
+ break;
+
+ if (td->tmd1_bits & LE_T1_ERR) {
+ status = td->misc;
+
+ dev->stats.tx_errors++;
+ if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ dev->stats.tx_carrier_errors++;
+ if (lp->auto_select) {
+ lp->tpe = 1 - lp->tpe;
+ printk("%s: Carrier Lost, trying %s\n",
+ dev->name, lp->tpe?"TPE":"AUI");
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring (dev);
+ load_csrs (lp);
+ init_restart_lance (lp);
+ return 0;
+ }
+ }
+
+ /* buffer errors and underflows turn off the transmitter */
+ /* Restart the adapter */
+ if (status & (LE_T3_BUF|LE_T3_UFL)) {
+ dev->stats.tx_fifo_errors++;
+
+ printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring (dev);
+ load_csrs (lp);
+ init_restart_lance (lp);
+ return 0;
+ }
+ } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ td->tmd1_bits &= ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (td->tmd1_bits & LE_T1_EONE)
+ dev->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (td->tmd1_bits & LE_T1_EMORE)
+ dev->stats.collisions += 2;
+
+ dev->stats.tx_packets++;
+ }
+
+ j = (j + 1) & lp->tx_ring_mod_mask;
+ }
+ lp->tx_old = j;
+ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
+ return 0;
+}
+
+static irqreturn_t
+lance_interrupt (int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ int csr0;
+
+ spin_lock (&lp->devlock);
+
+ WRITERAP(lp, LE_CSR0); /* LANCE Controller Status */
+ csr0 = READRDP(lp);
+
+ PRINT_RINGS();
+
+ if (!(csr0 & LE_C0_INTR)) { /* Check if any interrupt has */
+ spin_unlock (&lp->devlock);
+ return IRQ_NONE; /* been generated by the Lance. */
+ }
+
+ /* Acknowledge all the interrupt sources ASAP */
+ WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));
+
+ if ((csr0 & LE_C0_ERR)) {
+ /* Clear the error condition */
+ WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
+ }
+
+ if (csr0 & LE_C0_RINT)
+ lance_rx (dev);
+
+ if (csr0 & LE_C0_TINT)
+ lance_tx (dev);
+
+ /* Log misc errors. */
+ if (csr0 & LE_C0_BABL)
+ dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & LE_C0_MISS)
+ dev->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & LE_C0_MERR) {
+ printk("%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ WRITERDP(lp, LE_C0_STRT);
+ }
+
+ if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
+ lp->tx_full = 0;
+ netif_wake_queue (dev);
+ }
+
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);
+
+ spin_unlock (&lp->devlock);
+ return IRQ_HANDLED;
+}
+
+int lance_open (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int res;
+
+ /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
+ if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
+ return -EAGAIN;
+
+ res = lance_reset(dev);
+ spin_lock_init(&lp->devlock);
+ netif_start_queue (dev);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(lance_open);
+
+int lance_close (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ netif_stop_queue (dev);
+
+ /* Stop the LANCE */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+
+ free_irq(lp->irq, dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lance_close);
+
+void lance_tx_timeout(struct net_device *dev)
+{
+ printk("lance_tx_timeout\n");
+ lance_reset(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue (dev);
+}
+EXPORT_SYMBOL_GPL(lance_tx_timeout);
+
+int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ int entry, skblen, len;
+ static int outs;
+ unsigned long flags;
+
+ if (!TX_BUFFS_AVAIL)
+ return NETDEV_TX_LOCKED;
+
+ netif_stop_queue (dev);
+
+ skblen = skb->len;
+
+#ifdef DEBUG_DRIVER
+ /* dump the packet */
+ {
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ if ((i % 16) == 0)
+ printk ("\n");
+ printk ("%2.2x ", skb->data [i]);
+ }
+ }
+#endif
+ len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
+ entry = lp->tx_new & lp->tx_ring_mod_mask;
+ ib->btx_ring [entry].length = (-len) | 0xf000;
+ ib->btx_ring [entry].misc = 0;
+
+ if (skb->len < ETH_ZLEN)
+ memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
+ skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
+
+ /* Now, give the packet to the lance */
+ ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
+ lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
+
+ outs++;
+ /* Kick the lance: transmit now */
+ WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
+ dev_kfree_skb (skb);
+
+ spin_lock_irqsave (&lp->devlock, flags);
+ if (TX_BUFFS_AVAIL)
+ netif_start_queue (dev);
+ else
+ lp->tx_full = 1;
+ spin_unlock_irqrestore (&lp->devlock, flags);
+
+ return NETDEV_TX_OK;
+}
+EXPORT_SYMBOL_GPL(lance_start_xmit);
+
+/* taken from the depca driver via a2065.c */
+static void lance_load_multicast (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile u16 *mcast_table = (u16 *)&ib->filter;
+ struct netdev_hw_addr *ha;
+ u32 crc;
+
+ /* set all multicast bits */
+	if (dev->flags & IFF_ALLMULTI) {
+ ib->filter [0] = 0xffffffff;
+ ib->filter [1] = 0xffffffff;
+ return;
+ }
+ /* clear the multicast filter */
+ ib->filter [0] = 0;
+ ib->filter [1] = 0;
+
+ /* Add addresses */
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
+ crc = crc >> 26;
+ mcast_table [crc >> 4] |= 1 << (crc & 0xf);
+ }
+}
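+
+/*
+ * Worked example of the hash above (illustrative): ether_crc_le()
+ * yields a 32 bit CRC, of which only the top six bits survive the
+ * crc >> 26, giving a value 0..63. A value of e.g. 42 (0x2a) sets bit
+ * 42 & 0xf == 10 in 16 bit filter word 42 >> 4 == 2.
+ */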
+
+
+void lance_set_multicast (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ int stopped;
+
+ stopped = netif_queue_stopped(dev);
+ if (!stopped)
+ netif_stop_queue (dev);
+
+ while (lp->tx_old != lp->tx_new)
+ schedule();
+
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring (dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ ib->mode |= LE_MO_PROM;
+ } else {
+ ib->mode &= ~LE_MO_PROM;
+ lance_load_multicast (dev);
+ }
+ load_csrs (lp);
+ init_restart_lance (lp);
+
+ if (!stopped)
+ netif_start_queue (dev);
+}
+EXPORT_SYMBOL_GPL(lance_set_multicast);
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+void lance_poll(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ spin_lock (&lp->devlock);
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STRT);
+ spin_unlock (&lp->devlock);
+ lance_interrupt(dev->irq, dev);
+}
+#endif
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
new file mode 100644
index 000000000000..0a5837b96421
--- /dev/null
+++ b/drivers/net/ethernet/amd/7990.h
@@ -0,0 +1,254 @@
+/*
+ * 7990.h -- LANCE ethernet IC generic routines.
+ * This is an attempt to separate out the bits of various ethernet
+ * drivers that are common because they all use the AMD 7990 LANCE
+ * (Local Area Network Controller for Ethernet) chip.
+ *
+ * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
+ *
+ * Most of this stuff was obtained by looking at other LANCE drivers,
+ * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful.
+ */
+
+#ifndef _7990_H
+#define _7990_H
+
+/* The lance only has two register locations. We communicate mostly via memory. */
+#define LANCE_RDP 0 /* Register Data Port */
+#define LANCE_RAP 2 /* Register Address Port */
+
+/* Transmit/receive ring definitions.
+ * We allow the specific drivers to override these defaults if they want to.
+ * NB: according to lance.c, increasing the number of buffers is a waste
+ * of space and reduces the chance that an upper layer will be able to
+ * reorder queued Tx packets based on priority. [Clearly there is a minimum
+ * limit too: too small and we drop rx packets and can't tx at full speed.]
+ * 4+4 seems to be the usual setting; the atarilance driver uses 3 and 5.
+ */
+
+/* Blast! This won't work. The problem is that we can't specify a default
+ * setting because that would cause the lance_init_block struct to be
+ * too long (and overflow the RAM on shared-memory cards like the HP LANCE).
+ */
+#ifndef LANCE_LOG_TX_BUFFERS
+#define LANCE_LOG_TX_BUFFERS 1
+#define LANCE_LOG_RX_BUFFERS 3
+#endif
+
+#define TX_RING_SIZE (1<<LANCE_LOG_TX_BUFFERS)
+#define RX_RING_SIZE (1<<LANCE_LOG_RX_BUFFERS)
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
+#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
+#define PKT_BUFF_SIZE (1544)
+#define RX_BUFF_SIZE PKT_BUFF_SIZE
+#define TX_BUFF_SIZE PKT_BUFF_SIZE
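+
+/* A worked example of the defaults above: LANCE_LOG_TX_BUFFERS == 1 gives
+ * TX_RING_SIZE == 2 and TX_RING_MOD_MASK == 1, so ring indices wrap with
+ * idx = (idx + 1) & TX_RING_MOD_MASK;
+ * TX_RING_LEN_BITS shifts the same log2 count into the top three bits of a
+ * 32-bit ring-pointer word; lance_init_ring() packs it into bits 15:13 of
+ * the 16-bit rx_len/tx_len words instead.
+ */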
+
+/* Each receive buffer is described by a receive message descriptor (RMD) */
+struct lance_rx_desc {
+ volatile unsigned short rmd0; /* low address of packet */
+ volatile unsigned char rmd1_bits; /* descriptor bits */
+ volatile unsigned char rmd1_hadr; /* high address of packet */
+ volatile short length; /* 2s complement (negative!) buffer length */
+ volatile unsigned short mblength; /* Actual number of bytes received */
+};
+
+/* Ditto for TMD: */
+struct lance_tx_desc {
+ volatile unsigned short tmd0; /* low address of packet */
+ volatile unsigned char tmd1_bits; /* descriptor bits */
+ volatile unsigned char tmd1_hadr; /* high address of packet */
+ volatile short length; /* Length is 2s complement (negative)! */
+ volatile unsigned short misc;
+};
+
+/* There are three memory structures accessed by the LANCE:
+ * the initialization block, the receive and transmit descriptor rings,
+ * and the data buffers themselves. In fact we might as well put the
+ * init block, the Tx and Rx rings and the buffers together in memory:
+ */
+struct lance_init_block {
+ volatile unsigned short mode; /* Pre-set mode (reg. 15) */
+ volatile unsigned char phys_addr[6]; /* Physical ethernet address */
+ volatile unsigned filter[2]; /* Multicast filter (64 bits) */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ volatile unsigned short rx_ptr; /* receive descriptor addr */
+ volatile unsigned short rx_len; /* receive len and high addr */
+ volatile unsigned short tx_ptr; /* transmit descriptor addr */
+ volatile unsigned short tx_len; /* transmit len and high addr */
+
+ /* The Tx and Rx ring entries must be aligned on 8-byte boundaries.
+ * This will be true if this whole struct is 8-byte aligned.
+ */
+ volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
+ volatile struct lance_rx_desc brx_ring[RX_RING_SIZE];
+
+ volatile char tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
+ volatile char rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
+ /* we use this just to make the struct big enough that we can move its
+ * start address in order to force alignment to an eight-byte boundary.
+ */
+};
+
+/* This is where we keep all the stuff the driver needs to know about.
+ * I'm definitely unhappy about the mechanism for allowing specific
+ * drivers to add things...
+ */
+struct lance_private
+{
+ char *name;
+ unsigned long base;
+ volatile struct lance_init_block *init_block; /* CPU address of RAM */
+ volatile struct lance_init_block *lance_init_block; /* LANCE address of RAM */
+
+ int rx_new, tx_new;
+ int rx_old, tx_old;
+
+ int lance_log_rx_bufs, lance_log_tx_bufs;
+ int rx_ring_mod_mask, tx_ring_mod_mask;
+
+ int tpe; /* TPE is selected */
+ int auto_select; /* cable-selection is by carrier */
+ unsigned short busmaster_regval;
+
+ unsigned int irq; /* IRQ to register */
+
+ /* This is because the HP LANCE is disgusting and you have to check
+ * a DIO-specific register every time you read/write the LANCE regs :-<
+ * [could we get away with making these some sort of macro?]
+ */
+ void (*writerap)(void *, unsigned short);
+ void (*writerdp)(void *, unsigned short);
+ unsigned short (*readrdp)(void *);
+ spinlock_t devlock;
+ char tx_full;
+};
+
+/*
+ * Am7990 Control and Status Registers
+ */
+#define LE_CSR0 0x0000 /* LANCE Controller Status */
+#define LE_CSR1 0x0001 /* IADR[15:0] (bit 0 == 0, i.e. word aligned) */
+#define LE_CSR2 0x0002 /* IADR[23:16] (high bits reserved) */
+#define LE_CSR3 0x0003 /* Misc */
+
+/*
+ * Bit definitions for CSR0 (LANCE Controller Status)
+ */
+#define LE_C0_ERR 0x8000 /* Error = BABL | CERR | MISS | MERR */
+#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */
+#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */
+#define LE_C0_MISS 0x1000 /* Missed Frame (no rx buffer to put it in) */
+#define LE_C0_MERR 0x0800 /* Memory Error */
+#define LE_C0_RINT 0x0400 /* Receive Interrupt */
+#define LE_C0_TINT 0x0200 /* Transmit Interrupt */
+#define LE_C0_IDON 0x0100 /* Initialization Done */
+#define LE_C0_INTR 0x0080 /* Interrupt Flag
+ = BABL | MISS | MERR | RINT | TINT | IDON */
+#define LE_C0_INEA 0x0040 /* Interrupt Enable */
+#define LE_C0_RXON 0x0020 /* Receive On */
+#define LE_C0_TXON 0x0010 /* Transmit On */
+#define LE_C0_TDMD 0x0008 /* Transmit Demand */
+#define LE_C0_STOP 0x0004 /* Stop */
+#define LE_C0_STRT 0x0002 /* Start */
+#define LE_C0_INIT 0x0001 /* Initialize */
+
+
+/*
+ * Bit definitions for CSR3
+ */
+#define LE_C3_BSWP 0x0004 /* Byte Swap
+ (on for big endian byte order) */
+#define LE_C3_ACON 0x0002 /* ALE Control
+ (on for active low ALE) */
+#define LE_C3_BCON 0x0001 /* Byte Control */
+
+
+/*
+ * Mode Flags
+ */
+#define LE_MO_PROM 0x8000 /* Promiscuous Mode */
+/* these next ones 0x4000 -- 0x0080 are not available on the LANCE 7990,
+ * but they are in NetBSD's am7990.h, presumably for backwards-compatible chips
+ */
+#define LE_MO_DRCVBC 0x4000 /* disable receive broadcast */
+#define LE_MO_DRCVPA 0x2000 /* disable physical address detection */
+#define LE_MO_DLNKTST 0x1000 /* disable link status */
+#define LE_MO_DAPC 0x0800 /* disable automatic polarity correction */
+#define LE_MO_MENDECL 0x0400 /* MENDEC loopback mode */
+#define LE_MO_LRTTSEL 0x0200 /* lower RX threshold / TX mode selection */
+#define LE_MO_PSEL1 0x0100 /* port selection bit1 */
+#define LE_MO_PSEL0 0x0080 /* port selection bit0 */
+/* and this one is from the C-LANCE data sheet... */
+#define LE_MO_EMBA 0x0080 /* Enable Modified Backoff Algorithm
+ (C-LANCE, not original LANCE) */
+#define LE_MO_INTL 0x0040 /* Internal Loopback */
+#define LE_MO_DRTY 0x0020 /* Disable Retry */
+#define LE_MO_FCOLL 0x0010 /* Force Collision */
+#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */
+#define LE_MO_LOOP 0x0004 /* Loopback Enable */
+#define LE_MO_DTX 0x0002 /* Disable Transmitter */
+#define LE_MO_DRX 0x0001 /* Disable Receiver */
+
+
+/*
+ * Receive Flags
+ */
+#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_R1_ERR 0x40 /* Error */
+#define LE_R1_FRA 0x20 /* Framing Error */
+#define LE_R1_OFL 0x10 /* Overflow Error */
+#define LE_R1_CRC 0x08 /* CRC Error */
+#define LE_R1_BUF 0x04 /* Buffer Error */
+#define LE_R1_SOP 0x02 /* Start of Packet */
+#define LE_R1_EOP 0x01 /* End of Packet */
+#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+
+/*
+ * Transmit Flags
+ */
+#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_T1_ERR 0x40 /* Error */
+#define LE_T1_RES 0x20 /* Reserved, LANCE writes this with a zero */
+#define LE_T1_EMORE 0x10 /* More than one retry needed */
+#define LE_T1_EONE 0x08 /* One retry needed */
+#define LE_T1_EDEF 0x04 /* Deferred */
+#define LE_T1_SOP 0x02 /* Start of Packet */
+#define LE_T1_EOP 0x01 /* End of Packet */
+#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+/*
+ * Error Flags
+ */
+#define LE_T3_BUF 0x8000 /* Buffer Error */
+#define LE_T3_UFL 0x4000 /* Underflow Error */
+#define LE_T3_LCOL 0x1000 /* Late Collision */
+#define LE_T3_CLOS 0x0800 /* Loss of Carrier */
+#define LE_T3_RTY 0x0400 /* Retry Error */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */
+
+/* Miscellaneous useful macros */
+
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+ lp->tx_old+lp->tx_ring_mod_mask-lp->tx_new:\
+ lp->tx_old - lp->tx_new-1)
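+
+/* E.g. with a four-entry ring (mod mask 3), tx_old == tx_new means an empty
+ * ring and three free slots: one slot always stays unused so that full and
+ * empty are distinguishable. a2065.c open-codes the same arithmetic as
+ * lance_tx_buffs_avail().
+ */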
+
+/* The LANCE only uses 24 bit addresses. This does the obvious thing. */
+#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
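+/* e.g. LANCE_ADDR(0xfff01000) == 0x00f01000 */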
+
+/* Now the prototypes we export */
+extern int lance_open(struct net_device *dev);
+extern int lance_close (struct net_device *dev);
+extern int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
+extern void lance_set_multicast (struct net_device *dev);
+extern void lance_tx_timeout(struct net_device *dev);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+extern void lance_poll(struct net_device *dev);
+#endif
+
+#endif /* ndef _7990_H */
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
new file mode 100644
index 000000000000..238b537b68fe
--- /dev/null
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -0,0 +1,195 @@
+#
+# AMD network device configuration
+#
+
+config NET_VENDOR_AMD
+ bool "AMD devices"
+ default y
+ depends on DIO || MACH_DECSTATION || MVME147 || ATARI || SUN3 || \
+ SUN3X || SBUS || PCI || ZORRO || (ISA && ISA_DMA_API) || \
+ (ARM && ARCH_EBSA110) || ISA || EISA || MCA || PCMCIA
+ ---help---
+ If you have a network (Ethernet) chipset belonging to this class,
+ say Y.
+
+ Note that the answer to this question does not directly affect
+ the kernel: saying N will just cause the configurator to skip all
+ the questions regarding AMD chipsets. If you say Y, you will be asked
+ for your specific chipset/driver in the following questions.
+
+if NET_VENDOR_AMD
+
+config A2065
+ tristate "A2065 support"
+ depends on ZORRO
+ select CRC32
+ ---help---
+ If you have a Commodore A2065 Ethernet adapter, say Y. Otherwise,
+ say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called a2065.
+
+config AMD8111_ETH
+ tristate "AMD 8111 (new PCI LANCE) support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ If you have an AMD 8111-based PCI LANCE ethernet card,
+ answer Y here and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called amd8111e.
+
+config LANCE
+ tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
+ depends on ISA && ISA_DMA_API
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Some LinkSys cards are
+ of this type.
+
+ To compile this driver as a module, choose M here: the module
+ will be called lance. This is recommended.
+
+config PCNET32
+ tristate "AMD PCnet32 PCI support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ If you have a PCnet32 or PCnetPCI based network (Ethernet) card,
+ answer Y here and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called pcnet32.
+
+config ARIADNE
+ tristate "Ariadne support"
+ depends on ZORRO
+ ---help---
+ If you have a Village Tronic Ariadne Ethernet adapter, say Y.
+ Otherwise, say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ariadne.
+
+config ARM_AM79C961A
+ bool "ARM EBSA110 AM79C961A support"
+ depends on ARM && ARCH_EBSA110
+ select CRC32
+ ---help---
+ If you wish to compile a kernel for the EBSA-110, then you should
+ always answer Y to this.
+
+config ATARILANCE
+ tristate "Atari LANCE support"
+ depends on ATARI
+ ---help---
+ Say Y to include support for several Atari Ethernet adapters based
+ on the AMD LANCE chipset: RieblCard (with or without battery), or
+ PAMCard VME (also the version by Rhotron, with different addresses).
+
+config DECLANCE
+ tristate "DEC LANCE ethernet controller support"
+ depends on MACH_DECSTATION
+ select CRC32
+ ---help---
+ This driver is for the series of Ethernet controllers produced by
+ DEC (now Compaq) based on the AMD LANCE chipset, including the
+ DEPCA series. (This chipset is better known via the NE2100 cards.)
+
+config DEPCA
+ tristate "DEPCA, DE10x, DE200, DE201, DE202, DE422 support"
+ depends on (ISA || EISA || MCA)
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto> as well as
+ <file:drivers/net/ethernet/amd/depca.c>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called depca.
+
+config HPLANCE
+ bool "HP on-board LANCE support"
+ depends on DIO
+ select CRC32
+ ---help---
+ If you want to use the builtin "LANCE" Ethernet controller on an
+ HP300 machine, say Y here.
+
+config MIPS_AU1X00_ENET
+ tristate "MIPS AU1000 Ethernet support"
+ depends on MIPS_ALCHEMY
+ select PHYLIB
+ select CRC32
+ ---help---
+ If you have an Alchemy Semi AU1X00 based system
+ say Y. Otherwise, say N.
+
+config MVME147_NET
+ tristate "MVME147 (LANCE) Ethernet support"
+ depends on MVME147
+ select CRC32
+ ---help---
+ Support for the on-board Ethernet interface on the Motorola MVME147
+ single-board computer. Say Y here to include the
+ driver for this chip in your kernel.
+ To compile this driver as a module, choose M here.
+
+config PCMCIA_NMCLAN
+ tristate "New Media PCMCIA support"
+ depends on PCMCIA
+ help
+ Say Y here if you intend to attach a New Media Ethernet or LiveWire
+ PCMCIA (PC-card) Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called nmclan_cs. If unsure, say N.
+
+config NI65
+ tristate "NI6510 support"
+ depends on ISA && ISA_DMA_API
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ni65.
+
+config SUN3LANCE
+ tristate "Sun3/Sun3x on-board LANCE support"
+ depends on (SUN3 || SUN3X)
+ ---help---
+ Most Sun3 and Sun3x motherboards (including the 3/50, 3/60 and 3/80)
+ featured an AMD LANCE 10Mbit Ethernet controller on board; say Y
+ here to compile in the Linux driver for this and enable Ethernet.
+ General Linux information on the Sun 3 and 3x series (now
+ discontinued) is at
+ <http://www.angelfire.com/ca2/tech68k/sun3.html>.
+
+ If you're not building a kernel for a Sun 3, say N.
+
+config SUNLANCE
+ tristate "Sun LANCE support"
+ depends on SBUS
+ select CRC32
+ ---help---
+ This driver supports the "le" interface present on all 32-bit Sparc
+ systems, on some older Ultra systems and as an Sbus option. These
+ cards are based on the AMD LANCE chipset, which is better known
+ via the NE2100 cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sunlance.
+
+endif # NET_VENDOR_AMD
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
new file mode 100644
index 000000000000..175caa5328c9
--- /dev/null
+++ b/drivers/net/ethernet/amd/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the AMD network device drivers.
+#
+
+obj-$(CONFIG_A2065) += a2065.o
+obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
+obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o
+obj-$(CONFIG_ARIADNE) += ariadne.o
+obj-$(CONFIG_ATARILANCE) += atarilance.o
+obj-$(CONFIG_DECLANCE) += declance.o
+obj-$(CONFIG_DEPCA) += depca.o
+obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
+obj-$(CONFIG_LANCE) += lance.o
+obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
+obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
+obj-$(CONFIG_PCMCIA_NMCLAN) += nmclan_cs.o
+obj-$(CONFIG_NI65) += ni65.o
+obj-$(CONFIG_PCNET32) += pcnet32.o
+obj-$(CONFIG_SUN3LANCE) += sun3lance.o
+obj-$(CONFIG_SUNLANCE) += sunlance.o
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
new file mode 100644
index 000000000000..825e5d4ef4c3
--- /dev/null
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -0,0 +1,781 @@
+/*
+ * Amiga Linux/68k A2065 Ethernet Driver
+ *
+ * (C) Copyright 1995-2003 by Geert Uytterhoeven <geert@linux-m68k.org>
+ *
+ * Fixes and tips by:
+ * - Janos Farkas (CHEXUM@sparta.banki.hu)
+ * - Jes Degn Soerensen (jds@kom.auc.dk)
+ * - Matt Domsch (Matt_Domsch@dell.com)
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is based on
+ *
+ * ariadne.?: Amiga Linux/68k Ariadne Ethernet Driver
+ * (C) Copyright 1995 by Geert Uytterhoeven,
+ * Peter De Schrijver
+ *
+ * lance.c: An AMD LANCE ethernet driver for linux.
+ * Written 1993-94 by Donald Becker.
+ *
+ * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
+ * Advanced Micro Devices
+ * Publication #16907, Rev. B, Amendment/0, May 1994
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains:
+ *
+ * - an Am7990 Local Area Network Controller for Ethernet (LANCE) with
+ * both 10BASE-2 (thin coax) and AUI (DB-15) connectors
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+/*#define DEBUG*/
+/*#define TEST_HITS*/
+
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/zorro.h>
+#include <linux/bitops.h>
+
+#include <asm/irq.h>
+#include <asm/amigaints.h>
+#include <asm/amigahw.h>
+
+#include "a2065.h"
+
+/* Transmit/Receive Ring Definitions */
+
+#define LANCE_LOG_TX_BUFFERS (2)
+#define LANCE_LOG_RX_BUFFERS (4)
+
+#define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS)
+#define RX_RING_SIZE (1 << LANCE_LOG_RX_BUFFERS)
+
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+
+#define PKT_BUF_SIZE (1544)
+#define RX_BUFF_SIZE PKT_BUF_SIZE
+#define TX_BUFF_SIZE PKT_BUF_SIZE
+
+/* Layout of the Lance's RAM Buffer */
+
+struct lance_init_block {
+ unsigned short mode; /* Pre-set mode (reg. 15) */
+ unsigned char phys_addr[6]; /* Physical ethernet address */
+ unsigned filter[2]; /* Multicast filter. */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ unsigned short rx_ptr; /* receive descriptor addr */
+ unsigned short rx_len; /* receive len and high addr */
+ unsigned short tx_ptr; /* transmit descriptor addr */
+ unsigned short tx_len; /* transmit len and high addr */
+
+ /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
+ struct lance_rx_desc brx_ring[RX_RING_SIZE];
+ struct lance_tx_desc btx_ring[TX_RING_SIZE];
+
+ char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
+ char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
+};
+
+/* Private Device Data */
+
+struct lance_private {
+ char *name;
+ volatile struct lance_regs *ll;
+ volatile struct lance_init_block *init_block; /* Hosts view */
+ volatile struct lance_init_block *lance_init_block; /* Lance view */
+
+ int rx_new, tx_new;
+ int rx_old, tx_old;
+
+ int lance_log_rx_bufs, lance_log_tx_bufs;
+ int rx_ring_mod_mask, tx_ring_mod_mask;
+
+ int tpe; /* cable-selection is TPE */
+ int auto_select; /* cable-selection by carrier */
+ unsigned short busmaster_regval;
+
+#ifdef CONFIG_SUNLANCE
+ struct Linux_SBus_DMA *ledma; /* if set this points to ledma and arch=4m */
+ int burst_sizes; /* ledma SBus burst sizes */
+#endif
+ struct timer_list multicast_timer;
+};
+
+#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
+
+/* Load the CSR registers */
+static void load_csrs(struct lance_private *lp)
+{
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_init_block *aib = lp->lance_init_block;
+ int leptr = LANCE_ADDR(aib);
+
+ ll->rap = LE_CSR1;
+ ll->rdp = (leptr & 0xFFFF);
+ ll->rap = LE_CSR2;
+ ll->rdp = leptr >> 16;
+ ll->rap = LE_CSR3;
+ ll->rdp = lp->busmaster_regval;
+
+ /* Point back to csr0 */
+ ll->rap = LE_CSR0;
+}
+
+/* Setup the Lance Rx and Tx rings */
+static void lance_init_ring(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_init_block *aib = lp->lance_init_block;
+ /* for LANCE_ADDR computations */
+ int leptr;
+ int i;
+
+ /* Lock out other processes while setting up hardware */
+ netif_stop_queue(dev);
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ ib->mode = 0;
+
+ /* Copy the ethernet address to the lance init block
+ * Note that on the sparc you need to swap the ethernet address.
+ */
+ ib->phys_addr[0] = dev->dev_addr[1];
+ ib->phys_addr[1] = dev->dev_addr[0];
+ ib->phys_addr[2] = dev->dev_addr[3];
+ ib->phys_addr[3] = dev->dev_addr[2];
+ ib->phys_addr[4] = dev->dev_addr[5];
+ ib->phys_addr[5] = dev->dev_addr[4];
+
+ /* Setup the Tx ring entries */
+ netdev_dbg(dev, "TX rings:\n");
+ for (i = 0; i < 1 << lp->lance_log_tx_bufs; i++) {
+ leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
+ ib->btx_ring[i].tmd0 = leptr;
+ ib->btx_ring[i].tmd1_hadr = leptr >> 16;
+ ib->btx_ring[i].tmd1_bits = 0;
+ ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */
+ ib->btx_ring[i].misc = 0;
+ if (i < 3)
+ netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
+ }
+
+ /* Setup the Rx ring entries */
+ netdev_dbg(dev, "RX rings:\n");
+ for (i = 0; i < 1 << lp->lance_log_rx_bufs; i++) {
+ leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
+
+ ib->brx_ring[i].rmd0 = leptr;
+ ib->brx_ring[i].rmd1_hadr = leptr >> 16;
+ ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
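+ /* Buffer length is a 16-bit 2s complement negative, e.g.
+ * -1544 == 0xf9f8; the chip requires the top four bits of
+ * this word to read as ones, hence the "| 0xf000".
+ */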
+ ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
+ ib->brx_ring[i].mblength = 0;
+ if (i < 3)
+ netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
+ }
+
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->brx_ring);
+ ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
+ ib->rx_ptr = leptr;
+ netdev_dbg(dev, "RX ptr: %08x\n", leptr);
+
+ /* Setup tx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->btx_ring);
+ ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
+ ib->tx_ptr = leptr;
+ netdev_dbg(dev, "TX ptr: %08x\n", leptr);
+
+ /* Clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+}
+
+static int init_restart_lance(struct lance_private *lp)
+{
+ volatile struct lance_regs *ll = lp->ll;
+ int i;
+
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_INIT;
+
+ /* Wait for the lance to complete initialization */
+ for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++)
+ barrier();
+ if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
+ pr_err("unopened after %d ticks, csr0=%04x\n", i, ll->rdp);
+ return -EIO;
+ }
+
+ /* Clear IDON by writing a "1", enable interrupts and start lance */
+ ll->rdp = LE_C0_IDON;
+ ll->rdp = LE_C0_INEA | LE_C0_STRT;
+
+ return 0;
+}
+
+static int lance_rx(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_rx_desc *rd;
+ unsigned char bits;
+
+#ifdef TEST_HITS
+ int i;
+ char buf[RX_RING_SIZE + 1];
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ char r1_own = ib->brx_ring[i].rmd1_bits & LE_R1_OWN;
+ if (i == lp->rx_new)
+ buf[i] = r1_own ? '_' : 'X';
+ else
+ buf[i] = r1_own ? '.' : '1';
+ }
+ buf[RX_RING_SIZE] = 0;
+
+ pr_debug("RxRing TestHits: [%s]\n", buf);
+#endif
+
+ ll->rdp = LE_C0_RINT | LE_C0_INEA;
+ for (rd = &ib->brx_ring[lp->rx_new];
+ !((bits = rd->rmd1_bits) & LE_R1_OWN);
+ rd = &ib->brx_ring[lp->rx_new]) {
+
+ /* We got an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ continue;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF)
+ dev->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC)
+ dev->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL)
+ dev->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA)
+ dev->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP)
+ dev->stats.rx_errors++;
+ } else {
+ int len = (rd->mblength & 0xfff) - 4;
+ struct sk_buff *skb = dev_alloc_skb(len + 2);
+
+ if (!skb) {
+ netdev_warn(dev, "Memory squeeze, deferring packet\n");
+ dev->stats.rx_dropped++;
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ return 0;
+ }
+
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)&ib->rx_buf[lp->rx_new][0],
+ len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ }
+
+ /* Return the packet to the pool */
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ }
+ return 0;
+}
+
+static int lance_tx(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_tx_desc *td;
+ int i, j;
+ int status;
+
+ /* csr0 is 2f3 */
+ ll->rdp = LE_C0_TINT | LE_C0_INEA;
+ /* csr0 is 73 */
+
+ j = lp->tx_old;
+ for (i = j; i != lp->tx_new; i = j) {
+ td = &ib->btx_ring[i];
+
+ /* If we hit a packet not owned by us, stop */
+ if (td->tmd1_bits & LE_T1_OWN)
+ break;
+
+ if (td->tmd1_bits & LE_T1_ERR) {
+ status = td->misc;
+
+ dev->stats.tx_errors++;
+ if (status & LE_T3_RTY)
+ dev->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL)
+ dev->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ dev->stats.tx_carrier_errors++;
+ if (lp->auto_select) {
+ lp->tpe = 1 - lp->tpe;
+ netdev_err(dev, "Carrier Lost, trying %s\n",
+ lp->tpe ? "TPE" : "AUI");
+ /* Stop the lance */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ return 0;
+ }
+ }
+
+ /* buffer errors and underflows turn off
+ * the transmitter, so restart the adapter
+ */
+ if (status & (LE_T3_BUF | LE_T3_UFL)) {
+ dev->stats.tx_fifo_errors++;
+
+ netdev_err(dev, "Tx: ERR_BUF|ERR_UFL, restarting\n");
+ /* Stop the lance */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ return 0;
+ }
+ } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
+ /* So we don't count the packet more than once. */
+ td->tmd1_bits &= ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (td->tmd1_bits & LE_T1_EONE)
+ dev->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (td->tmd1_bits & LE_T1_EMORE)
+ dev->stats.collisions += 2;
+
+ dev->stats.tx_packets++;
+ }
+
+ j = (j + 1) & lp->tx_ring_mod_mask;
+ }
+ lp->tx_old = j;
+ ll->rdp = LE_C0_TINT | LE_C0_INEA;
+ return 0;
+}
+
+static int lance_tx_buffs_avail(struct lance_private *lp)
+{
+ if (lp->tx_old <= lp->tx_new)
+ return lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new;
+ return lp->tx_old - lp->tx_new - 1;
+}
+
+static irqreturn_t lance_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int csr0;
+
+ ll->rap = LE_CSR0; /* LANCE Controller Status */
+ csr0 = ll->rdp;
+
+ if (!(csr0 & LE_C0_INTR)) /* Check if any interrupt has */
+ return IRQ_NONE; /* been generated by the Lance. */
+
+ /* Acknowledge all the interrupt sources ASAP */
+ ll->rdp = csr0 & ~(LE_C0_INEA | LE_C0_TDMD | LE_C0_STOP | LE_C0_STRT |
+ LE_C0_INIT);
+
+ if (csr0 & LE_C0_ERR) {
+ /* Clear the error condition */
+ ll->rdp = LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | LE_C0_INEA;
+ }
+
+ if (csr0 & LE_C0_RINT)
+ lance_rx(dev);
+
+ if (csr0 & LE_C0_TINT)
+ lance_tx(dev);
+
+ /* Log misc errors. */
+ if (csr0 & LE_C0_BABL)
+ dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & LE_C0_MISS)
+ dev->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & LE_C0_MERR) {
+ netdev_err(dev, "Bus master arbitration failure, status %04x\n",
+ csr0);
+ /* Restart the chip. */
+ ll->rdp = LE_C0_STRT;
+ }
+
+ if (netif_queue_stopped(dev) && lance_tx_buffs_avail(lp) > 0)
+ netif_wake_queue(dev);
+
+ ll->rap = LE_CSR0;
+ ll->rdp = (LE_C0_BABL | LE_C0_CERR | LE_C0_MISS | LE_C0_MERR |
+ LE_C0_IDON | LE_C0_INEA);
+ return IRQ_HANDLED;
+}
+
+static int lance_open(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int ret;
+
+ /* Stop the Lance */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+
+ /* Install the Interrupt handler */
+ ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, IRQF_SHARED,
+ dev->name, dev);
+ if (ret)
+ return ret;
+
+ load_csrs(lp);
+ lance_init_ring(dev);
+
+ netif_start_queue(dev);
+
+ return init_restart_lance(lp);
+}
+
+static int lance_close(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+
+ netif_stop_queue(dev);
+ del_timer_sync(&lp->multicast_timer);
+
+ /* Stop the card */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+
+ free_irq(IRQ_AMIGA_PORTS, dev);
+ return 0;
+}
+
+static inline int lance_reset(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int status;
+
+ /* Stop the lance */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+
+ load_csrs(lp);
+
+ lance_init_ring(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_start_queue(dev);
+
+ status = init_restart_lance(lp);
+ netdev_dbg(dev, "Lance restart=%d\n", status);
+
+ return status;
+}
+
+static void lance_tx_timeout(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+
+ netdev_err(dev, "transmit timed out, status %04x, reset\n", ll->rdp);
+ lance_reset(dev);
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_init_block *ib = lp->init_block;
+ int entry, skblen;
+ int status = NETDEV_TX_OK;
+ unsigned long flags;
+
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ skblen = max_t(unsigned, skb->len, ETH_ZLEN);
+
+ local_irq_save(flags);
+
+ if (!lance_tx_buffs_avail(lp)) {
+ local_irq_restore(flags);
+ return NETDEV_TX_LOCKED;
+ }
+
+#ifdef DEBUG
+ /* dump the packet */
+ print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE,
+ 16, 1, skb->data, 64, true);
+#endif
+ entry = lp->tx_new & lp->tx_ring_mod_mask;
+ ib->btx_ring[entry].length = (-skblen) | 0xf000;
+ ib->btx_ring[entry].misc = 0;
+
+ skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
+
+ /* Now, give the packet to the lance */
+ ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
+ lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
+ dev->stats.tx_bytes += skblen;
+
+ if (lance_tx_buffs_avail(lp) <= 0)
+ netif_stop_queue(dev);
+
+ /* Kick the lance: transmit now */
+ ll->rdp = LE_C0_INEA | LE_C0_TDMD;
+ dev_kfree_skb(skb);
+
+ local_irq_restore(flags);
+
+ return status;
+}
+
+/* taken from the depca driver */
+static void lance_load_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile u16 *mcast_table = (u16 *)&ib->filter;
+ struct netdev_hw_addr *ha;
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI) {
+ ib->filter[0] = 0xffffffff;
+ ib->filter[1] = 0xffffffff;
+ return;
+ }
+ /* clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+
+ /* Add addresses */
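+ /* The 64-bit filter is indexed by the top six bits of the little
+ * endian CRC of each address: crc >> 4 picks one of the four
+ * 16-bit words and crc & 0xf the bit within it.
+ */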
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
+ crc = crc >> 26;
+ mcast_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
+}
+
+static void lance_set_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_regs *ll = lp->ll;
+
+ if (!netif_running(dev))
+ return;
+
+ if (lp->tx_old != lp->tx_new) {
+ mod_timer(&lp->multicast_timer, jiffies + 4);
+ netif_wake_queue(dev);
+ return;
+ }
+
+ netif_stop_queue(dev);
+
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+ lance_init_ring(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ ib->mode |= LE_MO_PROM;
+ } else {
+ ib->mode &= ~LE_MO_PROM;
+ lance_load_multicast(dev);
+ }
+ load_csrs(lp);
+ init_restart_lance(lp);
+ netif_wake_queue(dev);
+}
+
+static int __devinit a2065_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent);
+static void __devexit a2065_remove_one(struct zorro_dev *z);
+
+
+static struct zorro_device_id a2065_zorro_tbl[] __devinitdata = {
+ { ZORRO_PROD_CBM_A2065_1 },
+ { ZORRO_PROD_CBM_A2065_2 },
+ { ZORRO_PROD_AMERISTAR_A2065 },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl);
+
+static struct zorro_driver a2065_driver = {
+ .name = "a2065",
+ .id_table = a2065_zorro_tbl,
+ .probe = a2065_init_one,
+ .remove = __devexit_p(a2065_remove_one),
+};
+
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = lance_open,
+ .ndo_stop = lance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_tx_timeout = lance_tx_timeout,
+ .ndo_set_rx_mode = lance_set_multicast,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int __devinit a2065_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
+{
+ struct net_device *dev;
+ struct lance_private *priv;
+ unsigned long board = z->resource.start;
+ unsigned long base_addr = board + A2065_LANCE;
+ unsigned long mem_start = board + A2065_RAM;
+ struct resource *r1, *r2;
+ int err;
+
+ r1 = request_mem_region(base_addr, sizeof(struct lance_regs),
+ "Am7990");
+ if (!r1)
+ return -EBUSY;
+ r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM");
+ if (!r2) {
+ release_mem_region(base_addr, sizeof(struct lance_regs));
+ return -EBUSY;
+ }
+
+ dev = alloc_etherdev(sizeof(struct lance_private));
+ if (dev == NULL) {
+ release_mem_region(base_addr, sizeof(struct lance_regs));
+ release_mem_region(mem_start, A2065_RAM_SIZE);
+ return -ENOMEM;
+ }
+
+ priv = netdev_priv(dev);
+
+ r1->name = dev->name;
+ r2->name = dev->name;
+
+ dev->dev_addr[0] = 0x00;
+ if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */
+ dev->dev_addr[1] = 0x80;
+ dev->dev_addr[2] = 0x10;
+ } else { /* Ameristar */
+ dev->dev_addr[1] = 0x00;
+ dev->dev_addr[2] = 0x9f;
+ }
+ dev->dev_addr[3] = (z->rom.er_SerialNumber >> 16) & 0xff;
+ dev->dev_addr[4] = (z->rom.er_SerialNumber >> 8) & 0xff;
+ dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff;
+ dev->base_addr = ZTWO_VADDR(base_addr);
+ dev->mem_start = ZTWO_VADDR(mem_start);
+ dev->mem_end = dev->mem_start + A2065_RAM_SIZE;
+
+ priv->ll = (volatile struct lance_regs *)dev->base_addr;
+ priv->init_block = (struct lance_init_block *)dev->mem_start;
+ priv->lance_init_block = (struct lance_init_block *)A2065_RAM;
+ priv->auto_select = 0;
+ priv->busmaster_regval = LE_C3_BSWP;
+
+ priv->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
+ priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
+ priv->rx_ring_mod_mask = RX_RING_MOD_MASK;
+ priv->tx_ring_mod_mask = TX_RING_MOD_MASK;
+
+ dev->netdev_ops = &lance_netdev_ops;
+ dev->watchdog_timeo = 5*HZ;
+ dev->dma = 0;
+
+ init_timer(&priv->multicast_timer);
+ priv->multicast_timer.data = (unsigned long) dev;
+ priv->multicast_timer.function =
+ (void (*)(unsigned long))lance_set_multicast;
+
+ err = register_netdev(dev);
+ if (err) {
+ release_mem_region(base_addr, sizeof(struct lance_regs));
+ release_mem_region(mem_start, A2065_RAM_SIZE);
+ free_netdev(dev);
+ return err;
+ }
+ zorro_set_drvdata(z, dev);
+
+ netdev_info(dev, "A2065 at 0x%08lx, Ethernet Address %pM\n",
+ board, dev->dev_addr);
+
+ return 0;
+}
+
+
+static void __devexit a2065_remove_one(struct zorro_dev *z)
+{
+ struct net_device *dev = zorro_get_drvdata(z);
+
+ unregister_netdev(dev);
+ release_mem_region(ZTWO_PADDR(dev->base_addr),
+ sizeof(struct lance_regs));
+ release_mem_region(ZTWO_PADDR(dev->mem_start), A2065_RAM_SIZE);
+ free_netdev(dev);
+}
+
+static int __init a2065_init_module(void)
+{
+ return zorro_register_driver(&a2065_driver);
+}
+
+static void __exit a2065_cleanup_module(void)
+{
+ zorro_unregister_driver(&a2065_driver);
+}
+
+module_init(a2065_init_module);
+module_exit(a2065_cleanup_module);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/a2065.h b/drivers/net/ethernet/amd/a2065.h
new file mode 100644
index 000000000000..5117759d4e9c
--- /dev/null
+++ b/drivers/net/ethernet/amd/a2065.h
@@ -0,0 +1,173 @@
+/*
+ * Amiga Linux/68k A2065 Ethernet Driver
+ *
+ * (C) Copyright 1995 by Geert Uytterhoeven <geert@linux-m68k.org>
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This program is based on
+ *
+ * ariadne.?: Amiga Linux/68k Ariadne Ethernet Driver
+ * (C) Copyright 1995 by Geert Uytterhoeven,
+ * Peter De Schrijver
+ *
+ * lance.c: An AMD LANCE ethernet driver for linux.
+ * Written 1993-94 by Donald Becker.
+ *
+ * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
+ * Advanced Micro Devices
+ * Publication #16907, Rev. B, Amendment/0, May 1994
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains:
+ *
+ * - an Am7990 Local Area Network Controller for Ethernet (LANCE) with
+ * both 10BASE-2 (thin coax) and AUI (DB-15) connectors
+ */
+
+
+/*
+ * Am7990 Local Area Network Controller for Ethernet (LANCE)
+ */
+
+struct lance_regs {
+ unsigned short rdp; /* Register Data Port */
+ unsigned short rap; /* Register Address Port */
+};
+
+
+/*
+ * Am7990 Control and Status Registers
+ */
+
+#define LE_CSR0 0x0000 /* LANCE Controller Status */
+#define LE_CSR1 0x0001 /* IADR[15:0] */
+#define LE_CSR2 0x0002 /* IADR[23:16] */
+#define LE_CSR3 0x0003 /* Misc */
+
+
+/*
+ * Bit definitions for CSR0 (LANCE Controller Status)
+ */
+
+#define LE_C0_ERR 0x8000 /* Error */
+#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */
+#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */
+#define LE_C0_MISS 0x1000 /* Missed Frame */
+#define LE_C0_MERR 0x0800 /* Memory Error */
+#define LE_C0_RINT 0x0400 /* Receive Interrupt */
+#define LE_C0_TINT 0x0200 /* Transmit Interrupt */
+#define LE_C0_IDON 0x0100 /* Initialization Done */
+#define LE_C0_INTR 0x0080 /* Interrupt Flag */
+#define LE_C0_INEA 0x0040 /* Interrupt Enable */
+#define LE_C0_RXON 0x0020 /* Receive On */
+#define LE_C0_TXON 0x0010 /* Transmit On */
+#define LE_C0_TDMD 0x0008 /* Transmit Demand */
+#define LE_C0_STOP 0x0004 /* Stop */
+#define LE_C0_STRT 0x0002 /* Start */
+#define LE_C0_INIT 0x0001 /* Initialize */
+
+
+/*
+ * Bit definitions for CSR3
+ */
+
+#define LE_C3_BSWP 0x0004 /* Byte Swap
+ (on for big endian byte order) */
+#define LE_C3_ACON 0x0002 /* ALE Control
+ (on for active low ALE) */
+#define LE_C3_BCON 0x0001 /* Byte Control */
+
+
+/*
+ * Mode Flags
+ */
+
+#define LE_MO_PROM 0x8000 /* Promiscuous Mode */
+#define LE_MO_INTL 0x0040 /* Internal Loopback */
+#define LE_MO_DRTY 0x0020 /* Disable Retry */
+#define LE_MO_FCOLL 0x0010 /* Force Collision */
+#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */
+#define LE_MO_LOOP 0x0004 /* Loopback Enable */
+#define LE_MO_DTX 0x0002 /* Disable Transmitter */
+#define LE_MO_DRX 0x0001 /* Disable Receiver */
+
+
+struct lance_rx_desc {
+ unsigned short rmd0; /* low address of packet */
+ unsigned char rmd1_bits; /* descriptor bits */
+ unsigned char rmd1_hadr; /* high address of packet */
+ short length; /* 2s complement (negative!) buffer length */
+ unsigned short mblength; /* Actual number of bytes received */
+};
+
+struct lance_tx_desc {
+ unsigned short tmd0; /* low address of packet */
+ unsigned char tmd1_bits; /* descriptor bits */
+ unsigned char tmd1_hadr; /* high address of packet */
+ short length; /* Length is 2s complement (negative)! */
+ unsigned short misc;
+};
+
+
+/*
+ * Receive Flags
+ */
+
+#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_R1_ERR 0x40 /* Error */
+#define LE_R1_FRA 0x20 /* Framing Error */
+#define LE_R1_OFL 0x10 /* Overflow Error */
+#define LE_R1_CRC 0x08 /* CRC Error */
+#define LE_R1_BUF 0x04 /* Buffer Error */
+#define LE_R1_SOP 0x02 /* Start of Packet */
+#define LE_R1_EOP 0x01 /* End of Packet */
+#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+
+/*
+ * Transmit Flags
+ */
+
+#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_T1_ERR 0x40 /* Error */
+#define LE_T1_RES 0x20 /* Reserved,
+ LANCE writes this with a zero */
+#define LE_T1_EMORE 0x10 /* More than one retry needed */
+#define LE_T1_EONE 0x08 /* One retry needed */
+#define LE_T1_EDEF 0x04 /* Deferred */
+#define LE_T1_SOP 0x02 /* Start of Packet */
+#define LE_T1_EOP 0x01 /* End of Packet */
+#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+
+/*
+ * Error Flags
+ */
+
+#define LE_T3_BUF 0x8000 /* Buffer Error */
+#define LE_T3_UFL 0x4000 /* Underflow Error */
+#define LE_T3_LCOL 0x1000 /* Late Collision */
+#define LE_T3_CLOS 0x0800 /* Loss of Carrier */
+#define LE_T3_RTY 0x0400 /* Retry Error */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */
+
+
+/*
+ * A2065 Expansion Board Structure
+ */
+
+#define A2065_LANCE 0x4000
+
+#define A2065_RAM 0x8000
+#define A2065_RAM_SIZE 0x8000
+
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
new file mode 100644
index 000000000000..7d5ded80d2d7
--- /dev/null
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -0,0 +1,770 @@
+/*
+ * linux/drivers/net/ethernet/amd/am79c961a.c
+ *
+ * by Russell King <rmk@arm.linux.org.uk> 1995-2001.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Derived from various things including skeleton.c
+ *
+ * This is a special driver for the am79c961A Lance chip used in the
+ * Intel (formerly Digital Equipment Corp) EBSA110 platform. Please
+ * note that this cannot be built as a module (it doesn't make sense).
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <asm/system.h>
+
+#define TX_BUFFERS 15
+#define RX_BUFFERS 25
+
+#include "am79c961a.h"
+
+static irqreturn_t
+am79c961_interrupt (int irq, void *dev_id);
+
+static unsigned int net_debug = NET_DEBUG;
+
+static const char version[] =
+ "am79c961 ethernet driver (C) 1995-2001 Russell King v0.04\n";
+
+/* --------------------------------------------------------------------------- */
+
+#ifdef __arm__
+static void write_rreg(u_long base, u_int reg, u_int val)
+{
+ asm volatile(
+ "str%?h %1, [%2] @ NET_RAP\n\t"
+ "str%?h %0, [%2, #-4] @ NET_RDP"
+ :
+ : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
+}
+
+static inline unsigned short read_rreg(u_long base_addr, u_int reg)
+{
+ unsigned short v;
+ asm volatile(
+ "str%?h %1, [%2] @ NET_RAP\n\t"
+ "ldr%?h %0, [%2, #-4] @ NET_RDP"
+ : "=r" (v)
+ : "r" (reg), "r" (ISAIO_BASE + 0x0464));
+ return v;
+}
+
+static inline void write_ireg(u_long base, u_int reg, u_int val)
+{
+ asm volatile(
+ "str%?h %1, [%2] @ NET_RAP\n\t"
+ "str%?h %0, [%2, #8] @ NET_IDP"
+ :
+ : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
+}
+
+static inline unsigned short read_ireg(u_long base_addr, u_int reg)
+{
+ u_short v;
+ asm volatile(
+ "str%?h %1, [%2] @ NAT_RAP\n\t"
+ "ldr%?h %0, [%2, #8] @ NET_IDP\n\t"
+ : "=r" (v)
+ : "r" (reg), "r" (ISAIO_BASE + 0x0464));
+ return v;
+}
+
+#define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1))
+#define am_readword(dev,off) __raw_readw(ISAMEM_BASE + ((off) << 1))
+
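+/* The board presents the chip's 16-bit shared RAM as halfwords spaced on
+ * word boundaries, hence the "offset << 1" scaling above and the
+ * post-increment of 4 in the copy loops below.
+ */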
+static void
+am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
+{
+ offset = ISAMEM_BASE + (offset << 1);
+ length = (length + 1) & ~1;
+ if ((int)buf & 2) {
+ asm volatile("str%?h %2, [%0], #4"
+ : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
+ buf += 2;
+ length -= 2;
+ }
+ while (length > 8) {
+ register unsigned int tmp asm("r2"), tmp2 asm("r3");
+ asm volatile(
+ "ldm%?ia %0!, {%1, %2}"
+ : "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
+ length -= 8;
+ asm volatile(
+ "str%?h %1, [%0], #4\n\t"
+ "mov%? %1, %1, lsr #16\n\t"
+ "str%?h %1, [%0], #4\n\t"
+ "str%?h %2, [%0], #4\n\t"
+ "mov%? %2, %2, lsr #16\n\t"
+ "str%?h %2, [%0], #4"
+ : "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
+ }
+ while (length > 0) {
+ asm volatile("str%?h %2, [%0], #4"
+ : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
+ buf += 2;
+ length -= 2;
+ }
+}
+
+static void
+am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
+{
+ offset = ISAMEM_BASE + (offset << 1);
+ length = (length + 1) & ~1;
+ if ((int)buf & 2) {
+ unsigned int tmp;
+ asm volatile(
+ "ldr%?h %2, [%0], #4\n\t"
+ "str%?b %2, [%1], #1\n\t"
+ "mov%? %2, %2, lsr #8\n\t"
+ "str%?b %2, [%1], #1"
+ : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf));
+ length -= 2;
+ }
+ while (length > 8) {
+ register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
+ asm volatile(
+ "ldr%?h %2, [%0], #4\n\t"
+ "ldr%?h %4, [%0], #4\n\t"
+ "ldr%?h %3, [%0], #4\n\t"
+ "orr%? %2, %2, %4, lsl #16\n\t"
+ "ldr%?h %4, [%0], #4\n\t"
+ "orr%? %3, %3, %4, lsl #16\n\t"
+ "stm%?ia %1!, {%2, %3}"
+ : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3)
+ : "0" (offset), "1" (buf));
+ length -= 8;
+ }
+ while (length > 0) {
+ unsigned int tmp;
+ asm volatile(
+ "ldr%?h %2, [%0], #4\n\t"
+ "str%?b %2, [%1], #1\n\t"
+ "mov%? %2, %2, lsr #8\n\t"
+ "str%?b %2, [%1], #1"
+ : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf));
+ length -= 2;
+ }
+}
+#else
+#error Not compatible
+#endif
+
+static int
+am79c961_ramtest(struct net_device *dev, unsigned int val)
+{
+ unsigned char *buffer = kmalloc (65536, GFP_KERNEL);
+ int i, error = 0, errorcount = 0;
+
+ if (!buffer)
+ return 0;
+ memset (buffer, val, 65536);
+ am_writebuffer(dev, 0, buffer, 65536);
+ memset (buffer, val ^ 255, 65536);
+ am_readbuffer(dev, 0, buffer, 65536);
+ for (i = 0; i < 65536; i++) {
+ if (buffer[i] != val && !error) {
+ printk ("%s: buffer error (%02X %02X) %05X - ", dev->name, val, buffer[i], i);
+ error = 1;
+ errorcount ++;
+ } else if (error && buffer[i] == val) {
+ printk ("%05X\n", i);
+ error = 0;
+ }
+ }
+ if (error)
+ printk ("10000\n");
+ kfree (buffer);
+ return errorcount;
+}
+
+static void am79c961_mc_hash(char *addr, u16 *hash)
+{
+ int idx, bit;
+ u32 crc;
+
+ crc = ether_crc_le(ETH_ALEN, addr);
+
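+ /* The top six CRC bits select one of the 64 hash bits: crc >> 30
+ * picks one of the four 16-bit LADR words, (crc >> 26) & 15 the
+ * bit within that word.
+ */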
+ idx = crc >> 30;
+ bit = (crc >> 26) & 15;
+
+ hash[idx] |= 1 << bit;
+}
+
+static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash)
+{
+ unsigned int mode = MODE_PORT_10BT;
+
+ if (dev->flags & IFF_PROMISC) {
+ mode |= MODE_PROMISC;
+ memset(hash, 0xff, 4 * sizeof(*hash));
+ } else if (dev->flags & IFF_ALLMULTI) {
+ memset(hash, 0xff, 4 * sizeof(*hash));
+ } else {
+ struct netdev_hw_addr *ha;
+
+ memset(hash, 0, 4 * sizeof(*hash));
+
+ netdev_for_each_mc_addr(ha, dev)
+ am79c961_mc_hash(ha->addr, hash);
+ }
+
+ return mode;
+}
+
+static void
+am79c961_init_for_open(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ unsigned char *p;
+ u_int hdr_addr, first_free_addr;
+ u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
+ int i;
+
+ /*
+ * Stop the chip.
+ */
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ write_rreg (dev->base_addr, CSR0, CSR0_BABL|CSR0_CERR|CSR0_MISS|CSR0_MERR|CSR0_TINT|CSR0_RINT|CSR0_STOP);
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+
+ write_ireg (dev->base_addr, 5, 0x00a0); /* Receive address LED */
+ write_ireg (dev->base_addr, 6, 0x0081); /* Collision LED */
+ write_ireg (dev->base_addr, 7, 0x0090); /* XMIT LED */
+ write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */
+
+ for (i = LADRL; i <= LADRH; i++)
+ write_rreg (dev->base_addr, i, multi_hash[i - LADRL]);
+
+ for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2)
+ write_rreg (dev->base_addr, i, p[0] | (p[1] << 8));
+
+ write_rreg (dev->base_addr, MODE, mode);
+ write_rreg (dev->base_addr, POLLINT, 0);
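+ /* ring lengths are programmed as 2s complement negatives */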
+ write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS);
+ write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS);
+
+ first_free_addr = RX_BUFFERS * 8 + TX_BUFFERS * 8 + 16;
+ hdr_addr = 0;
+
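+ /* Each descriptor is four 16-bit words in chip RAM: buffer address
+ * low half, flags plus high address byte, 2s complement buffer
+ * length, and the message byte count.
+ */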
+ priv->rxhead = 0;
+ priv->rxtail = 0;
+ priv->rxhdr = hdr_addr;
+
+ for (i = 0; i < RX_BUFFERS; i++) {
+ priv->rxbuffer[i] = first_free_addr;
+ am_writeword (dev, hdr_addr, first_free_addr);
+ am_writeword (dev, hdr_addr + 2, RMD_OWN);
+ am_writeword (dev, hdr_addr + 4, (-1600));
+ am_writeword (dev, hdr_addr + 6, 0);
+ first_free_addr += 1600;
+ hdr_addr += 8;
+ }
+ priv->txhead = 0;
+ priv->txtail = 0;
+ priv->txhdr = hdr_addr;
+ for (i = 0; i < TX_BUFFERS; i++) {
+ priv->txbuffer[i] = first_free_addr;
+ am_writeword (dev, hdr_addr, first_free_addr);
+ am_writeword (dev, hdr_addr + 2, TMD_STP|TMD_ENP);
+ am_writeword (dev, hdr_addr + 4, 0xf000);
+ am_writeword (dev, hdr_addr + 6, 0);
+ first_free_addr += 1600;
+ hdr_addr += 8;
+ }
+
+ write_rreg (dev->base_addr, BASERXL, priv->rxhdr);
+ write_rreg (dev->base_addr, BASERXH, 0);
+ write_rreg (dev->base_addr, BASETXL, priv->txhdr);
+ write_rreg (dev->base_addr, BASETXH, 0);
+ write_rreg (dev->base_addr, CSR0, CSR0_STOP);
+ write_rreg (dev->base_addr, CSR3, CSR3_IDONM|CSR3_BABLM|CSR3_DXSUFLO);
+ write_rreg (dev->base_addr, CSR4, CSR4_APAD_XMIT|CSR4_MFCOM|CSR4_RCVCCOM|CSR4_TXSTRTM|CSR4_JABM);
+ write_rreg (dev->base_addr, CSR0, CSR0_IENA|CSR0_STRT);
+}
+
+static void am79c961_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned int lnkstat, carrier;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+ carrier = netif_carrier_ok(dev);
+
+ if (lnkstat && !carrier) {
+ netif_carrier_on(dev);
+ printk("%s: link up\n", dev->name);
+ } else if (!lnkstat && carrier) {
+ netif_carrier_off(dev);
+ printk("%s: link down\n", dev->name);
+ }
+
+ mod_timer(&priv->timer, jiffies + msecs_to_jiffies(500));
+}
+
+/*
+ * Open/initialize the board.
+ */
+static int
+am79c961_open(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev);
+ if (ret)
+ return ret;
+
+ am79c961_init_for_open(dev);
+
+ netif_carrier_off(dev);
+
+ priv->timer.expires = jiffies;
+ add_timer(&priv->timer);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/*
+ * The inverse routine to am79c961_open().
+ */
+static int
+am79c961_close(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ del_timer_sync(&priv->timer);
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ write_rreg (dev->base_addr, CSR0, CSR0_STOP);
+ write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+
+ free_irq (dev->irq, dev);
+
+ return 0;
+}
+
+/*
+ * Set or clear promiscuous/multicast mode filter for this adapter.
+ */
+static void am79c961_setmulticastlist (struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
+ int i, stopped;
+
+ spin_lock_irqsave(&priv->chip_lock, flags);
+
+ stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP;
+
+ if (!stopped) {
+ /*
+ * Put the chip into suspend mode
+ */
+ write_rreg(dev->base_addr, CTRL1, CTRL1_SPND);
+
+ /*
+ * Spin waiting for chip to report suspend mode
+ */
+ while ((read_rreg(dev->base_addr, CTRL1) & CTRL1_SPND) == 0) {
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+ nop();
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ }
+ }
+
+ /*
+ * Update the multicast hash table
+ */
+ for (i = 0; i < ARRAY_SIZE(multi_hash); i++)
+ write_rreg(dev->base_addr, i + LADRL, multi_hash[i]);
+
+ /*
+ * Write the mode register
+ */
+ write_rreg(dev->base_addr, MODE, mode);
+
+ if (!stopped) {
+ /*
+ * Put the chip back into running mode
+ */
+ write_rreg(dev->base_addr, CTRL1, 0);
+ }
+
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+}
+
+static void am79c961_timeout(struct net_device *dev)
+{
+ printk(KERN_WARNING "%s: transmit timed out, network cable problem?\n",
+ dev->name);
+
+ /*
+ * ought to do some setup of the tx side here
+ */
+
+ netif_wake_queue(dev);
+}
+
+/*
+ * Transmit a packet
+ */
+static int
+am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned int hdraddr, bufaddr;
+ unsigned int head;
+ unsigned long flags;
+
+ head = priv->txhead;
+ hdraddr = priv->txhdr + (head << 3);
+ bufaddr = priv->txbuffer[head];
+ head += 1;
+ if (head >= TX_BUFFERS)
+ head = 0;
+
+ am_writebuffer (dev, bufaddr, skb->data, skb->len);
+ am_writeword (dev, hdraddr + 4, -skb->len);
+ am_writeword (dev, hdraddr + 2, TMD_OWN|TMD_STP|TMD_ENP);
+ priv->txhead = head;
+
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA);
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+
+ /*
+ * If the next packet is owned by the ethernet device,
+ * then the tx ring is full and we can't add another
+ * packet.
+ */
+ if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN)
+ netif_stop_queue(dev);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * If we have a good packet(s), get it/them out of the buffers.
+ */
+static void
+am79c961_rx(struct net_device *dev, struct dev_priv *priv)
+{
+ do {
+ struct sk_buff *skb;
+ u_int hdraddr;
+ u_int pktaddr;
+ u_int status;
+ int len;
+
+ hdraddr = priv->rxhdr + (priv->rxtail << 3);
+ pktaddr = priv->rxbuffer[priv->rxtail];
+
+ status = am_readword (dev, hdraddr + 2);
+ if (status & RMD_OWN) /* do we own it? */
+ break;
+
+ priv->rxtail ++;
+ if (priv->rxtail >= RX_BUFFERS)
+ priv->rxtail = 0;
+
+ if ((status & (RMD_ERR|RMD_STP|RMD_ENP)) != (RMD_STP|RMD_ENP)) {
+ am_writeword (dev, hdraddr + 2, RMD_OWN);
+ dev->stats.rx_errors++;
+ if (status & RMD_ERR) {
+ if (status & RMD_FRAM)
+ dev->stats.rx_frame_errors++;
+ if (status & RMD_CRC)
+ dev->stats.rx_crc_errors++;
+ } else if (status & RMD_STP)
+ dev->stats.rx_length_errors++;
+ continue;
+ }
+
+ len = am_readword(dev, hdraddr + 6);
+ skb = dev_alloc_skb(len + 2);
+
+ if (skb) {
+ skb_reserve(skb, 2);
+
+ am_readbuffer(dev, pktaddr, skb_put(skb, len), len);
+ am_writeword(dev, hdraddr + 2, RMD_OWN);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+ } else {
+ am_writeword (dev, hdraddr + 2, RMD_OWN);
+ printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
+ dev->stats.rx_dropped++;
+ break;
+ }
+ } while (1);
+}
+
+/*
+ * Update stats for the transmitted packet
+ */
+static void
+am79c961_tx(struct net_device *dev, struct dev_priv *priv)
+{
+ do {
+ short len;
+ u_int hdraddr;
+ u_int status;
+
+ hdraddr = priv->txhdr + (priv->txtail << 3);
+ status = am_readword (dev, hdraddr + 2);
+ if (status & TMD_OWN)
+ break;
+
+ priv->txtail ++;
+ if (priv->txtail >= TX_BUFFERS)
+ priv->txtail = 0;
+
+ if (status & TMD_ERR) {
+ u_int status2;
+
+ dev->stats.tx_errors++;
+
+ status2 = am_readword (dev, hdraddr + 6);
+
+ /*
+ * Clear the error byte
+ */
+ am_writeword (dev, hdraddr + 6, 0);
+
+ if (status2 & TST_RTRY)
+ dev->stats.collisions += 16;
+ if (status2 & TST_LCOL)
+ dev->stats.tx_window_errors++;
+ if (status2 & TST_LCAR)
+ dev->stats.tx_carrier_errors++;
+ if (status2 & TST_UFLO)
+ dev->stats.tx_fifo_errors++;
+ continue;
+ }
+ dev->stats.tx_packets++;
+ len = am_readword (dev, hdraddr + 4);
+ dev->stats.tx_bytes += -len;
+ } while (priv->txtail != priv->txhead);
+
+ netif_wake_queue(dev);
+}
+
+static irqreturn_t
+am79c961_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct dev_priv *priv = netdev_priv(dev);
+ u_int status, n = 100;
+ int handled = 0;
+
+ do {
+ status = read_rreg(dev->base_addr, CSR0);
+ write_rreg(dev->base_addr, CSR0, status &
+ (CSR0_IENA|CSR0_TINT|CSR0_RINT|
+ CSR0_MERR|CSR0_MISS|CSR0_CERR|CSR0_BABL));
+
+ if (status & CSR0_RINT) {
+ handled = 1;
+ am79c961_rx(dev, priv);
+ }
+ if (status & CSR0_TINT) {
+ handled = 1;
+ am79c961_tx(dev, priv);
+ }
+ if (status & CSR0_MISS) {
+ handled = 1;
+ dev->stats.rx_dropped++;
+ }
+ if (status & CSR0_CERR) {
+ handled = 1;
+ mod_timer(&priv->timer, jiffies);
+ }
+ } while (--n && status & (CSR0_RINT | CSR0_TINT));
+
+ return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void am79c961_poll_controller(struct net_device *dev)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ am79c961_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+}
+#endif
+
+/*
+ * Initialise the chip. Note that we always expect
+ * to be entered with interrupts enabled.
+ */
+static int
+am79c961_hw_init(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ spin_lock_irq(&priv->chip_lock);
+ write_rreg (dev->base_addr, CSR0, CSR0_STOP);
+ write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
+ spin_unlock_irq(&priv->chip_lock);
+
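+	/* exercise the shared RAM with two complementary bit patterns */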
+ am79c961_ramtest(dev, 0x66);
+ am79c961_ramtest(dev, 0x99);
+
+ return 0;
+}
+
+static void __init am79c961_banner(void)
+{
+ static unsigned version_printed;
+
+ if (net_debug && version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+}
+static const struct net_device_ops am79c961_netdev_ops = {
+ .ndo_open = am79c961_open,
+ .ndo_stop = am79c961_close,
+ .ndo_start_xmit = am79c961_sendpacket,
+ .ndo_set_rx_mode = am79c961_setmulticastlist,
+ .ndo_tx_timeout = am79c961_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = am79c961_poll_controller,
+#endif
+};
+
+static int __devinit am79c961_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct net_device *dev;
+ struct dev_priv *priv;
+ int i, ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res)
+ return -ENODEV;
+
+ dev = alloc_etherdev(sizeof(struct dev_priv));
+ ret = -ENOMEM;
+ if (!dev)
+ goto out;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ priv = netdev_priv(dev);
+
+ /*
+ * Fixed address and IRQ lines here.
+ * The PNP initialisation should have been
+ * done by the ether bootp loader.
+ */
+ dev->base_addr = res->start;
+ ret = platform_get_irq(pdev, 0);
+
+ if (ret < 0) {
+ ret = -ENODEV;
+ goto nodev;
+ }
+ dev->irq = ret;
+
+ ret = -ENODEV;
+ if (!request_region(dev->base_addr, 0x18, dev->name))
+ goto nodev;
+
+ /*
+ * Reset the device.
+ */
+ inb(dev->base_addr + NET_RESET);
+ udelay(5);
+
+ /*
+ * Check the manufacturer part of the
+ * ether address.
+ */
+ if (inb(dev->base_addr) != 0x08 ||
+ inb(dev->base_addr + 2) != 0x00 ||
+ inb(dev->base_addr + 4) != 0x2b)
+ goto release;
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff;
+
+ am79c961_banner();
+
+ spin_lock_init(&priv->chip_lock);
+ init_timer(&priv->timer);
+ priv->timer.data = (unsigned long)dev;
+ priv->timer.function = am79c961_timer;
+
+ if (am79c961_hw_init(dev))
+ goto release;
+
+ dev->netdev_ops = &am79c961_netdev_ops;
+
+ ret = register_netdev(dev);
+ if (ret == 0) {
+ printk(KERN_INFO "%s: ether address %pM\n",
+ dev->name, dev->dev_addr);
+ return 0;
+ }
+
+release:
+ release_region(dev->base_addr, 0x18);
+nodev:
+ free_netdev(dev);
+out:
+ return ret;
+}
+
+static struct platform_driver am79c961_driver = {
+ .probe = am79c961_probe,
+ .driver = {
+ .name = "am79c961",
+ },
+};
+
+static int __init am79c961_init(void)
+{
+ return platform_driver_register(&am79c961_driver);
+}
+
+__initcall(am79c961_init);
diff --git a/drivers/net/ethernet/amd/am79c961a.h b/drivers/net/ethernet/amd/am79c961a.h
new file mode 100644
index 000000000000..fd634d32756b
--- /dev/null
+++ b/drivers/net/ethernet/amd/am79c961a.h
@@ -0,0 +1,145 @@
+/*
+ * linux/drivers/net/arm/am79c961a.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_am79c961a_H
+#define _LINUX_am79c961a_H
+
+/* use 0 for production, 1 for verification, >2 for debug. debug flags: */
+#define DEBUG_TX 2
+#define DEBUG_RX 4
+#define DEBUG_INT 8
+#define DEBUG_IC 16
+#ifndef NET_DEBUG
+#define NET_DEBUG 0
+#endif
+
+#define NET_UID 0
+#define NET_RDP 0x10
+#define NET_RAP 0x12
+#define NET_RESET 0x14
+#define NET_IDP 0x16
+
+/*
+ * RAP registers
+ */
+#define CSR0 0
+#define CSR0_INIT 0x0001
+#define CSR0_STRT 0x0002
+#define CSR0_STOP 0x0004
+#define CSR0_TDMD 0x0008
+#define CSR0_TXON 0x0010
+#define CSR0_RXON 0x0020
+#define CSR0_IENA 0x0040
+#define CSR0_INTR 0x0080
+#define CSR0_IDON 0x0100
+#define CSR0_TINT 0x0200
+#define CSR0_RINT 0x0400
+#define CSR0_MERR 0x0800
+#define CSR0_MISS 0x1000
+#define CSR0_CERR 0x2000
+#define CSR0_BABL 0x4000
+#define CSR0_ERR 0x8000
+
+#define CSR3 3
+#define CSR3_EMBA 0x0008
+#define CSR3_DXMT2PD 0x0010
+#define CSR3_LAPPEN 0x0020
+#define CSR3_DXSUFLO 0x0040
+#define CSR3_IDONM 0x0100
+#define CSR3_TINTM 0x0200
+#define CSR3_RINTM 0x0400
+#define CSR3_MERRM 0x0800
+#define CSR3_MISSM 0x1000
+#define CSR3_BABLM 0x4000
+#define CSR3_MASKALL	0x5F00	/* IDONM|TINTM|RINTM|MERRM|MISSM|BABLM */
+
+#define CSR4 4
+#define CSR4_JABM 0x0001
+#define CSR4_JAB 0x0002
+#define CSR4_TXSTRTM 0x0004
+#define CSR4_TXSTRT 0x0008
+#define CSR4_RCVCCOM 0x0010
+#define CSR4_RCVCCO 0x0020
+#define CSR4_MFCOM 0x0100
+#define CSR4_MFCO 0x0200
+#define CSR4_ASTRP_RCV 0x0400
+#define CSR4_APAD_XMIT 0x0800
+
+#define CTRL1 5
+#define CTRL1_SPND 0x0001
+
+#define LADRL 8
+#define LADRM1 9
+#define LADRM2 10
+#define LADRH 11
+#define PADRL 12
+#define PADRM 13
+#define PADRH 14
+
+#define MODE 15
+#define MODE_DISRX 0x0001
+#define MODE_DISTX 0x0002
+#define MODE_LOOP 0x0004
+#define MODE_DTCRC 0x0008
+#define MODE_COLL 0x0010
+#define MODE_DRETRY 0x0020
+#define MODE_INTLOOP 0x0040
+#define MODE_PORT_AUI 0x0000
+#define MODE_PORT_10BT 0x0080
+#define MODE_DRXPA 0x2000
+#define MODE_DRXBA 0x4000
+#define MODE_PROMISC 0x8000
+
+#define BASERXL 24
+#define BASERXH 25
+#define BASETXL 30
+#define BASETXH 31
+
+#define POLLINT 47
+
+#define SIZERXR 76
+#define SIZETXR 78
+
+#define CSR_MFC 112
+
+#define RMD_ENP 0x0100
+#define RMD_STP 0x0200
+#define RMD_CRC 0x0800
+#define RMD_FRAM 0x2000
+#define RMD_ERR 0x4000
+#define RMD_OWN 0x8000
+
+#define TMD_ENP 0x0100
+#define TMD_STP 0x0200
+#define TMD_MORE 0x1000
+#define TMD_ERR 0x4000
+#define TMD_OWN 0x8000
+
+#define TST_RTRY 0x0400
+#define TST_LCAR 0x0800
+#define TST_LCOL 0x1000
+#define TST_UFLO 0x4000
+#define TST_BUFF 0x8000
+
+#define ISALED0 0x0004
+#define ISALED0_LNKST 0x8000
+
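+/*
+ * Per-device state: on-chip buffer and header offsets for the rx/tx
+ * rings, ring head/tail indices, a lock serialising chip register
+ * access, and the timer re-armed from the interrupt handler on CERR.
+ */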
+struct dev_priv {
+ unsigned long rxbuffer[RX_BUFFERS];
+ unsigned long txbuffer[TX_BUFFERS];
+ unsigned char txhead;
+ unsigned char txtail;
+ unsigned char rxhead;
+ unsigned char rxtail;
+ unsigned long rxhdr;
+ unsigned long txhdr;
+ spinlock_t chip_lock;
+ struct timer_list timer;
+};
+
+#endif
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
new file mode 100644
index 000000000000..a9745f4ddbfe
--- /dev/null
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -0,0 +1,1992 @@
+
+/* Advanced Micro Devices Inc. AMD8111E Linux Network Driver
+ * Copyright (C) 2004 Advanced Micro Devices
+ *
+ *
+ * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
+ * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
+ * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
+ * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.[ pcnet32.c ]
+ * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+
+Module Name:
+
+ amd8111e.c
+
+Abstract:
+
+ AMD8111 based 10/100 Ethernet Controller Driver.
+
+Environment:
+
+ Kernel Mode
+
+Revision History:
+ 3.0.0
+ Initial Revision.
+ 3.0.1
+ 1. Dynamic interrupt coalescing.
+ 2. Removed prev_stats.
+ 3. MII support.
+ 4. Dynamic IPG support
+ 3.0.2 05/29/2003
+ 1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
+ 2. Bug fix: Fixed VLAN support failure.
+ 3. Bug fix: Fixed receive interrupt coalescing bug.
+ 4. Dynamic IPG support is disabled by default.
+ 3.0.3 06/05/2003
+ 1. Bug fix: Fixed failure to close the interface if SMP is enabled.
+ 3.0.4 12/09/2003
+ 1. Added set_mac_address routine for bonding driver support.
+ 2. Tested the driver for bonding support
+	3. Bug fix: Fixed mismatch between the actual receive buffer length
+	   and the length indicated to the h/w.
+ 4. Modified amd8111e_rx() routine to receive all the received packets
+ in the first interrupt.
+ 5. Bug fix: Corrected rx_errors reported in get_stats() function.
+ 3.0.5 03/22/2004
+ 1. Added NAPI support
+
+*/
+
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/ctype.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define AMD8111E_VLAN_TAG_USED 1
+#else
+#define AMD8111E_VLAN_TAG_USED 0
+#endif
+
+#include "amd8111e.h"
+#define MODULE_NAME "amd8111e"
+#define MODULE_VERS "3.0.7"
+MODULE_AUTHOR("Advanced Micro Devices, Inc.");
+MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version "MODULE_VERS);
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
+module_param_array(speed_duplex, int, NULL, 0);
+MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
+module_param_array(coalesce, bool, NULL, 0);
+MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
+module_param_array(dynamic_ipg, bool, NULL, 0);
+MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
+
+static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = {
+
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { 0, }
+
+};
+/*
+This function will read the PHY registers.
+*/
+static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
+{
+ void __iomem *mmio = lp->mmio;
+ unsigned int reg_val;
+ unsigned int repeat= REPEAT_CNT;
+
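+	/* wait for any previous PHY command to complete */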
+ reg_val = readl(mmio + PHY_ACCESS);
+ while (reg_val & PHY_CMD_ACTIVE)
+ reg_val = readl( mmio + PHY_ACCESS );
+
+ writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
+ ((reg & 0x1f) << 16), mmio +PHY_ACCESS);
+ do{
+ reg_val = readl(mmio + PHY_ACCESS);
+ udelay(30); /* It takes 30 us to read/write data */
+ } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
+ if(reg_val & PHY_RD_ERR)
+ goto err_phy_read;
+
+ *val = reg_val & 0xffff;
+ return 0;
+err_phy_read:
+ *val = 0;
+ return -EINVAL;
+
+}
+
+/*
+This function will write into PHY registers.
+*/
+static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
+{
+ unsigned int repeat = REPEAT_CNT;
+ void __iomem *mmio = lp->mmio;
+ unsigned int reg_val;
+
+ reg_val = readl(mmio + PHY_ACCESS);
+ while (reg_val & PHY_CMD_ACTIVE)
+ reg_val = readl( mmio + PHY_ACCESS );
+
+ writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
+ ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
+
+ do{
+ reg_val = readl(mmio + PHY_ACCESS);
+ udelay(30); /* It takes 30 us to read/write the data */
+ } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
+
+ if(reg_val & PHY_RD_ERR)
+ goto err_phy_write;
+
+ return 0;
+
+err_phy_write:
+ return -EINVAL;
+
+}
+/*
+This is the mii register read function provided to the mii interface.
+*/
+static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num)
+{
+ struct amd8111e_priv* lp = netdev_priv(dev);
+ unsigned int reg_val;
+
+ amd8111e_read_phy(lp,phy_id,reg_num,&reg_val);
+ return reg_val;
+
+}
+
+/*
+This is the mii register write function provided to the mii interface.
+*/
+static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num, int val)
+{
+ struct amd8111e_priv* lp = netdev_priv(dev);
+
+ amd8111e_write_phy(lp, phy_id, reg_num, val);
+}
+
+/*
+This function will set the PHY speed. During initialization it sets the original speed to 100 full.
+*/
+static void amd8111e_set_ext_phy(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ u32 bmcr,advert,tmp;
+
+ /* Determine mii register values to set the speed */
+ advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
+ tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ switch (lp->ext_phy_option){
+
+ default:
+ case SPEED_AUTONEG: /* advertise all values */
+ tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL|
+ ADVERTISE_100HALF|ADVERTISE_100FULL) ;
+ break;
+ case SPEED10_HALF:
+ tmp |= ADVERTISE_10HALF;
+ break;
+ case SPEED10_FULL:
+ tmp |= ADVERTISE_10FULL;
+ break;
+ case SPEED100_HALF:
+ tmp |= ADVERTISE_100HALF;
+ break;
+ case SPEED100_FULL:
+ tmp |= ADVERTISE_100FULL;
+ break;
+ }
+
+ if(advert != tmp)
+ amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp);
+ /* Restart auto negotiation */
+ bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr);
+
+}
+
+/*
+This function will unmap skb->data space and will free
+all transmit and receive skbuffs.
+*/
+static int amd8111e_free_skbs(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ struct sk_buff* rx_skbuff;
+ int i;
+
+ /* Freeing transmit skbs */
+ for(i = 0; i < NUM_TX_BUFFERS; i++){
+ if(lp->tx_skbuff[i]){
+ pci_unmap_single(lp->pci_dev,lp->tx_dma_addr[i], lp->tx_skbuff[i]->len,PCI_DMA_TODEVICE);
+ dev_kfree_skb (lp->tx_skbuff[i]);
+ lp->tx_skbuff[i] = NULL;
+ lp->tx_dma_addr[i] = 0;
+ }
+ }
+ /* Freeing previously allocated receive buffers */
+ for (i = 0; i < NUM_RX_BUFFERS; i++){
+ rx_skbuff = lp->rx_skbuff[i];
+ if(rx_skbuff != NULL){
+ pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i],
+ lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(lp->rx_skbuff[i]);
+ lp->rx_skbuff[i] = NULL;
+ lp->rx_dma_addr[i] = 0;
+ }
+ }
+
+ return 0;
+}
+
+/*
+This will set the receive buffer length corresponding to the mtu size of the network interface.
+*/
+static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
+{
+ struct amd8111e_priv* lp = netdev_priv(dev);
+ unsigned int mtu = dev->mtu;
+
+ if (mtu > ETH_DATA_LEN){
+ /* MTU + ethernet header + FCS
+ + optional VLAN tag + skb reserve space 2 */
+
+ lp->rx_buff_len = mtu + ETH_HLEN + 10;
+ lp->options |= OPTION_JUMBO_ENABLE;
+ } else{
+ lp->rx_buff_len = PKT_BUFF_SZ;
+ lp->options &= ~OPTION_JUMBO_ENABLE;
+ }
+}
+
+/*
+This function frees all previously allocated buffers, determines the new receive buffer length and allocates new receive buffers. It also allocates and initializes both the transmit and receive hardware descriptors.
+ */
+static int amd8111e_init_ring(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int i;
+
+ lp->rx_idx = lp->tx_idx = 0;
+ lp->tx_complete_idx = 0;
+ lp->tx_ring_idx = 0;
+
+
+ if(lp->opened)
+ /* Free previously allocated transmit and receive skbs */
+ amd8111e_free_skbs(dev);
+
+ else{
+ /* allocate the tx and rx descriptors */
+ if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
+ sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
+ &lp->tx_ring_dma_addr)) == NULL)
+
+ goto err_no_mem;
+
+ if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
+ sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
+ &lp->rx_ring_dma_addr)) == NULL)
+
+ goto err_free_tx_ring;
+
+ }
+ /* Set new receive buff size */
+ amd8111e_set_rx_buff_len(dev);
+
+ /* Allocating receive skbs */
+ for (i = 0; i < NUM_RX_BUFFERS; i++) {
+
+ if (!(lp->rx_skbuff[i] = dev_alloc_skb(lp->rx_buff_len))) {
+			/* Release previously allocated skbs */
+ for(--i; i >= 0 ;i--)
+ dev_kfree_skb(lp->rx_skbuff[i]);
+ goto err_free_rx_ring;
+ }
+ skb_reserve(lp->rx_skbuff[i],2);
+ }
+	/* Initializing receive descriptors */
+ for (i = 0; i < NUM_RX_BUFFERS; i++) {
+ lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev,
+ lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
+
+ lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
+ lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
+ wmb();
+ lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
+ }
+
+ /* Initializing transmit descriptors */
+ for (i = 0; i < NUM_TX_RING_DR; i++) {
+ lp->tx_ring[i].buff_phy_addr = 0;
+ lp->tx_ring[i].tx_flags = 0;
+ lp->tx_ring[i].buff_count = 0;
+ }
+
+ return 0;
+
+err_free_rx_ring:
+
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring,
+ lp->rx_ring_dma_addr);
+
+err_free_tx_ring:
+
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring,
+ lp->tx_ring_dma_addr);
+
+err_no_mem:
+ return -ENOMEM;
+}
+/* This function will set the interrupt coalescing according to the input arguments */
+static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
+{
+ unsigned int timeout;
+ unsigned int event_count;
+
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ void __iomem *mmio = lp->mmio;
+ struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
+
+
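+	/*
+	 * The delayed-interrupt registers pack the event count into the
+	 * upper field (<<16) and the DELAY_TIMER_CONV-scaled timeout
+	 * into the low bits.
+	 */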
+ switch(cmod)
+ {
+ case RX_INTR_COAL :
+ timeout = coal_conf->rx_timeout;
+ event_count = coal_conf->rx_event_count;
+ if( timeout > MAX_TIMEOUT ||
+ event_count > MAX_EVENT_COUNT )
+ return -EINVAL;
+
+ timeout = timeout * DELAY_TIMER_CONV;
+ writel(VAL0|STINTEN, mmio+INTEN0);
+ writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout,
+ mmio+DLY_INT_A);
+ break;
+
+ case TX_INTR_COAL :
+ timeout = coal_conf->tx_timeout;
+ event_count = coal_conf->tx_event_count;
+ if( timeout > MAX_TIMEOUT ||
+ event_count > MAX_EVENT_COUNT )
+ return -EINVAL;
+
+
+ timeout = timeout * DELAY_TIMER_CONV;
+ writel(VAL0|STINTEN,mmio+INTEN0);
+ writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout,
+ mmio+DLY_INT_B);
+ break;
+
+ case DISABLE_COAL:
+ writel(0,mmio+STVAL);
+ writel(STINTEN, mmio+INTEN0);
+ writel(0, mmio +DLY_INT_B);
+ writel(0, mmio+DLY_INT_A);
+ break;
+ case ENABLE_COAL:
+ /* Start the timer */
+ writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /* 0.5 sec */
+ writel(VAL0|STINTEN, mmio+INTEN0);
+ break;
+ default:
+ break;
+
+ }
+ return 0;
+
+}
+
+/*
+This function initializes the device registers and starts the device.
+*/
+static int amd8111e_restart(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ void __iomem *mmio = lp->mmio;
+ int i,reg_val;
+
+ /* stop the chip */
+ writel(RUN, mmio + CMD0);
+
+ if(amd8111e_init_ring(dev))
+ return -ENOMEM;
+
+ /* enable the port manager and set auto negotiation always */
+ writel((u32) VAL1|EN_PMGR, mmio + CMD3 );
+ writel((u32)XPHYANE|XPHYRST , mmio + CTRL2);
+
+ amd8111e_set_ext_phy(dev);
+
+ /* set control registers */
+ reg_val = readl(mmio + CTRL1);
+ reg_val &= ~XMTSP_MASK;
+ writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 );
+
+ /* enable interrupt */
+ writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
+ APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
+ SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);
+
+ writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);
+
+ /* initialize tx and rx ring base addresses */
+ writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0);
+ writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0);
+
+ writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
+ writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);
+
+ /* set default IPG to 96 */
+ writew((u32)DEFAULT_IPG,mmio+IPG);
+ writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);
+
+ if(lp->options & OPTION_JUMBO_ENABLE){
+ writel((u32)VAL2|JUMBO, mmio + CMD3);
+ /* Reset REX_UFLO */
+ writel( REX_UFLO, mmio + CMD2);
+ /* Should not set REX_UFLO for jumbo frames */
+ writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2);
+ }else{
+ writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);
+ writel((u32)JUMBO, mmio + CMD3);
+ }
+
+#if AMD8111E_VLAN_TAG_USED
+ writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);
+#endif
+ writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
+
+ /* Setting the MAC address to the device */
+ for(i = 0; i < ETH_ADDR_LEN; i++)
+ writeb( dev->dev_addr[i], mmio + PADR + i );
+
+ /* Enable interrupt coalesce */
+ if(lp->options & OPTION_INTR_COAL_ENABLE){
+ printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n",
+ dev->name);
+ amd8111e_set_coalesce(dev,ENABLE_COAL);
+ }
+
+ /* set RUN bit to start the chip */
+ writel(VAL2 | RDMD0, mmio + CMD0);
+ writel(VAL0 | INTREN | RUN, mmio + CMD0);
+
+ /* To avoid PCI posting bug */
+ readl(mmio+CMD0);
+ return 0;
+}
+/*
+This function clears the necessary device registers.
+*/
+static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
+{
+ unsigned int reg_val;
+ unsigned int logic_filter[2] ={0,};
+ void __iomem *mmio = lp->mmio;
+
+
+ /* stop the chip */
+ writel(RUN, mmio + CMD0);
+
+	/* AUTOPOLL0 Register */ /* TBD: default value is 8100 in FPS */
+ writew( 0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0);
+
+ /* Clear RCV_RING_BASE_ADDR */
+ writel(0, mmio + RCV_RING_BASE_ADDR0);
+
+ /* Clear XMT_RING_BASE_ADDR */
+ writel(0, mmio + XMT_RING_BASE_ADDR0);
+ writel(0, mmio + XMT_RING_BASE_ADDR1);
+ writel(0, mmio + XMT_RING_BASE_ADDR2);
+ writel(0, mmio + XMT_RING_BASE_ADDR3);
+
+ /* Clear CMD0 */
+ writel(CMD0_CLEAR,mmio + CMD0);
+
+ /* Clear CMD2 */
+ writel(CMD2_CLEAR, mmio +CMD2);
+
+ /* Clear CMD7 */
+ writel(CMD7_CLEAR , mmio + CMD7);
+
+ /* Clear DLY_INT_A and DLY_INT_B */
+ writel(0x0, mmio + DLY_INT_A);
+ writel(0x0, mmio + DLY_INT_B);
+
+ /* Clear FLOW_CONTROL */
+ writel(0x0, mmio + FLOW_CONTROL);
+
+ /* Clear INT0 write 1 to clear register */
+ reg_val = readl(mmio + INT0);
+ writel(reg_val, mmio + INT0);
+
+ /* Clear STVAL */
+ writel(0x0, mmio + STVAL);
+
+ /* Clear INTEN0 */
+ writel( INTEN0_CLEAR, mmio + INTEN0);
+
+ /* Clear LADRF */
+ writel(0x0 , mmio + LADRF);
+
+ /* Set SRAM_SIZE & SRAM_BOUNDARY registers */
+ writel( 0x80010,mmio + SRAM_SIZE);
+
+ /* Clear RCV_RING0_LEN */
+ writel(0x0, mmio + RCV_RING_LEN0);
+
+ /* Clear XMT_RING0/1/2/3_LEN */
+ writel(0x0, mmio + XMT_RING_LEN0);
+ writel(0x0, mmio + XMT_RING_LEN1);
+ writel(0x0, mmio + XMT_RING_LEN2);
+ writel(0x0, mmio + XMT_RING_LEN3);
+
+ /* Clear XMT_RING_LIMIT */
+ writel(0x0, mmio + XMT_RING_LIMIT);
+
+ /* Clear MIB */
+ writew(MIB_CLEAR, mmio + MIB_ADDR);
+
+ /* Clear LARF */
+ amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF);
+
+ /* SRAM_SIZE register */
+ reg_val = readl(mmio + SRAM_SIZE);
+
+ if(lp->options & OPTION_JUMBO_ENABLE)
+ writel( VAL2|JUMBO, mmio + CMD3);
+#if AMD8111E_VLAN_TAG_USED
+ writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
+#endif
+ /* Set default value to CTRL1 Register */
+ writel(CTRL1_DEFAULT, mmio + CTRL1);
+
+ /* To avoid PCI posting bug */
+ readl(mmio + CMD2);
+
+}
+
+/*
+This function disables the interrupt and clears all the pending
+interrupts in INT0
+ */
+static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
+{
+ u32 intr0;
+
+ /* Disable interrupt */
+ writel(INTREN, lp->mmio + CMD0);
+
+ /* Clear INT0 */
+ intr0 = readl(lp->mmio + INT0);
+ writel(intr0, lp->mmio + INT0);
+
+ /* To avoid PCI posting bug */
+ readl(lp->mmio + INT0);
+
+}
+
+/*
+This function stops the chip.
+*/
+static void amd8111e_stop_chip(struct amd8111e_priv* lp)
+{
+ writel(RUN, lp->mmio + CMD0);
+
+ /* To avoid PCI posting bug */
+ readl(lp->mmio + CMD0);
+}
+
+/*
+This function frees the transmit and receive descriptor rings.
+*/
+static void amd8111e_free_ring(struct amd8111e_priv* lp)
+{
+ /* Free transmit and receive descriptor rings */
+ if(lp->rx_ring){
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
+ lp->rx_ring, lp->rx_ring_dma_addr);
+ lp->rx_ring = NULL;
+ }
+
+ if(lp->tx_ring){
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
+ lp->tx_ring, lp->tx_ring_dma_addr);
+
+ lp->tx_ring = NULL;
+ }
+
+}
+
+/*
+This function frees all the transmit skbs that the device has actually transmitted. It checks descriptor ownership before freeing each skb.
+*/
+static int amd8111e_tx(struct net_device *dev)
+{
+ struct amd8111e_priv* lp = netdev_priv(dev);
+ int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
+ int status;
+ /* Complete all the transmit packet */
+ while (lp->tx_complete_idx != lp->tx_idx){
+ tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
+ status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
+
+ if(status & OWN_BIT)
+ break; /* It still hasn't been Txed */
+
+ lp->tx_ring[tx_index].buff_phy_addr = 0;
+
+ /* We must free the original skb */
+ if (lp->tx_skbuff[tx_index]) {
+ pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
+ lp->tx_skbuff[tx_index]->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
+ lp->tx_skbuff[tx_index] = NULL;
+ lp->tx_dma_addr[tx_index] = 0;
+ }
+ lp->tx_complete_idx++;
+ /*COAL update tx coalescing parameters */
+ lp->coal_conf.tx_packets++;
+ lp->coal_conf.tx_bytes +=
+ le16_to_cpu(lp->tx_ring[tx_index].buff_count);
+
+ if (netif_queue_stopped(dev) &&
+ lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
+ /* The ring is no longer full, clear tbusy. */
+ /* lp->tx_full = 0; */
+ netif_wake_queue (dev);
+ }
+ }
+ return 0;
+}
+
+/* This function handles the driver receive operation in polling mode */
+static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi);
+ struct net_device *dev = lp->amd8111e_net_dev;
+ int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
+ void __iomem *mmio = lp->mmio;
+ struct sk_buff *skb,*new_skb;
+ int min_pkt_len, status;
+ unsigned int intr0;
+ int num_rx_pkt = 0;
+ short pkt_len;
+#if AMD8111E_VLAN_TAG_USED
+ short vtag;
+#endif
+ int rx_pkt_limit = budget;
+ unsigned long flags;
+
+ do{
+		/* process receive packets until we use up the quota */
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while(1) {
+ status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
+ if (status & OWN_BIT)
+ break;
+
+ /*
+ * There is a tricky error noted by John Murphy,
+ * <murf@perftech.com> to Russ Nelson: Even with
+			 * full-sized buffers it's possible for a
+ * jabber packet to use two buffers, with only
+ * the last correctly noting the error.
+ */
+
+ if(status & ERR_BIT) {
+				/* resetting flags */
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ goto err_next_pkt;
+ }
+ /* check for STP and ENP */
+ if(!((status & STP_BIT) && (status & ENP_BIT))){
+				/* resetting flags */
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ goto err_next_pkt;
+ }
+ pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
+
+#if AMD8111E_VLAN_TAG_USED
+ vtag = status & TT_MASK;
+			/* the MAC will strip the vlan tag */
+ if (vtag != 0)
+ min_pkt_len =MIN_PKT_LEN - 4;
+ else
+#endif
+ min_pkt_len =MIN_PKT_LEN;
+
+ if (pkt_len < min_pkt_len) {
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ lp->drv_rx_errors++;
+ goto err_next_pkt;
+ }
+ if(--rx_pkt_limit < 0)
+ goto rx_not_empty;
+ if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){
+				/* if allocation fails,
+				   drop this packet and move on to the next */
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ lp->drv_rx_errors++;
+ goto err_next_pkt;
+ }
+
+ skb_reserve(new_skb, 2);
+ skb = lp->rx_skbuff[rx_index];
+ pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
+ lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
+ skb_put(skb, pkt_len);
+ lp->rx_skbuff[rx_index] = new_skb;
+ lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
+ new_skb->data,
+ lp->rx_buff_len-2,
+ PCI_DMA_FROMDEVICE);
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+#if AMD8111E_VLAN_TAG_USED
+ if (vtag == TT_VLAN_TAGGED){
+ u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
+ __vlan_hwaccel_put_tag(skb, vlan_tag);
+ }
+#endif
+ netif_receive_skb(skb);
+ /*COAL update rx coalescing parameters*/
+ lp->coal_conf.rx_packets++;
+ lp->coal_conf.rx_bytes += pkt_len;
+ num_rx_pkt++;
+
+ err_next_pkt:
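+			/* Re-arm the descriptor: restore the buffer address
+			   and count, then hand ownership back to the chip. */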
+ lp->rx_ring[rx_index].buff_phy_addr
+ = cpu_to_le32(lp->rx_dma_addr[rx_index]);
+ lp->rx_ring[rx_index].buff_count =
+ cpu_to_le16(lp->rx_buff_len-2);
+ wmb();
+ lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
+ rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
+ }
+		/* Check the interrupt status register for more packets in
+		   the meantime. Process them, since we have not used up
+		   our quota. */
+
+ intr0 = readl(mmio + INT0);
+ /*Ack receive packets */
+ writel(intr0 & RINT0,mmio + INT0);
+
+ } while(intr0 & RINT0);
+
+ if (rx_pkt_limit > 0) {
+ /* Receive descriptor is empty now */
+ spin_lock_irqsave(&lp->lock, flags);
+ __napi_complete(napi);
+ writel(VAL0|RINTEN0, mmio + INTEN0);
+ writel(VAL2 | RDMD0, mmio + CMD0);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+
+rx_not_empty:
+ return num_rx_pkt;
+}
+
+/*
+This function will indicate the link status to the kernel.
+*/
+static int amd8111e_link_change(struct net_device* dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int status0,speed;
+
+ /* read the link change */
+ status0 = readl(lp->mmio + STAT0);
+
+ if(status0 & LINK_STATS){
+ if(status0 & AUTONEG_COMPLETE)
+ lp->link_config.autoneg = AUTONEG_ENABLE;
+ else
+ lp->link_config.autoneg = AUTONEG_DISABLE;
+
+ if(status0 & FULL_DPLX)
+ lp->link_config.duplex = DUPLEX_FULL;
+ else
+ lp->link_config.duplex = DUPLEX_HALF;
+ speed = (status0 & SPEED_MASK) >> 7;
+ if(speed == PHY_SPEED_10)
+ lp->link_config.speed = SPEED_10;
+ else if(speed == PHY_SPEED_100)
+ lp->link_config.speed = SPEED_100;
+
+ printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n", dev->name,
+ (lp->link_config.speed == SPEED_100) ? "100": "10",
+ (lp->link_config.duplex == DUPLEX_FULL)? "Full": "Half");
+ netif_carrier_on(dev);
+ }
+ else{
+ lp->link_config.speed = SPEED_INVALID;
+ lp->link_config.duplex = DUPLEX_INVALID;
+ lp->link_config.autoneg = AUTONEG_INVALID;
+ printk(KERN_INFO "%s: Link is Down.\n",dev->name);
+ netif_carrier_off(dev);
+ }
+
+ return 0;
+}
+/*
+This function reads the mib counters.
+*/
+static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
+{
+ unsigned int status;
+ unsigned int data;
+ unsigned int repeat = REPEAT_CNT;
+
+ writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
+ do {
+ status = readw(mmio + MIB_ADDR);
+ udelay(2); /* controller takes MAX 2 us to get mib data */
+ }
+ while (--repeat && (status & MIB_CMD_ACTIVE));
+
+ data = readl(mmio + MIB_DATA);
+ return data;
+}
+
+/*
+ * This function reads the mib registers and returns the hardware statistics.
+ * It updates previous internal driver statistics with new values.
+ */
+static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ void __iomem *mmio = lp->mmio;
+ unsigned long flags;
+ struct net_device_stats *new_stats = &dev->stats;
+
+ if (!lp->opened)
+ return new_stats;
+ spin_lock_irqsave (&lp->lock, flags);
+
+ /* stats.rx_packets */
+ new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
+ amd8111e_read_mib(mmio, rcv_multicast_pkts)+
+ amd8111e_read_mib(mmio, rcv_unicast_pkts);
+
+ /* stats.tx_packets */
+ new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);
+
+ /*stats.rx_bytes */
+ new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);
+
+ /* stats.tx_bytes */
+ new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);
+
+ /* stats.rx_errors */
+ /* hw errors + errors driver reported */
+ new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
+ amd8111e_read_mib(mmio, rcv_fragments)+
+ amd8111e_read_mib(mmio, rcv_jabbers)+
+ amd8111e_read_mib(mmio, rcv_alignment_errors)+
+ amd8111e_read_mib(mmio, rcv_fcs_errors)+
+ amd8111e_read_mib(mmio, rcv_miss_pkts)+
+ lp->drv_rx_errors;
+
+ /* stats.tx_errors */
+ new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
+
+ /* stats.rx_dropped*/
+ new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);
+
+ /* stats.tx_dropped*/
+ new_stats->tx_dropped = amd8111e_read_mib(mmio, xmt_underrun_pkts);
+
+ /* stats.multicast*/
+ new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);
+
+ /* stats.collisions*/
+ new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);
+
+ /* stats.rx_length_errors*/
+ new_stats->rx_length_errors =
+ amd8111e_read_mib(mmio, rcv_undersize_pkts)+
+ amd8111e_read_mib(mmio, rcv_oversize_pkts);
+
+ /* stats.rx_over_errors*/
+ new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
+
+ /* stats.rx_crc_errors*/
+ new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);
+
+ /* stats.rx_frame_errors*/
+ new_stats->rx_frame_errors =
+ amd8111e_read_mib(mmio, rcv_alignment_errors);
+
+ /* stats.rx_fifo_errors */
+ new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
+
+ /* stats.rx_missed_errors */
+ new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
+
+ /* stats.tx_aborted_errors*/
+ new_stats->tx_aborted_errors =
+ amd8111e_read_mib(mmio, xmt_excessive_collision);
+
+ /* stats.tx_carrier_errors*/
+ new_stats->tx_carrier_errors =
+ amd8111e_read_mib(mmio, xmt_loss_carrier);
+
+ /* stats.tx_fifo_errors*/
+ new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
+
+ /* stats.tx_window_errors*/
+ new_stats->tx_window_errors =
+ amd8111e_read_mib(mmio, xmt_late_collision);
+
+ /* Reset the mibs for collecting new statistics */
+ /* writew(MIB_CLEAR, mmio + MIB_ADDR);*/
+
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ return new_stats;
+}
+/* This function recalculates the interrupt coalescing mode on every interrupt
+according to the data rate and the packet rate.
+*/
+static int amd8111e_calc_coalesce(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
+ int tx_pkt_rate;
+ int rx_pkt_rate;
+ int tx_data_rate;
+ int rx_data_rate;
+ int rx_pkt_size;
+ int tx_pkt_size;
+
+ tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
+ coal_conf->tx_prev_packets = coal_conf->tx_packets;
+
+ tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
+ coal_conf->tx_prev_bytes = coal_conf->tx_bytes;
+
+ rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
+ coal_conf->rx_prev_packets = coal_conf->rx_packets;
+
+ rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
+ coal_conf->rx_prev_bytes = coal_conf->rx_bytes;
+
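+	/*
+	 * Choose a coalescing tier from the measured traffic: below
+	 * 800 packets/sec coalescing is switched off; above that the
+	 * average packet size (data rate / packet rate) selects the
+	 * NO/LOW/MEDIUM/HIGH parameter sets.
+	 */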
+ if(rx_pkt_rate < 800){
+ if(coal_conf->rx_coal_type != NO_COALESCE){
+
+ coal_conf->rx_timeout = 0x0;
+ coal_conf->rx_event_count = 0;
+ amd8111e_set_coalesce(dev,RX_INTR_COAL);
+ coal_conf->rx_coal_type = NO_COALESCE;
+ }
+ }
+ else{
+
+ rx_pkt_size = rx_data_rate/rx_pkt_rate;
+ if (rx_pkt_size < 128){
+ if(coal_conf->rx_coal_type != NO_COALESCE){
+
+ coal_conf->rx_timeout = 0;
+ coal_conf->rx_event_count = 0;
+ amd8111e_set_coalesce(dev,RX_INTR_COAL);
+ coal_conf->rx_coal_type = NO_COALESCE;
+ }
+
+ }
+ else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){
+
+ if(coal_conf->rx_coal_type != LOW_COALESCE){
+ coal_conf->rx_timeout = 1;
+ coal_conf->rx_event_count = 4;
+ amd8111e_set_coalesce(dev,RX_INTR_COAL);
+ coal_conf->rx_coal_type = LOW_COALESCE;
+ }
+ }
+ else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){
+
+ if(coal_conf->rx_coal_type != MEDIUM_COALESCE){
+ coal_conf->rx_timeout = 1;
+ coal_conf->rx_event_count = 4;
+ amd8111e_set_coalesce(dev,RX_INTR_COAL);
+ coal_conf->rx_coal_type = MEDIUM_COALESCE;
+ }
+
+ }
+ else if(rx_pkt_size >= 1024){
+ if(coal_conf->rx_coal_type != HIGH_COALESCE){
+ coal_conf->rx_timeout = 2;
+ coal_conf->rx_event_count = 3;
+ amd8111e_set_coalesce(dev,RX_INTR_COAL);
+ coal_conf->rx_coal_type = HIGH_COALESCE;
+ }
+ }
+ }
+ /* NOW FOR TX INTR COALESC */
+ if(tx_pkt_rate < 800){
+ if(coal_conf->tx_coal_type != NO_COALESCE){
+
+ coal_conf->tx_timeout = 0x0;
+ coal_conf->tx_event_count = 0;
+ amd8111e_set_coalesce(dev,TX_INTR_COAL);
+ coal_conf->tx_coal_type = NO_COALESCE;
+ }
+ }
+ else{
+
+ tx_pkt_size = tx_data_rate/tx_pkt_rate;
+ if (tx_pkt_size < 128){
+
+ if(coal_conf->tx_coal_type != NO_COALESCE){
+
+ coal_conf->tx_timeout = 0;
+ coal_conf->tx_event_count = 0;
+ amd8111e_set_coalesce(dev,TX_INTR_COAL);
+ coal_conf->tx_coal_type = NO_COALESCE;
+ }
+
+ }
+ else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){
+
+ if(coal_conf->tx_coal_type != LOW_COALESCE){
+ coal_conf->tx_timeout = 1;
+ coal_conf->tx_event_count = 2;
+ amd8111e_set_coalesce(dev,TX_INTR_COAL);
+ coal_conf->tx_coal_type = LOW_COALESCE;
+
+ }
+ }
+ else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){
+
+ if(coal_conf->tx_coal_type != MEDIUM_COALESCE){
+ coal_conf->tx_timeout = 2;
+ coal_conf->tx_event_count = 5;
+ amd8111e_set_coalesce(dev,TX_INTR_COAL);
+ coal_conf->tx_coal_type = MEDIUM_COALESCE;
+ }
+
+ }
+		else if(tx_pkt_size >= 1024){
+			if(coal_conf->tx_coal_type != HIGH_COALESCE){
+				coal_conf->tx_timeout = 4;
+				coal_conf->tx_event_count = 8;
+				amd8111e_set_coalesce(dev,TX_INTR_COAL);
+				coal_conf->tx_coal_type = HIGH_COALESCE;
+			}
+		}
+ }
+ return 0;
+
+}
+/*
+This is the device interrupt handler. It handles transmit, receive, link change and hardware timer interrupts.
+*/
+static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
+{
+
+ struct net_device * dev = (struct net_device *) dev_id;
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ void __iomem *mmio = lp->mmio;
+ unsigned int intr0, intren0;
+ unsigned int handled = 1;
+
+ if(unlikely(dev == NULL))
+ return IRQ_NONE;
+
+ spin_lock(&lp->lock);
+
+ /* disabling interrupt */
+ writel(INTREN, mmio + CMD0);
+
+ /* Read interrupt status */
+ intr0 = readl(mmio + INT0);
+ intren0 = readl(mmio + INTEN0);
+
+ /* Process all the INT event until INTR bit is clear. */
+
+ if (!(intr0 & INTR)){
+ handled = 0;
+ goto err_no_interrupt;
+ }
+
+ /* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT */
+ writel(intr0, mmio + INT0);
+
+ /* Check if Receive Interrupt has occurred. */
+ if (intr0 & RINT0) {
+ if (napi_schedule_prep(&lp->napi)) {
+			/* Disable receive interrupts */
+ writel(RINTEN0, mmio + INTEN0);
+ /* Schedule a polling routine */
+ __napi_schedule(&lp->napi);
+ } else if (intren0 & RINTEN0) {
+ printk("************Driver bug! interrupt while in poll\n");
+			/* Recover by disabling receive interrupts */
+ writel(RINTEN0, mmio + INTEN0);
+ }
+ }
+
+ /* Check if Transmit Interrupt has occurred. */
+ if (intr0 & TINT0)
+ amd8111e_tx(dev);
+
+ /* Check if Link Change Interrupt has occurred. */
+ if (intr0 & LCINT)
+ amd8111e_link_change(dev);
+
+ /* Check if Hardware Timer Interrupt has occurred. */
+ if (intr0 & STINT)
+ amd8111e_calc_coalesce(dev);
+
+err_no_interrupt:
+ writel( VAL0 | INTREN,mmio + CMD0);
+
+ spin_unlock(&lp->lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void amd8111e_poll(struct net_device *dev)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ amd8111e_interrupt(0, dev);
+ local_irq_restore(flags);
+}
+#endif
+
+
+/*
+This function closes the network interface and updates the statistics so that the most recent statistics are available after the interface is down.
+*/
+static int amd8111e_close(struct net_device * dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ netif_stop_queue(dev);
+
+ napi_disable(&lp->napi);
+
+ spin_lock_irq(&lp->lock);
+
+ amd8111e_disable_interrupt(lp);
+ amd8111e_stop_chip(lp);
+
+ /* Free transmit and receive skbs */
+ amd8111e_free_skbs(lp->amd8111e_net_dev);
+
+ netif_carrier_off(lp->amd8111e_net_dev);
+
+ /* Delete ipg timer */
+ if(lp->options & OPTION_DYN_IPG_ENABLE)
+ del_timer_sync(&lp->ipg_data.ipg_timer);
+
+ spin_unlock_irq(&lp->lock);
+ free_irq(dev->irq, dev);
+ amd8111e_free_ring(lp);
+
+ /* Update the statistics before closing */
+ amd8111e_get_stats(dev);
+ lp->opened = 0;
+ return 0;
+}
+/* This function opens a new interface. It requests an IRQ for the device,
+initializes the device, buffers and descriptors, and starts the device.
+*/
+static int amd8111e_open(struct net_device * dev )
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+
+ if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, IRQF_SHARED,
+ dev->name, dev))
+ return -EAGAIN;
+
+ napi_enable(&lp->napi);
+
+ spin_lock_irq(&lp->lock);
+
+ amd8111e_init_hw_default(lp);
+
+ if(amd8111e_restart(dev)){
+ spin_unlock_irq(&lp->lock);
+ napi_disable(&lp->napi);
+ if (dev->irq)
+ free_irq(dev->irq, dev);
+ return -ENOMEM;
+ }
+ /* Start ipg timer */
+ if(lp->options & OPTION_DYN_IPG_ENABLE){
+ add_timer(&lp->ipg_data.ipg_timer);
+ printk(KERN_INFO "%s: Dynamic IPG Enabled.\n",dev->name);
+ }
+
+ lp->opened = 1;
+
+ spin_unlock_irq(&lp->lock);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+/*
+This function checks whether any transmit descriptors are available to queue more packets.
+*/
+static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
+{
+ int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
+ if (lp->tx_skbuff[tx_index])
+ return -1;
+ else
+ return 0;
+
+}
+/*
+This function queues the transmit packets to the descriptors and triggers the send operation. It also initializes the transmit descriptors with the buffer physical address, byte count and ownership flags.
+*/
+
+static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
+ struct net_device * dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int tx_index;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;
+
+ lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);
+
+ lp->tx_skbuff[tx_index] = skb;
+ lp->tx_ring[tx_index].tx_flags = 0;
+
+#if AMD8111E_VLAN_TAG_USED
+ if (vlan_tx_tag_present(skb)) {
+ lp->tx_ring[tx_index].tag_ctrl_cmd |=
+ cpu_to_le16(TCC_VLAN_INSERT);
+ lp->tx_ring[tx_index].tag_ctrl_info =
+ cpu_to_le16(vlan_tx_tag_get(skb));
+
+ }
+#endif
+ lp->tx_dma_addr[tx_index] =
+ pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ lp->tx_ring[tx_index].buff_phy_addr =
+ cpu_to_le32(lp->tx_dma_addr[tx_index]);
+
+	/* Barrier, then hand the descriptor to the hardware: OWN plus
+	   STP/ENP framing and the FCS/LTINT control bits. */
+	wmb();
+	lp->tx_ring[tx_index].tx_flags |=
+		cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT | ADD_FCS_BIT | LTINT_BIT);
+
+ lp->tx_idx++;
+
+ /* Trigger an immediate send poll. */
+ writel( VAL1 | TDMD0, lp->mmio + CMD0);
+ writel( VAL2 | RDMD0,lp->mmio + CMD0);
+
+ if(amd8111e_tx_queue_avail(lp) < 0){
+ netif_stop_queue(dev);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return NETDEV_TX_OK;
+}
+/*
+This function reads out the memory mapped registers of the device needed for a register dump.
+*/
+static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
+{
+ void __iomem *mmio = lp->mmio;
+ /* Read only necessary registers */
+ buf[0] = readl(mmio + XMT_RING_BASE_ADDR0);
+ buf[1] = readl(mmio + XMT_RING_LEN0);
+ buf[2] = readl(mmio + RCV_RING_BASE_ADDR0);
+ buf[3] = readl(mmio + RCV_RING_LEN0);
+ buf[4] = readl(mmio + CMD0);
+ buf[5] = readl(mmio + CMD2);
+ buf[6] = readl(mmio + CMD3);
+ buf[7] = readl(mmio + CMD7);
+ buf[8] = readl(mmio + INT0);
+ buf[9] = readl(mmio + INTEN0);
+ buf[10] = readl(mmio + LADRF);
+ buf[11] = readl(mmio + LADRF+4);
+ buf[12] = readl(mmio + STAT0);
+}
+
+
+/*
+This function sets promiscuous mode, all-multi mode or the multicast address
+list to the device.
+*/
+static void amd8111e_set_multicast_list(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ u32 mc_filter[2] ;
+ int bit_num;
+
+ if(dev->flags & IFF_PROMISC){
+ writel( VAL2 | PROM, lp->mmio + CMD2);
+ return;
+ }
+ else
+ writel( PROM, lp->mmio + CMD2);
+ if (dev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(dev) > MAX_FILTER_SIZE) {
+ /* get all multicast packet */
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ lp->options |= OPTION_MULTICAST_ENABLE;
+ amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
+ return;
+ }
+ if (netdev_mc_empty(dev)) {
+ /* get only own packets */
+ mc_filter[1] = mc_filter[0] = 0;
+ lp->options &= ~OPTION_MULTICAST_ENABLE;
+ amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
+ /* disable promiscuous mode */
+ writel(PROM, lp->mmio + CMD2);
+ return;
+ }
+ /* load all the multicast addresses in the logic filter */
+ lp->options |= OPTION_MULTICAST_ENABLE;
+ mc_filter[1] = mc_filter[0] = 0;
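+	/* hash each address with the top six bits of the little-endian
+	   CRC-32 of the MAC, selecting one of the 64 LADRF filter bits */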
+ netdev_for_each_mc_addr(ha, dev) {
+ bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
+ mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
+ }
+ amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
+
+ /* To eliminate PCI posting bug */
+ readl(lp->mmio + CMD2);
+
+}
+
+static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo *info)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ struct pci_dev *pci_dev = lp->pci_dev;
+ strcpy (info->driver, MODULE_NAME);
+ strcpy (info->version, MODULE_VERS);
+ sprintf(info->fw_version,"%u",chip_version);
+ strcpy (info->bus_info, pci_name(pci_dev));
+}
+
+static int amd8111e_get_regs_len(struct net_device *dev)
+{
+ return AMD8111E_REG_DUMP_LEN;
+}
+
+static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ regs->version = 0;
+ amd8111e_read_regs(lp, buf);
+}
+
+static int amd8111e_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ spin_lock_irq(&lp->lock);
+ mii_ethtool_gset(&lp->mii_if, ecmd);
+ spin_unlock_irq(&lp->lock);
+ return 0;
+}
+
+static int amd8111e_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int res;
+ spin_lock_irq(&lp->lock);
+ res = mii_ethtool_sset(&lp->mii_if, ecmd);
+ spin_unlock_irq(&lp->lock);
+ return res;
+}
+
+static int amd8111e_nway_reset(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ return mii_nway_restart(&lp->mii_if);
+}
+
+static u32 amd8111e_get_link(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ return mii_link_ok(&lp->mii_if);
+}
+
+static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ wol_info->supported = WAKE_MAGIC|WAKE_PHY;
+ if (lp->options & OPTION_WOL_ENABLE)
+ wol_info->wolopts = WAKE_MAGIC;
+}
+
+static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ if (wol_info->wolopts & ~(WAKE_MAGIC|WAKE_PHY))
+ return -EINVAL;
+ spin_lock_irq(&lp->lock);
+ if (wol_info->wolopts & WAKE_MAGIC)
+ lp->options |=
+ (OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
+ else if(wol_info->wolopts & WAKE_PHY)
+ lp->options |=
+ (OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
+ else
+ lp->options &= ~OPTION_WOL_ENABLE;
+ spin_unlock_irq(&lp->lock);
+ return 0;
+}
+
+static const struct ethtool_ops ops = {
+ .get_drvinfo = amd8111e_get_drvinfo,
+ .get_regs_len = amd8111e_get_regs_len,
+ .get_regs = amd8111e_get_regs,
+ .get_settings = amd8111e_get_settings,
+ .set_settings = amd8111e_set_settings,
+ .nway_reset = amd8111e_nway_reset,
+ .get_link = amd8111e_get_link,
+ .get_wol = amd8111e_get_wol,
+ .set_wol = amd8111e_set_wol,
+};
+
+/*
+This function handles the MII ioctls: it reports the PHY address and gets/sets PHY registers on behalf of the mii interface.
+*/
+
+static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(ifr);
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int err;
+ u32 mii_regval;
+
+ switch(cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = lp->ext_phy_addr;
+
+ /* fallthru */
+ case SIOCGMIIREG:
+
+ spin_lock_irq(&lp->lock);
+ err = amd8111e_read_phy(lp, data->phy_id,
+ data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
+ spin_unlock_irq(&lp->lock);
+
+ data->val_out = mii_regval;
+ return err;
+
+ case SIOCSMIIREG:
+
+ spin_lock_irq(&lp->lock);
+ err = amd8111e_write_phy(lp, data->phy_id,
+ data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
+ spin_unlock_irq(&lp->lock);
+
+ return err;
+
+ default:
+ /* do nothing */
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+static int amd8111e_set_mac_address(struct net_device *dev, void *p)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int i;
+ struct sockaddr *addr = p;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ spin_lock_irq(&lp->lock);
+ /* Setting the MAC address to the device */
+ for(i = 0; i < ETH_ADDR_LEN; i++)
+ writeb( dev->dev_addr[i], lp->mmio + PADR + i );
+
+ spin_unlock_irq(&lp->lock);
+
+ return 0;
+}
+
+/*
+This function changes the mtu of the device. It restarts the device to initialize the descriptors with new receive buffers.
+*/
+static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int err;
+
+ if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
+ return -EINVAL;
+
+ if (!netif_running(dev)) {
+		/* new_mtu will be used
+		   when the device starts next time */
+ dev->mtu = new_mtu;
+ return 0;
+ }
+
+ spin_lock_irq(&lp->lock);
+
+ /* stop the chip */
+ writel(RUN, lp->mmio + CMD0);
+
+ dev->mtu = new_mtu;
+
+ err = amd8111e_restart(dev);
+ spin_unlock_irq(&lp->lock);
+ if(!err)
+ netif_start_queue(dev);
+ return err;
+}
+
+static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
+{
+ writel( VAL1|MPPLBA, lp->mmio + CMD3);
+ writel( VAL0|MPEN_SW, lp->mmio + CMD7);
+
+ /* To eliminate PCI posting bug */
+ readl(lp->mmio + CMD7);
+ return 0;
+}
+
+static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
+{
+
+	/* Adapter is already stopped/suspended/interrupt-disabled */
+ writel(VAL0|LCMODE_SW,lp->mmio + CMD7);
+
+ /* To eliminate PCI posting bug */
+ readl(lp->mmio + CMD7);
+ return 0;
+}
+
+/*
+ * This function is called when a packet transmission fails to complete
+ * within a reasonable period, on the assumption that an interrupt has
+ * failed or the interface is locked up. This function will reinitialize
+ * the hardware.
+ */
+static void amd8111e_tx_timeout(struct net_device *dev)
+{
+ struct amd8111e_priv* lp = netdev_priv(dev);
+ int err;
+
+ printk(KERN_ERR "%s: transmit timed out, resetting\n",
+ dev->name);
+ spin_lock_irq(&lp->lock);
+ err = amd8111e_restart(dev);
+ spin_unlock_irq(&lp->lock);
+ if(!err)
+ netif_wake_queue(dev);
+}
+static int amd8111e_suspend(struct pci_dev *pci_dev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pci_dev);
+ struct amd8111e_priv *lp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ /* disable the interrupt */
+ spin_lock_irq(&lp->lock);
+ amd8111e_disable_interrupt(lp);
+ spin_unlock_irq(&lp->lock);
+
+ netif_device_detach(dev);
+
+ /* stop chip */
+ spin_lock_irq(&lp->lock);
+ if(lp->options & OPTION_DYN_IPG_ENABLE)
+ del_timer_sync(&lp->ipg_data.ipg_timer);
+ amd8111e_stop_chip(lp);
+ spin_unlock_irq(&lp->lock);
+
+ if(lp->options & OPTION_WOL_ENABLE){
+ /* enable wol */
+ if(lp->options & OPTION_WAKE_MAGIC_ENABLE)
+ amd8111e_enable_magicpkt(lp);
+ if(lp->options & OPTION_WAKE_PHY_ENABLE)
+ amd8111e_enable_link_change(lp);
+
+ pci_enable_wake(pci_dev, PCI_D3hot, 1);
+ pci_enable_wake(pci_dev, PCI_D3cold, 1);
+
+ }
+ else{
+ pci_enable_wake(pci_dev, PCI_D3hot, 0);
+ pci_enable_wake(pci_dev, PCI_D3cold, 0);
+ }
+
+ pci_save_state(pci_dev);
+ pci_set_power_state(pci_dev, PCI_D3hot);
+
+ return 0;
+}
+static int amd8111e_resume(struct pci_dev *pci_dev)
+{
+ struct net_device *dev = pci_get_drvdata(pci_dev);
+ struct amd8111e_priv *lp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ pci_set_power_state(pci_dev, PCI_D0);
+ pci_restore_state(pci_dev);
+
+ pci_enable_wake(pci_dev, PCI_D3hot, 0);
+ pci_enable_wake(pci_dev, PCI_D3cold, 0); /* D3 cold */
+
+ netif_device_attach(dev);
+
+ spin_lock_irq(&lp->lock);
+ amd8111e_restart(dev);
+ /* Restart ipg timer */
+ if(lp->options & OPTION_DYN_IPG_ENABLE)
+ mod_timer(&lp->ipg_data.ipg_timer,
+ jiffies + IPG_CONVERGE_JIFFIES);
+ spin_unlock_irq(&lp->lock);
+
+ return 0;
+}
+
+
+static void __devexit amd8111e_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ if (dev) {
+ unregister_netdev(dev);
+ iounmap(((struct amd8111e_priv *)netdev_priv(dev))->mmio);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+static void amd8111e_config_ipg(struct net_device* dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ struct ipg_info* ipg_data = &lp->ipg_data;
+ void __iomem *mmio = lp->mmio;
+ unsigned int prev_col_cnt = ipg_data->col_cnt;
+ unsigned int total_col_cnt;
+ unsigned int tmp_ipg;
+
+ if(lp->link_config.duplex == DUPLEX_FULL){
+ ipg_data->ipg = DEFAULT_IPG;
+ return;
+ }
+
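+	/*
+	 * Two-state tuning: CSTATE sweeps the IPG from MIN_IPG towards
+	 * MAX_IPG in IPG_STEP increments, remembering the value that saw
+	 * the fewest new collisions; SSTATE then holds that value for
+	 * IPG_STABLE_TIME timer ticks before the sweep restarts.
+	 */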
+ if(ipg_data->ipg_state == SSTATE){
+
+ if(ipg_data->timer_tick == IPG_STABLE_TIME){
+
+ ipg_data->timer_tick = 0;
+ ipg_data->ipg = MIN_IPG - IPG_STEP;
+ ipg_data->current_ipg = MIN_IPG;
+ ipg_data->diff_col_cnt = 0xFFFFFFFF;
+ ipg_data->ipg_state = CSTATE;
+ }
+ else
+ ipg_data->timer_tick++;
+ }
+
+ if(ipg_data->ipg_state == CSTATE){
+
+ /* Get the current collision count */
+
+ total_col_cnt = ipg_data->col_cnt =
+ amd8111e_read_mib(mmio, xmt_collisions);
+
+ if ((total_col_cnt - prev_col_cnt) <
+ (ipg_data->diff_col_cnt)){
+
+ ipg_data->diff_col_cnt =
+ total_col_cnt - prev_col_cnt ;
+
+ ipg_data->ipg = ipg_data->current_ipg;
+ }
+
+ ipg_data->current_ipg += IPG_STEP;
+
+ if (ipg_data->current_ipg <= MAX_IPG)
+ tmp_ipg = ipg_data->current_ipg;
+ else{
+ tmp_ipg = ipg_data->ipg;
+ ipg_data->ipg_state = SSTATE;
+ }
+ writew((u32)tmp_ipg, mmio + IPG);
+ writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
+ }
+ mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
+ return;
+
+}
+
+static void __devinit amd8111e_probe_ext_phy(struct net_device* dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int i;
+
+ for (i = 0x1e; i >= 0; i--) {
+ u32 id1, id2;
+
+ if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1))
+ continue;
+ if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2))
+ continue;
+ lp->ext_phy_id = (id1 << 16) | id2;
+ lp->ext_phy_addr = i;
+ return;
+ }
+ lp->ext_phy_id = 0;
+ lp->ext_phy_addr = 1;
+}
+
+static const struct net_device_ops amd8111e_netdev_ops = {
+ .ndo_open = amd8111e_open,
+ .ndo_stop = amd8111e_close,
+ .ndo_start_xmit = amd8111e_start_xmit,
+ .ndo_tx_timeout = amd8111e_tx_timeout,
+ .ndo_get_stats = amd8111e_get_stats,
+ .ndo_set_rx_mode = amd8111e_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = amd8111e_set_mac_address,
+ .ndo_do_ioctl = amd8111e_ioctl,
+ .ndo_change_mtu = amd8111e_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = amd8111e_poll,
+#endif
+};
+
+static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int err,i,pm_cap;
+ unsigned long reg_addr,reg_len;
+ struct amd8111e_priv* lp;
+ struct net_device* dev;
+
+ err = pci_enable_device(pdev);
+ if(err){
+ printk(KERN_ERR "amd8111e: Cannot enable new PCI device, "
+ "exiting.\n");
+ return err;
+ }
+
+ if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
+ printk(KERN_ERR "amd8111e: Cannot find PCI base address, "
+ "exiting.\n");
+ err = -ENODEV;
+ goto err_disable_pdev;
+ }
+
+ err = pci_request_regions(pdev, MODULE_NAME);
+ if(err){
+ printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, "
+ "exiting.\n");
+ goto err_disable_pdev;
+ }
+
+ pci_set_master(pdev);
+
+ /* Find power-management capability. */
+ if ((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM)) == 0) {
+ printk(KERN_ERR "amd8111e: No Power Management capability, "
+ "exiting.\n");
+ err = -ENODEV;
+ goto err_free_reg;
+ }
+
+ /* Initialize DMA */
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) {
+ printk(KERN_ERR "amd8111e: DMA not supported, "
+ "exiting.\n");
+ err = -ENODEV;
+ goto err_free_reg;
+ }
+
+ reg_addr = pci_resource_start(pdev, 0);
+ reg_len = pci_resource_len(pdev, 0);
+
+ dev = alloc_etherdev(sizeof(struct amd8111e_priv));
+ if (!dev) {
+ printk(KERN_ERR "amd8111e: Etherdev alloc failed, exiting.\n");
+ err = -ENOMEM;
+ goto err_free_reg;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+#if AMD8111E_VLAN_TAG_USED
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ;
+#endif
+
+ lp = netdev_priv(dev);
+ lp->pci_dev = pdev;
+ lp->amd8111e_net_dev = dev;
+ lp->pm_cap = pm_cap;
+
+ spin_lock_init(&lp->lock);
+
+ lp->mmio = ioremap(reg_addr, reg_len);
+ if (!lp->mmio) {
+ printk(KERN_ERR "amd8111e: Cannot map device registers, "
+ "exiting\n");
+ err = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ /* Initializing MAC address */
+ for(i = 0; i < ETH_ADDR_LEN; i++)
+ dev->dev_addr[i] = readb(lp->mmio + PADR + i);
+
+ /* Set user-defined parameters */
+ lp->ext_phy_option = speed_duplex[card_idx];
+ if(coalesce[card_idx])
+ lp->options |= OPTION_INTR_COAL_ENABLE;
+ if(dynamic_ipg[card_idx++])
+ lp->options |= OPTION_DYN_IPG_ENABLE;
+
+
+ /* Initialize driver entry points */
+ dev->netdev_ops = &amd8111e_netdev_ops;
+ SET_ETHTOOL_OPS(dev, &ops);
+ dev->irq = pdev->irq;
+ dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
+ netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
+
+ /* Probe the external PHY */
+ amd8111e_probe_ext_phy(dev);
+
+ /* setting mii default values */
+ lp->mii_if.dev = dev;
+ lp->mii_if.mdio_read = amd8111e_mdio_read;
+ lp->mii_if.mdio_write = amd8111e_mdio_write;
+ lp->mii_if.phy_id = lp->ext_phy_addr;
+
+ /* Set receive buffer length and set jumbo option*/
+ amd8111e_set_rx_buff_len(dev);
+
+
+ err = register_netdev(dev);
+ if (err) {
+ printk(KERN_ERR "amd8111e: Cannot register net device, "
+ "exiting.\n");
+ goto err_iounmap;
+ }
+
+ pci_set_drvdata(pdev, dev);
+
+ /* Initialize software ipg timer */
+ if(lp->options & OPTION_DYN_IPG_ENABLE){
+ init_timer(&lp->ipg_data.ipg_timer);
+ lp->ipg_data.ipg_timer.data = (unsigned long) dev;
+ lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg;
+ lp->ipg_data.ipg_timer.expires = jiffies +
+ IPG_CONVERGE_JIFFIES;
+ lp->ipg_data.ipg = DEFAULT_IPG;
+ lp->ipg_data.ipg_state = CSTATE;
+ }
+
+ /* display driver and device information */
+
+ chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000) >> 28;
+ printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n",
+ dev->name, MODULE_VERS);
+ printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
+ dev->name, chip_version, dev->dev_addr);
+ if (lp->ext_phy_id)
+ printk(KERN_INFO "%s: Found MII PHY ID 0x%08x at address 0x%02x\n",
+ dev->name, lp->ext_phy_id, lp->ext_phy_addr);
+ else
+ printk(KERN_INFO "%s: Couldn't detect MII PHY, assuming address 0x01\n",
+ dev->name);
+ return 0;
+err_iounmap:
+ iounmap(lp->mmio);
+
+err_free_dev:
+ free_netdev(dev);
+
+err_free_reg:
+ pci_release_regions(pdev);
+
+err_disable_pdev:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+
+}
+
+static struct pci_driver amd8111e_driver = {
+ .name = MODULE_NAME,
+ .id_table = amd8111e_pci_tbl,
+ .probe = amd8111e_probe_one,
+ .remove = __devexit_p(amd8111e_remove_one),
+ .suspend = amd8111e_suspend,
+ .resume = amd8111e_resume
+};
+
+static int __init amd8111e_init(void)
+{
+ return pci_register_driver(&amd8111e_driver);
+}
+
+static void __exit amd8111e_cleanup(void)
+{
+ pci_unregister_driver(&amd8111e_driver);
+}
+
+module_init(amd8111e_init);
+module_exit(amd8111e_cleanup);
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
new file mode 100644
index 000000000000..2ff2e7a12dd0
--- /dev/null
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -0,0 +1,816 @@
+/*
+ * Advanced Micro Devices Inc. AMD8111E Linux Network Driver
+ * Copyright (C) 2003 Advanced Micro Devices
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+
+Module Name:
+
+ amd8111e.h
+
+Abstract:
+
+ AMD8111 based 10/100 Ethernet Controller driver definitions.
+
+Environment:
+
+ Kernel Mode
+
+Revision History:
+ 3.0.0
+ Initial Revision.
+ 3.0.1
+*/
+
+#ifndef _AMD8111E_H
+#define _AMD8111E_H
+
+/* Command style register access
+
+Registers CMD0, CMD2, CMD3, CMD7 and INTEN0 use a write access technique
+called command style access. It allows writing to selected bits of a
+register without altering the bits that are not selected. Command style
+registers are divided into 4 bytes that can be written independently.
+The high-order bit of each byte is the value (VAL) bit; it specifies the
+value that will be written into the selected bits of the register.
+
+E.g., if the value 10011010b is written into the least significant byte
+of a command style register, bits 1, 3 and 4 of the register will be set
+to 1, and the other bits will not be altered. If the value 00011010b is
+written into the same byte, bits 1, 3 and 4 will be cleared to 0 and the
+other bits will not be altered.
+
+*/
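+
+/* A minimal usage sketch (illustrative, using the VAL_BITS and CMD0_BITS
+ * definitions below): to set the RUN bit of CMD0 without disturbing its
+ * neighbours, write it together with VAL0; to clear it, write it with
+ * VAL0 off:
+ *
+ *	writel(VAL0 | RUN, mmio + CMD0);	(sets RUN)
+ *	writel(RUN, mmio + CMD0);		(clears RUN)
+ */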
+
+/* Offset for Memory Mapped Registers. */
+/* 32 bit registers */
+
+#define ASF_STAT 0x00 /* ASF status register */
+#define CHIPID 0x04 /* Chip ID register */
+#define MIB_DATA 0x10 /* MIB data register */
+#define MIB_ADDR 0x14 /* MIB address register */
+#define STAT0 0x30 /* Status0 register */
+#define INT0 0x38 /* Interrupt0 register */
+#define INTEN0 0x40 /* Interrupt0 enable register*/
+#define CMD0 0x48 /* Command0 register */
+#define CMD2 0x50 /* Command2 register */
+#define CMD3 0x54 /* Command3 register */
+#define CMD7 0x64 /* Command7 register */
+
+#define CTRL1 0x6C /* Control1 register */
+#define CTRL2 0x70 /* Control2 register */
+
+#define XMT_RING_LIMIT 0x7C /* Transmit ring limit register */
+
+#define AUTOPOLL0 0x88 /* Auto-poll0 register */
+#define AUTOPOLL1 0x8A /* Auto-poll1 register */
+#define AUTOPOLL2 0x8C /* Auto-poll2 register */
+#define AUTOPOLL3 0x8E /* Auto-poll3 register */
+#define AUTOPOLL4 0x90 /* Auto-poll4 register */
+#define AUTOPOLL5 0x92 /* Auto-poll5 register */
+
+#define AP_VALUE 0x98 /* Auto-poll value register */
+#define DLY_INT_A 0xA8 /* Group A delayed interrupt register */
+#define DLY_INT_B 0xAC /* Group B delayed interrupt register */
+
+#define FLOW_CONTROL 0xC8 /* Flow control register */
+#define PHY_ACCESS 0xD0 /* PHY access register */
+
+#define STVAL 0xD8 /* Software timer value register */
+
+#define XMT_RING_BASE_ADDR0 0x100 /* Transmit ring0 base addr register */
+#define XMT_RING_BASE_ADDR1 0x108 /* Transmit ring1 base addr register */
+#define XMT_RING_BASE_ADDR2 0x110 /* Transmit ring2 base addr register */
+#define XMT_RING_BASE_ADDR3 0x118 /* Transmit ring3 base addr register */
+
+#define RCV_RING_BASE_ADDR0 0x120 /* Receive ring0 base addr register */
+
+#define PMAT0 0x190 /* OnNow pattern register0 */
+#define PMAT1 0x194 /* OnNow pattern register1 */
+
+/* 16bit registers */
+
+#define XMT_RING_LEN0 0x140 /* Transmit Ring0 length register */
+#define XMT_RING_LEN1 0x144 /* Transmit Ring1 length register */
+#define XMT_RING_LEN2 0x148 /* Transmit Ring2 length register */
+#define XMT_RING_LEN3 0x14C /* Transmit Ring3 length register */
+
+#define RCV_RING_LEN0 0x150 /* Receive Ring0 length register */
+
+#define SRAM_SIZE 0x178 /* SRAM size register */
+#define SRAM_BOUNDARY 0x17A /* SRAM boundary register */
+
+/* 48bit register */
+
+#define PADR 0x160 /* Physical address register */
+
+#define IFS1 0x18C /* Inter-frame spacing Part1 register */
+#define IFS 0x18D /* Inter-frame spacing register */
+#define IPG 0x18E /* Inter-frame gap register */
+/* 64bit register */
+
+#define LADRF 0x168 /* Logical address filter register */
+
+
+/* Register Bit Definitions */
+typedef enum {
+
+ ASF_INIT_DONE = (1 << 1),
+ ASF_INIT_PRESENT = (1 << 0),
+
+}STAT_ASF_BITS;
+
+typedef enum {
+
+ MIB_CMD_ACTIVE = (1 << 15 ),
+ MIB_RD_CMD = (1 << 13 ),
+ MIB_CLEAR = (1 << 12 ),
+ MIB_ADDRESS = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)|
+ (1 << 4) | (1 << 5),
+}MIB_ADDR_BITS;
+
+
+typedef enum {
+
+ PMAT_DET = (1 << 12),
+ MP_DET = (1 << 11),
+ LC_DET = (1 << 10),
+ SPEED_MASK = (1 << 9)|(1 << 8)|(1 << 7),
+ FULL_DPLX = (1 << 6),
+ LINK_STATS = (1 << 5),
+ AUTONEG_COMPLETE = (1 << 4),
+ MIIPD = (1 << 3),
+ RX_SUSPENDED = (1 << 2),
+ TX_SUSPENDED = (1 << 1),
+ RUNNING = (1 << 0),
+
+}STAT0_BITS;
+
+#define PHY_SPEED_10 0x2
+#define PHY_SPEED_100 0x3
+
+/* INT0 0x38, 32bit register */
+typedef enum {
+
+ INTR = (1 << 31),
+ PCSINT = (1 << 28),
+ LCINT = (1 << 27),
+ APINT5 = (1 << 26),
+ APINT4 = (1 << 25),
+ APINT3 = (1 << 24),
+ TINT_SUM = (1 << 23),
+ APINT2 = (1 << 22),
+ APINT1 = (1 << 21),
+ APINT0 = (1 << 20),
+ MIIPDTINT = (1 << 19),
+ MCCINT = (1 << 17),
+ MREINT = (1 << 16),
+ RINT_SUM = (1 << 15),
+ SPNDINT = (1 << 14),
+ MPINT = (1 << 13),
+ SINT = (1 << 12),
+ TINT3 = (1 << 11),
+ TINT2 = (1 << 10),
+ TINT1 = (1 << 9),
+ TINT0 = (1 << 8),
+ UINT = (1 << 7),
+ STINT = (1 << 4),
+ RINT0 = (1 << 0),
+
+}INT0_BITS;
+
+typedef enum {
+
+ VAL3 = (1 << 31), /* VAL bit for byte 3 */
+ VAL2 = (1 << 23), /* VAL bit for byte 2 */
+ VAL1 = (1 << 15), /* VAL bit for byte 1 */
+ VAL0 = (1 << 7), /* VAL bit for byte 0 */
+
+}VAL_BITS;
+
+typedef enum {
+
+ /* VAL3 */
+ LCINTEN = (1 << 27),
+ APINT5EN = (1 << 26),
+ APINT4EN = (1 << 25),
+ APINT3EN = (1 << 24),
+ /* VAL2 */
+ APINT2EN = (1 << 22),
+ APINT1EN = (1 << 21),
+ APINT0EN = (1 << 20),
+ MIIPDTINTEN = (1 << 19),
+ MCCIINTEN = (1 << 18),
+ MCCINTEN = (1 << 17),
+ MREINTEN = (1 << 16),
+ /* VAL1 */
+ SPNDINTEN = (1 << 14),
+ MPINTEN = (1 << 13),
+ TINTEN3 = (1 << 11),
+ SINTEN = (1 << 12),
+ TINTEN2 = (1 << 10),
+ TINTEN1 = (1 << 9),
+ TINTEN0 = (1 << 8),
+ /* VAL0 */
+ STINTEN = (1 << 4),
+ RINTEN0 = (1 << 0),
+
+ INTEN0_CLEAR = 0x1F7F7F1F, /* Command style register */
+
+}INTEN0_BITS;
+
+typedef enum {
+ /* VAL2 */
+ RDMD0 = (1 << 16),
+ /* VAL1 */
+ TDMD3 = (1 << 11),
+ TDMD2 = (1 << 10),
+ TDMD1 = (1 << 9),
+ TDMD0 = (1 << 8),
+ /* VAL0 */
+ UINTCMD = (1 << 6),
+ RX_FAST_SPND = (1 << 5),
+ TX_FAST_SPND = (1 << 4),
+ RX_SPND = (1 << 3),
+ TX_SPND = (1 << 2),
+ INTREN = (1 << 1),
+ RUN = (1 << 0),
+
+ CMD0_CLEAR = 0x000F0F7F, /* Command style register */
+
+}CMD0_BITS;
+
+typedef enum {
+
+ /* VAL3 */
+ CONDUIT_MODE = (1 << 29),
+ /* VAL2 */
+ RPA = (1 << 19),
+ DRCVPA = (1 << 18),
+ DRCVBC = (1 << 17),
+ PROM = (1 << 16),
+ /* VAL1 */
+ ASTRP_RCV = (1 << 13),
+ RCV_DROP0 = (1 << 12),
+ EMBA = (1 << 11),
+ DXMT2PD = (1 << 10),
+ LTINTEN = (1 << 9),
+ DXMTFCS = (1 << 8),
+ /* VAL0 */
+ APAD_XMT = (1 << 6),
+ DRTY = (1 << 5),
+ INLOOP = (1 << 4),
+ EXLOOP = (1 << 3),
+ REX_RTRY = (1 << 2),
+ REX_UFLO = (1 << 1),
+ REX_LCOL = (1 << 0),
+
+ CMD2_CLEAR = 0x3F7F3F7F, /* Command style register */
+
+}CMD2_BITS;
+
+typedef enum {
+
+ /* VAL3 */
+ ASF_INIT_DONE_ALIAS = (1 << 29),
+ /* VAL2 */
+ JUMBO = (1 << 21),
+ VSIZE = (1 << 20),
+ VLONLY = (1 << 19),
+ VL_TAG_DEL = (1 << 18),
+ /* VAL1 */
+ EN_PMGR = (1 << 14),
+ INTLEVEL = (1 << 13),
+ FORCE_FULL_DUPLEX = (1 << 12),
+ FORCE_LINK_STATUS = (1 << 11),
+ APEP = (1 << 10),
+ MPPLBA = (1 << 9),
+ /* VAL0 */
+ RESET_PHY_PULSE = (1 << 2),
+ RESET_PHY = (1 << 1),
+ PHY_RST_POL = (1 << 0),
+
+}CMD3_BITS;
+
+
+typedef enum {
+
+ /* VAL0 */
+ PMAT_SAVE_MATCH = (1 << 4),
+ PMAT_MODE = (1 << 3),
+ MPEN_SW = (1 << 1),
+ LCMODE_SW = (1 << 0),
+
+ CMD7_CLEAR = 0x0000001B /* Command style register */
+
+}CMD7_BITS;
+
+
+typedef enum {
+
+ RESET_PHY_WIDTH = (0xF << 16) | (0xF<< 20), /* 0x00FF0000 */
+ XMTSP_MASK = (1 << 9) | (1 << 8), /* 9:8 */
+ XMTSP_128 = (1 << 9), /* 9 */
+ XMTSP_64 = (1 << 8),
+ CACHE_ALIGN = (1 << 4),
+ BURST_LIMIT_MASK = (0xF << 0 ),
+ CTRL1_DEFAULT = 0x00010111,
+
+}CTRL1_BITS;
+
+typedef enum {
+
+ FMDC_MASK = (1 << 9)|(1 << 8), /* 9:8 */
+ XPHYRST = (1 << 7),
+ XPHYANE = (1 << 6),
+ XPHYFD = (1 << 5),
+ XPHYSP = (1 << 4) | (1 << 3), /* 4:3 */
+ APDW_MASK = (1 << 2) | (1 << 1) | (1 << 0), /* 2:0 */
+
+}CTRL2_BITS;
+
+/* XMT_RING_LIMIT 0x7C, 32bit register */
+typedef enum {
+
+ XMT_RING2_LIMIT = (0xFF << 16), /* 23:16 */
+ XMT_RING1_LIMIT = (0xFF << 8), /* 15:8 */
+ XMT_RING0_LIMIT = (0xFF << 0), /* 7:0 */
+
+}XMT_RING_LIMIT_BITS;
+
+typedef enum {
+
+ AP_REG0_EN = (1 << 15),
+ AP_REG0_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PHY0_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL0_BITS;
+
+/* AUTOPOLL1 0x8A, 16bit register */
+typedef enum {
+
+ AP_REG1_EN = (1 << 15),
+ AP_REG1_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PRE_SUP1 = (1 << 6),
+ AP_PHY1_DFLT = (1 << 5),
+ AP_PHY1_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL1_BITS;
+
+
+typedef enum {
+
+ AP_REG2_EN = (1 << 15),
+ AP_REG2_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PRE_SUP2 = (1 << 6),
+ AP_PHY2_DFLT = (1 << 5),
+ AP_PHY2_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL2_BITS;
+
+typedef enum {
+
+ AP_REG3_EN = (1 << 15),
+ AP_REG3_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PRE_SUP3 = (1 << 6),
+ AP_PHY3_DFLT = (1 << 5),
+ AP_PHY3_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL3_BITS;
+
+
+typedef enum {
+
+ AP_REG4_EN = (1 << 15),
+ AP_REG4_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PRE_SUP4 = (1 << 6),
+ AP_PHY4_DFLT = (1 << 5),
+ AP_PHY4_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL4_BITS;
+
+
+typedef enum {
+
+ AP_REG5_EN = (1 << 15),
+ AP_REG5_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PRE_SUP5 = (1 << 6),
+ AP_PHY5_DFLT = (1 << 5),
+ AP_PHY5_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL5_BITS;
+
+
+
+
+/* AP_VALUE 0x98, 32bit register */
+typedef enum {
+
+ AP_VAL_ACTIVE = (1 << 31),
+ AP_VAL_RD_CMD = ( 1 << 29),
+ AP_ADDR = (1 << 18)|(1 << 17)|(1 << 16), /* 18:16 */
+ AP_VAL = (0xF << 0) | (0xF << 4) |( 0xF << 8) |
+ (0xF << 12), /* 15:0 */
+
+}AP_VALUE_BITS;
+
+typedef enum {
+
+ DLY_INT_A_R3 = (1 << 31),
+ DLY_INT_A_R2 = (1 << 30),
+ DLY_INT_A_R1 = (1 << 29),
+ DLY_INT_A_R0 = (1 << 28),
+ DLY_INT_A_T3 = (1 << 27),
+ DLY_INT_A_T2 = (1 << 26),
+ DLY_INT_A_T1 = (1 << 25),
+ DLY_INT_A_T0 = ( 1 << 24),
+ EVENT_COUNT_A = (0xF << 16) | (0x1 << 20),/* 20:16 */
+ MAX_DELAY_TIME_A = (0xF << 0) | (0xF << 4) | (1 << 8)|
+ (1 << 9) | (1 << 10), /* 10:0 */
+
+}DLY_INT_A_BITS;
+
+typedef enum {
+
+ DLY_INT_B_R3 = (1 << 31),
+ DLY_INT_B_R2 = (1 << 30),
+ DLY_INT_B_R1 = (1 << 29),
+ DLY_INT_B_R0 = (1 << 28),
+ DLY_INT_B_T3 = (1 << 27),
+ DLY_INT_B_T2 = (1 << 26),
+ DLY_INT_B_T1 = (1 << 25),
+ DLY_INT_B_T0 = ( 1 << 24),
+ EVENT_COUNT_B = (0xF << 16) | (0x1 << 20),/* 20:16 */
+ MAX_DELAY_TIME_B = (0xF << 0) | (0xF << 4) | (1 << 8)|
+ (1 << 9) | (1 << 10), /* 10:0 */
+}DLY_INT_B_BITS;
+
+
+/* FLOW_CONTROL 0xC8, 32bit register */
+typedef enum {
+
+ PAUSE_LEN_CHG = (1 << 30),
+ FTPE = (1 << 22),
+ FRPE = (1 << 21),
+ NAPA = (1 << 20),
+ NPA = (1 << 19),
+ FIXP = ( 1 << 18),
+ FCCMD = ( 1 << 16),
+ PAUSE_LEN = (0xF << 0) | (0xF << 4) |( 0xF << 8) | (0xF << 12), /* 15:0 */
+
+}FLOW_CONTROL_BITS;
+
+/* PHY_ACCESS 0xD0, 32bit register */
+typedef enum {
+
+ PHY_CMD_ACTIVE = (1 << 31),
+ PHY_WR_CMD = (1 << 30),
+ PHY_RD_CMD = (1 << 29),
+ PHY_RD_ERR = (1 << 28),
+ PHY_PRE_SUP = (1 << 27),
+ PHY_ADDR = (1 << 21) | (1 << 22) | (1 << 23)|
+ (1 << 24) |(1 << 25),/* 25:21 */
+ PHY_REG_ADDR = (1 << 16) | (1 << 17) | (1 << 18)| (1 << 19) | (1 << 20),/* 20:16 */
+ PHY_DATA = (0xF << 0)|(0xF << 4) |(0xF << 8)|
+ (0xF << 12),/* 15:0 */
+
+}PHY_ACCESS_BITS;
+
+
+/* PMAT0 0x190, 32bit register */
+typedef enum {
+ PMR_ACTIVE = (1 << 31),
+ PMR_WR_CMD = (1 << 30),
+ PMR_RD_CMD = (1 << 29),
+ PMR_BANK = (1 <<28),
+ PMR_ADDR = (0xF << 16)|(1 << 20)|(1 << 21)|
+ (1 << 22),/* 22:16 */
+ PMR_B4 = (0xF << 0) | (0xF << 4), /* 7:0 */
+}PMAT0_BITS;
+
+
+/* PMAT1 0x194, 32bit register */
+typedef enum {
+ PMR_B3 = (0xF << 24) | (0xF <<28),/* 31:24 */
+ PMR_B2 = (0xF << 16) |(0xF << 20),/* 23:16 */
+ PMR_B1 = (0xF << 8) | (0xF <<12), /* 15:8 */
+ PMR_B0 = (0xF << 0)|(0xF << 4),/* 7:0 */
+}PMAT1_BITS;
+
+/************************************************************************/
+/* */
+/* MIB counter definitions */
+/* */
+/************************************************************************/
+
+#define rcv_miss_pkts 0x00
+#define rcv_octets 0x01
+#define rcv_broadcast_pkts 0x02
+#define rcv_multicast_pkts 0x03
+#define rcv_undersize_pkts 0x04
+#define rcv_oversize_pkts 0x05
+#define rcv_fragments 0x06
+#define rcv_jabbers 0x07
+#define rcv_unicast_pkts 0x08
+#define rcv_alignment_errors 0x09
+#define rcv_fcs_errors 0x0A
+#define rcv_good_octets 0x0B
+#define rcv_mac_ctrl 0x0C
+#define rcv_flow_ctrl 0x0D
+#define rcv_pkts_64_octets 0x0E
+#define rcv_pkts_65to127_octets 0x0F
+#define rcv_pkts_128to255_octets 0x10
+#define rcv_pkts_256to511_octets 0x11
+#define rcv_pkts_512to1023_octets 0x12
+#define rcv_pkts_1024to1518_octets 0x13
+#define rcv_unsupported_opcode 0x14
+#define rcv_symbol_errors 0x15
+#define rcv_drop_pkts_ring1 0x16
+#define rcv_drop_pkts_ring2 0x17
+#define rcv_drop_pkts_ring3 0x18
+#define rcv_drop_pkts_ring4 0x19
+#define rcv_jumbo_pkts 0x1A
+
+#define xmt_underrun_pkts 0x20
+#define xmt_octets 0x21
+#define xmt_packets 0x22
+#define xmt_broadcast_pkts 0x23
+#define xmt_multicast_pkts 0x24
+#define xmt_collisions 0x25
+#define xmt_unicast_pkts 0x26
+#define xmt_one_collision 0x27
+#define xmt_multiple_collision 0x28
+#define xmt_deferred_transmit 0x29
+#define xmt_late_collision 0x2A
+#define xmt_excessive_defer 0x2B
+#define xmt_loss_carrier 0x2C
+#define xmt_excessive_collision 0x2D
+#define xmt_back_pressure 0x2E
+#define xmt_flow_ctrl 0x2F
+#define xmt_pkts_64_octets 0x30
+#define xmt_pkts_65to127_octets 0x31
+#define xmt_pkts_128to255_octets 0x32
+#define xmt_pkts_256to511_octets 0x33
+#define xmt_pkts_512to1023_octets 0x34
+#define xmt_pkts_1024to1518_octet 0x35
+#define xmt_oversize_pkts 0x36
+#define xmt_jumbo_pkts 0x37
+
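+/* A counter is read by writing its offset, together with MIB_RD_CMD, to
+ * the MIB_ADDR register, polling until MIB_CMD_ACTIVE clears, and then
+ * reading the value from MIB_DATA (see amd8111e_read_mib() in
+ * amd8111e.c).
+ */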
+
+/* Driver definitions */
+
+#define PCI_VENDOR_ID_AMD 0x1022
+#define PCI_DEVICE_ID_AMD8111E_7462 0x7462
+
+#define MAX_UNITS 8 /* Maximum number of devices possible */
+
+#define NUM_TX_BUFFERS 32 /* Number of transmit buffers */
+#define NUM_RX_BUFFERS 32 /* Number of receive buffers */
+
+#define TX_BUFF_MOD_MASK 31 /* (NUM_TX_BUFFERS -1) */
+#define RX_BUFF_MOD_MASK 31 /* (NUM_RX_BUFFERS -1) */
+
+#define NUM_TX_RING_DR 32
+#define NUM_RX_RING_DR 32
+
+#define TX_RING_DR_MOD_MASK 31 /* (NUM_TX_RING_DR -1) */
+#define RX_RING_DR_MOD_MASK 31 /* (NUM_RX_RING_DR -1) */
+
+#define MAX_FILTER_SIZE 64 /* Maximum multicast address */
+#define AMD8111E_MIN_MTU 60
+#define AMD8111E_MAX_MTU 9000
+
+#define PKT_BUFF_SZ 1536
+#define MIN_PKT_LEN 60
+#define ETH_ADDR_LEN 6
+
+#define AMD8111E_TX_TIMEOUT (3 * HZ)/* 3 sec */
+#define SOFT_TIMER_FREQ 0xBEBC /* 0.5 sec */
+#define DELAY_TIMER_CONV 50 /* msec to 10 usec conversion.
+ Only 500 usec resolution */
+#define OPTION_VLAN_ENABLE 0x0001
+#define OPTION_JUMBO_ENABLE 0x0002
+#define OPTION_MULTICAST_ENABLE 0x0004
+#define OPTION_WOL_ENABLE 0x0008
+#define OPTION_WAKE_MAGIC_ENABLE 0x0010
+#define OPTION_WAKE_PHY_ENABLE 0x0020
+#define OPTION_INTR_COAL_ENABLE 0x0040
+#define OPTION_DYN_IPG_ENABLE 0x0080
+
+#define PHY_REG_ADDR_MASK 0x1f
+
+/* ipg parameters */
+#define DEFAULT_IPG 0x60
+#define IFS1_DELTA 36
+#define IPG_CONVERGE_JIFFIES (HZ/2)
+#define IPG_STABLE_TIME 5
+#define MIN_IPG 96
+#define MAX_IPG 255
+#define IPG_STEP 16
+#define CSTATE 1
+#define SSTATE 2
+
+/* Assume the controller returns data within 10 times the maximum processing time */
+#define REPEAT_CNT 10
+
+/* amd8111e descriptor flag definitions */
+typedef enum {
+
+ OWN_BIT = (1 << 15),
+ ADD_FCS_BIT = (1 << 13),
+ LTINT_BIT = (1 << 12),
+ STP_BIT = (1 << 9),
+ ENP_BIT = (1 << 8),
+ KILL_BIT = (1 << 6),
+ TCC_VLAN_INSERT = (1 << 1),
+ TCC_VLAN_REPLACE = (1 << 1) |( 1<< 0),
+
+}TX_FLAG_BITS;
+
+typedef enum {
+ ERR_BIT = (1 << 14),
+ FRAM_BIT = (1 << 13),
+ OFLO_BIT = (1 << 12),
+ CRC_BIT = (1 << 11),
+ PAM_BIT = (1 << 6),
+ LAFM_BIT = (1 << 5),
+ BAM_BIT = (1 << 4),
+ TT_VLAN_TAGGED = (1 << 3) | (1 << 2), /* 0x000c */
+ TT_PRTY_TAGGED = (1 << 3),/* 0x0008 */
+
+}RX_FLAG_BITS;
+
+#define RESET_RX_FLAGS 0x0000
+#define TT_MASK 0x000c
+#define TCC_MASK 0x0003
+
+/* driver ioctl parameters */
+#define AMD8111E_REG_DUMP_LEN (13 * sizeof(u32))
+
+/* amd8111e descriptor format */
+
+struct amd8111e_tx_dr{
+
+ __le16 buff_count; /* Size of the buffer pointed by this descriptor */
+
+ __le16 tx_flags;
+
+ __le16 tag_ctrl_info;
+
+ __le16 tag_ctrl_cmd;
+
+ __le32 buff_phy_addr;
+
+ __le32 reserved;
+};
+
+struct amd8111e_rx_dr{
+
+ __le32 reserved;
+
+ __le16 msg_count; /* Received message len */
+
+ __le16 tag_ctrl_info;
+
+ __le16 buff_count; /* Len of the buffer pointed by descriptor. */
+
+ __le16 rx_flags;
+
+ __le32 buff_phy_addr;
+
+};
+struct amd8111e_link_config{
+
+#define SPEED_INVALID 0xffff
+#define DUPLEX_INVALID 0xff
+#define AUTONEG_INVALID 0xff
+
+ unsigned long orig_phy_option;
+ u16 speed;
+ u8 duplex;
+ u8 autoneg;
+ u8 reserved; /* 32bit alignment */
+};
+
+enum coal_type{
+
+ NO_COALESCE,
+ LOW_COALESCE,
+ MEDIUM_COALESCE,
+ HIGH_COALESCE,
+
+};
+
+enum coal_mode{
+ RX_INTR_COAL,
+ TX_INTR_COAL,
+ DISABLE_COAL,
+ ENABLE_COAL,
+
+};
+#define MAX_TIMEOUT 40
+#define MAX_EVENT_COUNT 31
+struct amd8111e_coalesce_conf{
+
+ unsigned int rx_timeout;
+ unsigned int rx_event_count;
+ unsigned long rx_packets;
+ unsigned long rx_prev_packets;
+ unsigned long rx_bytes;
+ unsigned long rx_prev_bytes;
+ unsigned int rx_coal_type;
+
+ unsigned int tx_timeout;
+ unsigned int tx_event_count;
+ unsigned long tx_packets;
+ unsigned long tx_prev_packets;
+ unsigned long tx_bytes;
+ unsigned long tx_prev_bytes;
+ unsigned int tx_coal_type;
+
+};
+struct ipg_info{
+
+ unsigned int ipg_state;
+ unsigned int ipg;
+ unsigned int current_ipg;
+ unsigned int col_cnt;
+ unsigned int diff_col_cnt;
+ unsigned int timer_tick;
+ unsigned int prev_ipg;
+ struct timer_list ipg_timer;
+};
+
+struct amd8111e_priv{
+
+ struct amd8111e_tx_dr* tx_ring;
+ struct amd8111e_rx_dr* rx_ring;
+ dma_addr_t tx_ring_dma_addr; /* tx descriptor ring base address */
+ dma_addr_t rx_ring_dma_addr; /* rx descriptor ring base address */
+ const char *name;
+ struct pci_dev *pci_dev; /* Ptr to the associated pci_dev */
+ struct net_device* amd8111e_net_dev; /* ptr to associated net_device */
+ /* Transmit and receive skbs */
+ struct sk_buff *tx_skbuff[NUM_TX_BUFFERS];
+ struct sk_buff *rx_skbuff[NUM_RX_BUFFERS];
+ /* Transmit and receive dma mapped addr */
+ dma_addr_t tx_dma_addr[NUM_TX_BUFFERS];
+ dma_addr_t rx_dma_addr[NUM_RX_BUFFERS];
+ /* Reg memory mapped address */
+ void __iomem *mmio;
+
+ struct napi_struct napi;
+
+ spinlock_t lock; /* Guard lock */
+ unsigned long rx_idx, tx_idx; /* The next free ring entry */
+ unsigned long tx_complete_idx;
+ unsigned long tx_ring_complete_idx;
+ unsigned long tx_ring_idx;
+ unsigned int rx_buff_len; /* Buffer length of rx buffers */
+ int options; /* Options enabled/disabled for the device */
+
+ unsigned long ext_phy_option;
+ int ext_phy_addr;
+ u32 ext_phy_id;
+
+ struct amd8111e_link_config link_config;
+ int pm_cap;
+
+ struct net_device *next;
+ int mii;
+ struct mii_if_info mii_if;
+ char opened;
+ unsigned int drv_rx_errors;
+ struct amd8111e_coalesce_conf coal_conf;
+
+ struct ipg_info ipg_data;
+
+};
+
+/* The kernel-provided writeq does not write 64 bits into the amd8111e
+device registers; instead it writes only the upper 32 bits of the data
+into the lower 32 bits of the register. BUG? */
+#define amd8111e_writeq(_UlData, _memMap) do { \
+ writel(*(u32 *)(&_UlData), _memMap); \
+ writel(*(u32 *)((u8 *)(&_UlData) + 4), (_memMap) + 4); \
+} while (0)
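+
+/* Usage sketch (illustrative): writing the 64-bit logical address filter
+ * as two 32-bit halves, e.g.
+ *
+ *	u64 mc_filter = 0;
+ *	amd8111e_writeq(mc_filter, lp->mmio + LADRF);
+ */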
+
+/* Maps the external speed options to internal values */
+typedef enum {
+ SPEED_AUTONEG,
+ SPEED10_HALF,
+ SPEED10_FULL,
+ SPEED100_HALF,
+ SPEED100_FULL,
+}EXT_PHY_OPTION;
+
+static int card_idx;
+static int speed_duplex[MAX_UNITS] = { 0, };
+static int coalesce[MAX_UNITS] = {1,1,1,1,1,1,1,1};
+static int dynamic_ipg[MAX_UNITS] = {0,0,0,0,0,0,0,0};
+static unsigned int chip_version;
+
+#endif /* _AMD8111E_H */
+
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
new file mode 100644
index 000000000000..eb18e1fe65c8
--- /dev/null
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -0,0 +1,793 @@
+/*
+ * Amiga Linux/m68k Ariadne Ethernet Driver
+ *
+ * © Copyright 1995-2003 by Geert Uytterhoeven (geert@linux-m68k.org)
+ * Peter De Schrijver (p2@mind.be)
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This program is based on
+ *
+ * lance.c: An AMD LANCE ethernet driver for linux.
+ * Written 1993-94 by Donald Becker.
+ *
+ * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
+ * Advanced Micro Devices
+ * Publication #16907, Rev. B, Amendment/0, May 1994
+ *
+ * MC68230: Parallel Interface/Timer (PI/T)
+ * Motorola Semiconductors, December, 1983
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * The Ariadne is a Zorro-II board made by Village Tronic. It contains:
+ *
+ * - an Am79C960 PCnet-ISA Single-Chip Ethernet Controller with both
+ * 10BASE-2 (thin coax) and 10BASE-T (UTP) connectors
+ *
+ * - an MC68230 Parallel Interface/Timer configured as 2 parallel ports
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+/*#define DEBUG*/
+
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/zorro.h>
+#include <linux/bitops.h>
+
+#include <asm/amigaints.h>
+#include <asm/amigahw.h>
+#include <asm/irq.h>
+
+#include "ariadne.h"
+
+#ifdef ARIADNE_DEBUG
+int ariadne_debug = ARIADNE_DEBUG;
+#else
+int ariadne_debug = 1;
+#endif
+
+/* Macros to Fix Endianness problems */
+
+/* Swap the Bytes in a WORD */
+#define swapw(x) ((((x) >> 8) & 0x00ff) | (((x) << 8) & 0xff00))
+/* Get the Low BYTE in a WORD */
+#define lowb(x) ((x) & 0xff)
+/* Get the Swapped High WORD in a LONG */
+#define swhighw(x) ((((x) >> 8) & 0xff00) | (((x) >> 24) & 0x00ff))
+/* Get the Swapped Low WORD in a LONG */
+#define swloww(x) ((((x) << 8) & 0xff00) | (((x) >> 8) & 0x00ff))
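+
+/* Worked example (illustrative): swapw(0x1234) == 0x3412, and for a
+ * 24-bit board address such as 0x00AABBCC, swhighw() and swloww() yield
+ * the byte-swapped high and low words: swhighw(0x00AABBCC) == 0xAA00
+ * and swloww(0x00AABBCC) == 0xCCBB.
+ */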
+
+/* Transmit/Receive Ring Definitions */
+
+#define TX_RING_SIZE 5
+#define RX_RING_SIZE 16
+
+#define PKT_BUF_SIZE 1520
+
+/* Private Device Data */
+
+struct ariadne_private {
+ volatile struct TDRE *tx_ring[TX_RING_SIZE];
+ volatile struct RDRE *rx_ring[RX_RING_SIZE];
+ volatile u_short *tx_buff[TX_RING_SIZE];
+ volatile u_short *rx_buff[RX_RING_SIZE];
+ int cur_tx, cur_rx; /* The next free ring entry */
+ int dirty_tx; /* The ring entries to be free()ed */
+ char tx_full;
+};
+
+/* Structure Created in the Ariadne's RAM Buffer */
+
+struct lancedata {
+ struct TDRE tx_ring[TX_RING_SIZE];
+ struct RDRE rx_ring[RX_RING_SIZE];
+ u_short tx_buff[TX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)];
+ u_short rx_buff[RX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)];
+};
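+
+/* The LANCE addresses this structure relative to the board: the driver
+ * hands it pointers computed as ARIADNE_RAM + offsetof(struct lancedata,
+ * member), split into swapped words with swloww() and swhighw() (see
+ * ariadne_init_ring() below).
+ */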
+
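+/* Copy len bytes into board memory as 16-bit words; an odd trailing
+ * byte is written to the high byte of the final word.
+ */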
+static void memcpyw(volatile u_short *dest, u_short *src, int len)
+{
+ while (len >= 2) {
+ *(dest++) = *(src++);
+ len -= 2;
+ }
+ if (len == 1)
+ *dest = (*(u_char *)src) << 8;
+}
+
+static void ariadne_init_ring(struct net_device *dev)
+{
+ struct ariadne_private *priv = netdev_priv(dev);
+ volatile struct lancedata *lancedata = (struct lancedata *)dev->mem_start;
+ int i;
+
+ netif_stop_queue(dev);
+
+ priv->tx_full = 0;
+ priv->cur_rx = priv->cur_tx = 0;
+ priv->dirty_tx = 0;
+
+ /* Set up TX Ring */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ volatile struct TDRE *t = &lancedata->tx_ring[i];
+ t->TMD0 = swloww(ARIADNE_RAM +
+ offsetof(struct lancedata, tx_buff[i]));
+ t->TMD1 = swhighw(ARIADNE_RAM +
+ offsetof(struct lancedata, tx_buff[i])) |
+ TF_STP | TF_ENP;
+ t->TMD2 = swapw((u_short)-PKT_BUF_SIZE);
+ t->TMD3 = 0;
+ priv->tx_ring[i] = &lancedata->tx_ring[i];
+ priv->tx_buff[i] = lancedata->tx_buff[i];
+ netdev_dbg(dev, "TX Entry %2d at %p, Buf at %p\n",
+ i, &lancedata->tx_ring[i], lancedata->tx_buff[i]);
+ }
+
+ /* Set up RX Ring */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ volatile struct RDRE *r = &lancedata->rx_ring[i];
+ r->RMD0 = swloww(ARIADNE_RAM +
+ offsetof(struct lancedata, rx_buff[i]));
+ r->RMD1 = swhighw(ARIADNE_RAM +
+ offsetof(struct lancedata, rx_buff[i])) |
+ RF_OWN;
+ r->RMD2 = swapw((u_short)-PKT_BUF_SIZE);
+ r->RMD3 = 0x0000;
+ priv->rx_ring[i] = &lancedata->rx_ring[i];
+ priv->rx_buff[i] = lancedata->rx_buff[i];
+ netdev_dbg(dev, "RX Entry %2d at %p, Buf at %p\n",
+ i, &lancedata->rx_ring[i], lancedata->rx_buff[i]);
+ }
+}
+
+static int ariadne_rx(struct net_device *dev)
+{
+ struct ariadne_private *priv = netdev_priv(dev);
+ int entry = priv->cur_rx % RX_RING_SIZE;
+ int i;
+
+ /* If we own the next entry, it's a new packet. Send it up */
+ while (!(lowb(priv->rx_ring[entry]->RMD1) & RF_OWN)) {
+ int status = lowb(priv->rx_ring[entry]->RMD1);
+
+ if (status != (RF_STP | RF_ENP)) { /* There was an error */
+ /* There is a tricky error noted by
+ * John Murphy <murf@perftech.com> to Russ Nelson:
+ * Even with full-sized buffers it's possible for a
+ * jabber packet to use two buffers, with only the
+ * last correctly noting the error
+ */
+ /* Only count a general error at the end of a packet */
+ if (status & RF_ENP)
+ dev->stats.rx_errors++;
+ if (status & RF_FRAM)
+ dev->stats.rx_frame_errors++;
+ if (status & RF_OFLO)
+ dev->stats.rx_over_errors++;
+ if (status & RF_CRC)
+ dev->stats.rx_crc_errors++;
+ if (status & RF_BUFF)
+ dev->stats.rx_fifo_errors++;
+ priv->rx_ring[entry]->RMD1 &= 0xff00 | RF_STP | RF_ENP;
+ } else {
+ /* Malloc up new buffer, compatible with net-3 */
+ short pkt_len = swapw(priv->rx_ring[entry]->RMD3);
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len + 2);
+ if (skb == NULL) {
+ netdev_warn(dev, "Memory squeeze, deferring packet\n");
+ for (i = 0; i < RX_RING_SIZE; i++)
+ if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
+ break;
+
+ if (i > RX_RING_SIZE - 2) {
+ dev->stats.rx_dropped++;
+ priv->rx_ring[entry]->RMD1 |= RF_OWN;
+ priv->cur_rx++;
+ }
+ break;
+ }
+
+
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, pkt_len); /* Make room */
+ skb_copy_to_linear_data(skb,
+ (const void *)priv->rx_buff[entry],
+ pkt_len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netdev_dbg(dev, "RX pkt type 0x%04x from %pM to %pM data 0x%08x len %d\n",
+ ((u_short *)skb->data)[6],
+ skb->data + 6, skb->data,
+ (int)skb->data, (int)skb->len);
+
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+
+ priv->rx_ring[entry]->RMD1 |= RF_OWN;
+ entry = (++priv->cur_rx) % RX_RING_SIZE;
+ }
+
+ priv->cur_rx = priv->cur_rx % RX_RING_SIZE;
+
+ /* We should check that at least two ring entries are free.
+ * If not, we should free one and mark stats->rx_dropped++
+ */
+
+ return 0;
+}
+
+static irqreturn_t ariadne_interrupt(int irq, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+ struct ariadne_private *priv;
+ int csr0, boguscnt;
+ int handled = 0;
+
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+
+ if (!(lance->RDP & INTR)) /* Check if any interrupt has been */
+ return IRQ_NONE; /* generated by the board */
+
+ priv = netdev_priv(dev);
+
+ boguscnt = 10;
+ while ((csr0 = lance->RDP) & (ERR | RINT | TINT) && --boguscnt >= 0) {
+ /* Acknowledge all of the current interrupt sources ASAP */
+ lance->RDP = csr0 & ~(INEA | TDMD | STOP | STRT | INIT);
+
+#ifdef DEBUG
+ if (ariadne_debug > 5) {
+ netdev_dbg(dev, "interrupt csr0=%#02x new csr=%#02x [",
+ csr0, lance->RDP);
+ if (csr0 & INTR)
+ pr_cont(" INTR");
+ if (csr0 & INEA)
+ pr_cont(" INEA");
+ if (csr0 & RXON)
+ pr_cont(" RXON");
+ if (csr0 & TXON)
+ pr_cont(" TXON");
+ if (csr0 & TDMD)
+ pr_cont(" TDMD");
+ if (csr0 & STOP)
+ pr_cont(" STOP");
+ if (csr0 & STRT)
+ pr_cont(" STRT");
+ if (csr0 & INIT)
+ pr_cont(" INIT");
+ if (csr0 & ERR)
+ pr_cont(" ERR");
+ if (csr0 & BABL)
+ pr_cont(" BABL");
+ if (csr0 & CERR)
+ pr_cont(" CERR");
+ if (csr0 & MISS)
+ pr_cont(" MISS");
+ if (csr0 & MERR)
+ pr_cont(" MERR");
+ if (csr0 & RINT)
+ pr_cont(" RINT");
+ if (csr0 & TINT)
+ pr_cont(" TINT");
+ if (csr0 & IDON)
+ pr_cont(" IDON");
+ pr_cont(" ]\n");
+ }
+#endif
+
+ if (csr0 & RINT) { /* Rx interrupt */
+ handled = 1;
+ ariadne_rx(dev);
+ }
+
+ if (csr0 & TINT) { /* Tx-done interrupt */
+ int dirty_tx = priv->dirty_tx;
+
+ handled = 1;
+ while (dirty_tx < priv->cur_tx) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int status = lowb(priv->tx_ring[entry]->TMD1);
+
+ if (status & TF_OWN)
+ break; /* It still hasn't been Txed */
+
+ priv->tx_ring[entry]->TMD1 &= 0xff00;
+
+ if (status & TF_ERR) {
+ /* There was a major error, log it */
+ int err_status = priv->tx_ring[entry]->TMD3;
+ dev->stats.tx_errors++;
+ if (err_status & EF_RTRY)
+ dev->stats.tx_aborted_errors++;
+ if (err_status & EF_LCAR)
+ dev->stats.tx_carrier_errors++;
+ if (err_status & EF_LCOL)
+ dev->stats.tx_window_errors++;
+ if (err_status & EF_UFLO) {
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ dev->stats.tx_fifo_errors++;
+ /* Remove this verbosity later! */
+ netdev_err(dev, "Tx FIFO error! Status %04x\n",
+ csr0);
+ /* Restart the chip */
+ lance->RDP = STRT;
+ }
+ } else {
+ if (status & (TF_MORE | TF_ONE))
+ dev->stats.collisions++;
+ dev->stats.tx_packets++;
+ }
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) {
+ netdev_err(dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
+ dirty_tx, priv->cur_tx,
+ priv->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (priv->tx_full && netif_queue_stopped(dev) &&
+ dirty_tx > priv->cur_tx - TX_RING_SIZE + 2) {
+ /* The ring is no longer full */
+ priv->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+
+ priv->dirty_tx = dirty_tx;
+ }
+
+ /* Log misc errors */
+ if (csr0 & BABL) {
+ handled = 1;
+ dev->stats.tx_errors++; /* Tx babble */
+ }
+ if (csr0 & MISS) {
+ handled = 1;
+ dev->stats.rx_errors++; /* Missed a Rx frame */
+ }
+ if (csr0 & MERR) {
+ handled = 1;
+ netdev_err(dev, "Bus master arbitration failure, status %04x\n",
+ csr0);
+ /* Restart the chip */
+ lance->RDP = STRT;
+ }
+ }
+
+ /* Clear any other interrupt, and set interrupt enable */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = INEA | BABL | CERR | MISS | MERR | IDON;
+
+ if (ariadne_debug > 4)
+ netdev_dbg(dev, "exiting interrupt, csr%d=%#04x\n",
+ lance->RAP, lance->RDP);
+
+ return IRQ_RETVAL(handled);
+}
+
+static int ariadne_open(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+ u_short in;
+ u_long version;
+ int i;
+
+ /* Reset the LANCE */
+ in = lance->Reset;
+
+ /* Stop the LANCE */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = STOP;
+
+ /* Check the LANCE version */
+ lance->RAP = CSR88; /* Chip ID */
+ version = swapw(lance->RDP);
+ lance->RAP = CSR89; /* Chip ID */
+ version |= swapw(lance->RDP) << 16;
+ if ((version & 0x00000fff) != 0x00000003) {
+ pr_warn("Couldn't find AMD Ethernet Chip\n");
+ return -EAGAIN;
+ }
+ if ((version & 0x0ffff000) != 0x00003000) {
+ pr_warn("Couldn't find Am79C960 (Wrong part number = %ld)\n",
+ (version & 0x0ffff000) >> 12);
+ return -EAGAIN;
+ }
+
+ netdev_dbg(dev, "Am79C960 (PCnet-ISA) Revision %ld\n",
+ (version & 0xf0000000) >> 28);
+
+ ariadne_init_ring(dev);
+
+ /* Miscellaneous Stuff */
+ lance->RAP = CSR3; /* Interrupt Masks and Deferral Control */
+ lance->RDP = 0x0000;
+ lance->RAP = CSR4; /* Test and Features Control */
+ lance->RDP = DPOLL | APAD_XMT | MFCOM | RCVCCOM | TXSTRTM | JABM;
+
+ /* Set the Multicast Table */
+ lance->RAP = CSR8; /* Logical Address Filter, LADRF[15:0] */
+ lance->RDP = 0x0000;
+ lance->RAP = CSR9; /* Logical Address Filter, LADRF[31:16] */
+ lance->RDP = 0x0000;
+ lance->RAP = CSR10; /* Logical Address Filter, LADRF[47:32] */
+ lance->RDP = 0x0000;
+ lance->RAP = CSR11; /* Logical Address Filter, LADRF[63:48] */
+ lance->RDP = 0x0000;
+
+ /* Set the Ethernet Hardware Address */
+ lance->RAP = CSR12; /* Physical Address Register, PADR[15:0] */
+ lance->RDP = ((u_short *)&dev->dev_addr[0])[0];
+ lance->RAP = CSR13; /* Physical Address Register, PADR[31:16] */
+ lance->RDP = ((u_short *)&dev->dev_addr[0])[1];
+ lance->RAP = CSR14; /* Physical Address Register, PADR[47:32] */
+ lance->RDP = ((u_short *)&dev->dev_addr[0])[2];
+
+ /* Set the Init Block Mode */
+ lance->RAP = CSR15; /* Mode Register */
+ lance->RDP = 0x0000;
+
+ /* Set the Transmit Descriptor Ring Pointer */
+ lance->RAP = CSR30; /* Base Address of Transmit Ring */
+ lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, tx_ring));
+ lance->RAP = CSR31; /* Base Address of transmit Ring */
+ lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, tx_ring));
+
+ /* Set the Receive Descriptor Ring Pointer */
+ lance->RAP = CSR24; /* Base Address of Receive Ring */
+ lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, rx_ring));
+ lance->RAP = CSR25; /* Base Address of Receive Ring */
+ lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, rx_ring));
+
+ /* Set the Number of RX and TX Ring Entries */
+ lance->RAP = CSR76; /* Receive Ring Length */
+ lance->RDP = swapw(((u_short)-RX_RING_SIZE));
+ lance->RAP = CSR78; /* Transmit Ring Length */
+ lance->RDP = swapw(((u_short)-TX_RING_SIZE));
+
+ /* Enable Media Interface Port Auto Select (10BASE-2/10BASE-T) */
+ lance->RAP = ISACSR2; /* Miscellaneous Configuration */
+ lance->IDP = ASEL;
+
+ /* LED Control */
+ lance->RAP = ISACSR5; /* LED1 Status */
+ lance->IDP = PSE|XMTE;
+ lance->RAP = ISACSR6; /* LED2 Status */
+ lance->IDP = PSE|COLE;
+ lance->RAP = ISACSR7; /* LED3 Status */
+ lance->IDP = PSE|RCVE;
+
+ netif_start_queue(dev);
+
+ i = request_irq(IRQ_AMIGA_PORTS, ariadne_interrupt, IRQF_SHARED,
+ dev->name, dev);
+ if (i)
+ return i;
+
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = INEA | STRT;
+
+ return 0;
+}
+
+static int ariadne_close(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+
+ netif_stop_queue(dev);
+
+ lance->RAP = CSR112; /* Missed Frame Count */
+ dev->stats.rx_missed_errors = swapw(lance->RDP);
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+
+ if (ariadne_debug > 1) {
+ netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
+ lance->RDP);
+ netdev_dbg(dev, "%lu packets missed\n",
+ dev->stats.rx_missed_errors);
+ }
+
+ /* We stop the LANCE here -- it occasionally polls memory if we don't */
+ lance->RDP = STOP;
+
+ free_irq(IRQ_AMIGA_PORTS, dev);
+
+ return 0;
+}
+
+static inline void ariadne_reset(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = STOP;
+ ariadne_init_ring(dev);
+ lance->RDP = INEA | STRT;
+ netif_start_queue(dev);
+}
+
+static void ariadne_tx_timeout(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+
+ netdev_err(dev, "transmit timed out, status %04x, resetting\n",
+ lance->RDP);
+ ariadne_reset(dev);
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t ariadne_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ariadne_private *priv = netdev_priv(dev);
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+ int entry;
+ unsigned long flags;
+ int len = skb->len;
+
+#if 0
+ if (ariadne_debug > 3) {
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ netdev_dbg(dev, "%s: csr0 %04x\n", __func__, lance->RDP);
+ lance->RDP = 0x0000;
+ }
+#endif
+
+ /* FIXME: is the 79C960 new enough to do its own padding right ? */
+ if (skb->len < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ len = ETH_ZLEN;
+ }
+
+ /* Fill in a Tx ring entry */
+
+ netdev_dbg(dev, "TX pkt type 0x%04x from %pM to %pM data 0x%08x len %d\n",
+ ((u_short *)skb->data)[6],
+ skb->data + 6, skb->data,
+ (int)skb->data, (int)skb->len);
+
+ local_irq_save(flags);
+
+ entry = priv->cur_tx % TX_RING_SIZE;
+
+ /* Caution: the write order is important here, set the base address with
+ the "ownership" bits last */
+
+ priv->tx_ring[entry]->TMD2 = swapw((u_short)-skb->len);
+ priv->tx_ring[entry]->TMD3 = 0x0000;
+ memcpyw(priv->tx_buff[entry], (u_short *)skb->data, len);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_DEBUG, "tx_buff: ", DUMP_PREFIX_OFFSET, 16, 1,
+ (void *)priv->tx_buff[entry],
+ skb->len > 64 ? 64 : skb->len, true);
+#endif
+
+ priv->tx_ring[entry]->TMD1 = (priv->tx_ring[entry]->TMD1 & 0xff00)
+ | TF_OWN | TF_STP | TF_ENP;
+
+ dev_kfree_skb(skb);
+
+ priv->cur_tx++;
+ if ((priv->cur_tx >= TX_RING_SIZE) &&
+ (priv->dirty_tx >= TX_RING_SIZE)) {
+
+ netdev_dbg(dev, "*** Subtracting TX_RING_SIZE from cur_tx (%d) and dirty_tx (%d)\n",
+ priv->cur_tx, priv->dirty_tx);
+
+ priv->cur_tx -= TX_RING_SIZE;
+ priv->dirty_tx -= TX_RING_SIZE;
+ }
+ dev->stats.tx_bytes += len;
+
+ /* Trigger an immediate send poll */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = INEA | TDMD;
+
+ if (lowb(priv->tx_ring[(entry + 1) % TX_RING_SIZE]->TMD1) != 0) {
+ netif_stop_queue(dev);
+ priv->tx_full = 1;
+ }
+ local_irq_restore(flags);
+
+ return NETDEV_TX_OK;
+}
+
+static struct net_device_stats *ariadne_get_stats(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+ short saved_addr;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ saved_addr = lance->RAP;
+ lance->RAP = CSR112; /* Missed Frame Count */
+ dev->stats.rx_missed_errors = swapw(lance->RDP);
+ lance->RAP = saved_addr;
+ local_irq_restore(flags);
+
+ return &dev->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ * num_addrs == -1 Promiscuous mode, receive all packets
+ * num_addrs == 0 Normal mode, clear multicast list
+ * num_addrs > 0 Multicast mode, receive normal and MC packets,
+ * and do best-effort filtering.
+ */
+static void set_multicast_list(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+
+ if (!netif_running(dev))
+ return;
+
+ netif_stop_queue(dev);
+
+ /* We take the simple way out: promiscuous mode when requested,
+ * otherwise an all-or-nothing multicast filter */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = STOP; /* Temporarily stop the lance */
+ ariadne_init_ring(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ lance->RAP = CSR15; /* Mode Register */
+ lance->RDP = PROM; /* Set promiscuous mode */
+ } else {
+ short multicast_table[4];
+ int num_addrs = netdev_mc_count(dev);
+ int i;
+ /* We don't use the multicast table,
+ * but rely on upper-layer filtering
+ */
+ memset(multicast_table, (num_addrs == 0) ? 0 : -1,
+ sizeof(multicast_table));
+ for (i = 0; i < 4; i++) {
+ lance->RAP = CSR8 + (i << 8);
+ /* Logical Address Filter */
+ lance->RDP = swapw(multicast_table[i]);
+ }
+ lance->RAP = CSR15; /* Mode Register */
+ lance->RDP = 0x0000; /* Unset promiscuous mode */
+ }
+
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = INEA | STRT | IDON;/* Resume normal operation */
+
+ netif_wake_queue(dev);
+}
+
+
+static void __devexit ariadne_remove_one(struct zorro_dev *z)
+{
+ struct net_device *dev = zorro_get_drvdata(z);
+
+ unregister_netdev(dev);
+ release_mem_region(ZTWO_PADDR(dev->base_addr), sizeof(struct Am79C960));
+ release_mem_region(ZTWO_PADDR(dev->mem_start), ARIADNE_RAM_SIZE);
+ free_netdev(dev);
+}
+
+static struct zorro_device_id ariadne_zorro_tbl[] __devinitdata = {
+ { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(zorro, ariadne_zorro_tbl);
+
+static const struct net_device_ops ariadne_netdev_ops = {
+ .ndo_open = ariadne_open,
+ .ndo_stop = ariadne_close,
+ .ndo_start_xmit = ariadne_start_xmit,
+ .ndo_tx_timeout = ariadne_tx_timeout,
+ .ndo_get_stats = ariadne_get_stats,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int __devinit ariadne_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
+{
+ unsigned long board = z->resource.start;
+ unsigned long base_addr = board + ARIADNE_LANCE;
+ unsigned long mem_start = board + ARIADNE_RAM;
+ struct resource *r1, *r2;
+ struct net_device *dev;
+ struct ariadne_private *priv;
+ int err;
+
+ r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960");
+ if (!r1)
+ return -EBUSY;
+ r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM");
+ if (!r2) {
+ release_mem_region(base_addr, sizeof(struct Am79C960));
+ return -EBUSY;
+ }
+
+ dev = alloc_etherdev(sizeof(struct ariadne_private));
+ if (dev == NULL) {
+ release_mem_region(base_addr, sizeof(struct Am79C960));
+ release_mem_region(mem_start, ARIADNE_RAM_SIZE);
+ return -ENOMEM;
+ }
+
+ priv = netdev_priv(dev);
+
+ r1->name = dev->name;
+ r2->name = dev->name;
+
+ dev->dev_addr[0] = 0x00;
+ dev->dev_addr[1] = 0x60;
+ dev->dev_addr[2] = 0x30;
+ dev->dev_addr[3] = (z->rom.er_SerialNumber >> 16) & 0xff;
+ dev->dev_addr[4] = (z->rom.er_SerialNumber >> 8) & 0xff;
+ dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff;
+ dev->base_addr = ZTWO_VADDR(base_addr);
+ dev->mem_start = ZTWO_VADDR(mem_start);
+ dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE;
+
+ dev->netdev_ops = &ariadne_netdev_ops;
+ dev->watchdog_timeo = 5 * HZ;
+
+ err = register_netdev(dev);
+ if (err) {
+ release_mem_region(base_addr, sizeof(struct Am79C960));
+ release_mem_region(mem_start, ARIADNE_RAM_SIZE);
+ free_netdev(dev);
+ return err;
+ }
+ zorro_set_drvdata(z, dev);
+
+ netdev_info(dev, "Ariadne at 0x%08lx, Ethernet Address %pM\n",
+ board, dev->dev_addr);
+
+ return 0;
+}
+
+static struct zorro_driver ariadne_driver = {
+ .name = "ariadne",
+ .id_table = ariadne_zorro_tbl,
+ .probe = ariadne_init_one,
+ .remove = __devexit_p(ariadne_remove_one),
+};
+
+static int __init ariadne_init_module(void)
+{
+ return zorro_register_driver(&ariadne_driver);
+}
+
+static void __exit ariadne_cleanup_module(void)
+{
+ zorro_unregister_driver(&ariadne_driver);
+}
+
+module_init(ariadne_init_module);
+module_exit(ariadne_cleanup_module);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/ariadne.h b/drivers/net/ethernet/amd/ariadne.h
new file mode 100644
index 000000000000..727be5cdd1ea
--- /dev/null
+++ b/drivers/net/ethernet/amd/ariadne.h
@@ -0,0 +1,415 @@
+/*
+ * Amiga Linux/m68k Ariadne Ethernet Driver
+ *
+ * © Copyright 1995 by Geert Uytterhoeven (geert@linux-m68k.org)
+ * Peter De Schrijver
+ * (Peter.DeSchrijver@linux.cc.kuleuven.ac.be)
+ *
+ * ----------------------------------------------------------------------------------
+ *
+ * This program is based on
+ *
+ * lance.c: An AMD LANCE ethernet driver for linux.
+ * Written 1993-94 by Donald Becker.
+ *
+ * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
+ * Advanced Micro Devices
+ * Publication #16907, Rev. B, Amendment/0, May 1994
+ *
+ * MC68230: Parallel Interface/Timer (PI/T)
+ * Motorola Semiconductors, December, 1983
+ *
+ * ----------------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ----------------------------------------------------------------------------------
+ *
+ * The Ariadne is a Zorro-II board made by Village Tronic. It contains:
+ *
+ * - an Am79C960 PCnet-ISA Single-Chip Ethernet Controller with both
+ * 10BASE-2 (thin coax) and 10BASE-T (UTP) connectors
+ *
+ * - an MC68230 Parallel Interface/Timer configured as 2 parallel ports
+ */
+
+
+ /*
+ * Am79C960 PCnet-ISA
+ */
+
+struct Am79C960 {
+ volatile u_short AddressPROM[8];
+ /* IEEE Address PROM (Unused in the Ariadne) */
+ volatile u_short RDP; /* Register Data Port */
+ volatile u_short RAP; /* Register Address Port */
+ volatile u_short Reset; /* Reset Chip on Read Access */
+ volatile u_short IDP; /* ISACSR Data Port */
+};
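+
+/* Register access follows the usual LANCE indirect scheme: write the CSR
+ * (or ISACSR) number to RAP, then read or write the value through RDP
+ * (or IDP), e.g.
+ *
+ *	lance->RAP = CSR0;	(select CSR0)
+ *	lance->RDP = STOP;	(stop the controller)
+ */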
+
+
+ /*
+ * Am79C960 Control and Status Registers
+ *
+ * These values are already swap()ed!!
+ *
+ * Only registers marked with a `-' are intended for network software
+ * access
+ */
+
+#define CSR0 0x0000 /* - PCnet-ISA Controller Status */
+#define CSR1 0x0100 /* - IADR[15:0] */
+#define CSR2 0x0200 /* - IADR[23:16] */
+#define CSR3 0x0300 /* - Interrupt Masks and Deferral Control */
+#define CSR4 0x0400 /* - Test and Features Control */
+#define CSR6 0x0600 /* RCV/XMT Descriptor Table Length */
+#define CSR8 0x0800 /* - Logical Address Filter, LADRF[15:0] */
+#define CSR9 0x0900 /* - Logical Address Filter, LADRF[31:16] */
+#define CSR10 0x0a00 /* - Logical Address Filter, LADRF[47:32] */
+#define CSR11 0x0b00 /* - Logical Address Filter, LADRF[63:48] */
+#define CSR12 0x0c00 /* - Physical Address Register, PADR[15:0] */
+#define CSR13 0x0d00 /* - Physical Address Register, PADR[31:16] */
+#define CSR14 0x0e00 /* - Physical Address Register, PADR[47:32] */
+#define CSR15 0x0f00 /* - Mode Register */
+#define CSR16 0x1000 /* Initialization Block Address Lower */
+#define CSR17 0x1100 /* Initialization Block Address Upper */
+#define CSR18 0x1200 /* Current Receive Buffer Address */
+#define CSR19 0x1300 /* Current Receive Buffer Address */
+#define CSR20 0x1400 /* Current Transmit Buffer Address */
+#define CSR21 0x1500 /* Current Transmit Buffer Address */
+#define CSR22 0x1600 /* Next Receive Buffer Address */
+#define CSR23 0x1700 /* Next Receive Buffer Address */
+#define CSR24 0x1800 /* - Base Address of Receive Ring */
+#define CSR25 0x1900 /* - Base Address of Receive Ring */
+#define CSR26 0x1a00 /* Next Receive Descriptor Address */
+#define CSR27 0x1b00 /* Next Receive Descriptor Address */
+#define CSR28 0x1c00 /* Current Receive Descriptor Address */
+#define CSR29 0x1d00 /* Current Receive Descriptor Address */
+#define CSR30 0x1e00 /* - Base Address of Transmit Ring */
+#define CSR31 0x1f00 /* - Base Address of transmit Ring */
+#define CSR32 0x2000 /* Next Transmit Descriptor Address */
+#define CSR33 0x2100 /* Next Transmit Descriptor Address */
+#define CSR34 0x2200 /* Current Transmit Descriptor Address */
+#define CSR35 0x2300 /* Current Transmit Descriptor Address */
+#define CSR36 0x2400 /* Next Next Receive Descriptor Address */
+#define CSR37 0x2500 /* Next Next Receive Descriptor Address */
+#define CSR38 0x2600 /* Next Next Transmit Descriptor Address */
+#define CSR39 0x2700 /* Next Next Transmit Descriptor Address */
+#define CSR40 0x2800 /* Current Receive Status and Byte Count */
+#define CSR41 0x2900 /* Current Receive Status and Byte Count */
+#define CSR42 0x2a00 /* Current Transmit Status and Byte Count */
+#define CSR43 0x2b00 /* Current Transmit Status and Byte Count */
+#define CSR44 0x2c00 /* Next Receive Status and Byte Count */
+#define CSR45 0x2d00 /* Next Receive Status and Byte Count */
+#define CSR46 0x2e00 /* Poll Time Counter */
+#define CSR47 0x2f00 /* Polling Interval */
+#define CSR48 0x3000 /* Temporary Storage */
+#define CSR49 0x3100 /* Temporary Storage */
+#define CSR50 0x3200 /* Temporary Storage */
+#define CSR51 0x3300 /* Temporary Storage */
+#define CSR52 0x3400 /* Temporary Storage */
+#define CSR53 0x3500 /* Temporary Storage */
+#define CSR54 0x3600 /* Temporary Storage */
+#define CSR55 0x3700 /* Temporary Storage */
+#define CSR56 0x3800 /* Temporary Storage */
+#define CSR57 0x3900 /* Temporary Storage */
+#define CSR58 0x3a00 /* Temporary Storage */
+#define CSR59 0x3b00 /* Temporary Storage */
+#define CSR60 0x3c00 /* Previous Transmit Descriptor Address */
+#define CSR61 0x3d00 /* Previous Transmit Descriptor Address */
+#define CSR62 0x3e00 /* Previous Transmit Status and Byte Count */
+#define CSR63 0x3f00 /* Previous Transmit Status and Byte Count */
+#define CSR64 0x4000 /* Next Transmit Buffer Address */
+#define CSR65 0x4100 /* Next Transmit Buffer Address */
+#define CSR66 0x4200 /* Next Transmit Status and Byte Count */
+#define CSR67 0x4300 /* Next Transmit Status and Byte Count */
+#define CSR68 0x4400 /* Transmit Status Temporary Storage */
+#define CSR69 0x4500 /* Transmit Status Temporary Storage */
+#define CSR70 0x4600 /* Temporary Storage */
+#define CSR71 0x4700 /* Temporary Storage */
+#define CSR72 0x4800 /* Receive Ring Counter */
+#define CSR74 0x4a00 /* Transmit Ring Counter */
+#define CSR76 0x4c00 /* - Receive Ring Length */
+#define CSR78 0x4e00 /* - Transmit Ring Length */
+#define CSR80 0x5000 /* - Burst and FIFO Threshold Control */
+#define CSR82 0x5200 /* - Bus Activity Timer */
+#define CSR84 0x5400 /* DMA Address */
+#define CSR85 0x5500 /* DMA Address */
+#define CSR86 0x5600 /* Buffer Byte Counter */
+#define CSR88 0x5800 /* - Chip ID */
+#define CSR89 0x5900 /* - Chip ID */
+#define CSR92 0x5c00 /* Ring Length Conversion */
+#define CSR94 0x5e00 /* Transmit Time Domain Reflectometry Count */
+#define CSR96 0x6000 /* Bus Interface Scratch Register 0 */
+#define CSR97 0x6100 /* Bus Interface Scratch Register 0 */
+#define CSR98 0x6200 /* Bus Interface Scratch Register 1 */
+#define CSR99 0x6300 /* Bus Interface Scratch Register 1 */
+#define CSR104 0x6800 /* SWAP */
+#define CSR105 0x6900 /* SWAP */
+#define CSR108 0x6c00 /* Buffer Management Scratch */
+#define CSR109 0x6d00 /* Buffer Management Scratch */
+#define CSR112 0x7000 /* - Missed Frame Count */
+#define CSR114 0x7200 /* - Receive Collision Count */
+#define CSR124 0x7c00 /* - Buffer Management Unit Test */
+
+
+ /*
+ * Am79C960 ISA Control and Status Registers
+ *
+ * These values are already swap()ed!!
+ */
+
+#define ISACSR0 0x0000 /* Master Mode Read Active */
+#define ISACSR1 0x0100 /* Master Mode Write Active */
+#define ISACSR2 0x0200 /* Miscellaneous Configuration */
+#define ISACSR4 0x0400 /* LED0 Status (Link Integrity) */
+#define ISACSR5 0x0500 /* LED1 Status */
+#define ISACSR6 0x0600 /* LED2 Status */
+#define ISACSR7 0x0700 /* LED3 Status */
+
+
+ /*
+ * Bit definitions for CSR0 (PCnet-ISA Controller Status)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define ERR 0x0080 /* Error */
+#define BABL 0x0040 /* Babble: Transmitted too many bits */
+#define CERR 0x0020 /* No Heartbeat (10BASE-T) */
+#define MISS 0x0010 /* Missed Frame */
+#define MERR 0x0008 /* Memory Error */
+#define RINT 0x0004 /* Receive Interrupt */
+#define TINT 0x0002 /* Transmit Interrupt */
+#define IDON 0x0001 /* Initialization Done */
+#define INTR 0x8000 /* Interrupt Flag */
+#define INEA 0x4000 /* Interrupt Enable */
+#define RXON 0x2000 /* Receive On */
+#define TXON 0x1000 /* Transmit On */
+#define TDMD 0x0800 /* Transmit Demand */
+#define STOP 0x0400 /* Stop */
+#define STRT 0x0200 /* Start */
+#define INIT 0x0100 /* Initialize */
+
+
+ /*
+ * Bit definitions for CSR3 (Interrupt Masks and Deferral Control)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define BABLM 0x0040 /* Babble Mask */
+#define MISSM 0x0010 /* Missed Frame Mask */
+#define MERRM 0x0008 /* Memory Error Mask */
+#define RINTM 0x0004 /* Receive Interrupt Mask */
+#define TINTM 0x0002 /* Transmit Interrupt Mask */
+#define IDONM 0x0001 /* Initialization Done Mask */
+#define DXMT2PD 0x1000 /* Disable Transmit Two Part Deferral */
+#define EMBA 0x0800 /* Enable Modified Back-off Algorithm */
+
+
+ /*
+ * Bit definitions for CSR4 (Test and Features Control)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define ENTST 0x0080 /* Enable Test Mode */
+#define DMAPLUS 0x0040 /* Disable Burst Transaction Counter */
+#define TIMER 0x0020 /* Timer Enable Register */
+#define DPOLL 0x0010 /* Disable Transmit Polling */
+#define APAD_XMT 0x0008 /* Auto Pad Transmit */
+#define ASTRP_RCV 0x0004 /* Auto Pad Stripping */
+#define MFCO 0x0002 /* Missed Frame Counter Overflow Interrupt */
+#define MFCOM 0x0001 /* Missed Frame Counter Overflow Mask */
+#define RCVCCO 0x2000 /* Receive Collision Counter Overflow Interrupt */
+#define RCVCCOM 0x1000 /* Receive Collision Counter Overflow Mask */
+#define TXSTRT 0x0800 /* Transmit Start Status */
+#define TXSTRTM 0x0400 /* Transmit Start Mask */
+#define JAB 0x0200 /* Jabber Error */
+#define JABM 0x0100 /* Jabber Error Mask */
+
+
+ /*
+ * Bit definitions for CSR15 (Mode Register)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define PROM 0x0080 /* Promiscuous Mode */
+#define DRCVBC 0x0040 /* Disable Receive Broadcast */
+#define DRCVPA 0x0020 /* Disable Receive Physical Address */
+#define DLNKTST 0x0010 /* Disable Link Status */
+#define DAPC 0x0008 /* Disable Automatic Polarity Correction */
+#define MENDECL 0x0004 /* MENDEC Loopback Mode */
+#define LRTTSEL 0x0002 /* Low Receive Threshold/Transmit Mode Select */
+#define PORTSEL1 0x0001 /* Port Select Bits */
+#define PORTSEL2 0x8000 /* Port Select Bits */
+#define INTL 0x4000 /* Internal Loopback */
+#define DRTY 0x2000 /* Disable Retry */
+#define FCOLL 0x1000 /* Force Collision */
+#define DXMTFCS 0x0800 /* Disable Transmit CRC */
+#define LOOP 0x0400 /* Loopback Enable */
+#define DTX 0x0200 /* Disable Transmitter */
+#define DRX 0x0100 /* Disable Receiver */
+
+
+ /*
+ * Bit definitions for ISACSR2 (Miscellaneous Configuration)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define ASEL 0x0200 /* Media Interface Port Auto Select */
+
+
+ /*
+ * Bit definitions for ISACSR5-7 (LED1-3 Status)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define LEDOUT 0x0080 /* Current LED Status */
+#define PSE 0x8000 /* Pulse Stretcher Enable */
+#define XMTE 0x1000 /* Enable Transmit Status Signal */
+#define RVPOLE 0x0800 /* Enable Receive Polarity Signal */
+#define RCVE 0x0400 /* Enable Receive Status Signal */
+#define JABE 0x0200 /* Enable Jabber Signal */
+#define COLE 0x0100 /* Enable Collision Signal */
+
+
+ /*
+ * Receive Descriptor Ring Entry
+ */
+
+struct RDRE {
+ volatile u_short RMD0; /* LADR[15:0] */
+ volatile u_short RMD1; /* HADR[23:16] | Receive Flags */
+ volatile u_short RMD2; /* Buffer Byte Count (two's complement) */
+ volatile u_short RMD3; /* Message Byte Count */
+};
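+/* A worked example of the two's-complement count: a 1024-byte buffer
+ * would be programmed as RMD2 = -1024 = 0xfc00, which also keeps the
+ * upper four bits set as the LANCE requires (the driver byte-swaps
+ * descriptor words as needed when it fills them in).
+ */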
+
+
+ /*
+ * Transmit Descriptor Ring Entry
+ */
+
+struct TDRE {
+ volatile u_short TMD0; /* LADR[15:0] */
+ volatile u_short TMD1; /* HADR[23:16] | Transmit Flags */
+ volatile u_short TMD2; /* Buffer Byte Count (two's complement) */
+ volatile u_short TMD3; /* Error Flags */
+};
+
+
+ /*
+ * Receive Flags
+ */
+
+#define RF_OWN 0x0080 /* PCnet-ISA controller owns the descriptor */
+#define RF_ERR 0x0040 /* Error */
+#define RF_FRAM 0x0020 /* Framing Error */
+#define RF_OFLO 0x0010 /* Overflow Error */
+#define RF_CRC 0x0008 /* CRC Error */
+#define RF_BUFF 0x0004 /* Buffer Error */
+#define RF_STP 0x0002 /* Start of Packet */
+#define RF_ENP 0x0001 /* End of Packet */
+
+
+ /*
+ * Transmit Flags
+ */
+
+#define TF_OWN 0x0080 /* PCnet-ISA controller owns the descriptor */
+#define TF_ERR 0x0040 /* Error */
+#define TF_ADD_FCS 0x0020 /* Controls FCS Generation */
+#define TF_MORE 0x0010 /* More than one retry needed */
+#define TF_ONE 0x0008 /* One retry needed */
+#define TF_DEF 0x0004 /* Deferred */
+#define TF_STP 0x0002 /* Start of Packet */
+#define TF_ENP 0x0001 /* End of Packet */
+
+
+ /*
+ * Error Flags
+ */
+
+#define EF_BUFF 0x0080 /* Buffer Error */
+#define EF_UFLO 0x0040 /* Underflow Error */
+#define EF_LCOL 0x0010 /* Late Collision */
+#define EF_LCAR 0x0008 /* Loss of Carrier */
+#define EF_RTRY 0x0004 /* Retry Error */
+#define EF_TDR 0xff03 /* Time Domain Reflectometry */
+
+
+
+ /*
+ * MC68230 Parallel Interface/Timer
+ */
+
+struct MC68230 {
+ volatile u_char PGCR; /* Port General Control Register */
+ u_char Pad1[1];
+ volatile u_char PSRR; /* Port Service Request Register */
+ u_char Pad2[1];
+ volatile u_char PADDR; /* Port A Data Direction Register */
+ u_char Pad3[1];
+ volatile u_char PBDDR; /* Port B Data Direction Register */
+ u_char Pad4[1];
+ volatile u_char PCDDR; /* Port C Data Direction Register */
+ u_char Pad5[1];
+ volatile u_char PIVR; /* Port Interrupt Vector Register */
+ u_char Pad6[1];
+ volatile u_char PACR; /* Port A Control Register */
+ u_char Pad7[1];
+ volatile u_char PBCR; /* Port B Control Register */
+ u_char Pad8[1];
+ volatile u_char PADR; /* Port A Data Register */
+ u_char Pad9[1];
+ volatile u_char PBDR; /* Port B Data Register */
+ u_char Pad10[1];
+ volatile u_char PAAR; /* Port A Alternate Register */
+ u_char Pad11[1];
+ volatile u_char PBAR; /* Port B Alternate Register */
+ u_char Pad12[1];
+ volatile u_char PCDR; /* Port C Data Register */
+ u_char Pad13[1];
+ volatile u_char PSR; /* Port Status Register */
+ u_char Pad14[5];
+ volatile u_char TCR; /* Timer Control Register */
+ u_char Pad15[1];
+ volatile u_char TIVR; /* Timer Interrupt Vector Register */
+ u_char Pad16[3];
+ volatile u_char CPRH; /* Counter Preload Register (High) */
+ u_char Pad17[1];
+ volatile u_char CPRM; /* Counter Preload Register (Mid) */
+ u_char Pad18[1];
+ volatile u_char CPRL; /* Counter Preload Register (Low) */
+ u_char Pad19[3];
+ volatile u_char CNTRH; /* Count Register (High) */
+ u_char Pad20[1];
+ volatile u_char CNTRM; /* Count Register (Mid) */
+ u_char Pad21[1];
+ volatile u_char CNTRL; /* Count Register (Low) */
+ u_char Pad22[1];
+ volatile u_char TSR; /* Timer Status Register */
+ u_char Pad23[11];
+};
+
+
+ /*
+ * Ariadne Expansion Board Structure
+ */
+
+#define ARIADNE_LANCE 0x360
+
+#define ARIADNE_PIT 0x1000
+
+#define ARIADNE_BOOTPROM 0x4000 /* I guess it's here :-) */
+#define ARIADNE_BOOTPROM_SIZE 0x4000
+
+#define ARIADNE_RAM 0x8000 /* Always access WORDs!! */
+#define ARIADNE_RAM_SIZE 0x8000
+
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
new file mode 100644
index 000000000000..15bfa28d6c53
--- /dev/null
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -0,0 +1,1176 @@
+/* atarilance.c: Ethernet driver for VME Lance cards on the Atari */
+/*
+ Written 1995/96 by Roman Hodek (Roman.Hodek@informatik.uni-erlangen.de)
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ This driver was written with the following sources of reference:
+ - The driver for the Riebl Lance card by the TU Vienna.
+ - The modified TUW driver for PAM's VME cards
+ - The PC-Linux driver for Lance cards (but this is for bus master
+ cards, not the shared memory ones)
+ - The Amiga Ariadne driver
+
+ v1.0: (in 1.2.13pl4/0.9.13)
+ Initial version
+ v1.1: (in 1.2.13pl5)
+ more comments
+ deleted some debugging stuff
+ optimized register access (keep AREG pointing to CSR0)
+ following AMD, CSR0_STRT should be set only after IDON is detected
+ use memcpy() for data transfers, that also employs long word moves
+ better probe procedure for 24-bit systems
+ non-VME-RieblCards need extra delays in memcpy
+ must also do write test, since 0xfxe00000 may hit ROM
+ use 8/32 tx/rx buffers, which should give better NFS performance;
+ this is made possible by shifting the last packet buffer after the
+ RieblCard reserved area
+ v1.2: (in 1.2.13pl8)
+ again fixed probing for the Falcon; 0xfe01000 hits phys. 0x00010000
+ and thus RAM, in case of no Lance found all memory contents have to
+ be restored!
+ Now possible to compile as module.
+ v1.3: 03/30/96 Jes Sorensen, Roman (in 1.3)
+ Several little 1.3 adaptions
+ When the lance is stopped it jumps back into little-endian
+ mode. It is therefore necessary to put it back where it
+ belongs, in big endian mode, in order to make things work.
+ This might be the reason why multicast-mode didn't work
+ before, but I'm not able to test it, as I only have an Amiga
+ (we had similar problems with the A2065 driver).
+
+*/
+
+static char version[] = "atarilance.c: v1.3 04/04/96 "
+ "Roman.Hodek@informatik.uni-erlangen.de\n";
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/atarihw.h>
+#include <asm/atariints.h>
+#include <asm/io.h>
+
+/* Debug level:
+ * 0 = silent, print only serious errors
+ * 1 = normal, print error messages
+ * 2 = debug, print debug info
+ * 3 = debug, print even more debug info (packet data)
+ */
+
+#define LANCE_DEBUG 1
+
+#ifdef LANCE_DEBUG
+static int lance_debug = LANCE_DEBUG;
+#else
+static int lance_debug = 1;
+#endif
+module_param(lance_debug, int, 0);
+MODULE_PARM_DESC(lance_debug, "atarilance debug level (0-3)");
+MODULE_LICENSE("GPL");
+
+/* Print debug messages on probing? */
+#undef LANCE_DEBUG_PROBE
+
+#define DPRINTK(n,a) \
+ do { \
+ if (lance_debug >= n) \
+ printk a; \
+ } while( 0 )
+
+#ifdef LANCE_DEBUG_PROBE
+# define PROBE_PRINT(a) printk a
+#else
+# define PROBE_PRINT(a)
+#endif
+
+/* These define the number of Rx and Tx buffers as log2. (Only powers
+ * of two are valid)
+ * Many more rx buffers (32) are reserved than tx buffers (8), since receiving
+ * is more time-critical than sending, and packets may have to remain in the
+ * board's memory when main memory is low.
+ */
+
+#define TX_LOG_RING_SIZE 3
+#define RX_LOG_RING_SIZE 5
+
+/* These are the derived values */
+
+#define TX_RING_SIZE (1 << TX_LOG_RING_SIZE)
+#define TX_RING_LEN_BITS (TX_LOG_RING_SIZE << 5)
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+
+#define RX_RING_SIZE (1 << RX_LOG_RING_SIZE)
+#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5)
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
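+/* Worked out with the values above: TX_RING_SIZE = 1 << 3 = 8,
+ * TX_RING_LEN_BITS = 3 << 5 = 0x60 (the log2 ring size shifted into the
+ * position the init block's length bits expect), and TX_RING_MOD_MASK = 7,
+ * so ring indices wrap with a cheap "idx & TX_RING_MOD_MASK".
+ */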
+
+#define TX_TIMEOUT (HZ/5)
+
+/* The LANCE Rx and Tx ring descriptors. */
+struct lance_rx_head {
+ unsigned short base; /* Low word of base addr */
+ volatile unsigned char flag;
+ unsigned char base_hi; /* High word of base addr (unused) */
+ short buf_length; /* This length is 2s complement! */
+ volatile short msg_length; /* This length is "normal". */
+};
+
+struct lance_tx_head {
+ unsigned short base; /* Low word of base addr */
+ volatile unsigned char flag;
+ unsigned char base_hi; /* High word of base addr (unused) */
+ short length; /* Length is 2s complement! */
+ volatile short misc;
+};
+
+struct ringdesc {
+ unsigned short adr_lo; /* Low 16 bits of address */
+ unsigned char len; /* Length bits */
+ unsigned char adr_hi; /* High 8 bits of address (unused) */
+};
+
+/* The LANCE initialization block, described in databook. */
+struct lance_init_block {
+ unsigned short mode; /* Pre-set mode */
+ unsigned char hwaddr[6]; /* Physical ethernet address */
+ unsigned filter[2]; /* Multicast filter (unused). */
+ /* Receive and transmit ring base, along with length bits. */
+ struct ringdesc rx_ring;
+ struct ringdesc tx_ring;
+};
+
+/* The whole layout of the Lance shared memory */
+struct lance_memory {
+ struct lance_init_block init;
+ struct lance_tx_head tx_head[TX_RING_SIZE];
+ struct lance_rx_head rx_head[RX_RING_SIZE];
+ char packet_area[0]; /* packet data follow after the
+ * init block and the ring
+ * descriptors and are laid out
+ * at runtime */
+};
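+/* So the shared memory is laid out as: init block, then the tx
+ * descriptor ring, then the rx descriptor ring, with packet buffers
+ * handed out from offsetof(struct lance_memory, packet_area) onwards
+ * (see lance_init_ring() below).
+ */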
+
+/* RieblCard specifics:
+ * The original TOS driver for these cards reserves the area from offset
+ * 0xee70 to 0xeebb for storing configuration data. Of interest to us is the
+ * Ethernet address there, and the magic for verifying the data's validity.
+ * The reserved area isn't touched by packet buffers. Furthermore, offset 0xfffe
+ * is reserved for the interrupt vector number.
+ */
+#define RIEBL_RSVD_START 0xee70
+#define RIEBL_RSVD_END 0xeec0
+#define RIEBL_MAGIC 0x09051990
+#define RIEBL_MAGIC_ADDR ((unsigned long *)(((char *)MEM) + 0xee8a))
+#define RIEBL_HWADDR_ADDR ((unsigned char *)(((char *)MEM) + 0xee8e))
+#define RIEBL_IVEC_ADDR ((unsigned short *)(((char *)MEM) + 0xfffe))
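+/* The probe uses these as follows: if the long word at RIEBL_MAGIC_ADDR
+ * reads back as RIEBL_MAGIC, the battery-backed configuration is valid
+ * and the hardware address is taken from RIEBL_HWADDR_ADDR; otherwise
+ * the card is treated as an old RieblCard and gets a default address
+ * (see lance_probe1() below).
+ */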
+
+/* This is a default address for the old RieblCards without a battery
+ * that have no ethernet address at boot time. 00:00:36:04 is the
+ * prefix for Riebl cards, the 00:00 at the end is arbitrary.
+ */
+
+static unsigned char OldRieblDefHwaddr[6] = {
+ 0x00, 0x00, 0x36, 0x04, 0x00, 0x00
+};
+
+
+/* I/O registers of the Lance chip */
+
+struct lance_ioreg {
+/* base+0x0 */ volatile unsigned short data;
+/* base+0x2 */ volatile unsigned short addr;
+ unsigned char _dummy1[3];
+/* base+0x7 */ volatile unsigned char ivec;
+ unsigned char _dummy2[5];
+/* base+0xd */ volatile unsigned char eeprom;
+ unsigned char _dummy3;
+/* base+0xf */ volatile unsigned char mem;
+};
+
+/* Types of boards this driver supports */
+
+enum lance_type {
+ OLD_RIEBL, /* old Riebl card without battery */
+ NEW_RIEBL, /* new Riebl card with battery */
+ PAM_CARD /* PAM card with EEPROM */
+};
+
+static char *lance_names[] = {
+ "Riebl-Card (without battery)",
+ "Riebl-Card (with battery)",
+ "PAM intern card"
+};
+
+/* The driver's private device structure */
+
+struct lance_private {
+ enum lance_type cardtype;
+ struct lance_ioreg *iobase;
+ struct lance_memory *mem;
+ int cur_rx, cur_tx; /* The next free ring entry */
+ int dirty_tx; /* Ring entries to be freed. */
+ /* copy function */
+ void *(*memcpy_f)( void *, const void *, size_t );
+/* This must be long for set_bit() */
+ long tx_full;
+ spinlock_t devlock;
+};
+
+/* I/O register access macros */
+
+#define MEM lp->mem
+#define DREG IO->data
+#define AREG IO->addr
+#define REGA(a) (*( AREG = (a), &DREG ))
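+/* REGA() exploits the comma operator to do the chip's two-step register
+ * access in one expression: e.g. "REGA(CSR0) = CSR0_STOP" first latches
+ * the register number into the address port (AREG) and then writes the
+ * value through the data port (DREG). AREG keeps selecting that CSR
+ * afterwards, so subsequent plain DREG accesses hit the same register.
+ */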
+
+/* Definitions for packet buffer access: */
+#define PKT_BUF_SZ 1544
+/* Get the address of a packet buffer corresponding to a given buffer head */
+#define PKTBUF_ADDR(head) (((unsigned char *)(MEM)) + (head)->base)
+
+/* Possible memory/IO addresses for probing */
+
+static struct lance_addr {
+ unsigned long memaddr;
+ unsigned long ioaddr;
+ int slow_flag;
+} lance_addr_list[] = {
+ { 0xfe010000, 0xfe00fff0, 0 }, /* RieblCard VME in TT */
+ { 0xffc10000, 0xffc0fff0, 0 }, /* RieblCard VME in MegaSTE
+ (highest byte stripped) */
+ { 0xffe00000, 0xffff7000, 1 }, /* RieblCard in ST
+ (highest byte stripped) */
+ { 0xffd00000, 0xffff7000, 1 }, /* RieblCard in ST with hw modif. to
+ avoid conflict with ROM
+ (highest byte stripped) */
+ { 0xffcf0000, 0xffcffff0, 0 }, /* PAMCard VME in TT and MSTE
+ (highest byte stripped) */
+ { 0xfecf0000, 0xfecffff0, 0 }, /* Rhotron's PAMCard VME in TT and MSTE
+ (highest byte stripped) */
+};
+
+#define N_LANCE_ADDR ARRAY_SIZE(lance_addr_list)
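+/* "highest byte stripped" refers to machines with a 24-bit address bus,
+ * where e.g. 0xffc10000 is actually decoded as 0x00c10000; this is why
+ * the probe must cope with RAM or ROM aliases at these addresses (see
+ * the v1.1/v1.2 notes in the header above).
+ */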
+
+
+/* Definitions for the Lance */
+
+/* tx_head flags */
+#define TMD1_ENP 0x01 /* end of packet */
+#define TMD1_STP 0x02 /* start of packet */
+#define TMD1_DEF 0x04 /* deferred */
+#define TMD1_ONE 0x08 /* one retry needed */
+#define TMD1_MORE 0x10 /* more than one retry needed */
+#define TMD1_ERR 0x40 /* error summary */
+#define TMD1_OWN 0x80 /* ownership (set: chip owns) */
+
+#define TMD1_OWN_CHIP TMD1_OWN
+#define TMD1_OWN_HOST 0
+
+/* tx_head misc field */
+#define TMD3_TDR 0x03FF /* Time Domain Reflectometry counter */
+#define TMD3_RTRY 0x0400 /* failed after 16 retries */
+#define TMD3_LCAR 0x0800 /* carrier lost */
+#define TMD3_LCOL 0x1000 /* late collision */
+#define TMD3_UFLO 0x4000 /* underflow (late memory) */
+#define TMD3_BUFF 0x8000 /* buffering error (no ENP) */
+
+/* rx_head flags */
+#define RMD1_ENP 0x01 /* end of packet */
+#define RMD1_STP 0x02 /* start of packet */
+#define RMD1_BUFF 0x04 /* buffer error */
+#define RMD1_CRC 0x08 /* CRC error */
+#define RMD1_OFLO 0x10 /* overflow */
+#define RMD1_FRAM 0x20 /* framing error */
+#define RMD1_ERR 0x40 /* error summary */
+#define RMD1_OWN 0x80 /* ownership (set: chip owns) */
+
+#define RMD1_OWN_CHIP RMD1_OWN
+#define RMD1_OWN_HOST 0
+
+/* register names */
+#define CSR0 0 /* mode/status */
+#define CSR1 1 /* init block addr (low) */
+#define CSR2 2 /* init block addr (high) */
+#define CSR3 3 /* misc */
+#define CSR8 8 /* address filter */
+#define CSR15 15 /* promiscuous mode */
+
+/* CSR0 */
+/* (R=readable, W=writeable, S=set on write, C=clear on write) */
+#define CSR0_INIT 0x0001 /* initialize (RS) */
+#define CSR0_STRT 0x0002 /* start (RS) */
+#define CSR0_STOP 0x0004 /* stop (RS) */
+#define CSR0_TDMD 0x0008 /* transmit demand (RS) */
+#define CSR0_TXON 0x0010 /* transmitter on (R) */
+#define CSR0_RXON 0x0020 /* receiver on (R) */
+#define CSR0_INEA 0x0040 /* interrupt enable (RW) */
+#define CSR0_INTR 0x0080 /* interrupt active (R) */
+#define CSR0_IDON 0x0100 /* initialization done (RC) */
+#define CSR0_TINT 0x0200 /* transmitter interrupt (RC) */
+#define CSR0_RINT 0x0400 /* receiver interrupt (RC) */
+#define CSR0_MERR 0x0800 /* memory error (RC) */
+#define CSR0_MISS 0x1000 /* missed frame (RC) */
+#define CSR0_CERR 0x2000 /* carrier error (no heartbeat :-) (RC) */
+#define CSR0_BABL 0x4000 /* babble: tx-ed too many bits (RC) */
+#define CSR0_ERR 0x8000 /* error (RC) */
+
+/* CSR3 */
+#define CSR3_BCON 0x0001 /* byte control */
+#define CSR3_ACON 0x0002 /* ALE control */
+#define CSR3_BSWP 0x0004 /* byte swap (1=big endian) */
+
+
+
+/***************************** Prototypes *****************************/
+
+static unsigned long lance_probe1( struct net_device *dev, struct lance_addr
+ *init_rec );
+static int lance_open( struct net_device *dev );
+static void lance_init_ring( struct net_device *dev );
+static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev );
+static irqreturn_t lance_interrupt( int irq, void *dev_id );
+static int lance_rx( struct net_device *dev );
+static int lance_close( struct net_device *dev );
+static void set_multicast_list( struct net_device *dev );
+static int lance_set_mac_address( struct net_device *dev, void *addr );
+static void lance_tx_timeout (struct net_device *dev);
+
+/************************* End of Prototypes **************************/
+
+
+
+
+
+static void *slow_memcpy( void *dst, const void *src, size_t len )
+
+{ char *cto = dst;
+ const char *cfrom = src;
+
+ while( len-- ) {
+ *cto++ = *cfrom++;
+ MFPDELAY();
+ }
+ return dst;
+}
+
+
+struct net_device * __init atarilance_probe(int unit)
+{
+ int i;
+ static int found;
+ struct net_device *dev;
+ int err = -ENODEV;
+
+ if (!MACH_IS_ATARI || found)
+ /* Assume there's only one board possible... That seems true, since
+ * the Riebl/PAM board's address cannot be changed. */
+ return ERR_PTR(-ENODEV);
+
+ dev = alloc_etherdev(sizeof(struct lance_private));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ for( i = 0; i < N_LANCE_ADDR; ++i ) {
+ if (lance_probe1( dev, &lance_addr_list[i] )) {
+ found = 1;
+ err = register_netdev(dev);
+ if (!err)
+ return dev;
+ free_irq(dev->irq, dev);
+ break;
+ }
+ }
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+
+/* Derived from hwreg_present() in atari/config.c: */
+
+static noinline int __init addr_accessible(volatile void *regp, int wordflag,
+ int writeflag)
+{
+ int ret;
+ unsigned long flags;
+ long *vbr, save_berr;
+
+ local_irq_save(flags);
+
+ __asm__ __volatile__ ( "movec %/vbr,%0" : "=r" (vbr) : );
+ save_berr = vbr[2];
+
+ __asm__ __volatile__
+ ( "movel %/sp,%/d1\n\t"
+ "movel #Lberr,%2@\n\t"
+ "moveq #0,%0\n\t"
+ "tstl %3\n\t"
+ "bne 1f\n\t"
+ "moveb %1@,%/d0\n\t"
+ "nop \n\t"
+ "bra 2f\n"
+"1: movew %1@,%/d0\n\t"
+ "nop \n"
+"2: tstl %4\n\t"
+ "beq 2f\n\t"
+ "tstl %3\n\t"
+ "bne 1f\n\t"
+ "clrb %1@\n\t"
+ "nop \n\t"
+ "moveb %/d0,%1@\n\t"
+ "nop \n\t"
+ "bra 2f\n"
+"1: clrw %1@\n\t"
+ "nop \n\t"
+ "movew %/d0,%1@\n\t"
+ "nop \n"
+"2: moveq #1,%0\n"
+"Lberr: movel %/d1,%/sp"
+ : "=&d" (ret)
+ : "a" (regp), "a" (&vbr[2]), "rm" (wordflag), "rm" (writeflag)
+ : "d0", "d1", "memory"
+ );
+
+ vbr[2] = save_berr;
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = lance_open,
+ .ndo_stop = lance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_set_mac_address = lance_set_mac_address,
+ .ndo_tx_timeout = lance_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static unsigned long __init lance_probe1( struct net_device *dev,
+ struct lance_addr *init_rec )
+{
+ volatile unsigned short *memaddr =
+ (volatile unsigned short *)init_rec->memaddr;
+ volatile unsigned short *ioaddr =
+ (volatile unsigned short *)init_rec->ioaddr;
+ struct lance_private *lp;
+ struct lance_ioreg *IO;
+ int i;
+ static int did_version;
+ unsigned short save1, save2;
+
+ PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n",
+ (long)memaddr, (long)ioaddr ));
+
+ /* Test whether memory readable and writable */
+ PROBE_PRINT(( "lance_probe1: testing memory to be accessible\n" ));
+ if (!addr_accessible( memaddr, 1, 1 )) goto probe_fail;
+
+ /* Written values should come back... */
+ PROBE_PRINT(( "lance_probe1: testing memory to be writable (1)\n" ));
+ save1 = *memaddr;
+ *memaddr = 0x0001;
+ if (*memaddr != 0x0001) goto probe_fail;
+ PROBE_PRINT(( "lance_probe1: testing memory to be writable (2)\n" ));
+ *memaddr = 0x0000;
+ if (*memaddr != 0x0000) goto probe_fail;
+ *memaddr = save1;
+
+ /* First port should be readable and writable */
+ PROBE_PRINT(( "lance_probe1: testing ioport to be accessible\n" ));
+ if (!addr_accessible( ioaddr, 1, 1 )) goto probe_fail;
+
+ /* and written values should be readable */
+ PROBE_PRINT(( "lance_probe1: testing ioport to be writeable\n" ));
+ save2 = ioaddr[1];
+ ioaddr[1] = 0x0001;
+ if (ioaddr[1] != 0x0001) goto probe_fail;
+
+ /* The CSR0_INIT bit should not be readable */
+ PROBE_PRINT(( "lance_probe1: testing CSR0 register function (1)\n" ));
+ save1 = ioaddr[0];
+ ioaddr[1] = CSR0;
+ ioaddr[0] = CSR0_INIT | CSR0_STOP;
+ if (ioaddr[0] != CSR0_STOP) {
+ ioaddr[0] = save1;
+ ioaddr[1] = save2;
+ goto probe_fail;
+ }
+ PROBE_PRINT(( "lance_probe1: testing CSR0 register function (2)\n" ));
+ ioaddr[0] = CSR0_STOP;
+ if (ioaddr[0] != CSR0_STOP) {
+ ioaddr[0] = save1;
+ ioaddr[1] = save2;
+ goto probe_fail;
+ }
+
+ /* Now ok... */
+ PROBE_PRINT(( "lance_probe1: Lance card detected\n" ));
+ goto probe_ok;
+
+ probe_fail:
+ return 0;
+
+ probe_ok:
+ lp = netdev_priv(dev);
+ MEM = (struct lance_memory *)memaddr;
+ IO = lp->iobase = (struct lance_ioreg *)ioaddr;
+ dev->base_addr = (unsigned long)ioaddr; /* informational only */
+ lp->memcpy_f = init_rec->slow_flag ? slow_memcpy : memcpy;
+
+ REGA( CSR0 ) = CSR0_STOP;
+
+ /* Now test for type: If the eeprom I/O port is readable, it is a
+ * PAM card */
+ if (addr_accessible( &(IO->eeprom), 0, 0 )) {
+ /* Switch back to Ram */
+ i = IO->mem;
+ lp->cardtype = PAM_CARD;
+ }
+ else if (*RIEBL_MAGIC_ADDR == RIEBL_MAGIC) {
+ lp->cardtype = NEW_RIEBL;
+ }
+ else
+ lp->cardtype = OLD_RIEBL;
+
+ if (lp->cardtype == PAM_CARD ||
+ memaddr == (unsigned short *)0xffe00000) {
+ /* PAMs card and Riebl on ST use level 5 autovector */
+ if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO,
+ "PAM,Riebl-ST Ethernet", dev)) {
+ printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 );
+ return 0;
+ }
+ dev->irq = (unsigned short)IRQ_AUTO_5;
+ }
+ else {
+ /* For VME-RieblCards, request a free VME int;
+ * (This must be unsigned long, since dev->irq is short and the
+ * IRQ_MACHSPEC bit would be cut off...)
+ */
+ unsigned long irq = atari_register_vme_int();
+ if (!irq) {
+ printk( "Lance: request for VME interrupt failed\n" );
+ return 0;
+ }
+ if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO,
+ "Riebl-VME Ethernet", dev)) {
+ printk( "Lance: request for irq %ld failed\n", irq );
+ return 0;
+ }
+ dev->irq = irq;
+ }
+
+ printk("%s: %s at io %#lx, mem %#lx, irq %d%s, hwaddr ",
+ dev->name, lance_names[lp->cardtype],
+ (unsigned long)ioaddr,
+ (unsigned long)memaddr,
+ dev->irq,
+ init_rec->slow_flag ? " (slow memcpy)" : "" );
+
+ /* Get the ethernet address */
+ switch( lp->cardtype ) {
+ case OLD_RIEBL:
+ /* No ethernet address! (Set some default address) */
+ memcpy( dev->dev_addr, OldRieblDefHwaddr, 6 );
+ break;
+ case NEW_RIEBL:
+ lp->memcpy_f( dev->dev_addr, RIEBL_HWADDR_ADDR, 6 );
+ break;
+ case PAM_CARD:
+ i = IO->eeprom;
+ for( i = 0; i < 6; ++i )
+ dev->dev_addr[i] =
+ ((((unsigned short *)MEM)[i*2] & 0x0f) << 4) |
+ ((((unsigned short *)MEM)[i*2+1] & 0x0f));
+ i = IO->mem;
+ break;
+ }
+ printk("%pM\n", dev->dev_addr);
+ if (lp->cardtype == OLD_RIEBL) {
+ printk( "%s: Warning: This is a default ethernet address!\n",
+ dev->name );
+ printk( " Use \"ifconfig hw ether ...\" to set the address.\n" );
+ }
+
+ spin_lock_init(&lp->devlock);
+
+ MEM->init.mode = 0x0000; /* Disable Rx and Tx. */
+ for( i = 0; i < 6; i++ )
+ MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */
+ MEM->init.filter[0] = 0x00000000;
+ MEM->init.filter[1] = 0x00000000;
+ MEM->init.rx_ring.adr_lo = offsetof( struct lance_memory, rx_head );
+ MEM->init.rx_ring.adr_hi = 0;
+ MEM->init.rx_ring.len = RX_RING_LEN_BITS;
+ MEM->init.tx_ring.adr_lo = offsetof( struct lance_memory, tx_head );
+ MEM->init.tx_ring.adr_hi = 0;
+ MEM->init.tx_ring.len = TX_RING_LEN_BITS;
+
+ if (lp->cardtype == PAM_CARD)
+ IO->ivec = IRQ_SOURCE_TO_VECTOR(dev->irq);
+ else
+ *RIEBL_IVEC_ADDR = IRQ_SOURCE_TO_VECTOR(dev->irq);
+
+ if (did_version++ == 0)
+ DPRINTK( 1, ( version ));
+
+ dev->netdev_ops = &lance_netdev_ops;
+
+ /* XXX MSch */
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ return 1;
+}
+
+
+static int lance_open( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_ioreg *IO = lp->iobase;
+ int i;
+
+ DPRINTK( 2, ( "%s: lance_open()\n", dev->name ));
+
+ lance_init_ring(dev);
+ /* Re-initialize the LANCE, and start it when done. */
+
+ REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
+ REGA( CSR2 ) = 0;
+ REGA( CSR1 ) = 0;
+ REGA( CSR0 ) = CSR0_INIT;
+ /* From now on, AREG is kept to point to CSR0 */
+
+ i = 1000000;
+ while (--i > 0)
+ if (DREG & CSR0_IDON)
+ break;
+ if (i <= 0 || (DREG & CSR0_ERR)) {
+ DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
+ dev->name, i, DREG ));
+ DREG = CSR0_STOP;
+ return -EIO;
+ }
+ DREG = CSR0_IDON;
+ DREG = CSR0_STRT;
+ DREG = CSR0_INEA;
+
+ netif_start_queue (dev);
+
+ DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG ));
+
+ return 0;
+}
+
+
+/* Initialize the LANCE Rx and Tx rings. */
+
+static void lance_init_ring( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int i;
+ unsigned offset;
+
+ lp->tx_full = 0;
+ lp->cur_rx = lp->cur_tx = 0;
+ lp->dirty_tx = 0;
+
+ offset = offsetof( struct lance_memory, packet_area );
+
+/* If the packet buffer at offset 'o' would conflict with the reserved area
+ * of RieblCards, advance it */
+#define CHECK_OFFSET(o) \
+ do { \
+ if (lp->cardtype == OLD_RIEBL || lp->cardtype == NEW_RIEBL) { \
+ if (((o) < RIEBL_RSVD_START) ? (o)+PKT_BUF_SZ > RIEBL_RSVD_START \
+ : (o) < RIEBL_RSVD_END) \
+ (o) = RIEBL_RSVD_END; \
+ } \
+ } while(0)
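+/* A worked example of the macro's effect (illustrative offset): with
+ * PKT_BUF_SZ = 1544 (0x608), a buffer placed at offset 0xe900 would end
+ * at 0xef08 and overlap the reserved area starting at 0xee70, so the
+ * offset is advanced to RIEBL_RSVD_END (0xeec0) before being used.
+ */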
+
+ for( i = 0; i < TX_RING_SIZE; i++ ) {
+ CHECK_OFFSET(offset);
+ MEM->tx_head[i].base = offset;
+ MEM->tx_head[i].flag = TMD1_OWN_HOST;
+ MEM->tx_head[i].base_hi = 0;
+ MEM->tx_head[i].length = 0;
+ MEM->tx_head[i].misc = 0;
+ offset += PKT_BUF_SZ;
+ }
+
+ for( i = 0; i < RX_RING_SIZE; i++ ) {
+ CHECK_OFFSET(offset);
+ MEM->rx_head[i].base = offset;
+ MEM->rx_head[i].flag = TMD1_OWN_CHIP;
+ MEM->rx_head[i].base_hi = 0;
+ MEM->rx_head[i].buf_length = -PKT_BUF_SZ;
+ MEM->rx_head[i].msg_length = 0;
+ offset += PKT_BUF_SZ;
+ }
+}
+
+
+/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
+
+
+static void lance_tx_timeout (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_ioreg *IO = lp->iobase;
+
+ AREG = CSR0;
+ DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
+ dev->name, DREG ));
+ DREG = CSR0_STOP;
+ /*
+ * Always set BSWP after a STOP as STOP puts it back into
+ * little endian mode.
+ */
+ REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
+ dev->stats.tx_errors++;
+#ifndef final_version
+ { int i;
+ DPRINTK( 2, ( "Ring data: dirty_tx %d cur_tx %d%s cur_rx %d\n",
+ lp->dirty_tx, lp->cur_tx,
+ lp->tx_full ? " (full)" : "",
+ lp->cur_rx ));
+ for( i = 0 ; i < RX_RING_SIZE; i++ )
+ DPRINTK( 2, ( "rx #%d: base=%04x blen=%04x mlen=%04x\n",
+ i, MEM->rx_head[i].base,
+ -MEM->rx_head[i].buf_length,
+ MEM->rx_head[i].msg_length ));
+ for( i = 0 ; i < TX_RING_SIZE; i++ )
+ DPRINTK( 2, ( "tx #%d: base=%04x len=%04x misc=%04x\n",
+ i, MEM->tx_head[i].base,
+ -MEM->tx_head[i].length,
+ MEM->tx_head[i].misc ));
+ }
+#endif
+ /* XXX MSch: maybe purge/reinit ring here */
+ /* lance_restart, essentially */
+ lance_init_ring(dev);
+ REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
+
+static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_ioreg *IO = lp->iobase;
+ int entry, len;
+ struct lance_tx_head *head;
+ unsigned long flags;
+
+ DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n",
+ dev->name, DREG ));
+
+
+ /* The old LANCE chips don't automatically pad buffers to the minimum size. */
+ len = skb->len;
+ if (len < ETH_ZLEN)
+ len = ETH_ZLEN;
+ /* PAM-Card has a bug: Can only send packets with even number of bytes! */
+ else if (lp->cardtype == PAM_CARD && (len & 1))
+ ++len;
+
+ if (len > skb->len) {
+ if (skb_padto(skb, len))
+ return NETDEV_TX_OK;
+ }
+
+ netif_stop_queue (dev);
+
+ /* Fill in a Tx ring entry */
+ if (lance_debug >= 3) {
+ printk( "%s: TX pkt type 0x%04x from %pM to %pM"
+ " data at 0x%08x len %d\n",
+ dev->name, ((u_short *)skb->data)[6],
+ &skb->data[6], skb->data,
+ (int)skb->data, (int)skb->len );
+ }
+
+ /* We're not prepared for the interrupt until all flags are set up, and
+ * the interrupt may fire as soon as OWN_CHIP is set... */
+ spin_lock_irqsave (&lp->devlock, flags);
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->cur_tx & TX_RING_MOD_MASK;
+ head = &(MEM->tx_head[entry]);
+
+ /* Caution: the write order is important here, set the "ownership" bits
+ * last.
+ */
+
+
+ head->length = -len;
+ head->misc = 0;
+ lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
+ head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
+ dev->stats.tx_bytes += skb->len;
+ dev_kfree_skb( skb );
+ lp->cur_tx++;
+ while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
+ lp->cur_tx -= TX_RING_SIZE;
+ lp->dirty_tx -= TX_RING_SIZE;
+ }
+
+ /* Trigger an immediate send poll. */
+ DREG = CSR0_INEA | CSR0_TDMD;
+
+ if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
+ TMD1_OWN_HOST)
+ netif_start_queue (dev);
+ else
+ lp->tx_full = 1;
+ spin_unlock_irqrestore (&lp->devlock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/* The LANCE interrupt handler. */
+
+static irqreturn_t lance_interrupt( int irq, void *dev_id )
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp;
+ struct lance_ioreg *IO;
+ int csr0, boguscnt = 10;
+ int handled = 0;
+
+ if (dev == NULL) {
+ DPRINTK( 1, ( "lance_interrupt(): interrupt for unknown device.\n" ));
+ return IRQ_NONE;
+ }
+
+ lp = netdev_priv(dev);
+ IO = lp->iobase;
+ spin_lock (&lp->devlock);
+
+ AREG = CSR0;
+
+ while( ((csr0 = DREG) & (CSR0_ERR | CSR0_TINT | CSR0_RINT)) &&
+ --boguscnt >= 0) {
+ handled = 1;
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ DREG = csr0 & ~(CSR0_INIT | CSR0_STRT | CSR0_STOP |
+ CSR0_TDMD | CSR0_INEA);
+
+ DPRINTK( 2, ( "%s: interrupt csr0=%04x new csr=%04x.\n",
+ dev->name, csr0, DREG ));
+
+ if (csr0 & CSR0_RINT) /* Rx interrupt */
+ lance_rx( dev );
+
+ if (csr0 & CSR0_TINT) { /* Tx-done interrupt */
+ int dirty_tx = lp->dirty_tx;
+
+ while( dirty_tx < lp->cur_tx) {
+ int entry = dirty_tx & TX_RING_MOD_MASK;
+ int status = MEM->tx_head[entry].flag;
+
+ if (status & TMD1_OWN_CHIP)
+ break; /* It still hasn't been Txed */
+
+ MEM->tx_head[entry].flag = 0;
+
+ if (status & TMD1_ERR) {
+ /* There was a major error; log it. */
+ int err_status = MEM->tx_head[entry].misc;
+ dev->stats.tx_errors++;
+ if (err_status & TMD3_RTRY) dev->stats.tx_aborted_errors++;
+ if (err_status & TMD3_LCAR) dev->stats.tx_carrier_errors++;
+ if (err_status & TMD3_LCOL) dev->stats.tx_window_errors++;
+ if (err_status & TMD3_UFLO) {
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ dev->stats.tx_fifo_errors++;
+ /* Remove this verbosity later! */
+ DPRINTK( 1, ( "%s: Tx FIFO error! Status %04x\n",
+ dev->name, csr0 ));
+ /* Restart the chip. */
+ DREG = CSR0_STRT;
+ }
+ } else {
+ if (status & (TMD1_MORE | TMD1_ONE | TMD1_DEF))
+ dev->stats.collisions++;
+ dev->stats.tx_packets++;
+ }
+
+ /* XXX MSch: free skb?? */
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
+ DPRINTK( 0, ( "out-of-sync dirty pointer,"
+ " %d vs. %d, full=%ld.\n",
+ dirty_tx, lp->cur_tx, lp->tx_full ));
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (lp->tx_full && (netif_queue_stopped(dev)) &&
+ dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ netif_wake_queue (dev);
+ }
+
+ lp->dirty_tx = dirty_tx;
+ }
+
+ /* Log misc errors. */
+ if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & CSR0_MERR) {
+ DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), "
+ "status %04x.\n", dev->name, csr0 ));
+ /* Restart the chip. */
+ DREG = CSR0_STRT;
+ }
+ }
+
+ /* Clear any other interrupt, and set interrupt enable. */
+ DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR |
+ CSR0_IDON | CSR0_INEA;
+
+ DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n",
+ dev->name, DREG ));
+
+ spin_unlock (&lp->devlock);
+ return IRQ_RETVAL(handled);
+}
+
+
+static int lance_rx( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int entry = lp->cur_rx & RX_RING_MOD_MASK;
+ int i;
+
+ DPRINTK( 2, ( "%s: rx int, flag=%04x\n", dev->name,
+ MEM->rx_head[entry].flag ));
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) {
+ struct lance_rx_head *head = &(MEM->rx_head[entry]);
+ int status = head->flag;
+
+ if (status != (RMD1_ENP|RMD1_STP)) { /* There was an error. */
+ /* There is a tricky error noted by John Murphy,
+ <murf@perftech.com> to Russ Nelson: Even with full-sized
+ buffers it's possible for a jabber packet to use two
+ buffers, with only the last correctly noting the error. */
+ if (status & RMD1_ENP) /* Only count a general error at the */
+ dev->stats.rx_errors++; /* end of a packet.*/
+ if (status & RMD1_FRAM) dev->stats.rx_frame_errors++;
+ if (status & RMD1_OFLO) dev->stats.rx_over_errors++;
+ if (status & RMD1_CRC) dev->stats.rx_crc_errors++;
+ if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++;
+ head->flag &= (RMD1_ENP|RMD1_STP);
+ } else {
+ /* Malloc up new buffer, compatible with net-3. */
+ short pkt_len = head->msg_length & 0xfff;
+ struct sk_buff *skb;
+
+ if (pkt_len < 60) {
+ printk( "%s: Runt packet!\n", dev->name );
+ dev->stats.rx_errors++;
+ }
+ else {
+ skb = dev_alloc_skb( pkt_len+2 );
+ if (skb == NULL) {
+ DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
+ dev->name ));
+ for( i = 0; i < RX_RING_SIZE; i++ )
+ if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag &
+ RMD1_OWN_CHIP)
+ break;
+
+ if (i > RX_RING_SIZE - 2) {
+ dev->stats.rx_dropped++;
+ head->flag |= RMD1_OWN_CHIP;
+ lp->cur_rx++;
+ }
+ break;
+ }
+
+ if (lance_debug >= 3) {
+ u_char *data = PKTBUF_ADDR(head);
+
+ printk(KERN_DEBUG "%s: RX pkt type 0x%04x from %pM to %pM "
+ "data %02x %02x %02x %02x %02x %02x %02x %02x "
+ "len %d\n",
+ dev->name, ((u_short *)data)[6],
+ &data[6], data,
+ data[15], data[16], data[17], data[18],
+ data[19], data[20], data[21], data[22],
+ pkt_len);
+ }
+
+ skb_reserve( skb, 2 ); /* 16 byte align */
+ skb_put( skb, pkt_len ); /* Make room */
+ lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len );
+ skb->protocol = eth_type_trans( skb, dev );
+ netif_rx( skb );
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ }
+
+ head->flag |= RMD1_OWN_CHIP;
+ entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+ }
+ lp->cur_rx &= RX_RING_MOD_MASK;
+
+ /* From lance.c (Donald Becker): */
+ /* We should check that at least two ring entries are free. If not,
+ we should free one and mark stats->rx_dropped++. */
+
+ return 0;
+}
+
+
+static int lance_close( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_ioreg *IO = lp->iobase;
+
+ netif_stop_queue (dev);
+
+ AREG = CSR0;
+
+ DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, DREG ));
+
+ /* We stop the LANCE here -- it occasionally polls
+ memory if we don't. */
+ DREG = CSR0_STOP;
+
+ return 0;
+}
+
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+ */
+
+static void set_multicast_list( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_ioreg *IO = lp->iobase;
+
+ if (!netif_running(dev))
+ /* Only possible if board is already started */
+ return;
+
+ /* For multicast we take the simple way out: accept all multicast
+ * frames and rely on upper-layer filtering (see below). */
+ DREG = CSR0_STOP; /* Temporarily stop the lance. */
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Log any net taps. */
+ DPRINTK( 2, ( "%s: Promiscuous mode enabled.\n", dev->name ));
+ REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
+ } else {
+ short multicast_table[4];
+ int num_addrs = netdev_mc_count(dev);
+ int i;
+ /* We don't use the multicast table, but rely on upper-layer
+ * filtering. */
+ memset( multicast_table, (num_addrs == 0) ? 0 : -1,
+ sizeof(multicast_table) );
+ for( i = 0; i < 4; i++ )
+ REGA( CSR8+i ) = multicast_table[i];
+ REGA( CSR15 ) = 0; /* Unset promiscuous mode */
+ }
+
+ /*
+ * Always set BSWP after a STOP as STOP puts it back into
+ * little endian mode.
+ */
+ REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
+
+ /* Resume normal operation and reset AREG to CSR0 */
+ REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT;
+}
+
+
+/* This is needed for old RieblCards and possibly for new RieblCards */
+
+static int lance_set_mac_address( struct net_device *dev, void *addr )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct sockaddr *saddr = addr;
+ int i;
+
+ if (lp->cardtype != OLD_RIEBL && lp->cardtype != NEW_RIEBL)
+ return -EOPNOTSUPP;
+
+ if (netif_running(dev)) {
+ /* Only possible while card isn't started */
+ DPRINTK( 1, ( "%s: hwaddr can be set only while card isn't open.\n",
+ dev->name ));
+ return -EIO;
+ }
+
+ memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len );
+ for( i = 0; i < 6; i++ )
+ MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */
+ lp->memcpy_f( RIEBL_HWADDR_ADDR, dev->dev_addr, 6 );
+ /* set also the magic for future sessions */
+ *RIEBL_MAGIC_ADDR = RIEBL_MAGIC;
+
+ return 0;
+}
+
+
+#ifdef MODULE
+static struct net_device *atarilance_dev;
+
+static int __init atarilance_module_init(void)
+{
+ atarilance_dev = atarilance_probe(-1);
+ if (IS_ERR(atarilance_dev))
+ return PTR_ERR(atarilance_dev);
+ return 0;
+}
+
+static void __exit atarilance_module_exit(void)
+{
+ unregister_netdev(atarilance_dev);
+ free_irq(atarilance_dev->irq, atarilance_dev);
+ free_netdev(atarilance_dev);
+}
+module_init(atarilance_module_init);
+module_exit(atarilance_module_exit);
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
new file mode 100644
index 000000000000..82386677bb8c
--- /dev/null
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -0,0 +1,1332 @@
+/*
+ *
+ * Alchemy Au1x00 ethernet driver
+ *
+ * Copyright 2001-2003, 2006 MontaVista Software Inc.
+ * Copyright 2002 TimeSys Corp.
+ * Added ethtool/mii-tool support,
+ * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
+ * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de
+ * or riemer@riemer-nt.de: fixed the link beat detection with
+ * ioctls (SIOCGMIIPHY)
+ * Copyright 2006 Herbert Valerio Riedel <hvr@gnu.org>
+ * converted to use linux-2.6.x's PHY framework
+ *
+ * Author: MontaVista Software, Inc.
+ * ppopov@mvista.com or source@mvista.com
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ *
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/capability.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/cpu.h>
+#include <linux/io.h>
+
+#include <asm/mipsregs.h>
+#include <asm/irq.h>
+#include <asm/processor.h>
+
+#include <au1000.h>
+#include <au1xxx_eth.h>
+#include <prom.h>
+
+#include "au1000_eth.h"
+
+#ifdef AU1000_ETH_DEBUG
+static int au1000_debug = 5;
+#else
+static int au1000_debug = 3;
+#endif
+
+#define AU1000_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK)
+
+#define DRV_NAME "au1000_eth"
+#define DRV_VERSION "1.7"
+#define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
+#define DRV_DESC "Au1xxx on-chip Ethernet driver"
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/*
+ * Theory of operation
+ *
+ * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
+ * There are four receive and four transmit descriptors. These
+ * descriptors are not in memory; rather, they are just a set of
+ * hardware registers.
+ *
+ * Since the Au1000 has a coherent data cache, the receive and
+ * transmit buffers are allocated from the KSEG0 segment. The
+ * hardware registers, however, are still mapped at KSEG1 to
+ * make sure there are no out-of-order writes and that all writes
+ * complete immediately.
+ */
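+/* As a concrete illustration of that split on MIPS: a buffer at
+ * physical address 0x00123400 is reached cached through KSEG0 at
+ * 0x80123400, while the MAC registers are reached uncached through
+ * KSEG1 (physical address + 0xa0000000), so register writes are never
+ * reordered or held back in the cache.
+ */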
+
+/*
+ * board-specific configurations
+ *
+ * PHY detection algorithm
+ *
+ * If phy_static_config is not set, the PHY setup is
+ * autodetected:
+ *
+ * mii_probe() first searches the current MAC's MII bus for a PHY,
+ * selecting the first (or last, if phy_search_highest_addr is
+ * defined) PHY address not already claimed by another netdev.
+ *
+ * If nothing was found that way when searching for the 2nd ethernet
+ * controller's PHY and phy1_search_mac0 is defined, then
+ * the first MII bus is searched as well for an unclaimed PHY; this is
+ * needed in case of a dual-PHY accessible only through the MAC0's MII
+ * bus.
+ *
+ * Finally, if no PHY is found, then the corresponding ethernet
+ * controller is not registered with the network subsystem.
+ */
+
+/* autodetection defaults: phy1_search_mac0 */
+
+/* static PHY setup
+ *
+ * most boards' PHY setup should be detected properly by the
+ * autodetection algorithm in mii_probe(), but in some cases (e.g. if
+ * you have a switch attached, or want to use the PHY's interrupt
+ * notification capabilities) you can provide a static PHY
+ * configuration here (a sketch follows below)
+ *
+ * IRQs may only be set if a PHY address was configured. If a PHY
+ * address is given, a bus id must also be set.
+ *
+ * PS: make sure the IRQs used are configured properly in the
+ * board-specific irq-map
+ */
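+/* A sketch of such a static configuration as board platform data (the
+ * exact field set lives in au1xxx_eth.h; the values here are assumed,
+ * illustrative ones, not a reference configuration):
+ *
+ *	.phy_static_config = 1,
+ *	.phy_busid = 0,		(the PHY sits on MAC0's MII bus)
+ *	.phy_addr  = 1,		(MII address of the PHY)
+ *	.phy_irq   = PHY_POLL,	(or an irq from the board-specific irq-map)
+ */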
+
+static void au1000_enable_mac(struct net_device *dev, int force_reset)
+{
+ unsigned long flags;
+ struct au1000_private *aup = netdev_priv(dev);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ if (force_reset || (!aup->mac_enabled)) {
+ writel(MAC_EN_CLOCK_ENABLE, aup->enable);
+ au_sync_delay(2);
+ writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
+ | MAC_EN_CLOCK_ENABLE), aup->enable);
+ au_sync_delay(2);
+
+ aup->mac_enabled = 1;
+ }
+
+ spin_unlock_irqrestore(&aup->lock, flags);
+}
+
+/*
+ * MII operations
+ */
+static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ u32 *const mii_control_reg = &aup->mac->mii_control;
+ u32 *const mii_data_reg = &aup->mac->mii_data;
+ u32 timedout = 20;
+ u32 mii_control;
+
+ while (readl(mii_control_reg) & MAC_MII_BUSY) {
+ mdelay(1);
+ if (--timedout == 0) {
+ netdev_err(dev, "read_MII busy timeout!!\n");
+ return -1;
+ }
+ }
+
+ mii_control = MAC_SET_MII_SELECT_REG(reg) |
+ MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ;
+
+ writel(mii_control, mii_control_reg);
+
+ timedout = 20;
+ while (readl(mii_control_reg) & MAC_MII_BUSY) {
+ mdelay(1);
+ if (--timedout == 0) {
+ netdev_err(dev, "mdio_read busy timeout!!\n");
+ return -1;
+ }
+ }
+ return readl(mii_data_reg);
+}
+
+static void au1000_mdio_write(struct net_device *dev, int phy_addr,
+ int reg, u16 value)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ u32 *const mii_control_reg = &aup->mac->mii_control;
+ u32 *const mii_data_reg = &aup->mac->mii_data;
+ u32 timedout = 20;
+ u32 mii_control;
+
+ while (readl(mii_control_reg) & MAC_MII_BUSY) {
+ mdelay(1);
+ if (--timedout == 0) {
+ netdev_err(dev, "mdio_write busy timeout!!\n");
+ return;
+ }
+ }
+
+ mii_control = MAC_SET_MII_SELECT_REG(reg) |
+ MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE;
+
+ writel(value, mii_data_reg);
+ writel(mii_control, mii_control_reg);
+}
+
+static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ /* WARNING: bus->phy_map[phy_addr].attached_dev == dev does
+ * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus)
+ */
+ struct net_device *const dev = bus->priv;
+
+ /* make sure the MAC associated with this
+ * mii_bus is enabled
+ */
+ au1000_enable_mac(dev, 0);
+
+ return au1000_mdio_read(dev, phy_addr, regnum);
+}
+
+static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
+ u16 value)
+{
+ struct net_device *const dev = bus->priv;
+
+ /* make sure the MAC associated with this
+ * mii_bus is enabled
+ */
+ au1000_enable_mac(dev, 0);
+
+ au1000_mdio_write(dev, phy_addr, regnum, value);
+ return 0;
+}
+
+static int au1000_mdiobus_reset(struct mii_bus *bus)
+{
+ struct net_device *const dev = bus->priv;
+
+ /* make sure the MAC associated with this
+ * mii_bus is enabled
+ */
+ au1000_enable_mac(dev, 0);
+
+ return 0;
+}
+
+static void au1000_hard_stop(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ u32 reg;
+
+ netif_dbg(aup, drv, dev, "hard stop\n");
+
+ reg = readl(&aup->mac->control);
+ reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
+ writel(reg, &aup->mac->control);
+ au_sync_delay(10);
+}
+
+static void au1000_enable_rx_tx(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ u32 reg;
+
+ netif_dbg(aup, hw, dev, "enable_rx_tx\n");
+
+ reg = readl(&aup->mac->control);
+ reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
+ writel(reg, &aup->mac->control);
+ au_sync_delay(10);
+}
+
+static void
+au1000_adjust_link(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ struct phy_device *phydev = aup->phy_dev;
+ unsigned long flags;
+ u32 reg;
+
+ int status_change = 0;
+
+ BUG_ON(!aup->phy_dev);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ if (phydev->link && (aup->old_speed != phydev->speed)) {
+ /* speed changed */
+
+ switch (phydev->speed) {
+ case SPEED_10:
+ case SPEED_100:
+ break;
+ default:
+ netdev_warn(dev, "Speed (%d) is not 10/100 ???\n",
+ phydev->speed);
+ break;
+ }
+
+ aup->old_speed = phydev->speed;
+
+ status_change = 1;
+ }
+
+ if (phydev->link && (aup->old_duplex != phydev->duplex)) {
+ /* duplex mode changed */
+
+ /* switching duplex mode requires disabling rx and tx! */
+ au1000_hard_stop(dev);
+
+ reg = readl(&aup->mac->control);
+ if (DUPLEX_FULL == phydev->duplex) {
+ reg |= MAC_FULL_DUPLEX;
+ reg &= ~MAC_DISABLE_RX_OWN;
+ } else {
+ reg &= ~MAC_FULL_DUPLEX;
+ reg |= MAC_DISABLE_RX_OWN;
+ }
+ writel(reg, &aup->mac->control);
+ au_sync_delay(1);
+
+ au1000_enable_rx_tx(dev);
+ aup->old_duplex = phydev->duplex;
+
+ status_change = 1;
+ }
+
+ if (phydev->link != aup->old_link) {
+ /* link state changed */
+
+ if (!phydev->link) {
+ /* link went down */
+ aup->old_speed = 0;
+ aup->old_duplex = -1;
+ }
+
+ aup->old_link = phydev->link;
+ status_change = 1;
+ }
+
+ spin_unlock_irqrestore(&aup->lock, flags);
+
+ if (status_change) {
+ if (phydev->link)
+ netdev_info(dev, "link up (%d/%s)\n",
+ phydev->speed,
+ DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
+ else
+ netdev_info(dev, "link down\n");
+ }
+}
+
+static int au1000_mii_probe(struct net_device *dev)
+{
+ struct au1000_private *const aup = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
+ int phy_addr;
+
+ if (aup->phy_static_config) {
+ BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);
+
+ if (aup->phy_addr)
+ phydev = aup->mii_bus->phy_map[aup->phy_addr];
+ else
+ netdev_info(dev, "using PHY-less setup\n");
+ return 0;
+ }
+
+ /* find the first (lowest address) PHY
+ * on the current MAC's MII bus
+ */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
+ if (aup->mii_bus->phy_map[phy_addr]) {
+ phydev = aup->mii_bus->phy_map[phy_addr];
+ if (!aup->phy_search_highest_addr)
+ /* break out with first one found */
+ break;
+ }
+
+ if (aup->phy1_search_mac0) {
+ /* try harder to find a PHY */
+ if (!phydev && (aup->mac_id == 1)) {
+ /* no PHY found, maybe we have a dual PHY? */
+ dev_info(&dev->dev, ": no PHY found on MAC1, "
+ "let's see if it's attached to MAC0...\n");
+
+ /* find the first (lowest address) non-attached
+ * PHY on the MAC0 MII bus
+ */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+ struct phy_device *const tmp_phydev =
+ aup->mii_bus->phy_map[phy_addr];
+
+ if (aup->mac_id == 1)
+ break;
+
+ /* no PHY here... */
+ if (!tmp_phydev)
+ continue;
+
+ /* already claimed by MAC0 */
+ if (tmp_phydev->attached_dev)
+ continue;
+
+ phydev = tmp_phydev;
+ break; /* found it */
+ }
+ }
+ }
+
+ if (!phydev) {
+ netdev_err(dev, "no PHY found\n");
+ return -1;
+ }
+
+ /* now we are supposed to have a proper phydev to attach to... */
+ BUG_ON(phydev->attached_dev);
+
+ phydev = phy_connect(dev, dev_name(&phydev->dev), &au1000_adjust_link,
+ 0, PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(phydev)) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ return PTR_ERR(phydev);
+ }
+
+ /* mask with MAC supported features */
+ phydev->supported &= (SUPPORTED_10baseT_Half
+ | SUPPORTED_10baseT_Full
+ | SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full
+ | SUPPORTED_Autoneg
+ /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */
+ | SUPPORTED_MII
+ | SUPPORTED_TP);
+
+ phydev->advertising = phydev->supported;
+
+ aup->old_link = 0;
+ aup->old_speed = 0;
+ aup->old_duplex = -1;
+ aup->phy_dev = phydev;
+
+ netdev_info(dev, "attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, irq=%d)\n",
+ phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
+
+ return 0;
+}
+
+
+/*
+ * Buffer allocation/deallocation routines. The buffer descriptor returned
+ * has the virtual and dma address of a buffer suitable for both
+ * receive and transmit operations.
+ */
+static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
+{
+ struct db_dest *pDB;
+ pDB = aup->pDBfree;
+
+ if (pDB)
+ aup->pDBfree = pDB->pnext;
+
+ return pDB;
+}
+
+void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
+{
+ struct db_dest *pDBfree = aup->pDBfree;
+ if (pDBfree)
+ pDBfree->pnext = pDB;
+ aup->pDBfree = pDB;
+}
+
+static void au1000_reset_mac_unlocked(struct net_device *dev)
+{
+ struct au1000_private *const aup = netdev_priv(dev);
+ int i;
+
+ au1000_hard_stop(dev);
+
+ writel(MAC_EN_CLOCK_ENABLE, aup->enable);
+ au_sync_delay(2);
+ writel(0, aup->enable);
+ au_sync_delay(2);
+
+ aup->tx_full = 0;
+ for (i = 0; i < NUM_RX_DMA; i++) {
+ /* reset control bits */
+ aup->rx_dma_ring[i]->buff_stat &= ~0xf;
+ }
+ for (i = 0; i < NUM_TX_DMA; i++) {
+ /* reset control bits */
+ aup->tx_dma_ring[i]->buff_stat &= ~0xf;
+ }
+
+ aup->mac_enabled = 0;
+
+}
+
+static void au1000_reset_mac(struct net_device *dev)
+{
+ struct au1000_private *const aup = netdev_priv(dev);
+ unsigned long flags;
+
+ netif_dbg(aup, hw, dev, "reset mac, aup %x\n",
+ (unsigned)aup);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ au1000_reset_mac_unlocked(dev);
+
+ spin_unlock_irqrestore(&aup->lock, flags);
+}
+
+/*
+ * Setup the receive and transmit "rings". These pointers are the addresses
+ * of the rx and tx MAC DMA registers so they are fixed by the hardware --
+ * these are not descriptors sitting in memory.
+ */
+static void
+au1000_setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
+{
+ int i;
+
+ for (i = 0; i < NUM_RX_DMA; i++) {
+ aup->rx_dma_ring[i] =
+ (struct rx_dma *)
+ (rx_base + sizeof(struct rx_dma)*i);
+ }
+ for (i = 0; i < NUM_TX_DMA; i++) {
+ aup->tx_dma_ring[i] =
+ (struct tx_dma *)
+ (tx_base + sizeof(struct tx_dma)*i);
+ }
+}
+
+/*
+ * ethtool operations
+ */
+
+static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+
+ if (aup->phy_dev)
+ return phy_ethtool_gset(aup->phy_dev, cmd);
+
+ return -EINVAL;
+}
+
+static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (aup->phy_dev)
+ return phy_ethtool_sset(aup->phy_dev, cmd);
+
+ return -EINVAL;
+}
+
+static void
+au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ info->fw_version[0] = '\0';
+ sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id);
+ info->regdump_len = 0;
+}
+
+static void au1000_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ aup->msg_enable = value;
+}
+
+static u32 au1000_get_msglevel(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ return aup->msg_enable;
+}
+
+static const struct ethtool_ops au1000_ethtool_ops = {
+ .get_settings = au1000_get_settings,
+ .set_settings = au1000_set_settings,
+ .get_drvinfo = au1000_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = au1000_get_msglevel,
+ .set_msglevel = au1000_set_msglevel,
+};
+
+
+/*
+ * Initialize the interface.
+ *
+ * When the device powers up, the clocks are disabled and the
+ * mac is in reset state. When the interface is closed, we
+ * do the same -- reset the device and disable the clocks to
+ * conserve power. Thus, whenever au1000_init() is called,
+ * the device should already be in reset state.
+ */
+static int au1000_init(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ unsigned long flags;
+ int i;
+ u32 control;
+
+ netif_dbg(aup, hw, dev, "au1000_init\n");
+
+ /* bring the device out of reset */
+ au1000_enable_mac(dev, 1);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ writel(0, &aup->mac->control);
+ aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
+ aup->tx_tail = aup->tx_head;
+ aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
+
+ writel(dev->dev_addr[5]<<8 | dev->dev_addr[4],
+ &aup->mac->mac_addr_high);
+ writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
+ dev->dev_addr[1]<<8 | dev->dev_addr[0],
+ &aup->mac->mac_addr_low);
+
+
+ for (i = 0; i < NUM_RX_DMA; i++)
+ aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
+
+ au_sync();
+
+ control = MAC_RX_ENABLE | MAC_TX_ENABLE;
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+ control |= MAC_BIG_ENDIAN;
+#endif
+ if (aup->phy_dev) {
+ if (aup->phy_dev->link && (DUPLEX_FULL == aup->phy_dev->duplex))
+ control |= MAC_FULL_DUPLEX;
+ else
+ control |= MAC_DISABLE_RX_OWN;
+ } else { /* PHY-less op, assume full-duplex */
+ control |= MAC_FULL_DUPLEX;
+ }
+
+ writel(control, &aup->mac->control);
+ writel(0x8100, &aup->mac->vlan1_tag); /* activate vlan support */
+ au_sync();
+
+ spin_unlock_irqrestore(&aup->lock, flags);
+ return 0;
+}
+
+static inline void au1000_update_rx_stats(struct net_device *dev, u32 status)
+{
+ struct net_device_stats *ps = &dev->stats;
+
+ ps->rx_packets++;
+ if (status & RX_MCAST_FRAME)
+ ps->multicast++;
+
+ if (status & RX_ERROR) {
+ ps->rx_errors++;
+ if (status & RX_MISSED_FRAME)
+ ps->rx_missed_errors++;
+ if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
+ ps->rx_length_errors++;
+ if (status & RX_CRC_ERROR)
+ ps->rx_crc_errors++;
+ if (status & RX_COLL)
+ ps->collisions++;
+ } else
+ ps->rx_bytes += status & RX_FRAME_LEN_MASK;
+}
+
+/*
+ * Au1000 receive routine.
+ */
+static int au1000_rx(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct rx_dma *prxd;
+ u32 buff_stat, status;
+ struct db_dest *pDB;
+ u32 frmlen;
+
+ netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);
+
+ prxd = aup->rx_dma_ring[aup->rx_head];
+ buff_stat = prxd->buff_stat;
+ while (buff_stat & RX_T_DONE) {
+ status = prxd->status;
+ pDB = aup->rx_db_inuse[aup->rx_head];
+ au1000_update_rx_stats(dev, status);
+ if (!(status & RX_ERROR)) {
+
+ /* good frame */
+ frmlen = (status & RX_FRAME_LEN_MASK);
+ frmlen -= 4; /* Remove FCS */
+ skb = dev_alloc_skb(frmlen + 2);
+ if (skb == NULL) {
+ netdev_err(dev, "Memory squeeze, dropping packet.\n");
+ dev->stats.rx_dropped++;
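+ /* NB: buff_stat is left unchanged here, so the loop
+ * re-tests this same descriptor on the next pass.
+ */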
+ continue;
+ }
+ skb_reserve(skb, 2); /* 16 byte IP header align */
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)pDB->vaddr, frmlen);
+ skb_put(skb, frmlen);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb); /* pass the packet to upper layers */
+ } else {
+ if (au1000_debug > 4) {
+ pr_err("rx_error(s):");
+ if (status & RX_MISSED_FRAME)
+ pr_cont(" miss");
+ if (status & RX_WDOG_TIMER)
+ pr_cont(" wdog");
+ if (status & RX_RUNT)
+ pr_cont(" runt");
+ if (status & RX_OVERLEN)
+ pr_cont(" overlen");
+ if (status & RX_COLL)
+ pr_cont(" coll");
+ if (status & RX_MII_ERROR)
+ pr_cont(" mii error");
+ if (status & RX_CRC_ERROR)
+ pr_cont(" crc error");
+ if (status & RX_LEN_ERROR)
+ pr_cont(" len error");
+ if (status & RX_U_CNTRL_FRAME)
+ pr_cont(" u control frame");
+ pr_cont("\n");
+ }
+ }
+ prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
+ aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
+ au_sync();
+
+ /* next descriptor */
+ prxd = aup->rx_dma_ring[aup->rx_head];
+ buff_stat = prxd->buff_stat;
+ }
+ return 0;
+}
+
+static void au1000_update_tx_stats(struct net_device *dev, u32 status)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ struct net_device_stats *ps = &dev->stats;
+
+ if (status & TX_FRAME_ABORTED) {
+ if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
+ if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
+ /* any other tx errors are only valid
+ * in half duplex mode
+ */
+ ps->tx_errors++;
+ ps->tx_aborted_errors++;
+ }
+ } else {
+ ps->tx_errors++;
+ ps->tx_aborted_errors++;
+ if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
+ ps->tx_carrier_errors++;
+ }
+ }
+}
+
+/*
+ * Called from the interrupt service routine to acknowledge
+ * the TX DONE bits. This is a must if the irq is setup as
+ * edge triggered.
+ */
+static void au1000_tx_ack(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ struct tx_dma *ptxd;
+
+ ptxd = aup->tx_dma_ring[aup->tx_tail];
+
+ while (ptxd->buff_stat & TX_T_DONE) {
+ au1000_update_tx_stats(dev, ptxd->status);
+ ptxd->buff_stat &= ~TX_T_DONE;
+ ptxd->len = 0;
+ au_sync();
+
+ aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
+ ptxd = aup->tx_dma_ring[aup->tx_tail];
+
+ if (aup->tx_full) {
+ aup->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+ }
+}
+
+/*
+ * Au1000 interrupt service routine.
+ */
+static irqreturn_t au1000_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+
+ /* Handle RX interrupts first to minimize chance of overrun */
+
+ au1000_rx(dev);
+ au1000_tx_ack(dev);
+ return IRQ_RETVAL(1);
+}
+
+static int au1000_open(struct net_device *dev)
+{
+ int retval;
+ struct au1000_private *aup = netdev_priv(dev);
+
+ netif_dbg(aup, drv, dev, "open: dev=%p\n", dev);
+
+ retval = request_irq(dev->irq, au1000_interrupt, 0,
+ dev->name, dev);
+ if (retval) {
+ netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
+ return retval;
+ }
+
+ retval = au1000_init(dev);
+ if (retval) {
+ netdev_err(dev, "error in au1000_init\n");
+ free_irq(dev->irq, dev);
+ return retval;
+ }
+
+ if (aup->phy_dev) {
+ /* cause the PHY state machine to schedule a link state check */
+ aup->phy_dev->state = PHY_CHANGELINK;
+ phy_start(aup->phy_dev);
+ }
+
+ netif_start_queue(dev);
+
+ netif_dbg(aup, drv, dev, "open: Initialization done.\n");
+
+ return 0;
+}
+
+static int au1000_close(struct net_device *dev)
+{
+ unsigned long flags;
+ struct au1000_private *const aup = netdev_priv(dev);
+
+ netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);
+
+ if (aup->phy_dev)
+ phy_stop(aup->phy_dev);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ au1000_reset_mac_unlocked(dev);
+
+ /* stop the device */
+ netif_stop_queue(dev);
+
+ /* disable the interrupt */
+ free_irq(dev->irq, dev);
+ spin_unlock_irqrestore(&aup->lock, flags);
+
+ return 0;
+}
+
+/*
+ * Au1000 transmit routine.
+ */
+static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ struct net_device_stats *ps = &dev->stats;
+ struct tx_dma *ptxd;
+ u32 buff_stat;
+ struct db_dest *pDB;
+ int i;
+
+ netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
+ (unsigned)aup, skb->len,
+ skb->data, aup->tx_head);
+
+ ptxd = aup->tx_dma_ring[aup->tx_head];
+ buff_stat = ptxd->buff_stat;
+ if (buff_stat & TX_DMA_ENABLE) {
+ /* We've wrapped around and the transmitter is still busy */
+ netif_stop_queue(dev);
+ aup->tx_full = 1;
+ return NETDEV_TX_BUSY;
+ } else if (buff_stat & TX_T_DONE) {
+ au1000_update_tx_stats(dev, ptxd->status);
+ ptxd->len = 0;
+ }
+
+ if (aup->tx_full) {
+ aup->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+
+ pDB = aup->tx_db_inuse[aup->tx_head];
+ skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
+ if (skb->len < ETH_ZLEN) {
+ for (i = skb->len; i < ETH_ZLEN; i++)
+ ((char *)pDB->vaddr)[i] = 0;
+
+ ptxd->len = ETH_ZLEN;
+ } else
+ ptxd->len = skb->len;
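+ /* ETH_ZLEN is 60, the minimum frame length without FCS; e.g. a
+ * 20-byte runt is zero-padded above to 60 bytes before the DMA
+ * is kicked off.
+ */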
+
+ ps->tx_packets++;
+ ps->tx_bytes += ptxd->len;
+
+ ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
+ au_sync();
+ dev_kfree_skb(skb);
+ aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
+ return NETDEV_TX_OK;
+}
+
+/*
+ * The Tx ring has been full longer than the watchdog timeout
+ * value, so assume the transmitter is hung and reset it.
+ */
+static void au1000_tx_timeout(struct net_device *dev)
+{
+ netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
+ au1000_reset_mac(dev);
+ au1000_init(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+static void au1000_multicast_list(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ u32 reg;
+
+ netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags);
+ reg = readl(&aup->mac->control);
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ reg |= MAC_PROMISCUOUS;
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
+ reg |= MAC_PASS_ALL_MULTI;
+ reg &= ~MAC_PROMISCUOUS;
+ netdev_info(dev, "Pass all multicast\n");
+ } else {
+ struct netdev_hw_addr *ha;
+ u32 mc_filter[2]; /* Multicast hash filter */
+
+ mc_filter[1] = mc_filter[0] = 0;
+ netdev_for_each_mc_addr(ha, dev)
+ set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
+ (long *)mc_filter);
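+ /* ether_crc() >> 26 keeps the top 6 CRC bits, i.e. a bit
+ * index of 0..63 into the 64-bit mc_filter hash; for example
+ * a CRC of 0xdeadbeef maps to bit 55.
+ */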
+ writel(mc_filter[1], &aup->mac->multi_hash_high);
+ writel(mc_filter[0], &aup->mac->multi_hash_low);
+ reg &= ~MAC_PROMISCUOUS;
+ reg |= MAC_HASH_MODE;
+ }
+ writel(reg, &aup->mac->control);
+}
+
+static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!aup->phy_dev)
+ return -EINVAL; /* PHY not controllable */
+
+ return phy_mii_ioctl(aup->phy_dev, rq, cmd);
+}
+
+static const struct net_device_ops au1000_netdev_ops = {
+ .ndo_open = au1000_open,
+ .ndo_stop = au1000_close,
+ .ndo_start_xmit = au1000_tx,
+ .ndo_set_rx_mode = au1000_multicast_list,
+ .ndo_do_ioctl = au1000_ioctl,
+ .ndo_tx_timeout = au1000_tx_timeout,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static int __devinit au1000_probe(struct platform_device *pdev)
+{
+ static unsigned version_printed;
+ struct au1000_private *aup = NULL;
+ struct au1000_eth_platform_data *pd;
+ struct net_device *dev = NULL;
+ struct db_dest *pDB, *pDBfree;
+ int irq, i, err = 0;
+ struct resource *base, *macen;
+
+ base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!base) {
+ dev_err(&pdev->dev, "failed to retrieve base register\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!macen) {
+ dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to retrieve IRQ\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (!request_mem_region(base->start, resource_size(base),
+ pdev->name)) {
+ dev_err(&pdev->dev, "failed to request memory region for base registers\n");
+ err = -ENXIO;
+ goto out;
+ }
+
+ if (!request_mem_region(macen->start, resource_size(macen),
+ pdev->name)) {
+ dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
+ err = -ENXIO;
+ goto err_request;
+ }
+
+ dev = alloc_etherdev(sizeof(struct au1000_private));
+ if (!dev) {
+ dev_err(&pdev->dev, "alloc_etherdev failed\n");
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ platform_set_drvdata(pdev, dev);
+ aup = netdev_priv(dev);
+
+ spin_lock_init(&aup->lock);
+ aup->msg_enable = (au1000_debug < 4 ?
+ AU1000_DEF_MSG_ENABLE : au1000_debug);
+
+ /* Allocate the data buffers
+ * Snooping works fine with eth on all au1xxx
+ */
+ aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
+ (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ &aup->dma_addr, 0);
+ if (!aup->vaddr) {
+ dev_err(&pdev->dev, "failed to allocate data buffers\n");
+ err = -ENOMEM;
+ goto err_vaddr;
+ }
+
+ /* aup->mac is the base address of the MAC's registers */
+ aup->mac = (struct mac_reg *)
+ ioremap_nocache(base->start, resource_size(base));
+ if (!aup->mac) {
+ dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
+ err = -ENXIO;
+ goto err_remap1;
+ }
+
+ /* Setup some variables for quick register address access */
+ aup->enable = (u32 *)ioremap_nocache(macen->start,
+ resource_size(macen));
+ if (!aup->enable) {
+ dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
+ err = -ENXIO;
+ goto err_remap2;
+ }
+ aup->mac_id = pdev->id;
+
+ if (pdev->id == 0)
+ au1000_setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
+ else if (pdev->id == 1)
+ au1000_setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
+
+ /* set a random MAC now in case platform_data doesn't provide one */
+ random_ether_addr(dev->dev_addr);
+
+ writel(0, aup->enable);
+ aup->mac_enabled = 0;
+
+ pd = pdev->dev.platform_data;
+ if (!pd) {
+ dev_info(&pdev->dev, "no platform_data passed,"
+ " PHY search on MAC0\n");
+ aup->phy1_search_mac0 = 1;
+ } else {
+ if (is_valid_ether_addr(pd->mac))
+ memcpy(dev->dev_addr, pd->mac, 6);
+
+ aup->phy_static_config = pd->phy_static_config;
+ aup->phy_search_highest_addr = pd->phy_search_highest_addr;
+ aup->phy1_search_mac0 = pd->phy1_search_mac0;
+ aup->phy_addr = pd->phy_addr;
+ aup->phy_busid = pd->phy_busid;
+ aup->phy_irq = pd->phy_irq;
+ }
+
+ if (aup->phy_busid > 0) {
+ dev_err(&pdev->dev, "PHY for MAC0 attached to the 2nd MAC's MII bus is not supported yet\n");
+ err = -ENODEV;
+ goto err_mdiobus_alloc;
+ }
+
+ aup->mii_bus = mdiobus_alloc();
+ if (aup->mii_bus == NULL) {
+ dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
+ err = -ENOMEM;
+ goto err_mdiobus_alloc;
+ }
+
+ aup->mii_bus->priv = dev;
+ aup->mii_bus->read = au1000_mdiobus_read;
+ aup->mii_bus->write = au1000_mdiobus_write;
+ aup->mii_bus->reset = au1000_mdiobus_reset;
+ aup->mii_bus->name = "au1000_eth_mii";
+ snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%x", aup->mac_id);
+ aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ if (aup->mii_bus->irq == NULL)
+ goto err_out;
+
+ for (i = 0; i < PHY_MAX_ADDR; ++i)
+ aup->mii_bus->irq[i] = PHY_POLL;
+ /* if known, set corresponding PHY IRQs */
+ if (aup->phy_static_config)
+ if (aup->phy_irq && aup->phy_busid == aup->mac_id)
+ aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq;
+
+ err = mdiobus_register(aup->mii_bus);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register MDIO bus\n");
+ goto err_mdiobus_reg;
+ }
+
+ if (au1000_mii_probe(dev) != 0)
+ goto err_out;
+
+ pDBfree = NULL;
+ /* setup the data buffer descriptors and attach a buffer to each one */
+ pDB = aup->db;
+ for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
+ pDB->pnext = pDBfree;
+ pDBfree = pDB;
+ pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
+ pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
+ pDB++;
+ }
+ aup->pDBfree = pDBfree;
+
+ for (i = 0; i < NUM_RX_DMA; i++) {
+ pDB = au1000_GetFreeDB(aup);
+ if (!pDB)
+ goto err_out;
+
+ aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
+ aup->rx_db_inuse[i] = pDB;
+ }
+ for (i = 0; i < NUM_TX_DMA; i++) {
+ pDB = au1000_GetFreeDB(aup);
+ if (!pDB)
+ goto err_out;
+
+ aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
+ aup->tx_dma_ring[i]->len = 0;
+ aup->tx_db_inuse[i] = pDB;
+ }
+
+ dev->base_addr = base->start;
+ dev->irq = irq;
+ dev->netdev_ops = &au1000_netdev_ops;
+ SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
+ dev->watchdog_timeo = ETH_TX_TIMEOUT;
+
+ /*
+ * The boot code uses the ethernet controller, so reset it to start
+ * fresh. au1000_init() expects that the device is in reset state.
+ */
+ au1000_reset_mac(dev);
+
+ err = register_netdev(dev);
+ if (err) {
+ netdev_err(dev, "Cannot register net device, aborting.\n");
+ goto err_out;
+ }
+
+ netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
+ (unsigned long)base->start, irq);
+ if (version_printed++ == 0)
+ pr_info("%s version %s %s\n",
+ DRV_NAME, DRV_VERSION, DRV_AUTHOR);
+
+ return 0;
+
+err_out:
+ if (aup->mii_bus != NULL)
+ mdiobus_unregister(aup->mii_bus);
+
+ /* here we should have a valid dev plus the aup->mac/aup->enable
+ * register mappings, so we can reset the mac properly.
+ */
+ au1000_reset_mac(dev);
+
+ for (i = 0; i < NUM_RX_DMA; i++) {
+ if (aup->rx_db_inuse[i])
+ au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
+ }
+ for (i = 0; i < NUM_TX_DMA; i++) {
+ if (aup->tx_db_inuse[i])
+ au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
+ }
+err_mdiobus_reg:
+ mdiobus_free(aup->mii_bus);
+err_mdiobus_alloc:
+ iounmap(aup->enable);
+err_remap2:
+ iounmap(aup->mac);
+err_remap1:
+ dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ (void *)aup->vaddr, aup->dma_addr);
+err_vaddr:
+ free_netdev(dev);
+err_alloc:
+ release_mem_region(macen->start, resource_size(macen));
+err_request:
+ release_mem_region(base->start, resource_size(base));
+out:
+ return err;
+}
+
+static int __devexit au1000_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct au1000_private *aup = netdev_priv(dev);
+ int i;
+ struct resource *base, *macen;
+
+ platform_set_drvdata(pdev, NULL);
+
+ unregister_netdev(dev);
+ mdiobus_unregister(aup->mii_bus);
+ mdiobus_free(aup->mii_bus);
+
+ for (i = 0; i < NUM_RX_DMA; i++)
+ if (aup->rx_db_inuse[i])
+ au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
+
+ for (i = 0; i < NUM_TX_DMA; i++)
+ if (aup->tx_db_inuse[i])
+ au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
+
+ dma_free_noncoherent(NULL, MAX_BUF_SIZE *
+ (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ (void *)aup->vaddr, aup->dma_addr);
+
+ iounmap(aup->mac);
+ iounmap(aup->enable);
+
+ base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(base->start, resource_size(base));
+
+ macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ release_mem_region(macen->start, resource_size(macen));
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct platform_driver au1000_eth_driver = {
+ .probe = au1000_probe,
+ .remove = __devexit_p(au1000_remove),
+ .driver = {
+ .name = "au1000-eth",
+ .owner = THIS_MODULE,
+ },
+};
+MODULE_ALIAS("platform:au1000-eth");
+
+static int __init au1000_init_module(void)
+{
+ return platform_driver_register(&au1000_eth_driver);
+}
+
+static void __exit au1000_exit_module(void)
+{
+ platform_driver_unregister(&au1000_eth_driver);
+}
+
+module_init(au1000_init_module);
+module_exit(au1000_exit_module);
diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h
new file mode 100644
index 000000000000..6229c774552c
--- /dev/null
+++ b/drivers/net/ethernet/amd/au1000_eth.h
@@ -0,0 +1,134 @@
+/*
+ *
+ * Alchemy Au1x00 ethernet driver include file
+ *
+ * Author: Pete Popov <ppopov@mvista.com>
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ *
+ *
+ */
+
+
+#define MAC_IOSIZE 0x10000
+#define NUM_RX_DMA 4 /* Au1x00 has 4 rx hardware descriptors */
+#define NUM_TX_DMA 4 /* Au1x00 has 4 tx hardware descriptors */
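+/*
+ * The driver advances ring indices with "(i + 1) & (NUM_xx_DMA - 1)",
+ * so these counts must stay powers of two; e.g. with 4 descriptors,
+ * (3 + 1) & 3 == 0 wraps the index back to slot 0.
+ */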
+
+#define NUM_RX_BUFFS 4
+#define NUM_TX_BUFFS 4
+#define MAX_BUF_SIZE 2048
+
+#define ETH_TX_TIMEOUT (HZ/4)
+#define MAC_MIN_PKT_SIZE 64
+
+#define MULTICAST_FILTER_LIMIT 64
+
+/*
+ * Data Buffer Descriptor. Data buffers must be aligned on a 32-byte
+ * boundary for both receive and transmit.
+ */
+struct db_dest {
+ struct db_dest *pnext;
+ u32 *vaddr;
+ dma_addr_t dma_addr;
+};
+
+/*
+ * The transmit and receive descriptors are memory
+ * mapped registers.
+ */
+struct tx_dma {
+ u32 status;
+ u32 buff_stat;
+ u32 len;
+ u32 pad;
+};
+
+struct rx_dma {
+ u32 status;
+ u32 buff_stat;
+ u32 pad[2];
+};
+
+
+/*
+ * MAC control registers, memory mapped.
+ */
+struct mac_reg {
+ u32 control;
+ u32 mac_addr_high;
+ u32 mac_addr_low;
+ u32 multi_hash_high;
+ u32 multi_hash_low;
+ u32 mii_control;
+ u32 mii_data;
+ u32 flow_control;
+ u32 vlan1_tag;
+ u32 vlan2_tag;
+};
+
+
+struct au1000_private {
+ struct db_dest *pDBfree;
+ struct db_dest db[NUM_RX_BUFFS+NUM_TX_BUFFS];
+ struct rx_dma *rx_dma_ring[NUM_RX_DMA];
+ struct tx_dma *tx_dma_ring[NUM_TX_DMA];
+ struct db_dest *rx_db_inuse[NUM_RX_DMA];
+ struct db_dest *tx_db_inuse[NUM_TX_DMA];
+ u32 rx_head;
+ u32 tx_head;
+ u32 tx_tail;
+ u32 tx_full;
+
+ int mac_id;
+
+ int mac_enabled; /* whether MAC is currently enabled and running
+ * (req. for mdio)
+ */
+
+ int old_link; /* used by au1000_adjust_link */
+ int old_speed;
+ int old_duplex;
+
+ struct phy_device *phy_dev;
+ struct mii_bus *mii_bus;
+
+ /* PHY configuration */
+ int phy_static_config;
+ int phy_search_highest_addr;
+ int phy1_search_mac0;
+
+ int phy_addr;
+ int phy_busid;
+ int phy_irq;
+
+ /* These variables are just for quick access
+ * to certain regs addresses.
+ */
+ struct mac_reg *mac; /* mac registers */
+ u32 *enable; /* address of MAC Enable Register */
+
+ u32 vaddr; /* virtual address of rx/tx buffers */
+ dma_addr_t dma_addr; /* dma address of rx/tx buffers */
+
+ spinlock_t lock; /* Serialise access to device */
+
+ u32 msg_enable;
+};
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
new file mode 100644
index 000000000000..73f8d4fa682d
--- /dev/null
+++ b/drivers/net/ethernet/amd/declance.c
@@ -0,0 +1,1381 @@
+/*
+ * Lance ethernet driver for the MIPS processor based
+ * DECstation family
+ *
+ *
+ * adopted from sunlance.c by Richard van den Berg
+ *
+ * Copyright (C) 2002, 2003, 2005, 2006 Maciej W. Rozycki
+ *
+ * additional sources:
+ * - PMAD-AA TURBOchannel Ethernet Module Functional Specification,
+ * Revision 1.2
+ *
+ * History:
+ *
+ * v0.001: The kernel accepts the code and it shows the hardware address.
+ *
+ * v0.002: Removed most sparc stuff, left only some module and dma stuff.
+ *
+ * v0.003: Enhanced base address calculation from proposals by
+ * Harald Koerfgen and Thomas Riemer.
+ *
+ * v0.004: lance-regs is pointing at the right addresses, added prom
+ * check. First start of address mapping and DMA.
+ *
+ * v0.005: started to play around with LANCE-DMA. This driver will not
+ * work for non IOASIC lances. HK
+ *
+ * v0.006: added pointer arrays to lance_private and setup routine for
+ * them in dec_lance_init. HK
+ *
+ * v0.007: Big shit. The LANCE seems to use a different DMA mechanism to
+ * access the init block. This looks like one (short) word at a
+ * time, but the smallest amount the IOASIC can transfer is a
+ * (long) word. So we have a 2-2 padding here. Changed
+ * lance_init_block accordingly. The 16-16 padding for the buffers
+ * seems to be correct. HK
+ *
+ * v0.008: mods to make PMAX_LANCE work. 01/09/1999 triemer
+ *
+ * v0.009: Module support fixes, multiple interfaces support, various
+ * bits. macro
+ *
+ * v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the
+ * PMAX requirement to only use halfword accesses to the
+ * buffer. macro
+ *
+ * v0.011: Converted the PMAD to the driver model. macro
+ */
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/tc.h>
+#include <linux/types.h>
+
+#include <asm/addrspace.h>
+#include <asm/system.h>
+
+#include <asm/dec/interrupts.h>
+#include <asm/dec/ioasic.h>
+#include <asm/dec/ioasic_addrs.h>
+#include <asm/dec/kn01.h>
+#include <asm/dec/machtype.h>
+#include <asm/dec/system.h>
+
+static char version[] __devinitdata =
+"declance.c: v0.011 by Linux MIPS DECstation task force\n";
+
+MODULE_AUTHOR("Linux MIPS DECstation task force");
+MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver");
+MODULE_LICENSE("GPL");
+
+#define __unused __attribute__ ((unused))
+
+/*
+ * card types
+ */
+#define ASIC_LANCE 1
+#define PMAD_LANCE 2
+#define PMAX_LANCE 3
+
+
+#define LE_CSR0 0
+#define LE_CSR1 1
+#define LE_CSR2 2
+#define LE_CSR3 3
+
+#define LE_MO_PROM 0x8000 /* Enable promiscuous mode */
+
+#define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */
+#define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */
+#define LE_C0_CERR 0x2000 /* SQE: Signal quality error */
+#define LE_C0_MISS 0x1000 /* MISS: Missed a packet */
+#define LE_C0_MERR 0x0800 /* ME: Memory error */
+#define LE_C0_RINT 0x0400 /* Received interrupt */
+#define LE_C0_TINT 0x0200 /* Transmitter Interrupt */
+#define LE_C0_IDON 0x0100 /* IFIN: Init finished. */
+#define LE_C0_INTR 0x0080 /* Interrupt or error */
+#define LE_C0_INEA 0x0040 /* Interrupt enable */
+#define LE_C0_RXON 0x0020 /* Receiver on */
+#define LE_C0_TXON 0x0010 /* Transmitter on */
+#define LE_C0_TDMD 0x0008 /* Transmitter demand */
+#define LE_C0_STOP 0x0004 /* Stop the card */
+#define LE_C0_STRT 0x0002 /* Start the card */
+#define LE_C0_INIT 0x0001 /* Init the card */
+
+#define LE_C3_BSWP 0x4 /* SWAP */
+#define LE_C3_ACON 0x2 /* ALE Control */
+#define LE_C3_BCON 0x1 /* Byte control */
+
+/* Receive message descriptor 1 */
+#define LE_R1_OWN 0x8000 /* Who owns the entry */
+#define LE_R1_ERR 0x4000 /* Error: if FRA, OFL, CRC or BUF is set */
+#define LE_R1_FRA 0x2000 /* FRA: Frame error */
+#define LE_R1_OFL 0x1000 /* OFL: Frame overflow */
+#define LE_R1_CRC 0x0800 /* CRC error */
+#define LE_R1_BUF 0x0400 /* BUF: Buffer error */
+#define LE_R1_SOP 0x0200 /* Start of packet */
+#define LE_R1_EOP 0x0100 /* End of packet */
+#define LE_R1_POK 0x0300 /* Packet is complete: SOP + EOP */
+
+/* Transmit message descriptor 1 */
+#define LE_T1_OWN 0x8000 /* Lance owns the packet */
+#define LE_T1_ERR 0x4000 /* Error summary */
+#define LE_T1_EMORE 0x1000 /* Error: more than one retry needed */
+#define LE_T1_EONE 0x0800 /* Error: one retry needed */
+#define LE_T1_EDEF 0x0400 /* Error: deferred */
+#define LE_T1_SOP 0x0200 /* Start of packet */
+#define LE_T1_EOP 0x0100 /* End of packet */
+#define LE_T1_POK 0x0300 /* Packet is complete: SOP + EOP */
+
+#define LE_T3_BUF 0x8000 /* Buffer error */
+#define LE_T3_UFL 0x4000 /* Error underflow */
+#define LE_T3_LCOL 0x1000 /* Error late collision */
+#define LE_T3_CLOS 0x0800 /* Error carrier loss */
+#define LE_T3_RTY 0x0400 /* Error retry */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */
+
+/* Define: 2^4 Tx buffers and 2^4 Rx buffers */
+
+#ifndef LANCE_LOG_TX_BUFFERS
+#define LANCE_LOG_TX_BUFFERS 4
+#define LANCE_LOG_RX_BUFFERS 4
+#endif
+
+#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+
+#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+
+#define PKT_BUF_SZ 1536
+#define RX_BUFF_SIZE PKT_BUF_SZ
+#define TX_BUFF_SIZE PKT_BUF_SZ
+
+#undef TEST_HITS
+#define ZERO 0
+
+/*
+ * The DS2100/3100 have a linear 64 kB buffer which supports halfword
+ * accesses only. Each halfword of the buffer is word-aligned in the
+ * CPU address space.
+ *
+ * The PMAD-AA has a 128 kB buffer on-board.
+ *
+ * The IOASIC LANCE devices use a shared memory region. This region
+ * as seen from the CPU is (at most) 128 kB long and has to be on a
+ * 128 kB boundary. The LANCE sees this as a 64 kB long contiguous
+ * memory region.
+ *
+ * The LANCE's DMA address is used as an index in this buffer and DMA
+ * takes place in bursts of eight 16-bit words which are packed into
+ * four 32-bit words by the IOASIC. This leads to a strange padding:
+ * 16 bytes of valid data followed by a 16 byte gap :-(.
+ */
+
+struct lance_rx_desc {
+ unsigned short rmd0; /* low address of packet */
+ unsigned short rmd1; /* high address of packet
+ and descriptor bits */
+ short length; /* 2s complement (negative!)
+ of buffer length */
+ unsigned short mblength; /* actual number of bytes received */
+};
+
+struct lance_tx_desc {
+ unsigned short tmd0; /* low address of packet */
+ unsigned short tmd1; /* high address of packet
+ and descriptor bits */
+ short length; /* 2s complement (negative!)
+ of buffer length */
+ unsigned short misc;
+};
+
+
+/* First part of the LANCE initialization block, described in databook. */
+struct lance_init_block {
+ unsigned short mode; /* pre-set mode (reg. 15) */
+
+ unsigned short phys_addr[3]; /* physical ethernet address */
+ unsigned short filter[4]; /* multicast filter */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ unsigned short rx_ptr; /* receive descriptor addr */
+ unsigned short rx_len; /* receive len and high addr */
+ unsigned short tx_ptr; /* transmit descriptor addr */
+ unsigned short tx_len; /* transmit len and high addr */
+
+ short gap[4];
+
+ /* The buffer descriptors */
+ struct lance_rx_desc brx_ring[RX_RING_SIZE];
+ struct lance_tx_desc btx_ring[TX_RING_SIZE];
+};
+
+#define BUF_OFFSET_CPU sizeof(struct lance_init_block)
+#define BUF_OFFSET_LNC sizeof(struct lance_init_block)
+
+#define shift_off(off, type) \
+ (type == ASIC_LANCE || type == PMAX_LANCE ? off << 1 : off)
+
+#define lib_off(rt, type) \
+ shift_off(offsetof(struct lance_init_block, rt), type)
+
+#define lib_ptr(ib, rt, type) \
+ ((volatile u16 *)((u8 *)(ib) + lib_off(rt, type)))
+
+#define rds_off(rt, type) \
+ shift_off(offsetof(struct lance_rx_desc, rt), type)
+
+#define rds_ptr(rd, rt, type) \
+ ((volatile u16 *)((u8 *)(rd) + rds_off(rt, type)))
+
+#define tds_off(rt, type) \
+ shift_off(offsetof(struct lance_tx_desc, rt), type)
+
+#define tds_ptr(td, rt, type) \
+ ((volatile u16 *)((u8 *)(td) + tds_off(rt, type)))
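+
+/*
+ * Worked example of the shifting above: on the ASIC and PMAX variants
+ * every 16-bit word of the buffer occupies 32 bits of CPU address
+ * space, so shift_off() doubles the structure offsets; rx_ptr sits at
+ * offset 16 in struct lance_init_block, which lib_off() turns into CPU
+ * offset 32, while on the PMAD it stays at 16.
+ */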
+
+struct lance_private {
+ struct net_device *next;
+ int type;
+ int dma_irq;
+ volatile struct lance_regs *ll;
+
+ spinlock_t lock;
+
+ int rx_new, tx_new;
+ int rx_old, tx_old;
+
+ unsigned short busmaster_regval;
+
+ struct timer_list multicast_timer;
+
+ /* Pointers to the ring buffers as seen from the CPU */
+ char *rx_buf_ptr_cpu[RX_RING_SIZE];
+ char *tx_buf_ptr_cpu[TX_RING_SIZE];
+
+ /* Pointers to the ring buffers as seen from the LANCE */
+ uint rx_buf_ptr_lnc[RX_RING_SIZE];
+ uint tx_buf_ptr_lnc[TX_RING_SIZE];
+};
+
+#define TX_BUFFS_AVAIL ((lp->tx_old <= lp->tx_new) ? \
+ lp->tx_old + TX_RING_MOD_MASK - lp->tx_new : \
+ lp->tx_old - lp->tx_new - 1)
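+
+/*
+ * Example: with TX_RING_SIZE == 16, tx_old == tx_new yields 15 free
+ * slots; one entry is always kept unused so that a full ring can be
+ * told apart from an empty one.
+ */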
+
+/* The lance control ports are at an absolute address, machine and tc-slot
+ * dependent.
+ * DECstations do only 32-bit accesses and the LANCE uses 16-bit addresses,
+ * so we have to give the structure an extra pad member so that rap points
+ * at the right address.
+ */
+struct lance_regs {
+ volatile unsigned short rdp; /* register data port */
+ unsigned short pad;
+ volatile unsigned short rap; /* register address port */
+};
+
+int dec_lance_debug = 2;
+
+static struct tc_driver dec_lance_tc_driver;
+static struct net_device *root_lance_dev;
+
+static inline void writereg(volatile unsigned short *regptr, short value)
+{
+ *regptr = value;
+ iob();
+}
+
+/* Load the CSR registers */
+static void load_csrs(struct lance_private *lp)
+{
+ volatile struct lance_regs *ll = lp->ll;
+ uint leptr;
+
+ /* The address space as seen from the LANCE
+ * begins at address 0. HK
+ */
+ leptr = 0;
+
+ writereg(&ll->rap, LE_CSR1);
+ writereg(&ll->rdp, (leptr & 0xFFFF));
+ writereg(&ll->rap, LE_CSR2);
+ writereg(&ll->rdp, leptr >> 16);
+ writereg(&ll->rap, LE_CSR3);
+ writereg(&ll->rdp, lp->busmaster_regval);
+
+ /* Point back to csr0 */
+ writereg(&ll->rap, LE_CSR0);
+}
+
+/*
+ * Our specialized copy routines
+ *
+ */
+static void cp_to_buf(const int type, void *to, const void *from, int len)
+{
+ unsigned short *tp;
+ const unsigned short *fp;
+ unsigned short clen;
+ unsigned char *rtp;
+ const unsigned char *rfp;
+
+ if (type == PMAD_LANCE) {
+ memcpy(to, from, len);
+ } else if (type == PMAX_LANCE) {
+ clen = len >> 1;
+ tp = to;
+ fp = from;
+
+ while (clen--) {
+ *tp++ = *fp++;
+ tp++;
+ }
+
+ clen = len & 1;
+ rtp = tp;
+ rfp = fp;
+ while (clen--) {
+ *rtp++ = *rfp++;
+ }
+ } else {
+ /*
+ * copy 16-byte chunks
+ */
+ clen = len >> 4;
+ tp = to;
+ fp = from;
+ while (clen--) {
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ tp += 8;
+ }
+
+ /*
+ * do the rest, if any.
+ */
+ clen = len & 15;
+ rtp = (unsigned char *) tp;
+ rfp = (unsigned char *) fp;
+ while (clen--) {
+ *rtp++ = *rfp++;
+ }
+ }
+
+ iob();
+}
+
+static void cp_from_buf(const int type, void *to, const void *from, int len)
+{
+ unsigned short *tp;
+ const unsigned short *fp;
+ unsigned short clen;
+ unsigned char *rtp;
+ const unsigned char *rfp;
+
+ if (type == PMAD_LANCE) {
+ memcpy(to, from, len);
+ } else if (type == PMAX_LANCE) {
+ clen = len >> 1;
+ tp = to;
+ fp = from;
+ while (clen--) {
+ *tp++ = *fp++;
+ fp++;
+ }
+
+ clen = len & 1;
+
+ rtp = tp;
+ rfp = fp;
+
+ while (clen--) {
+ *rtp++ = *rfp++;
+ }
+ } else {
+
+ /*
+ * copy 16-byte chunks
+ */
+ clen = len >> 4;
+ tp = to;
+ fp = from;
+ while (clen--) {
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ fp += 8;
+ }
+
+ /*
+ * do the rest, if any.
+ */
+ clen = len & 15;
+ rtp = (unsigned char *) tp;
+ rfp = (unsigned char *) fp;
+ while (clen--) {
+ *rtp++ = *rfp++;
+ }
+ }
+}
+
+/* Setup the Lance Rx and Tx rings */
+static void lance_init_ring(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ uint leptr;
+ int i;
+
+ /* Lock out other processes while setting up hardware */
+ netif_stop_queue(dev);
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ /* Copy the ethernet address to the lance init block.
+ * XXX bit 0 of the physical address registers has to be zero
+ */
+ *lib_ptr(ib, phys_addr[0], lp->type) = (dev->dev_addr[1] << 8) |
+ dev->dev_addr[0];
+ *lib_ptr(ib, phys_addr[1], lp->type) = (dev->dev_addr[3] << 8) |
+ dev->dev_addr[2];
+ *lib_ptr(ib, phys_addr[2], lp->type) = (dev->dev_addr[5] << 8) |
+ dev->dev_addr[4];
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = offsetof(struct lance_init_block, brx_ring);
+ *lib_ptr(ib, rx_len, lp->type) = (LANCE_LOG_RX_BUFFERS << 13) |
+ (leptr >> 16);
+ *lib_ptr(ib, rx_ptr, lp->type) = leptr;
+ if (ZERO)
+ printk("RX ptr: %8.8x(%8.8x)\n",
+ leptr, lib_off(brx_ring, lp->type));
+
+ /* Setup tx descriptor pointer */
+ leptr = offsetof(struct lance_init_block, btx_ring);
+ *lib_ptr(ib, tx_len, lp->type) = (LANCE_LOG_TX_BUFFERS << 13) |
+ (leptr >> 16);
+ *lib_ptr(ib, tx_ptr, lp->type) = leptr;
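+ /* The top three bits of the len word encode log2 of the ring
+ * size (the LANCE init block TLEN/RLEN fields), so
+ * LANCE_LOG_TX_BUFFERS == 4 (16 entries) puts 4 << 13 == 0x8000
+ * into the field.
+ */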
+ if (ZERO)
+ printk("TX ptr: %8.8x(%8.8x)\n",
+ leptr, lib_off(btx_ring, lp->type));
+
+ if (ZERO)
+ printk("TX rings:\n");
+
+ /* Setup the Tx ring entries */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ leptr = lp->tx_buf_ptr_lnc[i];
+ *lib_ptr(ib, btx_ring[i].tmd0, lp->type) = leptr;
+ *lib_ptr(ib, btx_ring[i].tmd1, lp->type) = (leptr >> 16) &
+ 0xff;
+ *lib_ptr(ib, btx_ring[i].length, lp->type) = 0xf000;
+ /* The ones required by tmd2 */
+ *lib_ptr(ib, btx_ring[i].misc, lp->type) = 0;
+ if (i < 3 && ZERO)
+ printk("%d: 0x%8.8x(0x%8.8x)\n",
+ i, leptr, (uint)lp->tx_buf_ptr_cpu[i]);
+ }
+
+ /* Setup the Rx ring entries */
+ if (ZERO)
+ printk("RX rings:\n");
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ leptr = lp->rx_buf_ptr_lnc[i];
+ *lib_ptr(ib, brx_ring[i].rmd0, lp->type) = leptr;
+ *lib_ptr(ib, brx_ring[i].rmd1, lp->type) = ((leptr >> 16) &
+ 0xff) |
+ LE_R1_OWN;
+ *lib_ptr(ib, brx_ring[i].length, lp->type) = -RX_BUFF_SIZE |
+ 0xf000;
+ *lib_ptr(ib, brx_ring[i].mblength, lp->type) = 0;
+ if (i < 3 && ZERO)
+ printk("%d: 0x%8.8x(0x%8.8x)\n",
+ i, leptr, (uint)lp->rx_buf_ptr_cpu[i]);
+ }
+ iob();
+}
+
+static int init_restart_lance(struct lance_private *lp)
+{
+ volatile struct lance_regs *ll = lp->ll;
+ int i;
+
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_INIT);
+
+ /* Wait for the lance to complete initialization */
+ for (i = 0; (i < 100) && !(ll->rdp & LE_C0_IDON); i++) {
+ udelay(10);
+ }
+ if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
+ printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
+ i, ll->rdp);
+ return -1;
+ }
+ if ((ll->rdp & LE_C0_ERR)) {
+ printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
+ i, ll->rdp);
+ return -1;
+ }
+ writereg(&ll->rdp, LE_C0_IDON);
+ writereg(&ll->rdp, LE_C0_STRT);
+ writereg(&ll->rdp, LE_C0_INEA);
+
+ return 0;
+}
+
+static int lance_rx(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ volatile u16 *rd;
+ unsigned short bits;
+ int entry, len;
+ struct sk_buff *skb;
+
+#ifdef TEST_HITS
+ {
+ int i;
+
+ printk("[");
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (i == lp->rx_new)
+ printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
+ lp->type) &
+ LE_R1_OWN ? "_" : "X");
+ else
+ printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
+ lp->type) &
+ LE_R1_OWN ? "." : "1");
+ }
+ printk("]");
+ }
+#endif
+
+ for (rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type);
+ !((bits = *rds_ptr(rd, rmd1, lp->type)) & LE_R1_OWN);
+ rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type)) {
+ entry = lp->rx_new;
+
+ /* We got an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF)
+ dev->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC)
+ dev->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL)
+ dev->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA)
+ dev->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP)
+ dev->stats.rx_errors++;
+ } else {
+ len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4;
+ skb = dev_alloc_skb(len + 2);
+
+ if (skb == 0) {
+ printk("%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+ dev->stats.rx_dropped++;
+ *rds_ptr(rd, mblength, lp->type) = 0;
+ *rds_ptr(rd, rmd1, lp->type) =
+ ((lp->rx_buf_ptr_lnc[entry] >> 16) &
+ 0xff) | LE_R1_OWN;
+ lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
+ return 0;
+ }
+ dev->stats.rx_bytes += len;
+
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+
+ cp_from_buf(lp->type, skb->data,
+ (char *)lp->rx_buf_ptr_cpu[entry], len);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ }
+
+ /* Return the packet to the pool */
+ *rds_ptr(rd, mblength, lp->type) = 0;
+ *rds_ptr(rd, length, lp->type) = -RX_BUFF_SIZE | 0xf000;
+ *rds_ptr(rd, rmd1, lp->type) =
+ ((lp->rx_buf_ptr_lnc[entry] >> 16) & 0xff) | LE_R1_OWN;
+ lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
+ }
+ return 0;
+}
+
+static void lance_tx(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ volatile struct lance_regs *ll = lp->ll;
+ volatile u16 *td;
+ int i, j;
+ int status;
+
+ j = lp->tx_old;
+
+ spin_lock(&lp->lock);
+
+ for (i = j; i != lp->tx_new; i = j) {
+ td = lib_ptr(ib, btx_ring[i], lp->type);
+ /* If we hit a packet not owned by us, stop */
+ if (*tds_ptr(td, tmd1, lp->type) & LE_T1_OWN)
+ break;
+
+ if (*tds_ptr(td, tmd1, lp->type) & LE_T1_ERR) {
+ status = *tds_ptr(td, misc, lp->type);
+
+ dev->stats.tx_errors++;
+ if (status & LE_T3_RTY)
+ dev->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL)
+ dev->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ dev->stats.tx_carrier_errors++;
+ printk("%s: Carrier Lost\n", dev->name);
+ /* Stop the lance */
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ /* Buffer errors and underflows turn off the
+ * transmitter, restart the adapter.
+ */
+ if (status & (LE_T3_BUF | LE_T3_UFL)) {
+ dev->stats.tx_fifo_errors++;
+
+ printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ /* Stop the lance */
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ } else if ((*tds_ptr(td, tmd1, lp->type) & LE_T1_POK) ==
+ LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ *tds_ptr(td, tmd1, lp->type) &= ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EONE)
+ dev->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EMORE)
+ dev->stats.collisions += 2;
+
+ dev->stats.tx_packets++;
+ }
+ j = (j + 1) & TX_RING_MOD_MASK;
+ }
+ lp->tx_old = j;
+out:
+ if (netif_queue_stopped(dev) &&
+ TX_BUFFS_AVAIL > 0)
+ netif_wake_queue(dev);
+
+ spin_unlock(&lp->lock);
+}
+
+static irqreturn_t lance_dma_merr_int(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+
+ printk(KERN_ERR "%s: DMA error\n", dev->name);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lance_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int csr0;
+
+ writereg(&ll->rap, LE_CSR0);
+ csr0 = ll->rdp;
+
+ /* Acknowledge all the interrupt sources ASAP */
+ writereg(&ll->rdp, csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT));
+
+ if ((csr0 & LE_C0_ERR)) {
+ /* Clear the error condition */
+ writereg(&ll->rdp, LE_C0_BABL | LE_C0_ERR | LE_C0_MISS |
+ LE_C0_CERR | LE_C0_MERR);
+ }
+ if (csr0 & LE_C0_RINT)
+ lance_rx(dev);
+
+ if (csr0 & LE_C0_TINT)
+ lance_tx(dev);
+
+ if (csr0 & LE_C0_BABL)
+ dev->stats.tx_errors++;
+
+ if (csr0 & LE_C0_MISS)
+ dev->stats.rx_errors++;
+
+ if (csr0 & LE_C0_MERR) {
+ printk("%s: Memory error, status %04x\n", dev->name, csr0);
+
+ writereg(&ll->rdp, LE_C0_STOP);
+
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ netif_wake_queue(dev);
+ }
+
+ writereg(&ll->rdp, LE_C0_INEA);
+ writereg(&ll->rdp, LE_C0_INEA);
+ return IRQ_HANDLED;
+}
+
+static int lance_open(struct net_device *dev)
+{
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int status = 0;
+
+ /* Stop the Lance */
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+
+ /* Set mode and clear multicast filter only at device open,
+ * so that lance_init_ring() called at any error will not
+ * forget multicast filters.
+ *
+ * BTW it is a common bug in all lance drivers! --ANK
+ */
+ *lib_ptr(ib, mode, lp->type) = 0;
+ *lib_ptr(ib, filter[0], lp->type) = 0;
+ *lib_ptr(ib, filter[1], lp->type) = 0;
+ *lib_ptr(ib, filter[2], lp->type) = 0;
+ *lib_ptr(ib, filter[3], lp->type) = 0;
+
+ lance_init_ring(dev);
+ load_csrs(lp);
+
+ netif_start_queue(dev);
+
+ /* Associate IRQ with lance_interrupt */
+ if (request_irq(dev->irq, lance_interrupt, 0, "lance", dev)) {
+ printk("%s: Can't get IRQ %d\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+ if (lp->dma_irq >= 0) {
+ unsigned long flags;
+
+ if (request_irq(lp->dma_irq, lance_dma_merr_int, 0,
+ "lance error", dev)) {
+ free_irq(dev->irq, dev);
+ printk("%s: Can't get DMA IRQ %d\n", dev->name,
+ lp->dma_irq);
+ return -EAGAIN;
+ }
+
+ spin_lock_irqsave(&ioasic_ssr_lock, flags);
+
+ fast_mb();
+ /* Enable I/O ASIC LANCE DMA. */
+ ioasic_write(IO_REG_SSR,
+ ioasic_read(IO_REG_SSR) | IO_SSR_LANCE_DMA_EN);
+
+ fast_mb();
+ spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
+ }
+
+ status = init_restart_lance(lp);
+ return status;
+}
+
+static int lance_close(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+
+ netif_stop_queue(dev);
+ del_timer_sync(&lp->multicast_timer);
+
+ /* Stop the card */
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+
+ if (lp->dma_irq >= 0) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioasic_ssr_lock, flags);
+
+ fast_mb();
+ /* Disable I/O ASIC LANCE DMA. */
+ ioasic_write(IO_REG_SSR,
+ ioasic_read(IO_REG_SSR) & ~IO_SSR_LANCE_DMA_EN);
+
+ fast_iob();
+ spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
+
+ free_irq(lp->dma_irq, dev);
+ }
+ free_irq(dev->irq, dev);
+ return 0;
+}
+
+static inline int lance_reset(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int status;
+
+ /* Stop the lance */
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+
+ lance_init_ring(dev);
+ load_csrs(lp);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ status = init_restart_lance(lp);
+ return status;
+}
+
+static void lance_tx_timeout(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+
+ printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
+ dev->name, ll->rdp);
+ lance_reset(dev);
+ netif_wake_queue(dev);
+}
+
+static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ unsigned long flags;
+ int entry, len;
+
+ len = skb->len;
+
+ if (len < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ len = ETH_ZLEN;
+ }
+
+ dev->stats.tx_bytes += len;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ entry = lp->tx_new;
+ *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
+ *lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
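+ /* The LANCE wants the buffer length as a negative two's
+ * complement value, e.g. a 64-byte frame is stored as -64,
+ * i.e. 0xffc0 in the 16-bit length field above.
+ */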
+
+ cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data, len);
+
+ /* Now, give the packet to the lance */
+ *lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =
+ ((lp->tx_buf_ptr_lnc[entry] >> 16) & 0xff) |
+ (LE_T1_POK | LE_T1_OWN);
+ lp->tx_new = (entry + 1) & TX_RING_MOD_MASK;
+
+ if (TX_BUFFS_AVAIL <= 0)
+ netif_stop_queue(dev);
+
+ /* Kick the lance: transmit now */
+ writereg(&ll->rdp, LE_C0_INEA | LE_C0_TDMD);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static void lance_load_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ struct netdev_hw_addr *ha;
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI) {
+ *lib_ptr(ib, filter[0], lp->type) = 0xffff;
+ *lib_ptr(ib, filter[1], lp->type) = 0xffff;
+ *lib_ptr(ib, filter[2], lp->type) = 0xffff;
+ *lib_ptr(ib, filter[3], lp->type) = 0xffff;
+ return;
+ }
+ /* clear the multicast filter */
+ *lib_ptr(ib, filter[0], lp->type) = 0;
+ *lib_ptr(ib, filter[1], lp->type) = 0;
+ *lib_ptr(ib, filter[2], lp->type) = 0;
+ *lib_ptr(ib, filter[3], lp->type) = 0;
+
+ /* Add addresses */
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ crc = crc >> 26;
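+ /* crc now holds the top 6 CRC bits (0..63): bits 5-4 pick
+ * one of the four 16-bit filter words and bits 3-0 the bit
+ * within it, e.g. crc == 42 (0b101010) sets bit 10 of
+ * filter[2].
+ */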
+ *lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf);
+ }
+}
+
+static void lance_set_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ volatile struct lance_regs *ll = lp->ll;
+
+ if (!netif_running(dev))
+ return;
+
+ if (lp->tx_old != lp->tx_new) {
+ mod_timer(&lp->multicast_timer, jiffies + 4 * HZ/100);
+ netif_wake_queue(dev);
+ return;
+ }
+
+ netif_stop_queue(dev);
+
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+
+ lance_init_ring(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ *lib_ptr(ib, mode, lp->type) |= LE_MO_PROM;
+ } else {
+ *lib_ptr(ib, mode, lp->type) &= ~LE_MO_PROM;
+ lance_load_multicast(dev);
+ }
+ load_csrs(lp);
+ init_restart_lance(lp);
+ netif_wake_queue(dev);
+}
+
+static void lance_set_multicast_retry(unsigned long _opaque)
+{
+ struct net_device *dev = (struct net_device *) _opaque;
+
+ lance_set_multicast(dev);
+}
+
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = lance_open,
+ .ndo_stop = lance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_tx_timeout = lance_tx_timeout,
+ .ndo_set_rx_mode = lance_set_multicast,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int __devinit dec_lance_probe(struct device *bdev, const int type)
+{
+ static unsigned version_printed;
+ static const char fmt[] = "declance%d";
+ char name[10];
+ struct net_device *dev;
+ struct lance_private *lp;
+ volatile struct lance_regs *ll;
+ resource_size_t start = 0, len = 0;
+ int i, ret;
+ unsigned long esar_base;
+ unsigned char *esar;
+
+ if (dec_lance_debug && version_printed++ == 0)
+ printk(version);
+
+ if (bdev)
+ snprintf(name, sizeof(name), "%s", dev_name(bdev));
+ else {
+ i = 0;
+ dev = root_lance_dev;
+ while (dev) {
+ i++;
+ lp = netdev_priv(dev);
+ dev = lp->next;
+ }
+ snprintf(name, sizeof(name), fmt, i);
+ }
+
+ dev = alloc_etherdev(sizeof(struct lance_private));
+ if (!dev) {
+ printk(KERN_ERR "%s: Unable to allocate etherdev, aborting.\n",
+ name);
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ /*
+ * alloc_etherdev ensures the data structures used by the LANCE
+ * are aligned.
+ */
+ lp = netdev_priv(dev);
+ spin_lock_init(&lp->lock);
+
+ lp->type = type;
+ switch (type) {
+ case ASIC_LANCE:
+ dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE);
+
+ /* buffer space for the on-board LANCE shared memory */
+ /*
+ * FIXME: ugly hack!
+ */
+ dev->mem_start = CKSEG1ADDR(0x00020000);
+ dev->mem_end = dev->mem_start + 0x00020000;
+ dev->irq = dec_interrupt[DEC_IRQ_LANCE];
+ esar_base = CKSEG1ADDR(dec_kn_slot_base + IOASIC_ESAR);
+
+ /* Workaround crash with booting KN04 2.1k from Disk */
+ memset((void *)dev->mem_start, 0,
+ dev->mem_end - dev->mem_start);
+
+ /*
+ * setup the pointer arrays, this sucks [tm] :-(
+ */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ lp->rx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
+ 2 * i * RX_BUFF_SIZE);
+ lp->rx_buf_ptr_lnc[i] =
+ (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
+ 2 * RX_RING_SIZE * RX_BUFF_SIZE +
+ 2 * i * TX_BUFF_SIZE);
+ lp->tx_buf_ptr_lnc[i] =
+ (BUF_OFFSET_LNC +
+ RX_RING_SIZE * RX_BUFF_SIZE +
+ i * TX_BUFF_SIZE);
+ }
+
+ /* Setup I/O ASIC LANCE DMA. */
+ lp->dma_irq = dec_interrupt[DEC_IRQ_LANCE_MERR];
+ ioasic_write(IO_REG_LANCE_DMA_P,
+ CPHYSADDR(dev->mem_start) << 3);
+
+ break;
+#ifdef CONFIG_TC
+ case PMAD_LANCE:
+ dev_set_drvdata(bdev, dev);
+
+ start = to_tc_dev(bdev)->resource.start;
+ len = to_tc_dev(bdev)->resource.end - start + 1;
+ if (!request_mem_region(start, len, dev_name(bdev))) {
+ printk(KERN_ERR
+ "%s: Unable to reserve MMIO resource\n",
+ dev_name(bdev));
+ ret = -EBUSY;
+ goto err_out_dev;
+ }
+
+ dev->mem_start = CKSEG1ADDR(start);
+ dev->mem_end = dev->mem_start + 0x100000;
+ dev->base_addr = dev->mem_start + 0x100000;
+ dev->irq = to_tc_dev(bdev)->interrupt;
+ esar_base = dev->mem_start + 0x1c0002;
+ lp->dma_irq = -1;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ lp->rx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + BUF_OFFSET_CPU +
+ i * RX_BUFF_SIZE);
+ lp->rx_buf_ptr_lnc[i] =
+ (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + BUF_OFFSET_CPU +
+ RX_RING_SIZE * RX_BUFF_SIZE +
+ i * TX_BUFF_SIZE);
+ lp->tx_buf_ptr_lnc[i] =
+ (BUF_OFFSET_LNC +
+ RX_RING_SIZE * RX_BUFF_SIZE +
+ i * TX_BUFF_SIZE);
+ }
+
+ break;
+#endif
+ case PMAX_LANCE:
+ dev->irq = dec_interrupt[DEC_IRQ_LANCE];
+ dev->base_addr = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE);
+ dev->mem_start = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE_MEM);
+ dev->mem_end = dev->mem_start + KN01_SLOT_SIZE;
+ esar_base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_ESAR + 1);
+ lp->dma_irq = -1;
+
+ /*
+ * setup the pointer arrays, this sucks [tm] :-(
+ */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ lp->rx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
+ 2 * i * RX_BUFF_SIZE);
+ lp->rx_buf_ptr_lnc[i] =
+ (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
+ 2 * RX_RING_SIZE * RX_BUFF_SIZE +
+ 2 * i * TX_BUFF_SIZE);
+ lp->tx_buf_ptr_lnc[i] =
+ (BUF_OFFSET_LNC +
+ RX_RING_SIZE * RX_BUFF_SIZE +
+ i * TX_BUFF_SIZE);
+ }
+
+ break;
+
+ default:
+ printk(KERN_ERR "%s: declance_init called with unknown type\n",
+ name);
+ ret = -ENODEV;
+ goto err_out_dev;
+ }
+
+ ll = (struct lance_regs *) dev->base_addr;
+ esar = (unsigned char *) esar_base;
+
+ /* prom checks */
+ /* First, check for test pattern */
+ if (esar[0x60] != 0xff && esar[0x64] != 0x00 &&
+ esar[0x68] != 0x55 && esar[0x6c] != 0xaa) {
+ printk(KERN_ERR
+ "%s: Ethernet station address prom not found!\n",
+ name);
+ ret = -ENODEV;
+ goto err_out_resource;
+ }
+ /* Check the prom contents */
+ for (i = 0; i < 8; i++) {
+ if (esar[i * 4] != esar[0x3c - i * 4] &&
+ esar[i * 4] != esar[0x40 + i * 4] &&
+ esar[0x3c - i * 4] != esar[0x40 + i * 4]) {
+ printk(KERN_ERR "%s: Something is wrong with the "
+ "ethernet station address prom!\n", name);
+ ret = -ENODEV;
+ goto err_out_resource;
+ }
+ }
+
+ /* Copy the ethernet address to the device structure, later to the
+ * lance initialization block so the lance gets it every time it's
+ * (re)initialized.
+ */
+ switch (type) {
+ case ASIC_LANCE:
+ printk("%s: IOASIC onboard LANCE", name);
+ break;
+ case PMAD_LANCE:
+ printk("%s: PMAD-AA", name);
+ break;
+ case PMAX_LANCE:
+ printk("%s: PMAX onboard LANCE", name);
+ break;
+ }
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = esar[i * 4];
+
+ printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
+
+ dev->netdev_ops = &lance_netdev_ops;
+ dev->watchdog_timeo = 5*HZ;
+
+ /* lp->ll is the location of the registers for lance card */
+ lp->ll = ll;
+
+ /* busmaster_regval (CSR3) should be zero according to the PMAD-AA
+ * specification.
+ */
+ lp->busmaster_regval = 0;
+
+ dev->dma = 0;
+
+ /* We cannot sleep if the chip is busy during a
+ * multicast list update event, because such events
+ * can occur from interrupts (e.g. IPv6). So we
+ * use a timer to try again later when necessary. -DaveM
+ */
+ init_timer(&lp->multicast_timer);
+ lp->multicast_timer.data = (unsigned long) dev;
+ lp->multicast_timer.function = lance_set_multicast_retry;
+
+ ret = register_netdev(dev);
+ if (ret) {
+ printk(KERN_ERR
+ "%s: Unable to register netdev, aborting.\n", name);
+ goto err_out_resource;
+ }
+
+ if (!bdev) {
+ lp->next = root_lance_dev;
+ root_lance_dev = dev;
+ }
+
+ printk("%s: registered as %s.\n", name, dev->name);
+ return 0;
+
+err_out_resource:
+ if (bdev)
+ release_mem_region(start, len);
+
+err_out_dev:
+ free_netdev(dev);
+
+err_out:
+ return ret;
+}
+
+static void __exit dec_lance_remove(struct device *bdev)
+{
+ struct net_device *dev = dev_get_drvdata(bdev);
+ resource_size_t start, len;
+
+ unregister_netdev(dev);
+ start = to_tc_dev(bdev)->resource.start;
+ len = to_tc_dev(bdev)->resource.end - start + 1;
+ release_mem_region(start, len);
+ free_netdev(dev);
+}
+
+/* Find all the lance cards on the system and initialize them */
+static int __init dec_lance_platform_probe(void)
+{
+ int count = 0;
+
+ if (dec_interrupt[DEC_IRQ_LANCE] >= 0) {
+ if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) {
+ if (dec_lance_probe(NULL, ASIC_LANCE) >= 0)
+ count++;
+ } else if (!TURBOCHANNEL) {
+ if (dec_lance_probe(NULL, PMAX_LANCE) >= 0)
+ count++;
+ }
+ }
+
+ return (count > 0) ? 0 : -ENODEV;
+}
+
+static void __exit dec_lance_platform_remove(void)
+{
+ while (root_lance_dev) {
+ struct net_device *dev = root_lance_dev;
+ struct lance_private *lp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ root_lance_dev = lp->next;
+ free_netdev(dev);
+ }
+}
+
+#ifdef CONFIG_TC
+static int __devinit dec_lance_tc_probe(struct device *dev);
+static int __exit dec_lance_tc_remove(struct device *dev);
+
+static const struct tc_device_id dec_lance_tc_table[] = {
+ { "DEC ", "PMAD-AA " },
+ { }
+};
+MODULE_DEVICE_TABLE(tc, dec_lance_tc_table);
+
+static struct tc_driver dec_lance_tc_driver = {
+ .id_table = dec_lance_tc_table,
+ .driver = {
+ .name = "declance",
+ .bus = &tc_bus_type,
+ .probe = dec_lance_tc_probe,
+ .remove = __exit_p(dec_lance_tc_remove),
+ },
+};
+
+static int __devinit dec_lance_tc_probe(struct device *dev)
+{
+ int status = dec_lance_probe(dev, PMAD_LANCE);
+ if (!status)
+ get_device(dev);
+ return status;
+}
+
+static int __exit dec_lance_tc_remove(struct device *dev)
+{
+ put_device(dev);
+ dec_lance_remove(dev);
+ return 0;
+}
+#endif
+
+static int __init dec_lance_init(void)
+{
+ int status;
+
+ status = tc_register_driver(&dec_lance_tc_driver);
+ if (!status)
+ dec_lance_platform_probe();
+ return status;
+}
+
+static void __exit dec_lance_exit(void)
+{
+ dec_lance_platform_remove();
+ tc_unregister_driver(&dec_lance_tc_driver);
+}
+
+module_init(dec_lance_init);
+module_exit(dec_lance_exit);
diff --git a/drivers/net/ethernet/amd/depca.c b/drivers/net/ethernet/amd/depca.c
new file mode 100644
index 000000000000..681970c07f22
--- /dev/null
+++ b/drivers/net/ethernet/amd/depca.c
@@ -0,0 +1,2111 @@
+/* depca.c: A DIGITAL DEPCA & EtherWORKS ethernet driver for linux.
+
+ Written 1994, 1995 by David C. Davies.
+
+
+ Copyright 1994 David C. Davies
+ and
+ United States Government
+ (as represented by the Director, National Security Agency).
+
+ Copyright 1995 Digital Equipment Corporation.
+
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License, incorporated herein by reference.
+
+ This driver is written for the Digital Equipment Corporation series
+ of DEPCA and EtherWORKS ethernet cards:
+
+ DEPCA (the original)
+ DE100
+ DE101
+ DE200 Turbo
+ DE201 Turbo
+ DE202 Turbo (TP BNC)
+ DE210
+ DE422 (EISA)
+
+ The driver has been tested on DE100, DE200 and DE202 cards in a
+ relatively busy network. The DE422 has been tested a little.
+
+ This driver will NOT work for the DE203, DE204 and DE205 series of
+ cards, since they have a new custom ASIC in place of the AMD LANCE
+ chip. See the 'ewrk3.c' driver in the Linux source tree for running
+ those cards.
+
+ I have benchmarked the driver with a DE100 at 595kB/s to (542kB/s from)
+ a DECstation 5000/200.
+
+ The author may be reached at davies@maniac.ultranet.com
+
+ =========================================================================
+
+ The driver was originally based on the 'lance.c' driver from Donald
+ Becker which is included with the standard driver distribution for
+ linux. V0.4 is a complete re-write with only the kernel interface
+ remaining from the original code.
+
+ 1) Lance.c code in /linux/drivers/net/
+ 2) "Ethernet/IEEE 802.3 Family. 1992 World Network Data Book/Handbook",
+ AMD, 1992 [(800) 222-9323].
+ 3) "Am79C90 CMOS Local Area Network Controller for Ethernet (C-LANCE)",
+ AMD, Pub. #17881, May 1993.
+ 4) "Am79C960 PCnet-ISA(tm), Single-Chip Ethernet Controller for ISA",
+ AMD, Pub. #16907, May 1992
+ 5) "DEC EtherWORKS LC Ethernet Controller Owners Manual",
+ Digital Equipment corporation, 1990, Pub. #EK-DE100-OM.003
+ 6) "DEC EtherWORKS Turbo Ethernet Controller Owners Manual",
+ Digital Equipment corporation, 1990, Pub. #EK-DE200-OM.003
+ 7) "DEPCA Hardware Reference Manual", Pub. #EK-DEPCA-PR
+ Digital Equipment Corporation, 1989
+ 8) "DEC EtherWORKS Turbo_(TP BNC) Ethernet Controller Owners Manual",
+ Digital Equipment corporation, 1991, Pub. #EK-DE202-OM.001
+
+
+ Peter Bauer's depca.c (V0.5) was referred to when debugging V0.1 of this
+ driver.
+
+ The original DEPCA card requires that the ethernet ROM address counter
+ be enabled to count and has an 8 bit NICSR. The ROM counter enabling is
+ only done when a 0x08 is read as the first address octet (to minimise
+ the chances of writing over some other hardware's I/O register). The
+ NICSR accesses have been changed to byte accesses for all the cards
+ supported by this driver, since there is only one useful bit in the MSB
+ (remote boot timeout) and it is not used. Also, there is a maximum of
+ only 48kB network RAM for this card. My thanks to Torbjorn Lindh for
+ help debugging all this (and holding my feet to the fire until I got it
+ right).
+
+ The DE200 series boards have on-board 64kB RAM for use as a shared
+ memory network buffer. Only the DE100 cards make use of a 2kB buffer
+ mode which has not been implemented in this driver (only the 32kB and
+ 64kB modes are supported [16kB/48kB for the original DEPCA]).
+
+ At the most only 2 DEPCA cards can be supported on the ISA bus because
+ there is only provision for two I/O base addresses on each card (0x300
+ and 0x200). The I/O address is detected by searching for a byte sequence
+ in the Ethernet station address PROM at the expected I/O address for the
+ Ethernet PROM. The shared memory base address is 'autoprobed' by
+ looking for the self test PROM and detecting the card name. When a
+ second DEPCA is detected, information is placed in the base_addr
+ variable of the next device structure (which is created if necessary),
+ thus enabling ethif_probe initialization for the device. More than 2
+ EISA cards can be supported, but care will be needed assigning the
+ shared memory to ensure that each slot has the correct IRQ, I/O address
+ and shared memory address assigned.
+
+ ************************************************************************
+
+ NOTE: If you are using two ISA DEPCAs, it is important that you assign
+ the base memory addresses correctly. The driver autoprobes I/O 0x300
+ then 0x200. The base memory address for the first device must be less
+ than that of the second so that the auto probe will correctly assign the
+ I/O and memory addresses on the same card. I can't think of a way to do
+ this unambiguously at the moment, since there is nothing on the cards to
+ tie I/O and memory information together.
+
+ I am unable to test 2 cards together for now, so this code is
+ unchecked. All reports, good or bad, are welcome.
+
+ ************************************************************************
+
+ The board IRQ setting must be at an unused IRQ which is auto-probed
+ using Donald Becker's autoprobe routines. DEPCA and DE100 board IRQs are
+ {2,3,4,5,7}, whereas the DE200 is at {5,9,10,11,15}. Note that IRQ2 is
+ really IRQ9 in machines with 16 IRQ lines.
+
+ No 16MB memory limitation should exist with this driver as DMA is not
+ used and the common memory area is in low memory on the network card (my
+ current system has 20MB and I've not had problems yet).
+
+ The ability to load this driver as a loadable module has been added. To
+   utilise this ability, you have to do the following things:
+
+ 0) have a copy of the loadable modules code installed on your system.
+ 1) copy depca.c from the /linux/drivers/net directory to your favourite
+ temporary directory.
+ 2) if you wish, edit the source code near line 1530 to reflect the I/O
+ address and IRQ you're using (see also 5).
+ 3) compile depca.c, but include -DMODULE in the command line to ensure
+ that the correct bits are compiled (see end of source code).
+ 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
+ kernel with the depca configuration turned off and reboot.
+ 5) insmod depca.o [irq=7] [io=0x200] [mem=0xd0000] [adapter_name=DE100]
+ [Alan Cox: Changed the code to allow command line irq/io assignments]
+ [Dave Davies: Changed the code to allow command line mem/name
+ assignments]
+ 6) run the net startup bits for your eth?? interface manually
+ (usually /etc/rc.inet[12] at boot time).
+ 7) enjoy!
+
+ Note that autoprobing is not allowed in loadable modules - the system is
+ already up and running and you're messing with interrupts.
+
+ To unload a module, turn off the associated interface
+ 'ifconfig eth?? down' then 'rmmod depca'.
+
+ To assign a base memory address for the shared memory when running as a
+ loadable module, see 5 above. To include the adapter name (if you have
+ no PROM but know the card name) also see 5 above. Note that this last
+ option will not work with kernel built-in depca's.
+
+ The shared memory assignment for a loadable module makes sense to avoid
+ the 'memory autoprobe' picking the wrong shared memory (for the case of
+ 2 depca's in a PC).
+
+ ************************************************************************
+ Support for MCA EtherWORKS cards added 11-3-98.
+ Verified to work with up to 2 DE212 cards in a system (although not
+ fully stress-tested).
+
+ Currently known bugs/limitations:
+
+    Note: when the MCA support is built as a module, it trusts the MCA
+          configuration, not the command line, for the IRQ and memory
+          address. You can specify them if you want, but your values will
+          be discarded. You still have to pass the I/O address the card
+          was configured at, though.
+
+ ************************************************************************
+ TO DO:
+ ------
+
+
+ Revision History
+ ----------------
+
+ Version Date Description
+
+ 0.1 25-jan-94 Initial writing.
+ 0.2 27-jan-94 Added LANCE TX hardware buffer chaining.
+ 0.3 1-feb-94 Added multiple DEPCA support.
+ 0.31 4-feb-94 Added DE202 recognition.
+ 0.32 19-feb-94 Tidy up. Improve multi-DEPCA support.
+ 0.33 25-feb-94 Fix DEPCA ethernet ROM counter enable.
+ Add jabber packet fix from murf@perftech.com
+ and becker@super.org
+ 0.34 7-mar-94 Fix DEPCA max network memory RAM & NICSR access.
+ 0.35 8-mar-94 Added DE201 recognition. Tidied up.
+ 0.351 30-apr-94 Added EISA support. Added DE422 recognition.
+ 0.36 16-may-94 DE422 fix released.
+ 0.37 22-jul-94 Added MODULE support
+ 0.38 15-aug-94 Added DBR ROM switch in depca_close().
+ Multi DEPCA bug fix.
+ 0.38axp 15-sep-94 Special version for Alpha AXP Linux V1.0.
+ 0.381 12-dec-94 Added DE101 recognition, fix multicast bug.
+ 0.382 9-feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
+ 0.383 22-feb-95 Fix for conflict with VESA SCSI reported by
+ <stromain@alf.dec.com>
+ 0.384 17-mar-95 Fix a ring full bug reported by <bkm@star.rl.ac.uk>
+ 0.385 3-apr-95 Fix a recognition bug reported by
+ <ryan.niemi@lastfrontier.com>
+ 0.386 21-apr-95 Fix the last fix...sorry, must be galloping senility
+ 0.40 25-May-95 Rewrite for portability & updated.
+ ALPHA support from <jestabro@amt.tay1.dec.com>
+ 0.41 26-Jun-95 Added verify_area() calls in depca_ioctl() from
+ suggestion by <heiko@colossus.escape.de>
+ 0.42 27-Dec-95 Add 'mem' shared memory assignment for loadable
+ modules.
+ Add 'adapter_name' for loadable modules when no PROM.
+ Both above from a suggestion by
+ <pchen@woodruffs121.residence.gatech.edu>.
+ Add new multicasting code.
+ 0.421 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi>
+ 0.422 29-Apr-96 Fix depca_hw_init() bug <jari@markkus2.fimr.fi>
+ 0.423 7-Jun-96 Fix module load bug <kmg@barco.be>
+ 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c
+ 0.44 1-Sep-97 Fix *_probe() to test check_region() first - bug
+ reported by <mmogilvi@elbert.uccs.edu>
+ 0.45 3-Nov-98 Added support for MCA EtherWORKS (DE210/DE212) cards
+ by <tymm@computer.org>
+ 0.451 5-Nov-98 Fixed mca stuff cuz I'm a dummy. <tymm@computer.org>
+ 0.5 14-Nov-98 Re-spin for 2.1.x kernels.
+ 0.51 27-Jun-99 Correct received packet length for CRC from
+ report by <worm@dkik.dk>
+ 0.52 16-Oct-00 Fixes for 2.3 io memory accesses
+ Fix show-stopper (ints left masked) in depca_interrupt
+ by <peterd@pnd-pc.demon.co.uk>
+ 0.53 12-Jan-01 Release resources on failure, bss tidbits
+ by acme@conectiva.com.br
+ 0.54 08-Nov-01 use library crc32 functions
+ by Matt_Domsch@dell.com
+ 0.55 01-Mar-03 Use EISA/sysfs framework <maz@wild-wind.fr.eu.org>
+
+ =========================================================================
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+#include <linux/ctype.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#ifdef CONFIG_MCA
+#include <linux/mca.h>
+#endif
+
+#ifdef CONFIG_EISA
+#include <linux/eisa.h>
+#endif
+
+#include "depca.h"
+
+static char version[] __initdata = "depca.c:v0.53 2001/1/12 davies@maniac.ultranet.com\n";
+
+#ifdef DEPCA_DEBUG
+static int depca_debug = DEPCA_DEBUG;
+#else
+static int depca_debug = 1;
+#endif
+
+#define DEPCA_NDA 0xffe0 /* No Device Address */
+
+#define TX_TIMEOUT (1*HZ)
+
+/*
+** Ethernet PROM defines
+*/
+#define PROBE_LENGTH 32
+#define ETH_PROM_SIG 0xAA5500FFUL
+
+/*
+** Set the number of Tx and Rx buffers. Ensure that the memory requested
+** here is no more than the amount of shared memory set up by the board switches.
+** The number of descriptors MUST BE A POWER OF 2.
+**
+** total_memory = NUM_RX_DESC*(8+RX_BUFF_SZ) + NUM_TX_DESC*(8+TX_BUFF_SZ)
+*/
+#define NUM_RX_DESC 8 /* Number of RX descriptors */
+#define NUM_TX_DESC 8 /* Number of TX descriptors */
+#define RX_BUFF_SZ 1536 /* Buffer size for each Rx buffer */
+#define TX_BUFF_SZ 1536 /* Buffer size for each Tx buffer */
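+/*
+** For illustration, with the default sizes above (and 8-byte descriptors)
+** the formula gives 8*(8+1536) + 8*(8+1536) + 24 = 24728 bytes, roughly
+** 24.2kB plus a few bytes of alignment, comfortably inside even the
+** smallest (32kB) shared memory configuration.
+*/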
+
+/*
+** EISA bus defines
+*/
+#define DEPCA_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
+
+/*
+** ISA Bus defines
+*/
+#define DEPCA_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0xe0000,0x00000}
+#define DEPCA_TOTAL_SIZE 0x10
+
+static struct {
+ u_long iobase;
+ struct platform_device *device;
+} depca_io_ports[] = {
+ { 0x300, NULL },
+ { 0x200, NULL },
+ { 0 , NULL },
+};
+
+/*
+** Name <-> Adapter mapping
+*/
+#define DEPCA_SIGNATURE {"DEPCA",\
+ "DE100","DE101",\
+ "DE200","DE201","DE202",\
+ "DE210","DE212",\
+ "DE422",\
+ ""}
+
+static char* __initdata depca_signature[] = DEPCA_SIGNATURE;
+
+enum depca_type {
+ DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown
+};
+
+static char depca_string[] = "depca";
+
+static int depca_device_remove (struct device *device);
+
+#ifdef CONFIG_EISA
+static struct eisa_device_id depca_eisa_ids[] = {
+ { "DEC4220", de422 },
+ { "" }
+};
+MODULE_DEVICE_TABLE(eisa, depca_eisa_ids);
+
+static int depca_eisa_probe (struct device *device);
+
+static struct eisa_driver depca_eisa_driver = {
+ .id_table = depca_eisa_ids,
+ .driver = {
+ .name = depca_string,
+ .probe = depca_eisa_probe,
+ .remove = __devexit_p (depca_device_remove)
+ }
+};
+#endif
+
+#ifdef CONFIG_MCA
+/*
+** Adapter ID for the MCA EtherWORKS DE210/212 adapter
+*/
+#define DE210_ID 0x628d
+#define DE212_ID 0x6def
+
+static short depca_mca_adapter_ids[] = {
+ DE210_ID,
+ DE212_ID,
+ 0x0000
+};
+
+static char *depca_mca_adapter_name[] = {
+ "DEC EtherWORKS MC Adapter (DE210)",
+ "DEC EtherWORKS MC Adapter (DE212)",
+ NULL
+};
+
+static enum depca_type depca_mca_adapter_type[] = {
+ de210,
+ de212,
+ 0
+};
+
+static int depca_mca_probe (struct device *);
+
+static struct mca_driver depca_mca_driver = {
+ .id_table = depca_mca_adapter_ids,
+ .driver = {
+ .name = depca_string,
+ .bus = &mca_bus_type,
+ .probe = depca_mca_probe,
+ .remove = __devexit_p(depca_device_remove),
+ },
+};
+#endif
+
+static int depca_isa_probe (struct platform_device *);
+
+static int __devexit depca_isa_remove(struct platform_device *pdev)
+{
+ return depca_device_remove(&pdev->dev);
+}
+
+static struct platform_driver depca_isa_driver = {
+ .probe = depca_isa_probe,
+ .remove = __devexit_p(depca_isa_remove),
+ .driver = {
+ .name = depca_string,
+ },
+};
+
+/*
+** Miscellaneous info...
+*/
+#define DEPCA_STRLEN 16
+
+/*
+** Memory Alignment. Each descriptor is 4 longwords long. To force a
+** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
+** DESC_ALIGN. DEPCA_ALIGN aligns the start address of the private memory area
+** and hence the RX descriptor ring's first entry.
+*/
+#define DEPCA_ALIGN4 ((u_long)4 - 1) /* 1 longword align */
+#define DEPCA_ALIGN8 ((u_long)8 - 1) /* 2 longword (quadword) align */
+#define DEPCA_ALIGN DEPCA_ALIGN8 /* Keep the LANCE happy... */
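+/*
+** An offset is rounded up to the next quadword boundary with
+** (offset + DEPCA_ALIGN) & ~DEPCA_ALIGN: for example 24 stays 24,
+** while 25 through 31 all become 32.
+*/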
+
+/*
+** The DEPCA Rx and Tx ring descriptors.
+*/
+struct depca_rx_desc {
+ volatile s32 base;
+ s16 buf_length; /* This length is negative 2's complement! */
+ s16 msg_length; /* This length is "normal". */
+};
+
+struct depca_tx_desc {
+ volatile s32 base;
+ s16 length; /* This length is negative 2's complement! */
+ s16 misc; /* Errors and TDR info */
+};
+
+#define LA_MASK 0x0000ffff /* LANCE address mask for mapping network RAM
+ to LANCE memory address space */
+
+/*
+** The Lance initialization block, described in databook, in common memory.
+*/
+struct depca_init {
+ u16 mode; /* Mode register */
+ u8 phys_addr[ETH_ALEN]; /* Physical ethernet address */
+ u8 mcast_table[8]; /* Multicast Hash Table. */
+ u32 rx_ring; /* Rx ring base pointer & ring length */
+ u32 tx_ring; /* Tx ring base pointer & ring length */
+};
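+/*
+** rx_ring and tx_ring combine the ring's address in LANCE space (the
+** board RAM appears in the low 64kB of that space, hence LA_MASK) with
+** log2(number of descriptors) in bits 29-31. With 8 descriptors the
+** length field is 3, so e.g. rx_ring = ring_address | (3 << 29).
+*/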
+
+#define DEPCA_PKT_STAT_SZ 16
+#define DEPCA_PKT_BIN_SZ 128 /* Should be >=100 unless you
+ increase DEPCA_PKT_STAT_SZ */
+struct depca_private {
+ char adapter_name[DEPCA_STRLEN]; /* /proc/ioports string */
+ enum depca_type adapter; /* Adapter type */
+ enum {
+ DEPCA_BUS_MCA = 1,
+ DEPCA_BUS_ISA,
+ DEPCA_BUS_EISA,
+ } depca_bus; /* type of bus */
+ struct depca_init init_block; /* Shadow Initialization block */
+/* CPU address space fields */
+ struct depca_rx_desc __iomem *rx_ring; /* Pointer to start of RX descriptor ring */
+ struct depca_tx_desc __iomem *tx_ring; /* Pointer to start of TX descriptor ring */
+ void __iomem *rx_buff[NUM_RX_DESC]; /* CPU virt address of sh'd memory buffs */
+ void __iomem *tx_buff[NUM_TX_DESC]; /* CPU virt address of sh'd memory buffs */
+ void __iomem *sh_mem; /* CPU mapped virt address of device RAM */
+ u_long mem_start; /* Bus address of device RAM (before remap) */
+ u_long mem_len; /* device memory size */
+/* Device address space fields */
+ u_long device_ram_start; /* Start of RAM in device addr space */
+/* Offsets used in both address spaces */
+ u_long rx_ring_offset; /* Offset from start of RAM to rx_ring */
+ u_long tx_ring_offset; /* Offset from start of RAM to tx_ring */
+ u_long buffs_offset; /* LANCE Rx and Tx buffers start address. */
+/* Kernel-only (not device) fields */
+ int rx_new, tx_new; /* The next free ring entry */
+ int rx_old, tx_old; /* The ring entries to be free()ed. */
+ spinlock_t lock;
+ struct { /* Private stats counters */
+ u32 bins[DEPCA_PKT_STAT_SZ];
+ u32 unicast;
+ u32 multicast;
+ u32 broadcast;
+ u32 excessive_collisions;
+ u32 tx_underruns;
+ u32 excessive_underruns;
+ } pktStats;
+ int txRingMask; /* TX ring mask */
+ int rxRingMask; /* RX ring mask */
+ s32 rx_rlen; /* log2(rxRingMask+1) for the descriptors */
+ s32 tx_rlen; /* log2(txRingMask+1) for the descriptors */
+};
+
+/*
+** The transmit ring full condition is described by the tx_old and tx_new
+** pointers by:
+** tx_old = tx_new Empty ring
+** tx_old = tx_new+1 Full ring
+** tx_old+txRingMask = tx_new Full ring (wrapped condition)
+*/
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+ lp->tx_old+lp->txRingMask-lp->tx_new:\
+ lp->tx_old -lp->tx_new-1)
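+/*
+** For example, with txRingMask = 7 (8 descriptors): an empty ring
+** (tx_old == tx_new) yields 7 free slots, since one entry is always kept
+** unused to distinguish full from empty; tx_old = 2, tx_new = 5 yields
+** 2 + 7 - 5 = 4; and the wrapped case tx_old = 5, tx_new = 2 yields
+** 5 - 2 - 1 = 2.
+*/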
+
+/*
+** Public Functions
+*/
+static int depca_open(struct net_device *dev);
+static netdev_tx_t depca_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t depca_interrupt(int irq, void *dev_id);
+static int depca_close(struct net_device *dev);
+static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void depca_tx_timeout(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+
+/*
+** Private functions
+*/
+static void depca_init_ring(struct net_device *dev);
+static int depca_rx(struct net_device *dev);
+static int depca_tx(struct net_device *dev);
+
+static void LoadCSRs(struct net_device *dev);
+static int InitRestartDepca(struct net_device *dev);
+static int DepcaSignature(char *name, u_long paddr);
+static int DevicePresent(u_long ioaddr);
+static int get_hw_addr(struct net_device *dev);
+static void SetMulticastFilter(struct net_device *dev);
+static int load_packet(struct net_device *dev, struct sk_buff *skb);
+static void depca_dbg_open(struct net_device *dev);
+
+static u_char de1xx_irq[] __initdata = { 2, 3, 4, 5, 7, 9, 0 };
+static u_char de2xx_irq[] __initdata = { 5, 9, 10, 11, 15, 0 };
+static u_char de422_irq[] __initdata = { 5, 9, 10, 11, 0 };
+static u_char *depca_irq;
+
+static int irq;
+static int io;
+static char *adapter_name;
+static int mem; /* For loadable module assignment
+ use insmod mem=0x????? .... */
+module_param (irq, int, 0);
+module_param (io, int, 0);
+module_param (adapter_name, charp, 0);
+module_param (mem, int, 0);
+MODULE_PARM_DESC(irq, "DEPCA IRQ number");
+MODULE_PARM_DESC(io, "DEPCA I/O base address");
+MODULE_PARM_DESC(adapter_name, "DEPCA adapter name");
+MODULE_PARM_DESC(mem, "DEPCA shared memory address");
+MODULE_LICENSE("GPL");
+
+/*
+** Miscellaneous defines...
+*/
+#define STOP_DEPCA \
+ outw(CSR0, DEPCA_ADDR);\
+ outw(STOP, DEPCA_DATA)
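+/*
+** Note that STOP_DEPCA expands to two statements, so it is only safe as
+** a complete statement of its own: an unbraced "if (x) STOP_DEPCA;"
+** would guard just the first outw(). A do { ... } while (0) wrapper is
+** the usual way to make such a macro robust.
+*/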
+
+static const struct net_device_ops depca_netdev_ops = {
+ .ndo_open = depca_open,
+ .ndo_start_xmit = depca_start_xmit,
+ .ndo_stop = depca_close,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_do_ioctl = depca_ioctl,
+ .ndo_tx_timeout = depca_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __init depca_hw_init (struct net_device *dev, struct device *device)
+{
+ struct depca_private *lp;
+ int i, j, offset, netRAM, mem_len, status = 0;
+ s16 nicsr;
+ u_long ioaddr;
+ u_long mem_start;
+
+ /*
+ * We are now supposed to enter this function with the
+ * following fields filled with proper values :
+ *
+ * dev->base_addr
+ * lp->mem_start
+ * lp->depca_bus
+ * lp->adapter
+ *
+ * dev->irq can be set if known from device configuration (on
+ * MCA or EISA) or module option. Otherwise, it will be auto
+ * detected.
+ */
+
+ ioaddr = dev->base_addr;
+
+ STOP_DEPCA;
+
+ nicsr = inb(DEPCA_NICSR);
+ nicsr = ((nicsr & ~SHE & ~RBE & ~IEN) | IM);
+ outb(nicsr, DEPCA_NICSR);
+
+ if (inw(DEPCA_DATA) != STOP) {
+ return -ENXIO;
+ }
+
+ lp = netdev_priv(dev);
+ mem_start = lp->mem_start;
+
+ if (!mem_start || lp->adapter < DEPCA || lp->adapter >=unknown)
+ return -ENXIO;
+
+ printk("%s: %s at 0x%04lx",
+ dev_name(device), depca_signature[lp->adapter], ioaddr);
+
+ switch (lp->depca_bus) {
+#ifdef CONFIG_MCA
+ case DEPCA_BUS_MCA:
+ printk(" (MCA slot %d)", to_mca_device(device)->slot + 1);
+ break;
+#endif
+
+#ifdef CONFIG_EISA
+ case DEPCA_BUS_EISA:
+ printk(" (EISA slot %d)", to_eisa_device(device)->slot);
+ break;
+#endif
+
+ case DEPCA_BUS_ISA:
+ break;
+
+ default:
+ printk("Unknown DEPCA bus %d\n", lp->depca_bus);
+ return -ENXIO;
+ }
+
+ printk(", h/w address ");
+ status = get_hw_addr(dev);
+ printk("%pM", dev->dev_addr);
+ if (status != 0) {
+ printk(" which has an Ethernet PROM CRC error.\n");
+ return -ENXIO;
+ }
+
+ /* Set up the maximum amount of network RAM(kB) */
+ netRAM = ((lp->adapter != DEPCA) ? 64 : 48);
+ if ((nicsr & _128KB) && (lp->adapter == de422))
+ netRAM = 128;
+
+ /* Shared Memory Base Address */
+ if (nicsr & BUF) {
+ nicsr &= ~BS; /* DEPCA RAM in top 32k */
+ netRAM -= 32;
+
+		/* Only EISA/ISA need the start address to be re-computed */
+ if (lp->depca_bus != DEPCA_BUS_MCA)
+ mem_start += 0x8000;
+ }
+
+	mem_len = NUM_RX_DESC * (sizeof(struct depca_rx_desc) + RX_BUFF_SZ) +
+		  NUM_TX_DESC * (sizeof(struct depca_tx_desc) + TX_BUFF_SZ) +
+		  sizeof(struct depca_init);
+	if (mem_len > (netRAM << 10)) {
+ printk(",\n requests %dkB RAM: only %dkB is available!\n", (mem_len >> 10), netRAM);
+ return -ENXIO;
+ }
+
+ printk(",\n has %dkB RAM at 0x%.5lx", netRAM, mem_start);
+
+ /* Enable the shadow RAM. */
+ if (lp->adapter != DEPCA) {
+ nicsr |= SHE;
+ outb(nicsr, DEPCA_NICSR);
+ }
+
+ spin_lock_init(&lp->lock);
+ sprintf(lp->adapter_name, "%s (%s)",
+ depca_signature[lp->adapter], dev_name(device));
+ status = -EBUSY;
+
+ /* Initialisation Block */
+ if (!request_mem_region (mem_start, mem_len, lp->adapter_name)) {
+ printk(KERN_ERR "depca: cannot request ISA memory, aborting\n");
+ goto out_priv;
+ }
+
+ status = -EIO;
+ lp->sh_mem = ioremap(mem_start, mem_len);
+ if (lp->sh_mem == NULL) {
+ printk(KERN_ERR "depca: cannot remap ISA memory, aborting\n");
+ goto out1;
+ }
+
+ lp->mem_start = mem_start;
+ lp->mem_len = mem_len;
+ lp->device_ram_start = mem_start & LA_MASK;
+
+ offset = 0;
+ offset += sizeof(struct depca_init);
+
+ /* Tx & Rx descriptors (aligned to a quadword boundary) */
+ offset = (offset + DEPCA_ALIGN) & ~DEPCA_ALIGN;
+ lp->rx_ring = lp->sh_mem + offset;
+ lp->rx_ring_offset = offset;
+
+ offset += (sizeof(struct depca_rx_desc) * NUM_RX_DESC);
+ lp->tx_ring = lp->sh_mem + offset;
+ lp->tx_ring_offset = offset;
+
+ offset += (sizeof(struct depca_tx_desc) * NUM_TX_DESC);
+
+ lp->buffs_offset = offset;
+
+ /* Finish initialising the ring information. */
+ lp->rxRingMask = NUM_RX_DESC - 1;
+ lp->txRingMask = NUM_TX_DESC - 1;
+
+ /* Calculate Tx/Rx RLEN size for the descriptors. */
+ for (i = 0, j = lp->rxRingMask; j > 0; i++) {
+ j >>= 1;
+ }
+ lp->rx_rlen = (s32) (i << 29);
+ for (i = 0, j = lp->txRingMask; j > 0; i++) {
+ j >>= 1;
+ }
+ lp->tx_rlen = (s32) (i << 29);
+
+ /* Load the initialisation block */
+ depca_init_ring(dev);
+
+ /* Initialise the control and status registers */
+ LoadCSRs(dev);
+
+ /* Enable DEPCA board interrupts for autoprobing */
+ nicsr = ((nicsr & ~IM) | IEN);
+ outb(nicsr, DEPCA_NICSR);
+
+	/* To auto-IRQ we enable the initialization-done and DMA error
+	   interrupts. For now we will always get a DMA error. */
+ if (dev->irq < 2) {
+ unsigned char irqnum;
+ unsigned long irq_mask, delay;
+
+ irq_mask = probe_irq_on();
+
+ /* Assign the correct irq list */
+ switch (lp->adapter) {
+ case DEPCA:
+ case de100:
+ case de101:
+ depca_irq = de1xx_irq;
+ break;
+ case de200:
+ case de201:
+ case de202:
+ case de210:
+ case de212:
+ depca_irq = de2xx_irq;
+ break;
+ case de422:
+ depca_irq = de422_irq;
+ break;
+
+ default:
+ break; /* Not reached */
+ }
+
+ /* Trigger an initialization just for the interrupt. */
+ outw(INEA | INIT, DEPCA_DATA);
+
+ delay = jiffies + HZ/50;
+ while (time_before(jiffies, delay))
+ yield();
+
+ irqnum = probe_irq_off(irq_mask);
+
+ status = -ENXIO;
+ if (!irqnum) {
+ printk(" and failed to detect IRQ line.\n");
+ goto out2;
+ } else {
+ for (dev->irq = 0, i = 0; (depca_irq[i]) && (!dev->irq); i++)
+ if (irqnum == depca_irq[i]) {
+ dev->irq = irqnum;
+ printk(" and uses IRQ%d.\n", dev->irq);
+ }
+
+ if (!dev->irq) {
+ printk(" but incorrect IRQ line detected.\n");
+ goto out2;
+ }
+ }
+ } else {
+ printk(" and assigned IRQ%d.\n", dev->irq);
+ }
+
+ if (depca_debug > 1) {
+ printk(version);
+ }
+
+ /* The DEPCA-specific entries in the device structure. */
+ dev->netdev_ops = &depca_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ dev->mem_start = 0;
+
+ dev_set_drvdata(device, dev);
+ SET_NETDEV_DEV (dev, device);
+
+ status = register_netdev(dev);
+ if (status == 0)
+ return 0;
+out2:
+ iounmap(lp->sh_mem);
+out1:
+ release_mem_region (mem_start, mem_len);
+out_priv:
+ return status;
+}
+
+
+static int depca_open(struct net_device *dev)
+{
+ struct depca_private *lp = netdev_priv(dev);
+ u_long ioaddr = dev->base_addr;
+ s16 nicsr;
+ int status = 0;
+
+ STOP_DEPCA;
+ nicsr = inb(DEPCA_NICSR);
+
+ /* Make sure the shadow RAM is enabled */
+ if (lp->adapter != DEPCA) {
+ nicsr |= SHE;
+ outb(nicsr, DEPCA_NICSR);
+ }
+
+ /* Re-initialize the DEPCA... */
+ depca_init_ring(dev);
+ LoadCSRs(dev);
+
+ depca_dbg_open(dev);
+
+ if (request_irq(dev->irq, depca_interrupt, 0, lp->adapter_name, dev)) {
+ printk("depca_open(): Requested IRQ%d is busy\n", dev->irq);
+ status = -EAGAIN;
+ } else {
+
+ /* Enable DEPCA board interrupts and turn off LED */
+ nicsr = ((nicsr & ~IM & ~LED) | IEN);
+ outb(nicsr, DEPCA_NICSR);
+ outw(CSR0, DEPCA_ADDR);
+
+ netif_start_queue(dev);
+
+ status = InitRestartDepca(dev);
+
+ if (depca_debug > 1) {
+ printk("CSR0: 0x%4.4x\n", inw(DEPCA_DATA));
+ printk("nicsr: 0x%02x\n", inb(DEPCA_NICSR));
+ }
+ }
+ return status;
+}
+
+/* Initialize the lance Rx and Tx descriptor rings. */
+static void depca_init_ring(struct net_device *dev)
+{
+ struct depca_private *lp = netdev_priv(dev);
+ u_int i;
+ u_long offset;
+
+ /* Lock out other processes whilst setting up the hardware */
+ netif_stop_queue(dev);
+
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ /* Initialize the base address and length of each buffer in the ring */
+ for (i = 0; i <= lp->rxRingMask; i++) {
+ offset = lp->buffs_offset + i * RX_BUFF_SZ;
+ writel((lp->device_ram_start + offset) | R_OWN, &lp->rx_ring[i].base);
+ writew(-RX_BUFF_SZ, &lp->rx_ring[i].buf_length);
+ lp->rx_buff[i] = lp->sh_mem + offset;
+ }
+
+ for (i = 0; i <= lp->txRingMask; i++) {
+ offset = lp->buffs_offset + (i + lp->rxRingMask + 1) * TX_BUFF_SZ;
+ writel((lp->device_ram_start + offset) & 0x00ffffff, &lp->tx_ring[i].base);
+ lp->tx_buff[i] = lp->sh_mem + offset;
+ }
+
+ /* Set up the initialization block */
+ lp->init_block.rx_ring = (lp->device_ram_start + lp->rx_ring_offset) | lp->rx_rlen;
+ lp->init_block.tx_ring = (lp->device_ram_start + lp->tx_ring_offset) | lp->tx_rlen;
+
+ SetMulticastFilter(dev);
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ }
+
+ lp->init_block.mode = 0x0000; /* Enable the Tx and Rx */
+}
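+/*
+** The resulting shared memory layout is: initialisation block, quadword
+** aligned Rx then Tx descriptor rings, then the Rx buffers followed by
+** the Tx buffers. The Tx buffer offsets only clear the Rx area because
+** RX_BUFF_SZ == TX_BUFF_SZ.
+*/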
+
+
+static void depca_tx_timeout(struct net_device *dev)
+{
+ u_long ioaddr = dev->base_addr;
+
+ printk("%s: transmit timed out, status %04x, resetting.\n", dev->name, inw(DEPCA_DATA));
+
+ STOP_DEPCA;
+ depca_init_ring(dev);
+ LoadCSRs(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+ InitRestartDepca(dev);
+}
+
+
+/*
+** Writes a socket buffer to TX descriptor ring and starts transmission
+*/
+static netdev_tx_t depca_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct depca_private *lp = netdev_priv(dev);
+ u_long ioaddr = dev->base_addr;
+ int status = 0;
+
+ /* Transmitter timeout, serious problems. */
+ if (skb->len < 1)
+ goto out;
+
+ if (skb_padto(skb, ETH_ZLEN))
+ goto out;
+
+ netif_stop_queue(dev);
+
+ if (TX_BUFFS_AVAIL) { /* Fill in a Tx ring entry */
+ status = load_packet(dev, skb);
+
+ if (!status) {
+ /* Trigger an immediate send demand. */
+ outw(CSR0, DEPCA_ADDR);
+ outw(INEA | TDMD, DEPCA_DATA);
+
+ dev_kfree_skb(skb);
+ }
+ if (TX_BUFFS_AVAIL)
+ netif_start_queue(dev);
+ } else
+ status = NETDEV_TX_LOCKED;
+
+ out:
+ return status;
+}
+
+/*
+** The DEPCA interrupt handler.
+*/
+static irqreturn_t depca_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct depca_private *lp;
+ s16 csr0, nicsr;
+ u_long ioaddr;
+
+ if (dev == NULL) {
+ printk("depca_interrupt(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+
+ lp = netdev_priv(dev);
+ ioaddr = dev->base_addr;
+
+ spin_lock(&lp->lock);
+
+ /* mask the DEPCA board interrupts and turn on the LED */
+ nicsr = inb(DEPCA_NICSR);
+ nicsr |= (IM | LED);
+ outb(nicsr, DEPCA_NICSR);
+
+ outw(CSR0, DEPCA_ADDR);
+ csr0 = inw(DEPCA_DATA);
+
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outw(csr0 & INTE, DEPCA_DATA);
+
+ if (csr0 & RINT) /* Rx interrupt (packet arrived) */
+ depca_rx(dev);
+
+ if (csr0 & TINT) /* Tx interrupt (packet sent) */
+ depca_tx(dev);
+
+ /* Any resources available? */
+ if ((TX_BUFFS_AVAIL >= 0) && netif_queue_stopped(dev)) {
+ netif_wake_queue(dev);
+ }
+
+ /* Unmask the DEPCA board interrupts and turn off the LED */
+ nicsr = (nicsr & ~IM & ~LED);
+ outb(nicsr, DEPCA_NICSR);
+
+ spin_unlock(&lp->lock);
+ return IRQ_HANDLED;
+}
+
+/* Called with lp->lock held */
+static int depca_rx(struct net_device *dev)
+{
+ struct depca_private *lp = netdev_priv(dev);
+ int i, entry;
+ s32 status;
+
+ for (entry = lp->rx_new; !(readl(&lp->rx_ring[entry].base) & R_OWN); entry = lp->rx_new) {
+ status = readl(&lp->rx_ring[entry].base) >> 16;
+ if (status & R_STP) { /* Remember start of frame */
+ lp->rx_old = entry;
+ }
+ if (status & R_ENP) { /* Valid frame status */
+ if (status & R_ERR) { /* There was an error. */
+ dev->stats.rx_errors++; /* Update the error stats. */
+ if (status & R_FRAM)
+ dev->stats.rx_frame_errors++;
+ if (status & R_OFLO)
+ dev->stats.rx_over_errors++;
+ if (status & R_CRC)
+ dev->stats.rx_crc_errors++;
+ if (status & R_BUFF)
+ dev->stats.rx_fifo_errors++;
+ } else {
+ short len, pkt_len = readw(&lp->rx_ring[entry].msg_length) - 4;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len + 2);
+ if (skb != NULL) {
+ unsigned char *buf;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ buf = skb_put(skb, pkt_len);
+ if (entry < lp->rx_old) { /* Wrapped buffer */
+ len = (lp->rxRingMask - lp->rx_old + 1) * RX_BUFF_SZ;
+ memcpy_fromio(buf, lp->rx_buff[lp->rx_old], len);
+ memcpy_fromio(buf + len, lp->rx_buff[0], pkt_len - len);
+ } else { /* Linear buffer */
+ memcpy_fromio(buf, lp->rx_buff[lp->rx_old], pkt_len);
+ }
+
+ /*
+ ** Notify the upper protocol layers that there is another
+ ** packet to handle
+ */
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+
+ /*
+ ** Update stats
+ */
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ for (i = 1; i < DEPCA_PKT_STAT_SZ - 1; i++) {
+ if (pkt_len < (i * DEPCA_PKT_BIN_SZ)) {
+ lp->pktStats.bins[i]++;
+ i = DEPCA_PKT_STAT_SZ;
+ }
+ }
+ if (is_multicast_ether_addr(buf)) {
+ if (is_broadcast_ether_addr(buf)) {
+ lp->pktStats.broadcast++;
+ } else {
+ lp->pktStats.multicast++;
+ }
+ } else if (compare_ether_addr(buf, dev->dev_addr) == 0) {
+ lp->pktStats.unicast++;
+ }
+
+ lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
+ if (lp->pktStats.bins[0] == 0) { /* Reset counters */
+ memset((char *) &lp->pktStats, 0, sizeof(lp->pktStats));
+ }
+ } else {
+ printk("%s: Memory squeeze, deferring packet.\n", dev->name);
+ dev->stats.rx_dropped++; /* Really, deferred. */
+ break;
+ }
+ }
+ /* Change buffer ownership for this last frame, back to the adapter */
+ for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) {
+ writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base);
+ }
+ writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
+ }
+
+ /*
+ ** Update entry information
+ */
+ lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
+ }
+
+ return 0;
+}
+
+/*
+** Buffer sent - check for buffer errors.
+** Called with lp->lock held
+*/
+static int depca_tx(struct net_device *dev)
+{
+ struct depca_private *lp = netdev_priv(dev);
+ int entry;
+ s32 status;
+ u_long ioaddr = dev->base_addr;
+
+ for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
+ status = readl(&lp->tx_ring[entry].base) >> 16;
+
+ if (status < 0) { /* Packet not yet sent! */
+ break;
+ } else if (status & T_ERR) { /* An error occurred. */
+ status = readl(&lp->tx_ring[entry].misc);
+ dev->stats.tx_errors++;
+ if (status & TMD3_RTRY)
+ dev->stats.tx_aborted_errors++;
+ if (status & TMD3_LCAR)
+ dev->stats.tx_carrier_errors++;
+ if (status & TMD3_LCOL)
+ dev->stats.tx_window_errors++;
+ if (status & TMD3_UFLO)
+ dev->stats.tx_fifo_errors++;
+ if (status & (TMD3_BUFF | TMD3_UFLO)) {
+ /* Trigger an immediate send demand. */
+ outw(CSR0, DEPCA_ADDR);
+ outw(INEA | TDMD, DEPCA_DATA);
+ }
+ } else if (status & (T_MORE | T_ONE)) {
+ dev->stats.collisions++;
+ } else {
+ dev->stats.tx_packets++;
+ }
+
+ /* Update all the pointers */
+ lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
+ }
+
+ return 0;
+}
+
+static int depca_close(struct net_device *dev)
+{
+ struct depca_private *lp = netdev_priv(dev);
+ s16 nicsr;
+ u_long ioaddr = dev->base_addr;
+
+ netif_stop_queue(dev);
+
+ outw(CSR0, DEPCA_ADDR);
+
+ if (depca_debug > 1) {
+ printk("%s: Shutting down ethercard, status was %2.2x.\n", dev->name, inw(DEPCA_DATA));
+ }
+
+ /*
+ ** We stop the DEPCA here -- it occasionally polls
+ ** memory if we don't.
+ */
+ outw(STOP, DEPCA_DATA);
+
+ /*
+ ** Give back the ROM in case the user wants to go to DOS
+ */
+ if (lp->adapter != DEPCA) {
+ nicsr = inb(DEPCA_NICSR);
+ nicsr &= ~SHE;
+ outb(nicsr, DEPCA_NICSR);
+ }
+
+ /*
+ ** Free the associated irq
+ */
+ free_irq(dev->irq, dev);
+ return 0;
+}
+
+static void LoadCSRs(struct net_device *dev)
+{
+ struct depca_private *lp = netdev_priv(dev);
+ u_long ioaddr = dev->base_addr;
+
+ outw(CSR1, DEPCA_ADDR); /* initialisation block address LSW */
+ outw((u16) lp->device_ram_start, DEPCA_DATA);
+ outw(CSR2, DEPCA_ADDR); /* initialisation block address MSW */
+ outw((u16) (lp->device_ram_start >> 16), DEPCA_DATA);
+ outw(CSR3, DEPCA_ADDR); /* ALE control */
+ outw(ACON, DEPCA_DATA);
+
+ outw(CSR0, DEPCA_ADDR); /* Point back to CSR0 */
+}
+
+static int InitRestartDepca(struct net_device *dev)
+{
+ struct depca_private *lp = netdev_priv(dev);
+ u_long ioaddr = dev->base_addr;
+ int i, status = 0;
+
+ /* Copy the shadow init_block to shared memory */
+ memcpy_toio(lp->sh_mem, &lp->init_block, sizeof(struct depca_init));
+
+ outw(CSR0, DEPCA_ADDR); /* point back to CSR0 */
+ outw(INIT, DEPCA_DATA); /* initialize DEPCA */
+
+ /* wait for lance to complete initialisation */
+ for (i = 0; (i < 100) && !(inw(DEPCA_DATA) & IDON); i++);
+
+ if (i != 100) {
+ /* clear IDON by writing a "1", enable interrupts and start lance */
+ outw(IDON | INEA | STRT, DEPCA_DATA);
+ if (depca_debug > 2) {
+ printk("%s: DEPCA open after %d ticks, init block 0x%08lx csr0 %4.4x.\n", dev->name, i, lp->mem_start, inw(DEPCA_DATA));
+ }
+ } else {
+ printk("%s: DEPCA unopen after %d ticks, init block 0x%08lx csr0 %4.4x.\n", dev->name, i, lp->mem_start, inw(DEPCA_DATA));
+ status = -1;
+ }
+
+ return status;
+}
+
+/*
+** Set or clear the multicast filter for this adaptor.
+*/
+static void set_multicast_list(struct net_device *dev)
+{
+ struct depca_private *lp = netdev_priv(dev);
+ u_long ioaddr = dev->base_addr;
+
+ netif_stop_queue(dev);
+	while (lp->tx_old != lp->tx_new)
+		cpu_relax(); /* Wait for the ring to empty */
+
+ STOP_DEPCA; /* Temporarily stop the depca. */
+ depca_init_ring(dev); /* Initialize the descriptor rings */
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous mode */
+ lp->init_block.mode |= PROM;
+ } else {
+ SetMulticastFilter(dev);
+ lp->init_block.mode &= ~PROM; /* Unset promiscuous mode */
+ }
+
+ LoadCSRs(dev); /* Reload CSR3 */
+ InitRestartDepca(dev); /* Resume normal operation. */
+ netif_start_queue(dev); /* Unlock the TX ring */
+}
+
+/*
+** Calculate the hash code and update the logical address filter
+** from a list of ethernet multicast addresses.
+** Big endian crc one liner is mine, all mine, ha ha ha ha!
+** LANCE calculates its hash codes big endian.
+*/
+static void SetMulticastFilter(struct net_device *dev)
+{
+ struct depca_private *lp = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ int i, j, bit, byte;
+ u16 hashcode;
+ u32 crc;
+
+ if (dev->flags & IFF_ALLMULTI) { /* Set all multicast bits */
+ for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) {
+ lp->init_block.mcast_table[i] = (char) 0xff;
+ }
+ } else {
+ for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) { /* Clear the multicast table */
+ lp->init_block.mcast_table[i] = 0;
+ }
+ /* Add multicast addresses */
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc(ETH_ALEN, ha->addr);
+ hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */
+ for (j = 0; j < 5; j++) { /* ... in reverse order. */
+ hashcode = (hashcode << 1) | ((crc >>= 1) & 1);
+ }
+
+ byte = hashcode >> 3; /* bit[3-5] -> byte in filter */
+ bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
+ lp->init_block.mcast_table[byte] |= bit;
+ }
+ }
+}
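+/*
+** For example, if the CRC of an address has 0x34 (binary 110100) in its
+** six least significant bits, the loop above reverses them to give the
+** hashcode 001011 = 11, selecting byte 1, bit 3 of the filter, i.e.
+** mcast_table[1] |= 0x08.
+*/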
+
+static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
+{
+ int status = 0;
+
+ if (!request_region (ioaddr, DEPCA_TOTAL_SIZE, depca_string)) {
+ status = -EBUSY;
+ goto out;
+ }
+
+ if (DevicePresent(ioaddr)) {
+ status = -ENODEV;
+ goto out_release;
+ }
+
+ if (!(*devp = alloc_etherdev (sizeof (struct depca_private)))) {
+ status = -ENOMEM;
+ goto out_release;
+ }
+
+ return 0;
+
+ out_release:
+ release_region (ioaddr, DEPCA_TOTAL_SIZE);
+ out:
+ return status;
+}
+
+#ifdef CONFIG_MCA
+/*
+** Microchannel bus I/O device probe
+*/
+static int __init depca_mca_probe(struct device *device)
+{
+ unsigned char pos[2];
+ unsigned char where;
+ unsigned long iobase, mem_start;
+ int irq, err;
+ struct mca_device *mdev = to_mca_device (device);
+ struct net_device *dev;
+ struct depca_private *lp;
+
+ /*
+ ** Search for the adapter. If an address has been given, search
+ ** specifically for the card at that address. Otherwise find the
+ ** first card in the system.
+ */
+
+ pos[0] = mca_device_read_stored_pos(mdev, 2);
+ pos[1] = mca_device_read_stored_pos(mdev, 3);
+
+ /*
+ ** IO of card is handled by bits 1 and 2 of pos0.
+ **
+ ** bit2 bit1 IO
+ ** 0 0 0x2c00
+ ** 0 1 0x2c10
+ ** 1 0 0x2c20
+ ** 1 1 0x2c30
+ */
+ where = (pos[0] & 6) >> 1;
+ iobase = 0x2c00 + (0x10 * where);
+
+ /*
+ ** Found the adapter we were looking for. Now start setting it up.
+ **
+ ** First work on decoding the IRQ. It's stored in the lower 4 bits
+ ** of pos1. Bits are as follows (from the ADF file):
+ **
+ ** Bits
+ ** 3 2 1 0 IRQ
+ ** --------------------
+ ** 0 0 1 0 5
+ ** 0 0 0 1 9
+ ** 0 1 0 0 10
+ ** 1 0 0 0 11
+ */
+ where = pos[1] & 0x0f;
+ switch (where) {
+ case 1:
+ irq = 9;
+ break;
+ case 2:
+ irq = 5;
+ break;
+ case 4:
+ irq = 10;
+ break;
+ case 8:
+ irq = 11;
+ break;
+ default:
+ printk("%s: mca_probe IRQ error. You should never get here (%d).\n", mdev->name, where);
+ return -EINVAL;
+ }
+
+ /*
+ ** Shared memory address of adapter is stored in bits 3-5 of pos0.
+ ** They are mapped as follows:
+ **
+ ** Bit
+ ** 5 4 3 Memory Addresses
+ ** 0 0 0 C0000-CFFFF (64K)
+ ** 1 0 0 C8000-CFFFF (32K)
+ ** 0 0 1 D0000-DFFFF (64K)
+ ** 1 0 1 D8000-DFFFF (32K)
+ ** 0 1 0 E0000-EFFFF (64K)
+ ** 1 1 0 E8000-EFFFF (32K)
+ */
+ where = (pos[0] & 0x18) >> 3;
+ mem_start = 0xc0000 + (where * 0x10000);
+ if (pos[0] & 0x20) {
+ mem_start += 0x8000;
+ }
+
+ /* claim the slot */
+ strncpy(mdev->name, depca_mca_adapter_name[mdev->index],
+ sizeof(mdev->name));
+ mca_device_set_claim(mdev, 1);
+
+ /*
+ ** Get everything allocated and initialized... (almost just
+ ** like the ISA and EISA probes)
+ */
+ irq = mca_device_transform_irq(mdev, irq);
+ iobase = mca_device_transform_ioport(mdev, iobase);
+
+ if ((err = depca_common_init (iobase, &dev)))
+ goto out_unclaim;
+
+ dev->irq = irq;
+ dev->base_addr = iobase;
+ lp = netdev_priv(dev);
+ lp->depca_bus = DEPCA_BUS_MCA;
+ lp->adapter = depca_mca_adapter_type[mdev->index];
+ lp->mem_start = mem_start;
+
+ if ((err = depca_hw_init(dev, device)))
+ goto out_free;
+
+ return 0;
+
+ out_free:
+ free_netdev (dev);
+ release_region (iobase, DEPCA_TOTAL_SIZE);
+ out_unclaim:
+ mca_device_set_claim(mdev, 0);
+
+ return err;
+}
+#endif
+
+/*
+** ISA bus I/O device probe
+*/
+
+static void __init depca_platform_probe (void)
+{
+ int i;
+ struct platform_device *pldev;
+
+ for (i = 0; depca_io_ports[i].iobase; i++) {
+ depca_io_ports[i].device = NULL;
+
+ /* if an address has been specified on the command
+ * line, use it (if valid) */
+ if (io && io != depca_io_ports[i].iobase)
+ continue;
+
+ pldev = platform_device_alloc(depca_string, i);
+ if (!pldev)
+ continue;
+
+ pldev->dev.platform_data = (void *) depca_io_ports[i].iobase;
+ depca_io_ports[i].device = pldev;
+
+ if (platform_device_add(pldev)) {
+ depca_io_ports[i].device = NULL;
+ pldev->dev.platform_data = NULL;
+ platform_device_put(pldev);
+ continue;
+ }
+
+ if (!pldev->dev.driver) {
+ /* The driver was not bound to this device, there was
+ * no hardware at this address. Unregister it, as the
+ * release function will take care of freeing the
+ * allocated structure */
+
+ depca_io_ports[i].device = NULL;
+ pldev->dev.platform_data = NULL;
+ platform_device_unregister (pldev);
+ }
+ }
+}
+
+static enum depca_type __init depca_shmem_probe (ulong *mem_start)
+{
+ u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES;
+ enum depca_type adapter = unknown;
+ int i;
+
+ for (i = 0; mem_base[i]; i++) {
+ *mem_start = mem ? mem : mem_base[i];
+ adapter = DepcaSignature (adapter_name, *mem_start);
+ if (adapter != unknown)
+ break;
+ }
+
+ return adapter;
+}
+
+static int __devinit depca_isa_probe (struct platform_device *device)
+{
+ struct net_device *dev;
+ struct depca_private *lp;
+ u_long ioaddr, mem_start = 0;
+ enum depca_type adapter = unknown;
+ int status = 0;
+
+ ioaddr = (u_long) device->dev.platform_data;
+
+ if ((status = depca_common_init (ioaddr, &dev)))
+ goto out;
+
+ adapter = depca_shmem_probe (&mem_start);
+
+ if (adapter == unknown) {
+ status = -ENODEV;
+ goto out_free;
+ }
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq; /* Use whatever value the user gave
+ * us, and 0 if he didn't. */
+ lp = netdev_priv(dev);
+ lp->depca_bus = DEPCA_BUS_ISA;
+ lp->adapter = adapter;
+ lp->mem_start = mem_start;
+
+ if ((status = depca_hw_init(dev, &device->dev)))
+ goto out_free;
+
+ return 0;
+
+ out_free:
+ free_netdev (dev);
+ release_region (ioaddr, DEPCA_TOTAL_SIZE);
+ out:
+ return status;
+}
+
+/*
+** EISA callbacks from sysfs.
+*/
+
+#ifdef CONFIG_EISA
+static int __init depca_eisa_probe (struct device *device)
+{
+ enum depca_type adapter = unknown;
+ struct eisa_device *edev;
+ struct net_device *dev;
+ struct depca_private *lp;
+ u_long ioaddr, mem_start;
+ int status = 0;
+
+ edev = to_eisa_device (device);
+ ioaddr = edev->base_addr + DEPCA_EISA_IO_PORTS;
+
+ if ((status = depca_common_init (ioaddr, &dev)))
+ goto out;
+
+ /* It would have been nice to get card configuration from the
+ * card. Unfortunately, this register is write-only (shares
+	 * its address with the ethernet prom)... As we don't parse
+ * the EISA configuration structures (yet... :-), just rely on
+ * the ISA probing to sort it out... */
+
+ adapter = depca_shmem_probe (&mem_start);
+ if (adapter == unknown) {
+ status = -ENODEV;
+ goto out_free;
+ }
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ lp = netdev_priv(dev);
+ lp->depca_bus = DEPCA_BUS_EISA;
+ lp->adapter = edev->id.driver_data;
+ lp->mem_start = mem_start;
+
+ if ((status = depca_hw_init(dev, device)))
+ goto out_free;
+
+ return 0;
+
+ out_free:
+ free_netdev (dev);
+ release_region (ioaddr, DEPCA_TOTAL_SIZE);
+ out:
+ return status;
+}
+#endif
+
+static int __devexit depca_device_remove (struct device *device)
+{
+ struct net_device *dev;
+ struct depca_private *lp;
+ int bus;
+
+ dev = dev_get_drvdata(device);
+ lp = netdev_priv(dev);
+
+ unregister_netdev (dev);
+ iounmap (lp->sh_mem);
+ release_mem_region (lp->mem_start, lp->mem_len);
+ release_region (dev->base_addr, DEPCA_TOTAL_SIZE);
+ bus = lp->depca_bus;
+ free_netdev (dev);
+
+ return 0;
+}
+
+/*
+** Look for a particular board name in the on-board Remote Diagnostics
+** and Boot (readb) ROM. This will also give us a clue to the network RAM
+** base address.
+*/
+static int __init DepcaSignature(char *name, u_long base_addr)
+{
+ u_int i, j, k;
+ void __iomem *ptr;
+ char tmpstr[16];
+ u_long prom_addr = base_addr + 0xc000;
+ u_long mem_addr = base_addr + 0x8000; /* 32KB */
+
+ /* Can't reserve the prom region, it is already marked as
+ * used, at least on x86. Instead, reserve a memory region a
+ * board would certainly use. If it works, go ahead. If not,
+ * run like hell... */
+
+ if (!request_mem_region (mem_addr, 16, depca_string))
+ return unknown;
+
+ /* Copy the first 16 bytes of ROM */
+
+ ptr = ioremap(prom_addr, 16);
+ if (ptr == NULL) {
+ printk(KERN_ERR "depca: I/O remap failed at %lx\n", prom_addr);
+ return unknown;
+ }
+ for (i = 0; i < 16; i++) {
+ tmpstr[i] = readb(ptr + i);
+ }
+ iounmap(ptr);
+
+ release_mem_region (mem_addr, 16);
+
+ /* Check if PROM contains a valid string */
+ for (i = 0; *depca_signature[i] != '\0'; i++) {
+ for (j = 0, k = 0; j < 16 && k < strlen(depca_signature[i]); j++) {
+ if (depca_signature[i][k] == tmpstr[j]) { /* track signature */
+ k++;
+ } else { /* lost signature; begin search again */
+ k = 0;
+ }
+ }
+ if (k == strlen(depca_signature[i]))
+ break;
+ }
+
+ /* Check if name string is valid, provided there's no PROM */
+ if (name && *name && (i == unknown)) {
+ for (i = 0; *depca_signature[i] != '\0'; i++) {
+ if (strcmp(name, depca_signature[i]) == 0)
+ break;
+ }
+ }
+
+ return i;
+}
+
+/*
+** Look for a special sequence in the Ethernet station address PROM that
+** is common across all DEPCA products. Note that the original DEPCA needs
+** its ROM address counter to be initialized and enabled. Only enable
+** if the first address octet is a 0x08 - this minimises the chances of
+** messing around with some other hardware, but it assumes that this DEPCA
+** card initialized itself correctly.
+**
+** Search the Ethernet address ROM for the signature. Since the ROM address
+** counter can start at an arbitrary point, the search must include the entire
+** probe sequence length plus the (length_of_the_signature - 1).
+** Stop the search IMMEDIATELY after the signature is found so that the
+** PROM address counter is correctly positioned at the start of the
+** ethernet address for later read out.
+*/
+static int __init DevicePresent(u_long ioaddr)
+{
+ union {
+ struct {
+ u32 a;
+ u32 b;
+ } llsig;
+ char Sig[sizeof(u32) << 1];
+	} dev;
+ short sigLength = 0;
+ s8 data;
+ s16 nicsr;
+ int i, j, status = 0;
+
+ data = inb(DEPCA_PROM); /* clear counter on DEPCA */
+ data = inb(DEPCA_PROM); /* read data */
+
+ if (data == 0x08) { /* Enable counter on DEPCA */
+ nicsr = inb(DEPCA_NICSR);
+ nicsr |= AAC;
+ outb(nicsr, DEPCA_NICSR);
+ }
+
+ dev.llsig.a = ETH_PROM_SIG;
+ dev.llsig.b = ETH_PROM_SIG;
+ sigLength = sizeof(u32) << 1;
+
+ for (i = 0, j = 0; j < sigLength && i < PROBE_LENGTH + sigLength - 1; i++) {
+ data = inb(DEPCA_PROM);
+ if (dev.Sig[j] == data) { /* track signature */
+ j++;
+ } else { /* lost signature; begin search again */
+ if (data == dev.Sig[0]) { /* rare case.... */
+ j = 1;
+ } else {
+ j = 0;
+ }
+ }
+ }
+
+ if (j != sigLength) {
+ status = -ENODEV; /* search failed */
+ }
+
+ return status;
+}
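+/*
+** The byte sequence searched for above is FF 00 55 AA FF 00 55 AA: this
+** is how the two ETH_PROM_SIG longwords appear through the Sig[] union
+** on the little endian machines this driver runs on.
+*/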
+
+/*
+** The DE100 and DE101 PROM accesses were made non-standard for some bizarre
+** reason: access the upper half of the PROM with x=0; access the lower half
+** with x=1.
+*/
+static int __init get_hw_addr(struct net_device *dev)
+{
+ u_long ioaddr = dev->base_addr;
+ struct depca_private *lp = netdev_priv(dev);
+ int i, k, tmp, status = 0;
+ u_short j, x, chksum;
+
+ x = (((lp->adapter == de100) || (lp->adapter == de101)) ? 1 : 0);
+
+ for (i = 0, k = 0, j = 0; j < 3; j++) {
+ k <<= 1;
+ if (k > 0xffff)
+ k -= 0xffff;
+
+ k += (u_char) (tmp = inb(DEPCA_PROM + x));
+ dev->dev_addr[i++] = (u_char) tmp;
+ k += (u_short) ((tmp = inb(DEPCA_PROM + x)) << 8);
+ dev->dev_addr[i++] = (u_char) tmp;
+
+ if (k > 0xffff)
+ k -= 0xffff;
+ }
+ if (k == 0xffff)
+ k = 0;
+
+ chksum = (u_char) inb(DEPCA_PROM + x);
+ chksum |= (u_short) (inb(DEPCA_PROM + x) << 8);
+ if (k != chksum)
+ status = -1;
+
+ return status;
+}
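+/*
+** The loop above implements the PROM checksum: the three 16-bit words of
+** the station address are summed with a rotate left between words and
+** the carries folded back in modulo 0xffff; the result must match the
+** 16-bit checksum word that follows the address in the PROM.
+*/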
+
+/*
+** Load a packet into the shared memory
+*/
+static int load_packet(struct net_device *dev, struct sk_buff *skb)
+{
+ struct depca_private *lp = netdev_priv(dev);
+ int i, entry, end, len, status = NETDEV_TX_OK;
+
+ entry = lp->tx_new; /* Ring around buffer number. */
+ end = (entry + (skb->len - 1) / TX_BUFF_SZ) & lp->txRingMask;
+ if (!(readl(&lp->tx_ring[end].base) & T_OWN)) { /* Enough room? */
+ /*
+ ** Caution: the write order is important here... don't set up the
+ ** ownership rights until all the other information is in place.
+ */
+ if (end < entry) { /* wrapped buffer */
+ len = (lp->txRingMask - entry + 1) * TX_BUFF_SZ;
+ memcpy_toio(lp->tx_buff[entry], skb->data, len);
+ memcpy_toio(lp->tx_buff[0], skb->data + len, skb->len - len);
+ } else { /* linear buffer */
+ memcpy_toio(lp->tx_buff[entry], skb->data, skb->len);
+ }
+
+ /* set up the buffer descriptors */
+ len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
+ for (i = entry; i != end; i = (i+1) & lp->txRingMask) {
+ /* clean out flags */
+ writel(readl(&lp->tx_ring[i].base) & ~T_FLAGS, &lp->tx_ring[i].base);
+ writew(0x0000, &lp->tx_ring[i].misc); /* clears other error flags */
+ writew(-TX_BUFF_SZ, &lp->tx_ring[i].length); /* packet length in buffer */
+ len -= TX_BUFF_SZ;
+ }
+ /* clean out flags */
+ writel(readl(&lp->tx_ring[end].base) & ~T_FLAGS, &lp->tx_ring[end].base);
+ writew(0x0000, &lp->tx_ring[end].misc); /* clears other error flags */
+ writew(-len, &lp->tx_ring[end].length); /* packet length in last buff */
+
+ /* start of packet */
+ writel(readl(&lp->tx_ring[entry].base) | T_STP, &lp->tx_ring[entry].base);
+ /* end of packet */
+ writel(readl(&lp->tx_ring[end].base) | T_ENP, &lp->tx_ring[end].base);
+
+ for (i = end; i != entry; --i) {
+ /* ownership of packet */
+ writel(readl(&lp->tx_ring[i].base) | T_OWN, &lp->tx_ring[i].base);
+ if (i == 0)
+ i = lp->txRingMask + 1;
+ }
+ writel(readl(&lp->tx_ring[entry].base) | T_OWN, &lp->tx_ring[entry].base);
+
+ lp->tx_new = (++end) & lp->txRingMask; /* update current pointers */
+ } else {
+ status = NETDEV_TX_LOCKED;
+ }
+
+ return status;
+}
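+/*
+** Ownership (T_OWN) is handed to the LANCE in reverse order above, last
+** buffer first and the first buffer of the chain very last, so the chip
+** cannot start transmitting a chained packet before every descriptor of
+** the chain is ready.
+*/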
+
+static void depca_dbg_open(struct net_device *dev)
+{
+ struct depca_private *lp = netdev_priv(dev);
+ u_long ioaddr = dev->base_addr;
+ struct depca_init *p = &lp->init_block;
+ int i;
+
+ if (depca_debug > 1) {
+ /* Do not copy the shadow init block into shared memory */
+ /* Debugging should not affect normal operation! */
+ /* The shadow init block will get copied across during InitRestartDepca */
+ printk("%s: depca open with irq %d\n", dev->name, dev->irq);
+ printk("Descriptor head addresses (CPU):\n");
+ printk(" 0x%lx 0x%lx\n", (u_long) lp->rx_ring, (u_long) lp->tx_ring);
+ printk("Descriptor addresses (CPU):\nRX: ");
+ for (i = 0; i < lp->rxRingMask; i++) {
+ if (i < 3) {
+ printk("%p ", &lp->rx_ring[i].base);
+ }
+ }
+ printk("...%p\n", &lp->rx_ring[i].base);
+ printk("TX: ");
+ for (i = 0; i < lp->txRingMask; i++) {
+ if (i < 3) {
+ printk("%p ", &lp->tx_ring[i].base);
+ }
+ }
+ printk("...%p\n", &lp->tx_ring[i].base);
+ printk("\nDescriptor buffers (Device):\nRX: ");
+ for (i = 0; i < lp->rxRingMask; i++) {
+ if (i < 3) {
+ printk("0x%8.8x ", readl(&lp->rx_ring[i].base));
+ }
+ }
+ printk("...0x%8.8x\n", readl(&lp->rx_ring[i].base));
+ printk("TX: ");
+ for (i = 0; i < lp->txRingMask; i++) {
+ if (i < 3) {
+ printk("0x%8.8x ", readl(&lp->tx_ring[i].base));
+ }
+ }
+ printk("...0x%8.8x\n", readl(&lp->tx_ring[i].base));
+ printk("Initialisation block at 0x%8.8lx(Phys)\n", lp->mem_start);
+ printk(" mode: 0x%4.4x\n", p->mode);
+ printk(" physical address: %pM\n", p->phys_addr);
+ printk(" multicast hash table: ");
+ for (i = 0; i < (HASH_TABLE_LEN >> 3) - 1; i++) {
+ printk("%2.2x:", p->mcast_table[i]);
+ }
+ printk("%2.2x\n", p->mcast_table[i]);
+ printk(" rx_ring at: 0x%8.8x\n", p->rx_ring);
+ printk(" tx_ring at: 0x%8.8x\n", p->tx_ring);
+ printk("buffers (Phys): 0x%8.8lx\n", lp->mem_start + lp->buffs_offset);
+ printk("Ring size:\nRX: %d Log2(rxRingMask): 0x%8.8x\n", (int) lp->rxRingMask + 1, lp->rx_rlen);
+ printk("TX: %d Log2(txRingMask): 0x%8.8x\n", (int) lp->txRingMask + 1, lp->tx_rlen);
+ outw(CSR2, DEPCA_ADDR);
+ printk("CSR2&1: 0x%4.4x", inw(DEPCA_DATA));
+ outw(CSR1, DEPCA_ADDR);
+ printk("%4.4x\n", inw(DEPCA_DATA));
+ outw(CSR3, DEPCA_ADDR);
+ printk("CSR3: 0x%4.4x\n", inw(DEPCA_DATA));
+ }
+}
+
+/*
+** Perform IOCTL call functions here. Some are privileged operations and the
+** effective uid is checked in those cases.
+** The multicast IOCTLs will not work here; they are for testing purposes only.
+*/
+static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct depca_private *lp = netdev_priv(dev);
+ struct depca_ioctl *ioc = (struct depca_ioctl *) &rq->ifr_ifru;
+ int i, status = 0;
+ u_long ioaddr = dev->base_addr;
+ union {
+ u8 addr[(HASH_TABLE_LEN * ETH_ALEN)];
+ u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
+ u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
+ } tmp;
+ unsigned long flags;
+ void *buf;
+
+ switch (ioc->cmd) {
+ case DEPCA_GET_HWADDR: /* Get the hardware address */
+ for (i = 0; i < ETH_ALEN; i++) {
+ tmp.addr[i] = dev->dev_addr[i];
+ }
+ ioc->len = ETH_ALEN;
+ if (copy_to_user(ioc->data, tmp.addr, ioc->len))
+ return -EFAULT;
+ break;
+
+ case DEPCA_SET_HWADDR: /* Set the hardware address */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN))
+ return -EFAULT;
+ for (i = 0; i < ETH_ALEN; i++) {
+ dev->dev_addr[i] = tmp.addr[i];
+ }
+ netif_stop_queue(dev);
+ while (lp->tx_old != lp->tx_new)
+ cpu_relax(); /* Wait for the ring to empty */
+
+ STOP_DEPCA; /* Temporarily stop the depca. */
+ depca_init_ring(dev); /* Initialize the descriptor rings */
+ LoadCSRs(dev); /* Reload CSR3 */
+ InitRestartDepca(dev); /* Resume normal operation. */
+ netif_start_queue(dev); /* Unlock the TX ring */
+ break;
+
+ case DEPCA_SET_PROM: /* Set Promiscuous Mode */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ netif_stop_queue(dev);
+ while (lp->tx_old != lp->tx_new)
+ cpu_relax(); /* Wait for the ring to empty */
+
+ STOP_DEPCA; /* Temporarily stop the depca. */
+ depca_init_ring(dev); /* Initialize the descriptor rings */
+ lp->init_block.mode |= PROM; /* Set promiscuous mode */
+
+ LoadCSRs(dev); /* Reload CSR3 */
+ InitRestartDepca(dev); /* Resume normal operation. */
+ netif_start_queue(dev); /* Unlock the TX ring */
+ break;
+
+ case DEPCA_CLR_PROM: /* Clear Promiscuous Mode */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ netif_stop_queue(dev);
+ while (lp->tx_old != lp->tx_new)
+ cpu_relax(); /* Wait for the ring to empty */
+
+ STOP_DEPCA; /* Temporarily stop the depca. */
+ depca_init_ring(dev); /* Initialize the descriptor rings */
+ lp->init_block.mode &= ~PROM; /* Clear promiscuous mode */
+
+ LoadCSRs(dev); /* Reload CSR3 */
+ InitRestartDepca(dev); /* Resume normal operation. */
+ netif_start_queue(dev); /* Unlock the TX ring */
+ break;
+
+ case DEPCA_SAY_BOO: /* Say "Boo!" to the kernel log file */
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ printk("%s: Boo!\n", dev->name);
+ break;
+
+ case DEPCA_GET_MCA: /* Get the multicast address table */
+ ioc->len = (HASH_TABLE_LEN >> 3);
+ if (copy_to_user(ioc->data, lp->init_block.mcast_table, ioc->len))
+ return -EFAULT;
+ break;
+
+ case DEPCA_SET_MCA: /* Set a multicast address */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (ioc->len >= HASH_TABLE_LEN)
+ return -EINVAL;
+ if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN * ioc->len))
+ return -EFAULT;
+ set_multicast_list(dev);
+ break;
+
+ case DEPCA_CLR_MCA: /* Clear all multicast addresses */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ set_multicast_list(dev);
+ break;
+
+ case DEPCA_MCA_EN: /* Enable pass all multicast addressing */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ set_multicast_list(dev);
+ break;
+
+ case DEPCA_GET_STATS: /* Get the driver statistics */
+ ioc->len = sizeof(lp->pktStats);
+ buf = kmalloc(ioc->len, GFP_KERNEL);
+ if(!buf)
+ return -ENOMEM;
+ spin_lock_irqsave(&lp->lock, flags);
+ memcpy(buf, &lp->pktStats, ioc->len);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ if (copy_to_user(ioc->data, buf, ioc->len))
+ status = -EFAULT;
+ kfree(buf);
+ break;
+
+ case DEPCA_CLR_STATS: /* Zero out the driver statistics */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ spin_lock_irqsave(&lp->lock, flags);
+ memset(&lp->pktStats, 0, sizeof(lp->pktStats));
+ spin_unlock_irqrestore(&lp->lock, flags);
+ break;
+
+ case DEPCA_GET_REG: /* Get the DEPCA Registers */
+ i = 0;
+ tmp.sval[i++] = inw(DEPCA_NICSR);
+ outw(CSR0, DEPCA_ADDR); /* status register */
+ tmp.sval[i++] = inw(DEPCA_DATA);
+ memcpy(&tmp.sval[i], &lp->init_block, sizeof(struct depca_init));
+		ioc->len = i * sizeof(u16) + sizeof(struct depca_init);
+ if (copy_to_user(ioc->data, tmp.addr, ioc->len))
+ return -EFAULT;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return status;
+}
+
+static int __init depca_module_init (void)
+{
+ int err = 0;
+
+#ifdef CONFIG_MCA
+ err = mca_register_driver(&depca_mca_driver);
+ if (err)
+ goto err;
+#endif
+#ifdef CONFIG_EISA
+ err = eisa_driver_register(&depca_eisa_driver);
+ if (err)
+ goto err_mca;
+#endif
+ err = platform_driver_register(&depca_isa_driver);
+ if (err)
+ goto err_eisa;
+
+ depca_platform_probe();
+ return 0;
+
+err_eisa:
+#ifdef CONFIG_EISA
+ eisa_driver_unregister(&depca_eisa_driver);
+err_mca:
+#endif
+#ifdef CONFIG_MCA
+ mca_unregister_driver(&depca_mca_driver);
+err:
+#endif
+ return err;
+}
+
+static void __exit depca_module_exit (void)
+{
+ int i;
+#ifdef CONFIG_MCA
+ mca_unregister_driver (&depca_mca_driver);
+#endif
+#ifdef CONFIG_EISA
+ eisa_driver_unregister (&depca_eisa_driver);
+#endif
+ platform_driver_unregister (&depca_isa_driver);
+
+ for (i = 0; depca_io_ports[i].iobase; i++) {
+ if (depca_io_ports[i].device) {
+ depca_io_ports[i].device->dev.platform_data = NULL;
+ platform_device_unregister (depca_io_ports[i].device);
+ depca_io_ports[i].device = NULL;
+ }
+ }
+}
+
+module_init (depca_module_init);
+module_exit (depca_module_exit);
diff --git a/drivers/net/ethernet/amd/depca.h b/drivers/net/ethernet/amd/depca.h
new file mode 100644
index 000000000000..cdcfe4252c16
--- /dev/null
+++ b/drivers/net/ethernet/amd/depca.h
@@ -0,0 +1,183 @@
+/*
+ Written 1994 by David C. Davies.
+
+ Copyright 1994 David C. Davies. This software may be used and distributed
+ according to the terms of the GNU General Public License, incorporated herein by
+ reference.
+*/
+
+/*
+** I/O addresses. Note that the 2k buffer option is not supported in
+** this driver.
+*/
+#define DEPCA_NICSR (ioaddr + 0x00)	/* Network interface CSR */
+#define DEPCA_RBI (ioaddr + 0x02)	/* RAM buffer index (2k buffer mode) */
+#define DEPCA_DATA (ioaddr + 0x04)	/* LANCE registers' data port */
+#define DEPCA_ADDR (ioaddr + 0x06)	/* LANCE registers' address port */
+#define DEPCA_HBASE (ioaddr + 0x08)	/* EISA high memory base address reg. */
+#define DEPCA_PROM (ioaddr + 0x0c)	/* Ethernet address ROM data port */
+#define DEPCA_CNFG (ioaddr + 0x0c)	/* EISA Configuration port */
+#define DEPCA_RBSA (ioaddr + 0x0e)	/* RAM buffer starting address (2k buff.) */
+
+/*
+** These are LANCE registers addressable through DEPCA_ADDR
+*/
+#define CSR0 0
+#define CSR1 1
+#define CSR2 2
+#define CSR3 3
+
+/*
+** NETWORK INTERFACE CSR (NI_CSR) bit definitions
+*/
+
+#define TO 0x0100 /* Time Out for remote boot */
+#define SHE 0x0080 /* SHadow memory Enable */
+#define BS 0x0040 /* Bank Select */
+#define BUF 0x0020 /* BUFfer size (1->32k, 0->64k) */
+#define RBE 0x0010 /* Remote Boot Enable (1->net boot) */
+#define AAC 0x0008 /* Address ROM Address Counter (1->enable) */
+#define _128KB 0x0008 /* 128kB Network RAM (1->enable) */
+#define IM 0x0004 /* Interrupt Mask (1->mask) */
+#define IEN 0x0002 /* Interrupt tristate ENable (1->enable) */
+#define LED 0x0001 /* LED control */
+
+/*
+** Control and Status Register 0 (CSR0) bit definitions
+*/
+
+#define ERR 0x8000 /* Error summary */
+#define BABL 0x4000 /* Babble transmitter timeout error */
+#define CERR 0x2000 /* Collision Error */
+#define MISS 0x1000 /* Missed packet */
+#define MERR 0x0800 /* Memory Error */
+#define RINT 0x0400 /* Receiver Interrupt */
+#define TINT 0x0200 /* Transmit Interrupt */
+#define IDON 0x0100 /* Initialization Done */
+#define INTR 0x0080 /* Interrupt Flag */
+#define INEA 0x0040 /* Interrupt Enable */
+#define RXON 0x0020 /* Receiver on */
+#define TXON 0x0010 /* Transmitter on */
+#define TDMD 0x0008 /* Transmit Demand */
+#define STOP 0x0004 /* Stop */
+#define STRT 0x0002 /* Start */
+#define INIT 0x0001 /* Initialize */
+#define INTM 0xff00 /* Interrupt Mask */
+#define INTE 0xfff0 /* Interrupt Enable */
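+
+/*
+** Illustrative only (not part of the original driver): a LANCE CSR is
+** reached indirectly, by writing its number to the address port and then
+** accessing the data port, e.g.
+**
+**	outw(CSR0, DEPCA_ADDR);
+**	if (inw(DEPCA_DATA) & IDON)
+**		...initialization has completed...
+**
+** The error/event bits (BABL, CERR, MISS, MERR, RINT, TINT, IDON) are
+** write-one-to-clear: acknowledge them by writing the same mask back.
+*/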
+
+/*
+** CONTROL AND STATUS REGISTER 3 (CSR3)
+*/
+
+#define BSWP 0x0004 /* Byte SWaP */
+#define ACON 0x0002 /* ALE control */
+#define BCON 0x0001 /* Byte CONtrol */
+
+/*
+** Initialization Block Mode Register
+*/
+
+#define PROM 0x8000 /* Promiscuous Mode */
+#define EMBA 0x0080 /* Enable Modified Back-off Algorithm */
+#define INTL 0x0040 /* Internal Loopback */
+#define DRTY 0x0020 /* Disable Retry */
+#define COLL 0x0010 /* Force Collision */
+#define DTCR 0x0008 /* Disable Transmit CRC */
+#define LOOP 0x0004 /* Loopback */
+#define DTX 0x0002 /* Disable the Transmitter */
+#define DRX 0x0001 /* Disable the Receiver */
+
+/*
+** Receive Message Descriptor 1 (RMD1) bit definitions.
+*/
+
+#define R_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */
+#define R_ERR 0x4000 /* Error Summary */
+#define R_FRAM 0x2000 /* Framing Error */
+#define R_OFLO 0x1000 /* Overflow Error */
+#define R_CRC 0x0800 /* CRC Error */
+#define R_BUFF 0x0400 /* Buffer Error */
+#define R_STP 0x0200 /* Start of Packet */
+#define R_ENP 0x0100 /* End of Packet */
+
+/*
+** Transmit Message Descriptor 1 (TMD1) bit definitions.
+*/
+
+#define T_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */
+#define T_ERR 0x4000 /* Error Summary */
+#define T_ADD_FCS 0x2000	/* Add FCS to the transmitted frame */
+#define T_MORE 0x1000 /* >1 retry to transmit packet */
+#define T_ONE 0x0800 /* 1 try needed to transmit the packet */
+#define T_DEF 0x0400 /* Deferred */
+#define T_STP 0x02000000 /* Start of Packet */
+#define T_ENP 0x01000000 /* End of Packet */
+#define T_FLAGS 0xff000000 /* TX Flags Field */
+
+/*
+** Transmit Message Descriptor 3 (TMD3) bit definitions.
+*/
+
+#define TMD3_BUFF 0x8000 /* BUFFer error */
+#define TMD3_UFLO 0x4000 /* UnderFLOw error */
+#define TMD3_RES 0x2000 /* REServed */
+#define TMD3_LCOL 0x1000 /* Late COLlision */
+#define TMD3_LCAR 0x0800 /* Loss of CARrier */
+#define TMD3_RTRY 0x0400 /* ReTRY error */
+
+/*
+** EISA configuration Register (CNFG) bit definitions
+*/
+
+#define TIMEOUT 0x0100 /* 0:2.5 mins, 1: 30 secs */
+#define REMOTE 0x0080 /* Remote Boot Enable -> 1 */
+#define IRQ11 0x0040 /* Enable -> 1 */
+#define IRQ10 0x0020 /* Enable -> 1 */
+#define IRQ9 0x0010 /* Enable -> 1 */
+#define IRQ5 0x0008 /* Enable -> 1 */
+#define BUFF 0x0004 /* 0: 64kB or 128kB, 1: 32kB */
+#define PADR16 0x0002 /* RAM on 64kB boundary */
+#define PADR17 0x0001 /* RAM on 128kB boundary */
+
+/*
+** Miscellaneous
+*/
+#define HASH_TABLE_LEN 64 /* Bits */
+#define HASH_BITS 0x003f /* 6 LS bits */
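+
+/*
+** Sketch, not from the original driver: on LANCE parts the 64-bit
+** logical-address filter is typically indexed by reducing a CRC of the
+** destination MAC address to a 6-bit value with HASH_BITS, then setting
+** that bit of the table:
+**
+**	index = crc & HASH_BITS;		(6 LS bits -> 0..63)
+**	mcast_table[index >> 3] |= 1 << (index & 7);
+*/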
+
+#define MASK_INTERRUPTS 1
+#define UNMASK_INTERRUPTS 0
+
+#define EISA_EN 0x0001 /* Enable EISA bus buffers */
+#define EISA_ID (iobase + 0x0080)	/* ID long word for EISA card */
+#define EISA_CTRL (iobase + 0x0084)	/* Control word for EISA card */
+
+/*
+** Include the IOCTL stuff
+*/
+#include <linux/sockios.h>
+
+struct depca_ioctl {
+ unsigned short cmd; /* Command to run */
+ unsigned short len; /* Length of the data buffer */
+ unsigned char __user *data; /* Pointer to the data buffer */
+};
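+
+/*
+** Sketch of user-space usage (illustrative; the ioctl request number is an
+** assumption -- the driver is reached through the device's private ioctl
+** hook):
+**
+**	struct ifreq rq;
+**	struct depca_ioctl *ioc = (struct depca_ioctl *)&rq.ifr_ifru;
+**
+**	strcpy(rq.ifr_name, "eth0");
+**	ioc->cmd = DEPCA_GET_HWADDR;
+**	ioc->len = 0;
+**	ioc->data = buf;			(at least ETH_ALEN bytes)
+**	ioctl(sock, SIOCDEVPRIVATE, &rq);
+*/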
+
+/*
+** Recognised commands for the driver
+*/
+#define DEPCA_GET_HWADDR 0x01 /* Get the hardware address */
+#define DEPCA_SET_HWADDR 0x02 /* Set the hardware address */
+#define DEPCA_SET_PROM 0x03 /* Set Promiscuous Mode */
+#define DEPCA_CLR_PROM 0x04 /* Clear Promiscuous Mode */
+#define DEPCA_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
+#define DEPCA_GET_MCA 0x06 /* Get the multicast address table */
+#define DEPCA_SET_MCA 0x07 /* Set a multicast address */
+#define DEPCA_CLR_MCA 0x08 /* Clear all multicast addresses */
+#define DEPCA_MCA_EN 0x09 /* Enable pass-all multicast addressing */
+#define DEPCA_GET_STATS 0x0a /* Get the driver statistics */
+#define DEPCA_CLR_STATS 0x0b /* Zero out the driver statistics */
+#define DEPCA_GET_REG 0x0c /* Get the Register contents */
+#define DEPCA_SET_REG 0x0d /* Set the Register contents */
+#define DEPCA_DUMP 0x0f /* Dump the DEPCA Status */
+
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
new file mode 100644
index 000000000000..86aa0d546a5b
--- /dev/null
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -0,0 +1,242 @@
+/* hplance.c : the Linux/hp300/lance ethernet driver
+ *
+ * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
+ * Based on the Sun Lance driver and the NetBSD HP Lance driver
+ * Uses the generic 7990.c LANCE code.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+/* Used for the temporary inet entries and routing */
+#include <linux/socket.h>
+#include <linux/route.h>
+#include <linux/dio.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+#include "hplance.h"
+
+/* We have 16384 bytes of RAM for the init block and buffers. This places
+ * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
+ * buffers and 2 Tx buffers.
+ */
+#define LANCE_LOG_TX_BUFFERS 1
+#define LANCE_LOG_RX_BUFFERS 3
+
+#include "7990.h" /* use generic LANCE code */
+
+/* Our private data structure */
+struct hplance_private {
+ struct lance_private lance;
+};
+
+/* function prototypes... This is easy because all the grot is in the
+ * generic LANCE support. All we have to support is probing for boards,
+ * plus board-specific init, open and close actions.
+ * Oh, and we need to tell the generic code how to read and write LANCE registers...
+ */
+static int __devinit hplance_init_one(struct dio_dev *d,
+ const struct dio_device_id *ent);
+static void __devinit hplance_init(struct net_device *dev,
+ struct dio_dev *d);
+static void __devexit hplance_remove_one(struct dio_dev *d);
+static void hplance_writerap(void *priv, unsigned short value);
+static void hplance_writerdp(void *priv, unsigned short value);
+static unsigned short hplance_readrdp(void *priv);
+static int hplance_open(struct net_device *dev);
+static int hplance_close(struct net_device *dev);
+
+static struct dio_device_id hplance_dio_tbl[] = {
+ { DIO_ID_LAN },
+ { 0 }
+};
+
+static struct dio_driver hplance_driver = {
+ .name = "hplance",
+ .id_table = hplance_dio_tbl,
+ .probe = hplance_init_one,
+ .remove = __devexit_p(hplance_remove_one),
+};
+
+static const struct net_device_ops hplance_netdev_ops = {
+ .ndo_open = hplance_open,
+ .ndo_stop = hplance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_set_rx_mode = lance_set_multicast,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = lance_poll,
+#endif
+};
+
+/* Find all the HP Lance boards and initialise them... */
+static int __devinit hplance_init_one(struct dio_dev *d,
+ const struct dio_device_id *ent)
+{
+ struct net_device *dev;
+ int err = -ENOMEM;
+
+ dev = alloc_etherdev(sizeof(struct hplance_private));
+ if (!dev)
+ goto out;
+
+ err = -EBUSY;
+ if (!request_mem_region(dio_resource_start(d),
+ dio_resource_len(d), d->name))
+ goto out_free_netdev;
+
+ hplance_init(dev, d);
+ err = register_netdev(dev);
+ if (err)
+ goto out_release_mem_region;
+
+ dio_set_drvdata(d, dev);
+
+	printk(KERN_INFO "%s: %s; select code %d, addr %pM, irq %d\n",
+	       dev->name, d->name, d->scode, dev->dev_addr, d->ipl);
+
+ return 0;
+
+ out_release_mem_region:
+ release_mem_region(dio_resource_start(d), dio_resource_len(d));
+ out_free_netdev:
+ free_netdev(dev);
+ out:
+ return err;
+}
+
+static void __devexit hplance_remove_one(struct dio_dev *d)
+{
+ struct net_device *dev = dio_get_drvdata(d);
+
+ unregister_netdev(dev);
+ release_mem_region(dio_resource_start(d), dio_resource_len(d));
+ free_netdev(dev);
+}
+
+/* Initialise a single lance board at the given DIO device */
+static void __devinit hplance_init(struct net_device *dev, struct dio_dev *d)
+{
+ unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
+ struct hplance_private *lp;
+ int i;
+
+ /* reset the board */
+ out_8(va+DIO_IDOFF, 0xff);
+ udelay(100); /* ariba! ariba! udelay! udelay! */
+
+ /* Fill the dev fields */
+ dev->base_addr = va;
+ dev->netdev_ops = &hplance_netdev_ops;
+ dev->dma = 0;
+
+ for (i=0; i<6; i++) {
+ /* The NVRAM holds our ethernet address, one nibble per byte,
+ * at bytes NVRAMOFF+1,3,5,7,9...
+ */
+ dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
+ | (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF);
+ }
+
+ lp = netdev_priv(dev);
+ lp->lance.name = (char*)d->name; /* discards const, shut up gcc */
+ lp->lance.base = va;
+ lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */
+ lp->lance.lance_init_block = NULL; /* LANCE addr of same RAM */
+ lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */
+ lp->lance.irq = d->ipl;
+ lp->lance.writerap = hplance_writerap;
+ lp->lance.writerdp = hplance_writerdp;
+ lp->lance.readrdp = hplance_readrdp;
+ lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
+ lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
+ lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
+ lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;
+}
+
+/* This is disgusting. We have to check the DIO status register for ack every
+ * time we read or write the LANCE registers.
+ */
+static void hplance_writerap(void *priv, unsigned short value)
+{
+ struct lance_private *lp = (struct lance_private *)priv;
+ do {
+ out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+}
+
+static void hplance_writerdp(void *priv, unsigned short value)
+{
+ struct lance_private *lp = (struct lance_private *)priv;
+ do {
+ out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+}
+
+static unsigned short hplance_readrdp(void *priv)
+{
+ struct lance_private *lp = (struct lance_private *)priv;
+ __u16 value;
+ do {
+ value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+ return value;
+}
+
+static int hplance_open(struct net_device *dev)
+{
+ int status;
+ struct lance_private *lp = netdev_priv(dev);
+
+ status = lance_open(dev); /* call generic lance open code */
+ if (status)
+ return status;
+ /* enable interrupts at board level. */
+ out_8(lp->base + HPLANCE_STATUS, LE_IE);
+
+ return 0;
+}
+
+static int hplance_close(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ out_8(lp->base + HPLANCE_STATUS, 0); /* disable interrupts at boardlevel */
+ lance_close(dev);
+ return 0;
+}
+
+static int __init hplance_init_module(void)
+{
+ return dio_register_driver(&hplance_driver);
+}
+
+static void __exit hplance_cleanup_module(void)
+{
+ dio_unregister_driver(&hplance_driver);
+}
+
+module_init(hplance_init_module);
+module_exit(hplance_cleanup_module);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/hplance.h b/drivers/net/ethernet/amd/hplance.h
new file mode 100644
index 000000000000..04aee9e0376a
--- /dev/null
+++ b/drivers/net/ethernet/amd/hplance.h
@@ -0,0 +1,26 @@
+/* Random defines and structures for the HP Lance driver.
+ * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
+ * Based on the Sun Lance driver and the NetBSD HP Lance driver
+ */
+
+/* Registers */
+#define HPLANCE_ID 0x01 /* DIO register: ID byte */
+#define HPLANCE_STATUS 0x03 /* DIO register: interrupt enable/status */
+
+/* Control and status bits for the status register */
+#define LE_IE 0x80 /* interrupt enable */
+#define LE_IR 0x40 /* interrupt requested */
+#define LE_LOCK 0x08 /* lock status register */
+#define LE_ACK 0x04 /* ack of lock */
+#define LE_JAB 0x02 /* loss of tx clock (???) */
+/* We can also extract the IPL from the status register with the standard
+ * DIO_IPL(hplance) macro, or using dio_scodetoipl()
+ */
+
+/* These are the offsets for the DIO regs (hplance_reg), lance_ioreg,
+ * memory and NVRAM:
+ */
+#define HPLANCE_IDOFF 0 /* board baseaddr */
+#define HPLANCE_REGOFF 0x4000 /* lance registers */
+#define HPLANCE_MEMOFF 0x8000 /* struct lance_init_block */
+#define HPLANCE_NVRAMOFF 0xC008 /* etheraddress as one *nibble* per byte */
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
new file mode 100644
index 000000000000..a6e2e840884e
--- /dev/null
+++ b/drivers/net/ethernet/amd/lance.c
@@ -0,0 +1,1313 @@
+/* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
+/*
+ Written/copyright 1993-1998 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
+ with most other LANCE-based bus-master (NE2100/NE2500) ethercards.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Andrey V. Savochkin:
+ - alignment problem with 1.3.* kernel and some minor changes.
+ Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
+ - added support for Linux/Alpha, but removed most of it, because
+ it worked only for the PCI chip.
+ - added hook for the 32bit lance driver
+ - added PCnetPCI II (79C970A) to chip table
+ Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
+ - hopefully fix above so Linux/Alpha can use ISA cards too.
+ 8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
+ v1.12 10/27/97 Module support -djb
+ v1.14 2/3/98 Module support modified, made PCI support optional -djb
+ v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed
+ before unregister_netdev() which caused NULL pointer
+ reference later in the chain (in rtnetlink_fill_ifinfo())
+ -- Mika Kuoppala <miku@iki.fi>
+
+ Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from
+ the 2.1 version of the old driver - Alan Cox
+
+ Get rid of check_region, check kmalloc return in lance_probe1
+ Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
+
+ Reworked detection, added support for Racal InterLan EtherBlaster cards
+ Vesselin Kostadinov <vesok at yahoo dot com > - 22/4/2004
+*/
+
+static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+
+static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
+static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
+static int __init do_lance_probe(struct net_device *dev);
+
+
+static struct card {
+ char id_offset14;
+ char id_offset15;
+} cards[] = {
+	{ /* "normal" */
+ .id_offset14 = 0x57,
+ .id_offset15 = 0x57,
+ },
+	{ /* NI6510EB */
+ .id_offset14 = 0x52,
+ .id_offset15 = 0x44,
+ },
+	{ /* Racal InterLan EtherBlaster */
+ .id_offset14 = 0x52,
+ .id_offset15 = 0x49,
+ },
+};
+#define NUM_CARDS 3
+
+#ifdef LANCE_DEBUG
+static int lance_debug = LANCE_DEBUG;
+#else
+static int lance_debug = 1;
+#endif
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the AMD 79C960, the "PCnet-ISA
+single-chip ethernet controller for ISA". This chip is used in a wide
+variety of boards from vendors such as Allied Telesis, HP, Kingston,
+and Boca. This driver is also intended to work with older AMD 7990
+designs, such as the NE1500 and NE2100, and newer 79C961. For convenience,
+I use the name LANCE to refer to all of the AMD chips, even though it properly
+refers only to the original 7990.
+
+II. Board-specific settings
+
+The driver is designed to work with boards that use the faster
+bus-master mode, rather than in shared memory mode. (Only older designs
+have on-board buffer memory needed to support the slower shared memory mode.)
+
+Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
+channel. This driver probes the likely base addresses:
+{0x300, 0x320, 0x340, 0x360}.
+After the board is found it generates a DMA-timeout interrupt and uses
+autoIRQ to find the IRQ line. The DMA channel can be set with the low bits
+of the otherwise-unused dev->mem_start value (aka PARAM1). If unset it is
+probed for by enabling each free DMA channel in turn and checking if
+initialization succeeds.
+
+The HP-J2405A board is an exception: with this board it is easy to read the
+EEPROM-set values for the base, IRQ, and DMA. (Of course you must already
+_know_ the base address -- that field is for writing the EEPROM.)
+
+III. Driver operation
+
+IIIa. Ring buffers
+The LANCE uses ring buffers of Tx and Rx descriptors. Each entry describes
+the base and length of the data buffer, along with status bits. The length
+of these rings is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
+the ring length (rather than the ring length itself) for implementation
+ease. The current default values are 4 (Tx) and 4 (Rx), which leads to
+ring sizes of 16 (Tx) and 16 (Rx). Increasing the number of ring entries
+needlessly uses extra space and reduces the chance that an upper layer will
+be able to reorder queued Tx packets based on priority. Decreasing the number
+of entries makes it more difficult to achieve back-to-back packet transmission
+and increases the chance that Rx ring will overflow. (Consider the worst case
+of receiving back-to-back minimum-sized packets.)
+
+The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
+statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
+avoid the administrative overhead. For the Rx side this avoids dynamically
+allocating full-sized buffers "just in case", at the expense of a
+memory-to-memory data copy for each packet received. For most systems this
+is a good tradeoff: the Rx buffer will always be in low memory, the copy
+is inexpensive, and it primes the cache for later packet processing. For Tx
+the buffers are only used when needed as low-memory bounce buffers.
+
+IIIB. 16M memory limitations.
+For the ISA bus master mode all structures used directly by the LANCE,
+the initialization block, Rx and Tx rings, and data buffers, must be
+accessible from the ISA bus, i.e. in the lower 16M of real memory.
+This is a problem for current Linux kernels on >16M machines. The network
+devices are initialized after memory initialization, and the kernel doles out
+memory from the top of memory downward. The current solution is to have a
+special network initialization routine that's called before memory
+initialization; this will eventually be generalized for all network devices.
+As mentioned before, low-memory "bounce-buffers" are used when needed.
+
+IIIC. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
+we can't avoid the interrupt overhead by having the Tx routine reap the Tx
+stats.) After reaping the stats, it marks the queue entry as empty by setting
+the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
+tx_full and tbusy flags.
+
+*/
+
+/* Set the number of Tx and Rx buffers, using Log_2(# buffers).
+   Reasonable default values are 16 Tx buffers and 16 Rx buffers.
+   That translates to 4 and 4 (16 == 2^4).
+ This is a compile-time option for efficiency.
+ */
+#ifndef LANCE_LOG_TX_BUFFERS
+#define LANCE_LOG_TX_BUFFERS 4
+#define LANCE_LOG_RX_BUFFERS 4
+#endif
+
+#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
+
+#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
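+
+/* Illustrative note: the chip reads log2(ring length) from bits 29-31 of
+   the ring pointer words in the initialization block, so a ring base is
+   encoded as (bus_addr & 0x00ffffff) | {RX,TX}_RING_LEN_BITS -- exactly
+   how lance_init_ring() fills in init_block below. */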
+
+#define PKT_BUF_SZ 1544
+
+/* Offsets from base I/O address. */
+#define LANCE_DATA 0x10
+#define LANCE_ADDR 0x12
+#define LANCE_RESET 0x14
+#define LANCE_BUS_IF 0x16
+#define LANCE_TOTAL_SIZE 0x18
+
+#define TX_TIMEOUT (HZ/5)
+
+/* The LANCE Rx and Tx ring descriptors. */
+struct lance_rx_head {
+ s32 base;
+ s16 buf_length; /* This length is 2s complement (negative)! */
+ s16 msg_length; /* This length is "normal". */
+};
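+
+/* Illustrative note, assuming the usual LANCE databook layout: buf_length
+   carries the two's complement of the buffer size in its low 12 bits (BCNT),
+   with the top four bits required to be ones; a PKT_BUF_SZ (1544) byte
+   buffer is therefore advertised as -1544 == 0xf9f8. */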
+
+struct lance_tx_head {
+ s32 base;
+ s16 length; /* Length is 2s complement (negative)! */
+ s16 misc;
+};
+
+/* The LANCE initialization block, described in databook. */
+struct lance_init_block {
+ u16 mode; /* Pre-set mode (reg. 15) */
+ u8 phys_addr[6]; /* Physical ethernet address */
+ u32 filter[2]; /* Multicast filter (unused). */
+ /* Receive and transmit ring base, along with extra bits. */
+ u32 rx_ring; /* Tx and Rx ring base pointers */
+ u32 tx_ring;
+};
+
+struct lance_private {
+ /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
+ struct lance_rx_head rx_ring[RX_RING_SIZE];
+ struct lance_tx_head tx_ring[TX_RING_SIZE];
+ struct lance_init_block init_block;
+ const char *name;
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ unsigned long rx_buffs; /* Address of Rx and Tx buffers. */
+ /* Tx low-memory "bounce buffer" address. */
+ char (*tx_bounce_buffs)[PKT_BUF_SZ];
+ int cur_rx, cur_tx; /* The next free ring entry */
+ int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ int dma;
+ unsigned char chip_version; /* See lance_chip_type. */
+ spinlock_t devlock;
+};
+
+#define LANCE_MUST_PAD 0x00000001
+#define LANCE_ENABLE_AUTOSELECT 0x00000002
+#define LANCE_MUST_REINIT_RING 0x00000004
+#define LANCE_MUST_UNRESET 0x00000008
+#define LANCE_HAS_MISSED_FRAME 0x00000010
+
+/* A mapping from the chip ID number to the part number and features.
+ These are from the datasheets -- in real life the '970 version
+ reportedly has the same ID as the '965. */
+static struct lance_chip_type {
+ int id_number;
+ const char *name;
+ int flags;
+} chip_table[] = {
+ {0x0000, "LANCE 7990", /* Ancient lance chip. */
+ LANCE_MUST_PAD + LANCE_MUST_UNRESET},
+ {0x0003, "PCnet/ISA 79C960", /* 79C960 PCnet/ISA. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x2260, "PCnet/ISA+ 79C961", /* 79C961 PCnet/ISA+, Plug-n-Play. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x2420, "PCnet/PCI 79C970", /* 79C970 or 79C974 PCnet-SCSI, PCI. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
+ it the PCnet32. */
+ {0x2430, "PCnet32", /* 79C965 PCnet for VL bus. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x2621, "PCnet/PCI-II 79C970A", /* 79C970A PCInetPCI II. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x0, "PCnet (unknown)",
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+};
+
+enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
+
+
+/* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
+ Assume yes until we know the memory size. */
+static unsigned char lance_need_isa_bounce_buffers = 1;
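+/* It is cleared in do_lance_probe() when all system memory lies below the
+   16MB ISA DMA limit, in which case skb data can be handed to the chip
+   directly and no bounce copy is needed on Tx. */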
+
+static int lance_open(struct net_device *dev);
+static void lance_init_ring(struct net_device *dev, gfp_t mode);
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static int lance_rx(struct net_device *dev);
+static irqreturn_t lance_interrupt(int irq, void *dev_id);
+static int lance_close(struct net_device *dev);
+static struct net_device_stats *lance_get_stats(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static void lance_tx_timeout (struct net_device *dev);
+
+
+
+#ifdef MODULE
+#define MAX_CARDS 8 /* Max number of interfaces (cards) per module */
+
+static struct net_device *dev_lance[MAX_CARDS];
+static int io[MAX_CARDS];
+static int dma[MAX_CARDS];
+static int irq[MAX_CARDS];
+
+module_param_array(io, int, NULL, 0);
+module_param_array(dma, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param(lance_debug, int, 0);
+MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es), required");
+MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
+MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
+MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
+
+int __init init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) /* only complain once */
+ break;
+ printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
+ return -EPERM;
+ }
+ dev = alloc_etherdev(0);
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->dma = dma[this_dev];
+ if (do_lance_probe(dev) == 0) {
+ dev_lance[found++] = dev;
+ continue;
+ }
+ free_netdev(dev);
+ break;
+ }
+ if (found != 0)
+ return 0;
+ return -ENXIO;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ struct lance_private *lp = dev->ml_priv;
+ if (dev->dma != 4)
+ free_dma(dev->dma);
+ release_region(dev->base_addr, LANCE_TOTAL_SIZE);
+ kfree(lp->tx_bounce_buffs);
+ kfree((void*)lp->rx_buffs);
+ kfree(lp);
+}
+
+void __exit cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
+ struct net_device *dev = dev_lance[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+MODULE_LICENSE("GPL");
+
+
+/* Starting in v2.1.*, the LANCE/PCnet probe is now similar to the other
+ board probes now that kmalloc() can allocate ISA DMA-able regions.
+ This also allows the LANCE driver to be used as a module.
+ */
+static int __init do_lance_probe(struct net_device *dev)
+{
+ unsigned int *port;
+ int result;
+
+ if (high_memory <= phys_to_virt(16*1024*1024))
+ lance_need_isa_bounce_buffers = 0;
+
+ for (port = lance_portlist; *port; port++) {
+ int ioaddr = *port;
+ struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
+ "lance-probe");
+
+ if (r) {
+ /* Detect the card with minimal I/O reads */
+ char offset14 = inb(ioaddr + 14);
+ int card;
+ for (card = 0; card < NUM_CARDS; ++card)
+ if (cards[card].id_offset14 == offset14)
+ break;
+ if (card < NUM_CARDS) {/*yes, the first byte matches*/
+ char offset15 = inb(ioaddr + 15);
+ for (card = 0; card < NUM_CARDS; ++card)
+ if ((cards[card].id_offset14 == offset14) &&
+ (cards[card].id_offset15 == offset15))
+ break;
+ }
+ if (card < NUM_CARDS) { /*Signature OK*/
+ result = lance_probe1(dev, ioaddr, 0, 0);
+ if (!result) {
+ struct lance_private *lp = dev->ml_priv;
+ int ver = lp->chip_version;
+
+ r->name = chip_table[ver].name;
+ return 0;
+ }
+ }
+ release_region(ioaddr, LANCE_TOTAL_SIZE);
+ }
+ }
+ return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init lance_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(0);
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_lance_probe(dev);
+ if (err)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = lance_open,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_stop = lance_close,
+ .ndo_get_stats = lance_get_stats,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_tx_timeout = lance_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
+{
+ struct lance_private *lp;
+ unsigned long dma_channels; /* Mark spuriously-busy DMA channels */
+ int i, reset_val, lance_version;
+ const char *chipname;
+ /* Flags for specific chips or boards. */
+ unsigned char hpJ2405A = 0; /* HP ISA adaptor */
+ int hp_builtin = 0; /* HP on-board ethernet. */
+ static int did_version; /* Already printed version info. */
+ unsigned long flags;
+ int err = -ENOMEM;
+ void __iomem *bios;
+
+ /* First we look for special cases.
+ Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
+ There are two HP versions, check the BIOS for the configuration port.
+ This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
+ */
+ bios = ioremap(0xf00f0, 0x14);
+ if (!bios)
+ return -ENOMEM;
+ if (readw(bios + 0x12) == 0x5048) {
+ static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
+ int hp_port = (readl(bios + 1) & 1) ? 0x499 : 0x99;
+ /* We can have boards other than the built-in! Verify this is on-board. */
+ if ((inb(hp_port) & 0xc0) == 0x80 &&
+ ioaddr_table[inb(hp_port) & 3] == ioaddr)
+ hp_builtin = hp_port;
+ }
+ iounmap(bios);
+ /* We also recognize the HP Vectra on-board here, but check below. */
+ hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00 &&
+ inb(ioaddr+2) == 0x09);
+
+ /* Reset the LANCE. */
+ reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
+
+	/* The Un-Reset is only needed for the real NE2100, and it will
+	   confuse the HP board. */
+ if (!hpJ2405A)
+ outw(reset_val, ioaddr+LANCE_RESET);
+
+	outw(0x0000, ioaddr+LANCE_ADDR); /* Select CSR0 */
+ if (inw(ioaddr+LANCE_DATA) != 0x0004)
+ return -ENODEV;
+
+ /* Get the version of the chip. */
+ outw(88, ioaddr+LANCE_ADDR);
+ if (inw(ioaddr+LANCE_ADDR) != 88) {
+ lance_version = 0;
+ } else { /* Good, it's a newer chip. */
+ int chip_version = inw(ioaddr+LANCE_DATA);
+ outw(89, ioaddr+LANCE_ADDR);
+ chip_version |= inw(ioaddr+LANCE_DATA) << 16;
+ if (lance_debug > 2)
+ printk(" LANCE chip version is %#x.\n", chip_version);
+ if ((chip_version & 0xfff) != 0x003)
+ return -ENODEV;
+ chip_version = (chip_version >> 12) & 0xffff;
+ for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
+ if (chip_table[lance_version].id_number == chip_version)
+ break;
+ }
+ }
+
+	/* We can't allocate private data from alloc_etherdev() because it must
+	   be in an ISA DMA-able region. */
+ chipname = chip_table[lance_version].name;
+ printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr);
+
+ /* There is a 16 byte station address PROM at the base address.
+ The first six bytes are the station address. */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + i);
+ printk("%pM", dev->dev_addr);
+
+ dev->base_addr = ioaddr;
+ /* Make certain the data structures used by the LANCE are aligned and DMAble. */
+
+	lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
+	if (!lp)
+		return -ENOMEM;
+ if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
+ dev->ml_priv = lp;
+ lp->name = chipname;
+ lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE,
+ GFP_DMA | GFP_KERNEL);
+ if (!lp->rx_buffs)
+ goto out_lp;
+ if (lance_need_isa_bounce_buffers) {
+ lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE,
+ GFP_DMA | GFP_KERNEL);
+ if (!lp->tx_bounce_buffs)
+ goto out_rx;
+ } else
+ lp->tx_bounce_buffs = NULL;
+
+ lp->chip_version = lance_version;
+ spin_lock_init(&lp->devlock);
+
+ lp->init_block.mode = 0x0003; /* Disable Rx and Tx. */
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
+ lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
+
+ outw(0x0001, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+ outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+ outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+
+ if (irq) { /* Set iff PCI card. */
+ dev->dma = 4; /* Native bus-master, no DMA channel needed. */
+ dev->irq = irq;
+ } else if (hp_builtin) {
+ static const char dma_tbl[4] = {3, 5, 6, 0};
+ static const char irq_tbl[4] = {3, 4, 5, 9};
+ unsigned char port_val = inb(hp_builtin);
+ dev->dma = dma_tbl[(port_val >> 4) & 3];
+ dev->irq = irq_tbl[(port_val >> 2) & 3];
+ printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
+ } else if (hpJ2405A) {
+ static const char dma_tbl[4] = {3, 5, 6, 7};
+ static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
+ short reset_val = inw(ioaddr+LANCE_RESET);
+ dev->dma = dma_tbl[(reset_val >> 2) & 3];
+ dev->irq = irq_tbl[(reset_val >> 4) & 7];
+ printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
+ } else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. */
+ short bus_info;
+ outw(8, ioaddr+LANCE_ADDR);
+ bus_info = inw(ioaddr+LANCE_BUS_IF);
+ dev->dma = bus_info & 0x07;
+ dev->irq = (bus_info >> 4) & 0x0F;
+ } else {
+ /* The DMA channel may be passed in PARAM1. */
+ if (dev->mem_start & 0x07)
+ dev->dma = dev->mem_start & 0x07;
+ }
+
+ if (dev->dma == 0) {
+ /* Read the DMA channel status register, so that we can avoid
+ stuck DMA channels in the DMA detection below. */
+ dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
+ (inb(DMA2_STAT_REG) & 0xf0);
+ }
+ err = -ENODEV;
+ if (dev->irq >= 2)
+ printk(" assigned IRQ %d", dev->irq);
+ else if (lance_version != 0) { /* 7990 boards need DMA detection first. */
+ unsigned long irq_mask;
+
+ /* To auto-IRQ we enable the initialization-done and DMA error
+ interrupts. For ISA boards we get a DMA error, but VLB and PCI
+ boards will work. */
+ irq_mask = probe_irq_on();
+
+ /* Trigger an initialization just for the interrupt. */
+ outw(0x0041, ioaddr+LANCE_DATA);
+
+ mdelay(20);
+ dev->irq = probe_irq_off(irq_mask);
+ if (dev->irq)
+ printk(", probed IRQ %d", dev->irq);
+ else {
+ printk(", failed to detect IRQ line.\n");
+ goto out_tx;
+ }
+
+ /* Check for the initialization done bit, 0x0100, which means
+ that we don't need a DMA channel. */
+ if (inw(ioaddr+LANCE_DATA) & 0x0100)
+ dev->dma = 4;
+ }
+
+ if (dev->dma == 4) {
+ printk(", no DMA needed.\n");
+ } else if (dev->dma) {
+ if (request_dma(dev->dma, chipname)) {
+ printk("DMA %d allocation failed.\n", dev->dma);
+ goto out_tx;
+ } else
+ printk(", assigned DMA %d.\n", dev->dma);
+ } else { /* OK, we have to auto-DMA. */
+ for (i = 0; i < 4; i++) {
+ static const char dmas[] = { 5, 6, 7, 3 };
+ int dma = dmas[i];
+ int boguscnt;
+
+ /* Don't enable a permanently busy DMA channel, or the machine
+ will hang. */
+ if (test_bit(dma, &dma_channels))
+ continue;
+ outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
+ if (request_dma(dma, chipname))
+ continue;
+
+ flags=claim_dma_lock();
+ set_dma_mode(dma, DMA_MODE_CASCADE);
+ enable_dma(dma);
+ release_dma_lock(flags);
+
+ /* Trigger an initialization. */
+ outw(0x0001, ioaddr+LANCE_DATA);
+ for (boguscnt = 100; boguscnt > 0; --boguscnt)
+ if (inw(ioaddr+LANCE_DATA) & 0x0900)
+ break;
+ if (inw(ioaddr+LANCE_DATA) & 0x0100) {
+ dev->dma = dma;
+ printk(", DMA %d.\n", dev->dma);
+ break;
+ } else {
+ flags=claim_dma_lock();
+ disable_dma(dma);
+ release_dma_lock(flags);
+ free_dma(dma);
+ }
+ }
+ if (i == 4) { /* Failure: bail. */
+ printk("DMA detection failed.\n");
+ goto out_tx;
+ }
+ }
+
+ if (lance_version == 0 && dev->irq == 0) {
+ /* We may auto-IRQ now that we have a DMA channel. */
+ /* Trigger an initialization just for the interrupt. */
+ unsigned long irq_mask;
+
+ irq_mask = probe_irq_on();
+ outw(0x0041, ioaddr+LANCE_DATA);
+
+ mdelay(40);
+ dev->irq = probe_irq_off(irq_mask);
+ if (dev->irq == 0) {
+ printk(" Failed to detect the 7990 IRQ line.\n");
+ goto out_dma;
+ }
+ printk(" Auto-IRQ detected IRQ%d.\n", dev->irq);
+ }
+
+ if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
+ /* Turn on auto-select of media (10baseT or BNC) so that the user
+ can watch the LEDs even if the board isn't opened. */
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ /* Don't touch 10base2 power bit. */
+ outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
+ }
+
+ if (lance_debug > 0 && did_version++ == 0)
+ printk(version);
+
+ /* The LANCE-specific entries in the device structure. */
+ dev->netdev_ops = &lance_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ err = register_netdev(dev);
+ if (err)
+ goto out_dma;
+ return 0;
+out_dma:
+ if (dev->dma != 4)
+ free_dma(dev->dma);
+out_tx:
+ kfree(lp->tx_bounce_buffs);
+out_rx:
+ kfree((void*)lp->rx_buffs);
+out_lp:
+ kfree(lp);
+ return err;
+}
+
+
+static int
+lance_open(struct net_device *dev)
+{
+ struct lance_private *lp = dev->ml_priv;
+ int ioaddr = dev->base_addr;
+ int i;
+
+ if (dev->irq == 0 ||
+ request_irq(dev->irq, lance_interrupt, 0, lp->name, dev)) {
+ return -EAGAIN;
+ }
+
+ /* We used to allocate DMA here, but that was silly.
+ DMA lines can't be shared! We now permanently allocate them. */
+
+ /* Reset the LANCE */
+ inw(ioaddr+LANCE_RESET);
+
+ /* The DMA controller is used as a no-operation slave, "cascade mode". */
+ if (dev->dma != 4) {
+ unsigned long flags=claim_dma_lock();
+ enable_dma(dev->dma);
+ set_dma_mode(dev->dma, DMA_MODE_CASCADE);
+ release_dma_lock(flags);
+ }
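+	/* (With the cascade setting above the 8237 is a mere pass-through:
+	   the LANCE masters the bus itself and the DMA controller only
+	   grants it the channel.) */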
+
+ /* Un-Reset the LANCE, needed only for the NE2100. */
+ if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
+ outw(0, ioaddr+LANCE_RESET);
+
+ if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
+ /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ /* Only touch autoselect bit. */
+ outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
+ }
+
+ if (lance_debug > 1)
+ printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
+ dev->name, dev->irq, dev->dma,
+ (u32) isa_virt_to_bus(lp->tx_ring),
+ (u32) isa_virt_to_bus(lp->rx_ring),
+ (u32) isa_virt_to_bus(&lp->init_block));
+
+ lance_init_ring(dev, GFP_KERNEL);
+ /* Re-initialize the LANCE, and start it when done. */
+ outw(0x0001, ioaddr+LANCE_ADDR);
+ outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
+
+ outw(0x0004, ioaddr+LANCE_ADDR);
+ outw(0x0915, ioaddr+LANCE_DATA);
+
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ outw(0x0001, ioaddr+LANCE_DATA);
+
+ netif_start_queue (dev);
+
+ i = 0;
+ while (i++ < 100)
+ if (inw(ioaddr+LANCE_DATA) & 0x0100)
+ break;
+ /*
+ * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+ * reports that doing so triggers a bug in the '974.
+ */
+ outw(0x0042, ioaddr+LANCE_DATA);
+
+ if (lance_debug > 2)
+ printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
+ dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));
+
+ return 0; /* Always succeed */
+}
+
+/* The LANCE has been halted for one reason or another (busmaster memory
+ arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
+ etc.). Modern LANCE variants always reload their ring-buffer
+ configuration when restarted, so we must reinitialize our ring
+ context before restarting. As part of this reinitialization,
+ find all packets still on the Tx ring and pretend that they had been
+ sent (in effect, drop the packets on the floor) - the higher-level
+ protocols will time out and retransmit. It'd be better to shuffle
+ these skbs to a temp list and then actually re-Tx them after
+ restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
+*/
+
+static void
+lance_purge_ring(struct net_device *dev)
+{
+ struct lance_private *lp = dev->ml_priv;
+ int i;
+
+ /* Free all the skbuffs in the Rx and Tx queues. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = lp->rx_skbuff[i];
+ lp->rx_skbuff[i] = NULL;
+ lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */
+ if (skb)
+ dev_kfree_skb_any(skb);
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (lp->tx_skbuff[i]) {
+ dev_kfree_skb_any(lp->tx_skbuff[i]);
+ lp->tx_skbuff[i] = NULL;
+ }
+ }
+}
+
+
+/* Initialize the LANCE Rx and Tx rings. */
+static void
+lance_init_ring(struct net_device *dev, gfp_t gfp)
+{
+ struct lance_private *lp = dev->ml_priv;
+ int i;
+
+ lp->cur_rx = lp->cur_tx = 0;
+ lp->dirty_rx = lp->dirty_tx = 0;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+ void *rx_buff;
+
+ skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
+ lp->rx_skbuff[i] = skb;
+ if (skb) {
+ skb->dev = dev;
+ rx_buff = skb->data;
+ } else
+ rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
+ if (rx_buff == NULL)
+ lp->rx_ring[i].base = 0;
+ else
+ lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
+ lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
+ }
+ /* The Tx buffer address is filled in as needed, but we do need to clear
+ the upper ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_skbuff[i] = NULL;
+ lp->tx_ring[i].base = 0;
+ }
+
+ lp->init_block.mode = 0x0000;
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
+ lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
+}
+
+static void
+lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
+{
+ struct lance_private *lp = dev->ml_priv;
+
+ if (must_reinit ||
+ (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
+ lance_purge_ring(dev);
+ lance_init_ring(dev, GFP_ATOMIC);
+ }
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(csr0_bits, dev->base_addr + LANCE_DATA);
+}
+
+
+static void lance_tx_timeout (struct net_device *dev)
+{
+ struct lance_private *lp = (struct lance_private *) dev->ml_priv;
+ int ioaddr = dev->base_addr;
+
+ outw (0, ioaddr + LANCE_ADDR);
+ printk ("%s: transmit timed out, status %4.4x, resetting.\n",
+ dev->name, inw (ioaddr + LANCE_DATA));
+ outw (0x0004, ioaddr + LANCE_DATA);
+ dev->stats.tx_errors++;
+#ifndef final_version
+ if (lance_debug > 3) {
+ int i;
+ printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
+ lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
+ lp->cur_rx);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
+ lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
+ lp->rx_ring[i].msg_length);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
+ lp->tx_ring[i].base, -lp->tx_ring[i].length,
+ lp->tx_ring[i].misc);
+ printk ("\n");
+ }
+#endif
+ lance_restart (dev, 0x0043, 1);
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue (dev);
+}
+
+
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct lance_private *lp = dev->ml_priv;
+ int ioaddr = dev->base_addr;
+ int entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->devlock, flags);
+
+ if (lance_debug > 3) {
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
+ inw(ioaddr+LANCE_DATA));
+ outw(0x0000, ioaddr+LANCE_DATA);
+ }
+
+ /* Fill in a Tx ring entry */
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->cur_tx & TX_RING_MOD_MASK;
+
+ /* Caution: the write order is important here, set the base address
+ with the "ownership" bits last. */
+
+	/* The old LANCE chips don't automatically pad buffers to min. size. */
+ if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
+ if (skb->len < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ goto out;
+ lp->tx_ring[entry].length = -ETH_ZLEN;
+ }
+ else
+ lp->tx_ring[entry].length = -skb->len;
+ } else
+ lp->tx_ring[entry].length = -skb->len;
+
+ lp->tx_ring[entry].misc = 0x0000;
+
+ dev->stats.tx_bytes += skb->len;
+
+ /* If any part of this buffer is >16M we must copy it to a low-memory
+ buffer. */
+ if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
+ if (lance_debug > 5)
+ printk("%s: bouncing a high-memory packet (%#x).\n",
+ dev->name, (u32)isa_virt_to_bus(skb->data));
+ skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
+ lp->tx_ring[entry].base =
+ ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
+ dev_kfree_skb(skb);
+ } else {
+ lp->tx_skbuff[entry] = skb;
+ lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
+ }
+ lp->cur_tx++;
+
+ /* Trigger an immediate send poll. */
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ outw(0x0048, ioaddr+LANCE_DATA);
+
+ if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
+ netif_stop_queue(dev);
+
+out:
+ spin_unlock_irqrestore(&lp->devlock, flags);
+ return NETDEV_TX_OK;
+}
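+
+/* Note on the 0x83000000 used above (standard LANCE descriptor bits): it
+   sets OWN (0x80000000), STP (0x02000000) and ENP (0x01000000) in the base
+   word, i.e. the frame occupies a single buffer and belongs to the chip as
+   soon as the word is stored -- hence writing the base address last. */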
+
+/* The LANCE interrupt handler. */
+static irqreturn_t lance_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp;
+ int csr0, ioaddr, boguscnt=10;
+ int must_restart;
+
+ ioaddr = dev->base_addr;
+ lp = dev->ml_priv;
+
+ spin_lock (&lp->devlock);
+
+ outw(0x00, dev->base_addr + LANCE_ADDR);
+ while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600 &&
+ --boguscnt >= 0) {
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
+
+ must_restart = 0;
+
+ if (lance_debug > 5)
+ printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
+ dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
+
+ if (csr0 & 0x0400) /* Rx interrupt */
+ lance_rx(dev);
+
+ if (csr0 & 0x0200) { /* Tx-done interrupt */
+ int dirty_tx = lp->dirty_tx;
+
+ while (dirty_tx < lp->cur_tx) {
+ int entry = dirty_tx & TX_RING_MOD_MASK;
+ int status = lp->tx_ring[entry].base;
+
+ if (status < 0)
+ break; /* It still hasn't been Txed */
+
+ lp->tx_ring[entry].base = 0;
+
+ if (status & 0x40000000) {
+					/* There was a major error, log it. */
+ int err_status = lp->tx_ring[entry].misc;
+ dev->stats.tx_errors++;
+ if (err_status & 0x0400)
+ dev->stats.tx_aborted_errors++;
+ if (err_status & 0x0800)
+ dev->stats.tx_carrier_errors++;
+ if (err_status & 0x1000)
+ dev->stats.tx_window_errors++;
+ if (err_status & 0x4000) {
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ dev->stats.tx_fifo_errors++;
+ /* Remove this verbosity later! */
+ printk("%s: Tx FIFO error! Status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ must_restart = 1;
+ }
+ } else {
+ if (status & 0x18000000)
+ dev->stats.collisions++;
+ dev->stats.tx_packets++;
+ }
+
+ /* We must free the original skb if it's not a data-only copy
+ in the bounce buffer. */
+ if (lp->tx_skbuff[entry]) {
+ dev_kfree_skb_irq(lp->tx_skbuff[entry]);
+ lp->tx_skbuff[entry] = NULL;
+ }
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
+ printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n",
+ dirty_tx, lp->cur_tx,
+ netif_queue_stopped(dev) ? "yes" : "no");
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ /* if the ring is no longer full, accept more packets */
+ if (netif_queue_stopped(dev) &&
+ dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
+ netif_wake_queue (dev);
+
+ lp->dirty_tx = dirty_tx;
+ }
+
+ /* Log misc errors. */
+ if (csr0 & 0x4000)
+ dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & 0x1000)
+ dev->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & 0x0800) {
+ printk("%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ must_restart = 1;
+ }
+
+ if (must_restart) {
+ /* stop the chip to clear the error condition, then restart */
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(0x0004, dev->base_addr + LANCE_DATA);
+ lance_restart(dev, 0x0002, 0);
+ }
+ }
+
+ /* Clear any other interrupt, and set interrupt enable. */
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(0x7940, dev->base_addr + LANCE_DATA);
+
+ if (lance_debug > 4)
+ printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
+ dev->name, inw(ioaddr + LANCE_ADDR),
+ inw(dev->base_addr + LANCE_DATA));
+
+ spin_unlock (&lp->devlock);
+ return IRQ_HANDLED;
+}
+
+static int
+lance_rx(struct net_device *dev)
+{
+ struct lance_private *lp = dev->ml_priv;
+ int entry = lp->cur_rx & RX_RING_MOD_MASK;
+ int i;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while (lp->rx_ring[entry].base >= 0) {
+ int status = lp->rx_ring[entry].base >> 24;
+
+ if (status != 0x03) { /* There was an error. */
+ /* There is a tricky error noted by John Murphy,
+ <murf@perftech.com> to Russ Nelson: Even with full-sized
+ buffers it's possible for a jabber packet to use two
+ buffers, with only the last correctly noting the error. */
+ if (status & 0x01) /* Only count a general error at the */
+ dev->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x20)
+ dev->stats.rx_frame_errors++;
+ if (status & 0x10)
+ dev->stats.rx_over_errors++;
+ if (status & 0x08)
+ dev->stats.rx_crc_errors++;
+ if (status & 0x04)
+ dev->stats.rx_fifo_errors++;
+ lp->rx_ring[entry].base &= 0x03ffffff;
+ }
+ else
+ {
+ /* Malloc up new buffer, compatible with net3. */
+ short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
+ struct sk_buff *skb;
+
+ if(pkt_len<60)
+ {
+ printk("%s: Runt packet!\n",dev->name);
+ dev->stats.rx_errors++;
+ }
+ else
+ {
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL)
+ {
+ printk("%s: Memory squeeze, deferring packet.\n", dev->name);
+ for (i=0; i < RX_RING_SIZE; i++)
+ if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
+ break;
+
+ if (i > RX_RING_SIZE -2)
+ {
+ dev->stats.rx_dropped++;
+ lp->rx_ring[entry].base |= 0x80000000;
+ lp->cur_rx++;
+ }
+ break;
+ }
+ skb_reserve(skb,2); /* 16 byte align */
+ skb_put(skb,pkt_len); /* Make room */
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
+ pkt_len);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ }
+ /* The docs say that the buffer length isn't touched, but Andrew Boyd
+ of QNX reports that some revs of the 79C965 clear it. */
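+		/* the ring expects the length as a negative two's-complement
+		   byte count, hence -PKT_BUF_SZ */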
+ lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
+ lp->rx_ring[entry].base |= 0x80000000;
+ entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+ }
+
+ /* We should check that at least two ring entries are free. If not,
+ we should free one and mark stats->rx_dropped++. */
+
+ return 0;
+}
+
+static int
+lance_close(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct lance_private *lp = dev->ml_priv;
+
+ netif_stop_queue (dev);
+
+ if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
+ outw(112, ioaddr+LANCE_ADDR);
+ dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
+ }
+ outw(0, ioaddr+LANCE_ADDR);
+
+ if (lance_debug > 1)
+ printk("%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inw(ioaddr+LANCE_DATA));
+
+ /* We stop the LANCE here -- it occasionally polls
+ memory if we don't. */
+ outw(0x0004, ioaddr+LANCE_DATA);
+
+ if (dev->dma != 4)
+ {
+ unsigned long flags=claim_dma_lock();
+ disable_dma(dev->dma);
+ release_dma_lock(flags);
+ }
+ free_irq(dev->irq, dev);
+
+ lance_purge_ring(dev);
+
+ return 0;
+}
+
+static struct net_device_stats *lance_get_stats(struct net_device *dev)
+{
+ struct lance_private *lp = dev->ml_priv;
+
+ if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
+ short ioaddr = dev->base_addr;
+ short saved_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->devlock, flags);
+ saved_addr = inw(ioaddr+LANCE_ADDR);
+ outw(112, ioaddr+LANCE_ADDR);
+ dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
+ outw(saved_addr, ioaddr+LANCE_ADDR);
+ spin_unlock_irqrestore(&lp->devlock, flags);
+ }
+
+ return &dev->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ outw(0, ioaddr+LANCE_ADDR);
+ outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */
+
+ if (dev->flags&IFF_PROMISC) {
+ outw(15, ioaddr+LANCE_ADDR);
+ outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
+ } else {
+ short multicast_table[4];
+ int i;
+ int num_addrs=netdev_mc_count(dev);
+ if(dev->flags&IFF_ALLMULTI)
+ num_addrs=1;
+ /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
+ memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
+ for (i = 0; i < 4; i++) {
+ outw(8 + i, ioaddr+LANCE_ADDR);
+ outw(multicast_table[i], ioaddr+LANCE_DATA);
+ }
+ outw(15, ioaddr+LANCE_ADDR);
+ outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
+ }
+
+ lance_restart(dev, 0x0142, 0); /* Resume normal operation */
+
+}
+
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
new file mode 100644
index 000000000000..56bc47a94186
--- /dev/null
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -0,0 +1,205 @@
+/* mvme147.c : the Linux/mvme147/lance ethernet driver
+ *
+ * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
+ * Based on the Sun Lance driver and the NetBSD HP Lance driver
+ * Uses the generic 7990.c LANCE code.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+/* Used for the temporary inet entries and routing */
+#include <linux/socket.h>
+#include <linux/route.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/mvme147hw.h>
+
+/* We have 16384 bytes of RAM for the init block and buffers. This places
+ * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
+ * buffers and 2 Tx buffers.
+ */
+#define LANCE_LOG_TX_BUFFERS 1
+#define LANCE_LOG_RX_BUFFERS 3
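+/* i.e. 2^1 = 2 Tx and 2^3 = 8 Rx buffers, matching the NetBSD layout
+ * mentioned above */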
+
+#include "7990.h" /* use generic LANCE code */
+
+/* Our private data structure */
+struct m147lance_private {
+ struct lance_private lance;
+ unsigned long ram;
+};
+
+/* function prototypes... This is easy because all the grot is in the
+ * generic LANCE support. All we have to support is probing for boards,
+ * plus board-specific init, open and close actions.
+ * Oh, and we need to tell the generic code how to read and write LANCE registers...
+ */
+static int m147lance_open(struct net_device *dev);
+static int m147lance_close(struct net_device *dev);
+static void m147lance_writerap(struct lance_private *lp, unsigned short value);
+static void m147lance_writerdp(struct lance_private *lp, unsigned short value);
+static unsigned short m147lance_readrdp(struct lance_private *lp);
+
+typedef void (*writerap_t)(void *, unsigned short);
+typedef void (*writerdp_t)(void *, unsigned short);
+typedef unsigned short (*readrdp_t)(void *);
+
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = m147lance_open,
+ .ndo_stop = m147lance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_set_rx_mode = lance_set_multicast,
+ .ndo_tx_timeout = lance_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+/* Initialise the one and only on-board 7990 */
+struct net_device * __init mvme147lance_probe(int unit)
+{
+ struct net_device *dev;
+ static int called;
+ static const char name[] = "MVME147 LANCE";
+ struct m147lance_private *lp;
+ u_long *addr;
+ u_long address;
+ int err;
+
+ if (!MACH_IS_MVME147 || called)
+ return ERR_PTR(-ENODEV);
+ called++;
+
+ dev = alloc_etherdev(sizeof(struct m147lance_private));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0)
+ sprintf(dev->name, "eth%d", unit);
+
+ /* Fill the dev fields */
+ dev->base_addr = (unsigned long)MVME147_LANCE_BASE;
+ dev->netdev_ops = &lance_netdev_ops;
+ dev->dma = 0;
+
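+	/* Build the station address: 08:00:3e is (presumably) Motorola's
+	 * OUI; the three high-order bytes of the 32-bit word at
+	 * ETHERNET_ADDRESS fill dev_addr[3..5], most significant byte
+	 * first, and the lowest byte is unused. */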
+ addr=(u_long *)ETHERNET_ADDRESS;
+ address = *addr;
+ dev->dev_addr[0]=0x08;
+ dev->dev_addr[1]=0x00;
+ dev->dev_addr[2]=0x3e;
+ address=address>>8;
+ dev->dev_addr[5]=address&0xff;
+ address=address>>8;
+ dev->dev_addr[4]=address&0xff;
+ address=address>>8;
+ dev->dev_addr[3]=address&0xff;
+
+ printk("%s: MVME147 at 0x%08lx, irq %d, "
+ "Hardware Address %pM\n",
+ dev->name, dev->base_addr, MVME147_LANCE_IRQ,
+ dev->dev_addr);
+
+ lp = netdev_priv(dev);
+ lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 16K */
+ if (!lp->ram)
+ {
+ printk("%s: No memory for LANCE buffers\n", dev->name);
+ free_netdev(dev);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ lp->lance.name = (char*)name; /* discards const, shut up gcc */
+ lp->lance.base = dev->base_addr;
+ lp->lance.init_block = (struct lance_init_block *)(lp->ram); /* CPU addr */
+ lp->lance.lance_init_block = (struct lance_init_block *)(lp->ram); /* LANCE addr of same RAM */
+ lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */
+ lp->lance.irq = MVME147_LANCE_IRQ;
+ lp->lance.writerap = (writerap_t)m147lance_writerap;
+ lp->lance.writerdp = (writerdp_t)m147lance_writerdp;
+ lp->lance.readrdp = (readrdp_t)m147lance_readrdp;
+ lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
+ lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
+ lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
+ lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;
+
+ err = register_netdev(dev);
+ if (err) {
+ free_pages(lp->ram, 3);
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+
+ return dev;
+}
+
+static void m147lance_writerap(struct lance_private *lp, unsigned short value)
+{
+ out_be16(lp->base + LANCE_RAP, value);
+}
+
+static void m147lance_writerdp(struct lance_private *lp, unsigned short value)
+{
+ out_be16(lp->base + LANCE_RDP, value);
+}
+
+static unsigned short m147lance_readrdp(struct lance_private *lp)
+{
+ return in_be16(lp->base + LANCE_RDP);
+}
+
+static int m147lance_open(struct net_device *dev)
+{
+ int status;
+
+ status = lance_open(dev); /* call generic lance open code */
+ if (status)
+ return status;
+ /* enable interrupts at board level. */
+ m147_pcc->lan_cntrl=0; /* clear the interrupts (if any) */
+ m147_pcc->lan_cntrl=0x08 | 0x04; /* Enable irq 4 */
+
+ return 0;
+}
+
+static int m147lance_close(struct net_device *dev)
+{
+	/* disable interrupts at board level */
+ m147_pcc->lan_cntrl=0x0; /* disable interrupts */
+ lance_close(dev);
+ return 0;
+}
+
+#ifdef MODULE
+MODULE_LICENSE("GPL");
+
+static struct net_device *dev_mvme147_lance;
+int __init init_module(void)
+{
+ dev_mvme147_lance = mvme147lance_probe(-1);
+ if (IS_ERR(dev_mvme147_lance))
+ return PTR_ERR(dev_mvme147_lance);
+ return 0;
+}
+
+void __exit cleanup_module(void)
+{
+ struct m147lance_private *lp = netdev_priv(dev_mvme147_lance);
+ unregister_netdev(dev_mvme147_lance);
+ free_pages(lp->ram, 3);
+ free_netdev(dev_mvme147_lance);
+}
+
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
new file mode 100644
index 000000000000..6e6aa7213aab
--- /dev/null
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -0,0 +1,1254 @@
+/*
+ * ni6510 (am7990 'lance' chip) driver for Linux-net-3
+ * BETAcode v0.71 (96/09/29) for 2.0.0 (or later)
+ * copyrights (c) 1994,1995,1996 by M.Hipp
+ *
+ * This driver can handle the old ni6510 board and the newer ni6510
+ * EtherBlaster. (it probably also works with every fully NE2100
+ * compatible card)
+ *
+ * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same GNU General Public License that covers the Linux-kernel.
+ *
+ * comments/bugs/suggestions can be sent to:
+ * Michael Hipp
+ * email: hippm@informatik.uni-tuebingen.de
+ *
+ * sources:
+ * some things are from the 'ni6510-packet-driver for dos by Russ Nelson'
+ * and from the original drivers by D.Becker
+ *
+ * known problems:
+ * - on some PCI boards (including my own) the card/board/ISA-bridge has
+ * problems with bus master DMA. This results in lotsa overruns.
+ *     It may help to '#define RCV_PARANOIA_CHECK' or to #undef the
+ *     XMT_VIA_SKB and RCV_VIA_SKB options .. this reduces driver performance.
+ *     Or just play with your BIOS options to optimize ISA-DMA access.
+ *     Maybe you also want to play with the LOW_PERFORMANCE and MID_PERFORMANCE
+ *     defines -> please report your experience back to me
+ *   - Harald reported that for ASUS SP3G mainboards you should use
+ * the 'optimal settings' from the user's manual on page 3-12!
+ *
+ * credits:
+ * thanx to Jason Sullivan for sending me a ni6510 card!
+ * lot of debug runs with ASUS SP3G Boards (Intel Saturn) by Harald Koenig
+ *
+ * simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB)
+ * average: FTP -> 8384421 bytes received in 8.5 seconds
+ * (no RCV_VIA_SKB,no XMT_VIA_SKB,PARANOIA_CHECK,4 XMIT BUFS, 8 RCV_BUFFS)
+ * peak: FTP -> 8384421 bytes received in 7.5 seconds
+ * (RCV_VIA_SKB,XMT_VIA_SKB,no PARANOIA_CHECK,1(!) XMIT BUF, 16 RCV BUFFS)
+ */
+
+/*
+ * 99.Jun.8: added support for /proc/net/dev byte count for xosview (HK)
+ * 96.Sept.29: virt_to_bus stuff added for new memory model
+ * 96.April.29: Added Harald Koenig's Patches (MH)
+ * 96.April.13: enhanced error handling .. more tests (MH)
+ * 96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH)
+ * 96.April.1: (no joke ;) .. added EtherBlaster and Module support (MH)
+ * 96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH)
+ * hopefully no more 16MB limit
+ *
+ * 95.Nov.18: multicast tweaked (AC).
+ *
+ * 94.Aug.22: changes in xmit_intr (ack more than one xmitted-packet), ni65_send_packet (p->lock) (MH)
+ *
+ * 94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH)
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include "ni65.h"
+
+/*
+ * the current setting allows an acceptable performance
+ * for 'RCV_PARANOIA_CHECK' read the 'known problems' part in
+ * the header of this file
+ * 'invert' the defines for max. performance. This may cause DMA problems
+ * on some boards (e.g on my ASUS SP3G)
+ */
+#undef XMT_VIA_SKB
+#undef RCV_VIA_SKB
+#define RCV_PARANOIA_CHECK
+
+#define MID_PERFORMANCE
+
+#if defined( LOW_PERFORMANCE )
+ static int isa0=7,isa1=7,csr80=0x0c10;
+#elif defined( MID_PERFORMANCE )
+ static int isa0=5,isa1=5,csr80=0x2810;
+#else /* high performance */
+ static int isa0=4,isa1=4,csr80=0x0017;
+#endif
+
+/*
+ * a few card/vendor specific defines
+ */
+#define NI65_ID0 0x00
+#define NI65_ID1 0x55
+#define NI65_EB_ID0 0x52
+#define NI65_EB_ID1 0x44
+#define NE2100_ID0 0x57
+#define NE2100_ID1 0x57
+
+#define PORT p->cmdr_addr
+
+/*
+ * buffer configuration
+ */
+#if 1
+#define RMDNUM 16
+#define RMDNUMMASK 0x80000000
+#else
+#define RMDNUM 8
+#define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */
+#endif
+
+#if 0
+#define TMDNUM 1
+#define TMDNUMMASK 0x00000000
+#else
+#define TMDNUM 4
+#define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */
+#endif
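+/* the top three bits of the ring pointers encode log2(ring size):
+ * log2(16)<<29 = 4<<29 = 0x80000000 for RMDNUM 16, and
+ * log2(4)<<29 = 2<<29 = 0x40000000 for TMDNUM 4 -- see the
+ * rrp/trp comments in ni65.h */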
+
+/* slightly oversized */
+#define R_BUF_SIZE 1544
+#define T_BUF_SIZE 1544
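+/* i.e. enough for a maximal Ethernet frame (1518 bytes including the
+ * FCS) plus some slack */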
+
+/*
+ * lance register defines
+ */
+#define L_DATAREG 0x00
+#define L_ADDRREG 0x02
+#define L_RESET 0x04
+#define L_CONFIG 0x05
+#define L_BUSIF 0x06
+
+/*
+ * to access the lance/am7990 registers, you have to write the
+ * register number into L_ADDRREG; then you can access it through L_DATAREG
+ */
+#define CSR0 0x00
+#define CSR1 0x01
+#define CSR2 0x02
+#define CSR3 0x03
+
+#define INIT_RING_BEFORE_START 0x1
+#define FULL_RESET_ON_ERROR 0x2
+
+#if 0
+#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
+ outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
+#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
+ inw(PORT+L_DATAREG))
+#if 0
+#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
+#else
+#define writedatareg(val) { writereg(val,CSR0); }
+#endif
+#else
+#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);}
+#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG))
+#define writedatareg(val) { writereg(val,CSR0); }
+#endif
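+/* e.g. readreg(CSR0) first selects CSR0 through L_ADDRREG, then reads
+ * it back through L_DATAREG; writereg() stores the same way */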
+
+static unsigned char ni_vendor[] = { 0x02,0x07,0x01 };
+
+static struct card {
+ unsigned char id0,id1;
+ short id_offset;
+ short total_size;
+ short cmd_offset;
+ short addr_offset;
+ unsigned char *vendor_id;
+ char *cardname;
+ unsigned long config;
+} cards[] = {
+ {
+ .id0 = NI65_ID0,
+ .id1 = NI65_ID1,
+ .id_offset = 0x0e,
+ .total_size = 0x10,
+ .cmd_offset = 0x0,
+ .addr_offset = 0x8,
+ .vendor_id = ni_vendor,
+ .cardname = "ni6510",
+ .config = 0x1,
+ },
+ {
+ .id0 = NI65_EB_ID0,
+ .id1 = NI65_EB_ID1,
+ .id_offset = 0x0e,
+ .total_size = 0x18,
+ .cmd_offset = 0x10,
+ .addr_offset = 0x0,
+ .vendor_id = ni_vendor,
+ .cardname = "ni6510 EtherBlaster",
+ .config = 0x2,
+ },
+ {
+ .id0 = NE2100_ID0,
+ .id1 = NE2100_ID1,
+ .id_offset = 0x0e,
+ .total_size = 0x18,
+ .cmd_offset = 0x10,
+ .addr_offset = 0x0,
+ .vendor_id = NULL,
+ .cardname = "generic NE2100",
+ .config = 0x0,
+ },
+};
+#define NUM_CARDS 3
+
+struct priv
+{
+ struct rmd rmdhead[RMDNUM];
+ struct tmd tmdhead[TMDNUM];
+ struct init_block ib;
+ int rmdnum;
+ int tmdnum,tmdlast;
+#ifdef RCV_VIA_SKB
+ struct sk_buff *recv_skb[RMDNUM];
+#else
+ void *recvbounce[RMDNUM];
+#endif
+#ifdef XMT_VIA_SKB
+ struct sk_buff *tmd_skb[TMDNUM];
+#endif
+ void *tmdbounce[TMDNUM];
+ int tmdbouncenum;
+ int lock,xmit_queued;
+
+ void *self;
+ int cmdr_addr;
+ int cardno;
+ int features;
+ spinlock_t ring_lock;
+};
+
+static int ni65_probe1(struct net_device *dev,int);
+static irqreturn_t ni65_interrupt(int irq, void * dev_id);
+static void ni65_recv_intr(struct net_device *dev,int);
+static void ni65_xmit_intr(struct net_device *dev,int);
+static int ni65_open(struct net_device *dev);
+static int ni65_lance_reinit(struct net_device *dev);
+static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
+static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
+ struct net_device *dev);
+static void ni65_timeout(struct net_device *dev);
+static int ni65_close(struct net_device *dev);
+static int ni65_alloc_buffer(struct net_device *dev);
+static void ni65_free_buffer(struct priv *p);
+static void set_multicast_list(struct net_device *dev);
+
+static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */
+static int dmatab[] __initdata = { 0,3,5,6,7 }; /* dma config-translate and autodetect */
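+/* these translate the 2-bit IRQ/DMA fields of the EtherBlaster config
+ * register (read in ni65_probe1()) into real IRQ/DMA numbers; entries
+ * 1-4 of dmatab double as the autodetection candidate list */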
+
+static int debuglevel = 1;
+
+/*
+ * set 'performance' registers .. we must STOP lance for that
+ */
+static void ni65_set_performance(struct priv *p)
+{
+ writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */
+
+ if( !(cards[p->cardno].config & 0x02) )
+ return;
+
+ outw(80,PORT+L_ADDRREG);
+ if(inw(PORT+L_ADDRREG) != 80)
+ return;
+
+ writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */
+ outw(0,PORT+L_ADDRREG);
+ outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */
+ outw(1,PORT+L_ADDRREG);
+ outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */
+
+ outw(CSR0,PORT+L_ADDRREG); /* switch back to CSR0 */
+}
+
+/*
+ * open interface (up)
+ */
+static int ni65_open(struct net_device *dev)
+{
+ struct priv *p = dev->ml_priv;
+ int irqval = request_irq(dev->irq, ni65_interrupt,0,
+ cards[p->cardno].cardname,dev);
+ if (irqval) {
+ printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n",
+ dev->name,dev->irq, irqval);
+ return -EAGAIN;
+ }
+
+ if(ni65_lance_reinit(dev))
+ {
+ netif_start_queue(dev);
+ return 0;
+ }
+ else
+ {
+ free_irq(dev->irq,dev);
+ return -EAGAIN;
+ }
+}
+
+/*
+ * close interface (down)
+ */
+static int ni65_close(struct net_device *dev)
+{
+ struct priv *p = dev->ml_priv;
+
+ netif_stop_queue(dev);
+
+ outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */
+
+#ifdef XMT_VIA_SKB
+ {
+ int i;
+ for(i=0;i<TMDNUM;i++)
+ {
+ if(p->tmd_skb[i]) {
+ dev_kfree_skb(p->tmd_skb[i]);
+ p->tmd_skb[i] = NULL;
+ }
+ }
+ }
+#endif
+ free_irq(dev->irq,dev);
+ return 0;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ struct priv *p = dev->ml_priv;
+ disable_dma(dev->dma);
+ free_dma(dev->dma);
+ release_region(dev->base_addr, cards[p->cardno].total_size);
+ ni65_free_buffer(p);
+}
+
+/* set io, irq and dma below, or pass them when loading the module with insmod */
+static int irq;
+static int io;
+static int dma;
+
+/*
+ * Probe The Card (not the lance-chip)
+ */
+struct net_device * __init ni65_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(0);
+ static const int ports[] = { 0x360, 0x300, 0x320, 0x340, 0 };
+ const int *port;
+ int err = 0;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ irq = dev->irq;
+ dma = dev->dma;
+ } else {
+ dev->base_addr = io;
+ }
+
+ if (dev->base_addr > 0x1ff) { /* Check a single specified location. */
+ err = ni65_probe1(dev, dev->base_addr);
+ } else if (dev->base_addr > 0) { /* Don't probe at all. */
+ err = -ENXIO;
+ } else {
+ for (port = ports; *port && ni65_probe1(dev, *port); port++)
+ ;
+ if (!*port)
+ err = -ENODEV;
+ }
+ if (err)
+ goto out;
+
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static const struct net_device_ops ni65_netdev_ops = {
+ .ndo_open = ni65_open,
+ .ndo_stop = ni65_close,
+ .ndo_start_xmit = ni65_send_packet,
+ .ndo_tx_timeout = ni65_timeout,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/*
+ * this is the real card probe ..
+ */
+static int __init ni65_probe1(struct net_device *dev,int ioaddr)
+{
+ int i,j;
+ struct priv *p;
+ unsigned long flags;
+
+ dev->irq = irq;
+ dev->dma = dma;
+
+ for(i=0;i<NUM_CARDS;i++) {
+ if(!request_region(ioaddr, cards[i].total_size, cards[i].cardname))
+ continue;
+ if(cards[i].id_offset >= 0) {
+ if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 ||
+ inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) {
+ release_region(ioaddr, cards[i].total_size);
+ continue;
+ }
+ }
+		if(cards[i].vendor_id) {
+			int mismatch = 0;
+			for(j=0;j<3;j++)
+				if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j])
+					mismatch = 1;
+			/* on a vendor-id mismatch, release the region and
+			   try the next card type */
+			if(mismatch) {
+				release_region(ioaddr, cards[i].total_size);
+				continue;
+			}
+		}
+		break;
+ }
+ if(i == NUM_CARDS)
+ return -ENODEV;
+
+ for(j=0;j<6;j++)
+ dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);
+
+ if( (j=ni65_alloc_buffer(dev)) < 0) {
+ release_region(ioaddr, cards[i].total_size);
+ return j;
+ }
+ p = dev->ml_priv;
+ p->cmdr_addr = ioaddr + cards[i].cmd_offset;
+ p->cardno = i;
+ spin_lock_init(&p->ring_lock);
+
+ printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr);
+
+ outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
+ if( (j=readreg(CSR0)) != 0x4) {
+ printk("failed.\n");
+ printk(KERN_ERR "%s: Can't RESET card: %04x\n", dev->name, j);
+ ni65_free_buffer(p);
+ release_region(ioaddr, cards[p->cardno].total_size);
+ return -EAGAIN;
+ }
+
+ outw(88,PORT+L_ADDRREG);
+ if(inw(PORT+L_ADDRREG) == 88) {
+ unsigned long v;
+ v = inw(PORT+L_DATAREG);
+ v <<= 16;
+ outw(89,PORT+L_ADDRREG);
+ v |= inw(PORT+L_DATAREG);
+ printk("Version %#08lx, ",v);
+ p->features = INIT_RING_BEFORE_START;
+ }
+ else {
+ printk("ancient LANCE, ");
+ p->features = 0x0;
+ }
+
+ if(test_bit(0,&cards[i].config)) {
+ dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3];
+ dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3];
+ printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma);
+ }
+ else {
+ if(dev->dma == 0) {
+ /* 'stuck test' from lance.c */
+ unsigned long dma_channels =
+ ((inb(DMA1_STAT_REG) >> 4) & 0x0f)
+ | (inb(DMA2_STAT_REG) & 0xf0);
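+				/* a set bit in the DMA status registers means that
+				 * channel is presumably in use ("stuck"); try each
+				 * remaining channel in cascade mode and check whether
+				 * the LANCE init reaches CSR0_IDON */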
+ for(i=1;i<5;i++) {
+ int dma = dmatab[i];
+ if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510"))
+ continue;
+
+ flags=claim_dma_lock();
+ disable_dma(dma);
+ set_dma_mode(dma,DMA_MODE_CASCADE);
+ enable_dma(dma);
+ release_dma_lock(flags);
+
+ ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */
+
+ flags=claim_dma_lock();
+ disable_dma(dma);
+ free_dma(dma);
+ release_dma_lock(flags);
+
+ if(readreg(CSR0) & CSR0_IDON)
+ break;
+ }
+ if(i == 5) {
+ printk("failed.\n");
+ printk(KERN_ERR "%s: Can't detect DMA channel!\n", dev->name);
+ ni65_free_buffer(p);
+ release_region(ioaddr, cards[p->cardno].total_size);
+ return -EAGAIN;
+ }
+ dev->dma = dmatab[i];
+ printk("DMA %d (autodetected), ",dev->dma);
+ }
+ else
+ printk("DMA %d (assigned), ",dev->dma);
+
+ if(dev->irq < 2)
+ {
+ unsigned long irq_mask;
+
+ ni65_init_lance(p,dev->dev_addr,0,0);
+ irq_mask = probe_irq_on();
+ writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */
+ msleep(20);
+ dev->irq = probe_irq_off(irq_mask);
+ if(!dev->irq)
+ {
+ printk("Failed to detect IRQ line!\n");
+ ni65_free_buffer(p);
+ release_region(ioaddr, cards[p->cardno].total_size);
+ return -EAGAIN;
+ }
+ printk("IRQ %d (autodetected).\n",dev->irq);
+ }
+ else
+ printk("IRQ %d (assigned).\n",dev->irq);
+ }
+
+ if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0)
+ {
+ printk(KERN_ERR "%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
+ ni65_free_buffer(p);
+ release_region(ioaddr, cards[p->cardno].total_size);
+ return -EAGAIN;
+ }
+
+ dev->base_addr = ioaddr;
+ dev->netdev_ops = &ni65_netdev_ops;
+ dev->watchdog_timeo = HZ/2;
+
+ return 0; /* everything is OK */
+}
+
+/*
+ * set lance register and trigger init
+ */
+static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode)
+{
+ int i;
+ u32 pib;
+
+ writereg(CSR0_CLRALL|CSR0_STOP,CSR0);
+
+ for(i=0;i<6;i++)
+ p->ib.eaddr[i] = daddr[i];
+
+ for(i=0;i<8;i++)
+ p->ib.filter[i] = filter;
+ p->ib.mode = mode;
+
+ p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK;
+ p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK;
+ writereg(0,CSR3); /* busmaster/no word-swap */
+ pib = (u32) isa_virt_to_bus(&p->ib);
+ writereg(pib & 0xffff,CSR1);
+ writereg(pib >> 16,CSR2);
+
+ writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */
+
+ for(i=0;i<32;i++)
+ {
+ mdelay(4);
+ if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) )
+ break; /* init ok ? */
+ }
+}
+
+/*
+ * allocate memory area and check the 16MB border
+ */
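+/* (ISA bus-master DMA only drives 24 address bits, so every buffer must
+ * lie below the 16MB boundary -- hence GFP_DMA and the explicit
+ * virt_to_phys() check below) */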
+static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type)
+{
+ struct sk_buff *skb=NULL;
+ unsigned char *ptr;
+ void *ret;
+
+ if(type) {
+ ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA);
+ if(!skb) {
+ printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
+ return NULL;
+ }
+ skb_reserve(skb,2+16);
+ skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */
+ ptr = skb->data;
+ }
+ else {
+ ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA);
+ if(!ret) {
+ printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
+ return NULL;
+ }
+ }
+ if( (u32) virt_to_phys(ptr+size) > 0x1000000) {
+ printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what);
+ if(type)
+ kfree_skb(skb);
+ else
+ kfree(ptr);
+ return NULL;
+ }
+ return ret;
+}
+
+/*
+ * allocate all memory structures .. send/recv buffers etc ...
+ */
+static int ni65_alloc_buffer(struct net_device *dev)
+{
+ unsigned char *ptr;
+ struct priv *p;
+ int i;
+
+ /*
+ * we need 8-aligned memory ..
+ */
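+	/* over-allocate by 8 bytes and round the pointer up to the next
+	 * 8-byte boundary with (ptr + 7) & ~7; the raw pointer is kept in
+	 * p->self so it can be kfree()d later */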
+ ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0);
+ if(!ptr)
+ return -ENOMEM;
+
+ p = dev->ml_priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
+ memset((char *)p, 0, sizeof(struct priv));
+ p->self = ptr;
+
+ for(i=0;i<TMDNUM;i++)
+ {
+#ifdef XMT_VIA_SKB
+ p->tmd_skb[i] = NULL;
+#endif
+ p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0);
+ if(!p->tmdbounce[i]) {
+ ni65_free_buffer(p);
+ return -ENOMEM;
+ }
+ }
+
+ for(i=0;i<RMDNUM;i++)
+ {
+#ifdef RCV_VIA_SKB
+ p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1);
+ if(!p->recv_skb[i]) {
+ ni65_free_buffer(p);
+ return -ENOMEM;
+ }
+#else
+ p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0);
+ if(!p->recvbounce[i]) {
+ ni65_free_buffer(p);
+ return -ENOMEM;
+ }
+#endif
+ }
+
+ return 0; /* everything is OK */
+}
+
+/*
+ * free buffers and private struct
+ */
+static void ni65_free_buffer(struct priv *p)
+{
+ int i;
+
+ if(!p)
+ return;
+
+ for(i=0;i<TMDNUM;i++) {
+ kfree(p->tmdbounce[i]);
+#ifdef XMT_VIA_SKB
+ if(p->tmd_skb[i])
+ dev_kfree_skb(p->tmd_skb[i]);
+#endif
+ }
+
+ for(i=0;i<RMDNUM;i++)
+ {
+#ifdef RCV_VIA_SKB
+ if(p->recv_skb[i])
+ dev_kfree_skb(p->recv_skb[i]);
+#else
+ kfree(p->recvbounce[i]);
+#endif
+ }
+ kfree(p->self);
+}
+
+
+/*
+ * stop and (re)start lance .. e.g. after an error
+ */
+static void ni65_stop_start(struct net_device *dev,struct priv *p)
+{
+ int csr0 = CSR0_INEA;
+
+ writedatareg(CSR0_STOP);
+
+ if(debuglevel > 1)
+ printk(KERN_DEBUG "ni65_stop_start\n");
+
+ if(p->features & INIT_RING_BEFORE_START) {
+ int i;
+#ifdef XMT_VIA_SKB
+ struct sk_buff *skb_save[TMDNUM];
+#endif
+ unsigned long buffer[TMDNUM];
+ short blen[TMDNUM];
+
+ if(p->xmit_queued) {
+ while(1) {
+ if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN))
+ break;
+ p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
+ if(p->tmdlast == p->tmdnum)
+ break;
+ }
+ }
+
+ for(i=0;i<TMDNUM;i++) {
+ struct tmd *tmdp = p->tmdhead + i;
+#ifdef XMT_VIA_SKB
+ skb_save[i] = p->tmd_skb[i];
+#endif
+ buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
+ blen[i] = tmdp->blen;
+ tmdp->u.s.status = 0x0;
+ }
+
+ for(i=0;i<RMDNUM;i++) {
+ struct rmd *rmdp = p->rmdhead + i;
+ rmdp->u.s.status = RCV_OWN;
+ }
+ p->tmdnum = p->xmit_queued = 0;
+ writedatareg(CSR0_STRT | csr0);
+
+ for(i=0;i<TMDNUM;i++) {
+ int num = (i + p->tmdlast) & (TMDNUM-1);
+ p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */
+ p->tmdhead[i].blen = blen[num];
+ if(p->tmdhead[i].u.s.status & XMIT_OWN) {
+ p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
+ p->xmit_queued = 1;
+ writedatareg(CSR0_TDMD | CSR0_INEA | csr0);
+ }
+#ifdef XMT_VIA_SKB
+ p->tmd_skb[i] = skb_save[num];
+#endif
+ }
+ p->rmdnum = p->tmdlast = 0;
+ if(!p->lock)
+ if (p->tmdnum || !p->xmit_queued)
+ netif_wake_queue(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ }
+ else
+ writedatareg(CSR0_STRT | csr0);
+}
+
+/*
+ * init lance (write init-values .. init-buffers) (open-helper)
+ */
+static int ni65_lance_reinit(struct net_device *dev)
+{
+ int i;
+ struct priv *p = dev->ml_priv;
+ unsigned long flags;
+
+ p->lock = 0;
+ p->xmit_queued = 0;
+
+ flags=claim_dma_lock();
+	disable_dma(dev->dma); /* I've never worked with DMA, but we do it like the packet driver does */
+ set_dma_mode(dev->dma,DMA_MODE_CASCADE);
+ enable_dma(dev->dma);
+ release_dma_lock(flags);
+
+ outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
+ if( (i=readreg(CSR0) ) != 0x4)
+ {
+ printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name,
+ cards[p->cardno].cardname,(int) i);
+ flags=claim_dma_lock();
+ disable_dma(dev->dma);
+ release_dma_lock(flags);
+ return 0;
+ }
+
+ p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0;
+ for(i=0;i<TMDNUM;i++)
+ {
+ struct tmd *tmdp = p->tmdhead + i;
+#ifdef XMT_VIA_SKB
+ if(p->tmd_skb[i]) {
+ dev_kfree_skb(p->tmd_skb[i]);
+ p->tmd_skb[i] = NULL;
+ }
+#endif
+ tmdp->u.buffer = 0x0;
+ tmdp->u.s.status = XMIT_START | XMIT_END;
+ tmdp->blen = tmdp->status2 = 0;
+ }
+
+ for(i=0;i<RMDNUM;i++)
+ {
+ struct rmd *rmdp = p->rmdhead + i;
+#ifdef RCV_VIA_SKB
+ rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data);
+#else
+ rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]);
+#endif
+ rmdp->blen = -(R_BUF_SIZE-8);
+ rmdp->mlen = 0;
+ rmdp->u.s.status = RCV_OWN;
+ }
+
+ if(dev->flags & IFF_PROMISC)
+ ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
+ else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI)
+ ni65_init_lance(p,dev->dev_addr,0xff,0x0);
+ else
+ ni65_init_lance(p,dev->dev_addr,0x00,0x00);
+
+ /*
+	 * ni65_init_lance() leaves L_ADDRREG set to CSR0
+ * NOW, WE WILL NEVER CHANGE THE L_ADDRREG, CSR0 IS ALWAYS SELECTED
+ */
+
+ if(inw(PORT+L_DATAREG) & CSR0_IDON) {
+ ni65_set_performance(p);
+ /* init OK: start lance , enable interrupts */
+ writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);
+ return 1; /* ->OK */
+ }
+ printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
+ flags=claim_dma_lock();
+ disable_dma(dev->dma);
+ release_dma_lock(flags);
+ return 0; /* ->Error */
+}
+
+/*
+ * interrupt handler
+ */
+static irqreturn_t ni65_interrupt(int irq, void * dev_id)
+{
+ int csr0 = 0;
+ struct net_device *dev = dev_id;
+ struct priv *p;
+ int bcnt = 32;
+
+ p = dev->ml_priv;
+
+ spin_lock(&p->ring_lock);
+
+ while(--bcnt) {
+ csr0 = inw(PORT+L_DATAREG);
+
+#if 0
+ writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */
+#else
+ writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */
+#endif
+
+ if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT)))
+ break;
+
+ if(csr0 & CSR0_RINT) /* RECV-int? */
+ ni65_recv_intr(dev,csr0);
+ if(csr0 & CSR0_TINT) /* XMIT-int? */
+ ni65_xmit_intr(dev,csr0);
+
+ if(csr0 & CSR0_ERR)
+ {
+ if(debuglevel > 1)
+ printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
+ if(csr0 & CSR0_BABL)
+ dev->stats.tx_errors++;
+ if(csr0 & CSR0_MISS) {
+ int i;
+ for(i=0;i<RMDNUM;i++)
+ printk("%02x ",p->rmdhead[i].u.s.status);
+ printk("\n");
+ dev->stats.rx_errors++;
+ }
+ if(csr0 & CSR0_MERR) {
+ if(debuglevel > 1)
+ printk(KERN_ERR "%s: Ooops .. memory error: %04x.\n",dev->name,csr0);
+ ni65_stop_start(dev,p);
+ }
+ }
+ }
+
+#ifdef RCV_PARANOIA_CHECK
+{
+ int j;
+ for(j=0;j<RMDNUM;j++)
+ {
+ int i, num2;
+ for(i=RMDNUM-1;i>0;i--) {
+ num2 = (p->rmdnum + i) & (RMDNUM-1);
+ if(!(p->rmdhead[num2].u.s.status & RCV_OWN))
+ break;
+ }
+
+ if(i) {
+ int k, num1;
+ for(k=0;k<RMDNUM;k++) {
+ num1 = (p->rmdnum + k) & (RMDNUM-1);
+ if(!(p->rmdhead[num1].u.s.status & RCV_OWN))
+ break;
+ }
+ if(!k)
+ break;
+
+ if(debuglevel > 0)
+ {
+ char buf[256],*buf1;
+ buf1 = buf;
+ for(k=0;k<RMDNUM;k++) {
+ sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */
+ buf1 += 3;
+ }
+ *buf1 = 0;
+ printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf);
+ }
+
+ p->rmdnum = num1;
+ ni65_recv_intr(dev,csr0);
+ if((p->rmdhead[num2].u.s.status & RCV_OWN))
+ break; /* ok, we are 'in sync' again */
+ }
+ else
+ break;
+ }
+}
+#endif
+
+ if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) {
+ printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name);
+ ni65_stop_start(dev,p);
+ }
+ else
+ writedatareg(CSR0_INEA);
+
+ spin_unlock(&p->ring_lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * We have received an Xmit-Interrupt ..
+ * send a new packet if necessary
+ */
+static void ni65_xmit_intr(struct net_device *dev,int csr0)
+{
+ struct priv *p = dev->ml_priv;
+
+ while(p->xmit_queued)
+ {
+ struct tmd *tmdp = p->tmdhead + p->tmdlast;
+ int tmdstat = tmdp->u.s.status;
+
+ if(tmdstat & XMIT_OWN)
+ break;
+
+ if(tmdstat & XMIT_ERR)
+ {
+#if 0
+ if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3)
+ printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name);
+#endif
+ /* checking some errors */
+ if(tmdp->status2 & XMIT_RTRY)
+ dev->stats.tx_aborted_errors++;
+ if(tmdp->status2 & XMIT_LCAR)
+ dev->stats.tx_carrier_errors++;
+ if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
+ /* this stops the xmitter */
+ dev->stats.tx_fifo_errors++;
+ if(debuglevel > 0)
+ printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
+ if(p->features & INIT_RING_BEFORE_START) {
+ tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; /* test: resend this frame */
+ ni65_stop_start(dev,p);
+ break; /* no more Xmit processing .. */
+ }
+ else
+ ni65_stop_start(dev,p);
+ }
+ if(debuglevel > 2)
+ printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
+ if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
+ dev->stats.tx_errors++;
+ tmdp->status2 = 0;
+ }
+ else {
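+			/* blen holds the negative frame length, so '-=' adds */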
+ dev->stats.tx_bytes -= (short)(tmdp->blen);
+ dev->stats.tx_packets++;
+ }
+
+#ifdef XMT_VIA_SKB
+ if(p->tmd_skb[p->tmdlast]) {
+ dev_kfree_skb_irq(p->tmd_skb[p->tmdlast]);
+ p->tmd_skb[p->tmdlast] = NULL;
+ }
+#endif
+
+ p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
+ if(p->tmdlast == p->tmdnum)
+ p->xmit_queued = 0;
+ }
+ netif_wake_queue(dev);
+}
+
+/*
+ * We have received a packet
+ */
+static void ni65_recv_intr(struct net_device *dev,int csr0)
+{
+ struct rmd *rmdp;
+ int rmdstat,len;
+ int cnt=0;
+ struct priv *p = dev->ml_priv;
+
+ rmdp = p->rmdhead + p->rmdnum;
+ while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
+ {
+ cnt++;
+ if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? */
+ {
+ if(!(rmdstat & RCV_ERR)) {
+ if(rmdstat & RCV_START)
+ {
+ dev->stats.rx_length_errors++;
+ printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
+ }
+ }
+ else {
+ if(debuglevel > 2)
+ printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
+ dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
+ if(rmdstat & RCV_FRAM)
+ dev->stats.rx_frame_errors++;
+ if(rmdstat & RCV_OFLO)
+ dev->stats.rx_over_errors++;
+ if(rmdstat & RCV_CRC)
+ dev->stats.rx_crc_errors++;
+ if(rmdstat & RCV_BUF_ERR)
+ dev->stats.rx_fifo_errors++;
+ }
+ if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
+ dev->stats.rx_errors++;
+ }
+ else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
+ {
+#ifdef RCV_VIA_SKB
+ struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC);
+ if (skb)
+ skb_reserve(skb,16);
+#else
+ struct sk_buff *skb = dev_alloc_skb(len+2);
+#endif
+ if(skb)
+ {
+ skb_reserve(skb,2);
+#ifdef RCV_VIA_SKB
+ if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
+ skb_put(skb,len);
+ skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
+ }
+ else {
+ struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
+ skb_put(skb,R_BUF_SIZE);
+ p->recv_skb[p->rmdnum] = skb;
+ rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
+ skb = skb1;
+ skb_trim(skb,len);
+ }
+#else
+ skb_put(skb,len);
+ skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
+#endif
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ }
+ else
+ {
+ printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
+ dev->stats.rx_dropped++;
+ }
+ }
+ else {
+ printk(KERN_INFO "%s: received runt packet\n",dev->name);
+ dev->stats.rx_errors++;
+ }
+ rmdp->blen = -(R_BUF_SIZE-8);
+ rmdp->mlen = 0;
+ rmdp->u.s.status = RCV_OWN; /* change owner */
+ p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
+ rmdp = p->rmdhead + p->rmdnum;
+ }
+}
+
+/*
+ * kick xmitter ..
+ */
+
+static void ni65_timeout(struct net_device *dev)
+{
+ int i;
+ struct priv *p = dev->ml_priv;
+
+ printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name);
+ for(i=0;i<TMDNUM;i++)
+ printk("%02x ",p->tmdhead[i].u.s.status);
+ printk("\n");
+ ni65_lance_reinit(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+/*
+ * Send a packet
+ */
+
+static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct priv *p = dev->ml_priv;
+
+ netif_stop_queue(dev);
+
+ if (test_and_set_bit(0, (void*)&p->lock)) {
+ printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ {
+ short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ struct tmd *tmdp;
+ unsigned long flags;
+
+#ifdef XMT_VIA_SKB
+ if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
+#endif
+
+ skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum],
+ skb->len > T_BUF_SIZE ? T_BUF_SIZE :
+ skb->len);
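+		/* zero-pad frames shorter than ETH_ZLEN (60 bytes) */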
+ if (len > skb->len)
+ memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
+ dev_kfree_skb (skb);
+
+ spin_lock_irqsave(&p->ring_lock, flags);
+ tmdp = p->tmdhead + p->tmdnum;
+ tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]);
+ p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1);
+
+#ifdef XMT_VIA_SKB
+ }
+ else {
+ spin_lock_irqsave(&p->ring_lock, flags);
+
+ tmdp = p->tmdhead + p->tmdnum;
+ tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
+ p->tmd_skb[p->tmdnum] = skb;
+ }
+#endif
+ tmdp->blen = -len;
+
+ tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;
+ writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */
+
+ p->xmit_queued = 1;
+ p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
+
+ if(p->tmdnum != p->tmdlast)
+ netif_wake_queue(dev);
+
+ p->lock = 0;
+
+ spin_unlock_irqrestore(&p->ring_lock, flags);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ if(!ni65_lance_reinit(dev))
+ printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name);
+ netif_wake_queue(dev);
+}
+
+#ifdef MODULE
+static struct net_device *dev_ni65;
+
+module_param(irq, int, 0);
+module_param(io, int, 0);
+module_param(dma, int, 0);
+MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
+MODULE_PARM_DESC(io, "ni6510 I/O base address");
+MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
+
+int __init init_module(void)
+{
+ dev_ni65 = ni65_probe(-1);
+ return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
+}
+
+void __exit cleanup_module(void)
+{
+ unregister_netdev(dev_ni65);
+ cleanup_card(dev_ni65);
+ free_netdev(dev_ni65);
+}
+#endif /* MODULE */
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/ni65.h b/drivers/net/ethernet/amd/ni65.h
new file mode 100644
index 000000000000..e6217e35edf0
--- /dev/null
+++ b/drivers/net/ethernet/amd/ni65.h
@@ -0,0 +1,121 @@
+/* am7990 (lance) definitions
+ *
+ * This is an extension to the Linux operating system, and is covered by
+ * the same GNU General Public License that covers that work.
+ *
+ * Michael Hipp
+ * email: mhipp@student.uni-tuebingen.de
+ *
+ * sources: (mail me or ask archie if you need them)
+ * crynwr-packet-driver
+ */
+
+/*
+ * Control and Status Register 0 (CSR0) bit definitions
+ * (R=Readable) (W=Writeable) (S=Set on write) (C=Clear on write)
+ *
+ */
+
+#define CSR0_ERR 0x8000 /* Error summary (R) */
+#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */
+#define CSR0_CERR 0x2000 /* Collision Error (RC) */
+#define CSR0_MISS 0x1000 /* Missed packet (RC) */
+#define CSR0_MERR 0x0800 /* Memory Error (RC) */
+#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */
+#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */
+#define CSR0_IDON 0x0100 /* Initialization Done (RC) */
+#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */
+#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */
+#define CSR0_RXON 0x0020 /* Receiver on (R) */
+#define CSR0_TXON 0x0010 /* Transmitter on (R) */
+#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */
+#define CSR0_STOP 0x0004 /* Stop (RS) */
+#define CSR0_STRT 0x0002 /* Start (RS) */
+#define CSR0_INIT 0x0001 /* Initialize (RS) */
+
+#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */
+/*
+ * Initialization Block Mode operation Bit Definitions.
+ */
+
+#define M_PROM 0x8000 /* Promiscuous Mode */
+#define M_INTL 0x0040 /* Internal Loopback */
+#define M_DRTY 0x0020 /* Disable Retry */
+#define M_COLL 0x0010 /* Force Collision */
+#define M_DTCR 0x0008 /* Disable Transmit CRC */
+#define M_LOOP 0x0004 /* Loopback */
+#define M_DTX 0x0002 /* Disable the Transmitter */
+#define M_DRX 0x0001 /* Disable the Receiver */
+
+
+/*
+ * Receive message descriptor bit definitions.
+ */
+
+#define RCV_OWN 0x80 /* owner bit 0 = host, 1 = lance */
+#define RCV_ERR 0x40 /* Error Summary */
+#define RCV_FRAM 0x20 /* Framing Error */
+#define RCV_OFLO 0x10 /* Overflow Error */
+#define RCV_CRC 0x08 /* CRC Error */
+#define RCV_BUF_ERR 0x04 /* Buffer Error */
+#define RCV_START 0x02 /* Start of Packet */
+#define RCV_END 0x01 /* End of Packet */
+
+
+/*
+ * Transmit message descriptor bit definitions.
+ */
+
+#define XMIT_OWN 0x80 /* owner bit 0 = host, 1 = lance */
+#define XMIT_ERR 0x40 /* Error Summary */
+#define XMIT_RETRY 0x10 /* more than 1 retry needed to Xmit */
+#define XMIT_1_RETRY 0x08 /* one retry needed to Xmit */
+#define XMIT_DEF 0x04 /* Deferred */
+#define XMIT_START 0x02 /* Start of Packet */
+#define XMIT_END 0x01 /* End of Packet */
+
+/*
+ * transmit status (2) (valid if XMIT_ERR == 1)
+ */
+
+#define XMIT_TDRMASK 0x03ff /* time-domain-reflectometer-value */
+#define XMIT_RTRY 0x0400 /* Failed after 16 retransmissions */
+#define XMIT_LCAR 0x0800 /* Loss of Carrier */
+#define XMIT_LCOL 0x1000 /* Late collision */
+#define XMIT_RESERV 0x2000 /* Reserved */
+#define XMIT_UFLO 0x4000 /* Underflow (late memory) */
+#define XMIT_BUFF 0x8000 /* Buffering error (no ENP) */
+
+struct init_block {
+ unsigned short mode;
+ unsigned char eaddr[6];
+ unsigned char filter[8];
+ /* bit 29-31: number of rmd's (power of 2) */
+ u32 rrp; /* receive ring pointer (align 8) */
+ /* bit 29-31: number of tmd's (power of 2) */
+ u32 trp; /* transmit ring pointer (align 8) */
+};
+
+struct rmd { /* Receive Message Descriptor */
+ union {
+ volatile u32 buffer;
+ struct {
+ volatile unsigned char dummy[3];
+ volatile unsigned char status;
+ } s;
+ } u;
+ volatile short blen;
+ volatile unsigned short mlen;
+};
+
+struct tmd {
+ union {
+ volatile u32 buffer;
+ struct {
+ volatile unsigned char dummy[3];
+ volatile unsigned char status;
+ } s;
+ } u;
+ volatile unsigned short blen;
+ volatile unsigned short status2;
+};
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
new file mode 100644
index 000000000000..3accd5d21b08
--- /dev/null
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -0,0 +1,1525 @@
+/* ----------------------------------------------------------------------------
+Linux PCMCIA ethernet adapter driver for the New Media Ethernet LAN.
+ nmclan_cs.c,v 0.16 1995/07/01 06:42:17 rpao Exp rpao
+
+ The Ethernet LAN uses the Advanced Micro Devices (AMD) Am79C940 Media
+ Access Controller for Ethernet (MACE). It is essentially the Am2150
+ PCMCIA Ethernet card contained in the Am2150 Demo Kit.
+
+Written by Roger C. Pao <rpao@paonet.org>
+ Copyright 1995 Roger C. Pao
+ Linux 2.5 cleanups Copyright Red Hat 2003
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License.
+
+Ported to Linux 1.3.* network driver environment by
+ Matti Aarnio <mea@utu.fi>
+
+References
+
+ Am2150 Technical Reference Manual, Revision 1.0, August 17, 1993
+ Am79C940 (MACE) Data Sheet, 1994
+ Am79C90 (C-LANCE) Data Sheet, 1994
+ Linux PCMCIA Programmer's Guide v1.17
+ /usr/src/linux/net/inet/dev.c, Linux kernel 1.2.8
+
+ Eric Mears, New Media Corporation
+ Tom Pollard, New Media Corporation
+ Dean Siasoyco, New Media Corporation
+ Ken Lesniak, Silicon Graphics, Inc. <lesniak@boston.sgi.com>
+ Donald Becker <becker@scyld.com>
+ David Hinds <dahinds@users.sourceforge.net>
+
+ The Linux client driver is based on the 3c589_cs.c client driver by
+ David Hinds.
+
+ The Linux network driver outline is based on the 3c589_cs.c driver,
+ the 8390.c driver, and the example skeleton.c kernel code, which are
+ by Donald Becker.
+
+ The Am2150 network driver hardware interface code is based on the
+ OS/9000 driver for the New Media Ethernet LAN by Eric Mears.
+
+ Special thanks for testing and help in debugging this driver goes
+ to Ken Lesniak.
+
+-------------------------------------------------------------------------------
+Driver Notes and Issues
+-------------------------------------------------------------------------------
+
+1. Developed on a Dell 320SLi
+ PCMCIA Card Services 2.6.2
+ Linux dell 1.2.10 #1 Thu Jun 29 20:23:41 PDT 1995 i386
+
+2. rc.pcmcia may require loading pcmcia_core with io_speed=300:
+ 'insmod pcmcia_core.o io_speed=300'.
+   This will avoid problems on fast systems, which cause rx_framecnt
+   to return random values.
+
+3. If hot extraction does not work for you, use 'ifconfig eth0 down'
+ before extraction.
+
+4. There is a bad slow-down problem in this driver.
+
+5. Future: Multicast processing. In the meantime, do _not_ compile your
+ kernel with multicast ip enabled.
+
+-------------------------------------------------------------------------------
+History
+-------------------------------------------------------------------------------
+Log: nmclan_cs.c,v
+ * 2.5.75-ac1 2003/07/11 Alan Cox <alan@lxorguk.ukuu.org.uk>
+ * Fixed hang on card eject as we probe it
+ * Cleaned up to use new style locking.
+ *
+ * Revision 0.16 1995/07/01 06:42:17 rpao
+ * Bug fix: nmclan_reset() called CardServices incorrectly.
+ *
+ * Revision 0.15 1995/05/24 08:09:47 rpao
+ * Re-implement MULTI_TX dev->tbusy handling.
+ *
+ * Revision 0.14 1995/05/23 03:19:30 rpao
+ * Added, in nmclan_config(), "tuple.Attributes = 0;".
+ * Modified MACE ID check to ignore chip revision level.
+ * Avoid tx_free_frames race condition between _start_xmit and _interrupt.
+ *
+ * Revision 0.13 1995/05/18 05:56:34 rpao
+ * Statistics changes.
+ * Bug fix: nmclan_reset did not enable TX and RX: call restore_multicast_list.
+ * Bug fix: mace_interrupt checks ~MACE_IMR_DEFAULT. Fixes driver lockup.
+ *
+ * Revision 0.12 1995/05/14 00:12:23 rpao
+ * Statistics overhaul.
+ *
+
+95/05/13 rpao V0.10a
+ Bug fix: MACE statistics counters used wrong I/O ports.
+ Bug fix: mace_interrupt() needed to allow statistics to be
+ processed without RX or TX interrupts pending.
+95/05/11 rpao V0.10
+ Multiple transmit request processing.
+ Modified statistics to use MACE counters where possible.
+95/05/10 rpao V0.09 Bug fix: Must use IO_DATA_PATH_WIDTH_AUTO.
+ *Released
+95/05/10 rpao V0.08
+ Bug fix: Make all non-exported functions private by using
+ static keyword.
+ Bug fix: Test IntrCnt _before_ reading MACE_IR.
+95/05/10 rpao V0.07 Statistics.
+95/05/09 rpao V0.06 Fix rx_framecnt problem by addition of PCIC wait states.
+
+---------------------------------------------------------------------------- */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME "nmclan_cs"
+#define DRV_VERSION "0.16"
+
+
+/* ----------------------------------------------------------------------------
+Conditional Compilation Options
+---------------------------------------------------------------------------- */
+
+#define MULTI_TX 0
+#define RESET_ON_TIMEOUT 1
+#define TX_INTERRUPTABLE 1
+#define RESET_XILINX 0
+
+/* ----------------------------------------------------------------------------
+Include Files
+---------------------------------------------------------------------------- */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/bitops.h>
+
+#include <pcmcia/cisreg.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+/* ----------------------------------------------------------------------------
+Defines
+---------------------------------------------------------------------------- */
+
+#define ETHER_ADDR_LEN ETH_ALEN
+ /* 6 bytes in an Ethernet Address */
+#define MACE_LADRF_LEN 8
+ /* 8 bytes in Logical Address Filter */
+
+/* Loop Control Defines */
+#define MACE_MAX_IR_ITERATIONS 10
+#define MACE_MAX_RX_ITERATIONS 12
+ /*
+ TBD: Dean brought this up, and I assumed the hardware would
+ handle it:
+
+ If MACE_MAX_RX_ITERATIONS is > 1, rx_framecnt may still be
+ non-zero when the isr exits. We may not get another interrupt
+ to process the remaining packets for some time.
+ */
+
+/*
+The Am2150 has a Xilinx XC3042 field programmable gate array (FPGA)
+which manages the interface between the MACE and the PCMCIA bus. It
+also includes buffer management for the 32K x 8 SRAM to control up to
+four transmit and 12 receive frames at a time.
+*/
+#define AM2150_MAX_TX_FRAMES 4
+#define AM2150_MAX_RX_FRAMES 12
+
+/* Am2150 Ethernet Card I/O Mapping */
+#define AM2150_RCV 0x00
+#define AM2150_XMT 0x04
+#define AM2150_XMT_SKIP 0x09
+#define AM2150_RCV_NEXT 0x0A
+#define AM2150_RCV_FRAME_COUNT 0x0B
+#define AM2150_MACE_BANK 0x0C
+#define AM2150_MACE_BASE 0x10
+
+/* MACE Registers */
+#define MACE_RCVFIFO 0
+#define MACE_XMTFIFO 1
+#define MACE_XMTFC 2
+#define MACE_XMTFS 3
+#define MACE_XMTRC 4
+#define MACE_RCVFC 5
+#define MACE_RCVFS 6
+#define MACE_FIFOFC 7
+#define MACE_IR 8
+#define MACE_IMR 9
+#define MACE_PR 10
+#define MACE_BIUCC 11
+#define MACE_FIFOCC 12
+#define MACE_MACCC 13
+#define MACE_PLSCC 14
+#define MACE_PHYCC 15
+#define MACE_CHIPIDL 16
+#define MACE_CHIPIDH 17
+#define MACE_IAC 18
+/* Reserved */
+#define MACE_LADRF 20
+#define MACE_PADR 21
+/* Reserved */
+/* Reserved */
+#define MACE_MPC 24
+/* Reserved */
+#define MACE_RNTPC 26
+#define MACE_RCVCC 27
+/* Reserved */
+#define MACE_UTR 29
+#define MACE_RTR1 30
+#define MACE_RTR2 31
+
+/* MACE Bit Masks */
+#define MACE_XMTRC_EXDEF 0x80
+#define MACE_XMTRC_XMTRC 0x0F
+
+#define MACE_XMTFS_XMTSV 0x80
+#define MACE_XMTFS_UFLO 0x40
+#define MACE_XMTFS_LCOL 0x20
+#define MACE_XMTFS_MORE 0x10
+#define MACE_XMTFS_ONE 0x08
+#define MACE_XMTFS_DEFER 0x04
+#define MACE_XMTFS_LCAR 0x02
+#define MACE_XMTFS_RTRY 0x01
+
+#define MACE_RCVFS_RCVSTS 0xF000
+#define MACE_RCVFS_OFLO 0x8000
+#define MACE_RCVFS_CLSN 0x4000
+#define MACE_RCVFS_FRAM 0x2000
+#define MACE_RCVFS_FCS 0x1000
+
+#define MACE_FIFOFC_RCVFC 0xF0
+#define MACE_FIFOFC_XMTFC 0x0F
+
+#define MACE_IR_JAB 0x80
+#define MACE_IR_BABL 0x40
+#define MACE_IR_CERR 0x20
+#define MACE_IR_RCVCCO 0x10
+#define MACE_IR_RNTPCO 0x08
+#define MACE_IR_MPCO 0x04
+#define MACE_IR_RCVINT 0x02
+#define MACE_IR_XMTINT 0x01
+
+#define MACE_MACCC_PROM 0x80
+#define MACE_MACCC_DXMT2PD 0x40
+#define MACE_MACCC_EMBA 0x20
+#define MACE_MACCC_RESERVED 0x10
+#define MACE_MACCC_DRCVPA 0x08
+#define MACE_MACCC_DRCVBC 0x04
+#define MACE_MACCC_ENXMT 0x02
+#define MACE_MACCC_ENRCV 0x01
+
+#define MACE_PHYCC_LNKFL 0x80
+#define MACE_PHYCC_DLNKTST 0x40
+#define MACE_PHYCC_REVPOL 0x20
+#define MACE_PHYCC_DAPC 0x10
+#define MACE_PHYCC_LRT 0x08
+#define MACE_PHYCC_ASEL 0x04
+#define MACE_PHYCC_RWAKE 0x02
+#define MACE_PHYCC_AWAKE 0x01
+
+#define MACE_IAC_ADDRCHG 0x80
+#define MACE_IAC_PHYADDR 0x04
+#define MACE_IAC_LOGADDR 0x02
+
+#define MACE_UTR_RTRE 0x80
+#define MACE_UTR_RTRD 0x40
+#define MACE_UTR_RPA 0x20
+#define MACE_UTR_FCOLL 0x10
+#define MACE_UTR_RCVFCSE 0x08
+#define MACE_UTR_LOOP_INCL_MENDEC 0x06
+#define MACE_UTR_LOOP_NO_MENDEC 0x04
+#define MACE_UTR_LOOP_EXTERNAL 0x02
+#define MACE_UTR_LOOP_NONE 0x00
+#define MACE_UTR_RESERVED 0x01
+
+/* Switch MACE register bank (only 0 and 1 are valid) */
+#define MACEBANK(win_num) outb((win_num), ioaddr + AM2150_MACE_BANK)
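+/* bank 1 holds registers 16-31; mace_read()/mace_write() below switch
+ * banks temporarily under bank_lock and always return to bank 0 */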
+
+#define MACE_IMR_DEFAULT \
+ (0xFF - \
+ ( \
+ MACE_IR_CERR | \
+ MACE_IR_RCVCCO | \
+ MACE_IR_RNTPCO | \
+ MACE_IR_MPCO | \
+ MACE_IR_RCVINT | \
+ MACE_IR_XMTINT \
+ ) \
+ )
+#undef MACE_IMR_DEFAULT
+#define MACE_IMR_DEFAULT 0x00 /* New statistics handling: grab everything */
+
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+/* ----------------------------------------------------------------------------
+Type Definitions
+---------------------------------------------------------------------------- */
+
+typedef struct _mace_statistics {
+ /* MACE_XMTFS */
+ int xmtsv;
+ int uflo;
+ int lcol;
+ int more;
+ int one;
+ int defer;
+ int lcar;
+ int rtry;
+
+ /* MACE_XMTRC */
+ int exdef;
+ int xmtrc;
+
+ /* RFS1--Receive Status (RCVSTS) */
+ int oflo;
+ int clsn;
+ int fram;
+ int fcs;
+
+ /* RFS2--Runt Packet Count (RNTPC) */
+ int rfs_rntpc;
+
+ /* RFS3--Receive Collision Count (RCVCC) */
+ int rfs_rcvcc;
+
+ /* MACE_IR */
+ int jab;
+ int babl;
+ int cerr;
+ int rcvcco;
+ int rntpco;
+ int mpco;
+
+ /* MACE_MPC */
+ int mpc;
+
+ /* MACE_RNTPC */
+ int rntpc;
+
+ /* MACE_RCVCC */
+ int rcvcc;
+} mace_statistics;
+
+typedef struct _mace_private {
+ struct pcmcia_device *p_dev;
+ struct net_device_stats linux_stats; /* Linux statistics counters */
+ mace_statistics mace_stats; /* MACE chip statistics counters */
+
+ /* restore_multicast_list() state variables */
+ int multicast_ladrf[MACE_LADRF_LEN]; /* Logical address filter */
+ int multicast_num_addrs;
+
+ char tx_free_frames; /* Number of free transmit frame buffers */
+ char tx_irq_disabled; /* MACE TX interrupt disabled */
+
+ spinlock_t bank_lock; /* Must be held if you step off bank 0 */
+} mace_private;
+
+/* ----------------------------------------------------------------------------
+Private Global Variables
+---------------------------------------------------------------------------- */
+
+static const char *if_names[]={
+ "Auto", "10baseT", "BNC",
+};
+
+/* ----------------------------------------------------------------------------
+Parameters
+ These are the parameters that can be set during loading with
+ 'insmod'.
+---------------------------------------------------------------------------- */
+
+MODULE_DESCRIPTION("New Media PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
+
+/* 0=auto, 1=10baseT, 2=10base2, default=auto */
+INT_MODULE_PARM(if_port, 0);
+
+
+/* ----------------------------------------------------------------------------
+Function Prototypes
+---------------------------------------------------------------------------- */
+
+static int nmclan_config(struct pcmcia_device *link);
+static void nmclan_release(struct pcmcia_device *link);
+
+static void nmclan_reset(struct net_device *dev);
+static int mace_config(struct net_device *dev, struct ifmap *map);
+static int mace_open(struct net_device *dev);
+static int mace_close(struct net_device *dev);
+static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static void mace_tx_timeout(struct net_device *dev);
+static irqreturn_t mace_interrupt(int irq, void *dev_id);
+static struct net_device_stats *mace_get_stats(struct net_device *dev);
+static int mace_rx(struct net_device *dev, unsigned char RxCnt);
+static void restore_multicast_list(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static const struct ethtool_ops netdev_ethtool_ops;
+
+
+static void nmclan_detach(struct pcmcia_device *p_dev);
+
+static const struct net_device_ops mace_netdev_ops = {
+ .ndo_open = mace_open,
+ .ndo_stop = mace_close,
+ .ndo_start_xmit = mace_start_xmit,
+ .ndo_tx_timeout = mace_tx_timeout,
+ .ndo_set_config = mace_config,
+ .ndo_get_stats = mace_get_stats,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int nmclan_probe(struct pcmcia_device *link)
+{
+ mace_private *lp;
+ struct net_device *dev;
+
+ dev_dbg(&link->dev, "nmclan_attach()\n");
+
+ /* Create new ethernet device */
+ dev = alloc_etherdev(sizeof(mace_private));
+ if (!dev)
+ return -ENOMEM;
+ lp = netdev_priv(dev);
+ lp->p_dev = link;
+ link->priv = dev;
+
+ spin_lock_init(&lp->bank_lock);
+ link->resource[0]->end = 32;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+ link->config_flags |= CONF_ENABLE_IRQ;
+ link->config_index = 1;
+ link->config_regs = PRESENT_OPTION;
+
+ lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
+
+ dev->netdev_ops = &mace_netdev_ops;
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ return nmclan_config(link);
+} /* nmclan_attach */
+
+static void nmclan_detach(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ dev_dbg(&link->dev, "nmclan_detach\n");
+
+ unregister_netdev(dev);
+
+ nmclan_release(link);
+
+ free_netdev(dev);
+} /* nmclan_detach */
+
+/* ----------------------------------------------------------------------------
+mace_read
+ Reads a MACE register. This is bank independent; however, the
+ caller must ensure that this call is not interrupted. We are
+ assuming that during normal operation, the MACE is always in
+ bank 0.
+---------------------------------------------------------------------------- */
+static int mace_read(mace_private *lp, unsigned int ioaddr, int reg)
+{
+ int data = 0xFF;
+ unsigned long flags;
+
+ switch (reg >> 4) {
+ case 0: /* register 0-15 */
+ data = inb(ioaddr + AM2150_MACE_BASE + reg);
+ break;
+ case 1: /* register 16-31 */
+ spin_lock_irqsave(&lp->bank_lock, flags);
+ MACEBANK(1);
+ data = inb(ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
+ MACEBANK(0);
+ spin_unlock_irqrestore(&lp->bank_lock, flags);
+ break;
+ }
+ return data & 0xFF;
+} /* mace_read */
+
+/* ----------------------------------------------------------------------------
+mace_write
+ Writes to a MACE register. This is bank independent; however,
+ the caller must ensure that this call is not interrupted. We
+ are assuming that during normal operation, the MACE is always in
+ bank 0.
+---------------------------------------------------------------------------- */
+static void mace_write(mace_private *lp, unsigned int ioaddr, int reg,
+ int data)
+{
+ unsigned long flags;
+
+ switch (reg >> 4) {
+ case 0: /* register 0-15 */
+ outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + reg);
+ break;
+ case 1: /* register 16-31 */
+ spin_lock_irqsave(&lp->bank_lock, flags);
+ MACEBANK(1);
+ outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
+ MACEBANK(0);
+ spin_unlock_irqrestore(&lp->bank_lock, flags);
+ break;
+ }
+} /* mace_write */
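+
+/*
+ * Usage sketch (illustrative, not driver code): reading one of the
+ * bank-1 statistics counters goes through the banked path above and
+ * returns with the chip back in bank 0, e.g. as update_stats() does
+ * later in this file:
+ *
+ *   int mpc = mace_read(lp, ioaddr, MACE_MPC);
+ */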
+
+/* ----------------------------------------------------------------------------
+mace_init
+ Resets the MACE chip.
+---------------------------------------------------------------------------- */
+static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
+{
+ int i;
+ int ct = 0;
+
+ /* MACE Software reset */
+ mace_write(lp, ioaddr, MACE_BIUCC, 1);
+ while (mace_read(lp, ioaddr, MACE_BIUCC) & 0x01) {
+ /* Wait for the reset bit to clear itself; this takes at most 200ns. */
+ if (++ct > 500) {
+ pr_err("reset failed, card removed?\n");
+ return -1;
+ }
+ udelay(1);
+ }
+ mace_write(lp, ioaddr, MACE_BIUCC, 0);
+
+ /* The Am2150 requires that the MACE FIFOs operate in burst mode. */
+ mace_write(lp, ioaddr, MACE_FIFOCC, 0x0F);
+
+ mace_write(lp,ioaddr, MACE_RCVFC, 0); /* Disable Auto Strip Receive */
+ mace_write(lp, ioaddr, MACE_IMR, 0xFF); /* Disable all interrupts until _open */
+
+ /*
+ * Bit 2-1 PORTSEL[1-0] Port Select.
+ * 00 AUI/10Base-2
+ * 01 10Base-T
+ * 10 DAI Port (reserved in Am2150)
+ * 11 GPSI
+ * For this card, only the first two are valid.
+ * So, PLSCC should be set to
+ * 0x00 for 10Base-2
+ * 0x02 for 10Base-T
+ * Or just set ASEL in PHYCC below!
+ */
+ switch (if_port) {
+ case 1:
+ mace_write(lp, ioaddr, MACE_PLSCC, 0x02);
+ break;
+ case 2:
+ mace_write(lp, ioaddr, MACE_PLSCC, 0x00);
+ break;
+ default:
+ mace_write(lp, ioaddr, MACE_PHYCC, /* ASEL */ 4);
+ /* ASEL Auto Select. When set, the PORTSEL[1-0] bits are overridden,
+ and the MACE device will automatically select the operating media
+ interface port. */
+ break;
+ }
+
+ mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_PHYADDR);
+ /* Poll ADDRCHG bit */
+ ct = 0;
+ while (mace_read(lp, ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG) {
+ if (++ct > 500) {
+ pr_err("ADDRCHG timeout, card removed?\n");
+ return -1;
+ }
+ }
+ /* Set PADR register */
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ mace_write(lp, ioaddr, MACE_PADR, enet_addr[i]);
+
+ /* MAC Configuration Control Register should be written last */
+ /* Let set_multicast_list set this. */
+ /* mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); */
+ mace_write(lp, ioaddr, MACE_MACCC, 0x00);
+ return 0;
+} /* mace_init */
+
+static int nmclan_config(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ mace_private *lp = netdev_priv(dev);
+ u8 *buf;
+ size_t len;
+ int i, ret;
+ unsigned int ioaddr;
+
+ dev_dbg(&link->dev, "nmclan_config\n");
+
+ link->io_lines = 5;
+ ret = pcmcia_request_io(link);
+ if (ret)
+ goto failed;
+ ret = pcmcia_request_exclusive_irq(link, mace_interrupt);
+ if (ret)
+ goto failed;
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto failed;
+
+ dev->irq = link->irq;
+ dev->base_addr = link->resource[0]->start;
+
+ ioaddr = dev->base_addr;
+
+ /* Read the ethernet address from the CIS. */
+ len = pcmcia_get_tuple(link, 0x80, &buf);
+ if (!buf || len < ETHER_ADDR_LEN) {
+ kfree(buf);
+ goto failed;
+ }
+ memcpy(dev->dev_addr, buf, ETHER_ADDR_LEN);
+ kfree(buf);
+
+ /* Verify configuration by reading the MACE ID. */
+ {
+ char sig[2];
+
+ sig[0] = mace_read(lp, ioaddr, MACE_CHIPIDL);
+ sig[1] = mace_read(lp, ioaddr, MACE_CHIPIDH);
+ if ((sig[0] == 0x40) && ((sig[1] & 0x0F) == 0x09)) {
+ dev_dbg(&link->dev, "nmclan_cs configured: mace id=%x %x\n",
+ sig[0], sig[1]);
+ } else {
+ pr_notice("mace id not found: %x %x should be 0x40 0x?9\n",
+ sig[0], sig[1]);
+ goto failed;
+ }
+ }
+
+ if (mace_init(lp, ioaddr, dev->dev_addr) == -1)
+ goto failed;
+
+ /* The if_port symbol can be set when the module is loaded */
+ if (if_port <= 2)
+ dev->if_port = if_port;
+ else
+ pr_notice("invalid if_port requested\n");
+
+ SET_NETDEV_DEV(dev, &link->dev);
+
+ i = register_netdev(dev);
+ if (i != 0) {
+ pr_notice("register_netdev() failed\n");
+ goto failed;
+ }
+
+ netdev_info(dev, "nmclan: port %#3lx, irq %d, %s port, hw_addr %pM\n",
+ dev->base_addr, dev->irq, if_names[dev->if_port], dev->dev_addr);
+ return 0;
+
+failed:
+ nmclan_release(link);
+ return -ENODEV;
+} /* nmclan_config */
+
+static void nmclan_release(struct pcmcia_device *link)
+{
+ dev_dbg(&link->dev, "nmclan_release\n");
+ pcmcia_disable_device(link);
+}
+
+static int nmclan_suspend(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ if (link->open)
+ netif_device_detach(dev);
+
+ return 0;
+}
+
+static int nmclan_resume(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ if (link->open) {
+ nmclan_reset(dev);
+ netif_device_attach(dev);
+ }
+
+ return 0;
+}
+
+
+/* ----------------------------------------------------------------------------
+nmclan_reset
+ Reset and restore all of the Xilinx and MACE registers.
+---------------------------------------------------------------------------- */
+static void nmclan_reset(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+
+#if RESET_XILINX
+ struct pcmcia_device *link = lp->p_dev;
+ u8 OrigCorValue;
+
+ /* Save original COR value */
+ pcmcia_read_config_byte(link, CISREG_COR, &OrigCorValue);
+
+ /* Reset Xilinx */
+ dev_dbg(&link->dev, "nmclan_reset: OrigCorValue=0x%x, resetting...\n",
+ OrigCorValue);
+ pcmcia_write_config_byte(link, CISREG_COR, COR_SOFT_RESET);
+ /* Need to wait for 20 ms for PCMCIA to finish reset. */
+
+ /* Restore original COR configuration index */
+ pcmcia_write_config_byte(link, CISREG_COR,
+ (COR_LEVEL_REQ | (OrigCorValue & COR_CONFIG_MASK)));
+
+#endif /* #if RESET_XILINX */
+
+ /* Xilinx is now completely reset along with the MACE chip. */
+ lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
+
+ /* Reinitialize the MACE chip for operation. */
+ mace_init(lp, dev->base_addr, dev->dev_addr);
+ mace_write(lp, dev->base_addr, MACE_IMR, MACE_IMR_DEFAULT);
+
+ /* Restore the multicast list and enable TX and RX. */
+ restore_multicast_list(dev);
+} /* nmclan_reset */
+
+/* ----------------------------------------------------------------------------
+mace_config
+ [Someone tell me what this is supposed to do? Is if_port a defined
+ standard? If so, there should be defines to indicate 1=10Base-T,
+ 2=10Base-2, etc. including limited automatic detection.]
+---------------------------------------------------------------------------- */
+static int mace_config(struct net_device *dev, struct ifmap *map)
+{
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (map->port <= 2) {
+ dev->if_port = map->port;
+ netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
+ } else
+ return -EINVAL;
+ }
+ return 0;
+} /* mace_config */
+
+/* ----------------------------------------------------------------------------
+mace_open
+ Open device driver.
+---------------------------------------------------------------------------- */
+static int mace_open(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ mace_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+
+ if (!pcmcia_dev_present(link))
+ return -ENODEV;
+
+ link->open++;
+
+ MACEBANK(0);
+
+ netif_start_queue(dev);
+ nmclan_reset(dev);
+
+ return 0; /* Always succeed */
+} /* mace_open */
+
+/* ----------------------------------------------------------------------------
+mace_close
+ Closes device driver.
+---------------------------------------------------------------------------- */
+static int mace_close(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ mace_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+
+ dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
+
+ /* Mask off all interrupts from the MACE chip. */
+ outb(0xFF, ioaddr + AM2150_MACE_BASE + MACE_IMR);
+
+ link->open--;
+ netif_stop_queue(dev);
+
+ return 0;
+} /* mace_close */
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+};
+
+/* ----------------------------------------------------------------------------
+mace_start_xmit
+ This routine begins the packet transmit function. When completed,
+ it will generate a transmit interrupt.
+
+ If _start_xmit returns NETDEV_TX_OK, the packet is now solely the
+ responsibility of the driver. If it returns NETDEV_TX_BUSY, the
+ transmission failed and the stack will requeue the skb and retry
+ later.
+---------------------------------------------------------------------------- */
+
+static void mace_tx_timeout(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+
+ netdev_notice(dev, "transmit timed out -- ");
+#if RESET_ON_TIMEOUT
+ pr_cont("resetting card\n");
+ pcmcia_reset_card(link->socket);
+#else /* #if RESET_ON_TIMEOUT */
+ pr_cont("NOT resetting card\n");
+#endif /* #if RESET_ON_TIMEOUT */
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+
+ netif_stop_queue(dev);
+
+ pr_debug("%s: mace_start_xmit(length = %ld) called.\n",
+ dev->name, (long)skb->len);
+
+#if (!TX_INTERRUPTABLE)
+ /* Disable MACE TX interrupts. */
+ outb(MACE_IMR_DEFAULT | MACE_IR_XMTINT,
+ ioaddr + AM2150_MACE_BASE + MACE_IMR);
+ lp->tx_irq_disabled=1;
+#endif /* #if (!TX_INTERRUPTABLE) */
+
+ {
+ /* This block must not be interrupted by another transmit request!
+ mace_tx_timeout will take care of timer-based retransmissions from
+ the upper layers. The interrupt handler is guaranteed never to
+ service a transmit interrupt while we are in here.
+ */
+
+ lp->linux_stats.tx_bytes += skb->len;
+ lp->tx_free_frames--;
+
+ /* WARNING: Write the _exact_ number of bytes written in the header! */
+ /* Put out the word header [must be an outw()] . . . */
+ outw(skb->len, ioaddr + AM2150_XMT);
+ /* . . . and the packet [may be any combination of outw() and outb()] */
+ outsw(ioaddr + AM2150_XMT, skb->data, skb->len >> 1);
+ if (skb->len & 1) {
+ /* Odd byte transfer */
+ outb(skb->data[skb->len-1], ioaddr + AM2150_XMT);
+ }
+
+#if MULTI_TX
+ if (lp->tx_free_frames > 0)
+ netif_start_queue(dev);
+#endif /* #if MULTI_TX */
+ }
+
+#if (!TX_INTERRUPTABLE)
+ /* Re-enable MACE TX interrupts. */
+ lp->tx_irq_disabled=0;
+ outb(MACE_IMR_DEFAULT, ioaddr + AM2150_MACE_BASE + MACE_IMR);
+#endif /* #if (!TX_INTERRUPTABLE) */
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+} /* mace_start_xmit */
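+
+/*
+ * Worked example (illustrative): for a 61-byte frame the outw() above
+ * emits the length header, outsw() then moves 30 16-bit words
+ * (61 >> 1), and the final odd byte leaves through a single outb():
+ *
+ *   outw(61, ioaddr + AM2150_XMT);
+ *   outsw(ioaddr + AM2150_XMT, skb->data, 30);
+ *   outb(skb->data[60], ioaddr + AM2150_XMT);
+ */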
+
+/* ----------------------------------------------------------------------------
+mace_interrupt
+ The interrupt handler.
+---------------------------------------------------------------------------- */
+static irqreturn_t mace_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ mace_private *lp;
+ unsigned int ioaddr;
+ int status;
+ int IntrCnt = MACE_MAX_IR_ITERATIONS;
+
+ if (dev == NULL) {
+ pr_debug("mace_interrupt(): irq 0x%X for unknown device.\n",
+ irq);
+ return IRQ_NONE;
+ }
+
+ lp = netdev_priv(dev);
+ ioaddr = dev->base_addr;
+
+ if (lp->tx_irq_disabled) {
+ const char *msg = "Interrupt with tx_irq_disabled";
+ netdev_notice(dev, "%s [isr=%02X, imr=%02X]\n",
+ msg,
+ inb(ioaddr + AM2150_MACE_BASE + MACE_IR),
+ inb(ioaddr + AM2150_MACE_BASE + MACE_IMR));
+ /* WARNING: MACE_IR has been read! */
+ return IRQ_NONE;
+ }
+
+ if (!netif_device_present(dev)) {
+ netdev_dbg(dev, "interrupt from dead card\n");
+ return IRQ_NONE;
+ }
+
+ do {
+ /* WARNING: MACE_IR is a READ/CLEAR port! */
+ status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);
+
+ pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);
+
+ if (status & MACE_IR_RCVINT) {
+ mace_rx(dev, MACE_MAX_RX_ITERATIONS);
+ }
+
+ if (status & MACE_IR_XMTINT) {
+ unsigned char fifofc;
+ unsigned char xmtrc;
+ unsigned char xmtfs;
+
+ fifofc = inb(ioaddr + AM2150_MACE_BASE + MACE_FIFOFC);
+ if ((fifofc & MACE_FIFOFC_XMTFC)==0) {
+ lp->linux_stats.tx_errors++;
+ outb(0xFF, ioaddr + AM2150_XMT_SKIP);
+ }
+
+ /* Transmit Retry Count (XMTRC, reg 4) */
+ xmtrc = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTRC);
+ if (xmtrc & MACE_XMTRC_EXDEF) lp->mace_stats.exdef++;
+ lp->mace_stats.xmtrc += (xmtrc & MACE_XMTRC_XMTRC);
+
+ xmtfs = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTFS);
+ if (xmtfs & MACE_XMTFS_XMTSV) { /* Transmit Status Valid */
+ lp->mace_stats.xmtsv++;
+
+ if (xmtfs & ~MACE_XMTFS_XMTSV) {
+ if (xmtfs & MACE_XMTFS_UFLO) {
+ /* Underflow. Indicates that the Transmit FIFO emptied before
+ the end of frame was reached. */
+ lp->mace_stats.uflo++;
+ }
+ if (xmtfs & MACE_XMTFS_LCOL) {
+ /* Late Collision */
+ lp->mace_stats.lcol++;
+ }
+ if (xmtfs & MACE_XMTFS_MORE) {
+ /* MORE than one retry was needed */
+ lp->mace_stats.more++;
+ }
+ if (xmtfs & MACE_XMTFS_ONE) {
+ /* Exactly ONE retry occurred */
+ lp->mace_stats.one++;
+ }
+ if (xmtfs & MACE_XMTFS_DEFER) {
+ /* Transmission was deferred */
+ lp->mace_stats.defer++;
+ }
+ if (xmtfs & MACE_XMTFS_LCAR) {
+ /* Loss of carrier */
+ lp->mace_stats.lcar++;
+ }
+ if (xmtfs & MACE_XMTFS_RTRY) {
+ /* Retry error: transmit aborted after 16 attempts */
+ lp->mace_stats.rtry++;
+ }
+ } /* if (xmtfs & ~MACE_XMTFS_XMTSV) */
+
+ } /* if (xmtfs & MACE_XMTFS_XMTSV) */
+
+ lp->linux_stats.tx_packets++;
+ lp->tx_free_frames++;
+ netif_wake_queue(dev);
+ } /* if (status & MACE_IR_XMTINT) */
+
+ if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) {
+ if (status & MACE_IR_JAB) {
+ /* Jabber Error. Excessive transmit duration (20-150ms). */
+ lp->mace_stats.jab++;
+ }
+ if (status & MACE_IR_BABL) {
+ /* Babble Error. >1518 bytes transmitted. */
+ lp->mace_stats.babl++;
+ }
+ if (status & MACE_IR_CERR) {
+ /* Collision Error. CERR indicates the absence of the
+ Signal Quality Error Test message after a packet
+ transmission. */
+ lp->mace_stats.cerr++;
+ }
+ if (status & MACE_IR_RCVCCO) {
+ /* Receive Collision Count Overflow; */
+ lp->mace_stats.rcvcco++;
+ }
+ if (status & MACE_IR_RNTPCO) {
+ /* Runt Packet Count Overflow */
+ lp->mace_stats.rntpco++;
+ }
+ if (status & MACE_IR_MPCO) {
+ /* Missed Packet Count Overflow */
+ lp->mace_stats.mpco++;
+ }
+ } /* if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) */
+
+ } while ((status & ~MACE_IMR_DEFAULT) && (--IntrCnt));
+
+ return IRQ_HANDLED;
+} /* mace_interrupt */
+
+/* ----------------------------------------------------------------------------
+mace_rx
+ Receives packets.
+---------------------------------------------------------------------------- */
+static int mace_rx(struct net_device *dev, unsigned char RxCnt)
+{
+ mace_private *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ unsigned char rx_framecnt;
+ unsigned short rx_status;
+
+ while (
+ ((rx_framecnt = inb(ioaddr + AM2150_RCV_FRAME_COUNT)) > 0) &&
+ (rx_framecnt <= 12) && /* rx_framecnt==0xFF if card is extracted. */
+ (RxCnt--)
+ ) {
+ rx_status = inw(ioaddr + AM2150_RCV);
+
+ pr_debug("%s: in mace_rx(), framecnt 0x%X, rx_status"
+ " 0x%X.\n", dev->name, rx_framecnt, rx_status);
+
+ if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */
+ lp->linux_stats.rx_errors++;
+ if (rx_status & MACE_RCVFS_OFLO) {
+ lp->mace_stats.oflo++;
+ }
+ if (rx_status & MACE_RCVFS_CLSN) {
+ lp->mace_stats.clsn++;
+ }
+ if (rx_status & MACE_RCVFS_FRAM) {
+ lp->mace_stats.fram++;
+ }
+ if (rx_status & MACE_RCVFS_FCS) {
+ lp->mace_stats.fcs++;
+ }
+ } else {
+ short pkt_len = (rx_status & ~MACE_RCVFS_RCVSTS) - 4;
+ /* Auto Strip is off, always subtract 4 */
+ struct sk_buff *skb;
+
+ lp->mace_stats.rfs_rntpc += inb(ioaddr + AM2150_RCV);
+ /* runt packet count */
+ lp->mace_stats.rfs_rcvcc += inb(ioaddr + AM2150_RCV);
+ /* rcv collision count */
+
+ pr_debug(" receiving packet size 0x%X rx_status"
+ " 0x%X.\n", pkt_len, rx_status);
+
+ skb = dev_alloc_skb(pkt_len+2);
+
+ if (skb != NULL) {
+ skb_reserve(skb, 2);
+ insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1);
+ if (pkt_len & 1)
+ *(skb_tail_pointer(skb) - 1) = inb(ioaddr + AM2150_RCV);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ netif_rx(skb); /* Send the packet to the upper (protocol) layers. */
+
+ lp->linux_stats.rx_packets++;
+ lp->linux_stats.rx_bytes += pkt_len;
+ outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
+ continue;
+ } else {
+ pr_debug("%s: couldn't allocate a sk_buff of size"
+ " %d.\n", dev->name, pkt_len);
+ lp->linux_stats.rx_dropped++;
+ }
+ }
+ outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
+ } /* while */
+
+ return 0;
+} /* mace_rx */
+
+/* ----------------------------------------------------------------------------
+pr_linux_stats
+---------------------------------------------------------------------------- */
+static void pr_linux_stats(struct net_device_stats *pstats)
+{
+ pr_debug("pr_linux_stats\n");
+ pr_debug(" rx_packets=%-7ld tx_packets=%ld\n",
+ (long)pstats->rx_packets, (long)pstats->tx_packets);
+ pr_debug(" rx_errors=%-7ld tx_errors=%ld\n",
+ (long)pstats->rx_errors, (long)pstats->tx_errors);
+ pr_debug(" rx_dropped=%-7ld tx_dropped=%ld\n",
+ (long)pstats->rx_dropped, (long)pstats->tx_dropped);
+ pr_debug(" multicast=%-7ld collisions=%ld\n",
+ (long)pstats->multicast, (long)pstats->collisions);
+
+ pr_debug(" rx_length_errors=%-7ld rx_over_errors=%ld\n",
+ (long)pstats->rx_length_errors, (long)pstats->rx_over_errors);
+ pr_debug(" rx_crc_errors=%-7ld rx_frame_errors=%ld\n",
+ (long)pstats->rx_crc_errors, (long)pstats->rx_frame_errors);
+ pr_debug(" rx_fifo_errors=%-7ld rx_missed_errors=%ld\n",
+ (long)pstats->rx_fifo_errors, (long)pstats->rx_missed_errors);
+
+ pr_debug(" tx_aborted_errors=%-7ld tx_carrier_errors=%ld\n",
+ (long)pstats->tx_aborted_errors, (long)pstats->tx_carrier_errors);
+ pr_debug(" tx_fifo_errors=%-7ld tx_heartbeat_errors=%ld\n",
+ (long)pstats->tx_fifo_errors, (long)pstats->tx_heartbeat_errors);
+ pr_debug(" tx_window_errors=%ld\n",
+ (long)pstats->tx_window_errors);
+} /* pr_linux_stats */
+
+/* ----------------------------------------------------------------------------
+pr_mace_stats
+---------------------------------------------------------------------------- */
+static void pr_mace_stats(mace_statistics *pstats)
+{
+ pr_debug("pr_mace_stats\n");
+
+ pr_debug(" xmtsv=%-7d uflo=%d\n",
+ pstats->xmtsv, pstats->uflo);
+ pr_debug(" lcol=%-7d more=%d\n",
+ pstats->lcol, pstats->more);
+ pr_debug(" one=%-7d defer=%d\n",
+ pstats->one, pstats->defer);
+ pr_debug(" lcar=%-7d rtry=%d\n",
+ pstats->lcar, pstats->rtry);
+
+ /* MACE_XMTRC */
+ pr_debug(" exdef=%-7d xmtrc=%d\n",
+ pstats->exdef, pstats->xmtrc);
+
+ /* RFS1--Receive Status (RCVSTS) */
+ pr_debug(" oflo=%-7d clsn=%d\n",
+ pstats->oflo, pstats->clsn);
+ pr_debug(" fram=%-7d fcs=%d\n",
+ pstats->fram, pstats->fcs);
+
+ /* RFS2--Runt Packet Count (RNTPC) */
+ /* RFS3--Receive Collision Count (RCVCC) */
+ pr_debug(" rfs_rntpc=%-7d rfs_rcvcc=%d\n",
+ pstats->rfs_rntpc, pstats->rfs_rcvcc);
+
+ /* MACE_IR */
+ pr_debug(" jab=%-7d babl=%d\n",
+ pstats->jab, pstats->babl);
+ pr_debug(" cerr=%-7d rcvcco=%d\n",
+ pstats->cerr, pstats->rcvcco);
+ pr_debug(" rntpco=%-7d mpco=%d\n",
+ pstats->rntpco, pstats->mpco);
+
+ /* MACE_MPC */
+ pr_debug(" mpc=%d\n", pstats->mpc);
+
+ /* MACE_RNTPC */
+ pr_debug(" rntpc=%d\n", pstats->rntpc);
+
+ /* MACE_RCVCC */
+ pr_debug(" rcvcc=%d\n", pstats->rcvcc);
+
+} /* pr_mace_stats */
+
+/* ----------------------------------------------------------------------------
+update_stats
+ Update statistics. We change to register bank 1, so this
+ should be run single-threaded if the device is active. This is
+ expected to be a rare operation, and it's simpler for the rest
+ of the driver to assume that bank 0 is always valid rather
+ than use a special bank-state variable.
+
+ oflo & uflo should _never_ occur since it would mean the Xilinx
+ was not able to transfer data between the MACE FIFO and the
+ card's SRAM fast enough. If this happens, something is
+ seriously wrong with the hardware.
+---------------------------------------------------------------------------- */
+static void update_stats(unsigned int ioaddr, struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+
+ lp->mace_stats.rcvcc += mace_read(lp, ioaddr, MACE_RCVCC);
+ lp->mace_stats.rntpc += mace_read(lp, ioaddr, MACE_RNTPC);
+ lp->mace_stats.mpc += mace_read(lp, ioaddr, MACE_MPC);
+ /* At this point, mace_stats is fully updated for this call.
+ We may now update the linux_stats. */
+
+ /* The MACE has no equivalent for the linux_stats fields that are
+ commented out. */
+
+ /* lp->linux_stats.multicast; */
+ lp->linux_stats.collisions =
+ lp->mace_stats.rcvcco * 256 + lp->mace_stats.rcvcc;
+ /* Collision: The MACE may retry sending a packet 15 times
+ before giving up. The retry count is in XMTRC.
+ Does each retry constitute a collision?
+ If so, why doesn't the RCVCC record these collisions? */
+
+ /* detailed rx_errors: */
+ lp->linux_stats.rx_length_errors =
+ lp->mace_stats.rntpco * 256 + lp->mace_stats.rntpc;
+ /* lp->linux_stats.rx_over_errors */
+ lp->linux_stats.rx_crc_errors = lp->mace_stats.fcs;
+ lp->linux_stats.rx_frame_errors = lp->mace_stats.fram;
+ lp->linux_stats.rx_fifo_errors = lp->mace_stats.oflo;
+ lp->linux_stats.rx_missed_errors =
+ lp->mace_stats.mpco * 256 + lp->mace_stats.mpc;
+
+ /* detailed tx_errors */
+ lp->linux_stats.tx_aborted_errors = lp->mace_stats.rtry;
+ lp->linux_stats.tx_carrier_errors = lp->mace_stats.lcar;
+ /* LCAR usually results from bad cabling. */
+ lp->linux_stats.tx_fifo_errors = lp->mace_stats.uflo;
+ lp->linux_stats.tx_heartbeat_errors = lp->mace_stats.cerr;
+ /* lp->linux_stats.tx_window_errors; */
+} /* update_stats */
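+
+/*
+ * Worked example (illustrative): MACE_RCVCC is an 8-bit counter and
+ * rcvcco counts its overflow interrupts, so after two wrap-arounds
+ * and a current reading of 0x10 the accumulated value is
+ *
+ *   collisions = 2 * 256 + 0x10 = 528.
+ */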
+
+/* ----------------------------------------------------------------------------
+mace_get_stats
+ Gathers ethernet statistics from the MACE chip.
+---------------------------------------------------------------------------- */
+static struct net_device_stats *mace_get_stats(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+
+ update_stats(dev->base_addr, dev);
+
+ pr_debug("%s: updating the statistics.\n", dev->name);
+ pr_linux_stats(&lp->linux_stats);
+ pr_mace_stats(&lp->mace_stats);
+
+ return &lp->linux_stats;
+} /* mace_get_stats */
+
+/* ----------------------------------------------------------------------------
+updateCRC
+ Modified from Am79C90 data sheet.
+---------------------------------------------------------------------------- */
+
+#ifdef BROKEN_MULTICAST
+
+static void updateCRC(int *CRC, int bit)
+{
+ static const int poly[]={
+ 1,1,1,0, 1,1,0,1,
+ 1,0,1,1, 1,0,0,0,
+ 1,0,0,0, 0,0,1,1,
+ 0,0,1,0, 0,0,0,0
+ }; /* CRC polynomial. poly[n] = coefficient of the x**n term of the
+ CRC generator polynomial. */
+
+ int j;
+
+ /* shift CRC and control bit (CRC[32]) */
+ for (j = 32; j > 0; j--)
+ CRC[j] = CRC[j-1];
+ CRC[0] = 0;
+
+ /* If bit XOR(control bit) = 1, set CRC = CRC XOR polynomial. */
+ if (bit ^ CRC[32])
+ for (j = 0; j < 32; j++)
+ CRC[j] ^= poly[j];
+} /* updateCRC */
+
+/* ----------------------------------------------------------------------------
+BuildLAF
+ Build logical address filter.
+ Modified from Am79C90 data sheet.
+
+Input
+ ladrf: logical address filter (contents initialized to 0)
+ adr: ethernet address
+---------------------------------------------------------------------------- */
+static void BuildLAF(int *ladrf, int *adr)
+{
+ int CRC[33]={1}; /* CRC register, 1 word/bit + extra control bit */
+
+ int i, byte; /* temporary array indices */
+ int hashcode; /* the output object */
+
+ CRC[32]=0;
+
+ for (byte = 0; byte < 6; byte++)
+ for (i = 0; i < 8; i++)
+ updateCRC(CRC, (adr[byte] >> i) & 1);
+
+ hashcode = 0;
+ for (i = 0; i < 6; i++)
+ hashcode = (hashcode << 1) + CRC[i];
+
+ byte = hashcode >> 3;
+ ladrf[byte] |= (1 << (hashcode & 7));
+
+#ifdef PCMCIA_DEBUG
+ if (0)
+ printk(KERN_DEBUG " adr =%pM\n", adr);
+ printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode);
+ for (i = 0; i < 8; i++)
+ pr_cont(" %02X", ladrf[i]);
+ pr_cont("\n");
+#endif
+} /* BuildLAF */
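+
+/*
+ * A minimal alternative sketch, assuming the standard Am79C90-family
+ * LADRF mapping: other AMD LANCE-type drivers derive the same hash
+ * with the kernel CRC helper from <linux/crc32.h> instead of the
+ * bitwise loop above (addr taken as a plain u8 array here):
+ *
+ *   u32 crc = ether_crc_le(ETH_ALEN, addr);
+ *   int hashcode = crc >> 26;   (top 6 bits select the filter bit)
+ *   ladrf[hashcode >> 3] |= 1 << (hashcode & 7);
+ */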
+
+/* ----------------------------------------------------------------------------
+restore_multicast_list
+ Restores the multicast filter for MACE chip to the last
+ set_multicast_list() call.
+
+Input
+ multicast_num_addrs
+ multicast_ladrf[]
+---------------------------------------------------------------------------- */
+static void restore_multicast_list(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+ int num_addrs = lp->multicast_num_addrs;
+ int *ladrf = lp->multicast_ladrf;
+ unsigned int ioaddr = dev->base_addr;
+ int i;
+
+ pr_debug("%s: restoring Rx mode to %d addresses.\n",
+ dev->name, num_addrs);
+
+ if (num_addrs > 0) {
+
+ pr_debug("Attempt to restore multicast list detected.\n");
+
+ mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_LOGADDR);
+ /* Poll ADDRCHG bit */
+ while (mace_read(lp, ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG)
+ ;
+ /* Set LADRF register */
+ for (i = 0; i < MACE_LADRF_LEN; i++)
+ mace_write(lp, ioaddr, MACE_LADRF, ladrf[i]);
+
+ mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_RCVFCSE | MACE_UTR_LOOP_EXTERNAL);
+ mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
+
+ } else if (num_addrs < 0) {
+
+ /* Promiscuous mode: receive all packets */
+ mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(lp, ioaddr, MACE_MACCC,
+ MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV
+ );
+
+ } else {
+
+ /* Normal mode */
+ mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
+
+ }
+} /* restore_multicast_list */
+
+/* ----------------------------------------------------------------------------
+set_multicast_list
+ Set or clear the multicast filter for this adaptor.
+
+Input
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+Output
+ multicast_num_addrs
+ multicast_ladrf[]
+---------------------------------------------------------------------------- */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+ int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
+ struct netdev_hw_addr *ha;
+
+#ifdef PCMCIA_DEBUG
+ {
+ static int old;
+ if (netdev_mc_count(dev) != old) {
+ old = netdev_mc_count(dev);
+ pr_debug("%s: setting Rx mode to %d addresses.\n",
+ dev->name, old);
+ }
+ }
+#endif
+
+ /* Set multicast_num_addrs. */
+ lp->multicast_num_addrs = netdev_mc_count(dev);
+
+ /* Set multicast_ladrf. */
+ if (lp->multicast_num_addrs > 0) {
+ /* Calculate multicast logical address filter */
+ memset(lp->multicast_ladrf, 0, sizeof(lp->multicast_ladrf));
+ netdev_for_each_mc_addr(ha, dev) {
+ int i;
+
+ /* Widen each address byte to the int that BuildLAF() expects. */
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ adr[i] = ha->addr[i];
+ BuildLAF(lp->multicast_ladrf, adr);
+ }
+ }
+
+ restore_multicast_list(dev);
+
+} /* set_multicast_list */
+
+#endif /* BROKEN_MULTICAST */
+
+static void restore_multicast_list(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ mace_private *lp = netdev_priv(dev);
+
+ pr_debug("%s: restoring Rx mode to %d addresses.\n", dev->name,
+ lp->multicast_num_addrs);
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Promiscuous mode: receive all packets */
+ mace_write(lp,ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(lp, ioaddr, MACE_MACCC,
+ MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV
+ );
+ } else {
+ /* Normal mode */
+ mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
+ }
+} /* restore_multicast_list */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+
+#ifdef PCMCIA_DEBUG
+ {
+ static int old;
+ if (netdev_mc_count(dev) != old) {
+ old = netdev_mc_count(dev);
+ pr_debug("%s: setting Rx mode to %d addresses.\n",
+ dev->name, old);
+ }
+ }
+#endif
+
+ lp->multicast_num_addrs = netdev_mc_count(dev);
+ restore_multicast_list(dev);
+
+} /* set_multicast_list */
+
+static const struct pcmcia_device_id nmclan_ids[] = {
+ PCMCIA_DEVICE_PROD_ID12("New Media Corporation", "Ethernet", 0x085a850b, 0x00b2e941),
+ PCMCIA_DEVICE_PROD_ID12("Portable Add-ons", "Ethernet+", 0xebf1d60, 0xad673aaf),
+ PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, nmclan_ids);
+
+static struct pcmcia_driver nmclan_cs_driver = {
+ .owner = THIS_MODULE,
+ .name = "nmclan_cs",
+ .probe = nmclan_probe,
+ .remove = nmclan_detach,
+ .id_table = nmclan_ids,
+ .suspend = nmclan_suspend,
+ .resume = nmclan_resume,
+};
+
+static int __init init_nmclan_cs(void)
+{
+ return pcmcia_register_driver(&nmclan_cs_driver);
+}
+
+static void __exit exit_nmclan_cs(void)
+{
+ pcmcia_unregister_driver(&nmclan_cs_driver);
+}
+
+module_init(init_nmclan_cs);
+module_exit(exit_nmclan_cs);
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
new file mode 100644
index 000000000000..f92bc6e34828
--- /dev/null
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -0,0 +1,2937 @@
+/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
+/*
+ * Copyright 1996-1999 Thomas Bogendoerfer
+ *
+ * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
+ *
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * This driver is for PCnet32 and PCnetPCI based ethercards
+ */
+/**************************************************************************
+ * 23 Oct, 2000.
+ * Fixed a few bugs, related to running the controller in 32bit mode.
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ *************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME "pcnet32"
+#define DRV_VERSION "1.35"
+#define DRV_RELDATE "21.Apr.2008"
+#define PFX DRV_NAME ": "
+
+static const char *const version =
+ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/moduleparam.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <asm/dma.h>
+#include <asm/irq.h>
+
+/*
+ * PCI device identifiers for "new style" Linux PCI Device Drivers
+ */
+static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
+
+ /*
+ * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
+ * the incorrect vendor id.
+ */
+ { PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },
+
+ { } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl);
+
+static int cards_found;
+
+/*
+ * VLB I/O addresses
+ */
+static unsigned int pcnet32_portlist[] =
+ { 0x300, 0x320, 0x340, 0x360, 0 };
+
+static int pcnet32_debug;
+static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
+static int pcnet32vlb; /* check for VLB cards ? */
+
+static struct net_device *pcnet32_dev;
+
+static int max_interrupt_work = 2;
+static int rx_copybreak = 200;
+
+#define PCNET32_PORT_AUI 0x00
+#define PCNET32_PORT_10BT 0x01
+#define PCNET32_PORT_GPSI 0x02
+#define PCNET32_PORT_MII 0x03
+
+#define PCNET32_PORT_PORTSEL 0x03
+#define PCNET32_PORT_ASEL 0x04
+#define PCNET32_PORT_100 0x40
+#define PCNET32_PORT_FD 0x80
+
+#define PCNET32_DMA_MASK 0xffffffff
+
+#define PCNET32_WATCHDOG_TIMEOUT (jiffies + (2 * HZ))
+#define PCNET32_BLINK_TIMEOUT (jiffies + (HZ/4))
+
+/*
+ * table to translate option values from tulip
+ * to internal options
+ */
+static const unsigned char options_mapping[] = {
+ PCNET32_PORT_ASEL, /* 0 Auto-select */
+ PCNET32_PORT_AUI, /* 1 BNC/AUI */
+ PCNET32_PORT_AUI, /* 2 AUI/BNC */
+ PCNET32_PORT_ASEL, /* 3 not supported */
+ PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */
+ PCNET32_PORT_ASEL, /* 5 not supported */
+ PCNET32_PORT_ASEL, /* 6 not supported */
+ PCNET32_PORT_ASEL, /* 7 not supported */
+ PCNET32_PORT_ASEL, /* 8 not supported */
+ PCNET32_PORT_MII, /* 9 MII 10baseT */
+ PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */
+ PCNET32_PORT_MII, /* 11 MII (autosel) */
+ PCNET32_PORT_10BT, /* 12 10BaseT */
+ PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */
+ /* 14 MII 100BaseTx-FD */
+ PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD,
+ PCNET32_PORT_ASEL /* 15 not supported */
+};
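+
+/*
+ * Example (illustrative): loading the module with "options=13" maps
+ * through this table to PCNET32_PORT_MII | PCNET32_PORT_100, i.e. a
+ * forced MII 100BaseTx port selection.
+ */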
+
+static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Loopback test (offline)"
+};
+
+#define PCNET32_TEST_LEN ARRAY_SIZE(pcnet32_gstrings_test)
+
+#define PCNET32_NUM_REGS 136
+
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS];
+static int full_duplex[MAX_UNITS];
+static int homepna[MAX_UNITS];
+
+/*
+ * Theory of Operation
+ *
+ * This driver uses the same software structure as the normal lance
+ * driver. So look for a verbose description in lance.c. The differences
+ * to the normal lance driver is the use of the 32bit mode of PCnet32
+ * and PCnetPCI chips. Because these chips are 32bit chips, there is no
+ * 16MB limitation and we don't need bounce buffers.
+ */
+
+/*
+ * Set the number of Tx and Rx buffers, using Log_2(# buffers).
+ * Reasonable default values are 16 Tx buffers and 32 Rx buffers.
+ * That translates to log2 values of 4 (16 == 2^4) and 5 (32 == 2^5).
+ */
+#ifndef PCNET32_LOG_TX_BUFFERS
+#define PCNET32_LOG_TX_BUFFERS 4
+#define PCNET32_LOG_RX_BUFFERS 5
+#define PCNET32_LOG_MAX_TX_BUFFERS 9 /* 2^9 == 512 */
+#define PCNET32_LOG_MAX_RX_BUFFERS 9
+#endif
+
+#define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS))
+#define TX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_TX_BUFFERS))
+
+#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS))
+#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS))
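+
+/*
+ * With the defaults above, TX_RING_SIZE == 16 (1 << 4) and
+ * RX_RING_SIZE == 32 (1 << 5); both rings can later be resized up to
+ * 512 entries (1 << 9) through the ethtool ring-param hooks below.
+ */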
+
+#define PKT_BUF_SKB 1544
+/* actual buffer length after being aligned */
+#define PKT_BUF_SIZE (PKT_BUF_SKB - NET_IP_ALIGN)
+/* chip wants twos complement of the (aligned) buffer length */
+#define NEG_BUF_SIZE (NET_IP_ALIGN - PKT_BUF_SKB)
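+
+/*
+ * Example (illustrative), assuming NET_IP_ALIGN == 2 (the common
+ * default): PKT_BUF_SIZE is 1542 and NEG_BUF_SIZE is -1542, which a
+ * descriptor stores as the 16-bit two's-complement value 0xf9fa.
+ */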
+
+/* Offsets from base I/O address. */
+#define PCNET32_WIO_RDP 0x10
+#define PCNET32_WIO_RAP 0x12
+#define PCNET32_WIO_RESET 0x14
+#define PCNET32_WIO_BDP 0x16
+
+#define PCNET32_DWIO_RDP 0x10
+#define PCNET32_DWIO_RAP 0x14
+#define PCNET32_DWIO_RESET 0x18
+#define PCNET32_DWIO_BDP 0x1C
+
+#define PCNET32_TOTAL_SIZE 0x20
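+
+/*
+ * All CSR/BCR accesses are indirect: write the register number to the
+ * RAP port, then move data through RDP (for CSRs) or BDP (for BCRs).
+ * For example, reading CSR0 in word-I/O mode (illustrative; see
+ * pcnet32_wio_read_csr() below):
+ *
+ *   outw(CSR0, ioaddr + PCNET32_WIO_RAP);
+ *   status = inw(ioaddr + PCNET32_WIO_RDP);
+ */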
+
+#define CSR0 0
+#define CSR0_INIT 0x1
+#define CSR0_START 0x2
+#define CSR0_STOP 0x4
+#define CSR0_TXPOLL 0x8
+#define CSR0_INTEN 0x40
+#define CSR0_IDON 0x0100
+#define CSR0_NORMAL (CSR0_START | CSR0_INTEN)
+#define PCNET32_INIT_LOW 1
+#define PCNET32_INIT_HIGH 2
+#define CSR3 3
+#define CSR4 4
+#define CSR5 5
+#define CSR5_SUSPEND 0x0001
+#define CSR15 15
+#define PCNET32_MC_FILTER 8
+
+#define PCNET32_79C970A 0x2621
+
+/* The PCNET32 Rx and Tx ring descriptors. */
+struct pcnet32_rx_head {
+ __le32 base;
+ __le16 buf_length; /* two's complement of length */
+ __le16 status;
+ __le32 msg_length;
+ __le32 reserved;
+};
+
+struct pcnet32_tx_head {
+ __le32 base;
+ __le16 length; /* two's complement of length */
+ __le16 status;
+ __le32 misc;
+ __le32 reserved;
+};
+
+/* The PCNET32 32-Bit initialization block, described in databook. */
+struct pcnet32_init_block {
+ __le16 mode;
+ __le16 tlen_rlen;
+ u8 phys_addr[6];
+ __le16 reserved;
+ __le32 filter[2];
+ /* Receive and transmit ring base, along with extra bits. */
+ __le32 rx_ring;
+ __le32 tx_ring;
+};
+
+/* PCnet32 access functions */
+struct pcnet32_access {
+ u16 (*read_csr) (unsigned long, int);
+ void (*write_csr) (unsigned long, int, u16);
+ u16 (*read_bcr) (unsigned long, int);
+ void (*write_bcr) (unsigned long, int, u16);
+ u16 (*read_rap) (unsigned long);
+ void (*write_rap) (unsigned long, u16);
+ void (*reset) (unsigned long);
+};
+
+/*
+ * The first field of pcnet32_private is read by the ethernet device
+ * so the structure should be allocated using pci_alloc_consistent().
+ */
+struct pcnet32_private {
+ struct pcnet32_init_block *init_block;
+ /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
+ struct pcnet32_rx_head *rx_ring;
+ struct pcnet32_tx_head *tx_ring;
+ dma_addr_t init_dma_addr;/* DMA address of beginning of the init block,
+ returned by pci_alloc_consistent */
+ struct pci_dev *pci_dev;
+ const char *name;
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff **tx_skbuff;
+ struct sk_buff **rx_skbuff;
+ dma_addr_t *tx_dma_addr;
+ dma_addr_t *rx_dma_addr;
+ const struct pcnet32_access *a;
+ spinlock_t lock; /* Guard lock */
+ unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int rx_ring_size; /* current rx ring size */
+ unsigned int tx_ring_size; /* current tx ring size */
+ unsigned int rx_mod_mask; /* rx ring modular mask */
+ unsigned int tx_mod_mask; /* tx ring modular mask */
+ unsigned short rx_len_bits;
+ unsigned short tx_len_bits;
+ dma_addr_t rx_ring_dma_addr;
+ dma_addr_t tx_ring_dma_addr;
+ unsigned int dirty_rx, /* ring entries to be freed. */
+ dirty_tx;
+
+ struct net_device *dev;
+ struct napi_struct napi;
+ char tx_full;
+ char phycount; /* number of phys found */
+ int options;
+ unsigned int shared_irq:1, /* shared irq possible */
+ dxsuflo:1, /* disable transmit stop on uflo */
+ mii:1; /* mii port available */
+ struct net_device *next;
+ struct mii_if_info mii_if;
+ struct timer_list watchdog_timer;
+ u32 msg_enable; /* debug message level */
+
+ /* each bit indicates an available PHY */
+ u32 phymask;
+ unsigned short chip_version; /* which variant this is */
+
+ /* saved registers during ethtool blink */
+ u16 save_regs[4];
+};
+
+static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
+static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
+static int pcnet32_open(struct net_device *);
+static int pcnet32_init_ring(struct net_device *);
+static netdev_tx_t pcnet32_start_xmit(struct sk_buff *,
+ struct net_device *);
+static void pcnet32_tx_timeout(struct net_device *dev);
+static irqreturn_t pcnet32_interrupt(int, void *);
+static int pcnet32_close(struct net_device *);
+static struct net_device_stats *pcnet32_get_stats(struct net_device *);
+static void pcnet32_load_multicast(struct net_device *dev);
+static void pcnet32_set_multicast_list(struct net_device *);
+static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
+static void pcnet32_watchdog(struct net_device *);
+static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
+static void mdio_write(struct net_device *dev, int phy_id, int reg_num,
+ int val);
+static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
+static void pcnet32_ethtool_test(struct net_device *dev,
+ struct ethtool_test *eth_test, u64 * data);
+static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1);
+static int pcnet32_get_regs_len(struct net_device *dev);
+static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *ptr);
+static void pcnet32_purge_tx_ring(struct net_device *dev);
+static int pcnet32_alloc_ring(struct net_device *dev, const char *name);
+static void pcnet32_free_ring(struct net_device *dev);
+static void pcnet32_check_media(struct net_device *dev, int verbose);
+
+static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
+{
+ outw(index, addr + PCNET32_WIO_RAP);
+ return inw(addr + PCNET32_WIO_RDP);
+}
+
+static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val)
+{
+ outw(index, addr + PCNET32_WIO_RAP);
+ outw(val, addr + PCNET32_WIO_RDP);
+}
+
+static u16 pcnet32_wio_read_bcr(unsigned long addr, int index)
+{
+ outw(index, addr + PCNET32_WIO_RAP);
+ return inw(addr + PCNET32_WIO_BDP);
+}
+
+static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val)
+{
+ outw(index, addr + PCNET32_WIO_RAP);
+ outw(val, addr + PCNET32_WIO_BDP);
+}
+
+static u16 pcnet32_wio_read_rap(unsigned long addr)
+{
+ return inw(addr + PCNET32_WIO_RAP);
+}
+
+static void pcnet32_wio_write_rap(unsigned long addr, u16 val)
+{
+ outw(val, addr + PCNET32_WIO_RAP);
+}
+
+static void pcnet32_wio_reset(unsigned long addr)
+{
+ inw(addr + PCNET32_WIO_RESET);
+}
+
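+/*
+ * Probe for word-I/O mode: in 16-bit mode a value written to RAP reads
+ * straight back; 88 is just an arbitrary in-range register number.
+ */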
+static int pcnet32_wio_check(unsigned long addr)
+{
+ outw(88, addr + PCNET32_WIO_RAP);
+ return inw(addr + PCNET32_WIO_RAP) == 88;
+}
+
+static const struct pcnet32_access pcnet32_wio = {
+ .read_csr = pcnet32_wio_read_csr,
+ .write_csr = pcnet32_wio_write_csr,
+ .read_bcr = pcnet32_wio_read_bcr,
+ .write_bcr = pcnet32_wio_write_bcr,
+ .read_rap = pcnet32_wio_read_rap,
+ .write_rap = pcnet32_wio_write_rap,
+ .reset = pcnet32_wio_reset
+};
+
+static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
+{
+ outl(index, addr + PCNET32_DWIO_RAP);
+ return inl(addr + PCNET32_DWIO_RDP) & 0xffff;
+}
+
+static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
+{
+ outl(index, addr + PCNET32_DWIO_RAP);
+ outl(val, addr + PCNET32_DWIO_RDP);
+}
+
+static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
+{
+ outl(index, addr + PCNET32_DWIO_RAP);
+ return inl(addr + PCNET32_DWIO_BDP) & 0xffff;
+}
+
+static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
+{
+ outl(index, addr + PCNET32_DWIO_RAP);
+ outl(val, addr + PCNET32_DWIO_BDP);
+}
+
+static u16 pcnet32_dwio_read_rap(unsigned long addr)
+{
+ return inl(addr + PCNET32_DWIO_RAP) & 0xffff;
+}
+
+static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
+{
+ outl(val, addr + PCNET32_DWIO_RAP);
+}
+
+static void pcnet32_dwio_reset(unsigned long addr)
+{
+ inl(addr + PCNET32_DWIO_RESET);
+}
+
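+/* Same probe as pcnet32_wio_check(), but through the 32-bit ports. */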
+static int pcnet32_dwio_check(unsigned long addr)
+{
+ outl(88, addr + PCNET32_DWIO_RAP);
+ return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88;
+}
+
+static const struct pcnet32_access pcnet32_dwio = {
+ .read_csr = pcnet32_dwio_read_csr,
+ .write_csr = pcnet32_dwio_write_csr,
+ .read_bcr = pcnet32_dwio_read_bcr,
+ .write_bcr = pcnet32_dwio_write_bcr,
+ .read_rap = pcnet32_dwio_read_rap,
+ .write_rap = pcnet32_dwio_write_rap,
+ .reset = pcnet32_dwio_reset
+};
+
+static void pcnet32_netif_stop(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ napi_disable(&lp->napi);
+ netif_tx_disable(dev);
+}
+
+static void pcnet32_netif_start(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ ulong ioaddr = dev->base_addr;
+ u16 val;
+
+ netif_wake_queue(dev);
+ val = lp->a->read_csr(ioaddr, CSR3);
+ val &= 0x00ff;
+ lp->a->write_csr(ioaddr, CSR3, val);
+ napi_enable(&lp->napi);
+}
+
+/*
+ * Allocate space for the new sized tx ring.
+ * Free old resources
+ * Save new resources.
+ * Any failure keeps old resources.
+ * Must be called with lp->lock held.
+ */
+static void pcnet32_realloc_tx_ring(struct net_device *dev,
+ struct pcnet32_private *lp,
+ unsigned int size)
+{
+ dma_addr_t new_ring_dma_addr;
+ dma_addr_t *new_dma_addr_list;
+ struct pcnet32_tx_head *new_tx_ring;
+ struct sk_buff **new_skb_list;
+
+ pcnet32_purge_tx_ring(dev);
+
+ new_tx_ring = pci_alloc_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) *
+ (1 << size),
+ &new_ring_dma_addr);
+ if (new_tx_ring == NULL) {
+ netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
+ return;
+ }
+ memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
+
+ new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
+ GFP_ATOMIC);
+ if (!new_dma_addr_list) {
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
+ goto free_new_tx_ring;
+ }
+
+ new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
+ GFP_ATOMIC);
+ if (!new_skb_list) {
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
+ goto free_new_lists;
+ }
+
+ kfree(lp->tx_skbuff);
+ kfree(lp->tx_dma_addr);
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) *
+ lp->tx_ring_size, lp->tx_ring,
+ lp->tx_ring_dma_addr);
+
+ lp->tx_ring_size = (1 << size);
+ lp->tx_mod_mask = lp->tx_ring_size - 1;
+ lp->tx_len_bits = (size << 12);
+ lp->tx_ring = new_tx_ring;
+ lp->tx_ring_dma_addr = new_ring_dma_addr;
+ lp->tx_dma_addr = new_dma_addr_list;
+ lp->tx_skbuff = new_skb_list;
+ return;
+
+free_new_lists:
+ kfree(new_dma_addr_list);
+free_new_tx_ring:
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) *
+ (1 << size),
+ new_tx_ring,
+ new_ring_dma_addr);
+}
+
+/*
+ * Allocate space for the new sized rx ring.
+ * Re-use old receive buffers.
+ * alloc extra buffers
+ * free unneeded buffers
+ * Save new resources.
+ * Any failure keeps old resources.
+ * Must be called with lp->lock held.
+ */
+static void pcnet32_realloc_rx_ring(struct net_device *dev,
+ struct pcnet32_private *lp,
+ unsigned int size)
+{
+ dma_addr_t new_ring_dma_addr;
+ dma_addr_t *new_dma_addr_list;
+ struct pcnet32_rx_head *new_rx_ring;
+ struct sk_buff **new_skb_list;
+ int new, overlap;
+
+ new_rx_ring = pci_alloc_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) *
+ (1 << size),
+ &new_ring_dma_addr);
+ if (new_rx_ring == NULL) {
+ netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
+ return;
+ }
+ memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
+
+ new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
+ GFP_ATOMIC);
+ if (!new_dma_addr_list) {
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
+ goto free_new_rx_ring;
+ }
+
+ new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
+ GFP_ATOMIC);
+ if (!new_skb_list) {
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
+ goto free_new_lists;
+ }
+
+ /* first copy the current receive buffers */
+ overlap = min(1U << size, lp->rx_ring_size); /* size is a log2 value */
+ for (new = 0; new < overlap; new++) {
+ new_rx_ring[new] = lp->rx_ring[new];
+ new_dma_addr_list[new] = lp->rx_dma_addr[new];
+ new_skb_list[new] = lp->rx_skbuff[new];
+ }
+ /* now allocate any new buffers needed */
+ for (; new < (1 << size); new++) {
+ struct sk_buff *rx_skbuff;
+ new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB);
+ rx_skbuff = new_skb_list[new];
+ if (!rx_skbuff) {
+ /* keep the original lists and buffers */
+ netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n",
+ __func__);
+ goto free_all_new;
+ }
+ skb_reserve(rx_skbuff, NET_IP_ALIGN);
+
+ new_dma_addr_list[new] =
+ pci_map_single(lp->pci_dev, rx_skbuff->data,
+ PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
+ new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
+ new_rx_ring[new].status = cpu_to_le16(0x8000);
+ }
+ /* and free any unneeded buffers */
+ for (; new < lp->rx_ring_size; new++) {
+ if (lp->rx_skbuff[new]) {
+ pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
+ PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(lp->rx_skbuff[new]);
+ }
+ }
+
+ kfree(lp->rx_skbuff);
+ kfree(lp->rx_dma_addr);
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) *
+ lp->rx_ring_size, lp->rx_ring,
+ lp->rx_ring_dma_addr);
+
+ lp->rx_ring_size = (1 << size);
+ lp->rx_mod_mask = lp->rx_ring_size - 1;
+ lp->rx_len_bits = (size << 4);
+ lp->rx_ring = new_rx_ring;
+ lp->rx_ring_dma_addr = new_ring_dma_addr;
+ lp->rx_dma_addr = new_dma_addr_list;
+ lp->rx_skbuff = new_skb_list;
+ return;
+
+free_all_new:
+ while (--new >= lp->rx_ring_size) {
+ if (new_skb_list[new]) {
+ pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
+ PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(new_skb_list[new]);
+ }
+ }
+ kfree(new_skb_list);
+free_new_lists:
+ kfree(new_dma_addr_list);
+free_new_rx_ring:
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) *
+ (1 << size),
+ new_rx_ring,
+ new_ring_dma_addr);
+}
+
+static void pcnet32_purge_rx_ring(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int i;
+
+ /* free all allocated skbuffs */
+ for (i = 0; i < lp->rx_ring_size; i++) {
+ lp->rx_ring[i].status = 0; /* CPU owns buffer */
+ wmb(); /* Make sure adapter sees owner change */
+ if (lp->rx_skbuff[i]) {
+ pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
+ PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(lp->rx_skbuff[i]);
+ }
+ lp->rx_skbuff[i] = NULL;
+ lp->rx_dma_addr[i] = 0;
+ }
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void pcnet32_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ pcnet32_interrupt(0, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ int r = -EOPNOTSUPP;
+
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ mii_ethtool_gset(&lp->mii_if, cmd);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ r = 0;
+ }
+ return r;
+}
+
+static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ int r = -EOPNOTSUPP;
+
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ r = mii_ethtool_sset(&lp->mii_if, cmd);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return r;
+}
+
+static void pcnet32_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ if (lp->pci_dev)
+ strcpy(info->bus_info, pci_name(lp->pci_dev));
+ else
+ sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
+}
+
+static u32 pcnet32_get_link(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ if (lp->mii) {
+ r = mii_link_ok(&lp->mii_if);
+ } else if (lp->chip_version >= PCNET32_79C970A) {
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
+ } else { /* can not detect link on really old chips */
+ r = 1;
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return r;
+}
+
+static u32 pcnet32_get_msglevel(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ return lp->msg_enable;
+}
+
+static void pcnet32_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ lp->msg_enable = value;
+}
+
+static int pcnet32_nway_reset(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ int r = -EOPNOTSUPP;
+
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ r = mii_nway_restart(&lp->mii_if);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return r;
+}
+
+static void pcnet32_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ ering->tx_max_pending = TX_MAX_RING_SIZE;
+ ering->tx_pending = lp->tx_ring_size;
+ ering->rx_max_pending = RX_MAX_RING_SIZE;
+ ering->rx_pending = lp->rx_ring_size;
+}
+
+static int pcnet32_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ unsigned int size;
+ ulong ioaddr = dev->base_addr;
+ int i;
+
+ if (ering->rx_mini_pending || ering->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (netif_running(dev))
+ pcnet32_netif_stop(dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
+
+ size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
+
+ /* set the minimum ring size to 4, to allow the loopback test to work
+ * unchanged.
+ */
+ for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
+ if (size <= (1 << i))
+ break;
+ }
+ if ((1 << i) != lp->tx_ring_size)
+ pcnet32_realloc_tx_ring(dev, lp, i);
+
+ size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
+ for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
+ if (size <= (1 << i))
+ break;
+ }
+ if ((1 << i) != lp->rx_ring_size)
+ pcnet32_realloc_rx_ring(dev, lp, i);
+
+ lp->napi.weight = lp->rx_ring_size / 2;
+
+ if (netif_running(dev)) {
+ pcnet32_netif_start(dev);
+ pcnet32_restart(dev, CSR0_NORMAL);
+ }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
+ lp->rx_ring_size, lp->tx_ring_size);
+
+ return 0;
+}
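+
+/*
+ * Example (illustrative): "ethtool -G eth0 rx 100" arrives here with
+ * ering->rx_pending == 100; the loops above round the request up to
+ * the next power of two, so the ring is reallocated with 128 entries.
+ */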
+
+static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
+{
+ memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
+}
+
+static int pcnet32_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return PCNET32_TEST_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void pcnet32_ethtool_test(struct net_device *dev,
+ struct ethtool_test *test, u64 * data)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int rc;
+
+ if (test->flags == ETH_TEST_FL_OFFLINE) {
+ rc = pcnet32_loopback_test(dev, data);
+ if (rc) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Loopback test failed\n");
+ test->flags |= ETH_TEST_FL_FAILED;
+ } else
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Loopback test passed\n");
+ } else
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+			     "No tests to run (specify 'offline' with ethtool -t)\n");
+} /* end pcnet32_ethtool_test */
+
+static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ const struct pcnet32_access *a = lp->a; /* access to registers */
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ struct sk_buff *skb; /* sk buff */
+ int x, i; /* counters */
+ int numbuffs = 4; /* number of TX/RX buffers and descs */
+ u16 status = 0x8300; /* TX ring status */
+ __le16 teststatus; /* test of ring status */
+ int rc; /* return code */
+ int size; /* size of packets */
+ unsigned char *packet; /* source packet data */
+ static const int data_len = 60; /* length of source packets */
+ unsigned long flags;
+ unsigned long ticks;
+
+ rc = 1; /* default to fail */
+
+ if (netif_running(dev))
+ pcnet32_netif_stop(dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
+
+ numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
+
+ /* Reset the PCNET32 */
+ lp->a->reset(ioaddr);
+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
+
+ /* switch pcnet32 to 32bit mode */
+ lp->a->write_bcr(ioaddr, 20, 2);
+
+ /* purge & init rings but don't actually restart */
+ pcnet32_restart(dev, 0x0000);
+
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
+
+ /* Initialize Transmit buffers. */
+ size = data_len + 15;
+ for (x = 0; x < numbuffs; x++) {
+ skb = dev_alloc_skb(size);
+ if (!skb) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Cannot allocate skb at line: %d!\n",
+ __LINE__);
+ goto clean_up;
+ }
+ packet = skb->data;
+ skb_put(skb, size); /* create space for data */
+ lp->tx_skbuff[x] = skb;
+ lp->tx_ring[x].length = cpu_to_le16(-skb->len);
+ lp->tx_ring[x].misc = 0;
+
+ /* put DA and SA into the skb */
+ for (i = 0; i < 6; i++)
+ *packet++ = dev->dev_addr[i];
+ for (i = 0; i < 6; i++)
+ *packet++ = dev->dev_addr[i];
+		/* ethertype 0x0806 (ARP) */
+ *packet++ = 0x08;
+ *packet++ = 0x06;
+ /* packet number */
+ *packet++ = x;
+ /* fill packet with data */
+ for (i = 0; i < data_len; i++)
+ *packet++ = i;
+
+ lp->tx_dma_addr[x] =
+ pci_map_single(lp->pci_dev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->tx_ring[x].status = cpu_to_le16(status);
+ }
+
+ x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */
+ a->write_bcr(ioaddr, 32, x | 0x0002);
+
+	/* set internal loopback in CSR15: LOOP (bit 2) and INTL (bit 6) */
+ x = a->read_csr(ioaddr, CSR15) & 0xfffc;
+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
+
+ teststatus = cpu_to_le16(0x8000);
+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
+
+ /* Check status of descriptors */
+ for (x = 0; x < numbuffs; x++) {
+ ticks = 0;
+ rmb();
+ while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
+ spin_unlock_irqrestore(&lp->lock, flags);
+ msleep(1);
+ spin_lock_irqsave(&lp->lock, flags);
+ rmb();
+ ticks++;
+ }
+ if (ticks == 200) {
+ netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x);
+ break;
+ }
+ }
+
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
+ wmb();
+ if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
+ netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
+
+ for (x = 0; x < numbuffs; x++) {
+ netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x);
+ skb = lp->rx_skbuff[x];
+ for (i = 0; i < size; i++)
+ pr_cont(" %02x", *(skb->data + i));
+ pr_cont("\n");
+ }
+ }
+
+ x = 0;
+ rc = 0;
+ while (x < numbuffs && !rc) {
+ skb = lp->rx_skbuff[x];
+ packet = lp->tx_skbuff[x]->data;
+ for (i = 0; i < size; i++) {
+ if (*(skb->data + i) != packet[i]) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Error in compare! %2x - %02x %02x\n",
+ i, *(skb->data + i), packet[i]);
+ rc = 1;
+ break;
+ }
+ }
+ x++;
+ }
+
+clean_up:
+ *data1 = rc;
+ pcnet32_purge_tx_ring(dev);
+
+ x = a->read_csr(ioaddr, CSR15);
+ a->write_csr(ioaddr, CSR15, (x & ~0x0044)); /* reset bits 6 and 2 */
+
+ x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
+ a->write_bcr(ioaddr, 32, (x & ~0x0002));
+
+ if (netif_running(dev)) {
+ pcnet32_netif_start(dev);
+ pcnet32_restart(dev, CSR0_NORMAL);
+ } else {
+ pcnet32_purge_rx_ring(dev);
+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return rc;
+} /* end pcnet32_loopback_test */
+
+static int pcnet32_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ const struct pcnet32_access *a = lp->a;
+ ulong ioaddr = dev->base_addr;
+ unsigned long flags;
+ int i;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ /* Save the current value of the bcrs */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++)
+ lp->save_regs[i - 4] = a->read_bcr(ioaddr, i);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return 2; /* cycle on/off twice per second */
+
+ case ETHTOOL_ID_ON:
+ case ETHTOOL_ID_OFF:
+ /* Blink the led */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++)
+ a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ /* Restore the original value of the bcrs */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++)
+ a->write_bcr(ioaddr, i, lp->save_regs[i - 4]);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return 0;
+}
+
+/*
+ * lp->lock must be held.
+ */
+static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
+ int can_sleep)
+{
+ int csr5;
+ struct pcnet32_private *lp = netdev_priv(dev);
+ const struct pcnet32_access *a = lp->a;
+ ulong ioaddr = dev->base_addr;
+ int ticks;
+
+	/* really old chips cannot suspend; the caller has to stop them instead. */
+ if (lp->chip_version < PCNET32_79C970A)
+ return 0;
+
+ /* set SUSPEND (SPND) - CSR5 bit 0 */
+ csr5 = a->read_csr(ioaddr, CSR5);
+ a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
+
+ /* poll waiting for bit to be set */
+ ticks = 0;
+ while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
+ spin_unlock_irqrestore(&lp->lock, *flags);
+ if (can_sleep)
+ msleep(1);
+ else
+ mdelay(1);
+ spin_lock_irqsave(&lp->lock, *flags);
+ ticks++;
+ if (ticks > 200) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Error getting into suspend!\n");
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/*
+ * process one receive descriptor entry
+ */
+
+static void pcnet32_rx_entry(struct net_device *dev,
+ struct pcnet32_private *lp,
+ struct pcnet32_rx_head *rxp,
+ int entry)
+{
+ int status = (short)le16_to_cpu(rxp->status) >> 8;
+ int rx_in_place = 0;
+ struct sk_buff *skb;
+ short pkt_len;
+
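+	/*
+	 * The high byte of the descriptor status (RMD1) carries
+	 * OWN/ERR/FRAM/OFLO/CRC/BUFF/STP/ENP; 0x03 (STP | ENP) is a
+	 * complete, error-free frame held in a single buffer.
+	 */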
+ if (status != 0x03) { /* There was an error. */
+ /*
+ * There is a tricky error noted by John Murphy,
+ * <murf@perftech.com> to Russ Nelson: Even with full-sized
+ * buffers it's possible for a jabber packet to use two
+ * buffers, with only the last correctly noting the error.
+ */
+		/* Only count a general error at the end of a packet. */
+		if (status & 0x01)
+			dev->stats.rx_errors++;
+ if (status & 0x20)
+ dev->stats.rx_frame_errors++;
+ if (status & 0x10)
+ dev->stats.rx_over_errors++;
+ if (status & 0x08)
+ dev->stats.rx_crc_errors++;
+ if (status & 0x04)
+ dev->stats.rx_fifo_errors++;
+ return;
+ }
+
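+	/* the message length counts the 4-byte FCS, which is not passed up */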
+ pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4;
+
+ /* Discard oversize frames. */
+ if (unlikely(pkt_len > PKT_BUF_SIZE)) {
+ netif_err(lp, drv, dev, "Impossible packet size %d!\n",
+ pkt_len);
+ dev->stats.rx_errors++;
+ return;
+ }
+ if (pkt_len < 60) {
+ netif_err(lp, rx_err, dev, "Runt packet!\n");
+ dev->stats.rx_errors++;
+ return;
+ }
+
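+	/*
+	 * Copybreak: a frame longer than rx_copybreak keeps its ring buffer
+	 * and a freshly allocated skb replaces it on the ring; shorter frames
+	 * are copied into a right-sized skb so the ring buffer can be reused.
+	 */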
+ if (pkt_len > rx_copybreak) {
+ struct sk_buff *newskb;
+
+ newskb = dev_alloc_skb(PKT_BUF_SKB);
+ if (newskb) {
+ skb_reserve(newskb, NET_IP_ALIGN);
+ skb = lp->rx_skbuff[entry];
+ pci_unmap_single(lp->pci_dev,
+ lp->rx_dma_addr[entry],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ skb_put(skb, pkt_len);
+ lp->rx_skbuff[entry] = newskb;
+ lp->rx_dma_addr[entry] =
+ pci_map_single(lp->pci_dev,
+ newskb->data,
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
+ rx_in_place = 1;
+ } else
+ skb = NULL;
+ } else
+ skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
+
+ if (skb == NULL) {
+ netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n");
+ dev->stats.rx_dropped++;
+ return;
+ }
+ if (!rx_in_place) {
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb_put(skb, pkt_len); /* Make room */
+ pci_dma_sync_single_for_cpu(lp->pci_dev,
+ lp->rx_dma_addr[entry],
+ pkt_len,
+ PCI_DMA_FROMDEVICE);
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)(lp->rx_skbuff[entry]->data),
+ pkt_len);
+ pci_dma_sync_single_for_device(lp->pci_dev,
+ lp->rx_dma_addr[entry],
+ pkt_len,
+ PCI_DMA_FROMDEVICE);
+ }
+ dev->stats.rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ dev->stats.rx_packets++;
+}
+
+static int pcnet32_rx(struct net_device *dev, int budget)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int entry = lp->cur_rx & lp->rx_mod_mask;
+ struct pcnet32_rx_head *rxp = &lp->rx_ring[entry];
+ int npackets = 0;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while (npackets < budget && (short)le16_to_cpu(rxp->status) >= 0) {
+ pcnet32_rx_entry(dev, lp, rxp, entry);
+ npackets += 1;
+ /*
+ * The docs say that the buffer length isn't touched, but Andrew
+ * Boyd of QNX reports that some revs of the 79C965 clear it.
+ */
+ rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE);
+ wmb(); /* Make sure owner changes after others are visible */
+ rxp->status = cpu_to_le16(0x8000);
+ entry = (++lp->cur_rx) & lp->rx_mod_mask;
+ rxp = &lp->rx_ring[entry];
+ }
+
+ return npackets;
+}
+
+static int pcnet32_tx(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned int dirty_tx = lp->dirty_tx;
+ int delta;
+ int must_restart = 0;
+
+ while (dirty_tx != lp->cur_tx) {
+ int entry = dirty_tx & lp->tx_mod_mask;
+ int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
+
+ if (status < 0)
+ break; /* It still hasn't been Txed */
+
+ lp->tx_ring[entry].base = 0;
+
+ if (status & 0x4000) {
+ /* There was a major error, log it. */
+ int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
+ dev->stats.tx_errors++;
+ netif_err(lp, tx_err, dev,
+ "Tx error status=%04x err_status=%08x\n",
+ status, err_status);
+ if (err_status & 0x04000000)
+ dev->stats.tx_aborted_errors++;
+ if (err_status & 0x08000000)
+ dev->stats.tx_carrier_errors++;
+ if (err_status & 0x10000000)
+ dev->stats.tx_window_errors++;
+#ifndef DO_DXSUFLO
+ if (err_status & 0x40000000) {
+ dev->stats.tx_fifo_errors++;
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ /* Remove this verbosity later! */
+ netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
+ must_restart = 1;
+ }
+#else
+ if (err_status & 0x40000000) {
+ dev->stats.tx_fifo_errors++;
+ if (!lp->dxsuflo) { /* If controller doesn't recover ... */
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ /* Remove this verbosity later! */
+ netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
+ must_restart = 1;
+ }
+ }
+#endif
+ } else {
+ if (status & 0x1800)
+ dev->stats.collisions++;
+ dev->stats.tx_packets++;
+ }
+
+ /* We must free the original skb */
+ if (lp->tx_skbuff[entry]) {
+ pci_unmap_single(lp->pci_dev,
+ lp->tx_dma_addr[entry],
+ lp->tx_skbuff[entry]->
+ len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(lp->tx_skbuff[entry]);
+ lp->tx_skbuff[entry] = NULL;
+ lp->tx_dma_addr[entry] = 0;
+ }
+ dirty_tx++;
+ }
+
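+	/* count outstanding descriptors modulo twice the ring size so that
+	 * a wrapped or corrupt dirty pointer shows up as delta > tx_ring_size */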
+ delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
+ if (delta > lp->tx_ring_size) {
+ netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
+ dirty_tx, lp->cur_tx, lp->tx_full);
+ dirty_tx += lp->tx_ring_size;
+ delta -= lp->tx_ring_size;
+ }
+
+ if (lp->tx_full &&
+ netif_queue_stopped(dev) &&
+ delta < lp->tx_ring_size - 2) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+ lp->dirty_tx = dirty_tx;
+
+ return must_restart;
+}
+
+static int pcnet32_poll(struct napi_struct *napi, int budget)
+{
+ struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi);
+ struct net_device *dev = lp->dev;
+ unsigned long ioaddr = dev->base_addr;
+ unsigned long flags;
+ int work_done;
+ u16 val;
+
+ work_done = pcnet32_rx(dev, budget);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ if (pcnet32_tx(dev)) {
+ /* reset the chip to clear the error condition, then restart */
+ lp->a->reset(ioaddr);
+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
+ pcnet32_restart(dev, CSR0_START);
+ netif_wake_queue(dev);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ if (work_done < budget) {
+ spin_lock_irqsave(&lp->lock, flags);
+
+ __napi_complete(napi);
+
+ /* clear interrupt masks */
+ val = lp->a->read_csr(ioaddr, CSR3);
+ val &= 0x00ff;
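+		/* dropping the CSR3 mask bits re-enables those interrupt
+		 * sources now that this poll round is finished */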
+ lp->a->write_csr(ioaddr, CSR3, val);
+
+ /* Set interrupt enable. */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return work_done;
+}
+
+#define PCNET32_REGS_PER_PHY 32
+#define PCNET32_MAX_PHYS 32
+static int pcnet32_get_regs_len(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int j = lp->phycount * PCNET32_REGS_PER_PHY;
+
+ return (PCNET32_NUM_REGS + j) * sizeof(u16);
+}
+
+static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *ptr)
+{
+ int i, csr0;
+ u16 *buff = ptr;
+ struct pcnet32_private *lp = netdev_priv(dev);
+ const struct pcnet32_access *a = lp->a;
+ ulong ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ csr0 = a->read_csr(ioaddr, CSR0);
+ if (!(csr0 & CSR0_STOP)) /* If not stopped */
+ pcnet32_suspend(dev, &flags, 1);
+
+ /* read address PROM */
+ for (i = 0; i < 16; i += 2)
+ *buff++ = inw(ioaddr + i);
+
+ /* read control and status registers */
+ for (i = 0; i < 90; i++)
+ *buff++ = a->read_csr(ioaddr, i);
+
+ *buff++ = a->read_csr(ioaddr, 112);
+ *buff++ = a->read_csr(ioaddr, 114);
+
+ /* read bus configuration registers */
+ for (i = 0; i < 30; i++)
+ *buff++ = a->read_bcr(ioaddr, i);
+
+ *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */
+
+ for (i = 31; i < 36; i++)
+ *buff++ = a->read_bcr(ioaddr, i);
+
+ /* read mii phy registers */
+ if (lp->mii) {
+ int j;
+ for (j = 0; j < PCNET32_MAX_PHYS; j++) {
+ if (lp->phymask & (1 << j)) {
+ for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
+ lp->a->write_bcr(ioaddr, 33,
+ (j << 5) | i);
+ *buff++ = lp->a->read_bcr(ioaddr, 34);
+ }
+ }
+ }
+ }
+
+ if (!(csr0 & CSR0_STOP)) { /* If not stopped */
+ int csr5;
+
+ /* clear SUSPEND (SPND) - CSR5 bit 0 */
+ csr5 = a->read_csr(ioaddr, CSR5);
+ a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
+ }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+static const struct ethtool_ops pcnet32_ethtool_ops = {
+ .get_settings = pcnet32_get_settings,
+ .set_settings = pcnet32_set_settings,
+ .get_drvinfo = pcnet32_get_drvinfo,
+ .get_msglevel = pcnet32_get_msglevel,
+ .set_msglevel = pcnet32_set_msglevel,
+ .nway_reset = pcnet32_nway_reset,
+ .get_link = pcnet32_get_link,
+ .get_ringparam = pcnet32_get_ringparam,
+ .set_ringparam = pcnet32_set_ringparam,
+ .get_strings = pcnet32_get_strings,
+ .self_test = pcnet32_ethtool_test,
+ .set_phys_id = pcnet32_set_phys_id,
+ .get_regs_len = pcnet32_get_regs_len,
+ .get_regs = pcnet32_get_regs,
+ .get_sset_count = pcnet32_get_sset_count,
+};
+
+/* only probes for non-PCI devices, the rest are handled by
+ * pci_register_driver via pcnet32_probe_pci */
+
+static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
+{
+ unsigned int *port, ioaddr;
+
+ /* search for PCnet32 VLB cards at known addresses */
+ for (port = pcnet32_portlist; (ioaddr = *port); port++) {
+ if (request_region
+ (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) {
+			/* check for a PCnet chip: address-PROM bytes 14 and
+			 * 15 carry the 0x57 0x57 ('W' 'W') signature */
+ if ((inb(ioaddr + 14) == 0x57) &&
+ (inb(ioaddr + 15) == 0x57)) {
+ pcnet32_probe1(ioaddr, 0, NULL);
+ } else {
+ release_region(ioaddr, PCNET32_TOTAL_SIZE);
+ }
+ }
+ }
+}
+
+static int __devinit
+pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ unsigned long ioaddr;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err < 0) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_err("failed to enable device -- err=%d\n", err);
+ return err;
+ }
+ pci_set_master(pdev);
+
+ ioaddr = pci_resource_start(pdev, 0);
+ if (!ioaddr) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_err("card has no PCI IO resources, aborting\n");
+ return -ENODEV;
+ }
+
+ if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_err("architecture does not support 32bit PCI busmaster DMA\n");
+ return -ENODEV;
+ }
+ if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_err("io address range already allocated\n");
+ return -EBUSY;
+ }
+
+ err = pcnet32_probe1(ioaddr, 1, pdev);
+ if (err < 0)
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static const struct net_device_ops pcnet32_netdev_ops = {
+ .ndo_open = pcnet32_open,
+ .ndo_stop = pcnet32_close,
+ .ndo_start_xmit = pcnet32_start_xmit,
+ .ndo_tx_timeout = pcnet32_tx_timeout,
+ .ndo_get_stats = pcnet32_get_stats,
+ .ndo_set_rx_mode = pcnet32_set_multicast_list,
+ .ndo_do_ioctl = pcnet32_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = pcnet32_poll_controller,
+#endif
+};
+
+/* pcnet32_probe1
+ * Called from both pcnet32_probe_vlbus and pcnet32_probe_pci.
+ * pdev will be NULL when called from pcnet32_probe_vlbus.
+ */
+static int __devinit
+pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
+{
+ struct pcnet32_private *lp;
+ int i, media;
+ int fdx, mii, fset, dxsuflo;
+ int chip_version;
+ char *chipname;
+ struct net_device *dev;
+ const struct pcnet32_access *a = NULL;
+ u8 promaddr[6];
+ int ret = -ENODEV;
+
+ /* reset the chip */
+ pcnet32_wio_reset(ioaddr);
+
+ /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
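+	/* after a reset CSR0 must read 4: only the STOP bit is set */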
+ if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
+ a = &pcnet32_wio;
+ } else {
+ pcnet32_dwio_reset(ioaddr);
+ if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 &&
+ pcnet32_dwio_check(ioaddr)) {
+ a = &pcnet32_dwio;
+ } else {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_err("No access methods\n");
+ goto err_release_region;
+ }
+ }
+
+ chip_version =
+ a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
+ if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
+ pr_info(" PCnet chip version is %#x\n", chip_version);
+ if ((chip_version & 0xfff) != 0x003) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_info("Unsupported chip version\n");
+ goto err_release_region;
+ }
+
+ /* initialize variables */
+ fdx = mii = fset = dxsuflo = 0;
+ chip_version = (chip_version >> 12) & 0xffff;
+
+ switch (chip_version) {
+ case 0x2420:
+ chipname = "PCnet/PCI 79C970"; /* PCI */
+ break;
+ case 0x2430:
+ if (shared)
+ chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */
+ else
+ chipname = "PCnet/32 79C965"; /* 486/VL bus */
+ break;
+ case 0x2621:
+ chipname = "PCnet/PCI II 79C970A"; /* PCI */
+ fdx = 1;
+ break;
+ case 0x2623:
+ chipname = "PCnet/FAST 79C971"; /* PCI */
+ fdx = 1;
+ mii = 1;
+ fset = 1;
+ break;
+ case 0x2624:
+ chipname = "PCnet/FAST+ 79C972"; /* PCI */
+ fdx = 1;
+ mii = 1;
+ fset = 1;
+ break;
+ case 0x2625:
+ chipname = "PCnet/FAST III 79C973"; /* PCI */
+ fdx = 1;
+ mii = 1;
+ break;
+ case 0x2626:
+ chipname = "PCnet/Home 79C978"; /* PCI */
+ fdx = 1;
+ /*
+ * This is based on specs published at www.amd.com. This section
+ * assumes that a card with a 79C978 wants to go into standard
+ * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode,
+ * and the module option homepna=1 can select this instead.
+ */
+ media = a->read_bcr(ioaddr, 49);
+ media &= ~3; /* default to 10Mb ethernet */
+ if (cards_found < MAX_UNITS && homepna[cards_found])
+ media |= 1; /* switch to home wiring mode */
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_DEBUG PFX "media set to %sMbit mode\n",
+ (media & 1) ? "1" : "10");
+ a->write_bcr(ioaddr, 49, media);
+ break;
+ case 0x2627:
+ chipname = "PCnet/FAST III 79C975"; /* PCI */
+ fdx = 1;
+ mii = 1;
+ break;
+ case 0x2628:
+ chipname = "PCnet/PRO 79C976";
+ fdx = 1;
+ mii = 1;
+ break;
+ default:
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_info("PCnet version %#x, no PCnet32 chip\n",
+ chip_version);
+ goto err_release_region;
+ }
+
+ /*
+	 * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
+	 * starting until the whole packet has been loaded: a win for
+	 * reliability at a small cost in latency - not a big loss on PCI.
+	 * Older chips have FIFOs smaller than a packet, so they can't do this.
+	 * Also turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
+ */
+
+ if (fset) {
+ a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
+ a->write_csr(ioaddr, 80,
+ (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
+ dxsuflo = 1;
+ }
+
+ dev = alloc_etherdev(sizeof(*lp));
+ if (!dev) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_err("Memory allocation failed\n");
+ ret = -ENOMEM;
+ goto err_release_region;
+ }
+
+ if (pdev)
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_info("%s at %#3lx,", chipname, ioaddr);
+
+ /* In most chips, after a chip reset, the ethernet address is read from the
+ * station address PROM at the base address and programmed into the
+ * "Physical Address Registers" CSR12-14.
+ * As a precautionary measure, we read the PROM values and complain if
+ * they disagree with the CSRs. If they miscompare, and the PROM addr
+ * is valid, then the PROM addr is used.
+ */
+ for (i = 0; i < 3; i++) {
+ unsigned int val;
+ val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
+ /* There may be endianness issues here. */
+ dev->dev_addr[2 * i] = val & 0x0ff;
+ dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
+ }
+
+ /* read PROM address and compare with CSR address */
+ for (i = 0; i < 6; i++)
+ promaddr[i] = inb(ioaddr + i);
+
+ if (memcmp(promaddr, dev->dev_addr, 6) ||
+ !is_valid_ether_addr(dev->dev_addr)) {
+ if (is_valid_ether_addr(promaddr)) {
+ if (pcnet32_debug & NETIF_MSG_PROBE) {
+ pr_cont(" warning: CSR address invalid,\n");
+ pr_info(" using instead PROM address of");
+ }
+ memcpy(dev->dev_addr, promaddr, 6);
+ }
+ }
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
+ if (!is_valid_ether_addr(dev->perm_addr))
+ memset(dev->dev_addr, 0, ETH_ALEN);
+
+ if (pcnet32_debug & NETIF_MSG_PROBE) {
+ pr_cont(" %pM", dev->dev_addr);
+
+ /* Version 0x2623 and 0x2624 */
+ if (((chip_version + 1) & 0xfffe) == 0x2624) {
+ i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
+ pr_info(" tx_start_pt(0x%04x):", i);
+ switch (i >> 10) {
+ case 0:
+ pr_cont(" 20 bytes,");
+ break;
+ case 1:
+ pr_cont(" 64 bytes,");
+ break;
+ case 2:
+ pr_cont(" 128 bytes,");
+ break;
+ case 3:
+ pr_cont("~220 bytes,");
+ break;
+ }
+ i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
+ pr_cont(" BCR18(%x):", i & 0xffff);
+ if (i & (1 << 5))
+ pr_cont("BurstWrEn ");
+ if (i & (1 << 6))
+ pr_cont("BurstRdEn ");
+ if (i & (1 << 7))
+ pr_cont("DWordIO ");
+ if (i & (1 << 11))
+ pr_cont("NoUFlow ");
+ i = a->read_bcr(ioaddr, 25);
+ pr_info(" SRAMSIZE=0x%04x,", i << 8);
+ i = a->read_bcr(ioaddr, 26);
+ pr_cont(" SRAM_BND=0x%04x,", i << 8);
+ i = a->read_bcr(ioaddr, 27);
+ if (i & (1 << 14))
+ pr_cont("LowLatRx");
+ }
+ }
+
+ dev->base_addr = ioaddr;
+ lp = netdev_priv(dev);
+	/* pci_alloc_consistent returns page-aligned memory, so we do not
+	 * have to check the alignment */
+ lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block),
+ &lp->init_dma_addr);
+ if (!lp->init_block) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_err("Consistent memory allocation failed\n");
+ ret = -ENOMEM;
+ goto err_free_netdev;
+ }
+ lp->pci_dev = pdev;
+
+ lp->dev = dev;
+
+ spin_lock_init(&lp->lock);
+
+ lp->name = chipname;
+ lp->shared_irq = shared;
+ lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
+ lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */
+ lp->tx_mod_mask = lp->tx_ring_size - 1;
+ lp->rx_mod_mask = lp->rx_ring_size - 1;
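+	/* the init block wants log2 of the ring sizes: TLEN lives in bits
+	 * 15-12 and RLEN in bits 7-4 of the tlen_rlen word */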
+ lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
+ lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
+ lp->mii_if.full_duplex = fdx;
+ lp->mii_if.phy_id_mask = 0x1f;
+ lp->mii_if.reg_num_mask = 0x1f;
+ lp->dxsuflo = dxsuflo;
+ lp->mii = mii;
+ lp->chip_version = chip_version;
+ lp->msg_enable = pcnet32_debug;
+ if ((cards_found >= MAX_UNITS) ||
+ (options[cards_found] >= sizeof(options_mapping)))
+ lp->options = PCNET32_PORT_ASEL;
+ else
+ lp->options = options_mapping[options[cards_found]];
+ lp->mii_if.dev = dev;
+ lp->mii_if.mdio_read = mdio_read;
+ lp->mii_if.mdio_write = mdio_write;
+
+ /* napi.weight is used in both the napi and non-napi cases */
+ lp->napi.weight = lp->rx_ring_size / 2;
+
+ netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2);
+
+ if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
+ ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
+ lp->options |= PCNET32_PORT_FD;
+
+ lp->a = a;
+
+ /* prior to register_netdev, dev->name is not yet correct */
+ if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
+ ret = -ENOMEM;
+ goto err_free_ring;
+ }
+ /* detect special T1/E1 WAN card by checking for MAC address */
+ if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 &&
+ dev->dev_addr[2] == 0x75)
+ lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
+
+ lp->init_block->mode = cpu_to_le16(0x0003); /* Disable Rx and Tx. */
+ lp->init_block->tlen_rlen =
+ cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
+ for (i = 0; i < 6; i++)
+ lp->init_block->phys_addr[i] = dev->dev_addr[i];
+ lp->init_block->filter[0] = 0x00000000;
+ lp->init_block->filter[1] = 0x00000000;
+ lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
+ lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
+
+ /* switch pcnet32 to 32bit mode */
+ a->write_bcr(ioaddr, 20, 2);
+
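+	/* CSR1/CSR2 take the low/high 16 bits of the init block's DMA address */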
+ a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
+ a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
+
+ if (pdev) { /* use the IRQ provided by PCI */
+ dev->irq = pdev->irq;
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_cont(" assigned IRQ %d\n", dev->irq);
+ } else {
+ unsigned long irq_mask = probe_irq_on();
+
+ /*
+ * To auto-IRQ we enable the initialization-done and DMA error
+ * interrupts. For ISA boards we get a DMA error, but VLB and PCI
+ * boards will work.
+ */
+ /* Trigger an initialization just for the interrupt. */
+ a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_INIT);
+ mdelay(1);
+
+ dev->irq = probe_irq_off(irq_mask);
+ if (!dev->irq) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_cont(", failed to detect IRQ line\n");
+ ret = -ENODEV;
+ goto err_free_ring;
+ }
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_cont(", probed IRQ %d\n", dev->irq);
+ }
+
+ /* Set the mii phy_id so that we can query the link state */
+ if (lp->mii) {
+		/* lp->phycount and lp->phymask are zero: alloc_etherdev
+		 * cleared the private area */
+
+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
+ /* scan for PHYs */
+ for (i = 0; i < PCNET32_MAX_PHYS; i++) {
+ unsigned short id1, id2;
+
+ id1 = mdio_read(dev, i, MII_PHYSID1);
+ if (id1 == 0xffff)
+ continue;
+ id2 = mdio_read(dev, i, MII_PHYSID2);
+ if (id2 == 0xffff)
+ continue;
+ if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624)
+ continue; /* 79C971 & 79C972 have phantom phy at id 31 */
+ lp->phycount++;
+ lp->phymask |= (1 << i);
+ lp->mii_if.phy_id = i;
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_info("Found PHY %04x:%04x at address %d\n",
+ id1, id2, i);
+ }
+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
+ if (lp->phycount > 1)
+ lp->options |= PCNET32_PORT_MII;
+ }
+
+ init_timer(&lp->watchdog_timer);
+ lp->watchdog_timer.data = (unsigned long)dev;
+ lp->watchdog_timer.function = (void *)&pcnet32_watchdog;
+
+ /* The PCNET32-specific entries in the device structure. */
+ dev->netdev_ops = &pcnet32_netdev_ops;
+ dev->ethtool_ops = &pcnet32_ethtool_ops;
+ dev->watchdog_timeo = (5 * HZ);
+
+ /* Fill in the generic fields of the device structure. */
+ if (register_netdev(dev))
+ goto err_free_ring;
+
+ if (pdev) {
+ pci_set_drvdata(pdev, dev);
+ } else {
+ lp->next = pcnet32_dev;
+ pcnet32_dev = dev;
+ }
+
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_info("%s: registered as %s\n", dev->name, lp->name);
+ cards_found++;
+
+ /* enable LED writes */
+ a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);
+
+ return 0;
+
+err_free_ring:
+ pcnet32_free_ring(dev);
+ pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
+ lp->init_block, lp->init_dma_addr);
+err_free_netdev:
+ free_netdev(dev);
+err_release_region:
+ release_region(ioaddr, PCNET32_TOTAL_SIZE);
+ return ret;
+}
+
+/* if any allocation fails, caller must also call pcnet32_free_ring */
+static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) *
+ lp->tx_ring_size,
+ &lp->tx_ring_dma_addr);
+ if (lp->tx_ring == NULL) {
+ netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) *
+ lp->rx_ring_size,
+ &lp->rx_ring_dma_addr);
+ if (lp->rx_ring == NULL) {
+ netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
+ GFP_ATOMIC);
+ if (!lp->tx_dma_addr) {
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
+ GFP_ATOMIC);
+ if (!lp->rx_dma_addr) {
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
+ GFP_ATOMIC);
+ if (!lp->tx_skbuff) {
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
+ GFP_ATOMIC);
+ if (!lp->rx_skbuff) {
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void pcnet32_free_ring(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ kfree(lp->tx_skbuff);
+ lp->tx_skbuff = NULL;
+
+ kfree(lp->rx_skbuff);
+ lp->rx_skbuff = NULL;
+
+ kfree(lp->tx_dma_addr);
+ lp->tx_dma_addr = NULL;
+
+ kfree(lp->rx_dma_addr);
+ lp->rx_dma_addr = NULL;
+
+ if (lp->tx_ring) {
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) *
+ lp->tx_ring_size, lp->tx_ring,
+ lp->tx_ring_dma_addr);
+ lp->tx_ring = NULL;
+ }
+
+ if (lp->rx_ring) {
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) *
+ lp->rx_ring_size, lp->rx_ring,
+ lp->rx_ring_dma_addr);
+ lp->rx_ring = NULL;
+ }
+}
+
+static int pcnet32_open(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ struct pci_dev *pdev = lp->pci_dev;
+ unsigned long ioaddr = dev->base_addr;
+ u16 val;
+ int i;
+ int rc;
+ unsigned long flags;
+
+ if (request_irq(dev->irq, pcnet32_interrupt,
+ lp->shared_irq ? IRQF_SHARED : 0, dev->name,
+ (void *)dev)) {
+ return -EAGAIN;
+ }
+
+ spin_lock_irqsave(&lp->lock, flags);
+ /* Check for a valid station address */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ rc = -EINVAL;
+ goto err_free_irq;
+ }
+
+ /* Reset the PCNET32 */
+ lp->a->reset(ioaddr);
+
+ /* switch pcnet32 to 32bit mode */
+ lp->a->write_bcr(ioaddr, 20, 2);
+
+ netif_printk(lp, ifup, KERN_DEBUG, dev,
+ "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
+ __func__, dev->irq, (u32) (lp->tx_ring_dma_addr),
+ (u32) (lp->rx_ring_dma_addr),
+ (u32) (lp->init_dma_addr));
+
+ /* set/reset autoselect bit */
+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
+ if (lp->options & PCNET32_PORT_ASEL)
+ val |= 2;
+ lp->a->write_bcr(ioaddr, 2, val);
+
+ /* handle full duplex setting */
+ if (lp->mii_if.full_duplex) {
+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
+ if (lp->options & PCNET32_PORT_FD) {
+ val |= 1;
+ if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
+ val |= 2;
+ } else if (lp->options & PCNET32_PORT_ASEL) {
+ /* workaround of xSeries250, turn on for 79C975 only */
+ if (lp->chip_version == 0x2627)
+ val |= 3;
+ }
+ lp->a->write_bcr(ioaddr, 9, val);
+ }
+
+ /* set/reset GPSI bit in test register */
+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
+ if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
+ val |= 0x10;
+ lp->a->write_csr(ioaddr, 124, val);
+
+ /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
+ if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
+ (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
+ pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
+ if (lp->options & PCNET32_PORT_ASEL) {
+ lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
+ netif_printk(lp, link, KERN_DEBUG, dev,
+ "Setting 100Mb-Full Duplex\n");
+ }
+ }
+ if (lp->phycount < 2) {
+ /*
+		 * 24 Jun 2004, according to AMD: in order to change the PHY,
+		 * DANAS (or DISPM for 79C976) must be set; then select the
+		 * speed, duplex, and/or enable auto negotiation, and clear DANAS
+ */
+ if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
+ lp->a->write_bcr(ioaddr, 32,
+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
+ /* disable Auto Negotiation, set 10Mpbs, HD */
+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
+ if (lp->options & PCNET32_PORT_FD)
+ val |= 0x10;
+ if (lp->options & PCNET32_PORT_100)
+ val |= 0x08;
+ lp->a->write_bcr(ioaddr, 32, val);
+ } else {
+ if (lp->options & PCNET32_PORT_ASEL) {
+ lp->a->write_bcr(ioaddr, 32,
+ lp->a->read_bcr(ioaddr,
+ 32) | 0x0080);
+ /* enable auto negotiate, setup, disable fd */
+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
+ val |= 0x20;
+ lp->a->write_bcr(ioaddr, 32, val);
+ }
+ }
+ } else {
+ int first_phy = -1;
+ u16 bmcr;
+ u32 bcr9;
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+
+ /*
+ * There is really no good other way to handle multiple PHYs
+ * other than turning off all automatics
+ */
+ val = lp->a->read_bcr(ioaddr, 2);
+ lp->a->write_bcr(ioaddr, 2, val & ~2);
+ val = lp->a->read_bcr(ioaddr, 32);
+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
+
+ if (!(lp->options & PCNET32_PORT_ASEL)) {
+ /* setup ecmd */
+ ecmd.port = PORT_MII;
+ ecmd.transceiver = XCVR_INTERNAL;
+ ecmd.autoneg = AUTONEG_DISABLE;
+ ethtool_cmd_speed_set(&ecmd,
+ (lp->options & PCNET32_PORT_100) ?
+ SPEED_100 : SPEED_10);
+ bcr9 = lp->a->read_bcr(ioaddr, 9);
+
+ if (lp->options & PCNET32_PORT_FD) {
+ ecmd.duplex = DUPLEX_FULL;
+ bcr9 |= (1 << 0);
+ } else {
+ ecmd.duplex = DUPLEX_HALF;
+				bcr9 &= ~(1 << 0);	/* clear the full-duplex bit */
+ }
+ lp->a->write_bcr(ioaddr, 9, bcr9);
+ }
+
+ for (i = 0; i < PCNET32_MAX_PHYS; i++) {
+ if (lp->phymask & (1 << i)) {
+ /* isolate all but the first PHY */
+ bmcr = mdio_read(dev, i, MII_BMCR);
+ if (first_phy == -1) {
+ first_phy = i;
+ mdio_write(dev, i, MII_BMCR,
+ bmcr & ~BMCR_ISOLATE);
+ } else {
+ mdio_write(dev, i, MII_BMCR,
+ bmcr | BMCR_ISOLATE);
+ }
+ /* use mii_ethtool_sset to setup PHY */
+ lp->mii_if.phy_id = i;
+ ecmd.phy_address = i;
+ if (lp->options & PCNET32_PORT_ASEL) {
+ mii_ethtool_gset(&lp->mii_if, &ecmd);
+ ecmd.autoneg = AUTONEG_ENABLE;
+ }
+ mii_ethtool_sset(&lp->mii_if, &ecmd);
+ }
+ }
+ lp->mii_if.phy_id = first_phy;
+ netif_info(lp, link, dev, "Using PHY number %d\n", first_phy);
+ }
+
+#ifdef DO_DXSUFLO
+ if (lp->dxsuflo) { /* Disable transmit stop on underflow */
+ val = lp->a->read_csr(ioaddr, CSR3);
+ val |= 0x40;
+ lp->a->write_csr(ioaddr, CSR3, val);
+ }
+#endif
+
+ lp->init_block->mode =
+ cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
+ pcnet32_load_multicast(dev);
+
+ if (pcnet32_init_ring(dev)) {
+ rc = -ENOMEM;
+ goto err_free_ring;
+ }
+
+ napi_enable(&lp->napi);
+
+ /* Re-initialize the PCNET32, and start it when done. */
+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
+
+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
+
+ netif_start_queue(dev);
+
+ if (lp->chip_version >= PCNET32_79C970A) {
+ /* Print the link status and start the watchdog */
+ pcnet32_check_media(dev, 1);
+ mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT);
+ }
+
+ i = 0;
+ while (i++ < 100)
+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
+ break;
+ /*
+ * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+ * reports that doing so triggers a bug in the '974.
+ */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
+
+ netif_printk(lp, ifup, KERN_DEBUG, dev,
+ "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
+ i,
+ (u32) (lp->init_dma_addr),
+ lp->a->read_csr(ioaddr, CSR0));
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return 0; /* Always succeed */
+
+err_free_ring:
+ /* free any allocated skbuffs */
+ pcnet32_purge_rx_ring(dev);
+
+ /*
+ * Switch back to 16bit mode to avoid problems with dumb
+ * DOS packet driver after a warm reboot
+ */
+ lp->a->write_bcr(ioaddr, 20, 4);
+
+err_free_irq:
+ spin_unlock_irqrestore(&lp->lock, flags);
+ free_irq(dev->irq, dev);
+ return rc;
+}
+
+/*
+ * The LANCE has been halted for one reason or another (busmaster memory
+ * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
+ * etc.). Modern LANCE variants always reload their ring-buffer
+ * configuration when restarted, so we must reinitialize our ring
+ * context before restarting. As part of this reinitialization,
+ * find all packets still on the Tx ring and pretend that they had been
+ * sent (in effect, drop the packets on the floor) - the higher-level
+ * protocols will time out and retransmit. It'd be better to shuffle
+ * these skbs to a temp list and then actually re-Tx them after
+ * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
+ */
+
+static void pcnet32_purge_tx_ring(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < lp->tx_ring_size; i++) {
+ lp->tx_ring[i].status = 0; /* CPU owns buffer */
+ wmb(); /* Make sure adapter sees owner change */
+ if (lp->tx_skbuff[i]) {
+ pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
+ lp->tx_skbuff[i]->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(lp->tx_skbuff[i]);
+ }
+ lp->tx_skbuff[i] = NULL;
+ lp->tx_dma_addr[i] = 0;
+ }
+}
+
+/* Initialize the PCNET32 Rx and Tx rings. */
+static int pcnet32_init_ring(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int i;
+
+ lp->tx_full = 0;
+ lp->cur_rx = lp->cur_tx = 0;
+ lp->dirty_rx = lp->dirty_tx = 0;
+
+ for (i = 0; i < lp->rx_ring_size; i++) {
+ struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
+ if (rx_skbuff == NULL) {
+ lp->rx_skbuff[i] = dev_alloc_skb(PKT_BUF_SKB);
+ rx_skbuff = lp->rx_skbuff[i];
+ if (!rx_skbuff) {
+ /* there is not much we can do at this point */
+ netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n",
+ __func__);
+ return -1;
+ }
+ skb_reserve(rx_skbuff, NET_IP_ALIGN);
+ }
+
+ rmb();
+ if (lp->rx_dma_addr[i] == 0)
+ lp->rx_dma_addr[i] =
+ pci_map_single(lp->pci_dev, rx_skbuff->data,
+ PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
+ lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->rx_ring[i].status = cpu_to_le16(0x8000);
+ }
+ /* The Tx buffer address is filled in as needed, but we do need to clear
+ * the upper ownership bit. */
+ for (i = 0; i < lp->tx_ring_size; i++) {
+ lp->tx_ring[i].status = 0; /* CPU owns buffer */
+ wmb(); /* Make sure adapter sees owner change */
+ lp->tx_ring[i].base = 0;
+ lp->tx_dma_addr[i] = 0;
+ }
+
+ lp->init_block->tlen_rlen =
+ cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
+ for (i = 0; i < 6; i++)
+ lp->init_block->phys_addr[i] = dev->dev_addr[i];
+ lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
+ lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
+ wmb(); /* Make sure all changes are visible */
+ return 0;
+}
+
+/* the pcnet32 has been issued a stop or reset. Wait for the stop bit
+ * then flush the pending transmit operations, re-initialize the ring,
+ * and tell the chip to initialize.
+ */
+static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int i;
+
+ /* wait for stop */
+ for (i = 0; i < 100; i++)
+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
+ break;
+
+ if (i >= 100)
+ netif_err(lp, drv, dev, "%s timed out waiting for stop\n",
+ __func__);
+
+ pcnet32_purge_tx_ring(dev);
+ if (pcnet32_init_ring(dev))
+ return;
+
+ /* ReInit Ring */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
+ i = 0;
+ while (i++ < 1000)
+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
+ break;
+
+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
+}
+
+static void pcnet32_tx_timeout(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr, flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ /* Transmitter timeout, serious problems. */
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ pr_err("%s: transmit timed out, status %4.4x, resetting\n",
+ dev->name, lp->a->read_csr(ioaddr, CSR0));
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
+ dev->stats.tx_errors++;
+ if (netif_msg_tx_err(lp)) {
+ int i;
+ printk(KERN_DEBUG
+ " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
+ lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
+ lp->cur_rx);
+ for (i = 0; i < lp->rx_ring_size; i++)
+ printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
+ le32_to_cpu(lp->rx_ring[i].base),
+ (-le16_to_cpu(lp->rx_ring[i].buf_length)) &
+ 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length),
+ le16_to_cpu(lp->rx_ring[i].status));
+ for (i = 0; i < lp->tx_ring_size; i++)
+ printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
+ le32_to_cpu(lp->tx_ring[i].base),
+ (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
+ le32_to_cpu(lp->tx_ring[i].misc),
+ le16_to_cpu(lp->tx_ring[i].status));
+ printk("\n");
+ }
+ pcnet32_restart(dev, CSR0_NORMAL);
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ u16 status;
+ int entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ netif_printk(lp, tx_queued, KERN_DEBUG, dev,
+ "%s() called, csr0 %4.4x\n",
+ __func__, lp->a->read_csr(ioaddr, CSR0));
+
+ /* Default status -- will not enable Successful-TxDone
+ * interrupt when that option is available to us.
+ */
+ status = 0x8300;
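+	/* 0x8300 = OWN | STP | ENP: a complete frame in one buffer, owned by the chip */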
+
+ /* Fill in a Tx ring entry */
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->cur_tx & lp->tx_mod_mask;
+
+ /* Caution: the write order is important here, set the status
+ * with the "ownership" bits last. */
+
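+	/* the descriptor byte count is stored as a two's complement value */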
+ lp->tx_ring[entry].length = cpu_to_le16(-skb->len);
+
+ lp->tx_ring[entry].misc = 0x00000000;
+
+ lp->tx_skbuff[entry] = skb;
+ lp->tx_dma_addr[entry] =
+ pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->tx_ring[entry].status = cpu_to_le16(status);
+
+ lp->cur_tx++;
+ dev->stats.tx_bytes += skb->len;
+
+ /* Trigger an immediate send poll. */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
+
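+	/* pcnet32_tx() zeroes base when it reclaims an entry, so a non-zero
+	 * base on the next entry means every descriptor is in use */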
+ if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
+ lp->tx_full = 1;
+ netif_stop_queue(dev);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return NETDEV_TX_OK;
+}
+
+/* The PCNET32 interrupt handler. */
+static irqreturn_t
+pcnet32_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct pcnet32_private *lp;
+ unsigned long ioaddr;
+ u16 csr0;
+ int boguscnt = max_interrupt_work;
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ spin_lock(&lp->lock);
+
+ csr0 = lp->a->read_csr(ioaddr, CSR0);
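+	/* 0x8f00 = ERR | MERR | RINT | TINT | IDON: any pending source */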
+ while ((csr0 & 0x8f00) && --boguscnt >= 0) {
+ if (csr0 == 0xffff)
+ break; /* PCMCIA remove happened */
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
+
+ netif_printk(lp, intr, KERN_DEBUG, dev,
+ "interrupt csr0=%#2.2x new csr=%#2.2x\n",
+ csr0, lp->a->read_csr(ioaddr, CSR0));
+
+ /* Log misc errors. */
+ if (csr0 & 0x4000)
+ dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & 0x1000) {
+ /*
+			 * This happens when our receive ring is full. This
+			 * shouldn't be a problem as we will see normal rx
+			 * interrupts for the frames in the receive ring. But
+			 * there are some PCI chipsets (I can reproduce this
+			 * on SP3G with Intel saturn chipset) which sometimes
+			 * have problems and will fill up the receive ring
+			 * with error descriptors. In this situation we don't
+			 * get a rx interrupt, but a missed frame interrupt
+			 * sooner or later.
+ */
+ dev->stats.rx_errors++; /* Missed a Rx frame. */
+ }
+ if (csr0 & 0x0800) {
+ netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n",
+ csr0);
+ /* unlike for the lance, there is no restart needed */
+ }
+ if (napi_schedule_prep(&lp->napi)) {
+ u16 val;
+ /* set interrupt masks */
+ val = lp->a->read_csr(ioaddr, CSR3);
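+			/* 0x5f00 sets BABLM/MISSM/MERRM/RINTM/TINTM/IDONM,
+			 * masking further interrupts while NAPI polls */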
+ val |= 0x5f00;
+ lp->a->write_csr(ioaddr, CSR3, val);
+
+ __napi_schedule(&lp->napi);
+ break;
+ }
+ csr0 = lp->a->read_csr(ioaddr, CSR0);
+ }
+
+ netif_printk(lp, intr, KERN_DEBUG, dev,
+ "exiting interrupt, csr0=%#4.4x\n",
+ lp->a->read_csr(ioaddr, CSR0));
+
+ spin_unlock(&lp->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int pcnet32_close(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ del_timer_sync(&lp->watchdog_timer);
+
+ netif_stop_queue(dev);
+ napi_disable(&lp->napi);
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
+
+ netif_printk(lp, ifdown, KERN_DEBUG, dev,
+ "Shutting down ethercard, status was %2.2x\n",
+ lp->a->read_csr(ioaddr, CSR0));
+
+ /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
+
+ /*
+ * Switch back to 16bit mode to avoid problems with dumb
+ * DOS packet driver after a warm reboot
+ */
+ lp->a->write_bcr(ioaddr, 20, 4);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ free_irq(dev->irq, dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ pcnet32_purge_rx_ring(dev);
+ pcnet32_purge_tx_ring(dev);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return 0;
+}
+
+static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return &dev->stats;
+}
+
+/* taken from the sunlance driver, which it took from the depca driver */
+static void pcnet32_load_multicast(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ volatile struct pcnet32_init_block *ib = lp->init_block;
+ volatile __le16 *mcast_table = (__le16 *)ib->filter;
+ struct netdev_hw_addr *ha;
+ unsigned long ioaddr = dev->base_addr;
+ int i;
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI) {
+ ib->filter[0] = cpu_to_le32(~0U);
+ ib->filter[1] = cpu_to_le32(~0U);
+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
+ return;
+ }
+ /* clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+
+ /* Add addresses */
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
+ crc = crc >> 26;
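+		/* the top 6 CRC bits select one of the 64 hash filter bits,
+		 * addressed as 4 words of 16 bits */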
+ mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
+ }
+ for (i = 0; i < 4; i++)
+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
+ le16_to_cpu(mcast_table[i]));
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+static void pcnet32_set_multicast_list(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr, flags;
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int csr15, suspended;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ suspended = pcnet32_suspend(dev, &flags, 0);
+ csr15 = lp->a->read_csr(ioaddr, CSR15);
+ if (dev->flags & IFF_PROMISC) {
+ /* Log any net taps. */
+ netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
+ lp->init_block->mode =
+ cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
+ 7);
+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
+ } else {
+ lp->init_block->mode =
+ cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
+ pcnet32_load_multicast(dev);
+ }
+
+ if (suspended) {
+ int csr5;
+ /* clear SUSPEND (SPND) - CSR5 bit 0 */
+ csr5 = lp->a->read_csr(ioaddr, CSR5);
+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
+ } else {
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
+ pcnet32_restart(dev, CSR0_NORMAL);
+ netif_wake_queue(dev);
+ }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+/* This routine assumes that the lp->lock is held */
+static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ u16 val_out;
+
+ if (!lp->mii)
+ return 0;
+
+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
+ val_out = lp->a->read_bcr(ioaddr, 34);
+
+ return val_out;
+}
+
+/* This routine assumes that the lp->lock is held */
+static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+
+ if (!lp->mii)
+ return;
+
+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
+ lp->a->write_bcr(ioaddr, 34, val);
+}
+
+static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int rc;
+ unsigned long flags;
+
+ /* SIOC[GS]MIIxxx ioctls */
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ } else {
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
+static int pcnet32_check_otherphy(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ struct mii_if_info mii = lp->mii_if;
+ u16 bmcr;
+ int i;
+
+ for (i = 0; i < PCNET32_MAX_PHYS; i++) {
+ if (i == lp->mii_if.phy_id)
+ continue; /* skip active phy */
+ if (lp->phymask & (1 << i)) {
+ mii.phy_id = i;
+ if (mii_link_ok(&mii)) {
+ /* found PHY with active link */
+ netif_info(lp, link, dev, "Using PHY number %d\n",
+ i);
+
+ /* isolate inactive phy */
+ bmcr =
+ mdio_read(dev, lp->mii_if.phy_id, MII_BMCR);
+ mdio_write(dev, lp->mii_if.phy_id, MII_BMCR,
+ bmcr | BMCR_ISOLATE);
+
+ /* de-isolate new phy */
+ bmcr = mdio_read(dev, i, MII_BMCR);
+ mdio_write(dev, i, MII_BMCR,
+ bmcr & ~BMCR_ISOLATE);
+
+ /* set new phy address */
+ lp->mii_if.phy_id = i;
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * Show the status of the media. Similar to mii_check_media, but it
+ * correctly shows the link speed for all (tested) pcnet32 variants.
+ * Devices without an MII just report link state, without speed.
+ *
+ * Caller is assumed to hold and release the lp->lock.
+ */
+
+static void pcnet32_check_media(struct net_device *dev, int verbose)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int curr_link;
+ int prev_link = netif_carrier_ok(dev) ? 1 : 0;
+ u32 bcr9;
+
+ if (lp->mii) {
+ curr_link = mii_link_ok(&lp->mii_if);
+ } else {
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
+ }
+ if (!curr_link) {
+ if (prev_link || verbose) {
+ netif_carrier_off(dev);
+ netif_info(lp, link, dev, "link down\n");
+ }
+ if (lp->phycount > 1) {
+ curr_link = pcnet32_check_otherphy(dev);
+ prev_link = 0;
+ }
+ } else if (verbose || !prev_link) {
+ netif_carrier_on(dev);
+ if (lp->mii) {
+ if (netif_msg_link(lp)) {
+ struct ethtool_cmd ecmd = {
+ .cmd = ETHTOOL_GSET };
+ mii_ethtool_gset(&lp->mii_if, &ecmd);
+ netdev_info(dev, "link up, %uMbps, %s-duplex\n",
+ ethtool_cmd_speed(&ecmd),
+ (ecmd.duplex == DUPLEX_FULL)
+ ? "full" : "half");
+ }
+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
+ if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
+ if (lp->mii_if.full_duplex)
+ bcr9 |= (1 << 0);
+ else
+ bcr9 &= ~(1 << 0);
+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
+ }
+ } else {
+ netif_info(lp, link, dev, "link up\n");
+ }
+ }
+}
+
+/*
+ * Check for loss of link and link establishment.
+ * Cannot use mii_check_media because it does nothing when the mode is forced.
+ */
+
+static void pcnet32_watchdog(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ /* Print the link status if it has changed */
+ spin_lock_irqsave(&lp->lock, flags);
+ pcnet32_check_media(dev, 0);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT));
+}
+
+static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ pcnet32_close(dev);
+ }
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+
+static int pcnet32_pm_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ if (netif_running(dev)) {
+ pcnet32_open(dev);
+ netif_device_attach(dev);
+ }
+ return 0;
+}
+
+static void __devexit pcnet32_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ pcnet32_free_ring(dev);
+ release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
+ pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
+ lp->init_block, lp->init_dma_addr);
+ free_netdev(dev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+static struct pci_driver pcnet32_driver = {
+ .name = DRV_NAME,
+ .probe = pcnet32_probe_pci,
+ .remove = __devexit_p(pcnet32_remove_one),
+ .id_table = pcnet32_pci_tbl,
+ .suspend = pcnet32_pm_suspend,
+ .resume = pcnet32_pm_resume,
+};
+
+/* An additional parameter that may be passed in... */
+static int debug = -1;
+static int tx_start_pt = -1;
+static int pcnet32_have_pci;
+
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, DRV_NAME " debug level");
+module_param(max_interrupt_work, int, 0);
+MODULE_PARM_DESC(max_interrupt_work,
+ DRV_NAME " maximum events handled per interrupt");
+module_param(rx_copybreak, int, 0);
+MODULE_PARM_DESC(rx_copybreak,
+ DRV_NAME " copy breakpoint for copy-only-tiny-frames");
+module_param(tx_start_pt, int, 0);
+MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
+module_param(pcnet32vlb, int, 0);
+MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)");
+module_param_array(options, int, NULL, 0);
+MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)");
+module_param_array(full_duplex, int, NULL, 0);
+MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)");
+/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */
+module_param_array(homepna, int, NULL, 0);
+MODULE_PARM_DESC(homepna,
+ DRV_NAME
+		 " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet)");
+
+MODULE_AUTHOR("Thomas Bogendoerfer");
+MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards");
+MODULE_LICENSE("GPL");
+
+#define PCNET32_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+
+static int __init pcnet32_init_module(void)
+{
+ pr_info("%s", version);
+
+ pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
+
+ if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
+ tx_start = tx_start_pt;
+
+ /* find the PCI devices */
+ if (!pci_register_driver(&pcnet32_driver))
+ pcnet32_have_pci = 1;
+
+	/* should we find any remaining VLbus devices? */
+ if (pcnet32vlb)
+ pcnet32_probe_vlbus(pcnet32_portlist);
+
+ if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
+ pr_info("%d cards_found\n", cards_found);
+
+ return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
+}
+
+static void __exit pcnet32_cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ while (pcnet32_dev) {
+ struct pcnet32_private *lp = netdev_priv(pcnet32_dev);
+ next_dev = lp->next;
+ unregister_netdev(pcnet32_dev);
+ pcnet32_free_ring(pcnet32_dev);
+ release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
+ pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
+ lp->init_block, lp->init_dma_addr);
+ free_netdev(pcnet32_dev);
+ pcnet32_dev = next_dev;
+ }
+
+ if (pcnet32_have_pci)
+ pci_unregister_driver(&pcnet32_driver);
+}
+
+module_init(pcnet32_init_module);
+module_exit(pcnet32_cleanup_module);
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
new file mode 100644
index 000000000000..080b71fcc683
--- /dev/null
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -0,0 +1,961 @@
+/* sun3lance.c: Ethernet driver for SUN3 Lance chip */
+/*
+
+ Sun3 Lance ethernet driver, by Sam Creasey (sammy@users.qual.net).
+ This driver is a part of the linux kernel, and is thus distributed
+ under the GNU General Public License.
+
+ The values used in LANCE_OBIO and LANCE_IRQ seem to be empirically
+ true for the correct IRQ and address of the lance registers. They
+ have not been widely tested, however. What we probably need is a
+ "proper" way to search for a device in the sun3's prom, but, alas,
+ linux has no such thing.
+
+ This driver is largely based on atarilance.c, by Roman Hodek. Other
+ sources of inspiration were the NetBSD sun3 am7990 driver, and the
+ linux sparc lance driver (sunlance.c).
+
+ There are more assumptions made throughout this driver; it almost
+ certainly still needs work, but it does work at least for RARP/BOOTP and
+ mounting the root NFS filesystem.
+
+*/
+
+static char *version = "sun3lance.c: v1.2 1/12/2001 Sam Creasey (sammy@sammy.net)\n";
+
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/dvma.h>
+#include <asm/idprom.h>
+#include <asm/machines.h>
+
+#ifdef CONFIG_SUN3
+#include <asm/sun3mmu.h>
+#else
+#include <asm/sun3xprom.h>
+#endif
+
+/* sun3/60 addr/irq for the lance chip. If your sun is different,
+ change this. */
+#define LANCE_OBIO 0x120000
+#define LANCE_IRQ IRQ_AUTO_3
+
+/* Debug level:
+ * 0 = silent, print only serious errors
+ * 1 = normal, print error messages
+ * 2 = debug, print debug info
+ * 3 = debug, print even more debug info (packet data)
+ */
+
+#define LANCE_DEBUG 0
+
+#ifdef LANCE_DEBUG
+static int lance_debug = LANCE_DEBUG;
+#else
+static int lance_debug = 1;
+#endif
+module_param(lance_debug, int, 0);
+MODULE_PARM_DESC(lance_debug, "SUN3 Lance debug level (0-3)");
+MODULE_LICENSE("GPL");
+
+#define DPRINTK(n,a) \
+ do { \
+ if (lance_debug >= n) \
+ printk a; \
+ } while( 0 )
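+
+/* Illustrative usage, not part of the original driver: the extra
+ * parentheses pass a whole printk() argument list through the macro
+ * as a single macro argument, e.g.:
+ *
+ *	DPRINTK(2, ("%s: csr0=%04x\n", dev->name, csr0));
+ */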
+
+
+/* The ring sizes below are given as log2 values: 2^3 = 8 TX buffers
+   and 2^5 = 32 RX buffers, all carved from one DVMA allocation. */
+
+#define TX_LOG_RING_SIZE 3
+#define RX_LOG_RING_SIZE 5
+
+/* These are the derived values */
+
+#define TX_RING_SIZE (1 << TX_LOG_RING_SIZE)
+#define TX_RING_LEN_BITS (TX_LOG_RING_SIZE << 5)
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+
+#define RX_RING_SIZE (1 << RX_LOG_RING_SIZE)
+#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5)
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
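+
+/* Illustrative sketch, not part of the original driver: because the
+ * ring sizes above are powers of two, the next slot index can be
+ * computed with a bitwise AND against the MOD_MASK instead of a
+ * modulo, which is exactly how new_tx/new_rx are advanced below.
+ * example_next_tx_slot() is a name invented for this sketch.
+ */
+static inline int example_next_tx_slot(int entry)
+{
+	/* e.g. with TX_RING_SIZE == 8: 6 -> 7, 7 -> 0 */
+	return (entry + 1) & TX_RING_MOD_MASK;
+}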
+
+/* Definitions for packet buffer access: */
+#define PKT_BUF_SZ 1544
+
+/* Get the address of a packet buffer corresponding to a given buffer head */
+#define PKTBUF_ADDR(head) (void *)((unsigned long)(MEM) | (head)->base)
+
+
+/* The LANCE Rx and Tx ring descriptors. */
+struct lance_rx_head {
+ unsigned short base; /* Low word of base addr */
+ volatile unsigned char flag;
+ unsigned char base_hi; /* High word of base addr (unused) */
+ short buf_length; /* This length is 2s complement! */
+ volatile short msg_length; /* This length is "normal". */
+};
+
+struct lance_tx_head {
+ unsigned short base; /* Low word of base addr */
+ volatile unsigned char flag;
+ unsigned char base_hi; /* High word of base addr (unused) */
+ short length; /* Length is 2s complement! */
+ volatile short misc;
+};
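+
+/* Illustrative sketch, not part of the original driver: the LANCE
+ * expects ring buffer lengths as negative (2's complement) values with
+ * the top four bits set, so the driver stores (-len) | 0xf000 and the
+ * debug code below recovers the length by negating the field.
+ * encode_lance_len()/decode_lance_len() are names invented for this
+ * sketch.
+ */
+static inline short encode_lance_len(int len)
+{
+	return (-len) | 0xf000;		/* e.g. 64 -> 0xffc0 */
+}
+
+static inline int decode_lance_len(short field)
+{
+	return -field;			/* e.g. 0xffc0 -> 64 */
+}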
+
+/* The LANCE initialization block, described in databook. */
+struct lance_init_block {
+ unsigned short mode; /* Pre-set mode */
+ unsigned char hwaddr[6]; /* Physical ethernet address */
+ unsigned int filter[2]; /* Multicast filter (unused). */
+ /* Receive and transmit ring base, along with length bits. */
+ unsigned short rdra;
+ unsigned short rlen;
+ unsigned short tdra;
+ unsigned short tlen;
+	unsigned short pad[4];		/* is this needed? */
+};
+
+/* The whole layout of the Lance shared memory */
+struct lance_memory {
+ struct lance_init_block init;
+ struct lance_tx_head tx_head[TX_RING_SIZE];
+ struct lance_rx_head rx_head[RX_RING_SIZE];
+ char rx_data[RX_RING_SIZE][PKT_BUF_SZ];
+ char tx_data[TX_RING_SIZE][PKT_BUF_SZ];
+};
+
+/* The driver's private device structure */
+
+struct lance_private {
+ volatile unsigned short *iobase;
+ struct lance_memory *mem;
+ int new_rx, new_tx; /* The next free ring entry */
+ int old_tx, old_rx; /* ring entry to be processed */
+/* These two must be longs for set_bit() */
+ long tx_full;
+ long lock;
+};
+
+/* I/O register access macros */
+
+#define MEM lp->mem
+#define DREG lp->iobase[0]
+#define AREG lp->iobase[1]
+#define REGA(a) (*( AREG = (a), &DREG ))
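+
+/* Illustrative sketch, not part of the original driver: the LANCE uses
+ * indirect register access -- the register number is written to the
+ * address port (AREG) and the value is then moved through the data
+ * port (DREG); REGA() folds both steps into a single expression.
+ * example_read_csr() is a name invented for this sketch, and relies on
+ * the macros above expanding against its 'lp' parameter.
+ */
+static inline unsigned short example_read_csr(struct lance_private *lp,
+					      unsigned short csr)
+{
+	AREG = csr;		/* select the register... */
+	return DREG;		/* ...and read it through the data port */
+}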
+
+/* Definitions for the Lance */
+
+/* tx_head flags */
+#define TMD1_ENP 0x01 /* end of packet */
+#define TMD1_STP 0x02 /* start of packet */
+#define TMD1_DEF 0x04 /* deferred */
+#define TMD1_ONE 0x08 /* one retry needed */
+#define TMD1_MORE 0x10 /* more than one retry needed */
+#define TMD1_ERR 0x40 /* error summary */
+#define TMD1_OWN 0x80 /* ownership (set: chip owns) */
+
+#define TMD1_OWN_CHIP TMD1_OWN
+#define TMD1_OWN_HOST 0
+
+/* tx_head misc field */
+#define TMD3_TDR 0x03FF /* Time Domain Reflectometry counter */
+#define TMD3_RTRY 0x0400 /* failed after 16 retries */
+#define TMD3_LCAR 0x0800 /* carrier lost */
+#define TMD3_LCOL 0x1000 /* late collision */
+#define TMD3_UFLO 0x4000 /* underflow (late memory) */
+#define TMD3_BUFF 0x8000 /* buffering error (no ENP) */
+
+/* rx_head flags */
+#define RMD1_ENP 0x01 /* end of packet */
+#define RMD1_STP 0x02 /* start of packet */
+#define RMD1_BUFF 0x04 /* buffer error */
+#define RMD1_CRC 0x08 /* CRC error */
+#define RMD1_OFLO 0x10 /* overflow */
+#define RMD1_FRAM 0x20 /* framing error */
+#define RMD1_ERR 0x40 /* error summary */
+#define RMD1_OWN	0x80	/* ownership (set: chip owns) */
+
+#define RMD1_OWN_CHIP RMD1_OWN
+#define RMD1_OWN_HOST 0
+
+/* register names */
+#define CSR0 0 /* mode/status */
+#define CSR1 1 /* init block addr (low) */
+#define CSR2 2 /* init block addr (high) */
+#define CSR3 3 /* misc */
+#define CSR8 8 /* address filter */
+#define CSR15 15 /* promiscuous mode */
+
+/* CSR0 */
+/* (R=readable, W=writeable, S=set on write, C=clear on write) */
+#define CSR0_INIT 0x0001 /* initialize (RS) */
+#define CSR0_STRT 0x0002 /* start (RS) */
+#define CSR0_STOP 0x0004 /* stop (RS) */
+#define CSR0_TDMD 0x0008 /* transmit demand (RS) */
+#define CSR0_TXON 0x0010 /* transmitter on (R) */
+#define CSR0_RXON 0x0020 /* receiver on (R) */
+#define CSR0_INEA 0x0040 /* interrupt enable (RW) */
+#define CSR0_INTR 0x0080 /* interrupt active (R) */
+#define CSR0_IDON 0x0100 /* initialization done (RC) */
+#define CSR0_TINT 0x0200 /* transmitter interrupt (RC) */
+#define CSR0_RINT 0x0400 /* receiver interrupt (RC) */
+#define CSR0_MERR 0x0800 /* memory error (RC) */
+#define CSR0_MISS 0x1000 /* missed frame (RC) */
+#define CSR0_CERR 0x2000 /* carrier error (no heartbeat :-) (RC) */
+#define CSR0_BABL 0x4000 /* babble: tx-ed too many bits (RC) */
+#define CSR0_ERR 0x8000 /* error (RC) */
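+
+/* Illustrative sketch, not part of the original driver: the (RC) bits
+ * above are cleared by writing a 1 back to them, so an interrupt
+ * handler acknowledges only the sources it has actually seen, as
+ * lance_interrupt() does below.  example_ack_irqs() is a name invented
+ * for this sketch.
+ */
+static inline void example_ack_irqs(struct lance_private *lp)
+{
+	unsigned short csr0;
+
+	AREG = CSR0;
+	csr0 = DREG;
+	/* write the observed bits back; a 1 clears each (RC) bit */
+	DREG = csr0 & (CSR0_TINT | CSR0_RINT | CSR0_IDON);
+}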
+
+/* CSR3 */
+#define CSR3_BCON 0x0001 /* byte control */
+#define CSR3_ACON 0x0002 /* ALE control */
+#define CSR3_BSWP 0x0004 /* byte swap (1=big endian) */
+
+/***************************** Prototypes *****************************/
+
+static int lance_probe( struct net_device *dev);
+static int lance_open( struct net_device *dev );
+static void lance_init_ring( struct net_device *dev );
+static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev );
+static irqreturn_t lance_interrupt( int irq, void *dev_id);
+static int lance_rx( struct net_device *dev );
+static int lance_close( struct net_device *dev );
+static void set_multicast_list( struct net_device *dev );
+
+/************************* End of Prototypes **************************/
+
+struct net_device * __init sun3lance_probe(int unit)
+{
+ struct net_device *dev;
+ static int found;
+ int err = -ENODEV;
+
+ if (!MACH_IS_SUN3 && !MACH_IS_SUN3X)
+ return ERR_PTR(-ENODEV);
+
+ /* check that this machine has an onboard lance */
+ switch(idprom->id_machtype) {
+ case SM_SUN3|SM_3_50:
+ case SM_SUN3|SM_3_60:
+ case SM_SUN3X|SM_3_80:
+ /* these machines have lance */
+ break;
+
+ default:
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (found)
+ return ERR_PTR(-ENODEV);
+
+ dev = alloc_etherdev(sizeof(struct lance_private));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ if (!lance_probe(dev))
+ goto out;
+
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ found = 1;
+ return dev;
+
+out1:
+#ifdef CONFIG_SUN3
+ iounmap((void __iomem *)dev->base_addr);
+#endif
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = lance_open,
+ .ndo_stop = lance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_set_mac_address = NULL,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __init lance_probe( struct net_device *dev)
+{
+ unsigned long ioaddr;
+
+ struct lance_private *lp;
+ int i;
+ static int did_version;
+ volatile unsigned short *ioaddr_probe;
+ unsigned short tmp1, tmp2;
+
+#ifdef CONFIG_SUN3
+ ioaddr = (unsigned long)ioremap(LANCE_OBIO, PAGE_SIZE);
+ if (!ioaddr)
+ return 0;
+#else
+ ioaddr = SUN3X_LANCE;
+#endif
+
+ /* test to see if there's really a lance here */
+	/* (CSR0_INIT shouldn't be readable) */
+
+ ioaddr_probe = (volatile unsigned short *)ioaddr;
+ tmp1 = ioaddr_probe[0];
+ tmp2 = ioaddr_probe[1];
+
+ ioaddr_probe[1] = CSR0;
+ ioaddr_probe[0] = CSR0_INIT | CSR0_STOP;
+
+ if(ioaddr_probe[0] != CSR0_STOP) {
+ ioaddr_probe[0] = tmp1;
+ ioaddr_probe[1] = tmp2;
+
+#ifdef CONFIG_SUN3
+ iounmap((void __iomem *)ioaddr);
+#endif
+ return 0;
+ }
+
+ lp = netdev_priv(dev);
+
+ /* XXX - leak? */
+ MEM = dvma_malloc_align(sizeof(struct lance_memory), 0x10000);
+ if (MEM == NULL) {
+#ifdef CONFIG_SUN3
+ iounmap((void __iomem *)ioaddr);
+#endif
+ printk(KERN_WARNING "SUN3 Lance couldn't allocate DVMA memory\n");
+ return 0;
+ }
+
+ lp->iobase = (volatile unsigned short *)ioaddr;
+ dev->base_addr = (unsigned long)ioaddr; /* informational only */
+
+ REGA(CSR0) = CSR0_STOP;
+
+ if (request_irq(LANCE_IRQ, lance_interrupt, IRQF_DISABLED, "SUN3 Lance", dev) < 0) {
+#ifdef CONFIG_SUN3
+ iounmap((void __iomem *)ioaddr);
+#endif
+ dvma_free((void *)MEM);
+ printk(KERN_WARNING "SUN3 Lance unable to allocate IRQ\n");
+ return 0;
+ }
+ dev->irq = (unsigned short)LANCE_IRQ;
+
+
+ printk("%s: SUN3 Lance at io %#lx, mem %#lx, irq %d, hwaddr ",
+ dev->name,
+ (unsigned long)ioaddr,
+ (unsigned long)MEM,
+ dev->irq);
+
+ /* copy in the ethernet address from the prom */
+ for(i = 0; i < 6 ; i++)
+ dev->dev_addr[i] = idprom->id_ethaddr[i];
+
+	/* tell the card its ethernet address, bytes swapped */
+ MEM->init.hwaddr[0] = dev->dev_addr[1];
+ MEM->init.hwaddr[1] = dev->dev_addr[0];
+ MEM->init.hwaddr[2] = dev->dev_addr[3];
+ MEM->init.hwaddr[3] = dev->dev_addr[2];
+ MEM->init.hwaddr[4] = dev->dev_addr[5];
+ MEM->init.hwaddr[5] = dev->dev_addr[4];
+
+ printk("%pM\n", dev->dev_addr);
+
+ MEM->init.mode = 0x0000;
+ MEM->init.filter[0] = 0x00000000;
+ MEM->init.filter[1] = 0x00000000;
+ MEM->init.rdra = dvma_vtob(MEM->rx_head);
+ MEM->init.rlen = (RX_LOG_RING_SIZE << 13) |
+ (dvma_vtob(MEM->rx_head) >> 16);
+ MEM->init.tdra = dvma_vtob(MEM->tx_head);
+ MEM->init.tlen = (TX_LOG_RING_SIZE << 13) |
+ (dvma_vtob(MEM->tx_head) >> 16);
+
+ DPRINTK(2, ("initaddr: %08lx rx_ring: %08lx tx_ring: %08lx\n",
+ dvma_vtob(&(MEM->init)), dvma_vtob(MEM->rx_head),
+ (dvma_vtob(MEM->tx_head))));
+
+ if (did_version++ == 0)
+ printk( version );
+
+ dev->netdev_ops = &lance_netdev_ops;
+// KLUDGE -- REMOVE ME
+ set_bit(__LINK_STATE_PRESENT, &dev->state);
+
+
+ return 1;
+}
+
+static int lance_open( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int i;
+
+ DPRINTK( 2, ( "%s: lance_open()\n", dev->name ));
+
+ REGA(CSR0) = CSR0_STOP;
+
+ lance_init_ring(dev);
+
+ /* From now on, AREG is kept to point to CSR0 */
+ REGA(CSR0) = CSR0_INIT;
+
+ i = 1000000;
+ while (--i > 0)
+ if (DREG & CSR0_IDON)
+ break;
+ if (i <= 0 || (DREG & CSR0_ERR)) {
+ DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
+ dev->name, i, DREG ));
+ DREG = CSR0_STOP;
+ return -EIO;
+ }
+
+ DREG = CSR0_IDON | CSR0_STRT | CSR0_INEA;
+
+ netif_start_queue(dev);
+
+ DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG ));
+
+ return 0;
+}
+
+
+/* Initialize the LANCE Rx and Tx rings. */
+
+static void lance_init_ring( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int i;
+
+ lp->lock = 0;
+ lp->tx_full = 0;
+ lp->new_rx = lp->new_tx = 0;
+ lp->old_rx = lp->old_tx = 0;
+
+ for( i = 0; i < TX_RING_SIZE; i++ ) {
+ MEM->tx_head[i].base = dvma_vtob(MEM->tx_data[i]);
+ MEM->tx_head[i].flag = 0;
+ MEM->tx_head[i].base_hi =
+ (dvma_vtob(MEM->tx_data[i])) >>16;
+ MEM->tx_head[i].length = 0;
+ MEM->tx_head[i].misc = 0;
+ }
+
+ for( i = 0; i < RX_RING_SIZE; i++ ) {
+ MEM->rx_head[i].base = dvma_vtob(MEM->rx_data[i]);
+ MEM->rx_head[i].flag = RMD1_OWN_CHIP;
+ MEM->rx_head[i].base_hi =
+ (dvma_vtob(MEM->rx_data[i])) >> 16;
+ MEM->rx_head[i].buf_length = -PKT_BUF_SZ | 0xf000;
+ MEM->rx_head[i].msg_length = 0;
+ }
+
+	/* tell the card its ethernet address, bytes swapped */
+ MEM->init.hwaddr[0] = dev->dev_addr[1];
+ MEM->init.hwaddr[1] = dev->dev_addr[0];
+ MEM->init.hwaddr[2] = dev->dev_addr[3];
+ MEM->init.hwaddr[3] = dev->dev_addr[2];
+ MEM->init.hwaddr[4] = dev->dev_addr[5];
+ MEM->init.hwaddr[5] = dev->dev_addr[4];
+
+ MEM->init.mode = 0x0000;
+ MEM->init.filter[0] = 0x00000000;
+ MEM->init.filter[1] = 0x00000000;
+ MEM->init.rdra = dvma_vtob(MEM->rx_head);
+ MEM->init.rlen = (RX_LOG_RING_SIZE << 13) |
+ (dvma_vtob(MEM->rx_head) >> 16);
+ MEM->init.tdra = dvma_vtob(MEM->tx_head);
+ MEM->init.tlen = (TX_LOG_RING_SIZE << 13) |
+ (dvma_vtob(MEM->tx_head) >> 16);
+
+
+ /* tell the lance the address of its init block */
+ REGA(CSR1) = dvma_vtob(&(MEM->init));
+ REGA(CSR2) = dvma_vtob(&(MEM->init)) >> 16;
+
+#ifdef CONFIG_SUN3X
+ REGA(CSR3) = CSR3_BSWP | CSR3_ACON | CSR3_BCON;
+#else
+ REGA(CSR3) = CSR3_BSWP;
+#endif
+
+}
+
+
+static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int entry, len;
+ struct lance_tx_head *head;
+ unsigned long flags;
+
+ DPRINTK( 1, ( "%s: transmit start.\n",
+ dev->name));
+
+ /* Transmitter timeout, serious problems. */
+ if (netif_queue_stopped(dev)) {
+ int tickssofar = jiffies - dev_trans_start(dev);
+ if (tickssofar < HZ/5)
+ return NETDEV_TX_BUSY;
+
+ DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
+ dev->name, DREG ));
+ DREG = CSR0_STOP;
+ /*
+ * Always set BSWP after a STOP as STOP puts it back into
+ * little endian mode.
+ */
+ REGA(CSR3) = CSR3_BSWP;
+ dev->stats.tx_errors++;
+
+ if(lance_debug >= 2) {
+ int i;
+ printk("Ring data: old_tx %d new_tx %d%s new_rx %d\n",
+ lp->old_tx, lp->new_tx,
+ lp->tx_full ? " (full)" : "",
+ lp->new_rx );
+ for( i = 0 ; i < RX_RING_SIZE; i++ )
+ printk( "rx #%d: base=%04x blen=%04x mlen=%04x\n",
+ i, MEM->rx_head[i].base,
+ -MEM->rx_head[i].buf_length,
+ MEM->rx_head[i].msg_length);
+ for( i = 0 ; i < TX_RING_SIZE; i++ )
+ printk("tx #%d: base=%04x len=%04x misc=%04x\n",
+ i, MEM->tx_head[i].base,
+ -MEM->tx_head[i].length,
+ MEM->tx_head[i].misc );
+ }
+
+ lance_init_ring(dev);
+ REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
+
+ netif_start_queue(dev);
+
+ return NETDEV_TX_OK;
+ }
+
+
+	/* Block a timer-based transmit from overlapping with us by
+	 * stopping the queue for a bit. This could better be done with
+	 * atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+
+ netif_stop_queue(dev);
+
+ if (test_and_set_bit( 0, (void*)&lp->lock ) != 0) {
+		printk( "%s: tx queue lock!\n", dev->name);
+ /* don't clear dev->tbusy flag. */
+ return NETDEV_TX_BUSY;
+ }
+
+ AREG = CSR0;
+ DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n",
+ dev->name, DREG ));
+
+#ifdef CONFIG_SUN3X
+ /* this weirdness doesn't appear on sun3... */
+ if(!(DREG & CSR0_INIT)) {
+ DPRINTK( 1, ("INIT not set, reinitializing...\n"));
+ REGA( CSR0 ) = CSR0_STOP;
+ lance_init_ring(dev);
+ REGA( CSR0 ) = CSR0_INIT | CSR0_STRT;
+ }
+#endif
+
+ /* Fill in a Tx ring entry */
+#if 0
+ if (lance_debug >= 2) {
+ printk( "%s: TX pkt %d type 0x%04x"
+ " from %s to %s"
+ " data at 0x%08x len %d\n",
+ dev->name, lp->new_tx, ((u_short *)skb->data)[6],
+ DEV_ADDR(&skb->data[6]), DEV_ADDR(skb->data),
+ (int)skb->data, (int)skb->len );
+ }
+#endif
+	/* We're not prepared for the interrupt until the last flags are
+	 * set/reset, and the interrupt may fire as soon as OWN_CHIP is set. */
+ local_irq_save(flags);
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->new_tx;
+ head = &(MEM->tx_head[entry]);
+
+ /* Caution: the write order is important here, set the "ownership" bits
+ * last.
+ */
+
+	/* the sun3's lance needs its buffer padded to the minimum
+	   size */
+ len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
+
+// head->length = -len;
+ head->length = (-len) | 0xf000;
+ head->misc = 0;
+
+ skb_copy_from_linear_data(skb, PKTBUF_ADDR(head), skb->len);
+ if (len != skb->len)
+ memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len);
+
+ head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
+ lp->new_tx = (lp->new_tx + 1) & TX_RING_MOD_MASK;
+ dev->stats.tx_bytes += skb->len;
+
+ /* Trigger an immediate send poll. */
+ REGA(CSR0) = CSR0_INEA | CSR0_TDMD | CSR0_STRT;
+ AREG = CSR0;
+ DPRINTK( 2, ( "%s: lance_start_xmit() exiting, csr0 %4.4x.\n",
+ dev->name, DREG ));
+ dev_kfree_skb(skb);
+
+ lp->lock = 0;
+ if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
+ TMD1_OWN_HOST)
+ netif_start_queue(dev);
+
+ local_irq_restore(flags);
+
+ return NETDEV_TX_OK;
+}
+
+/* The LANCE interrupt handler. */
+
+static irqreturn_t lance_interrupt( int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ int csr0;
+ static int in_interrupt;
+
+ if (dev == NULL) {
+ DPRINTK( 1, ( "lance_interrupt(): invalid dev_id\n" ));
+ return IRQ_NONE;
+ }
+
+ if (in_interrupt)
+ DPRINTK( 2, ( "%s: Re-entering the interrupt handler.\n", dev->name ));
+ in_interrupt = 1;
+
+ still_more:
+ flush_cache_all();
+
+ AREG = CSR0;
+ csr0 = DREG;
+
+ /* ack interrupts */
+ DREG = csr0 & (CSR0_TINT | CSR0_RINT | CSR0_IDON);
+
+ /* clear errors */
+ if(csr0 & CSR0_ERR)
+ DREG = CSR0_BABL | CSR0_MERR | CSR0_CERR | CSR0_MISS;
+
+
+ DPRINTK( 2, ( "%s: interrupt csr0=%04x new csr=%04x.\n",
+ dev->name, csr0, DREG ));
+
+ if (csr0 & CSR0_TINT) { /* Tx-done interrupt */
+ int old_tx = lp->old_tx;
+
+// if(lance_debug >= 3) {
+// int i;
+//
+// printk("%s: tx int\n", dev->name);
+//
+// for(i = 0; i < TX_RING_SIZE; i++)
+// printk("ring %d flag=%04x\n", i,
+// MEM->tx_head[i].flag);
+// }
+
+ while( old_tx != lp->new_tx) {
+ struct lance_tx_head *head = &(MEM->tx_head[old_tx]);
+
+ DPRINTK(3, ("on tx_ring %d\n", old_tx));
+
+ if (head->flag & TMD1_OWN_CHIP)
+ break; /* It still hasn't been Txed */
+
+ if (head->flag & TMD1_ERR) {
+ int status = head->misc;
+ dev->stats.tx_errors++;
+ if (status & TMD3_RTRY) dev->stats.tx_aborted_errors++;
+ if (status & TMD3_LCAR) dev->stats.tx_carrier_errors++;
+ if (status & TMD3_LCOL) dev->stats.tx_window_errors++;
+ if (status & (TMD3_UFLO | TMD3_BUFF)) {
+ dev->stats.tx_fifo_errors++;
+ printk("%s: Tx FIFO error\n",
+ dev->name);
+ REGA(CSR0) = CSR0_STOP;
+ REGA(CSR3) = CSR3_BSWP;
+ lance_init_ring(dev);
+ REGA(CSR0) = CSR0_STRT | CSR0_INEA;
+ return IRQ_HANDLED;
+ }
+ } else if(head->flag & (TMD1_ENP | TMD1_STP)) {
+
+ head->flag &= ~(TMD1_ENP | TMD1_STP);
+ if(head->flag & (TMD1_ONE | TMD1_MORE))
+ dev->stats.collisions++;
+
+ dev->stats.tx_packets++;
+ DPRINTK(3, ("cleared tx ring %d\n", old_tx));
+ }
+ old_tx = (old_tx +1) & TX_RING_MOD_MASK;
+ }
+
+ lp->old_tx = old_tx;
+ }
+
+
+ if (netif_queue_stopped(dev)) {
+ /* The ring is no longer full, clear tbusy. */
+ netif_start_queue(dev);
+ netif_wake_queue(dev);
+ }
+
+ if (csr0 & CSR0_RINT) /* Rx interrupt */
+ lance_rx( dev );
+
+ /* Log misc errors. */
+ if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & CSR0_MERR) {
+ DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), "
+ "status %04x.\n", dev->name, csr0 ));
+ /* Restart the chip. */
+ REGA(CSR0) = CSR0_STOP;
+ REGA(CSR3) = CSR3_BSWP;
+ lance_init_ring(dev);
+ REGA(CSR0) = CSR0_STRT | CSR0_INEA;
+ }
+
+
+ /* Clear any other interrupt, and set interrupt enable. */
+// DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR |
+// CSR0_IDON | CSR0_INEA;
+
+ REGA(CSR0) = CSR0_INEA;
+
+ if(DREG & (CSR0_RINT | CSR0_TINT)) {
+ DPRINTK(2, ("restarting interrupt, csr0=%#04x\n", DREG));
+ goto still_more;
+ }
+
+ DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n",
+ dev->name, DREG ));
+ in_interrupt = 0;
+ return IRQ_HANDLED;
+}
+
+/* get packet, toss into skbuff */
+static int lance_rx( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int entry = lp->new_rx;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) {
+ struct lance_rx_head *head = &(MEM->rx_head[entry]);
+ int status = head->flag;
+
+ if (status != (RMD1_ENP|RMD1_STP)) { /* There was an error. */
+ /* There is a tricky error noted by John Murphy,
+ <murf@perftech.com> to Russ Nelson: Even with
+ full-sized buffers it's possible for a jabber packet to use two
+ buffers, with only the last correctly noting the error. */
+ if (status & RMD1_ENP) /* Only count a general error at the */
+ dev->stats.rx_errors++; /* end of a packet.*/
+ if (status & RMD1_FRAM) dev->stats.rx_frame_errors++;
+ if (status & RMD1_OFLO) dev->stats.rx_over_errors++;
+ if (status & RMD1_CRC) dev->stats.rx_crc_errors++;
+ if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++;
+ head->flag &= (RMD1_ENP|RMD1_STP);
+ } else {
+ /* Malloc up new buffer, compatible with net-3. */
+// short pkt_len = head->msg_length;// & 0xfff;
+ short pkt_len = (head->msg_length & 0xfff) - 4;
+ struct sk_buff *skb;
+
+ if (pkt_len < 60) {
+ printk( "%s: Runt packet!\n", dev->name );
+ dev->stats.rx_errors++;
+ }
+ else {
+ skb = dev_alloc_skb( pkt_len+2 );
+				if (skb == NULL) {
+					DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
+						      dev->name ));
+
+					dev->stats.rx_dropped++;
+					head->msg_length = 0;
+					head->flag |= RMD1_OWN_CHIP;
+					lp->new_rx = (lp->new_rx+1) &
+					     RX_RING_MOD_MASK;
+					/* bail out: falling through would
+					 * dereference the NULL skb below */
+					break;
+				}
+
+#if 0
+ if (lance_debug >= 3) {
+ u_char *data = PKTBUF_ADDR(head);
+ printk("%s: RX pkt %d type 0x%04x"
+ " from %pM to %pM",
+ dev->name, lp->new_tx, ((u_short *)data)[6],
+ &data[6], data);
+
+ printk(" data %02x %02x %02x %02x %02x %02x %02x %02x "
+ "len %d at %08x\n",
+ data[15], data[16], data[17], data[18],
+ data[19], data[20], data[21], data[22],
+ pkt_len, data);
+ }
+#endif
+ if (lance_debug >= 3) {
+ u_char *data = PKTBUF_ADDR(head);
+				printk( "%s: RX pkt %d type 0x%04x len %d\n",
+					dev->name, entry, ((u_short *)data)[6], pkt_len);
+ }
+
+
+ skb_reserve( skb, 2 ); /* 16 byte align */
+ skb_put( skb, pkt_len ); /* Make room */
+ skb_copy_to_linear_data(skb,
+ PKTBUF_ADDR(head),
+ pkt_len);
+
+ skb->protocol = eth_type_trans( skb, dev );
+ netif_rx( skb );
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ }
+
+// head->buf_length = -PKT_BUF_SZ | 0xf000;
+ head->msg_length = 0;
+ head->flag = RMD1_OWN_CHIP;
+
+ entry = lp->new_rx = (lp->new_rx +1) & RX_RING_MOD_MASK;
+ }
+
+ /* From lance.c (Donald Becker): */
+ /* We should check that at least two ring entries are free.
+ If not, we should free one and mark stats->rx_dropped++. */
+
+ return 0;
+}
+
+
+static int lance_close( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ AREG = CSR0;
+
+ DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, DREG ));
+
+ /* We stop the LANCE here -- it occasionally polls
+ memory if we don't. */
+ DREG = CSR0_STOP;
+ return 0;
+}
+
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+ */
+
+/* completely untested on a sun3 */
+static void set_multicast_list( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ if(netif_queue_stopped(dev))
+ /* Only possible if board is already started */
+ return;
+
+ /* We take the simple way out and always enable promiscuous mode. */
+ DREG = CSR0_STOP; /* Temporarily stop the lance. */
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Log any net taps. */
+ DPRINTK( 3, ( "%s: Promiscuous mode enabled.\n", dev->name ));
+ REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
+ } else {
+ short multicast_table[4];
+ int num_addrs = netdev_mc_count(dev);
+ int i;
+ /* We don't use the multicast table, but rely on upper-layer
+ * filtering. */
+ memset( multicast_table, (num_addrs == 0) ? 0 : -1,
+ sizeof(multicast_table) );
+ for( i = 0; i < 4; i++ )
+ REGA( CSR8+i ) = multicast_table[i];
+ REGA( CSR15 ) = 0; /* Unset promiscuous mode */
+ }
+
+ /*
+ * Always set BSWP after a STOP as STOP puts it back into
+ * little endian mode.
+ */
+ REGA( CSR3 ) = CSR3_BSWP;
+
+ /* Resume normal operation and reset AREG to CSR0 */
+ REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT;
+}
+
+
+#ifdef MODULE
+
+static struct net_device *sun3lance_dev;
+
+int __init init_module(void)
+{
+ sun3lance_dev = sun3lance_probe(-1);
+ if (IS_ERR(sun3lance_dev))
+ return PTR_ERR(sun3lance_dev);
+ return 0;
+}
+
+void __exit cleanup_module(void)
+{
+ unregister_netdev(sun3lance_dev);
+#ifdef CONFIG_SUN3
+ iounmap((void __iomem *)sun3lance_dev->base_addr);
+#endif
+ free_netdev(sun3lance_dev);
+}
+
+#endif /* MODULE */
+
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
new file mode 100644
index 000000000000..8fda457f94cf
--- /dev/null
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -0,0 +1,1556 @@
+/* $Id: sunlance.c,v 1.112 2002/01/15 06:48:55 davem Exp $
+ * lance.c: Linux/Sparc/Lance driver
+ *
+ * Written 1995, 1996 by Miguel de Icaza
+ * Sources:
+ * The Linux depca driver
+ * The Linux lance driver.
+ * The Linux skeleton driver.
+ * The NetBSD Sparc/Lance driver.
+ * Theo de Raadt (deraadt@openbsd.org)
+ * NCR92C990 Lan Controller manual
+ *
+ * 1.4:
+ * Added support to run with a ledma on the Sun4m
+ *
+ * 1.5:
+ * Added multiple card detection.
+ *
+ * 4/17/96: Burst sizes and tpe selection on sun4m by Eddie C. Dost
+ * (ecd@skynet.be)
+ *
+ * 5/15/96: auto carrier detection on sun4m by Eddie C. Dost
+ * (ecd@skynet.be)
+ *
+ * 5/17/96: lebuffer on scsi/ether cards now work David S. Miller
+ * (davem@caip.rutgers.edu)
+ *
+ * 5/29/96: override option 'tpe-link-test?', if it is 'false', as
+ * this disables auto carrier detection on sun4m. Eddie C. Dost
+ * (ecd@skynet.be)
+ *
+ * 1.7:
+ * 6/26/96: Bug fix for multiple ledmas, miguel.
+ *
+ * 1.8:
+ * Stole multicast code from depca.c, fixed lance_tx.
+ *
+ * 1.9:
+ * 8/21/96: Fixed the multicast code (Pedro Roque)
+ *
+ * 8/28/96: Send fake packet in lance_open() if auto_select is true,
+ * so we can detect the carrier loss condition in time.
+ * Eddie C. Dost (ecd@skynet.be)
+ *
+ * 9/15/96: Align rx_buf so that eth_copy_and_sum() won't cause an
+ * MNA trap during chksum_partial_copy(). (ecd@skynet.be)
+ *
+ * 11/17/96: Handle LE_C0_MERR in lance_interrupt(). (ecd@skynet.be)
+ *
+ * 12/22/96: Don't loop forever in lance_rx() on incomplete packets.
+ * This was the sun4c killer. Shit, stupid bug.
+ * (ecd@skynet.be)
+ *
+ * 1.10:
+ * 1/26/97: Modularize driver. (ecd@skynet.be)
+ *
+ * 1.11:
+ * 12/27/97: Added sun4d support. (jj@sunsite.mff.cuni.cz)
+ *
+ * 1.12:
+ * 11/3/99: Fixed SMP race in lance_start_xmit found by davem.
+ * Anton Blanchard (anton@progsoc.uts.edu.au)
+ * 2.00: 11/9/99: Massive overhaul and port to new SBUS driver interfaces.
+ * David S. Miller (davem@redhat.com)
+ * 2.01:
+ * 11/08/01: Use library crc32 functions (Matt_Domsch@dell.com)
+ *
+ */
+
+#undef DEBUG_DRIVER
+
+static char lancestr[] = "LANCE";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/errno.h>
+#include <linux/socket.h>	/* Used for the temporary inet entries and routing */
+#include <linux/route.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/gfp.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/pgtable.h>
+#include <asm/byteorder.h> /* Used by the checksum routines */
+#include <asm/idprom.h>
+#include <asm/prom.h>
+#include <asm/auxio.h> /* For tpe-link-test? setting */
+#include <asm/irq.h>
+
+#define DRV_NAME "sunlance"
+#define DRV_VERSION "2.02"
+#define DRV_RELDATE "8/24/03"
+#define DRV_AUTHOR "Miguel de Icaza (miguel@nuclecu.unam.mx)"
+
+static char version[] =
+ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION("Sun Lance ethernet driver");
+MODULE_LICENSE("GPL");
+
+/* Define: 2^4 Tx buffers and 2^4 Rx buffers */
+#ifndef LANCE_LOG_TX_BUFFERS
+#define LANCE_LOG_TX_BUFFERS 4
+#define LANCE_LOG_RX_BUFFERS 4
+#endif
+
+#define LE_CSR0 0
+#define LE_CSR1 1
+#define LE_CSR2 2
+#define LE_CSR3 3
+
+#define LE_MO_PROM 0x8000 /* Enable promiscuous mode */
+
+#define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */
+#define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */
+#define LE_C0_CERR 0x2000 /* SQE: Signal quality error */
+#define LE_C0_MISS 0x1000 /* MISS: Missed a packet */
+#define LE_C0_MERR 0x0800 /* ME: Memory error */
+#define LE_C0_RINT 0x0400 /* Received interrupt */
+#define LE_C0_TINT 0x0200 /* Transmitter Interrupt */
+#define LE_C0_IDON 0x0100 /* IFIN: Init finished. */
+#define LE_C0_INTR 0x0080 /* Interrupt or error */
+#define LE_C0_INEA 0x0040 /* Interrupt enable */
+#define LE_C0_RXON 0x0020 /* Receiver on */
+#define LE_C0_TXON 0x0010 /* Transmitter on */
+#define LE_C0_TDMD 0x0008 /* Transmitter demand */
+#define LE_C0_STOP 0x0004 /* Stop the card */
+#define LE_C0_STRT 0x0002 /* Start the card */
+#define LE_C0_INIT 0x0001 /* Init the card */
+
+#define LE_C3_BSWP 0x4 /* SWAP */
+#define LE_C3_ACON 0x2 /* ALE Control */
+#define LE_C3_BCON 0x1 /* Byte control */
+
+/* Receive message descriptor 1 */
+#define LE_R1_OWN 0x80 /* Who owns the entry */
+#define LE_R1_ERR 0x40 /* Error: if FRA, OFL, CRC or BUF is set */
+#define LE_R1_FRA 0x20 /* FRA: Frame error */
+#define LE_R1_OFL 0x10 /* OFL: Frame overflow */
+#define LE_R1_CRC 0x08 /* CRC error */
+#define LE_R1_BUF 0x04 /* BUF: Buffer error */
+#define LE_R1_SOP 0x02 /* Start of packet */
+#define LE_R1_EOP 0x01 /* End of packet */
+#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+#define LE_T1_OWN 0x80 /* Lance owns the packet */
+#define LE_T1_ERR 0x40 /* Error summary */
+#define LE_T1_EMORE 0x10 /* Error: more than one retry needed */
+#define LE_T1_EONE 0x08 /* Error: one retry needed */
+#define LE_T1_EDEF 0x04 /* Error: deferred */
+#define LE_T1_SOP 0x02 /* Start of packet */
+#define LE_T1_EOP 0x01 /* End of packet */
+#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+#define LE_T3_BUF 0x8000 /* Buffer error */
+#define LE_T3_UFL 0x4000 /* Error underflow */
+#define LE_T3_LCOL 0x1000 /* Error late collision */
+#define LE_T3_CLOS 0x0800 /* Error carrier loss */
+#define LE_T3_RTY 0x0400 /* Error retry */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */
+
+#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
+#define TX_NEXT(__x) (((__x)+1) & TX_RING_MOD_MASK)
+
+#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
+#define RX_NEXT(__x) (((__x)+1) & RX_RING_MOD_MASK)
+
+#define PKT_BUF_SZ 1544
+#define RX_BUFF_SIZE PKT_BUF_SZ
+#define TX_BUFF_SIZE PKT_BUF_SZ
+
+struct lance_rx_desc {
+ u16 rmd0; /* low address of packet */
+ u8 rmd1_bits; /* descriptor bits */
+ u8 rmd1_hadr; /* high address of packet */
+ s16 length; /* This length is 2s complement (negative)!
+ * Buffer length
+ */
+ u16 mblength; /* This is the actual number of bytes received */
+};
+
+struct lance_tx_desc {
+ u16 tmd0; /* low address of packet */
+ u8 tmd1_bits; /* descriptor bits */
+ u8 tmd1_hadr; /* high address of packet */
+ s16 length; /* Length is 2s complement (negative)! */
+ u16 misc;
+};
+
+/* The LANCE initialization block, described in databook. */
+/* On the Sparc, this block should be on a DMA region */
+struct lance_init_block {
+ u16 mode; /* Pre-set mode (reg. 15) */
+ u8 phys_addr[6]; /* Physical ethernet address */
+ u32 filter[2]; /* Multicast filter. */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ u16 rx_ptr; /* receive descriptor addr */
+ u16 rx_len; /* receive len and high addr */
+ u16 tx_ptr; /* transmit descriptor addr */
+ u16 tx_len; /* transmit len and high addr */
+
+	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
+ struct lance_rx_desc brx_ring[RX_RING_SIZE];
+ struct lance_tx_desc btx_ring[TX_RING_SIZE];
+
+ u8 tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
+ u8 pad[2]; /* align rx_buf for copy_and_sum(). */
+ u8 rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
+};
+
+#define libdesc_offset(rt, elem) \
+((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem])))))
+
+#define libbuff_offset(rt, elem) \
+((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem][0])))))
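+
+/* Illustrative note, not part of the original driver: the two macros
+ * above compute the byte offset of a descriptor or buffer inside
+ * struct lance_init_block by taking the address of a member on a NULL
+ * base pointer -- the classic offsetof() idiom.  Assuming offsetof()
+ * is available (<linux/stddef.h>), an equivalent would be:
+ *
+ *	offsetof(struct lance_init_block, brx_ring[elem])
+ */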
+
+struct lance_private {
+ void __iomem *lregs; /* Lance RAP/RDP regs. */
+ void __iomem *dregs; /* DMA controller regs. */
+ struct lance_init_block __iomem *init_block_iomem;
+ struct lance_init_block *init_block_mem;
+
+ spinlock_t lock;
+
+ int rx_new, tx_new;
+ int rx_old, tx_old;
+
+ struct platform_device *ledma; /* If set this points to ledma */
+ char tpe; /* cable-selection is TPE */
+ char auto_select; /* cable-selection by carrier */
+ char burst_sizes; /* ledma SBus burst sizes */
+ char pio_buffer; /* init block in PIO space? */
+
+ unsigned short busmaster_regval;
+
+ void (*init_ring)(struct net_device *);
+ void (*rx)(struct net_device *);
+ void (*tx)(struct net_device *);
+
+ char *name;
+ dma_addr_t init_block_dvma;
+ struct net_device *dev; /* Backpointer */
+ struct platform_device *op;
+ struct platform_device *lebuffer;
+ struct timer_list multicast_timer;
+};
+
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+ lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\
+ lp->tx_old - lp->tx_new-1)
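+
+/* Illustrative sketch, not part of the original driver: TX_BUFFS_AVAIL
+ * counts the free slots between the consumer (tx_old) and producer
+ * (tx_new) indices, always keeping one slot unused so that a full ring
+ * can be distinguished from an empty one.  example_tx_slots_free() is
+ * a name invented for this sketch.
+ */
+static inline int example_tx_slots_free(const struct lance_private *lp)
+{
+	if (lp->tx_old <= lp->tx_new)
+		return lp->tx_old + TX_RING_MOD_MASK - lp->tx_new;
+	return lp->tx_old - lp->tx_new - 1;
+}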
+
+/* Lance registers. */
+#define RDP 0x00UL /* register data port */
+#define RAP 0x02UL /* register address port */
+#define LANCE_REG_SIZE 0x04UL
+
+#define STOP_LANCE(__lp) \
+do { void __iomem *__base = (__lp)->lregs; \
+ sbus_writew(LE_CSR0, __base + RAP); \
+ sbus_writew(LE_C0_STOP, __base + RDP); \
+} while (0)
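+
+/* Illustrative sketch, not part of the original driver: reads use the
+ * same RAP-then-RDP sequence that STOP_LANCE() uses for its write, as
+ * seen at the top of lance_interrupt() below.  example_read_csr0() is
+ * a name invented for this sketch.
+ */
+static inline u16 example_read_csr0(struct lance_private *lp)
+{
+	sbus_writew(LE_CSR0, lp->lregs + RAP);	/* select CSR0 */
+	return sbus_readw(lp->lregs + RDP);	/* read its value */
+}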
+
+int sparc_lance_debug = 2;
+
+/* The Lance uses 24 bit addresses */
+/* On the Sun4c the DVMA will provide the remaining bytes for us */
+/* On the Sun4m we have to instruct the ledma to provide them */
+/* Even worse, on scsi/ether SBUS cards, the init block and the
+ * transmit/receive buffers are addressed as offsets from absolute
+ * zero on the lebuffer PIO area. -DaveM
+ */
+
+#define LANCE_ADDR(x) ((long)(x) & ~0xff000000)
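+
+/* Illustrative note, not part of the original driver: the LANCE only
+ * drives 24 address bits, so LANCE_ADDR() strips the top byte of a
+ * DVMA address and the DVMA map (or the ledma on sun4m) supplies the
+ * rest, e.g. LANCE_ADDR(0xfc012345) == 0x00012345.
+ */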
+
+/* Load the CSR registers */
+static void load_csrs(struct lance_private *lp)
+{
+ u32 leptr;
+
+ if (lp->pio_buffer)
+ leptr = 0;
+ else
+ leptr = LANCE_ADDR(lp->init_block_dvma);
+
+ sbus_writew(LE_CSR1, lp->lregs + RAP);
+ sbus_writew(leptr & 0xffff, lp->lregs + RDP);
+ sbus_writew(LE_CSR2, lp->lregs + RAP);
+ sbus_writew(leptr >> 16, lp->lregs + RDP);
+ sbus_writew(LE_CSR3, lp->lregs + RAP);
+ sbus_writew(lp->busmaster_regval, lp->lregs + RDP);
+
+ /* Point back to csr0 */
+ sbus_writew(LE_CSR0, lp->lregs + RAP);
+}
+
+/* Setup the Lance Rx and Tx rings */
+static void lance_init_ring_dvma(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block *ib = lp->init_block_mem;
+ dma_addr_t aib = lp->init_block_dvma;
+ __u32 leptr;
+ int i;
+
+ /* Lock out other processes while setting up hardware */
+ netif_stop_queue(dev);
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ /* Copy the ethernet address to the lance init block
+ * Note that on the sparc you need to swap the ethernet address.
+ */
+ ib->phys_addr [0] = dev->dev_addr [1];
+ ib->phys_addr [1] = dev->dev_addr [0];
+ ib->phys_addr [2] = dev->dev_addr [3];
+ ib->phys_addr [3] = dev->dev_addr [2];
+ ib->phys_addr [4] = dev->dev_addr [5];
+ ib->phys_addr [5] = dev->dev_addr [4];
+
+ /* Setup the Tx ring entries */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ leptr = LANCE_ADDR(aib + libbuff_offset(tx_buf, i));
+ ib->btx_ring [i].tmd0 = leptr;
+ ib->btx_ring [i].tmd1_hadr = leptr >> 16;
+ ib->btx_ring [i].tmd1_bits = 0;
+ ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
+ ib->btx_ring [i].misc = 0;
+ }
+
+ /* Setup the Rx ring entries */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ leptr = LANCE_ADDR(aib + libbuff_offset(rx_buf, i));
+
+ ib->brx_ring [i].rmd0 = leptr;
+ ib->brx_ring [i].rmd1_hadr = leptr >> 16;
+ ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
+ ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
+ ib->brx_ring [i].mblength = 0;
+ }
+
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = LANCE_ADDR(aib + libdesc_offset(brx_ring, 0));
+ ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16);
+ ib->rx_ptr = leptr;
+
+ /* Setup tx descriptor pointer */
+ leptr = LANCE_ADDR(aib + libdesc_offset(btx_ring, 0));
+ ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16);
+ ib->tx_ptr = leptr;
+}
+
+static void lance_init_ring_pio(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ u32 leptr;
+ int i;
+
+ /* Lock out other processes while setting up hardware */
+ netif_stop_queue(dev);
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ /* Copy the ethernet address to the lance init block
+ * Note that on the sparc you need to swap the ethernet address.
+ */
+ sbus_writeb(dev->dev_addr[1], &ib->phys_addr[0]);
+ sbus_writeb(dev->dev_addr[0], &ib->phys_addr[1]);
+ sbus_writeb(dev->dev_addr[3], &ib->phys_addr[2]);
+ sbus_writeb(dev->dev_addr[2], &ib->phys_addr[3]);
+ sbus_writeb(dev->dev_addr[5], &ib->phys_addr[4]);
+ sbus_writeb(dev->dev_addr[4], &ib->phys_addr[5]);
+
+ /* Setup the Tx ring entries */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ leptr = libbuff_offset(tx_buf, i);
+ sbus_writew(leptr, &ib->btx_ring [i].tmd0);
+ sbus_writeb(leptr >> 16,&ib->btx_ring [i].tmd1_hadr);
+ sbus_writeb(0, &ib->btx_ring [i].tmd1_bits);
+
+ /* The ones required by tmd2 */
+ sbus_writew(0xf000, &ib->btx_ring [i].length);
+ sbus_writew(0, &ib->btx_ring [i].misc);
+ }
+
+ /* Setup the Rx ring entries */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ leptr = libbuff_offset(rx_buf, i);
+
+ sbus_writew(leptr, &ib->brx_ring [i].rmd0);
+ sbus_writeb(leptr >> 16,&ib->brx_ring [i].rmd1_hadr);
+ sbus_writeb(LE_R1_OWN, &ib->brx_ring [i].rmd1_bits);
+ sbus_writew(-RX_BUFF_SIZE|0xf000,
+ &ib->brx_ring [i].length);
+ sbus_writew(0, &ib->brx_ring [i].mblength);
+ }
+
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = libdesc_offset(brx_ring, 0);
+ sbus_writew((LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16),
+ &ib->rx_len);
+ sbus_writew(leptr, &ib->rx_ptr);
+
+ /* Setup tx descriptor pointer */
+ leptr = libdesc_offset(btx_ring, 0);
+ sbus_writew((LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16),
+ &ib->tx_len);
+ sbus_writew(leptr, &ib->tx_ptr);
+}
+
+static void init_restart_ledma(struct lance_private *lp)
+{
+ u32 csr = sbus_readl(lp->dregs + DMA_CSR);
+
+ if (!(csr & DMA_HNDL_ERROR)) {
+ /* E-Cache draining */
+ while (sbus_readl(lp->dregs + DMA_CSR) & DMA_FIFO_ISDRAIN)
+ barrier();
+ }
+
+ csr = sbus_readl(lp->dregs + DMA_CSR);
+ csr &= ~DMA_E_BURSTS;
+ if (lp->burst_sizes & DMA_BURST32)
+ csr |= DMA_E_BURST32;
+ else
+ csr |= DMA_E_BURST16;
+
+ csr |= (DMA_DSBL_RD_DRN | DMA_DSBL_WR_INV | DMA_FIFO_INV);
+
+ if (lp->tpe)
+ csr |= DMA_EN_ENETAUI;
+ else
+ csr &= ~DMA_EN_ENETAUI;
+ udelay(20);
+ sbus_writel(csr, lp->dregs + DMA_CSR);
+ udelay(200);
+}
+
+static int init_restart_lance(struct lance_private *lp)
+{
+ u16 regval = 0;
+ int i;
+
+ if (lp->dregs)
+ init_restart_ledma(lp);
+
+ sbus_writew(LE_CSR0, lp->lregs + RAP);
+ sbus_writew(LE_C0_INIT, lp->lregs + RDP);
+
+ /* Wait for the lance to complete initialization */
+ for (i = 0; i < 100; i++) {
+ regval = sbus_readw(lp->lregs + RDP);
+
+ if (regval & (LE_C0_ERR | LE_C0_IDON))
+ break;
+ barrier();
+ }
+ if (i == 100 || (regval & LE_C0_ERR)) {
+ printk(KERN_ERR "LANCE unopened after %d ticks, csr0=%4.4x.\n",
+ i, regval);
+ if (lp->dregs)
+ printk("dcsr=%8.8x\n", sbus_readl(lp->dregs + DMA_CSR));
+ return -1;
+ }
+
+ /* Clear IDON by writing a "1", enable interrupts and start lance */
+ sbus_writew(LE_C0_IDON, lp->lregs + RDP);
+ sbus_writew(LE_C0_INEA | LE_C0_STRT, lp->lregs + RDP);
+
+ if (lp->dregs) {
+ u32 csr = sbus_readl(lp->dregs + DMA_CSR);
+
+ csr |= DMA_INT_ENAB;
+ sbus_writel(csr, lp->dregs + DMA_CSR);
+ }
+
+ return 0;
+}
+
+static void lance_rx_dvma(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block *ib = lp->init_block_mem;
+ struct lance_rx_desc *rd;
+ u8 bits;
+ int len, entry = lp->rx_new;
+ struct sk_buff *skb;
+
+ for (rd = &ib->brx_ring [entry];
+ !((bits = rd->rmd1_bits) & LE_R1_OWN);
+ rd = &ib->brx_ring [entry]) {
+
+		/* Did we get an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP) dev->stats.rx_errors++;
+ } else {
+ len = (rd->mblength & 0xfff) - 4;
+ skb = dev_alloc_skb(len + 2);
+
+ if (skb == NULL) {
+ printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+ dev->stats.rx_dropped++;
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = RX_NEXT(entry);
+ return;
+ }
+
+ dev->stats.rx_bytes += len;
+
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)&(ib->rx_buf [entry][0]),
+ len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ }
+
+ /* Return the packet to the pool */
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ entry = RX_NEXT(entry);
+ }
+
+ lp->rx_new = entry;
+}
+
+static void lance_tx_dvma(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block *ib = lp->init_block_mem;
+ int i, j;
+
+ spin_lock(&lp->lock);
+
+ j = lp->tx_old;
+ for (i = j; i != lp->tx_new; i = j) {
+ struct lance_tx_desc *td = &ib->btx_ring [i];
+ u8 bits = td->tmd1_bits;
+
+ /* If we hit a packet not owned by us, stop */
+ if (bits & LE_T1_OWN)
+ break;
+
+ if (bits & LE_T1_ERR) {
+ u16 status = td->misc;
+
+ dev->stats.tx_errors++;
+ if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ dev->stats.tx_carrier_errors++;
+ if (lp->auto_select) {
+ lp->tpe = 1 - lp->tpe;
+ printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n",
+ dev->name, lp->tpe?"TPE":"AUI");
+ STOP_LANCE(lp);
+ lp->init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ }
+
+ /* Buffer errors and underflows turn off the
+ * transmitter, restart the adapter.
+ */
+ if (status & (LE_T3_BUF|LE_T3_UFL)) {
+ dev->stats.tx_fifo_errors++;
+
+ printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ STOP_LANCE(lp);
+ lp->init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ } else if ((bits & LE_T1_POK) == LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ td->tmd1_bits = bits & ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (bits & LE_T1_EONE)
+ dev->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (bits & LE_T1_EMORE)
+ dev->stats.collisions += 2;
+
+ dev->stats.tx_packets++;
+ }
+
+ j = TX_NEXT(j);
+ }
+ lp->tx_old = j;
+out:
+ if (netif_queue_stopped(dev) &&
+ TX_BUFFS_AVAIL > 0)
+ netif_wake_queue(dev);
+
+ spin_unlock(&lp->lock);
+}
+
+static void lance_piocopy_to_skb(struct sk_buff *skb, void __iomem *piobuf, int len)
+{
+ u16 *p16 = (u16 *) skb->data;
+ u32 *p32;
+ u8 *p8;
+ void __iomem *pbuf = piobuf;
+
+ /* We know here that both src and dest are on a 16bit boundary. */
+ *p16++ = sbus_readw(pbuf);
+ p32 = (u32 *) p16;
+ pbuf += 2;
+ len -= 2;
+
+ while (len >= 4) {
+ *p32++ = sbus_readl(pbuf);
+ pbuf += 4;
+ len -= 4;
+ }
+ p8 = (u8 *) p32;
+ if (len >= 2) {
+ p16 = (u16 *) p32;
+ *p16++ = sbus_readw(pbuf);
+ pbuf += 2;
+ len -= 2;
+ p8 = (u8 *) p16;
+ }
+ if (len >= 1)
+ *p8 = sbus_readb(pbuf);
+}
+
+static void lance_rx_pio(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ struct lance_rx_desc __iomem *rd;
+ unsigned char bits;
+ int len, entry;
+ struct sk_buff *skb;
+
+ entry = lp->rx_new;
+ for (rd = &ib->brx_ring [entry];
+ !((bits = sbus_readb(&rd->rmd1_bits)) & LE_R1_OWN);
+ rd = &ib->brx_ring [entry]) {
+
+		/* Did we get an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP) dev->stats.rx_errors++;
+ } else {
+ len = (sbus_readw(&rd->mblength) & 0xfff) - 4;
+ skb = dev_alloc_skb(len + 2);
+
+ if (skb == NULL) {
+ printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+ dev->stats.rx_dropped++;
+ sbus_writew(0, &rd->mblength);
+ sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
+ lp->rx_new = RX_NEXT(entry);
+ return;
+ }
+
+ dev->stats.rx_bytes += len;
+
+ skb_reserve (skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+ lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ }
+
+ /* Return the packet to the pool */
+ sbus_writew(0, &rd->mblength);
+ sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
+ entry = RX_NEXT(entry);
+ }
+
+ lp->rx_new = entry;
+}
+
+static void lance_tx_pio(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ int i, j;
+
+ spin_lock(&lp->lock);
+
+ j = lp->tx_old;
+ for (i = j; i != lp->tx_new; i = j) {
+ struct lance_tx_desc __iomem *td = &ib->btx_ring [i];
+ u8 bits = sbus_readb(&td->tmd1_bits);
+
+ /* If we hit a packet not owned by us, stop */
+ if (bits & LE_T1_OWN)
+ break;
+
+ if (bits & LE_T1_ERR) {
+ u16 status = sbus_readw(&td->misc);
+
+ dev->stats.tx_errors++;
+ if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ dev->stats.tx_carrier_errors++;
+ if (lp->auto_select) {
+ lp->tpe = 1 - lp->tpe;
+ printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n",
+ dev->name, lp->tpe?"TPE":"AUI");
+ STOP_LANCE(lp);
+ lp->init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ }
+
+ /* Buffer errors and underflows turn off the
+ * transmitter, restart the adapter.
+ */
+ if (status & (LE_T3_BUF|LE_T3_UFL)) {
+ dev->stats.tx_fifo_errors++;
+
+ printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ STOP_LANCE(lp);
+ lp->init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ } else if ((bits & LE_T1_POK) == LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ sbus_writeb(bits & ~(LE_T1_POK), &td->tmd1_bits);
+
+ /* One collision before packet was sent. */
+ if (bits & LE_T1_EONE)
+ dev->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (bits & LE_T1_EMORE)
+ dev->stats.collisions += 2;
+
+ dev->stats.tx_packets++;
+ }
+
+ j = TX_NEXT(j);
+ }
+ lp->tx_old = j;
+
+ if (netif_queue_stopped(dev) &&
+ TX_BUFFS_AVAIL > 0)
+ netif_wake_queue(dev);
+out:
+ spin_unlock(&lp->lock);
+}
+
+static irqreturn_t lance_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ int csr0;
+
+ sbus_writew(LE_CSR0, lp->lregs + RAP);
+ csr0 = sbus_readw(lp->lregs + RDP);
+
+ /* Acknowledge all the interrupt sources ASAP */
+ sbus_writew(csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT),
+ lp->lregs + RDP);
+
+ if ((csr0 & LE_C0_ERR) != 0) {
+ /* Clear the error condition */
+ sbus_writew((LE_C0_BABL | LE_C0_ERR | LE_C0_MISS |
+ LE_C0_CERR | LE_C0_MERR),
+ lp->lregs + RDP);
+ }
+
+ if (csr0 & LE_C0_RINT)
+ lp->rx(dev);
+
+ if (csr0 & LE_C0_TINT)
+ lp->tx(dev);
+
+ if (csr0 & LE_C0_BABL)
+ dev->stats.tx_errors++;
+
+ if (csr0 & LE_C0_MISS)
+ dev->stats.rx_errors++;
+
+ if (csr0 & LE_C0_MERR) {
+ if (lp->dregs) {
+ u32 addr = sbus_readl(lp->dregs + DMA_ADDR);
+
+ printk(KERN_ERR "%s: Memory error, status %04x, addr %06x\n",
+ dev->name, csr0, addr & 0xffffff);
+ } else {
+ printk(KERN_ERR "%s: Memory error, status %04x\n",
+ dev->name, csr0);
+ }
+
+ sbus_writew(LE_C0_STOP, lp->lregs + RDP);
+
+ if (lp->dregs) {
+ u32 dma_csr = sbus_readl(lp->dregs + DMA_CSR);
+
+ dma_csr |= DMA_FIFO_INV;
+ sbus_writel(dma_csr, lp->dregs + DMA_CSR);
+ }
+
+ lp->init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ netif_wake_queue(dev);
+ }
+
+ sbus_writew(LE_C0_INEA, lp->lregs + RDP);
+
+ return IRQ_HANDLED;
+}
+
+/* Build a fake network packet and send it to ourselves. */
+static void build_fake_packet(struct lance_private *lp)
+{
+ struct net_device *dev = lp->dev;
+ int i, entry;
+
+ entry = lp->tx_new & TX_RING_MOD_MASK;
+ if (lp->pio_buffer) {
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ u16 __iomem *packet = (u16 __iomem *) &(ib->tx_buf[entry][0]);
+ struct ethhdr __iomem *eth = (struct ethhdr __iomem *) packet;
+ for (i = 0; i < (ETH_ZLEN / sizeof(u16)); i++)
+ sbus_writew(0, &packet[i]);
+ for (i = 0; i < 6; i++) {
+ sbus_writeb(dev->dev_addr[i], &eth->h_dest[i]);
+ sbus_writeb(dev->dev_addr[i], &eth->h_source[i]);
+ }
+ sbus_writew((-ETH_ZLEN) | 0xf000, &ib->btx_ring[entry].length);
+ sbus_writew(0, &ib->btx_ring[entry].misc);
+ sbus_writeb(LE_T1_POK|LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits);
+ } else {
+ struct lance_init_block *ib = lp->init_block_mem;
+ u16 *packet = (u16 *) &(ib->tx_buf[entry][0]);
+ struct ethhdr *eth = (struct ethhdr *) packet;
+ memset(packet, 0, ETH_ZLEN);
+ for (i = 0; i < 6; i++) {
+ eth->h_dest[i] = dev->dev_addr[i];
+ eth->h_source[i] = dev->dev_addr[i];
+ }
+ ib->btx_ring[entry].length = (-ETH_ZLEN) | 0xf000;
+ ib->btx_ring[entry].misc = 0;
+ ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
+ }
+ lp->tx_new = TX_NEXT(entry);
+}
+
+static int lance_open(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int status = 0;
+
+ STOP_LANCE(lp);
+
+ if (request_irq(dev->irq, lance_interrupt, IRQF_SHARED,
+ lancestr, (void *) dev)) {
+ printk(KERN_ERR "Lance: Can't get irq %d\n", dev->irq);
+ return -EAGAIN;
+ }
+
+ /* On the 4m, setup the ledma to provide the upper bits for buffers */
+ if (lp->dregs) {
+ u32 regval = lp->init_block_dvma & 0xff000000;
+
+ sbus_writel(regval, lp->dregs + DMA_TEST);
+ }
+
+ /* Set mode and clear multicast filter only at device open,
+ * so that lance_init_ring() called at any error will not
+ * forget multicast filters.
+ *
+	 * BTW it is a common bug in all lance drivers! --ANK
+ */
+ if (lp->pio_buffer) {
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ sbus_writew(0, &ib->mode);
+ sbus_writel(0, &ib->filter[0]);
+ sbus_writel(0, &ib->filter[1]);
+ } else {
+ struct lance_init_block *ib = lp->init_block_mem;
+ ib->mode = 0;
+ ib->filter [0] = 0;
+ ib->filter [1] = 0;
+ }
+
+ lp->init_ring(dev);
+ load_csrs(lp);
+
+ netif_start_queue(dev);
+
+ status = init_restart_lance(lp);
+ if (!status && lp->auto_select) {
+ build_fake_packet(lp);
+ sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP);
+ }
+
+ return status;
+}
+
+static int lance_close(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ del_timer_sync(&lp->multicast_timer);
+
+ STOP_LANCE(lp);
+
+ free_irq(dev->irq, (void *) dev);
+ return 0;
+}
+
+static int lance_reset(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int status;
+
+ STOP_LANCE(lp);
+
+ /* On the 4m, reset the dma too */
+ if (lp->dregs) {
+ u32 csr, addr;
+
+ printk(KERN_ERR "resetting ledma\n");
+ csr = sbus_readl(lp->dregs + DMA_CSR);
+ sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR);
+ udelay(200);
+ sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR);
+
+ addr = lp->init_block_dvma & 0xff000000;
+ sbus_writel(addr, lp->dregs + DMA_TEST);
+ }
+ lp->init_ring(dev);
+ load_csrs(lp);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ status = init_restart_lance(lp);
+ return status;
+}
+
+static void lance_piocopy_from_skb(void __iomem *dest, unsigned char *src, int len)
+{
+ void __iomem *piobuf = dest;
+ u32 *p32;
+ u16 *p16;
+ u8 *p8;
+
+ switch ((unsigned long)src & 0x3) {
+ case 0:
+ p32 = (u32 *) src;
+ while (len >= 4) {
+ sbus_writel(*p32, piobuf);
+ p32++;
+ piobuf += 4;
+ len -= 4;
+ }
+ src = (char *) p32;
+ break;
+ case 1:
+ case 3:
+ p8 = (u8 *) src;
+ while (len >= 4) {
+ u32 val;
+
+ val = p8[0] << 24;
+ val |= p8[1] << 16;
+ val |= p8[2] << 8;
+ val |= p8[3];
+ sbus_writel(val, piobuf);
+ p8 += 4;
+ piobuf += 4;
+ len -= 4;
+ }
+ src = (char *) p8;
+ break;
+ case 2:
+ p16 = (u16 *) src;
+ while (len >= 4) {
+ u32 val = p16[0]<<16 | p16[1];
+ sbus_writel(val, piobuf);
+ p16 += 2;
+ piobuf += 4;
+ len -= 4;
+ }
+ src = (char *) p16;
+ break;
+ }
+ if (len >= 2) {
+ u16 val = src[0] << 8 | src[1];
+ sbus_writew(val, piobuf);
+ src += 2;
+ piobuf += 2;
+ len -= 2;
+ }
+ if (len >= 1)
+ sbus_writeb(src[0], piobuf);
+}
+
+static void lance_piozero(void __iomem *dest, int len)
+{
+ void __iomem *piobuf = dest;
+
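+ /* Align the destination with byte/halfword stores, zero the bulk
+  * with word stores, then mop up the remaining tail.
+  */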
+ if ((unsigned long)piobuf & 1) {
+ sbus_writeb(0, piobuf);
+ piobuf += 1;
+ len -= 1;
+ if (len == 0)
+ return;
+ }
+ if (len == 1) {
+ sbus_writeb(0, piobuf);
+ return;
+ }
+ if ((unsigned long)piobuf & 2) {
+ sbus_writew(0, piobuf);
+ piobuf += 2;
+ len -= 2;
+ if (len == 0)
+ return;
+ }
+ while (len >= 4) {
+ sbus_writel(0, piobuf);
+ piobuf += 4;
+ len -= 4;
+ }
+ if (len >= 2) {
+ sbus_writew(0, piobuf);
+ piobuf += 2;
+ len -= 2;
+ }
+ if (len >= 1)
+ sbus_writeb(0, piobuf);
+}
+
+static void lance_tx_timeout(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
+ dev->name, sbus_readw(lp->lregs + RDP));
+ lance_reset(dev);
+ netif_wake_queue(dev);
+}
+
+static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int entry, skblen, len;
+
+ skblen = skb->len;
+
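+ /* Frames shorter than the ethernet minimum are padded out to
+  * ETH_ZLEN with zeroes before being handed to the chip.
+  */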
+ len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
+
+ spin_lock_irq(&lp->lock);
+
+ dev->stats.tx_bytes += len;
+
+ entry = lp->tx_new & TX_RING_MOD_MASK;
+ if (lp->pio_buffer) {
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ sbus_writew((-len) | 0xf000, &ib->btx_ring[entry].length);
+ sbus_writew(0, &ib->btx_ring[entry].misc);
+ lance_piocopy_from_skb(&ib->tx_buf[entry][0], skb->data, skblen);
+ if (len != skblen)
+ lance_piozero(&ib->tx_buf[entry][skblen], len - skblen);
+ sbus_writeb(LE_T1_POK | LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits);
+ } else {
+ struct lance_init_block *ib = lp->init_block_mem;
+ ib->btx_ring[entry].length = (-len) | 0xf000;
+ ib->btx_ring[entry].misc = 0;
+ skb_copy_from_linear_data(skb, &ib->tx_buf[entry][0], skblen);
+ if (len != skblen)
+ memset((char *) &ib->tx_buf[entry][skblen], 0, len - skblen);
+ ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
+ }
+
+ lp->tx_new = TX_NEXT(entry);
+
+ if (TX_BUFFS_AVAIL <= 0)
+ netif_stop_queue(dev);
+
+ /* Kick the lance: transmit now */
+ sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP);
+
+ /* Read back CSR to invalidate the E-Cache.
+ * This is needed, because DMA_DSBL_WR_INV is set.
+ */
+ if (lp->dregs)
+ sbus_readw(lp->lregs + RDP);
+
+ spin_unlock_irq(&lp->lock);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/* taken from the depca driver */
+static void lance_load_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ u32 crc;
+ u32 val;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI)
+ val = ~0;
+ else
+ val = 0;
+
+ if (lp->pio_buffer) {
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ sbus_writel(val, &ib->filter[0]);
+ sbus_writel(val, &ib->filter[1]);
+ } else {
+ struct lance_init_block *ib = lp->init_block_mem;
+ ib->filter[0] = val;
+ ib->filter[1] = val;
+ }
+
+ if (dev->flags & IFF_ALLMULTI)
+ return;
+
+ /* Add addresses */
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
+ crc = crc >> 26;
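+ /* The top six bits of the little-endian CRC select one of 64
+  * filter bits: crc>>4 picks the 16-bit word, crc&0xf the bit.
+  */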
+ if (lp->pio_buffer) {
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ u16 __iomem *mcast_table = (u16 __iomem *) &ib->filter;
+ u16 tmp = sbus_readw(&mcast_table[crc>>4]);
+ tmp |= 1 << (crc & 0xf);
+ sbus_writew(tmp, &mcast_table[crc>>4]);
+ } else {
+ struct lance_init_block *ib = lp->init_block_mem;
+ u16 *mcast_table = (u16 *) &ib->filter;
+ mcast_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
+ }
+}
+
+static void lance_set_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block *ib_mem = lp->init_block_mem;
+ struct lance_init_block __iomem *ib_iomem = lp->init_block_iomem;
+ u16 mode;
+
+ if (!netif_running(dev))
+ return;
+
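+ /* Reloading the filter requires a full ring re-init, so if
+  * transmits are still pending, back off and retry from the
+  * multicast timer once the ring has drained.
+  */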
+ if (lp->tx_old != lp->tx_new) {
+ mod_timer(&lp->multicast_timer, jiffies + 4);
+ netif_wake_queue(dev);
+ return;
+ }
+
+ netif_stop_queue(dev);
+
+ STOP_LANCE(lp);
+ lp->init_ring(dev);
+
+ if (lp->pio_buffer)
+ mode = sbus_readw(&ib_iomem->mode);
+ else
+ mode = ib_mem->mode;
+ if (dev->flags & IFF_PROMISC) {
+ mode |= LE_MO_PROM;
+ if (lp->pio_buffer)
+ sbus_writew(mode, &ib_iomem->mode);
+ else
+ ib_mem->mode = mode;
+ } else {
+ mode &= ~LE_MO_PROM;
+ if (lp->pio_buffer)
+ sbus_writew(mode, &ib_iomem->mode);
+ else
+ ib_mem->mode = mode;
+ lance_load_multicast(dev);
+ }
+ load_csrs(lp);
+ init_restart_lance(lp);
+ netif_wake_queue(dev);
+}
+
+static void lance_set_multicast_retry(unsigned long _opaque)
+{
+ struct net_device *dev = (struct net_device *) _opaque;
+
+ lance_set_multicast(dev);
+}
+
+static void lance_free_hwresources(struct lance_private *lp)
+{
+ if (lp->lregs)
+ of_iounmap(&lp->op->resource[0], lp->lregs, LANCE_REG_SIZE);
+ if (lp->dregs) {
+ struct platform_device *ledma = lp->ledma;
+
+ of_iounmap(&ledma->resource[0], lp->dregs,
+ resource_size(&ledma->resource[0]));
+ }
+ if (lp->init_block_iomem) {
+ of_iounmap(&lp->lebuffer->resource[0], lp->init_block_iomem,
+ sizeof(struct lance_init_block));
+ } else if (lp->init_block_mem) {
+ dma_free_coherent(&lp->op->dev,
+ sizeof(struct lance_init_block),
+ lp->init_block_mem,
+ lp->init_block_dvma);
+ }
+}
+
+/* Ethtool support... */
+static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, "sunlance");
+ strcpy(info->version, "2.02");
+}
+
+static const struct ethtool_ops sparc_lance_ethtool_ops = {
+ .get_drvinfo = sparc_lance_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+static const struct net_device_ops sparc_lance_ops = {
+ .ndo_open = lance_open,
+ .ndo_stop = lance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_set_rx_mode = lance_set_multicast,
+ .ndo_tx_timeout = lance_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __devinit sparc_lance_probe_one(struct platform_device *op,
+ struct platform_device *ledma,
+ struct platform_device *lebuffer)
+{
+ struct device_node *dp = op->dev.of_node;
+ static unsigned version_printed;
+ struct lance_private *lp;
+ struct net_device *dev;
+ int i;
+
+ dev = alloc_etherdev(sizeof(struct lance_private) + 8);
+ if (!dev)
+ return -ENOMEM;
+
+ lp = netdev_priv(dev);
+
+ if (sparc_lance_debug && version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+
+ spin_lock_init(&lp->lock);
+
+ /* Copy the IDPROM ethernet address to the device structure, later we
+ * will copy the address in the device structure to the lance
+ * initialization block.
+ */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = idprom->id_ethaddr[i];
+
+ /* Get the IO region */
+ lp->lregs = of_ioremap(&op->resource[0], 0,
+ LANCE_REG_SIZE, lancestr);
+ if (!lp->lregs) {
+ printk(KERN_ERR "SunLance: Cannot map registers.\n");
+ goto fail;
+ }
+
+ lp->ledma = ledma;
+ if (lp->ledma) {
+ lp->dregs = of_ioremap(&ledma->resource[0], 0,
+ resource_size(&ledma->resource[0]),
+ "ledma");
+ if (!lp->dregs) {
+ printk(KERN_ERR "SunLance: Cannot map "
+ "ledma registers.\n");
+ goto fail;
+ }
+ }
+
+ lp->op = op;
+ lp->lebuffer = lebuffer;
+ if (lebuffer) {
+ /* sanity check */
+ if (lebuffer->resource[0].start & 7) {
+ printk(KERN_ERR "SunLance: ERROR: Rx and Tx rings not on even boundary.\n");
+ goto fail;
+ }
+ lp->init_block_iomem =
+ of_ioremap(&lebuffer->resource[0], 0,
+ sizeof(struct lance_init_block), "lebuffer");
+ if (!lp->init_block_iomem) {
+ printk(KERN_ERR "SunLance: Cannot map PIO buffer.\n");
+ goto fail;
+ }
+ lp->init_block_dvma = 0;
+ lp->pio_buffer = 1;
+ lp->init_ring = lance_init_ring_pio;
+ lp->rx = lance_rx_pio;
+ lp->tx = lance_tx_pio;
+ } else {
+ lp->init_block_mem =
+ dma_alloc_coherent(&op->dev,
+ sizeof(struct lance_init_block),
+ &lp->init_block_dvma, GFP_ATOMIC);
+ if (!lp->init_block_mem) {
+ printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n");
+ goto fail;
+ }
+ lp->pio_buffer = 0;
+ lp->init_ring = lance_init_ring_dvma;
+ lp->rx = lance_rx_dvma;
+ lp->tx = lance_tx_dvma;
+ }
+ lp->busmaster_regval = of_getintprop_default(dp, "busmaster-regval",
+ (LE_C3_BSWP |
+ LE_C3_ACON |
+ LE_C3_BCON));
+
+ lp->name = lancestr;
+
+ lp->burst_sizes = 0;
+ if (lp->ledma) {
+ struct device_node *ledma_dp = ledma->dev.of_node;
+ struct device_node *sbus_dp;
+ unsigned int sbmask;
+ const char *prop;
+ u32 csr;
+
+ /* Find burst-size property for ledma */
+ lp->burst_sizes = of_getintprop_default(ledma_dp,
+ "burst-sizes", 0);
+
+ /* ledma may be capable of fast bursts, but sbus may not. */
+ sbus_dp = ledma_dp->parent;
+ sbmask = of_getintprop_default(sbus_dp, "burst-sizes",
+ DMA_BURSTBITS);
+ lp->burst_sizes &= sbmask;
+
+ /* Get the cable-selection property */
+ prop = of_get_property(ledma_dp, "cable-selection", NULL);
+ if (!prop || prop[0] == '\0') {
+ struct device_node *nd;
+
+ printk(KERN_INFO "SunLance: using "
+ "auto-carrier-detection.\n");
+
+ nd = of_find_node_by_path("/options");
+ if (!nd)
+ goto no_link_test;
+
+ prop = of_get_property(nd, "tpe-link-test?", NULL);
+ if (!prop)
+ goto no_link_test;
+
+ if (strcmp(prop, "true")) {
+ printk(KERN_NOTICE "SunLance: warning: overriding option "
+ "'tpe-link-test?'\n");
+ printk(KERN_NOTICE "SunLance: warning: mail any problems "
+ "to ecd@skynet.be\n");
+ auxio_set_lte(AUXIO_LTE_ON);
+ }
+no_link_test:
+ lp->auto_select = 1;
+ lp->tpe = 0;
+ } else if (!strcmp(prop, "aui")) {
+ lp->auto_select = 0;
+ lp->tpe = 0;
+ } else {
+ lp->auto_select = 0;
+ lp->tpe = 1;
+ }
+
+ /* Reset ledma */
+ csr = sbus_readl(lp->dregs + DMA_CSR);
+ sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR);
+ udelay(200);
+ sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR);
+ } else
+ lp->dregs = NULL;
+
+ lp->dev = dev;
+ SET_NETDEV_DEV(dev, &op->dev);
+ dev->watchdog_timeo = 5*HZ;
+ dev->ethtool_ops = &sparc_lance_ethtool_ops;
+ dev->netdev_ops = &sparc_lance_ops;
+
+ dev->irq = op->archdata.irqs[0];
+
+ /* We cannot sleep if the chip is busy during a
+ * multicast list update event, because such events
+ * can occur from interrupts (ex. IPv6). So we
+ * use a timer to try again later when necessary. -DaveM
+ */
+ init_timer(&lp->multicast_timer);
+ lp->multicast_timer.data = (unsigned long) dev;
+ lp->multicast_timer.function = lance_set_multicast_retry;
+
+ if (register_netdev(dev)) {
+ printk(KERN_ERR "SunLance: Cannot register device.\n");
+ goto fail;
+ }
+
+ dev_set_drvdata(&op->dev, lp);
+
+ printk(KERN_INFO "%s: LANCE %pM\n",
+ dev->name, dev->dev_addr);
+
+ return 0;
+
+fail:
+ lance_free_hwresources(lp);
+ free_netdev(dev);
+ return -ENODEV;
+}
+
+static int __devinit sunlance_sbus_probe(struct platform_device *op)
+{
+ struct platform_device *parent = to_platform_device(op->dev.parent);
+ struct device_node *parent_dp = parent->dev.of_node;
+ int err;
+
+ if (!strcmp(parent_dp->name, "ledma")) {
+ err = sparc_lance_probe_one(op, parent, NULL);
+ } else if (!strcmp(parent_dp->name, "lebuffer")) {
+ err = sparc_lance_probe_one(op, NULL, parent);
+ } else
+ err = sparc_lance_probe_one(op, NULL, NULL);
+
+ return err;
+}
+
+static int __devexit sunlance_sbus_remove(struct platform_device *op)
+{
+ struct lance_private *lp = dev_get_drvdata(&op->dev);
+ struct net_device *net_dev = lp->dev;
+
+ unregister_netdev(net_dev);
+
+ lance_free_hwresources(lp);
+
+ free_netdev(net_dev);
+
+ dev_set_drvdata(&op->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id sunlance_sbus_match[] = {
+ {
+ .name = "le",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, sunlance_sbus_match);
+
+static struct platform_driver sunlance_sbus_driver = {
+ .driver = {
+ .name = "sunlance",
+ .owner = THIS_MODULE,
+ .of_match_table = sunlance_sbus_match,
+ },
+ .probe = sunlance_sbus_probe,
+ .remove = __devexit_p(sunlance_sbus_remove),
+};
+
+
+/* Find all the lance cards on the system and initialize them */
+static int __init sparc_lance_init(void)
+{
+ return platform_driver_register(&sunlance_sbus_driver);
+}
+
+static void __exit sparc_lance_exit(void)
+{
+ platform_driver_unregister(&sunlance_sbus_driver);
+}
+
+module_init(sparc_lance_init);
+module_exit(sparc_lance_exit);
diff --git a/drivers/net/ethernet/apple/Kconfig b/drivers/net/ethernet/apple/Kconfig
new file mode 100644
index 000000000000..59d5c2630acb
--- /dev/null
+++ b/drivers/net/ethernet/apple/Kconfig
@@ -0,0 +1,97 @@
+#
+# Apple device configuration
+#
+
+config NET_VENDOR_APPLE
+ bool "Apple devices"
+ default y
+ depends on (PPC_PMAC && PPC32) || MAC || ISA || EISA || MACH_IXDP2351 \
+ || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Apple devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_APPLE
+
+config MACE
+ tristate "MACE (Power Mac ethernet) support"
+ depends on PPC_PMAC && PPC32
+ select CRC32
+ ---help---
+ Power Macintoshes and clones with Ethernet built-in on the
+ motherboard will usually use a MACE (Medium Access Control for
+ Ethernet) interface. Say Y to include support for the MACE chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mace.
+
+config MACE_AAUI_PORT
+ bool "Use AAUI port instead of TP by default"
+ depends on MACE
+ ---help---
+ Some Apple machines (notably the Apple Network Server) which use the
+ MACE ethernet chip have an Apple AUI port (small 15-pin connector),
+ instead of an 8-pin RJ45 connector for twisted-pair ethernet. Say
+ Y here if you have such a machine. If unsure, say N.
+ The driver will default to AAUI on ANS anyway, and if you use it as
+ a module, you can pass the port_aaui=0|1 parameter to force the port.
+
+config BMAC
+ tristate "BMAC (G3 ethernet) support"
+ depends on PPC_PMAC && PPC32
+ select CRC32
+ ---help---
+ Say Y for support of BMAC Ethernet interfaces. These are used on G3
+ computers.
+
+ To compile this driver as a module, choose M here: the module
+ will be called bmac.
+
+config MAC89x0
+ tristate "Macintosh CS89x0 based ethernet cards"
+ depends on MAC
+ ---help---
+ Support for CS89x0 chipset based Ethernet cards. If you have a
+ Nubus or LC-PDS network (Ethernet) card of this type, say Y and
+ read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. This module will
+ be called mac89x0.
+
+config MACMACE
+ bool "Macintosh (AV) onboard MACE ethernet"
+ depends on MAC
+ select CRC32
+ ---help---
+ Support for the onboard AMD 79C940 MACE Ethernet controller used in
+ the 660AV and 840AV Macintosh. If you have one of these Macintoshes
+ say Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+config CS89x0
+ tristate "CS89x0 support"
+ depends on (ISA || EISA || MACH_IXDP2351 \
+ || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440)
+ ---help---
+ Support for CS89x0 chipset based Ethernet cards. If you have a
+ network (Ethernet) card of this type, say Y and read the
+ Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto> as well as
+ <file:Documentation/networking/cs89x0.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called cs89x0.
+
+config CS89x0_NONISA_IRQ
+ def_bool y
+ depends on CS89x0 != n
+ depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440
+
+endif # NET_VENDOR_APPLE
diff --git a/drivers/net/ethernet/apple/Makefile b/drivers/net/ethernet/apple/Makefile
new file mode 100644
index 000000000000..9d300864461f
--- /dev/null
+++ b/drivers/net/ethernet/apple/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the Apple network device drivers.
+#
+
+obj-$(CONFIG_MACE) += mace.o
+obj-$(CONFIG_BMAC) += bmac.o
+obj-$(CONFIG_MAC89x0) += mac89x0.o
+obj-$(CONFIG_CS89x0) += cs89x0.o
+obj-$(CONFIG_MACMACE) += macmace.o
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
new file mode 100644
index 000000000000..d070b229dbf7
--- /dev/null
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -0,0 +1,1685 @@
+/*
+ * Network device driver for the BMAC ethernet controller on
+ * Apple Powermacs. Assumes it's under a DBDMA controller.
+ *
+ * Copyright (C) 1998 Randy Gobbel.
+ *
+ * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
+ * dynamic procfs inode.
+ */
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/crc32.h>
+#include <linux/bitrev.h>
+#include <linux/ethtool.h>
+#include <linux/slab.h>
+#include <asm/prom.h>
+#include <asm/dbdma.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/macio.h>
+#include <asm/irq.h>
+
+#include "bmac.h"
+
+#define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
+#define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
+
+/*
+ * CRC polynomial - used in working out multicast filter bits.
+ */
+#define ENET_CRCPOLY 0x04c11db7
+
+/* switch to use multicast code lifted from sunhme driver */
+#define SUNHME_MULTICAST
+
+#define N_RX_RING 64
+#define N_TX_RING 32
+#define MAX_TX_ACTIVE 1
+#define ETHERCRC 4
+#define ETHERMINPACKET 64
+#define ETHERMTU 1500
+#define RX_BUFLEN (ETHERMTU + 14 + ETHERCRC + 2)
+#define TX_TIMEOUT HZ /* 1 second */
+
+/* Bits in transmit DMA status */
+#define TX_DMA_ERR 0x80
+
+#define XXDEBUG(args)
+
+struct bmac_data {
+ /* volatile struct bmac *bmac; */
+ struct sk_buff_head *queue;
+ volatile struct dbdma_regs __iomem *tx_dma;
+ int tx_dma_intr;
+ volatile struct dbdma_regs __iomem *rx_dma;
+ int rx_dma_intr;
+ volatile struct dbdma_cmd *tx_cmds; /* xmit dma command list */
+ volatile struct dbdma_cmd *rx_cmds; /* recv dma command list */
+ struct macio_dev *mdev;
+ int is_bmac_plus;
+ struct sk_buff *rx_bufs[N_RX_RING];
+ int rx_fill;
+ int rx_empty;
+ struct sk_buff *tx_bufs[N_TX_RING];
+ int tx_fill;
+ int tx_empty;
+ unsigned char tx_fullup;
+ struct timer_list tx_timeout;
+ int timeout_active;
+ int sleeping;
+ int opened;
+ unsigned short hash_use_count[64];
+ unsigned short hash_table_mask[4];
+ spinlock_t lock;
+};
+
+#if 0 /* Move that to ethtool */
+
+typedef struct bmac_reg_entry {
+ char *name;
+ unsigned short reg_offset;
+} bmac_reg_entry_t;
+
+#define N_REG_ENTRIES 31
+
+static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
+ {"MEMADD", MEMADD},
+ {"MEMDATAHI", MEMDATAHI},
+ {"MEMDATALO", MEMDATALO},
+ {"TXPNTR", TXPNTR},
+ {"RXPNTR", RXPNTR},
+ {"IPG1", IPG1},
+ {"IPG2", IPG2},
+ {"ALIMIT", ALIMIT},
+ {"SLOT", SLOT},
+ {"PALEN", PALEN},
+ {"PAPAT", PAPAT},
+ {"TXSFD", TXSFD},
+ {"JAM", JAM},
+ {"TXCFG", TXCFG},
+ {"TXMAX", TXMAX},
+ {"TXMIN", TXMIN},
+ {"PAREG", PAREG},
+ {"DCNT", DCNT},
+ {"NCCNT", NCCNT},
+ {"NTCNT", NTCNT},
+ {"EXCNT", EXCNT},
+ {"LTCNT", LTCNT},
+ {"TXSM", TXSM},
+ {"RXCFG", RXCFG},
+ {"RXMAX", RXMAX},
+ {"RXMIN", RXMIN},
+ {"FRCNT", FRCNT},
+ {"AECNT", AECNT},
+ {"FECNT", FECNT},
+ {"RXSM", RXSM},
+ {"RXCV", RXCV}
+};
+
+#endif
+
+static unsigned char *bmac_emergency_rxbuf;
+
+/*
+ * Number of bytes of private data per BMAC: allow enough for
+ * the rx and tx dma commands plus a branch dma command each,
+ * and another 16 bytes to allow us to align the dma command
+ * buffers on a 16 byte boundary.
+ */
+#define PRIV_BYTES (sizeof(struct bmac_data) \
+ + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
+ + sizeof(struct sk_buff_head))
+
+static int bmac_open(struct net_device *dev);
+static int bmac_close(struct net_device *dev);
+static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
+static void bmac_set_multicast(struct net_device *dev);
+static void bmac_reset_and_enable(struct net_device *dev);
+static void bmac_start_chip(struct net_device *dev);
+static void bmac_init_chip(struct net_device *dev);
+static void bmac_init_registers(struct net_device *dev);
+static void bmac_enable_and_reset_chip(struct net_device *dev);
+static int bmac_set_address(struct net_device *dev, void *addr);
+static irqreturn_t bmac_misc_intr(int irq, void *dev_id);
+static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
+static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
+static void bmac_set_timeout(struct net_device *dev);
+static void bmac_tx_timeout(unsigned long data);
+static int bmac_output(struct sk_buff *skb, struct net_device *dev);
+static void bmac_start(struct net_device *dev);
+
+#define DBDMA_SET(x) ( ((x) | (x) << 16) )
+#define DBDMA_CLEAR(x) ( (x) << 16)
+
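+/*
+ * DBDMA registers are little-endian, so these helpers use the
+ * byte-reversed load/store instructions to access them from
+ * big-endian PowerPC.
+ */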
+static inline void
+dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
+{
+ __asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
+}
+
+static inline unsigned long
+dbdma_ld32(volatile __u32 __iomem *a)
+{
+ __u32 swap;
+ __asm__ volatile ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a));
+ return swap;
+}
+
+static void
+dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
+{
+ dbdma_st32(&dmap->control,
+ DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
+ eieio();
+}
+
+static void
+dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
+{
+ dbdma_st32(&dmap->control,
+ DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
+ eieio();
+ while (dbdma_ld32(&dmap->status) & RUN)
+ eieio();
+}
+
+static void
+dbdma_setcmd(volatile struct dbdma_cmd *cp,
+ unsigned short cmd, unsigned count, unsigned long addr,
+ unsigned long cmd_dep)
+{
+ out_le16(&cp->command, cmd);
+ out_le16(&cp->req_count, count);
+ out_le32(&cp->phy_addr, addr);
+ out_le32(&cp->cmd_dep, cmd_dep);
+ out_le16(&cp->xfer_status, 0);
+ out_le16(&cp->res_count, 0);
+}
+
+static inline
+void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
+{
+ out_le16((void __iomem *)dev->base_addr + reg_offset, data);
+}
+
+
+static inline
+unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
+{
+ return in_le16((void __iomem *)dev->base_addr + reg_offset);
+}
+
+static void
+bmac_enable_and_reset_chip(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ volatile struct dbdma_regs __iomem *td = bp->tx_dma;
+
+ if (rd)
+ dbdma_reset(rd);
+ if (td)
+ dbdma_reset(td);
+
+ pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
+}
+
+#define MIFDELAY udelay(10)
+
+static unsigned int
+bmac_mif_readbits(struct net_device *dev, int nb)
+{
+ unsigned int val = 0;
+
+ while (--nb >= 0) {
+ bmwrite(dev, MIFCSR, 0);
+ MIFDELAY;
+ if (bmread(dev, MIFCSR) & 8)
+ val |= 1 << nb;
+ bmwrite(dev, MIFCSR, 1);
+ MIFDELAY;
+ }
+ bmwrite(dev, MIFCSR, 0);
+ MIFDELAY;
+ bmwrite(dev, MIFCSR, 1);
+ MIFDELAY;
+ return val;
+}
+
+static void
+bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
+{
+ int b;
+
+ while (--nb >= 0) {
+ b = (val & (1 << nb))? 6: 4;
+ bmwrite(dev, MIFCSR, b);
+ MIFDELAY;
+ bmwrite(dev, MIFCSR, b|1);
+ MIFDELAY;
+ }
+}
+
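+/*
+ * Bit-banged MII management access: each frame is a 32-bit preamble
+ * of ones, start and opcode bits, ten address bits (PHY and register
+ * number), then a turnaround and sixteen data bits.
+ */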
+static unsigned int
+bmac_mif_read(struct net_device *dev, unsigned int addr)
+{
+ unsigned int val;
+
+ bmwrite(dev, MIFCSR, 4);
+ MIFDELAY;
+ bmac_mif_writebits(dev, ~0U, 32);
+ bmac_mif_writebits(dev, 6, 4);
+ bmac_mif_writebits(dev, addr, 10);
+ bmwrite(dev, MIFCSR, 2);
+ MIFDELAY;
+ bmwrite(dev, MIFCSR, 1);
+ MIFDELAY;
+ val = bmac_mif_readbits(dev, 17);
+ bmwrite(dev, MIFCSR, 4);
+ MIFDELAY;
+ return val;
+}
+
+static void
+bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
+{
+ bmwrite(dev, MIFCSR, 4);
+ MIFDELAY;
+ bmac_mif_writebits(dev, ~0U, 32);
+ bmac_mif_writebits(dev, 5, 4);
+ bmac_mif_writebits(dev, addr, 10);
+ bmac_mif_writebits(dev, 2, 2);
+ bmac_mif_writebits(dev, val, 16);
+ bmac_mif_writebits(dev, 3, 2);
+}
+
+static void
+bmac_init_registers(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile unsigned short regValue;
+ unsigned short *pWord16;
+ int i;
+
+ /* XXDEBUG(("bmac: enter init_registers\n")); */
+
+ bmwrite(dev, RXRST, RxResetValue);
+ bmwrite(dev, TXRST, TxResetBit);
+
+ i = 100;
+ do {
+ --i;
+ udelay(10000);
+ regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
+ } while ((regValue & TxResetBit) && i > 0);
+
+ if (!bp->is_bmac_plus) {
+ regValue = bmread(dev, XCVRIF);
+ regValue |= ClkBit | SerialMode | COLActiveLow;
+ bmwrite(dev, XCVRIF, regValue);
+ udelay(10000);
+ }
+
+ bmwrite(dev, RSEED, (unsigned short)0x1968);
+
+ regValue = bmread(dev, XIFC);
+ regValue |= TxOutputEnable;
+ bmwrite(dev, XIFC, regValue);
+
+ bmread(dev, PAREG);
+
+ /* set collision counters to 0 */
+ bmwrite(dev, NCCNT, 0);
+ bmwrite(dev, NTCNT, 0);
+ bmwrite(dev, EXCNT, 0);
+ bmwrite(dev, LTCNT, 0);
+
+ /* set rx counters to 0 */
+ bmwrite(dev, FRCNT, 0);
+ bmwrite(dev, LECNT, 0);
+ bmwrite(dev, AECNT, 0);
+ bmwrite(dev, FECNT, 0);
+ bmwrite(dev, RXCV, 0);
+
+ /* set tx fifo information */
+ bmwrite(dev, TXTH, 4); /* 4 octets before tx starts */
+
+ bmwrite(dev, TXFIFOCSR, 0); /* first disable txFIFO */
+ bmwrite(dev, TXFIFOCSR, TxFIFOEnable );
+
+ /* set rx fifo information */
+ bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */
+ bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
+
+ //bmwrite(dev, TXCFG, TxMACEnable); /* TxNeverGiveUp maybe later */
+ bmread(dev, STATUS); /* read it just to clear it */
+
+ /* zero out the chip Hash Filter registers */
+ for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
+ bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
+ bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
+ bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
+ bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
+
+ pWord16 = (unsigned short *)dev->dev_addr;
+ bmwrite(dev, MADD0, *pWord16++);
+ bmwrite(dev, MADD1, *pWord16++);
+ bmwrite(dev, MADD2, *pWord16);
+
+ bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);
+
+ bmwrite(dev, INTDISABLE, EnableNormal);
+}
+
+#if 0
+static void
+bmac_disable_interrupts(struct net_device *dev)
+{
+ bmwrite(dev, INTDISABLE, DisableAll);
+}
+
+static void
+bmac_enable_interrupts(struct net_device *dev)
+{
+ bmwrite(dev, INTDISABLE, EnableNormal);
+}
+#endif
+
+
+static void
+bmac_start_chip(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ unsigned short oldConfig;
+
+ /* enable rx dma channel */
+ dbdma_continue(rd);
+
+ oldConfig = bmread(dev, TXCFG);
+ bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
+
+ /* turn on rx plus any other bits already on (promiscuous possibly) */
+ oldConfig = bmread(dev, RXCFG);
+ bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
+ udelay(20000);
+}
+
+static void
+bmac_init_phy(struct net_device *dev)
+{
+ unsigned int addr;
+ struct bmac_data *bp = netdev_priv(dev);
+
+ printk(KERN_DEBUG "phy registers:");
+ for (addr = 0; addr < 32; ++addr) {
+ if ((addr & 7) == 0)
+ printk(KERN_DEBUG);
+ printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr));
+ }
+ printk(KERN_CONT "\n");
+
+ if (bp->is_bmac_plus) {
+ unsigned int capable, ctrl;
+
+ ctrl = bmac_mif_read(dev, 0);
+ capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
+ if (bmac_mif_read(dev, 4) != capable ||
+ (ctrl & 0x1000) == 0) {
+ bmac_mif_write(dev, 4, capable);
+ bmac_mif_write(dev, 0, 0x1200);
+ } else
+ bmac_mif_write(dev, 0, 0x1000);
+ }
+}
+
+static void bmac_init_chip(struct net_device *dev)
+{
+ bmac_init_phy(dev);
+ bmac_init_registers(dev);
+}
+
+#ifdef CONFIG_PM
+static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
+{
+ struct net_device* dev = macio_get_drvdata(mdev);
+ struct bmac_data *bp = netdev_priv(dev);
+ unsigned long flags;
+ unsigned short config;
+ int i;
+
+ netif_device_detach(dev);
+ /* probably should wait for dma to finish & turn off the chip */
+ spin_lock_irqsave(&bp->lock, flags);
+ if (bp->timeout_active) {
+ del_timer(&bp->tx_timeout);
+ bp->timeout_active = 0;
+ }
+ disable_irq(dev->irq);
+ disable_irq(bp->tx_dma_intr);
+ disable_irq(bp->rx_dma_intr);
+ bp->sleeping = 1;
+ spin_unlock_irqrestore(&bp->lock, flags);
+ if (bp->opened) {
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ volatile struct dbdma_regs __iomem *td = bp->tx_dma;
+
+ config = bmread(dev, RXCFG);
+ bmwrite(dev, RXCFG, (config & ~RxMACEnable));
+ config = bmread(dev, TXCFG);
+ bmwrite(dev, TXCFG, (config & ~TxMACEnable));
+ bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
+ /* disable rx and tx dma */
+ st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
+ st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
+ /* free some skb's */
+ for (i=0; i<N_RX_RING; i++) {
+ if (bp->rx_bufs[i] != NULL) {
+ dev_kfree_skb(bp->rx_bufs[i]);
+ bp->rx_bufs[i] = NULL;
+ }
+ }
+ for (i = 0; i<N_TX_RING; i++) {
+ if (bp->tx_bufs[i] != NULL) {
+ dev_kfree_skb(bp->tx_bufs[i]);
+ bp->tx_bufs[i] = NULL;
+ }
+ }
+ }
+ pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
+ return 0;
+}
+
+static int bmac_resume(struct macio_dev *mdev)
+{
+ struct net_device* dev = macio_get_drvdata(mdev);
+ struct bmac_data *bp = netdev_priv(dev);
+
+ /* see if this is enough */
+ if (bp->opened)
+ bmac_reset_and_enable(dev);
+
+ enable_irq(dev->irq);
+ enable_irq(bp->tx_dma_intr);
+ enable_irq(bp->rx_dma_intr);
+ netif_device_attach(dev);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static int bmac_set_address(struct net_device *dev, void *addr)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ unsigned char *p = addr;
+ unsigned short *pWord16;
+ unsigned long flags;
+ int i;
+
+ XXDEBUG(("bmac: enter set_address\n"));
+ spin_lock_irqsave(&bp->lock, flags);
+
+ for (i = 0; i < 6; ++i) {
+ dev->dev_addr[i] = p[i];
+ }
+ /* load up the hardware address */
+ pWord16 = (unsigned short *)dev->dev_addr;
+ bmwrite(dev, MADD0, *pWord16++);
+ bmwrite(dev, MADD1, *pWord16++);
+ bmwrite(dev, MADD2, *pWord16);
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+ XXDEBUG(("bmac: exit set_address\n"));
+ return 0;
+}
+
+static inline void bmac_set_timeout(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ if (bp->timeout_active)
+ del_timer(&bp->tx_timeout);
+ bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
+ bp->tx_timeout.function = bmac_tx_timeout;
+ bp->tx_timeout.data = (unsigned long) dev;
+ add_timer(&bp->tx_timeout);
+ bp->timeout_active = 1;
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static void
+bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
+{
+ void *vaddr;
+ unsigned long baddr;
+ unsigned long len;
+
+ len = skb->len;
+ vaddr = skb->data;
+ baddr = virt_to_bus(vaddr);
+
+ dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
+}
+
+static void
+bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
+{
+ unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;
+
+ dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
+ virt_to_bus(addr), 0);
+}
+
+static void
+bmac_init_tx_ring(struct bmac_data *bp)
+{
+ volatile struct dbdma_regs __iomem *td = bp->tx_dma;
+
+ memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));
+
+ bp->tx_empty = 0;
+ bp->tx_fill = 0;
+ bp->tx_fullup = 0;
+
+ /* put a branch at the end of the tx command list */
+ dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
+ (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));
+
+ /* reset tx dma */
+ dbdma_reset(td);
+ out_le32(&td->wait_sel, 0x00200020);
+ out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
+}
+
+static int
+bmac_init_rx_ring(struct bmac_data *bp)
+{
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ int i;
+ struct sk_buff *skb;
+
+ /* initialize list of sk_buffs for receiving and set up recv dma */
+ memset((char *)bp->rx_cmds, 0,
+ (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
+ for (i = 0; i < N_RX_RING; i++) {
+ if ((skb = bp->rx_bufs[i]) == NULL) {
+ bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
+ if (skb != NULL)
+ skb_reserve(skb, 2);
+ }
+ bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
+ }
+
+ bp->rx_empty = 0;
+ bp->rx_fill = i;
+
+ /* Put a branch back to the beginning of the receive command list */
+ dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
+ (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));
+
+ /* start rx dma */
+ dbdma_reset(rd);
+ out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));
+
+ return 1;
+}
+
+
+static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_regs __iomem *td = bp->tx_dma;
+ int i;
+
+ /* see if there's a free slot in the tx ring */
+ /* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
+ /* bp->tx_empty, bp->tx_fill)); */
+ i = bp->tx_fill + 1;
+ if (i >= N_TX_RING)
+ i = 0;
+ if (i == bp->tx_empty) {
+ netif_stop_queue(dev);
+ bp->tx_fullup = 1;
+ XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
+ return -1; /* can't take it at the moment */
+ }
+
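+ /* Park a STOP command in the next slot so the channel halts there;
+  * it is overwritten when the following frame is queued.
+  */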
+ dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);
+
+ bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);
+
+ bp->tx_bufs[bp->tx_fill] = skb;
+ bp->tx_fill = i;
+
+ dev->stats.tx_bytes += skb->len;
+
+ dbdma_continue(td);
+
+ return 0;
+}
+
+static int rxintcount;
+
+static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ volatile struct dbdma_cmd *cp;
+ int i, nb, stat;
+ struct sk_buff *skb;
+ unsigned int residual;
+ int last;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ if (++rxintcount < 10) {
+ XXDEBUG(("bmac_rxdma_intr\n"));
+ }
+
+ last = -1;
+ i = bp->rx_empty;
+
+ while (1) {
+ cp = &bp->rx_cmds[i];
+ stat = ld_le16(&cp->xfer_status);
+ residual = ld_le16(&cp->res_count);
+ if ((stat & ACTIVE) == 0)
+ break;
+ nb = RX_BUFLEN - residual - 2;
+ if (nb < (ETHERMINPACKET - ETHERCRC)) {
+ skb = NULL;
+ dev->stats.rx_length_errors++;
+ dev->stats.rx_errors++;
+ } else {
+ skb = bp->rx_bufs[i];
+ bp->rx_bufs[i] = NULL;
+ }
+ if (skb != NULL) {
+ nb -= ETHERCRC;
+ skb_put(skb, nb);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ ++dev->stats.rx_packets;
+ dev->stats.rx_bytes += nb;
+ } else {
+ ++dev->stats.rx_dropped;
+ }
+ if ((skb = bp->rx_bufs[i]) == NULL) {
+ bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
+ if (skb != NULL)
+ skb_reserve(bp->rx_bufs[i], 2);
+ }
+ bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
+ st_le16(&cp->res_count, 0);
+ st_le16(&cp->xfer_status, 0);
+ last = i;
+ if (++i >= N_RX_RING) i = 0;
+ }
+
+ if (last != -1) {
+ bp->rx_fill = last;
+ bp->rx_empty = i;
+ }
+
+ dbdma_continue(rd);
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ if (rxintcount < 10) {
+ XXDEBUG(("bmac_rxdma_intr done\n"));
+ }
+ return IRQ_HANDLED;
+}
+
+static int txintcount;
+
+static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_cmd *cp;
+ int stat;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ if (txintcount++ < 10) {
+ XXDEBUG(("bmac_txdma_intr\n"));
+ }
+
+ /* del_timer(&bp->tx_timeout); */
+ /* bp->timeout_active = 0; */
+
+ while (1) {
+ cp = &bp->tx_cmds[bp->tx_empty];
+ stat = ld_le16(&cp->xfer_status);
+ if (txintcount < 10) {
+ XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
+ }
+ if (!(stat & ACTIVE)) {
+ /*
+ * status field might not have been filled by DBDMA
+ */
+ if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
+ break;
+ }
+
+ if (bp->tx_bufs[bp->tx_empty]) {
+ ++dev->stats.tx_packets;
+ dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
+ }
+ bp->tx_bufs[bp->tx_empty] = NULL;
+ bp->tx_fullup = 0;
+ netif_wake_queue(dev);
+ if (++bp->tx_empty >= N_TX_RING)
+ bp->tx_empty = 0;
+ if (bp->tx_empty == bp->tx_fill)
+ break;
+ }
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ if (txintcount < 10) {
+ XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
+ }
+
+ bmac_start(dev);
+ return IRQ_HANDLED;
+}
+
+#ifndef SUNHME_MULTICAST
+/* Real fast bit-reversal algorithm, 6-bit values */
+static int reverse6[64] = {
+ 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
+ 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
+ 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
+ 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
+ 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
+ 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
+ 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
+ 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
+};
+
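+/*
+ * crc416() folds sixteen data bits at a time into a CRC-32 over
+ * ENET_CRCPOLY; bmac_crc() runs it across the 48-bit station address.
+ */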
+static unsigned int
+crc416(unsigned int curval, unsigned short nxtval)
+{
+ register unsigned int counter, cur = curval, next = nxtval;
+ register int high_crc_set, low_data_set;
+
+ /* Swap bytes */
+ next = ((next & 0x00FF) << 8) | (next >> 8);
+
+ /* Compute bit-by-bit */
+ for (counter = 0; counter < 16; ++counter) {
+ /* is high CRC bit set? */
+ if ((cur & 0x80000000) == 0) high_crc_set = 0;
+ else high_crc_set = 1;
+
+ cur = cur << 1;
+
+ if ((next & 0x0001) == 0) low_data_set = 0;
+ else low_data_set = 1;
+
+ next = next >> 1;
+
+ /* do the XOR */
+ if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
+ }
+ return cur;
+}
+
+static unsigned int
+bmac_crc(unsigned short *address)
+{
+ unsigned int newcrc;
+
+ XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
+ newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */
+ newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */
+ newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */
+
+ return(newcrc);
+}
+
+/*
+ * Add requested mcast addr to BMac's hash table filter.
+ *
+ */
+
+static void
+bmac_addhash(struct bmac_data *bp, unsigned char *addr)
+{
+ unsigned int crc;
+ unsigned short mask;
+
+ if (!(*addr)) return;
+ crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
+ crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
+ if (bp->hash_use_count[crc]++) return; /* This bit is already set */
+ mask = crc % 16;
+ mask = (unsigned char)1 << mask;
+ bp->hash_table_mask[crc/16] |= mask;
+}
+
+static void
+bmac_removehash(struct bmac_data *bp, unsigned char *addr)
+{
+ unsigned int crc;
+ unsigned short mask;
+
+ /* Now, delete the address from the filter copy, as indicated */
+ crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
+ crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
+ if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
+ if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
+ mask = crc % 16;
+ mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */
+ bp->hash_table_mask[crc/16] &= mask;
+}
+
+/*
+ * Sync the adapter with the software copy of the multicast mask
+ * (logical address filter).
+ */
+
+static void
+bmac_rx_off(struct net_device *dev)
+{
+ unsigned short rx_cfg;
+
+ rx_cfg = bmread(dev, RXCFG);
+ rx_cfg &= ~RxMACEnable;
+ bmwrite(dev, RXCFG, rx_cfg);
+ do {
+ rx_cfg = bmread(dev, RXCFG);
+ } while (rx_cfg & RxMACEnable);
+}
+
+static unsigned short
+bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
+{
+ unsigned short rx_cfg;
+
+ rx_cfg = bmread(dev, RXCFG);
+ rx_cfg |= RxMACEnable;
+ if (hash_enable) rx_cfg |= RxHashFilterEnable;
+ else rx_cfg &= ~RxHashFilterEnable;
+ if (promisc_enable) rx_cfg |= RxPromiscEnable;
+ else rx_cfg &= ~RxPromiscEnable;
+ bmwrite(dev, RXRST, RxResetValue);
+ bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */
+ bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
+ bmwrite(dev, RXCFG, rx_cfg );
+ return rx_cfg;
+}
+
+static void
+bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
+{
+ bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
+ bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
+ bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
+ bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
+}
+
+#if 0
+static void
+bmac_add_multi(struct net_device *dev,
+ struct bmac_data *bp, unsigned char *addr)
+{
+ /* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
+ bmac_addhash(bp, addr);
+ bmac_rx_off(dev);
+ bmac_update_hash_table_mask(dev, bp);
+ bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
+ /* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
+}
+
+static void
+bmac_remove_multi(struct net_device *dev,
+ struct bmac_data *bp, unsigned char *addr)
+{
+ bmac_removehash(bp, addr);
+ bmac_rx_off(dev);
+ bmac_update_hash_table_mask(dev, bp);
+ bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
+}
+#endif
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+ */
+static void bmac_set_multicast(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ struct bmac_data *bp = netdev_priv(dev);
+ int num_addrs = netdev_mc_count(dev);
+ unsigned short rx_cfg;
+ int i;
+
+ if (bp->sleeping)
+ return;
+
+ XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
+
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
+ for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
+ bmac_update_hash_table_mask(dev, bp);
+ rx_cfg = bmac_rx_on(dev, 1, 0);
+ XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n", rx_cfg));
+ } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
+ rx_cfg = bmread(dev, RXCFG);
+ rx_cfg |= RxPromiscEnable;
+ bmwrite(dev, RXCFG, rx_cfg);
+ rx_cfg = bmac_rx_on(dev, 0, 1);
+ XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
+ } else {
+ for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
+ for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
+ if (num_addrs == 0) {
+ rx_cfg = bmac_rx_on(dev, 0, 0);
+ XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
+ } else {
+ netdev_for_each_mc_addr(ha, dev)
+ bmac_addhash(bp, ha->addr);
+ bmac_update_hash_table_mask(dev, bp);
+ rx_cfg = bmac_rx_on(dev, 1, 0);
+ XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
+ }
+ }
+ /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
+}
+#else /* ifdef SUNHME_MULTICAST */
+
+/* The version of set_multicast below was lifted from sunhme.c */
+
+static void bmac_set_multicast(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ int i;
+ unsigned short rx_cfg;
+ u32 crc;
+
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
+ bmwrite(dev, BHASH0, 0xffff);
+ bmwrite(dev, BHASH1, 0xffff);
+ bmwrite(dev, BHASH2, 0xffff);
+ bmwrite(dev, BHASH3, 0xffff);
+ } else if (dev->flags & IFF_PROMISC) {
+ rx_cfg = bmread(dev, RXCFG);
+ rx_cfg |= RxPromiscEnable;
+ bmwrite(dev, RXCFG, rx_cfg);
+ } else {
+ u16 hash_table[4];
+
+ rx_cfg = bmread(dev, RXCFG);
+ rx_cfg &= ~RxPromiscEnable;
+ bmwrite(dev, RXCFG, rx_cfg);
+
+ for (i = 0; i < 4; i++) hash_table[i] = 0;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
+ crc >>= 26;
+ hash_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
+ bmwrite(dev, BHASH0, hash_table[0]);
+ bmwrite(dev, BHASH1, hash_table[1]);
+ bmwrite(dev, BHASH2, hash_table[2]);
+ bmwrite(dev, BHASH3, hash_table[3]);
+ }
+}
+#endif /* SUNHME_MULTICAST */
+
+static int miscintcount;
+
+static irqreturn_t bmac_misc_intr(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ unsigned int status = bmread(dev, STATUS);
+ if (miscintcount++ < 10) {
+ XXDEBUG(("bmac_misc_intr\n"));
+ }
+ /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
+ /* bmac_txdma_intr_inner(irq, dev_id); */
+ /* if (status & FrameReceived) dev->stats.rx_dropped++; */
+ if (status & RxErrorMask) dev->stats.rx_errors++;
+ if (status & RxCRCCntExp) dev->stats.rx_crc_errors++;
+ if (status & RxLenCntExp) dev->stats.rx_length_errors++;
+ if (status & RxOverFlow) dev->stats.rx_over_errors++;
+ if (status & RxAlignCntExp) dev->stats.rx_frame_errors++;
+
+ /* if (status & FrameSent) dev->stats.tx_dropped++; */
+ if (status & TxErrorMask) dev->stats.tx_errors++;
+ if (status & TxUnderrun) dev->stats.tx_fifo_errors++;
+ if (status & TxNormalCollExp) dev->stats.collisions++;
+ return IRQ_HANDLED;
+}
+
+/*
+ * Procedure for reading EEPROM
+ */
+#define SROMAddressLength 5
+#define DataInOn 0x0008
+#define DataInOff 0x0000
+#define Clk 0x0002
+#define ChipSelect 0x0001
+#define SDIShiftCount 3
+#define SD0ShiftCount 2
+#define DelayValue 1000 /* number of microseconds */
+#define SROMStartOffset 10 /* this is in words */
+#define SROMReadCount 3 /* number of words to read from SROM */
+#define SROMAddressBits 6
+#define EnetAddressOffset 20
+
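+/*
+ * The code below bit-bangs what appears to be a Microwire-style
+ * serial EEPROM: a start bit and two-bit read opcode (the "110"
+ * sequence), the word address, then sixteen data bits clocked out.
+ */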
+static unsigned char
+bmac_clock_out_bit(struct net_device *dev)
+{
+ unsigned short data;
+ unsigned short val;
+
+ bmwrite(dev, SROMCSR, ChipSelect | Clk);
+ udelay(DelayValue);
+
+ data = bmread(dev, SROMCSR);
+ udelay(DelayValue);
+ val = (data >> SD0ShiftCount) & 1;
+
+ bmwrite(dev, SROMCSR, ChipSelect);
+ udelay(DelayValue);
+
+ return val;
+}
+
+static void
+bmac_clock_in_bit(struct net_device *dev, unsigned int val)
+{
+ unsigned short data;
+
+ if (val != 0 && val != 1) return;
+
+ data = (val << SDIShiftCount);
+ bmwrite(dev, SROMCSR, data | ChipSelect );
+ udelay(DelayValue);
+
+ bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
+ udelay(DelayValue);
+
+ bmwrite(dev, SROMCSR, data | ChipSelect);
+ udelay(DelayValue);
+}
+
+static void
+reset_and_select_srom(struct net_device *dev)
+{
+ /* first reset */
+ bmwrite(dev, SROMCSR, 0);
+ udelay(DelayValue);
+
+ /* send it the read command (110) */
+ bmac_clock_in_bit(dev, 1);
+ bmac_clock_in_bit(dev, 1);
+ bmac_clock_in_bit(dev, 0);
+}
+
+static unsigned short
+read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
+{
+ unsigned short data, val;
+ int i;
+
+ /* send out the address we want to read from */
+ for (i = 0; i < addr_len; i++) {
+ val = addr >> (addr_len-i-1);
+ bmac_clock_in_bit(dev, val & 1);
+ }
+
+ /* Now read in the 16-bit data */
+ data = 0;
+ for (i = 0; i < 16; i++) {
+ val = bmac_clock_out_bit(dev);
+ data <<= 1;
+ data |= val;
+ }
+ bmwrite(dev, SROMCSR, 0);
+
+ return data;
+}
+
+/*
+ * It looks like Cogent and SMC use different methods for calculating
+ * checksums. What a pain..
+ */
+
+static int
+bmac_verify_checksum(struct net_device *dev)
+{
+ unsigned short data, storedCS;
+
+ reset_and_select_srom(dev);
+ data = read_srom(dev, 3, SROMAddressBits);
+ storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);
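+ /* The stored checksum is byte-swapped but never compared: Cogent
+  * and SMC parts compute it differently, so any value is accepted.
+  */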
+
+ return 0;
+}
+
+
+static void
+bmac_get_station_address(struct net_device *dev, unsigned char *ea)
+{
+ int i;
+ unsigned short data;
+
+ for (i = 0; i < 6; i++)
+ {
+ reset_and_select_srom(dev);
+ data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
+ ea[2*i] = bitrev8(data & 0x0ff);
+ ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
+ }
+}
+
+static void bmac_reset_and_enable(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ unsigned long flags;
+ struct sk_buff *skb;
+ unsigned char *data;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ bmac_enable_and_reset_chip(dev);
+ bmac_init_tx_ring(bp);
+ bmac_init_rx_ring(bp);
+ bmac_init_chip(dev);
+ bmac_start_chip(dev);
+ bmwrite(dev, INTDISABLE, EnableNormal);
+ bp->sleeping = 0;
+
+ /*
+ * It seems that the bmac can't receive until it's transmitted
+ * a packet. So we give it a dummy packet to transmit.
+ */
+ skb = dev_alloc_skb(ETHERMINPACKET);
+ if (skb != NULL) {
+ data = skb_put(skb, ETHERMINPACKET);
+ memset(data, 0, ETHERMINPACKET);
+ memcpy(data, dev->dev_addr, 6);
+ memcpy(data+6, dev->dev_addr, 6);
+ bmac_transmit_packet(skb, dev);
+ }
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static const struct ethtool_ops bmac_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+};
+
+static const struct net_device_ops bmac_netdev_ops = {
+ .ndo_open = bmac_open,
+ .ndo_stop = bmac_close,
+ .ndo_start_xmit = bmac_output,
+ .ndo_set_rx_mode = bmac_set_multicast,
+ .ndo_set_mac_address = bmac_set_address,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
+{
+ int j, rev, ret;
+ struct bmac_data *bp;
+ const unsigned char *prop_addr;
+ unsigned char addr[6];
+ struct net_device *dev;
+ int is_bmac_plus = ((int)match->data) != 0;
+
+ if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
+ printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
+ return -ENODEV;
+ }
+ prop_addr = of_get_property(macio_get_of_node(mdev),
+ "mac-address", NULL);
+ if (prop_addr == NULL) {
+ prop_addr = of_get_property(macio_get_of_node(mdev),
+ "local-mac-address", NULL);
+ if (prop_addr == NULL) {
+ printk(KERN_ERR "BMAC: Can't get mac-address\n");
+ return -ENODEV;
+ }
+ }
+ memcpy(addr, prop_addr, sizeof(addr));
+
+ dev = alloc_etherdev(PRIV_BYTES);
+ if (!dev) {
+ printk(KERN_ERR "BMAC: alloc_etherdev failed, out of memory\n");
+ return -ENOMEM;
+ }
+
+ bp = netdev_priv(dev);
+ SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
+ macio_set_drvdata(mdev, dev);
+
+ bp->mdev = mdev;
+ spin_lock_init(&bp->lock);
+
+ if (macio_request_resources(mdev, "bmac")) {
+ printk(KERN_ERR "BMAC: can't request IO resource !\n");
+ goto out_free;
+ }
+
+ dev->base_addr = (unsigned long)
+ ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
+ if (dev->base_addr == 0)
+ goto out_release;
+
+ dev->irq = macio_irq(mdev, 0);
+
+ bmac_enable_and_reset_chip(dev);
+ bmwrite(dev, INTDISABLE, DisableAll);
+
+ rev = addr[0] == 0 && addr[1] == 0xA0;
+ for (j = 0; j < 6; ++j)
+ dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
+
+ /* Enable chip without interrupts for now */
+ bmac_enable_and_reset_chip(dev);
+ bmwrite(dev, INTDISABLE, DisableAll);
+
+ dev->netdev_ops = &bmac_netdev_ops;
+ dev->ethtool_ops = &bmac_ethtool_ops;
+
+ bmac_get_station_address(dev, addr);
+ if (bmac_verify_checksum(dev) != 0)
+ goto err_out_iounmap;
+
+ bp->is_bmac_plus = is_bmac_plus;
+ bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
+ if (!bp->tx_dma)
+ goto err_out_iounmap;
+ bp->tx_dma_intr = macio_irq(mdev, 1);
+ bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
+ if (!bp->rx_dma)
+ goto err_out_iounmap_tx;
+ bp->rx_dma_intr = macio_irq(mdev, 2);
+
+ bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
+ bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;
+
+ bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
+ skb_queue_head_init(bp->queue);
+
+ init_timer(&bp->tx_timeout);
+
+ ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
+ if (ret) {
+ printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
+ goto err_out_iounmap_rx;
+ }
+ ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
+ if (ret) {
+ printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
+ goto err_out_irq0;
+ }
+ ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
+ if (ret) {
+ printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
+ goto err_out_irq1;
+ }
+
+ /* Mask chip interrupts and disable chip, will be
+ * re-enabled on open()
+ */
+ disable_irq(dev->irq);
+ pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
+
+ if (register_netdev(dev) != 0) {
+ printk(KERN_ERR "BMAC: Ethernet registration failed\n");
+ goto err_out_irq2;
+ }
+
+ printk(KERN_INFO "%s: BMAC%s at %pM",
+ dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr);
+ XXDEBUG((", base_addr=%#0lx", dev->base_addr));
+ printk("\n");
+
+ return 0;
+
+err_out_irq2:
+ free_irq(bp->rx_dma_intr, dev);
+err_out_irq1:
+ free_irq(bp->tx_dma_intr, dev);
+err_out_irq0:
+ free_irq(dev->irq, dev);
+err_out_iounmap_rx:
+ iounmap(bp->rx_dma);
+err_out_iounmap_tx:
+ iounmap(bp->tx_dma);
+err_out_iounmap:
+ iounmap((void __iomem *)dev->base_addr);
+out_release:
+ macio_release_resources(mdev);
+out_free:
+ pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
+ free_netdev(dev);
+
+ return -ENODEV;
+}
+
+static int bmac_open(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ /* XXDEBUG(("bmac: enter open\n")); */
+ /* reset the chip */
+ bp->opened = 1;
+ bmac_reset_and_enable(dev);
+ enable_irq(dev->irq);
+ return 0;
+}
+
+static int bmac_close(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ volatile struct dbdma_regs __iomem *td = bp->tx_dma;
+ unsigned short config;
+ int i;
+
+ bp->sleeping = 1;
+
+ /* disable rx and tx */
+ config = bmread(dev, RXCFG);
+ bmwrite(dev, RXCFG, (config & ~RxMACEnable));
+
+ config = bmread(dev, TXCFG);
+ bmwrite(dev, TXCFG, (config & ~TxMACEnable));
+
+ bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
+
+ /* disable rx and tx dma */
+ st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
+ st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
+
+ /* free some skb's */
+ XXDEBUG(("bmac: free rx bufs\n"));
+ for (i=0; i<N_RX_RING; i++) {
+ if (bp->rx_bufs[i] != NULL) {
+ dev_kfree_skb(bp->rx_bufs[i]);
+ bp->rx_bufs[i] = NULL;
+ }
+ }
+ XXDEBUG(("bmac: free tx bufs\n"));
+ for (i = 0; i<N_TX_RING; i++) {
+ if (bp->tx_bufs[i] != NULL) {
+ dev_kfree_skb(bp->tx_bufs[i]);
+ bp->tx_bufs[i] = NULL;
+ }
+ }
+ XXDEBUG(("bmac: all bufs freed\n"));
+
+ bp->opened = 0;
+ disable_irq(dev->irq);
+ pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
+
+ return 0;
+}
+
+static void
+bmac_start(struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ int i;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ if (bp->sleeping)
+ return;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ while (1) {
+ i = bp->tx_fill + 1;
+ if (i >= N_TX_RING)
+ i = 0;
+ if (i == bp->tx_empty)
+ break;
+ skb = skb_dequeue(bp->queue);
+ if (skb == NULL)
+ break;
+ bmac_transmit_packet(skb, dev);
+ }
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static int
+bmac_output(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bmac_data *bp = netdev_priv(dev);
+ skb_queue_tail(bp->queue, skb);
+ bmac_start(dev);
+ return NETDEV_TX_OK;
+}
+
+static void bmac_tx_timeout(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct bmac_data *bp = netdev_priv(dev);
+ volatile struct dbdma_regs __iomem *td = bp->tx_dma;
+ volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
+ volatile struct dbdma_cmd *cp;
+ unsigned long flags;
+ unsigned short config, oldConfig;
+ int i;
+
+ XXDEBUG(("bmac: tx_timeout called\n"));
+ spin_lock_irqsave(&bp->lock, flags);
+ bp->timeout_active = 0;
+
+ /* update various counters */
+/* bmac_handle_misc_intrs(bp, 0); */
+
+ cp = &bp->tx_cmds[bp->tx_empty];
+/* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
+/* ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
+/* mb->pr, mb->xmtfs, mb->fifofc)); */
+
+ /* turn off both tx and rx and reset the chip */
+ config = bmread(dev, RXCFG);
+ bmwrite(dev, RXCFG, (config & ~RxMACEnable));
+ config = bmread(dev, TXCFG);
+ bmwrite(dev, TXCFG, (config & ~TxMACEnable));
+ out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
+ printk(KERN_ERR "bmac: transmit timeout - resetting\n");
+ bmac_enable_and_reset_chip(dev);
+
+ /* restart rx dma */
+ cp = bus_to_virt(ld_le32(&rd->cmdptr));
+ out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
+ out_le16(&cp->xfer_status, 0);
+ out_le32(&rd->cmdptr, virt_to_bus(cp));
+ out_le32(&rd->control, DBDMA_SET(RUN|WAKE));
+
+ /* fix up the transmit side */
+ XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
+ bp->tx_empty, bp->tx_fill, bp->tx_fullup));
+ i = bp->tx_empty;
+ ++dev->stats.tx_errors;
+ if (i != bp->tx_fill) {
+ dev_kfree_skb(bp->tx_bufs[i]);
+ bp->tx_bufs[i] = NULL;
+ if (++i >= N_TX_RING) i = 0;
+ bp->tx_empty = i;
+ }
+ bp->tx_fullup = 0;
+ netif_wake_queue(dev);
+ if (i != bp->tx_fill) {
+ cp = &bp->tx_cmds[i];
+ out_le16(&cp->xfer_status, 0);
+ out_le16(&cp->command, OUTPUT_LAST);
+ out_le32(&td->cmdptr, virt_to_bus(cp));
+ out_le32(&td->control, DBDMA_SET(RUN));
+ /* bmac_set_timeout(dev); */
+ XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
+ }
+
+ /* turn it back on */
+ oldConfig = bmread(dev, RXCFG);
+	bmwrite(dev, RXCFG, oldConfig | RxMACEnable);
+	oldConfig = bmread(dev, TXCFG);
+	bmwrite(dev, TXCFG, oldConfig | TxMACEnable);
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+#if 0
+static void dump_dbdma(volatile struct dbdma_cmd *cp,int count)
+{
+ int i,*ip;
+
+ for (i=0;i< count;i++) {
+ ip = (int*)(cp+i);
+
+ printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
+ ld_le32(ip+0),
+ ld_le32(ip+1),
+ ld_le32(ip+2),
+ ld_le32(ip+3));
+ }
+
+}
+#endif
+
+#if 0
+static int
+bmac_proc_info(char *buffer, char **start, off_t offset, int length)
+{
+ int len = 0;
+ off_t pos = 0;
+ off_t begin = 0;
+ int i;
+
+ if (bmac_devs == NULL)
+ return -ENOSYS;
+
+ len += sprintf(buffer, "BMAC counters & registers\n");
+
+ for (i = 0; i<N_REG_ENTRIES; i++) {
+ len += sprintf(buffer + len, "%s: %#08x\n",
+ reg_entries[i].name,
+ bmread(bmac_devs, reg_entries[i].reg_offset));
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+
+ if (pos > offset+length) break;
+ }
+
+ *start = buffer + (offset - begin);
+ len -= (offset - begin);
+
+ if (len > length) len = length;
+
+ return len;
+}
+#endif
+
+static int __devexit bmac_remove(struct macio_dev *mdev)
+{
+ struct net_device *dev = macio_get_drvdata(mdev);
+ struct bmac_data *bp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ free_irq(dev->irq, dev);
+ free_irq(bp->tx_dma_intr, dev);
+ free_irq(bp->rx_dma_intr, dev);
+
+ iounmap((void __iomem *)dev->base_addr);
+ iounmap(bp->tx_dma);
+ iounmap(bp->rx_dma);
+
+ macio_release_resources(mdev);
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct of_device_id bmac_match[] =
+{
+ {
+ .name = "bmac",
+ .data = (void *)0,
+ },
+ {
+ .type = "network",
+ .compatible = "bmac+",
+ .data = (void *)1,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE (of, bmac_match);
+
+static struct macio_driver bmac_driver =
+{
+ .driver = {
+ .name = "bmac",
+ .owner = THIS_MODULE,
+ .of_match_table = bmac_match,
+ },
+ .probe = bmac_probe,
+ .remove = bmac_remove,
+#ifdef CONFIG_PM
+ .suspend = bmac_suspend,
+ .resume = bmac_resume,
+#endif
+};
+
+
+static int __init bmac_init(void)
+{
+ if (bmac_emergency_rxbuf == NULL) {
+ bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
+ if (bmac_emergency_rxbuf == NULL) {
+ printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n");
+ return -ENOMEM;
+ }
+ }
+
+ return macio_register_driver(&bmac_driver);
+}
+
+static void __exit bmac_exit(void)
+{
+ macio_unregister_driver(&bmac_driver);
+
+ kfree(bmac_emergency_rxbuf);
+ bmac_emergency_rxbuf = NULL;
+}
+
+MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
+MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
+MODULE_LICENSE("GPL");
+
+module_init(bmac_init);
+module_exit(bmac_exit);
diff --git a/drivers/net/ethernet/apple/bmac.h b/drivers/net/ethernet/apple/bmac.h
new file mode 100644
index 000000000000..a1d19d867ba5
--- /dev/null
+++ b/drivers/net/ethernet/apple/bmac.h
@@ -0,0 +1,164 @@
+/*
+ * bmac.h - definitions for the registers in the "Big Mac"
+ * Ethernet controller found in PowerMac G3 models.
+ *
+ * Copyright (C) 1998 Randy Gobbel.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* The "Big MAC" appears to have some parts in common with the Sun "Happy Meal"
+ * (HME) controller. See sunhme.h
+ */
+
+
+/* register offsets */
+
+/* global status and control */
+#define XIFC 0x000 /* low-level interface control */
+# define TxOutputEnable 0x0001 /* output driver enable */
+# define XIFLoopback 0x0002 /* Loopback-mode XIF enable */
+# define MIILoopback 0x0004 /* Loopback-mode MII enable */
+# define MIILoopbackBits 0x0006
+# define MIIBuffDisable 0x0008 /* MII receive buffer disable */
+# define SQETestEnable 0x0010 /* SQE test enable */
+# define SQETimeWindow 0x03e0 /* SQE time window */
+# define XIFLanceMode 0x0010 /* Lance mode enable */
+# define XIFLanceIPG0 0x03e0 /* Lance mode IPG0 */
+#define TXFIFOCSR 0x100 /* transmit FIFO control */
+# define TxFIFOEnable 0x0001
+#define TXTH 0x110 /* transmit threshold */
+# define TxThreshold 0x0004
+#define RXFIFOCSR 0x120 /* receive FIFO control */
+# define RxFIFOEnable 0x0001
+#define MEMADD 0x130 /* memory address, unknown function */
+#define MEMDATAHI 0x140 /* memory data high, presently unused in driver */
+#define MEMDATALO 0x150 /* memory data low, presently unused in driver */
+#define XCVRIF 0x160 /* transceiver interface control */
+# define COLActiveLow 0x0002
+# define SerialMode 0x0004
+# define ClkBit 0x0008
+# define LinkStatus 0x0100
+#define CHIPID 0x170 /* chip ID */
+#define MIFCSR 0x180 /* ??? */
+#define SROMCSR 0x190 /* SROM control */
+# define ChipSelect 0x0001
+# define Clk 0x0002
+#define TXPNTR 0x1a0 /* transmit pointer */
+#define RXPNTR 0x1b0 /* receive pointer */
+#define STATUS 0x200 /* status--reading this clears it */
+#define INTDISABLE 0x210 /* interrupt enable/disable control */
+/* bits below are the same in both STATUS and INTDISABLE registers */
+# define FrameReceived 0x00000001 /* Received a frame */
+# define RxFrameCntExp 0x00000002 /* Receive frame counter expired */
+# define RxAlignCntExp 0x00000004 /* Align-error counter expired */
+# define RxCRCCntExp 0x00000008 /* CRC-error counter expired */
+# define RxLenCntExp 0x00000010 /* Length-error counter expired */
+# define RxOverFlow 0x00000020 /* Receive FIFO overflow */
+# define RxCodeViolation 0x00000040 /* Code-violation counter expired */
+# define SQETestError 0x00000080 /* Test error in XIF for SQE */
+# define FrameSent 0x00000100 /* Transmitted a frame */
+# define TxUnderrun 0x00000200 /* Transmit FIFO underrun */
+# define TxMaxSizeError 0x00000400 /* Max-packet size error */
+# define TxNormalCollExp 0x00000800 /* Normal-collision counter expired */
+# define TxExcessCollExp 0x00001000 /* Excess-collision counter expired */
+# define TxLateCollExp 0x00002000 /* Late-collision counter expired */
+# define TxNetworkCollExp 0x00004000 /* First-collision counter expired */
+# define TxDeferTimerExp 0x00008000 /* Defer-timer expired */
+# define RxFIFOToHost 0x00010000 /* Data moved from FIFO to host */
+# define RxNoDescriptors 0x00020000 /* No more receive descriptors */
+# define RxDMAError 0x00040000 /* Error during receive DMA */
+# define RxDMALateErr 0x00080000 /* Receive DMA, data late */
+# define RxParityErr 0x00100000 /* Parity error during receive DMA */
+# define RxTagError 0x00200000 /* Tag error during receive DMA */
+# define TxEOPError 0x00400000 /* Tx descriptor did not have EOP set */
+# define MIFIntrEvent 0x00800000 /* MIF is signaling an interrupt */
+# define TxHostToFIFO 0x01000000 /* Data moved from host to FIFO */
+# define TxFIFOAllSent 0x02000000 /* Transmitted all packets in FIFO */
+# define TxDMAError 0x04000000 /* Error during transmit DMA */
+# define TxDMALateError 0x08000000 /* Late error during transmit DMA */
+# define TxParityError 0x10000000 /* Parity error during transmit DMA */
+# define TxTagError 0x20000000 /* Tag error during transmit DMA */
+# define PIOError 0x40000000 /* PIO access got an error */
+# define PIOParityError 0x80000000 /* PIO access got a parity error */
+# define DisableAll 0xffffffff
+# define EnableAll 0x00000000
+/* # define NormalIntEvents ~(FrameReceived | FrameSent | TxUnderrun) */
+# define EnableNormal ~(FrameReceived | FrameSent)
+# define EnableErrors (FrameReceived | FrameSent)
+# define RxErrorMask (RxFrameCntExp | RxAlignCntExp | RxCRCCntExp | \
+ RxLenCntExp | RxOverFlow | RxCodeViolation)
+# define TxErrorMask (TxUnderrun | TxMaxSizeError | TxExcessCollExp | \
+ TxLateCollExp | TxNetworkCollExp | TxDeferTimerExp)
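+
+/* Usage sketch (illustrative only): a word read from STATUS reports and
+ * clears the pending events, and can be classified with the masks above,
+ * e.g. in an interrupt handler:
+ *
+ *	status = bmread(dev, STATUS);
+ *	if (status & RxErrorMask)
+ *		... bump the receive error counters ...
+ *
+ * bmread() here is the register accessor defined in bmac.c.
+ */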
+
+/* transmit control */
+#define TXRST 0x420 /* transmit reset */
+# define TxResetBit 0x0001
+#define TXCFG 0x430 /* transmit configuration control*/
+# define TxMACEnable 0x0001 /* output driver enable */
+# define TxSlowMode 0x0020 /* enable slow mode */
+# define TxIgnoreColl 0x0040 /* ignore transmit collisions */
+# define TxNoFCS 0x0080 /* do not emit FCS */
+# define TxNoBackoff 0x0100 /* no backoff in case of collisions */
+# define TxFullDuplex 0x0200 /* enable full-duplex */
+# define TxNeverGiveUp 0x0400 /* don't give up on transmits */
+#define IPG1 0x440 /* Inter-packet gap 1 */
+#define IPG2 0x450 /* Inter-packet gap 2 */
+#define ALIMIT 0x460 /* Transmit attempt limit */
+#define SLOT 0x470 /* Transmit slot time */
+#define PALEN 0x480 /* Size of transmit preamble */
+#define PAPAT 0x490 /* Pattern for transmit preamble */
+#define TXSFD 0x4a0 /* Transmit frame delimiter */
+#define JAM 0x4b0 /* Jam size */
+#define TXMAX 0x4c0 /* Transmit max pkt size */
+#define TXMIN 0x4d0 /* Transmit min pkt size */
+#define PAREG 0x4e0 /* Count of transmit peak attempts */
+#define DCNT 0x4f0 /* Transmit defer timer */
+#define NCCNT 0x500 /* Transmit normal-collision counter */
+#define NTCNT 0x510 /* Transmit first-collision counter */
+#define EXCNT 0x520 /* Transmit excess-collision counter */
+#define LTCNT 0x530 /* Transmit late-collision counter */
+#define RSEED 0x540 /* Transmit random number seed */
+#define TXSM 0x550 /* Transmit state machine */
+
+/* receive control */
+#define RXRST 0x620 /* receive reset */
+# define RxResetValue 0x0000
+#define RXCFG 0x630 /* receive configuration control */
+# define RxMACEnable 0x0001 /* receiver overall enable */
+# define RxCFGReserved 0x0004
+# define RxPadStripEnab 0x0020 /* enable pad byte stripping */
+# define RxPromiscEnable 0x0040 /* turn on promiscuous mode */
+# define RxNoErrCheck 0x0080 /* disable receive error checking */
+# define RxCRCNoStrip 0x0100 /* disable auto-CRC-stripping */
+# define RxRejectOwnPackets 0x0200 /* don't receive our own packets */
+# define RxGrpPromisck 0x0400 /* enable group promiscuous mode */
+# define RxHashFilterEnable 0x0800 /* enable hash filter */
+# define RxAddrFilterEnable 0x1000 /* enable address filter */
+#define RXMAX 0x640 /* Max receive packet size */
+#define RXMIN 0x650 /* Min receive packet size */
+#define MADD2 0x660 /* our enet address, high part */
+#define MADD1 0x670 /* our enet address, middle part */
+#define MADD0 0x680 /* our enet address, low part */
+#define FRCNT 0x690 /* receive frame counter */
+#define LECNT 0x6a0 /* Receive excess length error counter */
+#define AECNT 0x6b0 /* Receive misaligned error counter */
+#define FECNT 0x6c0 /* Receive CRC error counter */
+#define RXSM 0x6d0 /* Receive state machine */
+#define RXCV 0x6e0 /* Receive code violation */
+
+#define BHASH3 0x700 /* multicast hash register */
+#define BHASH2 0x710 /* multicast hash register */
+#define BHASH1 0x720 /* multicast hash register */
+#define BHASH0 0x730 /* multicast hash register */
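+/* BHASH3..BHASH0 together hold the 64-bit multicast hash filter, 16 bits
+ * per register; bmac.c reduces each multicast address to one of the 64 bit
+ * positions when it builds the filter. */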
+
+#define AFR2 0x740 /* address filtering setup? */
+#define AFR1 0x750 /* address filtering setup? */
+#define AFR0 0x760 /* address filtering setup? */
+#define AFCR 0x770 /* address filter compare register? */
+# define EnableAllCompares 0x0fff
+
diff --git a/drivers/net/ethernet/apple/cs89x0.c b/drivers/net/ethernet/apple/cs89x0.c
new file mode 100644
index 000000000000..f328da24c8fa
--- /dev/null
+++ b/drivers/net/ethernet/apple/cs89x0.c
@@ -0,0 +1,1913 @@
+/* cs89x0.c: A Crystal Semiconductor (Now Cirrus Logic) CS89[02]0
+ * driver for linux.
+ */
+
+/*
+ Written 1996 by Russell Nelson, with reference to skeleton.c
+ written 1993-1994 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached at nelson@crynwr.com, Crynwr
+ Software, 521 Pleasant Valley Rd., Potsdam, NY 13676
+
+ Changelog:
+
+ Mike Cruse : mcruse@cti-ltd.com
+ : Changes for Linux 2.0 compatibility.
+ : Added dev_id parameter in net_interrupt(),
+ : request_irq() and free_irq(). Just NULL for now.
+
+ Mike Cruse : Added MOD_INC_USE_COUNT and MOD_DEC_USE_COUNT macros
+ : in net_open() and net_close() so kerneld would know
+ : that the module is in use and wouldn't eject the
+ : driver prematurely.
+
+ Mike Cruse : Rewrote init_module() and cleanup_module using 8390.c
+ : as an example. Disabled autoprobing in init_module(),
+ : not a good thing to do to other devices while Linux
+ : is running from all accounts.
+
+ Russ Nelson : Jul 13 1998. Added RxOnly DMA support.
+
+ Melody Lee : Aug 10 1999. Changes for Linux 2.2.5 compatibility.
+ : email: ethernet@crystal.cirrus.com
+
+ Alan Cox : Removed 1.2 support, added 2.1 extra counters.
+
+ Andrew Morton : Kernel 2.3.48
+ : Handle kmalloc() failures
+ : Other resource allocation fixes
+ : Add SMP locks
+ : Integrate Russ Nelson's ALLOW_DMA functionality back in.
+ : If ALLOW_DMA is true, make DMA runtime selectable
+ : Folded in changes from Cirrus (Melody Lee
+ : <klee@crystal.cirrus.com>)
+ : Don't call netif_wake_queue() in net_send_packet()
+ : Fixed an out-of-mem bug in dma_rx()
+ : Updated Documentation/networking/cs89x0.txt
+
+ Andrew Morton : Kernel 2.3.99-pre1
+ : Use skb_reserve to longword align IP header (two places)
+ : Remove a delay loop from dma_rx()
+ : Replace '100' with HZ
+ : Clean up a couple of skb API abuses
+ : Added 'cs89x0_dma=N' kernel boot option
+ : Correctly initialise lp->lock in non-module compile
+
+ Andrew Morton : Kernel 2.3.99-pre4-1
+ : MOD_INC/DEC race fix (see
+ : http://www.uwsg.indiana.edu/hypermail/linux/kernel/0003.3/1532.html)
+
+ Andrew Morton : Kernel 2.4.0-test7-pre2
+ : Enhanced EEPROM support to cover more devices,
+ : abstracted IRQ mapping to support CONFIG_ARCH_CLPS7500 arch
+ : (Jason Gunthorpe <jgg@ualberta.ca>)
+
+ Andrew Morton : Kernel 2.4.0-test11-pre4
+ : Use dev->name in request_*() (Andrey Panin)
+ : Fix an error-path memleak in init_module()
+ : Preserve return value from request_irq()
+ : Fix type of `media' module parm (Keith Owens)
+ : Use SET_MODULE_OWNER()
+ : Tidied up strange request_irq() abuse in net_open().
+
+ Andrew Morton : Kernel 2.4.3-pre1
+ : Request correct number of pages for DMA (Hugh Dickens)
+ : Select PP_ChipID _after_ unregister_netdev in cleanup_module()
+ : because unregister_netdev() calls get_stats.
+ : Make `version[]' __initdata
+ : Uninlined the read/write reg/word functions.
+
+ Oskar Schirmer : oskar@scara.com
+ : HiCO.SH4 (superh) support added (irq#1, cs89x0_media=)
+
+ Deepak Saxena : dsaxena@plexity.net
+ : Intel IXDP2x01 (XScale ixp2x00 NPU) platform support
+
+ Dmitry Pervushin : dpervushin@ru.mvista.com
+ : PNX010X platform support
+
+ Deepak Saxena : dsaxena@plexity.net
+ : Intel IXDP2351 platform support
+
+ Domenico Andreoli : cavokz@gmail.com
+ : QQ2440 platform support
+
+*/
+
+/* Include <linux/module.h> first, in case the user wants to turn on
+   or override something. */
+#include <linux/module.h>
+
+/*
+ * Set this to zero to disable DMA code
+ *
+ * Note that even if DMA is turned off we still support the 'dma' and 'use_dma'
+ * module options so we don't break any startup scripts.
+ */
+#ifndef CONFIG_ISA_DMA_API
+#define ALLOW_DMA 0
+#else
+#define ALLOW_DMA 1
+#endif
+
+/*
+ * Set this to zero to remove all the debug statements via
+ * dead code elimination
+ */
+#define DEBUGGING 1
+
+/*
+ Sources:
+
+ Crynwr packet driver epktisa.
+
+ Crystal Semiconductor data sheets.
+
+*/
+
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#if ALLOW_DMA
+#include <asm/dma.h>
+#endif
+
+#include "cs89x0.h"
+
+static char version[] __initdata =
+"cs89x0.c: v2.4.3-pre1 Russell Nelson <nelson@crynwr.com>, Andrew Morton\n";
+
+#define DRV_NAME "cs89x0"
+
+/* First, a few definitions that the brave might change.
+ A zero-terminated list of I/O addresses to be probed. Some special flags..
+ Addr & 1 = Read back the address port, look for signature and reset
+ the page window before probing
+ Addr & 3 = Reset the page window and probe
+ The CLPS eval board has the Cirrus chip at 0x80090300, in ARM IO space,
+ but it is possible that a Cirrus board could be plugged into the ISA
+ slots. */
+/* The cs8900 has 4 IRQ pins, software selectable. cs8900_irq_map maps
+ them to system IRQ numbers. This mapping is card specific and is set to
+ the configuration of the Cirrus Eval board for this chip. */
+#if defined(CONFIG_MACH_IXDP2351)
+static unsigned int netcard_portlist[] __used __initdata = {IXDP2351_VIRT_CS8900_BASE, 0};
+static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0};
+#elif defined(CONFIG_ARCH_IXDP2X01)
+static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0};
+static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
+#elif defined(CONFIG_MACH_QQ2440)
+#include <mach/qq2440.h>
+static unsigned int netcard_portlist[] __used __initdata = { QQ2440_CS8900_VIRT_BASE + 0x300, 0 };
+static unsigned int cs8900_irq_map[] = { QQ2440_CS8900_IRQ, 0, 0, 0 };
+#elif defined(CONFIG_MACH_MX31ADS)
+#include <mach/board-mx31ads.h>
+static unsigned int netcard_portlist[] __used __initdata = {
+ PBC_BASE_ADDRESS + PBC_CS8900A_IOBASE + 0x300, 0
+};
+static unsigned cs8900_irq_map[] = {EXPIO_INT_ENET_INT, 0, 0, 0};
+#else
+static unsigned int netcard_portlist[] __used __initdata =
+ { 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0};
+static unsigned int cs8900_irq_map[] = {10,11,12,5};
+#endif
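+
+/* On the generic ISA build above, the chip's four interrupt pins map to
+ * system IRQs 10, 11, 12 and 5; write_irq() later walks this table in the
+ * reverse direction to pick the pin number to program into the chip. */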
+
+#if DEBUGGING
+static unsigned int net_debug = DEBUGGING;
+#else
+#define net_debug 0 /* gcc will remove all the debug code for us */
+#endif
+
+/* The number of low I/O ports used by the ethercard. */
+#define NETCARD_IO_EXTENT 16
+
+/* we allow the user to override various values normally set in the EEPROM */
+#define FORCE_RJ45 0x0001 /* pick one of these three */
+#define FORCE_AUI 0x0002
+#define FORCE_BNC 0x0004
+
+#define FORCE_AUTO 0x0010 /* pick one of these three */
+#define FORCE_HALF 0x0020
+#define FORCE_FULL 0x0030
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ int chip_type; /* one of: CS8900, CS8920, CS8920M */
+ char chip_revision; /* revision letter of the chip ('A'...) */
+ int send_cmd; /* the proper send command: TX_NOW, TX_AFTER_381, or TX_AFTER_ALL */
+ int auto_neg_cnf; /* auto-negotiation word from EEPROM */
+ int adapter_cnf; /* adapter configuration from EEPROM */
+ int isa_config; /* ISA configuration from EEPROM */
+ int irq_map; /* IRQ map from EEPROM */
+ int rx_mode; /* what mode are we in? 0, RX_MULTCAST_ACCEPT, or RX_ALL_ACCEPT */
+ int curr_rx_cfg; /* a copy of PP_RxCFG */
+ int linectl; /* either 0 or LOW_RX_SQUELCH, depending on configuration. */
+ int send_underrun; /* keep track of how many underruns in a row we get */
+ int force; /* force various values; see FORCE* above. */
+ spinlock_t lock;
+#if ALLOW_DMA
+ int use_dma; /* Flag: we're using dma */
+ int dma; /* DMA channel */
+ int dmasize; /* 16 or 64 */
+ unsigned char *dma_buff; /* points to the beginning of the buffer */
+ unsigned char *end_dma_buff; /* points to the end of the buffer */
+ unsigned char *rx_dma_ptr; /* points to the next packet */
+#endif
+};
+
+/* Index to functions, as function prototypes. */
+
+static int cs89x0_probe1(struct net_device *dev, int ioaddr, int modular);
+static int net_open(struct net_device *dev);
+static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t net_interrupt(int irq, void *dev_id);
+static void set_multicast_list(struct net_device *dev);
+static void net_timeout(struct net_device *dev);
+static void net_rx(struct net_device *dev);
+static int net_close(struct net_device *dev);
+static struct net_device_stats *net_get_stats(struct net_device *dev);
+static void reset_chip(struct net_device *dev);
+static int get_eeprom_data(struct net_device *dev, int off, int len, int *buffer);
+static int get_eeprom_cksum(int off, int len, int *buffer);
+static int set_mac_address(struct net_device *dev, void *addr);
+static void count_rx_errors(int status, struct net_device *dev);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void net_poll_controller(struct net_device *dev);
+#endif
+#if ALLOW_DMA
+static void get_dma_channel(struct net_device *dev);
+static void release_dma_buff(struct net_local *lp);
+#endif
+
+/* Example routines you must write ;->. */
+#define tx_done(dev) 1
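+/* tx_done() is a stub that always reports completion; it only feeds the
+ * diagnostic printed by net_timeout() below. */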
+
+/*
+ * Permit 'cs89x0_dma=N' in the kernel boot environment
+ */
+#if !defined(MODULE) && (ALLOW_DMA != 0)
+static int g_cs89x0_dma;
+
+static int __init dma_fn(char *str)
+{
+ g_cs89x0_dma = simple_strtol(str,NULL,0);
+ return 1;
+}
+
+__setup("cs89x0_dma=", dma_fn);
+#endif /* !defined(MODULE) && (ALLOW_DMA != 0) */
+
+#ifndef MODULE
+static int g_cs89x0_media__force;
+
+static int __init media_fn(char *str)
+{
+ if (!strcmp(str, "rj45")) g_cs89x0_media__force = FORCE_RJ45;
+ else if (!strcmp(str, "aui")) g_cs89x0_media__force = FORCE_AUI;
+ else if (!strcmp(str, "bnc")) g_cs89x0_media__force = FORCE_BNC;
+ return 1;
+}
+
+__setup("cs89x0_media=", media_fn);
+
+
+/* Check for a network adaptor of this type, and return a device structure
+   iff one exists.
+   If dev->base_addr == 0, probe all likely locations.
+   If dev->base_addr == 1, always return failure.
+   If dev->base_addr == 2, allocate space for the device and return success
+   (detachable devices only).
+   Returns the device structure on success, an ERR_PTR() value on failure.
+ */
+
+struct net_device * __init cs89x0_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
+ unsigned *port;
+ int err = 0;
+ int irq;
+ int io;
+
+	if (!dev)
+		return ERR_PTR(-ENOMEM);	/* allocation failure, not a missing device */
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ io = dev->base_addr;
+ irq = dev->irq;
+
+ if (net_debug)
+ printk("cs89x0:cs89x0_probe(0x%x)\n", io);
+
+ if (io > 0x1ff) { /* Check a single specified location. */
+ err = cs89x0_probe1(dev, io, 0);
+ } else if (io != 0) { /* Don't probe at all. */
+ err = -ENXIO;
+ } else {
+ for (port = netcard_portlist; *port; port++) {
+ if (cs89x0_probe1(dev, *port, 0) == 0)
+ break;
+ dev->irq = irq;
+ }
+ if (!*port)
+ err = -ENODEV;
+ }
+ if (err)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ printk(KERN_WARNING "cs89x0: no cs8900 or cs8920 detected. Be sure to disable PnP with SETUP\n");
+ return ERR_PTR(err);
+}
+#endif
+
+#if defined(CONFIG_MACH_IXDP2351)
+static u16
+readword(unsigned long base_addr, int portno)
+{
+ return __raw_readw(base_addr + (portno << 1));
+}
+
+static void
+writeword(unsigned long base_addr, int portno, u16 value)
+{
+ __raw_writew(value, base_addr + (portno << 1));
+}
+#elif defined(CONFIG_ARCH_IXDP2X01)
+static u16
+readword(unsigned long base_addr, int portno)
+{
+ return __raw_readl(base_addr + (portno << 1));
+}
+
+static void
+writeword(unsigned long base_addr, int portno, u16 value)
+{
+ __raw_writel(value, base_addr + (portno << 1));
+}
+#else
+static u16
+readword(unsigned long base_addr, int portno)
+{
+ return inw(base_addr + portno);
+}
+
+static void
+writeword(unsigned long base_addr, int portno, u16 value)
+{
+ outw(value, base_addr + portno);
+}
+#endif
+
+static void
+readwords(unsigned long base_addr, int portno, void *buf, int length)
+{
+ u8 *buf8 = (u8 *)buf;
+
+ do {
+ u16 tmp16;
+
+ tmp16 = readword(base_addr, portno);
+ *buf8++ = (u8)tmp16;
+ *buf8++ = (u8)(tmp16 >> 8);
+ } while (--length);
+}
+
+static void
+writewords(unsigned long base_addr, int portno, void *buf, int length)
+{
+ u8 *buf8 = (u8 *)buf;
+
+ do {
+ u16 tmp16;
+
+ tmp16 = *buf8++;
+ tmp16 |= (*buf8++) << 8;
+ writeword(base_addr, portno, tmp16);
+ } while (--length);
+}
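+
+/* readwords()/writewords() move 16-bit quantities through the data port
+ * low byte first (little-endian as seen by the chip) regardless of host
+ * byte order; `length' counts words, not bytes. */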
+
+static u16
+readreg(struct net_device *dev, u16 regno)
+{
+ writeword(dev->base_addr, ADD_PORT, regno);
+ return readword(dev->base_addr, DATA_PORT);
+}
+
+static void
+writereg(struct net_device *dev, u16 regno, u16 value)
+{
+ writeword(dev->base_addr, ADD_PORT, regno);
+ writeword(dev->base_addr, DATA_PORT, value);
+}
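+
+/*
+ * A minimal sketch (illustrative only, not used by the driver): PacketPage
+ * registers are reached indirectly, by latching the register number into
+ * ADD_PORT and then transferring through DATA_PORT, which is all that
+ * readreg() and writereg() do. Reading the product ID code, for example,
+ * reduces to a single readreg() call:
+ */
+static inline u16 cs89x0_read_product_id(struct net_device *dev)
+{
+	return readreg(dev, PRODUCT_ID_ADD);	/* same access cs89x0_probe1() makes */
+}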
+
+static int __init
+wait_eeprom_ready(struct net_device *dev)
+{
+	unsigned long timeout = jiffies;
+	/* The EEPROM is ready when SI_BUSY in PP_SelfST is clear; the
+	   timeout guards against the busy bit never clearing */
+ while(readreg(dev, PP_SelfST) & SI_BUSY)
+ if (jiffies - timeout >= 40)
+ return -1;
+ return 0;
+}
+
+static int __init
+get_eeprom_data(struct net_device *dev, int off, int len, int *buffer)
+{
+ int i;
+
+ if (net_debug > 3) printk("EEPROM data from %x for %x:\n",off,len);
+ for (i = 0; i < len; i++) {
+ if (wait_eeprom_ready(dev) < 0) return -1;
+ /* Now send the EEPROM read command and EEPROM location to read */
+ writereg(dev, PP_EECMD, (off + i) | EEPROM_READ_CMD);
+ if (wait_eeprom_ready(dev) < 0) return -1;
+ buffer[i] = readreg(dev, PP_EEData);
+ if (net_debug > 3) printk("%04x ", buffer[i]);
+ }
+ if (net_debug > 3) printk("\n");
+ return 0;
+}
+
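+/* The extended EEPROM contents are accepted only when the 16-bit sum of
+ * all configuration words, including the stored checksum word, is zero. */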
+static int __init
+get_eeprom_cksum(int off, int len, int *buffer)
+{
+ int i, cksum;
+
+ cksum = 0;
+ for (i = 0; i < len; i++)
+ cksum += buffer[i];
+ cksum &= 0xffff;
+ if (cksum == 0)
+ return 0;
+ return -1;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void net_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ net_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static const struct net_device_ops net_ops = {
+ .ndo_open = net_open,
+ .ndo_stop = net_close,
+ .ndo_tx_timeout = net_timeout,
+ .ndo_start_xmit = net_send_packet,
+ .ndo_get_stats = net_get_stats,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_set_mac_address = set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = net_poll_controller,
+#endif
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/* This is the real probe routine. Linux has a history of friendly device
+   probes on the ISA bus. A good device probe avoids doing writes, and
+ verifies that the correct device exists and functions.
+ Return 0 on success.
+ */
+
+static int __init
+cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
+{
+ struct net_local *lp = netdev_priv(dev);
+ static unsigned version_printed;
+ int i;
+ int tmp;
+ unsigned rev_type = 0;
+ int eeprom_buff[CHKSUM_LEN];
+ int retval;
+
+ /* Initialize the device structure. */
+ if (!modular) {
+ memset(lp, 0, sizeof(*lp));
+ spin_lock_init(&lp->lock);
+#ifndef MODULE
+#if ALLOW_DMA
+ if (g_cs89x0_dma) {
+ lp->use_dma = 1;
+ lp->dma = g_cs89x0_dma;
+ lp->dmasize = 16; /* Could make this an option... */
+ }
+#endif
+ lp->force = g_cs89x0_media__force;
+#endif
+
+#if defined(CONFIG_MACH_QQ2440)
+ lp->force |= FORCE_RJ45 | FORCE_FULL;
+#endif
+ }
+
+ /* Grab the region so we can find another board if autoIRQ fails. */
+ /* WTF is going on here? */
+ if (!request_region(ioaddr & ~3, NETCARD_IO_EXTENT, DRV_NAME)) {
+ printk(KERN_ERR "%s: request_region(0x%x, 0x%x) failed\n",
+ DRV_NAME, ioaddr, NETCARD_IO_EXTENT);
+ retval = -EBUSY;
+ goto out1;
+ }
+
+ /* if they give us an odd I/O address, then do ONE write to
+ the address port, to get it back to address zero, where we
+ expect to find the EISA signature word. An IO with a base of 0x3
+ will skip the test for the ADD_PORT. */
+ if (ioaddr & 1) {
+ if (net_debug > 1)
+ printk(KERN_INFO "%s: odd ioaddr 0x%x\n", dev->name, ioaddr);
+ if ((ioaddr & 2) != 2)
+ if ((readword(ioaddr & ~3, ADD_PORT) & ADD_MASK) != ADD_SIG) {
+ printk(KERN_ERR "%s: bad signature 0x%x\n",
+ dev->name, readword(ioaddr & ~3, ADD_PORT));
+ retval = -ENODEV;
+ goto out2;
+ }
+ }
+
+ ioaddr &= ~3;
+ printk(KERN_DEBUG "PP_addr at %x[%x]: 0x%x\n",
+ ioaddr, ADD_PORT, readword(ioaddr, ADD_PORT));
+ writeword(ioaddr, ADD_PORT, PP_ChipID);
+
+ tmp = readword(ioaddr, DATA_PORT);
+ if (tmp != CHIP_EISA_ID_SIG) {
+ printk(KERN_DEBUG "%s: incorrect signature at %x[%x]: 0x%x!="
+ CHIP_EISA_ID_SIG_STR "\n",
+ dev->name, ioaddr, DATA_PORT, tmp);
+ retval = -ENODEV;
+ goto out2;
+ }
+
+ /* Fill in the 'dev' fields. */
+ dev->base_addr = ioaddr;
+
+ /* get the chip type */
+ rev_type = readreg(dev, PRODUCT_ID_ADD);
+ lp->chip_type = rev_type &~ REVISON_BITS;
+ lp->chip_revision = ((rev_type & REVISON_BITS) >> 8) + 'A';
+
+ /* Check the chip type and revision in order to set the correct send command
+ CS8920 revision C and CS8900 revision F can use the faster send. */
+ lp->send_cmd = TX_AFTER_381;
+ if (lp->chip_type == CS8900 && lp->chip_revision >= 'F')
+ lp->send_cmd = TX_NOW;
+ if (lp->chip_type != CS8900 && lp->chip_revision >= 'C')
+ lp->send_cmd = TX_NOW;
+
+ if (net_debug && version_printed++ == 0)
+ printk(version);
+
+ printk(KERN_INFO "%s: cs89%c0%s rev %c found at %#3lx ",
+ dev->name,
+ lp->chip_type==CS8900?'0':'2',
+ lp->chip_type==CS8920M?"M":"",
+ lp->chip_revision,
+ dev->base_addr);
+
+ reset_chip(dev);
+
+ /* Here we read the current configuration of the chip. If there
+ is no Extended EEPROM then the idea is to not disturb the chip
+ configuration, it should have been correctly setup by automatic
+ EEPROM read on reset. So, if the chip says it read the EEPROM
+ the driver will always do *something* instead of complain that
+ adapter_cnf is 0. */
+
+
+ if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) ==
+ (EEPROM_OK|EEPROM_PRESENT)) {
+ /* Load the MAC. */
+ for (i=0; i < ETH_ALEN/2; i++) {
+ unsigned int Addr;
+ Addr = readreg(dev, PP_IA+i*2);
+ dev->dev_addr[i*2] = Addr & 0xFF;
+ dev->dev_addr[i*2+1] = Addr >> 8;
+ }
+
+ /* Load the Adapter Configuration.
+ Note: Barring any more specific information from some
+		   other source (i.e. EEPROM + schematics), we would not know
+ how to operate a 10Base2 interface on the AUI port.
+ However, since we do read the status of HCB1 and use
+ settings that always result in calls to control_dc_dc(dev,0)
+ a BNC interface should work if the enable pin
+ (dc/dc converter) is on HCB1. It will be called AUI
+ however. */
+
+ lp->adapter_cnf = 0;
+ i = readreg(dev, PP_LineCTL);
+ /* Preserve the setting of the HCB1 pin. */
+ if ((i & (HCB1 | HCB1_ENBL)) == (HCB1 | HCB1_ENBL))
+ lp->adapter_cnf |= A_CNF_DC_DC_POLARITY;
+		/* Save the squelch bit */
+ if ((i & LOW_RX_SQUELCH) == LOW_RX_SQUELCH)
+ lp->adapter_cnf |= A_CNF_EXTND_10B_2 | A_CNF_LOW_RX_SQUELCH;
+ /* Check if the card is in 10Base-t only mode */
+ if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == 0)
+ lp->adapter_cnf |= A_CNF_10B_T | A_CNF_MEDIA_10B_T;
+ /* Check if the card is in AUI only mode */
+ if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUI_ONLY)
+ lp->adapter_cnf |= A_CNF_AUI | A_CNF_MEDIA_AUI;
+ /* Check if the card is in Auto mode. */
+ if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUTO_AUI_10BASET)
+ lp->adapter_cnf |= A_CNF_AUI | A_CNF_10B_T |
+ A_CNF_MEDIA_AUI | A_CNF_MEDIA_10B_T | A_CNF_MEDIA_AUTO;
+
+ if (net_debug > 1)
+ printk(KERN_INFO "%s: PP_LineCTL=0x%x, adapter_cnf=0x%x\n",
+ dev->name, i, lp->adapter_cnf);
+
+ /* IRQ. Other chips already probe, see below. */
+ if (lp->chip_type == CS8900)
+ lp->isa_config = readreg(dev, PP_CS8900_ISAINT) & INT_NO_MASK;
+
+ printk( "[Cirrus EEPROM] ");
+ }
+
+ printk("\n");
+
+ /* First check to see if an EEPROM is attached. */
+
+ if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0)
+ printk(KERN_WARNING "cs89x0: No EEPROM, relying on command line....\n");
+ else if (get_eeprom_data(dev, START_EEPROM_DATA,CHKSUM_LEN,eeprom_buff) < 0) {
+ printk(KERN_WARNING "\ncs89x0: EEPROM read failed, relying on command line.\n");
+ } else if (get_eeprom_cksum(START_EEPROM_DATA,CHKSUM_LEN,eeprom_buff) < 0) {
+ /* Check if the chip was able to read its own configuration starting
+ at 0 in the EEPROM*/
+ if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) !=
+ (EEPROM_OK|EEPROM_PRESENT))
+ printk(KERN_WARNING "cs89x0: Extended EEPROM checksum bad and no Cirrus EEPROM, relying on command line\n");
+
+ } else {
+ /* This reads an extended EEPROM that is not documented
+ in the CS8900 datasheet. */
+
+ /* get transmission control word but keep the autonegotiation bits */
+ if (!lp->auto_neg_cnf) lp->auto_neg_cnf = eeprom_buff[AUTO_NEG_CNF_OFFSET/2];
+ /* Store adapter configuration */
+ if (!lp->adapter_cnf) lp->adapter_cnf = eeprom_buff[ADAPTER_CNF_OFFSET/2];
+ /* Store ISA configuration */
+ lp->isa_config = eeprom_buff[ISA_CNF_OFFSET/2];
+ dev->mem_start = eeprom_buff[PACKET_PAGE_OFFSET/2] << 8;
+
+ /* eeprom_buff has 32-bit ints, so we can't just memcpy it */
+ /* store the initial memory base address */
+ for (i = 0; i < ETH_ALEN/2; i++) {
+ dev->dev_addr[i*2] = eeprom_buff[i];
+ dev->dev_addr[i*2+1] = eeprom_buff[i] >> 8;
+ }
+ if (net_debug > 1)
+ printk(KERN_DEBUG "%s: new adapter_cnf: 0x%x\n",
+ dev->name, lp->adapter_cnf);
+ }
+
+ /* allow them to force multiple transceivers. If they force multiple, autosense */
+ {
+ int count = 0;
+ if (lp->force & FORCE_RJ45) {lp->adapter_cnf |= A_CNF_10B_T; count++; }
+ if (lp->force & FORCE_AUI) {lp->adapter_cnf |= A_CNF_AUI; count++; }
+ if (lp->force & FORCE_BNC) {lp->adapter_cnf |= A_CNF_10B_2; count++; }
+ if (count > 1) {lp->adapter_cnf |= A_CNF_MEDIA_AUTO; }
+ else if (lp->force & FORCE_RJ45){lp->adapter_cnf |= A_CNF_MEDIA_10B_T; }
+ else if (lp->force & FORCE_AUI) {lp->adapter_cnf |= A_CNF_MEDIA_AUI; }
+ else if (lp->force & FORCE_BNC) {lp->adapter_cnf |= A_CNF_MEDIA_10B_2; }
+ }
+
+ if (net_debug > 1)
+ printk(KERN_DEBUG "%s: after force 0x%x, adapter_cnf=0x%x\n",
+ dev->name, lp->force, lp->adapter_cnf);
+
+ /* FIXME: We don't let you set dc-dc polarity or low RX squelch from the command line: add it here */
+
+ /* FIXME: We don't let you set the IMM bit from the command line: add it to lp->auto_neg_cnf here */
+
+ /* FIXME: we don't set the Ethernet address on the command line. Use
+ ifconfig IFACE hw ether AABBCCDDEEFF */
+
+ printk(KERN_INFO "cs89x0 media %s%s%s",
+ (lp->adapter_cnf & A_CNF_10B_T)?"RJ-45,":"",
+ (lp->adapter_cnf & A_CNF_AUI)?"AUI,":"",
+ (lp->adapter_cnf & A_CNF_10B_2)?"BNC,":"");
+
+ lp->irq_map = 0xffff;
+
+ /* If this is a CS8900 then no pnp soft */
+ if (lp->chip_type != CS8900 &&
+ /* Check if the ISA IRQ has been set */
+ (i = readreg(dev, PP_CS8920_ISAINT) & 0xff,
+ (i != 0 && i < CS8920_NO_INTS))) {
+ if (!dev->irq)
+ dev->irq = i;
+ } else {
+ i = lp->isa_config & INT_NO_MASK;
+ if (lp->chip_type == CS8900) {
+#ifdef CONFIG_CS89x0_NONISA_IRQ
+ i = cs8900_irq_map[0];
+#else
+ /* Translate the IRQ using the IRQ mapping table. */
+ if (i >= ARRAY_SIZE(cs8900_irq_map))
+ printk("\ncs89x0: invalid ISA interrupt number %d\n", i);
+ else
+ i = cs8900_irq_map[i];
+
+ lp->irq_map = CS8900_IRQ_MAP; /* fixed IRQ map for CS8900 */
+ } else {
+ int irq_map_buff[IRQ_MAP_LEN/2];
+
+ if (get_eeprom_data(dev, IRQ_MAP_EEPROM_DATA,
+ IRQ_MAP_LEN/2,
+ irq_map_buff) >= 0) {
+ if ((irq_map_buff[0] & 0xff) == PNP_IRQ_FRMT)
+ lp->irq_map = (irq_map_buff[0]>>8) | (irq_map_buff[1] << 8);
+ }
+#endif
+ }
+ if (!dev->irq)
+ dev->irq = i;
+ }
+
+ printk(" IRQ %d", dev->irq);
+
+#if ALLOW_DMA
+ if (lp->use_dma) {
+ get_dma_channel(dev);
+ printk(", DMA %d", dev->dma);
+ }
+ else
+#endif
+ {
+ printk(", programmed I/O");
+ }
+
+ /* print the ethernet address. */
+ printk(", MAC %pM", dev->dev_addr);
+
+ dev->netdev_ops = &net_ops;
+ dev->watchdog_timeo = HZ;
+
+ printk("\n");
+ if (net_debug)
+ printk("cs89x0_probe1() successful\n");
+
+ retval = register_netdev(dev);
+ if (retval)
+ goto out3;
+ return 0;
+out3:
+ writeword(dev->base_addr, ADD_PORT, PP_ChipID);
+out2:
+ release_region(ioaddr & ~3, NETCARD_IO_EXTENT);
+out1:
+ return retval;
+}
+
+
+/*********************************
+ * This page contains DMA routines
+**********************************/
+
+#if ALLOW_DMA
+
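+/* ISA DMA transfers on channels 5-7 cannot cross a 128KB page boundary;
+ * dma_page_eq() checks that two addresses share the same 128KB (1 << 17)
+ * page, so the whole receive buffer must fit within one such page. */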
+#define dma_page_eq(ptr1, ptr2) ((long)(ptr1)>>17 == (long)(ptr2)>>17)
+
+static void
+get_dma_channel(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ if (lp->dma) {
+ dev->dma = lp->dma;
+ lp->isa_config |= ISA_RxDMA;
+ } else {
+ if ((lp->isa_config & ANY_ISA_DMA) == 0)
+ return;
+ dev->dma = lp->isa_config & DMA_NO_MASK;
+ if (lp->chip_type == CS8900)
+ dev->dma += 5;
+ if (dev->dma < 5 || dev->dma > 7) {
+ lp->isa_config &= ~ANY_ISA_DMA;
+ return;
+ }
+ }
+}
+
+static void
+write_dma(struct net_device *dev, int chip_type, int dma)
+{
+ struct net_local *lp = netdev_priv(dev);
+ if ((lp->isa_config & ANY_ISA_DMA) == 0)
+ return;
+ if (chip_type == CS8900) {
+ writereg(dev, PP_CS8900_ISADMA, dma-5);
+ } else {
+ writereg(dev, PP_CS8920_ISADMA, dma);
+ }
+}
+
+static void
+set_dma_cfg(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ if (lp->use_dma) {
+ if ((lp->isa_config & ANY_ISA_DMA) == 0) {
+ if (net_debug > 3)
+ printk("set_dma_cfg(): no DMA\n");
+ return;
+ }
+ if (lp->isa_config & ISA_RxDMA) {
+ lp->curr_rx_cfg |= RX_DMA_ONLY;
+ if (net_debug > 3)
+ printk("set_dma_cfg(): RX_DMA_ONLY\n");
+ } else {
+ lp->curr_rx_cfg |= AUTO_RX_DMA; /* not that we support it... */
+ if (net_debug > 3)
+ printk("set_dma_cfg(): AUTO_RX_DMA\n");
+ }
+ }
+}
+
+static int
+dma_bufcfg(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ if (lp->use_dma)
+ return (lp->isa_config & ANY_ISA_DMA)? RX_DMA_ENBL : 0;
+ else
+ return 0;
+}
+
+static int
+dma_busctl(struct net_device *dev)
+{
+ int retval = 0;
+ struct net_local *lp = netdev_priv(dev);
+ if (lp->use_dma) {
+ if (lp->isa_config & ANY_ISA_DMA)
+ retval |= RESET_RX_DMA; /* Reset the DMA pointer */
+ if (lp->isa_config & DMA_BURST)
+ retval |= DMA_BURST_MODE; /* Does ISA config specify DMA burst ? */
+ if (lp->dmasize == 64)
+ retval |= RX_DMA_SIZE_64K; /* did they ask for 64K? */
+ retval |= MEMORY_ON; /* we need memory enabled to use DMA. */
+ }
+ return retval;
+}
+
+static void
+dma_rx(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ struct sk_buff *skb;
+ int status, length;
+ unsigned char *bp = lp->rx_dma_ptr;
+
+ status = bp[0] + (bp[1]<<8);
+ length = bp[2] + (bp[3]<<8);
+ bp += 4;
+ if (net_debug > 5) {
+ printk( "%s: receiving DMA packet at %lx, status %x, length %x\n",
+ dev->name, (unsigned long)bp, status, length);
+ }
+ if ((status & RX_OK) == 0) {
+ count_rx_errors(status, dev);
+ goto skip_this_frame;
+ }
+
+ /* Malloc up new buffer. */
+ skb = dev_alloc_skb(length + 2);
+ if (skb == NULL) {
+ if (net_debug) /* I don't think we want to do this to a stressed system */
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ dev->stats.rx_dropped++;
+
+ /* AKPM: advance bp to the next frame */
+skip_this_frame:
+ bp += (length + 3) & ~3;
+ if (bp >= lp->end_dma_buff) bp -= lp->dmasize*1024;
+ lp->rx_dma_ptr = bp;
+ return;
+ }
+ skb_reserve(skb, 2); /* longword align L3 header */
+
+ if (bp + length > lp->end_dma_buff) {
+ int semi_cnt = lp->end_dma_buff - bp;
+ memcpy(skb_put(skb,semi_cnt), bp, semi_cnt);
+ memcpy(skb_put(skb,length - semi_cnt), lp->dma_buff,
+ length - semi_cnt);
+ } else {
+ memcpy(skb_put(skb,length), bp, length);
+ }
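+	/* frames in the DMA ring are padded to a longword boundary; advance
+	   the read pointer past this frame and wrap at the end of the ring */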
+ bp += (length + 3) & ~3;
+ if (bp >= lp->end_dma_buff) bp -= lp->dmasize*1024;
+ lp->rx_dma_ptr = bp;
+
+ if (net_debug > 3) {
+ printk( "%s: received %d byte DMA packet of type %x\n",
+ dev->name, length,
+ (skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]);
+ }
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += length;
+}
+
+#endif /* ALLOW_DMA */
+
+static void __init reset_chip(struct net_device *dev)
+{
+#if !defined(CONFIG_MACH_MX31ADS)
+#if !defined(CONFIG_CS89x0_NONISA_IRQ)
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+#endif /* CONFIG_CS89x0_NONISA_IRQ */
+	unsigned long reset_start_time;
+
+ writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
+
+ /* wait 30 ms */
+ msleep(30);
+
+#if !defined(CONFIG_CS89x0_NONISA_IRQ)
+ if (lp->chip_type != CS8900) {
+ /* Hardware problem requires PNP registers to be reconfigured after a reset */
+ writeword(ioaddr, ADD_PORT, PP_CS8920_ISAINT);
+ outb(dev->irq, ioaddr + DATA_PORT);
+ outb(0, ioaddr + DATA_PORT + 1);
+
+ writeword(ioaddr, ADD_PORT, PP_CS8920_ISAMemB);
+ outb((dev->mem_start >> 16) & 0xff, ioaddr + DATA_PORT);
+ outb((dev->mem_start >> 8) & 0xff, ioaddr + DATA_PORT + 1);
+ }
+#endif /* CONFIG_CS89x0_NONISA_IRQ */
+
+ /* Wait until the chip is reset */
+ reset_start_time = jiffies;
+ while( (readreg(dev, PP_SelfST) & INIT_DONE) == 0 && jiffies - reset_start_time < 2)
+ ;
+#endif /* !CONFIG_MACH_MX31ADS */
+}
+
+
+static void
+control_dc_dc(struct net_device *dev, int on_not_off)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned int selfcontrol;
+	unsigned long timenow = jiffies;
+ /* control the DC to DC convertor in the SelfControl register.
+ Note: This is hooked up to a general purpose pin, might not
+ always be a DC to DC convertor. */
+
+ selfcontrol = HCB1_ENBL; /* Enable the HCB1 bit as an output */
+ if (((lp->adapter_cnf & A_CNF_DC_DC_POLARITY) != 0) ^ on_not_off)
+ selfcontrol |= HCB1;
+ else
+ selfcontrol &= ~HCB1;
+ writereg(dev, PP_SelfCTL, selfcontrol);
+
+	/* Wait for the DC/DC converter to power up - up to one second */
+ while (jiffies - timenow < HZ)
+ ;
+}
+
+#define DETECTED_NONE 0
+#define DETECTED_RJ45H 1
+#define DETECTED_RJ45F 2
+#define DETECTED_AUI 3
+#define DETECTED_BNC 4
+
+static int
+detect_tp(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+	unsigned long timenow = jiffies;
+ int fdx;
+
+ if (net_debug > 1) printk("%s: Attempting TP\n", dev->name);
+
+ /* If connected to another full duplex capable 10-Base-T card the link pulses
+ seem to be lost when the auto detect bit in the LineCTL is set.
+ To overcome this the auto detect bit will be cleared whilst testing the
+ 10-Base-T interface. This would not be necessary for the sparrow chip but
+ is simpler to do it anyway. */
+ writereg(dev, PP_LineCTL, lp->linectl &~ AUI_ONLY);
+ control_dc_dc(dev, 0);
+
+	/* Delay for the hardware to work out if the TP cable is present -
+	   15 jiffies (150ms at HZ=100) */
+ for (timenow = jiffies; jiffies - timenow < 15; )
+ ;
+ if ((readreg(dev, PP_LineST) & LINK_OK) == 0)
+ return DETECTED_NONE;
+
+ if (lp->chip_type == CS8900) {
+ switch (lp->force & 0xf0) {
+#if 0
+ case FORCE_AUTO:
+ printk("%s: cs8900 doesn't autonegotiate\n",dev->name);
+ return DETECTED_NONE;
+#endif
+ /* CS8900 doesn't support AUTO, change to HALF*/
+ case FORCE_AUTO:
+ lp->force &= ~FORCE_AUTO;
+ lp->force |= FORCE_HALF;
+ break;
+ case FORCE_HALF:
+ break;
+ case FORCE_FULL:
+ writereg(dev, PP_TestCTL, readreg(dev, PP_TestCTL) | FDX_8900);
+ break;
+ }
+ fdx = readreg(dev, PP_TestCTL) & FDX_8900;
+ } else {
+ switch (lp->force & 0xf0) {
+ case FORCE_AUTO:
+ lp->auto_neg_cnf = AUTO_NEG_ENABLE;
+ break;
+ case FORCE_HALF:
+ lp->auto_neg_cnf = 0;
+ break;
+ case FORCE_FULL:
+ lp->auto_neg_cnf = RE_NEG_NOW | ALLOW_FDX;
+ break;
+ }
+
+ writereg(dev, PP_AutoNegCTL, lp->auto_neg_cnf & AUTO_NEG_MASK);
+
+ if ((lp->auto_neg_cnf & AUTO_NEG_BITS) == AUTO_NEG_ENABLE) {
+ printk(KERN_INFO "%s: negotiating duplex...\n",dev->name);
+ while (readreg(dev, PP_AutoNegST) & AUTO_NEG_BUSY) {
+ if (jiffies - timenow > 4000) {
+ printk(KERN_ERR "**** Full / half duplex auto-negotiation timed out ****\n");
+ break;
+ }
+ }
+ }
+ fdx = readreg(dev, PP_AutoNegST) & FDX_ACTIVE;
+ }
+ if (fdx)
+ return DETECTED_RJ45F;
+ else
+ return DETECTED_RJ45H;
+}
+
+/* send a test packet - return true if carrier bits are ok */
+static int
+send_test_pkt(struct net_device *dev)
+{
+ char test_packet[] = { 0,0,0,0,0,0, 0,0,0,0,0,0,
+ 0, 46, /* A 46 in network order */
+ 0, 0, /* DSAP=0 & SSAP=0 fields */
+ 0xf3, 0 /* Control (Test Req + P bit set) */ };
+	unsigned long timenow = jiffies;
+
+ writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_TX_ON);
+
+ memcpy(test_packet, dev->dev_addr, ETH_ALEN);
+ memcpy(test_packet+ETH_ALEN, dev->dev_addr, ETH_ALEN);
+
+ writeword(dev->base_addr, TX_CMD_PORT, TX_AFTER_ALL);
+ writeword(dev->base_addr, TX_LEN_PORT, ETH_ZLEN);
+
+ /* Test to see if the chip has allocated memory for the packet */
+ while (jiffies - timenow < 5)
+ if (readreg(dev, PP_BusST) & READY_FOR_TX_NOW)
+ break;
+ if (jiffies - timenow >= 5)
+ return 0; /* this shouldn't happen */
+
+ /* Write the contents of the packet */
+ writewords(dev->base_addr, TX_FRAME_PORT,test_packet,(ETH_ZLEN+1) >>1);
+
+ if (net_debug > 1) printk("Sending test packet ");
+ /* wait a couple of jiffies for packet to be received */
+ for (timenow = jiffies; jiffies - timenow < 3; )
+ ;
+ if ((readreg(dev, PP_TxEvent) & TX_SEND_OK_BITS) == TX_OK) {
+ if (net_debug > 1) printk("succeeded\n");
+ return 1;
+ }
+ if (net_debug > 1) printk("failed\n");
+ return 0;
+}
+
+
+static int
+detect_aui(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ if (net_debug > 1) printk("%s: Attempting AUI\n", dev->name);
+ control_dc_dc(dev, 0);
+
+ writereg(dev, PP_LineCTL, (lp->linectl &~ AUTO_AUI_10BASET) | AUI_ONLY);
+
+ if (send_test_pkt(dev))
+ return DETECTED_AUI;
+ else
+ return DETECTED_NONE;
+}
+
+static int
+detect_bnc(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ if (net_debug > 1) printk("%s: Attempting BNC\n", dev->name);
+ control_dc_dc(dev, 1);
+
+ writereg(dev, PP_LineCTL, (lp->linectl &~ AUTO_AUI_10BASET) | AUI_ONLY);
+
+ if (send_test_pkt(dev))
+ return DETECTED_BNC;
+ else
+ return DETECTED_NONE;
+}
+
+
+static void
+write_irq(struct net_device *dev, int chip_type, int irq)
+{
+ int i;
+
+ if (chip_type == CS8900) {
+ /* Search the mapping table for the corresponding IRQ pin. */
+ for (i = 0; i != ARRAY_SIZE(cs8900_irq_map); i++)
+ if (cs8900_irq_map[i] == irq)
+ break;
+ /* Not found */
+ if (i == ARRAY_SIZE(cs8900_irq_map))
+ i = 3;
+ writereg(dev, PP_CS8900_ISAINT, i);
+ } else {
+ writereg(dev, PP_CS8920_ISAINT, irq);
+ }
+}
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine should set everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+   there is a non-reboot way to recover if something goes wrong.
+ */
+
+/* AKPM: do we need to do any locking here? */
+
+static int
+net_open(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int result = 0;
+ int i;
+ int ret;
+
+ if (dev->irq < 2) {
+ /* Allow interrupts to be generated by the chip */
+/* Cirrus' release had this: */
+#if 0
+ writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL)|ENABLE_IRQ );
+#endif
+/* And 2.3.47 had this: */
+ writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);
+
+ for (i = 2; i < CS8920_NO_INTS; i++) {
+ if ((1 << i) & lp->irq_map) {
+ if (request_irq(i, net_interrupt, 0, dev->name, dev) == 0) {
+ dev->irq = i;
+ write_irq(dev, lp->chip_type, i);
+ /* writereg(dev, PP_BufCFG, GENERATE_SW_INTERRUPT); */
+ break;
+ }
+ }
+ }
+
+ if (i >= CS8920_NO_INTS) {
+ writereg(dev, PP_BusCTL, 0); /* disable interrupts. */
+ printk(KERN_ERR "cs89x0: can't get an interrupt\n");
+ ret = -EAGAIN;
+ goto bad_out;
+ }
+ }
+ else
+ {
+#ifndef CONFIG_CS89x0_NONISA_IRQ
+ if (((1 << dev->irq) & lp->irq_map) == 0) {
+ printk(KERN_ERR "%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
+ dev->name, dev->irq, lp->irq_map);
+ ret = -EAGAIN;
+ goto bad_out;
+ }
+#endif
+/* FIXME: Cirrus' release had this: */
+ writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL)|ENABLE_IRQ );
+/* And 2.3.47 had this: */
+#if 0
+ writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);
+#endif
+ write_irq(dev, lp->chip_type, dev->irq);
+ ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev);
+ if (ret) {
+ printk(KERN_ERR "cs89x0: request_irq(%d) failed\n", dev->irq);
+ goto bad_out;
+ }
+ }
+
+#if ALLOW_DMA
+ if (lp->use_dma) {
+ if (lp->isa_config & ANY_ISA_DMA) {
+ unsigned long flags;
+ lp->dma_buff = (unsigned char *)__get_dma_pages(GFP_KERNEL,
+ get_order(lp->dmasize * 1024));
+
+ if (!lp->dma_buff) {
+ printk(KERN_ERR "%s: cannot get %dK memory for DMA\n", dev->name, lp->dmasize);
+ goto release_irq;
+ }
+ if (net_debug > 1) {
+ printk( "%s: dma %lx %lx\n",
+ dev->name,
+ (unsigned long)lp->dma_buff,
+ (unsigned long)isa_virt_to_bus(lp->dma_buff));
+ }
+ if ((unsigned long) lp->dma_buff >= MAX_DMA_ADDRESS ||
+ !dma_page_eq(lp->dma_buff, lp->dma_buff+lp->dmasize*1024-1)) {
+ printk(KERN_ERR "%s: not usable as DMA buffer\n", dev->name);
+ goto release_irq;
+ }
+ memset(lp->dma_buff, 0, lp->dmasize * 1024); /* Why? */
+ if (request_dma(dev->dma, dev->name)) {
+ printk(KERN_ERR "%s: cannot get dma channel %d\n", dev->name, dev->dma);
+ goto release_irq;
+ }
+ write_dma(dev, lp->chip_type, dev->dma);
+ lp->rx_dma_ptr = lp->dma_buff;
+ lp->end_dma_buff = lp->dma_buff + lp->dmasize*1024;
+ spin_lock_irqsave(&lp->lock, flags);
+ disable_dma(dev->dma);
+ clear_dma_ff(dev->dma);
+ set_dma_mode(dev->dma, DMA_RX_MODE); /* auto_init as well */
+ set_dma_addr(dev->dma, isa_virt_to_bus(lp->dma_buff));
+ set_dma_count(dev->dma, lp->dmasize*1024);
+ enable_dma(dev->dma);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ }
+#endif /* ALLOW_DMA */
+
+ /* set the Ethernet address */
+ for (i=0; i < ETH_ALEN/2; i++)
+ writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8));
+
+ /* while we're testing the interface, leave interrupts disabled */
+ writereg(dev, PP_BusCTL, MEMORY_ON);
+
+ /* Set the LineCTL quintuplet based on adapter configuration read from EEPROM */
+ if ((lp->adapter_cnf & A_CNF_EXTND_10B_2) && (lp->adapter_cnf & A_CNF_LOW_RX_SQUELCH))
+ lp->linectl = LOW_RX_SQUELCH;
+ else
+ lp->linectl = 0;
+
+ /* check to make sure that they have the "right" hardware available */
+ switch(lp->adapter_cnf & A_CNF_MEDIA_TYPE) {
+ case A_CNF_MEDIA_10B_T: result = lp->adapter_cnf & A_CNF_10B_T; break;
+ case A_CNF_MEDIA_AUI: result = lp->adapter_cnf & A_CNF_AUI; break;
+ case A_CNF_MEDIA_10B_2: result = lp->adapter_cnf & A_CNF_10B_2; break;
+ default: result = lp->adapter_cnf & (A_CNF_10B_T | A_CNF_AUI | A_CNF_10B_2);
+ }
+ if (!result) {
+ printk(KERN_ERR "%s: EEPROM is configured for unavailable media\n", dev->name);
+release_dma:
+#if ALLOW_DMA
+ free_dma(dev->dma);
+release_irq:
+ release_dma_buff(lp);
+#endif
+ writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON));
+ free_irq(dev->irq, dev);
+ ret = -EAGAIN;
+ goto bad_out;
+ }
+
+ /* set the hardware to the configured choice */
+ switch(lp->adapter_cnf & A_CNF_MEDIA_TYPE) {
+ case A_CNF_MEDIA_10B_T:
+ result = detect_tp(dev);
+ if (result==DETECTED_NONE) {
+ printk(KERN_WARNING "%s: 10Base-T (RJ-45) has no cable\n", dev->name);
+ if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
+ result = DETECTED_RJ45H; /* Yes! I don't care if I see a link pulse */
+ }
+ break;
+ case A_CNF_MEDIA_AUI:
+ result = detect_aui(dev);
+ if (result==DETECTED_NONE) {
+ printk(KERN_WARNING "%s: 10Base-5 (AUI) has no cable\n", dev->name);
+ if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
+			result = DETECTED_AUI; /* Yes! I don't care if I see a carrier */
+ }
+ break;
+ case A_CNF_MEDIA_10B_2:
+ result = detect_bnc(dev);
+ if (result==DETECTED_NONE) {
+ printk(KERN_WARNING "%s: 10Base-2 (BNC) has no cable\n", dev->name);
+ if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
+ result = DETECTED_BNC; /* Yes! I don't care if I can xmit a packet */
+ }
+ break;
+ case A_CNF_MEDIA_AUTO:
+ writereg(dev, PP_LineCTL, lp->linectl | AUTO_AUI_10BASET);
+ if (lp->adapter_cnf & A_CNF_10B_T)
+ if ((result = detect_tp(dev)) != DETECTED_NONE)
+ break;
+ if (lp->adapter_cnf & A_CNF_AUI)
+ if ((result = detect_aui(dev)) != DETECTED_NONE)
+ break;
+ if (lp->adapter_cnf & A_CNF_10B_2)
+ if ((result = detect_bnc(dev)) != DETECTED_NONE)
+ break;
+ printk(KERN_ERR "%s: no media detected\n", dev->name);
+ goto release_dma;
+ }
+ switch(result) {
+ case DETECTED_NONE:
+ printk(KERN_ERR "%s: no network cable attached to configured media\n", dev->name);
+ goto release_dma;
+ case DETECTED_RJ45H:
+ printk(KERN_INFO "%s: using half-duplex 10Base-T (RJ-45)\n", dev->name);
+ break;
+ case DETECTED_RJ45F:
+ printk(KERN_INFO "%s: using full-duplex 10Base-T (RJ-45)\n", dev->name);
+ break;
+ case DETECTED_AUI:
+ printk(KERN_INFO "%s: using 10Base-5 (AUI)\n", dev->name);
+ break;
+ case DETECTED_BNC:
+ printk(KERN_INFO "%s: using 10Base-2 (BNC)\n", dev->name);
+ break;
+ }
+
+ /* Turn on both receive and transmit operations */
+ writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_RX_ON | SERIAL_TX_ON);
+
+ /* Receive only error free packets addressed to this card */
+ lp->rx_mode = 0;
+ writereg(dev, PP_RxCTL, DEF_RX_ACCEPT);
+
+ lp->curr_rx_cfg = RX_OK_ENBL | RX_CRC_ERROR_ENBL;
+
+ if (lp->isa_config & STREAM_TRANSFER)
+ lp->curr_rx_cfg |= RX_STREAM_ENBL;
+#if ALLOW_DMA
+ set_dma_cfg(dev);
+#endif
+ writereg(dev, PP_RxCFG, lp->curr_rx_cfg);
+
+ writereg(dev, PP_TxCFG, TX_LOST_CRS_ENBL | TX_SQE_ERROR_ENBL | TX_OK_ENBL |
+ TX_LATE_COL_ENBL | TX_JBR_ENBL | TX_ANY_COL_ENBL | TX_16_COL_ENBL);
+
+ writereg(dev, PP_BufCFG, READY_FOR_TX_ENBL | RX_MISS_COUNT_OVRFLOW_ENBL |
+#if ALLOW_DMA
+ dma_bufcfg(dev) |
+#endif
+ TX_COL_COUNT_OVRFLOW_ENBL | TX_UNDERRUN_ENBL);
+
+ /* now that we've got our act together, enable everything */
+ writereg(dev, PP_BusCTL, ENABLE_IRQ
+ | (dev->mem_start?MEMORY_ON : 0) /* turn memory on */
+#if ALLOW_DMA
+ | dma_busctl(dev)
+#endif
+ );
+ netif_start_queue(dev);
+ if (net_debug > 1)
+ printk("cs89x0: net_open() succeeded\n");
+ return 0;
+bad_out:
+ return ret;
+}
+
+static void net_timeout(struct net_device *dev)
+{
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+ if (net_debug > 0) printk("%s: transmit timed out, %s?\n", dev->name,
+ tx_done(dev) ? "IRQ conflict ?" : "network cable problem");
+ /* Try to restart the adaptor. */
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t net_send_packet(struct sk_buff *skb,struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ if (net_debug > 3) {
+ printk("%s: sent %d byte packet of type %x\n",
+ dev->name, skb->len,
+ (skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]);
+ }
+
+ /* keep the upload from being interrupted, since we
+ ask the chip to start transmitting before the
+ whole packet has been completely uploaded. */
+
+ spin_lock_irqsave(&lp->lock, flags);
+ netif_stop_queue(dev);
+
+ /* initiate a transmit sequence */
+ writeword(dev->base_addr, TX_CMD_PORT, lp->send_cmd);
+ writeword(dev->base_addr, TX_LEN_PORT, skb->len);
+
+ /* Test to see if the chip has allocated memory for the packet */
+ if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) {
+ /*
+ * Gasp! It hasn't. But that shouldn't happen since
+ * we're waiting for TxOk, so return 1 and requeue this packet.
+ */
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+ if (net_debug) printk("cs89x0: Tx buffer not free!\n");
+ return NETDEV_TX_BUSY;
+ }
+ /* Write the contents of the packet */
+ writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ dev->stats.tx_bytes += skb->len;
+ dev_kfree_skb (skb);
+
+ /*
+ * We DO NOT call netif_wake_queue() here.
+ * We also DO NOT call netif_start_queue().
+ *
+ * Either of these would cause another bottom half run through
+ * net_send_packet() before this packet has fully gone out. That causes
+ * us to hit the "Gasp!" above and the send is rescheduled; it runs like
+ * a dog. We just return and wait for the Tx completion interrupt handler
+ * to restart the netdevice layer.
+ */
+
+ return NETDEV_TX_OK;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+
+static irqreturn_t net_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct net_local *lp;
+ int ioaddr, status;
+ int handled = 0;
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ /* we MUST read all the events out of the ISQ, otherwise we'll never
+ get interrupted again. As a consequence, we can't have any limit
+ on the number of times we loop in the interrupt handler. The
+ hardware guarantees that eventually we'll run out of events. Of
+ course, if you're on a slow machine, and packets are arriving
+ faster than you can read them off, you're screwed. Hasta la
+ vista, baby! */
+ while ((status = readword(dev->base_addr, ISQ_PORT))) {
+ if (net_debug > 4)
+ printk("%s: event=%04x\n", dev->name, status);
+ handled = 1;
+ switch (status & ISQ_EVENT_MASK) {
+ case ISQ_RECEIVER_EVENT:
+ /* Got a packet(s). */
+ net_rx(dev);
+ break;
+ case ISQ_TRANSMITTER_EVENT:
+ dev->stats.tx_packets++;
+ netif_wake_queue(dev); /* Inform upper layers. */
+ if ((status & ( TX_OK |
+ TX_LOST_CRS |
+ TX_SQE_ERROR |
+ TX_LATE_COL |
+ TX_16_COL)) != TX_OK) {
+ if ((status & TX_OK) == 0)
+ dev->stats.tx_errors++;
+ if (status & TX_LOST_CRS)
+ dev->stats.tx_carrier_errors++;
+ if (status & TX_SQE_ERROR)
+ dev->stats.tx_heartbeat_errors++;
+ if (status & TX_LATE_COL)
+ dev->stats.tx_window_errors++;
+ if (status & TX_16_COL)
+ dev->stats.tx_aborted_errors++;
+ }
+ break;
+ case ISQ_BUFFER_EVENT:
+ if (status & READY_FOR_TX) {
+ /* we tried to transmit a packet earlier,
+ but inexplicably ran out of buffers.
+ That shouldn't happen since we only ever
+ load one packet. Shrug. Do the right
+ thing anyway. */
+ netif_wake_queue(dev); /* Inform upper layers. */
+ }
+ if (status & TX_UNDERRUN) {
+ if (net_debug > 0) printk("%s: transmit underrun\n", dev->name);
+ lp->send_underrun++;
+ if (lp->send_underrun == 3) lp->send_cmd = TX_AFTER_381;
+ else if (lp->send_underrun == 6) lp->send_cmd = TX_AFTER_ALL;
+ /* transmit cycle is done, although
+ frame wasn't transmitted - this
+ avoids having to wait for the upper
+ layers to timeout on us, in the
+ event of a tx underrun */
+ netif_wake_queue(dev); /* Inform upper layers. */
+ }
+#if ALLOW_DMA
+ if (lp->use_dma && (status & RX_DMA)) {
+ int count = readreg(dev, PP_DmaFrameCnt);
+ while (count) {
+ if (net_debug > 5 || (net_debug > 2 && count > 1))
+ printk("%s: receiving %d DMA frames\n", dev->name, count);
+ dma_rx(dev);
+ if (--count == 0)
+ count = readreg(dev, PP_DmaFrameCnt);
+ if (net_debug > 2 && count > 0)
+ printk("%s: continuing with %d DMA frames\n", dev->name, count);
+ }
+ }
+#endif
+ break;
+ case ISQ_RX_MISS_EVENT:
+ dev->stats.rx_missed_errors += (status >> 6);
+ break;
+ case ISQ_TX_COL_EVENT:
+ dev->stats.collisions += (status >> 6);
+ break;
+ }
+ }
+ return IRQ_RETVAL(handled);
+}
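+
+/* A worked ISQ example (illustrative): a status word of 0x0104 is
+   ISQ_RECEIVER_EVENT (0x04) with RX_OK (0x0100) set, i.e. a good frame
+   is waiting; 0x0208 would be ISQ_TRANSMITTER_EVENT with TX_LATE_COL.
+   The low six bits (ISQ_EVENT_MASK) select the event queue, the upper
+   bits carry that event's status flags. */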
+
+static void
+count_rx_errors(int status, struct net_device *dev)
+{
+ dev->stats.rx_errors++;
+ if (status & RX_RUNT)
+ dev->stats.rx_length_errors++;
+ if (status & RX_EXTRA_DATA)
+ dev->stats.rx_length_errors++;
+ if ((status & RX_CRC_ERROR) && !(status & (RX_EXTRA_DATA|RX_RUNT)))
+ /* per str 172 */
+ dev->stats.rx_crc_errors++;
+ if (status & RX_DRIBBLE)
+ dev->stats.rx_frame_errors++;
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void
+net_rx(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ int status, length;
+
+ int ioaddr = dev->base_addr;
+ status = readword(ioaddr, RX_FRAME_PORT);
+ length = readword(ioaddr, RX_FRAME_PORT);
+
+ if ((status & RX_OK) == 0) {
+ count_rx_errors(status, dev);
+ return;
+ }
+
+ /* Malloc up new buffer. */
+ skb = dev_alloc_skb(length + 2);
+ if (skb == NULL) {
+#if 0 /* Again, this seems a cruel thing to do */
+ printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
+#endif
+ dev->stats.rx_dropped++;
+ return;
+ }
+ skb_reserve(skb, 2); /* longword align L3 header */
+
+ readwords(ioaddr, RX_FRAME_PORT, skb_put(skb, length), length >> 1);
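+ /* an odd-length frame delivers its final byte in the low half of
+    one more word read from RX_FRAME_PORT */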
+ if (length & 1)
+ skb->data[length-1] = readword(ioaddr, RX_FRAME_PORT);
+
+ if (net_debug > 3) {
+ printk( "%s: received %d byte packet of type %x\n",
+ dev->name, length,
+ (skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]);
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += length;
+}
+
+#if ALLOW_DMA
+static void release_dma_buff(struct net_local *lp)
+{
+ if (lp->dma_buff) {
+ free_pages((unsigned long)(lp->dma_buff), get_order(lp->dmasize * 1024));
+ lp->dma_buff = NULL;
+ }
+}
+#endif
+
+/* The inverse routine to net_open(). */
+static int
+net_close(struct net_device *dev)
+{
+#if ALLOW_DMA
+ struct net_local *lp = netdev_priv(dev);
+#endif
+
+ netif_stop_queue(dev);
+
+ writereg(dev, PP_RxCFG, 0);
+ writereg(dev, PP_TxCFG, 0);
+ writereg(dev, PP_BufCFG, 0);
+ writereg(dev, PP_BusCTL, 0);
+
+ free_irq(dev->irq, dev);
+
+#if ALLOW_DMA
+ if (lp->use_dma && lp->dma) {
+ free_dma(dev->dma);
+ release_dma_buff(lp);
+ }
+#endif
+
+ /* Update the statistics here. */
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct net_device_stats *
+net_get_stats(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ /* Update the statistics from the device registers. */
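+ /* the counts occupy the upper ten bits of each register, hence
+    the shift by six */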
+ dev->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6);
+ dev->stats.collisions += (readreg(dev, PP_TxCol) >> 6);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return &dev->stats;
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ if (dev->flags & IFF_PROMISC) {
+ lp->rx_mode = RX_ALL_ACCEPT;
+ } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
+ /* The multicast-accept list is initialized to accept-all, and we
+    rely on higher-level filtering for now. */
+ lp->rx_mode = RX_MULTCAST_ACCEPT;
+ } else {
+ lp->rx_mode = 0;
+ }
+
+ writereg(dev, PP_RxCTL, DEF_RX_ACCEPT | lp->rx_mode);
+
+ /* in promiscuous mode, we accept errored packets, so we have to enable interrupts on them also */
+ writereg(dev, PP_RxCFG, lp->curr_rx_cfg |
+ (lp->rx_mode == RX_ALL_ACCEPT? (RX_CRC_ERROR_ENBL|RX_RUNT_ENBL|RX_EXTRA_DATA_ENBL) : 0));
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+
+static int set_mac_address(struct net_device *dev, void *p)
+{
+ int i;
+ struct sockaddr *addr = p;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ if (net_debug)
+ printk("%s: Setting MAC address to %pM.\n",
+ dev->name, dev->dev_addr);
+
+ /* set the Ethernet address */
+ for (i=0; i < ETH_ALEN/2; i++)
+ writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8));
+
+ return 0;
+}
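+
+/* A worked example of the PP_IA packing above (illustrative): for the
+   address 00:11:22:33:44:55 the loop writes 0x1100 to PP_IA, 0x3322 to
+   PP_IA+2 and 0x5544 to PP_IA+4 - each register takes one byte pair in
+   little-endian order. */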
+
+#ifdef MODULE
+
+static struct net_device *dev_cs89x0;
+
+/*
+ * Support the 'debug' module parm even if we're compiled for non-debug to
+ * avoid breaking someone's startup scripts
+ */
+
+static int io;
+static int irq;
+static int debug;
+static char media[8];
+static int duplex=-1;
+
+static int use_dma; /* These generate unused var warnings if ALLOW_DMA = 0 */
+static int dma;
+static int dmasize=16; /* or 64 */
+
+module_param(io, int, 0);
+module_param(irq, int, 0);
+module_param(debug, int, 0);
+module_param_string(media, media, sizeof(media), 0);
+module_param(duplex, int, 0);
+module_param(dma, int, 0);
+module_param(dmasize, int, 0);
+module_param(use_dma, int, 0);
+MODULE_PARM_DESC(io, "cs89x0 I/O base address");
+MODULE_PARM_DESC(irq, "cs89x0 IRQ number");
+#if DEBUGGING
+MODULE_PARM_DESC(debug, "cs89x0 debug level (0-6)");
+#else
+MODULE_PARM_DESC(debug, "(ignored)");
+#endif
+MODULE_PARM_DESC(media, "Set cs89x0 adapter(s) media type(s) (rj45,bnc,aui)");
+/* No other value than -1 for duplex seems to be currently interpreted */
+MODULE_PARM_DESC(duplex, "(ignored)");
+#if ALLOW_DMA
+MODULE_PARM_DESC(dma, "cs89x0 ISA DMA channel; ignored if use_dma=0");
+MODULE_PARM_DESC(dmasize, "cs89x0 DMA size in kB (16,64); ignored if use_dma=0");
+MODULE_PARM_DESC(use_dma, "cs89x0 using DMA (0-1)");
+#else
+MODULE_PARM_DESC(dma, "(ignored)");
+MODULE_PARM_DESC(dmasize, "(ignored)");
+MODULE_PARM_DESC(use_dma, "(ignored)");
+#endif
+
+MODULE_AUTHOR("Mike Cruse, Russwll Nelson <nelson@crynwr.com>, Andrew Morton");
+MODULE_LICENSE("GPL");
+
+
+/*
+* media=t - specify media type
+ or media=2
+ or media=aui
+ or media=auto
+* duplex=0 - specify forced half/full/autonegotiate duplex
+* debug=# - debug level
+
+
+* Default Chip Configuration:
+ * DMA Burst = enabled
+ * IOCHRDY Enabled = enabled
+ * UseSA = enabled
+ * CS8900 defaults to half-duplex if not specified on command-line
+ * CS8920 defaults to autoneg if not specified on command-line
+ * Use reset defaults for other config parameters
+
+* Assumptions:
+ * media type specified is supported (circuitry is present)
+ * if memory address is > 1MB, then required mem decode hw is present
+ * if 10B-2, then agent other than driver will enable DC/DC converter
+ (hw or software util)
+
+
+*/
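+
+/*
+ * Example invocation (illustrative only - the io/irq values are
+ * board-specific):
+ *
+ *	modprobe cs89x0 io=0x300 irq=10 media=rj45 debug=1
+ *
+ * io= is mandatory for the module build, since autoprobing is refused
+ * in init_module() below.
+ */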
+
+int __init init_module(void)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
+ struct net_local *lp;
+ int ret = 0;
+
+#if DEBUGGING
+ net_debug = debug;
+#else
+ debug = 0;
+#endif
+ if (!dev)
+ return -ENOMEM;
+
+ dev->irq = irq;
+ dev->base_addr = io;
+ lp = netdev_priv(dev);
+
+#if ALLOW_DMA
+ if (use_dma) {
+ lp->use_dma = use_dma;
+ lp->dma = dma;
+ lp->dmasize = dmasize;
+ }
+#endif
+
+ spin_lock_init(&lp->lock);
+
+ /* boy, they'd better get these right */
+ if (!strcmp(media, "rj45"))
+ lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T;
+ else if (!strcmp(media, "aui"))
+ lp->adapter_cnf = A_CNF_MEDIA_AUI | A_CNF_AUI;
+ else if (!strcmp(media, "bnc"))
+ lp->adapter_cnf = A_CNF_MEDIA_10B_2 | A_CNF_10B_2;
+ else
+ lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T;
+
+ if (duplex==-1)
+ lp->auto_neg_cnf = AUTO_NEG_ENABLE;
+
+ if (io == 0) {
+ printk(KERN_ERR "cs89x0.c: Module autoprobing not allowed.\n");
+ printk(KERN_ERR "cs89x0.c: Append io=0xNNN\n");
+ ret = -EPERM;
+ goto out;
+ } else if (io <= 0x1ff) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+#if ALLOW_DMA
+ if (use_dma && dmasize != 16 && dmasize != 64) {
+ printk(KERN_ERR "cs89x0.c: dma size must be either 16K or 64K, not %dK\n", dmasize);
+ ret = -EPERM;
+ goto out;
+ }
+#endif
+ ret = cs89x0_probe1(dev, io, 1);
+ if (ret)
+ goto out;
+
+ dev_cs89x0 = dev;
+ return 0;
+out:
+ free_netdev(dev);
+ return ret;
+}
+
+void __exit
+cleanup_module(void)
+{
+ unregister_netdev(dev_cs89x0);
+ writeword(dev_cs89x0->base_addr, ADD_PORT, PP_ChipID);
+ release_region(dev_cs89x0->base_addr, NETCARD_IO_EXTENT);
+ free_netdev(dev_cs89x0);
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * version-control: t
+ * kept-new-versions: 5
+ * c-indent-level: 8
+ * tab-width: 8
+ * End:
+ *
+ */
diff --git a/drivers/net/ethernet/apple/cs89x0.h b/drivers/net/ethernet/apple/cs89x0.h
new file mode 100644
index 000000000000..91423b70bb45
--- /dev/null
+++ b/drivers/net/ethernet/apple/cs89x0.h
@@ -0,0 +1,465 @@
+/* Copyright, 1988-1992, Russell Nelson, Crynwr Software
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, version 1.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+#define PP_ChipID 0x0000 /* offset 0h -> Corp -ID */
+ /* offset 2h -> Model/Product Number */
+ /* offset 3h -> Chip Revision Number */
+
+#define PP_ISAIOB 0x0020 /* IO base address */
+#define PP_CS8900_ISAINT 0x0022 /* ISA interrupt select */
+#define PP_CS8920_ISAINT 0x0370 /* ISA interrupt select */
+#define PP_CS8900_ISADMA 0x0024 /* ISA Rec DMA channel */
+#define PP_CS8920_ISADMA 0x0374 /* ISA Rec DMA channel */
+#define PP_ISASOF 0x0026 /* ISA DMA offset */
+#define PP_DmaFrameCnt 0x0028 /* ISA DMA Frame count */
+#define PP_DmaByteCnt 0x002A /* ISA DMA Byte count */
+#define PP_CS8900_ISAMemB 0x002C /* Memory base */
+#define PP_CS8920_ISAMemB 0x0348 /* */
+
+#define PP_ISABootBase 0x0030 /* Boot Prom base */
+#define PP_ISABootMask 0x0034 /* Boot Prom Mask */
+
+/* EEPROM data and command registers */
+#define PP_EECMD 0x0040 /* NVR Interface Command register */
+#define PP_EEData 0x0042 /* NVR Interface Data Register */
+#define PP_DebugReg 0x0044 /* Debug Register */
+
+#define PP_RxCFG 0x0102 /* Rx Bus config */
+#define PP_RxCTL 0x0104 /* Receive Control Register */
+#define PP_TxCFG 0x0106 /* Transmit Config Register */
+#define PP_TxCMD 0x0108 /* Transmit Command Register */
+#define PP_BufCFG 0x010A /* Bus configuration Register */
+#define PP_LineCTL 0x0112 /* Line Config Register */
+#define PP_SelfCTL 0x0114 /* Self Command Register */
+#define PP_BusCTL 0x0116 /* ISA bus control Register */
+#define PP_TestCTL 0x0118 /* Test Register */
+#define PP_AutoNegCTL 0x011C /* Auto Negotiation Ctrl */
+
+#define PP_ISQ 0x0120 /* Interrupt Status */
+#define PP_RxEvent 0x0124 /* Rx Event Register */
+#define PP_TxEvent 0x0128 /* Tx Event Register */
+#define PP_BufEvent 0x012C /* Bus Event Register */
+#define PP_RxMiss 0x0130 /* Receive Miss Count */
+#define PP_TxCol 0x0132 /* Transmit Collision Count */
+#define PP_LineST 0x0134 /* Line State Register */
+#define PP_SelfST 0x0136 /* Self State register */
+#define PP_BusST 0x0138 /* Bus Status */
+#define PP_TDR 0x013C /* Time Domain Reflectometry */
+#define PP_AutoNegST 0x013E /* Auto Neg Status */
+#define PP_TxCommand 0x0144 /* Tx Command */
+#define PP_TxLength 0x0146 /* Tx Length */
+#define PP_LAF 0x0150 /* Hash Table */
+#define PP_IA 0x0158 /* Physical Address Register */
+
+#define PP_RxStatus 0x0400 /* Receive start of frame */
+#define PP_RxLength 0x0402 /* Receive Length of frame */
+#define PP_RxFrame 0x0404 /* Receive frame pointer */
+#define PP_TxFrame 0x0A00 /* Transmit frame pointer */
+
+/* Primary I/O Base Address. If no I/O base is supplied by the user, then this */
+/* can be used as the default I/O base to access the PacketPage Area. */
+#define DEFAULTIOBASE 0x0300
+#define FIRST_IO 0x020C /* First I/O port to check */
+#define LAST_IO 0x037C /* Last I/O port to check (+10h) */
+#define ADD_MASK 0x3000 /* Mask for use of the ADD_PORT register */
+#define ADD_SIG 0x3000 /* Expected ID signature */
+
+/* On Macs, we only need use the ISA I/O stuff until we do MEMORY_ON */
+#ifdef CONFIG_MAC
+#define LCSLOTBASE 0xfee00000
+#define MMIOBASE 0x40000
+#endif
+
+#define CHIP_EISA_ID_SIG 0x630E /* Product ID Code for Crystal Chip (CS8900 spec 4.3) */
+#define CHIP_EISA_ID_SIG_STR "0x630E"
+
+#ifdef IBMEIPKT
+#define EISA_ID_SIG 0x4D24 /* IBM */
+#define PART_NO_SIG 0x1010 /* IBM */
+#define MONGOOSE_BIT 0x0000 /* IBM */
+#else
+#define EISA_ID_SIG 0x630E /* PnP Vendor ID (same as chip id for Crystal board) */
+#define PART_NO_SIG 0x4000 /* ID code CS8920 board (PnP Vendor Product code) */
+#define MONGOOSE_BIT 0x2000 /* PART_NO_SIG + MONGOOSE_BIT => ID of mongoose */
+#endif
+
+#define PRODUCT_ID_ADD 0x0002 /* Address of product ID */
+
+/* Mask to find out the types of registers */
+#define REG_TYPE_MASK 0x001F
+
+/* Eeprom Commands */
+#define ERSE_WR_ENBL 0x00F0
+#define ERSE_WR_DISABLE 0x0000
+
+/* Defines Control/Config register quintuplet numbers */
+#define RX_BUF_CFG 0x0003
+#define RX_CONTROL 0x0005
+#define TX_CFG 0x0007
+#define TX_COMMAND 0x0009
+#define BUF_CFG 0x000B
+#define LINE_CONTROL 0x0013
+#define SELF_CONTROL 0x0015
+#define BUS_CONTROL 0x0017
+#define TEST_CONTROL 0x0019
+
+/* Defines Status/Count registers quintuplet numbers */
+#define RX_EVENT 0x0004
+#define TX_EVENT 0x0008
+#define BUF_EVENT 0x000C
+#define RX_MISS_COUNT 0x0010
+#define TX_COL_COUNT 0x0012
+#define LINE_STATUS 0x0014
+#define SELF_STATUS 0x0016
+#define BUS_STATUS 0x0018
+#define TDR 0x001C
+
+/* PP_RxCFG - Receive Configuration and Interrupt Mask bit definition - Read/write */
+#define SKIP_1 0x0040
+#define RX_STREAM_ENBL 0x0080
+#define RX_OK_ENBL 0x0100
+#define RX_DMA_ONLY 0x0200
+#define AUTO_RX_DMA 0x0400
+#define BUFFER_CRC 0x0800
+#define RX_CRC_ERROR_ENBL 0x1000
+#define RX_RUNT_ENBL 0x2000
+#define RX_EXTRA_DATA_ENBL 0x4000
+
+/* PP_RxCTL - Receive Control bit definition - Read/write */
+#define RX_IA_HASH_ACCEPT 0x0040
+#define RX_PROM_ACCEPT 0x0080
+#define RX_OK_ACCEPT 0x0100
+#define RX_MULTCAST_ACCEPT 0x0200
+#define RX_IA_ACCEPT 0x0400
+#define RX_BROADCAST_ACCEPT 0x0800
+#define RX_BAD_CRC_ACCEPT 0x1000
+#define RX_RUNT_ACCEPT 0x2000
+#define RX_EXTRA_DATA_ACCEPT 0x4000
+#define RX_ALL_ACCEPT (RX_PROM_ACCEPT|RX_BAD_CRC_ACCEPT|RX_RUNT_ACCEPT|RX_EXTRA_DATA_ACCEPT)
+/* Default receive mode - individually addressed, broadcast, and error free */
+#define DEF_RX_ACCEPT (RX_IA_ACCEPT | RX_BROADCAST_ACCEPT | RX_OK_ACCEPT)
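+/* Example: promiscuous mode composes the accept mask as
+     writereg(dev, PP_RxCTL, DEF_RX_ACCEPT | RX_ALL_ACCEPT);
+   which is how set_multicast_list() uses these bits. */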
+
+/* PP_TxCFG - Transmit Configuration Interrupt Mask bit definition - Read/write */
+#define TX_LOST_CRS_ENBL 0x0040
+#define TX_SQE_ERROR_ENBL 0x0080
+#define TX_OK_ENBL 0x0100
+#define TX_LATE_COL_ENBL 0x0200
+#define TX_JBR_ENBL 0x0400
+#define TX_ANY_COL_ENBL 0x0800
+#define TX_16_COL_ENBL 0x8000
+
+/* PP_TxCMD - Transmit Command bit definition - Read-only */
+#define TX_START_4_BYTES 0x0000
+#define TX_START_64_BYTES 0x0040
+#define TX_START_128_BYTES 0x0080
+#define TX_START_ALL_BYTES 0x00C0
+#define TX_FORCE 0x0100
+#define TX_ONE_COL 0x0200
+#define TX_TWO_PART_DEFF_DISABLE 0x0400
+#define TX_NO_CRC 0x1000
+#define TX_RUNT 0x2000
+
+/* PP_BufCFG - Buffer Configuration Interrupt Mask bit definition - Read/write */
+#define GENERATE_SW_INTERRUPT 0x0040
+#define RX_DMA_ENBL 0x0080
+#define READY_FOR_TX_ENBL 0x0100
+#define TX_UNDERRUN_ENBL 0x0200
+#define RX_MISS_ENBL 0x0400
+#define RX_128_BYTE_ENBL 0x0800
+#define TX_COL_COUNT_OVRFLOW_ENBL 0x1000
+#define RX_MISS_COUNT_OVRFLOW_ENBL 0x2000
+#define RX_DEST_MATCH_ENBL 0x8000
+
+/* PP_LineCTL - Line Control bit definition - Read/write */
+#define SERIAL_RX_ON 0x0040
+#define SERIAL_TX_ON 0x0080
+#define AUI_ONLY 0x0100
+#define AUTO_AUI_10BASET 0x0200
+#define MODIFIED_BACKOFF 0x0800
+#define NO_AUTO_POLARITY 0x1000
+#define TWO_PART_DEFDIS 0x2000
+#define LOW_RX_SQUELCH 0x4000
+
+/* PP_SelfCTL - Software Self Control bit definition - Read/write */
+#define POWER_ON_RESET 0x0040
+#define SW_STOP 0x0100
+#define SLEEP_ON 0x0200
+#define AUTO_WAKEUP 0x0400
+#define HCB0_ENBL 0x1000
+#define HCB1_ENBL 0x2000
+#define HCB0 0x4000
+#define HCB1 0x8000
+
+/* PP_BusCTL - ISA Bus Control bit definition - Read/write */
+#define RESET_RX_DMA 0x0040
+#define MEMORY_ON 0x0400
+#define DMA_BURST_MODE 0x0800
+#define IO_CHANNEL_READY_ON 0x1000
+#define RX_DMA_SIZE_64K 0x2000
+#define ENABLE_IRQ 0x8000
+
+/* PP_TestCTL - Test Control bit definition - Read/write */
+#define LINK_OFF 0x0080
+#define ENDEC_LOOPBACK 0x0200
+#define AUI_LOOPBACK 0x0400
+#define BACKOFF_OFF 0x0800
+#define FDX_8900 0x4000
+#define FAST_TEST 0x8000
+
+/* PP_RxEvent - Receive Event Bit definition - Read-only */
+#define RX_IA_HASHED 0x0040
+#define RX_DRIBBLE 0x0080
+#define RX_OK 0x0100
+#define RX_HASHED 0x0200
+#define RX_IA 0x0400
+#define RX_BROADCAST 0x0800
+#define RX_CRC_ERROR 0x1000
+#define RX_RUNT 0x2000
+#define RX_EXTRA_DATA 0x4000
+
+#define HASH_INDEX_MASK 0x0FC00
+
+/* PP_TxEvent - Transmit Event Bit definition - Read-only */
+#define TX_LOST_CRS 0x0040
+#define TX_SQE_ERROR 0x0080
+#define TX_OK 0x0100
+#define TX_LATE_COL 0x0200
+#define TX_JBR 0x0400
+#define TX_16_COL 0x8000
+#define TX_SEND_OK_BITS (TX_OK|TX_LOST_CRS)
+#define TX_COL_COUNT_MASK 0x7800
+
+/* PP_BufEvent - Buffer Event Bit definition - Read-only */
+#define SW_INTERRUPT 0x0040
+#define RX_DMA 0x0080
+#define READY_FOR_TX 0x0100
+#define TX_UNDERRUN 0x0200
+#define RX_MISS 0x0400
+#define RX_128_BYTE 0x0800
+#define TX_COL_OVRFLW 0x1000
+#define RX_MISS_OVRFLW 0x2000
+#define RX_DEST_MATCH 0x8000
+
+/* PP_LineST - Ethernet Line Status bit definition - Read-only */
+#define LINK_OK 0x0080
+#define AUI_ON 0x0100
+#define TENBASET_ON 0x0200
+#define POLARITY_OK 0x1000
+#define CRS_OK 0x4000
+
+/* PP_SelfST - Chip Software Status bit definition */
+#define ACTIVE_33V 0x0040
+#define INIT_DONE 0x0080
+#define SI_BUSY 0x0100
+#define EEPROM_PRESENT 0x0200
+#define EEPROM_OK 0x0400
+#define EL_PRESENT 0x0800
+#define EE_SIZE_64 0x1000
+
+/* PP_BusST - ISA Bus Status bit definition */
+#define TX_BID_ERROR 0x0080
+#define READY_FOR_TX_NOW 0x0100
+
+/* PP_AutoNegCTL - Auto Negotiation Control bit definition */
+#define RE_NEG_NOW 0x0040
+#define ALLOW_FDX 0x0080
+#define AUTO_NEG_ENABLE 0x0100
+#define NLP_ENABLE 0x0200
+#define FORCE_FDX 0x8000
+#define AUTO_NEG_BITS (FORCE_FDX|NLP_ENABLE|AUTO_NEG_ENABLE)
+#define AUTO_NEG_MASK (FORCE_FDX|NLP_ENABLE|AUTO_NEG_ENABLE|ALLOW_FDX|RE_NEG_NOW)
+
+/* PP_AutoNegST - Auto Negotiation Status bit definition */
+#define AUTO_NEG_BUSY 0x0080
+#define FLP_LINK 0x0100
+#define FLP_LINK_GOOD 0x0800
+#define LINK_FAULT 0x1000
+#define HDX_ACTIVE 0x4000
+#define FDX_ACTIVE 0x8000
+
+/* The following block defines the ISQ event types */
+#define ISQ_RECEIVER_EVENT 0x04
+#define ISQ_TRANSMITTER_EVENT 0x08
+#define ISQ_BUFFER_EVENT 0x0c
+#define ISQ_RX_MISS_EVENT 0x10
+#define ISQ_TX_COL_EVENT 0x12
+
+#define ISQ_EVENT_MASK 0x003F /* ISQ mask to find out type of event */
+#define ISQ_HIST 16 /* small history buffer */
+#define AUTOINCREMENT 0x8000 /* Bit mask to set bit-15 for autoincrement */
+
+#define TXRXBUFSIZE 0x0600
+#define RXDMABUFSIZE 0x8000
+#define RXDMASIZE 0x4000
+#define TXRX_LENGTH_MASK 0x07FF
+
+/* rx options bits */
+#define RCV_WITH_RXON 1 /* Set SerRx ON */
+#define RCV_COUNTS 2 /* Use Framecnt1 */
+#define RCV_PONG 4 /* Pong respondent */
+#define RCV_DONG 8 /* Dong operation */
+#define RCV_POLLING 0x10 /* Poll RxEvent */
+#define RCV_ISQ 0x20 /* Use ISQ, int */
+#define RCV_AUTO_DMA 0x100 /* Set AutoRxDMAE */
+#define RCV_DMA 0x200 /* Set RxDMA only */
+#define RCV_DMA_ALL 0x400 /* Copy all DMA'ed */
+#define RCV_FIXED_DATA 0x800 /* Every frame same */
+#define RCV_IO 0x1000 /* Use ISA IO only */
+#define RCV_MEMORY 0x2000 /* Use ISA Memory */
+
+#define RAM_SIZE 0x1000 /* The card has 4k bytes of RAM */
+#define PKT_START PP_TxFrame /* Start of packet RAM */
+
+#define RX_FRAME_PORT 0x0000
+#define TX_FRAME_PORT RX_FRAME_PORT
+#define TX_CMD_PORT 0x0004
+#define TX_NOW 0x0000 /* Tx packet after 5 bytes copied */
+#define TX_AFTER_381 0x0040 /* Tx packet after 381 bytes copied */
+#define TX_AFTER_ALL 0x00c0 /* Tx packet after all bytes copied */
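+/* The driver escalates through these on transmit underrun: it falls
+   back to TX_AFTER_381 after three underruns in a row and to
+   TX_AFTER_ALL after six (see the TX_UNDERRUN handling in
+   net_interrupt()). */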
+#define TX_LEN_PORT 0x0006
+#define ISQ_PORT 0x0008
+#define ADD_PORT 0x000A
+#define DATA_PORT 0x000C
+
+#define EEPROM_WRITE_EN 0x00F0
+#define EEPROM_WRITE_DIS 0x0000
+#define EEPROM_WRITE_CMD 0x0100
+#define EEPROM_READ_CMD 0x0200
+
+/* Receive Header */
+/* Description of header of each packet in receive area of memory */
+#define RBUF_EVENT_LOW 0 /* Low byte of RxEvent - status of received frame */
+#define RBUF_EVENT_HIGH 1 /* High byte of RxEvent - status of received frame */
+#define RBUF_LEN_LOW 2 /* Length of received data - low byte */
+#define RBUF_LEN_HI 3 /* Length of received data - high byte */
+#define RBUF_HEAD_LEN 4 /* Length of this header */
+
+#define CHIP_READ 0x1 /* Used to mark state of the repins code (chip or dma) */
+#define DMA_READ 0x2 /* Used to mark state of the repins code (chip or dma) */
+
+/* for bios scan */
+/* */
+#ifdef CSDEBUG
+/* use these values for debugging bios scan */
+#define BIOS_START_SEG 0x00000
+#define BIOS_OFFSET_INC 0x0010
+#else
+#define BIOS_START_SEG 0x0c000
+#define BIOS_OFFSET_INC 0x0200
+#endif
+
+#define BIOS_LAST_OFFSET 0x0fc00
+
+/* Byte offsets into the EEPROM configuration buffer */
+#define ISA_CNF_OFFSET 0x6
+#define TX_CTL_OFFSET (ISA_CNF_OFFSET + 8) /* 8900 eeprom */
+#define AUTO_NEG_CNF_OFFSET (ISA_CNF_OFFSET + 8) /* 8920 eeprom */
+
+ /* the assumption here is that the bits in the eeprom are generally */
+ /* in the same position as those in the autonegctl register. */
+ /* Of course the IMM bit is not in that register so it must be */
+ /* masked out */
+#define EE_FORCE_FDX 0x8000
+#define EE_NLP_ENABLE 0x0200
+#define EE_AUTO_NEG_ENABLE 0x0100
+#define EE_ALLOW_FDX 0x0080
+#define EE_AUTO_NEG_CNF_MASK (EE_FORCE_FDX|EE_NLP_ENABLE|EE_AUTO_NEG_ENABLE|EE_ALLOW_FDX)
+
+#define IMM_BIT 0x0040 /* ignore missing media */
+
+#define ADAPTER_CNF_OFFSET (AUTO_NEG_CNF_OFFSET + 2)
+#define A_CNF_10B_T 0x0001
+#define A_CNF_AUI 0x0002
+#define A_CNF_10B_2 0x0004
+#define A_CNF_MEDIA_TYPE 0x0070
+#define A_CNF_MEDIA_AUTO 0x0070
+#define A_CNF_MEDIA_10B_T 0x0020
+#define A_CNF_MEDIA_AUI 0x0040
+#define A_CNF_MEDIA_10B_2 0x0010
+#define A_CNF_DC_DC_POLARITY 0x0080
+#define A_CNF_NO_AUTO_POLARITY 0x2000
+#define A_CNF_LOW_RX_SQUELCH 0x4000
+#define A_CNF_EXTND_10B_2 0x8000
+
+#define PACKET_PAGE_OFFSET 0x8
+
+/* Bit definitions for the ISA configuration word from the EEPROM */
+#define INT_NO_MASK 0x000F
+#define DMA_NO_MASK 0x0070
+#define ISA_DMA_SIZE 0x0200
+#define ISA_AUTO_RxDMA 0x0400
+#define ISA_RxDMA 0x0800
+#define DMA_BURST 0x1000
+#define STREAM_TRANSFER 0x2000
+#define ANY_ISA_DMA (ISA_AUTO_RxDMA | ISA_RxDMA)
+
+/* DMA controller registers */
+#define DMA_BASE 0x00 /* DMA controller base */
+#define DMA_BASE_2 0x0C0 /* DMA controller base */
+
+#define DMA_STAT 0x0D0 /* DMA controller status register */
+#define DMA_MASK 0x0D4 /* DMA controller mask register */
+#define DMA_MODE 0x0D6 /* DMA controller mode register */
+#define DMA_RESETFF 0x0D8 /* DMA controller first/last flip flop */
+
+/* DMA data */
+#define DMA_DISABLE 0x04 /* Disable channel n */
+#define DMA_ENABLE 0x00 /* Enable channel n */
+/* Demand transfers, incr. address, auto init, writes, ch. n */
+#define DMA_RX_MODE 0x14
+/* Demand transfers, incr. address, auto init, reads, ch. n */
+#define DMA_TX_MODE 0x18
+
+#define DMA_SIZE (16*1024) /* Size of dma buffer - 16k */
+
+#define CS8900 0x0000
+#define CS8920 0x4000
+#define CS8920M 0x6000
+#define REVISON_BITS 0x1F00
+#define EEVER_NUMBER 0x12
+#define CHKSUM_LEN 0x14
+#define CHKSUM_VAL 0x0000
+#define START_EEPROM_DATA 0x001c /* Offset into eeprom for start of data */
+#define IRQ_MAP_EEPROM_DATA 0x0046 /* Offset into eeprom for the IRQ map */
+#define IRQ_MAP_LEN 0x0004 /* No of bytes to read for the IRQ map */
+#define PNP_IRQ_FRMT 0x0022 /* PNP small item IRQ format */
+#define CS8900_IRQ_MAP 0x1c20 /* This IRQ map is fixed */
+
+#define CS8920_NO_INTS 0x0F /* Max CS8920 interrupt select # */
+
+#define PNP_ADD_PORT 0x0279
+#define PNP_WRITE_PORT 0x0A79
+
+#define GET_PNP_ISA_STRUCT 0x40
+#define PNP_ISA_STRUCT_LEN 0x06
+#define PNP_CSN_CNT_OFF 0x01
+#define PNP_RD_PORT_OFF 0x02
+#define PNP_FUNCTION_OK 0x00
+#define PNP_WAKE 0x03
+#define PNP_RSRC_DATA 0x04
+#define PNP_RSRC_READY 0x01
+#define PNP_STATUS 0x05
+#define PNP_ACTIVATE 0x30
+#define PNP_CNF_IO_H 0x60
+#define PNP_CNF_IO_L 0x61
+#define PNP_CNF_INT 0x70
+#define PNP_CNF_DMA 0x74
+#define PNP_CNF_MEM 0x48
+
+#define BIT0 1
+#define BIT15 0x8000
+
diff --git a/drivers/net/ethernet/apple/mac89x0.c b/drivers/net/ethernet/apple/mac89x0.c
new file mode 100644
index 000000000000..83781f316d1f
--- /dev/null
+++ b/drivers/net/ethernet/apple/mac89x0.c
@@ -0,0 +1,634 @@
+/* mac89x0.c: A Crystal Semiconductor CS89[02]0 driver for linux. */
+/*
+ Written 1996 by Russell Nelson, with reference to skeleton.c
+ written 1993-1994 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached at nelson@crynwr.com, Crynwr
+ Software, 11 Grant St., Potsdam, NY 13676
+
+ Changelog:
+
+ Mike Cruse : mcruse@cti-ltd.com
+ : Changes for Linux 2.0 compatibility.
+ : Added dev_id parameter in net_interrupt(),
+ : request_irq() and free_irq(). Just NULL for now.
+
+ Mike Cruse : Added MOD_INC_USE_COUNT and MOD_DEC_USE_COUNT macros
+ : in net_open() and net_close() so kerneld would know
+ : that the module is in use and wouldn't eject the
+ : driver prematurely.
+
+ Mike Cruse : Rewrote init_module() and cleanup_module using 8390.c
+ : as an example. Disabled autoprobing in init_module(),
+ : not a good thing to do to other devices while Linux
+ : is running from all accounts.
+
+ Alan Cox : Removed 1.2 support, added 2.1 extra counters.
+
+ David Huggins-Daines <dhd@debian.org>
+
+ Split this off into mac89x0.c, and gutted it of all parts which are
+ not relevant to the existing CS8900 cards on the Macintosh
+ (i.e. basically the Daynaport CS and LC cards). To be precise:
+
+ * Removed all the media-detection stuff, because these cards are
+ TP-only.
+
+ * Lobotomized the ISA interrupt bogosity, because these cards use
+ a hardwired NuBus interrupt and a magic ISAIRQ value in the card.
+
+ * Basically eliminated everything not relevant to getting the
+ cards minimally functioning on the Macintosh.
+
+ I might add that these cards are badly designed even from the Mac
+ standpoint, in that Dayna, in their infinite wisdom, used NuBus slot
+ I/O space and NuBus interrupts for these cards, but neglected to
+ provide anything even remotely resembling a NuBus ROM. Therefore we
+ have to probe for them in a brain-damaged ISA-like fashion.
+
+ Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
+ check kmalloc and release the allocated memory on failure in
+ mac89x0_probe and in init_module
+ use local_irq_{save,restore}(flags) in net_get_stat, not just
+ local_irq_{dis,en}able()
+*/
+
+static char *version =
+"cs89x0.c:v1.02 11/26/96 Russell Nelson <nelson@crynwr.com>\n";
+
+/* ======================= configure the driver here ======================= */
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 0
+#endif
+
+/* ======================= end of configuration ======================= */
+
+
+/* Always include 'config.h' first in case the user wants to turn on
+ or override something. */
+#include <linux/module.h>
+
+/*
+ Sources:
+
+ Crynwr packet driver epktisa.
+
+ Crystal Semiconductor data sheets.
+
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/nubus.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/gfp.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/hwtest.h>
+#include <asm/macints.h>
+
+#include "cs89x0.h"
+
+static unsigned int net_debug = NET_DEBUG;
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ int chip_type; /* one of: CS8900, CS8920, CS8920M */
+ char chip_revision; /* revision letter of the chip ('A'...) */
+ int send_cmd; /* the proper command used to send a packet. */
+ int rx_mode;
+ int curr_rx_cfg;
+ int send_underrun; /* keep track of how many underruns in a row we get */
+ struct sk_buff *skb;
+};
+
+/* Index to functions, as function prototypes. */
+
+#if 0
+extern void reset_chip(struct net_device *dev);
+#endif
+static int net_open(struct net_device *dev);
+static int net_send_packet(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t net_interrupt(int irq, void *dev_id);
+static void set_multicast_list(struct net_device *dev);
+static void net_rx(struct net_device *dev);
+static int net_close(struct net_device *dev);
+static struct net_device_stats *net_get_stats(struct net_device *dev);
+static int set_mac_address(struct net_device *dev, void *addr);
+
+
+/* Example routines you must write ;->. */
+#define tx_done(dev) 1
+
+/* For reading/writing registers ISA-style */
+static inline int
+readreg_io(struct net_device *dev, int portno)
+{
+ nubus_writew(swab16(portno), dev->base_addr + ADD_PORT);
+ return swab16(nubus_readw(dev->base_addr + DATA_PORT));
+}
+
+static inline void
+writereg_io(struct net_device *dev, int portno, int value)
+{
+ nubus_writew(swab16(portno), dev->base_addr + ADD_PORT);
+ nubus_writew(swab16(value), dev->base_addr + DATA_PORT);
+}
+
+/* These are for reading/writing registers in shared memory */
+static inline int
+readreg(struct net_device *dev, int portno)
+{
+ return swab16(nubus_readw(dev->mem_start + portno));
+}
+
+static inline void
+writereg(struct net_device *dev, int portno, int value)
+{
+ nubus_writew(swab16(value), dev->mem_start + portno);
+}
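+
+/* The CS89x0 presents its registers little-endian while the 68k Mac is
+   big-endian, hence the swab16() around every 16-bit access above.
+   For example, reading PP_ChipID through readreg() should yield
+   CHIP_EISA_ID_SIG (0x630E) once the bytes are swapped. */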
+
+static const struct net_device_ops mac89x0_netdev_ops = {
+ .ndo_open = net_open,
+ .ndo_stop = net_close,
+ .ndo_start_xmit = net_send_packet,
+ .ndo_get_stats = net_get_stats,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_set_mac_address = set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+/* Probe for the CS8900 card in slot E. We won't bother looking
+ anywhere else until we have a really good reason to do so. */
+struct net_device * __init mac89x0_probe(int unit)
+{
+ struct net_device *dev;
+ static int once_is_enough;
+ struct net_local *lp;
+ static unsigned version_printed;
+ int i, slot;
+ unsigned rev_type = 0;
+ unsigned long ioaddr;
+ unsigned short sig;
+ int err = -ENODEV;
+
+ if (!MACH_IS_MAC)
+ return ERR_PTR(-ENODEV);
+
+ dev = alloc_etherdev(sizeof(struct net_local));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ if (once_is_enough)
+ goto out;
+ once_is_enough = 1;
+
+ /* We might have to parameterize this later */
+ slot = 0xE;
+ /* Get out now if there's a real NuBus card in slot E */
+ if (nubus_find_slot(slot, NULL) != NULL)
+ goto out;
+
+ /* The pseudo-ISA bits always live at offset 0x300 (gee,
+ wonder why...) */
+ ioaddr = (unsigned long)
+ nubus_slot_addr(slot) | (((slot&0xf) << 20) + DEFAULTIOBASE);
+ {
+ unsigned long flags;
+ int card_present;
+
+ local_irq_save(flags);
+ card_present = (hwreg_present((void*) ioaddr+4) &&
+ hwreg_present((void*) ioaddr + DATA_PORT));
+ local_irq_restore(flags);
+
+ if (!card_present)
+ goto out;
+ }
+
+ nubus_writew(0, ioaddr + ADD_PORT);
+ sig = nubus_readw(ioaddr + DATA_PORT);
+ if (sig != swab16(CHIP_EISA_ID_SIG))
+ goto out;
+
+ /* Initialize the net_device structure. */
+ lp = netdev_priv(dev);
+
+ /* Fill in the 'dev' fields. */
+ dev->base_addr = ioaddr;
+ dev->mem_start = (unsigned long)
+ nubus_slot_addr(slot) | (((slot&0xf) << 20) + MMIOBASE);
+ dev->mem_end = dev->mem_start + 0x1000;
+
+ /* Turn on shared memory */
+ writereg_io(dev, PP_BusCTL, MEMORY_ON);
+
+ /* get the chip type */
+ rev_type = readreg(dev, PRODUCT_ID_ADD);
+ lp->chip_type = rev_type &~ REVISON_BITS;
+ lp->chip_revision = ((rev_type & REVISON_BITS) >> 8) + 'A';
+
+ /* Check the chip type and revision in order to set the correct send command
+ CS8920 revision C and CS8900 revision F can use the faster send. */
+ lp->send_cmd = TX_AFTER_381;
+ if (lp->chip_type == CS8900 && lp->chip_revision >= 'F')
+ lp->send_cmd = TX_NOW;
+ if (lp->chip_type != CS8900 && lp->chip_revision >= 'C')
+ lp->send_cmd = TX_NOW;
+
+ if (net_debug && version_printed++ == 0)
+ printk(version);
+
+ printk(KERN_INFO "%s: cs89%c0%s rev %c found at %#8lx",
+ dev->name,
+ lp->chip_type==CS8900?'0':'2',
+ lp->chip_type==CS8920M?"M":"",
+ lp->chip_revision,
+ dev->base_addr);
+
+ /* Try to read the MAC address */
+ if ((readreg(dev, PP_SelfST) & (EEPROM_PRESENT | EEPROM_OK)) == 0) {
+ printk("\nmac89x0: No EEPROM, giving up now.\n");
+ goto out1;
+ } else {
+ for (i = 0; i < ETH_ALEN; i += 2) {
+ /* Big-endian (why??!) */
+ unsigned short s = readreg(dev, PP_IA + i);
+ dev->dev_addr[i] = s >> 8;
+ dev->dev_addr[i+1] = s & 0xff;
+ }
+ }
+
+ dev->irq = SLOT2IRQ(slot);
+
+ /* print the IRQ and ethernet address. */
+
+ printk(" IRQ %d ADDR %pM\n", dev->irq, dev->dev_addr);
+
+ dev->netdev_ops = &mac89x0_netdev_ops;
+
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ nubus_writew(0, dev->base_addr + ADD_PORT);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+#if 0
+/* This is useful for something, but I don't know what yet. */
+void __init reset_chip(struct net_device *dev)
+{
+ int reset_start_time;
+
+ writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
+
+ /* wait 30 ms */
+ msleep_interruptible(30);
+
+ /* Wait until the chip is reset */
+ reset_start_time = jiffies;
+ while( (readreg(dev, PP_SelfST) & INIT_DONE) == 0 && jiffies - reset_start_time < 2)
+ ;
+}
+#endif
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine should set everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+ there is a non-reboot way to recover if something goes wrong.
+ */
+static int
+net_open(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int i;
+
+ /* Disable the interrupt for now */
+ writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) & ~ENABLE_IRQ);
+
+ /* Grab the interrupt */
+ if (request_irq(dev->irq, net_interrupt, 0, "cs89x0", dev))
+ return -EAGAIN;
+
+ /* Set up the IRQ - Apparently magic */
+ if (lp->chip_type == CS8900)
+ writereg(dev, PP_CS8900_ISAINT, 0);
+ else
+ writereg(dev, PP_CS8920_ISAINT, 0);
+
+ /* set the Ethernet address */
+ for (i=0; i < ETH_ALEN/2; i++)
+ writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8));
+
+ /* Turn on both receive and transmit operations */
+ writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_RX_ON | SERIAL_TX_ON);
+
+ /* Receive only error free packets addressed to this card */
+ lp->rx_mode = 0;
+ writereg(dev, PP_RxCTL, DEF_RX_ACCEPT);
+
+ lp->curr_rx_cfg = RX_OK_ENBL | RX_CRC_ERROR_ENBL;
+
+ writereg(dev, PP_RxCFG, lp->curr_rx_cfg);
+
+ writereg(dev, PP_TxCFG, TX_LOST_CRS_ENBL | TX_SQE_ERROR_ENBL | TX_OK_ENBL |
+ TX_LATE_COL_ENBL | TX_JBR_ENBL | TX_ANY_COL_ENBL | TX_16_COL_ENBL);
+
+ writereg(dev, PP_BufCFG, READY_FOR_TX_ENBL | RX_MISS_COUNT_OVRFLOW_ENBL |
+ TX_COL_COUNT_OVRFLOW_ENBL | TX_UNDERRUN_ENBL);
+
+ /* now that we've got our act together, enable everything */
+ writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) | ENABLE_IRQ);
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int
+net_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ if (net_debug > 3)
+ printk("%s: sent %d byte packet of type %x\n",
+ dev->name, skb->len,
+ (skb->data[ETH_ALEN+ETH_ALEN] << 8)
+ | skb->data[ETH_ALEN+ETH_ALEN+1]);
+
+ /* keep the upload from being interrupted, since we
+ ask the chip to start transmitting before the
+ whole packet has been completely uploaded. */
+ local_irq_save(flags);
+ netif_stop_queue(dev);
+
+ /* initiate a transmit sequence */
+ writereg(dev, PP_TxCMD, lp->send_cmd);
+ writereg(dev, PP_TxLength, skb->len);
+
+ /* Test to see if the chip has allocated memory for the packet */
+ if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) {
+ /* Gasp! It hasn't. But that shouldn't happen since
+ we're waiting for TxOk, so return 1 and requeue this packet. */
+ local_irq_restore(flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Write the contents of the packet */
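+ /* (the +1 below appears intended to round an odd skb->len up so the
+    frame's final byte reaches shared memory; even-length frames copy
+    one harmless extra byte) */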
+ skb_copy_from_linear_data(skb, (void *)(dev->mem_start + PP_TxFrame),
+ skb->len+1);
+
+ local_irq_restore(flags);
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static irqreturn_t net_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct net_local *lp;
+ int ioaddr, status;
+
+ if (dev == NULL) {
+ printk ("net_interrupt(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ /* we MUST read all the events out of the ISQ, otherwise we'll never
+ get interrupted again. As a consequence, we can't have any limit
+ on the number of times we loop in the interrupt handler. The
+ hardware guarantees that eventually we'll run out of events. Of
+ course, if you're on a slow machine, and packets are arriving
+ faster than you can read them off, you're screwed. Hasta la
+ vista, baby! */
+ while ((status = swab16(nubus_readw(dev->base_addr + ISQ_PORT)))) {
+ if (net_debug > 4)
+ printk("%s: event=%04x\n", dev->name, status);
+ switch (status & ISQ_EVENT_MASK) {
+ case ISQ_RECEIVER_EVENT:
+ /* Got a packet(s). */
+ net_rx(dev);
+ break;
+ case ISQ_TRANSMITTER_EVENT:
+ dev->stats.tx_packets++;
+ netif_wake_queue(dev);
+ if ((status & TX_OK) == 0)
+ dev->stats.tx_errors++;
+ if (status & TX_LOST_CRS)
+ dev->stats.tx_carrier_errors++;
+ if (status & TX_SQE_ERROR)
+ dev->stats.tx_heartbeat_errors++;
+ if (status & TX_LATE_COL)
+ dev->stats.tx_window_errors++;
+ if (status & TX_16_COL)
+ dev->stats.tx_aborted_errors++;
+ break;
+ case ISQ_BUFFER_EVENT:
+ if (status & READY_FOR_TX) {
+ /* we tried to transmit a packet earlier,
+ but inexplicably ran out of buffers.
+ That shouldn't happen since we only ever
+ load one packet. Shrug. Do the right
+ thing anyway. */
+ netif_wake_queue(dev);
+ }
+ if (status & TX_UNDERRUN) {
+ if (net_debug > 0) printk("%s: transmit underrun\n", dev->name);
+ lp->send_underrun++;
+ if (lp->send_underrun == 3) lp->send_cmd = TX_AFTER_381;
+ else if (lp->send_underrun == 6) lp->send_cmd = TX_AFTER_ALL;
+ }
+ break;
+ case ISQ_RX_MISS_EVENT:
+ dev->stats.rx_missed_errors += (status >> 6);
+ break;
+ case ISQ_TX_COL_EVENT:
+ dev->stats.collisions += (status >> 6);
+ break;
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void
+net_rx(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ int status, length;
+
+ status = readreg(dev, PP_RxStatus);
+ if ((status & RX_OK) == 0) {
+ dev->stats.rx_errors++;
+ if (status & RX_RUNT)
+ dev->stats.rx_length_errors++;
+ if (status & RX_EXTRA_DATA)
+ dev->stats.rx_length_errors++;
+ if ((status & RX_CRC_ERROR) &&
+ !(status & (RX_EXTRA_DATA|RX_RUNT)))
+ /* per str 172 */
+ dev->stats.rx_crc_errors++;
+ if (status & RX_DRIBBLE)
+ dev->stats.rx_frame_errors++;
+ return;
+ }
+
+ length = readreg(dev, PP_RxLength);
+ /* Malloc up new buffer. */
+ skb = alloc_skb(length, GFP_ATOMIC);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ dev->stats.rx_dropped++;
+ return;
+ }
+ skb_put(skb, length);
+
+ skb_copy_to_linear_data(skb, (void *)(dev->mem_start + PP_RxFrame),
+ length);
+
+ if (net_debug > 3)
+ printk("%s: received %d byte packet of type %x\n",
+ dev->name, length,
+ (skb->data[ETH_ALEN+ETH_ALEN] << 8)
+ | skb->data[ETH_ALEN+ETH_ALEN+1]);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += length;
+}
+
+/* The inverse routine to net_open(). */
+static int
+net_close(struct net_device *dev)
+{
+ writereg(dev, PP_RxCFG, 0);
+ writereg(dev, PP_TxCFG, 0);
+ writereg(dev, PP_BufCFG, 0);
+ writereg(dev, PP_BusCTL, 0);
+
+ netif_stop_queue(dev);
+
+ free_irq(dev->irq, dev);
+
+ /* Update the statistics here. */
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct net_device_stats *
+net_get_stats(struct net_device *dev)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ /* Update the statistics from the device registers. */
+ dev->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6);
+ dev->stats.collisions += (readreg(dev, PP_TxCol) >> 6);
+ local_irq_restore(flags);
+
+ return &dev->stats;
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ lp->rx_mode = RX_ALL_ACCEPT;
+ } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
+ /* The multicast-accept list is initialized to accept-all, and we
+    rely on higher-level filtering for now. */
+ lp->rx_mode = RX_MULTCAST_ACCEPT;
+ } else {
+ lp->rx_mode = 0;
+ }
+
+ writereg(dev, PP_RxCTL, DEF_RX_ACCEPT | lp->rx_mode);
+
+ /* in promiscuous mode, we accept errored packets, so we have to enable interrupts on them also */
+ writereg(dev, PP_RxCFG, lp->curr_rx_cfg |
+ (lp->rx_mode == RX_ALL_ACCEPT? (RX_CRC_ERROR_ENBL|RX_RUNT_ENBL|RX_EXTRA_DATA_ENBL) : 0));
+}
+
+
+static int set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *saddr = addr;
+ int i;
+
+ memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
+ printk("%s: Setting MAC address to %pM.\n", dev->name, dev->dev_addr);
+
+ /* set the Ethernet address */
+ for (i = 0; i < ETH_ALEN/2; i++)
+ writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8));
+
+ return 0;
+}
+
+#ifdef MODULE
+
+static struct net_device *dev_cs89x0;
+static int debug;
+
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "CS89[02]0 debug level (0-5)");
+MODULE_LICENSE("GPL");
+
+int __init
+init_module(void)
+{
+ net_debug = debug;
+ dev_cs89x0 = mac89x0_probe(-1);
+ if (IS_ERR(dev_cs89x0)) {
+ printk(KERN_WARNING "mac89x0.c: No card found\n");
+ return PTR_ERR(dev_cs89x0);
+ }
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(dev_cs89x0);
+ nubus_writew(0, dev_cs89x0->base_addr + ADD_PORT);
+ free_netdev(dev_cs89x0);
+}
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
new file mode 100644
index 000000000000..bec87bd9195c
--- /dev/null
+++ b/drivers/net/ethernet/apple/mace.c
@@ -0,0 +1,1031 @@
+/*
+ * Network device driver for the MACE ethernet controller on
+ * Apple Powermacs. Assumes it's under a DBDMA controller.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/crc32.h>
+#include <linux/spinlock.h>
+#include <linux/bitrev.h>
+#include <linux/slab.h>
+#include <asm/prom.h>
+#include <asm/dbdma.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/macio.h>
+
+#include "mace.h"
+
+static int port_aaui = -1;
+
+#define N_RX_RING 8
+#define N_TX_RING 6
+#define MAX_TX_ACTIVE 1
+#define NCMDS_TX 1 /* dma commands per element in tx ring */
+#define RX_BUFLEN (ETH_FRAME_LEN + 8)
+#define TX_TIMEOUT HZ /* 1 second */
+
+/* Chip rev needs workaround on HW & multicast addr change */
+#define BROKEN_ADDRCHG_REV 0x0941
+
+/* Bits in transmit DMA status */
+#define TX_DMA_ERR 0x80
+
+struct mace_data {
+ volatile struct mace __iomem *mace;
+ volatile struct dbdma_regs __iomem *tx_dma;
+ int tx_dma_intr;
+ volatile struct dbdma_regs __iomem *rx_dma;
+ int rx_dma_intr;
+ volatile struct dbdma_cmd *tx_cmds; /* xmit dma command list */
+ volatile struct dbdma_cmd *rx_cmds; /* recv dma command list */
+ struct sk_buff *rx_bufs[N_RX_RING];
+ int rx_fill;
+ int rx_empty;
+ struct sk_buff *tx_bufs[N_TX_RING];
+ int tx_fill;
+ int tx_empty;
+ unsigned char maccc;
+ unsigned char tx_fullup;
+ unsigned char tx_active;
+ unsigned char tx_bad_runt;
+ struct timer_list tx_timeout;
+ int timeout_active;
+ int port_aaui;
+ int chipid;
+ struct macio_dev *mdev;
+ spinlock_t lock;
+};
+
+/*
+ * Number of bytes of private data per MACE: allow enough for
+ * the rx and tx dma commands plus a branch dma command each,
+ * and another 16 bytes to allow us to align the dma command
+ * buffers on a 16 byte boundary.
+ */
+#define PRIV_BYTES (sizeof(struct mace_data) \
+ + (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))
+
+static int mace_open(struct net_device *dev);
+static int mace_close(struct net_device *dev);
+static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
+static void mace_set_multicast(struct net_device *dev);
+static void mace_reset(struct net_device *dev);
+static int mace_set_address(struct net_device *dev, void *addr);
+static irqreturn_t mace_interrupt(int irq, void *dev_id);
+static irqreturn_t mace_txdma_intr(int irq, void *dev_id);
+static irqreturn_t mace_rxdma_intr(int irq, void *dev_id);
+static void mace_set_timeout(struct net_device *dev);
+static void mace_tx_timeout(unsigned long data);
+static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
+static inline void mace_clean_rings(struct mace_data *mp);
+static void __mace_set_address(struct net_device *dev, void *addr);
+
+/*
+ * If we can't get a skbuff when we need it, we use this area for DMA.
+ */
+static unsigned char *dummy_buf;
+
+static const struct net_device_ops mace_netdev_ops = {
+ .ndo_open = mace_open,
+ .ndo_stop = mace_close,
+ .ndo_start_xmit = mace_xmit_start,
+ .ndo_set_rx_mode = mace_set_multicast,
+ .ndo_set_mac_address = mace_set_address,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
+{
+ struct device_node *mace = macio_get_of_node(mdev);
+ struct net_device *dev;
+ struct mace_data *mp;
+ const unsigned char *addr;
+ int j, rev, rc = -EBUSY;
+
+ if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
+ printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n",
+ mace->full_name);
+ return -ENODEV;
+ }
+
+ addr = of_get_property(mace, "mac-address", NULL);
+ if (addr == NULL) {
+ addr = of_get_property(mace, "local-mac-address", NULL);
+ if (addr == NULL) {
+ printk(KERN_ERR "Can't get mac-address for MACE %s\n",
+ mace->full_name);
+ return -ENODEV;
+ }
+ }
+
+ /*
+ * lazily allocate the driver-wide dummy buffer. (Note that we
+ * never have more than one MACE in the system anyway)
+ */
+ if (dummy_buf == NULL) {
+ dummy_buf = kmalloc(RX_BUFLEN+2, GFP_KERNEL);
+ if (dummy_buf == NULL) {
+ printk(KERN_ERR "MACE: couldn't allocate dummy buffer\n");
+ return -ENOMEM;
+ }
+ }
+
+ if (macio_request_resources(mdev, "mace")) {
+ printk(KERN_ERR "MACE: can't request IO resources !\n");
+ return -EBUSY;
+ }
+
+ dev = alloc_etherdev(PRIV_BYTES);
+ if (!dev) {
+ printk(KERN_ERR "MACE: can't allocate ethernet device !\n");
+ rc = -ENOMEM;
+ goto err_release;
+ }
+ SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
+
+ mp = netdev_priv(dev);
+ mp->mdev = mdev;
+ macio_set_drvdata(mdev, dev);
+
+ dev->base_addr = macio_resource_start(mdev, 0);
+ mp->mace = ioremap(dev->base_addr, 0x1000);
+ if (mp->mace == NULL) {
+ printk(KERN_ERR "MACE: can't map IO resources !\n");
+ rc = -ENOMEM;
+ goto err_free;
+ }
+ dev->irq = macio_irq(mdev, 0);
+
+ rev = addr[0] == 0 && addr[1] == 0xA0;
+ for (j = 0; j < 6; ++j) {
+ dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
+ }
+ mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
+ in_8(&mp->mace->chipid_lo);
+
+ mp->maccc = ENXMT | ENRCV;
+
+ mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
+ if (mp->tx_dma == NULL) {
+ printk(KERN_ERR "MACE: can't map TX DMA resources !\n");
+ rc = -ENOMEM;
+ goto err_unmap_io;
+ }
+ mp->tx_dma_intr = macio_irq(mdev, 1);
+
+ mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000);
+ if (mp->rx_dma == NULL) {
+ printk(KERN_ERR "MACE: can't map RX DMA resources !\n");
+ rc = -ENOMEM;
+ goto err_unmap_tx_dma;
+ }
+ mp->rx_dma_intr = macio_irq(mdev, 2);
+
+ mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
+ mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;
+
+ memset((char *) mp->tx_cmds, 0,
+ (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
+ init_timer(&mp->tx_timeout);
+ spin_lock_init(&mp->lock);
+ mp->timeout_active = 0;
+
+ if (port_aaui >= 0)
+ mp->port_aaui = port_aaui;
+ else {
+ /* Apple Network Server uses the AAUI port */
+ if (of_machine_is_compatible("AAPL,ShinerESB"))
+ mp->port_aaui = 1;
+ else {
+#ifdef CONFIG_MACE_AAUI_PORT
+ mp->port_aaui = 1;
+#else
+ mp->port_aaui = 0;
+#endif
+ }
+ }
+
+ dev->netdev_ops = &mace_netdev_ops;
+
+ /*
+ * Most of what is below could be moved to mace_open()
+ */
+ mace_reset(dev);
+
+ rc = request_irq(dev->irq, mace_interrupt, 0, "MACE", dev);
+ if (rc) {
+ printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
+ goto err_unmap_rx_dma;
+ }
+ rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
+ if (rc) {
+ printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr);
+ goto err_free_irq;
+ }
+ rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
+ if (rc) {
+ printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr);
+ goto err_free_tx_irq;
+ }
+
+ rc = register_netdev(dev);
+ if (rc) {
+ printk(KERN_ERR "MACE: Cannot register net device, aborting.\n");
+ goto err_free_rx_irq;
+ }
+
+ printk(KERN_INFO "%s: MACE at %pM, chip revision %d.%d\n",
+ dev->name, dev->dev_addr,
+ mp->chipid >> 8, mp->chipid & 0xff);
+
+ return 0;
+
+ err_free_rx_irq:
+ free_irq(macio_irq(mdev, 2), dev);
+ err_free_tx_irq:
+ free_irq(macio_irq(mdev, 1), dev);
+ err_free_irq:
+ free_irq(macio_irq(mdev, 0), dev);
+ err_unmap_rx_dma:
+ iounmap(mp->rx_dma);
+ err_unmap_tx_dma:
+ iounmap(mp->tx_dma);
+ err_unmap_io:
+ iounmap(mp->mace);
+ err_free:
+ free_netdev(dev);
+ err_release:
+ macio_release_resources(mdev);
+
+ return rc;
+}
+
+static int __devexit mace_remove(struct macio_dev *mdev)
+{
+ struct net_device *dev = macio_get_drvdata(mdev);
+ struct mace_data *mp;
+
+ BUG_ON(dev == NULL);
+
+ macio_set_drvdata(mdev, NULL);
+
+ mp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ free_irq(dev->irq, dev);
+ free_irq(mp->tx_dma_intr, dev);
+ free_irq(mp->rx_dma_intr, dev);
+
+ iounmap(mp->rx_dma);
+ iounmap(mp->tx_dma);
+ iounmap(mp->mace);
+
+ free_netdev(dev);
+
+ macio_release_resources(mdev);
+
+ return 0;
+}
+
+static void dbdma_reset(volatile struct dbdma_regs __iomem *dma)
+{
+ int i;
+
+ out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16);
+
+ /*
+ * Yes this looks peculiar, but apparently it needs to be this
+ * way on some machines.
+ */
+ for (i = 200; i > 0; --i)
+ if (ld_le32(&dma->control) & RUN)
+ udelay(1);
+}
+
+static void mace_reset(struct net_device *dev)
+{
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct mace __iomem *mb = mp->mace;
+ int i;
+
+ /* soft-reset the chip */
+ i = 200;
+ while (--i) {
+ out_8(&mb->biucc, SWRST);
+ if (in_8(&mb->biucc) & SWRST) {
+ udelay(10);
+ continue;
+ }
+ break;
+ }
+ if (!i) {
+ printk(KERN_ERR "mace: cannot reset chip!\n");
+ return;
+ }
+
+ out_8(&mb->imr, 0xff); /* disable all intrs for now */
+ i = in_8(&mb->ir);
+ out_8(&mb->maccc, 0); /* turn off tx, rx */
+
+ out_8(&mb->biucc, XMTSP_64);
+ out_8(&mb->utr, RTRD);
+ out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST);
+ out_8(&mb->xmtfc, AUTO_PAD_XMIT); /* auto-pad short frames */
+ out_8(&mb->rcvfc, 0);
+
+ /* load up the hardware address */
+ __mace_set_address(dev, dev->dev_addr);
+
+ /* clear the multicast filter */
+ if (mp->chipid == BROKEN_ADDRCHG_REV)
+ out_8(&mb->iac, LOGADDR);
+ else {
+ out_8(&mb->iac, ADDRCHG | LOGADDR);
+ while ((in_8(&mb->iac) & ADDRCHG) != 0)
+ ;
+ }
+ for (i = 0; i < 8; ++i)
+ out_8(&mb->ladrf, 0);
+
+ /* done changing address */
+ if (mp->chipid != BROKEN_ADDRCHG_REV)
+ out_8(&mb->iac, 0);
+
+ if (mp->port_aaui)
+ out_8(&mb->plscc, PORTSEL_AUI + ENPLSIO);
+ else
+ out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
+}
+
+static void __mace_set_address(struct net_device *dev, void *addr)
+{
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct mace __iomem *mb = mp->mace;
+ unsigned char *p = addr;
+ int i;
+
+ /* load up the hardware address */
+ if (mp->chipid == BROKEN_ADDRCHG_REV)
+ out_8(&mb->iac, PHYADDR);
+ else {
+ out_8(&mb->iac, ADDRCHG | PHYADDR);
+ while ((in_8(&mb->iac) & ADDRCHG) != 0)
+ ;
+ }
+ for (i = 0; i < 6; ++i)
+ out_8(&mb->padr, dev->dev_addr[i] = p[i]);
+ if (mp->chipid != BROKEN_ADDRCHG_REV)
+ out_8(&mb->iac, 0);
+}
+
+static int mace_set_address(struct net_device *dev, void *addr)
+{
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct mace __iomem *mb = mp->mace;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mp->lock, flags);
+
+ __mace_set_address(dev, addr);
+
+ /* note: setting ADDRCHG clears ENRCV */
+ out_8(&mb->maccc, mp->maccc);
+
+ spin_unlock_irqrestore(&mp->lock, flags);
+ return 0;
+}
+
+static inline void mace_clean_rings(struct mace_data *mp)
+{
+ int i;
+
+ /* free some skb's */
+ for (i = 0; i < N_RX_RING; ++i) {
+ if (mp->rx_bufs[i] != NULL) {
+ dev_kfree_skb(mp->rx_bufs[i]);
+ mp->rx_bufs[i] = NULL;
+ }
+ }
+ for (i = mp->tx_empty; i != mp->tx_fill; ) {
+ dev_kfree_skb(mp->tx_bufs[i]);
+ if (++i >= N_TX_RING)
+ i = 0;
+ }
+}
+
+static int mace_open(struct net_device *dev)
+{
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct mace __iomem *mb = mp->mace;
+ volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
+ volatile struct dbdma_regs __iomem *td = mp->tx_dma;
+ volatile struct dbdma_cmd *cp;
+ int i;
+ struct sk_buff *skb;
+ unsigned char *data;
+
+ /* reset the chip */
+ mace_reset(dev);
+
+ /* initialize list of sk_buffs for receiving and set up recv dma */
+ mace_clean_rings(mp);
+ memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
+ cp = mp->rx_cmds;
+ for (i = 0; i < N_RX_RING - 1; ++i) {
+ skb = dev_alloc_skb(RX_BUFLEN + 2);
+ if (!skb) {
+ data = dummy_buf;
+ } else {
+ skb_reserve(skb, 2); /* so IP header lands on 4-byte bdry */
+ data = skb->data;
+ }
+ mp->rx_bufs[i] = skb;
+ st_le16(&cp->req_count, RX_BUFLEN);
+ st_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
+ st_le32(&cp->phy_addr, virt_to_bus(data));
+ cp->xfer_status = 0;
+ ++cp;
+ }
+ mp->rx_bufs[i] = NULL;
+ st_le16(&cp->command, DBDMA_STOP);
+ mp->rx_fill = i;
+ mp->rx_empty = 0;
+
+ /* Put a branch back to the beginning of the receive command list */
+ ++cp;
+ st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
+ st_le32(&cp->cmd_dep, virt_to_bus(mp->rx_cmds));
+
+ /* start rx dma */
+ out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
+ out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds));
+ out_le32(&rd->control, (RUN << 16) | RUN);
+
+ /* put a branch at the end of the tx command list */
+ cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
+ st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
+ st_le32(&cp->cmd_dep, virt_to_bus(mp->tx_cmds));
+
+ /* reset tx dma */
+ out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16);
+ out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds));
+ mp->tx_fill = 0;
+ mp->tx_empty = 0;
+ mp->tx_fullup = 0;
+ mp->tx_active = 0;
+ mp->tx_bad_runt = 0;
+
+ /* turn it on! */
+ out_8(&mb->maccc, mp->maccc);
+ /* enable all interrupts except receive interrupts */
+ out_8(&mb->imr, RCVINT);
+
+ return 0;
+}
+
+static int mace_close(struct net_device *dev)
+{
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct mace __iomem *mb = mp->mace;
+ volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
+ volatile struct dbdma_regs __iomem *td = mp->tx_dma;
+
+ /* disable rx and tx */
+ out_8(&mb->maccc, 0);
+ out_8(&mb->imr, 0xff); /* disable all intrs */
+
+ /* disable rx and tx dma */
+ st_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
+ st_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
+
+ mace_clean_rings(mp);
+
+ return 0;
+}
+
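+/*
+ * (Re)arm the transmit watchdog timer. Any pending timer is deleted
+ * first so that the timer is never added twice.
+ */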
+static inline void mace_set_timeout(struct net_device *dev)
+{
+ struct mace_data *mp = netdev_priv(dev);
+
+ if (mp->timeout_active)
+ del_timer(&mp->tx_timeout);
+ mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
+ mp->tx_timeout.function = mace_tx_timeout;
+ mp->tx_timeout.data = (unsigned long) dev;
+ add_timer(&mp->tx_timeout);
+ mp->timeout_active = 1;
+}
+
+static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct dbdma_regs __iomem *td = mp->tx_dma;
+ volatile struct dbdma_cmd *cp, *np;
+ unsigned long flags;
+ int fill, next, len;
+
+ /* see if there's a free slot in the tx ring */
+ spin_lock_irqsave(&mp->lock, flags);
+ fill = mp->tx_fill;
+ next = fill + 1;
+ if (next >= N_TX_RING)
+ next = 0;
+ if (next == mp->tx_empty) {
+ netif_stop_queue(dev);
+ mp->tx_fullup = 1;
+ spin_unlock_irqrestore(&mp->lock, flags);
+ return NETDEV_TX_BUSY; /* can't take it at the moment */
+ }
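+	/*
+	 * The ring is treated as full one slot early (next == tx_empty),
+	 * so tx_fill == tx_empty always means "ring empty", never "full".
+	 */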
+ spin_unlock_irqrestore(&mp->lock, flags);
+
+ /* partially fill in the dma command block */
+ len = skb->len;
+ if (len > ETH_FRAME_LEN) {
+ printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len);
+ len = ETH_FRAME_LEN;
+ }
+ mp->tx_bufs[fill] = skb;
+ cp = mp->tx_cmds + NCMDS_TX * fill;
+ st_le16(&cp->req_count, len);
+ st_le32(&cp->phy_addr, virt_to_bus(skb->data));
+
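+	/*
+	 * The descriptor after this frame is written as a STOP so the
+	 * DMA engine halts there; it is flipped to OUTPUT_LAST (below,
+	 * and in mace_interrupt) once a frame is queued in that slot.
+	 */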
+ np = mp->tx_cmds + NCMDS_TX * next;
+ out_le16(&np->command, DBDMA_STOP);
+
+ /* poke the tx dma channel */
+ spin_lock_irqsave(&mp->lock, flags);
+ mp->tx_fill = next;
+ if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
+ out_le16(&cp->xfer_status, 0);
+ out_le16(&cp->command, OUTPUT_LAST);
+ out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
+ ++mp->tx_active;
+ mace_set_timeout(dev);
+ }
+ if (++next >= N_TX_RING)
+ next = 0;
+ if (next == mp->tx_empty)
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&mp->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static void mace_set_multicast(struct net_device *dev)
+{
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct mace __iomem *mb = mp->mace;
+ int i;
+ u32 crc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mp->lock, flags);
+ mp->maccc &= ~PROM;
+ if (dev->flags & IFF_PROMISC) {
+ mp->maccc |= PROM;
+ } else {
+ unsigned char multicast_filter[8];
+ struct netdev_hw_addr *ha;
+
+ if (dev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < 8; i++)
+ multicast_filter[i] = 0xff;
+ } else {
+ for (i = 0; i < 8; i++)
+ multicast_filter[i] = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
+ i = crc >> 26; /* bit number in multicast_filter */
+ multicast_filter[i >> 3] |= 1 << (i & 7);
+ }
+ }
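+		/*
+		 * Each multicast address thus maps to one of the 64
+		 * logical-address-filter bits: the top 6 bits of the
+		 * little-endian CRC select byte i >> 3, bit i & 7.
+		 */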
+#if 0
+ printk("Multicast filter :");
+ for (i = 0; i < 8; i++)
+ printk("%02x ", multicast_filter[i]);
+ printk("\n");
+#endif
+
+ if (mp->chipid == BROKEN_ADDRCHG_REV)
+ out_8(&mb->iac, LOGADDR);
+ else {
+ out_8(&mb->iac, ADDRCHG | LOGADDR);
+ while ((in_8(&mb->iac) & ADDRCHG) != 0)
+ ;
+ }
+ for (i = 0; i < 8; ++i)
+ out_8(&mb->ladrf, multicast_filter[i]);
+ if (mp->chipid != BROKEN_ADDRCHG_REV)
+ out_8(&mb->iac, 0);
+ }
+ /* reset maccc */
+ out_8(&mb->maccc, mp->maccc);
+ spin_unlock_irqrestore(&mp->lock, flags);
+}
+
+static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev)
+{
+ volatile struct mace __iomem *mb = mp->mace;
+ static int mace_babbles, mace_jabbers;
+
+ if (intr & MPCO)
+ dev->stats.rx_missed_errors += 256;
+ dev->stats.rx_missed_errors += in_8(&mb->mpc); /* reading clears it */
+ if (intr & RNTPCO)
+ dev->stats.rx_length_errors += 256;
+ dev->stats.rx_length_errors += in_8(&mb->rntpc); /* reading clears it */
+ if (intr & CERR)
+ ++dev->stats.tx_heartbeat_errors;
+ if (intr & BABBLE)
+ if (mace_babbles++ < 4)
+ printk(KERN_DEBUG "mace: babbling transmitter\n");
+ if (intr & JABBER)
+ if (mace_jabbers++ < 4)
+ printk(KERN_DEBUG "mace: jabbering transceiver\n");
+}
+
+static irqreturn_t mace_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct mace __iomem *mb = mp->mace;
+ volatile struct dbdma_regs __iomem *td = mp->tx_dma;
+ volatile struct dbdma_cmd *cp;
+ int intr, fs, i, stat, x;
+ int xcount, dstat;
+ unsigned long flags;
+ /* static int mace_last_fs, mace_last_xcount; */
+
+ spin_lock_irqsave(&mp->lock, flags);
+ intr = in_8(&mb->ir); /* read interrupt register */
+ in_8(&mb->xmtrc); /* get retries */
+ mace_handle_misc_intrs(mp, intr, dev);
+
+ i = mp->tx_empty;
+ while (in_8(&mb->pr) & XMTSV) {
+ del_timer(&mp->tx_timeout);
+ mp->timeout_active = 0;
+ /*
+ * Clear any interrupt indication associated with this status
+ * word. This appears to unlatch any error indication from
+ * the DMA controller.
+ */
+ intr = in_8(&mb->ir);
+ if (intr != 0)
+ mace_handle_misc_intrs(mp, intr, dev);
+ if (mp->tx_bad_runt) {
+ fs = in_8(&mb->xmtfs);
+ mp->tx_bad_runt = 0;
+ out_8(&mb->xmtfc, AUTO_PAD_XMIT);
+ continue;
+ }
+ dstat = ld_le32(&td->status);
+ /* stop DMA controller */
+ out_le32(&td->control, RUN << 16);
+ /*
+ * xcount is the number of complete frames which have been
+ * written to the fifo but for which status has not been read.
+ */
+ xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
+ if (xcount == 0 || (dstat & DEAD)) {
+ /*
+ * If a packet was aborted before the DMA controller has
+ * finished transferring it, it seems that there are 2 bytes
+ * which are stuck in some buffer somewhere. These will get
+ * transmitted as soon as we read the frame status (which
+ * reenables the transmit data transfer request). Turning
+ * off the DMA controller and/or resetting the MACE doesn't
+ * help. So we disable auto-padding and FCS transmission
+ * so the two bytes will only be a runt packet which should
+ * be ignored by other stations.
+ */
+ out_8(&mb->xmtfc, DXMTFCS);
+ }
+ fs = in_8(&mb->xmtfs);
+ if ((fs & XMTSV) == 0) {
+ printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n",
+ fs, xcount, dstat);
+ mace_reset(dev);
+ /*
+			 * XXX mace likes to hang the machine after an xmtfs error.
+			 * This is hard to reproduce; resetting *may* help.
+ */
+ }
+ cp = mp->tx_cmds + NCMDS_TX * i;
+ stat = ld_le16(&cp->xfer_status);
+ if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) {
+ /*
+ * Check whether there were in fact 2 bytes written to
+ * the transmit FIFO.
+ */
+ udelay(1);
+ x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
+ if (x != 0) {
+ /* there were two bytes with an end-of-packet indication */
+ mp->tx_bad_runt = 1;
+ mace_set_timeout(dev);
+ } else {
+ /*
+ * Either there weren't the two bytes buffered up, or they
+ * didn't have an end-of-packet indication.
+ * We flush the transmit FIFO just in case (by setting the
+ * XMTFWU bit with the transmitter disabled).
+ */
+ out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);
+ out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU);
+ udelay(1);
+ out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);
+ out_8(&mb->xmtfc, AUTO_PAD_XMIT);
+ }
+ }
+ /* dma should have finished */
+ if (i == mp->tx_fill) {
+ printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
+ fs, xcount, dstat);
+ continue;
+ }
+ /* Update stats */
+ if (fs & (UFLO|LCOL|LCAR|RTRY)) {
+ ++dev->stats.tx_errors;
+ if (fs & LCAR)
+ ++dev->stats.tx_carrier_errors;
+ if (fs & (UFLO|LCOL|RTRY))
+ ++dev->stats.tx_aborted_errors;
+ } else {
+ dev->stats.tx_bytes += mp->tx_bufs[i]->len;
+ ++dev->stats.tx_packets;
+ }
+ dev_kfree_skb_irq(mp->tx_bufs[i]);
+ --mp->tx_active;
+ if (++i >= N_TX_RING)
+ i = 0;
+#if 0
+ mace_last_fs = fs;
+ mace_last_xcount = xcount;
+#endif
+ }
+
+ if (i != mp->tx_empty) {
+ mp->tx_fullup = 0;
+ netif_wake_queue(dev);
+ }
+ mp->tx_empty = i;
+ i += mp->tx_active;
+ if (i >= N_TX_RING)
+ i -= N_TX_RING;
+ if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
+ do {
+ /* set up the next one */
+ cp = mp->tx_cmds + NCMDS_TX * i;
+ out_le16(&cp->xfer_status, 0);
+ out_le16(&cp->command, OUTPUT_LAST);
+ ++mp->tx_active;
+ if (++i >= N_TX_RING)
+ i = 0;
+ } while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
+ out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
+ mace_set_timeout(dev);
+ }
+ spin_unlock_irqrestore(&mp->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void mace_tx_timeout(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct mace __iomem *mb = mp->mace;
+ volatile struct dbdma_regs __iomem *td = mp->tx_dma;
+ volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
+ volatile struct dbdma_cmd *cp;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&mp->lock, flags);
+ mp->timeout_active = 0;
+ if (mp->tx_active == 0 && !mp->tx_bad_runt)
+ goto out;
+
+ /* update various counters */
+ mace_handle_misc_intrs(mp, in_8(&mb->ir), dev);
+
+ cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;
+
+ /* turn off both tx and rx and reset the chip */
+ out_8(&mb->maccc, 0);
+ printk(KERN_ERR "mace: transmit timeout - resetting\n");
+ dbdma_reset(td);
+ mace_reset(dev);
+
+ /* restart rx dma */
+ cp = bus_to_virt(ld_le32(&rd->cmdptr));
+ dbdma_reset(rd);
+ out_le16(&cp->xfer_status, 0);
+ out_le32(&rd->cmdptr, virt_to_bus(cp));
+ out_le32(&rd->control, (RUN << 16) | RUN);
+
+ /* fix up the transmit side */
+ i = mp->tx_empty;
+ mp->tx_active = 0;
+ ++dev->stats.tx_errors;
+ if (mp->tx_bad_runt) {
+ mp->tx_bad_runt = 0;
+ } else if (i != mp->tx_fill) {
+ dev_kfree_skb(mp->tx_bufs[i]);
+ if (++i >= N_TX_RING)
+ i = 0;
+ mp->tx_empty = i;
+ }
+ mp->tx_fullup = 0;
+ netif_wake_queue(dev);
+ if (i != mp->tx_fill) {
+ cp = mp->tx_cmds + NCMDS_TX * i;
+ out_le16(&cp->xfer_status, 0);
+ out_le16(&cp->command, OUTPUT_LAST);
+ out_le32(&td->cmdptr, virt_to_bus(cp));
+ out_le32(&td->control, (RUN << 16) | RUN);
+ ++mp->tx_active;
+ mace_set_timeout(dev);
+ }
+
+ /* turn it back on */
+ out_8(&mb->imr, RCVINT);
+ out_8(&mb->maccc, mp->maccc);
+
+out:
+ spin_unlock_irqrestore(&mp->lock, flags);
+}
+
+static irqreturn_t mace_txdma_intr(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
+ volatile struct dbdma_cmd *cp, *np;
+ int i, nb, stat, next;
+ struct sk_buff *skb;
+ unsigned frame_status;
+ static int mace_lost_status;
+ unsigned char *data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mp->lock, flags);
+ for (i = mp->rx_empty; i != mp->rx_fill; ) {
+ cp = mp->rx_cmds + i;
+ stat = ld_le16(&cp->xfer_status);
+ if ((stat & ACTIVE) == 0) {
+ next = i + 1;
+ if (next >= N_RX_RING)
+ next = 0;
+ np = mp->rx_cmds + next;
+ if (next != mp->rx_fill &&
+ (ld_le16(&np->xfer_status) & ACTIVE) != 0) {
+ printk(KERN_DEBUG "mace: lost a status word\n");
+ ++mace_lost_status;
+ } else
+ break;
+ }
+ nb = ld_le16(&cp->req_count) - ld_le16(&cp->res_count);
+ out_le16(&cp->command, DBDMA_STOP);
+ /* got a packet, have a look at it */
+ skb = mp->rx_bufs[i];
+ if (!skb) {
+ ++dev->stats.rx_dropped;
+ } else if (nb > 8) {
+ data = skb->data;
+ frame_status = (data[nb-3] << 8) + data[nb-4];
+ if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) {
+ ++dev->stats.rx_errors;
+ if (frame_status & RS_OFLO)
+ ++dev->stats.rx_over_errors;
+ if (frame_status & RS_FRAMERR)
+ ++dev->stats.rx_frame_errors;
+ if (frame_status & RS_FCSERR)
+ ++dev->stats.rx_crc_errors;
+ } else {
+ /* Mace feature AUTO_STRIP_RCV is on by default, dropping the
+ * FCS on frames with 802.3 headers. This means that Ethernet
+ * frames have 8 extra octets at the end, while 802.3 frames
+ * have only 4. We need to correctly account for this. */
+ if (*(unsigned short *)(data+12) < 1536) /* 802.3 header */
+ nb -= 4;
+ else /* Ethernet header; mace includes FCS */
+ nb -= 8;
+ skb_put(skb, nb);
+ skb->protocol = eth_type_trans(skb, dev);
+ dev->stats.rx_bytes += skb->len;
+ netif_rx(skb);
+ mp->rx_bufs[i] = NULL;
+ ++dev->stats.rx_packets;
+ }
+ } else {
+ ++dev->stats.rx_errors;
+ ++dev->stats.rx_length_errors;
+ }
+
+ /* advance to next */
+ if (++i >= N_RX_RING)
+ i = 0;
+ }
+ mp->rx_empty = i;
+
+ i = mp->rx_fill;
+ for (;;) {
+ next = i + 1;
+ if (next >= N_RX_RING)
+ next = 0;
+ if (next == mp->rx_empty)
+ break;
+ cp = mp->rx_cmds + i;
+ skb = mp->rx_bufs[i];
+ if (!skb) {
+ skb = dev_alloc_skb(RX_BUFLEN + 2);
+ if (skb) {
+ skb_reserve(skb, 2);
+ mp->rx_bufs[i] = skb;
+ }
+ }
+ st_le16(&cp->req_count, RX_BUFLEN);
+		data = skb ? skb->data : dummy_buf;
+ st_le32(&cp->phy_addr, virt_to_bus(data));
+ out_le16(&cp->xfer_status, 0);
+ out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
+#if 0
+ if ((ld_le32(&rd->status) & ACTIVE) != 0) {
+ out_le32(&rd->control, (PAUSE << 16) | PAUSE);
+ while ((in_le32(&rd->status) & ACTIVE) != 0)
+ ;
+ }
+#endif
+ i = next;
+ }
+ if (i != mp->rx_fill) {
+ out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE));
+ mp->rx_fill = i;
+ }
+ spin_unlock_irqrestore(&mp->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static struct of_device_id mace_match[] =
+{
+ {
+ .name = "mace",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mace_match);
+
+static struct macio_driver mace_driver =
+{
+ .driver = {
+ .name = "mace",
+ .owner = THIS_MODULE,
+ .of_match_table = mace_match,
+ },
+ .probe = mace_probe,
+ .remove = mace_remove,
+};
+
+
+static int __init mace_init(void)
+{
+ return macio_register_driver(&mace_driver);
+}
+
+static void __exit mace_cleanup(void)
+{
+ macio_unregister_driver(&mace_driver);
+
+ kfree(dummy_buf);
+ dummy_buf = NULL;
+}
+
+MODULE_AUTHOR("Paul Mackerras");
+MODULE_DESCRIPTION("PowerMac MACE driver.");
+module_param(port_aaui, int, 0);
+MODULE_PARM_DESC(port_aaui, "MACE uses AAUI port (0-1)");
+MODULE_LICENSE("GPL");
+
+module_init(mace_init);
+module_exit(mace_cleanup);
diff --git a/drivers/net/ethernet/apple/mace.h b/drivers/net/ethernet/apple/mace.h
new file mode 100644
index 000000000000..30b7ec0cedb5
--- /dev/null
+++ b/drivers/net/ethernet/apple/mace.h
@@ -0,0 +1,173 @@
+/*
+ * mace.h - definitions for the registers in the Am79C940 MACE
+ * (Medium Access Control for Ethernet) controller.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define REG(x) volatile unsigned char x; char x ## _pad[15]
+
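+/*
+ * The REG() macro pads each 8-bit register out to a 16-byte slot, so
+ * the struct below matches the chip's register spacing on the bus.
+ */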
+struct mace {
+ REG(rcvfifo); /* receive FIFO */
+ REG(xmtfifo); /* transmit FIFO */
+ REG(xmtfc); /* transmit frame control */
+ REG(xmtfs); /* transmit frame status */
+ REG(xmtrc); /* transmit retry count */
+ REG(rcvfc); /* receive frame control */
+ REG(rcvfs); /* receive frame status (4 bytes) */
+ REG(fifofc); /* FIFO frame count */
+ REG(ir); /* interrupt register */
+ REG(imr); /* interrupt mask register */
+ REG(pr); /* poll register */
+ REG(biucc); /* bus interface unit config control */
+ REG(fifocc); /* FIFO configuration control */
+ REG(maccc); /* medium access control config control */
+ REG(plscc); /* phys layer signalling config control */
+ REG(phycc); /* physical configuration control */
+ REG(chipid_lo); /* chip ID, lsb */
+ REG(chipid_hi); /* chip ID, msb */
+ REG(iac); /* internal address config */
+ REG(reg19);
+ REG(ladrf); /* logical address filter (8 bytes) */
+ REG(padr); /* physical address (6 bytes) */
+ REG(reg22);
+ REG(reg23);
+ REG(mpc); /* missed packet count (clears when read) */
+ REG(reg25);
+ REG(rntpc); /* runt packet count (clears when read) */
+ REG(rcvcc); /* recv collision count (clears when read) */
+ REG(reg28);
+ REG(utr); /* user test reg */
+ REG(reg30);
+ REG(reg31);
+};
+
+/* Bits in XMTFC */
+#define DRTRY 0x80 /* don't retry transmission after collision */
+#define DXMTFCS 0x08 /* don't append FCS to transmitted frame */
+#define AUTO_PAD_XMIT 0x01 /* auto-pad short packets on transmission */
+
+/* Bits in XMTFS: only valid when XMTSV is set in PR and XMTFS */
+#define XMTSV 0x80 /* transmit status (i.e. XMTFS) valid */
+#define UFLO 0x40 /* underflow - xmit fifo ran dry */
+#define LCOL 0x20 /* late collision (transmission aborted) */
+#define MORE 0x10 /* 2 or more retries needed to xmit frame */
+#define ONE 0x08 /* 1 retry needed to xmit frame */
+#define DEFER 0x04 /* MACE had to defer xmission (enet busy) */
+#define LCAR 0x02 /* loss of carrier (transmission aborted) */
+#define RTRY 0x01 /* too many retries (transmission aborted) */
+
+/* Bits in XMTRC: only valid when XMTSV is set in PR (and XMTFS) */
+#define EXDEF 0x80 /* had to defer for excessive time */
+#define RETRY_MASK 0x0f /* number of retries (0 - 15) */
+
+/* Bits in RCVFC */
+#define LLRCV 0x08 /* low latency receive: early DMA request */
+#define M_RBAR 0x04 /* sets function of EAM/R pin */
+#define AUTO_STRIP_RCV 0x01 /* auto-strip short LLC frames on recv */
+
+/*
+ * Bits in RCVFS. After a frame is received, four bytes of status
+ * are automatically read from this register and appended to the frame
+ * data in memory. These are:
+ *	Bytes 0 and 1: message byte count and frame status
+ * Byte 2: runt packet count
+ * Byte 3: receive collision count
+ */
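+/* (struct mace_frame in macmace.c gives one in-memory view of this
+ * appended status block.) */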
+#define RS_OFLO 0x8000 /* receive FIFO overflowed */
+#define RS_CLSN 0x4000 /* received frame suffered (late) collision */
+#define RS_FRAMERR 0x2000 /* framing error flag */
+#define RS_FCSERR 0x1000 /* frame had FCS error */
+#define RS_COUNT 0x0fff /* mask for byte count field */
+
+/* Bits (fields) in FIFOFC */
+#define RCVFC_SH 4 /* receive frame count in FIFO */
+#define RCVFC_MASK 0x0f
+#define XMTFC_SH 0 /* transmit frame count in FIFO */
+#define XMTFC_MASK 0x0f
+
+/*
+ * Bits in IR and IMR. The IR clears itself when read.
+ * Setting a bit in the IMR will disable the corresponding interrupt.
+ */
+#define JABBER 0x80 /* jabber error - 10baseT xmission too long */
+#define BABBLE 0x40 /* babble - xmitter xmitting for too long */
+#define CERR 0x20 /* collision err - no SQE test (heartbeat) */
+#define RCVCCO 0x10 /* RCVCC overflow */
+#define RNTPCO 0x08 /* RNTPC overflow */
+#define MPCO 0x04 /* MPC overflow */
+#define RCVINT 0x02 /* receive interrupt */
+#define XMTINT 0x01 /* transmitter interrupt */
+
+/* Bits in PR */
+#define XMTSV 0x80 /* XMTFS valid (same as in XMTFS) */
+#define TDTREQ 0x40 /* set when xmit fifo is requesting data */
+#define RDTREQ 0x20 /* set when recv fifo requests data xfer */
+
+/* Bits in BIUCC */
+#define BSWP 0x40 /* byte swap, i.e. big-endian bus */
+#define XMTSP_4 0x00 /* start xmitting when 4 bytes in FIFO */
+#define XMTSP_16 0x10 /* start xmitting when 16 bytes in FIFO */
+#define XMTSP_64 0x20 /* start xmitting when 64 bytes in FIFO */
+#define XMTSP_112 0x30 /* start xmitting when 112 bytes in FIFO */
+#define SWRST 0x01 /* software reset */
+
+/* Bits in FIFOCC */
+#define XMTFW_8 0x00 /* xmit fifo watermark = 8 words free */
+#define XMTFW_16 0x40 /* 16 words free */
+#define XMTFW_32 0x80 /* 32 words free */
+#define RCVFW_16 0x00 /* recv fifo watermark = 16 bytes avail */
+#define RCVFW_32 0x10 /* 32 bytes avail */
+#define RCVFW_64 0x20 /* 64 bytes avail */
+#define XMTFWU 0x08 /* xmit fifo watermark update enable */
+#define RCVFWU 0x04 /* recv fifo watermark update enable */
+#define XMTBRST 0x02 /* enable transmit burst mode */
+#define RCVBRST 0x01 /* enable receive burst mode */
+
+/* Bits in MACCC */
+#define PROM 0x80 /* promiscuous mode */
+#define DXMT2PD 0x40 /* disable xmit two-part deferral algorithm */
+#define EMBA 0x20 /* enable modified backoff algorithm */
+#define DRCVPA 0x08 /* disable receiving physical address */
+#define DRCVBC 0x04 /* disable receiving broadcasts */
+#define ENXMT 0x02 /* enable transmitter */
+#define ENRCV 0x01 /* enable receiver */
+
+/* Bits in PLSCC */
+#define XMTSEL 0x08 /* select DO+/DO- state when idle */
+#define PORTSEL_AUI 0x00 /* select AUI port */
+#define PORTSEL_10T 0x02 /* select 10Base-T port */
+#define PORTSEL_DAI 0x04 /* select DAI port */
+#define PORTSEL_GPSI 0x06 /* select GPSI port */
+#define ENPLSIO 0x01 /* enable optional PLS I/O pins */
+
+/* Bits in PHYCC */
+#define LNKFL 0x80 /* reports 10Base-T link failure */
+#define DLNKTST 0x40 /* disable 10Base-T link test */
+#define REVPOL 0x20 /* 10Base-T receiver polarity reversed */
+#define DAPC 0x10 /* disable auto receiver polarity correction */
+#define LRT 0x08 /* low receive threshold for long links */
+#define ASEL 0x04 /* auto-select AUI or 10Base-T port */
+#define RWAKE 0x02 /* remote wake function */
+#define AWAKE 0x01 /* auto wake function */
+
+/* Bits in IAC */
+#define ADDRCHG 0x80 /* request address change */
+#define PHYADDR 0x04 /* access physical address */
+#define LOGADDR 0x02 /* access multicast filter */
+
+/* Bits in UTR */
+#define RTRE 0x80 /* reserved test register enable. DON'T SET. */
+#define RTRD 0x40 /* reserved test register disable. Sticky */
+#define RPAC 0x20 /* accept runt packets */
+#define FCOLL 0x10 /* force collision */
+#define RCVFCSE 0x08 /* receive FCS enable */
+#define LOOP_NONE 0x00 /* no loopback */
+#define LOOP_EXT 0x02 /* external loopback */
+#define LOOP_INT 0x04 /* internal loopback, excludes MENDEC */
+#define LOOP_MENDEC 0x06 /* internal loopback, includes MENDEC */
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
new file mode 100644
index 000000000000..7cf81bbffe0e
--- /dev/null
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -0,0 +1,792 @@
+/*
+ * Driver for the Macintosh 68K onboard MACE controller with PSC
+ * driven DMA. The MACE driver code is derived from mace.c. The
+ * Mac68k theory of operation is courtesy of the MacBSD wizards.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ * Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
+ *
+ * Copyright (C) 2007 Finn Thain
+ *
+ * Converted to DMA API, converted to unified driver model,
+ * sync'd some routines with mace.c and fixed various bugs.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/crc32.h>
+#include <linux/bitrev.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <asm/io.h>
+#include <asm/macints.h>
+#include <asm/mac_psc.h>
+#include <asm/page.h>
+#include "mace.h"
+
+static char mac_mace_string[] = "macmace";
+
+#define N_TX_BUFF_ORDER 0
+#define N_TX_RING (1 << N_TX_BUFF_ORDER)
+#define N_RX_BUFF_ORDER 3
+#define N_RX_RING (1 << N_RX_BUFF_ORDER)
+
+#define TX_TIMEOUT HZ
+
+#define MACE_BUFF_SIZE 0x800
+
+/* Chip rev needs workaround on HW & multicast addr change */
+#define BROKEN_ADDRCHG_REV 0x0941
+
+/* The MACE is simply wired down on a Mac68K box */
+
+#define MACE_BASE (void *)(0x50F1C000)
+#define MACE_PROM (void *)(0x50F08001)
+
+struct mace_data {
+ volatile struct mace *mace;
+ unsigned char *tx_ring;
+ dma_addr_t tx_ring_phys;
+ unsigned char *rx_ring;
+ dma_addr_t rx_ring_phys;
+ int dma_intr;
+ int rx_slot, rx_tail;
+ int tx_slot, tx_sloti, tx_count;
+ int chipid;
+ struct device *device;
+};
+
+struct mace_frame {
+ u8 rcvcnt;
+ u8 pad1;
+ u8 rcvsts;
+ u8 pad2;
+ u8 rntpc;
+ u8 pad3;
+ u8 rcvcc;
+ u8 pad4;
+ u32 pad5;
+ u32 pad6;
+ u8 data[1];
+	/* And the frame continues... */
+};
+
+#define PRIV_BYTES sizeof(struct mace_data)
+
+static int mace_open(struct net_device *dev);
+static int mace_close(struct net_device *dev);
+static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
+static void mace_set_multicast(struct net_device *dev);
+static int mace_set_address(struct net_device *dev, void *addr);
+static void mace_reset(struct net_device *dev);
+static irqreturn_t mace_interrupt(int irq, void *dev_id);
+static irqreturn_t mace_dma_intr(int irq, void *dev_id);
+static void mace_tx_timeout(struct net_device *dev);
+static void __mace_set_address(struct net_device *dev, void *addr);
+
+/*
+ * Load a receive DMA channel with a base address and ring length;
+ * 'set' selects one of the two register sets (offset 0x00 or 0x10)
+ */
+
+static void mace_load_rxdma_base(struct net_device *dev, int set)
+{
+ struct mace_data *mp = netdev_priv(dev);
+
+ psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
+ psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
+ psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
+ psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
+ mp->rx_tail = 0;
+}
+
+/*
+ * Reset the receive DMA subsystem
+ */
+
+static void mace_rxdma_reset(struct net_device *dev)
+{
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct mace *mace = mp->mace;
+ u8 maccc = mace->maccc;
+
+ mace->maccc = maccc & ~ENRCV;
+
+ psc_write_word(PSC_ENETRD_CTL, 0x8800);
+ mace_load_rxdma_base(dev, 0x00);
+ psc_write_word(PSC_ENETRD_CTL, 0x0400);
+
+ psc_write_word(PSC_ENETRD_CTL, 0x8800);
+ mace_load_rxdma_base(dev, 0x10);
+ psc_write_word(PSC_ENETRD_CTL, 0x0400);
+
+ mace->maccc = maccc;
+ mp->rx_slot = 0;
+
+ psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
+ psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
+}
+
+/*
+ * Reset the transmit DMA subsystem
+ */
+
+static void mace_txdma_reset(struct net_device *dev)
+{
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct mace *mace = mp->mace;
+ u8 maccc;
+
+ psc_write_word(PSC_ENETWR_CTL, 0x8800);
+
+ maccc = mace->maccc;
+ mace->maccc = maccc & ~ENXMT;
+
+ mp->tx_slot = mp->tx_sloti = 0;
+ mp->tx_count = N_TX_RING;
+
+ psc_write_word(PSC_ENETWR_CTL, 0x0400);
+ mace->maccc = maccc;
+}
+
+/*
+ * Disable DMA
+ */
+
+static void mace_dma_off(struct net_device *dev)
+{
+ psc_write_word(PSC_ENETRD_CTL, 0x8800);
+ psc_write_word(PSC_ENETRD_CTL, 0x1000);
+ psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
+ psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);
+
+ psc_write_word(PSC_ENETWR_CTL, 0x8800);
+ psc_write_word(PSC_ENETWR_CTL, 0x1000);
+ psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
+ psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
+}
+
+static const struct net_device_ops mace_netdev_ops = {
+ .ndo_open = mace_open,
+ .ndo_stop = mace_close,
+ .ndo_start_xmit = mace_xmit_start,
+ .ndo_tx_timeout = mace_tx_timeout,
+ .ndo_set_rx_mode = mace_set_multicast,
+ .ndo_set_mac_address = mace_set_address,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/*
+ * Not really much of a probe. The hardware table tells us if this
+ * model of Macintrash has a MACE (AV Macintoshes)
+ */
+
+static int __devinit mace_probe(struct platform_device *pdev)
+{
+ int j;
+ struct mace_data *mp;
+ unsigned char *addr;
+ struct net_device *dev;
+ unsigned char checksum = 0;
+ int err;
+
+ dev = alloc_etherdev(PRIV_BYTES);
+ if (!dev)
+ return -ENOMEM;
+
+ mp = netdev_priv(dev);
+
+ mp->device = &pdev->dev;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ dev->base_addr = (u32)MACE_BASE;
+ mp->mace = MACE_BASE;
+
+ dev->irq = IRQ_MAC_MACE;
+ mp->dma_intr = IRQ_MAC_MACE_DMA;
+
+ mp->chipid = mp->mace->chipid_hi << 8 | mp->mace->chipid_lo;
+
+ /*
+ * The PROM contains 8 bytes which total 0xFF when XOR'd
+	 * together. Thanks to the usual peculiar Apple brain damage,
+	 * the bytes are spaced 16 bytes apart and the bits within
+	 * each byte are reversed.
+ */
+
+ addr = (void *)MACE_PROM;
+
+ for (j = 0; j < 6; ++j) {
+ u8 v = bitrev8(addr[j<<4]);
+ checksum ^= v;
+ dev->dev_addr[j] = v;
+ }
+ for (; j < 8; ++j) {
+ checksum ^= bitrev8(addr[j<<4]);
+ }
+
+ if (checksum != 0xFF) {
+ free_netdev(dev);
+ return -ENODEV;
+ }
+
+ dev->netdev_ops = &mace_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ printk(KERN_INFO "%s: 68K MACE, hardware address %pM\n",
+ dev->name, dev->dev_addr);
+
+ err = register_netdev(dev);
+ if (!err)
+ return 0;
+
+ free_netdev(dev);
+ return err;
+}
+
+/*
+ * Reset the chip.
+ */
+
+static void mace_reset(struct net_device *dev)
+{
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct mace *mb = mp->mace;
+ int i;
+
+ /* soft-reset the chip */
+ i = 200;
+ while (--i) {
+ mb->biucc = SWRST;
+ if (mb->biucc & SWRST) {
+ udelay(10);
+ continue;
+ }
+ break;
+ }
+ if (!i) {
+ printk(KERN_ERR "macmace: cannot reset chip!\n");
+ return;
+ }
+
+ mb->maccc = 0; /* turn off tx, rx */
+ mb->imr = 0xFF; /* disable all intrs for now */
+ i = mb->ir;
+
+ mb->biucc = XMTSP_64;
+ mb->utr = RTRD;
+ mb->fifocc = XMTFW_8 | RCVFW_64 | XMTFWU | RCVFWU;
+
+ mb->xmtfc = AUTO_PAD_XMIT; /* auto-pad short frames */
+ mb->rcvfc = 0;
+
+ /* load up the hardware address */
+ __mace_set_address(dev, dev->dev_addr);
+
+ /* clear the multicast filter */
+ if (mp->chipid == BROKEN_ADDRCHG_REV)
+ mb->iac = LOGADDR;
+ else {
+ mb->iac = ADDRCHG | LOGADDR;
+ while ((mb->iac & ADDRCHG) != 0)
+ ;
+ }
+ for (i = 0; i < 8; ++i)
+ mb->ladrf = 0;
+
+ /* done changing address */
+ if (mp->chipid != BROKEN_ADDRCHG_REV)
+ mb->iac = 0;
+
+ mb->plscc = PORTSEL_AUI;
+}
+
+/*
+ * Load the address on a mace controller.
+ */
+
+static void __mace_set_address(struct net_device *dev, void *addr)
+{
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct mace *mb = mp->mace;
+ unsigned char *p = addr;
+ int i;
+
+ /* load up the hardware address */
+ if (mp->chipid == BROKEN_ADDRCHG_REV)
+ mb->iac = PHYADDR;
+ else {
+ mb->iac = ADDRCHG | PHYADDR;
+ while ((mb->iac & ADDRCHG) != 0)
+ ;
+ }
+ for (i = 0; i < 6; ++i)
+ mb->padr = dev->dev_addr[i] = p[i];
+ if (mp->chipid != BROKEN_ADDRCHG_REV)
+ mb->iac = 0;
+}
+
+static int mace_set_address(struct net_device *dev, void *addr)
+{
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct mace *mb = mp->mace;
+ unsigned long flags;
+ u8 maccc;
+
+ local_irq_save(flags);
+
+ maccc = mb->maccc;
+
+ __mace_set_address(dev, addr);
+
+ mb->maccc = maccc;
+
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+/*
+ * Open the Macintosh MACE. Most of this is playing with the DMA
+ * engine. The ethernet chip is quite friendly.
+ */
+
+static int mace_open(struct net_device *dev)
+{
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct mace *mb = mp->mace;
+
+ /* reset the chip */
+ mace_reset(dev);
+
+ if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
+ printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+ if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
+ printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
+ free_irq(dev->irq, dev);
+ return -EAGAIN;
+ }
+
+ /* Allocate the DMA ring buffers */
+
+ mp->tx_ring = dma_alloc_coherent(mp->device,
+ N_TX_RING * MACE_BUFF_SIZE,
+ &mp->tx_ring_phys, GFP_KERNEL);
+ if (mp->tx_ring == NULL) {
+ printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
+ goto out1;
+ }
+
+ mp->rx_ring = dma_alloc_coherent(mp->device,
+ N_RX_RING * MACE_BUFF_SIZE,
+ &mp->rx_ring_phys, GFP_KERNEL);
+ if (mp->rx_ring == NULL) {
+ printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
+ goto out2;
+ }
+
+ mace_dma_off(dev);
+
+ /* Not sure what these do */
+
+ psc_write_word(PSC_ENETWR_CTL, 0x9000);
+ psc_write_word(PSC_ENETRD_CTL, 0x9000);
+ psc_write_word(PSC_ENETWR_CTL, 0x0400);
+ psc_write_word(PSC_ENETRD_CTL, 0x0400);
+
+ mace_rxdma_reset(dev);
+ mace_txdma_reset(dev);
+
+ /* turn it on! */
+ mb->maccc = ENXMT | ENRCV;
+ /* enable all interrupts except receive interrupts */
+ mb->imr = RCVINT;
+ return 0;
+
+out2:
+ dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
+ mp->tx_ring, mp->tx_ring_phys);
+out1:
+ free_irq(dev->irq, dev);
+ free_irq(mp->dma_intr, dev);
+ return -ENOMEM;
+}
+
+/*
+ * Shut down the mace and its interrupt channel
+ */
+
+static int mace_close(struct net_device *dev)
+{
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct mace *mb = mp->mace;
+
+ mb->maccc = 0; /* disable rx and tx */
+ mb->imr = 0xFF; /* disable all irqs */
+ mace_dma_off(dev); /* disable rx and tx dma */
+
+ return 0;
+}
+
+/*
+ * Transmit a frame
+ */
+
+static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mace_data *mp = netdev_priv(dev);
+ unsigned long flags;
+
+ /* Stop the queue since there's only the one buffer */
+
+ local_irq_save(flags);
+ netif_stop_queue(dev);
+ if (!mp->tx_count) {
+ printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
+ local_irq_restore(flags);
+ return NETDEV_TX_BUSY;
+ }
+ mp->tx_count--;
+ local_irq_restore(flags);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ /* We need to copy into our xmit buffer to take care of alignment and caching issues */
+ skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);
+
+ /* load the Tx DMA and fire it off */
+
+ psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys);
+ psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
+ psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);
+
+ mp->tx_slot ^= 0x10;
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static void mace_set_multicast(struct net_device *dev)
+{
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct mace *mb = mp->mace;
+ int i;
+ u32 crc;
+ u8 maccc;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ maccc = mb->maccc;
+ mb->maccc &= ~PROM;
+
+ if (dev->flags & IFF_PROMISC) {
+ mb->maccc |= PROM;
+ } else {
+ unsigned char multicast_filter[8];
+ struct netdev_hw_addr *ha;
+
+ if (dev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < 8; i++) {
+ multicast_filter[i] = 0xFF;
+ }
+ } else {
+ for (i = 0; i < 8; i++)
+ multicast_filter[i] = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
+ /* bit number in multicast_filter */
+ i = crc >> 26;
+ multicast_filter[i >> 3] |= 1 << (i & 7);
+ }
+ }
+
+ if (mp->chipid == BROKEN_ADDRCHG_REV)
+ mb->iac = LOGADDR;
+ else {
+ mb->iac = ADDRCHG | LOGADDR;
+ while ((mb->iac & ADDRCHG) != 0)
+ ;
+ }
+ for (i = 0; i < 8; ++i)
+ mb->ladrf = multicast_filter[i];
+ if (mp->chipid != BROKEN_ADDRCHG_REV)
+ mb->iac = 0;
+ }
+
+ mb->maccc = maccc;
+ local_irq_restore(flags);
+}
+
+static void mace_handle_misc_intrs(struct net_device *dev, int intr)
+{
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct mace *mb = mp->mace;
+ static int mace_babbles, mace_jabbers;
+
+ if (intr & MPCO)
+ dev->stats.rx_missed_errors += 256;
+ dev->stats.rx_missed_errors += mb->mpc; /* reading clears it */
+ if (intr & RNTPCO)
+ dev->stats.rx_length_errors += 256;
+ dev->stats.rx_length_errors += mb->rntpc; /* reading clears it */
+ if (intr & CERR)
+ ++dev->stats.tx_heartbeat_errors;
+ if (intr & BABBLE)
+ if (mace_babbles++ < 4)
+ printk(KERN_DEBUG "macmace: babbling transmitter\n");
+ if (intr & JABBER)
+ if (mace_jabbers++ < 4)
+ printk(KERN_DEBUG "macmace: jabbering transceiver\n");
+}
+
+static irqreturn_t mace_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct mace *mb = mp->mace;
+ int intr, fs;
+ unsigned long flags;
+
+ /* don't want the dma interrupt handler to fire */
+ local_irq_save(flags);
+
+ intr = mb->ir; /* read interrupt register */
+ mace_handle_misc_intrs(dev, intr);
+
+ if (intr & XMTINT) {
+ fs = mb->xmtfs;
+ if ((fs & XMTSV) == 0) {
+ printk(KERN_ERR "macmace: xmtfs not valid! (fs=%x)\n", fs);
+ mace_reset(dev);
+ /*
+			 * XXX mace likes to hang the machine after an xmtfs error.
+			 * This is hard to reproduce; resetting *may* help.
+ */
+ }
+ /* dma should have finished */
+ if (!mp->tx_count) {
+ printk(KERN_DEBUG "macmace: tx ring ran out? (fs=%x)\n", fs);
+ }
+ /* Update stats */
+ if (fs & (UFLO|LCOL|LCAR|RTRY)) {
+ ++dev->stats.tx_errors;
+ if (fs & LCAR)
+ ++dev->stats.tx_carrier_errors;
+ else if (fs & (UFLO|LCOL|RTRY)) {
+ ++dev->stats.tx_aborted_errors;
+ if (mb->xmtfs & UFLO) {
+ printk(KERN_ERR "%s: DMA underrun.\n", dev->name);
+ dev->stats.tx_fifo_errors++;
+ mace_txdma_reset(dev);
+ }
+ }
+ }
+ }
+
+ if (mp->tx_count)
+ netif_wake_queue(dev);
+
+ local_irq_restore(flags);
+
+ return IRQ_HANDLED;
+}
+
+static void mace_tx_timeout(struct net_device *dev)
+{
+ struct mace_data *mp = netdev_priv(dev);
+ volatile struct mace *mb = mp->mace;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /* turn off both tx and rx and reset the chip */
+ mb->maccc = 0;
+ printk(KERN_ERR "macmace: transmit timeout - resetting\n");
+ mace_txdma_reset(dev);
+ mace_reset(dev);
+
+ /* restart rx dma */
+ mace_rxdma_reset(dev);
+
+ mp->tx_count = N_TX_RING;
+ netif_wake_queue(dev);
+
+ /* turn it on! */
+ mb->maccc = ENXMT | ENRCV;
+ /* enable all interrupts except receive interrupts */
+ mb->imr = RCVINT;
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Handle a newly arrived frame
+ */
+
+static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
+{
+ struct sk_buff *skb;
+ unsigned int frame_status = mf->rcvsts;
+
+ if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) {
+ dev->stats.rx_errors++;
+ if (frame_status & RS_OFLO) {
+ printk(KERN_DEBUG "%s: fifo overflow.\n", dev->name);
+ dev->stats.rx_fifo_errors++;
+ }
+ if (frame_status & RS_CLSN)
+ dev->stats.collisions++;
+ if (frame_status & RS_FRAMERR)
+ dev->stats.rx_frame_errors++;
+ if (frame_status & RS_FCSERR)
+ dev->stats.rx_crc_errors++;
+ } else {
+		unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8);
+
+ skb = dev_alloc_skb(frame_length + 2);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ return;
+ }
+ skb_reserve(skb, 2);
+ memcpy(skb_put(skb, frame_length), mf->data, frame_length);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += frame_length;
+ }
+}
+
+/*
+ * The PSC has passed us a DMA interrupt event.
+ */
+
+static irqreturn_t mace_dma_intr(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct mace_data *mp = netdev_priv(dev);
+ int left, head;
+ u16 status;
+ u32 baka;
+
+ /* Not sure what this does */
+
+	while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY))
+		;
+	if (!(baka & 0x60000000))
+		return IRQ_NONE;
+
+ /*
+ * Process the read queue
+ */
+
+ status = psc_read_word(PSC_ENETRD_CTL);
+
+ if (status & 0x2000) {
+ mace_rxdma_reset(dev);
+ } else if (status & 0x0100) {
+ psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);
+
+ left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
+ head = N_RX_RING - left;
+
+		/* Loop through the ring buffer and process new packets */
+
+ while (mp->rx_tail < head) {
+ mace_dma_rx_frame(dev, (struct mace_frame*) (mp->rx_ring
+ + (mp->rx_tail * MACE_BUFF_SIZE)));
+ mp->rx_tail++;
+ }
+
+		/*
+		 * If we're out of buffers in this ring then switch to
+		 * the other set, otherwise just reactivate this one.
+		 */
+
+ if (!left) {
+ mace_load_rxdma_base(dev, mp->rx_slot);
+ mp->rx_slot ^= 0x10;
+ } else {
+ psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
+ }
+ }
+
+ /*
+ * Process the write queue
+ */
+
+ status = psc_read_word(PSC_ENETWR_CTL);
+
+ if (status & 0x2000) {
+ mace_txdma_reset(dev);
+ } else if (status & 0x0100) {
+ psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
+ mp->tx_sloti ^= 0x10;
+ mp->tx_count++;
+ }
+ return IRQ_HANDLED;
+}
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
+MODULE_ALIAS("platform:macmace");
+
+static int __devexit mac_mace_device_remove (struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct mace_data *mp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ free_irq(dev->irq, dev);
+ free_irq(IRQ_MAC_MACE_DMA, dev);
+
+ dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE,
+ mp->rx_ring, mp->rx_ring_phys);
+ dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
+ mp->tx_ring, mp->tx_ring_phys);
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct platform_driver mac_mace_driver = {
+ .probe = mace_probe,
+ .remove = __devexit_p(mac_mace_device_remove),
+ .driver = {
+ .name = mac_mace_string,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mac_mace_init_module(void)
+{
+ if (!MACH_IS_MAC)
+ return -ENODEV;
+
+ return platform_driver_register(&mac_mace_driver);
+}
+
+static void __exit mac_mace_cleanup_module(void)
+{
+ platform_driver_unregister(&mac_mace_driver);
+}
+
+module_init(mac_mace_init_module);
+module_exit(mac_mace_cleanup_module);
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
new file mode 100644
index 000000000000..1ed886d421f8
--- /dev/null
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -0,0 +1,70 @@
+#
+# Atheros device configuration
+#
+
+config NET_VENDOR_ATHEROS
+ bool "Atheros devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Atheros devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_ATHEROS
+
+config ATL2
+ tristate "Atheros L2 Fast Ethernet support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+	  This driver supports the Atheros L2 Fast Ethernet adapter.
+
+ To compile this driver as a module, choose M here. The module
+ will be called atl2.
+
+config ATL1
+ tristate "Atheros/Attansic L1 Gigabit Ethernet support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+	  This driver supports the Atheros/Attansic L1 Gigabit Ethernet
+ adapter.
+
+ To compile this driver as a module, choose M here. The module
+ will be called atl1.
+
+config ATL1E
+ tristate "Atheros L1E Gigabit Ethernet support (EXPERIMENTAL)"
+ depends on PCI && EXPERIMENTAL
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+	  This driver supports the Atheros L1E Gigabit Ethernet adapter.
+
+ To compile this driver as a module, choose M here. The module
+ will be called atl1e.
+
+config ATL1C
+ tristate "Atheros L1C Gigabit Ethernet support (EXPERIMENTAL)"
+ depends on PCI && EXPERIMENTAL
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+	  This driver supports the Atheros L1C Gigabit Ethernet adapter.
+
+ To compile this driver as a module, choose M here. The module
+ will be called atl1c.
+
+endif # NET_VENDOR_ATHEROS
diff --git a/drivers/net/ethernet/atheros/Makefile b/drivers/net/ethernet/atheros/Makefile
new file mode 100644
index 000000000000..e7e76fb576ff
--- /dev/null
+++ b/drivers/net/ethernet/atheros/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Atheros network device drivers.
+#
+
+obj-$(CONFIG_ATL1) += atlx/
+obj-$(CONFIG_ATL2) += atlx/
+obj-$(CONFIG_ATL1E) += atl1e/
+obj-$(CONFIG_ATL1C) += atl1c/
diff --git a/drivers/net/ethernet/atheros/atl1c/Makefile b/drivers/net/ethernet/atheros/atl1c/Makefile
new file mode 100644
index 000000000000..c37d966952ee
--- /dev/null
+++ b/drivers/net/ethernet/atheros/atl1c/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_ATL1C) += atl1c.o
+atl1c-objs := atl1c_main.o atl1c_hw.o atl1c_ethtool.o
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
new file mode 100644
index 000000000000..ca70e16b6e2c
--- /dev/null
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -0,0 +1,636 @@
+/*
+ * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved.
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _ATL1C_H_
+#define _ATL1C_H_
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <linux/mii.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/tcp.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/workqueue.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+
+#include "atl1c_hw.h"
+
+/* Wake Up Filter Control */
+#define AT_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define AT_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define AT_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define AT_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */
+#define AT_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+
+#define AT_VLAN_TO_TAG(_vlan, _tag) \
+ _tag = ((((_vlan) >> 8) & 0xFF) |\
+ (((_vlan) & 0xFF) << 8))
+
+#define AT_TAG_TO_VLAN(_tag, _vlan) \
+ _vlan = ((((_tag) >> 8) & 0xFF) |\
+ (((_tag) & 0xFF) << 8))
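+
+/* Both conversions are the same 16-bit byte swap: the VLAN id and the
+ * tag the hardware uses differ only in byte order. */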
+
+#define SPEED_0 0xffff
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+#define AT_RX_BUF_SIZE (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)
+#define MAX_JUMBO_FRAME_SIZE (6*1024)
+#define MAX_TSO_FRAME_SIZE (7*1024)
+#define MAX_TX_OFFLOAD_THRESH (9*1024)
+
+#define AT_MAX_RECEIVE_QUEUE 4
+#define AT_DEF_RECEIVE_QUEUE 1
+#define AT_MAX_TRANSMIT_QUEUE 2
+
+#define AT_DMA_HI_ADDR_MASK 0xffffffff00000000ULL
+#define AT_DMA_LO_ADDR_MASK 0x00000000ffffffffULL
+
+#define AT_TX_WATCHDOG (5 * HZ)
+#define AT_MAX_INT_WORK 5
+#define AT_TWSI_EEPROM_TIMEOUT 100
+#define AT_HW_MAX_IDLE_DELAY 10
+#define AT_SUSPEND_LINK_TIMEOUT 100
+
+#define AT_ASPM_L0S_TIMER 6
+#define AT_ASPM_L1_TIMER 12
+#define AT_LCKDET_TIMER 12
+
+#define ATL1C_PCIE_L0S_L1_DISABLE 0x01
+#define ATL1C_PCIE_PHY_RESET 0x02
+
+#define ATL1C_ASPM_L0s_ENABLE 0x0001
+#define ATL1C_ASPM_L1_ENABLE 0x0002
+
+#define AT_REGS_LEN (75 * sizeof(u32))
+#define AT_EEPROM_LEN 512
+
+#define ATL1C_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i]))
+#define ATL1C_RFD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_rx_free_desc)
+#define ATL1C_TPD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_tpd_desc)
+#define ATL1C_RRD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_recv_ret_status)
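+
+/* ATL1C_GET_DESC views ring R's raw descriptor memory as an array of
+ * the requested descriptor type and returns a pointer to entry i. */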
+
+/* tpd word 1 bit 0:7 General Checksum task offload */
+#define TPD_L4HDR_OFFSET_MASK 0x00FF
+#define TPD_L4HDR_OFFSET_SHIFT 0
+
+/* tpd word 1 bit 0:7 Large Send task offload (IPv4/IPV6) */
+#define TPD_TCPHDR_OFFSET_MASK 0x00FF
+#define TPD_TCPHDR_OFFSET_SHIFT 0
+
+/* tpd word 1 bit 0:7 Custom Checksum task offload */
+#define TPD_PLOADOFFSET_MASK 0x00FF
+#define TPD_PLOADOFFSET_SHIFT 0
+
+/* tpd word 1 bit 8:17 */
+#define TPD_CCSUM_EN_MASK 0x0001
+#define TPD_CCSUM_EN_SHIFT 8
+#define TPD_IP_CSUM_MASK 0x0001
+#define TPD_IP_CSUM_SHIFT 9
+#define TPD_TCP_CSUM_MASK 0x0001
+#define TPD_TCP_CSUM_SHIFT 10
+#define TPD_UDP_CSUM_MASK 0x0001
+#define TPD_UDP_CSUM_SHIFT 11
+#define TPD_LSO_EN_MASK 0x0001 /* TCP Large Send Offload */
+#define TPD_LSO_EN_SHIFT 12
+#define TPD_LSO_VER_MASK 0x0001
+#define TPD_LSO_VER_SHIFT 13 /* 0 : ipv4; 1 : ipv4/ipv6 */
+#define TPD_CON_VTAG_MASK 0x0001
+#define TPD_CON_VTAG_SHIFT 14
+#define TPD_INS_VTAG_MASK 0x0001
+#define TPD_INS_VTAG_SHIFT 15
+#define TPD_IPV4_PACKET_MASK 0x0001 /* valid when LSO VER is 1 */
+#define TPD_IPV4_PACKET_SHIFT 16
+#define TPD_ETH_TYPE_MASK 0x0001
+#define TPD_ETH_TYPE_SHIFT 17 /* 0 : 802.3 frame; 1 : Ethernet */
+
+/* tpd word 18:25 Custom Checksum task offload */
+#define TPD_CCSUM_OFFSET_MASK 0x00FF
+#define TPD_CCSUM_OFFSET_SHIFT 18
+#define TPD_CCSUM_EPAD_MASK 0x0001
+#define TPD_CCSUM_EPAD_SHIFT 30
+
+/* tpd word 18:30 Large Send task offload (IPv4/IPV6) */
+#define TPD_MSS_MASK 0x1FFF
+#define TPD_MSS_SHIFT 18
+
+#define TPD_EOP_MASK 0x0001
+#define TPD_EOP_SHIFT 31
+
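+/*
+ * All word1 fields follow a (value & MASK) << SHIFT packing convention.
+ * As a sketch only (word1 and mss are hypothetical locals, not names
+ * from this driver), enabling IPv4 large-send offload might look like:
+ *
+ *	word1 |= (1 & TPD_LSO_EN_MASK) << TPD_LSO_EN_SHIFT;
+ *	word1 |= (mss & TPD_MSS_MASK) << TPD_MSS_SHIFT;
+ */
+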
+struct atl1c_tpd_desc {
+ __le16 buffer_len; /* include 4-byte CRC */
+ __le16 vlan_tag;
+ __le32 word1;
+ __le64 buffer_addr;
+};
+
+struct atl1c_tpd_ext_desc {
+ u32 reservd_0;
+ __le32 word1;
+ __le32 pkt_len;
+ u32 reservd_1;
+};
+/* rrs word 0 bit 0:31 */
+#define RRS_RX_CSUM_MASK 0xFFFF
+#define RRS_RX_CSUM_SHIFT 0
+#define RRS_RX_RFD_CNT_MASK 0x000F
+#define RRS_RX_RFD_CNT_SHIFT 16
+#define RRS_RX_RFD_INDEX_MASK 0x0FFF
+#define RRS_RX_RFD_INDEX_SHIFT 20
+
+/* rrs flag bit 0:16 */
+#define RRS_HEAD_LEN_MASK 0x00FF
+#define RRS_HEAD_LEN_SHIFT 0
+#define RRS_HDS_TYPE_MASK 0x0003
+#define RRS_HDS_TYPE_SHIFT 8
+#define RRS_CPU_NUM_MASK 0x0003
+#define RRS_CPU_NUM_SHIFT 10
+#define RRS_HASH_FLG_MASK 0x000F
+#define RRS_HASH_FLG_SHIFT 12
+
+#define RRS_HDS_TYPE_HEAD 1
+#define RRS_HDS_TYPE_DATA 2
+
+#define RRS_IS_NO_HDS_TYPE(flag) \
+ ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == 0)
+
+#define RRS_IS_HDS_HEAD(flag) \
+ ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \
+ RRS_HDS_TYPE_HEAD)
+
+#define RRS_IS_HDS_DATA(flag) \
+ ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \
+ RRS_HDS_TYPE_DATA)
+
+/* rrs word 3 bit 0:31 */
+#define RRS_PKT_SIZE_MASK 0x3FFF
+#define RRS_PKT_SIZE_SHIFT 0
+#define RRS_ERR_L4_CSUM_MASK 0x0001
+#define RRS_ERR_L4_CSUM_SHIFT 14
+#define RRS_ERR_IP_CSUM_MASK 0x0001
+#define RRS_ERR_IP_CSUM_SHIFT 15
+#define RRS_VLAN_INS_MASK 0x0001
+#define RRS_VLAN_INS_SHIFT 16
+#define RRS_PROT_ID_MASK 0x0007
+#define RRS_PROT_ID_SHIFT 17
+#define RRS_RX_ERR_SUM_MASK 0x0001
+#define RRS_RX_ERR_SUM_SHIFT 20
+#define RRS_RX_ERR_CRC_MASK 0x0001
+#define RRS_RX_ERR_CRC_SHIFT 21
+#define RRS_RX_ERR_FAE_MASK 0x0001
+#define RRS_RX_ERR_FAE_SHIFT 22
+#define RRS_RX_ERR_TRUNC_MASK 0x0001
+#define RRS_RX_ERR_TRUNC_SHIFT 23
+#define RRS_RX_ERR_RUNC_MASK 0x0001
+#define RRS_RX_ERR_RUNC_SHIFT 24
+#define RRS_RX_ERR_ICMP_MASK 0x0001
+#define RRS_RX_ERR_ICMP_SHIFT 25
+#define RRS_PACKET_BCAST_MASK 0x0001
+#define RRS_PACKET_BCAST_SHIFT 26
+#define RRS_PACKET_MCAST_MASK 0x0001
+#define RRS_PACKET_MCAST_SHIFT 27
+#define RRS_PACKET_TYPE_MASK 0x0001
+#define RRS_PACKET_TYPE_SHIFT 28
+#define RRS_FIFO_FULL_MASK 0x0001
+#define RRS_FIFO_FULL_SHIFT 29
+#define RRS_802_3_LEN_ERR_MASK 0x0001
+#define RRS_802_3_LEN_ERR_SHIFT 30
+#define RRS_RXD_UPDATED_MASK 0x0001
+#define RRS_RXD_UPDATED_SHIFT 31
+
+#define RRS_ERR_L4_CSUM 0x00004000
+#define RRS_ERR_IP_CSUM 0x00008000
+#define RRS_VLAN_INS 0x00010000
+#define RRS_RX_ERR_SUM 0x00100000
+#define RRS_RX_ERR_CRC 0x00200000
+#define RRS_802_3_LEN_ERR 0x40000000
+#define RRS_RXD_UPDATED 0x80000000
+
+#define RRS_PACKET_TYPE_802_3 1
+#define RRS_PACKET_TYPE_ETH 0
+#define RRS_PACKET_IS_ETH(word) \
+ ((((word) >> RRS_PACKET_TYPE_SHIFT) & RRS_PACKET_TYPE_MASK) == \
+ RRS_PACKET_TYPE_ETH)
+#define RRS_RXD_IS_VALID(word) \
+ ((((word) >> RRS_RXD_UPDATED_SHIFT) & RRS_RXD_UPDATED_MASK) == 1)
+
+#define RRS_PACKET_PROT_IS_IPV4_ONLY(word) \
+ ((((word) >> RRS_PROT_ID_SHIFT) & RRS_PROT_ID_MASK) == 1)
+#define RRS_PACKET_PROT_IS_IPV6_ONLY(word) \
+ ((((word) >> RRS_PROT_ID_SHIFT) & RRS_PROT_ID_MASK) == 6)
+
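+/*
+ * Fields are read back with the same mask/shift convention; a sketch
+ * only (rrs being a hypothetical pointer to the struct below):
+ *
+ *	len = (le32_to_cpu(rrs->word3) >> RRS_PKT_SIZE_SHIFT) &
+ *		RRS_PKT_SIZE_MASK;
+ */
+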
+struct atl1c_recv_ret_status {
+ __le32 word0;
+ __le32 rss_hash;
+ __le16 vlan_tag;
+ __le16 flag;
+ __le32 word3;
+};
+
+/* RFD descriptor */
+struct atl1c_rx_free_desc {
+ __le64 buffer_addr;
+};
+
+/* DMA Order Settings */
+enum atl1c_dma_order {
+ atl1c_dma_ord_in = 1,
+ atl1c_dma_ord_enh = 2,
+ atl1c_dma_ord_out = 4
+};
+
+enum atl1c_dma_rcb {
+ atl1c_rcb_64 = 0,
+ atl1c_rcb_128 = 1
+};
+
+enum atl1c_mac_speed {
+ atl1c_mac_speed_0 = 0,
+ atl1c_mac_speed_10_100 = 1,
+ atl1c_mac_speed_1000 = 2
+};
+
+enum atl1c_dma_req_block {
+ atl1c_dma_req_128 = 0,
+ atl1c_dma_req_256 = 1,
+ atl1c_dma_req_512 = 2,
+ atl1c_dma_req_1024 = 3,
+ atl1c_dma_req_2048 = 4,
+ atl1c_dma_req_4096 = 5
+};
+
+enum atl1c_rss_mode {
+ atl1c_rss_mode_disable = 0,
+ atl1c_rss_sig_que = 1,
+ atl1c_rss_mul_que_sig_int = 2,
+ atl1c_rss_mul_que_mul_int = 4,
+};
+
+enum atl1c_rss_type {
+ atl1c_rss_disable = 0,
+ atl1c_rss_ipv4 = 1,
+ atl1c_rss_ipv4_tcp = 2,
+ atl1c_rss_ipv6 = 4,
+ atl1c_rss_ipv6_tcp = 8
+};
+
+enum atl1c_nic_type {
+ athr_l1c = 0,
+ athr_l2c = 1,
+ athr_l2c_b,
+ athr_l2c_b2,
+ athr_l1d,
+ athr_l1d_2,
+};
+
+enum atl1c_trans_queue {
+ atl1c_trans_normal = 0,
+ atl1c_trans_high = 1
+};
+
+struct atl1c_hw_stats {
+ /* rx */
+ unsigned long rx_ok; /* The number of good packets received. */
+ unsigned long rx_bcast; /* The number of good broadcast packets received. */
+ unsigned long rx_mcast; /* The number of good multicast packets received. */
+ unsigned long rx_pause; /* The number of Pause packets received. */
+ unsigned long rx_ctrl; /* The number of Control packets received other than Pause frames. */
+ unsigned long rx_fcs_err; /* The number of packets with bad FCS. */
+ unsigned long rx_len_err; /* The number of packets whose length field does not match their actual size. */
+ unsigned long rx_byte_cnt; /* The number of bytes of good packets received. FCS is NOT included. */
+ unsigned long rx_runt; /* The number of packets received that are less than 64 bytes long with good FCS. */
+ unsigned long rx_frag; /* The number of packets received that are less than 64 bytes long with bad FCS. */
+ unsigned long rx_sz_64; /* The number of good and bad packets received that are 64 bytes long. */
+ unsigned long rx_sz_65_127; /* The number of good and bad packets received that are between 65 and 127 bytes long. */
+ unsigned long rx_sz_128_255; /* The number of good and bad packets received that are between 128 and 255 bytes long. */
+ unsigned long rx_sz_256_511; /* The number of good and bad packets received that are between 256 and 511 bytes long. */
+ unsigned long rx_sz_512_1023; /* The number of good and bad packets received that are between 512 and 1023 bytes long. */
+ unsigned long rx_sz_1024_1518; /* The number of good and bad packets received that are between 1024 and 1518 bytes long. */
+ unsigned long rx_sz_1519_max; /* The number of good and bad packets received that are between 1519 bytes and the MTU. */
+ unsigned long rx_sz_ov; /* The number of good and bad packets received that are larger than the MTU and truncated by Selene. */
+ unsigned long rx_rxf_ov; /* The number of frames dropped due to RX FIFO overflow. */
+ unsigned long rx_rrd_ov; /* The number of frames dropped due to RRD overflow. */
+ unsigned long rx_align_err; /* Alignment errors. */
+ unsigned long rx_bcast_byte_cnt; /* The byte count of broadcast packets received, excluding FCS. */
+ unsigned long rx_mcast_byte_cnt; /* The byte count of multicast packets received, excluding FCS. */
+ unsigned long rx_err_addr; /* The number of packets dropped due to address filtering. */
+
+ /* tx */
+ unsigned long tx_ok; /* The number of good packets transmitted. */
+ unsigned long tx_bcast; /* The number of good broadcast packets transmitted. */
+ unsigned long tx_mcast; /* The number of good multicast packets transmitted. */
+ unsigned long tx_pause; /* The number of Pause packets transmitted. */
+ unsigned long tx_exc_defer; /* The number of packets transmitted with excessive deferral. */
+ unsigned long tx_ctrl; /* The number of control frames transmitted, excluding Pause frames. */
+ unsigned long tx_defer; /* The number of packets transmitted that were deferred. */
+ unsigned long tx_byte_cnt; /* The number of bytes of data transmitted. FCS is NOT included. */
+ unsigned long tx_sz_64; /* The number of good and bad packets transmitted that are 64 bytes long. */
+ unsigned long tx_sz_65_127; /* The number of good and bad packets transmitted that are between 65 and 127 bytes long. */
+ unsigned long tx_sz_128_255; /* The number of good and bad packets transmitted that are between 128 and 255 bytes long. */
+ unsigned long tx_sz_256_511; /* The number of good and bad packets transmitted that are between 256 and 511 bytes long. */
+ unsigned long tx_sz_512_1023; /* The number of good and bad packets transmitted that are between 512 and 1023 bytes long. */
+ unsigned long tx_sz_1024_1518; /* The number of good and bad packets transmitted that are between 1024 and 1518 bytes long. */
+ unsigned long tx_sz_1519_max; /* The number of good and bad packets transmitted that are between 1519 bytes and the MTU. */
+ unsigned long tx_1_col; /* The number of packets transmitted successfully after a single prior collision. */
+ unsigned long tx_2_col; /* The number of packets transmitted successfully after multiple prior collisions. */
+ unsigned long tx_late_col; /* The number of packets transmitted with late collisions. */
+ unsigned long tx_abort_col; /* The number of transmit packets aborted due to excessive collisions. */
+ unsigned long tx_underrun; /* The number of transmit packets aborted due to transmit FIFO underrun or TRD FIFO underrun. */
+ unsigned long tx_rd_eop; /* The number of times a read went beyond the EOP into the next frame area because the TRD was not written in time. */
+ unsigned long tx_len_err; /* The number of transmit packets whose length field does NOT match the actual frame size. */
+ unsigned long tx_trunc; /* The number of transmit packets truncated because their size exceeded the MTU. */
+ unsigned long tx_bcast_byte; /* The byte count of broadcast packets transmitted, excluding FCS. */
+ unsigned long tx_mcast_byte; /* The byte count of multicast packets transmitted, excluding FCS. */
+};
+
+struct atl1c_hw {
+ u8 __iomem *hw_addr; /* mapped base address of the registers */
+ struct atl1c_adapter *adapter;
+ enum atl1c_nic_type nic_type;
+ enum atl1c_dma_order dma_order;
+ enum atl1c_dma_rcb rcb_value;
+ enum atl1c_dma_req_block dmar_block;
+ enum atl1c_dma_req_block dmaw_block;
+
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u16 phy_id1;
+ u16 phy_id2;
+
+ u32 intr_mask;
+ u8 dmaw_dly_cnt;
+ u8 dmar_dly_cnt;
+
+ u8 preamble_len;
+ u16 max_frame_size;
+ u16 min_frame_size;
+
+ enum atl1c_mac_speed mac_speed;
+ bool mac_duplex;
+ bool hibernate;
+ u16 media_type;
+#define MEDIA_TYPE_AUTO_SENSOR 0
+#define MEDIA_TYPE_100M_FULL 1
+#define MEDIA_TYPE_100M_HALF 2
+#define MEDIA_TYPE_10M_FULL 3
+#define MEDIA_TYPE_10M_HALF 4
+
+ u16 autoneg_advertised;
+ u16 mii_autoneg_adv_reg;
+ u16 mii_1000t_ctrl_reg;
+
+ u16 tx_imt; /* TX Interrupt Moderator timer (2us resolution) */
+ u16 rx_imt; /* RX Interrupt Moderator timer (2us resolution) */
+ u16 ict; /* Interrupt Clear timer (2us resolution) */
+ u16 ctrl_flags;
+#define ATL1C_INTR_CLEAR_ON_READ 0x0001
+#define ATL1C_INTR_MODRT_ENABLE 0x0002
+#define ATL1C_CMB_ENABLE 0x0004
+#define ATL1C_SMB_ENABLE 0x0010
+#define ATL1C_TXQ_MODE_ENHANCE 0x0020
+#define ATL1C_RX_IPV6_CHKSUM 0x0040
+#define ATL1C_ASPM_L0S_SUPPORT 0x0080
+#define ATL1C_ASPM_L1_SUPPORT 0x0100
+#define ATL1C_ASPM_CTRL_MON 0x0200
+#define ATL1C_HIB_DISABLE 0x0400
+#define ATL1C_APS_MODE_ENABLE 0x0800
+#define ATL1C_LINK_EXT_SYNC 0x1000
+#define ATL1C_CLK_GATING_EN 0x2000
+#define ATL1C_FPGA_VERSION 0x8000
+ u16 link_cap_flags;
+#define ATL1C_LINK_CAP_1000M 0x0001
+ u16 cmb_tpd;
+ u16 cmb_rrd;
+ u16 cmb_rx_timer; /* 2us resolution */
+ u16 cmb_tx_timer;
+ u32 smb_timer;
+
+ u16 rrd_thresh; /* Threshold of number of RRD produced to trigger
+ interrupt request */
+ u16 tpd_thresh;
+ u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. */
+ u8 rfd_burst;
+ enum atl1c_rss_type rss_type;
+ enum atl1c_rss_mode rss_mode;
+ u8 rss_hash_bits;
+ u32 base_cpu;
+ u32 indirect_tab;
+ u8 mac_addr[ETH_ALEN];
+ u8 perm_mac_addr[ETH_ALEN];
+
+ bool phy_configured;
+ bool re_autoneg;
+ bool emi_ca;
+};
+
+/*
+ * atl1c_ring_header represents a single, contiguous block of DMA space
+ * mapped for the three descriptor rings (tpd, rfd, rrd) and the two
+ * message blocks (cmb, smb) described below
+ */
+struct atl1c_ring_header {
+ void *desc; /* virtual address */
+ dma_addr_t dma; /* physical address */
+ unsigned int size; /* length in bytes */
+};
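+
+/*
+ * Illustrative sketch (not part of this patch): the rings and message
+ * blocks below are carved out of this single allocation. Sizes and
+ * names here are hypothetical; the real layout is computed by the ring
+ * setup code in atl1c_main.c.
+ */
+#if 0
+ ring_header->size = tpd_size + rfd_size + rrd_size +
+ cmb_size + smb_size + 4 * 8; /* alignment slack */
+ ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
+ &ring_header->dma);
+ /* carve the tpd ring out of the block, 8-byte aligned */
+ tpd_ring->dma = roundup(ring_header->dma, 8);
+ tpd_ring->desc = (u8 *)ring_header->desc +
+ (tpd_ring->dma - ring_header->dma);
+#endif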
+
+/*
+ * atl1c_buffer is a wrapper around a pointer to a socket buffer
+ * so a DMA handle can be stored along with the skb
+ */
+struct atl1c_buffer {
+ struct sk_buff *skb; /* socket buffer */
+ u16 length; /* rx buffer length */
+ u16 flags; /* information of buffer */
+#define ATL1C_BUFFER_FREE 0x0001
+#define ATL1C_BUFFER_BUSY 0x0002
+#define ATL1C_BUFFER_STATE_MASK 0x0003
+
+#define ATL1C_PCIMAP_SINGLE 0x0004
+#define ATL1C_PCIMAP_PAGE 0x0008
+#define ATL1C_PCIMAP_TYPE_MASK 0x000C
+
+#define ATL1C_PCIMAP_TODEVICE 0x0010
+#define ATL1C_PCIMAP_FROMDEVICE 0x0020
+#define ATL1C_PCIMAP_DIRECTION_MASK 0x0030
+ dma_addr_t dma;
+};
+
+#define ATL1C_SET_BUFFER_STATE(buff, state) do { \
+ ((buff)->flags) &= ~ATL1C_BUFFER_STATE_MASK; \
+ ((buff)->flags) |= (state); \
+ } while (0)
+
+#define ATL1C_SET_PCIMAP_TYPE(buff, type, direction) do { \
+ ((buff)->flags) &= ~ATL1C_PCIMAP_TYPE_MASK; \
+ ((buff)->flags) |= (type); \
+ ((buff)->flags) &= ~ATL1C_PCIMAP_DIRECTION_MASK; \
+ ((buff)->flags) |= (direction); \
+ } while (0)
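+
+/*
+ * Illustrative sketch (not part of this patch): typical use of the two
+ * helpers above when a tx buffer is DMA-mapped. Variable names are
+ * hypothetical.
+ */
+#if 0
+ buffer_info->skb = skb;
+ buffer_info->length = map_len;
+ buffer_info->dma = pci_map_single(pdev, skb->data, map_len,
+ PCI_DMA_TODEVICE);
+ ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
+ ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
+ ATL1C_PCIMAP_TODEVICE);
+#endif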
+
+/* transmit packet descriptor (tpd) ring */
+struct atl1c_tpd_ring {
+ void *desc; /* descriptor ring virtual address */
+ dma_addr_t dma; /* descriptor ring physical address */
+ u16 size; /* descriptor ring length in bytes */
+ u16 count; /* number of descriptors in the ring */
+ u16 next_to_use; /* this is protected by adapter->tx_lock */
+ atomic_t next_to_clean;
+ struct atl1c_buffer *buffer_info;
+};
+
+/* receive free descriptor (rfd) ring */
+struct atl1c_rfd_ring {
+ void *desc; /* descriptor ring virtual address */
+ dma_addr_t dma; /* descriptor ring physical address */
+ u16 size; /* descriptor ring length in bytes */
+ u16 count; /* number of descriptors in the ring */
+ u16 next_to_use;
+ u16 next_to_clean;
+ struct atl1c_buffer *buffer_info;
+};
+
+/* receive return descriptor (rrd) ring */
+struct atl1c_rrd_ring {
+ void *desc; /* descriptor ring virtual address */
+ dma_addr_t dma; /* descriptor ring physical address */
+ u16 size; /* descriptor ring length in bytes */
+ u16 count; /* number of descriptors in the ring */
+ u16 next_to_use;
+ u16 next_to_clean;
+};
+
+struct atl1c_cmb {
+ void *cmb;
+ dma_addr_t dma;
+};
+
+struct atl1c_smb {
+ void *smb;
+ dma_addr_t dma;
+};
+
+/* board specific private data structure */
+struct atl1c_adapter {
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct napi_struct napi;
+ struct atl1c_hw hw;
+ struct atl1c_hw_stats hw_stats;
+ struct mii_if_info mii; /* MII interface info */
+ u16 rx_buffer_len;
+
+ unsigned long flags;
+#define __AT_TESTING 0x0001
+#define __AT_RESETTING 0x0002
+#define __AT_DOWN 0x0003
+ unsigned long work_event;
+#define ATL1C_WORK_EVENT_RESET 0
+#define ATL1C_WORK_EVENT_LINK_CHANGE 1
+ u32 msg_enable;
+
+ bool have_msi;
+ u32 wol;
+ u16 link_speed;
+ u16 link_duplex;
+
+ spinlock_t mdio_lock;
+ spinlock_t tx_lock;
+ atomic_t irq_sem;
+
+ struct work_struct common_task;
+ struct timer_list watchdog_timer;
+ struct timer_list phy_config_timer;
+
+ /* All Descriptor memory */
+ struct atl1c_ring_header ring_header;
+ struct atl1c_tpd_ring tpd_ring[AT_MAX_TRANSMIT_QUEUE];
+ struct atl1c_rfd_ring rfd_ring[AT_MAX_RECEIVE_QUEUE];
+ struct atl1c_rrd_ring rrd_ring[AT_MAX_RECEIVE_QUEUE];
+ struct atl1c_cmb cmb;
+ struct atl1c_smb smb;
+ int num_rx_queues;
+ u32 bd_number; /* board number */
+};
+
+#define AT_WRITE_REG(a, reg, value) ( \
+ writel((value), ((a)->hw_addr + reg)))
+
+#define AT_WRITE_FLUSH(a) (\
+ readl((a)->hw_addr))
+
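+/*
+ * While the chip is hibernating, the first readl() below is a throwaway
+ * access and the value is taken from a second read of the same register.
+ */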
+#define AT_READ_REG(a, reg, pdata) do { \
+ if (unlikely((a)->hibernate)) { \
+ readl((a)->hw_addr + reg); \
+ *(u32 *)pdata = readl((a)->hw_addr + reg); \
+ } else { \
+ *(u32 *)pdata = readl((a)->hw_addr + reg); \
+ } \
+ } while (0)
+
+#define AT_WRITE_REGB(a, reg, value) (\
+ writeb((value), ((a)->hw_addr + reg)))
+
+#define AT_READ_REGB(a, reg) (\
+ readb((a)->hw_addr + reg))
+
+#define AT_WRITE_REGW(a, reg, value) (\
+ writew((value), ((a)->hw_addr + reg)))
+
+#define AT_READ_REGW(a, reg) (\
+ readw((a)->hw_addr + reg))
+
+#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+ writel((value), (((a)->hw_addr + reg) + ((offset) << 2))))
+
+#define AT_READ_REG_ARRAY(a, reg, offset) ( \
+ readl(((a)->hw_addr + reg) + ((offset) << 2)))
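+
+/*
+ * Illustrative sketch (not part of this patch): a read-modify-write of a
+ * MAC register through the accessors above.
+ */
+#if 0
+ u32 mac_ctrl;
+
+ AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl);
+ mac_ctrl |= MAC_CTRL_RX_EN;
+ AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl);
+ AT_WRITE_FLUSH(hw); /* post the write before continuing */
+#endif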
+
+extern char atl1c_driver_name[];
+extern char atl1c_driver_version[];
+
+extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
+extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
+extern void atl1c_set_ethtool_ops(struct net_device *netdev);
+#endif /* _ATL1C_H_ */
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
new file mode 100644
index 000000000000..7be884d0aaf6
--- /dev/null
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright(c) 2009 - 2009 Atheros Corporation. All rights reserved.
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/slab.h>
+
+#include "atl1c.h"
+
+static int atl1c_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ struct atl1c_hw *hw = &adapter->hw;
+
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
+ if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M)
+ ecmd->supported |= SUPPORTED_1000baseT_Full;
+
+ ecmd->advertising = ADVERTISED_TP;
+
+ ecmd->advertising |= hw->autoneg_advertised;
+
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = 0;
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ if (adapter->link_speed != SPEED_0) {
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+ if (adapter->link_duplex == FULL_DUPLEX)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ } else {
+ ethtool_cmd_speed_set(ecmd, -1);
+ ecmd->duplex = -1;
+ }
+
+ ecmd->autoneg = AUTONEG_ENABLE;
+ return 0;
+}
+
+static int atl1c_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ struct atl1c_hw *hw = &adapter->hw;
+ u16 autoneg_advertised;
+
+ while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
+ msleep(1);
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ autoneg_advertised = ADVERTISED_Autoneg;
+ } else {
+ u32 speed = ethtool_cmd_speed(ecmd);
+ if (speed == SPEED_1000) {
+ if (ecmd->duplex != DUPLEX_FULL) {
+ if (netif_msg_link(adapter))
+ dev_warn(&adapter->pdev->dev,
+ "1000M half is invalid\n");
+ clear_bit(__AT_RESETTING, &adapter->flags);
+ return -EINVAL;
+ }
+ autoneg_advertised = ADVERTISED_1000baseT_Full;
+ } else if (speed == SPEED_100) {
+ if (ecmd->duplex == DUPLEX_FULL)
+ autoneg_advertised = ADVERTISED_100baseT_Full;
+ else
+ autoneg_advertised = ADVERTISED_100baseT_Half;
+ } else {
+ if (ecmd->duplex == DUPLEX_FULL)
+ autoneg_advertised = ADVERTISED_10baseT_Full;
+ else
+ autoneg_advertised = ADVERTISED_10baseT_Half;
+ }
+ }
+
+ if (hw->autoneg_advertised != autoneg_advertised) {
+ hw->autoneg_advertised = autoneg_advertised;
+ if (atl1c_restart_autoneg(hw) != 0) {
+ if (netif_msg_link(adapter))
+ dev_warn(&adapter->pdev->dev,
+ "ethtool speed/duplex setting failed\n");
+ clear_bit(__AT_RESETTING, &adapter->flags);
+ return -EINVAL;
+ }
+ }
+ clear_bit(__AT_RESETTING, &adapter->flags);
+ return 0;
+}
+
+static u32 atl1c_get_msglevel(struct net_device *netdev)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void atl1c_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+static int atl1c_get_regs_len(struct net_device *netdev)
+{
+ return AT_REGS_LEN;
+}
+
+static void atl1c_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ struct atl1c_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u16 phy_data;
+
+ memset(p, 0, AT_REGS_LEN);
+
+ regs->version = 0;
+ AT_READ_REG(hw, REG_VPD_CAP, p++);
+ AT_READ_REG(hw, REG_PM_CTRL, p++);
+ AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL, p++);
+ AT_READ_REG(hw, REG_TWSI_CTRL, p++);
+ AT_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL, p++);
+ AT_READ_REG(hw, REG_MASTER_CTRL, p++);
+ AT_READ_REG(hw, REG_MANUAL_TIMER_INIT, p++);
+ AT_READ_REG(hw, REG_IRQ_MODRT_TIMER_INIT, p++);
+ AT_READ_REG(hw, REG_GPHY_CTRL, p++);
+ AT_READ_REG(hw, REG_LINK_CTRL, p++);
+ AT_READ_REG(hw, REG_IDLE_STATUS, p++);
+ AT_READ_REG(hw, REG_MDIO_CTRL, p++);
+ AT_READ_REG(hw, REG_SERDES_LOCK, p++);
+ AT_READ_REG(hw, REG_MAC_CTRL, p++);
+ AT_READ_REG(hw, REG_MAC_IPG_IFG, p++);
+ AT_READ_REG(hw, REG_MAC_STA_ADDR, p++);
+ AT_READ_REG(hw, REG_MAC_STA_ADDR+4, p++);
+ AT_READ_REG(hw, REG_RX_HASH_TABLE, p++);
+ AT_READ_REG(hw, REG_RX_HASH_TABLE+4, p++);
+ AT_READ_REG(hw, REG_RXQ_CTRL, p++);
+ AT_READ_REG(hw, REG_TXQ_CTRL, p++);
+ AT_READ_REG(hw, REG_MTU, p++);
+ AT_READ_REG(hw, REG_WOL_CTRL, p++);
+
+ atl1c_read_phy_reg(hw, MII_BMCR, &phy_data);
+ regs_buff[73] = (u32) phy_data;
+ atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+ regs_buff[74] = (u32) phy_data;
+}
+
+static int atl1c_get_eeprom_len(struct net_device *netdev)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+
+ if (atl1c_check_eeprom_exist(&adapter->hw))
+ return AT_EEPROM_LEN;
+ else
+ return 0;
+}
+
+static int atl1c_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ struct atl1c_hw *hw = &adapter->hw;
+ u32 *eeprom_buff;
+ int first_dword, last_dword;
+ int ret_val = 0;
+ int i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (!atl1c_check_eeprom_exist(hw)) /* not exist */
+ return -EINVAL;
+
+ eeprom->magic = adapter->pdev->vendor |
+ (adapter->pdev->device << 16);
+
+ first_dword = eeprom->offset >> 2;
+ last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
+
+ eeprom_buff = kmalloc(sizeof(u32) *
+ (last_dword - first_dword + 1), GFP_KERNEL);
+ if (eeprom_buff == NULL)
+ return -ENOMEM;
+
+ for (i = first_dword; i <= last_dword; i++) {
+ if (!atl1c_read_eeprom(hw, i * 4, &(eeprom_buff[i-first_dword]))) {
+ kfree(eeprom_buff);
+ return -EIO;
+ }
+ }
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3),
+ eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static void atl1c_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, atl1c_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
+ drvinfo->n_stats = 0;
+ drvinfo->testinfo_len = 0;
+ drvinfo->regdump_len = atl1c_get_regs_len(netdev);
+ drvinfo->eedump_len = atl1c_get_eeprom_len(netdev);
+}
+
+static void atl1c_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = WAKE_MAGIC | WAKE_PHY;
+ wol->wolopts = 0;
+
+ if (adapter->wol & AT_WUFC_EX)
+ wol->wolopts |= WAKE_UCAST;
+ if (adapter->wol & AT_WUFC_MC)
+ wol->wolopts |= WAKE_MCAST;
+ if (adapter->wol & AT_WUFC_BC)
+ wol->wolopts |= WAKE_BCAST;
+ if (adapter->wol & AT_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+ if (adapter->wol & AT_WUFC_LNKC)
+ wol->wolopts |= WAKE_PHY;
+}
+
+static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+
+ if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
+ WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
+ return -EOPNOTSUPP;
+ /* these settings will always override what we currently have */
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= AT_WUFC_MAG;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wol |= AT_WUFC_LNKC;
+
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
+static int atl1c_nway_reset(struct net_device *netdev)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ if (netif_running(netdev))
+ atl1c_reinit_locked(adapter);
+ return 0;
+}
+
+static const struct ethtool_ops atl1c_ethtool_ops = {
+ .get_settings = atl1c_get_settings,
+ .set_settings = atl1c_set_settings,
+ .get_drvinfo = atl1c_get_drvinfo,
+ .get_regs_len = atl1c_get_regs_len,
+ .get_regs = atl1c_get_regs,
+ .get_wol = atl1c_get_wol,
+ .set_wol = atl1c_set_wol,
+ .get_msglevel = atl1c_get_msglevel,
+ .set_msglevel = atl1c_set_msglevel,
+ .nway_reset = atl1c_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = atl1c_get_eeprom_len,
+ .get_eeprom = atl1c_get_eeprom,
+};
+
+void atl1c_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &atl1c_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
new file mode 100644
index 000000000000..23f2ab0f2fa8
--- /dev/null
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -0,0 +1,662 @@
+/*
+ * Copyright(c) 2007 Atheros Corporation. All rights reserved.
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+
+#include "atl1c.h"
+
+/*
+ * atl1c_check_eeprom_exist
+ * returns 1 if an EEPROM (or OTP) device is present
+ */
+int atl1c_check_eeprom_exist(struct atl1c_hw *hw)
+{
+ u32 data;
+
+ AT_READ_REG(hw, REG_TWSI_DEBUG, &data);
+ if (data & TWSI_DEBUG_DEV_EXIST)
+ return 1;
+
+ AT_READ_REG(hw, REG_MASTER_CTRL, &data);
+ if (data & MASTER_CTRL_OTP_SEL)
+ return 1;
+ return 0;
+}
+
+void atl1c_hw_set_mac_addr(struct atl1c_hw *hw)
+{
+ u32 value;
+ /*
+ * 00-0B-6A-F6-00-DC
+ * 0: 6AF600DC 1: 000B
+ * low dword
+ */
+ value = (((u32)hw->mac_addr[2]) << 24) |
+ (((u32)hw->mac_addr[3]) << 16) |
+ (((u32)hw->mac_addr[4]) << 8) |
+ (((u32)hw->mac_addr[5])) ;
+ AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
+ /* high dword */
+ value = (((u32)hw->mac_addr[0]) << 8) |
+ (((u32)hw->mac_addr[1])) ;
+ AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
+}
+
+/*
+ * atl1c_get_permanent_address
+ * returns 0 if a valid MAC address was read
+ */
+static int atl1c_get_permanent_address(struct atl1c_hw *hw)
+{
+ u32 addr[2];
+ u32 i;
+ u32 otp_ctrl_data;
+ u32 twsi_ctrl_data;
+ u32 ltssm_ctrl_data;
+ u32 wol_data;
+ u8 eth_addr[ETH_ALEN];
+ u16 phy_data;
+ bool raise_vol = false;
+
+ /* init */
+ addr[0] = addr[1] = 0;
+ AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
+ if (atl1c_check_eeprom_exist(hw)) {
+ if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
+ /* Enable OTP CLK */
+ if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) {
+ otp_ctrl_data |= OTP_CTRL_CLK_EN;
+ AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
+ AT_WRITE_FLUSH(hw);
+ msleep(1);
+ }
+ }
+
+ if (hw->nic_type == athr_l2c_b ||
+ hw->nic_type == athr_l2c_b2 ||
+ hw->nic_type == athr_l1d) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00);
+ if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
+ goto out;
+ phy_data &= 0xFF7F;
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
+
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
+ if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
+ goto out;
+ phy_data |= 0x8;
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
+ udelay(20);
+ raise_vol = true;
+ }
+ /* clear the write-enable bit of the read-only space */
+ AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &ltssm_ctrl_data);
+ ltssm_ctrl_data &= ~LTSSM_ID_EN_WRO;
+ AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, ltssm_ctrl_data);
+
+ /* clear any WOL settings */
+ AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
+ AT_READ_REG(hw, REG_WOL_CTRL, &wol_data);
+
+
+ AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
+ twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART;
+ AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data);
+ for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) {
+ msleep(10);
+ AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
+ if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0)
+ break;
+ }
+ if (i >= AT_TWSI_EEPROM_TIMEOUT)
+ return -1;
+ }
+ /* Disable OTP_CLK */
+ if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) {
+ otp_ctrl_data &= ~OTP_CTRL_CLK_EN;
+ AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
+ msleep(1);
+ }
+ if (raise_vol) {
+ if (hw->nic_type == athr_l2c_b ||
+ hw->nic_type == athr_l2c_b2 ||
+ hw->nic_type == athr_l1d ||
+ hw->nic_type == athr_l1d_2) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00);
+ if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
+ goto out;
+ phy_data |= 0x80;
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
+
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
+ if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
+ goto out;
+ phy_data &= 0xFFF7;
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
+ udelay(20);
+ }
+ }
+
+ /* the MAC address may have been programmed by the BIOS */
+ AT_READ_REG(hw, REG_MAC_STA_ADDR, &addr[0]);
+ AT_READ_REG(hw, REG_MAC_STA_ADDR + 4, &addr[1]);
+ *(u32 *) &eth_addr[2] = swab32(addr[0]);
+ *(u16 *) &eth_addr[0] = swab16(*(u16 *)&addr[1]);
+
+ if (is_valid_ether_addr(eth_addr)) {
+ memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
+ return 0;
+ }
+
+out:
+ return -1;
+}
+
+bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value)
+{
+ int i;
+ bool ret = false;
+ u32 otp_ctrl_data;
+ u32 control;
+ u32 data;
+
+ if (offset & 3)
+ return ret; /* address is not dword-aligned */
+
+ AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
+ if (!(otp_ctrl_data & OTP_CTRL_CLK_EN))
+ AT_WRITE_REG(hw, REG_OTP_CTRL,
+ (otp_ctrl_data | OTP_CTRL_CLK_EN));
+
+ AT_WRITE_REG(hw, REG_EEPROM_DATA_LO, 0);
+ control = (offset & EEPROM_CTRL_ADDR_MASK) << EEPROM_CTRL_ADDR_SHIFT;
+ AT_WRITE_REG(hw, REG_EEPROM_CTRL, control);
+
+ for (i = 0; i < 10; i++) {
+ udelay(100);
+ AT_READ_REG(hw, REG_EEPROM_CTRL, &control);
+ if (control & EEPROM_CTRL_RW)
+ break;
+ }
+ if (control & EEPROM_CTRL_RW) {
+ AT_READ_REG(hw, REG_EEPROM_CTRL, &data);
+ AT_READ_REG(hw, REG_EEPROM_DATA_LO, p_value);
+ data = data & 0xFFFF;
+ *p_value = swab32((data << 16) | (*p_value >> 16));
+ ret = true;
+ }
+ if (!(otp_ctrl_data & OTP_CTRL_CLK_EN))
+ AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
+
+ return ret;
+}
+/*
+ * Reads the adapter's MAC address from the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ */
+int atl1c_read_mac_addr(struct atl1c_hw *hw)
+{
+ int err = 0;
+
+ err = atl1c_get_permanent_address(hw);
+ if (err)
+ random_ether_addr(hw->perm_mac_addr);
+
+ memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr));
+ return 0;
+}
+
+/*
+ * atl1c_hash_mc_addr
+ * purpose
+ * compute the hash value for a multicast address
+ * hash calculation:
+ * 1. calculate a 32-bit CRC for the multicast address
+ * 2. reverse the CRC bit order (MSB becomes LSB)
+ */
+u32 atl1c_hash_mc_addr(struct atl1c_hw *hw, u8 *mc_addr)
+{
+ u32 crc32;
+ u32 value = 0;
+ int i;
+
+ crc32 = ether_crc_le(6, mc_addr);
+ for (i = 0; i < 32; i++)
+ value |= (((crc32 >> i) & 1) << (31 - i));
+
+ return value;
+}
+
+/*
+ * Sets the bit in the multicast table corresponding to the hash value.
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ */
+void atl1c_hash_set(struct atl1c_hw *hw, u32 hash_value)
+{
+ u32 hash_bit, hash_reg;
+ u32 mta;
+
+ /*
+ * The HASH Table is a register array of 2 32-bit registers.
+ * It is treated like an array of 64 bits. We want to set
+ * bit BitArray[hash_value]. So we figure out what register
+ * the bit is in, read it, OR in the new bit, then write
+ * back the new value. The register is selected by the
+ * most-significant bit of the hash value, and the bit within
+ * that register by the next five bits (bits 30:26).
+ */
+ hash_reg = (hash_value >> 31) & 0x1;
+ hash_bit = (hash_value >> 26) & 0x1F;
+
+ mta = AT_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg);
+
+ mta |= (1 << hash_bit);
+
+ AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta);
+}
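+
+/*
+ * Illustrative sketch (not part of this patch): the two functions above
+ * combine in the rx-mode path roughly like this, walking the device's
+ * multicast list.
+ */
+#if 0
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atl1c_hash_mc_addr(hw, ha->addr);
+ atl1c_hash_set(hw, hash_value);
+ }
+#endif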
+
+/*
+ * Reads the value from a PHY register
+ * hw - Struct containing variables accessed by shared code
+ * reg_addr - address of the PHY register to read
+ */
+int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data)
+{
+ u32 val;
+ int i;
+
+ val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
+ MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW |
+ MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
+
+ AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
+
+ for (i = 0; i < MDIO_WAIT_TIMES; i++) {
+ udelay(2);
+ AT_READ_REG(hw, REG_MDIO_CTRL, &val);
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ break;
+ }
+ if (!(val & (MDIO_START | MDIO_BUSY))) {
+ *phy_data = (u16)val;
+ return 0;
+ }
+
+ return -1;
+}
+
+/*
+ * Writes a value to a PHY register
+ * hw - Struct containing variables accessed by shared code
+ * reg_addr - address of the PHY register to write
+ * data - data to write to the PHY
+ */
+int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data)
+{
+ int i;
+ u32 val;
+
+ val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
+ (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
+ MDIO_SUP_PREAMBLE | MDIO_START |
+ MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
+
+ AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
+
+ for (i = 0; i < MDIO_WAIT_TIMES; i++) {
+ udelay(2);
+ AT_READ_REG(hw, REG_MDIO_CTRL, &val);
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ break;
+ }
+
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ return 0;
+
+ return -1;
+}
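+
+/*
+ * Illustrative sketch (not part of this patch): the MII_DBG_ADDR /
+ * MII_DBG_DATA read-modify-write pattern used repeatedly in this file,
+ * factored into a hypothetical helper.
+ */
+#if 0
+static int atl1c_phy_update_dbg(struct atl1c_hw *hw, u16 dbg_reg,
+ u16 clear, u16 set)
+{
+ u16 phy_data;
+
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, dbg_reg);
+ if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data) != 0)
+ return -1;
+ phy_data = (phy_data & ~clear) | set;
+ return atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
+}
+#endif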
+
+/*
+ * Configures PHY autoneg and flow control advertisement settings
+ *
+ * hw - Struct containing variables accessed by shared code
+ */
+static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
+{
+ u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_ALL;
+ u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP &
+ ~GIGA_CR_1000T_SPEED_MASK;
+
+ if (hw->autoneg_advertised & ADVERTISED_10baseT_Half)
+ mii_adv_data |= ADVERTISE_10HALF;
+ if (hw->autoneg_advertised & ADVERTISED_10baseT_Full)
+ mii_adv_data |= ADVERTISE_10FULL;
+ if (hw->autoneg_advertised & ADVERTISED_100baseT_Half)
+ mii_adv_data |= ADVERTISE_100HALF;
+ if (hw->autoneg_advertised & ADVERTISED_100baseT_Full)
+ mii_adv_data |= ADVERTISE_100FULL;
+
+ if (hw->autoneg_advertised & ADVERTISED_Autoneg)
+ mii_adv_data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL;
+
+ if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M) {
+ if (hw->autoneg_advertised & ADVERTISED_1000baseT_Half)
+ mii_giga_ctrl_data |= ADVERTISE_1000HALF;
+ if (hw->autoneg_advertised & ADVERTISED_1000baseT_Full)
+ mii_giga_ctrl_data |= ADVERTISE_1000FULL;
+ if (hw->autoneg_advertised & ADVERTISED_Autoneg)
+ mii_giga_ctrl_data |= ADVERTISE_1000HALF |
+ ADVERTISE_1000FULL;
+ }
+
+ if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 ||
+ atl1c_write_phy_reg(hw, MII_CTRL1000, mii_giga_ctrl_data) != 0)
+ return -1;
+ return 0;
+}
+
+void atl1c_phy_disable(struct atl1c_hw *hw)
+{
+ AT_WRITE_REGW(hw, REG_GPHY_CTRL,
+ GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET);
+}
+
+static void atl1c_phy_magic_data(struct atl1c_hw *hw)
+{
+ u16 data;
+
+ data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
+ ((1 & ANA_INTERVAL_SEL_TIMER_MASK) <<
+ ANA_INTERVAL_SEL_TIMER_SHIFT);
+
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_18);
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
+
+ data = (2 & ANA_SERDES_CDR_BW_MASK) | ANA_MS_PAD_DBG |
+ ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
+ ANA_SERDES_EN_LCKDT;
+
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_5);
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
+
+ data = (44 & ANA_LONG_CABLE_TH_100_MASK) |
+ ((33 & ANA_SHORT_CABLE_TH_100_MASK) <<
+ ANA_SHORT_CABLE_TH_100_SHIFT) | ANA_BP_BAD_LINK_ACCUM |
+ ANA_BP_SMALL_BW;
+
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_54);
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
+
+ data = (11 & ANA_IECHO_ADJ_MASK) | ((11 & ANA_IECHO_ADJ_MASK) <<
+ ANA_IECHO_ADJ_2_SHIFT) | ((8 & ANA_IECHO_ADJ_MASK) <<
+ ANA_IECHO_ADJ_1_SHIFT) | ((8 & ANA_IECHO_ADJ_MASK) <<
+ ANA_IECHO_ADJ_0_SHIFT);
+
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_4);
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
+
+ data = ANA_RESTART_CAL | ((7 & ANA_MANUL_SWICH_ON_MASK) <<
+ ANA_MANUL_SWICH_ON_SHIFT) | ANA_MAN_ENABLE |
+ ANA_SEL_HSP | ANA_EN_HB | ANA_OEN_125M;
+
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_0);
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
+
+ if (hw->ctrl_flags & ATL1C_HIB_DISABLE) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_41);
+ if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &data) != 0)
+ return;
+ data &= ~ANA_TOP_PS_EN;
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
+
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_11);
+ if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &data) != 0)
+ return;
+ data &= ~ANA_PS_HIB_EN;
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
+ }
+}
+
+int atl1c_phy_reset(struct atl1c_hw *hw)
+{
+ struct atl1c_adapter *adapter = hw->adapter;
+ struct pci_dev *pdev = adapter->pdev;
+ u16 phy_data;
+ u32 phy_ctrl_data = GPHY_CTRL_DEFAULT;
+ u32 mii_ier_data = IER_LINK_UP | IER_LINK_DOWN;
+ int err;
+
+ if (hw->ctrl_flags & ATL1C_HIB_DISABLE)
+ phy_ctrl_data &= ~GPHY_CTRL_HIB_EN;
+
+ AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data);
+ AT_WRITE_FLUSH(hw);
+ msleep(40);
+ phy_ctrl_data |= GPHY_CTRL_EXT_RESET;
+ AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data);
+ AT_WRITE_FLUSH(hw);
+ msleep(10);
+
+ if (hw->nic_type == athr_l2c_b) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x0A);
+ atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data);
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xDFFF);
+ }
+
+ if (hw->nic_type == athr_l2c_b ||
+ hw->nic_type == athr_l2c_b2 ||
+ hw->nic_type == athr_l1d ||
+ hw->nic_type == athr_l1d_2) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
+ atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data);
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7);
+ msleep(20);
+ }
+ if (hw->nic_type == athr_l1d) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x929D);
+ }
+ if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b2
+ || hw->nic_type == athr_l2c) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD);
+ }
+ err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data);
+ if (err) {
+ if (netif_msg_hw(adapter))
+ dev_err(&pdev->dev,
+ "Error enable PHY linkChange Interrupt\n");
+ return err;
+ }
+ if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION))
+ atl1c_phy_magic_data(hw);
+ return 0;
+}
+
+int atl1c_phy_init(struct atl1c_hw *hw)
+{
+ struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct pci_dev *pdev = adapter->pdev;
+ int ret_val;
+ u16 mii_bmcr_data = BMCR_RESET;
+
+ if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) ||
+ (atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) {
+ dev_err(&pdev->dev, "Error get phy ID\n");
+ return -1;
+ }
+ switch (hw->media_type) {
+ case MEDIA_TYPE_AUTO_SENSOR:
+ ret_val = atl1c_phy_setup_adv(hw);
+ if (ret_val) {
+ if (netif_msg_link(adapter))
+ dev_err(&pdev->dev,
+ "Error Setting up Auto-Negotiation\n");
+ return ret_val;
+ }
+ mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
+ break;
+ case MEDIA_TYPE_100M_FULL:
+ mii_bmcr_data |= BMCR_SPEED100 | BMCR_FULLDPLX;
+ break;
+ case MEDIA_TYPE_100M_HALF:
+ mii_bmcr_data |= BMCR_SPEED100;
+ break;
+ case MEDIA_TYPE_10M_FULL:
+ mii_bmcr_data |= BMCR_FULLDPLX;
+ break;
+ case MEDIA_TYPE_10M_HALF:
+ break;
+ default:
+ if (netif_msg_link(adapter))
+ dev_err(&pdev->dev, "Wrong Media type %d\n",
+ hw->media_type);
+ return -1;
+ }
+
+ ret_val = atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
+ if (ret_val)
+ return ret_val;
+ hw->phy_configured = true;
+
+ return 0;
+}
+
+/*
+ * Detects the current speed and duplex settings of the hardware.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * speed - Speed of the connection
+ * duplex - Duplex setting of the connection
+ */
+int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex)
+{
+ int err;
+ u16 phy_data;
+
+ /* Read PHY Specific Status Register (17) */
+ err = atl1c_read_phy_reg(hw, MII_GIGA_PSSR, &phy_data);
+ if (err)
+ return err;
+
+ if (!(phy_data & GIGA_PSSR_SPD_DPLX_RESOLVED))
+ return -1;
+
+ switch (phy_data & GIGA_PSSR_SPEED) {
+ case GIGA_PSSR_1000MBS:
+ *speed = SPEED_1000;
+ break;
+ case GIGA_PSSR_100MBS:
+ *speed = SPEED_100;
+ break;
+ case GIGA_PSSR_10MBS:
+ *speed = SPEED_10;
+ break;
+ default:
+ return -1;
+ }
+
+ if (phy_data & GIGA_PSSR_DPLX)
+ *duplex = FULL_DUPLEX;
+ else
+ *duplex = HALF_DUPLEX;
+
+ return 0;
+}
+
+int atl1c_phy_power_saving(struct atl1c_hw *hw)
+{
+ struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct pci_dev *pdev = adapter->pdev;
+ int ret = 0;
+ u16 autoneg_advertised = ADVERTISED_10baseT_Half;
+ u16 save_autoneg_advertised;
+ u16 phy_data;
+ u16 mii_lpa_data;
+ u16 speed = SPEED_0;
+ u16 duplex = FULL_DUPLEX;
+ int i;
+
+ atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+ atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+ if (phy_data & BMSR_LSTATUS) {
+ atl1c_read_phy_reg(hw, MII_LPA, &mii_lpa_data);
+ if (mii_lpa_data & LPA_10FULL)
+ autoneg_advertised = ADVERTISED_10baseT_Full;
+ else if (mii_lpa_data & LPA_10HALF)
+ autoneg_advertised = ADVERTISED_10baseT_Half;
+ else if (mii_lpa_data & LPA_100HALF)
+ autoneg_advertised = ADVERTISED_100baseT_Half;
+ else if (mii_lpa_data & LPA_100FULL)
+ autoneg_advertised = ADVERTISED_100baseT_Full;
+
+ save_autoneg_advertised = hw->autoneg_advertised;
+ hw->phy_configured = false;
+ hw->autoneg_advertised = autoneg_advertised;
+ if (atl1c_restart_autoneg(hw) != 0) {
+ dev_dbg(&pdev->dev, "phy autoneg failed\n");
+ ret = -1;
+ }
+ hw->autoneg_advertised = save_autoneg_advertised;
+
+ if (mii_lpa_data) {
+ for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
+ mdelay(100);
+ atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+ atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+ if (phy_data & BMSR_LSTATUS) {
+ if (atl1c_get_speed_and_duplex(hw, &speed,
+ &duplex) != 0)
+ dev_dbg(&pdev->dev,
+ "get speed and duplex failed\n");
+ break;
+ }
+ }
+ }
+ } else {
+ speed = SPEED_10;
+ duplex = HALF_DUPLEX;
+ }
+ adapter->link_speed = speed;
+ adapter->link_duplex = duplex;
+
+ return ret;
+}
+
+int atl1c_restart_autoneg(struct atl1c_hw *hw)
+{
+ int err = 0;
+ u16 mii_bmcr_data = BMCR_RESET;
+
+ err = atl1c_phy_setup_adv(hw);
+ if (err)
+ return err;
+ mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
+
+ return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
+}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
new file mode 100644
index 000000000000..655fc6c4a8a4
--- /dev/null
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
@@ -0,0 +1,868 @@
+/*
+ * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved.
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _ATL1C_HW_H_
+#define _ATL1C_HW_H_
+
+#include <linux/types.h>
+#include <linux/mii.h>
+
+struct atl1c_adapter;
+struct atl1c_hw;
+
+/* function prototype */
+void atl1c_phy_disable(struct atl1c_hw *hw);
+void atl1c_hw_set_mac_addr(struct atl1c_hw *hw);
+int atl1c_phy_reset(struct atl1c_hw *hw);
+int atl1c_read_mac_addr(struct atl1c_hw *hw);
+int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex);
+u32 atl1c_hash_mc_addr(struct atl1c_hw *hw, u8 *mc_addr);
+void atl1c_hash_set(struct atl1c_hw *hw, u32 hash_value);
+int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data);
+int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data);
+bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value);
+int atl1c_phy_init(struct atl1c_hw *hw);
+int atl1c_check_eeprom_exist(struct atl1c_hw *hw);
+int atl1c_restart_autoneg(struct atl1c_hw *hw);
+int atl1c_phy_power_saving(struct atl1c_hw *hw);
+/* register definition */
+#define REG_DEVICE_CAP 0x5C
+#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7
+#define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0
+
+#define REG_DEVICE_CTRL 0x60
+#define DEVICE_CTRL_MAX_PAYLOAD_MASK 0x7
+#define DEVICE_CTRL_MAX_PAYLOAD_SHIFT 5
+#define DEVICE_CTRL_MAX_RREQ_SZ_MASK 0x7
+#define DEVICE_CTRL_MAX_RREQ_SZ_SHIFT 12
+
+#define REG_LINK_CTRL 0x68
+#define LINK_CTRL_L0S_EN 0x01
+#define LINK_CTRL_L1_EN 0x02
+#define LINK_CTRL_EXT_SYNC 0x80
+
+#define REG_VPD_CAP 0x6C
+#define VPD_CAP_ID_MASK 0xff
+#define VPD_CAP_ID_SHIFT 0
+#define VPD_CAP_NEXT_PTR_MASK 0xFF
+#define VPD_CAP_NEXT_PTR_SHIFT 8
+#define VPD_CAP_VPD_ADDR_MASK 0x7FFF
+#define VPD_CAP_VPD_ADDR_SHIFT 16
+#define VPD_CAP_VPD_FLAG 0x80000000
+
+#define REG_VPD_DATA 0x70
+
+#define REG_PCIE_UC_SEVERITY 0x10C
+#define PCIE_UC_SERVRITY_TRN 0x00000001
+#define PCIE_UC_SERVRITY_DLP 0x00000010
+#define PCIE_UC_SERVRITY_PSN_TLP 0x00001000
+#define PCIE_UC_SERVRITY_FCP 0x00002000
+#define PCIE_UC_SERVRITY_CPL_TO 0x00004000
+#define PCIE_UC_SERVRITY_CA 0x00008000
+#define PCIE_UC_SERVRITY_UC 0x00010000
+#define PCIE_UC_SERVRITY_ROV 0x00020000
+#define PCIE_UC_SERVRITY_MLFP 0x00040000
+#define PCIE_UC_SERVRITY_ECRC 0x00080000
+#define PCIE_UC_SERVRITY_UR 0x00100000
+
+#define REG_DEV_SERIALNUM_CTRL 0x200
+#define REG_DEV_MAC_SEL_MASK 0x0 /* 0:EUI; 1:MAC */
+#define REG_DEV_MAC_SEL_SHIFT 0
+#define REG_DEV_SERIAL_NUM_EN_MASK 0x1
+#define REG_DEV_SERIAL_NUM_EN_SHIFT 1
+
+#define REG_TWSI_CTRL 0x218
+#define TWSI_CTRL_LD_OFFSET_MASK 0xFF
+#define TWSI_CTRL_LD_OFFSET_SHIFT 0
+#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7
+#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
+#define TWSI_CTRL_SW_LDSTART 0x800
+#define TWSI_CTRL_HW_LDSTART 0x1000
+#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
+#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
+#define TWSI_CTRL_LD_EXIST 0x400000
+#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
+#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23
+#define TWSI_CTRL_FREQ_SEL_100K 0
+#define TWSI_CTRL_FREQ_SEL_200K 1
+#define TWSI_CTRL_FREQ_SEL_300K 2
+#define TWSI_CTRL_FREQ_SEL_400K 3
+#define TWSI_CTRL_SMB_SLV_ADDR
+#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3
+#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24
+
+
+#define REG_PCIE_DEV_MISC_CTRL 0x21C
+#define PCIE_DEV_MISC_EXT_PIPE 0x2
+#define PCIE_DEV_MISC_RETRY_BUFDIS 0x1
+#define PCIE_DEV_MISC_SPIROM_EXIST 0x4
+#define PCIE_DEV_MISC_SERDES_ENDIAN 0x8
+#define PCIE_DEV_MISC_SERDES_SEL_DIN 0x10
+
+#define REG_PCIE_PHYMISC 0x1000
+#define PCIE_PHYMISC_FORCE_RCV_DET 0x4
+
+#define REG_PCIE_PHYMISC2 0x1004
+#define PCIE_PHYMISC2_SERDES_CDR_MASK 0x3
+#define PCIE_PHYMISC2_SERDES_CDR_SHIFT 16
+#define PCIE_PHYMISC2_SERDES_TH_MASK 0x3
+#define PCIE_PHYMISC2_SERDES_TH_SHIFT 18
+
+#define REG_TWSI_DEBUG 0x1108
+#define TWSI_DEBUG_DEV_EXIST 0x20000000
+
+#define REG_EEPROM_CTRL 0x12C0
+#define EEPROM_CTRL_DATA_HI_MASK 0xFFFF
+#define EEPROM_CTRL_DATA_HI_SHIFT 0
+#define EEPROM_CTRL_ADDR_MASK 0x3FF
+#define EEPROM_CTRL_ADDR_SHIFT 16
+#define EEPROM_CTRL_ACK 0x40000000
+#define EEPROM_CTRL_RW 0x80000000
+
+#define REG_EEPROM_DATA_LO 0x12C4
+
+#define REG_OTP_CTRL 0x12F0
+#define OTP_CTRL_CLK_EN 0x0002
+
+#define REG_PM_CTRL 0x12F8
+#define PM_CTRL_SDES_EN 0x00000001
+#define PM_CTRL_RBER_EN 0x00000002
+#define PM_CTRL_CLK_REQ_EN 0x00000004
+#define PM_CTRL_ASPM_L1_EN 0x00000008
+#define PM_CTRL_SERDES_L1_EN 0x00000010
+#define PM_CTRL_SERDES_PLL_L1_EN 0x00000020
+#define PM_CTRL_SERDES_PD_EX_L1 0x00000040
+#define PM_CTRL_SERDES_BUDS_RX_L1_EN 0x00000080
+#define PM_CTRL_L0S_ENTRY_TIMER_MASK 0xF
+#define PM_CTRL_L0S_ENTRY_TIMER_SHIFT 8
+#define PM_CTRL_ASPM_L0S_EN 0x00001000
+#define PM_CTRL_CLK_SWH_L1 0x00002000
+#define PM_CTRL_CLK_PWM_VER1_1 0x00004000
+#define PM_CTRL_RCVR_WT_TIMER 0x00008000
+#define PM_CTRL_L1_ENTRY_TIMER_MASK 0xF
+#define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16
+#define PM_CTRL_PM_REQ_TIMER_MASK 0xF
+#define PM_CTRL_PM_REQ_TIMER_SHIFT 20
+#define PM_CTRL_LCKDET_TIMER_MASK 0xF
+#define PM_CTRL_LCKDET_TIMER_SHIFT 24
+#define PM_CTRL_EN_BUFS_RX_L0S 0x10000000
+#define PM_CTRL_SA_DLY_EN 0x20000000
+#define PM_CTRL_MAC_ASPM_CHK 0x40000000
+#define PM_CTRL_HOTRST 0x80000000
+
+#define REG_LTSSM_ID_CTRL 0x12FC
+#define LTSSM_ID_EN_WRO 0x1000
+/* Selene Master Control Register */
+#define REG_MASTER_CTRL 0x1400
+#define MASTER_CTRL_SOFT_RST 0x1
+#define MASTER_CTRL_TEST_MODE_MASK 0x3
+#define MASTER_CTRL_TEST_MODE_SHIFT 2
+#define MASTER_CTRL_BERT_START 0x10
+#define MASTER_CTRL_OOB_DIS_OFF 0x40
+#define MASTER_CTRL_SA_TIMER_EN 0x80
+#define MASTER_CTRL_MTIMER_EN 0x100
+#define MASTER_CTRL_MANUAL_INT 0x200
+#define MASTER_CTRL_TX_ITIMER_EN 0x400
+#define MASTER_CTRL_RX_ITIMER_EN 0x800
+#define MASTER_CTRL_CLK_SEL_DIS 0x1000
+#define MASTER_CTRL_CLK_SWH_MODE 0x2000
+#define MASTER_CTRL_INT_RDCLR 0x4000
+#define MASTER_CTRL_REV_NUM_SHIFT 16
+#define MASTER_CTRL_REV_NUM_MASK 0xff
+#define MASTER_CTRL_DEV_ID_SHIFT 24
+#define MASTER_CTRL_DEV_ID_MASK 0x7f
+#define MASTER_CTRL_OTP_SEL 0x80000000
+
+/* Timer Initial Value Register */
+#define REG_MANUAL_TIMER_INIT 0x1404
+
+/* IRQ ModeratorTimer Initial Value Register */
+#define REG_IRQ_MODRT_TIMER_INIT 0x1408
+#define IRQ_MODRT_TIMER_MASK 0xffff
+#define IRQ_MODRT_TX_TIMER_SHIFT 0
+#define IRQ_MODRT_RX_TIMER_SHIFT 16
+
+#define REG_GPHY_CTRL 0x140C
+#define GPHY_CTRL_EXT_RESET 0x1
+#define GPHY_CTRL_RTL_MODE 0x2
+#define GPHY_CTRL_LED_MODE 0x4
+#define GPHY_CTRL_ANEG_NOW 0x8
+#define GPHY_CTRL_REV_ANEG 0x10
+#define GPHY_CTRL_GATE_25M_EN 0x20
+#define GPHY_CTRL_LPW_EXIT 0x40
+#define GPHY_CTRL_PHY_IDDQ 0x80
+#define GPHY_CTRL_PHY_IDDQ_DIS 0x100
+#define GPHY_CTRL_GIGA_DIS 0x200
+#define GPHY_CTRL_HIB_EN 0x400
+#define GPHY_CTRL_HIB_PULSE 0x800
+#define GPHY_CTRL_SEL_ANA_RST 0x1000
+#define GPHY_CTRL_PHY_PLL_ON 0x2000
+#define GPHY_CTRL_PWDOWN_HW 0x4000
+#define GPHY_CTRL_PHY_PLL_BYPASS 0x8000
+
+#define GPHY_CTRL_DEFAULT ( \
+ GPHY_CTRL_SEL_ANA_RST |\
+ GPHY_CTRL_HIB_PULSE |\
+ GPHY_CTRL_HIB_EN)
+
+#define GPHY_CTRL_PW_WOL_DIS ( \
+ GPHY_CTRL_SEL_ANA_RST |\
+ GPHY_CTRL_HIB_PULSE |\
+ GPHY_CTRL_HIB_EN |\
+ GPHY_CTRL_PWDOWN_HW |\
+ GPHY_CTRL_PHY_IDDQ)
+
+#define GPHY_CTRL_POWER_SAVING ( \
+ GPHY_CTRL_SEL_ANA_RST |\
+ GPHY_CTRL_HIB_EN |\
+ GPHY_CTRL_HIB_PULSE |\
+ GPHY_CTRL_PWDOWN_HW |\
+ GPHY_CTRL_PHY_IDDQ)
+/* Block IDLE Status Register */
+#define REG_IDLE_STATUS 0x1410
+#define IDLE_STATUS_MASK 0x00FF
+#define IDLE_STATUS_RXMAC_NO_IDLE 0x1
+#define IDLE_STATUS_TXMAC_NO_IDLE 0x2
+#define IDLE_STATUS_RXQ_NO_IDLE 0x4
+#define IDLE_STATUS_TXQ_NO_IDLE 0x8
+#define IDLE_STATUS_DMAR_NO_IDLE 0x10
+#define IDLE_STATUS_DMAW_NO_IDLE 0x20
+#define IDLE_STATUS_SMB_NO_IDLE 0x40
+#define IDLE_STATUS_CMB_NO_IDLE 0x80
+
+/* MDIO Control Register */
+#define REG_MDIO_CTRL 0x1414
+#define MDIO_DATA_MASK 0xffff /* On MDIO write, the 16-bit
+ * control data to write to PHY
+ * MII management register */
+#define MDIO_DATA_SHIFT 0 /* On MDIO read, the 16-bit
+ * status data that was read
+ * from the PHY MII management register */
+#define MDIO_REG_ADDR_MASK 0x1f /* MDIO register address */
+#define MDIO_REG_ADDR_SHIFT 16
+#define MDIO_RW 0x200000 /* 1: read, 0: write */
+#define MDIO_SUP_PREAMBLE 0x400000 /* Suppress preamble */
+#define MDIO_START 0x800000 /* Write 1 to initiate the MDIO
+ * master. And this bit is self
+ * cleared after one cycle */
+#define MDIO_CLK_SEL_SHIFT 24
+#define MDIO_CLK_25_4 0
+#define MDIO_CLK_25_6 2
+#define MDIO_CLK_25_8 3
+#define MDIO_CLK_25_10 4
+#define MDIO_CLK_25_14 5
+#define MDIO_CLK_25_20 6
+#define MDIO_CLK_25_28 7
+#define MDIO_BUSY 0x8000000
+#define MDIO_AP_EN 0x10000000
+#define MDIO_WAIT_TIMES 10
+
+/* MII PHY Status Register */
+#define REG_PHY_STATUS 0x1418
+#define PHY_GENERAL_STATUS_MASK 0xFFFF
+#define PHY_STATUS_RECV_ENABLE 0x0001
+#define PHY_OE_PWSP_STATUS_MASK 0x07FF
+#define PHY_OE_PWSP_STATUS_SHIFT 16
+#define PHY_STATUS_LPW_STATE 0x80000000
+/* BIST Control and Status Register0 (for the Packet Memory) */
+#define REG_BIST0_CTRL 0x141c
+#define BIST0_NOW 0x1
+#define BIST0_SRAM_FAIL 0x2 /* 1: The SRAM failure is
+ * un-repairable because
+ * it has address decoder
+ * failure or more than 1 cell
+ * stuck-to-x failure */
+#define BIST0_FUSE_FLAG 0x4
+
+/* BIST Control and Status Register1(for the retry buffer of PCI Express) */
+#define REG_BIST1_CTRL 0x1420
+#define BIST1_NOW 0x1
+#define BIST1_SRAM_FAIL 0x2
+#define BIST1_FUSE_FLAG 0x4
+
+/* SerDes Lock Detect Control and Status Register */
+#define REG_SERDES_LOCK 0x1424
+#define SERDES_LOCK_DETECT 0x1 /* SerDes lock detected. This signal
+ * comes from Analog SerDes */
+#define SERDES_LOCK_DETECT_EN 0x2 /* 1: Enable SerDes Lock detect function */
+#define SERDES_LOCK_STS_SELFB_PLL_SHIFT 0xE
+#define SERDES_LOCK_STS_SELFB_PLL_MASK 0x3
+#define SERDES_OVCLK_18_25 0x0
+#define SERDES_OVCLK_12_18 0x1
+#define SERDES_OVCLK_0_4 0x2
+#define SERDES_OVCLK_4_12 0x3
+#define SERDES_MAC_CLK_SLOWDOWN 0x20000
+#define SERDES_PYH_CLK_SLOWDOWN 0x40000
+
+/* MAC Control Register */
+#define REG_MAC_CTRL 0x1480
+#define MAC_CTRL_TX_EN 0x1
+#define MAC_CTRL_RX_EN 0x2
+#define MAC_CTRL_TX_FLOW 0x4
+#define MAC_CTRL_RX_FLOW 0x8
+#define MAC_CTRL_LOOPBACK 0x10
+#define MAC_CTRL_DUPLX 0x20
+#define MAC_CTRL_ADD_CRC 0x40
+#define MAC_CTRL_PAD 0x80
+#define MAC_CTRL_LENCHK 0x100
+#define MAC_CTRL_HUGE_EN 0x200
+#define MAC_CTRL_PRMLEN_SHIFT 10
+#define MAC_CTRL_PRMLEN_MASK 0xf
+#define MAC_CTRL_RMV_VLAN 0x4000
+#define MAC_CTRL_PROMIS_EN 0x8000
+#define MAC_CTRL_TX_PAUSE 0x10000
+#define MAC_CTRL_SCNT 0x20000
+#define MAC_CTRL_SRST_TX 0x40000
+#define MAC_CTRL_TX_SIMURST 0x80000
+#define MAC_CTRL_SPEED_SHIFT 20
+#define MAC_CTRL_SPEED_MASK 0x3
+#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000
+#define MAC_CTRL_TX_HUGE 0x800000
+#define MAC_CTRL_RX_CHKSUM_EN 0x1000000
+#define MAC_CTRL_MC_ALL_EN 0x2000000
+#define MAC_CTRL_BC_EN 0x4000000
+#define MAC_CTRL_DBG 0x8000000
+#define MAC_CTRL_SINGLE_PAUSE_EN 0x10000000
+#define MAC_CTRL_HASH_ALG_CRC32 0x20000000
+#define MAC_CTRL_SPEED_MODE_SW 0x40000000
+
+/* MAC IPG/IFG Control Register */
+#define REG_MAC_IPG_IFG 0x1484
+#define MAC_IPG_IFG_IPGT_SHIFT 0 /* Desired back to back
+ * inter-packet gap. The
+ * default is 96-bit time */
+#define MAC_IPG_IFG_IPGT_MASK 0x7f
+#define MAC_IPG_IFG_MIFG_SHIFT 8 /* Minimum number of IFG to
+ * enforce in between RX frames */
+#define MAC_IPG_IFG_MIFG_MASK 0xff /* Frames with a gap below this IFG are dropped */
+#define MAC_IPG_IFG_IPGR1_SHIFT 16 /* 64bit Carrier-Sense window */
+#define MAC_IPG_IFG_IPGR1_MASK 0x7f
+#define MAC_IPG_IFG_IPGR2_SHIFT 24 /* 96-bit IPG window */
+#define MAC_IPG_IFG_IPGR2_MASK 0x7f
+
+/* MAC STATION ADDRESS */
+#define REG_MAC_STA_ADDR 0x1488
+
+/* Hash table for multicast address */
+#define REG_RX_HASH_TABLE 0x1490
+
+/* MAC Half-Duplex Control Register */
+#define REG_MAC_HALF_DUPLX_CTRL 0x1498
+#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 /* Collision Window */
+#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff
+#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12
+#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf
+#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000
+#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000
+#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 /* No back-off on backpressure,
+ * immediately start the
+ * transmission after back pressure */
+#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 /* 1: Alternative Binary Exponential Back-off Enabled */
+#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 /* Maximum binary exponential number */
+#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf
+#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24 /* IPG to start JAM for collision based flow control in half-duplex */
+#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf /* mode. In unit of 8-bit time */
+
+/* Maximum Frame Length Control Register */
+#define REG_MTU 0x149c
+
+/* Wake-On-Lan control register */
+#define REG_WOL_CTRL 0x14a0
+#define WOL_PATTERN_EN 0x00000001
+#define WOL_PATTERN_PME_EN 0x00000002
+#define WOL_MAGIC_EN 0x00000004
+#define WOL_MAGIC_PME_EN 0x00000008
+#define WOL_LINK_CHG_EN 0x00000010
+#define WOL_LINK_CHG_PME_EN 0x00000020
+#define WOL_PATTERN_ST 0x00000100
+#define WOL_MAGIC_ST 0x00000200
+#define WOL_LINKCHG_ST 0x00000400
+#define WOL_CLK_SWITCH_EN 0x00008000
+#define WOL_PT0_EN 0x00010000
+#define WOL_PT1_EN 0x00020000
+#define WOL_PT2_EN 0x00040000
+#define WOL_PT3_EN 0x00080000
+#define WOL_PT4_EN 0x00100000
+#define WOL_PT5_EN 0x00200000
+#define WOL_PT6_EN 0x00400000
+
+/* WOL Length (2 DWORDs) */
+#define REG_WOL_PATTERN_LEN 0x14a4
+#define WOL_PT_LEN_MASK 0x7f
+#define WOL_PT0_LEN_SHIFT 0
+#define WOL_PT1_LEN_SHIFT 8
+#define WOL_PT2_LEN_SHIFT 16
+#define WOL_PT3_LEN_SHIFT 24
+#define WOL_PT4_LEN_SHIFT 0
+#define WOL_PT5_LEN_SHIFT 8
+#define WOL_PT6_LEN_SHIFT 16
+
+/* Internal SRAM Partition Register */
+#define RFDX_HEAD_ADDR_MASK 0x03FF
+#define RFDX_HARD_ADDR_SHIFT 0
+#define RFDX_TAIL_ADDR_MASK 0x03FF
+#define RFDX_TAIL_ADDR_SHIFT 16
+
+#define REG_SRAM_RFD0_INFO 0x1500
+#define REG_SRAM_RFD1_INFO 0x1504
+#define REG_SRAM_RFD2_INFO 0x1508
+#define REG_SRAM_RFD3_INFO 0x150C
+
+#define REG_RFD_NIC_LEN 0x1510 /* In 8-bytes */
+#define RFD_NIC_LEN_MASK 0x03FF
+
+#define REG_SRAM_TRD_ADDR 0x1518
+#define TPD_HEAD_ADDR_MASK 0x03FF
+#define TPD_HEAD_ADDR_SHIFT 0
+#define TPD_TAIL_ADDR_MASK 0x03FF
+#define TPD_TAIL_ADDR_SHIFT 16
+
+#define REG_SRAM_TRD_LEN 0x151C /* In 8-bytes */
+#define TPD_NIC_LEN_MASK 0x03FF
+
+#define REG_SRAM_RXF_ADDR 0x1520
+#define REG_SRAM_RXF_LEN 0x1524
+#define REG_SRAM_TXF_ADDR 0x1528
+#define REG_SRAM_TXF_LEN 0x152C
+#define REG_SRAM_TCPH_ADDR 0x1530
+#define REG_SRAM_PKTH_ADDR 0x1532
+
+/*
+ * Load Ptr Register
+ * Software sets this bit after the initialization of the head and tail
+ * addresses of all descriptors, as well as the following descriptor
+ * control register, which triggers each function block to load the head
+ * pointer to prepare for the operation. This bit is then self-cleared
+ * after one cycle.
+ */
+#define REG_LOAD_PTR 0x1534
+
+#define REG_RX_BASE_ADDR_HI 0x1540
+#define REG_TX_BASE_ADDR_HI 0x1544
+#define REG_SMB_BASE_ADDR_HI 0x1548
+#define REG_SMB_BASE_ADDR_LO 0x154C
+#define REG_RFD0_HEAD_ADDR_LO 0x1550
+#define REG_RFD1_HEAD_ADDR_LO 0x1554
+#define REG_RFD2_HEAD_ADDR_LO 0x1558
+#define REG_RFD3_HEAD_ADDR_LO 0x155C
+#define REG_RFD_RING_SIZE 0x1560
+#define RFD_RING_SIZE_MASK 0x0FFF
+#define REG_RX_BUF_SIZE 0x1564
+#define RX_BUF_SIZE_MASK 0xFFFF
+#define REG_RRD0_HEAD_ADDR_LO 0x1568
+#define REG_RRD1_HEAD_ADDR_LO 0x156C
+#define REG_RRD2_HEAD_ADDR_LO 0x1570
+#define REG_RRD3_HEAD_ADDR_LO 0x1574
+#define REG_RRD_RING_SIZE 0x1578
+#define RRD_RING_SIZE_MASK 0x0FFF
+#define REG_HTPD_HEAD_ADDR_LO 0x157C
+#define REG_NTPD_HEAD_ADDR_LO 0x1580
+#define REG_TPD_RING_SIZE 0x1584
+#define TPD_RING_SIZE_MASK 0xFFFF
+#define REG_CMB_BASE_ADDR_LO 0x1588
+
+/* RSS registers */
+#define REG_RSS_KEY0 0x14B0
+#define REG_RSS_KEY1 0x14B4
+#define REG_RSS_KEY2 0x14B8
+#define REG_RSS_KEY3 0x14BC
+#define REG_RSS_KEY4 0x14C0
+#define REG_RSS_KEY5 0x14C4
+#define REG_RSS_KEY6 0x14C8
+#define REG_RSS_KEY7 0x14CC
+#define REG_RSS_KEY8 0x14D0
+#define REG_RSS_KEY9 0x14D4
+#define REG_IDT_TABLE0 0x14E0
+#define REG_IDT_TABLE1 0x14E4
+#define REG_IDT_TABLE2 0x14E8
+#define REG_IDT_TABLE3 0x14EC
+#define REG_IDT_TABLE4 0x14F0
+#define REG_IDT_TABLE5 0x14F4
+#define REG_IDT_TABLE6 0x14F8
+#define REG_IDT_TABLE7 0x14FC
+#define REG_IDT_TABLE REG_IDT_TABLE0
+#define REG_RSS_HASH_VALUE 0x15B0
+#define REG_RSS_HASH_FLAG 0x15B4
+#define REG_BASE_CPU_NUMBER 0x15B8
+
+/* TXQ Control Register */
+#define REG_TXQ_CTRL 0x1590
+#define TXQ_NUM_TPD_BURST_MASK 0xF
+#define TXQ_NUM_TPD_BURST_SHIFT 0
+#define TXQ_CTRL_IP_OPTION_EN 0x10
+#define TXQ_CTRL_EN 0x20
+#define TXQ_CTRL_ENH_MODE 0x40
+#define TXQ_CTRL_LS_8023_EN 0x80
+#define TXQ_TXF_BURST_NUM_SHIFT 16
+#define TXQ_TXF_BURST_NUM_MASK 0xFFFF
+
+/* Jumbo packet Threshold for task offload */
+#define REG_TX_TSO_OFFLOAD_THRESH 0x1594 /* In 8-bytes */
+#define TX_TSO_OFFLOAD_THRESH_MASK 0x07FF
+
+#define REG_TXF_WATER_MARK 0x1598 /* In 8-bytes */
+#define TXF_WATER_MARK_MASK 0x0FFF
+#define TXF_LOW_WATER_MARK_SHIFT 0
+#define TXF_HIGH_WATER_MARK_SHIFT 16
+#define TXQ_CTRL_BURST_MODE_EN 0x80000000
+
+#define REG_THRUPUT_MON_CTRL 0x159C
+#define THRUPUT_MON_RATE_MASK 0x3
+#define THRUPUT_MON_RATE_SHIFT 0
+#define THRUPUT_MON_EN 0x80
+
+/* RXQ Control Register */
+#define REG_RXQ_CTRL 0x15A0
+#define ASPM_THRUPUT_LIMIT_MASK 0x3
+#define ASPM_THRUPUT_LIMIT_SHIFT 0
+#define ASPM_THRUPUT_LIMIT_NO 0x00
+#define ASPM_THRUPUT_LIMIT_1M 0x01
+#define ASPM_THRUPUT_LIMIT_10M 0x02
+#define ASPM_THRUPUT_LIMIT_100M 0x04
+#define RXQ1_CTRL_EN 0x10
+#define RXQ2_CTRL_EN 0x20
+#define RXQ3_CTRL_EN 0x40
+#define IPV6_CHKSUM_CTRL_EN 0x80
+#define RSS_HASH_BITS_MASK 0x00FF
+#define RSS_HASH_BITS_SHIFT 8
+#define RSS_HASH_IPV4 0x10000
+#define RSS_HASH_IPV4_TCP 0x20000
+#define RSS_HASH_IPV6 0x40000
+#define RSS_HASH_IPV6_TCP 0x80000
+#define RXQ_RFD_BURST_NUM_MASK 0x003F
+#define RXQ_RFD_BURST_NUM_SHIFT 20
+#define RSS_MODE_MASK 0x0003
+#define RSS_MODE_SHIFT 26
+#define RSS_NIP_QUEUE_SEL_MASK 0x1
+#define RSS_NIP_QUEUE_SEL_SHIFT 28
+#define RRS_HASH_CTRL_EN 0x20000000
+#define RX_CUT_THRU_EN 0x40000000
+#define RXQ_CTRL_EN 0x80000000
+
+#define REG_RFD_FREE_THRESH 0x15A4
+#define RFD_FREE_THRESH_MASK 0x003F
+#define RFD_FREE_HI_THRESH_SHIFT 0
+#define RFD_FREE_LO_THRESH_SHIFT 6
+
+/* RXF flow control register */
+#define REG_RXQ_RXF_PAUSE_THRESH 0x15A8
+#define RXQ_RXF_PAUSE_TH_HI_SHIFT 0
+#define RXQ_RXF_PAUSE_TH_HI_MASK 0x0FFF
+#define RXQ_RXF_PAUSE_TH_LO_SHIFT 16
+#define RXQ_RXF_PAUSE_TH_LO_MASK 0x0FFF
+
+#define REG_RXD_DMA_CTRL 0x15AC
+#define RXD_DMA_THRESH_MASK 0x0FFF /* In 8-bytes */
+#define RXD_DMA_THRESH_SHIFT 0
+#define RXD_DMA_DOWN_TIMER_MASK 0xFFFF
+#define RXD_DMA_DOWN_TIMER_SHIFT 16
+
+/* DMA Engine Control Register */
+#define REG_DMA_CTRL 0x15C0
+#define DMA_CTRL_DMAR_IN_ORDER 0x1
+#define DMA_CTRL_DMAR_ENH_ORDER 0x2
+#define DMA_CTRL_DMAR_OUT_ORDER 0x4
+#define DMA_CTRL_RCB_VALUE 0x8
+#define DMA_CTRL_DMAR_BURST_LEN_MASK 0x0007
+#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4
+#define DMA_CTRL_DMAW_BURST_LEN_MASK 0x0007
+#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7
+#define DMA_CTRL_DMAR_REQ_PRI 0x400
+#define DMA_CTRL_DMAR_DLY_CNT_MASK 0x001F
+#define DMA_CTRL_DMAR_DLY_CNT_SHIFT 11
+#define DMA_CTRL_DMAW_DLY_CNT_MASK 0x000F
+#define DMA_CTRL_DMAW_DLY_CNT_SHIFT 16
+#define DMA_CTRL_CMB_EN 0x100000
+#define DMA_CTRL_SMB_EN 0x200000
+#define DMA_CTRL_CMB_NOW 0x400000
+#define MAC_CTRL_SMB_DIS 0x1000000
+#define DMA_CTRL_SMB_NOW 0x80000000
+
+/* CMB/SMB Control Register */
+#define REG_SMB_STAT_TIMER 0x15C4 /* 2us resolution */
+#define SMB_STAT_TIMER_MASK 0xFFFFFF
+#define REG_CMB_TPD_THRESH 0x15C8
+#define CMB_TPD_THRESH_MASK 0xFFFF
+#define REG_CMB_TX_TIMER 0x15CC /* 2us resolution */
+#define CMB_TX_TIMER_MASK 0xFFFF
+
+/* Mailbox registers */
+#define MB_RFDX_PROD_IDX_MASK 0xFFFF
+#define REG_MB_RFD0_PROD_IDX 0x15E0
+#define REG_MB_RFD1_PROD_IDX 0x15E4
+#define REG_MB_RFD2_PROD_IDX 0x15E8
+#define REG_MB_RFD3_PROD_IDX 0x15EC
+
+#define MB_PRIO_PROD_IDX_MASK 0xFFFF
+#define REG_MB_PRIO_PROD_IDX 0x15F0
+#define MB_HTPD_PROD_IDX_SHIFT 0
+#define MB_NTPD_PROD_IDX_SHIFT 16
+
+#define MB_PRIO_CONS_IDX_MASK 0xFFFF
+#define REG_MB_PRIO_CONS_IDX 0x15F4
+#define MB_HTPD_CONS_IDX_SHIFT 0
+#define MB_NTPD_CONS_IDX_SHIFT 16
+
+#define REG_MB_RFD01_CONS_IDX 0x15F8
+#define MB_RFD0_CONS_IDX_MASK 0x0000FFFF
+#define MB_RFD1_CONS_IDX_MASK 0xFFFF0000
+#define REG_MB_RFD23_CONS_IDX 0x15FC
+#define MB_RFD2_CONS_IDX_MASK 0x0000FFFF
+#define MB_RFD3_CONS_IDX_MASK 0xFFFF0000
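+
+/* Extraction sketch (editorial): two 16-bit consumer indices share each
+ * 32-bit mailbox register, so reading one is a mask plus, for the upper
+ * half, a shift. Assuming the AT_READ_REG() accessor used throughout this
+ * driver:
+ *
+ *	u32 data;
+ *	u16 rfd0_cons, rfd1_cons;
+ *
+ *	AT_READ_REG(hw, REG_MB_RFD01_CONS_IDX, &data);
+ *	rfd0_cons = data & MB_RFD0_CONS_IDX_MASK;
+ *	rfd1_cons = (data & MB_RFD1_CONS_IDX_MASK) >> 16;
+ */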
+
+/* Interrupt Status Register */
+#define REG_ISR 0x1600
+#define ISR_SMB 0x00000001
+#define ISR_TIMER 0x00000002
+/*
+ * Software manual interrupt, for debug. Set when SW_MAN_INT_EN is set
+ * in Table 51 Selene Master Control Register (Offset 0x1400).
+ */
+#define ISR_MANUAL 0x00000004
+#define ISR_HW_RXF_OV 0x00000008 /* RXF overflow interrupt */
+#define ISR_RFD0_UR 0x00000010 /* RFD0 underrun */
+#define ISR_RFD1_UR 0x00000020
+#define ISR_RFD2_UR 0x00000040
+#define ISR_RFD3_UR 0x00000080
+#define ISR_TXF_UR 0x00000100
+#define ISR_DMAR_TO_RST 0x00000200
+#define ISR_DMAW_TO_RST 0x00000400
+#define ISR_TX_CREDIT 0x00000800
+#define ISR_GPHY 0x00001000
+/* GPHY low power state interrupt */
+#define ISR_GPHY_LPW 0x00002000
+#define ISR_TXQ_TO_RST 0x00004000
+#define ISR_TX_PKT 0x00008000
+#define ISR_RX_PKT_0 0x00010000
+#define ISR_RX_PKT_1 0x00020000
+#define ISR_RX_PKT_2 0x00040000
+#define ISR_RX_PKT_3 0x00080000
+#define ISR_MAC_RX 0x00100000
+#define ISR_MAC_TX 0x00200000
+#define ISR_UR_DETECTED 0x00400000
+#define ISR_FERR_DETECTED 0x00800000
+#define ISR_NFERR_DETECTED 0x01000000
+#define ISR_CERR_DETECTED 0x02000000
+#define ISR_PHY_LINKDOWN 0x04000000
+#define ISR_DIS_INT 0x80000000
+
+/* Interrupt Mask Register */
+#define REG_IMR 0x1604
+
+#define IMR_NORMAL_MASK (\
+ ISR_MANUAL |\
+ ISR_HW_RXF_OV |\
+ ISR_RFD0_UR |\
+ ISR_TXF_UR |\
+ ISR_DMAR_TO_RST |\
+ ISR_TXQ_TO_RST |\
+ ISR_DMAW_TO_RST |\
+ ISR_GPHY |\
+ ISR_TX_PKT |\
+ ISR_RX_PKT_0 |\
+ ISR_GPHY_LPW |\
+ ISR_PHY_LINKDOWN)
+
+#define ISR_RX_PKT (\
+ ISR_RX_PKT_0 |\
+ ISR_RX_PKT_1 |\
+ ISR_RX_PKT_2 |\
+ ISR_RX_PKT_3)
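+
+/* Usage sketch (editorial): this group mask is what the interrupt handler
+ * masks out of the IMR while a NAPI poll is in flight, as atl1c_intr()
+ * later in this patch does:
+ *
+ *	hw->intr_mask &= ~ISR_RX_PKT;
+ *	AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
+ *	__napi_schedule(&adapter->napi);
+ */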
+
+#define ISR_OVER (\
+ ISR_RFD0_UR |\
+ ISR_RFD1_UR |\
+ ISR_RFD2_UR |\
+ ISR_RFD3_UR |\
+ ISR_HW_RXF_OV |\
+ ISR_TXF_UR)
+
+#define ISR_ERROR (\
+ ISR_DMAR_TO_RST |\
+ ISR_TXQ_TO_RST |\
+ ISR_DMAW_TO_RST |\
+ ISR_PHY_LINKDOWN)
+
+#define REG_INT_RETRIG_TIMER 0x1608
+#define INT_RETRIG_TIMER_MASK 0xFFFF
+
+#define REG_HDS_CTRL 0x160C
+#define HDS_CTRL_EN 0x0001
+#define HDS_CTRL_BACKFILLSIZE_SHIFT 8
+#define HDS_CTRL_BACKFILLSIZE_MASK 0x0FFF
+#define HDS_CTRL_MAX_HDRSIZE_SHIFT 20
+#define HDS_CTRL_MAC_HDRSIZE_MASK 0x0FFF
+
+#define REG_MAC_RX_STATUS_BIN 0x1700
+#define REG_MAC_RX_STATUS_END 0x175c
+#define REG_MAC_TX_STATUS_BIN 0x1760
+#define REG_MAC_TX_STATUS_END 0x17c0
+
+#define REG_CLK_GATING_CTRL 0x1814
+#define CLK_GATING_DMAW_EN 0x0001
+#define CLK_GATING_DMAR_EN 0x0002
+#define CLK_GATING_TXQ_EN 0x0004
+#define CLK_GATING_RXQ_EN 0x0008
+#define CLK_GATING_TXMAC_EN 0x0010
+#define CLK_GATING_RXMAC_EN 0x0020
+
+#define CLK_GATING_EN_ALL (CLK_GATING_DMAW_EN |\
+ CLK_GATING_DMAR_EN |\
+ CLK_GATING_TXQ_EN |\
+ CLK_GATING_RXQ_EN |\
+ CLK_GATING_TXMAC_EN|\
+ CLK_GATING_RXMAC_EN)
+
+/* DEBUG ADDR */
+#define REG_DEBUG_DATA0 0x1900
+#define REG_DEBUG_DATA1 0x1904
+
+#define L1D_MPW_PHYID1 0xD01C /* V7 */
+#define L1D_MPW_PHYID2 0xD01D /* V1-V6 */
+#define L1D_MPW_PHYID3 0xD01E /* V8 */
+
+
+/* Autoneg Advertisement Register */
+#define ADVERTISE_DEFAULT_CAP \
+ (ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)
+
+/* 1000BASE-T Control Register */
+#define GIGA_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port 0=DTE device */
+
+#define GIGA_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master 0=Configure PHY as Slave */
+#define GIGA_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value 0=Automatic Master/Slave config */
+#define GIGA_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define GIGA_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
+#define GIGA_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
+#define GIGA_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
+#define GIGA_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
+#define GIGA_CR_1000T_SPEED_MASK 0x0300
+#define GIGA_CR_1000T_DEFAULT_CAP 0x0300
+
+/* PHY Specific Status Register */
+#define MII_GIGA_PSSR 0x11
+#define GIGA_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
+#define GIGA_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
+#define GIGA_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
+#define GIGA_PSSR_10MBS 0x0000 /* 00=10Mbs */
+#define GIGA_PSSR_100MBS 0x4000 /* 01=100Mbs */
+#define GIGA_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
+
+/* PHY Interrupt Enable Register */
+#define MII_IER 0x12
+#define IER_LINK_UP 0x0400
+#define IER_LINK_DOWN 0x0800
+
+/* PHY Interrupt Status Register */
+#define MII_ISR 0x13
+#define ISR_LINK_UP 0x0400
+#define ISR_LINK_DOWN 0x0800
+
+/* Cable-Detect-Test Control Register */
+#define MII_CDTC 0x16
+#define CDTC_EN_OFF 0 /* self-clearing */
+#define CDTC_EN_BITS 1
+#define CDTC_PAIR_OFF 8
+#define CDTC_PAIR_BIT 2
+
+/* Cable-Detect-Test Status Register */
+#define MII_CDTS 0x1C
+#define CDTS_STATUS_OFF 8
+#define CDTS_STATUS_BITS 2
+#define CDTS_STATUS_NORMAL 0
+#define CDTS_STATUS_SHORT 1
+#define CDTS_STATUS_OPEN 2
+#define CDTS_STATUS_INVALID 3
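+
+/* Decoding sketch (editorial): assuming a cable test has already been
+ * started through MII_CDTC, the per-pair result is a 2-bit field read via
+ * the atl1c_read_phy_reg() accessor; report_cable_fault() is a
+ * hypothetical caller-side handler:
+ *
+ *	u16 cdts, status;
+ *
+ *	atl1c_read_phy_reg(hw, MII_CDTS, &cdts);
+ *	status = (cdts >> CDTS_STATUS_OFF) & ((1 << CDTS_STATUS_BITS) - 1);
+ *	if (status == CDTS_STATUS_SHORT || status == CDTS_STATUS_OPEN)
+ *		report_cable_fault(status);
+ */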
+
+#define MII_DBG_ADDR 0x1D
+#define MII_DBG_DATA 0x1E
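+
+/* The MII_ANA_CTRL_* registers below live behind the PHY's debug port:
+ * the register number goes to MII_DBG_ADDR and the data moves through
+ * MII_DBG_DATA. A sketch (editorial), assuming atl1c_write_phy_reg()
+ * returns 0 on success as its callers in this patch expect:
+ *
+ *	static int atl1c_write_dbg_reg(struct atl1c_hw *hw, u16 reg, u16 val)
+ *	{
+ *		int err = atl1c_write_phy_reg(hw, MII_DBG_ADDR, reg);
+ *
+ *		if (!err)
+ *			err = atl1c_write_phy_reg(hw, MII_DBG_DATA, val);
+ *		return err;
+ *	}
+ */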
+
+#define MII_ANA_CTRL_0 0x0
+#define ANA_RESTART_CAL 0x0001
+#define ANA_MANUL_SWICH_ON_SHIFT 0x1
+#define ANA_MANUL_SWICH_ON_MASK 0xF
+#define ANA_MAN_ENABLE 0x0020
+#define ANA_SEL_HSP 0x0040
+#define ANA_EN_HB 0x0080
+#define ANA_EN_HBIAS 0x0100
+#define ANA_OEN_125M 0x0200
+#define ANA_EN_LCKDT 0x0400
+#define ANA_LCKDT_PHY 0x0800
+#define ANA_AFE_MODE 0x1000
+#define ANA_VCO_SLOW 0x2000
+#define ANA_VCO_FAST 0x4000
+#define ANA_SEL_CLK125M_DSP 0x8000
+
+#define MII_ANA_CTRL_4 0x4
+#define ANA_IECHO_ADJ_MASK 0xF
+#define ANA_IECHO_ADJ_3_SHIFT 0
+#define ANA_IECHO_ADJ_2_SHIFT 4
+#define ANA_IECHO_ADJ_1_SHIFT 8
+#define ANA_IECHO_ADJ_0_SHIFT 12
+
+#define MII_ANA_CTRL_5 0x5
+#define ANA_SERDES_CDR_BW_SHIFT 0
+#define ANA_SERDES_CDR_BW_MASK 0x3
+#define ANA_MS_PAD_DBG 0x0004
+#define ANA_SPEEDUP_DBG 0x0008
+#define ANA_SERDES_TH_LOS_SHIFT 4
+#define ANA_SERDES_TH_LOS_MASK 0x3
+#define ANA_SERDES_EN_DEEM 0x0040
+#define ANA_SERDES_TXELECIDLE 0x0080
+#define ANA_SERDES_BEACON 0x0100
+#define ANA_SERDES_HALFTXDR 0x0200
+#define ANA_SERDES_SEL_HSP 0x0400
+#define ANA_SERDES_EN_PLL 0x0800
+#define ANA_SERDES_EN 0x1000
+#define ANA_SERDES_EN_LCKDT 0x2000
+
+#define MII_ANA_CTRL_11 0xB
+#define ANA_PS_HIB_EN 0x8000
+
+#define MII_ANA_CTRL_18 0x12
+#define ANA_TEST_MODE_10BT_01SHIFT 0
+#define ANA_TEST_MODE_10BT_01MASK 0x3
+#define ANA_LOOP_SEL_10BT 0x0004
+#define ANA_RGMII_MODE_SW 0x0008
+#define ANA_EN_LONGECABLE 0x0010
+#define ANA_TEST_MODE_10BT_2 0x0020
+#define ANA_EN_10BT_IDLE 0x0400
+#define ANA_EN_MASK_TB 0x0800
+#define ANA_TRIGGER_SEL_TIMER_SHIFT 12
+#define ANA_TRIGGER_SEL_TIMER_MASK 0x3
+#define ANA_INTERVAL_SEL_TIMER_SHIFT 14
+#define ANA_INTERVAL_SEL_TIMER_MASK 0x3
+
+#define MII_ANA_CTRL_41 0x29
+#define ANA_TOP_PS_EN 0x8000
+
+#define MII_ANA_CTRL_54 0x36
+#define ANA_LONG_CABLE_TH_100_SHIFT 0
+#define ANA_LONG_CABLE_TH_100_MASK 0x3F
+#define ANA_DESERVED 0x0040
+#define ANA_EN_LIT_CH 0x0080
+#define ANA_SHORT_CABLE_TH_100_SHIFT 8
+#define ANA_SHORT_CABLE_TH_100_MASK 0x3F
+#define ANA_BP_BAD_LINK_ACCUM 0x4000
+#define ANA_BP_SMALL_BW 0x8000
+
+#endif /*_ATL1C_HW_H_*/
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
new file mode 100644
index 000000000000..12a0b30319db
--- /dev/null
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -0,0 +1,2933 @@
+/*
+ * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved.
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include "atl1c.h"
+
+#define ATL1C_DRV_VERSION "1.0.1.0-NAPI"
+char atl1c_driver_name[] = "atl1c";
+char atl1c_driver_version[] = ATL1C_DRV_VERSION;
+#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062
+#define PCI_DEVICE_ID_ATTANSIC_L1C 0x1063
+#define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */
+#define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */
+#define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */
+#define PCI_DEVICE_ID_ATHEROS_L1D_2_0 0x1083 /* AR8151 v2.0 Gigabit 1000 */
+#define L2CB_V10 0xc0
+#define L2CB_V11 0xc1
+
+/*
+ * atl1c_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)},
+ /* required last entry */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl);
+
+MODULE_AUTHOR("Jie Yang <jie.yang@atheros.com>");
+MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ATL1C_DRV_VERSION);
+
+static int atl1c_stop_mac(struct atl1c_hw *hw);
+static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw);
+static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw);
+static void atl1c_disable_l0s_l1(struct atl1c_hw *hw);
+static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup);
+static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter);
+static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
+ int *work_done, int work_to_do);
+static int atl1c_up(struct atl1c_adapter *adapter);
+static void atl1c_down(struct atl1c_adapter *adapter);
+
+static const u16 atl1c_pay_load_size[] = {
+ 128, 256, 512, 1024, 2048, 4096,
+};
+
+static const u16 atl1c_rfd_prod_idx_regs[AT_MAX_RECEIVE_QUEUE] =
+{
+ REG_MB_RFD0_PROD_IDX,
+ REG_MB_RFD1_PROD_IDX,
+ REG_MB_RFD2_PROD_IDX,
+ REG_MB_RFD3_PROD_IDX
+};
+
+static const u16 atl1c_rfd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
+{
+ REG_RFD0_HEAD_ADDR_LO,
+ REG_RFD1_HEAD_ADDR_LO,
+ REG_RFD2_HEAD_ADDR_LO,
+ REG_RFD3_HEAD_ADDR_LO
+};
+
+static const u16 atl1c_rrd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
+{
+ REG_RRD0_HEAD_ADDR_LO,
+ REG_RRD1_HEAD_ADDR_LO,
+ REG_RRD2_HEAD_ADDR_LO,
+ REG_RRD3_HEAD_ADDR_LO
+};
+
+static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+	NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
+
+static void atl1c_pcie_patch(struct atl1c_hw *hw)
+{
+ u32 data;
+
+ AT_READ_REG(hw, REG_PCIE_PHYMISC, &data);
+ data |= PCIE_PHYMISC_FORCE_RCV_DET;
+ AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data);
+
+ if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) {
+ AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data);
+
+ data &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK <<
+ PCIE_PHYMISC2_SERDES_CDR_SHIFT);
+ data |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
+ data &= ~(PCIE_PHYMISC2_SERDES_TH_MASK <<
+ PCIE_PHYMISC2_SERDES_TH_SHIFT);
+ data |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
+ AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data);
+ }
+}
+
+/* FIXME: no need any more ? */
+/*
+ * atl1c_reset_pcie - reset the PCIe module
+ */
+static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
+{
+ u32 data;
+ u32 pci_cmd;
+ struct pci_dev *pdev = hw->adapter->pdev;
+
+ AT_READ_REG(hw, PCI_COMMAND, &pci_cmd);
+ pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_cmd |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
+ PCI_COMMAND_IO);
+ AT_WRITE_REG(hw, PCI_COMMAND, pci_cmd);
+
+	/*
+	 * Clear any power-saving settings
+	 */
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ /*
+	 * Mask some PCIe error bits
+ */
+ AT_READ_REG(hw, REG_PCIE_UC_SEVERITY, &data);
+ data &= ~PCIE_UC_SERVRITY_DLP;
+ data &= ~PCIE_UC_SERVRITY_FCP;
+ AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data);
+
+ AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data);
+ data &= ~LTSSM_ID_EN_WRO;
+ AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, data);
+
+ atl1c_pcie_patch(hw);
+ if (flag & ATL1C_PCIE_L0S_L1_DISABLE)
+ atl1c_disable_l0s_l1(hw);
+ if (flag & ATL1C_PCIE_PHY_RESET)
+ AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT);
+ else
+ AT_WRITE_REG(hw, REG_GPHY_CTRL,
+ GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET);
+
+ msleep(5);
+}
+
+/*
+ * atl1c_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ */
+static inline void atl1c_irq_enable(struct atl1c_adapter *adapter)
+{
+ if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
+ AT_WRITE_REG(&adapter->hw, REG_ISR, 0x7FFFFFFF);
+ AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
+ AT_WRITE_FLUSH(&adapter->hw);
+ }
+}
+
+/*
+ * atl1c_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ */
+static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
+{
+ atomic_inc(&adapter->irq_sem);
+ AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
+ AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT);
+ AT_WRITE_FLUSH(&adapter->hw);
+ synchronize_irq(adapter->pdev->irq);
+}
+
+/*
+ * atl1c_irq_reset - reset the interrupt configuration on the NIC
+ * @adapter: board private structure
+ */
+static inline void atl1c_irq_reset(struct atl1c_adapter *adapter)
+{
+ atomic_set(&adapter->irq_sem, 1);
+ atl1c_irq_enable(adapter);
+}
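+
+/* Editorial note: irq_sem makes disable/enable pairs nestable.
+ * atl1c_irq_disable() increments the count and atl1c_irq_enable() only
+ * unmasks interrupts once it drops back to zero, so a sequence such as
+ * the one in atl1c_vlan_mode() below cannot re-enable interrupts early:
+ *
+ *	atl1c_irq_disable(adapter);
+ *	AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
+ *	atl1c_irq_enable(adapter);
+ */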
+
+/*
+ * atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads
+ * of the idle status register until the device is actually idle
+ */
+static u32 atl1c_wait_until_idle(struct atl1c_hw *hw)
+{
+ int timeout;
+ u32 data;
+
+ for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
+ AT_READ_REG(hw, REG_IDLE_STATUS, &data);
+ if ((data & IDLE_STATUS_MASK) == 0)
+ return 0;
+ msleep(1);
+ }
+ return data;
+}
+
+/*
+ * atl1c_phy_config - Timer Call-back
+ * @data: pointer to netdev cast into an unsigned long
+ */
+static void atl1c_phy_config(unsigned long data)
+{
+ struct atl1c_adapter *adapter = (struct atl1c_adapter *) data;
+ struct atl1c_hw *hw = &adapter->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->mdio_lock, flags);
+ atl1c_restart_autoneg(hw);
+ spin_unlock_irqrestore(&adapter->mdio_lock, flags);
+}
+
+void atl1c_reinit_locked(struct atl1c_adapter *adapter)
+{
+ WARN_ON(in_interrupt());
+ atl1c_down(adapter);
+ atl1c_up(adapter);
+ clear_bit(__AT_RESETTING, &adapter->flags);
+}
+
+static void atl1c_check_link_status(struct atl1c_adapter *adapter)
+{
+ struct atl1c_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+ unsigned long flags;
+ u16 speed, duplex, phy_data;
+
+ spin_lock_irqsave(&adapter->mdio_lock, flags);
+	/* MII_BMSR must be read twice: the link status bit is latched */
+ atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+ atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+ spin_unlock_irqrestore(&adapter->mdio_lock, flags);
+
+ if ((phy_data & BMSR_LSTATUS) == 0) {
+ /* link down */
+ hw->hibernate = true;
+ if (atl1c_stop_mac(hw) != 0)
+ if (netif_msg_hw(adapter))
+ dev_warn(&pdev->dev, "stop mac failed\n");
+ atl1c_set_aspm(hw, false);
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ atl1c_phy_reset(hw);
+ atl1c_phy_init(&adapter->hw);
+ } else {
+ /* Link Up */
+ hw->hibernate = false;
+ spin_lock_irqsave(&adapter->mdio_lock, flags);
+ err = atl1c_get_speed_and_duplex(hw, &speed, &duplex);
+ spin_unlock_irqrestore(&adapter->mdio_lock, flags);
+ if (unlikely(err))
+ return;
+		/* adopt the resolved link parameters if they changed */
+ if (adapter->link_speed != speed ||
+ adapter->link_duplex != duplex) {
+ adapter->link_speed = speed;
+ adapter->link_duplex = duplex;
+ atl1c_set_aspm(hw, true);
+ atl1c_enable_tx_ctrl(hw);
+ atl1c_enable_rx_ctrl(hw);
+ atl1c_setup_mac_ctrl(adapter);
+ if (netif_msg_link(adapter))
+ dev_info(&pdev->dev,
+ "%s: %s NIC Link is Up<%d Mbps %s>\n",
+ atl1c_driver_name, netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full Duplex" : "Half Duplex");
+ }
+ if (!netif_carrier_ok(netdev))
+ netif_carrier_on(netdev);
+ }
+}
+
+static void atl1c_link_chg_event(struct atl1c_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ u16 phy_data;
+ u16 link_up;
+
+ spin_lock(&adapter->mdio_lock);
+ atl1c_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+ atl1c_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+ spin_unlock(&adapter->mdio_lock);
+ link_up = phy_data & BMSR_LSTATUS;
+ /* notify upper layer link down ASAP */
+ if (!link_up) {
+ if (netif_carrier_ok(netdev)) {
+ /* old link state: Up */
+ netif_carrier_off(netdev);
+ if (netif_msg_link(adapter))
+ dev_info(&pdev->dev,
+ "%s: %s NIC Link is Down\n",
+ atl1c_driver_name, netdev->name);
+ adapter->link_speed = SPEED_0;
+ }
+ }
+
+ set_bit(ATL1C_WORK_EVENT_LINK_CHANGE, &adapter->work_event);
+ schedule_work(&adapter->common_task);
+}
+
+static void atl1c_common_task(struct work_struct *work)
+{
+ struct atl1c_adapter *adapter;
+ struct net_device *netdev;
+
+ adapter = container_of(work, struct atl1c_adapter, common_task);
+ netdev = adapter->netdev;
+
+ if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) {
+ netif_device_detach(netdev);
+ atl1c_down(adapter);
+ atl1c_up(adapter);
+ netif_device_attach(netdev);
+ }
+
+ if (test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE,
+ &adapter->work_event))
+ atl1c_check_link_status(adapter);
+}
+
+
+static void atl1c_del_timer(struct atl1c_adapter *adapter)
+{
+ del_timer_sync(&adapter->phy_config_timer);
+}
+
+
+/*
+ * atl1c_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ */
+static void atl1c_tx_timeout(struct net_device *netdev)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event);
+ schedule_work(&adapter->common_task);
+}
+
+/*
+ * atl1c_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ */
+static void atl1c_set_multi(struct net_device *netdev)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ struct atl1c_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u32 mac_ctrl_data;
+ u32 hash_value;
+
+ /* Check for Promiscuous and All Multicast modes */
+ AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data);
+
+ if (netdev->flags & IFF_PROMISC) {
+ mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
+ } else if (netdev->flags & IFF_ALLMULTI) {
+ mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
+ mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN;
+ } else {
+ mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
+ }
+
+ AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
+
+ /* clear the old settings from the multicast hash table */
+ AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
+ AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
+
+	/* compute each multicast address's hash value and set it in the hash table */
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atl1c_hash_mc_addr(hw, ha->addr);
+ atl1c_hash_set(hw, hash_value);
+ }
+}
+
+static void __atl1c_vlan_mode(u32 features, u32 *mac_ctrl_data)
+{
+ if (features & NETIF_F_HW_VLAN_RX) {
+ /* enable VLAN tag insert/strip */
+ *mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
+ } else {
+ /* disable VLAN tag insert/strip */
+ *mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN;
+ }
+}
+
+static void atl1c_vlan_mode(struct net_device *netdev, u32 features)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+ u32 mac_ctrl_data = 0;
+
+ if (netif_msg_pktdata(adapter))
+ dev_dbg(&pdev->dev, "atl1c_vlan_mode\n");
+
+ atl1c_irq_disable(adapter);
+ AT_READ_REG(&adapter->hw, REG_MAC_CTRL, &mac_ctrl_data);
+ __atl1c_vlan_mode(features, &mac_ctrl_data);
+ AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
+ atl1c_irq_enable(adapter);
+}
+
+static void atl1c_restore_vlan(struct atl1c_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ if (netif_msg_pktdata(adapter))
+ dev_dbg(&pdev->dev, "atl1c_restore_vlan\n");
+ atl1c_vlan_mode(adapter->netdev, adapter->netdev->features);
+}
+
+/*
+ * atl1c_set_mac_addr - Change the Ethernet address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
+
+ atl1c_hw_set_mac_addr(&adapter->hw);
+
+ return 0;
+}
+
+static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
+ struct net_device *dev)
+{
+ int mtu = dev->mtu;
+
+ adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
+ roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
+}
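+
+/* Worked example (editorial): with an assumed 9000-byte MTU the buffer
+ * must hold 9000 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) =
+ * 9022 bytes, which roundup() pads to 9024, the next multiple of 8;
+ * an MTU at or below AT_RX_BUF_SIZE keeps the default AT_RX_BUF_SIZE
+ * length.
+ */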
+
+static u32 atl1c_fix_features(struct net_device *netdev, u32 features)
+{
+ /*
+ * Since there is no support for separate rx/tx vlan accel
+ * enable/disable make sure tx flag is always in same state as rx.
+ */
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ if (netdev->mtu > MAX_TSO_FRAME_SIZE)
+ features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+ return features;
+}
+
+static int atl1c_set_features(struct net_device *netdev, u32 features)
+{
+ u32 changed = netdev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ atl1c_vlan_mode(netdev, features);
+
+ return 0;
+}
+
+/*
+ * atl1c_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ int old_mtu = netdev->mtu;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+ if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
+ (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+ if (netif_msg_link(adapter))
+ dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
+ return -EINVAL;
+ }
+ /* set MTU */
+ if (old_mtu != new_mtu && netif_running(netdev)) {
+ while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
+ msleep(1);
+ netdev->mtu = new_mtu;
+ adapter->hw.max_frame_size = new_mtu;
+ atl1c_set_rxbufsize(adapter, netdev);
+ atl1c_down(adapter);
+ netdev_update_features(netdev);
+ atl1c_up(adapter);
+ clear_bit(__AT_RESETTING, &adapter->flags);
+ if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) {
+ u32 phy_data;
+
+ AT_READ_REG(&adapter->hw, 0x1414, &phy_data);
+ phy_data |= 0x10000000;
+ AT_WRITE_REG(&adapter->hw, 0x1414, phy_data);
+ }
+
+ }
+ return 0;
+}
+
+/*
+ * caller should hold mdio_lock
+ */
+static int atl1c_mdio_read(struct net_device *netdev, int phy_id, int reg_num)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ u16 result;
+
+ atl1c_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result);
+ return result;
+}
+
+static void atl1c_mdio_write(struct net_device *netdev, int phy_id,
+ int reg_num, int val)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+
+ atl1c_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
+}
+
+/*
+ * atl1c_mii_ioctl - handle SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG requests
+ * @netdev: network interface device structure
+ * @ifr: interface request structure carrying the MII data
+ * @cmd: ioctl command code
+ */
+static int atl1c_mii_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+ struct mii_ioctl_data *data = if_mii(ifr);
+ unsigned long flags;
+ int retval = 0;
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ spin_lock_irqsave(&adapter->mdio_lock, flags);
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = 0;
+ break;
+
+ case SIOCGMIIREG:
+ if (atl1c_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+ &data->val_out)) {
+ retval = -EIO;
+ goto out;
+ }
+ break;
+
+ case SIOCSMIIREG:
+ if (data->reg_num & ~(0x1F)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ dev_dbg(&pdev->dev, "<atl1c_mii_ioctl> write %x %x",
+ data->reg_num, data->val_in);
+ if (atl1c_write_phy_reg(&adapter->hw,
+ data->reg_num, data->val_in)) {
+ retval = -EIO;
+ goto out;
+ }
+ break;
+
+ default:
+ retval = -EOPNOTSUPP;
+ break;
+ }
+out:
+ spin_unlock_irqrestore(&adapter->mdio_lock, flags);
+ return retval;
+}
+
+/*
+ * atl1c_ioctl - dispatch supported MII ioctls to atl1c_mii_ioctl
+ * @netdev: network interface device structure
+ * @ifr: interface request structure
+ * @cmd: ioctl command code
+ */
+static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return atl1c_mii_ioctl(netdev, ifr, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/*
+ * atl1c_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ */
+static int __devinit atl1c_alloc_queues(struct atl1c_adapter *adapter)
+{
+ return 0;
+}
+
+static void atl1c_set_mac_type(struct atl1c_hw *hw)
+{
+ switch (hw->device_id) {
+ case PCI_DEVICE_ID_ATTANSIC_L2C:
+ hw->nic_type = athr_l2c;
+ break;
+ case PCI_DEVICE_ID_ATTANSIC_L1C:
+ hw->nic_type = athr_l1c;
+ break;
+ case PCI_DEVICE_ID_ATHEROS_L2C_B:
+ hw->nic_type = athr_l2c_b;
+ break;
+ case PCI_DEVICE_ID_ATHEROS_L2C_B2:
+ hw->nic_type = athr_l2c_b2;
+ break;
+ case PCI_DEVICE_ID_ATHEROS_L1D:
+ hw->nic_type = athr_l1d;
+ break;
+ case PCI_DEVICE_ID_ATHEROS_L1D_2_0:
+ hw->nic_type = athr_l1d_2;
+ break;
+ default:
+ break;
+ }
+}
+
+static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
+{
+ u32 phy_status_data;
+ u32 link_ctrl_data;
+
+ atl1c_set_mac_type(hw);
+ AT_READ_REG(hw, REG_PHY_STATUS, &phy_status_data);
+ AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
+
+ hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE |
+ ATL1C_TXQ_MODE_ENHANCE;
+ if (link_ctrl_data & LINK_CTRL_L0S_EN)
+ hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT;
+ if (link_ctrl_data & LINK_CTRL_L1_EN)
+ hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT;
+ if (link_ctrl_data & LINK_CTRL_EXT_SYNC)
+ hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC;
+ hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
+
+ if (hw->nic_type == athr_l1c ||
+ hw->nic_type == athr_l1d ||
+ hw->nic_type == athr_l1d_2)
+ hw->link_cap_flags |= ATL1C_LINK_CAP_1000M;
+ return 0;
+}
+
+/*
+ * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * atl1c_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ */
+static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
+{
+ struct atl1c_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ u32 revision;
+
+
+ adapter->wol = 0;
+ device_set_wakeup_enable(&pdev->dev, false);
+ adapter->link_speed = SPEED_0;
+ adapter->link_duplex = FULL_DUPLEX;
+ adapter->num_rx_queues = AT_DEF_RECEIVE_QUEUE;
+ adapter->tpd_ring[0].count = 1024;
+ adapter->rfd_ring[0].count = 512;
+
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_id = pdev->subsystem_device;
+ AT_READ_REG(hw, PCI_CLASS_REVISION, &revision);
+ hw->revision_id = revision & 0xFF;
+ /* before link up, we assume hibernate is true */
+ hw->hibernate = true;
+ hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
+ if (atl1c_setup_mac_funcs(hw) != 0) {
+ dev_err(&pdev->dev, "set mac function pointers failed\n");
+ return -1;
+ }
+ hw->intr_mask = IMR_NORMAL_MASK;
+ hw->phy_configured = false;
+ hw->preamble_len = 7;
+ hw->max_frame_size = adapter->netdev->mtu;
+ if (adapter->num_rx_queues < 2) {
+ hw->rss_type = atl1c_rss_disable;
+ hw->rss_mode = atl1c_rss_mode_disable;
+ } else {
+ hw->rss_type = atl1c_rss_ipv4;
+ hw->rss_mode = atl1c_rss_mul_que_mul_int;
+ hw->rss_hash_bits = 16;
+ }
+ hw->autoneg_advertised = ADVERTISED_Autoneg;
+ hw->indirect_tab = 0xE4E4E4E4;
+ hw->base_cpu = 0;
+
+ hw->ict = 50000; /* 100ms */
+ hw->smb_timer = 200000; /* 400ms */
+ hw->cmb_tpd = 4;
+ hw->cmb_tx_timer = 1; /* 2 us */
+ hw->rx_imt = 200;
+ hw->tx_imt = 1000;
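+	/*
+	 * Editorial note: ict, smb_timer and cmb_tx_timer are programmed
+	 * into REG_INT_RETRIG_TIMER, REG_SMB_STAT_TIMER and
+	 * REG_CMB_TX_TIMER, whose 2us tick makes 50000 ticks = 100ms,
+	 * 200000 ticks = 400ms and 1 tick = 2us, matching the inline
+	 * comments above.
+	 */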
+
+ hw->tpd_burst = 5;
+ hw->rfd_burst = 8;
+ hw->dma_order = atl1c_dma_ord_out;
+ hw->dmar_block = atl1c_dma_req_1024;
+ hw->dmaw_block = atl1c_dma_req_1024;
+ hw->dmar_dly_cnt = 15;
+ hw->dmaw_dly_cnt = 4;
+
+ if (atl1c_alloc_queues(adapter)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+ /* TODO */
+ atl1c_set_rxbufsize(adapter, adapter->netdev);
+ atomic_set(&adapter->irq_sem, 1);
+ spin_lock_init(&adapter->mdio_lock);
+ spin_lock_init(&adapter->tx_lock);
+ set_bit(__AT_DOWN, &adapter->flags);
+
+ return 0;
+}
+
+static inline void atl1c_clean_buffer(struct pci_dev *pdev,
+ struct atl1c_buffer *buffer_info, int in_irq)
+{
+	u16 pci_direction;
+
+	if (buffer_info->flags & ATL1C_BUFFER_FREE)
+		return;
+	if (buffer_info->dma) {
+		if (buffer_info->flags & ATL1C_PCIMAP_FROMDEVICE)
+			pci_direction = PCI_DMA_FROMDEVICE;
+		else
+			pci_direction = PCI_DMA_TODEVICE;
+
+		if (buffer_info->flags & ATL1C_PCIMAP_SINGLE)
+			pci_unmap_single(pdev, buffer_info->dma,
+					buffer_info->length, pci_direction);
+		else if (buffer_info->flags & ATL1C_PCIMAP_PAGE)
+			pci_unmap_page(pdev, buffer_info->dma,
+					buffer_info->length, pci_direction);
+ }
+ if (buffer_info->skb) {
+ if (in_irq)
+ dev_kfree_skb_irq(buffer_info->skb);
+ else
+ dev_kfree_skb(buffer_info->skb);
+ }
+ buffer_info->dma = 0;
+ buffer_info->skb = NULL;
+ ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
+}
+/*
+ * atl1c_clean_tx_ring - Free Tx-skb
+ * @adapter: board private structure
+ */
+static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
+ enum atl1c_trans_queue type)
+{
+ struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
+ struct atl1c_buffer *buffer_info;
+ struct pci_dev *pdev = adapter->pdev;
+ u16 index, ring_count;
+
+ ring_count = tpd_ring->count;
+ for (index = 0; index < ring_count; index++) {
+ buffer_info = &tpd_ring->buffer_info[index];
+ atl1c_clean_buffer(pdev, buffer_info, 0);
+ }
+
+ /* Zero out Tx-buffers */
+ memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
+ ring_count);
+ atomic_set(&tpd_ring->next_to_clean, 0);
+ tpd_ring->next_to_use = 0;
+}
+
+/*
+ * atl1c_clean_rx_ring - Free rx-reservation skbs
+ * @adapter: board private structure
+ */
+static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
+{
+ struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
+ struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
+ struct atl1c_buffer *buffer_info;
+ struct pci_dev *pdev = adapter->pdev;
+ int i, j;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ for (j = 0; j < rfd_ring[i].count; j++) {
+ buffer_info = &rfd_ring[i].buffer_info[j];
+ atl1c_clean_buffer(pdev, buffer_info, 0);
+ }
+ /* zero out the descriptor ring */
+ memset(rfd_ring[i].desc, 0, rfd_ring[i].size);
+ rfd_ring[i].next_to_clean = 0;
+ rfd_ring[i].next_to_use = 0;
+ rrd_ring[i].next_to_use = 0;
+ rrd_ring[i].next_to_clean = 0;
+ }
+}
+
+/*
+ * Initialize the ring read / write pointers
+ */
+static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
+{
+ struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
+ struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
+ struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
+ struct atl1c_buffer *buffer_info;
+ int i, j;
+
+ for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
+ tpd_ring[i].next_to_use = 0;
+ atomic_set(&tpd_ring[i].next_to_clean, 0);
+ buffer_info = tpd_ring[i].buffer_info;
+		for (j = 0; j < tpd_ring[i].count; j++)
+			ATL1C_SET_BUFFER_STATE(&buffer_info[j],
+				ATL1C_BUFFER_FREE);
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rfd_ring[i].next_to_use = 0;
+ rfd_ring[i].next_to_clean = 0;
+ rrd_ring[i].next_to_use = 0;
+ rrd_ring[i].next_to_clean = 0;
+ for (j = 0; j < rfd_ring[i].count; j++) {
+ buffer_info = &rfd_ring[i].buffer_info[j];
+ ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
+ }
+ }
+}
+
+/*
+ * atl1c_free_ring_resources - Free Tx / RX descriptor Resources
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ */
+static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ pci_free_consistent(pdev, adapter->ring_header.size,
+ adapter->ring_header.desc,
+ adapter->ring_header.dma);
+ adapter->ring_header.desc = NULL;
+
+	/* Note: only free tpd_ring.buffer_info; it also contains
+	 * rfd_ring.buffer_info, which must not be freed a second time */
+ if (adapter->tpd_ring[0].buffer_info) {
+ kfree(adapter->tpd_ring[0].buffer_info);
+ adapter->tpd_ring[0].buffer_info = NULL;
+ }
+}
+
+/*
+ * atl1c_setup_mem_resources - allocate Tx / RX descriptor resources
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ */
+static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
+ struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
+ struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
+ struct atl1c_ring_header *ring_header = &adapter->ring_header;
+ int num_rx_queues = adapter->num_rx_queues;
+ int size;
+ int i;
+ int count = 0;
+ int rx_desc_count = 0;
+ u32 offset = 0;
+
+ rrd_ring[0].count = rfd_ring[0].count;
+ for (i = 1; i < AT_MAX_TRANSMIT_QUEUE; i++)
+ tpd_ring[i].count = tpd_ring[0].count;
+
+ for (i = 1; i < adapter->num_rx_queues; i++)
+ rfd_ring[i].count = rrd_ring[i].count = rfd_ring[0].count;
+
+	/* two TPD queues: one high priority queue and one
+	 * normal priority queue */
+ size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 +
+ rfd_ring->count * num_rx_queues);
+ tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
+ if (unlikely(!tpd_ring->buffer_info)) {
+ dev_err(&pdev->dev, "kzalloc failed, size = %d\n",
+ size);
+ goto err_nomem;
+ }
+ for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
+ tpd_ring[i].buffer_info =
+ (struct atl1c_buffer *) (tpd_ring->buffer_info + count);
+ count += tpd_ring[i].count;
+ }
+
+ for (i = 0; i < num_rx_queues; i++) {
+ rfd_ring[i].buffer_info =
+ (struct atl1c_buffer *) (tpd_ring->buffer_info + count);
+ count += rfd_ring[i].count;
+ rx_desc_count += rfd_ring[i].count;
+ }
+ /*
+ * real ring DMA buffer
+ * each ring/block may need up to 8 bytes for alignment, hence the
+ * additional bytes tacked onto the end.
+ */
+ ring_header->size = size =
+ sizeof(struct atl1c_tpd_desc) * tpd_ring->count * 2 +
+ sizeof(struct atl1c_rx_free_desc) * rx_desc_count +
+ sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
+ sizeof(struct atl1c_hw_stats) +
+ 8 * 4 + 8 * 2 * num_rx_queues;
+
+ ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
+ &ring_header->dma);
+ if (unlikely(!ring_header->desc)) {
+ dev_err(&pdev->dev, "pci_alloc_consistend failed\n");
+ goto err_nomem;
+ }
+ memset(ring_header->desc, 0, ring_header->size);
+ /* init TPD ring */
+
+ tpd_ring[0].dma = roundup(ring_header->dma, 8);
+ offset = tpd_ring[0].dma - ring_header->dma;
+ for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
+ tpd_ring[i].dma = ring_header->dma + offset;
+ tpd_ring[i].desc = (u8 *) ring_header->desc + offset;
+ tpd_ring[i].size =
+ sizeof(struct atl1c_tpd_desc) * tpd_ring[i].count;
+ offset += roundup(tpd_ring[i].size, 8);
+ }
+ /* init RFD ring */
+ for (i = 0; i < num_rx_queues; i++) {
+ rfd_ring[i].dma = ring_header->dma + offset;
+ rfd_ring[i].desc = (u8 *) ring_header->desc + offset;
+ rfd_ring[i].size = sizeof(struct atl1c_rx_free_desc) *
+ rfd_ring[i].count;
+ offset += roundup(rfd_ring[i].size, 8);
+ }
+
+ /* init RRD ring */
+ for (i = 0; i < num_rx_queues; i++) {
+ rrd_ring[i].dma = ring_header->dma + offset;
+ rrd_ring[i].desc = (u8 *) ring_header->desc + offset;
+ rrd_ring[i].size = sizeof(struct atl1c_recv_ret_status) *
+ rrd_ring[i].count;
+ offset += roundup(rrd_ring[i].size, 8);
+ }
+
+ adapter->smb.dma = ring_header->dma + offset;
+ adapter->smb.smb = (u8 *)ring_header->desc + offset;
+ return 0;
+
+err_nomem:
+ kfree(tpd_ring->buffer_info);
+ return -ENOMEM;
+}
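+
+/* Resulting layout of the single DMA allocation built above (editorial
+ * summary), with every block rounded up to an 8-byte boundary:
+ *
+ *	ring_header->desc
+ *	  -> two TPD rings (normal and high priority transmit queues)
+ *	  -> one RFD ring per RX queue
+ *	  -> one RRD ring per RX queue
+ *	  -> SMB statistics block
+ */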
+
+static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
+{
+ struct atl1c_hw *hw = &adapter->hw;
+ struct atl1c_rfd_ring *rfd_ring = (struct atl1c_rfd_ring *)
+ adapter->rfd_ring;
+ struct atl1c_rrd_ring *rrd_ring = (struct atl1c_rrd_ring *)
+ adapter->rrd_ring;
+ struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
+ adapter->tpd_ring;
+ struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb;
+ struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb;
+ int i;
+ u32 data;
+
+ /* TPD */
+ AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI,
+ (u32)((tpd_ring[atl1c_trans_normal].dma &
+ AT_DMA_HI_ADDR_MASK) >> 32));
+ /* just enable normal priority TX queue */
+ AT_WRITE_REG(hw, REG_NTPD_HEAD_ADDR_LO,
+ (u32)(tpd_ring[atl1c_trans_normal].dma &
+ AT_DMA_LO_ADDR_MASK));
+ AT_WRITE_REG(hw, REG_HTPD_HEAD_ADDR_LO,
+ (u32)(tpd_ring[atl1c_trans_high].dma &
+ AT_DMA_LO_ADDR_MASK));
+ AT_WRITE_REG(hw, REG_TPD_RING_SIZE,
+ (u32)(tpd_ring[0].count & TPD_RING_SIZE_MASK));
+
+
+ /* RFD */
+ AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI,
+ (u32)((rfd_ring[0].dma & AT_DMA_HI_ADDR_MASK) >> 32));
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ AT_WRITE_REG(hw, atl1c_rfd_addr_lo_regs[i],
+ (u32)(rfd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
+
+ AT_WRITE_REG(hw, REG_RFD_RING_SIZE,
+ rfd_ring[0].count & RFD_RING_SIZE_MASK);
+ AT_WRITE_REG(hw, REG_RX_BUF_SIZE,
+ adapter->rx_buffer_len & RX_BUF_SIZE_MASK);
+
+ /* RRD */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ AT_WRITE_REG(hw, atl1c_rrd_addr_lo_regs[i],
+ (u32)(rrd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
+ AT_WRITE_REG(hw, REG_RRD_RING_SIZE,
+ (rrd_ring[0].count & RRD_RING_SIZE_MASK));
+
+ /* CMB */
+ AT_WRITE_REG(hw, REG_CMB_BASE_ADDR_LO, cmb->dma & AT_DMA_LO_ADDR_MASK);
+
+ /* SMB */
+ AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_HI,
+ (u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32));
+ AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO,
+ (u32)(smb->dma & AT_DMA_LO_ADDR_MASK));
+ if (hw->nic_type == athr_l2c_b) {
+ AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L);
+ AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L);
+ AT_WRITE_REG(hw, REG_SRAM_RXF_ADDR, 0x029f0000L);
+ AT_WRITE_REG(hw, REG_SRAM_RFD0_INFO, 0x02bf02a0L);
+ AT_WRITE_REG(hw, REG_SRAM_TXF_ADDR, 0x03bf02c0L);
+ AT_WRITE_REG(hw, REG_SRAM_TRD_ADDR, 0x03df03c0L);
+ AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0); /* TX watermark, to enter l1 state.*/
+ AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0); /* RXD threshold.*/
+ }
+ if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d_2) {
+ /* Power Saving for L2c_B */
+ AT_READ_REG(hw, REG_SERDES_LOCK, &data);
+ data |= SERDES_MAC_CLK_SLOWDOWN;
+ data |= SERDES_PYH_CLK_SLOWDOWN;
+ AT_WRITE_REG(hw, REG_SERDES_LOCK, data);
+ }
+ /* Load all of base address above */
+ AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
+}
+
+static void atl1c_configure_tx(struct atl1c_adapter *adapter)
+{
+ struct atl1c_hw *hw = &adapter->hw;
+ u32 dev_ctrl_data;
+ u32 max_pay_load;
+ u16 tx_offload_thresh;
+ u32 txq_ctrl_data;
+ u32 max_pay_load_data;
+
+ tx_offload_thresh = MAX_TX_OFFLOAD_THRESH;
+ AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH,
+ (tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK);
+ AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data);
+ max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) &
+ DEVICE_CTRL_MAX_PAYLOAD_MASK;
+ hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
+ max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) &
+ DEVICE_CTRL_MAX_RREQ_SZ_MASK;
+ hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
+
+ txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) <<
+ TXQ_NUM_TPD_BURST_SHIFT;
+ if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE)
+ txq_ctrl_data |= TXQ_CTRL_ENH_MODE;
+ max_pay_load_data = (atl1c_pay_load_size[hw->dmar_block] &
+ TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT;
+ if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2)
+ max_pay_load_data >>= 1;
+ txq_ctrl_data |= max_pay_load_data;
+
+ AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data);
+}
+
+static void atl1c_configure_rx(struct atl1c_adapter *adapter)
+{
+ struct atl1c_hw *hw = &adapter->hw;
+ u32 rxq_ctrl_data;
+
+ rxq_ctrl_data = (hw->rfd_burst & RXQ_RFD_BURST_NUM_MASK) <<
+ RXQ_RFD_BURST_NUM_SHIFT;
+
+ if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM)
+ rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN;
+ if (hw->rss_type == atl1c_rss_ipv4)
+ rxq_ctrl_data |= RSS_HASH_IPV4;
+ if (hw->rss_type == atl1c_rss_ipv4_tcp)
+ rxq_ctrl_data |= RSS_HASH_IPV4_TCP;
+ if (hw->rss_type == atl1c_rss_ipv6)
+ rxq_ctrl_data |= RSS_HASH_IPV6;
+ if (hw->rss_type == atl1c_rss_ipv6_tcp)
+ rxq_ctrl_data |= RSS_HASH_IPV6_TCP;
+ if (hw->rss_type != atl1c_rss_disable)
+ rxq_ctrl_data |= RRS_HASH_CTRL_EN;
+
+ rxq_ctrl_data |= (hw->rss_mode & RSS_MODE_MASK) <<
+ RSS_MODE_SHIFT;
+ rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) <<
+ RSS_HASH_BITS_SHIFT;
+ if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON)
+ rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_1M &
+ ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT;
+
+ AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
+}
+
+static void atl1c_configure_rss(struct atl1c_adapter *adapter)
+{
+ struct atl1c_hw *hw = &adapter->hw;
+
+ AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab);
+ AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu);
+}
+
+static void atl1c_configure_dma(struct atl1c_adapter *adapter)
+{
+ struct atl1c_hw *hw = &adapter->hw;
+ u32 dma_ctrl_data;
+
+ dma_ctrl_data = DMA_CTRL_DMAR_REQ_PRI;
+ if (hw->ctrl_flags & ATL1C_CMB_ENABLE)
+ dma_ctrl_data |= DMA_CTRL_CMB_EN;
+ if (hw->ctrl_flags & ATL1C_SMB_ENABLE)
+ dma_ctrl_data |= DMA_CTRL_SMB_EN;
+ else
+ dma_ctrl_data |= MAC_CTRL_SMB_DIS;
+
+ switch (hw->dma_order) {
+ case atl1c_dma_ord_in:
+ dma_ctrl_data |= DMA_CTRL_DMAR_IN_ORDER;
+ break;
+ case atl1c_dma_ord_enh:
+ dma_ctrl_data |= DMA_CTRL_DMAR_ENH_ORDER;
+ break;
+ case atl1c_dma_ord_out:
+ dma_ctrl_data |= DMA_CTRL_DMAR_OUT_ORDER;
+ break;
+ default:
+ break;
+ }
+
+ dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
+ << DMA_CTRL_DMAR_BURST_LEN_SHIFT;
+ dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
+ << DMA_CTRL_DMAW_BURST_LEN_SHIFT;
+ dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK)
+ << DMA_CTRL_DMAR_DLY_CNT_SHIFT;
+ dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK)
+ << DMA_CTRL_DMAW_DLY_CNT_SHIFT;
+
+ AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
+}
+
+/*
+ * Stop the mac, transmit and receive units
+ * hw - Struct containing variables accessed by shared code
+ * return : 0 or idle status (if error)
+ */
+static int atl1c_stop_mac(struct atl1c_hw *hw)
+{
+ u32 data;
+
+ AT_READ_REG(hw, REG_RXQ_CTRL, &data);
+ data &= ~(RXQ1_CTRL_EN | RXQ2_CTRL_EN |
+ RXQ3_CTRL_EN | RXQ_CTRL_EN);
+ AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
+
+ AT_READ_REG(hw, REG_TXQ_CTRL, &data);
+ data &= ~TXQ_CTRL_EN;
+	AT_WRITE_REG(hw, REG_TXQ_CTRL, data);
+
+ atl1c_wait_until_idle(hw);
+
+ AT_READ_REG(hw, REG_MAC_CTRL, &data);
+ data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN);
+ AT_WRITE_REG(hw, REG_MAC_CTRL, data);
+
+ return (int)atl1c_wait_until_idle(hw);
+}
+
+static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw)
+{
+ u32 data;
+
+ AT_READ_REG(hw, REG_RXQ_CTRL, &data);
+ switch (hw->adapter->num_rx_queues) {
+ case 4:
+ data |= (RXQ3_CTRL_EN | RXQ2_CTRL_EN | RXQ1_CTRL_EN);
+ break;
+ case 3:
+ data |= (RXQ2_CTRL_EN | RXQ1_CTRL_EN);
+ break;
+ case 2:
+ data |= RXQ1_CTRL_EN;
+ break;
+ default:
+ break;
+ }
+ data |= RXQ_CTRL_EN;
+ AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
+}
+
+static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw)
+{
+ u32 data;
+
+ AT_READ_REG(hw, REG_TXQ_CTRL, &data);
+ data |= TXQ_CTRL_EN;
+ AT_WRITE_REG(hw, REG_TXQ_CTRL, data);
+}
+
+/*
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ * hw - Struct containing variables accessed by shared code
+ * return : 0 or idle status (if error)
+ */
+static int atl1c_reset_mac(struct atl1c_hw *hw)
+{
+ struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct pci_dev *pdev = adapter->pdev;
+ u32 master_ctrl_data = 0;
+
+ AT_WRITE_REG(hw, REG_IMR, 0);
+ AT_WRITE_REG(hw, REG_ISR, ISR_DIS_INT);
+
+ atl1c_stop_mac(hw);
+ /*
+ * Issue Soft Reset to the MAC. This will reset the chip's
+	 * transmit, receive and DMA units. It will not affect
+ * the current PCI configuration. The global reset bit is self-
+ * clearing, and should clear within a microsecond.
+ */
+ AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
+ master_ctrl_data |= MASTER_CTRL_OOB_DIS_OFF;
+ AT_WRITE_REGW(hw, REG_MASTER_CTRL, ((master_ctrl_data | MASTER_CTRL_SOFT_RST)
+ & 0xFFFF));
+
+ AT_WRITE_FLUSH(hw);
+ msleep(10);
+ /* Wait at least 10ms for All module to be Idle */
+
+ if (atl1c_wait_until_idle(hw)) {
+ dev_err(&pdev->dev,
+ "MAC state machine can't be idle since"
+ " disabled for 10ms second\n");
+ return -1;
+ }
+ return 0;
+}
+
+static void atl1c_disable_l0s_l1(struct atl1c_hw *hw)
+{
+ u32 pm_ctrl_data;
+
+ AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
+ pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
+ PM_CTRL_L1_ENTRY_TIMER_SHIFT);
+ pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
+ pm_ctrl_data &= ~PM_CTRL_MAC_ASPM_CHK;
+ pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
+
+ pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
+ pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
+ pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
+ AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
+}
+
+/*
+ * Set ASPM state.
+ * Enable/disable L0s/L1 depending on link state.
+ */
+static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
+{
+ u32 pm_ctrl_data;
+ u32 link_ctrl_data;
+ u32 link_l1_timer = 0xF;
+
+ AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
+ AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
+
+ pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
+ pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
+ PM_CTRL_L1_ENTRY_TIMER_SHIFT);
+ pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK <<
+ PM_CTRL_LCKDET_TIMER_SHIFT);
+ pm_ctrl_data |= AT_LCKDET_TIMER << PM_CTRL_LCKDET_TIMER_SHIFT;
+
+ if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
+ hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
+ link_ctrl_data &= ~LINK_CTRL_EXT_SYNC;
+ if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) {
+ if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10)
+ link_ctrl_data |= LINK_CTRL_EXT_SYNC;
+ }
+
+ AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data);
+
+ pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER;
+ pm_ctrl_data &= ~(PM_CTRL_PM_REQ_TIMER_MASK <<
+ PM_CTRL_PM_REQ_TIMER_SHIFT);
+ pm_ctrl_data |= AT_ASPM_L1_TIMER <<
+ PM_CTRL_PM_REQ_TIMER_SHIFT;
+ pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN;
+ pm_ctrl_data &= ~PM_CTRL_HOTRST;
+ pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT;
+ pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1;
+ }
+ pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
+ if (linkup) {
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
+ if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
+ pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
+ if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
+ pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN;
+
+ if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
+ hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
+ if (hw->nic_type == athr_l2c_b)
+ if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE))
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
+ pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
+ pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
+ pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
+ pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
+ if (hw->adapter->link_speed == SPEED_100 ||
+ hw->adapter->link_speed == SPEED_1000) {
+ pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
+ PM_CTRL_L1_ENTRY_TIMER_SHIFT);
+ if (hw->nic_type == athr_l2c_b)
+ link_l1_timer = 7;
+ else if (hw->nic_type == athr_l2c_b2 ||
+ hw->nic_type == athr_l1d_2)
+ link_l1_timer = 4;
+ pm_ctrl_data |= link_l1_timer <<
+ PM_CTRL_L1_ENTRY_TIMER_SHIFT;
+ }
+ } else {
+ pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
+ pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
+ pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
+ pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
+
+ }
+ } else {
+ pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
+ pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
+ pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
+
+ if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
+ pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
+ else
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
+ }
+ AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
+}
+
+static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
+{
+ struct atl1c_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 mac_ctrl_data;
+
+ mac_ctrl_data = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
+ mac_ctrl_data |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
+
+ if (adapter->link_duplex == FULL_DUPLEX) {
+ hw->mac_duplex = true;
+ mac_ctrl_data |= MAC_CTRL_DUPLX;
+ }
+
+ if (adapter->link_speed == SPEED_1000)
+ hw->mac_speed = atl1c_mac_speed_1000;
+ else
+ hw->mac_speed = atl1c_mac_speed_10_100;
+
+ mac_ctrl_data |= (hw->mac_speed & MAC_CTRL_SPEED_MASK) <<
+ MAC_CTRL_SPEED_SHIFT;
+
+ mac_ctrl_data |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
+ mac_ctrl_data |= ((hw->preamble_len & MAC_CTRL_PRMLEN_MASK) <<
+ MAC_CTRL_PRMLEN_SHIFT);
+
+ __atl1c_vlan_mode(netdev->features, &mac_ctrl_data);
+
+ mac_ctrl_data |= MAC_CTRL_BC_EN;
+ if (netdev->flags & IFF_PROMISC)
+ mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
+ if (netdev->flags & IFF_ALLMULTI)
+ mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
+
+ mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN;
+ if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2 ||
+ hw->nic_type == athr_l1d_2) {
+ mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW;
+ mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32;
+ }
+ AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
+}
+
+/*
+ * atl1c_configure - Configure Transmit&Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx /Rx unit of the MAC after a reset.
+ */
+static int atl1c_configure(struct atl1c_adapter *adapter)
+{
+ struct atl1c_hw *hw = &adapter->hw;
+ u32 master_ctrl_data = 0;
+ u32 intr_modrt_data;
+ u32 data;
+
+ /* clear interrupt status */
+ AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF);
+ /* Clear any WOL status */
+ AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
+	/* set interrupt retrigger timer:
+	 * HW re-asserts the interrupt to the system after waiting this
+	 * long for software to acknowledge the previous one.
+	 */
+
+ data = CLK_GATING_EN_ALL;
+ if (hw->ctrl_flags & ATL1C_CLK_GATING_EN) {
+ if (hw->nic_type == athr_l2c_b)
+ data &= ~CLK_GATING_RXMAC_EN;
+ } else
+ data = 0;
+ AT_WRITE_REG(hw, REG_CLK_GATING_CTRL, data);
+
+ AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER,
+ hw->ict & INT_RETRIG_TIMER_MASK);
+
+ atl1c_configure_des_ring(adapter);
+
+ if (hw->ctrl_flags & ATL1C_INTR_MODRT_ENABLE) {
+ intr_modrt_data = (hw->tx_imt & IRQ_MODRT_TIMER_MASK) <<
+ IRQ_MODRT_TX_TIMER_SHIFT;
+ intr_modrt_data |= (hw->rx_imt & IRQ_MODRT_TIMER_MASK) <<
+ IRQ_MODRT_RX_TIMER_SHIFT;
+ AT_WRITE_REG(hw, REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data);
+ master_ctrl_data |=
+ MASTER_CTRL_TX_ITIMER_EN | MASTER_CTRL_RX_ITIMER_EN;
+ }
+
+ if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ)
+ master_ctrl_data |= MASTER_CTRL_INT_RDCLR;
+
+ master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN;
+ AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
+
+ if (hw->ctrl_flags & ATL1C_CMB_ENABLE) {
+ AT_WRITE_REG(hw, REG_CMB_TPD_THRESH,
+ hw->cmb_tpd & CMB_TPD_THRESH_MASK);
+ AT_WRITE_REG(hw, REG_CMB_TX_TIMER,
+ hw->cmb_tx_timer & CMB_TX_TIMER_MASK);
+ }
+
+ if (hw->ctrl_flags & ATL1C_SMB_ENABLE)
+ AT_WRITE_REG(hw, REG_SMB_STAT_TIMER,
+ hw->smb_timer & SMB_STAT_TIMER_MASK);
+ /* set MTU */
+ AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN +
+ VLAN_HLEN + ETH_FCS_LEN);
+ /* HDS, disable */
+ AT_WRITE_REG(hw, REG_HDS_CTRL, 0);
+
+ atl1c_configure_tx(adapter);
+ atl1c_configure_rx(adapter);
+ atl1c_configure_rss(adapter);
+ atl1c_configure_dma(adapter);
+
+ return 0;
+}
+
+static void atl1c_update_hw_stats(struct atl1c_adapter *adapter)
+{
+ u16 hw_reg_addr = 0;
+ unsigned long *stats_item = NULL;
+ u32 data;
+
+ /* update rx status */
+ hw_reg_addr = REG_MAC_RX_STATUS_BIN;
+ stats_item = &adapter->hw_stats.rx_ok;
+ while (hw_reg_addr <= REG_MAC_RX_STATUS_END) {
+ AT_READ_REG(&adapter->hw, hw_reg_addr, &data);
+ *stats_item += data;
+ stats_item++;
+ hw_reg_addr += 4;
+ }
+	/* update tx status */
+ hw_reg_addr = REG_MAC_TX_STATUS_BIN;
+ stats_item = &adapter->hw_stats.tx_ok;
+ while (hw_reg_addr <= REG_MAC_TX_STATUS_END) {
+ AT_READ_REG(&adapter->hw, hw_reg_addr, &data);
+ *stats_item += data;
+ stats_item++;
+ hw_reg_addr += 4;
+ }
+}
+
+/*
+ * atl1c_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ */
+static struct net_device_stats *atl1c_get_stats(struct net_device *netdev)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ struct atl1c_hw_stats *hw_stats = &adapter->hw_stats;
+ struct net_device_stats *net_stats = &netdev->stats;
+
+ atl1c_update_hw_stats(adapter);
+ net_stats->rx_packets = hw_stats->rx_ok;
+ net_stats->tx_packets = hw_stats->tx_ok;
+ net_stats->rx_bytes = hw_stats->rx_byte_cnt;
+ net_stats->tx_bytes = hw_stats->tx_byte_cnt;
+ net_stats->multicast = hw_stats->rx_mcast;
+ net_stats->collisions = hw_stats->tx_1_col +
+ hw_stats->tx_2_col * 2 +
+ hw_stats->tx_late_col + hw_stats->tx_abort_col;
+ net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err +
+ hw_stats->rx_len_err + hw_stats->rx_sz_ov +
+ hw_stats->rx_rrd_ov + hw_stats->rx_align_err;
+ net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov;
+ net_stats->rx_length_errors = hw_stats->rx_len_err;
+ net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
+ net_stats->rx_frame_errors = hw_stats->rx_align_err;
+ net_stats->rx_over_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+
+ net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+
+ net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col +
+ hw_stats->tx_underrun + hw_stats->tx_trunc;
+ net_stats->tx_fifo_errors = hw_stats->tx_underrun;
+ net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
+ net_stats->tx_window_errors = hw_stats->tx_late_col;
+
+ return net_stats;
+}
+
+static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter)
+{
+ u16 phy_data;
+
+ spin_lock(&adapter->mdio_lock);
+ atl1c_read_phy_reg(&adapter->hw, MII_ISR, &phy_data);
+ spin_unlock(&adapter->mdio_lock);
+}
+
+static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
+ enum atl1c_trans_queue type)
+{
+ struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
+ &adapter->tpd_ring[type];
+ struct atl1c_buffer *buffer_info;
+ struct pci_dev *pdev = adapter->pdev;
+ u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
+ u16 hw_next_to_clean;
+ u16 shift;
+ u32 data;
+
+ if (type == atl1c_trans_high)
+ shift = MB_HTPD_CONS_IDX_SHIFT;
+ else
+ shift = MB_NTPD_CONS_IDX_SHIFT;
+
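+ /* REG_MB_PRIO_CONS_IDX packs the hardware consumer indices of
+ * both the high- and normal-priority TX queues into one mailbox
+ * register; the shift selected above picks out this queue's field.
+ */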
+ AT_READ_REG(&adapter->hw, REG_MB_PRIO_CONS_IDX, &data);
+ hw_next_to_clean = (data >> shift) & MB_PRIO_PROD_IDX_MASK;
+
+ while (next_to_clean != hw_next_to_clean) {
+ buffer_info = &tpd_ring->buffer_info[next_to_clean];
+ atl1c_clean_buffer(pdev, buffer_info, 1);
+ if (++next_to_clean == tpd_ring->count)
+ next_to_clean = 0;
+ atomic_set(&tpd_ring->next_to_clean, next_to_clean);
+ }
+
+ if (netif_queue_stopped(adapter->netdev) &&
+ netif_carrier_ok(adapter->netdev)) {
+ netif_wake_queue(adapter->netdev);
+ }
+
+ return true;
+}
+
+/*
+ * atl1c_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ * @pt_regs: CPU registers structure
+ */
+static irqreturn_t atl1c_intr(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+ struct atl1c_hw *hw = &adapter->hw;
+ int max_ints = AT_MAX_INT_WORK;
+ int handled = IRQ_NONE;
+ u32 status;
+ u32 reg_data;
+
+ do {
+ AT_READ_REG(hw, REG_ISR, &reg_data);
+ status = reg_data & hw->intr_mask;
+
+ if (status == 0 || (status & ISR_DIS_INT) != 0) {
+ if (max_ints != AT_MAX_INT_WORK)
+ handled = IRQ_HANDLED;
+ break;
+ }
+ /* link event */
+ if (status & ISR_GPHY)
+ atl1c_clear_phy_int(adapter);
+ /* Ack ISR */
+ AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
+ if (status & ISR_RX_PKT) {
+ if (likely(napi_schedule_prep(&adapter->napi))) {
+ hw->intr_mask &= ~ISR_RX_PKT;
+ AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
+ __napi_schedule(&adapter->napi);
+ }
+ }
+ if (status & ISR_TX_PKT)
+ atl1c_clean_tx_irq(adapter, atl1c_trans_normal);
+
+ handled = IRQ_HANDLED;
+ /* check if PCIE PHY Link down */
+ if (status & ISR_ERROR) {
+ if (netif_msg_hw(adapter))
+ dev_err(&pdev->dev,
+ "atl1c hardware error (status = 0x%x)\n",
+ status & ISR_ERROR);
+ /* reset MAC */
+ adapter->work_event |= ATL1C_WORK_EVENT_RESET;
+ schedule_work(&adapter->common_task);
+ return IRQ_HANDLED;
+ }
+
+ if (status & ISR_OVER)
+ if (netif_msg_intr(adapter))
+ dev_warn(&pdev->dev,
+ "TX/RX overflow (status = 0x%x)\n",
+ status & ISR_OVER);
+
+ /* link event */
+ if (status & (ISR_GPHY | ISR_MANUAL)) {
+ netdev->stats.tx_carrier_errors++;
+ atl1c_link_chg_event(adapter);
+ break;
+ }
+
+ } while (--max_ints > 0);
+ /* re-enable interrupt */
+ AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
+ return handled;
+}
+
+static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
+ struct sk_buff *skb, struct atl1c_recv_ret_status *prrs)
+{
+ /*
+ * The pid field in the RRS is not always correct, so we
+ * cannot tell whether the packet is fragmented or not;
+ * report CHECKSUM_NONE to the kernel instead.
+ */
+ skb_checksum_none_assert(skb);
+}
+
+static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid)
+{
+ struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[ringid];
+ struct pci_dev *pdev = adapter->pdev;
+ struct atl1c_buffer *buffer_info, *next_info;
+ struct sk_buff *skb;
+ void *vir_addr = NULL;
+ u16 num_alloc = 0;
+ u16 rfd_next_to_use, next_next;
+ struct atl1c_rx_free_desc *rfd_desc;
+
+ next_next = rfd_next_to_use = rfd_ring->next_to_use;
+ if (++next_next == rfd_ring->count)
+ next_next = 0;
+ buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
+ next_info = &rfd_ring->buffer_info[next_next];
+
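+ /* Refill until the slot after the one being filled is no longer
+ * free; keeping one slot permanently unused lets a full ring be
+ * told apart from an empty one without a separate counter. */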
+ while (next_info->flags & ATL1C_BUFFER_FREE) {
+ rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
+
+ skb = dev_alloc_skb(adapter->rx_buffer_len);
+ if (unlikely(!skb)) {
+ if (netif_msg_rx_err(adapter))
+ dev_warn(&pdev->dev, "alloc rx buffer failed\n");
+ break;
+ }
+
+ /*
+ * Make buffer alignment 2 beyond a 16 byte boundary;
+ * this will result in a 16 byte aligned IP header after
+ * the 14 byte MAC header is removed.
+ */
+ vir_addr = skb->data;
+ ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
+ buffer_info->skb = skb;
+ buffer_info->length = adapter->rx_buffer_len;
+ buffer_info->dma = pci_map_single(pdev, vir_addr,
+ buffer_info->length,
+ PCI_DMA_FROMDEVICE);
+ ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
+ ATL1C_PCIMAP_FROMDEVICE);
+ rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+ rfd_next_to_use = next_next;
+ if (++next_next == rfd_ring->count)
+ next_next = 0;
+ buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
+ next_info = &rfd_ring->buffer_info[next_next];
+ num_alloc++;
+ }
+
+ if (num_alloc) {
+ /* TODO: update mailbox here */
+ wmb();
+ rfd_ring->next_to_use = rfd_next_to_use;
+ AT_WRITE_REG(&adapter->hw, atl1c_rfd_prod_idx_regs[ringid],
+ rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK);
+ }
+
+ return num_alloc;
+}
+
+static void atl1c_clean_rrd(struct atl1c_rrd_ring *rrd_ring,
+ struct atl1c_recv_ret_status *rrs, u16 num)
+{
+ u16 i;
+ /* the relationship between rrd and rfd is one-to-one */
+ for (i = 0; i < num; i++, rrs = ATL1C_RRD_DESC(rrd_ring,
+ rrd_ring->next_to_clean)) {
+ rrs->word3 &= ~RRS_RXD_UPDATED;
+ if (++rrd_ring->next_to_clean == rrd_ring->count)
+ rrd_ring->next_to_clean = 0;
+ }
+}
+
+static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring,
+ struct atl1c_recv_ret_status *rrs, u16 num)
+{
+ u16 i;
+ u16 rfd_index;
+ struct atl1c_buffer *buffer_info = rfd_ring->buffer_info;
+
+ rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) &
+ RRS_RX_RFD_INDEX_MASK;
+ for (i = 0; i < num; i++) {
+ buffer_info[rfd_index].skb = NULL;
+ ATL1C_SET_BUFFER_STATE(&buffer_info[rfd_index],
+ ATL1C_BUFFER_FREE);
+ if (++rfd_index == rfd_ring->count)
+ rfd_index = 0;
+ }
+ rfd_ring->next_to_clean = rfd_index;
+}
+
+static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
+ int *work_done, int work_to_do)
+{
+ u16 rfd_num, rfd_index;
+ u16 count = 0;
+ u16 length;
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+ struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[que];
+ struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[que];
+ struct sk_buff *skb;
+ struct atl1c_recv_ret_status *rrs;
+ struct atl1c_buffer *buffer_info;
+
+ while (1) {
+ if (*work_done >= work_to_do)
+ break;
+ rrs = ATL1C_RRD_DESC(rrd_ring, rrd_ring->next_to_clean);
+ if (likely(RRS_RXD_IS_VALID(rrs->word3))) {
+ rfd_num = (rrs->word0 >> RRS_RX_RFD_CNT_SHIFT) &
+ RRS_RX_RFD_CNT_MASK;
+ if (unlikely(rfd_num != 1))
+ /* TODO: support multiple RFDs */
+ if (netif_msg_rx_err(adapter))
+ dev_warn(&pdev->dev,
+ "Multiple RFDs not supported yet!\n");
+ goto rrs_checked;
+ } else {
+ break;
+ }
+rrs_checked:
+ atl1c_clean_rrd(rrd_ring, rrs, rfd_num);
+ if (rrs->word3 & (RRS_RX_ERR_SUM | RRS_802_3_LEN_ERR)) {
+ atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
+ if (netif_msg_rx_err(adapter))
+ dev_warn(&pdev->dev,
+ "wrong packet! rrs word3 is %x\n",
+ rrs->word3);
+ continue;
+ }
+
+ length = le16_to_cpu((rrs->word3 >> RRS_PKT_SIZE_SHIFT) &
+ RRS_PKT_SIZE_MASK);
+ /* Good Receive */
+ if (likely(rfd_num == 1)) {
+ rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) &
+ RRS_RX_RFD_INDEX_MASK;
+ buffer_info = &rfd_ring->buffer_info[rfd_index];
+ pci_unmap_single(pdev, buffer_info->dma,
+ buffer_info->length, PCI_DMA_FROMDEVICE);
+ skb = buffer_info->skb;
+ } else {
+ /* TODO: handle multiple RFDs */
+ if (netif_msg_rx_err(adapter))
+ dev_warn(&pdev->dev,
+ "Multiple RFDs not supported yet!\n");
+ break;
+ }
+ atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
+ skb_put(skb, length - ETH_FCS_LEN);
+ skb->protocol = eth_type_trans(skb, netdev);
+ atl1c_rx_checksum(adapter, skb, rrs);
+ if (rrs->word3 & RRS_VLAN_INS) {
+ u16 vlan;
+
+ AT_TAG_TO_VLAN(rrs->vlan_tag, vlan);
+ vlan = le16_to_cpu(vlan);
+ __vlan_hwaccel_put_tag(skb, vlan);
+ }
+ netif_receive_skb(skb);
+
+ (*work_done)++;
+ count++;
+ }
+ if (count)
+ atl1c_alloc_rx_buffer(adapter, que);
+}
+
+/*
+ * atl1c_clean - NAPI Rx polling callback
+ * @adapter: board private structure
+ */
+static int atl1c_clean(struct napi_struct *napi, int budget)
+{
+ struct atl1c_adapter *adapter =
+ container_of(napi, struct atl1c_adapter, napi);
+ int work_done = 0;
+
+ /* Keep link state information with original netdev */
+ if (!netif_carrier_ok(adapter->netdev))
+ goto quit_polling;
+ /* just enable one RXQ */
+ atl1c_clean_rx_irq(adapter, 0, &work_done, budget);
+
+ if (work_done < budget) {
+quit_polling:
+ napi_complete(napi);
+ adapter->hw.intr_mask |= ISR_RX_PKT;
+ AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
+ }
+ return work_done;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void atl1c_netpoll(struct net_device *netdev)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+
+ disable_irq(adapter->pdev->irq);
+ atl1c_intr(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+}
+#endif
+
+static inline u16 atl1c_tpd_avail(struct atl1c_adapter *adapter, enum atl1c_trans_queue type)
+{
+ struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
+ u16 next_to_use = 0;
+ u16 next_to_clean = 0;
+
+ next_to_clean = atomic_read(&tpd_ring->next_to_clean);
+ next_to_use = tpd_ring->next_to_use;
+
+ return (next_to_clean > next_to_use) ?
+ (next_to_clean - next_to_use - 1) :
+ (tpd_ring->count + next_to_clean - next_to_use - 1);
+}
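+
+/* Worked example: with count = 1024, next_to_clean = 10 and
+ * next_to_use = 1000, atl1c_tpd_avail returns
+ * 1024 + 10 - 1000 - 1 = 33 free descriptors; the "- 1" keeps one
+ * slot unused so that a completely full ring never looks empty.
+ */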
+
+/*
+ * get next usable tpd
+ * Note: the caller should use atl1c_tpd_avail to make sure
+ * there are enough tpds to use
+ */
+static struct atl1c_tpd_desc *atl1c_get_tpd(struct atl1c_adapter *adapter,
+ enum atl1c_trans_queue type)
+{
+ struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
+ struct atl1c_tpd_desc *tpd_desc;
+ u16 next_to_use = 0;
+
+ next_to_use = tpd_ring->next_to_use;
+ if (++tpd_ring->next_to_use == tpd_ring->count)
+ tpd_ring->next_to_use = 0;
+ tpd_desc = ATL1C_TPD_DESC(tpd_ring, next_to_use);
+ memset(tpd_desc, 0, sizeof(struct atl1c_tpd_desc));
+ return tpd_desc;
+}
+
+static struct atl1c_buffer *
+atl1c_get_tx_buffer(struct atl1c_adapter *adapter, struct atl1c_tpd_desc *tpd)
+{
+ struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
+
+ return &tpd_ring->buffer_info[tpd -
+ (struct atl1c_tpd_desc *)tpd_ring->desc];
+}
+
+/* Calculate the number of transmit packet descriptors (TPDs) needed */
+static u16 atl1c_cal_tpd_req(const struct sk_buff *skb)
+{
+ u16 tpd_req;
+ u16 proto_hdr_len = 0;
+
+ tpd_req = skb_shinfo(skb)->nr_frags + 1;
+
+ if (skb_is_gso(skb)) {
+ proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ if (proto_hdr_len < skb_headlen(skb))
+ tpd_req++;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ tpd_req++;
+ }
+ return tpd_req;
+}
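+
+/* Example: a TSO IPv4 skb with two page fragments and both headers
+ * and payload in the linear area needs nr_frags + 1 = 3 TPDs, plus
+ * one because the linear area is split into header and data TPDs;
+ * an IPv6 TSO packet needs one more again for the extended TPD.
+ */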
+
+static int atl1c_tso_csum(struct atl1c_adapter *adapter,
+ struct sk_buff *skb,
+ struct atl1c_tpd_desc **tpd,
+ enum atl1c_trans_queue type)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ u8 hdr_len;
+ u32 real_len;
+ unsigned short offload_type;
+ int err;
+
+ if (skb_is_gso(skb)) {
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (unlikely(err))
+ return -1;
+ }
+ offload_type = skb_shinfo(skb)->gso_type;
+
+ if (offload_type & SKB_GSO_TCPV4) {
+ real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
+ + ntohs(ip_hdr(skb)->tot_len));
+
+ if (real_len < skb->len)
+ pskb_trim(skb, real_len);
+
+ hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ if (unlikely(skb->len == hdr_len)) {
+ /* only checksum is needed */
+ if (netif_msg_tx_queued(adapter))
+ dev_warn(&pdev->dev,
+ "IPv4 TSO with zero data?\n");
+ goto check_sum;
+ } else {
+ ip_hdr(skb)->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(
+ ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ (*tpd)->word1 |= 1 << TPD_IPV4_PACKET_SHIFT;
+ }
+ }
+
+ if (offload_type & SKB_GSO_TCPV6) {
+ struct atl1c_tpd_ext_desc *etpd =
+ *(struct atl1c_tpd_ext_desc **)(tpd);
+
+ memset(etpd, 0, sizeof(struct atl1c_tpd_ext_desc));
+ *tpd = atl1c_get_tpd(adapter, type);
+ ipv6_hdr(skb)->payload_len = 0;
+ /* check for a zero-length payload */
+ hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ if (unlikely(skb->len == hdr_len)) {
+ /* only checksum is needed */
+ if (netif_msg_tx_queued(adapter))
+ dev_warn(&pdev->dev,
+ "IPv6 TSO with zero data?\n");
+ goto check_sum;
+ } else
+ tcp_hdr(skb)->check = ~csum_ipv6_magic(
+ &ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ etpd->word1 |= 1 << TPD_LSO_EN_SHIFT;
+ etpd->word1 |= 1 << TPD_LSO_VER_SHIFT;
+ etpd->pkt_len = cpu_to_le32(skb->len);
+ (*tpd)->word1 |= 1 << TPD_LSO_VER_SHIFT;
+ }
+
+ (*tpd)->word1 |= 1 << TPD_LSO_EN_SHIFT;
+ (*tpd)->word1 |= (skb_transport_offset(skb) & TPD_TCPHDR_OFFSET_MASK) <<
+ TPD_TCPHDR_OFFSET_SHIFT;
+ (*tpd)->word1 |= (skb_shinfo(skb)->gso_size & TPD_MSS_MASK) <<
+ TPD_MSS_SHIFT;
+ return 0;
+ }
+
+check_sum:
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ u8 css, cso;
+ cso = skb_checksum_start_offset(skb);
+
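+ /* The descriptor stores checksum offsets in units of 2 bytes,
+ * hence the odd-offset rejection below and the ">> 1" when the
+ * offsets are written into the TPD. */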
+ if (unlikely(cso & 0x1)) {
+ if (netif_msg_tx_err(adapter))
+ dev_err(&adapter->pdev->dev,
+ "payload offset should not an event number\n");
+ return -1;
+ } else {
+ css = cso + skb->csum_offset;
+
+ (*tpd)->word1 |= ((cso >> 1) & TPD_PLOADOFFSET_MASK) <<
+ TPD_PLOADOFFSET_SHIFT;
+ (*tpd)->word1 |= ((css >> 1) & TPD_CCSUM_OFFSET_MASK) <<
+ TPD_CCSUM_OFFSET_SHIFT;
+ (*tpd)->word1 |= 1 << TPD_CCSUM_EN_SHIFT;
+ }
+ }
+ return 0;
+}
+
+static void atl1c_tx_map(struct atl1c_adapter *adapter,
+ struct sk_buff *skb, struct atl1c_tpd_desc *tpd,
+ enum atl1c_trans_queue type)
+{
+ struct atl1c_tpd_desc *use_tpd = NULL;
+ struct atl1c_buffer *buffer_info = NULL;
+ u16 buf_len = skb_headlen(skb);
+ u16 map_len = 0;
+ u16 mapped_len = 0;
+ u16 hdr_len = 0;
+ u16 nr_frags;
+ u16 f;
+ int tso;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK;
+ if (tso) {
+ /* TSO */
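+ /* Map the protocol headers as a DMA segment of their own;
+ * for LSO the hardware re-emits these headers on every segment
+ * it generates, so they are kept separate from the payload. */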
+ map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ use_tpd = tpd;
+
+ buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
+ buffer_info->length = map_len;
+ buffer_info->dma = pci_map_single(adapter->pdev,
+ skb->data, hdr_len, PCI_DMA_TODEVICE);
+ ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
+ ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
+ ATL1C_PCIMAP_TODEVICE);
+ mapped_len += map_len;
+ use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
+ use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
+ }
+
+ if (mapped_len < buf_len) {
+ /* mapped_len == 0 means we should use the first tpd,
+ which is given by the caller */
+ if (mapped_len == 0)
+ use_tpd = tpd;
+ else {
+ use_tpd = atl1c_get_tpd(adapter, type);
+ memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
+ }
+ buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
+ buffer_info->length = buf_len - mapped_len;
+ buffer_info->dma =
+ pci_map_single(adapter->pdev, skb->data + mapped_len,
+ buffer_info->length, PCI_DMA_TODEVICE);
+ ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
+ ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
+ ATL1C_PCIMAP_TODEVICE);
+ use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
+ use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[f];
+
+ use_tpd = atl1c_get_tpd(adapter, type);
+ memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
+
+ buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
+ buffer_info->length = frag->size;
+ buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
+ frag, 0,
+ buffer_info->length,
+ DMA_TO_DEVICE);
+ ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
+ ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE,
+ ATL1C_PCIMAP_TODEVICE);
+ use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
+ use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
+ }
+
+ /* The last tpd */
+ use_tpd->word1 |= 1 << TPD_EOP_SHIFT;
+ /* The last buffer_info contains the skb address,
+ so it will be freed after unmapping */
+ buffer_info->skb = skb;
+}
+
+static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb,
+ struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type)
+{
+ struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
+ u32 prod_data;
+
+ AT_READ_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, &prod_data);
+ switch (type) {
+ case atl1c_trans_high:
+ prod_data &= 0xFFFF0000;
+ prod_data |= tpd_ring->next_to_use & 0xFFFF;
+ break;
+ case atl1c_trans_normal:
+ prod_data &= 0x0000FFFF;
+ prod_data |= (tpd_ring->next_to_use & 0xFFFF) << 16;
+ break;
+ default:
+ break;
+ }
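+ /* ensure all descriptor updates are globally visible before the
+ * new producer index is handed to the hardware */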
+ wmb();
+ AT_WRITE_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, prod_data);
+}
+
+static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
+ u16 tpd_req = 1;
+ struct atl1c_tpd_desc *tpd;
+ enum atl1c_trans_queue type = atl1c_trans_normal;
+
+ if (test_bit(__AT_DOWN, &adapter->flags)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ tpd_req = atl1c_cal_tpd_req(skb);
+ if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
+ if (netif_msg_pktdata(adapter))
+ dev_info(&adapter->pdev->dev, "tx locked\n");
+ return NETDEV_TX_LOCKED;
+ }
+ if (skb->mark == 0x01)
+ type = atl1c_trans_high;
+ else
+ type = atl1c_trans_normal;
+
+ if (atl1c_tpd_avail(adapter, type) < tpd_req) {
+ /* not enough descriptors, just stop the queue */
+ netif_stop_queue(netdev);
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ tpd = atl1c_get_tpd(adapter, type);
+
+ /* do TSO and check sum */
+ if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) {
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (unlikely(vlan_tx_tag_present(skb))) {
+ u16 vlan = vlan_tx_tag_get(skb);
+ __le16 tag;
+
+ vlan = cpu_to_le16(vlan);
+ AT_VLAN_TO_TAG(vlan, tag);
+ tpd->word1 |= 1 << TPD_INS_VTAG_SHIFT;
+ tpd->vlan_tag = tag;
+ }
+
+ if (skb_network_offset(skb) != ETH_HLEN)
+ tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */
+
+ atl1c_tx_map(adapter, skb, tpd, type);
+ atl1c_tx_queue(adapter, skb, tpd, type);
+
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ return NETDEV_TX_OK;
+}
+
+static void atl1c_free_irq(struct atl1c_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ free_irq(adapter->pdev->irq, netdev);
+
+ if (adapter->have_msi)
+ pci_disable_msi(adapter->pdev);
+}
+
+static int atl1c_request_irq(struct atl1c_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+ int flags = 0;
+ int err = 0;
+
+ adapter->have_msi = true;
+ err = pci_enable_msi(adapter->pdev);
+ if (err) {
+ if (netif_msg_ifup(adapter))
+ dev_err(&pdev->dev,
+ "Unable to allocate MSI interrupt Error: %d\n",
+ err);
+ adapter->have_msi = false;
+ } else
+ netdev->irq = pdev->irq;
+
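+ /* An MSI vector is exclusive to this device, while a legacy
+ * interrupt line may be shared with other devices, so IRQF_SHARED
+ * is only needed for the non-MSI fallback. */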
+ if (!adapter->have_msi)
+ flags |= IRQF_SHARED;
+ err = request_irq(adapter->pdev->irq, atl1c_intr, flags,
+ netdev->name, netdev);
+ if (err) {
+ if (netif_msg_ifup(adapter))
+ dev_err(&pdev->dev,
+ "Unable to allocate interrupt Error: %d\n",
+ err);
+ if (adapter->have_msi)
+ pci_disable_msi(adapter->pdev);
+ return err;
+ }
+ if (netif_msg_ifup(adapter))
+ dev_dbg(&pdev->dev, "atl1c_request_irq OK\n");
+ return err;
+}
+
+static int atl1c_up(struct atl1c_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int num;
+ int err;
+ int i;
+
+ netif_carrier_off(netdev);
+ atl1c_init_ring_ptrs(adapter);
+ atl1c_set_multi(netdev);
+ atl1c_restore_vlan(adapter);
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ num = atl1c_alloc_rx_buffer(adapter, i);
+ if (unlikely(num == 0)) {
+ err = -ENOMEM;
+ goto err_alloc_rx;
+ }
+ }
+
+ if (atl1c_configure(adapter)) {
+ err = -EIO;
+ goto err_up;
+ }
+
+ err = atl1c_request_irq(adapter);
+ if (unlikely(err))
+ goto err_up;
+
+ clear_bit(__AT_DOWN, &adapter->flags);
+ napi_enable(&adapter->napi);
+ atl1c_irq_enable(adapter);
+ atl1c_check_link_status(adapter);
+ netif_start_queue(netdev);
+ return err;
+
+err_up:
+err_alloc_rx:
+ atl1c_clean_rx_ring(adapter);
+ return err;
+}
+
+static void atl1c_down(struct atl1c_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ atl1c_del_timer(adapter);
+ adapter->work_event = 0; /* clear all events */
+ /* signal that we're down so the interrupt handler does not
+ * reschedule our watchdog timer */
+ set_bit(__AT_DOWN, &adapter->flags);
+ netif_carrier_off(netdev);
+ napi_disable(&adapter->napi);
+ atl1c_irq_disable(adapter);
+ atl1c_free_irq(adapter);
+ /* reset MAC to disable all RX/TX */
+ atl1c_reset_mac(&adapter->hw);
+ msleep(1);
+
+ adapter->link_speed = SPEED_0;
+ adapter->link_duplex = -1;
+ atl1c_clean_tx_ring(adapter, atl1c_trans_normal);
+ atl1c_clean_tx_ring(adapter, atl1c_trans_high);
+ atl1c_clean_rx_ring(adapter);
+}
+
+/*
+ * atl1c_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ */
+static int atl1c_open(struct net_device *netdev)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__AT_TESTING, &adapter->flags))
+ return -EBUSY;
+
+ /* allocate rx/tx dma buffer & descriptors */
+ err = atl1c_setup_ring_resources(adapter);
+ if (unlikely(err))
+ return err;
+
+ err = atl1c_up(adapter);
+ if (unlikely(err))
+ goto err_up;
+
+ if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) {
+ u32 phy_data;
+
+ AT_READ_REG(&adapter->hw, REG_MDIO_CTRL, &phy_data);
+ phy_data |= MDIO_AP_EN;
+ AT_WRITE_REG(&adapter->hw, REG_MDIO_CTRL, phy_data);
+ }
+ return 0;
+
+err_up:
+ atl1c_free_irq(adapter);
+ atl1c_free_ring_resources(adapter);
+ atl1c_reset_mac(&adapter->hw);
+ return err;
+}
+
+/*
+ * atl1c_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ */
+static int atl1c_close(struct net_device *netdev)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+
+ WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
+ atl1c_down(adapter);
+ atl1c_free_ring_resources(adapter);
+ return 0;
+}
+
+static int atl1c_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ struct atl1c_hw *hw = &adapter->hw;
+ u32 mac_ctrl_data = 0;
+ u32 master_ctrl_data = 0;
+ u32 wol_ctrl_data = 0;
+ u16 mii_intr_status_data = 0;
+ u32 wufc = adapter->wol;
+
+ atl1c_disable_l0s_l1(hw);
+ if (netif_running(netdev)) {
+ WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
+ atl1c_down(adapter);
+ }
+ netif_device_detach(netdev);
+
+ if (wufc)
+ if (atl1c_phy_power_saving(hw) != 0)
+ dev_dbg(&pdev->dev, "phy power saving failed");
+
+ AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
+ AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data);
+
+ master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS;
+ mac_ctrl_data &= ~(MAC_CTRL_PRMLEN_MASK << MAC_CTRL_PRMLEN_SHIFT);
+ mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
+ MAC_CTRL_PRMLEN_MASK) <<
+ MAC_CTRL_PRMLEN_SHIFT);
+ mac_ctrl_data &= ~(MAC_CTRL_SPEED_MASK << MAC_CTRL_SPEED_SHIFT);
+ mac_ctrl_data &= ~MAC_CTRL_DUPLX;
+
+ if (wufc) {
+ mac_ctrl_data |= MAC_CTRL_RX_EN;
+ if (adapter->link_speed == SPEED_1000 ||
+ adapter->link_speed == SPEED_0) {
+ mac_ctrl_data |= atl1c_mac_speed_1000 <<
+ MAC_CTRL_SPEED_SHIFT;
+ mac_ctrl_data |= MAC_CTRL_DUPLX;
+ } else
+ mac_ctrl_data |= atl1c_mac_speed_10_100 <<
+ MAC_CTRL_SPEED_SHIFT;
+
+ if (adapter->link_duplex == DUPLEX_FULL)
+ mac_ctrl_data |= MAC_CTRL_DUPLX;
+
+ /* turn on magic packet wol */
+ if (wufc & AT_WUFC_MAG)
+ wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
+
+ if (wufc & AT_WUFC_LNKC) {
+ wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
+ /* only link up can wake up */
+ if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) {
+ dev_dbg(&pdev->dev, "%s: read write phy "
+ "register failed.\n",
+ atl1c_driver_name);
+ }
+ }
+ /* clear phy interrupt */
+ atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data);
+ /* Config MAC Ctrl register */
+ __atl1c_vlan_mode(netdev->features, &mac_ctrl_data);
+
+ /* a magic packet may be a broadcast, multicast or unicast frame */
+ if (wufc & AT_WUFC_MAG)
+ mac_ctrl_data |= MAC_CTRL_BC_EN;
+
+ dev_dbg(&pdev->dev,
+ "%s: suspend MAC=0x%x\n",
+ atl1c_driver_name, mac_ctrl_data);
+ AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
+ AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
+ AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
+
+ AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
+ GPHY_CTRL_EXT_RESET);
+ } else {
+ AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING);
+ master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS;
+ mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT;
+ mac_ctrl_data |= MAC_CTRL_DUPLX;
+ AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
+ AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
+ AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
+ hw->phy_configured = false; /* re-init PHY when resume */
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int atl1c_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+
+ AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
+ atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
+ ATL1C_PCIE_PHY_RESET);
+
+ atl1c_phy_reset(&adapter->hw);
+ atl1c_reset_mac(&adapter->hw);
+ atl1c_phy_init(&adapter->hw);
+
+#if 0
+ AT_READ_REG(&adapter->hw, REG_PM_CTRLSTAT, &pm_data);
+ pm_data &= ~PM_CTRLSTAT_PME_EN;
+ AT_WRITE_REG(&adapter->hw, REG_PM_CTRLSTAT, pm_data);
+#endif
+
+ netif_device_attach(netdev);
+ if (netif_running(netdev))
+ atl1c_up(adapter);
+
+ return 0;
+}
+#endif
+
+static void atl1c_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+
+ atl1c_suspend(&pdev->dev);
+ pci_wake_from_d3(pdev, adapter->wol);
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static const struct net_device_ops atl1c_netdev_ops = {
+ .ndo_open = atl1c_open,
+ .ndo_stop = atl1c_close,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_start_xmit = atl1c_xmit_frame,
+ .ndo_set_mac_address = atl1c_set_mac_addr,
+ .ndo_set_rx_mode = atl1c_set_multi,
+ .ndo_change_mtu = atl1c_change_mtu,
+ .ndo_fix_features = atl1c_fix_features,
+ .ndo_set_features = atl1c_set_features,
+ .ndo_do_ioctl = atl1c_ioctl,
+ .ndo_tx_timeout = atl1c_tx_timeout,
+ .ndo_get_stats = atl1c_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = atl1c_netpoll,
+#endif
+};
+
+static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
+{
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ pci_set_drvdata(pdev, netdev);
+
+ netdev->irq = pdev->irq;
+ netdev->netdev_ops = &atl1c_netdev_ops;
+ netdev->watchdog_timeo = AT_TX_WATCHDOG;
+ atl1c_set_ethtool_ops(netdev);
+
+ /* TODO: add when ready */
+ netdev->hw_features = NETIF_F_SG |
+ NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
+ netdev->features = netdev->hw_features |
+ NETIF_F_HW_VLAN_TX;
+ return 0;
+}
+
+/*
+ * atl1c_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in atl1c_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * atl1c_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ */
+static int __devinit atl1c_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct atl1c_adapter *adapter;
+ static int cards_found;
+
+ int err = 0;
+
+ /* enable device (incl. PCI PM wakeup and hotplug setup) */
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
+ return err;
+ }
+
+ /*
+ * The atl1c chip can DMA to 64-bit addresses, but it uses a single
+ * shared register for the high 32 bits, so only a single, aligned,
+ * 4 GB physical address range can be used at a time.
+ *
+ * Supporting 64-bit DMA on this hardware is more trouble than it's
+ * worth. It is far easier to limit to 32-bit DMA than update
+ * various kernel subsystems to support the mechanics required by a
+ * fixed-high-32-bit system.
+ */
+ if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
+ (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
+ dev_err(&pdev->dev, "No usable DMA configuration,aborting\n");
+ goto err_dma;
+ }
+
+ err = pci_request_regions(pdev, atl1c_driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "cannot obtain PCI resources\n");
+ goto err_pci_reg;
+ }
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev(sizeof(struct atl1c_adapter));
+ if (netdev == NULL) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "etherdev alloc failed\n");
+ goto err_alloc_etherdev;
+ }
+
+ err = atl1c_init_netdev(netdev, pdev);
+ if (err) {
+ dev_err(&pdev->dev, "init netdevice failed\n");
+ goto err_init_netdev;
+ }
+ adapter = netdev_priv(netdev);
+ adapter->bd_number = cards_found;
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->hw.adapter = adapter;
+ adapter->msg_enable = netif_msg_init(-1, atl1c_default_msg);
+ adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+ if (!adapter->hw.hw_addr) {
+ err = -EIO;
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ goto err_ioremap;
+ }
+ netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
+
+ /* init mii data */
+ adapter->mii.dev = netdev;
+ adapter->mii.mdio_read = atl1c_mdio_read;
+ adapter->mii.mdio_write = atl1c_mdio_write;
+ adapter->mii.phy_id_mask = 0x1f;
+ adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
+ netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64);
+ setup_timer(&adapter->phy_config_timer, atl1c_phy_config,
+ (unsigned long)adapter);
+ /* setup the private structure */
+ err = atl1c_sw_init(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "net device private data init failed\n");
+ goto err_sw_init;
+ }
+ atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
+ ATL1C_PCIE_PHY_RESET);
+
+ /* Init GPHY as early as possible due to power saving issue */
+ atl1c_phy_reset(&adapter->hw);
+
+ err = atl1c_reset_mac(&adapter->hw);
+ if (err) {
+ err = -EIO;
+ goto err_reset;
+ }
+
+ /* reset the controller to
+ * put the device in a known good starting state */
+ err = atl1c_phy_init(&adapter->hw);
+ if (err) {
+ err = -EIO;
+ goto err_reset;
+ }
+ if (atl1c_read_mac_addr(&adapter->hw) != 0) {
+ err = -EIO;
+ dev_err(&pdev->dev, "get mac address failed\n");
+ goto err_eeprom;
+ }
+ memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
+ if (netif_msg_probe(adapter))
+ dev_dbg(&pdev->dev, "mac address : %pM\n",
+ adapter->hw.mac_addr);
+
+ atl1c_hw_set_mac_addr(&adapter->hw);
+ INIT_WORK(&adapter->common_task, atl1c_common_task);
+ adapter->work_event = 0;
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "register netdevice failed\n");
+ goto err_register;
+ }
+
+ if (netif_msg_probe(adapter))
+ dev_info(&pdev->dev, "version %s\n", ATL1C_DRV_VERSION);
+ cards_found++;
+ return 0;
+
+err_reset:
+err_register:
+err_sw_init:
+err_eeprom:
+ iounmap(adapter->hw.hw_addr);
+err_init_netdev:
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/*
+ * atl1c_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * atl1c_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ */
+static void __devexit atl1c_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+
+ unregister_netdev(netdev);
+ atl1c_phy_disable(&adapter->hw);
+
+ iounmap(adapter->hw.hw_addr);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ free_netdev(netdev);
+}
+
+/*
+ * atl1c_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ atl1c_down(adapter);
+
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/*
+ * atl1c_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first half of the e1000_resume routine.
+ */
+static pci_ers_result_t atl1c_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+
+ if (pci_enable_device(pdev)) {
+ if (netif_msg_hw(adapter))
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ atl1c_reset_mac(&adapter->hw);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/*
+ * atl1c_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second half of the atl1c_resume routine.
+ */
+static void atl1c_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev)) {
+ if (atl1c_up(adapter)) {
+ if (netif_msg_hw(adapter))
+ dev_err(&pdev->dev,
+ "Cannot bring device back up after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+}
+
+static struct pci_error_handlers atl1c_err_handler = {
+ .error_detected = atl1c_io_error_detected,
+ .slot_reset = atl1c_io_slot_reset,
+ .resume = atl1c_io_resume,
+};
+
+static SIMPLE_DEV_PM_OPS(atl1c_pm_ops, atl1c_suspend, atl1c_resume);
+
+static struct pci_driver atl1c_driver = {
+ .name = atl1c_driver_name,
+ .id_table = atl1c_pci_tbl,
+ .probe = atl1c_probe,
+ .remove = __devexit_p(atl1c_remove),
+ .shutdown = atl1c_shutdown,
+ .err_handler = &atl1c_err_handler,
+ .driver.pm = &atl1c_pm_ops,
+};
+
+/*
+ * atl1c_init_module - Driver Registration Routine
+ *
+ * atl1c_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ */
+static int __init atl1c_init_module(void)
+{
+ return pci_register_driver(&atl1c_driver);
+}
+
+/*
+ * atl1c_exit_module - Driver Exit Cleanup Routine
+ *
+ * atl1c_exit_module is called just before the driver is removed
+ * from memory.
+ */
+static void __exit atl1c_exit_module(void)
+{
+ pci_unregister_driver(&atl1c_driver);
+}
+
+module_init(atl1c_init_module);
+module_exit(atl1c_exit_module);
diff --git a/drivers/net/ethernet/atheros/atl1e/Makefile b/drivers/net/ethernet/atheros/atl1e/Makefile
new file mode 100644
index 000000000000..bc11be824e76
--- /dev/null
+++ b/drivers/net/ethernet/atheros/atl1e/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_ATL1E) += atl1e.o
+atl1e-objs += atl1e_main.o atl1e_hw.o atl1e_ethtool.o atl1e_param.o
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
new file mode 100644
index 000000000000..829b5ad71d0d
--- /dev/null
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -0,0 +1,509 @@
+/*
+ * Copyright(c) 2007 Atheros Corporation. All rights reserved.
+ * Copyright(c) 2007 xiong huang <xiong.huang@atheros.com>
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _ATL1E_H_
+#define _ATL1E_H_
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <linux/mii.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/tcp.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/workqueue.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+
+#include "atl1e_hw.h"
+
+#define PCI_REG_COMMAND 0x04 /* PCI Command Register */
+#define CMD_IO_SPACE 0x0001
+#define CMD_MEMORY_SPACE 0x0002
+#define CMD_BUS_MASTER 0x0004
+
+#define BAR_0 0
+#define BAR_1 1
+#define BAR_5 5
+
+/* Wake Up Filter Control */
+#define AT_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define AT_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define AT_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define AT_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */
+#define AT_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+
+#define SPEED_0 0xffff
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+/* Error Codes */
+#define AT_ERR_EEPROM 1
+#define AT_ERR_PHY 2
+#define AT_ERR_CONFIG 3
+#define AT_ERR_PARAM 4
+#define AT_ERR_MAC_TYPE 5
+#define AT_ERR_PHY_TYPE 6
+#define AT_ERR_PHY_SPEED 7
+#define AT_ERR_PHY_RES 8
+#define AT_ERR_TIMEOUT 9
+
+#define MAX_JUMBO_FRAME_SIZE 0x2000
+
+#define AT_VLAN_TAG_TO_TPD_TAG(_vlan, _tpd) \
+ _tpd = (((_vlan) << (4)) | (((_vlan) >> 13) & 7) |\
+ (((_vlan) >> 9) & 8))
+
+#define AT_TPD_TAG_TO_VLAN_TAG(_tpd, _vlan) \
+ _vlan = (((_tpd) >> 8) | (((_tpd) & 0x77) << 9) |\
+ (((_tpd) & 0x88) << 5))
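+
+/* AT_VLAN_TAG_TO_TPD_TAG repacks the host-order VLAN TCI (priority
+ * in bits 15:13, CFI in bit 12, VID in bits 11:0) into the layout
+ * the descriptor expects: VID in bits 15:4, CFI in bit 3 and
+ * priority in bits 2:0; its companion converts back the other way. */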
+
+#define AT_MAX_RECEIVE_QUEUE 4
+#define AT_PAGE_NUM_PER_QUEUE 2
+
+#define AT_DMA_HI_ADDR_MASK 0xffffffff00000000ULL
+#define AT_DMA_LO_ADDR_MASK 0x00000000ffffffffULL
+
+#define AT_TX_WATCHDOG (5 * HZ)
+#define AT_MAX_INT_WORK 10
+#define AT_TWSI_EEPROM_TIMEOUT 100
+#define AT_HW_MAX_IDLE_DELAY 10
+#define AT_SUSPEND_LINK_TIMEOUT 28
+
+#define AT_REGS_LEN 75
+#define AT_EEPROM_LEN 512
+#define AT_ADV_MASK (ADVERTISE_10_HALF |\
+ ADVERTISE_10_FULL |\
+ ADVERTISE_100_HALF |\
+ ADVERTISE_100_FULL |\
+ ADVERTISE_1000_FULL)
+
+/* tpd word 2 */
+#define TPD_BUFLEN_MASK 0x3FFF
+#define TPD_BUFLEN_SHIFT 0
+#define TPD_DMAINT_MASK 0x0001
+#define TPD_DMAINT_SHIFT 14
+#define TPD_PKTNT_MASK 0x0001
+#define TPD_PKTINT_SHIFT 15
+#define TPD_VLANTAG_MASK 0xFFFF
+#define TPD_VLAN_SHIFT 16
+
+/* tpd word 3 bits 0:4 */
+#define TPD_EOP_MASK 0x0001
+#define TPD_EOP_SHIFT 0
+#define TPD_IP_VERSION_MASK 0x0001
+#define TPD_IP_VERSION_SHIFT 1 /* 0 : IPV4, 1 : IPV6 */
+#define TPD_INS_VL_TAG_MASK 0x0001
+#define TPD_INS_VL_TAG_SHIFT 2
+#define TPD_CC_SEGMENT_EN_MASK 0x0001
+#define TPD_CC_SEGMENT_EN_SHIFT 3
+#define TPD_SEGMENT_EN_MASK 0x0001
+#define TPD_SEGMENT_EN_SHIFT 4
+
+/* tpd word 3 bits 5:7 if ip version is 0 */
+#define TPD_IP_CSUM_MASK 0x0001
+#define TPD_IP_CSUM_SHIFT 5
+#define TPD_TCP_CSUM_MASK 0x0001
+#define TPD_TCP_CSUM_SHIFT 6
+#define TPD_UDP_CSUM_MASK 0x0001
+#define TPD_UDP_CSUM_SHIFT 7
+
+/* tpd word 3 bits 5:7 if ip version is 1 */
+#define TPD_V6_IPHLLO_MASK 0x0007
+#define TPD_V6_IPHLLO_SHIFT 7
+
+/* tpd word 3 bits 8:9 */
+#define TPD_VL_TAGGED_MASK 0x0001
+#define TPD_VL_TAGGED_SHIFT 8
+#define TPD_ETHTYPE_MASK 0x0001
+#define TPD_ETHTYPE_SHIFT 9
+
+/* tpd word 3 bits 10:13 if ip version is 0 */
+#define TDP_V4_IPHL_MASK 0x000F
+#define TPD_V4_IPHL_SHIFT 10
+
+/* tpd word 3 bits 10:13 if ip version is 1 */
+#define TPD_V6_IPHLHI_MASK 0x000F
+#define TPD_V6_IPHLHI_SHIFT 10
+
+/* tpd word 3 bits 14:31 if segment enabled */
+#define TPD_TCPHDRLEN_MASK 0x000F
+#define TPD_TCPHDRLEN_SHIFT 14
+#define TPD_HDRFLAG_MASK 0x0001
+#define TPD_HDRFLAG_SHIFT 18
+#define TPD_MSS_MASK 0x1FFF
+#define TPD_MSS_SHIFT 19
+
+/* tpd word 3 bits 16:31 if custom csum enabled */
+#define TPD_PLOADOFFSET_MASK 0x00FF
+#define TPD_PLOADOFFSET_SHIFT 16
+#define TPD_CCSUMOFFSET_MASK 0x00FF
+#define TPD_CCSUMOFFSET_SHIFT 24
+
+struct atl1e_tpd_desc {
+ __le64 buffer_addr;
+ __le32 word2;
+ __le32 word3;
+};
+
+/* how about 0x2000 */
+#define MAX_TX_BUF_LEN 0x2000
+#define MAX_TX_BUF_SHIFT 13
+/*#define MAX_TX_BUF_LEN 0x3000 */
+
+/* rrs word 1 bit 0:31 */
+#define RRS_RX_CSUM_MASK 0xFFFF
+#define RRS_RX_CSUM_SHIFT 0
+#define RRS_PKT_SIZE_MASK 0x3FFF
+#define RRS_PKT_SIZE_SHIFT 16
+#define RRS_CPU_NUM_MASK 0x0003
+#define RRS_CPU_NUM_SHIFT 30
+
+#define RRS_IS_RSS_IPV4 0x0001
+#define RRS_IS_RSS_IPV4_TCP 0x0002
+#define RRS_IS_RSS_IPV6 0x0004
+#define RRS_IS_RSS_IPV6_TCP 0x0008
+#define RRS_IS_IPV6 0x0010
+#define RRS_IS_IP_FRAG 0x0020
+#define RRS_IS_IP_DF 0x0040
+#define RRS_IS_802_3 0x0080
+#define RRS_IS_VLAN_TAG 0x0100
+#define RRS_IS_ERR_FRAME 0x0200
+#define RRS_IS_IPV4 0x0400
+#define RRS_IS_UDP 0x0800
+#define RRS_IS_TCP 0x1000
+#define RRS_IS_BCAST 0x2000
+#define RRS_IS_MCAST 0x4000
+#define RRS_IS_PAUSE 0x8000
+
+#define RRS_ERR_BAD_CRC 0x0001
+#define RRS_ERR_CODE 0x0002
+#define RRS_ERR_DRIBBLE 0x0004
+#define RRS_ERR_RUNT 0x0008
+#define RRS_ERR_RX_OVERFLOW 0x0010
+#define RRS_ERR_TRUNC 0x0020
+#define RRS_ERR_IP_CSUM 0x0040
+#define RRS_ERR_L4_CSUM 0x0080
+#define RRS_ERR_LENGTH 0x0100
+#define RRS_ERR_DES_ADDR 0x0200
+
+struct atl1e_recv_ret_status {
+ u16 seq_num;
+ u16 hash_lo;
+ __le32 word1;
+ u16 pkt_flag;
+ u16 err_flag;
+ u16 hash_hi;
+ u16 vtag;
+};
+
+enum atl1e_dma_req_block {
+ atl1e_dma_req_128 = 0,
+ atl1e_dma_req_256 = 1,
+ atl1e_dma_req_512 = 2,
+ atl1e_dma_req_1024 = 3,
+ atl1e_dma_req_2048 = 4,
+ atl1e_dma_req_4096 = 5
+};
+
+enum atl1e_rrs_type {
+ atl1e_rrs_disable = 0,
+ atl1e_rrs_ipv4 = 1,
+ atl1e_rrs_ipv4_tcp = 2,
+ atl1e_rrs_ipv6 = 4,
+ atl1e_rrs_ipv6_tcp = 8
+};
+
+enum atl1e_nic_type {
+ athr_l1e = 0,
+ athr_l2e_revA = 1,
+ athr_l2e_revB = 2
+};
+
+struct atl1e_hw_stats {
+ /* rx */
+ unsigned long rx_ok; /* The number of good packets received. */
+ unsigned long rx_bcast; /* The number of good broadcast packets received. */
+ unsigned long rx_mcast; /* The number of good multicast packets received. */
+ unsigned long rx_pause; /* The number of Pause packets received. */
+ unsigned long rx_ctrl; /* The number of Control packets received other than Pause frames. */
+ unsigned long rx_fcs_err; /* The number of packets with bad FCS. */
+ unsigned long rx_len_err; /* The number of packets whose length field does not match the actual size. */
+ unsigned long rx_byte_cnt; /* The number of bytes of good packets received. FCS is NOT included. */
+ unsigned long rx_runt; /* The number of packets received that are less than 64 bytes long with good FCS. */
+ unsigned long rx_frag; /* The number of packets received that are less than 64 bytes long with bad FCS. */
+ unsigned long rx_sz_64; /* The number of good and bad packets received that are 64 bytes long. */
+ unsigned long rx_sz_65_127; /* The number of good and bad packets received that are between 65 and 127 bytes long. */
+ unsigned long rx_sz_128_255; /* The number of good and bad packets received that are between 128 and 255 bytes long. */
+ unsigned long rx_sz_256_511; /* The number of good and bad packets received that are between 256 and 511 bytes long. */
+ unsigned long rx_sz_512_1023; /* The number of good and bad packets received that are between 512 and 1023 bytes long. */
+ unsigned long rx_sz_1024_1518; /* The number of good and bad packets received that are between 1024 and 1518 bytes long. */
+ unsigned long rx_sz_1519_max; /* The number of good and bad packets received that are between 1519 bytes and the MTU. */
+ unsigned long rx_sz_ov; /* The number of good and bad packets received that are larger than the MTU and truncated by Selene. */
+ unsigned long rx_rxf_ov; /* The number of frames dropped due to RX FIFO overflow. */
+ unsigned long rx_rrd_ov; /* The number of frames dropped due to RRD overflow. */
+ unsigned long rx_align_err; /* Alignment Error */
+ unsigned long rx_bcast_byte_cnt; /* The byte count of broadcast packets received, excluding FCS. */
+ unsigned long rx_mcast_byte_cnt; /* The byte count of multicast packets received, excluding FCS. */
+ unsigned long rx_err_addr; /* The number of packets dropped due to address filtering. */
+
+ /* tx */
+ unsigned long tx_ok; /* The number of good packets transmitted. */
+ unsigned long tx_bcast; /* The number of good broadcast packets transmitted. */
+ unsigned long tx_mcast; /* The number of good multicast packets transmitted. */
+ unsigned long tx_pause; /* The number of Pause packets transmitted. */
+ unsigned long tx_exc_defer; /* The number of packets transmitted with excessive deferral. */
+ unsigned long tx_ctrl; /* The number of control frames transmitted, excluding Pause frames. */
+ unsigned long tx_defer; /* The number of packets transmitted that were deferred. */
+ unsigned long tx_byte_cnt; /* The number of bytes of data transmitted. FCS is NOT included. */
+ unsigned long tx_sz_64; /* The number of good and bad packets transmitted that are 64 bytes long. */
+ unsigned long tx_sz_65_127; /* The number of good and bad packets transmitted that are between 65 and 127 bytes long. */
+ unsigned long tx_sz_128_255; /* The number of good and bad packets transmitted that are between 128 and 255 bytes long. */
+ unsigned long tx_sz_256_511; /* The number of good and bad packets transmitted that are between 256 and 511 bytes long. */
+ unsigned long tx_sz_512_1023; /* The number of good and bad packets transmitted that are between 512 and 1023 bytes long. */
+ unsigned long tx_sz_1024_1518; /* The number of good and bad packets transmitted that are between 1024 and 1518 bytes long. */
+ unsigned long tx_sz_1519_max; /* The number of good and bad packets transmitted that are between 1519 bytes and the MTU. */
+ unsigned long tx_1_col; /* The number of packets transmitted successfully after a single prior collision. */
+ unsigned long tx_2_col; /* The number of packets transmitted successfully after multiple prior collisions. */
+ unsigned long tx_late_col; /* The number of packets transmitted with late collisions. */
+ unsigned long tx_abort_col; /* The number of transmit packets aborted due to excessive collisions. */
+ unsigned long tx_underrun; /* The number of transmit packets aborted due to transmit FIFO or TRD FIFO underrun. */
+ unsigned long tx_rd_eop; /* The number of times a read went beyond the EOP into the next frame area because the TRD was not written in time. */
+ unsigned long tx_len_err; /* The number of transmit packets whose length field does NOT match the actual frame size. */
+ unsigned long tx_trunc; /* The number of transmit packets truncated because their size exceeds the MTU. */
+ unsigned long tx_bcast_byte; /* The byte count of broadcast packets transmitted, excluding FCS. */
+ unsigned long tx_mcast_byte; /* The byte count of multicast packets transmitted, excluding FCS. */
+};
+
+struct atl1e_hw {
+ u8 __iomem *hw_addr; /* base address of memory mapped registers */
+ resource_size_t mem_rang;
+ struct atl1e_adapter *adapter;
+ enum atl1e_nic_type nic_type;
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u16 pci_cmd_word;
+ u8 mac_addr[ETH_ALEN];
+ u8 perm_mac_addr[ETH_ALEN];
+ u8 preamble_len;
+ u16 max_frame_size;
+ u16 rx_jumbo_th;
+ u16 tx_jumbo_th;
+
+ u16 media_type;
+#define MEDIA_TYPE_AUTO_SENSOR 0
+#define MEDIA_TYPE_100M_FULL 1
+#define MEDIA_TYPE_100M_HALF 2
+#define MEDIA_TYPE_10M_FULL 3
+#define MEDIA_TYPE_10M_HALF 4
+
+ u16 autoneg_advertised;
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL 0x0020
+ u16 mii_autoneg_adv_reg;
+ u16 mii_1000t_ctrl_reg;
+
+ u16 imt; /* Interrupt Moderator timer (2us resolution) */
+ u16 ict; /* Interrupt Clear timer (2us resolution) */
+ u32 smb_timer;
+ u16 rrd_thresh; /* Threshold of number of RRD produced to trigger
+ interrupt request */
+ u16 tpd_thresh;
+ u16 rx_count_down; /* 2us resolution */
+ u16 tx_count_down;
+
+ u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. */
+ enum atl1e_rrs_type rrs_type;
+ u32 base_cpu;
+ u32 indirect_tab;
+
+ enum atl1e_dma_req_block dmar_block;
+ enum atl1e_dma_req_block dmaw_block;
+ u8 dmaw_dly_cnt;
+ u8 dmar_dly_cnt;
+
+ bool phy_configured;
+ bool re_autoneg;
+ bool emi_ca;
+};
+
+/*
+ * wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct atl1e_tx_buffer {
+ struct sk_buff *skb;
+ u16 flags;
+#define ATL1E_TX_PCIMAP_SINGLE 0x0001
+#define ATL1E_TX_PCIMAP_PAGE 0x0002
+#define ATL1E_TX_PCIMAP_TYPE_MASK 0x0003
+ u16 length;
+ dma_addr_t dma;
+};
+
+#define ATL1E_SET_PCIMAP_TYPE(tx_buff, type) do { \
+ ((tx_buff)->flags) &= ~ATL1E_TX_PCIMAP_TYPE_MASK; \
+ ((tx_buff)->flags) |= (type); \
+ } while (0)
+
+struct atl1e_rx_page {
+ dma_addr_t dma; /* receive page DMA address */
+ u8 *addr; /* receive page virtual address */
+ dma_addr_t write_offset_dma; /* the DMA address which contains the
+ receive data offset in the page */
+ u32 *write_offset_addr; /* the virtual address which contains
+ the receive data offset in the page */
+ u32 read_offset; /* the offset up to which we have read */
+};
+
+struct atl1e_rx_page_desc {
+ struct atl1e_rx_page rx_page[AT_PAGE_NUM_PER_QUEUE];
+ u8 rx_using;
+ u16 rx_nxseq;
+};
+
+/* transmit packet descriptor (tpd) ring */
+struct atl1e_tx_ring {
+ struct atl1e_tpd_desc *desc; /* descriptor ring virtual address */
+ dma_addr_t dma; /* descriptor ring physical address */
+ u16 count; /* number of descriptors in the ring */
+ rwlock_t tx_lock;
+ u16 next_to_use;
+ atomic_t next_to_clean;
+ struct atl1e_tx_buffer *tx_buffer;
+ dma_addr_t cmb_dma;
+ u32 *cmb;
+};
+
+/* receive packet descriptor ring */
+struct atl1e_rx_ring {
+ void *desc;
+ dma_addr_t dma;
+ int size;
+ u32 page_size; /* bytes length of rxf page */
+ u32 real_page_size; /* real_page_size = page_size + jumbo + align */
+ struct atl1e_rx_page_desc rx_page_desc[AT_MAX_RECEIVE_QUEUE];
+};
+
+/* board specific private data structure */
+struct atl1e_adapter {
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct napi_struct napi;
+ struct mii_if_info mii; /* MII interface info */
+ struct atl1e_hw hw;
+ struct atl1e_hw_stats hw_stats;
+
+ bool have_msi;
+ u32 wol;
+ u16 link_speed;
+ u16 link_duplex;
+
+ spinlock_t mdio_lock;
+ spinlock_t tx_lock;
+ atomic_t irq_sem;
+
+ struct work_struct reset_task;
+ struct work_struct link_chg_task;
+ struct timer_list watchdog_timer;
+ struct timer_list phy_config_timer;
+
+ /* All Descriptor memory */
+ dma_addr_t ring_dma;
+ void *ring_vir_addr;
+ u32 ring_size;
+
+ struct atl1e_tx_ring tx_ring;
+ struct atl1e_rx_ring rx_ring;
+ int num_rx_queues;
+ unsigned long flags;
+#define __AT_TESTING 0x0001
+#define __AT_RESETTING 0x0002
+#define __AT_DOWN 0x0003
+
+ u32 bd_number; /* board number;*/
+ u32 pci_state[16];
+ u32 *config_space;
+};
+
+#define AT_WRITE_REG(a, reg, value) ( \
+ writel((value), ((a)->hw_addr + reg)))
+
+#define AT_WRITE_FLUSH(a) (\
+ readl((a)->hw_addr))
+
+#define AT_READ_REG(a, reg) ( \
+ readl((a)->hw_addr + reg))
+
+#define AT_WRITE_REGB(a, reg, value) (\
+ writeb((value), ((a)->hw_addr + reg)))
+
+#define AT_READ_REGB(a, reg) (\
+ readb((a)->hw_addr + reg))
+
+#define AT_WRITE_REGW(a, reg, value) (\
+ writew((value), ((a)->hw_addr + reg)))
+
+#define AT_READ_REGW(a, reg) (\
+ readw((a)->hw_addr + reg))
+
+#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+ writel((value), (((a)->hw_addr + reg) + ((offset) << 2))))
+
+#define AT_READ_REG_ARRAY(a, reg, offset) ( \
+ readl(((a)->hw_addr + reg) + ((offset) << 2)))
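+
+/* Illustrative usage (REG_ISR assumed to be provided by atl1e_hw.h):
+ *
+ * u32 isr = AT_READ_REG(&adapter->hw, REG_ISR);
+ * AT_WRITE_REG(&adapter->hw, REG_ISR, isr);
+ * AT_WRITE_FLUSH(&adapter->hw);
+ *
+ * Note that, unlike the atl1c helpers seen earlier in this patch
+ * set, AT_READ_REG here returns the value rather than storing it
+ * through a pointer argument.
+ */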
+
+extern char atl1e_driver_name[];
+extern char atl1e_driver_version[];
+
+extern void atl1e_check_options(struct atl1e_adapter *adapter);
+extern int atl1e_up(struct atl1e_adapter *adapter);
+extern void atl1e_down(struct atl1e_adapter *adapter);
+extern void atl1e_reinit_locked(struct atl1e_adapter *adapter);
+extern s32 atl1e_reset_hw(struct atl1e_hw *hw);
+extern void atl1e_set_ethtool_ops(struct net_device *netdev);
+#endif /* _ATL1_E_H_ */
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
new file mode 100644
index 000000000000..6269438d365f
--- /dev/null
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright(c) 2007 Atheros Corporation. All rights reserved.
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/slab.h>
+
+#include "atl1e.h"
+
+static int atl1e_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct atl1e_hw *hw = &adapter->hw;
+
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
+ if (hw->nic_type == athr_l1e)
+ ecmd->supported |= SUPPORTED_1000baseT_Full;
+
+ ecmd->advertising = ADVERTISED_TP;
+
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ ecmd->advertising |= hw->autoneg_advertised;
+
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = 0;
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ if (adapter->link_speed != SPEED_0) {
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+ if (adapter->link_duplex == FULL_DUPLEX)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ } else {
+ ethtool_cmd_speed_set(ecmd, -1);
+ ecmd->duplex = -1;
+ }
+
+ ecmd->autoneg = AUTONEG_ENABLE;
+ return 0;
+}
+
+static int atl1e_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct atl1e_hw *hw = &adapter->hw;
+
+ while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
+ msleep(1);
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ u16 adv4, adv9;
+
+ if (ecmd->advertising & ADVERTISE_1000_FULL) {
+ if (hw->nic_type == athr_l1e) {
+ hw->autoneg_advertised =
+ ecmd->advertising & AT_ADV_MASK;
+ } else {
+ clear_bit(__AT_RESETTING, &adapter->flags);
+ return -EINVAL;
+ }
+ } else if (ecmd->advertising & ADVERTISE_1000_HALF) {
+ clear_bit(__AT_RESETTING, &adapter->flags);
+ return -EINVAL;
+ } else {
+ hw->autoneg_advertised =
+ ecmd->advertising & AT_ADV_MASK;
+ }
+ ecmd->advertising = hw->autoneg_advertised |
+ ADVERTISED_TP | ADVERTISED_Autoneg;
+
+ adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL;
+ adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK;
+ if (hw->autoneg_advertised & ADVERTISE_10_HALF)
+ adv4 |= ADVERTISE_10HALF;
+ if (hw->autoneg_advertised & ADVERTISE_10_FULL)
+ adv4 |= ADVERTISE_10FULL;
+ if (hw->autoneg_advertised & ADVERTISE_100_HALF)
+ adv4 |= ADVERTISE_100HALF;
+ if (hw->autoneg_advertised & ADVERTISE_100_FULL)
+ adv4 |= ADVERTISE_100FULL;
+ if (hw->autoneg_advertised & ADVERTISE_1000_FULL)
+ adv9 |= ADVERTISE_1000FULL;
+
+ if (adv4 != hw->mii_autoneg_adv_reg ||
+ adv9 != hw->mii_1000t_ctrl_reg) {
+ hw->mii_autoneg_adv_reg = adv4;
+ hw->mii_1000t_ctrl_reg = adv9;
+ hw->re_autoneg = true;
+ }
+
+ } else {
+ clear_bit(__AT_RESETTING, &adapter->flags);
+ return -EINVAL;
+ }
+
+ /* reset the link */
+
+ if (netif_running(adapter->netdev)) {
+ atl1e_down(adapter);
+ atl1e_up(adapter);
+ } else
+ atl1e_reset_hw(&adapter->hw);
+
+ clear_bit(__AT_RESETTING, &adapter->flags);
+ return 0;
+}
+
+static u32 atl1e_get_msglevel(struct net_device *netdev)
+{
+#ifdef DBG
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+static int atl1e_get_regs_len(struct net_device *netdev)
+{
+ return AT_REGS_LEN * sizeof(u32);
+}
+
+static void atl1e_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct atl1e_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u16 phy_data;
+
+ memset(p, 0, AT_REGS_LEN * sizeof(u32));
+
+ regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+
+ regs_buff[0] = AT_READ_REG(hw, REG_VPD_CAP);
+ regs_buff[1] = AT_READ_REG(hw, REG_SPI_FLASH_CTRL);
+ regs_buff[2] = AT_READ_REG(hw, REG_SPI_FLASH_CONFIG);
+ regs_buff[3] = AT_READ_REG(hw, REG_TWSI_CTRL);
+ regs_buff[4] = AT_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL);
+ regs_buff[5] = AT_READ_REG(hw, REG_MASTER_CTRL);
+ regs_buff[6] = AT_READ_REG(hw, REG_MANUAL_TIMER_INIT);
+ regs_buff[7] = AT_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT);
+ regs_buff[8] = AT_READ_REG(hw, REG_GPHY_CTRL);
+ regs_buff[9] = AT_READ_REG(hw, REG_CMBDISDMA_TIMER);
+ regs_buff[10] = AT_READ_REG(hw, REG_IDLE_STATUS);
+ regs_buff[11] = AT_READ_REG(hw, REG_MDIO_CTRL);
+ regs_buff[12] = AT_READ_REG(hw, REG_SERDES_LOCK);
+ regs_buff[13] = AT_READ_REG(hw, REG_MAC_CTRL);
+ regs_buff[14] = AT_READ_REG(hw, REG_MAC_IPG_IFG);
+ regs_buff[15] = AT_READ_REG(hw, REG_MAC_STA_ADDR);
+ regs_buff[16] = AT_READ_REG(hw, REG_MAC_STA_ADDR+4);
+ regs_buff[17] = AT_READ_REG(hw, REG_RX_HASH_TABLE);
+ regs_buff[18] = AT_READ_REG(hw, REG_RX_HASH_TABLE+4);
+ regs_buff[19] = AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL);
+ regs_buff[20] = AT_READ_REG(hw, REG_MTU);
+ regs_buff[21] = AT_READ_REG(hw, REG_WOL_CTRL);
+ regs_buff[22] = AT_READ_REG(hw, REG_SRAM_TRD_ADDR);
+ regs_buff[23] = AT_READ_REG(hw, REG_SRAM_TRD_LEN);
+ regs_buff[24] = AT_READ_REG(hw, REG_SRAM_RXF_ADDR);
+ regs_buff[25] = AT_READ_REG(hw, REG_SRAM_RXF_LEN);
+ regs_buff[26] = AT_READ_REG(hw, REG_SRAM_TXF_ADDR);
+ regs_buff[27] = AT_READ_REG(hw, REG_SRAM_TXF_LEN);
+ regs_buff[28] = AT_READ_REG(hw, REG_SRAM_TCPH_ADDR);
+ regs_buff[29] = AT_READ_REG(hw, REG_SRAM_PKTH_ADDR);
+
+ atl1e_read_phy_reg(hw, MII_BMCR, &phy_data);
+ regs_buff[73] = (u32)phy_data;
+ atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
+ regs_buff[74] = (u32)phy_data;
+}
+
+static int atl1e_get_eeprom_len(struct net_device *netdev)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ if (!atl1e_check_eeprom_exist(&adapter->hw))
+ return AT_EEPROM_LEN;
+ else
+ return 0;
+}
+
+static int atl1e_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct atl1e_hw *hw = &adapter->hw;
+ u32 *eeprom_buff;
+ int first_dword, last_dword;
+ int ret_val = 0;
+ int i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (atl1e_check_eeprom_exist(hw)) /* not exist */
+ return -EINVAL;
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ first_dword = eeprom->offset >> 2;
+ last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
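+ /*
+ * Worked example (illustrative): offset = 6, len = 3 covers bytes
+ * 6..8, so first_dword = 1 and last_dword = 2 (inclusive) -- two
+ * dwords must be read.
+ */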
+
+ eeprom_buff = kmalloc(sizeof(u32) *
+ (last_dword - first_dword + 1), GFP_KERNEL);
+ if (eeprom_buff == NULL)
+ return -ENOMEM;
+
+ for (i = first_dword; i <= last_dword; i++) {
+ if (!atl1e_read_eeprom(hw, i * 4, &(eeprom_buff[i-first_dword]))) {
+ kfree(eeprom_buff);
+ return -EIO;
+ }
+ }
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3),
+ eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static int atl1e_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct atl1e_hw *hw = &adapter->hw;
+ u32 *eeprom_buff;
+ u32 *ptr;
+ int first_dword, last_dword;
+ int ret_val = 0;
+ int i;
+
+ if (eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ return -EINVAL;
+
+ first_dword = eeprom->offset >> 2;
+ last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
+ eeprom_buff = kmalloc(AT_EEPROM_LEN, GFP_KERNEL);
+ if (eeprom_buff == NULL)
+ return -ENOMEM;
+
+ ptr = (u32 *)eeprom_buff;
+
+ if (eeprom->offset & 3) {
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ if (!atl1e_read_eeprom(hw, first_dword * 4, &(eeprom_buff[0]))) {
+ ret_val = -EIO;
+ goto out;
+ }
+ ptr++;
+ }
+ if (((eeprom->offset + eeprom->len) & 3)) {
+ /* need read/modify/write of last changed EEPROM word */
+ /* only the first byte of the word is being modified */
+
+ if (!atl1e_read_eeprom(hw, last_dword * 4,
+ &(eeprom_buff[last_dword - first_dword]))) {
+ ret_val = -EIO;
+ goto out;
+ }
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_dword - first_dword + 1; i++) {
+ if (!atl1e_write_eeprom(hw, ((first_dword + i) * 4),
+ eeprom_buff[i])) {
+ ret_val = -EIO;
+ goto out;
+ }
+ }
+out:
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
+static void atl1e_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ strncpy(drvinfo->driver, atl1e_driver_name, 32);
+ strncpy(drvinfo->version, atl1e_driver_version, 32);
+ strncpy(drvinfo->fw_version, "L1e", 32);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ drvinfo->n_stats = 0;
+ drvinfo->testinfo_len = 0;
+ drvinfo->regdump_len = atl1e_get_regs_len(netdev);
+ drvinfo->eedump_len = atl1e_get_eeprom_len(netdev);
+}
+
+static void atl1e_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = WAKE_MAGIC | WAKE_PHY;
+ wol->wolopts = 0;
+
+ if (adapter->wol & AT_WUFC_EX)
+ wol->wolopts |= WAKE_UCAST;
+ if (adapter->wol & AT_WUFC_MC)
+ wol->wolopts |= WAKE_MCAST;
+ if (adapter->wol & AT_WUFC_BC)
+ wol->wolopts |= WAKE_BCAST;
+ if (adapter->wol & AT_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+ if (adapter->wol & AT_WUFC_LNKC)
+ wol->wolopts |= WAKE_PHY;
+}
+
+static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
+ WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
+ return -EOPNOTSUPP;
+ /* these settings will always override what we currently have */
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= AT_WUFC_MAG;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wol |= AT_WUFC_LNKC;
+
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
+static int atl1e_nway_reset(struct net_device *netdev)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ if (netif_running(netdev))
+ atl1e_reinit_locked(adapter);
+ return 0;
+}
+
+static const struct ethtool_ops atl1e_ethtool_ops = {
+ .get_settings = atl1e_get_settings,
+ .set_settings = atl1e_set_settings,
+ .get_drvinfo = atl1e_get_drvinfo,
+ .get_regs_len = atl1e_get_regs_len,
+ .get_regs = atl1e_get_regs,
+ .get_wol = atl1e_get_wol,
+ .set_wol = atl1e_set_wol,
+ .get_msglevel = atl1e_get_msglevel,
+ .nway_reset = atl1e_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = atl1e_get_eeprom_len,
+ .get_eeprom = atl1e_get_eeprom,
+ .set_eeprom = atl1e_set_eeprom,
+};
+
+void atl1e_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &atl1e_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_hw.c b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.c
new file mode 100644
index 000000000000..923063d2e5bb
--- /dev/null
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.c
@@ -0,0 +1,651 @@
+/*
+ * Copyright(c) 2007 Atheros Corporation. All rights reserved.
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+
+#include "atl1e.h"
+
+/*
+ * atl1e_check_eeprom_exist
+ * return 0 if the EEPROM exists
+ */
+int atl1e_check_eeprom_exist(struct atl1e_hw *hw)
+{
+ u32 value;
+
+ value = AT_READ_REG(hw, REG_SPI_FLASH_CTRL);
+ if (value & SPI_FLASH_CTRL_EN_VPD) {
+ value &= ~SPI_FLASH_CTRL_EN_VPD;
+ AT_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
+ }
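+ /*
+ * The high byte of the dword at REG_PCIE_CAP_LIST is the PCI
+ * next-capability pointer; 0x6C chains to the VPD capability
+ * (REG_VPD_CAP), which the part exposes when an EEPROM is present.
+ */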
+ value = AT_READ_REGW(hw, REG_PCIE_CAP_LIST);
+ return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
+}
+
+void atl1e_hw_set_mac_addr(struct atl1e_hw *hw)
+{
+ u32 value;
+ /*
+ * 00-0B-6A-F6-00-DC
+ * 0: 6AF600DC 1: 000B
+ * low dword
+ */
+ value = (((u32)hw->mac_addr[2]) << 24) |
+ (((u32)hw->mac_addr[3]) << 16) |
+ (((u32)hw->mac_addr[4]) << 8) |
+ (((u32)hw->mac_addr[5])) ;
+ AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
+ /* high dword */
+ value = (((u32)hw->mac_addr[0]) << 8) |
+ (((u32)hw->mac_addr[1])) ;
+ AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
+}
+
+/*
+ * atl1e_get_permanent_address
+ * return 0 if a valid MAC address was obtained
+ */
+static int atl1e_get_permanent_address(struct atl1e_hw *hw)
+{
+ u32 addr[2];
+ u32 i;
+ u32 twsi_ctrl_data;
+ u8 eth_addr[ETH_ALEN];
+
+ if (is_valid_ether_addr(hw->perm_mac_addr))
+ return 0;
+
+ /* init */
+ addr[0] = addr[1] = 0;
+
+ if (!atl1e_check_eeprom_exist(hw)) {
+ /* eeprom exist */
+ twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL);
+ twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART;
+ AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data);
+ for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) {
+ msleep(10);
+ twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL);
+ if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0)
+ break;
+ }
+ if (i >= AT_TWSI_EEPROM_TIMEOUT)
+ return AT_ERR_TIMEOUT;
+ }
+
+ /* the MAC address may have been set by the BIOS */
+ addr[0] = AT_READ_REG(hw, REG_MAC_STA_ADDR);
+ addr[1] = AT_READ_REG(hw, REG_MAC_STA_ADDR + 4);
+ *(u32 *) &eth_addr[2] = swab32(addr[0]);
+ *(u16 *) &eth_addr[0] = swab16(*(u16 *)&addr[1]);
+
+ if (is_valid_ether_addr(eth_addr)) {
+ memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
+ return 0;
+ }
+
+ return AT_ERR_EEPROM;
+}
+
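+/*
+ * EEPROM writes are not implemented here; the stub unconditionally
+ * reports success, so atl1e_set_eeprom() leaves the device unchanged.
+ */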
+bool atl1e_write_eeprom(struct atl1e_hw *hw, u32 offset, u32 value)
+{
+ return true;
+}
+
+bool atl1e_read_eeprom(struct atl1e_hw *hw, u32 offset, u32 *p_value)
+{
+ int i;
+ u32 control;
+
+ if (offset & 3)
+ return false; /* address is not dword-aligned */
+
+ AT_WRITE_REG(hw, REG_VPD_DATA, 0);
+ control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
+ AT_WRITE_REG(hw, REG_VPD_CAP, control);
+
+ for (i = 0; i < 10; i++) {
+ msleep(2);
+ control = AT_READ_REG(hw, REG_VPD_CAP);
+ if (control & VPD_CAP_VPD_FLAG)
+ break;
+ }
+ if (control & VPD_CAP_VPD_FLAG) {
+ *p_value = AT_READ_REG(hw, REG_VPD_DATA);
+ return true;
+ }
+ return false; /* timeout */
+}
+
+void atl1e_force_ps(struct atl1e_hw *hw)
+{
+ AT_WRITE_REGW(hw, REG_GPHY_CTRL,
+ GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET);
+}
+
+/*
+ * Reads the adapter's MAC address from the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ */
+int atl1e_read_mac_addr(struct atl1e_hw *hw)
+{
+ int err = 0;
+
+ err = atl1e_get_permanent_address(hw);
+ if (err)
+ return AT_ERR_EEPROM;
+ memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr));
+ return 0;
+}
+
+/*
+ * atl1e_hash_mc_addr
+ * purpose
+ * set hash value for a multicast address
+ */
+u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr)
+{
+ u32 crc32;
+ u32 value = 0;
+ int i;
+
+ crc32 = ether_crc_le(6, mc_addr);
+ for (i = 0; i < 32; i++)
+ value |= (((crc32 >> i) & 1) << (31 - i));
+
+ return value;
+}
+
+/*
+ * Sets the bit in the multicast table corresponding to the hash value.
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ */
+void atl1e_hash_set(struct atl1e_hw *hw, u32 hash_value)
+{
+ u32 hash_bit, hash_reg;
+ u32 mta;
+
+ /*
+ * The hash table is a register array of 2 32-bit registers.
+ * It is treated like an array of 64 bits. We want to set
+ * bit BitArray[hash_value]. So we figure out what register
+ * the bit is in, read it, OR in the new bit, then write
+ * back the new value. The register is selected by the most
+ * significant bit of the hash value, and the bit within that
+ * register by the next five bits.
+ */
+ hash_reg = (hash_value >> 31) & 0x1;
+ hash_bit = (hash_value >> 26) & 0x1F;
+
+ mta = AT_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg);
+
+ mta |= (1 << hash_bit);
+
+ AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta);
+}
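+
+/*
+ * Usage sketch (illustrative only): programming the 64-bit multicast
+ * filter from a net_device's address list, as the driver's multicast
+ * hook does:
+ *
+ * struct netdev_hw_addr *ha;
+ *
+ * AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 0, 0);
+ * AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
+ * netdev_for_each_mc_addr(ha, netdev)
+ * atl1e_hash_set(hw, atl1e_hash_mc_addr(hw, ha->addr));
+ */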
+/*
+ * Reads the value from a PHY register
+ * hw - Struct containing variables accessed by shared code
+ * reg_addr - address of the PHY register to read
+ */
+int atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data)
+{
+ u32 val;
+ int i;
+
+ val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
+ MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW |
+ MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
+
+ AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
+
+ wmb();
+
+ for (i = 0; i < MDIO_WAIT_TIMES; i++) {
+ udelay(2);
+ val = AT_READ_REG(hw, REG_MDIO_CTRL);
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ break;
+ wmb();
+ }
+ if (!(val & (MDIO_START | MDIO_BUSY))) {
+ *phy_data = (u16)val;
+ return 0;
+ }
+
+ return AT_ERR_PHY;
+}
+
+/*
+ * Writes a value to a PHY register
+ * hw - Struct containing variables accessed by shared code
+ * reg_addr - address of the PHY register to write
+ * data - data to write to the PHY
+ */
+int atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data)
+{
+ int i;
+ u32 val;
+
+ val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
+ (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
+ MDIO_SUP_PREAMBLE |
+ MDIO_START |
+ MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
+
+ AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
+ wmb();
+
+ for (i = 0; i < MDIO_WAIT_TIMES; i++) {
+ udelay(2);
+ val = AT_READ_REG(hw, REG_MDIO_CTRL);
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ break;
+ wmb();
+ }
+
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ return 0;
+
+ return AT_ERR_PHY;
+}
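+
+/*
+ * Read-modify-write sketch (illustrative only); callers serialize MDIO
+ * access, e.g. under adapter->mdio_lock:
+ *
+ * u16 bmcr;
+ *
+ * if (atl1e_read_phy_reg(hw, MII_BMCR, &bmcr) == 0)
+ * atl1e_write_phy_reg(hw, MII_BMCR, bmcr | BMCR_ANRESTART);
+ */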
+
+/*
+ * atl1e_init_pcie - init PCIE module
+ */
+static void atl1e_init_pcie(struct atl1e_hw *hw)
+{
+ u32 value;
+ /*
+ * The two lines below are intentionally commented out to save more
+ * power when suspended:
+ *
+ * value = LTSSM_TEST_MODE_DEF;
+ * AT_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value);
+ */
+
+ /* PCIe flow-control mode change */
+ value = AT_READ_REG(hw, 0x1008);
+ value |= 0x8000;
+ AT_WRITE_REG(hw, 0x1008, value);
+}
+/*
+ * Configures PHY autoneg and flow control advertisement settings
+ *
+ * hw - Struct containing variables accessed by shared code
+ */
+static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
+{
+ s32 ret_val;
+ u16 mii_autoneg_adv_reg;
+ u16 mii_1000t_ctrl_reg;
+
+ if (0 != hw->mii_autoneg_adv_reg)
+ return 0;
+ /* Read the MII Auto-Neg Advertisement Register (Address 4/9). */
+ mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
+ mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK;
+
+ /*
+ * Need to parse autoneg_advertised and set up
+ * the appropriate PHY registers. First we will parse for
+ * autoneg_advertised software override. Since we can advertise
+ * a plethora of combinations, we need to check each bit
+ * individually.
+ */
+
+ /*
+ * First we clear all the 10/100 mb speed bits in the Auto-Neg
+ * Advertisement Register (Address 4) and the 1000 mb speed bits in
+ * the 1000Base-T control Register (Address 9).
+ */
+ mii_autoneg_adv_reg &= ~ADVERTISE_ALL;
+ mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;
+
+ /*
+ * Need to parse MediaType and setup the
+ * appropriate PHY registers.
+ */
+ switch (hw->media_type) {
+ case MEDIA_TYPE_AUTO_SENSOR:
+ mii_autoneg_adv_reg |= ADVERTISE_ALL;
+ hw->autoneg_advertised = ADVERTISE_ALL;
+ if (hw->nic_type == athr_l1e) {
+ mii_1000t_ctrl_reg |= ADVERTISE_1000FULL;
+ hw->autoneg_advertised |= ADVERTISE_1000_FULL;
+ }
+ break;
+
+ case MEDIA_TYPE_100M_FULL:
+ mii_autoneg_adv_reg |= ADVERTISE_100FULL;
+ hw->autoneg_advertised = ADVERTISE_100_FULL;
+ break;
+
+ case MEDIA_TYPE_100M_HALF:
+ mii_autoneg_adv_reg |= ADVERTISE_100_HALF;
+ hw->autoneg_advertised = ADVERTISE_100_HALF;
+ break;
+
+ case MEDIA_TYPE_10M_FULL:
+ mii_autoneg_adv_reg |= ADVERTISE_10_FULL;
+ hw->autoneg_advertised = ADVERTISE_10_FULL;
+ break;
+
+ default:
+ mii_autoneg_adv_reg |= ADVERTISE_10_HALF;
+ hw->autoneg_advertised = ADVERTISE_10_HALF;
+ break;
+ }
+
+ /* flow control fixed to enable all */
+ mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
+
+ hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
+ hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
+
+ ret_val = atl1e_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
+ ret_val = atl1e_write_phy_reg(hw, MII_CTRL1000,
+ mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return 0;
+}
+
+
+/*
+ * Resets the PHY and makes all configuration take effect
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sets bits 15 and 12 of the MII control register (for the F001 bug)
+ */
+int atl1e_phy_commit(struct atl1e_hw *hw)
+{
+ struct atl1e_adapter *adapter = hw->adapter;
+ int ret_val;
+ u16 phy_data;
+
+ phy_data = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
+
+ ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data);
+ if (ret_val) {
+ u32 val;
+ int i;
+ /**************************************
+ * pcie serdes link may be down !
+ **************************************/
+ for (i = 0; i < 25; i++) {
+ msleep(1);
+ val = AT_READ_REG(hw, REG_MDIO_CTRL);
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ break;
+ }
+
+ if (0 != (val & (MDIO_START | MDIO_BUSY))) {
+ netdev_err(adapter->netdev,
+ "pcie linkdown at least for 25ms\n");
+ return ret_val;
+ }
+
+ netdev_err(adapter->netdev, "pcie linkup after %d ms\n", i);
+ }
+ return 0;
+}
+
+int atl1e_phy_init(struct atl1e_hw *hw)
+{
+ struct atl1e_adapter *adapter = hw->adapter;
+ s32 ret_val;
+ u16 phy_val;
+
+ if (hw->phy_configured) {
+ if (hw->re_autoneg) {
+ hw->re_autoneg = false;
+ return atl1e_restart_autoneg(hw);
+ }
+ return 0;
+ }
+
+ /* RESET GPHY Core */
+ AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT);
+ msleep(2);
+ AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
+ GPHY_CTRL_EXT_RESET);
+ msleep(2);
+
+ /* patches */
+ /* p1. enable hibernation mode */
+ ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0xB);
+ if (ret_val)
+ return ret_val;
+ ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0xBC00);
+ if (ret_val)
+ return ret_val;
+ /* p2. set Class A/B for all modes */
+ ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+ phy_val = 0x02ef;
+ /* remove Class AB */
+ /* phy_val = hw->emi_ca ? 0x02ef : 0x02df; */
+ ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, phy_val);
+ if (ret_val)
+ return ret_val;
+ /* p3. 10B ??? */
+ ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x12);
+ if (ret_val)
+ return ret_val;
+ ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x4C04);
+ if (ret_val)
+ return ret_val;
+ /* p4. 1000T power */
+ ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x4);
+ if (ret_val)
+ return ret_val;
+ ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x8BBB);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x5);
+ if (ret_val)
+ return ret_val;
+ ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x2C46);
+ if (ret_val)
+ return ret_val;
+
+ msleep(1);
+
+ /* Enable PHY link change interrupt */
+ ret_val = atl1e_write_phy_reg(hw, MII_INT_CTRL, 0xC00);
+ if (ret_val) {
+ netdev_err(adapter->netdev,
+ "Error enable PHY linkChange Interrupt\n");
+ return ret_val;
+ }
+ /* setup AutoNeg parameters */
+ ret_val = atl1e_phy_setup_autoneg_adv(hw);
+ if (ret_val) {
+ netdev_err(adapter->netdev,
+ "Error Setting up Auto-Negotiation\n");
+ return ret_val;
+ }
+ /* SW reset & enable autoneg to restart autoneg */
+ netdev_dbg(adapter->netdev, "Restarting Auto-Negotiation\n");
+ ret_val = atl1e_phy_commit(hw);
+ if (ret_val) {
+ netdev_err(adapter->netdev, "Error resetting the phy\n");
+ return ret_val;
+ }
+
+ hw->phy_configured = true;
+
+ return 0;
+}
+
+/*
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ * hw - Struct containing variables accessed by shared code
+ * return : 0 or idle status (if error)
+ */
+int atl1e_reset_hw(struct atl1e_hw *hw)
+{
+ struct atl1e_adapter *adapter = hw->adapter;
+ struct pci_dev *pdev = adapter->pdev;
+
+ u32 idle_status_data = 0;
+ u16 pci_cfg_cmd_word = 0;
+ int timeout = 0;
+
+ /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
+ pci_read_config_word(pdev, PCI_REG_COMMAND, &pci_cfg_cmd_word);
+ if ((pci_cfg_cmd_word & (CMD_IO_SPACE |
+ CMD_MEMORY_SPACE | CMD_BUS_MASTER))
+ != (CMD_IO_SPACE | CMD_MEMORY_SPACE | CMD_BUS_MASTER)) {
+ pci_cfg_cmd_word |= (CMD_IO_SPACE |
+ CMD_MEMORY_SPACE | CMD_BUS_MASTER);
+ pci_write_config_word(pdev, PCI_REG_COMMAND, pci_cfg_cmd_word);
+ }
+
+ /*
+ * Issue Soft Reset to the MAC. This will reset the chip's
+ * transmit, receive, DMA. It will not affect
+ * the current PCI configuration. The global reset bit is self-
+ * clearing, and should clear within a microsecond.
+ */
+ AT_WRITE_REG(hw, REG_MASTER_CTRL,
+ MASTER_CTRL_LED_MODE | MASTER_CTRL_SOFT_RST);
+ wmb();
+ msleep(1);
+
+ /* Wait at least 10ms for All module to be Idle */
+ for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
+ idle_status_data = AT_READ_REG(hw, REG_IDLE_STATUS);
+ if (idle_status_data == 0)
+ break;
+ msleep(1);
+ cpu_relax();
+ }
+
+ if (timeout >= AT_HW_MAX_IDLE_DELAY) {
+ netdev_err(adapter->netdev,
+ "MAC state machine can't be idle since disabled for 10ms second\n");
+ return AT_ERR_TIMEOUT;
+ }
+
+ return 0;
+}
+
+
+/*
+ * Performs basic configuration of the adapter.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * Assumes that the controller has previously been reset and is in a
+ * post-reset uninitialized state. Initializes the multicast table
+ * and calls routines to set up the link.
+ * Leaves the transmit and receive units disabled and uninitialized.
+ */
+int atl1e_init_hw(struct atl1e_hw *hw)
+{
+ s32 ret_val = 0;
+
+ atl1e_init_pcie(hw);
+
+ /* Zero out the Multicast HASH table */
+ /* clear the old settings from the multicast hash table */
+ AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
+ AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
+
+ ret_val = atl1e_phy_init(hw);
+
+ return ret_val;
+}
+
+/*
+ * Detects the current speed and duplex settings of the hardware.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * speed - Speed of the connection
+ * duplex - Duplex setting of the connection
+ */
+int atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex)
+{
+ int err;
+ u16 phy_data;
+
+ /* Read PHY Specific Status Register (17) */
+ err = atl1e_read_phy_reg(hw, MII_AT001_PSSR, &phy_data);
+ if (err)
+ return err;
+
+ if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED))
+ return AT_ERR_PHY_RES;
+
+ switch (phy_data & MII_AT001_PSSR_SPEED) {
+ case MII_AT001_PSSR_1000MBS:
+ *speed = SPEED_1000;
+ break;
+ case MII_AT001_PSSR_100MBS:
+ *speed = SPEED_100;
+ break;
+ case MII_AT001_PSSR_10MBS:
+ *speed = SPEED_10;
+ break;
+ default:
+ return AT_ERR_PHY_SPEED;
+ }
+
+ if (phy_data & MII_AT001_PSSR_DPLX)
+ *duplex = FULL_DUPLEX;
+ else
+ *duplex = HALF_DUPLEX;
+
+ return 0;
+}
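+
+/*
+ * Typical use (sketch): once link is reported up, resolve the negotiated
+ * parameters before configuring the MAC:
+ *
+ * u16 speed, duplex;
+ *
+ * if (atl1e_get_speed_and_duplex(hw, &speed, &duplex) == 0) {
+ * adapter->link_speed = speed;
+ * adapter->link_duplex = duplex;
+ * }
+ */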
+
+int atl1e_restart_autoneg(struct atl1e_hw *hw)
+{
+ int err = 0;
+
+ err = atl1e_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
+ if (err)
+ return err;
+
+ if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
+ err = atl1e_write_phy_reg(hw, MII_CTRL1000,
+ hw->mii_1000t_ctrl_reg);
+ if (err)
+ return err;
+ }
+
+ err = atl1e_write_phy_reg(hw, MII_BMCR,
+ BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
+ return err;
+}
+
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h
new file mode 100644
index 000000000000..74df16aef793
--- /dev/null
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h
@@ -0,0 +1,690 @@
+/*
+ * Copyright(c) 2007 Atheros Corporation. All rights reserved.
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _ATHL1E_HW_H_
+#define _ATHL1E_HW_H_
+
+#include <linux/types.h>
+#include <linux/mii.h>
+
+struct atl1e_adapter;
+struct atl1e_hw;
+
+/* function prototype */
+s32 atl1e_reset_hw(struct atl1e_hw *hw);
+s32 atl1e_read_mac_addr(struct atl1e_hw *hw);
+s32 atl1e_init_hw(struct atl1e_hw *hw);
+s32 atl1e_phy_commit(struct atl1e_hw *hw);
+s32 atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex);
+u32 atl1e_auto_get_fc(struct atl1e_adapter *adapter, u16 duplex);
+u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr);
+void atl1e_hash_set(struct atl1e_hw *hw, u32 hash_value);
+s32 atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data);
+s32 atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data);
+s32 atl1e_validate_mdi_setting(struct atl1e_hw *hw);
+void atl1e_hw_set_mac_addr(struct atl1e_hw *hw);
+bool atl1e_read_eeprom(struct atl1e_hw *hw, u32 offset, u32 *p_value);
+bool atl1e_write_eeprom(struct atl1e_hw *hw, u32 offset, u32 value);
+s32 atl1e_phy_enter_power_saving(struct atl1e_hw *hw);
+s32 atl1e_phy_leave_power_saving(struct atl1e_hw *hw);
+s32 atl1e_phy_init(struct atl1e_hw *hw);
+int atl1e_check_eeprom_exist(struct atl1e_hw *hw);
+void atl1e_force_ps(struct atl1e_hw *hw);
+s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
+
+/* register definition */
+#define REG_PM_CTRLSTAT 0x44
+
+#define REG_PCIE_CAP_LIST 0x58
+
+#define REG_DEVICE_CAP 0x5C
+#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7
+#define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0
+
+#define REG_DEVICE_CTRL 0x60
+#define DEVICE_CTRL_MAX_PAYLOAD_MASK 0x7
+#define DEVICE_CTRL_MAX_PAYLOAD_SHIFT 5
+#define DEVICE_CTRL_MAX_RREQ_SZ_MASK 0x7
+#define DEVICE_CTRL_MAX_RREQ_SZ_SHIFT 12
+
+#define REG_VPD_CAP 0x6C
+#define VPD_CAP_ID_MASK 0xff
+#define VPD_CAP_ID_SHIFT 0
+#define VPD_CAP_NEXT_PTR_MASK 0xFF
+#define VPD_CAP_NEXT_PTR_SHIFT 8
+#define VPD_CAP_VPD_ADDR_MASK 0x7FFF
+#define VPD_CAP_VPD_ADDR_SHIFT 16
+#define VPD_CAP_VPD_FLAG 0x80000000
+
+#define REG_VPD_DATA 0x70
+
+#define REG_SPI_FLASH_CTRL 0x200
+#define SPI_FLASH_CTRL_STS_NON_RDY 0x1
+#define SPI_FLASH_CTRL_STS_WEN 0x2
+#define SPI_FLASH_CTRL_STS_WPEN 0x80
+#define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF
+#define SPI_FLASH_CTRL_DEV_STS_SHIFT 0
+#define SPI_FLASH_CTRL_INS_MASK 0x7
+#define SPI_FLASH_CTRL_INS_SHIFT 8
+#define SPI_FLASH_CTRL_START 0x800
+#define SPI_FLASH_CTRL_EN_VPD 0x2000
+#define SPI_FLASH_CTRL_LDSTART 0x8000
+#define SPI_FLASH_CTRL_CS_HI_MASK 0x3
+#define SPI_FLASH_CTRL_CS_HI_SHIFT 16
+#define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3
+#define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18
+#define SPI_FLASH_CTRL_CLK_LO_MASK 0x3
+#define SPI_FLASH_CTRL_CLK_LO_SHIFT 20
+#define SPI_FLASH_CTRL_CLK_HI_MASK 0x3
+#define SPI_FLASH_CTRL_CLK_HI_SHIFT 22
+#define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3
+#define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24
+#define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3
+#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26
+#define SPI_FLASH_CTRL_WAIT_READY 0x10000000
+
+#define REG_SPI_ADDR 0x204
+
+#define REG_SPI_DATA 0x208
+
+#define REG_SPI_FLASH_CONFIG 0x20C
+#define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF
+#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0
+#define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3
+#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24
+#define SPI_FLASH_CONFIG_LD_EXIST 0x4000000
+
+
+#define REG_SPI_FLASH_OP_PROGRAM 0x210
+#define REG_SPI_FLASH_OP_SC_ERASE 0x211
+#define REG_SPI_FLASH_OP_CHIP_ERASE 0x212
+#define REG_SPI_FLASH_OP_RDID 0x213
+#define REG_SPI_FLASH_OP_WREN 0x214
+#define REG_SPI_FLASH_OP_RDSR 0x215
+#define REG_SPI_FLASH_OP_WRSR 0x216
+#define REG_SPI_FLASH_OP_READ 0x217
+
+#define REG_TWSI_CTRL 0x218
+#define TWSI_CTRL_LD_OFFSET_MASK 0xFF
+#define TWSI_CTRL_LD_OFFSET_SHIFT 0
+#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7
+#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
+#define TWSI_CTRL_SW_LDSTART 0x800
+#define TWSI_CTRL_HW_LDSTART 0x1000
+#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
+#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
+#define TWSI_CTRL_LD_EXIST 0x400000
+#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
+#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23
+#define TWSI_CTRL_FREQ_SEL_100K 0
+#define TWSI_CTRL_FREQ_SEL_200K 1
+#define TWSI_CTRL_FREQ_SEL_300K 2
+#define TWSI_CTRL_FREQ_SEL_400K 3
+#define TWSI_CTRL_SMB_SLV_ADDR
+#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3
+#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24
+
+
+#define REG_PCIE_DEV_MISC_CTRL 0x21C
+#define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2
+#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1
+#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4
+#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8
+#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10
+
+#define REG_PCIE_PHYMISC 0x1000
+#define PCIE_PHYMISC_FORCE_RCV_DET 0x4
+
+#define REG_LTSSM_TEST_MODE 0x12FC
+#define LTSSM_TEST_MODE_DEF 0xE000
+
+/* Selene Master Control Register */
+#define REG_MASTER_CTRL 0x1400
+#define MASTER_CTRL_SOFT_RST 0x1
+#define MASTER_CTRL_MTIMER_EN 0x2
+#define MASTER_CTRL_ITIMER_EN 0x4
+#define MASTER_CTRL_MANUAL_INT 0x8
+#define MASTER_CTRL_ITIMER2_EN 0x20
+#define MASTER_CTRL_INT_RDCLR 0x40
+#define MASTER_CTRL_LED_MODE 0x200
+#define MASTER_CTRL_REV_NUM_SHIFT 16
+#define MASTER_CTRL_REV_NUM_MASK 0xff
+#define MASTER_CTRL_DEV_ID_SHIFT 24
+#define MASTER_CTRL_DEV_ID_MASK 0xff
+
+/* Timer Initial Value Register */
+#define REG_MANUAL_TIMER_INIT 0x1404
+
+
+/* IRQ ModeratorTimer Initial Value Register */
+#define REG_IRQ_MODU_TIMER_INIT 0x1408 /* w */
+#define REG_IRQ_MODU_TIMER2_INIT 0x140A /* w */
+
+
+#define REG_GPHY_CTRL 0x140C
+#define GPHY_CTRL_EXT_RESET 1
+#define GPHY_CTRL_PIPE_MOD 2
+#define GPHY_CTRL_TEST_MODE_MASK 3
+#define GPHY_CTRL_TEST_MODE_SHIFT 2
+#define GPHY_CTRL_BERT_START 0x10
+#define GPHY_CTRL_GATE_25M_EN 0x20
+#define GPHY_CTRL_LPW_EXIT 0x40
+#define GPHY_CTRL_PHY_IDDQ 0x80
+#define GPHY_CTRL_PHY_IDDQ_DIS 0x100
+#define GPHY_CTRL_PCLK_SEL_DIS 0x200
+#define GPHY_CTRL_HIB_EN 0x400
+#define GPHY_CTRL_HIB_PULSE 0x800
+#define GPHY_CTRL_SEL_ANA_RST 0x1000
+#define GPHY_CTRL_PHY_PLL_ON 0x2000
+#define GPHY_CTRL_PWDOWN_HW 0x4000
+#define GPHY_CTRL_DEFAULT (\
+ GPHY_CTRL_PHY_PLL_ON |\
+ GPHY_CTRL_SEL_ANA_RST |\
+ GPHY_CTRL_HIB_PULSE |\
+ GPHY_CTRL_HIB_EN)
+
+#define GPHY_CTRL_PW_WOL_DIS (\
+ GPHY_CTRL_PHY_PLL_ON |\
+ GPHY_CTRL_SEL_ANA_RST |\
+ GPHY_CTRL_HIB_PULSE |\
+ GPHY_CTRL_HIB_EN |\
+ GPHY_CTRL_PWDOWN_HW |\
+ GPHY_CTRL_PCLK_SEL_DIS |\
+ GPHY_CTRL_PHY_IDDQ)
+
+/* IRQ Anti-Lost Timer Initial Value Register */
+#define REG_CMBDISDMA_TIMER 0x140E
+
+
+/* Block IDLE Status Register */
+#define REG_IDLE_STATUS 0x1410
+#define IDLE_STATUS_RXMAC 1 /* 1: RXMAC state machine is in non-IDLE state. 0: RXMAC is idling */
+#define IDLE_STATUS_TXMAC 2 /* 1: TXMAC state machine is in non-IDLE state. 0: TXMAC is idling */
+#define IDLE_STATUS_RXQ 4 /* 1: RXQ state machine is in non-IDLE state. 0: RXQ is idling */
+#define IDLE_STATUS_TXQ 8 /* 1: TXQ state machine is in non-IDLE state. 0: TXQ is idling */
+#define IDLE_STATUS_DMAR 0x10 /* 1: DMAR state machine is in non-IDLE state. 0: DMAR is idling */
+#define IDLE_STATUS_DMAW 0x20 /* 1: DMAW state machine is in non-IDLE state. 0: DMAW is idling */
+#define IDLE_STATUS_SMB 0x40 /* 1: SMB state machine is in non-IDLE state. 0: SMB is idling */
+#define IDLE_STATUS_CMB 0x80 /* 1: CMB state machine is in non-IDLE state. 0: CMB is idling */
+
+/* MDIO Control Register */
+#define REG_MDIO_CTRL 0x1414
+#define MDIO_DATA_MASK 0xffff /* On MDIO write, the 16-bit control data to write to PHY MII management register */
+#define MDIO_DATA_SHIFT 0 /* On MDIO read, the 16-bit status data that was read from the PHY MII management register*/
+#define MDIO_REG_ADDR_MASK 0x1f /* MDIO register address */
+#define MDIO_REG_ADDR_SHIFT 16
+#define MDIO_RW 0x200000 /* 1: read, 0: write */
+#define MDIO_SUP_PREAMBLE 0x400000 /* Suppress preamble */
+#define MDIO_START 0x800000 /* Write 1 to initiate the MDIO master. And this bit is self cleared after one cycle*/
+#define MDIO_CLK_SEL_SHIFT 24
+#define MDIO_CLK_25_4 0
+#define MDIO_CLK_25_6 2
+#define MDIO_CLK_25_8 3
+#define MDIO_CLK_25_10 4
+#define MDIO_CLK_25_14 5
+#define MDIO_CLK_25_20 6
+#define MDIO_CLK_25_28 7
+#define MDIO_BUSY 0x8000000
+#define MDIO_AP_EN 0x10000000
+#define MDIO_WAIT_TIMES 10
+
+/* MII PHY Status Register */
+#define REG_PHY_STATUS 0x1418
+#define PHY_STATUS_100M 0x20000
+#define PHY_STATUS_EMI_CA 0x40000
+
+/* BIST Control and Status Register0 (for the Packet Memory) */
+#define REG_BIST0_CTRL 0x141c
+#define BIST0_NOW 0x1 /* 1: To trigger BIST0 logic. This bit stays high during the */
+/* BIST process and reset to zero when BIST is done */
+#define BIST0_SRAM_FAIL 0x2 /* 1: The SRAM failure is un-repairable because it has address */
+/* decoder failure or more than 1 cell stuck-to-x failure */
+#define BIST0_FUSE_FLAG 0x4 /* 1: Indicating one cell has been fixed */
+
+/* BIST Control and Status Register1(for the retry buffer of PCI Express) */
+#define REG_BIST1_CTRL 0x1420
+#define BIST1_NOW 0x1 /* 1: To trigger BIST1 logic. This bit stays high during the */
+/* BIST process and reset to zero when BIST is done */
+#define BIST1_SRAM_FAIL 0x2 /* 1: The SRAM failure is un-repairable because it has address */
+/* decoder failure or more than 1 cell stuck-to-x failure.*/
+#define BIST1_FUSE_FLAG 0x4
+
+/* SerDes Lock Detect Control and Status Register */
+#define REG_SERDES_LOCK 0x1424
+#define SERDES_LOCK_DETECT 1 /* 1: SerDes lock detected . This signal comes from Analog SerDes */
+#define SERDES_LOCK_DETECT_EN 2 /* 1: Enable SerDes Lock detect function */
+
+/* MAC Control Register */
+#define REG_MAC_CTRL 0x1480
+#define MAC_CTRL_TX_EN 1 /* 1: Transmit Enable */
+#define MAC_CTRL_RX_EN 2 /* 1: Receive Enable */
+#define MAC_CTRL_TX_FLOW 4 /* 1: Transmit Flow Control Enable */
+#define MAC_CTRL_RX_FLOW 8 /* 1: Receive Flow Control Enable */
+#define MAC_CTRL_LOOPBACK 0x10 /* 1: Loop back at G/MII Interface */
+#define MAC_CTRL_DUPLX 0x20 /* 1: Full-duplex mode 0: Half-duplex mode */
+#define MAC_CTRL_ADD_CRC 0x40 /* 1: Instruct MAC to attach CRC on all egress Ethernet frames */
+#define MAC_CTRL_PAD 0x80 /* 1: Instruct MAC to pad short frames to 60-bytes, and then attach CRC. This bit has higher priority over CRC_EN */
+#define MAC_CTRL_LENCHK 0x100 /* 1: Instruct MAC to check if length field matches the real packet length */
+#define MAC_CTRL_HUGE_EN 0x200 /* 1: receive Jumbo frame enable */
+#define MAC_CTRL_PRMLEN_SHIFT 10 /* Preamble length */
+#define MAC_CTRL_PRMLEN_MASK 0xf
+#define MAC_CTRL_RMV_VLAN 0x4000 /* 1: to remove VLAN Tag automatically from all receive packets */
+#define MAC_CTRL_PROMIS_EN 0x8000 /* 1: Promiscuous Mode Enable */
+#define MAC_CTRL_TX_PAUSE 0x10000 /* 1: transmit test pause */
+#define MAC_CTRL_SCNT 0x20000 /* 1: shortcut slot time counter */
+#define MAC_CTRL_SRST_TX 0x40000 /* 1: synchronized reset Transmit MAC module */
+#define MAC_CTRL_TX_SIMURST 0x80000 /* 1: transmit simulation reset */
+#define MAC_CTRL_SPEED_SHIFT 20 /* 10: gigabit 01:10M/100M */
+#define MAC_CTRL_SPEED_MASK 0x300000
+#define MAC_CTRL_SPEED_1000 2
+#define MAC_CTRL_SPEED_10_100 1
+#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000 /* 1: transmit maximum backoff (half-duplex test bit) */
+#define MAC_CTRL_TX_HUGE 0x800000 /* 1: transmit huge enable */
+#define MAC_CTRL_RX_CHKSUM_EN 0x1000000 /* 1: RX checksum enable */
+#define MAC_CTRL_MC_ALL_EN 0x2000000 /* 1: upload all multicast frame without error to system */
+#define MAC_CTRL_BC_EN 0x4000000 /* 1: upload all broadcast frame without error to system */
+#define MAC_CTRL_DBG 0x8000000 /* 1: upload all received frame to system (Debug Mode) */
+
+/* MAC IPG/IFG Control Register */
+#define REG_MAC_IPG_IFG 0x1484
+#define MAC_IPG_IFG_IPGT_SHIFT 0 /* Desired back to back inter-packet gap. The default is 96-bit time */
+#define MAC_IPG_IFG_IPGT_MASK 0x7f
+#define MAC_IPG_IFG_MIFG_SHIFT 8 /* Minimum number of IFG to enforce in between RX frames */
+#define MAC_IPG_IFG_MIFG_MASK 0xff /* Frame gap below such IFP is dropped */
+#define MAC_IPG_IFG_IPGR1_SHIFT 16 /* 64bit Carrier-Sense window */
+#define MAC_IPG_IFG_IPGR1_MASK 0x7f
+#define MAC_IPG_IFG_IPGR2_SHIFT 24 /* 96-bit IPG window */
+#define MAC_IPG_IFG_IPGR2_MASK 0x7f
+
+/* MAC STATION ADDRESS */
+#define REG_MAC_STA_ADDR 0x1488
+
+/* Hash table for multicast address */
+#define REG_RX_HASH_TABLE 0x1490
+
+
+/* MAC Half-Duplex Control Register */
+#define REG_MAC_HALF_DUPLX_CTRL 0x1498
+#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 /* Collision Window */
+#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff
+#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12 /* Retransmission maximum, afterwards the packet will be discarded */
+#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf
+#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000 /* 1: Allow the transmission of a packet which has been excessively deferred */
+#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000 /* 1: No back-off on collision, immediately start the retransmission */
+#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 /* 1: No back-off on backpressure, immediately start the transmission after back pressure */
+#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 /* 1: Alternative Binary Exponential Back-off Enabled */
+#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 /* Maximum binary exponential number */
+#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf
+#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24 /* IPG to start JAM for collision based flow control in half-duplex */
+#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf /* mode. In unit of 8-bit time */
+
+/* Maximum Frame Length Control Register */
+#define REG_MTU 0x149c
+
+/* Wake-On-Lan control register */
+#define REG_WOL_CTRL 0x14a0
+#define WOL_PATTERN_EN 0x00000001
+#define WOL_PATTERN_PME_EN 0x00000002
+#define WOL_MAGIC_EN 0x00000004
+#define WOL_MAGIC_PME_EN 0x00000008
+#define WOL_LINK_CHG_EN 0x00000010
+#define WOL_LINK_CHG_PME_EN 0x00000020
+#define WOL_PATTERN_ST 0x00000100
+#define WOL_MAGIC_ST 0x00000200
+#define WOL_LINKCHG_ST 0x00000400
+#define WOL_CLK_SWITCH_EN 0x00008000
+#define WOL_PT0_EN 0x00010000
+#define WOL_PT1_EN 0x00020000
+#define WOL_PT2_EN 0x00040000
+#define WOL_PT3_EN 0x00080000
+#define WOL_PT4_EN 0x00100000
+#define WOL_PT5_EN 0x00200000
+#define WOL_PT6_EN 0x00400000
+/* WOL Length ( 2 DWORD ) */
+#define REG_WOL_PATTERN_LEN 0x14a4
+#define WOL_PT_LEN_MASK 0x7f
+#define WOL_PT0_LEN_SHIFT 0
+#define WOL_PT1_LEN_SHIFT 8
+#define WOL_PT2_LEN_SHIFT 16
+#define WOL_PT3_LEN_SHIFT 24
+#define WOL_PT4_LEN_SHIFT 0
+#define WOL_PT5_LEN_SHIFT 8
+#define WOL_PT6_LEN_SHIFT 16
+
+/* Internal SRAM Partition Register */
+#define REG_SRAM_TRD_ADDR 0x1518
+#define REG_SRAM_TRD_LEN 0x151C
+#define REG_SRAM_RXF_ADDR 0x1520
+#define REG_SRAM_RXF_LEN 0x1524
+#define REG_SRAM_TXF_ADDR 0x1528
+#define REG_SRAM_TXF_LEN 0x152C
+#define REG_SRAM_TCPH_ADDR 0x1530
+#define REG_SRAM_PKTH_ADDR 0x1532
+
+/*
+ * Load Ptr Register
+ * Software sets this bit after the initialization of the head and tail
+ * addresses of all descriptors, as well as the following descriptor
+ * control register, which triggers each function block to load the head
+ * pointer to prepare for the operation. This bit is then self-cleared
+ * after one cycle.
+ */
+#define REG_LOAD_PTR 0x1534
+
+/* Descriptor Control register */
+#define REG_RXF3_BASE_ADDR_HI 0x153C
+#define REG_DESC_BASE_ADDR_HI 0x1540
+#define REG_RXF0_BASE_ADDR_HI 0x1540 /* share with DESC BASE ADDR HI */
+#define REG_HOST_RXF0_PAGE0_LO 0x1544
+#define REG_HOST_RXF0_PAGE1_LO 0x1548
+#define REG_TPD_BASE_ADDR_LO 0x154C
+#define REG_RXF1_BASE_ADDR_HI 0x1550
+#define REG_RXF2_BASE_ADDR_HI 0x1554
+#define REG_HOST_RXFPAGE_SIZE 0x1558
+#define REG_TPD_RING_SIZE 0x155C
+/* RSS about */
+#define REG_RSS_KEY0 0x14B0
+#define REG_RSS_KEY1 0x14B4
+#define REG_RSS_KEY2 0x14B8
+#define REG_RSS_KEY3 0x14BC
+#define REG_RSS_KEY4 0x14C0
+#define REG_RSS_KEY5 0x14C4
+#define REG_RSS_KEY6 0x14C8
+#define REG_RSS_KEY7 0x14CC
+#define REG_RSS_KEY8 0x14D0
+#define REG_RSS_KEY9 0x14D4
+#define REG_IDT_TABLE4 0x14E0
+#define REG_IDT_TABLE5 0x14E4
+#define REG_IDT_TABLE6 0x14E8
+#define REG_IDT_TABLE7 0x14EC
+#define REG_IDT_TABLE0 0x1560
+#define REG_IDT_TABLE1 0x1564
+#define REG_IDT_TABLE2 0x1568
+#define REG_IDT_TABLE3 0x156C
+#define REG_IDT_TABLE REG_IDT_TABLE0
+#define REG_RSS_HASH_VALUE 0x1570
+#define REG_RSS_HASH_FLAG 0x1574
+#define REG_BASE_CPU_NUMBER 0x157C
+
+
+/* TXQ Control Register */
+#define REG_TXQ_CTRL 0x1580
+#define TXQ_CTRL_NUM_TPD_BURST_MASK 0xF
+#define TXQ_CTRL_NUM_TPD_BURST_SHIFT 0
+#define TXQ_CTRL_EN 0x20 /* 1: Enable TXQ */
+#define TXQ_CTRL_ENH_MODE 0x40 /* Performance enhancement mode, in which up to two back-to-back DMA read commands might be dispatched. */
+#define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16 /* Number of data byte to read in a cache-aligned burst. Each SRAM entry is 8-byte in length. */
+#define TXQ_CTRL_TXF_BURST_NUM_MASK 0xffff
+
+/*
+ * Jumbo packet threshold for task offload, in QWORD units; packets
+ * greater than JUMBO_TASK_OFFLOAD_THRESHOLD are not task offloaded.
+ */
+#define REG_TX_EARLY_TH 0x1584
+#define TX_TX_EARLY_TH_MASK 0x7ff
+#define TX_TX_EARLY_TH_SHIFT 0
+
+
+/* RXQ Control Register */
+#define REG_RXQ_CTRL 0x15A0
+#define RXQ_CTRL_PBA_ALIGN_32 0 /* rx-packet alignment */
+#define RXQ_CTRL_PBA_ALIGN_64 1
+#define RXQ_CTRL_PBA_ALIGN_128 2
+#define RXQ_CTRL_PBA_ALIGN_256 3
+#define RXQ_CTRL_Q1_EN 0x10
+#define RXQ_CTRL_Q2_EN 0x20
+#define RXQ_CTRL_Q3_EN 0x40
+#define RXQ_CTRL_IPV6_XSUM_VERIFY_EN 0x80
+#define RXQ_CTRL_HASH_TLEN_SHIFT 8
+#define RXQ_CTRL_HASH_TLEN_MASK 0xFF
+#define RXQ_CTRL_HASH_TYPE_IPV4 0x10000
+#define RXQ_CTRL_HASH_TYPE_IPV4_TCP 0x20000
+#define RXQ_CTRL_HASH_TYPE_IPV6 0x40000
+#define RXQ_CTRL_HASH_TYPE_IPV6_TCP 0x80000
+#define RXQ_CTRL_RSS_MODE_DISABLE 0
+#define RXQ_CTRL_RSS_MODE_SQSINT 0x4000000
+#define RXQ_CTRL_RSS_MODE_MQUESINT 0x8000000
+#define RXQ_CTRL_RSS_MODE_MQUEMINT 0xC000000
+#define RXQ_CTRL_NIP_QUEUE_SEL_TBL 0x10000000
+#define RXQ_CTRL_HASH_ENABLE 0x20000000
+#define RXQ_CTRL_CUT_THRU_EN 0x40000000
+#define RXQ_CTRL_EN 0x80000000
+
+/* Rx jumbo packet threshold and rrd retirement timer */
+#define REG_RXQ_JMBOSZ_RRDTIM 0x15A4
+/*
+ * Jumbo packet threshold for non-VLAN packet, in QWORD (64-bit) unit.
+ * When the packet length greater than or equal to this value, RXQ
+ * shall start cut-through forwarding of the received packet.
+ */
+#define RXQ_JMBOSZ_TH_MASK 0x7ff
+#define RXQ_JMBOSZ_TH_SHIFT 0
+/* RRD retirement timer, decremented by 1 every 512 ns */
+#define RXQ_JMBO_LKAH_MASK 0xf
+#define RXQ_JMBO_LKAH_SHIFT 11
+
+/* RXF flow control register */
+#define REG_RXQ_RXF_PAUSE_THRESH 0x15A8
+#define RXQ_RXF_PAUSE_TH_HI_SHIFT 0
+#define RXQ_RXF_PAUSE_TH_HI_MASK 0xfff
+#define RXQ_RXF_PAUSE_TH_LO_SHIFT 16
+#define RXQ_RXF_PAUSE_TH_LO_MASK 0xfff
+
+
+/* DMA Engine Control Register */
+#define REG_DMA_CTRL 0x15C0
+#define DMA_CTRL_DMAR_IN_ORDER 0x1
+#define DMA_CTRL_DMAR_ENH_ORDER 0x2
+#define DMA_CTRL_DMAR_OUT_ORDER 0x4
+#define DMA_CTRL_RCB_VALUE 0x8
+#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4
+#define DMA_CTRL_DMAR_BURST_LEN_MASK 7
+#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7
+#define DMA_CTRL_DMAW_BURST_LEN_MASK 7
+#define DMA_CTRL_DMAR_REQ_PRI 0x400
+#define DMA_CTRL_DMAR_DLY_CNT_MASK 0x1F
+#define DMA_CTRL_DMAR_DLY_CNT_SHIFT 11
+#define DMA_CTRL_DMAW_DLY_CNT_MASK 0xF
+#define DMA_CTRL_DMAW_DLY_CNT_SHIFT 16
+#define DMA_CTRL_TXCMB_EN 0x100000
+#define DMA_CTRL_RXCMB_EN 0x200000
+
+
+/* CMB/SMB Control Register */
+#define REG_SMB_STAT_TIMER 0x15C4
+#define REG_TRIG_RRD_THRESH 0x15CA
+#define REG_TRIG_TPD_THRESH 0x15C8
+#define REG_TRIG_TXTIMER 0x15CC
+#define REG_TRIG_RXTIMER 0x15CE
+
+/* HOST RXF Page 1,2,3 address */
+#define REG_HOST_RXF1_PAGE0_LO 0x15D0
+#define REG_HOST_RXF1_PAGE1_LO 0x15D4
+#define REG_HOST_RXF2_PAGE0_LO 0x15D8
+#define REG_HOST_RXF2_PAGE1_LO 0x15DC
+#define REG_HOST_RXF3_PAGE0_LO 0x15E0
+#define REG_HOST_RXF3_PAGE1_LO 0x15E4
+
+/* Mail box */
+#define REG_MB_RXF1_RADDR 0x15B4
+#define REG_MB_RXF2_RADDR 0x15B8
+#define REG_MB_RXF3_RADDR 0x15BC
+#define REG_MB_TPD_PROD_IDX 0x15F0
+
+/* RXF-Page 0-3 PageNo & Valid bit */
+#define REG_HOST_RXF0_PAGE0_VLD 0x15F4
+#define HOST_RXF_VALID 1
+#define HOST_RXF_PAGENO_SHIFT 1
+#define HOST_RXF_PAGENO_MASK 0x7F
+#define REG_HOST_RXF0_PAGE1_VLD 0x15F5
+#define REG_HOST_RXF1_PAGE0_VLD 0x15F6
+#define REG_HOST_RXF1_PAGE1_VLD 0x15F7
+#define REG_HOST_RXF2_PAGE0_VLD 0x15F8
+#define REG_HOST_RXF2_PAGE1_VLD 0x15F9
+#define REG_HOST_RXF3_PAGE0_VLD 0x15FA
+#define REG_HOST_RXF3_PAGE1_VLD 0x15FB
+
+/* Interrupt Status Register */
+#define REG_ISR 0x1600
+#define ISR_SMB 1
+#define ISR_TIMER 2 /* Interrupt when Timer is counted down to zero */
+/*
+ * Software manual interrupt, for debug. Set when SW_MAN_INT_EN is set
+ * in Table 51 Selene Master Control Register (Offset 0x1400).
+ */
+#define ISR_MANUAL 4
+#define ISR_HW_RXF_OV 8 /* RXF overflow interrupt */
+#define ISR_HOST_RXF0_OV 0x10
+#define ISR_HOST_RXF1_OV 0x20
+#define ISR_HOST_RXF2_OV 0x40
+#define ISR_HOST_RXF3_OV 0x80
+#define ISR_TXF_UN 0x100
+#define ISR_RX0_PAGE_FULL 0x200
+#define ISR_DMAR_TO_RST 0x400
+#define ISR_DMAW_TO_RST 0x800
+#define ISR_GPHY 0x1000
+#define ISR_TX_CREDIT 0x2000
+#define ISR_GPHY_LPW 0x4000 /* GPHY low power state interrupt */
+#define ISR_RX_PKT 0x10000 /* One packet received, triggered by RFD */
+#define ISR_TX_PKT 0x20000 /* One packet transmitted, triggered by TPD */
+#define ISR_TX_DMA 0x40000
+#define ISR_RX_PKT_1 0x80000
+#define ISR_RX_PKT_2 0x100000
+#define ISR_RX_PKT_3 0x200000
+#define ISR_MAC_RX 0x400000
+#define ISR_MAC_TX 0x800000
+#define ISR_UR_DETECTED 0x1000000
+#define ISR_FERR_DETECTED 0x2000000
+#define ISR_NFERR_DETECTED 0x4000000
+#define ISR_CERR_DETECTED 0x8000000
+#define ISR_PHY_LINKDOWN 0x10000000
+#define ISR_DIS_INT 0x80000000
+
+
+/* Interrupt Mask Register */
+#define REG_IMR 0x1604
+
+
+#define IMR_NORMAL_MASK (\
+ ISR_SMB |\
+ ISR_TXF_UN |\
+ ISR_HW_RXF_OV |\
+ ISR_HOST_RXF0_OV|\
+ ISR_MANUAL |\
+ ISR_GPHY |\
+ ISR_GPHY_LPW |\
+ ISR_DMAR_TO_RST |\
+ ISR_DMAW_TO_RST |\
+ ISR_PHY_LINKDOWN|\
+ ISR_RX_PKT |\
+ ISR_TX_PKT)
+
+#define ISR_TX_EVENT (ISR_TXF_UN | ISR_TX_PKT)
+#define ISR_RX_EVENT (ISR_HOST_RXF0_OV | ISR_HW_RXF_OV | ISR_RX_PKT)
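+
+/*
+ * Consumption sketch (see atl1e_intr() in atl1e_main.c): the handler
+ * latches REG_ISR once; an ISR_TX_EVENT bit triggers TPD reclaim and an
+ * ISR_RX_EVENT bit schedules the NAPI poll.
+ */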
+
+#define REG_MAC_RX_STATUS_BIN 0x1700
+#define REG_MAC_RX_STATUS_END 0x175c
+#define REG_MAC_TX_STATUS_BIN 0x1760
+#define REG_MAC_TX_STATUS_END 0x17c0
+
+/* Hardware Offset Register */
+#define REG_HOST_RXF0_PAGEOFF 0x1800
+#define REG_TPD_CONS_IDX 0x1804
+#define REG_HOST_RXF1_PAGEOFF 0x1808
+#define REG_HOST_RXF2_PAGEOFF 0x180C
+#define REG_HOST_RXF3_PAGEOFF 0x1810
+
+/* RXF-Page 0-3 Offset DMA Address */
+#define REG_HOST_RXF0_MB0_LO 0x1820
+#define REG_HOST_RXF0_MB1_LO 0x1824
+#define REG_HOST_RXF1_MB0_LO 0x1828
+#define REG_HOST_RXF1_MB1_LO 0x182C
+#define REG_HOST_RXF2_MB0_LO 0x1830
+#define REG_HOST_RXF2_MB1_LO 0x1834
+#define REG_HOST_RXF3_MB0_LO 0x1838
+#define REG_HOST_RXF3_MB1_LO 0x183C
+
+/* Tpd CMB DMA Address */
+#define REG_HOST_TX_CMB_LO 0x1840
+#define REG_HOST_SMB_ADDR_LO 0x1844
+
+/* DEBUG ADDR */
+#define REG_DEBUG_DATA0 0x1900
+#define REG_DEBUG_DATA1 0x1904
+
+/***************************** MII definition ***************************************/
+/* PHY Common Register */
+#define MII_AT001_PSCR 0x10
+#define MII_AT001_PSSR 0x11
+#define MII_INT_CTRL 0x12
+#define MII_INT_STATUS 0x13
+#define MII_SMARTSPEED 0x14
+#define MII_LBRERROR 0x18
+#define MII_RESV2 0x1a
+
+#define MII_DBG_ADDR 0x1D
+#define MII_DBG_DATA 0x1E
+
+/* Autoneg Advertisement Register */
+#define MII_AR_DEFAULT_CAP_MASK 0
+
+/* 1000BASE-T Control Register */
+#define MII_AT001_CR_1000T_SPEED_MASK \
+ (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
+#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK MII_AT001_CR_1000T_SPEED_MASK
+
+/* AT001 PHY Specific Control Register */
+#define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
+#define MII_AT001_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define MII_AT001_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
+#define MII_AT001_PSCR_MAC_POWERDOWN 0x0008
+#define MII_AT001_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low,
+ * 0=CLK125 toggling
+ */
+#define MII_AT001_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
+/* Manual MDI configuration */
+#define MII_AT001_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
+#define MII_AT001_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover,
+ * 100BASE-TX/10BASE-T:
+ * MDI Mode
+ */
+#define MII_AT001_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled
+ * all speeds.
+ */
+#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE 0x0080
+/* 1=Enable Extended 10BASE-T distance
+ * (Lower 10BASE-T RX Threshold)
+ * 0=Normal 10BASE-T RX Threshold */
+#define MII_AT001_PSCR_MII_5BIT_ENABLE 0x0100
+/* 1=5-Bit interface in 100BASE-TX
+ * 0=MII interface in 100BASE-TX */
+#define MII_AT001_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */
+#define MII_AT001_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
+#define MII_AT001_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
+#define MII_AT001_PSCR_POLARITY_REVERSAL_SHIFT 1
+#define MII_AT001_PSCR_AUTO_X_MODE_SHIFT 5
+#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
+/* AT001 PHY Specific Status Register */
+#define MII_AT001_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
+#define MII_AT001_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
+#define MII_AT001_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
+#define MII_AT001_PSSR_10MBS 0x0000 /* 00=10Mbs */
+#define MII_AT001_PSSR_100MBS 0x4000 /* 01=100Mbs */
+#define MII_AT001_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
+
+#endif /*_ATHL1E_HW_H_*/
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
new file mode 100644
index 000000000000..97c45a4b855a
--- /dev/null
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -0,0 +1,2557 @@
+/*
+ * Copyright(c) 2007 Atheros Corporation. All rights reserved.
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include "atl1e.h"
+
+#define DRV_VERSION "1.0.0.7-NAPI"
+
+char atl1e_driver_name[] = "ATL1E";
+char atl1e_driver_version[] = DRV_VERSION;
+#define PCI_DEVICE_ID_ATTANSIC_L1E 0x1026
+/*
+ * atl1e_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static DEFINE_PCI_DEVICE_TABLE(atl1e_pci_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)},
+ /* required last entry */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, atl1e_pci_tbl);
+
+MODULE_AUTHOR("Atheros Corporation, <xiong.huang@atheros.com>, Jie Yang <jie.yang@atheros.com>");
+MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter);
+
+static const u16
+atl1e_rx_page_vld_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
+{
+ {REG_HOST_RXF0_PAGE0_VLD, REG_HOST_RXF0_PAGE1_VLD},
+ {REG_HOST_RXF1_PAGE0_VLD, REG_HOST_RXF1_PAGE1_VLD},
+ {REG_HOST_RXF2_PAGE0_VLD, REG_HOST_RXF2_PAGE1_VLD},
+ {REG_HOST_RXF3_PAGE0_VLD, REG_HOST_RXF3_PAGE1_VLD}
+};
+
+static const u16 atl1e_rx_page_hi_addr_regs[AT_MAX_RECEIVE_QUEUE] =
+{
+ REG_RXF0_BASE_ADDR_HI,
+ REG_RXF1_BASE_ADDR_HI,
+ REG_RXF2_BASE_ADDR_HI,
+ REG_RXF3_BASE_ADDR_HI
+};
+
+static const u16
+atl1e_rx_page_lo_addr_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
+{
+ {REG_HOST_RXF0_PAGE0_LO, REG_HOST_RXF0_PAGE1_LO},
+ {REG_HOST_RXF1_PAGE0_LO, REG_HOST_RXF1_PAGE1_LO},
+ {REG_HOST_RXF2_PAGE0_LO, REG_HOST_RXF2_PAGE1_LO},
+ {REG_HOST_RXF3_PAGE0_LO, REG_HOST_RXF3_PAGE1_LO}
+};
+
+static const u16
+atl1e_rx_page_write_offset_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
+{
+ {REG_HOST_RXF0_MB0_LO, REG_HOST_RXF0_MB1_LO},
+ {REG_HOST_RXF1_MB0_LO, REG_HOST_RXF1_MB1_LO},
+ {REG_HOST_RXF2_MB0_LO, REG_HOST_RXF2_MB1_LO},
+ {REG_HOST_RXF3_MB0_LO, REG_HOST_RXF3_MB1_LO}
+};
+
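+/* DMA payload size in bytes for each atl1e_dma_req_* block-size index */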
+static const u16 atl1e_pay_load_size[] = {
+ 128, 256, 512, 1024, 2048, 4096,
+};
+
+/*
+ * atl1e_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ */
+static inline void atl1e_irq_enable(struct atl1e_adapter *adapter)
+{
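+	/* irq_sem counts nested disables: the interrupt mask is restored
+	 * only when the last atl1e_irq_disable() has been balanced */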
+ if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
+ AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
+ AT_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK);
+ AT_WRITE_FLUSH(&adapter->hw);
+ }
+}
+
+/*
+ * atl1e_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ */
+static inline void atl1e_irq_disable(struct atl1e_adapter *adapter)
+{
+ atomic_inc(&adapter->irq_sem);
+ AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
+ AT_WRITE_FLUSH(&adapter->hw);
+ synchronize_irq(adapter->pdev->irq);
+}
+
+/*
+ * atl1e_irq_reset - reset interrupt configuration on the NIC
+ * @adapter: board private structure
+ */
+static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
+{
+ atomic_set(&adapter->irq_sem, 0);
+ AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
+ AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
+ AT_WRITE_FLUSH(&adapter->hw);
+}
+
+/*
+ * atl1e_phy_config - Timer Call-back
+ * @data: pointer to the atl1e_adapter cast into an unsigned long
+ */
+static void atl1e_phy_config(unsigned long data)
+{
+ struct atl1e_adapter *adapter = (struct atl1e_adapter *) data;
+ struct atl1e_hw *hw = &adapter->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->mdio_lock, flags);
+ atl1e_restart_autoneg(hw);
+ spin_unlock_irqrestore(&adapter->mdio_lock, flags);
+}
+
+void atl1e_reinit_locked(struct atl1e_adapter *adapter)
+{
+ WARN_ON(in_interrupt());
+ while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
+ msleep(1);
+ atl1e_down(adapter);
+ atl1e_up(adapter);
+ clear_bit(__AT_RESETTING, &adapter->flags);
+}
+
+static void atl1e_reset_task(struct work_struct *work)
+{
+ struct atl1e_adapter *adapter;
+ adapter = container_of(work, struct atl1e_adapter, reset_task);
+
+ atl1e_reinit_locked(adapter);
+}
+
+static int atl1e_check_link(struct atl1e_adapter *adapter)
+{
+ struct atl1e_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ int err = 0;
+ u16 speed, duplex, phy_data;
+
+	/* BMSR latches link-down events, so read MII_BMSR twice to get the current link state */
+ atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
+ atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
+ if ((phy_data & BMSR_LSTATUS) == 0) {
+ /* link down */
+ if (netif_carrier_ok(netdev)) { /* old link state: Up */
+ u32 value;
+ /* disable rx */
+ value = AT_READ_REG(hw, REG_MAC_CTRL);
+ value &= ~MAC_CTRL_RX_EN;
+ AT_WRITE_REG(hw, REG_MAC_CTRL, value);
+ adapter->link_speed = SPEED_0;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ } else {
+ /* Link Up */
+ err = atl1e_get_speed_and_duplex(hw, &speed, &duplex);
+ if (unlikely(err))
+ return err;
+
+		/* the resolved link parameters follow our advertised settings */
+ if (adapter->link_speed != speed ||
+ adapter->link_duplex != duplex) {
+ adapter->link_speed = speed;
+ adapter->link_duplex = duplex;
+ atl1e_setup_mac_ctrl(adapter);
+ netdev_info(netdev,
+ "NIC Link is Up <%d Mbps %s Duplex>\n",
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full" : "Half");
+ }
+
+ if (!netif_carrier_ok(netdev)) {
+ /* Link down -> Up */
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ }
+ }
+ return 0;
+}
+
+/*
+ * atl1e_link_chg_task - handle link change events out of interrupt context
+ * @netdev: network interface device structure
+ */
+static void atl1e_link_chg_task(struct work_struct *work)
+{
+ struct atl1e_adapter *adapter;
+ unsigned long flags;
+
+ adapter = container_of(work, struct atl1e_adapter, link_chg_task);
+ spin_lock_irqsave(&adapter->mdio_lock, flags);
+ atl1e_check_link(adapter);
+ spin_unlock_irqrestore(&adapter->mdio_lock, flags);
+}
+
+static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u16 phy_data = 0;
+ u16 link_up = 0;
+
+ spin_lock(&adapter->mdio_lock);
+ atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+ atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+ spin_unlock(&adapter->mdio_lock);
+ link_up = phy_data & BMSR_LSTATUS;
+ /* notify upper layer link down ASAP */
+ if (!link_up) {
+ if (netif_carrier_ok(netdev)) {
+ /* old link state: Up */
+ netdev_info(netdev, "NIC Link is Down\n");
+ adapter->link_speed = SPEED_0;
+ netif_stop_queue(netdev);
+ }
+ }
+ schedule_work(&adapter->link_chg_task);
+}
+
+static void atl1e_del_timer(struct atl1e_adapter *adapter)
+{
+ del_timer_sync(&adapter->phy_config_timer);
+}
+
+static void atl1e_cancel_work(struct atl1e_adapter *adapter)
+{
+ cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->link_chg_task);
+}
+
+/*
+ * atl1e_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ */
+static void atl1e_tx_timeout(struct net_device *netdev)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+}
+
+/*
+ * atl1e_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ */
+static void atl1e_set_multi(struct net_device *netdev)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct atl1e_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u32 mac_ctrl_data = 0;
+ u32 hash_value;
+
+ /* Check for Promiscuous and All Multicast modes */
+ mac_ctrl_data = AT_READ_REG(hw, REG_MAC_CTRL);
+
+ if (netdev->flags & IFF_PROMISC) {
+ mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
+ } else if (netdev->flags & IFF_ALLMULTI) {
+ mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
+ mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN;
+ } else {
+ mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
+ }
+
+ AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
+
+ /* clear the old settings from the multicast hash table */
+ AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
+ AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
+
+	/* compute each multicast address's hash value and set it in the hash table */
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atl1e_hash_mc_addr(hw, ha->addr);
+ atl1e_hash_set(hw, hash_value);
+ }
+}
+
+static void __atl1e_vlan_mode(u32 features, u32 *mac_ctrl_data)
+{
+ if (features & NETIF_F_HW_VLAN_RX) {
+ /* enable VLAN tag insert/strip */
+ *mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
+ } else {
+ /* disable VLAN tag insert/strip */
+ *mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN;
+ }
+}
+
+static void atl1e_vlan_mode(struct net_device *netdev, u32 features)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ u32 mac_ctrl_data = 0;
+
+ netdev_dbg(adapter->netdev, "%s\n", __func__);
+
+ atl1e_irq_disable(adapter);
+ mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);
+ __atl1e_vlan_mode(features, &mac_ctrl_data);
+ AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
+ atl1e_irq_enable(adapter);
+}
+
+static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
+{
+ netdev_dbg(adapter->netdev, "%s\n", __func__);
+ atl1e_vlan_mode(adapter->netdev, adapter->netdev->features);
+}
+
+/*
+ * atl1e_set_mac_addr - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl1e_set_mac_addr(struct net_device *netdev, void *p)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
+
+ atl1e_hw_set_mac_addr(&adapter->hw);
+
+ return 0;
+}
+
+static u32 atl1e_fix_features(struct net_device *netdev, u32 features)
+{
+ /*
+ * Since there is no support for separate rx/tx vlan accel
+ * enable/disable make sure tx flag is always in same state as rx.
+ */
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ return features;
+}
+
+static int atl1e_set_features(struct net_device *netdev, u32 features)
+{
+ u32 changed = netdev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ atl1e_vlan_mode(netdev, features);
+
+ return 0;
+}
+
+/*
+ * atl1e_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl1e_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ int old_mtu = netdev->mtu;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
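+	/* e.g. new_mtu = 1500 gives max_frame = 1500 + 14 (ETH_HLEN) +
+	 * 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes */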
+
+ if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
+ (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+ netdev_warn(adapter->netdev, "invalid MTU setting\n");
+ return -EINVAL;
+ }
+ /* set MTU */
+ if (old_mtu != new_mtu && netif_running(netdev)) {
+ while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
+ msleep(1);
+ netdev->mtu = new_mtu;
+ adapter->hw.max_frame_size = new_mtu;
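+		/* the jumbo threshold register counts in 8-byte units,
+		 * rounded up */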
+ adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3;
+ atl1e_down(adapter);
+ atl1e_up(adapter);
+ clear_bit(__AT_RESETTING, &adapter->flags);
+ }
+ return 0;
+}
+
+/*
+ * caller should hold mdio_lock
+ */
+static int atl1e_mdio_read(struct net_device *netdev, int phy_id, int reg_num)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ u16 result;
+
+ atl1e_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result);
+ return result;
+}
+
+static void atl1e_mdio_write(struct net_device *netdev, int phy_id,
+ int reg_num, int val)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
+}
+
+/*
+ * atl1e_mii_ioctl - handle MII register ioctls
+ * @netdev: network interface device structure
+ * @ifr: interface request holding the mii_ioctl_data
+ * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
+ */
+static int atl1e_mii_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+ unsigned long flags;
+ int retval = 0;
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ spin_lock_irqsave(&adapter->mdio_lock, flags);
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = 0;
+ break;
+
+ case SIOCGMIIREG:
+ if (atl1e_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+ &data->val_out)) {
+ retval = -EIO;
+ goto out;
+ }
+ break;
+
+ case SIOCSMIIREG:
+ if (data->reg_num & ~(0x1F)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ netdev_dbg(adapter->netdev, "<atl1e_mii_ioctl> write %x %x\n",
+ data->reg_num, data->val_in);
+ if (atl1e_write_phy_reg(&adapter->hw,
+ data->reg_num, data->val_in)) {
+ retval = -EIO;
+ goto out;
+ }
+ break;
+
+ default:
+ retval = -EOPNOTSUPP;
+ break;
+ }
+out:
+ spin_unlock_irqrestore(&adapter->mdio_lock, flags);
+ return retval;
+
+}
+
+/*
+ * atl1e_ioctl - dispatch ioctl commands
+ * @netdev: network interface device structure
+ * @ifr: interface request
+ * @cmd: ioctl command number
+ */
+static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return atl1e_mii_ioctl(netdev, ifr, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void atl1e_setup_pcicmd(struct pci_dev *pdev)
+{
+ u16 cmd;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ cmd &= ~(PCI_COMMAND_INTX_DISABLE | PCI_COMMAND_IO);
+ cmd |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+
+ /*
+	 * some motherboard BIOS (PXE/EFI) drivers may leave PME asserted
+	 * when they hand control to the OS (Windows/Linux), so clear it
+	 * before the NIC can work normally
+ */
+ pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
+ msleep(1);
+}
+
+/*
+ * atl1e_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ */
+static int __devinit atl1e_alloc_queues(struct atl1e_adapter *adapter)
+{
+ return 0;
+}
+
+/*
+ * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * atl1e_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ */
+static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
+{
+ struct atl1e_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ u32 phy_status_data = 0;
+
+ adapter->wol = 0;
+ adapter->link_speed = SPEED_0; /* hardware init */
+ adapter->link_duplex = FULL_DUPLEX;
+ adapter->num_rx_queues = 1;
+
+ /* PCI config space info */
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_id = pdev->subsystem_device;
+ hw->revision_id = pdev->revision;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
+
+ phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
+ /* nic type */
+ if (hw->revision_id >= 0xF0) {
+ hw->nic_type = athr_l2e_revB;
+ } else {
+ if (phy_status_data & PHY_STATUS_100M)
+ hw->nic_type = athr_l1e;
+ else
+ hw->nic_type = athr_l2e_revA;
+ }
+
+ phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
+
+ if (phy_status_data & PHY_STATUS_EMI_CA)
+ hw->emi_ca = true;
+ else
+ hw->emi_ca = false;
+
+ hw->phy_configured = false;
+ hw->preamble_len = 7;
+ hw->max_frame_size = adapter->netdev->mtu;
+ hw->rx_jumbo_th = (hw->max_frame_size + ETH_HLEN +
+ VLAN_HLEN + ETH_FCS_LEN + 7) >> 3;
+
+ hw->rrs_type = atl1e_rrs_disable;
+ hw->indirect_tab = 0;
+ hw->base_cpu = 0;
+
+	/* the timing defaults below still need to be confirmed against hardware */
+
+ hw->ict = 50000; /* 100ms */
+ hw->smb_timer = 200000; /* 200ms */
+ hw->tpd_burst = 5;
+ hw->rrd_thresh = 1;
+ hw->tpd_thresh = adapter->tx_ring.count / 2;
+ hw->rx_count_down = 4; /* 2us resolution */
+ hw->tx_count_down = hw->imt * 4 / 3;
+ hw->dmar_block = atl1e_dma_req_1024;
+ hw->dmaw_block = atl1e_dma_req_1024;
+ hw->dmar_dly_cnt = 15;
+ hw->dmaw_dly_cnt = 4;
+
+ if (atl1e_alloc_queues(adapter)) {
+ netdev_err(adapter->netdev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ atomic_set(&adapter->irq_sem, 1);
+ spin_lock_init(&adapter->mdio_lock);
+ spin_lock_init(&adapter->tx_lock);
+
+ set_bit(__AT_DOWN, &adapter->flags);
+
+ return 0;
+}
+
+/*
+ * atl1e_clean_tx_ring - Free Tx-skb
+ * @adapter: board private structure
+ */
+static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
+{
+ struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
+ &adapter->tx_ring;
+ struct atl1e_tx_buffer *tx_buffer = NULL;
+ struct pci_dev *pdev = adapter->pdev;
+ u16 index, ring_count;
+
+ if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL)
+ return;
+
+ ring_count = tx_ring->count;
+	/* first unmap DMA buffers */
+ for (index = 0; index < ring_count; index++) {
+ tx_buffer = &tx_ring->tx_buffer[index];
+ if (tx_buffer->dma) {
+ if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
+ pci_unmap_single(pdev, tx_buffer->dma,
+ tx_buffer->length, PCI_DMA_TODEVICE);
+ else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
+ pci_unmap_page(pdev, tx_buffer->dma,
+ tx_buffer->length, PCI_DMA_TODEVICE);
+ tx_buffer->dma = 0;
+ }
+ }
+	/* then free the skbs */
+ for (index = 0; index < ring_count; index++) {
+ tx_buffer = &tx_ring->tx_buffer[index];
+ if (tx_buffer->skb) {
+ dev_kfree_skb_any(tx_buffer->skb);
+ tx_buffer->skb = NULL;
+ }
+ }
+ /* Zero out Tx-buffers */
+ memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) *
+ ring_count);
+ memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) *
+ ring_count);
+}
+
+/*
+ * atl1e_clean_rx_ring - clear out the RX page buffers
+ * @adapter: board private structure
+ */
+static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter)
+{
+ struct atl1e_rx_ring *rx_ring =
+ (struct atl1e_rx_ring *)&adapter->rx_ring;
+ struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc;
+ u16 i, j;
+
+ if (adapter->ring_vir_addr == NULL)
+ return;
+ /* Zero out the descriptor ring */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
+ if (rx_page_desc[i].rx_page[j].addr != NULL) {
+ memset(rx_page_desc[i].rx_page[j].addr, 0,
+ rx_ring->real_page_size);
+ }
+ }
+ }
+}
+
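+/*
+ * All rings share one DMA allocation, laid out as: TPD ring (8-byte
+ * aligned), RXF pages (32-byte aligned), then the TX CMB dword plus one
+ * RX write-offset dword per page; the +7/+31/+3 terms below reserve
+ * worst-case padding for each alignment step.
+ */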
+static void atl1e_cal_ring_size(struct atl1e_adapter *adapter, u32 *ring_size)
+{
+ *ring_size = ((u32)(adapter->tx_ring.count *
+ sizeof(struct atl1e_tpd_desc) + 7
+ /* tx ring, qword align */
+ + adapter->rx_ring.real_page_size * AT_PAGE_NUM_PER_QUEUE *
+ adapter->num_rx_queues + 31
+ /* rx ring, 32 bytes align */
+ + (1 + AT_PAGE_NUM_PER_QUEUE * adapter->num_rx_queues) *
+ sizeof(u32) + 3));
+ /* tx, rx cmd, dword align */
+}
+
+static void atl1e_init_ring_resources(struct atl1e_adapter *adapter)
+{
+ struct atl1e_rx_ring *rx_ring = NULL;
+
+ rx_ring = &adapter->rx_ring;
+
+ rx_ring->real_page_size = adapter->rx_ring.page_size
+ + adapter->hw.max_frame_size
+ + ETH_HLEN + VLAN_HLEN
+ + ETH_FCS_LEN;
+ rx_ring->real_page_size = roundup(rx_ring->real_page_size, 32);
+ atl1e_cal_ring_size(adapter, &adapter->ring_size);
+
+ adapter->ring_vir_addr = NULL;
+ adapter->rx_ring.desc = NULL;
+ rwlock_init(&adapter->tx_ring.tx_lock);
+}
+
+/*
+ * Initialize the ring read / write pointers
+ */
+static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter)
+{
+ struct atl1e_tx_ring *tx_ring = NULL;
+ struct atl1e_rx_ring *rx_ring = NULL;
+ struct atl1e_rx_page_desc *rx_page_desc = NULL;
+ int i, j;
+
+ tx_ring = &adapter->tx_ring;
+ rx_ring = &adapter->rx_ring;
+ rx_page_desc = rx_ring->rx_page_desc;
+
+ tx_ring->next_to_use = 0;
+ atomic_set(&tx_ring->next_to_clean, 0);
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rx_page_desc[i].rx_using = 0;
+ rx_page_desc[i].rx_nxseq = 0;
+ for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
+ *rx_page_desc[i].rx_page[j].write_offset_addr = 0;
+ rx_page_desc[i].rx_page[j].read_offset = 0;
+ }
+ }
+}
+
+/*
+ * atl1e_free_ring_resources - Free Tx / RX descriptor Resources
+ * @adapter: board private structure
+ *
+ * Free all transmit and receive software resources
+ */
+static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ atl1e_clean_tx_ring(adapter);
+ atl1e_clean_rx_ring(adapter);
+
+ if (adapter->ring_vir_addr) {
+ pci_free_consistent(pdev, adapter->ring_size,
+ adapter->ring_vir_addr, adapter->ring_dma);
+ adapter->ring_vir_addr = NULL;
+ }
+
+ if (adapter->tx_ring.tx_buffer) {
+ kfree(adapter->tx_ring.tx_buffer);
+ adapter->tx_ring.tx_buffer = NULL;
+ }
+}
+
+/*
+ * atl1e_setup_ring_resources - allocate Tx / RX descriptor resources
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ */
+static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct atl1e_tx_ring *tx_ring;
+ struct atl1e_rx_ring *rx_ring;
+ struct atl1e_rx_page_desc *rx_page_desc;
+ int size, i, j;
+ u32 offset = 0;
+ int err = 0;
+
+ if (adapter->ring_vir_addr != NULL)
+		return 0; /* already allocated */
+
+ tx_ring = &adapter->tx_ring;
+ rx_ring = &adapter->rx_ring;
+
+ /* real ring DMA buffer */
+
+ size = adapter->ring_size;
+ adapter->ring_vir_addr = pci_alloc_consistent(pdev,
+ adapter->ring_size, &adapter->ring_dma);
+
+ if (adapter->ring_vir_addr == NULL) {
+		netdev_err(adapter->netdev,
+			   "pci_alloc_consistent failed, size = %d\n", size);
+ return -ENOMEM;
+ }
+
+ memset(adapter->ring_vir_addr, 0, adapter->ring_size);
+
+ rx_page_desc = rx_ring->rx_page_desc;
+
+ /* Init TPD Ring */
+ tx_ring->dma = roundup(adapter->ring_dma, 8);
+ offset = tx_ring->dma - adapter->ring_dma;
+ tx_ring->desc = adapter->ring_vir_addr + offset;
+ size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
+ tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL);
+ if (tx_ring->tx_buffer == NULL) {
+		netdev_err(adapter->netdev, "kzalloc failed, size = %d\n",
+ size);
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ /* Init RXF-Pages */
+ offset += (sizeof(struct atl1e_tpd_desc) * tx_ring->count);
+ offset = roundup(offset, 32);
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
+ rx_page_desc[i].rx_page[j].dma =
+ adapter->ring_dma + offset;
+ rx_page_desc[i].rx_page[j].addr =
+ adapter->ring_vir_addr + offset;
+ offset += rx_ring->real_page_size;
+ }
+ }
+
+ /* Init CMB dma address */
+ tx_ring->cmb_dma = adapter->ring_dma + offset;
+ tx_ring->cmb = adapter->ring_vir_addr + offset;
+ offset += sizeof(u32);
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
+ rx_page_desc[i].rx_page[j].write_offset_dma =
+ adapter->ring_dma + offset;
+ rx_page_desc[i].rx_page[j].write_offset_addr =
+ adapter->ring_vir_addr + offset;
+ offset += sizeof(u32);
+ }
+ }
+
+ if (unlikely(offset > adapter->ring_size)) {
+ netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
+ offset, adapter->ring_size);
+ err = -1;
+ goto failed;
+ }
+
+ return 0;
+failed:
+ if (adapter->ring_vir_addr != NULL) {
+ pci_free_consistent(pdev, adapter->ring_size,
+ adapter->ring_vir_addr, adapter->ring_dma);
+ adapter->ring_vir_addr = NULL;
+ }
+ return err;
+}
+
+static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
+{
+ struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
+ struct atl1e_rx_ring *rx_ring =
+ (struct atl1e_rx_ring *)&adapter->rx_ring;
+ struct atl1e_tx_ring *tx_ring =
+ (struct atl1e_tx_ring *)&adapter->tx_ring;
+ struct atl1e_rx_page_desc *rx_page_desc = NULL;
+ int i, j;
+
+ AT_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI,
+ (u32)((adapter->ring_dma & AT_DMA_HI_ADDR_MASK) >> 32));
+ AT_WRITE_REG(hw, REG_TPD_BASE_ADDR_LO,
+ (u32)((tx_ring->dma) & AT_DMA_LO_ADDR_MASK));
+ AT_WRITE_REG(hw, REG_TPD_RING_SIZE, (u16)(tx_ring->count));
+ AT_WRITE_REG(hw, REG_HOST_TX_CMB_LO,
+ (u32)((tx_ring->cmb_dma) & AT_DMA_LO_ADDR_MASK));
+
+ rx_page_desc = rx_ring->rx_page_desc;
+ /* RXF Page Physical address / Page Length */
+ for (i = 0; i < AT_MAX_RECEIVE_QUEUE; i++) {
+ AT_WRITE_REG(hw, atl1e_rx_page_hi_addr_regs[i],
+ (u32)((adapter->ring_dma &
+ AT_DMA_HI_ADDR_MASK) >> 32));
+ for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
+ u32 page_phy_addr;
+ u32 offset_phy_addr;
+
+ page_phy_addr = rx_page_desc[i].rx_page[j].dma;
+ offset_phy_addr =
+ rx_page_desc[i].rx_page[j].write_offset_dma;
+
+ AT_WRITE_REG(hw, atl1e_rx_page_lo_addr_regs[i][j],
+ page_phy_addr & AT_DMA_LO_ADDR_MASK);
+ AT_WRITE_REG(hw, atl1e_rx_page_write_offset_regs[i][j],
+ offset_phy_addr & AT_DMA_LO_ADDR_MASK);
+ AT_WRITE_REGB(hw, atl1e_rx_page_vld_regs[i][j], 1);
+ }
+ }
+ /* Page Length */
+ AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size);
+ /* Load all of base address above */
+ AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
+}
+
+static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
+{
+ struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
+ u32 dev_ctrl_data = 0;
+ u32 max_pay_load = 0;
+	u32 jumbo_thresh = 0;	/* early-TX threshold, converted to 8-byte units when written */
+	u32 extra_size = 0;	/* Ethernet header + VLAN + FCS overhead */
+
+ /* configure TXQ param */
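+	/* TX_EARLY_TH is the number of bytes of a frame that must be in
+	 * SRAM before transmission starts; it is scaled down for larger
+	 * max frame sizes, presumably so jumbo frames need not be fully
+	 * buffered before going out on the wire */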
+ if (hw->nic_type != athr_l2e_revB) {
+ extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
+ if (hw->max_frame_size <= 1500) {
+ jumbo_thresh = hw->max_frame_size + extra_size;
+ } else if (hw->max_frame_size < 6*1024) {
+ jumbo_thresh =
+ (hw->max_frame_size + extra_size) * 2 / 3;
+ } else {
+ jumbo_thresh = (hw->max_frame_size + extra_size) / 2;
+ }
+ AT_WRITE_REG(hw, REG_TX_EARLY_TH, (jumbo_thresh + 7) >> 3);
+ }
+
+ dev_ctrl_data = AT_READ_REG(hw, REG_DEVICE_CTRL);
+
+ max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) &
+ DEVICE_CTRL_MAX_PAYLOAD_MASK;
+
+ hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
+
+ max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) &
+ DEVICE_CTRL_MAX_RREQ_SZ_MASK;
+ hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
+
+ if (hw->nic_type != athr_l2e_revB)
+ AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2,
+ atl1e_pay_load_size[hw->dmar_block]);
+ /* enable TXQ */
+ AT_WRITE_REGW(hw, REG_TXQ_CTRL,
+ (((u16)hw->tpd_burst & TXQ_CTRL_NUM_TPD_BURST_MASK)
+ << TXQ_CTRL_NUM_TPD_BURST_SHIFT)
+ | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN);
+}
+
+static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
+{
+ struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
+ u32 rxf_len = 0;
+ u32 rxf_low = 0;
+ u32 rxf_high = 0;
+ u32 rxf_thresh_data = 0;
+ u32 rxq_ctrl_data = 0;
+
+ if (hw->nic_type != athr_l2e_revB) {
+ AT_WRITE_REGW(hw, REG_RXQ_JMBOSZ_RRDTIM,
+ (u16)((hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) <<
+ RXQ_JMBOSZ_TH_SHIFT |
+ (1 & RXQ_JMBO_LKAH_MASK) <<
+ RXQ_JMBO_LKAH_SHIFT));
+
+ rxf_len = AT_READ_REG(hw, REG_SRAM_RXF_LEN);
+ rxf_high = rxf_len * 4 / 5;
+ rxf_low = rxf_len / 5;
+ rxf_thresh_data = ((rxf_high & RXQ_RXF_PAUSE_TH_HI_MASK)
+ << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
+ ((rxf_low & RXQ_RXF_PAUSE_TH_LO_MASK)
+ << RXQ_RXF_PAUSE_TH_LO_SHIFT);
+
+ AT_WRITE_REG(hw, REG_RXQ_RXF_PAUSE_THRESH, rxf_thresh_data);
+ }
+
+ /* RRS */
+ AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab);
+ AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu);
+
+ if (hw->rrs_type & atl1e_rrs_ipv4)
+ rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4;
+
+ if (hw->rrs_type & atl1e_rrs_ipv4_tcp)
+ rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4_TCP;
+
+ if (hw->rrs_type & atl1e_rrs_ipv6)
+ rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6;
+
+ if (hw->rrs_type & atl1e_rrs_ipv6_tcp)
+ rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6_TCP;
+
+ if (hw->rrs_type != atl1e_rrs_disable)
+ rxq_ctrl_data |=
+ (RXQ_CTRL_HASH_ENABLE | RXQ_CTRL_RSS_MODE_MQUESINT);
+
+ rxq_ctrl_data |= RXQ_CTRL_IPV6_XSUM_VERIFY_EN | RXQ_CTRL_PBA_ALIGN_32 |
+ RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;
+
+ AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
+}
+
+static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
+{
+ struct atl1e_hw *hw = &adapter->hw;
+ u32 dma_ctrl_data = 0;
+
+ dma_ctrl_data = DMA_CTRL_RXCMB_EN;
+ dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
+ << DMA_CTRL_DMAR_BURST_LEN_SHIFT;
+ dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
+ << DMA_CTRL_DMAW_BURST_LEN_SHIFT;
+ dma_ctrl_data |= DMA_CTRL_DMAR_REQ_PRI | DMA_CTRL_DMAR_OUT_ORDER;
+ dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK)
+ << DMA_CTRL_DMAR_DLY_CNT_SHIFT;
+ dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK)
+ << DMA_CTRL_DMAW_DLY_CNT_SHIFT;
+
+ AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
+}
+
+static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
+{
+ u32 value;
+ struct atl1e_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+
+ /* Config MAC CTRL Register */
+ value = MAC_CTRL_TX_EN |
+ MAC_CTRL_RX_EN ;
+
+ if (FULL_DUPLEX == adapter->link_duplex)
+ value |= MAC_CTRL_DUPLX;
+
+ value |= ((u32)((SPEED_1000 == adapter->link_speed) ?
+ MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
+ MAC_CTRL_SPEED_SHIFT);
+ value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
+
+ value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
+ value |= (((u32)adapter->hw.preamble_len &
+ MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
+
+ __atl1e_vlan_mode(netdev->features, &value);
+
+ value |= MAC_CTRL_BC_EN;
+ if (netdev->flags & IFF_PROMISC)
+ value |= MAC_CTRL_PROMIS_EN;
+ if (netdev->flags & IFF_ALLMULTI)
+ value |= MAC_CTRL_MC_ALL_EN;
+
+ AT_WRITE_REG(hw, REG_MAC_CTRL, value);
+}
+
+/*
+ * atl1e_configure - Configure Transmit & Receive Units after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx/Rx units of the MAC after a reset.
+ */
+static int atl1e_configure(struct atl1e_adapter *adapter)
+{
+ struct atl1e_hw *hw = &adapter->hw;
+
+ u32 intr_status_data = 0;
+
+ /* clear interrupt status */
+ AT_WRITE_REG(hw, REG_ISR, ~0);
+
+ /* 1. set MAC Address */
+ atl1e_hw_set_mac_addr(hw);
+
+	/* 2. Init of the multicast hash table is done by atl1e_set_multi() */
+
+ /* 3. Clear any WOL status */
+ AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
+
+	/* 4. Descriptor Ring BaseMem/Length/Read ptr/Write ptr
+	 * TPD ring, SMB and RXF0 page CMBs share the same
+	 * high 32 bits of memory address */
+ atl1e_configure_des_ring(adapter);
+
+ /* 5. set Interrupt Moderator Timer */
+ AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, hw->imt);
+ AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER2_INIT, hw->imt);
+ AT_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_LED_MODE |
+ MASTER_CTRL_ITIMER_EN | MASTER_CTRL_ITIMER2_EN);
+
+ /* 6. rx/tx threshold to trig interrupt */
+ AT_WRITE_REGW(hw, REG_TRIG_RRD_THRESH, hw->rrd_thresh);
+ AT_WRITE_REGW(hw, REG_TRIG_TPD_THRESH, hw->tpd_thresh);
+ AT_WRITE_REGW(hw, REG_TRIG_RXTIMER, hw->rx_count_down);
+ AT_WRITE_REGW(hw, REG_TRIG_TXTIMER, hw->tx_count_down);
+
+ /* 7. set Interrupt Clear Timer */
+ AT_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, hw->ict);
+
+ /* 8. set MTU */
+ AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN +
+ VLAN_HLEN + ETH_FCS_LEN);
+
+ /* 9. config TXQ early tx threshold */
+ atl1e_configure_tx(adapter);
+
+ /* 10. config RXQ */
+ atl1e_configure_rx(adapter);
+
+ /* 11. config DMA Engine */
+ atl1e_configure_dma(adapter);
+
+ /* 12. smb timer to trig interrupt */
+ AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, hw->smb_timer);
+
+ intr_status_data = AT_READ_REG(hw, REG_ISR);
+ if (unlikely((intr_status_data & ISR_PHY_LINKDOWN) != 0)) {
+ netdev_err(adapter->netdev,
+ "atl1e_configure failed, PCIE phy link down\n");
+ return -1;
+ }
+
+ AT_WRITE_REG(hw, REG_ISR, 0x7fffffff);
+ return 0;
+}
+
+/*
+ * atl1e_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ */
+static struct net_device_stats *atl1e_get_stats(struct net_device *netdev)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct atl1e_hw_stats *hw_stats = &adapter->hw_stats;
+ struct net_device_stats *net_stats = &netdev->stats;
+
+ net_stats->rx_packets = hw_stats->rx_ok;
+ net_stats->tx_packets = hw_stats->tx_ok;
+ net_stats->rx_bytes = hw_stats->rx_byte_cnt;
+ net_stats->tx_bytes = hw_stats->tx_byte_cnt;
+ net_stats->multicast = hw_stats->rx_mcast;
+ net_stats->collisions = hw_stats->tx_1_col +
+ hw_stats->tx_2_col * 2 +
+ hw_stats->tx_late_col + hw_stats->tx_abort_col;
+
+ net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err +
+ hw_stats->rx_len_err + hw_stats->rx_sz_ov +
+ hw_stats->rx_rrd_ov + hw_stats->rx_align_err;
+ net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov;
+ net_stats->rx_length_errors = hw_stats->rx_len_err;
+ net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
+ net_stats->rx_frame_errors = hw_stats->rx_align_err;
+ net_stats->rx_over_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+
+ net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
+
+ net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col +
+ hw_stats->tx_underrun + hw_stats->tx_trunc;
+ net_stats->tx_fifo_errors = hw_stats->tx_underrun;
+ net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
+ net_stats->tx_window_errors = hw_stats->tx_late_col;
+
+ return net_stats;
+}
+
+static void atl1e_update_hw_stats(struct atl1e_adapter *adapter)
+{
+ u16 hw_reg_addr = 0;
+ unsigned long *stats_item = NULL;
+
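+	/* the hw_stats fields are declared in the same order as the MAC
+	 * statistics registers, so both can be walked in lockstep */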
+ /* update rx status */
+ hw_reg_addr = REG_MAC_RX_STATUS_BIN;
+ stats_item = &adapter->hw_stats.rx_ok;
+ while (hw_reg_addr <= REG_MAC_RX_STATUS_END) {
+ *stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr);
+ stats_item++;
+ hw_reg_addr += 4;
+ }
+ /* update tx status */
+ hw_reg_addr = REG_MAC_TX_STATUS_BIN;
+ stats_item = &adapter->hw_stats.tx_ok;
+ while (hw_reg_addr <= REG_MAC_TX_STATUS_END) {
+ *stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr);
+ stats_item++;
+ hw_reg_addr += 4;
+ }
+}
+
+static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter)
+{
+ u16 phy_data;
+
+ spin_lock(&adapter->mdio_lock);
+ atl1e_read_phy_reg(&adapter->hw, MII_INT_STATUS, &phy_data);
+ spin_unlock(&adapter->mdio_lock);
+}
+
+static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
+{
+ struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
+ &adapter->tx_ring;
+ struct atl1e_tx_buffer *tx_buffer = NULL;
+ u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX);
+ u16 next_to_clean = atomic_read(&tx_ring->next_to_clean);
+
+ while (next_to_clean != hw_next_to_clean) {
+ tx_buffer = &tx_ring->tx_buffer[next_to_clean];
+ if (tx_buffer->dma) {
+ if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
+ pci_unmap_single(adapter->pdev, tx_buffer->dma,
+ tx_buffer->length, PCI_DMA_TODEVICE);
+ else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
+ pci_unmap_page(adapter->pdev, tx_buffer->dma,
+ tx_buffer->length, PCI_DMA_TODEVICE);
+ tx_buffer->dma = 0;
+ }
+
+ if (tx_buffer->skb) {
+ dev_kfree_skb_irq(tx_buffer->skb);
+ tx_buffer->skb = NULL;
+ }
+
+ if (++next_to_clean == tx_ring->count)
+ next_to_clean = 0;
+ }
+
+ atomic_set(&tx_ring->next_to_clean, next_to_clean);
+
+ if (netif_queue_stopped(adapter->netdev) &&
+ netif_carrier_ok(adapter->netdev)) {
+ netif_wake_queue(adapter->netdev);
+ }
+
+ return true;
+}
+
+/*
+ * atl1e_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ */
+static irqreturn_t atl1e_intr(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct atl1e_hw *hw = &adapter->hw;
+ int max_ints = AT_MAX_INT_WORK;
+ int handled = IRQ_NONE;
+ u32 status;
+
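+	/* bound the number of ISR passes so an interrupt storm cannot
+	 * keep us in the handler indefinitely */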
+ do {
+ status = AT_READ_REG(hw, REG_ISR);
+ if ((status & IMR_NORMAL_MASK) == 0 ||
+ (status & ISR_DIS_INT) != 0) {
+ if (max_ints != AT_MAX_INT_WORK)
+ handled = IRQ_HANDLED;
+ break;
+ }
+ /* link event */
+ if (status & ISR_GPHY)
+ atl1e_clear_phy_int(adapter);
+ /* Ack ISR */
+ AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
+
+ handled = IRQ_HANDLED;
+ /* check if PCIE PHY Link down */
+ if (status & ISR_PHY_LINKDOWN) {
+ netdev_err(adapter->netdev,
+ "pcie phy linkdown %x\n", status);
+ if (netif_running(adapter->netdev)) {
+ /* reset MAC */
+ atl1e_irq_reset(adapter);
+ schedule_work(&adapter->reset_task);
+ break;
+ }
+ }
+
+ /* check if DMA read/write error */
+ if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
+ netdev_err(adapter->netdev,
+ "PCIE DMA RW error (status = 0x%x)\n",
+ status);
+ atl1e_irq_reset(adapter);
+ schedule_work(&adapter->reset_task);
+ break;
+ }
+
+ if (status & ISR_SMB)
+ atl1e_update_hw_stats(adapter);
+
+ /* link event */
+ if (status & (ISR_GPHY | ISR_MANUAL)) {
+ netdev->stats.tx_carrier_errors++;
+ atl1e_link_chg_event(adapter);
+ break;
+ }
+
+ /* transmit event */
+ if (status & ISR_TX_EVENT)
+ atl1e_clean_tx_irq(adapter);
+
+ if (status & ISR_RX_EVENT) {
+ /*
+ * disable rx interrupts, without
+ * the synchronize_irq bit
+ */
+ AT_WRITE_REG(hw, REG_IMR,
+ IMR_NORMAL_MASK & ~ISR_RX_EVENT);
+ AT_WRITE_FLUSH(hw);
+ if (likely(napi_schedule_prep(
+ &adapter->napi)))
+ __napi_schedule(&adapter->napi);
+ }
+ } while (--max_ints > 0);
+ /* re-enable Interrupt*/
+ AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
+
+ return handled;
+}
+
+static inline void atl1e_rx_checksum(struct atl1e_adapter *adapter,
+ struct sk_buff *skb, struct atl1e_recv_ret_status *prrs)
+{
+ u8 *packet = (u8 *)(prrs + 1);
+ struct iphdr *iph;
+ u16 head_len = ETH_HLEN;
+ u16 pkt_flags;
+ u16 err_flags;
+
+ skb_checksum_none_assert(skb);
+ pkt_flags = prrs->pkt_flag;
+ err_flags = prrs->err_flag;
+ if (((pkt_flags & RRS_IS_IPV4) || (pkt_flags & RRS_IS_IPV6)) &&
+ ((pkt_flags & RRS_IS_TCP) || (pkt_flags & RRS_IS_UDP))) {
+ if (pkt_flags & RRS_IS_IPV4) {
+ if (pkt_flags & RRS_IS_802_3)
+ head_len += 8;
+ iph = (struct iphdr *) (packet + head_len);
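+			/* the hardware L4 checksum is not reliable for
+			 * fragmented IPv4 packets; let the stack verify it */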
+ if (iph->frag_off != 0 && !(pkt_flags & RRS_IS_IP_DF))
+ goto hw_xsum;
+ }
+ if (!(err_flags & (RRS_ERR_IP_CSUM | RRS_ERR_L4_CSUM))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
+ }
+ }
+
+hw_xsum:
+ return;
+}
+
+static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter,
+ u8 que)
+{
+ struct atl1e_rx_page_desc *rx_page_desc =
+ (struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc;
+ u8 rx_using = rx_page_desc[que].rx_using;
+
+ return (struct atl1e_rx_page *)&(rx_page_desc[que].rx_page[rx_using]);
+}
+
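+/*
+ * RX uses two ping-pong pages per queue: the hardware appends RRS +
+ * packet records to the current page and advances a write offset in
+ * host memory; once the driver has drained a page it marks it valid
+ * again and flips to the other page via rx_using.
+ */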
+static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
+ int *work_done, int work_to_do)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *)
+ &adapter->rx_ring;
+ struct atl1e_rx_page_desc *rx_page_desc =
+ (struct atl1e_rx_page_desc *) rx_ring->rx_page_desc;
+ struct sk_buff *skb = NULL;
+ struct atl1e_rx_page *rx_page = atl1e_get_rx_page(adapter, que);
+ u32 packet_size, write_offset;
+ struct atl1e_recv_ret_status *prrs;
+
+ write_offset = *(rx_page->write_offset_addr);
+ if (likely(rx_page->read_offset < write_offset)) {
+ do {
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+ /* get new packet's rrs */
+ prrs = (struct atl1e_recv_ret_status *) (rx_page->addr +
+ rx_page->read_offset);
+ /* check sequence number */
+ if (prrs->seq_num != rx_page_desc[que].rx_nxseq) {
+ netdev_err(netdev,
+ "rx sequence number error (rx=%d) (expect=%d)\n",
+ prrs->seq_num,
+ rx_page_desc[que].rx_nxseq);
+ rx_page_desc[que].rx_nxseq++;
+ /* just for debug use */
+ AT_WRITE_REG(&adapter->hw, REG_DEBUG_DATA0,
+ (((u32)prrs->seq_num) << 16) |
+ rx_page_desc[que].rx_nxseq);
+ goto fatal_err;
+ }
+ rx_page_desc[que].rx_nxseq++;
+
+ /* error packet */
+ if (prrs->pkt_flag & RRS_IS_ERR_FRAME) {
+ if (prrs->err_flag & (RRS_ERR_BAD_CRC |
+ RRS_ERR_DRIBBLE | RRS_ERR_CODE |
+ RRS_ERR_TRUNC)) {
+ /* hardware error, discard this packet*/
+ netdev_err(netdev,
+ "rx packet desc error %x\n",
+ *((u32 *)prrs + 1));
+ goto skip_pkt;
+ }
+ }
+
+ packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
+ RRS_PKT_SIZE_MASK) - 4; /* CRC */
+ skb = netdev_alloc_skb_ip_align(netdev, packet_size);
+ if (skb == NULL) {
+ netdev_warn(netdev,
+ "Memory squeeze, deferring packet\n");
+ goto skip_pkt;
+ }
+ memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
+ skb_put(skb, packet_size);
+ skb->protocol = eth_type_trans(skb, netdev);
+ atl1e_rx_checksum(adapter, skb, prrs);
+
+ if (prrs->pkt_flag & RRS_IS_VLAN_TAG) {
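+				/* the RRS stores the 802.1Q TCI with its
+				 * fields rearranged; rebuild the standard
+				 * PRI/CFI/VID layout before passing it up */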
+ u16 vlan_tag = (prrs->vtag >> 4) |
+ ((prrs->vtag & 7) << 13) |
+ ((prrs->vtag & 8) << 9);
+ netdev_dbg(netdev,
+ "RXD VLAN TAG<RRD>=0x%04x\n",
+ prrs->vtag);
+ __vlan_hwaccel_put_tag(skb, vlan_tag);
+ }
+ netif_receive_skb(skb);
+
+skip_pkt:
+ /* skip current packet whether it's ok or not. */
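+			/* the hardware pads each RRS + payload record to a
+			 * 32-byte boundary, hence the mask below */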
+ rx_page->read_offset +=
+ (((u32)((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
+ RRS_PKT_SIZE_MASK) +
+ sizeof(struct atl1e_recv_ret_status) + 31) &
+ 0xFFFFFFE0);
+
+ if (rx_page->read_offset >= rx_ring->page_size) {
+ /* mark this page clean */
+ u16 reg_addr;
+ u8 rx_using;
+
+ rx_page->read_offset =
+ *(rx_page->write_offset_addr) = 0;
+ rx_using = rx_page_desc[que].rx_using;
+ reg_addr =
+ atl1e_rx_page_vld_regs[que][rx_using];
+ AT_WRITE_REGB(&adapter->hw, reg_addr, 1);
+ rx_page_desc[que].rx_using ^= 1;
+ rx_page = atl1e_get_rx_page(adapter, que);
+ }
+ write_offset = *(rx_page->write_offset_addr);
+ } while (rx_page->read_offset < write_offset);
+ }
+
+ return;
+
+fatal_err:
+ if (!test_bit(__AT_DOWN, &adapter->flags))
+ schedule_work(&adapter->reset_task);
+}
+
+/*
+ * atl1e_clean - NAPI Rx polling callback
+ * @napi: napi structure embedded in the board private structure
+ * @budget: maximum number of Rx packets to process
+ */
+static int atl1e_clean(struct napi_struct *napi, int budget)
+{
+ struct atl1e_adapter *adapter =
+ container_of(napi, struct atl1e_adapter, napi);
+ u32 imr_data;
+ int work_done = 0;
+
+ /* Keep link state information with original netdev */
+ if (!netif_carrier_ok(adapter->netdev))
+ goto quit_polling;
+
+ atl1e_clean_rx_irq(adapter, 0, &work_done, budget);
+
+ /* If no Tx and not enough Rx work done, exit the polling mode */
+ if (work_done < budget) {
+quit_polling:
+ napi_complete(napi);
+ imr_data = AT_READ_REG(&adapter->hw, REG_IMR);
+ AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT);
+		/* sanity check: polling should never run while the interface is down */
+ if (test_bit(__AT_DOWN, &adapter->flags)) {
+ atomic_dec(&adapter->irq_sem);
+ netdev_err(adapter->netdev,
+ "atl1e_clean is called when AT_DOWN\n");
+ }
+ /* reenable RX intr */
+ /*atl1e_irq_enable(adapter); */
+
+ }
+ return work_done;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void atl1e_netpoll(struct net_device *netdev)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ disable_irq(adapter->pdev->irq);
+ atl1e_intr(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+}
+#endif
+
+static inline u16 atl1e_tpd_avail(struct atl1e_adapter *adapter)
+{
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
+ u16 next_to_use = 0;
+ u16 next_to_clean = 0;
+
+ next_to_clean = atomic_read(&tx_ring->next_to_clean);
+ next_to_use = tx_ring->next_to_use;
+
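+	/* one descriptor is always left unused so a full ring can be
+	 * distinguished from an empty one */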
+	return (u16)((next_to_clean > next_to_use) ?
+			(next_to_clean - next_to_use - 1) :
+			(tx_ring->count + next_to_clean - next_to_use - 1));
+}
+
+/*
+ * get the next usable tpd
+ * Note: the caller should call atl1e_tpd_avail() first to make sure
+ * enough tpds are available
+ */
+static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter)
+{
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
+ u16 next_to_use = 0;
+
+ next_to_use = tx_ring->next_to_use;
+ if (++tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+
+ memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc));
+ return (struct atl1e_tpd_desc *)&tx_ring->desc[next_to_use];
+}
+
+static struct atl1e_tx_buffer *
+atl1e_get_tx_buffer(struct atl1e_adapter *adapter, struct atl1e_tpd_desc *tpd)
+{
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
+
+ return &tx_ring->tx_buffer[tpd - tx_ring->desc];
+}
+
+/* Calculate the number of transmit packet descriptors (tpds) needed */
+static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
+{
+ int i = 0;
+ u16 tpd_req = 1;
+ u16 fg_size = 0;
+ u16 proto_hdr_len = 0;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ fg_size = skb_shinfo(skb)->frags[i].size;
+ tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT);
+ }
+
+ if (skb_is_gso(skb)) {
+ if (skb->protocol == htons(ETH_P_IP) ||
+ (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) {
+ proto_hdr_len = skb_transport_offset(skb) +
+ tcp_hdrlen(skb);
+ if (proto_hdr_len < skb_headlen(skb)) {
+ tpd_req += ((skb_headlen(skb) - proto_hdr_len +
+ MAX_TX_BUF_LEN - 1) >>
+ MAX_TX_BUF_SHIFT);
+ }
+ }
+
+ }
+ return tpd_req;
+}
+
+static int atl1e_tso_csum(struct atl1e_adapter *adapter,
+ struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
+{
+ u8 hdr_len;
+ u32 real_len;
+ unsigned short offload_type;
+ int err;
+
+ if (skb_is_gso(skb)) {
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (unlikely(err))
+ return -1;
+ }
+ offload_type = skb_shinfo(skb)->gso_type;
+
+ if (offload_type & SKB_GSO_TCPV4) {
+ real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
+ + ntohs(ip_hdr(skb)->tot_len));
+
+ if (real_len < skb->len)
+ pskb_trim(skb, real_len);
+
+ hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ if (unlikely(skb->len == hdr_len)) {
+				/* only checksum offload is needed */
+				netdev_warn(adapter->netdev,
+					"IPv4 TSO with zero payload\n");
+ goto check_sum;
+ } else {
+ ip_hdr(skb)->check = 0;
+ ip_hdr(skb)->tot_len = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(
+ ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ tpd->word3 |= (ip_hdr(skb)->ihl &
+ TDP_V4_IPHL_MASK) <<
+ TPD_V4_IPHL_SHIFT;
+ tpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
+ TPD_TCPHDRLEN_MASK) <<
+ TPD_TCPHDRLEN_SHIFT;
+ tpd->word3 |= ((skb_shinfo(skb)->gso_size) &
+ TPD_MSS_MASK) << TPD_MSS_SHIFT;
+ tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
+ }
+ return 0;
+ }
+ }
+
+check_sum:
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ u8 css, cso;
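+		/* cso: offset where hardware checksumming starts,
+		 * css: offset where the computed checksum is stored */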
+
+ cso = skb_checksum_start_offset(skb);
+ if (unlikely(cso & 0x1)) {
+ netdev_err(adapter->netdev,
+				"payload offset should not be an odd number\n");
+ return -1;
+ } else {
+ css = cso + skb->csum_offset;
+ tpd->word3 |= (cso & TPD_PLOADOFFSET_MASK) <<
+ TPD_PLOADOFFSET_SHIFT;
+ tpd->word3 |= (css & TPD_CCSUMOFFSET_MASK) <<
+ TPD_CCSUMOFFSET_SHIFT;
+ tpd->word3 |= 1 << TPD_CC_SEGMENT_EN_SHIFT;
+ }
+ }
+
+ return 0;
+}
+
+static void atl1e_tx_map(struct atl1e_adapter *adapter,
+ struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
+{
+ struct atl1e_tpd_desc *use_tpd = NULL;
+ struct atl1e_tx_buffer *tx_buffer = NULL;
+ u16 buf_len = skb_headlen(skb);
+ u16 map_len = 0;
+ u16 mapped_len = 0;
+ u16 hdr_len = 0;
+ u16 nr_frags;
+ u16 f;
+ int segment;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
+ if (segment) {
+ /* TSO */
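+		/* map the protocol headers into their own buffer so the
+		 * hardware can prepend a copy to every segment it emits */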
+ map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ use_tpd = tpd;
+
+ tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
+ tx_buffer->length = map_len;
+ tx_buffer->dma = pci_map_single(adapter->pdev,
+ skb->data, hdr_len, PCI_DMA_TODEVICE);
+ ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
+ mapped_len += map_len;
+ use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
+ use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
+ ((cpu_to_le32(tx_buffer->length) &
+ TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
+ }
+
+ while (mapped_len < buf_len) {
+		/* mapped_len == 0 means we should use the first tpd,
+		   which is given by the caller */
+ if (mapped_len == 0) {
+ use_tpd = tpd;
+ } else {
+ use_tpd = atl1e_get_tpd(adapter);
+ memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc));
+ }
+ tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
+ tx_buffer->skb = NULL;
+
+ tx_buffer->length = map_len =
+ ((buf_len - mapped_len) >= MAX_TX_BUF_LEN) ?
+ MAX_TX_BUF_LEN : (buf_len - mapped_len);
+ tx_buffer->dma =
+ pci_map_single(adapter->pdev, skb->data + mapped_len,
+ map_len, PCI_DMA_TODEVICE);
+ ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
+ mapped_len += map_len;
+ use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
+ use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
+ ((cpu_to_le32(tx_buffer->length) &
+ TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ struct skb_frag_struct *frag;
+ u16 i;
+ u16 seg_num;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ buf_len = frag->size;
+
+ seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
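+		/* split the fragment into MAX_TX_BUF_LEN-sized chunks,
+		 * rounding up */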
+ for (i = 0; i < seg_num; i++) {
+ use_tpd = atl1e_get_tpd(adapter);
+ memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc));
+
+ tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
+ BUG_ON(tx_buffer->skb);
+
+ tx_buffer->skb = NULL;
+ tx_buffer->length =
+ (buf_len > MAX_TX_BUF_LEN) ?
+ MAX_TX_BUF_LEN : buf_len;
+ buf_len -= tx_buffer->length;
+
+ tx_buffer->dma = skb_frag_dma_map(&adapter->pdev->dev,
+ frag,
+ (i * MAX_TX_BUF_LEN),
+ tx_buffer->length,
+ DMA_TO_DEVICE);
+ ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE);
+ use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
+ use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
+ ((cpu_to_le32(tx_buffer->length) &
+ TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
+ }
+ }
+
+ if ((tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK)
+		/* mark the first tpd as carrying the TCP/IP header */
+ tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
+ /* The last tpd */
+
+ use_tpd->word3 |= 1 << TPD_EOP_SHIFT;
+	/* The last buffer info contains the skb address,
+	   so the skb is freed after it is unmapped */
+ tx_buffer->skb = skb;
+}
+
+static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count,
+ struct atl1e_tpd_desc *tpd)
+{
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+ AT_WRITE_REG(&adapter->hw, REG_MB_TPD_PROD_IDX, tx_ring->next_to_use);
+}
+
+static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
+ u16 tpd_req = 1;
+ struct atl1e_tpd_desc *tpd;
+
+ if (test_bit(__AT_DOWN, &adapter->flags)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (unlikely(skb->len <= 0)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ tpd_req = atl1e_cal_tdp_req(skb);
+ if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
+ return NETDEV_TX_LOCKED;
+
+ if (atl1e_tpd_avail(adapter) < tpd_req) {
+		/* not enough descriptors, just stop the queue */
+ netif_stop_queue(netdev);
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ tpd = atl1e_get_tpd(adapter);
+
+ if (vlan_tx_tag_present(skb)) {
+ u16 vlan_tag = vlan_tx_tag_get(skb);
+ u16 atl1e_vlan_tag;
+
+ tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
+ AT_VLAN_TAG_TO_TPD_TAG(vlan_tag, atl1e_vlan_tag);
+ tpd->word2 |= (atl1e_vlan_tag & TPD_VLANTAG_MASK) <<
+ TPD_VLAN_SHIFT;
+ }
+
+ if (skb->protocol == htons(ETH_P_8021Q))
+ tpd->word3 |= 1 << TPD_VL_TAGGED_SHIFT;
+
+ if (skb_network_offset(skb) != ETH_HLEN)
+ tpd->word3 |= 1 << TPD_ETHTYPE_SHIFT; /* 802.3 frame */
+
+ /* do TSO and check sum */
+ if (atl1e_tso_csum(adapter, skb, tpd) != 0) {
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ atl1e_tx_map(adapter, skb, tpd);
+ atl1e_tx_queue(adapter, tpd_req, tpd);
+
+ netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ return NETDEV_TX_OK;
+}
+
+static void atl1e_free_irq(struct atl1e_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ free_irq(adapter->pdev->irq, netdev);
+
+ if (adapter->have_msi)
+ pci_disable_msi(adapter->pdev);
+}
+
+static int atl1e_request_irq(struct atl1e_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+ int flags = 0;
+ int err = 0;
+
+ adapter->have_msi = true;
+ err = pci_enable_msi(adapter->pdev);
+ if (err) {
+ netdev_dbg(adapter->netdev,
+ "Unable to allocate MSI interrupt Error: %d\n", err);
+ adapter->have_msi = false;
+ } else
+ netdev->irq = pdev->irq;
+
+ if (!adapter->have_msi)
+ flags |= IRQF_SHARED;
+ err = request_irq(adapter->pdev->irq, atl1e_intr, flags,
+ netdev->name, netdev);
+ if (err) {
+ netdev_dbg(adapter->netdev,
+ "Unable to allocate interrupt Error: %d\n", err);
+ if (adapter->have_msi)
+ pci_disable_msi(adapter->pdev);
+ return err;
+ }
+ netdev_dbg(adapter->netdev, "atl1e_request_irq OK\n");
+ return err;
+}
+
+int atl1e_up(struct atl1e_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err = 0;
+ u32 val;
+
+ /* hardware has been reset, we need to reload some things */
+ err = atl1e_init_hw(&adapter->hw);
+ if (err) {
+ err = -EIO;
+ return err;
+ }
+ atl1e_init_ring_ptrs(adapter);
+ atl1e_set_multi(netdev);
+ atl1e_restore_vlan(adapter);
+
+ if (atl1e_configure(adapter)) {
+ err = -EIO;
+ goto err_up;
+ }
+
+ clear_bit(__AT_DOWN, &adapter->flags);
+ napi_enable(&adapter->napi);
+ atl1e_irq_enable(adapter);
+ val = AT_READ_REG(&adapter->hw, REG_MASTER_CTRL);
+ AT_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
+ val | MASTER_CTRL_MANUAL_INT);
+
+err_up:
+ return err;
+}
+
+void atl1e_down(struct atl1e_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ /* signal that we're down so the interrupt handler does not
+ * reschedule our watchdog timer */
+ set_bit(__AT_DOWN, &adapter->flags);
+
+ netif_stop_queue(netdev);
+
+ /* reset MAC to disable all RX/TX */
+ atl1e_reset_hw(&adapter->hw);
+ msleep(1);
+
+ napi_disable(&adapter->napi);
+ atl1e_del_timer(adapter);
+ atl1e_irq_disable(adapter);
+
+ netif_carrier_off(netdev);
+ adapter->link_speed = SPEED_0;
+ adapter->link_duplex = -1;
+ atl1e_clean_tx_ring(adapter);
+ atl1e_clean_rx_ring(adapter);
+}
+
+/*
+ * atl1e_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ */
+static int atl1e_open(struct net_device *netdev)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__AT_TESTING, &adapter->flags))
+ return -EBUSY;
+
+ /* allocate rx/tx dma buffer & descriptors */
+ atl1e_init_ring_resources(adapter);
+ err = atl1e_setup_ring_resources(adapter);
+ if (unlikely(err))
+ return err;
+
+ err = atl1e_request_irq(adapter);
+ if (unlikely(err))
+ goto err_req_irq;
+
+ err = atl1e_up(adapter);
+ if (unlikely(err))
+ goto err_up;
+
+ return 0;
+
+err_up:
+ atl1e_free_irq(adapter);
+err_req_irq:
+ atl1e_free_ring_resources(adapter);
+ atl1e_reset_hw(&adapter->hw);
+
+ return err;
+}
+
+/*
+ * atl1e_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ */
+static int atl1e_close(struct net_device *netdev)
+{
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
+ atl1e_down(adapter);
+ atl1e_free_irq(adapter);
+ atl1e_free_ring_resources(adapter);
+
+ return 0;
+}
+
+static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ struct atl1e_hw *hw = &adapter->hw;
+ u32 ctrl = 0;
+ u32 mac_ctrl_data = 0;
+ u32 wol_ctrl_data = 0;
+ u16 mii_advertise_data = 0;
+ u16 mii_bmsr_data = 0;
+ u16 mii_intr_status_data = 0;
+ u32 wufc = adapter->wol;
+ u32 i;
+#ifdef CONFIG_PM
+ int retval = 0;
+#endif
+
+ if (netif_running(netdev)) {
+ WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
+ atl1e_down(adapter);
+ }
+ netif_device_detach(netdev);
+
+#ifdef CONFIG_PM
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+#endif
+
+ if (wufc) {
+ /* get link status */
+ atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
+ atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
+
+ mii_advertise_data = ADVERTISE_10HALF;
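+		/* advertise only 10M half duplex so the renegotiated link
+		 * draws minimal PHY power while armed for wake-up */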
+
+ if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) ||
+ (atl1e_write_phy_reg(hw,
+ MII_ADVERTISE, mii_advertise_data) != 0) ||
+ (atl1e_phy_commit(hw)) != 0) {
+ netdev_dbg(adapter->netdev, "set phy register failed\n");
+ goto wol_dis;
+ }
+
+ hw->phy_configured = false; /* re-init PHY when resume */
+
+ /* turn on magic packet wol */
+ if (wufc & AT_WUFC_MAG)
+ wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
+
+ if (wufc & AT_WUFC_LNKC) {
+		/* if the original link status was up, just wait for the link to be re-established */
+ if (mii_bmsr_data & BMSR_LSTATUS) {
+ for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
+ msleep(100);
+ atl1e_read_phy_reg(hw, MII_BMSR,
+ (u16 *)&mii_bmsr_data);
+ if (mii_bmsr_data & BMSR_LSTATUS)
+ break;
+ }
+
+ if ((mii_bmsr_data & BMSR_LSTATUS) == 0)
+ netdev_dbg(adapter->netdev,
+					"Link may change during suspend\n");
+ }
+ wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
+ /* only link up can wake up */
+ if (atl1e_write_phy_reg(hw, MII_INT_CTRL, 0x400) != 0) {
+ netdev_dbg(adapter->netdev,
+					"write phy register failed\n");
+ goto wol_dis;
+ }
+ }
+ /* clear phy interrupt */
+ atl1e_read_phy_reg(hw, MII_INT_STATUS, &mii_intr_status_data);
+ /* Config MAC Ctrl register */
+ mac_ctrl_data = MAC_CTRL_RX_EN;
+		/* set to 10/100M half duplex */
+ mac_ctrl_data |= MAC_CTRL_SPEED_10_100 << MAC_CTRL_SPEED_SHIFT;
+ mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
+ MAC_CTRL_PRMLEN_MASK) <<
+ MAC_CTRL_PRMLEN_SHIFT);
+
+ __atl1e_vlan_mode(netdev->features, &mac_ctrl_data);
+
+ /* a magic packet may arrive as a broadcast, multicast, or unicast frame */
+ if (wufc & AT_WUFC_MAG)
+ mac_ctrl_data |= MAC_CTRL_BC_EN;
+
+ netdev_dbg(adapter->netdev, "suspend MAC=0x%x\n",
+ mac_ctrl_data);
+
+ AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
+ AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
+ /* pcie patch */
+ ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC);
+ ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+ AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+ goto suspend_exit;
+ }
+wol_dis:
+
+ /* WOL disabled */
+ AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
+
+ /* pcie patch */
+ ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC);
+ ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+ AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
+
+ atl1e_force_ps(hw);
+ hw->phy_configured = false; /* re-init PHY when resume */
+
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+
+suspend_exit:
+
+ if (netif_running(netdev))
+ atl1e_free_irq(adapter);
+
+ pci_disable_device(pdev);
+
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int atl1e_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ netdev_err(adapter->netdev,
+ "Cannot enable PCI device from suspend\n");
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ AT_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
+
+ if (netif_running(netdev)) {
+ err = atl1e_request_irq(adapter);
+ if (err)
+ return err;
+ }
+
+ atl1e_reset_hw(&adapter->hw);
+
+ if (netif_running(netdev))
+ atl1e_up(adapter);
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+#endif
+
+static void atl1e_shutdown(struct pci_dev *pdev)
+{
+ atl1e_suspend(pdev, PMSG_SUSPEND);
+}
+
+static const struct net_device_ops atl1e_netdev_ops = {
+ .ndo_open = atl1e_open,
+ .ndo_stop = atl1e_close,
+ .ndo_start_xmit = atl1e_xmit_frame,
+ .ndo_get_stats = atl1e_get_stats,
+ .ndo_set_rx_mode = atl1e_set_multi,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = atl1e_set_mac_addr,
+ .ndo_fix_features = atl1e_fix_features,
+ .ndo_set_features = atl1e_set_features,
+ .ndo_change_mtu = atl1e_change_mtu,
+ .ndo_do_ioctl = atl1e_ioctl,
+ .ndo_tx_timeout = atl1e_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = atl1e_netpoll,
+#endif
+};
+
+static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
+{
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ pci_set_drvdata(pdev, netdev);
+
+ netdev->irq = pdev->irq;
+ netdev->netdev_ops = &atl1e_netdev_ops;
+
+ netdev->watchdog_timeo = AT_TX_WATCHDOG;
+ atl1e_set_ethtool_ops(netdev);
+
+ netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |
+ NETIF_F_HW_VLAN_RX;
+ netdev->features = netdev->hw_features | NETIF_F_LLTX |
+ NETIF_F_HW_VLAN_TX;
+
+ return 0;
+}
+
+/*
+ * atl1e_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in atl1e_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * atl1e_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ */
+static int __devinit atl1e_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct atl1e_adapter *adapter = NULL;
+ static int cards_found;
+
+ int err = 0;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
+ return err;
+ }
+
+ /*
+ * The atl1e chip can DMA to 64-bit addresses, but it uses a single
+ * shared register for the high 32 bits, so only a single, aligned,
+ * 4 GB physical address range can be used at a time.
+ *
+ * Supporting 64-bit DMA on this hardware is more trouble than it's
+ * worth. It is far easier to limit to 32-bit DMA than update
+ * various kernel subsystems to support the mechanics required by a
+ * fixed-high-32-bit system.
+ */
+ if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
+ (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
+ dev_err(&pdev->dev, "No usable DMA configuration,aborting\n");
+ goto err_dma;
+ }
+
+ err = pci_request_regions(pdev, atl1e_driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "cannot obtain PCI resources\n");
+ goto err_pci_reg;
+ }
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev(sizeof(struct atl1e_adapter));
+ if (netdev == NULL) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "etherdev alloc failed\n");
+ goto err_alloc_etherdev;
+ }
+
+ err = atl1e_init_netdev(netdev, pdev);
+ if (err) {
+ netdev_err(netdev, "init netdevice failed\n");
+ goto err_init_netdev;
+ }
+ adapter = netdev_priv(netdev);
+ adapter->bd_number = cards_found;
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->hw.adapter = adapter;
+ adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0);
+ if (!adapter->hw.hw_addr) {
+ err = -EIO;
+ netdev_err(netdev, "cannot map device registers\n");
+ goto err_ioremap;
+ }
+ netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
+
+ /* init mii data */
+ adapter->mii.dev = netdev;
+ adapter->mii.mdio_read = atl1e_mdio_read;
+ adapter->mii.mdio_write = atl1e_mdio_write;
+ adapter->mii.phy_id_mask = 0x1f;
+ adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
+
+ netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
+
+ init_timer(&adapter->phy_config_timer);
+ adapter->phy_config_timer.function = atl1e_phy_config;
+ adapter->phy_config_timer.data = (unsigned long) adapter;
+
+ /* get user settings */
+ atl1e_check_options(adapter);
+ /* set up the PCI command register */
+ atl1e_setup_pcicmd(pdev);
+ /* setup the private structure */
+ err = atl1e_sw_init(adapter);
+ if (err) {
+ netdev_err(netdev, "net device private data init failed\n");
+ goto err_sw_init;
+ }
+
+ /* Init GPHY as early as possible due to power saving issue */
+ atl1e_phy_init(&adapter->hw);
+ /* reset the controller to
+ * put the device in a known good starting state */
+ err = atl1e_reset_hw(&adapter->hw);
+ if (err) {
+ err = -EIO;
+ goto err_reset;
+ }
+
+ if (atl1e_read_mac_addr(&adapter->hw) != 0) {
+ err = -EIO;
+ netdev_err(netdev, "get mac address failed\n");
+ goto err_eeprom;
+ }
+
+ memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
+ netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr);
+
+ INIT_WORK(&adapter->reset_task, atl1e_reset_task);
+ INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
+ err = register_netdev(netdev);
+ if (err) {
+ netdev_err(netdev, "register netdevice failed\n");
+ goto err_register;
+ }
+
+ /* assume we have no link for now */
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+
+ cards_found++;
+
+ return 0;
+
+err_reset:
+err_register:
+err_sw_init:
+err_eeprom:
+ iounmap(adapter->hw.hw_addr);
+err_init_netdev:
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/*
+ * atl1e_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * atl1e_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ */
+static void __devexit atl1e_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ /*
+ * flush_scheduled_work() may reschedule our watchdog task, so
+ * explicitly disable watchdog tasks from being rescheduled
+ */
+ set_bit(__AT_DOWN, &adapter->flags);
+
+ atl1e_del_timer(adapter);
+ atl1e_cancel_work(adapter);
+
+ unregister_netdev(netdev);
+ atl1e_free_ring_resources(adapter);
+ atl1e_force_ps(&adapter->hw);
+ iounmap(adapter->hw.hw_addr);
+ pci_release_regions(pdev);
+ free_netdev(netdev);
+ pci_disable_device(pdev);
+}
+
+/*
+ * atl1e_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t
+atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ atl1e_down(adapter);
+
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/*
+ * atl1e_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first half of the atl1e_resume routine.
+ */
+static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ if (pci_enable_device(pdev)) {
+ netdev_err(adapter->netdev,
+ "Cannot re-enable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ atl1e_reset_hw(&adapter->hw);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/*
+ * atl1e_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second half of the atl1e_resume routine.
+ */
+static void atl1e_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev)) {
+ if (atl1e_up(adapter)) {
+ netdev_err(adapter->netdev,
+ "can't bring device back up after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+}
+
+static struct pci_error_handlers atl1e_err_handler = {
+ .error_detected = atl1e_io_error_detected,
+ .slot_reset = atl1e_io_slot_reset,
+ .resume = atl1e_io_resume,
+};
+
+static struct pci_driver atl1e_driver = {
+ .name = atl1e_driver_name,
+ .id_table = atl1e_pci_tbl,
+ .probe = atl1e_probe,
+ .remove = __devexit_p(atl1e_remove),
+ /* Power Management Hooks */
+#ifdef CONFIG_PM
+ .suspend = atl1e_suspend,
+ .resume = atl1e_resume,
+#endif
+ .shutdown = atl1e_shutdown,
+ .err_handler = &atl1e_err_handler
+};
+
+/*
+ * atl1e_init_module - Driver Registration Routine
+ *
+ * atl1e_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ */
+static int __init atl1e_init_module(void)
+{
+ return pci_register_driver(&atl1e_driver);
+}
+
+/*
+ * atl1e_exit_module - Driver Exit Cleanup Routine
+ *
+ * atl1e_exit_module is called just before the driver is removed
+ * from memory.
+ */
+static void __exit atl1e_exit_module(void)
+{
+ pci_unregister_driver(&atl1e_driver);
+}
+
+module_init(atl1e_init_module);
+module_exit(atl1e_exit_module);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_param.c b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
new file mode 100644
index 000000000000..0ce60b6e7ef0
--- /dev/null
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright(c) 2007 Atheros Corporation. All rights reserved.
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/netdevice.h>
+
+#include "atl1e.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define ATL1E_MAX_NIC 32
+
+#define OPTION_UNSET -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED 1
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+#define ATL1E_PARAM_INIT { [0 ... ATL1E_MAX_NIC] = OPTION_UNSET }
+
+#define ATL1E_PARAM(x, desc) \
+ static int __devinitdata x[ATL1E_MAX_NIC + 1] = ATL1E_PARAM_INIT; \
+ static unsigned int num_##x; \
+ module_param_array_named(x, x, int, &num_##x, 0); \
+ MODULE_PARM_DESC(x, desc);
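+
+/*
+ * Illustrative expansion (for the reader; not additional code):
+ * ATL1E_PARAM(tx_desc_cnt, "Transmit descriptor count") declares
+ *
+ *	static int __devinitdata tx_desc_cnt[ATL1E_MAX_NIC + 1] =
+ *		{ [0 ... ATL1E_MAX_NIC] = OPTION_UNSET };
+ *	static unsigned int num_tx_desc_cnt;
+ *
+ * plus the module_param_array_named()/MODULE_PARM_DESC() hookup, so
+ * "modprobe atl1e tx_desc_cnt=256,512" sets boards 0 and 1 and leaves
+ * the remaining slots at OPTION_UNSET.
+ */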
+
+/* Transmit descriptor count
+ *
+ * Valid Range: 32-1020
+ *
+ * Default Value: 128
+ */
+#define ATL1E_MIN_TX_DESC_CNT 32
+#define ATL1E_MAX_TX_DESC_CNT 1020
+#define ATL1E_DEFAULT_TX_DESC_CNT 128
+ATL1E_PARAM(tx_desc_cnt, "Transmit descriptor count");
+
+/* Receive buffer memory size (KB)
+ *
+ * Valid Range: 8-1024
+ *
+ * Default Value: 256
+ */
+#define ATL1E_MIN_RX_MEM_SIZE 8 /* 8KB */
+#define ATL1E_MAX_RX_MEM_SIZE 1024 /* 1MB */
+#define ATL1E_DEFAULT_RX_MEM_SIZE 256 /* 256KB */
+ATL1E_PARAM(rx_mem_size, "memory size of rx buffer (KB)");
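+/*
+ * The value is given in KB; atl1e_check_options() converts it to bytes
+ * (rx_ring.page_size = rx_mem_size * 1024), so the default of 256
+ * yields a 256 KB receive page.
+ */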
+
+/* User Specified MediaType Override
+ *
+ * Valid Range: 0-4
+ * - 0 - auto-negotiate at all supported speeds
+ * - 1 - only link at 100Mbps Full Duplex
+ * - 2 - only link at 100Mbps Half Duplex
+ * - 3 - only link at 10Mbps Full Duplex
+ * - 4 - only link at 10Mbps Half Duplex
+ * Default Value: 0
+ */
+
+ATL1E_PARAM(media_type, "MediaType Select");
+
+/* Interrupt Moderate Timer in units of 2 us
+ *
+ * Valid Range: 50-65000
+ *
+ * Default Value: 100 (200us)
+ */
+#define INT_MOD_DEFAULT_CNT 100 /* 200us */
+#define INT_MOD_MAX_CNT 65000
+#define INT_MOD_MIN_CNT 50
+ATL1E_PARAM(int_mod_timer, "Interrupt Moderator Timer");
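+/*
+ * Example: the timer counts in 2 us units, so the default of 100
+ * corresponds to 100 * 2 us = 200 us between moderated interrupts.
+ */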
+
+#define AUTONEG_ADV_DEFAULT 0x2F
+#define AUTONEG_ADV_MASK 0x2F
+#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
+
+#define FLASH_VENDOR_DEFAULT 0
+#define FLASH_VENDOR_MIN 0
+#define FLASH_VENDOR_MAX 2
+
+struct atl1e_option {
+ enum { enable_option, range_option, list_option } type;
+ char *name;
+ char *err;
+ int def;
+ union {
+ struct { /* range_option info */
+ int min;
+ int max;
+ } r;
+ struct { /* list_option info */
+ int nr;
+ struct atl1e_opt_list { int i; char *str; } *p;
+ } l;
+ } arg;
+};
+
+static int __devinit atl1e_validate_option(int *value,
+ struct atl1e_option *opt, struct atl1e_adapter *adapter)
+{
+ if (*value == OPTION_UNSET) {
+ *value = opt->def;
+ return 0;
+ }
+
+ switch (opt->type) {
+ case enable_option:
+ switch (*value) {
+ case OPTION_ENABLED:
+ netdev_info(adapter->netdev,
+ "%s Enabled\n", opt->name);
+ return 0;
+ case OPTION_DISABLED:
+ netdev_info(adapter->netdev,
+ "%s Disabled\n", opt->name);
+ return 0;
+ }
+ break;
+ case range_option:
+ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+ netdev_info(adapter->netdev, "%s set to %i\n",
+ opt->name, *value);
+ return 0;
+ }
+ break;
+ case list_option:{
+ int i;
+ struct atl1e_opt_list *ent;
+
+ for (i = 0; i < opt->arg.l.nr; i++) {
+ ent = &opt->arg.l.p[i];
+ if (*value == ent->i) {
+ if (ent->str[0] != '\0')
+ netdev_info(adapter->netdev,
+ "%s\n", ent->str);
+ return 0;
+ }
+ }
+ break;
+ }
+ default:
+ BUG();
+ }
+
+ netdev_info(adapter->netdev, "Invalid %s specified (%i) %s\n",
+ opt->name, *value, opt->err);
+ *value = opt->def;
+ return -1;
+}
+
+/*
+ * atl1e_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input. If an invalid value is given, or if no user specified
+ * value exists, a default value is used. The final value is stored
+ * in a variable in the adapter structure.
+ */
+void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
+{
+ int bd = adapter->bd_number;
+
+ if (bd >= ATL1E_MAX_NIC) {
+ netdev_notice(adapter->netdev,
+ "no configuration for board #%i\n", bd);
+ netdev_notice(adapter->netdev,
+ "Using defaults for all values\n");
+ }
+
+ { /* Transmit Ring Size */
+ struct atl1e_option opt = {
+ .type = range_option,
+ .name = "Transmit Ddescription Count",
+ .err = "using default of "
+ __MODULE_STRING(ATL1E_DEFAULT_TX_DESC_CNT),
+ .def = ATL1E_DEFAULT_TX_DESC_CNT,
+ .arg = { .r = { .min = ATL1E_MIN_TX_DESC_CNT,
+ .max = ATL1E_MAX_TX_DESC_CNT} }
+ };
+ int val;
+ if (num_tx_desc_cnt > bd) {
+ val = tx_desc_cnt[bd];
+ atl1e_validate_option(&val, &opt, adapter);
+ adapter->tx_ring.count = (u16) val & 0xFFFC;
+ } else
+ adapter->tx_ring.count = (u16)opt.def;
+ }
+
+ { /* Receive Memory Block Count */
+ struct atl1e_option opt = {
+ .type = range_option,
+ .name = "Memory size of rx buffer(KB)",
+ .err = "using default of "
+ __MODULE_STRING(ATL1E_DEFAULT_RX_MEM_SIZE),
+ .def = ATL1E_DEFAULT_RX_MEM_SIZE,
+ .arg = { .r = { .min = ATL1E_MIN_RX_MEM_SIZE,
+ .max = ATL1E_MAX_RX_MEM_SIZE} }
+ };
+ int val;
+ if (num_rx_mem_size > bd) {
+ val = rx_mem_size[bd];
+ atl1e_validate_option(&val, &opt, adapter);
+ adapter->rx_ring.page_size = (u32)val * 1024;
+ } else {
+ adapter->rx_ring.page_size = (u32)opt.def * 1024;
+ }
+ }
+
+ { /* Interrupt Moderate Timer */
+ struct atl1e_option opt = {
+ .type = range_option,
+ .name = "Interrupt Moderate Timer",
+ .err = "using default of "
+ __MODULE_STRING(INT_MOD_DEFAULT_CNT),
+ .def = INT_MOD_DEFAULT_CNT,
+ .arg = { .r = { .min = INT_MOD_MIN_CNT,
+ .max = INT_MOD_MAX_CNT} }
+ };
+ int val;
+ if (num_int_mod_timer > bd) {
+ val = int_mod_timer[bd];
+ atl1e_validate_option(&val, &opt, adapter);
+ adapter->hw.imt = (u16) val;
+ } else
+ adapter->hw.imt = (u16)(opt.def);
+ }
+
+ { /* MediaType */
+ struct atl1e_option opt = {
+ .type = range_option,
+ .name = "Speed/Duplex Selection",
+ .err = "using default of "
+ __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR),
+ .def = MEDIA_TYPE_AUTO_SENSOR,
+ .arg = { .r = { .min = MEDIA_TYPE_AUTO_SENSOR,
+ .max = MEDIA_TYPE_10M_HALF} }
+ };
+ int val;
+ if (num_media_type > bd) {
+ val = media_type[bd];
+ atl1e_validate_option(&val, &opt, adapter);
+ adapter->hw.media_type = (u16) val;
+ } else
+ adapter->hw.media_type = (u16)(opt.def);
+
+ }
+}
diff --git a/drivers/net/ethernet/atheros/atlx/Makefile b/drivers/net/ethernet/atheros/atlx/Makefile
new file mode 100644
index 000000000000..e4f6022ca552
--- /dev/null
+++ b/drivers/net/ethernet/atheros/atlx/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ATL1) += atl1.o
+obj-$(CONFIG_ATL2) += atl2.o
+
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
new file mode 100644
index 000000000000..43511ab8dd27
--- /dev/null
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -0,0 +1,3674 @@
+/*
+ * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
+ * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ *
+ * Contact Information:
+ * Xiong Huang <xiong.huang@atheros.com>
+ * Jie Yang <jie.yang@atheros.com>
+ * Chris Snook <csnook@redhat.com>
+ * Jay Cliburn <jcliburn@gmail.com>
+ *
+ * This version is adapted from the Attansic reference driver.
+ *
+ * TODO:
+ * Add more ethtool functions.
+ * Fix abstruse irq enable/disable condition described here:
+ * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
+ *
+ * NEEDS TESTING:
+ * VLAN
+ * multicast
+ * promiscuous mode
+ * interrupt coalescing
+ * SMP torture testing
+ */
+
+#include <linux/atomic.h>
+#include <asm/byteorder.h>
+
+#include <linux/compiler.h>
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/hardirq.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/irqflags.h>
+#include <linux/irqreturn.h>
+#include <linux/jiffies.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/pm.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <net/checksum.h>
+
+#include "atl1.h"
+
+#define ATLX_DRIVER_VERSION "2.1.3"
+MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, "
+ "Chris Snook <csnook@redhat.com>, "
+ "Jay Cliburn <jcliburn@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ATLX_DRIVER_VERSION);
+
+/* Temporary hack for merging atl1 and atl2 */
+#include "atlx.c"
+
+static const struct ethtool_ops atl1_ethtool_ops;
+
+/*
+ * This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+#define ATL1_MAX_NIC 4
+
+#define OPTION_UNSET -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED 1
+
+#define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET }
+
+/*
+ * Interrupt Moderate Timer in units of 2 us
+ *
+ * Valid Range: 50-65000
+ *
+ * Default Value: 100 (200us)
+ */
+static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
+static unsigned int num_int_mod_timer;
+module_param_array_named(int_mod_timer, int_mod_timer, int,
+ &num_int_mod_timer, 0);
+MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer");
+
+#define DEFAULT_INT_MOD_CNT 100 /* 200us */
+#define MAX_INT_MOD_CNT 65000
+#define MIN_INT_MOD_CNT 50
+
+struct atl1_option {
+ enum { enable_option, range_option, list_option } type;
+ char *name;
+ char *err;
+ int def;
+ union {
+ struct { /* range_option info */
+ int min;
+ int max;
+ } r;
+ struct { /* list_option info */
+ int nr;
+ struct atl1_opt_list {
+ int i;
+ char *str;
+ } *p;
+ } l;
+ } arg;
+};
+
+static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
+ struct pci_dev *pdev)
+{
+ if (*value == OPTION_UNSET) {
+ *value = opt->def;
+ return 0;
+ }
+
+ switch (opt->type) {
+ case enable_option:
+ switch (*value) {
+ case OPTION_ENABLED:
+ dev_info(&pdev->dev, "%s enabled\n", opt->name);
+ return 0;
+ case OPTION_DISABLED:
+ dev_info(&pdev->dev, "%s disabled\n", opt->name);
+ return 0;
+ }
+ break;
+ case range_option:
+ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+ dev_info(&pdev->dev, "%s set to %i\n", opt->name,
+ *value);
+ return 0;
+ }
+ break;
+ case list_option:{
+ int i;
+ struct atl1_opt_list *ent;
+
+ for (i = 0; i < opt->arg.l.nr; i++) {
+ ent = &opt->arg.l.p[i];
+ if (*value == ent->i) {
+ if (ent->str[0] != '\0')
+ dev_info(&pdev->dev, "%s\n",
+ ent->str);
+ return 0;
+ }
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ dev_info(&pdev->dev, "invalid %s specified (%i) %s\n",
+ opt->name, *value, opt->err);
+ *value = opt->def;
+ return -1;
+}
+
+/*
+ * atl1_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input. If an invalid value is given, or if no user specified
+ * value exists, a default value is used. The final value is stored
+ * in a variable in the adapter structure.
+ */
+static void __devinit atl1_check_options(struct atl1_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int bd = adapter->bd_number;
+ if (bd >= ATL1_MAX_NIC) {
+ dev_notice(&pdev->dev, "no configuration for board#%i\n", bd);
+ dev_notice(&pdev->dev, "using defaults for all values\n");
+ }
+ { /* Interrupt Moderate Timer */
+ struct atl1_option opt = {
+ .type = range_option,
+ .name = "Interrupt Moderator Timer",
+ .err = "using default of "
+ __MODULE_STRING(DEFAULT_INT_MOD_CNT),
+ .def = DEFAULT_INT_MOD_CNT,
+ .arg = {.r = {.min = MIN_INT_MOD_CNT,
+ .max = MAX_INT_MOD_CNT} }
+ };
+ int val;
+ if (num_int_mod_timer > bd) {
+ val = int_mod_timer[bd];
+ atl1_validate_option(&val, &opt, pdev);
+ adapter->imt = (u16) val;
+ } else
+ adapter->imt = (u16) (opt.def);
+ }
+}
+
+/*
+ * atl1_pci_tbl - PCI Device ID Table
+ */
+static DEFINE_PCI_DEVICE_TABLE(atl1_pci_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
+ /* required last entry */
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);
+
+static const u32 atl1_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
+
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Message level (0=none,...,16=all)");
+
+/*
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ * hw - Struct containing variables accessed by shared code
+ * return: 0 on success, or the nonzero idle status on error
+ */
+static s32 atl1_reset_hw(struct atl1_hw *hw)
+{
+ struct pci_dev *pdev = hw->back->pdev;
+ struct atl1_adapter *adapter = hw->back;
+ u32 icr;
+ int i;
+
+ /*
+ * Clear the interrupt mask to stop the board from generating
+ * interrupts, and clear any pending interrupt events
+ */
+ /*
+ * iowrite32(0, hw->hw_addr + REG_IMR);
+ * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
+ */
+
+ /*
+ * Issue Soft Reset to the MAC. This will reset the chip's
+ * transmit, receive and DMA engines. It will not affect
+ * the current PCI configuration. The global reset bit is self-
+ * clearing, and should clear within a microsecond.
+ */
+ iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
+ ioread32(hw->hw_addr + REG_MASTER_CTRL);
+
+ iowrite16(1, hw->hw_addr + REG_PHY_ENABLE);
+ ioread16(hw->hw_addr + REG_PHY_ENABLE);
+
+ /* delay about 1ms */
+ msleep(1);
+
+ /* Wait at least 10ms for all modules to be idle */
+ for (i = 0; i < 10; i++) {
+ icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
+ if (!icr)
+ break;
+ /* delay 1 ms */
+ msleep(1);
+ /* FIXME: still the right way to do this? */
+ cpu_relax();
+ }
+
+ if (icr) {
+ if (netif_msg_hw(adapter))
+ dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
+ return icr;
+ }
+
+ return 0;
+}
+
+/* function about EEPROM
+ *
+ * check_eeprom_exist
+ * return 0 if eeprom exist
+ */
+static int atl1_check_eeprom_exist(struct atl1_hw *hw)
+{
+ u32 value;
+ value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
+ if (value & SPI_FLASH_CTRL_EN_VPD) {
+ value &= ~SPI_FLASH_CTRL_EN_VPD;
+ iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
+ }
+
+ value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
+ return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
+}
+
+static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
+{
+ int i;
+ u32 control;
+
+ if (offset & 3)
+ /* address is not aligned */
+ return false;
+
+ iowrite32(0, hw->hw_addr + REG_VPD_DATA);
+ control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
+ iowrite32(control, hw->hw_addr + REG_VPD_CAP);
+ ioread32(hw->hw_addr + REG_VPD_CAP);
+
+ for (i = 0; i < 10; i++) {
+ msleep(2);
+ control = ioread32(hw->hw_addr + REG_VPD_CAP);
+ if (control & VPD_CAP_VPD_FLAG)
+ break;
+ }
+ if (control & VPD_CAP_VPD_FLAG) {
+ *p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
+ return true;
+ }
+ /* timeout */
+ return false;
+}
+
+/*
+ * Reads the value from a PHY register
+ * hw - Struct containing variables accessed by shared code
+ * reg_addr - address of the PHY register to read
+ */
+static s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
+{
+ u32 val;
+ int i;
+
+ val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
+ MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
+ MDIO_CLK_SEL_SHIFT;
+ iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
+ ioread32(hw->hw_addr + REG_MDIO_CTRL);
+
+ for (i = 0; i < MDIO_WAIT_TIMES; i++) {
+ udelay(2);
+ val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ break;
+ }
+ if (!(val & (MDIO_START | MDIO_BUSY))) {
+ *phy_data = (u16) val;
+ return 0;
+ }
+ return ATLX_ERR_PHY;
+}
+
+#define CUSTOM_SPI_CS_SETUP 2
+#define CUSTOM_SPI_CLK_HI 2
+#define CUSTOM_SPI_CLK_LO 2
+#define CUSTOM_SPI_CS_HOLD 2
+#define CUSTOM_SPI_CS_HI 3
+
+static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
+{
+ int i;
+ u32 value;
+
+ iowrite32(0, hw->hw_addr + REG_SPI_DATA);
+ iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);
+
+ value = SPI_FLASH_CTRL_WAIT_READY |
+ (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
+ SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
+ SPI_FLASH_CTRL_CLK_HI_MASK) <<
+ SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
+ SPI_FLASH_CTRL_CLK_LO_MASK) <<
+ SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
+ SPI_FLASH_CTRL_CS_HOLD_MASK) <<
+ SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI &
+ SPI_FLASH_CTRL_CS_HI_MASK) <<
+ SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) <<
+ SPI_FLASH_CTRL_INS_SHIFT;
+
+ iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
+
+ value |= SPI_FLASH_CTRL_START;
+ iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
+ ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
+
+ for (i = 0; i < 10; i++) {
+ msleep(1);
+ value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
+ if (!(value & SPI_FLASH_CTRL_START))
+ break;
+ }
+
+ if (value & SPI_FLASH_CTRL_START)
+ return false;
+
+ *buf = ioread32(hw->hw_addr + REG_SPI_DATA);
+
+ return true;
+}
+
+/*
+ * get_permanent_address
+ * return 0 if get valid mac address,
+ */
+static int atl1_get_permanent_address(struct atl1_hw *hw)
+{
+ u32 addr[2];
+ u32 i, control;
+ u16 reg;
+ u8 eth_addr[ETH_ALEN];
+ bool key_valid;
+
+ if (is_valid_ether_addr(hw->perm_mac_addr))
+ return 0;
+
+ /* init */
+ addr[0] = addr[1] = 0;
+
+ if (!atl1_check_eeprom_exist(hw)) {
+ reg = 0;
+ key_valid = false;
+ /* Read out all EEPROM content */
+ i = 0;
+ while (1) {
+ if (atl1_read_eeprom(hw, i + 0x100, &control)) {
+ if (key_valid) {
+ if (reg == REG_MAC_STA_ADDR)
+ addr[0] = control;
+ else if (reg == (REG_MAC_STA_ADDR + 4))
+ addr[1] = control;
+ key_valid = false;
+ } else if ((control & 0xff) == 0x5A) {
+ key_valid = true;
+ reg = (u16) (control >> 16);
+ } else
+ break;
+ } else
+ /* read error */
+ break;
+ i += 4;
+ }
+
+ *(u32 *) &eth_addr[2] = swab32(addr[0]);
+ *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
+ if (is_valid_ether_addr(eth_addr)) {
+ memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
+ return 0;
+ }
+ }
+
+ /* see if the address lives in SPI flash */
+ addr[0] = addr[1] = 0;
+ reg = 0;
+ key_valid = false;
+ i = 0;
+ while (1) {
+ if (atl1_spi_read(hw, i + 0x1f000, &control)) {
+ if (key_valid) {
+ if (reg == REG_MAC_STA_ADDR)
+ addr[0] = control;
+ else if (reg == (REG_MAC_STA_ADDR + 4))
+ addr[1] = control;
+ key_valid = false;
+ } else if ((control & 0xff) == 0x5A) {
+ key_valid = true;
+ reg = (u16) (control >> 16);
+ } else
+ /* data end */
+ break;
+ } else
+ /* read error */
+ break;
+ i += 4;
+ }
+
+ *(u32 *) &eth_addr[2] = swab32(addr[0]);
+ *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
+ if (is_valid_ether_addr(eth_addr)) {
+ memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
+ return 0;
+ }
+
+ /*
+ * On some motherboards, the MAC address is written by the
+ * BIOS directly to the MAC register during POST, and is
+ * not stored in eeprom. If all else thus far has failed
+ * to fetch the permanent MAC address, try reading it directly.
+ */
+ addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR);
+ addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4));
+ *(u32 *) &eth_addr[2] = swab32(addr[0]);
+ *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
+ if (is_valid_ether_addr(eth_addr)) {
+ memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Reads the adapter's MAC address from the EEPROM
+ * hw - Struct containing variables accessed by shared code
+ */
+static s32 atl1_read_mac_addr(struct atl1_hw *hw)
+{
+ u16 i;
+
+ if (atl1_get_permanent_address(hw))
+ random_ether_addr(hw->perm_mac_addr);
+
+ for (i = 0; i < ETH_ALEN; i++)
+ hw->mac_addr[i] = hw->perm_mac_addr[i];
+ return 0;
+}
+
+/*
+ * Hashes an address to determine its location in the multicast table
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr - the multicast address to hash
+ *
+ * atl1_hash_mc_addr
+ * purpose
+ * compute the hash value for a multicast address
+ * hash calculation:
+ * 1. compute the 32-bit CRC of the multicast address
+ * 2. reverse the CRC, MSB to LSB
+ */
+static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
+{
+ u32 crc32, value = 0;
+ int i;
+
+ crc32 = ether_crc_le(6, mc_addr);
+ for (i = 0; i < 32; i++)
+ value |= (((crc32 >> i) & 1) << (31 - i));
+
+ return value;
+}
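+
+/*
+ * Worked example (illustrative): if ether_crc_le() returns 0x00000001
+ * (only bit 0 set), the reversal loop above maps bit 0 to bit 31 and
+ * yields 0x80000000.
+ */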
+
+/*
+ * Sets the bit in the multicast table corresponding to the hash value.
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ */
+static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
+{
+ u32 hash_bit, hash_reg;
+ u32 mta;
+
+ /*
+ * The HASH Table is a register array of 2 32-bit registers.
+ * It is treated like an array of 64 bits. We want to set
+ * bit BitArray[hash_value]. So we figure out what register
+ * the bit is in, read it, OR in the new bit, then write
+ * back the new value. The register is selected by the
+ * top bit of the hash value, and the bit within that
+ * register is selected by the next five bits.
+ */
+ hash_reg = (hash_value >> 31) & 0x1;
+ hash_bit = (hash_value >> 26) & 0x1F;
+ mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
+ mta |= (1 << hash_bit);
+ iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
+}
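+
+/*
+ * Worked example (illustrative): for hash_value 0x80000000,
+ * hash_reg = (0x80000000 >> 31) & 0x1 = 1 and
+ * hash_bit = (0x80000000 >> 26) & 0x1F = 0, so bit 0 of the second
+ * hash table register is set.
+ */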
+
+/*
+ * Writes a value to a PHY register
+ * hw - Struct containing variables accessed by shared code
+ * reg_addr - address of the PHY register to write
+ * data - data to write to the PHY
+ */
+static s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
+{
+ int i;
+ u32 val;
+
+ val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
+ (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
+ MDIO_SUP_PREAMBLE |
+ MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
+ iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
+ ioread32(hw->hw_addr + REG_MDIO_CTRL);
+
+ for (i = 0; i < MDIO_WAIT_TIMES; i++) {
+ udelay(2);
+ val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ break;
+ }
+
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ return 0;
+
+ return ATLX_ERR_PHY;
+}
+
+/*
+ * Bring the L001's PHY out of its power-saving state (hardware bug)
+ * hw - Struct containing variables accessed by shared code
+ * At power-on, the L001's PHY is always in the power-saving state
+ * (gigabit link forbidden)
+ */
+static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
+{
+ s32 ret;
+ ret = atl1_write_phy_reg(hw, 29, 0x0029);
+ if (ret)
+ return ret;
+ return atl1_write_phy_reg(hw, 30, 0);
+}
+
+/*
+ * Resets the PHY and makes all configuration take effect
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sets bits 15 and 12 of the MII Control register (for the F001 bug)
+ */
+static s32 atl1_phy_reset(struct atl1_hw *hw)
+{
+ struct pci_dev *pdev = hw->back->pdev;
+ struct atl1_adapter *adapter = hw->back;
+ s32 ret_val;
+ u16 phy_data;
+
+ if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+ hw->media_type == MEDIA_TYPE_1000M_FULL)
+ phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
+ else {
+ switch (hw->media_type) {
+ case MEDIA_TYPE_100M_FULL:
+ phy_data =
+ MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
+ MII_CR_RESET;
+ break;
+ case MEDIA_TYPE_100M_HALF:
+ phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
+ break;
+ case MEDIA_TYPE_10M_FULL:
+ phy_data =
+ MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
+ break;
+ default:
+ /* MEDIA_TYPE_10M_HALF: */
+ phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
+ break;
+ }
+ }
+
+ ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
+ if (ret_val) {
+ u32 val;
+ int i;
+ /* pcie serdes link may be down! */
+ if (netif_msg_hw(adapter))
+ dev_dbg(&pdev->dev, "pcie phy link down\n");
+
+ for (i = 0; i < 25; i++) {
+ msleep(1);
+ val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ break;
+ }
+
+ if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
+ if (netif_msg_hw(adapter))
+ dev_warn(&pdev->dev,
+ "pcie link down at least 25ms\n");
+ return ret_val;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Configures PHY autoneg and flow control advertisement settings
+ * hw - Struct containing variables accessed by shared code
+ */
+static s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
+{
+ s32 ret_val;
+ s16 mii_autoneg_adv_reg;
+ s16 mii_1000t_ctrl_reg;
+
+ /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+ mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
+
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK;
+
+ /*
+ * First we clear all the 10/100 mb speed bits in the Auto-Neg
+ * Advertisement Register (Address 4) and the 1000 mb speed bits in
+ * the 1000Base-T Control Register (Address 9).
+ */
+ mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
+ mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK;
+
+ /*
+ * Need to parse media_type and set up
+ * the appropriate PHY registers.
+ */
+ switch (hw->media_type) {
+ case MEDIA_TYPE_AUTO_SENSOR:
+ mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
+ MII_AR_10T_FD_CAPS |
+ MII_AR_100TX_HD_CAPS |
+ MII_AR_100TX_FD_CAPS);
+ mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
+ break;
+
+ case MEDIA_TYPE_1000M_FULL:
+ mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
+ break;
+
+ case MEDIA_TYPE_100M_FULL:
+ mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
+ break;
+
+ case MEDIA_TYPE_100M_HALF:
+ mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
+ break;
+
+ case MEDIA_TYPE_10M_FULL:
+ mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
+ break;
+
+ default:
+ mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
+ break;
+ }
+
+ /* flow control fixed to enable all */
+ mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
+
+ hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
+ hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
+
+ ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+
+ return 0;
+}
+
+/*
+ * Configures link settings.
+ * hw - Struct containing variables accessed by shared code
+ * Assumes the hardware has previously been reset and the
+ * transmitter and receiver are not enabled.
+ */
+static s32 atl1_setup_link(struct atl1_hw *hw)
+{
+ struct pci_dev *pdev = hw->back->pdev;
+ struct atl1_adapter *adapter = hw->back;
+ s32 ret_val;
+
+ /*
+ * Options:
+ * PHY will advertise value(s) parsed from
+ * autoneg_advertised and fc
+ * no matter what autoneg is, we will not wait for the link result.
+ */
+ ret_val = atl1_phy_setup_autoneg_adv(hw);
+ if (ret_val) {
+ if (netif_msg_link(adapter))
+ dev_dbg(&pdev->dev,
+ "error setting up autonegotiation\n");
+ return ret_val;
+ }
+ /* software reset; enable auto-negotiation if needed */
+ ret_val = atl1_phy_reset(hw);
+ if (ret_val) {
+ if (netif_msg_link(adapter))
+ dev_dbg(&pdev->dev, "error resetting phy\n");
+ return ret_val;
+ }
+ hw->phy_configured = true;
+ return ret_val;
+}
+
+static void atl1_init_flash_opcode(struct atl1_hw *hw)
+{
+ if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
+ /* Atmel */
+ hw->flash_vendor = 0;
+
+ /* Init OP table */
+ iowrite8(flash_table[hw->flash_vendor].cmd_program,
+ hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
+ iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
+ hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
+ iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
+ hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
+ iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
+ hw->hw_addr + REG_SPI_FLASH_OP_RDID);
+ iowrite8(flash_table[hw->flash_vendor].cmd_wren,
+ hw->hw_addr + REG_SPI_FLASH_OP_WREN);
+ iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
+ hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
+ iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
+ hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
+ iowrite8(flash_table[hw->flash_vendor].cmd_read,
+ hw->hw_addr + REG_SPI_FLASH_OP_READ);
+}
+
+/*
+ * Performs basic configuration of the adapter.
+ * hw - Struct containing variables accessed by shared code
+ * Assumes that the controller has previously been reset and is in a
+ * post-reset uninitialized state. Initializes multicast table,
+ * and calls routines to set up the link.
+ * Leaves the transmit and receive units disabled and uninitialized.
+ */
+static s32 atl1_init_hw(struct atl1_hw *hw)
+{
+ u32 ret_val = 0;
+
+ /* Zero out the Multicast HASH table */
+ iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
+ /* clear the old settings from the multicast hash table */
+ iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
+
+ atl1_init_flash_opcode(hw);
+
+ if (!hw->phy_configured) {
+ /* enable GPHY LinkChange Interrupt */
+ ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
+ if (ret_val)
+ return ret_val;
+ /* make PHY out of power-saving state */
+ ret_val = atl1_phy_leave_power_saving(hw);
+ if (ret_val)
+ return ret_val;
+ /* Call a subroutine to configure the link */
+ ret_val = atl1_setup_link(hw);
+ }
+ return ret_val;
+}
+
+/*
+ * Detects the current speed and duplex settings of the hardware.
+ * hw - Struct containing variables accessed by shared code
+ * speed - Speed of the connection
+ * duplex - Duplex setting of the connection
+ */
+static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
+{
+ struct pci_dev *pdev = hw->back->pdev;
+ struct atl1_adapter *adapter = hw->back;
+ s32 ret_val;
+ u16 phy_data;
+
+ /* Read PHY Specific Status Register (17) */
+ ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
+ return ATLX_ERR_PHY_RES;
+
+ switch (phy_data & MII_ATLX_PSSR_SPEED) {
+ case MII_ATLX_PSSR_1000MBS:
+ *speed = SPEED_1000;
+ break;
+ case MII_ATLX_PSSR_100MBS:
+ *speed = SPEED_100;
+ break;
+ case MII_ATLX_PSSR_10MBS:
+ *speed = SPEED_10;
+ break;
+ default:
+ if (netif_msg_hw(adapter))
+ dev_dbg(&pdev->dev, "error getting speed\n");
+ return ATLX_ERR_PHY_SPEED;
+ }
+ if (phy_data & MII_ATLX_PSSR_DPLX)
+ *duplex = FULL_DUPLEX;
+ else
+ *duplex = HALF_DUPLEX;
+
+ return 0;
+}
+
+static void atl1_set_mac_addr(struct atl1_hw *hw)
+{
+ u32 value;
+ /*
+ * 00-0B-6A-F6-00-DC
+ * 0: 6AF600DC 1: 000B
+ * low dword
+ */
+ value = (((u32) hw->mac_addr[2]) << 24) |
+ (((u32) hw->mac_addr[3]) << 16) |
+ (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
+ iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
+ /* high dword */
+ value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
+ iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
+}
+
+/*
+ * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * atl1_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ */
+static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
+{
+ struct atl1_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+
+ hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+ adapter->wol = 0;
+ device_set_wakeup_enable(&adapter->pdev->dev, false);
+ adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
+ adapter->ict = 50000; /* 100ms */
+ adapter->link_speed = SPEED_0; /* hardware init */
+ adapter->link_duplex = FULL_DUPLEX;
+
+ hw->phy_configured = false;
+ hw->preamble_len = 7;
+ hw->ipgt = 0x60;
+ hw->min_ifg = 0x50;
+ hw->ipgr1 = 0x40;
+ hw->ipgr2 = 0x60;
+ hw->max_retry = 0xf;
+ hw->lcol = 0x37;
+ hw->jam_ipg = 7;
+ hw->rfd_burst = 8;
+ hw->rrd_burst = 8;
+ hw->rfd_fetch_gap = 1;
+ hw->rx_jumbo_th = adapter->rx_buffer_len / 8;
+ hw->rx_jumbo_lkah = 1;
+ hw->rrd_ret_timer = 16;
+ hw->tpd_burst = 4;
+ hw->tpd_fetch_th = 16;
+ hw->txf_burst = 0x100;
+ hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3;
+ hw->tpd_fetch_gap = 1;
+ hw->rcb_value = atl1_rcb_64;
+ hw->dma_ord = atl1_dma_ord_enh;
+ hw->dmar_block = atl1_dma_req_256;
+ hw->dmaw_block = atl1_dma_req_256;
+ hw->cmb_rrd = 4;
+ hw->cmb_tpd = 4;
+ hw->cmb_rx_timer = 1; /* about 2us */
+ hw->cmb_tx_timer = 1; /* about 2us */
+ hw->smb_timer = 100000; /* about 200ms */
+
+ spin_lock_init(&adapter->lock);
+ spin_lock_init(&adapter->mb_lock);
+
+ return 0;
+}
+
+static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ u16 result;
+
+ atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);
+
+ return result;
+}
+
+static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
+ int val)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+
+ atl1_write_phy_reg(&adapter->hw, reg_num, val);
+}
+
+/*
+ * atl1_mii_ioctl - handle MII ioctls
+ * @netdev: network interface device structure
+ * @ifr: pointer to the ioctl request
+ * @cmd: ioctl command
+ */
+static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
+ int retval;
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+
+ return retval;
+}
+
+/*
+ * atl1_setup_ring_resources - allocate Tx / Rx descriptor resources
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ */
+static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
+{
+ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+ struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+ struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1_ring_header *ring_header = &adapter->ring_header;
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+ u8 offset = 0;
+
+ size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
+ tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
+ if (unlikely(!tpd_ring->buffer_info)) {
+ if (netif_msg_drv(adapter))
+ dev_err(&pdev->dev, "kzalloc failed , size = D%d\n",
+ size);
+ goto err_nomem;
+ }
+ rfd_ring->buffer_info =
+ (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);
+
+ /*
+ * real ring DMA buffer
+ * each ring/block may need up to 8 bytes for alignment, hence the
+ * additional 40 bytes tacked onto the end.
+ */
+ ring_header->size = size =
+ sizeof(struct tx_packet_desc) * tpd_ring->count
+ + sizeof(struct rx_free_desc) * rfd_ring->count
+ + sizeof(struct rx_return_desc) * rrd_ring->count
+ + sizeof(struct coals_msg_block)
+ + sizeof(struct stats_msg_block)
+ + 40;
+
+ ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
+ &ring_header->dma);
+ if (unlikely(!ring_header->desc)) {
+ if (netif_msg_drv(adapter))
+ dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
+ goto err_nomem;
+ }
+
+ memset(ring_header->desc, 0, ring_header->size);
+
+ /* init TPD ring */
+ tpd_ring->dma = ring_header->dma;
+ offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0;
+ tpd_ring->dma += offset;
+ tpd_ring->desc = (u8 *) ring_header->desc + offset;
+ tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;
+
+ /* init RFD ring */
+ rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
+ offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0;
+ rfd_ring->dma += offset;
+ rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
+ rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;
+
+
+ /* init RRD ring */
+ rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
+ offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0;
+ rrd_ring->dma += offset;
+ rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
+ rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;
+
+
+ /* init CMB */
+ adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
+ offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
+ adapter->cmb.dma += offset;
+ adapter->cmb.cmb = (struct coals_msg_block *)
+ ((u8 *) rrd_ring->desc + (rrd_ring->size + offset));
+
+ /* init SMB */
+ adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
+ offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
+ adapter->smb.dma += offset;
+ adapter->smb.smb = (struct stats_msg_block *)
+ ((u8 *) adapter->cmb.cmb +
+ (sizeof(struct coals_msg_block) + offset));
+
+ return 0;
+
+err_nomem:
+ kfree(tpd_ring->buffer_info);
+ return -ENOMEM;
+}
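+
+/*
+ * Alignment note (illustrative): each block above is pushed up to the
+ * next 8-byte boundary. If a block's dma address ends in ...4, then
+ * offset = 8 - 4 = 4 and the block starts at ...8; an already aligned
+ * address gets offset 0. Five such adjustments at up to 8 bytes each
+ * account for the 40 spare bytes in ring_header->size.
+ */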
+
+static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
+{
+ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+ struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+ struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
+
+ atomic_set(&tpd_ring->next_to_use, 0);
+ atomic_set(&tpd_ring->next_to_clean, 0);
+
+ rfd_ring->next_to_clean = 0;
+ atomic_set(&rfd_ring->next_to_use, 0);
+
+ rrd_ring->next_to_use = 0;
+ atomic_set(&rrd_ring->next_to_clean, 0);
+}
+
+/*
+ * atl1_clean_rx_ring - Free RFD Buffers
+ * @adapter: board private structure
+ */
+static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
+{
+ struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+ struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1_buffer *buffer_info;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rfd_ring->count; i++) {
+ buffer_info = &rfd_ring->buffer_info[i];
+ if (buffer_info->dma) {
+ pci_unmap_page(pdev, buffer_info->dma,
+ buffer_info->length, PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
+ }
+ if (buffer_info->skb) {
+ dev_kfree_skb(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ }
+
+ size = sizeof(struct atl1_buffer) * rfd_ring->count;
+ memset(rfd_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rfd_ring->desc, 0, rfd_ring->size);
+
+ rfd_ring->next_to_clean = 0;
+ atomic_set(&rfd_ring->next_to_use, 0);
+
+ rrd_ring->next_to_use = 0;
+ atomic_set(&rrd_ring->next_to_clean, 0);
+}
+
+/*
+ * atl1_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ */
+static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
+{
+ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+ struct atl1_buffer *buffer_info;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tpd_ring->count; i++) {
+ buffer_info = &tpd_ring->buffer_info[i];
+ if (buffer_info->dma) {
+ pci_unmap_page(pdev, buffer_info->dma,
+ buffer_info->length, PCI_DMA_TODEVICE);
+ buffer_info->dma = 0;
+ }
+ }
+
+ for (i = 0; i < tpd_ring->count; i++) {
+ buffer_info = &tpd_ring->buffer_info[i];
+ if (buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ }
+
+ size = sizeof(struct atl1_buffer) * tpd_ring->count;
+ memset(tpd_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(tpd_ring->desc, 0, tpd_ring->size);
+
+ atomic_set(&tpd_ring->next_to_use, 0);
+ atomic_set(&tpd_ring->next_to_clean, 0);
+}
+
+/*
+ * atl1_free_ring_resources - Free Tx / RX descriptor Resources
+ * @adapter: board private structure
+ *
+ * Free all transmit and receive software resources
+ */
+static void atl1_free_ring_resources(struct atl1_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+ struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+ struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1_ring_header *ring_header = &adapter->ring_header;
+
+ atl1_clean_tx_ring(adapter);
+ atl1_clean_rx_ring(adapter);
+
+ kfree(tpd_ring->buffer_info);
+ pci_free_consistent(pdev, ring_header->size, ring_header->desc,
+ ring_header->dma);
+
+ tpd_ring->buffer_info = NULL;
+ tpd_ring->desc = NULL;
+ tpd_ring->dma = 0;
+
+ rfd_ring->buffer_info = NULL;
+ rfd_ring->desc = NULL;
+ rfd_ring->dma = 0;
+
+ rrd_ring->desc = NULL;
+ rrd_ring->dma = 0;
+
+ adapter->cmb.dma = 0;
+ adapter->cmb.cmb = NULL;
+
+ adapter->smb.dma = 0;
+ adapter->smb.smb = NULL;
+}
+
+static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
+{
+ u32 value;
+ struct atl1_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ /* Config MAC CTRL Register */
+ value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
+ /* duplex */
+ if (FULL_DUPLEX == adapter->link_duplex)
+ value |= MAC_CTRL_DUPLX;
+ /* speed */
+ value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
+ MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
+ MAC_CTRL_SPEED_SHIFT);
+ /* flow control */
+ value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
+ /* PAD & CRC */
+ value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
+ /* preamble length */
+ value |= (((u32) adapter->hw.preamble_len
+ & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
+ /* vlan */
+ __atlx_vlan_mode(netdev->features, &value);
+ /* rx checksum
+ if (adapter->rx_csum)
+ value |= MAC_CTRL_RX_CHKSUM_EN;
+ */
+ /* filter mode */
+ value |= MAC_CTRL_BC_EN;
+ if (netdev->flags & IFF_PROMISC)
+ value |= MAC_CTRL_PROMIS_EN;
+ else if (netdev->flags & IFF_ALLMULTI)
+ value |= MAC_CTRL_MC_ALL_EN;
+ /* value |= MAC_CTRL_LOOPBACK; */
+ iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
+}
+
+static u32 atl1_check_link(struct atl1_adapter *adapter)
+{
+ struct atl1_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 ret_val;
+ u16 speed, duplex, phy_data;
+ int reconfig = 0;
+
+ /* MII_BMSR must be read twice */
+ atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
+ atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
+ if (!(phy_data & BMSR_LSTATUS)) {
+ /* link down */
+ if (netif_carrier_ok(netdev)) {
+ /* old link state: Up */
+ if (netif_msg_link(adapter))
+ dev_info(&adapter->pdev->dev, "link is down\n");
+ adapter->link_speed = SPEED_0;
+ netif_carrier_off(netdev);
+ }
+ return 0;
+ }
+
+ /* Link Up */
+ ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val)
+ return ret_val;
+
+ switch (hw->media_type) {
+ case MEDIA_TYPE_1000M_FULL:
+ if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
+ reconfig = 1;
+ break;
+ case MEDIA_TYPE_100M_FULL:
+ if (speed != SPEED_100 || duplex != FULL_DUPLEX)
+ reconfig = 1;
+ break;
+ case MEDIA_TYPE_100M_HALF:
+ if (speed != SPEED_100 || duplex != HALF_DUPLEX)
+ reconfig = 1;
+ break;
+ case MEDIA_TYPE_10M_FULL:
+ if (speed != SPEED_10 || duplex != FULL_DUPLEX)
+ reconfig = 1;
+ break;
+ case MEDIA_TYPE_10M_HALF:
+ if (speed != SPEED_10 || duplex != HALF_DUPLEX)
+ reconfig = 1;
+ break;
+ }
+
+ /* link result is our setting */
+ if (!reconfig) {
+ if (adapter->link_speed != speed ||
+ adapter->link_duplex != duplex) {
+ adapter->link_speed = speed;
+ adapter->link_duplex = duplex;
+ atl1_setup_mac_ctrl(adapter);
+ if (netif_msg_link(adapter))
+ dev_info(&adapter->pdev->dev,
+ "%s link is up %d Mbps %s\n",
+ netdev->name, adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "full duplex" : "half duplex");
+ }
+ if (!netif_carrier_ok(netdev)) {
+ /* Link down -> Up */
+ netif_carrier_on(netdev);
+ }
+ return 0;
+ }
+
+ /* link no longer matches the configured media type; take it down */
+ if (netif_carrier_ok(netdev)) {
+ adapter->link_speed = SPEED_0;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+
+ if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
+ hw->media_type != MEDIA_TYPE_1000M_FULL) {
+ switch (hw->media_type) {
+ case MEDIA_TYPE_100M_FULL:
+ phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
+ MII_CR_RESET;
+ break;
+ case MEDIA_TYPE_100M_HALF:
+ phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
+ break;
+ case MEDIA_TYPE_10M_FULL:
+ phy_data =
+ MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
+ break;
+ default:
+ /* MEDIA_TYPE_10M_HALF: */
+ phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
+ break;
+ }
+ atl1_write_phy_reg(hw, MII_BMCR, phy_data);
+ return 0;
+ }
+
+ /* auto-negotiating: arm the timer to re-configure the PHY */
+ if (!adapter->phy_timer_pending) {
+ adapter->phy_timer_pending = true;
+ mod_timer(&adapter->phy_config_timer,
+ round_jiffies(jiffies + 3 * HZ));
+ }
+
+ return 0;
+}
+
+static void set_flow_ctrl_old(struct atl1_adapter *adapter)
+{
+ u32 hi, lo, value;
+
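+ /*
+ * Pause thresholds are set as fractions of the ring size, roughly
+ * 1/16 on one side and 7/8 on the other; the precise pause/resume
+ * semantics of hi vs lo are defined by the hardware.
+ */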
+ /* RFD Flow Control */
+ value = adapter->rfd_ring.count;
+ hi = value / 16;
+ if (hi < 2)
+ hi = 2;
+ lo = value * 7 / 8;
+
+ value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
+ ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
+ iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
+
+ /* RRD Flow Control */
+ value = adapter->rrd_ring.count;
+ lo = value / 16;
+ hi = value * 7 / 8;
+ if (lo < 2)
+ lo = 2;
+ value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
+ ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
+ iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
+}
+
+static void set_flow_ctrl_new(struct atl1_hw *hw)
+{
+ u32 hi, lo, value;
+
+ /* RXF Flow Control */
+ value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
+ lo = value / 16;
+ if (lo < 192)
+ lo = 192;
+ hi = value * 7 / 8;
+ if (hi < lo)
+ hi = lo + 16;
+ value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
+ ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
+ iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
+
+ /* RRD Flow Control */
+ value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
+ lo = value / 8;
+ hi = value * 7 / 8;
+ if (lo < 2)
+ lo = 2;
+ if (hi < lo)
+ hi = lo + 3;
+ value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
+ ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
+ iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
+}
+
+/*
+ * atl1_configure - Configure Transmit & Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx/Rx unit of the MAC after a reset.
+ */
+static u32 atl1_configure(struct atl1_adapter *adapter)
+{
+ struct atl1_hw *hw = &adapter->hw;
+ u32 value;
+
+ /* clear interrupt status */
+ iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);
+
+ /* set MAC Address */
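+ /*
+ * The station address registers take the address byte-swapped:
+ * bytes 2-5 go in the low register, bytes 0-1 in the high one.
+ */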
+ value = (((u32) hw->mac_addr[2]) << 24) |
+ (((u32) hw->mac_addr[3]) << 16) |
+ (((u32) hw->mac_addr[4]) << 8) |
+ (((u32) hw->mac_addr[5]));
+ iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
+ value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
+ iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));
+
+ /* tx / rx ring */
+
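+ /*
+ * All rings share a single high-32-bit base address register
+ * (see the DMA mask comment in atl1_probe), so only the low
+ * 32 bits are programmed per ring.
+ */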
+ /* HI base address */
+ iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
+ hw->hw_addr + REG_DESC_BASE_ADDR_HI);
+ /* LO base address */
+ iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
+ hw->hw_addr + REG_DESC_RFD_ADDR_LO);
+ iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
+ hw->hw_addr + REG_DESC_RRD_ADDR_LO);
+ iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
+ hw->hw_addr + REG_DESC_TPD_ADDR_LO);
+ iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
+ hw->hw_addr + REG_DESC_CMB_ADDR_LO);
+ iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
+ hw->hw_addr + REG_DESC_SMB_ADDR_LO);
+
+ /* element count */
+ value = adapter->rrd_ring.count;
+ value <<= 16;
+ value += adapter->rfd_ring.count;
+ iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
+ iowrite32(adapter->tpd_ring.count, hw->hw_addr +
+ REG_DESC_TPD_RING_SIZE);
+
+ /* Load Ptr */
+ iowrite32(1, hw->hw_addr + REG_LOAD_PTR);
+
+ /* config Mailbox */
+ value = ((atomic_read(&adapter->tpd_ring.next_to_use)
+ & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
+ ((atomic_read(&adapter->rrd_ring.next_to_clean)
+ & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
+ ((atomic_read(&adapter->rfd_ring.next_to_use)
+ & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
+ iowrite32(value, hw->hw_addr + REG_MAILBOX);
+
+ /* config IPG/IFG */
+ value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
+ << MAC_IPG_IFG_IPGT_SHIFT) |
+ (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
+ << MAC_IPG_IFG_MIFG_SHIFT) |
+ (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
+ << MAC_IPG_IFG_IPGR1_SHIFT) |
+ (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
+ << MAC_IPG_IFG_IPGR2_SHIFT);
+ iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);
+
+ /* config Half-Duplex Control */
+ value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
+ (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
+ << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
+ MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
+ (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
+ (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
+ << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
+ iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);
+
+ /* set Interrupt Moderator Timer */
+ iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
+ iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL);
+
+ /* set Interrupt Clear Timer */
+ iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);
+
+ /* set max frame size hw will accept */
+ iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU);
+
+ /* jumbo size & rrd retirement timer */
+ value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
+ << RXQ_JMBOSZ_TH_SHIFT) |
+ (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
+ << RXQ_JMBO_LKAH_SHIFT) |
+ (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
+ << RXQ_RRD_TIMER_SHIFT);
+ iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);
+
+ /* Flow Control */
+ switch (hw->dev_rev) {
+ case 0x8001:
+ case 0x9001:
+ case 0x9002:
+ case 0x9003:
+ set_flow_ctrl_old(adapter);
+ break;
+ default:
+ set_flow_ctrl_new(hw);
+ break;
+ }
+
+ /* config TXQ */
+ value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
+ << TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
+ (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
+ << TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
+ (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
+ << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE |
+ TXQ_CTRL_EN;
+ iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);
+
+ /* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */
+ value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
+ << TX_JUMBO_TASK_TH_SHIFT) |
+ (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
+ << TX_TPD_MIN_IPG_SHIFT);
+ iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);
+
+ /* config RXQ */
+ value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
+ << RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
+ (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
+ << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
+ (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
+ << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN |
+ RXQ_CTRL_EN;
+ iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);
+
+ /* config DMA Engine */
+ value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
+ << DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
+ ((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
+ << DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
+ DMA_CTRL_DMAW_EN;
+ value |= (u32) hw->dma_ord;
+ if (atl1_rcb_128 == hw->rcb_value)
+ value |= DMA_CTRL_RCB_VALUE;
+ iowrite32(value, hw->hw_addr + REG_DMA_CTRL);
+
+ /* config CMB / SMB */
+ value = (hw->cmb_tpd > adapter->tpd_ring.count) ?
+ hw->cmb_tpd : adapter->tpd_ring.count;
+ value <<= 16;
+ value |= hw->cmb_rrd;
+ iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
+ value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
+ iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
+ iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);
+
+ /* enable CMB / SMB */
+ value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
+ iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);
+
+ value = ioread32(adapter->hw.hw_addr + REG_ISR);
+ if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
+ value = 1; /* config failed */
+ else
+ value = 0;
+
+ /* clear all interrupt status */
+ iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
+ iowrite32(0, adapter->hw.hw_addr + REG_ISR);
+ return value;
+}
+
+/*
+ * atl1_pcie_patch - Patch for PCIE module
+ */
+static void atl1_pcie_patch(struct atl1_adapter *adapter)
+{
+ u32 value;
+
+ /* much vendor magic here */
+ value = 0x6500;
+ iowrite32(value, adapter->hw.hw_addr + 0x12FC);
+ /* pcie flow control mode change */
+ value = ioread32(adapter->hw.hw_addr + 0x1008);
+ value |= 0x8000;
+ iowrite32(value, adapter->hw.hw_addr + 0x1008);
+}
+
+/*
+ * On ACPI resume, some VIA motherboards come back with the Interrupt
+ * Disable bit (0x400) set in the PCI Command register, which blocks
+ * legacy INTx interrupts; clear it here.
+ * Brackett, 2006/03/15
+ */
+static void atl1_via_workaround(struct atl1_adapter *adapter)
+{
+ u16 value;
+
+ value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
+ value &= ~PCI_COMMAND_INTX_DISABLE;
+ /* PCI_COMMAND is 16 bits wide; a 32-bit write would spill into
+ * the adjacent PCI_STATUS register */
+ iowrite16(value, adapter->hw.hw_addr + PCI_COMMAND);
+}
+
+static void atl1_inc_smb(struct atl1_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct stats_msg_block *smb = adapter->smb.smb;
+
+ /* Fill out the OS statistics structure */
+ adapter->soft_stats.rx_packets += smb->rx_ok;
+ adapter->soft_stats.tx_packets += smb->tx_ok;
+ adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
+ adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
+ adapter->soft_stats.multicast += smb->rx_mcast;
+ adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 +
+ smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry);
+
+ /* Rx Errors */
+ adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err +
+ smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov +
+ smb->rx_rrd_ov + smb->rx_align_err);
+ adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
+ adapter->soft_stats.rx_length_errors += smb->rx_len_err;
+ adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
+ adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
+ adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
+ smb->rx_rxf_ov);
+
+ adapter->soft_stats.rx_pause += smb->rx_pause;
+ adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
+ adapter->soft_stats.rx_trunc += smb->rx_sz_ov;
+
+ /* Tx Errors */
+ adapter->soft_stats.tx_errors += (smb->tx_late_col +
+ smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc);
+ adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
+ adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
+ adapter->soft_stats.tx_window_errors += smb->tx_late_col;
+
+ adapter->soft_stats.excecol += smb->tx_abort_col;
+ adapter->soft_stats.deffer += smb->tx_defer;
+ adapter->soft_stats.scc += smb->tx_1_col;
+ adapter->soft_stats.mcc += smb->tx_2_col;
+ adapter->soft_stats.latecol += smb->tx_late_col;
+ adapter->soft_stats.tx_underun += smb->tx_underrun;
+ adapter->soft_stats.tx_trunc += smb->tx_trunc;
+ adapter->soft_stats.tx_pause += smb->tx_pause;
+
+ netdev->stats.rx_packets = adapter->soft_stats.rx_packets;
+ netdev->stats.tx_packets = adapter->soft_stats.tx_packets;
+ netdev->stats.rx_bytes = adapter->soft_stats.rx_bytes;
+ netdev->stats.tx_bytes = adapter->soft_stats.tx_bytes;
+ netdev->stats.multicast = adapter->soft_stats.multicast;
+ netdev->stats.collisions = adapter->soft_stats.collisions;
+ netdev->stats.rx_errors = adapter->soft_stats.rx_errors;
+ netdev->stats.rx_over_errors =
+ adapter->soft_stats.rx_missed_errors;
+ netdev->stats.rx_length_errors =
+ adapter->soft_stats.rx_length_errors;
+ netdev->stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
+ netdev->stats.rx_frame_errors =
+ adapter->soft_stats.rx_frame_errors;
+ netdev->stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
+ netdev->stats.rx_missed_errors =
+ adapter->soft_stats.rx_missed_errors;
+ netdev->stats.tx_errors = adapter->soft_stats.tx_errors;
+ netdev->stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
+ netdev->stats.tx_aborted_errors =
+ adapter->soft_stats.tx_aborted_errors;
+ netdev->stats.tx_window_errors =
+ adapter->soft_stats.tx_window_errors;
+ netdev->stats.tx_carrier_errors =
+ adapter->soft_stats.tx_carrier_errors;
+}
+
+static void atl1_update_mailbox(struct atl1_adapter *adapter)
+{
+ unsigned long flags;
+ u32 tpd_next_to_use;
+ u32 rfd_next_to_use;
+ u32 rrd_next_to_clean;
+ u32 value;
+
+ spin_lock_irqsave(&adapter->mb_lock, flags);
+
+ tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
+ rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
+ rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);
+
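+ /*
+ * The three ring indices (RFD producer, RRD consumer, TPD
+ * producer) are packed into the single MAILBOX doorbell register.
+ */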
+ value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
+ MB_RFD_PROD_INDX_SHIFT) |
+ ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
+ MB_RRD_CONS_INDX_SHIFT) |
+ ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
+ MB_TPD_PROD_INDX_SHIFT);
+ iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
+
+ spin_unlock_irqrestore(&adapter->mb_lock, flags);
+}
+
+static void atl1_clean_alloc_flag(struct atl1_adapter *adapter,
+ struct rx_return_desc *rrd, u16 offset)
+{
+ struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+
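+ /*
+ * Walk forward from next_to_clean to the RFD referenced by the
+ * rrd (plus offset), releasing each RFD's alloc flag on the way.
+ */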
+ while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) {
+ rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0;
+ if (++rfd_ring->next_to_clean == rfd_ring->count) {
+ rfd_ring->next_to_clean = 0;
+ }
+ }
+}
+
+static void atl1_update_rfd_index(struct atl1_adapter *adapter,
+ struct rx_return_desc *rrd)
+{
+ u16 num_buf;
+
+ num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) /
+ adapter->rx_buffer_len;
+ if (rrd->num_buf == num_buf)
+ /* clean alloc flag for bad rrd */
+ atl1_clean_alloc_flag(adapter, rrd, num_buf);
+}
+
+static void atl1_rx_checksum(struct atl1_adapter *adapter,
+ struct rx_return_desc *rrd, struct sk_buff *skb)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ /*
+ * The L1 hardware contains a bug that erroneously sets the
+ * PACKET_FLAG_ERR and ERR_FLAG_L4_CHKSUM bits whenever a
+ * fragmented IP packet is received, even though the packet
+ * is perfectly valid and its checksum is correct. There's
+ * no way to distinguish between one of these good packets
+ * and a packet that actually contains a TCP/UDP checksum
+ * error, so all we can do is allow it to be handed up to
+ * the higher layers and let it be sorted out there.
+ */
+
+ skb_checksum_none_assert(skb);
+
+ if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
+ if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
+ ERR_FLAG_CODE | ERR_FLAG_OV)) {
+ adapter->hw_csum_err++;
+ if (netif_msg_rx_err(adapter))
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "rx checksum error\n");
+ return;
+ }
+ }
+
+ /* not IPv4 */
+ if (!(rrd->pkt_flg & PACKET_FLAG_IPV4))
+ /* checksum is invalid, but it's not an IPv4 pkt, so ok */
+ return;
+
+ /* IPv4 packet */
+ if (likely(!(rrd->err_flg &
+ (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ adapter->hw_csum_good++;
+ return;
+ }
+}
+
+/*
+ * atl1_alloc_rx_buffers - Replace used receive buffers
+ * @adapter: address of board private structure
+ */
+static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
+{
+ struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ struct page *page;
+ unsigned long offset;
+ struct atl1_buffer *buffer_info, *next_info;
+ struct sk_buff *skb;
+ u16 num_alloc = 0;
+ u16 rfd_next_to_use, next_next;
+ struct rx_free_desc *rfd_desc;
+
+ next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use);
+ if (++next_next == rfd_ring->count)
+ next_next = 0;
+ buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
+ next_info = &rfd_ring->buffer_info[next_next];
+
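+ /*
+ * Refill only while both the current and the following RFD are
+ * free, which appears intended to keep at least one unallocated
+ * descriptor between producer and consumer.
+ */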
+ while (!buffer_info->alloced && !next_info->alloced) {
+ if (buffer_info->skb) {
+ buffer_info->alloced = 1;
+ goto next;
+ }
+
+ rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);
+
+ skb = netdev_alloc_skb_ip_align(adapter->netdev,
+ adapter->rx_buffer_len);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ adapter->netdev->stats.rx_dropped++;
+ break;
+ }
+
+ buffer_info->alloced = 1;
+ buffer_info->skb = skb;
+ buffer_info->length = (u16) adapter->rx_buffer_len;
+ page = virt_to_page(skb->data);
+ offset = (unsigned long)skb->data & ~PAGE_MASK;
+ buffer_info->dma = pci_map_page(pdev, page, offset,
+ adapter->rx_buffer_len,
+ PCI_DMA_FROMDEVICE);
+ rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+ rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
+ rfd_desc->coalese = 0;
+
+next:
+ rfd_next_to_use = next_next;
+ if (unlikely(++next_next == rfd_ring->count))
+ next_next = 0;
+
+ buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
+ next_info = &rfd_ring->buffer_info[next_next];
+ num_alloc++;
+ }
+
+ if (num_alloc) {
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use);
+ }
+ return num_alloc;
+}
+
+static void atl1_intr_rx(struct atl1_adapter *adapter)
+{
+ int i, count;
+ u16 length;
+ u16 rrd_next_to_clean;
+ u32 value;
+ struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+ struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1_buffer *buffer_info;
+ struct rx_return_desc *rrd;
+ struct sk_buff *skb;
+
+ count = 0;
+
+ rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);
+
+ while (1) {
+ rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
+ i = 1;
+ if (likely(rrd->xsz.valid)) { /* packet valid */
+chk_rrd:
+ /* check rrd status */
+ if (likely(rrd->num_buf == 1))
+ goto rrd_ok;
+ else if (netif_msg_rx_err(adapter)) {
+ dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+ "unexpected RRD buffer count\n");
+ dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+ "rx_buf_len = %d\n",
+ adapter->rx_buffer_len);
+ dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+ "RRD num_buf = %d\n",
+ rrd->num_buf);
+ dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+ "RRD pkt_len = %d\n",
+ rrd->xsz.xsum_sz.pkt_size);
+ dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+ "RRD pkt_flg = 0x%08X\n",
+ rrd->pkt_flg);
+ dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+ "RRD err_flg = 0x%08X\n",
+ rrd->err_flg);
+ dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+ "RRD vlan_tag = 0x%08X\n",
+ rrd->vlan_tag);
+ }
+
+ /* rrd seems to be bad */
+ if (unlikely(i-- > 0)) {
+ /* rrd may not be DMAed completely */
+ udelay(1);
+ goto chk_rrd;
+ }
+ /* bad rrd */
+ if (netif_msg_rx_err(adapter))
+ dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+ "bad RRD\n");
+ /* see whether the RFD index needs updating */
+ if (rrd->num_buf > 1)
+ atl1_update_rfd_index(adapter, rrd);
+
+ /* update rrd */
+ rrd->xsz.valid = 0;
+ if (++rrd_next_to_clean == rrd_ring->count)
+ rrd_next_to_clean = 0;
+ count++;
+ continue;
+ } else {
+ /* the current rrd has not been updated by hardware yet */
+ break;
+ }
+rrd_ok:
+ /* release the alloc flag on the RFDs consumed up to this rrd */
+ atl1_clean_alloc_flag(adapter, rrd, 0);
+
+ buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
+ if (++rfd_ring->next_to_clean == rfd_ring->count)
+ rfd_ring->next_to_clean = 0;
+
+ /* update rrd next to clean */
+ if (++rrd_next_to_clean == rrd_ring->count)
+ rrd_next_to_clean = 0;
+ count++;
+
+ if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
+ if (!(rrd->err_flg &
+ (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM
+ | ERR_FLAG_LEN))) {
+ /* packet error, don't need upstream */
+ buffer_info->alloced = 0;
+ rrd->xsz.valid = 0;
+ continue;
+ }
+ }
+
+ /* Good Receive */
+ pci_unmap_page(adapter->pdev, buffer_info->dma,
+ buffer_info->length, PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
+ skb = buffer_info->skb;
+ length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);
+
+ skb_put(skb, length - ETH_FCS_LEN);
+
+ /* Receive Checksum Offload */
+ atl1_rx_checksum(adapter, rrd, skb);
+ skb->protocol = eth_type_trans(skb, adapter->netdev);
+
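+ /*
+ * The RRD carries the VLAN tag in a hardware-specific bit order;
+ * shuffle it back into the 802.1Q layout (the inverse of the
+ * swizzle done on the transmit path in atl1_xmit_frame()).
+ */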
+ if (rrd->pkt_flg & PACKET_FLAG_VLAN_INS) {
+ u16 vlan_tag = (rrd->vlan_tag >> 4) |
+ ((rrd->vlan_tag & 7) << 13) |
+ ((rrd->vlan_tag & 8) << 9);
+
+ __vlan_hwaccel_put_tag(skb, vlan_tag);
+ }
+ netif_rx(skb);
+
+ /* let protocol layer free skb */
+ buffer_info->skb = NULL;
+ buffer_info->alloced = 0;
+ rrd->xsz.valid = 0;
+ }
+
+ atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);
+
+ atl1_alloc_rx_buffers(adapter);
+
+ /* notify the hardware of the new ring indices via the mailbox */
+ if (count) {
+ u32 tpd_next_to_use;
+ u32 rfd_next_to_use;
+
+ spin_lock(&adapter->mb_lock);
+
+ tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
+ rfd_next_to_use =
+ atomic_read(&adapter->rfd_ring.next_to_use);
+ rrd_next_to_clean =
+ atomic_read(&adapter->rrd_ring.next_to_clean);
+ value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
+ MB_RFD_PROD_INDX_SHIFT) |
+ ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
+ MB_RRD_CONS_INDX_SHIFT) |
+ ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
+ MB_TPD_PROD_INDX_SHIFT);
+ iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
+ spin_unlock(&adapter->mb_lock);
+ }
+}
+
+static void atl1_intr_tx(struct atl1_adapter *adapter)
+{
+ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+ struct atl1_buffer *buffer_info;
+ u16 sw_tpd_next_to_clean;
+ u16 cmb_tpd_next_to_clean;
+
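+ /* the hardware reports its TPD consumer index through the CMB */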
+ sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
+ cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);
+
+ while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
+ buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
+ if (buffer_info->dma) {
+ pci_unmap_page(adapter->pdev, buffer_info->dma,
+ buffer_info->length, PCI_DMA_TODEVICE);
+ buffer_info->dma = 0;
+ }
+
+ if (buffer_info->skb) {
+ dev_kfree_skb_irq(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+
+ if (++sw_tpd_next_to_clean == tpd_ring->count)
+ sw_tpd_next_to_clean = 0;
+ }
+ atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);
+
+ if (netif_queue_stopped(adapter->netdev) &&
+ netif_carrier_ok(adapter->netdev))
+ netif_wake_queue(adapter->netdev);
+}
+
+static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
+{
+ u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
+ u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
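+ /*
+ * One slot is always left unused so that next_to_use ==
+ * next_to_clean unambiguously means "empty" rather than "full",
+ * hence the -1 below.
+ */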
+ return (next_to_clean > next_to_use) ?
+ next_to_clean - next_to_use - 1 :
+ tpd_ring->count + next_to_clean - next_to_use - 1;
+}
+
+static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
+ struct tx_packet_desc *ptpd)
+{
+ u8 hdr_len, ip_off;
+ u32 real_len;
+ int err;
+
+ if (skb_shinfo(skb)->gso_size) {
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (unlikely(err))
+ return -1;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+
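+ /*
+ * Trim any padding beyond the IP datagram length so only real
+ * payload is segmented.
+ */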
+ real_len = (((unsigned char *)iph - skb->data) +
+ ntohs(iph->tot_len));
+ if (real_len < skb->len)
+ pskb_trim(skb, real_len);
+ hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ if (skb->len == hdr_len) {
+ iph->check = 0;
+ tcp_hdr(skb)->check =
+ ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, tcp_hdrlen(skb),
+ IPPROTO_TCP, 0);
+ ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
+ TPD_IPHL_SHIFT;
+ ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
+ TPD_TCPHDRLEN_MASK) <<
+ TPD_TCPHDRLEN_SHIFT;
+ ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT;
+ ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT;
+ return 1;
+ }
+
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0, IPPROTO_TCP, 0);
+ ip_off = (unsigned char *)iph -
+ (unsigned char *) skb_network_header(skb);
+ if (ip_off == 8) /* 802.3-SNAP frame */
+ ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT;
+ else if (ip_off != 0)
+ return -2;
+
+ ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
+ TPD_IPHL_SHIFT;
+ ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
+ TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT;
+ ptpd->word3 |= (skb_shinfo(skb)->gso_size &
+ TPD_MSS_MASK) << TPD_MSS_SHIFT;
+ ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
+ return 3;
+ }
+ }
+ /* no segmentation performed */
+ return 0;
+}
+
+static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
+ struct tx_packet_desc *ptpd)
+{
+ u8 css, cso;
+
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ css = skb_checksum_start_offset(skb);
+ cso = css + (u8) skb->csum_offset;
+ if (unlikely(css & 0x1)) {
+ /* L1 hardware requires an even number here */
+ if (netif_msg_tx_err(adapter))
+ dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+ "payload offset not an even number\n");
+ return -1;
+ }
+ ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) <<
+ TPD_PLOADOFFSET_SHIFT;
+ ptpd->word3 |= (cso & TPD_CCSUMOFFSET_MASK) <<
+ TPD_CCSUMOFFSET_SHIFT;
+ ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT;
+ return 1;
+ }
+ return 0;
+}
+
+static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
+ struct tx_packet_desc *ptpd)
+{
+ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+ struct atl1_buffer *buffer_info;
+ u16 buf_len = skb->len;
+ struct page *page;
+ unsigned long offset;
+ unsigned int nr_frags;
+ unsigned int f;
+ int retval;
+ u16 next_to_use;
+ u16 data_len;
+ u8 hdr_len;
+
+ buf_len -= skb->data_len;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ next_to_use = atomic_read(&tpd_ring->next_to_use);
+ buffer_info = &tpd_ring->buffer_info[next_to_use];
+ BUG_ON(buffer_info->skb);
+ /* the skb pointer is stored only in the last TPD's buffer_info
+ * (set at the end of this function), so clear it here */
+ buffer_info->skb = NULL;
+
+ retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
+ if (retval) {
+ /* TSO */
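+ /*
+ * Map the headers in their own buffer, separate from the
+ * payload, presumably so the hardware can prepend them to
+ * every generated segment; the payload is then split into
+ * ATL1_MAX_TX_BUF_LEN-sized chunks below.
+ */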
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ buffer_info->length = hdr_len;
+ page = virt_to_page(skb->data);
+ offset = (unsigned long)skb->data & ~PAGE_MASK;
+ buffer_info->dma = pci_map_page(adapter->pdev, page,
+ offset, hdr_len,
+ PCI_DMA_TODEVICE);
+
+ if (++next_to_use == tpd_ring->count)
+ next_to_use = 0;
+
+ if (buf_len > hdr_len) {
+ int i, nseg;
+
+ data_len = buf_len - hdr_len;
+ nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) /
+ ATL1_MAX_TX_BUF_LEN;
+ for (i = 0; i < nseg; i++) {
+ buffer_info =
+ &tpd_ring->buffer_info[next_to_use];
+ buffer_info->skb = NULL;
+ buffer_info->length =
+ (ATL1_MAX_TX_BUF_LEN >=
+ data_len) ? ATL1_MAX_TX_BUF_LEN : data_len;
+ data_len -= buffer_info->length;
+ page = virt_to_page(skb->data +
+ (hdr_len + i * ATL1_MAX_TX_BUF_LEN));
+ offset = (unsigned long)(skb->data +
+ (hdr_len + i * ATL1_MAX_TX_BUF_LEN)) &
+ ~PAGE_MASK;
+ buffer_info->dma = pci_map_page(adapter->pdev,
+ page, offset, buffer_info->length,
+ PCI_DMA_TODEVICE);
+ if (++next_to_use == tpd_ring->count)
+ next_to_use = 0;
+ }
+ }
+ } else {
+ /* not TSO */
+ buffer_info->length = buf_len;
+ page = virt_to_page(skb->data);
+ offset = (unsigned long)skb->data & ~PAGE_MASK;
+ buffer_info->dma = pci_map_page(adapter->pdev, page,
+ offset, buf_len, PCI_DMA_TODEVICE);
+ if (++next_to_use == tpd_ring->count)
+ next_to_use = 0;
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ struct skb_frag_struct *frag;
+ u16 i, nseg;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ buf_len = frag->size;
+
+ nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
+ ATL1_MAX_TX_BUF_LEN;
+ for (i = 0; i < nseg; i++) {
+ buffer_info = &tpd_ring->buffer_info[next_to_use];
+ BUG_ON(buffer_info->skb);
+
+ buffer_info->skb = NULL;
+ buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ?
+ ATL1_MAX_TX_BUF_LEN : buf_len;
+ buf_len -= buffer_info->length;
+ buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
+ frag, i * ATL1_MAX_TX_BUF_LEN,
+ buffer_info->length, DMA_TO_DEVICE);
+
+ if (++next_to_use == tpd_ring->count)
+ next_to_use = 0;
+ }
+ }
+
+ /* last tpd's buffer-info */
+ buffer_info->skb = skb;
+}
+
+static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
+ struct tx_packet_desc *ptpd)
+{
+ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+ struct atl1_buffer *buffer_info;
+ struct tx_packet_desc *tpd;
+ u16 j;
+ u32 val;
+ u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use);
+
+ for (j = 0; j < count; j++) {
+ buffer_info = &tpd_ring->buffer_info[next_to_use];
+ tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use);
+ if (tpd != ptpd)
+ memcpy(tpd, ptpd, sizeof(struct tx_packet_desc));
+ tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
+ tpd->word2 &= ~(TPD_BUFLEN_MASK << TPD_BUFLEN_SHIFT);
+ tpd->word2 |= (cpu_to_le16(buffer_info->length) &
+ TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;
+
+ /*
+ * if this is the first descriptor in a TSO chain (the one
+ * carrying the headers), set TPD_HDRFLAG, otherwise clear it.
+ */
+ val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) &
+ TPD_SEGMENT_EN_MASK;
+ if (val) {
+ if (!j)
+ tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
+ else
+ tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT);
+ }
+
+ if (j == (count - 1))
+ tpd->word3 |= 1 << TPD_EOP_SHIFT;
+
+ if (++next_to_use == tpd_ring->count)
+ next_to_use = 0;
+ }
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ atomic_set(&tpd_ring->next_to_use, next_to_use);
+}
+
+static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+ int len;
+ int tso;
+ int count = 1;
+ int ret_val;
+ struct tx_packet_desc *ptpd;
+ u16 frag_size;
+ u16 vlan_tag;
+ unsigned int nr_frags = 0;
+ unsigned int mss = 0;
+ unsigned int f;
+ unsigned int proto_hdr_len;
+
+ len = skb_headlen(skb);
+
+ if (unlikely(skb->len <= 0)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ for (f = 0; f < nr_frags; f++) {
+ frag_size = skb_shinfo(skb)->frags[f].size;
+ if (frag_size)
+ count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) /
+ ATL1_MAX_TX_BUF_LEN;
+ }
+
+ mss = skb_shinfo(skb)->gso_size;
+ if (mss) {
+ if (skb->protocol == htons(ETH_P_IP)) {
+ proto_hdr_len = (skb_transport_offset(skb) +
+ tcp_hdrlen(skb));
+ if (unlikely(proto_hdr_len > len)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ /* need additional TPD ? */
+ if (proto_hdr_len != len)
+ count += (len - proto_hdr_len +
+ ATL1_MAX_TX_BUF_LEN - 1) /
+ ATL1_MAX_TX_BUF_LEN;
+ }
+ }
+
+ if (atl1_tpd_avail(&adapter->tpd_ring) < count) {
+ /* not enough descriptors */
+ netif_stop_queue(netdev);
+ if (netif_msg_tx_queued(adapter))
+ dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+ "tx busy\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ ptpd = ATL1_TPD_DESC(tpd_ring,
+ (u16) atomic_read(&tpd_ring->next_to_use));
+ memset(ptpd, 0, sizeof(struct tx_packet_desc));
+
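+ /*
+ * Convert the 802.1Q tag into the bit order the TPD expects;
+ * this is the mirror image of the receive-side swizzle in
+ * atl1_intr_rx().
+ */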
+ if (vlan_tx_tag_present(skb)) {
+ vlan_tag = vlan_tx_tag_get(skb);
+ vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
+ ((vlan_tag >> 9) & 0x8);
+ ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
+ ptpd->word2 |= (vlan_tag & TPD_VLANTAG_MASK) <<
+ TPD_VLANTAG_SHIFT;
+ }
+
+ tso = atl1_tso(adapter, skb, ptpd);
+ if (tso < 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (!tso) {
+ ret_val = atl1_tx_csum(adapter, skb, ptpd);
+ if (ret_val < 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ atl1_tx_map(adapter, skb, ptpd);
+ atl1_tx_queue(adapter, count, ptpd);
+ atl1_update_mailbox(adapter);
+ mmiowb();
+ return NETDEV_TX_OK;
+}
+
+/*
+ * atl1_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ */
+static irqreturn_t atl1_intr(int irq, void *data)
+{
+ struct atl1_adapter *adapter = netdev_priv(data);
+ u32 status;
+ int max_ints = 10;
+
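+ /*
+ * Interrupt status is read from the CMB, a DMA-coherent block
+ * the hardware updates in host memory, rather than from a
+ * device register.
+ */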
+ status = adapter->cmb.cmb->int_stats;
+ if (!status)
+ return IRQ_NONE;
+
+ do {
+ /* clear CMB interrupt status at once */
+ adapter->cmb.cmb->int_stats = 0;
+
+ if (status & ISR_GPHY) /* clear phy status */
+ atlx_clear_phy_int(adapter);
+
+ /* clear ISR status, and Enable CMB DMA/Disable Interrupt */
+ iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
+
+ /* check if SMB intr */
+ if (status & ISR_SMB)
+ atl1_inc_smb(adapter);
+
+ /* check if PCIE PHY Link down */
+ if (status & ISR_PHY_LINKDOWN) {
+ if (netif_msg_intr(adapter))
+ dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+ "pcie phy link down %x\n", status);
+ if (netif_running(adapter->netdev)) { /* reset MAC */
+ iowrite32(0, adapter->hw.hw_addr + REG_IMR);
+ schedule_work(&adapter->pcie_dma_to_rst_task);
+ return IRQ_HANDLED;
+ }
+ }
+
+ /* check if DMA read/write error ? */
+ if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
+ if (netif_msg_intr(adapter))
+ dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+ "pcie DMA r/w error (status = 0x%x)\n",
+ status);
+ iowrite32(0, adapter->hw.hw_addr + REG_IMR);
+ schedule_work(&adapter->pcie_dma_to_rst_task);
+ return IRQ_HANDLED;
+ }
+
+ /* link event */
+ if (status & ISR_GPHY) {
+ adapter->soft_stats.tx_carrier_errors++;
+ atl1_check_for_link(adapter);
+ }
+
+ /* transmit event */
+ if (status & ISR_CMB_TX)
+ atl1_intr_tx(adapter);
+
+ /* rx exception */
+ if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
+ ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
+ ISR_HOST_RRD_OV | ISR_CMB_RX))) {
+ if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
+ ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
+ ISR_HOST_RRD_OV))
+ if (netif_msg_intr(adapter))
+ dev_printk(KERN_DEBUG,
+ &adapter->pdev->dev,
+ "rx exception, ISR = 0x%x\n",
+ status);
+ atl1_intr_rx(adapter);
+ }
+
+ if (--max_ints < 0)
+ break;
+
+ } while ((status = adapter->cmb.cmb->int_stats));
+
+ /* re-enable Interrupt */
+ iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
+ return IRQ_HANDLED;
+}
+
+/*
+ * atl1_phy_config - Timer Call-back
+ * @data: pointer to the adapter cast into an unsigned long
+ */
+static void atl1_phy_config(unsigned long data)
+{
+ struct atl1_adapter *adapter = (struct atl1_adapter *)data;
+ struct atl1_hw *hw = &adapter->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ adapter->phy_timer_pending = false;
+ atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
+ atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg);
+ atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
+/*
+ * Orphaned vendor comment left intact here:
+ * <vendor comment>
+ * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
+ * will assert. We do soft reset <0x1400=1> according
+ * with the SPEC. BUT, it seemes that PCIE or DMA
+ * state-machine will not be reset. DMAR_TO_INT will
+ * assert again and again.
+ * </vendor comment>
+ */
+
+static int atl1_reset(struct atl1_adapter *adapter)
+{
+ int ret;
+ ret = atl1_reset_hw(&adapter->hw);
+ if (ret)
+ return ret;
+ return atl1_init_hw(&adapter->hw);
+}
+
+static s32 atl1_up(struct atl1_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+ int irq_flags = 0;
+
+ /* hardware has been reset, we need to reload some things */
+ atlx_set_multi(netdev);
+ atl1_init_ring_ptrs(adapter);
+ atlx_restore_vlan(adapter);
+ err = atl1_alloc_rx_buffers(adapter);
+ if (unlikely(!err))
+ /* atl1_alloc_rx_buffers() returns the number of buffers
+ * allocated; zero means none could be allocated */
+ return -ENOMEM;
+
+ if (unlikely(atl1_configure(adapter))) {
+ err = -EIO;
+ goto err_up;
+ }
+
+ err = pci_enable_msi(adapter->pdev);
+ if (err) {
+ if (netif_msg_ifup(adapter))
+ dev_info(&adapter->pdev->dev,
+ "Unable to enable MSI: %d\n", err);
+ irq_flags |= IRQF_SHARED;
+ }
+
+ err = request_irq(adapter->pdev->irq, atl1_intr, irq_flags,
+ netdev->name, netdev);
+ if (unlikely(err))
+ goto err_up;
+
+ atlx_irq_enable(adapter);
+ atl1_check_link(adapter);
+ netif_start_queue(netdev);
+ return 0;
+
+err_up:
+ pci_disable_msi(adapter->pdev);
+ /* free rx_buffers */
+ atl1_clean_rx_ring(adapter);
+ return err;
+}
+
+static void atl1_down(struct atl1_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ netif_stop_queue(netdev);
+ del_timer_sync(&adapter->phy_config_timer);
+ adapter->phy_timer_pending = false;
+
+ atlx_irq_disable(adapter);
+ free_irq(adapter->pdev->irq, netdev);
+ pci_disable_msi(adapter->pdev);
+ atl1_reset_hw(&adapter->hw);
+ adapter->cmb.cmb->int_stats = 0;
+
+ adapter->link_speed = SPEED_0;
+ adapter->link_duplex = -1;
+ netif_carrier_off(netdev);
+
+ atl1_clean_tx_ring(adapter);
+ atl1_clean_rx_ring(adapter);
+}
+
+static void atl1_tx_timeout_task(struct work_struct *work)
+{
+ struct atl1_adapter *adapter =
+ container_of(work, struct atl1_adapter, tx_timeout_task);
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+ atl1_down(adapter);
+ atl1_up(adapter);
+ netif_device_attach(netdev);
+}
+
+/*
+ * atl1_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ int old_mtu = netdev->mtu;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+ if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
+ (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+ if (netif_msg_link(adapter))
+ dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
+ return -EINVAL;
+ }
+
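+ /*
+ * Round the buffer length up to a multiple of 8, presumably a
+ * DMA alignment requirement of the chip.
+ */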
+ adapter->hw.max_frame_size = max_frame;
+ adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
+ adapter->rx_buffer_len = (max_frame + 7) & ~7;
+ adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
+
+ netdev->mtu = new_mtu;
+ if ((old_mtu != new_mtu) && netif_running(netdev)) {
+ atl1_down(adapter);
+ atl1_up(adapter);
+ }
+
+ return 0;
+}
+
+/*
+ * atl1_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ */
+static int atl1_open(struct net_device *netdev)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ netif_carrier_off(netdev);
+
+ /* allocate transmit descriptors */
+ err = atl1_setup_ring_resources(adapter);
+ if (err)
+ return err;
+
+ err = atl1_up(adapter);
+ if (err)
+ goto err_up;
+
+ return 0;
+
+err_up:
+ atl1_reset(adapter);
+ return err;
+}
+
+/*
+ * atl1_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ */
+static int atl1_close(struct net_device *netdev)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ atl1_down(adapter);
+ atl1_free_ring_resources(adapter);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int atl1_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_hw *hw = &adapter->hw;
+ u32 ctrl = 0;
+ u32 wufc = adapter->wol;
+ u32 val;
+ u16 speed;
+ u16 duplex;
+
+ netif_device_detach(netdev);
+ if (netif_running(netdev))
+ atl1_down(adapter);
+
+ /* latched link status: read BMSR twice */
+ atl1_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
+ atl1_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
+ val = ctrl & BMSR_LSTATUS;
+ if (val)
+ wufc &= ~ATLX_WUFC_LNKC;
+ if (!wufc)
+ goto disable_wol;
+
+ if (val) {
+ val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
+ if (val) {
+ if (netif_msg_ifdown(adapter))
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "error getting speed/duplex\n");
+ goto disable_wol;
+ }
+
+ ctrl = 0;
+
+ /* enable magic packet WOL */
+ if (wufc & ATLX_WUFC_MAG)
+ ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);
+ iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
+ ioread32(hw->hw_addr + REG_WOL_CTRL);
+
+ /* configure the mac */
+ ctrl = MAC_CTRL_RX_EN;
+ ctrl |= ((u32)((speed == SPEED_1000) ? MAC_CTRL_SPEED_1000 :
+ MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT);
+ if (duplex == FULL_DUPLEX)
+ ctrl |= MAC_CTRL_DUPLX;
+ ctrl |= (((u32)adapter->hw.preamble_len &
+ MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
+ __atlx_vlan_mode(netdev->features, &ctrl);
+ if (wufc & ATLX_WUFC_MAG)
+ ctrl |= MAC_CTRL_BC_EN;
+ iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
+ ioread32(hw->hw_addr + REG_MAC_CTRL);
+
+ /* poke the PHY */
+ ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
+ ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+ iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
+ ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
+ } else {
+ ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
+ iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
+ ioread32(hw->hw_addr + REG_WOL_CTRL);
+ iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
+ ioread32(hw->hw_addr + REG_MAC_CTRL);
+ hw->phy_configured = false;
+ }
+
+ return 0;
+
+ disable_wol:
+ iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
+ ioread32(hw->hw_addr + REG_WOL_CTRL);
+ ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
+ ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+ iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
+ ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
+ hw->phy_configured = false;
+
+ return 0;
+}
+
+static int atl1_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+
+ iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
+
+ atl1_reset_hw(&adapter->hw);
+
+ if (netif_running(netdev)) {
+ adapter->cmb.cmb->int_stats = 0;
+ atl1_up(adapter);
+ }
+ netif_device_attach(netdev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume);
+#define ATL1_PM_OPS (&atl1_pm_ops)
+
+#else
+
+static int atl1_suspend(struct device *dev) { return 0; }
+
+#define ATL1_PM_OPS NULL
+#endif
+
+static void atl1_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+
+ atl1_suspend(&pdev->dev);
+ pci_wake_from_d3(pdev, adapter->wol);
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void atl1_poll_controller(struct net_device *netdev)
+{
+ disable_irq(netdev->irq);
+ atl1_intr(netdev->irq, netdev);
+ enable_irq(netdev->irq);
+}
+#endif
+
+static const struct net_device_ops atl1_netdev_ops = {
+ .ndo_open = atl1_open,
+ .ndo_stop = atl1_close,
+ .ndo_start_xmit = atl1_xmit_frame,
+ .ndo_set_rx_mode = atlx_set_multi,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = atl1_set_mac,
+ .ndo_change_mtu = atl1_change_mtu,
+ .ndo_fix_features = atlx_fix_features,
+ .ndo_set_features = atlx_set_features,
+ .ndo_do_ioctl = atlx_ioctl,
+ .ndo_tx_timeout = atlx_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = atl1_poll_controller,
+#endif
+};
+
+/*
+ * atl1_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in atl1_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * atl1_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ */
+static int __devinit atl1_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct atl1_adapter *adapter;
+ static int cards_found = 0;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ /*
+ * The atl1 chip can DMA to 64-bit addresses, but it uses a single
+ * shared register for the high 32 bits, so only a single, aligned,
+ * 4 GB physical address range can be used at a time.
+ *
+ * Supporting 64-bit DMA on this hardware is more trouble than it's
+ * worth. It is far easier to limit to 32-bit DMA than update
+ * various kernel subsystems to support the mechanics required by a
+ * fixed-high-32-bit system.
+ */
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto err_dma;
+ }
+ /*
+ * Mark all PCI regions associated with PCI device
+ * pdev as being reserved by owner atl1_driver_name
+ */
+ err = pci_request_regions(pdev, ATLX_DRIVER_NAME);
+ if (err)
+ goto err_request_regions;
+
+ /*
+ * Enables bus-mastering on the device and calls
+ * pcibios_set_master to do the needed arch specific settings
+ */
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev(sizeof(struct atl1_adapter));
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->hw.back = adapter;
+ adapter->msg_enable = netif_msg_init(debug, atl1_default_msg);
+
+ adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
+ if (!adapter->hw.hw_addr) {
+ err = -EIO;
+ goto err_pci_iomap;
+ }
+ /* get device revision number */
+ adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
+ (REG_MASTER_CTRL + 2));
+ if (netif_msg_probe(adapter))
+ dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION);
+
+ /* set default ring resource counts */
+ adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
+ adapter->tpd_ring.count = ATL1_DEFAULT_TPD;
+
+ adapter->mii.dev = netdev;
+ adapter->mii.mdio_read = mdio_read;
+ adapter->mii.mdio_write = mdio_write;
+ adapter->mii.phy_id_mask = 0x1f;
+ adapter->mii.reg_num_mask = 0x1f;
+
+ netdev->netdev_ops = &atl1_netdev_ops;
+ netdev->watchdog_timeo = 5 * HZ;
+
+ netdev->ethtool_ops = &atl1_ethtool_ops;
+ adapter->bd_number = cards_found;
+
+ /* setup the private structure */
+ err = atl1_sw_init(adapter);
+ if (err)
+ goto err_common;
+
+ netdev->features = NETIF_F_HW_CSUM;
+ netdev->features |= NETIF_F_SG;
+ netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
+
+ netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_HW_VLAN_RX;
+
+ /* is this valid? see atl1_setup_mac_ctrl() */
+ netdev->features |= NETIF_F_RXCSUM;
+
+ /*
+ * patch for some L1 of old version,
+ * the final version of L1 may not need these
+ * patches
+ */
+ /* atl1_pcie_patch(adapter); */
+
+ /* really reset GPHY core */
+ iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
+
+ /*
+ * reset the controller to
+ * put the device in a known good starting state
+ */
+ if (atl1_reset_hw(&adapter->hw)) {
+ err = -EIO;
+ goto err_common;
+ }
+
+ /* copy the MAC address out of the EEPROM */
+ atl1_read_mac_addr(&adapter->hw);
+ memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ err = -EIO;
+ goto err_common;
+ }
+
+ atl1_check_options(adapter);
+
+ /* pre-init the MAC, and setup link */
+ err = atl1_init_hw(&adapter->hw);
+ if (err) {
+ err = -EIO;
+ goto err_common;
+ }
+
+ atl1_pcie_patch(adapter);
+ /* assume we have no link for now */
+ netif_carrier_off(netdev);
+
+ setup_timer(&adapter->phy_config_timer, atl1_phy_config,
+ (unsigned long)adapter);
+ adapter->phy_timer_pending = false;
+
+ INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);
+
+ INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);
+
+ INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_common;
+
+ cards_found++;
+ atl1_via_workaround(adapter);
+ return 0;
+
+err_common:
+ pci_iounmap(pdev, adapter->hw.hw_addr);
+err_pci_iomap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_dma:
+err_request_regions:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/*
+ * atl1_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * atl1_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ */
+static void __devexit atl1_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1_adapter *adapter;
+ /* Device not available. Return. */
+ if (!netdev)
+ return;
+
+ adapter = netdev_priv(netdev);
+
+ /*
+ * Some atl1 boards lack persistent storage for their MAC, and get it
+ * from the BIOS during POST. If we've been messing with the MAC
+ * address, we need to save the permanent one.
+ */
+ if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) {
+ memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
+ ETH_ALEN);
+ atl1_set_mac_addr(&adapter->hw);
+ }
+
+ iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
+ unregister_netdev(netdev);
+ pci_iounmap(pdev, adapter->hw.hw_addr);
+ pci_release_regions(pdev);
+ free_netdev(netdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver atl1_driver = {
+ .name = ATLX_DRIVER_NAME,
+ .id_table = atl1_pci_tbl,
+ .probe = atl1_probe,
+ .remove = __devexit_p(atl1_remove),
+ .shutdown = atl1_shutdown,
+ .driver.pm = ATL1_PM_OPS,
+};
+
+/*
+ * atl1_exit_module - Driver Exit Cleanup Routine
+ *
+ * atl1_exit_module is called just before the driver is removed
+ * from memory.
+ */
+static void __exit atl1_exit_module(void)
+{
+ pci_unregister_driver(&atl1_driver);
+}
+
+/*
+ * atl1_init_module - Driver Registration Routine
+ *
+ * atl1_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ */
+static int __init atl1_init_module(void)
+{
+ return pci_register_driver(&atl1_driver);
+}
+
+module_init(atl1_init_module);
+module_exit(atl1_exit_module);
+
+struct atl1_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define ATL1_STAT(m) \
+ sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m)
+
+static struct atl1_stats atl1_gstrings_stats[] = {
+ {"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
+ {"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
+ {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
+ {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
+ {"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
+ {"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
+ {"multicast", ATL1_STAT(soft_stats.multicast)},
+ {"collisions", ATL1_STAT(soft_stats.collisions)},
+ {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
+ {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
+ {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
+ {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
+ {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
+ {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
+ {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
+ {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
+ {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
+ {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
+ {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
+ {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
+ {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
+ {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
+ {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
+ {"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
+ {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
+ {"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
+ {"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
+ {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
+ {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
+};
+
+static void atl1_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ int i;
+ char *p;
+
+ for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
+ p = (char *)adapter+atl1_gstrings_stats[i].stat_offset;
+ data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+}
+
+static int atl1_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(atl1_gstrings_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int atl1_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_hw *hw = &adapter->hw;
+
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg | SUPPORTED_TP);
+ ecmd->advertising = ADVERTISED_TP;
+ if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+ hw->media_type == MEDIA_TYPE_1000M_FULL) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) {
+ ecmd->advertising |=
+ (ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Full);
+ } else
+ ecmd->advertising |= (ADVERTISED_1000baseT_Full);
+ }
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = 0;
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ if (netif_carrier_ok(adapter->netdev)) {
+ u16 link_speed, link_duplex;
+ atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
+ ethtool_cmd_speed_set(ecmd, link_speed);
+ if (link_duplex == FULL_DUPLEX)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ } else {
+ ethtool_cmd_speed_set(ecmd, -1);
+ ecmd->duplex = -1;
+ }
+ if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+ hw->media_type == MEDIA_TYPE_1000M_FULL)
+ ecmd->autoneg = AUTONEG_ENABLE;
+ else
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static int atl1_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_hw *hw = &adapter->hw;
+ u16 phy_data;
+ int ret_val = 0;
+ u16 old_media_type = hw->media_type;
+
+ if (netif_running(adapter->netdev)) {
+ if (netif_msg_link(adapter))
+ dev_dbg(&adapter->pdev->dev,
+ "ethtool shutting down adapter\n");
+ atl1_down(adapter);
+ }
+
+ if (ecmd->autoneg == AUTONEG_ENABLE)
+ hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
+ else {
+ u32 speed = ethtool_cmd_speed(ecmd);
+ if (speed == SPEED_1000) {
+ if (ecmd->duplex != DUPLEX_FULL) {
+ if (netif_msg_link(adapter))
+ dev_warn(&adapter->pdev->dev,
+ "1000M half is invalid\n");
+ ret_val = -EINVAL;
+ goto exit_sset;
+ }
+ hw->media_type = MEDIA_TYPE_1000M_FULL;
+ } else if (speed == SPEED_100) {
+ if (ecmd->duplex == DUPLEX_FULL)
+ hw->media_type = MEDIA_TYPE_100M_FULL;
+ else
+ hw->media_type = MEDIA_TYPE_100M_HALF;
+ } else {
+ if (ecmd->duplex == DUPLEX_FULL)
+ hw->media_type = MEDIA_TYPE_10M_FULL;
+ else
+ hw->media_type = MEDIA_TYPE_10M_HALF;
+ }
+ }
+ switch (hw->media_type) {
+ case MEDIA_TYPE_AUTO_SENSOR:
+ ecmd->advertising =
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_Autoneg | ADVERTISED_TP;
+ break;
+ case MEDIA_TYPE_1000M_FULL:
+ ecmd->advertising =
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_Autoneg | ADVERTISED_TP;
+ break;
+ default:
+ ecmd->advertising = 0;
+ break;
+ }
+ if (atl1_phy_setup_autoneg_adv(hw)) {
+ ret_val = -EINVAL;
+ if (netif_msg_link(adapter))
+ dev_warn(&adapter->pdev->dev,
+ "invalid ethtool speed/duplex setting\n");
+ goto exit_sset;
+ }
+ if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+ hw->media_type == MEDIA_TYPE_1000M_FULL)
+ phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
+ else {
+ switch (hw->media_type) {
+ case MEDIA_TYPE_100M_FULL:
+ phy_data =
+ MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
+ MII_CR_RESET;
+ break;
+ case MEDIA_TYPE_100M_HALF:
+ phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
+ break;
+ case MEDIA_TYPE_10M_FULL:
+ phy_data =
+ MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
+ break;
+ default:
+ /* MEDIA_TYPE_10M_HALF: */
+ phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
+ break;
+ }
+ }
+ atl1_write_phy_reg(hw, MII_BMCR, phy_data);
+exit_sset:
+ if (ret_val)
+ hw->media_type = old_media_type;
+
+ if (netif_running(adapter->netdev)) {
+ if (netif_msg_link(adapter))
+ dev_dbg(&adapter->pdev->dev,
+ "ethtool starting adapter\n");
+ atl1_up(adapter);
+ } else if (!ret_val) {
+ if (netif_msg_link(adapter))
+ dev_dbg(&adapter->pdev->dev,
+ "ethtool resetting adapter\n");
+ atl1_reset(adapter);
+ }
+ return ret_val;
+}
+
+static void atl1_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ATLX_DRIVER_VERSION,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
+ drvinfo->eedump_len = ATL1_EEDUMP_LEN;
+}
+
+static void atl1_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+ if (adapter->wol & ATLX_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int atl1_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+
+ if (wol->wolopts & (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
+ WAKE_ARP | WAKE_MAGICSECURE))
+ return -EOPNOTSUPP;
+ adapter->wol = 0;
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= ATLX_WUFC_MAG;
+
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
+static u32 atl1_get_msglevel(struct net_device *netdev)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void atl1_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = value;
+}
+
+static int atl1_get_regs_len(struct net_device *netdev)
+{
+ return ATL1_REG_COUNT * sizeof(u32);
+}
+
+static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_hw *hw = &adapter->hw;
+ unsigned int i;
+ u32 *regbuf = p;
+
+ for (i = 0; i < ATL1_REG_COUNT; i++) {
+ /*
+ * This switch statement avoids reserved regions
+ * of register space.
+ */
+ switch (i) {
+ case 6 ... 9:
+ case 14:
+ case 29 ... 31:
+ case 34 ... 63:
+ case 75 ... 127:
+ case 136 ... 1023:
+ case 1027 ... 1087:
+ case 1091 ... 1151:
+ case 1194 ... 1195:
+ case 1200 ... 1201:
+ case 1206 ... 1213:
+ case 1216 ... 1279:
+ case 1290 ... 1311:
+ case 1323 ... 1343:
+ case 1358 ... 1359:
+ case 1368 ... 1375:
+ case 1378 ... 1383:
+ case 1388 ... 1391:
+ case 1393 ... 1395:
+ case 1402 ... 1403:
+ case 1410 ... 1471:
+ case 1522 ... 1535:
+ /* reserved region; don't read it */
+ regbuf[i] = 0;
+ break;
+ default:
+ /* unreserved region */
+ regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32)));
+ }
+ }
+}
+
+static void atl1_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
+ struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;
+
+ ring->rx_max_pending = ATL1_MAX_RFD;
+ ring->tx_max_pending = ATL1_MAX_TPD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rxdr->count;
+ ring->tx_pending = txdr->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int atl1_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
+ struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
+ struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;
+
+ struct atl1_tpd_ring tpd_old, tpd_new;
+ struct atl1_rfd_ring rfd_old, rfd_new;
+ struct atl1_rrd_ring rrd_old, rrd_new;
+ struct atl1_ring_header rhdr_old, rhdr_new;
+ struct atl1_smb smb;
+ struct atl1_cmb cmb;
+ int err;
+
+ tpd_old = adapter->tpd_ring;
+ rfd_old = adapter->rfd_ring;
+ rrd_old = adapter->rrd_ring;
+ rhdr_old = adapter->ring_header;
+
+ if (netif_running(adapter->netdev))
+ atl1_down(adapter);
+
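+ /* clamp the requested counts to [MIN, MAX] and round up to a multiple of 4 */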
+ rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
+ rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD :
+ rfdr->count;
+ rfdr->count = (rfdr->count + 3) & ~3;
+ rrdr->count = rfdr->count;
+
+ tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
+ tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD :
+ tpdr->count;
+ tpdr->count = (tpdr->count + 3) & ~3;
+
+ if (netif_running(adapter->netdev)) {
+ /* try to get new resources before deleting old */
+ err = atl1_setup_ring_resources(adapter);
+ if (err)
+ goto err_setup_ring;
+
+ /*
+ * save the new, restore the old in order to free it,
+ * then restore the new back again
+ */
+
+ rfd_new = adapter->rfd_ring;
+ rrd_new = adapter->rrd_ring;
+ tpd_new = adapter->tpd_ring;
+ rhdr_new = adapter->ring_header;
+ adapter->rfd_ring = rfd_old;
+ adapter->rrd_ring = rrd_old;
+ adapter->tpd_ring = tpd_old;
+ adapter->ring_header = rhdr_old;
+ /*
+ * Save SMB and CMB, since atl1_free_ring_resources
+ * will clear them.
+ */
+ smb = adapter->smb;
+ cmb = adapter->cmb;
+ atl1_free_ring_resources(adapter);
+ adapter->rfd_ring = rfd_new;
+ adapter->rrd_ring = rrd_new;
+ adapter->tpd_ring = tpd_new;
+ adapter->ring_header = rhdr_new;
+ adapter->smb = smb;
+ adapter->cmb = cmb;
+
+ err = atl1_up(adapter);
+ if (err)
+ return err;
+ }
+ return 0;
+
+err_setup_ring:
+ adapter->rfd_ring = rfd_old;
+ adapter->rrd_ring = rrd_old;
+ adapter->tpd_ring = tpd_old;
+ adapter->ring_header = rhdr_old;
+ atl1_up(adapter);
+ return err;
+}
+
+static void atl1_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *epause)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_hw *hw = &adapter->hw;
+
+ if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+ hw->media_type == MEDIA_TYPE_1000M_FULL) {
+ epause->autoneg = AUTONEG_ENABLE;
+ } else {
+ epause->autoneg = AUTONEG_DISABLE;
+ }
+ epause->rx_pause = 1;
+ epause->tx_pause = 1;
+}
+
+static int atl1_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *epause)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_hw *hw = &adapter->hw;
+
+ if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+ hw->media_type == MEDIA_TYPE_1000M_FULL) {
+ epause->autoneg = AUTONEG_ENABLE;
+ } else {
+ epause->autoneg = AUTONEG_DISABLE;
+ }
+
+ epause->rx_pause = 1;
+ epause->tx_pause = 1;
+
+ return 0;
+}
+
+static void atl1_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
+ memcpy(p, atl1_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int atl1_nway_reset(struct net_device *netdev)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_hw *hw = &adapter->hw;
+
+ if (netif_running(netdev)) {
+ u16 phy_data;
+ atl1_down(adapter);
+
+ if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+ hw->media_type == MEDIA_TYPE_1000M_FULL) {
+ phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
+ } else {
+ switch (hw->media_type) {
+ case MEDIA_TYPE_100M_FULL:
+ phy_data = MII_CR_FULL_DUPLEX |
+ MII_CR_SPEED_100 | MII_CR_RESET;
+ break;
+ case MEDIA_TYPE_100M_HALF:
+ phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
+ break;
+ case MEDIA_TYPE_10M_FULL:
+ phy_data = MII_CR_FULL_DUPLEX |
+ MII_CR_SPEED_10 | MII_CR_RESET;
+ break;
+ default:
+ /* MEDIA_TYPE_10M_HALF */
+ phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
+ }
+ }
+ atl1_write_phy_reg(hw, MII_BMCR, phy_data);
+ atl1_up(adapter);
+ }
+ return 0;
+}
+
+static const struct ethtool_ops atl1_ethtool_ops = {
+ .get_settings = atl1_get_settings,
+ .set_settings = atl1_set_settings,
+ .get_drvinfo = atl1_get_drvinfo,
+ .get_wol = atl1_get_wol,
+ .set_wol = atl1_set_wol,
+ .get_msglevel = atl1_get_msglevel,
+ .set_msglevel = atl1_set_msglevel,
+ .get_regs_len = atl1_get_regs_len,
+ .get_regs = atl1_get_regs,
+ .get_ringparam = atl1_get_ringparam,
+ .set_ringparam = atl1_set_ringparam,
+ .get_pauseparam = atl1_get_pauseparam,
+ .set_pauseparam = atl1_set_pauseparam,
+ .get_link = ethtool_op_get_link,
+ .get_strings = atl1_get_strings,
+ .nway_reset = atl1_nway_reset,
+ .get_ethtool_stats = atl1_get_ethtool_stats,
+ .get_sset_count = atl1_get_sset_count,
+};
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h
new file mode 100644
index 000000000000..109d6da8be97
--- /dev/null
+++ b/drivers/net/ethernet/atheros/atlx/atl1.h
@@ -0,0 +1,792 @@
+/*
+ * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
+ * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef ATL1_H
+#define ATL1_H
+
+#include <linux/compiler.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "atlx.h"
+
+#define ATLX_DRIVER_NAME "atl1"
+
+MODULE_DESCRIPTION("Atheros L1 Gigabit Ethernet Driver");
+
+#define atlx_adapter atl1_adapter
+#define atlx_check_for_link atl1_check_for_link
+#define atlx_check_link atl1_check_link
+#define atlx_hash_mc_addr atl1_hash_mc_addr
+#define atlx_hash_set atl1_hash_set
+#define atlx_hw atl1_hw
+#define atlx_mii_ioctl atl1_mii_ioctl
+#define atlx_read_phy_reg atl1_read_phy_reg
+#define atlx_set_mac atl1_set_mac
+#define atlx_set_mac_addr atl1_set_mac_addr
+
+struct atl1_adapter;
+struct atl1_hw;
+
+/* function prototypes needed by multiple files */
+static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
+static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
+static void atl1_set_mac_addr(struct atl1_hw *hw);
+static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd);
+static u32 atl1_check_link(struct atl1_adapter *adapter);
+
+/* hardware definitions specific to L1 */
+
+/* Block IDLE Status Register */
+#define IDLE_STATUS_RXMAC 0x1
+#define IDLE_STATUS_TXMAC 0x2
+#define IDLE_STATUS_RXQ 0x4
+#define IDLE_STATUS_TXQ 0x8
+#define IDLE_STATUS_DMAR 0x10
+#define IDLE_STATUS_DMAW 0x20
+#define IDLE_STATUS_SMB 0x40
+#define IDLE_STATUS_CMB 0x80
+
+/* MDIO Control Register */
+#define MDIO_WAIT_TIMES 30
+
+/* MAC Control Register */
+#define MAC_CTRL_TX_PAUSE 0x10000
+#define MAC_CTRL_SCNT 0x20000
+#define MAC_CTRL_SRST_TX 0x40000
+#define MAC_CTRL_TX_SIMURST 0x80000
+#define MAC_CTRL_SPEED_SHIFT 20
+#define MAC_CTRL_SPEED_MASK 0x300000
+#define MAC_CTRL_SPEED_1000 0x2
+#define MAC_CTRL_SPEED_10_100 0x1
+#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000
+#define MAC_CTRL_TX_HUGE 0x800000
+#define MAC_CTRL_RX_CHKSUM_EN 0x1000000
+#define MAC_CTRL_DBG 0x8000000
+
+/* Wake-On-Lan control register */
+#define WOL_CLK_SWITCH_EN 0x8000
+#define WOL_PT5_EN 0x200000
+#define WOL_PT6_EN 0x400000
+#define WOL_PT5_MATCH 0x8000000
+#define WOL_PT6_MATCH 0x10000000
+
+/* WOL Length (2 DWORDs) */
+#define REG_WOL_PATTERN_LEN 0x14A4
+#define WOL_PT_LEN_MASK 0x7F
+#define WOL_PT0_LEN_SHIFT 0
+#define WOL_PT1_LEN_SHIFT 8
+#define WOL_PT2_LEN_SHIFT 16
+#define WOL_PT3_LEN_SHIFT 24
+#define WOL_PT4_LEN_SHIFT 0
+#define WOL_PT5_LEN_SHIFT 8
+#define WOL_PT6_LEN_SHIFT 16
+
+/* Internal SRAM Partition Registers, low 32 bits */
+#define REG_SRAM_RFD_LEN 0x1504
+#define REG_SRAM_RRD_ADDR 0x1508
+#define REG_SRAM_RRD_LEN 0x150C
+#define REG_SRAM_TPD_ADDR 0x1510
+#define REG_SRAM_TPD_LEN 0x1514
+#define REG_SRAM_TRD_ADDR 0x1518
+#define REG_SRAM_TRD_LEN 0x151C
+#define REG_SRAM_RXF_ADDR 0x1520
+#define REG_SRAM_RXF_LEN 0x1524
+#define REG_SRAM_TXF_ADDR 0x1528
+#define REG_SRAM_TXF_LEN 0x152C
+#define REG_SRAM_TCPH_PATH_ADDR 0x1530
+#define SRAM_TCPH_ADDR_MASK 0xFFF
+#define SRAM_TCPH_ADDR_SHIFT 0
+#define SRAM_PATH_ADDR_MASK 0xFFF
+#define SRAM_PATH_ADDR_SHIFT 16
+
+/* Load Ptr Register */
+#define REG_LOAD_PTR 0x1534
+
+/* Descriptor Control registers, low 32 bits */
+#define REG_DESC_RFD_ADDR_LO 0x1544
+#define REG_DESC_RRD_ADDR_LO 0x1548
+#define REG_DESC_TPD_ADDR_LO 0x154C
+#define REG_DESC_CMB_ADDR_LO 0x1550
+#define REG_DESC_SMB_ADDR_LO 0x1554
+#define REG_DESC_RFD_RRD_RING_SIZE 0x1558
+#define DESC_RFD_RING_SIZE_MASK 0x7FF
+#define DESC_RFD_RING_SIZE_SHIFT 0
+#define DESC_RRD_RING_SIZE_MASK 0x7FF
+#define DESC_RRD_RING_SIZE_SHIFT 16
+#define REG_DESC_TPD_RING_SIZE 0x155C
+#define DESC_TPD_RING_SIZE_MASK 0x3FF
+#define DESC_TPD_RING_SIZE_SHIFT 0
+
+/* TXQ Control Register */
+#define REG_TXQ_CTRL 0x1580
+#define TXQ_CTRL_TPD_BURST_NUM_SHIFT 0
+#define TXQ_CTRL_TPD_BURST_NUM_MASK 0x1F
+#define TXQ_CTRL_EN 0x20
+#define TXQ_CTRL_ENH_MODE 0x40
+#define TXQ_CTRL_TPD_FETCH_TH_SHIFT 8
+#define TXQ_CTRL_TPD_FETCH_TH_MASK 0x3F
+#define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16
+#define TXQ_CTRL_TXF_BURST_NUM_MASK 0xFFFF
+
+/* Jumbo packet Threshold for task offload */
+#define REG_TX_JUMBO_TASK_TH_TPD_IPG 0x1584
+#define TX_JUMBO_TASK_TH_MASK 0x7FF
+#define TX_JUMBO_TASK_TH_SHIFT 0
+#define TX_TPD_MIN_IPG_MASK 0x1F
+#define TX_TPD_MIN_IPG_SHIFT 16
+
+/* RXQ Control Register */
+#define REG_RXQ_CTRL 0x15A0
+#define RXQ_CTRL_RFD_BURST_NUM_SHIFT 0
+#define RXQ_CTRL_RFD_BURST_NUM_MASK 0xFF
+#define RXQ_CTRL_RRD_BURST_THRESH_SHIFT 8
+#define RXQ_CTRL_RRD_BURST_THRESH_MASK 0xFF
+#define RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT 16
+#define RXQ_CTRL_RFD_PREF_MIN_IPG_MASK 0x1F
+#define RXQ_CTRL_CUT_THRU_EN 0x40000000
+#define RXQ_CTRL_EN 0x80000000
+
+/* Rx jumbo packet threshold and rrd retirement timer */
+#define REG_RXQ_JMBOSZ_RRDTIM 0x15A4
+#define RXQ_JMBOSZ_TH_MASK 0x7FF
+#define RXQ_JMBOSZ_TH_SHIFT 0
+#define RXQ_JMBO_LKAH_MASK 0xF
+#define RXQ_JMBO_LKAH_SHIFT 11
+#define RXQ_RRD_TIMER_MASK 0xFFFF
+#define RXQ_RRD_TIMER_SHIFT 16
+
+/* RFD flow control register */
+#define REG_RXQ_RXF_PAUSE_THRESH 0x15A8
+#define RXQ_RXF_PAUSE_TH_HI_SHIFT 16
+#define RXQ_RXF_PAUSE_TH_HI_MASK 0xFFF
+#define RXQ_RXF_PAUSE_TH_LO_SHIFT 0
+#define RXQ_RXF_PAUSE_TH_LO_MASK 0xFFF
+
+/* RRD flow control register */
+#define REG_RXQ_RRD_PAUSE_THRESH 0x15AC
+#define RXQ_RRD_PAUSE_TH_HI_SHIFT 0
+#define RXQ_RRD_PAUSE_TH_HI_MASK 0xFFF
+#define RXQ_RRD_PAUSE_TH_LO_SHIFT 16
+#define RXQ_RRD_PAUSE_TH_LO_MASK 0xFFF
+
+/* DMA Engine Control Register */
+#define REG_DMA_CTRL 0x15C0
+#define DMA_CTRL_DMAR_IN_ORDER 0x1
+#define DMA_CTRL_DMAR_ENH_ORDER 0x2
+#define DMA_CTRL_DMAR_OUT_ORDER 0x4
+#define DMA_CTRL_RCB_VALUE 0x8
+#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4
+#define DMA_CTRL_DMAR_BURST_LEN_MASK 7
+#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7
+#define DMA_CTRL_DMAW_BURST_LEN_MASK 7
+#define DMA_CTRL_DMAR_EN 0x400
+#define DMA_CTRL_DMAW_EN 0x800
+
+/* CMB/SMB Control Register */
+#define REG_CSMB_CTRL 0x15D0
+#define CSMB_CTRL_CMB_NOW 1
+#define CSMB_CTRL_SMB_NOW 2
+#define CSMB_CTRL_CMB_EN 4
+#define CSMB_CTRL_SMB_EN 8
+
+/* CMB DMA Write Threshold Register */
+#define REG_CMB_WRITE_TH 0x15D4
+#define CMB_RRD_TH_SHIFT 0
+#define CMB_RRD_TH_MASK 0x7FF
+#define CMB_TPD_TH_SHIFT 16
+#define CMB_TPD_TH_MASK 0x7FF
+
+/* RX/TX count-down timer to trigger CMB-write. 2us resolution. */
+#define REG_CMB_WRITE_TIMER 0x15D8
+#define CMB_RX_TM_SHIFT 0
+#define CMB_RX_TM_MASK 0xFFFF
+#define CMB_TX_TM_SHIFT 16
+#define CMB_TX_TM_MASK 0xFFFF
+
+/* Number of packets received since last CMB write */
+#define REG_CMB_RX_PKT_CNT 0x15DC
+
+/* Number of packets transmitted since last CMB write */
+#define REG_CMB_TX_PKT_CNT 0x15E0
+
+/* SMB auto DMA timer register */
+#define REG_SMB_TIMER 0x15E4
+
+/* Mailbox Register */
+#define REG_MAILBOX 0x15F0
+#define MB_RFD_PROD_INDX_SHIFT 0
+#define MB_RFD_PROD_INDX_MASK 0x7FF
+#define MB_RRD_CONS_INDX_SHIFT 11
+#define MB_RRD_CONS_INDX_MASK 0x7FF
+#define MB_TPD_PROD_INDX_SHIFT 22
+#define MB_TPD_PROD_INDX_MASK 0x3FF
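+
+/*
+ * Illustrative sketch (not part of the driver): all three ring indices
+ * are packed into a single 32-bit mailbox write; with hypothetical
+ * local indices rfd_prod, rrd_cons and tpd_prod:
+ *
+ * u32 mb = ((rfd_prod & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT) |
+ * ((rrd_cons & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
+ * ((tpd_prod & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT);
+ * iowrite32(mb, hw->hw_addr + REG_MAILBOX);
+ */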
+
+/* Interrupt Status Register */
+#define ISR_SMB 0x1
+#define ISR_TIMER 0x2
+#define ISR_MANUAL 0x4
+#define ISR_RXF_OV 0x8
+#define ISR_RFD_UNRUN 0x10
+#define ISR_RRD_OV 0x20
+#define ISR_TXF_UNRUN 0x40
+#define ISR_LINK 0x80
+#define ISR_HOST_RFD_UNRUN 0x100
+#define ISR_HOST_RRD_OV 0x200
+#define ISR_DMAR_TO_RST 0x400
+#define ISR_DMAW_TO_RST 0x800
+#define ISR_GPHY 0x1000
+#define ISR_RX_PKT 0x10000
+#define ISR_TX_PKT 0x20000
+#define ISR_TX_DMA 0x40000
+#define ISR_RX_DMA 0x80000
+#define ISR_CMB_RX 0x100000
+#define ISR_CMB_TX 0x200000
+#define ISR_MAC_RX 0x400000
+#define ISR_MAC_TX 0x800000
+#define ISR_DIS_SMB 0x20000000
+#define ISR_DIS_DMA 0x40000000
+
+/* Normal Interrupt mask */
+#define IMR_NORMAL_MASK (\
+ ISR_SMB |\
+ ISR_GPHY |\
+ ISR_PHY_LINKDOWN|\
+ ISR_DMAR_TO_RST |\
+ ISR_DMAW_TO_RST |\
+ ISR_CMB_TX |\
+ ISR_CMB_RX)
+
+/* Debug Interrupt Mask (enables all interrupts) */
+#define IMR_DEBUG_MASK (\
+ ISR_SMB |\
+ ISR_TIMER |\
+ ISR_MANUAL |\
+ ISR_RXF_OV |\
+ ISR_RFD_UNRUN |\
+ ISR_RRD_OV |\
+ ISR_TXF_UNRUN |\
+ ISR_LINK |\
+ ISR_CMB_TX |\
+ ISR_CMB_RX |\
+ ISR_RX_PKT |\
+ ISR_TX_PKT |\
+ ISR_MAC_RX |\
+ ISR_MAC_TX)
+
+#define MEDIA_TYPE_1000M_FULL 1
+#define MEDIA_TYPE_100M_FULL 2
+#define MEDIA_TYPE_100M_HALF 3
+#define MEDIA_TYPE_10M_FULL 4
+#define MEDIA_TYPE_10M_HALF 5
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* All but 1000-Half */
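+/*
+ * Assuming the e1000-derived ADVERTISE_* bit encoding used by this
+ * driver family (10-half 0x01, 10-full 0x02, 100-half 0x04,
+ * 100-full 0x08, 1000-half 0x10, 1000-full 0x20), 0x002F selects
+ * every speed/duplex bit except 1000-half.
+ */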
+
+#define MAX_JUMBO_FRAME_SIZE 10240
+
+#define ATL1_EEDUMP_LEN 48
+
+/* Statistics counters collected by the MAC */
+struct stats_msg_block {
+ /* rx */
+ u32 rx_ok; /* good RX packets */
+ u32 rx_bcast; /* good RX broadcast packets */
+ u32 rx_mcast; /* good RX multicast packets */
+ u32 rx_pause; /* RX pause frames */
+ u32 rx_ctrl; /* RX control packets other than pause frames */
+ u32 rx_fcs_err; /* RX packets with bad FCS */
+ u32 rx_len_err; /* RX packets with length != actual size */
+ u32 rx_byte_cnt; /* good bytes received. FCS is NOT included */
+ u32 rx_runt; /* RX packets < 64 bytes with good FCS */
+ u32 rx_frag; /* RX packets < 64 bytes with bad FCS */
+ u32 rx_sz_64; /* 64 byte RX packets */
+ u32 rx_sz_65_127;
+ u32 rx_sz_128_255;
+ u32 rx_sz_256_511;
+ u32 rx_sz_512_1023;
+ u32 rx_sz_1024_1518;
+ u32 rx_sz_1519_max; /* 1519 byte to MTU RX packets */
+ u32 rx_sz_ov; /* truncated RX packets > MTU */
+ u32 rx_rxf_ov; /* frames dropped due to RX FIFO overflow */
+ u32 rx_rrd_ov; /* frames dropped due to RRD overflow */
+ u32 rx_align_err; /* alignment errors */
+ u32 rx_bcast_byte_cnt; /* RX broadcast bytes, excluding FCS */
+ u32 rx_mcast_byte_cnt; /* RX multicast bytes, excluding FCS */
+ u32 rx_err_addr; /* packets dropped due to address filtering */
+
+ /* tx */
+ u32 tx_ok; /* good TX packets */
+ u32 tx_bcast; /* good TX broadcast packets */
+ u32 tx_mcast; /* good TX multicast packets */
+ u32 tx_pause; /* TX pause frames */
+ u32 tx_exc_defer; /* TX packets deferred excessively */
+ u32 tx_ctrl; /* TX control frames, excluding pause frames */
+ u32 tx_defer; /* TX packets deferred */
+ u32 tx_byte_cnt; /* bytes transmitted, FCS is NOT included */
+ u32 tx_sz_64; /* 64 byte TX packets */
+ u32 tx_sz_65_127;
+ u32 tx_sz_128_255;
+ u32 tx_sz_256_511;
+ u32 tx_sz_512_1023;
+ u32 tx_sz_1024_1518;
+ u32 tx_sz_1519_max; /* 1519 byte to MTU TX packets */
+ u32 tx_1_col; /* packets TX after a single collision */
+ u32 tx_2_col; /* packets TX after multiple collisions */
+ u32 tx_late_col; /* TX packets with late collisions */
+ u32 tx_abort_col; /* TX packets aborted w/excessive collisions */
+ u32 tx_underrun; /* TX packets aborted due to TX FIFO underrun
+ * or TRD FIFO underrun */
+ u32 tx_rd_eop; /* reads beyond the EOP into the next frame
+ * when the TRD was not written in time */
+ u32 tx_len_err; /* TX packets where length != actual size */
+ u32 tx_trunc; /* TX packets truncated due to size > MTU */
+ u32 tx_bcast_byte; /* broadcast bytes transmitted, excluding FCS */
+ u32 tx_mcast_byte; /* multicast bytes transmitted, excluding FCS */
+ u32 smb_updated; /* 1: SMB Updated. This is used by software to
+ * indicate the statistics update. Software
+ * should clear this bit after retrieving the
+ * statistics information. */
+};
+
+/* Coalescing Message Block */
+struct coals_msg_block {
+ u32 int_stats; /* interrupt status */
+ u16 rrd_prod_idx; /* RRD Producer Index. */
+ u16 rfd_cons_idx; /* RFD Consumer Index. */
+ u16 update; /* Selene sets this bit every time it DMAs the
+ * CMB to host memory. Software should clear
+ * this bit when CMB info is processed. */
+ u16 tpd_cons_idx; /* TPD Consumer Index. */
+};
+
+/* RRD descriptor */
+struct rx_return_desc {
+ u8 num_buf; /* Number of RFD buffers used by the received packet */
+ u8 resved;
+ u16 buf_indx; /* RFD Index of the first buffer */
+ union {
+ u32 valid;
+ struct {
+ u16 rx_chksum;
+ u16 pkt_size;
+ } xsum_sz;
+ } xsz;
+
+ u16 pkt_flg; /* Packet flags */
+ u16 err_flg; /* Error flags */
+ u16 resved2;
+ u16 vlan_tag; /* VLAN TAG */
+};
+
+#define PACKET_FLAG_ETH_TYPE 0x0080
+#define PACKET_FLAG_VLAN_INS 0x0100
+#define PACKET_FLAG_ERR 0x0200
+#define PACKET_FLAG_IPV4 0x0400
+#define PACKET_FLAG_UDP 0x0800
+#define PACKET_FLAG_TCP 0x1000
+#define PACKET_FLAG_BCAST 0x2000
+#define PACKET_FLAG_MCAST 0x4000
+#define PACKET_FLAG_PAUSE 0x8000
+
+#define ERR_FLAG_CRC 0x0001
+#define ERR_FLAG_CODE 0x0002
+#define ERR_FLAG_DRIBBLE 0x0004
+#define ERR_FLAG_RUNT 0x0008
+#define ERR_FLAG_OV 0x0010
+#define ERR_FLAG_TRUNC 0x0020
+#define ERR_FLAG_IP_CHKSUM 0x0040
+#define ERR_FLAG_L4_CHKSUM 0x0080
+#define ERR_FLAG_LEN 0x0100
+#define ERR_FLAG_DES_ADDR 0x0200
+
+/* RFD descriptor */
+struct rx_free_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le16 buf_len; /* Size of the receive buffer in host memory */
+ u16 coalese; /* Update consumer index to host after the
+ * reception of this frame */
+ /* __packed is required */
+} __packed;
+
+/*
+ * The L1 transmit packet descriptor is comprised of four 32-bit words.
+ *
+ * 31 0
+ * +---------------------------------------+
+ * | Word 0: Buffer addr lo |
+ * +---------------------------------------+
+ * | Word 1: Buffer addr hi |
+ * +---------------------------------------+
+ * | Word 2 |
+ * +---------------------------------------+
+ * | Word 3 |
+ * +---------------------------------------+
+ *
+ * Words 0 and 1 combine to form a 64-bit buffer address.
+ *
+ * Word 2 is self-explanatory in the #define block below.
+ *
+ * Word 3 has two forms, depending upon the state of bits 3 and 4.
+ * If bits 3 and 4 are both zero, bits 14:31 are unused by the
+ * hardware. Otherwise, if either bit 3 or 4 is set, the definition
+ * of bits 14:31 varies according to the following depiction.
+ *
+ * 0 End of packet 0 End of packet
+ * 1 Coalesce 1 Coalesce
+ * 2 Insert VLAN tag 2 Insert VLAN tag
+ * 3 Custom csum enable = 0 3 Custom csum enable = 1
+ * 4 Segment enable = 1 4 Segment enable = 0
+ * 5 Generate IP checksum 5 Generate IP checksum
+ * 6 Generate TCP checksum 6 Generate TCP checksum
+ * 7 Generate UDP checksum 7 Generate UDP checksum
+ * 8 VLAN tagged 8 VLAN tagged
+ * 9 Ethernet frame type 9 Ethernet frame type
+ * 10-+ 10-+
+ * 11 | IP hdr length (10:13) 11 | IP hdr length (10:13)
+ * 12 | (num 32-bit words) 12 | (num 32-bit words)
+ * 13-+ 13-+
+ * 14-+ 14 Unused
+ * 15 | TCP hdr length (14:17) 15 Unused
+ * 16 | (num 32-bit words) 16-+
+ * 17-+ 17 |
+ * 18 Header TPD flag 18 |
+ * 19-+ 19 | Payload offset
+ * 20 | 20 | (16:23)
+ * 21 | 21 |
+ * 22 | 22 |
+ * 23 | 23-+
+ * 24 | 24-+
+ * 25 | MSS (19:31) 25 |
+ * 26 | 26 |
+ * 27 | 27 | Custom csum offset
+ * 28 | 28 | (24:31)
+ * 29 | 29 |
+ * 30 | 30 |
+ * 31-+ 31-+
+ */
+
+/* tpd word 2 */
+#define TPD_BUFLEN_MASK 0x3FFF
+#define TPD_BUFLEN_SHIFT 0
+#define TPD_DMAINT_MASK 0x0001
+#define TPD_DMAINT_SHIFT 14
+#define TPD_PKTNT_MASK 0x0001
+#define TPD_PKTINT_SHIFT 15
+#define TPD_VLANTAG_MASK 0xFFFF
+#define TPD_VLANTAG_SHIFT 16
+
+/* tpd word 3 bits 0:13 */
+#define TPD_EOP_MASK 0x0001
+#define TPD_EOP_SHIFT 0
+#define TPD_COALESCE_MASK 0x0001
+#define TPD_COALESCE_SHIFT 1
+#define TPD_INS_VL_TAG_MASK 0x0001
+#define TPD_INS_VL_TAG_SHIFT 2
+#define TPD_CUST_CSUM_EN_MASK 0x0001
+#define TPD_CUST_CSUM_EN_SHIFT 3
+#define TPD_SEGMENT_EN_MASK 0x0001
+#define TPD_SEGMENT_EN_SHIFT 4
+#define TPD_IP_CSUM_MASK 0x0001
+#define TPD_IP_CSUM_SHIFT 5
+#define TPD_TCP_CSUM_MASK 0x0001
+#define TPD_TCP_CSUM_SHIFT 6
+#define TPD_UDP_CSUM_MASK 0x0001
+#define TPD_UDP_CSUM_SHIFT 7
+#define TPD_VL_TAGGED_MASK 0x0001
+#define TPD_VL_TAGGED_SHIFT 8
+#define TPD_ETHTYPE_MASK 0x0001
+#define TPD_ETHTYPE_SHIFT 9
+#define TPD_IPHL_MASK 0x000F
+#define TPD_IPHL_SHIFT 10
+
+/* tpd word 3 bits 14:31 if segment enabled */
+#define TPD_TCPHDRLEN_MASK 0x000F
+#define TPD_TCPHDRLEN_SHIFT 14
+#define TPD_HDRFLAG_MASK 0x0001
+#define TPD_HDRFLAG_SHIFT 18
+#define TPD_MSS_MASK 0x1FFF
+#define TPD_MSS_SHIFT 19
+
+/* tpd word 3 bits 16:31 if custom csum enabled */
+#define TPD_PLOADOFFSET_MASK 0x00FF
+#define TPD_PLOADOFFSET_SHIFT 16
+#define TPD_CCSUMOFFSET_MASK 0x00FF
+#define TPD_CCSUMOFFSET_SHIFT 24
+
+struct tx_packet_desc {
+ __le64 buffer_addr;
+ __le32 word2;
+ __le32 word3;
+};
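+
+/*
+ * Illustrative sketch (not part of the driver): using the masks and
+ * shifts above to compose a minimal single-buffer, end-of-packet TPD,
+ * where 'len' is a hypothetical buffer length:
+ *
+ * tpd->word2 = cpu_to_le32((len & TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
+ * tpd->word3 = cpu_to_le32((1 & TPD_EOP_MASK) << TPD_EOP_SHIFT);
+ */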
+
+/* DMA Order Settings */
+enum atl1_dma_order {
+ atl1_dma_ord_in = 1,
+ atl1_dma_ord_enh = 2,
+ atl1_dma_ord_out = 4
+};
+
+enum atl1_dma_rcb {
+ atl1_rcb_64 = 0,
+ atl1_rcb_128 = 1
+};
+
+enum atl1_dma_req_block {
+ atl1_dma_req_128 = 0,
+ atl1_dma_req_256 = 1,
+ atl1_dma_req_512 = 2,
+ atl1_dma_req_1024 = 3,
+ atl1_dma_req_2048 = 4,
+ atl1_dma_req_4096 = 5
+};
+
+#define ATL1_MAX_INTR 3
+#define ATL1_MAX_TX_BUF_LEN 0x3000 /* 12288 bytes */
+
+#define ATL1_DEFAULT_TPD 256
+#define ATL1_MAX_TPD 1024
+#define ATL1_MIN_TPD 64
+#define ATL1_DEFAULT_RFD 512
+#define ATL1_MIN_RFD 128
+#define ATL1_MAX_RFD 2048
+#define ATL1_REG_COUNT 1538
+
+#define ATL1_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i]))
+#define ATL1_RFD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_free_desc)
+#define ATL1_TPD_DESC(R, i) ATL1_GET_DESC(R, i, struct tx_packet_desc)
+#define ATL1_RRD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_return_desc)
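+
+/*
+ * Example (illustrative): the ring's untyped ->desc memory is indexed
+ * by casting, e.g.
+ * struct tx_packet_desc *tpd = ATL1_TPD_DESC(&adapter->tpd_ring, i);
+ */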
+
+/*
+ * atl1_ring_header represents a single, contiguous block of DMA space
+ * mapped for the three descriptor rings (tpd, rfd, rrd) and the two
+ * message blocks (cmb, smb) described below
+ */
+struct atl1_ring_header {
+ void *desc; /* virtual address */
+ dma_addr_t dma; /* physical address */
+ unsigned int size; /* length in bytes */
+};
+
+/*
+ * atl1_buffer is a wrapper around a pointer to a socket buffer
+ * so a DMA handle can be stored along with the skb
+ */
+struct atl1_buffer {
+ struct sk_buff *skb; /* socket buffer */
+ u16 length; /* rx buffer length */
+ u16 alloced; /* 1 if skb allocated */
+ dma_addr_t dma;
+};
+
+/* transmit packet descriptor (tpd) ring */
+struct atl1_tpd_ring {
+ void *desc; /* descriptor ring virtual address */
+ dma_addr_t dma; /* descriptor ring physical address */
+ u16 size; /* descriptor ring length in bytes */
+ u16 count; /* number of descriptors in the ring */
+ u16 hw_idx; /* hardware index */
+ atomic_t next_to_clean;
+ atomic_t next_to_use;
+ struct atl1_buffer *buffer_info;
+};
+
+/* receive free descriptor (rfd) ring */
+struct atl1_rfd_ring {
+ void *desc; /* descriptor ring virtual address */
+ dma_addr_t dma; /* descriptor ring physical address */
+ u16 size; /* descriptor ring length in bytes */
+ u16 count; /* number of descriptors in the ring */
+ atomic_t next_to_use;
+ u16 next_to_clean;
+ struct atl1_buffer *buffer_info;
+};
+
+/* receive return descriptor (rrd) ring */
+struct atl1_rrd_ring {
+ void *desc; /* descriptor ring virtual address */
+ dma_addr_t dma; /* descriptor ring physical address */
+ unsigned int size; /* descriptor ring length in bytes */
+ u16 count; /* number of descriptors in the ring */
+ u16 next_to_use;
+ atomic_t next_to_clean;
+};
+
+/* coalescing message block (cmb) */
+struct atl1_cmb {
+ struct coals_msg_block *cmb;
+ dma_addr_t dma;
+};
+
+/* statistics message block (smb) */
+struct atl1_smb {
+ struct stats_msg_block *smb;
+ dma_addr_t dma;
+};
+
+/* Statistics counters */
+struct atl1_sft_stats {
+ u64 rx_packets;
+ u64 tx_packets;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ u64 multicast;
+ u64 collisions;
+ u64 rx_errors;
+ u64 rx_length_errors;
+ u64 rx_crc_errors;
+ u64 rx_frame_errors;
+ u64 rx_fifo_errors;
+ u64 rx_missed_errors;
+ u64 tx_errors;
+ u64 tx_fifo_errors;
+ u64 tx_aborted_errors;
+ u64 tx_window_errors;
+ u64 tx_carrier_errors;
+ u64 tx_pause; /* TX pause frames */
+ u64 excecol; /* TX packets w/ excessive collisions */
+ u64 deffer; /* TX packets deferred */
+ u64 scc; /* packets TX after a single collision */
+ u64 mcc; /* packets TX after multiple collisions */
+ u64 latecol; /* TX packets w/ late collisions */
+ u64 tx_underun; /* TX packets aborted due to TX FIFO underrun
+ * or TRD FIFO underrun */
+ u64 tx_trunc; /* TX packets truncated due to size > MTU */
+ u64 rx_pause; /* num Pause packets received. */
+ u64 rx_rrd_ov;
+ u64 rx_trunc;
+};
+
+/* hardware structure */
+struct atl1_hw {
+ u8 __iomem *hw_addr;
+ struct atl1_adapter *back;
+ enum atl1_dma_order dma_ord;
+ enum atl1_dma_rcb rcb_value;
+ enum atl1_dma_req_block dmar_block;
+ enum atl1_dma_req_block dmaw_block;
+ u8 preamble_len;
+ u8 max_retry;
+ u8 jam_ipg; /* IPG to start JAM for collision-based flow
+ * control in half-duplex mode. In units of
+ * 8-bit time */
+ u8 ipgt; /* Desired back-to-back inter-packet gap.
+ * The default is 96-bit time */
+ u8 min_ifg; /* Minimum number of IFGs to enforce between
+ * receive frames. A frame arriving with a
+ * smaller gap is dropped */
+ u8 ipgr1; /* 64-bit Carrier-Sense window */
+ u8 ipgr2; /* 96-bit IPG window */
+ u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned
+ * burst. Each TPD is 16 bytes long */
+ u8 rfd_burst; /* Number of RFD to prefetch in cache-aligned
+ * burst. Each RFD is 12 bytes long */
+ u8 rfd_fetch_gap;
+ u8 rrd_burst; /* Threshold number of RRDs that can be retired
+ * in a burst. Each RRD is 16 bytes long */
+ u8 tpd_fetch_th;
+ u8 tpd_fetch_gap;
+ u16 tx_jumbo_task_th;
+ u16 txf_burst; /* Number of data bytes to read in a cache-
+ * aligned burst. Each SRAM entry is 8 bytes */
+ u16 rx_jumbo_th; /* Jumbo packet size for non-VLAN packet. VLAN
+ * packets should add 4 bytes */
+ u16 rx_jumbo_lkah;
+ u16 rrd_ret_timer; /* RRD retirement timer. Decremented by 1
+ * every 512 ns. */
+ u16 lcol; /* Collision Window */
+
+ u16 cmb_tpd;
+ u16 cmb_rrd;
+ u16 cmb_rx_timer;
+ u16 cmb_tx_timer;
+ u32 smb_timer;
+ u16 media_type;
+ u16 autoneg_advertised;
+
+ u16 mii_autoneg_adv_reg;
+ u16 mii_1000t_ctrl_reg;
+
+ u32 max_frame_size;
+ u32 min_frame_size;
+
+ u16 dev_rev;
+
+ /* spi flash */
+ u8 flash_vendor;
+
+ u8 mac_addr[ETH_ALEN];
+ u8 perm_mac_addr[ETH_ALEN];
+
+ bool phy_configured;
+};
+
+struct atl1_adapter {
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
+ struct atl1_sft_stats soft_stats;
+ u32 rx_buffer_len;
+ u32 wol;
+ u16 link_speed;
+ u16 link_duplex;
+ spinlock_t lock;
+ struct work_struct tx_timeout_task;
+ struct work_struct link_chg_task;
+ struct work_struct pcie_dma_to_rst_task;
+
+ struct timer_list phy_config_timer;
+ bool phy_timer_pending;
+
+ /* all descriptor rings' memory */
+ struct atl1_ring_header ring_header;
+
+ /* TX */
+ struct atl1_tpd_ring tpd_ring;
+ spinlock_t mb_lock;
+
+ /* RX */
+ struct atl1_rfd_ring rfd_ring;
+ struct atl1_rrd_ring rrd_ring;
+ u64 hw_csum_err;
+ u64 hw_csum_good;
+ u32 msg_enable;
+ u16 imt; /* interrupt moderator timer (2us resolution) */
+ u16 ict; /* interrupt clear timer (2us resolution) */
+ struct mii_if_info mii; /* MII interface info */
+
+ u32 bd_number; /* board number */
+ bool pci_using_64;
+ struct atl1_hw hw;
+ struct atl1_smb smb;
+ struct atl1_cmb cmb;
+};
+
+#endif /* ATL1_H */
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
new file mode 100644
index 000000000000..1feae5928a4b
--- /dev/null
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -0,0 +1,3119 @@
+/*
+ * Copyright(c) 2006 - 2007 Atheros Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2008 Chris Snook <csnook@redhat.com>
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/atomic.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/hardirq.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/irqflags.h>
+#include <linux/irqreturn.h>
+#include <linux/mii.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/pm.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "atl2.h"
+
+#define ATL2_DRV_VERSION "2.2.3"
+
+static const char atl2_driver_name[] = "atl2";
+static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
+static const char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
+static const char atl2_driver_version[] = ATL2_DRV_VERSION;
+
+MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>");
+MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ATL2_DRV_VERSION);
+
+/*
+ * atl2_pci_tbl - PCI Device ID Table
+ */
+static DEFINE_PCI_DEVICE_TABLE(atl2_pci_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)},
+ /* required last entry */
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, atl2_pci_tbl);
+
+static void atl2_set_ethtool_ops(struct net_device *netdev);
+
+static void atl2_check_options(struct atl2_adapter *adapter);
+
+/*
+ * atl2_sw_init - Initialize general software structures (struct atl2_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * atl2_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ */
+static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
+{
+ struct atl2_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* PCI config space info */
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_id = pdev->subsystem_device;
+ hw->revision_id = pdev->revision;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
+
+ adapter->wol = 0;
+ adapter->ict = 50000; /* ~100ms */
+ adapter->link_speed = SPEED_0; /* hardware init */
+ adapter->link_duplex = FULL_DUPLEX;
+
+ hw->phy_configured = false;
+ hw->preamble_len = 7;
+ hw->ipgt = 0x60;
+ hw->min_ifg = 0x50;
+ hw->ipgr1 = 0x40;
+ hw->ipgr2 = 0x60;
+ hw->retry_buf = 2;
+ hw->max_retry = 0xf;
+ hw->lcol = 0x37;
+ hw->jam_ipg = 7;
+ hw->fc_rxd_hi = 0;
+ hw->fc_rxd_lo = 0;
+ hw->max_frame_size = adapter->netdev->mtu;
+
+ spin_lock_init(&adapter->stats_lock);
+
+ set_bit(__ATL2_DOWN, &adapter->flags);
+
+ return 0;
+}
+
+/*
+ * atl2_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ */
+static void atl2_set_multi(struct net_device *netdev)
+{
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+ struct atl2_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u32 rctl;
+ u32 hash_value;
+
+ /* Check for Promiscuous and All Multicast modes */
+ rctl = ATL2_READ_REG(hw, REG_MAC_CTRL);
+
+ if (netdev->flags & IFF_PROMISC) {
+ rctl |= MAC_CTRL_PROMIS_EN;
+ } else if (netdev->flags & IFF_ALLMULTI) {
+ rctl |= MAC_CTRL_MC_ALL_EN;
+ rctl &= ~MAC_CTRL_PROMIS_EN;
+ } else
+ rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
+
+ ATL2_WRITE_REG(hw, REG_MAC_CTRL, rctl);
+
+ /* clear the old settings from the multicast hash table */
+ ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
+ ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
+
+ /* compute each mc address's hash value and put it into the hash table */
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atl2_hash_mc_addr(hw, ha->addr);
+ atl2_hash_set(hw, hash_value);
+ }
+}
+
+static void init_ring_ptrs(struct atl2_adapter *adapter)
+{
+ /* Read / Write Ptr Initialize: */
+ adapter->txd_write_ptr = 0;
+ atomic_set(&adapter->txd_read_ptr, 0);
+
+ adapter->rxd_read_ptr = 0;
+ adapter->rxd_write_ptr = 0;
+
+ atomic_set(&adapter->txs_write_ptr, 0);
+ adapter->txs_next_clear = 0;
+}
+
+/*
+ * atl2_configure - Configure Transmit & Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx/Rx unit of the MAC after a reset.
+ */
+static int atl2_configure(struct atl2_adapter *adapter)
+{
+ struct atl2_hw *hw = &adapter->hw;
+ u32 value;
+
+ /* clear interrupt status */
+ ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0xffffffff);
+
+ /* set MAC Address */
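+ /* low dword holds address bytes 2-5; the next word holds bytes 0-1 */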
+ value = (((u32)hw->mac_addr[2]) << 24) |
+ (((u32)hw->mac_addr[3]) << 16) |
+ (((u32)hw->mac_addr[4]) << 8) |
+ (((u32)hw->mac_addr[5]));
+ ATL2_WRITE_REG(hw, REG_MAC_STA_ADDR, value);
+ value = (((u32)hw->mac_addr[0]) << 8) |
+ (((u32)hw->mac_addr[1]));
+ ATL2_WRITE_REG(hw, (REG_MAC_STA_ADDR+4), value);
+
+ /* HI base address */
+ ATL2_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI,
+ (u32)((adapter->ring_dma & 0xffffffff00000000ULL) >> 32));
+
+ /* LO base address */
+ ATL2_WRITE_REG(hw, REG_TXD_BASE_ADDR_LO,
+ (u32)(adapter->txd_dma & 0x00000000ffffffffULL));
+ ATL2_WRITE_REG(hw, REG_TXS_BASE_ADDR_LO,
+ (u32)(adapter->txs_dma & 0x00000000ffffffffULL));
+ ATL2_WRITE_REG(hw, REG_RXD_BASE_ADDR_LO,
+ (u32)(adapter->rxd_dma & 0x00000000ffffffffULL));
+
+ /* element count */
+ ATL2_WRITE_REGW(hw, REG_TXD_MEM_SIZE, (u16)(adapter->txd_ring_size/4));
+ ATL2_WRITE_REGW(hw, REG_TXS_MEM_SIZE, (u16)adapter->txs_ring_size);
+ ATL2_WRITE_REGW(hw, REG_RXD_BUF_NUM, (u16)adapter->rxd_ring_size);
+
+ /* config Internal SRAM */
+/*
+ ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_tx_end);
+ ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_rx_end);
+*/
+
+ /* config IPG/IFG */
+ value = (((u32)hw->ipgt & MAC_IPG_IFG_IPGT_MASK) <<
+ MAC_IPG_IFG_IPGT_SHIFT) |
+ (((u32)hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) <<
+ MAC_IPG_IFG_MIFG_SHIFT) |
+ (((u32)hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) <<
+ MAC_IPG_IFG_IPGR1_SHIFT)|
+ (((u32)hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) <<
+ MAC_IPG_IFG_IPGR2_SHIFT);
+ ATL2_WRITE_REG(hw, REG_MAC_IPG_IFG, value);
+
+ /* config Half-Duplex Control */
+ value = ((u32)hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
+ (((u32)hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) <<
+ MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
+ MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
+ (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
+ (((u32)hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) <<
+ MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
+ ATL2_WRITE_REG(hw, REG_MAC_HALF_DUPLX_CTRL, value);
+
+ /* set Interrupt Moderator Timer */
+ ATL2_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, adapter->imt);
+ ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_ITIMER_EN);
+
+ /* set Interrupt Clear Timer */
+ ATL2_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, adapter->ict);
+
+ /* set MTU */
+ ATL2_WRITE_REG(hw, REG_MTU, adapter->netdev->mtu +
+ ENET_HEADER_SIZE + VLAN_SIZE + ETHERNET_FCS_SIZE);
+
+ /* 1590 */
+ ATL2_WRITE_REG(hw, REG_TX_CUT_THRESH, 0x177);
+
+ /* flow control */
+ ATL2_WRITE_REGW(hw, REG_PAUSE_ON_TH, hw->fc_rxd_hi);
+ ATL2_WRITE_REGW(hw, REG_PAUSE_OFF_TH, hw->fc_rxd_lo);
+
+ /* Init mailbox */
+ ATL2_WRITE_REGW(hw, REG_MB_TXD_WR_IDX, (u16)adapter->txd_write_ptr);
+ ATL2_WRITE_REGW(hw, REG_MB_RXD_RD_IDX, (u16)adapter->rxd_read_ptr);
+
+ /* enable DMA read/write */
+ ATL2_WRITE_REGB(hw, REG_DMAR, DMAR_EN);
+ ATL2_WRITE_REGB(hw, REG_DMAW, DMAW_EN);
+
+ value = ATL2_READ_REG(&adapter->hw, REG_ISR);
+ if ((value & ISR_PHY_LINKDOWN) != 0)
+ value = 1; /* config failed */
+ else
+ value = 0;
+
+ /* clear all interrupt status */
+ ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0x3fffffff);
+ ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0);
+ return value;
+}
+
+/*
+ * atl2_setup_ring_resources - allocate Tx/Rx descriptor resources
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ */
+static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+ u8 offset = 0;
+
+ /* real ring DMA buffer */
+ adapter->ring_size = size =
+ adapter->txd_ring_size * 1 + 7 + /* dword align */
+ adapter->txs_ring_size * 4 + 7 + /* dword align */
+ adapter->rxd_ring_size * 1536 + 127; /* 128bytes align */
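+ /*
+ * The 7/7/127 pad bytes leave room to round each ring's DMA
+ * address up to its boundary (8 bytes for TXD/TXS, 128 for RXD)
+ * in the offset math below.
+ */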
+
+ adapter->ring_vir_addr = pci_alloc_consistent(pdev, size,
+ &adapter->ring_dma);
+ if (!adapter->ring_vir_addr)
+ return -ENOMEM;
+ memset(adapter->ring_vir_addr, 0, adapter->ring_size);
+
+ /* Init TXD Ring */
+ adapter->txd_dma = adapter->ring_dma;
+ offset = (adapter->txd_dma & 0x7) ? (8 - (adapter->txd_dma & 0x7)) : 0;
+ adapter->txd_dma += offset;
+ adapter->txd_ring = adapter->ring_vir_addr + offset;
+
+ /* Init TXS Ring */
+ adapter->txs_dma = adapter->txd_dma + adapter->txd_ring_size;
+ offset = (adapter->txs_dma & 0x7) ? (8 - (adapter->txs_dma & 0x7)) : 0;
+ adapter->txs_dma += offset;
+ adapter->txs_ring = (struct tx_pkt_status *)
+ (((u8 *)adapter->txd_ring) + (adapter->txd_ring_size + offset));
+
+ /* Init RXD Ring */
+ adapter->rxd_dma = adapter->txs_dma + adapter->txs_ring_size * 4;
+ offset = (adapter->rxd_dma & 127) ?
+ (128 - (adapter->rxd_dma & 127)) : 0;
+ if (offset > 7)
+ offset -= 8;
+ else
+ offset += (128 - 8);
+
+ adapter->rxd_dma += offset;
+ adapter->rxd_ring = (struct rx_desc *) (((u8 *)adapter->txs_ring) +
+ (adapter->txs_ring_size * 4 + offset));
+
+/*
+ * Read / Write Ptr Initialize:
+ * init_ring_ptrs(adapter);
+ */
+ return 0;
+}
+
+/*
+ * atl2_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ */
+static inline void atl2_irq_enable(struct atl2_adapter *adapter)
+{
+ ATL2_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK);
+ ATL2_WRITE_FLUSH(&adapter->hw);
+}
+
+/*
+ * atl2_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ */
+static inline void atl2_irq_disable(struct atl2_adapter *adapter)
+{
+ ATL2_WRITE_REG(&adapter->hw, REG_IMR, 0);
+ ATL2_WRITE_FLUSH(&adapter->hw);
+ synchronize_irq(adapter->pdev->irq);
+}
+
+static void __atl2_vlan_mode(u32 features, u32 *ctrl)
+{
+ if (features & NETIF_F_HW_VLAN_RX) {
+ /* enable VLAN tag insert/strip */
+ *ctrl |= MAC_CTRL_RMV_VLAN;
+ } else {
+ /* disable VLAN tag insert/strip */
+ *ctrl &= ~MAC_CTRL_RMV_VLAN;
+ }
+}
+
+static void atl2_vlan_mode(struct net_device *netdev, u32 features)
+{
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+ u32 ctrl;
+
+ atl2_irq_disable(adapter);
+
+ ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL);
+ __atl2_vlan_mode(features, &ctrl);
+ ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl);
+
+ atl2_irq_enable(adapter);
+}
+
+static void atl2_restore_vlan(struct atl2_adapter *adapter)
+{
+ atl2_vlan_mode(adapter->netdev, adapter->netdev->features);
+}
+
+static u32 atl2_fix_features(struct net_device *netdev, u32 features)
+{
+ /*
+ * Since there is no support for separate rx/tx vlan accel
+ * enable/disable make sure tx flag is always in same state as rx.
+ */
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ return features;
+}
+
+static int atl2_set_features(struct net_device *netdev, u32 features)
+{
+ u32 changed = netdev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ atl2_vlan_mode(netdev, features);
+
+ return 0;
+}
+
+static void atl2_intr_rx(struct atl2_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct rx_desc *rxd;
+ struct sk_buff *skb;
+
+ do {
+ rxd = adapter->rxd_ring+adapter->rxd_write_ptr;
+ if (!rxd->status.update)
+ break; /* no more completed rx descriptors */
+
+ /* clear this flag at once */
+ rxd->status.update = 0;
+
+ if (rxd->status.ok && rxd->status.pkt_size >= 60) {
+ int rx_size = (int)(rxd->status.pkt_size - 4);
+ /* alloc new buffer */
+ skb = netdev_alloc_skb_ip_align(netdev, rx_size);
+ if (!skb) {
+ printk(KERN_WARNING
+ "%s: Mem squeeze, deferring packet.\n",
+ netdev->name);
+ /*
+ * No skb available: count the packet as
+ * dropped and stop processing for now.
+ */
+ netdev->stats.rx_dropped++;
+ break;
+ }
+ memcpy(skb->data, rxd->packet, rx_size);
+ skb_put(skb, rx_size);
+ skb->protocol = eth_type_trans(skb, netdev);
+ if (rxd->status.vlan) {
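+ /*
+ * The NIC reports the VLAN TCI with its bit fields
+ * rotated; this appears to rebuild the standard
+ * priority/CFI/VID layout (the transmit path applies
+ * the inverse mapping).
+ */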
+ u16 vlan_tag = (rxd->status.vtag>>4) |
+ ((rxd->status.vtag&7) << 13) |
+ ((rxd->status.vtag&8) << 9);
+
+ __vlan_hwaccel_put_tag(skb, vlan_tag);
+ }
+ netif_rx(skb);
+ netdev->stats.rx_bytes += rx_size;
+ netdev->stats.rx_packets++;
+ } else {
+ netdev->stats.rx_errors++;
+
+ if (rxd->status.ok && rxd->status.pkt_size <= 60)
+ netdev->stats.rx_length_errors++;
+ if (rxd->status.mcast)
+ netdev->stats.multicast++;
+ if (rxd->status.crc)
+ netdev->stats.rx_crc_errors++;
+ if (rxd->status.align)
+ netdev->stats.rx_frame_errors++;
+ }
+
+ /* advance write ptr */
+ if (++adapter->rxd_write_ptr == adapter->rxd_ring_size)
+ adapter->rxd_write_ptr = 0;
+ } while (1);
+
+ /* update mailbox */
+ adapter->rxd_read_ptr = adapter->rxd_write_ptr;
+ ATL2_WRITE_REGW(&adapter->hw, REG_MB_RXD_RD_IDX, adapter->rxd_read_ptr);
+}
+
+static void atl2_intr_tx(struct atl2_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 txd_read_ptr;
+ u32 txs_write_ptr;
+ struct tx_pkt_status *txs;
+ struct tx_pkt_header *txph;
+ int free_hole = 0;
+
+ do {
+ txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr);
+ txs = adapter->txs_ring + txs_write_ptr;
+ if (!txs->update)
+ break; /* tx stop here */
+
+ free_hole = 1;
+ txs->update = 0;
+
+ if (++txs_write_ptr == adapter->txs_ring_size)
+ txs_write_ptr = 0;
+ atomic_set(&adapter->txs_write_ptr, (int)txs_write_ptr);
+
+ txd_read_ptr = (u32) atomic_read(&adapter->txd_read_ptr);
+ txph = (struct tx_pkt_header *)
+ (((u8 *)adapter->txd_ring) + txd_read_ptr);
+
+ if (txph->pkt_size != txs->pkt_size) {
+ struct tx_pkt_status *old_txs = txs;
+ printk(KERN_WARNING
+ "%s: txs packet size not consistent with txd"
+ " txd_:0x%08x, txs_:0x%08x!\n",
+ adapter->netdev->name,
+ *(u32 *)txph, *(u32 *)txs);
+ printk(KERN_WARNING
+ "txd read ptr: 0x%x\n",
+ txd_read_ptr);
+ txs = adapter->txs_ring + txs_write_ptr;
+ printk(KERN_WARNING
+ "txs-behind:0x%08x\n",
+ *(u32 *)txs);
+ if (txs_write_ptr < 2) {
+ txs = adapter->txs_ring +
+ (adapter->txs_ring_size +
+ txs_write_ptr - 2);
+ } else {
+ txs = adapter->txs_ring + (txs_write_ptr - 2);
+ }
+ printk(KERN_WARNING
+ "txs-before:0x%08x\n",
+ *(u32 *)txs);
+ txs = old_txs;
+ }
+
+ /* 4-byte TPH plus the packet, rounded up to a dword boundary */
+ txd_read_ptr += (((u32)(txph->pkt_size) + 7) & ~3);
+ if (txd_read_ptr >= adapter->txd_ring_size)
+ txd_read_ptr -= adapter->txd_ring_size;
+
+ atomic_set(&adapter->txd_read_ptr, (int)txd_read_ptr);
+
+ /* tx statistics: */
+ if (txs->ok) {
+ netdev->stats.tx_bytes += txs->pkt_size;
+ netdev->stats.tx_packets++;
+ } else
+ netdev->stats.tx_errors++;
+
+ if (txs->defer)
+ netdev->stats.collisions++;
+ if (txs->abort_col)
+ netdev->stats.tx_aborted_errors++;
+ if (txs->late_col)
+ netdev->stats.tx_window_errors++;
+ if (txs->underun)
+ netdev->stats.tx_fifo_errors++;
+ } while (1);
+
+ if (free_hole) {
+ if (netif_queue_stopped(adapter->netdev) &&
+ netif_carrier_ok(adapter->netdev))
+ netif_wake_queue(adapter->netdev);
+ }
+}
+
+static void atl2_check_for_link(struct atl2_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u16 phy_data = 0;
+
+ spin_lock(&adapter->stats_lock);
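+ /*
+ * BMSR latches link-down events; read it twice so the
+ * second read reflects the current link state.
+ */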
+ atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+ atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+ spin_unlock(&adapter->stats_lock);
+
+ /* notify upper layer link down ASAP */
+ if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */
+ if (netif_carrier_ok(netdev)) { /* old link state: Up */
+ printk(KERN_INFO "%s: %s NIC Link is Down\n",
+ atl2_driver_name, netdev->name);
+ adapter->link_speed = SPEED_0;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ }
+ schedule_work(&adapter->link_chg_task);
+}
+
+static inline void atl2_clear_phy_int(struct atl2_adapter *adapter)
+{
+ u16 phy_data;
+ spin_lock(&adapter->stats_lock);
+ atl2_read_phy_reg(&adapter->hw, 19, &phy_data);
+ spin_unlock(&adapter->stats_lock);
+}
+
+/*
+ * atl2_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ */
+static irqreturn_t atl2_intr(int irq, void *data)
+{
+ struct atl2_adapter *adapter = netdev_priv(data);
+ struct atl2_hw *hw = &adapter->hw;
+ u32 status;
+
+ status = ATL2_READ_REG(hw, REG_ISR);
+ if (!status)
+ return IRQ_NONE;
+
+ /* link event */
+ if (status & ISR_PHY)
+ atl2_clear_phy_int(adapter);
+
+ /* clear ISR status, and Enable CMB DMA/Disable Interrupt */
+ ATL2_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
+
+ /* check if the PCIe PHY link is down */
+ if (status & ISR_PHY_LINKDOWN) {
+ if (netif_running(adapter->netdev)) { /* reset MAC */
+ ATL2_WRITE_REG(hw, REG_ISR, 0);
+ ATL2_WRITE_REG(hw, REG_IMR, 0);
+ ATL2_WRITE_FLUSH(hw);
+ schedule_work(&adapter->reset_task);
+ return IRQ_HANDLED;
+ }
+ }
+
+ /* check for a DMA read/write error */
+ if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
+ ATL2_WRITE_REG(hw, REG_ISR, 0);
+ ATL2_WRITE_REG(hw, REG_IMR, 0);
+ ATL2_WRITE_FLUSH(hw);
+ schedule_work(&adapter->reset_task);
+ return IRQ_HANDLED;
+ }
+
+ /* link event */
+ if (status & (ISR_PHY | ISR_MANUAL)) {
+ adapter->netdev->stats.tx_carrier_errors++;
+ atl2_check_for_link(adapter);
+ }
+
+ /* transmit event */
+ if (status & ISR_TX_EVENT)
+ atl2_intr_tx(adapter);
+
+ /* rx exception */
+ if (status & ISR_RX_EVENT)
+ atl2_intr_rx(adapter);
+
+ /* re-enable Interrupt */
+ ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0);
+ return IRQ_HANDLED;
+}
+
+static int atl2_request_irq(struct atl2_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int flags, err = 0;
+
+ flags = IRQF_SHARED;
+ adapter->have_msi = true;
+ err = pci_enable_msi(adapter->pdev);
+ if (err)
+ adapter->have_msi = false;
+
+ if (adapter->have_msi)
+ flags &= ~IRQF_SHARED;
+
+ return request_irq(adapter->pdev->irq, atl2_intr, flags, netdev->name,
+ netdev);
+}
+
+/*
+ * atl2_free_ring_resources - Free Tx/Rx descriptor resources
+ * @adapter: board private structure
+ *
+ * Free all transmit and receive software resources
+ */
+static void atl2_free_ring_resources(struct atl2_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ pci_free_consistent(pdev, adapter->ring_size, adapter->ring_vir_addr,
+ adapter->ring_dma);
+}
+
+/*
+ * atl2_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ */
+static int atl2_open(struct net_device *netdev)
+{
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+ int err;
+ u32 val;
+
+ /* disallow open during test */
+ if (test_bit(__ATL2_TESTING, &adapter->flags))
+ return -EBUSY;
+
+ /* allocate transmit descriptors */
+ err = atl2_setup_ring_resources(adapter);
+ if (err)
+ return err;
+
+ err = atl2_init_hw(&adapter->hw);
+ if (err) {
+ err = -EIO;
+ goto err_init_hw;
+ }
+
+ /* hardware has been reset, we need to reload some things */
+ atl2_set_multi(netdev);
+ init_ring_ptrs(adapter);
+
+ atl2_restore_vlan(adapter);
+
+ if (atl2_configure(adapter)) {
+ err = -EIO;
+ goto err_config;
+ }
+
+ err = atl2_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ clear_bit(__ATL2_DOWN, &adapter->flags);
+
+ mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 4*HZ));
+
+ val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
+ ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
+ val | MASTER_CTRL_MANUAL_INT);
+
+ atl2_irq_enable(adapter);
+
+ return 0;
+
+err_init_hw:
+err_req_irq:
+err_config:
+ atl2_free_ring_resources(adapter);
+ atl2_reset_hw(&adapter->hw);
+
+ return err;
+}
+
+static void atl2_down(struct atl2_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ /* signal that we're down so the interrupt handler does not
+ * reschedule our watchdog timer */
+ set_bit(__ATL2_DOWN, &adapter->flags);
+
+ netif_tx_disable(netdev);
+
+ /* reset MAC to disable all RX/TX */
+ atl2_reset_hw(&adapter->hw);
+ msleep(1);
+
+ atl2_irq_disable(adapter);
+
+ del_timer_sync(&adapter->watchdog_timer);
+ del_timer_sync(&adapter->phy_config_timer);
+ clear_bit(0, &adapter->cfg_phy);
+
+ netif_carrier_off(netdev);
+ adapter->link_speed = SPEED_0;
+ adapter->link_duplex = -1;
+}
+
+static void atl2_free_irq(struct atl2_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ free_irq(adapter->pdev->irq, netdev);
+
+#ifdef CONFIG_PCI_MSI
+ if (adapter->have_msi)
+ pci_disable_msi(adapter->pdev);
+#endif
+}
+
+/*
+ * atl2_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ */
+static int atl2_close(struct net_device *netdev)
+{
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+
+ WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags));
+
+ atl2_down(adapter);
+ atl2_free_irq(adapter);
+ atl2_free_ring_resources(adapter);
+
+ return 0;
+}
+
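+/*
+ * Ring free-space helpers: one unit is always left unused (the "- 1")
+ * so a completely full ring can be distinguished from an empty one.
+ */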
+static inline int TxsFreeUnit(struct atl2_adapter *adapter)
+{
+ u32 txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr);
+
+ return (adapter->txs_next_clear >= txs_write_ptr) ?
+ (int) (adapter->txs_ring_size - adapter->txs_next_clear +
+ txs_write_ptr - 1) :
+ (int) (txs_write_ptr - adapter->txs_next_clear - 1);
+}
+
+static inline int TxdFreeBytes(struct atl2_adapter *adapter)
+{
+ u32 txd_read_ptr = (u32)atomic_read(&adapter->txd_read_ptr);
+
+ return (adapter->txd_write_ptr >= txd_read_ptr) ?
+ (int) (adapter->txd_ring_size - adapter->txd_write_ptr +
+ txd_read_ptr - 1) :
+ (int) (txd_read_ptr - adapter->txd_write_ptr - 1);
+}
+
+static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+ struct tx_pkt_header *txph;
+ u32 offset, copy_len;
+ int txs_unused;
+ int txbuf_unused;
+
+ if (test_bit(__ATL2_DOWN, &adapter->flags)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (unlikely(skb->len <= 0)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ txs_unused = TxsFreeUnit(adapter);
+ txbuf_unused = TxdFreeBytes(adapter);
+
+ if (skb->len + sizeof(struct tx_pkt_header) + 4 > txbuf_unused ||
+ txs_unused < 1) {
+ /* not enough resources */
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
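+	/* No scatter/gather here: the whole frame is copied into the
+	 * coherent TX descriptor ring behind a 4-byte packet header,
+	 * keeping the write pointer 4-byte aligned. */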
+ offset = adapter->txd_write_ptr;
+
+ txph = (struct tx_pkt_header *) (((u8 *)adapter->txd_ring) + offset);
+
+ *(u32 *)txph = 0;
+ txph->pkt_size = skb->len;
+
+ offset += 4;
+ if (offset >= adapter->txd_ring_size)
+ offset -= adapter->txd_ring_size;
+ copy_len = adapter->txd_ring_size - offset;
+ if (copy_len >= skb->len) {
+ memcpy(((u8 *)adapter->txd_ring) + offset, skb->data, skb->len);
+ offset += ((u32)(skb->len + 3) & ~3);
+ } else {
+ memcpy(((u8 *)adapter->txd_ring)+offset, skb->data, copy_len);
+ memcpy((u8 *)adapter->txd_ring, skb->data+copy_len,
+ skb->len-copy_len);
+ offset = ((u32)(skb->len-copy_len + 3) & ~3);
+ }
+#ifdef NETIF_F_HW_VLAN_TX
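+	/* Fold the 16-bit VLAN TCI into the bit layout the packet header
+	 * apparently expects: the VID, CFI and priority fields are rotated
+	 * into hardware order. */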
+ if (vlan_tx_tag_present(skb)) {
+ u16 vlan_tag = vlan_tx_tag_get(skb);
+ vlan_tag = (vlan_tag << 4) |
+ (vlan_tag >> 13) |
+ ((vlan_tag >> 9) & 0x8);
+ txph->ins_vlan = 1;
+ txph->vlan = vlan_tag;
+ }
+#endif
+ if (offset >= adapter->txd_ring_size)
+ offset -= adapter->txd_ring_size;
+ adapter->txd_write_ptr = offset;
+
+ /* clear txs before send */
+ adapter->txs_ring[adapter->txs_next_clear].update = 0;
+ if (++adapter->txs_next_clear == adapter->txs_ring_size)
+ adapter->txs_next_clear = 0;
+
+ ATL2_WRITE_REGW(&adapter->hw, REG_MB_TXD_WR_IDX,
+ (adapter->txd_write_ptr >> 2));
+
+ mmiowb();
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/*
+ * atl2_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+ struct atl2_hw *hw = &adapter->hw;
+
+ if ((new_mtu < 40) || (new_mtu > (ETH_DATA_LEN + VLAN_SIZE)))
+ return -EINVAL;
+
+	/* program the on-wire frame size limit: MTU plus Ethernet header,
+	 * VLAN tag and FCS */
+ if (hw->max_frame_size != new_mtu) {
+ netdev->mtu = new_mtu;
+ ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ENET_HEADER_SIZE +
+ VLAN_SIZE + ETHERNET_FCS_SIZE);
+ }
+
+ return 0;
+}
+
+/*
+ * atl2_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl2_set_mac(struct net_device *netdev, void *p)
+{
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
+
+ atl2_set_mac_addr(&adapter->hw);
+
+ return 0;
+}
+
+/*
+ * atl2_mii_ioctl - handle MII ioctls
+ * @netdev: network interface device structure
+ * @ifr: interface request holding the MII data
+ * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
+ */
+static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+ unsigned long flags;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = 0;
+ break;
+ case SIOCGMIIREG:
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+ if (atl2_read_phy_reg(&adapter->hw,
+ data->reg_num & 0x1F, &data->val_out)) {
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+ return -EIO;
+ }
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+ break;
+ case SIOCSMIIREG:
+ if (data->reg_num & ~(0x1F))
+ return -EFAULT;
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+ if (atl2_write_phy_reg(&adapter->hw, data->reg_num,
+ data->val_in)) {
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+ return -EIO;
+ }
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+/*
+ * atl2_ioctl - ioctl entry point
+ * @netdev: network interface device structure
+ * @ifr: interface request
+ * @cmd: ioctl command; MII commands are handed to atl2_mii_ioctl()
+ */
+static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return atl2_mii_ioctl(netdev, ifr, cmd);
+#ifdef ETHTOOL_OPS_COMPAT
+ case SIOCETHTOOL:
+ return ethtool_ioctl(ifr);
+#endif
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/*
+ * atl2_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ */
+static void atl2_tx_timeout(struct net_device *netdev)
+{
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+}
+
+/*
+ * atl2_watchdog - Timer Call-back
+ * @data: pointer to the adapter cast into an unsigned long
+ */
+static void atl2_watchdog(unsigned long data)
+{
+ struct atl2_adapter *adapter = (struct atl2_adapter *) data;
+
+ if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
+ u32 drop_rxd, drop_rxs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+ drop_rxd = ATL2_READ_REG(&adapter->hw, REG_STS_RXD_OV);
+ drop_rxs = ATL2_READ_REG(&adapter->hw, REG_STS_RXS_OV);
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+
+ adapter->netdev->stats.rx_over_errors += drop_rxd + drop_rxs;
+
+ /* Reset the timer */
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 4 * HZ));
+ }
+}
+
+/*
+ * atl2_phy_config - Timer Call-back
+ * @data: pointer to the adapter cast into an unsigned long
+ */
+static void atl2_phy_config(unsigned long data)
+{
+ struct atl2_adapter *adapter = (struct atl2_adapter *) data;
+ struct atl2_hw *hw = &adapter->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+ atl2_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
+ atl2_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN |
+ MII_CR_RESTART_AUTO_NEG);
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+ clear_bit(0, &adapter->cfg_phy);
+}
+
+static int atl2_up(struct atl2_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err = 0;
+ u32 val;
+
+ /* hardware has been reset, we need to reload some things */
+
+ err = atl2_init_hw(&adapter->hw);
+ if (err) {
+ err = -EIO;
+ return err;
+ }
+
+ atl2_set_multi(netdev);
+ init_ring_ptrs(adapter);
+
+ atl2_restore_vlan(adapter);
+
+ if (atl2_configure(adapter)) {
+ err = -EIO;
+ goto err_up;
+ }
+
+ clear_bit(__ATL2_DOWN, &adapter->flags);
+
+ val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
+ ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, val |
+ MASTER_CTRL_MANUAL_INT);
+
+ atl2_irq_enable(adapter);
+
+err_up:
+ return err;
+}
+
+static void atl2_reinit_locked(struct atl2_adapter *adapter)
+{
+ WARN_ON(in_interrupt());
+ while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
+ msleep(1);
+ atl2_down(adapter);
+ atl2_up(adapter);
+ clear_bit(__ATL2_RESETTING, &adapter->flags);
+}
+
+static void atl2_reset_task(struct work_struct *work)
+{
+ struct atl2_adapter *adapter;
+ adapter = container_of(work, struct atl2_adapter, reset_task);
+
+ atl2_reinit_locked(adapter);
+}
+
+static void atl2_setup_mac_ctrl(struct atl2_adapter *adapter)
+{
+ u32 value;
+ struct atl2_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+
+ /* Config MAC CTRL Register */
+ value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY;
+
+ /* duplex */
+ if (FULL_DUPLEX == adapter->link_duplex)
+ value |= MAC_CTRL_DUPLX;
+
+ /* flow control */
+ value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
+
+ /* PAD & CRC */
+ value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
+
+ /* preamble length */
+ value |= (((u32)adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) <<
+ MAC_CTRL_PRMLEN_SHIFT);
+
+ /* vlan */
+ __atl2_vlan_mode(netdev->features, &value);
+
+ /* filter mode */
+ value |= MAC_CTRL_BC_EN;
+ if (netdev->flags & IFF_PROMISC)
+ value |= MAC_CTRL_PROMIS_EN;
+ else if (netdev->flags & IFF_ALLMULTI)
+ value |= MAC_CTRL_MC_ALL_EN;
+
+ /* half retry buffer */
+ value |= (((u32)(adapter->hw.retry_buf &
+ MAC_CTRL_HALF_LEFT_BUF_MASK)) << MAC_CTRL_HALF_LEFT_BUF_SHIFT);
+
+ ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
+}
+
+static int atl2_check_link(struct atl2_adapter *adapter)
+{
+ struct atl2_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ int ret_val;
+ u16 speed, duplex, phy_data;
+ int reconfig = 0;
+
+	/* the BMSR link-status bit is latched low, so it must be read twice
+	 * to get the current state */
+ atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
+ atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
+ if (!(phy_data&BMSR_LSTATUS)) { /* link down */
+ if (netif_carrier_ok(netdev)) { /* old link state: Up */
+ u32 value;
+ /* disable rx */
+ value = ATL2_READ_REG(hw, REG_MAC_CTRL);
+ value &= ~MAC_CTRL_RX_EN;
+ ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
+ adapter->link_speed = SPEED_0;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ return 0;
+ }
+
+ /* Link Up */
+ ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val)
+ return ret_val;
+ switch (hw->MediaType) {
+ case MEDIA_TYPE_100M_FULL:
+ if (speed != SPEED_100 || duplex != FULL_DUPLEX)
+ reconfig = 1;
+ break;
+ case MEDIA_TYPE_100M_HALF:
+ if (speed != SPEED_100 || duplex != HALF_DUPLEX)
+ reconfig = 1;
+ break;
+ case MEDIA_TYPE_10M_FULL:
+ if (speed != SPEED_10 || duplex != FULL_DUPLEX)
+ reconfig = 1;
+ break;
+ case MEDIA_TYPE_10M_HALF:
+ if (speed != SPEED_10 || duplex != HALF_DUPLEX)
+ reconfig = 1;
+ break;
+ }
+	/* the negotiated result matches our configured setting */
+ if (reconfig == 0) {
+ if (adapter->link_speed != speed ||
+ adapter->link_duplex != duplex) {
+ adapter->link_speed = speed;
+ adapter->link_duplex = duplex;
+ atl2_setup_mac_ctrl(adapter);
+			printk(KERN_INFO "%s: %s NIC Link is Up <%d Mbps %s>\n",
+ atl2_driver_name, netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full Duplex" : "Half Duplex");
+ }
+
+ if (!netif_carrier_ok(netdev)) { /* Link down -> Up */
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ }
+ return 0;
+ }
+
+	/* the negotiated result differs from our setting: take the link down */
+ if (netif_carrier_ok(netdev)) {
+ u32 value;
+ /* disable rx */
+ value = ATL2_READ_REG(hw, REG_MAC_CTRL);
+ value &= ~MAC_CTRL_RX_EN;
+ ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
+
+ adapter->link_speed = SPEED_0;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+
+	/* autonegotiation gave an unwanted result; schedule a timer to
+	 * reconfigure the PHY (re-arming in under 5 seconds would
+	 * indicate something strange) */
+ if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
+ if (!test_and_set_bit(0, &adapter->cfg_phy))
+ mod_timer(&adapter->phy_config_timer,
+ round_jiffies(jiffies + 5 * HZ));
+ }
+
+ return 0;
+}
+
+/*
+ * atl2_link_chg_task - handle a link change event outside interrupt context
+ * @work: work_struct embedded in the adapter structure
+ */
+static void atl2_link_chg_task(struct work_struct *work)
+{
+ struct atl2_adapter *adapter;
+ unsigned long flags;
+
+ adapter = container_of(work, struct atl2_adapter, link_chg_task);
+
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+ atl2_check_link(adapter);
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+}
+
+static void atl2_setup_pcicmd(struct pci_dev *pdev)
+{
+ u16 cmd;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+
+	cmd &= ~(PCI_COMMAND_INTX_DISABLE | PCI_COMMAND_IO);
+	cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+
+	/*
+	 * Some motherboard BIOS (PXE/EFI) drivers may leave PME asserted
+	 * when they hand control to the OS (Windows/Linux), so clear the
+	 * PM control/status register before the NIC starts normal operation.
+	 */
+ pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void atl2_poll_controller(struct net_device *netdev)
+{
+ disable_irq(netdev->irq);
+ atl2_intr(netdev->irq, netdev);
+ enable_irq(netdev->irq);
+}
+#endif
+
+
+static const struct net_device_ops atl2_netdev_ops = {
+ .ndo_open = atl2_open,
+ .ndo_stop = atl2_close,
+ .ndo_start_xmit = atl2_xmit_frame,
+ .ndo_set_rx_mode = atl2_set_multi,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = atl2_set_mac,
+ .ndo_change_mtu = atl2_change_mtu,
+ .ndo_fix_features = atl2_fix_features,
+ .ndo_set_features = atl2_set_features,
+ .ndo_do_ioctl = atl2_ioctl,
+ .ndo_tx_timeout = atl2_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = atl2_poll_controller,
+#endif
+};
+
+/*
+ * atl2_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in atl2_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * atl2_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ */
+static int __devinit atl2_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct atl2_adapter *adapter;
+ static int cards_found;
+ unsigned long mmio_start;
+ int mmio_len;
+ int err;
+
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ /*
+ * atl2 is a shared-high-32-bit device, so we're stuck with 32-bit DMA
+ * until the kernel has the proper infrastructure to support 64-bit DMA
+ * on these devices.
+ */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
+	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+		printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n");
+		err = -EIO;
+		goto err_dma;
+	}
+
+ /* Mark all PCI regions associated with PCI device
+ * pdev as being reserved by owner atl2_driver_name */
+ err = pci_request_regions(pdev, atl2_driver_name);
+ if (err)
+ goto err_pci_reg;
+
+ /* Enables bus-mastering on the device and calls
+ * pcibios_set_master to do the needed arch specific settings */
+ pci_set_master(pdev);
+
+ err = -ENOMEM;
+ netdev = alloc_etherdev(sizeof(struct atl2_adapter));
+ if (!netdev)
+ goto err_alloc_etherdev;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->hw.back = adapter;
+
+ mmio_start = pci_resource_start(pdev, 0x0);
+ mmio_len = pci_resource_len(pdev, 0x0);
+
+ adapter->hw.mem_rang = (u32)mmio_len;
+ adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
+ if (!adapter->hw.hw_addr) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ atl2_setup_pcicmd(pdev);
+
+ netdev->netdev_ops = &atl2_netdev_ops;
+ atl2_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+ netdev->mem_start = mmio_start;
+ netdev->mem_end = mmio_start + mmio_len;
+ adapter->bd_number = cards_found;
+ adapter->pci_using_64 = false;
+
+ /* setup the private structure */
+ err = atl2_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+ err = -EIO;
+
+ netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_RX;
+ netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
+
+ /* Init PHY as early as possible due to power saving issue */
+ atl2_phy_init(&adapter->hw);
+
+ /* reset the controller to
+ * put the device in a known good starting state */
+
+ if (atl2_reset_hw(&adapter->hw)) {
+ err = -EIO;
+ goto err_reset;
+ }
+
+ /* copy the MAC address out of the EEPROM */
+ atl2_read_mac_addr(&adapter->hw);
+ memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+/* FIXME: do we still need this? */
+#ifdef ETHTOOL_GPERMADDR
+ memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->perm_addr)) {
+#else
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+#endif
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ atl2_check_options(adapter);
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = atl2_watchdog;
+ adapter->watchdog_timer.data = (unsigned long) adapter;
+
+ init_timer(&adapter->phy_config_timer);
+ adapter->phy_config_timer.function = atl2_phy_config;
+ adapter->phy_config_timer.data = (unsigned long) adapter;
+
+ INIT_WORK(&adapter->reset_task, atl2_reset_task);
+ INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);
+
+ strcpy(netdev->name, "eth%d"); /* ?? */
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ /* assume we have no link for now */
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ cards_found++;
+
+ return 0;
+
+err_reset:
+err_register:
+err_sw_init:
+err_eeprom:
+ iounmap(adapter->hw.hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/*
+ * atl2_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * atl2_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ */
+/* FIXME: write the original MAC address back in case it was changed from a
+ * BIOS-set value, as in atl1 -- CHS */
+static void __devexit atl2_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+
+	/* flushing scheduled work may reschedule our watchdog task, so
+	 * explicitly prevent the watchdog task from being rescheduled */
+ set_bit(__ATL2_DOWN, &adapter->flags);
+
+ del_timer_sync(&adapter->watchdog_timer);
+ del_timer_sync(&adapter->phy_config_timer);
+ cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->link_chg_task);
+
+ unregister_netdev(netdev);
+
+ atl2_force_ps(&adapter->hw);
+
+ iounmap(adapter->hw.hw_addr);
+ pci_release_regions(pdev);
+
+ free_netdev(netdev);
+
+ pci_disable_device(pdev);
+}
+
+static int atl2_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+ struct atl2_hw *hw = &adapter->hw;
+ u16 speed, duplex;
+ u32 ctrl = 0;
+ u32 wufc = adapter->wol;
+
+#ifdef CONFIG_PM
+ int retval = 0;
+#endif
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags));
+ atl2_down(adapter);
+ }
+
+#ifdef CONFIG_PM
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+#endif
+
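+	/* the BMSR link-status bit is latched low, so read twice for the
+	 * current state */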
+ atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
+ atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
+ if (ctrl & BMSR_LSTATUS)
+ wufc &= ~ATLX_WUFC_LNKC;
+
+ if (0 != (ctrl & BMSR_LSTATUS) && 0 != wufc) {
+ u32 ret_val;
+ /* get current link speed & duplex */
+ ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+			printk(KERN_DEBUG
+				"%s: failed to get speed/duplex on suspend\n",
+				atl2_driver_name);
+ goto wol_dis;
+ }
+
+ ctrl = 0;
+
+ /* turn on magic packet wol */
+ if (wufc & ATLX_WUFC_MAG)
+ ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);
+
+ /* ignore Link Chg event when Link is up */
+ ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl);
+
+ /* Config MAC CTRL Register */
+ ctrl = MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY;
+ if (FULL_DUPLEX == adapter->link_duplex)
+ ctrl |= MAC_CTRL_DUPLX;
+ ctrl |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
+ ctrl |= (((u32)adapter->hw.preamble_len &
+ MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
+ ctrl |= (((u32)(adapter->hw.retry_buf &
+ MAC_CTRL_HALF_LEFT_BUF_MASK)) <<
+ MAC_CTRL_HALF_LEFT_BUF_SHIFT);
+ if (wufc & ATLX_WUFC_MAG) {
+			/* a magic packet may be broadcast, multicast or unicast */
+ ctrl |= MAC_CTRL_BC_EN;
+ }
+
+ ATL2_WRITE_REG(hw, REG_MAC_CTRL, ctrl);
+
+		/* PCIe workaround: force receiver detection and keep the
+		 * normal TX clock selected so the link survives low power */
+ ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
+ ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+ ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
+ ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
+ ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
+ ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
+
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+ goto suspend_exit;
+ }
+
+ if (0 == (ctrl&BMSR_LSTATUS) && 0 != (wufc&ATLX_WUFC_LNKC)) {
+		/* link is down, so enable only the link-change WOL event */
+ ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
+ ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl);
+ ATL2_WRITE_REG(hw, REG_MAC_CTRL, 0);
+
+		/* same PCIe workaround as above */
+ ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
+ ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+ ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
+ ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
+ ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
+ ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
+
+ hw->phy_configured = false; /* re-init PHY when resume */
+
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+
+ goto suspend_exit;
+ }
+
+wol_dis:
+ /* WOL disabled */
+ ATL2_WRITE_REG(hw, REG_WOL_CTRL, 0);
+
+	/* same PCIe workaround as above */
+ ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
+ ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+ ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
+ ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
+ ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
+ ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
+
+ atl2_force_ps(hw);
+ hw->phy_configured = false; /* re-init PHY when resume */
+
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+
+suspend_exit:
+ if (netif_running(netdev))
+ atl2_free_irq(adapter);
+
+ pci_disable_device(pdev);
+
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int atl2_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+ u32 err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR
+ "atl2: Cannot enable PCI device from suspend\n");
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ ATL2_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
+
+ if (netif_running(netdev)) {
+ err = atl2_request_irq(adapter);
+ if (err)
+ return err;
+ }
+
+ atl2_reset_hw(&adapter->hw);
+
+ if (netif_running(netdev))
+ atl2_up(adapter);
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+#endif
+
+static void atl2_shutdown(struct pci_dev *pdev)
+{
+ atl2_suspend(pdev, PMSG_SUSPEND);
+}
+
+static struct pci_driver atl2_driver = {
+ .name = atl2_driver_name,
+ .id_table = atl2_pci_tbl,
+ .probe = atl2_probe,
+ .remove = __devexit_p(atl2_remove),
+ /* Power Management Hooks */
+ .suspend = atl2_suspend,
+#ifdef CONFIG_PM
+ .resume = atl2_resume,
+#endif
+ .shutdown = atl2_shutdown,
+};
+
+/*
+ * atl2_init_module - Driver Registration Routine
+ *
+ * atl2_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ */
+static int __init atl2_init_module(void)
+{
+ printk(KERN_INFO "%s - version %s\n", atl2_driver_string,
+ atl2_driver_version);
+ printk(KERN_INFO "%s\n", atl2_copyright);
+ return pci_register_driver(&atl2_driver);
+}
+module_init(atl2_init_module);
+
+/*
+ * atl2_exit_module - Driver Exit Cleanup Routine
+ *
+ * atl2_exit_module is called just before the driver is removed
+ * from memory.
+ */
+static void __exit atl2_exit_module(void)
+{
+ pci_unregister_driver(&atl2_driver);
+}
+module_exit(atl2_exit_module);
+
+static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value)
+{
+ struct atl2_adapter *adapter = hw->back;
+ pci_read_config_word(adapter->pdev, reg, value);
+}
+
+static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value)
+{
+ struct atl2_adapter *adapter = hw->back;
+ pci_write_config_word(adapter->pdev, reg, *value);
+}
+
+static int atl2_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+ struct atl2_hw *hw = &adapter->hw;
+
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
+ ecmd->advertising = ADVERTISED_TP;
+
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ ecmd->advertising |= hw->autoneg_advertised;
+
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = 0;
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ if (adapter->link_speed != SPEED_0) {
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+ if (adapter->link_duplex == FULL_DUPLEX)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ } else {
+ ethtool_cmd_speed_set(ecmd, -1);
+ ecmd->duplex = -1;
+ }
+
+ ecmd->autoneg = AUTONEG_ENABLE;
+ return 0;
+}
+
+static int atl2_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+ struct atl2_hw *hw = &adapter->hw;
+
+ while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
+ msleep(1);
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+#define MY_ADV_MASK (ADVERTISE_10_HALF | \
+ ADVERTISE_10_FULL | \
+ ADVERTISE_100_HALF| \
+ ADVERTISE_100_FULL)
+
+ if ((ecmd->advertising & MY_ADV_MASK) == MY_ADV_MASK) {
+ hw->MediaType = MEDIA_TYPE_AUTO_SENSOR;
+ hw->autoneg_advertised = MY_ADV_MASK;
+ } else if ((ecmd->advertising & MY_ADV_MASK) ==
+ ADVERTISE_100_FULL) {
+ hw->MediaType = MEDIA_TYPE_100M_FULL;
+ hw->autoneg_advertised = ADVERTISE_100_FULL;
+ } else if ((ecmd->advertising & MY_ADV_MASK) ==
+ ADVERTISE_100_HALF) {
+ hw->MediaType = MEDIA_TYPE_100M_HALF;
+ hw->autoneg_advertised = ADVERTISE_100_HALF;
+ } else if ((ecmd->advertising & MY_ADV_MASK) ==
+ ADVERTISE_10_FULL) {
+ hw->MediaType = MEDIA_TYPE_10M_FULL;
+ hw->autoneg_advertised = ADVERTISE_10_FULL;
+ } else if ((ecmd->advertising & MY_ADV_MASK) ==
+ ADVERTISE_10_HALF) {
+ hw->MediaType = MEDIA_TYPE_10M_HALF;
+ hw->autoneg_advertised = ADVERTISE_10_HALF;
+ } else {
+ clear_bit(__ATL2_RESETTING, &adapter->flags);
+ return -EINVAL;
+ }
+ ecmd->advertising = hw->autoneg_advertised |
+ ADVERTISED_TP | ADVERTISED_Autoneg;
+ } else {
+ clear_bit(__ATL2_RESETTING, &adapter->flags);
+ return -EINVAL;
+ }
+
+ /* reset the link */
+ if (netif_running(adapter->netdev)) {
+ atl2_down(adapter);
+ atl2_up(adapter);
+ } else
+ atl2_reset_hw(&adapter->hw);
+
+ clear_bit(__ATL2_RESETTING, &adapter->flags);
+ return 0;
+}
+
+static u32 atl2_get_msglevel(struct net_device *netdev)
+{
+ return 0;
+}
+
+/*
+ * It's sane for this to be empty, but we might want to take advantage of this.
+ */
+static void atl2_set_msglevel(struct net_device *netdev, u32 data)
+{
+}
+
+static int atl2_get_regs_len(struct net_device *netdev)
+{
+#define ATL2_REGS_LEN 42
+ return sizeof(u32) * ATL2_REGS_LEN;
+}
+
+static void atl2_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+ struct atl2_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u16 phy_data;
+
+ memset(p, 0, sizeof(u32) * ATL2_REGS_LEN);
+
+ regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+
+ regs_buff[0] = ATL2_READ_REG(hw, REG_VPD_CAP);
+ regs_buff[1] = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
+ regs_buff[2] = ATL2_READ_REG(hw, REG_SPI_FLASH_CONFIG);
+ regs_buff[3] = ATL2_READ_REG(hw, REG_TWSI_CTRL);
+ regs_buff[4] = ATL2_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL);
+ regs_buff[5] = ATL2_READ_REG(hw, REG_MASTER_CTRL);
+ regs_buff[6] = ATL2_READ_REG(hw, REG_MANUAL_TIMER_INIT);
+ regs_buff[7] = ATL2_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT);
+ regs_buff[8] = ATL2_READ_REG(hw, REG_PHY_ENABLE);
+ regs_buff[9] = ATL2_READ_REG(hw, REG_CMBDISDMA_TIMER);
+ regs_buff[10] = ATL2_READ_REG(hw, REG_IDLE_STATUS);
+ regs_buff[11] = ATL2_READ_REG(hw, REG_MDIO_CTRL);
+ regs_buff[12] = ATL2_READ_REG(hw, REG_SERDES_LOCK);
+ regs_buff[13] = ATL2_READ_REG(hw, REG_MAC_CTRL);
+ regs_buff[14] = ATL2_READ_REG(hw, REG_MAC_IPG_IFG);
+ regs_buff[15] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR);
+ regs_buff[16] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR+4);
+ regs_buff[17] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE);
+ regs_buff[18] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE+4);
+ regs_buff[19] = ATL2_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL);
+ regs_buff[20] = ATL2_READ_REG(hw, REG_MTU);
+ regs_buff[21] = ATL2_READ_REG(hw, REG_WOL_CTRL);
+ regs_buff[22] = ATL2_READ_REG(hw, REG_SRAM_TXRAM_END);
+ regs_buff[23] = ATL2_READ_REG(hw, REG_DESC_BASE_ADDR_HI);
+ regs_buff[24] = ATL2_READ_REG(hw, REG_TXD_BASE_ADDR_LO);
+ regs_buff[25] = ATL2_READ_REG(hw, REG_TXD_MEM_SIZE);
+ regs_buff[26] = ATL2_READ_REG(hw, REG_TXS_BASE_ADDR_LO);
+ regs_buff[27] = ATL2_READ_REG(hw, REG_TXS_MEM_SIZE);
+ regs_buff[28] = ATL2_READ_REG(hw, REG_RXD_BASE_ADDR_LO);
+ regs_buff[29] = ATL2_READ_REG(hw, REG_RXD_BUF_NUM);
+ regs_buff[30] = ATL2_READ_REG(hw, REG_DMAR);
+ regs_buff[31] = ATL2_READ_REG(hw, REG_TX_CUT_THRESH);
+ regs_buff[32] = ATL2_READ_REG(hw, REG_DMAW);
+ regs_buff[33] = ATL2_READ_REG(hw, REG_PAUSE_ON_TH);
+ regs_buff[34] = ATL2_READ_REG(hw, REG_PAUSE_OFF_TH);
+ regs_buff[35] = ATL2_READ_REG(hw, REG_MB_TXD_WR_IDX);
+ regs_buff[36] = ATL2_READ_REG(hw, REG_MB_RXD_RD_IDX);
+ regs_buff[38] = ATL2_READ_REG(hw, REG_ISR);
+ regs_buff[39] = ATL2_READ_REG(hw, REG_IMR);
+
+ atl2_read_phy_reg(hw, MII_BMCR, &phy_data);
+ regs_buff[40] = (u32)phy_data;
+ atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
+ regs_buff[41] = (u32)phy_data;
+}
+
+static int atl2_get_eeprom_len(struct net_device *netdev)
+{
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+
+ if (!atl2_check_eeprom_exist(&adapter->hw))
+ return 512;
+ else
+ return 0;
+}
+
+static int atl2_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+ struct atl2_hw *hw = &adapter->hw;
+ u32 *eeprom_buff;
+ int first_dword, last_dword;
+ int ret_val = 0;
+ int i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (atl2_check_eeprom_exist(hw))
+ return -EINVAL;
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
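+	/* The EEPROM is read in 32-bit dwords; widen the requested byte range
+	 * to whole dwords, then copy back only the bytes asked for. */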
+ first_dword = eeprom->offset >> 2;
+ last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
+
+ eeprom_buff = kmalloc(sizeof(u32) * (last_dword - first_dword + 1),
+ GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+	for (i = first_dword; i <= last_dword; i++) {
+ if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) {
+ ret_val = -EIO;
+ goto free;
+ }
+ }
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3),
+ eeprom->len);
+free:
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static int atl2_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+ struct atl2_hw *hw = &adapter->hw;
+ u32 *eeprom_buff;
+ u32 *ptr;
+ int max_len, first_dword, last_dword, ret_val = 0;
+ int i;
+
+ if (eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ return -EFAULT;
+
+ max_len = 512;
+
+ first_dword = eeprom->offset >> 2;
+ last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = eeprom_buff;
+
+ if (eeprom->offset & 3) {
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0]))) {
+ ret_val = -EIO;
+ goto out;
+ }
+ ptr++;
+ }
+ if (((eeprom->offset + eeprom->len) & 3)) {
+ /*
+ * need read/modify/write of last changed EEPROM word
+ * only the first byte of the word is being modified
+ */
+ if (!atl2_read_eeprom(hw, last_dword * 4,
+ &(eeprom_buff[last_dword - first_dword]))) {
+ ret_val = -EIO;
+ goto out;
+ }
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_dword - first_dword + 1; i++) {
+ if (!atl2_write_eeprom(hw, ((first_dword+i)*4), eeprom_buff[i])) {
+ ret_val = -EIO;
+ goto out;
+ }
+ }
+ out:
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
+static void atl2_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+
+ strncpy(drvinfo->driver, atl2_driver_name, 32);
+ strncpy(drvinfo->version, atl2_driver_version, 32);
+ strncpy(drvinfo->fw_version, "L2", 32);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ drvinfo->n_stats = 0;
+ drvinfo->testinfo_len = 0;
+ drvinfo->regdump_len = atl2_get_regs_len(netdev);
+ drvinfo->eedump_len = atl2_get_eeprom_len(netdev);
+}
+
+static void atl2_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+
+ if (adapter->wol & ATLX_WUFC_EX)
+ wol->wolopts |= WAKE_UCAST;
+ if (adapter->wol & ATLX_WUFC_MC)
+ wol->wolopts |= WAKE_MCAST;
+ if (adapter->wol & ATLX_WUFC_BC)
+ wol->wolopts |= WAKE_BCAST;
+ if (adapter->wol & ATLX_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+ if (adapter->wol & ATLX_WUFC_LNKC)
+ wol->wolopts |= WAKE_PHY;
+}
+
+static int atl2_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+
+ if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
+ return -EOPNOTSUPP;
+
+ if (wol->wolopts & (WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
+ return -EOPNOTSUPP;
+
+ /* these settings will always override what we currently have */
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= ATLX_WUFC_MAG;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wol |= ATLX_WUFC_LNKC;
+
+ return 0;
+}
+
+static int atl2_nway_reset(struct net_device *netdev)
+{
+ struct atl2_adapter *adapter = netdev_priv(netdev);
+ if (netif_running(netdev))
+ atl2_reinit_locked(adapter);
+ return 0;
+}
+
+static const struct ethtool_ops atl2_ethtool_ops = {
+ .get_settings = atl2_get_settings,
+ .set_settings = atl2_set_settings,
+ .get_drvinfo = atl2_get_drvinfo,
+ .get_regs_len = atl2_get_regs_len,
+ .get_regs = atl2_get_regs,
+ .get_wol = atl2_get_wol,
+ .set_wol = atl2_set_wol,
+ .get_msglevel = atl2_get_msglevel,
+ .set_msglevel = atl2_set_msglevel,
+ .nway_reset = atl2_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = atl2_get_eeprom_len,
+ .get_eeprom = atl2_get_eeprom,
+ .set_eeprom = atl2_set_eeprom,
+};
+
+static void atl2_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
+}
+
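+/* Byte-swap helpers used to assemble the MAC address from the little-endian
+ * dwords stored in EEPROM/flash: LONGSWAP reverses the four bytes of a
+ * 32-bit value, SHORTSWAP the two bytes of a 16-bit value. */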
+#define LBYTESWAP(a) ((((a) & 0x00ff00ff) << 8) | \
+ (((a) & 0xff00ff00) >> 8))
+#define LONGSWAP(a) ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16))
+#define SHORTSWAP(a) (((a) << 8) | ((a) >> 8))
+
+/*
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * return: 0 on success, or the idle status register value on error
+ */
+static s32 atl2_reset_hw(struct atl2_hw *hw)
+{
+ u32 icr;
+ u16 pci_cfg_cmd_word;
+ int i;
+
+ /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
+ atl2_read_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word);
+ if ((pci_cfg_cmd_word &
+ (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) !=
+ (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) {
+ pci_cfg_cmd_word |=
+ (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER);
+ atl2_write_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word);
+ }
+
+ /* Clear Interrupt mask to stop board from generating
+ * interrupts & Clear any pending interrupt events
+ */
+ /* FIXME */
+ /* ATL2_WRITE_REG(hw, REG_IMR, 0); */
+ /* ATL2_WRITE_REG(hw, REG_ISR, 0xffffffff); */
+
+ /* Issue Soft Reset to the MAC. This will reset the chip's
+	 * transmit, receive and DMA engines. It will not affect
+ * the current PCI configuration. The global reset bit is self-
+ * clearing, and should clear within a microsecond.
+ */
+ ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST);
+ wmb();
+ msleep(1); /* delay about 1ms */
+
+	/* Wait up to 10 ms for all modules to go idle */
+ for (i = 0; i < 10; i++) {
+ icr = ATL2_READ_REG(hw, REG_IDLE_STATUS);
+ if (!icr)
+ break;
+ msleep(1); /* delay 1 ms */
+ cpu_relax();
+ }
+
+ if (icr)
+ return icr;
+
+ return 0;
+}
+
+#define CUSTOM_SPI_CS_SETUP 2
+#define CUSTOM_SPI_CLK_HI 2
+#define CUSTOM_SPI_CLK_LO 2
+#define CUSTOM_SPI_CS_HOLD 2
+#define CUSTOM_SPI_CS_HI 3
+
+static struct atl2_spi_flash_dev flash_table[] =
+{
+/* MFR WRSR READ PROGRAM WREN WRDI RDSR RDID SECTOR_ERASE CHIP_ERASE */
+{"Atmel", 0x0, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62 },
+{"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60 },
+{"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7 },
+};
+
+static bool atl2_spi_read(struct atl2_hw *hw, u32 addr, u32 *buf)
+{
+ int i;
+ u32 value;
+
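+	/* Program the SPI chip-select/clock timing and instruction slot 1
+	 * (presumably the READ opcode set up by atl2_init_flash_opcode),
+	 * start the transfer, then poll until the controller clears the
+	 * START bit (about 10 ms worst case). */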
+ ATL2_WRITE_REG(hw, REG_SPI_DATA, 0);
+ ATL2_WRITE_REG(hw, REG_SPI_ADDR, addr);
+
+ value = SPI_FLASH_CTRL_WAIT_READY |
+ (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
+ SPI_FLASH_CTRL_CS_SETUP_SHIFT |
+ (CUSTOM_SPI_CLK_HI & SPI_FLASH_CTRL_CLK_HI_MASK) <<
+ SPI_FLASH_CTRL_CLK_HI_SHIFT |
+ (CUSTOM_SPI_CLK_LO & SPI_FLASH_CTRL_CLK_LO_MASK) <<
+ SPI_FLASH_CTRL_CLK_LO_SHIFT |
+ (CUSTOM_SPI_CS_HOLD & SPI_FLASH_CTRL_CS_HOLD_MASK) <<
+ SPI_FLASH_CTRL_CS_HOLD_SHIFT |
+ (CUSTOM_SPI_CS_HI & SPI_FLASH_CTRL_CS_HI_MASK) <<
+ SPI_FLASH_CTRL_CS_HI_SHIFT |
+ (0x1 & SPI_FLASH_CTRL_INS_MASK) << SPI_FLASH_CTRL_INS_SHIFT;
+
+ ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
+
+ value |= SPI_FLASH_CTRL_START;
+
+ ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
+
+ for (i = 0; i < 10; i++) {
+ msleep(1);
+ value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
+ if (!(value & SPI_FLASH_CTRL_START))
+ break;
+ }
+
+ if (value & SPI_FLASH_CTRL_START)
+ return false;
+
+ *buf = ATL2_READ_REG(hw, REG_SPI_DATA);
+
+ return true;
+}
+
+/*
+ * get_permanent_address
+ * return 0 if a valid MAC address was found
+ */
+static int get_permanent_address(struct atl2_hw *hw)
+{
+ u32 Addr[2];
+ u32 i, Control;
+ u16 Register;
+ u8 EthAddr[NODE_ADDRESS_SIZE];
+ bool KeyValid;
+
+ if (is_valid_ether_addr(hw->perm_mac_addr))
+ return 0;
+
+ Addr[0] = 0;
+ Addr[1] = 0;
+
+ if (!atl2_check_eeprom_exist(hw)) { /* eeprom exists */
+ Register = 0;
+ KeyValid = false;
+
+ /* Read out all EEPROM content */
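+		/* The region at 0x100 holds <key, value> dword pairs: a dword
+		 * whose low byte is 0x5A is a key carrying a register offset
+		 * in its high 16 bits; the next dword is that register's
+		 * value. */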
+ i = 0;
+ while (1) {
+ if (atl2_read_eeprom(hw, i + 0x100, &Control)) {
+ if (KeyValid) {
+ if (Register == REG_MAC_STA_ADDR)
+ Addr[0] = Control;
+ else if (Register ==
+ (REG_MAC_STA_ADDR + 4))
+ Addr[1] = Control;
+ KeyValid = false;
+ } else if ((Control & 0xff) == 0x5A) {
+ KeyValid = true;
+ Register = (u16) (Control >> 16);
+ } else {
+					/* assume end of data on an invalid keyword */
+ break;
+ }
+ } else {
+ break; /* read error */
+ }
+ i += 4;
+ }
+
+ *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
+ *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]);
+
+ if (is_valid_ether_addr(EthAddr)) {
+ memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
+ return 0;
+ }
+ return 1;
+ }
+
+	/* otherwise, see if an SPI flash exists */
+ Addr[0] = 0;
+ Addr[1] = 0;
+ Register = 0;
+ KeyValid = false;
+ i = 0;
+ while (1) {
+ if (atl2_spi_read(hw, i + 0x1f000, &Control)) {
+ if (KeyValid) {
+ if (Register == REG_MAC_STA_ADDR)
+ Addr[0] = Control;
+ else if (Register == (REG_MAC_STA_ADDR + 4))
+ Addr[1] = Control;
+ KeyValid = false;
+ } else if ((Control & 0xff) == 0x5A) {
+ KeyValid = true;
+ Register = (u16) (Control >> 16);
+ } else {
+ break; /* data end */
+ }
+ } else {
+ break; /* read error */
+ }
+ i += 4;
+ }
+
+ *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
+ *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *)&Addr[1]);
+ if (is_valid_ether_addr(EthAddr)) {
+ memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
+ return 0;
+ }
+	/* finally, the MAC address may have been set by the BIOS */
+ Addr[0] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR);
+ Addr[1] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR + 4);
+ *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
+ *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]);
+
+ if (is_valid_ether_addr(EthAddr)) {
+ memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Reads the adapter's MAC address from the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ */
+static s32 atl2_read_mac_addr(struct atl2_hw *hw)
+{
+ u16 i;
+
+ if (get_permanent_address(hw)) {
+ /* for test */
+ /* FIXME: shouldn't we use random_ether_addr() here? */
+ hw->perm_mac_addr[0] = 0x00;
+ hw->perm_mac_addr[1] = 0x13;
+ hw->perm_mac_addr[2] = 0x74;
+ hw->perm_mac_addr[3] = 0x00;
+ hw->perm_mac_addr[4] = 0x5c;
+ hw->perm_mac_addr[5] = 0x38;
+ }
+
+ for (i = 0; i < NODE_ADDRESS_SIZE; i++)
+ hw->mac_addr[i] = hw->perm_mac_addr[i];
+
+ return 0;
+}
+
+/*
+ * Hashes an address to determine its location in the multicast table
+ *
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr - the multicast address to hash
+ *
+ * atl2_hash_mc_addr computes the hash in two steps:
+ * 1. calculate a 32-bit CRC of the multicast address
+ * 2. reverse the bit order of the CRC (MSB to LSB)
+ */
+static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr)
+{
+ u32 crc32, value;
+ int i;
+
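+	/* bit-reverse the CRC: bit i of crc32 becomes bit (31 - i) of the
+	 * returned hash value */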
+ value = 0;
+ crc32 = ether_crc_le(6, mc_addr);
+
+ for (i = 0; i < 32; i++)
+ value |= (((crc32 >> i) & 1) << (31 - i));
+
+ return value;
+}
+
+/*
+ * Sets the bit in the multicast table corresponding to the hash value.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ */
+static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value)
+{
+ u32 hash_bit, hash_reg;
+ u32 mta;
+
+ /* The HASH Table is a register array of 2 32-bit registers.
+ * It is treated like an array of 64 bits. We want to set
+ * bit BitArray[hash_value]. So we figure out what register
+ * the bit is in, read it, OR in the new bit, then write
+ * back the new value. The register is determined by the
+	 * most significant bit of the hash value, and the bit within that
+	 * register is determined by bits 30:26 of the value.
+ */
+ hash_reg = (hash_value >> 31) & 0x1;
+ hash_bit = (hash_value >> 26) & 0x1F;
+
+ mta = ATL2_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg);
+
+ mta |= (1 << hash_bit);
+
+ ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta);
+}
+
+/*
+ * atl2_init_pcie - init PCIE module
+ */
+static void atl2_init_pcie(struct atl2_hw *hw)
+{
+ u32 value;
+ value = LTSSM_TEST_MODE_DEF;
+ ATL2_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value);
+
+ value = PCIE_DLL_TX_CTRL1_DEF;
+ ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, value);
+}
+
+static void atl2_init_flash_opcode(struct atl2_hw *hw)
+{
+ if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
+ hw->flash_vendor = 0; /* ATMEL */
+
+ /* Init OP table */
+ ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_PROGRAM,
+ flash_table[hw->flash_vendor].cmdPROGRAM);
+ ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_SC_ERASE,
+ flash_table[hw->flash_vendor].cmdSECTOR_ERASE);
+ ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_CHIP_ERASE,
+ flash_table[hw->flash_vendor].cmdCHIP_ERASE);
+ ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDID,
+ flash_table[hw->flash_vendor].cmdRDID);
+ ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WREN,
+ flash_table[hw->flash_vendor].cmdWREN);
+ ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDSR,
+ flash_table[hw->flash_vendor].cmdRDSR);
+ ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WRSR,
+ flash_table[hw->flash_vendor].cmdWRSR);
+ ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_READ,
+ flash_table[hw->flash_vendor].cmdREAD);
+}
+
+/********************************************************************
+* Performs basic configuration of the adapter.
+*
+* hw - Struct containing variables accessed by shared code
+* Assumes that the controller has previously been reset and is in a
+* post-reset uninitialized state. Initializes the multicast table
+* and calls routines to set up the link.
+* Leaves the transmit and receive units disabled and uninitialized.
+********************************************************************/
+static s32 atl2_init_hw(struct atl2_hw *hw)
+{
+ u32 ret_val = 0;
+
+ atl2_init_pcie(hw);
+
+ /* Zero out the Multicast HASH table */
+ /* clear the old settings from the multicast hash table */
+ ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
+ ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
+
+ atl2_init_flash_opcode(hw);
+
+ ret_val = atl2_phy_init(hw);
+
+ return ret_val;
+}
+
+/*
+ * Detects the current speed and duplex settings of the hardware.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * speed - Speed of the connection
+ * duplex - Duplex setting of the connection
+ */
+static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ /* Read PHY Specific Status Register (17) */
+ ret_val = atl2_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
+ return ATLX_ERR_PHY_RES;
+
+ switch (phy_data & MII_ATLX_PSSR_SPEED) {
+ case MII_ATLX_PSSR_100MBS:
+ *speed = SPEED_100;
+ break;
+ case MII_ATLX_PSSR_10MBS:
+ *speed = SPEED_10;
+ break;
+ default:
+		return ATLX_ERR_PHY_SPEED;
+ }
+
+ if (phy_data & MII_ATLX_PSSR_DPLX)
+ *duplex = FULL_DUPLEX;
+ else
+ *duplex = HALF_DUPLEX;
+
+ return 0;
+}
+
+/*
+ * Reads the value from a PHY register
+ * hw - Struct containing variables accessed by shared code
+ * reg_addr - address of the PHY register to read
+ */
+static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data)
+{
+ u32 val;
+ int i;
+
+ val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
+ MDIO_START |
+ MDIO_SUP_PREAMBLE |
+ MDIO_RW |
+ MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
+ ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val);
+
+ wmb();
+
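+	/* the MDIO controller clears MDIO_START/MDIO_BUSY once the register
+	 * value has been shifted in; poll up to MDIO_WAIT_TIMES * 2us */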
+ for (i = 0; i < MDIO_WAIT_TIMES; i++) {
+ udelay(2);
+ val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ break;
+ wmb();
+ }
+ if (!(val & (MDIO_START | MDIO_BUSY))) {
+ *phy_data = (u16)val;
+ return 0;
+ }
+
+ return ATLX_ERR_PHY;
+}
+
+/*
+ * Writes a value to a PHY register
+ * hw - Struct containing variables accessed by shared code
+ * reg_addr - address of the PHY register to write
+ * data - data to write to the PHY
+ */
+static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data)
+{
+ int i;
+ u32 val;
+
+ val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
+ (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
+ MDIO_SUP_PREAMBLE |
+ MDIO_START |
+ MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
+ ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val);
+
+ wmb();
+
+ for (i = 0; i < MDIO_WAIT_TIMES; i++) {
+ udelay(2);
+ val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ break;
+
+ wmb();
+ }
+
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ return 0;
+
+ return ATLX_ERR_PHY;
+}
+
+/*
+ * Configures PHY autoneg and flow control advertisement settings
+ *
+ * hw - Struct containing variables accessed by shared code
+ */
+static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw)
+{
+ s32 ret_val;
+ s16 mii_autoneg_adv_reg;
+
+	/* Start from the default capability mask for the MII Auto-Neg
+	 * Advertisement Register (Address 4) */
+ mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
+
+ /* Need to parse autoneg_advertised and set up
+ * the appropriate PHY registers. First we will parse for
+ * autoneg_advertised software override. Since we can advertise
+ * a plethora of combinations, we need to check each bit
+ * individually.
+ */
+
+	/* First we clear all the 10/100 Mbps speed bits in the Auto-Neg
+	 * Advertisement Register (Address 4); this controller has no
+	 * 1000Base-T support. */
+ mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
+
+ /* Need to parse MediaType and setup the
+ * appropriate PHY registers. */
+ switch (hw->MediaType) {
+ case MEDIA_TYPE_AUTO_SENSOR:
+ mii_autoneg_adv_reg |=
+ (MII_AR_10T_HD_CAPS |
+ MII_AR_10T_FD_CAPS |
+ MII_AR_100TX_HD_CAPS|
+ MII_AR_100TX_FD_CAPS);
+ hw->autoneg_advertised =
+ ADVERTISE_10_HALF |
+ ADVERTISE_10_FULL |
+ ADVERTISE_100_HALF|
+ ADVERTISE_100_FULL;
+ break;
+ case MEDIA_TYPE_100M_FULL:
+ mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
+ hw->autoneg_advertised = ADVERTISE_100_FULL;
+ break;
+ case MEDIA_TYPE_100M_HALF:
+ mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
+ hw->autoneg_advertised = ADVERTISE_100_HALF;
+ break;
+ case MEDIA_TYPE_10M_FULL:
+ mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
+ hw->autoneg_advertised = ADVERTISE_10_FULL;
+ break;
+ default:
+ mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
+ hw->autoneg_advertised = ADVERTISE_10_HALF;
+ break;
+ }
+
+ /* flow control fixed to enable all */
+ mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
+
+ hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
+
+ ret_val = atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
+
+	return ret_val;
+}
+
+/*
+ * Resets the PHY and makes all configuration take effect
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sets bits 15 and 12 of the MII Control register (for the F001 bug)
+ */
+static s32 atl2_phy_commit(struct atl2_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
+ ret_val = atl2_write_phy_reg(hw, MII_BMCR, phy_data);
+ if (ret_val) {
+ u32 val;
+ int i;
+		/* the PCIe SerDes link may be down! */
+ for (i = 0; i < 25; i++) {
+ msleep(1);
+ val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ break;
+ }
+
+ if (0 != (val & (MDIO_START | MDIO_BUSY))) {
+			printk(KERN_ERR "atl2: PCIe link down for at least 25ms!\n");
+ return ret_val;
+ }
+ }
+ return 0;
+}
+
+static s32 atl2_phy_init(struct atl2_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_val;
+
+ if (hw->phy_configured)
+ return 0;
+
+ /* Enable PHY */
+ ATL2_WRITE_REGW(hw, REG_PHY_ENABLE, 1);
+ ATL2_WRITE_FLUSH(hw);
+ msleep(1);
+
+ /* check if the PHY is in powersaving mode */
+ atl2_write_phy_reg(hw, MII_DBG_ADDR, 0);
+ atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val);
+
+	/* 024E / 124E or 0274 / 1274 ? */
+ if (phy_val & 0x1000) {
+ phy_val &= ~0x1000;
+ atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val);
+ }
+
+ msleep(1);
+
+	/* Enable PHY LinkChange Interrupt */
+ ret_val = atl2_write_phy_reg(hw, 18, 0xC00);
+ if (ret_val)
+ return ret_val;
+
+ /* setup AutoNeg parameters */
+ ret_val = atl2_phy_setup_autoneg_adv(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* SW.Reset & En-Auto-Neg to restart Auto-Neg */
+ ret_val = atl2_phy_commit(hw);
+ if (ret_val)
+ return ret_val;
+
+ hw->phy_configured = true;
+
+ return ret_val;
+}
+
+static void atl2_set_mac_addr(struct atl2_hw *hw)
+{
+ u32 value;
+	/* Example: MAC 00-0B-6A-F6-00-DC is stored as
+	 * register 0 (low dword) = 6AF600DC, register 1 (high word) = 000B.
+	 * low dword: */
+ value = (((u32)hw->mac_addr[2]) << 24) |
+ (((u32)hw->mac_addr[3]) << 16) |
+ (((u32)hw->mac_addr[4]) << 8) |
+ (((u32)hw->mac_addr[5]));
+ ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
+	/* high dword */
+ value = (((u32)hw->mac_addr[0]) << 8) |
+ (((u32)hw->mac_addr[1]));
+ ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
+}
+
+/*
+ * check_eeprom_exist
+ * return 0 if eeprom exist
+ */
+static int atl2_check_eeprom_exist(struct atl2_hw *hw)
+{
+ u32 value;
+
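+	/* Disable VPD-over-SPI if it is enabled, then probe the PCIe
+	 * capability list; a value of 0x6C in the upper byte appears to
+	 * indicate that a dedicated EEPROM is present. */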
+ value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
+ if (value & SPI_FLASH_CTRL_EN_VPD) {
+ value &= ~SPI_FLASH_CTRL_EN_VPD;
+ ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
+ }
+ value = ATL2_READ_REGW(hw, REG_PCIE_CAP_LIST);
+ return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
+}
+
+/* FIXME: This doesn't look right. -- CHS */
+static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value)
+{
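+	/* not implemented: claiming success without writing makes
+	 * atl2_set_eeprom() a silent no-op */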
+ return true;
+}
+
+static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue)
+{
+ int i;
+ u32 Control;
+
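+	/* Read through the PCI VPD capability: program the dword address,
+	 * poll the completion flag (up to ~20 ms), then fetch the data
+	 * register. */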
+ if (Offset & 0x3)
+		return false; /* address is not dword-aligned */
+
+ ATL2_WRITE_REG(hw, REG_VPD_DATA, 0);
+ Control = (Offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
+ ATL2_WRITE_REG(hw, REG_VPD_CAP, Control);
+
+ for (i = 0; i < 10; i++) {
+ msleep(2);
+ Control = ATL2_READ_REG(hw, REG_VPD_CAP);
+ if (Control & VPD_CAP_VPD_FLAG)
+ break;
+ }
+
+ if (Control & VPD_CAP_VPD_FLAG) {
+ *pValue = ATL2_READ_REG(hw, REG_VPD_DATA);
+ return true;
+ }
+ return false; /* timeout */
+}
+
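+/* Force the PHY into its power-saving state through the vendor debug
+ * registers: set bit 12 of debug register 0, then program debug registers
+ * 2 and 3 with (undocumented) values. */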
+static void atl2_force_ps(struct atl2_hw *hw)
+{
+ u16 phy_val;
+
+ atl2_write_phy_reg(hw, MII_DBG_ADDR, 0);
+ atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val);
+ atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val | 0x1000);
+
+ atl2_write_phy_reg(hw, MII_DBG_ADDR, 2);
+ atl2_write_phy_reg(hw, MII_DBG_DATA, 0x3000);
+ atl2_write_phy_reg(hw, MII_DBG_ADDR, 3);
+ atl2_write_phy_reg(hw, MII_DBG_DATA, 0);
+}
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+#define ATL2_MAX_NIC 4
+
+#define OPTION_UNSET -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED 1
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+#define ATL2_PARAM_INIT {[0 ... ATL2_MAX_NIC] = OPTION_UNSET}
+#ifndef module_param_array
+/* Module Parameters are always initialized to -1, so that the driver
+ * can tell the difference between no user specified value or the
+ * user asking for the default value.
+ * The true default values are loaded in when atl2_check_options is called.
+ *
+ * This is a GCC extension to ANSI C.
+ * See the item "Labeled Elements in Initializers" in the section
+ * "Extensions to the C Language Family" of the GCC documentation.
+ */
+
+#define ATL2_PARAM(X, desc) \
+ static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
+ MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
+ MODULE_PARM_DESC(X, desc);
+#else
+#define ATL2_PARAM(X, desc) \
+ static int __devinitdata X[ATL2_MAX_NIC+1] = ATL2_PARAM_INIT; \
+ static unsigned int num_##X; \
+ module_param_array_named(X, X, int, &num_##X, 0); \
+ MODULE_PARM_DESC(X, desc);
+#endif
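+/* Example with hypothetical values: loading the module with
+ *	modprobe atl2 TxMemSize=8,16 MediaType=0,2
+ * gives board 0 an 8KB transmit buffer with autonegotiation and board 1 a
+ * 16KB transmit buffer forced to 100Mbps full duplex. */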
+
+/*
+ * Transmit Memory Size
+ * Valid Range: 4-64 (KB)
+ * Default Value: 8 (KB)
+ */
+#define ATL2_MIN_TX_MEMSIZE 4 /* 4KB */
+#define ATL2_MAX_TX_MEMSIZE 64 /* 64KB */
+#define ATL2_DEFAULT_TX_MEMSIZE 8 /* 8KB */
+ATL2_PARAM(TxMemSize, "Bytes of Transmit Memory");
+
+/*
+ * Receive Memory Block Count
+ * Valid Range: 16-512
+ * Default Value: 64
+ */
+#define ATL2_MIN_RXD_COUNT 16
+#define ATL2_MAX_RXD_COUNT 512
+#define ATL2_DEFAULT_RXD_COUNT 64
+ATL2_PARAM(RxMemBlock, "Number of receive memory block");
+
+/*
+ * User Specified MediaType Override
+ *
+ * Valid Range: 0-5
+ * - 0 - auto-negotiate at all supported speeds
+ * - 1 - only link at 1000Mbps Full Duplex
+ * - 2 - only link at 100Mbps Full Duplex
+ * - 3 - only link at 100Mbps Half Duplex
+ * - 4 - only link at 10Mbps Full Duplex
+ * - 5 - only link at 10Mbps Half Duplex
+ * Default Value: 0
+ */
+ATL2_PARAM(MediaType, "MediaType Select");
+
+/*
+ * Interrupt Moderate Timer in units of 2048 ns (~2 us)
+ * Valid Range: 50-65000
+ * Default Value: 100 (~200us)
+ */
+#define INT_MOD_DEFAULT_CNT 100 /* 200us */
+#define INT_MOD_MAX_CNT 65000
+#define INT_MOD_MIN_CNT 50
+ATL2_PARAM(IntModTimer, "Interrupt Moderator Timer");
+
+/*
+ * FlashVendor
+ * Valid Range: 0-2
+ * 0 - Atmel
+ * 1 - SST
+ * 2 - ST
+ */
+ATL2_PARAM(FlashVendor, "SPI Flash Vendor");
+
+#define AUTONEG_ADV_DEFAULT 0x2F
+#define AUTONEG_ADV_MASK 0x2F
+#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
+
+#define FLASH_VENDOR_DEFAULT 0
+#define FLASH_VENDOR_MIN 0
+#define FLASH_VENDOR_MAX 2
+
+struct atl2_option {
+ enum { enable_option, range_option, list_option } type;
+ char *name;
+ char *err;
+ int def;
+ union {
+ struct { /* range_option info */
+ int min;
+ int max;
+ } r;
+ struct { /* list_option info */
+ int nr;
+ struct atl2_opt_list { int i; char *str; } *p;
+ } l;
+ } arg;
+};
+
+static int __devinit atl2_validate_option(int *value, struct atl2_option *opt)
+{
+ int i;
+ struct atl2_opt_list *ent;
+
+ if (*value == OPTION_UNSET) {
+ *value = opt->def;
+ return 0;
+ }
+
+ switch (opt->type) {
+ case enable_option:
+ switch (*value) {
+ case OPTION_ENABLED:
+ printk(KERN_INFO "%s Enabled\n", opt->name);
+ return 0;
+ case OPTION_DISABLED:
+ printk(KERN_INFO "%s Disabled\n", opt->name);
+ return 0;
+ }
+ break;
+ case range_option:
+ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+ printk(KERN_INFO "%s set to %i\n", opt->name, *value);
+ return 0;
+ }
+ break;
+ case list_option:
+ for (i = 0; i < opt->arg.l.nr; i++) {
+ ent = &opt->arg.l.p[i];
+ if (*value == ent->i) {
+ if (ent->str[0] != '\0')
+ printk(KERN_INFO "%s\n", ent->str);
+ return 0;
+ }
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ printk(KERN_INFO "Invalid %s specified (%i) %s\n",
+ opt->name, *value, opt->err);
+ *value = opt->def;
+ return -1;
+}
+
+/*
+ * atl2_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input. If an invalid value is given, or if no user specified
+ * value exists, a default value is used. The final value is stored
+ * in a variable in the adapter structure.
+ */
+static void __devinit atl2_check_options(struct atl2_adapter *adapter)
+{
+ int val;
+ struct atl2_option opt;
+ int bd = adapter->bd_number;
+ if (bd >= ATL2_MAX_NIC) {
+ printk(KERN_NOTICE "Warning: no configuration for board #%i\n",
+ bd);
+ printk(KERN_NOTICE "Using defaults for all values\n");
+#ifndef module_param_array
+ bd = ATL2_MAX_NIC;
+#endif
+ }
+
+ /* Bytes of Transmit Memory */
+ opt.type = range_option;
+ opt.name = "Bytes of Transmit Memory";
+ opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_TX_MEMSIZE);
+ opt.def = ATL2_DEFAULT_TX_MEMSIZE;
+ opt.arg.r.min = ATL2_MIN_TX_MEMSIZE;
+ opt.arg.r.max = ATL2_MAX_TX_MEMSIZE;
+#ifdef module_param_array
+ if (num_TxMemSize > bd) {
+#endif
+ val = TxMemSize[bd];
+ atl2_validate_option(&val, &opt);
+ adapter->txd_ring_size = ((u32) val) * 1024;
+#ifdef module_param_array
+ } else
+ adapter->txd_ring_size = ((u32)opt.def) * 1024;
+#endif
+ /* txs ring size: */
+ adapter->txs_ring_size = adapter->txd_ring_size / 128;
+ if (adapter->txs_ring_size > 160)
+ adapter->txs_ring_size = 160;
+
+ /* Receive Memory Block Count */
+ opt.type = range_option;
+	opt.name = "Number of receive memory blocks";
+ opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_RXD_COUNT);
+ opt.def = ATL2_DEFAULT_RXD_COUNT;
+ opt.arg.r.min = ATL2_MIN_RXD_COUNT;
+ opt.arg.r.max = ATL2_MAX_RXD_COUNT;
+#ifdef module_param_array
+ if (num_RxMemBlock > bd) {
+#endif
+ val = RxMemBlock[bd];
+ atl2_validate_option(&val, &opt);
+ adapter->rxd_ring_size = (u32)val;
+ /* FIXME */
+ /* ((u16)val)&~1; */ /* even number */
+#ifdef module_param_array
+ } else
+ adapter->rxd_ring_size = (u32)opt.def;
+#endif
+	/* init RXD flow control thresholds: assert flow control when the
+	 * ring is 7/8 full, deassert at max(ATL2_MIN_RXD_COUNT/8, ring/12) */
+ adapter->hw.fc_rxd_hi = (adapter->rxd_ring_size / 8) * 7;
+ adapter->hw.fc_rxd_lo = (ATL2_MIN_RXD_COUNT / 8) >
+ (adapter->rxd_ring_size / 12) ? (ATL2_MIN_RXD_COUNT / 8) :
+ (adapter->rxd_ring_size / 12);
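+	/* e.g. with the default of 64 RX blocks: fc_rxd_hi = 56 and
+	 * fc_rxd_lo = max(16 / 8, 64 / 12) = max(2, 5) = 5
+	 */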
+
+ /* Interrupt Moderate Timer */
+ opt.type = range_option;
+ opt.name = "Interrupt Moderate Timer";
+ opt.err = "using default of " __MODULE_STRING(INT_MOD_DEFAULT_CNT);
+ opt.def = INT_MOD_DEFAULT_CNT;
+ opt.arg.r.min = INT_MOD_MIN_CNT;
+ opt.arg.r.max = INT_MOD_MAX_CNT;
+#ifdef module_param_array
+ if (num_IntModTimer > bd) {
+#endif
+ val = IntModTimer[bd];
+ atl2_validate_option(&val, &opt);
+ adapter->imt = (u16) val;
+#ifdef module_param_array
+ } else
+ adapter->imt = (u16)(opt.def);
+#endif
+ /* Flash Vendor */
+ opt.type = range_option;
+ opt.name = "SPI Flash Vendor";
+ opt.err = "using default of " __MODULE_STRING(FLASH_VENDOR_DEFAULT);
+ opt.def = FLASH_VENDOR_DEFAULT;
+ opt.arg.r.min = FLASH_VENDOR_MIN;
+ opt.arg.r.max = FLASH_VENDOR_MAX;
+#ifdef module_param_array
+ if (num_FlashVendor > bd) {
+#endif
+ val = FlashVendor[bd];
+ atl2_validate_option(&val, &opt);
+ adapter->hw.flash_vendor = (u8) val;
+#ifdef module_param_array
+ } else
+ adapter->hw.flash_vendor = (u8)(opt.def);
+#endif
+ /* MediaType */
+ opt.type = range_option;
+ opt.name = "Speed/Duplex Selection";
+ opt.err = "using default of " __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR);
+ opt.def = MEDIA_TYPE_AUTO_SENSOR;
+ opt.arg.r.min = MEDIA_TYPE_AUTO_SENSOR;
+ opt.arg.r.max = MEDIA_TYPE_10M_HALF;
+#ifdef module_param_array
+ if (num_MediaType > bd) {
+#endif
+ val = MediaType[bd];
+ atl2_validate_option(&val, &opt);
+ adapter->hw.MediaType = (u16) val;
+#ifdef module_param_array
+ } else
+ adapter->hw.MediaType = (u16)(opt.def);
+#endif
+}
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.h b/drivers/net/ethernet/atheros/atlx/atl2.h
new file mode 100644
index 000000000000..bf9016ebdd9b
--- /dev/null
+++ b/drivers/net/ethernet/atheros/atlx/atl2.h
@@ -0,0 +1,525 @@
+/* atl2.h -- atl2 driver definitions
+ *
+ * Copyright(c) 2007 Atheros Corporation. All rights reserved.
+ * Copyright(c) 2006 xiong huang <xiong.huang@atheros.com>
+ * Copyright(c) 2007 Chris Snook <csnook@redhat.com>
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _ATL2_H_
+#define _ATL2_H_
+
+#include <linux/atomic.h>
+#include <linux/netdevice.h>
+
+#ifndef _ATL2_HW_H_
+#define _ATL2_HW_H_
+
+#ifndef _ATL2_OSDEP_H_
+#define _ATL2_OSDEP_H_
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+
+#include "atlx.h"
+
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *ifr);
+#endif
+
+#define PCI_COMMAND_REGISTER PCI_COMMAND
+#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
+#define ETH_ADDR_LEN ETH_ALEN
+
+#define ATL2_WRITE_REG(a, reg, value) (iowrite32((value), \
+ ((a)->hw_addr + (reg))))
+
+#define ATL2_WRITE_FLUSH(a) (ioread32((a)->hw_addr))
+
+#define ATL2_READ_REG(a, reg) (ioread32((a)->hw_addr + (reg)))
+
+#define ATL2_WRITE_REGB(a, reg, value) (iowrite8((value), \
+ ((a)->hw_addr + (reg))))
+
+#define ATL2_READ_REGB(a, reg) (ioread8((a)->hw_addr + (reg)))
+
+#define ATL2_WRITE_REGW(a, reg, value) (iowrite16((value), \
+ ((a)->hw_addr + (reg))))
+
+#define ATL2_READ_REGW(a, reg) (ioread16((a)->hw_addr + (reg)))
+
+#define ATL2_WRITE_REG_ARRAY(a, reg, offset, value) \
+ (iowrite32((value), (((a)->hw_addr + (reg)) + ((offset) << 2))))
+
+#define ATL2_READ_REG_ARRAY(a, reg, offset) \
+ (ioread32(((a)->hw_addr + (reg)) + ((offset) << 2)))
+
+#endif /* _ATL2_OSDEP_H_ */
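To illustrate how the accessors above are meant to be used, here is a sketch (not part of the patch) of a read-modify-write of the MAC control register; REG_MAC_CTRL and MAC_CTRL_RX_EN are defined in atlx.h later in this patch, and the full struct atl2_hw definition given further below is assumed to be in scope:

    /* Sketch: enable the receiver via the ATL2_* accessors. */
    static inline void example_enable_rx(struct atl2_hw *hw)
    {
    	u32 ctrl = ATL2_READ_REG(hw, REG_MAC_CTRL);

    	ATL2_WRITE_REG(hw, REG_MAC_CTRL, ctrl | MAC_CTRL_RX_EN);
    	ATL2_WRITE_FLUSH(hw);	/* read back to post the write */
    }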
+
+struct atl2_adapter;
+struct atl2_hw;
+
+/* function prototype */
+static s32 atl2_reset_hw(struct atl2_hw *hw);
+static s32 atl2_read_mac_addr(struct atl2_hw *hw);
+static s32 atl2_init_hw(struct atl2_hw *hw);
+static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed,
+ u16 *duplex);
+static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr);
+static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value);
+static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data);
+static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data);
+static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value);
+static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value);
+static void atl2_set_mac_addr(struct atl2_hw *hw);
+static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue);
+static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value);
+static s32 atl2_phy_init(struct atl2_hw *hw);
+static int atl2_check_eeprom_exist(struct atl2_hw *hw);
+static void atl2_force_ps(struct atl2_hw *hw);
+
+/* register definition */
+
+/* Block IDLE Status Register */
+#define IDLE_STATUS_RXMAC 1 /* 1: RXMAC is non-IDLE */
+#define IDLE_STATUS_TXMAC 2 /* 1: TXMAC is non-IDLE */
+#define IDLE_STATUS_DMAR 8 /* 1: DMAR is non-IDLE */
+#define IDLE_STATUS_DMAW 4 /* 1: DMAW is non-IDLE */
+
+/* MDIO Control Register */
+#define MDIO_WAIT_TIMES 10
+
+/* MAC Control Register */
+#define MAC_CTRL_DBG_TX_BKPRESURE 0x100000 /* 1: TX max backoff */
+#define MAC_CTRL_MACLP_CLK_PHY 0x8000000 /* 1: 25MHz from phy */
+#define MAC_CTRL_HALF_LEFT_BUF_SHIFT 28
+#define MAC_CTRL_HALF_LEFT_BUF_MASK 0xF /* MAC retry buf x32B */
+
+/* Internal SRAM Partition Register */
+#define REG_SRAM_TXRAM_END 0x1500 /* Internal tail address of TXRAM
+ * default: 2byte*1024 */
+#define REG_SRAM_RXRAM_END 0x1502 /* Internal tail address of RXRAM
+ * default: 2byte*1024 */
+
+/* Descriptor Control register */
+#define REG_TXD_BASE_ADDR_LO 0x1544 /* Base address of the Transmit
+ * Data Memory, low 32 bits (dword
+ * aligned) */
+#define REG_TXD_MEM_SIZE 0x1548 /* Transmit Data Memory size (in
+ * double words, max 256KB) */
+#define REG_TXS_BASE_ADDR_LO 0x154C /* Base address of the Transmit
+ * Status Memory, low 32 bits (dword
+ * aligned) */
+#define REG_TXS_MEM_SIZE 0x1550 /* double word unit, max 4*2047
+ * bytes */
+#define REG_RXD_BASE_ADDR_LO 0x1554 /* Base address of the Receive Data
+ * & Status Memory, low 32 bits
+ * (unit: 8 bytes) */
+#define REG_RXD_BUF_NUM 0x1558 /* Receive Data & Status Memory buffer
+ * number (unit: 1536 bytes, max
+ * 1536*2047) */
+
+/* DMAR Control Register */
+#define REG_DMAR 0x1580
+#define DMAR_EN 0x1 /* 1: Enable DMAR */
+
+/* TX Cut-Through (early tx threshold) Control Register */
+#define REG_TX_CUT_THRESH 0x1590 /* TxMac begins transmitting a packet
+ * at this threshold (unit: word) */
+
+/* DMAW Control Register */
+#define REG_DMAW 0x15A0
+#define DMAW_EN 0x1
+
+/* Flow control register */
+#define REG_PAUSE_ON_TH 0x15A8 /* RXD high watermark of overflow
+ * threshold configuration register */
+#define REG_PAUSE_OFF_TH 0x15AA /* RXD lower watermark of overflow
+ * threshold configuration register */
+
+/* Mailbox Register */
+#define REG_MB_TXD_WR_IDX 0x15f0 /* double word align */
+#define REG_MB_RXD_RD_IDX 0x15F4 /* RXD Read index (unit: 1536 bytes) */
+
+/* Interrupt Status Register */
+#define ISR_TIMER 1 /* Interrupt when Timer counts down to zero */
+#define ISR_MANUAL 2 /* Software manual interrupt, for debug. Set
+ * when SW_MAN_INT_EN is set in Table 51
+ * Selene Master Control Register
+ * (Offset 0x1400). */
+#define ISR_RXF_OV 4 /* RXF overflow interrupt */
+#define ISR_TXF_UR 8 /* TXF underrun interrupt */
+#define ISR_TXS_OV 0x10 /* Internal transmit status buffer full
+ * interrupt */
+#define ISR_RXS_OV 0x20 /* Internal receive status buffer full
+ * interrupt */
+#define ISR_LINK_CHG 0x40 /* Link Status Change Interrupt */
+#define ISR_HOST_TXD_UR 0x80
+#define ISR_HOST_RXD_OV 0x100 /* Host rx data memory full, one pulse */
+#define ISR_DMAR_TO_RST 0x200 /* DMAR op timeout interrupt. SW should
+ * do Reset */
+#define ISR_DMAW_TO_RST 0x400
+#define ISR_PHY 0x800 /* phy interrupt */
+#define ISR_TS_UPDATE 0x10000 /* interrupt after new tx pkt status written
+ * to host */
+#define ISR_RS_UPDATE 0x20000 /* interrupt after new rx pkt status written
+ * to host. */
+#define ISR_TX_EARLY 0x40000 /* interrupt when txmac begin transmit one
+ * packet */
+
+#define ISR_TX_EVENT (ISR_TXF_UR | ISR_TXS_OV | ISR_HOST_TXD_UR |\
+ ISR_TS_UPDATE | ISR_TX_EARLY)
+#define ISR_RX_EVENT (ISR_RXF_OV | ISR_RXS_OV | ISR_HOST_RXD_OV |\
+ ISR_RS_UPDATE)
+
+#define IMR_NORMAL_MASK (\
+ /*ISR_LINK_CHG |*/\
+ ISR_MANUAL |\
+ ISR_DMAR_TO_RST |\
+ ISR_DMAW_TO_RST |\
+ ISR_PHY |\
+ ISR_PHY_LINKDOWN |\
+ ISR_TS_UPDATE |\
+ ISR_RS_UPDATE)
+
+/* Receive MAC Statistics Registers */
+#define REG_STS_RX_PAUSE 0x1700 /* Num pause packets received */
+#define REG_STS_RXD_OV 0x1704 /* Num frames dropped due to RX
+ * FIFO overflow */
+#define REG_STS_RXS_OV 0x1708 /* Num frames dropped due to RX
+ * Status Buffer Overflow */
+#define REG_STS_RX_FILTER 0x170C /* Num packets dropped due to
+ * address filtering */
+
+/* MII definitions */
+
+/* PHY Common Register */
+#define MII_SMARTSPEED 0x14
+#define MII_DBG_ADDR 0x1D
+#define MII_DBG_DATA 0x1E
+
+/* PCI Command Register Bit Definitions */
+#define PCI_REG_COMMAND 0x04
+#define CMD_IO_SPACE 0x0001
+#define CMD_MEMORY_SPACE 0x0002
+#define CMD_BUS_MASTER 0x0004
+
+#define MEDIA_TYPE_100M_FULL 1
+#define MEDIA_TYPE_100M_HALF 2
+#define MEDIA_TYPE_10M_FULL 3
+#define MEDIA_TYPE_10M_HALF 4
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x000F /* Everything */
+
+/* The size (in bytes) of an Ethernet packet */
+#define ENET_HEADER_SIZE 14
+#define MAXIMUM_ETHERNET_FRAME_SIZE 1518 /* with FCS */
+#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* with FCS */
+#define ETHERNET_FCS_SIZE 4
+#define MAX_JUMBO_FRAME_SIZE 0x2000
+#define VLAN_SIZE 4
+
+struct tx_pkt_header {
+ unsigned pkt_size:11;
+ unsigned:4; /* reserved */
+ unsigned ins_vlan:1; /* txmac should insert vlan */
+ unsigned short vlan; /* vlan tag */
+};
+/* FIXME: replace above bitfields with MASK/SHIFT defines below */
+#define TX_PKT_HEADER_SIZE_MASK 0x7FF
+#define TX_PKT_HEADER_SIZE_SHIFT 0
+#define TX_PKT_HEADER_INS_VLAN_MASK 0x1
+#define TX_PKT_HEADER_INS_VLAN_SHIFT 15
+#define TX_PKT_HEADER_VLAN_TAG_MASK 0xFFFF
+#define TX_PKT_HEADER_VLAN_TAG_SHIFT 16
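The FIXME above asks for the bitfields to be replaced by these MASK/SHIFT macros; a sketch of the packing they would enable, assuming the same field layout as struct tx_pkt_header (illustrative only, not part of the patch):

    static inline u32 tx_pkt_header_pack(u16 pkt_size, bool ins_vlan, u16 vlan)
    {
    	return ((pkt_size & TX_PKT_HEADER_SIZE_MASK) << TX_PKT_HEADER_SIZE_SHIFT) |
    	       ((ins_vlan ? 1u : 0u) << TX_PKT_HEADER_INS_VLAN_SHIFT) |
    	       ((u32)(vlan & TX_PKT_HEADER_VLAN_TAG_MASK) << TX_PKT_HEADER_VLAN_TAG_SHIFT);
    }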
+
+struct tx_pkt_status {
+ unsigned pkt_size:11;
+ unsigned:5; /* reserved */
+ unsigned ok:1; /* current packet transmitted without error */
+ unsigned bcast:1; /* broadcast packet */
+ unsigned mcast:1; /* multicast packet */
+	unsigned pause:1; /* transmitted a pause frame */
+ unsigned ctrl:1;
+ unsigned defer:1; /* current packet is xmitted with defer */
+ unsigned exc_defer:1;
+ unsigned single_col:1;
+ unsigned multi_col:1;
+ unsigned late_col:1;
+ unsigned abort_col:1;
+ unsigned underun:1; /* current packet is aborted
+ * due to txram underrun */
+ unsigned:3; /* reserved */
+ unsigned update:1; /* always 1'b1 in tx_status_buf */
+};
+/* FIXME: replace above bitfields with MASK/SHIFT defines below */
+#define TX_PKT_STATUS_SIZE_MASK 0x7FF
+#define TX_PKT_STATUS_SIZE_SHIFT 0
+#define TX_PKT_STATUS_OK_MASK 0x1
+#define TX_PKT_STATUS_OK_SHIFT 16
+#define TX_PKT_STATUS_BCAST_MASK 0x1
+#define TX_PKT_STATUS_BCAST_SHIFT 17
+#define TX_PKT_STATUS_MCAST_MASK 0x1
+#define TX_PKT_STATUS_MCAST_SHIFT 18
+#define TX_PKT_STATUS_PAUSE_MASK 0x1
+#define TX_PKT_STATUS_PAUSE_SHIFT 19
+#define TX_PKT_STATUS_CTRL_MASK 0x1
+#define TX_PKT_STATUS_CTRL_SHIFT 20
+#define TX_PKT_STATUS_DEFER_MASK 0x1
+#define TX_PKT_STATUS_DEFER_SHIFT 21
+#define TX_PKT_STATUS_EXC_DEFER_MASK 0x1
+#define TX_PKT_STATUS_EXC_DEFER_SHIFT 22
+#define TX_PKT_STATUS_SINGLE_COL_MASK 0x1
+#define TX_PKT_STATUS_SINGLE_COL_SHIFT 23
+#define TX_PKT_STATUS_MULTI_COL_MASK 0x1
+#define TX_PKT_STATUS_MULTI_COL_SHIFT 24
+#define TX_PKT_STATUS_LATE_COL_MASK 0x1
+#define TX_PKT_STATUS_LATE_COL_SHIFT 25
+#define TX_PKT_STATUS_ABORT_COL_MASK 0x1
+#define TX_PKT_STATUS_ABORT_COL_SHIFT 26
+#define TX_PKT_STATUS_UNDERRUN_MASK 0x1
+#define TX_PKT_STATUS_UNDERRUN_SHIFT 27
+#define TX_PKT_STATUS_UPDATE_MASK 0x1
+#define TX_PKT_STATUS_UPDATE_SHIFT 31
+
+struct rx_pkt_status {
+ unsigned pkt_size:11; /* packet size, max 2047 bytes */
+ unsigned:5; /* reserved */
+ unsigned ok:1; /* current packet received ok without error */
+ unsigned bcast:1; /* current packet is broadcast */
+ unsigned mcast:1; /* current packet is multicast */
+ unsigned pause:1;
+ unsigned ctrl:1;
+ unsigned crc:1; /* received a packet with crc error */
+ unsigned code:1; /* received a packet with code error */
+ unsigned runt:1; /* received a packet less than 64 bytes
+ * with good crc */
+ unsigned frag:1; /* received a packet less than 64 bytes
+ * with bad crc */
+ unsigned trunc:1; /* current frame truncated due to rxram full */
+	unsigned align:1; /* this packet has an alignment error */
+	unsigned vlan:1; /* this packet has a vlan tag */
+ unsigned:3; /* reserved */
+ unsigned update:1;
+ unsigned short vtag; /* vlan tag */
+ unsigned:16;
+};
+/* FIXME: replace above bitfields with MASK/SHIFT defines below */
+#define RX_PKT_STATUS_SIZE_MASK 0x7FF
+#define RX_PKT_STATUS_SIZE_SHIFT 0
+#define RX_PKT_STATUS_OK_MASK 0x1
+#define RX_PKT_STATUS_OK_SHIFT 16
+#define RX_PKT_STATUS_BCAST_MASK 0x1
+#define RX_PKT_STATUS_BCAST_SHIFT 17
+#define RX_PKT_STATUS_MCAST_MASK 0x1
+#define RX_PKT_STATUS_MCAST_SHIFT 18
+#define RX_PKT_STATUS_PAUSE_MASK 0x1
+#define RX_PKT_STATUS_PAUSE_SHIFT 19
+#define RX_PKT_STATUS_CTRL_MASK 0x1
+#define RX_PKT_STATUS_CTRL_SHIFT 20
+#define RX_PKT_STATUS_CRC_MASK 0x1
+#define RX_PKT_STATUS_CRC_SHIFT 21
+#define RX_PKT_STATUS_CODE_MASK 0x1
+#define RX_PKT_STATUS_CODE_SHIFT 22
+#define RX_PKT_STATUS_RUNT_MASK 0x1
+#define RX_PKT_STATUS_RUNT_SHIFT 23
+#define RX_PKT_STATUS_FRAG_MASK 0x1
+#define RX_PKT_STATUS_FRAG_SHIFT 24
+#define RX_PKT_STATUS_TRUNK_MASK 0x1
+#define RX_PKT_STATUS_TRUNK_SHIFT 25
+#define RX_PKT_STATUS_ALIGN_MASK 0x1
+#define RX_PKT_STATUS_ALIGN_SHIFT 26
+#define RX_PKT_STATUS_VLAN_MASK 0x1
+#define RX_PKT_STATUS_VLAN_SHIFT 27
+#define RX_PKT_STATUS_UPDATE_MASK 0x1
+#define RX_PKT_STATUS_UPDATE_SHIFT 31
+#define RX_PKT_STATUS_VLAN_TAG_MASK 0xFFFF
+#define RX_PKT_STATUS_VLAN_TAG_SHIFT 32
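Conversely, a sketch of extracting fields from the first RX status dword with the macros above (layout assumed to match struct rx_pkt_status; illustrative only):

    static inline unsigned int rx_pkt_status_size(u32 status)
    {
    	return (status >> RX_PKT_STATUS_SIZE_SHIFT) & RX_PKT_STATUS_SIZE_MASK;
    }

    static inline bool rx_pkt_status_ok(u32 status)
    {
    	return (status >> RX_PKT_STATUS_OK_SHIFT) & RX_PKT_STATUS_OK_MASK;
    }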
+
+struct rx_desc {
+ struct rx_pkt_status status;
+ unsigned char packet[1536-sizeof(struct rx_pkt_status)];
+};
+
+enum atl2_speed_duplex {
+ atl2_10_half = 0,
+ atl2_10_full = 1,
+ atl2_100_half = 2,
+ atl2_100_full = 3
+};
+
+struct atl2_spi_flash_dev {
+ const char *manu_name; /* manufacturer id */
+ /* op-code */
+ u8 cmdWRSR;
+ u8 cmdREAD;
+ u8 cmdPROGRAM;
+ u8 cmdWREN;
+ u8 cmdWRDI;
+ u8 cmdRDSR;
+ u8 cmdRDID;
+ u8 cmdSECTOR_ERASE;
+ u8 cmdCHIP_ERASE;
+};
+
+/* Structure containing variables used by the shared code (atl2_hw.c) */
+struct atl2_hw {
+ u8 __iomem *hw_addr;
+ void *back;
+
+ u8 preamble_len;
+	u8 max_retry; /* Maximum number of retransmissions,
+	 * after which the packet is discarded. */
+ u8 jam_ipg; /* IPG to start JAM for collision based flow
+ * control in half-duplex mode. In unit of
+ * 8-bit time. */
+ u8 ipgt; /* Desired back to back inter-packet gap. The
+ * default is 96-bit time. */
+	u8 min_ifg; /* Minimum number of IFG to enforce in between
+	 * RX frames. Frames with a gap below this IFG
+	 * are dropped. */
+ u8 ipgr1; /* 64bit Carrier-Sense window */
+ u8 ipgr2; /* 96-bit IPG window */
+	u8 retry_buf; /* In half-duplex mode, should hold some
+	 * bytes for MAC retry. (unit: 8*4 bytes) */
+
+ u16 fc_rxd_hi;
+ u16 fc_rxd_lo;
+ u16 lcol; /* Collision Window */
+ u16 max_frame_size;
+
+ u16 MediaType;
+ u16 autoneg_advertised;
+ u16 pci_cmd_word;
+
+ u16 mii_autoneg_adv_reg;
+
+ u32 mem_rang;
+ u32 txcw;
+ u32 mc_filter_type;
+ u32 num_mc_addrs;
+ u32 collision_delta;
+ u32 tx_packet_delta;
+ u16 phy_spd_default;
+
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+
+ /* spi flash */
+ u8 flash_vendor;
+
+ u8 dma_fairness;
+ u8 mac_addr[NODE_ADDRESS_SIZE];
+ u8 perm_mac_addr[NODE_ADDRESS_SIZE];
+
+ /* FIXME */
+ /* bool phy_preamble_sup; */
+ bool phy_configured;
+};
+
+#endif /* _ATL2_HW_H_ */
+
+struct atl2_ring_header {
+ /* pointer to the descriptor ring memory */
+ void *desc;
+ /* physical address of the descriptor ring */
+ dma_addr_t dma;
+ /* length of descriptor ring in bytes */
+ unsigned int size;
+};
+
+/* board specific private data structure */
+struct atl2_adapter {
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ u32 wol;
+ u16 link_speed;
+ u16 link_duplex;
+
+ spinlock_t stats_lock;
+
+ struct work_struct reset_task;
+ struct work_struct link_chg_task;
+ struct timer_list watchdog_timer;
+ struct timer_list phy_config_timer;
+
+ unsigned long cfg_phy;
+ bool mac_disabled;
+
+ /* All Descriptor memory */
+ dma_addr_t ring_dma;
+ void *ring_vir_addr;
+ int ring_size;
+
+ struct tx_pkt_header *txd_ring;
+ dma_addr_t txd_dma;
+
+ struct tx_pkt_status *txs_ring;
+ dma_addr_t txs_dma;
+
+ struct rx_desc *rxd_ring;
+ dma_addr_t rxd_dma;
+
+ u32 txd_ring_size; /* bytes per unit */
+ u32 txs_ring_size; /* dwords per unit */
+ u32 rxd_ring_size; /* 1536 bytes per unit */
+
+ /* read /write ptr: */
+ /* host */
+ u32 txd_write_ptr;
+ u32 txs_next_clear;
+ u32 rxd_read_ptr;
+
+ /* nic */
+ atomic_t txd_read_ptr;
+ atomic_t txs_write_ptr;
+ u32 rxd_write_ptr;
+
+ /* Interrupt Moderator timer ( 2us resolution) */
+ u16 imt;
+ /* Interrupt Clear timer (2us resolution) */
+ u16 ict;
+
+ unsigned long flags;
+ /* structs defined in atl2_hw.h */
+ u32 bd_number; /* board number */
+ bool pci_using_64;
+ bool have_msi;
+ struct atl2_hw hw;
+
+ u32 usr_cmd;
+ /* FIXME */
+ /* u32 regs_buff[ATL2_REGS_LEN]; */
+ u32 pci_state[16];
+
+ u32 *config_space;
+};
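The txd/txs/rxd rings above share the single ring_dma allocation; a sketch of the implied total size, assuming the units stated in the field comments (bytes, dwords, and 1536-byte blocks respectively) — an illustration, not driver code:

    static unsigned int example_ring_bytes(const struct atl2_adapter *adapter)
    {
    	return adapter->txd_ring_size +		/* TXD: already in bytes */
    	       adapter->txs_ring_size * 4 +	/* TXS: dword units */
    	       adapter->rxd_ring_size * 1536;	/* RXD: 1536-byte units */
    }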
+
+enum atl2_state_t {
+ __ATL2_TESTING,
+ __ATL2_RESETTING,
+ __ATL2_DOWN
+};
+
+#endif /* _ATL2_H_ */
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
new file mode 100644
index 000000000000..aabcf4b5745a
--- /dev/null
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -0,0 +1,269 @@
+/* atlx.c -- common functions for Attansic network drivers
+ *
+ * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
+ * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2007 Atheros Corporation. All rights reserved.
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* Including this file like a header is a temporary hack, I promise. -- CHS */
+#ifndef ATLX_C
+#define ATLX_C
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "atlx.h"
+
+static s32 atlx_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
+static u32 atlx_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
+static void atlx_set_mac_addr(struct atl1_hw *hw);
+
+static struct atlx_spi_flash_dev flash_table[] = {
+/* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SEC_ERS CHIP_ERS */
+ {"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62},
+ {"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60},
+ {"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7},
+};
+
+static int atlx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return atlx_mii_ioctl(netdev, ifr, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/*
+ * atlx_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atlx_set_mac(struct net_device *netdev, void *p)
+{
+ struct atlx_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
+
+ atlx_set_mac_addr(&adapter->hw);
+ return 0;
+}
+
+static void atlx_check_for_link(struct atlx_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u16 phy_data = 0;
+
+ spin_lock(&adapter->lock);
+ adapter->phy_timer_pending = false;
+ atlx_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+ atlx_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+ spin_unlock(&adapter->lock);
+
+ /* notify upper layer link down ASAP */
+ if (!(phy_data & BMSR_LSTATUS)) {
+ /* Link Down */
+ if (netif_carrier_ok(netdev)) {
+ /* old link state: Up */
+ dev_info(&adapter->pdev->dev, "%s link is down\n",
+ netdev->name);
+ adapter->link_speed = SPEED_0;
+ netif_carrier_off(netdev);
+ }
+ }
+ schedule_work(&adapter->link_chg_task);
+}
+
+/*
+ * atlx_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ */
+static void atlx_set_multi(struct net_device *netdev)
+{
+ struct atlx_adapter *adapter = netdev_priv(netdev);
+ struct atlx_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u32 rctl;
+ u32 hash_value;
+
+ /* Check for Promiscuous and All Multicast modes */
+ rctl = ioread32(hw->hw_addr + REG_MAC_CTRL);
+ if (netdev->flags & IFF_PROMISC)
+ rctl |= MAC_CTRL_PROMIS_EN;
+ else if (netdev->flags & IFF_ALLMULTI) {
+ rctl |= MAC_CTRL_MC_ALL_EN;
+ rctl &= ~MAC_CTRL_PROMIS_EN;
+ } else
+ rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
+
+ iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL);
+
+ /* clear the old settings from the multicast hash table */
+ iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
+ iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
+
+	/* compute each mc address's hash value and put it into the hash table */
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atlx_hash_mc_addr(hw, ha->addr);
+ atlx_hash_set(hw, hash_value);
+ }
+}
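atlx_hash_set() itself lies outside this hunk; a plausible sketch, assuming the e1000-style split of the 64-bit hash table across the two 32-bit registers cleared above (register choice from hash bit 31, bit position from bits 30:26 — an assumption, not confirmed by this patch):

    static void example_hash_set(struct atlx_hw *hw, u32 hash_value)
    {
    	u32 hash_reg = (hash_value >> 31) & 0x1;	/* which 32-bit register */
    	u32 hash_bit = (hash_value >> 26) & 0x1F;	/* bit within it */
    	u32 mta;

    	mta = ioread32(hw->hw_addr + REG_RX_HASH_TABLE + (hash_reg << 2));
    	mta |= 1U << hash_bit;
    	iowrite32(mta, hw->hw_addr + REG_RX_HASH_TABLE + (hash_reg << 2));
    }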
+
+/*
+ * atlx_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ */
+static void atlx_irq_enable(struct atlx_adapter *adapter)
+{
+ iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR);
+ ioread32(adapter->hw.hw_addr + REG_IMR);
+}
+
+/*
+ * atlx_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ */
+static void atlx_irq_disable(struct atlx_adapter *adapter)
+{
+ iowrite32(0, adapter->hw.hw_addr + REG_IMR);
+ ioread32(adapter->hw.hw_addr + REG_IMR);
+ synchronize_irq(adapter->pdev->irq);
+}
+
+static void atlx_clear_phy_int(struct atlx_adapter *adapter)
+{
+ u16 phy_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ atlx_read_phy_reg(&adapter->hw, 19, &phy_data);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
+/*
+ * atlx_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ */
+static void atlx_tx_timeout(struct net_device *netdev)
+{
+ struct atlx_adapter *adapter = netdev_priv(netdev);
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->tx_timeout_task);
+}
+
+/*
+ * atlx_link_chg_task - deal with a link change event out of interrupt context
+ */
+static void atlx_link_chg_task(struct work_struct *work)
+{
+ struct atlx_adapter *adapter;
+ unsigned long flags;
+
+ adapter = container_of(work, struct atlx_adapter, link_chg_task);
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ atlx_check_link(adapter);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
+static void __atlx_vlan_mode(u32 features, u32 *ctrl)
+{
+ if (features & NETIF_F_HW_VLAN_RX) {
+ /* enable VLAN tag insert/strip */
+ *ctrl |= MAC_CTRL_RMV_VLAN;
+ } else {
+ /* disable VLAN tag insert/strip */
+ *ctrl &= ~MAC_CTRL_RMV_VLAN;
+ }
+}
+
+static void atlx_vlan_mode(struct net_device *netdev, u32 features)
+{
+ struct atlx_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
+ u32 ctrl;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ /* atlx_irq_disable(adapter); FIXME: confirm/remove */
+ ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
+ __atlx_vlan_mode(features, &ctrl);
+ iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
+ /* atlx_irq_enable(adapter); FIXME */
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
+static void atlx_restore_vlan(struct atlx_adapter *adapter)
+{
+ atlx_vlan_mode(adapter->netdev, adapter->netdev->features);
+}
+
+static u32 atlx_fix_features(struct net_device *netdev, u32 features)
+{
+	/*
+	 * Since there is no support for separate rx/tx vlan accel
+	 * enable/disable, make sure the tx flag is always in the same
+	 * state as the rx flag.
+	 */
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ return features;
+}
+
+static int atlx_set_features(struct net_device *netdev, u32 features)
+{
+ u32 changed = netdev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ atlx_vlan_mode(netdev, features);
+
+ return 0;
+}
+
+#endif /* ATLX_C */
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.h b/drivers/net/ethernet/atheros/atlx/atlx.h
new file mode 100644
index 000000000000..14054b75aa62
--- /dev/null
+++ b/drivers/net/ethernet/atheros/atlx/atlx.h
@@ -0,0 +1,503 @@
+/* atlx_hw.h -- common hardware definitions for Attansic network drivers
+ *
+ * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
+ * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2007 Atheros Corporation. All rights reserved.
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef ATLX_H
+#define ATLX_H
+
+#include <linux/module.h>
+#include <linux/types.h>
+
+#define ATLX_ERR_PHY 2
+#define ATLX_ERR_PHY_SPEED 7
+#define ATLX_ERR_PHY_RES 8
+
+#define SPEED_0 0xffff
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+#define MEDIA_TYPE_AUTO_SENSOR 0
+
+/* register definitions */
+#define REG_PM_CTRLSTAT 0x44
+
+#define REG_PCIE_CAP_LIST 0x58
+
+#define REG_VPD_CAP 0x6C
+#define VPD_CAP_ID_MASK 0xFF
+#define VPD_CAP_ID_SHIFT 0
+#define VPD_CAP_NEXT_PTR_MASK 0xFF
+#define VPD_CAP_NEXT_PTR_SHIFT 8
+#define VPD_CAP_VPD_ADDR_MASK 0x7FFF
+#define VPD_CAP_VPD_ADDR_SHIFT 16
+#define VPD_CAP_VPD_FLAG 0x80000000
+
+#define REG_VPD_DATA 0x70
+
+#define REG_SPI_FLASH_CTRL 0x200
+#define SPI_FLASH_CTRL_STS_NON_RDY 0x1
+#define SPI_FLASH_CTRL_STS_WEN 0x2
+#define SPI_FLASH_CTRL_STS_WPEN 0x80
+#define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF
+#define SPI_FLASH_CTRL_DEV_STS_SHIFT 0
+#define SPI_FLASH_CTRL_INS_MASK 0x7
+#define SPI_FLASH_CTRL_INS_SHIFT 8
+#define SPI_FLASH_CTRL_START 0x800
+#define SPI_FLASH_CTRL_EN_VPD 0x2000
+#define SPI_FLASH_CTRL_LDSTART 0x8000
+#define SPI_FLASH_CTRL_CS_HI_MASK 0x3
+#define SPI_FLASH_CTRL_CS_HI_SHIFT 16
+#define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3
+#define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18
+#define SPI_FLASH_CTRL_CLK_LO_MASK 0x3
+#define SPI_FLASH_CTRL_CLK_LO_SHIFT 20
+#define SPI_FLASH_CTRL_CLK_HI_MASK 0x3
+#define SPI_FLASH_CTRL_CLK_HI_SHIFT 22
+#define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3
+#define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24
+#define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3
+#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26
+#define SPI_FLASH_CTRL_WAIT_READY 0x10000000
+
+#define REG_SPI_ADDR 0x204
+
+#define REG_SPI_DATA 0x208
+
+#define REG_SPI_FLASH_CONFIG 0x20C
+#define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF
+#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0
+#define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3
+#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24
+#define SPI_FLASH_CONFIG_LD_EXIST 0x4000000
+
+#define REG_SPI_FLASH_OP_PROGRAM 0x210
+#define REG_SPI_FLASH_OP_SC_ERASE 0x211
+#define REG_SPI_FLASH_OP_CHIP_ERASE 0x212
+#define REG_SPI_FLASH_OP_RDID 0x213
+#define REG_SPI_FLASH_OP_WREN 0x214
+#define REG_SPI_FLASH_OP_RDSR 0x215
+#define REG_SPI_FLASH_OP_WRSR 0x216
+#define REG_SPI_FLASH_OP_READ 0x217
+
+#define REG_TWSI_CTRL 0x218
+#define TWSI_CTRL_LD_OFFSET_MASK 0xFF
+#define TWSI_CTRL_LD_OFFSET_SHIFT 0
+#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7
+#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
+#define TWSI_CTRL_SW_LDSTART 0x800
+#define TWSI_CTRL_HW_LDSTART 0x1000
+#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
+#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
+#define TWSI_CTRL_LD_EXIST 0x400000
+#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
+#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23
+#define TWSI_CTRL_FREQ_SEL_100K 0
+#define TWSI_CTRL_FREQ_SEL_200K 1
+#define TWSI_CTRL_FREQ_SEL_300K 2
+#define TWSI_CTRL_FREQ_SEL_400K 3
+#define TWSI_CTRL_SMB_SLV_ADDR /* FIXME: define or remove */
+#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3
+#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24
+
+#define REG_PCIE_DEV_MISC_CTRL 0x21C
+#define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2
+#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1
+#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4
+#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8
+#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10
+
+#define REG_PCIE_PHYMISC 0x1000
+#define PCIE_PHYMISC_FORCE_RCV_DET 0x4
+
+#define REG_PCIE_DLL_TX_CTRL1 0x1104
+#define PCIE_DLL_TX_CTRL1_SEL_NOR_CLK 0x400
+#define PCIE_DLL_TX_CTRL1_DEF 0x568
+
+#define REG_LTSSM_TEST_MODE 0x12FC
+#define LTSSM_TEST_MODE_DEF 0x6500
+
+/* Master Control Register */
+#define REG_MASTER_CTRL 0x1400
+#define MASTER_CTRL_SOFT_RST 0x1
+#define MASTER_CTRL_MTIMER_EN 0x2
+#define MASTER_CTRL_ITIMER_EN 0x4
+#define MASTER_CTRL_MANUAL_INT 0x8
+#define MASTER_CTRL_REV_NUM_SHIFT 16
+#define MASTER_CTRL_REV_NUM_MASK 0xFF
+#define MASTER_CTRL_DEV_ID_SHIFT 24
+#define MASTER_CTRL_DEV_ID_MASK 0xFF
+
+/* Timer Initial Value Register */
+#define REG_MANUAL_TIMER_INIT 0x1404
+
+/* IRQ Moderator Timer Initial Value Register */
+#define REG_IRQ_MODU_TIMER_INIT 0x1408
+
+#define REG_PHY_ENABLE 0x140C
+
+/* IRQ Anti-Lost Timer Initial Value Register */
+#define REG_CMBDISDMA_TIMER 0x140E
+
+/* Block IDLE Status Register */
+#define REG_IDLE_STATUS 0x1410
+
+/* MDIO Control Register */
+#define REG_MDIO_CTRL 0x1414
+#define MDIO_DATA_MASK 0xFFFF
+#define MDIO_DATA_SHIFT 0
+#define MDIO_REG_ADDR_MASK 0x1F
+#define MDIO_REG_ADDR_SHIFT 16
+#define MDIO_RW 0x200000
+#define MDIO_SUP_PREAMBLE 0x400000
+#define MDIO_START 0x800000
+#define MDIO_CLK_SEL_SHIFT 24
+#define MDIO_CLK_25_4 0
+#define MDIO_CLK_25_6 2
+#define MDIO_CLK_25_8 3
+#define MDIO_CLK_25_10 4
+#define MDIO_CLK_25_14 5
+#define MDIO_CLK_25_20 6
+#define MDIO_CLK_25_28 7
+#define MDIO_BUSY 0x8000000
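A sketch of a PHY register read composed from these fields; it assumes MDIO_RW selects a read cycle and borrows MDIO_WAIT_TIMES from atl2.h, both mirrored from the related atl1/atl2 code rather than anything stated in this hunk:

    static u16 example_mdio_read(void __iomem *hw_addr, u16 reg_addr)
    {
    	u32 val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT) |
    		  MDIO_SUP_PREAMBLE | MDIO_START | MDIO_RW |
    		  ((u32)MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT);
    	int i;

    	iowrite32(val, hw_addr + REG_MDIO_CTRL);
    	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
    		udelay(2);
    		val = ioread32(hw_addr + REG_MDIO_CTRL);
    		if (!(val & (MDIO_START | MDIO_BUSY)))
    			break;	/* command completed */
    	}
    	return val & MDIO_DATA_MASK;	/* data field of the result */
    }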
+
+/* MII PHY Status Register */
+#define REG_PHY_STATUS 0x1418
+
+/* BIST Control and Status Register0 (for the Packet Memory) */
+#define REG_BIST0_CTRL 0x141C
+#define BIST0_NOW 0x1
+#define BIST0_SRAM_FAIL 0x2
+#define BIST0_FUSE_FLAG 0x4
+#define REG_BIST1_CTRL 0x1420
+#define BIST1_NOW 0x1
+#define BIST1_SRAM_FAIL 0x2
+#define BIST1_FUSE_FLAG 0x4
+
+/* SerDes Lock Detect Control and Status Register */
+#define REG_SERDES_LOCK 0x1424
+#define SERDES_LOCK_DETECT 1
+#define SERDES_LOCK_DETECT_EN 2
+
+/* MAC Control Register */
+#define REG_MAC_CTRL 0x1480
+#define MAC_CTRL_TX_EN 1
+#define MAC_CTRL_RX_EN 2
+#define MAC_CTRL_TX_FLOW 4
+#define MAC_CTRL_RX_FLOW 8
+#define MAC_CTRL_LOOPBACK 0x10
+#define MAC_CTRL_DUPLX 0x20
+#define MAC_CTRL_ADD_CRC 0x40
+#define MAC_CTRL_PAD 0x80
+#define MAC_CTRL_LENCHK 0x100
+#define MAC_CTRL_HUGE_EN 0x200
+#define MAC_CTRL_PRMLEN_SHIFT 10
+#define MAC_CTRL_PRMLEN_MASK 0xF
+#define MAC_CTRL_RMV_VLAN 0x4000
+#define MAC_CTRL_PROMIS_EN 0x8000
+#define MAC_CTRL_MC_ALL_EN 0x2000000
+#define MAC_CTRL_BC_EN 0x4000000
+
+/* MAC IPG/IFG Control Register */
+#define REG_MAC_IPG_IFG 0x1484
+#define MAC_IPG_IFG_IPGT_SHIFT 0
+#define MAC_IPG_IFG_IPGT_MASK 0x7F
+#define MAC_IPG_IFG_MIFG_SHIFT 8
+#define MAC_IPG_IFG_MIFG_MASK 0xFF
+#define MAC_IPG_IFG_IPGR1_SHIFT 16
+#define MAC_IPG_IFG_IPGR1_MASK 0x7F
+#define MAC_IPG_IFG_IPGR2_SHIFT 24
+#define MAC_IPG_IFG_IPGR2_MASK 0x7F
+
+/* MAC STATION ADDRESS */
+#define REG_MAC_STA_ADDR 0x1488
+
+/* Hash table for multicast address */
+#define REG_RX_HASH_TABLE 0x1490
+
+/* MAC Half-Duplex Control Register */
+#define REG_MAC_HALF_DUPLX_CTRL 0x1498
+#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0
+#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3FF
+#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12
+#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xF
+#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000
+#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000
+#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000
+#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000
+#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20
+#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xF
+#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24
+#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xF
+
+/* Maximum Frame Length Control Register */
+#define REG_MTU 0x149C
+
+/* Wake-On-Lan control register */
+#define REG_WOL_CTRL 0x14A0
+#define WOL_PATTERN_EN 0x1
+#define WOL_PATTERN_PME_EN 0x2
+#define WOL_MAGIC_EN 0x4
+#define WOL_MAGIC_PME_EN 0x8
+#define WOL_LINK_CHG_EN 0x10
+#define WOL_LINK_CHG_PME_EN 0x20
+#define WOL_PATTERN_ST 0x100
+#define WOL_MAGIC_ST 0x200
+#define WOL_LINKCHG_ST 0x400
+#define WOL_PT0_EN 0x10000
+#define WOL_PT1_EN 0x20000
+#define WOL_PT2_EN 0x40000
+#define WOL_PT3_EN 0x80000
+#define WOL_PT4_EN 0x100000
+#define WOL_PT0_MATCH 0x1000000
+#define WOL_PT1_MATCH 0x2000000
+#define WOL_PT2_MATCH 0x4000000
+#define WOL_PT3_MATCH 0x8000000
+#define WOL_PT4_MATCH 0x10000000
+
+/* Internal SRAM Partition Register, high 32 bits */
+#define REG_SRAM_RFD_ADDR 0x1500
+
+/* Descriptor Control register, high 32 bits */
+#define REG_DESC_BASE_ADDR_HI 0x1540
+
+/* Interrupt Status Register */
+#define REG_ISR 0x1600
+#define ISR_UR_DETECTED 0x1000000
+#define ISR_FERR_DETECTED 0x2000000
+#define ISR_NFERR_DETECTED 0x4000000
+#define ISR_CERR_DETECTED 0x8000000
+#define ISR_PHY_LINKDOWN 0x10000000
+#define ISR_DIS_INT 0x80000000
+
+/* Interrupt Mask Register */
+#define REG_IMR 0x1604
+
+#define REG_RFD_RRD_IDX 0x1800
+#define REG_TPD_IDX 0x1804
+
+/* MII definitions */
+
+/* PHY Common Register */
+#define MII_ATLX_CR 0x09
+#define MII_ATLX_SR 0x0A
+#define MII_ATLX_ESR 0x0F
+#define MII_ATLX_PSCR 0x10
+#define MII_ATLX_PSSR 0x11
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100,
+ * 00=10
+ */
+#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100,
+ * 00=10
+ */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_MASK 0x2040
+#define MII_CR_SPEED_1000 0x0040
+#define MII_CR_SPEED_100 0x2000
+#define MII_CR_SPEED_10 0x0000
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS 0x0001 /* Ext register capabilities */
+#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext stat info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
+#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
+
+/* Link partner ability register */
+#define MII_LPA_SLCT 0x001f /* Same as advertise selector */
+#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */
+#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */
+#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */
+#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */
+#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */
+#define MII_LPA_PAUSE 0x0400 /* PAUSE */
+#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */
+#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */
+#define MII_LPA_LPACK 0x4000 /* Link partner acked us */
+#define MII_LPA_NPAGE 0x8000 /* Next page bit */
+
+/* Autoneg Advertisement Register */
+#define MII_AR_SELECTOR_FIELD 0x0001 /* IEEE 802.3 CSMA/CD */
+#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
+#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
+#define MII_AR_PAUSE 0x0400 /* Pause operation desired */
+#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Dir bit */
+#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
+#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability support */
+#define MII_AR_SPEED_MASK 0x01E0
+#define MII_AR_DEFAULT_CAP_MASK 0x0DE0
+
+/* 1000BASE-T Control Register */
+#define MII_ATLX_CR_1000T_HD_CAPS 0x0100 /* Adv 1000T HD cap */
+#define MII_ATLX_CR_1000T_FD_CAPS 0x0200 /* Adv 1000T FD cap */
+#define MII_ATLX_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device,
+ * 0=DTE device */
+#define MII_ATLX_CR_1000T_MS_VALUE 0x0800 /* 1=Config PHY as Master,
+ * 0=Configure PHY as Slave */
+#define MII_ATLX_CR_1000T_MS_ENABLE 0x1000 /* 1=Man Master/Slave config,
+ * 0=Auto Master/Slave config
+ */
+#define MII_ATLX_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define MII_ATLX_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
+#define MII_ATLX_CR_1000T_TEST_MODE_2 0x4000 /* Master Xmit Jitter test */
+#define MII_ATLX_CR_1000T_TEST_MODE_3 0x6000 /* Slave Xmit Jitter test */
+#define MII_ATLX_CR_1000T_TEST_MODE_4 0x8000 /* Xmitter Distortion test */
+#define MII_ATLX_CR_1000T_SPEED_MASK 0x0300
+#define MII_ATLX_CR_1000T_DEFAULT_CAP_MASK 0x0300
+
+/* 1000BASE-T Status Register */
+#define MII_ATLX_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
+#define MII_ATLX_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
+#define MII_ATLX_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define MII_ATLX_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
+#define MII_ATLX_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master
+ * 0=Slave
+ */
+#define MII_ATLX_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config
+ * fault */
+#define MII_ATLX_SR_1000T_REMOTE_RX_STATUS_SHIFT 12
+#define MII_ATLX_SR_1000T_LOCAL_RX_STATUS_SHIFT 13
+
+/* Extended Status Register */
+#define MII_ATLX_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
+#define MII_ATLX_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
+#define MII_ATLX_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
+#define MII_ATLX_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
+
+/* ATLX PHY Specific Control Register */
+#define MII_ATLX_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Func disabled */
+#define MII_ATLX_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enbld */
+#define MII_ATLX_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
+#define MII_ATLX_PSCR_MAC_POWERDOWN 0x0008
+#define MII_ATLX_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low
+ * 0=CLK125 toggling
+ */
+#define MII_ATLX_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5,
+ * Manual MDI configuration
+ */
+#define MII_ATLX_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
+#define MII_ATLX_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover
+ * 100BASE-TX/10BASE-T: MDI
+ * Mode */
+#define MII_ATLX_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled
+ * all speeds.
+ */
+#define MII_ATLX_PSCR_10BT_EXT_DIST_ENABLE 0x0080 /* 1=Enable Extended
+ * 10BASE-T distance
+ * (Lower 10BASE-T RX
+ * Threshold)
+ * 0=Normal 10BASE-T RX
+ * Threshold
+ */
+#define MII_ATLX_PSCR_MII_5BIT_ENABLE 0x0100 /* 1=5-Bit interface in
+ * 100BASE-TX
+ * 0=MII interface in
+ * 100BASE-TX
+ */
+#define MII_ATLX_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler dsbl */
+#define MII_ATLX_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
+#define MII_ATLX_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
+#define MII_ATLX_PSCR_POLARITY_REVERSAL_SHIFT 1
+#define MII_ATLX_PSCR_AUTO_X_MODE_SHIFT 5
+#define MII_ATLX_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
+
+/* ATLX PHY Specific Status Register */
+#define MII_ATLX_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
+#define MII_ATLX_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
+#define MII_ATLX_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
+#define MII_ATLX_PSSR_10MBS 0x0000 /* 00=10Mbps */
+#define MII_ATLX_PSSR_100MBS 0x4000 /* 01=100Mbps */
+#define MII_ATLX_PSSR_1000MBS 0x8000 /* 10=1000Mbps */
+
+#define MII_DBG_ADDR 0x1D
+#define MII_DBG_DATA 0x1E
+
+/* PCI Command Register Bit Definitions */
+#define PCI_REG_COMMAND 0x04 /* PCI Command Register */
+#define CMD_IO_SPACE 0x0001
+#define CMD_MEMORY_SPACE 0x0002
+#define CMD_BUS_MASTER 0x0004
+
+/* Wake Up Filter Control */
+#define ATLX_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define ATLX_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define ATLX_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define ATLX_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */
+#define ATLX_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010
+#define ADVERTISE_1000_FULL 0x0020
+#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */
+#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */
+
+#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */
+#define PHY_FORCE_TIME 20 /* 2.0 Seconds */
+
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA */
+#define EEPROM_SUM 0xBABA
+#define NODE_ADDRESS_SIZE 6
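A sketch of the checksum rule stated above; eeprom_read() and EEPROM_WORDS are hypothetical stand-ins, not symbols from this patch:

    static bool example_eeprom_checksum_ok(void)
    {
    	u16 sum = 0;
    	int i;

    	for (i = 0; i < EEPROM_WORDS; i++)	/* EEPROM_WORDS: assumed size */
    		sum += eeprom_read(i);		/* hypothetical accessor */
    	return sum == EEPROM_SUM;
    }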
+
+struct atlx_spi_flash_dev {
+ const char *manu_name; /* manufacturer id */
+ /* op-code */
+ u8 cmd_wrsr;
+ u8 cmd_read;
+ u8 cmd_program;
+ u8 cmd_wren;
+ u8 cmd_wrdi;
+ u8 cmd_rdsr;
+ u8 cmd_rdid;
+ u8 cmd_sector_erase;
+ u8 cmd_chip_erase;
+};
+
+#endif /* ATLX_H */
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
new file mode 100644
index 000000000000..f15e72e81ac4
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -0,0 +1,122 @@
+#
+# Broadcom device configuration
+#
+
+config NET_VENDOR_BROADCOM
+ bool "Broadcom devices"
+ default y
+ depends on (SSB_POSSIBLE && HAS_DMA) || PCI || BCM63XX || \
+ SIBYTE_SB1xxx_SOC
+ ---help---
+ If you have a network (Ethernet) chipset belonging to this class,
+ say Y.
+
+	  Note that the answer to this question does not directly affect
+	  the kernel: saying N will just cause the configurator to skip all
+	  the questions regarding Broadcom chipsets. If you say Y, you will
+	  be asked for your specific chipset/driver in the following questions.
+
+if NET_VENDOR_BROADCOM
+
+config B44
+ tristate "Broadcom 440x/47xx ethernet support"
+ depends on SSB_POSSIBLE && HAS_DMA
+ select SSB
+ select NET_CORE
+ select MII
+ ---help---
+ If you have a network (Ethernet) controller of this type, say Y
+ or M and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called b44.
+
+# Auto-select SSB PCI-HOST support, if possible
+config B44_PCI_AUTOSELECT
+ bool
+ depends on B44 && SSB_PCIHOST_POSSIBLE
+ select SSB_PCIHOST
+ default y
+
+# Auto-select SSB PCICORE driver, if possible
+config B44_PCICORE_AUTOSELECT
+ bool
+ depends on B44 && SSB_DRIVER_PCICORE_POSSIBLE
+ select SSB_DRIVER_PCICORE
+ default y
+
+config B44_PCI
+ bool
+ depends on B44_PCI_AUTOSELECT && B44_PCICORE_AUTOSELECT
+ default y
+
+config BCM63XX_ENET
+ tristate "Broadcom 63xx internal mac support"
+ depends on BCM63XX
+ select NET_CORE
+ select MII
+ select PHYLIB
+ help
+ This driver supports the ethernet MACs in the Broadcom 63xx
+ MIPS chipset family (BCM63XX).
+
+config BNX2
+ tristate "Broadcom NetXtremeII support"
+ depends on PCI
+ select CRC32
+ select FW_LOADER
+ ---help---
+ This driver supports Broadcom NetXtremeII gigabit Ethernet cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called bnx2. This is recommended.
+
+config CNIC
+ tristate "Broadcom CNIC support"
+ depends on PCI
+ select BNX2
+ select UIO
+ ---help---
+ This driver supports offload features of Broadcom NetXtremeII
+ gigabit Ethernet cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called cnic. This is recommended.
+
+config SB1250_MAC
+ tristate "SB1250 Gigabit Ethernet support"
+ depends on SIBYTE_SB1xxx_SOC
+ select PHYLIB
+ ---help---
+ This driver supports Gigabit Ethernet interfaces based on the
+ Broadcom SiByte family of System-On-a-Chip parts. They include
+ the BCM1120, BCM1125, BCM1125H, BCM1250, BCM1255, BCM1280, BCM1455
+ and BCM1480 chips.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sb1250-mac.
+
+config TIGON3
+ tristate "Broadcom Tigon3 support"
+ depends on PCI
+ select PHYLIB
+ ---help---
+ This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called tg3. This is recommended.
+
+config BNX2X
+ tristate "Broadcom NetXtremeII 10Gb support"
+ depends on PCI
+ select FW_LOADER
+ select ZLIB_INFLATE
+ select LIBCRC32C
+ select MDIO
+ ---help---
+ This driver supports Broadcom NetXtremeII 10 gigabit Ethernet cards.
+ To compile this driver as a module, choose M here: the module
+ will be called bnx2x. This is recommended.
+
+endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
new file mode 100644
index 000000000000..b7896051d54e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the Broadcom network device drivers.
+#
+
+obj-$(CONFIG_B44) += b44.o
+obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
+obj-$(CONFIG_BNX2) += bnx2.o
+obj-$(CONFIG_CNIC) += cnic.o
+obj-$(CONFIG_BNX2X) += bnx2x/
+obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
+obj-$(CONFIG_TIGON3) += tg3.o
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
new file mode 100644
index 000000000000..4cf835dbc122
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -0,0 +1,2374 @@
+/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
+ *
+ * Copyright (C) 2002 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
+ * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
+ * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
+ * Copyright (C) 2006 Broadcom Corporation.
+ * Copyright (C) 2007 Michael Buesch <m@bues.ch>
+ *
+ * Distribute under GPL.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/ssb/ssb.h>
+#include <linux/slab.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+
+#include "b44.h"
+
+#define DRV_MODULE_NAME "b44"
+#define DRV_MODULE_VERSION "2.0"
+#define DRV_DESCRIPTION "Broadcom 44xx/47xx 10/100 PCI ethernet driver"
+
+#define B44_DEF_MSG_ENABLE \
+ (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+/* length of time before we decide the hardware is borked,
+ * and dev->tx_timeout() should be called to fix the problem
+ */
+#define B44_TX_TIMEOUT (5 * HZ)
+
+/* hardware minimum and maximum for a single frame's data payload */
+#define B44_MIN_MTU 60
+#define B44_MAX_MTU 1500
+
+#define B44_RX_RING_SIZE 512
+#define B44_DEF_RX_RING_PENDING 200
+#define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
+ B44_RX_RING_SIZE)
+#define B44_TX_RING_SIZE 512
+#define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
+#define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
+ B44_TX_RING_SIZE)
+
+#define TX_RING_GAP(BP) \
+ (B44_TX_RING_SIZE - (BP)->tx_pending)
+#define TX_BUFFS_AVAIL(BP) \
+ (((BP)->tx_cons <= (BP)->tx_prod) ? \
+ (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
+ (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
+#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
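+/* Example of the math above: with tx_pending = 511 (the default),
+ * tx_cons = 10 and tx_prod = 500 give 10 + 511 - 500 = 21 free
+ * descriptors; after the producer wraps (tx_cons = 500, tx_prod = 10),
+ * the second branch gives 500 - 10 - (512 - 511) = 489.
+ */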
+
+#define RX_PKT_OFFSET (RX_HEADER_LEN + 2)
+#define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET)
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
+
+/* b44 internal pattern match filter info */
+#define B44_PATTERN_BASE 0x400
+#define B44_PATTERN_SIZE 0x80
+#define B44_PMASK_BASE 0x600
+#define B44_PMASK_SIZE 0x10
+#define B44_MAX_PATTERNS 16
+#define B44_ETHIPV6UDP_HLEN 62
+#define B44_ETHIPV4UDP_HLEN 42
+
+MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
+module_param(b44_debug, int, 0);
+MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
+
+
+#ifdef CONFIG_B44_PCI
+static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
+ { 0 } /* terminate list with empty entry */
+};
+MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
+
+static struct pci_driver b44_pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = b44_pci_tbl,
+};
+#endif /* CONFIG_B44_PCI */
+
+static const struct ssb_device_id b44_ssb_tbl[] = {
+ SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
+ SSB_DEVTABLE_END
+};
+MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
+
+static void b44_halt(struct b44 *);
+static void b44_init_rings(struct b44 *);
+
+#define B44_FULL_RESET 1
+#define B44_FULL_RESET_SKIP_PHY 2
+#define B44_PARTIAL_RESET 3
+#define B44_CHIP_RESET_FULL 4
+#define B44_CHIP_RESET_PARTIAL 5
+
+static void b44_init_hw(struct b44 *, int);
+
+static int dma_desc_sync_size;
+static int instance;
+
+static const char b44_gstrings[][ETH_GSTRING_LEN] = {
+#define _B44(x...) # x,
+B44_STAT_REG_DECLARE
+#undef _B44
+};
+
+static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
+ dma_addr_t dma_base,
+ unsigned long offset,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
+ dma_desc_sync_size, dir);
+}
+
+static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
+ dma_addr_t dma_base,
+ unsigned long offset,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
+ dma_desc_sync_size, dir);
+}
+
+static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
+{
+ return ssb_read32(bp->sdev, reg);
+}
+
+static inline void bw32(const struct b44 *bp,
+ unsigned long reg, unsigned long val)
+{
+ ssb_write32(bp->sdev, reg, val);
+}
+
+static int b44_wait_bit(struct b44 *bp, unsigned long reg,
+ u32 bit, unsigned long timeout, const int clear)
+{
+ unsigned long i;
+
+ for (i = 0; i < timeout; i++) {
+ u32 val = br32(bp, reg);
+
+ if (clear && !(val & bit))
+ break;
+ if (!clear && (val & bit))
+ break;
+ udelay(10);
+ }
+ if (i == timeout) {
+ if (net_ratelimit())
+ netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n",
+ bit, reg, clear ? "clear" : "set");
+
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
+{
+ u32 val;
+
+ bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
+ (index << CAM_CTRL_INDEX_SHIFT)));
+
+ b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
+
+ val = br32(bp, B44_CAM_DATA_LO);
+
+ data[2] = (val >> 24) & 0xFF;
+ data[3] = (val >> 16) & 0xFF;
+ data[4] = (val >> 8) & 0xFF;
+ data[5] = (val >> 0) & 0xFF;
+
+ val = br32(bp, B44_CAM_DATA_HI);
+
+ data[0] = (val >> 8) & 0xFF;
+ data[1] = (val >> 0) & 0xFF;
+}
+
+static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
+{
+ u32 val;
+
+ val = ((u32) data[2]) << 24;
+ val |= ((u32) data[3]) << 16;
+ val |= ((u32) data[4]) << 8;
+ val |= ((u32) data[5]) << 0;
+ bw32(bp, B44_CAM_DATA_LO, val);
+ val = (CAM_DATA_HI_VALID |
+ (((u32) data[0]) << 8) |
+ (((u32) data[1]) << 0));
+ bw32(bp, B44_CAM_DATA_HI, val);
+ bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
+ (index << CAM_CTRL_INDEX_SHIFT)));
+ b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
+}
+
+static inline void __b44_disable_ints(struct b44 *bp)
+{
+ bw32(bp, B44_IMASK, 0);
+}
+
+static void b44_disable_ints(struct b44 *bp)
+{
+ __b44_disable_ints(bp);
+
+ /* Flush posted writes. */
+ br32(bp, B44_IMASK);
+}
+
+static void b44_enable_ints(struct b44 *bp)
+{
+ bw32(bp, B44_IMASK, bp->imask);
+}
+
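+/* Raw MDIO access: ack any pending MII interrupt, post the MDIO
+ * frame, then wait for the EMAC to signal completion via EMAC_INT_MII.
+ */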
+static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
+{
+ int err;
+
+ bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
+ bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
+ (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
+ (phy_addr << MDIO_DATA_PMD_SHIFT) |
+ (reg << MDIO_DATA_RA_SHIFT) |
+ (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
+ err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
+ *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
+
+ return err;
+}
+
+static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
+{
+ bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
+ bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
+ (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
+ (phy_addr << MDIO_DATA_PMD_SHIFT) |
+ (reg << MDIO_DATA_RA_SHIFT) |
+ (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
+ (val & MDIO_DATA_DATA)));
+ return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
+}
+
+static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
+{
+ if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+ return 0;
+
+ return __b44_readphy(bp, bp->phy_addr, reg, val);
+}
+
+static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
+{
+ if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+ return 0;
+
+ return __b44_writephy(bp, bp->phy_addr, reg, val);
+}
+
+/* miilib interface */
+static int b44_mii_read(struct net_device *dev, int phy_id, int location)
+{
+ u32 val;
+ struct b44 *bp = netdev_priv(dev);
+ int rc = __b44_readphy(bp, phy_id, location, &val);
+ if (rc)
+ return 0xffffffff;
+ return val;
+}
+
+static void b44_mii_write(struct net_device *dev, int phy_id, int location,
+ int val)
+{
+ struct b44 *bp = netdev_priv(dev);
+ __b44_writephy(bp, phy_id, location, val);
+}
+
+static int b44_phy_reset(struct b44 *bp)
+{
+ u32 val;
+ int err;
+
+ if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+ return 0;
+ err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
+ if (err)
+ return err;
+ udelay(100);
+ err = b44_readphy(bp, MII_BMCR, &val);
+ if (!err) {
+ if (val & BMCR_RESET) {
+ netdev_err(bp->dev, "PHY Reset would not complete\n");
+ err = -ENODEV;
+ }
+ }
+
+ return err;
+}
+
+static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
+{
+ u32 val;
+
+ bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
+ bp->flags |= pause_flags;
+
+ val = br32(bp, B44_RXCONFIG);
+ if (pause_flags & B44_FLAG_RX_PAUSE)
+ val |= RXCONFIG_FLOW;
+ else
+ val &= ~RXCONFIG_FLOW;
+ bw32(bp, B44_RXCONFIG, val);
+
+ val = br32(bp, B44_MAC_FLOW);
+ if (pause_flags & B44_FLAG_TX_PAUSE)
+ val |= (MAC_FLOW_PAUSE_ENAB |
+ (0xc0 & MAC_FLOW_RX_HI_WATER));
+ else
+ val &= ~MAC_FLOW_PAUSE_ENAB;
+ bw32(bp, B44_MAC_FLOW, val);
+}
+
+static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
+{
+ u32 pause_enab = 0;
+
+	/* The driver supports only rx pause by default because
+	 * the b44 mac tx pause mechanism generates excessive
+	 * pause frames.
+	 * Use ethtool to turn on b44 tx pause if necessary.
+	 */
+ if ((local & ADVERTISE_PAUSE_CAP) &&
+	    (local & ADVERTISE_PAUSE_ASYM)) {
+ if ((remote & LPA_PAUSE_ASYM) &&
+ !(remote & LPA_PAUSE_CAP))
+ pause_enab |= B44_FLAG_RX_PAUSE;
+ }
+
+ __b44_set_flow_ctrl(bp, pause_enab);
+}
+
+#ifdef CONFIG_BCM47XX
+#include <asm/mach-bcm47xx/nvram.h>
+static void b44_wap54g10_workaround(struct b44 *bp)
+{
+ char buf[20];
+ u32 val;
+ int err;
+
+ /*
+ * workaround for bad hardware design in Linksys WAP54G v1.0
+ * see https://dev.openwrt.org/ticket/146
+ * check and reset bit "isolate"
+ */
+ if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
+ return;
+ if (simple_strtoul(buf, NULL, 0) == 2) {
+ err = __b44_readphy(bp, 0, MII_BMCR, &val);
+ if (err)
+ goto error;
+ if (!(val & BMCR_ISOLATE))
+ return;
+ val &= ~BMCR_ISOLATE;
+ err = __b44_writephy(bp, 0, MII_BMCR, val);
+ if (err)
+ goto error;
+ }
+ return;
+error:
+ pr_warning("PHY: cannot reset MII transceiver isolate bit\n");
+}
+#else
+static inline void b44_wap54g10_workaround(struct b44 *bp)
+{
+}
+#endif
+
+static int b44_setup_phy(struct b44 *bp)
+{
+ u32 val;
+ int err;
+
+ b44_wap54g10_workaround(bp);
+
+ if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+ return 0;
+ if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
+ goto out;
+ if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
+ val & MII_ALEDCTRL_ALLMSK)) != 0)
+ goto out;
+ if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
+ goto out;
+ if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
+ val | MII_TLEDCTRL_ENABLE)) != 0)
+ goto out;
+
+ if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
+ u32 adv = ADVERTISE_CSMA;
+
+ if (bp->flags & B44_FLAG_ADV_10HALF)
+ adv |= ADVERTISE_10HALF;
+ if (bp->flags & B44_FLAG_ADV_10FULL)
+ adv |= ADVERTISE_10FULL;
+ if (bp->flags & B44_FLAG_ADV_100HALF)
+ adv |= ADVERTISE_100HALF;
+ if (bp->flags & B44_FLAG_ADV_100FULL)
+ adv |= ADVERTISE_100FULL;
+
+ if (bp->flags & B44_FLAG_PAUSE_AUTO)
+ adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+
+ if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
+ goto out;
+ if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
+ BMCR_ANRESTART))) != 0)
+ goto out;
+ } else {
+ u32 bmcr;
+
+ if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
+ goto out;
+ bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
+ if (bp->flags & B44_FLAG_100_BASE_T)
+ bmcr |= BMCR_SPEED100;
+ if (bp->flags & B44_FLAG_FULL_DUPLEX)
+ bmcr |= BMCR_FULLDPLX;
+ if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
+ goto out;
+
+ /* Since we will not be negotiating there is no safe way
+ * to determine if the link partner supports flow control
+ * or not. So just disable it completely in this case.
+ */
+ b44_set_flow_ctrl(bp, 0, 0);
+ }
+
+out:
+ return err;
+}
+
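+/* The MIB counters are configured clear-on-read (MIB_CTRL_CLR_ON_READ),
+ * so each register read yields the delta since the previous read and is
+ * accumulated into hw_stats.
+ */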
+static void b44_stats_update(struct b44 *bp)
+{
+ unsigned long reg;
+ u32 *val;
+
+ val = &bp->hw_stats.tx_good_octets;
+ for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
+ *val++ += br32(bp, reg);
+ }
+
+ /* Pad */
+ reg += 8*4UL;
+
+ for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
+ *val++ += br32(bp, reg);
+ }
+}
+
+static void b44_link_report(struct b44 *bp)
+{
+ if (!netif_carrier_ok(bp->dev)) {
+ netdev_info(bp->dev, "Link is down\n");
+ } else {
+ netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
+ (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
+ (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
+
+ netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
+ (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
+ (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
+ }
+}
+
+static void b44_check_phy(struct b44 *bp)
+{
+ u32 bmsr, aux;
+
+ if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
+ bp->flags |= B44_FLAG_100_BASE_T;
+ bp->flags |= B44_FLAG_FULL_DUPLEX;
+ if (!netif_carrier_ok(bp->dev)) {
+ u32 val = br32(bp, B44_TX_CTRL);
+ val |= TX_CTRL_DUPLEX;
+ bw32(bp, B44_TX_CTRL, val);
+ netif_carrier_on(bp->dev);
+ b44_link_report(bp);
+ }
+ return;
+ }
+
+ if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
+ !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
+ (bmsr != 0xffff)) {
+ if (aux & MII_AUXCTRL_SPEED)
+ bp->flags |= B44_FLAG_100_BASE_T;
+ else
+ bp->flags &= ~B44_FLAG_100_BASE_T;
+ if (aux & MII_AUXCTRL_DUPLEX)
+ bp->flags |= B44_FLAG_FULL_DUPLEX;
+ else
+ bp->flags &= ~B44_FLAG_FULL_DUPLEX;
+
+ if (!netif_carrier_ok(bp->dev) &&
+ (bmsr & BMSR_LSTATUS)) {
+ u32 val = br32(bp, B44_TX_CTRL);
+ u32 local_adv, remote_adv;
+
+ if (bp->flags & B44_FLAG_FULL_DUPLEX)
+ val |= TX_CTRL_DUPLEX;
+ else
+ val &= ~TX_CTRL_DUPLEX;
+ bw32(bp, B44_TX_CTRL, val);
+
+ if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
+ !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
+ !b44_readphy(bp, MII_LPA, &remote_adv))
+ b44_set_flow_ctrl(bp, local_adv, remote_adv);
+
+ /* Link now up */
+ netif_carrier_on(bp->dev);
+ b44_link_report(bp);
+ } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
+ /* Link now down */
+ netif_carrier_off(bp->dev);
+ b44_link_report(bp);
+ }
+
+ if (bmsr & BMSR_RFAULT)
+ netdev_warn(bp->dev, "Remote fault detected in PHY\n");
+ if (bmsr & BMSR_JCD)
+ netdev_warn(bp->dev, "Jabber detected in PHY\n");
+ }
+}
+
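+/* Once-a-second housekeeping: track PHY/link changes and fold the
+ * hardware MIB counters into hw_stats before they can overflow.
+ */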
+static void b44_timer(unsigned long __opaque)
+{
+ struct b44 *bp = (struct b44 *) __opaque;
+
+ spin_lock_irq(&bp->lock);
+
+ b44_check_phy(bp);
+
+ b44_stats_update(bp);
+
+ spin_unlock_irq(&bp->lock);
+
+ mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
+}
+
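+/* Reclaim completed TX buffers: the DMA TX status register reports the
+ * current descriptor as a byte offset, which we convert to a ring index.
+ */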
+static void b44_tx(struct b44 *bp)
+{
+ u32 cur, cons;
+
+ cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
+ cur /= sizeof(struct dma_desc);
+
+ /* XXX needs updating when NETIF_F_SG is supported */
+ for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
+ struct ring_info *rp = &bp->tx_buffers[cons];
+ struct sk_buff *skb = rp->skb;
+
+ BUG_ON(skb == NULL);
+
+ dma_unmap_single(bp->sdev->dma_dev,
+ rp->mapping,
+ skb->len,
+ DMA_TO_DEVICE);
+ rp->skb = NULL;
+ dev_kfree_skb(skb);
+ }
+
+ bp->tx_cons = cons;
+ if (netif_queue_stopped(bp->dev) &&
+ TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
+ netif_wake_queue(bp->dev);
+
+ bw32(bp, B44_GPTIMER, 0);
+}
+
+/* Works like this. This chip writes a 'struct rx_header' 30 bytes
+ * before the DMA address you give it. So we allocate 30 more bytes
+ * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
+ * point the chip at 30 bytes past where the rx_header will go.
+ */
+static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
+{
+ struct dma_desc *dp;
+ struct ring_info *src_map, *map;
+ struct rx_header *rh;
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ int dest_idx;
+ u32 ctrl;
+
+ src_map = NULL;
+ if (src_idx >= 0)
+ src_map = &bp->rx_buffers[src_idx];
+ dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
+ map = &bp->rx_buffers[dest_idx];
+ skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+ RX_PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+
+	/* Hardware bug workaround: the chip is unable to do PCI DMA
+	 * to/from anything above 1GB :-(
+	 */
+ if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
+ mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
+ /* Sigh... */
+ if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+ dma_unmap_single(bp->sdev->dma_dev, mapping,
+ RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
+ if (skb == NULL)
+ return -ENOMEM;
+ mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+ RX_PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
+ mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
+ if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+				dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+ bp->force_copybreak = 1;
+ }
+
+ rh = (struct rx_header *) skb->data;
+
+ rh->len = 0;
+ rh->flags = 0;
+
+ map->skb = skb;
+ map->mapping = mapping;
+
+ if (src_map != NULL)
+ src_map->skb = NULL;
+
+ ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
+ if (dest_idx == (B44_RX_RING_SIZE - 1))
+ ctrl |= DESC_CTRL_EOT;
+
+ dp = &bp->rx_ring[dest_idx];
+ dp->ctrl = cpu_to_le32(ctrl);
+ dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
+
+ if (bp->flags & B44_FLAG_RX_RING_HACK)
+ b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
+ dest_idx * sizeof(*dp),
+ DMA_BIDIRECTIONAL);
+
+ return RX_PKT_BUF_SZ;
+}
+
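+/* Reuse an already-mapped RX buffer for a new ring slot (used when a
+ * packet is dropped or copied) instead of allocating a fresh skb.
+ */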
+static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
+{
+ struct dma_desc *src_desc, *dest_desc;
+ struct ring_info *src_map, *dest_map;
+ struct rx_header *rh;
+ int dest_idx;
+ __le32 ctrl;
+
+ dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
+ dest_desc = &bp->rx_ring[dest_idx];
+ dest_map = &bp->rx_buffers[dest_idx];
+ src_desc = &bp->rx_ring[src_idx];
+ src_map = &bp->rx_buffers[src_idx];
+
+ dest_map->skb = src_map->skb;
+ rh = (struct rx_header *) src_map->skb->data;
+ rh->len = 0;
+ rh->flags = 0;
+ dest_map->mapping = src_map->mapping;
+
+ if (bp->flags & B44_FLAG_RX_RING_HACK)
+ b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
+ src_idx * sizeof(*src_desc),
+ DMA_BIDIRECTIONAL);
+
+ ctrl = src_desc->ctrl;
+ if (dest_idx == (B44_RX_RING_SIZE - 1))
+ ctrl |= cpu_to_le32(DESC_CTRL_EOT);
+ else
+ ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
+
+ dest_desc->ctrl = ctrl;
+ dest_desc->addr = src_desc->addr;
+
+ src_map->skb = NULL;
+
+ if (bp->flags & B44_FLAG_RX_RING_HACK)
+ b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
+ dest_idx * sizeof(*dest_desc),
+ DMA_BIDIRECTIONAL);
+
+ dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
+ RX_PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+}
+
+static int b44_rx(struct b44 *bp, int budget)
+{
+ int received;
+ u32 cons, prod;
+
+ received = 0;
+ prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
+ prod /= sizeof(struct dma_desc);
+ cons = bp->rx_cons;
+
+ while (cons != prod && budget > 0) {
+ struct ring_info *rp = &bp->rx_buffers[cons];
+ struct sk_buff *skb = rp->skb;
+ dma_addr_t map = rp->mapping;
+ struct rx_header *rh;
+ u16 len;
+
+ dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
+ RX_PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ rh = (struct rx_header *) skb->data;
+ len = le16_to_cpu(rh->len);
+ if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
+ (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
+ drop_it:
+ b44_recycle_rx(bp, cons, bp->rx_prod);
+ drop_it_no_recycle:
+ bp->dev->stats.rx_dropped++;
+ goto next_pkt;
+ }
+
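+		/* A zero length presumably means the chip has not yet
+		 * finished DMA-ing the rx_header; give it a few
+		 * microseconds to settle before dropping the packet.
+		 */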
+ if (len == 0) {
+ int i = 0;
+
+ do {
+ udelay(2);
+ barrier();
+ len = le16_to_cpu(rh->len);
+ } while (len == 0 && i++ < 5);
+ if (len == 0)
+ goto drop_it;
+ }
+
+ /* Omit CRC. */
+ len -= 4;
+
+ if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
+ int skb_size;
+ skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
+ if (skb_size < 0)
+ goto drop_it;
+ dma_unmap_single(bp->sdev->dma_dev, map,
+ skb_size, DMA_FROM_DEVICE);
+ /* Leave out rx_header */
+ skb_put(skb, len + RX_PKT_OFFSET);
+ skb_pull(skb, RX_PKT_OFFSET);
+ } else {
+ struct sk_buff *copy_skb;
+
+ b44_recycle_rx(bp, cons, bp->rx_prod);
+ copy_skb = netdev_alloc_skb(bp->dev, len + 2);
+ if (copy_skb == NULL)
+ goto drop_it_no_recycle;
+
+ skb_reserve(copy_skb, 2);
+ skb_put(copy_skb, len);
+ /* DMA sync done above, copy just the actual packet */
+ skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
+ copy_skb->data, len);
+ skb = copy_skb;
+ }
+ skb_checksum_none_assert(skb);
+ skb->protocol = eth_type_trans(skb, bp->dev);
+ netif_receive_skb(skb);
+ received++;
+ budget--;
+ next_pkt:
+ bp->rx_prod = (bp->rx_prod + 1) &
+ (B44_RX_RING_SIZE - 1);
+ cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
+ }
+
+ bp->rx_cons = cons;
+ bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
+
+ return received;
+}
+
+static int b44_poll(struct napi_struct *napi, int budget)
+{
+ struct b44 *bp = container_of(napi, struct b44, napi);
+ int work_done;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
+ /* spin_lock(&bp->tx_lock); */
+ b44_tx(bp);
+ /* spin_unlock(&bp->tx_lock); */
+ }
+ if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */
+ bp->istat &= ~ISTAT_RFO;
+ b44_disable_ints(bp);
+ ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
+ netif_wake_queue(bp->dev);
+ }
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ work_done = 0;
+ if (bp->istat & ISTAT_RX)
+ work_done += b44_rx(bp, budget);
+
+ if (bp->istat & ISTAT_ERRORS) {
+ spin_lock_irqsave(&bp->lock, flags);
+ b44_halt(bp);
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
+ netif_wake_queue(bp->dev);
+ spin_unlock_irqrestore(&bp->lock, flags);
+ work_done = 0;
+ }
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ b44_enable_ints(bp);
+ }
+
+ return work_done;
+}
+
+static irqreturn_t b44_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct b44 *bp = netdev_priv(dev);
+ u32 istat, imask;
+ int handled = 0;
+
+ spin_lock(&bp->lock);
+
+ istat = br32(bp, B44_ISTAT);
+ imask = br32(bp, B44_IMASK);
+
+	/* The interrupt mask register controls which interrupt bits
+	 * will actually raise an interrupt to the CPU when set by hw/firmware,
+	 * but it does not clear those bits in ISTAT, so mask them off here.
+	 */
+ istat &= imask;
+ if (istat) {
+ handled = 1;
+
+ if (unlikely(!netif_running(dev))) {
+ netdev_info(dev, "late interrupt\n");
+ goto irq_ack;
+ }
+
+ if (napi_schedule_prep(&bp->napi)) {
+ /* NOTE: These writes are posted by the readback of
+ * the ISTAT register below.
+ */
+ bp->istat = istat;
+ __b44_disable_ints(bp);
+ __napi_schedule(&bp->napi);
+ }
+
+irq_ack:
+ bw32(bp, B44_ISTAT, istat);
+ br32(bp, B44_ISTAT);
+ }
+ spin_unlock(&bp->lock);
+ return IRQ_RETVAL(handled);
+}
+
+static void b44_tx_timeout(struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ netdev_err(dev, "transmit timed out, resetting\n");
+
+ spin_lock_irq(&bp->lock);
+
+ b44_halt(bp);
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET);
+
+ spin_unlock_irq(&bp->lock);
+
+ b44_enable_ints(bp);
+
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+ int rc = NETDEV_TX_OK;
+ dma_addr_t mapping;
+ u32 len, entry, ctrl;
+ unsigned long flags;
+
+ len = skb->len;
+ spin_lock_irqsave(&bp->lock, flags);
+
+ /* This is a hard error, log it. */
+ if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
+ netif_stop_queue(dev);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ goto err_out;
+ }
+
+ mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+ struct sk_buff *bounce_skb;
+
+ /* Chip can't handle DMA to/from >1GB, use bounce buffer */
+ if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+ dma_unmap_single(bp->sdev->dma_dev, mapping, len,
+ DMA_TO_DEVICE);
+
+ bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
+ if (!bounce_skb)
+ goto err_out;
+
+ mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+ if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+ dma_unmap_single(bp->sdev->dma_dev, mapping,
+ len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(bounce_skb);
+ goto err_out;
+ }
+
+ skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
+ dev_kfree_skb_any(skb);
+ skb = bounce_skb;
+ }
+
+ entry = bp->tx_prod;
+ bp->tx_buffers[entry].skb = skb;
+ bp->tx_buffers[entry].mapping = mapping;
+
+ ctrl = (len & DESC_CTRL_LEN);
+ ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
+ if (entry == (B44_TX_RING_SIZE - 1))
+ ctrl |= DESC_CTRL_EOT;
+
+ bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
+ bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
+
+ if (bp->flags & B44_FLAG_TX_RING_HACK)
+ b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
+ entry * sizeof(bp->tx_ring[0]),
+ DMA_TO_DEVICE);
+
+ entry = NEXT_TX(entry);
+
+ bp->tx_prod = entry;
+
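+	/* Make sure the descriptor writes are visible to the device
+	 * before the chip is told about the new producer index below.
+	 */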
+ wmb();
+
+ bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
+ if (bp->flags & B44_FLAG_BUGGY_TXPTR)
+ bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
+ if (bp->flags & B44_FLAG_REORDER_BUG)
+ br32(bp, B44_DMATX_PTR);
+
+ if (TX_BUFFS_AVAIL(bp) < 1)
+ netif_stop_queue(dev);
+
+out_unlock:
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ return rc;
+
+err_out:
+ rc = NETDEV_TX_BUSY;
+ goto out_unlock;
+}
+
+static int b44_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
+ return -EINVAL;
+
+ if (!netif_running(dev)) {
+		/* We'll just catch it later when the
+		 * device is brought up.
+		 */
+ dev->mtu = new_mtu;
+ return 0;
+ }
+
+ spin_lock_irq(&bp->lock);
+ b44_halt(bp);
+ dev->mtu = new_mtu;
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET);
+ spin_unlock_irq(&bp->lock);
+
+ b44_enable_ints(bp);
+
+ return 0;
+}
+
+/* Free up pending packets in all rx/tx rings.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking stack, so no interrupts or new tx packets will
+ * end up in the driver. bp->lock is not held and we are not
+ * in an interrupt context and thus may sleep.
+ */
+static void b44_free_rings(struct b44 *bp)
+{
+ struct ring_info *rp;
+ int i;
+
+ for (i = 0; i < B44_RX_RING_SIZE; i++) {
+ rp = &bp->rx_buffers[i];
+
+ if (rp->skb == NULL)
+ continue;
+ dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(rp->skb);
+ rp->skb = NULL;
+ }
+
+ /* XXX needs changes once NETIF_F_SG is set... */
+ for (i = 0; i < B44_TX_RING_SIZE; i++) {
+ rp = &bp->tx_buffers[i];
+
+ if (rp->skb == NULL)
+ continue;
+ dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(rp->skb);
+ rp->skb = NULL;
+ }
+}
+
+/* Initialize tx/rx rings for packet processing.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking stack, so no interrupts or new tx packets will
+ * end up in the driver.
+ */
+static void b44_init_rings(struct b44 *bp)
+{
+ int i;
+
+ b44_free_rings(bp);
+
+ memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
+ memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
+
+ if (bp->flags & B44_FLAG_RX_RING_HACK)
+ dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
+ DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
+
+ if (bp->flags & B44_FLAG_TX_RING_HACK)
+ dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
+ DMA_TABLE_BYTES, DMA_TO_DEVICE);
+
+ for (i = 0; i < bp->rx_pending; i++) {
+ if (b44_alloc_rx_skb(bp, -1, i) < 0)
+ break;
+ }
+}
+
+/*
+ * Must only be invoked with interrupt sources disabled and
+ * the hardware shut down.
+ */
+static void b44_free_consistent(struct b44 *bp)
+{
+ kfree(bp->rx_buffers);
+ bp->rx_buffers = NULL;
+ kfree(bp->tx_buffers);
+ bp->tx_buffers = NULL;
+ if (bp->rx_ring) {
+ if (bp->flags & B44_FLAG_RX_RING_HACK) {
+ dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
+ DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
+ kfree(bp->rx_ring);
+ } else
+ dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+ bp->rx_ring, bp->rx_ring_dma);
+ bp->rx_ring = NULL;
+ bp->flags &= ~B44_FLAG_RX_RING_HACK;
+ }
+ if (bp->tx_ring) {
+ if (bp->flags & B44_FLAG_TX_RING_HACK) {
+ dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
+ DMA_TABLE_BYTES, DMA_TO_DEVICE);
+ kfree(bp->tx_ring);
+ } else
+ dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+ bp->tx_ring, bp->tx_ring_dma);
+ bp->tx_ring = NULL;
+ bp->flags &= ~B44_FLAG_TX_RING_HACK;
+ }
+}
+
+/*
+ * Must only be invoked with interrupt sources disabled and
+ * the hardware shut down. Can sleep.
+ */
+static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
+{
+ int size;
+
+ size = B44_RX_RING_SIZE * sizeof(struct ring_info);
+ bp->rx_buffers = kzalloc(size, gfp);
+ if (!bp->rx_buffers)
+ goto out_err;
+
+ size = B44_TX_RING_SIZE * sizeof(struct ring_info);
+ bp->tx_buffers = kzalloc(size, gfp);
+ if (!bp->tx_buffers)
+ goto out_err;
+
+ size = DMA_TABLE_BYTES;
+ bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+ &bp->rx_ring_dma, gfp);
+ if (!bp->rx_ring) {
+		/* Allocation may have failed because dma_alloc_coherent
+		 * insisted on use of GFP_DMA, which is more restrictive
+		 * than necessary...
+		 */
+ struct dma_desc *rx_ring;
+ dma_addr_t rx_ring_dma;
+
+ rx_ring = kzalloc(size, gfp);
+ if (!rx_ring)
+ goto out_err;
+
+ rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
+ DMA_TABLE_BYTES,
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
+ rx_ring_dma + size > DMA_BIT_MASK(30)) {
+ kfree(rx_ring);
+ goto out_err;
+ }
+
+ bp->rx_ring = rx_ring;
+ bp->rx_ring_dma = rx_ring_dma;
+ bp->flags |= B44_FLAG_RX_RING_HACK;
+ }
+
+ bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+ &bp->tx_ring_dma, gfp);
+ if (!bp->tx_ring) {
+		/* Allocation may have failed because dma_alloc_coherent
+		 * insisted on use of GFP_DMA, which is more restrictive
+		 * than necessary...
+		 */
+ struct dma_desc *tx_ring;
+ dma_addr_t tx_ring_dma;
+
+ tx_ring = kzalloc(size, gfp);
+ if (!tx_ring)
+ goto out_err;
+
+ tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
+ DMA_TABLE_BYTES,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
+ tx_ring_dma + size > DMA_BIT_MASK(30)) {
+ kfree(tx_ring);
+ goto out_err;
+ }
+
+ bp->tx_ring = tx_ring;
+ bp->tx_ring_dma = tx_ring_dma;
+ bp->flags |= B44_FLAG_TX_RING_HACK;
+ }
+
+ return 0;
+
+out_err:
+ b44_free_consistent(bp);
+ return -ENOMEM;
+}
+
+/* bp->lock is held. */
+static void b44_clear_stats(struct b44 *bp)
+{
+ unsigned long reg;
+
+ bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
+ for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
+ br32(bp, reg);
+ for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
+ br32(bp, reg);
+}
+
+/* bp->lock is held. */
+static void b44_chip_reset(struct b44 *bp, int reset_kind)
+{
+ struct ssb_device *sdev = bp->sdev;
+ bool was_enabled;
+
+ was_enabled = ssb_device_is_enabled(bp->sdev);
+
+ ssb_device_enable(bp->sdev, 0);
+ ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
+
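+	/* If the core was already running, stop TX cleanly and wait
+	 * for the RX engine to idle before touching the DMA state.
+	 */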
+ if (was_enabled) {
+ bw32(bp, B44_RCV_LAZY, 0);
+ bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
+ b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
+ bw32(bp, B44_DMATX_CTRL, 0);
+ bp->tx_prod = bp->tx_cons = 0;
+ if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
+ b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
+ 100, 0);
+ }
+ bw32(bp, B44_DMARX_CTRL, 0);
+ bp->rx_prod = bp->rx_cons = 0;
+ }
+
+ b44_clear_stats(bp);
+
+	/*
+	 * Don't enable the PHY if we are doing a partial reset;
+	 * we are probably going to power down.
+	 */
+ if (reset_kind == B44_CHIP_RESET_PARTIAL)
+ return;
+
+ switch (sdev->bus->bustype) {
+ case SSB_BUSTYPE_SSB:
+ bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
+ (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
+ B44_MDC_RATIO)
+ & MDIO_CTRL_MAXF_MASK)));
+ break;
+ case SSB_BUSTYPE_PCI:
+ bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
+ (0x0d & MDIO_CTRL_MAXF_MASK)));
+ break;
+ case SSB_BUSTYPE_PCMCIA:
+ case SSB_BUSTYPE_SDIO:
+ WARN_ON(1); /* A device with this bus does not exist. */
+ break;
+ }
+
+ br32(bp, B44_MDIO_CTRL);
+
+ if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
+ bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
+ br32(bp, B44_ENET_CTRL);
+ bp->flags &= ~B44_FLAG_INTERNAL_PHY;
+ } else {
+ u32 val = br32(bp, B44_DEVCTRL);
+
+ if (val & DEVCTRL_EPR) {
+ bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
+ br32(bp, B44_DEVCTRL);
+ udelay(100);
+ }
+ bp->flags |= B44_FLAG_INTERNAL_PHY;
+ }
+}
+
+/* bp->lock is held. */
+static void b44_halt(struct b44 *bp)
+{
+ b44_disable_ints(bp);
+ /* reset PHY */
+ b44_phy_reset(bp);
+ /* power down PHY */
+ netdev_info(bp->dev, "powering down PHY\n");
+ bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
+	/* Now reset the chip, but without enabling the MAC & PHY
+	 * part of it. This has to be done _after_ we shut down the PHY.
+	 */
+ b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
+}
+
+/* bp->lock is held. */
+static void __b44_set_mac_addr(struct b44 *bp)
+{
+ bw32(bp, B44_CAM_CTRL, 0);
+ if (!(bp->dev->flags & IFF_PROMISC)) {
+ u32 val;
+
+ __b44_cam_write(bp, bp->dev->dev_addr, 0);
+ val = br32(bp, B44_CAM_CTRL);
+ bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
+ }
+}
+
+static int b44_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct b44 *bp = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ u32 val;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ spin_lock_irq(&bp->lock);
+
+ val = br32(bp, B44_RXCONFIG);
+ if (!(val & RXCONFIG_CAM_ABSENT))
+ __b44_set_mac_addr(bp);
+
+ spin_unlock_irq(&bp->lock);
+
+ return 0;
+}
+
+/* Called at device open time to get the chip ready for
+ * packet processing. Invoked with bp->lock held.
+ */
+static void __b44_set_rx_mode(struct net_device *);
+static void b44_init_hw(struct b44 *bp, int reset_kind)
+{
+ u32 val;
+
+ b44_chip_reset(bp, B44_CHIP_RESET_FULL);
+ if (reset_kind == B44_FULL_RESET) {
+ b44_phy_reset(bp);
+ b44_setup_phy(bp);
+ }
+
+ /* Enable CRC32, set proper LED modes and power on PHY */
+ bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
+ bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
+
+ /* This sets the MAC address too. */
+ __b44_set_rx_mode(bp->dev);
+
+ /* MTU + eth header + possible VLAN tag + struct rx_header */
+ bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
+ bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
+
+ bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
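+	/* Tell the RX engine to deposit packets RX_PKT_OFFSET bytes
+	 * into the buffer, leaving room for the chip-written rx_header.
+	 */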
+ if (reset_kind == B44_PARTIAL_RESET) {
+ bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
+ (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
+ } else {
+ bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
+ bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
+ bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
+ (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
+ bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
+
+ bw32(bp, B44_DMARX_PTR, bp->rx_pending);
+ bp->rx_prod = bp->rx_pending;
+
+ bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
+ }
+
+ val = br32(bp, B44_ENET_CTRL);
+ bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
+}
+
+static int b44_open(struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+ int err;
+
+ err = b44_alloc_consistent(bp, GFP_KERNEL);
+ if (err)
+ goto out;
+
+ napi_enable(&bp->napi);
+
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET);
+
+ b44_check_phy(bp);
+
+ err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
+ if (unlikely(err < 0)) {
+ napi_disable(&bp->napi);
+ b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
+ b44_free_rings(bp);
+ b44_free_consistent(bp);
+ goto out;
+ }
+
+ init_timer(&bp->timer);
+ bp->timer.expires = jiffies + HZ;
+ bp->timer.data = (unsigned long) bp;
+ bp->timer.function = b44_timer;
+ add_timer(&bp->timer);
+
+ b44_enable_ints(bp);
+ netif_start_queue(dev);
+out:
+ return err;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void b44_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ b44_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
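+/* Copy @bytes of @pp into the chip's pattern filter memory at
+ * @table_offset, one 32-bit word at a time through the
+ * FILT_ADDR/FILT_DATA indirect register pair.
+ */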
+static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
+{
+ u32 i;
+ u32 *pattern = (u32 *) pp;
+
+ for (i = 0; i < bytes; i += sizeof(u32)) {
+ bw32(bp, B44_FILT_ADDR, table_offset + i);
+ bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
+ }
+}
+
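+/* Build a Wake-on-LAN magic packet match at @offset in @ppattern: six
+ * 0xff sync bytes followed by the MAC address repeated as many times as
+ * fits in B44_PATTERN_SIZE, with the matching bits set in @pmask.
+ * Returns the pattern length minus one, as B44_WKUP_LEN expects.
+ */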
+static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
+{
+ int magicsync = 6;
+ int k, j, len = offset;
+ int ethaddr_bytes = ETH_ALEN;
+
+ memset(ppattern + offset, 0xff, magicsync);
+ for (j = 0; j < magicsync; j++)
+ set_bit(len++, (unsigned long *) pmask);
+
+ for (j = 0; j < B44_MAX_PATTERNS; j++) {
+ if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
+ ethaddr_bytes = ETH_ALEN;
+ else
+ ethaddr_bytes = B44_PATTERN_SIZE - len;
+		if (ethaddr_bytes <= 0)
+ break;
+		for (k = 0; k < ethaddr_bytes; k++) {
+ ppattern[offset + magicsync +
+ (j * ETH_ALEN) + k] = macaddr[k];
+ set_bit(len++, (unsigned long *) pmask);
+ }
+ }
+ return len - 1;
+}
+
+/* Setup magic packet patterns in the b44 WOL
+ * pattern matching filter.
+ */
+static void b44_setup_pseudo_magicp(struct b44 *bp)
+{
+ u32 val;
+ int plen0, plen1, plen2;
+ u8 *pwol_pattern;
+ u8 pwol_mask[B44_PMASK_SIZE];
+
+ pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
+ if (!pwol_pattern) {
+ pr_err("Memory not available for WOL\n");
+ return;
+ }
+
+	/* IPv4 magic packet pattern - pattern 0. */
+ memset(pwol_mask, 0, B44_PMASK_SIZE);
+ plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
+ B44_ETHIPV4UDP_HLEN);
+
+ bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
+ bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
+
+	/* Raw Ethernet II magic packet pattern - pattern 1 */
+ memset(pwol_pattern, 0, B44_PATTERN_SIZE);
+ memset(pwol_mask, 0, B44_PMASK_SIZE);
+ plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
+ ETH_HLEN);
+
+ bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
+ B44_PATTERN_BASE + B44_PATTERN_SIZE);
+ bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
+ B44_PMASK_BASE + B44_PMASK_SIZE);
+
+	/* IPv6 magic packet pattern - pattern 2 */
+ memset(pwol_pattern, 0, B44_PATTERN_SIZE);
+ memset(pwol_mask, 0, B44_PMASK_SIZE);
+ plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
+ B44_ETHIPV6UDP_HLEN);
+
+ bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
+ B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
+ bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
+ B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
+
+ kfree(pwol_pattern);
+
+	/* Set these patterns' lengths: one less than each real length. */
+ val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
+ bw32(bp, B44_WKUP_LEN, val);
+
+ /* enable wakeup pattern matching */
+ val = br32(bp, B44_DEVCTRL);
+ bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
+}
+
+#ifdef CONFIG_B44_PCI
+static void b44_setup_wol_pci(struct b44 *bp)
+{
+ u16 val;
+
+ if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
+ bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
+ pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
+ pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
+ }
+}
+#else
+static inline void b44_setup_wol_pci(struct b44 *bp) { }
+#endif /* CONFIG_B44_PCI */
+
+static void b44_setup_wol(struct b44 *bp)
+{
+ u32 val;
+
+ bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
+
+ if (bp->flags & B44_FLAG_B0_ANDLATER) {
+
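+		/* B0 and later parts have a dedicated magic packet
+		 * matcher: program the station address and enable magic
+		 * packet mode (DEVCTRL_MPM) instead of loading the
+		 * pseudo-magic patterns.
+		 */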
+ bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
+
+ val = bp->dev->dev_addr[2] << 24 |
+ bp->dev->dev_addr[3] << 16 |
+ bp->dev->dev_addr[4] << 8 |
+ bp->dev->dev_addr[5];
+ bw32(bp, B44_ADDR_LO, val);
+
+ val = bp->dev->dev_addr[0] << 8 |
+ bp->dev->dev_addr[1];
+ bw32(bp, B44_ADDR_HI, val);
+
+ val = br32(bp, B44_DEVCTRL);
+ bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
+
+ } else {
+ b44_setup_pseudo_magicp(bp);
+ }
+ b44_setup_wol_pci(bp);
+}
+
+static int b44_close(struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ napi_disable(&bp->napi);
+
+ del_timer_sync(&bp->timer);
+
+ spin_lock_irq(&bp->lock);
+
+ b44_halt(bp);
+ b44_free_rings(bp);
+ netif_carrier_off(dev);
+
+ spin_unlock_irq(&bp->lock);
+
+ free_irq(dev->irq, dev);
+
+ if (bp->flags & B44_FLAG_WOL_ENABLE) {
+ b44_init_hw(bp, B44_PARTIAL_RESET);
+ b44_setup_wol(bp);
+ }
+
+ b44_free_consistent(bp);
+
+ return 0;
+}
+
+static struct net_device_stats *b44_get_stats(struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+ struct net_device_stats *nstat = &dev->stats;
+ struct b44_hw_stats *hwstat = &bp->hw_stats;
+
+ /* Convert HW stats into netdevice stats. */
+ nstat->rx_packets = hwstat->rx_pkts;
+ nstat->tx_packets = hwstat->tx_pkts;
+ nstat->rx_bytes = hwstat->rx_octets;
+ nstat->tx_bytes = hwstat->tx_octets;
+ nstat->tx_errors = (hwstat->tx_jabber_pkts +
+ hwstat->tx_oversize_pkts +
+ hwstat->tx_underruns +
+ hwstat->tx_excessive_cols +
+ hwstat->tx_late_cols);
+ nstat->multicast = hwstat->tx_multicast_pkts;
+ nstat->collisions = hwstat->tx_total_cols;
+
+ nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
+ hwstat->rx_undersize);
+ nstat->rx_over_errors = hwstat->rx_missed_pkts;
+ nstat->rx_frame_errors = hwstat->rx_align_errs;
+ nstat->rx_crc_errors = hwstat->rx_crc_errs;
+ nstat->rx_errors = (hwstat->rx_jabber_pkts +
+ hwstat->rx_oversize_pkts +
+ hwstat->rx_missed_pkts +
+ hwstat->rx_crc_align_errs +
+ hwstat->rx_undersize +
+ hwstat->rx_crc_errs +
+ hwstat->rx_align_errs +
+ hwstat->rx_symbol_errs);
+
+ nstat->tx_aborted_errors = hwstat->tx_underruns;
+#if 0
+ /* Carrier lost counter seems to be broken for some devices */
+ nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
+#endif
+
+ return nstat;
+}
+
+static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ int i, num_ents;
+
+ num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ if (i == num_ents)
+ break;
+ __b44_cam_write(bp, ha->addr, i++ + 1);
+ }
+	return i + 1;
+}
+
+static void __b44_set_rx_mode(struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+ u32 val;
+
+ val = br32(bp, B44_RXCONFIG);
+ val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
+ if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
+ val |= RXCONFIG_PROMISC;
+ bw32(bp, B44_RXCONFIG, val);
+ } else {
+ unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
+ int i = 1;
+
+ __b44_set_mac_addr(bp);
+
+ if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
+ val |= RXCONFIG_ALLMULTI;
+ else
+ i = __b44_load_mcast(bp, dev);
+
+ for (; i < 64; i++)
+ __b44_cam_write(bp, zero, i);
+
+ bw32(bp, B44_RXCONFIG, val);
+ val = br32(bp, B44_CAM_CTRL);
+ bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
+ }
+}
+
+static void b44_set_rx_mode(struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ spin_lock_irq(&bp->lock);
+ __b44_set_rx_mode(dev);
+ spin_unlock_irq(&bp->lock);
+}
+
+static u32 b44_get_msglevel(struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+ return bp->msg_enable;
+}
+
+static void b44_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct b44 *bp = netdev_priv(dev);
+ bp->msg_enable = value;
+}
+
+static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct b44 *bp = netdev_priv(dev);
+ struct ssb_bus *bus = bp->sdev->bus;
+
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ switch (bus->bustype) {
+ case SSB_BUSTYPE_PCI:
+ strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
+ break;
+ case SSB_BUSTYPE_SSB:
+ strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
+ break;
+ case SSB_BUSTYPE_PCMCIA:
+ case SSB_BUSTYPE_SDIO:
+ WARN_ON(1); /* A device with this bus does not exist. */
+ break;
+ }
+}
+
+static int b44_nway_reset(struct net_device *dev)
+{
+ struct b44 *bp = netdev_priv(dev);
+ u32 bmcr;
+ int r;
+
+ spin_lock_irq(&bp->lock);
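+	/* BMCR is deliberately read twice; the first read may return
+	 * stale data on some PHYs, so only the second value is used.
+	 */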
+ b44_readphy(bp, MII_BMCR, &bmcr);
+ b44_readphy(bp, MII_BMCR, &bmcr);
+ r = -EINVAL;
+ if (bmcr & BMCR_ANENABLE) {
+ b44_writephy(bp, MII_BMCR,
+ bmcr | BMCR_ANRESTART);
+ r = 0;
+ }
+ spin_unlock_irq(&bp->lock);
+
+ return r;
+}
+
+static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ cmd->supported = (SUPPORTED_Autoneg);
+ cmd->supported |= (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_MII);
+
+ cmd->advertising = 0;
+ if (bp->flags & B44_FLAG_ADV_10HALF)
+ cmd->advertising |= ADVERTISED_10baseT_Half;
+ if (bp->flags & B44_FLAG_ADV_10FULL)
+ cmd->advertising |= ADVERTISED_10baseT_Full;
+ if (bp->flags & B44_FLAG_ADV_100HALF)
+ cmd->advertising |= ADVERTISED_100baseT_Half;
+ if (bp->flags & B44_FLAG_ADV_100FULL)
+ cmd->advertising |= ADVERTISED_100baseT_Full;
+ cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
+ SPEED_100 : SPEED_10));
+ cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ cmd->port = 0;
+ cmd->phy_address = bp->phy_addr;
+ cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
+ XCVR_INTERNAL : XCVR_EXTERNAL;
+ cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
+ AUTONEG_DISABLE : AUTONEG_ENABLE;
+ if (cmd->autoneg == AUTONEG_ENABLE)
+ cmd->advertising |= ADVERTISED_Autoneg;
+	if (!netif_running(dev)) {
+ ethtool_cmd_speed_set(cmd, 0);
+ cmd->duplex = 0xff;
+ }
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+ return 0;
+}
+
+static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct b44 *bp = netdev_priv(dev);
+ u32 speed = ethtool_cmd_speed(cmd);
+
+ /* We do not support gigabit. */
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->advertising &
+ (ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full))
+ return -EINVAL;
+ } else if ((speed != SPEED_100 &&
+ speed != SPEED_10) ||
+ (cmd->duplex != DUPLEX_HALF &&
+ cmd->duplex != DUPLEX_FULL)) {
+ return -EINVAL;
+ }
+
+ spin_lock_irq(&bp->lock);
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ bp->flags &= ~(B44_FLAG_FORCE_LINK |
+ B44_FLAG_100_BASE_T |
+ B44_FLAG_FULL_DUPLEX |
+ B44_FLAG_ADV_10HALF |
+ B44_FLAG_ADV_10FULL |
+ B44_FLAG_ADV_100HALF |
+ B44_FLAG_ADV_100FULL);
+ if (cmd->advertising == 0) {
+ bp->flags |= (B44_FLAG_ADV_10HALF |
+ B44_FLAG_ADV_10FULL |
+ B44_FLAG_ADV_100HALF |
+ B44_FLAG_ADV_100FULL);
+ } else {
+ if (cmd->advertising & ADVERTISED_10baseT_Half)
+ bp->flags |= B44_FLAG_ADV_10HALF;
+ if (cmd->advertising & ADVERTISED_10baseT_Full)
+ bp->flags |= B44_FLAG_ADV_10FULL;
+ if (cmd->advertising & ADVERTISED_100baseT_Half)
+ bp->flags |= B44_FLAG_ADV_100HALF;
+ if (cmd->advertising & ADVERTISED_100baseT_Full)
+ bp->flags |= B44_FLAG_ADV_100FULL;
+ }
+ } else {
+ bp->flags |= B44_FLAG_FORCE_LINK;
+ bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
+ if (speed == SPEED_100)
+ bp->flags |= B44_FLAG_100_BASE_T;
+ if (cmd->duplex == DUPLEX_FULL)
+ bp->flags |= B44_FLAG_FULL_DUPLEX;
+ }
+
+ if (netif_running(dev))
+ b44_setup_phy(bp);
+
+ spin_unlock_irq(&bp->lock);
+
+ return 0;
+}
+
+static void b44_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ ering->rx_max_pending = B44_RX_RING_SIZE - 1;
+ ering->rx_pending = bp->rx_pending;
+
+ /* XXX ethtool lacks a tx_max_pending, oops... */
+}
+
+static int b44_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
+ (ering->rx_mini_pending != 0) ||
+ (ering->rx_jumbo_pending != 0) ||
+ (ering->tx_pending > B44_TX_RING_SIZE - 1))
+ return -EINVAL;
+
+ spin_lock_irq(&bp->lock);
+
+ bp->rx_pending = ering->rx_pending;
+ bp->tx_pending = ering->tx_pending;
+
+ b44_halt(bp);
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET);
+ netif_wake_queue(bp->dev);
+ spin_unlock_irq(&bp->lock);
+
+ b44_enable_ints(bp);
+
+ return 0;
+}
+
+static void b44_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ epause->autoneg =
+ (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
+ epause->rx_pause =
+ (bp->flags & B44_FLAG_RX_PAUSE) != 0;
+ epause->tx_pause =
+ (bp->flags & B44_FLAG_TX_PAUSE) != 0;
+}
+
+static int b44_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ spin_lock_irq(&bp->lock);
+ if (epause->autoneg)
+ bp->flags |= B44_FLAG_PAUSE_AUTO;
+ else
+ bp->flags &= ~B44_FLAG_PAUSE_AUTO;
+ if (epause->rx_pause)
+ bp->flags |= B44_FLAG_RX_PAUSE;
+ else
+ bp->flags &= ~B44_FLAG_RX_PAUSE;
+ if (epause->tx_pause)
+ bp->flags |= B44_FLAG_TX_PAUSE;
+ else
+ bp->flags &= ~B44_FLAG_TX_PAUSE;
+ if (bp->flags & B44_FLAG_PAUSE_AUTO) {
+ b44_halt(bp);
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET);
+ } else {
+ __b44_set_flow_ctrl(bp, bp->flags);
+ }
+ spin_unlock_irq(&bp->lock);
+
+ b44_enable_ints(bp);
+
+ return 0;
+}
+
+static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
+ break;
+ }
+}
+
+static int b44_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(b44_gstrings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void b44_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct b44 *bp = netdev_priv(dev);
+ u32 *val = &bp->hw_stats.tx_good_octets;
+ u32 i;
+
+ spin_lock_irq(&bp->lock);
+
+ b44_stats_update(bp);
+
+ for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
+ *data++ = *val++;
+
+ spin_unlock_irq(&bp->lock);
+}
+
+static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ wol->supported = WAKE_MAGIC;
+ if (bp->flags & B44_FLAG_WOL_ENABLE)
+ wol->wolopts = WAKE_MAGIC;
+ else
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct b44 *bp = netdev_priv(dev);
+
+ spin_lock_irq(&bp->lock);
+ if (wol->wolopts & WAKE_MAGIC)
+ bp->flags |= B44_FLAG_WOL_ENABLE;
+ else
+ bp->flags &= ~B44_FLAG_WOL_ENABLE;
+ spin_unlock_irq(&bp->lock);
+
+ return 0;
+}
+
+static const struct ethtool_ops b44_ethtool_ops = {
+ .get_drvinfo = b44_get_drvinfo,
+ .get_settings = b44_get_settings,
+ .set_settings = b44_set_settings,
+ .nway_reset = b44_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_wol = b44_get_wol,
+ .set_wol = b44_set_wol,
+ .get_ringparam = b44_get_ringparam,
+ .set_ringparam = b44_set_ringparam,
+ .get_pauseparam = b44_get_pauseparam,
+ .set_pauseparam = b44_set_pauseparam,
+ .get_msglevel = b44_get_msglevel,
+ .set_msglevel = b44_set_msglevel,
+ .get_strings = b44_get_strings,
+ .get_sset_count = b44_get_sset_count,
+ .get_ethtool_stats = b44_get_ethtool_stats,
+};
+
+static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(ifr);
+ struct b44 *bp = netdev_priv(dev);
+ int err = -EINVAL;
+
+ if (!netif_running(dev))
+ goto out;
+
+ spin_lock_irq(&bp->lock);
+ err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
+ spin_unlock_irq(&bp->lock);
+out:
+ return err;
+}
+
+static int __devinit b44_get_invariants(struct b44 *bp)
+{
+ struct ssb_device *sdev = bp->sdev;
+ int err = 0;
+ u8 *addr;
+
+ bp->dma_offset = ssb_dma_translation(sdev);
+
+ if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
+ instance > 1) {
+ addr = sdev->bus->sprom.et1mac;
+ bp->phy_addr = sdev->bus->sprom.et1phyaddr;
+ } else {
+ addr = sdev->bus->sprom.et0mac;
+ bp->phy_addr = sdev->bus->sprom.et0phyaddr;
+ }
+ /* Some ROMs have buggy PHY addresses with the high
+ * bits set (sign extension?). Truncate them to a
+ * valid PHY address. */
+ bp->phy_addr &= 0x1F;
+
+ memcpy(bp->dev->dev_addr, addr, 6);
+
+	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
+ pr_err("Invalid MAC address found in EEPROM\n");
+ return -EINVAL;
+ }
+
+ memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
+
+ bp->imask = IMASK_DEF;
+
+ /* XXX - really required?
+ bp->flags |= B44_FLAG_BUGGY_TXPTR;
+ */
+
+ if (bp->sdev->id.revision >= 7)
+ bp->flags |= B44_FLAG_B0_ANDLATER;
+
+ return err;
+}
+
+static const struct net_device_ops b44_netdev_ops = {
+ .ndo_open = b44_open,
+ .ndo_stop = b44_close,
+ .ndo_start_xmit = b44_start_xmit,
+ .ndo_get_stats = b44_get_stats,
+ .ndo_set_rx_mode = b44_set_rx_mode,
+ .ndo_set_mac_address = b44_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = b44_ioctl,
+ .ndo_tx_timeout = b44_tx_timeout,
+ .ndo_change_mtu = b44_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = b44_poll_controller,
+#endif
+};
+
+static int __devinit b44_init_one(struct ssb_device *sdev,
+ const struct ssb_device_id *ent)
+{
+ struct net_device *dev;
+ struct b44 *bp;
+ int err;
+
+ instance++;
+
+ pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION);
+
+ dev = alloc_etherdev(sizeof(*bp));
+ if (!dev) {
+ dev_err(sdev->dev, "Etherdev alloc failed, aborting\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ SET_NETDEV_DEV(dev, sdev->dev);
+
+ /* No interesting netdevice features in this card... */
+ dev->features |= 0;
+
+ bp = netdev_priv(dev);
+ bp->sdev = sdev;
+ bp->dev = dev;
+ bp->force_copybreak = 0;
+
+ bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
+
+ spin_lock_init(&bp->lock);
+
+ bp->rx_pending = B44_DEF_RX_RING_PENDING;
+ bp->tx_pending = B44_DEF_TX_RING_PENDING;
+
+ dev->netdev_ops = &b44_netdev_ops;
+ netif_napi_add(dev, &bp->napi, b44_poll, 64);
+ dev->watchdog_timeo = B44_TX_TIMEOUT;
+ dev->irq = sdev->irq;
+ SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
+
+ err = ssb_bus_powerup(sdev->bus, 0);
+ if (err) {
+ dev_err(sdev->dev,
+ "Failed to powerup the bus\n");
+ goto err_out_free_dev;
+ }
+
+ if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
+ dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
+ dev_err(sdev->dev,
+ "Required 30BIT DMA mask unsupported by the system\n");
+ goto err_out_powerdown;
+ }
+
+ err = b44_get_invariants(bp);
+ if (err) {
+ dev_err(sdev->dev,
+ "Problem fetching invariants of chip, aborting\n");
+ goto err_out_powerdown;
+ }
+
+ bp->mii_if.dev = dev;
+ bp->mii_if.mdio_read = b44_mii_read;
+ bp->mii_if.mdio_write = b44_mii_write;
+ bp->mii_if.phy_id = bp->phy_addr;
+ bp->mii_if.phy_id_mask = 0x1f;
+ bp->mii_if.reg_num_mask = 0x1f;
+
+ /* By default, advertise all speed/duplex settings. */
+ bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
+ B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
+
+ /* By default, auto-negotiate PAUSE. */
+ bp->flags |= B44_FLAG_PAUSE_AUTO;
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(sdev->dev, "Cannot register net device, aborting\n");
+ goto err_out_powerdown;
+ }
+
+ netif_carrier_off(dev);
+
+ ssb_set_drvdata(sdev, dev);
+
+ /* Chip reset provides power to the b44 MAC & PCI cores, which
+ * is necessary for MAC register access.
+ */
+ b44_chip_reset(bp, B44_CHIP_RESET_FULL);
+
+ /* do a phy reset to test if there is an active phy */
+ if (b44_phy_reset(bp) < 0)
+ bp->phy_addr = B44_PHY_ADDR_NO_PHY;
+
+ netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
+
+ return 0;
+
+err_out_powerdown:
+ ssb_bus_may_powerdown(sdev->bus);
+
+err_out_free_dev:
+ free_netdev(dev);
+
+out:
+ return err;
+}
+
+static void __devexit b44_remove_one(struct ssb_device *sdev)
+{
+ struct net_device *dev = ssb_get_drvdata(sdev);
+
+ unregister_netdev(dev);
+ ssb_device_disable(sdev, 0);
+ ssb_bus_may_powerdown(sdev->bus);
+ free_netdev(dev);
+ ssb_pcihost_set_power_state(sdev, PCI_D3hot);
+ ssb_set_drvdata(sdev, NULL);
+}
+
+static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
+{
+ struct net_device *dev = ssb_get_drvdata(sdev);
+ struct b44 *bp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ del_timer_sync(&bp->timer);
+
+ spin_lock_irq(&bp->lock);
+
+ b44_halt(bp);
+ netif_carrier_off(bp->dev);
+ netif_device_detach(bp->dev);
+ b44_free_rings(bp);
+
+ spin_unlock_irq(&bp->lock);
+
+ free_irq(dev->irq, dev);
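+	/* With WoL enabled, bring the chip back up in partial-reset
+	 * mode so the wakeup pattern logic stays powered while suspended.
+	 */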
+ if (bp->flags & B44_FLAG_WOL_ENABLE) {
+ b44_init_hw(bp, B44_PARTIAL_RESET);
+ b44_setup_wol(bp);
+ }
+
+ ssb_pcihost_set_power_state(sdev, PCI_D3hot);
+ return 0;
+}
+
+static int b44_resume(struct ssb_device *sdev)
+{
+ struct net_device *dev = ssb_get_drvdata(sdev);
+ struct b44 *bp = netdev_priv(dev);
+ int rc = 0;
+
+ rc = ssb_bus_powerup(sdev->bus, 0);
+ if (rc) {
+ dev_err(sdev->dev,
+ "Failed to powerup the bus\n");
+ return rc;
+ }
+
+ if (!netif_running(dev))
+ return 0;
+
+ spin_lock_irq(&bp->lock);
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET);
+ spin_unlock_irq(&bp->lock);
+
+ /*
+ * As a shared interrupt, the handler can be called immediately. To be
+ * able to check the interrupt status the hardware must already be
+ * powered back on (b44_init_hw).
+ */
+ rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
+ if (rc) {
+ netdev_err(dev, "request_irq failed\n");
+ spin_lock_irq(&bp->lock);
+ b44_halt(bp);
+ b44_free_rings(bp);
+ spin_unlock_irq(&bp->lock);
+ return rc;
+ }
+
+ netif_device_attach(bp->dev);
+
+ b44_enable_ints(bp);
+ netif_wake_queue(dev);
+
+ mod_timer(&bp->timer, jiffies + 1);
+
+ return 0;
+}
+
+static struct ssb_driver b44_ssb_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = b44_ssb_tbl,
+ .probe = b44_init_one,
+ .remove = __devexit_p(b44_remove_one),
+ .suspend = b44_suspend,
+ .resume = b44_resume,
+};
+
+static inline int __init b44_pci_init(void)
+{
+ int err = 0;
+#ifdef CONFIG_B44_PCI
+ err = ssb_pcihost_register(&b44_pci_driver);
+#endif
+ return err;
+}
+
+static inline void __exit b44_pci_exit(void)
+{
+#ifdef CONFIG_B44_PCI
+ ssb_pcihost_unregister(&b44_pci_driver);
+#endif
+}
+
+static int __init b44_init(void)
+{
+ unsigned int dma_desc_align_size = dma_get_cache_alignment();
+ int err;
+
+	/* Set up parameters for syncing RX/TX DMA descriptors */
+ dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
+
+ err = b44_pci_init();
+ if (err)
+ return err;
+ err = ssb_driver_register(&b44_ssb_driver);
+ if (err)
+ b44_pci_exit();
+ return err;
+}
+
+static void __exit b44_cleanup(void)
+{
+ ssb_driver_unregister(&b44_ssb_driver);
+ b44_pci_exit();
+}
+
+module_init(b44_init);
+module_exit(b44_cleanup);
+
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
new file mode 100644
index 000000000000..e1905a49279f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -0,0 +1,401 @@
+#ifndef _B44_H
+#define _B44_H
+
+/* Register layout. (These correspond to struct _bcmenettregs in bcm4400.) */
+#define B44_DEVCTRL 0x0000UL /* Device Control */
+#define DEVCTRL_MPM 0x00000040 /* Magic Packet PME Enable (B0 only) */
+#define DEVCTRL_PFE 0x00000080 /* Pattern Filtering Enable */
+#define DEVCTRL_IPP 0x00000400 /* Internal EPHY Present */
+#define DEVCTRL_EPR 0x00008000 /* EPHY Reset */
+#define DEVCTRL_PME 0x00001000 /* PHY Mode Enable */
+#define DEVCTRL_PMCE 0x00002000 /* PHY Mode Clocks Enable */
+#define DEVCTRL_PADDR 0x0007c000 /* PHY Address */
+#define DEVCTRL_PADDR_SHIFT 18
+#define B44_BIST_STAT 0x000CUL /* Built-In Self-Test Status */
+#define B44_WKUP_LEN 0x0010UL /* Wakeup Length */
+#define WKUP_LEN_P0_MASK 0x0000007f /* Pattern 0 */
+#define WKUP_LEN_D0 0x00000080
+#define WKUP_LEN_P1_MASK 0x00007f00 /* Pattern 1 */
+#define WKUP_LEN_P1_SHIFT 8
+#define WKUP_LEN_D1 0x00008000
+#define WKUP_LEN_P2_MASK 0x007f0000 /* Pattern 2 */
+#define WKUP_LEN_P2_SHIFT 16
+#define WKUP_LEN_D2 0x00000000
+#define WKUP_LEN_P3_MASK 0x7f000000 /* Pattern 3 */
+#define WKUP_LEN_P3_SHIFT 24
+#define WKUP_LEN_D3 0x80000000
+#define WKUP_LEN_DISABLE 0x80808080
+#define WKUP_LEN_ENABLE_TWO 0x80800000
+#define WKUP_LEN_ENABLE_THREE 0x80000000
+#define B44_ISTAT 0x0020UL /* Interrupt Status */
+#define ISTAT_LS 0x00000020 /* Link Change (B0 only) */
+#define ISTAT_PME 0x00000040 /* Power Management Event */
+#define ISTAT_TO 0x00000080 /* General Purpose Timeout */
+#define ISTAT_DSCE 0x00000400 /* Descriptor Error */
+#define ISTAT_DATAE 0x00000800 /* Data Error */
+#define ISTAT_DPE 0x00001000 /* Descr. Protocol Error */
+#define ISTAT_RDU 0x00002000 /* Receive Descr. Underflow */
+#define ISTAT_RFO 0x00004000 /* Receive FIFO Overflow */
+#define ISTAT_TFU 0x00008000 /* Transmit FIFO Underflow */
+#define ISTAT_RX 0x00010000 /* RX Interrupt */
+#define ISTAT_TX 0x01000000 /* TX Interrupt */
+#define ISTAT_EMAC 0x04000000 /* EMAC Interrupt */
+#define ISTAT_MII_WRITE 0x08000000 /* MII Write Interrupt */
+#define ISTAT_MII_READ 0x10000000 /* MII Read Interrupt */
+#define ISTAT_ERRORS (ISTAT_DSCE|ISTAT_DATAE|ISTAT_DPE|ISTAT_RDU|ISTAT_RFO|ISTAT_TFU)
+#define B44_IMASK 0x0024UL /* Interrupt Mask */
+#define IMASK_DEF (ISTAT_ERRORS | ISTAT_TO | ISTAT_RX | ISTAT_TX)
+#define B44_GPTIMER 0x0028UL /* General Purpose Timer */
+#define B44_ADDR_LO 0x0088UL /* ENET Address Lo (B0 only) */
+#define B44_ADDR_HI 0x008CUL /* ENET Address Hi (B0 only) */
+#define B44_FILT_ADDR 0x0090UL /* ENET Filter Address */
+#define B44_FILT_DATA 0x0094UL /* ENET Filter Data */
+#define B44_TXBURST 0x00A0UL /* TX Max Burst Length */
+#define B44_RXBURST 0x00A4UL /* RX Max Burst Length */
+#define B44_MAC_CTRL 0x00A8UL /* MAC Control */
+#define MAC_CTRL_CRC32_ENAB 0x00000001 /* CRC32 Generation Enable */
+#define MAC_CTRL_PHY_PDOWN 0x00000004 /* Onchip EPHY Powerdown */
+#define MAC_CTRL_PHY_EDET 0x00000008 /* Onchip EPHY Energy Detected */
+#define MAC_CTRL_PHY_LEDCTRL 0x000000e0 /* Onchip EPHY LED Control */
+#define MAC_CTRL_PHY_LEDCTRL_SHIFT 5
+#define B44_MAC_FLOW 0x00ACUL /* MAC Flow Control */
+#define MAC_FLOW_RX_HI_WATER 0x000000ff /* Receive FIFO HI Water Mark */
+#define MAC_FLOW_PAUSE_ENAB 0x00008000 /* Enable Pause Frame Generation */
+#define B44_RCV_LAZY 0x0100UL /* Lazy Interrupt Control */
+#define RCV_LAZY_TO_MASK 0x00ffffff /* Timeout */
+#define RCV_LAZY_FC_MASK 0xff000000 /* Frame Count */
+#define RCV_LAZY_FC_SHIFT 24
+#define B44_DMATX_CTRL 0x0200UL /* DMA TX Control */
+#define DMATX_CTRL_ENABLE 0x00000001 /* Enable */
+#define DMATX_CTRL_SUSPEND 0x00000002 /* Suspend Request */
+#define DMATX_CTRL_LPBACK 0x00000004 /* Loopback Enable */
+#define DMATX_CTRL_FAIRPRIOR 0x00000008 /* Fair Priority */
+#define DMATX_CTRL_FLUSH 0x00000010 /* Flush Request */
+#define B44_DMATX_ADDR 0x0204UL /* DMA TX Descriptor Ring Address */
+#define B44_DMATX_PTR 0x0208UL /* DMA TX Last Posted Descriptor */
+#define B44_DMATX_STAT 0x020CUL /* DMA TX Current Active Desc. + Status */
+#define DMATX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */
+#define DMATX_STAT_SMASK 0x0000f000 /* State Mask */
+#define DMATX_STAT_SDISABLED 0x00000000 /* State Disabled */
+#define DMATX_STAT_SACTIVE 0x00001000 /* State Active */
+#define DMATX_STAT_SIDLE 0x00002000 /* State Idle Wait */
+#define DMATX_STAT_SSTOPPED 0x00003000 /* State Stopped */
+#define DMATX_STAT_SSUSP 0x00004000 /* State Suspend Pending */
+#define DMATX_STAT_EMASK 0x000f0000 /* Error Mask */
+#define DMATX_STAT_ENONE 0x00000000 /* Error None */
+#define DMATX_STAT_EDPE 0x00010000 /* Error Desc. Protocol Error */
+#define DMATX_STAT_EDFU 0x00020000 /* Error Data FIFO Underrun */
+#define DMATX_STAT_EBEBR 0x00030000 /* Error Bus Error on Buffer Read */
+#define DMATX_STAT_EBEDA 0x00040000 /* Error Bus Error on Desc. Access */
+#define DMATX_STAT_FLUSHED 0x00100000 /* Flushed */
+#define B44_DMARX_CTRL 0x0210UL /* DMA RX Control */
+#define DMARX_CTRL_ENABLE 0x00000001 /* Enable */
+#define DMARX_CTRL_ROMASK 0x000000fe /* Receive Offset Mask */
+#define DMARX_CTRL_ROSHIFT 1 /* Receive Offset Shift */
+#define B44_DMARX_ADDR 0x0214UL /* DMA RX Descriptor Ring Address */
+#define B44_DMARX_PTR 0x0218UL /* DMA RX Last Posted Descriptor */
+#define B44_DMARX_STAT 0x021CUL /* DMA RX Current Active Desc. + Status */
+#define DMARX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */
+#define DMARX_STAT_SMASK 0x0000f000 /* State Mask */
+#define DMARX_STAT_SDISABLED 0x00000000 /* State Disabled */
+#define DMARX_STAT_SACTIVE 0x00001000 /* State Active */
+#define DMARX_STAT_SIDLE 0x00002000 /* State Idle Wait */
+#define DMARX_STAT_SSTOPPED 0x00003000 /* State Stopped */
+#define DMARX_STAT_EMASK 0x000f0000 /* Error Mask */
+#define DMARX_STAT_ENONE 0x00000000 /* Error None */
+#define DMARX_STAT_EDPE 0x00010000 /* Error Desc. Protocol Error */
+#define DMARX_STAT_EDFO 0x00020000 /* Error Data FIFO Overflow */
+#define DMARX_STAT_EBEBW 0x00030000 /* Error Bus Error on Buffer Write */
+#define DMARX_STAT_EBEDA 0x00040000 /* Error Bus Error on Desc. Access */
+#define B44_DMAFIFO_AD 0x0220UL /* DMA FIFO Diag Address */
+#define DMAFIFO_AD_OMASK 0x0000ffff /* Offset Mask */
+#define DMAFIFO_AD_SMASK 0x000f0000 /* Select Mask */
+#define DMAFIFO_AD_SXDD 0x00000000 /* Select Transmit DMA Data */
+#define DMAFIFO_AD_SXDP 0x00010000 /* Select Transmit DMA Pointers */
+#define DMAFIFO_AD_SRDD 0x00040000 /* Select Receive DMA Data */
+#define DMAFIFO_AD_SRDP 0x00050000 /* Select Receive DMA Pointers */
+#define DMAFIFO_AD_SXFD 0x00080000 /* Select Transmit FIFO Data */
+#define DMAFIFO_AD_SXFP 0x00090000 /* Select Transmit FIFO Pointers */
+#define DMAFIFO_AD_SRFD 0x000c0000 /* Select Receive FIFO Data */
+#define DMAFIFO_AD_SRFP 0x000d0000 /* Select Receive FIFO Pointers */
+#define B44_DMAFIFO_LO 0x0224UL /* DMA FIFO Diag Low Data */
+#define B44_DMAFIFO_HI 0x0228UL /* DMA FIFO Diag High Data */
+#define B44_RXCONFIG 0x0400UL /* EMAC RX Config */
+#define RXCONFIG_DBCAST 0x00000001 /* Disable Broadcast */
+#define RXCONFIG_ALLMULTI 0x00000002 /* Accept All Multicast */
+#define RXCONFIG_NORX_WHILE_TX 0x00000004 /* Receive Disable While Transmitting */
+#define RXCONFIG_PROMISC 0x00000008 /* Promiscuous Enable */
+#define RXCONFIG_LPBACK 0x00000010 /* Loopback Enable */
+#define RXCONFIG_FLOW 0x00000020 /* Flow Control Enable */
+#define RXCONFIG_FLOW_ACCEPT 0x00000040 /* Accept Unicast Flow Control Frame */
+#define RXCONFIG_RFILT 0x00000080 /* Reject Filter */
+#define RXCONFIG_CAM_ABSENT 0x00000100 /* CAM Absent */
+#define B44_RXMAXLEN 0x0404UL /* EMAC RX Max Packet Length */
+#define B44_TXMAXLEN 0x0408UL /* EMAC TX Max Packet Length */
+#define B44_MDIO_CTRL 0x0410UL /* EMAC MDIO Control */
+#define MDIO_CTRL_MAXF_MASK 0x0000007f /* MDC Frequency */
+#define MDIO_CTRL_PREAMBLE 0x00000080 /* MII Preamble Enable */
+#define B44_MDIO_DATA 0x0414UL /* EMAC MDIO Data */
+#define MDIO_DATA_DATA 0x0000ffff /* R/W Data */
+#define MDIO_DATA_TA_MASK 0x00030000 /* Turnaround Value */
+#define MDIO_DATA_TA_SHIFT 16
+#define MDIO_TA_VALID 2
+#define MDIO_DATA_RA_MASK 0x007c0000 /* Register Address */
+#define MDIO_DATA_RA_SHIFT 18
+#define MDIO_DATA_PMD_MASK 0x0f800000 /* Physical Media Device */
+#define MDIO_DATA_PMD_SHIFT 23
+#define MDIO_DATA_OP_MASK 0x30000000 /* Opcode */
+#define MDIO_DATA_OP_SHIFT 28
+#define MDIO_OP_WRITE 1
+#define MDIO_OP_READ 2
+#define MDIO_DATA_SB_MASK 0xc0000000 /* Start Bits */
+#define MDIO_DATA_SB_SHIFT 30
+#define MDIO_DATA_SB_START 0x40000000 /* Start Of Frame */
+#define B44_EMAC_IMASK 0x0418UL /* EMAC Interrupt Mask */
+#define B44_EMAC_ISTAT 0x041CUL /* EMAC Interrupt Status */
+#define EMAC_INT_MII 0x00000001 /* MII MDIO Interrupt */
+#define EMAC_INT_MIB 0x00000002 /* MIB Interrupt */
+#define EMAC_INT_FLOW 0x00000004 /* Flow Control Interrupt */
+#define B44_CAM_DATA_LO 0x0420UL /* EMAC CAM Data Low */
+#define B44_CAM_DATA_HI 0x0424UL /* EMAC CAM Data High */
+#define CAM_DATA_HI_VALID 0x00010000 /* Valid Bit */
+#define B44_CAM_CTRL 0x0428UL /* EMAC CAM Control */
+#define CAM_CTRL_ENABLE 0x00000001 /* CAM Enable */
+#define CAM_CTRL_MSEL 0x00000002 /* Mask Select */
+#define CAM_CTRL_READ 0x00000004 /* Read */
+#define CAM_CTRL_WRITE 0x00000008 /* Write */
+#define CAM_CTRL_INDEX_MASK 0x003f0000 /* Index Mask */
+#define CAM_CTRL_INDEX_SHIFT 16
+#define CAM_CTRL_BUSY 0x80000000 /* CAM Busy */
+#define B44_ENET_CTRL 0x042CUL /* EMAC ENET Control */
+#define ENET_CTRL_ENABLE 0x00000001 /* EMAC Enable */
+#define ENET_CTRL_DISABLE 0x00000002 /* EMAC Disable */
+#define ENET_CTRL_SRST 0x00000004 /* EMAC Soft Reset */
+#define ENET_CTRL_EPSEL 0x00000008 /* External PHY Select */
+#define B44_TX_CTRL 0x0430UL /* EMAC TX Control */
+#define TX_CTRL_DUPLEX 0x00000001 /* Full Duplex */
+#define TX_CTRL_FMODE 0x00000002 /* Flow Mode */
+#define TX_CTRL_SBENAB 0x00000004 /* Single Backoff Enable */
+#define TX_CTRL_SMALL_SLOT 0x00000008 /* Small Slottime */
+#define B44_TX_WMARK 0x0434UL /* EMAC TX Watermark */
+#define B44_MIB_CTRL 0x0438UL /* EMAC MIB Control */
+#define MIB_CTRL_CLR_ON_READ 0x00000001 /* Autoclear on Read */
+#define B44_TX_GOOD_O 0x0500UL /* MIB TX Good Octets */
+#define B44_TX_GOOD_P 0x0504UL /* MIB TX Good Packets */
+#define B44_TX_O 0x0508UL /* MIB TX Octets */
+#define B44_TX_P 0x050CUL /* MIB TX Packets */
+#define B44_TX_BCAST 0x0510UL /* MIB TX Broadcast Packets */
+#define B44_TX_MCAST 0x0514UL /* MIB TX Multicast Packets */
+#define B44_TX_64 0x0518UL /* MIB TX <= 64 byte Packets */
+#define B44_TX_65_127 0x051CUL /* MIB TX 65 to 127 byte Packets */
+#define B44_TX_128_255 0x0520UL /* MIB TX 128 to 255 byte Packets */
+#define B44_TX_256_511 0x0524UL /* MIB TX 256 to 511 byte Packets */
+#define B44_TX_512_1023 0x0528UL /* MIB TX 512 to 1023 byte Packets */
+#define B44_TX_1024_MAX 0x052CUL /* MIB TX 1024 to max byte Packets */
+#define B44_TX_JABBER 0x0530UL /* MIB TX Jabber Packets */
+#define B44_TX_OSIZE 0x0534UL /* MIB TX Oversize Packets */
+#define B44_TX_FRAG 0x0538UL /* MIB TX Fragment Packets */
+#define B44_TX_URUNS 0x053CUL /* MIB TX Underruns */
+#define B44_TX_TCOLS 0x0540UL /* MIB TX Total Collisions */
+#define B44_TX_SCOLS 0x0544UL /* MIB TX Single Collisions */
+#define B44_TX_MCOLS 0x0548UL /* MIB TX Multiple Collisions */
+#define B44_TX_ECOLS 0x054CUL /* MIB TX Excessive Collisions */
+#define B44_TX_LCOLS 0x0550UL /* MIB TX Late Collisions */
+#define B44_TX_DEFERED 0x0554UL /* MIB TX Deferred Packets */
+#define B44_TX_CLOST 0x0558UL /* MIB TX Carrier Lost */
+#define B44_TX_PAUSE 0x055CUL /* MIB TX Pause Packets */
+#define B44_RX_GOOD_O 0x0580UL /* MIB RX Good Octets */
+#define B44_RX_GOOD_P 0x0584UL /* MIB RX Good Packets */
+#define B44_RX_O 0x0588UL /* MIB RX Octets */
+#define B44_RX_P 0x058CUL /* MIB RX Packets */
+#define B44_RX_BCAST 0x0590UL /* MIB RX Broadcast Packets */
+#define B44_RX_MCAST 0x0594UL /* MIB RX Multicast Packets */
+#define B44_RX_64 0x0598UL /* MIB RX <= 64 byte Packets */
+#define B44_RX_65_127 0x059CUL /* MIB RX 65 to 127 byte Packets */
+#define B44_RX_128_255 0x05A0UL /* MIB RX 128 to 255 byte Packets */
+#define B44_RX_256_511 0x05A4UL /* MIB RX 256 to 511 byte Packets */
+#define B44_RX_512_1023 0x05A8UL /* MIB RX 512 to 1023 byte Packets */
+#define B44_RX_1024_MAX 0x05ACUL /* MIB RX 1024 to max byte Packets */
+#define B44_RX_JABBER 0x05B0UL /* MIB RX Jabber Packets */
+#define B44_RX_OSIZE 0x05B4UL /* MIB RX Oversize Packets */
+#define B44_RX_FRAG 0x05B8UL /* MIB RX Fragment Packets */
+#define B44_RX_MISS 0x05BCUL /* MIB RX Missed Packets */
+#define B44_RX_CRCA 0x05C0UL /* MIB RX CRC Align Errors */
+#define B44_RX_USIZE 0x05C4UL /* MIB RX Undersize Packets */
+#define B44_RX_CRC 0x05C8UL /* MIB RX CRC Errors */
+#define B44_RX_ALIGN 0x05CCUL /* MIB RX Align Errors */
+#define B44_RX_SYM 0x05D0UL /* MIB RX Symbol Errors */
+#define B44_RX_PAUSE 0x05D4UL /* MIB RX Pause Packets */
+#define B44_RX_NPAUSE 0x05D8UL /* MIB RX Non-Pause Packets */
+
+/* 4400 PHY registers */
+#define B44_MII_AUXCTRL 24 /* Auxiliary Control */
+#define MII_AUXCTRL_DUPLEX 0x0001 /* Full Duplex */
+#define MII_AUXCTRL_SPEED 0x0002 /* 1=100Mbps, 0=10Mbps */
+#define MII_AUXCTRL_FORCED 0x0004 /* Forced 10/100 */
+#define B44_MII_ALEDCTRL 26 /* Activity LED */
+#define MII_ALEDCTRL_ALLMSK 0x7fff
+#define B44_MII_TLEDCTRL 27 /* Traffic Meter LED */
+#define MII_TLEDCTRL_ENABLE 0x0040
+
+struct dma_desc {
+ __le32 ctrl;
+ __le32 addr;
+};
+
+/* There are only 12 bits in the DMA engine for descriptor offsetting
+ * so the table must be aligned on a boundary of this.
+ */
+#define DMA_TABLE_BYTES 4096
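+
+/* Worked example: struct dma_desc is 8 bytes, so a 4096-byte table
+ * holds at most 512 descriptors, and the 12-bit current-descriptor
+ * offsets (DMATX_STAT_CDMASK / DMARX_STAT_CDMASK above) can address
+ * any byte within an aligned table.
+ */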
+
+#define DESC_CTRL_LEN 0x00001fff
+#define DESC_CTRL_CMASK 0x0ff00000 /* Core specific bits */
+#define DESC_CTRL_EOT 0x10000000 /* End of Table */
+#define DESC_CTRL_IOC 0x20000000 /* Interrupt On Completion */
+#define DESC_CTRL_EOF 0x40000000 /* End of Frame */
+#define DESC_CTRL_SOF 0x80000000 /* Start of Frame */
+
+#define RX_COPY_THRESHOLD 256
+
+struct rx_header {
+ __le16 len;
+ __le16 flags;
+ __le16 pad[12];
+};
+#define RX_HEADER_LEN 28
+
+#define RX_FLAG_OFIFO 0x00000001 /* FIFO Overflow */
+#define RX_FLAG_CRCERR 0x00000002 /* CRC Error */
+#define RX_FLAG_SERR 0x00000004 /* Receive Symbol Error */
+#define RX_FLAG_ODD 0x00000008 /* Frame has odd number of nibbles */
+#define RX_FLAG_LARGE 0x00000010 /* Frame is > RX MAX Length */
+#define RX_FLAG_MCAST 0x00000020 /* Dest is Multicast Address */
+#define RX_FLAG_BCAST 0x00000040 /* Dest is Broadcast Address */
+#define RX_FLAG_MISS 0x00000080 /* Received due to promisc mode */
+#define RX_FLAG_LAST 0x00000800 /* Last buffer in frame */
+#define RX_FLAG_ERRORS (RX_FLAG_ODD | RX_FLAG_SERR | RX_FLAG_CRCERR | RX_FLAG_OFIFO)
+
+struct ring_info {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+};
+
+#define B44_MCAST_TABLE_SIZE 32
+#define B44_PHY_ADDR_NO_PHY 30
+#define B44_MDC_RATIO 5000000
+
+#define B44_STAT_REG_DECLARE \
+ _B44(tx_good_octets) \
+ _B44(tx_good_pkts) \
+ _B44(tx_octets) \
+ _B44(tx_pkts) \
+ _B44(tx_broadcast_pkts) \
+ _B44(tx_multicast_pkts) \
+ _B44(tx_len_64) \
+ _B44(tx_len_65_to_127) \
+ _B44(tx_len_128_to_255) \
+ _B44(tx_len_256_to_511) \
+ _B44(tx_len_512_to_1023) \
+ _B44(tx_len_1024_to_max) \
+ _B44(tx_jabber_pkts) \
+ _B44(tx_oversize_pkts) \
+ _B44(tx_fragment_pkts) \
+ _B44(tx_underruns) \
+ _B44(tx_total_cols) \
+ _B44(tx_single_cols) \
+ _B44(tx_multiple_cols) \
+ _B44(tx_excessive_cols) \
+ _B44(tx_late_cols) \
+ _B44(tx_defered) \
+ _B44(tx_carrier_lost) \
+ _B44(tx_pause_pkts) \
+ _B44(rx_good_octets) \
+ _B44(rx_good_pkts) \
+ _B44(rx_octets) \
+ _B44(rx_pkts) \
+ _B44(rx_broadcast_pkts) \
+ _B44(rx_multicast_pkts) \
+ _B44(rx_len_64) \
+ _B44(rx_len_65_to_127) \
+ _B44(rx_len_128_to_255) \
+ _B44(rx_len_256_to_511) \
+ _B44(rx_len_512_to_1023) \
+ _B44(rx_len_1024_to_max) \
+ _B44(rx_jabber_pkts) \
+ _B44(rx_oversize_pkts) \
+ _B44(rx_fragment_pkts) \
+ _B44(rx_missed_pkts) \
+ _B44(rx_crc_align_errs) \
+ _B44(rx_undersize) \
+ _B44(rx_crc_errs) \
+ _B44(rx_align_errs) \
+ _B44(rx_symbol_errs) \
+ _B44(rx_pause_pkts) \
+ _B44(rx_nonpause_pkts)
+
+/* SW copy of device statistics, kept up to date by a periodic timer
+ * which probes HW values. Check b44_stats_update if you mess with
+ * the layout.
+ */
+struct b44_hw_stats {
+#define _B44(x) u32 x;
+B44_STAT_REG_DECLARE
+#undef _B44
+};
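+
+/* With the _B44() definition above, B44_STAT_REG_DECLARE expands into
+ * one u32 counter per statistic, i.e. the struct is equivalent to:
+ *
+ *	struct b44_hw_stats {
+ *		u32 tx_good_octets;
+ *		u32 tx_good_pkts;
+ *		...
+ *		u32 rx_nonpause_pkts;
+ *	};
+ */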
+
+struct ssb_device;
+
+struct b44 {
+ spinlock_t lock;
+
+ u32 imask, istat;
+
+ struct dma_desc *rx_ring, *tx_ring;
+
+ u32 tx_prod, tx_cons;
+ u32 rx_prod, rx_cons;
+
+ struct ring_info *rx_buffers;
+ struct ring_info *tx_buffers;
+
+ struct napi_struct napi;
+
+ u32 dma_offset;
+ u32 flags;
+#define B44_FLAG_B0_ANDLATER 0x00000001
+#define B44_FLAG_BUGGY_TXPTR 0x00000002
+#define B44_FLAG_REORDER_BUG 0x00000004
+#define B44_FLAG_PAUSE_AUTO 0x00008000
+#define B44_FLAG_FULL_DUPLEX 0x00010000
+#define B44_FLAG_100_BASE_T 0x00020000
+#define B44_FLAG_TX_PAUSE 0x00040000
+#define B44_FLAG_RX_PAUSE 0x00080000
+#define B44_FLAG_FORCE_LINK 0x00100000
+#define B44_FLAG_ADV_10HALF 0x01000000
+#define B44_FLAG_ADV_10FULL 0x02000000
+#define B44_FLAG_ADV_100HALF 0x04000000
+#define B44_FLAG_ADV_100FULL 0x08000000
+#define B44_FLAG_INTERNAL_PHY 0x10000000
+#define B44_FLAG_RX_RING_HACK 0x20000000
+#define B44_FLAG_TX_RING_HACK 0x40000000
+#define B44_FLAG_WOL_ENABLE 0x80000000
+
+ u32 msg_enable;
+
+ struct timer_list timer;
+
+ struct b44_hw_stats hw_stats;
+
+ struct ssb_device *sdev;
+ struct net_device *dev;
+
+ dma_addr_t rx_ring_dma, tx_ring_dma;
+
+ u32 rx_pending;
+ u32 tx_pending;
+ u8 phy_addr;
+ u8 force_copybreak;
+ struct mii_if_info mii_if;
+};
+
+#endif /* _B44_H */
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
new file mode 100644
index 000000000000..05b022866076
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -0,0 +1,1966 @@
+/*
+ * Driver for BCM963xx builtin Ethernet mac
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/if_vlan.h>
+
+#include <bcm63xx_dev_enet.h>
+#include "bcm63xx_enet.h"
+
+static char bcm_enet_driver_name[] = "bcm63xx_enet";
+static char bcm_enet_driver_version[] = "1.0";
+
+static int copybreak __read_mostly = 128;
+module_param(copybreak, int, 0);
+MODULE_PARM_DESC(copybreak, "Receive copy threshold");
+
+/* io memory shared between all devices */
+static void __iomem *bcm_enet_shared_base;
+
+/*
+ * io helpers to access mac registers
+ */
+static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
+{
+ return bcm_readl(priv->base + off);
+}
+
+static inline void enet_writel(struct bcm_enet_priv *priv,
+ u32 val, u32 off)
+{
+ bcm_writel(val, priv->base + off);
+}
+
+/*
+ * io helpers to access shared registers
+ */
+static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
+{
+ return bcm_readl(bcm_enet_shared_base + off);
+}
+
+static inline void enet_dma_writel(struct bcm_enet_priv *priv,
+ u32 val, u32 off)
+{
+ bcm_writel(val, bcm_enet_shared_base + off);
+}
+
+/*
+ * write the given data into the mii register and wait for the
+ * transfer to end, with a timeout (the average measured transfer
+ * time is 25us)
+ */
+static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
+{
+ int limit;
+
+ /* make sure mii interrupt status is cleared */
+ enet_writel(priv, ENET_IR_MII, ENET_IR_REG);
+
+ enet_writel(priv, data, ENET_MIIDATA_REG);
+ wmb();
+
+ /* busy wait on mii interrupt bit, with timeout */
+ limit = 1000;
+ do {
+ if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
+ break;
+ udelay(1);
+ } while (limit-- > 0);
+
+ return (limit < 0) ? 1 : 0;
+}
+
+/*
+ * MII internal read callback
+ */
+static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
+ int regnum)
+{
+ u32 tmp, val;
+
+ tmp = regnum << ENET_MIIDATA_REG_SHIFT;
+ tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
+ tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
+ tmp |= ENET_MIIDATA_OP_READ_MASK;
+
+ if (do_mdio_op(priv, tmp))
+ return -1;
+
+ val = enet_readl(priv, ENET_MIIDATA_REG);
+ val &= 0xffff;
+ return val;
+}
+
+/*
+ * MII internal write callback
+ */
+static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
+ int regnum, u16 value)
+{
+ u32 tmp;
+
+ tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
+ tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
+ tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
+ tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
+ tmp |= ENET_MIIDATA_OP_WRITE_MASK;
+
+ (void)do_mdio_op(priv, tmp);
+ return 0;
+}
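+
+/* The two helpers above compose a clause-22 MDIO frame in the MIIDATA
+ * register: opcode (read or write), PHY address, register number, a
+ * turnaround value of 0x2 and, for writes, the 16-bit data.
+ * do_mdio_op() then busy-waits up to ~1ms for the MII interrupt bit
+ * to signal completion.
+ */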
+
+/*
+ * MII read callback from phylib
+ */
+static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
+ int regnum)
+{
+ return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
+}
+
+/*
+ * MII write callback from phylib
+ */
+static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
+ int regnum, u16 value)
+{
+ return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
+}
+
+/*
+ * MII read callback from mii core
+ */
+static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
+ int regnum)
+{
+ return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
+}
+
+/*
+ * MII write callback from mii core
+ */
+static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
+ int regnum, int value)
+{
+ bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
+}
+
+/*
+ * refill rx queue
+ */
+static int bcm_enet_refill_rx(struct net_device *dev)
+{
+ struct bcm_enet_priv *priv;
+
+ priv = netdev_priv(dev);
+
+ while (priv->rx_desc_count < priv->rx_ring_size) {
+ struct bcm_enet_desc *desc;
+ struct sk_buff *skb;
+ dma_addr_t p;
+ int desc_idx;
+ u32 len_stat;
+
+ desc_idx = priv->rx_dirty_desc;
+ desc = &priv->rx_desc_cpu[desc_idx];
+
+ if (!priv->rx_skb[desc_idx]) {
+ skb = netdev_alloc_skb(dev, priv->rx_skb_size);
+ if (!skb)
+ break;
+ priv->rx_skb[desc_idx] = skb;
+
+ p = dma_map_single(&priv->pdev->dev, skb->data,
+ priv->rx_skb_size,
+ DMA_FROM_DEVICE);
+ desc->address = p;
+ }
+
+ len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
+ len_stat |= DMADESC_OWNER_MASK;
+ if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
+ len_stat |= DMADESC_WRAP_MASK;
+ priv->rx_dirty_desc = 0;
+ } else {
+ priv->rx_dirty_desc++;
+ }
+ wmb();
+ desc->len_stat = len_stat;
+
+ priv->rx_desc_count++;
+
+ /* tell dma engine we allocated one buffer */
+ enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
+ }
+
+ /* If rx ring is still empty, set a timer to try allocating
+ * again at a later time. */
+ if (priv->rx_desc_count == 0 && netif_running(dev)) {
+ dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
+ priv->rx_timeout.expires = jiffies + HZ;
+ add_timer(&priv->rx_timeout);
+ }
+
+ return 0;
+}
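+
+/* Ring bookkeeping in the refill path above: rx_dirty_desc is the
+ * next descriptor to (re)arm and wraps via DMADESC_WRAP_MASK on the
+ * last entry, rx_desc_count tracks how many descriptors are armed,
+ * and setting DMADESC_OWNER_MASK (after the wmb()) hands the buffer
+ * over to the hardware.
+ */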
+
+/*
+ * timer callback to defer refilling the rx queue in case we're OOM
+ */
+static void bcm_enet_refill_rx_timer(unsigned long data)
+{
+ struct net_device *dev;
+ struct bcm_enet_priv *priv;
+
+ dev = (struct net_device *)data;
+ priv = netdev_priv(dev);
+
+ spin_lock(&priv->rx_lock);
+ bcm_enet_refill_rx(dev);
+ spin_unlock(&priv->rx_lock);
+}
+
+/*
+ * extract packet from rx queue
+ */
+static int bcm_enet_receive_queue(struct net_device *dev, int budget)
+{
+ struct bcm_enet_priv *priv;
+ struct device *kdev;
+ int processed;
+
+ priv = netdev_priv(dev);
+ kdev = &priv->pdev->dev;
+ processed = 0;
+
+ /* don't scan the ring further than the number of refilled
+ * descriptors */
+ if (budget > priv->rx_desc_count)
+ budget = priv->rx_desc_count;
+
+ do {
+ struct bcm_enet_desc *desc;
+ struct sk_buff *skb;
+ int desc_idx;
+ u32 len_stat;
+ unsigned int len;
+
+ desc_idx = priv->rx_curr_desc;
+ desc = &priv->rx_desc_cpu[desc_idx];
+
+ /* make sure we actually read the descriptor status on
+ * each loop iteration */
+ rmb();
+
+ len_stat = desc->len_stat;
+
+ /* break if dma ownership belongs to hw */
+ if (len_stat & DMADESC_OWNER_MASK)
+ break;
+
+ processed++;
+ priv->rx_curr_desc++;
+ if (priv->rx_curr_desc == priv->rx_ring_size)
+ priv->rx_curr_desc = 0;
+ priv->rx_desc_count--;
+
+ /* if the packet does not have the start of packet _and_
+ * end of packet flags set, then just recycle it */
+ if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
+ dev->stats.rx_dropped++;
+ continue;
+ }
+
+ /* recycle packet if it's marked as bad */
+ if (unlikely(len_stat & DMADESC_ERR_MASK)) {
+ dev->stats.rx_errors++;
+
+ if (len_stat & DMADESC_OVSIZE_MASK)
+ dev->stats.rx_length_errors++;
+ if (len_stat & DMADESC_CRC_MASK)
+ dev->stats.rx_crc_errors++;
+ if (len_stat & DMADESC_UNDER_MASK)
+ dev->stats.rx_frame_errors++;
+ if (len_stat & DMADESC_OV_MASK)
+ dev->stats.rx_fifo_errors++;
+ continue;
+ }
+
+ /* valid packet */
+ skb = priv->rx_skb[desc_idx];
+ len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
+ /* don't include FCS */
+ len -= 4;
+
+ if (len < copybreak) {
+ struct sk_buff *nskb;
+
+ nskb = netdev_alloc_skb_ip_align(dev, len);
+ if (!nskb) {
+ /* forget packet, just rearm desc */
+ dev->stats.rx_dropped++;
+ continue;
+ }
+
+ dma_sync_single_for_cpu(kdev, desc->address,
+ len, DMA_FROM_DEVICE);
+ memcpy(nskb->data, skb->data, len);
+ dma_sync_single_for_device(kdev, desc->address,
+ len, DMA_FROM_DEVICE);
+ skb = nskb;
+ } else {
+ dma_unmap_single(&priv->pdev->dev, desc->address,
+ priv->rx_skb_size, DMA_FROM_DEVICE);
+ priv->rx_skb[desc_idx] = NULL;
+ }
+
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, dev);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ netif_receive_skb(skb);
+
+ } while (--budget > 0);
+
+ if (processed || !priv->rx_desc_count) {
+ bcm_enet_refill_rx(dev);
+
+ /* kick rx dma */
+ enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
+ ENETDMA_CHANCFG_REG(priv->rx_chan));
+ }
+
+ return processed;
+}
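+
+/* Note on the copybreak path above: frames shorter than the copybreak
+ * module parameter are copied into a freshly allocated skb so that the
+ * original rx buffer can stay mapped and be re-armed cheaply; larger
+ * frames hand their buffer up the stack and the refill path maps a
+ * new one.
+ */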
+
+
+/*
+ * try to or force reclaim of transmitted buffers
+ */
+static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
+{
+ struct bcm_enet_priv *priv;
+ int released;
+
+ priv = netdev_priv(dev);
+ released = 0;
+
+ while (priv->tx_desc_count < priv->tx_ring_size) {
+ struct bcm_enet_desc *desc;
+ struct sk_buff *skb;
+
+ /* We run in a bh and fight against start_xmit, which
+ * is called with bh disabled */
+ spin_lock(&priv->tx_lock);
+
+ desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];
+
+ if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
+ spin_unlock(&priv->tx_lock);
+ break;
+ }
+
+ /* ensure the other fields of the descriptor were not read
+ * before we checked ownership */
+ rmb();
+
+ skb = priv->tx_skb[priv->tx_dirty_desc];
+ priv->tx_skb[priv->tx_dirty_desc] = NULL;
+ dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
+ DMA_TO_DEVICE);
+
+ priv->tx_dirty_desc++;
+ if (priv->tx_dirty_desc == priv->tx_ring_size)
+ priv->tx_dirty_desc = 0;
+ priv->tx_desc_count++;
+
+ spin_unlock(&priv->tx_lock);
+
+ if (desc->len_stat & DMADESC_UNDER_MASK)
+ dev->stats.tx_errors++;
+
+ dev_kfree_skb(skb);
+ released++;
+ }
+
+ if (netif_queue_stopped(dev) && released)
+ netif_wake_queue(dev);
+
+ return released;
+}
+
+/*
+ * poll func, called by network core
+ */
+static int bcm_enet_poll(struct napi_struct *napi, int budget)
+{
+ struct bcm_enet_priv *priv;
+ struct net_device *dev;
+ int tx_work_done, rx_work_done;
+
+ priv = container_of(napi, struct bcm_enet_priv, napi);
+ dev = priv->net_dev;
+
+ /* ack interrupts */
+ enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+ ENETDMA_IR_REG(priv->rx_chan));
+ enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+ ENETDMA_IR_REG(priv->tx_chan));
+
+ /* reclaim sent skb */
+ tx_work_done = bcm_enet_tx_reclaim(dev, 0);
+
+ spin_lock(&priv->rx_lock);
+ rx_work_done = bcm_enet_receive_queue(dev, budget);
+ spin_unlock(&priv->rx_lock);
+
+ if (rx_work_done >= budget || tx_work_done > 0) {
+ /* rx/tx queue is not yet empty/clean */
+ return rx_work_done;
+ }
+
+ /* no more packets in rx/tx queue, remove device from poll
+ * queue */
+ napi_complete(napi);
+
+ /* restore rx/tx interrupt */
+ enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+ ENETDMA_IRMASK_REG(priv->rx_chan));
+ enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+ ENETDMA_IRMASK_REG(priv->tx_chan));
+
+ return rx_work_done;
+}
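+
+/* NAPI flow as implemented above: bcm_enet_isr_dma() masks the rx/tx
+ * interrupts and schedules the poll function; bcm_enet_poll() acks the
+ * interrupts, reclaims sent skbs, receives up to budget packets, and
+ * only re-enables the interrupts after napi_complete(), once both
+ * queues are idle.
+ */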
+
+/*
+ * mac interrupt handler
+ */
+static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
+{
+ struct net_device *dev;
+ struct bcm_enet_priv *priv;
+ u32 stat;
+
+ dev = dev_id;
+ priv = netdev_priv(dev);
+
+ stat = enet_readl(priv, ENET_IR_REG);
+ if (!(stat & ENET_IR_MIB))
+ return IRQ_NONE;
+
+ /* clear & mask interrupt */
+ enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
+ enet_writel(priv, 0, ENET_IRMASK_REG);
+
+ /* read mib registers in workqueue */
+ schedule_work(&priv->mib_update_task);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * rx/tx dma interrupt handler
+ */
+static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
+{
+ struct net_device *dev;
+ struct bcm_enet_priv *priv;
+
+ dev = dev_id;
+ priv = netdev_priv(dev);
+
+ /* mask rx/tx interrupts */
+ enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
+ enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+
+ napi_schedule(&priv->napi);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * tx request callback
+ */
+static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bcm_enet_priv *priv;
+ struct bcm_enet_desc *desc;
+ u32 len_stat;
+ int ret;
+
+ priv = netdev_priv(dev);
+
+ /* lock against tx reclaim */
+ spin_lock(&priv->tx_lock);
+
+ /* make sure the tx hw queue is not full; this should never
+ * happen since we stop the queue before that is the case */
+ if (unlikely(!priv->tx_desc_count)) {
+ netif_stop_queue(dev);
+ dev_err(&priv->pdev->dev, "xmit called with no tx desc "
+ "available?\n");
+ ret = NETDEV_TX_BUSY;
+ goto out_unlock;
+ }
+
+ /* point to the next available desc */
+ desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
+ priv->tx_skb[priv->tx_curr_desc] = skb;
+
+ /* fill descriptor */
+ desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+
+ len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
+ len_stat |= DMADESC_ESOP_MASK |
+ DMADESC_APPEND_CRC |
+ DMADESC_OWNER_MASK;
+
+ priv->tx_curr_desc++;
+ if (priv->tx_curr_desc == priv->tx_ring_size) {
+ priv->tx_curr_desc = 0;
+ len_stat |= DMADESC_WRAP_MASK;
+ }
+ priv->tx_desc_count--;
+
+ /* the dma engine might already be polling, make sure we
+ * update the descriptor fields in the correct order */
+ wmb();
+ desc->len_stat = len_stat;
+ wmb();
+
+ /* kick tx dma */
+ enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
+ ENETDMA_CHANCFG_REG(priv->tx_chan));
+
+ /* stop queue if no more desc available */
+ if (!priv->tx_desc_count)
+ netif_stop_queue(dev);
+
+ dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+ ret = NETDEV_TX_OK;
+
+out_unlock:
+ spin_unlock(&priv->tx_lock);
+ return ret;
+}
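+
+/* tx_desc_count acts as a free-slot counter for the xmit path above:
+ * it starts at tx_ring_size when the device is opened, is decremented
+ * for every queued packet, incremented again by bcm_enet_tx_reclaim(),
+ * and the queue is stopped whenever it reaches zero.
+ */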
+
+/*
+ * Change the interface's mac address.
+ */
+static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
+{
+ struct bcm_enet_priv *priv;
+ struct sockaddr *addr = p;
+ u32 val;
+
+ priv = netdev_priv(dev);
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+
+ /* use perfect match register 0 to store my mac address */
+ val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
+ (dev->dev_addr[4] << 8) | dev->dev_addr[5];
+ enet_writel(priv, val, ENET_PML_REG(0));
+
+ val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
+ val |= ENET_PMH_DATAVALID_MASK;
+ enet_writel(priv, val, ENET_PMH_REG(0));
+
+ return 0;
+}
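+
+/* Worked example of the perfect match encoding above: for the mac
+ * address 01:23:45:67:89:ab, ENET_PML_REG(0) is written with
+ * 0x456789ab and ENET_PMH_REG(0) with 0x0123 | ENET_PMH_DATAVALID_MASK.
+ */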
+
+/*
+ * Change rx mode (promiscuous/allmulti) and update multicast list
+ */
+static void bcm_enet_set_multicast_list(struct net_device *dev)
+{
+ struct bcm_enet_priv *priv;
+ struct netdev_hw_addr *ha;
+ u32 val;
+ int i;
+
+ priv = netdev_priv(dev);
+
+ val = enet_readl(priv, ENET_RXCFG_REG);
+
+ if (dev->flags & IFF_PROMISC)
+ val |= ENET_RXCFG_PROMISC_MASK;
+ else
+ val &= ~ENET_RXCFG_PROMISC_MASK;
+
+ /* only 3 perfect match registers are left, the first one is
+ * used for our own mac address */
+ if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
+ val |= ENET_RXCFG_ALLMCAST_MASK;
+ else
+ val &= ~ENET_RXCFG_ALLMCAST_MASK;
+
+ /* no need to set perfect match registers if we catch all
+ * multicast */
+ if (val & ENET_RXCFG_ALLMCAST_MASK) {
+ enet_writel(priv, val, ENET_RXCFG_REG);
+ return;
+ }
+
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ u8 *dmi_addr;
+ u32 tmp;
+
+ if (i == 3)
+ break;
+ /* update perfect match registers */
+ dmi_addr = ha->addr;
+ tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
+ (dmi_addr[4] << 8) | dmi_addr[5];
+ enet_writel(priv, tmp, ENET_PML_REG(i + 1));
+
+ tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
+ tmp |= ENET_PMH_DATAVALID_MASK;
+ enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
+ }
+
+ for (; i < 3; i++) {
+ enet_writel(priv, 0, ENET_PML_REG(i + 1));
+ enet_writel(priv, 0, ENET_PMH_REG(i + 1));
+ }
+
+ enet_writel(priv, val, ENET_RXCFG_REG);
+}
+
+/*
+ * set mac duplex parameters
+ */
+static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
+{
+ u32 val;
+
+ val = enet_readl(priv, ENET_TXCTL_REG);
+ if (fullduplex)
+ val |= ENET_TXCTL_FD_MASK;
+ else
+ val &= ~ENET_TXCTL_FD_MASK;
+ enet_writel(priv, val, ENET_TXCTL_REG);
+}
+
+/*
+ * set mac flow control parameters
+ */
+static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
+{
+ u32 val;
+
+ /* rx flow control (pause frame handling) */
+ val = enet_readl(priv, ENET_RXCFG_REG);
+ if (rx_en)
+ val |= ENET_RXCFG_ENFLOW_MASK;
+ else
+ val &= ~ENET_RXCFG_ENFLOW_MASK;
+ enet_writel(priv, val, ENET_RXCFG_REG);
+
+ /* tx flow control (pause frame generation) */
+ val = enet_dma_readl(priv, ENETDMA_CFG_REG);
+ if (tx_en)
+ val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
+ else
+ val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
+ enet_dma_writel(priv, val, ENETDMA_CFG_REG);
+}
+
+/*
+ * link changed callback (from phylib)
+ */
+static void bcm_enet_adjust_phy_link(struct net_device *dev)
+{
+ struct bcm_enet_priv *priv;
+ struct phy_device *phydev;
+ int status_changed;
+
+ priv = netdev_priv(dev);
+ phydev = priv->phydev;
+ status_changed = 0;
+
+ if (priv->old_link != phydev->link) {
+ status_changed = 1;
+ priv->old_link = phydev->link;
+ }
+
+ /* reflect duplex change in mac configuration */
+ if (phydev->link && phydev->duplex != priv->old_duplex) {
+ bcm_enet_set_duplex(priv,
+ (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
+ status_changed = 1;
+ priv->old_duplex = phydev->duplex;
+ }
+
+ /* enable flow control if the remote advertises it (trust
+ * phylib to check that duplex is full) */
+ if (phydev->link && phydev->pause != priv->old_pause) {
+ int rx_pause_en, tx_pause_en;
+
+ if (phydev->pause) {
+ /* pause was advertised by lpa and us */
+ rx_pause_en = 1;
+ tx_pause_en = 1;
+ } else if (!priv->pause_auto) {
+ /* pause settings overridden by user */
+ rx_pause_en = priv->pause_rx;
+ tx_pause_en = priv->pause_tx;
+ } else {
+ rx_pause_en = 0;
+ tx_pause_en = 0;
+ }
+
+ bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
+ status_changed = 1;
+ priv->old_pause = phydev->pause;
+ }
+
+ if (status_changed) {
+ pr_info("%s: link %s", dev->name, phydev->link ?
+ "UP" : "DOWN");
+ if (phydev->link)
+ pr_cont(" - %d/%s - flow control %s", phydev->speed,
+ DUPLEX_FULL == phydev->duplex ? "full" : "half",
+ phydev->pause == 1 ? "rx&tx" : "off");
+
+ pr_cont("\n");
+ }
+}
+
+/*
+ * link changed callback (if phylib is not used)
+ */
+static void bcm_enet_adjust_link(struct net_device *dev)
+{
+ struct bcm_enet_priv *priv;
+
+ priv = netdev_priv(dev);
+ bcm_enet_set_duplex(priv, priv->force_duplex_full);
+ bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
+ netif_carrier_on(dev);
+
+ pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
+ dev->name,
+ priv->force_speed_100 ? 100 : 10,
+ priv->force_duplex_full ? "full" : "half",
+ priv->pause_rx ? "rx" : "off",
+ priv->pause_tx ? "tx" : "off");
+}
+
+/*
+ * open callback, allocate dma rings & buffers and start rx operation
+ */
+static int bcm_enet_open(struct net_device *dev)
+{
+ struct bcm_enet_priv *priv;
+ struct sockaddr addr;
+ struct device *kdev;
+ struct phy_device *phydev;
+ int i, ret;
+ unsigned int size;
+ char phy_id[MII_BUS_ID_SIZE + 3];
+ void *p;
+ u32 val;
+
+ priv = netdev_priv(dev);
+ kdev = &priv->pdev->dev;
+
+ if (priv->has_phy) {
+ /* connect to PHY */
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
+ priv->mac_id ? "1" : "0", priv->phy_id);
+
+ phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0,
+ PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(phydev)) {
+ dev_err(kdev, "could not attach to PHY\n");
+ return PTR_ERR(phydev);
+ }
+
+ /* mask with MAC supported features */
+ phydev->supported &= (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_MII);
+ phydev->advertising = phydev->supported;
+
+ if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
+ phydev->advertising |= SUPPORTED_Pause;
+ else
+ phydev->advertising &= ~SUPPORTED_Pause;
+
+ dev_info(kdev, "attached PHY at address %d [%s]\n",
+ phydev->addr, phydev->drv->name);
+
+ priv->old_link = 0;
+ priv->old_duplex = -1;
+ priv->old_pause = -1;
+ priv->phydev = phydev;
+ }
+
+ /* mask all interrupts and request them */
+ enet_writel(priv, 0, ENET_IRMASK_REG);
+ enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
+ enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+
+ ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
+ if (ret)
+ goto out_phy_disconnect;
+
+ ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED,
+ dev->name, dev);
+ if (ret)
+ goto out_freeirq;
+
+ ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
+ IRQF_DISABLED, dev->name, dev);
+ if (ret)
+ goto out_freeirq_rx;
+
+ /* initialize perfect match registers */
+ for (i = 0; i < 4; i++) {
+ enet_writel(priv, 0, ENET_PML_REG(i));
+ enet_writel(priv, 0, ENET_PMH_REG(i));
+ }
+
+ /* write device mac address */
+ memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
+ bcm_enet_set_mac_address(dev, &addr);
+
+ /* allocate rx dma ring */
+ size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
+ p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+ if (!p) {
+ dev_err(kdev, "cannot allocate rx ring %u\n", size);
+ ret = -ENOMEM;
+ goto out_freeirq_tx;
+ }
+
+ memset(p, 0, size);
+ priv->rx_desc_alloc_size = size;
+ priv->rx_desc_cpu = p;
+
+ /* allocate tx dma ring */
+ size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
+ p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+ if (!p) {
+ dev_err(kdev, "cannot allocate tx ring\n");
+ ret = -ENOMEM;
+ goto out_free_rx_ring;
+ }
+
+ memset(p, 0, size);
+ priv->tx_desc_alloc_size = size;
+ priv->tx_desc_cpu = p;
+
+ priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
+ GFP_KERNEL);
+ if (!priv->tx_skb) {
+ dev_err(kdev, "cannot allocate tx skb queue\n");
+ ret = -ENOMEM;
+ goto out_free_tx_ring;
+ }
+
+ priv->tx_desc_count = priv->tx_ring_size;
+ priv->tx_dirty_desc = 0;
+ priv->tx_curr_desc = 0;
+ spin_lock_init(&priv->tx_lock);
+
+ /* init & fill rx ring with skbs */
+ priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
+ GFP_KERNEL);
+ if (!priv->rx_skb) {
+ dev_err(kdev, "cannot allocate rx skb queue\n");
+ ret = -ENOMEM;
+ goto out_free_tx_skb;
+ }
+
+ priv->rx_desc_count = 0;
+ priv->rx_dirty_desc = 0;
+ priv->rx_curr_desc = 0;
+
+ /* initialize flow control buffer allocation */
+ enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
+ ENETDMA_BUFALLOC_REG(priv->rx_chan));
+
+ if (bcm_enet_refill_rx(dev)) {
+ dev_err(kdev, "cannot allocate rx skb queue\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* write rx & tx ring addresses */
+ enet_dma_writel(priv, priv->rx_desc_dma,
+ ENETDMA_RSTART_REG(priv->rx_chan));
+ enet_dma_writel(priv, priv->tx_desc_dma,
+ ENETDMA_RSTART_REG(priv->tx_chan));
+
+ /* clear remaining state ram for rx & tx channel */
+ enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
+ enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
+ enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
+ enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
+ enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
+ enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));
+
+ /* set max rx/tx length */
+ enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
+ enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);
+
+ /* set dma maximum burst len */
+ enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
+ ENETDMA_MAXBURST_REG(priv->rx_chan));
+ enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
+ ENETDMA_MAXBURST_REG(priv->tx_chan));
+
+ /* set correct transmit fifo watermark */
+ enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
+
+ /* set flow control low/high threshold to 1/3 / 2/3 */
+ val = priv->rx_ring_size / 3;
+ enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
+ val = (priv->rx_ring_size * 2) / 3;
+ enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
+
+ /* all set, enable mac and interrupts, start dma engine and
+ * kick rx dma channel */
+ wmb();
+ val = enet_readl(priv, ENET_CTL_REG);
+ val |= ENET_CTL_ENABLE_MASK;
+ enet_writel(priv, val, ENET_CTL_REG);
+ enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
+ enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
+ ENETDMA_CHANCFG_REG(priv->rx_chan));
+
+ /* watch "mib counters about to overflow" interrupt */
+ enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
+ enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
+
+ /* watch "packet transferred" interrupt in rx and tx */
+ enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+ ENETDMA_IR_REG(priv->rx_chan));
+ enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+ ENETDMA_IR_REG(priv->tx_chan));
+
+ /* make sure we enable napi before rx interrupt */
+ napi_enable(&priv->napi);
+
+ enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+ ENETDMA_IRMASK_REG(priv->rx_chan));
+ enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+ ENETDMA_IRMASK_REG(priv->tx_chan));
+
+ if (priv->has_phy)
+ phy_start(priv->phydev);
+ else
+ bcm_enet_adjust_link(dev);
+
+ netif_start_queue(dev);
+ return 0;
+
+out:
+ for (i = 0; i < priv->rx_ring_size; i++) {
+ struct bcm_enet_desc *desc;
+
+ if (!priv->rx_skb[i])
+ continue;
+
+ desc = &priv->rx_desc_cpu[i];
+ dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
+ DMA_FROM_DEVICE);
+ kfree_skb(priv->rx_skb[i]);
+ }
+ kfree(priv->rx_skb);
+
+out_free_tx_skb:
+ kfree(priv->tx_skb);
+
+out_free_tx_ring:
+ dma_free_coherent(kdev, priv->tx_desc_alloc_size,
+ priv->tx_desc_cpu, priv->tx_desc_dma);
+
+out_free_rx_ring:
+ dma_free_coherent(kdev, priv->rx_desc_alloc_size,
+ priv->rx_desc_cpu, priv->rx_desc_dma);
+
+out_freeirq_tx:
+ free_irq(priv->irq_tx, dev);
+
+out_freeirq_rx:
+ free_irq(priv->irq_rx, dev);
+
+out_freeirq:
+ free_irq(dev->irq, dev);
+
+out_phy_disconnect:
+ phy_disconnect(priv->phydev);
+
+ return ret;
+}
+
+/*
+ * disable mac
+ */
+static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
+{
+ int limit;
+ u32 val;
+
+ val = enet_readl(priv, ENET_CTL_REG);
+ val |= ENET_CTL_DISABLE_MASK;
+ enet_writel(priv, val, ENET_CTL_REG);
+
+ limit = 1000;
+ do {
+ u32 val;
+
+ val = enet_readl(priv, ENET_CTL_REG);
+ if (!(val & ENET_CTL_DISABLE_MASK))
+ break;
+ udelay(1);
+ } while (limit--);
+}
+
+/*
+ * disable dma in given channel
+ */
+static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
+{
+ int limit;
+
+ enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));
+
+ limit = 1000;
+ do {
+ u32 val;
+
+ val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
+ if (!(val & ENETDMA_CHANCFG_EN_MASK))
+ break;
+ udelay(1);
+ } while (limit--);
+}
+
+/*
+ * stop callback
+ */
+static int bcm_enet_stop(struct net_device *dev)
+{
+ struct bcm_enet_priv *priv;
+ struct device *kdev;
+ int i;
+
+ priv = netdev_priv(dev);
+ kdev = &priv->pdev->dev;
+
+ netif_stop_queue(dev);
+ napi_disable(&priv->napi);
+ if (priv->has_phy)
+ phy_stop(priv->phydev);
+ del_timer_sync(&priv->rx_timeout);
+
+ /* mask all interrupts */
+ enet_writel(priv, 0, ENET_IRMASK_REG);
+ enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
+ enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+
+ /* make sure no mib update is scheduled */
+ cancel_work_sync(&priv->mib_update_task);
+
+ /* disable dma & mac */
+ bcm_enet_disable_dma(priv, priv->tx_chan);
+ bcm_enet_disable_dma(priv, priv->rx_chan);
+ bcm_enet_disable_mac(priv);
+
+ /* force reclaim of all tx buffers */
+ bcm_enet_tx_reclaim(dev, 1);
+
+ /* free the rx skb ring */
+ for (i = 0; i < priv->rx_ring_size; i++) {
+ struct bcm_enet_desc *desc;
+
+ if (!priv->rx_skb[i])
+ continue;
+
+ desc = &priv->rx_desc_cpu[i];
+ dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
+ DMA_FROM_DEVICE);
+ kfree_skb(priv->rx_skb[i]);
+ }
+
+ /* free remaining allocated memory */
+ kfree(priv->rx_skb);
+ kfree(priv->tx_skb);
+ dma_free_coherent(kdev, priv->rx_desc_alloc_size,
+ priv->rx_desc_cpu, priv->rx_desc_dma);
+ dma_free_coherent(kdev, priv->tx_desc_alloc_size,
+ priv->tx_desc_cpu, priv->tx_desc_dma);
+ free_irq(priv->irq_tx, dev);
+ free_irq(priv->irq_rx, dev);
+ free_irq(dev->irq, dev);
+
+ /* release phy */
+ if (priv->has_phy) {
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * ethtool callbacks
+ */
+struct bcm_enet_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+ int mib_reg;
+};
+
+#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \
+ offsetof(struct bcm_enet_priv, m)
+#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m), \
+ offsetof(struct net_device_stats, m)
+
+static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
+ { "rx_packets", DEV_STAT(rx_packets), -1 },
+ { "tx_packets", DEV_STAT(tx_packets), -1 },
+ { "rx_bytes", DEV_STAT(rx_bytes), -1 },
+ { "tx_bytes", DEV_STAT(tx_bytes), -1 },
+ { "rx_errors", DEV_STAT(rx_errors), -1 },
+ { "tx_errors", DEV_STAT(tx_errors), -1 },
+ { "rx_dropped", DEV_STAT(rx_dropped), -1 },
+ { "tx_dropped", DEV_STAT(tx_dropped), -1 },
+
+ { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
+ { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
+ { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
+ { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
+ { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
+ { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
+ { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
+ { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
+ { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
+ { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
+ { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
+ { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
+ { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
+ { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
+ { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
+ { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
+ { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
+ { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
+ { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
+ { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
+ { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },
+
+ { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
+ { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
+ { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
+ { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
+ { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
+ { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
+ { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
+ { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
+ { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
+ { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
+ { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
+ { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
+ { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
+ { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
+ { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
+ { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
+ { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
+ { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
+ { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
+ { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
+ { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
+ { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
+
+};
+
+#define BCM_ENET_STATS_LEN \
+ (sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))
+
+static const u32 unused_mib_regs[] = {
+ ETH_MIB_TX_ALL_OCTETS,
+ ETH_MIB_TX_ALL_PKTS,
+ ETH_MIB_RX_ALL_OCTETS,
+ ETH_MIB_RX_ALL_PKTS,
+};
+
+
+static void bcm_enet_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
+ strncpy(drvinfo->version, bcm_enet_driver_version, 32);
+ strncpy(drvinfo->fw_version, "N/A", 32);
+ strncpy(drvinfo->bus_info, "bcm63xx", 32);
+ drvinfo->n_stats = BCM_ENET_STATS_LEN;
+}
+
+static int bcm_enet_get_sset_count(struct net_device *netdev,
+ int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return BCM_ENET_STATS_LEN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void bcm_enet_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ bcm_enet_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ }
+}
+
+static void update_mib_counters(struct bcm_enet_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
+ const struct bcm_enet_stats *s;
+ u32 val;
+ char *p;
+
+ s = &bcm_enet_gstrings_stats[i];
+ if (s->mib_reg == -1)
+ continue;
+
+ val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
+ p = (char *)priv + s->stat_offset;
+
+ if (s->sizeof_stat == sizeof(u64))
+ *(u64 *)p += val;
+ else
+ *(u32 *)p += val;
+ }
+
+ /* also read the unused mib counters (they clear on read) to
+ * make sure the mib counter overflow interrupt is cleared */
+ for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
+ (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
+}
+
+static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
+{
+ struct bcm_enet_priv *priv;
+
+ priv = container_of(t, struct bcm_enet_priv, mib_update_task);
+ mutex_lock(&priv->mib_update_lock);
+ update_mib_counters(priv);
+ mutex_unlock(&priv->mib_update_lock);
+
+ /* reenable mib interrupt */
+ if (netif_running(priv->net_dev))
+ enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
+}
+
+static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct bcm_enet_priv *priv;
+ int i;
+
+ priv = netdev_priv(netdev);
+
+ mutex_lock(&priv->mib_update_lock);
+ update_mib_counters(priv);
+
+ for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
+ const struct bcm_enet_stats *s;
+ char *p;
+
+ s = &bcm_enet_gstrings_stats[i];
+ if (s->mib_reg == -1)
+ p = (char *)&netdev->stats;
+ else
+ p = (char *)priv;
+ p += s->stat_offset;
+ data[i] = (s->sizeof_stat == sizeof(u64)) ?
+ *(u64 *)p : *(u32 *)p;
+ }
+ mutex_unlock(&priv->mib_update_lock);
+}
+
+static int bcm_enet_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bcm_enet_priv *priv;
+
+ priv = netdev_priv(dev);
+
+ cmd->maxrxpkt = 0;
+ cmd->maxtxpkt = 0;
+
+ if (priv->has_phy) {
+ if (!priv->phydev)
+ return -ENODEV;
+ return phy_ethtool_gset(priv->phydev, cmd);
+ } else {
+ cmd->autoneg = 0;
+ ethtool_cmd_speed_set(cmd, ((priv->force_speed_100)
+ ? SPEED_100 : SPEED_10));
+ cmd->duplex = (priv->force_duplex_full) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ cmd->supported = ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full;
+ cmd->advertising = 0;
+ cmd->port = PORT_MII;
+ cmd->transceiver = XCVR_EXTERNAL;
+ }
+ return 0;
+}
+
+static int bcm_enet_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bcm_enet_priv *priv;
+
+ priv = netdev_priv(dev);
+ if (priv->has_phy) {
+ if (!priv->phydev)
+ return -ENODEV;
+ return phy_ethtool_sset(priv->phydev, cmd);
+ } else {
+
+ if (cmd->autoneg ||
+ (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
+ cmd->port != PORT_MII)
+ return -EINVAL;
+
+ priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
+ priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;
+
+ if (netif_running(dev))
+ bcm_enet_adjust_link(dev);
+ return 0;
+ }
+}
+
+static void bcm_enet_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct bcm_enet_priv *priv;
+
+ priv = netdev_priv(dev);
+
+ /* rx/tx ring is actually only limited by memory */
+ ering->rx_max_pending = 8192;
+ ering->tx_max_pending = 8192;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = 0;
+ ering->rx_pending = priv->rx_ring_size;
+ ering->tx_pending = priv->tx_ring_size;
+}
+
+static int bcm_enet_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct bcm_enet_priv *priv;
+ int was_running;
+
+ priv = netdev_priv(dev);
+
+ was_running = 0;
+ if (netif_running(dev)) {
+ bcm_enet_stop(dev);
+ was_running = 1;
+ }
+
+ priv->rx_ring_size = ering->rx_pending;
+ priv->tx_ring_size = ering->tx_pending;
+
+ if (was_running) {
+ int err;
+
+ err = bcm_enet_open(dev);
+ if (err)
+ dev_close(dev);
+ else
+ bcm_enet_set_multicast_list(dev);
+ }
+ return 0;
+}
+
+static void bcm_enet_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *ecmd)
+{
+ struct bcm_enet_priv *priv;
+
+ priv = netdev_priv(dev);
+ ecmd->autoneg = priv->pause_auto;
+ ecmd->rx_pause = priv->pause_rx;
+ ecmd->tx_pause = priv->pause_tx;
+}
+
+static int bcm_enet_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *ecmd)
+{
+ struct bcm_enet_priv *priv;
+
+ priv = netdev_priv(dev);
+
+ if (priv->has_phy) {
+ if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
+ /* asymmetric pause mode not supported; it is
+ * actually possible, but the integrated PHY has a
+ * read-only asym_pause bit */
+ return -EINVAL;
+ }
+ } else {
+ /* no pause autoneg on direct mii connection */
+ if (ecmd->autoneg)
+ return -EINVAL;
+ }
+
+ priv->pause_auto = ecmd->autoneg;
+ priv->pause_rx = ecmd->rx_pause;
+ priv->pause_tx = ecmd->tx_pause;
+
+ return 0;
+}
+
+static struct ethtool_ops bcm_enet_ethtool_ops = {
+ .get_strings = bcm_enet_get_strings,
+ .get_sset_count = bcm_enet_get_sset_count,
+ .get_ethtool_stats = bcm_enet_get_ethtool_stats,
+ .get_settings = bcm_enet_get_settings,
+ .set_settings = bcm_enet_set_settings,
+ .get_drvinfo = bcm_enet_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = bcm_enet_get_ringparam,
+ .set_ringparam = bcm_enet_set_ringparam,
+ .get_pauseparam = bcm_enet_get_pauseparam,
+ .set_pauseparam = bcm_enet_set_pauseparam,
+};
+
+static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct bcm_enet_priv *priv;
+
+ priv = netdev_priv(dev);
+ if (priv->has_phy) {
+ if (!priv->phydev)
+ return -ENODEV;
+ return phy_mii_ioctl(priv->phydev, rq, cmd);
+ } else {
+ struct mii_if_info mii;
+
+ mii.dev = dev;
+ mii.mdio_read = bcm_enet_mdio_read_mii;
+ mii.mdio_write = bcm_enet_mdio_write_mii;
+ mii.phy_id = 0;
+ mii.phy_id_mask = 0x3f;
+ mii.reg_num_mask = 0x1f;
+ return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
+ }
+}
+
+/*
+ * calculate actual hardware mtu
+ */
+static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
+{
+ int actual_mtu;
+
+ actual_mtu = mtu;
+
+ /* add ethernet header + vlan tag size */
+ actual_mtu += VLAN_ETH_HLEN;
+
+ if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
+ return -EINVAL;
+
+ /*
+ * set up the maximum size before we get the overflow mark in
+ * the descriptor; note that this will not prevent reception of
+ * big frames, they will just be split into multiple buffers
+ * anyway
+ */
+ priv->hw_mtu = actual_mtu;
+
+ /*
+ * align the rx buffer size to the dma burst length, and account
+ * for the FCS since it is appended to the frame
+ */
+ priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
+ BCMENET_DMA_MAXBURST * 4);
+ return 0;
+}
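+
+/* Worked example, assuming BCMENET_DMA_MAXBURST is 16 (its value in
+ * bcm63xx_enet.h): for the default mtu of 1500, actual_mtu becomes
+ * 1500 + VLAN_ETH_HLEN (18) = 1518, and rx_skb_size is
+ * ALIGN(1518 + ETH_FCS_LEN (4), 16 * 4) = ALIGN(1522, 64) = 1536.
+ */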
+
+/*
+ * adjust mtu, can't be called while device is running
+ */
+static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int ret;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
+ if (ret)
+ return ret;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+/*
+ * preinit hardware to allow mii operation while device is down
+ */
+static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
+{
+ u32 val;
+ int limit;
+
+ /* make sure mac is disabled */
+ bcm_enet_disable_mac(priv);
+
+ /* soft reset mac */
+ val = ENET_CTL_SRESET_MASK;
+ enet_writel(priv, val, ENET_CTL_REG);
+ wmb();
+
+ limit = 1000;
+ do {
+ val = enet_readl(priv, ENET_CTL_REG);
+ if (!(val & ENET_CTL_SRESET_MASK))
+ break;
+ udelay(1);
+ } while (limit--);
+
+ /* select correct mii interface */
+ val = enet_readl(priv, ENET_CTL_REG);
+ if (priv->use_external_mii)
+ val |= ENET_CTL_EPHYSEL_MASK;
+ else
+ val &= ~ENET_CTL_EPHYSEL_MASK;
+ enet_writel(priv, val, ENET_CTL_REG);
+
+ /* turn on mdc clock */
+ enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
+ ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
+
+ /* set mib counters to self-clear when read */
+ val = enet_readl(priv, ENET_MIBCTL_REG);
+ val |= ENET_MIBCTL_RDCLEAR_MASK;
+ enet_writel(priv, val, ENET_MIBCTL_REG);
+}
+
+static const struct net_device_ops bcm_enet_ops = {
+ .ndo_open = bcm_enet_open,
+ .ndo_stop = bcm_enet_stop,
+ .ndo_start_xmit = bcm_enet_start_xmit,
+ .ndo_set_mac_address = bcm_enet_set_mac_address,
+ .ndo_set_rx_mode = bcm_enet_set_multicast_list,
+ .ndo_do_ioctl = bcm_enet_ioctl,
+ .ndo_change_mtu = bcm_enet_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = bcm_enet_netpoll,
+#endif
+};
+
+/*
+ * allocate netdevice, request register memory and register device.
+ */
+static int __devinit bcm_enet_probe(struct platform_device *pdev)
+{
+ struct bcm_enet_priv *priv;
+ struct net_device *dev;
+ struct bcm63xx_enet_platform_data *pd;
+ struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
+ struct mii_bus *bus;
+ const char *clk_name;
+ unsigned int iomem_size;
+ int i, ret;
+
+ /* stop if the shared driver failed; assume driver->probe will
+ * be called in the same order we registered devices (correct?) */
+ if (!bcm_enet_shared_base)
+ return -ENODEV;
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
+ if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
+ return -ENODEV;
+
+ ret = 0;
+ dev = alloc_etherdev(sizeof(*priv));
+ if (!dev)
+ return -ENOMEM;
+ priv = netdev_priv(dev);
+
+ ret = compute_hw_mtu(priv, dev->mtu);
+ if (ret)
+ goto out;
+
+ iomem_size = resource_size(res_mem);
+ if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ priv->base = ioremap(res_mem->start, iomem_size);
+ if (priv->base == NULL) {
+ ret = -ENOMEM;
+ goto out_release_mem;
+ }
+ dev->irq = priv->irq = res_irq->start;
+ priv->irq_rx = res_irq_rx->start;
+ priv->irq_tx = res_irq_tx->start;
+ priv->mac_id = pdev->id;
+
+ /* get rx & tx dma channel id for this mac */
+ if (priv->mac_id == 0) {
+ priv->rx_chan = 0;
+ priv->tx_chan = 1;
+ clk_name = "enet0";
+ } else {
+ priv->rx_chan = 2;
+ priv->tx_chan = 3;
+ clk_name = "enet1";
+ }
+
+ priv->mac_clk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(priv->mac_clk)) {
+ ret = PTR_ERR(priv->mac_clk);
+ goto out_unmap;
+ }
+ clk_enable(priv->mac_clk);
+
+ /* initialize default and fetch platform data */
+ priv->rx_ring_size = BCMENET_DEF_RX_DESC;
+ priv->tx_ring_size = BCMENET_DEF_TX_DESC;
+
+ pd = pdev->dev.platform_data;
+ if (pd) {
+ memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
+ priv->has_phy = pd->has_phy;
+ priv->phy_id = pd->phy_id;
+ priv->has_phy_interrupt = pd->has_phy_interrupt;
+ priv->phy_interrupt = pd->phy_interrupt;
+ priv->use_external_mii = !pd->use_internal_phy;
+ priv->pause_auto = pd->pause_auto;
+ priv->pause_rx = pd->pause_rx;
+ priv->pause_tx = pd->pause_tx;
+ priv->force_duplex_full = pd->force_duplex_full;
+ priv->force_speed_100 = pd->force_speed_100;
+ }
+
+ if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
+ /* using internal PHY, enable clock */
+ priv->phy_clk = clk_get(&pdev->dev, "ephy");
+ if (IS_ERR(priv->phy_clk)) {
+ ret = PTR_ERR(priv->phy_clk);
+ priv->phy_clk = NULL;
+ goto out_put_clk_mac;
+ }
+ clk_enable(priv->phy_clk);
+ }
+
+ /* do minimal hardware init to be able to probe mii bus */
+ bcm_enet_hw_preinit(priv);
+
+ /* MII bus registration */
+ if (priv->has_phy) {
+
+ priv->mii_bus = mdiobus_alloc();
+ if (!priv->mii_bus) {
+ ret = -ENOMEM;
+ goto out_uninit_hw;
+ }
+
+ bus = priv->mii_bus;
+ bus->name = "bcm63xx_enet MII bus";
+ bus->parent = &pdev->dev;
+ bus->priv = priv;
+ bus->read = bcm_enet_mdio_read_phylib;
+ bus->write = bcm_enet_mdio_write_phylib;
+ sprintf(bus->id, "%d", priv->mac_id);
+
+ /* only probe the bus where we think the PHY is, because
+ * the mdio read operation returns 0 instead of 0xffff
+ * if a slave is not present on the hw */
+ bus->phy_mask = ~(1 << priv->phy_id);
+
+ bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!bus->irq) {
+ ret = -ENOMEM;
+ goto out_free_mdio;
+ }
+
+ if (priv->has_phy_interrupt)
+ bus->irq[priv->phy_id] = priv->phy_interrupt;
+ else
+ bus->irq[priv->phy_id] = PHY_POLL;
+
+ ret = mdiobus_register(bus);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register mdio bus\n");
+ goto out_free_mdio;
+ }
+ } else {
+
+ /* run platform code to initialize PHY device */
+ if (pd && pd->mii_config &&
+ pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
+ bcm_enet_mdio_write_mii)) {
+ dev_err(&pdev->dev, "unable to configure mdio bus\n");
+ ret = -ENODEV;
+ goto out_uninit_hw;
+ }
+ }
+
+ spin_lock_init(&priv->rx_lock);
+
+ /* init rx timeout (used for oom) */
+ init_timer(&priv->rx_timeout);
+ priv->rx_timeout.function = bcm_enet_refill_rx_timer;
+ priv->rx_timeout.data = (unsigned long)dev;
+
+ /* init the mib update lock&work */
+ mutex_init(&priv->mib_update_lock);
+ INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
+
+ /* zero mib counters */
+ for (i = 0; i < ENET_MIB_REG_COUNT; i++)
+ enet_writel(priv, 0, ENET_MIB_REG(i));
+
+ /* register netdevice */
+ dev->netdev_ops = &bcm_enet_ops;
+ netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
+
+ SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ ret = register_netdev(dev);
+ if (ret)
+ goto out_unregister_mdio;
+
+ netif_carrier_off(dev);
+ platform_set_drvdata(pdev, dev);
+ priv->pdev = pdev;
+ priv->net_dev = dev;
+
+ return 0;
+
+out_unregister_mdio:
+ if (priv->mii_bus) {
+ mdiobus_unregister(priv->mii_bus);
+ kfree(priv->mii_bus->irq);
+ }
+
+out_free_mdio:
+ if (priv->mii_bus)
+ mdiobus_free(priv->mii_bus);
+
+out_uninit_hw:
+ /* turn off mdc clock */
+ enet_writel(priv, 0, ENET_MIISC_REG);
+ if (priv->phy_clk) {
+ clk_disable(priv->phy_clk);
+ clk_put(priv->phy_clk);
+ }
+
+out_put_clk_mac:
+ clk_disable(priv->mac_clk);
+ clk_put(priv->mac_clk);
+
+out_unmap:
+ iounmap(priv->base);
+
+out_release_mem:
+ release_mem_region(res_mem->start, iomem_size);
+out:
+ free_netdev(dev);
+ return ret;
+}
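+
+/*
+ * Note on the error path above: each label undoes exactly one
+ * acquisition, and control falls through so resources are released
+ * in reverse order of acquisition. A minimal sketch of the idiom
+ * (hypothetical names):
+ *
+ *   a = acquire_a();
+ *   if (!a)
+ *           return -ENOMEM;
+ *   b = acquire_b();
+ *   if (!b) {
+ *           ret = -ENOMEM;
+ *           goto out_release_a;
+ *   }
+ *   return 0;
+ *
+ *  out_release_a:
+ *   release_a(a);
+ *   return ret;
+ */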
+
+
+/*
+ * exit func, stops hardware and unregisters netdevice
+ */
+static int __devexit bcm_enet_remove(struct platform_device *pdev)
+{
+ struct bcm_enet_priv *priv;
+ struct net_device *dev;
+ struct resource *res;
+
+ /* stop netdevice */
+ dev = platform_get_drvdata(pdev);
+ priv = netdev_priv(dev);
+ unregister_netdev(dev);
+
+ /* turn off mdc clock */
+ enet_writel(priv, 0, ENET_MIISC_REG);
+
+ if (priv->has_phy) {
+ mdiobus_unregister(priv->mii_bus);
+ kfree(priv->mii_bus->irq);
+ mdiobus_free(priv->mii_bus);
+ } else {
+ struct bcm63xx_enet_platform_data *pd;
+
+ pd = pdev->dev.platform_data;
+ if (pd && pd->mii_config)
+ pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
+ bcm_enet_mdio_write_mii);
+ }
+
+ /* release device resources */
+ iounmap(priv->base);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ /* disable hw block clocks */
+ if (priv->phy_clk) {
+ clk_disable(priv->phy_clk);
+ clk_put(priv->phy_clk);
+ }
+ clk_disable(priv->mac_clk);
+ clk_put(priv->mac_clk);
+
+ platform_set_drvdata(pdev, NULL);
+ free_netdev(dev);
+ return 0;
+}
+
+struct platform_driver bcm63xx_enet_driver = {
+ .probe = bcm_enet_probe,
+ .remove = __devexit_p(bcm_enet_remove),
+ .driver = {
+ .name = "bcm63xx_enet",
+ .owner = THIS_MODULE,
+ },
+};
+
+/*
+ * reserve & remap memory space shared between all macs
+ */
+static int __devinit bcm_enet_shared_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ unsigned int iomem_size;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ iomem_size = resource_size(res);
+ if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
+ return -EBUSY;
+
+ bcm_enet_shared_base = ioremap(res->start, iomem_size);
+ if (!bcm_enet_shared_base) {
+ release_mem_region(res->start, iomem_size);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int __devexit bcm_enet_shared_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ iounmap(bcm_enet_shared_base);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+ return 0;
+}
+
+/*
+ * this "shared" driver is needed because both macs share a single
+ * address space
+ */
+struct platform_driver bcm63xx_enet_shared_driver = {
+ .probe = bcm_enet_shared_probe,
+ .remove = __devexit_p(bcm_enet_shared_remove),
+ .driver = {
+ .name = "bcm63xx_enet_shared",
+ .owner = THIS_MODULE,
+ },
+};
+
+/*
+ * entry point
+ */
+static int __init bcm_enet_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&bcm63xx_enet_shared_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&bcm63xx_enet_driver);
+ if (ret)
+ platform_driver_unregister(&bcm63xx_enet_shared_driver);
+
+ return ret;
+}
+
+static void __exit bcm_enet_exit(void)
+{
+ platform_driver_unregister(&bcm63xx_enet_driver);
+ platform_driver_unregister(&bcm63xx_enet_shared_driver);
+}
+
+
+module_init(bcm_enet_init);
+module_exit(bcm_enet_exit);
+
+MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
new file mode 100644
index 000000000000..0e3048b788c2
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -0,0 +1,302 @@
+#ifndef BCM63XX_ENET_H_
+#define BCM63XX_ENET_H_
+
+#include <linux/types.h>
+#include <linux/mii.h>
+#include <linux/mutex.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+
+#include <bcm63xx_regs.h>
+#include <bcm63xx_irq.h>
+#include <bcm63xx_io.h>
+
+/* default number of descriptors */
+#define BCMENET_DEF_RX_DESC 64
+#define BCMENET_DEF_TX_DESC 32
+
+/* maximum burst length for dma (in 4 byte units) */
+#define BCMENET_DMA_MAXBURST 16
+
+/* tx transmit threshold (in 4 byte units); the fifo is 256 bytes, so
+ * the value must be low enough that a DMA transfer of the above burst
+ * length cannot overflow the fifo */
+#define BCMENET_TX_FIFO_TRESH 32
+
+/*
+ * hardware maximum rx/tx packet size including FCS; the max mtu is
+ * actually 2047, but if we set the max rx size register to 2047 we
+ * won't get overflow information for packets of 2048 bytes or more
+ */
+#define BCMENET_MAX_MTU 2046
+
+/*
+ * rx/tx dma descriptor
+ */
+struct bcm_enet_desc {
+ u32 len_stat;
+ u32 address;
+};
+
+#define DMADESC_LENGTH_SHIFT 16
+#define DMADESC_LENGTH_MASK (0xfff << DMADESC_LENGTH_SHIFT)
+#define DMADESC_OWNER_MASK (1 << 15)
+#define DMADESC_EOP_MASK (1 << 14)
+#define DMADESC_SOP_MASK (1 << 13)
+#define DMADESC_ESOP_MASK (DMADESC_EOP_MASK | DMADESC_SOP_MASK)
+#define DMADESC_WRAP_MASK (1 << 12)
+
+#define DMADESC_UNDER_MASK (1 << 9)
+#define DMADESC_APPEND_CRC (1 << 8)
+#define DMADESC_OVSIZE_MASK (1 << 4)
+#define DMADESC_RXER_MASK (1 << 2)
+#define DMADESC_CRC_MASK (1 << 1)
+#define DMADESC_OV_MASK (1 << 0)
+#define DMADESC_ERR_MASK (DMADESC_UNDER_MASK | \
+ DMADESC_OVSIZE_MASK | \
+ DMADESC_RXER_MASK | \
+ DMADESC_CRC_MASK | \
+ DMADESC_OV_MASK)
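+
+/*
+ * Illustrative sketch: decoding a completed rx descriptor with the
+ * masks above (variable names are hypothetical):
+ *
+ *   u32 len_stat = desc->len_stat;
+ *   unsigned int len = (len_stat & DMADESC_LENGTH_MASK) >>
+ *                      DMADESC_LENGTH_SHIFT;
+ *   bool hw_owned  = len_stat & DMADESC_OWNER_MASK;
+ *   bool whole_pkt = (len_stat & DMADESC_ESOP_MASK) == DMADESC_ESOP_MASK;
+ *   bool bad       = len_stat & DMADESC_ERR_MASK;
+ */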
+
+
+/*
+ * MIB Counters register definitions
+ */
+#define ETH_MIB_TX_GD_OCTETS 0
+#define ETH_MIB_TX_GD_PKTS 1
+#define ETH_MIB_TX_ALL_OCTETS 2
+#define ETH_MIB_TX_ALL_PKTS 3
+#define ETH_MIB_TX_BRDCAST 4
+#define ETH_MIB_TX_MULT 5
+#define ETH_MIB_TX_64 6
+#define ETH_MIB_TX_65_127 7
+#define ETH_MIB_TX_128_255 8
+#define ETH_MIB_TX_256_511 9
+#define ETH_MIB_TX_512_1023 10
+#define ETH_MIB_TX_1024_MAX 11
+#define ETH_MIB_TX_JAB 12
+#define ETH_MIB_TX_OVR 13
+#define ETH_MIB_TX_FRAG 14
+#define ETH_MIB_TX_UNDERRUN 15
+#define ETH_MIB_TX_COL 16
+#define ETH_MIB_TX_1_COL 17
+#define ETH_MIB_TX_M_COL 18
+#define ETH_MIB_TX_EX_COL 19
+#define ETH_MIB_TX_LATE 20
+#define ETH_MIB_TX_DEF 21
+#define ETH_MIB_TX_CRS 22
+#define ETH_MIB_TX_PAUSE 23
+
+#define ETH_MIB_RX_GD_OCTETS 32
+#define ETH_MIB_RX_GD_PKTS 33
+#define ETH_MIB_RX_ALL_OCTETS 34
+#define ETH_MIB_RX_ALL_PKTS 35
+#define ETH_MIB_RX_BRDCAST 36
+#define ETH_MIB_RX_MULT 37
+#define ETH_MIB_RX_64 38
+#define ETH_MIB_RX_65_127 39
+#define ETH_MIB_RX_128_255 40
+#define ETH_MIB_RX_256_511 41
+#define ETH_MIB_RX_512_1023 42
+#define ETH_MIB_RX_1024_MAX 43
+#define ETH_MIB_RX_JAB 44
+#define ETH_MIB_RX_OVR 45
+#define ETH_MIB_RX_FRAG 46
+#define ETH_MIB_RX_DROP 47
+#define ETH_MIB_RX_CRC_ALIGN 48
+#define ETH_MIB_RX_UND 49
+#define ETH_MIB_RX_CRC 50
+#define ETH_MIB_RX_ALIGN 51
+#define ETH_MIB_RX_SYM 52
+#define ETH_MIB_RX_PAUSE 53
+#define ETH_MIB_RX_CNTRL 54
+
+
+struct bcm_enet_mib_counters {
+ u64 tx_gd_octets;
+ u32 tx_gd_pkts;
+ u32 tx_all_octets;
+ u32 tx_all_pkts;
+ u32 tx_brdcast;
+ u32 tx_mult;
+ u32 tx_64;
+ u32 tx_65_127;
+ u32 tx_128_255;
+ u32 tx_256_511;
+ u32 tx_512_1023;
+ u32 tx_1024_max;
+ u32 tx_jab;
+ u32 tx_ovr;
+ u32 tx_frag;
+ u32 tx_underrun;
+ u32 tx_col;
+ u32 tx_1_col;
+ u32 tx_m_col;
+ u32 tx_ex_col;
+ u32 tx_late;
+ u32 tx_def;
+ u32 tx_crs;
+ u32 tx_pause;
+ u64 rx_gd_octets;
+ u32 rx_gd_pkts;
+ u32 rx_all_octets;
+ u32 rx_all_pkts;
+ u32 rx_brdcast;
+ u32 rx_mult;
+ u32 rx_64;
+ u32 rx_65_127;
+ u32 rx_128_255;
+ u32 rx_256_511;
+ u32 rx_512_1023;
+ u32 rx_1024_max;
+ u32 rx_jab;
+ u32 rx_ovr;
+ u32 rx_frag;
+ u32 rx_drop;
+ u32 rx_crc_align;
+ u32 rx_und;
+ u32 rx_crc;
+ u32 rx_align;
+ u32 rx_sym;
+ u32 rx_pause;
+ u32 rx_cntrl;
+};
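+
+/*
+ * Illustrative sketch: since the MIB registers are configured to
+ * clear on read (ENET_MIBCTL_RDCLEAR_MASK), refreshing the software
+ * copy only has to add each register into the matching field, e.g.:
+ *
+ *   mib->tx_gd_pkts += enet_readl(priv, ENET_MIB_REG(ETH_MIB_TX_GD_PKTS));
+ *   mib->rx_gd_pkts += enet_readl(priv, ENET_MIB_REG(ETH_MIB_RX_GD_PKTS));
+ */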
+
+
+struct bcm_enet_priv {
+
+ /* mac id (from platform device id) */
+ int mac_id;
+
+ /* base remapped address of device */
+ void __iomem *base;
+
+ /* mac irq, rx_dma irq, tx_dma irq */
+ int irq;
+ int irq_rx;
+ int irq_tx;
+
+ /* hw view of rx & tx dma ring */
+ dma_addr_t rx_desc_dma;
+ dma_addr_t tx_desc_dma;
+
+ /* allocated size (in bytes) for rx & tx dma ring */
+ unsigned int rx_desc_alloc_size;
+ unsigned int tx_desc_alloc_size;
+
+
+ struct napi_struct napi;
+
+ /* dma channel id for rx */
+ int rx_chan;
+
+ /* number of dma desc in rx ring */
+ int rx_ring_size;
+
+ /* cpu view of rx dma ring */
+ struct bcm_enet_desc *rx_desc_cpu;
+
+ /* current number of armed descriptors given to hardware for rx */
+ int rx_desc_count;
+
+ /* next rx descriptor to fetch from hardware */
+ int rx_curr_desc;
+
+ /* next dirty rx descriptor to refill */
+ int rx_dirty_desc;
+
+ /* size of allocated rx skbs */
+ unsigned int rx_skb_size;
+
+ /* list of skb given to hw for rx */
+ struct sk_buff **rx_skb;
+
+ /* used when rx skb allocation fails, so we can defer the rx
+ * queue refill */
+ struct timer_list rx_timeout;
+
+ /* lock rx_timeout against rx normal operation */
+ spinlock_t rx_lock;
+
+
+ /* dma channel id for tx */
+ int tx_chan;
+
+ /* number of dma desc in tx ring */
+ int tx_ring_size;
+
+ /* cpu view of tx dma ring */
+ struct bcm_enet_desc *tx_desc_cpu;
+
+ /* number of available descriptor for tx */
+ int tx_desc_count;
+
+ /* next available tx descriptor */
+ int tx_curr_desc;
+
+ /* next dirty tx descriptor to reclaim */
+ int tx_dirty_desc;
+
+ /* list of skb given to hw for tx */
+ struct sk_buff **tx_skb;
+
+ /* lock used by tx reclaim and xmit */
+ spinlock_t tx_lock;
+
+
+ /* set if internal phy is ignored and external mii interface
+ * is selected */
+ int use_external_mii;
+
+ /* set if a phy is connected; its address must be known, as
+ * probing is not possible */
+ int has_phy;
+ int phy_id;
+
+ /* set if connected phy has an associated irq */
+ int has_phy_interrupt;
+ int phy_interrupt;
+
+ /* used when a phy is connected (phylib used) */
+ struct mii_bus *mii_bus;
+ struct phy_device *phydev;
+ int old_link;
+ int old_duplex;
+ int old_pause;
+
+ /* used when no phy is connected */
+ int force_speed_100;
+ int force_duplex_full;
+
+ /* pause parameters */
+ int pause_auto;
+ int pause_rx;
+ int pause_tx;
+
+ /* stats */
+ struct bcm_enet_mib_counters mib;
+
+ /* after a mib interrupt, the mib registers are updated from
+ * this work queue */
+ struct work_struct mib_update_task;
+
+ /* lock mib update between userspace request and workqueue */
+ struct mutex mib_update_lock;
+
+ /* mac clock */
+ struct clk *mac_clk;
+
+ /* phy clock if internal phy is used */
+ struct clk *phy_clk;
+
+ /* network device reference */
+ struct net_device *net_dev;
+
+ /* platform device reference */
+ struct platform_device *pdev;
+
+ /* maximum hardware transmit/receive size */
+ unsigned int hw_mtu;
+};
+
+#endif /* ! BCM63XX_ENET_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
new file mode 100644
index 000000000000..ad24d8c0b8a7
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -0,0 +1,8624 @@
+/* bnx2.c: Broadcom NX2 network driver.
+ *
+ * Copyright (c) 2004-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Michael Chan (mchan@broadcom.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+#include <linux/time.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/checksum.h>
+#include <linux/workqueue.h>
+#include <linux/crc32.h>
+#include <linux/prefetch.h>
+#include <linux/cache.h>
+#include <linux/firmware.h>
+#include <linux/log2.h>
+#include <linux/aer.h>
+
+#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
+#define BCM_CNIC 1
+#include "cnic_if.h"
+#endif
+#include "bnx2.h"
+#include "bnx2_fw.h"
+
+#define DRV_MODULE_NAME "bnx2"
+#define DRV_MODULE_VERSION "2.1.11"
+#define DRV_MODULE_RELDATE "July 20, 2011"
+#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.1.fw"
+#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
+#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1a.fw"
+#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
+#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
+
+#define RUN_AT(x) (jiffies + (x))
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (5*HZ)
+
+static char version[] __devinitdata =
+ "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_FIRMWARE(FW_MIPS_FILE_06);
+MODULE_FIRMWARE(FW_RV2P_FILE_06);
+MODULE_FIRMWARE(FW_MIPS_FILE_09);
+MODULE_FIRMWARE(FW_RV2P_FILE_09);
+MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
+
+static int disable_msi = 0;
+
+module_param(disable_msi, int, 0);
+MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
+
+typedef enum {
+ BCM5706 = 0,
+ NC370T,
+ NC370I,
+ BCM5706S,
+ NC370F,
+ BCM5708,
+ BCM5708S,
+ BCM5709,
+ BCM5709S,
+ BCM5716,
+ BCM5716S,
+} board_t;
+
+/* indexed by board_t, above */
+static struct {
+ char *name;
+} board_info[] __devinitdata = {
+ { "Broadcom NetXtreme II BCM5706 1000Base-T" },
+ { "HP NC370T Multifunction Gigabit Server Adapter" },
+ { "HP NC370i Multifunction Gigabit Server Adapter" },
+ { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
+ { "HP NC370F Multifunction Gigabit Server Adapter" },
+ { "Broadcom NetXtreme II BCM5708 1000Base-T" },
+ { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
+ { "Broadcom NetXtreme II BCM5709 1000Base-T" },
+ { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
+ { "Broadcom NetXtreme II BCM5716 1000Base-T" },
+ { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
+ };
+
+static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
+ PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
+ PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
+ PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
+ { PCI_VENDOR_ID_BROADCOM, 0x163b,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
+ { PCI_VENDOR_ID_BROADCOM, 0x163c,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
+ { 0, }
+};
+
+static const struct flash_spec flash_table[] =
+{
+#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
+#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
+ /* Slow EEPROM */
+ {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
+ BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
+ SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
+ "EEPROM - slow"},
+ /* Expansion entry 0001 */
+ {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
+ "Entry 0001"},
+ /* Saifun SA25F010 (non-buffered flash) */
+ /* strap, cfg1, & write1 need updates */
+ {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
+ "Non-buffered flash (128kB)"},
+ /* Saifun SA25F020 (non-buffered flash) */
+ /* strap, cfg1, & write1 need updates */
+ {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
+ "Non-buffered flash (256kB)"},
+ /* Expansion entry 0100 */
+ {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
+ "Entry 0100"},
+ /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
+ {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
+ NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
+ ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
+ "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
+ /* Entry 0110: ST M45PE20 (non-buffered flash)*/
+ {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
+ NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
+ ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
+ "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
+ /* Saifun SA25F005 (non-buffered flash) */
+ /* strap, cfg1, & write1 need updates */
+ {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
+ "Non-buffered flash (64kB)"},
+ /* Fast EEPROM */
+ {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
+ BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
+ SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
+ "EEPROM - fast"},
+ /* Expansion entry 1001 */
+ {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
+ "Entry 1001"},
+ /* Expansion entry 1010 */
+ {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
+ "Entry 1010"},
+ /* ATMEL AT45DB011B (buffered flash) */
+ {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
+ BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+ BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
+ "Buffered flash (128kB)"},
+ /* Expansion entry 1100 */
+ {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
+ "Entry 1100"},
+ /* Expansion entry 1101 */
+ {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
+ "Entry 1101"},
+ /* Atmel Expansion entry 1110 */
+ {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
+ BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+ BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
+ "Entry 1110 (Atmel)"},
+ /* ATMEL AT45DB021B (buffered flash) */
+ {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
+ BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+ BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
+ "Buffered flash (256kB)"},
+};
+
+static const struct flash_spec flash_5709 = {
+ .flags = BNX2_NV_BUFFERED,
+ .page_bits = BCM5709_FLASH_PAGE_BITS,
+ .page_size = BCM5709_FLASH_PAGE_SIZE,
+ .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
+ .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
+ .name = "5709 Buffered flash (256kB)",
+};
+
+MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
+
+static void bnx2_init_napi(struct bnx2 *bp);
+static void bnx2_del_napi(struct bnx2 *bp);
+
+static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
+{
+ u32 diff;
+
+ /* Tell compiler to fetch tx_prod and tx_cons from memory. */
+ barrier();
+
+ /* The ring uses 256 indices for 255 entries, one of them
+ * needs to be skipped.
+ */
+ diff = txr->tx_prod - txr->tx_cons;
+ if (unlikely(diff >= TX_DESC_CNT)) {
+ diff &= 0xffff;
+ if (diff == TX_DESC_CNT)
+ diff = MAX_TX_DESC_CNT;
+ }
+ return bp->tx_ring_size - diff;
+}
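+
+/*
+ * Worked example (illustrative, assuming TX_DESC_CNT == 256 and
+ * MAX_TX_DESC_CNT == 255): the producer/consumer indices are 16-bit
+ * and 65536 is a multiple of 256, so masking the difference with
+ * 0xffff recovers the modular distance even after wraparound; a full
+ * ring then shows diff == 256, which is folded back to 255, the real
+ * number of usable entries.
+ */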
+
+static u32
+bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
+{
+ u32 val;
+
+ spin_lock_bh(&bp->indirect_lock);
+ REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
+ val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
+ spin_unlock_bh(&bp->indirect_lock);
+ return val;
+}
+
+static void
+bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
+{
+ spin_lock_bh(&bp->indirect_lock);
+ REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
+ REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
+ spin_unlock_bh(&bp->indirect_lock);
+}
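+
+/*
+ * Illustrative sketch: the two functions above are the classic
+ * register-window idiom, in generic form (hypothetical names):
+ *
+ *   spin_lock_bh(&lock);
+ *   writel(target, base + WINDOW_ADDRESS);  // select the offset
+ *   val = readl(base + WINDOW);             // access it
+ *   spin_unlock_bh(&lock);
+ *
+ * the lock keeps the address/data pair atomic against concurrent
+ * indirect accesses, including from softirq context (hence _bh).
+ */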
+
+static void
+bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
+{
+ bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
+}
+
+static u32
+bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
+{
+ return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
+}
+
+static void
+bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
+{
+ offset += cid_addr;
+ spin_lock_bh(&bp->indirect_lock);
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ int i;
+
+ REG_WR(bp, BNX2_CTX_CTX_DATA, val);
+ REG_WR(bp, BNX2_CTX_CTX_CTRL,
+ offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
+ for (i = 0; i < 5; i++) {
+ val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
+ if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
+ break;
+ udelay(5);
+ }
+ } else {
+ REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
+ REG_WR(bp, BNX2_CTX_DATA, val);
+ }
+ spin_unlock_bh(&bp->indirect_lock);
+}
+
+#ifdef BCM_CNIC
+static int
+bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ struct drv_ctl_io *io = &info->data.io;
+
+ switch (info->cmd) {
+ case DRV_CTL_IO_WR_CMD:
+ bnx2_reg_wr_ind(bp, io->offset, io->data);
+ break;
+ case DRV_CTL_IO_RD_CMD:
+ io->data = bnx2_reg_rd_ind(bp, io->offset);
+ break;
+ case DRV_CTL_CTX_WR_CMD:
+ bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
+{
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+ int sb_id;
+
+ if (bp->flags & BNX2_FLAG_USING_MSIX) {
+ cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
+ bnapi->cnic_present = 0;
+ sb_id = bp->irq_nvecs;
+ cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
+ } else {
+ cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
+ bnapi->cnic_tag = bnapi->last_status_idx;
+ bnapi->cnic_present = 1;
+ sb_id = 0;
+ cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
+ }
+
+ cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
+ cp->irq_arr[0].status_blk = (void *)
+ ((unsigned long) bnapi->status_blk.msi +
+ (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
+ cp->irq_arr[0].status_blk_num = sb_id;
+ cp->num_irq = 1;
+}
+
+static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
+ void *data)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ if (ops == NULL)
+ return -EINVAL;
+
+ if (cp->drv_state & CNIC_DRV_STATE_REGD)
+ return -EBUSY;
+
+ if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
+ return -ENODEV;
+
+ bp->cnic_data = data;
+ rcu_assign_pointer(bp->cnic_ops, ops);
+
+ cp->num_irq = 0;
+ cp->drv_state = CNIC_DRV_STATE_REGD;
+
+ bnx2_setup_cnic_irq_info(bp);
+
+ return 0;
+}
+
+static int bnx2_unregister_cnic(struct net_device *dev)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ mutex_lock(&bp->cnic_lock);
+ cp->drv_state = 0;
+ bnapi->cnic_present = 0;
+ rcu_assign_pointer(bp->cnic_ops, NULL);
+ mutex_unlock(&bp->cnic_lock);
+ synchronize_rcu();
+ return 0;
+}
+
+struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ if (!cp->max_iscsi_conn)
+ return NULL;
+
+ cp->drv_owner = THIS_MODULE;
+ cp->chip_id = bp->chip_id;
+ cp->pdev = bp->pdev;
+ cp->io_base = bp->regview;
+ cp->drv_ctl = bnx2_drv_ctl;
+ cp->drv_register_cnic = bnx2_register_cnic;
+ cp->drv_unregister_cnic = bnx2_unregister_cnic;
+
+ return cp;
+}
+EXPORT_SYMBOL(bnx2_cnic_probe);
+
+static void
+bnx2_cnic_stop(struct bnx2 *bp)
+{
+ struct cnic_ops *c_ops;
+ struct cnic_ctl_info info;
+
+ mutex_lock(&bp->cnic_lock);
+ c_ops = rcu_dereference_protected(bp->cnic_ops,
+ lockdep_is_held(&bp->cnic_lock));
+ if (c_ops) {
+ info.cmd = CNIC_CTL_STOP_CMD;
+ c_ops->cnic_ctl(bp->cnic_data, &info);
+ }
+ mutex_unlock(&bp->cnic_lock);
+}
+
+static void
+bnx2_cnic_start(struct bnx2 *bp)
+{
+ struct cnic_ops *c_ops;
+ struct cnic_ctl_info info;
+
+ mutex_lock(&bp->cnic_lock);
+ c_ops = rcu_dereference_protected(bp->cnic_ops,
+ lockdep_is_held(&bp->cnic_lock));
+ if (c_ops) {
+ if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+
+ bnapi->cnic_tag = bnapi->last_status_idx;
+ }
+ info.cmd = CNIC_CTL_START_CMD;
+ c_ops->cnic_ctl(bp->cnic_data, &info);
+ }
+ mutex_unlock(&bp->cnic_lock);
+}
+
+#else
+
+static void
+bnx2_cnic_stop(struct bnx2 *bp)
+{
+}
+
+static void
+bnx2_cnic_start(struct bnx2 *bp)
+{
+}
+
+#endif
+
+static int
+bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
+{
+ u32 val1;
+ int i, ret;
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
+ val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+ val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
+
+ REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
+ REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+
+ udelay(40);
+ }
+
+ val1 = (bp->phy_addr << 21) | (reg << 16) |
+ BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
+ BNX2_EMAC_MDIO_COMM_START_BUSY;
+ REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
+ if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
+ udelay(5);
+
+ val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
+ val1 &= BNX2_EMAC_MDIO_COMM_DATA;
+
+ break;
+ }
+ }
+
+ if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
+ *val = 0x0;
+ ret = -EBUSY;
+ }
+ else {
+ *val = val1;
+ ret = 0;
+ }
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
+ val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+ val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
+
+ REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
+ REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+
+ udelay(40);
+ }
+
+ return ret;
+}
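+
+/*
+ * Illustrative note on the MDIO_COMM word composed above: it packs a
+ * clause-22 management frame into a single register, roughly
+ *
+ *   bits 25:21  phy address  (bp->phy_addr << 21)
+ *   bits 20:16  register     (reg << 16)
+ *   bits 15:0   data         (read result / value to write)
+ *
+ * plus the COMMAND_READ/WRITE, DISEXT and START_BUSY control bits;
+ * START_BUSY is set to kick the transaction and then polled until
+ * the core clears it.
+ */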
+
+static int
+bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
+{
+ u32 val1;
+ int i, ret;
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
+ val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+ val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
+
+ REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
+ REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+
+ udelay(40);
+ }
+
+ val1 = (bp->phy_addr << 21) | (reg << 16) | val |
+ BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
+ BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
+ REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
+ if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
+ udelay(5);
+ break;
+ }
+ }
+
+ if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
+ ret = -EBUSY;
+ else
+ ret = 0;
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
+ val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+ val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
+
+ REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
+ REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+
+ udelay(40);
+ }
+
+ return ret;
+}
+
+static void
+bnx2_disable_int(struct bnx2 *bp)
+{
+ int i;
+ struct bnx2_napi *bnapi;
+
+ for (i = 0; i < bp->irq_nvecs; i++) {
+ bnapi = &bp->bnx2_napi[i];
+ REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+ BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+ }
+ REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
+}
+
+static void
+bnx2_enable_int(struct bnx2 *bp)
+{
+ int i;
+ struct bnx2_napi *bnapi;
+
+ for (i = 0; i < bp->irq_nvecs; i++) {
+ bnapi = &bp->bnx2_napi[i];
+
+ REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
+ bnapi->last_status_idx);
+
+ REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ bnapi->last_status_idx);
+ }
+ REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
+}
+
+static void
+bnx2_disable_int_sync(struct bnx2 *bp)
+{
+ int i;
+
+ atomic_inc(&bp->intr_sem);
+ if (!netif_running(bp->dev))
+ return;
+
+ bnx2_disable_int(bp);
+ for (i = 0; i < bp->irq_nvecs; i++)
+ synchronize_irq(bp->irq_tbl[i].vector);
+}
+
+static void
+bnx2_napi_disable(struct bnx2 *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->irq_nvecs; i++)
+ napi_disable(&bp->bnx2_napi[i].napi);
+}
+
+static void
+bnx2_napi_enable(struct bnx2 *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->irq_nvecs; i++)
+ napi_enable(&bp->bnx2_napi[i].napi);
+}
+
+static void
+bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
+{
+ if (stop_cnic)
+ bnx2_cnic_stop(bp);
+ if (netif_running(bp->dev)) {
+ bnx2_napi_disable(bp);
+ netif_tx_disable(bp->dev);
+ }
+ bnx2_disable_int_sync(bp);
+ netif_carrier_off(bp->dev); /* prevent tx timeout */
+}
+
+static void
+bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
+{
+ if (atomic_dec_and_test(&bp->intr_sem)) {
+ if (netif_running(bp->dev)) {
+ netif_tx_wake_all_queues(bp->dev);
+ spin_lock_bh(&bp->phy_lock);
+ if (bp->link_up)
+ netif_carrier_on(bp->dev);
+ spin_unlock_bh(&bp->phy_lock);
+ bnx2_napi_enable(bp);
+ bnx2_enable_int(bp);
+ if (start_cnic)
+ bnx2_cnic_start(bp);
+ }
+ }
+}
+
+static void
+bnx2_free_tx_mem(struct bnx2 *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->num_tx_rings; i++) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+
+ if (txr->tx_desc_ring) {
+ dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
+ txr->tx_desc_ring,
+ txr->tx_desc_mapping);
+ txr->tx_desc_ring = NULL;
+ }
+ kfree(txr->tx_buf_ring);
+ txr->tx_buf_ring = NULL;
+ }
+}
+
+static void
+bnx2_free_rx_mem(struct bnx2 *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->num_rx_rings; i++) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+ int j;
+
+ for (j = 0; j < bp->rx_max_ring; j++) {
+ if (rxr->rx_desc_ring[j])
+ dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
+ rxr->rx_desc_ring[j],
+ rxr->rx_desc_mapping[j]);
+ rxr->rx_desc_ring[j] = NULL;
+ }
+ vfree(rxr->rx_buf_ring);
+ rxr->rx_buf_ring = NULL;
+
+ for (j = 0; j < bp->rx_max_pg_ring; j++) {
+ if (rxr->rx_pg_desc_ring[j])
+ dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
+ rxr->rx_pg_desc_ring[j],
+ rxr->rx_pg_desc_mapping[j]);
+ rxr->rx_pg_desc_ring[j] = NULL;
+ }
+ vfree(rxr->rx_pg_ring);
+ rxr->rx_pg_ring = NULL;
+ }
+}
+
+static int
+bnx2_alloc_tx_mem(struct bnx2 *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->num_tx_rings; i++) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+
+ txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
+ if (txr->tx_buf_ring == NULL)
+ return -ENOMEM;
+
+ txr->tx_desc_ring =
+ dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
+ &txr->tx_desc_mapping, GFP_KERNEL);
+ if (txr->tx_desc_ring == NULL)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int
+bnx2_alloc_rx_mem(struct bnx2 *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->num_rx_rings; i++) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+ int j;
+
+ rxr->rx_buf_ring =
+ vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
+ if (rxr->rx_buf_ring == NULL)
+ return -ENOMEM;
+
+ for (j = 0; j < bp->rx_max_ring; j++) {
+ rxr->rx_desc_ring[j] =
+ dma_alloc_coherent(&bp->pdev->dev,
+ RXBD_RING_SIZE,
+ &rxr->rx_desc_mapping[j],
+ GFP_KERNEL);
+ if (rxr->rx_desc_ring[j] == NULL)
+ return -ENOMEM;
+
+ }
+
+ if (bp->rx_pg_ring_size) {
+ rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
+ bp->rx_max_pg_ring);
+ if (rxr->rx_pg_ring == NULL)
+ return -ENOMEM;
+
+ }
+
+ for (j = 0; j < bp->rx_max_pg_ring; j++) {
+ rxr->rx_pg_desc_ring[j] =
+ dma_alloc_coherent(&bp->pdev->dev,
+ RXBD_RING_SIZE,
+ &rxr->rx_pg_desc_mapping[j],
+ GFP_KERNEL);
+ if (rxr->rx_pg_desc_ring[j] == NULL)
+ return -ENOMEM;
+
+ }
+ }
+ return 0;
+}
+
+static void
+bnx2_free_mem(struct bnx2 *bp)
+{
+ int i;
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+
+ bnx2_free_tx_mem(bp);
+ bnx2_free_rx_mem(bp);
+
+ for (i = 0; i < bp->ctx_pages; i++) {
+ if (bp->ctx_blk[i]) {
+ dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
+ bp->ctx_blk[i],
+ bp->ctx_blk_mapping[i]);
+ bp->ctx_blk[i] = NULL;
+ }
+ }
+ if (bnapi->status_blk.msi) {
+ dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
+ bnapi->status_blk.msi,
+ bp->status_blk_mapping);
+ bnapi->status_blk.msi = NULL;
+ bp->stats_blk = NULL;
+ }
+}
+
+static int
+bnx2_alloc_mem(struct bnx2 *bp)
+{
+ int i, status_blk_size, err;
+ struct bnx2_napi *bnapi;
+ void *status_blk;
+
+ /* Combine status and statistics blocks into one allocation. */
+ status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
+ if (bp->flags & BNX2_FLAG_MSIX_CAP)
+ status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
+ BNX2_SBLK_MSIX_ALIGN_SIZE);
+ bp->status_stats_size = status_blk_size +
+ sizeof(struct statistics_block);
+
+ status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
+ &bp->status_blk_mapping, GFP_KERNEL);
+ if (status_blk == NULL)
+ goto alloc_mem_err;
+
+ memset(status_blk, 0, bp->status_stats_size);
+
+ bnapi = &bp->bnx2_napi[0];
+ bnapi->status_blk.msi = status_blk;
+ bnapi->hw_tx_cons_ptr =
+ &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
+ bnapi->hw_rx_cons_ptr =
+ &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
+ if (bp->flags & BNX2_FLAG_MSIX_CAP) {
+ for (i = 1; i < bp->irq_nvecs; i++) {
+ struct status_block_msix *sblk;
+
+ bnapi = &bp->bnx2_napi[i];
+
+ sblk = (void *) (status_blk +
+ BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+ bnapi->status_blk.msix = sblk;
+ bnapi->hw_tx_cons_ptr =
+ &sblk->status_tx_quick_consumer_index;
+ bnapi->hw_rx_cons_ptr =
+ &sblk->status_rx_quick_consumer_index;
+ bnapi->int_num = i << 24;
+ }
+ }
+
+ bp->stats_blk = status_blk + status_blk_size;
+
+ bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
+ if (bp->ctx_pages == 0)
+ bp->ctx_pages = 1;
+ for (i = 0; i < bp->ctx_pages; i++) {
+ bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
+ BCM_PAGE_SIZE,
+ &bp->ctx_blk_mapping[i],
+ GFP_KERNEL);
+ if (bp->ctx_blk[i] == NULL)
+ goto alloc_mem_err;
+ }
+ }
+
+ err = bnx2_alloc_rx_mem(bp);
+ if (err)
+ goto alloc_mem_err;
+
+ err = bnx2_alloc_tx_mem(bp);
+ if (err)
+ goto alloc_mem_err;
+
+ return 0;
+
+alloc_mem_err:
+ bnx2_free_mem(bp);
+ return -ENOMEM;
+}
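+
+/*
+ * Illustrative note on the allocation above: the status and
+ * statistics blocks share one coherent DMA buffer,
+ *
+ *   [ status block(s), one per MSI-X vector at
+ *     BNX2_SBLK_MSIX_ALIGN_SIZE strides ][ statistics_block ]
+ *
+ * so stats_blk and stats_blk_mapping are simply the status block
+ * pointer and mapping offset by status_blk_size.
+ */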
+
+static void
+bnx2_report_fw_link(struct bnx2 *bp)
+{
+ u32 fw_link_status = 0;
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+ return;
+
+ if (bp->link_up) {
+ u32 bmsr;
+
+ switch (bp->line_speed) {
+ case SPEED_10:
+ if (bp->duplex == DUPLEX_HALF)
+ fw_link_status = BNX2_LINK_STATUS_10HALF;
+ else
+ fw_link_status = BNX2_LINK_STATUS_10FULL;
+ break;
+ case SPEED_100:
+ if (bp->duplex == DUPLEX_HALF)
+ fw_link_status = BNX2_LINK_STATUS_100HALF;
+ else
+ fw_link_status = BNX2_LINK_STATUS_100FULL;
+ break;
+ case SPEED_1000:
+ if (bp->duplex == DUPLEX_HALF)
+ fw_link_status = BNX2_LINK_STATUS_1000HALF;
+ else
+ fw_link_status = BNX2_LINK_STATUS_1000FULL;
+ break;
+ case SPEED_2500:
+ if (bp->duplex == DUPLEX_HALF)
+ fw_link_status = BNX2_LINK_STATUS_2500HALF;
+ else
+ fw_link_status = BNX2_LINK_STATUS_2500FULL;
+ break;
+ }
+
+ fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
+
+ if (bp->autoneg) {
+ fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
+
+ bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
+ bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
+
+ if (!(bmsr & BMSR_ANEGCOMPLETE) ||
+ bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
+ fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
+ else
+ fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
+ }
+ }
+ else
+ fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
+
+ bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
+}
+
+static char *
+bnx2_xceiver_str(struct bnx2 *bp)
+{
+ return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
+ ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
+ "Copper");
+}
+
+static void
+bnx2_report_link(struct bnx2 *bp)
+{
+ if (bp->link_up) {
+ netif_carrier_on(bp->dev);
+ netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
+ bnx2_xceiver_str(bp),
+ bp->line_speed,
+ bp->duplex == DUPLEX_FULL ? "full" : "half");
+
+ if (bp->flow_ctrl) {
+ if (bp->flow_ctrl & FLOW_CTRL_RX) {
+ pr_cont(", receive ");
+ if (bp->flow_ctrl & FLOW_CTRL_TX)
+ pr_cont("& transmit ");
+ }
+ else {
+ pr_cont(", transmit ");
+ }
+ pr_cont("flow control ON");
+ }
+ pr_cont("\n");
+ } else {
+ netif_carrier_off(bp->dev);
+ netdev_err(bp->dev, "NIC %s Link is Down\n",
+ bnx2_xceiver_str(bp));
+ }
+
+ bnx2_report_fw_link(bp);
+}
+
+static void
+bnx2_resolve_flow_ctrl(struct bnx2 *bp)
+{
+ u32 local_adv, remote_adv;
+
+ bp->flow_ctrl = 0;
+ if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
+ (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
+
+ if (bp->duplex == DUPLEX_FULL) {
+ bp->flow_ctrl = bp->req_flow_ctrl;
+ }
+ return;
+ }
+
+ if (bp->duplex != DUPLEX_FULL) {
+ return;
+ }
+
+ if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+ (CHIP_NUM(bp) == CHIP_NUM_5708)) {
+ u32 val;
+
+ bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
+ if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
+ bp->flow_ctrl |= FLOW_CTRL_TX;
+ if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
+ bp->flow_ctrl |= FLOW_CTRL_RX;
+ return;
+ }
+
+ bnx2_read_phy(bp, bp->mii_adv, &local_adv);
+ bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+ u32 new_local_adv = 0;
+ u32 new_remote_adv = 0;
+
+ if (local_adv & ADVERTISE_1000XPAUSE)
+ new_local_adv |= ADVERTISE_PAUSE_CAP;
+ if (local_adv & ADVERTISE_1000XPSE_ASYM)
+ new_local_adv |= ADVERTISE_PAUSE_ASYM;
+ if (remote_adv & ADVERTISE_1000XPAUSE)
+ new_remote_adv |= ADVERTISE_PAUSE_CAP;
+ if (remote_adv & ADVERTISE_1000XPSE_ASYM)
+ new_remote_adv |= ADVERTISE_PAUSE_ASYM;
+
+ local_adv = new_local_adv;
+ remote_adv = new_remote_adv;
+ }
+
+ /* See Table 28B-3 of 802.3ab-1999 spec. */
+ if (local_adv & ADVERTISE_PAUSE_CAP) {
+ if(local_adv & ADVERTISE_PAUSE_ASYM) {
+ if (remote_adv & ADVERTISE_PAUSE_CAP) {
+ bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
+ }
+ else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
+ bp->flow_ctrl = FLOW_CTRL_RX;
+ }
+ }
+ else {
+ if (remote_adv & ADVERTISE_PAUSE_CAP) {
+ bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
+ }
+ }
+ }
+ else if (local_adv & ADVERTISE_PAUSE_ASYM) {
+ if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
+ (remote_adv & ADVERTISE_PAUSE_ASYM)) {
+
+ bp->flow_ctrl = FLOW_CTRL_TX;
+ }
+ }
+}
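+
+/*
+ * Illustrative summary of the resolution above (after the 1000X bits
+ * have been rewritten into PAUSE_CAP/PAUSE_ASYM form):
+ *
+ *   local CAP  local ASYM  remote CAP  remote ASYM  result
+ *       1          1           1           -        TX | RX
+ *       1          1           0           1        RX
+ *       1          0           1           -        TX | RX
+ *       0          1           1           1        TX
+ *   any other combination                           none
+ */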
+
+static int
+bnx2_5709s_linkup(struct bnx2 *bp)
+{
+ u32 val, speed;
+
+ bp->link_up = 1;
+
+ bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
+ bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
+ bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+
+ if ((bp->autoneg & AUTONEG_SPEED) == 0) {
+ bp->line_speed = bp->req_line_speed;
+ bp->duplex = bp->req_duplex;
+ return 0;
+ }
+ speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
+ switch (speed) {
+ case MII_BNX2_GP_TOP_AN_SPEED_10:
+ bp->line_speed = SPEED_10;
+ break;
+ case MII_BNX2_GP_TOP_AN_SPEED_100:
+ bp->line_speed = SPEED_100;
+ break;
+ case MII_BNX2_GP_TOP_AN_SPEED_1G:
+ case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
+ bp->line_speed = SPEED_1000;
+ break;
+ case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
+ bp->line_speed = SPEED_2500;
+ break;
+ }
+ if (val & MII_BNX2_GP_TOP_AN_FD)
+ bp->duplex = DUPLEX_FULL;
+ else
+ bp->duplex = DUPLEX_HALF;
+ return 0;
+}
+
+static int
+bnx2_5708s_linkup(struct bnx2 *bp)
+{
+ u32 val;
+
+ bp->link_up = 1;
+ bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
+ switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
+ case BCM5708S_1000X_STAT1_SPEED_10:
+ bp->line_speed = SPEED_10;
+ break;
+ case BCM5708S_1000X_STAT1_SPEED_100:
+ bp->line_speed = SPEED_100;
+ break;
+ case BCM5708S_1000X_STAT1_SPEED_1G:
+ bp->line_speed = SPEED_1000;
+ break;
+ case BCM5708S_1000X_STAT1_SPEED_2G5:
+ bp->line_speed = SPEED_2500;
+ break;
+ }
+ if (val & BCM5708S_1000X_STAT1_FD)
+ bp->duplex = DUPLEX_FULL;
+ else
+ bp->duplex = DUPLEX_HALF;
+
+ return 0;
+}
+
+static int
+bnx2_5706s_linkup(struct bnx2 *bp)
+{
+ u32 bmcr, local_adv, remote_adv, common;
+
+ bp->link_up = 1;
+ bp->line_speed = SPEED_1000;
+
+ bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+ if (bmcr & BMCR_FULLDPLX) {
+ bp->duplex = DUPLEX_FULL;
+ }
+ else {
+ bp->duplex = DUPLEX_HALF;
+ }
+
+ if (!(bmcr & BMCR_ANENABLE)) {
+ return 0;
+ }
+
+ bnx2_read_phy(bp, bp->mii_adv, &local_adv);
+ bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
+
+ common = local_adv & remote_adv;
+ if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
+
+ if (common & ADVERTISE_1000XFULL) {
+ bp->duplex = DUPLEX_FULL;
+ }
+ else {
+ bp->duplex = DUPLEX_HALF;
+ }
+ }
+
+ return 0;
+}
+
+static int
+bnx2_copper_linkup(struct bnx2 *bp)
+{
+ u32 bmcr;
+
+ bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+ if (bmcr & BMCR_ANENABLE) {
+ u32 local_adv, remote_adv, common;
+
+ bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
+ bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
+
+ common = local_adv & (remote_adv >> 2);
+ if (common & ADVERTISE_1000FULL) {
+ bp->line_speed = SPEED_1000;
+ bp->duplex = DUPLEX_FULL;
+ }
+ else if (common & ADVERTISE_1000HALF) {
+ bp->line_speed = SPEED_1000;
+ bp->duplex = DUPLEX_HALF;
+ }
+ else {
+ bnx2_read_phy(bp, bp->mii_adv, &local_adv);
+ bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
+
+ common = local_adv & remote_adv;
+ if (common & ADVERTISE_100FULL) {
+ bp->line_speed = SPEED_100;
+ bp->duplex = DUPLEX_FULL;
+ }
+ else if (common & ADVERTISE_100HALF) {
+ bp->line_speed = SPEED_100;
+ bp->duplex = DUPLEX_HALF;
+ }
+ else if (common & ADVERTISE_10FULL) {
+ bp->line_speed = SPEED_10;
+ bp->duplex = DUPLEX_FULL;
+ }
+ else if (common & ADVERTISE_10HALF) {
+ bp->line_speed = SPEED_10;
+ bp->duplex = DUPLEX_HALF;
+ }
+ else {
+ bp->line_speed = 0;
+ bp->link_up = 0;
+ }
+ }
+ }
+ else {
+ if (bmcr & BMCR_SPEED100) {
+ bp->line_speed = SPEED_100;
+ }
+ else {
+ bp->line_speed = SPEED_10;
+ }
+ if (bmcr & BMCR_FULLDPLX) {
+ bp->duplex = DUPLEX_FULL;
+ }
+ else {
+ bp->duplex = DUPLEX_HALF;
+ }
+ }
+
+ return 0;
+}
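+
+/*
+ * Note on the (remote_adv >> 2) above: the link partner's 1000BASE-T
+ * abilities in MII_STAT1000 (LPA_1000FULL/LPA_1000HALF, bits 11:10)
+ * sit two bits above the local advertisement bits in MII_CTRL1000
+ * (ADVERTISE_1000FULL/ADVERTISE_1000HALF, bits 9:8), so shifting
+ * right by two aligns them before the AND.
+ */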
+
+static void
+bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
+{
+ u32 val, rx_cid_addr = GET_CID_ADDR(cid);
+
+ val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
+ val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
+ val |= 0x02 << 8;
+
+ if (bp->flow_ctrl & FLOW_CTRL_TX)
+ val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
+
+ bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
+}
+
+static void
+bnx2_init_all_rx_contexts(struct bnx2 *bp)
+{
+ int i;
+ u32 cid;
+
+ for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
+ if (i == 1)
+ cid = RX_RSS_CID;
+ bnx2_init_rx_context(bp, cid);
+ }
+}
+
+static void
+bnx2_set_mac_link(struct bnx2 *bp)
+{
+ u32 val;
+
+ REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
+ if (bp->link_up && (bp->line_speed == SPEED_1000) &&
+ (bp->duplex == DUPLEX_HALF)) {
+ REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
+ }
+
+ /* Configure the EMAC mode register. */
+ val = REG_RD(bp, BNX2_EMAC_MODE);
+
+ val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
+ BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
+ BNX2_EMAC_MODE_25G_MODE);
+
+ if (bp->link_up) {
+ switch (bp->line_speed) {
+ case SPEED_10:
+ if (CHIP_NUM(bp) != CHIP_NUM_5706) {
+ val |= BNX2_EMAC_MODE_PORT_MII_10M;
+ break;
+ }
+ /* fall through */
+ case SPEED_100:
+ val |= BNX2_EMAC_MODE_PORT_MII;
+ break;
+ case SPEED_2500:
+ val |= BNX2_EMAC_MODE_25G_MODE;
+ /* fall through */
+ case SPEED_1000:
+ val |= BNX2_EMAC_MODE_PORT_GMII;
+ break;
+ }
+ }
+ else {
+ val |= BNX2_EMAC_MODE_PORT_GMII;
+ }
+
+ /* Set the MAC to operate in the appropriate duplex mode. */
+ if (bp->duplex == DUPLEX_HALF)
+ val |= BNX2_EMAC_MODE_HALF_DUPLEX;
+ REG_WR(bp, BNX2_EMAC_MODE, val);
+
+ /* Enable/disable rx PAUSE. */
+ bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
+
+ if (bp->flow_ctrl & FLOW_CTRL_RX)
+ bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
+ REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
+
+ /* Enable/disable tx PAUSE. */
+ val = REG_RD(bp, BNX2_EMAC_TX_MODE);
+ val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
+
+ if (bp->flow_ctrl & FLOW_CTRL_TX)
+ val |= BNX2_EMAC_TX_MODE_FLOW_EN;
+ REG_WR(bp, BNX2_EMAC_TX_MODE, val);
+
+ /* Acknowledge the interrupt. */
+ REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
+
+ bnx2_init_all_rx_contexts(bp);
+}
+
+static void
+bnx2_enable_bmsr1(struct bnx2 *bp)
+{
+ if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+ (CHIP_NUM(bp) == CHIP_NUM_5709))
+ bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+ MII_BNX2_BLK_ADDR_GP_STATUS);
+}
+
+static void
+bnx2_disable_bmsr1(struct bnx2 *bp)
+{
+ if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+ (CHIP_NUM(bp) == CHIP_NUM_5709))
+ bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+ MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+}
+
+static int
+bnx2_test_and_enable_2g5(struct bnx2 *bp)
+{
+ u32 up1;
+ int ret = 1;
+
+ if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
+ return 0;
+
+ if (bp->autoneg & AUTONEG_SPEED)
+ bp->advertising |= ADVERTISED_2500baseX_Full;
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
+
+ bnx2_read_phy(bp, bp->mii_up1, &up1);
+ if (!(up1 & BCM5708S_UP1_2G5)) {
+ up1 |= BCM5708S_UP1_2G5;
+ bnx2_write_phy(bp, bp->mii_up1, up1);
+ ret = 0;
+ }
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+ MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+
+ return ret;
+}
+
+static int
+bnx2_test_and_disable_2g5(struct bnx2 *bp)
+{
+ u32 up1;
+ int ret = 0;
+
+ if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
+ return 0;
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
+
+ bnx2_read_phy(bp, bp->mii_up1, &up1);
+ if (up1 & BCM5708S_UP1_2G5) {
+ up1 &= ~BCM5708S_UP1_2G5;
+ bnx2_write_phy(bp, bp->mii_up1, up1);
+ ret = 1;
+ }
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+ MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+
+ return ret;
+}
+
+static void
+bnx2_enable_forced_2g5(struct bnx2 *bp)
+{
+ u32 uninitialized_var(bmcr);
+ int err;
+
+ if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
+ return;
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ u32 val;
+
+ bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+ MII_BNX2_BLK_ADDR_SERDES_DIG);
+ if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
+ val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
+ val |= MII_BNX2_SD_MISC1_FORCE |
+ MII_BNX2_SD_MISC1_FORCE_2_5G;
+ bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
+ }
+
+ bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+ MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+ err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+
+ } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
+ err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+ if (!err)
+ bmcr |= BCM5708S_BMCR_FORCE_2500;
+ } else {
+ return;
+ }
+
+ if (err)
+ return;
+
+ if (bp->autoneg & AUTONEG_SPEED) {
+ bmcr &= ~BMCR_ANENABLE;
+ if (bp->req_duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
+ }
+ bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
+}
+
+static void
+bnx2_disable_forced_2g5(struct bnx2 *bp)
+{
+ u32 uninitialized_var(bmcr);
+ int err;
+
+ if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
+ return;
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ u32 val;
+
+ bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+ MII_BNX2_BLK_ADDR_SERDES_DIG);
+ if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
+ val &= ~MII_BNX2_SD_MISC1_FORCE;
+ bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
+ }
+
+ bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+ MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+ err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+
+ } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
+ err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+ if (!err)
+ bmcr &= ~BCM5708S_BMCR_FORCE_2500;
+ } else {
+ return;
+ }
+
+ if (err)
+ return;
+
+ if (bp->autoneg & AUTONEG_SPEED)
+ bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
+ bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
+}
+
+static void
+bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
+{
+ u32 val;
+
+ bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
+ bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
+ if (start)
+ bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
+ else
+ bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
+}
+
+static int
+bnx2_set_link(struct bnx2 *bp)
+{
+ u32 bmsr;
+ u8 link_up;
+
+ if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
+ bp->link_up = 1;
+ return 0;
+ }
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+ return 0;
+
+ link_up = bp->link_up;
+
+ bnx2_enable_bmsr1(bp);
+ bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
+ bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
+ bnx2_disable_bmsr1(bp);
+
+ if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+ (CHIP_NUM(bp) == CHIP_NUM_5706)) {
+ u32 val, an_dbg;
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
+ bnx2_5706s_force_link_dn(bp, 0);
+ bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
+ }
+ val = REG_RD(bp, BNX2_EMAC_STATUS);
+
+ bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
+ bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
+ bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
+
+ if ((val & BNX2_EMAC_STATUS_LINK) &&
+ !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
+ bmsr |= BMSR_LSTATUS;
+ else
+ bmsr &= ~BMSR_LSTATUS;
+ }
+
+ if (bmsr & BMSR_LSTATUS) {
+ bp->link_up = 1;
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+ if (CHIP_NUM(bp) == CHIP_NUM_5706)
+ bnx2_5706s_linkup(bp);
+ else if (CHIP_NUM(bp) == CHIP_NUM_5708)
+ bnx2_5708s_linkup(bp);
+ else if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ bnx2_5709s_linkup(bp);
+ }
+ else {
+ bnx2_copper_linkup(bp);
+ }
+ bnx2_resolve_flow_ctrl(bp);
+ }
+ else {
+ if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+ (bp->autoneg & AUTONEG_SPEED))
+ bnx2_disable_forced_2g5(bp);
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
+ u32 bmcr;
+
+ bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+ bmcr |= BMCR_ANENABLE;
+ bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
+
+ bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
+ }
+ bp->link_up = 0;
+ }
+
+ if (bp->link_up != link_up) {
+ bnx2_report_link(bp);
+ }
+
+ bnx2_set_mac_link(bp);
+
+ return 0;
+}
+
+static int
+bnx2_reset_phy(struct bnx2 *bp)
+{
+ int i;
+ u32 reg;
+
+ bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
+
+#define PHY_RESET_MAX_WAIT 100
+ for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
+ udelay(10);
+
+ bnx2_read_phy(bp, bp->mii_bmcr, &reg);
+ if (!(reg & BMCR_RESET)) {
+ udelay(20);
+ break;
+ }
+ }
+ if (i == PHY_RESET_MAX_WAIT) {
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static u32
+bnx2_phy_get_pause_adv(struct bnx2 *bp)
+{
+ u32 adv = 0;
+
+ if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
+ (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+ adv = ADVERTISE_1000XPAUSE;
+ }
+ else {
+ adv = ADVERTISE_PAUSE_CAP;
+ }
+ }
+ else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
+ if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+ adv = ADVERTISE_1000XPSE_ASYM;
+ }
+ else {
+ adv = ADVERTISE_PAUSE_ASYM;
+ }
+ }
+ else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
+ if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+ adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
+ }
+ else {
+ adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ }
+ }
+ return adv;
+}
+
+static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
+
+static int
+bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
+{
+ u32 speed_arg = 0, pause_adv;
+
+ pause_adv = bnx2_phy_get_pause_adv(bp);
+
+ if (bp->autoneg & AUTONEG_SPEED) {
+ speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
+ if (bp->advertising & ADVERTISED_10baseT_Half)
+ speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
+ if (bp->advertising & ADVERTISED_10baseT_Full)
+ speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
+ if (bp->advertising & ADVERTISED_100baseT_Half)
+ speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
+ if (bp->advertising & ADVERTISED_100baseT_Full)
+ speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
+ if (bp->advertising & ADVERTISED_1000baseT_Full)
+ speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
+ if (bp->advertising & ADVERTISED_2500baseX_Full)
+ speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
+ } else {
+ if (bp->req_line_speed == SPEED_2500)
+ speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
+ else if (bp->req_line_speed == SPEED_1000)
+ speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
+ else if (bp->req_line_speed == SPEED_100) {
+ if (bp->req_duplex == DUPLEX_FULL)
+ speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
+ else
+ speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
+ } else if (bp->req_line_speed == SPEED_10) {
+ if (bp->req_duplex == DUPLEX_FULL)
+ speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
+ else
+ speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
+ }
+ }
+
+ if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
+ speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
+ if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
+ speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
+
+ if (port == PORT_TP)
+ speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
+ BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
+
+ bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
+
+ spin_unlock_bh(&bp->phy_lock);
+ bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
+ spin_lock_bh(&bp->phy_lock);
+
+ return 0;
+}
+
+static int
+bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
+{
+ u32 adv, bmcr;
+ u32 new_adv = 0;
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+ return bnx2_setup_remote_phy(bp, port);
+
+ if (!(bp->autoneg & AUTONEG_SPEED)) {
+ u32 new_bmcr;
+ int force_link_down = 0;
+
+ if (bp->req_line_speed == SPEED_2500) {
+ if (!bnx2_test_and_enable_2g5(bp))
+ force_link_down = 1;
+ } else if (bp->req_line_speed == SPEED_1000) {
+ if (bnx2_test_and_disable_2g5(bp))
+ force_link_down = 1;
+ }
+ bnx2_read_phy(bp, bp->mii_adv, &adv);
+ adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
+
+ bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+ new_bmcr = bmcr & ~BMCR_ANENABLE;
+ new_bmcr |= BMCR_SPEED1000;
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ if (bp->req_line_speed == SPEED_2500)
+ bnx2_enable_forced_2g5(bp);
+ else if (bp->req_line_speed == SPEED_1000) {
+ bnx2_disable_forced_2g5(bp);
+ new_bmcr &= ~0x2000;
+ }
+
+ } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
+ if (bp->req_line_speed == SPEED_2500)
+ new_bmcr |= BCM5708S_BMCR_FORCE_2500;
+ else
+ new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
+ }
+
+ if (bp->req_duplex == DUPLEX_FULL) {
+ adv |= ADVERTISE_1000XFULL;
+ new_bmcr |= BMCR_FULLDPLX;
+ }
+ else {
+ adv |= ADVERTISE_1000XHALF;
+ new_bmcr &= ~BMCR_FULLDPLX;
+ }
+ if ((new_bmcr != bmcr) || (force_link_down)) {
+			/* Force a link down that is visible to the other side */
+ if (bp->link_up) {
+ bnx2_write_phy(bp, bp->mii_adv, adv &
+ ~(ADVERTISE_1000XFULL |
+ ADVERTISE_1000XHALF));
+ bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
+ BMCR_ANRESTART | BMCR_ANENABLE);
+
+ bp->link_up = 0;
+ netif_carrier_off(bp->dev);
+ bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
+ bnx2_report_link(bp);
+ }
+ bnx2_write_phy(bp, bp->mii_adv, adv);
+ bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
+ } else {
+ bnx2_resolve_flow_ctrl(bp);
+ bnx2_set_mac_link(bp);
+ }
+ return 0;
+ }
+
+ bnx2_test_and_enable_2g5(bp);
+
+ if (bp->advertising & ADVERTISED_1000baseT_Full)
+ new_adv |= ADVERTISE_1000XFULL;
+
+ new_adv |= bnx2_phy_get_pause_adv(bp);
+
+ bnx2_read_phy(bp, bp->mii_adv, &adv);
+ bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+
+ bp->serdes_an_pending = 0;
+ if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
+		/* Force a link down that is visible to the other side */
+ if (bp->link_up) {
+ bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
+ spin_unlock_bh(&bp->phy_lock);
+ msleep(20);
+ spin_lock_bh(&bp->phy_lock);
+ }
+
+ bnx2_write_phy(bp, bp->mii_adv, new_adv);
+ bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
+ BMCR_ANENABLE);
+		/* Speed up link-up time when the link partner
+		 * does not autonegotiate, which is very common
+		 * in blade servers. Some blade servers use
+		 * IPMI for keyboard input and it's important
+		 * to minimize link disruptions. Autonegotiation
+		 * involves exchanging base pages plus 3 next pages
+		 * and normally completes in about 120 msec.
+		 */
+ bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
+ bp->serdes_an_pending = 1;
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
+ } else {
+ bnx2_resolve_flow_ctrl(bp);
+ bnx2_set_mac_link(bp);
+ }
+
+ return 0;
+}
+
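+/* Note: ETHTOOL_ALL_FIBRE_SPEED expands to an expression that uses the
+ * local variable "bp", so it can only be used where one is in scope.
+ */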
+#define ETHTOOL_ALL_FIBRE_SPEED \
+ (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
+ (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
+ (ADVERTISED_1000baseT_Full)
+
+#define ETHTOOL_ALL_COPPER_SPEED \
+ (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
+ ADVERTISED_1000baseT_Full)
+
+#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+ ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
+
+#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
+
+static void
+bnx2_set_default_remote_link(struct bnx2 *bp)
+{
+ u32 link;
+
+ if (bp->phy_port == PORT_TP)
+ link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
+ else
+ link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
+
+ if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
+ bp->req_line_speed = 0;
+ bp->autoneg |= AUTONEG_SPEED;
+ bp->advertising = ADVERTISED_Autoneg;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
+ bp->advertising |= ADVERTISED_10baseT_Half;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
+ bp->advertising |= ADVERTISED_10baseT_Full;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
+ bp->advertising |= ADVERTISED_100baseT_Half;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
+ bp->advertising |= ADVERTISED_100baseT_Full;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
+ bp->advertising |= ADVERTISED_1000baseT_Full;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
+ bp->advertising |= ADVERTISED_2500baseX_Full;
+ } else {
+ bp->autoneg = 0;
+ bp->advertising = 0;
+ bp->req_duplex = DUPLEX_FULL;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
+ bp->req_line_speed = SPEED_10;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
+ bp->req_duplex = DUPLEX_HALF;
+ }
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
+ bp->req_line_speed = SPEED_100;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
+ bp->req_duplex = DUPLEX_HALF;
+ }
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
+ bp->req_line_speed = SPEED_1000;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
+ bp->req_line_speed = SPEED_2500;
+ }
+}
+
+static void
+bnx2_set_default_link(struct bnx2 *bp)
+{
+ if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
+ bnx2_set_default_remote_link(bp);
+ return;
+ }
+
+ bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
+ bp->req_line_speed = 0;
+ if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+ u32 reg;
+
+ bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
+
+ reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
+ reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
+ if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
+ bp->autoneg = 0;
+ bp->req_line_speed = bp->line_speed = SPEED_1000;
+ bp->req_duplex = DUPLEX_FULL;
+ }
+ } else
+ bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
+}
+
+static void
+bnx2_send_heart_beat(struct bnx2 *bp)
+{
+ u32 msg;
+ u32 addr;
+
+ spin_lock(&bp->indirect_lock);
+ msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
+ addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
+ REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
+ REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
+ spin_unlock(&bp->indirect_lock);
+}
+
+static void
+bnx2_remote_phy_event(struct bnx2 *bp)
+{
+ u32 msg;
+ u8 link_up = bp->link_up;
+ u8 old_port;
+
+ msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
+
+ if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
+ bnx2_send_heart_beat(bp);
+
+ msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
+
+ if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
+ bp->link_up = 0;
+ else {
+ u32 speed;
+
+ bp->link_up = 1;
+ speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
+ bp->duplex = DUPLEX_FULL;
+ switch (speed) {
+ case BNX2_LINK_STATUS_10HALF:
+ bp->duplex = DUPLEX_HALF;
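+			/* fall through */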
+ case BNX2_LINK_STATUS_10FULL:
+ bp->line_speed = SPEED_10;
+ break;
+ case BNX2_LINK_STATUS_100HALF:
+ bp->duplex = DUPLEX_HALF;
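+			/* fall through */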
+ case BNX2_LINK_STATUS_100BASE_T4:
+ case BNX2_LINK_STATUS_100FULL:
+ bp->line_speed = SPEED_100;
+ break;
+ case BNX2_LINK_STATUS_1000HALF:
+ bp->duplex = DUPLEX_HALF;
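+			/* fall through */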
+ case BNX2_LINK_STATUS_1000FULL:
+ bp->line_speed = SPEED_1000;
+ break;
+ case BNX2_LINK_STATUS_2500HALF:
+ bp->duplex = DUPLEX_HALF;
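+			/* fall through */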
+ case BNX2_LINK_STATUS_2500FULL:
+ bp->line_speed = SPEED_2500;
+ break;
+ default:
+ bp->line_speed = 0;
+ break;
+ }
+
+ bp->flow_ctrl = 0;
+ if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
+ (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
+ if (bp->duplex == DUPLEX_FULL)
+ bp->flow_ctrl = bp->req_flow_ctrl;
+ } else {
+ if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
+ bp->flow_ctrl |= FLOW_CTRL_TX;
+ if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
+ bp->flow_ctrl |= FLOW_CTRL_RX;
+ }
+
+ old_port = bp->phy_port;
+ if (msg & BNX2_LINK_STATUS_SERDES_LINK)
+ bp->phy_port = PORT_FIBRE;
+ else
+ bp->phy_port = PORT_TP;
+
+ if (old_port != bp->phy_port)
+ bnx2_set_default_link(bp);
+
+ }
+ if (bp->link_up != link_up)
+ bnx2_report_link(bp);
+
+ bnx2_set_mac_link(bp);
+}
+
+static int
+bnx2_set_remote_link(struct bnx2 *bp)
+{
+ u32 evt_code;
+
+ evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
+ switch (evt_code) {
+ case BNX2_FW_EVT_CODE_LINK_EVENT:
+ bnx2_remote_phy_event(bp);
+ break;
+ case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
+ default:
+ bnx2_send_heart_beat(bp);
+ break;
+ }
+ return 0;
+}
+
+static int
+bnx2_setup_copper_phy(struct bnx2 *bp)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
+{
+ u32 bmcr;
+ u32 new_bmcr;
+
+ bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+
+ if (bp->autoneg & AUTONEG_SPEED) {
+ u32 adv_reg, adv1000_reg;
+ u32 new_adv_reg = 0;
+ u32 new_adv1000_reg = 0;
+
+ bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
+ adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
+ ADVERTISE_PAUSE_ASYM);
+
+ bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
+ adv1000_reg &= PHY_ALL_1000_SPEED;
+
+ if (bp->advertising & ADVERTISED_10baseT_Half)
+ new_adv_reg |= ADVERTISE_10HALF;
+ if (bp->advertising & ADVERTISED_10baseT_Full)
+ new_adv_reg |= ADVERTISE_10FULL;
+ if (bp->advertising & ADVERTISED_100baseT_Half)
+ new_adv_reg |= ADVERTISE_100HALF;
+ if (bp->advertising & ADVERTISED_100baseT_Full)
+ new_adv_reg |= ADVERTISE_100FULL;
+ if (bp->advertising & ADVERTISED_1000baseT_Full)
+ new_adv1000_reg |= ADVERTISE_1000FULL;
+
+ new_adv_reg |= ADVERTISE_CSMA;
+
+ new_adv_reg |= bnx2_phy_get_pause_adv(bp);
+
+ if ((adv1000_reg != new_adv1000_reg) ||
+ (adv_reg != new_adv_reg) ||
+ ((bmcr & BMCR_ANENABLE) == 0)) {
+
+ bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
+ bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
+ bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
+ BMCR_ANENABLE);
+ }
+ else if (bp->link_up) {
+			/* Flow ctrl may have changed from auto to forced,
+			 * or vice-versa.
+			 */
+
+ bnx2_resolve_flow_ctrl(bp);
+ bnx2_set_mac_link(bp);
+ }
+ return 0;
+ }
+
+ new_bmcr = 0;
+ if (bp->req_line_speed == SPEED_100) {
+ new_bmcr |= BMCR_SPEED100;
+ }
+ if (bp->req_duplex == DUPLEX_FULL) {
+ new_bmcr |= BMCR_FULLDPLX;
+ }
+ if (new_bmcr != bmcr) {
+ u32 bmsr;
+
+ bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
+ bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
+
+ if (bmsr & BMSR_LSTATUS) {
+ /* Force link down */
+ bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
+ spin_unlock_bh(&bp->phy_lock);
+ msleep(50);
+ spin_lock_bh(&bp->phy_lock);
+
+ bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
+ bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
+ }
+
+ bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
+
+		/* Normally, the new speed is set up after the link has
+		 * gone down and up again. In some cases, the link will not
+		 * go down, so we need to set up the new speed here.
+		 */
+ if (bmsr & BMSR_LSTATUS) {
+ bp->line_speed = bp->req_line_speed;
+ bp->duplex = bp->req_duplex;
+ bnx2_resolve_flow_ctrl(bp);
+ bnx2_set_mac_link(bp);
+ }
+ } else {
+ bnx2_resolve_flow_ctrl(bp);
+ bnx2_set_mac_link(bp);
+ }
+ return 0;
+}
+
+static int
+bnx2_setup_phy(struct bnx2 *bp, u8 port)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
+{
+ if (bp->loopback == MAC_LOOPBACK)
+ return 0;
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+ return bnx2_setup_serdes_phy(bp, port);
+ }
+ else {
+ return bnx2_setup_copper_phy(bp);
+ }
+}
+
+static int
+bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
+{
+ u32 val;
+
+ bp->mii_bmcr = MII_BMCR + 0x10;
+ bp->mii_bmsr = MII_BMSR + 0x10;
+ bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
+ bp->mii_adv = MII_ADVERTISE + 0x10;
+ bp->mii_lpa = MII_LPA + 0x10;
+ bp->mii_up1 = MII_BNX2_OVER1G_UP1;
+
+ bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
+ bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
+
+ bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+ if (reset_phy)
+ bnx2_reset_phy(bp);
+
+ bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
+
+ bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
+ val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
+ val |= MII_BNX2_SD_1000XCTL1_FIBER;
+ bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
+
+ bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
+ bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
+ if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
+ val |= BCM5708S_UP1_2G5;
+ else
+ val &= ~BCM5708S_UP1_2G5;
+ bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
+
+ bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
+ bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
+ val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
+ bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
+
+ bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
+
+ val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
+ MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
+ bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
+
+ bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+
+ return 0;
+}
+
+static int
+bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
+{
+ u32 val;
+
+ if (reset_phy)
+ bnx2_reset_phy(bp);
+
+ bp->mii_up1 = BCM5708S_UP1;
+
+ bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
+ bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
+ bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
+
+ bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
+ val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
+ bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
+
+ bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
+ val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
+ bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
+ bnx2_read_phy(bp, BCM5708S_UP1, &val);
+ val |= BCM5708S_UP1_2G5;
+ bnx2_write_phy(bp, BCM5708S_UP1, val);
+ }
+
+ if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
+ (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
+ (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
+ /* increase tx signal amplitude */
+ bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
+ BCM5708S_BLK_ADDR_TX_MISC);
+ bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
+ val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
+ bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
+ bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
+ }
+
+ val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
+ BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
+
+ if (val) {
+ u32 is_backplane;
+
+ is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
+ if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
+ bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
+ BCM5708S_BLK_ADDR_TX_MISC);
+ bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
+ bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
+ BCM5708S_BLK_ADDR_DIG);
+ }
+ }
+ return 0;
+}
+
+static int
+bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
+{
+ if (reset_phy)
+ bnx2_reset_phy(bp);
+
+ bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5706)
+ REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
+
+ if (bp->dev->mtu > 1500) {
+ u32 val;
+
+ /* Set extended packet length bit */
+ bnx2_write_phy(bp, 0x18, 0x7);
+ bnx2_read_phy(bp, 0x18, &val);
+ bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
+
+ bnx2_write_phy(bp, 0x1c, 0x6c00);
+ bnx2_read_phy(bp, 0x1c, &val);
+ bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
+ }
+ else {
+ u32 val;
+
+ bnx2_write_phy(bp, 0x18, 0x7);
+ bnx2_read_phy(bp, 0x18, &val);
+ bnx2_write_phy(bp, 0x18, val & ~0x4007);
+
+ bnx2_write_phy(bp, 0x1c, 0x6c00);
+ bnx2_read_phy(bp, 0x1c, &val);
+ bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
+ }
+
+ return 0;
+}
+
+static int
+bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
+{
+ u32 val;
+
+ if (reset_phy)
+ bnx2_reset_phy(bp);
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
+ bnx2_write_phy(bp, 0x18, 0x0c00);
+ bnx2_write_phy(bp, 0x17, 0x000a);
+ bnx2_write_phy(bp, 0x15, 0x310b);
+ bnx2_write_phy(bp, 0x17, 0x201f);
+ bnx2_write_phy(bp, 0x15, 0x9506);
+ bnx2_write_phy(bp, 0x17, 0x401f);
+ bnx2_write_phy(bp, 0x15, 0x14e2);
+ bnx2_write_phy(bp, 0x18, 0x0400);
+ }
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
+ bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
+ MII_BNX2_DSP_EXPAND_REG | 0x8);
+ bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
+ val &= ~(1 << 8);
+ bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
+ }
+
+ if (bp->dev->mtu > 1500) {
+ /* Set extended packet length bit */
+ bnx2_write_phy(bp, 0x18, 0x7);
+ bnx2_read_phy(bp, 0x18, &val);
+ bnx2_write_phy(bp, 0x18, val | 0x4000);
+
+ bnx2_read_phy(bp, 0x10, &val);
+ bnx2_write_phy(bp, 0x10, val | 0x1);
+ }
+ else {
+ bnx2_write_phy(bp, 0x18, 0x7);
+ bnx2_read_phy(bp, 0x18, &val);
+ bnx2_write_phy(bp, 0x18, val & ~0x4007);
+
+ bnx2_read_phy(bp, 0x10, &val);
+ bnx2_write_phy(bp, 0x10, val & ~0x1);
+ }
+
+ /* ethernet@wirespeed */
+ bnx2_write_phy(bp, 0x18, 0x7007);
+ bnx2_read_phy(bp, 0x18, &val);
+ bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
+ return 0;
+}
+
+static int
+bnx2_init_phy(struct bnx2 *bp, int reset_phy)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
+{
+ u32 val;
+ int rc = 0;
+
+ bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
+ bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
+
+ bp->mii_bmcr = MII_BMCR;
+ bp->mii_bmsr = MII_BMSR;
+ bp->mii_bmsr1 = MII_BMSR;
+ bp->mii_adv = MII_ADVERTISE;
+ bp->mii_lpa = MII_LPA;
+
+ REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+ goto setup_phy;
+
+ bnx2_read_phy(bp, MII_PHYSID1, &val);
+ bp->phy_id = val << 16;
+ bnx2_read_phy(bp, MII_PHYSID2, &val);
+ bp->phy_id |= val & 0xffff;
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+ if (CHIP_NUM(bp) == CHIP_NUM_5706)
+ rc = bnx2_init_5706s_phy(bp, reset_phy);
+ else if (CHIP_NUM(bp) == CHIP_NUM_5708)
+ rc = bnx2_init_5708s_phy(bp, reset_phy);
+ else if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ rc = bnx2_init_5709s_phy(bp, reset_phy);
+ }
+ else {
+ rc = bnx2_init_copper_phy(bp, reset_phy);
+ }
+
+setup_phy:
+ if (!rc)
+ rc = bnx2_setup_phy(bp, bp->phy_port);
+
+ return rc;
+}
+
+static int
+bnx2_set_mac_loopback(struct bnx2 *bp)
+{
+ u32 mac_mode;
+
+ mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
+ mac_mode &= ~BNX2_EMAC_MODE_PORT;
+ mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
+ REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
+ bp->link_up = 1;
+ return 0;
+}
+
+static int bnx2_test_link(struct bnx2 *);
+
+static int
+bnx2_set_phy_loopback(struct bnx2 *bp)
+{
+ u32 mac_mode;
+ int rc, i;
+
+ spin_lock_bh(&bp->phy_lock);
+ rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
+ BMCR_SPEED1000);
+ spin_unlock_bh(&bp->phy_lock);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < 10; i++) {
+ if (bnx2_test_link(bp) == 0)
+ break;
+ msleep(100);
+ }
+
+ mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
+ mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
+ BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
+ BNX2_EMAC_MODE_25G_MODE);
+
+ mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
+ REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
+ bp->link_up = 1;
+ return 0;
+}
+
+static void
+bnx2_dump_mcp_state(struct bnx2 *bp)
+{
+ struct net_device *dev = bp->dev;
+ u32 mcp_p0, mcp_p1;
+
+ netdev_err(dev, "<--- start MCP states dump --->\n");
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ mcp_p0 = BNX2_MCP_STATE_P0;
+ mcp_p1 = BNX2_MCP_STATE_P1;
+ } else {
+ mcp_p0 = BNX2_MCP_STATE_P0_5708;
+ mcp_p1 = BNX2_MCP_STATE_P1_5708;
+ }
+ netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
+ bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
+ netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
+ bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
+ bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
+ bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
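+	/* The program counter is read twice so the dump shows whether the
+	 * MCP is making progress.
+	 */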
+ netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
+ bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
+ bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
+ bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
+ netdev_err(dev, "DEBUG: shmem states:\n");
+ netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
+ bnx2_shmem_rd(bp, BNX2_DRV_MB),
+ bnx2_shmem_rd(bp, BNX2_FW_MB),
+ bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
+ pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
+ netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
+ bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
+ bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
+ pr_cont(" condition[%08x]\n",
+ bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
+ DP_SHMEM_LINE(bp, 0x3cc);
+ DP_SHMEM_LINE(bp, 0x3dc);
+ DP_SHMEM_LINE(bp, 0x3ec);
+ netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
+ netdev_err(dev, "<--- end MCP states dump --->\n");
+}
+
+static int
+bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
+{
+ int i;
+ u32 val;
+
+ bp->fw_wr_seq++;
+ msg_data |= bp->fw_wr_seq;
+
+ bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
+
+ if (!ack)
+ return 0;
+
+ /* wait for an acknowledgement. */
+ for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
+ msleep(10);
+
+ val = bnx2_shmem_rd(bp, BNX2_FW_MB);
+
+ if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
+ break;
+ }
+ if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
+ return 0;
+
+ /* If we timed out, inform the firmware that this is the case. */
+ if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
+ msg_data &= ~BNX2_DRV_MSG_CODE;
+ msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
+
+ bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
+ if (!silent) {
+ pr_err("fw sync timeout, reset code = %x\n", msg_data);
+ bnx2_dump_mcp_state(bp);
+ }
+
+ return -EBUSY;
+ }
+
+ if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
+ return -EIO;
+
+ return 0;
+}
+
+static int
+bnx2_init_5709_context(struct bnx2 *bp)
+{
+ int i, ret = 0;
+ u32 val;
+
+ val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
+ val |= (BCM_PAGE_BITS - 8) << 16;
+ REG_WR(bp, BNX2_CTX_COMMAND, val);
+ for (i = 0; i < 10; i++) {
+ val = REG_RD(bp, BNX2_CTX_COMMAND);
+ if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
+ break;
+ udelay(2);
+ }
+ if (val & BNX2_CTX_COMMAND_MEM_INIT)
+ return -EBUSY;
+
+ for (i = 0; i < bp->ctx_pages; i++) {
+ int j;
+
+ if (bp->ctx_blk[i])
+ memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
+ else
+ return -ENOMEM;
+
+ REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
+ (bp->ctx_blk_mapping[i] & 0xffffffff) |
+ BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
+ REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
+ (u64) bp->ctx_blk_mapping[i] >> 32);
+ REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
+ BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
+ for (j = 0; j < 10; j++) {
+
+ val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
+ if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
+ break;
+ udelay(5);
+ }
+ if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+ return ret;
+}
+
+static void
+bnx2_init_context(struct bnx2 *bp)
+{
+ u32 vcid;
+
+ vcid = 96;
+ while (vcid) {
+ u32 vcid_addr, pcid_addr, offset;
+ int i;
+
+ vcid--;
+
+ if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
+ u32 new_vcid;
+
+ vcid_addr = GET_PCID_ADDR(vcid);
+ if (vcid & 0x8) {
+ new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
+ }
+ else {
+ new_vcid = vcid;
+ }
+ pcid_addr = GET_PCID_ADDR(new_vcid);
+ }
+ else {
+ vcid_addr = GET_CID_ADDR(vcid);
+ pcid_addr = vcid_addr;
+ }
+
+ for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
+ vcid_addr += (i << PHY_CTX_SHIFT);
+ pcid_addr += (i << PHY_CTX_SHIFT);
+
+ REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
+ REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
+
+ /* Zero out the context. */
+ for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
+ bnx2_ctx_wr(bp, vcid_addr, offset, 0);
+ }
+ }
+}
+
+static int
+bnx2_alloc_bad_rbuf(struct bnx2 *bp)
+{
+ u16 *good_mbuf;
+ u32 good_mbuf_cnt;
+ u32 val;
+
+ good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
+ if (good_mbuf == NULL) {
+ pr_err("Failed to allocate memory in %s\n", __func__);
+ return -ENOMEM;
+ }
+
+ REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
+ BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
+
+ good_mbuf_cnt = 0;
+
+ /* Allocate a bunch of mbufs and save the good ones in an array. */
+ val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
+ while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
+ bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
+ BNX2_RBUF_COMMAND_ALLOC_REQ);
+
+ val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
+
+ val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
+
+ /* The addresses with Bit 9 set are bad memory blocks. */
+ if (!(val & (1 << 9))) {
+ good_mbuf[good_mbuf_cnt] = (u16) val;
+ good_mbuf_cnt++;
+ }
+
+ val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
+ }
+
+	/* Free the good ones back to the mbuf pool, thus discarding
+	 * all the bad ones. */
+ while (good_mbuf_cnt) {
+ good_mbuf_cnt--;
+
+ val = good_mbuf[good_mbuf_cnt];
+ val = (val << 9) | val | 1;
+
+ bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
+ }
+ kfree(good_mbuf);
+ return 0;
+}
+
+static void
+bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
+{
+ u32 val;
+
+ val = (mac_addr[0] << 8) | mac_addr[1];
+
+ REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
+
+ val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5];
+
+ REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
+}
+
+static inline int
+bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
+{
+ dma_addr_t mapping;
+ struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
+ struct rx_bd *rxbd =
+ &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
+ struct page *page = alloc_page(gfp);
+
+ if (!page)
+ return -ENOMEM;
+ mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+ __free_page(page);
+ return -EIO;
+ }
+
+ rx_pg->page = page;
+ dma_unmap_addr_set(rx_pg, mapping, mapping);
+ rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
+ rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
+ return 0;
+}
+
+static void
+bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
+{
+ struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
+ struct page *page = rx_pg->page;
+
+ if (!page)
+ return;
+
+ dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
+ PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
+ __free_page(page);
+ rx_pg->page = NULL;
+}
+
+static inline int
+bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
+{
+ struct sk_buff *skb;
+ struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
+ dma_addr_t mapping;
+ struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
+ unsigned long align;
+
+ skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
+ if (skb == NULL) {
+ return -ENOMEM;
+ }
+
+ if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
+ skb_reserve(skb, BNX2_RX_ALIGN - align);
+
+ mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
+
+ rx_buf->skb = skb;
+ rx_buf->desc = (struct l2_fhdr *) skb->data;
+ dma_unmap_addr_set(rx_buf, mapping, mapping);
+
+ rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
+ rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
+
+ rxr->rx_prod_bseq += bp->rx_buf_use_size;
+
+ return 0;
+}
+
+static int
+bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
+{
+ struct status_block *sblk = bnapi->status_blk.msi;
+ u32 new_link_state, old_link_state;
+ int is_set = 1;
+
+ new_link_state = sblk->status_attn_bits & event;
+ old_link_state = sblk->status_attn_bits_ack & event;
+ if (new_link_state != old_link_state) {
+ if (new_link_state)
+ REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
+ else
+ REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
+ } else
+ is_set = 0;
+
+ return is_set;
+}
+
+static void
+bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
+{
+ spin_lock(&bp->phy_lock);
+
+ if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
+ bnx2_set_link(bp);
+ if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
+ bnx2_set_remote_link(bp);
+
+ spin_unlock(&bp->phy_lock);
+}
+
+static inline u16
+bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
+{
+ u16 cons;
+
+ /* Tell compiler that status block fields can change. */
+ barrier();
+ cons = *bnapi->hw_tx_cons_ptr;
+ barrier();
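+	/* The last descriptor in each ring page is reserved for the link
+	 * to the next page and never holds a packet, so the hardware
+	 * consumer index skips over it.
+	 */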
+ if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
+ cons++;
+ return cons;
+}
+
+static int
+bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
+{
+ struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+ u16 hw_cons, sw_cons, sw_ring_cons;
+ int tx_pkt = 0, index;
+ struct netdev_queue *txq;
+
+ index = (bnapi - bp->bnx2_napi);
+ txq = netdev_get_tx_queue(bp->dev, index);
+
+ hw_cons = bnx2_get_hw_tx_cons(bnapi);
+ sw_cons = txr->tx_cons;
+
+ while (sw_cons != hw_cons) {
+ struct sw_tx_bd *tx_buf;
+ struct sk_buff *skb;
+ int i, last;
+
+ sw_ring_cons = TX_RING_IDX(sw_cons);
+
+ tx_buf = &txr->tx_buf_ring[sw_ring_cons];
+ skb = tx_buf->skb;
+
+		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
+ prefetch(&skb->end);
+
+ /* partial BD completions possible with TSO packets */
+ if (tx_buf->is_gso) {
+ u16 last_idx, last_ring_idx;
+
+ last_idx = sw_cons + tx_buf->nr_frags + 1;
+ last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
+ if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
+ last_idx++;
+ }
+ if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
+ break;
+ }
+ }
+
+ dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+
+ tx_buf->skb = NULL;
+ last = tx_buf->nr_frags;
+
+ for (i = 0; i < last; i++) {
+ sw_cons = NEXT_TX_BD(sw_cons);
+
+ dma_unmap_page(&bp->pdev->dev,
+ dma_unmap_addr(
+ &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
+ mapping),
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_TODEVICE);
+ }
+
+ sw_cons = NEXT_TX_BD(sw_cons);
+
+ dev_kfree_skb(skb);
+ tx_pkt++;
+ if (tx_pkt == budget)
+ break;
+
+ if (hw_cons == sw_cons)
+ hw_cons = bnx2_get_hw_tx_cons(bnapi);
+ }
+
+ txr->hw_tx_cons = hw_cons;
+ txr->tx_cons = sw_cons;
+
+ /* Need to make the tx_cons update visible to bnx2_start_xmit()
+ * before checking for netif_tx_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that bnx2_start_xmit()
+ * will miss it and cause the queue to be stopped forever.
+ */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(txq)) &&
+ (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
+ __netif_tx_lock(txq, smp_processor_id());
+ if ((netif_tx_queue_stopped(txq)) &&
+ (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock(txq);
+ }
+
+ return tx_pkt;
+}
+
+static void
+bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
+ struct sk_buff *skb, int count)
+{
+ struct sw_pg *cons_rx_pg, *prod_rx_pg;
+ struct rx_bd *cons_bd, *prod_bd;
+ int i;
+ u16 hw_prod, prod;
+ u16 cons = rxr->rx_pg_cons;
+
+ cons_rx_pg = &rxr->rx_pg_ring[cons];
+
+ /* The caller was unable to allocate a new page to replace the
+ * last one in the frags array, so we need to recycle that page
+ * and then free the skb.
+ */
+ if (skb) {
+ struct page *page;
+ struct skb_shared_info *shinfo;
+
+ shinfo = skb_shinfo(skb);
+ shinfo->nr_frags--;
+ page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
+ __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
+
+ cons_rx_pg->page = page;
+ dev_kfree_skb(skb);
+ }
+
+ hw_prod = rxr->rx_pg_prod;
+
+ for (i = 0; i < count; i++) {
+ prod = RX_PG_RING_IDX(hw_prod);
+
+ prod_rx_pg = &rxr->rx_pg_ring[prod];
+ cons_rx_pg = &rxr->rx_pg_ring[cons];
+ cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+ prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+ if (prod != cons) {
+ prod_rx_pg->page = cons_rx_pg->page;
+ cons_rx_pg->page = NULL;
+ dma_unmap_addr_set(prod_rx_pg, mapping,
+ dma_unmap_addr(cons_rx_pg, mapping));
+
+ prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
+ prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
+
+ }
+ cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
+ hw_prod = NEXT_RX_BD(hw_prod);
+ }
+ rxr->rx_pg_prod = hw_prod;
+ rxr->rx_pg_cons = cons;
+}
+
+static inline void
+bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
+ struct sk_buff *skb, u16 cons, u16 prod)
+{
+ struct sw_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_bd *cons_bd, *prod_bd;
+
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+ prod_rx_buf = &rxr->rx_buf_ring[prod];
+
+ dma_sync_single_for_device(&bp->pdev->dev,
+ dma_unmap_addr(cons_rx_buf, mapping),
+ BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+
+ rxr->rx_prod_bseq += bp->rx_buf_use_size;
+
+ prod_rx_buf->skb = skb;
+ prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
+
+ if (cons == prod)
+ return;
+
+ dma_unmap_addr_set(prod_rx_buf, mapping,
+ dma_unmap_addr(cons_rx_buf, mapping));
+
+ cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+ prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
+ prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
+}
+
+static int
+bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
+ unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
+ u32 ring_idx)
+{
+ int err;
+ u16 prod = ring_idx & 0xffff;
+
+ err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
+ if (unlikely(err)) {
+ bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
+ if (hdr_len) {
+ unsigned int raw_len = len + 4;
+ int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
+
+ bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
+ }
+ return err;
+ }
+
+ skb_reserve(skb, BNX2_RX_OFFSET);
+ dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
+
+ if (hdr_len == 0) {
+ skb_put(skb, len);
+ return 0;
+ } else {
+ unsigned int i, frag_len, frag_size, pages;
+ struct sw_pg *rx_pg;
+ u16 pg_cons = rxr->rx_pg_cons;
+ u16 pg_prod = rxr->rx_pg_prod;
+
+ frag_size = len + 4 - hdr_len;
+ pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
+ skb_put(skb, hdr_len);
+
+ for (i = 0; i < pages; i++) {
+ dma_addr_t mapping_old;
+
+ frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
+ if (unlikely(frag_len <= 4)) {
+ unsigned int tail = 4 - frag_len;
+
+ rxr->rx_pg_cons = pg_cons;
+ rxr->rx_pg_prod = pg_prod;
+ bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
+ pages - i);
+ skb->len -= tail;
+ if (i == 0) {
+ skb->tail -= tail;
+ } else {
+ skb_frag_t *frag =
+ &skb_shinfo(skb)->frags[i - 1];
+ frag->size -= tail;
+ skb->data_len -= tail;
+ skb->truesize -= tail;
+ }
+ return 0;
+ }
+ rx_pg = &rxr->rx_pg_ring[pg_cons];
+
+ /* Don't unmap yet. If we're unable to allocate a new
+ * page, we need to recycle the page and the DMA addr.
+ */
+ mapping_old = dma_unmap_addr(rx_pg, mapping);
+ if (i == pages - 1)
+ frag_len -= 4;
+
+ skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
+ rx_pg->page = NULL;
+
+ err = bnx2_alloc_rx_page(bp, rxr,
+ RX_PG_RING_IDX(pg_prod),
+ GFP_ATOMIC);
+ if (unlikely(err)) {
+ rxr->rx_pg_cons = pg_cons;
+ rxr->rx_pg_prod = pg_prod;
+ bnx2_reuse_rx_skb_pages(bp, rxr, skb,
+ pages - i);
+ return err;
+ }
+
+ dma_unmap_page(&bp->pdev->dev, mapping_old,
+ PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
+ frag_size -= frag_len;
+ skb->data_len += frag_len;
+ skb->truesize += frag_len;
+ skb->len += frag_len;
+
+ pg_prod = NEXT_RX_BD(pg_prod);
+ pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
+ }
+ rxr->rx_pg_prod = pg_prod;
+ rxr->rx_pg_cons = pg_cons;
+ }
+ return 0;
+}
+
+static inline u16
+bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
+{
+ u16 cons;
+
+ /* Tell compiler that status block fields can change. */
+ barrier();
+ cons = *bnapi->hw_rx_cons_ptr;
+ barrier();
+ if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
+ cons++;
+ return cons;
+}
+
+static int
+bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
+{
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+ u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
+ struct l2_fhdr *rx_hdr;
+ int rx_pkt = 0, pg_ring_used = 0;
+
+ hw_cons = bnx2_get_hw_rx_cons(bnapi);
+ sw_cons = rxr->rx_cons;
+ sw_prod = rxr->rx_prod;
+
+ /* Memory barrier necessary as speculative reads of the rx
+ * buffer can be ahead of the index in the status block
+ */
+ rmb();
+ while (sw_cons != hw_cons) {
+ unsigned int len, hdr_len;
+ u32 status;
+ struct sw_bd *rx_buf, *next_rx_buf;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ sw_ring_cons = RX_RING_IDX(sw_cons);
+ sw_ring_prod = RX_RING_IDX(sw_prod);
+
+ rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
+ skb = rx_buf->skb;
+ prefetchw(skb);
+
+ next_rx_buf =
+ &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
+ prefetch(next_rx_buf->desc);
+
+ rx_buf->skb = NULL;
+
+ dma_addr = dma_unmap_addr(rx_buf, mapping);
+
+ dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
+ BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
+ PCI_DMA_FROMDEVICE);
+
+ rx_hdr = rx_buf->desc;
+ len = rx_hdr->l2_fhdr_pkt_len;
+ status = rx_hdr->l2_fhdr_status;
+
+ hdr_len = 0;
+ if (status & L2_FHDR_STATUS_SPLIT) {
+ hdr_len = rx_hdr->l2_fhdr_ip_xsum;
+ pg_ring_used = 1;
+ } else if (len > bp->rx_jumbo_thresh) {
+ hdr_len = bp->rx_jumbo_thresh;
+ pg_ring_used = 1;
+ }
+
+ if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
+ L2_FHDR_ERRORS_PHY_DECODE |
+ L2_FHDR_ERRORS_ALIGNMENT |
+ L2_FHDR_ERRORS_TOO_SHORT |
+ L2_FHDR_ERRORS_GIANT_FRAME))) {
+
+ bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+ sw_ring_prod);
+ if (pg_ring_used) {
+ int pages;
+
+ pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
+
+ bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
+ }
+ goto next_rx;
+ }
+
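+		/* The hardware length includes the 4-byte frame CRC; trim it. */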
+ len -= 4;
+
+ if (len <= bp->rx_copy_thresh) {
+ struct sk_buff *new_skb;
+
+ new_skb = netdev_alloc_skb(bp->dev, len + 6);
+ if (new_skb == NULL) {
+ bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+ sw_ring_prod);
+ goto next_rx;
+ }
+
+ /* aligned copy */
+ skb_copy_from_linear_data_offset(skb,
+ BNX2_RX_OFFSET - 6,
+ new_skb->data, len + 6);
+ skb_reserve(new_skb, 6);
+ skb_put(new_skb, len);
+
+ bnx2_reuse_rx_skb(bp, rxr, skb,
+ sw_ring_cons, sw_ring_prod);
+
+ skb = new_skb;
+ } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
+ dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
+ goto next_rx;
+
+ if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
+ !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
+ __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
+
+ skb->protocol = eth_type_trans(skb, bp->dev);
+
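+		/* Drop frames longer than the MTU unless they carry an
+		 * 802.1Q VLAN tag (protocol 0x8100), which accounts for
+		 * the extra 4 bytes.
+		 */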
+ if ((len > (bp->dev->mtu + ETH_HLEN)) &&
+ (ntohs(skb->protocol) != 0x8100)) {
+
+ dev_kfree_skb(skb);
+ goto next_rx;
+
+ }
+
+ skb_checksum_none_assert(skb);
+ if ((bp->dev->features & NETIF_F_RXCSUM) &&
+ (status & (L2_FHDR_STATUS_TCP_SEGMENT |
+ L2_FHDR_STATUS_UDP_DATAGRAM))) {
+
+ if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
+ L2_FHDR_ERRORS_UDP_XSUM)) == 0))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ if ((bp->dev->features & NETIF_F_RXHASH) &&
+ ((status & L2_FHDR_STATUS_USE_RXHASH) ==
+ L2_FHDR_STATUS_USE_RXHASH))
+ skb->rxhash = rx_hdr->l2_fhdr_hash;
+
+ skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
+ napi_gro_receive(&bnapi->napi, skb);
+ rx_pkt++;
+
+next_rx:
+ sw_cons = NEXT_RX_BD(sw_cons);
+ sw_prod = NEXT_RX_BD(sw_prod);
+
+		if (rx_pkt == budget)
+ break;
+
+ /* Refresh hw_cons to see if there is new work */
+ if (sw_cons == hw_cons) {
+ hw_cons = bnx2_get_hw_rx_cons(bnapi);
+ rmb();
+ }
+ }
+ rxr->rx_cons = sw_cons;
+ rxr->rx_prod = sw_prod;
+
+ if (pg_ring_used)
+ REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
+
+ REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
+
+ REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
+
+ mmiowb();
+
+ return rx_pkt;
+}
+
+/* MSI ISR - The only difference between this and the INTx ISR
+ * is that the MSI interrupt is always serviced.
+ */
+static irqreturn_t
+bnx2_msi(int irq, void *dev_instance)
+{
+ struct bnx2_napi *bnapi = dev_instance;
+ struct bnx2 *bp = bnapi->bp;
+
+ prefetch(bnapi->status_blk.msi);
+ REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+ BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
+ BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+
+ /* Return here if interrupt is disabled. */
+ if (unlikely(atomic_read(&bp->intr_sem) != 0))
+ return IRQ_HANDLED;
+
+ napi_schedule(&bnapi->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+bnx2_msi_1shot(int irq, void *dev_instance)
+{
+ struct bnx2_napi *bnapi = dev_instance;
+ struct bnx2 *bp = bnapi->bp;
+
+ prefetch(bnapi->status_blk.msi);
+
+ /* Return here if interrupt is disabled. */
+ if (unlikely(atomic_read(&bp->intr_sem) != 0))
+ return IRQ_HANDLED;
+
+ napi_schedule(&bnapi->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+bnx2_interrupt(int irq, void *dev_instance)
+{
+ struct bnx2_napi *bnapi = dev_instance;
+ struct bnx2 *bp = bnapi->bp;
+ struct status_block *sblk = bnapi->status_blk.msi;
+
+ /* When using INTx, it is possible for the interrupt to arrive
+ * at the CPU before the status block posted prior to the
+ * interrupt. Reading a register will flush the status block.
+ * When using MSI, the MSI message will always complete after
+ * the status block write.
+ */
+ if ((sblk->status_idx == bnapi->last_status_idx) &&
+ (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
+ BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
+ return IRQ_NONE;
+
+ REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+ BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
+ BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+
+ /* Read back to deassert IRQ immediately to avoid too many
+ * spurious interrupts.
+ */
+ REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
+
+ /* Return here if interrupt is shared and is disabled. */
+ if (unlikely(atomic_read(&bp->intr_sem) != 0))
+ return IRQ_HANDLED;
+
+ if (napi_schedule_prep(&bnapi->napi)) {
+ bnapi->last_status_idx = sblk->status_idx;
+ __napi_schedule(&bnapi->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static inline int
+bnx2_has_fast_work(struct bnx2_napi *bnapi)
+{
+ struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+
+ if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
+ (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
+ return 1;
+ return 0;
+}
+
+#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
+ STATUS_ATTN_BITS_TIMER_ABORT)
+
+static inline int
+bnx2_has_work(struct bnx2_napi *bnapi)
+{
+ struct status_block *sblk = bnapi->status_blk.msi;
+
+ if (bnx2_has_fast_work(bnapi))
+ return 1;
+
+#ifdef BCM_CNIC
+ if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
+ return 1;
+#endif
+
+ if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
+ (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
+ return 1;
+
+ return 0;
+}
+
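+/* Work around lost MSIs: if there is work pending but the status index
+ * has not advanced since the last idle check, the MSI may have been
+ * dropped; toggle the MSI enable bit and service the interrupt by hand.
+ */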
+static void
+bnx2_chk_missed_msi(struct bnx2 *bp)
+{
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+ u32 msi_ctrl;
+
+ if (bnx2_has_work(bnapi)) {
+ msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
+ if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
+ return;
+
+ if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
+ REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
+ ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
+ REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
+ bnx2_msi(bp->irq_tbl[0].vector, bnapi);
+ }
+ }
+
+ bp->idle_chk_status_idx = bnapi->last_status_idx;
+}
+
+#ifdef BCM_CNIC
+static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
+{
+ struct cnic_ops *c_ops;
+
+ if (!bnapi->cnic_present)
+ return;
+
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops)
+ bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
+ bnapi->status_blk.msi);
+ rcu_read_unlock();
+}
+#endif
+
+static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
+{
+ struct status_block *sblk = bnapi->status_blk.msi;
+ u32 status_attn_bits = sblk->status_attn_bits;
+ u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
+
+ if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
+ (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
+
+ bnx2_phy_int(bp, bnapi);
+
+ /* This is needed to take care of transient status
+ * during link changes.
+ */
+ REG_WR(bp, BNX2_HC_COMMAND,
+ bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+ REG_RD(bp, BNX2_HC_COMMAND);
+ }
+}
+
+static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
+ int work_done, int budget)
+{
+ struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+
+ if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
+ bnx2_tx_int(bp, bnapi, 0);
+
+ if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
+ work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
+
+ return work_done;
+}
+
+static int bnx2_poll_msix(struct napi_struct *napi, int budget)
+{
+ struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
+ struct bnx2 *bp = bnapi->bp;
+ int work_done = 0;
+ struct status_block_msix *sblk = bnapi->status_blk.msix;
+
+ while (1) {
+ work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
+ if (unlikely(work_done >= budget))
+ break;
+
+ bnapi->last_status_idx = sblk->status_idx;
+ /* status idx must be read before checking for more work. */
+ rmb();
+ if (likely(!bnx2_has_fast_work(bnapi))) {
+
+ napi_complete(napi);
+ REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ bnapi->last_status_idx);
+ break;
+ }
+ }
+ return work_done;
+}
+
+static int bnx2_poll(struct napi_struct *napi, int budget)
+{
+ struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
+ struct bnx2 *bp = bnapi->bp;
+ int work_done = 0;
+ struct status_block *sblk = bnapi->status_blk.msi;
+
+ while (1) {
+ bnx2_poll_link(bp, bnapi);
+
+ work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
+
+#ifdef BCM_CNIC
+ bnx2_poll_cnic(bp, bnapi);
+#endif
+
+ /* bnapi->last_status_idx is used below to tell the hw how
+ * much work has been processed, so we must read it before
+ * checking for more work.
+ */
+ bnapi->last_status_idx = sblk->status_idx;
+
+ if (unlikely(work_done >= budget))
+ break;
+
+ rmb();
+ if (likely(!bnx2_has_work(bnapi))) {
+ napi_complete(napi);
+ if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
+ REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ bnapi->last_status_idx);
+ break;
+ }
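+			/* INTx: acknowledge with the interrupt still masked,
+			 * then re-enable it with a second write.
+			 */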
+ REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
+ bnapi->last_status_idx);
+
+ REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+ bnapi->last_status_idx);
+ break;
+ }
+ }
+
+ return work_done;
+}
+
+/* Called with rtnl_lock from vlan functions and also netif_tx_lock
+ * from set_multicast.
+ */
+static void
+bnx2_set_rx_mode(struct net_device *dev)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ u32 rx_mode, sort_mode;
+ struct netdev_hw_addr *ha;
+ int i;
+
+ if (!netif_running(dev))
+ return;
+
+ spin_lock_bh(&bp->phy_lock);
+
+ rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
+ BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
+ sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
+ if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
+ (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
+ rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
+ if (dev->flags & IFF_PROMISC) {
+ /* Promiscuous mode. */
+ rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
+ sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
+ BNX2_RPM_SORT_USER0_PROM_VLAN;
+ }
+ else if (dev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
+ REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
+ 0xffffffff);
+ }
+ sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
+ }
+ else {
+ /* Accept one or more multicast(s). */
+ u32 mc_filter[NUM_MC_HASH_REGISTERS];
+ u32 regidx;
+ u32 bit;
+ u32 crc;
+
+ memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ bit = crc & 0xff;
+ regidx = (bit & 0xe0) >> 5;
+ bit &= 0x1f;
+ mc_filter[regidx] |= (1 << bit);
+ }
+
+ for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
+ REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
+ mc_filter[i]);
+ }
+
+ sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
+ }
+
+ if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
+ rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
+ sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
+ BNX2_RPM_SORT_USER0_PROM_VLAN;
+ } else if (!(dev->flags & IFF_PROMISC)) {
+		/* Add all entries to the match filter list */
+ i = 0;
+ netdev_for_each_uc_addr(ha, dev) {
+ bnx2_set_mac_addr(bp, ha->addr,
+ i + BNX2_START_UNICAST_ADDRESS_INDEX);
+ sort_mode |= (1 <<
+ (i + BNX2_START_UNICAST_ADDRESS_INDEX));
+ i++;
+ }
+
+ }
+
+ if (rx_mode != bp->rx_mode) {
+ bp->rx_mode = rx_mode;
+ REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
+ }
+
+ REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
+ REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
+ REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
+
+ spin_unlock_bh(&bp->phy_lock);
+}
+
+static int
+check_fw_section(const struct firmware *fw,
+ const struct bnx2_fw_file_section *section,
+ u32 alignment, bool non_empty)
+{
+ u32 offset = be32_to_cpu(section->offset);
+ u32 len = be32_to_cpu(section->len);
+
+ if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
+ return -EINVAL;
+ if ((non_empty && len == 0) || len > fw->size - offset ||
+ len & (alignment - 1))
+ return -EINVAL;
+ return 0;
+}
+
+static int
+check_mips_fw_entry(const struct firmware *fw,
+ const struct bnx2_mips_fw_file_entry *entry)
+{
+ if (check_fw_section(fw, &entry->text, 4, true) ||
+ check_fw_section(fw, &entry->data, 4, false) ||
+ check_fw_section(fw, &entry->rodata, 4, false))
+ return -EINVAL;
+ return 0;
+}
+
+static void bnx2_release_firmware(struct bnx2 *bp)
+{
+ if (bp->rv2p_firmware) {
+ release_firmware(bp->mips_firmware);
+ release_firmware(bp->rv2p_firmware);
+ bp->rv2p_firmware = NULL;
+ }
+}
+
+static int bnx2_request_uncached_firmware(struct bnx2 *bp)
+{
+ const char *mips_fw_file, *rv2p_fw_file;
+ const struct bnx2_mips_fw_file *mips_fw;
+ const struct bnx2_rv2p_fw_file *rv2p_fw;
+ int rc;
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ mips_fw_file = FW_MIPS_FILE_09;
+ if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
+ (CHIP_ID(bp) == CHIP_ID_5709_A1))
+ rv2p_fw_file = FW_RV2P_FILE_09_Ax;
+ else
+ rv2p_fw_file = FW_RV2P_FILE_09;
+ } else {
+ mips_fw_file = FW_MIPS_FILE_06;
+ rv2p_fw_file = FW_RV2P_FILE_06;
+ }
+
+ rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
+ if (rc) {
+ pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
+ goto out;
+ }
+
+ rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
+ if (rc) {
+ pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
+ goto err_release_mips_firmware;
+ }
+ mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
+ rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
+ if (bp->mips_firmware->size < sizeof(*mips_fw) ||
+ check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
+ check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
+ check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
+ check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
+ check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
+ pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
+ rc = -EINVAL;
+ goto err_release_firmware;
+ }
+ if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
+ check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
+ check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
+ pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
+ rc = -EINVAL;
+ goto err_release_firmware;
+ }
+out:
+ return rc;
+
+err_release_firmware:
+ release_firmware(bp->rv2p_firmware);
+ bp->rv2p_firmware = NULL;
+err_release_mips_firmware:
+ release_firmware(bp->mips_firmware);
+ goto out;
+}
+
+static int bnx2_request_firmware(struct bnx2 *bp)
+{
+ return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
+}
+
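+/* Patch board-specific values into RV2P code words; currently only the
+ * BD page size needs fixing up.
+ */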
+static u32
+rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
+{
+ switch (idx) {
+ case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
+ rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
+ rv2p_code |= RV2P_BD_PAGE_SIZE;
+ break;
+ }
+ return rv2p_code;
+}
+
+static int
+load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
+ const struct bnx2_rv2p_fw_file_entry *fw_entry)
+{
+ u32 rv2p_code_len, file_offset;
+ __be32 *rv2p_code;
+ int i;
+ u32 val, cmd, addr;
+
+ rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
+ file_offset = be32_to_cpu(fw_entry->rv2p.offset);
+
+ rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
+
+ if (rv2p_proc == RV2P_PROC1) {
+ cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
+ addr = BNX2_RV2P_PROC1_ADDR_CMD;
+ } else {
+ cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
+ addr = BNX2_RV2P_PROC2_ADDR_CMD;
+ }
+
+ for (i = 0; i < rv2p_code_len; i += 8) {
+ REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
+ rv2p_code++;
+ REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
+ rv2p_code++;
+
+ val = (i / 8) | cmd;
+ REG_WR(bp, addr, val);
+ }
+
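+	/* Apply the fixups: each fixup entry locates a 32-bit word in the
+	 * code, and the 64-bit instruction pair containing it is rewritten
+	 * with the patched low word.
+	 */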
+ rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
+ for (i = 0; i < 8; i++) {
+ u32 loc, code;
+
+ loc = be32_to_cpu(fw_entry->fixup[i]);
+ if (loc && ((loc * 4) < rv2p_code_len)) {
+ code = be32_to_cpu(*(rv2p_code + loc - 1));
+ REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
+ code = be32_to_cpu(*(rv2p_code + loc));
+ code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
+ REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
+
+ val = (loc / 2) | cmd;
+ REG_WR(bp, addr, val);
+ }
+ }
+
+ /* Reset the processor, un-stall is done later. */
+ if (rv2p_proc == RV2P_PROC1) {
+ REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
+ }
+ else {
+ REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
+ }
+
+ return 0;
+}
+
+static int
+load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
+ const struct bnx2_mips_fw_file_entry *fw_entry)
+{
+ u32 addr, len, file_offset;
+ __be32 *data;
+ u32 offset;
+ u32 val;
+
+ /* Halt the CPU. */
+ val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
+ val |= cpu_reg->mode_value_halt;
+ bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
+ bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
+
+ /* Load the Text area. */
+ addr = be32_to_cpu(fw_entry->text.addr);
+ len = be32_to_cpu(fw_entry->text.len);
+ file_offset = be32_to_cpu(fw_entry->text.offset);
+ data = (__be32 *)(bp->mips_firmware->data + file_offset);
+
+ offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
+ if (len) {
+ int j;
+
+ for (j = 0; j < (len / 4); j++, offset += 4)
+ bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
+ }
+
+ /* Load the Data area. */
+ addr = be32_to_cpu(fw_entry->data.addr);
+ len = be32_to_cpu(fw_entry->data.len);
+ file_offset = be32_to_cpu(fw_entry->data.offset);
+ data = (__be32 *)(bp->mips_firmware->data + file_offset);
+
+ offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
+ if (len) {
+ int j;
+
+ for (j = 0; j < (len / 4); j++, offset += 4)
+ bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
+ }
+
+ /* Load the Read-Only area. */
+ addr = be32_to_cpu(fw_entry->rodata.addr);
+ len = be32_to_cpu(fw_entry->rodata.len);
+ file_offset = be32_to_cpu(fw_entry->rodata.offset);
+ data = (__be32 *)(bp->mips_firmware->data + file_offset);
+
+ offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
+ if (len) {
+ int j;
+
+ for (j = 0; j < (len / 4); j++, offset += 4)
+ bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
+ }
+
+ /* Clear the pre-fetch instruction. */
+ bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
+
+ val = be32_to_cpu(fw_entry->start_addr);
+ bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
+
+ /* Start the CPU. */
+ val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
+ val &= ~cpu_reg->mode_value_halt;
+ bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
+ bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
+
+ return 0;
+}
+
+static int
+bnx2_init_cpus(struct bnx2 *bp)
+{
+ const struct bnx2_mips_fw_file *mips_fw =
+ (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
+ const struct bnx2_rv2p_fw_file *rv2p_fw =
+ (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
+ int rc;
+
+ /* Initialize the RV2P processor. */
+ load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
+ load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
+
+ /* Initialize the RX Processor. */
+ rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
+ if (rc)
+ goto init_cpu_err;
+
+ /* Initialize the TX Processor. */
+ rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
+ if (rc)
+ goto init_cpu_err;
+
+ /* Initialize the TX Patch-up Processor. */
+ rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
+ if (rc)
+ goto init_cpu_err;
+
+ /* Initialize the Completion Processor. */
+ rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
+ if (rc)
+ goto init_cpu_err;
+
+ /* Initialize the Command Processor. */
+ rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
+
+init_cpu_err:
+ return rc;
+}
+
+static int
+bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
+{
+ u16 pmcsr;
+
+ pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+
+ switch (state) {
+ case PCI_D0: {
+ u32 val;
+
+ pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+ (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
+ PCI_PM_CTRL_PME_STATUS);
+
+ if (pmcsr & PCI_PM_CTRL_STATE_MASK)
+ /* delay required during transition out of D3hot */
+ msleep(20);
+
+ val = REG_RD(bp, BNX2_EMAC_MODE);
+ val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
+ val &= ~BNX2_EMAC_MODE_MPKT;
+ REG_WR(bp, BNX2_EMAC_MODE, val);
+
+ val = REG_RD(bp, BNX2_RPM_CONFIG);
+ val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
+ REG_WR(bp, BNX2_RPM_CONFIG, val);
+ break;
+ }
+ case PCI_D3hot: {
+ int i;
+ u32 val, wol_msg;
+
+ if (bp->wol) {
+ u32 advertising;
+ u8 autoneg;
+
+ autoneg = bp->autoneg;
+ advertising = bp->advertising;
+
+ if (bp->phy_port == PORT_TP) {
+ bp->autoneg = AUTONEG_SPEED;
+ bp->advertising = ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_Autoneg;
+ }
+
+ spin_lock_bh(&bp->phy_lock);
+ bnx2_setup_phy(bp, bp->phy_port);
+ spin_unlock_bh(&bp->phy_lock);
+
+ bp->autoneg = autoneg;
+ bp->advertising = advertising;
+
+ bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
+
+ val = REG_RD(bp, BNX2_EMAC_MODE);
+
+ /* Enable port mode. */
+ val &= ~BNX2_EMAC_MODE_PORT;
+ val |= BNX2_EMAC_MODE_MPKT_RCVD |
+ BNX2_EMAC_MODE_ACPI_RCVD |
+ BNX2_EMAC_MODE_MPKT;
+ if (bp->phy_port == PORT_TP)
+ val |= BNX2_EMAC_MODE_PORT_MII;
+ else {
+ val |= BNX2_EMAC_MODE_PORT_GMII;
+ if (bp->line_speed == SPEED_2500)
+ val |= BNX2_EMAC_MODE_25G_MODE;
+ }
+
+ REG_WR(bp, BNX2_EMAC_MODE, val);
+
+ /* receive all multicast */
+ for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
+ REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
+ 0xffffffff);
+ }
+ REG_WR(bp, BNX2_EMAC_RX_MODE,
+ BNX2_EMAC_RX_MODE_SORT_MODE);
+
+ val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
+ BNX2_RPM_SORT_USER0_MC_EN;
+ REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
+ REG_WR(bp, BNX2_RPM_SORT_USER0, val);
+ REG_WR(bp, BNX2_RPM_SORT_USER0, val |
+ BNX2_RPM_SORT_USER0_ENA);
+
+ /* Need to enable EMAC and RPM for WOL. */
+ REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
+ BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
+ BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
+ BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
+
+ val = REG_RD(bp, BNX2_RPM_CONFIG);
+ val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
+ REG_WR(bp, BNX2_RPM_CONFIG, val);
+
+ wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
+		} else {
+			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
+		}
+
+ if (!(bp->flags & BNX2_FLAG_NO_WOL))
+ bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
+ 1, 0);
+
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
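+			/* 5706 A0/A1 can enter D3hot only when WOL is
+			 * enabled; all other chips always enter D3hot
+			 * (PM state value 3). */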
+ if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
+ (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
+				if (bp->wol)
+					pmcsr |= 3;
+			} else {
+				pmcsr |= 3;
+			}
+ if (bp->wol) {
+ pmcsr |= PCI_PM_CTRL_PME_ENABLE;
+ }
+ pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+ pmcsr);
+
+ /* No more memory access after this point until
+ * device is brought back to D0.
+ */
+ udelay(50);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+bnx2_acquire_nvram_lock(struct bnx2 *bp)
+{
+ u32 val;
+ int j;
+
+ /* Request access to the flash interface. */
+ REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
+ for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
+ val = REG_RD(bp, BNX2_NVM_SW_ARB);
+ if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
+ break;
+
+ udelay(5);
+ }
+
+ if (j >= NVRAM_TIMEOUT_COUNT)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int
+bnx2_release_nvram_lock(struct bnx2 *bp)
+{
+ int j;
+ u32 val;
+
+ /* Relinquish nvram interface. */
+ REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
+
+ for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
+ val = REG_RD(bp, BNX2_NVM_SW_ARB);
+ if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
+ break;
+
+ udelay(5);
+ }
+
+ if (j >= NVRAM_TIMEOUT_COUNT)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int
+bnx2_enable_nvram_write(struct bnx2 *bp)
+{
+ u32 val;
+
+ val = REG_RD(bp, BNX2_MISC_CFG);
+ REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
+
+ if (bp->flash_info->flags & BNX2_NV_WREN) {
+ int j;
+
+ REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
+ REG_WR(bp, BNX2_NVM_COMMAND,
+ BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
+
+ for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
+ udelay(5);
+
+ val = REG_RD(bp, BNX2_NVM_COMMAND);
+ if (val & BNX2_NVM_COMMAND_DONE)
+ break;
+ }
+
+ if (j >= NVRAM_TIMEOUT_COUNT)
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static void
+bnx2_disable_nvram_write(struct bnx2 *bp)
+{
+ u32 val;
+
+ val = REG_RD(bp, BNX2_MISC_CFG);
+ REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
+}
+
+static void
+bnx2_enable_nvram_access(struct bnx2 *bp)
+{
+ u32 val;
+
+ val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
+ /* Enable both bits, even on read. */
+ REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
+ val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
+}
+
+static void
+bnx2_disable_nvram_access(struct bnx2 *bp)
+{
+ u32 val;
+
+ val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
+ /* Disable both bits, even after read. */
+ REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
+ val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
+ BNX2_NVM_ACCESS_ENABLE_WR_EN));
+}
+
+static int
+bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
+{
+ u32 cmd;
+ int j;
+
+ if (bp->flash_info->flags & BNX2_NV_BUFFERED)
+ /* Buffered flash, no erase needed */
+ return 0;
+
+ /* Build an erase command */
+ cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
+ BNX2_NVM_COMMAND_DOIT;
+
+ /* Need to clear DONE bit separately. */
+ REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
+
+	/* Address of the NVRAM sector to erase. */
+ REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
+
+ /* Issue an erase command. */
+ REG_WR(bp, BNX2_NVM_COMMAND, cmd);
+
+ /* Wait for completion. */
+ for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
+ u32 val;
+
+ udelay(5);
+
+ val = REG_RD(bp, BNX2_NVM_COMMAND);
+ if (val & BNX2_NVM_COMMAND_DONE)
+ break;
+ }
+
+ if (j >= NVRAM_TIMEOUT_COUNT)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int
+bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
+{
+ u32 cmd;
+ int j;
+
+ /* Build the command word. */
+ cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
+
+	/* Calculate the offset within a buffered flash; not needed for the 5709. */
+ if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
+ offset = ((offset / bp->flash_info->page_size) <<
+ bp->flash_info->page_bits) +
+ (offset % bp->flash_info->page_size);
+ }
+
+ /* Need to clear DONE bit separately. */
+ REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
+
+ /* Address of the NVRAM to read from. */
+ REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
+
+ /* Issue a read command. */
+ REG_WR(bp, BNX2_NVM_COMMAND, cmd);
+
+ /* Wait for completion. */
+ for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
+ u32 val;
+
+ udelay(5);
+
+ val = REG_RD(bp, BNX2_NVM_COMMAND);
+ if (val & BNX2_NVM_COMMAND_DONE) {
+ __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
+ memcpy(ret_val, &v, 4);
+ break;
+ }
+ }
+ if (j >= NVRAM_TIMEOUT_COUNT)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int
+bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
+{
+ u32 cmd;
+ __be32 val32;
+ int j;
+
+ /* Build the command word. */
+ cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
+
+	/* Calculate the offset within a buffered flash; not needed for the 5709. */
+ if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
+ offset = ((offset / bp->flash_info->page_size) <<
+ bp->flash_info->page_bits) +
+ (offset % bp->flash_info->page_size);
+ }
+
+ /* Need to clear DONE bit separately. */
+ REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
+
+ memcpy(&val32, val, 4);
+
+ /* Write the data. */
+ REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
+
+ /* Address of the NVRAM to write to. */
+ REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
+
+ /* Issue the write command. */
+ REG_WR(bp, BNX2_NVM_COMMAND, cmd);
+
+ /* Wait for completion. */
+ for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
+ udelay(5);
+
+ if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
+ break;
+ }
+ if (j >= NVRAM_TIMEOUT_COUNT)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int
+bnx2_init_nvram(struct bnx2 *bp)
+{
+ u32 val;
+ int j, entry_count, rc = 0;
+ const struct flash_spec *flash;
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ bp->flash_info = &flash_5709;
+ goto get_flash_size;
+ }
+
+ /* Determine the selected interface. */
+ val = REG_RD(bp, BNX2_NVM_CFG1);
+
+ entry_count = ARRAY_SIZE(flash_table);
+
+ if (val & 0x40000000) {
+
+ /* Flash interface has been reconfigured */
+ for (j = 0, flash = &flash_table[0]; j < entry_count;
+ j++, flash++) {
+ if ((val & FLASH_BACKUP_STRAP_MASK) ==
+ (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
+ bp->flash_info = flash;
+ break;
+ }
+ }
+	} else {
+ u32 mask;
+ /* Not yet been reconfigured */
+
+ if (val & (1 << 23))
+ mask = FLASH_BACKUP_STRAP_MASK;
+ else
+ mask = FLASH_STRAP_MASK;
+
+ for (j = 0, flash = &flash_table[0]; j < entry_count;
+ j++, flash++) {
+
+ if ((val & mask) == (flash->strapping & mask)) {
+ bp->flash_info = flash;
+
+ /* Request access to the flash interface. */
+ if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
+ return rc;
+
+ /* Enable access to flash interface */
+ bnx2_enable_nvram_access(bp);
+
+ /* Reconfigure the flash interface */
+ REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
+ REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
+ REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
+ REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
+
+ /* Disable access to flash interface */
+ bnx2_disable_nvram_access(bp);
+ bnx2_release_nvram_lock(bp);
+
+ break;
+ }
+ }
+ } /* if (val & 0x40000000) */
+
+ if (j == entry_count) {
+ bp->flash_info = NULL;
+ pr_alert("Unknown flash/EEPROM type\n");
+ return -ENODEV;
+ }
+
+get_flash_size:
+ val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
+ val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
+ if (val)
+ bp->flash_size = val;
+ else
+ bp->flash_size = bp->flash_info->total_size;
+
+ return rc;
+}
+
+static int
+bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
+ int buf_size)
+{
+ int rc = 0;
+ u32 cmd_flags, offset32, len32, extra;
+
+ if (buf_size == 0)
+ return 0;
+
+ /* Request access to the flash interface. */
+ if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
+ return rc;
+
+ /* Enable access to flash interface */
+ bnx2_enable_nvram_access(bp);
+
+ len32 = buf_size;
+ offset32 = offset;
+ extra = 0;
+
+ cmd_flags = 0;
+
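+	/* Handle a non-dword-aligned start: read the enclosing dword
+	 * and copy out only the bytes requested. */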
+ if (offset32 & 3) {
+ u8 buf[4];
+ u32 pre_len;
+
+ offset32 &= ~3;
+ pre_len = 4 - (offset & 3);
+
+ if (pre_len >= len32) {
+ pre_len = len32;
+ cmd_flags = BNX2_NVM_COMMAND_FIRST |
+ BNX2_NVM_COMMAND_LAST;
+		} else {
+			cmd_flags = BNX2_NVM_COMMAND_FIRST;
+		}
+
+ rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
+
+ if (rc)
+ return rc;
+
+ memcpy(ret_buf, buf + (offset & 3), pre_len);
+
+ offset32 += 4;
+ ret_buf += pre_len;
+ len32 -= pre_len;
+ }
+ if (len32 & 3) {
+ extra = 4 - (len32 & 3);
+ len32 = (len32 + 4) & ~3;
+ }
+
+ if (len32 == 4) {
+ u8 buf[4];
+
+ if (cmd_flags)
+ cmd_flags = BNX2_NVM_COMMAND_LAST;
+ else
+ cmd_flags = BNX2_NVM_COMMAND_FIRST |
+ BNX2_NVM_COMMAND_LAST;
+
+ rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
+
+ memcpy(ret_buf, buf, 4 - extra);
+	} else if (len32 > 0) {
+ u8 buf[4];
+
+ /* Read the first word. */
+ if (cmd_flags)
+ cmd_flags = 0;
+ else
+ cmd_flags = BNX2_NVM_COMMAND_FIRST;
+
+ rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
+
+ /* Advance to the next dword. */
+ offset32 += 4;
+ ret_buf += 4;
+ len32 -= 4;
+
+ while (len32 > 4 && rc == 0) {
+ rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
+
+ /* Advance to the next dword. */
+ offset32 += 4;
+ ret_buf += 4;
+ len32 -= 4;
+ }
+
+ if (rc)
+ return rc;
+
+ cmd_flags = BNX2_NVM_COMMAND_LAST;
+ rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
+
+ memcpy(ret_buf, buf, 4 - extra);
+ }
+
+ /* Disable access to flash interface */
+ bnx2_disable_nvram_access(bp);
+
+ bnx2_release_nvram_lock(bp);
+
+ return rc;
+}
+
+static int
+bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
+ int buf_size)
+{
+ u32 written, offset32, len32;
+ u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
+ int rc = 0;
+ int align_start, align_end;
+
+ buf = data_buf;
+ offset32 = offset;
+ len32 = buf_size;
+ align_start = align_end = 0;
+
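+	/* Round the region out to dword boundaries, preserving the
+	 * existing bytes at the unaligned edges. */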
+ if ((align_start = (offset32 & 3))) {
+ offset32 &= ~3;
+ len32 += align_start;
+ if (len32 < 4)
+ len32 = 4;
+ if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
+ return rc;
+ }
+
+ if (len32 & 3) {
+ align_end = 4 - (len32 & 3);
+ len32 += align_end;
+ if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
+ return rc;
+ }
+
+ if (align_start || align_end) {
+ align_buf = kmalloc(len32, GFP_KERNEL);
+ if (align_buf == NULL)
+ return -ENOMEM;
+ if (align_start) {
+ memcpy(align_buf, start, 4);
+ }
+ if (align_end) {
+ memcpy(align_buf + len32 - 4, end, 4);
+ }
+ memcpy(align_buf + align_start, data_buf, buf_size);
+ buf = align_buf;
+ }
+
+ if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
+ flash_buffer = kmalloc(264, GFP_KERNEL);
+ if (flash_buffer == NULL) {
+ rc = -ENOMEM;
+ goto nvram_write_end;
+ }
+ }
+
+ written = 0;
+ while ((written < len32) && (rc == 0)) {
+ u32 page_start, page_end, data_start, data_end;
+ u32 addr, cmd_flags;
+ int i;
+
+ /* Find the page_start addr */
+ page_start = offset32 + written;
+ page_start -= (page_start % bp->flash_info->page_size);
+ /* Find the page_end addr */
+ page_end = page_start + bp->flash_info->page_size;
+ /* Find the data_start addr */
+ data_start = (written == 0) ? offset32 : page_start;
+ /* Find the data_end addr */
+ data_end = (page_end > offset32 + len32) ?
+ (offset32 + len32) : page_end;
+
+ /* Request access to the flash interface. */
+ if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
+ goto nvram_write_end;
+
+ /* Enable access to flash interface */
+ bnx2_enable_nvram_access(bp);
+
+ cmd_flags = BNX2_NVM_COMMAND_FIRST;
+ if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
+ int j;
+
+			/* Read the whole page into the buffer
+			 * (non-buffered flash only). */
+ for (j = 0; j < bp->flash_info->page_size; j += 4) {
+ if (j == (bp->flash_info->page_size - 4)) {
+ cmd_flags |= BNX2_NVM_COMMAND_LAST;
+ }
+ rc = bnx2_nvram_read_dword(bp,
+ page_start + j,
+ &flash_buffer[j],
+ cmd_flags);
+
+ if (rc)
+ goto nvram_write_end;
+
+ cmd_flags = 0;
+ }
+ }
+
+ /* Enable writes to flash interface (unlock write-protect) */
+ if ((rc = bnx2_enable_nvram_write(bp)) != 0)
+ goto nvram_write_end;
+
+ /* Loop to write back the buffer data from page_start to
+ * data_start */
+ i = 0;
+ if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
+ /* Erase the page */
+ if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
+ goto nvram_write_end;
+
+			/* Re-enable write access for the actual write */
+ bnx2_enable_nvram_write(bp);
+
+ for (addr = page_start; addr < data_start;
+ addr += 4, i += 4) {
+
+ rc = bnx2_nvram_write_dword(bp, addr,
+ &flash_buffer[i], cmd_flags);
+
+ if (rc != 0)
+ goto nvram_write_end;
+
+ cmd_flags = 0;
+ }
+ }
+
+ /* Loop to write the new data from data_start to data_end */
+ for (addr = data_start; addr < data_end; addr += 4, i += 4) {
+ if ((addr == page_end - 4) ||
+ ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
+ (addr == data_end - 4))) {
+
+ cmd_flags |= BNX2_NVM_COMMAND_LAST;
+ }
+ rc = bnx2_nvram_write_dword(bp, addr, buf,
+ cmd_flags);
+
+ if (rc != 0)
+ goto nvram_write_end;
+
+ cmd_flags = 0;
+ buf += 4;
+ }
+
+ /* Loop to write back the buffer data from data_end
+ * to page_end */
+ if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
+ for (addr = data_end; addr < page_end;
+ addr += 4, i += 4) {
+
+ if (addr == page_end-4) {
+ cmd_flags = BNX2_NVM_COMMAND_LAST;
+ }
+ rc = bnx2_nvram_write_dword(bp, addr,
+ &flash_buffer[i], cmd_flags);
+
+ if (rc != 0)
+ goto nvram_write_end;
+
+ cmd_flags = 0;
+ }
+ }
+
+ /* Disable writes to flash interface (lock write-protect) */
+ bnx2_disable_nvram_write(bp);
+
+ /* Disable access to flash interface */
+ bnx2_disable_nvram_access(bp);
+ bnx2_release_nvram_lock(bp);
+
+ /* Increment written */
+ written += data_end - data_start;
+ }
+
+nvram_write_end:
+ kfree(flash_buffer);
+ kfree(align_buf);
+ return rc;
+}
+
+static void
+bnx2_init_fw_cap(struct bnx2 *bp)
+{
+ u32 val, sig = 0;
+
+ bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
+ bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
+
+ if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
+ bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
+
+ val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
+ if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
+ return;
+
+ if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
+ bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
+ sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
+ }
+
+ if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+ (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
+ u32 link;
+
+ bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
+
+ link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
+ if (link & BNX2_LINK_STATUS_SERDES_LINK)
+ bp->phy_port = PORT_FIBRE;
+ else
+ bp->phy_port = PORT_TP;
+
+ sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
+ BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
+ }
+
+ if (netif_running(bp->dev) && sig)
+ bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
+}
+
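+/* Map the MSI-X vector table and PBA through separate GRC windows. */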
+static void
+bnx2_setup_msix_tbl(struct bnx2 *bp)
+{
+ REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
+
+ REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
+ REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
+}
+
+static int
+bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
+{
+ u32 val;
+ int i, rc = 0;
+ u8 old_port;
+
+ /* Wait for the current PCI transaction to complete before
+ * issuing a reset. */
+ if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
+ (CHIP_NUM(bp) == CHIP_NUM_5708)) {
+ REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
+ BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
+ BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
+ BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
+ BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
+ val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
+ udelay(5);
+ } else { /* 5709 */
+ val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+ val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
+ REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
+ val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+
+ for (i = 0; i < 100; i++) {
+ msleep(1);
+ val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
+ if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
+ break;
+ }
+ }
+
+ /* Wait for the firmware to tell us it is ok to issue a reset. */
+ bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
+
+ /* Deposit a driver reset signature so the firmware knows that
+ * this is a soft reset. */
+ bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
+ BNX2_DRV_RESET_SIGNATURE_MAGIC);
+
+	/* Do a dummy read to force the chip to complete all current
+	 * transactions before we issue a reset. */
+ val = REG_RD(bp, BNX2_MISC_ID);
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
+ REG_RD(bp, BNX2_MISC_COMMAND);
+ udelay(5);
+
+ val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
+ BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
+
+ REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
+
+ } else {
+ val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
+ BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
+ BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
+
+ /* Chip reset. */
+ REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
+
+ /* Reading back any register after chip reset will hang the
+ * bus on 5706 A0 and A1. The msleep below provides plenty
+ * of margin for write posting.
+ */
+ if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
+ (CHIP_ID(bp) == CHIP_ID_5706_A1))
+ msleep(20);
+
+		/* Reset takes approximately 30 usec */
+ for (i = 0; i < 10; i++) {
+ val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
+ if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
+ BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
+ break;
+ udelay(10);
+ }
+
+ if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
+ BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
+ pr_err("Chip reset did not complete\n");
+ return -EBUSY;
+ }
+ }
+
+ /* Make sure byte swapping is properly configured. */
+ val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
+ if (val != 0x01020304) {
+ pr_err("Chip not in correct endian mode\n");
+ return -ENODEV;
+ }
+
+ /* Wait for the firmware to finish its initialization. */
+ rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
+ if (rc)
+ return rc;
+
+ spin_lock_bh(&bp->phy_lock);
+ old_port = bp->phy_port;
+ bnx2_init_fw_cap(bp);
+ if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
+ old_port != bp->phy_port)
+ bnx2_set_default_remote_link(bp);
+ spin_unlock_bh(&bp->phy_lock);
+
+ if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
+		/* Adjust the voltage regulator two steps lower. The default
+		 * value of this register is 0x0000000e. */
+ REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
+
+ /* Remove bad rbuf memory from the free pool. */
+ rc = bnx2_alloc_bad_rbuf(bp);
+ }
+
+ if (bp->flags & BNX2_FLAG_USING_MSIX) {
+ bnx2_setup_msix_tbl(bp);
+		/* Prevent MSIX table reads and writes from timing out */
+ REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
+ BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
+ }
+
+ return rc;
+}
+
+static int
+bnx2_init_chip(struct bnx2 *bp)
+{
+ u32 val, mtu;
+ int rc, i;
+
+ /* Make sure the interrupt is not active. */
+ REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+
+ val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
+ BNX2_DMA_CONFIG_DATA_WORD_SWAP |
+#ifdef __BIG_ENDIAN
+ BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
+#endif
+ BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
+ DMA_READ_CHANS << 12 |
+ DMA_WRITE_CHANS << 16;
+
+ val |= (0x2 << 20) | (1 << 11);
+
+ if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
+ val |= (1 << 23);
+
+ if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
+ (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
+ val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
+
+ REG_WR(bp, BNX2_DMA_CONFIG, val);
+
+ if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
+ val = REG_RD(bp, BNX2_TDMA_CONFIG);
+ val |= BNX2_TDMA_CONFIG_ONE_DMA;
+ REG_WR(bp, BNX2_TDMA_CONFIG, val);
+ }
+
+ if (bp->flags & BNX2_FLAG_PCIX) {
+ u16 val16;
+
+ pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
+ &val16);
+ pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
+ val16 & ~PCI_X_CMD_ERO);
+ }
+
+ REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
+ BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
+ BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
+ BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
+
+ /* Initialize context mapping and zero out the quick contexts. The
+ * context block must have already been enabled. */
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ rc = bnx2_init_5709_context(bp);
+ if (rc)
+ return rc;
+ } else
+ bnx2_init_context(bp);
+
+ if ((rc = bnx2_init_cpus(bp)) != 0)
+ return rc;
+
+ bnx2_init_nvram(bp);
+
+ bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
+
+ val = REG_RD(bp, BNX2_MQ_CONFIG);
+ val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
+ val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
+ if (CHIP_REV(bp) == CHIP_REV_Ax)
+ val |= BNX2_MQ_CONFIG_HALT_DIS;
+ }
+
+ REG_WR(bp, BNX2_MQ_CONFIG, val);
+
+ val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
+ REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
+ REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
+
+ val = (BCM_PAGE_BITS - 8) << 24;
+ REG_WR(bp, BNX2_RV2P_CONFIG, val);
+
+ /* Configure page size. */
+ val = REG_RD(bp, BNX2_TBDR_CONFIG);
+ val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
+ val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
+ REG_WR(bp, BNX2_TBDR_CONFIG, val);
+
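+	/* Seed the EMAC backoff algorithm from the MAC address. */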
+ val = bp->mac_addr[0] +
+ (bp->mac_addr[1] << 8) +
+ (bp->mac_addr[2] << 16) +
+ bp->mac_addr[3] +
+ (bp->mac_addr[4] << 8) +
+ (bp->mac_addr[5] << 16);
+ REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
+
+ /* Program the MTU. Also include 4 bytes for CRC32. */
+ mtu = bp->dev->mtu;
+ val = mtu + ETH_HLEN + ETH_FCS_LEN;
+ if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
+ val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
+ REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
+
+ if (mtu < 1500)
+ mtu = 1500;
+
+ bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
+ bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
+ bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
+
+ memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
+ for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
+ bp->bnx2_napi[i].last_status_idx = 0;
+
+ bp->idle_chk_status_idx = 0xffff;
+
+ bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
+
+ /* Set up how to generate a link change interrupt. */
+ REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
+
+ REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
+ (u64) bp->status_blk_mapping & 0xffffffff);
+ REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
+
+ REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
+ (u64) bp->stats_blk_mapping & 0xffffffff);
+ REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
+ (u64) bp->stats_blk_mapping >> 32);
+
+ REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
+ (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
+
+ REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
+ (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
+
+ REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
+ (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
+
+ REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
+
+ REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
+
+ REG_WR(bp, BNX2_HC_COM_TICKS,
+ (bp->com_ticks_int << 16) | bp->com_ticks);
+
+ REG_WR(bp, BNX2_HC_CMD_TICKS,
+ (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
+
+ if (bp->flags & BNX2_FLAG_BROKEN_STATS)
+ REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
+ else
+ REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
+ REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
+
+	if (CHIP_ID(bp) == CHIP_ID_5706_A1) {
+		val = BNX2_HC_CONFIG_COLLECT_STATS;
+	} else {
+		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
+		      BNX2_HC_CONFIG_COLLECT_STATS;
+	}
+
+ if (bp->flags & BNX2_FLAG_USING_MSIX) {
+ REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
+ BNX2_HC_MSIX_BIT_VECTOR_VAL);
+
+ val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
+ }
+
+ if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
+ val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
+
+ REG_WR(bp, BNX2_HC_CONFIG, val);
+
+ if (bp->rx_ticks < 25)
+ bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
+ else
+ bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
+
+ for (i = 1; i < bp->irq_nvecs; i++) {
+ u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
+ BNX2_HC_SB_CONFIG_1;
+
+ REG_WR(bp, base,
+ BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
+ BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
+ BNX2_HC_SB_CONFIG_1_ONE_SHOT);
+
+ REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
+ (bp->tx_quick_cons_trip_int << 16) |
+ bp->tx_quick_cons_trip);
+
+ REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
+ (bp->tx_ticks_int << 16) | bp->tx_ticks);
+
+ REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
+ (bp->rx_quick_cons_trip_int << 16) |
+ bp->rx_quick_cons_trip);
+
+ REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
+ (bp->rx_ticks_int << 16) | bp->rx_ticks);
+ }
+
+ /* Clear internal stats counters. */
+ REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
+
+ REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
+
+ /* Initialize the receive filter. */
+ bnx2_set_rx_mode(bp->dev);
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+ val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
+ REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
+ }
+ rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
+ 1, 0);
+
+ REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
+ REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
+
+ udelay(20);
+
+ bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
+
+ return rc;
+}
+
+static void
+bnx2_clear_ring_states(struct bnx2 *bp)
+{
+ struct bnx2_napi *bnapi;
+ struct bnx2_tx_ring_info *txr;
+ struct bnx2_rx_ring_info *rxr;
+ int i;
+
+ for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
+ bnapi = &bp->bnx2_napi[i];
+ txr = &bnapi->tx_ring;
+ rxr = &bnapi->rx_ring;
+
+ txr->tx_cons = 0;
+ txr->hw_tx_cons = 0;
+ rxr->rx_prod_bseq = 0;
+ rxr->rx_prod = 0;
+ rxr->rx_cons = 0;
+ rxr->rx_pg_prod = 0;
+ rxr->rx_pg_cons = 0;
+ }
+}
+
+static void
+bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
+{
+ u32 val, offset0, offset1, offset2, offset3;
+ u32 cid_addr = GET_CID_ADDR(cid);
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ offset0 = BNX2_L2CTX_TYPE_XI;
+ offset1 = BNX2_L2CTX_CMD_TYPE_XI;
+ offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
+ offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
+ } else {
+ offset0 = BNX2_L2CTX_TYPE;
+ offset1 = BNX2_L2CTX_CMD_TYPE;
+ offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
+ offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
+ }
+ val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
+ bnx2_ctx_wr(bp, cid_addr, offset0, val);
+
+ val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
+ bnx2_ctx_wr(bp, cid_addr, offset1, val);
+
+ val = (u64) txr->tx_desc_mapping >> 32;
+ bnx2_ctx_wr(bp, cid_addr, offset2, val);
+
+ val = (u64) txr->tx_desc_mapping & 0xffffffff;
+ bnx2_ctx_wr(bp, cid_addr, offset3, val);
+}
+
+static void
+bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
+{
+ struct tx_bd *txbd;
+ u32 cid = TX_CID;
+ struct bnx2_napi *bnapi;
+ struct bnx2_tx_ring_info *txr;
+
+ bnapi = &bp->bnx2_napi[ring_num];
+ txr = &bnapi->tx_ring;
+
+ if (ring_num == 0)
+ cid = TX_CID;
+ else
+ cid = TX_TSS_CID + ring_num - 1;
+
+ bp->tx_wake_thresh = bp->tx_ring_size / 2;
+
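+	/* The last BD in the ring is a chain BD pointing back to the
+	 * start of the ring. */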
+ txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
+
+ txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
+ txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
+
+ txr->tx_prod = 0;
+ txr->tx_prod_bseq = 0;
+
+ txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
+ txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
+
+ bnx2_init_tx_context(bp, cid, txr);
+}
+
+static void
+bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
+ int num_rings)
+{
+ int i;
+ struct rx_bd *rxbd;
+
+ for (i = 0; i < num_rings; i++) {
+ int j;
+
+ rxbd = &rx_ring[i][0];
+ for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
+ rxbd->rx_bd_len = buf_size;
+ rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
+ }
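+		/* The last BD of each page chains to the next page,
+		 * wrapping from the final page back to page 0. */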
+ if (i == (num_rings - 1))
+ j = 0;
+ else
+ j = i + 1;
+ rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
+ rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
+ }
+}
+
+static void
+bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
+{
+ int i;
+ u16 prod, ring_prod;
+ u32 cid, rx_cid_addr, val;
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+
+ if (ring_num == 0)
+ cid = RX_CID;
+ else
+ cid = RX_RSS_CID + ring_num - 1;
+
+ rx_cid_addr = GET_CID_ADDR(cid);
+
+ bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
+ bp->rx_buf_use_size, bp->rx_max_ring);
+
+ bnx2_init_rx_context(bp, cid);
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
+ REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
+ }
+
+ bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
+ if (bp->rx_pg_ring_size) {
+ bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
+ rxr->rx_pg_desc_mapping,
+ PAGE_SIZE, bp->rx_max_pg_ring);
+ val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
+ bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
+ bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
+ BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
+
+ val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
+ bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
+
+ val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
+ bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
+ }
+
+ val = (u64) rxr->rx_desc_mapping[0] >> 32;
+ bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
+
+ val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
+ bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
+
+ ring_prod = prod = rxr->rx_pg_prod;
+ for (i = 0; i < bp->rx_pg_ring_size; i++) {
+ if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
+ netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
+ ring_num, i, bp->rx_pg_ring_size);
+ break;
+ }
+ prod = NEXT_RX_BD(prod);
+ ring_prod = RX_PG_RING_IDX(prod);
+ }
+ rxr->rx_pg_prod = prod;
+
+ ring_prod = prod = rxr->rx_prod;
+ for (i = 0; i < bp->rx_ring_size; i++) {
+ if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
+ netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
+ ring_num, i, bp->rx_ring_size);
+ break;
+ }
+ prod = NEXT_RX_BD(prod);
+ ring_prod = RX_RING_IDX(prod);
+ }
+ rxr->rx_prod = prod;
+
+ rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
+ rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
+ rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
+
+ REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
+ REG_WR16(bp, rxr->rx_bidx_addr, prod);
+
+ REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
+}
+
+static void
+bnx2_init_all_rings(struct bnx2 *bp)
+{
+ int i;
+ u32 val;
+
+ bnx2_clear_ring_states(bp);
+
+ REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
+ for (i = 0; i < bp->num_tx_rings; i++)
+ bnx2_init_tx_ring(bp, i);
+
+ if (bp->num_tx_rings > 1)
+ REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
+ (TX_TSS_CID << 7));
+
+ REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
+ bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
+
+ for (i = 0; i < bp->num_rx_rings; i++)
+ bnx2_init_rx_ring(bp, i);
+
+ if (bp->num_rx_rings > 1) {
+ u32 tbl_32 = 0;
+
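+		/* Build the RSS indirection table, eight 4-bit entries
+		 * per 32-bit word, striped across the non-default rings. */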
+ for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
+ int shift = (i % 8) << 2;
+
+ tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
+ if ((i % 8) == 7) {
+ REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
+ REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
+ BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
+ BNX2_RLUP_RSS_COMMAND_WRITE |
+ BNX2_RLUP_RSS_COMMAND_HASH_MASK);
+ tbl_32 = 0;
+ }
+ }
+
+ val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
+ BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
+
+ REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
+
+ }
+}
+
+static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
+{
+ u32 max, num_rings = 1;
+
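+	/* Count the ring pages needed to hold ring_size descriptors. */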
+ while (ring_size > MAX_RX_DESC_CNT) {
+ ring_size -= MAX_RX_DESC_CNT;
+ num_rings++;
+ }
+ /* round to next power of 2 */
+ max = max_size;
+ while ((max & num_rings) == 0)
+ max >>= 1;
+
+ if (num_rings != max)
+ max <<= 1;
+
+ return max;
+}
+
+static void
+bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
+{
+ u32 rx_size, rx_space, jumbo_size;
+
+ /* 8 for CRC and VLAN */
+ rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
+
+ rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
+ sizeof(struct skb_shared_info);
+
+ bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
+ bp->rx_pg_ring_size = 0;
+ bp->rx_max_pg_ring = 0;
+ bp->rx_max_pg_ring_idx = 0;
+ if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
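+		/* Estimate the pages needed per jumbo packet; the 40 bytes
+		 * roughly account for the TCP/IP headers. */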
+ int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
+
+ jumbo_size = size * pages;
+ if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
+ jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
+
+ bp->rx_pg_ring_size = jumbo_size;
+ bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
+ MAX_RX_PG_RINGS);
+ bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
+ rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
+ bp->rx_copy_thresh = 0;
+ }
+
+ bp->rx_buf_use_size = rx_size;
+ /* hw alignment */
+ bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
+ bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
+ bp->rx_ring_size = size;
+ bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
+ bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
+}
+
+static void
+bnx2_free_tx_skbs(struct bnx2 *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->num_tx_rings; i++) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+ int j;
+
+ if (txr->tx_buf_ring == NULL)
+ continue;
+
+ for (j = 0; j < TX_DESC_CNT; ) {
+ struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
+ struct sk_buff *skb = tx_buf->skb;
+ int k, last;
+
+ if (skb == NULL) {
+ j++;
+ continue;
+ }
+
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb),
+				 DMA_TO_DEVICE);
+
+ tx_buf->skb = NULL;
+
+ last = tx_buf->nr_frags;
+ j++;
+ for (k = 0; k < last; k++, j++) {
+ tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
+ dma_unmap_page(&bp->pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_shinfo(skb)->frags[k].size,
+					 DMA_TO_DEVICE);
+ }
+ dev_kfree_skb(skb);
+ }
+ }
+}
+
+static void
+bnx2_free_rx_skbs(struct bnx2 *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->num_rx_rings; i++) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+ int j;
+
+ if (rxr->rx_buf_ring == NULL)
+ return;
+
+ for (j = 0; j < bp->rx_max_ring_idx; j++) {
+ struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
+ struct sk_buff *skb = rx_buf->skb;
+
+ if (skb == NULL)
+ continue;
+
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ bp->rx_buf_use_size,
+					 DMA_FROM_DEVICE);
+
+ rx_buf->skb = NULL;
+
+ dev_kfree_skb(skb);
+ }
+ for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
+ bnx2_free_rx_page(bp, rxr, j);
+ }
+}
+
+static void
+bnx2_free_skbs(struct bnx2 *bp)
+{
+ bnx2_free_tx_skbs(bp);
+ bnx2_free_rx_skbs(bp);
+}
+
+static int
+bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
+{
+ int rc;
+
+ rc = bnx2_reset_chip(bp, reset_code);
+ bnx2_free_skbs(bp);
+ if (rc)
+ return rc;
+
+ if ((rc = bnx2_init_chip(bp)) != 0)
+ return rc;
+
+ bnx2_init_all_rings(bp);
+ return 0;
+}
+
+static int
+bnx2_init_nic(struct bnx2 *bp, int reset_phy)
+{
+ int rc;
+
+ if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
+ return rc;
+
+ spin_lock_bh(&bp->phy_lock);
+ bnx2_init_phy(bp, reset_phy);
+ bnx2_set_link(bp);
+ if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+ bnx2_remote_phy_event(bp);
+ spin_unlock_bh(&bp->phy_lock);
+ return 0;
+}
+
+static int
+bnx2_shutdown_chip(struct bnx2 *bp)
+{
+ u32 reset_code;
+
+ if (bp->flags & BNX2_FLAG_NO_WOL)
+ reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
+ else if (bp->wol)
+ reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
+ else
+ reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
+
+ return bnx2_reset_chip(bp, reset_code);
+}
+
+static int
+bnx2_test_registers(struct bnx2 *bp)
+{
+ int ret;
+ int i, is_5709;
+ static const struct {
+ u16 offset;
+ u16 flags;
+#define BNX2_FL_NOT_5709 1
+ u32 rw_mask;
+ u32 ro_mask;
+ } reg_tbl[] = {
+ { 0x006c, 0, 0x00000000, 0x0000003f },
+ { 0x0090, 0, 0xffffffff, 0x00000000 },
+ { 0x0094, 0, 0x00000000, 0x00000000 },
+
+ { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
+ { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
+ { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
+ { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
+ { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
+ { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
+ { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
+ { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
+ { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
+
+ { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
+ { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
+ { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
+ { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
+ { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
+ { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
+
+ { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
+ { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
+ { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
+
+ { 0x1000, 0, 0x00000000, 0x00000001 },
+ { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
+
+ { 0x1408, 0, 0x01c00800, 0x00000000 },
+ { 0x149c, 0, 0x8000ffff, 0x00000000 },
+ { 0x14a8, 0, 0x00000000, 0x000001ff },
+ { 0x14ac, 0, 0x0fffffff, 0x10000000 },
+ { 0x14b0, 0, 0x00000002, 0x00000001 },
+ { 0x14b8, 0, 0x00000000, 0x00000000 },
+ { 0x14c0, 0, 0x00000000, 0x00000009 },
+ { 0x14c4, 0, 0x00003fff, 0x00000000 },
+ { 0x14cc, 0, 0x00000000, 0x00000001 },
+ { 0x14d0, 0, 0xffffffff, 0x00000000 },
+
+ { 0x1800, 0, 0x00000000, 0x00000001 },
+ { 0x1804, 0, 0x00000000, 0x00000003 },
+
+ { 0x2800, 0, 0x00000000, 0x00000001 },
+ { 0x2804, 0, 0x00000000, 0x00003f01 },
+ { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
+ { 0x2810, 0, 0xffff0000, 0x00000000 },
+ { 0x2814, 0, 0xffff0000, 0x00000000 },
+ { 0x2818, 0, 0xffff0000, 0x00000000 },
+ { 0x281c, 0, 0xffff0000, 0x00000000 },
+ { 0x2834, 0, 0xffffffff, 0x00000000 },
+ { 0x2840, 0, 0x00000000, 0xffffffff },
+ { 0x2844, 0, 0x00000000, 0xffffffff },
+ { 0x2848, 0, 0xffffffff, 0x00000000 },
+ { 0x284c, 0, 0xf800f800, 0x07ff07ff },
+
+ { 0x2c00, 0, 0x00000000, 0x00000011 },
+ { 0x2c04, 0, 0x00000000, 0x00030007 },
+
+ { 0x3c00, 0, 0x00000000, 0x00000001 },
+ { 0x3c04, 0, 0x00000000, 0x00070000 },
+ { 0x3c08, 0, 0x00007f71, 0x07f00000 },
+ { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
+ { 0x3c10, 0, 0xffffffff, 0x00000000 },
+ { 0x3c14, 0, 0x00000000, 0xffffffff },
+ { 0x3c18, 0, 0x00000000, 0xffffffff },
+ { 0x3c1c, 0, 0xfffff000, 0x00000000 },
+ { 0x3c20, 0, 0xffffff00, 0x00000000 },
+
+ { 0x5004, 0, 0x00000000, 0x0000007f },
+ { 0x5008, 0, 0x0f0007ff, 0x00000000 },
+
+ { 0x5c00, 0, 0x00000000, 0x00000001 },
+ { 0x5c04, 0, 0x00000000, 0x0003000f },
+ { 0x5c08, 0, 0x00000003, 0x00000000 },
+ { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
+ { 0x5c10, 0, 0x00000000, 0xffffffff },
+ { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
+ { 0x5c84, 0, 0x00000000, 0x0000f333 },
+ { 0x5c88, 0, 0x00000000, 0x00077373 },
+ { 0x5c8c, 0, 0x00000000, 0x0007f737 },
+
+ { 0x6808, 0, 0x0000ff7f, 0x00000000 },
+ { 0x680c, 0, 0xffffffff, 0x00000000 },
+ { 0x6810, 0, 0xffffffff, 0x00000000 },
+ { 0x6814, 0, 0xffffffff, 0x00000000 },
+ { 0x6818, 0, 0xffffffff, 0x00000000 },
+ { 0x681c, 0, 0xffffffff, 0x00000000 },
+ { 0x6820, 0, 0x00ff00ff, 0x00000000 },
+ { 0x6824, 0, 0x00ff00ff, 0x00000000 },
+ { 0x6828, 0, 0x00ff00ff, 0x00000000 },
+ { 0x682c, 0, 0x03ff03ff, 0x00000000 },
+ { 0x6830, 0, 0x03ff03ff, 0x00000000 },
+ { 0x6834, 0, 0x03ff03ff, 0x00000000 },
+ { 0x6838, 0, 0x03ff03ff, 0x00000000 },
+ { 0x683c, 0, 0x0000ffff, 0x00000000 },
+ { 0x6840, 0, 0x00000ff0, 0x00000000 },
+ { 0x6844, 0, 0x00ffff00, 0x00000000 },
+ { 0x684c, 0, 0xffffffff, 0x00000000 },
+ { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
+ { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
+ { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
+ { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
+ { 0x6908, 0, 0x00000000, 0x0001ff0f },
+ { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
+
+ { 0xffff, 0, 0x00000000, 0x00000000 },
+ };
+
+ ret = 0;
+ is_5709 = 0;
+ if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ is_5709 = 1;
+
+ for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
+ u32 offset, rw_mask, ro_mask, save_val, val;
+ u16 flags = reg_tbl[i].flags;
+
+ if (is_5709 && (flags & BNX2_FL_NOT_5709))
+ continue;
+
+ offset = (u32) reg_tbl[i].offset;
+ rw_mask = reg_tbl[i].rw_mask;
+ ro_mask = reg_tbl[i].ro_mask;
+
+ save_val = readl(bp->regview + offset);
+
+ writel(0, bp->regview + offset);
+
+		val = readl(bp->regview + offset);
+		if ((val & rw_mask) != 0)
+			goto reg_test_err;
+
+		if ((val & ro_mask) != (save_val & ro_mask))
+			goto reg_test_err;
+
+		writel(0xffffffff, bp->regview + offset);
+
+		val = readl(bp->regview + offset);
+		if ((val & rw_mask) != rw_mask)
+			goto reg_test_err;
+
+		if ((val & ro_mask) != (save_val & ro_mask))
+			goto reg_test_err;
+
+ writel(save_val, bp->regview + offset);
+ continue;
+
+reg_test_err:
+ writel(save_val, bp->regview + offset);
+ ret = -ENODEV;
+ break;
+ }
+ return ret;
+}
+
+static int
+bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
+{
+ static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
+ 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
+ int i;
+
+ for (i = 0; i < sizeof(test_pattern) / 4; i++) {
+ u32 offset;
+
+ for (offset = 0; offset < size; offset += 4) {
+
+ bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
+
+ if (bnx2_reg_rd_ind(bp, start + offset) !=
+ test_pattern[i]) {
+ return -ENODEV;
+ }
+ }
+ }
+ return 0;
+}
+
+static int
+bnx2_test_memory(struct bnx2 *bp)
+{
+ int ret = 0;
+ int i;
+	static const struct mem_entry {
+ u32 offset;
+ u32 len;
+ } mem_tbl_5706[] = {
+ { 0x60000, 0x4000 },
+ { 0xa0000, 0x3000 },
+ { 0xe0000, 0x4000 },
+ { 0x120000, 0x4000 },
+ { 0x1a0000, 0x4000 },
+ { 0x160000, 0x4000 },
+ { 0xffffffff, 0 },
+ },
+ mem_tbl_5709[] = {
+ { 0x60000, 0x4000 },
+ { 0xa0000, 0x3000 },
+ { 0xe0000, 0x4000 },
+ { 0x120000, 0x4000 },
+ { 0x1a0000, 0x4000 },
+ { 0xffffffff, 0 },
+ };
+	const struct mem_entry *mem_tbl;
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ mem_tbl = mem_tbl_5709;
+ else
+ mem_tbl = mem_tbl_5706;
+
+ for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
+ if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
+ mem_tbl[i].len)) != 0) {
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+#define BNX2_MAC_LOOPBACK 0
+#define BNX2_PHY_LOOPBACK 1
+
+static int
+bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
+{
+ unsigned int pkt_size, num_pkts, i;
+ struct sk_buff *skb, *rx_skb;
+ unsigned char *packet;
+ u16 rx_start_idx, rx_idx;
+ dma_addr_t map;
+ struct tx_bd *txbd;
+ struct sw_bd *rx_buf;
+ struct l2_fhdr *rx_hdr;
+ int ret = -ENODEV;
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
+	struct bnx2_tx_ring_info *txr;
+	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+
+	tx_napi = bnapi;
+	txr = &tx_napi->tx_ring;
+
+ if (loopback_mode == BNX2_MAC_LOOPBACK) {
+ bp->loopback = MAC_LOOPBACK;
+ bnx2_set_mac_loopback(bp);
+	} else if (loopback_mode == BNX2_PHY_LOOPBACK) {
+ if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+ return 0;
+
+ bp->loopback = PHY_LOOPBACK;
+ bnx2_set_phy_loopback(bp);
+	} else {
+		return -EINVAL;
+	}
+
+ pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
+ skb = netdev_alloc_skb(bp->dev, pkt_size);
+ if (!skb)
+ return -ENOMEM;
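+	/* Build a test frame: destination = our own MAC, zeroed
+	 * source/type, then an incrementing data pattern. */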
+ packet = skb_put(skb, pkt_size);
+ memcpy(packet, bp->dev->dev_addr, 6);
+ memset(packet + 6, 0x0, 8);
+ for (i = 14; i < pkt_size; i++)
+ packet[i] = (unsigned char) (i & 0xff);
+
+ map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
+			     DMA_TO_DEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, map)) {
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
+
+ REG_WR(bp, BNX2_HC_COMMAND,
+ bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+
+ REG_RD(bp, BNX2_HC_COMMAND);
+
+ udelay(5);
+ rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
+
+ num_pkts = 0;
+
+ txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
+
+ txbd->tx_bd_haddr_hi = (u64) map >> 32;
+ txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
+ txbd->tx_bd_mss_nbytes = pkt_size;
+ txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
+
+ num_pkts++;
+ txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
+ txr->tx_prod_bseq += pkt_size;
+
+ REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
+ REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
+
+ udelay(100);
+
+ REG_WR(bp, BNX2_HC_COMMAND,
+ bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+
+ REG_RD(bp, BNX2_HC_COMMAND);
+
+ udelay(5);
+
+	dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
+
+ if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
+ goto loopback_test_done;
+
+ rx_idx = bnx2_get_hw_rx_cons(bnapi);
+	if (rx_idx != rx_start_idx + num_pkts)
+		goto loopback_test_done;
+
+ rx_buf = &rxr->rx_buf_ring[rx_start_idx];
+ rx_skb = rx_buf->skb;
+
+ rx_hdr = rx_buf->desc;
+ skb_reserve(rx_skb, BNX2_RX_OFFSET);
+
+ dma_sync_single_for_cpu(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+				bp->rx_buf_size, DMA_FROM_DEVICE);
+
+ if (rx_hdr->l2_fhdr_status &
+ (L2_FHDR_ERRORS_BAD_CRC |
+ L2_FHDR_ERRORS_PHY_DECODE |
+ L2_FHDR_ERRORS_ALIGNMENT |
+ L2_FHDR_ERRORS_TOO_SHORT |
+ L2_FHDR_ERRORS_GIANT_FRAME)) {
+
+ goto loopback_test_done;
+ }
+
+	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size)
+		goto loopback_test_done;
+
+	for (i = 14; i < pkt_size; i++) {
+		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff))
+			goto loopback_test_done;
+	}
+
+ ret = 0;
+
+loopback_test_done:
+ bp->loopback = 0;
+ return ret;
+}
+
+#define BNX2_MAC_LOOPBACK_FAILED 1
+#define BNX2_PHY_LOOPBACK_FAILED 2
+#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
+ BNX2_PHY_LOOPBACK_FAILED)
+
+static int
+bnx2_test_loopback(struct bnx2 *bp)
+{
+ int rc = 0;
+
+ if (!netif_running(bp->dev))
+ return BNX2_LOOPBACK_FAILED;
+
+ bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
+ spin_lock_bh(&bp->phy_lock);
+ bnx2_init_phy(bp, 1);
+ spin_unlock_bh(&bp->phy_lock);
+ if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
+ rc |= BNX2_MAC_LOOPBACK_FAILED;
+ if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
+ rc |= BNX2_PHY_LOOPBACK_FAILED;
+ return rc;
+}
+
+#define NVRAM_SIZE 0x200
+#define CRC32_RESIDUAL 0xdebb20e3
+
+static int
+bnx2_test_nvram(struct bnx2 *bp)
+{
+ __be32 buf[NVRAM_SIZE / 4];
+ u8 *data = (u8 *) buf;
+ int rc = 0;
+ u32 magic, csum;
+
+ if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
+ goto test_nvram_done;
+
+ magic = be32_to_cpu(buf[0]);
+ if (magic != 0x669955aa) {
+ rc = -ENODEV;
+ goto test_nvram_done;
+ }
+
+ if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
+ goto test_nvram_done;
+
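+	/* Each 0x100-byte block includes its stored CRC32; the CRC over
+	 * data plus CRC yields a fixed residual when the block is intact. */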
+ csum = ether_crc_le(0x100, data);
+ if (csum != CRC32_RESIDUAL) {
+ rc = -ENODEV;
+ goto test_nvram_done;
+ }
+
+ csum = ether_crc_le(0x100, data + 0x100);
+ if (csum != CRC32_RESIDUAL) {
+ rc = -ENODEV;
+ }
+
+test_nvram_done:
+ return rc;
+}
+
+static int
+bnx2_test_link(struct bnx2 *bp)
+{
+ u32 bmsr;
+
+ if (!netif_running(bp->dev))
+ return -ENODEV;
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
+ if (bp->link_up)
+ return 0;
+ return -ENODEV;
+ }
+ spin_lock_bh(&bp->phy_lock);
+ bnx2_enable_bmsr1(bp);
+ bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
+ bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
+ bnx2_disable_bmsr1(bp);
+ spin_unlock_bh(&bp->phy_lock);
+
+	if (bmsr & BMSR_LSTATUS)
+		return 0;
+ return -ENODEV;
+}
+
+static int
+bnx2_test_intr(struct bnx2 *bp)
+{
+ int i;
+ u16 status_idx;
+
+ if (!netif_running(bp->dev))
+ return -ENODEV;
+
+ status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
+
+ /* This register is not touched during run-time. */
+ REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
+ REG_RD(bp, BNX2_HC_COMMAND);
+
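+	/* Poll for the status block index to advance, which indicates
+	 * that the forced interrupt was delivered. */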
+ for (i = 0; i < 10; i++) {
+		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
+		    status_idx)
+			break;
+
+ msleep_interruptible(10);
+ }
+ if (i < 10)
+ return 0;
+
+ return -ENODEV;
+}
+
+/* Determine link state for parallel detection. */
+static int
+bnx2_5706_serdes_has_link(struct bnx2 *bp)
+{
+ u32 mode_ctl, an_dbg, exp;
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
+ return 0;
+
+ bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
+ bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
+
+ if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
+ return 0;
+
+ bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
+ bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
+ bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
+
+ if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
+ return 0;
+
+ bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
+ bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
+ bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
+
+ if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
+ return 0;
+
+ return 1;
+}
+
+static void
+bnx2_5706_serdes_timer(struct bnx2 *bp)
+{
+ int check_link = 1;
+
+ spin_lock(&bp->phy_lock);
+ if (bp->serdes_an_pending) {
+ bp->serdes_an_pending--;
+ check_link = 0;
+ } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
+ u32 bmcr;
+
+ bp->current_interval = BNX2_TIMER_INTERVAL;
+
+ bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+
+ if (bmcr & BMCR_ANENABLE) {
+ if (bnx2_5706_serdes_has_link(bp)) {
+ bmcr &= ~BMCR_ANENABLE;
+ bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
+ bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
+ bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
+ }
+ }
+	} else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
+ (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
+ u32 phy2;
+
+ bnx2_write_phy(bp, 0x17, 0x0f01);
+ bnx2_read_phy(bp, 0x15, &phy2);
+ if (phy2 & 0x20) {
+ u32 bmcr;
+
+ bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+ bmcr |= BMCR_ANENABLE;
+ bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
+
+ bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
+ }
+ } else
+ bp->current_interval = BNX2_TIMER_INTERVAL;
+
+ if (check_link) {
+ u32 val;
+
+ bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
+ bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
+ bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
+
+ if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
+ if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
+ bnx2_5706s_force_link_dn(bp, 1);
+ bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
+ } else
+ bnx2_set_link(bp);
+ } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
+ bnx2_set_link(bp);
+ }
+ spin_unlock(&bp->phy_lock);
+}
+
+static void
+bnx2_5708_serdes_timer(struct bnx2 *bp)
+{
+ if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+ return;
+
+ if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
+ bp->serdes_an_pending = 0;
+ return;
+ }
+
+ spin_lock(&bp->phy_lock);
+ if (bp->serdes_an_pending)
+ bp->serdes_an_pending--;
+ else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
+ u32 bmcr;
+
+ bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+ if (bmcr & BMCR_ANENABLE) {
+ bnx2_enable_forced_2g5(bp);
+ bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
+ } else {
+ bnx2_disable_forced_2g5(bp);
+ bp->serdes_an_pending = 2;
+ bp->current_interval = BNX2_TIMER_INTERVAL;
+ }
+
+ } else
+ bp->current_interval = BNX2_TIMER_INTERVAL;
+
+ spin_unlock(&bp->phy_lock);
+}
+
+static void
+bnx2_timer(unsigned long data)
+{
+ struct bnx2 *bp = (struct bnx2 *) data;
+
+ if (!netif_running(bp->dev))
+ return;
+
+ if (atomic_read(&bp->intr_sem) != 0)
+ goto bnx2_restart_timer;
+
+ if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
+ BNX2_FLAG_USING_MSI)
+ bnx2_chk_missed_msi(bp);
+
+ bnx2_send_heart_beat(bp);
+
+ bp->stats_blk->stat_FwRxDrop =
+ bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
+
+ /* workaround occasional corrupted counters */
+ if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
+ REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
+ BNX2_HC_COMMAND_STATS_NOW);
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+ if (CHIP_NUM(bp) == CHIP_NUM_5706)
+ bnx2_5706_serdes_timer(bp);
+ else
+ bnx2_5708_serdes_timer(bp);
+ }
+
+bnx2_restart_timer:
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
+}
+
+static int
+bnx2_request_irq(struct bnx2 *bp)
+{
+ unsigned long flags;
+ struct bnx2_irq *irq;
+ int rc = 0, i;
+
+ if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
+ flags = 0;
+ else
+ flags = IRQF_SHARED;
+
+ for (i = 0; i < bp->irq_nvecs; i++) {
+ irq = &bp->irq_tbl[i];
+ rc = request_irq(irq->vector, irq->handler, flags, irq->name,
+ &bp->bnx2_napi[i]);
+ if (rc)
+ break;
+ irq->requested = 1;
+ }
+ return rc;
+}
+
+static void
+__bnx2_free_irq(struct bnx2 *bp)
+{
+ struct bnx2_irq *irq;
+ int i;
+
+ for (i = 0; i < bp->irq_nvecs; i++) {
+ irq = &bp->irq_tbl[i];
+ if (irq->requested)
+ free_irq(irq->vector, &bp->bnx2_napi[i]);
+ irq->requested = 0;
+ }
+}
+
+static void
+bnx2_free_irq(struct bnx2 *bp)
+{
+ __bnx2_free_irq(bp);
+ if (bp->flags & BNX2_FLAG_USING_MSI)
+ pci_disable_msi(bp->pdev);
+ else if (bp->flags & BNX2_FLAG_USING_MSIX)
+ pci_disable_msix(bp->pdev);
+
+ bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
+}
+
+static void
+bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
+{
+ int i, total_vecs, rc;
+ struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
+ struct net_device *dev = bp->dev;
+ const int len = sizeof(bp->irq_tbl[0].name);
+
+ bnx2_setup_msix_tbl(bp);
+ REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
+ REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
+ REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
+
+	/* Need to flush the previous three writes to ensure MSI-X
+	 * is set up properly.
+	 */
+ REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
+
+ for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
+ msix_ent[i].entry = i;
+ msix_ent[i].vector = 0;
+ }
+
+ total_vecs = msix_vecs;
+#ifdef BCM_CNIC
+ total_vecs++;
+#endif
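+	/* pci_enable_msix() returns the number of vectors available when
+	 * the request cannot be fully satisfied; retry with that smaller
+	 * count until it succeeds (0) or fails outright (< 0).
+	 */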
+ rc = -ENOSPC;
+ while (total_vecs >= BNX2_MIN_MSIX_VEC) {
+ rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
+ if (rc <= 0)
+ break;
+		total_vecs = rc;
+ }
+
+ if (rc != 0)
+ return;
+
+ msix_vecs = total_vecs;
+#ifdef BCM_CNIC
+ msix_vecs--;
+#endif
+ bp->irq_nvecs = msix_vecs;
+ bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
+ for (i = 0; i < total_vecs; i++) {
+ bp->irq_tbl[i].vector = msix_ent[i].vector;
+ snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
+ bp->irq_tbl[i].handler = bnx2_msi_1shot;
+ }
+}
+
+static int
+bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
+{
+ int cpus = num_online_cpus();
+ int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
+
+ bp->irq_tbl[0].handler = bnx2_interrupt;
+ strcpy(bp->irq_tbl[0].name, bp->dev->name);
+ bp->irq_nvecs = 1;
+ bp->irq_tbl[0].vector = bp->pdev->irq;
+
+ if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
+ bnx2_enable_msix(bp, msix_vecs);
+
+ if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
+ !(bp->flags & BNX2_FLAG_USING_MSIX)) {
+ if (pci_enable_msi(bp->pdev) == 0) {
+ bp->flags |= BNX2_FLAG_USING_MSI;
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
+ bp->irq_tbl[0].handler = bnx2_msi_1shot;
+ } else
+ bp->irq_tbl[0].handler = bnx2_msi;
+
+ bp->irq_tbl[0].vector = bp->pdev->irq;
+ }
+ }
+
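+	/* One TX ring per vector, rounded down to a power of two;
+	 * RX rings simply match the vector count.
+	 */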
+ bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
+ netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
+
+ bp->num_rx_rings = bp->irq_nvecs;
+ return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
+}
+
+/* Called with rtnl_lock */
+static int
+bnx2_open(struct net_device *dev)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ int rc;
+
+ rc = bnx2_request_firmware(bp);
+ if (rc < 0)
+ goto out;
+
+ netif_carrier_off(dev);
+
+ bnx2_set_power_state(bp, PCI_D0);
+ bnx2_disable_int(bp);
+
+ rc = bnx2_setup_int_mode(bp, disable_msi);
+ if (rc)
+ goto open_err;
+ bnx2_init_napi(bp);
+ bnx2_napi_enable(bp);
+ rc = bnx2_alloc_mem(bp);
+ if (rc)
+ goto open_err;
+
+ rc = bnx2_request_irq(bp);
+ if (rc)
+ goto open_err;
+
+ rc = bnx2_init_nic(bp, 1);
+ if (rc)
+ goto open_err;
+
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
+
+ atomic_set(&bp->intr_sem, 0);
+
+ memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
+
+ bnx2_enable_int(bp);
+
+ if (bp->flags & BNX2_FLAG_USING_MSI) {
+		/* Test MSI to make sure it is working.
+		 * If the MSI test fails, go back to INTx mode.
+		 */
+ if (bnx2_test_intr(bp) != 0) {
+ netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
+
+ bnx2_disable_int(bp);
+ bnx2_free_irq(bp);
+
+ bnx2_setup_int_mode(bp, 1);
+
+ rc = bnx2_init_nic(bp, 0);
+
+ if (!rc)
+ rc = bnx2_request_irq(bp);
+
+ if (rc) {
+ del_timer_sync(&bp->timer);
+ goto open_err;
+ }
+ bnx2_enable_int(bp);
+ }
+ }
+ if (bp->flags & BNX2_FLAG_USING_MSI)
+ netdev_info(dev, "using MSI\n");
+ else if (bp->flags & BNX2_FLAG_USING_MSIX)
+ netdev_info(dev, "using MSIX\n");
+
+ netif_tx_start_all_queues(dev);
+out:
+ return rc;
+
+open_err:
+ bnx2_napi_disable(bp);
+ bnx2_free_skbs(bp);
+ bnx2_free_irq(bp);
+ bnx2_free_mem(bp);
+ bnx2_del_napi(bp);
+ bnx2_release_firmware(bp);
+ goto out;
+}
+
+static void
+bnx2_reset_task(struct work_struct *work)
+{
+ struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
+ int rc;
+
+ rtnl_lock();
+ if (!netif_running(bp->dev)) {
+ rtnl_unlock();
+ return;
+ }
+
+ bnx2_netif_stop(bp, true);
+
+ rc = bnx2_init_nic(bp, 1);
+ if (rc) {
+ netdev_err(bp->dev, "failed to reset NIC, closing\n");
+ bnx2_napi_enable(bp);
+ dev_close(bp->dev);
+ rtnl_unlock();
+ return;
+ }
+
+ atomic_set(&bp->intr_sem, 1);
+ bnx2_netif_start(bp, true);
+ rtnl_unlock();
+}
+
+static void
+bnx2_dump_state(struct bnx2 *bp)
+{
+ struct net_device *dev = bp->dev;
+ u32 val1, val2;
+
+ pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
+ netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
+ atomic_read(&bp->intr_sem), val1);
+ pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
+ pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
+ netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
+ netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
+ REG_RD(bp, BNX2_EMAC_TX_STATUS),
+ REG_RD(bp, BNX2_EMAC_RX_STATUS));
+ netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
+ REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
+ netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
+ REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
+ if (bp->flags & BNX2_FLAG_USING_MSIX)
+ netdev_err(dev, "DEBUG: PBA[%08x]\n",
+ REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
+}
+
+static void
+bnx2_tx_timeout(struct net_device *dev)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ bnx2_dump_state(bp);
+ bnx2_dump_mcp_state(bp);
+
+	/* This allows the netif to be shut down gracefully before resetting. */
+ schedule_work(&bp->reset_task);
+}
+
+/* Called with netif_tx_lock.
+ * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue().
+ */
+static netdev_tx_t
+bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ dma_addr_t mapping;
+ struct tx_bd *txbd;
+ struct sw_tx_bd *tx_buf;
+ u32 len, vlan_tag_flags, last_frag, mss;
+ u16 prod, ring_prod;
+ int i;
+ struct bnx2_napi *bnapi;
+ struct bnx2_tx_ring_info *txr;
+ struct netdev_queue *txq;
+
+ /* Determine which tx ring we will be placed on */
+ i = skb_get_queue_mapping(skb);
+ bnapi = &bp->bnx2_napi[i];
+ txr = &bnapi->tx_ring;
+ txq = netdev_get_tx_queue(dev, i);
+
+ if (unlikely(bnx2_tx_avail(bp, txr) <
+ (skb_shinfo(skb)->nr_frags + 1))) {
+ netif_tx_stop_queue(txq);
+ netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
+
+ return NETDEV_TX_BUSY;
+ }
+ len = skb_headlen(skb);
+ prod = txr->tx_prod;
+ ring_prod = TX_RING_IDX(prod);
+
+ vlan_tag_flags = 0;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
+ }
+
+ if (vlan_tx_tag_present(skb)) {
+ vlan_tag_flags |=
+ (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
+ }
+
+ if ((mss = skb_shinfo(skb)->gso_size)) {
+ u32 tcp_opt_len;
+ struct iphdr *iph;
+
+ vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
+
+ tcp_opt_len = tcp_optlen(skb);
+
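+		/* For IPv6 TSO the chip needs the TCP header offset beyond
+		 * the fixed IPv6 header, in 8-byte units, split across bit
+		 * fields in the BD flags and MSS words.
+		 */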
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+ u32 tcp_off = skb_transport_offset(skb) -
+ sizeof(struct ipv6hdr) - ETH_HLEN;
+
+ vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
+ TX_BD_FLAGS_SW_FLAGS;
+ if (likely(tcp_off == 0))
+ vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
+ else {
+ tcp_off >>= 3;
+ vlan_tag_flags |= ((tcp_off & 0x3) <<
+ TX_BD_FLAGS_TCP6_OFF0_SHL) |
+ ((tcp_off & 0x10) <<
+ TX_BD_FLAGS_TCP6_OFF4_SHL);
+ mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
+ }
+ } else {
+ iph = ip_hdr(skb);
+ if (tcp_opt_len || (iph->ihl > 5)) {
+ vlan_tag_flags |= ((iph->ihl - 5) +
+ (tcp_opt_len >> 2)) << 8;
+ }
+ }
+ } else
+ mss = 0;
+
+ mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ tx_buf = &txr->tx_buf_ring[ring_prod];
+ tx_buf->skb = skb;
+ dma_unmap_addr_set(tx_buf, mapping, mapping);
+
+ txbd = &txr->tx_desc_ring[ring_prod];
+
+ txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
+ txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
+ txbd->tx_bd_mss_nbytes = len | (mss << 16);
+ txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
+
+ last_frag = skb_shinfo(skb)->nr_frags;
+ tx_buf->nr_frags = last_frag;
+ tx_buf->is_gso = skb_is_gso(skb);
+
+ for (i = 0; i < last_frag; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ prod = NEXT_TX_BD(prod);
+ ring_prod = TX_RING_IDX(prod);
+ txbd = &txr->tx_desc_ring[ring_prod];
+
+ len = frag->size;
+ mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, mapping))
+ goto dma_error;
+ dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
+ mapping);
+
+ txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
+ txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
+ txbd->tx_bd_mss_nbytes = len | (mss << 16);
+ txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
+
+ }
+ txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
+
+ prod = NEXT_TX_BD(prod);
+ txr->tx_prod_bseq += skb->len;
+
+ REG_WR16(bp, txr->tx_bidx_addr, prod);
+ REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
+
+ mmiowb();
+
+ txr->tx_prod = prod;
+
+ if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
+ netif_tx_stop_queue(txq);
+
+ /* netif_tx_stop_queue() must be done before checking
+ * tx index in bnx2_tx_avail() below, because in
+ * bnx2_tx_int(), we update tx index before checking for
+ * netif_tx_queue_stopped().
+ */
+ smp_mb();
+ if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
+ netif_tx_wake_queue(txq);
+ }
+
+ return NETDEV_TX_OK;
+dma_error:
+ /* save value of frag that failed */
+ last_frag = i;
+
+ /* start back at beginning and unmap skb */
+ prod = txr->tx_prod;
+ ring_prod = TX_RING_IDX(prod);
+ tx_buf = &txr->tx_buf_ring[ring_prod];
+ tx_buf->skb = NULL;
+ dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+
+ /* unmap remaining mapped pages */
+ for (i = 0; i < last_frag; i++) {
+ prod = NEXT_TX_BD(prod);
+ ring_prod = TX_RING_IDX(prod);
+ tx_buf = &txr->tx_buf_ring[ring_prod];
+ dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_TODEVICE);
+ }
+
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+/* Called with rtnl_lock */
+static int
+bnx2_close(struct net_device *dev)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ bnx2_disable_int_sync(bp);
+ bnx2_napi_disable(bp);
+ del_timer_sync(&bp->timer);
+ bnx2_shutdown_chip(bp);
+ bnx2_free_irq(bp);
+ bnx2_free_skbs(bp);
+ bnx2_free_mem(bp);
+ bnx2_del_napi(bp);
+ bp->link_up = 0;
+ netif_carrier_off(bp->dev);
+ bnx2_set_power_state(bp, PCI_D3hot);
+ return 0;
+}
+
+static void
+bnx2_save_stats(struct bnx2 *bp)
+{
+ u32 *hw_stats = (u32 *) bp->stats_blk;
+ u32 *temp_stats = (u32 *) bp->temp_stats_blk;
+ int i;
+
+	/* The first 10 counters are 64-bit, stored as hi/lo pairs of
+	 * 32-bit words; add them with carry from lo into hi.
+	 */
+ for (i = 0; i < 20; i += 2) {
+ u32 hi;
+ u64 lo;
+
+ hi = temp_stats[i] + hw_stats[i];
+ lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
+ if (lo > 0xffffffff)
+ hi++;
+ temp_stats[i] = hi;
+ temp_stats[i + 1] = lo & 0xffffffff;
+ }
+
+ for ( ; i < sizeof(struct statistics_block) / 4; i++)
+ temp_stats[i] += hw_stats[i];
+}
+
+#define GET_64BIT_NET_STATS64(ctr) \
+ (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
+
+#define GET_64BIT_NET_STATS(ctr) \
+	(GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
+	 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr))
+
+#define GET_32BIT_NET_STATS(ctr) \
+ (unsigned long) (bp->stats_blk->ctr + \
+ bp->temp_stats_blk->ctr)
+
+static struct rtnl_link_stats64 *
+bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ if (bp->stats_blk == NULL)
+ return net_stats;
+
+ net_stats->rx_packets =
+ GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
+ GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
+ GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
+
+ net_stats->tx_packets =
+ GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
+ GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
+ GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
+
+ net_stats->rx_bytes =
+ GET_64BIT_NET_STATS(stat_IfHCInOctets);
+
+ net_stats->tx_bytes =
+ GET_64BIT_NET_STATS(stat_IfHCOutOctets);
+
+ net_stats->multicast =
+ GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
+
+ net_stats->collisions =
+ GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
+
+ net_stats->rx_length_errors =
+ GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
+ GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
+
+ net_stats->rx_over_errors =
+ GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
+ GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
+
+ net_stats->rx_frame_errors =
+ GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
+
+ net_stats->rx_crc_errors =
+ GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
+
+ net_stats->rx_errors = net_stats->rx_length_errors +
+ net_stats->rx_over_errors + net_stats->rx_frame_errors +
+ net_stats->rx_crc_errors;
+
+ net_stats->tx_aborted_errors =
+ GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
+ GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
+
+ if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
+ (CHIP_ID(bp) == CHIP_ID_5708_A0))
+ net_stats->tx_carrier_errors = 0;
+ else {
+ net_stats->tx_carrier_errors =
+ GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
+ }
+
+ net_stats->tx_errors =
+ GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
+ net_stats->tx_aborted_errors +
+ net_stats->tx_carrier_errors;
+
+ net_stats->rx_missed_errors =
+ GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
+ GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
+ GET_32BIT_NET_STATS(stat_FwRxDrop);
+
+ return net_stats;
+}
+
+/* All ethtool functions called with rtnl_lock */
+
+static int
+bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ int support_serdes = 0, support_copper = 0;
+
+ cmd->supported = SUPPORTED_Autoneg;
+ if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
+ support_serdes = 1;
+ support_copper = 1;
+ } else if (bp->phy_port == PORT_FIBRE)
+ support_serdes = 1;
+ else
+ support_copper = 1;
+
+ if (support_serdes) {
+ cmd->supported |= SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE;
+ if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
+ cmd->supported |= SUPPORTED_2500baseX_Full;
+
+ }
+ if (support_copper) {
+ cmd->supported |= SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_TP;
+
+ }
+
+ spin_lock_bh(&bp->phy_lock);
+ cmd->port = bp->phy_port;
+ cmd->advertising = bp->advertising;
+
+ if (bp->autoneg & AUTONEG_SPEED) {
+ cmd->autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->autoneg = AUTONEG_DISABLE;
+ }
+
+ if (netif_carrier_ok(dev)) {
+ ethtool_cmd_speed_set(cmd, bp->line_speed);
+ cmd->duplex = bp->duplex;
+	} else {
+ ethtool_cmd_speed_set(cmd, -1);
+ cmd->duplex = -1;
+ }
+ spin_unlock_bh(&bp->phy_lock);
+
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->phy_address = bp->phy_addr;
+
+ return 0;
+}
+
+static int
+bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ u8 autoneg = bp->autoneg;
+ u8 req_duplex = bp->req_duplex;
+ u16 req_line_speed = bp->req_line_speed;
+ u32 advertising = bp->advertising;
+ int err = -EINVAL;
+
+ spin_lock_bh(&bp->phy_lock);
+
+ if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
+ goto err_out_unlock;
+
+ if (cmd->port != bp->phy_port &&
+ !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
+ goto err_out_unlock;
+
+ /* If device is down, we can store the settings only if the user
+ * is setting the currently active port.
+ */
+ if (!netif_running(dev) && cmd->port != bp->phy_port)
+ goto err_out_unlock;
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ autoneg |= AUTONEG_SPEED;
+
+ advertising = cmd->advertising;
+ if (cmd->port == PORT_TP) {
+ advertising &= ETHTOOL_ALL_COPPER_SPEED;
+ if (!advertising)
+ advertising = ETHTOOL_ALL_COPPER_SPEED;
+ } else {
+ advertising &= ETHTOOL_ALL_FIBRE_SPEED;
+ if (!advertising)
+ advertising = ETHTOOL_ALL_FIBRE_SPEED;
+ }
+ advertising |= ADVERTISED_Autoneg;
+	} else {
+ u32 speed = ethtool_cmd_speed(cmd);
+ if (cmd->port == PORT_FIBRE) {
+ if ((speed != SPEED_1000 &&
+ speed != SPEED_2500) ||
+ (cmd->duplex != DUPLEX_FULL))
+ goto err_out_unlock;
+
+ if (speed == SPEED_2500 &&
+ !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
+ goto err_out_unlock;
+ } else if (speed == SPEED_1000 || speed == SPEED_2500)
+ goto err_out_unlock;
+
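+	/* The NVRAM read returns big-endian 32-bit words; byte-swap each
+	 * word into the first half of the buffer so the pci_vpd helpers
+	 * see the VPD bytes in their natural order.
+	 */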
+ autoneg &= ~AUTONEG_SPEED;
+ req_line_speed = speed;
+ req_duplex = cmd->duplex;
+ advertising = 0;
+ }
+
+ bp->autoneg = autoneg;
+ bp->advertising = advertising;
+ bp->req_line_speed = req_line_speed;
+ bp->req_duplex = req_duplex;
+
+ err = 0;
+ /* If device is down, the new settings will be picked up when it is
+ * brought up.
+ */
+ if (netif_running(dev))
+ err = bnx2_setup_phy(bp, cmd->port);
+
+err_out_unlock:
+ spin_unlock_bh(&bp->phy_lock);
+
+ return err;
+}
+
+static void
+bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
+}
+
+#define BNX2_REGDUMP_LEN (32 * 1024)
+
+static int
+bnx2_get_regs_len(struct net_device *dev)
+{
+ return BNX2_REGDUMP_LEN;
+}
+
+static void
+bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
+{
+ u32 *p = _p, i, offset;
+ u8 *orig_p = _p;
+ struct bnx2 *bp = netdev_priv(dev);
+ static const u32 reg_boundaries[] = {
+ 0x0000, 0x0098, 0x0400, 0x045c,
+ 0x0800, 0x0880, 0x0c00, 0x0c10,
+ 0x0c30, 0x0d08, 0x1000, 0x101c,
+ 0x1040, 0x1048, 0x1080, 0x10a4,
+ 0x1400, 0x1490, 0x1498, 0x14f0,
+ 0x1500, 0x155c, 0x1580, 0x15dc,
+ 0x1600, 0x1658, 0x1680, 0x16d8,
+ 0x1800, 0x1820, 0x1840, 0x1854,
+ 0x1880, 0x1894, 0x1900, 0x1984,
+ 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
+ 0x1c80, 0x1c94, 0x1d00, 0x1d84,
+ 0x2000, 0x2030, 0x23c0, 0x2400,
+ 0x2800, 0x2820, 0x2830, 0x2850,
+ 0x2b40, 0x2c10, 0x2fc0, 0x3058,
+ 0x3c00, 0x3c94, 0x4000, 0x4010,
+ 0x4080, 0x4090, 0x43c0, 0x4458,
+ 0x4c00, 0x4c18, 0x4c40, 0x4c54,
+ 0x4fc0, 0x5010, 0x53c0, 0x5444,
+ 0x5c00, 0x5c18, 0x5c80, 0x5c90,
+ 0x5fc0, 0x6000, 0x6400, 0x6428,
+ 0x6800, 0x6848, 0x684c, 0x6860,
+ 0x6888, 0x6910, 0x8000
+ };
+
+ regs->version = 0;
+
+ memset(p, 0, BNX2_REGDUMP_LEN);
+
+ if (!netif_running(bp->dev))
+ return;
+
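+	/* reg_boundaries[] holds alternating start/end offsets of the
+	 * readable register windows; gaps between windows are left
+	 * zero-filled in the dump.
+	 */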
+ i = 0;
+ offset = reg_boundaries[0];
+ p += offset;
+ while (offset < BNX2_REGDUMP_LEN) {
+ *p++ = REG_RD(bp, offset);
+ offset += 4;
+ if (offset == reg_boundaries[i + 1]) {
+ offset = reg_boundaries[i + 2];
+ p = (u32 *) (orig_p + offset);
+ i += 2;
+ }
+ }
+}
+
+static void
+bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ if (bp->flags & BNX2_FLAG_NO_WOL) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+	} else {
+ wol->supported = WAKE_MAGIC;
+ if (bp->wol)
+ wol->wolopts = WAKE_MAGIC;
+ else
+ wol->wolopts = 0;
+ }
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int
+bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ if (bp->flags & BNX2_FLAG_NO_WOL)
+ return -EINVAL;
+
+ bp->wol = 1;
+	} else {
+ bp->wol = 0;
+ }
+ return 0;
+}
+
+static int
+bnx2_nway_reset(struct net_device *dev)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ u32 bmcr;
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ if (!(bp->autoneg & AUTONEG_SPEED)) {
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&bp->phy_lock);
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
+ int rc;
+
+ rc = bnx2_setup_remote_phy(bp, bp->phy_port);
+ spin_unlock_bh(&bp->phy_lock);
+ return rc;
+ }
+
+ /* Force a link down visible on the other side */
+ if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+ bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
+ spin_unlock_bh(&bp->phy_lock);
+
+ msleep(20);
+
+ spin_lock_bh(&bp->phy_lock);
+
+ bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
+ bp->serdes_an_pending = 1;
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
+ }
+
+ bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+ bmcr &= ~BMCR_LOOPBACK;
+ bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
+
+ spin_unlock_bh(&bp->phy_lock);
+
+ return 0;
+}
+
+static u32
+bnx2_get_link(struct net_device *dev)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ return bp->link_up;
+}
+
+static int
+bnx2_get_eeprom_len(struct net_device *dev)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ if (bp->flash_info == NULL)
+ return 0;
+
+ return (int) bp->flash_size;
+}
+
+static int
+bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *eebuf)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ int rc;
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ /* parameters already validated in ethtool_get_eeprom */
+
+ rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
+
+ return rc;
+}
+
+static int
+bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *eebuf)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ int rc;
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ /* parameters already validated in ethtool_set_eeprom */
+
+ rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
+
+ return rc;
+}
+
+static int
+bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ memset(coal, 0, sizeof(struct ethtool_coalesce));
+
+ coal->rx_coalesce_usecs = bp->rx_ticks;
+ coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
+ coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
+ coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
+
+ coal->tx_coalesce_usecs = bp->tx_ticks;
+ coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
+ coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
+ coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
+
+ coal->stats_block_coalesce_usecs = bp->stats_ticks;
+
+ return 0;
+}
+
+static int
+bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
+	if (bp->rx_ticks > 0x3ff)
+		bp->rx_ticks = 0x3ff;
+
+	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
+	if (bp->rx_quick_cons_trip > 0xff)
+		bp->rx_quick_cons_trip = 0xff;
+
+	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
+	if (bp->rx_ticks_int > 0x3ff)
+		bp->rx_ticks_int = 0x3ff;
+
+	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
+	if (bp->rx_quick_cons_trip_int > 0xff)
+		bp->rx_quick_cons_trip_int = 0xff;
+
+	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
+	if (bp->tx_ticks > 0x3ff)
+		bp->tx_ticks = 0x3ff;
+
+	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
+	if (bp->tx_quick_cons_trip > 0xff)
+		bp->tx_quick_cons_trip = 0xff;
+
+	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
+	if (bp->tx_ticks_int > 0x3ff)
+		bp->tx_ticks_int = 0x3ff;
+
+	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
+	if (bp->tx_quick_cons_trip_int > 0xff)
+		bp->tx_quick_cons_trip_int = 0xff;
+
+ bp->stats_ticks = coal->stats_block_coalesce_usecs;
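+	/* Chips with the broken-stats erratum only support a one-second
+	 * stats coalescing interval (or stats disabled).
+	 */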
+ if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
+ if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
+ bp->stats_ticks = USEC_PER_SEC;
+ }
+ if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
+ bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
+ bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
+
+ if (netif_running(bp->dev)) {
+ bnx2_netif_stop(bp, true);
+ bnx2_init_nic(bp, 0);
+ bnx2_netif_start(bp, true);
+ }
+
+ return 0;
+}
+
+static void
+bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
+
+ ering->rx_pending = bp->rx_ring_size;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = bp->rx_pg_ring_size;
+
+ ering->tx_max_pending = MAX_TX_DESC_CNT;
+ ering->tx_pending = bp->tx_ring_size;
+}
+
+static int
+bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
+{
+ if (netif_running(bp->dev)) {
+ /* Reset will erase chipset stats; save them */
+ bnx2_save_stats(bp);
+
+ bnx2_netif_stop(bp, true);
+ bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
+ __bnx2_free_irq(bp);
+ bnx2_free_skbs(bp);
+ bnx2_free_mem(bp);
+ }
+
+ bnx2_set_rx_ring_size(bp, rx);
+ bp->tx_ring_size = tx;
+
+ if (netif_running(bp->dev)) {
+ int rc;
+
+ rc = bnx2_alloc_mem(bp);
+ if (!rc)
+ rc = bnx2_request_irq(bp);
+
+ if (!rc)
+ rc = bnx2_init_nic(bp, 0);
+
+ if (rc) {
+ bnx2_napi_enable(bp);
+ dev_close(bp->dev);
+ return rc;
+ }
+#ifdef BCM_CNIC
+ mutex_lock(&bp->cnic_lock);
+ /* Let cnic know about the new status block. */
+ if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
+ bnx2_setup_cnic_irq_info(bp);
+ mutex_unlock(&bp->cnic_lock);
+#endif
+ bnx2_netif_start(bp, true);
+ }
+ return 0;
+}
+
+static int
+bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ int rc;
+
+ if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
+ (ering->tx_pending > MAX_TX_DESC_CNT) ||
+ (ering->tx_pending <= MAX_SKB_FRAGS)) {
+
+ return -EINVAL;
+ }
+ rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
+ return rc;
+}
+
+static void
+bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
+ epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
+ epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
+}
+
+static int
+bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ bp->req_flow_ctrl = 0;
+ if (epause->rx_pause)
+ bp->req_flow_ctrl |= FLOW_CTRL_RX;
+ if (epause->tx_pause)
+ bp->req_flow_ctrl |= FLOW_CTRL_TX;
+
+ if (epause->autoneg) {
+ bp->autoneg |= AUTONEG_FLOW_CTRL;
+ }
+ else {
+ bp->autoneg &= ~AUTONEG_FLOW_CTRL;
+ }
+
+ if (netif_running(dev)) {
+ spin_lock_bh(&bp->phy_lock);
+ bnx2_setup_phy(bp, bp->phy_port);
+ spin_unlock_bh(&bp->phy_lock);
+ }
+
+ return 0;
+}
+
+static struct {
+ char string[ETH_GSTRING_LEN];
+} bnx2_stats_str_arr[] = {
+ { "rx_bytes" },
+ { "rx_error_bytes" },
+ { "tx_bytes" },
+ { "tx_error_bytes" },
+ { "rx_ucast_packets" },
+ { "rx_mcast_packets" },
+ { "rx_bcast_packets" },
+ { "tx_ucast_packets" },
+ { "tx_mcast_packets" },
+ { "tx_bcast_packets" },
+ { "tx_mac_errors" },
+ { "tx_carrier_errors" },
+ { "rx_crc_errors" },
+ { "rx_align_errors" },
+ { "tx_single_collisions" },
+ { "tx_multi_collisions" },
+ { "tx_deferred" },
+ { "tx_excess_collisions" },
+ { "tx_late_collisions" },
+ { "tx_total_collisions" },
+ { "rx_fragments" },
+ { "rx_jabbers" },
+ { "rx_undersize_packets" },
+ { "rx_oversize_packets" },
+ { "rx_64_byte_packets" },
+ { "rx_65_to_127_byte_packets" },
+ { "rx_128_to_255_byte_packets" },
+ { "rx_256_to_511_byte_packets" },
+ { "rx_512_to_1023_byte_packets" },
+ { "rx_1024_to_1522_byte_packets" },
+ { "rx_1523_to_9022_byte_packets" },
+ { "tx_64_byte_packets" },
+ { "tx_65_to_127_byte_packets" },
+ { "tx_128_to_255_byte_packets" },
+ { "tx_256_to_511_byte_packets" },
+ { "tx_512_to_1023_byte_packets" },
+ { "tx_1024_to_1522_byte_packets" },
+ { "tx_1523_to_9022_byte_packets" },
+ { "rx_xon_frames" },
+ { "rx_xoff_frames" },
+ { "tx_xon_frames" },
+ { "tx_xoff_frames" },
+ { "rx_mac_ctrl_frames" },
+ { "rx_filtered_packets" },
+ { "rx_ftq_discards" },
+ { "rx_discards" },
+ { "rx_fw_discards" },
+};
+
+#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
+
+#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
+
+static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
+ STATS_OFFSET32(stat_IfHCInOctets_hi),
+ STATS_OFFSET32(stat_IfHCInBadOctets_hi),
+ STATS_OFFSET32(stat_IfHCOutOctets_hi),
+ STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
+ STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
+ STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
+ STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
+ STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
+ STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
+ STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
+ STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
+ STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
+ STATS_OFFSET32(stat_Dot3StatsFCSErrors),
+ STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
+ STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
+ STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
+ STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
+ STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
+ STATS_OFFSET32(stat_Dot3StatsLateCollisions),
+ STATS_OFFSET32(stat_EtherStatsCollisions),
+ STATS_OFFSET32(stat_EtherStatsFragments),
+ STATS_OFFSET32(stat_EtherStatsJabbers),
+ STATS_OFFSET32(stat_EtherStatsUndersizePkts),
+ STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
+ STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
+ STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
+ STATS_OFFSET32(stat_XonPauseFramesReceived),
+ STATS_OFFSET32(stat_XoffPauseFramesReceived),
+ STATS_OFFSET32(stat_OutXonSent),
+ STATS_OFFSET32(stat_OutXoffSent),
+ STATS_OFFSET32(stat_MacControlFramesReceived),
+ STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
+ STATS_OFFSET32(stat_IfInFTQDiscards),
+ STATS_OFFSET32(stat_IfInMBUFDiscards),
+ STATS_OFFSET32(stat_FwRxDrop),
+};
+
+/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
+ * skipped because of errata.
+ */
+static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
+ 8,0,8,8,8,8,8,8,8,8,
+ 4,0,4,4,4,4,4,4,4,4,
+ 4,4,4,4,4,4,4,4,4,4,
+ 4,4,4,4,4,4,4,4,4,4,
+ 4,4,4,4,4,4,4,
+};
+
+static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
+ 8,0,8,8,8,8,8,8,8,8,
+ 4,4,4,4,4,4,4,4,4,4,
+ 4,4,4,4,4,4,4,4,4,4,
+ 4,4,4,4,4,4,4,4,4,4,
+ 4,4,4,4,4,4,4,
+};
+
+#define BNX2_NUM_TESTS 6
+
+static struct {
+ char string[ETH_GSTRING_LEN];
+} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
+ { "register_test (offline)" },
+ { "memory_test (offline)" },
+ { "loopback_test (offline)" },
+ { "nvram_test (online)" },
+ { "interrupt_test (online)" },
+ { "link_test (online)" },
+};
+
+static int
+bnx2_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return BNX2_NUM_TESTS;
+ case ETH_SS_STATS:
+ return BNX2_NUM_STATS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ bnx2_set_power_state(bp, PCI_D0);
+
+ memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
+ if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ int i;
+
+ bnx2_netif_stop(bp, true);
+ bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
+ bnx2_free_skbs(bp);
+
+ if (bnx2_test_registers(bp) != 0) {
+ buf[0] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ if (bnx2_test_memory(bp) != 0) {
+ buf[1] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+		buf[2] = bnx2_test_loopback(bp);
+		if (buf[2] != 0)
+			etest->flags |= ETH_TEST_FL_FAILED;
+
+ if (!netif_running(bp->dev))
+ bnx2_shutdown_chip(bp);
+ else {
+ bnx2_init_nic(bp, 1);
+ bnx2_netif_start(bp, true);
+ }
+
+ /* wait for link up */
+ for (i = 0; i < 7; i++) {
+ if (bp->link_up)
+ break;
+ msleep_interruptible(1000);
+ }
+ }
+
+ if (bnx2_test_nvram(bp) != 0) {
+ buf[3] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ if (bnx2_test_intr(bp) != 0) {
+ buf[4] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (bnx2_test_link(bp) != 0) {
+ buf[5] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+	}
+ if (!netif_running(bp->dev))
+ bnx2_set_power_state(bp, PCI_D3hot);
+}
+
+static void
+bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, bnx2_stats_str_arr,
+ sizeof(bnx2_stats_str_arr));
+ break;
+ case ETH_SS_TEST:
+ memcpy(buf, bnx2_tests_str_arr,
+ sizeof(bnx2_tests_str_arr));
+ break;
+ }
+}
+
+static void
+bnx2_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *buf)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ int i;
+ u32 *hw_stats = (u32 *) bp->stats_blk;
+ u32 *temp_stats = (u32 *) bp->temp_stats_blk;
+ u8 *stats_len_arr = NULL;
+
+ if (hw_stats == NULL) {
+ memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
+ return;
+ }
+
+ if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
+ (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
+ (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
+ (CHIP_ID(bp) == CHIP_ID_5708_A0))
+ stats_len_arr = bnx2_5706_stats_len_arr;
+ else
+ stats_len_arr = bnx2_5708_stats_len_arr;
+
+ for (i = 0; i < BNX2_NUM_STATS; i++) {
+ unsigned long offset;
+
+ if (stats_len_arr[i] == 0) {
+ /* skip this counter */
+ buf[i] = 0;
+ continue;
+ }
+
+ offset = bnx2_stats_offset_arr[i];
+ if (stats_len_arr[i] == 4) {
+ /* 4-byte counter */
+ buf[i] = (u64) *(hw_stats + offset) +
+ *(temp_stats + offset);
+ continue;
+ }
+ /* 8-byte counter */
+ buf[i] = (((u64) *(hw_stats + offset)) << 32) +
+ *(hw_stats + offset + 1) +
+ (((u64) *(temp_stats + offset)) << 32) +
+ *(temp_stats + offset + 1);
+ }
+}
+
+static int
+bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ bnx2_set_power_state(bp, PCI_D0);
+
+ bp->leds_save = REG_RD(bp, BNX2_MISC_CFG);
+ REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
+ return 1; /* cycle on/off once per second */
+
+ case ETHTOOL_ID_ON:
+ REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
+ BNX2_EMAC_LED_1000MB_OVERRIDE |
+ BNX2_EMAC_LED_100MB_OVERRIDE |
+ BNX2_EMAC_LED_10MB_OVERRIDE |
+ BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
+ BNX2_EMAC_LED_TRAFFIC);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ REG_WR(bp, BNX2_EMAC_LED, 0);
+ REG_WR(bp, BNX2_MISC_CFG, bp->leds_save);
+
+ if (!netif_running(dev))
+ bnx2_set_power_state(bp, PCI_D3hot);
+ break;
+ }
+
+ return 0;
+}
+
+static u32
+bnx2_fix_features(struct net_device *dev, u32 features)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
+ features |= NETIF_F_HW_VLAN_RX;
+
+ return features;
+}
+
+static int
+bnx2_set_features(struct net_device *dev, u32 features)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ /* TSO with VLAN tag won't work with current firmware */
+ if (features & NETIF_F_HW_VLAN_TX)
+ dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
+ else
+ dev->vlan_features &= ~NETIF_F_ALL_TSO;
+
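+	/* Toggling RX VLAN tag stripping requires reprogramming the RX
+	 * mode and telling the firmware whether to keep VLAN tags.
+	 */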
+ if ((!!(features & NETIF_F_HW_VLAN_RX) !=
+ !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
+ netif_running(dev)) {
+ bnx2_netif_stop(bp, false);
+ dev->features = features;
+ bnx2_set_rx_mode(dev);
+ bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
+ bnx2_netif_start(bp, false);
+ return 1;
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops bnx2_ethtool_ops = {
+ .get_settings = bnx2_get_settings,
+ .set_settings = bnx2_set_settings,
+ .get_drvinfo = bnx2_get_drvinfo,
+ .get_regs_len = bnx2_get_regs_len,
+ .get_regs = bnx2_get_regs,
+ .get_wol = bnx2_get_wol,
+ .set_wol = bnx2_set_wol,
+ .nway_reset = bnx2_nway_reset,
+ .get_link = bnx2_get_link,
+ .get_eeprom_len = bnx2_get_eeprom_len,
+ .get_eeprom = bnx2_get_eeprom,
+ .set_eeprom = bnx2_set_eeprom,
+ .get_coalesce = bnx2_get_coalesce,
+ .set_coalesce = bnx2_set_coalesce,
+ .get_ringparam = bnx2_get_ringparam,
+ .set_ringparam = bnx2_set_ringparam,
+ .get_pauseparam = bnx2_get_pauseparam,
+ .set_pauseparam = bnx2_set_pauseparam,
+ .self_test = bnx2_self_test,
+ .get_strings = bnx2_get_strings,
+ .set_phys_id = bnx2_set_phys_id,
+ .get_ethtool_stats = bnx2_get_ethtool_stats,
+ .get_sset_count = bnx2_get_sset_count,
+};
+
+/* Called with rtnl_lock */
+static int
+bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(ifr);
+ struct bnx2 *bp = netdev_priv(dev);
+ int err;
+
+	switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = bp->phy_addr;
+
+ /* fallthru */
+ case SIOCGMIIREG: {
+ u32 mii_regval;
+
+ if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+ return -EOPNOTSUPP;
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ spin_lock_bh(&bp->phy_lock);
+ err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
+ spin_unlock_bh(&bp->phy_lock);
+
+ data->val_out = mii_regval;
+
+ return err;
+ }
+
+ case SIOCSMIIREG:
+ if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+ return -EOPNOTSUPP;
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ spin_lock_bh(&bp->phy_lock);
+ err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
+ spin_unlock_bh(&bp->phy_lock);
+
+ return err;
+
+ default:
+ /* do nothing */
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+/* Called with rtnl_lock */
+static int
+bnx2_change_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct bnx2 *bp = netdev_priv(dev);
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ if (netif_running(dev))
+ bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
+
+ return 0;
+}
+
+/* Called with rtnl_lock */
+static int
+bnx2_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
+ ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void
+poll_bnx2(struct net_device *dev)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < bp->irq_nvecs; i++) {
+ struct bnx2_irq *irq = &bp->irq_tbl[i];
+
+ disable_irq(irq->vector);
+ irq->handler(irq->vector, &bp->bnx2_napi[i]);
+ enable_irq(irq->vector);
+ }
+}
+#endif
+
+static void __devinit
+bnx2_get_5709_media(struct bnx2 *bp)
+{
+ u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
+ u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
+ u32 strap;
+
+ if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
+ return;
+ else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
+ bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
+ return;
+ }
+
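+	/* The bond ID was inconclusive; decode the media strap instead.
+	 * The strap-to-SERDES mapping differs between PCI function 0
+	 * and function 1.
+	 */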
+ if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
+ strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
+ else
+ strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
+
+ if (PCI_FUNC(bp->pdev->devfn) == 0) {
+ switch (strap) {
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
+ return;
+ }
+ } else {
+ switch (strap) {
+ case 0x1:
+ case 0x2:
+ case 0x4:
+ bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
+ return;
+ }
+ }
+}
+
+static void __devinit
+bnx2_get_pci_speed(struct bnx2 *bp)
+{
+ u32 reg;
+
+ reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
+ if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
+ u32 clkreg;
+
+ bp->flags |= BNX2_FLAG_PCIX;
+
+ clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
+
+ clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
+ switch (clkreg) {
+ case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
+ bp->bus_speed_mhz = 133;
+ break;
+
+ case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
+ bp->bus_speed_mhz = 100;
+ break;
+
+ case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
+ case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
+ bp->bus_speed_mhz = 66;
+ break;
+
+ case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
+ case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
+ bp->bus_speed_mhz = 50;
+ break;
+
+ case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
+ case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
+ case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
+ bp->bus_speed_mhz = 33;
+ break;
+ }
+	} else {
+ if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
+ bp->bus_speed_mhz = 66;
+ else
+ bp->bus_speed_mhz = 33;
+ }
+
+ if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
+ bp->flags |= BNX2_FLAG_PCI_32BIT;
+}
+
+static void __devinit
+bnx2_read_vpd_fw_ver(struct bnx2 *bp)
+{
+ int rc, i, j;
+ u8 *data;
+ unsigned int block_end, rosize, len;
+
+#define BNX2_VPD_NVRAM_OFFSET 0x300
+#define BNX2_VPD_LEN 128
+#define BNX2_MAX_VER_SLEN 30
+
+ data = kmalloc(256, GFP_KERNEL);
+ if (!data)
+ return;
+
+ rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
+ BNX2_VPD_LEN);
+ if (rc)
+ goto vpd_done;
+
+ for (i = 0; i < BNX2_VPD_LEN; i += 4) {
+ data[i] = data[i + BNX2_VPD_LEN + 3];
+ data[i + 1] = data[i + BNX2_VPD_LEN + 2];
+ data[i + 2] = data[i + BNX2_VPD_LEN + 1];
+ data[i + 3] = data[i + BNX2_VPD_LEN];
+ }
+
+ i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
+ if (i < 0)
+ goto vpd_done;
+
+ rosize = pci_vpd_lrdt_size(&data[i]);
+ i += PCI_VPD_LRDT_TAG_SIZE;
+ block_end = i + rosize;
+
+ if (block_end > BNX2_VPD_LEN)
+ goto vpd_done;
+
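+	/* Only boards whose VPD manufacturer ID is "1028" (Dell's PCI
+	 * vendor ID) carry the vendor-specific version keyword used
+	 * below.
+	 */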
+ j = pci_vpd_find_info_keyword(data, i, rosize,
+ PCI_VPD_RO_KEYWORD_MFR_ID);
+ if (j < 0)
+ goto vpd_done;
+
+ len = pci_vpd_info_field_size(&data[j]);
+
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (j + len > block_end || len != 4 ||
+ memcmp(&data[j], "1028", 4))
+ goto vpd_done;
+
+ j = pci_vpd_find_info_keyword(data, i, rosize,
+ PCI_VPD_RO_KEYWORD_VENDOR0);
+ if (j < 0)
+ goto vpd_done;
+
+ len = pci_vpd_info_field_size(&data[j]);
+
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
+ goto vpd_done;
+
+ memcpy(bp->fw_version, &data[j], len);
+ bp->fw_version[len] = ' ';
+
+vpd_done:
+ kfree(data);
+}
+
+static int __devinit
+bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
+{
+ struct bnx2 *bp;
+ unsigned long mem_len;
+ int rc, i, j;
+ u32 reg;
+ u64 dma_mask, persist_dma_mask;
+ int err;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ bp = netdev_priv(dev);
+
+ bp->flags = 0;
+ bp->phy_flags = 0;
+
+ bp->temp_stats_blk =
+ kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
+
+ if (bp->temp_stats_blk == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ /* enable device (incl. PCI PM wakeup), and bus-mastering */
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+ goto err_out;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev,
+ "Cannot find PCI device base address, aborting\n");
+ rc = -ENODEV;
+ goto err_out_disable;
+ }
+
+ rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+ goto err_out_disable;
+ }
+
+ pci_set_master(pdev);
+
+ bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (bp->pm_cap == 0) {
+ dev_err(&pdev->dev,
+ "Cannot find power management capability, aborting\n");
+ rc = -EIO;
+ goto err_out_release;
+ }
+
+ bp->dev = dev;
+ bp->pdev = pdev;
+
+ spin_lock_init(&bp->phy_lock);
+ spin_lock_init(&bp->indirect_lock);
+#ifdef BCM_CNIC
+ mutex_init(&bp->cnic_lock);
+#endif
+ INIT_WORK(&bp->reset_task, bnx2_reset_task);
+
+ dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
+ mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
+ dev->mem_end = dev->mem_start + mem_len;
+ dev->irq = pdev->irq;
+
+ bp->regview = ioremap_nocache(dev->base_addr, mem_len);
+
+ if (!bp->regview) {
+ dev_err(&pdev->dev, "Cannot map register space, aborting\n");
+ rc = -ENOMEM;
+ goto err_out_release;
+ }
+
+ bnx2_set_power_state(bp, PCI_D0);
+
+	/* Configure byte swap and enable writes to the reg_window registers.
+	 * Rely on the CPU to do target byte swapping on big-endian systems;
+	 * the chip's target access swapping will not swap all accesses.
+	 */
+ REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
+ BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
+ BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
+
+ bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ if (!pci_is_pcie(pdev)) {
+ dev_err(&pdev->dev, "Not PCIE, aborting\n");
+ rc = -EIO;
+ goto err_out_unmap;
+ }
+ bp->flags |= BNX2_FLAG_PCIE;
+ if (CHIP_REV(bp) == CHIP_REV_Ax)
+ bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
+
+ /* AER (Advanced Error Reporting) hooks */
+ err = pci_enable_pcie_error_reporting(pdev);
+ if (!err)
+ bp->flags |= BNX2_FLAG_AER_ENABLED;
+
+ } else {
+ bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
+ if (bp->pcix_cap == 0) {
+ dev_err(&pdev->dev,
+ "Cannot find PCIX capability, aborting\n");
+ rc = -EIO;
+ goto err_out_unmap;
+ }
+ bp->flags |= BNX2_FLAG_BROKEN_STATS;
+ }
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
+ if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
+ bp->flags |= BNX2_FLAG_MSIX_CAP;
+ }
+
+ if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
+ if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
+ bp->flags |= BNX2_FLAG_MSI_CAP;
+ }
+
+ /* 5708 cannot support DMA addresses > 40-bit. */
+ if (CHIP_NUM(bp) == CHIP_NUM_5708)
+ persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
+ else
+ persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
+
+ /* Configure DMA attributes. */
+ if (pci_set_dma_mask(pdev, dma_mask) == 0) {
+ dev->features |= NETIF_F_HIGHDMA;
+ rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "pci_set_consistent_dma_mask failed, aborting\n");
+ goto err_out_unmap;
+ }
+ } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
+ dev_err(&pdev->dev, "System does not support DMA, aborting\n");
+ goto err_out_unmap;
+ }
+
+ if (!(bp->flags & BNX2_FLAG_PCIE))
+ bnx2_get_pci_speed(bp);
+
+ /* 5706A0 may falsely detect SERR and PERR. */
+ if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
+ reg = REG_RD(bp, PCI_COMMAND);
+ reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+ REG_WR(bp, PCI_COMMAND, reg);
+	} else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
+		   !(bp->flags & BNX2_FLAG_PCIX)) {
+ dev_err(&pdev->dev,
+ "5706 A1 can only be used in a PCIX bus, aborting\n");
+ goto err_out_unmap;
+ }
+
+ bnx2_init_nvram(bp);
+
+ reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
+
+ if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
+ BNX2_SHM_HDR_SIGNATURE_SIG) {
+ u32 off = PCI_FUNC(pdev->devfn) << 2;
+
+ bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
+ } else
+ bp->shmem_base = HOST_VIEW_SHMEM_BASE;
+
+ /* Get the permanent MAC address. First we need to make sure the
+ * firmware is actually running.
+ */
+ reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
+
+ if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
+ BNX2_DEV_INFO_SIGNATURE_MAGIC) {
+ dev_err(&pdev->dev, "Firmware not running, aborting\n");
+ rc = -ENODEV;
+ goto err_out_unmap;
+ }
+
+ bnx2_read_vpd_fw_ver(bp);
+
+ j = strlen(bp->fw_version);
+ reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
+ for (i = 0; i < 3 && j < 24; i++) {
+ u8 num, k, skip0;
+
+ if (i == 0) {
+ bp->fw_version[j++] = 'b';
+ bp->fw_version[j++] = 'c';
+ bp->fw_version[j++] = ' ';
+ }
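+		/* Emit each bootcode version byte in decimal, suppressing
+		 * leading zeros.
+		 */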
+ num = (u8) (reg >> (24 - (i * 8)));
+ for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
+ if (num >= k || !skip0 || k == 1) {
+ bp->fw_version[j++] = (num / k) + '0';
+ skip0 = 0;
+ }
+ }
+ if (i != 2)
+ bp->fw_version[j++] = '.';
+ }
+ reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
+ if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
+ bp->wol = 1;
+
+ if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
+ bp->flags |= BNX2_FLAG_ASF_ENABLE;
+
+ for (i = 0; i < 30; i++) {
+ reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
+ if (reg & BNX2_CONDITION_MFW_RUN_MASK)
+ break;
+ msleep(10);
+ }
+ }
+ reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
+ reg &= BNX2_CONDITION_MFW_RUN_MASK;
+ if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
+ reg != BNX2_CONDITION_MFW_RUN_NONE) {
+ u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
+
+ if (j < 32)
+ bp->fw_version[j++] = ' ';
+ for (i = 0; i < 3 && j < 28; i++) {
+ reg = bnx2_reg_rd_ind(bp, addr + i * 4);
+ reg = be32_to_cpu(reg);
+ memcpy(&bp->fw_version[j], &reg, 4);
+ j += 4;
+ }
+ }
+
+ reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
+ bp->mac_addr[0] = (u8) (reg >> 8);
+ bp->mac_addr[1] = (u8) reg;
+
+ reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
+ bp->mac_addr[2] = (u8) (reg >> 24);
+ bp->mac_addr[3] = (u8) (reg >> 16);
+ bp->mac_addr[4] = (u8) (reg >> 8);
+ bp->mac_addr[5] = (u8) reg;
+
+ bp->tx_ring_size = MAX_TX_DESC_CNT;
+ bnx2_set_rx_ring_size(bp, 255);
+
+ bp->tx_quick_cons_trip_int = 2;
+ bp->tx_quick_cons_trip = 20;
+ bp->tx_ticks_int = 18;
+ bp->tx_ticks = 80;
+
+ bp->rx_quick_cons_trip_int = 2;
+ bp->rx_quick_cons_trip = 12;
+ bp->rx_ticks_int = 18;
+ bp->rx_ticks = 18;
+
+ bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
+
+ bp->current_interval = BNX2_TIMER_INTERVAL;
+
+ bp->phy_addr = 1;
+
+ /* Disable WOL support if we are running on a SERDES chip. */
+ if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ bnx2_get_5709_media(bp);
+ else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
+ bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
+
+ bp->phy_port = PORT_TP;
+ if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+ bp->phy_port = PORT_FIBRE;
+ reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
+ if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
+ bp->flags |= BNX2_FLAG_NO_WOL;
+ bp->wol = 0;
+ }
+ if (CHIP_NUM(bp) == CHIP_NUM_5706) {
+ /* Don't do parallel detect on this board because of
+ * some board problems. The link will not go down
+ * if we do parallel detect.
+ */
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
+ pdev->subsystem_device == 0x310c)
+ bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
+ } else {
+ bp->phy_addr = 2;
+ if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
+ bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
+ }
+ } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
+ CHIP_NUM(bp) == CHIP_NUM_5708)
+ bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
+ else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
+ (CHIP_REV(bp) == CHIP_REV_Ax ||
+ CHIP_REV(bp) == CHIP_REV_Bx))
+ bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
+
+ bnx2_init_fw_cap(bp);
+
+ if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
+ (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
+ (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
+ !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
+ bp->flags |= BNX2_FLAG_NO_WOL;
+ bp->wol = 0;
+ }
+
+ if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
+ bp->tx_quick_cons_trip_int =
+ bp->tx_quick_cons_trip;
+ bp->tx_ticks_int = bp->tx_ticks;
+ bp->rx_quick_cons_trip_int =
+ bp->rx_quick_cons_trip;
+ bp->rx_ticks_int = bp->rx_ticks;
+ bp->comp_prod_trip_int = bp->comp_prod_trip;
+ bp->com_ticks_int = bp->com_ticks;
+ bp->cmd_ticks_int = bp->cmd_ticks;
+ }
+
+ /* Disable MSI on 5706 if AMD 8132 bridge is found.
+ *
+ * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
+ * with byte enables disabled on the unused 32-bit word. This is legal
+ * but causes problems on the AMD 8132 which will eventually stop
+ * responding after a while.
+ *
+ * AMD believes this incompatibility is unique to the 5706, and
+ * prefers to locally disable MSI rather than globally disabling it.
+ */
+ if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
+ struct pci_dev *amd_8132 = NULL;
+
+ while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_8132_BRIDGE,
+ amd_8132))) {
+
+ if (amd_8132->revision >= 0x10 &&
+ amd_8132->revision <= 0x13) {
+ disable_msi = 1;
+ pci_dev_put(amd_8132);
+ break;
+ }
+ }
+ }
+
+ bnx2_set_default_link(bp);
+ bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
+
+ init_timer(&bp->timer);
+ bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
+ bp->timer.data = (unsigned long) bp;
+ bp->timer.function = bnx2_timer;
+
+#ifdef BCM_CNIC
+ if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
+ bp->cnic_eth_dev.max_iscsi_conn =
+ (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
+ BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
+#endif
+ pci_save_state(pdev);
+
+ return 0;
+
+err_out_unmap:
+ if (bp->flags & BNX2_FLAG_AER_ENABLED) {
+ pci_disable_pcie_error_reporting(pdev);
+ bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+ }
+
+ if (bp->regview) {
+ iounmap(bp->regview);
+ bp->regview = NULL;
+ }
+
+err_out_release:
+ pci_release_regions(pdev);
+
+err_out_disable:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+err_out:
+ return rc;
+}
+
+static char * __devinit
+bnx2_bus_string(struct bnx2 *bp, char *str)
+{
+ char *s = str;
+
+ if (bp->flags & BNX2_FLAG_PCIE) {
+ s += sprintf(s, "PCI Express");
+ } else {
+ s += sprintf(s, "PCI");
+ if (bp->flags & BNX2_FLAG_PCIX)
+ s += sprintf(s, "-X");
+ if (bp->flags & BNX2_FLAG_PCI_32BIT)
+ s += sprintf(s, " 32-bit");
+ else
+ s += sprintf(s, " 64-bit");
+ s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
+ }
+ return str;
+}
+
+static void
+bnx2_del_napi(struct bnx2 *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->irq_nvecs; i++)
+ netif_napi_del(&bp->bnx2_napi[i].napi);
+}
+
+static void
+bnx2_init_napi(struct bnx2 *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->irq_nvecs; i++) {
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+ int (*poll)(struct napi_struct *, int);
+
+ if (i == 0)
+ poll = bnx2_poll;
+ else
+ poll = bnx2_poll_msix;
+
+ netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
+ bnapi->bp = bp;
+ }
+}
+
+static const struct net_device_ops bnx2_netdev_ops = {
+ .ndo_open = bnx2_open,
+ .ndo_start_xmit = bnx2_start_xmit,
+ .ndo_stop = bnx2_close,
+ .ndo_get_stats64 = bnx2_get_stats64,
+ .ndo_set_rx_mode = bnx2_set_rx_mode,
+ .ndo_do_ioctl = bnx2_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = bnx2_change_mac_addr,
+ .ndo_change_mtu = bnx2_change_mtu,
+ .ndo_fix_features = bnx2_fix_features,
+ .ndo_set_features = bnx2_set_features,
+ .ndo_tx_timeout = bnx2_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = poll_bnx2,
+#endif
+};
+
+static int __devinit
+bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int version_printed = 0;
+ struct net_device *dev = NULL;
+ struct bnx2 *bp;
+ int rc;
+ char str[40];
+
+ if (version_printed++ == 0)
+ pr_info("%s", version);
+
+	/* dev is zeroed by alloc_etherdev_mq() */
+ dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
+
+ if (!dev)
+ return -ENOMEM;
+
+ rc = bnx2_init_board(pdev, dev);
+ if (rc < 0) {
+ free_netdev(dev);
+ return rc;
+ }
+
+ dev->netdev_ops = &bnx2_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->ethtool_ops = &bnx2_ethtool_ops;
+
+ bp = netdev_priv(dev);
+
+ pci_set_drvdata(pdev, dev);
+
+ memcpy(dev->dev_addr, bp->mac_addr, 6);
+ memcpy(dev->perm_addr, bp->mac_addr, 6);
+
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO_ECN |
+ NETIF_F_RXHASH | NETIF_F_RXCSUM;
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+
+ dev->vlan_features = dev->hw_features;
+ dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->features |= dev->hw_features;
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
+ if ((rc = register_netdev(dev))) {
+ dev_err(&pdev->dev, "Cannot register net device\n");
+ goto error;
+ }
+
+ netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
+ board_info[ent->driver_data].name,
+ ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
+ ((CHIP_ID(bp) & 0x0ff0) >> 4),
+ bnx2_bus_string(bp, str),
+ dev->base_addr,
+ bp->pdev->irq, dev->dev_addr);
+
+ return 0;
+
+error:
+ if (bp->regview)
+ iounmap(bp->regview);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(dev);
+ return rc;
+}
+
+static void __devexit
+bnx2_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2 *bp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ del_timer_sync(&bp->timer);
+ cancel_work_sync(&bp->reset_task);
+
+ if (bp->regview)
+ iounmap(bp->regview);
+
+ kfree(bp->temp_stats_blk);
+
+ if (bp->flags & BNX2_FLAG_AER_ENABLED) {
+ pci_disable_pcie_error_reporting(pdev);
+ bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+ }
+
+ bnx2_release_firmware(bp);
+
+ free_netdev(dev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static int
+bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2 *bp = netdev_priv(dev);
+
+ /* PCI register 4 needs to be saved whether netif_running() or not.
+ * MSI address and data need to be saved if using MSI and
+ * netif_running().
+ */
+ pci_save_state(pdev);
+ if (!netif_running(dev))
+ return 0;
+
+ cancel_work_sync(&bp->reset_task);
+ bnx2_netif_stop(bp, true);
+ netif_device_detach(dev);
+ del_timer_sync(&bp->timer);
+ bnx2_shutdown_chip(bp);
+ bnx2_free_skbs(bp);
+ bnx2_set_power_state(bp, pci_choose_state(pdev, state));
+ return 0;
+}
+
+static int
+bnx2_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2 *bp = netdev_priv(dev);
+
+ pci_restore_state(pdev);
+ if (!netif_running(dev))
+ return 0;
+
+ bnx2_set_power_state(bp, PCI_D0);
+ netif_device_attach(dev);
+ bnx2_init_nic(bp, 1);
+ bnx2_netif_start(bp, true);
+ return 0;
+}
+
+/**
+ * bnx2_io_error_detected - called when a PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current PCI connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2 *bp = netdev_priv(dev);
+
+ rtnl_lock();
+ netif_device_detach(dev);
+
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (netif_running(dev)) {
+ bnx2_netif_stop(bp, true);
+ del_timer_sync(&bp->timer);
+ bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
+ }
+
+ pci_disable_device(pdev);
+ rtnl_unlock();
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * bnx2_io_slot_reset - called after the PCI bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2 *bp = netdev_priv(dev);
+ pci_ers_result_t result;
+ int err;
+
+ rtnl_lock();
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset\n");
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ if (netif_running(dev)) {
+ bnx2_set_power_state(bp, PCI_D0);
+ bnx2_init_nic(bp, 1);
+ }
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
+ rtnl_unlock();
+
+ if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
+ return result;
+
+ err = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
+ err); /* non-fatal, continue */
+ }
+
+ return result;
+}
+
+/**
+ * bnx2_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation.
+ */
+static void bnx2_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2 *bp = netdev_priv(dev);
+
+ rtnl_lock();
+ if (netif_running(dev))
+ bnx2_netif_start(bp, true);
+
+ netif_device_attach(dev);
+ rtnl_unlock();
+}
+
+static struct pci_error_handlers bnx2_err_handler = {
+ .error_detected = bnx2_io_error_detected,
+ .slot_reset = bnx2_io_slot_reset,
+ .resume = bnx2_io_resume,
+};
+
+static struct pci_driver bnx2_pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = bnx2_pci_tbl,
+ .probe = bnx2_init_one,
+ .remove = __devexit_p(bnx2_remove_one),
+ .suspend = bnx2_suspend,
+ .resume = bnx2_resume,
+ .err_handler = &bnx2_err_handler,
+};
+
+static int __init bnx2_init(void)
+{
+ return pci_register_driver(&bnx2_pci_driver);
+}
+
+static void __exit bnx2_cleanup(void)
+{
+ pci_unregister_driver(&bnx2_pci_driver);
+}
+
+module_init(bnx2_init);
+module_exit(bnx2_cleanup);
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
new file mode 100644
index 000000000000..fc50d4267df8
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -0,0 +1,7388 @@
+/* bnx2.h: Broadcom NX2 network driver.
+ *
+ * Copyright (c) 2004-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Michael Chan (mchan@broadcom.com)
+ */
+
+
+#ifndef BNX2_H
+#define BNX2_H
+
+/* Hardware data structures and register definitions automatically
+ * generated from RTL code. Do not modify.
+ */
+
+/*
+ * tx_bd definition
+ */
+struct tx_bd {
+ u32 tx_bd_haddr_hi;
+ u32 tx_bd_haddr_lo;
+ u32 tx_bd_mss_nbytes;
+ #define TX_BD_TCP6_OFF2_SHL (14)
+ u32 tx_bd_vlan_tag_flags;
+ #define TX_BD_FLAGS_CONN_FAULT (1<<0)
+ #define TX_BD_FLAGS_TCP6_OFF0_MSK (3<<1)
+ #define TX_BD_FLAGS_TCP6_OFF0_SHL (1)
+ #define TX_BD_FLAGS_TCP_UDP_CKSUM (1<<1)
+ #define TX_BD_FLAGS_IP_CKSUM (1<<2)
+ #define TX_BD_FLAGS_VLAN_TAG (1<<3)
+ #define TX_BD_FLAGS_COAL_NOW (1<<4)
+ #define TX_BD_FLAGS_DONT_GEN_CRC (1<<5)
+ #define TX_BD_FLAGS_END (1<<6)
+ #define TX_BD_FLAGS_START (1<<7)
+ #define TX_BD_FLAGS_SW_OPTION_WORD (0x1f<<8)
+ #define TX_BD_FLAGS_TCP6_OFF4_SHL (12)
+ #define TX_BD_FLAGS_SW_FLAGS (1<<13)
+ #define TX_BD_FLAGS_SW_SNAP (1<<14)
+ #define TX_BD_FLAGS_SW_LSO (1<<15)
+
+};
+
+
+/*
+ * rx_bd definition
+ */
+struct rx_bd {
+ u32 rx_bd_haddr_hi;
+ u32 rx_bd_haddr_lo;
+ u32 rx_bd_len;
+ u32 rx_bd_flags;
+ #define RX_BD_FLAGS_NOPUSH (1<<0)
+ #define RX_BD_FLAGS_DUMMY (1<<1)
+ #define RX_BD_FLAGS_END (1<<2)
+ #define RX_BD_FLAGS_START (1<<3)
+
+};
+
+#define BNX2_RX_ALIGN 16
+
+/*
+ * status_block definition
+ */
+struct status_block {
+ u32 status_attn_bits;
+ #define STATUS_ATTN_BITS_LINK_STATE (1L<<0)
+ #define STATUS_ATTN_BITS_TX_SCHEDULER_ABORT (1L<<1)
+ #define STATUS_ATTN_BITS_TX_BD_READ_ABORT (1L<<2)
+ #define STATUS_ATTN_BITS_TX_BD_CACHE_ABORT (1L<<3)
+ #define STATUS_ATTN_BITS_TX_PROCESSOR_ABORT (1L<<4)
+ #define STATUS_ATTN_BITS_TX_DMA_ABORT (1L<<5)
+ #define STATUS_ATTN_BITS_TX_PATCHUP_ABORT (1L<<6)
+ #define STATUS_ATTN_BITS_TX_ASSEMBLER_ABORT (1L<<7)
+ #define STATUS_ATTN_BITS_RX_PARSER_MAC_ABORT (1L<<8)
+ #define STATUS_ATTN_BITS_RX_PARSER_CATCHUP_ABORT (1L<<9)
+ #define STATUS_ATTN_BITS_RX_MBUF_ABORT (1L<<10)
+ #define STATUS_ATTN_BITS_RX_LOOKUP_ABORT (1L<<11)
+ #define STATUS_ATTN_BITS_RX_PROCESSOR_ABORT (1L<<12)
+ #define STATUS_ATTN_BITS_RX_V2P_ABORT (1L<<13)
+ #define STATUS_ATTN_BITS_RX_BD_CACHE_ABORT (1L<<14)
+ #define STATUS_ATTN_BITS_RX_DMA_ABORT (1L<<15)
+ #define STATUS_ATTN_BITS_COMPLETION_ABORT (1L<<16)
+ #define STATUS_ATTN_BITS_HOST_COALESCE_ABORT (1L<<17)
+ #define STATUS_ATTN_BITS_MAILBOX_QUEUE_ABORT (1L<<18)
+ #define STATUS_ATTN_BITS_CONTEXT_ABORT (1L<<19)
+ #define STATUS_ATTN_BITS_CMD_SCHEDULER_ABORT (1L<<20)
+ #define STATUS_ATTN_BITS_CMD_PROCESSOR_ABORT (1L<<21)
+ #define STATUS_ATTN_BITS_MGMT_PROCESSOR_ABORT (1L<<22)
+ #define STATUS_ATTN_BITS_MAC_ABORT (1L<<23)
+ #define STATUS_ATTN_BITS_TIMER_ABORT (1L<<24)
+ #define STATUS_ATTN_BITS_DMAE_ABORT (1L<<25)
+ #define STATUS_ATTN_BITS_FLSH_ABORT (1L<<26)
+ #define STATUS_ATTN_BITS_GRC_ABORT (1L<<27)
+ #define STATUS_ATTN_BITS_EPB_ERROR (1L<<30)
+ #define STATUS_ATTN_BITS_PARITY_ERROR (1L<<31)
+
+ u32 status_attn_bits_ack;
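+ /* The 16-bit index fields below appear in opposite order within
+ * each 32-bit word for big- vs. little-endian builds, so that the
+ * chip's little-endian 32-bit DMA writes land on the right fields
+ * regardless of host byte order.
+ */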
+#if defined(__BIG_ENDIAN)
+ u16 status_tx_quick_consumer_index0;
+ u16 status_tx_quick_consumer_index1;
+ u16 status_tx_quick_consumer_index2;
+ u16 status_tx_quick_consumer_index3;
+ u16 status_rx_quick_consumer_index0;
+ u16 status_rx_quick_consumer_index1;
+ u16 status_rx_quick_consumer_index2;
+ u16 status_rx_quick_consumer_index3;
+ u16 status_rx_quick_consumer_index4;
+ u16 status_rx_quick_consumer_index5;
+ u16 status_rx_quick_consumer_index6;
+ u16 status_rx_quick_consumer_index7;
+ u16 status_rx_quick_consumer_index8;
+ u16 status_rx_quick_consumer_index9;
+ u16 status_rx_quick_consumer_index10;
+ u16 status_rx_quick_consumer_index11;
+ u16 status_rx_quick_consumer_index12;
+ u16 status_rx_quick_consumer_index13;
+ u16 status_rx_quick_consumer_index14;
+ u16 status_rx_quick_consumer_index15;
+ u16 status_completion_producer_index;
+ u16 status_cmd_consumer_index;
+ u16 status_idx;
+ u8 status_unused;
+ u8 status_blk_num;
+#elif defined(__LITTLE_ENDIAN)
+ u16 status_tx_quick_consumer_index1;
+ u16 status_tx_quick_consumer_index0;
+ u16 status_tx_quick_consumer_index3;
+ u16 status_tx_quick_consumer_index2;
+ u16 status_rx_quick_consumer_index1;
+ u16 status_rx_quick_consumer_index0;
+ u16 status_rx_quick_consumer_index3;
+ u16 status_rx_quick_consumer_index2;
+ u16 status_rx_quick_consumer_index5;
+ u16 status_rx_quick_consumer_index4;
+ u16 status_rx_quick_consumer_index7;
+ u16 status_rx_quick_consumer_index6;
+ u16 status_rx_quick_consumer_index9;
+ u16 status_rx_quick_consumer_index8;
+ u16 status_rx_quick_consumer_index11;
+ u16 status_rx_quick_consumer_index10;
+ u16 status_rx_quick_consumer_index13;
+ u16 status_rx_quick_consumer_index12;
+ u16 status_rx_quick_consumer_index15;
+ u16 status_rx_quick_consumer_index14;
+ u16 status_cmd_consumer_index;
+ u16 status_completion_producer_index;
+ u8 status_blk_num;
+ u8 status_unused;
+ u16 status_idx;
+#endif
+};
+
+/*
+ * status_block_msix definition
+ */
+struct status_block_msix {
+#if defined(__BIG_ENDIAN)
+ u16 status_tx_quick_consumer_index;
+ u16 status_rx_quick_consumer_index;
+ u16 status_completion_producer_index;
+ u16 status_cmd_consumer_index;
+ u32 status_unused;
+ u16 status_idx;
+ u8 status_unused2;
+ u8 status_blk_num;
+#elif defined(__LITTLE_ENDIAN)
+ u16 status_rx_quick_consumer_index;
+ u16 status_tx_quick_consumer_index;
+ u16 status_cmd_consumer_index;
+ u16 status_completion_producer_index;
+ u32 status_unused;
+ u8 status_blk_num;
+ u8 status_unused2;
+ u16 status_idx;
+#endif
+};
+
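+/* bnx2.c lays out the per-vector MSI-X status blocks back to back at
+ * this stride, so each vector's block starts on a 128-byte boundary
+ * (one or two cache lines on common systems).
+ */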
+#define BNX2_SBLK_MSIX_ALIGN_SIZE 128
+
+
+/*
+ * statistics_block definition
+ */
+struct statistics_block {
+ u32 stat_IfHCInOctets_hi;
+ u32 stat_IfHCInOctets_lo;
+ u32 stat_IfHCInBadOctets_hi;
+ u32 stat_IfHCInBadOctets_lo;
+ u32 stat_IfHCOutOctets_hi;
+ u32 stat_IfHCOutOctets_lo;
+ u32 stat_IfHCOutBadOctets_hi;
+ u32 stat_IfHCOutBadOctets_lo;
+ u32 stat_IfHCInUcastPkts_hi;
+ u32 stat_IfHCInUcastPkts_lo;
+ u32 stat_IfHCInMulticastPkts_hi;
+ u32 stat_IfHCInMulticastPkts_lo;
+ u32 stat_IfHCInBroadcastPkts_hi;
+ u32 stat_IfHCInBroadcastPkts_lo;
+ u32 stat_IfHCOutUcastPkts_hi;
+ u32 stat_IfHCOutUcastPkts_lo;
+ u32 stat_IfHCOutMulticastPkts_hi;
+ u32 stat_IfHCOutMulticastPkts_lo;
+ u32 stat_IfHCOutBroadcastPkts_hi;
+ u32 stat_IfHCOutBroadcastPkts_lo;
+ u32 stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
+ u32 stat_Dot3StatsCarrierSenseErrors;
+ u32 stat_Dot3StatsFCSErrors;
+ u32 stat_Dot3StatsAlignmentErrors;
+ u32 stat_Dot3StatsSingleCollisionFrames;
+ u32 stat_Dot3StatsMultipleCollisionFrames;
+ u32 stat_Dot3StatsDeferredTransmissions;
+ u32 stat_Dot3StatsExcessiveCollisions;
+ u32 stat_Dot3StatsLateCollisions;
+ u32 stat_EtherStatsCollisions;
+ u32 stat_EtherStatsFragments;
+ u32 stat_EtherStatsJabbers;
+ u32 stat_EtherStatsUndersizePkts;
+ u32 stat_EtherStatsOverrsizePkts;
+ u32 stat_EtherStatsPktsRx64Octets;
+ u32 stat_EtherStatsPktsRx65Octetsto127Octets;
+ u32 stat_EtherStatsPktsRx128Octetsto255Octets;
+ u32 stat_EtherStatsPktsRx256Octetsto511Octets;
+ u32 stat_EtherStatsPktsRx512Octetsto1023Octets;
+ u32 stat_EtherStatsPktsRx1024Octetsto1522Octets;
+ u32 stat_EtherStatsPktsRx1523Octetsto9022Octets;
+ u32 stat_EtherStatsPktsTx64Octets;
+ u32 stat_EtherStatsPktsTx65Octetsto127Octets;
+ u32 stat_EtherStatsPktsTx128Octetsto255Octets;
+ u32 stat_EtherStatsPktsTx256Octetsto511Octets;
+ u32 stat_EtherStatsPktsTx512Octetsto1023Octets;
+ u32 stat_EtherStatsPktsTx1024Octetsto1522Octets;
+ u32 stat_EtherStatsPktsTx1523Octetsto9022Octets;
+ u32 stat_XonPauseFramesReceived;
+ u32 stat_XoffPauseFramesReceived;
+ u32 stat_OutXonSent;
+ u32 stat_OutXoffSent;
+ u32 stat_FlowControlDone;
+ u32 stat_MacControlFramesReceived;
+ u32 stat_XoffStateEntered;
+ u32 stat_IfInFramesL2FilterDiscards;
+ u32 stat_IfInRuleCheckerDiscards;
+ u32 stat_IfInFTQDiscards;
+ u32 stat_IfInMBUFDiscards;
+ u32 stat_IfInRuleCheckerP4Hit;
+ u32 stat_CatchupInRuleCheckerDiscards;
+ u32 stat_CatchupInFTQDiscards;
+ u32 stat_CatchupInMBUFDiscards;
+ u32 stat_CatchupInRuleCheckerP4Hit;
+ u32 stat_GenStat00;
+ u32 stat_GenStat01;
+ u32 stat_GenStat02;
+ u32 stat_GenStat03;
+ u32 stat_GenStat04;
+ u32 stat_GenStat05;
+ u32 stat_GenStat06;
+ u32 stat_GenStat07;
+ u32 stat_GenStat08;
+ u32 stat_GenStat09;
+ u32 stat_GenStat10;
+ u32 stat_GenStat11;
+ u32 stat_GenStat12;
+ u32 stat_GenStat13;
+ u32 stat_GenStat14;
+ u32 stat_GenStat15;
+ u32 stat_FwRxDrop;
+};
+
+
+/*
+ * l2_fhdr definition
+ */
+struct l2_fhdr {
+ u32 l2_fhdr_status;
+ #define L2_FHDR_STATUS_RULE_CLASS (0x7<<0)
+ #define L2_FHDR_STATUS_RULE_P2 (1<<3)
+ #define L2_FHDR_STATUS_RULE_P3 (1<<4)
+ #define L2_FHDR_STATUS_RULE_P4 (1<<5)
+ #define L2_FHDR_STATUS_L2_VLAN_TAG (1<<6)
+ #define L2_FHDR_STATUS_L2_LLC_SNAP (1<<7)
+ #define L2_FHDR_STATUS_RSS_HASH (1<<8)
+ #define L2_FHDR_STATUS_IP_DATAGRAM (1<<13)
+ #define L2_FHDR_STATUS_TCP_SEGMENT (1<<14)
+ #define L2_FHDR_STATUS_UDP_DATAGRAM (1<<15)
+
+ #define L2_FHDR_STATUS_SPLIT (1<<16)
+ #define L2_FHDR_ERRORS_BAD_CRC (1<<17)
+ #define L2_FHDR_ERRORS_PHY_DECODE (1<<18)
+ #define L2_FHDR_ERRORS_ALIGNMENT (1<<19)
+ #define L2_FHDR_ERRORS_TOO_SHORT (1<<20)
+ #define L2_FHDR_ERRORS_GIANT_FRAME (1<<21)
+ #define L2_FHDR_ERRORS_TCP_XSUM (1<<28)
+ #define L2_FHDR_ERRORS_UDP_XSUM (1<<31)
+
+ #define L2_FHDR_STATUS_USE_RXHASH \
+ (L2_FHDR_STATUS_TCP_SEGMENT | L2_FHDR_STATUS_RSS_HASH)
+
+ u32 l2_fhdr_hash;
+#if defined(__BIG_ENDIAN)
+ u16 l2_fhdr_pkt_len;
+ u16 l2_fhdr_vlan_tag;
+ u16 l2_fhdr_ip_xsum;
+ u16 l2_fhdr_tcp_udp_xsum;
+#elif defined(__LITTLE_ENDIAN)
+ u16 l2_fhdr_vlan_tag;
+ u16 l2_fhdr_pkt_len;
+ u16 l2_fhdr_tcp_udp_xsum;
+ u16 l2_fhdr_ip_xsum;
+#endif
+};
+
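+/* Received frames are DMAed with the l2_fhdr prepended; the extra two
+ * bytes keep the IP header of a standard Ethernet frame 4-byte aligned
+ * once the 14-byte MAC header is skipped.
+ */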
+#define BNX2_RX_OFFSET (sizeof(struct l2_fhdr) + 2)
+
+/*
+ * l2_context definition
+ */
+#define BNX2_L2CTX_TYPE 0x00000000
+#define BNX2_L2CTX_TYPE_SIZE_L2 ((0xc0/0x20)<<16)
+#define BNX2_L2CTX_TYPE_TYPE (0xf<<28)
+#define BNX2_L2CTX_TYPE_TYPE_EMPTY (0<<28)
+#define BNX2_L2CTX_TYPE_TYPE_L2 (1<<28)
+
+#define BNX2_L2CTX_TX_HOST_BIDX 0x00000088
+#define BNX2_L2CTX_EST_NBD 0x00000088
+#define BNX2_L2CTX_CMD_TYPE 0x00000088
+#define BNX2_L2CTX_CMD_TYPE_TYPE (0xf<<24)
+#define BNX2_L2CTX_CMD_TYPE_TYPE_L2 (0<<24)
+#define BNX2_L2CTX_CMD_TYPE_TYPE_TCP (1<<24)
+
+#define BNX2_L2CTX_TX_HOST_BSEQ 0x00000090
+#define BNX2_L2CTX_TSCH_BSEQ 0x00000094
+#define BNX2_L2CTX_TBDR_BSEQ 0x00000098
+#define BNX2_L2CTX_TBDR_BOFF 0x0000009c
+#define BNX2_L2CTX_TBDR_BIDX 0x0000009c
+#define BNX2_L2CTX_TBDR_BHADDR_HI 0x000000a0
+#define BNX2_L2CTX_TBDR_BHADDR_LO 0x000000a4
+#define BNX2_L2CTX_TXP_BOFF 0x000000a8
+#define BNX2_L2CTX_TXP_BIDX 0x000000a8
+#define BNX2_L2CTX_TXP_BSEQ 0x000000ac
+
+#define BNX2_L2CTX_TYPE_XI 0x00000080
+#define BNX2_L2CTX_CMD_TYPE_XI 0x00000240
+#define BNX2_L2CTX_TBDR_BHADDR_HI_XI 0x00000258
+#define BNX2_L2CTX_TBDR_BHADDR_LO_XI 0x0000025c
+
+/*
+ * l2_bd_chain_context definition
+ */
+#define BNX2_L2CTX_BD_PRE_READ 0x00000000
+#define BNX2_L2CTX_CTX_SIZE 0x00000000
+#define BNX2_L2CTX_CTX_TYPE 0x00000000
+#define BNX2_L2CTX_FLOW_CTRL_ENABLE 0x000000ff
+#define BNX2_L2CTX_CTX_TYPE_SIZE_L2 ((0x20/20)<<16)
+#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE (0xf<<28)
+#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_UNDEFINED (0<<28)
+#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE (1<<28)
+
+#define BNX2_L2CTX_HOST_BDIDX 0x00000004
+#define BNX2_L2CTX_L5_STATUSB_NUM_SHIFT 16
+#define BNX2_L2CTX_L2_STATUSB_NUM_SHIFT 24
+#define BNX2_L2CTX_L5_STATUSB_NUM(sb_id) \
+ (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L5_STATUSB_NUM_SHIFT) : 0)
+#define BNX2_L2CTX_L2_STATUSB_NUM(sb_id) \
+ (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT) : 0)
+#define BNX2_L2CTX_HOST_BSEQ 0x00000008
+#define BNX2_L2CTX_NX_BSEQ 0x0000000c
+#define BNX2_L2CTX_NX_BDHADDR_HI 0x00000010
+#define BNX2_L2CTX_NX_BDHADDR_LO 0x00000014
+#define BNX2_L2CTX_NX_BDIDX 0x00000018
+
+#define BNX2_L2CTX_HOST_PG_BDIDX 0x00000044
+#define BNX2_L2CTX_PG_BUF_SIZE 0x00000048
+#define BNX2_L2CTX_RBDC_KEY 0x0000004c
+#define BNX2_L2CTX_RBDC_JUMBO_KEY 0x3ffe
+#define BNX2_L2CTX_NX_PG_BDHADDR_HI 0x00000050
+#define BNX2_L2CTX_NX_PG_BDHADDR_LO 0x00000054
+
+/*
+ * pci_config_l definition
+ * offset: 0x0000
+ */
+#define BNX2_PCICFG_MSI_CONTROL 0x00000058
+#define BNX2_PCICFG_MSI_CONTROL_ENABLE (1L<<16)
+
+#define BNX2_PCICFG_MISC_CONFIG 0x00000068
+#define BNX2_PCICFG_MISC_CONFIG_TARGET_BYTE_SWAP (1L<<2)
+#define BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP (1L<<3)
+#define BNX2_PCICFG_MISC_CONFIG_RESERVED1 (1L<<4)
+#define BNX2_PCICFG_MISC_CONFIG_CLOCK_CTL_ENA (1L<<5)
+#define BNX2_PCICFG_MISC_CONFIG_TARGET_GRC_WORD_SWAP (1L<<6)
+#define BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA (1L<<7)
+#define BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ (1L<<8)
+#define BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY (1L<<9)
+#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN1_SWAP_EN (1L<<10)
+#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN2_SWAP_EN (1L<<11)
+#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN3_SWAP_EN (1L<<12)
+#define BNX2_PCICFG_MISC_CONFIG_ASIC_METAL_REV (0xffL<<16)
+#define BNX2_PCICFG_MISC_CONFIG_ASIC_BASE_REV (0xfL<<24)
+#define BNX2_PCICFG_MISC_CONFIG_ASIC_ID (0xfL<<28)
+
+#define BNX2_PCICFG_MISC_STATUS 0x0000006c
+#define BNX2_PCICFG_MISC_STATUS_INTA_VALUE (1L<<0)
+#define BNX2_PCICFG_MISC_STATUS_32BIT_DET (1L<<1)
+#define BNX2_PCICFG_MISC_STATUS_M66EN (1L<<2)
+#define BNX2_PCICFG_MISC_STATUS_PCIX_DET (1L<<3)
+#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED (0x3L<<4)
+#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_66 (0L<<4)
+#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_100 (1L<<4)
+#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_133 (2L<<4)
+#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_PCI_MODE (3L<<4)
+#define BNX2_PCICFG_MISC_STATUS_BAD_MEM_WRITE_BE (1L<<8)
+
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS 0x00000070
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET (0xfL<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ (0L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ (1L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ (2L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ (3L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ (4L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ (5L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ (6L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ (7L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW (0xfL<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_DISABLE (1L<<6)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT (1L<<7)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC (0x7L<<8)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_UNDEF (0L<<8)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_12 (1L<<8)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_6 (2L<<8)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_62 (4L<<8)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_MIN_POWER (1L<<11)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED (0xfL<<12)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_100 (0L<<12)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_80 (1L<<12)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_50 (2L<<12)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_40 (4L<<12)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_25 (8L<<12)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_STOP (1L<<16)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_17 (1L<<17)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_18 (1L<<18)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_19 (1L<<19)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED (0xfffL<<20)
+
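+/* Indirect register access window: the driver writes a target offset
+ * to BNX2_PCICFG_REG_WINDOW_ADDRESS and then reads or writes the value
+ * through BNX2_PCICFG_REG_WINDOW (see bnx2_reg_rd_ind() and
+ * bnx2_reg_wr_ind() in bnx2.c).
+ */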
+#define BNX2_PCICFG_REG_WINDOW_ADDRESS 0x00000078
+#define BNX2_PCICFG_REG_WINDOW_ADDRESS_VAL (0xfffffL<<2)
+
+#define BNX2_PCICFG_REG_WINDOW 0x00000080
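+/* Interrupts are acknowledged by writing the last-seen status block
+ * index (with INDEX_VALID set and the vector number in the
+ * INTERRUPT_NUM field) to this register; MASK_INT keeps the vector
+ * masked until the next ack.
+ */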
+#define BNX2_PCICFG_INT_ACK_CMD 0x00000084
+#define BNX2_PCICFG_INT_ACK_CMD_INDEX (0xffffL<<0)
+#define BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID (1L<<16)
+#define BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM (1L<<17)
+#define BNX2_PCICFG_INT_ACK_CMD_MASK_INT (1L<<18)
+#define BNX2_PCICFG_INT_ACK_CMD_INTERRUPT_NUM (0xfL<<24)
+#define BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT 24
+
+#define BNX2_PCICFG_STATUS_BIT_SET_CMD 0x00000088
+#define BNX2_PCICFG_STATUS_BIT_CLEAR_CMD 0x0000008c
+#define BNX2_PCICFG_MAILBOX_QUEUE_ADDR 0x00000090
+#define BNX2_PCICFG_MAILBOX_QUEUE_DATA 0x00000094
+
+#define BNX2_PCICFG_DEVICE_CONTROL 0x000000b4
+#define BNX2_PCICFG_DEVICE_STATUS_NO_PEND ((1L<<5)<<16)
+
+/*
+ * pci_reg definition
+ * offset: 0x400
+ */
+#define BNX2_PCI_GRC_WINDOW_ADDR 0x00000400
+#define BNX2_PCI_GRC_WINDOW_ADDR_VALUE (0x1ffL<<13)
+#define BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN (1L<<31)
+
+#define BNX2_PCI_GRC_WINDOW2_BASE 0xc000
+#define BNX2_PCI_GRC_WINDOW3_BASE 0xe000
+
+#define BNX2_PCI_CONFIG_1 0x00000404
+#define BNX2_PCI_CONFIG_1_RESERVED0 (0xffL<<0)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY (0x7L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_OFF (0L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_16 (1L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_32 (2L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_64 (3L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_128 (4L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_256 (5L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_512 (6L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_1024 (7L<<8)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY (0x7L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_OFF (0L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_16 (1L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_32 (2L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_64 (3L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_128 (4L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_256 (5L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_512 (6L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_1024 (7L<<11)
+#define BNX2_PCI_CONFIG_1_RESERVED1 (0x3ffffL<<14)
+
+#define BNX2_PCI_CONFIG_2 0x00000408
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE (0xfL<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_256K (3L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_512K (4L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_1M (5L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_2M (6L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_4M (7L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_8M (8L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_16M (9L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_32M (10L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_64M (11L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_128M (12L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_64ENA (1L<<4)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5)
+#define BNX2_PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6)
+#define BNX2_PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_1K (1L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_2K (2L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_4K (3L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_8K (4L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_16K (5L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_32K (6L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_64K (7L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_128K (8L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_256K (9L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_512K (10L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_1M (11L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_2M (12L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_4M (13L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_8M (14L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_16M (15L<<8)
+#define BNX2_PCI_CONFIG_2_MAX_SPLIT_LIMIT (0x1fL<<16)
+#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT (0x3L<<21)
+#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_512 (0L<<21)
+#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_1K (1L<<21)
+#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_2K (2L<<21)
+#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_4K (3L<<21)
+#define BNX2_PCI_CONFIG_2_FORCE_32_BIT_MSTR (1L<<23)
+#define BNX2_PCI_CONFIG_2_FORCE_32_BIT_TGT (1L<<24)
+#define BNX2_PCI_CONFIG_2_KEEP_REQ_ASSERT (1L<<25)
+#define BNX2_PCI_CONFIG_2_RESERVED0 (0x3fL<<26)
+#define BNX2_PCI_CONFIG_2_BAR_PREFETCH_XI (1L<<16)
+#define BNX2_PCI_CONFIG_2_RESERVED0_XI (0x7fffL<<17)
+
+#define BNX2_PCI_CONFIG_3 0x0000040c
+#define BNX2_PCI_CONFIG_3_STICKY_BYTE (0xffL<<0)
+#define BNX2_PCI_CONFIG_3_REG_STICKY_BYTE (0xffL<<8)
+#define BNX2_PCI_CONFIG_3_FORCE_PME (1L<<24)
+#define BNX2_PCI_CONFIG_3_PME_STATUS (1L<<25)
+#define BNX2_PCI_CONFIG_3_PME_ENABLE (1L<<26)
+#define BNX2_PCI_CONFIG_3_PM_STATE (0x3L<<27)
+#define BNX2_PCI_CONFIG_3_VAUX_PRESET (1L<<30)
+#define BNX2_PCI_CONFIG_3_PCI_POWER (1L<<31)
+
+#define BNX2_PCI_PM_DATA_A 0x00000410
+#define BNX2_PCI_PM_DATA_A_PM_DATA_0_PRG (0xffL<<0)
+#define BNX2_PCI_PM_DATA_A_PM_DATA_1_PRG (0xffL<<8)
+#define BNX2_PCI_PM_DATA_A_PM_DATA_2_PRG (0xffL<<16)
+#define BNX2_PCI_PM_DATA_A_PM_DATA_3_PRG (0xffL<<24)
+
+#define BNX2_PCI_PM_DATA_B 0x00000414
+#define BNX2_PCI_PM_DATA_B_PM_DATA_4_PRG (0xffL<<0)
+#define BNX2_PCI_PM_DATA_B_PM_DATA_5_PRG (0xffL<<8)
+#define BNX2_PCI_PM_DATA_B_PM_DATA_6_PRG (0xffL<<16)
+#define BNX2_PCI_PM_DATA_B_PM_DATA_7_PRG (0xffL<<24)
+
+#define BNX2_PCI_SWAP_DIAG0 0x00000418
+#define BNX2_PCI_SWAP_DIAG1 0x0000041c
+#define BNX2_PCI_EXP_ROM_ADDR 0x00000420
+#define BNX2_PCI_EXP_ROM_ADDR_ADDRESS (0x3fffffL<<2)
+#define BNX2_PCI_EXP_ROM_ADDR_REQ (1L<<31)
+
+#define BNX2_PCI_EXP_ROM_DATA 0x00000424
+#define BNX2_PCI_VPD_INTF 0x00000428
+#define BNX2_PCI_VPD_INTF_INTF_REQ (1L<<0)
+
+#define BNX2_PCI_VPD_ADDR_FLAG 0x0000042c
+#define BNX2_PCI_VPD_ADDR_FLAG_MSK 0x0000ffff
+#define BNX2_PCI_VPD_ADDR_FLAG_SL 0L
+#define BNX2_PCI_VPD_ADDR_FLAG_ADDRESS (0x1fffL<<2)
+#define BNX2_PCI_VPD_ADDR_FLAG_WR (1L<<15)
+
+#define BNX2_PCI_VPD_DATA 0x00000430
+#define BNX2_PCI_ID_VAL1 0x00000434
+#define BNX2_PCI_ID_VAL1_DEVICE_ID (0xffffL<<0)
+#define BNX2_PCI_ID_VAL1_VENDOR_ID (0xffffL<<16)
+
+#define BNX2_PCI_ID_VAL2 0x00000438
+#define BNX2_PCI_ID_VAL2_SUBSYSTEM_VENDOR_ID (0xffffL<<0)
+#define BNX2_PCI_ID_VAL2_SUBSYSTEM_ID (0xffffL<<16)
+
+#define BNX2_PCI_ID_VAL3 0x0000043c
+#define BNX2_PCI_ID_VAL3_CLASS_CODE (0xffffffL<<0)
+#define BNX2_PCI_ID_VAL3_REVISION_ID (0xffL<<24)
+
+#define BNX2_PCI_ID_VAL4 0x00000440
+#define BNX2_PCI_ID_VAL4_CAP_ENA (0xfL<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_0 (0L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_1 (1L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_2 (2L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_3 (3L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_4 (4L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_5 (5L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_6 (6L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_7 (7L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_8 (8L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_9 (9L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_10 (10L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_11 (11L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_12 (12L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_13 (13L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_14 (14L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_15 (15L<<0)
+#define BNX2_PCI_ID_VAL4_RESERVED0 (0x3L<<4)
+#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG (0x3L<<6)
+#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_0 (0L<<6)
+#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_1 (1L<<6)
+#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_2 (2L<<6)
+#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_3 (3L<<6)
+#define BNX2_PCI_ID_VAL4_MSI_PV_MASK_CAP (1L<<8)
+#define BNX2_PCI_ID_VAL4_MSI_LIMIT (0x7L<<9)
+#define BNX2_PCI_ID_VAL4_MULTI_MSG_CAP (0x7L<<12)
+#define BNX2_PCI_ID_VAL4_MSI_ENABLE (1L<<15)
+#define BNX2_PCI_ID_VAL4_MAX_64_ADVERTIZE (1L<<16)
+#define BNX2_PCI_ID_VAL4_MAX_133_ADVERTIZE (1L<<17)
+#define BNX2_PCI_ID_VAL4_RESERVED2 (0x7L<<18)
+#define BNX2_PCI_ID_VAL4_MAX_CUMULATIVE_SIZE_B21 (0x3L<<21)
+#define BNX2_PCI_ID_VAL4_MAX_SPLIT_SIZE_B21 (0x3L<<23)
+#define BNX2_PCI_ID_VAL4_MAX_CUMULATIVE_SIZE_B0 (1L<<25)
+#define BNX2_PCI_ID_VAL4_MAX_MEM_READ_SIZE_B10 (0x3L<<26)
+#define BNX2_PCI_ID_VAL4_MAX_SPLIT_SIZE_B0 (1L<<28)
+#define BNX2_PCI_ID_VAL4_RESERVED3 (0x7L<<29)
+#define BNX2_PCI_ID_VAL4_RESERVED3_XI (0xffffL<<16)
+
+#define BNX2_PCI_ID_VAL5 0x00000444
+#define BNX2_PCI_ID_VAL5_D1_SUPPORT (1L<<0)
+#define BNX2_PCI_ID_VAL5_D2_SUPPORT (1L<<1)
+#define BNX2_PCI_ID_VAL5_PME_IN_D0 (1L<<2)
+#define BNX2_PCI_ID_VAL5_PME_IN_D1 (1L<<3)
+#define BNX2_PCI_ID_VAL5_PME_IN_D2 (1L<<4)
+#define BNX2_PCI_ID_VAL5_PME_IN_D3_HOT (1L<<5)
+#define BNX2_PCI_ID_VAL5_RESERVED0_TE (0x3ffffffL<<6)
+#define BNX2_PCI_ID_VAL5_PM_VERSION_XI (0x7L<<6)
+#define BNX2_PCI_ID_VAL5_NO_SOFT_RESET_XI (1L<<9)
+#define BNX2_PCI_ID_VAL5_RESERVED0_XI (0x3fffffL<<10)
+
+#define BNX2_PCI_PCIX_EXTENDED_STATUS 0x00000448
+#define BNX2_PCI_PCIX_EXTENDED_STATUS_NO_SNOOP (1L<<8)
+#define BNX2_PCI_PCIX_EXTENDED_STATUS_LONG_BURST (1L<<9)
+#define BNX2_PCI_PCIX_EXTENDED_STATUS_SPLIT_COMP_MSG_CLASS (0xfL<<16)
+#define BNX2_PCI_PCIX_EXTENDED_STATUS_SPLIT_COMP_MSG_IDX (0xffL<<24)
+
+#define BNX2_PCI_ID_VAL6 0x0000044c
+#define BNX2_PCI_ID_VAL6_MAX_LAT (0xffL<<0)
+#define BNX2_PCI_ID_VAL6_MIN_GNT (0xffL<<8)
+#define BNX2_PCI_ID_VAL6_BIST (0xffL<<16)
+#define BNX2_PCI_ID_VAL6_RESERVED0 (0xffL<<24)
+
+#define BNX2_PCI_MSI_DATA 0x00000450
+#define BNX2_PCI_MSI_DATA_MSI_DATA (0xffffL<<0)
+
+#define BNX2_PCI_MSI_ADDR_H 0x00000454
+#define BNX2_PCI_MSI_ADDR_L 0x00000458
+#define BNX2_PCI_MSI_ADDR_L_VAL (0x3fffffffL<<2)
+
+#define BNX2_PCI_CFG_ACCESS_CMD 0x0000045c
+#define BNX2_PCI_CFG_ACCESS_CMD_ADR (0x3fL<<2)
+#define BNX2_PCI_CFG_ACCESS_CMD_RD_REQ (1L<<27)
+#define BNX2_PCI_CFG_ACCESS_CMD_WR_REQ (0xfL<<28)
+
+#define BNX2_PCI_CFG_ACCESS_DATA 0x00000460
+#define BNX2_PCI_MSI_MASK 0x00000464
+#define BNX2_PCI_MSI_MASK_MSI_MASK (0xffffffffL<<0)
+
+#define BNX2_PCI_MSI_PEND 0x00000468
+#define BNX2_PCI_MSI_PEND_MSI_PEND (0xffffffffL<<0)
+
+#define BNX2_PCI_PM_DATA_C 0x0000046c
+#define BNX2_PCI_PM_DATA_C_PM_DATA_8_PRG (0xffL<<0)
+#define BNX2_PCI_PM_DATA_C_RESERVED0 (0xffffffL<<8)
+
+#define BNX2_PCI_MSIX_CONTROL 0x000004c0
+#define BNX2_PCI_MSIX_CONTROL_MSIX_TBL_SIZ (0x7ffL<<0)
+#define BNX2_PCI_MSIX_CONTROL_RESERVED0 (0x1fffffL<<11)
+
+#define BNX2_PCI_MSIX_TBL_OFF_BIR 0x000004c4
+#define BNX2_PCI_MSIX_TBL_OFF_BIR_MSIX_TBL_BIR (0x7L<<0)
+#define BNX2_PCI_MSIX_TBL_OFF_BIR_MSIX_TBL_OFF (0x1fffffffL<<3)
+
+#define BNX2_PCI_MSIX_PBA_OFF_BIT 0x000004c8
+#define BNX2_PCI_MSIX_PBA_OFF_BIT_MSIX_PBA_BIR (0x7L<<0)
+#define BNX2_PCI_MSIX_PBA_OFF_BIT_MSIX_PBA_OFF (0x1fffffffL<<3)
+
+#define BNX2_PCI_PCIE_CAPABILITY 0x000004d0
+#define BNX2_PCI_PCIE_CAPABILITY_INTERRUPT_MSG_NUM (0x1fL<<0)
+#define BNX2_PCI_PCIE_CAPABILITY_COMPLY_PCIE_1_1 (1L<<5)
+
+#define BNX2_PCI_DEVICE_CAPABILITY 0x000004d4
+#define BNX2_PCI_DEVICE_CAPABILITY_MAX_PL_SIZ_SUPPORTED (0x7L<<0)
+#define BNX2_PCI_DEVICE_CAPABILITY_EXTENDED_TAG_SUPPORT (1L<<5)
+#define BNX2_PCI_DEVICE_CAPABILITY_L0S_ACCEPTABLE_LATENCY (0x7L<<6)
+#define BNX2_PCI_DEVICE_CAPABILITY_L1_ACCEPTABLE_LATENCY (0x7L<<9)
+#define BNX2_PCI_DEVICE_CAPABILITY_ROLE_BASED_ERR_RPT (1L<<15)
+
+#define BNX2_PCI_LINK_CAPABILITY 0x000004dc
+#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED (0xfL<<0)
+#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED_0001 (1L<<0)
+#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED_0010 (1L<<0)
+#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_WIDTH (0x1fL<<4)
+#define BNX2_PCI_LINK_CAPABILITY_CLK_POWER_MGMT (1L<<9)
+#define BNX2_PCI_LINK_CAPABILITY_ASPM_SUPPORT (0x3L<<10)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT (0x7L<<12)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT_101 (5L<<12)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT_110 (6L<<12)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT (0x7L<<15)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT_001 (1L<<15)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT_010 (2L<<15)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT (0x7L<<18)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT_101 (5L<<18)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT_110 (6L<<18)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT (0x7L<<21)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT_001 (1L<<21)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT_010 (2L<<21)
+#define BNX2_PCI_LINK_CAPABILITY_PORT_NUM (0xffL<<24)
+
+#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2 0x000004e4
+#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_CMPL_TO_RANGE_SUPP (0xfL<<0)
+#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_CMPL_TO_DISABL_SUPP (1L<<4)
+#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_RESERVED (0x7ffffffL<<5)
+
+#define BNX2_PCI_PCIE_LINK_CAPABILITY_2 0x000004e8
+#define BNX2_PCI_PCIE_LINK_CAPABILITY_2_RESERVED (0xffffffffL<<0)
+
+#define BNX2_PCI_GRC_WINDOW1_ADDR 0x00000610
+#define BNX2_PCI_GRC_WINDOW1_ADDR_VALUE (0x1ffL<<13)
+
+#define BNX2_PCI_GRC_WINDOW2_ADDR 0x00000614
+#define BNX2_PCI_GRC_WINDOW2_ADDR_VALUE (0x1ffL<<13)
+
+#define BNX2_PCI_GRC_WINDOW3_ADDR 0x00000618
+#define BNX2_PCI_GRC_WINDOW3_ADDR_VALUE (0x1ffL<<13)
+
+#define BNX2_MSIX_TABLE_ADDR 0x318000
+#define BNX2_MSIX_PBA_ADDR 0x31c000
+
+/*
+ * misc_reg definition
+ * offset: 0x800
+ */
+#define BNX2_MISC_COMMAND 0x00000800
+#define BNX2_MISC_COMMAND_ENABLE_ALL (1L<<0)
+#define BNX2_MISC_COMMAND_DISABLE_ALL (1L<<1)
+#define BNX2_MISC_COMMAND_SW_RESET (1L<<4)
+#define BNX2_MISC_COMMAND_POR_RESET (1L<<5)
+#define BNX2_MISC_COMMAND_HD_RESET (1L<<6)
+#define BNX2_MISC_COMMAND_CMN_SW_RESET (1L<<7)
+#define BNX2_MISC_COMMAND_PAR_ERROR (1L<<8)
+#define BNX2_MISC_COMMAND_CS16_ERR (1L<<9)
+#define BNX2_MISC_COMMAND_CS16_ERR_LOC (0xfL<<12)
+#define BNX2_MISC_COMMAND_PAR_ERR_RAM (0x7fL<<16)
+#define BNX2_MISC_COMMAND_POWERDOWN_EVENT (1L<<23)
+#define BNX2_MISC_COMMAND_SW_SHUTDOWN (1L<<24)
+#define BNX2_MISC_COMMAND_SHUTDOWN_EN (1L<<25)
+#define BNX2_MISC_COMMAND_DINTEG_ATTN_EN (1L<<26)
+#define BNX2_MISC_COMMAND_PCIE_LINK_IN_L23 (1L<<27)
+#define BNX2_MISC_COMMAND_PCIE_DIS (1L<<28)
+
+#define BNX2_MISC_CFG 0x00000804
+#define BNX2_MISC_CFG_GRC_TMOUT (1L<<0)
+#define BNX2_MISC_CFG_NVM_WR_EN (0x3L<<1)
+#define BNX2_MISC_CFG_NVM_WR_EN_PROTECT (0L<<1)
+#define BNX2_MISC_CFG_NVM_WR_EN_PCI (1L<<1)
+#define BNX2_MISC_CFG_NVM_WR_EN_ALLOW (2L<<1)
+#define BNX2_MISC_CFG_NVM_WR_EN_ALLOW2 (3L<<1)
+#define BNX2_MISC_CFG_BIST_EN (1L<<3)
+#define BNX2_MISC_CFG_CK25_OUT_ALT_SRC (1L<<4)
+#define BNX2_MISC_CFG_RESERVED5_TE (1L<<5)
+#define BNX2_MISC_CFG_RESERVED6_TE (1L<<6)
+#define BNX2_MISC_CFG_CLK_CTL_OVERRIDE (1L<<7)
+#define BNX2_MISC_CFG_LEDMODE (0x7L<<8)
+#define BNX2_MISC_CFG_LEDMODE_MAC (0L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY1_TE (1L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY2_TE (2L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY3_TE (3L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY4_TE (4L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY5_TE (5L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY6_TE (6L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY7_TE (7L<<8)
+#define BNX2_MISC_CFG_MCP_GRC_TMOUT_TE (1L<<11)
+#define BNX2_MISC_CFG_DBU_GRC_TMOUT_TE (1L<<12)
+#define BNX2_MISC_CFG_LEDMODE_XI (0xfL<<8)
+#define BNX2_MISC_CFG_LEDMODE_MAC_XI (0L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY1_XI (1L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY2_XI (2L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY3_XI (3L<<8)
+#define BNX2_MISC_CFG_LEDMODE_MAC2_XI (4L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY4_XI (5L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY5_XI (6L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY6_XI (7L<<8)
+#define BNX2_MISC_CFG_LEDMODE_MAC3_XI (8L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY7_XI (9L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY8_XI (10L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY9_XI (11L<<8)
+#define BNX2_MISC_CFG_LEDMODE_MAC4_XI (12L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY10_XI (13L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY11_XI (14L<<8)
+#define BNX2_MISC_CFG_LEDMODE_UNUSED_XI (15L<<8)
+#define BNX2_MISC_CFG_PORT_SELECT_XI (1L<<13)
+#define BNX2_MISC_CFG_PARITY_MODE_XI (1L<<14)
+
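+/* Cached in bp->chip_id at probe time; the driver's CHIP_NUM()/
+ * CHIP_ID()/CHIP_REV() helpers decode this value.
+ */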
+#define BNX2_MISC_ID 0x00000808
+#define BNX2_MISC_ID_BOND_ID (0xfL<<0)
+#define BNX2_MISC_ID_BOND_ID_X (0L<<0)
+#define BNX2_MISC_ID_BOND_ID_C (3L<<0)
+#define BNX2_MISC_ID_BOND_ID_S (12L<<0)
+#define BNX2_MISC_ID_CHIP_METAL (0xffL<<4)
+#define BNX2_MISC_ID_CHIP_REV (0xfL<<12)
+#define BNX2_MISC_ID_CHIP_NUM (0xffffL<<16)
+
+#define BNX2_MISC_ENABLE_STATUS_BITS 0x0000080c
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_SCHEDULER_ENABLE (1L<<0)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_BD_READ_ENABLE (1L<<1)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_BD_CACHE_ENABLE (1L<<2)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_PROCESSOR_ENABLE (1L<<3)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_DMA_ENABLE (1L<<4)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_PATCHUP_ENABLE (1L<<5)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_PAYLOAD_Q_ENABLE (1L<<6)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_HEADER_Q_ENABLE (1L<<7)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_ASSEMBLER_ENABLE (1L<<8)
+#define BNX2_MISC_ENABLE_STATUS_BITS_EMAC_ENABLE (1L<<9)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_PARSER_MAC_ENABLE (1L<<10)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_PARSER_CATCHUP_ENABLE (1L<<11)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_MBUF_ENABLE (1L<<12)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_LOOKUP_ENABLE (1L<<13)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_PROCESSOR_ENABLE (1L<<14)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE (1L<<15)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_BD_CACHE_ENABLE (1L<<16)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_DMA_ENABLE (1L<<17)
+#define BNX2_MISC_ENABLE_STATUS_BITS_COMPLETION_ENABLE (1L<<18)
+#define BNX2_MISC_ENABLE_STATUS_BITS_HOST_COALESCE_ENABLE (1L<<19)
+#define BNX2_MISC_ENABLE_STATUS_BITS_MAILBOX_QUEUE_ENABLE (1L<<20)
+#define BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE (1L<<21)
+#define BNX2_MISC_ENABLE_STATUS_BITS_CMD_SCHEDULER_ENABLE (1L<<22)
+#define BNX2_MISC_ENABLE_STATUS_BITS_CMD_PROCESSOR_ENABLE (1L<<23)
+#define BNX2_MISC_ENABLE_STATUS_BITS_MGMT_PROCESSOR_ENABLE (1L<<24)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TIMER_ENABLE (1L<<25)
+#define BNX2_MISC_ENABLE_STATUS_BITS_DMA_ENGINE_ENABLE (1L<<26)
+#define BNX2_MISC_ENABLE_STATUS_BITS_UMP_ENABLE (1L<<27)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RV2P_CMD_SCHEDULER_ENABLE (1L<<28)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RSVD_FUTURE_ENABLE (0x7L<<29)
+
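+/* The SET/CLR register pair below uses write-1-to-set and
+ * write-1-to-clear semantics for the same enable bits named in
+ * BNX2_MISC_ENABLE_STATUS_BITS above, avoiding read-modify-write
+ * cycles when enabling or disabling individual blocks.
+ */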
+#define BNX2_MISC_ENABLE_SET_BITS 0x00000810
+#define BNX2_MISC_ENABLE_SET_BITS_TX_SCHEDULER_ENABLE (1L<<0)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_BD_READ_ENABLE (1L<<1)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_BD_CACHE_ENABLE (1L<<2)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_PROCESSOR_ENABLE (1L<<3)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_DMA_ENABLE (1L<<4)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_PATCHUP_ENABLE (1L<<5)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_PAYLOAD_Q_ENABLE (1L<<6)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE (1L<<7)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_ASSEMBLER_ENABLE (1L<<8)
+#define BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE (1L<<9)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE (1L<<10)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_CATCHUP_ENABLE (1L<<11)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE (1L<<12)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_LOOKUP_ENABLE (1L<<13)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_PROCESSOR_ENABLE (1L<<14)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE (1L<<15)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_BD_CACHE_ENABLE (1L<<16)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE (1L<<17)
+#define BNX2_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE (1L<<18)
+#define BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE (1L<<19)
+#define BNX2_MISC_ENABLE_SET_BITS_MAILBOX_QUEUE_ENABLE (1L<<20)
+#define BNX2_MISC_ENABLE_SET_BITS_CONTEXT_ENABLE (1L<<21)
+#define BNX2_MISC_ENABLE_SET_BITS_CMD_SCHEDULER_ENABLE (1L<<22)
+#define BNX2_MISC_ENABLE_SET_BITS_CMD_PROCESSOR_ENABLE (1L<<23)
+#define BNX2_MISC_ENABLE_SET_BITS_MGMT_PROCESSOR_ENABLE (1L<<24)
+#define BNX2_MISC_ENABLE_SET_BITS_TIMER_ENABLE (1L<<25)
+#define BNX2_MISC_ENABLE_SET_BITS_DMA_ENGINE_ENABLE (1L<<26)
+#define BNX2_MISC_ENABLE_SET_BITS_UMP_ENABLE (1L<<27)
+#define BNX2_MISC_ENABLE_SET_BITS_RV2P_CMD_SCHEDULER_ENABLE (1L<<28)
+#define BNX2_MISC_ENABLE_SET_BITS_RSVD_FUTURE_ENABLE (0x7L<<29)
+
+#define BNX2_MISC_ENABLE_CLR_BITS 0x00000814
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_SCHEDULER_ENABLE (1L<<0)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_BD_READ_ENABLE (1L<<1)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_BD_CACHE_ENABLE (1L<<2)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_PROCESSOR_ENABLE (1L<<3)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE (1L<<4)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_PATCHUP_ENABLE (1L<<5)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_PAYLOAD_Q_ENABLE (1L<<6)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_HEADER_Q_ENABLE (1L<<7)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_ASSEMBLER_ENABLE (1L<<8)
+#define BNX2_MISC_ENABLE_CLR_BITS_EMAC_ENABLE (1L<<9)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_PARSER_MAC_ENABLE (1L<<10)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_PARSER_CATCHUP_ENABLE (1L<<11)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_MBUF_ENABLE (1L<<12)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_LOOKUP_ENABLE (1L<<13)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_PROCESSOR_ENABLE (1L<<14)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_V2P_ENABLE (1L<<15)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_BD_CACHE_ENABLE (1L<<16)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE (1L<<17)
+#define BNX2_MISC_ENABLE_CLR_BITS_COMPLETION_ENABLE (1L<<18)
+#define BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE (1L<<19)
+#define BNX2_MISC_ENABLE_CLR_BITS_MAILBOX_QUEUE_ENABLE (1L<<20)
+#define BNX2_MISC_ENABLE_CLR_BITS_CONTEXT_ENABLE (1L<<21)
+#define BNX2_MISC_ENABLE_CLR_BITS_CMD_SCHEDULER_ENABLE (1L<<22)
+#define BNX2_MISC_ENABLE_CLR_BITS_CMD_PROCESSOR_ENABLE (1L<<23)
+#define BNX2_MISC_ENABLE_CLR_BITS_MGMT_PROCESSOR_ENABLE (1L<<24)
+#define BNX2_MISC_ENABLE_CLR_BITS_TIMER_ENABLE (1L<<25)
+#define BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE (1L<<26)
+#define BNX2_MISC_ENABLE_CLR_BITS_UMP_ENABLE (1L<<27)
+#define BNX2_MISC_ENABLE_CLR_BITS_RV2P_CMD_SCHEDULER_ENABLE (1L<<28)
+#define BNX2_MISC_ENABLE_CLR_BITS_RSVD_FUTURE_ENABLE (0x7L<<29)
+
+#define BNX2_MISC_CLOCK_CONTROL_BITS 0x00000818
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET (0xfL<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ (0L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ (1L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ (2L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ (3L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ (4L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ (5L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ (6L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ (7L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW (0xfL<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_DISABLE (1L<<6)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT (1L<<7)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC (0x7L<<8)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_UNDEF (0L<<8)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_12 (1L<<8)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_6 (2L<<8)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_62 (4L<<8)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED0_XI (0x7L<<8)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_MIN_POWER (1L<<11)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED (0xfL<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_100 (0L<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_80 (1L<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_50 (2L<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_40 (4L<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_25 (8L<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED1_XI (0xfL<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_STOP (1L<<16)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_17_TE (1L<<17)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_18_TE (1L<<18)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_19_TE (1L<<19)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_TE (0xfffL<<20)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_MGMT_XI (1L<<17)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED2_XI (0x3fL<<18)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_VCO_XI (0x7L<<24)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED3_XI (1L<<27)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_XI (0xfL<<28)
+
+#define BNX2_MISC_SPIO 0x0000081c
+#define BNX2_MISC_SPIO_VALUE (0xffL<<0)
+#define BNX2_MISC_SPIO_SET (0xffL<<8)
+#define BNX2_MISC_SPIO_CLR (0xffL<<16)
+#define BNX2_MISC_SPIO_FLOAT (0xffL<<24)
+
+#define BNX2_MISC_SPIO_INT 0x00000820
+#define BNX2_MISC_SPIO_INT_INT_STATE_TE (0xfL<<0)
+#define BNX2_MISC_SPIO_INT_OLD_VALUE_TE (0xfL<<8)
+#define BNX2_MISC_SPIO_INT_OLD_SET_TE (0xfL<<16)
+#define BNX2_MISC_SPIO_INT_OLD_CLR_TE (0xfL<<24)
+#define BNX2_MISC_SPIO_INT_INT_STATE_XI (0xffL<<0)
+#define BNX2_MISC_SPIO_INT_OLD_VALUE_XI (0xffL<<8)
+#define BNX2_MISC_SPIO_INT_OLD_SET_XI (0xffL<<16)
+#define BNX2_MISC_SPIO_INT_OLD_CLR_XI (0xffL<<24)
+
+#define BNX2_MISC_CONFIG_LFSR 0x00000824
+#define BNX2_MISC_CONFIG_LFSR_DIV (0xffffL<<0)
+
+#define BNX2_MISC_LFSR_MASK_BITS 0x00000828
+#define BNX2_MISC_LFSR_MASK_BITS_TX_SCHEDULER_ENABLE (1L<<0)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_BD_READ_ENABLE (1L<<1)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_BD_CACHE_ENABLE (1L<<2)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_PROCESSOR_ENABLE (1L<<3)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_DMA_ENABLE (1L<<4)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_PATCHUP_ENABLE (1L<<5)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_PAYLOAD_Q_ENABLE (1L<<6)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_HEADER_Q_ENABLE (1L<<7)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_ASSEMBLER_ENABLE (1L<<8)
+#define BNX2_MISC_LFSR_MASK_BITS_EMAC_ENABLE (1L<<9)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_PARSER_MAC_ENABLE (1L<<10)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_PARSER_CATCHUP_ENABLE (1L<<11)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_MBUF_ENABLE (1L<<12)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_LOOKUP_ENABLE (1L<<13)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_PROCESSOR_ENABLE (1L<<14)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_V2P_ENABLE (1L<<15)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_BD_CACHE_ENABLE (1L<<16)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_DMA_ENABLE (1L<<17)
+#define BNX2_MISC_LFSR_MASK_BITS_COMPLETION_ENABLE (1L<<18)
+#define BNX2_MISC_LFSR_MASK_BITS_HOST_COALESCE_ENABLE (1L<<19)
+#define BNX2_MISC_LFSR_MASK_BITS_MAILBOX_QUEUE_ENABLE (1L<<20)
+#define BNX2_MISC_LFSR_MASK_BITS_CONTEXT_ENABLE (1L<<21)
+#define BNX2_MISC_LFSR_MASK_BITS_CMD_SCHEDULER_ENABLE (1L<<22)
+#define BNX2_MISC_LFSR_MASK_BITS_CMD_PROCESSOR_ENABLE (1L<<23)
+#define BNX2_MISC_LFSR_MASK_BITS_MGMT_PROCESSOR_ENABLE (1L<<24)
+#define BNX2_MISC_LFSR_MASK_BITS_TIMER_ENABLE (1L<<25)
+#define BNX2_MISC_LFSR_MASK_BITS_DMA_ENGINE_ENABLE (1L<<26)
+#define BNX2_MISC_LFSR_MASK_BITS_UMP_ENABLE (1L<<27)
+#define BNX2_MISC_LFSR_MASK_BITS_RV2P_CMD_SCHEDULER_ENABLE (1L<<28)
+#define BNX2_MISC_LFSR_MASK_BITS_RSVD_FUTURE_ENABLE (0x7L<<29)
+
+#define BNX2_MISC_ARB_REQ0 0x0000082c
+#define BNX2_MISC_ARB_REQ1 0x00000830
+#define BNX2_MISC_ARB_REQ2 0x00000834
+#define BNX2_MISC_ARB_REQ3 0x00000838
+#define BNX2_MISC_ARB_REQ4 0x0000083c
+#define BNX2_MISC_ARB_FREE0 0x00000840
+#define BNX2_MISC_ARB_FREE1 0x00000844
+#define BNX2_MISC_ARB_FREE2 0x00000848
+#define BNX2_MISC_ARB_FREE3 0x0000084c
+#define BNX2_MISC_ARB_FREE4 0x00000850
+#define BNX2_MISC_ARB_REQ_STATUS0 0x00000854
+#define BNX2_MISC_ARB_REQ_STATUS1 0x00000858
+#define BNX2_MISC_ARB_REQ_STATUS2 0x0000085c
+#define BNX2_MISC_ARB_REQ_STATUS3 0x00000860
+#define BNX2_MISC_ARB_REQ_STATUS4 0x00000864
+#define BNX2_MISC_ARB_GNT0 0x00000868
+#define BNX2_MISC_ARB_GNT0_0 (0x7L<<0)
+#define BNX2_MISC_ARB_GNT0_1 (0x7L<<4)
+#define BNX2_MISC_ARB_GNT0_2 (0x7L<<8)
+#define BNX2_MISC_ARB_GNT0_3 (0x7L<<12)
+#define BNX2_MISC_ARB_GNT0_4 (0x7L<<16)
+#define BNX2_MISC_ARB_GNT0_5 (0x7L<<20)
+#define BNX2_MISC_ARB_GNT0_6 (0x7L<<24)
+#define BNX2_MISC_ARB_GNT0_7 (0x7L<<28)
+
+#define BNX2_MISC_ARB_GNT1 0x0000086c
+#define BNX2_MISC_ARB_GNT1_8 (0x7L<<0)
+#define BNX2_MISC_ARB_GNT1_9 (0x7L<<4)
+#define BNX2_MISC_ARB_GNT1_10 (0x7L<<8)
+#define BNX2_MISC_ARB_GNT1_11 (0x7L<<12)
+#define BNX2_MISC_ARB_GNT1_12 (0x7L<<16)
+#define BNX2_MISC_ARB_GNT1_13 (0x7L<<20)
+#define BNX2_MISC_ARB_GNT1_14 (0x7L<<24)
+#define BNX2_MISC_ARB_GNT1_15 (0x7L<<28)
+
+#define BNX2_MISC_ARB_GNT2 0x00000870
+#define BNX2_MISC_ARB_GNT2_16 (0x7L<<0)
+#define BNX2_MISC_ARB_GNT2_17 (0x7L<<4)
+#define BNX2_MISC_ARB_GNT2_18 (0x7L<<8)
+#define BNX2_MISC_ARB_GNT2_19 (0x7L<<12)
+#define BNX2_MISC_ARB_GNT2_20 (0x7L<<16)
+#define BNX2_MISC_ARB_GNT2_21 (0x7L<<20)
+#define BNX2_MISC_ARB_GNT2_22 (0x7L<<24)
+#define BNX2_MISC_ARB_GNT2_23 (0x7L<<28)
+
+#define BNX2_MISC_ARB_GNT3 0x00000874
+#define BNX2_MISC_ARB_GNT3_24 (0x7L<<0)
+#define BNX2_MISC_ARB_GNT3_25 (0x7L<<4)
+#define BNX2_MISC_ARB_GNT3_26 (0x7L<<8)
+#define BNX2_MISC_ARB_GNT3_27 (0x7L<<12)
+#define BNX2_MISC_ARB_GNT3_28 (0x7L<<16)
+#define BNX2_MISC_ARB_GNT3_29 (0x7L<<20)
+#define BNX2_MISC_ARB_GNT3_30 (0x7L<<24)
+#define BNX2_MISC_ARB_GNT3_31 (0x7L<<28)
+
+#define BNX2_MISC_RESERVED1 0x00000878
+#define BNX2_MISC_RESERVED1_MISC_RESERVED1_VALUE (0x3fL<<0)
+
+#define BNX2_MISC_RESERVED2 0x0000087c
+#define BNX2_MISC_RESERVED2_PCIE_DIS (1L<<0)
+#define BNX2_MISC_RESERVED2_LINK_IN_L23 (1L<<1)
+
+#define BNX2_MISC_SM_ASF_CONTROL 0x00000880
+#define BNX2_MISC_SM_ASF_CONTROL_ASF_RST (1L<<0)
+#define BNX2_MISC_SM_ASF_CONTROL_TSC_EN (1L<<1)
+#define BNX2_MISC_SM_ASF_CONTROL_WG_TO (1L<<2)
+#define BNX2_MISC_SM_ASF_CONTROL_HB_TO (1L<<3)
+#define BNX2_MISC_SM_ASF_CONTROL_PA_TO (1L<<4)
+#define BNX2_MISC_SM_ASF_CONTROL_PL_TO (1L<<5)
+#define BNX2_MISC_SM_ASF_CONTROL_RT_TO (1L<<6)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_EVENT (1L<<7)
+#define BNX2_MISC_SM_ASF_CONTROL_STRETCH_EN (1L<<8)
+#define BNX2_MISC_SM_ASF_CONTROL_STRETCH_PULSE (1L<<9)
+#define BNX2_MISC_SM_ASF_CONTROL_RES (0x3L<<10)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_EN (1L<<12)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_BB_EN (1L<<13)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_NO_ADDR_FILT (1L<<14)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_AUTOREAD (1L<<15)
+#define BNX2_MISC_SM_ASF_CONTROL_NIC_SMB_ADDR1 (0x7fL<<16)
+#define BNX2_MISC_SM_ASF_CONTROL_NIC_SMB_ADDR2 (0x7fL<<23)
+#define BNX2_MISC_SM_ASF_CONTROL_EN_NIC_SMB_ADDR_0 (1L<<30)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_EARLY_ATTN (1L<<31)
+
+#define BNX2_MISC_SMB_IN 0x00000884
+#define BNX2_MISC_SMB_IN_DAT_IN (0xffL<<0)
+#define BNX2_MISC_SMB_IN_RDY (1L<<8)
+#define BNX2_MISC_SMB_IN_DONE (1L<<9)
+#define BNX2_MISC_SMB_IN_FIRSTBYTE (1L<<10)
+#define BNX2_MISC_SMB_IN_STATUS (0x7L<<11)
+#define BNX2_MISC_SMB_IN_STATUS_OK (0x0L<<11)
+#define BNX2_MISC_SMB_IN_STATUS_PEC (0x1L<<11)
+#define BNX2_MISC_SMB_IN_STATUS_OFLOW (0x2L<<11)
+#define BNX2_MISC_SMB_IN_STATUS_STOP (0x3L<<11)
+#define BNX2_MISC_SMB_IN_STATUS_TIMEOUT (0x4L<<11)
+
+#define BNX2_MISC_SMB_OUT 0x00000888
+#define BNX2_MISC_SMB_OUT_DAT_OUT (0xffL<<0)
+#define BNX2_MISC_SMB_OUT_RDY (1L<<8)
+#define BNX2_MISC_SMB_OUT_START (1L<<9)
+#define BNX2_MISC_SMB_OUT_LAST (1L<<10)
+#define BNX2_MISC_SMB_OUT_ACC_TYPE (1L<<11)
+#define BNX2_MISC_SMB_OUT_ENB_PEC (1L<<12)
+#define BNX2_MISC_SMB_OUT_GET_RX_LEN (1L<<13)
+#define BNX2_MISC_SMB_OUT_SMB_READ_LEN (0x3fL<<14)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS (0xfL<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_OK (0L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_FIRST_NACK (1L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_UFLOW (2L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_STOP (3L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_TIMEOUT (4L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_FIRST_LOST (5L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_BADACK (6L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_SUB_NACK (9L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_SUB_LOST (0xdL<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_SLAVEMODE (1L<<24)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_DAT_EN (1L<<25)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_DAT_IN (1L<<26)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_CLK_EN (1L<<27)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_CLK_IN (1L<<28)
+
+#define BNX2_MISC_SMB_WATCHDOG 0x0000088c
+#define BNX2_MISC_SMB_WATCHDOG_WATCHDOG (0xffffL<<0)
+
+#define BNX2_MISC_SMB_HEARTBEAT 0x00000890
+#define BNX2_MISC_SMB_HEARTBEAT_HEARTBEAT (0xffffL<<0)
+
+#define BNX2_MISC_SMB_POLL_ASF 0x00000894
+#define BNX2_MISC_SMB_POLL_ASF_POLL_ASF (0xffffL<<0)
+
+#define BNX2_MISC_SMB_POLL_LEGACY 0x00000898
+#define BNX2_MISC_SMB_POLL_LEGACY_POLL_LEGACY (0xffffL<<0)
+
+#define BNX2_MISC_SMB_RETRAN 0x0000089c
+#define BNX2_MISC_SMB_RETRAN_RETRAN (0xffL<<0)
+
+#define BNX2_MISC_SMB_TIMESTAMP 0x000008a0
+#define BNX2_MISC_SMB_TIMESTAMP_TIMESTAMP (0xffffffffL<<0)
+
+#define BNX2_MISC_PERR_ENA0 0x000008a4
+#define BNX2_MISC_PERR_ENA0_COM_MISC_CTXC (1L<<0)
+#define BNX2_MISC_PERR_ENA0_COM_MISC_REGF (1L<<1)
+#define BNX2_MISC_PERR_ENA0_COM_MISC_SCPAD (1L<<2)
+#define BNX2_MISC_PERR_ENA0_CP_MISC_CTXC (1L<<3)
+#define BNX2_MISC_PERR_ENA0_CP_MISC_REGF (1L<<4)
+#define BNX2_MISC_PERR_ENA0_CP_MISC_SCPAD (1L<<5)
+#define BNX2_MISC_PERR_ENA0_CS_MISC_TMEM (1L<<6)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM0 (1L<<7)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM1 (1L<<8)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM2 (1L<<9)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM3 (1L<<10)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM4 (1L<<11)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM5 (1L<<12)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_PGTBL (1L<<13)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR0 (1L<<14)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR1 (1L<<15)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR2 (1L<<16)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR3 (1L<<17)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR4 (1L<<18)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DW0 (1L<<19)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DW1 (1L<<20)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DW2 (1L<<21)
+#define BNX2_MISC_PERR_ENA0_HC_MISC_DMA (1L<<22)
+#define BNX2_MISC_PERR_ENA0_MCP_MISC_REGF (1L<<23)
+#define BNX2_MISC_PERR_ENA0_MCP_MISC_SCPAD (1L<<24)
+#define BNX2_MISC_PERR_ENA0_MQ_MISC_CTX (1L<<25)
+#define BNX2_MISC_PERR_ENA0_RBDC_MISC (1L<<26)
+#define BNX2_MISC_PERR_ENA0_RBUF_MISC_MB (1L<<27)
+#define BNX2_MISC_PERR_ENA0_RBUF_MISC_PTR (1L<<28)
+#define BNX2_MISC_PERR_ENA0_RDE_MISC_RPC (1L<<29)
+#define BNX2_MISC_PERR_ENA0_RDE_MISC_RPM (1L<<30)
+#define BNX2_MISC_PERR_ENA0_RV2P_MISC_CB0REGS (1L<<31)
+#define BNX2_MISC_PERR_ENA0_COM_DMAE_PERR_EN_XI (1L<<0)
+#define BNX2_MISC_PERR_ENA0_CP_DMAE_PERR_EN_XI (1L<<1)
+#define BNX2_MISC_PERR_ENA0_RPM_ACPIBEMEM_PERR_EN_XI (1L<<2)
+#define BNX2_MISC_PERR_ENA0_CTX_USAGE_CNT_PERR_EN_XI (1L<<3)
+#define BNX2_MISC_PERR_ENA0_CTX_PGTBL_PERR_EN_XI (1L<<4)
+#define BNX2_MISC_PERR_ENA0_CTX_CACHE_PERR_EN_XI (1L<<5)
+#define BNX2_MISC_PERR_ENA0_CTX_MIRROR_PERR_EN_XI (1L<<6)
+#define BNX2_MISC_PERR_ENA0_COM_CTXC_PERR_EN_XI (1L<<7)
+#define BNX2_MISC_PERR_ENA0_COM_SCPAD_PERR_EN_XI (1L<<8)
+#define BNX2_MISC_PERR_ENA0_CP_CTXC_PERR_EN_XI (1L<<9)
+#define BNX2_MISC_PERR_ENA0_CP_SCPAD_PERR_EN_XI (1L<<10)
+#define BNX2_MISC_PERR_ENA0_RXP_RBUFC_PERR_EN_XI (1L<<11)
+#define BNX2_MISC_PERR_ENA0_RXP_CTXC_PERR_EN_XI (1L<<12)
+#define BNX2_MISC_PERR_ENA0_RXP_SCPAD_PERR_EN_XI (1L<<13)
+#define BNX2_MISC_PERR_ENA0_TPAT_SCPAD_PERR_EN_XI (1L<<14)
+#define BNX2_MISC_PERR_ENA0_TXP_CTXC_PERR_EN_XI (1L<<15)
+#define BNX2_MISC_PERR_ENA0_TXP_SCPAD_PERR_EN_XI (1L<<16)
+#define BNX2_MISC_PERR_ENA0_CS_TMEM_PERR_EN_XI (1L<<17)
+#define BNX2_MISC_PERR_ENA0_MQ_CTX_PERR_EN_XI (1L<<18)
+#define BNX2_MISC_PERR_ENA0_RPM_DFIFOMEM_PERR_EN_XI (1L<<19)
+#define BNX2_MISC_PERR_ENA0_RPC_DFIFOMEM_PERR_EN_XI (1L<<20)
+#define BNX2_MISC_PERR_ENA0_RBUF_PTRMEM_PERR_EN_XI (1L<<21)
+#define BNX2_MISC_PERR_ENA0_RBUF_DATAMEM_PERR_EN_XI (1L<<22)
+#define BNX2_MISC_PERR_ENA0_RV2P_P2IRAM_PERR_EN_XI (1L<<23)
+#define BNX2_MISC_PERR_ENA0_RV2P_P1IRAM_PERR_EN_XI (1L<<24)
+#define BNX2_MISC_PERR_ENA0_RV2P_CB1REGS_PERR_EN_XI (1L<<25)
+#define BNX2_MISC_PERR_ENA0_RV2P_CB0REGS_PERR_EN_XI (1L<<26)
+#define BNX2_MISC_PERR_ENA0_TPBUF_PERR_EN_XI (1L<<27)
+#define BNX2_MISC_PERR_ENA0_THBUF_PERR_EN_XI (1L<<28)
+#define BNX2_MISC_PERR_ENA0_TDMA_PERR_EN_XI (1L<<29)
+#define BNX2_MISC_PERR_ENA0_TBDC_PERR_EN_XI (1L<<30)
+#define BNX2_MISC_PERR_ENA0_TSCH_LR_PERR_EN_XI (1L<<31)
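+
+/*
+ * The PERR enable registers carry two overlapping bit layouts: the
+ * unsuffixed names apply to 5706/5708-class chips, while the _XI names
+ * redefine the same bit positions for 5709-class chips, so software
+ * must pick the layout per chip.  A minimal sketch, assuming the
+ * CHIP_NUM()/CHIP_NUM_5709 and REG_WR() helpers defined elsewhere in
+ * this header:
+ *
+ *	if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ *		REG_WR(bp, BNX2_MISC_PERR_ENA0,
+ *		       BNX2_MISC_PERR_ENA0_COM_DMAE_PERR_EN_XI);
+ *	else
+ *		REG_WR(bp, BNX2_MISC_PERR_ENA0,
+ *		       BNX2_MISC_PERR_ENA0_COM_MISC_CTXC);
+ */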
+
+#define BNX2_MISC_PERR_ENA1 0x000008a8
+#define BNX2_MISC_PERR_ENA1_RV2P_MISC_CB1REGS (1L<<0)
+#define BNX2_MISC_PERR_ENA1_RV2P_MISC_P1IRAM (1L<<1)
+#define BNX2_MISC_PERR_ENA1_RV2P_MISC_P2IRAM (1L<<2)
+#define BNX2_MISC_PERR_ENA1_RXP_MISC_CTXC (1L<<3)
+#define BNX2_MISC_PERR_ENA1_RXP_MISC_REGF (1L<<4)
+#define BNX2_MISC_PERR_ENA1_RXP_MISC_SCPAD (1L<<5)
+#define BNX2_MISC_PERR_ENA1_RXP_MISC_RBUFC (1L<<6)
+#define BNX2_MISC_PERR_ENA1_TBDC_MISC (1L<<7)
+#define BNX2_MISC_PERR_ENA1_TDMA_MISC (1L<<8)
+#define BNX2_MISC_PERR_ENA1_THBUF_MISC_MB0 (1L<<9)
+#define BNX2_MISC_PERR_ENA1_THBUF_MISC_MB1 (1L<<10)
+#define BNX2_MISC_PERR_ENA1_TPAT_MISC_REGF (1L<<11)
+#define BNX2_MISC_PERR_ENA1_TPAT_MISC_SCPAD (1L<<12)
+#define BNX2_MISC_PERR_ENA1_TPBUF_MISC_MB (1L<<13)
+#define BNX2_MISC_PERR_ENA1_TSCH_MISC_LR (1L<<14)
+#define BNX2_MISC_PERR_ENA1_TXP_MISC_CTXC (1L<<15)
+#define BNX2_MISC_PERR_ENA1_TXP_MISC_REGF (1L<<16)
+#define BNX2_MISC_PERR_ENA1_TXP_MISC_SCPAD (1L<<17)
+#define BNX2_MISC_PERR_ENA1_UMP_MISC_FIORX (1L<<18)
+#define BNX2_MISC_PERR_ENA1_UMP_MISC_FIOTX (1L<<19)
+#define BNX2_MISC_PERR_ENA1_UMP_MISC_RX (1L<<20)
+#define BNX2_MISC_PERR_ENA1_UMP_MISC_TX (1L<<21)
+#define BNX2_MISC_PERR_ENA1_RDMAQ_MISC (1L<<22)
+#define BNX2_MISC_PERR_ENA1_CSQ_MISC (1L<<23)
+#define BNX2_MISC_PERR_ENA1_CPQ_MISC (1L<<24)
+#define BNX2_MISC_PERR_ENA1_MCPQ_MISC (1L<<25)
+#define BNX2_MISC_PERR_ENA1_RV2PMQ_MISC (1L<<26)
+#define BNX2_MISC_PERR_ENA1_RV2PPQ_MISC (1L<<27)
+#define BNX2_MISC_PERR_ENA1_RV2PTQ_MISC (1L<<28)
+#define BNX2_MISC_PERR_ENA1_RXPQ_MISC (1L<<29)
+#define BNX2_MISC_PERR_ENA1_RXPCQ_MISC (1L<<30)
+#define BNX2_MISC_PERR_ENA1_RLUPQ_MISC (1L<<31)
+#define BNX2_MISC_PERR_ENA1_RBDC_PERR_EN_XI (1L<<0)
+#define BNX2_MISC_PERR_ENA1_RDMA_DFIFO_PERR_EN_XI (1L<<2)
+#define BNX2_MISC_PERR_ENA1_HC_STATS_PERR_EN_XI (1L<<3)
+#define BNX2_MISC_PERR_ENA1_HC_MSIX_PERR_EN_XI (1L<<4)
+#define BNX2_MISC_PERR_ENA1_HC_PRODUCSTB_PERR_EN_XI (1L<<5)
+#define BNX2_MISC_PERR_ENA1_HC_CONSUMSTB_PERR_EN_XI (1L<<6)
+#define BNX2_MISC_PERR_ENA1_TPATQ_PERR_EN_XI (1L<<7)
+#define BNX2_MISC_PERR_ENA1_MCPQ_PERR_EN_XI (1L<<8)
+#define BNX2_MISC_PERR_ENA1_TDMAQ_PERR_EN_XI (1L<<9)
+#define BNX2_MISC_PERR_ENA1_TXPQ_PERR_EN_XI (1L<<10)
+#define BNX2_MISC_PERR_ENA1_COMTQ_PERR_EN_XI (1L<<11)
+#define BNX2_MISC_PERR_ENA1_COMQ_PERR_EN_XI (1L<<12)
+#define BNX2_MISC_PERR_ENA1_RLUPQ_PERR_EN_XI (1L<<13)
+#define BNX2_MISC_PERR_ENA1_RXPQ_PERR_EN_XI (1L<<14)
+#define BNX2_MISC_PERR_ENA1_RV2PPQ_PERR_EN_XI (1L<<15)
+#define BNX2_MISC_PERR_ENA1_RDMAQ_PERR_EN_XI (1L<<16)
+#define BNX2_MISC_PERR_ENA1_TASQ_PERR_EN_XI (1L<<17)
+#define BNX2_MISC_PERR_ENA1_TBDRQ_PERR_EN_XI (1L<<18)
+#define BNX2_MISC_PERR_ENA1_TSCHQ_PERR_EN_XI (1L<<19)
+#define BNX2_MISC_PERR_ENA1_COMXQ_PERR_EN_XI (1L<<20)
+#define BNX2_MISC_PERR_ENA1_RXPCQ_PERR_EN_XI (1L<<21)
+#define BNX2_MISC_PERR_ENA1_RV2PTQ_PERR_EN_XI (1L<<22)
+#define BNX2_MISC_PERR_ENA1_RV2PMQ_PERR_EN_XI (1L<<23)
+#define BNX2_MISC_PERR_ENA1_CPQ_PERR_EN_XI (1L<<24)
+#define BNX2_MISC_PERR_ENA1_CSQ_PERR_EN_XI (1L<<25)
+#define BNX2_MISC_PERR_ENA1_RLUP_CID_PERR_EN_XI (1L<<26)
+#define BNX2_MISC_PERR_ENA1_RV2PCS_TMEM_PERR_EN_XI (1L<<27)
+#define BNX2_MISC_PERR_ENA1_RV2PCSQ_PERR_EN_XI (1L<<28)
+#define BNX2_MISC_PERR_ENA1_MQ_IDX_PERR_EN_XI (1L<<29)
+
+#define BNX2_MISC_PERR_ENA2 0x000008ac
+#define BNX2_MISC_PERR_ENA2_COMQ_MISC (1L<<0)
+#define BNX2_MISC_PERR_ENA2_COMXQ_MISC (1L<<1)
+#define BNX2_MISC_PERR_ENA2_COMTQ_MISC (1L<<2)
+#define BNX2_MISC_PERR_ENA2_TSCHQ_MISC (1L<<3)
+#define BNX2_MISC_PERR_ENA2_TBDRQ_MISC (1L<<4)
+#define BNX2_MISC_PERR_ENA2_TXPQ_MISC (1L<<5)
+#define BNX2_MISC_PERR_ENA2_TDMAQ_MISC (1L<<6)
+#define BNX2_MISC_PERR_ENA2_TPATQ_MISC (1L<<7)
+#define BNX2_MISC_PERR_ENA2_TASQ_MISC (1L<<8)
+#define BNX2_MISC_PERR_ENA2_TGT_FIFO_PERR_EN_XI (1L<<0)
+#define BNX2_MISC_PERR_ENA2_UMP_TX_PERR_EN_XI (1L<<1)
+#define BNX2_MISC_PERR_ENA2_UMP_RX_PERR_EN_XI (1L<<2)
+#define BNX2_MISC_PERR_ENA2_MCP_ROM_PERR_EN_XI (1L<<3)
+#define BNX2_MISC_PERR_ENA2_MCP_SCPAD_PERR_EN_XI (1L<<4)
+#define BNX2_MISC_PERR_ENA2_HB_MEM_PERR_EN_XI (1L<<5)
+#define BNX2_MISC_PERR_ENA2_PCIE_REPLAY_PERR_EN_XI (1L<<6)
+
+#define BNX2_MISC_DEBUG_VECTOR_SEL 0x000008b0
+#define BNX2_MISC_DEBUG_VECTOR_SEL_0 (0xfffL<<0)
+#define BNX2_MISC_DEBUG_VECTOR_SEL_1 (0xfffL<<12)
+#define BNX2_MISC_DEBUG_VECTOR_SEL_1_XI (0xfffL<<15)
+
+#define BNX2_MISC_VREG_CONTROL 0x000008b4
+#define BNX2_MISC_VREG_CONTROL_1_2 (0xfL<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_XI (0xfL<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS14_XI (0L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS12_XI (1L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS10_XI (2L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS8_XI (3L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS6_XI (4L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS4_XI (5L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS2_XI (6L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_NOM_XI (7L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS2_XI (8L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS4_XI (9L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS6_XI (10L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS8_XI (11L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS10_XI (12L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS12_XI (13L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS14_XI (14L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS16_XI (15L<<0)
+#define BNX2_MISC_VREG_CONTROL_2_5 (0xfL<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS14 (0L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS12 (1L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS10 (2L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS8 (3L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS6 (4L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS4 (5L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS2 (6L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_NOM (7L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS2 (8L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS4 (9L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS6 (10L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS8 (11L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS10 (12L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS12 (13L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS14 (14L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS16 (15L<<4)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT (0xfL<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS14 (0L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS12 (1L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS10 (2L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS8 (3L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS6 (4L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS4 (5L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS2 (6L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_NOM (7L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS2 (8L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS4 (9L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS6 (10L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS8 (11L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS10 (12L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS12 (13L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS14 (14L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS16 (15L<<8)
+
+#define BNX2_MISC_FINAL_CLK_CTL_VAL 0x000008b8
+#define BNX2_MISC_FINAL_CLK_CTL_VAL_MISC_FINAL_CLK_CTL_VAL (0x3ffffffL<<6)
+
+#define BNX2_MISC_GP_HW_CTL0 0x000008bc
+#define BNX2_MISC_GP_HW_CTL0_TX_DRIVE (1L<<0)
+#define BNX2_MISC_GP_HW_CTL0_RMII_MODE (1L<<1)
+#define BNX2_MISC_GP_HW_CTL0_RMII_CRSDV_SEL (1L<<2)
+#define BNX2_MISC_GP_HW_CTL0_RVMII_MODE (1L<<3)
+#define BNX2_MISC_GP_HW_CTL0_FLASH_SAMP_SCLK_NEGEDGE_TE (1L<<4)
+#define BNX2_MISC_GP_HW_CTL0_HIDDEN_REVISION_ID_TE (1L<<5)
+#define BNX2_MISC_GP_HW_CTL0_HC_CNTL_TMOUT_CTR_RST_TE (1L<<6)
+#define BNX2_MISC_GP_HW_CTL0_RESERVED1_XI (0x7L<<4)
+#define BNX2_MISC_GP_HW_CTL0_ENA_CORE_RST_ON_MAIN_PWR_GOING_AWAY (1L<<7)
+#define BNX2_MISC_GP_HW_CTL0_ENA_SEL_VAUX_B_IN_L2_TE (1L<<8)
+#define BNX2_MISC_GP_HW_CTL0_GRC_BNK_FREE_FIX_TE (1L<<9)
+#define BNX2_MISC_GP_HW_CTL0_LED_ACT_SEL_TE (1L<<10)
+#define BNX2_MISC_GP_HW_CTL0_RESERVED2_XI (0x7L<<8)
+#define BNX2_MISC_GP_HW_CTL0_UP1_DEF0 (1L<<11)
+#define BNX2_MISC_GP_HW_CTL0_FIBER_MODE_DIS_DEF (1L<<12)
+#define BNX2_MISC_GP_HW_CTL0_FORCE2500_DEF (1L<<13)
+#define BNX2_MISC_GP_HW_CTL0_AUTODETECT_DIS_DEF (1L<<14)
+#define BNX2_MISC_GP_HW_CTL0_PARALLEL_DETECT_DEF (1L<<15)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI (0xfL<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_3MA (0L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_2P5MA (1L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_2P0MA (3L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_1P5MA (5L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_1P0MA (7L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_PWRDN (15L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PRE2DIS (1L<<20)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PRE1DIS (1L<<21)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT (0x3L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_M6P (0L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_M0P (1L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_P0P (2L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_P6P (3L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT (0x3L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_M6P (0L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_M0P (1L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_P0P (2L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_P6P (3L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ (0x3L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_240UA (0L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_160UA (1L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_400UA (2L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_320UA (3L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ (0x3L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_240UA (0L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_160UA (1L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_400UA (2L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_320UA (3L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ (0x3L<<30)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P57 (0L<<30)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P45 (1L<<30)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P62 (2L<<30)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P66 (3L<<30)
+
+#define BNX2_MISC_GP_HW_CTL1 0x000008c0
+#define BNX2_MISC_GP_HW_CTL1_1_ATTN_BTN_PRSNT_TE (1L<<0)
+#define BNX2_MISC_GP_HW_CTL1_1_ATTN_IND_PRSNT_TE (1L<<1)
+#define BNX2_MISC_GP_HW_CTL1_1_PWR_IND_PRSNT_TE (1L<<2)
+#define BNX2_MISC_GP_HW_CTL1_0_PCIE_LOOPBACK_TE (1L<<3)
+#define BNX2_MISC_GP_HW_CTL1_RESERVED_SOFT_XI (0xffffL<<0)
+#define BNX2_MISC_GP_HW_CTL1_RESERVED_HARD_XI (0xffffL<<16)
+
+#define BNX2_MISC_NEW_HW_CTL 0x000008c4
+#define BNX2_MISC_NEW_HW_CTL_MAIN_POR_BYPASS (1L<<0)
+#define BNX2_MISC_NEW_HW_CTL_RINGOSC_ENABLE (1L<<1)
+#define BNX2_MISC_NEW_HW_CTL_RINGOSC_SEL0 (1L<<2)
+#define BNX2_MISC_NEW_HW_CTL_RINGOSC_SEL1 (1L<<3)
+#define BNX2_MISC_NEW_HW_CTL_RESERVED_SHARED (0xfffL<<4)
+#define BNX2_MISC_NEW_HW_CTL_RESERVED_SPLIT (0xffffL<<16)
+
+#define BNX2_MISC_NEW_CORE_CTL 0x000008c8
+#define BNX2_MISC_NEW_CORE_CTL_LINK_HOLDOFF_SUCCESS (1L<<0)
+#define BNX2_MISC_NEW_CORE_CTL_LINK_HOLDOFF_REQ (1L<<1)
+#define BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE (1L<<16)
+#define BNX2_MISC_NEW_CORE_CTL_RESERVED_CMN (0x3fffL<<2)
+#define BNX2_MISC_NEW_CORE_CTL_RESERVED_TC (0xffffL<<16)
+
+#define BNX2_MISC_ECO_HW_CTL 0x000008cc
+#define BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN (1L<<0)
+#define BNX2_MISC_ECO_HW_CTL_RESERVED_SOFT (0x7fffL<<1)
+#define BNX2_MISC_ECO_HW_CTL_RESERVED_HARD (0xffffL<<16)
+
+#define BNX2_MISC_ECO_CORE_CTL 0x000008d0
+#define BNX2_MISC_ECO_CORE_CTL_RESERVED_SOFT (0xffffL<<0)
+#define BNX2_MISC_ECO_CORE_CTL_RESERVED_HARD (0xffffL<<16)
+
+#define BNX2_MISC_PPIO 0x000008d4
+#define BNX2_MISC_PPIO_VALUE (0xfL<<0)
+#define BNX2_MISC_PPIO_SET (0xfL<<8)
+#define BNX2_MISC_PPIO_CLR (0xfL<<16)
+#define BNX2_MISC_PPIO_FLOAT (0xfL<<24)
+
+#define BNX2_MISC_PPIO_INT 0x000008d8
+#define BNX2_MISC_PPIO_INT_INT_STATE (0xfL<<0)
+#define BNX2_MISC_PPIO_INT_OLD_VALUE (0xfL<<8)
+#define BNX2_MISC_PPIO_INT_OLD_SET (0xfL<<16)
+#define BNX2_MISC_PPIO_INT_OLD_CLR (0xfL<<24)
+
+#define BNX2_MISC_RESET_NUMS 0x000008dc
+#define BNX2_MISC_RESET_NUMS_NUM_HARD_RESETS (0x7L<<0)
+#define BNX2_MISC_RESET_NUMS_NUM_PCIE_RESETS (0x7L<<4)
+#define BNX2_MISC_RESET_NUMS_NUM_PERSTB_RESETS (0x7L<<8)
+#define BNX2_MISC_RESET_NUMS_NUM_CMN_RESETS (0x7L<<12)
+#define BNX2_MISC_RESET_NUMS_NUM_PORT_RESETS (0x7L<<16)
+
+#define BNX2_MISC_CS16_ERR 0x000008e0
+#define BNX2_MISC_CS16_ERR_ENA_PCI (1L<<0)
+#define BNX2_MISC_CS16_ERR_ENA_RDMA (1L<<1)
+#define BNX2_MISC_CS16_ERR_ENA_TDMA (1L<<2)
+#define BNX2_MISC_CS16_ERR_ENA_EMAC (1L<<3)
+#define BNX2_MISC_CS16_ERR_ENA_CTX (1L<<4)
+#define BNX2_MISC_CS16_ERR_ENA_TBDR (1L<<5)
+#define BNX2_MISC_CS16_ERR_ENA_RBDC (1L<<6)
+#define BNX2_MISC_CS16_ERR_ENA_COM (1L<<7)
+#define BNX2_MISC_CS16_ERR_ENA_CP (1L<<8)
+#define BNX2_MISC_CS16_ERR_STA_PCI (1L<<16)
+#define BNX2_MISC_CS16_ERR_STA_RDMA (1L<<17)
+#define BNX2_MISC_CS16_ERR_STA_TDMA (1L<<18)
+#define BNX2_MISC_CS16_ERR_STA_EMAC (1L<<19)
+#define BNX2_MISC_CS16_ERR_STA_CTX (1L<<20)
+#define BNX2_MISC_CS16_ERR_STA_TBDR (1L<<21)
+#define BNX2_MISC_CS16_ERR_STA_RBDC (1L<<22)
+#define BNX2_MISC_CS16_ERR_STA_COM (1L<<23)
+#define BNX2_MISC_CS16_ERR_STA_CP (1L<<24)
+
+#define BNX2_MISC_SPIO_EVENT 0x000008e4
+#define BNX2_MISC_SPIO_EVENT_ENABLE (0xffL<<0)
+
+#define BNX2_MISC_PPIO_EVENT 0x000008e8
+#define BNX2_MISC_PPIO_EVENT_ENABLE (0xfL<<0)
+
+#define BNX2_MISC_DUAL_MEDIA_CTRL 0x000008ec
+#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID (0xffL<<0)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_X (0L<<0)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C (3L<<0)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S (12L<<0)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP (0x7L<<8)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PORT_SWAP_PIN (1L<<11)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES1_SIGDET (1L<<12)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES0_SIGDET (1L<<13)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY1_SIGDET (1L<<14)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY0_SIGDET (1L<<15)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_LCPLL_RST (1L<<16)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES1_RST (1L<<17)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES0_RST (1L<<18)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY1_RST (1L<<19)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY0_RST (1L<<20)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL (0x7L<<21)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PORT_SWAP (1L<<24)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE (1L<<25)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ (0xfL<<26)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_SER1_IDDQ (1L<<26)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_SER0_IDDQ (2L<<26)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_PHY1_IDDQ (4L<<26)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_PHY0_IDDQ (8L<<26)
+
+#define BNX2_MISC_OTP_CMD1 0x000008f0
+#define BNX2_MISC_OTP_CMD1_FMODE (0x7L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_IDLE (0L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_WRITE (1L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_INIT (2L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_SET (3L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_RST (4L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_VERIFY (5L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_RESERVED0 (6L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_RESERVED1 (7L<<0)
+#define BNX2_MISC_OTP_CMD1_USEPINS (1L<<8)
+#define BNX2_MISC_OTP_CMD1_PROGSEL (1L<<9)
+#define BNX2_MISC_OTP_CMD1_PROGSTART (1L<<10)
+#define BNX2_MISC_OTP_CMD1_PCOUNT (0x7L<<16)
+#define BNX2_MISC_OTP_CMD1_PBYP (1L<<19)
+#define BNX2_MISC_OTP_CMD1_VSEL (0xfL<<20)
+#define BNX2_MISC_OTP_CMD1_TM (0x7L<<27)
+#define BNX2_MISC_OTP_CMD1_SADBYP (1L<<30)
+#define BNX2_MISC_OTP_CMD1_DEBUG (1L<<31)
+
+#define BNX2_MISC_OTP_CMD2 0x000008f4
+#define BNX2_MISC_OTP_CMD2_OTP_ROM_ADDR (0x3ffL<<0)
+#define BNX2_MISC_OTP_CMD2_DOSEL (0x7fL<<16)
+#define BNX2_MISC_OTP_CMD2_DOSEL_0 (0L<<16)
+#define BNX2_MISC_OTP_CMD2_DOSEL_1 (1L<<16)
+#define BNX2_MISC_OTP_CMD2_DOSEL_127 (127L<<16)
+
+#define BNX2_MISC_OTP_STATUS 0x000008f8
+#define BNX2_MISC_OTP_STATUS_DATA (0xffL<<0)
+#define BNX2_MISC_OTP_STATUS_VALID (1L<<8)
+#define BNX2_MISC_OTP_STATUS_BUSY (1L<<9)
+#define BNX2_MISC_OTP_STATUS_BUSYSM (1L<<10)
+#define BNX2_MISC_OTP_STATUS_DONE (1L<<11)
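+
+/*
+ * Illustrative OTP poll loop (a sketch inferred from the field names,
+ * not taken from the driver): after kicking off an OTP command via
+ * BNX2_MISC_OTP_CMD1, wait for the BUSY bits to clear and check DONE,
+ * assuming the REG_RD() accessor defined elsewhere in this header:
+ *
+ *	u32 sts;
+ *	do {
+ *		sts = REG_RD(bp, BNX2_MISC_OTP_STATUS);
+ *	} while (sts & (BNX2_MISC_OTP_STATUS_BUSY |
+ *			BNX2_MISC_OTP_STATUS_BUSYSM));
+ *	if (sts & BNX2_MISC_OTP_STATUS_DONE)
+ *		data = sts & BNX2_MISC_OTP_STATUS_DATA;
+ */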
+
+#define BNX2_MISC_OTP_SHIFT1_CMD 0x000008fc
+#define BNX2_MISC_OTP_SHIFT1_CMD_RESET_MODE_N (1L<<0)
+#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_DONE (1L<<1)
+#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_START (1L<<2)
+#define BNX2_MISC_OTP_SHIFT1_CMD_LOAD_DATA (1L<<3)
+#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_SELECT (0x1fL<<8)
+
+#define BNX2_MISC_OTP_SHIFT1_DATA 0x00000900
+#define BNX2_MISC_OTP_SHIFT2_CMD 0x00000904
+#define BNX2_MISC_OTP_SHIFT2_CMD_RESET_MODE_N (1L<<0)
+#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_DONE (1L<<1)
+#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_START (1L<<2)
+#define BNX2_MISC_OTP_SHIFT2_CMD_LOAD_DATA (1L<<3)
+#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_SELECT (0x1fL<<8)
+
+#define BNX2_MISC_OTP_SHIFT2_DATA 0x00000908
+#define BNX2_MISC_BIST_CS0 0x0000090c
+#define BNX2_MISC_BIST_CS0_MBIST_EN (1L<<0)
+#define BNX2_MISC_BIST_CS0_BIST_SETUP (0x3L<<1)
+#define BNX2_MISC_BIST_CS0_MBIST_ASYNC_RESET (1L<<3)
+#define BNX2_MISC_BIST_CS0_MBIST_DONE (1L<<8)
+#define BNX2_MISC_BIST_CS0_MBIST_GO (1L<<9)
+#define BNX2_MISC_BIST_CS0_BIST_OVERRIDE (1L<<31)
+
+#define BNX2_MISC_BIST_MEMSTATUS0 0x00000910
+#define BNX2_MISC_BIST_CS1 0x00000914
+#define BNX2_MISC_BIST_CS1_MBIST_EN (1L<<0)
+#define BNX2_MISC_BIST_CS1_BIST_SETUP (0x3L<<1)
+#define BNX2_MISC_BIST_CS1_MBIST_ASYNC_RESET (1L<<3)
+#define BNX2_MISC_BIST_CS1_MBIST_DONE (1L<<8)
+#define BNX2_MISC_BIST_CS1_MBIST_GO (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS1 0x00000918
+#define BNX2_MISC_BIST_CS2 0x0000091c
+#define BNX2_MISC_BIST_CS2_MBIST_EN (1L<<0)
+#define BNX2_MISC_BIST_CS2_BIST_SETUP (0x3L<<1)
+#define BNX2_MISC_BIST_CS2_MBIST_ASYNC_RESET (1L<<3)
+#define BNX2_MISC_BIST_CS2_MBIST_DONE (1L<<8)
+#define BNX2_MISC_BIST_CS2_MBIST_GO (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS2 0x00000920
+#define BNX2_MISC_BIST_CS3 0x00000924
+#define BNX2_MISC_BIST_CS3_MBIST_EN (1L<<0)
+#define BNX2_MISC_BIST_CS3_BIST_SETUP (0x3L<<1)
+#define BNX2_MISC_BIST_CS3_MBIST_ASYNC_RESET (1L<<3)
+#define BNX2_MISC_BIST_CS3_MBIST_DONE (1L<<8)
+#define BNX2_MISC_BIST_CS3_MBIST_GO (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS3 0x00000928
+#define BNX2_MISC_BIST_CS4 0x0000092c
+#define BNX2_MISC_BIST_CS4_MBIST_EN (1L<<0)
+#define BNX2_MISC_BIST_CS4_BIST_SETUP (0x3L<<1)
+#define BNX2_MISC_BIST_CS4_MBIST_ASYNC_RESET (1L<<3)
+#define BNX2_MISC_BIST_CS4_MBIST_DONE (1L<<8)
+#define BNX2_MISC_BIST_CS4_MBIST_GO (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS4 0x00000930
+#define BNX2_MISC_BIST_CS5 0x00000934
+#define BNX2_MISC_BIST_CS5_MBIST_EN (1L<<0)
+#define BNX2_MISC_BIST_CS5_BIST_SETUP (0x3L<<1)
+#define BNX2_MISC_BIST_CS5_MBIST_ASYNC_RESET (1L<<3)
+#define BNX2_MISC_BIST_CS5_MBIST_DONE (1L<<8)
+#define BNX2_MISC_BIST_CS5_MBIST_GO (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS5 0x00000938
+#define BNX2_MISC_MEM_TM0 0x0000093c
+#define BNX2_MISC_MEM_TM0_PCIE_REPLAY_TM (0xfL<<0)
+#define BNX2_MISC_MEM_TM0_MCP_SCPAD (0xfL<<8)
+#define BNX2_MISC_MEM_TM0_UMP_TM (0xffL<<16)
+#define BNX2_MISC_MEM_TM0_HB_MEM_TM (0xfL<<24)
+
+#define BNX2_MISC_USPLL_CTRL 0x00000940
+#define BNX2_MISC_USPLL_CTRL_PH_DET_DIS (1L<<0)
+#define BNX2_MISC_USPLL_CTRL_FREQ_DET_DIS (1L<<1)
+#define BNX2_MISC_USPLL_CTRL_LCPX (0x3fL<<2)
+#define BNX2_MISC_USPLL_CTRL_RX (0x3L<<8)
+#define BNX2_MISC_USPLL_CTRL_VC_EN (1L<<10)
+#define BNX2_MISC_USPLL_CTRL_VCO_MG (0x3L<<11)
+#define BNX2_MISC_USPLL_CTRL_KVCO_XF (0x7L<<13)
+#define BNX2_MISC_USPLL_CTRL_KVCO_XS (0x7L<<16)
+#define BNX2_MISC_USPLL_CTRL_TESTD_EN (1L<<19)
+#define BNX2_MISC_USPLL_CTRL_TESTD_SEL (0x7L<<20)
+#define BNX2_MISC_USPLL_CTRL_TESTA_EN (1L<<23)
+#define BNX2_MISC_USPLL_CTRL_TESTA_SEL (0x3L<<24)
+#define BNX2_MISC_USPLL_CTRL_ATTEN_FREF (1L<<26)
+#define BNX2_MISC_USPLL_CTRL_DIGITAL_RST (1L<<27)
+#define BNX2_MISC_USPLL_CTRL_ANALOG_RST (1L<<28)
+#define BNX2_MISC_USPLL_CTRL_LOCK (1L<<29)
+
+#define BNX2_MISC_PERR_STATUS0 0x00000944
+#define BNX2_MISC_PERR_STATUS0_COM_DMAE_PERR (1L<<0)
+#define BNX2_MISC_PERR_STATUS0_CP_DMAE_PERR (1L<<1)
+#define BNX2_MISC_PERR_STATUS0_RPM_ACPIBEMEM_PERR (1L<<2)
+#define BNX2_MISC_PERR_STATUS0_CTX_USAGE_CNT_PERR (1L<<3)
+#define BNX2_MISC_PERR_STATUS0_CTX_PGTBL_PERR (1L<<4)
+#define BNX2_MISC_PERR_STATUS0_CTX_CACHE_PERR (1L<<5)
+#define BNX2_MISC_PERR_STATUS0_CTX_MIRROR_PERR (1L<<6)
+#define BNX2_MISC_PERR_STATUS0_COM_CTXC_PERR (1L<<7)
+#define BNX2_MISC_PERR_STATUS0_COM_SCPAD_PERR (1L<<8)
+#define BNX2_MISC_PERR_STATUS0_CP_CTXC_PERR (1L<<9)
+#define BNX2_MISC_PERR_STATUS0_CP_SCPAD_PERR (1L<<10)
+#define BNX2_MISC_PERR_STATUS0_RXP_RBUFC_PERR (1L<<11)
+#define BNX2_MISC_PERR_STATUS0_RXP_CTXC_PERR (1L<<12)
+#define BNX2_MISC_PERR_STATUS0_RXP_SCPAD_PERR (1L<<13)
+#define BNX2_MISC_PERR_STATUS0_TPAT_SCPAD_PERR (1L<<14)
+#define BNX2_MISC_PERR_STATUS0_TXP_CTXC_PERR (1L<<15)
+#define BNX2_MISC_PERR_STATUS0_TXP_SCPAD_PERR (1L<<16)
+#define BNX2_MISC_PERR_STATUS0_CS_TMEM_PERR (1L<<17)
+#define BNX2_MISC_PERR_STATUS0_MQ_CTX_PERR (1L<<18)
+#define BNX2_MISC_PERR_STATUS0_RPM_DFIFOMEM_PERR (1L<<19)
+#define BNX2_MISC_PERR_STATUS0_RPC_DFIFOMEM_PERR (1L<<20)
+#define BNX2_MISC_PERR_STATUS0_RBUF_PTRMEM_PERR (1L<<21)
+#define BNX2_MISC_PERR_STATUS0_RBUF_DATAMEM_PERR (1L<<22)
+#define BNX2_MISC_PERR_STATUS0_RV2P_P2IRAM_PERR (1L<<23)
+#define BNX2_MISC_PERR_STATUS0_RV2P_P1IRAM_PERR (1L<<24)
+#define BNX2_MISC_PERR_STATUS0_RV2P_CB1REGS_PERR (1L<<25)
+#define BNX2_MISC_PERR_STATUS0_RV2P_CB0REGS_PERR (1L<<26)
+#define BNX2_MISC_PERR_STATUS0_TPBUF_PERR (1L<<27)
+#define BNX2_MISC_PERR_STATUS0_THBUF_PERR (1L<<28)
+#define BNX2_MISC_PERR_STATUS0_TDMA_PERR (1L<<29)
+#define BNX2_MISC_PERR_STATUS0_TBDC_PERR (1L<<30)
+#define BNX2_MISC_PERR_STATUS0_TSCH_LR_PERR (1L<<31)
+
+#define BNX2_MISC_PERR_STATUS1 0x00000948
+#define BNX2_MISC_PERR_STATUS1_RBDC_PERR (1L<<0)
+#define BNX2_MISC_PERR_STATUS1_RDMA_DFIFO_PERR (1L<<2)
+#define BNX2_MISC_PERR_STATUS1_HC_STATS_PERR (1L<<3)
+#define BNX2_MISC_PERR_STATUS1_HC_MSIX_PERR (1L<<4)
+#define BNX2_MISC_PERR_STATUS1_HC_PRODUCSTB_PERR (1L<<5)
+#define BNX2_MISC_PERR_STATUS1_HC_CONSUMSTB_PERR (1L<<6)
+#define BNX2_MISC_PERR_STATUS1_TPATQ_PERR (1L<<7)
+#define BNX2_MISC_PERR_STATUS1_MCPQ_PERR (1L<<8)
+#define BNX2_MISC_PERR_STATUS1_TDMAQ_PERR (1L<<9)
+#define BNX2_MISC_PERR_STATUS1_TXPQ_PERR (1L<<10)
+#define BNX2_MISC_PERR_STATUS1_COMTQ_PERR (1L<<11)
+#define BNX2_MISC_PERR_STATUS1_COMQ_PERR (1L<<12)
+#define BNX2_MISC_PERR_STATUS1_RLUPQ_PERR (1L<<13)
+#define BNX2_MISC_PERR_STATUS1_RXPQ_PERR (1L<<14)
+#define BNX2_MISC_PERR_STATUS1_RV2PPQ_PERR (1L<<15)
+#define BNX2_MISC_PERR_STATUS1_RDMAQ_PERR (1L<<16)
+#define BNX2_MISC_PERR_STATUS1_TASQ_PERR (1L<<17)
+#define BNX2_MISC_PERR_STATUS1_TBDRQ_PERR (1L<<18)
+#define BNX2_MISC_PERR_STATUS1_TSCHQ_PERR (1L<<19)
+#define BNX2_MISC_PERR_STATUS1_COMXQ_PERR (1L<<20)
+#define BNX2_MISC_PERR_STATUS1_RXPCQ_PERR (1L<<21)
+#define BNX2_MISC_PERR_STATUS1_RV2PTQ_PERR (1L<<22)
+#define BNX2_MISC_PERR_STATUS1_RV2PMQ_PERR (1L<<23)
+#define BNX2_MISC_PERR_STATUS1_CPQ_PERR (1L<<24)
+#define BNX2_MISC_PERR_STATUS1_CSQ_PERR (1L<<25)
+#define BNX2_MISC_PERR_STATUS1_RLUP_CID_PERR (1L<<26)
+#define BNX2_MISC_PERR_STATUS1_RV2PCS_TMEM_PERR (1L<<27)
+#define BNX2_MISC_PERR_STATUS1_RV2PCSQ_PERR (1L<<28)
+#define BNX2_MISC_PERR_STATUS1_MQ_IDX_PERR (1L<<29)
+
+#define BNX2_MISC_PERR_STATUS2 0x0000094c
+#define BNX2_MISC_PERR_STATUS2_TGT_FIFO_PERR (1L<<0)
+#define BNX2_MISC_PERR_STATUS2_UMP_TX_PERR (1L<<1)
+#define BNX2_MISC_PERR_STATUS2_UMP_RX_PERR (1L<<2)
+#define BNX2_MISC_PERR_STATUS2_MCP_ROM_PERR (1L<<3)
+#define BNX2_MISC_PERR_STATUS2_MCP_SCPAD_PERR (1L<<4)
+#define BNX2_MISC_PERR_STATUS2_HB_MEM_PERR (1L<<5)
+#define BNX2_MISC_PERR_STATUS2_PCIE_REPLAY_PERR (1L<<6)
+
+#define BNX2_MISC_LCPLL_CTRL0 0x00000950
+#define BNX2_MISC_LCPLL_CTRL0_OAC (0x7L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_OAC_NEGTWENTY (0L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_OAC_ZERO (1L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_OAC_TWENTY (3L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_OAC_FORTY (7L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL (0x7L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_360 (0L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_480 (1L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_600 (3L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_720 (7L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_BIAS_CTRL (0x3L<<6)
+#define BNX2_MISC_LCPLL_CTRL0_PLL_OBSERVE (0x7L<<8)
+#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL (0x3L<<11)
+#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_0 (0L<<11)
+#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_1 (1L<<11)
+#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_2 (2L<<11)
+#define BNX2_MISC_LCPLL_CTRL0_PLLSEQSTART (1L<<13)
+#define BNX2_MISC_LCPLL_CTRL0_RESERVED (1L<<14)
+#define BNX2_MISC_LCPLL_CTRL0_CAPRETRY_EN (1L<<15)
+#define BNX2_MISC_LCPLL_CTRL0_FREQMONITOR_EN (1L<<16)
+#define BNX2_MISC_LCPLL_CTRL0_FREQDETRESTART_EN (1L<<17)
+#define BNX2_MISC_LCPLL_CTRL0_FREQDETRETRY_EN (1L<<18)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFDONE_EN (1L<<19)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFDONE (1L<<20)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFPASS (1L<<21)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPDONE_EN (1L<<22)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPDONE (1L<<23)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPPASS_EN (1L<<24)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPPASS (1L<<25)
+#define BNX2_MISC_LCPLL_CTRL0_CAPRESTART (1L<<26)
+#define BNX2_MISC_LCPLL_CTRL0_CAPSELECTM_EN (1L<<27)
+
+#define BNX2_MISC_LCPLL_CTRL1 0x00000954
+#define BNX2_MISC_LCPLL_CTRL1_CAPSELECTM (0x1fL<<0)
+#define BNX2_MISC_LCPLL_CTRL1_CAPFORCESLOWDOWN_EN (1L<<5)
+#define BNX2_MISC_LCPLL_CTRL1_CAPFORCESLOWDOWN (1L<<6)
+#define BNX2_MISC_LCPLL_CTRL1_SLOWDN_XOR (1L<<7)
+
+#define BNX2_MISC_LCPLL_STATUS 0x00000958
+#define BNX2_MISC_LCPLL_STATUS_FREQDONE_SM (1L<<0)
+#define BNX2_MISC_LCPLL_STATUS_FREQPASS_SM (1L<<1)
+#define BNX2_MISC_LCPLL_STATUS_PLLSEQDONE (1L<<2)
+#define BNX2_MISC_LCPLL_STATUS_PLLSEQPASS (1L<<3)
+#define BNX2_MISC_LCPLL_STATUS_PLLSTATE (0x7L<<4)
+#define BNX2_MISC_LCPLL_STATUS_CAPSTATE (0x7L<<7)
+#define BNX2_MISC_LCPLL_STATUS_CAPSELECT (0x1fL<<10)
+#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR (1L<<15)
+#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR_0 (0L<<15)
+#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR_1 (1L<<15)
+
+#define BNX2_MISC_OSCFUNDS_CTRL 0x0000095c
+#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON (1L<<5)
+#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON_OFF (0L<<5)
+#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON_ON (1L<<5)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM (0x3L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_0 (0L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_1 (1L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_2 (2L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_3 (3L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ (0x3L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_0 (0L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_1 (1L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_2 (2L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_3 (3L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ (0x3L<<10)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_0 (0L<<10)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_1 (1L<<10)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_2 (2L<<10)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_3 (3L<<10)
+
+
+/*
+ * nvm_reg definition
+ * offset: 0x6400
+ */
+#define BNX2_NVM_COMMAND 0x00006400
+#define BNX2_NVM_COMMAND_RST (1L<<0)
+#define BNX2_NVM_COMMAND_DONE (1L<<3)
+#define BNX2_NVM_COMMAND_DOIT (1L<<4)
+#define BNX2_NVM_COMMAND_WR (1L<<5)
+#define BNX2_NVM_COMMAND_ERASE (1L<<6)
+#define BNX2_NVM_COMMAND_FIRST (1L<<7)
+#define BNX2_NVM_COMMAND_LAST (1L<<8)
+#define BNX2_NVM_COMMAND_WREN (1L<<16)
+#define BNX2_NVM_COMMAND_WRDI (1L<<17)
+#define BNX2_NVM_COMMAND_EWSR (1L<<18)
+#define BNX2_NVM_COMMAND_WRSR (1L<<19)
+#define BNX2_NVM_COMMAND_RD_ID (1L<<20)
+#define BNX2_NVM_COMMAND_RD_STATUS (1L<<21)
+#define BNX2_NVM_COMMAND_MODE_256 (1L<<22)
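+
+/*
+ * Typical command flow (a sketch modeled on the driver's NVRAM read
+ * path; REG_RD()/REG_WR() are assumed from elsewhere in this header):
+ * load the byte offset into BNX2_NVM_ADDR, issue DOIT with FIRST/LAST
+ * framing, then poll DONE before reading BNX2_NVM_READ:
+ *
+ *	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
+ *	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DOIT |
+ *	       BNX2_NVM_COMMAND_FIRST | BNX2_NVM_COMMAND_LAST);
+ *	while (!(REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE))
+ *		udelay(5);
+ *	val = REG_RD(bp, BNX2_NVM_READ);
+ */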
+
+#define BNX2_NVM_STATUS 0x00006404
+#define BNX2_NVM_STATUS_PI_FSM_STATE (0xfL<<0)
+#define BNX2_NVM_STATUS_EE_FSM_STATE (0xfL<<4)
+#define BNX2_NVM_STATUS_EQ_FSM_STATE (0xfL<<8)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_XI (0x1fL<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_IDLE_XI (0L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD0_XI (1L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD1_XI (2L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD_FINISH0_XI (3L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD_FINISH1_XI (4L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_ADDR0_XI (5L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA0_XI (6L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA1_XI (7L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA2_XI (8L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA0_XI (9L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA1_XI (10L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA2_XI (11L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID0_XI (12L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID1_XI (13L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID2_XI (14L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID3_XI (15L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID4_XI (16L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CHECK_BUSY0_XI (17L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_ST_WREN_XI (18L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WAIT_XI (19L<<0)
+
+#define BNX2_NVM_WRITE 0x00006408
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE (0xffffffffL<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_BIT_BANG (0L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_EECLK (1L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_EEDATA (2L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SCLK (4L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_CS_B (8L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SO (16L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SI (32L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SI_XI (1L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SO_XI (2L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_CS_B_XI (4L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SCLK_XI (8L<<0)
+
+#define BNX2_NVM_ADDR 0x0000640c
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_BIT_BANG (0L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_EECLK (1L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_EEDATA (2L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SCLK (4L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_CS_B (8L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SO (16L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SI (32L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SI_XI (1L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SO_XI (2L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_CS_B_XI (4L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SCLK_XI (8L<<0)
+
+#define BNX2_NVM_READ 0x00006410
+#define BNX2_NVM_READ_NVM_READ_VALUE (0xffffffffL<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_BIT_BANG (0L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_EECLK (1L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_EEDATA (2L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SCLK (4L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_CS_B (8L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SO (16L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SI (32L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SI_XI (1L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SO_XI (2L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_CS_B_XI (4L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SCLK_XI (8L<<0)
+
+#define BNX2_NVM_CFG1 0x00006414
+#define BNX2_NVM_CFG1_FLASH_MODE (1L<<0)
+#define BNX2_NVM_CFG1_BUFFER_MODE (1L<<1)
+#define BNX2_NVM_CFG1_PASS_MODE (1L<<2)
+#define BNX2_NVM_CFG1_BITBANG_MODE (1L<<3)
+#define BNX2_NVM_CFG1_STATUS_BIT (0x7L<<4)
+#define BNX2_NVM_CFG1_STATUS_BIT_FLASH_RDY (0L<<4)
+#define BNX2_NVM_CFG1_STATUS_BIT_BUFFER_RDY (7L<<4)
+#define BNX2_NVM_CFG1_SPI_CLK_DIV (0xfL<<7)
+#define BNX2_NVM_CFG1_SEE_CLK_DIV (0x7ffL<<11)
+#define BNX2_NVM_CFG1_STRAP_CONTROL_0 (1L<<23)
+#define BNX2_NVM_CFG1_PROTECT_MODE (1L<<24)
+#define BNX2_NVM_CFG1_FLASH_SIZE (1L<<25)
+#define BNX2_NVM_CFG1_FW_USTRAP_1 (1L<<26)
+#define BNX2_NVM_CFG1_FW_USTRAP_0 (1L<<27)
+#define BNX2_NVM_CFG1_FW_USTRAP_2 (1L<<28)
+#define BNX2_NVM_CFG1_FW_USTRAP_3 (1L<<29)
+#define BNX2_NVM_CFG1_FW_FLASH_TYPE_EN (1L<<30)
+#define BNX2_NVM_CFG1_COMPAT_BYPASSS (1L<<31)
+
+#define BNX2_NVM_CFG2 0x00006418
+#define BNX2_NVM_CFG2_ERASE_CMD (0xffL<<0)
+#define BNX2_NVM_CFG2_DUMMY (0xffL<<8)
+#define BNX2_NVM_CFG2_STATUS_CMD (0xffL<<16)
+#define BNX2_NVM_CFG2_READ_ID (0xffL<<24)
+
+#define BNX2_NVM_CFG3 0x0000641c
+#define BNX2_NVM_CFG3_BUFFER_RD_CMD (0xffL<<0)
+#define BNX2_NVM_CFG3_WRITE_CMD (0xffL<<8)
+#define BNX2_NVM_CFG3_BUFFER_WRITE_CMD (0xffL<<16)
+#define BNX2_NVM_CFG3_READ_CMD (0xffL<<24)
+
+#define BNX2_NVM_SW_ARB 0x00006420
+#define BNX2_NVM_SW_ARB_ARB_REQ_SET0 (1L<<0)
+#define BNX2_NVM_SW_ARB_ARB_REQ_SET1 (1L<<1)
+#define BNX2_NVM_SW_ARB_ARB_REQ_SET2 (1L<<2)
+#define BNX2_NVM_SW_ARB_ARB_REQ_SET3 (1L<<3)
+#define BNX2_NVM_SW_ARB_ARB_REQ_CLR0 (1L<<4)
+#define BNX2_NVM_SW_ARB_ARB_REQ_CLR1 (1L<<5)
+#define BNX2_NVM_SW_ARB_ARB_REQ_CLR2 (1L<<6)
+#define BNX2_NVM_SW_ARB_ARB_REQ_CLR3 (1L<<7)
+#define BNX2_NVM_SW_ARB_ARB_ARB0 (1L<<8)
+#define BNX2_NVM_SW_ARB_ARB_ARB1 (1L<<9)
+#define BNX2_NVM_SW_ARB_ARB_ARB2 (1L<<10)
+#define BNX2_NVM_SW_ARB_ARB_ARB3 (1L<<11)
+#define BNX2_NVM_SW_ARB_REQ0 (1L<<12)
+#define BNX2_NVM_SW_ARB_REQ1 (1L<<13)
+#define BNX2_NVM_SW_ARB_REQ2 (1L<<14)
+#define BNX2_NVM_SW_ARB_REQ3 (1L<<15)
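+
+/*
+ * Software arbitration works as request/grant pairs: set ARB_REQ_SETn,
+ * poll the matching ARB_ARBn grant bit, and release with ARB_REQ_CLRn.
+ * A sketch of the acquire side (REG_RD()/REG_WR() assumed), mirroring
+ * how the driver takes slot 2 before touching flash:
+ *
+ *	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
+ *	while (!(REG_RD(bp, BNX2_NVM_SW_ARB) & BNX2_NVM_SW_ARB_ARB_ARB2))
+ *		udelay(5);
+ *	... access flash ...
+ *	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
+ */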
+
+#define BNX2_NVM_ACCESS_ENABLE 0x00006424
+#define BNX2_NVM_ACCESS_ENABLE_EN (1L<<0)
+#define BNX2_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
+
+#define BNX2_NVM_WRITE1 0x00006428
+#define BNX2_NVM_WRITE1_WREN_CMD (0xffL<<0)
+#define BNX2_NVM_WRITE1_WRDI_CMD (0xffL<<8)
+#define BNX2_NVM_WRITE1_SR_DATA (0xffL<<16)
+
+#define BNX2_NVM_CFG4 0x0000642c
+#define BNX2_NVM_CFG4_FLASH_SIZE (0x7L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_1MBIT (0L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_2MBIT (1L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_4MBIT (2L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_8MBIT (3L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_16MBIT (4L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_32MBIT (5L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_64MBIT (6L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_128MBIT (7L<<0)
+#define BNX2_NVM_CFG4_FLASH_VENDOR (1L<<3)
+#define BNX2_NVM_CFG4_FLASH_VENDOR_ST (0L<<3)
+#define BNX2_NVM_CFG4_FLASH_VENDOR_ATMEL (1L<<3)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC (0x3L<<4)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT8 (0L<<4)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT9 (1L<<4)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT10 (2L<<4)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT11 (3L<<4)
+#define BNX2_NVM_CFG4_STATUS_BIT_POLARITY (1L<<6)
+#define BNX2_NVM_CFG4_RESERVED (0x1ffffffL<<7)
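+
+/*
+ * The FLASH_SIZE field encodes the device capacity as a power of two:
+ * a field value of n means (1 << n) Mbit, so 0 -> 1 Mbit up through
+ * 7 -> 128 Mbit.  For example (REG_RD() assumed):
+ *
+ *	u32 mbit = 1 << (REG_RD(bp, BNX2_NVM_CFG4) &
+ *			 BNX2_NVM_CFG4_FLASH_SIZE);
+ */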
+
+#define BNX2_NVM_RECONFIG 0x00006430
+#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE (0xfL<<0)
+#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE_ST (0L<<0)
+#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE_ATMEL (1L<<0)
+#define BNX2_NVM_RECONFIG_RECONFIG_STRAP_VALUE (0xfL<<4)
+#define BNX2_NVM_RECONFIG_RESERVED (0x7fffffL<<8)
+#define BNX2_NVM_RECONFIG_RECONFIG_DONE (1L<<31)
+
+
+/*
+ * dma_reg definition
+ * offset: 0xc00
+ */
+#define BNX2_DMA_COMMAND 0x00000c00
+#define BNX2_DMA_COMMAND_ENABLE (1L<<0)
+
+#define BNX2_DMA_STATUS 0x00000c04
+#define BNX2_DMA_STATUS_PAR_ERROR_STATE (1L<<0)
+#define BNX2_DMA_STATUS_READ_TRANSFERS_STAT (1L<<16)
+#define BNX2_DMA_STATUS_READ_DELAY_PCI_CLKS_STAT (1L<<17)
+#define BNX2_DMA_STATUS_BIG_READ_TRANSFERS_STAT (1L<<18)
+#define BNX2_DMA_STATUS_BIG_READ_DELAY_PCI_CLKS_STAT (1L<<19)
+#define BNX2_DMA_STATUS_BIG_READ_RETRY_AFTER_DATA_STAT (1L<<20)
+#define BNX2_DMA_STATUS_WRITE_TRANSFERS_STAT (1L<<21)
+#define BNX2_DMA_STATUS_WRITE_DELAY_PCI_CLKS_STAT (1L<<22)
+#define BNX2_DMA_STATUS_BIG_WRITE_TRANSFERS_STAT (1L<<23)
+#define BNX2_DMA_STATUS_BIG_WRITE_DELAY_PCI_CLKS_STAT (1L<<24)
+#define BNX2_DMA_STATUS_BIG_WRITE_RETRY_AFTER_DATA_STAT (1L<<25)
+#define BNX2_DMA_STATUS_GLOBAL_ERR_XI (1L<<0)
+#define BNX2_DMA_STATUS_BME_XI (1L<<4)
+
+#define BNX2_DMA_CONFIG 0x00000c08
+#define BNX2_DMA_CONFIG_DATA_BYTE_SWAP (1L<<0)
+#define BNX2_DMA_CONFIG_DATA_WORD_SWAP (1L<<1)
+#define BNX2_DMA_CONFIG_CNTL_BYTE_SWAP (1L<<4)
+#define BNX2_DMA_CONFIG_CNTL_WORD_SWAP (1L<<5)
+#define BNX2_DMA_CONFIG_ONE_DMA (1L<<6)
+#define BNX2_DMA_CONFIG_CNTL_TWO_DMA (1L<<7)
+#define BNX2_DMA_CONFIG_CNTL_FPGA_MODE (1L<<8)
+#define BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA (1L<<10)
+#define BNX2_DMA_CONFIG_CNTL_PCI_COMP_DLY (1L<<11)
+#define BNX2_DMA_CONFIG_NO_RCHANS_IN_USE (0xfL<<12)
+#define BNX2_DMA_CONFIG_NO_WCHANS_IN_USE (0xfL<<16)
+#define BNX2_DMA_CONFIG_PCI_CLK_CMP_BITS (0x7L<<20)
+#define BNX2_DMA_CONFIG_PCI_FAST_CLK_CMP (1L<<23)
+#define BNX2_DMA_CONFIG_BIG_SIZE (0xfL<<24)
+#define BNX2_DMA_CONFIG_BIG_SIZE_NONE (0x0L<<24)
+#define BNX2_DMA_CONFIG_BIG_SIZE_64 (0x1L<<24)
+#define BNX2_DMA_CONFIG_BIG_SIZE_128 (0x2L<<24)
+#define BNX2_DMA_CONFIG_BIG_SIZE_256 (0x4L<<24)
+#define BNX2_DMA_CONFIG_BIG_SIZE_512 (0x8L<<24)
+#define BNX2_DMA_CONFIG_DAT_WBSWAP_MODE_XI (0x3L<<0)
+#define BNX2_DMA_CONFIG_CTL_WBSWAP_MODE_XI (0x3L<<4)
+#define BNX2_DMA_CONFIG_MAX_PL_XI (0x7L<<12)
+#define BNX2_DMA_CONFIG_MAX_PL_128B_XI (0L<<12)
+#define BNX2_DMA_CONFIG_MAX_PL_256B_XI (1L<<12)
+#define BNX2_DMA_CONFIG_MAX_PL_512B_XI (2L<<12)
+#define BNX2_DMA_CONFIG_MAX_PL_EN_XI (1L<<15)
+#define BNX2_DMA_CONFIG_MAX_RRS_XI (0x7L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_128B_XI (0L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_256B_XI (1L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_512B_XI (2L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_1024B_XI (3L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_2048B_XI (4L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_4096B_XI (5L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_EN_XI (1L<<19)
+#define BNX2_DMA_CONFIG_NO_64SWAP_EN_XI (1L<<31)
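+
+/*
+ * The swap and channel-count fields are normally programmed together
+ * at init time.  A sketch modeled on the driver's chip setup, which
+ * byte/word-swaps descriptor data and (on big-endian hosts) control
+ * traffic as well; DMA_READ_CHANS/DMA_WRITE_CHANS are assumed
+ * driver-side constants:
+ *
+ *	u32 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
+ *		  BNX2_DMA_CONFIG_DATA_WORD_SWAP |
+ *	#ifdef __BIG_ENDIAN
+ *		  BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
+ *	#endif
+ *		  BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
+ *		  (DMA_READ_CHANS << 12) | (DMA_WRITE_CHANS << 16);
+ *	REG_WR(bp, BNX2_DMA_CONFIG, val);
+ */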
+
+#define BNX2_DMA_BLACKOUT 0x00000c0c
+#define BNX2_DMA_BLACKOUT_RD_RETRY_BLACKOUT (0xffL<<0)
+#define BNX2_DMA_BLACKOUT_2ND_RD_RETRY_BLACKOUT (0xffL<<8)
+#define BNX2_DMA_BLACKOUT_WR_RETRY_BLACKOUT (0xffL<<16)
+
+#define BNX2_DMA_READ_MASTER_SETTING_0 0x00000c10
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_NO_SNOOP (1L<<0)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_RELAX_ORDER (1L<<1)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_PRIORITY (1L<<2)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_TRAFFIC_CLASS (0x7L<<4)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_PARAM_EN (1L<<7)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_NO_SNOOP (1L<<8)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_RELAX_ORDER (1L<<9)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_PRIORITY (1L<<10)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_TRAFFIC_CLASS (0x7L<<12)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_PARAM_EN (1L<<15)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_NO_SNOOP (1L<<16)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_RELAX_ORDER (1L<<17)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_PRIORITY (1L<<18)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_TRAFFIC_CLASS (0x7L<<20)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_PARAM_EN (1L<<23)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_NO_SNOOP (1L<<24)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_RELAX_ORDER (1L<<25)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_PRIORITY (1L<<26)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_TRAFFIC_CLASS (0x7L<<28)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_PARAM_EN (1L<<31)
+
+#define BNX2_DMA_READ_MASTER_SETTING_1 0x00000c14
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_NO_SNOOP (1L<<0)
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_RELAX_ORDER (1L<<1)
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_PRIORITY (1L<<2)
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_TRAFFIC_CLASS (0x7L<<4)
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_PARAM_EN (1L<<7)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_NO_SNOOP (1L<<8)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_RELAX_ORDER (1L<<9)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_PRIORITY (1L<<10)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_TRAFFIC_CLASS (0x7L<<12)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_PARAM_EN (1L<<15)
+
+#define BNX2_DMA_WRITE_MASTER_SETTING_0 0x00000c18
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_NO_SNOOP (1L<<0)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_RELAX_ORDER (1L<<1)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_PRIORITY (1L<<2)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_CS_VLD (1L<<3)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_TRAFFIC_CLASS (0x7L<<4)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_PARAM_EN (1L<<7)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_NO_SNOOP (1L<<8)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_RELAX_ORDER (1L<<9)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_PRIORITY (1L<<10)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_CS_VLD (1L<<11)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_TRAFFIC_CLASS (0x7L<<12)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_PARAM_EN (1L<<15)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_NO_SNOOP (1L<<24)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_RELAX_ORDER (1L<<25)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_PRIORITY (1L<<26)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_CS_VLD (1L<<27)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_TRAFFIC_CLASS (0x7L<<28)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_PARAM_EN (1L<<31)
+
+#define BNX2_DMA_WRITE_MASTER_SETTING_1 0x00000c1c
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_NO_SNOOP (1L<<0)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_RELAX_ORDER (1L<<1)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_PRIORITY (1L<<2)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_CS_VLD (1L<<3)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_TRAFFIC_CLASS (0x7L<<4)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_PARAM_EN (1L<<7)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_NO_SNOOP (1L<<8)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_RELAX_ORDER (1L<<9)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_PRIORITY (1L<<10)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_CS_VLD (1L<<11)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_TRAFFIC_CLASS (0x7L<<12)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_PARAM_EN (1L<<15)
+
+#define BNX2_DMA_ARBITER 0x00000c20
+#define BNX2_DMA_ARBITER_NUM_READS (0x7L<<0)
+#define BNX2_DMA_ARBITER_WR_ARB_MODE (1L<<4)
+#define BNX2_DMA_ARBITER_WR_ARB_MODE_STRICT (0L<<4)
+#define BNX2_DMA_ARBITER_WR_ARB_MODE_RND_RBN (1L<<4)
+#define BNX2_DMA_ARBITER_RD_ARB_MODE (0x3L<<5)
+#define BNX2_DMA_ARBITER_RD_ARB_MODE_STRICT (0L<<5)
+#define BNX2_DMA_ARBITER_RD_ARB_MODE_RND_RBN (1L<<5)
+#define BNX2_DMA_ARBITER_RD_ARB_MODE_WGT_RND_RBN (2L<<5)
+#define BNX2_DMA_ARBITER_ALT_MODE_EN (1L<<8)
+#define BNX2_DMA_ARBITER_RR_MODE (1L<<9)
+#define BNX2_DMA_ARBITER_TIMER_MODE (1L<<10)
+#define BNX2_DMA_ARBITER_OUSTD_READ_REQ (0xfL<<12)
+
+#define BNX2_DMA_ARB_TIMERS 0x00000c24
+#define BNX2_DMA_ARB_TIMERS_RD_DRR_WAIT_TIME (0xffL<<0)
+#define BNX2_DMA_ARB_TIMERS_TM_MIN_TIMEOUT (0xffL<<12)
+#define BNX2_DMA_ARB_TIMERS_TM_MAX_TIMEOUT (0xfffL<<20)
+
+#define BNX2_DMA_DEBUG_VECT_PEEK 0x00000c2c
+#define BNX2_DMA_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0)
+#define BNX2_DMA_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11)
+#define BNX2_DMA_DEBUG_VECT_PEEK_1_SEL (0xfL<<12)
+#define BNX2_DMA_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16)
+#define BNX2_DMA_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27)
+#define BNX2_DMA_DEBUG_VECT_PEEK_2_SEL (0xfL<<28)
+
+#define BNX2_DMA_TAG_RAM_00 0x00000c30
+#define BNX2_DMA_TAG_RAM_00_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_00_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_00_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_00_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_00_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_00_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_00_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_00_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_01 0x00000c34
+#define BNX2_DMA_TAG_RAM_01_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_01_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_01_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_01_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_01_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_01_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_01_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_01_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_02 0x00000c38
+#define BNX2_DMA_TAG_RAM_02_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_02_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_02_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_02_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_02_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_02_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_02_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_02_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_03 0x00000c3c
+#define BNX2_DMA_TAG_RAM_03_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_03_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_03_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_03_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_03_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_03_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_03_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_03_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_04 0x00000c40
+#define BNX2_DMA_TAG_RAM_04_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_04_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_04_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_04_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_04_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_04_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_04_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_04_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_05 0x00000c44
+#define BNX2_DMA_TAG_RAM_05_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_05_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_05_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_05_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_05_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_05_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_05_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_05_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_06 0x00000c48
+#define BNX2_DMA_TAG_RAM_06_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_06_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_06_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_06_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_06_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_06_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_06_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_06_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_07 0x00000c4c
+#define BNX2_DMA_TAG_RAM_07_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_07_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_07_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_07_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_07_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_07_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_07_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_07_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_08 0x00000c50
+#define BNX2_DMA_TAG_RAM_08_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_08_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_08_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_08_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_08_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_08_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_08_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_08_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_09 0x00000c54
+#define BNX2_DMA_TAG_RAM_09_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_09_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_09_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_09_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_09_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_09_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_09_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_09_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_10 0x00000c58
+#define BNX2_DMA_TAG_RAM_10_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_10_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_10_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_10_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_10_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_10_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_10_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_10_VALID (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_11 0x00000c5c
+#define BNX2_DMA_TAG_RAM_11_CHANNEL (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_11_MASTER (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_CTX (0L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_RBDC (1L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_TBDC (2L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_COM (3L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_CP (4L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_TDMA (5L<<4)
+#define BNX2_DMA_TAG_RAM_11_SWAP (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_11_SWAP_CONFIG (0L<<7)
+#define BNX2_DMA_TAG_RAM_11_SWAP_DATA (1L<<7)
+#define BNX2_DMA_TAG_RAM_11_SWAP_CONTROL (2L<<7)
+#define BNX2_DMA_TAG_RAM_11_FUNCTION (1L<<9)
+#define BNX2_DMA_TAG_RAM_11_VALID (1L<<10)
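+
+/*
+ * All twelve tag RAM entries share one layout at a 4-byte stride, so
+ * the _00 field masks can be reused for any entry (a sketch; idx runs
+ * 0..11, REG_RD() assumed):
+ *
+ *	u32 reg = BNX2_DMA_TAG_RAM_00 + 4 * idx;
+ *	u32 val = REG_RD(bp, reg);
+ *	if (val & BNX2_DMA_TAG_RAM_00_VALID)
+ *		master = (val & BNX2_DMA_TAG_RAM_00_MASTER) >> 4;
+ */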
+
+#define BNX2_DMA_RCHAN_STAT_22 0x00000c60
+#define BNX2_DMA_RCHAN_STAT_30 0x00000c64
+#define BNX2_DMA_RCHAN_STAT_31 0x00000c68
+#define BNX2_DMA_RCHAN_STAT_32 0x00000c6c
+#define BNX2_DMA_RCHAN_STAT_40 0x00000c70
+#define BNX2_DMA_RCHAN_STAT_41 0x00000c74
+#define BNX2_DMA_RCHAN_STAT_42 0x00000c78
+#define BNX2_DMA_RCHAN_STAT_50 0x00000c7c
+#define BNX2_DMA_RCHAN_STAT_51 0x00000c80
+#define BNX2_DMA_RCHAN_STAT_52 0x00000c84
+#define BNX2_DMA_RCHAN_STAT_60 0x00000c88
+#define BNX2_DMA_RCHAN_STAT_61 0x00000c8c
+#define BNX2_DMA_RCHAN_STAT_62 0x00000c90
+#define BNX2_DMA_RCHAN_STAT_70 0x00000c94
+#define BNX2_DMA_RCHAN_STAT_71 0x00000c98
+#define BNX2_DMA_RCHAN_STAT_72 0x00000c9c
+#define BNX2_DMA_WCHAN_STAT_00 0x00000ca0
+#define BNX2_DMA_WCHAN_STAT_00_WCHAN_STA_HOST_ADDR_LOW (0xffffffffL<<0)
+
+#define BNX2_DMA_WCHAN_STAT_01 0x00000ca4
+#define BNX2_DMA_WCHAN_STAT_01_WCHAN_STA_HOST_ADDR_HIGH (0xffffffffL<<0)
+
+#define BNX2_DMA_WCHAN_STAT_02 0x00000ca8
+#define BNX2_DMA_WCHAN_STAT_02_LENGTH (0xffffL<<0)
+#define BNX2_DMA_WCHAN_STAT_02_WORD_SWAP (1L<<16)
+#define BNX2_DMA_WCHAN_STAT_02_BYTE_SWAP (1L<<17)
+#define BNX2_DMA_WCHAN_STAT_02_PRIORITY_LVL (1L<<18)
+
+#define BNX2_DMA_WCHAN_STAT_10 0x00000cac
+#define BNX2_DMA_WCHAN_STAT_11 0x00000cb0
+#define BNX2_DMA_WCHAN_STAT_12 0x00000cb4
+#define BNX2_DMA_WCHAN_STAT_20 0x00000cb8
+#define BNX2_DMA_WCHAN_STAT_21 0x00000cbc
+#define BNX2_DMA_WCHAN_STAT_22 0x00000cc0
+#define BNX2_DMA_WCHAN_STAT_30 0x00000cc4
+#define BNX2_DMA_WCHAN_STAT_31 0x00000cc8
+#define BNX2_DMA_WCHAN_STAT_32 0x00000ccc
+#define BNX2_DMA_WCHAN_STAT_40 0x00000cd0
+#define BNX2_DMA_WCHAN_STAT_41 0x00000cd4
+#define BNX2_DMA_WCHAN_STAT_42 0x00000cd8
+#define BNX2_DMA_WCHAN_STAT_50 0x00000cdc
+#define BNX2_DMA_WCHAN_STAT_51 0x00000ce0
+#define BNX2_DMA_WCHAN_STAT_52 0x00000ce4
+#define BNX2_DMA_WCHAN_STAT_60 0x00000ce8
+#define BNX2_DMA_WCHAN_STAT_61 0x00000cec
+#define BNX2_DMA_WCHAN_STAT_62 0x00000cf0
+#define BNX2_DMA_WCHAN_STAT_70 0x00000cf4
+#define BNX2_DMA_WCHAN_STAT_71 0x00000cf8
+#define BNX2_DMA_WCHAN_STAT_72 0x00000cfc
+#define BNX2_DMA_ARB_STAT_00 0x00000d00
+#define BNX2_DMA_ARB_STAT_00_MASTER (0xffffL<<0)
+#define BNX2_DMA_ARB_STAT_00_MASTER_ENC (0xffL<<16)
+#define BNX2_DMA_ARB_STAT_00_CUR_BINMSTR (0xffL<<24)
+
+#define BNX2_DMA_ARB_STAT_01 0x00000d04
+#define BNX2_DMA_ARB_STAT_01_LPR_RPTR (0xfL<<0)
+#define BNX2_DMA_ARB_STAT_01_LPR_WPTR (0xfL<<4)
+#define BNX2_DMA_ARB_STAT_01_LPB_RPTR (0xfL<<8)
+#define BNX2_DMA_ARB_STAT_01_LPB_WPTR (0xfL<<12)
+#define BNX2_DMA_ARB_STAT_01_HPR_RPTR (0xfL<<16)
+#define BNX2_DMA_ARB_STAT_01_HPR_WPTR (0xfL<<20)
+#define BNX2_DMA_ARB_STAT_01_HPB_RPTR (0xfL<<24)
+#define BNX2_DMA_ARB_STAT_01_HPB_WPTR (0xfL<<28)
+
+#define BNX2_DMA_FUSE_CTRL0_CMD 0x00000f00
+#define BNX2_DMA_FUSE_CTRL0_CMD_PWRUP_DONE (1L<<0)
+#define BNX2_DMA_FUSE_CTRL0_CMD_SHIFT_DONE (1L<<1)
+#define BNX2_DMA_FUSE_CTRL0_CMD_SHIFT (1L<<2)
+#define BNX2_DMA_FUSE_CTRL0_CMD_LOAD (1L<<3)
+#define BNX2_DMA_FUSE_CTRL0_CMD_SEL (0xfL<<8)
+
+#define BNX2_DMA_FUSE_CTRL0_DATA 0x00000f04
+#define BNX2_DMA_FUSE_CTRL1_CMD 0x00000f08
+#define BNX2_DMA_FUSE_CTRL1_CMD_PWRUP_DONE (1L<<0)
+#define BNX2_DMA_FUSE_CTRL1_CMD_SHIFT_DONE (1L<<1)
+#define BNX2_DMA_FUSE_CTRL1_CMD_SHIFT (1L<<2)
+#define BNX2_DMA_FUSE_CTRL1_CMD_LOAD (1L<<3)
+#define BNX2_DMA_FUSE_CTRL1_CMD_SEL (0xfL<<8)
+
+#define BNX2_DMA_FUSE_CTRL1_DATA 0x00000f0c
+#define BNX2_DMA_FUSE_CTRL2_CMD 0x00000f10
+#define BNX2_DMA_FUSE_CTRL2_CMD_PWRUP_DONE (1L<<0)
+#define BNX2_DMA_FUSE_CTRL2_CMD_SHIFT_DONE (1L<<1)
+#define BNX2_DMA_FUSE_CTRL2_CMD_SHIFT (1L<<2)
+#define BNX2_DMA_FUSE_CTRL2_CMD_LOAD (1L<<3)
+#define BNX2_DMA_FUSE_CTRL2_CMD_SEL (0xfL<<8)
+
+#define BNX2_DMA_FUSE_CTRL2_DATA 0x00000f14
+
+
+/*
+ * context_reg definition
+ * offset: 0x1000
+ */
+#define BNX2_CTX_COMMAND 0x00001000
+#define BNX2_CTX_COMMAND_ENABLED (1L<<0)
+#define BNX2_CTX_COMMAND_DISABLE_USAGE_CNT (1L<<1)
+#define BNX2_CTX_COMMAND_DISABLE_PLRU (1L<<2)
+#define BNX2_CTX_COMMAND_DISABLE_COMBINE_READ (1L<<3)
+#define BNX2_CTX_COMMAND_FLUSH_AHEAD (0x1fL<<8)
+#define BNX2_CTX_COMMAND_MEM_INIT (1L<<13)
+#define BNX2_CTX_COMMAND_PAGE_SIZE (0xfL<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_256 (0L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_512 (1L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_1K (2L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_2K (3L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_4K (4L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_8K (5L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_16K (6L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_32K (7L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_64K (8L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_128K (9L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_256K (10L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_512K (11L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_1M (12L<<16)
+
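+/*
+ * Illustrative sketch only: the PAGE_SIZE encoding above is log2-based,
+ * value n selecting a page of (256 << n) bytes up to 1M (n == 12).  A
+ * hypothetical helper that builds the field from a byte count:
+ */
+static inline u32 bnx2_ctx_page_size_field(u32 bytes)
+{
+	u32 n = 0;
+
+	while (n < 12 && (256U << n) < bytes)
+		n++;
+	return (n << 16) & BNX2_CTX_COMMAND_PAGE_SIZE;	/* bits 19:16 */
+}
+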
+#define BNX2_CTX_STATUS 0x00001004
+#define BNX2_CTX_STATUS_LOCK_WAIT (1L<<0)
+#define BNX2_CTX_STATUS_READ_STAT (1L<<16)
+#define BNX2_CTX_STATUS_WRITE_STAT (1L<<17)
+#define BNX2_CTX_STATUS_ACC_STALL_STAT (1L<<18)
+#define BNX2_CTX_STATUS_LOCK_STALL_STAT (1L<<19)
+#define BNX2_CTX_STATUS_EXT_READ_STAT (1L<<20)
+#define BNX2_CTX_STATUS_EXT_WRITE_STAT (1L<<21)
+#define BNX2_CTX_STATUS_MISS_STAT (1L<<22)
+#define BNX2_CTX_STATUS_HIT_STAT (1L<<23)
+#define BNX2_CTX_STATUS_DEAD_LOCK (1L<<24)
+#define BNX2_CTX_STATUS_USAGE_CNT_ERR (1L<<25)
+#define BNX2_CTX_STATUS_INVALID_PAGE (1L<<26)
+
+#define BNX2_CTX_VIRT_ADDR 0x00001008
+#define BNX2_CTX_VIRT_ADDR_VIRT_ADDR (0x7fffL<<6)
+
+#define BNX2_CTX_PAGE_TBL 0x0000100c
+#define BNX2_CTX_PAGE_TBL_PAGE_TBL (0x3fffL<<6)
+
+#define BNX2_CTX_DATA_ADR 0x00001010
+#define BNX2_CTX_DATA_ADR_DATA_ADR (0x7ffffL<<2)
+
+#define BNX2_CTX_DATA 0x00001014
+#define BNX2_CTX_LOCK 0x00001018
+#define BNX2_CTX_LOCK_TYPE (0x7L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_VOID (0x0L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_PROTOCOL (0x1L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_TX (0x2L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_TIMER (0x4L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_COMPLETE (0x7L<<0)
+#define BNX2_CTX_LOCK_TYPE_VOID_XI (0L<<0)
+#define BNX2_CTX_LOCK_TYPE_PROTOCOL_XI (1L<<0)
+#define BNX2_CTX_LOCK_TYPE_TX_XI (2L<<0)
+#define BNX2_CTX_LOCK_TYPE_TIMER_XI (4L<<0)
+#define BNX2_CTX_LOCK_TYPE_COMPLETE_XI (7L<<0)
+#define BNX2_CTX_LOCK_CID_VALUE (0x3fffL<<7)
+#define BNX2_CTX_LOCK_GRANTED (1L<<26)
+#define BNX2_CTX_LOCK_MODE (0x7L<<27)
+#define BNX2_CTX_LOCK_MODE_UNLOCK (0x0L<<27)
+#define BNX2_CTX_LOCK_MODE_IMMEDIATE (0x1L<<27)
+#define BNX2_CTX_LOCK_MODE_SURE (0x2L<<27)
+#define BNX2_CTX_LOCK_STATUS (1L<<30)
+#define BNX2_CTX_LOCK_REQ (1L<<31)
+
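+/*
+ * Illustrative sketch only: the REQ and GRANTED bits above suggest a
+ * write-then-poll lock handshake.  This is a guess at the sequence the
+ * bit layout implies, not code from the driver; readl()/writel() are
+ * the standard kernel MMIO accessors (<linux/io.h>) and 'base' is the
+ * mapped register BAR.
+ */
+static inline int bnx2_ctx_lock_example(void __iomem *base, u32 cid)
+{
+	int i;
+
+	writel(BNX2_CTX_LOCK_REQ | BNX2_CTX_LOCK_MODE_IMMEDIATE |
+	       ((cid << 7) & BNX2_CTX_LOCK_CID_VALUE),
+	       base + BNX2_CTX_LOCK);
+	for (i = 0; i < 1000; i++)
+		if (readl(base + BNX2_CTX_LOCK) & BNX2_CTX_LOCK_GRANTED)
+			return 0;
+	return -1;				/* never granted */
+}
+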
+#define BNX2_CTX_CTX_CTRL 0x0000101c
+#define BNX2_CTX_CTX_CTRL_CTX_ADDR (0x7ffffL<<2)
+#define BNX2_CTX_CTX_CTRL_MOD_USAGE_CNT (0x3L<<21)
+#define BNX2_CTX_CTX_CTRL_NO_RAM_ACC (1L<<23)
+#define BNX2_CTX_CTX_CTRL_PREFETCH_SIZE (0x3L<<24)
+#define BNX2_CTX_CTX_CTRL_ATTR (1L<<26)
+#define BNX2_CTX_CTX_CTRL_WRITE_REQ (1L<<30)
+#define BNX2_CTX_CTX_CTRL_READ_REQ (1L<<31)
+
+#define BNX2_CTX_CTX_DATA 0x00001020
+#define BNX2_CTX_ACCESS_STATUS 0x00001040
+#define BNX2_CTX_ACCESS_STATUS_MASTERENCODED (0xfL<<0)
+#define BNX2_CTX_ACCESS_STATUS_ACCESSMEMORYSM (0x3L<<10)
+#define BNX2_CTX_ACCESS_STATUS_PAGETABLEINITSM (0x3L<<12)
+#define BNX2_CTX_ACCESS_STATUS_ACCESSMEMORYINITSM (0x3L<<14)
+#define BNX2_CTX_ACCESS_STATUS_QUALIFIED_REQUEST (0x7ffL<<17)
+#define BNX2_CTX_ACCESS_STATUS_CAMMASTERENCODED_XI (0x1fL<<0)
+#define BNX2_CTX_ACCESS_STATUS_CACHEMASTERENCODED_XI (0x1fL<<5)
+#define BNX2_CTX_ACCESS_STATUS_REQUEST_XI (0x3fffffL<<10)
+
+#define BNX2_CTX_DBG_LOCK_STATUS 0x00001044
+#define BNX2_CTX_DBG_LOCK_STATUS_SM (0x3ffL<<0)
+#define BNX2_CTX_DBG_LOCK_STATUS_MATCH (0x3ffL<<22)
+
+#define BNX2_CTX_CACHE_CTRL_STATUS 0x00001048
+#define BNX2_CTX_CACHE_CTRL_STATUS_RFIFO_OVERFLOW (1L<<0)
+#define BNX2_CTX_CACHE_CTRL_STATUS_INVALID_READ_COMP (1L<<1)
+#define BNX2_CTX_CACHE_CTRL_STATUS_FLUSH_START (1L<<6)
+#define BNX2_CTX_CACHE_CTRL_STATUS_FREE_ENTRY_CNT (0x3fL<<7)
+#define BNX2_CTX_CACHE_CTRL_STATUS_CACHE_ENTRY_NEEDED (0x3fL<<13)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN0_ACTIVE (1L<<19)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN1_ACTIVE (1L<<20)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN2_ACTIVE (1L<<21)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN3_ACTIVE (1L<<22)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN4_ACTIVE (1L<<23)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN5_ACTIVE (1L<<24)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN6_ACTIVE (1L<<25)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN7_ACTIVE (1L<<26)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN8_ACTIVE (1L<<27)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN9_ACTIVE (1L<<28)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN10_ACTIVE (1L<<29)
+
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS 0x0000104c
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_DWC (0x7L<<0)
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_WFIFOC (0x7L<<3)
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_RTAGC (0x7L<<6)
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_RFIFOC (0x7L<<9)
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_INVALID_BLK_ADDR (0x7fffL<<16)
+
+#define BNX2_CTX_CACHE_STATUS 0x00001050
+#define BNX2_CTX_CACHE_STATUS_HELD_ENTRIES (0x3ffL<<0)
+#define BNX2_CTX_CACHE_STATUS_MAX_HELD_ENTRIES (0x3ffL<<16)
+
+#define BNX2_CTX_DMA_STATUS 0x00001054
+#define BNX2_CTX_DMA_STATUS_RD_CHAN0_STATUS (0x3L<<0)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN1_STATUS (0x3L<<2)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN2_STATUS (0x3L<<4)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN3_STATUS (0x3L<<6)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN4_STATUS (0x3L<<8)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN5_STATUS (0x3L<<10)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN6_STATUS (0x3L<<12)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN7_STATUS (0x3L<<14)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN8_STATUS (0x3L<<16)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN9_STATUS (0x3L<<18)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN10_STATUS (0x3L<<20)
+
+#define BNX2_CTX_REP_STATUS 0x00001058
+#define BNX2_CTX_REP_STATUS_ERROR_ENTRY (0x3ffL<<0)
+#define BNX2_CTX_REP_STATUS_ERROR_CLIENT_ID (0x1fL<<10)
+#define BNX2_CTX_REP_STATUS_USAGE_CNT_MAX_ERR (1L<<16)
+#define BNX2_CTX_REP_STATUS_USAGE_CNT_MIN_ERR (1L<<17)
+#define BNX2_CTX_REP_STATUS_USAGE_CNT_MISS_ERR (1L<<18)
+
+#define BNX2_CTX_CKSUM_ERROR_STATUS 0x0000105c
+#define BNX2_CTX_CKSUM_ERROR_STATUS_CALCULATED (0xffffL<<0)
+#define BNX2_CTX_CKSUM_ERROR_STATUS_EXPECTED (0xffffL<<16)
+
+#define BNX2_CTX_CHNL_LOCK_STATUS_0 0x00001080
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_CID (0x3fffL<<0)
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_TYPE (0x3L<<14)
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_MODE (1L<<16)
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_MODE_XI (1L<<14)
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_TYPE_XI (0x7L<<15)
+
+#define BNX2_CTX_CHNL_LOCK_STATUS_1 0x00001084
+#define BNX2_CTX_CHNL_LOCK_STATUS_2 0x00001088
+#define BNX2_CTX_CHNL_LOCK_STATUS_3 0x0000108c
+#define BNX2_CTX_CHNL_LOCK_STATUS_4 0x00001090
+#define BNX2_CTX_CHNL_LOCK_STATUS_5 0x00001094
+#define BNX2_CTX_CHNL_LOCK_STATUS_6 0x00001098
+#define BNX2_CTX_CHNL_LOCK_STATUS_7 0x0000109c
+#define BNX2_CTX_CHNL_LOCK_STATUS_8 0x000010a0
+#define BNX2_CTX_CHNL_LOCK_STATUS_9 0x000010a4
+
+#define BNX2_CTX_CACHE_DATA 0x000010c4
+#define BNX2_CTX_HOST_PAGE_TBL_CTRL 0x000010c8
+#define BNX2_CTX_HOST_PAGE_TBL_CTRL_PAGE_TBL_ADDR (0x1ffL<<0)
+#define BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ (1L<<30)
+#define BNX2_CTX_HOST_PAGE_TBL_CTRL_READ_REQ (1L<<31)
+
+#define BNX2_CTX_HOST_PAGE_TBL_DATA0 0x000010cc
+#define BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID (1L<<0)
+#define BNX2_CTX_HOST_PAGE_TBL_DATA0_VALUE (0xffffffL<<8)
+
+#define BNX2_CTX_HOST_PAGE_TBL_DATA1 0x000010d0
+#define BNX2_CTX_CAM_CTRL 0x000010d4
+#define BNX2_CTX_CAM_CTRL_CAM_ADDR (0x3ffL<<0)
+#define BNX2_CTX_CAM_CTRL_RESET (1L<<27)
+#define BNX2_CTX_CAM_CTRL_INVALIDATE (1L<<28)
+#define BNX2_CTX_CAM_CTRL_SEARCH (1L<<29)
+#define BNX2_CTX_CAM_CTRL_WRITE_REQ (1L<<30)
+#define BNX2_CTX_CAM_CTRL_READ_REQ (1L<<31)
+
+
+/*
+ * emac_reg definition
+ * offset: 0x1400
+ */
+#define BNX2_EMAC_MODE 0x00001400
+#define BNX2_EMAC_MODE_RESET (1L<<0)
+#define BNX2_EMAC_MODE_HALF_DUPLEX (1L<<1)
+#define BNX2_EMAC_MODE_PORT (0x3L<<2)
+#define BNX2_EMAC_MODE_PORT_NONE (0L<<2)
+#define BNX2_EMAC_MODE_PORT_MII (1L<<2)
+#define BNX2_EMAC_MODE_PORT_GMII (2L<<2)
+#define BNX2_EMAC_MODE_PORT_MII_10M (3L<<2)
+#define BNX2_EMAC_MODE_MAC_LOOP (1L<<4)
+#define BNX2_EMAC_MODE_25G_MODE (1L<<5)
+#define BNX2_EMAC_MODE_TAGGED_MAC_CTL (1L<<7)
+#define BNX2_EMAC_MODE_TX_BURST (1L<<8)
+#define BNX2_EMAC_MODE_MAX_DEFER_DROP_ENA (1L<<9)
+#define BNX2_EMAC_MODE_EXT_LINK_POL (1L<<10)
+#define BNX2_EMAC_MODE_FORCE_LINK (1L<<11)
+#define BNX2_EMAC_MODE_SERDES_MODE (1L<<12)
+#define BNX2_EMAC_MODE_BOND_OVRD (1L<<13)
+#define BNX2_EMAC_MODE_MPKT (1L<<18)
+#define BNX2_EMAC_MODE_MPKT_RCVD (1L<<19)
+#define BNX2_EMAC_MODE_ACPI_RCVD (1L<<20)
+
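+/*
+ * Illustrative sketch, loosely modeled on how the driver reprograms this
+ * register on link change: clear the port/duplex bits, then pick the
+ * port encoding for the negotiated speed (GMII for 1000, plus 25G_MODE
+ * for 2500, MII for 10/100).  Accessors as in the earlier sketch; the
+ * speed values are plain integers here.
+ */
+static inline void bnx2_emac_mode_example(void __iomem *base, int speed)
+{
+	u32 val = readl(base + BNX2_EMAC_MODE);
+
+	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
+		 BNX2_EMAC_MODE_25G_MODE);
+	if (speed == 2500)
+		val |= BNX2_EMAC_MODE_25G_MODE | BNX2_EMAC_MODE_PORT_GMII;
+	else if (speed == 1000)
+		val |= BNX2_EMAC_MODE_PORT_GMII;
+	else
+		val |= BNX2_EMAC_MODE_PORT_MII;
+	writel(val, base + BNX2_EMAC_MODE);
+}
+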
+#define BNX2_EMAC_STATUS 0x00001404
+#define BNX2_EMAC_STATUS_LINK (1L<<11)
+#define BNX2_EMAC_STATUS_LINK_CHANGE (1L<<12)
+#define BNX2_EMAC_STATUS_SERDES_AUTONEG_COMPLETE (1L<<13)
+#define BNX2_EMAC_STATUS_SERDES_AUTONEG_CHANGE (1L<<14)
+#define BNX2_EMAC_STATUS_SERDES_NXT_PG_CHANGE (1L<<16)
+#define BNX2_EMAC_STATUS_SERDES_RX_CONFIG_IS_0 (1L<<17)
+#define BNX2_EMAC_STATUS_SERDES_RX_CONFIG_IS_0_CHANGE (1L<<18)
+#define BNX2_EMAC_STATUS_MI_COMPLETE (1L<<22)
+#define BNX2_EMAC_STATUS_MI_INT (1L<<23)
+#define BNX2_EMAC_STATUS_AP_ERROR (1L<<24)
+#define BNX2_EMAC_STATUS_PARITY_ERROR_STATE (1L<<31)
+
+#define BNX2_EMAC_ATTENTION_ENA 0x00001408
+#define BNX2_EMAC_ATTENTION_ENA_LINK (1L<<11)
+#define BNX2_EMAC_ATTENTION_ENA_AUTONEG_CHANGE (1L<<14)
+#define BNX2_EMAC_ATTENTION_ENA_NXT_PG_CHANGE (1L<<16)
+#define BNX2_EMAC_ATTENTION_ENA_SERDES_RX_CONFIG_IS_0_CHANGE (1L<<18)
+#define BNX2_EMAC_ATTENTION_ENA_MI_COMPLETE (1L<<22)
+#define BNX2_EMAC_ATTENTION_ENA_MI_INT (1L<<23)
+#define BNX2_EMAC_ATTENTION_ENA_AP_ERROR (1L<<24)
+
+#define BNX2_EMAC_LED 0x0000140c
+#define BNX2_EMAC_LED_OVERRIDE (1L<<0)
+#define BNX2_EMAC_LED_1000MB_OVERRIDE (1L<<1)
+#define BNX2_EMAC_LED_100MB_OVERRIDE (1L<<2)
+#define BNX2_EMAC_LED_10MB_OVERRIDE (1L<<3)
+#define BNX2_EMAC_LED_TRAFFIC_OVERRIDE (1L<<4)
+#define BNX2_EMAC_LED_BLNK_TRAFFIC (1L<<5)
+#define BNX2_EMAC_LED_TRAFFIC (1L<<6)
+#define BNX2_EMAC_LED_1000MB (1L<<7)
+#define BNX2_EMAC_LED_100MB (1L<<8)
+#define BNX2_EMAC_LED_10MB (1L<<9)
+#define BNX2_EMAC_LED_TRAFFIC_STAT (1L<<10)
+#define BNX2_EMAC_LED_2500MB (1L<<11)
+#define BNX2_EMAC_LED_2500MB_OVERRIDE (1L<<12)
+#define BNX2_EMAC_LED_ACTIVITY_SEL (0x3L<<17)
+#define BNX2_EMAC_LED_ACTIVITY_SEL_0 (0L<<17)
+#define BNX2_EMAC_LED_ACTIVITY_SEL_1 (1L<<17)
+#define BNX2_EMAC_LED_ACTIVITY_SEL_2 (2L<<17)
+#define BNX2_EMAC_LED_ACTIVITY_SEL_3 (3L<<17)
+#define BNX2_EMAC_LED_BLNK_RATE (0xfffL<<19)
+#define BNX2_EMAC_LED_BLNK_RATE_ENA (1L<<31)
+
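+/*
+ * Illustrative sketch of what the OVERRIDE bits are for, in the spirit
+ * of an ethtool port-identify (blink) handler: software takes the LEDs
+ * away from the MAC, forces them on, and later writes 0 to return them
+ * to hardware control.  Hypothetical helper, not driver code.
+ */
+static inline void bnx2_led_force_example(void __iomem *base, int on)
+{
+	writel(on ? (BNX2_EMAC_LED_OVERRIDE |
+		     BNX2_EMAC_LED_1000MB_OVERRIDE |
+		     BNX2_EMAC_LED_100MB_OVERRIDE |
+		     BNX2_EMAC_LED_10MB_OVERRIDE |
+		     BNX2_EMAC_LED_TRAFFIC_OVERRIDE) : 0,
+	       base + BNX2_EMAC_LED);
+}
+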
+#define BNX2_EMAC_MAC_MATCH0 0x00001410
+#define BNX2_EMAC_MAC_MATCH1 0x00001414
+#define BNX2_EMAC_MAC_MATCH2 0x00001418
+#define BNX2_EMAC_MAC_MATCH3 0x0000141c
+#define BNX2_EMAC_MAC_MATCH4 0x00001420
+#define BNX2_EMAC_MAC_MATCH5 0x00001424
+#define BNX2_EMAC_MAC_MATCH6 0x00001428
+#define BNX2_EMAC_MAC_MATCH7 0x0000142c
+#define BNX2_EMAC_MAC_MATCH8 0x00001430
+#define BNX2_EMAC_MAC_MATCH9 0x00001434
+#define BNX2_EMAC_MAC_MATCH10 0x00001438
+#define BNX2_EMAC_MAC_MATCH11 0x0000143c
+#define BNX2_EMAC_MAC_MATCH12 0x00001440
+#define BNX2_EMAC_MAC_MATCH13 0x00001444
+#define BNX2_EMAC_MAC_MATCH14 0x00001448
+#define BNX2_EMAC_MAC_MATCH15 0x0000144c
+#define BNX2_EMAC_MAC_MATCH16 0x00001450
+#define BNX2_EMAC_MAC_MATCH17 0x00001454
+#define BNX2_EMAC_MAC_MATCH18 0x00001458
+#define BNX2_EMAC_MAC_MATCH19 0x0000145c
+#define BNX2_EMAC_MAC_MATCH20 0x00001460
+#define BNX2_EMAC_MAC_MATCH21 0x00001464
+#define BNX2_EMAC_MAC_MATCH22 0x00001468
+#define BNX2_EMAC_MAC_MATCH23 0x0000146c
+#define BNX2_EMAC_MAC_MATCH24 0x00001470
+#define BNX2_EMAC_MAC_MATCH25 0x00001474
+#define BNX2_EMAC_MAC_MATCH26 0x00001478
+#define BNX2_EMAC_MAC_MATCH27 0x0000147c
+#define BNX2_EMAC_MAC_MATCH28 0x00001480
+#define BNX2_EMAC_MAC_MATCH29 0x00001484
+#define BNX2_EMAC_MAC_MATCH30 0x00001488
+#define BNX2_EMAC_MAC_MATCH31 0x0000148c
+#define BNX2_EMAC_BACKOFF_SEED 0x00001498
+#define BNX2_EMAC_BACKOFF_SEED_EMAC_BACKOFF_SEED (0x3ffL<<0)
+
+#define BNX2_EMAC_RX_MTU_SIZE 0x0000149c
+#define BNX2_EMAC_RX_MTU_SIZE_MTU_SIZE (0xffffL<<0)
+#define BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31)
+
+#define BNX2_EMAC_SERDES_CNTL 0x000014a4
+#define BNX2_EMAC_SERDES_CNTL_RXR (0x7L<<0)
+#define BNX2_EMAC_SERDES_CNTL_RXG (0x3L<<3)
+#define BNX2_EMAC_SERDES_CNTL_RXCKSEL (1L<<6)
+#define BNX2_EMAC_SERDES_CNTL_TXBIAS (0x7L<<7)
+#define BNX2_EMAC_SERDES_CNTL_BGMAX (1L<<10)
+#define BNX2_EMAC_SERDES_CNTL_BGMIN (1L<<11)
+#define BNX2_EMAC_SERDES_CNTL_TXMODE (1L<<12)
+#define BNX2_EMAC_SERDES_CNTL_TXEDGE (1L<<13)
+#define BNX2_EMAC_SERDES_CNTL_SERDES_MODE (1L<<14)
+#define BNX2_EMAC_SERDES_CNTL_PLLTEST (1L<<15)
+#define BNX2_EMAC_SERDES_CNTL_CDET_EN (1L<<16)
+#define BNX2_EMAC_SERDES_CNTL_TBI_LBK (1L<<17)
+#define BNX2_EMAC_SERDES_CNTL_REMOTE_LBK (1L<<18)
+#define BNX2_EMAC_SERDES_CNTL_REV_PHASE (1L<<19)
+#define BNX2_EMAC_SERDES_CNTL_REGCTL12 (0x3L<<20)
+#define BNX2_EMAC_SERDES_CNTL_REGCTL25 (0x3L<<22)
+
+#define BNX2_EMAC_SERDES_STATUS 0x000014a8
+#define BNX2_EMAC_SERDES_STATUS_RX_STAT (0xffL<<0)
+#define BNX2_EMAC_SERDES_STATUS_COMMA_DET (1L<<8)
+
+#define BNX2_EMAC_MDIO_COMM 0x000014ac
+#define BNX2_EMAC_MDIO_COMM_DATA (0xffffL<<0)
+#define BNX2_EMAC_MDIO_COMM_REG_ADDR (0x1fL<<16)
+#define BNX2_EMAC_MDIO_COMM_PHY_ADDR (0x1fL<<21)
+#define BNX2_EMAC_MDIO_COMM_COMMAND (0x3L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_UNDEFINED_0 (0L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE (1L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_READ (2L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE_22_XI (1L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE_45_XI (1L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_22_XI (2L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_INC_45_XI (2L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_UNDEFINED_3 (3L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26)
+#define BNX2_EMAC_MDIO_COMM_FAIL (1L<<28)
+#define BNX2_EMAC_MDIO_COMM_START_BUSY (1L<<29)
+#define BNX2_EMAC_MDIO_COMM_DISEXT (1L<<30)
+
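+/*
+ * Illustrative sketch of a clause-22 PHY read through this register,
+ * patterned after the driver's PHY access path: load address and
+ * command with START_BUSY set, poll until the MAC clears START_BUSY,
+ * then check FAIL and take the result from the low 16 data bits.
+ * udelay() is from <linux/delay.h>; 'base' is the mapped register BAR.
+ */
+static inline int bnx2_mdio_read_example(void __iomem *base, u32 phy,
+					 u32 reg, u32 *val)
+{
+	int i;
+	u32 v = (phy << 21) | (reg << 16) |
+		BNX2_EMAC_MDIO_COMM_COMMAND_READ |
+		BNX2_EMAC_MDIO_COMM_DISEXT |
+		BNX2_EMAC_MDIO_COMM_START_BUSY;
+
+	writel(v, base + BNX2_EMAC_MDIO_COMM);
+	for (i = 0; i < 50; i++) {
+		udelay(10);
+		v = readl(base + BNX2_EMAC_MDIO_COMM);
+		if (!(v & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
+			if (v & BNX2_EMAC_MDIO_COMM_FAIL)
+				return -1;
+			*val = v & BNX2_EMAC_MDIO_COMM_DATA;
+			return 0;
+		}
+	}
+	return -1;				/* timed out */
+}
+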
+#define BNX2_EMAC_MDIO_STATUS 0x000014b0
+#define BNX2_EMAC_MDIO_STATUS_LINK (1L<<0)
+#define BNX2_EMAC_MDIO_STATUS_10MB (1L<<1)
+
+#define BNX2_EMAC_MDIO_MODE 0x000014b4
+#define BNX2_EMAC_MDIO_MODE_SHORT_PREAMBLE (1L<<1)
+#define BNX2_EMAC_MDIO_MODE_AUTO_POLL (1L<<4)
+#define BNX2_EMAC_MDIO_MODE_BIT_BANG (1L<<8)
+#define BNX2_EMAC_MDIO_MODE_MDIO (1L<<9)
+#define BNX2_EMAC_MDIO_MODE_MDIO_OE (1L<<10)
+#define BNX2_EMAC_MDIO_MODE_MDC (1L<<11)
+#define BNX2_EMAC_MDIO_MODE_MDINT (1L<<12)
+#define BNX2_EMAC_MDIO_MODE_EXT_MDINT (1L<<13)
+#define BNX2_EMAC_MDIO_MODE_CLOCK_CNT (0x1fL<<16)
+#define BNX2_EMAC_MDIO_MODE_CLOCK_CNT_XI (0x3fL<<16)
+#define BNX2_EMAC_MDIO_MODE_CLAUSE_45_XI (1L<<31)
+
+#define BNX2_EMAC_MDIO_AUTO_STATUS 0x000014b8
+#define BNX2_EMAC_MDIO_AUTO_STATUS_AUTO_ERR (1L<<0)
+
+#define BNX2_EMAC_TX_MODE 0x000014bc
+#define BNX2_EMAC_TX_MODE_RESET (1L<<0)
+#define BNX2_EMAC_TX_MODE_CS16_TEST (1L<<2)
+#define BNX2_EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3)
+#define BNX2_EMAC_TX_MODE_FLOW_EN (1L<<4)
+#define BNX2_EMAC_TX_MODE_BIG_BACKOFF (1L<<5)
+#define BNX2_EMAC_TX_MODE_LONG_PAUSE (1L<<6)
+#define BNX2_EMAC_TX_MODE_LINK_AWARE (1L<<7)
+
+#define BNX2_EMAC_TX_STATUS 0x000014c0
+#define BNX2_EMAC_TX_STATUS_XOFFED (1L<<0)
+#define BNX2_EMAC_TX_STATUS_XOFF_SENT (1L<<1)
+#define BNX2_EMAC_TX_STATUS_XON_SENT (1L<<2)
+#define BNX2_EMAC_TX_STATUS_LINK_UP (1L<<3)
+#define BNX2_EMAC_TX_STATUS_UNDERRUN (1L<<4)
+#define BNX2_EMAC_TX_STATUS_CS16_ERROR (1L<<5)
+
+#define BNX2_EMAC_TX_LENGTHS 0x000014c4
+#define BNX2_EMAC_TX_LENGTHS_SLOT (0xffL<<0)
+#define BNX2_EMAC_TX_LENGTHS_IPG (0xfL<<8)
+#define BNX2_EMAC_TX_LENGTHS_IPG_CRS (0x3L<<12)
+
+#define BNX2_EMAC_RX_MODE 0x000014c8
+#define BNX2_EMAC_RX_MODE_RESET (1L<<0)
+#define BNX2_EMAC_RX_MODE_FLOW_EN (1L<<2)
+#define BNX2_EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3)
+#define BNX2_EMAC_RX_MODE_KEEP_PAUSE (1L<<4)
+#define BNX2_EMAC_RX_MODE_ACCEPT_OVERSIZE (1L<<5)
+#define BNX2_EMAC_RX_MODE_ACCEPT_RUNTS (1L<<6)
+#define BNX2_EMAC_RX_MODE_LLC_CHK (1L<<7)
+#define BNX2_EMAC_RX_MODE_PROMISCUOUS (1L<<8)
+#define BNX2_EMAC_RX_MODE_NO_CRC_CHK (1L<<9)
+#define BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10)
+#define BNX2_EMAC_RX_MODE_FILT_BROADCAST (1L<<11)
+#define BNX2_EMAC_RX_MODE_SORT_MODE (1L<<12)
+
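+/*
+ * Illustrative sketch only: promiscuous mode is a read-modify-write of
+ * this register, as in the driver's rx-mode path (which also updates
+ * the RPM sort registers defined at 0x1800 below).
+ */
+static inline void bnx2_set_promisc_example(void __iomem *base, int on)
+{
+	u32 val = readl(base + BNX2_EMAC_RX_MODE);
+
+	if (on)
+		val |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
+	else
+		val &= ~BNX2_EMAC_RX_MODE_PROMISCUOUS;
+	writel(val, base + BNX2_EMAC_RX_MODE);
+}
+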
+#define BNX2_EMAC_RX_STATUS 0x000014cc
+#define BNX2_EMAC_RX_STATUS_FFED (1L<<0)
+#define BNX2_EMAC_RX_STATUS_FF_RECEIVED (1L<<1)
+#define BNX2_EMAC_RX_STATUS_N_RECEIVED (1L<<2)
+
+#define BNX2_EMAC_MULTICAST_HASH0 0x000014d0
+#define BNX2_EMAC_MULTICAST_HASH1 0x000014d4
+#define BNX2_EMAC_MULTICAST_HASH2 0x000014d8
+#define BNX2_EMAC_MULTICAST_HASH3 0x000014dc
+#define BNX2_EMAC_MULTICAST_HASH4 0x000014e0
+#define BNX2_EMAC_MULTICAST_HASH5 0x000014e4
+#define BNX2_EMAC_MULTICAST_HASH6 0x000014e8
+#define BNX2_EMAC_MULTICAST_HASH7 0x000014ec
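+
+/*
+ * Illustrative sketch of the hash these eight registers implement,
+ * patterned after the driver's multicast filter setup: the low byte of
+ * the little-endian CRC-32 of the MAC address selects one of 256 filter
+ * bits, three bits picking the HASHn register and five the bit within
+ * it.  ether_crc_le() is from <linux/crc32.h>.
+ */
+static inline void bnx2_mc_hash_example(u32 mc_filter[8], const u8 *addr)
+{
+	u32 bit = ether_crc_le(6, addr) & 0xff;
+	u32 regidx = (bit & 0xe0) >> 5;		/* HASH0..HASH7 */
+
+	mc_filter[regidx] |= 1U << (bit & 0x1f);
+}
+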
+#define BNX2_EMAC_CKSUM_ERROR_STATUS 0x000014f0
+#define BNX2_EMAC_CKSUM_ERROR_STATUS_CALCULATED (0xffffL<<0)
+#define BNX2_EMAC_CKSUM_ERROR_STATUS_EXPECTED (0xffffL<<16)
+
+#define BNX2_EMAC_RX_STAT_IFHCINOCTETS 0x00001500
+#define BNX2_EMAC_RX_STAT_IFHCINBADOCTETS 0x00001504
+#define BNX2_EMAC_RX_STAT_ETHERSTATSFRAGMENTS 0x00001508
+#define BNX2_EMAC_RX_STAT_IFHCINUCASTPKTS 0x0000150c
+#define BNX2_EMAC_RX_STAT_IFHCINMULTICASTPKTS 0x00001510
+#define BNX2_EMAC_RX_STAT_IFHCINBROADCASTPKTS 0x00001514
+#define BNX2_EMAC_RX_STAT_DOT3STATSFCSERRORS 0x00001518
+#define BNX2_EMAC_RX_STAT_DOT3STATSALIGNMENTERRORS 0x0000151c
+#define BNX2_EMAC_RX_STAT_DOT3STATSCARRIERSENSEERRORS 0x00001520
+#define BNX2_EMAC_RX_STAT_XONPAUSEFRAMESRECEIVED 0x00001524
+#define BNX2_EMAC_RX_STAT_XOFFPAUSEFRAMESRECEIVED 0x00001528
+#define BNX2_EMAC_RX_STAT_MACCONTROLFRAMESRECEIVED 0x0000152c
+#define BNX2_EMAC_RX_STAT_XOFFSTATEENTERED 0x00001530
+#define BNX2_EMAC_RX_STAT_DOT3STATSFRAMESTOOLONG 0x00001534
+#define BNX2_EMAC_RX_STAT_ETHERSTATSJABBERS 0x00001538
+#define BNX2_EMAC_RX_STAT_ETHERSTATSUNDERSIZEPKTS 0x0000153c
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS64OCTETS 0x00001540
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS65OCTETSTO127OCTETS 0x00001544
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS128OCTETSTO255OCTETS 0x00001548
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS256OCTETSTO511OCTETS 0x0000154c
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS512OCTETSTO1023OCTETS 0x00001550
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS1024OCTETSTO1522OCTETS 0x00001554
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTSOVER1522OCTETS 0x00001558
+#define BNX2_EMAC_RXMAC_DEBUG0 0x0000155c
+#define BNX2_EMAC_RXMAC_DEBUG1 0x00001560
+#define BNX2_EMAC_RXMAC_DEBUG1_LENGTH_NE_BYTE_COUNT (1L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG1_LENGTH_OUT_RANGE (1L<<1)
+#define BNX2_EMAC_RXMAC_DEBUG1_BAD_CRC (1L<<2)
+#define BNX2_EMAC_RXMAC_DEBUG1_RX_ERROR (1L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG1_ALIGN_ERROR (1L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG1_LAST_DATA (1L<<5)
+#define BNX2_EMAC_RXMAC_DEBUG1_ODD_BYTE_START (1L<<6)
+#define BNX2_EMAC_RXMAC_DEBUG1_BYTE_COUNT (0xffffL<<7)
+#define BNX2_EMAC_RXMAC_DEBUG1_SLOT_TIME (0xffL<<23)
+
+#define BNX2_EMAC_RXMAC_DEBUG2 0x00001564
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE (0x7L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_IDLE (0x0L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_SFD (0x1L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_DATA (0x2L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_SKEEP (0x3L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_EXT (0x4L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_DROP (0x5L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_SDROP (0x6L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_FC (0x7L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE (0xfL<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_IDLE (0x0L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA0 (0x1L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA1 (0x2L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA2 (0x3L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA3 (0x4L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_ABORT (0x5L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_WAIT (0x6L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_STATUS (0x7L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_LAST (0x8L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_BYTE_IN (0xffL<<7)
+#define BNX2_EMAC_RXMAC_DEBUG2_FALSEC (1L<<15)
+#define BNX2_EMAC_RXMAC_DEBUG2_TAGGED (1L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG2_PAUSE_STATE (1L<<18)
+#define BNX2_EMAC_RXMAC_DEBUG2_PAUSE_STATE_IDLE (0L<<18)
+#define BNX2_EMAC_RXMAC_DEBUG2_PAUSE_STATE_PAUSED (1L<<18)
+#define BNX2_EMAC_RXMAC_DEBUG2_SE_COUNTER (0xfL<<19)
+#define BNX2_EMAC_RXMAC_DEBUG2_QUANTA (0x1fL<<23)
+
+#define BNX2_EMAC_RXMAC_DEBUG3 0x00001568
+#define BNX2_EMAC_RXMAC_DEBUG3_PAUSE_CTR (0xffffL<<0)
+#define BNX2_EMAC_RXMAC_DEBUG3_TMP_PAUSE_CTR (0xffffL<<16)
+
+#define BNX2_EMAC_RXMAC_DEBUG4 0x0000156c
+#define BNX2_EMAC_RXMAC_DEBUG4_TYPE_FIELD (0xffffL<<0)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE (0x3fL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_IDLE (0x0L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UMAC2 (0x1L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UMAC3 (0x2L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UNI (0x3L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MMAC3 (0x5L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA1 (0x6L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MMAC2 (0x7L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA2 (0x7L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA3 (0x8L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MC2 (0x9L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MC3 (0xaL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MWAIT1 (0xeL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MWAIT2 (0xfL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MCHECK (0x10L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MC (0x11L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BC2 (0x12L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BC3 (0x13L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BSA1 (0x14L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BSA2 (0x15L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BSA3 (0x16L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BTYPE (0x17L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BC (0x18L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PTYPE (0x19L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_CMD (0x1aL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MAC (0x1bL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_LATCH (0x1cL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_XOFF (0x1dL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_XON (0x1eL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PAUSED (0x1fL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_NPAUSED (0x20L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_TTYPE (0x21L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_TVAL (0x22L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_USA1 (0x23L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_USA2 (0x24L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_USA3 (0x25L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UTYPE (0x26L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UTTYPE (0x27L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UTVAL (0x28L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MTYPE (0x29L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_DROP (0x2aL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_DROP_PKT (1L<<22)
+#define BNX2_EMAC_RXMAC_DEBUG4_SLOT_FILLED (1L<<23)
+#define BNX2_EMAC_RXMAC_DEBUG4_FALSE_CARRIER (1L<<24)
+#define BNX2_EMAC_RXMAC_DEBUG4_LAST_DATA (1L<<25)
+#define BNX2_EMAC_RXMAC_DEBUG4_SFD_FOUND (1L<<26)
+#define BNX2_EMAC_RXMAC_DEBUG4_ADVANCE (1L<<27)
+#define BNX2_EMAC_RXMAC_DEBUG4_START (1L<<28)
+
+#define BNX2_EMAC_RXMAC_DEBUG5 0x00001570
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM (0x7L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_IDLE (0L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_WAIT_EOF (1L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_WAIT_STAT (2L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_SET_EOF4FCRC (3L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_SET_EOF4RDE (4L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_SET_EOF4ALL (5L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_1WD_WAIT_STAT (6L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1 (0x7L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_VDW (0x0L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_STAT (0x1L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_AEOF (0x2L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_NEOF (0x3L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_SOF (0x4L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_SAEOF (0x6L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_SNEOF (0x7L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_EOF_DETECTED (1L<<7)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF0 (0x7L<<8)
+#define BNX2_EMAC_RXMAC_DEBUG5_RPM_IDI_FIFO_FULL (1L<<11)
+#define BNX2_EMAC_RXMAC_DEBUG5_LOAD_CCODE (1L<<12)
+#define BNX2_EMAC_RXMAC_DEBUG5_LOAD_DATA (1L<<13)
+#define BNX2_EMAC_RXMAC_DEBUG5_LOAD_STAT (1L<<14)
+#define BNX2_EMAC_RXMAC_DEBUG5_CLR_STAT (1L<<15)
+#define BNX2_EMAC_RXMAC_DEBUG5_IDI_RPM_CCODE (0x3L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG5_IDI_RPM_ACCEPT (1L<<19)
+#define BNX2_EMAC_RXMAC_DEBUG5_FMLEN (0xfffL<<20)
+
+#define BNX2_EMAC_RX_STAT_FALSECARRIERERRORS 0x00001574
+#define BNX2_EMAC_RX_STAT_AC0 0x00001580
+#define BNX2_EMAC_RX_STAT_AC1 0x00001584
+#define BNX2_EMAC_RX_STAT_AC2 0x00001588
+#define BNX2_EMAC_RX_STAT_AC3 0x0000158c
+#define BNX2_EMAC_RX_STAT_AC4 0x00001590
+#define BNX2_EMAC_RX_STAT_AC5 0x00001594
+#define BNX2_EMAC_RX_STAT_AC6 0x00001598
+#define BNX2_EMAC_RX_STAT_AC7 0x0000159c
+#define BNX2_EMAC_RX_STAT_AC8 0x000015a0
+#define BNX2_EMAC_RX_STAT_AC9 0x000015a4
+#define BNX2_EMAC_RX_STAT_AC10 0x000015a8
+#define BNX2_EMAC_RX_STAT_AC11 0x000015ac
+#define BNX2_EMAC_RX_STAT_AC12 0x000015b0
+#define BNX2_EMAC_RX_STAT_AC13 0x000015b4
+#define BNX2_EMAC_RX_STAT_AC14 0x000015b8
+#define BNX2_EMAC_RX_STAT_AC15 0x000015bc
+#define BNX2_EMAC_RX_STAT_AC16 0x000015c0
+#define BNX2_EMAC_RX_STAT_AC17 0x000015c4
+#define BNX2_EMAC_RX_STAT_AC18 0x000015c8
+#define BNX2_EMAC_RX_STAT_AC19 0x000015cc
+#define BNX2_EMAC_RX_STAT_AC20 0x000015d0
+#define BNX2_EMAC_RX_STAT_AC21 0x000015d4
+#define BNX2_EMAC_RX_STAT_AC22 0x000015d8
+#define BNX2_EMAC_RXMAC_SUC_DBG_OVERRUNVEC 0x000015dc
+#define BNX2_EMAC_RX_STAT_AC_28 0x000015f4
+#define BNX2_EMAC_TX_STAT_IFHCOUTOCTETS 0x00001600
+#define BNX2_EMAC_TX_STAT_IFHCOUTBADOCTETS 0x00001604
+#define BNX2_EMAC_TX_STAT_ETHERSTATSCOLLISIONS 0x00001608
+#define BNX2_EMAC_TX_STAT_OUTXONSENT 0x0000160c
+#define BNX2_EMAC_TX_STAT_OUTXOFFSENT 0x00001610
+#define BNX2_EMAC_TX_STAT_FLOWCONTROLDONE 0x00001614
+#define BNX2_EMAC_TX_STAT_DOT3STATSSINGLECOLLISIONFRAMES 0x00001618
+#define BNX2_EMAC_TX_STAT_DOT3STATSMULTIPLECOLLISIONFRAMES 0x0000161c
+#define BNX2_EMAC_TX_STAT_DOT3STATSDEFERREDTRANSMISSIONS 0x00001620
+#define BNX2_EMAC_TX_STAT_DOT3STATSEXCESSIVECOLLISIONS 0x00001624
+#define BNX2_EMAC_TX_STAT_DOT3STATSLATECOLLISIONS 0x00001628
+#define BNX2_EMAC_TX_STAT_IFHCOUTUCASTPKTS 0x0000162c
+#define BNX2_EMAC_TX_STAT_IFHCOUTMULTICASTPKTS 0x00001630
+#define BNX2_EMAC_TX_STAT_IFHCOUTBROADCASTPKTS 0x00001634
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS64OCTETS 0x00001638
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS65OCTETSTO127OCTETS 0x0000163c
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS128OCTETSTO255OCTETS 0x00001640
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS256OCTETSTO511OCTETS 0x00001644
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS512OCTETSTO1023OCTETS 0x00001648
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS1024OCTETSTO1522OCTETS 0x0000164c
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTSOVER1522OCTETS 0x00001650
+#define BNX2_EMAC_TX_STAT_DOT3STATSINTERNALMACTRANSMITERRORS 0x00001654
+#define BNX2_EMAC_TXMAC_DEBUG0 0x00001658
+#define BNX2_EMAC_TXMAC_DEBUG1 0x0000165c
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE (0xfL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_IDLE (0x0L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_START0 (0x1L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA0 (0x4L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA1 (0x5L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA2 (0x6L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA3 (0x7L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_WAIT0 (0x8L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_WAIT1 (0x9L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_CRS_ENABLE (1L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG1_BAD_CRC (1L<<5)
+#define BNX2_EMAC_TXMAC_DEBUG1_SE_COUNTER (0xfL<<6)
+#define BNX2_EMAC_TXMAC_DEBUG1_SEND_PAUSE (1L<<10)
+#define BNX2_EMAC_TXMAC_DEBUG1_LATE_COLLISION (1L<<11)
+#define BNX2_EMAC_TXMAC_DEBUG1_MAX_DEFER (1L<<12)
+#define BNX2_EMAC_TXMAC_DEBUG1_DEFERRED (1L<<13)
+#define BNX2_EMAC_TXMAC_DEBUG1_ONE_BYTE (1L<<14)
+#define BNX2_EMAC_TXMAC_DEBUG1_IPG_TIME (0xfL<<15)
+#define BNX2_EMAC_TXMAC_DEBUG1_SLOT_TIME (0xffL<<19)
+
+#define BNX2_EMAC_TXMAC_DEBUG2 0x00001660
+#define BNX2_EMAC_TXMAC_DEBUG2_BACK_OFF (0x3ffL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG2_BYTE_COUNT (0xffffL<<10)
+#define BNX2_EMAC_TXMAC_DEBUG2_COL_COUNT (0x1fL<<26)
+#define BNX2_EMAC_TXMAC_DEBUG2_COL_BIT (1L<<31)
+
+#define BNX2_EMAC_TXMAC_DEBUG3 0x00001664
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE (0xfL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_IDLE (0x0L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_PRE1 (0x1L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_PRE2 (0x2L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_SFD (0x3L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_DATA (0x4L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_CRC1 (0x5L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_CRC2 (0x6L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_EXT (0x7L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_STATB (0x8L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_STATG (0x9L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_JAM (0xaL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_EJAM (0xbL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_BJAM (0xcL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_SWAIT (0xdL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_BACKOFF (0xeL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE (0x7L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_IDLE (0x0L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_WAIT (0x1L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_UNI (0x2L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_MC (0x3L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_BC2 (0x4L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_BC3 (0x5L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_BC (0x6L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_CRS_DONE (1L<<7)
+#define BNX2_EMAC_TXMAC_DEBUG3_XOFF (1L<<8)
+#define BNX2_EMAC_TXMAC_DEBUG3_SE_COUNTER (0xfL<<9)
+#define BNX2_EMAC_TXMAC_DEBUG3_QUANTA_COUNTER (0x1fL<<13)
+
+#define BNX2_EMAC_TXMAC_DEBUG4 0x00001668
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_COUNTER (0xffffL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE (0xfL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_IDLE (0x0L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA1 (0x2L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA2 (0x3L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC3 (0x4L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC2 (0x5L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA3 (0x6L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC1 (0x7L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CRC1 (0x8L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CRC2 (0x9L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_TIME (0xaL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_TYPE (0xcL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_WAIT (0xdL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CMD (0xeL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_STATS0_VALID (1L<<20)
+#define BNX2_EMAC_TXMAC_DEBUG4_APPEND_CRC (1L<<21)
+#define BNX2_EMAC_TXMAC_DEBUG4_SLOT_FILLED (1L<<22)
+#define BNX2_EMAC_TXMAC_DEBUG4_MAX_DEFER (1L<<23)
+#define BNX2_EMAC_TXMAC_DEBUG4_SEND_EXTEND (1L<<24)
+#define BNX2_EMAC_TXMAC_DEBUG4_SEND_PADDING (1L<<25)
+#define BNX2_EMAC_TXMAC_DEBUG4_EOF_LOC (1L<<26)
+#define BNX2_EMAC_TXMAC_DEBUG4_COLLIDING (1L<<27)
+#define BNX2_EMAC_TXMAC_DEBUG4_COL_IN (1L<<28)
+#define BNX2_EMAC_TXMAC_DEBUG4_BURSTING (1L<<29)
+#define BNX2_EMAC_TXMAC_DEBUG4_ADVANCE (1L<<30)
+#define BNX2_EMAC_TXMAC_DEBUG4_GO (1L<<31)
+
+#define BNX2_EMAC_TX_STAT_AC0 0x00001680
+#define BNX2_EMAC_TX_STAT_AC1 0x00001684
+#define BNX2_EMAC_TX_STAT_AC2 0x00001688
+#define BNX2_EMAC_TX_STAT_AC3 0x0000168c
+#define BNX2_EMAC_TX_STAT_AC4 0x00001690
+#define BNX2_EMAC_TX_STAT_AC5 0x00001694
+#define BNX2_EMAC_TX_STAT_AC6 0x00001698
+#define BNX2_EMAC_TX_STAT_AC7 0x0000169c
+#define BNX2_EMAC_TX_STAT_AC8 0x000016a0
+#define BNX2_EMAC_TX_STAT_AC9 0x000016a4
+#define BNX2_EMAC_TX_STAT_AC10 0x000016a8
+#define BNX2_EMAC_TX_STAT_AC11 0x000016ac
+#define BNX2_EMAC_TX_STAT_AC12 0x000016b0
+#define BNX2_EMAC_TX_STAT_AC13 0x000016b4
+#define BNX2_EMAC_TX_STAT_AC14 0x000016b8
+#define BNX2_EMAC_TX_STAT_AC15 0x000016bc
+#define BNX2_EMAC_TX_STAT_AC16 0x000016c0
+#define BNX2_EMAC_TX_STAT_AC17 0x000016c4
+#define BNX2_EMAC_TX_STAT_AC18 0x000016c8
+#define BNX2_EMAC_TX_STAT_AC19 0x000016cc
+#define BNX2_EMAC_TX_STAT_AC20 0x000016d0
+#define BNX2_EMAC_TXMAC_SUC_DBG_OVERRUNVEC 0x000016d8
+#define BNX2_EMAC_TX_RATE_LIMIT_CTRL 0x000016fc
+#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_TX_THROTTLE_INC (0x7fL<<0)
+#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_TX_THROTTLE_NUM (0x7fL<<16)
+#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_RATE_LIMITER_EN (1L<<31)
+
+
+/*
+ * rpm_reg definition
+ * offset: 0x1800
+ */
+#define BNX2_RPM_COMMAND 0x00001800
+#define BNX2_RPM_COMMAND_ENABLED (1L<<0)
+#define BNX2_RPM_COMMAND_OVERRUN_ABORT (1L<<4)
+
+#define BNX2_RPM_STATUS 0x00001804
+#define BNX2_RPM_STATUS_MBUF_WAIT (1L<<0)
+#define BNX2_RPM_STATUS_FREE_WAIT (1L<<1)
+
+#define BNX2_RPM_CONFIG 0x00001808
+#define BNX2_RPM_CONFIG_NO_PSD_HDR_CKSUM (1L<<0)
+#define BNX2_RPM_CONFIG_ACPI_ENA (1L<<1)
+#define BNX2_RPM_CONFIG_ACPI_KEEP (1L<<2)
+#define BNX2_RPM_CONFIG_MP_KEEP (1L<<3)
+#define BNX2_RPM_CONFIG_SORT_VECT_VAL (0xfL<<4)
+#define BNX2_RPM_CONFIG_DISABLE_WOL_ASSERT (1L<<30)
+#define BNX2_RPM_CONFIG_IGNORE_VLAN (1L<<31)
+
+#define BNX2_RPM_MGMT_PKT_CTRL 0x0000180c
+#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_SORT (0xfL<<0)
+#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_RULE (0xfL<<4)
+#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_DISCARD_EN (1L<<30)
+#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_EN (1L<<31)
+
+#define BNX2_RPM_VLAN_MATCH0 0x00001810
+#define BNX2_RPM_VLAN_MATCH0_RPM_VLAN_MTCH0_VALUE (0xfffL<<0)
+
+#define BNX2_RPM_VLAN_MATCH1 0x00001814
+#define BNX2_RPM_VLAN_MATCH1_RPM_VLAN_MTCH1_VALUE (0xfffL<<0)
+
+#define BNX2_RPM_VLAN_MATCH2 0x00001818
+#define BNX2_RPM_VLAN_MATCH2_RPM_VLAN_MTCH2_VALUE (0xfffL<<0)
+
+#define BNX2_RPM_VLAN_MATCH3 0x0000181c
+#define BNX2_RPM_VLAN_MATCH3_RPM_VLAN_MTCH3_VALUE (0xfffL<<0)
+
+#define BNX2_RPM_SORT_USER0 0x00001820
+#define BNX2_RPM_SORT_USER0_PM_EN (0xffffL<<0)
+#define BNX2_RPM_SORT_USER0_BC_EN (1L<<16)
+#define BNX2_RPM_SORT_USER0_MC_EN (1L<<17)
+#define BNX2_RPM_SORT_USER0_MC_HSH_EN (1L<<18)
+#define BNX2_RPM_SORT_USER0_PROM_EN (1L<<19)
+#define BNX2_RPM_SORT_USER0_VLAN_EN (0xfL<<20)
+#define BNX2_RPM_SORT_USER0_PROM_VLAN (1L<<24)
+#define BNX2_RPM_SORT_USER0_VLAN_NOTMATCH (1L<<25)
+#define BNX2_RPM_SORT_USER0_ENA (1L<<31)
+
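+/*
+ * Illustrative sketch of the enable sequence the driver uses for this
+ * register: clear it, load the sort vector, then set ENA in a final
+ * write so the new configuration takes effect as a whole.
+ */
+static inline void bnx2_sort_user0_example(void __iomem *base, u32 sort)
+{
+	writel(0, base + BNX2_RPM_SORT_USER0);
+	writel(sort, base + BNX2_RPM_SORT_USER0);
+	writel(sort | BNX2_RPM_SORT_USER0_ENA, base + BNX2_RPM_SORT_USER0);
+}
+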
+#define BNX2_RPM_SORT_USER1 0x00001824
+#define BNX2_RPM_SORT_USER1_PM_EN (0xffffL<<0)
+#define BNX2_RPM_SORT_USER1_BC_EN (1L<<16)
+#define BNX2_RPM_SORT_USER1_MC_EN (1L<<17)
+#define BNX2_RPM_SORT_USER1_MC_HSH_EN (1L<<18)
+#define BNX2_RPM_SORT_USER1_PROM_EN (1L<<19)
+#define BNX2_RPM_SORT_USER1_VLAN_EN (0xfL<<20)
+#define BNX2_RPM_SORT_USER1_PROM_VLAN (1L<<24)
+#define BNX2_RPM_SORT_USER1_ENA (1L<<31)
+
+#define BNX2_RPM_SORT_USER2 0x00001828
+#define BNX2_RPM_SORT_USER2_PM_EN (0xffffL<<0)
+#define BNX2_RPM_SORT_USER2_BC_EN (1L<<16)
+#define BNX2_RPM_SORT_USER2_MC_EN (1L<<17)
+#define BNX2_RPM_SORT_USER2_MC_HSH_EN (1L<<18)
+#define BNX2_RPM_SORT_USER2_PROM_EN (1L<<19)
+#define BNX2_RPM_SORT_USER2_VLAN_EN (0xfL<<20)
+#define BNX2_RPM_SORT_USER2_PROM_VLAN (1L<<24)
+#define BNX2_RPM_SORT_USER2_ENA (1L<<31)
+
+#define BNX2_RPM_SORT_USER3 0x0000182c
+#define BNX2_RPM_SORT_USER3_PM_EN (0xffffL<<0)
+#define BNX2_RPM_SORT_USER3_BC_EN (1L<<16)
+#define BNX2_RPM_SORT_USER3_MC_EN (1L<<17)
+#define BNX2_RPM_SORT_USER3_MC_HSH_EN (1L<<18)
+#define BNX2_RPM_SORT_USER3_PROM_EN (1L<<19)
+#define BNX2_RPM_SORT_USER3_VLAN_EN (0xfL<<20)
+#define BNX2_RPM_SORT_USER3_PROM_VLAN (1L<<24)
+#define BNX2_RPM_SORT_USER3_ENA (1L<<31)
+
+#define BNX2_RPM_STAT_L2_FILTER_DISCARDS 0x00001840
+#define BNX2_RPM_STAT_RULE_CHECKER_DISCARDS 0x00001844
+#define BNX2_RPM_STAT_IFINFTQDISCARDS 0x00001848
+#define BNX2_RPM_STAT_IFINMBUFDISCARD 0x0000184c
+#define BNX2_RPM_STAT_RULE_CHECKER_P4_HIT 0x00001850
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0 0x00001854
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_LEN (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_LEN_TYPE (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_EN (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1 0x00001858
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_LEN (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_LEN_TYPE (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_EN (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2 0x0000185c
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_LEN (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_LEN_TYPE (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_EN (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3 0x00001860
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_LEN (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_LEN_TYPE (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_EN (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4 0x00001864
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_LEN (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_LEN_TYPE (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_EN (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5 0x00001868
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_LEN (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_LEN_TYPE (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_EN (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6 0x0000186c
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_LEN (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_LEN_TYPE (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_EN (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7 0x00001870
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_LEN (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_LEN_TYPE (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_EN (1L<<31)
+
+#define BNX2_RPM_STAT_AC0 0x00001880
+#define BNX2_RPM_STAT_AC1 0x00001884
+#define BNX2_RPM_STAT_AC2 0x00001888
+#define BNX2_RPM_STAT_AC3 0x0000188c
+#define BNX2_RPM_STAT_AC4 0x00001890
+#define BNX2_RPM_RC_CNTL_16 0x000018e0
+#define BNX2_RPM_RC_CNTL_16_OFFSET (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_16_CLASS (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_16_PRIORITY (1L<<11)
+#define BNX2_RPM_RC_CNTL_16_P4 (1L<<12)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_START (0L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_IP (1L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_TCP (2L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_UDP (3L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_DATA (4L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_TCP_UDP (5L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_ICMPV6 (6L<<13)
+#define BNX2_RPM_RC_CNTL_16_COMP (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_16_COMP_EQUAL (0L<<16)
+#define BNX2_RPM_RC_CNTL_16_COMP_NEQUAL (1L<<16)
+#define BNX2_RPM_RC_CNTL_16_COMP_GREATER (2L<<16)
+#define BNX2_RPM_RC_CNTL_16_COMP_LESS (3L<<16)
+#define BNX2_RPM_RC_CNTL_16_MAP (1L<<18)
+#define BNX2_RPM_RC_CNTL_16_SBIT (1L<<19)
+#define BNX2_RPM_RC_CNTL_16_CMDSEL (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_16_DISCARD (1L<<25)
+#define BNX2_RPM_RC_CNTL_16_MASK (1L<<26)
+#define BNX2_RPM_RC_CNTL_16_P1 (1L<<27)
+#define BNX2_RPM_RC_CNTL_16_P2 (1L<<28)
+#define BNX2_RPM_RC_CNTL_16_P3 (1L<<29)
+#define BNX2_RPM_RC_CNTL_16_NBIT (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_16 0x000018e4
+#define BNX2_RPM_RC_VALUE_MASK_16_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_16_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_17 0x000018e8
+#define BNX2_RPM_RC_CNTL_17_OFFSET (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_17_CLASS (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_17_PRIORITY (1L<<11)
+#define BNX2_RPM_RC_CNTL_17_P4 (1L<<12)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_START (0L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_IP (1L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_TCP (2L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_UDP (3L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_DATA (4L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_TCP_UDP (5L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_ICMPV6 (6L<<13)
+#define BNX2_RPM_RC_CNTL_17_COMP (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_17_COMP_EQUAL (0L<<16)
+#define BNX2_RPM_RC_CNTL_17_COMP_NEQUAL (1L<<16)
+#define BNX2_RPM_RC_CNTL_17_COMP_GREATER (2L<<16)
+#define BNX2_RPM_RC_CNTL_17_COMP_LESS (3L<<16)
+#define BNX2_RPM_RC_CNTL_17_MAP (1L<<18)
+#define BNX2_RPM_RC_CNTL_17_SBIT (1L<<19)
+#define BNX2_RPM_RC_CNTL_17_CMDSEL (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_17_DISCARD (1L<<25)
+#define BNX2_RPM_RC_CNTL_17_MASK (1L<<26)
+#define BNX2_RPM_RC_CNTL_17_P1 (1L<<27)
+#define BNX2_RPM_RC_CNTL_17_P2 (1L<<28)
+#define BNX2_RPM_RC_CNTL_17_P3 (1L<<29)
+#define BNX2_RPM_RC_CNTL_17_NBIT (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_17 0x000018ec
+#define BNX2_RPM_RC_VALUE_MASK_17_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_17_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_18 0x000018f0
+#define BNX2_RPM_RC_CNTL_18_OFFSET (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_18_CLASS (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_18_PRIORITY (1L<<11)
+#define BNX2_RPM_RC_CNTL_18_P4 (1L<<12)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_START (0L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_IP (1L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_TCP (2L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_UDP (3L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_DATA (4L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_TCP_UDP (5L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_ICMPV6 (6L<<13)
+#define BNX2_RPM_RC_CNTL_18_COMP (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_18_COMP_EQUAL (0L<<16)
+#define BNX2_RPM_RC_CNTL_18_COMP_NEQUAL (1L<<16)
+#define BNX2_RPM_RC_CNTL_18_COMP_GREATER (2L<<16)
+#define BNX2_RPM_RC_CNTL_18_COMP_LESS (3L<<16)
+#define BNX2_RPM_RC_CNTL_18_MAP (1L<<18)
+#define BNX2_RPM_RC_CNTL_18_SBIT (1L<<19)
+#define BNX2_RPM_RC_CNTL_18_CMDSEL (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_18_DISCARD (1L<<25)
+#define BNX2_RPM_RC_CNTL_18_MASK (1L<<26)
+#define BNX2_RPM_RC_CNTL_18_P1 (1L<<27)
+#define BNX2_RPM_RC_CNTL_18_P2 (1L<<28)
+#define BNX2_RPM_RC_CNTL_18_P3 (1L<<29)
+#define BNX2_RPM_RC_CNTL_18_NBIT (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_18 0x000018f4
+#define BNX2_RPM_RC_VALUE_MASK_18_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_18_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_19 0x000018f8
+#define BNX2_RPM_RC_CNTL_19_OFFSET (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_19_CLASS (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_19_PRIORITY (1L<<11)
+#define BNX2_RPM_RC_CNTL_19_P4 (1L<<12)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_START (0L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_IP (1L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_TCP (2L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_UDP (3L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_DATA (4L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_TCP_UDP (5L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_ICMPV6 (6L<<13)
+#define BNX2_RPM_RC_CNTL_19_COMP (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_19_COMP_EQUAL (0L<<16)
+#define BNX2_RPM_RC_CNTL_19_COMP_NEQUAL (1L<<16)
+#define BNX2_RPM_RC_CNTL_19_COMP_GREATER (2L<<16)
+#define BNX2_RPM_RC_CNTL_19_COMP_LESS (3L<<16)
+#define BNX2_RPM_RC_CNTL_19_MAP (1L<<18)
+#define BNX2_RPM_RC_CNTL_19_SBIT (1L<<19)
+#define BNX2_RPM_RC_CNTL_19_CMDSEL (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_19_DISCARD (1L<<25)
+#define BNX2_RPM_RC_CNTL_19_MASK (1L<<26)
+#define BNX2_RPM_RC_CNTL_19_P1 (1L<<27)
+#define BNX2_RPM_RC_CNTL_19_P2 (1L<<28)
+#define BNX2_RPM_RC_CNTL_19_P3 (1L<<29)
+#define BNX2_RPM_RC_CNTL_19_NBIT (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_19 0x000018fc
+#define BNX2_RPM_RC_VALUE_MASK_19_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_19_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_0 0x00001900
+#define BNX2_RPM_RC_CNTL_0_OFFSET (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_0_CLASS (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_0_PRIORITY (1L<<11)
+#define BNX2_RPM_RC_CNTL_0_P4 (1L<<12)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_START (0L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_IP (1L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_TCP (2L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_UDP (3L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_DATA (4L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_TCP_UDP (5L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_ICMPV6 (6L<<13)
+#define BNX2_RPM_RC_CNTL_0_COMP (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_0_COMP_EQUAL (0L<<16)
+#define BNX2_RPM_RC_CNTL_0_COMP_NEQUAL (1L<<16)
+#define BNX2_RPM_RC_CNTL_0_COMP_GREATER (2L<<16)
+#define BNX2_RPM_RC_CNTL_0_COMP_LESS (3L<<16)
+#define BNX2_RPM_RC_CNTL_0_MAP_XI (1L<<18)
+#define BNX2_RPM_RC_CNTL_0_SBIT (1L<<19)
+#define BNX2_RPM_RC_CNTL_0_CMDSEL (0xfL<<20)
+#define BNX2_RPM_RC_CNTL_0_MAP (1L<<24)
+#define BNX2_RPM_RC_CNTL_0_CMDSEL_XI (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_0_DISCARD (1L<<25)
+#define BNX2_RPM_RC_CNTL_0_MASK (1L<<26)
+#define BNX2_RPM_RC_CNTL_0_P1 (1L<<27)
+#define BNX2_RPM_RC_CNTL_0_P2 (1L<<28)
+#define BNX2_RPM_RC_CNTL_0_P3 (1L<<29)
+#define BNX2_RPM_RC_CNTL_0_NBIT (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_0 0x00001904
+#define BNX2_RPM_RC_VALUE_MASK_0_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_0_MASK (0xffffL<<16)
+
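+/*
+ * Illustrative sketch only, composing the bits above: each rule pairs a
+ * CNTL word (where to look: OFFSET bytes into the HDR_TYPE region, and
+ * how to compare: COMP) with a VALUE/MASK word.  A hypothetical rule
+ * matching TCP destination port 80 exactly might be built like this;
+ * this is a reading of the bit layout, not code from the driver.
+ */
+static inline void bnx2_rc_rule_example(void __iomem *base)
+{
+	writel(BNX2_RPM_RC_CNTL_0_HDR_TYPE_TCP |
+	       (2 << 0) |			/* dest port at byte offset 2 */
+	       BNX2_RPM_RC_CNTL_0_COMP_EQUAL,
+	       base + BNX2_RPM_RC_CNTL_0);
+	writel(80 | (0xffffUL << 16),		/* VALUE = 80, MASK = all bits */
+	       base + BNX2_RPM_RC_VALUE_MASK_0);
+}
+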
+#define BNX2_RPM_RC_CNTL_1 0x00001908
+#define BNX2_RPM_RC_CNTL_1_A (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_1_B (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_1_OFFSET_XI (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_1_CLASS_XI (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_1_PRIORITY_XI (1L<<11)
+#define BNX2_RPM_RC_CNTL_1_P4_XI (1L<<12)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_XI (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_START_XI (0L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_IP_XI (1L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_TCP_XI (2L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_UDP_XI (3L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_DATA_XI (4L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_TCP_UDP_XI (5L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_ICMPV6_XI (6L<<13)
+#define BNX2_RPM_RC_CNTL_1_COMP_XI (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_1_COMP_EQUAL_XI (0L<<16)
+#define BNX2_RPM_RC_CNTL_1_COMP_NEQUAL_XI (1L<<16)
+#define BNX2_RPM_RC_CNTL_1_COMP_GREATER_XI (2L<<16)
+#define BNX2_RPM_RC_CNTL_1_COMP_LESS_XI (3L<<16)
+#define BNX2_RPM_RC_CNTL_1_MAP_XI (1L<<18)
+#define BNX2_RPM_RC_CNTL_1_SBIT_XI (1L<<19)
+#define BNX2_RPM_RC_CNTL_1_CMDSEL_XI (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_1_DISCARD_XI (1L<<25)
+#define BNX2_RPM_RC_CNTL_1_MASK_XI (1L<<26)
+#define BNX2_RPM_RC_CNTL_1_P1_XI (1L<<27)
+#define BNX2_RPM_RC_CNTL_1_P2_XI (1L<<28)
+#define BNX2_RPM_RC_CNTL_1_P3_XI (1L<<29)
+#define BNX2_RPM_RC_CNTL_1_NBIT_XI (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_1 0x0000190c
+#define BNX2_RPM_RC_VALUE_MASK_1_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_1_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_2 0x00001910
+#define BNX2_RPM_RC_CNTL_2_A (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_2_B (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_2_OFFSET_XI (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_2_CLASS_XI (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_2_PRIORITY_XI (1L<<11)
+#define BNX2_RPM_RC_CNTL_2_P4_XI (1L<<12)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_XI (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_START_XI (0L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_IP_XI (1L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_TCP_XI (2L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_UDP_XI (3L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_DATA_XI (4L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_TCP_UDP_XI (5L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_ICMPV6_XI (6L<<13)
+#define BNX2_RPM_RC_CNTL_2_COMP_XI (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_2_COMP_EQUAL_XI (0L<<16)
+#define BNX2_RPM_RC_CNTL_2_COMP_NEQUAL_XI (1L<<16)
+#define BNX2_RPM_RC_CNTL_2_COMP_GREATER_XI (2L<<16)
+#define BNX2_RPM_RC_CNTL_2_COMP_LESS_XI (3L<<16)
+#define BNX2_RPM_RC_CNTL_2_MAP_XI (1L<<18)
+#define BNX2_RPM_RC_CNTL_2_SBIT_XI (1L<<19)
+#define BNX2_RPM_RC_CNTL_2_CMDSEL_XI (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_2_DISCARD_XI (1L<<25)
+#define BNX2_RPM_RC_CNTL_2_MASK_XI (1L<<26)
+#define BNX2_RPM_RC_CNTL_2_P1_XI (1L<<27)
+#define BNX2_RPM_RC_CNTL_2_P2_XI (1L<<28)
+#define BNX2_RPM_RC_CNTL_2_P3_XI (1L<<29)
+#define BNX2_RPM_RC_CNTL_2_NBIT_XI (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_2 0x00001914
+#define BNX2_RPM_RC_VALUE_MASK_2_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_2_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_3 0x00001918
+#define BNX2_RPM_RC_CNTL_3_A (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_3_B (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_3_OFFSET_XI (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_3_CLASS_XI (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_3_PRIORITY_XI (1L<<11)
+#define BNX2_RPM_RC_CNTL_3_P4_XI (1L<<12)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_XI (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_START_XI (0L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_IP_XI (1L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_TCP_XI (2L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_UDP_XI (3L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_DATA_XI (4L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_TCP_UDP_XI (5L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_ICMPV6_XI (6L<<13)
+#define BNX2_RPM_RC_CNTL_3_COMP_XI (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_3_COMP_EQUAL_XI (0L<<16)
+#define BNX2_RPM_RC_CNTL_3_COMP_NEQUAL_XI (1L<<16)
+#define BNX2_RPM_RC_CNTL_3_COMP_GREATER_XI (2L<<16)
+#define BNX2_RPM_RC_CNTL_3_COMP_LESS_XI (3L<<16)
+#define BNX2_RPM_RC_CNTL_3_MAP_XI (1L<<18)
+#define BNX2_RPM_RC_CNTL_3_SBIT_XI (1L<<19)
+#define BNX2_RPM_RC_CNTL_3_CMDSEL_XI (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_3_DISCARD_XI (1L<<25)
+#define BNX2_RPM_RC_CNTL_3_MASK_XI (1L<<26)
+#define BNX2_RPM_RC_CNTL_3_P1_XI (1L<<27)
+#define BNX2_RPM_RC_CNTL_3_P2_XI (1L<<28)
+#define BNX2_RPM_RC_CNTL_3_P3_XI (1L<<29)
+#define BNX2_RPM_RC_CNTL_3_NBIT_XI (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_3 0x0000191c
+#define BNX2_RPM_RC_VALUE_MASK_3_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_3_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_4 0x00001920
+#define BNX2_RPM_RC_CNTL_4_A (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_4_B (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_4_OFFSET_XI (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_4_CLASS_XI (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_4_PRIORITY_XI (1L<<11)
+#define BNX2_RPM_RC_CNTL_4_P4_XI (1L<<12)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_XI (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_START_XI (0L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_IP_XI (1L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_TCP_XI (2L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_UDP_XI (3L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_DATA_XI (4L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_TCP_UDP_XI (5L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_ICMPV6_XI (6L<<13)
+#define BNX2_RPM_RC_CNTL_4_COMP_XI (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_4_COMP_EQUAL_XI (0L<<16)
+#define BNX2_RPM_RC_CNTL_4_COMP_NEQUAL_XI (1L<<16)
+#define BNX2_RPM_RC_CNTL_4_COMP_GREATER_XI (2L<<16)
+#define BNX2_RPM_RC_CNTL_4_COMP_LESS_XI (3L<<16)
+#define BNX2_RPM_RC_CNTL_4_MAP_XI (1L<<18)
+#define BNX2_RPM_RC_CNTL_4_SBIT_XI (1L<<19)
+#define BNX2_RPM_RC_CNTL_4_CMDSEL_XI (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_4_DISCARD_XI (1L<<25)
+#define BNX2_RPM_RC_CNTL_4_MASK_XI (1L<<26)
+#define BNX2_RPM_RC_CNTL_4_P1_XI (1L<<27)
+#define BNX2_RPM_RC_CNTL_4_P2_XI (1L<<28)
+#define BNX2_RPM_RC_CNTL_4_P3_XI (1L<<29)
+#define BNX2_RPM_RC_CNTL_4_NBIT_XI (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_4 0x00001924
+#define BNX2_RPM_RC_VALUE_MASK_4_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_4_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_5 0x00001928
+#define BNX2_RPM_RC_CNTL_5_A (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_5_B (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_5_OFFSET_XI (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_5_CLASS_XI (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_5_PRIORITY_XI (1L<<11)
+#define BNX2_RPM_RC_CNTL_5_P4_XI (1L<<12)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_XI (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_START_XI (0L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_IP_XI (1L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_TCP_XI (2L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_UDP_XI (3L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_DATA_XI (4L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_TCP_UDP_XI (5L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_ICMPV6_XI (6L<<13)
+#define BNX2_RPM_RC_CNTL_5_COMP_XI (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_5_COMP_EQUAL_XI (0L<<16)
+#define BNX2_RPM_RC_CNTL_5_COMP_NEQUAL_XI (1L<<16)
+#define BNX2_RPM_RC_CNTL_5_COMP_GREATER_XI (2L<<16)
+#define BNX2_RPM_RC_CNTL_5_COMP_LESS_XI (3L<<16)
+#define BNX2_RPM_RC_CNTL_5_MAP_XI (1L<<18)
+#define BNX2_RPM_RC_CNTL_5_SBIT_XI (1L<<19)
+#define BNX2_RPM_RC_CNTL_5_CMDSEL_XI (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_5_DISCARD_XI (1L<<25)
+#define BNX2_RPM_RC_CNTL_5_MASK_XI (1L<<26)
+#define BNX2_RPM_RC_CNTL_5_P1_XI (1L<<27)
+#define BNX2_RPM_RC_CNTL_5_P2_XI (1L<<28)
+#define BNX2_RPM_RC_CNTL_5_P3_XI (1L<<29)
+#define BNX2_RPM_RC_CNTL_5_NBIT_XI (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_5 0x0000192c
+#define BNX2_RPM_RC_VALUE_MASK_5_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_5_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_6 0x00001930
+#define BNX2_RPM_RC_CNTL_6_A (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_6_B (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_6_OFFSET_XI (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_6_CLASS_XI (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_6_PRIORITY_XI (1L<<11)
+#define BNX2_RPM_RC_CNTL_6_P4_XI (1L<<12)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_XI (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_START_XI (0L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_IP_XI (1L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_TCP_XI (2L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_UDP_XI (3L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_DATA_XI (4L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_TCP_UDP_XI (5L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_ICMPV6_XI (6L<<13)
+#define BNX2_RPM_RC_CNTL_6_COMP_XI (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_6_COMP_EQUAL_XI (0L<<16)
+#define BNX2_RPM_RC_CNTL_6_COMP_NEQUAL_XI (1L<<16)
+#define BNX2_RPM_RC_CNTL_6_COMP_GREATER_XI (2L<<16)
+#define BNX2_RPM_RC_CNTL_6_COMP_LESS_XI (3L<<16)
+#define BNX2_RPM_RC_CNTL_6_MAP_XI (1L<<18)
+#define BNX2_RPM_RC_CNTL_6_SBIT_XI (1L<<19)
+#define BNX2_RPM_RC_CNTL_6_CMDSEL_XI (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_6_DISCARD_XI (1L<<25)
+#define BNX2_RPM_RC_CNTL_6_MASK_XI (1L<<26)
+#define BNX2_RPM_RC_CNTL_6_P1_XI (1L<<27)
+#define BNX2_RPM_RC_CNTL_6_P2_XI (1L<<28)
+#define BNX2_RPM_RC_CNTL_6_P3_XI (1L<<29)
+#define BNX2_RPM_RC_CNTL_6_NBIT_XI (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_6 0x00001934
+#define BNX2_RPM_RC_VALUE_MASK_6_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_6_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_7 0x00001938
+#define BNX2_RPM_RC_CNTL_7_A (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_7_B (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_7_OFFSET_XI (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_7_CLASS_XI (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_7_PRIORITY_XI (1L<<11)
+#define BNX2_RPM_RC_CNTL_7_P4_XI (1L<<12)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_XI (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_START_XI (0L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_IP_XI (1L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_TCP_XI (2L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_UDP_XI (3L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_DATA_XI (4L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_TCP_UDP_XI (5L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_ICMPV6_XI (6L<<13)
+#define BNX2_RPM_RC_CNTL_7_COMP_XI (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_7_COMP_EQUAL_XI (0L<<16)
+#define BNX2_RPM_RC_CNTL_7_COMP_NEQUAL_XI (1L<<16)
+#define BNX2_RPM_RC_CNTL_7_COMP_GREATER_XI (2L<<16)
+#define BNX2_RPM_RC_CNTL_7_COMP_LESS_XI (3L<<16)
+#define BNX2_RPM_RC_CNTL_7_MAP_XI (1L<<18)
+#define BNX2_RPM_RC_CNTL_7_SBIT_XI (1L<<19)
+#define BNX2_RPM_RC_CNTL_7_CMDSEL_XI (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_7_DISCARD_XI (1L<<25)
+#define BNX2_RPM_RC_CNTL_7_MASK_XI (1L<<26)
+#define BNX2_RPM_RC_CNTL_7_P1_XI (1L<<27)
+#define BNX2_RPM_RC_CNTL_7_P2_XI (1L<<28)
+#define BNX2_RPM_RC_CNTL_7_P3_XI (1L<<29)
+#define BNX2_RPM_RC_CNTL_7_NBIT_XI (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_7 0x0000193c
+#define BNX2_RPM_RC_VALUE_MASK_7_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_7_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_8 0x00001940
+#define BNX2_RPM_RC_CNTL_8_A (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_8_B (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_8_OFFSET_XI (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_8_CLASS_XI (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_8_PRIORITY_XI (1L<<11)
+#define BNX2_RPM_RC_CNTL_8_P4_XI (1L<<12)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_XI (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_START_XI (0L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_IP_XI (1L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_TCP_XI (2L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_UDP_XI (3L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_DATA_XI (4L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_TCP_UDP_XI (5L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_ICMPV6_XI (6L<<13)
+#define BNX2_RPM_RC_CNTL_8_COMP_XI (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_8_COMP_EQUAL_XI (0L<<16)
+#define BNX2_RPM_RC_CNTL_8_COMP_NEQUAL_XI (1L<<16)
+#define BNX2_RPM_RC_CNTL_8_COMP_GREATER_XI (2L<<16)
+#define BNX2_RPM_RC_CNTL_8_COMP_LESS_XI (3L<<16)
+#define BNX2_RPM_RC_CNTL_8_MAP_XI (1L<<18)
+#define BNX2_RPM_RC_CNTL_8_SBIT_XI (1L<<19)
+#define BNX2_RPM_RC_CNTL_8_CMDSEL_XI (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_8_DISCARD_XI (1L<<25)
+#define BNX2_RPM_RC_CNTL_8_MASK_XI (1L<<26)
+#define BNX2_RPM_RC_CNTL_8_P1_XI (1L<<27)
+#define BNX2_RPM_RC_CNTL_8_P2_XI (1L<<28)
+#define BNX2_RPM_RC_CNTL_8_P3_XI (1L<<29)
+#define BNX2_RPM_RC_CNTL_8_NBIT_XI (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_8 0x00001944
+#define BNX2_RPM_RC_VALUE_MASK_8_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_8_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_9 0x00001948
+#define BNX2_RPM_RC_CNTL_9_A (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_9_B (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_9_OFFSET_XI (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_9_CLASS_XI (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_9_PRIORITY_XI (1L<<11)
+#define BNX2_RPM_RC_CNTL_9_P4_XI (1L<<12)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_XI (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_START_XI (0L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_IP_XI (1L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_TCP_XI (2L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_UDP_XI (3L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_DATA_XI (4L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_TCP_UDP_XI (5L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_ICMPV6_XI (6L<<13)
+#define BNX2_RPM_RC_CNTL_9_COMP_XI (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_9_COMP_EQUAL_XI (0L<<16)
+#define BNX2_RPM_RC_CNTL_9_COMP_NEQUAL_XI (1L<<16)
+#define BNX2_RPM_RC_CNTL_9_COMP_GREATER_XI (2L<<16)
+#define BNX2_RPM_RC_CNTL_9_COMP_LESS_XI (3L<<16)
+#define BNX2_RPM_RC_CNTL_9_MAP_XI (1L<<18)
+#define BNX2_RPM_RC_CNTL_9_SBIT_XI (1L<<19)
+#define BNX2_RPM_RC_CNTL_9_CMDSEL_XI (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_9_DISCARD_XI (1L<<25)
+#define BNX2_RPM_RC_CNTL_9_MASK_XI (1L<<26)
+#define BNX2_RPM_RC_CNTL_9_P1_XI (1L<<27)
+#define BNX2_RPM_RC_CNTL_9_P2_XI (1L<<28)
+#define BNX2_RPM_RC_CNTL_9_P3_XI (1L<<29)
+#define BNX2_RPM_RC_CNTL_9_NBIT_XI (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_9 0x0000194c
+#define BNX2_RPM_RC_VALUE_MASK_9_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_9_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_10 0x00001950
+#define BNX2_RPM_RC_CNTL_10_A (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_10_B (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_10_OFFSET_XI (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_10_CLASS_XI (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_10_PRIORITY_XI (1L<<11)
+#define BNX2_RPM_RC_CNTL_10_P4_XI (1L<<12)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_XI (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_START_XI (0L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_IP_XI (1L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_TCP_XI (2L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_UDP_XI (3L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_DATA_XI (4L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_TCP_UDP_XI (5L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_ICMPV6_XI (6L<<13)
+#define BNX2_RPM_RC_CNTL_10_COMP_XI (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_10_COMP_EQUAL_XI (0L<<16)
+#define BNX2_RPM_RC_CNTL_10_COMP_NEQUAL_XI (1L<<16)
+#define BNX2_RPM_RC_CNTL_10_COMP_GREATER_XI (2L<<16)
+#define BNX2_RPM_RC_CNTL_10_COMP_LESS_XI (3L<<16)
+#define BNX2_RPM_RC_CNTL_10_MAP_XI (1L<<18)
+#define BNX2_RPM_RC_CNTL_10_SBIT_XI (1L<<19)
+#define BNX2_RPM_RC_CNTL_10_CMDSEL_XI (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_10_DISCARD_XI (1L<<25)
+#define BNX2_RPM_RC_CNTL_10_MASK_XI (1L<<26)
+#define BNX2_RPM_RC_CNTL_10_P1_XI (1L<<27)
+#define BNX2_RPM_RC_CNTL_10_P2_XI (1L<<28)
+#define BNX2_RPM_RC_CNTL_10_P3_XI (1L<<29)
+#define BNX2_RPM_RC_CNTL_10_NBIT_XI (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_10 0x00001954
+#define BNX2_RPM_RC_VALUE_MASK_10_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_10_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_11 0x00001958
+#define BNX2_RPM_RC_CNTL_11_A (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_11_B (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_11_OFFSET_XI (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_11_CLASS_XI (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_11_PRIORITY_XI (1L<<11)
+#define BNX2_RPM_RC_CNTL_11_P4_XI (1L<<12)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_XI (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_START_XI (0L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_IP_XI (1L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_TCP_XI (2L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_UDP_XI (3L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_DATA_XI (4L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_TCP_UDP_XI (5L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_ICMPV6_XI (6L<<13)
+#define BNX2_RPM_RC_CNTL_11_COMP_XI (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_11_COMP_EQUAL_XI (0L<<16)
+#define BNX2_RPM_RC_CNTL_11_COMP_NEQUAL_XI (1L<<16)
+#define BNX2_RPM_RC_CNTL_11_COMP_GREATER_XI (2L<<16)
+#define BNX2_RPM_RC_CNTL_11_COMP_LESS_XI (3L<<16)
+#define BNX2_RPM_RC_CNTL_11_MAP_XI (1L<<18)
+#define BNX2_RPM_RC_CNTL_11_SBIT_XI (1L<<19)
+#define BNX2_RPM_RC_CNTL_11_CMDSEL_XI (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_11_DISCARD_XI (1L<<25)
+#define BNX2_RPM_RC_CNTL_11_MASK_XI (1L<<26)
+#define BNX2_RPM_RC_CNTL_11_P1_XI (1L<<27)
+#define BNX2_RPM_RC_CNTL_11_P2_XI (1L<<28)
+#define BNX2_RPM_RC_CNTL_11_P3_XI (1L<<29)
+#define BNX2_RPM_RC_CNTL_11_NBIT_XI (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_11 0x0000195c
+#define BNX2_RPM_RC_VALUE_MASK_11_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_11_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_12 0x00001960
+#define BNX2_RPM_RC_CNTL_12_A (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_12_B (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_12_OFFSET_XI (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_12_CLASS_XI (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_12_PRIORITY_XI (1L<<11)
+#define BNX2_RPM_RC_CNTL_12_P4_XI (1L<<12)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_XI (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_START_XI (0L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_IP_XI (1L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_TCP_XI (2L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_UDP_XI (3L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_DATA_XI (4L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_TCP_UDP_XI (5L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_ICMPV6_XI (6L<<13)
+#define BNX2_RPM_RC_CNTL_12_COMP_XI (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_12_COMP_EQUAL_XI (0L<<16)
+#define BNX2_RPM_RC_CNTL_12_COMP_NEQUAL_XI (1L<<16)
+#define BNX2_RPM_RC_CNTL_12_COMP_GREATER_XI (2L<<16)
+#define BNX2_RPM_RC_CNTL_12_COMP_LESS_XI (3L<<16)
+#define BNX2_RPM_RC_CNTL_12_MAP_XI (1L<<18)
+#define BNX2_RPM_RC_CNTL_12_SBIT_XI (1L<<19)
+#define BNX2_RPM_RC_CNTL_12_CMDSEL_XI (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_12_DISCARD_XI (1L<<25)
+#define BNX2_RPM_RC_CNTL_12_MASK_XI (1L<<26)
+#define BNX2_RPM_RC_CNTL_12_P1_XI (1L<<27)
+#define BNX2_RPM_RC_CNTL_12_P2_XI (1L<<28)
+#define BNX2_RPM_RC_CNTL_12_P3_XI (1L<<29)
+#define BNX2_RPM_RC_CNTL_12_NBIT_XI (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_12 0x00001964
+#define BNX2_RPM_RC_VALUE_MASK_12_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_12_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_13 0x00001968
+#define BNX2_RPM_RC_CNTL_13_A (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_13_B (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_13_OFFSET_XI (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_13_CLASS_XI (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_13_PRIORITY_XI (1L<<11)
+#define BNX2_RPM_RC_CNTL_13_P4_XI (1L<<12)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_XI (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_START_XI (0L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_IP_XI (1L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_TCP_XI (2L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_UDP_XI (3L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_DATA_XI (4L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_TCP_UDP_XI (5L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_ICMPV6_XI (6L<<13)
+#define BNX2_RPM_RC_CNTL_13_COMP_XI (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_13_COMP_EQUAL_XI (0L<<16)
+#define BNX2_RPM_RC_CNTL_13_COMP_NEQUAL_XI (1L<<16)
+#define BNX2_RPM_RC_CNTL_13_COMP_GREATER_XI (2L<<16)
+#define BNX2_RPM_RC_CNTL_13_COMP_LESS_XI (3L<<16)
+#define BNX2_RPM_RC_CNTL_13_MAP_XI (1L<<18)
+#define BNX2_RPM_RC_CNTL_13_SBIT_XI (1L<<19)
+#define BNX2_RPM_RC_CNTL_13_CMDSEL_XI (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_13_DISCARD_XI (1L<<25)
+#define BNX2_RPM_RC_CNTL_13_MASK_XI (1L<<26)
+#define BNX2_RPM_RC_CNTL_13_P1_XI (1L<<27)
+#define BNX2_RPM_RC_CNTL_13_P2_XI (1L<<28)
+#define BNX2_RPM_RC_CNTL_13_P3_XI (1L<<29)
+#define BNX2_RPM_RC_CNTL_13_NBIT_XI (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_13 0x0000196c
+#define BNX2_RPM_RC_VALUE_MASK_13_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_13_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_14 0x00001970
+#define BNX2_RPM_RC_CNTL_14_A (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_14_B (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_14_OFFSET_XI (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_14_CLASS_XI (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_14_PRIORITY_XI (1L<<11)
+#define BNX2_RPM_RC_CNTL_14_P4_XI (1L<<12)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_XI (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_START_XI (0L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_IP_XI (1L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_TCP_XI (2L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_UDP_XI (3L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_DATA_XI (4L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_TCP_UDP_XI (5L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_ICMPV6_XI (6L<<13)
+#define BNX2_RPM_RC_CNTL_14_COMP_XI (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_14_COMP_EQUAL_XI (0L<<16)
+#define BNX2_RPM_RC_CNTL_14_COMP_NEQUAL_XI (1L<<16)
+#define BNX2_RPM_RC_CNTL_14_COMP_GREATER_XI (2L<<16)
+#define BNX2_RPM_RC_CNTL_14_COMP_LESS_XI (3L<<16)
+#define BNX2_RPM_RC_CNTL_14_MAP_XI (1L<<18)
+#define BNX2_RPM_RC_CNTL_14_SBIT_XI (1L<<19)
+#define BNX2_RPM_RC_CNTL_14_CMDSEL_XI (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_14_DISCARD_XI (1L<<25)
+#define BNX2_RPM_RC_CNTL_14_MASK_XI (1L<<26)
+#define BNX2_RPM_RC_CNTL_14_P1_XI (1L<<27)
+#define BNX2_RPM_RC_CNTL_14_P2_XI (1L<<28)
+#define BNX2_RPM_RC_CNTL_14_P3_XI (1L<<29)
+#define BNX2_RPM_RC_CNTL_14_NBIT_XI (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_14 0x00001974
+#define BNX2_RPM_RC_VALUE_MASK_14_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_14_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_15 0x00001978
+#define BNX2_RPM_RC_CNTL_15_A (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_15_B (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_15_OFFSET_XI (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_15_CLASS_XI (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_15_PRIORITY_XI (1L<<11)
+#define BNX2_RPM_RC_CNTL_15_P4_XI (1L<<12)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_XI (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_START_XI (0L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_IP_XI (1L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_TCP_XI (2L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_UDP_XI (3L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_DATA_XI (4L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_TCP_UDP_XI (5L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_ICMPV6_XI (6L<<13)
+#define BNX2_RPM_RC_CNTL_15_COMP_XI (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_15_COMP_EQUAL_XI (0L<<16)
+#define BNX2_RPM_RC_CNTL_15_COMP_NEQUAL_XI (1L<<16)
+#define BNX2_RPM_RC_CNTL_15_COMP_GREATER_XI (2L<<16)
+#define BNX2_RPM_RC_CNTL_15_COMP_LESS_XI (3L<<16)
+#define BNX2_RPM_RC_CNTL_15_MAP_XI (1L<<18)
+#define BNX2_RPM_RC_CNTL_15_SBIT_XI (1L<<19)
+#define BNX2_RPM_RC_CNTL_15_CMDSEL_XI (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_15_DISCARD_XI (1L<<25)
+#define BNX2_RPM_RC_CNTL_15_MASK_XI (1L<<26)
+#define BNX2_RPM_RC_CNTL_15_P1_XI (1L<<27)
+#define BNX2_RPM_RC_CNTL_15_P2_XI (1L<<28)
+#define BNX2_RPM_RC_CNTL_15_P3_XI (1L<<29)
+#define BNX2_RPM_RC_CNTL_15_NBIT_XI (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_15 0x0000197c
+#define BNX2_RPM_RC_VALUE_MASK_15_VALUE (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_15_MASK (0xffffL<<16)
+
+#define BNX2_RPM_RC_CONFIG 0x00001980
+#define BNX2_RPM_RC_CONFIG_RULE_ENABLE (0xffffL<<0)
+#define BNX2_RPM_RC_CONFIG_RULE_ENABLE_XI (0xfffffL<<0)
+#define BNX2_RPM_RC_CONFIG_DEF_CLASS (0x7L<<24)
+#define BNX2_RPM_RC_CONFIG_KNUM_OVERWRITE (1L<<31)
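+
+/*
+ * Each receive-rule slot above pairs an RPM_RC_CNTL_n register with an
+ * RPM_RC_VALUE_MASK_n register, and a rule only takes effect once its
+ * bit is set in RPM_RC_CONFIG_RULE_ENABLE.  A minimal sketch of arming
+ * rule 5 on a 5709-family (XI) part -- REG_WR()/REG_RD() stand in for
+ * the driver's register accessors (hypothetical here) and the match
+ * values are made up for illustration (EtherType 0x0800 at frame
+ * offset 12, full 16-bit mask):
+ *
+ *	REG_WR(bp, BNX2_RPM_RC_VALUE_MASK_5, (0xffff << 16) | 0x0800);
+ *	REG_WR(bp, BNX2_RPM_RC_CNTL_5,
+ *	       BNX2_RPM_RC_CNTL_5_HDR_TYPE_START_XI |
+ *	       BNX2_RPM_RC_CNTL_5_COMP_EQUAL_XI | 12);
+ *	REG_WR(bp, BNX2_RPM_RC_CONFIG,
+ *	       REG_RD(bp, BNX2_RPM_RC_CONFIG) | (1 << 5));
+ */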
+
+#define BNX2_RPM_DEBUG0 0x00001984
+#define BNX2_RPM_DEBUG0_FM_BCNT (0xffffL<<0)
+#define BNX2_RPM_DEBUG0_T_DATA_OFST_VLD (1L<<16)
+#define BNX2_RPM_DEBUG0_T_UDP_OFST_VLD (1L<<17)
+#define BNX2_RPM_DEBUG0_T_TCP_OFST_VLD (1L<<18)
+#define BNX2_RPM_DEBUG0_T_IP_OFST_VLD (1L<<19)
+#define BNX2_RPM_DEBUG0_IP_MORE_FRGMT (1L<<20)
+#define BNX2_RPM_DEBUG0_T_IP_NO_TCP_UDP_HDR (1L<<21)
+#define BNX2_RPM_DEBUG0_LLC_SNAP (1L<<22)
+#define BNX2_RPM_DEBUG0_FM_STARTED (1L<<23)
+#define BNX2_RPM_DEBUG0_DONE (1L<<24)
+#define BNX2_RPM_DEBUG0_WAIT_4_DONE (1L<<25)
+#define BNX2_RPM_DEBUG0_USE_TPBUF_CKSUM (1L<<26)
+#define BNX2_RPM_DEBUG0_RX_NO_PSD_HDR_CKSUM (1L<<27)
+#define BNX2_RPM_DEBUG0_IGNORE_VLAN (1L<<28)
+#define BNX2_RPM_DEBUG0_RP_ENA_ACTIVE (1L<<31)
+
+#define BNX2_RPM_DEBUG1 0x00001988
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST (0xffffL<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_IDLE (0L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B6_ALL (1L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B2_IPLLC (2L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B6_IP (4L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B2_IP (8L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_IP_START (16L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_IP (32L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_TCP (64L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_UDP (128L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_AH (256L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ESP (512L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ESP_PAYLOAD (1024L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_DATA (2048L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ADD_CARRY (0x2000L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ADD_CARRYOUT (0x4000L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_LATCH_RESULT (0x8000L<<0)
+#define BNX2_RPM_DEBUG1_HDR_BCNT (0x7ffL<<16)
+#define BNX2_RPM_DEBUG1_UNKNOWN_ETYPE_D (1L<<28)
+#define BNX2_RPM_DEBUG1_VLAN_REMOVED_D2 (1L<<29)
+#define BNX2_RPM_DEBUG1_VLAN_REMOVED_D1 (1L<<30)
+#define BNX2_RPM_DEBUG1_EOF_0XTRA_WD (1L<<31)
+
+#define BNX2_RPM_DEBUG2 0x0000198c
+#define BNX2_RPM_DEBUG2_CMD_HIT_VEC (0xffffL<<0)
+#define BNX2_RPM_DEBUG2_IP_BCNT (0xffL<<16)
+#define BNX2_RPM_DEBUG2_THIS_CMD_M4 (1L<<24)
+#define BNX2_RPM_DEBUG2_THIS_CMD_M3 (1L<<25)
+#define BNX2_RPM_DEBUG2_THIS_CMD_M2 (1L<<26)
+#define BNX2_RPM_DEBUG2_THIS_CMD_M1 (1L<<27)
+#define BNX2_RPM_DEBUG2_IPIPE_EMPTY (1L<<28)
+#define BNX2_RPM_DEBUG2_FM_DISCARD (1L<<29)
+#define BNX2_RPM_DEBUG2_LAST_RULE_IN_FM_D2 (1L<<30)
+#define BNX2_RPM_DEBUG2_LAST_RULE_IN_FM_D1 (1L<<31)
+
+#define BNX2_RPM_DEBUG3 0x00001990
+#define BNX2_RPM_DEBUG3_AVAIL_MBUF_PTR (0x1ffL<<0)
+#define BNX2_RPM_DEBUG3_RDE_RLUPQ_WR_REQ_INT (1L<<9)
+#define BNX2_RPM_DEBUG3_RDE_RBUF_WR_LAST_INT (1L<<10)
+#define BNX2_RPM_DEBUG3_RDE_RBUF_WR_REQ_INT (1L<<11)
+#define BNX2_RPM_DEBUG3_RDE_RBUF_FREE_REQ (1L<<12)
+#define BNX2_RPM_DEBUG3_RDE_RBUF_ALLOC_REQ (1L<<13)
+#define BNX2_RPM_DEBUG3_DFSM_MBUF_NOTAVAIL (1L<<14)
+#define BNX2_RPM_DEBUG3_RBUF_RDE_SOF_DROP (1L<<15)
+#define BNX2_RPM_DEBUG3_DFIFO_VLD_ENTRY_CT (0xfL<<16)
+#define BNX2_RPM_DEBUG3_RDE_SRC_FIFO_ALMFULL (1L<<21)
+#define BNX2_RPM_DEBUG3_DROP_NXT_VLD (1L<<22)
+#define BNX2_RPM_DEBUG3_DROP_NXT (1L<<23)
+#define BNX2_RPM_DEBUG3_FTQ_FSM (0x3L<<24)
+#define BNX2_RPM_DEBUG3_FTQ_FSM_IDLE (0x0L<<24)
+#define BNX2_RPM_DEBUG3_FTQ_FSM_WAIT_ACK (0x1L<<24)
+#define BNX2_RPM_DEBUG3_FTQ_FSM_WAIT_FREE (0x2L<<24)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM (0x3L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_SOF (0x0L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_GET_MBUF (0x1L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_DMA_DATA (0x2L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_DATA (0x3L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_EOF (0x4L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_MF_ACK (0x5L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_DROP_NXT_VLD (0x6L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_DONE (0x7L<<26)
+#define BNX2_RPM_DEBUG3_MBFREE_FSM (1L<<29)
+#define BNX2_RPM_DEBUG3_MBFREE_FSM_IDLE (0L<<29)
+#define BNX2_RPM_DEBUG3_MBFREE_FSM_WAIT_ACK (1L<<29)
+#define BNX2_RPM_DEBUG3_MBALLOC_FSM (1L<<30)
+#define BNX2_RPM_DEBUG3_MBALLOC_FSM_ET_MBUF (0x0L<<30)
+#define BNX2_RPM_DEBUG3_MBALLOC_FSM_IVE_MBUF (0x1L<<30)
+#define BNX2_RPM_DEBUG3_CCODE_EOF_ERROR (1L<<31)
+
+#define BNX2_RPM_DEBUG4 0x00001994
+#define BNX2_RPM_DEBUG4_DFSM_MBUF_CLUSTER (0x1ffffffL<<0)
+#define BNX2_RPM_DEBUG4_DFIFO_CUR_CCODE (0x7L<<25)
+#define BNX2_RPM_DEBUG4_MBWRITE_FSM (0x7L<<28)
+#define BNX2_RPM_DEBUG4_DFIFO_EMPTY (1L<<31)
+
+#define BNX2_RPM_DEBUG5 0x00001998
+#define BNX2_RPM_DEBUG5_RDROP_WPTR (0x1fL<<0)
+#define BNX2_RPM_DEBUG5_RDROP_ACPI_RPTR (0x1fL<<5)
+#define BNX2_RPM_DEBUG5_RDROP_MC_RPTR (0x1fL<<10)
+#define BNX2_RPM_DEBUG5_RDROP_RC_RPTR (0x1fL<<15)
+#define BNX2_RPM_DEBUG5_RDROP_ACPI_EMPTY (1L<<20)
+#define BNX2_RPM_DEBUG5_RDROP_MC_EMPTY (1L<<21)
+#define BNX2_RPM_DEBUG5_RDROP_AEOF_VEC_AT_RDROP_MC_RPTR (1L<<22)
+#define BNX2_RPM_DEBUG5_HOLDREG_WOL_DROP_INT (1L<<23)
+#define BNX2_RPM_DEBUG5_HOLDREG_DISCARD (1L<<24)
+#define BNX2_RPM_DEBUG5_HOLDREG_MBUF_NOTAVAIL (1L<<25)
+#define BNX2_RPM_DEBUG5_HOLDREG_MC_EMPTY (1L<<26)
+#define BNX2_RPM_DEBUG5_HOLDREG_RC_EMPTY (1L<<27)
+#define BNX2_RPM_DEBUG5_HOLDREG_FC_EMPTY (1L<<28)
+#define BNX2_RPM_DEBUG5_HOLDREG_ACPI_EMPTY (1L<<29)
+#define BNX2_RPM_DEBUG5_HOLDREG_FULL_T (1L<<30)
+#define BNX2_RPM_DEBUG5_HOLDREG_RD (1L<<31)
+
+#define BNX2_RPM_DEBUG6 0x0000199c
+#define BNX2_RPM_DEBUG6_ACPI_VEC (0xffffL<<0)
+#define BNX2_RPM_DEBUG6_VEC (0xffffL<<16)
+
+#define BNX2_RPM_DEBUG7 0x000019a0
+#define BNX2_RPM_DEBUG7_RPM_DBG7_LAST_CRC (0xffffffffL<<0)
+
+#define BNX2_RPM_DEBUG8 0x000019a4
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM (0xfL<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_IDLE (0L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_W1_ADDR (1L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_W2_ADDR (2L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_W3_ADDR (3L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_WAIT_THBUF (4L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W3_DATA (5L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W0_ADDR (6L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W1_ADDR (7L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W2_ADDR (8L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W3_ADDR (9L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_WAIT_THBUF (10L<<0)
+#define BNX2_RPM_DEBUG8_COMPARE_AT_W0 (1L<<4)
+#define BNX2_RPM_DEBUG8_COMPARE_AT_W3_DATA (1L<<5)
+#define BNX2_RPM_DEBUG8_COMPARE_AT_SOF_WAIT (1L<<6)
+#define BNX2_RPM_DEBUG8_COMPARE_AT_SOF_W3 (1L<<7)
+#define BNX2_RPM_DEBUG8_COMPARE_AT_SOF_W2 (1L<<8)
+#define BNX2_RPM_DEBUG8_EOF_W_LTEQ6_VLDBYTES (1L<<9)
+#define BNX2_RPM_DEBUG8_EOF_W_LTEQ4_VLDBYTES (1L<<10)
+#define BNX2_RPM_DEBUG8_NXT_EOF_W_12_VLDBYTES (1L<<11)
+#define BNX2_RPM_DEBUG8_EOF_DET (1L<<12)
+#define BNX2_RPM_DEBUG8_SOF_DET (1L<<13)
+#define BNX2_RPM_DEBUG8_WAIT_4_SOF (1L<<14)
+#define BNX2_RPM_DEBUG8_ALL_DONE (1L<<15)
+#define BNX2_RPM_DEBUG8_THBUF_ADDR (0x7fL<<16)
+#define BNX2_RPM_DEBUG8_BYTE_CTR (0xffL<<24)
+
+#define BNX2_RPM_DEBUG9 0x000019a8
+#define BNX2_RPM_DEBUG9_OUTFIFO_COUNT (0x7L<<0)
+#define BNX2_RPM_DEBUG9_RDE_ACPI_RDY (1L<<3)
+#define BNX2_RPM_DEBUG9_VLD_RD_ENTRY_CT (0x7L<<4)
+#define BNX2_RPM_DEBUG9_OUTFIFO_OVERRUN_OCCURRED (1L<<28)
+#define BNX2_RPM_DEBUG9_INFIFO_OVERRUN_OCCURRED (1L<<29)
+#define BNX2_RPM_DEBUG9_ACPI_MATCH_INT (1L<<30)
+#define BNX2_RPM_DEBUG9_ACPI_ENABLE_SYN (1L<<31)
+#define BNX2_RPM_DEBUG9_BEMEM_R_XI (0x1fL<<0)
+#define BNX2_RPM_DEBUG9_EO_XI (1L<<5)
+#define BNX2_RPM_DEBUG9_AEOF_DE_XI (1L<<6)
+#define BNX2_RPM_DEBUG9_SO_XI (1L<<7)
+#define BNX2_RPM_DEBUG9_WD64_CT_XI (0x1fL<<8)
+#define BNX2_RPM_DEBUG9_EOF_VLDBYTE_XI (0x7L<<13)
+#define BNX2_RPM_DEBUG9_ACPI_RDE_PAT_ID_XI (0xfL<<16)
+#define BNX2_RPM_DEBUG9_CALCRC_RESULT_XI (0x3ffL<<20)
+#define BNX2_RPM_DEBUG9_DATA_IN_VL_XI (1L<<30)
+#define BNX2_RPM_DEBUG9_CALCRC_BUFFER_VLD_XI (1L<<31)
+
+#define BNX2_RPM_ACPI_DBG_BUF_W00 0x000019c0
+#define BNX2_RPM_ACPI_DBG_BUF_W01 0x000019c4
+#define BNX2_RPM_ACPI_DBG_BUF_W02 0x000019c8
+#define BNX2_RPM_ACPI_DBG_BUF_W03 0x000019cc
+#define BNX2_RPM_ACPI_DBG_BUF_W10 0x000019d0
+#define BNX2_RPM_ACPI_DBG_BUF_W11 0x000019d4
+#define BNX2_RPM_ACPI_DBG_BUF_W12 0x000019d8
+#define BNX2_RPM_ACPI_DBG_BUF_W13 0x000019dc
+#define BNX2_RPM_ACPI_DBG_BUF_W20 0x000019e0
+#define BNX2_RPM_ACPI_DBG_BUF_W21 0x000019e4
+#define BNX2_RPM_ACPI_DBG_BUF_W22 0x000019e8
+#define BNX2_RPM_ACPI_DBG_BUF_W23 0x000019ec
+#define BNX2_RPM_ACPI_DBG_BUF_W30 0x000019f0
+#define BNX2_RPM_ACPI_DBG_BUF_W31 0x000019f4
+#define BNX2_RPM_ACPI_DBG_BUF_W32 0x000019f8
+#define BNX2_RPM_ACPI_DBG_BUF_W33 0x000019fc
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL 0x00001a00
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_BYTE_ADDRESS (0xffffL<<0)
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_DEBUGRD (1L<<28)
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_MODE (1L<<29)
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_INIT (1L<<30)
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_WR (1L<<31)
+
+#define BNX2_RPM_ACPI_PATTERN_CTRL 0x00001a04
+#define BNX2_RPM_ACPI_PATTERN_CTRL_PATTERN_ID (0xfL<<0)
+#define BNX2_RPM_ACPI_PATTERN_CTRL_CRC_SM_CLR (1L<<30)
+#define BNX2_RPM_ACPI_PATTERN_CTRL_WR (1L<<31)
+
+#define BNX2_RPM_ACPI_DATA 0x00001a08
+#define BNX2_RPM_ACPI_DATA_PATTERN_BE (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_LEN0 0x00001a0c
+#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN3 (0xffL<<0)
+#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN2 (0xffL<<8)
+#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN1 (0xffL<<16)
+#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN0 (0xffL<<24)
+
+#define BNX2_RPM_ACPI_PATTERN_LEN1 0x00001a10
+#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN7 (0xffL<<0)
+#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN6 (0xffL<<8)
+#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN5 (0xffL<<16)
+#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN4 (0xffL<<24)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC0 0x00001a18
+#define BNX2_RPM_ACPI_PATTERN_CRC0_PATTERN_CRC0 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC1 0x00001a1c
+#define BNX2_RPM_ACPI_PATTERN_CRC1_PATTERN_CRC1 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC2 0x00001a20
+#define BNX2_RPM_ACPI_PATTERN_CRC2_PATTERN_CRC2 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC3 0x00001a24
+#define BNX2_RPM_ACPI_PATTERN_CRC3_PATTERN_CRC3 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC4 0x00001a28
+#define BNX2_RPM_ACPI_PATTERN_CRC4_PATTERN_CRC4 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC5 0x00001a2c
+#define BNX2_RPM_ACPI_PATTERN_CRC5_PATTERN_CRC5 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC6 0x00001a30
+#define BNX2_RPM_ACPI_PATTERN_CRC6_PATTERN_CRC6 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC7 0x00001a34
+#define BNX2_RPM_ACPI_PATTERN_CRC7_PATTERN_CRC7 (0xffffffffL<<0)
+
+
+/*
+ * rlup_reg definition
+ * offset: 0x2000
+ */
+#define BNX2_RLUP_RSS_CONFIG 0x0000201c
+#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_XI (0x3L<<0)
+#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_OFF_XI (0L<<0)
+#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI (1L<<0)
+#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_IP_ONLY_XI (2L<<0)
+#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_RES_XI (3L<<0)
+#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_XI (0x3L<<2)
+#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_OFF_XI (0L<<2)
+#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI (1L<<2)
+#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_IP_ONLY_XI (2L<<2)
+#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_RES_XI (3L<<2)
+
+#define BNX2_RLUP_RSS_COMMAND 0x00002048
+#define BNX2_RLUP_RSS_COMMAND_RSS_IND_TABLE_ADDR (0xfUL<<0)
+#define BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK (0xffUL<<4)
+#define BNX2_RLUP_RSS_COMMAND_WRITE (1UL<<12)
+#define BNX2_RLUP_RSS_COMMAND_READ (1UL<<13)
+#define BNX2_RLUP_RSS_COMMAND_HASH_MASK (0x7UL<<14)
+
+#define BNX2_RLUP_RSS_DATA 0x0000204c
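+
+/*
+ * The RSS indirection table behind RLUP_RSS_DATA is written indirectly:
+ * stage a 32-bit slice of the table in RSS_DATA, then kick RSS_COMMAND
+ * with the table address plus the write/mask bits.  A sketch under the
+ * same hypothetical REG_WR() helper as above, filling entry group i:
+ *
+ *	REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl32);
+ *	REG_WR(bp, BNX2_RLUP_RSS_COMMAND, i |
+ *	       BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
+ *	       BNX2_RLUP_RSS_COMMAND_WRITE |
+ *	       BNX2_RLUP_RSS_COMMAND_HASH_MASK);
+ */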
+
+
+/*
+ * rbuf_reg definition
+ * offset: 0x200000
+ */
+#define BNX2_RBUF_COMMAND 0x00200000
+#define BNX2_RBUF_COMMAND_ENABLED (1L<<0)
+#define BNX2_RBUF_COMMAND_FREE_INIT (1L<<1)
+#define BNX2_RBUF_COMMAND_RAM_INIT (1L<<2)
+#define BNX2_RBUF_COMMAND_PKT_OFFSET_OVFL (1L<<3)
+#define BNX2_RBUF_COMMAND_OVER_FREE (1L<<4)
+#define BNX2_RBUF_COMMAND_ALLOC_REQ (1L<<5)
+#define BNX2_RBUF_COMMAND_EN_PRI_CHNGE_TE (1L<<6)
+#define BNX2_RBUF_COMMAND_CU_ISOLATE_XI (1L<<5)
+#define BNX2_RBUF_COMMAND_EN_PRI_CHANGE_XI (1L<<6)
+#define BNX2_RBUF_COMMAND_GRC_ENDIAN_CONV_DIS_XI (1L<<7)
+
+#define BNX2_RBUF_STATUS1 0x00200004
+#define BNX2_RBUF_STATUS1_FREE_COUNT (0x3ffL<<0)
+
+#define BNX2_RBUF_STATUS2 0x00200008
+#define BNX2_RBUF_STATUS2_FREE_TAIL (0x1ffL<<0)
+#define BNX2_RBUF_STATUS2_FREE_HEAD (0x1ffL<<16)
+
+#define BNX2_RBUF_CONFIG 0x0020000c
+#define BNX2_RBUF_CONFIG_XOFF_TRIP (0x3ffL<<0)
+#define BNX2_RBUF_CONFIG_XOFF_TRIP_VAL(mtu) \
+ ((((mtu) - 1500) * 31 / 1000) + 54)
+#define BNX2_RBUF_CONFIG_XON_TRIP (0x3ffL<<16)
+#define BNX2_RBUF_CONFIG_XON_TRIP_VAL(mtu) \
+ ((((mtu) - 1500) * 39 / 1000) + 66)
+#define BNX2_RBUF_CONFIG_VAL(mtu) \
+ (BNX2_RBUF_CONFIG_XOFF_TRIP_VAL(mtu) | \
+ (BNX2_RBUF_CONFIG_XON_TRIP_VAL(mtu) << 16))
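+
+/*
+ * Worked example of the trip macros above: at the standard 1500-byte
+ * MTU the scaling terms vanish, so XOFF_TRIP_VAL(1500) == 54 and
+ * XON_TRIP_VAL(1500) == 66, giving
+ * BNX2_RBUF_CONFIG_VAL(1500) == (66 << 16) | 54 == 0x00420036.  At a
+ * 9000-byte jumbo MTU the same integer arithmetic yields 286 and 358
+ * (0x0166011e), still well inside the 0x3ff field masks.  CONFIG2 and
+ * CONFIG3 below compose their DROP/KEEP trip values the same way.
+ */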
+
+#define BNX2_RBUF_FW_BUF_ALLOC 0x00200010
+#define BNX2_RBUF_FW_BUF_ALLOC_VALUE (0x1ffL<<7)
+#define BNX2_RBUF_FW_BUF_ALLOC_TYPE (1L<<16)
+#define BNX2_RBUF_FW_BUF_ALLOC_ALLOC_REQ (1L<<31)
+
+#define BNX2_RBUF_FW_BUF_FREE 0x00200014
+#define BNX2_RBUF_FW_BUF_FREE_COUNT (0x7fL<<0)
+#define BNX2_RBUF_FW_BUF_FREE_TAIL (0x1ffL<<7)
+#define BNX2_RBUF_FW_BUF_FREE_HEAD (0x1ffL<<16)
+#define BNX2_RBUF_FW_BUF_FREE_TYPE (1L<<25)
+#define BNX2_RBUF_FW_BUF_FREE_FREE_REQ (1L<<31)
+
+#define BNX2_RBUF_FW_BUF_SEL 0x00200018
+#define BNX2_RBUF_FW_BUF_SEL_COUNT (0x7fL<<0)
+#define BNX2_RBUF_FW_BUF_SEL_TAIL (0x1ffL<<7)
+#define BNX2_RBUF_FW_BUF_SEL_HEAD (0x1ffL<<16)
+#define BNX2_RBUF_FW_BUF_SEL_SEL_REQ (1L<<31)
+
+#define BNX2_RBUF_CONFIG2 0x0020001c
+#define BNX2_RBUF_CONFIG2_MAC_DROP_TRIP (0x3ffL<<0)
+#define BNX2_RBUF_CONFIG2_MAC_DROP_TRIP_VAL(mtu) \
+ ((((mtu) - 1500) * 4 / 1000) + 5)
+#define BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP (0x3ffL<<16)
+#define BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP_VAL(mtu) \
+ ((((mtu) - 1500) * 2 / 100) + 30)
+#define BNX2_RBUF_CONFIG2_VAL(mtu) \
+ (BNX2_RBUF_CONFIG2_MAC_DROP_TRIP_VAL(mtu) | \
+ (BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP_VAL(mtu) << 16))
+
+#define BNX2_RBUF_CONFIG3 0x00200020
+#define BNX2_RBUF_CONFIG3_CU_DROP_TRIP (0x3ffL<<0)
+#define BNX2_RBUF_CONFIG3_CU_DROP_TRIP_VAL(mtu) \
+ ((((mtu) - 1500) * 12 / 1000) + 18)
+#define BNX2_RBUF_CONFIG3_CU_KEEP_TRIP (0x3ffL<<16)
+#define BNX2_RBUF_CONFIG3_CU_KEEP_TRIP_VAL(mtu) \
+ ((((mtu) - 1500) * 2 / 100) + 30)
+#define BNX2_RBUF_CONFIG3_VAL(mtu) \
+ (BNX2_RBUF_CONFIG3_CU_DROP_TRIP_VAL(mtu) | \
+ (BNX2_RBUF_CONFIG3_CU_KEEP_TRIP_VAL(mtu) << 16))
+
+#define BNX2_RBUF_PKT_DATA 0x00208000
+#define BNX2_RBUF_CLIST_DATA 0x00210000
+#define BNX2_RBUF_BUF_DATA 0x00220000
+
+
+/*
+ * rv2p_reg definition
+ * offset: 0x2800
+ */
+#define BNX2_RV2P_COMMAND 0x00002800
+#define BNX2_RV2P_COMMAND_ENABLED (1L<<0)
+#define BNX2_RV2P_COMMAND_PROC1_INTRPT (1L<<1)
+#define BNX2_RV2P_COMMAND_PROC2_INTRPT (1L<<2)
+#define BNX2_RV2P_COMMAND_ABORT0 (1L<<4)
+#define BNX2_RV2P_COMMAND_ABORT1 (1L<<5)
+#define BNX2_RV2P_COMMAND_ABORT2 (1L<<6)
+#define BNX2_RV2P_COMMAND_ABORT3 (1L<<7)
+#define BNX2_RV2P_COMMAND_ABORT4 (1L<<8)
+#define BNX2_RV2P_COMMAND_ABORT5 (1L<<9)
+#define BNX2_RV2P_COMMAND_PROC1_RESET (1L<<16)
+#define BNX2_RV2P_COMMAND_PROC2_RESET (1L<<17)
+#define BNX2_RV2P_COMMAND_CTXIF_RESET (1L<<18)
+
+#define BNX2_RV2P_STATUS 0x00002804
+#define BNX2_RV2P_STATUS_ALWAYS_0 (1L<<0)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT0_CNT (1L<<8)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT1_CNT (1L<<9)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT2_CNT (1L<<10)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT3_CNT (1L<<11)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT4_CNT (1L<<12)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT5_CNT (1L<<13)
+
+#define BNX2_RV2P_CONFIG 0x00002808
+#define BNX2_RV2P_CONFIG_STALL_PROC1 (1L<<0)
+#define BNX2_RV2P_CONFIG_STALL_PROC2 (1L<<1)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT0 (1L<<8)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT1 (1L<<9)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT2 (1L<<10)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT3 (1L<<11)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT4 (1L<<12)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT5 (1L<<13)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT0 (1L<<16)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT1 (1L<<17)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT2 (1L<<18)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT3 (1L<<19)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT4 (1L<<20)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT5 (1L<<21)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE (0xfL<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_256 (0L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_512 (1L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_1K (2L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_2K (3L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_4K (4L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_8K (5L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_16K (6L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_32K (7L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_64K (8L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_128K (9L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_256K (10L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_512K (11L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_1M (12L<<24)
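+
+/*
+ * The PAGE_SIZE field encodes 256 << n in bits 27:24, so a byte count
+ * can be converted with something like the following (illustrative
+ * helper, not part of the original header; ilog2() is the kernel's
+ * log-base-2 helper):
+ *
+ *	#define BNX2_RV2P_PAGE_SIZE_ENC(sz)	((ilog2(sz) - 8) << 24)
+ *
+ * e.g. 4096 -> (12 - 8) << 24 == BNX2_RV2P_CONFIG_PAGE_SIZE_4K.  The
+ * TBDR_CONFIG register further down uses the identical encoding.
+ */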
+
+#define BNX2_RV2P_GEN_BFR_ADDR_0 0x00002810
+#define BNX2_RV2P_GEN_BFR_ADDR_0_VALUE (0xffffL<<16)
+
+#define BNX2_RV2P_GEN_BFR_ADDR_1 0x00002814
+#define BNX2_RV2P_GEN_BFR_ADDR_1_VALUE (0xffffL<<16)
+
+#define BNX2_RV2P_GEN_BFR_ADDR_2 0x00002818
+#define BNX2_RV2P_GEN_BFR_ADDR_2_VALUE (0xffffL<<16)
+
+#define BNX2_RV2P_GEN_BFR_ADDR_3 0x0000281c
+#define BNX2_RV2P_GEN_BFR_ADDR_3_VALUE (0xffffL<<16)
+
+#define BNX2_RV2P_INSTR_HIGH 0x00002830
+#define BNX2_RV2P_INSTR_HIGH_HIGH (0x1fL<<0)
+
+#define BNX2_RV2P_INSTR_LOW 0x00002834
+#define BNX2_RV2P_INSTR_LOW_LOW (0xffffffffL<<0)
+
+#define BNX2_RV2P_PROC1_ADDR_CMD 0x00002838
+#define BNX2_RV2P_PROC1_ADDR_CMD_ADD (0x3ffL<<0)
+#define BNX2_RV2P_PROC1_ADDR_CMD_RDWR (1L<<31)
+
+#define BNX2_RV2P_PROC2_ADDR_CMD 0x0000283c
+#define BNX2_RV2P_PROC2_ADDR_CMD_ADD (0x3ffL<<0)
+#define BNX2_RV2P_PROC2_ADDR_CMD_RDWR (1L<<31)
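+
+/*
+ * RV2P firmware is loaded one 64-bit instruction at a time: the word is
+ * staged through INSTR_HIGH/INSTR_LOW and committed by writing the
+ * target address with the RDWR bit set.  A sketch, assuming the same
+ * hypothetical REG_WR() helper and fw[] as an array of big-endian
+ * 32-bit words:
+ *
+ *	REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(fw[2 * i]));
+ *	REG_WR(bp, BNX2_RV2P_INSTR_LOW,  be32_to_cpu(fw[2 * i + 1]));
+ *	REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD,
+ *	       i | BNX2_RV2P_PROC1_ADDR_CMD_RDWR);
+ */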
+
+#define BNX2_RV2P_PROC1_GRC_DEBUG 0x00002840
+#define BNX2_RV2P_PROC2_GRC_DEBUG 0x00002844
+#define BNX2_RV2P_GRC_PROC_DEBUG 0x00002848
+#define BNX2_RV2P_DEBUG_VECT_PEEK 0x0000284c
+#define BNX2_RV2P_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0)
+#define BNX2_RV2P_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11)
+#define BNX2_RV2P_DEBUG_VECT_PEEK_1_SEL (0xfL<<12)
+#define BNX2_RV2P_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16)
+#define BNX2_RV2P_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27)
+#define BNX2_RV2P_DEBUG_VECT_PEEK_2_SEL (0xfL<<28)
+
+#define BNX2_RV2P_MPFE_PFE_CTL 0x00002afc
+#define BNX2_RV2P_MPFE_PFE_CTL_INC_USAGE_CNT (1L<<0)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE (0xfL<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_0 (0L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_1 (1L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_2 (2L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_3 (3L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_4 (4L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_5 (5L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_6 (6L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_7 (7L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_8 (8L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_9 (9L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_10 (10L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_11 (11L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_12 (12L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_13 (13L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_14 (14L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_15 (15L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_COUNT (0xfL<<12)
+#define BNX2_RV2P_MPFE_PFE_CTL_OFFSET (0x1ffL<<16)
+
+#define BNX2_RV2P_RV2PPQ 0x00002b40
+#define BNX2_RV2P_PFTQ_CMD 0x00002b78
+#define BNX2_RV2P_PFTQ_CMD_OFFSET (0x3ffL<<0)
+#define BNX2_RV2P_PFTQ_CMD_WR_TOP (1L<<10)
+#define BNX2_RV2P_PFTQ_CMD_WR_TOP_0 (0L<<10)
+#define BNX2_RV2P_PFTQ_CMD_WR_TOP_1 (1L<<10)
+#define BNX2_RV2P_PFTQ_CMD_SFT_RESET (1L<<25)
+#define BNX2_RV2P_PFTQ_CMD_RD_DATA (1L<<26)
+#define BNX2_RV2P_PFTQ_CMD_ADD_INTERVEN (1L<<27)
+#define BNX2_RV2P_PFTQ_CMD_ADD_DATA (1L<<28)
+#define BNX2_RV2P_PFTQ_CMD_INTERVENE_CLR (1L<<29)
+#define BNX2_RV2P_PFTQ_CMD_POP (1L<<30)
+#define BNX2_RV2P_PFTQ_CMD_BUSY (1L<<31)
+
+#define BNX2_RV2P_PFTQ_CTL 0x00002b7c
+#define BNX2_RV2P_PFTQ_CTL_INTERVENE (1L<<0)
+#define BNX2_RV2P_PFTQ_CTL_OVERFLOW (1L<<1)
+#define BNX2_RV2P_PFTQ_CTL_FORCE_INTERVENE (1L<<2)
+#define BNX2_RV2P_PFTQ_CTL_MAX_DEPTH (0x3ffL<<12)
+#define BNX2_RV2P_PFTQ_CTL_CUR_DEPTH (0x3ffL<<22)
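+
+/*
+ * Every *FTQ_CMD/*FTQ_CTL pair in this file (RV2P, TBDR, TDMA, ...)
+ * exposes the same flow-through-queue interface.  Going by the bit
+ * names alone -- the semantics here are an assumption, not taken from
+ * the original header -- popping one entry would look roughly like:
+ *
+ *	REG_WR(bp, BNX2_RV2P_PFTQ_CMD, BNX2_RV2P_PFTQ_CMD_POP);
+ *	while (REG_RD(bp, BNX2_RV2P_PFTQ_CMD) & BNX2_RV2P_PFTQ_CMD_BUSY)
+ *		cpu_relax();
+ */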
+
+#define BNX2_RV2P_RV2PTQ 0x00002b80
+#define BNX2_RV2P_TFTQ_CMD 0x00002bb8
+#define BNX2_RV2P_TFTQ_CMD_OFFSET (0x3ffL<<0)
+#define BNX2_RV2P_TFTQ_CMD_WR_TOP (1L<<10)
+#define BNX2_RV2P_TFTQ_CMD_WR_TOP_0 (0L<<10)
+#define BNX2_RV2P_TFTQ_CMD_WR_TOP_1 (1L<<10)
+#define BNX2_RV2P_TFTQ_CMD_SFT_RESET (1L<<25)
+#define BNX2_RV2P_TFTQ_CMD_RD_DATA (1L<<26)
+#define BNX2_RV2P_TFTQ_CMD_ADD_INTERVEN (1L<<27)
+#define BNX2_RV2P_TFTQ_CMD_ADD_DATA (1L<<28)
+#define BNX2_RV2P_TFTQ_CMD_INTERVENE_CLR (1L<<29)
+#define BNX2_RV2P_TFTQ_CMD_POP (1L<<30)
+#define BNX2_RV2P_TFTQ_CMD_BUSY (1L<<31)
+
+#define BNX2_RV2P_TFTQ_CTL 0x00002bbc
+#define BNX2_RV2P_TFTQ_CTL_INTERVENE (1L<<0)
+#define BNX2_RV2P_TFTQ_CTL_OVERFLOW (1L<<1)
+#define BNX2_RV2P_TFTQ_CTL_FORCE_INTERVENE (1L<<2)
+#define BNX2_RV2P_TFTQ_CTL_MAX_DEPTH (0x3ffL<<12)
+#define BNX2_RV2P_TFTQ_CTL_CUR_DEPTH (0x3ffL<<22)
+
+#define BNX2_RV2P_RV2PMQ 0x00002bc0
+#define BNX2_RV2P_MFTQ_CMD 0x00002bf8
+#define BNX2_RV2P_MFTQ_CMD_OFFSET (0x3ffL<<0)
+#define BNX2_RV2P_MFTQ_CMD_WR_TOP (1L<<10)
+#define BNX2_RV2P_MFTQ_CMD_WR_TOP_0 (0L<<10)
+#define BNX2_RV2P_MFTQ_CMD_WR_TOP_1 (1L<<10)
+#define BNX2_RV2P_MFTQ_CMD_SFT_RESET (1L<<25)
+#define BNX2_RV2P_MFTQ_CMD_RD_DATA (1L<<26)
+#define BNX2_RV2P_MFTQ_CMD_ADD_INTERVEN (1L<<27)
+#define BNX2_RV2P_MFTQ_CMD_ADD_DATA (1L<<28)
+#define BNX2_RV2P_MFTQ_CMD_INTERVENE_CLR (1L<<29)
+#define BNX2_RV2P_MFTQ_CMD_POP (1L<<30)
+#define BNX2_RV2P_MFTQ_CMD_BUSY (1L<<31)
+
+#define BNX2_RV2P_MFTQ_CTL 0x00002bfc
+#define BNX2_RV2P_MFTQ_CTL_INTERVENE (1L<<0)
+#define BNX2_RV2P_MFTQ_CTL_OVERFLOW (1L<<1)
+#define BNX2_RV2P_MFTQ_CTL_FORCE_INTERVENE (1L<<2)
+#define BNX2_RV2P_MFTQ_CTL_MAX_DEPTH (0x3ffL<<12)
+#define BNX2_RV2P_MFTQ_CTL_CUR_DEPTH (0x3ffL<<22)
+
+
+
+/*
+ * mq_reg definition
+ * offset: 0x3c00
+ */
+#define BNX2_MQ_COMMAND 0x00003c00
+#define BNX2_MQ_COMMAND_ENABLED (1L<<0)
+#define BNX2_MQ_COMMAND_INIT (1L<<1)
+#define BNX2_MQ_COMMAND_OVERFLOW (1L<<4)
+#define BNX2_MQ_COMMAND_WR_ERROR (1L<<5)
+#define BNX2_MQ_COMMAND_RD_ERROR (1L<<6)
+#define BNX2_MQ_COMMAND_IDB_CFG_ERROR (1L<<7)
+#define BNX2_MQ_COMMAND_IDB_OVERFLOW (1L<<10)
+#define BNX2_MQ_COMMAND_NO_BIN_ERROR (1L<<11)
+#define BNX2_MQ_COMMAND_NO_MAP_ERROR (1L<<12)
+
+#define BNX2_MQ_STATUS 0x00003c04
+#define BNX2_MQ_STATUS_CTX_ACCESS_STAT (1L<<16)
+#define BNX2_MQ_STATUS_CTX_ACCESS64_STAT (1L<<17)
+#define BNX2_MQ_STATUS_PCI_STALL_STAT (1L<<18)
+#define BNX2_MQ_STATUS_IDB_OFLOW_STAT (1L<<19)
+
+#define BNX2_MQ_CONFIG 0x00003c08
+#define BNX2_MQ_CONFIG_TX_HIGH_PRI (1L<<0)
+#define BNX2_MQ_CONFIG_HALT_DIS (1L<<1)
+#define BNX2_MQ_CONFIG_BIN_MQ_MODE (1L<<2)
+#define BNX2_MQ_CONFIG_DIS_IDB_DROP (1L<<3)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE (0x7L<<4)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256 (0L<<4)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_512 (1L<<4)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_1K (2L<<4)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_2K (3L<<4)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_4K (4L<<4)
+#define BNX2_MQ_CONFIG_MAX_DEPTH (0x7fL<<8)
+#define BNX2_MQ_CONFIG_CUR_DEPTH (0x7fL<<20)
+
+#define BNX2_MQ_ENQUEUE1 0x00003c0c
+#define BNX2_MQ_ENQUEUE1_OFFSET (0x3fL<<2)
+#define BNX2_MQ_ENQUEUE1_CID (0x3fffL<<8)
+#define BNX2_MQ_ENQUEUE1_BYTE_MASK (0xfL<<24)
+#define BNX2_MQ_ENQUEUE1_KNL_MODE (1L<<28)
+
+#define BNX2_MQ_ENQUEUE2 0x00003c10
+#define BNX2_MQ_BAD_WR_ADDR 0x00003c14
+#define BNX2_MQ_BAD_RD_ADDR 0x00003c18
+#define BNX2_MQ_KNL_BYP_WIND_START 0x00003c1c
+#define BNX2_MQ_KNL_BYP_WIND_START_VALUE (0xfffffL<<12)
+
+#define BNX2_MQ_KNL_WIND_END 0x00003c20
+#define BNX2_MQ_KNL_WIND_END_VALUE (0xffffffL<<8)
+
+#define BNX2_MQ_KNL_WRITE_MASK1 0x00003c24
+#define BNX2_MQ_KNL_TX_MASK1 0x00003c28
+#define BNX2_MQ_KNL_CMD_MASK1 0x00003c2c
+#define BNX2_MQ_KNL_COND_ENQUEUE_MASK1 0x00003c30
+#define BNX2_MQ_KNL_RX_V2P_MASK1 0x00003c34
+#define BNX2_MQ_KNL_WRITE_MASK2 0x00003c38
+#define BNX2_MQ_KNL_TX_MASK2 0x00003c3c
+#define BNX2_MQ_KNL_CMD_MASK2 0x00003c40
+#define BNX2_MQ_KNL_COND_ENQUEUE_MASK2 0x00003c44
+#define BNX2_MQ_KNL_RX_V2P_MASK2 0x00003c48
+#define BNX2_MQ_KNL_BYP_WRITE_MASK1 0x00003c4c
+#define BNX2_MQ_KNL_BYP_TX_MASK1 0x00003c50
+#define BNX2_MQ_KNL_BYP_CMD_MASK1 0x00003c54
+#define BNX2_MQ_KNL_BYP_COND_ENQUEUE_MASK1 0x00003c58
+#define BNX2_MQ_KNL_BYP_RX_V2P_MASK1 0x00003c5c
+#define BNX2_MQ_KNL_BYP_WRITE_MASK2 0x00003c60
+#define BNX2_MQ_KNL_BYP_TX_MASK2 0x00003c64
+#define BNX2_MQ_KNL_BYP_CMD_MASK2 0x00003c68
+#define BNX2_MQ_KNL_BYP_COND_ENQUEUE_MASK2 0x00003c6c
+#define BNX2_MQ_KNL_BYP_RX_V2P_MASK2 0x00003c70
+#define BNX2_MQ_MEM_WR_ADDR 0x00003c74
+#define BNX2_MQ_MEM_WR_ADDR_VALUE (0x3fL<<0)
+
+#define BNX2_MQ_MEM_WR_DATA0 0x00003c78
+#define BNX2_MQ_MEM_WR_DATA0_VALUE (0xffffffffL<<0)
+
+#define BNX2_MQ_MEM_WR_DATA1 0x00003c7c
+#define BNX2_MQ_MEM_WR_DATA1_VALUE (0xffffffffL<<0)
+
+#define BNX2_MQ_MEM_WR_DATA2 0x00003c80
+#define BNX2_MQ_MEM_WR_DATA2_VALUE (0x3fffffffL<<0)
+#define BNX2_MQ_MEM_WR_DATA2_VALUE_XI (0x7fffffffL<<0)
+
+#define BNX2_MQ_MEM_RD_ADDR 0x00003c84
+#define BNX2_MQ_MEM_RD_ADDR_VALUE (0x3fL<<0)
+
+#define BNX2_MQ_MEM_RD_DATA0 0x00003c88
+#define BNX2_MQ_MEM_RD_DATA0_VALUE (0xffffffffL<<0)
+
+#define BNX2_MQ_MEM_RD_DATA1 0x00003c8c
+#define BNX2_MQ_MEM_RD_DATA1_VALUE (0xffffffffL<<0)
+
+#define BNX2_MQ_MEM_RD_DATA2 0x00003c90
+#define BNX2_MQ_MEM_RD_DATA2_VALUE (0x3fffffffL<<0)
+#define BNX2_MQ_MEM_RD_DATA2_VALUE_XI (0x7fffffffL<<0)
+
+#define BNX2_MQ_MAP_L2_3 0x00003d2c
+#define BNX2_MQ_MAP_L2_3_MQ_OFFSET (0xffL<<0)
+#define BNX2_MQ_MAP_L2_3_SZ (0x3L<<8)
+#define BNX2_MQ_MAP_L2_3_CTX_OFFSET (0x2ffL<<10)
+#define BNX2_MQ_MAP_L2_3_BIN_OFFSET (0x7L<<23)
+#define BNX2_MQ_MAP_L2_3_ARM (0x3L<<26)
+#define BNX2_MQ_MAP_L2_3_ENA (0x1L<<31)
+#define BNX2_MQ_MAP_L2_3_DEFAULT 0x82004646
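+
+/*
+ * Decoding the default against the field masks above: 0x82004646 has
+ * ENA = 1, ARM = 0, BIN_OFFSET = 4, CTX_OFFSET = 0x11, SZ = 2 and
+ * MQ_OFFSET = 0x46.
+ */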
+
+#define BNX2_MQ_MAP_L2_5 0x00003d34
+#define BNX2_MQ_MAP_L2_5_ARM (0x3L<<26)
+
+/*
+ * tsch_reg definition
+ * offset: 0x4c00
+ */
+#define BNX2_TSCH_TSS_CFG 0x00004c1c
+#define BNX2_TSCH_TSS_CFG_TSS_START_CID (0x7ffL<<8)
+#define BNX2_TSCH_TSS_CFG_NUM_OF_TSS_CON (0xfL<<24)
+
+
+
+/*
+ * tbdr_reg definition
+ * offset: 0x5000
+ */
+#define BNX2_TBDR_COMMAND 0x00005000
+#define BNX2_TBDR_COMMAND_ENABLE (1L<<0)
+#define BNX2_TBDR_COMMAND_SOFT_RST (1L<<1)
+#define BNX2_TBDR_COMMAND_MSTR_ABORT (1L<<4)
+
+#define BNX2_TBDR_STATUS 0x00005004
+#define BNX2_TBDR_STATUS_DMA_WAIT (1L<<0)
+#define BNX2_TBDR_STATUS_FTQ_WAIT (1L<<1)
+#define BNX2_TBDR_STATUS_FIFO_OVERFLOW (1L<<2)
+#define BNX2_TBDR_STATUS_FIFO_UNDERFLOW (1L<<3)
+#define BNX2_TBDR_STATUS_SEARCHMISS_ERROR (1L<<4)
+#define BNX2_TBDR_STATUS_FTQ_ENTRY_CNT (1L<<5)
+#define BNX2_TBDR_STATUS_BURST_CNT (1L<<6)
+
+#define BNX2_TBDR_CONFIG 0x00005008
+#define BNX2_TBDR_CONFIG_MAX_BDS (0xffL<<0)
+#define BNX2_TBDR_CONFIG_SWAP_MODE (1L<<8)
+#define BNX2_TBDR_CONFIG_PRIORITY (1L<<9)
+#define BNX2_TBDR_CONFIG_CACHE_NEXT_PAGE_PTRS (1L<<10)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE (0xfL<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_256 (0L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_512 (1L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_1K (2L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_2K (3L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_4K (4L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_8K (5L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_16K (6L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_32K (7L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_64K (8L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_128K (9L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_256K (10L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_512K (11L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_1M (12L<<24)
+
+#define BNX2_TBDR_DEBUG_VECT_PEEK 0x0000500c
+#define BNX2_TBDR_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0)
+#define BNX2_TBDR_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11)
+#define BNX2_TBDR_DEBUG_VECT_PEEK_1_SEL (0xfL<<12)
+#define BNX2_TBDR_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16)
+#define BNX2_TBDR_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27)
+#define BNX2_TBDR_DEBUG_VECT_PEEK_2_SEL (0xfL<<28)
+
+#define BNX2_TBDR_CKSUM_ERROR_STATUS 0x00005010
+#define BNX2_TBDR_CKSUM_ERROR_STATUS_CALCULATED (0xffffL<<0)
+#define BNX2_TBDR_CKSUM_ERROR_STATUS_EXPECTED (0xffffL<<16)
+
+#define BNX2_TBDR_TBDRQ 0x000053c0
+#define BNX2_TBDR_FTQ_CMD 0x000053f8
+#define BNX2_TBDR_FTQ_CMD_OFFSET (0x3ffL<<0)
+#define BNX2_TBDR_FTQ_CMD_WR_TOP (1L<<10)
+#define BNX2_TBDR_FTQ_CMD_WR_TOP_0 (0L<<10)
+#define BNX2_TBDR_FTQ_CMD_WR_TOP_1 (1L<<10)
+#define BNX2_TBDR_FTQ_CMD_SFT_RESET (1L<<25)
+#define BNX2_TBDR_FTQ_CMD_RD_DATA (1L<<26)
+#define BNX2_TBDR_FTQ_CMD_ADD_INTERVEN (1L<<27)
+#define BNX2_TBDR_FTQ_CMD_ADD_DATA (1L<<28)
+#define BNX2_TBDR_FTQ_CMD_INTERVENE_CLR (1L<<29)
+#define BNX2_TBDR_FTQ_CMD_POP (1L<<30)
+#define BNX2_TBDR_FTQ_CMD_BUSY (1L<<31)
+
+#define BNX2_TBDR_FTQ_CTL 0x000053fc
+#define BNX2_TBDR_FTQ_CTL_INTERVENE (1L<<0)
+#define BNX2_TBDR_FTQ_CTL_OVERFLOW (1L<<1)
+#define BNX2_TBDR_FTQ_CTL_FORCE_INTERVENE (1L<<2)
+#define BNX2_TBDR_FTQ_CTL_MAX_DEPTH (0x3ffL<<12)
+#define BNX2_TBDR_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
+
+
+
+/*
+ * tdma_reg definition
+ * offset: 0x5c00
+ */
+#define BNX2_TDMA_COMMAND 0x00005c00
+#define BNX2_TDMA_COMMAND_ENABLED (1L<<0)
+#define BNX2_TDMA_COMMAND_MASTER_ABORT (1L<<4)
+#define BNX2_TDMA_COMMAND_CS16_ERR (1L<<5)
+#define BNX2_TDMA_COMMAND_BAD_L2_LENGTH_ABORT (1L<<7)
+#define BNX2_TDMA_COMMAND_MASK_CS1 (1L<<20)
+#define BNX2_TDMA_COMMAND_MASK_CS2 (1L<<21)
+#define BNX2_TDMA_COMMAND_MASK_CS3 (1L<<22)
+#define BNX2_TDMA_COMMAND_MASK_CS4 (1L<<23)
+#define BNX2_TDMA_COMMAND_FORCE_ILOCK_CKERR (1L<<24)
+#define BNX2_TDMA_COMMAND_OFIFO_CLR (1L<<30)
+#define BNX2_TDMA_COMMAND_IFIFO_CLR (1L<<31)
+
+#define BNX2_TDMA_STATUS 0x00005c04
+#define BNX2_TDMA_STATUS_DMA_WAIT (1L<<0)
+#define BNX2_TDMA_STATUS_PAYLOAD_WAIT (1L<<1)
+#define BNX2_TDMA_STATUS_PATCH_FTQ_WAIT (1L<<2)
+#define BNX2_TDMA_STATUS_LOCK_WAIT (1L<<3)
+#define BNX2_TDMA_STATUS_FTQ_ENTRY_CNT (1L<<16)
+#define BNX2_TDMA_STATUS_BURST_CNT (1L<<17)
+#define BNX2_TDMA_STATUS_MAX_IFIFO_DEPTH (0x3fL<<20)
+#define BNX2_TDMA_STATUS_OFIFO_OVERFLOW (1L<<30)
+#define BNX2_TDMA_STATUS_IFIFO_OVERFLOW (1L<<31)
+
+#define BNX2_TDMA_CONFIG 0x00005c08
+#define BNX2_TDMA_CONFIG_ONE_DMA (1L<<0)
+#define BNX2_TDMA_CONFIG_ONE_RECORD (1L<<1)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN (0x3L<<2)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_0 (0L<<2)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_1 (1L<<2)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_2 (2L<<2)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_3 (3L<<2)
+#define BNX2_TDMA_CONFIG_LIMIT_SZ (0xfL<<4)
+#define BNX2_TDMA_CONFIG_LIMIT_SZ_64 (0L<<4)
+#define BNX2_TDMA_CONFIG_LIMIT_SZ_128 (0x4L<<4)
+#define BNX2_TDMA_CONFIG_LIMIT_SZ_256 (0x6L<<4)
+#define BNX2_TDMA_CONFIG_LIMIT_SZ_512 (0x8L<<4)
+#define BNX2_TDMA_CONFIG_LINE_SZ (0xfL<<8)
+#define BNX2_TDMA_CONFIG_LINE_SZ_64 (0L<<8)
+#define BNX2_TDMA_CONFIG_LINE_SZ_128 (4L<<8)
+#define BNX2_TDMA_CONFIG_LINE_SZ_256 (6L<<8)
+#define BNX2_TDMA_CONFIG_LINE_SZ_512 (8L<<8)
+#define BNX2_TDMA_CONFIG_ALIGN_ENA (1L<<15)
+#define BNX2_TDMA_CONFIG_CHK_L2_BD (1L<<16)
+#define BNX2_TDMA_CONFIG_CMPL_ENTRY (1L<<17)
+#define BNX2_TDMA_CONFIG_OFIFO_CMP (1L<<19)
+#define BNX2_TDMA_CONFIG_OFIFO_CMP_3 (0L<<19)
+#define BNX2_TDMA_CONFIG_OFIFO_CMP_2 (1L<<19)
+#define BNX2_TDMA_CONFIG_FIFO_CMP (0xfL<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_XI (0x7L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_0_XI (0L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_4_XI (1L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_8_XI (2L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_16_XI (3L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_32_XI (4L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_64_XI (5L<<20)
+#define BNX2_TDMA_CONFIG_FIFO_CMP_EN_XI (1L<<23)
+#define BNX2_TDMA_CONFIG_BYTES_OST_XI (0x7L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_512_XI (0L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_1024_XI (1L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_2048_XI (2L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_4096_XI (3L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_8192_XI (4L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_16384_XI (5L<<24)
+#define BNX2_TDMA_CONFIG_HC_BYPASS_XI (1L<<27)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_XI (0x7L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_128_XI (0L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_256_XI (1L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_512_XI (2L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_1024_XI (3L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_2048_XI (4L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_4096_XI (5L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_EN_XI (1L<<31)
+
+#define BNX2_TDMA_PAYLOAD_PROD 0x00005c0c
+#define BNX2_TDMA_PAYLOAD_PROD_VALUE (0x1fffL<<3)
+
+#define BNX2_TDMA_DBG_WATCHDOG 0x00005c10
+#define BNX2_TDMA_DBG_TRIGGER 0x00005c14
+#define BNX2_TDMA_DMAD_FSM 0x00005c80
+#define BNX2_TDMA_DMAD_FSM_BD_INVLD (1L<<0)
+#define BNX2_TDMA_DMAD_FSM_PUSH (0xfL<<4)
+#define BNX2_TDMA_DMAD_FSM_ARB_TBDC (0x3L<<8)
+#define BNX2_TDMA_DMAD_FSM_ARB_CTX (1L<<12)
+#define BNX2_TDMA_DMAD_FSM_DR_INTF (1L<<16)
+#define BNX2_TDMA_DMAD_FSM_DMAD (0x7L<<20)
+#define BNX2_TDMA_DMAD_FSM_BD (0xfL<<24)
+
+#define BNX2_TDMA_DMAD_STATUS 0x00005c84
+#define BNX2_TDMA_DMAD_STATUS_RHOLD_PUSH_ENTRY (0x3L<<0)
+#define BNX2_TDMA_DMAD_STATUS_RHOLD_DMAD_ENTRY (0x3L<<4)
+#define BNX2_TDMA_DMAD_STATUS_RHOLD_BD_ENTRY (0x3L<<8)
+#define BNX2_TDMA_DMAD_STATUS_IFTQ_ENUM (0xfL<<12)
+
+#define BNX2_TDMA_DR_INTF_FSM 0x00005c88
+#define BNX2_TDMA_DR_INTF_FSM_L2_COMP (0x3L<<0)
+#define BNX2_TDMA_DR_INTF_FSM_TPATQ (0x7L<<4)
+#define BNX2_TDMA_DR_INTF_FSM_TPBUF (0x3L<<8)
+#define BNX2_TDMA_DR_INTF_FSM_DR_BUF (0x7L<<12)
+#define BNX2_TDMA_DR_INTF_FSM_DMAD (0x7L<<16)
+
+#define BNX2_TDMA_DR_INTF_STATUS 0x00005c8c
+#define BNX2_TDMA_DR_INTF_STATUS_HOLE_PHASE (0x7L<<0)
+#define BNX2_TDMA_DR_INTF_STATUS_DATA_AVAIL (0x3L<<4)
+#define BNX2_TDMA_DR_INTF_STATUS_SHIFT_ADDR (0x7L<<8)
+#define BNX2_TDMA_DR_INTF_STATUS_NXT_PNTR (0xfL<<12)
+#define BNX2_TDMA_DR_INTF_STATUS_BYTE_COUNT (0x7L<<16)
+
+#define BNX2_TDMA_PUSH_FSM 0x00005c90
+#define BNX2_TDMA_BD_IF_DEBUG 0x00005c94
+#define BNX2_TDMA_DMAD_IF_DEBUG 0x00005c98
+#define BNX2_TDMA_CTX_IF_DEBUG 0x00005c9c
+#define BNX2_TDMA_TPBUF_IF_DEBUG 0x00005ca0
+#define BNX2_TDMA_DR_IF_DEBUG 0x00005ca4
+#define BNX2_TDMA_TPATQ_IF_DEBUG 0x00005ca8
+#define BNX2_TDMA_TDMA_ILOCK_CKSUM 0x00005cac
+#define BNX2_TDMA_TDMA_ILOCK_CKSUM_CALCULATED (0xffffL<<0)
+#define BNX2_TDMA_TDMA_ILOCK_CKSUM_EXPECTED (0xffffL<<16)
+
+#define BNX2_TDMA_TDMA_PCIE_CKSUM 0x00005cb0
+#define BNX2_TDMA_TDMA_PCIE_CKSUM_CALCULATED (0xffffL<<0)
+#define BNX2_TDMA_TDMA_PCIE_CKSUM_EXPECTED (0xffffL<<16)
+
+#define BNX2_TDMA_TDMAQ 0x00005fc0
+#define BNX2_TDMA_FTQ_CMD 0x00005ff8
+#define BNX2_TDMA_FTQ_CMD_OFFSET (0x3ffL<<0)
+#define BNX2_TDMA_FTQ_CMD_WR_TOP (1L<<10)
+#define BNX2_TDMA_FTQ_CMD_WR_TOP_0 (0L<<10)
+#define BNX2_TDMA_FTQ_CMD_WR_TOP_1 (1L<<10)
+#define BNX2_TDMA_FTQ_CMD_SFT_RESET (1L<<25)
+#define BNX2_TDMA_FTQ_CMD_RD_DATA (1L<<26)
+#define BNX2_TDMA_FTQ_CMD_ADD_INTERVEN (1L<<27)
+#define BNX2_TDMA_FTQ_CMD_ADD_DATA (1L<<28)
+#define BNX2_TDMA_FTQ_CMD_INTERVENE_CLR (1L<<29)
+#define BNX2_TDMA_FTQ_CMD_POP (1L<<30)
+#define BNX2_TDMA_FTQ_CMD_BUSY (1L<<31)
+
+#define BNX2_TDMA_FTQ_CTL 0x00005ffc
+#define BNX2_TDMA_FTQ_CTL_INTERVENE (1L<<0)
+#define BNX2_TDMA_FTQ_CTL_OVERFLOW (1L<<1)
+#define BNX2_TDMA_FTQ_CTL_FORCE_INTERVENE (1L<<2)
+#define BNX2_TDMA_FTQ_CTL_MAX_DEPTH (0x3ffL<<12)
+#define BNX2_TDMA_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
+
+
+
+/*
+ * hc_reg definition
+ * offset: 0x6800
+ */
+#define BNX2_HC_COMMAND 0x00006800
+#define BNX2_HC_COMMAND_ENABLE (1L<<0)
+#define BNX2_HC_COMMAND_SKIP_ABORT (1L<<4)
+#define BNX2_HC_COMMAND_COAL_NOW (1L<<16)
+#define BNX2_HC_COMMAND_COAL_NOW_WO_INT (1L<<17)
+#define BNX2_HC_COMMAND_STATS_NOW (1L<<18)
+#define BNX2_HC_COMMAND_FORCE_INT (0x3L<<19)
+#define BNX2_HC_COMMAND_FORCE_INT_NULL (0L<<19)
+#define BNX2_HC_COMMAND_FORCE_INT_HIGH (1L<<19)
+#define BNX2_HC_COMMAND_FORCE_INT_LOW (2L<<19)
+#define BNX2_HC_COMMAND_FORCE_INT_FREE (3L<<19)
+#define BNX2_HC_COMMAND_CLR_STAT_NOW (1L<<21)
+#define BNX2_HC_COMMAND_MAIN_PWR_INT (1L<<22)
+#define BNX2_HC_COMMAND_COAL_ON_NEXT_EVENT (1L<<27)
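+
+/*
+ * COAL_NOW/COAL_NOW_WO_INT act as kick bits: OR-ing one into the
+ * current command value forces an immediate coalescing pass, with or
+ * without raising an interrupt (going by the names; an assumption, not
+ * stated in the original header).  A sketch with the hypothetical
+ * REG_WR() helper from earlier notes, hc_cmd being the cached command
+ * value:
+ *
+ *	REG_WR(bp, BNX2_HC_COMMAND,
+ *	       hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+ */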
+
+#define BNX2_HC_STATUS 0x00006804
+#define BNX2_HC_STATUS_MASTER_ABORT (1L<<0)
+#define BNX2_HC_STATUS_PARITY_ERROR_STATE (1L<<1)
+#define BNX2_HC_STATUS_PCI_CLK_CNT_STAT (1L<<16)
+#define BNX2_HC_STATUS_CORE_CLK_CNT_STAT (1L<<17)
+#define BNX2_HC_STATUS_NUM_STATUS_BLOCKS_STAT (1L<<18)
+#define BNX2_HC_STATUS_NUM_INT_GEN_STAT (1L<<19)
+#define BNX2_HC_STATUS_NUM_INT_MBOX_WR_STAT (1L<<20)
+#define BNX2_HC_STATUS_CORE_CLKS_TO_HW_INTACK_STAT (1L<<23)
+#define BNX2_HC_STATUS_CORE_CLKS_TO_SW_INTACK_STAT (1L<<24)
+#define BNX2_HC_STATUS_CORE_CLKS_DURING_SW_INTACK_STAT (1L<<25)
+
+#define BNX2_HC_CONFIG 0x00006808
+#define BNX2_HC_CONFIG_COLLECT_STATS (1L<<0)
+#define BNX2_HC_CONFIG_RX_TMR_MODE (1L<<1)
+#define BNX2_HC_CONFIG_TX_TMR_MODE (1L<<2)
+#define BNX2_HC_CONFIG_COM_TMR_MODE (1L<<3)
+#define BNX2_HC_CONFIG_CMD_TMR_MODE (1L<<4)
+#define BNX2_HC_CONFIG_STATISTIC_PRIORITY (1L<<5)
+#define BNX2_HC_CONFIG_STATUS_PRIORITY (1L<<6)
+#define BNX2_HC_CONFIG_STAT_MEM_ADDR (0xffL<<8)
+#define BNX2_HC_CONFIG_PER_MODE (1L<<16)
+#define BNX2_HC_CONFIG_ONE_SHOT (1L<<17)
+#define BNX2_HC_CONFIG_USE_INT_PARAM (1L<<18)
+#define BNX2_HC_CONFIG_SET_MASK_AT_RD (1L<<19)
+#define BNX2_HC_CONFIG_PER_COLLECT_LIMIT (0xfL<<20)
+#define BNX2_HC_CONFIG_SB_ADDR_INC (0x7L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_64B (0L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_128B (1L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_256B (2L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_512B (3L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_1024B (4L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_2048B (5L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_4096B (6L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_8192B (7L<<24)
+#define BNX2_HC_CONFIG_GEN_STAT_AVG_INTR (1L<<29)
+#define BNX2_HC_CONFIG_UNMASK_ALL (1L<<30)
+#define BNX2_HC_CONFIG_TX_SEL (1L<<31)
+
+#define BNX2_HC_ATTN_BITS_ENABLE 0x0000680c
+#define BNX2_HC_STATUS_ADDR_L 0x00006810
+#define BNX2_HC_STATUS_ADDR_H 0x00006814
+#define BNX2_HC_STATISTICS_ADDR_L 0x00006818
+#define BNX2_HC_STATISTICS_ADDR_H 0x0000681c
+#define BNX2_HC_TX_QUICK_CONS_TRIP 0x00006820
+#define BNX2_HC_TX_QUICK_CONS_TRIP_VALUE (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_INT (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP 0x00006824
+#define BNX2_HC_COMP_PROD_TRIP_VALUE (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_INT (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP 0x00006828
+#define BNX2_HC_RX_QUICK_CONS_TRIP_VALUE (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_INT (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS 0x0000682c
+#define BNX2_HC_RX_TICKS_VALUE (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_INT (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS 0x00006830
+#define BNX2_HC_TX_TICKS_VALUE (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_INT (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS 0x00006834
+#define BNX2_HC_COM_TICKS_VALUE (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_INT (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS 0x00006838
+#define BNX2_HC_CMD_TICKS_VALUE (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_INT (0x3ffL<<16)
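+
+/*
+ * Each of the *_TICKS registers above pairs a normal coalescing tick
+ * count (bits 9:0, the _VALUE field) with a during-interrupt count
+ * (bits 25:16, the _INT field), so programming one is a shift-and-or;
+ * tx_ticks/tx_ticks_int below are placeholder parameters:
+ *
+ *	REG_WR(bp, BNX2_HC_TX_TICKS, (tx_ticks_int << 16) | tx_ticks);
+ */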
+
+#define BNX2_HC_PERIODIC_TICKS 0x0000683c
+#define BNX2_HC_PERIODIC_TICKS_HC_PERIODIC_TICKS (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_HC_INT_PERIODIC_TICKS (0xffffL<<16)
+
+#define BNX2_HC_STAT_COLLECT_TICKS 0x00006840
+#define BNX2_HC_STAT_COLLECT_TICKS_HC_STAT_COLL_TICKS (0xffL<<4)
+
+#define BNX2_HC_STATS_TICKS 0x00006844
+#define BNX2_HC_STATS_TICKS_HC_STAT_TICKS (0xffffL<<8)
+
+#define BNX2_HC_STATS_INTERRUPT_STATUS 0x00006848
+#define BNX2_HC_STATS_INTERRUPT_STATUS_SB_STATUS (0x1ffL<<0)
+#define BNX2_HC_STATS_INTERRUPT_STATUS_INT_STATUS (0x1ffL<<16)
+
+#define BNX2_HC_STAT_MEM_DATA 0x0000684c
+#define BNX2_HC_STAT_GEN_SEL_0 0x00006850
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0 (0x7fL<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT0 (0L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT1 (1L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT2 (2L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT3 (3L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT4 (4L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT5 (5L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT6 (6L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT7 (7L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT8 (8L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT9 (9L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT10 (10L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT11 (11L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT0 (12L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT1 (13L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT2 (14L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT3 (15L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT4 (16L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT5 (17L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT6 (18L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT7 (19L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT0 (20L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT1 (21L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT2 (22L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT3 (23L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT4 (24L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT5 (25L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT6 (26L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT7 (27L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT8 (28L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT9 (29L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT10 (30L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT11 (31L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT0 (32L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT1 (33L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT2 (34L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT3 (35L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT0 (36L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT1 (37L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT2 (38L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT3 (39L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT4 (40L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT5 (41L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT6 (42L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT7 (43L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT0 (44L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT1 (45L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT2 (46L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT3 (47L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT4 (48L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT5 (49L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT6 (50L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT7 (51L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_PCI_CLK_CNT (52L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CORE_CLK_CNT (53L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS (54L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN (55L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR (56L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK (59L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK (60L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK (61L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCH_CMD_CNT (62L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCH_SLOT_CNT (63L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSCH_CMD_CNT (64L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSCH_SLOT_CNT (65L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT (66L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT (67L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT (68L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT (69L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT (70L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT (71L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT (72L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT (73L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT (74L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT (75L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT (76L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT (77L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT (78L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT (79L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT (80L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT (81L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT (82L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT (83L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT (84L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_READ_TRANSFERS_CNT (85L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_READ_DELAY_PCI_CLKS_CNT (86L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_READ_TRANSFERS_CNT (87L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_READ_DELAY_PCI_CLKS_CNT (88L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_READ_RETRY_AFTER_DATA_CNT (89L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_WRITE_TRANSFERS_CNT (90L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_WRITE_DELAY_PCI_CLKS_CNT (91L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_WRITE_TRANSFERS_CNT (92L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_WRITE_DELAY_PCI_CLKS_CNT (93L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_WRITE_RETRY_AFTER_DATA_CNT (94L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_WR_CNT64 (95L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_RD_CNT64 (96L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_ACC_STALL_CLKS (97L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_LOCK_STALL_CLKS (98L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MBQ_CTX_ACCESS_STAT (99L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MBQ_CTX_ACCESS64_STAT (100L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MBQ_PCI_STALL_STAT (101L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDR_FTQ_ENTRY_CNT (102L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDR_BURST_CNT (103L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMA_FTQ_ENTRY_CNT (104L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMA_BURST_CNT (105L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMA_FTQ_ENTRY_CNT (106L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMA_BURST_CNT (107L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUP_MATCH_CNT (108L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_POLL_PASS_CNT (109L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR1_CNT (110L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR2_CNT (111L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR3_CNT (112L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR4_CNT (113L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR5_CNT (114L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT0 (115L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT1 (116L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT2 (117L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT3 (118L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT4 (119L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT5 (120L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RBDC_PROC1_MISS (121L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RBDC_PROC2_MISS (122L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RBDC_BURST_CNT (127L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_1 (0x7fL<<8)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_2 (0x7fL<<16)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_3 (0x7fL<<24)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_XI (0xffL<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UMP_RX_FRAME_DROP_XI (52L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S0_XI (57L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S1_XI (58L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S2_XI (85L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S3_XI (86L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S4_XI (87L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S5_XI (88L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S6_XI (89L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S7_XI (90L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S8_XI (91L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S9_XI (92L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S10_XI (93L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MQ_IDB_OFLOW_XI (94L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_BLK_RD_CNT_XI (123L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_BLK_WR_CNT_XI (124L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_HITS_XI (125L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_MISSES_XI (126L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC1_XI (128L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC1_XI (129L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC1_XI (130L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC1_XI (131L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC1_XI (132L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC1_XI (133L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC2_XI (134L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC2_XI (135L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC2_XI (136L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC2_XI (137L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC2_XI (138L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC2_XI (139L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC3_XI (140L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC3_XI (141L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC3_XI (142L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC3_XI (143L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC3_XI (144L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC3_XI (145L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC4_XI (146L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC4_XI (147L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC4_XI (148L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC4_XI (149L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC4_XI (150L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC4_XI (151L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC5_XI (152L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC5_XI (153L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC5_XI (154L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC5_XI (155L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC5_XI (156L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC5_XI (157L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC6_XI (158L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC6_XI (159L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC6_XI (160L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC6_XI (161L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC6_XI (162L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC6_XI (163L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC7_XI (164L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC7_XI (165L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC7_XI (166L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC7_XI (167L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC7_XI (168L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC7_XI (169L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC8_XI (170L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC8_XI (171L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC8_XI (172L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC8_XI (173L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC8_XI (174L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC8_XI (175L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCS_CMD_CNT_XI (176L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCS_SLOT_CNT_XI (177L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCSQ_VALID_CNT_XI (178L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_1_XI (0xffL<<8)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_2_XI (0xffL<<16)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_3_XI (0xffL<<24)
+
+#define BNX2_HC_STAT_GEN_SEL_1 0x00006854
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_4 (0x7fL<<0)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_5 (0x7fL<<8)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_6 (0x7fL<<16)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_7 (0x7fL<<24)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_4_XI (0xffL<<0)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_5_XI (0xffL<<8)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_6_XI (0xffL<<16)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_7_XI (0xffL<<24)
+
+#define BNX2_HC_STAT_GEN_SEL_2 0x00006858
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_8 (0x7fL<<0)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_9 (0x7fL<<8)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_10 (0x7fL<<16)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_11 (0x7fL<<24)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_8_XI (0xffL<<0)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_9_XI (0xffL<<8)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_10_XI (0xffL<<16)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_11_XI (0xffL<<24)
+
+#define BNX2_HC_STAT_GEN_SEL_3 0x0000685c
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_12 (0x7fL<<0)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_13 (0x7fL<<8)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_14 (0x7fL<<16)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_15 (0x7fL<<24)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_12_XI (0xffL<<0)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_13_XI (0xffL<<8)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_14_XI (0xffL<<16)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_15_XI (0xffL<<24)
+
+#define BNX2_HC_STAT_GEN_STAT0 0x00006888
+#define BNX2_HC_STAT_GEN_STAT1 0x0000688c
+#define BNX2_HC_STAT_GEN_STAT2 0x00006890
+#define BNX2_HC_STAT_GEN_STAT3 0x00006894
+#define BNX2_HC_STAT_GEN_STAT4 0x00006898
+#define BNX2_HC_STAT_GEN_STAT5 0x0000689c
+#define BNX2_HC_STAT_GEN_STAT6 0x000068a0
+#define BNX2_HC_STAT_GEN_STAT7 0x000068a4
+#define BNX2_HC_STAT_GEN_STAT8 0x000068a8
+#define BNX2_HC_STAT_GEN_STAT9 0x000068ac
+#define BNX2_HC_STAT_GEN_STAT10 0x000068b0
+#define BNX2_HC_STAT_GEN_STAT11 0x000068b4
+#define BNX2_HC_STAT_GEN_STAT12 0x000068b8
+#define BNX2_HC_STAT_GEN_STAT13 0x000068bc
+#define BNX2_HC_STAT_GEN_STAT14 0x000068c0
+#define BNX2_HC_STAT_GEN_STAT15 0x000068c4
+#define BNX2_HC_STAT_GEN_STAT_AC0 0x000068c8
+#define BNX2_HC_STAT_GEN_STAT_AC1 0x000068cc
+#define BNX2_HC_STAT_GEN_STAT_AC2 0x000068d0
+#define BNX2_HC_STAT_GEN_STAT_AC3 0x000068d4
+#define BNX2_HC_STAT_GEN_STAT_AC4 0x000068d8
+#define BNX2_HC_STAT_GEN_STAT_AC5 0x000068dc
+#define BNX2_HC_STAT_GEN_STAT_AC6 0x000068e0
+#define BNX2_HC_STAT_GEN_STAT_AC7 0x000068e4
+#define BNX2_HC_STAT_GEN_STAT_AC8 0x000068e8
+#define BNX2_HC_STAT_GEN_STAT_AC9 0x000068ec
+#define BNX2_HC_STAT_GEN_STAT_AC10 0x000068f0
+#define BNX2_HC_STAT_GEN_STAT_AC11 0x000068f4
+#define BNX2_HC_STAT_GEN_STAT_AC12 0x000068f8
+#define BNX2_HC_STAT_GEN_STAT_AC13 0x000068fc
+#define BNX2_HC_STAT_GEN_STAT_AC14 0x00006900
+#define BNX2_HC_STAT_GEN_STAT_AC15 0x00006904
+#define BNX2_HC_STAT_GEN_STAT_AC 0x000068c8
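+
+/*
+ * Sketch, not part of the original patch: a counter is chosen by writing
+ * its selector code into one of the sixteen GEN_SEL fields above, and its
+ * running value is read back from the matching GEN_STAT register (the _AC
+ * aliases clear on read).  GEN_STAT0..15 are consecutive 32-bit registers;
+ * REG_RD is assumed to be the driver's register read accessor.
+ */
+#define BNX2_HC_GEN_STAT_RD(bp, idx)				\
+	REG_RD((bp), BNX2_HC_STAT_GEN_STAT0 + ((idx) << 2))
+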
+#define BNX2_HC_VIS 0x00006908
+#define BNX2_HC_VIS_STAT_BUILD_STATE (0xfL<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_IDLE (0L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_START (1L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_REQUEST (2L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_UPDATE64 (3L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_UPDATE32 (4L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_UPDATE_DONE (5L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_DMA (6L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_CONTROL (7L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_LOW (8L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_HIGH (9L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_DATA (10L<<0)
+#define BNX2_HC_VIS_DMA_STAT_STATE (0xfL<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_IDLE (0L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_STATUS_PARAM (1L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_STATUS_DMA (2L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_WRITE_COMP (3L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_COMP (4L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_STATISTIC_PARAM (5L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_STATISTIC_DMA (6L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_WRITE_COMP_1 (7L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_WRITE_COMP_2 (8L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_WAIT (9L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_ABORT (15L<<8)
+#define BNX2_HC_VIS_DMA_MSI_STATE (0x7L<<12)
+#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE (0x3L<<15)
+#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE_IDLE (0L<<15)
+#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE_COUNT (1L<<15)
+#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE_START (2L<<15)
+
+#define BNX2_HC_VIS_1 0x0000690c
+#define BNX2_HC_VIS_1_HW_INTACK_STATE (1L<<4)
+#define BNX2_HC_VIS_1_HW_INTACK_STATE_IDLE (0L<<4)
+#define BNX2_HC_VIS_1_HW_INTACK_STATE_COUNT (1L<<4)
+#define BNX2_HC_VIS_1_SW_INTACK_STATE (1L<<5)
+#define BNX2_HC_VIS_1_SW_INTACK_STATE_IDLE (0L<<5)
+#define BNX2_HC_VIS_1_SW_INTACK_STATE_COUNT (1L<<5)
+#define BNX2_HC_VIS_1_DURING_SW_INTACK_STATE (1L<<6)
+#define BNX2_HC_VIS_1_DURING_SW_INTACK_STATE_IDLE (0L<<6)
+#define BNX2_HC_VIS_1_DURING_SW_INTACK_STATE_COUNT (1L<<6)
+#define BNX2_HC_VIS_1_MAILBOX_COUNT_STATE (1L<<7)
+#define BNX2_HC_VIS_1_MAILBOX_COUNT_STATE_IDLE (0L<<7)
+#define BNX2_HC_VIS_1_MAILBOX_COUNT_STATE_COUNT (1L<<7)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE (0xfL<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_IDLE (0L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_DMA (1L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_UPDATE (2L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_ASSIGN (3L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_WAIT (4L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_REG_UPDATE (5L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_REG_ASSIGN (6L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_REG_WAIT (7L<<17)
+#define BNX2_HC_VIS_1_RAM_WR_ARB_STATE (0x3L<<21)
+#define BNX2_HC_VIS_1_RAM_WR_ARB_STATE_NORMAL (0L<<21)
+#define BNX2_HC_VIS_1_RAM_WR_ARB_STATE_CLEAR (1L<<21)
+#define BNX2_HC_VIS_1_INT_GEN_STATE (1L<<23)
+#define BNX2_HC_VIS_1_INT_GEN_STATE_IDLE		 (0L<<23)
+#define BNX2_HC_VIS_1_INT_GEN_STATE_INTERRUPT		 (1L<<23)
+#define BNX2_HC_VIS_1_STAT_CHAN_ID (0x7L<<24)
+#define BNX2_HC_VIS_1_INT_B (1L<<27)
+
+#define BNX2_HC_DEBUG_VECT_PEEK 0x00006910
+#define BNX2_HC_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0)
+#define BNX2_HC_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11)
+#define BNX2_HC_DEBUG_VECT_PEEK_1_SEL (0xfL<<12)
+#define BNX2_HC_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16)
+#define BNX2_HC_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27)
+#define BNX2_HC_DEBUG_VECT_PEEK_2_SEL (0xfL<<28)
+
+#define BNX2_HC_COALESCE_NOW 0x00006914
+#define BNX2_HC_COALESCE_NOW_COAL_NOW (0x1ffL<<1)
+#define BNX2_HC_COALESCE_NOW_COAL_NOW_WO_INT (0x1ffL<<11)
+#define BNX2_HC_COALESCE_NOW_COAL_ON_NXT_EVENT (0x1ffL<<21)
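+
+/*
+ * Note, not part of the original patch: each 9-bit field above carries one
+ * bit per status block, so setting bit (1 + vec) of COAL_NOW presumably
+ * forces an immediate coalescence event for that vector, e.g.
+ *
+ *	REG_WR(bp, BNX2_HC_COALESCE_NOW, 1L << (1 + vec));
+ *
+ * vec and REG_WR are illustrative assumptions, not driver API taken from
+ * this patch.
+ */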
+
+#define BNX2_HC_MSIX_BIT_VECTOR 0x00006918
+#define BNX2_HC_MSIX_BIT_VECTOR_VAL (0x1ffL<<0)
+
+#define BNX2_HC_SB_CONFIG_1 0x00006a00
+#define BNX2_HC_SB_CONFIG_1_RX_TMR_MODE (1L<<1)
+#define BNX2_HC_SB_CONFIG_1_TX_TMR_MODE (1L<<2)
+#define BNX2_HC_SB_CONFIG_1_COM_TMR_MODE (1L<<3)
+#define BNX2_HC_SB_CONFIG_1_CMD_TMR_MODE (1L<<4)
+#define BNX2_HC_SB_CONFIG_1_PER_MODE (1L<<16)
+#define BNX2_HC_SB_CONFIG_1_ONE_SHOT (1L<<17)
+#define BNX2_HC_SB_CONFIG_1_USE_INT_PARAM (1L<<18)
+#define BNX2_HC_SB_CONFIG_1_PER_COLLECT_LIMIT (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_1 0x00006a04
+#define BNX2_HC_TX_QUICK_CONS_TRIP_1_VALUE (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_1_INT (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_1 0x00006a08
+#define BNX2_HC_COMP_PROD_TRIP_1_VALUE (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_1_INT (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_1 0x00006a0c
+#define BNX2_HC_RX_QUICK_CONS_TRIP_1_VALUE (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_1_INT (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_1 0x00006a10
+#define BNX2_HC_RX_TICKS_1_VALUE (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_1_INT (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_1 0x00006a14
+#define BNX2_HC_TX_TICKS_1_VALUE (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_1_INT (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_1 0x00006a18
+#define BNX2_HC_COM_TICKS_1_VALUE (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_1_INT (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_1 0x00006a1c
+#define BNX2_HC_CMD_TICKS_1_VALUE (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_1_INT (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_1 0x00006a20
+#define BNX2_HC_PERIODIC_TICKS_1_HC_PERIODIC_TICKS (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_1_HC_INT_PERIODIC_TICKS (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_2 0x00006a24
+#define BNX2_HC_SB_CONFIG_2_RX_TMR_MODE (1L<<1)
+#define BNX2_HC_SB_CONFIG_2_TX_TMR_MODE (1L<<2)
+#define BNX2_HC_SB_CONFIG_2_COM_TMR_MODE (1L<<3)
+#define BNX2_HC_SB_CONFIG_2_CMD_TMR_MODE (1L<<4)
+#define BNX2_HC_SB_CONFIG_2_PER_MODE (1L<<16)
+#define BNX2_HC_SB_CONFIG_2_ONE_SHOT (1L<<17)
+#define BNX2_HC_SB_CONFIG_2_USE_INT_PARAM (1L<<18)
+#define BNX2_HC_SB_CONFIG_2_PER_COLLECT_LIMIT (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_2 0x00006a28
+#define BNX2_HC_TX_QUICK_CONS_TRIP_2_VALUE (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_2_INT (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_2 0x00006a2c
+#define BNX2_HC_COMP_PROD_TRIP_2_VALUE (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_2_INT (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_2 0x00006a30
+#define BNX2_HC_RX_QUICK_CONS_TRIP_2_VALUE (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_2_INT (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_2 0x00006a34
+#define BNX2_HC_RX_TICKS_2_VALUE (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_2_INT (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_2 0x00006a38
+#define BNX2_HC_TX_TICKS_2_VALUE (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_2_INT (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_2 0x00006a3c
+#define BNX2_HC_COM_TICKS_2_VALUE (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_2_INT (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_2 0x00006a40
+#define BNX2_HC_CMD_TICKS_2_VALUE (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_2_INT (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_2 0x00006a44
+#define BNX2_HC_PERIODIC_TICKS_2_HC_PERIODIC_TICKS (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_2_HC_INT_PERIODIC_TICKS (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_3 0x00006a48
+#define BNX2_HC_SB_CONFIG_3_RX_TMR_MODE (1L<<1)
+#define BNX2_HC_SB_CONFIG_3_TX_TMR_MODE (1L<<2)
+#define BNX2_HC_SB_CONFIG_3_COM_TMR_MODE (1L<<3)
+#define BNX2_HC_SB_CONFIG_3_CMD_TMR_MODE (1L<<4)
+#define BNX2_HC_SB_CONFIG_3_PER_MODE (1L<<16)
+#define BNX2_HC_SB_CONFIG_3_ONE_SHOT (1L<<17)
+#define BNX2_HC_SB_CONFIG_3_USE_INT_PARAM (1L<<18)
+#define BNX2_HC_SB_CONFIG_3_PER_COLLECT_LIMIT (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_3 0x00006a4c
+#define BNX2_HC_TX_QUICK_CONS_TRIP_3_VALUE (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_3_INT (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_3 0x00006a50
+#define BNX2_HC_COMP_PROD_TRIP_3_VALUE (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_3_INT (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_3 0x00006a54
+#define BNX2_HC_RX_QUICK_CONS_TRIP_3_VALUE (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_3_INT (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_3 0x00006a58
+#define BNX2_HC_RX_TICKS_3_VALUE (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_3_INT (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_3 0x00006a5c
+#define BNX2_HC_TX_TICKS_3_VALUE (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_3_INT (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_3 0x00006a60
+#define BNX2_HC_COM_TICKS_3_VALUE (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_3_INT (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_3 0x00006a64
+#define BNX2_HC_CMD_TICKS_3_VALUE (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_3_INT (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_3 0x00006a68
+#define BNX2_HC_PERIODIC_TICKS_3_HC_PERIODIC_TICKS (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_3_HC_INT_PERIODIC_TICKS (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_4 0x00006a6c
+#define BNX2_HC_SB_CONFIG_4_RX_TMR_MODE (1L<<1)
+#define BNX2_HC_SB_CONFIG_4_TX_TMR_MODE (1L<<2)
+#define BNX2_HC_SB_CONFIG_4_COM_TMR_MODE (1L<<3)
+#define BNX2_HC_SB_CONFIG_4_CMD_TMR_MODE (1L<<4)
+#define BNX2_HC_SB_CONFIG_4_PER_MODE (1L<<16)
+#define BNX2_HC_SB_CONFIG_4_ONE_SHOT (1L<<17)
+#define BNX2_HC_SB_CONFIG_4_USE_INT_PARAM (1L<<18)
+#define BNX2_HC_SB_CONFIG_4_PER_COLLECT_LIMIT (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_4 0x00006a70
+#define BNX2_HC_TX_QUICK_CONS_TRIP_4_VALUE (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_4_INT (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_4 0x00006a74
+#define BNX2_HC_COMP_PROD_TRIP_4_VALUE (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_4_INT (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_4 0x00006a78
+#define BNX2_HC_RX_QUICK_CONS_TRIP_4_VALUE (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_4_INT (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_4 0x00006a7c
+#define BNX2_HC_RX_TICKS_4_VALUE (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_4_INT (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_4 0x00006a80
+#define BNX2_HC_TX_TICKS_4_VALUE (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_4_INT (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_4 0x00006a84
+#define BNX2_HC_COM_TICKS_4_VALUE (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_4_INT (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_4 0x00006a88
+#define BNX2_HC_CMD_TICKS_4_VALUE (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_4_INT (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_4 0x00006a8c
+#define BNX2_HC_PERIODIC_TICKS_4_HC_PERIODIC_TICKS (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_4_HC_INT_PERIODIC_TICKS (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_5 0x00006a90
+#define BNX2_HC_SB_CONFIG_5_RX_TMR_MODE (1L<<1)
+#define BNX2_HC_SB_CONFIG_5_TX_TMR_MODE (1L<<2)
+#define BNX2_HC_SB_CONFIG_5_COM_TMR_MODE (1L<<3)
+#define BNX2_HC_SB_CONFIG_5_CMD_TMR_MODE (1L<<4)
+#define BNX2_HC_SB_CONFIG_5_PER_MODE (1L<<16)
+#define BNX2_HC_SB_CONFIG_5_ONE_SHOT (1L<<17)
+#define BNX2_HC_SB_CONFIG_5_USE_INT_PARAM (1L<<18)
+#define BNX2_HC_SB_CONFIG_5_PER_COLLECT_LIMIT (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_5 0x00006a94
+#define BNX2_HC_TX_QUICK_CONS_TRIP_5_VALUE (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_5_INT (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_5 0x00006a98
+#define BNX2_HC_COMP_PROD_TRIP_5_VALUE (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_5_INT (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_5 0x00006a9c
+#define BNX2_HC_RX_QUICK_CONS_TRIP_5_VALUE (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_5_INT (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_5 0x00006aa0
+#define BNX2_HC_RX_TICKS_5_VALUE (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_5_INT (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_5 0x00006aa4
+#define BNX2_HC_TX_TICKS_5_VALUE (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_5_INT (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_5 0x00006aa8
+#define BNX2_HC_COM_TICKS_5_VALUE (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_5_INT (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_5 0x00006aac
+#define BNX2_HC_CMD_TICKS_5_VALUE (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_5_INT (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_5 0x00006ab0
+#define BNX2_HC_PERIODIC_TICKS_5_HC_PERIODIC_TICKS (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_5_HC_INT_PERIODIC_TICKS (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_6 0x00006ab4
+#define BNX2_HC_SB_CONFIG_6_RX_TMR_MODE (1L<<1)
+#define BNX2_HC_SB_CONFIG_6_TX_TMR_MODE (1L<<2)
+#define BNX2_HC_SB_CONFIG_6_COM_TMR_MODE (1L<<3)
+#define BNX2_HC_SB_CONFIG_6_CMD_TMR_MODE (1L<<4)
+#define BNX2_HC_SB_CONFIG_6_PER_MODE (1L<<16)
+#define BNX2_HC_SB_CONFIG_6_ONE_SHOT (1L<<17)
+#define BNX2_HC_SB_CONFIG_6_USE_INT_PARAM (1L<<18)
+#define BNX2_HC_SB_CONFIG_6_PER_COLLECT_LIMIT (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_6 0x00006ab8
+#define BNX2_HC_TX_QUICK_CONS_TRIP_6_VALUE (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_6_INT (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_6 0x00006abc
+#define BNX2_HC_COMP_PROD_TRIP_6_VALUE (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_6_INT (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_6 0x00006ac0
+#define BNX2_HC_RX_QUICK_CONS_TRIP_6_VALUE (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_6_INT (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_6 0x00006ac4
+#define BNX2_HC_RX_TICKS_6_VALUE (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_6_INT (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_6 0x00006ac8
+#define BNX2_HC_TX_TICKS_6_VALUE (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_6_INT (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_6 0x00006acc
+#define BNX2_HC_COM_TICKS_6_VALUE (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_6_INT (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_6 0x00006ad0
+#define BNX2_HC_CMD_TICKS_6_VALUE (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_6_INT (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_6 0x00006ad4
+#define BNX2_HC_PERIODIC_TICKS_6_HC_PERIODIC_TICKS (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_6_HC_INT_PERIODIC_TICKS (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_7 0x00006ad8
+#define BNX2_HC_SB_CONFIG_7_RX_TMR_MODE (1L<<1)
+#define BNX2_HC_SB_CONFIG_7_TX_TMR_MODE (1L<<2)
+#define BNX2_HC_SB_CONFIG_7_COM_TMR_MODE (1L<<3)
+#define BNX2_HC_SB_CONFIG_7_CMD_TMR_MODE (1L<<4)
+#define BNX2_HC_SB_CONFIG_7_PER_MODE (1L<<16)
+#define BNX2_HC_SB_CONFIG_7_ONE_SHOT (1L<<17)
+#define BNX2_HC_SB_CONFIG_7_USE_INT_PARAM (1L<<18)
+#define BNX2_HC_SB_CONFIG_7_PER_COLLECT_LIMIT (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_7 0x00006adc
+#define BNX2_HC_TX_QUICK_CONS_TRIP_7_VALUE (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_7_INT (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_7 0x00006ae0
+#define BNX2_HC_COMP_PROD_TRIP_7_VALUE (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_7_INT (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_7 0x00006ae4
+#define BNX2_HC_RX_QUICK_CONS_TRIP_7_VALUE (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_7_INT (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_7 0x00006ae8
+#define BNX2_HC_RX_TICKS_7_VALUE (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_7_INT (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_7 0x00006aec
+#define BNX2_HC_TX_TICKS_7_VALUE (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_7_INT (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_7 0x00006af0
+#define BNX2_HC_COM_TICKS_7_VALUE (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_7_INT (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_7 0x00006af4
+#define BNX2_HC_CMD_TICKS_7_VALUE (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_7_INT (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_7 0x00006af8
+#define BNX2_HC_PERIODIC_TICKS_7_HC_PERIODIC_TICKS (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_7_HC_INT_PERIODIC_TICKS (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_8 0x00006afc
+#define BNX2_HC_SB_CONFIG_8_RX_TMR_MODE (1L<<1)
+#define BNX2_HC_SB_CONFIG_8_TX_TMR_MODE (1L<<2)
+#define BNX2_HC_SB_CONFIG_8_COM_TMR_MODE (1L<<3)
+#define BNX2_HC_SB_CONFIG_8_CMD_TMR_MODE (1L<<4)
+#define BNX2_HC_SB_CONFIG_8_PER_MODE (1L<<16)
+#define BNX2_HC_SB_CONFIG_8_ONE_SHOT (1L<<17)
+#define BNX2_HC_SB_CONFIG_8_USE_INT_PARAM (1L<<18)
+#define BNX2_HC_SB_CONFIG_8_PER_COLLECT_LIMIT (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_8 0x00006b00
+#define BNX2_HC_TX_QUICK_CONS_TRIP_8_VALUE (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_8_INT (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_8 0x00006b04
+#define BNX2_HC_COMP_PROD_TRIP_8_VALUE (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_8_INT (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_8 0x00006b08
+#define BNX2_HC_RX_QUICK_CONS_TRIP_8_VALUE (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_8_INT (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_8 0x00006b0c
+#define BNX2_HC_RX_TICKS_8_VALUE (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_8_INT (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_8 0x00006b10
+#define BNX2_HC_TX_TICKS_8_VALUE (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_8_INT (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_8 0x00006b14
+#define BNX2_HC_COM_TICKS_8_VALUE (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_8_INT (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_8 0x00006b18
+#define BNX2_HC_CMD_TICKS_8_VALUE (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_8_INT (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_8 0x00006b1c
+#define BNX2_HC_PERIODIC_TICKS_8_HC_PERIODIC_TICKS (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_8_HC_INT_PERIODIC_TICKS (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_SIZE (BNX2_HC_SB_CONFIG_2 - BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_COMP_PROD_TRIP_OFF (BNX2_HC_COMP_PROD_TRIP_1 - \
+ BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_COM_TICKS_OFF (BNX2_HC_COM_TICKS_1 - BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_CMD_TICKS_OFF (BNX2_HC_CMD_TICKS_1 - BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_OFF (BNX2_HC_TX_QUICK_CONS_TRIP_1 - \
+ BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_TX_TICKS_OFF (BNX2_HC_TX_TICKS_1 - BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_OFF (BNX2_HC_RX_QUICK_CONS_TRIP_1 - \
+ BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_RX_TICKS_OFF (BNX2_HC_RX_TICKS_1 - BNX2_HC_SB_CONFIG_1)
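+
+/*
+ * Sketch, not part of the original patch: the numbered per-vector register
+ * sets above repeat at a fixed stride, so the *_OFF byte offsets let vector
+ * i (1-based) be addressed generically instead of by its numbered name:
+ */
+#define BNX2_HC_SB_REG(vec, off)				\
+	(BNX2_HC_SB_CONFIG_1 +					\
+	 ((vec) - 1) * BNX2_HC_SB_CONFIG_SIZE + (off))
+/* e.g. REG_WR(bp, BNX2_HC_SB_REG(i, BNX2_HC_RX_TICKS_OFF), rx_ticks); */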
+
+
+/*
+ * txp_reg definition
+ * offset: 0x40000
+ */
+#define BNX2_TXP_CPU_MODE 0x00045000
+#define BNX2_TXP_CPU_MODE_LOCAL_RST (1L<<0)
+#define BNX2_TXP_CPU_MODE_STEP_ENA (1L<<1)
+#define BNX2_TXP_CPU_MODE_PAGE_0_DATA_ENA (1L<<2)
+#define BNX2_TXP_CPU_MODE_PAGE_0_INST_ENA (1L<<3)
+#define BNX2_TXP_CPU_MODE_MSG_BIT1 (1L<<6)
+#define BNX2_TXP_CPU_MODE_INTERRUPT_ENA (1L<<7)
+#define BNX2_TXP_CPU_MODE_SOFT_HALT (1L<<10)
+#define BNX2_TXP_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11)
+#define BNX2_TXP_CPU_MODE_BAD_INST_HALT_ENA (1L<<12)
+#define BNX2_TXP_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13)
+#define BNX2_TXP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15)
+
+#define BNX2_TXP_CPU_STATE 0x00045004
+#define BNX2_TXP_CPU_STATE_BREAKPOINT (1L<<0)
+#define BNX2_TXP_CPU_STATE_BAD_INST_HALTED (1L<<2)
+#define BNX2_TXP_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3)
+#define BNX2_TXP_CPU_STATE_PAGE_0_INST_HALTED (1L<<4)
+#define BNX2_TXP_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5)
+#define BNX2_TXP_CPU_STATE_BAD_PC_HALTED (1L<<6)
+#define BNX2_TXP_CPU_STATE_ALIGN_HALTED (1L<<7)
+#define BNX2_TXP_CPU_STATE_FIO_ABORT_HALTED (1L<<8)
+#define BNX2_TXP_CPU_STATE_SOFT_HALTED (1L<<10)
+#define BNX2_TXP_CPU_STATE_SPAD_UNDERFLOW (1L<<11)
+#define BNX2_TXP_CPU_STATE_INTERRUPT			 (1L<<12)
+#define BNX2_TXP_CPU_STATE_DATA_ACCESS_STALL (1L<<14)
+#define BNX2_TXP_CPU_STATE_INST_FETCH_STALL (1L<<15)
+#define BNX2_TXP_CPU_STATE_BLOCKED_READ (1L<<31)
+
+#define BNX2_TXP_CPU_EVENT_MASK 0x00045008
+#define BNX2_TXP_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0)
+#define BNX2_TXP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2)
+#define BNX2_TXP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3)
+#define BNX2_TXP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4)
+#define BNX2_TXP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5)
+#define BNX2_TXP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6)
+#define BNX2_TXP_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7)
+#define BNX2_TXP_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8)
+#define BNX2_TXP_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10)
+#define BNX2_TXP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11)
+#define BNX2_TXP_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12)
+
+#define BNX2_TXP_CPU_PROGRAM_COUNTER 0x0004501c
+#define BNX2_TXP_CPU_INSTRUCTION 0x00045020
+#define BNX2_TXP_CPU_DATA_ACCESS 0x00045024
+#define BNX2_TXP_CPU_INTERRUPT_ENABLE 0x00045028
+#define BNX2_TXP_CPU_INTERRUPT_VECTOR 0x0004502c
+#define BNX2_TXP_CPU_INTERRUPT_SAVED_PC 0x00045030
+#define BNX2_TXP_CPU_HW_BREAKPOINT 0x00045034
+#define BNX2_TXP_CPU_HW_BREAKPOINT_DISABLE (1L<<0)
+#define BNX2_TXP_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2)
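+
+/*
+ * Sketch, not part of the original patch: each on-chip processor exposes
+ * this same MODE/STATE interface, so halting the TXP before a firmware
+ * load looks roughly like the following (REG_RD/REG_WR assumed to be the
+ * driver's register accessors):
+ *
+ *	REG_WR(bp, BNX2_TXP_CPU_MODE,
+ *	       REG_RD(bp, BNX2_TXP_CPU_MODE) | BNX2_TXP_CPU_MODE_SOFT_HALT);
+ *	REG_WR(bp, BNX2_TXP_CPU_STATE, 0xffffffff);	(clear halt causes)
+ */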
+
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK 0x00045038
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0)
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11)
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12)
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16)
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27)
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28)
+
+#define BNX2_TXP_CPU_LAST_BRANCH_ADDR 0x00045048
+#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1)
+#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1)
+#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1)
+#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2)
+
+#define BNX2_TXP_CPU_REG_FILE 0x00045200
+#define BNX2_TXP_TXPQ 0x000453c0
+#define BNX2_TXP_FTQ_CMD 0x000453f8
+#define BNX2_TXP_FTQ_CMD_OFFSET (0x3ffL<<0)
+#define BNX2_TXP_FTQ_CMD_WR_TOP (1L<<10)
+#define BNX2_TXP_FTQ_CMD_WR_TOP_0 (0L<<10)
+#define BNX2_TXP_FTQ_CMD_WR_TOP_1 (1L<<10)
+#define BNX2_TXP_FTQ_CMD_SFT_RESET (1L<<25)
+#define BNX2_TXP_FTQ_CMD_RD_DATA (1L<<26)
+#define BNX2_TXP_FTQ_CMD_ADD_INTERVEN (1L<<27)
+#define BNX2_TXP_FTQ_CMD_ADD_DATA (1L<<28)
+#define BNX2_TXP_FTQ_CMD_INTERVENE_CLR (1L<<29)
+#define BNX2_TXP_FTQ_CMD_POP (1L<<30)
+#define BNX2_TXP_FTQ_CMD_BUSY (1L<<31)
+
+#define BNX2_TXP_FTQ_CTL 0x000453fc
+#define BNX2_TXP_FTQ_CTL_INTERVENE (1L<<0)
+#define BNX2_TXP_FTQ_CTL_OVERFLOW (1L<<1)
+#define BNX2_TXP_FTQ_CTL_FORCE_INTERVENE (1L<<2)
+#define BNX2_TXP_FTQ_CTL_MAX_DEPTH (0x3ffL<<12)
+#define BNX2_TXP_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
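+
+/*
+ * Sketch, not part of the original patch: CUR_DEPTH above reports the live
+ * occupancy of the flow-through queue; extracting it is a mask and shift
+ * (REG_RD assumed to be the driver's register read accessor):
+ *
+ *	depth = (REG_RD(bp, BNX2_TXP_FTQ_CTL) &
+ *		 BNX2_TXP_FTQ_CTL_CUR_DEPTH) >> 22;
+ */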
+
+#define BNX2_TXP_SCRATCH 0x00060000
+
+
+/*
+ * tpat_reg definition
+ * offset: 0x80000
+ */
+#define BNX2_TPAT_CPU_MODE 0x00085000
+#define BNX2_TPAT_CPU_MODE_LOCAL_RST (1L<<0)
+#define BNX2_TPAT_CPU_MODE_STEP_ENA (1L<<1)
+#define BNX2_TPAT_CPU_MODE_PAGE_0_DATA_ENA (1L<<2)
+#define BNX2_TPAT_CPU_MODE_PAGE_0_INST_ENA (1L<<3)
+#define BNX2_TPAT_CPU_MODE_MSG_BIT1 (1L<<6)
+#define BNX2_TPAT_CPU_MODE_INTERRUPT_ENA (1L<<7)
+#define BNX2_TPAT_CPU_MODE_SOFT_HALT (1L<<10)
+#define BNX2_TPAT_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11)
+#define BNX2_TPAT_CPU_MODE_BAD_INST_HALT_ENA (1L<<12)
+#define BNX2_TPAT_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13)
+#define BNX2_TPAT_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15)
+
+#define BNX2_TPAT_CPU_STATE 0x00085004
+#define BNX2_TPAT_CPU_STATE_BREAKPOINT (1L<<0)
+#define BNX2_TPAT_CPU_STATE_BAD_INST_HALTED (1L<<2)
+#define BNX2_TPAT_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3)
+#define BNX2_TPAT_CPU_STATE_PAGE_0_INST_HALTED (1L<<4)
+#define BNX2_TPAT_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5)
+#define BNX2_TPAT_CPU_STATE_BAD_PC_HALTED (1L<<6)
+#define BNX2_TPAT_CPU_STATE_ALIGN_HALTED (1L<<7)
+#define BNX2_TPAT_CPU_STATE_FIO_ABORT_HALTED (1L<<8)
+#define BNX2_TPAT_CPU_STATE_SOFT_HALTED (1L<<10)
+#define BNX2_TPAT_CPU_STATE_SPAD_UNDERFLOW (1L<<11)
+#define BNX2_TPAT_CPU_STATE_INTERRUPT			 (1L<<12)
+#define BNX2_TPAT_CPU_STATE_DATA_ACCESS_STALL (1L<<14)
+#define BNX2_TPAT_CPU_STATE_INST_FETCH_STALL (1L<<15)
+#define BNX2_TPAT_CPU_STATE_BLOCKED_READ (1L<<31)
+
+#define BNX2_TPAT_CPU_EVENT_MASK 0x00085008
+#define BNX2_TPAT_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0)
+#define BNX2_TPAT_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2)
+#define BNX2_TPAT_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3)
+#define BNX2_TPAT_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4)
+#define BNX2_TPAT_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5)
+#define BNX2_TPAT_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6)
+#define BNX2_TPAT_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7)
+#define BNX2_TPAT_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8)
+#define BNX2_TPAT_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10)
+#define BNX2_TPAT_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11)
+#define BNX2_TPAT_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12)
+
+#define BNX2_TPAT_CPU_PROGRAM_COUNTER 0x0008501c
+#define BNX2_TPAT_CPU_INSTRUCTION 0x00085020
+#define BNX2_TPAT_CPU_DATA_ACCESS 0x00085024
+#define BNX2_TPAT_CPU_INTERRUPT_ENABLE 0x00085028
+#define BNX2_TPAT_CPU_INTERRUPT_VECTOR 0x0008502c
+#define BNX2_TPAT_CPU_INTERRUPT_SAVED_PC 0x00085030
+#define BNX2_TPAT_CPU_HW_BREAKPOINT 0x00085034
+#define BNX2_TPAT_CPU_HW_BREAKPOINT_DISABLE (1L<<0)
+#define BNX2_TPAT_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2)
+
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK 0x00085038
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0)
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11)
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12)
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16)
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27)
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28)
+
+#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR 0x00085048
+#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1)
+#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1)
+#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1)
+#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2)
+
+#define BNX2_TPAT_CPU_REG_FILE 0x00085200
+#define BNX2_TPAT_TPATQ 0x000853c0
+#define BNX2_TPAT_FTQ_CMD 0x000853f8
+#define BNX2_TPAT_FTQ_CMD_OFFSET (0x3ffL<<0)
+#define BNX2_TPAT_FTQ_CMD_WR_TOP (1L<<10)
+#define BNX2_TPAT_FTQ_CMD_WR_TOP_0 (0L<<10)
+#define BNX2_TPAT_FTQ_CMD_WR_TOP_1 (1L<<10)
+#define BNX2_TPAT_FTQ_CMD_SFT_RESET (1L<<25)
+#define BNX2_TPAT_FTQ_CMD_RD_DATA (1L<<26)
+#define BNX2_TPAT_FTQ_CMD_ADD_INTERVEN (1L<<27)
+#define BNX2_TPAT_FTQ_CMD_ADD_DATA (1L<<28)
+#define BNX2_TPAT_FTQ_CMD_INTERVENE_CLR (1L<<29)
+#define BNX2_TPAT_FTQ_CMD_POP (1L<<30)
+#define BNX2_TPAT_FTQ_CMD_BUSY (1L<<31)
+
+#define BNX2_TPAT_FTQ_CTL 0x000853fc
+#define BNX2_TPAT_FTQ_CTL_INTERVENE (1L<<0)
+#define BNX2_TPAT_FTQ_CTL_OVERFLOW (1L<<1)
+#define BNX2_TPAT_FTQ_CTL_FORCE_INTERVENE (1L<<2)
+#define BNX2_TPAT_FTQ_CTL_MAX_DEPTH (0x3ffL<<12)
+#define BNX2_TPAT_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
+
+#define BNX2_TPAT_SCRATCH 0x000a0000
+
+
+/*
+ * rxp_reg definition
+ * offset: 0xc0000
+ */
+#define BNX2_RXP_CPU_MODE 0x000c5000
+#define BNX2_RXP_CPU_MODE_LOCAL_RST (1L<<0)
+#define BNX2_RXP_CPU_MODE_STEP_ENA (1L<<1)
+#define BNX2_RXP_CPU_MODE_PAGE_0_DATA_ENA (1L<<2)
+#define BNX2_RXP_CPU_MODE_PAGE_0_INST_ENA (1L<<3)
+#define BNX2_RXP_CPU_MODE_MSG_BIT1 (1L<<6)
+#define BNX2_RXP_CPU_MODE_INTERRUPT_ENA (1L<<7)
+#define BNX2_RXP_CPU_MODE_SOFT_HALT (1L<<10)
+#define BNX2_RXP_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11)
+#define BNX2_RXP_CPU_MODE_BAD_INST_HALT_ENA (1L<<12)
+#define BNX2_RXP_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13)
+#define BNX2_RXP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15)
+
+#define BNX2_RXP_CPU_STATE 0x000c5004
+#define BNX2_RXP_CPU_STATE_BREAKPOINT (1L<<0)
+#define BNX2_RXP_CPU_STATE_BAD_INST_HALTED (1L<<2)
+#define BNX2_RXP_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3)
+#define BNX2_RXP_CPU_STATE_PAGE_0_INST_HALTED (1L<<4)
+#define BNX2_RXP_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5)
+#define BNX2_RXP_CPU_STATE_BAD_PC_HALTED (1L<<6)
+#define BNX2_RXP_CPU_STATE_ALIGN_HALTED (1L<<7)
+#define BNX2_RXP_CPU_STATE_FIO_ABORT_HALTED (1L<<8)
+#define BNX2_RXP_CPU_STATE_SOFT_HALTED (1L<<10)
+#define BNX2_RXP_CPU_STATE_SPAD_UNDERFLOW (1L<<11)
+#define BNX2_RXP_CPU_STATE_INTERRUPT			 (1L<<12)
+#define BNX2_RXP_CPU_STATE_DATA_ACCESS_STALL (1L<<14)
+#define BNX2_RXP_CPU_STATE_INST_FETCH_STALL (1L<<15)
+#define BNX2_RXP_CPU_STATE_BLOCKED_READ (1L<<31)
+
+#define BNX2_RXP_CPU_EVENT_MASK 0x000c5008
+#define BNX2_RXP_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0)
+#define BNX2_RXP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2)
+#define BNX2_RXP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3)
+#define BNX2_RXP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4)
+#define BNX2_RXP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5)
+#define BNX2_RXP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6)
+#define BNX2_RXP_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7)
+#define BNX2_RXP_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8)
+#define BNX2_RXP_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10)
+#define BNX2_RXP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11)
+#define BNX2_RXP_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12)
+
+#define BNX2_RXP_CPU_PROGRAM_COUNTER 0x000c501c
+#define BNX2_RXP_CPU_INSTRUCTION 0x000c5020
+#define BNX2_RXP_CPU_DATA_ACCESS 0x000c5024
+#define BNX2_RXP_CPU_INTERRUPT_ENABLE 0x000c5028
+#define BNX2_RXP_CPU_INTERRUPT_VECTOR 0x000c502c
+#define BNX2_RXP_CPU_INTERRUPT_SAVED_PC 0x000c5030
+#define BNX2_RXP_CPU_HW_BREAKPOINT 0x000c5034
+#define BNX2_RXP_CPU_HW_BREAKPOINT_DISABLE (1L<<0)
+#define BNX2_RXP_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2)
+
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK 0x000c5038
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0)
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11)
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12)
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16)
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27)
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28)
+
+#define BNX2_RXP_CPU_LAST_BRANCH_ADDR 0x000c5048
+#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1)
+#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1)
+#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1)
+#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2)
+
+#define BNX2_RXP_CPU_REG_FILE 0x000c5200
+#define BNX2_RXP_PFE_PFE_CTL 0x000c537c
+#define BNX2_RXP_PFE_PFE_CTL_INC_USAGE_CNT (1L<<0)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE (0xfL<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_0 (0L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_1 (1L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_2 (2L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_3 (3L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_4 (4L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_5 (5L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_6 (6L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_7 (7L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_8 (8L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_9 (9L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_10 (10L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_11 (11L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_12 (12L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_13 (13L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_14 (14L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_15 (15L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_COUNT (0xfL<<12)
+#define BNX2_RXP_PFE_PFE_CTL_OFFSET (0x1ffL<<16)
+
+#define BNX2_RXP_RXPCQ 0x000c5380
+#define BNX2_RXP_CFTQ_CMD 0x000c53b8
+#define BNX2_RXP_CFTQ_CMD_OFFSET (0x3ffL<<0)
+#define BNX2_RXP_CFTQ_CMD_WR_TOP (1L<<10)
+#define BNX2_RXP_CFTQ_CMD_WR_TOP_0 (0L<<10)
+#define BNX2_RXP_CFTQ_CMD_WR_TOP_1 (1L<<10)
+#define BNX2_RXP_CFTQ_CMD_SFT_RESET (1L<<25)
+#define BNX2_RXP_CFTQ_CMD_RD_DATA (1L<<26)
+#define BNX2_RXP_CFTQ_CMD_ADD_INTERVEN (1L<<27)
+#define BNX2_RXP_CFTQ_CMD_ADD_DATA (1L<<28)
+#define BNX2_RXP_CFTQ_CMD_INTERVENE_CLR (1L<<29)
+#define BNX2_RXP_CFTQ_CMD_POP (1L<<30)
+#define BNX2_RXP_CFTQ_CMD_BUSY (1L<<31)
+
+#define BNX2_RXP_CFTQ_CTL 0x000c53bc
+#define BNX2_RXP_CFTQ_CTL_INTERVENE (1L<<0)
+#define BNX2_RXP_CFTQ_CTL_OVERFLOW (1L<<1)
+#define BNX2_RXP_CFTQ_CTL_FORCE_INTERVENE (1L<<2)
+#define BNX2_RXP_CFTQ_CTL_MAX_DEPTH (0x3ffL<<12)
+#define BNX2_RXP_CFTQ_CTL_CUR_DEPTH (0x3ffL<<22)
+
+#define BNX2_RXP_RXPQ 0x000c53c0
+#define BNX2_RXP_FTQ_CMD 0x000c53f8
+#define BNX2_RXP_FTQ_CMD_OFFSET (0x3ffL<<0)
+#define BNX2_RXP_FTQ_CMD_WR_TOP (1L<<10)
+#define BNX2_RXP_FTQ_CMD_WR_TOP_0 (0L<<10)
+#define BNX2_RXP_FTQ_CMD_WR_TOP_1 (1L<<10)
+#define BNX2_RXP_FTQ_CMD_SFT_RESET (1L<<25)
+#define BNX2_RXP_FTQ_CMD_RD_DATA (1L<<26)
+#define BNX2_RXP_FTQ_CMD_ADD_INTERVEN (1L<<27)
+#define BNX2_RXP_FTQ_CMD_ADD_DATA (1L<<28)
+#define BNX2_RXP_FTQ_CMD_INTERVENE_CLR (1L<<29)
+#define BNX2_RXP_FTQ_CMD_POP (1L<<30)
+#define BNX2_RXP_FTQ_CMD_BUSY (1L<<31)
+
+#define BNX2_RXP_FTQ_CTL 0x000c53fc
+#define BNX2_RXP_FTQ_CTL_INTERVENE (1L<<0)
+#define BNX2_RXP_FTQ_CTL_OVERFLOW (1L<<1)
+#define BNX2_RXP_FTQ_CTL_FORCE_INTERVENE (1L<<2)
+#define BNX2_RXP_FTQ_CTL_MAX_DEPTH (0x3ffL<<12)
+#define BNX2_RXP_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
+
+#define BNX2_RXP_SCRATCH 0x000e0000
+#define BNX2_RXP_SCRATCH_RXP_FLOOD 0x000e0024
+#define BNX2_RXP_SCRATCH_RSS_TBL_SZ 0x000e0038
+#define BNX2_RXP_SCRATCH_RSS_TBL 0x000e003c
+#define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES 128
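+
+/*
+ * Sketch, not part of the original patch: the RSS indirection table lives
+ * in RXP scratch memory (up to 128 one-byte entries, assumed packed four
+ * per 32-bit word), and scratch memory is reached through the indirect
+ * register interface.  bnx2_reg_wr_ind is assumed to be the driver's
+ * indirect write helper:
+ *
+ *	for (i = 0; i < tbl_size; i += 4)
+ *		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL + i,
+ *				tbl32[i / 4]);
+ *	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, tbl_size);
+ */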
+
+
+/*
+ * com_reg definition
+ * offset: 0x100000
+ */
+#define BNX2_COM_CKSUM_ERROR_STATUS 0x00100000
+#define BNX2_COM_CKSUM_ERROR_STATUS_CALCULATED (0xffffL<<0)
+#define BNX2_COM_CKSUM_ERROR_STATUS_EXPECTED (0xffffL<<16)
+
+#define BNX2_COM_CPU_MODE 0x00105000
+#define BNX2_COM_CPU_MODE_LOCAL_RST (1L<<0)
+#define BNX2_COM_CPU_MODE_STEP_ENA (1L<<1)
+#define BNX2_COM_CPU_MODE_PAGE_0_DATA_ENA (1L<<2)
+#define BNX2_COM_CPU_MODE_PAGE_0_INST_ENA (1L<<3)
+#define BNX2_COM_CPU_MODE_MSG_BIT1 (1L<<6)
+#define BNX2_COM_CPU_MODE_INTERRUPT_ENA (1L<<7)
+#define BNX2_COM_CPU_MODE_SOFT_HALT (1L<<10)
+#define BNX2_COM_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11)
+#define BNX2_COM_CPU_MODE_BAD_INST_HALT_ENA (1L<<12)
+#define BNX2_COM_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13)
+#define BNX2_COM_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15)
+
+#define BNX2_COM_CPU_STATE 0x00105004
+#define BNX2_COM_CPU_STATE_BREAKPOINT (1L<<0)
+#define BNX2_COM_CPU_STATE_BAD_INST_HALTED (1L<<2)
+#define BNX2_COM_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3)
+#define BNX2_COM_CPU_STATE_PAGE_0_INST_HALTED (1L<<4)
+#define BNX2_COM_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5)
+#define BNX2_COM_CPU_STATE_BAD_PC_HALTED (1L<<6)
+#define BNX2_COM_CPU_STATE_ALIGN_HALTED (1L<<7)
+#define BNX2_COM_CPU_STATE_FIO_ABORT_HALTED (1L<<8)
+#define BNX2_COM_CPU_STATE_SOFT_HALTED (1L<<10)
+#define BNX2_COM_CPU_STATE_SPAD_UNDERFLOW (1L<<11)
+#define BNX2_COM_CPU_STATE_INTERRUPT			 (1L<<12)
+#define BNX2_COM_CPU_STATE_DATA_ACCESS_STALL (1L<<14)
+#define BNX2_COM_CPU_STATE_INST_FETCH_STALL (1L<<15)
+#define BNX2_COM_CPU_STATE_BLOCKED_READ (1L<<31)
+
+#define BNX2_COM_CPU_EVENT_MASK 0x00105008
+#define BNX2_COM_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0)
+#define BNX2_COM_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2)
+#define BNX2_COM_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3)
+#define BNX2_COM_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4)
+#define BNX2_COM_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5)
+#define BNX2_COM_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6)
+#define BNX2_COM_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7)
+#define BNX2_COM_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8)
+#define BNX2_COM_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10)
+#define BNX2_COM_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11)
+#define BNX2_COM_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12)
+
+#define BNX2_COM_CPU_PROGRAM_COUNTER 0x0010501c
+#define BNX2_COM_CPU_INSTRUCTION 0x00105020
+#define BNX2_COM_CPU_DATA_ACCESS 0x00105024
+#define BNX2_COM_CPU_INTERRUPT_ENABLE 0x00105028
+#define BNX2_COM_CPU_INTERRUPT_VECTOR 0x0010502c
+#define BNX2_COM_CPU_INTERRUPT_SAVED_PC 0x00105030
+#define BNX2_COM_CPU_HW_BREAKPOINT 0x00105034
+#define BNX2_COM_CPU_HW_BREAKPOINT_DISABLE (1L<<0)
+#define BNX2_COM_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2)
+
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK 0x00105038
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0)
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11)
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12)
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16)
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27)
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28)
+
+#define BNX2_COM_CPU_LAST_BRANCH_ADDR 0x00105048
+#define BNX2_COM_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1)
+#define BNX2_COM_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1)
+#define BNX2_COM_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1)
+#define BNX2_COM_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2)
+
+#define BNX2_COM_CPU_REG_FILE 0x00105200
+#define BNX2_COM_COMTQ_PFE_PFE_CTL 0x001052bc
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_INC_USAGE_CNT (1L<<0)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE (0xfL<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_0 (0L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_1 (1L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_2 (2L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_3 (3L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_4 (4L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_5 (5L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_6 (6L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_7 (7L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_8 (8L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_9 (9L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_10 (10L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_11 (11L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_12 (12L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_13 (13L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_14 (14L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_15 (15L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_COUNT (0xfL<<12)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_OFFSET (0x1ffL<<16)
+
+#define BNX2_COM_COMXQ 0x00105340
+#define BNX2_COM_COMXQ_FTQ_CMD 0x00105378
+#define BNX2_COM_COMXQ_FTQ_CMD_OFFSET (0x3ffL<<0)
+#define BNX2_COM_COMXQ_FTQ_CMD_WR_TOP (1L<<10)
+#define BNX2_COM_COMXQ_FTQ_CMD_WR_TOP_0 (0L<<10)
+#define BNX2_COM_COMXQ_FTQ_CMD_WR_TOP_1 (1L<<10)
+#define BNX2_COM_COMXQ_FTQ_CMD_SFT_RESET (1L<<25)
+#define BNX2_COM_COMXQ_FTQ_CMD_RD_DATA (1L<<26)
+#define BNX2_COM_COMXQ_FTQ_CMD_ADD_INTERVEN (1L<<27)
+#define BNX2_COM_COMXQ_FTQ_CMD_ADD_DATA (1L<<28)
+#define BNX2_COM_COMXQ_FTQ_CMD_INTERVENE_CLR (1L<<29)
+#define BNX2_COM_COMXQ_FTQ_CMD_POP (1L<<30)
+#define BNX2_COM_COMXQ_FTQ_CMD_BUSY (1L<<31)
+
+#define BNX2_COM_COMXQ_FTQ_CTL 0x0010537c
+#define BNX2_COM_COMXQ_FTQ_CTL_INTERVENE (1L<<0)
+#define BNX2_COM_COMXQ_FTQ_CTL_OVERFLOW (1L<<1)
+#define BNX2_COM_COMXQ_FTQ_CTL_FORCE_INTERVENE (1L<<2)
+#define BNX2_COM_COMXQ_FTQ_CTL_MAX_DEPTH (0x3ffL<<12)
+#define BNX2_COM_COMXQ_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
+
+#define BNX2_COM_COMTQ 0x00105380
+#define BNX2_COM_COMTQ_FTQ_CMD 0x001053b8
+#define BNX2_COM_COMTQ_FTQ_CMD_OFFSET (0x3ffL<<0)
+#define BNX2_COM_COMTQ_FTQ_CMD_WR_TOP (1L<<10)
+#define BNX2_COM_COMTQ_FTQ_CMD_WR_TOP_0 (0L<<10)
+#define BNX2_COM_COMTQ_FTQ_CMD_WR_TOP_1 (1L<<10)
+#define BNX2_COM_COMTQ_FTQ_CMD_SFT_RESET (1L<<25)
+#define BNX2_COM_COMTQ_FTQ_CMD_RD_DATA (1L<<26)
+#define BNX2_COM_COMTQ_FTQ_CMD_ADD_INTERVEN (1L<<27)
+#define BNX2_COM_COMTQ_FTQ_CMD_ADD_DATA (1L<<28)
+#define BNX2_COM_COMTQ_FTQ_CMD_INTERVENE_CLR (1L<<29)
+#define BNX2_COM_COMTQ_FTQ_CMD_POP (1L<<30)
+#define BNX2_COM_COMTQ_FTQ_CMD_BUSY (1L<<31)
+
+#define BNX2_COM_COMTQ_FTQ_CTL 0x001053bc
+#define BNX2_COM_COMTQ_FTQ_CTL_INTERVENE (1L<<0)
+#define BNX2_COM_COMTQ_FTQ_CTL_OVERFLOW (1L<<1)
+#define BNX2_COM_COMTQ_FTQ_CTL_FORCE_INTERVENE (1L<<2)
+#define BNX2_COM_COMTQ_FTQ_CTL_MAX_DEPTH (0x3ffL<<12)
+#define BNX2_COM_COMTQ_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
+
+#define BNX2_COM_COMQ 0x001053c0
+#define BNX2_COM_COMQ_FTQ_CMD 0x001053f8
+#define BNX2_COM_COMQ_FTQ_CMD_OFFSET (0x3ffL<<0)
+#define BNX2_COM_COMQ_FTQ_CMD_WR_TOP (1L<<10)
+#define BNX2_COM_COMQ_FTQ_CMD_WR_TOP_0 (0L<<10)
+#define BNX2_COM_COMQ_FTQ_CMD_WR_TOP_1 (1L<<10)
+#define BNX2_COM_COMQ_FTQ_CMD_SFT_RESET (1L<<25)
+#define BNX2_COM_COMQ_FTQ_CMD_RD_DATA (1L<<26)
+#define BNX2_COM_COMQ_FTQ_CMD_ADD_INTERVEN (1L<<27)
+#define BNX2_COM_COMQ_FTQ_CMD_ADD_DATA (1L<<28)
+#define BNX2_COM_COMQ_FTQ_CMD_INTERVENE_CLR (1L<<29)
+#define BNX2_COM_COMQ_FTQ_CMD_POP (1L<<30)
+#define BNX2_COM_COMQ_FTQ_CMD_BUSY (1L<<31)
+
+#define BNX2_COM_COMQ_FTQ_CTL 0x001053fc
+#define BNX2_COM_COMQ_FTQ_CTL_INTERVENE (1L<<0)
+#define BNX2_COM_COMQ_FTQ_CTL_OVERFLOW (1L<<1)
+#define BNX2_COM_COMQ_FTQ_CTL_FORCE_INTERVENE (1L<<2)
+#define BNX2_COM_COMQ_FTQ_CTL_MAX_DEPTH (0x3ffL<<12)
+#define BNX2_COM_COMQ_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
+
+#define BNX2_COM_SCRATCH 0x00120000
+
+#define BNX2_FW_RX_LOW_LATENCY 0x00120058
+#define BNX2_FW_RX_DROP_COUNT 0x00120084
+
+
+/*
+ * cp_reg definition
+ * offset: 0x180000
+ */
+#define BNX2_CP_CKSUM_ERROR_STATUS 0x00180000
+#define BNX2_CP_CKSUM_ERROR_STATUS_CALCULATED (0xffffL<<0)
+#define BNX2_CP_CKSUM_ERROR_STATUS_EXPECTED (0xffffL<<16)
+
+#define BNX2_CP_CPU_MODE 0x00185000
+#define BNX2_CP_CPU_MODE_LOCAL_RST (1L<<0)
+#define BNX2_CP_CPU_MODE_STEP_ENA (1L<<1)
+#define BNX2_CP_CPU_MODE_PAGE_0_DATA_ENA (1L<<2)
+#define BNX2_CP_CPU_MODE_PAGE_0_INST_ENA (1L<<3)
+#define BNX2_CP_CPU_MODE_MSG_BIT1 (1L<<6)
+#define BNX2_CP_CPU_MODE_INTERRUPT_ENA (1L<<7)
+#define BNX2_CP_CPU_MODE_SOFT_HALT (1L<<10)
+#define BNX2_CP_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11)
+#define BNX2_CP_CPU_MODE_BAD_INST_HALT_ENA (1L<<12)
+#define BNX2_CP_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13)
+#define BNX2_CP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15)
+
+#define BNX2_CP_CPU_STATE 0x00185004
+#define BNX2_CP_CPU_STATE_BREAKPOINT (1L<<0)
+#define BNX2_CP_CPU_STATE_BAD_INST_HALTED (1L<<2)
+#define BNX2_CP_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3)
+#define BNX2_CP_CPU_STATE_PAGE_0_INST_HALTED (1L<<4)
+#define BNX2_CP_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5)
+#define BNX2_CP_CPU_STATE_BAD_PC_HALTED (1L<<6)
+#define BNX2_CP_CPU_STATE_ALIGN_HALTED (1L<<7)
+#define BNX2_CP_CPU_STATE_FIO_ABORT_HALTED (1L<<8)
+#define BNX2_CP_CPU_STATE_SOFT_HALTED (1L<<10)
+#define BNX2_CP_CPU_STATE_SPAD_UNDERFLOW (1L<<11)
+#define BNX2_CP_CPU_STATE_INTERRUPT			 (1L<<12)
+#define BNX2_CP_CPU_STATE_DATA_ACCESS_STALL (1L<<14)
+#define BNX2_CP_CPU_STATE_INST_FETCH_STALL (1L<<15)
+#define BNX2_CP_CPU_STATE_BLOCKED_READ (1L<<31)
+
+#define BNX2_CP_CPU_EVENT_MASK 0x00185008
+#define BNX2_CP_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0)
+#define BNX2_CP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2)
+#define BNX2_CP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3)
+#define BNX2_CP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4)
+#define BNX2_CP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5)
+#define BNX2_CP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6)
+#define BNX2_CP_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7)
+#define BNX2_CP_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8)
+#define BNX2_CP_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10)
+#define BNX2_CP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11)
+#define BNX2_CP_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12)
+
+#define BNX2_CP_CPU_PROGRAM_COUNTER 0x0018501c
+#define BNX2_CP_CPU_INSTRUCTION 0x00185020
+#define BNX2_CP_CPU_DATA_ACCESS 0x00185024
+#define BNX2_CP_CPU_INTERRUPT_ENABLE 0x00185028
+#define BNX2_CP_CPU_INTERRUPT_VECTOR 0x0018502c
+#define BNX2_CP_CPU_INTERRUPT_SAVED_PC 0x00185030
+#define BNX2_CP_CPU_HW_BREAKPOINT 0x00185034
+#define BNX2_CP_CPU_HW_BREAKPOINT_DISABLE (1L<<0)
+#define BNX2_CP_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2)
+
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK 0x00185038
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0)
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11)
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12)
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16)
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27)
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28)
+
+#define BNX2_CP_CPU_LAST_BRANCH_ADDR 0x00185048
+#define BNX2_CP_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1)
+#define BNX2_CP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1)
+#define BNX2_CP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1)
+#define BNX2_CP_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2)
+
+#define BNX2_CP_CPU_REG_FILE 0x00185200
+#define BNX2_CP_CPQ_PFE_PFE_CTL 0x001853bc
+#define BNX2_CP_CPQ_PFE_PFE_CTL_INC_USAGE_CNT (1L<<0)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE (0xfL<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_0 (0L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_1 (1L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_2 (2L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_3 (3L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_4 (4L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_5 (5L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_6 (6L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_7 (7L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_8 (8L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_9 (9L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_10 (10L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_11 (11L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_12 (12L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_13 (13L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_14 (14L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_15 (15L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_COUNT (0xfL<<12)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_OFFSET (0x1ffL<<16)
+
+#define BNX2_CP_CPQ 0x001853c0
+#define BNX2_CP_CPQ_FTQ_CMD 0x001853f8
+#define BNX2_CP_CPQ_FTQ_CMD_OFFSET (0x3ffL<<0)
+#define BNX2_CP_CPQ_FTQ_CMD_WR_TOP (1L<<10)
+#define BNX2_CP_CPQ_FTQ_CMD_WR_TOP_0 (0L<<10)
+#define BNX2_CP_CPQ_FTQ_CMD_WR_TOP_1 (1L<<10)
+#define BNX2_CP_CPQ_FTQ_CMD_SFT_RESET (1L<<25)
+#define BNX2_CP_CPQ_FTQ_CMD_RD_DATA (1L<<26)
+#define BNX2_CP_CPQ_FTQ_CMD_ADD_INTERVEN (1L<<27)
+#define BNX2_CP_CPQ_FTQ_CMD_ADD_DATA (1L<<28)
+#define BNX2_CP_CPQ_FTQ_CMD_INTERVENE_CLR (1L<<29)
+#define BNX2_CP_CPQ_FTQ_CMD_POP (1L<<30)
+#define BNX2_CP_CPQ_FTQ_CMD_BUSY (1L<<31)
+
+#define BNX2_CP_CPQ_FTQ_CTL 0x001853fc
+#define BNX2_CP_CPQ_FTQ_CTL_INTERVENE (1L<<0)
+#define BNX2_CP_CPQ_FTQ_CTL_OVERFLOW (1L<<1)
+#define BNX2_CP_CPQ_FTQ_CTL_FORCE_INTERVENE (1L<<2)
+#define BNX2_CP_CPQ_FTQ_CTL_MAX_DEPTH (0x3ffL<<12)
+#define BNX2_CP_CPQ_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
+
+#define BNX2_CP_SCRATCH 0x001a0000
+
+#define BNX2_FW_MAX_ISCSI_CONN 0x001a0080
+
+
+/*
+ * mcp_reg definition
+ * offset: 0x140000
+ */
+#define BNX2_MCP_MCP_CONTROL 0x00140080
+#define BNX2_MCP_MCP_CONTROL_SMBUS_SEL (1L<<30)
+#define BNX2_MCP_MCP_CONTROL_MCP_ISOLATE (1L<<31)
+
+#define BNX2_MCP_MCP_ATTENTION_STATUS 0x00140084
+#define BNX2_MCP_MCP_ATTENTION_STATUS_DRV_DOORBELL (1L<<29)
+#define BNX2_MCP_MCP_ATTENTION_STATUS_WATCHDOG_TIMEOUT (1L<<30)
+#define BNX2_MCP_MCP_ATTENTION_STATUS_CPU_EVENT (1L<<31)
+
+#define BNX2_MCP_MCP_HEARTBEAT_CONTROL 0x00140088
+#define BNX2_MCP_MCP_HEARTBEAT_CONTROL_MCP_HEARTBEAT_ENABLE (1L<<31)
+
+#define BNX2_MCP_MCP_HEARTBEAT_STATUS 0x0014008c
+#define BNX2_MCP_MCP_HEARTBEAT_STATUS_MCP_HEARTBEAT_PERIOD (0x7ffL<<0)
+#define BNX2_MCP_MCP_HEARTBEAT_STATUS_VALID (1L<<31)
+
+#define BNX2_MCP_MCP_HEARTBEAT 0x00140090
+#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_COUNT (0x3fffffffL<<0)
+#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_INC (1L<<30)
+#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_RESET (1L<<31)
+
+#define BNX2_MCP_WATCHDOG_RESET 0x00140094
+#define BNX2_MCP_WATCHDOG_RESET_WATCHDOG_RESET (1L<<31)
+
+#define BNX2_MCP_WATCHDOG_CONTROL 0x00140098
+#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_TIMEOUT (0xfffffffL<<0)
+#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_ATTN (1L<<29)
+#define BNX2_MCP_WATCHDOG_CONTROL_MCP_RST_ENABLE (1L<<30)
+#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_ENABLE (1L<<31)
+
+#define BNX2_MCP_ACCESS_LOCK 0x0014009c
+#define BNX2_MCP_ACCESS_LOCK_LOCK (1L<<31)
+
+#define BNX2_MCP_TOE_ID 0x001400a0
+#define BNX2_MCP_TOE_ID_FUNCTION_ID (1L<<31)
+
+#define BNX2_MCP_MAILBOX_CFG 0x001400a4
+#define BNX2_MCP_MAILBOX_CFG_MAILBOX_OFFSET (0x3fffL<<0)
+#define BNX2_MCP_MAILBOX_CFG_MAILBOX_SIZE (0xfffL<<20)
+
+#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC 0x001400a8
+#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC_MAILBOX_OFFSET (0x3fffL<<0)
+#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC_MAILBOX_SIZE (0xfffL<<20)
+
+#define BNX2_MCP_MCP_DOORBELL 0x001400ac
+#define BNX2_MCP_MCP_DOORBELL_MCP_DOORBELL (1L<<31)
+
+#define BNX2_MCP_DRIVER_DOORBELL 0x001400b0
+#define BNX2_MCP_DRIVER_DOORBELL_DRIVER_DOORBELL (1L<<31)
+
+#define BNX2_MCP_DRIVER_DOORBELL_OTHER_FUNC 0x001400b4
+#define BNX2_MCP_DRIVER_DOORBELL_OTHER_FUNC_DRIVER_DOORBELL (1L<<31)
+
+#define BNX2_MCP_CPU_MODE 0x00145000
+#define BNX2_MCP_CPU_MODE_LOCAL_RST (1L<<0)
+#define BNX2_MCP_CPU_MODE_STEP_ENA (1L<<1)
+#define BNX2_MCP_CPU_MODE_PAGE_0_DATA_ENA (1L<<2)
+#define BNX2_MCP_CPU_MODE_PAGE_0_INST_ENA (1L<<3)
+#define BNX2_MCP_CPU_MODE_MSG_BIT1 (1L<<6)
+#define BNX2_MCP_CPU_MODE_INTERRUPT_ENA (1L<<7)
+#define BNX2_MCP_CPU_MODE_SOFT_HALT (1L<<10)
+#define BNX2_MCP_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11)
+#define BNX2_MCP_CPU_MODE_BAD_INST_HALT_ENA (1L<<12)
+#define BNX2_MCP_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13)
+#define BNX2_MCP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15)
+
+#define BNX2_MCP_CPU_STATE 0x00145004
+#define BNX2_MCP_CPU_STATE_BREAKPOINT (1L<<0)
+#define BNX2_MCP_CPU_STATE_BAD_INST_HALTED (1L<<2)
+#define BNX2_MCP_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3)
+#define BNX2_MCP_CPU_STATE_PAGE_0_INST_HALTED (1L<<4)
+#define BNX2_MCP_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5)
+#define BNX2_MCP_CPU_STATE_BAD_PC_HALTED (1L<<6)
+#define BNX2_MCP_CPU_STATE_ALIGN_HALTED (1L<<7)
+#define BNX2_MCP_CPU_STATE_FIO_ABORT_HALTED (1L<<8)
+#define BNX2_MCP_CPU_STATE_SOFT_HALTED (1L<<10)
+#define BNX2_MCP_CPU_STATE_SPAD_UNDERFLOW (1L<<11)
+#define BNX2_MCP_CPU_STATE_INTERRRUPT (1L<<12)
+#define BNX2_MCP_CPU_STATE_DATA_ACCESS_STALL (1L<<14)
+#define BNX2_MCP_CPU_STATE_INST_FETCH_STALL (1L<<15)
+#define BNX2_MCP_CPU_STATE_BLOCKED_READ (1L<<31)
+
+#define BNX2_MCP_CPU_EVENT_MASK 0x00145008
+#define BNX2_MCP_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0)
+#define BNX2_MCP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2)
+#define BNX2_MCP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3)
+#define BNX2_MCP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4)
+#define BNX2_MCP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5)
+#define BNX2_MCP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6)
+#define BNX2_MCP_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7)
+#define BNX2_MCP_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8)
+#define BNX2_MCP_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10)
+#define BNX2_MCP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11)
+#define BNX2_MCP_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12)
+
+#define BNX2_MCP_CPU_PROGRAM_COUNTER 0x0014501c
+#define BNX2_MCP_CPU_INSTRUCTION 0x00145020
+#define BNX2_MCP_CPU_DATA_ACCESS 0x00145024
+#define BNX2_MCP_CPU_INTERRUPT_ENABLE 0x00145028
+#define BNX2_MCP_CPU_INTERRUPT_VECTOR 0x0014502c
+#define BNX2_MCP_CPU_INTERRUPT_SAVED_PC 0x00145030
+#define BNX2_MCP_CPU_HW_BREAKPOINT 0x00145034
+#define BNX2_MCP_CPU_HW_BREAKPOINT_DISABLE (1L<<0)
+#define BNX2_MCP_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2)
+
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK 0x00145038
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0)
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11)
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12)
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16)
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27)
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28)
+
+#define BNX2_MCP_CPU_LAST_BRANCH_ADDR 0x00145048
+#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1)
+#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1)
+#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1)
+#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2)
+
+#define BNX2_MCP_CPU_REG_FILE 0x00145200
+#define BNX2_MCP_MCPQ 0x001453c0
+#define BNX2_MCP_MCPQ_FTQ_CMD 0x001453f8
+#define BNX2_MCP_MCPQ_FTQ_CMD_OFFSET (0x3ffL<<0)
+#define BNX2_MCP_MCPQ_FTQ_CMD_WR_TOP (1L<<10)
+#define BNX2_MCP_MCPQ_FTQ_CMD_WR_TOP_0 (0L<<10)
+#define BNX2_MCP_MCPQ_FTQ_CMD_WR_TOP_1 (1L<<10)
+#define BNX2_MCP_MCPQ_FTQ_CMD_SFT_RESET (1L<<25)
+#define BNX2_MCP_MCPQ_FTQ_CMD_RD_DATA (1L<<26)
+#define BNX2_MCP_MCPQ_FTQ_CMD_ADD_INTERVEN (1L<<27)
+#define BNX2_MCP_MCPQ_FTQ_CMD_ADD_DATA (1L<<28)
+#define BNX2_MCP_MCPQ_FTQ_CMD_INTERVENE_CLR (1L<<29)
+#define BNX2_MCP_MCPQ_FTQ_CMD_POP (1L<<30)
+#define BNX2_MCP_MCPQ_FTQ_CMD_BUSY (1L<<31)
+
+#define BNX2_MCP_MCPQ_FTQ_CTL 0x001453fc
+#define BNX2_MCP_MCPQ_FTQ_CTL_INTERVENE (1L<<0)
+#define BNX2_MCP_MCPQ_FTQ_CTL_OVERFLOW (1L<<1)
+#define BNX2_MCP_MCPQ_FTQ_CTL_FORCE_INTERVENE (1L<<2)
+#define BNX2_MCP_MCPQ_FTQ_CTL_MAX_DEPTH (0x3ffL<<12)
+#define BNX2_MCP_MCPQ_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
+
+#define BNX2_MCP_ROM 0x00150000
+#define BNX2_MCP_SCRATCH 0x00160000
+#define BNX2_MCP_STATE_P1 0x0016f9c8
+#define BNX2_MCP_STATE_P0 0x0016fdc8
+#define BNX2_MCP_STATE_P1_5708 0x001699c8
+#define BNX2_MCP_STATE_P0_5708 0x00169dc8
+
+#define BNX2_SHM_HDR_SIGNATURE BNX2_MCP_SCRATCH
+#define BNX2_SHM_HDR_SIGNATURE_SIG_MASK 0xffff0000
+#define BNX2_SHM_HDR_SIGNATURE_SIG 0x53530000
+#define BNX2_SHM_HDR_SIGNATURE_VER_MASK 0x000000ff
+#define BNX2_SHM_HDR_SIGNATURE_VER_ONE 0x00000001
+
+#define BNX2_SHM_HDR_ADDR_0 (BNX2_MCP_SCRATCH + 4)
+#define BNX2_SHM_HDR_ADDR_1 (BNX2_MCP_SCRATCH + 8)
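+
+/*
+ * Usage sketch (editor's illustration, assuming the indirect read helper
+ * bnx2_reg_rd_ind() from the .c file): the driver validates shared memory
+ * by checking the masked signature word before using the header pointers:
+ *
+ *	val = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
+ *	if ((val & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
+ *	    BNX2_SHM_HDR_SIGNATURE_SIG)
+ *		shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0);
+ */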
+
+
+#define NUM_MC_HASH_REGISTERS 8
+
+
+/* PHY_ID1: bits 31-16; PHY_ID2: bits 15-0. */
+#define PHY_BCM5706_PHY_ID 0x00206160
+
+#define PHY_ID(id) ((id) & 0xfffffff0)
+#define PHY_REV_ID(id) ((id) & 0xf)
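+
+/*
+ * Worked example (editor's illustration) with a hypothetical id value
+ * assembled from PHY_ID1/PHY_ID2:
+ *
+ *	PHY_ID(0x00206165)     == 0x00206160	// matches PHY_BCM5706_PHY_ID
+ *	PHY_REV_ID(0x00206165) == 0x5		// revision nibble
+ */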
+
+/* 5708 Serdes PHY registers */
+
+#define BCM5708S_BMCR_FORCE_2500 0x20
+
+#define BCM5708S_UP1 0xb
+
+#define BCM5708S_UP1_2G5 0x1
+
+#define BCM5708S_BLK_ADDR 0x1f
+
+#define BCM5708S_BLK_ADDR_DIG 0x0000
+#define BCM5708S_BLK_ADDR_DIG3 0x0002
+#define BCM5708S_BLK_ADDR_TX_MISC 0x0005
+
+/* Digital Block */
+#define BCM5708S_1000X_CTL1 0x10
+
+#define BCM5708S_1000X_CTL1_FIBER_MODE 0x0001
+#define BCM5708S_1000X_CTL1_AUTODET_EN 0x0010
+
+#define BCM5708S_1000X_CTL2 0x11
+
+#define BCM5708S_1000X_CTL2_PLLEL_DET_EN 0x0001
+
+#define BCM5708S_1000X_STAT1 0x14
+
+#define BCM5708S_1000X_STAT1_SGMII 0x0001
+#define BCM5708S_1000X_STAT1_LINK 0x0002
+#define BCM5708S_1000X_STAT1_FD 0x0004
+#define BCM5708S_1000X_STAT1_SPEED_MASK 0x0018
+#define BCM5708S_1000X_STAT1_SPEED_10 0x0000
+#define BCM5708S_1000X_STAT1_SPEED_100 0x0008
+#define BCM5708S_1000X_STAT1_SPEED_1G 0x0010
+#define BCM5708S_1000X_STAT1_SPEED_2G5 0x0018
+#define BCM5708S_1000X_STAT1_TX_PAUSE 0x0020
+#define BCM5708S_1000X_STAT1_RX_PAUSE 0x0040
+
+/* Digital3 Block */
+#define BCM5708S_DIG_3_0 0x10
+
+#define BCM5708S_DIG_3_0_USE_IEEE 0x0001
+
+/* Tx/Misc Block */
+#define BCM5708S_TX_ACTL1 0x15
+
+#define BCM5708S_TX_ACTL1_DRIVER_VCM 0x30
+
+#define BCM5708S_TX_ACTL3 0x17
+
+#define MII_BNX2_DSP_RW_PORT 0x15
+#define MII_BNX2_DSP_ADDRESS 0x17
+#define MII_BNX2_DSP_EXPAND_REG 0x0f00
+#define MII_EXPAND_REG1 (MII_BNX2_DSP_EXPAND_REG | 1)
+#define MII_EXPAND_REG1_RUDI_C 0x20
+#define MII_EXPAND_SERDES_CTL (MII_BNX2_DSP_EXPAND_REG | 3)
+
+#define MII_BNX2_MISC_SHADOW 0x1c
+#define MISC_SHDW_AN_DBG 0x6800
+#define MISC_SHDW_AN_DBG_NOSYNC 0x0002
+#define MISC_SHDW_AN_DBG_RUDI_INVALID 0x0100
+#define MISC_SHDW_MODE_CTL 0x7c00
+#define MISC_SHDW_MODE_CTL_SIG_DET 0x0010
+
+#define MII_BNX2_BLK_ADDR 0x1f
+#define MII_BNX2_BLK_ADDR_IEEE0 0x0000
+#define MII_BNX2_BLK_ADDR_GP_STATUS 0x8120
+#define MII_BNX2_GP_TOP_AN_STATUS1 0x1b
+#define MII_BNX2_GP_TOP_AN_SPEED_MSK 0x3f00
+#define MII_BNX2_GP_TOP_AN_SPEED_10 0x0000
+#define MII_BNX2_GP_TOP_AN_SPEED_100 0x0100
+#define MII_BNX2_GP_TOP_AN_SPEED_1G 0x0200
+#define MII_BNX2_GP_TOP_AN_SPEED_2_5G 0x0300
+#define MII_BNX2_GP_TOP_AN_SPEED_1GKV 0x0d00
+#define MII_BNX2_GP_TOP_AN_FD 0x8
+#define MII_BNX2_BLK_ADDR_SERDES_DIG 0x8300
+#define MII_BNX2_SERDES_DIG_1000XCTL1 0x10
+#define MII_BNX2_SD_1000XCTL1_FIBER 0x01
+#define MII_BNX2_SD_1000XCTL1_AUTODET 0x10
+#define MII_BNX2_SERDES_DIG_MISC1 0x18
+#define MII_BNX2_SD_MISC1_FORCE_MSK 0xf
+#define MII_BNX2_SD_MISC1_FORCE_2_5G 0x0
+#define MII_BNX2_SD_MISC1_FORCE 0x10
+#define MII_BNX2_BLK_ADDR_OVER1G 0x8320
+#define MII_BNX2_OVER1G_UP1 0x19
+#define MII_BNX2_BLK_ADDR_BAM_NXTPG 0x8350
+#define MII_BNX2_BAM_NXTPG_CTL 0x10
+#define MII_BNX2_NXTPG_CTL_BAM 0x1
+#define MII_BNX2_NXTPG_CTL_T2 0x2
+#define MII_BNX2_BLK_ADDR_CL73_USERB0 0x8370
+#define MII_BNX2_CL73_BAM_CTL1 0x12
+#define MII_BNX2_CL73_BAM_EN 0x8000
+#define MII_BNX2_CL73_BAM_STA_MGR_EN 0x4000
+#define MII_BNX2_CL73_BAM_NP_AFT_BP_EN 0x2000
+#define MII_BNX2_BLK_ADDR_AER 0xffd0
+#define MII_BNX2_AER_AER 0x1e
+#define MII_BNX2_AER_AER_AN_MMD 0x3800
+#define MII_BNX2_BLK_ADDR_COMBO_IEEEB0 0xffe0
+
+#define MIN_ETHERNET_PACKET_SIZE 60
+#define MAX_ETHERNET_PACKET_SIZE 1514
+#define MAX_ETHERNET_JUMBO_PACKET_SIZE 9014
+
+#define BNX2_RX_COPY_THRESH 128
+
+#define BNX2_MISC_ENABLE_DEFAULT 0x17ffffff
+
+#define BNX2_START_UNICAST_ADDRESS_INDEX 4
+#define BNX2_END_UNICAST_ADDRESS_INDEX 7
+#define BNX2_MAX_UNICAST_ADDRESSES (BNX2_END_UNICAST_ADDRESS_INDEX - \
+ BNX2_START_UNICAST_ADDRESS_INDEX + 1)
+
+#define DMA_READ_CHANS 5
+#define DMA_WRITE_CHANS 3
+
+/* Use CPU native page size up to 16K for the ring sizes. */
+#if (PAGE_SHIFT > 14)
+#define BCM_PAGE_BITS 14
+#else
+#define BCM_PAGE_BITS PAGE_SHIFT
+#endif
+#define BCM_PAGE_SIZE (1 << BCM_PAGE_BITS)
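+
+/*
+ * Worked example (editor's illustration): with 4K native pages
+ * (PAGE_SHIFT == 12), BCM_PAGE_SIZE == 4096; with 64K pages
+ * (PAGE_SHIFT == 16), the ring page is capped at BCM_PAGE_BITS == 14,
+ * i.e. BCM_PAGE_SIZE == 16384.
+ */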
+
+#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct tx_bd))
+#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
+
+#define MAX_RX_RINGS 8
+#define MAX_RX_PG_RINGS 32
+#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct rx_bd))
+#define MAX_RX_DESC_CNT (RX_DESC_CNT - 1)
+#define MAX_TOTAL_RX_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_RINGS)
+#define MAX_TOTAL_RX_PG_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_PG_RINGS)
+
+#define NEXT_TX_BD(x) (((x) & (MAX_TX_DESC_CNT - 1)) == \
+ (MAX_TX_DESC_CNT - 1)) ? \
+ (x) + 2 : (x) + 1
+
+#define TX_RING_IDX(x) ((x) & MAX_TX_DESC_CNT)
+
+#define NEXT_RX_BD(x) (((x) & (MAX_RX_DESC_CNT - 1)) == \
+ (MAX_RX_DESC_CNT - 1)) ? \
+ (x) + 2 : (x) + 1
+
+#define RX_RING_IDX(x) ((x) & bp->rx_max_ring_idx)
+#define RX_PG_RING_IDX(x) ((x) & bp->rx_max_pg_ring_idx)
+
+#define RX_RING(x) (((x) & ~MAX_RX_DESC_CNT) >> (BCM_PAGE_BITS - 4))
+#define RX_IDX(x) ((x) & MAX_RX_DESC_CNT)
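+
+/*
+ * Worked example (editor's illustration, assuming 4K pages and a 16-byte
+ * struct tx_bd, so TX_DESC_CNT == 256): the last BD on each ring page is a
+ * pointer to the next page, so the NEXT_*_BD() macros step over it:
+ *
+ *	NEXT_TX_BD(100)  == 101		// ordinary increment
+ *	NEXT_TX_BD(254)  == 256		// skips BD 255, the next-page pointer
+ *	TX_RING_IDX(256) == 0		// the ring index wraps to the start
+ */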
+
+/* Context size. */
+#define CTX_SHIFT 7
+#define CTX_SIZE (1 << CTX_SHIFT)
+#define CTX_MASK (CTX_SIZE - 1)
+#define GET_CID_ADDR(_cid) ((_cid) << CTX_SHIFT)
+#define GET_CID(_cid_addr) ((_cid_addr) >> CTX_SHIFT)
+
+#define PHY_CTX_SHIFT 6
+#define PHY_CTX_SIZE (1 << PHY_CTX_SHIFT)
+#define PHY_CTX_MASK (PHY_CTX_SIZE - 1)
+#define GET_PCID_ADDR(_pcid) ((_pcid) << PHY_CTX_SHIFT)
+#define GET_PCID(_pcid_addr) ((_pcid_addr) >> PHY_CTX_SHIFT)
+
+#define MB_KERNEL_CTX_SHIFT 8
+#define MB_KERNEL_CTX_SIZE (1 << MB_KERNEL_CTX_SHIFT)
+#define MB_KERNEL_CTX_MASK (MB_KERNEL_CTX_SIZE - 1)
+#define MB_GET_CID_ADDR(_cid) (0x10000 + ((_cid) << MB_KERNEL_CTX_SHIFT))
+
+#define MAX_CID_CNT 0x4000
+#define MAX_CID_ADDR (GET_CID_ADDR(MAX_CID_CNT))
+#define INVALID_CID_ADDR 0xffffffff
+
+#define TX_CID 16
+#define TX_TSS_CID 32
+#define RX_CID 0
+#define RX_RSS_CID 4
+#define RX_MAX_RSS_RINGS 7
+#define RX_MAX_RINGS (RX_MAX_RSS_RINGS + 1)
+#define TX_MAX_TSS_RINGS 7
+#define TX_MAX_RINGS (TX_MAX_TSS_RINGS + 1)
+
+#define MB_TX_CID_ADDR MB_GET_CID_ADDR(TX_CID)
+#define MB_RX_CID_ADDR MB_GET_CID_ADDR(RX_CID)
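+
+/*
+ * Worked example (editor's illustration): a context address is just the
+ * context id shifted by the context size, so with TX_CID == 16:
+ *
+ *	GET_CID_ADDR(TX_CID)    == 16 << 7 == 0x800
+ *	MB_GET_CID_ADDR(TX_CID) == 0x10000 + (16 << 8) == 0x11000
+ */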
+
+struct sw_bd {
+ struct sk_buff *skb;
+ struct l2_fhdr *desc;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct sw_pg {
+ struct page *page;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct sw_tx_bd {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ unsigned short is_gso;
+ unsigned short nr_frags;
+};
+
+#define SW_RXBD_RING_SIZE (sizeof(struct sw_bd) * RX_DESC_CNT)
+#define SW_RXPG_RING_SIZE (sizeof(struct sw_pg) * RX_DESC_CNT)
+#define RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
+#define SW_TXBD_RING_SIZE (sizeof(struct sw_tx_bd) * TX_DESC_CNT)
+#define TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
+
+/* Buffered flash (Atmel: AT45DB011B) specific information */
+#define SEEPROM_PAGE_BITS 2
+#define SEEPROM_PHY_PAGE_SIZE (1 << SEEPROM_PAGE_BITS)
+#define SEEPROM_BYTE_ADDR_MASK (SEEPROM_PHY_PAGE_SIZE-1)
+#define SEEPROM_PAGE_SIZE 4
+#define SEEPROM_TOTAL_SIZE 65536
+
+#define BUFFERED_FLASH_PAGE_BITS 9
+#define BUFFERED_FLASH_PHY_PAGE_SIZE (1 << BUFFERED_FLASH_PAGE_BITS)
+#define BUFFERED_FLASH_BYTE_ADDR_MASK (BUFFERED_FLASH_PHY_PAGE_SIZE-1)
+#define BUFFERED_FLASH_PAGE_SIZE 264
+#define BUFFERED_FLASH_TOTAL_SIZE 0x21000
+
+#define SAIFUN_FLASH_PAGE_BITS 8
+#define SAIFUN_FLASH_PHY_PAGE_SIZE (1 << SAIFUN_FLASH_PAGE_BITS)
+#define SAIFUN_FLASH_BYTE_ADDR_MASK (SAIFUN_FLASH_PHY_PAGE_SIZE-1)
+#define SAIFUN_FLASH_PAGE_SIZE 256
+#define SAIFUN_FLASH_BASE_TOTAL_SIZE 65536
+
+#define ST_MICRO_FLASH_PAGE_BITS 8
+#define ST_MICRO_FLASH_PHY_PAGE_SIZE (1 << ST_MICRO_FLASH_PAGE_BITS)
+#define ST_MICRO_FLASH_BYTE_ADDR_MASK (ST_MICRO_FLASH_PHY_PAGE_SIZE-1)
+#define ST_MICRO_FLASH_PAGE_SIZE 256
+#define ST_MICRO_FLASH_BASE_TOTAL_SIZE 65536
+
+#define BCM5709_FLASH_PAGE_BITS 8
+#define BCM5709_FLASH_PHY_PAGE_SIZE (1 << BCM5709_FLASH_PAGE_BITS)
+#define BCM5709_FLASH_BYTE_ADDR_MASK (BCM5709_FLASH_PHY_PAGE_SIZE-1)
+#define BCM5709_FLASH_PAGE_SIZE 256
+
+#define NVRAM_TIMEOUT_COUNT 30000
+
+
+#define FLASH_STRAP_MASK (BNX2_NVM_CFG1_FLASH_MODE | \
+ BNX2_NVM_CFG1_BUFFER_MODE | \
+ BNX2_NVM_CFG1_PROTECT_MODE | \
+ BNX2_NVM_CFG1_FLASH_SIZE)
+
+#define FLASH_BACKUP_STRAP_MASK (0xf << 26)
+
+struct flash_spec {
+ u32 strapping;
+ u32 config1;
+ u32 config2;
+ u32 config3;
+ u32 write1;
+ u32 flags;
+#define BNX2_NV_BUFFERED 0x00000001
+#define BNX2_NV_TRANSLATE 0x00000002
+#define BNX2_NV_WREN 0x00000004
+ u32 page_bits;
+ u32 page_size;
+ u32 addr_mask;
+ u32 total_size;
+ u8 *name;
+};
+
+#define BNX2_MAX_MSIX_HW_VEC 9
+#define BNX2_MAX_MSIX_VEC 9
+#ifdef BCM_CNIC
+#define BNX2_MIN_MSIX_VEC 2
+#else
+#define BNX2_MIN_MSIX_VEC 1
+#endif
+
+
+struct bnx2_irq {
+ irq_handler_t handler;
+ unsigned int vector;
+ u8 requested;
+ char name[IFNAMSIZ + 2];
+};
+
+struct bnx2_tx_ring_info {
+ u32 tx_prod_bseq;
+ u16 tx_prod;
+ u32 tx_bidx_addr;
+ u32 tx_bseq_addr;
+
+ struct tx_bd *tx_desc_ring;
+ struct sw_tx_bd *tx_buf_ring;
+
+ u16 tx_cons;
+ u16 hw_tx_cons;
+
+ dma_addr_t tx_desc_mapping;
+};
+
+struct bnx2_rx_ring_info {
+ u32 rx_prod_bseq;
+ u16 rx_prod;
+ u16 rx_cons;
+
+ u32 rx_bidx_addr;
+ u32 rx_bseq_addr;
+ u32 rx_pg_bidx_addr;
+
+ u16 rx_pg_prod;
+ u16 rx_pg_cons;
+
+ struct sw_bd *rx_buf_ring;
+ struct rx_bd *rx_desc_ring[MAX_RX_RINGS];
+ struct sw_pg *rx_pg_ring;
+ struct rx_bd *rx_pg_desc_ring[MAX_RX_PG_RINGS];
+
+ dma_addr_t rx_desc_mapping[MAX_RX_RINGS];
+ dma_addr_t rx_pg_desc_mapping[MAX_RX_PG_RINGS];
+};
+
+struct bnx2_napi {
+ struct napi_struct napi ____cacheline_aligned;
+ struct bnx2 *bp;
+ union {
+ struct status_block *msi;
+ struct status_block_msix *msix;
+ } status_blk;
+ u16 *hw_tx_cons_ptr;
+ u16 *hw_rx_cons_ptr;
+ u32 last_status_idx;
+ u32 int_num;
+
+#ifdef BCM_CNIC
+ u32 cnic_tag;
+ int cnic_present;
+#endif
+
+ struct bnx2_rx_ring_info rx_ring;
+ struct bnx2_tx_ring_info tx_ring;
+};
+
+struct bnx2 {
+	/* Fields used in the tx and intr/napi performance paths are
+	 * grouped together at the beginning of the structure.
+	 */
+ void __iomem *regview;
+
+ struct net_device *dev;
+ struct pci_dev *pdev;
+
+ atomic_t intr_sem;
+
+ u32 flags;
+#define BNX2_FLAG_PCIX 0x00000001
+#define BNX2_FLAG_PCI_32BIT 0x00000002
+#define BNX2_FLAG_MSIX_CAP 0x00000004
+#define BNX2_FLAG_NO_WOL 0x00000008
+#define BNX2_FLAG_USING_MSI 0x00000020
+#define BNX2_FLAG_ASF_ENABLE 0x00000040
+#define BNX2_FLAG_MSI_CAP 0x00000080
+#define BNX2_FLAG_ONE_SHOT_MSI 0x00000100
+#define BNX2_FLAG_PCIE 0x00000200
+#define BNX2_FLAG_USING_MSIX 0x00000400
+#define BNX2_FLAG_USING_MSI_OR_MSIX (BNX2_FLAG_USING_MSI | \
+ BNX2_FLAG_USING_MSIX)
+#define BNX2_FLAG_JUMBO_BROKEN 0x00000800
+#define BNX2_FLAG_CAN_KEEP_VLAN 0x00001000
+#define BNX2_FLAG_BROKEN_STATS 0x00002000
+#define BNX2_FLAG_AER_ENABLED 0x00004000
+
+ struct bnx2_napi bnx2_napi[BNX2_MAX_MSIX_VEC];
+
+	u32 rx_buf_use_size; /* usable size */
+ u32 rx_buf_size; /* with alignment */
+ u32 rx_copy_thresh;
+ u32 rx_jumbo_thresh;
+ u32 rx_max_ring_idx;
+ u32 rx_max_pg_ring_idx;
+
+ /* TX constants */
+ int tx_ring_size;
+ u32 tx_wake_thresh;
+
+#ifdef BCM_CNIC
+ struct cnic_ops __rcu *cnic_ops;
+ void *cnic_data;
+#endif
+
+ /* End of fields used in the performance code paths. */
+
+ unsigned int current_interval;
+#define BNX2_TIMER_INTERVAL HZ
+#define BNX2_SERDES_AN_TIMEOUT (HZ / 3)
+#define BNX2_SERDES_FORCED_TIMEOUT (HZ / 10)
+
+ struct timer_list timer;
+ struct work_struct reset_task;
+
+ /* Used to synchronize phy accesses. */
+ spinlock_t phy_lock;
+ spinlock_t indirect_lock;
+
+ u32 phy_flags;
+#define BNX2_PHY_FLAG_SERDES 0x00000001
+#define BNX2_PHY_FLAG_CRC_FIX 0x00000002
+#define BNX2_PHY_FLAG_PARALLEL_DETECT 0x00000004
+#define BNX2_PHY_FLAG_2_5G_CAPABLE 0x00000008
+#define BNX2_PHY_FLAG_INT_MODE_MASK 0x00000300
+#define BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING 0x00000100
+#define BNX2_PHY_FLAG_INT_MODE_LINK_READY 0x00000200
+#define BNX2_PHY_FLAG_DIS_EARLY_DAC 0x00000400
+#define BNX2_PHY_FLAG_REMOTE_PHY_CAP 0x00000800
+#define BNX2_PHY_FLAG_FORCED_DOWN 0x00001000
+#define BNX2_PHY_FLAG_NO_PARALLEL 0x00002000
+
+ u32 mii_bmcr;
+ u32 mii_bmsr;
+ u32 mii_bmsr1;
+ u32 mii_adv;
+ u32 mii_lpa;
+ u32 mii_up1;
+
+ u32 chip_id;
+ /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
+#define CHIP_NUM(bp) (((bp)->chip_id) & 0xffff0000)
+#define CHIP_NUM_5706 0x57060000
+#define CHIP_NUM_5708 0x57080000
+#define CHIP_NUM_5709 0x57090000
+
+#define CHIP_REV(bp) (((bp)->chip_id) & 0x0000f000)
+#define CHIP_REV_Ax 0x00000000
+#define CHIP_REV_Bx 0x00001000
+#define CHIP_REV_Cx 0x00002000
+
+#define CHIP_METAL(bp) (((bp)->chip_id) & 0x00000ff0)
+#define CHIP_BONDING(bp) (((bp)->chip_id) & 0x0000000f)
+
+#define CHIP_ID(bp) (((bp)->chip_id) & 0xfffffff0)
+#define CHIP_ID_5706_A0 0x57060000
+#define CHIP_ID_5706_A1 0x57060010
+#define CHIP_ID_5706_A2 0x57060020
+#define CHIP_ID_5708_A0 0x57080000
+#define CHIP_ID_5708_B0 0x57081000
+#define CHIP_ID_5708_B1 0x57081010
+#define CHIP_ID_5709_A0 0x57090000
+#define CHIP_ID_5709_A1 0x57090010
+
+#define CHIP_BOND_ID(bp) (((bp)->chip_id) & 0xf)
+
+/* A serdes chip will have the first bit of the bond id set. */
+#define CHIP_BOND_ID_SERDES_BIT 0x01
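+
+/*
+ * Worked example (editor's illustration): decoding chip_id == 0x57081010:
+ *
+ *	CHIP_NUM(bp) == 0x57080000	// CHIP_NUM_5708
+ *	CHIP_REV(bp) == 0x00001000	// CHIP_REV_Bx
+ *	CHIP_ID(bp)  == 0x57081010	// CHIP_ID_5708_B1
+ */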
+
+ u32 phy_addr;
+ u32 phy_id;
+
+ u16 bus_speed_mhz;
+ u8 wol;
+
+ u8 pad;
+
+ u16 fw_wr_seq;
+ u16 fw_drv_pulse_wr_seq;
+
+ int rx_max_ring;
+ int rx_ring_size;
+
+ int rx_max_pg_ring;
+ int rx_pg_ring_size;
+
+ u16 tx_quick_cons_trip;
+ u16 tx_quick_cons_trip_int;
+ u16 rx_quick_cons_trip;
+ u16 rx_quick_cons_trip_int;
+ u16 comp_prod_trip;
+ u16 comp_prod_trip_int;
+ u16 tx_ticks;
+ u16 tx_ticks_int;
+ u16 com_ticks;
+ u16 com_ticks_int;
+ u16 cmd_ticks;
+ u16 cmd_ticks_int;
+ u16 rx_ticks;
+ u16 rx_ticks_int;
+
+ u32 stats_ticks;
+
+ dma_addr_t status_blk_mapping;
+
+ struct statistics_block *stats_blk;
+ struct statistics_block *temp_stats_blk;
+ dma_addr_t stats_blk_mapping;
+
+ int ctx_pages;
+ void *ctx_blk[4];
+ dma_addr_t ctx_blk_mapping[4];
+
+ u32 hc_cmd;
+ u32 rx_mode;
+
+ u16 req_line_speed;
+ u8 req_duplex;
+
+ u8 phy_port;
+ u8 link_up;
+
+ u16 line_speed;
+ u8 duplex;
+ u8 flow_ctrl; /* actual flow ctrl settings */
+ /* may be different from */
+ /* req_flow_ctrl if autoneg */
+ u32 advertising;
+
+ u8 req_flow_ctrl; /* flow ctrl advertisement */
+ /* settings or forced */
+ /* settings */
+ u8 autoneg;
+#define AUTONEG_SPEED 1
+#define AUTONEG_FLOW_CTRL 2
+
+ u8 loopback;
+#define MAC_LOOPBACK 1
+#define PHY_LOOPBACK 2
+
+ u8 serdes_an_pending;
+
+ u8 mac_addr[8];
+
+ u32 shmem_base;
+
+ char fw_version[32];
+
+ int pm_cap;
+ int pcix_cap;
+
+ const struct flash_spec *flash_info;
+ u32 flash_size;
+
+ int status_stats_size;
+
+ struct bnx2_irq irq_tbl[BNX2_MAX_MSIX_VEC];
+ int irq_nvecs;
+
+ u8 num_tx_rings;
+ u8 num_rx_rings;
+
+ u32 leds_save;
+ u32 idle_chk_status_idx;
+
+#ifdef BCM_CNIC
+ struct mutex cnic_lock;
+ struct cnic_eth_dev cnic_eth_dev;
+#endif
+
+ const struct firmware *mips_firmware;
+ const struct firmware *rv2p_firmware;
+};
+
+#define REG_RD(bp, offset) \
+ readl(bp->regview + offset)
+
+#define REG_WR(bp, offset, val) \
+ writel(val, bp->regview + offset)
+
+#define REG_WR16(bp, offset, val) \
+ writew(val, bp->regview + offset)
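+
+/*
+ * Usage sketch (editor's illustration, assuming the host-coalescing
+ * definitions BNX2_HC_COMMAND/BNX2_HC_COMMAND_COAL_NOW from earlier in
+ * this header): these macros expand to readl()/writel() on the BAR0
+ * mapping, e.g. forcing an immediate coalescing cycle:
+ *
+ *	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
+ *	REG_RD(bp, BNX2_HC_COMMAND);	// read back to flush the write
+ */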
+
+struct cpu_reg {
+ u32 mode;
+ u32 mode_value_halt;
+ u32 mode_value_sstep;
+
+ u32 state;
+ u32 state_value_clear;
+
+ u32 gpr0;
+ u32 evmask;
+ u32 pc;
+ u32 inst;
+ u32 bp;
+
+ u32 spad_base;
+
+ u32 mips_view_base;
+};
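+
+/*
+ * One of these tables describes each on-chip MIPS processor (COM, CP, RXP,
+ * TPAT, TXP); bnx2_fw.h below provides the initialized instances. Halting
+ * a processor, for instance, is a single write of mode_value_halt to its
+ * mode register (editor's sketch, assuming the indirect-access helper
+ * bnx2_reg_wr_ind() from the .c file):
+ *
+ *	bnx2_reg_wr_ind(bp, cpu_reg->mode, cpu_reg->mode_value_halt);
+ */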
+
+struct bnx2_fw_file_section {
+ __be32 addr;
+ __be32 len;
+ __be32 offset;
+};
+
+struct bnx2_mips_fw_file_entry {
+ __be32 start_addr;
+ struct bnx2_fw_file_section text;
+ struct bnx2_fw_file_section data;
+ struct bnx2_fw_file_section rodata;
+};
+
+struct bnx2_rv2p_fw_file_entry {
+ struct bnx2_fw_file_section rv2p;
+ __be32 fixup[8];
+};
+
+struct bnx2_mips_fw_file {
+ struct bnx2_mips_fw_file_entry com;
+ struct bnx2_mips_fw_file_entry cp;
+ struct bnx2_mips_fw_file_entry rxp;
+ struct bnx2_mips_fw_file_entry tpat;
+ struct bnx2_mips_fw_file_entry txp;
+};
+
+struct bnx2_rv2p_fw_file {
+ struct bnx2_rv2p_fw_file_entry proc1;
+ struct bnx2_rv2p_fw_file_entry proc2;
+};
+
+#define RV2P_P1_FIXUP_PAGE_SIZE_IDX 0
+#define RV2P_BD_PAGE_SIZE_MSK 0xffff
+#define RV2P_BD_PAGE_SIZE ((BCM_PAGE_SIZE / 16) - 1)
+
+#define RV2P_PROC1 0
+#define RV2P_PROC2 1
+
+
+/* This value (in milliseconds) determines how often the driver issues
+ * the PULSE message code.  The firmware monitors this periodic pulse to
+ * determine when to switch to an OS-absent mode. */
+#define BNX2_DRV_PULSE_PERIOD_MS 250
+
+/* This value (in milliseconds) determines how long the driver should
+ * wait for an acknowledgement from the firmware before timing out.  Once
+ * the timeout expires, the driver assumes no firmware is running and skips
+ * firmware-driver synchronization during a driver reset. */
+#define BNX2_FW_ACK_TIME_OUT_MS 1000
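+
+/*
+ * Sketch of the pulse itself (editor's illustration, assuming the
+ * bnx2_shmem_wr() helper from the .c file): every BNX2_DRV_PULSE_PERIOD_MS
+ * the driver advances a sequence number in the pulse mailbox defined below:
+ *
+ *	bp->fw_drv_pulse_wr_seq = (bp->fw_drv_pulse_wr_seq + 1) &
+ *				  BNX2_DRV_PULSE_SEQ_MASK;
+ *	bnx2_shmem_wr(bp, BNX2_DRV_PULSE_MB, bp->fw_drv_pulse_wr_seq);
+ */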
+
+
+#define BNX2_DRV_RESET_SIGNATURE 0x00000000
+#define BNX2_DRV_RESET_SIGNATURE_MAGIC 0x4841564b /* HAVK */
+/* #define DRV_RESET_SIGNATURE_MAGIC 0x47495352 */ /* RSIG */
+
+#define BNX2_DRV_MB 0x00000004
+#define BNX2_DRV_MSG_CODE 0xff000000
+#define BNX2_DRV_MSG_CODE_RESET 0x01000000
+#define BNX2_DRV_MSG_CODE_UNLOAD 0x02000000
+#define BNX2_DRV_MSG_CODE_SHUTDOWN 0x03000000
+#define BNX2_DRV_MSG_CODE_SUSPEND_WOL 0x04000000
+#define BNX2_DRV_MSG_CODE_FW_TIMEOUT 0x05000000
+#define BNX2_DRV_MSG_CODE_PULSE 0x06000000
+#define BNX2_DRV_MSG_CODE_DIAG 0x07000000
+#define BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL 0x09000000
+#define BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN 0x0b000000
+#define BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE 0x0d000000
+#define BNX2_DRV_MSG_CODE_CMD_SET_LINK 0x10000000
+
+#define BNX2_DRV_MSG_DATA 0x00ff0000
+#define BNX2_DRV_MSG_DATA_WAIT0 0x00010000
+#define BNX2_DRV_MSG_DATA_WAIT1 0x00020000
+#define BNX2_DRV_MSG_DATA_WAIT2 0x00030000
+#define BNX2_DRV_MSG_DATA_WAIT3 0x00040000
+
+#define BNX2_DRV_MSG_SEQ 0x0000ffff
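+
+/*
+ * Composition sketch (editor's illustration): a mailbox word packs a
+ * command code, optional data and a sequence number, e.g. for a reset:
+ *
+ *	msg = BNX2_DRV_MSG_CODE_RESET | BNX2_DRV_MSG_DATA_WAIT0 |
+ *	      (++bp->fw_wr_seq & BNX2_DRV_MSG_SEQ);
+ */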
+
+#define BNX2_FW_MB 0x00000008
+#define BNX2_FW_MSG_ACK 0x0000ffff
+#define BNX2_FW_MSG_STATUS_MASK 0x00ff0000
+#define BNX2_FW_MSG_STATUS_OK 0x00000000
+#define BNX2_FW_MSG_STATUS_FAILURE 0x00ff0000
+
+#define BNX2_LINK_STATUS 0x0000000c
+#define BNX2_LINK_STATUS_INIT_VALUE 0xffffffff
+#define BNX2_LINK_STATUS_LINK_UP 0x1
+#define BNX2_LINK_STATUS_LINK_DOWN 0x0
+#define BNX2_LINK_STATUS_SPEED_MASK 0x1e
+#define BNX2_LINK_STATUS_AN_INCOMPLETE (0<<1)
+#define BNX2_LINK_STATUS_10HALF (1<<1)
+#define BNX2_LINK_STATUS_10FULL (2<<1)
+#define BNX2_LINK_STATUS_100HALF (3<<1)
+#define BNX2_LINK_STATUS_100BASE_T4 (4<<1)
+#define BNX2_LINK_STATUS_100FULL (5<<1)
+#define BNX2_LINK_STATUS_1000HALF (6<<1)
+#define BNX2_LINK_STATUS_1000FULL (7<<1)
+#define BNX2_LINK_STATUS_2500HALF (8<<1)
+#define BNX2_LINK_STATUS_2500FULL (9<<1)
+#define BNX2_LINK_STATUS_AN_ENABLED (1<<5)
+#define BNX2_LINK_STATUS_AN_COMPLETE (1<<6)
+#define BNX2_LINK_STATUS_PARALLEL_DET (1<<7)
+#define BNX2_LINK_STATUS_RESERVED (1<<8)
+#define BNX2_LINK_STATUS_PARTNER_AD_1000FULL (1<<9)
+#define BNX2_LINK_STATUS_PARTNER_AD_1000HALF (1<<10)
+#define BNX2_LINK_STATUS_PARTNER_AD_100BT4 (1<<11)
+#define BNX2_LINK_STATUS_PARTNER_AD_100FULL (1<<12)
+#define BNX2_LINK_STATUS_PARTNER_AD_100HALF (1<<13)
+#define BNX2_LINK_STATUS_PARTNER_AD_10FULL (1<<14)
+#define BNX2_LINK_STATUS_PARTNER_AD_10HALF (1<<15)
+#define BNX2_LINK_STATUS_TX_FC_ENABLED (1<<16)
+#define BNX2_LINK_STATUS_RX_FC_ENABLED (1<<17)
+#define BNX2_LINK_STATUS_PARTNER_SYM_PAUSE_CAP (1<<18)
+#define BNX2_LINK_STATUS_PARTNER_ASYM_PAUSE_CAP (1<<19)
+#define BNX2_LINK_STATUS_SERDES_LINK (1<<20)
+#define BNX2_LINK_STATUS_PARTNER_AD_2500FULL (1<<21)
+#define BNX2_LINK_STATUS_PARTNER_AD_2500HALF (1<<22)
+#define BNX2_LINK_STATUS_HEART_BEAT_EXPIRED (1<<31)
+
+#define BNX2_DRV_PULSE_MB 0x00000010
+#define BNX2_DRV_PULSE_SEQ_MASK 0x00007fff
+
+/* Indicate to the firmware not to go into the
+ * OS-absent mode when it is not getting the driver pulse.
+ * This is used for debugging. */
+#define BNX2_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE 0x00080000
+
+#define BNX2_DRV_MB_ARG0 0x00000014
+#define BNX2_NETLINK_SET_LINK_SPEED_10HALF (1<<0)
+#define BNX2_NETLINK_SET_LINK_SPEED_10FULL (1<<1)
+#define BNX2_NETLINK_SET_LINK_SPEED_10 \
+ (BNX2_NETLINK_SET_LINK_SPEED_10HALF | \
+ BNX2_NETLINK_SET_LINK_SPEED_10FULL)
+#define BNX2_NETLINK_SET_LINK_SPEED_100HALF (1<<2)
+#define BNX2_NETLINK_SET_LINK_SPEED_100FULL (1<<3)
+#define BNX2_NETLINK_SET_LINK_SPEED_100 \
+ (BNX2_NETLINK_SET_LINK_SPEED_100HALF | \
+ BNX2_NETLINK_SET_LINK_SPEED_100FULL)
+#define BNX2_NETLINK_SET_LINK_SPEED_1GHALF (1<<4)
+#define BNX2_NETLINK_SET_LINK_SPEED_1GFULL (1<<5)
+#define BNX2_NETLINK_SET_LINK_SPEED_2G5HALF (1<<6)
+#define BNX2_NETLINK_SET_LINK_SPEED_2G5FULL (1<<7)
+#define BNX2_NETLINK_SET_LINK_SPEED_10GHALF (1<<8)
+#define BNX2_NETLINK_SET_LINK_SPEED_10GFULL (1<<9)
+#define BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG (1<<10)
+#define BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE (1<<11)
+#define BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE (1<<12)
+#define BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE (1<<13)
+#define BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED (1<<14)
+#define BNX2_NETLINK_SET_LINK_PHY_RESET (1<<15)
+
+#define BNX2_DEV_INFO_SIGNATURE 0x00000020
+#define BNX2_DEV_INFO_SIGNATURE_MAGIC 0x44564900
+#define BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK 0xffffff00
+#define BNX2_DEV_INFO_FEATURE_CFG_VALID 0x01
+#define BNX2_DEV_INFO_SECONDARY_PORT 0x80
+#define BNX2_DEV_INFO_DRV_ALWAYS_ALIVE 0x40
+
+#define BNX2_SHARED_HW_CFG_PART_NUM 0x00000024
+
+#define BNX2_SHARED_HW_CFG_POWER_DISSIPATED 0x00000034
+#define BNX2_SHARED_HW_CFG_POWER_STATE_D3_MASK 0xff000000
+#define BNX2_SHARED_HW_CFG_POWER_STATE_D2_MASK 0xff0000
+#define BNX2_SHARED_HW_CFG_POWER_STATE_D1_MASK 0xff00
+#define BNX2_SHARED_HW_CFG_POWER_STATE_D0_MASK 0xff
+
+#define BNX2_SHARED_HW_CFG_POWER_CONSUMED 0x00000038
+#define BNX2_SHARED_HW_CFG_CONFIG 0x0000003c
+#define BNX2_SHARED_HW_CFG_DESIGN_NIC 0
+#define BNX2_SHARED_HW_CFG_DESIGN_LOM 0x1
+#define BNX2_SHARED_HW_CFG_PHY_COPPER 0
+#define BNX2_SHARED_HW_CFG_PHY_FIBER 0x2
+#define BNX2_SHARED_HW_CFG_PHY_2_5G 0x20
+#define BNX2_SHARED_HW_CFG_PHY_BACKPLANE 0x40
+#define BNX2_SHARED_HW_CFG_LED_MODE_SHIFT_BITS 8
+#define BNX2_SHARED_HW_CFG_LED_MODE_MASK 0x300
+#define BNX2_SHARED_HW_CFG_LED_MODE_MAC 0
+#define BNX2_SHARED_HW_CFG_LED_MODE_GPHY1 0x100
+#define BNX2_SHARED_HW_CFG_LED_MODE_GPHY2 0x200
+#define BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX 0x8000
+
+#define BNX2_SHARED_HW_CFG_CONFIG2 0x00000040
+#define BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK 0x00fff000
+
+#define BNX2_DEV_INFO_BC_REV 0x0000004c
+
+#define BNX2_PORT_HW_CFG_MAC_UPPER 0x00000050
+#define BNX2_PORT_HW_CFG_UPPERMAC_MASK 0xffff
+
+#define BNX2_PORT_HW_CFG_MAC_LOWER 0x00000054
+#define BNX2_PORT_HW_CFG_CONFIG 0x00000058
+#define BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK 0x0000ffff
+#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK 0x001f0000
+#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_AN 0x00000000
+#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G 0x00030000
+#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_2_5G 0x00040000
+
+#define BNX2_PORT_HW_CFG_IMD_MAC_A_UPPER 0x00000068
+#define BNX2_PORT_HW_CFG_IMD_MAC_A_LOWER 0x0000006c
+#define BNX2_PORT_HW_CFG_IMD_MAC_B_UPPER 0x00000070
+#define BNX2_PORT_HW_CFG_IMD_MAC_B_LOWER 0x00000074
+#define BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER 0x00000078
+#define BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER 0x0000007c
+
+#define BNX2_DEV_INFO_PER_PORT_HW_CONFIG2 0x000000b4
+
+#define BNX2_DEV_INFO_FORMAT_REV 0x000000c4
+#define BNX2_DEV_INFO_FORMAT_REV_MASK 0xff000000
+#define BNX2_DEV_INFO_FORMAT_REV_ID ('A' << 24)
+
+#define BNX2_SHARED_FEATURE 0x000000c8
+#define BNX2_SHARED_FEATURE_MASK 0xffffffff
+
+#define BNX2_PORT_FEATURE 0x000000d8
+#define BNX2_PORT2_FEATURE 0x0000014c
+#define BNX2_PORT_FEATURE_WOL_ENABLED 0x01000000
+#define BNX2_PORT_FEATURE_MBA_ENABLED 0x02000000
+#define BNX2_PORT_FEATURE_ASF_ENABLED 0x04000000
+#define BNX2_PORT_FEATURE_IMD_ENABLED 0x08000000
+#define BNX2_PORT_FEATURE_BAR1_SIZE_MASK 0xf
+#define BNX2_PORT_FEATURE_BAR1_SIZE_DISABLED 0x0
+#define BNX2_PORT_FEATURE_BAR1_SIZE_64K 0x1
+#define BNX2_PORT_FEATURE_BAR1_SIZE_128K 0x2
+#define BNX2_PORT_FEATURE_BAR1_SIZE_256K 0x3
+#define BNX2_PORT_FEATURE_BAR1_SIZE_512K 0x4
+#define BNX2_PORT_FEATURE_BAR1_SIZE_1M 0x5
+#define BNX2_PORT_FEATURE_BAR1_SIZE_2M 0x6
+#define BNX2_PORT_FEATURE_BAR1_SIZE_4M 0x7
+#define BNX2_PORT_FEATURE_BAR1_SIZE_8M 0x8
+#define BNX2_PORT_FEATURE_BAR1_SIZE_16M 0x9
+#define BNX2_PORT_FEATURE_BAR1_SIZE_32M 0xa
+#define BNX2_PORT_FEATURE_BAR1_SIZE_64M 0xb
+#define BNX2_PORT_FEATURE_BAR1_SIZE_128M 0xc
+#define BNX2_PORT_FEATURE_BAR1_SIZE_256M 0xd
+#define BNX2_PORT_FEATURE_BAR1_SIZE_512M 0xe
+#define BNX2_PORT_FEATURE_BAR1_SIZE_1G 0xf
+
+#define BNX2_PORT_FEATURE_WOL 0xdc
+#define BNX2_PORT2_FEATURE_WOL 0x150
+#define BNX2_PORT_FEATURE_WOL_DEFAULT_SHIFT_BITS 4
+#define BNX2_PORT_FEATURE_WOL_DEFAULT_MASK 0x30
+#define BNX2_PORT_FEATURE_WOL_DEFAULT_DISABLE 0
+#define BNX2_PORT_FEATURE_WOL_DEFAULT_MAGIC 0x10
+#define BNX2_PORT_FEATURE_WOL_DEFAULT_ACPI 0x20
+#define BNX2_PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI 0x30
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_MASK 0xf
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_AUTONEG 0
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_10HALF 1
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_10FULL 2
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_100HALF 3
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_100FULL 4
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_1000HALF 5
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_1000FULL 6
+#define BNX2_PORT_FEATURE_WOL_AUTONEG_ADVERTISE_1000 0x40
+#define BNX2_PORT_FEATURE_WOL_RESERVED_PAUSE_CAP 0x400
+#define BNX2_PORT_FEATURE_WOL_RESERVED_ASYM_PAUSE_CAP 0x800
+
+#define BNX2_PORT_FEATURE_MBA 0xe0
+#define BNX2_PORT2_FEATURE_MBA 0x154
+#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT_BITS 0
+#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x3
+#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0
+#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 1
+#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 2
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_SHIFT_BITS 2
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3c
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_AUTONEG 0
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_10HALF 0x4
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_10FULL 0x8
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_100HALF 0xc
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_100FULL 0x10
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_1000HALF 0x14
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_1000FULL 0x18
+#define BNX2_PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x40
+#define BNX2_PORT_FEATURE_MBA_HOTKEY_CTRL_S 0
+#define BNX2_PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x80
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT_BITS 8
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0xff00
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_1K 0x100
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x200
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x300
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x400
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x500
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x600
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x700
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x800
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x900
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0xa00
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0xb00
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0xc00
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0xd00
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0xe00
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0xf00
+#define BNX2_PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT_BITS 16
+#define BNX2_PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0xf0000
+#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT_BITS 20
+#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x300000
+#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0
+#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x100000
+#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x200000
+#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x300000
+
+#define BNX2_PORT_FEATURE_IMD 0xe4
+#define BNX2_PORT2_FEATURE_IMD 0x158
+#define BNX2_PORT_FEATURE_IMD_LINK_OVERRIDE_DEFAULT 0
+#define BNX2_PORT_FEATURE_IMD_LINK_OVERRIDE_ENABLE 1
+
+#define BNX2_PORT_FEATURE_VLAN 0xe8
+#define BNX2_PORT2_FEATURE_VLAN 0x15c
+#define BNX2_PORT_FEATURE_MBA_VLAN_TAG_MASK 0xffff
+#define BNX2_PORT_FEATURE_MBA_VLAN_ENABLE 0x10000
+
+#define BNX2_MFW_VER_PTR 0x0000014c
+
+#define BNX2_BC_STATE_RESET_TYPE 0x000001c0
+#define BNX2_BC_STATE_RESET_TYPE_SIG 0x00005254
+#define BNX2_BC_STATE_RESET_TYPE_SIG_MASK 0x0000ffff
+#define BNX2_BC_STATE_RESET_TYPE_NONE (BNX2_BC_STATE_RESET_TYPE_SIG | \
+ 0x00010000)
+#define BNX2_BC_STATE_RESET_TYPE_PCI (BNX2_BC_STATE_RESET_TYPE_SIG | \
+ 0x00020000)
+#define BNX2_BC_STATE_RESET_TYPE_VAUX (BNX2_BC_STATE_RESET_TYPE_SIG | \
+ 0x00030000)
+#define BNX2_BC_STATE_RESET_TYPE_DRV_MASK BNX2_DRV_MSG_CODE
+#define BNX2_BC_STATE_RESET_TYPE_DRV_RESET (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					    BNX2_DRV_MSG_CODE_RESET)
+#define BNX2_BC_STATE_RESET_TYPE_DRV_UNLOAD (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					     BNX2_DRV_MSG_CODE_UNLOAD)
+#define BNX2_BC_STATE_RESET_TYPE_DRV_SHUTDOWN (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					       BNX2_DRV_MSG_CODE_SHUTDOWN)
+#define BNX2_BC_STATE_RESET_TYPE_DRV_WOL (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					  BNX2_DRV_MSG_CODE_SUSPEND_WOL)
+#define BNX2_BC_STATE_RESET_TYPE_DRV_DIAG (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					   BNX2_DRV_MSG_CODE_DIAG)
+#define BNX2_BC_STATE_RESET_TYPE_VALUE(msg) (BNX2_BC_STATE_RESET_TYPE_SIG | \
+ (msg))
+
+#define BNX2_BC_STATE 0x000001c4
+#define BNX2_BC_STATE_ERR_MASK 0x0000ff00
+#define BNX2_BC_STATE_SIGN 0x42530000
+#define BNX2_BC_STATE_SIGN_MASK 0xffff0000
+#define BNX2_BC_STATE_BC1_START (BNX2_BC_STATE_SIGN | 0x1)
+#define BNX2_BC_STATE_GET_NVM_CFG1 (BNX2_BC_STATE_SIGN | 0x2)
+#define BNX2_BC_STATE_PROG_BAR (BNX2_BC_STATE_SIGN | 0x3)
+#define BNX2_BC_STATE_INIT_VID (BNX2_BC_STATE_SIGN | 0x4)
+#define BNX2_BC_STATE_GET_NVM_CFG2 (BNX2_BC_STATE_SIGN | 0x5)
+#define BNX2_BC_STATE_APPLY_WKARND (BNX2_BC_STATE_SIGN | 0x6)
+#define BNX2_BC_STATE_LOAD_BC2 (BNX2_BC_STATE_SIGN | 0x7)
+#define BNX2_BC_STATE_GOING_BC2 (BNX2_BC_STATE_SIGN | 0x8)
+#define BNX2_BC_STATE_GOING_DIAG (BNX2_BC_STATE_SIGN | 0x9)
+#define BNX2_BC_STATE_RT_FINAL_INIT (BNX2_BC_STATE_SIGN | 0x81)
+#define BNX2_BC_STATE_RT_WKARND (BNX2_BC_STATE_SIGN | 0x82)
+#define BNX2_BC_STATE_RT_DRV_PULSE (BNX2_BC_STATE_SIGN | 0x83)
+#define BNX2_BC_STATE_RT_FIOEVTS (BNX2_BC_STATE_SIGN | 0x84)
+#define BNX2_BC_STATE_RT_DRV_CMD (BNX2_BC_STATE_SIGN | 0x85)
+#define BNX2_BC_STATE_RT_LOW_POWER (BNX2_BC_STATE_SIGN | 0x86)
+#define BNX2_BC_STATE_RT_SET_WOL (BNX2_BC_STATE_SIGN | 0x87)
+#define BNX2_BC_STATE_RT_OTHER_FW (BNX2_BC_STATE_SIGN | 0x88)
+#define BNX2_BC_STATE_RT_GOING_D3 (BNX2_BC_STATE_SIGN | 0x89)
+#define BNX2_BC_STATE_ERR_BAD_VERSION (BNX2_BC_STATE_SIGN | 0x0100)
+#define BNX2_BC_STATE_ERR_BAD_BC2_CRC (BNX2_BC_STATE_SIGN | 0x0200)
+#define BNX2_BC_STATE_ERR_BC1_LOOP (BNX2_BC_STATE_SIGN | 0x0300)
+#define BNX2_BC_STATE_ERR_UNKNOWN_CMD (BNX2_BC_STATE_SIGN | 0x0400)
+#define BNX2_BC_STATE_ERR_DRV_DEAD (BNX2_BC_STATE_SIGN | 0x0500)
+#define BNX2_BC_STATE_ERR_NO_RXP (BNX2_BC_STATE_SIGN | 0x0600)
+#define BNX2_BC_STATE_ERR_TOO_MANY_RBUF (BNX2_BC_STATE_SIGN | 0x0700)
+
+#define BNX2_BC_STATE_CONDITION 0x000001c8
+#define BNX2_CONDITION_MFW_RUN_UNKNOWN 0x00000000
+#define BNX2_CONDITION_MFW_RUN_IPMI 0x00002000
+#define BNX2_CONDITION_MFW_RUN_UMP 0x00004000
+#define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000
+#define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000
+#define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000
+
+#define BNX2_BC_STATE_DEBUG_CMD 0x1dc
+#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000
+#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE_MASK 0xffff0000
+#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_CNT_MASK 0xffff
+#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_INFINITE 0xffff
+
+#define BNX2_FW_EVT_CODE_MB 0x354
+#define BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT 0x00000000
+#define BNX2_FW_EVT_CODE_LINK_EVENT 0x00000001
+
+#define BNX2_DRV_ACK_CAP_MB 0x364
+#define BNX2_DRV_ACK_CAP_SIGNATURE 0x35450000
+#define BNX2_CAPABILITY_SIGNATURE_MASK 0xffff0000
+
+#define BNX2_FW_CAP_MB 0x368
+#define BNX2_FW_CAP_SIGNATURE 0xaa550000
+#define BNX2_FW_ACK_DRV_SIGNATURE 0x52500000
+#define BNX2_FW_CAP_SIGNATURE_MASK 0xffff0000
+#define BNX2_FW_CAP_REMOTE_PHY_CAPABLE 0x00000001
+#define BNX2_FW_CAP_REMOTE_PHY_PRESENT 0x00000002
+#define BNX2_FW_CAP_MFW_CAN_KEEP_VLAN 0x00000008
+#define BNX2_FW_CAP_BC_CAN_KEEP_VLAN 0x00000010
+#define BNX2_FW_CAP_CAN_KEEP_VLAN (BNX2_FW_CAP_BC_CAN_KEEP_VLAN | \
+ BNX2_FW_CAP_MFW_CAN_KEEP_VLAN)
+
+#define BNX2_RPHY_SIGNATURE 0x36c
+#define BNX2_RPHY_LOAD_SIGNATURE 0x5a5a5a5a
+
+#define BNX2_RPHY_FLAGS 0x370
+#define BNX2_RPHY_SERDES_LINK 0x374
+#define BNX2_RPHY_COPPER_LINK 0x378
+
+#define BNX2_ISCSI_INITIATOR 0x3dc
+#define BNX2_ISCSI_INITIATOR_EN 0x00080000
+
+#define BNX2_ISCSI_MAX_CONN 0x3e4
+#define BNX2_ISCSI_MAX_CONN_MASK 0xffff0000
+#define BNX2_ISCSI_MAX_CONN_SHIFT 16
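+
+/*
+ * Extraction sketch (editor's illustration, assuming the bnx2_shmem_rd()
+ * helper from the .c file):
+ *
+ *	max_conn = (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
+ *		    BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
+ */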
+
+#define HOST_VIEW_SHMEM_BASE 0x167c00
+
+#define DP_SHMEM_LINE(bp, offset) \
+ netdev_err(bp->dev, "DEBUG: %08x: %08x %08x %08x %08x\n", \
+ offset, \
+ bnx2_shmem_rd(bp, offset), \
+ bnx2_shmem_rd(bp, offset + 4), \
+ bnx2_shmem_rd(bp, offset + 8), \
+ bnx2_shmem_rd(bp, offset + 12))
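+
+/*
+ * Usage sketch (editor's illustration): dump four consecutive shared-memory
+ * words, e.g. around the bootcode state when a reset fails:
+ *
+ *	DP_SHMEM_LINE(bp, BNX2_BC_STATE_RESET_TYPE);
+ */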
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnx2_fw.h b/drivers/net/ethernet/broadcom/bnx2_fw.h
new file mode 100644
index 000000000000..940eb91f209d
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2_fw.h
@@ -0,0 +1,88 @@
+/* bnx2_fw.h: Broadcom NX2 network driver.
+ *
+ * Copyright (c) 2004, 2005, 2006, 2007 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+/* Initialized Values for the Completion Processor. */
+static const struct cpu_reg cpu_reg_com = {
+ .mode = BNX2_COM_CPU_MODE,
+ .mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT,
+ .mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA,
+ .state = BNX2_COM_CPU_STATE,
+ .state_value_clear = 0xffffff,
+ .gpr0 = BNX2_COM_CPU_REG_FILE,
+ .evmask = BNX2_COM_CPU_EVENT_MASK,
+ .pc = BNX2_COM_CPU_PROGRAM_COUNTER,
+ .inst = BNX2_COM_CPU_INSTRUCTION,
+ .bp = BNX2_COM_CPU_HW_BREAKPOINT,
+ .spad_base = BNX2_COM_SCRATCH,
+ .mips_view_base = 0x8000000,
+};
+
+/* Initialized Values for the Command Processor. */
+static const struct cpu_reg cpu_reg_cp = {
+ .mode = BNX2_CP_CPU_MODE,
+ .mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT,
+ .mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA,
+ .state = BNX2_CP_CPU_STATE,
+ .state_value_clear = 0xffffff,
+ .gpr0 = BNX2_CP_CPU_REG_FILE,
+ .evmask = BNX2_CP_CPU_EVENT_MASK,
+ .pc = BNX2_CP_CPU_PROGRAM_COUNTER,
+ .inst = BNX2_CP_CPU_INSTRUCTION,
+ .bp = BNX2_CP_CPU_HW_BREAKPOINT,
+ .spad_base = BNX2_CP_SCRATCH,
+ .mips_view_base = 0x8000000,
+};
+
+/* Initialized Values for the RX Processor. */
+static const struct cpu_reg cpu_reg_rxp = {
+ .mode = BNX2_RXP_CPU_MODE,
+ .mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT,
+ .mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA,
+ .state = BNX2_RXP_CPU_STATE,
+ .state_value_clear = 0xffffff,
+ .gpr0 = BNX2_RXP_CPU_REG_FILE,
+ .evmask = BNX2_RXP_CPU_EVENT_MASK,
+ .pc = BNX2_RXP_CPU_PROGRAM_COUNTER,
+ .inst = BNX2_RXP_CPU_INSTRUCTION,
+ .bp = BNX2_RXP_CPU_HW_BREAKPOINT,
+ .spad_base = BNX2_RXP_SCRATCH,
+ .mips_view_base = 0x8000000,
+};
+
+/* Initialized Values for the TX Patch-up Processor. */
+static const struct cpu_reg cpu_reg_tpat = {
+ .mode = BNX2_TPAT_CPU_MODE,
+ .mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT,
+ .mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA,
+ .state = BNX2_TPAT_CPU_STATE,
+ .state_value_clear = 0xffffff,
+ .gpr0 = BNX2_TPAT_CPU_REG_FILE,
+ .evmask = BNX2_TPAT_CPU_EVENT_MASK,
+ .pc = BNX2_TPAT_CPU_PROGRAM_COUNTER,
+ .inst = BNX2_TPAT_CPU_INSTRUCTION,
+ .bp = BNX2_TPAT_CPU_HW_BREAKPOINT,
+ .spad_base = BNX2_TPAT_SCRATCH,
+ .mips_view_base = 0x8000000,
+};
+
+/* Initialized Values for the TX Processor. */
+static const struct cpu_reg cpu_reg_txp = {
+ .mode = BNX2_TXP_CPU_MODE,
+ .mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT,
+ .mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA,
+ .state = BNX2_TXP_CPU_STATE,
+ .state_value_clear = 0xffffff,
+ .gpr0 = BNX2_TXP_CPU_REG_FILE,
+ .evmask = BNX2_TXP_CPU_EVENT_MASK,
+ .pc = BNX2_TXP_CPU_PROGRAM_COUNTER,
+ .inst = BNX2_TXP_CPU_INSTRUCTION,
+ .bp = BNX2_TXP_CPU_HW_BREAKPOINT,
+ .spad_base = BNX2_TXP_SCRATCH,
+ .mips_view_base = 0x8000000,
+};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/Makefile b/drivers/net/ethernet/broadcom/bnx2x/Makefile
new file mode 100644
index 000000000000..48fbdd48f88f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Broadcom 10-Gigabit Ethernet driver
+#
+
+obj-$(CONFIG_BNX2X) += bnx2x.o
+
+bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
new file mode 100644
index 000000000000..2f92487724c6
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -0,0 +1,2069 @@
+/* bnx2x.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ */
+
+#ifndef BNX2X_H
+#define BNX2X_H
+#include <linux/netdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+
+/* compilation time flags */
+
+/* define this to make the driver freeze on error to allow getting debug info
+ * (you will need to reboot afterwards) */
+/* #define BNX2X_STOP_ON_ERROR */
+
+#define DRV_MODULE_VERSION "1.70.00-0"
+#define DRV_MODULE_RELDATE "2011/06/13"
+#define BNX2X_BC_VER 0x040200
+
+#if defined(CONFIG_DCB)
+#define BCM_DCBNL
+#endif
+#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
+#define BCM_CNIC 1
+#include "../cnic_if.h"
+#endif
+
+#ifdef BCM_CNIC
+#define BNX2X_MIN_MSIX_VEC_CNT 3
+#define BNX2X_MSIX_VEC_FP_START 2
+#else
+#define BNX2X_MIN_MSIX_VEC_CNT 2
+#define BNX2X_MSIX_VEC_FP_START 1
+#endif
+
+#include <linux/mdio.h>
+
+#include "bnx2x_reg.h"
+#include "bnx2x_fw_defs.h"
+#include "bnx2x_hsi.h"
+#include "bnx2x_link.h"
+#include "bnx2x_sp.h"
+#include "bnx2x_dcb.h"
+#include "bnx2x_stats.h"
+
+/* error/debug prints */
+
+#define DRV_MODULE_NAME "bnx2x"
+
+/* for messages that are currently off */
+#define BNX2X_MSG_OFF 0
+#define BNX2X_MSG_MCP 0x010000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_STATS 0x020000 /* was: NETIF_MSG_TIMER */
+#define BNX2X_MSG_NVM 0x040000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_DMAE 0x080000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */
+#define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */
+
+/* regular debug print */
+#define DP(__mask, fmt, ...) \
+do { \
+ if (bp->msg_enable & (__mask)) \
+ pr_notice("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define DP_CONT(__mask, fmt, ...) \
+do { \
+ if (bp->msg_enable & (__mask)) \
+ pr_cont(fmt, ##__VA_ARGS__); \
+} while (0)
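+
+/*
+ * Usage sketch (editor's illustration; cid here is a hypothetical local
+ * variable): the mask selects the debug class a message belongs to, so
+ * individual classes can be toggled at runtime via bp->msg_enable:
+ *
+ *	DP(BNX2X_MSG_SP, "got ramrod completion for CID %d\n", cid);
+ */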
+
+/* errors debug print */
+#define BNX2X_DBG_ERR(fmt, ...) \
+do { \
+ if (netif_msg_probe(bp)) \
+ pr_err("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__VA_ARGS__); \
+} while (0)
+
+/* for errors (never masked) */
+#define BNX2X_ERR(fmt, ...) \
+do { \
+ pr_err("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define BNX2X_ERROR(fmt, ...) \
+ pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__)
+
+
+/* before we have a dev->name use dev_info() */
+#define BNX2X_DEV_INFO(fmt, ...) \
+do { \
+ if (netif_msg_probe(bp)) \
+ dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__); \
+} while (0)
+
+#ifdef BNX2X_STOP_ON_ERROR
+void bnx2x_int_disable(struct bnx2x *bp);
+#define bnx2x_panic() \
+do { \
+ bp->panic = 1; \
+ BNX2X_ERR("driver assert\n"); \
+ bnx2x_int_disable(bp); \
+ bnx2x_panic_dump(bp); \
+} while (0)
+#else
+#define bnx2x_panic() \
+do { \
+ bp->panic = 1; \
+ BNX2X_ERR("driver assert\n"); \
+ bnx2x_panic_dump(bp); \
+} while (0)
+#endif
+
+#define bnx2x_mc_addr(ha) ((ha)->addr)
+#define bnx2x_uc_addr(ha) ((ha)->addr)
+
+#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
+#define U64_HI(x) (u32)(((u64)(x)) >> 32)
+#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
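+
+/*
+ * Worked example (editor's illustration): splitting a 64-bit DMA address
+ * for a pair of 32-bit registers and recombining it:
+ *
+ *	U64_HI(0x123456789abcULL) == 0x00001234
+ *	U64_LO(0x123456789abcULL) == 0x56789abc
+ *	HILO_U64(0x00001234, 0x56789abc) == 0x123456789abcULL
+ */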
+
+
+#define REG_ADDR(bp, offset) ((bp->regview) + (offset))
+
+#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset))
+#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset))
+#define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset))
+
+#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset))
+#define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset))
+#define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset))
+
+#define REG_RD_IND(bp, offset) bnx2x_reg_rd_ind(bp, offset)
+#define REG_WR_IND(bp, offset, val) bnx2x_reg_wr_ind(bp, offset, val)
+
+#define REG_RD_DMAE(bp, offset, valp, len32) \
+ do { \
+ bnx2x_read_dmae(bp, offset, len32);\
+ memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \
+ } while (0)
+
+#define REG_WR_DMAE(bp, offset, valp, len32) \
+ do { \
+ memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \
+ bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \
+ offset, len32); \
+ } while (0)
+
+#define REG_WR_DMAE_LEN(bp, offset, valp, len32) \
+ REG_WR_DMAE(bp, offset, valp, len32)
+
+#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
+ do { \
+ memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
+ bnx2x_write_big_buf_wb(bp, addr, len32); \
+ } while (0)
+
+#define SHMEM_ADDR(bp, field) (bp->common.shmem_base + \
+ offsetof(struct shmem_region, field))
+#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field))
+#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val)
+
+#define SHMEM2_ADDR(bp, field) (bp->common.shmem2_base + \
+ offsetof(struct shmem2_region, field))
+#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field))
+#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
+#define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \
+ offsetof(struct mf_cfg, field))
+#define MF2_CFG_ADDR(bp, field) (bp->common.mf2_cfg_base + \
+ offsetof(struct mf2_cfg, field))
+
+#define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field))
+#define MF_CFG_WR(bp, field, val) REG_WR(bp,\
+ MF_CFG_ADDR(bp, field), (val))
+#define MF2_CFG_RD(bp, field) REG_RD(bp, MF2_CFG_ADDR(bp, field))
+
+#define SHMEM2_HAS(bp, field) ((bp)->common.shmem2_base && \
+ (SHMEM2_RD((bp), size) > \
+ offsetof(struct shmem2_region, field)))
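+
+/*
+ * Usage sketch (editor's illustration; the func_mb[].drv_mb_header field
+ * is assumed from the shmem_region layout in bnx2x_hsi.h): these macros
+ * turn a field name into an MMIO access at the matching shmem offset:
+ *
+ *	fw_seq = SHMEM_RD(bp, func_mb[func].drv_mb_header);
+ */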
+
+#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
+#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
+
+/* SP SB indices */
+
+/* General SP events - stats query, cfc delete, etc */
+#define HC_SP_INDEX_ETH_DEF_CONS 3
+
+/* EQ completions */
+#define HC_SP_INDEX_EQ_CONS 7
+
+/* FCoE L2 connection completions */
+#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6
+#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4
+/* iSCSI L2 */
+#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
+#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
+
+/* Special clients parameters */
+
+/* SB indices */
+/* FCoE L2 */
+#define BNX2X_FCOE_L2_RX_INDEX \
+ (&bp->def_status_blk->sp_sb.\
+ index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS])
+
+#define BNX2X_FCOE_L2_TX_INDEX \
+ (&bp->def_status_blk->sp_sb.\
+ index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS])
+
+/**
+ * CIDs and CLIDs:
+ * The CLIDs below are given for func 0; the CLID for any other
+ * function is calculated by the formula:
+ *
+ * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
+ *
+ */
+/* iSCSI L2 */
+#define BNX2X_ISCSI_ETH_CL_ID_IDX 1
+#define BNX2X_ISCSI_ETH_CID 49
+
+/* FCoE L2 */
+#define BNX2X_FCOE_ETH_CL_ID_IDX 2
+#define BNX2X_FCOE_ETH_CID 50
+
+/** Additional rings budgeting */
+#ifdef BCM_CNIC
+#define CNIC_PRESENT 1
+#define FCOE_PRESENT 1
+#else
+#define CNIC_PRESENT 0
+#define FCOE_PRESENT 0
+#endif /* BCM_CNIC */
+#define NON_ETH_CONTEXT_USE (FCOE_PRESENT)
+
+#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
+
+#define SM_RX_ID 0
+#define SM_TX_ID 1
+
+/* defines for multiple tx priority indices */
+#define FIRST_TX_ONLY_COS_INDEX 1
+#define FIRST_TX_COS_INDEX 0
+
+/* defines for decoding the fastpath index and the cos index out of the
+ * transmission queue index
+ */
+#define MAX_TXQS_PER_COS FP_SB_MAX_E1x
+
+#define TXQ_TO_FP(txq_index) ((txq_index) % MAX_TXQS_PER_COS)
+#define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS)
+
+/* rules for calculating the cids of tx-only connections */
+#define CID_TO_FP(cid) ((cid) % MAX_TXQS_PER_COS)
+#define CID_COS_TO_TX_ONLY_CID(cid, cos) (cid + cos * MAX_TXQS_PER_COS)
+
+/* fp index inside class of service range */
+#define FP_COS_TO_TXQ(fp, cos) ((fp)->index + cos * MAX_TXQS_PER_COS)
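+
+/*
+ * Worked example (editor's illustration, assuming MAX_TXQS_PER_COS == 16
+ * on E1x): transmission queue 19 maps to fastpath 3, cos 1:
+ *
+ *	TXQ_TO_FP(19)  == 19 % 16 == 3
+ *	TXQ_TO_COS(19) == 19 / 16 == 1
+ */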
+
+/*
+ * 0..15 eth cos0
+ * 16..31 eth cos1 if applicable
+ * 32..47 eth cos2 if applicable
+ * fcoe queue follows eth queues (16, 32, 48 depending on cos)
+ */
+#define MAX_ETH_TXQ_IDX(bp) (MAX_TXQS_PER_COS * (bp)->max_cos)
+#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp))
+
+/* fast path */
+struct sw_rx_bd {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct sw_tx_bd {
+ struct sk_buff *skb;
+ u16 first_bd;
+ u8 flags;
+/* Set on the first BD descriptor when there is a split BD */
+#define BNX2X_TSO_SPLIT_BD (1<<0)
+};
+
+struct sw_rx_page {
+ struct page *page;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+union db_prod {
+ struct doorbell_set_prod data;
+ u32 raw;
+};
+
+/* dropless fc FW/HW related params */
+#define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512)
+#define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ? \
+ ETH_MAX_AGGREGATION_QUEUES_E1 :\
+ ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
+#define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp))
+#define FW_PREFETCH_CNT 16
+#define DROPLESS_FC_HEADROOM 100
+
+/* MC hsi */
+#define BCM_PAGE_SHIFT 12
+#define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT)
+#define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1))
+#define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK)
+
+#define PAGES_PER_SGE_SHIFT 0
+#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT)
+#define SGE_PAGE_SIZE PAGE_SIZE
+#define SGE_PAGE_SHIFT PAGE_SHIFT
+#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
+
+/* SGE ring related macros */
+#define NUM_RX_SGE_PAGES 2
+#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
+#define NEXT_PAGE_SGE_DESC_CNT 2
+#define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)
+/* RX_SGE_CNT is guaranteed to be a power of 2 */
+#define RX_SGE_MASK (RX_SGE_CNT - 1)
+#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
+#define MAX_RX_SGE (NUM_RX_SGE - 1)
+#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \
+ (MAX_RX_SGE_CNT - 1)) ? \
+ (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \
+ (x) + 1)
+#define RX_SGE(x) ((x) & MAX_RX_SGE)
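+
+/*
+ * Worked example (editor's illustration, assuming 4K pages and an 8-byte
+ * struct eth_rx_sge, so RX_SGE_CNT == 512 and MAX_RX_SGE_CNT == 510): the
+ * last NEXT_PAGE_SGE_DESC_CNT descriptors on each page point to the next
+ * page, so NEXT_SGE_IDX() jumps over both of them:
+ *
+ *	NEXT_SGE_IDX(508) == 509
+ *	NEXT_SGE_IDX(509) == 512	// skips the two next-page entries
+ */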
+
+/*
+ * The number of required SGEs is the sum of two terms:
+ * 1. The number of possible open aggregations (the next packet for each
+ *    of these aggregations will probably consume an SGE immediately)
+ * 2. The rest of the BRB blocks divided by 2 (a block consumes a new SGE
+ *    only after placement on the BD for a new TPA aggregation)
+ *
+ * This takes into account the NEXT_PAGE_SGE_DESC_CNT "next" elements on
+ * each page.
+ */
+#define NUM_SGE_REQ (MAX_AGG_QS(bp) + \
+ (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
+#define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \
+ MAX_RX_SGE_CNT)
+#define SGE_TH_LO(bp) (NUM_SGE_REQ + \
+ NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
+#define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
+/* Manipulate a bit vector defined as an array of u64 */
+
+/* Number of bits in one sge_mask array element */
+#define BIT_VEC64_ELEM_SZ 64
+#define BIT_VEC64_ELEM_SHIFT 6
+#define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1)
+
+
+#define __BIT_VEC64_SET_BIT(el, bit) \
+ do { \
+ el = ((el) | ((u64)0x1 << (bit))); \
+ } while (0)
+
+#define __BIT_VEC64_CLEAR_BIT(el, bit) \
+ do { \
+ el = ((el) & (~((u64)0x1 << (bit)))); \
+ } while (0)
+
+
+#define BIT_VEC64_SET_BIT(vec64, idx) \
+ __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
+ (idx) & BIT_VEC64_ELEM_MASK)
+
+#define BIT_VEC64_CLEAR_BIT(vec64, idx) \
+ __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
+ (idx) & BIT_VEC64_ELEM_MASK)
+
+#define BIT_VEC64_TEST_BIT(vec64, idx) \
+ (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
+ ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1)
+
+/* Creates a bitmask of all ones in the less significant bits.
+ idx - index of the most significant bit in the created mask */
+#define BIT_VEC64_ONES_MASK(idx) \
+ (((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1)
+#define BIT_VEC64_ELEM_ONE_MASK ((u64)(~0))
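+
+/* Usage sketch: bit 70 of a vector lives in element 70 >> 6 == 1 at bit
+ * position 70 & 63 == 6, so with u64 vec[2] = {0, 0}:
+ *
+ * BIT_VEC64_SET_BIT(vec, 70) sets vec[1] to 0x40
+ * BIT_VEC64_TEST_BIT(vec, 70) then evaluates to 1
+ * BIT_VEC64_CLEAR_BIT(vec, 70) clears vec[1] back to 0
+ */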
+
+/*******************************************************/
+
+
+
+/* Number of u64 elements in SGE mask array */
+#define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
+ BIT_VEC64_ELEM_SZ)
+#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
+#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)
+
+union host_hc_status_block {
+ /* pointer to fp status block e1x */
+ struct host_hc_status_block_e1x *e1x_sb;
+ /* pointer to fp status block e2 */
+ struct host_hc_status_block_e2 *e2_sb;
+};
+
+struct bnx2x_agg_info {
+ /*
+ * First aggregation buffer is an skb; the following ones are pages.
+ * We will preallocate the skbs for each aggregation when
+ * we open the interface and will replace the BD at the consumer
+ * with this one when we receive the TPA_START CQE in order to
+ * keep the Rx BD ring consistent.
+ */
+ struct sw_rx_bd first_buf;
+ u8 tpa_state;
+#define BNX2X_TPA_START 1
+#define BNX2X_TPA_STOP 2
+#define BNX2X_TPA_ERROR 3
+ u8 placement_offset;
+ u16 parsing_flags;
+ u16 vlan_tag;
+ u16 len_on_bd;
+};
+
+#define Q_STATS_OFFSET32(stat_name) \
+ (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
+
+struct bnx2x_fp_txdata {
+
+ struct sw_tx_bd *tx_buf_ring;
+
+ union eth_tx_bd_types *tx_desc_ring;
+ dma_addr_t tx_desc_mapping;
+
+ u32 cid;
+
+ union db_prod tx_db;
+
+ u16 tx_pkt_prod;
+ u16 tx_pkt_cons;
+ u16 tx_bd_prod;
+ u16 tx_bd_cons;
+
+ unsigned long tx_pkt;
+
+ __le16 *tx_cons_sb;
+
+ int txq_index;
+};
+
+struct bnx2x_fastpath {
+ struct bnx2x *bp; /* parent */
+
+#define BNX2X_NAPI_WEIGHT 128
+ struct napi_struct napi;
+ union host_hc_status_block status_blk;
+ /* chip independent shortcuts into sb structure */
+ __le16 *sb_index_values;
+ __le16 *sb_running_index;
+ /* chip independent shortcut into rx_prods_offset memory */
+ u32 ustorm_rx_prods_offset;
+
+ u32 rx_buf_size;
+
+ dma_addr_t status_blk_mapping;
+
+ u8 max_cos; /* actual number of active tx CoSes */
+ struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS];
+
+ struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */
+ struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */
+
+ struct eth_rx_bd *rx_desc_ring;
+ dma_addr_t rx_desc_mapping;
+
+ union eth_rx_cqe *rx_comp_ring;
+ dma_addr_t rx_comp_mapping;
+
+ /* SGE ring */
+ struct eth_rx_sge *rx_sge_ring;
+ dma_addr_t rx_sge_mapping;
+
+ u64 sge_mask[RX_SGE_MASK_LEN];
+
+ u32 cid;
+
+ __le16 fp_hc_idx;
+
+ u8 index; /* number in fp array */
+ u8 cl_id; /* eth client id */
+ u8 cl_qzone_id;
+ u8 fw_sb_id; /* status block number in FW */
+ u8 igu_sb_id; /* status block number in HW */
+
+ u16 rx_bd_prod;
+ u16 rx_bd_cons;
+ u16 rx_comp_prod;
+ u16 rx_comp_cons;
+ u16 rx_sge_prod;
+ /* The last maximal completed SGE */
+ u16 last_max_sge;
+ __le16 *rx_cons_sb;
+ unsigned long rx_pkt,
+ rx_calls;
+
+ /* TPA related */
+ struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2];
+ u8 disable_tpa;
+#ifdef BNX2X_STOP_ON_ERROR
+ u64 tpa_queue_used;
+#endif
+
+ struct tstorm_per_queue_stats old_tclient;
+ struct ustorm_per_queue_stats old_uclient;
+ struct xstorm_per_queue_stats old_xclient;
+ struct bnx2x_eth_q_stats eth_q_stats;
+
+ /* The size is calculated using the following:
+ sizeof name field from netdev structure +
+ 4 ('-Xx-' string) +
+ 4 (for the digits and to make it DWORD aligned) */
+#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
+ char name[FP_NAME_SIZE];
+
+ /* MACs object */
+ struct bnx2x_vlan_mac_obj mac_obj;
+
+ /* Queue State object */
+ struct bnx2x_queue_sp_obj q_obj;
+
+};
+
+#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
+
+/* Use 2500 as a mini-jumbo MTU for FCoE */
+#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
+
+/* FCoE L2 `fastpath' entry is right after the eth entries */
+#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp)
+#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX])
+#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
+#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \
+ txdata[FIRST_TX_COS_INDEX].var)
+
+
+#define IS_ETH_FP(fp) (fp->index < \
+ BNX2X_NUM_ETH_QUEUES(fp->bp))
+#ifdef BCM_CNIC
+#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX)
+#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX)
+#else
+#define IS_FCOE_FP(fp) false
+#define IS_FCOE_IDX(idx) false
+#endif
+
+
+/* MC hsi */
+#define MAX_FETCH_BD 13 /* HW max BDs per packet */
+#define RX_COPY_THRESH 92
+
+#define NUM_TX_RINGS 16
+#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
+#define NEXT_PAGE_TX_DESC_CNT 1
+#define MAX_TX_DESC_CNT (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)
+#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
+#define MAX_TX_BD (NUM_TX_BD - 1)
+#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
+#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
+ (MAX_TX_DESC_CNT - 1)) ? \
+ (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \
+ (x) + 1)
+#define TX_BD(x) ((x) & MAX_TX_BD)
+#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
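+
+/* Worked numbers, assuming sizeof(union eth_tx_bd_types) == 16:
+ * TX_DESC_CNT == 4096 / 16 == 256 descriptors per page, of which
+ * MAX_TX_DESC_CNT == 255 are usable (one "next page" descriptor).
+ * NEXT_TX_IDX(254) == 256 (the link descriptor is skipped) while
+ * NEXT_TX_IDX(7) == 8, and TX_BD(4096) wraps to 0 since
+ * NUM_TX_BD == 256 * 16 == 4096.
+ */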
+
+/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
+#define NUM_RX_RINGS 8
+#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
+#define NEXT_PAGE_RX_DESC_CNT 2
+#define MAX_RX_DESC_CNT (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)
+#define RX_DESC_MASK (RX_DESC_CNT - 1)
+#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
+#define MAX_RX_BD (NUM_RX_BD - 1)
+#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
+
+/* dropless fc calculations for BDs
+ *
+ * The number of BDs should equal the number of buffers in the BRB:
+ * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT
+ * "next" elements on each page
+ */
+#define NUM_BD_REQ BRB_SIZE(bp)
+#define NUM_BD_PG_REQ ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \
+ MAX_RX_DESC_CNT)
+#define BD_TH_LO(bp) (NUM_BD_REQ + \
+ NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \
+ FW_DROP_LEVEL(bp))
+#define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
+#define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)
+
+#define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \
+ ETH_MIN_RX_CQES_WITH_TPA_E1 : \
+ ETH_MIN_RX_CQES_WITH_TPA_E1H_E2)
+#define MIN_RX_SIZE_NONTPA_HW ETH_MIN_RX_CQES_WITHOUT_TPA
+#define MIN_RX_SIZE_TPA (max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL))
+#define MIN_RX_SIZE_NONTPA (max_t(u32, MIN_RX_SIZE_NONTPA_HW,\
+ MIN_RX_AVAIL))
+
+#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
+ (MAX_RX_DESC_CNT - 1)) ? \
+ (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \
+ (x) + 1)
+#define RX_BD(x) ((x) & MAX_RX_BD)
+
+/*
+ * As long as a CQE is X times bigger than a BD entry, we have to allocate X
+ * times more pages for the CQ ring in order to keep it balanced with the BD
+ * ring
+ */
+#define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
+#define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL)
+#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
+#define NEXT_PAGE_RCQ_DESC_CNT 1
+#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)
+#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS)
+#define MAX_RCQ_BD (NUM_RCQ_BD - 1)
+#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
+#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \
+ (MAX_RCQ_DESC_CNT - 1)) ? \
+ (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \
+ (x) + 1)
+#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
+
+/* dropless fc calculations for RCQs
+ *
+ * The number of RCQs should equal the number of buffers in the BRB:
+ * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT
+ * "next" elements on each page
+ */
+#define NUM_RCQ_REQ BRB_SIZE(bp)
+#define NUM_RCQ_PG_REQ ((NUM_RCQ_REQ + MAX_RCQ_DESC_CNT - 1) / \
+ MAX_RCQ_DESC_CNT)
+#define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \
+ NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \
+ FW_DROP_LEVEL(bp))
+#define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
+
+/* This is needed for determining last_max */
+#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
+#define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b))
+
+
+#define BNX2X_SWCID_SHIFT 17
+#define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1)
+
+/* used on a CID received from the HW */
+#define SW_CID(x) (le32_to_cpu(x) & BNX2X_SWCID_MASK)
+#define CQE_CMD(x) (le32_to_cpu(x) >> \
+ COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
+
+#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr_hi), \
+ le32_to_cpu((bd)->addr_lo))
+#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
+
+#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
+#define BNX2X_DB_SHIFT 7 /* 128 bytes*/
+#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
+#error "Min DB doorbell stride is 8"
+#endif
+#define DPM_TRIGER_TYPE 0x40
+#define DOORBELL(bp, cid, val) \
+ do { \
+ writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
+ DPM_TRIGER_TYPE); \
+ } while (0)
+
+
+/* TX CSUM helpers */
+#define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \
+ skb->csum_offset)
+#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \
+ skb->csum_offset))
+
+#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
+
+#define XMIT_PLAIN 0
+#define XMIT_CSUM_V4 0x1
+#define XMIT_CSUM_V6 0x2
+#define XMIT_CSUM_TCP 0x4
+#define XMIT_GSO_V4 0x8
+#define XMIT_GSO_V6 0x10
+
+#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6)
+#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6)
+
+
+/* stuff added to make the code fit 80Col */
+#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
+#define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG)
+#define CQE_TYPE_STOP(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG)
+#define CQE_TYPE_SLOW(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD)
+#define CQE_TYPE_FAST(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH)
+
+#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
+
+#define BNX2X_IP_CSUM_ERR(cqe) \
+ (!((cqe)->fast_path_cqe.status_flags & \
+ ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
+ ((cqe)->fast_path_cqe.type_error_flags & \
+ ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
+
+#define BNX2X_L4_CSUM_ERR(cqe) \
+ (!((cqe)->fast_path_cqe.status_flags & \
+ ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
+ ((cqe)->fast_path_cqe.type_error_flags & \
+ ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+
+#define BNX2X_RX_CSUM_OK(cqe) \
+ (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
+
+#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
+ (((le16_to_cpu(flags) & \
+ PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
+ PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) \
+ == PRS_FLAG_OVERETH_IPV4)
+#define BNX2X_RX_SUM_FIX(cqe) \
+ BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
+
+
+#define FP_USB_FUNC_OFF \
+ offsetof(struct cstorm_status_block_u, func)
+#define FP_CSB_FUNC_OFF \
+ offsetof(struct cstorm_status_block_c, func)
+
+#define HC_INDEX_ETH_RX_CQ_CONS 1
+
+#define HC_INDEX_OOO_TX_CQ_CONS 4
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7
+
+#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
+
+#define BNX2X_RX_SB_INDEX \
+ (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
+
+#define BNX2X_TX_SB_INDEX_BASE BNX2X_TX_SB_INDEX_COS0
+
+#define BNX2X_TX_SB_INDEX_COS0 \
+ (&fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0])
+
+/* end of fast path */
+
+/* common */
+
+struct bnx2x_common {
+
+ u32 chip_id;
+/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
+#define CHIP_ID(bp) (bp->common.chip_id & 0xfffffff0)
+
+#define CHIP_NUM(bp) (bp->common.chip_id >> 16)
+#define CHIP_NUM_57710 0x164e
+#define CHIP_NUM_57711 0x164f
+#define CHIP_NUM_57711E 0x1650
+#define CHIP_NUM_57712 0x1662
+#define CHIP_NUM_57712_MF 0x1663
+#define CHIP_NUM_57713 0x1651
+#define CHIP_NUM_57713E 0x1652
+#define CHIP_NUM_57800 0x168a
+#define CHIP_NUM_57800_MF 0x16a5
+#define CHIP_NUM_57810 0x168e
+#define CHIP_NUM_57810_MF 0x16ae
+#define CHIP_NUM_57840 0x168d
+#define CHIP_NUM_57840_MF 0x16ab
+#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
+#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
+#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
+#define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712)
+#define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF)
+#define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800)
+#define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF)
+#define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810)
+#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF)
+#define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840)
+#define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF)
+#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
+ CHIP_IS_57711E(bp))
+#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
+ CHIP_IS_57712_MF(bp))
+#define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \
+ CHIP_IS_57800_MF(bp) || \
+ CHIP_IS_57810(bp) || \
+ CHIP_IS_57810_MF(bp) || \
+ CHIP_IS_57840(bp) || \
+ CHIP_IS_57840_MF(bp))
+#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
+#define USES_WARPCORE(bp) (CHIP_IS_E3(bp))
+#define IS_E1H_OFFSET (!CHIP_IS_E1(bp))
+
+#define CHIP_REV_SHIFT 12
+#define CHIP_REV_MASK (0xF << CHIP_REV_SHIFT)
+#define CHIP_REV_VAL(bp) (bp->common.chip_id & CHIP_REV_MASK)
+#define CHIP_REV_Ax (0x0 << CHIP_REV_SHIFT)
+#define CHIP_REV_Bx (0x1 << CHIP_REV_SHIFT)
+/* assume maximum 5 revisions */
+#define CHIP_REV_IS_SLOW(bp) (CHIP_REV_VAL(bp) > 0x00005000)
+/* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */
+#define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \
+ !(CHIP_REV_VAL(bp) & 0x00001000))
+/* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */
+#define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \
+ (CHIP_REV_VAL(bp) & 0x00001000))
+
+#define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \
+ ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1))
+
+#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0)
+#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f)
+#define CHIP_REV_SIM(bp) (((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\
+ (CHIP_REV_SHIFT + 1)) \
+ << CHIP_REV_SHIFT)
+#define CHIP_REV(bp) (CHIP_REV_IS_SLOW(bp) ? \
+ CHIP_REV_SIM(bp) :\
+ CHIP_REV_VAL(bp))
+#define CHIP_IS_E3B0(bp) (CHIP_IS_E3(bp) && \
+ (CHIP_REV(bp) == CHIP_REV_Bx))
+#define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \
+ (CHIP_REV(bp) == CHIP_REV_Ax))
+
+ int flash_size;
+#define BNX2X_NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
+#define BNX2X_NVRAM_TIMEOUT_COUNT 30000
+#define BNX2X_NVRAM_PAGE_SIZE 256
+
+ u32 shmem_base;
+ u32 shmem2_base;
+ u32 mf_cfg_base;
+ u32 mf2_cfg_base;
+
+ u32 hw_config;
+
+ u32 bc_ver;
+
+ u8 int_block;
+#define INT_BLOCK_HC 0
+#define INT_BLOCK_IGU 1
+#define INT_BLOCK_MODE_NORMAL 0
+#define INT_BLOCK_MODE_BW_COMP 2
+#define CHIP_INT_MODE_IS_NBC(bp) \
+ (!CHIP_IS_E1x(bp) && \
+ !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP))
+#define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp))
+
+ u8 chip_port_mode;
+#define CHIP_4_PORT_MODE 0x0
+#define CHIP_2_PORT_MODE 0x1
+#define CHIP_PORT_MODE_NONE 0x2
+#define CHIP_MODE(bp) (bp->common.chip_port_mode)
+#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
+};
+
+/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
+#define BNX2X_IGU_STAS_MSG_VF_CNT 64
+#define BNX2X_IGU_STAS_MSG_PF_CNT 4
+
+/* end of common */
+
+/* port */
+
+struct bnx2x_port {
+ u32 pmf;
+
+ u32 link_config[LINK_CONFIG_SIZE];
+
+ u32 supported[LINK_CONFIG_SIZE];
+/* link settings - missing defines */
+#define SUPPORTED_2500baseX_Full (1 << 15)
+
+ u32 advertising[LINK_CONFIG_SIZE];
+/* link settings - missing defines */
+#define ADVERTISED_2500baseX_Full (1 << 15)
+
+ u32 phy_addr;
+
+ /* used to synchronize phy accesses */
+ struct mutex phy_mutex;
+ int need_hw_lock;
+
+ u32 port_stx;
+
+ struct nig_stats old_nig_stats;
+};
+
+/* end of port */
+
+#define STATS_OFFSET32(stat_name) \
+ (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
+
+/* slow path */
+
+/* slow path work-queue */
+extern struct workqueue_struct *bnx2x_wq;
+
+#define BNX2X_MAX_NUM_OF_VFS 64
+#define BNX2X_VF_ID_INVALID 0xFF
+
+/*
+ * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
+ * controlled by the number of fast-path status blocks supported by the
+ * device (HW/FW). Each fast-path status block (FP-SB), aka non-default
+ * status block, represents an independent interrupt context that can
+ * serve a regular L2 networking queue. However, special L2 queues such
+ * as the FCoE queue do not require a FP-SB, and other components like
+ * the CNIC may consume FP-SBs, reducing the number of possible L2 queues.
+ *
+ * If the maximum number of FP-SBs available is X then:
+ * a. If CNIC is supported it consumes 1 FP-SB, thus the max number of
+ * regular L2 queues is Y = X - 1.
+ * b. In MF mode the actual number of L2 queues is Y = (X - 1) / MF_factor.
+ * c. If the FCoE L2 queue is supported, the actual number of L2 queues
+ * is Y + 1.
+ * d. The number of irqs (MSIX vectors) is either Y + 1 (one extra for
+ * slow-path interrupts) or Y + 2 if CNIC is supported (one additional
+ * FP interrupt context for the CNIC).
+ * e. The number of HW contexts (CID count) is always X, or X + 1 if the
+ * FCoE L2 queue is supported. The CID for the FCoE L2 queue is always X.
+ */
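+
+/* For example, on an E1x device with X == FP_SB_MAX_E1x == 16 and CNIC
+ * enabled: Y == 15 regular L2 queues, 16 L2 queues once FCoE is added,
+ * 17 MSIX vectors (15 fastpath + slow-path + CNIC) and 17 CIDs, with
+ * CID 16 used by the FCoE L2 queue.
+ */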
+
+/* fast-path interrupt contexts E1x */
+#define FP_SB_MAX_E1x 16
+/* fast-path interrupt contexts E2 */
+#define FP_SB_MAX_E2 HC_SB_MAX_SB_E2
+
+union cdu_context {
+ struct eth_context eth;
+ char pad[1024];
+};
+
+/* CDU host DB constants */
+#define CDU_ILT_PAGE_SZ_HW 3
+#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */
+#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
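+
+/* i.e. CDU_ILT_PAGE_SZ == 8192 << 3 == 64K and, since a cdu_context is
+ * padded to 1024 bytes, ILT_PAGE_CIDS == 65536 / 1024 == 64 CIDs per
+ * ILT page
+ */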
+
+#ifdef BCM_CNIC
+#define CNIC_ISCSI_CID_MAX 256
+#define CNIC_FCOE_CID_MAX 2048
+#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
+#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
+#endif
+
+#define QM_ILT_PAGE_SZ_HW 0
+#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */
+#define QM_CID_ROUND 1024
+
+#ifdef BCM_CNIC
+/* TM (timers) host DB constants */
+#define TM_ILT_PAGE_SZ_HW 0
+#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
+/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
+#define TM_CONN_NUM 1024
+#define TM_ILT_SZ (8 * TM_CONN_NUM)
+#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
+
+/* SRC (Searcher) host DB constants */
+#define SRC_ILT_PAGE_SZ_HW 0
+#define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */
+#define SRC_HASH_BITS 10
+#define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */
+#define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM)
+#define SRC_T2_SZ SRC_ILT_SZ
+#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
+
+#endif
+
+#define MAX_DMAE_C 8
+
+/* DMA memory not used in fastpath */
+struct bnx2x_slowpath {
+ union {
+ struct mac_configuration_cmd e1x;
+ struct eth_classify_rules_ramrod_data e2;
+ } mac_rdata;
+
+
+ union {
+ struct tstorm_eth_mac_filter_config e1x;
+ struct eth_filter_rules_ramrod_data e2;
+ } rx_mode_rdata;
+
+ union {
+ struct mac_configuration_cmd e1;
+ struct eth_multicast_rules_ramrod_data e2;
+ } mcast_rdata;
+
+ struct eth_rss_update_ramrod_data rss_rdata;
+
+ /* Queue State related ramrods are always sent under rtnl_lock */
+ union {
+ struct client_init_ramrod_data init_data;
+ struct client_update_ramrod_data update_data;
+ } q_rdata;
+
+ union {
+ struct function_start_data func_start;
+ /* pfc configuration for DCBX ramrod */
+ struct flow_control_configuration pfc_config;
+ } func_rdata;
+
+ /* used by the dmae command executor */
+ struct dmae_command dmae[MAX_DMAE_C];
+
+ u32 stats_comp;
+ union mac_stats mac_stats;
+ struct nig_stats nig_stats;
+ struct host_port_stats port_stats;
+ struct host_func_stats func_stats;
+ struct host_func_stats func_stats_base;
+
+ u32 wb_comp;
+ u32 wb_data[4];
+};
+
+#define bnx2x_sp(bp, var) (&bp->slowpath->var)
+#define bnx2x_sp_mapping(bp, var) \
+ (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var))
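+
+/* Typical usage sketch, taking the virtual and DMA addresses of one
+ * slowpath member, e.g. for an E2 classification ramrod:
+ *
+ * struct eth_classify_rules_ramrod_data *data =
+ * bnx2x_sp(bp, mac_rdata.e2);
+ * dma_addr_t data_mapping = bnx2x_sp_mapping(bp, mac_rdata);
+ */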
+
+
+/* attn group wiring */
+#define MAX_DYNAMIC_ATTN_GRPS 8
+
+struct attn_route {
+ u32 sig[5];
+};
+
+struct iro {
+ u32 base;
+ u16 m1;
+ u16 m2;
+ u16 m3;
+ u16 size;
+};
+
+struct hw_context {
+ union cdu_context *vcxt;
+ dma_addr_t cxt_mapping;
+ size_t size;
+};
+
+/* forward */
+struct bnx2x_ilt;
+
+
+enum bnx2x_recovery_state {
+ BNX2X_RECOVERY_DONE,
+ BNX2X_RECOVERY_INIT,
+ BNX2X_RECOVERY_WAIT,
+ BNX2X_RECOVERY_FAILED
+};
+
+/*
+ * Event queue (EQ or event ring) MC hsi
+ * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2
+ */
+#define NUM_EQ_PAGES 1
+#define EQ_DESC_CNT_PAGE (BCM_PAGE_SIZE / sizeof(union event_ring_elem))
+#define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1)
+#define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES)
+#define EQ_DESC_MASK (NUM_EQ_DESC - 1)
+#define MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2)
+
+/* depends on EQ_DESC_CNT_PAGE being a power of 2 */
+#define NEXT_EQ_IDX(x) ((((x) & EQ_DESC_MAX_PAGE) == \
+ (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1)
+
+/* depends on the above and on NUM_EQ_PAGES being a power of 2 */
+#define EQ_DESC(x) ((x) & EQ_DESC_MASK)
+
+#define BNX2X_EQ_INDEX \
+ (&bp->def_status_blk->sp_sb.\
+ index_values[HC_SP_INDEX_EQ_CONS])
+
+/* This is the data that will be used to create a link report message.
+ * We will keep the data used for the last link report in order
+ * to prevent reporting the same link parameters twice.
+ */
+struct bnx2x_link_report_data {
+ u16 line_speed; /* Effective line speed */
+ unsigned long link_report_flags;/* BNX2X_LINK_REPORT_XXX flags */
+};
+
+enum {
+ BNX2X_LINK_REPORT_FD, /* Full DUPLEX */
+ BNX2X_LINK_REPORT_LINK_DOWN,
+ BNX2X_LINK_REPORT_RX_FC_ON,
+ BNX2X_LINK_REPORT_TX_FC_ON,
+};
+
+enum {
+ BNX2X_PORT_QUERY_IDX,
+ BNX2X_PF_QUERY_IDX,
+ BNX2X_FIRST_QUEUE_QUERY_IDX,
+};
+
+struct bnx2x_fw_stats_req {
+ struct stats_query_header hdr;
+ struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+};
+
+struct bnx2x_fw_stats_data {
+ struct stats_counter storm_counters;
+ struct per_port_stats port;
+ struct per_pf_stats pf;
+ struct per_queue_stats queue_stats[1];
+};
+
+/* Public slow path states */
+enum {
+ BNX2X_SP_RTNL_SETUP_TC,
+ BNX2X_SP_RTNL_TX_TIMEOUT,
+};
+
+
+struct bnx2x {
+ /* Fields used in the tx and intr/napi performance paths
+ * are grouped together in the beginning of the structure
+ */
+ struct bnx2x_fastpath *fp;
+ void __iomem *regview;
+ void __iomem *doorbells;
+ u16 db_size;
+
+ u8 pf_num; /* absolute PF number */
+ u8 pfid; /* per-path PF number */
+ int base_fw_ndsb;
+#define BP_PATH(bp) (CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1))
+#define BP_PORT(bp) (bp->pfid & 1)
+#define BP_FUNC(bp) (bp->pfid)
+#define BP_ABS_FUNC(bp) (bp->pf_num)
+#define BP_VN(bp) ((bp)->pfid >> 1)
+#define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4)
+#define BP_L_ID(bp) (BP_VN(bp) << 2)
+#define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\
+ (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
+#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
+
+ struct net_device *dev;
+ struct pci_dev *pdev;
+
+ const struct iro *iro_arr;
+#define IRO (bp->iro_arr)
+
+ enum bnx2x_recovery_state recovery_state;
+ int is_leader;
+ struct msix_entry *msix_table;
+
+ int tx_ring_size;
+
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
+#define ETH_MIN_PACKET_SIZE 60
+#define ETH_MAX_PACKET_SIZE 1500
+#define ETH_MAX_JUMBO_PACKET_SIZE 9600
+
+ /* Max supported alignment is 256 (8 shift) */
+#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \
+ L1_CACHE_SHIFT : 8)
+ /* FW uses 2 cache lines alignment for start packet and size */
+#define BNX2X_FW_RX_ALIGN (2 << BNX2X_RX_ALIGN_SHIFT)
+#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)
+
+ struct host_sp_status_block *def_status_blk;
+#define DEF_SB_IGU_ID 16
+#define DEF_SB_ID HC_SP_SB_ID
+ __le16 def_idx;
+ __le16 def_att_idx;
+ u32 attn_state;
+ struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
+
+ /* slow path ring */
+ struct eth_spe *spq;
+ dma_addr_t spq_mapping;
+ u16 spq_prod_idx;
+ struct eth_spe *spq_prod_bd;
+ struct eth_spe *spq_last_bd;
+ __le16 *dsb_sp_prod;
+ atomic_t cq_spq_left; /* ETH_XXX ramrods credit */
+ /* used to synchronize spq accesses */
+ spinlock_t spq_lock;
+
+ /* event queue */
+ union event_ring_elem *eq_ring;
+ dma_addr_t eq_mapping;
+ u16 eq_prod;
+ u16 eq_cons;
+ __le16 *eq_cons_sb;
+ atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */
+
+
+
+ /* Counter for marking that there is a STAT_QUERY ramrod pending */
+ u16 stats_pending;
+ /* Counter for completed statistics ramrods */
+ u16 stats_comp;
+
+ /* End of fields used in the performance code paths */
+
+ int panic;
+ int msg_enable;
+
+ u32 flags;
+#define PCIX_FLAG (1 << 0)
+#define PCI_32BIT_FLAG (1 << 1)
+#define ONE_PORT_FLAG (1 << 2)
+#define NO_WOL_FLAG (1 << 3)
+#define USING_DAC_FLAG (1 << 4)
+#define USING_MSIX_FLAG (1 << 5)
+#define USING_MSI_FLAG (1 << 6)
+#define DISABLE_MSI_FLAG (1 << 7)
+#define TPA_ENABLE_FLAG (1 << 8)
+#define NO_MCP_FLAG (1 << 9)
+
+#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
+#define MF_FUNC_DIS (1 << 11)
+#define OWN_CNIC_IRQ (1 << 12)
+#define NO_ISCSI_OOO_FLAG (1 << 13)
+#define NO_ISCSI_FLAG (1 << 14)
+#define NO_FCOE_FLAG (1 << 15)
+
+#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
+#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
+#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
+
+ int pm_cap;
+ int mrrs;
+
+ struct delayed_work sp_task;
+ struct delayed_work sp_rtnl_task;
+
+ struct delayed_work period_task;
+ struct timer_list timer;
+ int current_interval;
+
+ u16 fw_seq;
+ u16 fw_drv_pulse_wr_seq;
+ u32 func_stx;
+
+ struct link_params link_params;
+ struct link_vars link_vars;
+ u32 link_cnt;
+ struct bnx2x_link_report_data last_reported_link;
+
+ struct mdio_if_info mdio;
+
+ struct bnx2x_common common;
+ struct bnx2x_port port;
+
+ struct cmng_struct_per_port cmng;
+ u32 vn_weight_sum;
+ u32 mf_config[E1HVN_MAX];
+ u32 mf2_config[E2_FUNC_MAX];
+ u32 path_has_ovlan; /* E3 */
+ u16 mf_ov;
+ u8 mf_mode;
+#define IS_MF(bp) (bp->mf_mode != 0)
+#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI)
+#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD)
+
+ u8 wol;
+
+ int rx_ring_size;
+
+ u16 tx_quick_cons_trip_int;
+ u16 tx_quick_cons_trip;
+ u16 tx_ticks_int;
+ u16 tx_ticks;
+
+ u16 rx_quick_cons_trip_int;
+ u16 rx_quick_cons_trip;
+ u16 rx_ticks_int;
+ u16 rx_ticks;
+/* Maximal coalescing timeout in us */
+#define BNX2X_MAX_COALESCE_TOUT (0xf0*12)
+
+ u32 lin_cnt;
+
+ u16 state;
+#define BNX2X_STATE_CLOSED 0
+#define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000
+#define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000
+#define BNX2X_STATE_OPEN 0x3000
+#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000
+#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
+
+#define BNX2X_STATE_DIAG 0xe000
+#define BNX2X_STATE_ERROR 0xf000
+
+ int multi_mode;
+#define BNX2X_MAX_PRIORITY 8
+#define BNX2X_MAX_ENTRIES_PER_PRI 16
+#define BNX2X_MAX_COS 3
+#define BNX2X_MAX_TX_COS 2
+ int num_queues;
+ int disable_tpa;
+
+ u32 rx_mode;
+#define BNX2X_RX_MODE_NONE 0
+#define BNX2X_RX_MODE_NORMAL 1
+#define BNX2X_RX_MODE_ALLMULTI 2
+#define BNX2X_RX_MODE_PROMISC 3
+#define BNX2X_MAX_MULTICAST 64
+
+ u8 igu_dsb_id;
+ u8 igu_base_sb;
+ u8 igu_sb_cnt;
+ dma_addr_t def_status_blk_mapping;
+
+ struct bnx2x_slowpath *slowpath;
+ dma_addr_t slowpath_mapping;
+
+ /* Total number of FW statistics requests */
+ u8 fw_stats_num;
+
+ /*
+ * This is a memory buffer that will contain both statistics
+ * ramrod request and data.
+ */
+ void *fw_stats;
+ dma_addr_t fw_stats_mapping;
+
+ /*
+ * FW statistics request shortcut (points at the
+ * beginning of fw_stats buffer).
+ */
+ struct bnx2x_fw_stats_req *fw_stats_req;
+ dma_addr_t fw_stats_req_mapping;
+ int fw_stats_req_sz;
+
+ /*
+ * FW statistics data shortcut (points at the beginning of
+ * fw_stats buffer + fw_stats_req_sz).
+ */
+ struct bnx2x_fw_stats_data *fw_stats_data;
+ dma_addr_t fw_stats_data_mapping;
+ int fw_stats_data_sz;
+
+ struct hw_context context;
+
+ struct bnx2x_ilt *ilt;
+#define BP_ILT(bp) ((bp)->ilt)
+#define ILT_MAX_LINES 256
+/*
+ * Maximum supported number of RSS queues: the number of IGU SBs minus the
+ * one that goes to CNIC.
+ */
+#define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_PRESENT)
+
+/*
+ * Maximum CID count that might be required by the bnx2x:
+ * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related)
+ */
+#define BNX2X_L2_CID_COUNT(bp) (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\
+ NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
+ ILT_PAGE_CIDS))
+#define BNX2X_DB_SIZE(bp) (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT))
+
+ int qm_cid_count;
+
+ int dropless_fc;
+
+#ifdef BCM_CNIC
+ u32 cnic_flags;
+#define BNX2X_CNIC_FLAG_MAC_SET 1
+ void *t2;
+ dma_addr_t t2_mapping;
+ struct cnic_ops __rcu *cnic_ops;
+ void *cnic_data;
+ u32 cnic_tag;
+ struct cnic_eth_dev cnic_eth_dev;
+ union host_hc_status_block cnic_sb;
+ dma_addr_t cnic_sb_mapping;
+ struct eth_spe *cnic_kwq;
+ struct eth_spe *cnic_kwq_prod;
+ struct eth_spe *cnic_kwq_cons;
+ struct eth_spe *cnic_kwq_last;
+ u16 cnic_kwq_pending;
+ u16 cnic_spq_pending;
+ u8 fip_mac[ETH_ALEN];
+ struct mutex cnic_mutex;
+ struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj;
+
+ /* Start index of the "special" (CNIC related) L2 clients */
+ u8 cnic_base_cl_id;
+#endif
+
+ int dmae_ready;
+ /* used to synchronize dmae accesses */
+ spinlock_t dmae_lock;
+
+ /* used to protect the FW mail box */
+ struct mutex fw_mb_mutex;
+
+ /* used to synchronize stats collecting */
+ int stats_state;
+
+ /* used to synchronize statistics handling between concurrent threads */
+ spinlock_t stats_lock;
+
+ /* used by dmae command loader */
+ struct dmae_command stats_dmae;
+ int executer_idx;
+
+ u16 stats_counter;
+ struct bnx2x_eth_stats eth_stats;
+
+ struct z_stream_s *strm;
+ void *gunzip_buf;
+ dma_addr_t gunzip_mapping;
+ int gunzip_outlen;
+#define FW_BUF_SIZE 0x8000
+#define GUNZIP_BUF(bp) (bp->gunzip_buf)
+#define GUNZIP_PHYS(bp) (bp->gunzip_mapping)
+#define GUNZIP_OUTLEN(bp) (bp->gunzip_outlen)
+
+ struct raw_op *init_ops;
+ /* Init blocks offsets inside init_ops */
+ u16 *init_ops_offsets;
+ /* Data blob - has 32 bit granularity */
+ u32 *init_data;
+ u32 init_mode_flags;
+#define INIT_MODE_FLAGS(bp) (bp->init_mode_flags)
+ /* Zipped PRAM blobs - raw data */
+ const u8 *tsem_int_table_data;
+ const u8 *tsem_pram_data;
+ const u8 *usem_int_table_data;
+ const u8 *usem_pram_data;
+ const u8 *xsem_int_table_data;
+ const u8 *xsem_pram_data;
+ const u8 *csem_int_table_data;
+ const u8 *csem_pram_data;
+#define INIT_OPS(bp) (bp->init_ops)
+#define INIT_OPS_OFFSETS(bp) (bp->init_ops_offsets)
+#define INIT_DATA(bp) (bp->init_data)
+#define INIT_TSEM_INT_TABLE_DATA(bp) (bp->tsem_int_table_data)
+#define INIT_TSEM_PRAM_DATA(bp) (bp->tsem_pram_data)
+#define INIT_USEM_INT_TABLE_DATA(bp) (bp->usem_int_table_data)
+#define INIT_USEM_PRAM_DATA(bp) (bp->usem_pram_data)
+#define INIT_XSEM_INT_TABLE_DATA(bp) (bp->xsem_int_table_data)
+#define INIT_XSEM_PRAM_DATA(bp) (bp->xsem_pram_data)
+#define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data)
+#define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data)
+
+#define PHY_FW_VER_LEN 20
+ char fw_ver[32];
+ const struct firmware *firmware;
+
+ /* DCB support on/off */
+ u16 dcb_state;
+#define BNX2X_DCB_STATE_OFF 0
+#define BNX2X_DCB_STATE_ON 1
+
+ /* DCBX engine mode */
+ int dcbx_enabled;
+#define BNX2X_DCBX_ENABLED_OFF 0
+#define BNX2X_DCBX_ENABLED_ON_NEG_OFF 1
+#define BNX2X_DCBX_ENABLED_ON_NEG_ON 2
+#define BNX2X_DCBX_ENABLED_INVALID (-1)
+
+ bool dcbx_mode_uset;
+
+ struct bnx2x_config_dcbx_params dcbx_config_params;
+ struct bnx2x_dcbx_port_params dcbx_port_params;
+ int dcb_version;
+
+ /* CAM credit pools */
+ struct bnx2x_credit_pool_obj macs_pool;
+
+ /* RX_MODE object */
+ struct bnx2x_rx_mode_obj rx_mode_obj;
+
+ /* MCAST object */
+ struct bnx2x_mcast_obj mcast_obj;
+
+ /* RSS configuration object */
+ struct bnx2x_rss_config_obj rss_conf_obj;
+
+ /* Function State controlling object */
+ struct bnx2x_func_sp_obj func_obj;
+
+ unsigned long sp_state;
+
+ /* operation indication for the sp_rtnl task */
+ unsigned long sp_rtnl_state;
+
+ /* DCBX negotiation results */
+ struct dcbx_features dcbx_local_feat;
+ u32 dcbx_error;
+
+#ifdef BCM_DCBNL
+ struct dcbx_features dcbx_remote_feat;
+ u32 dcbx_remote_flags;
+#endif
+ u32 pending_max;
+
+ /* multiple tx classes of service */
+ u8 max_cos;
+
+ /* priority to cos mapping */
+ u8 prio_to_cos[8];
+};
+
+/* Tx queue count may be less than or equal to the Rx queue count */
+extern int num_queues;
+#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
+#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
+#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp)
+
+#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
+
+#define BNX2X_MAX_QUEUES(bp) BNX2X_MAX_RSS_COUNT(bp)
+/* #define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1) */
+
+#define RSS_IPV4_CAP_MASK \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
+
+#define RSS_IPV4_TCP_CAP_MASK \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY
+
+#define RSS_IPV6_CAP_MASK \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY
+
+#define RSS_IPV6_TCP_CAP_MASK \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
+
+/* func init flags */
+#define FUNC_FLG_RSS 0x0001
+#define FUNC_FLG_STATS 0x0002
+/* removed FUNC_FLG_UNMATCHED 0x0004 */
+#define FUNC_FLG_TPA 0x0008
+#define FUNC_FLG_SPQ 0x0010
+#define FUNC_FLG_LEADING 0x0020 /* PF only */
+
+
+struct bnx2x_func_init_params {
+ /* dma */
+ dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
+ dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */
+
+ u16 func_flgs;
+ u16 func_id; /* abs fid */
+ u16 pf_id;
+ u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
+};
+
+#define for_each_eth_queue(bp, var) \
+ for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
+
+#define for_each_nondefault_eth_queue(bp, var) \
+ for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
+
+#define for_each_queue(bp, var) \
+ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+ if (skip_queue(bp, var)) \
+ continue; \
+ else
+
+/* Skip forwarding FP */
+#define for_each_rx_queue(bp, var) \
+ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+ if (skip_rx_queue(bp, var)) \
+ continue; \
+ else
+
+/* Skip OOO FP */
+#define for_each_tx_queue(bp, var) \
+ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+ if (skip_tx_queue(bp, var)) \
+ continue; \
+ else
+
+#define for_each_nondefault_queue(bp, var) \
+ for ((var) = 1; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+ if (skip_queue(bp, var)) \
+ continue; \
+ else
+
+#define for_each_cos_in_tx_queue(fp, var) \
+ for ((var) = 0; (var) < (fp)->max_cos; (var)++)
+
+/* skip rx queue
+ * if FCOE l2 support is disabled and this is the fcoe L2 queue
+ */
+#define skip_rx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
+
+/* skip tx queue
+ * if FCOE l2 support is disabled and this is the fcoe L2 queue
+ */
+#define skip_tx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
+
+#define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
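+
+/* Usage sketch: the skip_*() predicates above let the iterators pass over
+ * the FCoE slot transparently when FCoE support is disabled, e.g.:
+ *
+ * int i;
+ *
+ * for_each_rx_queue(bp, i)
+ * napi_enable(&bnx2x_fp(bp, i, napi));
+ */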
+
+
+
+
+/**
+ * bnx2x_set_mac_one - configure a single MAC address
+ *
+ * @bp: driver handle
+ * @mac: MAC to configure
+ * @obj: MAC object handle
+ * @set: if 'true' add a new MAC, otherwise - delete
+ * @mac_type: the type of the MAC to configure (e.g. ETH, UC list)
+ * @ramrod_flags: RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT)
+ *
+ * Configures one MAC according to provided parameters or continues the
+ * execution of previously scheduled commands if RAMROD_CONT is set in
+ * ramrod_flags.
+ *
+ * Returns zero if the operation has completed successfully, a positive value
+ * if the operation has been successfully scheduled and a negative value if
+ * the requested operation has failed.
+ */
+int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+ struct bnx2x_vlan_mac_obj *obj, bool set,
+ int mac_type, unsigned long *ramrod_flags);
+
+/**
+ * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
+ *
+ * @bp: driver handle
+ * @mac_obj: MAC object handle
+ * @mac_type: type of the MACs to clear (BNX2X_XXX_MAC)
+ * @wait_for_comp: if 'true' block until completion
+ *
+ * Deletes all MACs of the specific type (e.g. ETH, UC list).
+ *
+ * Returns zero if the operation has completed successfully, a positive value
+ * if the operation has been successfully scheduled and a negative value if
+ * the requested operation has failed.
+ */
+int bnx2x_del_all_macs(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *mac_obj,
+ int mac_type, bool wait_for_comp);
+
+/* Init Function API */
+void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
+int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
+int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
+int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
+void bnx2x_read_mf_cfg(struct bnx2x *bp);
+
+
+/* dmae */
+void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
+void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
+ u32 len32);
+void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
+u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
+u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
+u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
+ bool with_comp, u8 comp_type);
+
+
+void bnx2x_calc_fc_adv(struct bnx2x *bp);
+int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+ u32 data_hi, u32 data_lo, int cmd_type);
+void bnx2x_update_coalesce(struct bnx2x *bp);
+int bnx2x_get_cur_phy_idx(struct bnx2x *bp);
+
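+/* Poll 'reg' every 'wait' ms for at most 'ms' ms until it reads 'expected';
+ * returns the last value read, so a timeout can be detected by comparing
+ * the result against 'expected'.
+ */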
+static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
+ int wait)
+{
+ u32 val;
+
+ do {
+ val = REG_RD(bp, reg);
+ if (val == expected)
+ break;
+ ms -= wait;
+ msleep(wait);
+
+ } while (ms > 0);
+
+ return val;
+}
+
+#define BNX2X_ILT_ZALLOC(x, y, size) \
+ do { \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+ if (x) \
+ memset(x, 0, size); \
+ } while (0)
+
+#define BNX2X_ILT_FREE(x, y, size) \
+ do { \
+ if (x) { \
+ dma_free_coherent(&bp->pdev->dev, size, x, y); \
+ x = NULL; \
+ y = 0; \
+ } \
+ } while (0)
+
+#define ILOG2(x) (ilog2((x)))
+
+#define ILT_NUM_PAGE_ENTRIES (3072)
+/* In 57710/11 we use the whole table since we have 8 functions.
+ * In 57712 we have only 4 functions, but use the same size per function,
+ * so only half of the table is in use.
+ */
+#define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES/8)
+
+#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
+/*
+ * the physical address is shifted right by 12 bits and has a '1' (valid)
+ * bit added at the 53rd bit;
+ * then, since this is a wide register(TM),
+ * we split it into two 32 bit writes
+ */
+#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
+#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
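+
+/* e.g. for a 45-bit physical address x == 0x123456789000:
+ * ONCHIP_ADDR1(x) == (x >> 12) & 0xFFFFFFFF == 0x23456789
+ * ONCHIP_ADDR2(x) == (1 << 20) | (x >> 44) == 0x00100001
+ * i.e. the valid bit plus the high-order address bits.
+ */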
+
+/* load/unload mode */
+#define LOAD_NORMAL 0
+#define LOAD_OPEN 1
+#define LOAD_DIAG 2
+#define UNLOAD_NORMAL 0
+#define UNLOAD_CLOSE 1
+#define UNLOAD_RECOVERY 2
+
+
+/* DMAE command defines */
+#define DMAE_TIMEOUT -1
+#define DMAE_PCI_ERROR -2 /* E2 and onward */
+#define DMAE_NOT_RDY -3
+#define DMAE_PCI_ERR_FLAG 0x80000000
+
+#define DMAE_SRC_PCI 0
+#define DMAE_SRC_GRC 1
+
+#define DMAE_DST_NONE 0
+#define DMAE_DST_PCI 1
+#define DMAE_DST_GRC 2
+
+#define DMAE_COMP_PCI 0
+#define DMAE_COMP_GRC 1
+
+/* E2 and onward - PCI error handling in the completion */
+
+#define DMAE_COMP_REGULAR 0
+#define DMAE_COM_SET_ERR 1
+
+#define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << \
+ DMAE_COMMAND_SRC_SHIFT)
+#define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << \
+ DMAE_COMMAND_SRC_SHIFT)
+
+#define DMAE_CMD_DST_PCI (DMAE_DST_PCI << \
+ DMAE_COMMAND_DST_SHIFT)
+#define DMAE_CMD_DST_GRC (DMAE_DST_GRC << \
+ DMAE_COMMAND_DST_SHIFT)
+
+#define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << \
+ DMAE_COMMAND_C_DST_SHIFT)
+#define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC << \
+ DMAE_COMMAND_C_DST_SHIFT)
+
+#define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE
+
+#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT)
+
+#define DMAE_CMD_PORT_0 0
+#define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT
+
+#define DMAE_CMD_SRC_RESET DMAE_COMMAND_SRC_RESET
+#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET
+#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT
+
+#define DMAE_SRC_PF 0
+#define DMAE_SRC_VF 1
+
+#define DMAE_DST_PF 0
+#define DMAE_DST_VF 1
+
+#define DMAE_C_SRC 0
+#define DMAE_C_DST 1
+
+#define DMAE_LEN32_RD_MAX 0x80
+#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
+
+#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit
+ indicates error */
+
+#define MAX_DMAE_C_PER_PORT 8
+#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
+ BP_VN(bp))
+#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
+ E1HVN_MAX)
+
+/* PCIE link and speed */
+#define PCICFG_LINK_WIDTH 0x1f00000
+#define PCICFG_LINK_WIDTH_SHIFT 20
+#define PCICFG_LINK_SPEED 0xf0000
+#define PCICFG_LINK_SPEED_SHIFT 16
+
+
+#define BNX2X_NUM_TESTS 7
+
+#define BNX2X_PHY_LOOPBACK 0
+#define BNX2X_MAC_LOOPBACK 1
+#define BNX2X_PHY_LOOPBACK_FAILED 1
+#define BNX2X_MAC_LOOPBACK_FAILED 2
+#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \
+ BNX2X_PHY_LOOPBACK_FAILED)
+
+
+#define STROM_ASSERT_ARRAY_SIZE 50
+
+
+/* must be used on a CID before placing it on a HW ring */
+#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
+ (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
+ (x))
+
+#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
+#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
+
+
+#define BNX2X_BTR 4
+#define MAX_SPQ_PENDING 8
+
+/* CMNG constants, as derived from system spec calculations */
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE 100
+/* resolution of the rate shaping timer - 400 usec */
+#define RS_PERIODIC_TIMEOUT_USEC 400
+/* number of bytes in single QM arbitration cycle -
+ * coefficient for calculating the fairness timer */
+#define QM_ARB_BYTES 160000
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES 100
+/* how many bytes above threshold for the minimal credit of Min algorithm*/
+#define MIN_ABOVE_THRESH 32768
+/* Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair */
+#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
+/* Memory of fairness algorithm - 2 cycles */
+#define FAIR_MEM 2
+
+
+#define ATTN_NIG_FOR_FUNC (1L << 8)
+#define ATTN_SW_TIMER_4_FUNC (1L << 9)
+#define GPIO_2_FUNC (1L << 10)
+#define GPIO_3_FUNC (1L << 11)
+#define GPIO_4_FUNC (1L << 12)
+#define ATTN_GENERAL_ATTN_1 (1L << 13)
+#define ATTN_GENERAL_ATTN_2 (1L << 14)
+#define ATTN_GENERAL_ATTN_3 (1L << 15)
+#define ATTN_GENERAL_ATTN_4 (1L << 13)
+#define ATTN_GENERAL_ATTN_5 (1L << 14)
+#define ATTN_GENERAL_ATTN_6 (1L << 15)
+
+#define ATTN_HARD_WIRED_MASK 0xff00
+#define ATTENTION_ID 4
+
+
+/* stuff added to make the code fit 80Col */
+
+#define BNX2X_PMF_LINK_ASSERT \
+ GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp))
+
+#define BNX2X_MC_ASSERT_BITS \
+ (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+ GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+ GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+ GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT))
+
+#define BNX2X_MCP_ASSERT \
+ GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT)
+
+#define BNX2X_GRC_TIMEOUT GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC)
+#define BNX2X_GRC_RSV (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
+ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
+
+#define HW_INTERRUT_ASSERT_SET_0 \
+ (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT)
+#define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
+#define HW_INTERRUT_ASSERT_SET_1 \
+ (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT)
+#define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
+#define HW_INTERRUT_ASSERT_SET_2 \
+ (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\
+ AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT)
+#define HW_PRTY_ASSERT_SET_2 (AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
+
+#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+
+#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
+
+#define RSS_FLAGS(bp) \
+ (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
+ (bp->multi_mode << \
+ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
+#define MULTI_MASK 0x7f
+
+
+#define DEF_USB_FUNC_OFF offsetof(struct cstorm_def_status_block_u, func)
+#define DEF_CSB_FUNC_OFF offsetof(struct cstorm_def_status_block_c, func)
+#define DEF_XSB_FUNC_OFF offsetof(struct xstorm_def_status_block, func)
+#define DEF_TSB_FUNC_OFF offsetof(struct tstorm_def_status_block, func)
+
+#define DEF_USB_IGU_INDEX_OFF \
+ offsetof(struct cstorm_def_status_block_u, igu_index)
+#define DEF_CSB_IGU_INDEX_OFF \
+ offsetof(struct cstorm_def_status_block_c, igu_index)
+#define DEF_XSB_IGU_INDEX_OFF \
+ offsetof(struct xstorm_def_status_block, igu_index)
+#define DEF_TSB_IGU_INDEX_OFF \
+ offsetof(struct tstorm_def_status_block, igu_index)
+
+#define DEF_USB_SEGMENT_OFF \
+ offsetof(struct cstorm_def_status_block_u, segment)
+#define DEF_CSB_SEGMENT_OFF \
+ offsetof(struct cstorm_def_status_block_c, segment)
+#define DEF_XSB_SEGMENT_OFF \
+ offsetof(struct xstorm_def_status_block, segment)
+#define DEF_TSB_SEGMENT_OFF \
+ offsetof(struct tstorm_def_status_block, segment)
+
+#define BNX2X_SP_DSB_INDEX \
+ (&bp->def_status_blk->sp_sb.\
+ index_values[HC_SP_INDEX_ETH_DEF_CONS])
+
+#define SET_FLAG(value, mask, flag) \
+ do {\
+ (value) &= ~(mask);\
+ (value) |= ((flag) << (mask##_SHIFT));\
+ } while (0)
+
+#define GET_FLAG(value, mask) \
+ (((value) & (mask)) >> (mask##_SHIFT))
+
+#define GET_FIELD(value, fname) \
+ (((value) & (fname##_MASK)) >> (fname##_SHIFT))
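+
+/* These rely on the HSI convention that a field FOO comes with a matching
+ * FOO_SHIFT (SET_FLAG/GET_FLAG) or FOO_MASK/FOO_SHIFT pair (GET_FIELD).
+ * With a hypothetical pair FOO == 0x70 and FOO_SHIFT == 4, and u32 val = 0:
+ *
+ * SET_FLAG(val, FOO, 5) stores 5 in the field (val == 0x50)
+ * GET_FLAG(val, FOO) reads it back (evaluates to 5)
+ */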
+
+#define CAM_IS_INVALID(x) \
+ (GET_FLAG(x.flags, \
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
+ (T_ETH_MAC_COMMAND_INVALIDATE))
+
+/* Number of u32 elements in MC hash array */
+#define MC_HASH_SIZE 8
+#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \
+ TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4)
+
+
+#ifndef PXP2_REG_PXP2_INT_STS
+#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
+#endif
+
+#ifndef ETH_MAX_RX_CLIENTS_E2
+#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
+#endif
+
+#define BNX2X_VPD_LEN 128
+#define VENDOR_ID_LEN 4
+
+/* Congestion management fairness mode */
+#define CMNG_FNS_NONE 0
+#define CMNG_FNS_MINMAX 1
+
+#define HC_SEG_ACCESS_DEF 0 /* Driver decision 0-3 */
+#define HC_SEG_ACCESS_ATTN 4
+#define HC_SEG_ACCESS_NORM 0 /* Driver decision 0-1 */
+
+static const u32 dmae_reg_go_c[] = {
+ DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
+ DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
+ DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
+ DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
+};
+
+void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_notify_link_changed(struct bnx2x *bp);
+#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
new file mode 100644
index 000000000000..e575e89c7d46
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -0,0 +1,3598 @@
+/* bnx2x_cmn.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <linux/firmware.h>
+#include <linux/prefetch.h>
+#include "bnx2x_cmn.h"
+#include "bnx2x_init.h"
+#include "bnx2x_sp.h"
+
+
+
+/**
+ * bnx2x_bz_fp - zero content of the fastpath structure.
+ *
+ * @bp: driver handle
+ * @index: fastpath index to be zeroed
+ *
+ * Makes sure the contents of bp->fp[index].napi are kept
+ * intact.
+ */
+static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
+{
+ struct bnx2x_fastpath *fp = &bp->fp[index];
+ struct napi_struct orig_napi = fp->napi;
+ /* bzero bnx2x_fastpath contents */
+ memset(fp, 0, sizeof(*fp));
+
+ /* Restore the NAPI object as it has been already initialized */
+ fp->napi = orig_napi;
+
+ fp->bp = bp;
+ fp->index = index;
+ if (IS_ETH_FP(fp))
+ fp->max_cos = bp->max_cos;
+ else
+ /* Special queues support only one CoS */
+ fp->max_cos = 1;
+
+ /*
+ * Set the tpa flag for each queue. The tpa flag determines the queue's
+ * minimal size, so it must be set prior to queue memory allocation.
+ */
+ fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
+
+#ifdef BCM_CNIC
+ /* We don't want TPA on an FCoE L2 ring */
+ if (IS_FCOE_FP(fp))
+ fp->disable_tpa = 1;
+#endif
+}
+
+/**
+ * bnx2x_move_fp - move content of the fastpath structure.
+ *
+ * @bp: driver handle
+ * @from: source FP index
+ * @to: destination FP index
+ *
+ * Makes sure the contents of bp->fp[to].napi are kept
+ * intact.
+ */
+static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
+{
+ struct bnx2x_fastpath *from_fp = &bp->fp[from];
+ struct bnx2x_fastpath *to_fp = &bp->fp[to];
+ struct napi_struct orig_napi = to_fp->napi;
+ /* Move bnx2x_fastpath contents */
+ memcpy(to_fp, from_fp, sizeof(*to_fp));
+ to_fp->index = to;
+
+ /* Restore the NAPI object as it has been already initialized */
+ to_fp->napi = orig_napi;
+}
+
+int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
+
+/* free skb in the packet ring at pos idx
+ * return idx of last bd freed
+ */
+static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
+ u16 idx)
+{
+ struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
+ struct eth_tx_start_bd *tx_start_bd;
+ struct eth_tx_bd *tx_data_bd;
+ struct sk_buff *skb = tx_buf->skb;
+ u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
+ int nbd;
+
+ /* prefetch skb end pointer to speed up dev_kfree_skb() */
+ prefetch(&skb->end);
+
+ DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
+ txdata->txq_index, idx, tx_buf, skb);
+
+ /* unmap first bd */
+ DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
+ tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
+ dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+ BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
+
+
+ nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
+#ifdef BNX2X_STOP_ON_ERROR
+ if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
+ BNX2X_ERR("BAD nbd!\n");
+ bnx2x_panic();
+ }
+#endif
+ new_cons = nbd + tx_buf->first_bd;
+
+ /* Get the next bd */
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+
+ /* Skip a parse bd... */
+ --nbd;
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+
+ /* ...and the TSO split header bd since they have no mapping */
+ if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+ --nbd;
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+ }
+
+ /* now free frags */
+ while (nbd > 0) {
+
+ DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
+ tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+ dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+ BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+ if (--nbd)
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+ }
+
+ /* release skb */
+ WARN_ON(!skb);
+ dev_kfree_skb_any(skb);
+ tx_buf->first_bd = 0;
+ tx_buf->skb = NULL;
+
+ return new_cons;
+}
+
+int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
+{
+ struct netdev_queue *txq;
+ u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return -1;
+#endif
+
+ txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
+ hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
+ sw_cons = txdata->tx_pkt_cons;
+
+ while (sw_cons != hw_cons) {
+ u16 pkt_cons;
+
+ pkt_cons = TX_BD(sw_cons);
+
+ DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
+ " pkt_cons %u\n",
+ txdata->txq_index, hw_cons, sw_cons, pkt_cons);
+
+ bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
+ sw_cons++;
+ }
+
+ txdata->tx_pkt_cons = sw_cons;
+ txdata->tx_bd_cons = bd_cons;
+
+ /* Need to make the tx_bd_cons update visible to start_xmit()
+ * before checking for netif_tx_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that
+ * start_xmit() will miss it and cause the queue to be stopped
+ * forever.
+ * On the other hand we need an rmb() here to ensure the proper
+ * ordering of bit testing in the following
+ * netif_tx_queue_stopped(txq) call.
+ */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(txq))) {
+ /* Taking tx_lock() is needed to prevent reenabling the queue
+ * while it's empty. This could happen if rx_action() gets
+ * suspended in bnx2x_tx_int() after the condition before
+ * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
+ *
+ * stops the queue->sees fresh tx_bd_cons->releases the queue->
+ * sends some packets consuming the whole queue again->
+ * stops the queue
+ */
+
+ __netif_tx_lock(txq, smp_processor_id());
+
+ if ((netif_tx_queue_stopped(txq)) &&
+ (bp->state == BNX2X_STATE_OPEN) &&
+ (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
+ netif_tx_wake_queue(txq);
+
+ __netif_tx_unlock(txq);
+ }
+ return 0;
+}
+
+static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
+ u16 idx)
+{
+ u16 last_max = fp->last_max_sge;
+
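+ /* Signed 16-bit comparison, so producer index wrap-around
+ * is handled correctly.
+ */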
+ if (SUB_S16(idx, last_max) > 0)
+ fp->last_max_sge = idx;
+}
+
+static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
+ struct eth_fast_path_rx_cqe *fp_cqe)
+{
+ struct bnx2x *bp = fp->bp;
+ u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
+ le16_to_cpu(fp_cqe->len_on_bd)) >>
+ SGE_PAGE_SHIFT;
+ u16 last_max, last_elem, first_elem;
+ u16 delta = 0;
+ u16 i;
+
+ if (!sge_len)
+ return;
+
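+ /* sge_mask keeps one bit per SGE: a cleared bit marks a consumed
+ * page. Once a whole 64-bit element is cleared it is refilled below
+ * and the producer is advanced past it.
+ */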
+ /* First mark all used pages */
+ for (i = 0; i < sge_len; i++)
+ BIT_VEC64_CLEAR_BIT(fp->sge_mask,
+ RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
+
+ DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
+ sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+
+ /* Here we assume that the last SGE index is the biggest */
+ prefetch((void *)(fp->sge_mask));
+ bnx2x_update_last_max_sge(fp,
+ le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+
+ last_max = RX_SGE(fp->last_max_sge);
+ last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
+ first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
+
+ /* If ring is not full */
+ if (last_elem + 1 != first_elem)
+ last_elem++;
+
+ /* Now update the prod */
+ for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
+ if (likely(fp->sge_mask[i]))
+ break;
+
+ fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
+ delta += BIT_VEC64_ELEM_SZ;
+ }
+
+ if (delta > 0) {
+ fp->rx_sge_prod += delta;
+ /* clear page-end entries */
+ bnx2x_clear_sge_mask_next_elems(fp);
+ }
+
+ DP(NETIF_MSG_RX_STATUS,
+ "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
+ fp->last_max_sge, fp->rx_sge_prod);
+}
+
+static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
+ struct sk_buff *skb, u16 cons, u16 prod,
+ struct eth_fast_path_rx_cqe *cqe)
+{
+ struct bnx2x *bp = fp->bp;
+ struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
+ struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
+ struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
+ dma_addr_t mapping;
+ struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+ struct sw_rx_bd *first_buf = &tpa_info->first_buf;
+
+ /* print error if current state != stop */
+ if (tpa_info->tpa_state != BNX2X_TPA_STOP)
+ BNX2X_ERR("start of bin not in stop [%d]\n", queue);
+
+ /* Try to map an empty skb from the aggregation info */
+ mapping = dma_map_single(&bp->pdev->dev,
+ first_buf->skb->data,
+ fp->rx_buf_size, DMA_FROM_DEVICE);
+ /*
+ * ...if it fails - move the skb from the consumer to the producer
+ * and set the current aggregation state as ERROR to drop it
+ * when TPA_STOP arrives.
+ */
+
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ /* Move the BD from the consumer to the producer */
+ bnx2x_reuse_rx_skb(fp, cons, prod);
+ tpa_info->tpa_state = BNX2X_TPA_ERROR;
+ return;
+ }
+
+ /* move empty skb from pool to prod */
+ prod_rx_buf->skb = first_buf->skb;
+ dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
+ /* point prod_bd to new skb */
+ prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+ prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+ /* move partial skb from cons to pool (don't unmap yet) */
+ *first_buf = *cons_rx_buf;
+
+ /* mark bin state as START */
+ tpa_info->parsing_flags =
+ le16_to_cpu(cqe->pars_flags.flags);
+ tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
+ tpa_info->tpa_state = BNX2X_TPA_START;
+ tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
+ tpa_info->placement_offset = cqe->placement_offset;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ fp->tpa_queue_used |= (1 << queue);
+#ifdef _ASM_GENERIC_INT_L64_H
+ DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
+#else
+ DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
+#endif
+ fp->tpa_queue_used);
+#endif
+}
+
+/* Timestamp option length allowed for TPA aggregation:
+ *
+ * nop nop kind length echo val
+ */
+#define TPA_TSTAMP_OPT_LEN 12
+/**
+ * bnx2x_set_lro_mss - calculate the approximate value of the MSS
+ *
+ * @bp: driver handle
+ * @parsing_flags: parsing flags from the START CQE
+ * @len_on_bd: total length of the first packet for the
+ * aggregation.
+ *
+ * Returns the approximate value of the MSS for this aggregation,
+ * calculated from its first packet.
+ */
+static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
+ u16 len_on_bd)
+{
+ /*
+ * A TPA aggregation won't have either IP options or TCP options
+ * other than timestamp or IPv6 extension headers.
+ */
+ u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
+
+ if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
+ PRS_FLAG_OVERETH_IPV6)
+ hdrs_len += sizeof(struct ipv6hdr);
+ else /* IPv4 */
+ hdrs_len += sizeof(struct iphdr);
+
+ /* Check if there was a TCP timestamp; if there is one, it will
+ * always be 12 bytes long: nop nop kind length echo val.
+ *
+ * Otherwise FW would close the aggregation.
+ */
+ if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
+ hdrs_len += TPA_TSTAMP_OPT_LEN;
+
+ return len_on_bd - hdrs_len;
+}
+
+static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 queue, struct sk_buff *skb,
+ struct eth_end_agg_rx_cqe *cqe,
+ u16 cqe_idx)
+{
+ struct sw_rx_page *rx_pg, old_rx_pg;
+ u32 i, frag_len, frag_size, pages;
+ int err;
+ int j;
+ struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+ u16 len_on_bd = tpa_info->len_on_bd;
+
+ frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
+ pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
+
+ /* This is needed in order to enable forwarding support */
+ if (frag_size)
+ skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
+ tpa_info->parsing_flags, len_on_bd);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
+ BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
+ pages, cqe_idx);
+ BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
+ bnx2x_panic();
+ return -EINVAL;
+ }
+#endif
+
+ /* Run through the SGL and compose the fragmented skb */
+ for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
+ u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
+
+ /* FW gives the indices of the SGE as if the ring is an array
+ (meaning that "next" element will consume 2 indices) */
+ frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
+ rx_pg = &fp->rx_page_ring[sge_idx];
+ old_rx_pg = *rx_pg;
+
+ /* If we fail to allocate a substitute page, we simply stop
+ where we are and drop the whole packet */
+ err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
+ if (unlikely(err)) {
+ fp->eth_q_stats.rx_skb_alloc_failed++;
+ return err;
+ }
+
+ /* Unmap the page as we are going to pass it to the stack */
+ dma_unmap_page(&bp->pdev->dev,
+ dma_unmap_addr(&old_rx_pg, mapping),
+ SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+
+ /* Add one frag and update the appropriate fields in the skb */
+ skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+
+ skb->data_len += frag_len;
+ skb->truesize += frag_len;
+ skb->len += frag_len;
+
+ frag_size -= frag_len;
+ }
+
+ return 0;
+}
+
+static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 queue, struct eth_end_agg_rx_cqe *cqe,
+ u16 cqe_idx)
+{
+ struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+ struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
+ u8 pad = tpa_info->placement_offset;
+ u16 len = tpa_info->len_on_bd;
+ struct sk_buff *skb = rx_buf->skb;
+ /* alloc new skb */
+ struct sk_buff *new_skb;
+ u8 old_tpa_state = tpa_info->tpa_state;
+
+ tpa_info->tpa_state = BNX2X_TPA_STOP;
+
+ /* If there was an error during the handling of the TPA_START -
+ * drop this aggregation.
+ */
+ if (old_tpa_state == BNX2X_TPA_ERROR)
+ goto drop;
+
+ /* Try to allocate the new skb */
+ new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
+
+ /* Unmap skb in the pool anyway, as we are going to change
+ pool entry status to BNX2X_TPA_STOP even if new skb allocation
+ fails. */
+ dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
+ fp->rx_buf_size, DMA_FROM_DEVICE);
+
+ if (likely(new_skb)) {
+ prefetch(skb);
+ prefetch(((char *)(skb)) + L1_CACHE_BYTES);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (pad + len > fp->rx_buf_size) {
+ BNX2X_ERR("skb_put is about to fail... "
+ "pad %d len %d rx_buf_size %d\n",
+ pad, len, fp->rx_buf_size);
+ bnx2x_panic();
+ return;
+ }
+#endif
+
+ skb_reserve(skb, pad);
+ skb_put(skb, len);
+
+ skb->protocol = eth_type_trans(skb, bp->dev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
+ if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
+ __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
+ napi_gro_receive(&fp->napi, skb);
+ } else {
+ DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
+ " - dropping packet!\n");
+ dev_kfree_skb_any(skb);
+ }
+
+ /* put new skb in bin */
+ rx_buf->skb = new_skb;
+
+ return;
+ }
+
+drop:
+ /* drop the packet and keep the buffer in the bin */
+ DP(NETIF_MSG_RX_STATUS,
+ "Failed to allocate or map a new skb - dropping packet!\n");
+ fp->eth_q_stats.rx_skb_alloc_failed++;
+}
+
+/* Set Toeplitz hash value in the skb using the value from the
+ * CQE (calculated by HW).
+ */
+static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
+ struct sk_buff *skb)
+{
+ /* Set Toeplitz hash from CQE */
+ if ((bp->dev->features & NETIF_F_RXHASH) &&
+ (cqe->fast_path_cqe.status_flags &
+ ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+ skb->rxhash =
+ le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
+}
+
+int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+{
+ struct bnx2x *bp = fp->bp;
+ u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
+ u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
+ int rx_pkt = 0;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return 0;
+#endif
+
+ /* CQ "next element" is of the size of the regular element,
+ that's why it's ok here */
+ hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
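+ /* Skip over the "next page" element at the end of an RCQ page */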
+ if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+ hw_comp_cons++;
+
+ bd_cons = fp->rx_bd_cons;
+ bd_prod = fp->rx_bd_prod;
+ bd_prod_fw = bd_prod;
+ sw_comp_cons = fp->rx_comp_cons;
+ sw_comp_prod = fp->rx_comp_prod;
+
+ /* Memory barrier necessary as speculative reads of the rx
+ * buffer can be ahead of the index in the status block
+ */
+ rmb();
+
+ DP(NETIF_MSG_RX_STATUS,
+ "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
+ fp->index, hw_comp_cons, sw_comp_cons);
+
+ while (sw_comp_cons != hw_comp_cons) {
+ struct sw_rx_bd *rx_buf = NULL;
+ struct sk_buff *skb;
+ union eth_rx_cqe *cqe;
+ struct eth_fast_path_rx_cqe *cqe_fp;
+ u8 cqe_fp_flags;
+ enum eth_rx_cqe_type cqe_fp_type;
+ u16 len, pad;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return 0;
+#endif
+
+ comp_ring_cons = RCQ_BD(sw_comp_cons);
+ bd_prod = RX_BD(bd_prod);
+ bd_cons = RX_BD(bd_cons);
+
+ /* Prefetch the page containing the BD descriptor
+ at the producer's index. It will be needed when a new skb
+ is allocated */
+ prefetch((void *)(PAGE_ALIGN((unsigned long)
+ (&fp->rx_desc_ring[bd_prod])) -
+ PAGE_SIZE + 1));
+
+ cqe = &fp->rx_comp_ring[comp_ring_cons];
+ cqe_fp = &cqe->fast_path_cqe;
+ cqe_fp_flags = cqe_fp->type_error_flags;
+ cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
+
+ DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
+ " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
+ cqe_fp_flags, cqe_fp->status_flags,
+ le32_to_cpu(cqe_fp->rss_hash_result),
+ le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
+
+ /* is this a slowpath msg? */
+ if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
+ bnx2x_sp_event(fp, cqe);
+ goto next_cqe;
+
+ /* this is an rx packet */
+ } else {
+ rx_buf = &fp->rx_buf_ring[bd_cons];
+ skb = rx_buf->skb;
+ prefetch(skb);
+
+ if (!CQE_TYPE_FAST(cqe_fp_type)) {
+#ifdef BNX2X_STOP_ON_ERROR
+ /* sanity check */
+ if (fp->disable_tpa &&
+ (CQE_TYPE_START(cqe_fp_type) ||
+ CQE_TYPE_STOP(cqe_fp_type)))
+ BNX2X_ERR("START/STOP packet while "
+ "disable_tpa type %x\n",
+ CQE_TYPE(cqe_fp_type));
+#endif
+
+ if (CQE_TYPE_START(cqe_fp_type)) {
+ u16 queue = cqe_fp->queue_index;
+ DP(NETIF_MSG_RX_STATUS,
+ "calling tpa_start on queue %d\n",
+ queue);
+
+ bnx2x_tpa_start(fp, queue, skb,
+ bd_cons, bd_prod,
+ cqe_fp);
+
+ /* Set Toeplitz hash for LRO skb */
+ bnx2x_set_skb_rxhash(bp, cqe, skb);
+
+ goto next_rx;
+
+ } else {
+ u16 queue =
+ cqe->end_agg_cqe.queue_index;
+ DP(NETIF_MSG_RX_STATUS,
+ "calling tpa_stop on queue %d\n",
+ queue);
+
+ bnx2x_tpa_stop(bp, fp, queue,
+ &cqe->end_agg_cqe,
+ comp_ring_cons);
+#ifdef BNX2X_STOP_ON_ERROR
+ if (bp->panic)
+ return 0;
+#endif
+
+ bnx2x_update_sge_prod(fp, cqe_fp);
+ goto next_cqe;
+ }
+ }
+ /* non TPA */
+ len = le16_to_cpu(cqe_fp->pkt_len);
+ pad = cqe_fp->placement_offset;
+ dma_sync_single_for_cpu(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ pad + RX_COPY_THRESH,
+ DMA_FROM_DEVICE);
+ prefetch(((char *)(skb)) + L1_CACHE_BYTES);
+
+ /* is this an error packet? */
+ if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+ DP(NETIF_MSG_RX_ERR,
+ "ERROR flags %x rx packet %u\n",
+ cqe_fp_flags, sw_comp_cons);
+ fp->eth_q_stats.rx_err_discard_pkt++;
+ goto reuse_rx;
+ }
+
+ /* Since we don't have a jumbo ring,
+ * copy small packets if the MTU exceeds 1500
+ */
+ if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
+ (len <= RX_COPY_THRESH)) {
+ struct sk_buff *new_skb;
+
+ new_skb = netdev_alloc_skb(bp->dev, len + pad);
+ if (new_skb == NULL) {
+ DP(NETIF_MSG_RX_ERR,
+ "ERROR packet dropped "
+ "because of alloc failure\n");
+ fp->eth_q_stats.rx_skb_alloc_failed++;
+ goto reuse_rx;
+ }
+
+ /* aligned copy */
+ skb_copy_from_linear_data_offset(skb, pad,
+ new_skb->data + pad, len);
+ skb_reserve(new_skb, pad);
+ skb_put(new_skb, len);
+
+ bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
+
+ skb = new_skb;
+
+ } else
+ if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ fp->rx_buf_size,
+ DMA_FROM_DEVICE);
+ skb_reserve(skb, pad);
+ skb_put(skb, len);
+
+ } else {
+ DP(NETIF_MSG_RX_ERR,
+ "ERROR packet dropped because "
+ "of alloc failure\n");
+ fp->eth_q_stats.rx_skb_alloc_failed++;
+reuse_rx:
+ bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
+ goto next_rx;
+ }
+
+ skb->protocol = eth_type_trans(skb, bp->dev);
+
+ /* Set Toeplitz hash for a non-LRO skb */
+ bnx2x_set_skb_rxhash(bp, cqe, skb);
+
+ skb_checksum_none_assert(skb);
+
+ if (bp->dev->features & NETIF_F_RXCSUM) {
+
+ if (likely(BNX2X_RX_CSUM_OK(cqe)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ fp->eth_q_stats.hw_csum_err++;
+ }
+ }
+
+ skb_record_rx_queue(skb, fp->index);
+
+ if (le16_to_cpu(cqe_fp->pars_flags.flags) &
+ PARSING_FLAGS_VLAN)
+ __vlan_hwaccel_put_tag(skb,
+ le16_to_cpu(cqe_fp->vlan_tag));
+ napi_gro_receive(&fp->napi, skb);
+
+next_rx:
+ rx_buf->skb = NULL;
+
+ bd_cons = NEXT_RX_IDX(bd_cons);
+ bd_prod = NEXT_RX_IDX(bd_prod);
+ bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
+ rx_pkt++;
+next_cqe:
+ sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
+ sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
+
+ if (rx_pkt == budget)
+ break;
+ } /* while */
+
+ fp->rx_bd_cons = bd_cons;
+ fp->rx_bd_prod = bd_prod_fw;
+ fp->rx_comp_cons = sw_comp_cons;
+ fp->rx_comp_prod = sw_comp_prod;
+
+ /* Update producers */
+ bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
+ fp->rx_sge_prod);
+
+ fp->rx_pkt += rx_pkt;
+ fp->rx_calls++;
+
+ return rx_pkt;
+}
+
+static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
+{
+ struct bnx2x_fastpath *fp = fp_cookie;
+ struct bnx2x *bp = fp->bp;
+ u8 cos;
+
+ DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
+ "[fp %d fw_sd %d igusb %d]\n",
+ fp->index, fp->fw_sb_id, fp->igu_sb_id);
+ bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return IRQ_HANDLED;
+#endif
+
+ /* Handle Rx and Tx according to MSI-X vector */
+ prefetch(fp->rx_cons_sb);
+
+ for_each_cos_in_tx_queue(fp, cos)
+ prefetch(fp->txdata[cos].tx_cons_sb);
+
+ prefetch(&fp->sb_running_index[SM_RX_ID]);
+ napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+
+ return IRQ_HANDLED;
+}
+
+/* HW Lock for shared dual port PHYs */
+void bnx2x_acquire_phy_lock(struct bnx2x *bp)
+{
+ mutex_lock(&bp->port.phy_mutex);
+
+ if (bp->port.need_hw_lock)
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+}
+
+void bnx2x_release_phy_lock(struct bnx2x *bp)
+{
+ if (bp->port.need_hw_lock)
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+
+ mutex_unlock(&bp->port.phy_mutex);
+}
+
+/* calculates MF speed according to current linespeed and MF configuration */
+u16 bnx2x_get_mf_speed(struct bnx2x *bp)
+{
+ u16 line_speed = bp->link_vars.line_speed;
+ if (IS_MF(bp)) {
+ u16 maxCfg = bnx2x_extract_max_cfg(bp,
+ bp->mf_config[BP_VN(bp)]);
+
+ /* Calculate the current MAX line speed limit for the MF
+ * devices
+ */
+ if (IS_MF_SI(bp))
+ line_speed = (line_speed * maxCfg) / 100;
+ else { /* SD mode */
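+ /* maxCfg here is in units of 100 Mbps (assumption
+ * from the scaling below).
+ */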
+ u16 vn_max_rate = maxCfg * 100;
+
+ if (vn_max_rate < line_speed)
+ line_speed = vn_max_rate;
+ }
+ }
+
+ return line_speed;
+}
+
+/**
+ * bnx2x_fill_report_data - fill the link report data
+ *
+ * @bp: driver handle
+ * @data: link state to update
+ *
+ * It uses non-atomic bit operations because it is called under the mutex.
+ */
+static inline void bnx2x_fill_report_data(struct bnx2x *bp,
+ struct bnx2x_link_report_data *data)
+{
+ u16 line_speed = bnx2x_get_mf_speed(bp);
+
+ memset(data, 0, sizeof(*data));
+
+ /* Fill the report data: effective line speed */
+ data->line_speed = line_speed;
+
+ /* Link is down */
+ if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
+ __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &data->link_report_flags);
+
+ /* Full DUPLEX */
+ if (bp->link_vars.duplex == DUPLEX_FULL)
+ __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
+
+ /* Rx Flow Control is ON */
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
+
+ /* Tx Flow Control is ON */
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
+}
+
+/**
+ * bnx2x_link_report - report link status to OS.
+ *
+ * @bp: driver handle
+ *
+ * Calls the __bnx2x_link_report() under the same locking scheme
+ * as the link/PHY state managing code to ensure consistent link
+ * reporting.
+ */
+void bnx2x_link_report(struct bnx2x *bp)
+{
+ bnx2x_acquire_phy_lock(bp);
+ __bnx2x_link_report(bp);
+ bnx2x_release_phy_lock(bp);
+}
+
+/**
+ * __bnx2x_link_report - report link status to OS.
+ *
+ * @bp: driver handle
+ *
+ * Non-atomic implementation.
+ * Should be called under the phy_lock.
+ */
+void __bnx2x_link_report(struct bnx2x *bp)
+{
+ struct bnx2x_link_report_data cur_data;
+
+ /* reread mf_cfg */
+ if (!CHIP_IS_E1(bp))
+ bnx2x_read_mf_cfg(bp);
+
+ /* Read the current link report info */
+ bnx2x_fill_report_data(bp, &cur_data);
+
+ /* Don't report link down or exactly the same link status twice */
+ if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
+ (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &bp->last_reported_link.link_report_flags) &&
+ test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &cur_data.link_report_flags)))
+ return;
+
+ bp->link_cnt++;
+
+ /* We are going to report new link parameters now -
+ * remember the current data for the next time.
+ */
+ memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
+
+ if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &cur_data.link_report_flags)) {
+ netif_carrier_off(bp->dev);
+ netdev_err(bp->dev, "NIC Link is Down\n");
+ return;
+ } else {
+ const char *duplex;
+ const char *flow;
+
+ netif_carrier_on(bp->dev);
+
+ if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
+ &cur_data.link_report_flags))
+ duplex = "full";
+ else
+ duplex = "half";
+
+ /* Handle the FC at the end so that only these flags would be
+ * possibly set. This way we may easily check if there is no FC
+ * enabled.
+ */
+ if (cur_data.link_report_flags) {
+ if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+ &cur_data.link_report_flags)) {
+ if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+ &cur_data.link_report_flags))
+ flow = "ON - receive & transmit";
+ else
+ flow = "ON - receive";
+ } else {
+ flow = "ON - transmit";
+ }
+ } else {
+ flow = "none";
+ }
+ netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
+ cur_data.line_speed, duplex, flow);
+ }
+}
+
+void bnx2x_init_rx_rings(struct bnx2x *bp)
+{
+ int func = BP_FUNC(bp);
+ u16 ring_prod;
+ int i, j;
+
+ /* Allocate TPA resources */
+ for_each_rx_queue(bp, j) {
+ struct bnx2x_fastpath *fp = &bp->fp[j];
+
+ DP(NETIF_MSG_IFUP,
+ "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
+
+ if (!fp->disable_tpa) {
+ /* Fill the per-aggregation pool */
+ for (i = 0; i < MAX_AGG_QS(bp); i++) {
+ struct bnx2x_agg_info *tpa_info =
+ &fp->tpa_info[i];
+ struct sw_rx_bd *first_buf =
+ &tpa_info->first_buf;
+
+ first_buf->skb = netdev_alloc_skb(bp->dev,
+ fp->rx_buf_size);
+ if (!first_buf->skb) {
+ BNX2X_ERR("Failed to allocate TPA "
+ "skb pool for queue[%d] - "
+ "disabling TPA on this "
+ "queue!\n", j);
+ bnx2x_free_tpa_pool(bp, fp, i);
+ fp->disable_tpa = 1;
+ break;
+ }
+ dma_unmap_addr_set(first_buf, mapping, 0);
+ tpa_info->tpa_state = BNX2X_TPA_STOP;
+ }
+
+ /* "next page" elements initialization */
+ bnx2x_set_next_page_sgl(fp);
+
+ /* set SGEs bit mask */
+ bnx2x_init_sge_ring_bit_mask(fp);
+
+ /* Allocate SGEs and initialize the ring elements */
+ for (i = 0, ring_prod = 0;
+ i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
+
+ if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
+ BNX2X_ERR("was only able to allocate "
+ "%d rx sges\n", i);
+ BNX2X_ERR("disabling TPA for "
+ "queue[%d]\n", j);
+ /* Cleanup already allocated elements */
+ bnx2x_free_rx_sge_range(bp, fp,
+ ring_prod);
+ bnx2x_free_tpa_pool(bp, fp,
+ MAX_AGG_QS(bp));
+ fp->disable_tpa = 1;
+ ring_prod = 0;
+ break;
+ }
+ ring_prod = NEXT_SGE_IDX(ring_prod);
+ }
+
+ fp->rx_sge_prod = ring_prod;
+ }
+ }
+
+ for_each_rx_queue(bp, j) {
+ struct bnx2x_fastpath *fp = &bp->fp[j];
+
+ fp->rx_bd_cons = 0;
+
+ /* Activate BD ring */
+ /* Warning!
+ * this will generate an interrupt (to the TSTORM)
+ * must only be done after chip is initialized
+ */
+ bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
+ fp->rx_sge_prod);
+
+ if (j != 0)
+ continue;
+
+ if (CHIP_IS_E1(bp)) {
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
+ U64_LO(fp->rx_comp_mapping));
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
+ U64_HI(fp->rx_comp_mapping));
+ }
+ }
+}
+
+static void bnx2x_free_tx_skbs(struct bnx2x *bp)
+{
+ int i;
+ u8 cos;
+
+ for_each_tx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ for_each_cos_in_tx_queue(fp, cos) {
+ struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+
+ u16 bd_cons = txdata->tx_bd_cons;
+ u16 sw_prod = txdata->tx_pkt_prod;
+ u16 sw_cons = txdata->tx_pkt_cons;
+
+ while (sw_cons != sw_prod) {
+ bd_cons = bnx2x_free_tx_pkt(bp, txdata,
+ TX_BD(sw_cons));
+ sw_cons++;
+ }
+ }
+ }
+}
+
+static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
+{
+ struct bnx2x *bp = fp->bp;
+ int i;
+
+ /* ring wasn't allocated */
+ if (fp->rx_buf_ring == NULL)
+ return;
+
+ for (i = 0; i < NUM_RX_BD; i++) {
+ struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
+ struct sk_buff *skb = rx_buf->skb;
+
+ if (skb == NULL)
+ continue;
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ fp->rx_buf_size, DMA_FROM_DEVICE);
+
+ rx_buf->skb = NULL;
+ dev_kfree_skb(skb);
+ }
+}
+
+static void bnx2x_free_rx_skbs(struct bnx2x *bp)
+{
+ int j;
+
+ for_each_rx_queue(bp, j) {
+ struct bnx2x_fastpath *fp = &bp->fp[j];
+
+ bnx2x_free_rx_bds(fp);
+
+ if (!fp->disable_tpa)
+ bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
+ }
+}
+
+void bnx2x_free_skbs(struct bnx2x *bp)
+{
+ bnx2x_free_tx_skbs(bp);
+ bnx2x_free_rx_skbs(bp);
+}
+
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
+{
+ /* load old values */
+ u32 mf_cfg = bp->mf_config[BP_VN(bp)];
+
+ if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
+ /* leave all but MAX value */
+ mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
+
+ /* set new MAX value */
+ mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
+ & FUNC_MF_CFG_MAX_BW_MASK;
+
+ bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
+ }
+}
+
+/**
+ * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
+ *
+ * @bp: driver handle
+ * @nvecs: number of vectors to be released
+ */
+static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
+{
+ int i, offset = 0;
+
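+ /* Vector layout (see bnx2x_enable_msix()): entry 0 is the slowpath
+ * IRQ, entry 1 the CNIC one when BCM_CNIC is set, then one vector
+ * per ETH queue.
+ */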
+ if (nvecs == offset)
+ return;
+ free_irq(bp->msix_table[offset].vector, bp->dev);
+ DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
+ bp->msix_table[offset].vector);
+ offset++;
+#ifdef BCM_CNIC
+ if (nvecs == offset)
+ return;
+ offset++;
+#endif
+
+ for_each_eth_queue(bp, i) {
+ if (nvecs == offset)
+ return;
+ DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
+ "irq\n", i, bp->msix_table[offset].vector);
+
+ free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
+ }
+}
+
+void bnx2x_free_irq(struct bnx2x *bp)
+{
+ if (bp->flags & USING_MSIX_FLAG)
+ bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
+ CNIC_PRESENT + 1);
+ else
+ /* MSI and INTx release the same single vector */
+ free_irq(bp->pdev->irq, bp->dev);
+}
+
+int bnx2x_enable_msix(struct bnx2x *bp)
+{
+ int msix_vec = 0, i, rc, req_cnt;
+
+ bp->msix_table[msix_vec].entry = msix_vec;
+ DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
+ bp->msix_table[0].entry);
+ msix_vec++;
+
+#ifdef BCM_CNIC
+ bp->msix_table[msix_vec].entry = msix_vec;
+ DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
+ bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
+ msix_vec++;
+#endif
+ /* We need separate vectors for ETH queues only (not FCoE) */
+ for_each_eth_queue(bp, i) {
+ bp->msix_table[msix_vec].entry = msix_vec;
+ DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
+ "(fastpath #%u)\n", msix_vec, msix_vec, i);
+ msix_vec++;
+ }
+
+ req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
+
+ rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
+
+ /*
+ * reconfigure number of tx/rx queues according to available
+ * MSI-X vectors
+ */
+ if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
+ /* how many fewer vectors will we have? */
+ int diff = req_cnt - rc;
+
+ DP(NETIF_MSG_IFUP,
+ "Trying to use less MSI-X vectors: %d\n", rc);
+
+ rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
+
+ if (rc) {
+ DP(NETIF_MSG_IFUP,
+ "MSI-X is not attainable rc %d\n", rc);
+ return rc;
+ }
+ /*
+ * decrease number of queues by number of unallocated entries
+ */
+ bp->num_queues -= diff;
+
+ DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
+ bp->num_queues);
+ } else if (rc) {
+ /* fall back to INTx if not enough memory */
+ if (rc == -ENOMEM)
+ bp->flags |= DISABLE_MSI_FLAG;
+ DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
+ return rc;
+ }
+
+ bp->flags |= USING_MSIX_FLAG;
+
+ return 0;
+}
+
+static int bnx2x_req_msix_irqs(struct bnx2x *bp)
+{
+ int i, rc, offset = 0;
+
+ rc = request_irq(bp->msix_table[offset++].vector,
+ bnx2x_msix_sp_int, 0,
+ bp->dev->name, bp->dev);
+ if (rc) {
+ BNX2X_ERR("request sp irq failed\n");
+ return -EBUSY;
+ }
+
+#ifdef BCM_CNIC
+ offset++;
+#endif
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+ bp->dev->name, i);
+
+ rc = request_irq(bp->msix_table[offset].vector,
+ bnx2x_msix_fp_int, 0, fp->name, fp);
+ if (rc) {
+ BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
+ bp->msix_table[offset].vector, rc);
+ bnx2x_free_msix_irqs(bp, offset);
+ return -EBUSY;
+ }
+
+ offset++;
+ }
+
+ i = BNX2X_NUM_ETH_QUEUES(bp);
+ offset = 1 + CNIC_PRESENT;
+ netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
+ " ... fp[%d] %d\n",
+ bp->msix_table[0].vector,
+ 0, bp->msix_table[offset].vector,
+ i - 1, bp->msix_table[offset + i - 1].vector);
+
+ return 0;
+}
+
+int bnx2x_enable_msi(struct bnx2x *bp)
+{
+ int rc;
+
+ rc = pci_enable_msi(bp->pdev);
+ if (rc) {
+ DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
+ return -1;
+ }
+ bp->flags |= USING_MSI_FLAG;
+
+ return 0;
+}
+
+static int bnx2x_req_irq(struct bnx2x *bp)
+{
+ unsigned long flags;
+ int rc;
+
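+ /* An MSI vector is exclusive to this device, while a legacy
+ * INTx line may be shared with other devices.
+ */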
+ if (bp->flags & USING_MSI_FLAG)
+ flags = 0;
+ else
+ flags = IRQF_SHARED;
+
+ rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
+ bp->dev->name, bp->dev);
+ return rc;
+}
+
+static inline int bnx2x_setup_irqs(struct bnx2x *bp)
+{
+ int rc = 0;
+ if (bp->flags & USING_MSIX_FLAG) {
+ rc = bnx2x_req_msix_irqs(bp);
+ if (rc)
+ return rc;
+ } else {
+ bnx2x_ack_int(bp);
+ rc = bnx2x_req_irq(bp);
+ if (rc) {
+ BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
+ return rc;
+ }
+ if (bp->flags & USING_MSI_FLAG) {
+ bp->dev->irq = bp->pdev->irq;
+ netdev_info(bp->dev, "using MSI IRQ %d\n",
+ bp->pdev->irq);
+ }
+ }
+
+ return 0;
+}
+
+static inline void bnx2x_napi_enable(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_rx_queue(bp, i)
+ napi_enable(&bnx2x_fp(bp, i, napi));
+}
+
+static inline void bnx2x_napi_disable(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_rx_queue(bp, i)
+ napi_disable(&bnx2x_fp(bp, i, napi));
+}
+
+void bnx2x_netif_start(struct bnx2x *bp)
+{
+ if (netif_running(bp->dev)) {
+ bnx2x_napi_enable(bp);
+ bnx2x_int_enable(bp);
+ if (bp->state == BNX2X_STATE_OPEN)
+ netif_tx_wake_all_queues(bp->dev);
+ }
+}
+
+void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
+{
+ bnx2x_int_disable_sync(bp, disable_hw);
+ bnx2x_napi_disable(bp);
+}
+
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp)) {
+ struct ethhdr *hdr = (struct ethhdr *)skb->data;
+ u16 ether_type = ntohs(hdr->h_proto);
+
+ /* Skip VLAN tag if present */
+ if (ether_type == ETH_P_8021Q) {
+ struct vlan_ethhdr *vhdr =
+ (struct vlan_ethhdr *)skb->data;
+
+ ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
+ }
+
+ /* If ethertype is FCoE or FIP - use FCoE ring */
+ if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
+ return bnx2x_fcoe_tx(bp, txq_index);
+ }
+#endif
+ /* select a non-FCoE queue */
+ return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
+}
+
+void bnx2x_set_num_queues(struct bnx2x *bp)
+{
+ switch (bp->multi_mode) {
+ case ETH_RSS_MODE_DISABLED:
+ bp->num_queues = 1;
+ break;
+ case ETH_RSS_MODE_REGULAR:
+ bp->num_queues = bnx2x_calc_num_queues(bp);
+ break;
+
+ default:
+ bp->num_queues = 1;
+ break;
+ }
+
+ /* Add special queues */
+ bp->num_queues += NON_ETH_CONTEXT_USE;
+}
+
+/**
+ * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
+ *
+ * @bp: Driver handle
+ *
+ * We currently support at most 16 Tx queues for each CoS, thus we will
+ * allocate a multiple of 16 for ETH L2 rings, according to the value of
+ * bp->max_cos.
+ *
+ * If there is an FCoE L2 queue the appropriate Tx queue will have the next
+ * index after all ETH L2 indices.
+ *
+ * If the actual number of Tx queues (for each CoS) is less than 16 then
+ * there will be holes at the end of each group of 16 ETH L2 indices (0..15,
+ * 16..31,...) with indices that are not coupled with any real Tx queue.
+ *
+ * The proper configuration of skb->queue_mapping is handled by
+ * bnx2x_select_queue() and __skb_tx_hash().
+ *
+ * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
+ * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
+ */
+static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
+{
+ int rc, tx, rx;
+
+ tx = MAX_TXQS_PER_COS * bp->max_cos;
+ rx = BNX2X_NUM_ETH_QUEUES(bp);
+
+/* account for fcoe queue */
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp)) {
+ rx += FCOE_PRESENT;
+ tx += FCOE_PRESENT;
+ }
+#endif
+
+ rc = netif_set_real_num_tx_queues(bp->dev, tx);
+ if (rc) {
+ BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
+ return rc;
+ }
+ rc = netif_set_real_num_rx_queues(bp->dev, rx);
+ if (rc) {
+ BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
+ return rc;
+ }
+
+ DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
+ tx, rx);
+
+ return rc;
+}
+
+static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ /* Always use a mini-jumbo MTU for the FCoE L2 ring */
+ if (IS_FCOE_IDX(i))
+ /*
+ * Although there are no IP frames expected to arrive to
+ * this ring we still want to add an
+ * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
+ * overrun attack.
+ */
+ fp->rx_buf_size =
+ BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
+ BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+ else
+ fp->rx_buf_size =
+ bp->dev->mtu + ETH_OVREHEAD +
+ BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+ }
+}
+
+static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
+{
+ int i;
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+ u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
+
+ /*
+ * Prepare the initial contents of the indirection table if RSS is
+ * enabled
+ */
+ if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
+ for (i = 0; i < sizeof(ind_table); i++)
+ ind_table[i] =
+ bp->fp->cl_id + (i % num_eth_queues);
+ }
+
+ /*
+ * For 57710 and 57711 SEARCHER configuration (rss_keys) is
+ * per-port, so if explicit configuration is needed, do it only
+ * for a PMF.
+ *
+ * For 57712 and newer on the other hand it's a per-function
+ * configuration.
+ */
+ return bnx2x_config_rss_pf(bp, ind_table,
+ bp->port.pmf || !CHIP_IS_E1x(bp));
+}
+
+int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
+{
+ struct bnx2x_config_rss_params params = {0};
+ int i;
+
+ /* Although RSS is meaningless when there is a single HW queue we
+ * still need it enabled in order to have HW Rx hash generated.
+ *
+ * if (!is_eth_multi(bp))
+ * bp->multi_mode = ETH_RSS_MODE_DISABLED;
+ */
+
+ params.rss_obj = &bp->rss_conf_obj;
+
+ __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+
+ /* RSS mode */
+ switch (bp->multi_mode) {
+ case ETH_RSS_MODE_DISABLED:
+ __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
+ break;
+ case ETH_RSS_MODE_REGULAR:
+ __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
+ break;
+ case ETH_RSS_MODE_VLAN_PRI:
+ __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
+ break;
+ case ETH_RSS_MODE_E1HOV_PRI:
+ __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
+ break;
+ case ETH_RSS_MODE_IP_DSCP:
+ __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
+ break;
+ default:
+ BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
+ return -EINVAL;
+ }
+
+ /* If RSS is enabled */
+ if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
+ /* RSS configuration */
+ __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+
+ /* Hash bits */
+ params.rss_result_mask = MULTI_MASK;
+
+ memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
+
+ if (config_hash) {
+ /* RSS keys */
+ for (i = 0; i < sizeof(params.rss_key) / 4; i++)
+ params.rss_key[i] = random32();
+
+ __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
+ }
+ }
+
+ return bnx2x_config_rss(bp, &params);
+}
+
+static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
+{
+ struct bnx2x_func_state_params func_params = {0};
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_HW_INIT;
+
+ func_params.params.hw_init.load_phase = load_code;
+
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+/*
+ * Cleans the objects that have internal lists without sending
+ * ramrods. Should be run when interrupts are disabled.
+ */
+static void bnx2x_squeeze_objects(struct bnx2x *bp)
+{
+ int rc;
+ unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
+ struct bnx2x_mcast_ramrod_params rparam = {0};
+ struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+
+ /***************** Cleanup MACs' object first *************************/
+
+ /* Wait for completion of the requested commands */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ /* Perform a dry cleanup */
+ __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
+
+ /* Clean ETH primary MAC */
+ __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
+ rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc != 0)
+ BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
+
+ /* Cleanup UC list */
+ vlan_mac_flags = 0;
+ __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
+ rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc != 0)
+ BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
+
+ /***************** Now clean mcast object *****************************/
+ rparam.mcast_obj = &bp->mcast_obj;
+ __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
+
+ /* Add a DEL command... */
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+ if (rc < 0)
+ BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
+ "object: %d\n", rc);
+
+ /* ...and wait until all pending commands are cleared */
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+ while (rc != 0) {
+ if (rc < 0) {
+ BNX2X_ERR("Failed to clean multi-cast object: %d\n",
+ rc);
+ return;
+ }
+
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+ }
+}
+
+#ifndef BNX2X_STOP_ON_ERROR
+#define LOAD_ERROR_EXIT(bp, label) \
+ do { \
+ (bp)->state = BNX2X_STATE_ERROR; \
+ goto label; \
+ } while (0)
+#else
+#define LOAD_ERROR_EXIT(bp, label) \
+ do { \
+ (bp)->state = BNX2X_STATE_ERROR; \
+ (bp)->panic = 1; \
+ return -EBUSY; \
+ } while (0)
+#endif
+
+/* must be called with rtnl_lock */
+int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
+{
+ int port = BP_PORT(bp);
+ u32 load_code;
+ int i, rc;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return -EPERM;
+#endif
+
+ bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
+
+ /* Set the initial link reported state to link down */
+ bnx2x_acquire_phy_lock(bp);
+ memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
+ __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &bp->last_reported_link.link_report_flags);
+ bnx2x_release_phy_lock(bp);
+
+ /* must be called before memory allocation and HW init */
+ bnx2x_ilt_set_info(bp);
+
+ /*
+ * Zero fastpath structures, preserving invariants like the napi
+ * struct (which is allocated only once), the fp index, max_cos and
+ * the bp pointer. Also set fp->disable_tpa.
+ */
+ for_each_queue(bp, i)
+ bnx2x_bz_fp(bp, i);
+
+ /* Set the receive queues buffer size */
+ bnx2x_set_rx_buf_size(bp);
+
+ if (bnx2x_alloc_mem(bp))
+ return -ENOMEM;
+
+ /* Since bnx2x_alloc_mem() may update
+ * bp->num_queues, bnx2x_set_real_num_queues() must always
+ * come after it.
+ */
+ rc = bnx2x_set_real_num_queues(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to set real_num_queues\n");
+ LOAD_ERROR_EXIT(bp, load_error0);
+ }
+
+ /* Configure multi-CoS mappings in the kernel.
+ * This configuration may be overridden by a multi-class queue
+ * discipline or by a DCBX negotiation result.
+ */
+ bnx2x_setup_tc(bp->dev, bp->max_cos);
+
+ bnx2x_napi_enable(bp);
+
+ /* Send the LOAD_REQUEST command to the MCP.
+ * It returns the type of LOAD command: if this is the first
+ * port to be initialized, common blocks should be initialized,
+ * otherwise not.
+ */
+ if (!BP_NOMCP(bp)) {
+ load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
+ if (!load_code) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ rc = -EBUSY;
+ LOAD_ERROR_EXIT(bp, load_error1);
+ }
+ if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
+ rc = -EBUSY; /* other port in diagnostic mode */
+ LOAD_ERROR_EXIT(bp, load_error1);
+ }
+
+ } else {
+ int path = BP_PATH(bp);
+
+ DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ load_count[path][0]++;
+ load_count[path][1 + port]++;
+ DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ if (load_count[path][0] == 1)
+ load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
+ else if (load_count[path][1 + port] == 1)
+ load_code = FW_MSG_CODE_DRV_LOAD_PORT;
+ else
+ load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
+ }
+
+ if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
+ bp->port.pmf = 1;
+ /*
+ * We need the barrier to ensure the ordering between the
+ * writing to bp->port.pmf here and reading it from the
+ * bnx2x_periodic_task().
+ */
+ smp_mb();
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+ } else
+ bp->port.pmf = 0;
+
+ DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+
+ /* Init Function state controlling object */
+ bnx2x__init_func_obj(bp);
+
+ /* Initialize HW */
+ rc = bnx2x_init_hw(bp, load_code);
+ if (rc) {
+ BNX2X_ERR("HW init failed, aborting\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ LOAD_ERROR_EXIT(bp, load_error2);
+ }
+
+ /* Connect to IRQs */
+ rc = bnx2x_setup_irqs(bp);
+ if (rc) {
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ LOAD_ERROR_EXIT(bp, load_error2);
+ }
+
+ /* Setup NIC internals and enable interrupts */
+ bnx2x_nic_init(bp, load_code);
+
+ /* Init per-function objects */
+ bnx2x_init_bp_objs(bp);
+
+ if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
+ (bp->common.shmem2_base)) {
+ if (SHMEM2_HAS(bp, dcc_support))
+ SHMEM2_WR(bp, dcc_support,
+ (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
+ SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
+ }
+
+ bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
+ rc = bnx2x_func_start(bp);
+ if (rc) {
+ BNX2X_ERR("Function start failed!\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+
+ /* Send LOAD_DONE command to MCP */
+ if (!BP_NOMCP(bp)) {
+ load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ if (!load_code) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ rc = -EBUSY;
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+ }
+
+ rc = bnx2x_setup_leading(bp);
+ if (rc) {
+ BNX2X_ERR("Setup leading failed!\n");
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+
+#ifdef BCM_CNIC
+ /* Enable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
+#endif
+
+ for_each_nondefault_queue(bp, i) {
+ rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error4);
+ }
+
+ rc = bnx2x_init_rss_pf(bp);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error4);
+
+ /* Now that clients are configured we are ready to work */
+ bp->state = BNX2X_STATE_OPEN;
+
+ /* Configure a ucast MAC */
+ rc = bnx2x_set_eth_mac(bp, true);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error4);
+
+ if (bp->pending_max) {
+ bnx2x_update_max_mf_config(bp, bp->pending_max);
+ bp->pending_max = 0;
+ }
+
+ if (bp->port.pmf)
+ bnx2x_initial_phy_init(bp, load_mode);
+
+ /* Start fast path */
+
+ /* Initialize Rx filter. */
+ netif_addr_lock_bh(bp->dev);
+ bnx2x_set_rx_mode(bp->dev);
+ netif_addr_unlock_bh(bp->dev);
+
+ /* Start the Tx */
+ switch (load_mode) {
+ case LOAD_NORMAL:
+ /* Tx queues should only be re-enabled */
+ netif_tx_wake_all_queues(bp->dev);
+ break;
+
+ case LOAD_OPEN:
+ netif_tx_start_all_queues(bp->dev);
+ smp_mb__after_clear_bit();
+ break;
+
+ case LOAD_DIAG:
+ bp->state = BNX2X_STATE_DIAG;
+ break;
+
+ default:
+ break;
+ }
+
+ if (!bp->port.pmf)
+ bnx2x__link_status_update(bp);
+
+ /* start the timer */
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
+
+#ifdef BCM_CNIC
+ bnx2x_setup_cnic_irq_info(bp);
+ if (bp->state == BNX2X_STATE_OPEN)
+ bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
+#endif
+ bnx2x_inc_load_cnt(bp);
+
+ /* Wait for all pending SP commands to complete */
+ if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
+ BNX2X_ERR("Timeout waiting for SP elements to complete\n");
+ bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+ return -EBUSY;
+ }
+
+ bnx2x_dcbx_init(bp);
+ return 0;
+
+#ifndef BNX2X_STOP_ON_ERROR
+load_error4:
+#ifdef BCM_CNIC
+ /* Disable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+#endif
+load_error3:
+ bnx2x_int_disable_sync(bp, 1);
+
+ /* Clean queueable objects */
+ bnx2x_squeeze_objects(bp);
+
+ /* Free SKBs, SGEs, TPA pool and driver internals */
+ bnx2x_free_skbs(bp);
+ for_each_rx_queue(bp, i)
+ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+load_error2:
+ if (!BP_NOMCP(bp)) {
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+ }
+
+ bp->port.pmf = 0;
+load_error1:
+ bnx2x_napi_disable(bp);
+load_error0:
+ bnx2x_free_mem(bp);
+
+ return rc;
+#endif /* ! BNX2X_STOP_ON_ERROR */
+}
+
+/* must be called with rtnl_lock */
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
+{
+ int i;
+ bool global = false;
+
+ if ((bp->state == BNX2X_STATE_CLOSED) ||
+ (bp->state == BNX2X_STATE_ERROR)) {
+ /* We can get here if the driver has been unloaded
+ * during parity error recovery and is either waiting for a
+ * leader to complete or for other functions to unload and
+ * then ifdown has been issued. In this case we want to
+ * unload and let other functions complete the recovery
+ * process.
+ */
+ bp->recovery_state = BNX2X_RECOVERY_DONE;
+ bp->is_leader = 0;
+ bnx2x_release_leader_lock(bp);
+ smp_mb();
+
+ DP(NETIF_MSG_HW, "Releasing a leadership...\n");
+
+ return -EINVAL;
+ }
+
+ /*
+ * It's important to set bp->state to a value different from
+ * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
+ * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
+ */
+ bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+ smp_mb();
+
+ /* Stop Tx */
+ bnx2x_tx_disable(bp);
+
+#ifdef BCM_CNIC
+ bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+#endif
+
+ bp->rx_mode = BNX2X_RX_MODE_NONE;
+
+ del_timer_sync(&bp->timer);
+
+ /* Set ALWAYS_ALIVE bit in shmem */
+ bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
+
+ bnx2x_drv_pulse(bp);
+
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ /* Cleanup the chip if needed */
+ if (unload_mode != UNLOAD_RECOVERY)
+ bnx2x_chip_cleanup(bp, unload_mode);
+ else {
+ /* Send the UNLOAD_REQUEST to the MCP */
+ bnx2x_send_unload_req(bp, unload_mode);
+
+ /*
+ * Prevent transactions to the host from the functions on the
+ * engine that doesn't reset global blocks in case of a global
+ * attention, once global blocks are reset and gates are opened
+ * (the engine whose leader will perform the recovery
+ * last).
+ */
+ if (!CHIP_IS_E1x(bp))
+ bnx2x_pf_disable(bp);
+
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 1);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+
+ /* Report UNLOAD_DONE to MCP */
+ bnx2x_send_unload_done(bp);
+ }
+
+ /*
+ * At this stage no more interrupts will arrive so we may safely clean
+ * the queueable objects here in case they failed to get cleaned so far.
+ */
+ bnx2x_squeeze_objects(bp);
+
+ /* There should be no more pending SP commands at this stage */
+ bp->sp_state = 0;
+
+ bp->port.pmf = 0;
+
+ /* Free SKBs, SGEs, TPA pool and driver internals */
+ bnx2x_free_skbs(bp);
+ for_each_rx_queue(bp, i)
+ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+
+ bnx2x_free_mem(bp);
+
+ bp->state = BNX2X_STATE_CLOSED;
+
+ /* Check if there are pending parity attentions. If there are - set
+ * RECOVERY_IN_PROGRESS.
+ */
+ if (bnx2x_chk_parity_attn(bp, &global, false)) {
+ bnx2x_set_reset_in_progress(bp);
+
+ /* Set RESET_IS_GLOBAL if needed */
+ if (global)
+ bnx2x_set_reset_global(bp);
+ }
+
+ /* The last driver must disable a "close the gate" if there is no
+ * parity attention or "process kill" pending.
+ */
+ if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
+ bnx2x_disable_close_the_gate(bp);
+
+ return 0;
+}
+
+int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
+{
+ u16 pmcsr;
+
+ /* If there is no power capability, silently succeed */
+ if (!bp->pm_cap) {
+ DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
+ return 0;
+ }
+
+ pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+
+ switch (state) {
+ case PCI_D0:
+ pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+ ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
+ PCI_PM_CTRL_PME_STATUS));
+
+ if (pmcsr & PCI_PM_CTRL_STATE_MASK)
+ /* delay required during transition out of D3hot */
+ msleep(20);
+ break;
+
+ case PCI_D3hot:
+ /* If there are other clients above, don't
+ shut down the power */
+ if (atomic_read(&bp->pdev->enable_cnt) != 1)
+ return 0;
+ /* Don't shut down the power for emulation and FPGA */
+ if (CHIP_REV_IS_SLOW(bp))
+ return 0;
+
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
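+ /* PCI PM control state field value 3 selects D3hot */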
+ pmcsr |= 3;
+
+ if (bp->wol)
+ pmcsr |= PCI_PM_CTRL_PME_ENABLE;
+
+ pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+ pmcsr);
+
+ /* No more memory access after this point until
+ * device is brought back to D0.
+ */
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * net_device service functions
+ */
+int bnx2x_poll(struct napi_struct *napi, int budget)
+{
+ int work_done = 0;
+ u8 cos;
+ struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
+ napi);
+ struct bnx2x *bp = fp->bp;
+
+ while (1) {
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic)) {
+ napi_complete(napi);
+ return 0;
+ }
+#endif
+
+ for_each_cos_in_tx_queue(fp, cos)
+ if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
+ bnx2x_tx_int(bp, &fp->txdata[cos]);
+
+ if (bnx2x_has_rx_work(fp)) {
+ work_done += bnx2x_rx_int(fp, budget - work_done);
+
+ /* must not complete if we consumed full budget */
+ if (work_done >= budget)
+ break;
+ }
+
+ /* Fall out from the NAPI loop if needed */
+ if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+#ifdef BCM_CNIC
+ /* No need to update SB for FCoE L2 ring as long as
+ * it's connected to the default SB and the SB
+ * has been updated when NAPI was scheduled.
+ */
+ if (IS_FCOE_FP(fp)) {
+ napi_complete(napi);
+ break;
+ }
+#endif
+
+ bnx2x_update_fpsb_idx(fp);
+ /* bnx2x_has_rx_work() reads the status block,
+ * thus we need to ensure that status block indices
+ * have been actually read (bnx2x_update_fpsb_idx)
+ * prior to this check (bnx2x_has_rx_work) so that
+ * we won't write the "newer" value of the status block
+ * to IGU (if there was a DMA right after
+ * bnx2x_has_rx_work and if there is no rmb, the memory
+ * reading (bnx2x_update_fpsb_idx) may be postponed
+ * to right before bnx2x_ack_sb). In this case there
+ * will never be another interrupt until there is
+ * another update of the status block, while there
+ * is still unhandled work.
+ */
+ rmb();
+
+ if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+ napi_complete(napi);
+ /* Re-enable interrupts */
+ DP(NETIF_MSG_HW,
+ "Update index to %d\n", fp->fp_hc_idx);
+ bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
+ le16_to_cpu(fp->fp_hc_idx),
+ IGU_INT_ENABLE, 1);
+ break;
+ }
+ }
+ }
+
+ return work_done;
+}
+
+/* We split the first BD into header and data BDs
+ * to ease the pain of our fellow microcode engineers;
+ * we use one mapping for both BDs.
+ * So far this has only been observed to happen
+ * in Other Operating Systems(TM).
+ */
+static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata,
+ struct sw_tx_bd *tx_buf,
+ struct eth_tx_start_bd **tx_bd, u16 hlen,
+ u16 bd_prod, int nbd)
+{
+ struct eth_tx_start_bd *h_tx_bd = *tx_bd;
+ struct eth_tx_bd *d_tx_bd;
+ dma_addr_t mapping;
+ int old_len = le16_to_cpu(h_tx_bd->nbytes);
+
+ /* first fix first BD */
+ h_tx_bd->nbd = cpu_to_le16(nbd);
+ h_tx_bd->nbytes = cpu_to_le16(hlen);
+
+ DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
+ "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
+ h_tx_bd->addr_lo, h_tx_bd->nbd);
+
+ /* now get a new data BD
+ * (after the pbd) and fill it */
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+ d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
+
+ mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
+ le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
+
+ d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+ d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+ d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
+
+ /* this marks the BD as one that has no individual mapping */
+ tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
+
+ DP(NETIF_MSG_TX_QUEUED,
+ "TSO split data size is %d (%x:%x)\n",
+ d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
+
+ /* update tx_bd */
+ *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
+
+ return bd_prod;
+}
+
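+/* Fold the bytes between the HW checksum start and the transport header
+ * out of (fix > 0) or into (fix < 0) the partial checksum, and return
+ * the result byte-swapped as the parse BD expects.
+ */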
+static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
+{
+ if (fix > 0)
+ csum = (u16) ~csum_fold(csum_sub(csum,
+ csum_partial(t_header - fix, fix, 0)));
+
+ else if (fix < 0)
+ csum = (u16) ~csum_fold(csum_add(csum,
+ csum_partial(t_header, -fix, 0)));
+
+ return swab16(csum);
+}
+
+static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
+{
+ u32 rc;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ rc = XMIT_PLAIN;
+
+ else {
+ if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
+ rc = XMIT_CSUM_V6;
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ rc |= XMIT_CSUM_TCP;
+
+ } else {
+ rc = XMIT_CSUM_V4;
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ rc |= XMIT_CSUM_TCP;
+ }
+ }
+
+ if (skb_is_gso_v6(skb))
+ rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
+ else if (skb_is_gso(skb))
+ rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
+
+ return rc;
+}
+
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+/* check if packet requires linearization (packet is too fragmented)
+ no need to check fragmentation if page size > 8K (there will be no
+ violation to FW restrictions) */
+static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
+ u32 xmit_type)
+{
+ int to_copy = 0;
+ int hlen = 0;
+ int first_bd_sz = 0;
+
+ /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
+ if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
+
+ if (xmit_type & XMIT_GSO) {
+ unsigned short lso_mss = skb_shinfo(skb)->gso_size;
+ /* Check if LSO packet needs to be copied:
+ 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
+ int wnd_size = MAX_FETCH_BD - 3;
+ /* Number of windows to check */
+ int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
+ int wnd_idx = 0;
+ int frag_idx = 0;
+ u32 wnd_sum = 0;
+
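+ /* The FW restriction checked below (as the logic implies):
+ * every window of wnd_size consecutive BDs must carry at least
+ * one full MSS of payload, otherwise the skb has to be
+ * linearized.
+ */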
+ /* Headers length */
+ hlen = (int)(skb_transport_header(skb) - skb->data) +
+ tcp_hdrlen(skb);
+
+ /* Amount of data (w/o headers) on linear part of SKB*/
+ first_bd_sz = skb_headlen(skb) - hlen;
+
+ wnd_sum = first_bd_sz;
+
+ /* Calculate the first sum - it's special */
+ for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
+ wnd_sum +=
+ skb_shinfo(skb)->frags[frag_idx].size;
+
+ /* If there was data on linear skb data - check it */
+ if (first_bd_sz > 0) {
+ if (unlikely(wnd_sum < lso_mss)) {
+ to_copy = 1;
+ goto exit_lbl;
+ }
+
+ wnd_sum -= first_bd_sz;
+ }
+
+ /* Others are easier: run through the frag list and
+ check all windows */
+ for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
+ wnd_sum +=
+ skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
+
+ if (unlikely(wnd_sum < lso_mss)) {
+ to_copy = 1;
+ break;
+ }
+ wnd_sum -=
+ skb_shinfo(skb)->frags[wnd_idx].size;
+ }
+ } else {
+ /* a non-LSO packet that is this fragmented should
+ always be linearized */
+ to_copy = 1;
+ }
+ }
+
+exit_lbl:
+ if (unlikely(to_copy))
+ DP(NETIF_MSG_TX_QUEUED,
+ "Linearization IS REQUIRED for %s packet. "
+ "num_frags %d hlen %d first_bd_sz %d\n",
+ (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
+ skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
+
+ return to_copy;
+}
+#endif
+
+static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
+ u32 xmit_type)
+{
+ *parsing_data |= (skb_shinfo(skb)->gso_size <<
+ ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
+ ETH_TX_PARSE_BD_E2_LSO_MSS;
+ if ((xmit_type & XMIT_GSO_V6) &&
+ (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+ *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
+}
+
+/**
+ * bnx2x_set_pbd_gso - update PBD in GSO case.
+ *
+ * @skb: packet skb
+ * @pbd: parse BD
+ * @xmit_type: xmit flags
+ */
+static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
+ struct eth_tx_parse_bd_e1x *pbd,
+ u32 xmit_type)
+{
+ pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
+ pbd->tcp_flags = pbd_tcp_flags(skb);
+
+ if (xmit_type & XMIT_GSO_V4) {
+ pbd->ip_id = swab16(ip_hdr(skb)->id);
+ pbd->tcp_pseudo_csum =
+ swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+
+ } else
+ pbd->tcp_pseudo_csum =
+ swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+
+ pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
+}
+
+/**
+ * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
+ *
+ * @bp: driver handle
+ * @skb: packet skb
+ * @parsing_data: data to be updated
+ * @xmit_type: xmit flags
+ *
+ * 57712 related
+ */
+static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
+ u32 *parsing_data, u32 xmit_type)
+{
+ *parsing_data |=
+ ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
+
+ if (xmit_type & XMIT_CSUM_TCP) {
+ *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
+
+ return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
+ } else
+ /* We support checksum offload for TCP and UDP only.
+ * No need to pass the UDP header length - it's a constant.
+ */
+ return skb_transport_header(skb) +
+ sizeof(struct udphdr) - skb->data;
+}
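+
+/* Note: the value returned above is the full L2+L3+L4 header length in
+ * bytes - e.g. 14 + 20 + 20 = 54 for a TCP/IPv4 frame without options -
+ * which bnx2x_start_xmit() compares against skb_headlen() to decide
+ * whether the headers BD must be split for LSO.
+ */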
+
+static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
+{
+ tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
+
+ if (xmit_type & XMIT_CSUM_V4)
+ tx_start_bd->bd_flags.as_bitfield |=
+ ETH_TX_BD_FLAGS_IP_CSUM;
+ else
+ tx_start_bd->bd_flags.as_bitfield |=
+ ETH_TX_BD_FLAGS_IPV6;
+
+ if (!(xmit_type & XMIT_CSUM_TCP))
+ tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
+}
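+
+/* Example: a UDP/IPv4 packet ends up with ETH_TX_BD_FLAGS_L4_CSUM |
+ * ETH_TX_BD_FLAGS_IP_CSUM | ETH_TX_BD_FLAGS_IS_UDP in the start BD,
+ * while TCP/IPv6 sets only ETH_TX_BD_FLAGS_L4_CSUM and
+ * ETH_TX_BD_FLAGS_IPV6.
+ */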
+
+/**
+ * bnx2x_set_pbd_csum - update PBD with checksum and return header length
+ *
+ * @bp: driver handle
+ * @skb: packet skb
+ * @pbd: parse BD to be updated
+ * @xmit_type: xmit flags
+ */
+static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_parse_bd_e1x *pbd,
+ u32 xmit_type)
+{
+ u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
+
+ /* for now NS flag is not used in Linux */
+ pbd->global_data =
+ (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+ ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
+
+ pbd->ip_hlen_w = (skb_transport_header(skb) -
+ skb_network_header(skb)) >> 1;
+
+ hlen += pbd->ip_hlen_w;
+
+ /* We support checksum offload for TCP and UDP only */
+ if (xmit_type & XMIT_CSUM_TCP)
+ hlen += tcp_hdrlen(skb) / 2;
+ else
+ hlen += sizeof(struct udphdr) / 2;
+
+ pbd->total_hlen_w = cpu_to_le16(hlen);
+ hlen = hlen * 2;
+
+ if (xmit_type & XMIT_CSUM_TCP) {
+ pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
+
+ } else {
+ s8 fix = SKB_CS_OFF(skb); /* signed! */
+
+ DP(NETIF_MSG_TX_QUEUED,
+ "hlen %d fix %d csum before fix %x\n",
+ le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
+
+ /* HW bug: fixup the CSUM */
+ pbd->tcp_pseudo_csum =
+ bnx2x_csum_fix(skb_transport_header(skb),
+ SKB_CS(skb), fix);
+
+ DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
+ pbd->tcp_pseudo_csum);
+ }
+
+ return hlen;
+}
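+
+/* Worked example: TCP/IPv4 without options gives
+ * hlen = 14/2 + 20/2 + 20/2 = 27 words; total_hlen_w is stored in 16-bit
+ * words as the PBD expects, while the byte count (hlen * 2 = 54) is what
+ * is returned to the caller.
+ */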
+
+/* called with netif_tx_lock
+ * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue()
+ */
+netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ struct bnx2x_fastpath *fp;
+ struct netdev_queue *txq;
+ struct bnx2x_fp_txdata *txdata;
+ struct sw_tx_bd *tx_buf;
+ struct eth_tx_start_bd *tx_start_bd, *first_bd;
+ struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
+ struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
+ struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+ u32 pbd_e2_parsing_data = 0;
+ u16 pkt_prod, bd_prod;
+ int nbd, txq_index, fp_index, txdata_index;
+ dma_addr_t mapping;
+ u32 xmit_type = bnx2x_xmit_type(bp, skb);
+ int i;
+ u8 hlen = 0;
+ __le16 pkt_size = 0;
+ struct ethhdr *eth;
+ u8 mac_type = UNICAST_ADDRESS;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return NETDEV_TX_BUSY;
+#endif
+
+ txq_index = skb_get_queue_mapping(skb);
+ txq = netdev_get_tx_queue(dev, txq_index);
+
+ BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
+
+ /* decode the fastpath index and the cos index from the txq */
+ fp_index = TXQ_TO_FP(txq_index);
+ txdata_index = TXQ_TO_COS(txq_index);
+
+#ifdef BCM_CNIC
+ /*
+ * Override the above for the FCoE queue:
+ * - FCoE fp entry is right after the ETH entries.
+ * - FCoE L2 queue uses bp->txdata[0] only.
+ */
+ if (unlikely(!NO_FCOE(bp) && (txq_index ==
+ bnx2x_fcoe_tx(bp, txq_index)))) {
+ fp_index = FCOE_IDX;
+ txdata_index = 0;
+ }
+#endif
+
+ /* enable this debug print to view the transmission queue being used
+ DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
+ txq_index, fp_index, txdata_index); */
+
+ /* locate the fastpath and the txdata */
+ fp = &bp->fp[fp_index];
+ txdata = &fp->txdata[txdata_index];
+
+ /* enable this debug print to view the transmission details
+ DP(BNX2X_MSG_FP, "transmitting packet cid %d fp index %d txdata_index %d"
+ " tx_data ptr %p fp pointer %p\n",
+ txdata->cid, fp_index, txdata_index, txdata, fp); */
+
+ if (unlikely(bnx2x_tx_avail(bp, txdata) <
+ (skb_shinfo(skb)->nr_frags + 3))) {
+ fp->eth_q_stats.driver_xoff++;
+ netif_tx_stop_queue(txq);
+ BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
+ "protocol(%x,%x) gso type %x xmit_type %x\n",
+ txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
+ ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
+
+ eth = (struct ethhdr *)skb->data;
+
+ /* set flag according to packet type (UNICAST_ADDRESS is default)*/
+ if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
+ if (is_broadcast_ether_addr(eth->h_dest))
+ mac_type = BROADCAST_ADDRESS;
+ else
+ mac_type = MULTICAST_ADDRESS;
+ }
+
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+ /* First, check if we need to linearize the skb (due to FW
+ restrictions). No need to check fragmentation if page size > 8K
+ (there will be no violation to FW restrictions) */
+ if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
+ /* Statistics of linearization */
+ bp->lin_cnt++;
+ if (skb_linearize(skb) != 0) {
+ DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
+ "silently dropping this SKB\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+#endif
+ /* Map skb linear data for DMA */
+ mapping = dma_map_single(&bp->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
+ "silently dropping this SKB\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ /*
+ Please read carefully. First we use one BD which we mark as start,
+ then we have a parsing info BD (used for TSO or xsum),
+ and only then we have the rest of the TSO BDs.
+ (don't forget to mark the last one as last,
+ and to unmap only AFTER you write to the BD ...)
+ And above all, all PBD sizes are in words - NOT DWORDS!
+ */
+
+ /* get the current pkt producer now - it is advanced just before
+ * sending the packet, since mapping of pages may fail and cause
+ * the packet to be dropped
+ */
+ pkt_prod = txdata->tx_pkt_prod;
+ bd_prod = TX_BD(txdata->tx_bd_prod);
+
+ /* get a tx_buf and first BD
+ * tx_start_bd may be changed during SPLIT,
+ * but first_bd will always stay first
+ */
+ tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
+ tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
+ first_bd = tx_start_bd;
+
+ tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+ SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
+ mac_type);
+
+ /* header nbd */
+ SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
+
+ /* remember the first BD of the packet */
+ tx_buf->first_bd = txdata->tx_bd_prod;
+ tx_buf->skb = skb;
+ tx_buf->flags = 0;
+
+ DP(NETIF_MSG_TX_QUEUED,
+ "sending pkt %u @%p next_idx %u bd %u @%p\n",
+ pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
+
+ if (vlan_tx_tag_present(skb)) {
+ tx_start_bd->vlan_or_ethertype =
+ cpu_to_le16(vlan_tx_tag_get(skb));
+ tx_start_bd->bd_flags.as_bitfield |=
+ (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
+ } else
+ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+
+ /* turn on parsing and get a BD */
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+ if (xmit_type & XMIT_CSUM)
+ bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
+
+ if (!CHIP_IS_E1x(bp)) {
+ pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
+ memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
+ /* Set PBD in checksum offload case */
+ if (xmit_type & XMIT_CSUM)
+ hlen = bnx2x_set_pbd_csum_e2(bp, skb,
+ &pbd_e2_parsing_data,
+ xmit_type);
+ if (IS_MF_SI(bp)) {
+ /*
+ * fill in the MAC addresses in the PBD - for local
+ * switching
+ */
+ bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
+ &pbd_e2->src_mac_addr_mid,
+ &pbd_e2->src_mac_addr_lo,
+ eth->h_source);
+ bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
+ &pbd_e2->dst_mac_addr_mid,
+ &pbd_e2->dst_mac_addr_lo,
+ eth->h_dest);
+ }
+ } else {
+ pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
+ memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
+ /* Set PBD in checksum offload case */
+ if (xmit_type & XMIT_CSUM)
+ hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
+ }
+
+ /* Setup the data pointer of the first BD of the packet */
+ tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+ tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+ nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
+ tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
+ pkt_size = tx_start_bd->nbytes;
+
+ DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
+ " nbytes %d flags %x vlan %x\n",
+ tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
+ le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
+ tx_start_bd->bd_flags.as_bitfield,
+ le16_to_cpu(tx_start_bd->vlan_or_ethertype));
+
+ if (xmit_type & XMIT_GSO) {
+
+ DP(NETIF_MSG_TX_QUEUED,
+ "TSO packet len %d hlen %d total len %d tso size %d\n",
+ skb->len, hlen, skb_headlen(skb),
+ skb_shinfo(skb)->gso_size);
+
+ tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
+
+ if (unlikely(skb_headlen(skb) > hlen))
+ bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
+ &tx_start_bd, hlen,
+ bd_prod, ++nbd);
+ if (!CHIP_IS_E1x(bp))
+ bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
+ xmit_type);
+ else
+ bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
+ }
+
+ /* Set the PBD's parsing_data field if not zero
+ * (for the chips newer than 57711).
+ */
+ if (pbd_e2_parsing_data)
+ pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
+
+ tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
+
+ /* Handle fragmented skb */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, frag->size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+
+ DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
+ "dropping packet...\n");
+
+ /* we need to unmap all buffers already mapped
+ * for this SKB;
+ * first_bd->nbd needs to be properly updated
+ * before the call to bnx2x_free_tx_pkt
+ */
+ first_bd->nbd = cpu_to_le16(nbd);
+ bnx2x_free_tx_pkt(bp, txdata,
+ TX_BD(txdata->tx_pkt_prod));
+ return NETDEV_TX_OK;
+ }
+
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+ tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
+ if (total_pkt_bd == NULL)
+ total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
+
+ tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+ tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+ tx_data_bd->nbytes = cpu_to_le16(frag->size);
+ le16_add_cpu(&pkt_size, frag->size);
+ nbd++;
+
+ DP(NETIF_MSG_TX_QUEUED,
+ "frag %d bd @%p addr (%x:%x) nbytes %d\n",
+ i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
+ le16_to_cpu(tx_data_bd->nbytes));
+ }
+
+ DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
+
+ /* update with actual num BDs */
+ first_bd->nbd = cpu_to_le16(nbd);
+
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+ /* now send a tx doorbell, counting the next BD
+ * if the packet contains or ends with it
+ */
+ if (TX_BD_POFF(bd_prod) < nbd)
+ nbd++;
+
+ /* total_pkt_bytes should be set on the first data BD if
+ * it's not an LSO packet and there is more than one
+ * data BD. In this case pkt_size is limited by an MTU value.
+ * However we prefer to set it for an LSO packet (while we don't
+ * have to) in order to save some CPU cycles in the non-LSO
+ * case, where we care much more about them.
+ */
+ if (total_pkt_bd != NULL)
+ total_pkt_bd->total_pkt_bytes = pkt_size;
+
+ if (pbd_e1x)
+ DP(NETIF_MSG_TX_QUEUED,
+ "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
+ " tcp_flags %x xsum %x seq %u hlen %u\n",
+ pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
+ pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
+ pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
+ le16_to_cpu(pbd_e1x->total_hlen_w));
+ if (pbd_e2)
+ DP(NETIF_MSG_TX_QUEUED,
+ "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
+ pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
+ pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
+ pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
+ pbd_e2->parsing_data);
+ DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
+
+ txdata->tx_pkt_prod++;
+ /*
+ * Make sure that the BD data is updated before updating the producer
+ * since FW might read the BD right after the producer is updated.
+ * This is only applicable for weak-ordered memory model archs such
+ * as IA-64. The following barrier is also mandatory since the FW
+ * assumes packets must have BDs.
+ */
+ wmb();
+
+ txdata->tx_db.data.prod += nbd;
+ barrier();
+
+ DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
+
+ mmiowb();
+
+ txdata->tx_bd_prod += nbd;
+
+ if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
+ netif_tx_stop_queue(txq);
+
+ /* paired memory barrier is in bnx2x_tx_int(), we have to keep
+ * ordering of set_bit() in netif_tx_stop_queue() and read of
+ * fp->bd_tx_cons */
+ smp_mb();
+
+ fp->eth_q_stats.driver_xoff++;
+ if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
+ netif_tx_wake_queue(txq);
+ }
+ txdata->tx_pkt++;
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * bnx2x_setup_tc - routine to configure net_device for multi tc
+ *
+ * @netdev: net device to configure
+ * @tc: number of traffic classes to enable
+ *
+ * callback connected to the ndo_setup_tc function pointer
+ */
+int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
+{
+ int cos, prio, count, offset;
+ struct bnx2x *bp = netdev_priv(dev);
+
+ /* setup tc must be called under rtnl lock */
+ ASSERT_RTNL();
+
+ /* no traffic classes requested. aborting */
+ if (!num_tc) {
+ netdev_reset_tc(dev);
+ return 0;
+ }
+
+ /* requested to support too many traffic classes */
+ if (num_tc > bp->max_cos) {
+ DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
+ " requested: %d. max supported is %d\n",
+ num_tc, bp->max_cos);
+ return -EINVAL;
+ }
+
+ /* declare amount of supported traffic classes */
+ if (netdev_set_num_tc(dev, num_tc)) {
+ DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
+ num_tc);
+ return -EINVAL;
+ }
+
+ /* configure priority to traffic class mapping */
+ for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
+ netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
+ DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
+ prio, bp->prio_to_cos[prio]);
+ }
+
+ /* Use this configuration to differentiate tc0 from other COSes
+ This can be used for ets or pfc, and saves the effort of setting
+ up a multi class queue disc or negotiating DCBX with a switch
+ netdev_set_prio_tc_map(dev, 0, 0);
+ DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
+ for (prio = 1; prio < 16; prio++) {
+ netdev_set_prio_tc_map(dev, prio, 1);
+ DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
+ } */
+
+ /* configure traffic class to transmission queue mapping */
+ for (cos = 0; cos < bp->max_cos; cos++) {
+ count = BNX2X_NUM_ETH_QUEUES(bp);
+ offset = cos * MAX_TXQS_PER_COS;
+ netdev_set_tc_queue(dev, cos, count, offset);
+ DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
+ cos, offset, count);
+ }
+
+ return 0;
+}
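+
+/* Example mapping (assuming 4 eth queues and max_cos == 3): each priority
+ * maps to a tc via bp->prio_to_cos[], then tc0 covers txqs [0..3],
+ * tc1 covers [MAX_TXQS_PER_COS..MAX_TXQS_PER_COS + 3] and so on, i.e.
+ * offset = cos * MAX_TXQS_PER_COS and count = number of eth queues.
+ */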
+
+/* called with rtnl_lock */
+int bnx2x_change_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc = 0;
+
+ if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
+ return -EINVAL;
+
+ if (netif_running(dev)) {
+ rc = bnx2x_set_eth_mac(bp, false);
+ if (rc)
+ return rc;
+ }
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ if (netif_running(dev))
+ rc = bnx2x_set_eth_mac(bp, true);
+
+ return rc;
+}
+
+static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
+{
+ union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
+ struct bnx2x_fastpath *fp = &bp->fp[fp_index];
+ u8 cos;
+
+ /* Common */
+#ifdef BCM_CNIC
+ if (IS_FCOE_IDX(fp_index)) {
+ memset(sb, 0, sizeof(union host_hc_status_block));
+ fp->status_blk_mapping = 0;
+
+ } else {
+#endif
+ /* status blocks */
+ if (!CHIP_IS_E1x(bp))
+ BNX2X_PCI_FREE(sb->e2_sb,
+ bnx2x_fp(bp, fp_index,
+ status_blk_mapping),
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_FREE(sb->e1x_sb,
+ bnx2x_fp(bp, fp_index,
+ status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
+#ifdef BCM_CNIC
+ }
+#endif
+ /* Rx */
+ if (!skip_rx_queue(bp, fp_index)) {
+ bnx2x_free_rx_bds(fp);
+
+ /* fastpath rx rings: rx_buf rx_desc rx_comp */
+ BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
+ bnx2x_fp(bp, fp_index, rx_desc_mapping),
+ sizeof(struct eth_rx_bd) * NUM_RX_BD);
+
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
+ bnx2x_fp(bp, fp_index, rx_comp_mapping),
+ sizeof(struct eth_fast_path_rx_cqe) *
+ NUM_RCQ_BD);
+
+ /* SGE ring */
+ BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
+ bnx2x_fp(bp, fp_index, rx_sge_mapping),
+ BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ }
+
+ /* Tx */
+ if (!skip_tx_queue(bp, fp_index)) {
+ /* fastpath tx rings: tx_buf tx_desc */
+ for_each_cos_in_tx_queue(fp, cos) {
+ struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+
+ DP(BNX2X_MSG_SP,
+ "freeing tx memory of fp %d cos %d cid %d\n",
+ fp_index, cos, txdata->cid);
+
+ BNX2X_FREE(txdata->tx_buf_ring);
+ BNX2X_PCI_FREE(txdata->tx_desc_ring,
+ txdata->tx_desc_mapping,
+ sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ }
+ }
+ /* end of fastpath */
+}
+
+void bnx2x_free_fp_mem(struct bnx2x *bp)
+{
+ int i;
+ for_each_queue(bp, i)
+ bnx2x_free_fp_mem_at(bp, i);
+}
+
+static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
+{
+ union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
+ if (!CHIP_IS_E1x(bp)) {
+ bnx2x_fp(bp, index, sb_index_values) =
+ (__le16 *)status_blk.e2_sb->sb.index_values;
+ bnx2x_fp(bp, index, sb_running_index) =
+ (__le16 *)status_blk.e2_sb->sb.running_index;
+ } else {
+ bnx2x_fp(bp, index, sb_index_values) =
+ (__le16 *)status_blk.e1x_sb->sb.index_values;
+ bnx2x_fp(bp, index, sb_running_index) =
+ (__le16 *)status_blk.e1x_sb->sb.running_index;
+ }
+}
+
+static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
+{
+ union host_hc_status_block *sb;
+ struct bnx2x_fastpath *fp = &bp->fp[index];
+ int ring_size = 0;
+ u8 cos;
+ int rx_ring_size = 0;
+
+ /* if rx_ring_size specified - use it */
+ if (!bp->rx_ring_size) {
+
+ rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+
+ /* allocate at least number of buffers required by FW */
+ rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+ MIN_RX_SIZE_TPA, rx_ring_size);
+
+ bp->rx_ring_size = rx_ring_size;
+ } else
+ rx_ring_size = bp->rx_ring_size;
+
+ /* Common */
+ sb = &bnx2x_fp(bp, index, status_blk);
+#ifdef BCM_CNIC
+ if (!IS_FCOE_IDX(index)) {
+#endif
+ /* status blocks */
+ if (!CHIP_IS_E1x(bp))
+ BNX2X_PCI_ALLOC(sb->e2_sb,
+ &bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_ALLOC(sb->e1x_sb,
+ &bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
+#ifdef BCM_CNIC
+ }
+#endif
+
+ /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
+ * set shortcuts for it.
+ */
+ if (!IS_FCOE_IDX(index))
+ set_sb_shortcuts(bp, index);
+
+ /* Tx */
+ if (!skip_tx_queue(bp, index)) {
+ /* fastpath tx rings: tx_buf tx_desc */
+ for_each_cos_in_tx_queue(fp, cos) {
+ struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+
+ DP(BNX2X_MSG_SP, "allocating tx memory of "
+ "fp %d cos %d\n",
+ index, cos);
+
+ BNX2X_ALLOC(txdata->tx_buf_ring,
+ sizeof(struct sw_tx_bd) * NUM_TX_BD);
+ BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
+ &txdata->tx_desc_mapping,
+ sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ }
+ }
+
+ /* Rx */
+ if (!skip_rx_queue(bp, index)) {
+ /* fastpath rx rings: rx_buf rx_desc rx_comp */
+ BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
+ sizeof(struct sw_rx_bd) * NUM_RX_BD);
+ BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
+ &bnx2x_fp(bp, index, rx_desc_mapping),
+ sizeof(struct eth_rx_bd) * NUM_RX_BD);
+
+ BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
+ &bnx2x_fp(bp, index, rx_comp_mapping),
+ sizeof(struct eth_fast_path_rx_cqe) *
+ NUM_RCQ_BD);
+
+ /* SGE ring */
+ BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
+ sizeof(struct sw_rx_page) * NUM_RX_SGE);
+ BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
+ &bnx2x_fp(bp, index, rx_sge_mapping),
+ BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ /* RX BD ring */
+ bnx2x_set_next_page_rx_bd(fp);
+
+ /* CQ ring */
+ bnx2x_set_next_page_rx_cq(fp);
+
+ /* BDs */
+ ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
+ if (ring_size < rx_ring_size)
+ goto alloc_mem_err;
+ }
+
+ return 0;
+
+/* handles low memory cases */
+alloc_mem_err:
+ BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
+ index, ring_size);
+ /* FW will drop all packets if the queue is not big enough.
+ * In these cases we disable the queue.
+ * Min size differs for OOO, TPA and non-TPA queues
+ */
+ if (ring_size < (fp->disable_tpa ?
+ MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
+ /* release memory allocated for this queue */
+ bnx2x_free_fp_mem_at(bp, index);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int bnx2x_alloc_fp_mem(struct bnx2x *bp)
+{
+ int i;
+
+ /*
+ * 1. Allocate FP for leading - fatal if error
+ * 2. {CNIC} Allocate FCoE FP - fatal if error
+ * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
+ * 4. Allocate RSS - fix number of queues if error
+ */
+
+ /* leading */
+ if (bnx2x_alloc_fp_mem_at(bp, 0))
+ return -ENOMEM;
+
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp))
+ /* FCoE */
+ if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
+ /* we will fail the load process instead of marking
+ * NO_FCOE_FLAG
+ */
+ return -ENOMEM;
+#endif
+
+ /* RSS */
+ for_each_nondefault_eth_queue(bp, i)
+ if (bnx2x_alloc_fp_mem_at(bp, i))
+ break;
+
+ /* handle memory failures */
+ if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
+ int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
+
+ WARN_ON(delta < 0);
+#ifdef BCM_CNIC
+ /*
+ * move non-eth FPs next to the last eth FP;
+ * must be done in that order:
+ * FCOE_IDX < FWD_IDX < OOO_IDX
+ */
+
+ /* move FCoE fp even if NO_FCOE_FLAG is on */
+ bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
+#endif
+ bp->num_queues -= delta;
+ BNX2X_ERR("Adjusted num of queues from %d to %d\n",
+ bp->num_queues + delta, bp->num_queues);
+ }
+
+ return 0;
+}
+
+void bnx2x_free_mem_bp(struct bnx2x *bp)
+{
+ kfree(bp->fp);
+ kfree(bp->msix_table);
+ kfree(bp->ilt);
+}
+
+int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
+{
+ struct bnx2x_fastpath *fp;
+ struct msix_entry *tbl;
+ struct bnx2x_ilt *ilt;
+ int msix_table_size = 0;
+
+ /*
+ * The biggest MSI-X table we might need is the maximum number of fast
+ * path IGU SBs plus the default SB (for the PF).
+ */
+ msix_table_size = bp->igu_sb_cnt + 1;
+
+ /* fp array: RSS plus CNIC related L2 queues */
+ fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
+ sizeof(*fp), GFP_KERNEL);
+ if (!fp)
+ goto alloc_err;
+ bp->fp = fp;
+
+ /* msix table */
+ tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
+ if (!tbl)
+ goto alloc_err;
+ bp->msix_table = tbl;
+
+ /* ilt */
+ ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
+ if (!ilt)
+ goto alloc_err;
+ bp->ilt = ilt;
+
+ return 0;
+alloc_err:
+ bnx2x_free_mem_bp(bp);
+ return -ENOMEM;
+}
+
+int bnx2x_reload_if_running(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (unlikely(!netif_running(dev)))
+ return 0;
+
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ return bnx2x_nic_load(bp, LOAD_NORMAL);
+}
+
+int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
+{
+ u32 sel_phy_idx = 0;
+ if (bp->link_params.num_phys <= 1)
+ return INT_PHY;
+
+ if (bp->link_vars.link_up) {
+ sel_phy_idx = EXT_PHY1;
+ /* In case link is SERDES, check if the EXT_PHY2 is the one */
+ if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
+ (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
+ sel_phy_idx = EXT_PHY2;
+ } else {
+
+ switch (bnx2x_phy_selection(&bp->link_params)) {
+ case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ sel_phy_idx = EXT_PHY1;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ sel_phy_idx = EXT_PHY2;
+ break;
+ }
+ }
+
+ return sel_phy_idx;
+}
+
+int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
+{
+ u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
+ /*
+ * The selected active PHY is always after swapping (in case PHY
+ * swapping is enabled). So when swapping is enabled, we need to reverse
+ * the configuration
+ */
+
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+ if (sel_phy_idx == EXT_PHY1)
+ sel_phy_idx = EXT_PHY2;
+ else if (sel_phy_idx == EXT_PHY2)
+ sel_phy_idx = EXT_PHY1;
+ }
+ return LINK_CONFIG_IDX(sel_phy_idx);
+}
+
+#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ switch (type) {
+ case NETDEV_FCOE_WWNN:
+ *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
+ cp->fcoe_wwn_node_name_lo);
+ break;
+ case NETDEV_FCOE_WWPN:
+ *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
+ cp->fcoe_wwn_port_name_lo);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
+
+/* called with rtnl_lock */
+int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ pr_err("Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
+ if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
+ ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
+ return -EINVAL;
+
+ /* This does not race with packet allocation
+ * because the actual alloc size is
+ * only updated as part of load
+ */
+ dev->mtu = new_mtu;
+
+ return bnx2x_reload_if_running(dev);
+}
+
+u32 bnx2x_fix_features(struct net_device *dev, u32 features)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ /* TPA requires Rx CSUM offloading */
+ if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
+ features &= ~NETIF_F_LRO;
+
+ return features;
+}
+
+int bnx2x_set_features(struct net_device *dev, u32 features)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 flags = bp->flags;
+ bool bnx2x_reload = false;
+
+ if (features & NETIF_F_LRO)
+ flags |= TPA_ENABLE_FLAG;
+ else
+ flags &= ~TPA_ENABLE_FLAG;
+
+ if (features & NETIF_F_LOOPBACK) {
+ if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
+ bp->link_params.loopback_mode = LOOPBACK_BMAC;
+ bnx2x_reload = true;
+ }
+ } else {
+ if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
+ bp->link_params.loopback_mode = LOOPBACK_NONE;
+ bnx2x_reload = true;
+ }
+ }
+
+ if (flags ^ bp->flags) {
+ bp->flags = flags;
+ bnx2x_reload = true;
+ }
+
+ if (bnx2x_reload) {
+ if (bp->recovery_state == BNX2X_RECOVERY_DONE)
+ return bnx2x_reload_if_running(dev);
+ /* else: bnx2x_nic_load() will be called at end of recovery */
+ }
+
+ return 0;
+}
+
+void bnx2x_tx_timeout(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (!bp->panic)
+ bnx2x_panic();
+#endif
+
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+
+ /* This allows the netif to be shutdown gracefully before resetting */
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+}
+
+int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp;
+
+ if (!dev) {
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+ return -ENODEV;
+ }
+ bp = netdev_priv(dev);
+
+ rtnl_lock();
+
+ pci_save_state(pdev);
+
+ if (!netif_running(dev)) {
+ rtnl_unlock();
+ return 0;
+ }
+
+ netif_device_detach(dev);
+
+ bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+
+ bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+int bnx2x_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp;
+ int rc;
+
+ if (!dev) {
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+ return -ENODEV;
+ }
+ bp = netdev_priv(dev);
+
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ pr_err("Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
+ rtnl_lock();
+
+ pci_restore_state(pdev);
+
+ if (!netif_running(dev)) {
+ rtnl_unlock();
+ return 0;
+ }
+
+ bnx2x_set_power_state(bp, PCI_D0);
+ netif_device_attach(dev);
+
+ /* Since the chip was reset, clear the FW sequence number */
+ bp->fw_seq = 0;
+ rc = bnx2x_nic_load(bp, LOAD_OPEN);
+
+ rtnl_unlock();
+
+ return rc;
+}
+
+void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
+ u32 cid)
+{
+ /* ustorm cxt validation */
+ cxt->ustorm_ag_context.cdu_usage =
+ CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
+ CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
+ /* xcontext validation */
+ cxt->xstorm_ag_context.cdu_reserved =
+ CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
+ CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
+}
+
+static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
+ u8 fw_sb_id, u8 sb_index,
+ u8 ticks)
+{
+
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
+ REG_WR8(bp, addr, ticks);
+ DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
+ port, fw_sb_id, sb_index, ticks);
+}
+
+static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
+ u16 fw_sb_id, u8 sb_index,
+ u8 disable)
+{
+ u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
+ u16 flags = REG_RD16(bp, addr);
+ /* clear and set */
+ flags &= ~HC_INDEX_DATA_HC_ENABLED;
+ flags |= enable_flag;
+ REG_WR16(bp, addr, flags);
+ DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
+ port, fw_sb_id, sb_index, disable);
+}
+
+void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
+ u8 sb_index, u8 disable, u16 usec)
+{
+ int port = BP_PORT(bp);
+ u8 ticks = usec / BNX2X_BTR;
+
+ storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
+
+ disable = disable ? 1 : (usec ? 0 : 1);
+ storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
+}
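+
+/* Note: the timeout is programmed in units of BNX2X_BTR ticks, and the
+ * expression above forces the disable flag on when usec == 0, so a zero
+ * coalescing timeout turns the SB index off even if the caller passed
+ * disable == 0.
+ */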
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
new file mode 100644
index 000000000000..5b1f9b5ec499
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -0,0 +1,1491 @@
+/* bnx2x_cmn.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+#ifndef BNX2X_CMN_H
+#define BNX2X_CMN_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+
+
+#include "bnx2x.h"
+
+/* This is used as a replacement for an MCP if it's not present */
+extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
+
+extern int num_queues;
+
+/************************ Macros ********************************/
+#define BNX2X_PCI_FREE(x, y, size) \
+ do { \
+ if (x) { \
+ dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
+ x = NULL; \
+ y = 0; \
+ } \
+ } while (0)
+
+#define BNX2X_FREE(x) \
+ do { \
+ if (x) { \
+ kfree((void *)x); \
+ x = NULL; \
+ } \
+ } while (0)
+
+#define BNX2X_PCI_ALLOC(x, y, size) \
+ do { \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+ if (x == NULL) \
+ goto alloc_mem_err; \
+ memset((void *)x, 0, size); \
+ } while (0)
+
+#define BNX2X_ALLOC(x, size) \
+ do { \
+ x = kzalloc(size, GFP_KERNEL); \
+ if (x == NULL) \
+ goto alloc_mem_err; \
+ } while (0)
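+
+/*
+ * Usage sketch: both *_ALLOC macros jump to a local "alloc_mem_err" label
+ * on failure, so every caller must provide one, e.g.:
+ *
+ *	BNX2X_ALLOC(txdata->tx_buf_ring,
+ *		    sizeof(struct sw_tx_bd) * NUM_TX_BD);
+ *	...
+ *	return 0;
+ * alloc_mem_err:
+ *	<undo partial allocations>
+ *	return -ENOMEM;
+ */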
+
+/*********************** Interfaces ****************************
+ * Functions that need to be implemented by each driver version
+ */
+/* Init */
+
+/**
+ * bnx2x_send_unload_req - request unload mode from the MCP.
+ *
+ * @bp: driver handle
+ * @unload_mode: requested function's unload mode
+ *
+ * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
+ */
+u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
+
+/**
+ * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_send_unload_done(struct bnx2x *bp);
+
+/**
+ * bnx2x_config_rss_pf - configure RSS parameters.
+ *
+ * @bp: driver handle
+ * @ind_table: indirection table to configure
+ * @config_hash: re-configure RSS hash keys configuration
+ */
+int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash);
+
+/**
+ * bnx2x__init_func_obj - init function object
+ *
+ * @bp: driver handle
+ *
+ * Initializes the Function Object with the appropriate
+ * parameters which include a function slow path driver
+ * interface.
+ */
+void bnx2x__init_func_obj(struct bnx2x *bp);
+
+/**
+ * bnx2x_setup_queue - setup eth queue.
+ *
+ * @bp: driver handle
+ * @fp: pointer to the fastpath structure
+ * @leading: boolean
+ *
+ */
+int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ bool leading);
+
+/**
+ * bnx2x_setup_leading - bring up a leading eth queue.
+ *
+ * @bp: driver handle
+ */
+int bnx2x_setup_leading(struct bnx2x *bp);
+
+/**
+ * bnx2x_fw_command - send the MCP a request
+ *
+ * @bp: driver handle
+ * @command: request
+ * @param: request's parameter
+ *
+ * block until there is a reply
+ */
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
+
+/**
+ * bnx2x_initial_phy_init - initialize link parameters structure variables.
+ *
+ * @bp: driver handle
+ * @load_mode: current mode
+ */
+u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
+
+/**
+ * bnx2x_link_set - configure hw according to link parameters structure.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_link_set(struct bnx2x *bp);
+
+/**
+ * bnx2x_link_test - query link status.
+ *
+ * @bp: driver handle
+ * @is_serdes: bool
+ *
+ * Returns 0 if link is UP.
+ */
+u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
+
+/**
+ * bnx2x_drv_pulse - write driver pulse to shmem
+ *
+ * @bp: driver handle
+ *
+ * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
+ * in the shmem.
+ */
+void bnx2x_drv_pulse(struct bnx2x *bp);
+
+/**
+ * bnx2x_igu_ack_sb - update IGU with current SB value
+ *
+ * @bp: driver handle
+ * @igu_sb_id: SB id
+ * @segment: SB segment
+ * @index: SB index
+ * @op: SB operation
+ * @update: is HW update required
+ */
+void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
+ u16 index, u8 op, u8 update);
+
+/* Disable transactions from chip to host */
+void bnx2x_pf_disable(struct bnx2x *bp);
+
+/**
+ * bnx2x__link_status_update - handles link status change.
+ *
+ * @bp: driver handle
+ */
+void bnx2x__link_status_update(struct bnx2x *bp);
+
+/**
+ * bnx2x_link_report - report link status to upper layer.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_link_report(struct bnx2x *bp);
+
+/* Non-atomic version of bnx2x_link_report() */
+void __bnx2x_link_report(struct bnx2x *bp);
+
+/**
+ * bnx2x_get_mf_speed - calculate MF speed.
+ *
+ * @bp: driver handle
+ *
+ * Takes into account current linespeed and MF configuration.
+ */
+u16 bnx2x_get_mf_speed(struct bnx2x *bp);
+
+/**
+ * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
+ *
+ * @irq: irq number
+ * @dev_instance: private instance
+ */
+irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
+
+/**
+ * bnx2x_interrupt - non MSI-X interrupt handler
+ *
+ * @irq: irq number
+ * @dev_instance: private instance
+ */
+irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
+#ifdef BCM_CNIC
+
+/**
+ * bnx2x_cnic_notify - send command to cnic driver
+ *
+ * @bp: driver handle
+ * @cmd: command
+ */
+int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
+
+/**
+ * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
+ *
+ * @bp: driver handle
+ */
+void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
+#endif
+
+/**
+ * bnx2x_int_enable - enable HW interrupts.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_int_enable(struct bnx2x *bp);
+
+/**
+ * bnx2x_int_disable_sync - disable interrupts.
+ *
+ * @bp: driver handle
+ * @disable_hw: true, disable HW interrupts.
+ *
+ * This function ensures that no ISRs or SP DPCs
+ * (sp_task) are running after it returns.
+ */
+void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
+
+/**
+ * bnx2x_nic_init - init driver internals.
+ *
+ * @bp: driver handle
+ * @load_code: COMMON, PORT or FUNCTION
+ *
+ * Initializes:
+ * - rings
+ * - status blocks
+ * - etc.
+ */
+void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
+
+/**
+ * bnx2x_alloc_mem - allocate driver's memory.
+ *
+ * @bp: driver handle
+ */
+int bnx2x_alloc_mem(struct bnx2x *bp);
+
+/**
+ * bnx2x_free_mem - release driver's memory.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_free_mem(struct bnx2x *bp);
+
+/**
+ * bnx2x_set_num_queues - set number of queues according to mode.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_set_num_queues(struct bnx2x *bp);
+
+/**
+ * bnx2x_chip_cleanup - cleanup chip internals.
+ *
+ * @bp: driver handle
+ * @unload_mode: COMMON, PORT, FUNCTION
+ *
+ * - Cleanup MAC configuration.
+ * - Closes clients.
+ * - etc.
+ */
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);
+
+/**
+ * bnx2x_acquire_hw_lock - acquire HW lock.
+ *
+ * @bp: driver handle
+ * @resource: resource bit which was locked
+ */
+int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);
+
+/**
+ * bnx2x_release_hw_lock - release HW lock.
+ *
+ * @bp: driver handle
+ * @resource: resource bit which was locked
+ */
+int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
+
+/**
+ * bnx2x_release_leader_lock - release recovery leader lock
+ *
+ * @bp: driver handle
+ */
+int bnx2x_release_leader_lock(struct bnx2x *bp);
+
+/**
+ * bnx2x_set_eth_mac - configure eth MAC address in the HW
+ *
+ * @bp: driver handle
+ * @set: set or clear
+ *
+ * Configures according to the value in netdev->dev_addr.
+ */
+int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
+
+/**
+ * bnx2x_set_rx_mode - set MAC filtering configurations.
+ *
+ * @dev: netdevice
+ *
+ * called with netif_tx_lock from dev_mcast.c
+ * If bp->state is OPEN, should be called with
+ * netif_addr_lock_bh()
+ */
+void bnx2x_set_rx_mode(struct net_device *dev);
+
+/**
+ * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
+ *
+ * @bp: driver handle
+ *
+ * If bp->state is OPEN, should be called with
+ * netif_addr_lock_bh().
+ */
+void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
+
+/**
+ * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
+ *
+ * @bp: driver handle
+ * @cl_id: client id
+ * @rx_mode_flags: rx mode configuration
+ * @rx_accept_flags: rx accept configuration
+ * @tx_accept_flags: tx accept configuration (tx switch)
+ * @ramrod_flags: ramrod configuration
+ */
+void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+ unsigned long rx_mode_flags,
+ unsigned long rx_accept_flags,
+ unsigned long tx_accept_flags,
+ unsigned long ramrod_flags);
+
+/* Parity errors related */
+void bnx2x_inc_load_cnt(struct bnx2x *bp);
+u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
+bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
+bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
+void bnx2x_set_reset_in_progress(struct bnx2x *bp);
+void bnx2x_set_reset_global(struct bnx2x *bp);
+void bnx2x_disable_close_the_gate(struct bnx2x *bp);
+
+/**
+ * bnx2x_sp_event - handle ramrods completion.
+ *
+ * @fp: fastpath handle for the event
+ * @rr_cqe: eth_rx_cqe
+ */
+void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
+
+/**
+ * bnx2x_ilt_set_info - prepare ILT configurations.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_ilt_set_info(struct bnx2x *bp);
+
+/**
+ * bnx2x_dcbx_init - initialize dcbx protocol.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_dcbx_init(struct bnx2x *bp);
+
+/**
+ * bnx2x_set_power_state - set power state to the requested value.
+ *
+ * @bp: driver handle
+ * @state: required state D0 or D3hot
+ *
+ * Currently only D0 and D3hot are supported.
+ */
+int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
+
+/**
+ * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
+ *
+ * @bp: driver handle
+ * @value: new value
+ */
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
+/* Error handling */
+void bnx2x_panic_dump(struct bnx2x *bp);
+
+void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
+
+/* dev_close main block */
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
+
+/* dev_open main block */
+int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
+
+/* hard_xmit callback */
+netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
+
+/* setup_tc callback */
+int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
+
+/* select_queue callback */
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+
+/* reload helper */
+int bnx2x_reload_if_running(struct net_device *dev);
+
+int bnx2x_change_mac_addr(struct net_device *dev, void *p);
+
+/* NAPI poll Rx part */
+int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
+
+void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);
+
+/* NAPI poll Tx part */
+int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
+
+/* suspend/resume callbacks */
+int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
+int bnx2x_resume(struct pci_dev *pdev);
+
+/* Release IRQ vectors */
+void bnx2x_free_irq(struct bnx2x *bp);
+
+void bnx2x_free_fp_mem(struct bnx2x *bp);
+int bnx2x_alloc_fp_mem(struct bnx2x *bp);
+void bnx2x_init_rx_rings(struct bnx2x *bp);
+void bnx2x_free_skbs(struct bnx2x *bp);
+void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
+void bnx2x_netif_start(struct bnx2x *bp);
+
+/**
+ * bnx2x_enable_msix - set msix configuration.
+ *
+ * @bp: driver handle
+ *
+ * fills msix_table, requests vectors, updates num_queues
+ * according to the number of available vectors.
+ */
+int bnx2x_enable_msix(struct bnx2x *bp);
+
+/**
+ * bnx2x_enable_msi - request msi mode from OS, update internals accordingly
+ *
+ * @bp: driver handle
+ */
+int bnx2x_enable_msi(struct bnx2x *bp);
+
+/**
+ * bnx2x_poll - NAPI callback
+ *
+ * @napi: napi structure
+ * @budget: NAPI budget (maximum amount of work to do)
+ *
+ */
+int bnx2x_poll(struct napi_struct *napi, int budget);
+
+/**
+ * bnx2x_alloc_mem_bp - allocate memory outside the main driver structure
+ *
+ * @bp: driver handle
+ */
+int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);
+
+/**
+ * bnx2x_free_mem_bp - release memory outside the main driver structure
+ *
+ * @bp: driver handle
+ */
+void bnx2x_free_mem_bp(struct bnx2x *bp);
+
+/**
+ * bnx2x_change_mtu - change mtu netdev callback
+ *
+ * @dev: net device
+ * @new_mtu: requested mtu
+ *
+ */
+int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
+
+#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+/**
+ * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
+ *
+ * @dev: net_device
+ * @wwn: output buffer
+ * @type: WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
+ *
+ */
+int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
+#endif
+u32 bnx2x_fix_features(struct net_device *dev, u32 features);
+int bnx2x_set_features(struct net_device *dev, u32 features);
+
+/**
+ * bnx2x_tx_timeout - tx timeout netdev callback
+ *
+ * @dev: net device
+ */
+void bnx2x_tx_timeout(struct net_device *dev);
+
+/*********************** Inlines **********************************/
+/*********************** Fast path ********************************/
+static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
+{
+ barrier(); /* status block is written to by the chip */
+ fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
+}
+
+static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, u16 bd_prod,
+ u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
+{
+ struct ustorm_eth_rx_producers rx_prods = {0};
+ u32 i;
+
+ /* Update producers */
+ rx_prods.bd_prod = bd_prod;
+ rx_prods.cqe_prod = rx_comp_prod;
+ rx_prods.sge_prod = rx_sge_prod;
+
+ /*
+ * Make sure that the BD and SGE data is updated before updating the
+ * producers since FW might read the BD/SGE right after the producer
+ * is updated.
+ * This is only applicable for weak-ordered memory model archs such
+ * as IA-64. The following barrier is also mandatory since the FW
+ * assumes BDs must have buffers.
+ */
+ wmb();
+
+ for (i = 0; i < sizeof(rx_prods)/4; i++)
+ REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);
+
+ mmiowb(); /* keep prod updates ordered */
+
+ DP(NETIF_MSG_RX_STATUS,
+ "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
+ fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
+}
+
+static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
+ u8 segment, u16 index, u8 op,
+ u8 update, u32 igu_addr)
+{
+ struct igu_regular cmd_data = {0};
+
+ cmd_data.sb_id_and_flags =
+ ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
+ (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
+ (update << IGU_REGULAR_BUPDATE_SHIFT) |
+ (op << IGU_REGULAR_ENABLE_INT_SHIFT));
+
+ DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n",
+ cmd_data.sb_id_and_flags, igu_addr);
+ REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);
+
+ /* Make sure that ACK is written */
+ mmiowb();
+ barrier();
+}
+
+static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
+ u8 idu_sb_id, bool is_Pf)
+{
+ u32 data, ctl, cnt = 100;
+ u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
+ u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
+ u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
+ u32 sb_bit = 1 << (idu_sb_id%32);
+ u32 func_encode = func |
+ ((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
+ u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
+
+ /* Not supported in BC mode */
+ if (CHIP_INT_MODE_IS_BC(bp))
+ return;
+
+ data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
+ << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
+ IGU_REGULAR_CLEANUP_SET |
+ IGU_REGULAR_BCLEANUP;
+
+ ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
+ func_encode << IGU_CTRL_REG_FID_SHIFT |
+ IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
+
+ DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+ data, igu_addr_data);
+ REG_WR(bp, igu_addr_data, data);
+ mmiowb();
+ barrier();
+ DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+ ctl, igu_addr_ctl);
+ REG_WR(bp, igu_addr_ctl, ctl);
+ mmiowb();
+ barrier();
+
+ /* wait for clean up to finish */
+ while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
+ msleep(20);
+
+ if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
+ DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: "
+ "idu_sb_id %d offset %d bit %d (cnt %d)\n",
+ idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
+ }
+}
+
+static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
+ u8 storm, u16 index, u8 op, u8 update)
+{
+ u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+ COMMAND_REG_INT_ACK);
+ struct igu_ack_register igu_ack;
+
+ igu_ack.status_block_index = index;
+ igu_ack.sb_id_and_flags =
+ ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
+ (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
+ (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
+ (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
+
+ DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
+ (*(u32 *)&igu_ack), hc_addr);
+ REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
+
+ /* Make sure that ACK is written */
+ mmiowb();
+ barrier();
+}
+
+static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
+ u16 index, u8 op, u8 update)
+{
+ if (bp->common.int_block == INT_BLOCK_HC)
+ bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
+ else {
+ u8 segment;
+
+ if (CHIP_INT_MODE_IS_BC(bp))
+ segment = storm;
+ else if (igu_sb_id != bp->igu_dsb_id)
+ segment = IGU_SEG_ACCESS_DEF;
+ else if (storm == ATTENTION_ID)
+ segment = IGU_SEG_ACCESS_ATTN;
+ else
+ segment = IGU_SEG_ACCESS_DEF;
+ bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
+ }
+}
+
+static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
+{
+ u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+ COMMAND_REG_SIMD_MASK);
+ u32 result = REG_RD(bp, hc_addr);
+
+ DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
+ result, hc_addr);
+
+ barrier();
+ return result;
+}
+
+static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
+{
+ u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
+ u32 result = REG_RD(bp, igu_addr);
+
+ DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
+ result, igu_addr);
+
+ barrier();
+ return result;
+}
+
+static inline u16 bnx2x_ack_int(struct bnx2x *bp)
+{
+ barrier();
+ if (bp->common.int_block == INT_BLOCK_HC)
+ return bnx2x_hc_ack_int(bp);
+ else
+ return bnx2x_igu_ack_int(bp);
+}
+
+static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
+{
+ /* Tell compiler that consumer and producer can change */
+ barrier();
+ return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
+}
+
+static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata)
+{
+ s16 used;
+ u16 prod;
+ u16 cons;
+
+ prod = txdata->tx_bd_prod;
+ cons = txdata->tx_bd_cons;
+
+ /* NUM_TX_RINGS = number of "next-page" entries;
+ it will be used as a threshold */
+ used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ WARN_ON(used < 0);
+ WARN_ON(used > bp->tx_ring_size);
+ WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL);
+#endif
+
+ return (s16)(bp->tx_ring_size) - used;
+}
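+
+/* Worked example (with NUM_TX_RINGS == 16 purely for the numbers):
+ * prod == 100, cons == 40 gives used = 60 + 16 = 76, so
+ * tx_ring_size - 76 BDs remain available; counting the "next-page"
+ * entries as always-used keeps the producer from ever handing them out.
+ */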
+
+static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
+{
+ u16 hw_cons;
+
+ /* Tell compiler that status block fields can change */
+ barrier();
+ hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
+ return hw_cons != txdata->tx_pkt_cons;
+}
+
+static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
+{
+ u8 cos;
+ for_each_cos_in_tx_queue(fp, cos)
+ if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
+ return true;
+ return false;
+}
+
+static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
+{
+ u16 rx_cons_sb;
+
+ /* Tell compiler that status block fields can change */
+ barrier();
+ rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
+ if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+ rx_cons_sb++;
+ return (fp->rx_comp_cons != rx_cons_sb);
+}
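+
+/* Note: when the consumer index read from the status block lands exactly
+ * on a "next-page" entry (rx_cons_sb & MAX_RCQ_DESC_CNT ==
+ * MAX_RCQ_DESC_CNT), it is bumped by one so the comparison skips that
+ * placeholder instead of reporting phantom work.
+ */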
+
+/**
+ * bnx2x_tx_disable - disables tx from stack point of view
+ *
+ * @bp: driver handle
+ */
+static inline void bnx2x_tx_disable(struct bnx2x *bp)
+{
+ netif_tx_disable(bp->dev);
+ netif_carrier_off(bp->dev);
+}
+
+static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, u16 index)
+{
+ struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
+ struct page *page = sw_buf->page;
+ struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+
+ /* Skip "next page" elements */
+ if (!page)
+ return;
+
+ dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
+ SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+ __free_pages(page, PAGES_PER_SGE_SHIFT);
+
+ sw_buf->page = NULL;
+ sge->addr_hi = 0;
+ sge->addr_lo = 0;
+}
+
+static inline void bnx2x_add_all_napi(struct bnx2x *bp)
+{
+ int i;
+
+ /* Add NAPI objects */
+ for_each_rx_queue(bp, i)
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+ bnx2x_poll, BNX2X_NAPI_WEIGHT);
+}
+
+static inline void bnx2x_del_all_napi(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_rx_queue(bp, i)
+ netif_napi_del(&bnx2x_fp(bp, i, napi));
+}
+
+static inline void bnx2x_disable_msi(struct bnx2x *bp)
+{
+ if (bp->flags & USING_MSIX_FLAG) {
+ pci_disable_msix(bp->pdev);
+ bp->flags &= ~USING_MSIX_FLAG;
+ } else if (bp->flags & USING_MSI_FLAG) {
+ pci_disable_msi(bp->pdev);
+ bp->flags &= ~USING_MSI_FLAG;
+ }
+}
+
+static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
+{
+ return num_queues ?
+ min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
+ min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
+}
+
+static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
+{
+ int i, j;
+
+ for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
+ int idx = RX_SGE_CNT * i - 1;
+
+ for (j = 0; j < 2; j++) {
+ BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
+ idx--;
+ }
+ }
+}
+
+static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
+{
+ /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
+ memset(fp->sge_mask, 0xff,
+ (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64));
+
+ /* Clear the last two indices in each page to 0:
+ these are the indices that correspond to the "next" element,
+ hence will never be indicated and should be removed from
+ the calculations. */
+ bnx2x_clear_sge_mask_next_elems(fp);
+}
+
+static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, u16 index)
+{
+ struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
+ struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
+ struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+ dma_addr_t mapping;
+
+ if (unlikely(page == NULL))
+ return -ENOMEM;
+
+ mapping = dma_map_page(&bp->pdev->dev, page, 0,
+ SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ __free_pages(page, PAGES_PER_SGE_SHIFT);
+ return -ENOMEM;
+ }
+
+ sw_buf->page = page;
+ dma_unmap_addr_set(sw_buf, mapping, mapping);
+
+ sge->addr_hi = cpu_to_le32(U64_HI(mapping));
+ sge->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+ return 0;
+}
+
+static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, u16 index)
+{
+ struct sk_buff *skb;
+ struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
+ struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
+ dma_addr_t mapping;
+
+ skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
+ if (unlikely(skb == NULL))
+ return -ENOMEM;
+
+ mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+
+ rx_buf->skb = skb;
+ dma_unmap_addr_set(rx_buf, mapping, mapping);
+
+ rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+ rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+ return 0;
+}
+
+/* note that we are not allocating a new skb,
+ * we are just moving one from cons to prod
+ * we are not creating a new mapping,
+ * so there is no need to check for dma_mapping_error().
+ */
+static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
+ u16 cons, u16 prod)
+{
+ struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
+ struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
+ struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
+ struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
+
+ dma_unmap_addr_set(prod_rx_buf, mapping,
+ dma_unmap_addr(cons_rx_buf, mapping));
+ prod_rx_buf->skb = cons_rx_buf->skb;
+ *prod_bd = *cons_bd;
+}
+
+/************************* Init ******************************************/
+
+/**
+ * bnx2x_func_start - init function
+ *
+ * @bp: driver handle
+ *
+ * Must be called before sending CLIENT_SETUP for the first client.
+ */
+static inline int bnx2x_func_start(struct bnx2x *bp)
+{
+ struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_start_params *start_params =
+ &func_params.params.start;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_START;
+
+ /* Function parameters */
+ start_params->mf_mode = bp->mf_mode;
+ start_params->sd_vlan_tag = bp->mf_ov;
+ if (CHIP_IS_E1x(bp))
+ start_params->network_cos_mode = OVERRIDE_COS;
+ else
+ start_params->network_cos_mode = STATIC_COS;
+
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+/**
+ * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
+ *
+ * @fw_hi: pointer to upper part
+ * @fw_mid: pointer to middle part
+ * @fw_lo: pointer to lower part
+ * @mac: pointer to MAC address
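+ *
+ * Each 16-bit word packs two consecutive MAC bytes with the bytes
+ * swapped, so on a little-endian host *fw_hi reads as
+ * (mac[0] << 8) | mac[1], e.g. 0x0010 for 00:10:18:aa:bb:cc.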
+ */
+static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
+ u8 *mac)
+{
+ ((u8 *)fw_hi)[0] = mac[1];
+ ((u8 *)fw_hi)[1] = mac[0];
+ ((u8 *)fw_mid)[0] = mac[3];
+ ((u8 *)fw_mid)[1] = mac[2];
+ ((u8 *)fw_lo)[0] = mac[5];
+ ((u8 *)fw_lo)[1] = mac[4];
+}
+
+static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, int last)
+{
+ int i;
+
+ if (fp->disable_tpa)
+ return;
+
+ for (i = 0; i < last; i++)
+ bnx2x_free_rx_sge(bp, fp, i);
+}
+
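+/* Free the skbs parked in the TPA bins; a bin that was stopped in
+ * the middle of an aggregation (BNX2X_TPA_START) still owns a DMA
+ * mapping, so unmap it before freeing.
+ */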
+static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, int last)
+{
+ int i;
+
+ for (i = 0; i < last; i++) {
+ struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
+ struct sw_rx_bd *first_buf = &tpa_info->first_buf;
+ struct sk_buff *skb = first_buf->skb;
+
+ if (skb == NULL) {
+ DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
+ continue;
+ }
+ if (tpa_info->tpa_state == BNX2X_TPA_START)
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(first_buf, mapping),
+ fp->rx_buf_size, DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ first_buf->skb = NULL;
+ }
+}
+
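+/* Chain the TX BD pages into a ring through their last ("next") BD
+ * and reset the doorbell data and the producer/consumer indices.
+ */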
+static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
+{
+ int i;
+
+ for (i = 1; i <= NUM_TX_RINGS; i++) {
+ struct eth_tx_next_bd *tx_next_bd =
+ &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
+
+ tx_next_bd->addr_hi =
+ cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
+ BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+ tx_next_bd->addr_lo =
+ cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
+ BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+ }
+
+ SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
+ txdata->tx_db.data.zero_fill1 = 0;
+ txdata->tx_db.data.prod = 0;
+
+ txdata->tx_pkt_prod = 0;
+ txdata->tx_pkt_cons = 0;
+ txdata->tx_bd_prod = 0;
+ txdata->tx_bd_cons = 0;
+ txdata->tx_pkt = 0;
+}
+
+static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
+{
+ int i;
+ u8 cos;
+
+ for_each_tx_queue(bp, i)
+ for_each_cos_in_tx_queue(&bp->fp[i], cos)
+ bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
+}
+
+static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
+{
+ int i;
+
+ for (i = 1; i <= NUM_RX_RINGS; i++) {
+ struct eth_rx_bd *rx_bd;
+
+ rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
+ rx_bd->addr_hi =
+ cpu_to_le32(U64_HI(fp->rx_desc_mapping +
+ BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
+ rx_bd->addr_lo =
+ cpu_to_le32(U64_LO(fp->rx_desc_mapping +
+ BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
+ }
+}
+
+static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
+{
+ int i;
+
+ for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
+ struct eth_rx_sge *sge;
+
+ sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
+ sge->addr_hi =
+ cpu_to_le32(U64_HI(fp->rx_sge_mapping +
+ BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
+
+ sge->addr_lo =
+ cpu_to_le32(U64_LO(fp->rx_sge_mapping +
+ BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
+ }
+}
+
+static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
+{
+ int i;
+ for (i = 1; i <= NUM_RCQ_RINGS; i++) {
+ struct eth_rx_cqe_next_page *nextpg;
+
+ nextpg = (struct eth_rx_cqe_next_page *)
+ &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
+ nextpg->addr_hi =
+ cpu_to_le32(U64_HI(fp->rx_comp_mapping +
+ BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+ nextpg->addr_lo =
+ cpu_to_le32(U64_LO(fp->rx_comp_mapping +
+ BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+ }
+}
+
+/* Returns the number of actually allocated BDs */
+static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
+ int rx_ring_size)
+{
+ struct bnx2x *bp = fp->bp;
+ u16 ring_prod, cqe_ring_prod;
+ int i;
+
+ fp->rx_comp_cons = 0;
+ cqe_ring_prod = ring_prod = 0;
+
+ /* This routine is called only during init, so
+ * fp->eth_q_stats.rx_skb_alloc_failed = 0
+ */
+ for (i = 0; i < rx_ring_size; i++) {
+ if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
+ fp->eth_q_stats.rx_skb_alloc_failed++;
+ continue;
+ }
+ ring_prod = NEXT_RX_IDX(ring_prod);
+ cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
+ WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed));
+ }
+
+ if (fp->eth_q_stats.rx_skb_alloc_failed)
+ BNX2X_ERR("was only able to allocate "
+ "%d rx skbs on queue[%d]\n",
+ (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index);
+
+ fp->rx_bd_prod = ring_prod;
+ /* Limit the CQE producer by the CQE ring size */
+ fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
+ cqe_ring_prod);
+ fp->rx_pkt = fp->rx_calls = 0;
+
+ return i - fp->eth_q_stats.rx_skb_alloc_failed;
+}
+
+/* Statistics IDs are global per chip/path, while Client IDs for E1x are per
+ * port.
+ */
+static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
+{
+ if (!CHIP_IS_E1x(fp->bp))
+ return fp->cl_id;
+ else
+ return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x;
+}
+
+static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
+ bnx2x_obj_type obj_type)
+{
+ struct bnx2x *bp = fp->bp;
+
+ /* Configure classification DBs */
+ bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
+ BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
+ bnx2x_sp_mapping(bp, mac_rdata),
+ BNX2X_FILTER_MAC_PENDING,
+ &bp->sp_state, obj_type,
+ &bp->macs_pool);
+}
+
+/**
+ * bnx2x_get_path_func_num - get number of active functions
+ *
+ * @bp: driver handle
+ *
+ * Calculates the number of active (not hidden) functions on the
+ * current path.
+ */
+static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
+{
+ u8 func_num = 0, i;
+
+ /* 57710 has only one function per-port */
+ if (CHIP_IS_E1(bp))
+ return 1;
+
+ /* Calculate a number of functions enabled on the current
+ * PATH/PORT.
+ */
+ if (CHIP_REV_IS_SLOW(bp)) {
+ if (IS_MF(bp))
+ func_num = 4;
+ else
+ func_num = 2;
+ } else {
+ for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
+ u32 func_config =
+ MF_CFG_RD(bp,
+ func_mf_config[BP_PORT(bp) + 2 * i].
+ config);
+ func_num +=
+ ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
+ }
+ }
+
+ WARN_ON(!func_num);
+
+ return func_num;
+}
+
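+/* One-time init of the global SW objects: RX mode, multicast, MAC
+ * credit pool and RSS configuration, all bound to the leading
+ * fastpath client.
+ */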
+static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
+{
+ /* RX_MODE controlling object */
+ bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
+
+ /* multicast configuration controlling object */
+ bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
+ BP_FUNC(bp), BP_FUNC(bp),
+ bnx2x_sp(bp, mcast_rdata),
+ bnx2x_sp_mapping(bp, mcast_rdata),
+ BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
+ BNX2X_OBJ_TYPE_RX);
+
+ /* Setup CAM credit pools */
+ bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
+ bnx2x_get_path_func_num(bp));
+
+ /* RSS configuration object */
+ bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
+ bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
+ bnx2x_sp(bp, rss_rdata),
+ bnx2x_sp_mapping(bp, rss_rdata),
+ BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
+ BNX2X_OBJ_TYPE_RX);
+}
+
+static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
+{
+ if (CHIP_IS_E1x(fp->bp))
+ return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
+ else
+ return fp->cl_id;
+}
+
+static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
+{
+ struct bnx2x *bp = fp->bp;
+
+ if (!CHIP_IS_E1x(bp))
+ return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
+ else
+ return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
+}
+
+static inline void bnx2x_init_txdata(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index,
+ __le16 *tx_cons_sb)
+{
+ txdata->cid = cid;
+ txdata->txq_index = txq_index;
+ txdata->tx_cons_sb = tx_cons_sb;
+
+ DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d\n",
+ txdata->cid, txdata->txq_index);
+}
+
+#ifdef BCM_CNIC
+static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
+{
+ return bp->cnic_base_cl_id + cl_idx +
+ (bp->pf_num >> 1) * NON_ETH_CONTEXT_USE;
+}
+
+static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
+{
+ /* the 'first' id is allocated for the cnic */
+ return bp->base_fw_ndsb;
+}
+
+static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
+{
+ return bp->igu_base_sb;
+}
+
+static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
+{
+ struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+ unsigned long q_type = 0;
+
+ bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
+ BNX2X_FCOE_ETH_CL_ID_IDX);
+ /* The current BNX2X_FCOE_ETH_CID definition implies no more
+ * than 16 ETH clients per function when CNIC is enabled!
+ *
+ * Fix it ASAP!!!
+ */
+ bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
+ bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
+ bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
+ bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
+
+ bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
+ fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);
+
+ DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)\n", fp->index);
+
+ /* qZone id equals the FW (per path) client id */
+ bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
+ /* init shortcut */
+ bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
+ bnx2x_rx_ustorm_prods_offset(fp);
+
+ /* Configure Queue State object */
+ __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+ __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+ /* No multi-CoS for FCoE L2 client */
+ BUG_ON(fp->max_cos != 1);
+
+ bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1,
+ BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_sp_mapping(bp, q_rdata), q_type);
+
+ DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d "
+ "igu_sb %d\n",
+ fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+ fp->igu_sb_id);
+}
+#endif
+
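+/* Poll, in 1 ms steps for up to ~1 second, until the TX queue has
+ * no more packets in flight. Returns -EBUSY only on the
+ * BNX2X_STOP_ON_ERROR timeout path.
+ */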
+static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata)
+{
+ int cnt = 1000;
+
+ while (bnx2x_has_tx_work_unload(txdata)) {
+ if (!cnt) {
+ BNX2X_ERR("timeout waiting for queue[%d]: "
+ "txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
+ txdata->txq_index, txdata->tx_pkt_prod,
+ txdata->tx_pkt_cons);
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+ return -EBUSY;
+#else
+ break;
+#endif
+ }
+ cnt--;
+ usleep_range(1000, 1000);
+ }
+
+ return 0;
+}
+
+int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
+
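+/* Copy a structure into storm internal memory using one 32-bit
+ * register write per dword; size is in bytes and is expected to be
+ * a multiple of 4.
+ */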
+static inline void __storm_memset_struct(struct bnx2x *bp,
+ u32 addr, size_t size, u32 *data)
+{
+ int i;
+ for (i = 0; i < size/4; i++)
+ REG_WR(bp, addr + (i * 4), data[i]);
+}
+
+static inline void storm_memset_func_cfg(struct bnx2x *bp,
+ struct tstorm_eth_function_common_config *tcfg,
+ u16 abs_fid)
+{
+ size_t size = sizeof(struct tstorm_eth_function_common_config);
+
+ u32 addr = BAR_TSTRORM_INTMEM +
+ TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
+}
+
+static inline void storm_memset_cmng(struct bnx2x *bp,
+ struct cmng_struct_per_port *cmng,
+ u8 port)
+{
+ size_t size = sizeof(struct cmng_struct_per_port);
+
+ u32 addr = BAR_XSTRORM_INTMEM +
+ XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)cmng);
+}
+
+/**
+ * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
+ *
+ * @bp: driver handle
+ * @mask: bits that need to be cleared
+ */
+static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
+{
+ int tout = 5000; /* Wait for 5 secs tops */
+
+ while (tout--) {
+ smp_mb();
+ netif_addr_lock_bh(bp->dev);
+ if (!(bp->sp_state & mask)) {
+ netif_addr_unlock_bh(bp->dev);
+ return true;
+ }
+ netif_addr_unlock_bh(bp->dev);
+
+ usleep_range(1000, 1000);
+ }
+
+ smp_mb();
+
+ netif_addr_lock_bh(bp->dev);
+ if (bp->sp_state & mask) {
+ BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, "
+ "mask 0x%lx\n", bp->sp_state, mask);
+ netif_addr_unlock_bh(bp->dev);
+ return false;
+ }
+ netif_addr_unlock_bh(bp->dev);
+
+ return true;
+}
+
+/**
+ * bnx2x_set_ctx_validation - set CDU context validation values
+ *
+ * @bp: driver handle
+ * @cxt: context of the connection on the host memory
+ * @cid: SW CID of the connection to be configured
+ */
+void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
+ u32 cid);
+
+void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
+ u8 sb_index, u8 disable, u16 usec);
+void bnx2x_acquire_phy_lock(struct bnx2x *bp);
+void bnx2x_release_phy_lock(struct bnx2x *bp);
+
+/**
+ * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
+ *
+ * @bp: driver handle
+ * @mf_cfg: MF configuration
+ *
+ */
+static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
+{
+ u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ if (!max_cfg) {
+ DP(NETIF_MSG_LINK,
+ "Max BW configured to 0 - using 100 instead\n");
+ max_cfg = 100;
+ }
+ return max_cfg;
+}
+
+#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
new file mode 100644
index 000000000000..51bd7485ab18
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -0,0 +1,2511 @@
+/* bnx2x_dcb.c: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Dmitry Kravkov
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/rtnetlink.h>
+#include <net/dcbnl.h>
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_dcb.h"
+
+/* forward declarations of dcbx related functions */
+static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
+static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
+static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
+static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
+static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
+ u32 *set_configuration_ets_pg,
+ u32 *pri_pg_tbl);
+static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
+ u32 *pg_pri_orginal_spread,
+ struct pg_help_data *help_data);
+static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
+ struct pg_help_data *help_data,
+ struct dcbx_ets_feature *ets,
+ u32 *pg_pri_orginal_spread);
+static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ struct dcbx_ets_feature *ets);
+static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
+ struct bnx2x_func_tx_start_params*);
+
+/* helpers: read/write len bytes from addr into buff by REG_RD/REG_WR */
+static void bnx2x_read_data(struct bnx2x *bp, u32 *buff,
+ u32 addr, u32 len)
+{
+ int i;
+ for (i = 0; i < len; i += 4, buff++)
+ *buff = REG_RD(bp, addr + i);
+}
+
+static void bnx2x_write_data(struct bnx2x *bp, u32 *buff,
+ u32 addr, u32 len)
+{
+ int i;
+ for (i = 0; i < len; i += 4, buff++)
+ REG_WR(bp, addr + i, *buff);
+}
+
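+/* Translate the negotiated PFC settings into NIG/BRB parameters:
+ * pauseable priorities are steered to the lossless COS1, the rest
+ * stay on the lossy COS0, then the result is applied through the
+ * link code under the PHY lock.
+ */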
+static void bnx2x_pfc_set(struct bnx2x *bp)
+{
+ struct bnx2x_nig_brb_pfc_port_params pfc_params = {0};
+ u32 pri_bit, val = 0;
+ int i;
+
+ pfc_params.num_of_rx_cos_priority_mask =
+ bp->dcbx_port_params.ets.num_of_cos;
+
+ /* Tx COS configuration */
+ for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++)
+ /*
+ * We configure only the pauseable bits (non-pauseable bits are
+ * not configured at all); this is done to avoid false pauses
+ * from the network.
+ */
+ pfc_params.rx_cos_priority_mask[i] =
+ bp->dcbx_port_params.ets.cos_params[i].pri_bitmask
+ & DCBX_PFC_PRI_PAUSE_MASK(bp);
+
+ /*
+ * Rx COS configuration
+ * Changing the PFC RX configuration:
+ * in RX, COS0 will always be configured as lossy and COS1 as
+ * lossless.
+ */
+ for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) {
+ pri_bit = 1 << i;
+
+ if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))
+ val |= 1 << (i * 4);
+ }
+
+ pfc_params.pkt_priority_to_cos = val;
+
+ /* RX COS0 */
+ pfc_params.llfc_low_priority_classes = 0;
+ /* RX COS1 */
+ pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
+
+ /* BRB configuration */
+ pfc_params.cos0_pauseable = false;
+ pfc_params.cos1_pauseable = true;
+
+ bnx2x_acquire_phy_lock(bp);
+ bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED;
+ bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &pfc_params);
+ bnx2x_release_phy_lock(bp);
+}
+
+static void bnx2x_pfc_clear(struct bnx2x *bp)
+{
+ struct bnx2x_nig_brb_pfc_port_params nig_params = {0};
+ nig_params.pause_enable = 1;
+#ifdef BNX2X_SAFC
+ if (bp->flags & SAFC_TX_FLAG) {
+ u32 high = 0, low = 0;
+ int i;
+
+ for (i = 0; i < BNX2X_MAX_PRIORITY; i++) {
+ if (bp->pri_map[i] == 1)
+ high |= (1 << i);
+ if (bp->pri_map[i] == 0)
+ low |= (1 << i);
+ }
+
+ nig_params.llfc_high_priority_classes = high;
+ nig_params.llfc_low_priority_classes = low;
+
+ nig_params.pause_enable = 0;
+ nig_params.llfc_enable = 1;
+ nig_params.llfc_out_en = 1;
+ }
+#endif /* BNX2X_SAFC */
+ bnx2x_acquire_phy_lock(bp);
+ bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED;
+ bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params);
+ bnx2x_release_phy_lock(bp);
+}
+
+static void bnx2x_dump_dcbx_drv_param(struct bnx2x *bp,
+ struct dcbx_features *features,
+ u32 error)
+{
+ u8 i = 0;
+ DP(NETIF_MSG_LINK, "local_mib.error %x\n", error);
+
+ /* PG */
+ DP(NETIF_MSG_LINK,
+ "local_mib.features.ets.enabled %x\n", features->ets.enabled);
+ for (i = 0; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++)
+ DP(NETIF_MSG_LINK,
+ "local_mib.features.ets.pg_bw_tbl[%d] %d\n", i,
+ DCBX_PG_BW_GET(features->ets.pg_bw_tbl, i));
+ for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++)
+ DP(NETIF_MSG_LINK,
+ "local_mib.features.ets.pri_pg_tbl[%d] %d\n", i,
+ DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i));
+
+ /* pfc */
+ DP(NETIF_MSG_LINK, "dcbx_features.pfc.pri_en_bitmap %x\n",
+ features->pfc.pri_en_bitmap);
+ DP(NETIF_MSG_LINK, "dcbx_features.pfc.pfc_caps %x\n",
+ features->pfc.pfc_caps);
+ DP(NETIF_MSG_LINK, "dcbx_features.pfc.enabled %x\n",
+ features->pfc.enabled);
+
+ DP(NETIF_MSG_LINK, "dcbx_features.app.default_pri %x\n",
+ features->app.default_pri);
+ DP(NETIF_MSG_LINK, "dcbx_features.app.tc_supported %x\n",
+ features->app.tc_supported);
+ DP(NETIF_MSG_LINK, "dcbx_features.app.enabled %x\n",
+ features->app.enabled);
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ DP(NETIF_MSG_LINK,
+ "dcbx_features.app.app_pri_tbl[%x].app_id %x\n",
+ i, features->app.app_pri_tbl[i].app_id);
+ DP(NETIF_MSG_LINK,
+ "dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n",
+ i, features->app.app_pri_tbl[i].pri_bitmap);
+ DP(NETIF_MSG_LINK,
+ "dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n",
+ i, features->app.app_pri_tbl[i].appBitfield);
+ }
+}
+
+static void bnx2x_dcbx_get_ap_priority(struct bnx2x *bp,
+ u8 pri_bitmap,
+ u8 llfc_traf_type)
+{
+ u32 pri = MAX_PFC_PRIORITIES;
+ u32 index = MAX_PFC_PRIORITIES - 1;
+ u32 pri_mask;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+
+ /* Choose the highest priority */
+ while ((MAX_PFC_PRIORITIES == pri) && (0 != index)) {
+ pri_mask = 1 << index;
+ if (GET_FLAGS(pri_bitmap, pri_mask))
+ pri = index;
+ index--;
+ }
+
+ if (pri < MAX_PFC_PRIORITIES)
+ ttp[llfc_traf_type] = max_t(u32, ttp[llfc_traf_type], pri);
+}
+
+static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
+ struct dcbx_app_priority_feature *app,
+ u32 error)
+{
+ u8 index;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+
+ if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR))
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_ERROR\n");
+
+ if (GET_FLAGS(error, DCBX_LOCAL_APP_MISMATCH))
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_MISMATCH\n");
+
+ if (app->enabled &&
+ !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH)) {
+
+ bp->dcbx_port_params.app.enabled = true;
+
+ for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
+ ttp[index] = 0;
+
+ if (app->default_pri < MAX_PFC_PRIORITIES)
+ ttp[LLFC_TRAFFIC_TYPE_NW] = app->default_pri;
+
+ for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) {
+ struct dcbx_app_priority_entry *entry =
+ app->app_pri_tbl;
+
+ if (GET_FLAGS(entry[index].appBitfield,
+ DCBX_APP_SF_ETH_TYPE) &&
+ ETH_TYPE_FCOE == entry[index].app_id)
+ bnx2x_dcbx_get_ap_priority(bp,
+ entry[index].pri_bitmap,
+ LLFC_TRAFFIC_TYPE_FCOE);
+
+ if (GET_FLAGS(entry[index].appBitfield,
+ DCBX_APP_SF_PORT) &&
+ TCP_PORT_ISCSI == entry[index].app_id)
+ bnx2x_dcbx_get_ap_priority(bp,
+ entry[index].pri_bitmap,
+ LLFC_TRAFFIC_TYPE_ISCSI);
+ }
+ } else {
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_DISABLED\n");
+ bp->dcbx_port_params.app.enabled = false;
+ for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
+ ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY;
+ }
+}
+
+static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
+ struct dcbx_ets_feature *ets,
+ u32 error)
+{
+ int i = 0;
+ u32 pg_pri_orginal_spread[DCBX_MAX_NUM_PG_BW_ENTRIES] = {0};
+ struct pg_help_data pg_help_data;
+ struct bnx2x_dcbx_cos_params *cos_params =
+ bp->dcbx_port_params.ets.cos_params;
+
+ memset(&pg_help_data, 0, sizeof(struct pg_help_data));
+
+ if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR))
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ERROR\n");
+
+ /* Clean up old settings of ets on COS */
+ for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) {
+ cos_params[i].pauseable = false;
+ cos_params[i].strict = BNX2X_DCBX_STRICT_INVALID;
+ cos_params[i].bw_tbl = DCBX_INVALID_COS_BW;
+ cos_params[i].pri_bitmask = 0;
+ }
+
+ if (bp->dcbx_port_params.app.enabled &&
+ !GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR) &&
+ ets->enabled) {
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ENABLE\n");
+ bp->dcbx_port_params.ets.enabled = true;
+
+ bnx2x_dcbx_get_ets_pri_pg_tbl(bp,
+ pg_pri_orginal_spread,
+ ets->pri_pg_tbl);
+
+ bnx2x_dcbx_get_num_pg_traf_type(bp,
+ pg_pri_orginal_spread,
+ &pg_help_data);
+
+ bnx2x_dcbx_fill_cos_params(bp, &pg_help_data,
+ ets, pg_pri_orginal_spread);
+
+ } else {
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_DISABLED\n");
+ bp->dcbx_port_params.ets.enabled = false;
+ ets->pri_pg_tbl[0] = 0;
+
+ for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES ; i++)
+ DCBX_PG_BW_SET(ets->pg_bw_tbl, i, 1);
+ }
+}
+
+static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
+ struct dcbx_pfc_feature *pfc, u32 error)
+{
+ if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR))
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_ERROR\n");
+
+ if (bp->dcbx_port_params.app.enabled &&
+ !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH) &&
+ pfc->enabled) {
+ bp->dcbx_port_params.pfc.enabled = true;
+ bp->dcbx_port_params.pfc.priority_non_pauseable_mask =
+ ~(pfc->pri_en_bitmap);
+ } else {
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_DISABLED\n");
+ bp->dcbx_port_params.pfc.enabled = false;
+ bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0;
+ }
+}
+
+/* maps unmapped priorities to the same COS as L2 */
+static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
+{
+ int i;
+ u32 unmapped = (1 << MAX_PFC_PRIORITIES) - 1; /* all ones */
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+ u32 nw_prio = 1 << ttp[LLFC_TRAFFIC_TYPE_NW];
+ struct bnx2x_dcbx_cos_params *cos_params =
+ bp->dcbx_port_params.ets.cos_params;
+
+ /* get unmapped priorities by clearing mapped bits */
+ for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
+ unmapped &= ~(1 << ttp[i]);
+
+ /* find cos for nw prio and extend it with unmapped */
+ for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) {
+ if (cos_params[i].pri_bitmask & nw_prio) {
+ /* extend the bitmask with unmapped */
+ DP(NETIF_MSG_LINK,
+ "cos %d extended with 0x%08x\n", i, unmapped);
+ cos_params[i].pri_bitmask |= unmapped;
+ break;
+ }
+ }
+}
+
+static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp,
+ struct dcbx_features *features,
+ u32 error)
+{
+ bnx2x_dcbx_get_ap_feature(bp, &features->app, error);
+
+ bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error);
+
+ bnx2x_dcbx_get_ets_feature(bp, &features->ets, error);
+
+ bnx2x_dcbx_map_nw(bp);
+}
+
+#define DCBX_LOCAL_MIB_MAX_TRY_READ (100)
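+/* Read the local/remote DCBX MIB of this port from shmem. The FW
+ * brackets each MIB with prefix/suffix sequence numbers; re-read
+ * until they match so a MIB that is being updated is never used.
+ * Returns 0 on success and 1 on failure.
+ */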
+static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
+ u32 *base_mib_addr,
+ u32 offset,
+ int read_mib_type)
+{
+ int max_try_read = 0;
+ u32 mib_size, prefix_seq_num, suffix_seq_num;
+ struct lldp_remote_mib *remote_mib;
+ struct lldp_local_mib *local_mib;
+
+ switch (read_mib_type) {
+ case DCBX_READ_LOCAL_MIB:
+ mib_size = sizeof(struct lldp_local_mib);
+ break;
+ case DCBX_READ_REMOTE_MIB:
+ mib_size = sizeof(struct lldp_remote_mib);
+ break;
+ default:
+ return 1; /*error*/
+ }
+
+ offset += BP_PORT(bp) * mib_size;
+
+ do {
+ bnx2x_read_data(bp, base_mib_addr, offset, mib_size);
+
+ max_try_read++;
+
+ switch (read_mib_type) {
+ case DCBX_READ_LOCAL_MIB:
+ local_mib = (struct lldp_local_mib *) base_mib_addr;
+ prefix_seq_num = local_mib->prefix_seq_num;
+ suffix_seq_num = local_mib->suffix_seq_num;
+ break;
+ case DCBX_READ_REMOTE_MIB:
+ remote_mib = (struct lldp_remote_mib *) base_mib_addr;
+ prefix_seq_num = remote_mib->prefix_seq_num;
+ suffix_seq_num = remote_mib->suffix_seq_num;
+ break;
+ default:
+ return 1; /*error*/
+ }
+ } while ((prefix_seq_num != suffix_seq_num) &&
+ (max_try_read < DCBX_LOCAL_MIB_MAX_TRY_READ));
+
+ if (max_try_read >= DCBX_LOCAL_MIB_MAX_TRY_READ) {
+ BNX2X_ERR("MIB could not be read\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
+{
+ if (bp->dcbx_port_params.pfc.enabled &&
+ !(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
+ /*
+ * 1. Fills up common PFC structures if required
+ * 2. Configure NIG, MAC and BRB via the elink
+ */
+ bnx2x_pfc_set(bp);
+ else
+ bnx2x_pfc_clear(bp);
+}
+
+static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
+{
+ struct bnx2x_func_state_params func_params = {0};
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_TX_STOP;
+
+ DP(NETIF_MSG_LINK, "STOP TRAFFIC\n");
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
+{
+ struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_tx_start_params *tx_params =
+ &func_params.params.tx_start;
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_TX_START;
+
+ bnx2x_dcbx_fw_struct(bp, tx_params);
+
+ DP(NETIF_MSG_LINK, "START TRAFFIC\n");
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
+{
+ struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
+ int rc = 0;
+
+ if (ets->num_of_cos == 0 || ets->num_of_cos > DCBX_COS_MAX_NUM_E2) {
+ BNX2X_ERR("Illegal number of COSes %d\n", ets->num_of_cos);
+ return;
+ }
+
+ /* valid COS entries */
+ if (ets->num_of_cos == 1) /* no ETS */
+ return;
+
+ /* sanity */
+ if (((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[0].strict) &&
+ (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) ||
+ ((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[1].strict) &&
+ (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) {
+ BNX2X_ERR("all COS should have at least bw_limit or strict"
+ "ets->cos_params[0].strict= %x"
+ "ets->cos_params[0].bw_tbl= %x"
+ "ets->cos_params[1].strict= %x"
+ "ets->cos_params[1].bw_tbl= %x",
+ ets->cos_params[0].strict,
+ ets->cos_params[0].bw_tbl,
+ ets->cos_params[1].strict,
+ ets->cos_params[1].bw_tbl);
+ return;
+ }
+ /* If we join a group and there is bw_tbl and strict then bw rules */
+ if ((DCBX_INVALID_COS_BW != ets->cos_params[0].bw_tbl) &&
+ (DCBX_INVALID_COS_BW != ets->cos_params[1].bw_tbl)) {
+ u32 bw_tbl_0 = ets->cos_params[0].bw_tbl;
+ u32 bw_tbl_1 = ets->cos_params[1].bw_tbl;
+ /* Do not allow 0-100 configuration
+ * since PBF does not support it
+ * force 1-99 instead
+ */
+ if (bw_tbl_0 == 0) {
+ bw_tbl_0 = 1;
+ bw_tbl_1 = 99;
+ } else if (bw_tbl_1 == 0) {
+ bw_tbl_1 = 1;
+ bw_tbl_0 = 99;
+ }
+
+ bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1);
+ } else {
+ if (ets->cos_params[0].strict == BNX2X_DCBX_STRICT_COS_HIGHEST)
+ rc = bnx2x_ets_strict(&bp->link_params, 0);
+ else if (ets->cos_params[1].strict
+ == BNX2X_DCBX_STRICT_COS_HIGHEST)
+ rc = bnx2x_ets_strict(&bp->link_params, 1);
+ if (rc)
+ BNX2X_ERR("update_ets_params failed\n");
+ }
+}
+
+/*
+ * In E3B0 the configuration may have more than 2 COS.
+ */
+void bnx2x_dcbx_update_ets_config(struct bnx2x *bp)
+{
+ struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
+ struct bnx2x_ets_params ets_params = { 0 };
+ u8 i;
+
+ ets_params.num_of_cos = ets->num_of_cos;
+
+ for (i = 0; i < ets->num_of_cos; i++) {
+ /* COS is SP */
+ if (ets->cos_params[i].strict != BNX2X_DCBX_STRICT_INVALID) {
+ if (ets->cos_params[i].bw_tbl != DCBX_INVALID_COS_BW) {
+ BNX2X_ERR("COS can't be not BW and not SP\n");
+ return;
+ }
+
+ ets_params.cos[i].state = bnx2x_cos_state_strict;
+ ets_params.cos[i].params.sp_params.pri =
+ ets->cos_params[i].strict;
+ } else { /* COS is BW */
+ if (ets->cos_params[i].bw_tbl == DCBX_INVALID_COS_BW) {
+ BNX2X_ERR("COS can't be not BW and not SP\n");
+ return;
+ }
+ ets_params.cos[i].state = bnx2x_cos_state_bw;
+ ets_params.cos[i].params.bw_params.bw =
+ (u8)ets->cos_params[i].bw_tbl;
+ }
+ }
+
+ /* Configure the ETS in HW */
+ if (bnx2x_ets_e3b0_config(&bp->link_params, &bp->link_vars,
+ &ets_params)) {
+ BNX2X_ERR("bnx2x_ets_e3b0_config failed\n");
+ bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
+ }
+}
+
+static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
+{
+ bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
+
+ if (!bp->dcbx_port_params.ets.enabled ||
+ (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
+ return;
+
+ if (CHIP_IS_E3B0(bp))
+ bnx2x_dcbx_update_ets_config(bp);
+ else
+ bnx2x_dcbx_2cos_limit_update_ets_config(bp);
+}
+
+#ifdef BCM_DCBNL
+static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp)
+{
+ struct lldp_remote_mib remote_mib = {0};
+ u32 dcbx_remote_mib_offset = SHMEM2_RD(bp, dcbx_remote_mib_offset);
+ int rc;
+
+ DP(NETIF_MSG_LINK, "dcbx_remote_mib_offset 0x%x\n",
+ dcbx_remote_mib_offset);
+
+ if (SHMEM_DCBX_REMOTE_MIB_NONE == dcbx_remote_mib_offset) {
+ BNX2X_ERR("FW doesn't support dcbx_remote_mib_offset\n");
+ return -EINVAL;
+ }
+
+ rc = bnx2x_dcbx_read_mib(bp, (u32 *)&remote_mib, dcbx_remote_mib_offset,
+ DCBX_READ_REMOTE_MIB);
+
+ if (rc) {
+ BNX2X_ERR("Faild to read remote mib from FW\n");
+ return rc;
+ }
+
+ /* save features and flags */
+ bp->dcbx_remote_feat = remote_mib.features;
+ bp->dcbx_remote_flags = remote_mib.flags;
+ return 0;
+}
+#endif
+
+static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
+{
+ struct lldp_local_mib local_mib = {0};
+ u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset);
+ int rc;
+
+ DP(NETIF_MSG_LINK, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset);
+
+ if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) {
+ BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n");
+ return -EINVAL;
+ }
+
+ rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset,
+ DCBX_READ_LOCAL_MIB);
+
+ if (rc) {
+ BNX2X_ERR("Faild to read local mib from FW\n");
+ return rc;
+ }
+
+ /* save features and error */
+ bp->dcbx_local_feat = local_mib.features;
+ bp->dcbx_error = local_mib.error;
+ return 0;
+}
+
+#ifdef BCM_DCBNL
+static inline
+u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
+{
+ u8 pri;
+
+ /* Choose the highest priority */
+ for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
+ if (ent->pri_bitmap & (1 << pri))
+ break;
+ return pri;
+}
+
+static inline
+u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
+{
+ return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) ==
+ DCBX_APP_SF_PORT) ? DCB_APP_IDTYPE_PORTNUM :
+ DCB_APP_IDTYPE_ETHTYPE;
+}
+
+int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
+{
+ int i, err = 0;
+
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) {
+ struct dcbx_app_priority_entry *ent =
+ &bp->dcbx_local_feat.app.app_pri_tbl[i];
+
+ if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
+ u8 up = bnx2x_dcbx_dcbnl_app_up(ent);
+
+ /* avoid invalid user-priority */
+ if (up) {
+ struct dcb_app app;
+ app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
+ app.protocol = ent->app_id;
+ app.priority = delall ? 0 : up;
+ err = dcb_setapp(bp->dev, &app);
+ }
+ }
+ }
+ return err;
+}
+#endif
+
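+/* Set or clear bits in the shmem2 drv_flags word under the
+ * DRV_FLAGS HW lock so the read-modify-write is atomic with
+ * respect to the other functions on the device.
+ */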
+static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
+{
+ if (SHMEM2_HAS(bp, drv_flags)) {
+ u32 drv_flags;
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+ drv_flags = SHMEM2_RD(bp, drv_flags);
+
+ if (set)
+ SET_FLAGS(drv_flags, flags);
+ else
+ RESET_FLAGS(drv_flags, flags);
+
+ SHMEM2_WR(bp, drv_flags, drv_flags);
+ DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
+ bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+ }
+}
+
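+/* Rebuild the priority-to-CoS TX mapping from the negotiated ETS
+ * parameters and schedule the sp_rtnl worker to apply it, since
+ * setup_tc needs the rtnl lock that cannot be taken here.
+ */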
+static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
+{
+ u8 prio, cos;
+ for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) {
+ for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
+ if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask
+ & (1 << prio)) {
+ bp->prio_to_cos[prio] = cos;
+ DP(NETIF_MSG_LINK,
+ "tx_mapping %d --> %d\n", prio, cos);
+ }
+ }
+ }
+
+ /* setup tc must be called under rtnl lock, but we can't take it here
+ * as we are handling an attention on a work queue which must be
+ * flushed in some rtnl-locked contexts (e.g. if down)
+ */
+ if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+}
+
+void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
+{
+ switch (state) {
+ case BNX2X_DCBX_STATE_NEG_RECEIVED:
+ {
+ DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
+#ifdef BCM_DCBNL
+ /* Delete app tlvs from dcbnl before reading new
+ * negotiation results
+ */
+ bnx2x_dcbnl_update_applist(bp, true);
+
+ /* Read remote mib if dcbx is in the FW */
+ if (bnx2x_dcbx_read_shmem_remote_mib(bp))
+ return;
+#endif
+ /* Read neg results if dcbx is in the FW */
+ if (bnx2x_dcbx_read_shmem_neg_results(bp))
+ return;
+
+ bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+ bp->dcbx_error);
+
+ bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+ bp->dcbx_error);
+
+ /* mark DCBX result for PMF migration */
+ bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);
+#ifdef BCM_DCBNL
+ /* Add new app tlvs to dcbnl */
+ bnx2x_dcbnl_update_applist(bp, false);
+#endif
+ bnx2x_dcbx_stop_hw_tx(bp);
+
+ /* reconfigure the netdevice with the results of the new
+ * dcbx negotiation.
+ */
+ bnx2x_dcbx_update_tc_mapping(bp);
+
+ return;
+ }
+ case BNX2X_DCBX_STATE_TX_PAUSED:
+ DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
+ bnx2x_pfc_set_pfc(bp);
+
+ bnx2x_dcbx_update_ets_params(bp);
+ bnx2x_dcbx_resume_hw_tx(bp);
+ return;
+ case BNX2X_DCBX_STATE_TX_RELEASED:
+ DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
+#ifdef BCM_DCBNL
+ /*
+ * Send a notification for the new negotiated parameters
+ */
+ dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+#endif
+ return;
+ default:
+ BNX2X_ERR("Unknown DCBX_STATE\n");
+ }
+}
+
+#define LLDP_ADMIN_MIB_OFFSET(bp) (PORT_MAX*sizeof(struct lldp_params) + \
+ BP_PORT(bp)*sizeof(struct lldp_admin_mib))
+
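+/* Read the admin MIB of this port from shmem, overlay the locally
+ * configured DCBX parameters when overwrite_settings is enabled and
+ * write the result back for the FW to advertise.
+ */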
+static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
+ u32 dcbx_lldp_params_offset)
+{
+ struct lldp_admin_mib admin_mib;
+ u32 i, other_traf_type = PREDEFINED_APP_IDX_MAX, traf_type = 0;
+ u32 offset = dcbx_lldp_params_offset + LLDP_ADMIN_MIB_OFFSET(bp);
+
+ /* shortcuts */
+ struct dcbx_features *af = &admin_mib.features;
+ struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params;
+
+ memset(&admin_mib, 0, sizeof(struct lldp_admin_mib));
+
+ /* Read the data first */
+ bnx2x_read_data(bp, (u32 *)&admin_mib, offset,
+ sizeof(struct lldp_admin_mib));
+
+ if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON)
+ SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
+
+ if (dp->overwrite_settings == BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE) {
+
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_CEE_VERSION_MASK);
+ admin_mib.ver_cfg_flags |=
+ (dp->admin_dcbx_version << DCBX_CEE_VERSION_SHIFT) &
+ DCBX_CEE_VERSION_MASK;
+
+ af->ets.enabled = (u8)dp->admin_ets_enable;
+
+ af->pfc.enabled = (u8)dp->admin_pfc_enable;
+
+ /* FOR IEEE dp->admin_tc_supported_tx_enable */
+ if (dp->admin_ets_configuration_tx_enable)
+ SET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_ETS_CONFIG_TX_ENABLED);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_ETS_CONFIG_TX_ENABLED);
+ /* For IEEE admin_ets_recommendation_tx_enable */
+ if (dp->admin_pfc_tx_enable)
+ SET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_PFC_CONFIG_TX_ENABLED);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_PFC_CONFIG_TX_ENABLED);
+
+ if (dp->admin_application_priority_tx_enable)
+ SET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_APP_CONFIG_TX_ENABLED);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_APP_CONFIG_TX_ENABLED);
+
+ if (dp->admin_ets_willing)
+ SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
+ /* For IEEE admin_ets_reco_valid */
+ if (dp->admin_pfc_willing)
+ SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
+
+ if (dp->admin_app_priority_willing)
+ SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
+
+ for (i = 0 ; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) {
+ DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i,
+ (u8)dp->admin_configuration_bw_precentage[i]);
+
+ DP(NETIF_MSG_LINK, "pg_bw_tbl[%d] = %02x\n",
+ i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i));
+ }
+
+ for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
+ DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i,
+ (u8)dp->admin_configuration_ets_pg[i]);
+
+ DP(NETIF_MSG_LINK, "pri_pg_tbl[%d] = %02x\n",
+ i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i));
+ }
+
+ /* For IEEE admin_recommendation_bw_precentage
+ * For IEEE admin_recommendation_ets_pg */
+ af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
+ for (i = 0; i < 4; i++) {
+ if (dp->admin_priority_app_table[i].valid) {
+ struct bnx2x_admin_priority_app_table *table =
+ dp->admin_priority_app_table;
+ if ((ETH_TYPE_FCOE == table[i].app_id) &&
+ (TRAFFIC_TYPE_ETH == table[i].traffic_type))
+ traf_type = FCOE_APP_IDX;
+ else if ((TCP_PORT_ISCSI == table[i].app_id) &&
+ (TRAFFIC_TYPE_PORT == table[i].traffic_type))
+ traf_type = ISCSI_APP_IDX;
+ else
+ traf_type = other_traf_type++;
+
+ af->app.app_pri_tbl[traf_type].app_id =
+ table[i].app_id;
+
+ af->app.app_pri_tbl[traf_type].pri_bitmap =
+ (u8)(1 << table[i].priority);
+
+ af->app.app_pri_tbl[traf_type].appBitfield =
+ (DCBX_APP_ENTRY_VALID);
+
+ af->app.app_pri_tbl[traf_type].appBitfield |=
+ (TRAFFIC_TYPE_ETH == table[i].traffic_type) ?
+ DCBX_APP_SF_ETH_TYPE : DCBX_APP_SF_PORT;
+ }
+ }
+
+ af->app.default_pri = (u8)dp->admin_default_priority;
+
+ }
+
+ /* Write the data. */
+ bnx2x_write_data(bp, (u32 *)&admin_mib, offset,
+ sizeof(struct lldp_admin_mib));
+
+}
+
+void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
+{
+ if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) {
+ bp->dcb_state = dcb_on;
+ bp->dcbx_enabled = dcbx_enabled;
+ } else {
+ bp->dcb_state = false;
+ bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID;
+ }
+ DP(NETIF_MSG_LINK, "DCB state [%s:%s]\n",
+ dcb_on ? "ON" : "OFF",
+ dcbx_enabled == BNX2X_DCBX_ENABLED_OFF ? "user-mode" :
+ dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF ? "on-chip static" :
+ dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON ?
+ "on-chip with negotiation" : "invalid");
+}
+
+void bnx2x_dcbx_init_params(struct bnx2x *bp)
+{
+ bp->dcbx_config_params.admin_dcbx_version = 0x0; /* 0 - CEE; 1 - IEEE */
+ bp->dcbx_config_params.admin_ets_willing = 1;
+ bp->dcbx_config_params.admin_pfc_willing = 1;
+ bp->dcbx_config_params.overwrite_settings = 1;
+ bp->dcbx_config_params.admin_ets_enable = 1;
+ bp->dcbx_config_params.admin_pfc_enable = 1;
+ bp->dcbx_config_params.admin_tc_supported_tx_enable = 1;
+ bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
+ bp->dcbx_config_params.admin_pfc_tx_enable = 1;
+ bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
+ bp->dcbx_config_params.admin_ets_reco_valid = 1;
+ bp->dcbx_config_params.admin_app_priority_willing = 1;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 50;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 50;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[0] = 1;
+ bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[3] = 2;
+ bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 1;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 2;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 7;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 5;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 6;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 7;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[3] = 3;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[4] = 4;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7;
+ bp->dcbx_config_params.admin_pfc_bitmap = 0x8; /* FCoE(3) enable */
+ bp->dcbx_config_params.admin_priority_app_table[0].valid = 1;
+ bp->dcbx_config_params.admin_priority_app_table[1].valid = 1;
+ bp->dcbx_config_params.admin_priority_app_table[2].valid = 0;
+ bp->dcbx_config_params.admin_priority_app_table[3].valid = 0;
+ bp->dcbx_config_params.admin_priority_app_table[0].priority = 3;
+ bp->dcbx_config_params.admin_priority_app_table[1].priority = 0;
+ bp->dcbx_config_params.admin_priority_app_table[2].priority = 0;
+ bp->dcbx_config_params.admin_priority_app_table[3].priority = 0;
+ bp->dcbx_config_params.admin_priority_app_table[0].traffic_type = 0;
+ bp->dcbx_config_params.admin_priority_app_table[1].traffic_type = 1;
+ bp->dcbx_config_params.admin_priority_app_table[2].traffic_type = 0;
+ bp->dcbx_config_params.admin_priority_app_table[3].traffic_type = 0;
+ bp->dcbx_config_params.admin_priority_app_table[0].app_id = 0x8906;
+ bp->dcbx_config_params.admin_priority_app_table[1].app_id = 3260;
+ bp->dcbx_config_params.admin_priority_app_table[2].app_id = 0;
+ bp->dcbx_config_params.admin_priority_app_table[3].app_id = 0;
+ bp->dcbx_config_params.admin_default_priority =
+ bp->dcbx_config_params.admin_priority_app_table[1].priority;
+}
+
+void bnx2x_dcbx_init(struct bnx2x *bp)
+{
+ u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;
+
+ if (bp->dcbx_enabled <= 0)
+ return;
+
+ /* validate:
+ * the chip is good for this dcbx version,
+ * dcb is wanted,
+ * the function is pmf,
+ * shmem2 contains DCBX support fields
+ */
+ DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n",
+ bp->dcb_state, bp->port.pmf);
+
+ if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
+ SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
+ dcbx_lldp_params_offset =
+ SHMEM2_RD(bp, dcbx_lldp_params_offset);
+
+ DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n",
+ dcbx_lldp_params_offset);
+
+ bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
+
+ if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
+ bnx2x_dcbx_admin_mib_updated_params(bp,
+ dcbx_lldp_params_offset);
+
+ /* Let HW start negotiation */
+ bnx2x_fw_command(bp,
+ DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
+ }
+ }
+}
+
+static void
+bnx2x_dcbx_print_cos_params(struct bnx2x *bp,
+ struct bnx2x_func_tx_start_params *pfc_fw_cfg)
+{
+ u8 pri = 0;
+ u8 cos = 0;
+
+ DP(NETIF_MSG_LINK,
+ "pfc_fw_cfg->dcb_version %x\n", pfc_fw_cfg->dcb_version);
+ DP(NETIF_MSG_LINK,
+ "pdev->params.dcbx_port_params.pfc."
+ "priority_non_pauseable_mask %x\n",
+ bp->dcbx_port_params.pfc.priority_non_pauseable_mask);
+
+ for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) {
+ DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
+ "cos_params[%d].pri_bitmask %x\n", cos,
+ bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask);
+
+ DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
+ "cos_params[%d].bw_tbl %x\n", cos,
+ bp->dcbx_port_params.ets.cos_params[cos].bw_tbl);
+
+ DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
+ "cos_params[%d].strict %x\n", cos,
+ bp->dcbx_port_params.ets.cos_params[cos].strict);
+
+ DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
+ "cos_params[%d].pauseable %x\n", cos,
+ bp->dcbx_port_params.ets.cos_params[cos].pauseable);
+ }
+
+ for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
+ DP(NETIF_MSG_LINK,
+ "pfc_fw_cfg->traffic_type_to_priority_cos[%d]."
+ "priority %x\n", pri,
+ pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority);
+
+ DP(NETIF_MSG_LINK,
+ "pfc_fw_cfg->traffic_type_to_priority_cos[%d].cos %x\n",
+ pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].cos);
+ }
+}
+
+/* fills help_data according to pg_info */
+static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
+ u32 *pg_pri_orginal_spread,
+ struct pg_help_data *help_data)
+{
+ bool pg_found = false;
+ u32 i, traf_type, add_traf_type, add_pg;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+ struct pg_entry_help_data *data = help_data->data; /* shortcut */
+
+ /* Set to invalid */
+ for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
+ data[i].pg = DCBX_ILLEGAL_PG;
+
+ for (add_traf_type = 0;
+ add_traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; add_traf_type++) {
+ pg_found = false;
+ if (ttp[add_traf_type] < MAX_PFC_PRIORITIES) {
+ add_pg = (u8)pg_pri_orginal_spread[ttp[add_traf_type]];
+ for (traf_type = 0;
+ traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+ traf_type++) {
+ if (data[traf_type].pg == add_pg) {
+ if (!(data[traf_type].pg_priority &
+ (1 << ttp[add_traf_type])))
+ data[traf_type].
+ num_of_dif_pri++;
+ data[traf_type].pg_priority |=
+ (1 << ttp[add_traf_type]);
+ pg_found = true;
+ break;
+ }
+ }
+ if (false == pg_found) {
+ data[help_data->num_of_pg].pg = add_pg;
+ data[help_data->num_of_pg].pg_priority =
+ (1 << ttp[add_traf_type]);
+ data[help_data->num_of_pg].num_of_dif_pri = 1;
+ help_data->num_of_pg++;
+ }
+ }
+ DP(NETIF_MSG_LINK,
+ "add_traf_type %d pg_found %s num_of_pg %d\n",
+ add_traf_type, (false == pg_found) ? "NO" : "YES",
+ help_data->num_of_pg);
+ }
+}
+
+static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u32 pri_join_mask)
+{
+ /* With only one priority there is only one COS */
+ cos_data->data[0].pausable =
+ IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+ cos_data->data[0].pri_join_mask = pri_join_mask;
+ cos_data->data[0].cos_bw = 100;
+ cos_data->num_of_cos = 1;
+}
+
+static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp,
+ struct cos_entry_help_data *data,
+ u8 pg_bw)
+{
+ if (data->cos_bw == DCBX_INVALID_COS_BW)
+ data->cos_bw = pg_bw;
+ else
+ data->cos_bw += pg_bw;
+}
+
+static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ struct dcbx_ets_feature *ets)
+{
+ u32 pri_tested = 0;
+ u8 i = 0;
+ u8 entry = 0;
+ u8 pg_entry = 0;
+ u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+
+ cos_data->data[0].pausable = true;
+ cos_data->data[1].pausable = false;
+ cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
+
+ for (i = 0 ; i < num_of_pri ; i++) {
+ pri_tested = 1 << bp->dcbx_port_params.
+ app.traffic_type_priority[i];
+
+ if (pri_tested & DCBX_PFC_PRI_NON_PAUSE_MASK(bp)) {
+ cos_data->data[1].pri_join_mask |= pri_tested;
+ entry = 1;
+ } else {
+ cos_data->data[0].pri_join_mask |= pri_tested;
+ entry = 0;
+ }
+ pg_entry = (u8)pg_pri_orginal_spread[bp->dcbx_port_params.
+ app.traffic_type_priority[i]];
+ /* There can be only one strict pg */
+ if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES)
+ bnx2x_dcbx_add_to_cos_bw(bp, &cos_data->data[entry],
+ DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry));
+ else
+ /* If we join a group and one is strict
+ * then strict priority rules */
+ cos_data->data[entry].strict =
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
+ }
+ if ((0 == cos_data->data[0].pri_join_mask) &&
+ (0 == cos_data->data[1].pri_join_mask))
+ BNX2X_ERR("dcbx error: Both groups must have priorities\n");
+}
+
+#ifndef POWER_OF_2
+#define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1))))
+#endif
+
+static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp,
+ struct pg_help_data *pg_help_data,
+ struct cos_help_data *cos_data,
+ u32 pri_join_mask,
+ u8 num_of_dif_pri)
+{
+ u8 i = 0;
+ u32 pri_tested = 0;
+ u32 pri_mask_without_pri = 0;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+
+ if (num_of_dif_pri == 1) {
+ bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask);
+ return;
+ }
+ /* single priority group */
+ if (pg_help_data->data[0].pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+ /* If there are both pauseable and non-pauseable priorities,
+ * the pauseable priorities go to the first queue and
+ * the non-pauseable priorities go to the second queue.
+ */
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+ /* Pauseable */
+ cos_data->data[0].pausable = true;
+ /* Non pauseable.*/
+ cos_data->data[1].pausable = false;
+
+ if (2 == num_of_dif_pri) {
+ cos_data->data[0].cos_bw = 50;
+ cos_data->data[1].cos_bw = 50;
+ }
+
+ if (3 == num_of_dif_pri) {
+ if (POWER_OF_2(DCBX_PFC_PRI_GET_PAUSE(bp,
+ pri_join_mask))) {
+ cos_data->data[0].cos_bw = 33;
+ cos_data->data[1].cos_bw = 67;
+ } else {
+ cos_data->data[0].cos_bw = 67;
+ cos_data->data[1].cos_bw = 33;
+ }
+ }
+
+ } else if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask)) {
+ /* If there are only pauseable priorities,
+ * then one/two priorities go to the first queue
+ * and one priority goes to the second queue.
+ */
+ if (2 == num_of_dif_pri) {
+ cos_data->data[0].cos_bw = 50;
+ cos_data->data[1].cos_bw = 50;
+ } else {
+ cos_data->data[0].cos_bw = 67;
+ cos_data->data[1].cos_bw = 33;
+ }
+ cos_data->data[1].pausable = true;
+ cos_data->data[0].pausable = true;
+ /* All priorities except FCOE */
+ cos_data->data[0].pri_join_mask = (pri_join_mask &
+ ((u8)~(1 << ttp[LLFC_TRAFFIC_TYPE_FCOE])));
+ /* Only FCOE priority.*/
+ cos_data->data[1].pri_join_mask =
+ (1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]);
+ } else
+ /* If there are only non-pauseable priorities,
+ * they will all go to the same queue.
+ */
+ bnx2x_dcbx_ets_disabled_entry_data(bp,
+ cos_data, pri_join_mask);
+ } else {
+ /* priority group which is not BW limited (PG#15):*/
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+ /* If there are both pauseable and non-pauseable
+ * priorities, the pauseable priorities go to the first
+ * queue and the non-pauseable priorities
+ * go to the second queue.
+ */
+ if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) >
+ DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) {
+ cos_data->data[0].strict =
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
+ cos_data->data[1].strict =
+ BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+ BNX2X_DCBX_STRICT_COS_HIGHEST);
+ } else {
+ cos_data->data[0].strict =
+ BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+ BNX2X_DCBX_STRICT_COS_HIGHEST);
+ cos_data->data[1].strict =
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
+ }
+ /* Pauseable */
+ cos_data->data[0].pausable = true;
+ /* Non pause-able.*/
+ cos_data->data[1].pausable = false;
+ } else {
+ /* If there are only pauseable priorities or
+ * only non-pauseable, the lower priorities go
+ * to the first queue and the higher priorities go
+ * to the second queue.
+ */
+ cos_data->data[0].pausable =
+ cos_data->data[1].pausable =
+ IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+
+ for (i = 0 ; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) {
+ pri_tested = 1 << bp->dcbx_port_params.
+ app.traffic_type_priority[i];
+ /* Remove priority tested */
+ pri_mask_without_pri =
+ (pri_join_mask & ((u8)(~pri_tested)));
+ if (pri_mask_without_pri < pri_tested)
+ break;
+ }
+
+ if (i == LLFC_DRIVER_TRAFFIC_TYPE_MAX)
+ BNX2X_ERR("Invalid value for pri_join_mask -"
+ " could not find a priority\n");
+
+ cos_data->data[0].pri_join_mask = pri_mask_without_pri;
+ cos_data->data[1].pri_join_mask = pri_tested;
+ /* Both queues are strict priority,
+ * and that with the highest priority
+ * gets the highest strict priority in the arbiter.
+ */
+ cos_data->data[0].strict =
+ BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+ BNX2X_DCBX_STRICT_COS_HIGHEST);
+ cos_data->data[1].strict =
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
+ }
+ }
+}
+
+static void bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
+ struct bnx2x *bp,
+ struct pg_help_data *pg_help_data,
+ struct dcbx_ets_feature *ets,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ u32 pri_join_mask,
+ u8 num_of_dif_pri)
+{
+ u8 i = 0;
+ u8 pg[DCBX_COS_MAX_NUM_E2] = { 0 };
+
+ /* If there are both pauseable and non-pauseable priorities,
+ * the pauseable priorities go to the first queue and
+ * the non-pauseable priorities go to the second queue.
+ */
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
+ pg_help_data->data[0].pg_priority) ||
+ IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
+ pg_help_data->data[1].pg_priority)) {
+ /* If one PG contains both pauseable and
+ * non-pauseable priorities then ETS is disabled.
+ */
+ bnx2x_dcbx_separate_pauseable_from_non(bp, cos_data,
+ pg_pri_orginal_spread, ets);
+ bp->dcbx_port_params.ets.enabled = false;
+ return;
+ }
+
+ /* Pauseable */
+ cos_data->data[0].pausable = true;
+ /* Non pauseable. */
+ cos_data->data[1].pausable = false;
+ if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp,
+ pg_help_data->data[0].pg_priority)) {
+ /* 0 is pauseable */
+ cos_data->data[0].pri_join_mask =
+ pg_help_data->data[0].pg_priority;
+ pg[0] = pg_help_data->data[0].pg;
+ cos_data->data[1].pri_join_mask =
+ pg_help_data->data[1].pg_priority;
+ pg[1] = pg_help_data->data[1].pg;
+ } else {/* 1 is pauseable */
+ cos_data->data[0].pri_join_mask =
+ pg_help_data->data[1].pg_priority;
+ pg[0] = pg_help_data->data[1].pg;
+ cos_data->data[1].pri_join_mask =
+ pg_help_data->data[0].pg_priority;
+ pg[1] = pg_help_data->data[0].pg;
+ }
+ } else {
+ /* If there are only pauseable priorities or
+ * only non-pauseable, each PG goes to a queue.
+ */
+ cos_data->data[0].pausable = cos_data->data[1].pausable =
+ IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+ cos_data->data[0].pri_join_mask =
+ pg_help_data->data[0].pg_priority;
+ pg[0] = pg_help_data->data[0].pg;
+ cos_data->data[1].pri_join_mask =
+ pg_help_data->data[1].pg_priority;
+ pg[1] = pg_help_data->data[1].pg;
+ }
+
+ /* There can be only one strict pg */
+ for (i = 0 ; i < ARRAY_SIZE(pg); i++) {
+ if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES)
+ cos_data->data[i].cos_bw =
+ DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]);
+ else
+ cos_data->data[i].strict =
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
+ }
+}
+
+static int bnx2x_dcbx_join_pgs(
+ struct bnx2x *bp,
+ struct dcbx_ets_feature *ets,
+ struct pg_help_data *pg_help_data,
+ u8 required_num_of_pg)
+{
+ u8 entry_joined = pg_help_data->num_of_pg - 1;
+ u8 entry_removed = entry_joined + 1;
+ u8 pg_joined = 0;
+
+ if (required_num_of_pg == 0 || ARRAY_SIZE(pg_help_data->data)
+ <= pg_help_data->num_of_pg) {
+ BNX2X_ERR("required_num_of_pg can't be zero or num_of_pg out of range\n");
+ return -EINVAL;
+ }
+
+ while (required_num_of_pg < pg_help_data->num_of_pg) {
+ entry_joined = pg_help_data->num_of_pg - 2;
+ entry_removed = entry_joined + 1;
+ /* protect index */
+ entry_removed %= ARRAY_SIZE(pg_help_data->data);
+
+ pg_help_data->data[entry_joined].pg_priority |=
+ pg_help_data->data[entry_removed].pg_priority;
+
+ pg_help_data->data[entry_joined].num_of_dif_pri +=
+ pg_help_data->data[entry_removed].num_of_dif_pri;
+
+ if (pg_help_data->data[entry_joined].pg == DCBX_STRICT_PRI_PG ||
+ pg_help_data->data[entry_removed].pg == DCBX_STRICT_PRI_PG)
+ /* If either entry is strict, the joined entry is strict */
+ pg_help_data->data[entry_joined].pg =
+ DCBX_STRICT_PRI_PG;
+ else {
+ /* Entries can be joined - join their BW */
+ pg_joined = DCBX_PG_BW_GET(ets->pg_bw_tbl,
+ pg_help_data->data[entry_joined].pg) +
+ DCBX_PG_BW_GET(ets->pg_bw_tbl,
+ pg_help_data->data[entry_removed].pg);
+
+ DCBX_PG_BW_SET(ets->pg_bw_tbl,
+ pg_help_data->data[entry_joined].pg, pg_joined);
+ }
+ /* Joined the entries */
+ pg_help_data->num_of_pg--;
+ }
+
+ return 0;
+}
+
+static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
+ struct bnx2x *bp,
+ struct pg_help_data *pg_help_data,
+ struct dcbx_ets_feature *ets,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ u32 pri_join_mask,
+ u8 num_of_dif_pri)
+{
+ u8 i = 0;
+ u32 pri_tested = 0;
+ u8 entry = 0;
+ u8 pg_entry = 0;
+ bool b_found_strict = false;
+ u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+
+ cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
+ /* If there are both pauseable and non-pauseable priorities,
+ * the pauseable priorities go to the first queue and the
+ * non-pauseable priorities go to the second queue.
+ */
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask))
+ bnx2x_dcbx_separate_pauseable_from_non(bp,
+ cos_data, pg_pri_orginal_spread, ets);
+ else {
+ /* If two BW-limited PG-s were combined to one queue,
+ * the BW is their sum.
+ *
+ * If there are only pauseable priorities or only non-pauseable,
+ * and there are both BW-limited and non-BW-limited PG-s,
+ * the BW-limited PG/s go to one queue and the non-BW-limited
+ * PG/s go to the second queue.
+ *
+ * If there are only pauseable priorities or only non-pauseable
+ * and all are BW limited, then two priorities go to the first
+ * queue and one priority goes to the second queue.
+ *
+ * We join these two cases: if one PG is strict
+ * (not BW-limited) its priority goes to the second
+ * queue, otherwise the last priority goes there.
+ */
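+
+ /* Illustrative example (values are ours, not from the spec):
+ * with PG0/PG1 BW-limited and PG2 strict, the PG0/PG1
+ * priorities land in queue 0 with their BW summed, while the
+ * PG2 priority lands in queue 1 as strict-highest. If all
+ * three PGs are BW-limited, the first two priorities go to
+ * queue 0 and the last one to queue 1.
+ */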
+
+ cos_data->data[0].pausable = cos_data->data[1].pausable =
+ IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+
+ for (i = 0; i < num_of_pri; i++) {
+ pri_tested = 1 << bp->dcbx_port_params.
+ app.traffic_type_priority[i];
+ pg_entry = (u8)pg_pri_orginal_spread[bp->
+ dcbx_port_params.app.traffic_type_priority[i]];
+
+ if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+ entry = 0;
+
+ if (i == (num_of_pri - 1) && !b_found_strict)
+ /* The last entry is handled separately:
+ * if no priority is strict, the last
+ * entry goes to the last queue. */
+ entry = 1;
+ cos_data->data[entry].pri_join_mask |=
+ pri_tested;
+ bnx2x_dcbx_add_to_cos_bw(bp,
+ &cos_data->data[entry],
+ DCBX_PG_BW_GET(ets->pg_bw_tbl,
+ pg_entry));
+ } else {
+ b_found_strict = true;
+ cos_data->data[1].pri_join_mask |= pri_tested;
+ /* If we join a group and one is strict
+ * then strict priority rules */
+ cos_data->data[1].strict =
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
+ }
+ }
+ }
+}
+
+static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp,
+ struct pg_help_data *help_data,
+ struct dcbx_ets_feature *ets,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ u32 pri_join_mask,
+ u8 num_of_dif_pri)
+{
+ /* default E2 settings */
+ cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2;
+
+ switch (help_data->num_of_pg) {
+ case 1:
+ bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(
+ bp,
+ help_data,
+ cos_data,
+ pri_join_mask,
+ num_of_dif_pri);
+ break;
+ case 2:
+ bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
+ bp,
+ help_data,
+ ets,
+ cos_data,
+ pg_pri_orginal_spread,
+ pri_join_mask,
+ num_of_dif_pri);
+ break;
+
+ case 3:
+ bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
+ bp,
+ help_data,
+ ets,
+ cos_data,
+ pg_pri_orginal_spread,
+ pri_join_mask,
+ num_of_dif_pri);
+ break;
+ default:
+ BNX2X_ERR("Wrong pg_help_data.num_of_pg\n");
+ bnx2x_dcbx_ets_disabled_entry_data(bp,
+ cos_data, pri_join_mask);
+ }
+}
+
+static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u8 entry,
+ u8 num_spread_of_entries,
+ u8 strict_app_pris)
+{
+ u8 strict_pri = BNX2X_DCBX_STRICT_COS_HIGHEST;
+ u8 num_of_app_pri = MAX_PFC_PRIORITIES;
+ u8 app_pri_bit = 0;
+
+ while (num_spread_of_entries && num_of_app_pri > 0) {
+ app_pri_bit = 1 << (num_of_app_pri - 1);
+ if (app_pri_bit & strict_app_pris) {
+ struct cos_entry_help_data *data = &cos_data->
+ data[entry];
+ num_spread_of_entries--;
+ if (num_spread_of_entries == 0) {
+ /* last entry needed: put all the remaining entries in it */
+ data->cos_bw = DCBX_INVALID_COS_BW;
+ data->strict = strict_pri;
+ data->pri_join_mask = strict_app_pris;
+ data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+ data->pri_join_mask);
+ } else {
+ strict_app_pris &= ~app_pri_bit;
+
+ data->cos_bw = DCBX_INVALID_COS_BW;
+ data->strict = strict_pri;
+ data->pri_join_mask = app_pri_bit;
+ data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+ data->pri_join_mask);
+ }
+
+ strict_pri =
+ BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(strict_pri);
+ entry++;
+ }
+
+ num_of_app_pri--;
+ }
+
+ if (num_spread_of_entries)
+ return -EINVAL;
+
+ return 0;
+}
+
+static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u8 entry,
+ u8 num_spread_of_entries,
+ u8 strict_app_pris)
+{
+ if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry,
+ num_spread_of_entries,
+ strict_app_pris)) {
+ struct cos_entry_help_data *data = &cos_data->
+ data[entry];
+ /* Fill a single strict-priority entry as a fallback */
+ data->cos_bw = DCBX_INVALID_COS_BW;
+ data->strict = BNX2X_DCBX_STRICT_COS_HIGHEST;
+ data->pri_join_mask = strict_app_pris;
+ data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+ data->pri_join_mask);
+ return 1;
+ }
+
+ return num_spread_of_entries;
+}
+
+static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp,
+ struct pg_help_data *help_data,
+ struct dcbx_ets_feature *ets,
+ struct cos_help_data *cos_data,
+ u32 pri_join_mask)
+{
+ u8 need_num_of_entries = 0;
+ u8 i = 0;
+ u8 entry = 0;
+
+ /*
+ * If the number of requested PG-s in CEE is greater than 3,
+ * the result is undefined, since this is a violation
+ * of the standard.
+ */
+ if (help_data->num_of_pg > DCBX_COS_MAX_NUM_E3B0) {
+ if (bnx2x_dcbx_join_pgs(bp, ets, help_data,
+ DCBX_COS_MAX_NUM_E3B0)) {
+ BNX2X_ERR("Unable to reduce the number of PGs -"
+ "we will disables ETS\n");
+ bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data,
+ pri_join_mask);
+ return;
+ }
+ }
+
+ for (i = 0; i < help_data->num_of_pg; i++) {
+ struct pg_entry_help_data *pg = &help_data->data[i];
+ if (pg->pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+ struct cos_entry_help_data *data = &cos_data->
+ data[entry];
+ /* Fill BW entry */
+ data->cos_bw = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg->pg);
+ data->strict = BNX2X_DCBX_STRICT_INVALID;
+ data->pri_join_mask = pg->pg_priority;
+ data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+ data->pri_join_mask);
+
+ entry++;
+ } else {
+ need_num_of_entries = min_t(u8,
+ (u8)pg->num_of_dif_pri,
+ (u8)DCBX_COS_MAX_NUM_E3B0 -
+ help_data->num_of_pg + 1);
+ /*
+ * If there are still VOQ-s which have no associated PG,
+ * then associate these VOQ-s to PG15. These PG-s will
+ * be used for SP between priorities on PG15.
+ */
+ entry += bnx2x_dcbx_cee_fill_strict_pri(bp, cos_data,
+ entry, need_num_of_entries, pg->pg_priority);
+ }
+ }
+
+ /* 'entry' now holds the number of COSes used */
+ cos_data->num_of_cos = entry;
+}
+
+static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
+ struct pg_help_data *help_data,
+ struct dcbx_ets_feature *ets,
+ u32 *pg_pri_orginal_spread)
+{
+ struct cos_help_data cos_data;
+ u8 i = 0;
+ u32 pri_join_mask = 0;
+ u8 num_of_dif_pri = 0;
+
+ memset(&cos_data, 0, sizeof(cos_data));
+
+ /* Validate the pg value */
+ for (i = 0; i < help_data->num_of_pg; i++) {
+ if (DCBX_STRICT_PRIORITY != help_data->data[i].pg &&
+ DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg)
+ BNX2X_ERR("Invalid pg[%d] data %x\n", i,
+ help_data->data[i].pg);
+ pri_join_mask |= help_data->data[i].pg_priority;
+ num_of_dif_pri += help_data->data[i].num_of_dif_pri;
+ }
+
+ /* defaults */
+ cos_data.num_of_cos = 1;
+ for (i = 0; i < ARRAY_SIZE(cos_data.data); i++) {
+ cos_data.data[i].pri_join_mask = 0;
+ cos_data.data[i].pausable = false;
+ cos_data.data[i].strict = BNX2X_DCBX_STRICT_INVALID;
+ cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW;
+ }
+
+ if (CHIP_IS_E3B0(bp))
+ bnx2x_dcbx_cee_fill_cos_params(bp, help_data, ets,
+ &cos_data, pri_join_mask);
+ else /* E2 + E3A0 */
+ bnx2x_dcbx_2cos_limit_cee_fill_cos_params(bp,
+ help_data, ets,
+ &cos_data,
+ pg_pri_orginal_spread,
+ pri_join_mask,
+ num_of_dif_pri);
+
+ for (i = 0; i < cos_data.num_of_cos; i++) {
+ struct bnx2x_dcbx_cos_params *p =
+ &bp->dcbx_port_params.ets.cos_params[i];
+
+ p->strict = cos_data.data[i].strict;
+ p->bw_tbl = cos_data.data[i].cos_bw;
+ p->pri_bitmask = cos_data.data[i].pri_join_mask;
+ p->pauseable = cos_data.data[i].pausable;
+
+ /* sanity */
+ if (p->bw_tbl != DCBX_INVALID_COS_BW ||
+ p->strict != BNX2X_DCBX_STRICT_INVALID) {
+ if (p->pri_bitmask == 0)
+ BNX2X_ERR("Invalid pri_bitmask for %d\n", i);
+
+ if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) {
+ if (p->pauseable &&
+ DCBX_PFC_PRI_GET_NON_PAUSE(bp,
+ p->pri_bitmask) != 0)
+ BNX2X_ERR("Inconsistent config for pausable COS %d\n", i);
+
+ if (!p->pauseable &&
+ DCBX_PFC_PRI_GET_PAUSE(bp,
+ p->pri_bitmask) != 0)
+ BNX2X_ERR("Inconsistent config for nonpausable COS %d\n", i);
+ }
+ }
+
+ if (p->pauseable)
+ DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n",
+ i, cos_data.data[i].pri_join_mask);
+ else
+ DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask "
+ "0x%x\n",
+ i, cos_data.data[i].pri_join_mask);
+ }
+
+ bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos;
+}
+
+static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
+ u32 *set_configuration_ets_pg,
+ u32 *pri_pg_tbl)
+{
+ int i;
+
+ for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
+ set_configuration_ets_pg[i] = DCBX_PRI_PG_GET(pri_pg_tbl, i);
+
+ DP(NETIF_MSG_LINK, "set_configuration_ets_pg[%d] = 0x%x\n",
+ i, set_configuration_ets_pg[i]);
+ }
+}
+
+static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
+ struct bnx2x_func_tx_start_params *pfc_fw_cfg)
+{
+ u16 pri_bit = 0;
+ u8 cos = 0, pri = 0;
+ struct priority_cos *tt2cos;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+
+ memset(pfc_fw_cfg, 0, sizeof(*pfc_fw_cfg));
+
+ /* to disable DCB - the structure must be zeroed */
+ if (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR)
+ return;
+
+ /* shortcut */
+ tt2cos = pfc_fw_cfg->traffic_type_to_priority_cos;
+
+ /* FW version should be incremented on each update */
+ pfc_fw_cfg->dcb_version = ++bp->dcb_version;
+ pfc_fw_cfg->dcb_enabled = 1;
+
+ /* Fill priority parameters */
+ for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
+ tt2cos[pri].priority = ttp[pri];
+ pri_bit = 1 << tt2cos[pri].priority;
+
+ /* Fill COS parameters based on the calculated COS,
+ * to make it more general for future use */
+ for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++)
+ if (bp->dcbx_port_params.ets.cos_params[cos].
+ pri_bitmask & pri_bit)
+ tt2cos[pri].cos = cos;
+ }
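+
+ /* Illustrative example (values are ours): if COS 0 carries
+ * priority mask 0x0f and COS 1 carries 0xf0, a traffic type
+ * whose priority is 5 yields pri_bit 0x20 and is therefore
+ * mapped to tt2cos[pri].cos = 1.
+ */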
+
+ /* we never want the FW to add a priority-0 VLAN tag */
+ pfc_fw_cfg->dont_add_pri_0_en = 1;
+
+ bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg);
+}
+
+void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
+{
+ /* if we need to synchronize the DCBX result from the previous PMF
+ * read it from shmem and update bp accordingly
+ */
+ if (SHMEM2_HAS(bp, drv_flags) &&
+ GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) {
+ /* Read neg results if dcbx is in the FW */
+ if (bnx2x_dcbx_read_shmem_neg_results(bp))
+ return;
+
+ bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+ bp->dcbx_error);
+ bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+ bp->dcbx_error);
+ }
+}
+
+/* DCB netlink */
+#ifdef BCM_DCBNL
+
+#define BNX2X_DCBX_CAPS (DCB_CAP_DCBX_LLD_MANAGED | \
+ DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC)
+
+static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp)
+{
+ /* validate dcbnl call that may change HW state:
+ * DCB is on and DCBX mode was SUCCESSFULLY set by the user.
+ */
+ return bp->dcb_state && bp->dcbx_mode_uset;
+}
+
+static u8 bnx2x_dcbnl_get_state(struct net_device *netdev)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "state = %d\n", bp->dcb_state);
+ return bp->dcb_state;
+}
+
+static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off");
+
+ bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled);
+ return 0;
+}
+
+static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev,
+ u8 *perm_addr)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "GET-PERM-ADDR\n");
+
+ /* first the HW mac address */
+ memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
+
+#ifdef BCM_CNIC
+ /* second SAN address */
+ memcpy(perm_addr+netdev->addr_len, bp->fip_mac, netdev->addr_len);
+#endif
+}
+
+static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
+ u8 prio_type, u8 pgid, u8 bw_pct,
+ u8 up_map)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, pgid);
+ if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
+ return;
+
+ /* bw_pct ignored - bandwidth percentage division between user
+ * priorities within the same group is not
+ * standard and hence not supported
+ *
+ * prio_type ignored - priority levels within the same group are not
+ * standard and hence are not supported. According
+ * to the standard, pgid 15 is dedicated to strict
+ * priority traffic (on the port level).
+ *
+ * up_map ignored
+ */
+
+ bp->dcbx_config_params.admin_configuration_ets_pg[prio] = pgid;
+ bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
+}
+
+static void bnx2x_dcbnl_set_pg_bwgcfg_tx(struct net_device *netdev,
+ int pgid, u8 bw_pct)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "pgid[%d] = %d\n", pgid, bw_pct);
+
+ if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
+ return;
+
+ bp->dcbx_config_params.admin_configuration_bw_precentage[pgid] = bw_pct;
+ bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
+}
+
+static void bnx2x_dcbnl_set_pg_tccfg_rx(struct net_device *netdev, int prio,
+ u8 prio_type, u8 pgid, u8 bw_pct,
+ u8 up_map)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n");
+}
+
+static void bnx2x_dcbnl_set_pg_bwgcfg_rx(struct net_device *netdev,
+ int pgid, u8 bw_pct)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n");
+}
+
+static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "prio = %d\n", prio);
+
+ /* bw_pct ignored - bandwidth percentage division between user
+ * priorities within the same group is not
+ * standard and hence not supported
+ *
+ * prio_type ignored - priority levels within the same group are not
+ * standard and hence are not supported. According
+ * to the standard, pgid 15 is dedicated to strict
+ * priority traffic (on the port level).
+ *
+ * up_map ignored
+ */
+ *up_map = *bw_pct = *prio_type = *pgid = 0;
+
+ if (!bp->dcb_state || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
+ return;
+
+ *pgid = DCBX_PRI_PG_GET(bp->dcbx_local_feat.ets.pri_pg_tbl, prio);
+}
+
+static void bnx2x_dcbnl_get_pg_bwgcfg_tx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "pgid = %d\n", pgid);
+
+ *bw_pct = 0;
+
+ if (!bp->dcb_state || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
+ return;
+
+ *bw_pct = DCBX_PG_BW_GET(bp->dcbx_local_feat.ets.pg_bw_tbl, pgid);
+}
+
+static void bnx2x_dcbnl_get_pg_tccfg_rx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
+
+ *prio_type = *pgid = *bw_pct = *up_map = 0;
+}
+
+static void bnx2x_dcbnl_get_pg_bwgcfg_rx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
+
+ *bw_pct = 0;
+}
+
+static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
+ u8 setting)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, setting);
+
+ if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES)
+ return;
+
+ bp->dcbx_config_params.admin_pfc_bitmap |= ((setting ? 1 : 0) << prio);
+
+ if (setting)
+ bp->dcbx_config_params.admin_pfc_tx_enable = 1;
+}
+
+static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
+ u8 *setting)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "prio = %d\n", prio);
+
+ *setting = 0;
+
+ if (!bp->dcb_state || prio >= MAX_PFC_PRIORITIES)
+ return;
+
+ *setting = (bp->dcbx_local_feat.pfc.pri_en_bitmap >> prio) & 0x1;
+}
+
+static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ int rc = 0;
+
+ DP(NETIF_MSG_LINK, "SET-ALL\n");
+
+ if (!bnx2x_dcbnl_set_valid(bp))
+ return 1;
+
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ netdev_err(bp->dev, "Handling parity error recovery. "
+ "Try again later\n");
+ return 1;
+ }
+ if (netif_running(bp->dev)) {
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+ }
+ DP(NETIF_MSG_LINK, "set_dcbx_params done (%d)\n", rc);
+ if (rc)
+ return 1;
+
+ return 0;
+}
+
+static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u8 rval = 0;
+
+ if (bp->dcb_state) {
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_UP2TC:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ *cap = 0x80; /* 8 priorities for PGs */
+ break;
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80; /* 8 priorities for PFC */
+ break;
+ case DCB_CAP_ATTR_GSP:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_BCN:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = BNX2X_DCBX_CAPS;
+ break;
+ default:
+ rval = -EINVAL;
+ break;
+ }
+ } else
+ rval = -EINVAL;
+
+ DP(NETIF_MSG_LINK, "capid %d:%x\n", capid, *cap);
+ return rval;
+}
+
+static u8 bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u8 rval = 0;
+
+ DP(NETIF_MSG_LINK, "tcid %d\n", tcid);
+
+ if (bp->dcb_state) {
+ switch (tcid) {
+ case DCB_NUMTCS_ATTR_PG:
+ *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 :
+ DCBX_COS_MAX_NUM_E2;
+ break;
+ case DCB_NUMTCS_ATTR_PFC:
+ *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 :
+ DCBX_COS_MAX_NUM_E2;
+ break;
+ default:
+ rval = -EINVAL;
+ break;
+ }
+ } else
+ rval = -EINVAL;
+
+ return rval;
+}
+
+static u8 bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "num tcs = %d; Not supported\n", num);
+ return -EINVAL;
+}
+
+static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
+
+ if (!bp->dcb_state)
+ return 0;
+
+ return bp->dcbx_local_feat.pfc.enabled;
+}
+
+static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off");
+
+ if (!bnx2x_dcbnl_set_valid(bp))
+ return;
+
+ bp->dcbx_config_params.admin_pfc_tx_enable =
+ bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0);
+}
+
+static void bnx2x_admin_app_set_ent(
+ struct bnx2x_admin_priority_app_table *app_ent,
+ u8 idtype, u16 idval, u8 up)
+{
+ app_ent->valid = 1;
+
+ switch (idtype) {
+ case DCB_APP_IDTYPE_ETHTYPE:
+ app_ent->traffic_type = TRAFFIC_TYPE_ETH;
+ break;
+ case DCB_APP_IDTYPE_PORTNUM:
+ app_ent->traffic_type = TRAFFIC_TYPE_PORT;
+ break;
+ default:
+ break; /* never gets here */
+ }
+ app_ent->app_id = idval;
+ app_ent->priority = up;
+}
+
+static bool bnx2x_admin_app_is_equal(
+ struct bnx2x_admin_priority_app_table *app_ent,
+ u8 idtype, u16 idval)
+{
+ if (!app_ent->valid)
+ return false;
+
+ switch (idtype) {
+ case DCB_APP_IDTYPE_ETHTYPE:
+ if (app_ent->traffic_type != TRAFFIC_TYPE_ETH)
+ return false;
+ break;
+ case DCB_APP_IDTYPE_PORTNUM:
+ if (app_ent->traffic_type != TRAFFIC_TYPE_PORT)
+ return false;
+ break;
+ default:
+ return false;
+ }
+ if (app_ent->app_id != idval)
+ return false;
+
+ return true;
+}
+
+static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
+{
+ int i, ff;
+
+ /* iterate over the app entries looking for idtype and idval */
+ for (i = 0, ff = -1; i < 4; i++) {
+ struct bnx2x_admin_priority_app_table *app_ent =
+ &bp->dcbx_config_params.admin_priority_app_table[i];
+ if (bnx2x_admin_app_is_equal(app_ent, idtype, idval))
+ break;
+
+ if (ff < 0 && !app_ent->valid)
+ ff = i;
+ }
+ if (i < 4)
+ /* if found, overwrite up */
+ bp->dcbx_config_params.
+ admin_priority_app_table[i].priority = up;
+ else if (ff >= 0)
+ /* not found - use first free entry */
+ bnx2x_admin_app_set_ent(
+ &bp->dcbx_config_params.admin_priority_app_table[ff],
+ idtype, idval, up);
+ else
+ /* app table is full */
+ return -EBUSY;
+
+ /* up configured, if not 0 make sure feature is enabled */
+ if (up)
+ bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
+
+ return 0;
+}
+
+static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
+ u16 idval, u8 up)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ DP(NETIF_MSG_LINK, "app_type %d, app_id %x, prio bitmap %d\n",
+ idtype, idval, up);
+
+ if (!bnx2x_dcbnl_set_valid(bp))
+ return -EINVAL;
+
+ /* verify idtype */
+ switch (idtype) {
+ case DCB_APP_IDTYPE_ETHTYPE:
+ case DCB_APP_IDTYPE_PORTNUM:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return bnx2x_set_admin_app_up(bp, idtype, idval, up);
+}
+
+static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u8 state;
+
+ state = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE;
+
+ if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF)
+ state |= DCB_CAP_DCBX_STATIC;
+
+ return state;
+}
+
+static u8 bnx2x_dcbnl_set_dcbx(struct net_device *netdev, u8 state)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "state = %02x\n", state);
+
+ /* set dcbx mode */
+
+ if ((state & BNX2X_DCBX_CAPS) != state) {
+ BNX2X_ERR("Requested DCBX mode %x is beyond advertised "
+ "capabilities\n", state);
+ return 1;
+ }
+
+ if (bp->dcb_state != BNX2X_DCB_STATE_ON) {
+ BNX2X_ERR("DCB turned off, DCBX configuration is invalid\n");
+ return 1;
+ }
+
+ if (state & DCB_CAP_DCBX_STATIC)
+ bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_OFF;
+ else
+ bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_ON;
+
+ bp->dcbx_mode_uset = true;
+ return 0;
+}
+
+static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
+ u8 *flags)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u8 rval = 0;
+
+ DP(NETIF_MSG_LINK, "featid %d\n", featid);
+
+ if (bp->dcb_state) {
+ *flags = 0;
+ switch (featid) {
+ case DCB_FEATCFG_ATTR_PG:
+ if (bp->dcbx_local_feat.ets.enabled)
+ *flags |= DCB_FEATCFG_ENABLE;
+ if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR)
+ *flags |= DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ if (bp->dcbx_local_feat.pfc.enabled)
+ *flags |= DCB_FEATCFG_ENABLE;
+ if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
+ DCBX_LOCAL_PFC_MISMATCH))
+ *flags |= DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ if (bp->dcbx_local_feat.app.enabled)
+ *flags |= DCB_FEATCFG_ENABLE;
+ if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
+ DCBX_LOCAL_APP_MISMATCH))
+ *flags |= DCB_FEATCFG_ERROR;
+ break;
+ default:
+ rval = -EINVAL;
+ break;
+ }
+ } else
+ rval = -EINVAL;
+
+ return rval;
+}
+
+static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
+ u8 flags)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u8 rval = 0;
+
+ DP(NETIF_MSG_LINK, "featid = %d flags = %02x\n", featid, flags);
+
+ /* ignore the 'advertise' flag */
+ if (bnx2x_dcbnl_set_valid(bp)) {
+ switch (featid) {
+ case DCB_FEATCFG_ATTR_PG:
+ bp->dcbx_config_params.admin_ets_enable =
+ flags & DCB_FEATCFG_ENABLE ? 1 : 0;
+ bp->dcbx_config_params.admin_ets_willing =
+ flags & DCB_FEATCFG_WILLING ? 1 : 0;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ bp->dcbx_config_params.admin_pfc_enable =
+ flags & DCB_FEATCFG_ENABLE ? 1 : 0;
+ bp->dcbx_config_params.admin_pfc_willing =
+ flags & DCB_FEATCFG_WILLING ? 1 : 0;
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ /* ignore enable, always enabled */
+ bp->dcbx_config_params.admin_app_priority_willing =
+ flags & DCB_FEATCFG_WILLING ? 1 : 0;
+ break;
+ default:
+ rval = -EINVAL;
+ break;
+ }
+ } else
+ rval = -EINVAL;
+
+ return rval;
+}
+
+static int bnx2x_peer_appinfo(struct net_device *netdev,
+ struct dcb_peer_app_info *info, u16* app_count)
+{
+ int i;
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ DP(NETIF_MSG_LINK, "APP-INFO\n");
+
+ info->willing = (bp->dcbx_remote_flags & DCBX_APP_REM_WILLING) ?: 0;
+ info->error = (bp->dcbx_remote_flags & DCBX_APP_RX_ERROR) ?: 0;
+ *app_count = 0;
+
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
+ if (bp->dcbx_remote_feat.app.app_pri_tbl[i].appBitfield &
+ DCBX_APP_ENTRY_VALID)
+ (*app_count)++;
+ return 0;
+}
+
+static int bnx2x_peer_apptable(struct net_device *netdev,
+ struct dcb_app *table)
+{
+ int i, j;
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ DP(NETIF_MSG_LINK, "APP-TABLE\n");
+
+ for (i = 0, j = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ struct dcbx_app_priority_entry *ent =
+ &bp->dcbx_remote_feat.app.app_pri_tbl[i];
+
+ if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
+ table[j].selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
+ table[j].priority = bnx2x_dcbx_dcbnl_app_up(ent);
+ table[j++].protocol = ent->app_id;
+ }
+ }
+ return 0;
+}
+
+static int bnx2x_cee_peer_getpg(struct net_device *netdev, struct cee_pg *pg)
+{
+ int i;
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ pg->willing = (bp->dcbx_remote_flags & DCBX_ETS_REM_WILLING) ?: 0;
+
+ for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
+ pg->pg_bw[i] =
+ DCBX_PG_BW_GET(bp->dcbx_remote_feat.ets.pg_bw_tbl, i);
+ pg->prio_pg[i] =
+ DCBX_PRI_PG_GET(bp->dcbx_remote_feat.ets.pri_pg_tbl, i);
+ }
+ return 0;
+}
+
+static int bnx2x_cee_peer_getpfc(struct net_device *netdev,
+ struct cee_pfc *pfc)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ pfc->tcs_supported = bp->dcbx_remote_feat.pfc.pfc_caps;
+ pfc->pfc_en = bp->dcbx_remote_feat.pfc.pri_en_bitmap;
+ return 0;
+}
+
+const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
+ .getstate = bnx2x_dcbnl_get_state,
+ .setstate = bnx2x_dcbnl_set_state,
+ .getpermhwaddr = bnx2x_dcbnl_get_perm_hw_addr,
+ .setpgtccfgtx = bnx2x_dcbnl_set_pg_tccfg_tx,
+ .setpgbwgcfgtx = bnx2x_dcbnl_set_pg_bwgcfg_tx,
+ .setpgtccfgrx = bnx2x_dcbnl_set_pg_tccfg_rx,
+ .setpgbwgcfgrx = bnx2x_dcbnl_set_pg_bwgcfg_rx,
+ .getpgtccfgtx = bnx2x_dcbnl_get_pg_tccfg_tx,
+ .getpgbwgcfgtx = bnx2x_dcbnl_get_pg_bwgcfg_tx,
+ .getpgtccfgrx = bnx2x_dcbnl_get_pg_tccfg_rx,
+ .getpgbwgcfgrx = bnx2x_dcbnl_get_pg_bwgcfg_rx,
+ .setpfccfg = bnx2x_dcbnl_set_pfc_cfg,
+ .getpfccfg = bnx2x_dcbnl_get_pfc_cfg,
+ .setall = bnx2x_dcbnl_set_all,
+ .getcap = bnx2x_dcbnl_get_cap,
+ .getnumtcs = bnx2x_dcbnl_get_numtcs,
+ .setnumtcs = bnx2x_dcbnl_set_numtcs,
+ .getpfcstate = bnx2x_dcbnl_get_pfc_state,
+ .setpfcstate = bnx2x_dcbnl_set_pfc_state,
+ .setapp = bnx2x_dcbnl_set_app_up,
+ .getdcbx = bnx2x_dcbnl_get_dcbx,
+ .setdcbx = bnx2x_dcbnl_set_dcbx,
+ .getfeatcfg = bnx2x_dcbnl_get_featcfg,
+ .setfeatcfg = bnx2x_dcbnl_set_featcfg,
+ .peer_getappinfo = bnx2x_peer_appinfo,
+ .peer_getapptable = bnx2x_peer_apptable,
+ .cee_peer_getpg = bnx2x_cee_peer_getpg,
+ .cee_peer_getpfc = bnx2x_cee_peer_getpfc,
+};
+
+#endif /* BCM_DCBNL */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
new file mode 100644
index 000000000000..2c6a3bca6f28
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -0,0 +1,203 @@
+/* bnx2x_dcb.h: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Dmitry Kravkov
+ *
+ */
+#ifndef BNX2X_DCB_H
+#define BNX2X_DCB_H
+
+#include "bnx2x_hsi.h"
+
+#define LLFC_DRIVER_TRAFFIC_TYPE_MAX 3 /* NW, iSCSI, FCoE */
+struct bnx2x_dcbx_app_params {
+ u32 enabled;
+ u32 traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
+};
+
+#define DCBX_COS_MAX_NUM_E2 DCBX_E2E3_MAX_NUM_COS
+/* bnx2x currently limits the number of supported COSes to 3, to be extended to 6 */
+#define BNX2X_MAX_COS_SUPPORT 3
+#define DCBX_COS_MAX_NUM_E3B0 BNX2X_MAX_COS_SUPPORT
+#define DCBX_COS_MAX_NUM BNX2X_MAX_COS_SUPPORT
+
+struct bnx2x_dcbx_cos_params {
+ u32 bw_tbl;
+ u32 pri_bitmask;
+ /*
+ * strict priority: valid values are 0..5; 0 is highest priority.
+ * There can't be two COSes with the same priority.
+ */
+ u8 strict;
+#define BNX2X_DCBX_STRICT_INVALID DCBX_COS_MAX_NUM
+#define BNX2X_DCBX_STRICT_COS_HIGHEST 0
+#define BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(sp) ((sp) + 1)
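+/* e.g. BNX2X_DCBX_STRICT_COS_HIGHEST is 0 and
+ * BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(0) is 1 - one step down
+ * in the arbiter.
+ */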
+ u8 pauseable;
+};
+
+struct bnx2x_dcbx_pg_params {
+ u32 enabled;
+ u8 num_of_cos; /* valid COS entries */
+ struct bnx2x_dcbx_cos_params cos_params[DCBX_COS_MAX_NUM];
+};
+
+struct bnx2x_dcbx_pfc_params {
+ u32 enabled;
+ u32 priority_non_pauseable_mask;
+};
+
+struct bnx2x_dcbx_port_params {
+ struct bnx2x_dcbx_pfc_params pfc;
+ struct bnx2x_dcbx_pg_params ets;
+ struct bnx2x_dcbx_app_params app;
+};
+
+#define BNX2X_DCBX_CONFIG_INV_VALUE (0xFFFFFFFF)
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_DISABLE 0
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE 1
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID (BNX2X_DCBX_CONFIG_INV_VALUE)
+#define BNX2X_IS_ETS_ENABLED(bp) ((bp)->dcb_state == BNX2X_DCB_STATE_ON &&\
+ (bp)->dcbx_port_params.ets.enabled)
+
+struct bnx2x_config_lldp_params {
+ u32 overwrite_settings;
+ u32 msg_tx_hold;
+ u32 msg_fast_tx;
+ u32 tx_credit_max;
+ u32 msg_tx_interval;
+ u32 tx_fast;
+};
+
+struct bnx2x_admin_priority_app_table {
+ u32 valid;
+ u32 priority;
+#define INVALID_TRAFFIC_TYPE_PRIORITY (0xFFFFFFFF)
+ u32 traffic_type;
+#define TRAFFIC_TYPE_ETH 0
+#define TRAFFIC_TYPE_PORT 1
+ u32 app_id;
+};
+
+struct bnx2x_config_dcbx_params {
+ u32 overwrite_settings;
+ u32 admin_dcbx_version;
+ u32 admin_ets_enable;
+ u32 admin_pfc_enable;
+ u32 admin_tc_supported_tx_enable;
+ u32 admin_ets_configuration_tx_enable;
+ u32 admin_ets_recommendation_tx_enable;
+ u32 admin_pfc_tx_enable;
+ u32 admin_application_priority_tx_enable;
+ u32 admin_ets_willing;
+ u32 admin_ets_reco_valid;
+ u32 admin_pfc_willing;
+ u32 admin_app_priority_willing;
+ u32 admin_configuration_bw_precentage[8];
+ u32 admin_configuration_ets_pg[8];
+ u32 admin_recommendation_bw_precentage[8];
+ u32 admin_recommendation_ets_pg[8];
+ u32 admin_pfc_bitmap;
+ struct bnx2x_admin_priority_app_table admin_priority_app_table[4];
+ u32 admin_default_priority;
+};
+
+#define GET_FLAGS(flags, bits) ((flags) & (bits))
+#define SET_FLAGS(flags, bits) ((flags) |= (bits))
+#define RESET_FLAGS(flags, bits) ((flags) &= ~(bits))
+
+enum {
+ DCBX_READ_LOCAL_MIB,
+ DCBX_READ_REMOTE_MIB
+};
+
+#define ETH_TYPE_FCOE (0x8906)
+#define TCP_PORT_ISCSI (0xCBC) /* 3260 - well-known iSCSI port */
+
+#define PFC_VALUE_FRAME_SIZE (512)
+#define PFC_QUANTA_IN_NANOSEC_FROM_SPEED_MEGA(mega_speed) \
+ ((1000 * PFC_VALUE_FRAME_SIZE)/(mega_speed))
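+
+/* e.g. at 10G (mega_speed == 10000) the macro yields
+ * (1000 * 512) / 10000 == 51 ns; note the integer division.
+ */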
+
+#define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD 130
+#define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD 170
+
+struct cos_entry_help_data {
+ u32 pri_join_mask;
+ u32 cos_bw;
+ u8 strict;
+ bool pausable;
+};
+
+struct cos_help_data {
+ struct cos_entry_help_data data[DCBX_COS_MAX_NUM];
+ u8 num_of_cos;
+};
+
+#define DCBX_ILLEGAL_PG (0xFF)
+#define DCBX_PFC_PRI_MASK (0xFF)
+#define DCBX_STRICT_PRIORITY (15)
+#define DCBX_INVALID_COS_BW (0xFFFFFFFF)
+#define DCBX_PFC_PRI_NON_PAUSE_MASK(bp) \
+ ((bp)->dcbx_port_params.pfc.priority_non_pauseable_mask)
+#define DCBX_PFC_PRI_PAUSE_MASK(bp) \
+ ((u8)~DCBX_PFC_PRI_NON_PAUSE_MASK(bp))
+#define DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri) \
+ ((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp)))
+#define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri) \
+ (DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri))
+#define DCBX_IS_PFC_PRI_SOME_PAUSE(bp, pg_pri) \
+ (0 != DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri))
+#define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri) \
+ (pg_pri == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri)))
+#define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\
+ ((pg_pri) == DCBX_PFC_PRI_GET_NON_PAUSE((bp), (pg_pri)))
+#define IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pg_pri) \
+ (!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \
+ IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri))))
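+
+/* Worked example (illustrative mask values, not from the spec): if
+ * priority_non_pauseable_mask is 0x0f, DCBX_PFC_PRI_PAUSE_MASK is 0xf0;
+ * pg_pri 0x30 is then "only pause", 0x03 is "only non-pause" and 0x11
+ * is a mix of both.
+ */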
+
+struct pg_entry_help_data {
+ u8 num_of_dif_pri;
+ u8 pg;
+ u32 pg_priority;
+};
+
+struct pg_help_data {
+ struct pg_entry_help_data data[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
+ u8 num_of_pg;
+};
+
+/* forward DCB/PFC related declarations */
+struct bnx2x;
+void bnx2x_dcbx_update(struct work_struct *work);
+void bnx2x_dcbx_init_params(struct bnx2x *bp);
+void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);
+
+enum {
+ BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
+ BNX2X_DCBX_STATE_TX_PAUSED,
+ BNX2X_DCBX_STATE_TX_RELEASED
+};
+
+void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
+void bnx2x_dcbx_pmf_update(struct bnx2x *bp);
+/* DCB netlink */
+#ifdef BCM_DCBNL
+extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
+int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
+#endif /* BCM_DCBNL */
+
+#endif /* BNX2X_DCB_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
new file mode 100644
index 000000000000..b983825d0ee9
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
@@ -0,0 +1,1156 @@
+/* bnx2x_dump.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ */
+
+
+/* This struct holds a signature to ensure that the dump returned from the
+ * driver matches the metadata file inserted into grc_dump.tcl.
+ * The signature consists of a time stamp, the diag version and the
+ * grc_dump version.
+ */
+
+#ifndef BNX2X_DUMP_H
+#define BNX2X_DUMP_H
+
+/* definitions */
+#define XSTORM_WAITP_ADDR 0x2b8a80
+#define TSTORM_WAITP_ADDR 0x1b8a80
+#define USTORM_WAITP_ADDR 0x338a80
+#define CSTORM_WAITP_ADDR 0x238a80
+#define TSTORM_CAM_MODE 0x1B1440
+
+#define MAX_TIMER_PENDING 200
+#define TIMER_SCAN_DONT_CARE 0xFF
+#define RI_E1 0x1
+#define RI_E1H 0x2
+#define RI_E2 0x4
+#define RI_E3 0x8
+#define RI_E3B0 0x10
+#define RI_ONLINE 0x100
+#define RI_OFFLINE 0x0
+#define RI_PATH0_DUMP 0x200
+#define RI_PATH1_DUMP 0x400
+
+#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
+#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
+#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
+#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
+#define RI_E1E2_ONLINE (RI_E1 | RI_E2 | RI_ONLINE)
+#define RI_E1HE2_ONLINE (RI_E1H | RI_E2 | RI_ONLINE)
+#define RI_E1E1HE2_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
+#define RI_E3_ONLINE (RI_E3 | RI_ONLINE)
+#define RI_E1E3_ONLINE (RI_E1 | RI_E3 | RI_ONLINE)
+#define RI_E1HE3_ONLINE (RI_E1H | RI_E3 | RI_ONLINE)
+#define RI_E1E1HE3_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_ONLINE)
+#define RI_E2E3_ONLINE (RI_E2 | RI_E3 | RI_ONLINE)
+#define RI_E1E2E3_ONLINE (RI_E1 | RI_E2 | RI_E3 | RI_ONLINE)
+#define RI_E1HE2E3_ONLINE (RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
+#define RI_E1E1HE2E3_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
+#define RI_E3B0_ONLINE (RI_E3B0 | RI_ONLINE)
+#define RI_E1E3B0_ONLINE (RI_E1 | RI_E3B0 | RI_ONLINE)
+#define RI_E1HE3B0_ONLINE (RI_E1H | RI_E3B0 | RI_ONLINE)
+#define RI_E1E1HE3B0_ONLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_ONLINE)
+#define RI_E2E3B0_ONLINE (RI_E2 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E2E3B0_ONLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_ONLINE)
+#define RI_E1HE2E3B0_ONLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E1HE2E3B0_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE)
+#define RI_E3E3B0_ONLINE (RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E3E3B0_ONLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1HE3E3B0_ONLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E1HE3E3B0_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E2E3E3B0_ONLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E2E3E3B0_ONLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1HE2E3E3B0_ONLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E1HE2E3E3B0_ONLINE \
+ (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1_OFFLINE (RI_E1 | RI_OFFLINE)
+#define RI_E1H_OFFLINE (RI_E1H | RI_OFFLINE)
+#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H | RI_OFFLINE)
+#define RI_E2_OFFLINE (RI_E2 | RI_OFFLINE)
+#define RI_E1E2_OFFLINE (RI_E1 | RI_E2 | RI_OFFLINE)
+#define RI_E1HE2_OFFLINE (RI_E1H | RI_E2 | RI_OFFLINE)
+#define RI_E1E1HE2_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_OFFLINE)
+#define RI_E3_OFFLINE (RI_E3 | RI_OFFLINE)
+#define RI_E1E3_OFFLINE (RI_E1 | RI_E3 | RI_OFFLINE)
+#define RI_E1HE3_OFFLINE (RI_E1H | RI_E3 | RI_OFFLINE)
+#define RI_E1E1HE3_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_OFFLINE)
+#define RI_E2E3_OFFLINE (RI_E2 | RI_E3 | RI_OFFLINE)
+#define RI_E1E2E3_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_OFFLINE)
+#define RI_E1HE2E3_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE)
+#define RI_E1E1HE2E3_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE)
+#define RI_E3B0_OFFLINE (RI_E3B0 | RI_OFFLINE)
+#define RI_E1E3B0_OFFLINE (RI_E1 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1HE3B0_OFFLINE (RI_E1H | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E1HE3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_OFFLINE)
+#define RI_E2E3B0_OFFLINE (RI_E2 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E2E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1HE2E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E1HE2E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE)
+#define RI_E3E3B0_OFFLINE (RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E3E3B0_OFFLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1HE3E3B0_OFFLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E1HE3E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E2E3E3B0_OFFLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E2E3E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1HE2E3E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E1HE2E3E3B0_OFFLINE \
+ (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_ALL_ONLINE RI_E1E1HE2E3E3B0_ONLINE
+#define RI_ALL_OFFLINE RI_E1E1HE2E3E3B0_OFFLINE
+
+#define DBG_DMP_TRACE_BUFFER_SIZE 0x800
+#define DBG_DMP_TRACE_BUFFER_OFFSET(shmem0_offset) \
+ ((shmem0_offset) - DBG_DMP_TRACE_BUFFER_SIZE)
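+
+/* e.g. with shmem0_offset == 0x10000 the trace buffer starts at
+ * 0x10000 - 0x800 == 0xf800.
+ */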
+
+struct dump_sign {
+ u32 time_stamp;
+ u32 diag_ver;
+ u32 grc_dump_ver;
+};
+
+struct dump_hdr {
+ u32 hdr_size; /* in dwords, excluding this field */
+ struct dump_sign dump_sign;
+ u32 xstorm_waitp;
+ u32 tstorm_waitp;
+ u32 ustorm_waitp;
+ u32 cstorm_waitp;
+ u16 info;
+ u8 idle_chk;
+ u8 reserved;
+};
+
+struct reg_addr {
+ u32 addr;
+ u32 size;
+ u16 info;
+};
+
+struct wreg_addr {
+ u32 addr;
+ u32 size;
+ u32 read_regs_count;
+ const u32 *read_regs;
+ u16 info;
+};
+
+static const struct reg_addr reg_addrs[] = {
+ { 0x2000, 341, RI_ALL_ONLINE },
+ { 0x2800, 103, RI_ALL_ONLINE },
+ { 0x3000, 287, RI_ALL_ONLINE },
+ { 0x3800, 331, RI_ALL_ONLINE },
+ { 0x8800, 6, RI_ALL_ONLINE },
+ { 0x8818, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x9000, 147, RI_E2E3E3B0_ONLINE },
+ { 0x924c, 1, RI_E2_ONLINE },
+ { 0x9250, 16, RI_E2E3E3B0_ONLINE },
+ { 0x9400, 33, RI_E2E3E3B0_ONLINE },
+ { 0x9484, 5, RI_E3E3B0_ONLINE },
+ { 0xa000, 27, RI_ALL_ONLINE },
+ { 0xa06c, 1, RI_E1E1H_ONLINE },
+ { 0xa070, 71, RI_ALL_ONLINE },
+ { 0xa18c, 4, RI_E1E1H_ONLINE },
+ { 0xa19c, 62, RI_ALL_ONLINE },
+ { 0xa294, 2, RI_E1E1H_ONLINE },
+ { 0xa29c, 2, RI_ALL_ONLINE },
+ { 0xa2a4, 2, RI_E1E1HE2_ONLINE },
+ { 0xa2ac, 52, RI_ALL_ONLINE },
+ { 0xa39c, 7, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa3b8, 2, RI_E3E3B0_ONLINE },
+ { 0xa3c0, 3, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa3d0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa3d8, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa3e0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa3e8, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa3f0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa3f8, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa400, 40, RI_ALL_ONLINE },
+ { 0xa4a0, 1, RI_E1E1HE2_ONLINE },
+ { 0xa4a4, 2, RI_ALL_ONLINE },
+ { 0xa4ac, 2, RI_E1E1H_ONLINE },
+ { 0xa4b4, 1, RI_E1E1HE2_ONLINE },
+ { 0xa4b8, 2, RI_E1E1H_ONLINE },
+ { 0xa4c0, 3, RI_ALL_ONLINE },
+ { 0xa4cc, 5, RI_E1E1H_ONLINE },
+ { 0xa4e0, 3, RI_ALL_ONLINE },
+ { 0xa4fc, 2, RI_ALL_ONLINE },
+ { 0xa504, 1, RI_E1E1H_ONLINE },
+ { 0xa508, 3, RI_ALL_ONLINE },
+ { 0xa518, 1, RI_ALL_ONLINE },
+ { 0xa520, 1, RI_ALL_ONLINE },
+ { 0xa528, 1, RI_ALL_ONLINE },
+ { 0xa530, 1, RI_ALL_ONLINE },
+ { 0xa538, 1, RI_ALL_ONLINE },
+ { 0xa540, 1, RI_ALL_ONLINE },
+ { 0xa548, 1, RI_E1E1H_ONLINE },
+ { 0xa550, 1, RI_E1E1H_ONLINE },
+ { 0xa558, 1, RI_E1E1H_ONLINE },
+ { 0xa560, 1, RI_E1E1H_ONLINE },
+ { 0xa568, 1, RI_E1E1H_ONLINE },
+ { 0xa570, 1, RI_ALL_ONLINE },
+ { 0xa580, 1, RI_ALL_ONLINE },
+ { 0xa590, 1, RI_ALL_ONLINE },
+ { 0xa5a0, 1, RI_E1E1HE2_ONLINE },
+ { 0xa5c0, 1, RI_ALL_ONLINE },
+ { 0xa5e0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa5e8, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa5f0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa5f8, 1, RI_E1HE2_ONLINE },
+ { 0xa5fc, 9, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa620, 6, RI_E2E3E3B0_ONLINE },
+ { 0xa638, 20, RI_E2_ONLINE },
+ { 0xa688, 42, RI_E2E3E3B0_ONLINE },
+ { 0xa730, 1, RI_E2_ONLINE },
+ { 0xa734, 2, RI_E2E3E3B0_ONLINE },
+ { 0xa73c, 4, RI_E2_ONLINE },
+ { 0xa74c, 5, RI_E2E3E3B0_ONLINE },
+ { 0xa760, 5, RI_E2_ONLINE },
+ { 0xa774, 7, RI_E2E3E3B0_ONLINE },
+ { 0xa790, 15, RI_E2_ONLINE },
+ { 0xa7cc, 4, RI_E2E3E3B0_ONLINE },
+ { 0xa7e0, 6, RI_E3E3B0_ONLINE },
+ { 0xa800, 18, RI_E2_ONLINE },
+ { 0xa848, 33, RI_E2E3E3B0_ONLINE },
+ { 0xa8cc, 2, RI_E3E3B0_ONLINE },
+ { 0xa8d4, 4, RI_E2E3E3B0_ONLINE },
+ { 0xa8e4, 1, RI_E3E3B0_ONLINE },
+ { 0xa8e8, 1, RI_E2E3E3B0_ONLINE },
+ { 0xa8f0, 1, RI_E2E3E3B0_ONLINE },
+ { 0xa8f8, 30, RI_E3E3B0_ONLINE },
+ { 0xa974, 73, RI_E3E3B0_ONLINE },
+ { 0xac30, 1, RI_E3E3B0_ONLINE },
+ { 0xac40, 1, RI_E3E3B0_ONLINE },
+ { 0xac50, 1, RI_E3E3B0_ONLINE },
+ { 0xac60, 1, RI_E3B0_ONLINE },
+ { 0x10000, 9, RI_ALL_ONLINE },
+ { 0x10024, 1, RI_E1E1HE2_ONLINE },
+ { 0x10028, 5, RI_ALL_ONLINE },
+ { 0x1003c, 6, RI_E1E1HE2_ONLINE },
+ { 0x10054, 20, RI_ALL_ONLINE },
+ { 0x100a4, 4, RI_E1E1HE2_ONLINE },
+ { 0x100b4, 11, RI_ALL_ONLINE },
+ { 0x100e0, 4, RI_E1E1HE2_ONLINE },
+ { 0x100f0, 8, RI_ALL_ONLINE },
+ { 0x10110, 6, RI_E1E1HE2_ONLINE },
+ { 0x10128, 110, RI_ALL_ONLINE },
+ { 0x102e0, 4, RI_E1E1HE2_ONLINE },
+ { 0x102f0, 18, RI_ALL_ONLINE },
+ { 0x10338, 20, RI_E1E1HE2_ONLINE },
+ { 0x10388, 10, RI_ALL_ONLINE },
+ { 0x10400, 6, RI_E1E1HE2_ONLINE },
+ { 0x10418, 6, RI_ALL_ONLINE },
+ { 0x10430, 10, RI_E1E1HE2_ONLINE },
+ { 0x10458, 22, RI_ALL_ONLINE },
+ { 0x104b0, 12, RI_E1E1HE2_ONLINE },
+ { 0x104e0, 1, RI_ALL_ONLINE },
+ { 0x104e8, 2, RI_ALL_ONLINE },
+ { 0x104f4, 2, RI_ALL_ONLINE },
+ { 0x10500, 146, RI_ALL_ONLINE },
+ { 0x10750, 2, RI_E1E1HE2_ONLINE },
+ { 0x10760, 2, RI_E1E1HE2_ONLINE },
+ { 0x10770, 2, RI_E1E1HE2_ONLINE },
+ { 0x10780, 2, RI_E1E1HE2_ONLINE },
+ { 0x10790, 2, RI_ALL_ONLINE },
+ { 0x107a0, 2, RI_E1E1HE2_ONLINE },
+ { 0x107b0, 2, RI_E1E1HE2_ONLINE },
+ { 0x107c0, 2, RI_E1E1HE2_ONLINE },
+ { 0x107d0, 2, RI_E1E1HE2_ONLINE },
+ { 0x107e0, 2, RI_ALL_ONLINE },
+ { 0x10880, 2, RI_ALL_ONLINE },
+ { 0x10900, 2, RI_ALL_ONLINE },
+ { 0x16000, 1, RI_E1HE2_ONLINE },
+ { 0x16004, 25, RI_E1HE2E3E3B0_ONLINE },
+ { 0x16070, 8, RI_E1HE2E3E3B0_ONLINE },
+ { 0x16090, 4, RI_E1HE2E3_ONLINE },
+ { 0x160a0, 6, RI_E1HE2E3E3B0_ONLINE },
+ { 0x160c0, 7, RI_E1HE2E3E3B0_ONLINE },
+ { 0x160dc, 2, RI_E1HE2_ONLINE },
+ { 0x160e4, 10, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1610c, 2, RI_E1HE2_ONLINE },
+ { 0x16114, 6, RI_E1HE2E3E3B0_ONLINE },
+ { 0x16140, 48, RI_E1HE2E3E3B0_ONLINE },
+ { 0x16204, 5, RI_E1HE2E3E3B0_ONLINE },
+ { 0x18000, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x18008, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x18010, 35, RI_E2E3E3B0_ONLINE },
+ { 0x180a4, 2, RI_E2E3E3B0_ONLINE },
+ { 0x180c0, 9, RI_E2E3E3B0_ONLINE },
+ { 0x180e4, 1, RI_E2E3_ONLINE },
+ { 0x180e8, 2, RI_E2E3E3B0_ONLINE },
+ { 0x180f0, 1, RI_E2E3_ONLINE },
+ { 0x180f4, 79, RI_E2E3E3B0_ONLINE },
+ { 0x18230, 1, RI_E2E3_ONLINE },
+ { 0x18234, 2, RI_E2E3E3B0_ONLINE },
+ { 0x1823c, 1, RI_E2E3_ONLINE },
+ { 0x18240, 13, RI_E2E3E3B0_ONLINE },
+ { 0x18274, 1, RI_E2_ONLINE },
+ { 0x18278, 81, RI_E2E3E3B0_ONLINE },
+ { 0x18440, 63, RI_E2E3E3B0_ONLINE },
+ { 0x18570, 42, RI_E3E3B0_ONLINE },
+ { 0x18618, 25, RI_E3B0_ONLINE },
+ { 0x18680, 44, RI_E3B0_ONLINE },
+ { 0x18748, 12, RI_E3B0_ONLINE },
+ { 0x18788, 1, RI_E3B0_ONLINE },
+ { 0x1879c, 6, RI_E3B0_ONLINE },
+ { 0x187c4, 51, RI_E3B0_ONLINE },
+ { 0x18a00, 48, RI_E3B0_ONLINE },
+ { 0x20000, 24, RI_ALL_ONLINE },
+ { 0x20060, 8, RI_ALL_ONLINE },
+ { 0x20080, 94, RI_ALL_ONLINE },
+ { 0x201f8, 1, RI_E1E1H_ONLINE },
+ { 0x201fc, 1, RI_ALL_ONLINE },
+ { 0x20200, 1, RI_E1E1H_ONLINE },
+ { 0x20204, 1, RI_ALL_ONLINE },
+ { 0x20208, 1, RI_E1E1H_ONLINE },
+ { 0x2020c, 39, RI_ALL_ONLINE },
+ { 0x202c8, 1, RI_E2E3E3B0_ONLINE },
+ { 0x202d8, 4, RI_E2E3E3B0_ONLINE },
+ { 0x202f0, 1, RI_E3B0_ONLINE },
+ { 0x20400, 2, RI_ALL_ONLINE },
+ { 0x2040c, 8, RI_ALL_ONLINE },
+ { 0x2042c, 18, RI_E1HE2E3E3B0_ONLINE },
+ { 0x20480, 1, RI_ALL_ONLINE },
+ { 0x20500, 1, RI_ALL_ONLINE },
+ { 0x20600, 1, RI_ALL_ONLINE },
+ { 0x28000, 1, RI_ALL_ONLINE },
+ { 0x28004, 8191, RI_ALL_OFFLINE },
+ { 0x30000, 1, RI_ALL_ONLINE },
+ { 0x30004, 16383, RI_ALL_OFFLINE },
+ { 0x40000, 98, RI_ALL_ONLINE },
+ { 0x401a8, 8, RI_E1HE2E3E3B0_ONLINE },
+ { 0x401c8, 1, RI_E1H_ONLINE },
+ { 0x401cc, 2, RI_E1HE2E3E3B0_ONLINE },
+ { 0x401d4, 2, RI_E2E3E3B0_ONLINE },
+ { 0x40200, 4, RI_ALL_ONLINE },
+ { 0x40220, 6, RI_E2E3E3B0_ONLINE },
+ { 0x40238, 8, RI_E2E3_ONLINE },
+ { 0x40258, 4, RI_E2E3E3B0_ONLINE },
+ { 0x40268, 2, RI_E3E3B0_ONLINE },
+ { 0x40270, 17, RI_E3B0_ONLINE },
+ { 0x40400, 43, RI_ALL_ONLINE },
+ { 0x404cc, 3, RI_E1HE2E3E3B0_ONLINE },
+ { 0x404e0, 1, RI_E2E3E3B0_ONLINE },
+ { 0x40500, 2, RI_ALL_ONLINE },
+ { 0x40510, 2, RI_ALL_ONLINE },
+ { 0x40520, 2, RI_ALL_ONLINE },
+ { 0x40530, 2, RI_ALL_ONLINE },
+ { 0x40540, 2, RI_ALL_ONLINE },
+ { 0x40550, 10, RI_E2E3E3B0_ONLINE },
+ { 0x40610, 2, RI_E2E3E3B0_ONLINE },
+ { 0x42000, 164, RI_ALL_ONLINE },
+ { 0x422c0, 4, RI_E2E3E3B0_ONLINE },
+ { 0x422d4, 5, RI_E1HE2E3E3B0_ONLINE },
+ { 0x422e8, 1, RI_E2E3E3B0_ONLINE },
+ { 0x42400, 49, RI_ALL_ONLINE },
+ { 0x424c8, 38, RI_ALL_ONLINE },
+ { 0x42568, 2, RI_ALL_ONLINE },
+ { 0x42640, 5, RI_E2E3E3B0_ONLINE },
+ { 0x42800, 1, RI_ALL_ONLINE },
+ { 0x50000, 1, RI_ALL_ONLINE },
+ { 0x50004, 19, RI_ALL_ONLINE },
+ { 0x50050, 8, RI_ALL_ONLINE },
+ { 0x50070, 88, RI_ALL_ONLINE },
+ { 0x501f0, 4, RI_E1HE2E3E3B0_ONLINE },
+ { 0x50200, 2, RI_ALL_ONLINE },
+ { 0x5020c, 7, RI_ALL_ONLINE },
+ { 0x50228, 6, RI_E1HE2E3E3B0_ONLINE },
+ { 0x50240, 1, RI_ALL_ONLINE },
+ { 0x50280, 1, RI_ALL_ONLINE },
+ { 0x50300, 1, RI_E2E3E3B0_ONLINE },
+ { 0x5030c, 1, RI_E2E3E3B0_ONLINE },
+ { 0x50318, 1, RI_E2E3E3B0_ONLINE },
+ { 0x5031c, 1, RI_E2E3E3B0_ONLINE },
+ { 0x50320, 2, RI_E2E3E3B0_ONLINE },
+ { 0x50330, 1, RI_E3B0_ONLINE },
+ { 0x52000, 1, RI_ALL_ONLINE },
+ { 0x54000, 1, RI_ALL_ONLINE },
+ { 0x54004, 3327, RI_ALL_OFFLINE },
+ { 0x58000, 1, RI_ALL_ONLINE },
+ { 0x58004, 8191, RI_E1E1H_OFFLINE },
+ { 0x60000, 26, RI_ALL_ONLINE },
+ { 0x60068, 8, RI_E1E1H_ONLINE },
+ { 0x60088, 12, RI_ALL_ONLINE },
+ { 0x600b8, 9, RI_E1E1H_ONLINE },
+ { 0x600dc, 1, RI_ALL_ONLINE },
+ { 0x600e0, 5, RI_E1E1H_ONLINE },
+ { 0x600f4, 1, RI_E1E1HE2_ONLINE },
+ { 0x600f8, 1, RI_E1E1H_ONLINE },
+ { 0x600fc, 8, RI_ALL_ONLINE },
+ { 0x6013c, 24, RI_E1H_ONLINE },
+ { 0x6019c, 2, RI_E2E3E3B0_ONLINE },
+ { 0x601ac, 18, RI_E2E3E3B0_ONLINE },
+ { 0x60200, 1, RI_ALL_ONLINE },
+ { 0x60204, 2, RI_ALL_OFFLINE },
+ { 0x60210, 13, RI_E2E3E3B0_ONLINE },
+ { 0x60244, 16, RI_E3B0_ONLINE },
+ { 0x61000, 1, RI_ALL_ONLINE },
+ { 0x61004, 511, RI_ALL_OFFLINE },
+ { 0x61800, 512, RI_E3E3B0_OFFLINE },
+ { 0x70000, 8, RI_ALL_ONLINE },
+ { 0x70020, 8184, RI_ALL_OFFLINE },
+ { 0x78000, 8192, RI_E3E3B0_OFFLINE },
+ { 0x85000, 3, RI_ALL_ONLINE },
+ { 0x8501c, 7, RI_ALL_ONLINE },
+ { 0x85048, 1, RI_ALL_ONLINE },
+ { 0x85200, 32, RI_ALL_ONLINE },
+ { 0xb0000, 16384, RI_E1H_ONLINE },
+ { 0xc1000, 7, RI_ALL_ONLINE },
+ { 0xc103c, 2, RI_E2E3E3B0_ONLINE },
+ { 0xc1800, 2, RI_ALL_ONLINE },
+ { 0xc2000, 164, RI_ALL_ONLINE },
+ { 0xc22c0, 5, RI_E2E3E3B0_ONLINE },
+ { 0xc22d8, 4, RI_E2E3E3B0_ONLINE },
+ { 0xc2400, 49, RI_ALL_ONLINE },
+ { 0xc24c8, 38, RI_ALL_ONLINE },
+ { 0xc2568, 2, RI_ALL_ONLINE },
+ { 0xc2600, 1, RI_ALL_ONLINE },
+ { 0xc4000, 165, RI_ALL_ONLINE },
+ { 0xc42d8, 2, RI_E2E3E3B0_ONLINE },
+ { 0xc42e0, 7, RI_E1HE2E3E3B0_ONLINE },
+ { 0xc42fc, 1, RI_E2E3E3B0_ONLINE },
+ { 0xc4400, 51, RI_ALL_ONLINE },
+ { 0xc44d0, 38, RI_ALL_ONLINE },
+ { 0xc4570, 2, RI_ALL_ONLINE },
+ { 0xc4578, 5, RI_E2E3E3B0_ONLINE },
+ { 0xc4600, 1, RI_ALL_ONLINE },
+ { 0xd0000, 19, RI_ALL_ONLINE },
+ { 0xd004c, 8, RI_ALL_ONLINE },
+ { 0xd006c, 91, RI_ALL_ONLINE },
+ { 0xd01fc, 1, RI_E2E3E3B0_ONLINE },
+ { 0xd0200, 2, RI_ALL_ONLINE },
+ { 0xd020c, 7, RI_ALL_ONLINE },
+ { 0xd0228, 18, RI_E1HE2E3E3B0_ONLINE },
+ { 0xd0280, 1, RI_ALL_ONLINE },
+ { 0xd0300, 1, RI_ALL_ONLINE },
+ { 0xd0400, 1, RI_ALL_ONLINE },
+ { 0xd0818, 1, RI_E3B0_ONLINE },
+ { 0xd4000, 1, RI_ALL_ONLINE },
+ { 0xd4004, 2559, RI_ALL_OFFLINE },
+ { 0xd8000, 1, RI_ALL_ONLINE },
+ { 0xd8004, 8191, RI_ALL_OFFLINE },
+ { 0xe0000, 21, RI_ALL_ONLINE },
+ { 0xe0054, 8, RI_ALL_ONLINE },
+ { 0xe0074, 49, RI_ALL_ONLINE },
+ { 0xe0138, 1, RI_E1E1H_ONLINE },
+ { 0xe013c, 35, RI_ALL_ONLINE },
+ { 0xe01f4, 1, RI_E2_ONLINE },
+ { 0xe01f8, 1, RI_E2E3E3B0_ONLINE },
+ { 0xe0200, 2, RI_ALL_ONLINE },
+ { 0xe020c, 8, RI_ALL_ONLINE },
+ { 0xe022c, 18, RI_E1HE2E3E3B0_ONLINE },
+ { 0xe0280, 1, RI_ALL_ONLINE },
+ { 0xe0300, 1, RI_ALL_ONLINE },
+ { 0xe0400, 1, RI_E3B0_ONLINE },
+ { 0xe1000, 1, RI_ALL_ONLINE },
+ { 0xe2000, 1, RI_ALL_ONLINE },
+ { 0xe2004, 2047, RI_ALL_OFFLINE },
+ { 0xf0000, 1, RI_ALL_ONLINE },
+ { 0xf0004, 16383, RI_ALL_OFFLINE },
+ { 0x101000, 12, RI_ALL_ONLINE },
+ { 0x101050, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x101054, 3, RI_E2E3E3B0_ONLINE },
+ { 0x101100, 1, RI_ALL_ONLINE },
+ { 0x101800, 8, RI_ALL_ONLINE },
+ { 0x102000, 18, RI_ALL_ONLINE },
+ { 0x102068, 6, RI_E2E3E3B0_ONLINE },
+ { 0x102080, 17, RI_ALL_ONLINE },
+ { 0x1020c8, 8, RI_E1H_ONLINE },
+ { 0x1020e8, 9, RI_E2E3E3B0_ONLINE },
+ { 0x102400, 1, RI_ALL_ONLINE },
+ { 0x103000, 26, RI_ALL_ONLINE },
+ { 0x103098, 5, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1030ac, 2, RI_E2E3E3B0_ONLINE },
+ { 0x1030b4, 1, RI_E2_ONLINE },
+ { 0x1030b8, 7, RI_E2E3E3B0_ONLINE },
+ { 0x1030d8, 8, RI_E2E3E3B0_ONLINE },
+ { 0x103400, 1, RI_E2E3E3B0_ONLINE },
+ { 0x103404, 135, RI_E2E3E3B0_OFFLINE },
+ { 0x103800, 8, RI_ALL_ONLINE },
+ { 0x104000, 63, RI_ALL_ONLINE },
+ { 0x10411c, 16, RI_E2E3E3B0_ONLINE },
+ { 0x104200, 17, RI_ALL_ONLINE },
+ { 0x104400, 64, RI_ALL_ONLINE },
+ { 0x104500, 192, RI_ALL_OFFLINE },
+ { 0x104800, 64, RI_ALL_ONLINE },
+ { 0x104900, 192, RI_ALL_OFFLINE },
+ { 0x105000, 256, RI_ALL_ONLINE },
+ { 0x105400, 768, RI_ALL_OFFLINE },
+ { 0x107000, 7, RI_E2E3E3B0_ONLINE },
+ { 0x10701c, 1, RI_E3E3B0_ONLINE },
+ { 0x108000, 33, RI_E1E1H_ONLINE },
+ { 0x1080ac, 5, RI_E1H_ONLINE },
+ { 0x108100, 5, RI_E1E1H_ONLINE },
+ { 0x108120, 5, RI_E1E1H_ONLINE },
+ { 0x108200, 74, RI_E1E1H_ONLINE },
+ { 0x108400, 74, RI_E1E1H_ONLINE },
+ { 0x108800, 152, RI_E1E1H_ONLINE },
+ { 0x110000, 111, RI_E2E3E3B0_ONLINE },
+ { 0x1101dc, 1, RI_E3E3B0_ONLINE },
+ { 0x110200, 4, RI_E2E3E3B0_ONLINE },
+ { 0x120000, 2, RI_ALL_ONLINE },
+ { 0x120008, 4, RI_ALL_ONLINE },
+ { 0x120018, 3, RI_ALL_ONLINE },
+ { 0x120024, 4, RI_ALL_ONLINE },
+ { 0x120034, 3, RI_ALL_ONLINE },
+ { 0x120040, 4, RI_ALL_ONLINE },
+ { 0x120050, 3, RI_ALL_ONLINE },
+ { 0x12005c, 4, RI_ALL_ONLINE },
+ { 0x12006c, 3, RI_ALL_ONLINE },
+ { 0x120078, 4, RI_ALL_ONLINE },
+ { 0x120088, 3, RI_ALL_ONLINE },
+ { 0x120094, 4, RI_ALL_ONLINE },
+ { 0x1200a4, 3, RI_ALL_ONLINE },
+ { 0x1200b0, 4, RI_ALL_ONLINE },
+ { 0x1200c0, 3, RI_ALL_ONLINE },
+ { 0x1200cc, 4, RI_ALL_ONLINE },
+ { 0x1200dc, 3, RI_ALL_ONLINE },
+ { 0x1200e8, 4, RI_ALL_ONLINE },
+ { 0x1200f8, 3, RI_ALL_ONLINE },
+ { 0x120104, 4, RI_ALL_ONLINE },
+ { 0x120114, 1, RI_ALL_ONLINE },
+ { 0x120118, 22, RI_ALL_ONLINE },
+ { 0x120170, 2, RI_E1E1H_ONLINE },
+ { 0x120178, 243, RI_ALL_ONLINE },
+ { 0x120544, 4, RI_E1E1H_ONLINE },
+ { 0x120554, 6, RI_ALL_ONLINE },
+ { 0x12059c, 6, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1205b4, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1205b8, 15, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1205f4, 1, RI_E1HE2_ONLINE },
+ { 0x1205f8, 4, RI_E2E3E3B0_ONLINE },
+ { 0x120618, 1, RI_E2E3E3B0_ONLINE },
+ { 0x12061c, 20, RI_E1HE2E3E3B0_ONLINE },
+ { 0x12066c, 11, RI_E1HE2E3E3B0_ONLINE },
+ { 0x120698, 3, RI_E2E3E3B0_ONLINE },
+ { 0x1206a4, 1, RI_E2_ONLINE },
+ { 0x1206a8, 1, RI_E2E3E3B0_ONLINE },
+ { 0x1206b0, 75, RI_E2E3E3B0_ONLINE },
+ { 0x1207dc, 1, RI_E2_ONLINE },
+ { 0x1207fc, 1, RI_E2E3E3B0_ONLINE },
+ { 0x12080c, 65, RI_ALL_ONLINE },
+ { 0x120910, 7, RI_E2E3E3B0_ONLINE },
+ { 0x120930, 9, RI_E2E3E3B0_ONLINE },
+ { 0x12095c, 37, RI_E3E3B0_ONLINE },
+ { 0x120a00, 2, RI_E1E1HE2_ONLINE },
+ { 0x120b00, 1, RI_E3E3B0_ONLINE },
+ { 0x122000, 2, RI_ALL_ONLINE },
+ { 0x122008, 2046, RI_E1_OFFLINE },
+ { 0x128000, 2, RI_E1HE2E3E3B0_ONLINE },
+ { 0x128008, 6142, RI_E1HE2E3E3B0_OFFLINE },
+ { 0x130000, 35, RI_E2E3E3B0_ONLINE },
+ { 0x130100, 29, RI_E2E3E3B0_ONLINE },
+ { 0x130180, 1, RI_E2E3E3B0_ONLINE },
+ { 0x130200, 1, RI_E2E3E3B0_ONLINE },
+ { 0x130280, 1, RI_E2E3E3B0_ONLINE },
+ { 0x130300, 5, RI_E2E3E3B0_ONLINE },
+ { 0x130380, 1, RI_E2E3E3B0_ONLINE },
+ { 0x130400, 1, RI_E2E3E3B0_ONLINE },
+ { 0x130480, 5, RI_E2E3E3B0_ONLINE },
+ { 0x130800, 72, RI_E2E3E3B0_ONLINE },
+ { 0x131000, 136, RI_E2E3E3B0_ONLINE },
+ { 0x132000, 148, RI_E2E3E3B0_ONLINE },
+ { 0x134000, 544, RI_E2E3E3B0_ONLINE },
+ { 0x140000, 1, RI_ALL_ONLINE },
+ { 0x140004, 9, RI_E1E1HE2E3_ONLINE },
+ { 0x140028, 8, RI_ALL_ONLINE },
+ { 0x140048, 10, RI_E1E1HE2E3_ONLINE },
+ { 0x140070, 1, RI_ALL_ONLINE },
+ { 0x140074, 10, RI_E1E1HE2E3_ONLINE },
+ { 0x14009c, 1, RI_ALL_ONLINE },
+ { 0x1400a0, 5, RI_E1E1HE2E3_ONLINE },
+ { 0x1400b4, 7, RI_ALL_ONLINE },
+ { 0x1400d0, 10, RI_E1E1HE2E3_ONLINE },
+ { 0x1400f8, 2, RI_ALL_ONLINE },
+ { 0x140100, 5, RI_E1E1H_ONLINE },
+ { 0x140114, 5, RI_E1E1HE2E3_ONLINE },
+ { 0x140128, 7, RI_ALL_ONLINE },
+ { 0x140144, 9, RI_E1E1HE2E3_ONLINE },
+ { 0x140168, 8, RI_ALL_ONLINE },
+ { 0x140188, 3, RI_E1E1HE2E3_ONLINE },
+ { 0x140194, 13, RI_ALL_ONLINE },
+ { 0x140200, 6, RI_E1E1HE2E3_ONLINE },
+ { 0x140220, 4, RI_E2E3_ONLINE },
+ { 0x140240, 4, RI_E2E3_ONLINE },
+ { 0x140260, 4, RI_E2E3_ONLINE },
+ { 0x140280, 4, RI_E2E3_ONLINE },
+ { 0x1402a0, 4, RI_E2E3_ONLINE },
+ { 0x1402c0, 4, RI_E2E3_ONLINE },
+ { 0x1402e0, 2, RI_E2E3_ONLINE },
+ { 0x1402e8, 2, RI_E2E3E3B0_ONLINE },
+ { 0x1402f0, 9, RI_E2E3_ONLINE },
+ { 0x140314, 44, RI_E3B0_ONLINE },
+ { 0x1403d0, 70, RI_E3B0_ONLINE },
+ { 0x144000, 4, RI_E1E1H_ONLINE },
+ { 0x148000, 4, RI_E1E1H_ONLINE },
+ { 0x14c000, 4, RI_E1E1H_ONLINE },
+ { 0x150000, 4, RI_E1E1H_ONLINE },
+ { 0x154000, 4, RI_E1E1H_ONLINE },
+ { 0x158000, 4, RI_E1E1H_ONLINE },
+ { 0x15c000, 2, RI_E1HE2E3E3B0_ONLINE },
+ { 0x15c008, 5, RI_E1H_ONLINE },
+ { 0x15c020, 8, RI_E2E3E3B0_ONLINE },
+ { 0x15c040, 1, RI_E2E3_ONLINE },
+ { 0x15c044, 2, RI_E2E3E3B0_ONLINE },
+ { 0x15c04c, 8, RI_E2E3_ONLINE },
+ { 0x15c06c, 8, RI_E2E3E3B0_ONLINE },
+ { 0x15c090, 13, RI_E2E3E3B0_ONLINE },
+ { 0x15c0c8, 24, RI_E2E3E3B0_ONLINE },
+ { 0x15c128, 2, RI_E2E3_ONLINE },
+ { 0x15c130, 8, RI_E2E3E3B0_ONLINE },
+ { 0x15c150, 2, RI_E3E3B0_ONLINE },
+ { 0x15c158, 2, RI_E3_ONLINE },
+ { 0x15c160, 149, RI_E3B0_ONLINE },
+ { 0x161000, 7, RI_ALL_ONLINE },
+ { 0x16103c, 2, RI_E2E3E3B0_ONLINE },
+ { 0x161800, 2, RI_ALL_ONLINE },
+ { 0x162000, 54, RI_E3E3B0_ONLINE },
+ { 0x162200, 60, RI_E3E3B0_ONLINE },
+ { 0x162400, 54, RI_E3E3B0_ONLINE },
+ { 0x162600, 60, RI_E3E3B0_ONLINE },
+ { 0x162800, 54, RI_E3E3B0_ONLINE },
+ { 0x162a00, 60, RI_E3E3B0_ONLINE },
+ { 0x162c00, 54, RI_E3E3B0_ONLINE },
+ { 0x162e00, 60, RI_E3E3B0_ONLINE },
+ { 0x164000, 60, RI_ALL_ONLINE },
+ { 0x164110, 2, RI_E1HE2E3E3B0_ONLINE },
+ { 0x164118, 15, RI_E2E3E3B0_ONLINE },
+ { 0x164200, 1, RI_ALL_ONLINE },
+ { 0x164208, 1, RI_ALL_ONLINE },
+ { 0x164210, 1, RI_ALL_ONLINE },
+ { 0x164218, 1, RI_ALL_ONLINE },
+ { 0x164220, 1, RI_ALL_ONLINE },
+ { 0x164228, 1, RI_ALL_ONLINE },
+ { 0x164230, 1, RI_ALL_ONLINE },
+ { 0x164238, 1, RI_ALL_ONLINE },
+ { 0x164240, 1, RI_ALL_ONLINE },
+ { 0x164248, 1, RI_ALL_ONLINE },
+ { 0x164250, 1, RI_ALL_ONLINE },
+ { 0x164258, 1, RI_ALL_ONLINE },
+ { 0x164260, 1, RI_ALL_ONLINE },
+ { 0x164270, 2, RI_ALL_ONLINE },
+ { 0x164280, 2, RI_ALL_ONLINE },
+ { 0x164800, 2, RI_ALL_ONLINE },
+ { 0x165000, 2, RI_ALL_ONLINE },
+ { 0x166000, 164, RI_ALL_ONLINE },
+ { 0x1662cc, 7, RI_E2E3E3B0_ONLINE },
+ { 0x166400, 49, RI_ALL_ONLINE },
+ { 0x1664c8, 38, RI_ALL_ONLINE },
+ { 0x166568, 2, RI_ALL_ONLINE },
+ { 0x166570, 5, RI_E2E3E3B0_ONLINE },
+ { 0x166800, 1, RI_ALL_ONLINE },
+ { 0x168000, 137, RI_ALL_ONLINE },
+ { 0x168224, 2, RI_E1E1H_ONLINE },
+ { 0x16822c, 29, RI_ALL_ONLINE },
+ { 0x1682a0, 12, RI_E1E1H_ONLINE },
+ { 0x1682d0, 12, RI_ALL_ONLINE },
+ { 0x168300, 2, RI_E1E1H_ONLINE },
+ { 0x168308, 68, RI_ALL_ONLINE },
+ { 0x168418, 2, RI_E1E1H_ONLINE },
+ { 0x168420, 6, RI_ALL_ONLINE },
+ { 0x168800, 19, RI_ALL_ONLINE },
+ { 0x168900, 1, RI_ALL_ONLINE },
+ { 0x168a00, 128, RI_ALL_ONLINE },
+ { 0x16a000, 1, RI_ALL_ONLINE },
+ { 0x16a004, 1535, RI_ALL_OFFLINE },
+ { 0x16c000, 1, RI_ALL_ONLINE },
+ { 0x16c004, 1535, RI_ALL_OFFLINE },
+ { 0x16e000, 16, RI_E1H_ONLINE },
+ { 0x16e040, 8, RI_E2E3E3B0_ONLINE },
+ { 0x16e100, 1, RI_E1H_ONLINE },
+ { 0x16e200, 2, RI_E1H_ONLINE },
+ { 0x16e400, 161, RI_E1H_ONLINE },
+ { 0x16e684, 2, RI_E1HE2E3E3B0_ONLINE },
+ { 0x16e68c, 12, RI_E1H_ONLINE },
+ { 0x16e6bc, 4, RI_E1HE2E3E3B0_ONLINE },
+ { 0x16e6cc, 4, RI_E1H_ONLINE },
+ { 0x16e6e0, 2, RI_E2E3E3B0_ONLINE },
+ { 0x16e6e8, 5, RI_E2E3_ONLINE },
+ { 0x16e6fc, 5, RI_E2E3E3B0_ONLINE },
+ { 0x16e768, 17, RI_E2E3E3B0_ONLINE },
+ { 0x16e7ac, 12, RI_E3B0_ONLINE },
+ { 0x170000, 24, RI_ALL_ONLINE },
+ { 0x170060, 4, RI_E1E1H_ONLINE },
+ { 0x170070, 65, RI_ALL_ONLINE },
+ { 0x170194, 11, RI_E2E3E3B0_ONLINE },
+ { 0x1701c4, 1, RI_E2E3E3B0_ONLINE },
+ { 0x1701cc, 7, RI_E2E3E3B0_ONLINE },
+ { 0x1701e8, 1, RI_E3E3B0_ONLINE },
+ { 0x1701ec, 1, RI_E2E3E3B0_ONLINE },
+ { 0x1701f4, 1, RI_E2E3E3B0_ONLINE },
+ { 0x170200, 4, RI_ALL_ONLINE },
+ { 0x170214, 1, RI_ALL_ONLINE },
+ { 0x170218, 77, RI_E2E3E3B0_ONLINE },
+ { 0x170400, 64, RI_E2E3E3B0_ONLINE },
+ { 0x178000, 1, RI_ALL_ONLINE },
+ { 0x180000, 61, RI_ALL_ONLINE },
+ { 0x18013c, 2, RI_E1HE2E3E3B0_ONLINE },
+ { 0x180200, 58, RI_ALL_ONLINE },
+ { 0x180340, 4, RI_ALL_ONLINE },
+ { 0x180380, 1, RI_E2E3E3B0_ONLINE },
+ { 0x180388, 1, RI_E2E3E3B0_ONLINE },
+ { 0x180390, 1, RI_E2E3E3B0_ONLINE },
+ { 0x180398, 1, RI_E2E3E3B0_ONLINE },
+ { 0x1803a0, 5, RI_E2E3E3B0_ONLINE },
+ { 0x1803b4, 2, RI_E3E3B0_ONLINE },
+ { 0x180400, 1, RI_ALL_ONLINE },
+ { 0x180404, 255, RI_E1E1H_OFFLINE },
+ { 0x181000, 4, RI_ALL_ONLINE },
+ { 0x181010, 1020, RI_ALL_OFFLINE },
+ { 0x182000, 4, RI_E3E3B0_ONLINE },
+ { 0x1a0000, 1, RI_ALL_ONLINE },
+ { 0x1a0004, 5631, RI_ALL_OFFLINE },
+ { 0x1a5800, 2560, RI_E1HE2E3E3B0_OFFLINE },
+ { 0x1a8000, 1, RI_ALL_ONLINE },
+ { 0x1a8004, 8191, RI_E1HE2E3E3B0_OFFLINE },
+ { 0x1b0000, 1, RI_ALL_ONLINE },
+ { 0x1b0004, 15, RI_E1H_OFFLINE },
+ { 0x1b0040, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b0044, 239, RI_E1H_OFFLINE },
+ { 0x1b0400, 1, RI_ALL_ONLINE },
+ { 0x1b0404, 255, RI_E1H_OFFLINE },
+ { 0x1b0800, 1, RI_ALL_ONLINE },
+ { 0x1b0840, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b0c00, 1, RI_ALL_ONLINE },
+ { 0x1b1000, 1, RI_ALL_ONLINE },
+ { 0x1b1040, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b1400, 1, RI_ALL_ONLINE },
+ { 0x1b1440, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b1480, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b14c0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b1800, 128, RI_ALL_OFFLINE },
+ { 0x1b1c00, 128, RI_ALL_OFFLINE },
+ { 0x1b2000, 1, RI_ALL_ONLINE },
+ { 0x1b2400, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b2404, 5631, RI_E2E3E3B0_OFFLINE },
+ { 0x1b8000, 1, RI_ALL_ONLINE },
+ { 0x1b8040, 1, RI_ALL_ONLINE },
+ { 0x1b8080, 1, RI_ALL_ONLINE },
+ { 0x1b80c0, 1, RI_ALL_ONLINE },
+ { 0x1b8100, 1, RI_ALL_ONLINE },
+ { 0x1b8140, 1, RI_ALL_ONLINE },
+ { 0x1b8180, 1, RI_ALL_ONLINE },
+ { 0x1b81c0, 1, RI_ALL_ONLINE },
+ { 0x1b8200, 1, RI_ALL_ONLINE },
+ { 0x1b8240, 1, RI_ALL_ONLINE },
+ { 0x1b8280, 1, RI_ALL_ONLINE },
+ { 0x1b82c0, 1, RI_ALL_ONLINE },
+ { 0x1b8300, 1, RI_ALL_ONLINE },
+ { 0x1b8340, 1, RI_ALL_ONLINE },
+ { 0x1b8380, 1, RI_ALL_ONLINE },
+ { 0x1b83c0, 1, RI_ALL_ONLINE },
+ { 0x1b8400, 1, RI_ALL_ONLINE },
+ { 0x1b8440, 1, RI_ALL_ONLINE },
+ { 0x1b8480, 1, RI_ALL_ONLINE },
+ { 0x1b84c0, 1, RI_ALL_ONLINE },
+ { 0x1b8500, 1, RI_ALL_ONLINE },
+ { 0x1b8540, 1, RI_ALL_ONLINE },
+ { 0x1b8580, 1, RI_ALL_ONLINE },
+ { 0x1b85c0, 19, RI_E2E3E3B0_ONLINE },
+ { 0x1b8800, 1, RI_ALL_ONLINE },
+ { 0x1b8840, 1, RI_ALL_ONLINE },
+ { 0x1b8880, 1, RI_ALL_ONLINE },
+ { 0x1b88c0, 1, RI_ALL_ONLINE },
+ { 0x1b8900, 1, RI_ALL_ONLINE },
+ { 0x1b8940, 1, RI_ALL_ONLINE },
+ { 0x1b8980, 1, RI_ALL_ONLINE },
+ { 0x1b89c0, 1, RI_ALL_ONLINE },
+ { 0x1b8a00, 1, RI_ALL_ONLINE },
+ { 0x1b8a40, 1, RI_ALL_ONLINE },
+ { 0x1b8a80, 1, RI_ALL_ONLINE },
+ { 0x1b8ac0, 1, RI_ALL_ONLINE },
+ { 0x1b8b00, 1, RI_ALL_ONLINE },
+ { 0x1b8b40, 1, RI_ALL_ONLINE },
+ { 0x1b8b80, 1, RI_ALL_ONLINE },
+ { 0x1b8bc0, 1, RI_ALL_ONLINE },
+ { 0x1b8c00, 1, RI_ALL_ONLINE },
+ { 0x1b8c40, 1, RI_ALL_ONLINE },
+ { 0x1b8c80, 1, RI_ALL_ONLINE },
+ { 0x1b8cc0, 1, RI_ALL_ONLINE },
+ { 0x1b8cc4, 1, RI_E2E3E3B0_ONLINE },
+ { 0x1b8d00, 1, RI_ALL_ONLINE },
+ { 0x1b8d40, 1, RI_ALL_ONLINE },
+ { 0x1b8d80, 1, RI_ALL_ONLINE },
+ { 0x1b8dc0, 1, RI_ALL_ONLINE },
+ { 0x1b8e00, 1, RI_ALL_ONLINE },
+ { 0x1b8e40, 1, RI_ALL_ONLINE },
+ { 0x1b8e80, 1, RI_ALL_ONLINE },
+ { 0x1b8e84, 1, RI_E2E3E3B0_ONLINE },
+ { 0x1b8ec0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b8f00, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b8f40, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b8f80, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b8fc0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b8fc4, 2, RI_E2E3E3B0_ONLINE },
+ { 0x1b8fd0, 6, RI_E2E3E3B0_ONLINE },
+ { 0x1b8fe8, 2, RI_E3E3B0_ONLINE },
+ { 0x1b9000, 1, RI_E2E3E3B0_ONLINE },
+ { 0x1b9040, 3, RI_E2E3E3B0_ONLINE },
+ { 0x1b905c, 1, RI_E3E3B0_ONLINE },
+ { 0x1b9064, 1, RI_E3B0_ONLINE },
+ { 0x1b9080, 10, RI_E3B0_ONLINE },
+ { 0x1b9400, 14, RI_E2E3E3B0_ONLINE },
+ { 0x1b943c, 19, RI_E2E3E3B0_ONLINE },
+ { 0x1b9490, 10, RI_E2E3E3B0_ONLINE },
+ { 0x1c0000, 2, RI_ALL_ONLINE },
+ { 0x200000, 65, RI_ALL_ONLINE },
+ { 0x20014c, 2, RI_E1HE2E3E3B0_ONLINE },
+ { 0x200200, 58, RI_ALL_ONLINE },
+ { 0x200340, 4, RI_ALL_ONLINE },
+ { 0x200380, 1, RI_E2E3E3B0_ONLINE },
+ { 0x200388, 1, RI_E2E3E3B0_ONLINE },
+ { 0x200390, 1, RI_E2E3E3B0_ONLINE },
+ { 0x200398, 1, RI_E2E3E3B0_ONLINE },
+ { 0x2003a0, 1, RI_E2E3E3B0_ONLINE },
+ { 0x2003a8, 2, RI_E2E3E3B0_ONLINE },
+ { 0x200400, 1, RI_ALL_ONLINE },
+ { 0x200404, 255, RI_E1E1H_OFFLINE },
+ { 0x202000, 4, RI_ALL_ONLINE },
+ { 0x202010, 2044, RI_ALL_OFFLINE },
+ { 0x204000, 4, RI_E3E3B0_ONLINE },
+ { 0x220000, 1, RI_ALL_ONLINE },
+ { 0x220004, 5631, RI_ALL_OFFLINE },
+ { 0x225800, 2560, RI_E1HE2E3E3B0_OFFLINE },
+ { 0x228000, 1, RI_ALL_ONLINE },
+ { 0x228004, 8191, RI_E1HE2E3E3B0_OFFLINE },
+ { 0x230000, 1, RI_ALL_ONLINE },
+ { 0x230004, 15, RI_E1H_OFFLINE },
+ { 0x230040, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x230044, 239, RI_E1H_OFFLINE },
+ { 0x230400, 1, RI_ALL_ONLINE },
+ { 0x230404, 255, RI_E1H_OFFLINE },
+ { 0x230800, 1, RI_ALL_ONLINE },
+ { 0x230840, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x230c00, 1, RI_ALL_ONLINE },
+ { 0x231000, 1, RI_ALL_ONLINE },
+ { 0x231040, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x231400, 1, RI_ALL_ONLINE },
+ { 0x231440, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x231480, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2314c0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x231800, 128, RI_ALL_OFFLINE },
+ { 0x231c00, 128, RI_ALL_OFFLINE },
+ { 0x232000, 1, RI_ALL_ONLINE },
+ { 0x232400, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x232404, 5631, RI_E2E3E3B0_OFFLINE },
+ { 0x238000, 1, RI_ALL_ONLINE },
+ { 0x238040, 1, RI_ALL_ONLINE },
+ { 0x238080, 1, RI_ALL_ONLINE },
+ { 0x2380c0, 1, RI_ALL_ONLINE },
+ { 0x238100, 1, RI_ALL_ONLINE },
+ { 0x238140, 1, RI_ALL_ONLINE },
+ { 0x238180, 1, RI_ALL_ONLINE },
+ { 0x2381c0, 1, RI_ALL_ONLINE },
+ { 0x238200, 1, RI_ALL_ONLINE },
+ { 0x238240, 1, RI_ALL_ONLINE },
+ { 0x238280, 1, RI_ALL_ONLINE },
+ { 0x2382c0, 1, RI_ALL_ONLINE },
+ { 0x238300, 1, RI_ALL_ONLINE },
+ { 0x238340, 1, RI_ALL_ONLINE },
+ { 0x238380, 1, RI_ALL_ONLINE },
+ { 0x2383c0, 1, RI_ALL_ONLINE },
+ { 0x238400, 1, RI_ALL_ONLINE },
+ { 0x238440, 1, RI_ALL_ONLINE },
+ { 0x238480, 1, RI_ALL_ONLINE },
+ { 0x2384c0, 1, RI_ALL_ONLINE },
+ { 0x238500, 1, RI_ALL_ONLINE },
+ { 0x238540, 1, RI_ALL_ONLINE },
+ { 0x238580, 1, RI_ALL_ONLINE },
+ { 0x2385c0, 19, RI_E2E3E3B0_ONLINE },
+ { 0x238800, 1, RI_ALL_ONLINE },
+ { 0x238840, 1, RI_ALL_ONLINE },
+ { 0x238880, 1, RI_ALL_ONLINE },
+ { 0x2388c0, 1, RI_ALL_ONLINE },
+ { 0x238900, 1, RI_ALL_ONLINE },
+ { 0x238940, 1, RI_ALL_ONLINE },
+ { 0x238980, 1, RI_ALL_ONLINE },
+ { 0x2389c0, 1, RI_ALL_ONLINE },
+ { 0x238a00, 1, RI_ALL_ONLINE },
+ { 0x238a40, 1, RI_ALL_ONLINE },
+ { 0x238a80, 1, RI_ALL_ONLINE },
+ { 0x238ac0, 1, RI_ALL_ONLINE },
+ { 0x238b00, 1, RI_ALL_ONLINE },
+ { 0x238b40, 1, RI_ALL_ONLINE },
+ { 0x238b80, 1, RI_ALL_ONLINE },
+ { 0x238bc0, 1, RI_ALL_ONLINE },
+ { 0x238c00, 1, RI_ALL_ONLINE },
+ { 0x238c40, 1, RI_ALL_ONLINE },
+ { 0x238c80, 1, RI_ALL_ONLINE },
+ { 0x238cc0, 1, RI_ALL_ONLINE },
+ { 0x238cc4, 1, RI_E2E3E3B0_ONLINE },
+ { 0x238d00, 1, RI_ALL_ONLINE },
+ { 0x238d40, 1, RI_ALL_ONLINE },
+ { 0x238d80, 1, RI_ALL_ONLINE },
+ { 0x238dc0, 1, RI_ALL_ONLINE },
+ { 0x238e00, 1, RI_ALL_ONLINE },
+ { 0x238e40, 1, RI_ALL_ONLINE },
+ { 0x238e80, 1, RI_ALL_ONLINE },
+ { 0x238e84, 1, RI_E2E3E3B0_ONLINE },
+ { 0x238ec0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x238f00, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x238f40, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x238f80, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x238fc0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x238fc4, 2, RI_E2E3E3B0_ONLINE },
+ { 0x238fd0, 6, RI_E2E3E3B0_ONLINE },
+ { 0x238fe8, 2, RI_E3E3B0_ONLINE },
+ { 0x239000, 1, RI_E2E3E3B0_ONLINE },
+ { 0x239040, 3, RI_E2E3E3B0_ONLINE },
+ { 0x23905c, 1, RI_E3E3B0_ONLINE },
+ { 0x239064, 1, RI_E3B0_ONLINE },
+ { 0x239080, 10, RI_E3B0_ONLINE },
+ { 0x240000, 2, RI_ALL_ONLINE },
+ { 0x280000, 65, RI_ALL_ONLINE },
+ { 0x28014c, 2, RI_E1HE2E3E3B0_ONLINE },
+ { 0x280200, 58, RI_ALL_ONLINE },
+ { 0x280340, 4, RI_ALL_ONLINE },
+ { 0x280380, 1, RI_E2E3E3B0_ONLINE },
+ { 0x280388, 1, RI_E2E3E3B0_ONLINE },
+ { 0x280390, 1, RI_E2E3E3B0_ONLINE },
+ { 0x280398, 1, RI_E2E3E3B0_ONLINE },
+ { 0x2803a0, 1, RI_E2E3E3B0_ONLINE },
+ { 0x2803a8, 2, RI_E2E3E3B0_ONLINE },
+ { 0x280400, 1, RI_ALL_ONLINE },
+ { 0x280404, 255, RI_E1E1H_OFFLINE },
+ { 0x282000, 4, RI_ALL_ONLINE },
+ { 0x282010, 2044, RI_ALL_OFFLINE },
+ { 0x284000, 4, RI_E3E3B0_ONLINE },
+ { 0x2a0000, 1, RI_ALL_ONLINE },
+ { 0x2a0004, 5631, RI_ALL_OFFLINE },
+ { 0x2a5800, 2560, RI_E1HE2E3E3B0_OFFLINE },
+ { 0x2a8000, 1, RI_ALL_ONLINE },
+ { 0x2a8004, 8191, RI_E1HE2E3E3B0_OFFLINE },
+ { 0x2b0000, 1, RI_ALL_ONLINE },
+ { 0x2b0004, 15, RI_E1H_OFFLINE },
+ { 0x2b0040, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b0044, 239, RI_E1H_OFFLINE },
+ { 0x2b0400, 1, RI_ALL_ONLINE },
+ { 0x2b0404, 255, RI_E1H_OFFLINE },
+ { 0x2b0800, 1, RI_ALL_ONLINE },
+ { 0x2b0840, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b0c00, 1, RI_ALL_ONLINE },
+ { 0x2b1000, 1, RI_ALL_ONLINE },
+ { 0x2b1040, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b1400, 1, RI_ALL_ONLINE },
+ { 0x2b1440, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b1480, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b14c0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b1800, 128, RI_ALL_OFFLINE },
+ { 0x2b1c00, 128, RI_ALL_OFFLINE },
+ { 0x2b2000, 1, RI_ALL_ONLINE },
+ { 0x2b2400, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b2404, 5631, RI_E2E3E3B0_OFFLINE },
+ { 0x2b8000, 1, RI_ALL_ONLINE },
+ { 0x2b8040, 1, RI_ALL_ONLINE },
+ { 0x2b8080, 1, RI_ALL_ONLINE },
+ { 0x2b80c0, 1, RI_ALL_ONLINE },
+ { 0x2b8100, 1, RI_ALL_ONLINE },
+ { 0x2b8140, 1, RI_ALL_ONLINE },
+ { 0x2b8180, 1, RI_ALL_ONLINE },
+ { 0x2b81c0, 1, RI_ALL_ONLINE },
+ { 0x2b8200, 1, RI_ALL_ONLINE },
+ { 0x2b8240, 1, RI_ALL_ONLINE },
+ { 0x2b8280, 1, RI_ALL_ONLINE },
+ { 0x2b82c0, 1, RI_ALL_ONLINE },
+ { 0x2b8300, 1, RI_ALL_ONLINE },
+ { 0x2b8340, 1, RI_ALL_ONLINE },
+ { 0x2b8380, 1, RI_ALL_ONLINE },
+ { 0x2b83c0, 1, RI_ALL_ONLINE },
+ { 0x2b8400, 1, RI_ALL_ONLINE },
+ { 0x2b8440, 1, RI_ALL_ONLINE },
+ { 0x2b8480, 1, RI_ALL_ONLINE },
+ { 0x2b84c0, 1, RI_ALL_ONLINE },
+ { 0x2b8500, 1, RI_ALL_ONLINE },
+ { 0x2b8540, 1, RI_ALL_ONLINE },
+ { 0x2b8580, 1, RI_ALL_ONLINE },
+ { 0x2b85c0, 19, RI_E2E3E3B0_ONLINE },
+ { 0x2b8800, 1, RI_ALL_ONLINE },
+ { 0x2b8840, 1, RI_ALL_ONLINE },
+ { 0x2b8880, 1, RI_ALL_ONLINE },
+ { 0x2b88c0, 1, RI_ALL_ONLINE },
+ { 0x2b8900, 1, RI_ALL_ONLINE },
+ { 0x2b8940, 1, RI_ALL_ONLINE },
+ { 0x2b8980, 1, RI_ALL_ONLINE },
+ { 0x2b89c0, 1, RI_ALL_ONLINE },
+ { 0x2b8a00, 1, RI_ALL_ONLINE },
+ { 0x2b8a40, 1, RI_ALL_ONLINE },
+ { 0x2b8a80, 1, RI_ALL_ONLINE },
+ { 0x2b8ac0, 1, RI_ALL_ONLINE },
+ { 0x2b8b00, 1, RI_ALL_ONLINE },
+ { 0x2b8b40, 1, RI_ALL_ONLINE },
+ { 0x2b8b80, 1, RI_ALL_ONLINE },
+ { 0x2b8bc0, 1, RI_ALL_ONLINE },
+ { 0x2b8c00, 1, RI_ALL_ONLINE },
+ { 0x2b8c40, 1, RI_ALL_ONLINE },
+ { 0x2b8c80, 1, RI_ALL_ONLINE },
+ { 0x2b8cc0, 1, RI_ALL_ONLINE },
+ { 0x2b8cc4, 1, RI_E2E3E3B0_ONLINE },
+ { 0x2b8d00, 1, RI_ALL_ONLINE },
+ { 0x2b8d40, 1, RI_ALL_ONLINE },
+ { 0x2b8d80, 1, RI_ALL_ONLINE },
+ { 0x2b8dc0, 1, RI_ALL_ONLINE },
+ { 0x2b8e00, 1, RI_ALL_ONLINE },
+ { 0x2b8e40, 1, RI_ALL_ONLINE },
+ { 0x2b8e80, 1, RI_ALL_ONLINE },
+ { 0x2b8e84, 1, RI_E2E3E3B0_ONLINE },
+ { 0x2b8ec0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b8f00, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b8f40, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b8f80, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b8fc0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b8fc4, 2, RI_E2E3E3B0_ONLINE },
+ { 0x2b8fd0, 6, RI_E2E3E3B0_ONLINE },
+ { 0x2b8fe8, 2, RI_E3E3B0_ONLINE },
+ { 0x2b9000, 1, RI_E2E3E3B0_ONLINE },
+ { 0x2b9040, 3, RI_E2E3E3B0_ONLINE },
+ { 0x2b905c, 1, RI_E3E3B0_ONLINE },
+ { 0x2b9064, 1, RI_E3B0_ONLINE },
+ { 0x2b9080, 10, RI_E3B0_ONLINE },
+ { 0x2b9400, 14, RI_E2E3E3B0_ONLINE },
+ { 0x2b943c, 19, RI_E2E3E3B0_ONLINE },
+ { 0x2b9490, 10, RI_E2E3E3B0_ONLINE },
+ { 0x2c0000, 2, RI_ALL_ONLINE },
+ { 0x300000, 65, RI_ALL_ONLINE },
+ { 0x30014c, 2, RI_E1HE2E3E3B0_ONLINE },
+ { 0x300200, 58, RI_ALL_ONLINE },
+ { 0x300340, 4, RI_ALL_ONLINE },
+ { 0x300380, 1, RI_E2E3E3B0_ONLINE },
+ { 0x300388, 1, RI_E2E3E3B0_ONLINE },
+ { 0x300390, 1, RI_E2E3E3B0_ONLINE },
+ { 0x300398, 1, RI_E2E3E3B0_ONLINE },
+ { 0x3003a0, 1, RI_E2E3E3B0_ONLINE },
+ { 0x3003a8, 2, RI_E2E3E3B0_ONLINE },
+ { 0x300400, 1, RI_ALL_ONLINE },
+ { 0x300404, 255, RI_E1E1H_OFFLINE },
+ { 0x302000, 4, RI_ALL_ONLINE },
+ { 0x302010, 2044, RI_ALL_OFFLINE },
+ { 0x304000, 4, RI_E3E3B0_ONLINE },
+ { 0x320000, 1, RI_ALL_ONLINE },
+ { 0x320004, 5631, RI_ALL_OFFLINE },
+ { 0x325800, 2560, RI_E1HE2E3E3B0_OFFLINE },
+ { 0x328000, 1, RI_ALL_ONLINE },
+ { 0x328004, 8191, RI_E1HE2E3E3B0_OFFLINE },
+ { 0x330000, 1, RI_ALL_ONLINE },
+ { 0x330004, 15, RI_E1H_OFFLINE },
+ { 0x330040, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x330044, 239, RI_E1H_OFFLINE },
+ { 0x330400, 1, RI_ALL_ONLINE },
+ { 0x330404, 255, RI_E1H_OFFLINE },
+ { 0x330800, 1, RI_ALL_ONLINE },
+ { 0x330840, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x330c00, 1, RI_ALL_ONLINE },
+ { 0x331000, 1, RI_ALL_ONLINE },
+ { 0x331040, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x331400, 1, RI_ALL_ONLINE },
+ { 0x331440, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x331480, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x3314c0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x331800, 128, RI_ALL_OFFLINE },
+ { 0x331c00, 128, RI_ALL_OFFLINE },
+ { 0x332000, 1, RI_ALL_ONLINE },
+ { 0x332400, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x332404, 5631, RI_E2E3E3B0_OFFLINE },
+ { 0x338000, 1, RI_ALL_ONLINE },
+ { 0x338040, 1, RI_ALL_ONLINE },
+ { 0x338080, 1, RI_ALL_ONLINE },
+ { 0x3380c0, 1, RI_ALL_ONLINE },
+ { 0x338100, 1, RI_ALL_ONLINE },
+ { 0x338140, 1, RI_ALL_ONLINE },
+ { 0x338180, 1, RI_ALL_ONLINE },
+ { 0x3381c0, 1, RI_ALL_ONLINE },
+ { 0x338200, 1, RI_ALL_ONLINE },
+ { 0x338240, 1, RI_ALL_ONLINE },
+ { 0x338280, 1, RI_ALL_ONLINE },
+ { 0x3382c0, 1, RI_ALL_ONLINE },
+ { 0x338300, 1, RI_ALL_ONLINE },
+ { 0x338340, 1, RI_ALL_ONLINE },
+ { 0x338380, 1, RI_ALL_ONLINE },
+ { 0x3383c0, 1, RI_ALL_ONLINE },
+ { 0x338400, 1, RI_ALL_ONLINE },
+ { 0x338440, 1, RI_ALL_ONLINE },
+ { 0x338480, 1, RI_ALL_ONLINE },
+ { 0x3384c0, 1, RI_ALL_ONLINE },
+ { 0x338500, 1, RI_ALL_ONLINE },
+ { 0x338540, 1, RI_ALL_ONLINE },
+ { 0x338580, 1, RI_ALL_ONLINE },
+ { 0x3385c0, 19, RI_E2E3E3B0_ONLINE },
+ { 0x338800, 1, RI_ALL_ONLINE },
+ { 0x338840, 1, RI_ALL_ONLINE },
+ { 0x338880, 1, RI_ALL_ONLINE },
+ { 0x3388c0, 1, RI_ALL_ONLINE },
+ { 0x338900, 1, RI_ALL_ONLINE },
+ { 0x338940, 1, RI_ALL_ONLINE },
+ { 0x338980, 1, RI_ALL_ONLINE },
+ { 0x3389c0, 1, RI_ALL_ONLINE },
+ { 0x338a00, 1, RI_ALL_ONLINE },
+ { 0x338a40, 1, RI_ALL_ONLINE },
+ { 0x338a80, 1, RI_ALL_ONLINE },
+ { 0x338ac0, 1, RI_ALL_ONLINE },
+ { 0x338b00, 1, RI_ALL_ONLINE },
+ { 0x338b40, 1, RI_ALL_ONLINE },
+ { 0x338b80, 1, RI_ALL_ONLINE },
+ { 0x338bc0, 1, RI_ALL_ONLINE },
+ { 0x338c00, 1, RI_ALL_ONLINE },
+ { 0x338c40, 1, RI_ALL_ONLINE },
+ { 0x338c80, 1, RI_ALL_ONLINE },
+ { 0x338cc0, 1, RI_ALL_ONLINE },
+ { 0x338cc4, 1, RI_E2E3E3B0_ONLINE },
+ { 0x338d00, 1, RI_ALL_ONLINE },
+ { 0x338d40, 1, RI_ALL_ONLINE },
+ { 0x338d80, 1, RI_ALL_ONLINE },
+ { 0x338dc0, 1, RI_ALL_ONLINE },
+ { 0x338e00, 1, RI_ALL_ONLINE },
+ { 0x338e40, 1, RI_ALL_ONLINE },
+ { 0x338e80, 1, RI_ALL_ONLINE },
+ { 0x338e84, 1, RI_E2E3E3B0_ONLINE },
+ { 0x338ec0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x338f00, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x338f40, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x338f80, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x338fc0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x338fc4, 2, RI_E2E3E3B0_ONLINE },
+ { 0x338fd0, 6, RI_E2E3E3B0_ONLINE },
+ { 0x338fe8, 2, RI_E3E3B0_ONLINE },
+ { 0x339000, 1, RI_E2E3E3B0_ONLINE },
+ { 0x339040, 3, RI_E2E3E3B0_ONLINE },
+ { 0x33905c, 1, RI_E3E3B0_ONLINE },
+ { 0x339064, 1, RI_E3B0_ONLINE },
+ { 0x339080, 10, RI_E3B0_ONLINE },
+ { 0x340000, 2, RI_ALL_ONLINE },
+};
+#define REGS_COUNT ARRAY_SIZE(reg_addrs)
+
+static const struct dump_sign dump_sign_all = { 0x4e23fde1, 0x70017, 0x3a };
+
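+/* Paged register access: each value in page_vals_* is written to every
+ * address in page_write_regs_* to select a page, then the ranges in
+ * page_read_regs_* are dumped; see bnx2x_read_pages_regs() in
+ * bnx2x_ethtool.c.
+ */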
+static const u32 page_vals_e2[] = { 0, 128 };
+#define PAGE_MODE_VALUES_E2 ARRAY_SIZE(page_vals_e2)
+
+static const u32 page_write_regs_e2[] = { 328476 };
+#define PAGE_WRITE_REGS_E2 ARRAY_SIZE(page_write_regs_e2)
+
+static const struct reg_addr page_read_regs_e2[] = {
+ { 0x58000, 4608, RI_E2_ONLINE } };
+#define PAGE_READ_REGS_E2 ARRAY_SIZE(page_read_regs_e2)
+
+static const u32 page_vals_e3[] = { 0, 128 };
+#define PAGE_MODE_VALUES_E3 ARRAY_SIZE(page_vals_e3)
+
+static const u32 page_write_regs_e3[] = { 328476 };
+#define PAGE_WRITE_REGS_E3 ARRAY_SIZE(page_write_regs_e3)
+
+static const struct reg_addr page_read_regs_e3[] = {
+ { 0x58000, 4608, RI_E3E3B0_ONLINE } };
+#define PAGE_READ_REGS_E3 ARRAY_SIZE(page_read_regs_e3)
+
+#endif /* BNX2X_DUMP_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
new file mode 100644
index 000000000000..a49f8cfa2dc6
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -0,0 +1,2392 @@
+/* bnx2x_ethtool.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/crc32.h>
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_dump.h"
+#include "bnx2x_init.h"
+#include "bnx2x_sp.h"
+
+/* Note: in the format strings below %s is replaced by the queue-name which is
+ * either its index or 'fcoe' for the fcoe queue. Make sure the format string
+ * length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2
+ */
+#define MAX_QUEUE_NAME_LEN 4
+static const struct {
+ long offset;
+ int size;
+ char string[ETH_GSTRING_LEN];
+} bnx2x_q_stats_arr[] = {
+/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
+ { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
+ 8, "[%s]: rx_ucast_packets" },
+ { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
+ 8, "[%s]: rx_mcast_packets" },
+ { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
+ 8, "[%s]: rx_bcast_packets" },
+ { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" },
+ { Q_STATS_OFFSET32(rx_err_discard_pkt),
+ 4, "[%s]: rx_phy_ip_err_discards"},
+ { Q_STATS_OFFSET32(rx_skb_alloc_failed),
+ 4, "[%s]: rx_skb_alloc_discard" },
+ { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
+
+ { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
+/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+ 8, "[%s]: tx_ucast_packets" },
+ { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+ 8, "[%s]: tx_mcast_packets" },
+ { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+ 8, "[%s]: tx_bcast_packets" },
+ { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
+ 8, "[%s]: tpa_aggregations" },
+ { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
+ 8, "[%s]: tpa_aggregated_frames"},
+ { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"}
+};
+
+#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
+
+static const struct {
+ long offset;
+ int size;
+ u32 flags;
+#define STATS_FLAGS_PORT 1
+#define STATS_FLAGS_FUNC 2
+#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
+ char string[ETH_GSTRING_LEN];
+} bnx2x_stats_arr[] = {
+/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
+ 8, STATS_FLAGS_BOTH, "rx_bytes" },
+ { STATS_OFFSET32(error_bytes_received_hi),
+ 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
+ { STATS_OFFSET32(total_unicast_packets_received_hi),
+ 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
+ { STATS_OFFSET32(total_multicast_packets_received_hi),
+ 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
+ { STATS_OFFSET32(total_broadcast_packets_received_hi),
+ 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
+ { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
+ 8, STATS_FLAGS_PORT, "rx_crc_errors" },
+ { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
+ 8, STATS_FLAGS_PORT, "rx_align_errors" },
+ { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
+ 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
+ { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
+ 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
+/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
+ 8, STATS_FLAGS_PORT, "rx_fragments" },
+ { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
+ 8, STATS_FLAGS_PORT, "rx_jabbers" },
+ { STATS_OFFSET32(no_buff_discard_hi),
+ 8, STATS_FLAGS_BOTH, "rx_discards" },
+ { STATS_OFFSET32(mac_filter_discard),
+ 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
+ { STATS_OFFSET32(mf_tag_discard),
+ 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
+ { STATS_OFFSET32(brb_drop_hi),
+ 8, STATS_FLAGS_PORT, "rx_brb_discard" },
+ { STATS_OFFSET32(brb_truncate_hi),
+ 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
+ { STATS_OFFSET32(pause_frames_received_hi),
+ 8, STATS_FLAGS_PORT, "rx_pause_frames" },
+ { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
+ 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
+ { STATS_OFFSET32(nig_timer_max),
+ 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
+/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
+ 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
+ { STATS_OFFSET32(rx_skb_alloc_failed),
+ 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
+ { STATS_OFFSET32(hw_csum_err),
+ 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
+
+ { STATS_OFFSET32(total_bytes_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_bytes" },
+ { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
+ 8, STATS_FLAGS_PORT, "tx_error_bytes" },
+ { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
+ { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
+ { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
+ { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
+ 8, STATS_FLAGS_PORT, "tx_mac_errors" },
+ { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
+ 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
+/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
+ 8, STATS_FLAGS_PORT, "tx_single_collisions" },
+ { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
+ 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
+ { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
+ 8, STATS_FLAGS_PORT, "tx_deferred" },
+ { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
+ 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
+ { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
+ 8, STATS_FLAGS_PORT, "tx_late_collisions" },
+ { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
+ 8, STATS_FLAGS_PORT, "tx_total_collisions" },
+ { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
+ { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
+ { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
+ { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
+/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
+ { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
+ { STATS_OFFSET32(etherstatspktsover1522octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
+ { STATS_OFFSET32(pause_frames_sent_hi),
+ 8, STATS_FLAGS_PORT, "tx_pause_frames" },
+ { STATS_OFFSET32(total_tpa_aggregations_hi),
+ 8, STATS_FLAGS_FUNC, "tpa_aggregations" },
+ { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
+ 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
+ { STATS_OFFSET32(total_tpa_bytes_hi),
+ 8, STATS_FLAGS_FUNC, "tpa_bytes"}
+};
+
+#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
+
+static int bnx2x_get_port_type(struct bnx2x *bp)
+{
+	int port_type;
+	u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
+
+	switch (bp->link_params.phy[phy_idx].media_type) {
+ case ETH_PHY_SFP_FIBER:
+ case ETH_PHY_XFP_FIBER:
+ case ETH_PHY_KR:
+ case ETH_PHY_CX4:
+ port_type = PORT_FIBRE;
+ break;
+ case ETH_PHY_DA_TWINAX:
+ port_type = PORT_DA;
+ break;
+ case ETH_PHY_BASE_T:
+ port_type = PORT_TP;
+ break;
+ case ETH_PHY_NOT_PRESENT:
+ port_type = PORT_NONE;
+ break;
+ case ETH_PHY_UNSPECIFIED:
+ default:
+ port_type = PORT_OTHER;
+ break;
+ }
+ return port_type;
+}
+
+static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+
+ /* Dual Media boards present all available port types */
+ cmd->supported = bp->port.supported[cfg_idx] |
+ (bp->port.supported[cfg_idx ^ 1] &
+ (SUPPORTED_TP | SUPPORTED_FIBRE));
+ cmd->advertising = bp->port.advertising[cfg_idx];
+
+ if ((bp->state == BNX2X_STATE_OPEN) &&
+ !(bp->flags & MF_FUNC_DIS) &&
+ (bp->link_vars.link_up)) {
+ ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
+ cmd->duplex = bp->link_vars.duplex;
+ } else {
+ ethtool_cmd_speed_set(
+ cmd, bp->link_params.req_line_speed[cfg_idx]);
+ cmd->duplex = bp->link_params.req_duplex[cfg_idx];
+ }
+
+ if (IS_MF(bp))
+ ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
+
+ cmd->port = bnx2x_get_port_type(bp);
+
+ cmd->phy_address = bp->mdio.prtad;
+ cmd->transceiver = XCVR_INTERNAL;
+
+ if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
+ cmd->autoneg = AUTONEG_ENABLE;
+ else
+ cmd->autoneg = AUTONEG_DISABLE;
+
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+
+ DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
+ " supported 0x%x advertising 0x%x speed %u\n"
+ " duplex %d port %d phy_address %d transceiver %d\n"
+ " autoneg %d maxtxpkt %d maxrxpkt %d\n",
+ cmd->cmd, cmd->supported, cmd->advertising,
+ ethtool_cmd_speed(cmd),
+ cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
+ cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+
+ return 0;
+}
+
+static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
+ u32 speed;
+
+ if (IS_MF_SD(bp))
+ return 0;
+
+ DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
+ " supported 0x%x advertising 0x%x speed %u\n"
+ " duplex %d port %d phy_address %d transceiver %d\n"
+ " autoneg %d maxtxpkt %d maxrxpkt %d\n",
+ cmd->cmd, cmd->supported, cmd->advertising,
+ ethtool_cmd_speed(cmd),
+ cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
+ cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+
+ speed = ethtool_cmd_speed(cmd);
+
+ if (IS_MF_SI(bp)) {
+ u32 part;
+ u32 line_speed = bp->link_vars.line_speed;
+
+ /* use 10G if no link detected */
+ if (!line_speed)
+ line_speed = 10000;
+
+ if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
+			BNX2X_DEV_INFO("Setting the speed requires BC %X or "
+				       "higher, please upgrade BC\n",
+				       REQ_BC_VER_4_SET_MF_BW);
+ return -EINVAL;
+ }
+
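+		/* express the requested speed as a percentage of the
+		 * actual line speed */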
+ part = (speed * 100) / line_speed;
+
+ if (line_speed < speed || !part) {
+			BNX2X_DEV_INFO("Speed setting must be in the range "
+				       "1%% to 100%% "
+				       "of the actual line speed\n");
+ return -EINVAL;
+ }
+
+ if (bp->state != BNX2X_STATE_OPEN)
+			/* store the value for the subsequent "load" */
+ bp->pending_max = part;
+ else
+ bnx2x_update_max_mf_config(bp, part);
+
+ return 0;
+ }
+
+ cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ old_multi_phy_config = bp->link_params.multi_phy_config;
+ switch (cmd->port) {
+ case PORT_TP:
+ if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
+ break; /* no port change */
+
+ if (!(bp->port.supported[0] & SUPPORTED_TP ||
+ bp->port.supported[1] & SUPPORTED_TP)) {
+ DP(NETIF_MSG_LINK, "Unsupported port type\n");
+ return -EINVAL;
+ }
+ bp->link_params.multi_phy_config &=
+ ~PORT_HW_CFG_PHY_SELECTION_MASK;
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ else
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ break;
+ case PORT_FIBRE:
+ if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
+ break; /* no port change */
+
+ if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
+ bp->port.supported[1] & SUPPORTED_FIBRE)) {
+ DP(NETIF_MSG_LINK, "Unsupported port type\n");
+ return -EINVAL;
+ }
+ bp->link_params.multi_phy_config &=
+ ~PORT_HW_CFG_PHY_SELECTION_MASK;
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ else
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Unsupported port type\n");
+ return -EINVAL;
+ }
+	/* Save the new config in case the command completes successfully */
+ new_multi_phy_config = bp->link_params.multi_phy_config;
+ /* Get the new cfg_idx */
+ cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ /* Restore old config in case command failed */
+ bp->link_params.multi_phy_config = old_multi_phy_config;
+ DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx);
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
+ DP(NETIF_MSG_LINK, "Autoneg not supported\n");
+ return -EINVAL;
+ }
+
+ /* advertise the requested speed and duplex if supported */
+ if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
+ DP(NETIF_MSG_LINK, "Advertisement parameters "
+ "are not supported\n");
+ return -EINVAL;
+ }
+
+ bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
+ bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+ bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
+ cmd->advertising);
+		if (cmd->advertising) {
+ bp->link_params.speed_cap_mask[cfg_idx] = 0;
+ if (cmd->advertising & ADVERTISED_10baseT_Half) {
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
+ }
+ if (cmd->advertising & ADVERTISED_10baseT_Full)
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
+
+ if (cmd->advertising & ADVERTISED_100baseT_Full)
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
+
+ if (cmd->advertising & ADVERTISED_100baseT_Half) {
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
+ }
+ if (cmd->advertising & ADVERTISED_1000baseT_Half) {
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+ }
+ if (cmd->advertising & (ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseKX_Full))
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+
+ if (cmd->advertising & (ADVERTISED_10000baseT_Full |
+ ADVERTISED_10000baseKX4_Full |
+ ADVERTISED_10000baseKR_Full))
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
+ }
+ } else { /* forced speed */
+ /* advertise the requested speed and duplex if supported */
+ switch (speed) {
+ case SPEED_10:
+ if (cmd->duplex == DUPLEX_FULL) {
+ if (!(bp->port.supported[cfg_idx] &
+ SUPPORTED_10baseT_Full)) {
+ DP(NETIF_MSG_LINK,
+ "10M full not supported\n");
+ return -EINVAL;
+ }
+
+ advertising = (ADVERTISED_10baseT_Full |
+ ADVERTISED_TP);
+ } else {
+ if (!(bp->port.supported[cfg_idx] &
+ SUPPORTED_10baseT_Half)) {
+ DP(NETIF_MSG_LINK,
+ "10M half not supported\n");
+ return -EINVAL;
+ }
+
+ advertising = (ADVERTISED_10baseT_Half |
+ ADVERTISED_TP);
+ }
+ break;
+
+ case SPEED_100:
+ if (cmd->duplex == DUPLEX_FULL) {
+ if (!(bp->port.supported[cfg_idx] &
+ SUPPORTED_100baseT_Full)) {
+ DP(NETIF_MSG_LINK,
+ "100M full not supported\n");
+ return -EINVAL;
+ }
+
+ advertising = (ADVERTISED_100baseT_Full |
+ ADVERTISED_TP);
+ } else {
+ if (!(bp->port.supported[cfg_idx] &
+ SUPPORTED_100baseT_Half)) {
+ DP(NETIF_MSG_LINK,
+ "100M half not supported\n");
+ return -EINVAL;
+ }
+
+ advertising = (ADVERTISED_100baseT_Half |
+ ADVERTISED_TP);
+ }
+ break;
+
+ case SPEED_1000:
+ if (cmd->duplex != DUPLEX_FULL) {
+ DP(NETIF_MSG_LINK, "1G half not supported\n");
+ return -EINVAL;
+ }
+
+ if (!(bp->port.supported[cfg_idx] &
+ SUPPORTED_1000baseT_Full)) {
+ DP(NETIF_MSG_LINK, "1G full not supported\n");
+ return -EINVAL;
+ }
+
+ advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_TP);
+ break;
+
+ case SPEED_2500:
+ if (cmd->duplex != DUPLEX_FULL) {
+ DP(NETIF_MSG_LINK,
+ "2.5G half not supported\n");
+ return -EINVAL;
+ }
+
+			if (!(bp->port.supported[cfg_idx] &
+			      SUPPORTED_2500baseX_Full)) {
+ DP(NETIF_MSG_LINK,
+ "2.5G full not supported\n");
+ return -EINVAL;
+ }
+
+ advertising = (ADVERTISED_2500baseX_Full |
+ ADVERTISED_TP);
+ break;
+
+ case SPEED_10000:
+ if (cmd->duplex != DUPLEX_FULL) {
+ DP(NETIF_MSG_LINK, "10G half not supported\n");
+ return -EINVAL;
+ }
+
+			if (!(bp->port.supported[cfg_idx] &
+			      SUPPORTED_10000baseT_Full)) {
+ DP(NETIF_MSG_LINK, "10G full not supported\n");
+ return -EINVAL;
+ }
+
+ advertising = (ADVERTISED_10000baseT_Full |
+ ADVERTISED_FIBRE);
+ break;
+
+ default:
+ DP(NETIF_MSG_LINK, "Unsupported speed %u\n", speed);
+ return -EINVAL;
+ }
+
+ bp->link_params.req_line_speed[cfg_idx] = speed;
+ bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+ bp->port.advertising[cfg_idx] = advertising;
+ }
+
+ DP(NETIF_MSG_LINK, "req_line_speed %d\n"
+ " req_duplex %d advertising 0x%x\n",
+ bp->link_params.req_line_speed[cfg_idx],
+ bp->link_params.req_duplex[cfg_idx],
+ bp->port.advertising[cfg_idx]);
+
+ /* Set new config */
+ bp->link_params.multi_phy_config = new_multi_phy_config;
+ if (netif_running(dev)) {
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_link_set(bp);
+ }
+
+ return 0;
+}
+
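+/* reg_addr.info holds a bitmask of chip revisions; these helpers test
+ * whether a register is readable ("online") on the given chip.
+ */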
+#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
+#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
+#define IS_E2_ONLINE(info) (((info) & RI_E2_ONLINE) == RI_E2_ONLINE)
+#define IS_E3_ONLINE(info) (((info) & RI_E3_ONLINE) == RI_E3_ONLINE)
+#define IS_E3B0_ONLINE(info) (((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE)
+
+static inline bool bnx2x_is_reg_online(struct bnx2x *bp,
+ const struct reg_addr *reg_info)
+{
+ if (CHIP_IS_E1(bp))
+ return IS_E1_ONLINE(reg_info->info);
+ else if (CHIP_IS_E1H(bp))
+ return IS_E1H_ONLINE(reg_info->info);
+ else if (CHIP_IS_E2(bp))
+ return IS_E2_ONLINE(reg_info->info);
+ else if (CHIP_IS_E3A0(bp))
+ return IS_E3_ONLINE(reg_info->info);
+ else if (CHIP_IS_E3B0(bp))
+ return IS_E3B0_ONLINE(reg_info->info);
+ else
+ return false;
+}
+
+/******* Paged registers info selectors ********/
+static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return page_vals_e2;
+ else if (CHIP_IS_E3(bp))
+ return page_vals_e3;
+ else
+ return NULL;
+}
+
+static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return PAGE_MODE_VALUES_E2;
+ else if (CHIP_IS_E3(bp))
+ return PAGE_MODE_VALUES_E3;
+ else
+ return 0;
+}
+
+static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return page_write_regs_e2;
+ else if (CHIP_IS_E3(bp))
+ return page_write_regs_e3;
+ else
+ return NULL;
+}
+
+static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return PAGE_WRITE_REGS_E2;
+ else if (CHIP_IS_E3(bp))
+ return PAGE_WRITE_REGS_E3;
+ else
+ return 0;
+}
+
+static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return page_read_regs_e2;
+ else if (CHIP_IS_E3(bp))
+ return page_read_regs_e3;
+ else
+ return NULL;
+}
+
+static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return PAGE_READ_REGS_E2;
+ else if (CHIP_IS_E3(bp))
+ return PAGE_READ_REGS_E3;
+ else
+ return 0;
+}
+
+static inline int __bnx2x_get_regs_len(struct bnx2x *bp)
+{
+ int num_pages = __bnx2x_get_page_reg_num(bp);
+ int page_write_num = __bnx2x_get_page_write_num(bp);
+ const struct reg_addr *page_read_addr = __bnx2x_get_page_read_ar(bp);
+ int page_read_num = __bnx2x_get_page_read_num(bp);
+ int regdump_len = 0;
+ int i, j, k;
+
+ for (i = 0; i < REGS_COUNT; i++)
+ if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
+ regdump_len += reg_addrs[i].size;
+
+ for (i = 0; i < num_pages; i++)
+ for (j = 0; j < page_write_num; j++)
+ for (k = 0; k < page_read_num; k++)
+ if (bnx2x_is_reg_online(bp, &page_read_addr[k]))
+ regdump_len += page_read_addr[k].size;
+
+ return regdump_len;
+}
+
+static int bnx2x_get_regs_len(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int regdump_len = 0;
+
+ regdump_len = __bnx2x_get_regs_len(bp);
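+	/* the register counts are in 32-bit words; convert to bytes */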
+ regdump_len *= 4;
+ regdump_len += sizeof(struct dump_hdr);
+
+ return regdump_len;
+}
+
+/**
+ * bnx2x_read_pages_regs - read "paged" registers
+ *
+ * @bp:	device handle
+ * @p:	output buffer
+ *
+ * Reads "paged" memories: memories that may only be read by first writing to a
+ * specific address ("write address") and then reading from a specific address
+ * ("read address"). There may be more than one write address per "page" and
+ * more than one read address per write address.
+ */
+static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
+{
+ u32 i, j, k, n;
+ /* addresses of the paged registers */
+ const u32 *page_addr = __bnx2x_get_page_addr_ar(bp);
+ /* number of paged registers */
+ int num_pages = __bnx2x_get_page_reg_num(bp);
+ /* write addresses */
+ const u32 *write_addr = __bnx2x_get_page_write_ar(bp);
+ /* number of write addresses */
+ int write_num = __bnx2x_get_page_write_num(bp);
+ /* read addresses info */
+ const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp);
+ /* number of read addresses */
+ int read_num = __bnx2x_get_page_read_num(bp);
+
+ for (i = 0; i < num_pages; i++) {
+ for (j = 0; j < write_num; j++) {
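+			/* select page 'i' via write address 'j' */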
+ REG_WR(bp, write_addr[j], page_addr[i]);
+ for (k = 0; k < read_num; k++)
+ if (bnx2x_is_reg_online(bp, &read_addr[k]))
+				for (n = 0; n < read_addr[k].size; n++)
+					*p++ = REG_RD(bp,
+						      read_addr[k].addr + n*4);
+ }
+ }
+}
+
+static inline void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
+{
+ u32 i, j;
+
+ /* Read the regular registers */
+ for (i = 0; i < REGS_COUNT; i++)
+ if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
+ for (j = 0; j < reg_addrs[i].size; j++)
+ *p++ = REG_RD(bp, reg_addrs[i].addr + j*4);
+
+	/* Read "paged" registers */
+ bnx2x_read_pages_regs(bp, p);
+}
+
+static void bnx2x_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *_p)
+{
+ u32 *p = _p;
+ struct bnx2x *bp = netdev_priv(dev);
+ struct dump_hdr dump_hdr = {0};
+
+ regs->version = 0;
+ memset(p, 0, regs->len);
+
+ if (!netif_running(bp->dev))
+ return;
+
+	/* Disable parity attentions while dumping, since reading
+	 * never-written registers may raise false parity alarms.
+	 * Parity attentions are re-enabled right after the dump.
+	 */
+ bnx2x_disable_blocks_parity(bp);
+
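+	/* hdr_size is counted in dwords, presumably excluding the size
+	 * word itself; see the matching 'p += hdr_size + 1' below */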
+ dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
+ dump_hdr.dump_sign = dump_sign_all;
+ dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
+ dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
+ dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
+ dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
+
+ if (CHIP_IS_E1(bp))
+ dump_hdr.info = RI_E1_ONLINE;
+ else if (CHIP_IS_E1H(bp))
+ dump_hdr.info = RI_E1H_ONLINE;
+ else if (!CHIP_IS_E1x(bp))
+ dump_hdr.info = RI_E2_ONLINE |
+ (BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP);
+
+ memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
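+	/* skip past the header: hdr_size dwords plus the one it excludes */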
+ p += dump_hdr.hdr_size + 1;
+
+ /* Actually read the registers */
+ __bnx2x_get_regs(bp, p);
+
+ /* Re-enable parity attentions */
+ bnx2x_clear_blocks_parity(bp);
+ bnx2x_enable_blocks_parity(bp);
+}
+
+static void bnx2x_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u8 phy_fw_ver[PHY_FW_VER_LEN];
+
+ strcpy(info->driver, DRV_MODULE_NAME);
+ strcpy(info->version, DRV_MODULE_VERSION);
+
+ phy_fw_ver[0] = '\0';
+ if (bp->port.pmf) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_get_ext_phy_fw_version(&bp->link_params,
+ (bp->state != BNX2X_STATE_CLOSED),
+ phy_fw_ver, PHY_FW_VER_LEN);
+ bnx2x_release_phy_lock(bp);
+ }
+
+ strncpy(info->fw_version, bp->fw_ver, 32);
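+	/* bootcode version is packed one byte per major/minor/rev field */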
+ snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
+ "bc %d.%d.%d%s%s",
+ (bp->common.bc_ver & 0xff0000) >> 16,
+ (bp->common.bc_ver & 0xff00) >> 8,
+ (bp->common.bc_ver & 0xff),
+ ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
+ strcpy(info->bus_info, pci_name(bp->pdev));
+ info->n_stats = BNX2X_NUM_STATS;
+ info->testinfo_len = BNX2X_NUM_TESTS;
+ info->eedump_len = bp->common.flash_size;
+ info->regdump_len = bnx2x_get_regs_len(dev);
+}
+
+static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (bp->flags & NO_WOL_FLAG) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ } else {
+ wol->supported = WAKE_MAGIC;
+ if (bp->wol)
+ wol->wolopts = WAKE_MAGIC;
+ else
+ wol->wolopts = 0;
+ }
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ if (bp->flags & NO_WOL_FLAG)
+ return -EINVAL;
+
+ bp->wol = 1;
+ } else
+ bp->wol = 0;
+
+ return 0;
+}
+
+static u32 bnx2x_get_msglevel(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ return bp->msg_enable;
+}
+
+static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (capable(CAP_NET_ADMIN)) {
+ /* dump MCP trace */
+ if (level & BNX2X_MSG_MCP)
+ bnx2x_fw_dump_lvl(bp, KERN_INFO);
+ bp->msg_enable = level;
+ }
+}
+
+static int bnx2x_nway_reset(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (!bp->port.pmf)
+ return 0;
+
+ if (netif_running(dev)) {
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_link_set(bp);
+ }
+
+ return 0;
+}
+
+static u32 bnx2x_get_link(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN))
+ return 0;
+
+ return bp->link_vars.link_up;
+}
+
+static int bnx2x_get_eeprom_len(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ return bp->common.flash_size;
+}
+
+static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int count, i;
+ u32 val = 0;
+
+ /* adjust timeout for emulation/FPGA */
+ count = BNX2X_NVRAM_TIMEOUT_COUNT;
+ if (CHIP_REV_IS_SLOW(bp))
+ count *= 100;
+
+ /* request access to nvram interface */
+ REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+ (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
+
+ for (i = 0; i < count*10; i++) {
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
+ if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
+ break;
+
+ udelay(5);
+ }
+
+ if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
+ DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int bnx2x_release_nvram_lock(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int count, i;
+ u32 val = 0;
+
+ /* adjust timeout for emulation/FPGA */
+ count = BNX2X_NVRAM_TIMEOUT_COUNT;
+ if (CHIP_REV_IS_SLOW(bp))
+ count *= 100;
+
+ /* relinquish nvram interface */
+ REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+ (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
+
+ for (i = 0; i < count*10; i++) {
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
+ if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
+ break;
+
+ udelay(5);
+ }
+
+ if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
+ DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void bnx2x_enable_nvram_access(struct bnx2x *bp)
+{
+ u32 val;
+
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
+
+ /* enable both bits, even on read */
+ REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
+ (val | MCPR_NVM_ACCESS_ENABLE_EN |
+ MCPR_NVM_ACCESS_ENABLE_WR_EN));
+}
+
+static void bnx2x_disable_nvram_access(struct bnx2x *bp)
+{
+ u32 val;
+
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
+
+ /* disable both bits, even after read */
+ REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
+ (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
+ MCPR_NVM_ACCESS_ENABLE_WR_EN)));
+}
+
+static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
+ u32 cmd_flags)
+{
+ int count, i, rc;
+ u32 val;
+
+ /* build the command word */
+ cmd_flags |= MCPR_NVM_COMMAND_DOIT;
+
+ /* need to clear DONE bit separately */
+ REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
+
+ /* address of the NVRAM to read from */
+ REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
+ (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
+
+ /* issue a read command */
+ REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
+
+ /* adjust timeout for emulation/FPGA */
+ count = BNX2X_NVRAM_TIMEOUT_COUNT;
+ if (CHIP_REV_IS_SLOW(bp))
+ count *= 100;
+
+ /* wait for completion */
+ *ret_val = 0;
+ rc = -EBUSY;
+ for (i = 0; i < count; i++) {
+ udelay(5);
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
+
+ if (val & MCPR_NVM_COMMAND_DONE) {
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
+			/* we read nvram data in cpu order,
+			 * but ethtool sees it as an array of bytes;
+			 * converting to big-endian does the work */
+ *ret_val = cpu_to_be32(val);
+ rc = 0;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+ int buf_size)
+{
+ int rc;
+ u32 cmd_flags;
+ __be32 val;
+
+ if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
+ DP(BNX2X_MSG_NVM,
+ "Invalid parameter: offset 0x%x buf_size 0x%x\n",
+ offset, buf_size);
+ return -EINVAL;
+ }
+
+ if (offset + buf_size > bp->common.flash_size) {
+ DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
+ " buf_size (0x%x) > flash_size (0x%x)\n",
+ offset, buf_size, bp->common.flash_size);
+ return -EINVAL;
+ }
+
+ /* request access to nvram interface */
+ rc = bnx2x_acquire_nvram_lock(bp);
+ if (rc)
+ return rc;
+
+ /* enable access to nvram interface */
+ bnx2x_enable_nvram_access(bp);
+
+ /* read the first word(s) */
+ cmd_flags = MCPR_NVM_COMMAND_FIRST;
+ while ((buf_size > sizeof(u32)) && (rc == 0)) {
+ rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
+ memcpy(ret_buf, &val, 4);
+
+ /* advance to the next dword */
+ offset += sizeof(u32);
+ ret_buf += sizeof(u32);
+ buf_size -= sizeof(u32);
+ cmd_flags = 0;
+ }
+
+ if (rc == 0) {
+ cmd_flags |= MCPR_NVM_COMMAND_LAST;
+ rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
+ memcpy(ret_buf, &val, 4);
+ }
+
+ /* disable access to nvram interface */
+ bnx2x_disable_nvram_access(bp);
+ bnx2x_release_nvram_lock(bp);
+
+ return rc;
+}
+
+static int bnx2x_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *eebuf)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc;
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+ " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
+ eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
+ eeprom->len, eeprom->len);
+
+ /* parameters already validated in ethtool_get_eeprom */
+
+ rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
+
+ return rc;
+}
+
+static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
+ u32 cmd_flags)
+{
+ int count, i, rc;
+
+ /* build the command word */
+ cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
+
+ /* need to clear DONE bit separately */
+ REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
+
+ /* write the data */
+ REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
+
+ /* address of the NVRAM to write to */
+ REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
+ (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
+
+ /* issue the write command */
+ REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
+
+ /* adjust timeout for emulation/FPGA */
+ count = BNX2X_NVRAM_TIMEOUT_COUNT;
+ if (CHIP_REV_IS_SLOW(bp))
+ count *= 100;
+
+ /* wait for completion */
+ rc = -EBUSY;
+ for (i = 0; i < count; i++) {
+ udelay(5);
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
+ if (val & MCPR_NVM_COMMAND_DONE) {
+ rc = 0;
+ break;
+ }
+ }
+
+ return rc;
+}
+
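+/* bit offset of a byte within its naturally-aligned dword */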
+#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
+
+static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
+ int buf_size)
+{
+ int rc;
+ u32 cmd_flags;
+ u32 align_offset;
+ __be32 val;
+
+ if (offset + buf_size > bp->common.flash_size) {
+ DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
+ " buf_size (0x%x) > flash_size (0x%x)\n",
+ offset, buf_size, bp->common.flash_size);
+ return -EINVAL;
+ }
+
+ /* request access to nvram interface */
+ rc = bnx2x_acquire_nvram_lock(bp);
+ if (rc)
+ return rc;
+
+ /* enable access to nvram interface */
+ bnx2x_enable_nvram_access(bp);
+
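+	/* read-modify-write: fetch the aligned dword, splice in the new
+	 * byte, then write the dword back */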
+ cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
+ align_offset = (offset & ~0x03);
+ rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
+
+ if (rc == 0) {
+ val &= ~(0xff << BYTE_OFFSET(offset));
+ val |= (*data_buf << BYTE_OFFSET(offset));
+
+		/* nvram data is returned as an array of bytes;
+		 * convert it back to cpu order */
+ val = be32_to_cpu(val);
+
+ rc = bnx2x_nvram_write_dword(bp, align_offset, val,
+ cmd_flags);
+ }
+
+ /* disable access to nvram interface */
+ bnx2x_disable_nvram_access(bp);
+ bnx2x_release_nvram_lock(bp);
+
+ return rc;
+}
+
+static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
+ int buf_size)
+{
+ int rc;
+ u32 cmd_flags;
+ u32 val;
+ u32 written_so_far;
+
+	if (buf_size == 1)	/* ethtool may issue single-byte writes */
+ return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
+
+ if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
+ DP(BNX2X_MSG_NVM,
+ "Invalid parameter: offset 0x%x buf_size 0x%x\n",
+ offset, buf_size);
+ return -EINVAL;
+ }
+
+ if (offset + buf_size > bp->common.flash_size) {
+ DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
+ " buf_size (0x%x) > flash_size (0x%x)\n",
+ offset, buf_size, bp->common.flash_size);
+ return -EINVAL;
+ }
+
+ /* request access to nvram interface */
+ rc = bnx2x_acquire_nvram_lock(bp);
+ if (rc)
+ return rc;
+
+ /* enable access to nvram interface */
+ bnx2x_enable_nvram_access(bp);
+
+ written_so_far = 0;
+ cmd_flags = MCPR_NVM_COMMAND_FIRST;
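+	/* mark the final dword of the buffer and of each NVRAM page as
+	 * LAST, and each page start as FIRST */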
+ while ((written_so_far < buf_size) && (rc == 0)) {
+ if (written_so_far == (buf_size - sizeof(u32)))
+ cmd_flags |= MCPR_NVM_COMMAND_LAST;
+ else if (((offset + 4) % BNX2X_NVRAM_PAGE_SIZE) == 0)
+ cmd_flags |= MCPR_NVM_COMMAND_LAST;
+ else if ((offset % BNX2X_NVRAM_PAGE_SIZE) == 0)
+ cmd_flags |= MCPR_NVM_COMMAND_FIRST;
+
+ memcpy(&val, data_buf, 4);
+
+ rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
+
+ /* advance to the next dword */
+ offset += sizeof(u32);
+ data_buf += sizeof(u32);
+ written_so_far += sizeof(u32);
+ cmd_flags = 0;
+ }
+
+ /* disable access to nvram interface */
+ bnx2x_disable_nvram_access(bp);
+ bnx2x_release_nvram_lock(bp);
+
+ return rc;
+}
+
+static int bnx2x_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *eebuf)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int port = BP_PORT(bp);
+ int rc = 0;
+	u32 ext_phy_config;
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+ " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
+ eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
+ eeprom->len, eeprom->len);
+
+ /* parameters already validated in ethtool_set_eeprom */
+
+ /* PHY eeprom can be accessed only by the PMF */
+ if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
+ !bp->port.pmf)
+ return -EINVAL;
+
+ ext_phy_config =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config);
+
+ if (eeprom->magic == 0x50485950) {
+ /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ bnx2x_acquire_phy_lock(bp);
+ rc |= bnx2x_link_reset(&bp->link_params,
+ &bp->link_vars, 0);
+ if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+ MISC_REGISTERS_GPIO_HIGH, port);
+ bnx2x_release_phy_lock(bp);
+ bnx2x_link_report(bp);
+
+ } else if (eeprom->magic == 0x50485952) {
+ /* 'PHYR' (0x50485952): re-init link after FW upgrade */
+ if (bp->state == BNX2X_STATE_OPEN) {
+ bnx2x_acquire_phy_lock(bp);
+ rc |= bnx2x_link_reset(&bp->link_params,
+ &bp->link_vars, 1);
+
+ rc |= bnx2x_phy_init(&bp->link_params,
+ &bp->link_vars);
+ bnx2x_release_phy_lock(bp);
+ bnx2x_calc_fc_adv(bp);
+ }
+ } else if (eeprom->magic == 0x53985943) {
+		/* 'PHYC' (0x50485943): PHY FW upgrade completed */
+ if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
+
+ /* DSP Remove Download Mode */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+ MISC_REGISTERS_GPIO_LOW, port);
+
+ bnx2x_acquire_phy_lock(bp);
+
+ bnx2x_sfx7101_sp_sw_reset(bp,
+ &bp->link_params.phy[EXT_PHY1]);
+
+ /* wait 0.5 sec to allow it to run */
+ msleep(500);
+ bnx2x_ext_phy_hw_reset(bp, port);
+ msleep(500);
+ bnx2x_release_phy_lock(bp);
+ }
+ } else
+ rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
+
+ return rc;
+}
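
The magic values dispatched on above are four ASCII characters packed into a u32 ('PHYP' is 0x50485950), which is why the PMF guard earlier checks the 0x50485900-0x504859FF range. A sketch of how such a magic is built:

#include <stdint.h>
#include <stdio.h>

/* Pack four ASCII characters into an ethtool eeprom magic. */
#define PHY_MAGIC(a, b, c, d) \
	(((uint32_t)(a) << 24) | ((uint32_t)(b) << 16) | \
	 ((uint32_t)(c) << 8) | (uint32_t)(d))

int main(void)
{
	printf("PHYP = 0x%08x\n", PHY_MAGIC('P', 'H', 'Y', 'P')); /* 0x50485950 */
	printf("PHYR = 0x%08x\n", PHY_MAGIC('P', 'H', 'Y', 'R')); /* 0x50485952 */
	return 0;
}
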
+
+static int bnx2x_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ memset(coal, 0, sizeof(struct ethtool_coalesce));
+
+ coal->rx_coalesce_usecs = bp->rx_ticks;
+ coal->tx_coalesce_usecs = bp->tx_ticks;
+
+ return 0;
+}
+
+static int bnx2x_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
+ if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
+ bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
+
+ bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
+ if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
+ bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
+
+ if (netif_running(dev))
+ bnx2x_update_coalesce(bp);
+
+ return 0;
+}
+
+static void bnx2x_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ ering->rx_max_pending = MAX_RX_AVAIL;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = 0;
+
+ if (bp->rx_ring_size)
+ ering->rx_pending = bp->rx_ring_size;
+ else
+ ering->rx_pending = MAX_RX_AVAIL;
+
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = 0;
+
+ ering->tx_max_pending = MAX_TX_AVAIL;
+ ering->tx_pending = bp->tx_ring_size;
+}
+
+static int bnx2x_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ pr_err("Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
+ if ((ering->rx_pending > MAX_RX_AVAIL) ||
+ (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+ MIN_RX_SIZE_TPA)) ||
+ (ering->tx_pending > MAX_TX_AVAIL) ||
+ (ering->tx_pending <= MAX_SKB_FRAGS + 4))
+ return -EINVAL;
+
+ bp->rx_ring_size = ering->rx_pending;
+ bp->tx_ring_size = ering->tx_pending;
+
+ return bnx2x_reload_if_running(dev);
+}
+
+static void bnx2x_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] ==
+ BNX2X_FLOW_CTRL_AUTO);
+
+ epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
+ BNX2X_FLOW_CTRL_RX);
+ epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
+ BNX2X_FLOW_CTRL_TX);
+
+ DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
+ " autoneg %d rx_pause %d tx_pause %d\n",
+ epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
+}
+
+static int bnx2x_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ if (IS_MF(bp))
+ return 0;
+
+ DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
+ " autoneg %d rx_pause %d tx_pause %d\n",
+ epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
+
+ bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO;
+
+ if (epause->rx_pause)
+ bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX;
+
+ if (epause->tx_pause)
+ bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX;
+
+ if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO)
+ bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE;
+
+ if (epause->autoneg) {
+ if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
+ DP(NETIF_MSG_LINK, "autoneg not supported\n");
+ return -EINVAL;
+ }
+
+ if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) {
+ bp->link_params.req_flow_ctrl[cfg_idx] =
+ BNX2X_FLOW_CTRL_AUTO;
+ }
+ }
+
+ DP(NETIF_MSG_LINK,
+ "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]);
+
+ if (netif_running(dev)) {
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_link_set(bp);
+ }
+
+ return 0;
+}
+
+static const struct {
+ char string[ETH_GSTRING_LEN];
+} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
+ { "register_test (offline)" },
+ { "memory_test (offline)" },
+ { "loopback_test (offline)" },
+ { "nvram_test (online)" },
+ { "interrupt_test (online)" },
+ { "link_test (online)" },
+ { "idle check (online)" }
+};
+
+enum {
+ BNX2X_CHIP_E1_OFST = 0,
+ BNX2X_CHIP_E1H_OFST,
+ BNX2X_CHIP_E2_OFST,
+ BNX2X_CHIP_E3_OFST,
+ BNX2X_CHIP_E3B0_OFST,
+ BNX2X_CHIP_MAX_OFST
+};
+
+#define BNX2X_CHIP_MASK_E1 (1 << BNX2X_CHIP_E1_OFST)
+#define BNX2X_CHIP_MASK_E1H (1 << BNX2X_CHIP_E1H_OFST)
+#define BNX2X_CHIP_MASK_E2 (1 << BNX2X_CHIP_E2_OFST)
+#define BNX2X_CHIP_MASK_E3 (1 << BNX2X_CHIP_E3_OFST)
+#define BNX2X_CHIP_MASK_E3B0 (1 << BNX2X_CHIP_E3B0_OFST)
+
+#define BNX2X_CHIP_MASK_ALL ((1 << BNX2X_CHIP_MAX_OFST) - 1)
+#define BNX2X_CHIP_MASK_E1X (BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H)
+
+static int bnx2x_test_registers(struct bnx2x *bp)
+{
+ int idx, i, rc = -ENODEV;
+ u32 wr_val = 0, hw;
+ int port = BP_PORT(bp);
+ static const struct {
+ u32 hw;
+ u32 offset0;
+ u32 offset1;
+ u32 mask;
+ } reg_tbl[] = {
+/* 0 */ { BNX2X_CHIP_MASK_ALL,
+ BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
+ { BNX2X_CHIP_MASK_ALL,
+ DORQ_REG_DB_ADDR0, 4, 0xffffffff },
+ { BNX2X_CHIP_MASK_E1X,
+ HC_REG_AGG_INT_0, 4, 0x000003ff },
+ { BNX2X_CHIP_MASK_ALL,
+ PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3,
+ PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
+ { BNX2X_CHIP_MASK_E3B0,
+ PBF_REG_INIT_CRD_Q0, 4, 0x000007ff },
+ { BNX2X_CHIP_MASK_ALL,
+ PRS_REG_CID_PORT_0, 4, 0x00ffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
+ { BNX2X_CHIP_MASK_ALL,
+ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
+ { BNX2X_CHIP_MASK_ALL,
+ PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
+/* 10 */ { BNX2X_CHIP_MASK_ALL,
+ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
+ { BNX2X_CHIP_MASK_ALL,
+ PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
+ { BNX2X_CHIP_MASK_ALL,
+ QM_REG_CONNNUM_0, 4, 0x000fffff },
+ { BNX2X_CHIP_MASK_ALL,
+ TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
+ { BNX2X_CHIP_MASK_ALL,
+ SRC_REG_KEYRSS0_0, 40, 0xffffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ SRC_REG_KEYRSS0_7, 40, 0xffffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_ALL,
+ XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
+ { BNX2X_CHIP_MASK_ALL,
+ XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
+/* 20 */ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+ NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+ NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
+/* 30 */ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
+ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+ NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001},
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
+ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
+ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+ NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
+
+ { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
+ };
+
+ if (!netif_running(bp->dev))
+ return rc;
+
+ if (CHIP_IS_E1(bp))
+ hw = BNX2X_CHIP_MASK_E1;
+ else if (CHIP_IS_E1H(bp))
+ hw = BNX2X_CHIP_MASK_E1H;
+ else if (CHIP_IS_E2(bp))
+ hw = BNX2X_CHIP_MASK_E2;
+ else if (CHIP_IS_E3B0(bp))
+ hw = BNX2X_CHIP_MASK_E3B0;
+ else /* e3 A0 */
+ hw = BNX2X_CHIP_MASK_E3;
+
+ /* Repeat the test twice:
+ First by writing 0x00000000, second by writing 0xffffffff */
+ for (idx = 0; idx < 2; idx++) {
+
+ switch (idx) {
+ case 0:
+ wr_val = 0;
+ break;
+ case 1:
+ wr_val = 0xffffffff;
+ break;
+ }
+
+ for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
+ u32 offset, mask, save_val, val;
+ if (!(hw & reg_tbl[i].hw))
+ continue;
+
+ offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
+ mask = reg_tbl[i].mask;
+
+ save_val = REG_RD(bp, offset);
+
+ REG_WR(bp, offset, wr_val & mask);
+
+ val = REG_RD(bp, offset);
+
+ /* Restore the original register's value */
+ REG_WR(bp, offset, save_val);
+
+ /* verify value is as expected */
+ if ((val & mask) != (wr_val & mask)) {
+ DP(NETIF_MSG_HW,
+ "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
+ offset, val, wr_val, mask);
+ goto test_reg_exit;
+ }
+ }
+ }
+
+ rc = 0;
+
+test_reg_exit:
+ return rc;
+}
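
Each row above is exercised with a write/read/restore probe: rows not matching the running chip are skipped via the hw bitmask, the register is saved, a pattern is written through the writable-bit mask, read back and compared under the same mask, and the original value is always restored. A standalone sketch of the probe over a fake register file (all names below are hypothetical, not driver code):

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[4];        /* fake register file for the demo */
static uint32_t reg_rd(uint32_t off) { return regs[off / 4]; }
/* Emulate a register with only 10 writable bits. */
static void reg_wr(uint32_t off, uint32_t v) { regs[off / 4] = v & 0x000003ffu; }

/* Probe one register: returns 0 if the masked bits latch both patterns. */
static int probe_reg(uint32_t off, uint32_t mask)
{
	static const uint32_t patterns[] = { 0x00000000u, 0xffffffffu };
	uint32_t save = reg_rd(off);
	int rc = 0;

	for (int i = 0; i < 2; i++) {
		reg_wr(off, patterns[i] & mask);
		if ((reg_rd(off) & mask) != (patterns[i] & mask))
			rc = -1;                /* stuck or unwritable bits */
	}
	reg_wr(off, save);                      /* always restore */
	return rc;
}

int main(void)
{
	printf("probe: %d\n", probe_reg(0, 0x000003ff)); /* expect 0 */
	return 0;
}
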
+
+static int bnx2x_test_memory(struct bnx2x *bp)
+{
+ int i, j, rc = -ENODEV;
+ u32 val, index;
+ static const struct {
+ u32 offset;
+ int size;
+ } mem_tbl[] = {
+ { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
+ { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
+ { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
+ { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
+ { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
+ { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
+ { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
+
+ { 0xffffffff, 0 }
+ };
+
+ static const struct {
+ char *name;
+ u32 offset;
+ u32 hw_mask[BNX2X_CHIP_MAX_OFST];
+ } prty_tbl[] = {
+ { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS,
+ {0x3ffc0, 0, 0, 0} },
+ { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS,
+ {0x2, 0x2, 0, 0} },
+ { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS,
+ {0, 0, 0, 0} },
+ { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS,
+ {0x3ffc0, 0, 0, 0} },
+ { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS,
+ {0x3ffc0, 0, 0, 0} },
+ { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS,
+ {0x3ffc1, 0, 0, 0} },
+
+ { NULL, 0xffffffff, {0, 0, 0, 0} }
+ };
+
+ if (!netif_running(bp->dev))
+ return rc;
+
+ if (CHIP_IS_E1(bp))
+ index = BNX2X_CHIP_E1_OFST;
+ else if (CHIP_IS_E1H(bp))
+ index = BNX2X_CHIP_E1H_OFST;
+ else if (CHIP_IS_E2(bp))
+ index = BNX2X_CHIP_E2_OFST;
+ else /* e3 */
+ index = BNX2X_CHIP_E3_OFST;
+
+	/* pre-check the parity status */
+ for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
+ val = REG_RD(bp, prty_tbl[i].offset);
+ if (val & ~(prty_tbl[i].hw_mask[index])) {
+ DP(NETIF_MSG_HW,
+ "%s is 0x%x\n", prty_tbl[i].name, val);
+ goto test_mem_exit;
+ }
+ }
+
+ /* Go through all the memories */
+ for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
+ for (j = 0; j < mem_tbl[i].size; j++)
+ REG_RD(bp, mem_tbl[i].offset + j*4);
+
+ /* Check the parity status */
+ for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
+ val = REG_RD(bp, prty_tbl[i].offset);
+ if (val & ~(prty_tbl[i].hw_mask[index])) {
+ DP(NETIF_MSG_HW,
+ "%s is 0x%x\n", prty_tbl[i].name, val);
+ goto test_mem_exit;
+ }
+ }
+
+ rc = 0;
+
+test_mem_exit:
+ return rc;
+}
+
+static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
+{
+ int cnt = 1400;
+
+ if (link_up) {
+ while (bnx2x_link_test(bp, is_serdes) && cnt--)
+ msleep(20);
+
+ if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
+ DP(NETIF_MSG_LINK, "Timeout waiting for link up\n");
+ }
+}
+
+static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
+{
+ unsigned int pkt_size, num_pkts, i;
+ struct sk_buff *skb;
+ unsigned char *packet;
+ struct bnx2x_fastpath *fp_rx = &bp->fp[0];
+ struct bnx2x_fastpath *fp_tx = &bp->fp[0];
+ struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0];
+ u16 tx_start_idx, tx_idx;
+ u16 rx_start_idx, rx_idx;
+ u16 pkt_prod, bd_prod, rx_comp_cons;
+ struct sw_tx_bd *tx_buf;
+ struct eth_tx_start_bd *tx_start_bd;
+ struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
+ struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+ dma_addr_t mapping;
+ union eth_rx_cqe *cqe;
+ u8 cqe_fp_flags, cqe_fp_type;
+ struct sw_rx_bd *rx_buf;
+ u16 len;
+ int rc = -ENODEV;
+
+ /* check the loopback mode */
+ switch (loopback_mode) {
+ case BNX2X_PHY_LOOPBACK:
+ if (bp->link_params.loopback_mode != LOOPBACK_XGXS)
+ return -EINVAL;
+ break;
+ case BNX2X_MAC_LOOPBACK:
+ bp->link_params.loopback_mode = CHIP_IS_E3(bp) ?
+ LOOPBACK_XMAC : LOOPBACK_BMAC;
+ bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* prepare the loopback packet */
+ pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
+ bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
+ skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto test_loopback_exit;
+ }
+ packet = skb_put(skb, pkt_size);
+ memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
+ memset(packet + ETH_ALEN, 0, ETH_ALEN);
+ memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
+ for (i = ETH_HLEN; i < pkt_size; i++)
+ packet[i] = (unsigned char) (i & 0xff);
+ mapping = dma_map_single(&bp->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ rc = -ENOMEM;
+ dev_kfree_skb(skb);
+ BNX2X_ERR("Unable to map SKB\n");
+ goto test_loopback_exit;
+ }
+
+ /* send the loopback packet */
+ num_pkts = 0;
+ tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
+ rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
+
+ pkt_prod = txdata->tx_pkt_prod++;
+ tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
+ tx_buf->first_bd = txdata->tx_bd_prod;
+ tx_buf->skb = skb;
+ tx_buf->flags = 0;
+
+ bd_prod = TX_BD(txdata->tx_bd_prod);
+ tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
+ tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+ tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+ tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
+ tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
+ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+ tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_ETH_ADDR_TYPE,
+ UNICAST_ADDRESS);
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_HDR_NBDS,
+ 1);
+
+ /* turn on parsing and get a BD */
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+ pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
+ pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
+
+ memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
+ memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
+
+ wmb();
+
+ txdata->tx_db.data.prod += 2;
+ barrier();
+ DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
+
+ mmiowb();
+ barrier();
+
+ num_pkts++;
+ txdata->tx_bd_prod += 2; /* start + pbd */
+
+ udelay(100);
+
+ tx_idx = le16_to_cpu(*txdata->tx_cons_sb);
+ if (tx_idx != tx_start_idx + num_pkts)
+ goto test_loopback_exit;
+
+	/* Unlike HC, the IGU won't generate an interrupt for status block
+	 * updates that have been performed while interrupts were
+	 * disabled.
+	 */
+ if (bp->common.int_block == INT_BLOCK_IGU) {
+		/* Disable local BHs to prevent a deadlock between
+		 * sch_direct_xmit() and bnx2x_run_loopback() (calling
+		 * bnx2x_tx_int()), as both take netif_tx_lock().
+		 */
+ local_bh_disable();
+ bnx2x_tx_int(bp, txdata);
+ local_bh_enable();
+ }
+
+ rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
+ if (rx_idx != rx_start_idx + num_pkts)
+ goto test_loopback_exit;
+
+ rx_comp_cons = le16_to_cpu(fp_rx->rx_comp_cons);
+ cqe = &fp_rx->rx_comp_ring[RCQ_BD(rx_comp_cons)];
+ cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+ cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
+ if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
+ goto test_loopback_rx_exit;
+
+ len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
+ if (len != pkt_size)
+ goto test_loopback_rx_exit;
+
+ rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
+ dma_sync_single_for_cpu(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ fp_rx->rx_buf_size, DMA_FROM_DEVICE);
+ skb = rx_buf->skb;
+ skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
+ for (i = ETH_HLEN; i < pkt_size; i++)
+ if (*(skb->data + i) != (unsigned char) (i & 0xff))
+ goto test_loopback_rx_exit;
+
+ rc = 0;
+
+test_loopback_rx_exit:
+
+ fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
+ fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
+ fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
+ fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
+
+ /* Update producers */
+ bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
+ fp_rx->rx_sge_prod);
+
+test_loopback_exit:
+ bp->link_params.loopback_mode = LOOPBACK_NONE;
+
+ return rc;
+}
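
The loopback frame's payload is self-describing: byte i carries (i & 0xff), so the receive path can validate the returned frame byte-by-byte without keeping a copy of what was sent. A sketch of the fill/verify pair:

#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN 14

/* Fill the payload (everything past the Ethernet header) with i & 0xff. */
static void fill_payload(uint8_t *pkt, size_t len)
{
	for (size_t i = ETH_HLEN; i < len; i++)
		pkt[i] = (uint8_t)(i & 0xff);
}

/* Re-derive the expected byte at each position instead of comparing buffers. */
static int verify_payload(const uint8_t *pkt, size_t len)
{
	for (size_t i = ETH_HLEN; i < len; i++)
		if (pkt[i] != (uint8_t)(i & 0xff))
			return -1;              /* corruption at byte i */
	return 0;
}

int main(void)
{
	uint8_t frame[128] = {0};

	fill_payload(frame, sizeof(frame));
	printf("verify: %d\n", verify_payload(frame, sizeof(frame)));
	return 0;
}
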
+
+static int bnx2x_test_loopback(struct bnx2x *bp)
+{
+ int rc = 0, res;
+
+ if (BP_NOMCP(bp))
+ return rc;
+
+ if (!netif_running(bp->dev))
+ return BNX2X_LOOPBACK_FAILED;
+
+ bnx2x_netif_stop(bp, 1);
+ bnx2x_acquire_phy_lock(bp);
+
+ res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK);
+ if (res) {
+ DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
+ rc |= BNX2X_PHY_LOOPBACK_FAILED;
+ }
+
+ res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK);
+ if (res) {
+ DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
+ rc |= BNX2X_MAC_LOOPBACK_FAILED;
+ }
+
+ bnx2x_release_phy_lock(bp);
+ bnx2x_netif_start(bp);
+
+ return rc;
+}
+
+#define CRC32_RESIDUAL 0xdebb20e3
+
+static int bnx2x_test_nvram(struct bnx2x *bp)
+{
+ static const struct {
+ int offset;
+ int size;
+ } nvram_tbl[] = {
+ { 0, 0x14 }, /* bootstrap */
+ { 0x14, 0xec }, /* dir */
+ { 0x100, 0x350 }, /* manuf_info */
+ { 0x450, 0xf0 }, /* feature_info */
+ { 0x640, 0x64 }, /* upgrade_key_info */
+ { 0x708, 0x70 }, /* manuf_key_info */
+ { 0, 0 }
+ };
+ __be32 buf[0x350 / 4];
+ u8 *data = (u8 *)buf;
+ int i, rc;
+ u32 magic, crc;
+
+ if (BP_NOMCP(bp))
+ return 0;
+
+ rc = bnx2x_nvram_read(bp, 0, data, 4);
+ if (rc) {
+ DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
+ goto test_nvram_exit;
+ }
+
+ magic = be32_to_cpu(buf[0]);
+ if (magic != 0x669955aa) {
+ DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
+ rc = -ENODEV;
+ goto test_nvram_exit;
+ }
+
+ for (i = 0; nvram_tbl[i].size; i++) {
+
+ rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
+ nvram_tbl[i].size);
+ if (rc) {
+ DP(NETIF_MSG_PROBE,
+ "nvram_tbl[%d] read data (rc %d)\n", i, rc);
+ goto test_nvram_exit;
+ }
+
+ crc = ether_crc_le(nvram_tbl[i].size, data);
+ if (crc != CRC32_RESIDUAL) {
+ DP(NETIF_MSG_PROBE,
+ "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
+ rc = -ENODEV;
+ goto test_nvram_exit;
+ }
+ }
+
+test_nvram_exit:
+ return rc;
+}
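
The per-region check above leans on a CRC-32 identity: running the little-endian Ethernet CRC over a block that ends with its own (complemented) CRC always yields the constant residual 0xdebb20e3, so a region validates without knowing where inside it the checksum field sits. A self-contained sketch, with a bitwise CRC-32 assumed equivalent to the kernel's ether_crc_le():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CRC32_RESIDUAL 0xdebb20e3u

/* Reflected CRC-32 (poly 0xedb88320, init ~0, no final xor) --
 * assumed to match the kernel's ether_crc_le(). */
static uint32_t crc32_le(const uint8_t *p, size_t len)
{
	uint32_t crc = 0xffffffffu;

	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	uint8_t blk[20] = "nvram region";
	uint32_t fcs = ~crc32_le(blk, 16);      /* checksum of first 16 bytes */

	memcpy(blk + 16, &fcs, 4);              /* append LE on an LE host */
	printf("residual ok: %d\n",
	       crc32_le(blk, 20) == CRC32_RESIDUAL);  /* prints 1 */
	return 0;
}
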
+
+/* Send an EMPTY ramrod on the first queue */
+static int bnx2x_test_intr(struct bnx2x *bp)
+{
+ struct bnx2x_queue_state_params params = {0};
+
+ if (!netif_running(bp->dev))
+ return -ENODEV;
+
+ params.q_obj = &bp->fp->q_obj;
+ params.cmd = BNX2X_Q_CMD_EMPTY;
+
+ __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+
+ return bnx2x_queue_state_change(bp, &params);
+}
+
+static void bnx2x_self_test(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u8 is_serdes;
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ pr_err("Handling parity error recovery. Try again later\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+
+ memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
+
+ if (!netif_running(dev))
+ return;
+
+ /* offline tests are not supported in MF mode */
+ if (IS_MF(bp))
+ etest->flags &= ~ETH_TEST_FL_OFFLINE;
+ is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
+
+ if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ int port = BP_PORT(bp);
+ u32 val;
+ u8 link_up;
+
+ /* save current value of input enable for TX port IF */
+ val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
+ /* disable input for TX port IF */
+ REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
+
+ link_up = bp->link_vars.link_up;
+
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_load(bp, LOAD_DIAG);
+ /* wait until link state is restored */
+ bnx2x_wait_for_link(bp, 1, is_serdes);
+
+ if (bnx2x_test_registers(bp) != 0) {
+ buf[0] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ if (bnx2x_test_memory(bp) != 0) {
+ buf[1] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ buf[2] = bnx2x_test_loopback(bp);
+ if (buf[2] != 0)
+ etest->flags |= ETH_TEST_FL_FAILED;
+
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+
+ /* restore input for TX port IF */
+ REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
+
+ bnx2x_nic_load(bp, LOAD_NORMAL);
+ /* wait until link state is restored */
+ bnx2x_wait_for_link(bp, link_up, is_serdes);
+ }
+ if (bnx2x_test_nvram(bp) != 0) {
+ buf[3] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ if (bnx2x_test_intr(bp) != 0) {
+ buf[4] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (bnx2x_link_test(bp, is_serdes) != 0) {
+ buf[5] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+#ifdef BNX2X_EXTRA_DEBUG
+ bnx2x_panic_dump(bp);
+#endif
+}
+
+#define IS_PORT_STAT(i) \
+ ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
+#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
+#define IS_MF_MODE_STAT(bp) \
+ (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
+
+/* ethtool statistics are displayed for all regular ethernet queues */
+static inline int bnx2x_num_stat_queues(struct bnx2x *bp)
+{
+ return BNX2X_NUM_ETH_QUEUES(bp);
+}
+
+static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int i, num_stats;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ if (is_multi(bp)) {
+ num_stats = bnx2x_num_stat_queues(bp) *
+ BNX2X_NUM_Q_STATS;
+ if (!IS_MF_MODE_STAT(bp))
+ num_stats += BNX2X_NUM_STATS;
+ } else {
+ if (IS_MF_MODE_STAT(bp)) {
+ num_stats = 0;
+ for (i = 0; i < BNX2X_NUM_STATS; i++)
+ if (IS_FUNC_STAT(i))
+ num_stats++;
+ } else
+ num_stats = BNX2X_NUM_STATS;
+ }
+ return num_stats;
+
+ case ETH_SS_TEST:
+ return BNX2X_NUM_TESTS;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int i, j, k;
+ char queue_name[MAX_QUEUE_NAME_LEN+1];
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ if (is_multi(bp)) {
+ k = 0;
+ for_each_eth_queue(bp, i) {
+ memset(queue_name, 0, sizeof(queue_name));
+ sprintf(queue_name, "%d", i);
+ for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
+ snprintf(buf + (k + j)*ETH_GSTRING_LEN,
+ ETH_GSTRING_LEN,
+ bnx2x_q_stats_arr[j].string,
+ queue_name);
+ k += BNX2X_NUM_Q_STATS;
+ }
+ if (IS_MF_MODE_STAT(bp))
+ break;
+ for (j = 0; j < BNX2X_NUM_STATS; j++)
+ strcpy(buf + (k + j)*ETH_GSTRING_LEN,
+ bnx2x_stats_arr[j].string);
+ } else {
+ for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+ if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+ continue;
+ strcpy(buf + j*ETH_GSTRING_LEN,
+ bnx2x_stats_arr[i].string);
+ j++;
+ }
+ }
+ break;
+
+ case ETH_SS_TEST:
+ memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
+ break;
+ }
+}
+
+static void bnx2x_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *buf)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 *hw_stats, *offset;
+ int i, j, k;
+
+ if (is_multi(bp)) {
+ k = 0;
+ for_each_eth_queue(bp, i) {
+ hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
+ for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
+ if (bnx2x_q_stats_arr[j].size == 0) {
+ /* skip this counter */
+ buf[k + j] = 0;
+ continue;
+ }
+ offset = (hw_stats +
+ bnx2x_q_stats_arr[j].offset);
+ if (bnx2x_q_stats_arr[j].size == 4) {
+ /* 4-byte counter */
+ buf[k + j] = (u64) *offset;
+ continue;
+ }
+ /* 8-byte counter */
+ buf[k + j] = HILO_U64(*offset, *(offset + 1));
+ }
+ k += BNX2X_NUM_Q_STATS;
+ }
+ if (IS_MF_MODE_STAT(bp))
+ return;
+ hw_stats = (u32 *)&bp->eth_stats;
+ for (j = 0; j < BNX2X_NUM_STATS; j++) {
+ if (bnx2x_stats_arr[j].size == 0) {
+ /* skip this counter */
+ buf[k + j] = 0;
+ continue;
+ }
+ offset = (hw_stats + bnx2x_stats_arr[j].offset);
+ if (bnx2x_stats_arr[j].size == 4) {
+ /* 4-byte counter */
+ buf[k + j] = (u64) *offset;
+ continue;
+ }
+ /* 8-byte counter */
+ buf[k + j] = HILO_U64(*offset, *(offset + 1));
+ }
+ } else {
+ hw_stats = (u32 *)&bp->eth_stats;
+ for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+ if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+ continue;
+ if (bnx2x_stats_arr[i].size == 0) {
+ /* skip this counter */
+ buf[j] = 0;
+ j++;
+ continue;
+ }
+ offset = (hw_stats + bnx2x_stats_arr[i].offset);
+ if (bnx2x_stats_arr[i].size == 4) {
+ /* 4-byte counter */
+ buf[j] = (u64) *offset;
+ j++;
+ continue;
+ }
+ /* 8-byte counter */
+ buf[j] = HILO_U64(*offset, *(offset + 1));
+ j++;
+ }
+ }
+}
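
Counters are exported to ethtool as u64, but 8-byte firmware counters live as two consecutive u32 words with the high half first; HILO_U64() reassembles them. A sketch of that helper as assumed here:

#include <stdint.h>
#include <stdio.h>

/* Assumed equivalent of the driver's HILO_U64(): high word first. */
#define HILO_U64(hi, lo) (((uint64_t)(hi) << 32) | (uint32_t)(lo))

int main(void)
{
	uint32_t raw[2] = { 0x00000001u, 0x00000002u }; /* hi, lo in memory */

	printf("%llu\n",
	       (unsigned long long)HILO_U64(raw[0], raw[1])); /* 4294967298 */
	return 0;
}
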
+
+static int bnx2x_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ if (!bp->port.pmf)
+ return -EOPNOTSUPP;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
+
+ case ETHTOOL_ID_ON:
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
+ LED_MODE_ON, SPEED_1000);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
+ LED_MODE_FRONT_PANEL_OFF, 0);
+
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
+ LED_MODE_OPER,
+ bp->link_vars.line_speed);
+ }
+
+ return 0;
+}
+
+static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rules __always_unused)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = BNX2X_NUM_ETH_QUEUES(bp);
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bnx2x_get_rxfh_indir(struct net_device *dev,
+ struct ethtool_rxfh_indir *indir)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ size_t copy_size =
+ min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE);
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+ size_t i;
+
+ if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+ return -EOPNOTSUPP;
+
+ /* Get the current configuration of the RSS indirection table */
+ bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
+
+ /*
+ * We can't use a memcpy() as an internal storage of an
+ * indirection table is a u8 array while indir->ring_index
+ * points to an array of u32.
+ *
+ * Indirection table contains the FW Client IDs, so we need to
+ * align the returned table to the Client ID of the leading RSS
+ * queue.
+ */
+ for (i = 0; i < copy_size; i++)
+ indir->ring_index[i] = ind_table[i] - bp->fp->cl_id;
+
+ indir->size = T_ETH_INDIRECTION_TABLE_SIZE;
+
+ return 0;
+}
+
+static int bnx2x_set_rxfh_indir(struct net_device *dev,
+ const struct ethtool_rxfh_indir *indir)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ size_t i;
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+ u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
+
+ if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+ return -EOPNOTSUPP;
+
+ /* validate the size */
+ if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
+ /* validate the indices */
+ if (indir->ring_index[i] >= num_eth_queues)
+ return -EINVAL;
+ /*
+ * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
+ * as an internal storage of an indirection table is a u8 array
+ * while indir->ring_index points to an array of u32.
+ *
+ * Indirection table contains the FW Client IDs, so we need to
+ * align the received table to the Client ID of the leading RSS
+ * queue
+ */
+ ind_table[i] = indir->ring_index[i] + bp->fp->cl_id;
+ }
+
+ return bnx2x_config_rss_pf(bp, ind_table, false);
+}
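
Both RXFH handlers translate between ethtool's queue-relative ring indices and the firmware's absolute client IDs by offsetting with the client ID of the leading RSS queue: subtract it on the get path, add it on the set path. A round-trip sketch with a hypothetical leading client ID:

#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 8            /* T_ETH_INDIRECTION_TABLE_SIZE is 128 */

int main(void)
{
	uint8_t leading_cl_id = 17; /* hypothetical FW client ID of queue 0 */
	uint32_t ring_index[TABLE_SIZE] = { 0, 1, 2, 3, 0, 1, 2, 3 };
	uint8_t fw_table[TABLE_SIZE];

	/* set path: ethtool ring index -> FW client ID */
	for (int i = 0; i < TABLE_SIZE; i++)
		fw_table[i] = ring_index[i] + leading_cl_id;

	/* get path: FW client ID -> ethtool ring index */
	for (int i = 0; i < TABLE_SIZE; i++)
		printf("%u ", fw_table[i] - leading_cl_id); /* 0 1 2 3 ... */
	printf("\n");
	return 0;
}
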
+
+static const struct ethtool_ops bnx2x_ethtool_ops = {
+ .get_settings = bnx2x_get_settings,
+ .set_settings = bnx2x_set_settings,
+ .get_drvinfo = bnx2x_get_drvinfo,
+ .get_regs_len = bnx2x_get_regs_len,
+ .get_regs = bnx2x_get_regs,
+ .get_wol = bnx2x_get_wol,
+ .set_wol = bnx2x_set_wol,
+ .get_msglevel = bnx2x_get_msglevel,
+ .set_msglevel = bnx2x_set_msglevel,
+ .nway_reset = bnx2x_nway_reset,
+ .get_link = bnx2x_get_link,
+ .get_eeprom_len = bnx2x_get_eeprom_len,
+ .get_eeprom = bnx2x_get_eeprom,
+ .set_eeprom = bnx2x_set_eeprom,
+ .get_coalesce = bnx2x_get_coalesce,
+ .set_coalesce = bnx2x_set_coalesce,
+ .get_ringparam = bnx2x_get_ringparam,
+ .set_ringparam = bnx2x_set_ringparam,
+ .get_pauseparam = bnx2x_get_pauseparam,
+ .set_pauseparam = bnx2x_set_pauseparam,
+ .self_test = bnx2x_self_test,
+ .get_sset_count = bnx2x_get_sset_count,
+ .get_strings = bnx2x_get_strings,
+ .set_phys_id = bnx2x_set_phys_id,
+ .get_ethtool_stats = bnx2x_get_ethtool_stats,
+ .get_rxnfc = bnx2x_get_rxnfc,
+ .get_rxfh_indir = bnx2x_get_rxfh_indir,
+ .set_rxfh_indir = bnx2x_set_rxfh_indir,
+};
+
+void bnx2x_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
new file mode 100644
index 000000000000..998652a1b858
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -0,0 +1,410 @@
+/* bnx2x_fw_defs.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNX2X_FW_DEFS_H
+#define BNX2X_FW_DEFS_H
+
+#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[148].base)
+#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+ (IRO[147].base + ((assertListEntry) * IRO[147].m1))
+#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
+ (IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \
+ IRO[153].m2))
+#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
+ (IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \
+ IRO[154].m2))
+#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
+ (IRO[159].base + ((funcId) * IRO[159].m1))
+#define CSTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[149].base + ((funcId) * IRO[149].m1))
+#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
+#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
+ (IRO[315].base + ((pfId) * IRO[315].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[316].base + ((pfId) * IRO[316].m1))
+#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
+ (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
+#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
+ (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
+ (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
+ (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
+ (IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * IRO[307].m2))
+#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
+ (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
+ (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
+#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
+ (IRO[314].base + ((pfId) * IRO[314].m1))
+#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+ (IRO[306].base + ((pfId) * IRO[306].m1))
+#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+ (IRO[305].base + ((pfId) * IRO[305].m1))
+#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+ (IRO[304].base + ((pfId) * IRO[304].m1))
+#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+ (IRO[151].base + ((funcId) * IRO[151].m1))
+#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
+ (IRO[142].base + ((pfId) * IRO[142].m1))
+#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \
+ (IRO[143].base + ((pfId) * IRO[143].m1))
+#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
+ (IRO[141].base + ((pfId) * IRO[141].m1))
+#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size)
+#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
+ (IRO[144].base + ((pfId) * IRO[144].m1))
+#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size)
+#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \
+ (IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2))
+#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
+ (IRO[133].base + ((sbId) * IRO[133].m1))
+#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \
+ (IRO[134].base + ((sbId) * IRO[134].m1))
+#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \
+ (IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2))
+#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
+ (IRO[132].base + ((sbId) * IRO[132].m1))
+#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size)
+#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
+ (IRO[137].base + ((sbId) * IRO[137].m1))
+#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size)
+#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
+ (IRO[155].base + ((vfId) * IRO[155].m1))
+#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
+ (IRO[156].base + ((vfId) * IRO[156].m1))
+#define CSTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[150].base + ((funcId) * IRO[150].m1))
+#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
+#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
+ (IRO[203].base + ((pfId) * IRO[203].m1))
+#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
+#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+ (IRO[101].base + ((assertListEntry) * IRO[101].m1))
+#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[107].base)
+#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
+ (IRO[108].base)
+#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
+ (IRO[201].base + ((pfId) * IRO[201].m1))
+#define TSTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[103].base + ((funcId) * IRO[103].m1))
+#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
+ (IRO[271].base + ((pfId) * IRO[271].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
+ (IRO[272].base + ((pfId) * IRO[272].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
+ (IRO[273].base + ((pfId) * IRO[273].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+ (IRO[274].base + ((pfId) * IRO[274].m1))
+#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+ (IRO[270].base + ((pfId) * IRO[270].m1))
+#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+ (IRO[269].base + ((pfId) * IRO[269].m1))
+#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+ (IRO[268].base + ((pfId) * IRO[268].m1))
+#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+ (IRO[267].base + ((pfId) * IRO[267].m1))
+#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
+ (IRO[276].base + ((pfId) * IRO[276].m1))
+#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
+ (IRO[263].base + ((pfId) * IRO[263].m1))
+#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+ (IRO[264].base + ((pfId) * IRO[264].m1))
+#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
+ (IRO[265].base + ((pfId) * IRO[265].m1))
+#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+ (IRO[266].base + ((pfId) * IRO[266].m1))
+#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
+ (IRO[202].base + ((pfId) * IRO[202].m1))
+#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+ (IRO[105].base + ((funcId) * IRO[105].m1))
+#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
+ (IRO[216].base + ((pfId) * IRO[216].m1))
+#define TSTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[104].base + ((funcId) * IRO[104].m1))
+#define USTORM_AGG_DATA_OFFSET (IRO[206].base)
+#define USTORM_AGG_DATA_SIZE (IRO[206].size)
+#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base)
+#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+ (IRO[176].base + ((assertListEntry) * IRO[176].m1))
+#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \
+ (IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \
+ IRO[205].m2))
+#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
+ (IRO[183].base + ((portId) * IRO[183].m1))
+#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
+ (IRO[317].base + ((pfId) * IRO[317].m1))
+#define USTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[178].base + ((funcId) * IRO[178].m1))
+#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
+ (IRO[281].base + ((pfId) * IRO[281].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[282].base + ((pfId) * IRO[282].m1))
+#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
+ (IRO[286].base + ((pfId) * IRO[286].m1))
+#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
+ (IRO[283].base + ((pfId) * IRO[283].m1))
+#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+ (IRO[279].base + ((pfId) * IRO[279].m1))
+#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+ (IRO[278].base + ((pfId) * IRO[278].m1))
+#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+ (IRO[277].base + ((pfId) * IRO[277].m1))
+#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
+ (IRO[280].base + ((pfId) * IRO[280].m1))
+#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
+ (IRO[284].base + ((pfId) * IRO[284].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+ (IRO[285].base + ((pfId) * IRO[285].m1))
+#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
+ (IRO[182].base + ((pfId) * IRO[182].m1))
+#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+ (IRO[180].base + ((funcId) * IRO[180].m1))
+#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
+ (IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \
+ IRO[209].m2))
+#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
+ (IRO[210].base + ((qzoneId) * IRO[210].m1))
+#define USTORM_TPA_BTR_OFFSET (IRO[207].base)
+#define USTORM_TPA_BTR_SIZE (IRO[207].size)
+#define USTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[179].base + ((funcId) * IRO[179].m1))
+#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
+#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base)
+#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base)
+#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+ (IRO[50].base + ((assertListEntry) * IRO[50].m1))
+#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \
+ (IRO[43].base + ((portId) * IRO[43].m1))
+#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \
+ (IRO[45].base + ((pfId) * IRO[45].m1))
+#define XSTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[47].base + ((funcId) * IRO[47].m1))
+#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
+ (IRO[294].base + ((pfId) * IRO[294].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
+ (IRO[297].base + ((pfId) * IRO[297].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
+ (IRO[298].base + ((pfId) * IRO[298].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+ (IRO[299].base + ((pfId) * IRO[299].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+ (IRO[300].base + ((pfId) * IRO[300].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+ (IRO[301].base + ((pfId) * IRO[301].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+ (IRO[302].base + ((pfId) * IRO[302].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+ (IRO[303].base + ((pfId) * IRO[303].m1))
+#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+ (IRO[293].base + ((pfId) * IRO[293].m1))
+#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+ (IRO[292].base + ((pfId) * IRO[292].m1))
+#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+ (IRO[291].base + ((pfId) * IRO[291].m1))
+#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
+ (IRO[296].base + ((pfId) * IRO[296].m1))
+#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
+ (IRO[295].base + ((pfId) * IRO[295].m1))
+#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
+ (IRO[290].base + ((pfId) * IRO[290].m1))
+#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
+ (IRO[289].base + ((pfId) * IRO[289].m1))
+#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
+ (IRO[288].base + ((pfId) * IRO[288].m1))
+#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
+ (IRO[287].base + ((pfId) * IRO[287].m1))
+#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
+ (IRO[44].base + ((pfId) * IRO[44].m1))
+#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+ (IRO[49].base + ((funcId) * IRO[49].m1))
+#define XSTORM_SPQ_DATA_OFFSET(funcId) \
+ (IRO[32].base + ((funcId) * IRO[32].m1))
+#define XSTORM_SPQ_DATA_SIZE (IRO[32].size)
+#define XSTORM_SPQ_PAGE_BASE_OFFSET(funcId) \
+ (IRO[30].base + ((funcId) * IRO[30].m1))
+#define XSTORM_SPQ_PROD_OFFSET(funcId) \
+ (IRO[31].base + ((funcId) * IRO[31].m1))
+#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
+ (IRO[211].base + ((portId) * IRO[211].m1))
+#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
+ (IRO[212].base + ((portId) * IRO[212].m1))
+#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
+ (IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \
+ IRO[214].m2))
+#define XSTORM_VF_TO_PF_OFFSET(funcId) \
+ (IRO[48].base + ((funcId) * IRO[48].m1))
+#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
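
Every *_OFFSET macro above resolves to base-plus-strides arithmetic over a firmware-supplied IRO (internal RAM offset) entry, so the driver stays correct when the firmware relocates its STORM data. A sketch of the addressing with a simplified, made-up IRO entry (the real struct carries more stride fields):

#include <stdint.h>
#include <stdio.h>

struct iro {
	uint32_t base;  /* byte offset of the array in STORM RAM */
	uint16_t m1;    /* stride for the first index */
	uint16_t m2;    /* stride for the second index */
	uint16_t size;
};

/* Two-index form, e.g. CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex). */
static uint32_t iro_addr2(const struct iro *e, uint32_t i1, uint32_t i2)
{
	return e->base + i1 * e->m1 + i2 * e->m2;
}

int main(void)
{
	struct iro ent = { 0x6000, 0x40, 0x8, 0x8 }; /* made-up values */

	printf("0x%x\n", iro_addr2(&ent, 3, 2)); /* 0x6000+0xc0+0x10 = 0x60d0 */
	return 0;
}
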
+
+/**
+ * This file defines HSI constants for the ETH flow
+ */
+#ifdef _EVEREST_MICROCODE
+#include "Microcode\Generated\DataTypes\eth_rx_bd.h"
+#include "Microcode\Generated\DataTypes\eth_tx_bd.h"
+#include "Microcode\Generated\DataTypes\eth_rx_cqe.h"
+#include "Microcode\Generated\DataTypes\eth_rx_sge.h"
+#include "Microcode\Generated\DataTypes\eth_rx_cqe_next_page.h"
+#endif
+
+
+/* Ethernet Ring parameters */
+#define X_ETH_LOCAL_RING_SIZE 13
+#define FIRST_BD_IN_PKT 0
+#define PARSE_BD_INDEX 1
+#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8))
+#define U_ETH_NUM_OF_SGES_TO_FETCH 8
+#define U_ETH_MAX_SGES_FOR_PACKET 3
+
+/* Rx ring params */
+#define U_ETH_LOCAL_BD_RING_SIZE 8
+#define U_ETH_LOCAL_SGE_RING_SIZE 10
+#define U_ETH_SGL_SIZE 8
+	/* The FW will pad the buffer with this value, so the IP header \
+	will be aligned to 4 bytes */
+#define IP_HEADER_ALIGNMENT_PADDING 2
+
+#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \
+ (0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1))
+
+#define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8))
+#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
+#define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))
+
+#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1)
+#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1)
+#define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1)
+
+#define U_ETH_UNDEFINED_Q 0xFF
+
+#define T_ETH_INDIRECTION_TABLE_SIZE 128
+#define T_ETH_RSS_KEY 10
+#define ETH_NUM_OF_RSS_ENGINES_E2 72
+
+#define FILTER_RULES_COUNT 16
+#define MULTICAST_RULES_COUNT 16
+#define CLASSIFY_RULES_COUNT 16
+
+/* The CRC32 seed used for the hash (reduction) of multicast addresses */
+#define ETH_CRC32_HASH_SEED 0x00000000
+
+#define ETH_CRC32_HASH_BIT_SIZE (8)
+#define ETH_CRC32_HASH_MASK EVAL((1<<ETH_CRC32_HASH_BIT_SIZE)-1)
+
+/* Maximal L2 clients supported */
+#define ETH_MAX_RX_CLIENTS_E1 18
+#define ETH_MAX_RX_CLIENTS_E1H 28
+#define ETH_MAX_RX_CLIENTS_E2 152
+
+/* Maximal statistics client Ids */
+#define MAX_STAT_COUNTER_ID_E1 36
+#define MAX_STAT_COUNTER_ID_E1H 56
+#define MAX_STAT_COUNTER_ID_E2 140
+
+#define MAX_MAC_CREDIT_E1 192 /* Per Chip */
+#define MAX_MAC_CREDIT_E1H 256 /* Per Chip */
+#define MAX_MAC_CREDIT_E2 272 /* Per Path */
+#define MAX_VLAN_CREDIT_E1 0 /* Per Chip */
+#define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */
+#define MAX_VLAN_CREDIT_E2 272 /* Per Path */
+
+
+/* Maximal aggregation queues supported */
+#define ETH_MAX_AGGREGATION_QUEUES_E1 32
+#define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64
+
+
+#define ETH_NUM_OF_MCAST_BINS 256
+#define ETH_NUM_OF_MCAST_ENGINES_E2 72
+
+#define ETH_MIN_RX_CQES_WITHOUT_TPA (MAX_RAMRODS_PER_PORT + 3)
+#define ETH_MIN_RX_CQES_WITH_TPA_E1 \
+ (ETH_MAX_AGGREGATION_QUEUES_E1 + ETH_MIN_RX_CQES_WITHOUT_TPA)
+#define ETH_MIN_RX_CQES_WITH_TPA_E1H_E2 \
+ (ETH_MAX_AGGREGATION_QUEUES_E1H_E2 + ETH_MIN_RX_CQES_WITHOUT_TPA)
+
+#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0
+
+
+/**
+ * This file defines HSI constants common to all microcode flows
+ */
+
+#define PROTOCOL_STATE_BIT_OFFSET 6
+
+#define ETH_STATE (ETH_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
+#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
+#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
+
+/* microcode fixed page size of 4K (chains and ring segments) */
+#define MC_PAGE_SIZE 4096
+
+/* Number of indices per slow-path SB */
+#define HC_SP_SB_MAX_INDICES 16
+
+/* Number of indices per SB */
+#define HC_SB_MAX_INDICES_E1X 8
+#define HC_SB_MAX_INDICES_E2 8
+
+#define HC_SB_MAX_SB_E1X 32
+#define HC_SB_MAX_SB_E2 136
+
+#define HC_SP_SB_ID 0xde
+
+#define HC_SB_MAX_SM 2
+
+#define HC_SB_MAX_DYNAMIC_INDICES 4
+
+/* max number of slow path commands per port */
+#define MAX_RAMRODS_PER_PORT 8
+
+
+/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
+
+#define TIMERS_TICK_SIZE_CHIP (1e-3)
+
+#define TSEMI_CLK1_RESUL_CHIP (1e-3)
+
+#define XSEMI_CLK1_RESUL_CHIP (1e-3)
+
+#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
+
+/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
+
+#define XSTORM_IP_ID_ROLL_HALF 0x8000
+#define XSTORM_IP_ID_ROLL_ALL 0
+
+#define FW_LOG_LIST_SIZE 50
+
+#define NUM_OF_SAFC_BITS 16
+#define MAX_COS_NUMBER 4
+#define MAX_TRAFFIC_TYPES 8
+#define MAX_PFC_PRIORITIES 8
+
+	/* used by the traffic_type_to_priority[] array to mark a traffic type \
+	that is not mapped to a priority */
+#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
+
+
+#define C_ERES_PER_PAGE \
+ (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
+#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
+
+#define STATS_QUERY_CMD_COUNT 16
+
+#define NIV_LIST_TABLE_SIZE 4096
+
+#define INVALID_VNIC_ID 0xFF
+
+
+#define UNDEF_IRO 0x80000000
+
+
+#endif /* BNX2X_FW_DEFS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
new file mode 100644
index 000000000000..f4a07fbaed05
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -0,0 +1,38 @@
+/* bnx2x_fw_file_hdr.h: FW binary file header structure.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Based on the original idea of John Wright <john.wright@hp.com>.
+ */
+
+#ifndef BNX2X_INIT_FILE_HDR_H
+#define BNX2X_INIT_FILE_HDR_H
+
+struct bnx2x_fw_file_section {
+ __be32 len;
+ __be32 offset;
+};
+
+struct bnx2x_fw_file_hdr {
+ struct bnx2x_fw_file_section init_ops;
+ struct bnx2x_fw_file_section init_ops_offsets;
+ struct bnx2x_fw_file_section init_data;
+ struct bnx2x_fw_file_section tsem_int_table_data;
+ struct bnx2x_fw_file_section tsem_pram_data;
+ struct bnx2x_fw_file_section usem_int_table_data;
+ struct bnx2x_fw_file_section usem_pram_data;
+ struct bnx2x_fw_file_section csem_int_table_data;
+ struct bnx2x_fw_file_section csem_pram_data;
+ struct bnx2x_fw_file_section xsem_int_table_data;
+ struct bnx2x_fw_file_section xsem_pram_data;
+ struct bnx2x_fw_file_section iro_arr;
+ struct bnx2x_fw_file_section fw_version;
+};
+
+#endif /* BNX2X_INIT_FILE_HDR_H */
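
The header is a fixed table of big-endian (len, offset) pairs at the start of the firmware blob, so locating any section is one bounds check and two byte-order conversions. A userspace sketch of that lookup (the mirrored structure and names below are stand-ins, not driver code):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>   /* ntohl(), htonl() */

struct fw_file_section {            /* mirrors bnx2x_fw_file_section */
	uint32_t len;               /* big-endian on disk */
	uint32_t offset;
};

/* Return a pointer to a section's payload inside the blob, or NULL. */
static const uint8_t *fw_section(const uint8_t *blob, size_t blob_len,
				 const struct fw_file_section *sec,
				 uint32_t *out_len)
{
	uint32_t len = ntohl(sec->len);
	uint32_t off = ntohl(sec->offset);

	if (off > blob_len || len > blob_len - off)
		return NULL;            /* section runs past the file */
	*out_len = len;
	return blob + off;
}

int main(void)
{
	uint8_t blob[64] = {0};
	struct fw_file_section sec = { htonl(8), htonl(16) };
	uint32_t len;
	const uint8_t *p = fw_section(blob, sizeof(blob), &sec, &len);

	printf("%s, len %u\n", p ? "found" : "missing", p ? len : 0);
	return 0;
}
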
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
new file mode 100644
index 000000000000..e44b858ff12f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -0,0 +1,5133 @@
+/* bnx2x_hsi.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+#ifndef BNX2X_HSI_H
+#define BNX2X_HSI_H
+
+#include "bnx2x_fw_defs.h"
+
+#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
+
+struct license_key {
+ u32 reserved[6];
+
+ u32 max_iscsi_conn;
+#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF
+#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT 0
+#define BNX2X_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000
+#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT 16
+
+ u32 reserved_a;
+
+ u32 max_fcoe_conn;
+#define BNX2X_MAX_FCOE_TRGT_CONN_MASK 0xFFFF
+#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT 0
+#define BNX2X_MAX_FCOE_INIT_CONN_MASK 0xFFFF0000
+#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT 16
+
+ u32 reserved_b[4];
+};
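
The fields above follow the MASK/SHIFT convention used throughout this header: extract a packed field by masking the word and shifting it down. A sketch of the idiom with stand-in names (the real ones carry the BNX2X_ prefix):

#include <stdint.h>
#include <stdio.h>

#define MAX_ISCSI_INIT_CONN_MASK  0xFFFF0000u
#define MAX_ISCSI_INIT_CONN_SHIFT 16

/* Generic field extractor built on the NAME_MASK / NAME_SHIFT pairing. */
#define GET_FIELD(word, name) \
	(((word) & name##_MASK) >> name##_SHIFT)

int main(void)
{
	uint32_t max_iscsi_conn = 0x00800040u; /* init=128, target=64 */

	printf("initiator conns: %u\n",
	       (unsigned)GET_FIELD(max_iscsi_conn, MAX_ISCSI_INIT_CONN));
	return 0;
}
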
+
+
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_MAX 2
+
+/****************************************************************************
+ * Shared HW configuration *
+ ****************************************************************************/
+#define PIN_CFG_NA 0x00000000
+#define PIN_CFG_GPIO0_P0 0x00000001
+#define PIN_CFG_GPIO1_P0 0x00000002
+#define PIN_CFG_GPIO2_P0 0x00000003
+#define PIN_CFG_GPIO3_P0 0x00000004
+#define PIN_CFG_GPIO0_P1 0x00000005
+#define PIN_CFG_GPIO1_P1 0x00000006
+#define PIN_CFG_GPIO2_P1 0x00000007
+#define PIN_CFG_GPIO3_P1 0x00000008
+#define PIN_CFG_EPIO0 0x00000009
+#define PIN_CFG_EPIO1 0x0000000a
+#define PIN_CFG_EPIO2 0x0000000b
+#define PIN_CFG_EPIO3 0x0000000c
+#define PIN_CFG_EPIO4 0x0000000d
+#define PIN_CFG_EPIO5 0x0000000e
+#define PIN_CFG_EPIO6 0x0000000f
+#define PIN_CFG_EPIO7 0x00000010
+#define PIN_CFG_EPIO8 0x00000011
+#define PIN_CFG_EPIO9 0x00000012
+#define PIN_CFG_EPIO10 0x00000013
+#define PIN_CFG_EPIO11 0x00000014
+#define PIN_CFG_EPIO12 0x00000015
+#define PIN_CFG_EPIO13 0x00000016
+#define PIN_CFG_EPIO14 0x00000017
+#define PIN_CFG_EPIO15 0x00000018
+#define PIN_CFG_EPIO16 0x00000019
+#define PIN_CFG_EPIO17 0x0000001a
+#define PIN_CFG_EPIO18 0x0000001b
+#define PIN_CFG_EPIO19 0x0000001c
+#define PIN_CFG_EPIO20 0x0000001d
+#define PIN_CFG_EPIO21 0x0000001e
+#define PIN_CFG_EPIO22 0x0000001f
+#define PIN_CFG_EPIO23 0x00000020
+#define PIN_CFG_EPIO24 0x00000021
+#define PIN_CFG_EPIO25 0x00000022
+#define PIN_CFG_EPIO26 0x00000023
+#define PIN_CFG_EPIO27 0x00000024
+#define PIN_CFG_EPIO28 0x00000025
+#define PIN_CFG_EPIO29 0x00000026
+#define PIN_CFG_EPIO30 0x00000027
+#define PIN_CFG_EPIO31 0x00000028
+
+/* EPIO definition */
+#define EPIO_CFG_NA 0x00000000
+#define EPIO_CFG_EPIO0 0x00000001
+#define EPIO_CFG_EPIO1 0x00000002
+#define EPIO_CFG_EPIO2 0x00000003
+#define EPIO_CFG_EPIO3 0x00000004
+#define EPIO_CFG_EPIO4 0x00000005
+#define EPIO_CFG_EPIO5 0x00000006
+#define EPIO_CFG_EPIO6 0x00000007
+#define EPIO_CFG_EPIO7 0x00000008
+#define EPIO_CFG_EPIO8 0x00000009
+#define EPIO_CFG_EPIO9 0x0000000a
+#define EPIO_CFG_EPIO10 0x0000000b
+#define EPIO_CFG_EPIO11 0x0000000c
+#define EPIO_CFG_EPIO12 0x0000000d
+#define EPIO_CFG_EPIO13 0x0000000e
+#define EPIO_CFG_EPIO14 0x0000000f
+#define EPIO_CFG_EPIO15 0x00000010
+#define EPIO_CFG_EPIO16 0x00000011
+#define EPIO_CFG_EPIO17 0x00000012
+#define EPIO_CFG_EPIO18 0x00000013
+#define EPIO_CFG_EPIO19 0x00000014
+#define EPIO_CFG_EPIO20 0x00000015
+#define EPIO_CFG_EPIO21 0x00000016
+#define EPIO_CFG_EPIO22 0x00000017
+#define EPIO_CFG_EPIO23 0x00000018
+#define EPIO_CFG_EPIO24 0x00000019
+#define EPIO_CFG_EPIO25 0x0000001a
+#define EPIO_CFG_EPIO26 0x0000001b
+#define EPIO_CFG_EPIO27 0x0000001c
+#define EPIO_CFG_EPIO28 0x0000001d
+#define EPIO_CFG_EPIO29 0x0000001e
+#define EPIO_CFG_EPIO30 0x0000001f
+#define EPIO_CFG_EPIO31 0x00000020
+
+
+struct shared_hw_cfg { /* NVRAM Offset */
+ /* Up to 16 bytes of NULL-terminated string */
+ u8 part_num[16]; /* 0x104 */
+
+ u32 config; /* 0x114 */
+ #define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 0x00000001
+ #define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT 0
+ #define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 0x00000000
+ #define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 0x00000001
+ #define SHARED_HW_CFG_MCP_RST_ON_CORE_RST_EN 0x00000002
+
+ #define SHARED_HW_CFG_PORT_SWAP 0x00000004
+
+ #define SHARED_HW_CFG_BEACON_WOL_EN 0x00000008
+
+ #define SHARED_HW_CFG_PCIE_GEN3_DISABLED 0x00000000
+ #define SHARED_HW_CFG_PCIE_GEN3_ENABLED 0x00000010
+
+ #define SHARED_HW_CFG_MFW_SELECT_MASK 0x00000700
+ #define SHARED_HW_CFG_MFW_SELECT_SHIFT 8
+	/* Whichever MFW is found in NVM
+	(if multiple are found, the priority order is: NC-SI, UMP, IPMI) */
+ #define SHARED_HW_CFG_MFW_SELECT_DEFAULT 0x00000000
+ #define SHARED_HW_CFG_MFW_SELECT_NC_SI 0x00000100
+ #define SHARED_HW_CFG_MFW_SELECT_UMP 0x00000200
+ #define SHARED_HW_CFG_MFW_SELECT_IPMI 0x00000300
+ /* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI
+	(can only be used when an add-in board, not BMC, pulls down SPIO4) */
+ #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI 0x00000400
+ /* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI
+	(can only be used when an add-in board, not BMC, pulls down SPIO4) */
+ #define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI 0x00000500
+ /* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP
+	(can only be used when an add-in board, not BMC, pulls down SPIO4) */
+ #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP 0x00000600
+
+ #define SHARED_HW_CFG_LED_MODE_MASK 0x000f0000
+ #define SHARED_HW_CFG_LED_MODE_SHIFT 16
+ #define SHARED_HW_CFG_LED_MAC1 0x00000000
+ #define SHARED_HW_CFG_LED_PHY1 0x00010000
+ #define SHARED_HW_CFG_LED_PHY2 0x00020000
+ #define SHARED_HW_CFG_LED_PHY3 0x00030000
+ #define SHARED_HW_CFG_LED_MAC2 0x00040000
+ #define SHARED_HW_CFG_LED_PHY4 0x00050000
+ #define SHARED_HW_CFG_LED_PHY5 0x00060000
+ #define SHARED_HW_CFG_LED_PHY6 0x00070000
+ #define SHARED_HW_CFG_LED_MAC3 0x00080000
+ #define SHARED_HW_CFG_LED_PHY7 0x00090000
+ #define SHARED_HW_CFG_LED_PHY9 0x000a0000
+ #define SHARED_HW_CFG_LED_PHY11 0x000b0000
+ #define SHARED_HW_CFG_LED_MAC4 0x000c0000
+ #define SHARED_HW_CFG_LED_PHY8 0x000d0000
+ #define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000
+
+
+ #define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000
+ #define SHARED_HW_CFG_AN_ENABLE_SHIFT 24
+ #define SHARED_HW_CFG_AN_ENABLE_CL37 0x01000000
+ #define SHARED_HW_CFG_AN_ENABLE_CL73 0x02000000
+ #define SHARED_HW_CFG_AN_ENABLE_BAM 0x04000000
+ #define SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 0x08000000
+ #define SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT 0x10000000
+ #define SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY 0x20000000
+
+ #define SHARED_HW_CFG_SRIOV_MASK 0x40000000
+ #define SHARED_HW_CFG_SRIOV_DISABLED 0x00000000
+ #define SHARED_HW_CFG_SRIOV_ENABLED 0x40000000
+
+ #define SHARED_HW_CFG_ATC_MASK 0x80000000
+ #define SHARED_HW_CFG_ATC_DISABLED 0x00000000
+ #define SHARED_HW_CFG_ATC_ENABLED 0x80000000
+
+ u32 config2; /* 0x118 */
+ /* one time auto detect grace period (in sec) */
+ #define SHARED_HW_CFG_GRACE_PERIOD_MASK 0x000000ff
+ #define SHARED_HW_CFG_GRACE_PERIOD_SHIFT 0
+
+ #define SHARED_HW_CFG_PCIE_GEN2_ENABLED 0x00000100
+ #define SHARED_HW_CFG_PCIE_GEN2_DISABLED 0x00000000
+
+ /* The default value for the core clock is 250MHz and it is
+ achieved by setting the clock change to 4 */
+ #define SHARED_HW_CFG_CLOCK_CHANGE_MASK 0x00000e00
+ #define SHARED_HW_CFG_CLOCK_CHANGE_SHIFT 9
+
+ #define SHARED_HW_CFG_SMBUS_TIMING_MASK 0x00001000
+ #define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000
+ #define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000
+
+ #define SHARED_HW_CFG_HIDE_PORT1 0x00002000
+
+ #define SHARED_HW_CFG_WOL_CAPABLE_MASK 0x00004000
+ #define SHARED_HW_CFG_WOL_CAPABLE_DISABLED 0x00000000
+ #define SHARED_HW_CFG_WOL_CAPABLE_ENABLED 0x00004000
+
+ /* Output low when PERST is asserted */
+ #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_MASK 0x00008000
+ #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_DISABLED 0x00000000
+ #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_ENABLED 0x00008000
+
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_MASK 0x00070000
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_SHIFT 16
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_HW 0x00000000
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_0DB 0x00010000
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_3_5DB 0x00020000
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_6_0DB 0x00030000
+
+ /* The fan failure mechanism is usually related to the PHY type
+ since the power consumption of the board is determined by the PHY.
+	   Currently, a fan is required for most designs with SFX7101, BCM8727
+ and BCM8481. If a fan is not required for a board which uses one
+ of those PHYs, this field should be set to "Disabled". If a fan is
+ required for a different PHY type, this option should be set to
+ "Enabled". The fan failure indication is expected on SPIO5 */
+ #define SHARED_HW_CFG_FAN_FAILURE_MASK 0x00180000
+ #define SHARED_HW_CFG_FAN_FAILURE_SHIFT 19
+ #define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE 0x00000000
+ #define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000
+ #define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000
+
+ /* ASPM Power Management support */
+ #define SHARED_HW_CFG_ASPM_SUPPORT_MASK 0x00600000
+ #define SHARED_HW_CFG_ASPM_SUPPORT_SHIFT 21
+ #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_ENABLED 0x00000000
+ #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_DISABLED 0x00200000
+ #define SHARED_HW_CFG_ASPM_SUPPORT_L1_DISABLED 0x00400000
+ #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_DISABLED 0x00600000
+
+ /* The value of PM_TL_IGNORE_REQS (bit0) in PCI register
+ tl_control_0 (register 0x2800) */
+ #define SHARED_HW_CFG_PREVENT_L1_ENTRY_MASK 0x00800000
+ #define SHARED_HW_CFG_PREVENT_L1_ENTRY_DISABLED 0x00000000
+ #define SHARED_HW_CFG_PREVENT_L1_ENTRY_ENABLED 0x00800000
+
+ #define SHARED_HW_CFG_PORT_MODE_MASK 0x01000000
+ #define SHARED_HW_CFG_PORT_MODE_2 0x00000000
+ #define SHARED_HW_CFG_PORT_MODE_4 0x01000000
+
+ #define SHARED_HW_CFG_PATH_SWAP_MASK 0x02000000
+ #define SHARED_HW_CFG_PATH_SWAP_DISABLED 0x00000000
+ #define SHARED_HW_CFG_PATH_SWAP_ENABLED 0x02000000
+
+ /* Set the MDC/MDIO access for the first external phy */
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000
+
+ /* Set the MDC/MDIO access for the second external phy */
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000
+
+
+ u32 power_dissipated; /* 0x11c */
+ #define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK 0x00ff0000
+ #define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT 16
+ #define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE 0x00000000
+ #define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT 0x00010000
+ #define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT 0x00020000
+ #define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT 0x00030000
+
+ #define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000
+ #define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24
+
+ u32 ump_nc_si_config; /* 0x120 */
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT 0
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC 0x00000000
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY 0x00000001
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII 0x00000000
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII 0x00000002
+
+ #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_MASK 0x00000f00
+ #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_SHIFT 8
+
+ #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_MASK 0x00ff0000
+ #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_SHIFT 16
+ #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_NONE 0x00000000
+ #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_BCM5221 0x00010000
+
+ u32 board; /* 0x124 */
+ #define SHARED_HW_CFG_E3_I2C_MUX0_MASK 0x0000003F
+ #define SHARED_HW_CFG_E3_I2C_MUX0_SHIFT 0
+ #define SHARED_HW_CFG_E3_I2C_MUX1_MASK 0x00000FC0
+ #define SHARED_HW_CFG_E3_I2C_MUX1_SHIFT 6
+ /* Use the PIN_CFG_XXX defines on top */
+ #define SHARED_HW_CFG_BOARD_REV_MASK 0x00ff0000
+ #define SHARED_HW_CFG_BOARD_REV_SHIFT 16
+
+ #define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK 0x0f000000
+ #define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT 24
+
+ #define SHARED_HW_CFG_BOARD_MINOR_VER_MASK 0xf0000000
+ #define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT 28
+
+ u32 wc_lane_config; /* 0x128 */
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_MASK 0x0000FFFF
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_SHIFT 0
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_32103210 0x00001b1b
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_32100123 0x00001be4
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_01233210 0x0000e41b
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_01230123 0x0000e4e4
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000FF
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000FF00
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8
+
+ /* TX lane Polarity swap */
+ #define SHARED_HW_CFG_TX_LANE0_POL_FLIP_ENABLED 0x00010000
+ #define SHARED_HW_CFG_TX_LANE1_POL_FLIP_ENABLED 0x00020000
+ #define SHARED_HW_CFG_TX_LANE2_POL_FLIP_ENABLED 0x00040000
+ #define SHARED_HW_CFG_TX_LANE3_POL_FLIP_ENABLED 0x00080000
+	/* RX lane Polarity swap */
+ #define SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED 0x00100000
+ #define SHARED_HW_CFG_RX_LANE1_POL_FLIP_ENABLED 0x00200000
+ #define SHARED_HW_CFG_RX_LANE2_POL_FLIP_ENABLED 0x00400000
+ #define SHARED_HW_CFG_RX_LANE3_POL_FLIP_ENABLED 0x00800000
+
+ /* Selects the port layout of the board */
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_MASK 0x0F000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_SHIFT 24
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_01 0x00000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_10 0x01000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_0123 0x02000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_1032 0x03000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_2301 0x04000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_3210 0x05000000
+};
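+
+/*
+ * Illustrative sketch (not part of the HSI; the helper name is
+ * hypothetical): every configuration word above is decoded with the same
+ * mask-and-shift idiom, e.g. for the LED mode field of config:
+ */
+static inline u32 shared_hw_cfg_led_mode(u32 config)
+{
+	/* 0x0 = LED_MAC1, 0x1 = LED_PHY1, ... 0xe = LED_EXTPHY1 */
+	return (config & SHARED_HW_CFG_LED_MODE_MASK) >>
+	       SHARED_HW_CFG_LED_MODE_SHIFT;
+}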
+
+
+/****************************************************************************
+ * Port HW configuration *
+ ****************************************************************************/
+struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
+
+ u32 pci_id;
+ #define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xffff0000
+ #define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000ffff
+
+ u32 pci_sub_id;
+ #define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK 0xffff0000
+ #define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK 0x0000ffff
+
+ u32 power_dissipated;
+ #define PORT_HW_CFG_POWER_DIS_D0_MASK 0x000000ff
+ #define PORT_HW_CFG_POWER_DIS_D0_SHIFT 0
+ #define PORT_HW_CFG_POWER_DIS_D1_MASK 0x0000ff00
+ #define PORT_HW_CFG_POWER_DIS_D1_SHIFT 8
+ #define PORT_HW_CFG_POWER_DIS_D2_MASK 0x00ff0000
+ #define PORT_HW_CFG_POWER_DIS_D2_SHIFT 16
+ #define PORT_HW_CFG_POWER_DIS_D3_MASK 0xff000000
+ #define PORT_HW_CFG_POWER_DIS_D3_SHIFT 24
+
+ u32 power_consumed;
+ #define PORT_HW_CFG_POWER_CONS_D0_MASK 0x000000ff
+ #define PORT_HW_CFG_POWER_CONS_D0_SHIFT 0
+ #define PORT_HW_CFG_POWER_CONS_D1_MASK 0x0000ff00
+ #define PORT_HW_CFG_POWER_CONS_D1_SHIFT 8
+ #define PORT_HW_CFG_POWER_CONS_D2_MASK 0x00ff0000
+ #define PORT_HW_CFG_POWER_CONS_D2_SHIFT 16
+ #define PORT_HW_CFG_POWER_CONS_D3_MASK 0xff000000
+ #define PORT_HW_CFG_POWER_CONS_D3_SHIFT 24
+
+ u32 mac_upper;
+ #define PORT_HW_CFG_UPPERMAC_MASK 0x0000ffff
+ #define PORT_HW_CFG_UPPERMAC_SHIFT 0
+ u32 mac_lower;
+
+ u32 iscsi_mac_upper; /* Upper 16 bits are always zeroes */
+ u32 iscsi_mac_lower;
+
+ u32 rdma_mac_upper; /* Upper 16 bits are always zeroes */
+ u32 rdma_mac_lower;
+
+ u32 serdes_config;
+ #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000ffff
+ #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT 0
+
+ #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK 0xffff0000
+ #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16
+
+
+ /* Default values: 2P-64, 4P-32 */
+ u32 pf_config; /* 0x158 */
+ #define PORT_HW_CFG_PF_NUM_VF_MASK 0x0000007F
+ #define PORT_HW_CFG_PF_NUM_VF_SHIFT 0
+
+	/* Default value: 17 */
+ #define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_MASK 0x00007F00
+ #define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_SHIFT 8
+
+ #define PORT_HW_CFG_ENABLE_FLR_MASK 0x00010000
+ #define PORT_HW_CFG_FLR_ENABLED 0x00010000
+
+ u32 vf_config; /* 0x15C */
+ #define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_MASK 0x0000007F
+ #define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_SHIFT 0
+
+ #define PORT_HW_CFG_VF_PCI_DEVICE_ID_MASK 0xFFFF0000
+ #define PORT_HW_CFG_VF_PCI_DEVICE_ID_SHIFT 16
+
+ u32 mf_pci_id; /* 0x160 */
+ #define PORT_HW_CFG_MF_PCI_DEVICE_ID_MASK 0x0000FFFF
+ #define PORT_HW_CFG_MF_PCI_DEVICE_ID_SHIFT 0
+
+ /* Controls the TX laser of the SFP+ module */
+ u32 sfp_ctrl; /* 0x164 */
+ #define PORT_HW_CFG_TX_LASER_MASK 0x000000FF
+ #define PORT_HW_CFG_TX_LASER_SHIFT 0
+ #define PORT_HW_CFG_TX_LASER_MDIO 0x00000000
+ #define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001
+ #define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002
+ #define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003
+ #define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004
+
+ /* Controls the fault module LED of the SFP+ */
+ #define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00
+ #define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8
+ #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000
+ #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100
+ #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200
+ #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300
+ #define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400
+
+ /* The output pin TX_DIS that controls the TX laser of the SFP+
+ module. Use the PIN_CFG_XXX defines on top */
+ u32 e3_sfp_ctrl; /* 0x168 */
+ #define PORT_HW_CFG_E3_TX_LASER_MASK 0x000000FF
+ #define PORT_HW_CFG_E3_TX_LASER_SHIFT 0
+
+ /* The output pin for SFPP_TYPE which turns on the Fault module LED */
+ #define PORT_HW_CFG_E3_FAULT_MDL_LED_MASK 0x0000FF00
+ #define PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT 8
+
+	/* The input pin MOD_ABS that indicates whether an SFP+ module is
+ present or not. Use the PIN_CFG_XXX defines on top */
+ #define PORT_HW_CFG_E3_MOD_ABS_MASK 0x00FF0000
+ #define PORT_HW_CFG_E3_MOD_ABS_SHIFT 16
+
+	/* The output pin PWRDIS_SFP_X which disables the power of the SFP+
+ module. Use the PIN_CFG_XXX defines on top */
+ #define PORT_HW_CFG_E3_PWR_DIS_MASK 0xFF000000
+ #define PORT_HW_CFG_E3_PWR_DIS_SHIFT 24
+
+ /*
+ * The input pin which signals module transmit fault. Use the
+ * PIN_CFG_XXX defines on top
+ */
+ u32 e3_cmn_pin_cfg; /* 0x16C */
+ #define PORT_HW_CFG_E3_TX_FAULT_MASK 0x000000FF
+ #define PORT_HW_CFG_E3_TX_FAULT_SHIFT 0
+
+	/* The output pin which resets the PHY. Use the PIN_CFG_XXX defines on
+ top */
+ #define PORT_HW_CFG_E3_PHY_RESET_MASK 0x0000FF00
+ #define PORT_HW_CFG_E3_PHY_RESET_SHIFT 8
+
+ /*
+ * The output pin which powers down the PHY. Use the PIN_CFG_XXX
+ * defines on top
+ */
+ #define PORT_HW_CFG_E3_PWR_DOWN_MASK 0x00FF0000
+ #define PORT_HW_CFG_E3_PWR_DOWN_SHIFT 16
+
+	/* The output pin values BSC_SEL which select the I2C for this port
+ in the I2C Mux */
+ #define PORT_HW_CFG_E3_I2C_MUX0_MASK 0x01000000
+ #define PORT_HW_CFG_E3_I2C_MUX1_MASK 0x02000000
+
+
+ /*
+	 * The input pin I_FAULT which indicates that an over-current has
+	 * occurred.
+ * Use the PIN_CFG_XXX defines on top
+ */
+ u32 e3_cmn_pin_cfg1; /* 0x170 */
+ #define PORT_HW_CFG_E3_OVER_CURRENT_MASK 0x000000FF
+ #define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT 0
+ u32 reserved0[7]; /* 0x174 */
+
+ u32 aeu_int_mask; /* 0x190 */
+
+ u32 media_type; /* 0x194 */
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK 0x000000FF
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT 0
+
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK 0x0000FF00
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT 8
+
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK 0x00FF0000
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT 16
+
+	/* 4 times 16 bits for all 4 lanes. In case an external PHY is present
+	   (not direct mode), those values will not take effect on the 4 XGXS
+	   lanes. For some external PHYs (such as the 8706 and 8726) the values
+	   will be used to configure the external PHY; in those cases, not
+	   all 4 values are needed. */
+ u16 xgxs_config_rx[4]; /* 0x198 */
+ u16 xgxs_config_tx[4]; /* 0x1A0 */
+
+	/* For storing the FCoE MAC in shared memory */
+ u32 fcoe_fip_mac_upper;
+ #define PORT_HW_CFG_FCOE_UPPERMAC_MASK 0x0000ffff
+ #define PORT_HW_CFG_FCOE_UPPERMAC_SHIFT 0
+ u32 fcoe_fip_mac_lower;
+
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
+
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
+
+ u32 Reserved1[49]; /* 0x1C0 */
+
+ /* Enable RJ45 magjack pair swapping on 10GBase-T PHY (0=default),
+ 84833 only */
+ u32 xgbt_phy_cfg; /* 0x284 */
+ #define PORT_HW_CFG_RJ45_PAIR_SWAP_MASK 0x000000FF
+ #define PORT_HW_CFG_RJ45_PAIR_SWAP_SHIFT 0
+
+ u32 default_cfg; /* 0x288 */
+ #define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003
+ #define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0
+ #define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000
+ #define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001
+ #define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002
+ #define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003
+
+ #define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C
+ #define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2
+ #define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000
+ #define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004
+ #define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008
+ #define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c
+
+ #define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030
+ #define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4
+ #define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000
+ #define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010
+ #define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020
+ #define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030
+
+ #define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0
+ #define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6
+ #define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000
+ #define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040
+ #define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080
+ #define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0
+
+	/* When the KR link is required to be forced (which is not
+	   KR-compliant), this parameter determines the trigger for it.
+	   When a GPIO is selected, a low input will force the speed. Currently
+	   the default speed is 1G. In the future, this may be widened to select
+	   the forced speed with another parameter. Note that when force-1G is
+	   enabled, it overrides option 56, the Link Speed option. */
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900
+	/* Enables selecting which GPIO is used to reset the external phy */
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000
+
+ /* Enable BAM on KR */
+ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
+ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
+ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
+ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
+
+ /* Enable Common Mode Sense */
+ #define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000
+ #define PORT_HW_CFG_ENABLE_CMS_SHIFT 21
+ #define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000
+ #define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000
+
+ /* Enable RJ45 magjack pair swapping on 10GBase-T PHY, 84833 only */
+ #define PORT_HW_CFG_RJ45_PR_SWP_MASK 0x00400000
+ #define PORT_HW_CFG_RJ45_PR_SWP_SHIFT 22
+ #define PORT_HW_CFG_RJ45_PR_SWP_DISABLED 0x00000000
+ #define PORT_HW_CFG_RJ45_PR_SWP_ENABLED 0x00400000
+
+ /* Determine the Serdes electrical interface */
+ #define PORT_HW_CFG_NET_SERDES_IF_MASK 0x0F000000
+ #define PORT_HW_CFG_NET_SERDES_IF_SHIFT 24
+ #define PORT_HW_CFG_NET_SERDES_IF_SGMII 0x00000000
+ #define PORT_HW_CFG_NET_SERDES_IF_XFI 0x01000000
+ #define PORT_HW_CFG_NET_SERDES_IF_SFI 0x02000000
+ #define PORT_HW_CFG_NET_SERDES_IF_KR 0x03000000
+ #define PORT_HW_CFG_NET_SERDES_IF_DXGXS 0x04000000
+ #define PORT_HW_CFG_NET_SERDES_IF_KR2 0x05000000
+
+
+ u32 speed_capability_mask2; /* 0x28C */
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3__ 0x00000002
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3___ 0x00000004
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G 0x00000020
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_20G 0x00000080
+
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0__ 0x00020000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0___ 0x00040000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G 0x00200000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_20G 0x00800000
+
+
+ /* In the case where two media types (e.g. copper and fiber) are
+ present and electrically active at the same time, PHY Selection
+ will determine which of the two PHYs will be designated as the
+ Active PHY and used for a connection to the network. */
+ u32 multi_phy_config; /* 0x290 */
+ #define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007
+ #define PORT_HW_CFG_PHY_SELECTION_SHIFT 0
+ #define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000
+ #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001
+ #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002
+ #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003
+ #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004
+
+ /* When enabled, all second phy nvram parameters will be swapped
+ with the first phy parameters */
+ #define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008
+ #define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3
+ #define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000
+ #define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008
+
+
+ /* Address of the second external phy */
+ u32 external_phy_config2; /* 0x294 */
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0
+
+ /* The second XGXS external PHY type */
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071 0x00000100
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072 0x00000200
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073 0x00000300
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705 0x00000400
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706 0x00000500
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726 0x00000600
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481 0x00000700
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727 0x00000900
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC 0x00000a00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823 0x00000b00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640 0x00000c00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833 0x00000d00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54618SE 0x00000e00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616 0x00001000
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00
+
+
+ /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as
+ 8706, 8726 and 8727) not all 4 values are needed. */
+ u16 xgxs_config2_rx[4]; /* 0x296 */
+ u16 xgxs_config2_tx[4]; /* 0x2A0 */
+
+ u32 lane_config;
+ #define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff
+ #define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0
+ /* AN and forced */
+ #define PORT_HW_CFG_LANE_SWAP_CFG_01230123 0x00001b1b
+ /* forced only */
+ #define PORT_HW_CFG_LANE_SWAP_CFG_01233210 0x00001be4
+ /* forced only */
+ #define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8
+ /* forced only */
+ #define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4
+ #define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff
+ #define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0
+ #define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00
+ #define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8
+ #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK 0x0000c000
+ #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT 14
+
+ /* Indicate whether to swap the external phy polarity */
+ #define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000
+ #define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000
+ #define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000
+
+
+ u32 external_phy_config;
+ #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000ff
+ #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT 0
+
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK 0x0000ff00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT 8
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT 0x00000000
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8071 0x00000100
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072 0x00000200
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073 0x00000300
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705 0x00000400
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706 0x00000500
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726 0x00000600
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 0x00000700
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54640 0x00000c00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE 0x00000e00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616 0x00001000
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
+
+ #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK 0x00ff0000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT 16
+
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT 24
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT 0x00000000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482 0x01000000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD 0x02000000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN 0xff000000
+
+ u32 speed_capability_mask;
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK 0x0000ffff
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT 0
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL 0x00000001
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF 0x00000002
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF 0x00000004
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL 0x00000008
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G 0x00000010
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G 0x00000020
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G 0x00000040
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_20G 0x00000080
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED 0x0000f000
+
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK 0xffff0000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT 16
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL 0x00010000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF 0x00020000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF 0x00040000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL 0x00080000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G 0x00100000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G 0x00200000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G 0x00400000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_20G 0x00800000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED 0xf0000000
+
+ /* A place to hold the original MAC address as a backup */
+ u32 backup_mac_upper; /* 0x2B4 */
+ u32 backup_mac_lower; /* 0x2B8 */
+
+};
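+
+/*
+ * Illustrative sketch (not part of the HSI; the helper name is
+ * hypothetical): the port MAC is split into a 16-bit upper half
+ * (mac_upper) and a 32-bit lower half (mac_lower). Assuming the structure
+ * is directly addressable, the 6 bytes reassemble most-significant first:
+ */
+static inline void port_hw_cfg_get_mac(const struct port_hw_cfg *cfg,
+				       u8 *mac)
+{
+	mac[0] = (u8)(cfg->mac_upper >> 8);
+	mac[1] = (u8)cfg->mac_upper;
+	mac[2] = (u8)(cfg->mac_lower >> 24);
+	mac[3] = (u8)(cfg->mac_lower >> 16);
+	mac[4] = (u8)(cfg->mac_lower >> 8);
+	mac[5] = (u8)cfg->mac_lower;
+}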
+
+
+/****************************************************************************
+ * Shared Feature configuration *
+ ****************************************************************************/
+struct shared_feat_cfg { /* NVRAM Offset */
+
+ u32 config; /* 0x450 */
+ #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001
+
+ /* Use NVRAM values instead of HW default values */
+ #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_MASK \
+ 0x00000002
+ #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED \
+ 0x00000000
+ #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED \
+ 0x00000002
+
+ #define SHARED_FEAT_CFG_NCSI_ID_METHOD_MASK 0x00000008
+ #define SHARED_FEAT_CFG_NCSI_ID_METHOD_SPIO 0x00000000
+ #define SHARED_FEAT_CFG_NCSI_ID_METHOD_NVRAM 0x00000008
+
+ #define SHARED_FEAT_CFG_NCSI_ID_MASK 0x00000030
+ #define SHARED_FEAT_CFG_NCSI_ID_SHIFT 4
+
+	/* Override the OTP back to single function mode. When using GPIO,
+	   high means SF only, low means according to the CLP configuration */
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
+
+ /* The interval in seconds between sending LLDP packets. Set to zero
+ to disable the feature */
+ #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_MASK 0x00ff0000
+ #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_SHIFT 16
+
+ /* The assigned device type ID for LLDP usage */
+ #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_MASK 0xff000000
+ #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_SHIFT 24
+
+};
+
+
+/****************************************************************************
+ * Port Feature configuration *
+ ****************************************************************************/
+struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
+
+ u32 config;
+ #define PORT_FEATURE_BAR1_SIZE_MASK 0x0000000f
+ #define PORT_FEATURE_BAR1_SIZE_SHIFT 0
+ #define PORT_FEATURE_BAR1_SIZE_DISABLED 0x00000000
+ #define PORT_FEATURE_BAR1_SIZE_64K 0x00000001
+ #define PORT_FEATURE_BAR1_SIZE_128K 0x00000002
+ #define PORT_FEATURE_BAR1_SIZE_256K 0x00000003
+ #define PORT_FEATURE_BAR1_SIZE_512K 0x00000004
+ #define PORT_FEATURE_BAR1_SIZE_1M 0x00000005
+ #define PORT_FEATURE_BAR1_SIZE_2M 0x00000006
+ #define PORT_FEATURE_BAR1_SIZE_4M 0x00000007
+ #define PORT_FEATURE_BAR1_SIZE_8M 0x00000008
+ #define PORT_FEATURE_BAR1_SIZE_16M 0x00000009
+ #define PORT_FEATURE_BAR1_SIZE_32M 0x0000000a
+ #define PORT_FEATURE_BAR1_SIZE_64M 0x0000000b
+ #define PORT_FEATURE_BAR1_SIZE_128M 0x0000000c
+ #define PORT_FEATURE_BAR1_SIZE_256M 0x0000000d
+ #define PORT_FEATURE_BAR1_SIZE_512M 0x0000000e
+ #define PORT_FEATURE_BAR1_SIZE_1G 0x0000000f
+ #define PORT_FEATURE_BAR2_SIZE_MASK 0x000000f0
+ #define PORT_FEATURE_BAR2_SIZE_SHIFT 4
+ #define PORT_FEATURE_BAR2_SIZE_DISABLED 0x00000000
+ #define PORT_FEATURE_BAR2_SIZE_64K 0x00000010
+ #define PORT_FEATURE_BAR2_SIZE_128K 0x00000020
+ #define PORT_FEATURE_BAR2_SIZE_256K 0x00000030
+ #define PORT_FEATURE_BAR2_SIZE_512K 0x00000040
+ #define PORT_FEATURE_BAR2_SIZE_1M 0x00000050
+ #define PORT_FEATURE_BAR2_SIZE_2M 0x00000060
+ #define PORT_FEATURE_BAR2_SIZE_4M 0x00000070
+ #define PORT_FEATURE_BAR2_SIZE_8M 0x00000080
+ #define PORT_FEATURE_BAR2_SIZE_16M 0x00000090
+ #define PORT_FEATURE_BAR2_SIZE_32M 0x000000a0
+ #define PORT_FEATURE_BAR2_SIZE_64M 0x000000b0
+ #define PORT_FEATURE_BAR2_SIZE_128M 0x000000c0
+ #define PORT_FEATURE_BAR2_SIZE_256M 0x000000d0
+ #define PORT_FEATURE_BAR2_SIZE_512M 0x000000e0
+ #define PORT_FEATURE_BAR2_SIZE_1G 0x000000f0
+
+ #define PORT_FEAT_CFG_DCBX_MASK 0x00000100
+ #define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100
+
+ #define PORT_FEAT_CFG_AUTOGREEN_MASK 0x00000200
+ #define PORT_FEAT_CFG_AUTOGREEN_SHIFT 9
+ #define PORT_FEAT_CFG_AUTOGREEN_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_AUTOGREEN_ENABLED 0x00000200
+
+ #define PORT_FEATURE_EN_SIZE_MASK 0x0f000000
+ #define PORT_FEATURE_EN_SIZE_SHIFT 24
+ #define PORT_FEATURE_WOL_ENABLED 0x01000000
+ #define PORT_FEATURE_MBA_ENABLED 0x02000000
+ #define PORT_FEATURE_MFW_ENABLED 0x04000000
+
+ /* Advertise expansion ROM even if MBA is disabled */
+ #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_MASK 0x08000000
+ #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_ENABLED 0x08000000
+
+ /* Check the optic vendor via i2c against a list of approved modules
+ in a separate nvram image */
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK 0xe0000000
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT 29
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT \
+ 0x00000000
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER \
+ 0x20000000
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG 0x40000000
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN 0x60000000
+
+ u32 wol_config;
+	/* The default is used when the driver is set to "auto" mode */
+ #define PORT_FEATURE_WOL_DEFAULT_MASK 0x00000003
+ #define PORT_FEATURE_WOL_DEFAULT_SHIFT 0
+ #define PORT_FEATURE_WOL_DEFAULT_DISABLE 0x00000000
+ #define PORT_FEATURE_WOL_DEFAULT_MAGIC 0x00000001
+ #define PORT_FEATURE_WOL_DEFAULT_ACPI 0x00000002
+ #define PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI 0x00000003
+ #define PORT_FEATURE_WOL_RES_PAUSE_CAP 0x00000004
+ #define PORT_FEATURE_WOL_RES_ASYM_PAUSE_CAP 0x00000008
+ #define PORT_FEATURE_WOL_ACPI_UPON_MGMT 0x00000010
+
+ u32 mba_config;
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x00000007
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT 0
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0x00000000
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 0x00000001
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 0x00000002
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB 0x00000003
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT 0x00000004
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE 0x00000007
+
+ #define PORT_FEATURE_MBA_BOOT_RETRY_MASK 0x00000038
+ #define PORT_FEATURE_MBA_BOOT_RETRY_SHIFT 3
+
+ #define PORT_FEATURE_MBA_RES_PAUSE_CAP 0x00000100
+ #define PORT_FEATURE_MBA_RES_ASYM_PAUSE_CAP 0x00000200
+ #define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x00000400
+ #define PORT_FEATURE_MBA_HOTKEY_MASK 0x00000800
+ #define PORT_FEATURE_MBA_HOTKEY_CTRL_S 0x00000000
+ #define PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x00000800
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0x000ff000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT 12
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0x00000000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x00001000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x00002000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x00003000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x00004000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x00005000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x00006000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x00007000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x00008000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0x00009000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0x0000a000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0x0000b000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0x0000c000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0x0000d000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0x0000e000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M 0x0000f000
+ #define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0x00f00000
+ #define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT 20
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x03000000
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT 24
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0x00000000
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x01000000
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x02000000
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x03000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3c000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_SHIFT 26
+ #define PORT_FEATURE_MBA_LINK_SPEED_AUTO 0x00000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_10HD 0x04000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_10FD 0x08000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_100HD 0x0c000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_100FD 0x10000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_1GBPS 0x14000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_2_5GBPS 0x18000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_CX4 0x1c000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_20GBPS 0x20000000
+ u32 bmc_config;
+ #define PORT_FEATURE_BMC_LINK_OVERRIDE_MASK 0x00000001
+ #define PORT_FEATURE_BMC_LINK_OVERRIDE_DEFAULT 0x00000000
+ #define PORT_FEATURE_BMC_LINK_OVERRIDE_EN 0x00000001
+
+ u32 mba_vlan_cfg;
+ #define PORT_FEATURE_MBA_VLAN_TAG_MASK 0x0000ffff
+ #define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 0
+ #define PORT_FEATURE_MBA_VLAN_EN 0x00010000
+
+ u32 resource_cfg;
+ #define PORT_FEATURE_RESOURCE_CFG_VALID 0x00000001
+ #define PORT_FEATURE_RESOURCE_CFG_DIAG 0x00000002
+ #define PORT_FEATURE_RESOURCE_CFG_L2 0x00000004
+ #define PORT_FEATURE_RESOURCE_CFG_ISCSI 0x00000008
+ #define PORT_FEATURE_RESOURCE_CFG_RDMA 0x00000010
+
+ u32 smbus_config;
+ #define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe
+ #define PORT_FEATURE_SMBUS_ADDR_SHIFT 1
+
+ u32 vf_config;
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_MASK 0x0000000f
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_SHIFT 0
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_4K 0x00000001
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_8K 0x00000002
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_16K 0x00000003
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_32K 0x00000004
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_64K 0x00000005
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_128K 0x00000006
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_256K 0x00000007
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_512K 0x00000008
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_1M 0x00000009
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_2M 0x0000000a
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_4M 0x0000000b
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_8M 0x0000000c
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_16M 0x0000000d
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_32M 0x0000000e
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_64M 0x0000000f
+
+ u32 link_config; /* Used as HW defaults for the driver */
+ #define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000
+ #define PORT_FEATURE_CONNECTED_SWITCH_SHIFT 24
+ /* (forced) low speed switch (< 10G) */
+ #define PORT_FEATURE_CON_SWITCH_1G_SWITCH 0x00000000
+ /* (forced) high speed switch (>= 10G) */
+ #define PORT_FEATURE_CON_SWITCH_10G_SWITCH 0x01000000
+ #define PORT_FEATURE_CON_SWITCH_AUTO_DETECT 0x02000000
+ #define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT 0x03000000
+
+ #define PORT_FEATURE_LINK_SPEED_MASK 0x000f0000
+ #define PORT_FEATURE_LINK_SPEED_SHIFT 16
+ #define PORT_FEATURE_LINK_SPEED_AUTO 0x00000000
+ #define PORT_FEATURE_LINK_SPEED_10M_FULL 0x00010000
+ #define PORT_FEATURE_LINK_SPEED_10M_HALF 0x00020000
+ #define PORT_FEATURE_LINK_SPEED_100M_HALF 0x00030000
+ #define PORT_FEATURE_LINK_SPEED_100M_FULL 0x00040000
+ #define PORT_FEATURE_LINK_SPEED_1G 0x00050000
+ #define PORT_FEATURE_LINK_SPEED_2_5G 0x00060000
+ #define PORT_FEATURE_LINK_SPEED_10G_CX4 0x00070000
+ #define PORT_FEATURE_LINK_SPEED_20G 0x00080000
+
+ #define PORT_FEATURE_FLOW_CONTROL_MASK 0x00000700
+ #define PORT_FEATURE_FLOW_CONTROL_SHIFT 8
+ #define PORT_FEATURE_FLOW_CONTROL_AUTO 0x00000000
+ #define PORT_FEATURE_FLOW_CONTROL_TX 0x00000100
+ #define PORT_FEATURE_FLOW_CONTROL_RX 0x00000200
+ #define PORT_FEATURE_FLOW_CONTROL_BOTH 0x00000300
+ #define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400
+
+	/* The default for the MCP link configuration;
+	   uses the same defines as link_config */
+ u32 mfw_wol_link_cfg;
+
+	/* The default for the driver of the second external phy;
+	   uses the same defines as link_config */
+ u32 link_config2; /* 0x47C */
+
+	/* The default for the MCP of the second external phy;
+	   uses the same defines as link_config */
+ u32 mfw_wol_link_cfg2; /* 0x480 */
+
+ u32 Reserved2[17]; /* 0x484 */
+
+};
+
+
+/****************************************************************************
+ * Device Information *
+ ****************************************************************************/
+struct shm_dev_info { /* size */
+
+ u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */
+
+ struct shared_hw_cfg shared_hw_config; /* 40 */
+
+ struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */
+
+ struct shared_feat_cfg shared_feature_config; /* 4 */
+
+ struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */
+
+};
+
+
+#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
+ #error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition."
+#endif
+
+#define FUNC_0 0
+#define FUNC_1 1
+#define FUNC_2 2
+#define FUNC_3 3
+#define FUNC_4 4
+#define FUNC_5 5
+#define FUNC_6 6
+#define FUNC_7 7
+#define E1_FUNC_MAX 2
+#define E1H_FUNC_MAX 8
+#define E2_FUNC_MAX 4 /* per path */
+
+#define VN_0 0
+#define VN_1 1
+#define VN_2 2
+#define VN_3 3
+#define E1VN_MAX 1
+#define E1HVN_MAX 4
+
+#define E2_VF_MAX 64 /* HC_REG_VF_CONFIGURATION_SIZE */
+/* This value (in milliseconds) determines the frequency of the driver
+ * issuing the PULSE message code. The firmware monitors this periodic
+ * pulse to determine when to switch to an OS-absent mode. */
+#define DRV_PULSE_PERIOD_MS 250
+
+/* This value (in milliseconds) determines how long the driver should
+ * wait for an acknowledgement from the firmware before timing out. Once
+ * the firmware has timed out, the driver will assume there is no firmware
+ * running and there won't be any firmware-driver synchronization during a
+ * driver reset. */
+#define FW_ACK_TIME_OUT_MS 5000
+
+#define FW_ACK_POLL_TIME_MS 1
+
+#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
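+
+/*
+ * Illustrative sketch (not part of the HSI): the three constants above
+ * imply a loop of FW_ACK_NUM_OF_POLL iterations with FW_ACK_POLL_TIME_MS
+ * sleeps, giving up after FW_ACK_TIME_OUT_MS. fw_acked is a hypothetical
+ * predicate standing in for a read of the fw_mb_header sequence number,
+ * and msleep() assumes <linux/delay.h>.
+ */
+static inline bool fw_wait_for_ack(bool (*fw_acked)(void))
+{
+	int cnt;
+
+	for (cnt = 0; cnt < FW_ACK_NUM_OF_POLL; cnt++) {
+		if (fw_acked())
+			return true;
+		msleep(FW_ACK_POLL_TIME_MS);
+	}
+	return false;	/* assume no firmware is running */
+}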
+
+/* LED Blink rate that will achieve ~15.9Hz */
+#define LED_BLINK_RATE_VAL 480
+
+/****************************************************************************
+ * Driver <-> FW Mailbox *
+ ****************************************************************************/
+struct drv_port_mb {
+
+ u32 link_status;
+ /* Driver should update this field on any link change event */
+
+ #define LINK_STATUS_LINK_FLAG_MASK 0x00000001
+ #define LINK_STATUS_LINK_UP 0x00000001
+ #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E
+ #define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_20GTFD (11<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_20GXFD (11<<1)
+
+ #define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020
+ #define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+
+ #define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+ #define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080
+ #define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+
+ #define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
+ #define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+ #define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800
+ #define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000
+ #define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000
+ #define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000
+ #define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000
+
+ #define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000
+ #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000
+
+ #define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000
+ #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000
+
+ #define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
+ #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18)
+ #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18)
+ #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18)
+ #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18)
+
+ #define LINK_STATUS_SERDES_LINK 0x00100000
+
+ #define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000
+ #define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000
+ #define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000
+ #define LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE 0x10000000
+
+ #define LINK_STATUS_PFC_ENABLED 0x20000000
+
+ #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000
+
+ u32 port_stx;
+
+ u32 stat_nig_timer;
+
+ /* MCP firmware does not use this field */
+ u32 ext_phy_fw_version;
+
+};
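+
+/*
+ * Illustrative sketch (not part of the HSI; the helper name is
+ * hypothetical): the speed/duplex codes above are all of the form (n<<1)
+ * under a mask covering bits 1..4, so the raw code is recovered by
+ * masking and shifting right by one:
+ */
+static inline u32 link_status_speed_code(u32 link_status)
+{
+	/* e.g. 7 = 1000TFD/1000XFD, 10 = 10GTFD/10GXFD, 0 = AN incomplete */
+	return (link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) >> 1;
+}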
+
+
+struct drv_func_mb {
+
+ u32 drv_mb_header;
+ #define DRV_MSG_CODE_MASK 0xffff0000
+ #define DRV_MSG_CODE_LOAD_REQ 0x10000000
+ #define DRV_MSG_CODE_LOAD_DONE 0x11000000
+ #define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000
+ #define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000
+ #define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000
+ #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
+ #define DRV_MSG_CODE_DCC_OK 0x30000000
+ #define DRV_MSG_CODE_DCC_FAILURE 0x31000000
+ #define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000
+ #define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000
+ #define DRV_MSG_CODE_VALIDATE_KEY 0x70000000
+ #define DRV_MSG_CODE_GET_CURR_KEY 0x80000000
+ #define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000
+ #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
+ #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
+ /*
+ * The optic module verification command requires bootcode
+	 * v5.0.6 or later; the specific optic module verification command
+ * requires bootcode v5.2.12 or later
+ */
+ #define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000
+ #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006
+ #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
+ #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
+ #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
+
+ #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
+ #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
+
+ #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
+
+ #define DRV_MSG_CODE_SET_MF_BW 0xe0000000
+ #define REQ_BC_VER_4_SET_MF_BW 0x00060202
+ #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
+
+ #define DRV_MSG_CODE_LINK_STATUS_CHANGED 0x01000000
+
+ #define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
+ #define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
+ #define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
+ #define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
+
+ #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 drv_mb_param;
+ #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000
+ #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000
+
+ u32 fw_mb_header;
+ #define FW_MSG_CODE_MASK 0xffff0000
+ #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
+ #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
+ #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
+ /* Load common chip is supported from bc 6.0.0 */
+ #define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000
+ #define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000
+
+ #define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000
+ #define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
+ #define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000
+ #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000
+ #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000
+ #define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
+ #define FW_MSG_CODE_DCC_DONE 0x30100000
+ #define FW_MSG_CODE_LLDP_DONE 0x40100000
+ #define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000
+ #define FW_MSG_CODE_DIAG_REFUSE 0x50200000
+ #define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000
+ #define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000
+ #define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000
+ #define FW_MSG_CODE_GET_KEY_DONE 0x80100000
+ #define FW_MSG_CODE_NO_KEY 0x80f00000
+ #define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000
+ #define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000
+ #define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000
+ #define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000
+ #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000
+ #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000
+ #define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS 0xa0100000
+ #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000
+ #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000
+ #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
+
+ #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
+ #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
+
+ #define FW_MSG_CODE_LINK_CHANGED_ACK 0x01100000
+
+ #define FW_MSG_CODE_LIC_CHALLENGE 0xff010000
+ #define FW_MSG_CODE_LIC_RESPONSE 0xff020000
+ #define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000
+ #define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
+
+ #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 fw_mb_param;
+
+ u32 drv_pulse_mb;
+ #define DRV_PULSE_SEQ_MASK 0x00007fff
+ #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
+ /*
+ * The system time is in the format of
+ * (year-2001)*12*32 + month*32 + day.
+ */
+ #define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+ /*
+	 * Indicates to the firmware not to go into
+	 * OS-absent mode when it is not receiving a driver pulse.
+	 * This is used for debugging as well as for PXE (MBA).
+ */
+
+ u32 mcp_pulse_mb;
+ #define MCP_PULSE_SEQ_MASK 0x00007fff
+ #define MCP_PULSE_ALWAYS_ALIVE 0x00008000
+ /* Indicates to the driver not to assert due to lack
+ * of MCP response */
+ #define MCP_EVENT_MASK 0xffff0000
+ #define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+
+ u32 iscsi_boot_signature;
+ u32 iscsi_boot_block_offset;
+
+ u32 drv_status;
+ #define DRV_STATUS_PMF 0x00000001
+ #define DRV_STATUS_VF_DISABLED 0x00000002
+ #define DRV_STATUS_SET_MF_BW 0x00000004
+ #define DRV_STATUS_LINK_EVENT 0x00000008
+
+ #define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00
+ #define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100
+ #define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200
+ #define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS 0x00000400
+ #define DRV_STATUS_DCC_RESERVED1 0x00000800
+ #define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000
+ #define DRV_STATUS_DCC_SET_PRIORITY 0x00002000
+
+ #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
+ #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
+
+ u32 virt_mac_upper;
+ #define VIRT_MAC_SIGN_MASK 0xffff0000
+ #define VIRT_MAC_SIGNATURE 0x564d0000
+ u32 virt_mac_lower;
+
+};
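+
+/*
+ * Illustrative sketch (not part of the HSI; the helper name is
+ * hypothetical): packing drv_pulse_mb from a sequence number and the
+ * (year-2001)*12*32 + month*32 + day system-time encoding documented
+ * above. The encoded time occupies the upper 16 bits:
+ */
+static inline u32 drv_pulse_pack(u32 seq, u32 year, u32 month, u32 day)
+{
+	u32 systime = (year - 2001) * 12 * 32 + month * 32 + day;
+
+	return (seq & DRV_PULSE_SEQ_MASK) |
+	       ((systime << 16) & DRV_PULSE_SYSTEM_TIME_MASK);
+}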
+
+
+/****************************************************************************
+ * Management firmware state *
+ ****************************************************************************/
+/* Allocate 440 bytes for management firmware */
+#define MGMTFW_STATE_WORD_SIZE 110
+
+struct mgmtfw_state {
+ u32 opaque[MGMTFW_STATE_WORD_SIZE];
+};
+
+
+/****************************************************************************
+ * Multi-Function configuration *
+ ****************************************************************************/
+struct shared_mf_cfg {
+
+ u32 clp_mb;
+ #define SHARED_MF_CLP_SET_DEFAULT 0x00000000
+ /* set by CLP */
+ #define SHARED_MF_CLP_EXIT 0x00000001
+ /* set by MCP */
+ #define SHARED_MF_CLP_EXIT_DONE 0x00010000
+
+};
+
+struct port_mf_cfg {
+
+ u32 dynamic_cfg; /* device control channel */
+ #define PORT_MF_CFG_E1HOV_TAG_MASK 0x0000ffff
+ #define PORT_MF_CFG_E1HOV_TAG_SHIFT 0
+ #define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK
+
+ u32 reserved[3];
+
+};
+
+struct func_mf_cfg {
+
+ u32 config;
+ /* E/R/I/D */
+ /* function 0 of each port cannot be hidden */
+ #define FUNC_MF_CFG_FUNC_HIDE 0x00000001
+
+ #define FUNC_MF_CFG_PROTOCOL_MASK 0x00000006
+ #define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000000
+ #define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002
+ #define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004
+ #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006
+ #define FUNC_MF_CFG_PROTOCOL_DEFAULT \
+ FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA
+
+ #define FUNC_MF_CFG_FUNC_DISABLED 0x00000008
+ #define FUNC_MF_CFG_FUNC_DELETED 0x00000010
+
+ /* PRI */
+ /* 0 - low priority, 3 - high priority */
+ #define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300
+ #define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8
+ #define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000
+
+ /* MINBW, MAXBW */
+	/* value range 0..100, in increments of 100Mbps */
+ #define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000
+ #define FUNC_MF_CFG_MIN_BW_SHIFT 16
+ #define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
+ #define FUNC_MF_CFG_MAX_BW_MASK 0xff000000
+ #define FUNC_MF_CFG_MAX_BW_SHIFT 24
+ #define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000
+
+ u32 mac_upper; /* MAC */
+ #define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
+ #define FUNC_MF_CFG_UPPERMAC_SHIFT 0
+ #define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
+ u32 mac_lower;
+ #define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
+
+ u32 e1hov_tag; /* VNI */
+ #define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff
+ #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0
+ #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK
+
+ u32 reserved[2];
+};
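+
+/*
+ * Illustrative sketch (not part of the HSI; the helper name is
+ * hypothetical): the MIN/MAX bandwidth fields hold 0..100; note that
+ * FUNC_MF_CFG_MAX_BW_DEFAULT >> 24 == 0x64 == 100, i.e. the default is
+ * the top of the range.
+ */
+static inline u32 func_mf_cfg_max_bw(u32 config)
+{
+	return (config & FUNC_MF_CFG_MAX_BW_MASK) >>
+	       FUNC_MF_CFG_MAX_BW_SHIFT;
+}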
+
+/* This structure is not applicable and should not be accessed on 57711 */
+struct func_ext_cfg {
+ u32 func_cfg;
+ #define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF
+ #define MACP_FUNC_CFG_FLAGS_SHIFT 0
+ #define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001
+ #define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002
+ #define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004
+ #define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008
+
+ u32 iscsi_mac_addr_upper;
+ u32 iscsi_mac_addr_lower;
+
+ u32 fcoe_mac_addr_upper;
+ u32 fcoe_mac_addr_lower;
+
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
+
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
+
+ u32 preserve_data;
+ #define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0)
+ #define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1)
+ #define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2)
+ #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3)
+ #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4)
+ #define MF_FUNC_CFG_PRESERVE_TX_BW (1<<5)
+};
+
+struct mf_cfg {
+
+ struct shared_mf_cfg shared_mf_config; /* 0x4 */
+ struct port_mf_cfg port_mf_config[PORT_MAX]; /* 0x10 * 2 = 0x20 */
+ /* for all chips, there are 8 mf functions */
+ struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */
+ /*
+ * Extended configuration per function - this array does not exist and
+ * should not be accessed on 57711
+ */
+ struct func_ext_cfg func_ext_config[E1H_FUNC_MAX]; /* 0x28 * 8 = 0x140*/
+}; /* 0x224 */
+
+/****************************************************************************
+ * Shared Memory Region *
+ ****************************************************************************/
+struct shmem_region { /* SharedMem Offset (size) */
+
+ u32 validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */
+ #define SHR_MEM_FORMAT_REV_MASK 0xff000000
+ #define SHR_MEM_FORMAT_REV_ID ('A'<<24)
+ /* validity bits */
+ #define SHR_MEM_VALIDITY_PCI_CFG 0x00100000
+ #define SHR_MEM_VALIDITY_MB 0x00200000
+ #define SHR_MEM_VALIDITY_DEV_INFO 0x00400000
+ #define SHR_MEM_VALIDITY_RESERVED 0x00000007
+ /* One licensing bit should be set */
+ #define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
+ #define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
+ #define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
+ #define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
+ /* Active MFW */
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
+
+ struct shm_dev_info dev_info; /* 0x8 (0x438) */
+
+ struct license_key drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */
+
+ /* FW information (for internal FW use) */
+ u32 fw_info_fio_offset; /* 0x4a8 (0x4) */
+ struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */
+
+ struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */
+
+#ifdef BMAPI
+ /* This is a variable length array */
+	/* the number of functions depends on the chip type */
+ struct drv_func_mb func_mb[1]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
+#else
+	/* the number of functions depends on the chip type */
+ struct drv_func_mb func_mb[]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
+#endif /* BMAPI */
+
+}; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */
+
+/****************************************************************************
+ * Shared Memory 2 Region *
+ ****************************************************************************/
+/* The fw_flr_ack is actually built in the following way: */
+/* 8 bit: PF ack */
+/* 64 bit: VF ack */
+/* 8 bit: iov_dis_ack */
+/* In order to maintain endianness in the mailbox hsi, we want to keep */
+/* using u32. The fw must have the VF right after the PF since this is */
+/* how it accesses arrays (it always expects the VF to reside after the */
+/* PF, which makes the calculation much easier for it). */
+/* In order to satisfy both constraints, and keep the struct small, the */
+/* code will abuse the structure defined here to achieve the actual */
+/* partition above */
+/****************************************************************************/
+struct fw_flr_ack {
+ u32 pf_ack;
+ u32 vf_ack[1];
+ u32 iov_dis_ack;
+};
+
+struct fw_flr_mb {
+ u32 aggint;
+ u32 opgen_addr;
+ struct fw_flr_ack ack;
+};
+
+/**** SUPPORT FOR SHMEM ARRAYS ***
+ * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to
+ * define arrays with storage types smaller than unsigned dwords.
+ * The macros below add generic support for SHMEM arrays with numeric elements
+ * that can span 2, 4, 8 or 16 bits. The underlying array type is a 32 bit
+ * dword array with individual bit-field elements accessed using shifts and
+ * masks.
+ */
+
+/* eb is the bitwidth of a single element */
+#define SHMEM_ARRAY_MASK(eb) ((1<<(eb))-1)
+#define SHMEM_ARRAY_ENTRY(i, eb) ((i)/(32/(eb)))
+
+/* the bit-position macro allows the user to flip the order of the array's
+ * elements on a per byte or word boundary.
+ *
+ * example: an array with 8 entries, each 4 bits wide. This array will fit
+ * into a single dword. The diagrams below show the array order of the
+ * nibbles.
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering:
+ *
+ * | | | |
+ * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * | | | |
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte:
+ *
+ * | | | |
+ * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 |
+ * | | | |
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word:
+ *
+ * | | | |
+ * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 |
+ * | | | |
+ */
+#define SHMEM_ARRAY_BITPOS(i, eb, fb) \
+ ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \
+ (((i)%((fb)/(eb))) * (eb)))
+
+#define SHMEM_ARRAY_GET(a, i, eb, fb) \
+ ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \
+ SHMEM_ARRAY_MASK(eb))
+
+#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \
+do { \
+ a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \
+ SHMEM_ARRAY_BITPOS(i, eb, fb)); \
+ a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \
+ SHMEM_ARRAY_BITPOS(i, eb, fb)); \
+} while (0)
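+
+/*
+ * Illustrative sketch (not part of the HSI; the helper name is
+ * hypothetical): storing 0..7 into the 8-entry, 4-bit, per-byte-flipped
+ * array from the second diagram above packs a single dword as 0x10325476.
+ */
+static inline u32 shmem_array_pack_demo(void)
+{
+	u32 a[1] = { 0 };
+	u32 i;
+
+	for (i = 0; i < 8; i++)
+		SHMEM_ARRAY_SET(a, i, 4, 8, i);
+	return a[0];	/* 0x10325476 */
+}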
+
+
+/****START OF DCBX STRUCTURES DECLARATIONS****/
+#define DCBX_MAX_NUM_PRI_PG_ENTRIES 8
+#define DCBX_PRI_PG_BITWIDTH 4
+#define DCBX_PRI_PG_FBITS 8
+#define DCBX_PRI_PG_GET(a, i) \
+ SHMEM_ARRAY_GET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS)
+#define DCBX_PRI_PG_SET(a, i, val) \
+ SHMEM_ARRAY_SET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS, val)
+#define DCBX_MAX_NUM_PG_BW_ENTRIES 8
+#define DCBX_BW_PG_BITWIDTH 8
+#define DCBX_PG_BW_GET(a, i) \
+ SHMEM_ARRAY_GET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH)
+#define DCBX_PG_BW_SET(a, i, val) \
+ SHMEM_ARRAY_SET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH, val)
+#define DCBX_STRICT_PRI_PG 15
+#define DCBX_MAX_APP_PROTOCOL 16
+#define FCOE_APP_IDX 0
+#define ISCSI_APP_IDX 1
+#define PREDEFINED_APP_IDX_MAX 2
+
+
+/* Big/Little endian have the same representation. */
+struct dcbx_ets_feature {
+ /*
+ * For Admin MIB - is this feature supported by the
+ * driver | For Local MIB - should this feature be enabled.
+ */
+ u32 enabled;
+ u32 pg_bw_tbl[2];
+ u32 pri_pg_tbl[1];
+};
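+
+/*
+ * Illustrative only: reading the PG assignment of priority 'pri' from an
+ * ETS table using the accessors above. The helper name is an assumption,
+ * not an HSI definition.
+ */
+static inline u32 dcbx_ets_pri_pg(struct dcbx_ets_feature *ets, int pri)
+{
+	return DCBX_PRI_PG_GET(ets->pri_pg_tbl, pri);
+}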
+
+/* Driver structure in LE */
+struct dcbx_pfc_feature {
+#ifdef __BIG_ENDIAN
+ u8 pri_en_bitmap;
+ #define DCBX_PFC_PRI_0 0x01
+ #define DCBX_PFC_PRI_1 0x02
+ #define DCBX_PFC_PRI_2 0x04
+ #define DCBX_PFC_PRI_3 0x08
+ #define DCBX_PFC_PRI_4 0x10
+ #define DCBX_PFC_PRI_5 0x20
+ #define DCBX_PFC_PRI_6 0x40
+ #define DCBX_PFC_PRI_7 0x80
+ u8 pfc_caps;
+ u8 reserved;
+ u8 enabled;
+#elif defined(__LITTLE_ENDIAN)
+ u8 enabled;
+ u8 reserved;
+ u8 pfc_caps;
+ u8 pri_en_bitmap;
+ #define DCBX_PFC_PRI_0 0x01
+ #define DCBX_PFC_PRI_1 0x02
+ #define DCBX_PFC_PRI_2 0x04
+ #define DCBX_PFC_PRI_3 0x08
+ #define DCBX_PFC_PRI_4 0x10
+ #define DCBX_PFC_PRI_5 0x20
+ #define DCBX_PFC_PRI_6 0x40
+ #define DCBX_PFC_PRI_7 0x80
+#endif
+};
+
+struct dcbx_app_priority_entry {
+#ifdef __BIG_ENDIAN
+ u16 app_id;
+ u8 pri_bitmap;
+ u8 appBitfield;
+ #define DCBX_APP_ENTRY_VALID 0x01
+ #define DCBX_APP_ENTRY_SF_MASK 0x30
+ #define DCBX_APP_ENTRY_SF_SHIFT 4
+ #define DCBX_APP_SF_ETH_TYPE 0x10
+ #define DCBX_APP_SF_PORT 0x20
+#elif defined(__LITTLE_ENDIAN)
+ u8 appBitfield;
+ #define DCBX_APP_ENTRY_VALID 0x01
+ #define DCBX_APP_ENTRY_SF_MASK 0x30
+ #define DCBX_APP_ENTRY_SF_SHIFT 4
+ #define DCBX_APP_SF_ETH_TYPE 0x10
+ #define DCBX_APP_SF_PORT 0x20
+ u8 pri_bitmap;
+ u16 app_id;
+#endif
+};
+
+
+/* FW structure in BE */
+struct dcbx_app_priority_feature {
+#ifdef __BIG_ENDIAN
+ u8 reserved;
+ u8 default_pri;
+ u8 tc_supported;
+ u8 enabled;
+#elif defined(__LITTLE_ENDIAN)
+ u8 enabled;
+ u8 tc_supported;
+ u8 default_pri;
+ u8 reserved;
+#endif
+ struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+/* FW structure in BE */
+struct dcbx_features {
+ /* PG feature */
+ struct dcbx_ets_feature ets;
+ /* PFC feature */
+ struct dcbx_pfc_feature pfc;
+ /* APP feature */
+ struct dcbx_app_priority_feature app;
+};
+
+/* LLDP protocol parameters */
+/* FW structure in BE */
+struct lldp_params {
+#ifdef __BIG_ENDIAN
+ u8 msg_fast_tx_interval;
+ u8 msg_tx_hold;
+ u8 msg_tx_interval;
+ u8 admin_status;
+ #define LLDP_TX_ONLY 0x01
+ #define LLDP_RX_ONLY 0x02
+ #define LLDP_TX_RX 0x03
+ #define LLDP_DISABLED 0x04
+ u8 reserved1;
+ u8 tx_fast;
+ u8 tx_crd_max;
+ u8 tx_crd;
+#elif defined(__LITTLE_ENDIAN)
+ u8 admin_status;
+ #define LLDP_TX_ONLY 0x01
+ #define LLDP_RX_ONLY 0x02
+ #define LLDP_TX_RX 0x03
+ #define LLDP_DISABLED 0x04
+ u8 msg_tx_interval;
+ u8 msg_tx_hold;
+ u8 msg_fast_tx_interval;
+ u8 tx_crd;
+ u8 tx_crd_max;
+ u8 tx_fast;
+ u8 reserved1;
+#endif
+ #define REM_CHASSIS_ID_STAT_LEN 4
+ #define REM_PORT_ID_STAT_LEN 4
+ /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
+ u32 peer_chassis_id[REM_CHASSIS_ID_STAT_LEN];
+ /* Holds remote Port ID TLV header, subtype and 9B of payload. */
+ u32 peer_port_id[REM_PORT_ID_STAT_LEN];
+};
+
+struct lldp_dcbx_stat {
+ #define LOCAL_CHASSIS_ID_STAT_LEN 2
+ #define LOCAL_PORT_ID_STAT_LEN 2
+ /* Holds local Chassis ID 8B payload of constant subtype 4. */
+ u32 local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN];
+ /* Holds local Port ID 8B payload of constant subtype 3. */
+ u32 local_port_id[LOCAL_PORT_ID_STAT_LEN];
+ /* Number of DCBX frames transmitted. */
+ u32 num_tx_dcbx_pkts;
+ /* Number of DCBX frames received. */
+ u32 num_rx_dcbx_pkts;
+};
+
+/* ADMIN MIB - DCBX local machine default configuration. */
+struct lldp_admin_mib {
+ u32 ver_cfg_flags;
+ #define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001
+ #define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002
+ #define DCBX_APP_CONFIG_TX_ENABLED 0x00000004
+ #define DCBX_ETS_RECO_TX_ENABLED 0x00000008
+ #define DCBX_ETS_RECO_VALID 0x00000010
+ #define DCBX_ETS_WILLING 0x00000020
+ #define DCBX_PFC_WILLING 0x00000040
+ #define DCBX_APP_WILLING 0x00000080
+ #define DCBX_VERSION_CEE 0x00000100
+ #define DCBX_VERSION_IEEE 0x00000200
+ #define DCBX_DCBX_ENABLED 0x00000400
+ #define DCBX_CEE_VERSION_MASK 0x0000f000
+ #define DCBX_CEE_VERSION_SHIFT 12
+ #define DCBX_CEE_MAX_VERSION_MASK 0x000f0000
+ #define DCBX_CEE_MAX_VERSION_SHIFT 16
+ struct dcbx_features features;
+};
+
+/* REMOTE MIB - remote machine DCBX configuration. */
+struct lldp_remote_mib {
+ u32 prefix_seq_num;
+ u32 flags;
+ #define DCBX_ETS_TLV_RX 0x00000001
+ #define DCBX_PFC_TLV_RX 0x00000002
+ #define DCBX_APP_TLV_RX 0x00000004
+ #define DCBX_ETS_RX_ERROR 0x00000010
+ #define DCBX_PFC_RX_ERROR 0x00000020
+ #define DCBX_APP_RX_ERROR 0x00000040
+ #define DCBX_ETS_REM_WILLING 0x00000100
+ #define DCBX_PFC_REM_WILLING 0x00000200
+ #define DCBX_APP_REM_WILLING 0x00000400
+ #define DCBX_REMOTE_ETS_RECO_VALID 0x00001000
+ #define DCBX_REMOTE_MIB_VALID 0x00002000
+ struct dcbx_features features;
+ u32 suffix_seq_num;
+};
+
+/* LOCAL MIB - operational DCBX configuration - transmitted on Tx LLDPDU. */
+struct lldp_local_mib {
+ u32 prefix_seq_num;
+ /* Indicates if there is mismatch with negotiation results. */
+ u32 error;
+ #define DCBX_LOCAL_ETS_ERROR 0x00000001
+ #define DCBX_LOCAL_PFC_ERROR 0x00000002
+ #define DCBX_LOCAL_APP_ERROR 0x00000004
+ #define DCBX_LOCAL_PFC_MISMATCH 0x00000010
+ #define DCBX_LOCAL_APP_MISMATCH 0x00000020
+ #define DCBX_REMOTE_MIB_ERROR 0x00000040
+ struct dcbx_features features;
+ u32 suffix_seq_num;
+};
+/***END OF DCBX STRUCTURES DECLARATIONS***/
+
+struct ncsi_oem_fcoe_features {
+ u32 fcoe_features1;
+ #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF
+ #define FCOE_FEATURES1_IOS_PER_CONNECTION_OFFSET 0
+
+ #define FCOE_FEATURES1_LOGINS_PER_PORT_MASK 0xFFFF0000
+ #define FCOE_FEATURES1_LOGINS_PER_PORT_OFFSET 16
+
+ u32 fcoe_features2;
+ #define FCOE_FEATURES2_EXCHANGES_MASK 0x0000FFFF
+ #define FCOE_FEATURES2_EXCHANGES_OFFSET 0
+
+ #define FCOE_FEATURES2_NPIV_WWN_PER_PORT_MASK 0xFFFF0000
+ #define FCOE_FEATURES2_NPIV_WWN_PER_PORT_OFFSET 16
+
+ u32 fcoe_features3;
+ #define FCOE_FEATURES3_TARGETS_SUPPORTED_MASK 0x0000FFFF
+ #define FCOE_FEATURES3_TARGETS_SUPPORTED_OFFSET 0
+
+ #define FCOE_FEATURES3_OUTSTANDING_COMMANDS_MASK 0xFFFF0000
+ #define FCOE_FEATURES3_OUTSTANDING_COMMANDS_OFFSET 16
+
+ u32 fcoe_features4;
+ #define FCOE_FEATURES4_FEATURE_SETTINGS_MASK 0x0000000F
+ #define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET 0
+};
+
+struct ncsi_oem_data {
+ u32 driver_version[4];
+ struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features;
+};
+
+struct shmem2_region {
+
+ u32 size; /* 0x0000 */
+
+ u32 dcc_support; /* 0x0004 */
+ #define SHMEM_DCC_SUPPORT_NONE 0x00000000
+ #define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001
+ #define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV 0x00000004
+ #define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV 0x00000008
+ #define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040
+ #define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080
+
+ u32 ext_phy_fw_version2[PORT_MAX]; /* 0x0008 */
+ /*
+ * For backwards compatibility, if the mf_cfg_addr does not exist
+	 * (the size field is smaller than 0xc) the mf_cfg resides at the
+ * end of struct shmem_region
+ */
+ u32 mf_cfg_addr; /* 0x0010 */
+ #define SHMEM_MF_CFG_ADDR_NONE 0x00000000
+
+ struct fw_flr_mb flr_mb; /* 0x0014 */
+ u32 dcbx_lldp_params_offset; /* 0x0028 */
+ #define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000
+ u32 dcbx_neg_res_offset; /* 0x002c */
+ #define SHMEM_DCBX_NEG_RES_NONE 0x00000000
+ u32 dcbx_remote_mib_offset; /* 0x0030 */
+ #define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000
+ /*
+ * The other shmemX_base_addr holds the other path's shmem address
+ * required for example in case of common phy init, or for path1 to know
+	 * the address of the mcp debug trace, which is located at an offset
+	 * from the shmem of path0
+ */
+ u32 other_shmem_base_addr; /* 0x0034 */
+ u32 other_shmem2_base_addr; /* 0x0038 */
+ /*
+	 * mcp_vf_disabled is set by the MCP to notify the driver about VFs
+	 * which were disabled/FLRed
+ */
+ u32 mcp_vf_disabled[E2_VF_MAX / 32]; /* 0x003c */
+
+ /*
+ * drv_ack_vf_disabled is set by the PF driver to ack handled disabled
+ * VFs
+ */
+ u32 drv_ack_vf_disabled[E2_FUNC_MAX][E2_VF_MAX / 32]; /* 0x0044 */
+
+ u32 dcbx_lldp_dcbx_stat_offset; /* 0x0064 */
+ #define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000
+
+ /*
+	 * edebug_driver_if field is used to transfer messages between the
+	 * edebug app and the driver through shmem2.
+ *
+ * message format:
+ * bits 0-2 - function number / instance of driver to perform request
+ * bits 3-5 - op code / is_ack?
+ * bits 6-63 - data
+ */
+ u32 edebug_driver_if[2]; /* 0x0068 */
+ #define EDEBUG_DRIVER_IF_OP_CODE_GET_PHYS_ADDR 1
+ #define EDEBUG_DRIVER_IF_OP_CODE_GET_BUS_ADDR 2
+ #define EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT 3
+
+ u32 nvm_retain_bitmap_addr; /* 0x0070 */
+
+ u32 reserved1; /* 0x0074 */
+
+ u32 reserved2[E2_FUNC_MAX];
+
+ u32 reserved3[E2_FUNC_MAX];/* 0x0088 */
+ u32 reserved4[E2_FUNC_MAX];/* 0x0098 */
+
+ u32 swim_base_addr; /* 0x0108 */
+ u32 swim_funcs;
+ u32 swim_main_cb;
+
+ u32 reserved5[2];
+
+ /* generic flags controlled by the driver */
+ u32 drv_flags;
+ #define DRV_FLAGS_DCB_CONFIGURED 0x1
+
+ /* pointer to extended dev_info shared data copied from nvm image */
+ u32 extended_dev_info_shared_addr;
+ u32 ncsi_oem_data_addr;
+
+ u32 ocsd_host_addr;
+ u32 ocbb_host_addr;
+ u32 ocsd_req_update_interval;
+};
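+
+/*
+ * Illustrative only: decoding the edebug_driver_if message layout
+ * documented above, assuming bits 0-31 of the message live in the first
+ * dword. The helper names and that dword-order assumption are not part of
+ * the HSI.
+ */
+static inline u8 edebug_msg_func(const u32 msg[2])
+{
+	return msg[0] & 0x7;		/* bits 0-2: function number */
+}
+
+static inline u8 edebug_msg_opcode(const u32 msg[2])
+{
+	return (msg[0] >> 3) & 0x7;	/* bits 3-5: op code / is_ack */
+}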
+
+
+struct emac_stats {
+ u32 rx_stat_ifhcinoctets;
+ u32 rx_stat_ifhcinbadoctets;
+ u32 rx_stat_etherstatsfragments;
+ u32 rx_stat_ifhcinucastpkts;
+ u32 rx_stat_ifhcinmulticastpkts;
+ u32 rx_stat_ifhcinbroadcastpkts;
+ u32 rx_stat_dot3statsfcserrors;
+ u32 rx_stat_dot3statsalignmenterrors;
+ u32 rx_stat_dot3statscarriersenseerrors;
+ u32 rx_stat_xonpauseframesreceived;
+ u32 rx_stat_xoffpauseframesreceived;
+ u32 rx_stat_maccontrolframesreceived;
+ u32 rx_stat_xoffstateentered;
+ u32 rx_stat_dot3statsframestoolong;
+ u32 rx_stat_etherstatsjabbers;
+ u32 rx_stat_etherstatsundersizepkts;
+ u32 rx_stat_etherstatspkts64octets;
+ u32 rx_stat_etherstatspkts65octetsto127octets;
+ u32 rx_stat_etherstatspkts128octetsto255octets;
+ u32 rx_stat_etherstatspkts256octetsto511octets;
+ u32 rx_stat_etherstatspkts512octetsto1023octets;
+ u32 rx_stat_etherstatspkts1024octetsto1522octets;
+ u32 rx_stat_etherstatspktsover1522octets;
+
+ u32 rx_stat_falsecarriererrors;
+
+ u32 tx_stat_ifhcoutoctets;
+ u32 tx_stat_ifhcoutbadoctets;
+ u32 tx_stat_etherstatscollisions;
+ u32 tx_stat_outxonsent;
+ u32 tx_stat_outxoffsent;
+ u32 tx_stat_flowcontroldone;
+ u32 tx_stat_dot3statssinglecollisionframes;
+ u32 tx_stat_dot3statsmultiplecollisionframes;
+ u32 tx_stat_dot3statsdeferredtransmissions;
+ u32 tx_stat_dot3statsexcessivecollisions;
+ u32 tx_stat_dot3statslatecollisions;
+ u32 tx_stat_ifhcoutucastpkts;
+ u32 tx_stat_ifhcoutmulticastpkts;
+ u32 tx_stat_ifhcoutbroadcastpkts;
+ u32 tx_stat_etherstatspkts64octets;
+ u32 tx_stat_etherstatspkts65octetsto127octets;
+ u32 tx_stat_etherstatspkts128octetsto255octets;
+ u32 tx_stat_etherstatspkts256octetsto511octets;
+ u32 tx_stat_etherstatspkts512octetsto1023octets;
+ u32 tx_stat_etherstatspkts1024octetsto1522octets;
+ u32 tx_stat_etherstatspktsover1522octets;
+ u32 tx_stat_dot3statsinternalmactransmiterrors;
+};
+
+
+struct bmac1_stats {
+ u32 tx_stat_gtpkt_lo;
+ u32 tx_stat_gtpkt_hi;
+ u32 tx_stat_gtxpf_lo;
+ u32 tx_stat_gtxpf_hi;
+ u32 tx_stat_gtfcs_lo;
+ u32 tx_stat_gtfcs_hi;
+ u32 tx_stat_gtmca_lo;
+ u32 tx_stat_gtmca_hi;
+ u32 tx_stat_gtbca_lo;
+ u32 tx_stat_gtbca_hi;
+ u32 tx_stat_gtfrg_lo;
+ u32 tx_stat_gtfrg_hi;
+ u32 tx_stat_gtovr_lo;
+ u32 tx_stat_gtovr_hi;
+ u32 tx_stat_gt64_lo;
+ u32 tx_stat_gt64_hi;
+ u32 tx_stat_gt127_lo;
+ u32 tx_stat_gt127_hi;
+ u32 tx_stat_gt255_lo;
+ u32 tx_stat_gt255_hi;
+ u32 tx_stat_gt511_lo;
+ u32 tx_stat_gt511_hi;
+ u32 tx_stat_gt1023_lo;
+ u32 tx_stat_gt1023_hi;
+ u32 tx_stat_gt1518_lo;
+ u32 tx_stat_gt1518_hi;
+ u32 tx_stat_gt2047_lo;
+ u32 tx_stat_gt2047_hi;
+ u32 tx_stat_gt4095_lo;
+ u32 tx_stat_gt4095_hi;
+ u32 tx_stat_gt9216_lo;
+ u32 tx_stat_gt9216_hi;
+ u32 tx_stat_gt16383_lo;
+ u32 tx_stat_gt16383_hi;
+ u32 tx_stat_gtmax_lo;
+ u32 tx_stat_gtmax_hi;
+ u32 tx_stat_gtufl_lo;
+ u32 tx_stat_gtufl_hi;
+ u32 tx_stat_gterr_lo;
+ u32 tx_stat_gterr_hi;
+ u32 tx_stat_gtbyt_lo;
+ u32 tx_stat_gtbyt_hi;
+
+ u32 rx_stat_gr64_lo;
+ u32 rx_stat_gr64_hi;
+ u32 rx_stat_gr127_lo;
+ u32 rx_stat_gr127_hi;
+ u32 rx_stat_gr255_lo;
+ u32 rx_stat_gr255_hi;
+ u32 rx_stat_gr511_lo;
+ u32 rx_stat_gr511_hi;
+ u32 rx_stat_gr1023_lo;
+ u32 rx_stat_gr1023_hi;
+ u32 rx_stat_gr1518_lo;
+ u32 rx_stat_gr1518_hi;
+ u32 rx_stat_gr2047_lo;
+ u32 rx_stat_gr2047_hi;
+ u32 rx_stat_gr4095_lo;
+ u32 rx_stat_gr4095_hi;
+ u32 rx_stat_gr9216_lo;
+ u32 rx_stat_gr9216_hi;
+ u32 rx_stat_gr16383_lo;
+ u32 rx_stat_gr16383_hi;
+ u32 rx_stat_grmax_lo;
+ u32 rx_stat_grmax_hi;
+ u32 rx_stat_grpkt_lo;
+ u32 rx_stat_grpkt_hi;
+ u32 rx_stat_grfcs_lo;
+ u32 rx_stat_grfcs_hi;
+ u32 rx_stat_grmca_lo;
+ u32 rx_stat_grmca_hi;
+ u32 rx_stat_grbca_lo;
+ u32 rx_stat_grbca_hi;
+ u32 rx_stat_grxcf_lo;
+ u32 rx_stat_grxcf_hi;
+ u32 rx_stat_grxpf_lo;
+ u32 rx_stat_grxpf_hi;
+ u32 rx_stat_grxuo_lo;
+ u32 rx_stat_grxuo_hi;
+ u32 rx_stat_grjbr_lo;
+ u32 rx_stat_grjbr_hi;
+ u32 rx_stat_grovr_lo;
+ u32 rx_stat_grovr_hi;
+ u32 rx_stat_grflr_lo;
+ u32 rx_stat_grflr_hi;
+ u32 rx_stat_grmeg_lo;
+ u32 rx_stat_grmeg_hi;
+ u32 rx_stat_grmeb_lo;
+ u32 rx_stat_grmeb_hi;
+ u32 rx_stat_grbyt_lo;
+ u32 rx_stat_grbyt_hi;
+ u32 rx_stat_grund_lo;
+ u32 rx_stat_grund_hi;
+ u32 rx_stat_grfrg_lo;
+ u32 rx_stat_grfrg_hi;
+ u32 rx_stat_grerb_lo;
+ u32 rx_stat_grerb_hi;
+ u32 rx_stat_grfre_lo;
+ u32 rx_stat_grfre_hi;
+ u32 rx_stat_gripj_lo;
+ u32 rx_stat_gripj_hi;
+};
+
+struct bmac2_stats {
+ u32 tx_stat_gtpk_lo; /* gtpok */
+ u32 tx_stat_gtpk_hi; /* gtpok */
+ u32 tx_stat_gtxpf_lo; /* gtpf */
+ u32 tx_stat_gtxpf_hi; /* gtpf */
+ u32 tx_stat_gtpp_lo; /* NEW BMAC2 */
+ u32 tx_stat_gtpp_hi; /* NEW BMAC2 */
+ u32 tx_stat_gtfcs_lo;
+ u32 tx_stat_gtfcs_hi;
+ u32 tx_stat_gtuca_lo; /* NEW BMAC2 */
+ u32 tx_stat_gtuca_hi; /* NEW BMAC2 */
+ u32 tx_stat_gtmca_lo;
+ u32 tx_stat_gtmca_hi;
+ u32 tx_stat_gtbca_lo;
+ u32 tx_stat_gtbca_hi;
+ u32 tx_stat_gtovr_lo;
+ u32 tx_stat_gtovr_hi;
+ u32 tx_stat_gtfrg_lo;
+ u32 tx_stat_gtfrg_hi;
+ u32 tx_stat_gtpkt1_lo; /* gtpkt */
+ u32 tx_stat_gtpkt1_hi; /* gtpkt */
+ u32 tx_stat_gt64_lo;
+ u32 tx_stat_gt64_hi;
+ u32 tx_stat_gt127_lo;
+ u32 tx_stat_gt127_hi;
+ u32 tx_stat_gt255_lo;
+ u32 tx_stat_gt255_hi;
+ u32 tx_stat_gt511_lo;
+ u32 tx_stat_gt511_hi;
+ u32 tx_stat_gt1023_lo;
+ u32 tx_stat_gt1023_hi;
+ u32 tx_stat_gt1518_lo;
+ u32 tx_stat_gt1518_hi;
+ u32 tx_stat_gt2047_lo;
+ u32 tx_stat_gt2047_hi;
+ u32 tx_stat_gt4095_lo;
+ u32 tx_stat_gt4095_hi;
+ u32 tx_stat_gt9216_lo;
+ u32 tx_stat_gt9216_hi;
+ u32 tx_stat_gt16383_lo;
+ u32 tx_stat_gt16383_hi;
+ u32 tx_stat_gtmax_lo;
+ u32 tx_stat_gtmax_hi;
+ u32 tx_stat_gtufl_lo;
+ u32 tx_stat_gtufl_hi;
+ u32 tx_stat_gterr_lo;
+ u32 tx_stat_gterr_hi;
+ u32 tx_stat_gtbyt_lo;
+ u32 tx_stat_gtbyt_hi;
+
+ u32 rx_stat_gr64_lo;
+ u32 rx_stat_gr64_hi;
+ u32 rx_stat_gr127_lo;
+ u32 rx_stat_gr127_hi;
+ u32 rx_stat_gr255_lo;
+ u32 rx_stat_gr255_hi;
+ u32 rx_stat_gr511_lo;
+ u32 rx_stat_gr511_hi;
+ u32 rx_stat_gr1023_lo;
+ u32 rx_stat_gr1023_hi;
+ u32 rx_stat_gr1518_lo;
+ u32 rx_stat_gr1518_hi;
+ u32 rx_stat_gr2047_lo;
+ u32 rx_stat_gr2047_hi;
+ u32 rx_stat_gr4095_lo;
+ u32 rx_stat_gr4095_hi;
+ u32 rx_stat_gr9216_lo;
+ u32 rx_stat_gr9216_hi;
+ u32 rx_stat_gr16383_lo;
+ u32 rx_stat_gr16383_hi;
+ u32 rx_stat_grmax_lo;
+ u32 rx_stat_grmax_hi;
+ u32 rx_stat_grpkt_lo;
+ u32 rx_stat_grpkt_hi;
+ u32 rx_stat_grfcs_lo;
+ u32 rx_stat_grfcs_hi;
+ u32 rx_stat_gruca_lo;
+ u32 rx_stat_gruca_hi;
+ u32 rx_stat_grmca_lo;
+ u32 rx_stat_grmca_hi;
+ u32 rx_stat_grbca_lo;
+ u32 rx_stat_grbca_hi;
+ u32 rx_stat_grxpf_lo; /* grpf */
+ u32 rx_stat_grxpf_hi; /* grpf */
+ u32 rx_stat_grpp_lo;
+ u32 rx_stat_grpp_hi;
+ u32 rx_stat_grxuo_lo; /* gruo */
+ u32 rx_stat_grxuo_hi; /* gruo */
+ u32 rx_stat_grjbr_lo;
+ u32 rx_stat_grjbr_hi;
+ u32 rx_stat_grovr_lo;
+ u32 rx_stat_grovr_hi;
+ u32 rx_stat_grxcf_lo; /* grcf */
+ u32 rx_stat_grxcf_hi; /* grcf */
+ u32 rx_stat_grflr_lo;
+ u32 rx_stat_grflr_hi;
+ u32 rx_stat_grpok_lo;
+ u32 rx_stat_grpok_hi;
+ u32 rx_stat_grmeg_lo;
+ u32 rx_stat_grmeg_hi;
+ u32 rx_stat_grmeb_lo;
+ u32 rx_stat_grmeb_hi;
+ u32 rx_stat_grbyt_lo;
+ u32 rx_stat_grbyt_hi;
+ u32 rx_stat_grund_lo;
+ u32 rx_stat_grund_hi;
+ u32 rx_stat_grfrg_lo;
+ u32 rx_stat_grfrg_hi;
+ u32 rx_stat_grerb_lo; /* grerrbyt */
+ u32 rx_stat_grerb_hi; /* grerrbyt */
+ u32 rx_stat_grfre_lo; /* grfrerr */
+ u32 rx_stat_grfre_hi; /* grfrerr */
+ u32 rx_stat_gripj_lo;
+ u32 rx_stat_gripj_hi;
+};
+
+struct mstat_stats {
+ struct {
+	/* NOTE: MSTAT on E3 has a bug where this register's contents are
+	 * actually tx_gtxpok + tx_gtxpf + (possibly) tx_gtxpp
+	 */
+ u32 tx_gtxpok_lo;
+ u32 tx_gtxpok_hi;
+ u32 tx_gtxpf_lo;
+ u32 tx_gtxpf_hi;
+ u32 tx_gtxpp_lo;
+ u32 tx_gtxpp_hi;
+ u32 tx_gtfcs_lo;
+ u32 tx_gtfcs_hi;
+ u32 tx_gtuca_lo;
+ u32 tx_gtuca_hi;
+ u32 tx_gtmca_lo;
+ u32 tx_gtmca_hi;
+ u32 tx_gtgca_lo;
+ u32 tx_gtgca_hi;
+ u32 tx_gtpkt_lo;
+ u32 tx_gtpkt_hi;
+ u32 tx_gt64_lo;
+ u32 tx_gt64_hi;
+ u32 tx_gt127_lo;
+ u32 tx_gt127_hi;
+ u32 tx_gt255_lo;
+ u32 tx_gt255_hi;
+ u32 tx_gt511_lo;
+ u32 tx_gt511_hi;
+ u32 tx_gt1023_lo;
+ u32 tx_gt1023_hi;
+ u32 tx_gt1518_lo;
+ u32 tx_gt1518_hi;
+ u32 tx_gt2047_lo;
+ u32 tx_gt2047_hi;
+ u32 tx_gt4095_lo;
+ u32 tx_gt4095_hi;
+ u32 tx_gt9216_lo;
+ u32 tx_gt9216_hi;
+ u32 tx_gt16383_lo;
+ u32 tx_gt16383_hi;
+ u32 tx_gtufl_lo;
+ u32 tx_gtufl_hi;
+ u32 tx_gterr_lo;
+ u32 tx_gterr_hi;
+ u32 tx_gtbyt_lo;
+ u32 tx_gtbyt_hi;
+ u32 tx_collisions_lo;
+ u32 tx_collisions_hi;
+ u32 tx_singlecollision_lo;
+ u32 tx_singlecollision_hi;
+ u32 tx_multiplecollisions_lo;
+ u32 tx_multiplecollisions_hi;
+ u32 tx_deferred_lo;
+ u32 tx_deferred_hi;
+ u32 tx_excessivecollisions_lo;
+ u32 tx_excessivecollisions_hi;
+ u32 tx_latecollisions_lo;
+ u32 tx_latecollisions_hi;
+ } stats_tx;
+
+ struct {
+ u32 rx_gr64_lo;
+ u32 rx_gr64_hi;
+ u32 rx_gr127_lo;
+ u32 rx_gr127_hi;
+ u32 rx_gr255_lo;
+ u32 rx_gr255_hi;
+ u32 rx_gr511_lo;
+ u32 rx_gr511_hi;
+ u32 rx_gr1023_lo;
+ u32 rx_gr1023_hi;
+ u32 rx_gr1518_lo;
+ u32 rx_gr1518_hi;
+ u32 rx_gr2047_lo;
+ u32 rx_gr2047_hi;
+ u32 rx_gr4095_lo;
+ u32 rx_gr4095_hi;
+ u32 rx_gr9216_lo;
+ u32 rx_gr9216_hi;
+ u32 rx_gr16383_lo;
+ u32 rx_gr16383_hi;
+ u32 rx_grpkt_lo;
+ u32 rx_grpkt_hi;
+ u32 rx_grfcs_lo;
+ u32 rx_grfcs_hi;
+ u32 rx_gruca_lo;
+ u32 rx_gruca_hi;
+ u32 rx_grmca_lo;
+ u32 rx_grmca_hi;
+ u32 rx_grbca_lo;
+ u32 rx_grbca_hi;
+ u32 rx_grxpf_lo;
+ u32 rx_grxpf_hi;
+ u32 rx_grxpp_lo;
+ u32 rx_grxpp_hi;
+ u32 rx_grxuo_lo;
+ u32 rx_grxuo_hi;
+ u32 rx_grovr_lo;
+ u32 rx_grovr_hi;
+ u32 rx_grxcf_lo;
+ u32 rx_grxcf_hi;
+ u32 rx_grflr_lo;
+ u32 rx_grflr_hi;
+ u32 rx_grpok_lo;
+ u32 rx_grpok_hi;
+ u32 rx_grbyt_lo;
+ u32 rx_grbyt_hi;
+ u32 rx_grund_lo;
+ u32 rx_grund_hi;
+ u32 rx_grfrg_lo;
+ u32 rx_grfrg_hi;
+ u32 rx_grerb_lo;
+ u32 rx_grerb_hi;
+ u32 rx_grfre_lo;
+ u32 rx_grfre_hi;
+
+ u32 rx_alignmenterrors_lo;
+ u32 rx_alignmenterrors_hi;
+ u32 rx_falsecarrier_lo;
+ u32 rx_falsecarrier_hi;
+ u32 rx_llfcmsgcnt_lo;
+ u32 rx_llfcmsgcnt_hi;
+ } stats_rx;
+};
+
+union mac_stats {
+ struct emac_stats emac_stats;
+ struct bmac1_stats bmac1_stats;
+ struct bmac2_stats bmac2_stats;
+ struct mstat_stats mstat_stats;
+};
+
+
+struct mac_stx {
+ /* in_bad_octets */
+ u32 rx_stat_ifhcinbadoctets_hi;
+ u32 rx_stat_ifhcinbadoctets_lo;
+
+ /* out_bad_octets */
+ u32 tx_stat_ifhcoutbadoctets_hi;
+ u32 tx_stat_ifhcoutbadoctets_lo;
+
+ /* crc_receive_errors */
+ u32 rx_stat_dot3statsfcserrors_hi;
+ u32 rx_stat_dot3statsfcserrors_lo;
+ /* alignment_errors */
+ u32 rx_stat_dot3statsalignmenterrors_hi;
+ u32 rx_stat_dot3statsalignmenterrors_lo;
+ /* carrier_sense_errors */
+ u32 rx_stat_dot3statscarriersenseerrors_hi;
+ u32 rx_stat_dot3statscarriersenseerrors_lo;
+ /* false_carrier_detections */
+ u32 rx_stat_falsecarriererrors_hi;
+ u32 rx_stat_falsecarriererrors_lo;
+
+ /* runt_packets_received */
+ u32 rx_stat_etherstatsundersizepkts_hi;
+ u32 rx_stat_etherstatsundersizepkts_lo;
+ /* jabber_packets_received */
+ u32 rx_stat_dot3statsframestoolong_hi;
+ u32 rx_stat_dot3statsframestoolong_lo;
+
+ /* error_runt_packets_received */
+ u32 rx_stat_etherstatsfragments_hi;
+ u32 rx_stat_etherstatsfragments_lo;
+ /* error_jabber_packets_received */
+ u32 rx_stat_etherstatsjabbers_hi;
+ u32 rx_stat_etherstatsjabbers_lo;
+
+ /* control_frames_received */
+ u32 rx_stat_maccontrolframesreceived_hi;
+ u32 rx_stat_maccontrolframesreceived_lo;
+ u32 rx_stat_mac_xpf_hi;
+ u32 rx_stat_mac_xpf_lo;
+ u32 rx_stat_mac_xcf_hi;
+ u32 rx_stat_mac_xcf_lo;
+
+ /* xoff_state_entered */
+ u32 rx_stat_xoffstateentered_hi;
+ u32 rx_stat_xoffstateentered_lo;
+ /* pause_xon_frames_received */
+ u32 rx_stat_xonpauseframesreceived_hi;
+ u32 rx_stat_xonpauseframesreceived_lo;
+ /* pause_xoff_frames_received */
+ u32 rx_stat_xoffpauseframesreceived_hi;
+ u32 rx_stat_xoffpauseframesreceived_lo;
+ /* pause_xon_frames_transmitted */
+ u32 tx_stat_outxonsent_hi;
+ u32 tx_stat_outxonsent_lo;
+ /* pause_xoff_frames_transmitted */
+ u32 tx_stat_outxoffsent_hi;
+ u32 tx_stat_outxoffsent_lo;
+ /* flow_control_done */
+ u32 tx_stat_flowcontroldone_hi;
+ u32 tx_stat_flowcontroldone_lo;
+
+ /* ether_stats_collisions */
+ u32 tx_stat_etherstatscollisions_hi;
+ u32 tx_stat_etherstatscollisions_lo;
+ /* single_collision_transmit_frames */
+ u32 tx_stat_dot3statssinglecollisionframes_hi;
+ u32 tx_stat_dot3statssinglecollisionframes_lo;
+ /* multiple_collision_transmit_frames */
+ u32 tx_stat_dot3statsmultiplecollisionframes_hi;
+ u32 tx_stat_dot3statsmultiplecollisionframes_lo;
+ /* deferred_transmissions */
+ u32 tx_stat_dot3statsdeferredtransmissions_hi;
+ u32 tx_stat_dot3statsdeferredtransmissions_lo;
+ /* excessive_collision_frames */
+ u32 tx_stat_dot3statsexcessivecollisions_hi;
+ u32 tx_stat_dot3statsexcessivecollisions_lo;
+ /* late_collision_frames */
+ u32 tx_stat_dot3statslatecollisions_hi;
+ u32 tx_stat_dot3statslatecollisions_lo;
+
+ /* frames_transmitted_64_bytes */
+ u32 tx_stat_etherstatspkts64octets_hi;
+ u32 tx_stat_etherstatspkts64octets_lo;
+ /* frames_transmitted_65_127_bytes */
+ u32 tx_stat_etherstatspkts65octetsto127octets_hi;
+ u32 tx_stat_etherstatspkts65octetsto127octets_lo;
+ /* frames_transmitted_128_255_bytes */
+ u32 tx_stat_etherstatspkts128octetsto255octets_hi;
+ u32 tx_stat_etherstatspkts128octetsto255octets_lo;
+ /* frames_transmitted_256_511_bytes */
+ u32 tx_stat_etherstatspkts256octetsto511octets_hi;
+ u32 tx_stat_etherstatspkts256octetsto511octets_lo;
+ /* frames_transmitted_512_1023_bytes */
+ u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
+ u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
+ /* frames_transmitted_1024_1522_bytes */
+ u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
+ u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
+ /* frames_transmitted_1523_9022_bytes */
+ u32 tx_stat_etherstatspktsover1522octets_hi;
+ u32 tx_stat_etherstatspktsover1522octets_lo;
+ u32 tx_stat_mac_2047_hi;
+ u32 tx_stat_mac_2047_lo;
+ u32 tx_stat_mac_4095_hi;
+ u32 tx_stat_mac_4095_lo;
+ u32 tx_stat_mac_9216_hi;
+ u32 tx_stat_mac_9216_lo;
+ u32 tx_stat_mac_16383_hi;
+ u32 tx_stat_mac_16383_lo;
+
+ /* internal_mac_transmit_errors */
+ u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
+ u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
+
+ /* if_out_discards */
+ u32 tx_stat_mac_ufl_hi;
+ u32 tx_stat_mac_ufl_lo;
+};
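+
+/*
+ * Illustrative only: each *_hi/*_lo pair above is one half of a 64-bit
+ * counter; a consumer would typically recombine the halves like this.
+ */
+static inline u64 mac_stx_to_u64(u32 hi, u32 lo)
+{
+	return ((u64)hi << 32) | lo;
+}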
+
+
+#define MAC_STX_IDX_MAX 2
+
+struct host_port_stats {
+ u32 host_port_stats_start;
+
+ struct mac_stx mac_stx[MAC_STX_IDX_MAX];
+
+ u32 brb_drop_hi;
+ u32 brb_drop_lo;
+
+ u32 host_port_stats_end;
+};
+
+
+struct host_func_stats {
+ u32 host_func_stats_start;
+
+ u32 total_bytes_received_hi;
+ u32 total_bytes_received_lo;
+
+ u32 total_bytes_transmitted_hi;
+ u32 total_bytes_transmitted_lo;
+
+ u32 total_unicast_packets_received_hi;
+ u32 total_unicast_packets_received_lo;
+
+ u32 total_multicast_packets_received_hi;
+ u32 total_multicast_packets_received_lo;
+
+ u32 total_broadcast_packets_received_hi;
+ u32 total_broadcast_packets_received_lo;
+
+ u32 total_unicast_packets_transmitted_hi;
+ u32 total_unicast_packets_transmitted_lo;
+
+ u32 total_multicast_packets_transmitted_hi;
+ u32 total_multicast_packets_transmitted_lo;
+
+ u32 total_broadcast_packets_transmitted_hi;
+ u32 total_broadcast_packets_transmitted_lo;
+
+ u32 valid_bytes_received_hi;
+ u32 valid_bytes_received_lo;
+
+ u32 host_func_stats_end;
+};
+
+/* VIC definitions */
+#define VICSTATST_UIF_INDEX 2
+
+#define BCM_5710_FW_MAJOR_VERSION 7
+#define BCM_5710_FW_MINOR_VERSION 0
+#define BCM_5710_FW_REVISION_VERSION 23
+#define BCM_5710_FW_ENGINEERING_VERSION 0
+#define BCM_5710_FW_COMPILE_FLAGS 1
+
+
+/*
+ * attention bits
+ */
+struct atten_sp_status_block {
+ __le32 attn_bits;
+ __le32 attn_bits_ack;
+ u8 status_block_id;
+ u8 reserved0;
+ __le16 attn_bits_index;
+ __le32 reserved1;
+};
+
+
+/*
+ * The eth aggregative context of Cstorm
+ */
+struct cstorm_eth_ag_context {
+ u32 __reserved0[10];
+};
+
+
+/*
+ * dmae command structure
+ */
+struct dmae_command {
+ u32 opcode;
+#define DMAE_COMMAND_SRC (0x1<<0)
+#define DMAE_COMMAND_SRC_SHIFT 0
+#define DMAE_COMMAND_DST (0x3<<1)
+#define DMAE_COMMAND_DST_SHIFT 1
+#define DMAE_COMMAND_C_DST (0x1<<3)
+#define DMAE_COMMAND_C_DST_SHIFT 3
+#define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4)
+#define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4
+#define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5)
+#define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5
+#define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6)
+#define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6
+#define DMAE_COMMAND_ENDIANITY (0x3<<9)
+#define DMAE_COMMAND_ENDIANITY_SHIFT 9
+#define DMAE_COMMAND_PORT (0x1<<11)
+#define DMAE_COMMAND_PORT_SHIFT 11
+#define DMAE_COMMAND_CRC_RESET (0x1<<12)
+#define DMAE_COMMAND_CRC_RESET_SHIFT 12
+#define DMAE_COMMAND_SRC_RESET (0x1<<13)
+#define DMAE_COMMAND_SRC_RESET_SHIFT 13
+#define DMAE_COMMAND_DST_RESET (0x1<<14)
+#define DMAE_COMMAND_DST_RESET_SHIFT 14
+#define DMAE_COMMAND_E1HVN (0x3<<15)
+#define DMAE_COMMAND_E1HVN_SHIFT 15
+#define DMAE_COMMAND_DST_VN (0x3<<17)
+#define DMAE_COMMAND_DST_VN_SHIFT 17
+#define DMAE_COMMAND_C_FUNC (0x1<<19)
+#define DMAE_COMMAND_C_FUNC_SHIFT 19
+#define DMAE_COMMAND_ERR_POLICY (0x3<<20)
+#define DMAE_COMMAND_ERR_POLICY_SHIFT 20
+#define DMAE_COMMAND_RESERVED0 (0x3FF<<22)
+#define DMAE_COMMAND_RESERVED0_SHIFT 22
+ u32 src_addr_lo;
+ u32 src_addr_hi;
+ u32 dst_addr_lo;
+ u32 dst_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 opcode_iov;
+#define DMAE_COMMAND_SRC_VFID (0x3F<<0)
+#define DMAE_COMMAND_SRC_VFID_SHIFT 0
+#define DMAE_COMMAND_SRC_VFPF (0x1<<6)
+#define DMAE_COMMAND_SRC_VFPF_SHIFT 6
+#define DMAE_COMMAND_RESERVED1 (0x1<<7)
+#define DMAE_COMMAND_RESERVED1_SHIFT 7
+#define DMAE_COMMAND_DST_VFID (0x3F<<8)
+#define DMAE_COMMAND_DST_VFID_SHIFT 8
+#define DMAE_COMMAND_DST_VFPF (0x1<<14)
+#define DMAE_COMMAND_DST_VFPF_SHIFT 14
+#define DMAE_COMMAND_RESERVED2 (0x1<<15)
+#define DMAE_COMMAND_RESERVED2_SHIFT 15
+ u16 len;
+#elif defined(__LITTLE_ENDIAN)
+ u16 len;
+ u16 opcode_iov;
+#define DMAE_COMMAND_SRC_VFID (0x3F<<0)
+#define DMAE_COMMAND_SRC_VFID_SHIFT 0
+#define DMAE_COMMAND_SRC_VFPF (0x1<<6)
+#define DMAE_COMMAND_SRC_VFPF_SHIFT 6
+#define DMAE_COMMAND_RESERVED1 (0x1<<7)
+#define DMAE_COMMAND_RESERVED1_SHIFT 7
+#define DMAE_COMMAND_DST_VFID (0x3F<<8)
+#define DMAE_COMMAND_DST_VFID_SHIFT 8
+#define DMAE_COMMAND_DST_VFPF (0x1<<14)
+#define DMAE_COMMAND_DST_VFPF_SHIFT 14
+#define DMAE_COMMAND_RESERVED2 (0x1<<15)
+#define DMAE_COMMAND_RESERVED2_SHIFT 15
+#endif
+ u32 comp_addr_lo;
+ u32 comp_addr_hi;
+ u32 comp_val;
+ u32 crc32;
+ u32 crc32_c;
+#if defined(__BIG_ENDIAN)
+ u16 crc16_c;
+ u16 crc16;
+#elif defined(__LITTLE_ENDIAN)
+ u16 crc16;
+ u16 crc16_c;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 reserved3;
+ u16 crc_t10;
+#elif defined(__LITTLE_ENDIAN)
+ u16 crc_t10;
+ u16 reserved3;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 xsum8;
+ u16 xsum16;
+#elif defined(__LITTLE_ENDIAN)
+ u16 xsum16;
+ u16 xsum8;
+#endif
+};
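+
+/*
+ * Illustrative only: composing a dmae_command opcode from the masks and
+ * shifts above. The field values used here are arbitrary examples, not
+ * HSI constants, and the helper name is an assumption.
+ */
+static inline u32 dmae_opcode_example(void)
+{
+	u32 opcode = 0;
+
+	opcode |= (1 << DMAE_COMMAND_SRC_SHIFT) & DMAE_COMMAND_SRC;
+	opcode |= (2 << DMAE_COMMAND_DST_SHIFT) & DMAE_COMMAND_DST;
+	opcode |= (1 << DMAE_COMMAND_C_DST_SHIFT) & DMAE_COMMAND_C_DST;
+
+	return opcode;
+}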
+
+
+/*
+ * common data for all protocols
+ */
+struct doorbell_hdr {
+ u8 header;
+#define DOORBELL_HDR_RX (0x1<<0)
+#define DOORBELL_HDR_RX_SHIFT 0
+#define DOORBELL_HDR_DB_TYPE (0x1<<1)
+#define DOORBELL_HDR_DB_TYPE_SHIFT 1
+#define DOORBELL_HDR_DPM_SIZE (0x3<<2)
+#define DOORBELL_HDR_DPM_SIZE_SHIFT 2
+#define DOORBELL_HDR_CONN_TYPE (0xF<<4)
+#define DOORBELL_HDR_CONN_TYPE_SHIFT 4
+};
+
+/*
+ * Ethernet doorbell
+ */
+struct eth_tx_doorbell {
+#if defined(__BIG_ENDIAN)
+ u16 npackets;
+ u8 params;
+#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
+#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
+#define ETH_TX_DOORBELL_SPARE (0x1<<7)
+#define ETH_TX_DOORBELL_SPARE_SHIFT 7
+ struct doorbell_hdr hdr;
+#elif defined(__LITTLE_ENDIAN)
+ struct doorbell_hdr hdr;
+ u8 params;
+#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
+#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
+#define ETH_TX_DOORBELL_SPARE (0x1<<7)
+#define ETH_TX_DOORBELL_SPARE_SHIFT 7
+ u16 npackets;
+#endif
+};
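+
+/*
+ * Illustrative only: filling the packet/BD counts of a TX doorbell using
+ * the masks above. The doorbell header byte is left to the caller; the
+ * helper name is an assumption.
+ */
+static inline void eth_tx_doorbell_fill(struct eth_tx_doorbell *db,
+					u16 npackets, u8 nbds)
+{
+	db->npackets = npackets;
+	db->params = (nbds << ETH_TX_DOORBELL_NUM_BDS_SHIFT) &
+		     ETH_TX_DOORBELL_NUM_BDS;
+}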
+
+
+/*
+ * 3 lines. status block
+ */
+struct hc_status_block_e1x {
+ __le16 index_values[HC_SB_MAX_INDICES_E1X];
+ __le16 running_index[HC_SB_MAX_SM];
+ __le32 rsrv[11];
+};
+
+/*
+ * host status block
+ */
+struct host_hc_status_block_e1x {
+ struct hc_status_block_e1x sb;
+};
+
+
+/*
+ * 3 lines. status block
+ */
+struct hc_status_block_e2 {
+ __le16 index_values[HC_SB_MAX_INDICES_E2];
+ __le16 running_index[HC_SB_MAX_SM];
+ __le32 reserved[11];
+};
+
+/*
+ * host status block
+ */
+struct host_hc_status_block_e2 {
+ struct hc_status_block_e2 sb;
+};
+
+
+/*
+ * 5 lines. slow-path status block
+ */
+struct hc_sp_status_block {
+ __le16 index_values[HC_SP_SB_MAX_INDICES];
+ __le16 running_index;
+ __le16 rsrv;
+ u32 rsrv1;
+};
+
+/*
+ * host status block
+ */
+struct host_sp_status_block {
+ struct atten_sp_status_block atten_status_block;
+ struct hc_sp_status_block sp_sb;
+};
+
+
+/*
+ * IGU driver acknowledgment register
+ */
+struct igu_ack_register {
+#if defined(__BIG_ENDIAN)
+ u16 sb_id_and_flags;
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0)
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0
+#define IGU_ACK_REGISTER_STORM_ID (0x7<<5)
+#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5
+#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8)
+#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8
+#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9)
+#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9
+#define IGU_ACK_REGISTER_RESERVED (0x1F<<11)
+#define IGU_ACK_REGISTER_RESERVED_SHIFT 11
+ u16 status_block_index;
+#elif defined(__LITTLE_ENDIAN)
+ u16 status_block_index;
+ u16 sb_id_and_flags;
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0)
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0
+#define IGU_ACK_REGISTER_STORM_ID (0x7<<5)
+#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5
+#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8)
+#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8
+#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9)
+#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9
+#define IGU_ACK_REGISTER_RESERVED (0x1F<<11)
+#define IGU_ACK_REGISTER_RESERVED_SHIFT 11
+#endif
+};
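+
+/*
+ * Illustrative only: building the sb_id_and_flags word of the IGU ack
+ * register from its masks. A sketch, not the driver's ack routine.
+ */
+static inline u16 igu_ack_flags(u8 sb_id, u8 storm_id, u8 update)
+{
+	return (u16)(((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) &
+		      IGU_ACK_REGISTER_STATUS_BLOCK_ID) |
+		     ((storm_id << IGU_ACK_REGISTER_STORM_ID_SHIFT) &
+		      IGU_ACK_REGISTER_STORM_ID) |
+		     ((update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) &
+		      IGU_ACK_REGISTER_UPDATE_INDEX));
+}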
+
+
+/*
+ * IGU driver acknowledgement register
+ */
+struct igu_backward_compatible {
+ u32 sb_id_and_flags;
+#define IGU_BACKWARD_COMPATIBLE_SB_INDEX (0xFFFF<<0)
+#define IGU_BACKWARD_COMPATIBLE_SB_INDEX_SHIFT 0
+#define IGU_BACKWARD_COMPATIBLE_SB_SELECT (0x1F<<16)
+#define IGU_BACKWARD_COMPATIBLE_SB_SELECT_SHIFT 16
+#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS (0x7<<21)
+#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS_SHIFT 21
+#define IGU_BACKWARD_COMPATIBLE_BUPDATE (0x1<<24)
+#define IGU_BACKWARD_COMPATIBLE_BUPDATE_SHIFT 24
+#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT (0x3<<25)
+#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT_SHIFT 25
+#define IGU_BACKWARD_COMPATIBLE_RESERVED_0 (0x1F<<27)
+#define IGU_BACKWARD_COMPATIBLE_RESERVED_0_SHIFT 27
+ u32 reserved_2;
+};
+
+
+/*
+ * IGU driver acknowledgement register
+ */
+struct igu_regular {
+ u32 sb_id_and_flags;
+#define IGU_REGULAR_SB_INDEX (0xFFFFF<<0)
+#define IGU_REGULAR_SB_INDEX_SHIFT 0
+#define IGU_REGULAR_RESERVED0 (0x1<<20)
+#define IGU_REGULAR_RESERVED0_SHIFT 20
+#define IGU_REGULAR_SEGMENT_ACCESS (0x7<<21)
+#define IGU_REGULAR_SEGMENT_ACCESS_SHIFT 21
+#define IGU_REGULAR_BUPDATE (0x1<<24)
+#define IGU_REGULAR_BUPDATE_SHIFT 24
+#define IGU_REGULAR_ENABLE_INT (0x3<<25)
+#define IGU_REGULAR_ENABLE_INT_SHIFT 25
+#define IGU_REGULAR_RESERVED_1 (0x1<<27)
+#define IGU_REGULAR_RESERVED_1_SHIFT 27
+#define IGU_REGULAR_CLEANUP_TYPE (0x3<<28)
+#define IGU_REGULAR_CLEANUP_TYPE_SHIFT 28
+#define IGU_REGULAR_CLEANUP_SET (0x1<<30)
+#define IGU_REGULAR_CLEANUP_SET_SHIFT 30
+#define IGU_REGULAR_BCLEANUP (0x1<<31)
+#define IGU_REGULAR_BCLEANUP_SHIFT 31
+ u32 reserved_2;
+};
+
+/*
+ * IGU driver acknowledgement register
+ */
+union igu_consprod_reg {
+ struct igu_regular regular;
+ struct igu_backward_compatible backward_compatible;
+};
+
+
+/*
+ * IGU control commands
+ */
+enum igu_ctrl_cmd {
+ IGU_CTRL_CMD_TYPE_RD,
+ IGU_CTRL_CMD_TYPE_WR,
+ MAX_IGU_CTRL_CMD
+};
+
+
+/*
+ * Control register for the IGU command register
+ */
+struct igu_ctrl_reg {
+ u32 ctrl_data;
+#define IGU_CTRL_REG_ADDRESS (0xFFF<<0)
+#define IGU_CTRL_REG_ADDRESS_SHIFT 0
+#define IGU_CTRL_REG_FID (0x7F<<12)
+#define IGU_CTRL_REG_FID_SHIFT 12
+#define IGU_CTRL_REG_RESERVED (0x1<<19)
+#define IGU_CTRL_REG_RESERVED_SHIFT 19
+#define IGU_CTRL_REG_TYPE (0x1<<20)
+#define IGU_CTRL_REG_TYPE_SHIFT 20
+#define IGU_CTRL_REG_UNUSED (0x7FF<<21)
+#define IGU_CTRL_REG_UNUSED_SHIFT 21
+};
+
+
+/*
+ * IGU interrupt command
+ */
+enum igu_int_cmd {
+ IGU_INT_ENABLE,
+ IGU_INT_DISABLE,
+ IGU_INT_NOP,
+ IGU_INT_NOP2,
+ MAX_IGU_INT_CMD
+};
+
+
+/*
+ * IGU segments
+ */
+enum igu_seg_access {
+ IGU_SEG_ACCESS_NORM,
+ IGU_SEG_ACCESS_DEF,
+ IGU_SEG_ACCESS_ATTN,
+ MAX_IGU_SEG_ACCESS
+};
+
+
+/*
+ * Parser parsing flags field
+ */
+struct parsing_flags {
+ __le16 flags;
+#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE (0x1<<0)
+#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE_SHIFT 0
+#define PARSING_FLAGS_VLAN (0x1<<1)
+#define PARSING_FLAGS_VLAN_SHIFT 1
+#define PARSING_FLAGS_EXTRA_VLAN (0x1<<2)
+#define PARSING_FLAGS_EXTRA_VLAN_SHIFT 2
+#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL (0x3<<3)
+#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT 3
+#define PARSING_FLAGS_IP_OPTIONS (0x1<<5)
+#define PARSING_FLAGS_IP_OPTIONS_SHIFT 5
+#define PARSING_FLAGS_FRAGMENTATION_STATUS (0x1<<6)
+#define PARSING_FLAGS_FRAGMENTATION_STATUS_SHIFT 6
+#define PARSING_FLAGS_OVER_IP_PROTOCOL (0x3<<7)
+#define PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT 7
+#define PARSING_FLAGS_PURE_ACK_INDICATION (0x1<<9)
+#define PARSING_FLAGS_PURE_ACK_INDICATION_SHIFT 9
+#define PARSING_FLAGS_TCP_OPTIONS_EXIST (0x1<<10)
+#define PARSING_FLAGS_TCP_OPTIONS_EXIST_SHIFT 10
+#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG (0x1<<11)
+#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG_SHIFT 11
+#define PARSING_FLAGS_CONNECTION_MATCH (0x1<<12)
+#define PARSING_FLAGS_CONNECTION_MATCH_SHIFT 12
+#define PARSING_FLAGS_LLC_SNAP (0x1<<13)
+#define PARSING_FLAGS_LLC_SNAP_SHIFT 13
+#define PARSING_FLAGS_RESERVED0 (0x3<<14)
+#define PARSING_FLAGS_RESERVED0_SHIFT 14
+};
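+
+/*
+ * Illustrative only: testing the VLAN bit of a CQE's parsing flags. The
+ * le16_to_cpu conversion mirrors how a driver consumes these little-endian
+ * firmware fields; the helper name is an assumption.
+ */
+static inline int parsing_flags_has_vlan(const struct parsing_flags *pf)
+{
+	return !!(le16_to_cpu(pf->flags) & PARSING_FLAGS_VLAN);
+}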
+
+
+/*
+ * Parsing flags for TCP ACK type
+ */
+enum prs_flags_ack_type {
+ PRS_FLAG_PUREACK_PIGGY,
+ PRS_FLAG_PUREACK_PURE,
+ MAX_PRS_FLAGS_ACK_TYPE
+};
+
+
+/*
+ * Parsing flags for Ethernet address type
+ */
+enum prs_flags_eth_addr_type {
+ PRS_FLAG_ETHTYPE_NON_UNICAST,
+ PRS_FLAG_ETHTYPE_UNICAST,
+ MAX_PRS_FLAGS_ETH_ADDR_TYPE
+};
+
+
+/*
+ * Parsing flags for over-ethernet protocol
+ */
+enum prs_flags_over_eth {
+ PRS_FLAG_OVERETH_UNKNOWN,
+ PRS_FLAG_OVERETH_IPV4,
+ PRS_FLAG_OVERETH_IPV6,
+ PRS_FLAG_OVERETH_LLCSNAP_UNKNOWN,
+ MAX_PRS_FLAGS_OVER_ETH
+};
+
+
+/*
+ * Parsing flags for over-IP protocol
+ */
+enum prs_flags_over_ip {
+ PRS_FLAG_OVERIP_UNKNOWN,
+ PRS_FLAG_OVERIP_TCP,
+ PRS_FLAG_OVERIP_UDP,
+ MAX_PRS_FLAGS_OVER_IP
+};
+
+
+/*
+ * SDM operation gen command (generate aggregative interrupt)
+ */
+struct sdm_op_gen {
+ __le32 command;
+#define SDM_OP_GEN_COMP_PARAM (0x1F<<0)
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE (0x7<<5)
+#define SDM_OP_GEN_COMP_TYPE_SHIFT 5
+#define SDM_OP_GEN_AGG_VECT_IDX (0xFF<<8)
+#define SDM_OP_GEN_AGG_VECT_IDX_SHIFT 8
+#define SDM_OP_GEN_AGG_VECT_IDX_VALID (0x1<<16)
+#define SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT 16
+#define SDM_OP_GEN_RESERVED (0x7FFF<<17)
+#define SDM_OP_GEN_RESERVED_SHIFT 17
+};
+
+
+/*
+ * Timers connection context
+ */
+struct timers_block_context {
+ u32 __reserved_0;
+ u32 __reserved_1;
+ u32 __reserved_2;
+ u32 flags;
+#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0)
+#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
+#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2)
+#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
+#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3)
+#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
+};
+
+
+/*
+ * The eth aggregative context of Tstorm
+ */
+struct tstorm_eth_ag_context {
+ u32 __reserved0[14];
+};
+
+
+/*
+ * The eth aggregative context of Ustorm
+ */
+struct ustorm_eth_ag_context {
+ u32 __reserved0;
+#if defined(__BIG_ENDIAN)
+ u8 cdu_usage;
+ u8 __reserved2;
+ u16 __reserved1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __reserved1;
+ u8 __reserved2;
+ u8 cdu_usage;
+#endif
+ u32 __reserved3[6];
+};
+
+
+/*
+ * The eth aggregative context of Xstorm
+ */
+struct xstorm_eth_ag_context {
+ u32 reserved0;
+#if defined(__BIG_ENDIAN)
+ u8 cdu_reserved;
+ u8 reserved2;
+ u16 reserved1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved1;
+ u8 reserved2;
+ u8 cdu_reserved;
+#endif
+ u32 reserved3[30];
+};
+
+
+/*
+ * doorbell message sent to the chip
+ */
+struct doorbell {
+#if defined(__BIG_ENDIAN)
+ u16 zero_fill2;
+ u8 zero_fill1;
+ struct doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+ struct doorbell_hdr header;
+ u8 zero_fill1;
+ u16 zero_fill2;
+#endif
+};
+
+
+/*
+ * doorbell message sent to the chip
+ */
+struct doorbell_set_prod {
+#if defined(__BIG_ENDIAN)
+ u16 prod;
+ u8 zero_fill1;
+ struct doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+ struct doorbell_hdr header;
+ u8 zero_fill1;
+ u16 prod;
+#endif
+};
+
+
+struct regpair {
+ __le32 lo;
+ __le32 hi;
+};
+
+
+/*
+ * Classify rule opcodes in E2/E3
+ */
+enum classify_rule {
+ CLASSIFY_RULE_OPCODE_MAC,
+ CLASSIFY_RULE_OPCODE_VLAN,
+ CLASSIFY_RULE_OPCODE_PAIR,
+ MAX_CLASSIFY_RULE
+};
+
+
+/*
+ * Classify rule types in E2/E3
+ */
+enum classify_rule_action_type {
+ CLASSIFY_RULE_REMOVE,
+ CLASSIFY_RULE_ADD,
+ MAX_CLASSIFY_RULE_ACTION_TYPE
+};
+
+
+/*
+ * client init ramrod data
+ */
+struct client_init_general_data {
+ u8 client_id;
+ u8 statistics_counter_id;
+ u8 statistics_en_flg;
+ u8 is_fcoe_flg;
+ u8 activate_flg;
+ u8 sp_client_id;
+ __le16 mtu;
+ u8 statistics_zero_flg;
+ u8 func_id;
+ u8 cos;
+ u8 traffic_type;
+ u32 reserved0;
+};
+
+
+/*
+ * client init rx data
+ */
+struct client_init_rx_data {
+ u8 tpa_en;
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4 (0x1<<0)
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1)
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1
+#define CLIENT_INIT_RX_DATA_RESERVED5 (0x3F<<2)
+#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 2
+ u8 vmqueue_mode_en_flg;
+ u8 extra_data_over_sgl_en_flg;
+ u8 cache_line_alignment_log_size;
+ u8 enable_dynamic_hc;
+ u8 max_sges_for_packet;
+ u8 client_qzone_id;
+ u8 drop_ip_cs_err_flg;
+ u8 drop_tcp_cs_err_flg;
+ u8 drop_ttl0_flg;
+ u8 drop_udp_cs_err_flg;
+ u8 inner_vlan_removal_enable_flg;
+ u8 outer_vlan_removal_enable_flg;
+ u8 status_block_id;
+ u8 rx_sb_index_number;
+ u8 reserved0;
+ u8 max_tpa_queues;
+ u8 silent_vlan_removal_flg;
+ __le16 max_bytes_on_bd;
+ __le16 sge_buff_size;
+ u8 approx_mcast_engine_id;
+ u8 rss_engine_id;
+ struct regpair bd_page_base;
+ struct regpair sge_page_base;
+ struct regpair cqe_page_base;
+ u8 is_leading_rss;
+ u8 is_approx_mcast;
+ __le16 max_agg_size;
+ __le16 state;
+#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL (0x1<<0)
+#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL_SHIFT 0
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL (0x1<<1)
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL_SHIFT 1
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED (0x1<<2)
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL (0x1<<3)
+#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL_SHIFT 3
+#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL (0x1<<4)
+#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL_SHIFT 4
+#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL (0x1<<5)
+#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL_SHIFT 5
+#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN (0x1<<6)
+#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN_SHIFT 6
+#define CLIENT_INIT_RX_DATA_RESERVED2 (0x1FF<<7)
+#define CLIENT_INIT_RX_DATA_RESERVED2_SHIFT 7
+ __le16 cqe_pause_thr_low;
+ __le16 cqe_pause_thr_high;
+ __le16 bd_pause_thr_low;
+ __le16 bd_pause_thr_high;
+ __le16 sge_pause_thr_low;
+ __le16 sge_pause_thr_high;
+ __le16 rx_cos_mask;
+ __le16 silent_vlan_value;
+ __le16 silent_vlan_mask;
+ __le32 reserved6[2];
+};
+
+/*
+ * client init tx data
+ */
+struct client_init_tx_data {
+ u8 enforce_security_flg;
+ u8 tx_status_block_id;
+ u8 tx_sb_index_number;
+ u8 tss_leading_client_id;
+ u8 tx_switching_flg;
+ u8 anti_spoofing_flg;
+ __le16 default_vlan;
+ struct regpair tx_bd_page_base;
+ __le16 state;
+#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL (0x1<<0)
+#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL_SHIFT 0
+#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL (0x1<<1)
+#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL_SHIFT 1
+#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL (0x1<<2)
+#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2
+#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3)
+#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3
+#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4)
+#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4
+ u8 default_vlan_flg;
+ u8 reserved2;
+ __le32 reserved3;
+};
+
+/*
+ * client init ramrod data
+ */
+struct client_init_ramrod_data {
+ struct client_init_general_data general;
+ struct client_init_rx_data rx;
+ struct client_init_tx_data tx;
+};
+
+
+/*
+ * client update ramrod data
+ */
+struct client_update_ramrod_data {
+ u8 client_id;
+ u8 func_id;
+ u8 inner_vlan_removal_enable_flg;
+ u8 inner_vlan_removal_change_flg;
+ u8 outer_vlan_removal_enable_flg;
+ u8 outer_vlan_removal_change_flg;
+ u8 anti_spoofing_enable_flg;
+ u8 anti_spoofing_change_flg;
+ u8 activate_flg;
+ u8 activate_change_flg;
+ __le16 default_vlan;
+ u8 default_vlan_enable_flg;
+ u8 default_vlan_change_flg;
+ __le16 silent_vlan_value;
+ __le16 silent_vlan_mask;
+ u8 silent_vlan_removal_flg;
+ u8 silent_vlan_change_flg;
+ __le32 echo;
+};
+
+
+/*
+ * The eth storm context of Cstorm
+ */
+struct cstorm_eth_st_context {
+ u32 __reserved0[4];
+};
+
+
+struct double_regpair {
+ u32 regpair0_lo;
+ u32 regpair0_hi;
+ u32 regpair1_lo;
+ u32 regpair1_hi;
+};
+
+
+/*
+ * Ethernet address types used in ethernet tx BDs
+ */
+enum eth_addr_type {
+ UNKNOWN_ADDRESS,
+ UNICAST_ADDRESS,
+ MULTICAST_ADDRESS,
+ BROADCAST_ADDRESS,
+ MAX_ETH_ADDR_TYPE
+};
+
+
+/*
+ * header for an eth classification command
+ */
+struct eth_classify_cmd_header {
+ u8 cmd_general_data;
+#define ETH_CLASSIFY_CMD_HEADER_RX_CMD (0x1<<0)
+#define ETH_CLASSIFY_CMD_HEADER_RX_CMD_SHIFT 0
+#define ETH_CLASSIFY_CMD_HEADER_TX_CMD (0x1<<1)
+#define ETH_CLASSIFY_CMD_HEADER_TX_CMD_SHIFT 1
+#define ETH_CLASSIFY_CMD_HEADER_OPCODE (0x3<<2)
+#define ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT 2
+#define ETH_CLASSIFY_CMD_HEADER_IS_ADD (0x1<<4)
+#define ETH_CLASSIFY_CMD_HEADER_IS_ADD_SHIFT 4
+#define ETH_CLASSIFY_CMD_HEADER_RESERVED0 (0x7<<5)
+#define ETH_CLASSIFY_CMD_HEADER_RESERVED0_SHIFT 5
+ u8 func_id;
+ u8 client_id;
+ u8 reserved1;
+};
+
+
+/*
+ * header for eth classification config ramrod
+ */
+struct eth_classify_header {
+ u8 rule_cnt;
+ u8 reserved0;
+ __le16 reserved1;
+ __le32 echo;
+};
+
+
+/*
+ * Command for adding/removing a MAC classification rule
+ */
+struct eth_classify_mac_cmd {
+ struct eth_classify_cmd_header header;
+ __le32 reserved0;
+ __le16 mac_lsb;
+ __le16 mac_mid;
+ __le16 mac_msb;
+ __le16 reserved1;
+};
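+
+/*
+ * Illustrative only: splitting a 6-byte MAC address into the three
+ * little-endian 16-bit words of the command. The most-significant-word-
+ * first byte order shown here is an assumption of this sketch.
+ */
+static inline void eth_classify_mac_fill(struct eth_classify_mac_cmd *cmd,
+					 const u8 *mac)
+{
+	cmd->mac_msb = cpu_to_le16((mac[0] << 8) | mac[1]);
+	cmd->mac_mid = cpu_to_le16((mac[2] << 8) | mac[3]);
+	cmd->mac_lsb = cpu_to_le16((mac[4] << 8) | mac[5]);
+}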
+
+
+/*
+ * Command for adding/removing a MAC-VLAN pair classification rule
+ */
+struct eth_classify_pair_cmd {
+ struct eth_classify_cmd_header header;
+ __le32 reserved0;
+ __le16 mac_lsb;
+ __le16 mac_mid;
+ __le16 mac_msb;
+ __le16 vlan;
+};
+
+
+/*
+ * Command for adding/removing a VLAN classification rule
+ */
+struct eth_classify_vlan_cmd {
+ struct eth_classify_cmd_header header;
+ __le32 reserved0;
+ __le32 reserved1;
+ __le16 reserved2;
+ __le16 vlan;
+};
+
+/*
+ * union for eth classification rule
+ */
+union eth_classify_rule_cmd {
+ struct eth_classify_mac_cmd mac;
+ struct eth_classify_vlan_cmd vlan;
+ struct eth_classify_pair_cmd pair;
+};
+
+/*
+ * parameters for eth classification configuration ramrod
+ */
+struct eth_classify_rules_ramrod_data {
+ struct eth_classify_header header;
+ union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT];
+};
+
+
+/*
+ * The data contains the client ID needed by the ramrod
+ */
+struct eth_common_ramrod_data {
+ __le32 client_id;
+ __le32 reserved1;
+};
+
+
+/*
+ * The eth storm context of Ustorm
+ */
+struct ustorm_eth_st_context {
+ u32 reserved0[52];
+};
+
+/*
+ * The eth storm context of Tstorm
+ */
+struct tstorm_eth_st_context {
+ u32 __reserved0[28];
+};
+
+/*
+ * The eth storm context of Xstorm
+ */
+struct xstorm_eth_st_context {
+ u32 reserved0[60];
+};
+
+/*
+ * Ethernet connection context
+ */
+struct eth_context {
+ struct ustorm_eth_st_context ustorm_st_context;
+ struct tstorm_eth_st_context tstorm_st_context;
+ struct xstorm_eth_ag_context xstorm_ag_context;
+ struct tstorm_eth_ag_context tstorm_ag_context;
+ struct cstorm_eth_ag_context cstorm_ag_context;
+ struct ustorm_eth_ag_context ustorm_ag_context;
+ struct timers_block_context timers_context;
+ struct xstorm_eth_st_context xstorm_st_context;
+ struct cstorm_eth_st_context cstorm_st_context;
+};
+
+
+/*
+ * union for sgl and raw data.
+ */
+union eth_sgl_or_raw_data {
+ __le16 sgl[8];
+ u32 raw_data[4];
+};
+
+/*
+ * eth FP end aggregation CQE parameters struct
+ */
+struct eth_end_agg_rx_cqe {
+ u8 type_error_flags;
+#define ETH_END_AGG_RX_CQE_TYPE (0x3<<0)
+#define ETH_END_AGG_RX_CQE_TYPE_SHIFT 0
+#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL (0x1<<2)
+#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL_SHIFT 2
+#define ETH_END_AGG_RX_CQE_RESERVED0 (0x1F<<3)
+#define ETH_END_AGG_RX_CQE_RESERVED0_SHIFT 3
+ u8 reserved1;
+ u8 queue_index;
+ u8 reserved2;
+ __le32 timestamp_delta;
+ __le16 num_of_coalesced_segs;
+ __le16 pkt_len;
+ u8 pure_ack_count;
+ u8 reserved3;
+ __le16 reserved4;
+ union eth_sgl_or_raw_data sgl_or_raw_data;
+ __le32 reserved5[8];
+};
+
+
+/*
+ * regular eth FP CQE parameters struct
+ */
+struct eth_fast_path_rx_cqe {
+ u8 type_error_flags;
+#define ETH_FAST_PATH_RX_CQE_TYPE (0x3<<0)
+#define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x1<<2)
+#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 2
+#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<3)
+#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 3
+#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<4)
+#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4
+#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5)
+#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5
+#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6)
+#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6
+ u8 status_flags;
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0)
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG (0x1<<3)
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG_SHIFT 3
+#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG (0x1<<4)
+#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG_SHIFT 4
+#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG (0x1<<5)
+#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG_SHIFT 5
+#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG (0x1<<6)
+#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6
+#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7)
+#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7
+ u8 queue_index;
+ u8 placement_offset;
+ __le32 rss_hash_result;
+ __le16 vlan_tag;
+ __le16 pkt_len;
+ __le16 len_on_bd;
+ struct parsing_flags pars_flags;
+ union eth_sgl_or_raw_data sgl_or_raw_data;
+ __le32 reserved1[8];
+};
+
+
+/*
+ * Command for setting classification flags for a client
+ */
+struct eth_filter_rules_cmd {
+ u8 cmd_general_data;
+#define ETH_FILTER_RULES_CMD_RX_CMD (0x1<<0)
+#define ETH_FILTER_RULES_CMD_RX_CMD_SHIFT 0
+#define ETH_FILTER_RULES_CMD_TX_CMD (0x1<<1)
+#define ETH_FILTER_RULES_CMD_TX_CMD_SHIFT 1
+#define ETH_FILTER_RULES_CMD_RESERVED0 (0x3F<<2)
+#define ETH_FILTER_RULES_CMD_RESERVED0_SHIFT 2
+ u8 func_id;
+ u8 client_id;
+ u8 reserved1;
+ __le16 state;
+#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL (0x1<<0)
+#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL_SHIFT 0
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL (0x1<<1)
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED (0x1<<2)
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL (0x1<<3)
+#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL_SHIFT 3
+#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL (0x1<<4)
+#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL (0x1<<5)
+#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL_SHIFT 5
+#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN (0x1<<6)
+#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN_SHIFT 6
+#define ETH_FILTER_RULES_CMD_RESERVED2 (0x1FF<<7)
+#define ETH_FILTER_RULES_CMD_RESERVED2_SHIFT 7
+ __le16 reserved3;
+ struct regpair reserved4;
+};
+
+
+/*
+ * parameters for eth classification filters ramrod
+ */
+struct eth_filter_rules_ramrod_data {
+ struct eth_classify_header header;
+ struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT];
+};
+
+
+/*
+ * parameters for eth classification configuration ramrod
+ */
+struct eth_general_rules_ramrod_data {
+ struct eth_classify_header header;
+ union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT];
+};
+
+
+/*
+ * The data for Halt ramrod
+ */
+struct eth_halt_ramrod_data {
+ __le32 client_id;
+ __le32 reserved0;
+};
+
+
+/*
+ * Command for setting multicast classification for a client
+ */
+struct eth_multicast_rules_cmd {
+ u8 cmd_general_data;
+#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0)
+#define ETH_MULTICAST_RULES_CMD_RX_CMD_SHIFT 0
+#define ETH_MULTICAST_RULES_CMD_TX_CMD (0x1<<1)
+#define ETH_MULTICAST_RULES_CMD_TX_CMD_SHIFT 1
+#define ETH_MULTICAST_RULES_CMD_IS_ADD (0x1<<2)
+#define ETH_MULTICAST_RULES_CMD_IS_ADD_SHIFT 2
+#define ETH_MULTICAST_RULES_CMD_RESERVED0 (0x1F<<3)
+#define ETH_MULTICAST_RULES_CMD_RESERVED0_SHIFT 3
+ u8 func_id;
+ u8 bin_id;
+ u8 engine_id;
+ __le32 reserved2;
+ struct regpair reserved3;
+};
+
+
+/*
+ * parameters for multicast classification ramrod
+ */
+struct eth_multicast_rules_ramrod_data {
+ struct eth_classify_header header;
+ struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT];
+};
+
+
+/*
+ * Placeholder for ramrod protocol-specific data
+ */
+struct ramrod_data {
+ __le32 data_lo;
+ __le32 data_hi;
+};
+
+/*
+ * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits)
+ */
+union eth_ramrod_data {
+ struct ramrod_data general;
+};
+
+
+/*
+ * RSS toeplitz hash type, as reported in CQE
+ */
+enum eth_rss_hash_type {
+ DEFAULT_HASH_TYPE,
+ IPV4_HASH_TYPE,
+ TCP_IPV4_HASH_TYPE,
+ IPV6_HASH_TYPE,
+ TCP_IPV6_HASH_TYPE,
+ VLAN_PRI_HASH_TYPE,
+ E1HOV_PRI_HASH_TYPE,
+ DSCP_HASH_TYPE,
+ MAX_ETH_RSS_HASH_TYPE
+};
+
+
+/*
+ * Ethernet RSS mode
+ */
+enum eth_rss_mode {
+ ETH_RSS_MODE_DISABLED,
+ ETH_RSS_MODE_REGULAR,
+ ETH_RSS_MODE_VLAN_PRI,
+ ETH_RSS_MODE_E1HOV_PRI,
+ ETH_RSS_MODE_IP_DSCP,
+ MAX_ETH_RSS_MODE
+};
+
+
+/*
+ * parameters for RSS update ramrod (E2)
+ */
+struct eth_rss_update_ramrod_data {
+ u8 rss_engine_id;
+ u8 capabilities;
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<3)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 3
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<4)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<6)
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 6
+#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0 (0x1<<7)
+#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0_SHIFT 7
+ u8 rss_result_mask;
+ u8 rss_mode;
+ __le32 __reserved2;
+ u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ __le32 rss_key[T_ETH_RSS_KEY];
+ __le32 echo;
+ __le32 reserved3;
+};
+
+
+/*
+ * The eth Rx Buffer Descriptor
+ */
+struct eth_rx_bd {
+ __le32 addr_lo;
+ __le32 addr_hi;
+};
+
+
+/*
+ * Eth Rx Cqe structure - general structure for ramrods
+ */
+struct common_ramrod_eth_rx_cqe {
+ u8 ramrod_type;
+#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x3<<0)
+#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0
+#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<2)
+#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 2
+#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x1F<<3)
+#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 3
+ u8 conn_type;
+ __le16 reserved1;
+ __le32 conn_and_cmd_data;
+#define COMMON_RAMROD_ETH_RX_CQE_CID (0xFFFFFF<<0)
+#define COMMON_RAMROD_ETH_RX_CQE_CID_SHIFT 0
+#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24)
+#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24
+ struct ramrod_data protocol_data;
+ __le32 echo;
+ __le32 reserved2[11];
+};
+
+/*
+ * Rx Last CQE in page (in ETH)
+ */
+struct eth_rx_cqe_next_page {
+ __le32 addr_lo;
+ __le32 addr_hi;
+ __le32 reserved[14];
+};
+
+/*
+ * union for all eth rx cqe types (fix their sizes)
+ */
+union eth_rx_cqe {
+ struct eth_fast_path_rx_cqe fast_path_cqe;
+ struct common_ramrod_eth_rx_cqe ramrod_cqe;
+ struct eth_rx_cqe_next_page next_page_cqe;
+ struct eth_end_agg_rx_cqe end_agg_cqe;
+};
+
+
+/*
+ * Values for RX ETH CQE type field
+ */
+enum eth_rx_cqe_type {
+ RX_ETH_CQE_TYPE_ETH_FASTPATH,
+ RX_ETH_CQE_TYPE_ETH_RAMROD,
+ RX_ETH_CQE_TYPE_ETH_START_AGG,
+ RX_ETH_CQE_TYPE_ETH_STOP_AGG,
+ MAX_ETH_RX_CQE_TYPE
+};
+
+
+/*
+ * Type of SGL/Raw field in ETH RX fast path CQE
+ */
+enum eth_rx_fp_sel {
+ ETH_FP_CQE_REGULAR,
+ ETH_FP_CQE_RAW,
+ MAX_ETH_RX_FP_SEL
+};
+
+
+/*
+ * The eth Rx SGE Descriptor
+ */
+struct eth_rx_sge {
+ __le32 addr_lo;
+ __le32 addr_hi;
+};
+
+
+/*
+ * common data for all protocols
+ */
+struct spe_hdr {
+ __le32 conn_and_cmd_data;
+#define SPE_HDR_CID (0xFFFFFF<<0)
+#define SPE_HDR_CID_SHIFT 0
+#define SPE_HDR_CMD_ID (0xFF<<24)
+#define SPE_HDR_CMD_ID_SHIFT 24
+ __le16 type;
+#define SPE_HDR_CONN_TYPE (0xFF<<0)
+#define SPE_HDR_CONN_TYPE_SHIFT 0
+#define SPE_HDR_FUNCTION_ID (0xFF<<8)
+#define SPE_HDR_FUNCTION_ID_SHIFT 8
+ __le16 reserved1;
+};
+
+/*
+ * specific data for ethernet slow path element
+ */
+union eth_specific_data {
+ u8 protocol_data[8];
+ struct regpair client_update_ramrod_data;
+ struct regpair client_init_ramrod_init_data;
+ struct eth_halt_ramrod_data halt_ramrod_data;
+ struct regpair update_data_addr;
+ struct eth_common_ramrod_data common_ramrod_data;
+ struct regpair classify_cfg_addr;
+ struct regpair filter_cfg_addr;
+ struct regpair mcast_cfg_addr;
+};
+
+/*
+ * Ethernet slow path element
+ */
+struct eth_spe {
+ struct spe_hdr hdr;
+ union eth_specific_data data;
+};
+
+
+/*
+ * Ethernet command ID for slow path elements
+ */
+enum eth_spqe_cmd_id {
+ RAMROD_CMD_ID_ETH_UNUSED,
+ RAMROD_CMD_ID_ETH_CLIENT_SETUP,
+ RAMROD_CMD_ID_ETH_HALT,
+ RAMROD_CMD_ID_ETH_FORWARD_SETUP,
+ RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP,
+ RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
+ RAMROD_CMD_ID_ETH_EMPTY,
+ RAMROD_CMD_ID_ETH_TERMINATE,
+ RAMROD_CMD_ID_ETH_TPA_UPDATE,
+ RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES,
+ RAMROD_CMD_ID_ETH_FILTER_RULES,
+ RAMROD_CMD_ID_ETH_MULTICAST_RULES,
+ RAMROD_CMD_ID_ETH_RSS_UPDATE,
+ RAMROD_CMD_ID_ETH_SET_MAC,
+ MAX_ETH_SPQE_CMD_ID
+};
+
+
+/*
+ * eth tpa update command
+ */
+enum eth_tpa_update_command {
+ TPA_UPDATE_NONE_COMMAND,
+ TPA_UPDATE_ENABLE_COMMAND,
+ TPA_UPDATE_DISABLE_COMMAND,
+ MAX_ETH_TPA_UPDATE_COMMAND
+};
+
+
+/*
+ * Tx regular BD structure
+ */
+struct eth_tx_bd {
+ __le32 addr_lo;
+ __le32 addr_hi;
+ __le16 total_pkt_bytes;
+ __le16 nbytes;
+ u8 reserved[4];
+};
+
+
+/*
+ * structure for easy access from assembler
+ */
+struct eth_tx_bd_flags {
+ u8 as_bitfield;
+#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0)
+#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0
+#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1)
+#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1
+#define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2)
+#define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2
+#define ETH_TX_BD_FLAGS_START_BD (0x1<<4)
+#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4
+#define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5)
+#define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5
+#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6)
+#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6
+#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7)
+#define ETH_TX_BD_FLAGS_IPV6_SHIFT 7
+};
+
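+/* Usage sketch (illustrative only): each mask/SHIFT pair describes one
+ * field of as_bitfield, so a transmit path asking for IP and L4 checksum
+ * offload on the first BD of a packet might set
+ *
+ *	u8 flags = 0;
+ *
+ *	flags |= ETH_TX_BD_FLAGS_IP_CSUM;
+ *	flags |= ETH_TX_BD_FLAGS_L4_CSUM;
+ *	flags |= ETH_TX_BD_FLAGS_START_BD;
+ *	flags |= X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT;
+ *	start_bd->bd_flags.as_bitfield = flags;
+ *
+ * X_ETH_OUTBAND_VLAN comes from enum eth_tx_vlan_type further below;
+ * start_bd is a hypothetical struct eth_tx_start_bd pointer.
+ */
+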
+/*
+ * The eth Tx Buffer Descriptor
+ */
+struct eth_tx_start_bd {
+ __le32 addr_lo;
+ __le32 addr_hi;
+ __le16 nbd;
+ __le16 nbytes;
+ __le16 vlan_or_ethertype;
+ struct eth_tx_bd_flags bd_flags;
+ u8 general_data;
+#define ETH_TX_START_BD_HDR_NBDS (0xF<<0)
+#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
+#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4)
+#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
+#define ETH_TX_START_BD_RESERVED (0x1<<5)
+#define ETH_TX_START_BD_RESERVED_SHIFT 5
+#define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6)
+#define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6
+};
+
+/*
+ * Tx parsing BD structure for ETH E1/E1h
+ */
+struct eth_tx_parse_bd_e1x {
+ u8 global_data;
+#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0)
+#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4)
+#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5)
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6)
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6
+#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7)
+#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7
+ u8 tcp_flags;
+#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0)
+#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0
+#define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1)
+#define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1
+#define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2)
+#define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2
+#define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3)
+#define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3
+#define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4)
+#define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4
+#define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5)
+#define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5
+#define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6)
+#define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6
+#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7)
+#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7
+ u8 ip_hlen_w;
+ s8 reserved;
+ __le16 total_hlen_w;
+ __le16 tcp_pseudo_csum;
+ __le16 lso_mss;
+ __le16 ip_id;
+ __le32 tcp_send_seq;
+};
+
+/*
+ * Tx parsing BD structure for ETH E2
+ */
+struct eth_tx_parse_bd_e2 {
+ __le16 dst_mac_addr_lo;
+ __le16 dst_mac_addr_mid;
+ __le16 dst_mac_addr_hi;
+ __le16 src_mac_addr_lo;
+ __le16 src_mac_addr_mid;
+ __le16 src_mac_addr_hi;
+ __le32 parsing_data;
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0)
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13)
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13
+#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17)
+#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31)
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31
+};
+
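+/* Packing sketch (illustrative only): parsing_data is a little-endian
+ * 32-bit word assembled from the mask/SHIFT pairs above; for an LSO
+ * frame a driver might compute
+ *
+ *	u32 pd = 0;
+ *
+ *	pd |= (tcp_off_w << ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
+ *	      ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
+ *	pd |= (mss << ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
+ *	      ETH_TX_PARSE_BD_E2_LSO_MSS;
+ *	pbd_e2->parsing_data = cpu_to_le32(pd);
+ *
+ * tcp_off_w (TCP header offset in 16-bit words) and mss are hypothetical
+ * locals; pbd_e2 is a struct eth_tx_parse_bd_e2 pointer.
+ */
+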
+/*
+ * The last BD in the BD memory will hold a pointer to the next BD memory
+ */
+struct eth_tx_next_bd {
+ __le32 addr_lo;
+ __le32 addr_hi;
+ u8 reserved[8];
+};
+
+/*
+ * union for 4 Bd types
+ */
+union eth_tx_bd_types {
+ struct eth_tx_start_bd start_bd;
+ struct eth_tx_bd reg_bd;
+ struct eth_tx_parse_bd_e1x parse_bd_e1x;
+ struct eth_tx_parse_bd_e2 parse_bd_e2;
+ struct eth_tx_next_bd next_bd;
+};
+
+/*
+ * array of 13 BDs as it appears in the eth xstorm context
+ */
+struct eth_tx_bds_array {
+ union eth_tx_bd_types bds[13];
+};
+
+
+/*
+ * VLAN mode on TX BDs
+ */
+enum eth_tx_vlan_type {
+ X_ETH_NO_VLAN,
+ X_ETH_OUTBAND_VLAN,
+ X_ETH_INBAND_VLAN,
+ X_ETH_FW_ADDED_VLAN,
+ MAX_ETH_TX_VLAN_TYPE
+};
+
+
+/*
+ * Ethernet VLAN filtering mode in E1x
+ */
+enum eth_vlan_filter_mode {
+ ETH_VLAN_FILTER_ANY_VLAN,
+ ETH_VLAN_FILTER_SPECIFIC_VLAN,
+ ETH_VLAN_FILTER_CLASSIFY,
+ MAX_ETH_VLAN_FILTER_MODE
+};
+
+
+/*
+ * MAC filtering configuration command header
+ */
+struct mac_configuration_hdr {
+ u8 length;
+ u8 offset;
+ __le16 client_id;
+ __le32 echo;
+};
+
+/*
+ * MAC address in list for ramrod
+ */
+struct mac_configuration_entry {
+ __le16 lsb_mac_addr;
+ __le16 middle_mac_addr;
+ __le16 msb_mac_addr;
+ __le16 vlan_id;
+ u8 pf_id;
+ u8 flags;
+#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE (0x1<<0)
+#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE_SHIFT 0
+#define MAC_CONFIGURATION_ENTRY_RDMA_MAC (0x1<<1)
+#define MAC_CONFIGURATION_ENTRY_RDMA_MAC_SHIFT 1
+#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE (0x3<<2)
+#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE_SHIFT 2
+#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<4)
+#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 4
+#define MAC_CONFIGURATION_ENTRY_BROADCAST (0x1<<5)
+#define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5
+#define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6)
+#define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6
+ __le16 reserved0;
+ __le32 clients_bit_vector;
+};
+
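+/* Packing sketch (an assumption, not mandated by this header): the three
+ * 16-bit words carry the MAC address two bytes at a time, most
+ * significant word first, roughly
+ *
+ *	entry->msb_mac_addr    = cpu_to_le16((mac[0] << 8) | mac[1]);
+ *	entry->middle_mac_addr = cpu_to_le16((mac[2] << 8) | mac[3]);
+ *	entry->lsb_mac_addr    = cpu_to_le16((mac[4] << 8) | mac[5]);
+ *
+ * with mac[] the usual 6-byte address and entry a struct
+ * mac_configuration_entry pointer; the byte order inside each word is
+ * firmware-defined, so treat this purely as illustration.
+ */
+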
+/*
+ * MAC filtering configuration command
+ */
+struct mac_configuration_cmd {
+ struct mac_configuration_hdr hdr;
+ struct mac_configuration_entry config_table[64];
+};
+
+
+/*
+ * Set-MAC command type (in E1x)
+ */
+enum set_mac_action_type {
+ T_ETH_MAC_COMMAND_INVALIDATE,
+ T_ETH_MAC_COMMAND_SET,
+ MAX_SET_MAC_ACTION_TYPE
+};
+
+
+/*
+ * tpa update ramrod data
+ */
+struct tpa_update_ramrod_data {
+ u8 update_ipv4;
+ u8 update_ipv6;
+ u8 client_id;
+ u8 max_tpa_queues;
+ u8 max_sges_for_packet;
+ u8 complete_on_both_clients;
+ __le16 reserved1;
+ __le16 sge_buff_size;
+ __le16 max_agg_size;
+ __le32 sge_page_base_lo;
+ __le32 sge_page_base_hi;
+ __le16 sge_pause_thr_low;
+ __le16 sge_pause_thr_high;
+};
+
+
+/*
+ * approximate-match multicast filtering for E1H per function in Tstorm
+ */
+struct tstorm_eth_approximate_match_multicast_filtering {
+ u32 mcast_add_hash_bit_array[8];
+};
+
+
+/*
+ * Common configuration parameters per function in Tstorm
+ */
+struct tstorm_eth_function_common_config {
+ __le16 config_flags;
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<7)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 7
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0xFF<<8)
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 8
+ u8 rss_result_mask;
+ u8 reserved1;
+ __le16 vlan_id[2];
+};
+
+
+/*
+ * MAC filtering configuration parameters per port in Tstorm
+ */
+struct tstorm_eth_mac_filter_config {
+ __le32 ucast_drop_all;
+ __le32 ucast_accept_all;
+ __le32 mcast_drop_all;
+ __le32 mcast_accept_all;
+ __le32 bcast_accept_all;
+ __le32 vlan_filter[2];
+ __le32 unmatched_unicast;
+};
+
+
+/*
+ * tx only queue init ramrod data
+ */
+struct tx_queue_init_ramrod_data {
+ struct client_init_general_data general;
+ struct client_init_tx_data tx;
+};
+
+
+/*
+ * Three RX producers for ETH
+ */
+struct ustorm_eth_rx_producers {
+#if defined(__BIG_ENDIAN)
+ u16 bd_prod;
+ u16 cqe_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cqe_prod;
+ u16 bd_prod;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 reserved;
+ u16 sge_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sge_prod;
+ u16 reserved;
+#endif
+};
+
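+/* Update sketch (illustrative only): the endian-conditional layout keeps
+ * each producer pair packed in one 32-bit word regardless of host
+ * endianness, so the whole block can be published with plain 32-bit
+ * stores, roughly
+ *
+ *	struct ustorm_eth_rx_producers prods = {0};
+ *	u32 *src = (u32 *)&prods;
+ *	int i;
+ *
+ *	prods.bd_prod = bd_prod;
+ *	prods.cqe_prod = cqe_prod;
+ *	prods.sge_prod = sge_prod;
+ *	for (i = 0; i < sizeof(prods) / 4; i++)
+ *		REG_WR(bp, prods_addr + i * 4, src[i]);
+ *
+ * bd_prod/cqe_prod/sge_prod and prods_addr are hypothetical locals.
+ */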
+
+/*
+ * cfc delete event data
+ */
+struct cfc_del_event_data {
+ u32 cid;
+ u32 reserved0;
+ u32 reserved1;
+};
+
+
+/*
+ * per-port SAFC demo variables
+ */
+struct cmng_flags_per_port {
+ u32 cmng_enables;
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1<<0)
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0
+#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1<<1)
+#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<2)
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 2
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<3)
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 3
+#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0xFFFFFFF<<4)
+#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 4
+ u32 __reserved1;
+};
+
+
+/*
+ * per-port rate shaping variables
+ */
+struct rate_shaping_vars_per_port {
+ u32 rs_periodic_timeout;
+ u32 rs_threshold;
+};
+
+/*
+ * per-port fairness variables
+ */
+struct fairness_vars_per_port {
+ u32 upper_bound;
+ u32 fair_threshold;
+ u32 fairness_timeout;
+ u32 reserved0;
+};
+
+/*
+ * per-port SAFC variables
+ */
+struct safc_struct_per_port {
+#if defined(__BIG_ENDIAN)
+ u16 __reserved1;
+ u8 __reserved0;
+ u8 safc_timeout_usec;
+#elif defined(__LITTLE_ENDIAN)
+ u8 safc_timeout_usec;
+ u8 __reserved0;
+ u16 __reserved1;
+#endif
+ u8 cos_to_traffic_types[MAX_COS_NUMBER];
+ u16 cos_to_pause_mask[NUM_OF_SAFC_BITS];
+};
+
+/*
+ * Per-port congestion management variables
+ */
+struct cmng_struct_per_port {
+ struct rate_shaping_vars_per_port rs_vars;
+ struct fairness_vars_per_port fair_vars;
+ struct safc_struct_per_port safc_vars;
+ struct cmng_flags_per_port flags;
+};
+
+
+/*
+ * Protocol-common command ID for slow path elements
+ */
+enum common_spqe_cmd_id {
+ RAMROD_CMD_ID_COMMON_UNUSED,
+ RAMROD_CMD_ID_COMMON_FUNCTION_START,
+ RAMROD_CMD_ID_COMMON_FUNCTION_STOP,
+ RAMROD_CMD_ID_COMMON_CFC_DEL,
+ RAMROD_CMD_ID_COMMON_CFC_DEL_WB,
+ RAMROD_CMD_ID_COMMON_STAT_QUERY,
+ RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
+ RAMROD_CMD_ID_COMMON_START_TRAFFIC,
+ RAMROD_CMD_ID_COMMON_RESERVED1,
+ RAMROD_CMD_ID_COMMON_RESERVED2,
+ MAX_COMMON_SPQE_CMD_ID
+};
+
+
+/*
+ * Per-protocol connection types
+ */
+enum connection_type {
+ ETH_CONNECTION_TYPE,
+ TOE_CONNECTION_TYPE,
+ RDMA_CONNECTION_TYPE,
+ ISCSI_CONNECTION_TYPE,
+ FCOE_CONNECTION_TYPE,
+ RESERVED_CONNECTION_TYPE_0,
+ RESERVED_CONNECTION_TYPE_1,
+ RESERVED_CONNECTION_TYPE_2,
+ NONE_CONNECTION_TYPE,
+ MAX_CONNECTION_TYPE
+};
+
+
+/*
+ * Cos modes
+ */
+enum cos_mode {
+ OVERRIDE_COS,
+ STATIC_COS,
+ FW_WRR,
+ MAX_COS_MODE
+};
+
+
+/*
+ * Dynamic HC counters set by the driver
+ */
+struct hc_dynamic_drv_counter {
+ u32 val[HC_SB_MAX_DYNAMIC_INDICES];
+};
+
+/*
+ * zone A per-queue data
+ */
+struct cstorm_queue_zone_data {
+ struct hc_dynamic_drv_counter hc_dyn_drv_cnt;
+ struct regpair reserved[2];
+};
+
+
+/*
+ * VF-PF channel data in cstorm ram (non-triggered zone)
+ */
+struct vf_pf_channel_zone_data {
+ u32 msg_addr_lo;
+ u32 msg_addr_hi;
+};
+
+/*
+ * zone for VF non-triggered data
+ */
+struct non_trigger_vf_zone {
+ struct vf_pf_channel_zone_data vf_pf_channel;
+};
+
+/*
+ * VF-PF channel trigger zone in cstorm ram
+ */
+struct vf_pf_channel_zone_trigger {
+ u8 addr_valid;
+};
+
+/*
+ * zone that triggers the in-bound interrupt
+ */
+struct trigger_vf_zone {
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u8 reserved0;
+ struct vf_pf_channel_zone_trigger vf_pf_channel;
+#elif defined(__LITTLE_ENDIAN)
+ struct vf_pf_channel_zone_trigger vf_pf_channel;
+ u8 reserved0;
+ u16 reserved1;
+#endif
+ u32 reserved2;
+};
+
+/*
+ * zone B per-VF data
+ */
+struct cstorm_vf_zone_data {
+ struct non_trigger_vf_zone non_trigger;
+ struct trigger_vf_zone trigger;
+};
+
+
+/*
+ * Dynamic host coalescing init parameters, per state machine
+ */
+struct dynamic_hc_sm_config {
+ u32 threshold[3];
+ u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES];
+ u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES];
+ u8 hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES];
+ u8 hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES];
+ u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES];
+};
+
+/*
+ * Dynamic host coalescing init parameters
+ */
+struct dynamic_hc_config {
+ struct dynamic_hc_sm_config sm_config[HC_SB_MAX_SM];
+};
+
+
+struct e2_integ_data {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define E2_INTEG_DATA_TESTING_EN (0x1<<0)
+#define E2_INTEG_DATA_TESTING_EN_SHIFT 0
+#define E2_INTEG_DATA_LB_TX (0x1<<1)
+#define E2_INTEG_DATA_LB_TX_SHIFT 1
+#define E2_INTEG_DATA_COS_TX (0x1<<2)
+#define E2_INTEG_DATA_COS_TX_SHIFT 2
+#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3)
+#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4)
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4
+#define E2_INTEG_DATA_RESERVED (0x7<<5)
+#define E2_INTEG_DATA_RESERVED_SHIFT 5
+ u8 cos;
+ u8 voq;
+ u8 pbf_queue;
+#elif defined(__LITTLE_ENDIAN)
+ u8 pbf_queue;
+ u8 voq;
+ u8 cos;
+ u8 flags;
+#define E2_INTEG_DATA_TESTING_EN (0x1<<0)
+#define E2_INTEG_DATA_TESTING_EN_SHIFT 0
+#define E2_INTEG_DATA_LB_TX (0x1<<1)
+#define E2_INTEG_DATA_LB_TX_SHIFT 1
+#define E2_INTEG_DATA_COS_TX (0x1<<2)
+#define E2_INTEG_DATA_COS_TX_SHIFT 2
+#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3)
+#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4)
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4
+#define E2_INTEG_DATA_RESERVED (0x7<<5)
+#define E2_INTEG_DATA_RESERVED_SHIFT 5
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 reserved3;
+ u8 reserved2;
+ u8 ramEn;
+#elif defined(__LITTLE_ENDIAN)
+ u8 ramEn;
+ u8 reserved2;
+ u16 reserved3;
+#endif
+};
+
+
+/*
+ * set mac event data
+ */
+struct eth_event_data {
+ u32 echo;
+ u32 reserved0;
+ u32 reserved1;
+};
+
+
+/*
+ * pf-vf event data
+ */
+struct vf_pf_event_data {
+ u8 vf_id;
+ u8 reserved0;
+ u16 reserved1;
+ u32 msg_addr_lo;
+ u32 msg_addr_hi;
+};
+
+/*
+ * VF FLR event data
+ */
+struct vf_flr_event_data {
+ u8 vf_id;
+ u8 reserved0;
+ u16 reserved1;
+ u32 reserved2;
+ u32 reserved3;
+};
+
+/*
+ * malicious VF event data
+ */
+struct malicious_vf_event_data {
+ u8 vf_id;
+ u8 reserved0;
+ u16 reserved1;
+ u32 reserved2;
+ u32 reserved3;
+};
+
+/*
+ * union for all event ring message types
+ */
+union event_data {
+ struct vf_pf_event_data vf_pf_event;
+ struct eth_event_data eth_event;
+ struct cfc_del_event_data cfc_del_event;
+ struct vf_flr_event_data vf_flr_event;
+ struct malicious_vf_event_data malicious_vf_event;
+};
+
+
+/*
+ * per PF event ring data
+ */
+struct event_ring_data {
+ struct regpair base_addr;
+#if defined(__BIG_ENDIAN)
+ u8 index_id;
+ u8 sb_id;
+ u16 producer;
+#elif defined(__LITTLE_ENDIAN)
+ u16 producer;
+ u8 sb_id;
+ u8 index_id;
+#endif
+ u32 reserved0;
+};
+
+
+/*
+ * event ring message element (each element is 128 bits)
+ */
+struct event_ring_msg {
+ u8 opcode;
+ u8 error;
+ u16 reserved1;
+ union event_data data;
+};
+
+/*
+ * event ring next page element (128 bits)
+ */
+struct event_ring_next {
+ struct regpair addr;
+ u32 reserved[2];
+};
+
+/*
+ * union for event ring element types (each element is 128 bits)
+ */
+union event_ring_elem {
+ struct event_ring_msg message;
+ struct event_ring_next next_page;
+};
+
+
+/*
+ * Common event ring opcodes
+ */
+enum event_ring_opcode {
+ EVENT_RING_OPCODE_VF_PF_CHANNEL,
+ EVENT_RING_OPCODE_FUNCTION_START,
+ EVENT_RING_OPCODE_FUNCTION_STOP,
+ EVENT_RING_OPCODE_CFC_DEL,
+ EVENT_RING_OPCODE_CFC_DEL_WB,
+ EVENT_RING_OPCODE_STAT_QUERY,
+ EVENT_RING_OPCODE_STOP_TRAFFIC,
+ EVENT_RING_OPCODE_START_TRAFFIC,
+ EVENT_RING_OPCODE_VF_FLR,
+ EVENT_RING_OPCODE_MALICIOUS_VF,
+ EVENT_RING_OPCODE_FORWARD_SETUP,
+ EVENT_RING_OPCODE_RSS_UPDATE_RULES,
+ EVENT_RING_OPCODE_RESERVED1,
+ EVENT_RING_OPCODE_RESERVED2,
+ EVENT_RING_OPCODE_SET_MAC,
+ EVENT_RING_OPCODE_CLASSIFICATION_RULES,
+ EVENT_RING_OPCODE_FILTERS_RULES,
+ EVENT_RING_OPCODE_MULTICAST_RULES,
+ MAX_EVENT_RING_OPCODE
+};
+
+
+/*
+ * Modes for fairness algorithm
+ */
+enum fairness_mode {
+ FAIRNESS_COS_WRR_MODE,
+ FAIRNESS_COS_ETS_MODE,
+ MAX_FAIRNESS_MODE
+};
+
+
+/*
+ * per-vnic fairness variables
+ */
+struct fairness_vars_per_vn {
+ u32 cos_credit_delta[MAX_COS_NUMBER];
+ u32 vn_credit_delta;
+ u32 __reserved0;
+};
+
+
+/*
+ * Priority and cos
+ */
+struct priority_cos {
+ u8 priority;
+ u8 cos;
+ __le16 reserved1;
+};
+
+/*
+ * The data for flow control configuration
+ */
+struct flow_control_configuration {
+ struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
+ u8 dcb_enabled;
+ u8 dcb_version;
+ u8 dont_add_pri_0_en;
+ u8 reserved1;
+ __le32 reserved2;
+};
+
+
+/*
+ * Function start ramrod data
+ */
+struct function_start_data {
+ __le16 function_mode;
+ __le16 sd_vlan_tag;
+ u16 reserved;
+ u8 path_id;
+ u8 network_cos_mode;
+};
+
+
+/*
+ * FW version stored in the Xstorm RAM
+ */
+struct fw_version {
+#if defined(__BIG_ENDIAN)
+ u8 engineering;
+ u8 revision;
+ u8 minor;
+ u8 major;
+#elif defined(__LITTLE_ENDIAN)
+ u8 major;
+ u8 minor;
+ u8 revision;
+ u8 engineering;
+#endif
+ u32 flags;
+#define FW_VERSION_OPTIMIZED (0x1<<0)
+#define FW_VERSION_OPTIMIZED_SHIFT 0
+#define FW_VERSION_BIG_ENDIAN (0x1<<1)
+#define FW_VERSION_BIG_ENDIAN_SHIFT 1
+#define FW_VERSION_CHIP_VERSION (0x3<<2)
+#define FW_VERSION_CHIP_VERSION_SHIFT 2
+#define __FW_VERSION_RESERVED (0xFFFFFFF<<4)
+#define __FW_VERSION_RESERVED_SHIFT 4
+};
+
+
+/*
+ * Dynamic Host-Coalescing - Driver(host) counters
+ */
+struct hc_dynamic_sb_drv_counters {
+ u32 dynamic_hc_drv_counter[HC_SB_MAX_DYNAMIC_INDICES];
+};
+
+
+/*
+ * 2 bytes: configuration/state parameters for a single protocol index
+ */
+struct hc_index_data {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define HC_INDEX_DATA_SM_ID (0x1<<0)
+#define HC_INDEX_DATA_SM_ID_SHIFT 0
+#define HC_INDEX_DATA_HC_ENABLED (0x1<<1)
+#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2)
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
+#define HC_INDEX_DATA_RESERVE (0x1F<<3)
+#define HC_INDEX_DATA_RESERVE_SHIFT 3
+ u8 timeout;
+#elif defined(__LITTLE_ENDIAN)
+ u8 timeout;
+ u8 flags;
+#define HC_INDEX_DATA_SM_ID (0x1<<0)
+#define HC_INDEX_DATA_SM_ID_SHIFT 0
+#define HC_INDEX_DATA_HC_ENABLED (0x1<<1)
+#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2)
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
+#define HC_INDEX_DATA_RESERVE (0x1F<<3)
+#define HC_INDEX_DATA_RESERVE_SHIFT 3
+#endif
+};
+
+
+/*
+ * HC state-machine
+ */
+struct hc_status_block_sm {
+#if defined(__BIG_ENDIAN)
+ u8 igu_seg_id;
+ u8 igu_sb_id;
+ u8 timer_value;
+ u8 __flags;
+#elif defined(__LITTLE_ENDIAN)
+ u8 __flags;
+ u8 timer_value;
+ u8 igu_sb_id;
+ u8 igu_seg_id;
+#endif
+ u32 time_to_expire;
+};
+
+/*
+ * Holds PCI identification variables - used in various places in the firmware
+ */
+struct pci_entity {
+#if defined(__BIG_ENDIAN)
+ u8 vf_valid;
+ u8 vf_id;
+ u8 vnic_id;
+ u8 pf_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 pf_id;
+ u8 vnic_id;
+ u8 vf_id;
+ u8 vf_valid;
+#endif
+};
+
+/*
+ * The fast-path status block meta-data, common to all chips
+ */
+struct hc_sb_data {
+ struct regpair host_sb_addr;
+ struct hc_status_block_sm state_machine[HC_SB_MAX_SM];
+ struct pci_entity p_func;
+#if defined(__BIG_ENDIAN)
+ u8 rsrv0;
+ u8 state;
+ u8 dhc_qzone_id;
+ u8 same_igu_sb_1b;
+#elif defined(__LITTLE_ENDIAN)
+ u8 same_igu_sb_1b;
+ u8 dhc_qzone_id;
+ u8 state;
+ u8 rsrv0;
+#endif
+ struct regpair rsrv1[2];
+};
+
+
+/*
+ * Segment types for host coalescing
+ */
+enum hc_segment {
+ HC_REGULAR_SEGMENT,
+ HC_DEFAULT_SEGMENT,
+ MAX_HC_SEGMENT
+};
+
+
+/*
+ * The slow-path status block meta-data
+ */
+struct hc_sp_status_block_data {
+ struct regpair host_sb_addr;
+#if defined(__BIG_ENDIAN)
+ u8 rsrv1;
+ u8 state;
+ u8 igu_seg_id;
+ u8 igu_sb_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 igu_sb_id;
+ u8 igu_seg_id;
+ u8 state;
+ u8 rsrv1;
+#endif
+ struct pci_entity p_func;
+};
+
+
+/*
+ * The fast-path status block meta-data
+ */
+struct hc_status_block_data_e1x {
+ struct hc_index_data index_data[HC_SB_MAX_INDICES_E1X];
+ struct hc_sb_data common;
+};
+
+
+/*
+ * The fast-path status block meta-data
+ */
+struct hc_status_block_data_e2 {
+ struct hc_index_data index_data[HC_SB_MAX_INDICES_E2];
+ struct hc_sb_data common;
+};
+
+
+/*
+ * IGU block operation modes (in Everest2)
+ */
+enum igu_mode {
+ HC_IGU_BC_MODE,
+ HC_IGU_NBC_MODE,
+ MAX_IGU_MODE
+};
+
+
+/*
+ * IP versions
+ */
+enum ip_ver {
+ IP_V4,
+ IP_V6,
+ MAX_IP_VER
+};
+
+
+/*
+ * Multi-function modes
+ */
+enum mf_mode {
+ SINGLE_FUNCTION,
+ MULTI_FUNCTION_SD,
+ MULTI_FUNCTION_SI,
+ MULTI_FUNCTION_RESERVED,
+ MAX_MF_MODE
+};
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per pf)
+ */
+struct tstorm_per_pf_stats {
+ struct regpair rcv_error_bytes;
+};
+
+/*
+ * Per-PF statistics
+ */
+struct per_pf_stats {
+ struct tstorm_per_pf_stats tstorm_pf_statistics;
+};
+
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per port)
+ */
+struct tstorm_per_port_stats {
+ __le32 mac_discard;
+ __le32 mac_filter_discard;
+ __le32 brb_truncate_discard;
+ __le32 mf_tag_discard;
+ __le32 packet_drop;
+ __le32 reserved;
+};
+
+/*
+ * Per-port statistics
+ */
+struct per_port_stats {
+ struct tstorm_per_port_stats tstorm_port_statistics;
+};
+
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per client)
+ */
+struct tstorm_per_queue_stats {
+ struct regpair rcv_ucast_bytes;
+ __le32 rcv_ucast_pkts;
+ __le32 checksum_discard;
+ struct regpair rcv_bcast_bytes;
+ __le32 rcv_bcast_pkts;
+ __le32 pkts_too_big_discard;
+ struct regpair rcv_mcast_bytes;
+ __le32 rcv_mcast_pkts;
+ __le32 ttl0_discard;
+ __le16 no_buff_discard;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+/*
+ * Protocol-common statistics collected by the Ustorm (per client)
+ */
+struct ustorm_per_queue_stats {
+ struct regpair ucast_no_buff_bytes;
+ struct regpair mcast_no_buff_bytes;
+ struct regpair bcast_no_buff_bytes;
+ __le32 ucast_no_buff_pkts;
+ __le32 mcast_no_buff_pkts;
+ __le32 bcast_no_buff_pkts;
+ __le32 coalesced_pkts;
+ struct regpair coalesced_bytes;
+ __le32 coalesced_events;
+ __le32 coalesced_aborts;
+};
+
+/*
+ * Protocol-common statistics collected by the Xstorm (per client)
+ */
+struct xstorm_per_queue_stats {
+ struct regpair ucast_bytes_sent;
+ struct regpair mcast_bytes_sent;
+ struct regpair bcast_bytes_sent;
+ __le32 ucast_pkts_sent;
+ __le32 mcast_pkts_sent;
+ __le32 bcast_pkts_sent;
+ __le32 error_drop_pkts;
+};
+
+/*
+ * Per-queue statistics
+ */
+struct per_queue_stats {
+ struct tstorm_per_queue_stats tstorm_queue_statistics;
+ struct ustorm_per_queue_stats ustorm_queue_statistics;
+ struct xstorm_per_queue_stats xstorm_queue_statistics;
+};
+
+
+/*
+ * FW version stored in the first line of PRAM
+ */
+struct pram_fw_version {
+ u8 major;
+ u8 minor;
+ u8 revision;
+ u8 engineering;
+ u8 flags;
+#define PRAM_FW_VERSION_OPTIMIZED (0x1<<0)
+#define PRAM_FW_VERSION_OPTIMIZED_SHIFT 0
+#define PRAM_FW_VERSION_STORM_ID (0x3<<1)
+#define PRAM_FW_VERSION_STORM_ID_SHIFT 1
+#define PRAM_FW_VERSION_BIG_ENDIAN (0x1<<3)
+#define PRAM_FW_VERSION_BIG_ENDIAN_SHIFT 3
+#define PRAM_FW_VERSION_CHIP_VERSION (0x3<<4)
+#define PRAM_FW_VERSION_CHIP_VERSION_SHIFT 4
+#define __PRAM_FW_VERSION_RESERVED0 (0x3<<6)
+#define __PRAM_FW_VERSION_RESERVED0_SHIFT 6
+};
+
+
+/*
+ * specific data for protocol-common slow path element
+ */
+union protocol_common_specific_data {
+ u8 protocol_data[8];
+ struct regpair phy_address;
+ struct regpair mac_config_addr;
+};
+
+/*
+ * The send queue element
+ */
+struct protocol_common_spe {
+ struct spe_hdr hdr;
+ union protocol_common_specific_data data;
+};
+
+
+/*
+ * A single rate shaping counter; can be used as a protocol or vnic counter
+ */
+struct rate_shaping_counter {
+ u32 quota;
+#if defined(__BIG_ENDIAN)
+ u16 __reserved0;
+ u16 rate;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rate;
+ u16 __reserved0;
+#endif
+};
+
+
+/*
+ * per-vnic rate shaping variables
+ */
+struct rate_shaping_vars_per_vn {
+ struct rate_shaping_counter vn_counter;
+};
+
+
+/*
+ * The send queue element
+ */
+struct slow_path_element {
+ struct spe_hdr hdr;
+ struct regpair protocol_data;
+};
+
+
+/*
+ * Protocol-common statistics counter
+ */
+struct stats_counter {
+ __le16 xstats_counter;
+ __le16 reserved0;
+ __le32 reserved1;
+ __le16 tstats_counter;
+ __le16 reserved2;
+ __le32 reserved3;
+ __le16 ustats_counter;
+ __le16 reserved4;
+ __le32 reserved5;
+ __le16 cstats_counter;
+ __le16 reserved6;
+ __le32 reserved7;
+};
+
+
+/*
+ * Statistics query entry
+ */
+struct stats_query_entry {
+ u8 kind;
+ u8 index;
+ __le16 funcID;
+ __le32 reserved;
+ struct regpair address;
+};
+
+/*
+ * statistics command
+ */
+struct stats_query_cmd_group {
+ struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+};
+
+
+/*
+ * statistics command header
+ */
+struct stats_query_header {
+ u8 cmd_num;
+ u8 reserved0;
+ __le16 drv_stats_counter;
+ __le32 reserved1;
+ struct regpair stats_counters_addrs;
+};
+
+
+/*
+ * Types of statistics query entry
+ */
+enum stats_query_type {
+ STATS_TYPE_QUEUE,
+ STATS_TYPE_PORT,
+ STATS_TYPE_PF,
+ STATS_TYPE_TOE,
+ STATS_TYPE_FCOE,
+ MAX_STATS_QUERY_TYPE
+};
+
+
+/*
+ * Indicates the function status block state
+ */
+enum status_block_state {
+ SB_DISABLED,
+ SB_ENABLED,
+ SB_CLEANED,
+ MAX_STATUS_BLOCK_STATE
+};
+
+
+/*
+ * Storm IDs (including attentions for IGU related enums)
+ */
+enum storm_id {
+ USTORM_ID,
+ CSTORM_ID,
+ XSTORM_ID,
+ TSTORM_ID,
+ ATTENTION_ID,
+ MAX_STORM_ID
+};
+
+
+/*
+ * Traffic types used in ETS and flow control algorithms
+ */
+enum traffic_type {
+ LLFC_TRAFFIC_TYPE_NW,
+ LLFC_TRAFFIC_TYPE_FCOE,
+ LLFC_TRAFFIC_TYPE_ISCSI,
+ MAX_TRAFFIC_TYPE
+};
+
+
+/*
+ * zone A per-queue data
+ */
+struct tstorm_queue_zone_data {
+ struct regpair reserved[4];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct tstorm_vf_zone_data {
+ struct regpair reserved;
+};
+
+
+/*
+ * zone A per-queue data
+ */
+struct ustorm_queue_zone_data {
+ struct ustorm_eth_rx_producers eth_rx_producers;
+ struct regpair reserved[3];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct ustorm_vf_zone_data {
+ struct regpair reserved;
+};
+
+
+/*
+ * data per VF-PF channel
+ */
+struct vf_pf_channel_data {
+#if defined(__BIG_ENDIAN)
+ u16 reserved0;
+ u8 valid;
+ u8 state;
+#elif defined(__LITTLE_ENDIAN)
+ u8 state;
+ u8 valid;
+ u16 reserved0;
+#endif
+ u32 reserved1;
+};
+
+
+/*
+ * State of VF-PF channel
+ */
+enum vf_pf_channel_state {
+ VF_PF_CHANNEL_STATE_READY,
+ VF_PF_CHANNEL_STATE_WAITING_FOR_ACK,
+ MAX_VF_PF_CHANNEL_STATE
+};
+
+
+/*
+ * zone A per-queue data
+ */
+struct xstorm_queue_zone_data {
+ struct regpair reserved[4];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct xstorm_vf_zone_data {
+ struct regpair reserved;
+};
+
+#endif /* BNX2X_HSI_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
new file mode 100644
index 000000000000..4d748e77d1ac
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -0,0 +1,567 @@
+/* bnx2x_init.h: Broadcom Everest network driver.
+ * Structures and macros needed during initialization.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Modified by: Vladislav Zolotarov <vladz@broadcom.com>
+ */
+
+#ifndef BNX2X_INIT_H
+#define BNX2X_INIT_H
+
+/* Init operation types and structures */
+enum {
+ OP_RD = 0x1, /* read a single register */
+ OP_WR, /* write a single register */
+ OP_SW, /* copy a string to the device */
+ OP_ZR, /* clear memory */
+ OP_ZP, /* unzip then copy with DMAE */
+ OP_WR_64, /* write 64 bit pattern */
+ OP_WB, /* copy a string using DMAE */
+ OP_WB_ZR, /* Clear a string using DMAE or indirect-wr */
+ /* Skip the following ops if all of the init modes don't match */
+ OP_IF_MODE_OR,
+ /* Skip the following ops if any of the init modes don't match */
+ OP_IF_MODE_AND,
+ OP_MAX
+};
+
+enum {
+ STAGE_START,
+ STAGE_END,
+};
+
+/* Returns the index of start or end of a specific block stage in ops array */
+#define BLOCK_OPS_IDX(block, stage, end) \
+ (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
+
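+/* Worked example (NUM_OF_INIT_PHASES is 11, see the enum below):
+ * BLOCK_OPS_IDX(3, PHASE_PORT1, STAGE_END) = 2*((3*11) + 2) + 1 = 71;
+ * each (block, stage) pair thus owns two consecutive slots in the
+ * offsets array, one for its start index and one for its end index.
+ */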
+
+/* structs for the various opcodes */
+struct raw_op {
+ u32 op:8;
+ u32 offset:24;
+ u32 raw_data;
+};
+
+struct op_read {
+ u32 op:8;
+ u32 offset:24;
+ u32 val;
+};
+
+struct op_write {
+ u32 op:8;
+ u32 offset:24;
+ u32 val;
+};
+
+struct op_arr_write {
+ u32 op:8;
+ u32 offset:24;
+#ifdef __BIG_ENDIAN
+ u16 data_len;
+ u16 data_off;
+#else /* __LITTLE_ENDIAN */
+ u16 data_off;
+ u16 data_len;
+#endif
+};
+
+struct op_zero {
+ u32 op:8;
+ u32 offset:24;
+ u32 len;
+};
+
+struct op_if_mode {
+ u32 op:8;
+ u32 cmd_offset:24;
+ u32 mode_bit_map;
+};
+
+
+union init_op {
+ struct op_read read;
+ struct op_write write;
+ struct op_arr_write arr_wr;
+ struct op_zero zero;
+ struct raw_op raw;
+ struct op_if_mode if_mode;
+};
+
+
+/* Init Phases */
+enum {
+ PHASE_COMMON,
+ PHASE_PORT0,
+ PHASE_PORT1,
+ PHASE_PF0,
+ PHASE_PF1,
+ PHASE_PF2,
+ PHASE_PF3,
+ PHASE_PF4,
+ PHASE_PF5,
+ PHASE_PF6,
+ PHASE_PF7,
+ NUM_OF_INIT_PHASES
+};
+
+/* Init Modes */
+enum {
+ MODE_ASIC = 0x00000001,
+ MODE_FPGA = 0x00000002,
+ MODE_EMUL = 0x00000004,
+ MODE_E2 = 0x00000008,
+ MODE_E3 = 0x00000010,
+ MODE_PORT2 = 0x00000020,
+ MODE_PORT4 = 0x00000040,
+ MODE_SF = 0x00000080,
+ MODE_MF = 0x00000100,
+ MODE_MF_SD = 0x00000200,
+ MODE_MF_SI = 0x00000400,
+ MODE_MF_NIV = 0x00000800,
+ MODE_E3_A0 = 0x00001000,
+ MODE_E3_B0 = 0x00002000,
+ MODE_COS3 = 0x00004000,
+ MODE_COS6 = 0x00008000,
+ MODE_LITTLE_ENDIAN = 0x00010000,
+ MODE_BIG_ENDIAN = 0x00020000,
+};
+
+/* Init Blocks */
+enum {
+ BLOCK_ATC,
+ BLOCK_BRB1,
+ BLOCK_CCM,
+ BLOCK_CDU,
+ BLOCK_CFC,
+ BLOCK_CSDM,
+ BLOCK_CSEM,
+ BLOCK_DBG,
+ BLOCK_DMAE,
+ BLOCK_DORQ,
+ BLOCK_HC,
+ BLOCK_IGU,
+ BLOCK_MISC,
+ BLOCK_NIG,
+ BLOCK_PBF,
+ BLOCK_PGLUE_B,
+ BLOCK_PRS,
+ BLOCK_PXP2,
+ BLOCK_PXP,
+ BLOCK_QM,
+ BLOCK_SRC,
+ BLOCK_TCM,
+ BLOCK_TM,
+ BLOCK_TSDM,
+ BLOCK_TSEM,
+ BLOCK_UCM,
+ BLOCK_UPB,
+ BLOCK_USDM,
+ BLOCK_USEM,
+ BLOCK_XCM,
+ BLOCK_XPB,
+ BLOCK_XSDM,
+ BLOCK_XSEM,
+ BLOCK_MISC_AEU,
+ NUM_OF_INIT_BLOCKS
+};
+
+/* QM queue numbers */
+#define BNX2X_ETH_Q 0
+#define BNX2X_TOE_Q 3
+#define BNX2X_TOE_ACK_Q 6
+#define BNX2X_ISCSI_Q 9
+#define BNX2X_ISCSI_ACK_Q 11
+#define BNX2X_FCOE_Q 10
+
+/* Vnics per mode */
+#define BNX2X_PORT2_MODE_NUM_VNICS 4
+#define BNX2X_PORT4_MODE_NUM_VNICS 2
+
+/* COS offset for port1 in E3 B0 4port mode */
+#define BNX2X_E3B0_PORT1_COS_OFFSET 3
+
+/* QM Register addresses */
+#define BNX2X_Q_VOQ_REG_ADDR(pf_q_num)\
+ (QM_REG_QVOQIDX_0 + 4 * (pf_q_num))
+#define BNX2X_VOQ_Q_REG_ADDR(cos, pf_q_num)\
+ (QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5)))
+#define BNX2X_Q_CMDQ_REG_ADDR(pf_q_num)\
+ (QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4))
+
+/* extracts the QM queue number for the specified port and vnic */
+#define BNX2X_PF_Q_NUM(q_num, port, vnic)\
+ ((((port) << 1) | (vnic)) * 16 + (q_num))
+
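+/* Worked example: for port 1, vnic 0 and the FCoE queue (BNX2X_FCOE_Q
+ * above), BNX2X_PF_Q_NUM(10, 1, 0) = (((1 << 1) | 0) * 16) + 10 = 42;
+ * each (port, vnic) pair thus owns a bank of 16 consecutive QM queues.
+ */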
+
+/* Maps the specified queue to the specified COS */
+static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos)
+{
+ /* find current COS mapping */
+ u32 curr_cos = REG_RD(bp, QM_REG_QVOQIDX_0 + q_num * 4);
+
+ /* check if queue->COS mapping has changed */
+ if (curr_cos != new_cos) {
+ u32 num_vnics = BNX2X_PORT2_MODE_NUM_VNICS;
+ u32 reg_addr, reg_bit_map, vnic;
+
+ /* update parameters for 4port mode */
+ if (INIT_MODE_FLAGS(bp) & MODE_PORT4) {
+ num_vnics = BNX2X_PORT4_MODE_NUM_VNICS;
+ if (BP_PORT(bp)) {
+ curr_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
+ new_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
+ }
+ }
+
+ /* change queue mapping for each VNIC */
+ for (vnic = 0; vnic < num_vnics; vnic++) {
+ u32 pf_q_num =
+ BNX2X_PF_Q_NUM(q_num, BP_PORT(bp), vnic);
+ u32 q_bit_map = 1 << (pf_q_num & 0x1f);
+
+ /* overwrite queue->VOQ mapping */
+ REG_WR(bp, BNX2X_Q_VOQ_REG_ADDR(pf_q_num), new_cos);
+
+ /* clear queue bit from current COS bit map */
+ reg_addr = BNX2X_VOQ_Q_REG_ADDR(curr_cos, pf_q_num);
+ reg_bit_map = REG_RD(bp, reg_addr);
+ REG_WR(bp, reg_addr, reg_bit_map & (~q_bit_map));
+
+ /* set queue bit in new COS bit map */
+ reg_addr = BNX2X_VOQ_Q_REG_ADDR(new_cos, pf_q_num);
+ reg_bit_map = REG_RD(bp, reg_addr);
+ REG_WR(bp, reg_addr, reg_bit_map | q_bit_map);
+
+ /* set/clear queue bit in command-queue bit map
+ (E2/E3A0 only, valid COS values are 0/1) */
+ if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) {
+ reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num);
+ reg_bit_map = REG_RD(bp, reg_addr);
+ q_bit_map = 1 << (2 * (pf_q_num & 0xf));
+ reg_bit_map = new_cos ?
+ (reg_bit_map | q_bit_map) :
+ (reg_bit_map & (~q_bit_map));
+ REG_WR(bp, reg_addr, reg_bit_map);
+ }
+ }
+ }
+}
+
+/* Configures the QM according to the specified per-traffic-type COSes */
+static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
+ struct priority_cos *traffic_cos)
+{
+ bnx2x_map_q_cos(bp, BNX2X_FCOE_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos);
+ bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
+ bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
+ if (mode != STATIC_COS) {
+ /* required only in backward compatible COS mode */
+ bnx2x_map_q_cos(bp, BNX2X_ETH_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+ bnx2x_map_q_cos(bp, BNX2X_TOE_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+ bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+ }
+}
+
+
+#define INITOP_SET 0 /* set the HW directly */
+#define INITOP_CLEAR 1 /* clear the HW directly */
+#define INITOP_INIT 2 /* set the init-value array */
+
+/****************************************************************************
+* ILT management
+****************************************************************************/
+struct ilt_line {
+ dma_addr_t page_mapping;
+ void *page;
+ u32 size;
+};
+
+struct ilt_client_info {
+ u32 page_size;
+ u16 start;
+ u16 end;
+ u16 client_num;
+ u16 flags;
+#define ILT_CLIENT_SKIP_INIT 0x1
+#define ILT_CLIENT_SKIP_MEM 0x2
+};
+
+struct bnx2x_ilt {
+ u32 start_line;
+ struct ilt_line *lines;
+ struct ilt_client_info clients[4];
+#define ILT_CLIENT_CDU 0
+#define ILT_CLIENT_QM 1
+#define ILT_CLIENT_SRC 2
+#define ILT_CLIENT_TM 3
+};
+
+/****************************************************************************
+* SRC configuration
+****************************************************************************/
+struct src_ent {
+ u8 opaque[56];
+ u64 next;
+};
+
+/****************************************************************************
+* Parity configuration
+****************************************************************************/
+#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2, m3) \
+{ \
+ block##_REG_##block##_PRTY_MASK, \
+ block##_REG_##block##_PRTY_STS_CLR, \
+ en_mask, {m1, m1h, m2, m3}, #block \
+}
+
+#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2, m3) \
+{ \
+ block##_REG_##block##_PRTY_MASK_0, \
+ block##_REG_##block##_PRTY_STS_CLR_0, \
+ en_mask, {m1, m1h, m2, m3}, #block"_0" \
+}
+
+#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2, m3) \
+{ \
+ block##_REG_##block##_PRTY_MASK_1, \
+ block##_REG_##block##_PRTY_STS_CLR_1, \
+ en_mask, {m1, m1h, m2, m3}, #block"_1" \
+}
+
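+/* Expansion sketch: the ## token pasting derives the register names from
+ * the block name, so e.g. BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0)
+ * expands to
+ *
+ *	{ HC_REG_HC_PRTY_MASK, HC_REG_HC_PRTY_STS_CLR,
+ *	  0x7, {0x7, 0x7, 0, 0}, "HC" }
+ *
+ * matching the entry layout of bnx2x_blocks_parity_data below.
+ */
+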
+static const struct {
+ u32 mask_addr;
+ u32 sts_clr_addr;
+ u32 en_mask; /* Mask to enable parity attentions */
+ struct {
+ u32 e1; /* 57710 */
+ u32 e1h; /* 57711 */
+ u32 e2; /* 57712 */
+ u32 e3; /* 578xx */
+ } reg_mask; /* Register mask (all valid bits) */
+ char name[7]; /* Block's longest name is 6 characters long
+ * (name + suffix)
+ */
+} bnx2x_blocks_parity_data[] = {
+ /* bit 19 masked */
+ /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
+ /* bit 5,18,20-31 */
+ /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
+ /* bit 5 */
+ /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */
+ /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
+ /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
+
+ /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
+ * want to handle "system kill" flow at the moment.
+ */
+ BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff,
+ 0x7ffffff),
+ BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff),
+ BLOCK_PRTY_INFO_1(PXP2, 0x1ffffff, 0x7f, 0x7f, 0x7ff, 0x1ffffff),
+ BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0),
+ BLOCK_PRTY_INFO(NIG, 0xffffffff, 0x3fffffff, 0xffffffff, 0, 0),
+ BLOCK_PRTY_INFO_0(NIG, 0xffffffff, 0, 0, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(NIG, 0xffff, 0, 0, 0xff, 0xffff),
+ BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1, 0x1),
+ BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff, 0xfff),
+ BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0, 0x3, 0x3),
+ BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3, 0x3),
+ {GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
+ GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf,
+ {0xf, 0xf, 0xf, 0xf}, "UPB"},
+ {GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
+ GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
+ {0xf, 0xf, 0xf, 0xf}, "XPB"},
+ BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7, 0x7),
+ BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf, 0x3f),
+ BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1, 0x1),
+ BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf, 0xf),
+ BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf, 0xf),
+ BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff, 0xff),
+ BLOCK_PRTY_INFO(PBF, 0, 0, 0x3ffff, 0xfffff, 0xfffffff),
+ BLOCK_PRTY_INFO(TM, 0, 0, 0x7f, 0x7f, 0x7f),
+ BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(TCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+ BLOCK_PRTY_INFO(CCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+ BLOCK_PRTY_INFO(UCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+ BLOCK_PRTY_INFO(XCM, 0, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff),
+ BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff),
+ BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
+ BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff),
+ BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff),
+ BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff),
+ BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
+};
+
+
+/* [28] MCP Latched rom_parity
+ * [29] MCP Latched ump_rx_parity
+ * [30] MCP Latched ump_tx_parity
+ * [31] MCP Latched scpad_parity
+ */
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
+ (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+
+/* Below registers control the MCP parity attention output. When
+ * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
+ * enabled, when cleared - disabled.
+ */
+static const u32 mcp_attn_ctl_regs[] = {
+ MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+ MISC_REG_AEU_ENABLE4_NIG_0,
+ MISC_REG_AEU_ENABLE4_PXP_0,
+ MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+ MISC_REG_AEU_ENABLE4_NIG_1,
+ MISC_REG_AEU_ENABLE4_PXP_1
+};
+
+static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
+{
+ int i;
+ u32 reg_val;
+
+ for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
+ reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
+
+ if (enable)
+ reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
+ else
+ reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
+
+ REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
+ }
+}
+
+static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx)
+{
+ if (CHIP_IS_E1(bp))
+ return bnx2x_blocks_parity_data[idx].reg_mask.e1;
+ else if (CHIP_IS_E1H(bp))
+ return bnx2x_blocks_parity_data[idx].reg_mask.e1h;
+ else if (CHIP_IS_E2(bp))
+ return bnx2x_blocks_parity_data[idx].reg_mask.e2;
+ else /* CHIP_IS_E3 */
+ return bnx2x_blocks_parity_data[idx].reg_mask.e3;
+}
+
+static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+ u32 dis_mask = bnx2x_parity_reg_mask(bp, i);
+
+ if (dis_mask) {
+ REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+ dis_mask);
+ DP(NETIF_MSG_HW, "Setting parity mask "
+ "for %s to\t\t0x%x\n",
+ bnx2x_blocks_parity_data[i].name, dis_mask);
+ }
+ }
+
+ /* Disable MCP parity attentions */
+ bnx2x_set_mcp_parity(bp, false);
+}
+
+/**
+ * Clear the parity error status registers.
+ */
+static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
+{
+ int i;
+ u32 reg_val, mcp_aeu_bits =
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
+
+ /* Clear SEM_FAST parities */
+ REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+
+ for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+ u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+ if (reg_mask) {
+ reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i].
+ sts_clr_addr);
+ if (reg_val & reg_mask)
+ DP(NETIF_MSG_HW,
+ "Parity errors in %s: 0x%x\n",
+ bnx2x_blocks_parity_data[i].name,
+ reg_val & reg_mask);
+ }
+ }
+
+ /* Check if there were parity attentions in MCP */
+ reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP);
+ if (reg_val & mcp_aeu_bits)
+ DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n",
+ reg_val & mcp_aeu_bits);
+
+ /* Clear parity attentions in MCP:
+ * [7] clears Latched rom_parity
+ * [8] clears Latched ump_rx_parity
+ * [9] clears Latched ump_tx_parity
+ * [10] clears Latched scpad_parity (both ports)
+ */
+ REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
+}
+
+static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+ u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+ if (reg_mask)
+ REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+ bnx2x_blocks_parity_data[i].en_mask & reg_mask);
+ }
+
+ /* Enable MCP parity attentions */
+ bnx2x_set_mcp_parity(bp, true);
+}
+
+
+#endif /* BNX2X_INIT_H */
+
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
new file mode 100644
index 000000000000..7ec1724753ad
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -0,0 +1,912 @@
+/* bnx2x_init_ops.h: Broadcom Everest network driver.
+ * Static functions needed during initialization.
+ * This file is "included" in bnx2x_main.c.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ */
+
+#ifndef BNX2X_INIT_OPS_H
+#define BNX2X_INIT_OPS_H
+
+
+#ifndef BP_ILT
+#define BP_ILT(bp) NULL
+#endif
+
+#ifndef BP_FUNC
+#define BP_FUNC(bp) 0
+#endif
+
+#ifndef BP_PORT
+#define BP_PORT(bp) 0
+#endif
+
+#ifndef BNX2X_ILT_FREE
+#define BNX2X_ILT_FREE(x, y, sz)
+#endif
+
+#ifndef BNX2X_ILT_ZALLOC
+#define BNX2X_ILT_ZALLOC(x, y, sz)
+#endif
+
+#ifndef ILOG2
+#define ILOG2(x) x
+#endif
+
+static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp,
+ dma_addr_t phys_addr, u32 addr,
+ u32 len);
+
+static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr,
+ const u32 *data, u32 len)
+{
+ u32 i;
+
+ for (i = 0; i < len; i++)
+ REG_WR(bp, addr + i*4, data[i]);
+}
+
+static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr,
+ const u32 *data, u32 len)
+{
+ u32 i;
+
+ for (i = 0; i < len; i++)
+ bnx2x_reg_wr_ind(bp, addr + i*4, data[i]);
+}
+
+static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len,
+ u8 wb)
+{
+ if (bp->dmae_ready)
+ bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
+ else if (wb)
+ /*
+ * Wide bus registers with no dmae need to be written
+ * using indirect write.
+ */
+ bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
+ else
+ bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
+}
+
+static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill,
+ u32 len, u8 wb)
+{
+ u32 buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
+ u32 buf_len32 = buf_len/4;
+ u32 i;
+
+ memset(GUNZIP_BUF(bp), (u8)fill, buf_len);
+
+ for (i = 0; i < len; i += buf_len32) {
+ u32 cur_len = min(buf_len32, len - i);
+
+ bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb);
+ }
+}
+
+static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
+{
+ if (bp->dmae_ready)
+ bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
+ else
+ bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
+}
+
+static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr,
+ const u32 *data, u32 len64)
+{
+ u32 buf_len32 = FW_BUF_SIZE/4;
+ u32 len = len64*2;
+ u64 data64 = 0;
+ u32 i;
+
+ /* 64 bit value is in a blob: first low DWORD, then high DWORD */
+ data64 = HILO_U64((*(data + 1)), (*data));
+
+ len64 = min((u32)(FW_BUF_SIZE/8), len64);
+ for (i = 0; i < len64; i++) {
+ u64 *pdata = ((u64 *)(GUNZIP_BUF(bp))) + i;
+
+ *pdata = data64;
+ }
+
+ for (i = 0; i < len; i += buf_len32) {
+ u32 cur_len = min(buf_len32, len - i);
+
+ bnx2x_write_big_buf_wb(bp, addr + i*4, cur_len);
+ }
+}
+
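+/* Worked example for the blob layout above: with data[0] = 0x11111111
+ * (low DWORD) and data[1] = 0x22222222 (high DWORD), HILO_U64 yields the
+ * 64-bit pattern 0x2222222211111111, which is replicated across the
+ * GUNZIP buffer and written out in FW_BUF_SIZE-sized chunks.
+ */
+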
+/*********************************************************
+ There are different blobs for each PRAM section.
+ In addition, each blob write operation is divided into a few operations
+ in order to decrease the amount of phys. contiguous buffer needed.
+ Thus, when we select a blob the address may be with some offset
+ from the beginning of PRAM section.
+ The same holds for the INT_TABLE sections.
+**********************************************************/
+#define IF_IS_INT_TABLE_ADDR(base, addr) \
+ if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
+
+#define IF_IS_PRAM_ADDR(base, addr) \
+ if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
+
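+/* Expansion note: these macros expand to a bare if-statement, so
+ *
+ *	IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
+ *		data = INIT_TSEM_PRAM_DATA(bp);
+ *
+ * becomes
+ *
+ *	if ((TSEM_REG_PRAM <= addr) && ((TSEM_REG_PRAM + 0x40000) >= addr))
+ *		data = INIT_TSEM_PRAM_DATA(bp);
+ *
+ * which is what lets bnx2x_sel_blob() below chain them with else.
+ */
+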
+static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr,
+ const u8 *data)
+{
+ IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
+ data = INIT_TSEM_INT_TABLE_DATA(bp);
+ else
+ IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
+ data = INIT_CSEM_INT_TABLE_DATA(bp);
+ else
+ IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
+ data = INIT_USEM_INT_TABLE_DATA(bp);
+ else
+ IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
+ data = INIT_XSEM_INT_TABLE_DATA(bp);
+ else
+ IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
+ data = INIT_TSEM_PRAM_DATA(bp);
+ else
+ IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
+ data = INIT_CSEM_PRAM_DATA(bp);
+ else
+ IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
+ data = INIT_USEM_PRAM_DATA(bp);
+ else
+ IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
+ data = INIT_XSEM_PRAM_DATA(bp);
+
+ return data;
+}
+
+static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr,
+ const u32 *data, u32 len)
+{
+ if (bp->dmae_ready)
+ VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
+ else
+ bnx2x_init_ind_wr(bp, addr, data, len);
+}
+
+static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo,
+ u32 val_hi)
+{
+ u32 wb_write[2];
+
+ wb_write[0] = val_lo;
+ wb_write[1] = val_hi;
+ REG_WR_DMAE_LEN(bp, reg, wb_write, 2);
+}
+static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len,
+ u32 blob_off)
+{
+ const u8 *data = NULL;
+ int rc;
+ u32 i;
+
+ data = bnx2x_sel_blob(bp, addr, data) + blob_off*4;
+
+ rc = bnx2x_gunzip(bp, data, len);
+ if (rc)
+ return;
+
+ /* gunzip_outlen is in dwords */
+ len = GUNZIP_OUTLEN(bp);
+ for (i = 0; i < len; i++)
+ ((u32 *)GUNZIP_BUF(bp))[i] =
+ cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]);
+
+ bnx2x_write_big_buf_wb(bp, addr, len);
+}
+
+static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
+{
+ u16 op_start =
+ INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
+ STAGE_START)];
+ u16 op_end =
+ INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
+ STAGE_END)];
+ union init_op *op;
+ u32 op_idx, op_type, addr, len;
+ const u32 *data, *data_base;
+
+ /* If empty block */
+ if (op_start == op_end)
+ return;
+
+ data_base = INIT_DATA(bp);
+
+ for (op_idx = op_start; op_idx < op_end; op_idx++) {
+
+ op = (union init_op *)&(INIT_OPS(bp)[op_idx]);
+ /* Get generic data */
+ op_type = op->raw.op;
+ addr = op->raw.offset;
+ /* Get data that's used for OP_SW, OP_WB, OP_ZP and
+ * OP_WR_64 (we assume that op_arr_write and op_write have the
+ * same structure).
+ */
+ len = op->arr_wr.data_len;
+ data = data_base + op->arr_wr.data_off;
+
+ switch (op_type) {
+ case OP_RD:
+ REG_RD(bp, addr);
+ break;
+ case OP_WR:
+ REG_WR(bp, addr, op->write.val);
+ break;
+ case OP_SW:
+ bnx2x_init_str_wr(bp, addr, data, len);
+ break;
+ case OP_WB:
+ bnx2x_init_wr_wb(bp, addr, data, len);
+ break;
+ case OP_ZR:
+ bnx2x_init_fill(bp, addr, 0, op->zero.len, 0);
+ break;
+ case OP_WB_ZR:
+ bnx2x_init_fill(bp, addr, 0, op->zero.len, 1);
+ break;
+ case OP_ZP:
+ bnx2x_init_wr_zp(bp, addr, len,
+ op->arr_wr.data_off);
+ break;
+ case OP_WR_64:
+ bnx2x_init_wr_64(bp, addr, data, len);
+ break;
+ case OP_IF_MODE_AND:
+ /* if any of the flags doesn't match, skip the
+ * conditional block.
+ */
+ if ((INIT_MODE_FLAGS(bp) &
+ op->if_mode.mode_bit_map) !=
+ op->if_mode.mode_bit_map)
+ op_idx += op->if_mode.cmd_offset;
+ break;
+ case OP_IF_MODE_OR:
+ /* if all the flags don't match, skip the conditional
+ * block.
+ */
+ if ((INIT_MODE_FLAGS(bp) &
+ op->if_mode.mode_bit_map) == 0)
+ op_idx += op->if_mode.cmd_offset;
+ break;
+ default:
+ /* Should never get here! */
+
+ break;
+ }
+ }
+}
+
+
+/****************************************************************************
+* PXP Arbiter
+****************************************************************************/
+/*
+ * This code configures the PCI read/write arbiter
+ * which implements a weighted round robin
+ * between the virtual queues in the chip.
+ *
+ * The values were derived for each PCI max payload and max request size.
+ * Since max payload and max request size are only known at run time,
+ * this is done as a separate init stage.
+ */
+
+#define NUM_WR_Q 13
+#define NUM_RD_Q 29
+#define MAX_RD_ORD 3
+#define MAX_WR_ORD 2
+
+/* configuration for one arbiter queue */
+struct arb_line {
+ int l;
+ int add;
+ int ubound;
+};
+
+/* derived configuration for each read queue for each max request size */
+static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
+/* 1 */ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
+ { {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} },
+ { {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} },
+ { {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} },
+ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
+/* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+/* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
+ { {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
+};
+
+/* derived configuration for each write queue for each max request size */
+static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
+/* 1 */ { {4, 6, 3}, {4, 6, 3}, {4, 6, 3} },
+ { {4, 2, 3}, {4, 2, 3}, {4, 2, 3} },
+ { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
+ { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
+ { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
+ { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
+ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
+ { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
+ { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
+/* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} },
+ { {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
+ { {8, 9, 6}, {16, 9, 11}, {16, 9, 11} },
+ { {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
+};
+
+/* register addresses for read queues */
+static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
+/* 1 */ {PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
+ PXP2_REG_RQ_BW_RD_UBOUND0},
+ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
+ PXP2_REG_PSWRQ_BW_UB1},
+ {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
+ PXP2_REG_PSWRQ_BW_UB2},
+ {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
+ PXP2_REG_PSWRQ_BW_UB3},
+ {PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
+ PXP2_REG_RQ_BW_RD_UBOUND4},
+ {PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
+ PXP2_REG_RQ_BW_RD_UBOUND5},
+ {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
+ PXP2_REG_PSWRQ_BW_UB6},
+ {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
+ PXP2_REG_PSWRQ_BW_UB7},
+ {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
+ PXP2_REG_PSWRQ_BW_UB8},
+/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
+ PXP2_REG_PSWRQ_BW_UB9},
+ {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
+ PXP2_REG_PSWRQ_BW_UB10},
+ {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
+ PXP2_REG_PSWRQ_BW_UB11},
+ {PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
+ PXP2_REG_RQ_BW_RD_UBOUND12},
+ {PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
+ PXP2_REG_RQ_BW_RD_UBOUND13},
+ {PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
+ PXP2_REG_RQ_BW_RD_UBOUND14},
+ {PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
+ PXP2_REG_RQ_BW_RD_UBOUND15},
+ {PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
+ PXP2_REG_RQ_BW_RD_UBOUND16},
+ {PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
+ PXP2_REG_RQ_BW_RD_UBOUND17},
+ {PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
+ PXP2_REG_RQ_BW_RD_UBOUND18},
+/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
+ PXP2_REG_RQ_BW_RD_UBOUND19},
+ {PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
+ PXP2_REG_RQ_BW_RD_UBOUND20},
+ {PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
+ PXP2_REG_RQ_BW_RD_UBOUND22},
+ {PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
+ PXP2_REG_RQ_BW_RD_UBOUND23},
+ {PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
+ PXP2_REG_RQ_BW_RD_UBOUND24},
+ {PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
+ PXP2_REG_RQ_BW_RD_UBOUND25},
+ {PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
+ PXP2_REG_RQ_BW_RD_UBOUND26},
+ {PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
+ PXP2_REG_RQ_BW_RD_UBOUND27},
+ {PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
+ PXP2_REG_PSWRQ_BW_UB28}
+};
+
+/* register addresses for write queues */
+static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
+/* 1 */ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
+ PXP2_REG_PSWRQ_BW_UB1},
+ {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
+ PXP2_REG_PSWRQ_BW_UB2},
+ {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
+ PXP2_REG_PSWRQ_BW_UB3},
+ {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
+ PXP2_REG_PSWRQ_BW_UB6},
+ {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
+ PXP2_REG_PSWRQ_BW_UB7},
+ {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
+ PXP2_REG_PSWRQ_BW_UB8},
+ {PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
+ PXP2_REG_PSWRQ_BW_UB9},
+ {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
+ PXP2_REG_PSWRQ_BW_UB10},
+ {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
+ PXP2_REG_PSWRQ_BW_UB11},
+/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
+ PXP2_REG_PSWRQ_BW_UB28},
+ {PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
+ PXP2_REG_RQ_BW_WR_UBOUND29},
+ {PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
+ PXP2_REG_RQ_BW_WR_UBOUND30}
+};
+
+static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order,
+ int w_order)
+{
+ u32 val, i;
+
+ if (r_order > MAX_RD_ORD) {
+		DP(NETIF_MSG_HW, "read order of %d adjusted to %d\n",
+			r_order, MAX_RD_ORD);
+ r_order = MAX_RD_ORD;
+ }
+ if (w_order > MAX_WR_ORD) {
+		DP(NETIF_MSG_HW, "write order of %d adjusted to %d\n",
+			w_order, MAX_WR_ORD);
+ w_order = MAX_WR_ORD;
+ }
+ if (CHIP_REV_IS_FPGA(bp)) {
+ DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n");
+ w_order = 0;
+ }
+ DP(NETIF_MSG_HW, "read order %d write order %d\n", r_order, w_order);
+
+ for (i = 0; i < NUM_RD_Q-1; i++) {
+ REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l);
+ REG_WR(bp, read_arb_addr[i].add,
+ read_arb_data[i][r_order].add);
+ REG_WR(bp, read_arb_addr[i].ubound,
+ read_arb_data[i][r_order].ubound);
+ }
+
+ for (i = 0; i < NUM_WR_Q-1; i++) {
+ if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
+ (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
+
+ REG_WR(bp, write_arb_addr[i].l,
+ write_arb_data[i][w_order].l);
+
+ REG_WR(bp, write_arb_addr[i].add,
+ write_arb_data[i][w_order].add);
+
+ REG_WR(bp, write_arb_addr[i].ubound,
+ write_arb_data[i][w_order].ubound);
+ } else {
+
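+			/*
+			 * These PSWRQ registers apparently pack more than one
+			 * queue's fields, so this queue's l/add/ubound values
+			 * are OR-ed in at bit offsets 10, 10 and 7.
+			 */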
+ val = REG_RD(bp, write_arb_addr[i].l);
+ REG_WR(bp, write_arb_addr[i].l,
+ val | (write_arb_data[i][w_order].l << 10));
+
+ val = REG_RD(bp, write_arb_addr[i].add);
+ REG_WR(bp, write_arb_addr[i].add,
+ val | (write_arb_data[i][w_order].add << 10));
+
+ val = REG_RD(bp, write_arb_addr[i].ubound);
+ REG_WR(bp, write_arb_addr[i].ubound,
+ val | (write_arb_data[i][w_order].ubound << 7));
+ }
+ }
+
+ val = write_arb_data[NUM_WR_Q-1][w_order].add;
+ val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
+ val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
+ REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val);
+
+ val = read_arb_data[NUM_RD_Q-1][r_order].add;
+ val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
+ val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
+ REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);
+
+ REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
+ REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
+ REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
+ REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
+
+ if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD))
+ REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
+
+ if (CHIP_IS_E3(bp))
+ REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order));
+ else if (CHIP_IS_E2(bp))
+ REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
+ else
+ REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
+
+ if (!CHIP_IS_E1(bp)) {
+ /* MPS w_order optimal TH presently TH
+ * 128 0 0 2
+ * 256 1 1 3
+ * >=512 2 2 3
+ */
+ /* DMAE is special */
+ if (!CHIP_IS_E1H(bp)) {
+ /* E2 can use optimal TH */
+ val = w_order;
+ REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val);
+ } else {
+ val = ((w_order == 0) ? 2 : 3);
+ REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2);
+ }
+
+ REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_QM_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
+ REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
+ }
+
+	/* Validate number of tags supported by device */
+#define PCIE_REG_PCIER_TL_HDR_FC_ST 0x2980
+ val = REG_RD(bp, PCIE_REG_PCIER_TL_HDR_FC_ST);
+ val &= 0xFF;
+ if (val <= 0x20)
+ REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
+}
+
+/****************************************************************************
+* ILT management
+****************************************************************************/
+/*
+ * This code hides the low level HW interaction for ILT management and
+ * configuration. The API consists of a shadow ILT table which is set by the
+ * driver and a set of routines to use it to configure the HW.
+ *
+ */
+
+/* ILT HW init operations */
+
+/* ILT memory management operations */
+#define ILT_MEMOP_ALLOC 0
+#define ILT_MEMOP_FREE 1
+
+/* the phys address is shifted right by 12 bits and a 1=valid bit is
+ * set at the 53rd bit (bit 52);
+ * then, since this is a wide register(TM),
+ * we split it into two 32 bit writes
+ */
+#define ILT_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
+#define ILT_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
+#define ILT_RANGE(f, l) (((l) << 10) | f)
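+/*
+ * Example: page_mapping = 0x0000000123456000 gives
+ * ILT_ADDR1 = 0x00123456 (phys >> 12, low 32 bits) and
+ * ILT_ADDR2 = 0x00100000 (valid bit 20 set; phys >> 44 = 0).
+ * ILT_RANGE(f, l) packs the first and last ILT lines into one register.
+ */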
+
+static int bnx2x_ilt_line_mem_op(struct bnx2x *bp,
+ struct ilt_line *line, u32 size, u8 memop)
+{
+ if (memop == ILT_MEMOP_FREE) {
+ BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);
+ return 0;
+ }
+ BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, size);
+ if (!line->page)
+ return -1;
+ line->size = size;
+ return 0;
+}
+
+static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
+ u8 memop)
+{
+ int i, rc;
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+ struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+
+ if (!ilt || !ilt->lines)
+ return -1;
+
+ if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
+ return 0;
+
+ for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
+ rc = bnx2x_ilt_line_mem_op(bp, &ilt->lines[i],
+ ilt_cli->page_size, memop);
+ }
+ return rc;
+}
+
+static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
+{
+ int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
+ if (!rc)
+ rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop);
+ if (!rc)
+ rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
+ if (!rc)
+ rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
+
+ return rc;
+}
+
+static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx,
+ dma_addr_t page_mapping)
+{
+ u32 reg;
+
+ if (CHIP_IS_E1(bp))
+ reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx*8;
+ else
+ reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8;
+
+ bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
+}
+
+static void bnx2x_ilt_line_init_op(struct bnx2x *bp,
+ struct bnx2x_ilt *ilt, int idx, u8 initop)
+{
+ dma_addr_t null_mapping;
+ int abs_idx = ilt->start_line + idx;
+
+ switch (initop) {
+ case INITOP_INIT:
+ /* set in the init-value array */
+ case INITOP_SET:
+ bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping);
+ break;
+ case INITOP_CLEAR:
+ null_mapping = 0;
+ bnx2x_ilt_line_wr(bp, abs_idx, null_mapping);
+ break;
+ }
+}
+
+static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
+ struct ilt_client_info *ilt_cli,
+ u32 ilt_start, u8 initop)
+{
+ u32 start_reg = 0;
+ u32 end_reg = 0;
+
+	/* The boundary op is either SET or INIT; CLEAR maps to SET,
+	   and for now SET behaves the same as INIT */
+
+ /* find the appropriate regs */
+ if (CHIP_IS_E1(bp)) {
+ switch (ilt_cli->client_num) {
+ case ILT_CLIENT_CDU:
+ start_reg = PXP2_REG_PSWRQ_CDU0_L2P;
+ break;
+ case ILT_CLIENT_QM:
+ start_reg = PXP2_REG_PSWRQ_QM0_L2P;
+ break;
+ case ILT_CLIENT_SRC:
+ start_reg = PXP2_REG_PSWRQ_SRC0_L2P;
+ break;
+ case ILT_CLIENT_TM:
+ start_reg = PXP2_REG_PSWRQ_TM0_L2P;
+ break;
+ }
+ REG_WR(bp, start_reg + BP_FUNC(bp)*4,
+ ILT_RANGE((ilt_start + ilt_cli->start),
+ (ilt_start + ilt_cli->end)));
+ } else {
+ switch (ilt_cli->client_num) {
+ case ILT_CLIENT_CDU:
+ start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
+ end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
+ break;
+ case ILT_CLIENT_QM:
+ start_reg = PXP2_REG_RQ_QM_FIRST_ILT;
+ end_reg = PXP2_REG_RQ_QM_LAST_ILT;
+ break;
+ case ILT_CLIENT_SRC:
+ start_reg = PXP2_REG_RQ_SRC_FIRST_ILT;
+ end_reg = PXP2_REG_RQ_SRC_LAST_ILT;
+ break;
+ case ILT_CLIENT_TM:
+ start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
+ end_reg = PXP2_REG_RQ_TM_LAST_ILT;
+ break;
+ }
+ REG_WR(bp, start_reg, (ilt_start + ilt_cli->start));
+ REG_WR(bp, end_reg, (ilt_start + ilt_cli->end));
+ }
+}
+
+static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
+ struct bnx2x_ilt *ilt,
+ struct ilt_client_info *ilt_cli,
+ u8 initop)
+{
+ int i;
+
+ if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
+ return;
+
+ for (i = ilt_cli->start; i <= ilt_cli->end; i++)
+ bnx2x_ilt_line_init_op(bp, ilt, i, initop);
+
+	/* init/clear the ILT boundaries */
+ bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
+}
+
+static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
+ struct ilt_client_info *ilt_cli, u8 initop)
+{
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+
+ bnx2x_ilt_client_init_op_ilt(bp, ilt, ilt_cli, initop);
+}
+
+static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
+ int cli_num, u8 initop)
+{
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+ struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+
+ bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
+}
+
+static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
+{
+ bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
+ bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
+ bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
+ bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
+}
+
+static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
+ u32 psz_reg, u8 initop)
+{
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+ struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+
+ if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
+ return;
+
+ switch (initop) {
+ case INITOP_INIT:
+ /* set in the init-value array */
+ case INITOP_SET:
+ REG_WR(bp, psz_reg, ILOG2(ilt_cli->page_size >> 12));
+ break;
+ case INITOP_CLEAR:
+ break;
+ }
+}
+
+/*
+ * called during init common stage; ILT clients should be initialized
+ * prior to calling this function
+ */
+static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
+{
+ bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
+ PXP2_REG_RQ_CDU_P_SIZE, initop);
+ bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_QM,
+ PXP2_REG_RQ_QM_P_SIZE, initop);
+ bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_SRC,
+ PXP2_REG_RQ_SRC_P_SIZE, initop);
+ bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_TM,
+ PXP2_REG_RQ_TM_P_SIZE, initop);
+}
+
+/****************************************************************************
+* QM initializations
+****************************************************************************/
+#define QM_QUEUES_PER_FUNC 16 /* E1 has 32, but only 16 are used */
+#define QM_INIT_MIN_CID_COUNT 31
+#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT)
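+/*
+ * QM is only (re)programmed when the CID count exceeds 31 (QM_INIT);
+ * QM_REG_CONNNUM is then written in units of 16 connections
+ * (qm_cid_count/16 - 1).
+ */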
+
+/* called during init port stage */
+static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
+ u8 initop)
+{
+ int port = BP_PORT(bp);
+
+ if (QM_INIT(qm_cid_count)) {
+ switch (initop) {
+ case INITOP_INIT:
+ /* set in the init-value array */
+ case INITOP_SET:
+ REG_WR(bp, QM_REG_CONNNUM_0 + port*4,
+ qm_cid_count/16 - 1);
+ break;
+ case INITOP_CLEAR:
+ break;
+ }
+ }
+}
+
+static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
+{
+ int i;
+ u32 wb_data[2];
+
+ wb_data[0] = wb_data[1] = 0;
+
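+	/*
+	 * Each queue gets a base address spaced qm_cid_count * 4 apart
+	 * within its block of QM_QUEUES_PER_FUNC queues, and its pointer
+	 * table entry is zeroed via an indirect 64-bit write.
+	 */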
+ for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
+ REG_WR(bp, QM_REG_BASEADDR + i*4,
+ qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
+ bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8,
+ wb_data, 2);
+
+ if (CHIP_IS_E1H(bp)) {
+ REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4,
+ qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
+ bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
+ wb_data, 2);
+ }
+ }
+}
+
+/* called during init common stage */
+static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
+ u8 initop)
+{
+ if (!QM_INIT(qm_cid_count))
+ return;
+
+ switch (initop) {
+ case INITOP_INIT:
+ /* set in the init-value array */
+ case INITOP_SET:
+ bnx2x_qm_set_ptr_table(bp, qm_cid_count);
+ break;
+ case INITOP_CLEAR:
+ break;
+ }
+}
+
+/****************************************************************************
+* SRC initializations
+****************************************************************************/
+#ifdef BCM_CNIC
+/* called during init func stage */
+static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
+ dma_addr_t t2_mapping, int src_cid_count)
+{
+ int i;
+ int port = BP_PORT(bp);
+
+ /* Initialize T2 */
+ for (i = 0; i < src_cid_count-1; i++)
+ t2[i].next = (u64)(t2_mapping +
+ (i+1)*sizeof(struct src_ent));
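+	/*
+	 * The T2 entries now form a singly-linked free list in DMA memory;
+	 * the FIRSTFREE/LASTFREE writes below hand its head and tail to the
+	 * searcher.
+	 */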
+
+ /* tell the searcher where the T2 table is */
+ REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count);
+
+ bnx2x_wr_64(bp, SRC_REG_FIRSTFREE0 + port*16,
+ U64_LO(t2_mapping), U64_HI(t2_mapping));
+
+ bnx2x_wr_64(bp, SRC_REG_LASTFREE0 + port*16,
+ U64_LO((u64)t2_mapping +
+ (src_cid_count-1) * sizeof(struct src_ent)),
+ U64_HI((u64)t2_mapping +
+ (src_cid_count-1) * sizeof(struct src_ent)));
+}
+#endif
+#endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
new file mode 100644
index 000000000000..818723c9e678
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -0,0 +1,12480 @@
+/* Copyright 2008-2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Written by Yaniv Rosner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mutex.h>
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+
+
+/********************************************************/
+#define ETH_HLEN 14
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
+#define ETH_MIN_PACKET_SIZE 60
+#define ETH_MAX_PACKET_SIZE 1500
+#define ETH_MAX_JUMBO_PACKET_SIZE 9600
+#define MDIO_ACCESS_TIMEOUT 1000
+#define BMAC_CONTROL_RX_ENABLE 2
+#define WC_LANE_MAX 4
+#define I2C_SWITCH_WIDTH 2
+#define I2C_BSC0 0
+#define I2C_BSC1 1
+#define I2C_WA_RETRY_CNT 3
+#define MCPR_IMC_COMMAND_READ_OP 1
+#define MCPR_IMC_COMMAND_WRITE_OP 2
+
+/***********************************************************/
+/* Shortcut definitions */
+/***********************************************************/
+
+#define NIG_LATCH_BC_ENABLE_MI_INT 0
+
+#define NIG_STATUS_EMAC0_MI_INT \
+ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT
+#define NIG_STATUS_XGXS0_LINK10G \
+ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G
+#define NIG_STATUS_XGXS0_LINK_STATUS \
+ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS
+#define NIG_STATUS_XGXS0_LINK_STATUS_SIZE \
+ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE
+#define NIG_STATUS_SERDES0_LINK_STATUS \
+ NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS
+#define NIG_MASK_MI_INT \
+ NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT
+#define NIG_MASK_XGXS0_LINK10G \
+ NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G
+#define NIG_MASK_XGXS0_LINK_STATUS \
+ NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS
+#define NIG_MASK_SERDES0_LINK_STATUS \
+ NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS
+
+#define MDIO_AN_CL73_OR_37_COMPLETE \
+ (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE)
+
+#define XGXS_RESET_BITS \
+ (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB)
+
+#define SERDES_RESET_BITS \
+ (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN | \
+ MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD)
+
+#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
+#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
+#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
+#define AUTONEG_PARALLEL \
+ SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
+#define AUTONEG_SGMII_FIBER_AUTODET \
+ SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
+#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
+
+#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
+#define GP_STATUS_PAUSE_RSOLUTION_RXSIDE \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE
+#define GP_STATUS_SPEED_MASK \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK
+#define GP_STATUS_10M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M
+#define GP_STATUS_100M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M
+#define GP_STATUS_1G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G
+#define GP_STATUS_2_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G
+#define GP_STATUS_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G
+#define GP_STATUS_6G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G
+#define GP_STATUS_10G_HIG \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG
+#define GP_STATUS_10G_CX4 \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4
+#define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX
+#define GP_STATUS_10G_KX4 \
+ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
+#define GP_STATUS_10G_KR MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR
+#define GP_STATUS_10G_XFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI
+#define GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS
+#define GP_STATUS_10G_SFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI
+#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
+#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
+#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
+#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
+#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
+#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD
+#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
+#define LINK_1000XFD LINK_STATUS_SPEED_AND_DUPLEX_1000XFD
+#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD
+#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
+#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
+#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
+#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
+#define LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD
+#define LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD
+
+
+
+/* SFP EEPROM definitions */
+#define SFP_EEPROM_CON_TYPE_ADDR 0x2
+ #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
+ #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
+
+
+#define SFP_EEPROM_COMP_CODE_ADDR 0x3
+ #define SFP_EEPROM_COMP_CODE_SR_MASK (1<<4)
+ #define SFP_EEPROM_COMP_CODE_LR_MASK (1<<5)
+ #define SFP_EEPROM_COMP_CODE_LRM_MASK (1<<6)
+
+#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8
+ #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
+ #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
+
+#define SFP_EEPROM_OPTIONS_ADDR 0x40
+ #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
+#define SFP_EEPROM_OPTIONS_SIZE 2
+
+#define EDC_MODE_LINEAR 0x0022
+#define EDC_MODE_LIMITING 0x0044
+#define EDC_MODE_PASSIVE_DAC 0x0055
+
+
+/* BRB thresholds for E2*/
+#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE 170
+#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
+
+#define PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE 250
+#define PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
+
+#define PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE 10
+#define PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 90
+
+#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE 50
+#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE 250
+
+/* BRB thresholds for E3A0 */
+#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE 290
+#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
+
+#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE 410
+#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
+
+#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE 10
+#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 170
+
+#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE 50
+#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE 410
+
+
+/* BRB thresholds for E3B0 2 port mode*/
+#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 1025
+#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
+
+#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE 1025
+#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
+
+#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE 10
+#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 1025
+
+#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE 50
+#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE 1025
+
+/* only for E3B0*/
+#define PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR 1025
+#define PFC_E3B0_2P_BRB_FULL_LB_XON_THR 1025
+
+/* Lossy + Lossless GUARANTEED == GUART */
+#define PFC_E3B0_2P_MIX_PAUSE_LB_GUART 284
+/* Lossless +Lossless*/
+#define PFC_E3B0_2P_PAUSE_LB_GUART 236
+/* Lossy +Lossy*/
+#define PFC_E3B0_2P_NON_PAUSE_LB_GUART 342
+
+/* Lossy +Lossless*/
+#define PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART 284
+/* Lossless +Lossless*/
+#define PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART 236
+/* Lossy +Lossy*/
+#define PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART 336
+#define PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST 80
+
+#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART 0
+#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST 0
+
+/* BRB thresholds for E3B0 4 port mode */
+#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 304
+#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
+
+#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE 384
+#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
+
+#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE 10
+#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 304
+
+#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE 50
+#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE 384
+
+
+/* only for E3B0*/
+#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR 304
+#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR 384
+#define PFC_E3B0_4P_LB_GUART 120
+
+#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART 120
+#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80
+
+#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART 80
+#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120
+
+#define DCBX_INVALID_COS (0xFF)
+
+#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
+#define ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000)
+#define ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS (1360)
+#define ETS_E3B0_NIG_MIN_W_VAL_20GBPS (2720)
+#define ETS_E3B0_PBF_MIN_W_VAL (10000)
+
+#define MAX_PACKET_SIZE (9700)
+#define WC_UC_TIMEOUT 100
+
+/**********************************************************/
+/* INTERFACE */
+/**********************************************************/
+
+#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
+ bnx2x_cl45_write(_bp, _phy, \
+ (_phy)->def_md_devad, \
+ (_bank + (_addr & 0xf)), \
+ _val)
+
+#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
+ bnx2x_cl45_read(_bp, _phy, \
+ (_phy)->def_md_devad, \
+ (_bank + (_addr & 0xf)), \
+ _val)
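+/*
+ * These shortcuts emulate clause 22 MDIO accesses over clause 45: the bank
+ * provides the upper register address bits, the low 4 bits of the clause 22
+ * address are folded in, and the PHY's default MMD device address is used.
+ */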
+
+static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
+{
+ u32 val = REG_RD(bp, reg);
+
+ val |= bits;
+ REG_WR(bp, reg, val);
+ return val;
+}
+
+static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
+{
+ u32 val = REG_RD(bp, reg);
+
+ val &= ~bits;
+ REG_WR(bp, reg, val);
+ return val;
+}
+
+/******************************************************************/
+/* EPIO/GPIO section */
+/******************************************************************/
+static void bnx2x_get_epio(struct bnx2x *bp, u32 epio_pin, u32 *en)
+{
+ u32 epio_mask, gp_oenable;
+ *en = 0;
+ /* Sanity check */
+ if (epio_pin > 31) {
+ DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to get\n", epio_pin);
+ return;
+ }
+
+ epio_mask = 1 << epio_pin;
+	/* Set this EPIO to input */
+ gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
+ REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask);
+
+ *en = (REG_RD(bp, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin;
+}
+
+static void bnx2x_set_epio(struct bnx2x *bp, u32 epio_pin, u32 en)
+{
+ u32 epio_mask, gp_output, gp_oenable;
+
+ /* Sanity check */
+ if (epio_pin > 31) {
+ DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to set\n", epio_pin);
+ return;
+ }
+ DP(NETIF_MSG_LINK, "Setting EPIO pin %d to %d\n", epio_pin, en);
+ epio_mask = 1 << epio_pin;
+	/* Set the value for this EPIO */
+ gp_output = REG_RD(bp, MCP_REG_MCPR_GP_OUTPUTS);
+ if (en)
+ gp_output |= epio_mask;
+ else
+ gp_output &= ~epio_mask;
+
+ REG_WR(bp, MCP_REG_MCPR_GP_OUTPUTS, gp_output);
+
+	/* Set this EPIO to output */
+ gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
+ REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask);
+}
+
+static void bnx2x_set_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 val)
+{
+ if (pin_cfg == PIN_CFG_NA)
+ return;
+ if (pin_cfg >= PIN_CFG_EPIO0) {
+ bnx2x_set_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
+ } else {
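+		/*
+		 * GPIO pin configs encode four pins per port: the low two
+		 * bits select the pin and the remaining bits the port.
+		 */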
+ u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
+ u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
+ bnx2x_set_gpio(bp, gpio_num, (u8)val, gpio_port);
+ }
+}
+
+static u32 bnx2x_get_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 *val)
+{
+ if (pin_cfg == PIN_CFG_NA)
+ return -EINVAL;
+ if (pin_cfg >= PIN_CFG_EPIO0) {
+ bnx2x_get_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
+ } else {
+ u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
+ u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
+ *val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
+ }
+ return 0;
+}
+/******************************************************************/
+/* ETS section */
+/******************************************************************/
+static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
+{
+ /* ETS disabled configuration*/
+ struct bnx2x *bp = params->bp;
+
+ DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n");
+
+ /*
+ * mapping between entry priority to client number (0,1,2 -debug and
+	 * management clients, 3 - COS0 client, 4 - COS1 client)(HIGHEST)
+ * 3bits client num.
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * cos1-100 cos0-011 dbg1-010 dbg0-001 MCP-000
+ */
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
+ /*
+ * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries, 3 -
+ * COS0 entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
+ /* defines which entries (clients) are subjected to WFQ arbitration */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
+ /*
+ * For strict priority entries defines the number of consecutive
+ * slots for the highest priority.
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ /*
+ * mapping between the CREDIT_WEIGHT registers and actual client
+ * numbers
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0);
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0);
+ REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
+ /* ETS mode disable */
+ REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
+ /*
+ * If ETS mode is enabled (there is no strict priority) defines a WFQ
+ * weight for COS0/COS1.
+ */
+ REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
+ REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710);
+ /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */
+ REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680);
+ REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680);
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
+}
+/******************************************************************************
+* Description:
+*	Returns min_w_val, set according to the line speed.
+******************************************************************************/
+static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
+{
+ u32 min_w_val = 0;
+ /* Calculate min_w_val.*/
+ if (vars->link_up) {
+ if (SPEED_20000 == vars->line_speed)
+ min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
+ else
+ min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
+ } else
+ min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
+	/**
+	 * If the link isn't up (e.g. a static configuration), the link
+	 * is treated as 20Gbps.
+	 */
+ return min_w_val;
+}
+/******************************************************************************
+* Description:
+*	Returns the credit upper bound derived from min_w_val.
+******************************************************************************/
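+/*
+ * Example: min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS (1360) gives
+ * 150 * 1360 = 204000, which exceeds MAX_PACKET_SIZE (9700), so the
+ * upper bound is 204000.
+ */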
+static u32 bnx2x_ets_get_credit_upper_bound(const u32 min_w_val)
+{
+ const u32 credit_upper_bound = (u32)MAXVAL((150 * min_w_val),
+ MAX_PACKET_SIZE);
+ return credit_upper_bound;
+}
+/******************************************************************************
+* Description:
+* Set credit upper bound for NIG.
+******************************************************************************/
+static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
+ const struct link_params *params,
+ const u32 min_w_val)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u32 credit_upper_bound =
+ bnx2x_ets_get_credit_upper_bound(min_w_val);
+
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
+
+ if (0 == port) {
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
+ credit_upper_bound);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
+ credit_upper_bound);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8,
+ credit_upper_bound);
+ }
+}
+/******************************************************************************
+* Description:
+*	Will return the NIG ETS registers to init values, except
+*	credit_upper_bound, which isn't used in this configuration
+*	(no WFQ is enabled) and will be configured according to spec.
+******************************************************************************/
+static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
+ const struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars);
+ /**
+ * mapping between entry priority to client number (0,1,2 -debug and
+ * management clients, 3 - COS0 client, 4 - COS1, ... 8 -
+	 * COS5)(HIGHEST) 4bits client num. TODO_ETS - Should be done by
+ * reset value or init tool
+ */
+ if (port) {
+ REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210);
+ REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0);
+ } else {
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
+ }
+ /**
+ * For strict priority entries defines the number of consecutive
+ * slots for the highest priority.
+ */
+ /* TODO_ETS - Should be done by reset value or init tool */
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
+		   NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ /**
+ * mapping between the CREDIT_WEIGHT registers and actual client
+ * numbers
+ */
+ /* TODO_ETS - Should be done by reset value or init tool */
+ if (port) {
+ /*Port 1 has 6 COS*/
+ REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
+ REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0);
+ } else {
+ /*Port 0 has 9 COS*/
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
+ 0x43210876);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
+ }
+
+ /**
+ * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries, 3 -
+ * COS0 entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+ if (port)
+ REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f);
+ else
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff);
+ /* defines which entries (clients) are subjected to WFQ arbitration */
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
+ NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
+
+	/**
+	 * Please note the register addresses are not contiguous, so a
+	 * for loop here is not appropriate. In 2 port mode only COS0-5
+	 * of port0 can be used; DEBUG0, DEBUG1 and MGMT are never used
+	 * for WFQ. In 4 port mode only COS0-2 of port1 can be used;
+	 * DEBUG0, DEBUG1 and MGMT are never used for WFQ.
+	 */
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
+ if (0 == port) {
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
+ }
+
+ bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val);
+}
+/******************************************************************************
+* Description:
+* Set credit upper bound for PBF.
+******************************************************************************/
+static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
+ const struct link_params *params,
+ const u32 min_w_val)
+{
+ struct bnx2x *bp = params->bp;
+ const u32 credit_upper_bound =
+ bnx2x_ets_get_credit_upper_bound(min_w_val);
+ const u8 port = params->port;
+ u32 base_upper_bound = 0;
+ u8 max_cos = 0;
+ u8 i = 0;
+ /**
+	 * In 2 port mode port0 has COS0-5 that can be used for WFQ. In 4
+	 * port mode port1 has COS0-2 that can be used for WFQ.
+ */
+ if (0 == port) {
+ base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
+ max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
+ } else {
+ base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1;
+ max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
+ }
+
+ for (i = 0; i < max_cos; i++)
+ REG_WR(bp, base_upper_bound + (i << 2), credit_upper_bound);
+}
+
+/******************************************************************************
+* Description:
+*	Will return the PBF ETS registers to init values, except
+*	credit_upper_bound, which isn't used in this configuration
+*	(no WFQ is enabled) and will be configured according to spec.
+******************************************************************************/
+static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
+ u8 i = 0;
+ u32 base_weight = 0;
+ u8 max_cos = 0;
+
+ /**
+	 * mapping between entry priority to client number (0 - COS0
+	 * client, 1 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num.
+ * TODO_ETS - Should be done by reset value or init tool
+ */
+ if (port)
+ /* 0x688 (|011|0 10|00 1|000) */
+		/* 0x688 (|011|0 10|00 1|000) */
+		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, 0x688);
+	else
+		/* (10 1|100 |011|0 10|00 1|000) */
+		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, 0x2C688);
+
+ /* TODO_ETS - Should be done by reset value or init tool */
+ if (port)
+ /* 0x688 (|011|0 10|00 1|000)*/
+ REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688);
+ else
+ /* 0x2C688 (10 1|100 |011|0 10|00 1|000) */
+ REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688);
+
+	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 :
+		   PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0, 0x100);
+
+	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
+		   PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, 0);
+
+	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
+		   PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0, 0);
+ /**
+ * In 2 port mode port0 has COS0-5 that can be used for WFQ.
+ * In 4 port mode port1 has COS0-2 that can be used for WFQ.
+ */
+ if (0 == port) {
+ base_weight = PBF_REG_COS0_WEIGHT_P0;
+ max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
+ } else {
+ base_weight = PBF_REG_COS0_WEIGHT_P1;
+ max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
+ }
+
+ for (i = 0; i < max_cos; i++)
+ REG_WR(bp, base_weight + (0x4 * i), 0);
+
+ bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
+}
+/******************************************************************************
+* Description:
+*	E3B0 disable basically returns the registers to their init values.
+******************************************************************************/
+static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
+ const struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (!CHIP_IS_E3B0(bp)) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_disabled the chip isn't E3B0\n");
+ return -EINVAL;
+ }
+
+ bnx2x_ets_e3b0_nig_disabled(params, vars);
+
+ bnx2x_ets_e3b0_pbf_disabled(params);
+
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+*	Disable basically returns the registers to their init values.
+******************************************************************************/
+int bnx2x_ets_disabled(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ int bnx2x_status = 0;
+
+ if ((CHIP_IS_E2(bp)) || (CHIP_IS_E3A0(bp)))
+ bnx2x_ets_e2e3a0_disabled(params);
+ else if (CHIP_IS_E3B0(bp))
+ bnx2x_status = bnx2x_ets_e3b0_disabled(params, vars);
+ else {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_disabled - chip not supported\n");
+ return -EINVAL;
+ }
+
+ return bnx2x_status;
+}
+
+/******************************************************************************
+* Description:
+*	Set the COS mapping to SP and BW; until this point, none of the COS
+*	are set as SP or BW.
+******************************************************************************/
+static int bnx2x_ets_e3b0_cli_map(const struct link_params *params,
+ const struct bnx2x_ets_params *ets_params,
+ const u8 cos_sp_bitmap,
+ const u8 cos_bw_bitmap)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u8 nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3);
+ const u8 pbf_cli_sp_bitmap = cos_sp_bitmap;
+ const u8 nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3;
+ const u8 pbf_cli_subject2wfq_bitmap = cos_bw_bitmap;
+
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT :
+ NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
+		PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, pbf_cli_sp_bitmap);
+
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
+ NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
+ nig_cli_subject2wfq_bitmap);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0,
+ pbf_cli_subject2wfq_bitmap);
+
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+*	This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers
+*	are not contiguous, so ARB_CREDIT_WEIGHT_0 + offset cannot be used.
+******************************************************************************/
+static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
+ const u8 cos_entry,
+ const u32 min_w_val_nig,
+ const u32 min_w_val_pbf,
+ const u16 total_bw,
+ const u8 bw,
+ const u8 port)
+{
+ u32 nig_reg_adress_crd_weight = 0;
+ u32 pbf_reg_adress_crd_weight = 0;
+ /* Calculate and set BW for this COS - use 1 instead of 0 for BW */
+ const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
+ const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;
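+	/*
+	 * Example: bw = 30, total_bw = 100 and min_w_val_nig = 1360 give
+	 * cos_bw_nig = (30 * 1360) / 100 = 408.
+	 */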
+
+ switch (cos_entry) {
+ case 0:
+ nig_reg_adress_crd_weight =
+ (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
+ break;
+ case 1:
+ nig_reg_adress_crd_weight = (port) ?
+ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
+ break;
+ case 2:
+ nig_reg_adress_crd_weight = (port) ?
+ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
+
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
+ break;
+ case 3:
+ if (port)
+ return -EINVAL;
+ nig_reg_adress_crd_weight =
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
+ pbf_reg_adress_crd_weight =
+ PBF_REG_COS3_WEIGHT_P0;
+ break;
+ case 4:
+ if (port)
+ return -EINVAL;
+ nig_reg_adress_crd_weight =
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
+ pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
+ break;
+ case 5:
+ if (port)
+ return -EINVAL;
+ nig_reg_adress_crd_weight =
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
+ pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
+ break;
+ }
+
+ REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig);
+
+ REG_WR(bp, pbf_reg_adress_crd_weight, cos_bw_pbf);
+
+ return 0;
+}
+/******************************************************************************
+* Description:
+*	Calculate the total BW. A value of 0 isn't legal.
+******************************************************************************/
+static int bnx2x_ets_e3b0_get_total_bw(
+ const struct link_params *params,
+ const struct bnx2x_ets_params *ets_params,
+ u16 *total_bw)
+{
+ struct bnx2x *bp = params->bp;
+ u8 cos_idx = 0;
+
+	*total_bw = 0;
+ /* Calculate total BW requested */
+ for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
+ if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
+ *total_bw +=
+ ets_params->cos[cos_idx].params.bw_params.bw;
+ }
+ }
+
+ /* Check total BW is valid */
+ if ((100 != *total_bw) || (0 == *total_bw)) {
+ if (0 == *total_bw) {
+ DP(NETIF_MSG_LINK,
+			   "bnx2x_ets_E3B0_config total BW shouldn't be 0\n");
+			return -EINVAL;
+		}
+		DP(NETIF_MSG_LINK,
+		   "bnx2x_ets_E3B0_config total BW should be 100\n");
+		/**
+		 * We can handle a case where the BW isn't 100; this can
+		 * happen if the TCs are joined.
+		 */
+ }
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* Invalidate all the sp_pri_to_cos.
+******************************************************************************/
+static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
+{
+ u8 pri = 0;
+ for (pri = 0; pri < DCBX_MAX_NUM_COS; pri++)
+ sp_pri_to_cos[pri] = DCBX_INVALID_COS;
+}
+/******************************************************************************
+* Description:
+* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
+* according to sp_pri_to_cos.
+******************************************************************************/
+static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
+ u8 *sp_pri_to_cos, const u8 pri,
+ const u8 cos_entry)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+ DCBX_E3B0_MAX_NUM_COS_PORT0;
+
+ if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
+				   "parameter: there can't be two COS's with "
+ "the same strict pri\n");
+ return -EINVAL;
+ }
+
+ if (pri > max_num_of_cos) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
+				   "parameter: illegal strict priority\n");
+ return -EINVAL;
+ }
+
+ sp_pri_to_cos[pri] = cos_entry;
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* Returns the correct value according to COS and priority in
+* the sp_pri_cli register.
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
+ const u8 pri_set,
+ const u8 pri_offset,
+ const u8 entry_size)
+{
+ u64 pri_cli_nig = 0;
+ pri_cli_nig = ((u64)(cos + cos_offset)) << (entry_size *
+ (pri_set + pri_offset));
+
+ return pri_cli_nig;
+}
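+/*
+ * Example: for NIG (cos_offset = 3, pri_offset = 3, entry_size = 4),
+ * cos = 2 at pri_set = 0 yields (2 + 3) << (4 * 3) = 0x5000.
+ */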
+/******************************************************************************
+* Description:
+* Returns the correct value according to COS and priority in the
+* sp_pri_cli register for NIG.
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
+{
+ /* MCP Dbg0 and dbg1 are always with higher strict pri*/
+ const u8 nig_cos_offset = 3;
+ const u8 nig_pri_offset = 3;
+
+ return bnx2x_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set,
+ nig_pri_offset, 4);
+}
+/******************************************************************************
+* Description:
+* Returns the correct value according to COS and priority in the
+* sp_pri_cli register for PBF.
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
+{
+ const u8 pbf_cos_offset = 0;
+ const u8 pbf_pri_offset = 0;
+
+ return bnx2x_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set,
+ pbf_pri_offset, 3);
+}
+
+/******************************************************************************
+* Description:
+* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
+*	according to sp_pri_to_cos (i.e. which COS has the higher priority).
+******************************************************************************/
+static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
+ u8 *sp_pri_to_cos)
+{
+ struct bnx2x *bp = params->bp;
+ u8 i = 0;
+ const u8 port = params->port;
+ /* MCP Dbg0 and dbg1 are always with higher strict pri*/
+ u64 pri_cli_nig = 0x210;
+ u32 pri_cli_pbf = 0x0;
+ u8 pri_set = 0;
+ u8 pri_bitmask = 0;
+ const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+ DCBX_E3B0_MAX_NUM_COS_PORT0;
+
+ u8 cos_bit_to_set = (1 << max_num_of_cos) - 1;
+
+ /* Set all the strict priority first */
+ for (i = 0; i < max_num_of_cos; i++) {
+ if (DCBX_INVALID_COS != sp_pri_to_cos[i]) {
+ if (DCBX_MAX_NUM_COS <= sp_pri_to_cos[i]) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
+ "invalid cos entry\n");
+ return -EINVAL;
+ }
+
+ pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
+ sp_pri_to_cos[i], pri_set);
+
+ pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
+ sp_pri_to_cos[i], pri_set);
+ pri_bitmask = 1 << sp_pri_to_cos[i];
+			/* COS is used; remove it from the bitmap. */
+ if (0 == (pri_bitmask & cos_bit_to_set)) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
+					"invalid: there can't be two COS's with"
+ " the same strict pri\n");
+ return -EINVAL;
+ }
+ cos_bit_to_set &= ~pri_bitmask;
+ pri_set++;
+ }
+ }
+
+	/* Set all the non-strict priorities: i = COS */
+ for (i = 0; i < max_num_of_cos; i++) {
+ pri_bitmask = 1 << i;
+ /* Check if COS was already used for SP */
+ if (pri_bitmask & cos_bit_to_set) {
+ /* COS wasn't used for SP */
+ pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
+ i, pri_set);
+
+ pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
+ i, pri_set);
+			/* COS is used; remove it from the bitmap. */
+ cos_bit_to_set &= ~pri_bitmask;
+ pri_set++;
+ }
+ }
+
+ if (pri_set != max_num_of_cos) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg not all "
+ "entries were set\n");
+ return -EINVAL;
+ }
+
+ if (port) {
+ /* Only 6 usable clients*/
+ REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB,
+ (u32)pri_cli_nig);
+
+		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, pri_cli_pbf);
+ } else {
+ /* Only 9 usable clients*/
+ const u32 pri_cli_nig_lsb = (u32) (pri_cli_nig);
+ const u32 pri_cli_nig_msb = (u32) ((pri_cli_nig >> 32) & 0xF);
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB,
+ pri_cli_nig_lsb);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB,
+ pri_cli_nig_msb);
+
+		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, pri_cli_pbf);
+ }
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* Configure the COS to ETS according to BW and SP settings.
+******************************************************************************/
+int bnx2x_ets_e3b0_config(const struct link_params *params,
+ const struct link_vars *vars,
+ const struct bnx2x_ets_params *ets_params)
+{
+ struct bnx2x *bp = params->bp;
+ int bnx2x_status = 0;
+ const u8 port = params->port;
+ u16 total_bw = 0;
+ const u32 min_w_val_nig = bnx2x_ets_get_min_w_val_nig(vars);
+ const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
+ u8 cos_bw_bitmap = 0;
+ u8 cos_sp_bitmap = 0;
+ u8 sp_pri_to_cos[DCBX_MAX_NUM_COS] = {0};
+ const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+ DCBX_E3B0_MAX_NUM_COS_PORT0;
+ u8 cos_entry = 0;
+
+ if (!CHIP_IS_E3B0(bp)) {
+ DP(NETIF_MSG_LINK,
+		   "bnx2x_ets_e3b0_config the chip isn't E3B0\n");
+ return -EINVAL;
+ }
+
+	if (ets_params->num_of_cos > max_num_of_cos) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config the number of COS "
+ "isn't supported\n");
+ return -EINVAL;
+ }
+
+ /* Prepare sp strict priority parameters*/
+ bnx2x_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos);
+
+ /* Prepare BW parameters*/
+ bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
+ &total_bw);
+ if (0 != bnx2x_status) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_E3B0_config get_total_bw failed\n");
+ return -EINVAL;
+ }
+
+ /**
+ * Upper bound is set according to current link speed (min_w_val
+ * should be the same for upper bound and COS credit val).
+ */
+ bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
+ bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
+
+ for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
+ if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
+ cos_bw_bitmap |= (1 << cos_entry);
+ /**
+			 * The function also sets the BW in HW (not the
+			 * mapping yet).
+ */
+ bnx2x_status = bnx2x_ets_e3b0_set_cos_bw(
+ bp, cos_entry, min_w_val_nig, min_w_val_pbf,
+ total_bw,
+ ets_params->cos[cos_entry].params.bw_params.bw,
+ port);
+ } else if (bnx2x_cos_state_strict ==
+				ets_params->cos[cos_entry].state) {
+ cos_sp_bitmap |= (1 << cos_entry);
+
+ bnx2x_status = bnx2x_ets_e3b0_sp_pri_to_cos_set(
+ params,
+ sp_pri_to_cos,
+ ets_params->cos[cos_entry].params.sp_params.pri,
+ cos_entry);
+
+ } else {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_config cos state not valid\n");
+ return -EINVAL;
+ }
+ if (0 != bnx2x_status) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_config set cos bw failed\n");
+ return bnx2x_status;
+ }
+ }
+
+ /* Set SP register (which COS has higher priority) */
+ bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
+ sp_pri_to_cos);
+
+ if (0 != bnx2x_status) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_E3B0_config set_pri_cli_reg failed\n");
+ return bnx2x_status;
+ }
+
+ /* Set client mapping of BW and strict */
+ bnx2x_status = bnx2x_ets_e3b0_cli_map(params, ets_params,
+ cos_sp_bitmap,
+ cos_bw_bitmap);
+
+ if (0 != bnx2x_status) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
+ return bnx2x_status;
+ }
+ return 0;
+}
+static void bnx2x_ets_bw_limit_common(const struct link_params *params)
+{
+	/* ETS enabled BW limit configuration */
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
+ /*
+ * defines which entries (clients) are subjected to WFQ arbitration
+ * COS0 0x8
+ * COS1 0x10
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
+ /*
+ * mapping between the ARB_CREDIT_WEIGHT registers and actual
+ * client numbers (WEIGHT_0 does not actually have to represent
+ * client 0)
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
+ ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1,
+ ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+
+ /* ETS mode enabled*/
+ REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
+
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
+ /*
+ * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
+ * entry, 4 - COS1 entry.
+	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
+
+ /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
+ REG_WR(bp, PBF_REG_COS0_UPPER_BOUND,
+ ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+ REG_WR(bp, PBF_REG_COS1_UPPER_BOUND,
+ ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+}
+
+void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
+ const u32 cos1_bw)
+{
+	/* ETS enabled BW limit configuration */
+ struct bnx2x *bp = params->bp;
+ const u32 total_bw = cos0_bw + cos1_bw;
+ u32 cos0_credit_weight = 0;
+ u32 cos1_credit_weight = 0;
+
+ DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
+
+ if ((0 == total_bw) ||
+ (0 == cos0_bw) ||
+ (0 == cos1_bw)) {
+		DP(NETIF_MSG_LINK, "Total BW and per-COS BW can't be zero\n");
+ return;
+ }
+
+ cos0_credit_weight = (cos0_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
+ total_bw;
+ cos1_credit_weight = (cos1_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
+ total_bw;
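+	/*
+	 * Example: cos0_bw = 40 and cos1_bw = 60 give
+	 * cos0_credit_weight = (40 * 0x5000) / 100 = 0x2000.
+	 */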
+
+ bnx2x_ets_bw_limit_common(params);
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight);
+
+ REG_WR(bp, PBF_REG_COS0_WEIGHT, cos0_credit_weight);
+ REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
+}
+
+int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
+{
+	/* ETS enabled strict configuration */
+ struct bnx2x *bp = params->bp;
+ u32 val = 0;
+
+ DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
+ /*
+ * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries,
+ * 3 - COS0 entry, 4 - COS1 entry.
+	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
+ /*
+ * For strict priority entries defines the number of consecutive slots
+ * for the highest priority.
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ /* ETS mode disable */
+ REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100);
+
+	/* Defines which COS number is the high priority COS */
+ REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
+
+ /*
+ * mapping between entry priority to client number (0,1,2 -debug and
+	 * management clients, 3 - COS0 client, 4 - COS1 client)(HIGHEST)
+ * 3bits client num.
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
+ * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
+ */
+ val = (0 == strict_cos) ? 0x2318 : 0x22E0;
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
+
+ return 0;
+}
+/******************************************************************/
+/* PFC section */
+/******************************************************************/
+
+static void bnx2x_update_pfc_xmac(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
+{
+ struct bnx2x *bp = params->bp;
+ u32 xmac_base;
+ u32 pause_val, pfc0_val, pfc1_val;
+
+ /* XMAC base address */
+ xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+ /* Initialize pause and pfc registers */
+ pause_val = 0x18000;
+ pfc0_val = 0xFFFF8000;
+ pfc1_val = 0x2;
+
+ /* No PFC support */
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) {
+
+ /*
+ * RX flow control - Process pause frame in receive direction
+ */
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN;
+
+ /*
+ * TX flow control - Send pause packet when buffer is full
+ */
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
+ } else {/* PFC support */
+ pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN |
+ XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN |
+ XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
+ XMAC_PFC_CTRL_HI_REG_TX_PFC_EN;
+ }
+
+ /* Write pause and PFC registers */
+ REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
+
+
+ /* Set MAC address for source TX Pause/PFC frames */
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_LO,
+ ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ (params->mac_addr[5])));
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_HI,
+ ((params->mac_addr[0] << 8) |
+ (params->mac_addr[1])));
+
+ udelay(30);
+}
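+
+/*
+ * Worked example (illustrative) of the SA register packing above: for a
+ * MAC address 00:10:18:ab:cd:ef, mac_addr[] = {0x00, 0x10, 0x18, 0xab,
+ * 0xcd, 0xef}, so XMAC_REG_CTRL_SA_LO is written with 0x18abcdef and
+ * XMAC_REG_CTRL_SA_HI with 0x0010.
+ */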
+
+
+static void bnx2x_emac_get_pfc_stat(struct link_params *params,
+ u32 pfc_frames_sent[2],
+ u32 pfc_frames_received[2])
+{
+ /* Read pfc statistic */
+ struct bnx2x *bp = params->bp;
+ u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ u32 val_xon = 0;
+ u32 val_xoff = 0;
+
+ DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n");
+
+ /* PFC received frames */
+ val_xoff = REG_RD(bp, emac_base +
+ EMAC_REG_RX_PFC_STATS_XOFF_RCVD);
+ val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
+ val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
+ val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT;
+
+ pfc_frames_received[0] = val_xon + val_xoff;
+
+ /* PFC sent frames */
+ val_xoff = REG_RD(bp, emac_base +
+ EMAC_REG_RX_PFC_STATS_XOFF_SENT);
+ val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT;
+ val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
+ val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT;
+
+ pfc_frames_sent[0] = val_xon + val_xoff;
+}
+
+/* Read pfc statistics */
+void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
+ u32 pfc_frames_sent[2],
+ u32 pfc_frames_received[2])
+{
+ /* Read pfc statistic */
+ struct bnx2x *bp = params->bp;
+
+ DP(NETIF_MSG_LINK, "pfc statistic\n");
+
+ if (!vars->link_up)
+ return;
+
+ if (MAC_TYPE_EMAC == vars->mac_type) {
+ DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
+ bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
+ pfc_frames_received);
+ }
+}
+/******************************************************************/
+/* MAC/PBF section */
+/******************************************************************/
+static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
+{
+ u32 mode, emac_base;
+ /*
+ * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
+ * (a value of 49==0x31) and make sure that the AUTO poll is off
+ */
+
+ if (CHIP_IS_E2(bp))
+ emac_base = GRCBASE_EMAC0;
+ else
+ emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
+ mode &= ~(EMAC_MDIO_MODE_AUTO_POLL |
+ EMAC_MDIO_MODE_CLOCK_CNT);
+ if (USES_WARPCORE(bp))
+ mode |= (74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
+ else
+ mode |= (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
+
+ mode |= (EMAC_MDIO_MODE_CLAUSE_45);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, mode);
+
+ udelay(40);
+}
+
+static void bnx2x_emac_init(struct link_params *params,
+ struct link_vars *vars)
+{
+ /* reset and unreset the emac core */
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ u32 val;
+ u16 timeout;
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+ udelay(5);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+
+ /* init emac - use read-modify-write */
+ /* self clear reset */
+ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+ EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
+
+ timeout = 200;
+ do {
+ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+ DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
+ if (!timeout) {
+ DP(NETIF_MSG_LINK, "EMAC timeout!\n");
+ return;
+ }
+ timeout--;
+ } while (val & EMAC_MODE_RESET);
+ bnx2x_set_mdio_clk(bp, params->chip_id, port);
+ /* Set mac address */
+ val = ((params->mac_addr[0] << 8) |
+ params->mac_addr[1]);
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val);
+
+ val = ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ params->mac_addr[5]);
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
+}
+
+static void bnx2x_set_xumac_nig(struct link_params *params,
+ u16 tx_pause_en,
+ u8 enable)
+{
+ struct bnx2x *bp = params->bp;
+
+ REG_WR(bp, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN,
+ enable);
+ REG_WR(bp, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN,
+ enable);
+ REG_WR(bp, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN :
+ NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en);
+}
+
+static void bnx2x_umac_enable(struct link_params *params,
+ struct link_vars *vars, u8 lb)
+{
+ u32 val;
+ u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+ struct bnx2x *bp = params->bp;
+ /* Reset UMAC */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
+ usleep_range(1000, 1000);
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
+
+ DP(NETIF_MSG_LINK, "enabling UMAC\n");
+
+ /*
+ * This register determines on which events the MAC will assert
+ * error on the i/f to the NIG along w/ EOP.
+ */
+
+ /*
+ * BD REG_WR(bp, NIG_REG_P0_MAC_RSV_ERR_MASK +
+ * params->port*0x14, 0xfffff)
+ */
+ /* This register opens the gate for the UMAC despite its name */
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
+
+ val = UMAC_COMMAND_CONFIG_REG_PROMIS_EN |
+ UMAC_COMMAND_CONFIG_REG_PAD_EN |
+ UMAC_COMMAND_CONFIG_REG_SW_RESET |
+ UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK;
+ switch (vars->line_speed) {
+ case SPEED_10:
+ val |= (0<<2);
+ break;
+ case SPEED_100:
+ val |= (1<<2);
+ break;
+ case SPEED_1000:
+ val |= (2<<2);
+ break;
+ case SPEED_2500:
+ val |= (3<<2);
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Invalid speed for UMAC %d\n",
+ vars->line_speed);
+ break;
+ }
+ if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE;
+
+ if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
+ val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE;
+
+ REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+ udelay(50);
+
+ /* Set MAC address for source TX Pause/PFC frames (under SW reset) */
+ REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
+ ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ (params->mac_addr[5])));
+ REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1,
+ ((params->mac_addr[0] << 8) |
+ (params->mac_addr[1])));
+
+ /* Enable RX and TX */
+ val &= ~UMAC_COMMAND_CONFIG_REG_PAD_EN;
+ val |= UMAC_COMMAND_CONFIG_REG_TX_ENA |
+ UMAC_COMMAND_CONFIG_REG_RX_ENA;
+ REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+ udelay(50);
+
+ /* Remove SW Reset */
+ val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET;
+
+ /* Check loopback mode */
+ if (lb)
+ val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA;
+ REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+
+ /*
+ * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
+ * length used by the MAC receive logic to check frames.
+ */
+ REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
+ bnx2x_set_xumac_nig(params,
+ ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
+ vars->mac_type = MAC_TYPE_UMAC;
+
+}
+
+static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
+{
+ u32 port4mode_ovwr_val;
+ /* Check 4-port override enabled */
+ port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+ if (port4mode_ovwr_val & (1<<0)) {
+ /* Return 4-port mode override value */
+ return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
+ }
+ /* Return 4-port mode from input pin */
+ return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
+}
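+
+/*
+ * Illustrative reading of the override register above: bit 0 enables the
+ * override and bit 1 carries the overridden value, so 0x3 forces 4-port
+ * mode, 0x1 forces 2-port mode, and 0x0/0x2 fall back to the
+ * MISC_REG_PORT4MODE_EN input pin.
+ */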
+
+/* Define the XMAC mode */
+static void bnx2x_xmac_init(struct bnx2x *bp, u32 max_speed)
+{
+ u32 is_port4mode = bnx2x_is_4_port_mode(bp);
+
+ /*
+ * In 4-port mode, need to set the mode only once, so if XMAC is
+ * already out of reset, it means the mode has already been set,
+ * and it must not reset the XMAC again, since it controls both
+ * ports of the path
+ */
+
+ if (is_port4mode && (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ MISC_REGISTERS_RESET_REG_2_XMAC)) {
+ DP(NETIF_MSG_LINK,
+ "XMAC already out of reset in 4-port mode\n");
+ return;
+ }
+
+ /* Hard reset */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ MISC_REGISTERS_RESET_REG_2_XMAC);
+ usleep_range(1000, 1000);
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ MISC_REGISTERS_RESET_REG_2_XMAC);
+ if (is_port4mode) {
+ DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n");
+
+ /* Set the number of ports on the system side (up to 2) */
+ REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1);
+
+ /* Set the number of ports on the Warp Core to 10G */
+ REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+ } else {
+ /* Set the number of ports on the system side to 1 */
+ REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0);
+ if (max_speed == SPEED_10000) {
+ DP(NETIF_MSG_LINK,
+ "Init XMAC to 10G x 1 port per path\n");
+ /* Set the number of ports on the Warp Core to 10G */
+ REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+ } else {
+ DP(NETIF_MSG_LINK,
+ "Init XMAC to 20G x 2 ports per path\n");
+ /* Set the number of ports on the Warp Core to 20G */
+ REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 1);
+ }
+ }
+ /* Soft reset */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
+ usleep_range(1000, 1000);
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
+
+}
+
+static void bnx2x_xmac_disable(struct link_params *params)
+{
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+ u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+ if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ MISC_REGISTERS_RESET_REG_2_XMAC) {
+ /*
+ * Send an indication to change the state in the NIG back to XON.
+ * Clearing this bit enables the next setting of it to produce a
+ * rising edge.
+ */
+ pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI);
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
+ (pfc_ctrl & ~(1<<1)));
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
+ (pfc_ctrl | (1<<1)));
+ DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0);
+ usleep_range(1000, 1000);
+ bnx2x_set_xumac_nig(params, 0, 0);
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL,
+ XMAC_CTRL_REG_SOFT_RESET);
+ }
+}
+
+static int bnx2x_xmac_enable(struct link_params *params,
+ struct link_vars *vars, u8 lb)
+{
+ u32 val, xmac_base;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "enabling XMAC\n");
+
+ xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+ bnx2x_xmac_init(bp, vars->line_speed);
+
+ /*
+ * This register determines on which events the MAC will assert
+ * error on the i/f to the NIG along w/ EOP.
+ */
+
+ /*
+ * This register tells the NIG whether to send traffic to UMAC
+ * or XMAC
+ */
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
+
+ /* Set Max packet size */
+ REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710);
+
+ /* CRC append for Tx packets */
+ REG_WR(bp, xmac_base + XMAC_REG_TX_CTRL, 0xC800);
+
+ /* update PFC */
+ bnx2x_update_pfc_xmac(params, vars, 0);
+
+ /* Enable TX and RX */
+ val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
+
+ /* Check loopback mode */
+ if (lb)
+ val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
+ bnx2x_set_xumac_nig(params,
+ ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
+
+ vars->mac_type = MAC_TYPE_XMAC;
+
+ return 0;
+}
+static int bnx2x_emac_enable(struct link_params *params,
+ struct link_vars *vars, u8 lb)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ u32 val;
+
+ DP(NETIF_MSG_LINK, "enabling EMAC\n");
+
+ /* Disable BMAC */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
+ /* enable emac and not bmac */
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
+
+ /* ASIC */
+ if (vars->phy_flags & PHY_XGXS_FLAG) {
+ u32 ser_lane = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+
+ DP(NETIF_MSG_LINK, "XGXS\n");
+ /* select the master lanes (out of 0-3) */
+ REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
+ /* select XGXS */
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
+
+ } else { /* SerDes */
+ DP(NETIF_MSG_LINK, "SerDes\n");
+ /* select SerDes */
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
+ }
+
+ bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_RESET);
+ bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+ EMAC_TX_MODE_RESET);
+
+ if (CHIP_REV_IS_SLOW(bp)) {
+ /* config GMII mode */
+ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+ EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
+ } else { /* ASIC */
+ /* pause enable/disable */
+ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_FLOW_EN);
+
+ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) {
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ bnx2x_bits_en(bp, emac_base +
+ EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_FLOW_EN);
+
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ bnx2x_bits_en(bp, emac_base +
+ EMAC_REG_EMAC_TX_MODE,
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ } else
+ bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+ EMAC_TX_MODE_FLOW_EN);
+ }
+
+ /* KEEP_VLAN_TAG, promiscuous */
+ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
+ val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
+
+ /*
+ * Setting this bit causes MAC control frames (except for pause
+ * frames) to be passed on for processing. This setting has no
+ * effect on the operation of the pause frames. This bit affects
+ * all packets regardless of RX Parser packet sorting logic.
+ * Turn the PFC off to make sure we are in Xon state before
+ * enabling it.
+ */
+ EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
+ DP(NETIF_MSG_LINK, "PFC is enabled\n");
+ /* Enable PFC again */
+ EMAC_WR(bp, EMAC_REG_RX_PFC_MODE,
+ EMAC_REG_RX_PFC_MODE_RX_EN |
+ EMAC_REG_RX_PFC_MODE_TX_EN |
+ EMAC_REG_RX_PFC_MODE_PRIORITIES);
+
+ EMAC_WR(bp, EMAC_REG_RX_PFC_PARAM,
+ ((0x0101 <<
+ EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) |
+ (0x00ff <<
+ EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT)));
+ val |= EMAC_RX_MODE_KEEP_MAC_CONTROL;
+ }
+ EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
+
+ /* Set Loopback */
+ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+ if (lb)
+ val |= 0x810;
+ else
+ val &= ~0x810;
+ EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
+
+ /* enable emac */
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
+
+ /* enable emac for jumbo packets */
+ EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
+ (EMAC_RX_MTU_SIZE_JUMBO_ENA |
+ (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
+
+ /* strip CRC */
+ REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
+
+ /* disable the NIG in/out to the bmac */
+ REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
+
+ /* enable the NIG in/out to the emac */
+ REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
+ val = 0;
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) ||
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val = 1;
+
+ REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
+
+ REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
+
+ vars->mac_type = MAC_TYPE_EMAC;
+ return 0;
+}
+
+static void bnx2x_update_pfc_bmac1(struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 wb_data[2];
+ struct bnx2x *bp = params->bp;
+ u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+
+ u32 val = 0x14;
+ if ((!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) &&
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
+ /* Enable BigMAC to react on received Pause packets */
+ val |= (1<<5);
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
+
+ /* tx control */
+ val = 0xc0;
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) &&
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val |= 0x800000;
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2);
+}
+
+static void bnx2x_update_pfc_bmac2(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
+{
+ /*
+ * Set rx control: Strip CRC and enable BigMAC to relay
+ * control packets to the system as well
+ */
+ u32 wb_data[2];
+ struct bnx2x *bp = params->bp;
+ u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ u32 val = 0x14;
+
+ if ((!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) &&
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
+ /* Enable BigMAC to react on received Pause packets */
+ val |= (1<<5);
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
+ udelay(30);
+
+ /* Tx control */
+ val = 0xc0;
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) &&
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val |= 0x800000;
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2);
+
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
+ DP(NETIF_MSG_LINK, "PFC is enabled\n");
+ /* Enable PFC RX & TX & STATS and set 8 COS */
+ wb_data[0] = 0x0;
+ wb_data[0] |= (1<<0); /* RX */
+ wb_data[0] |= (1<<1); /* TX */
+ wb_data[0] |= (1<<2); /* Force initial Xon */
+ wb_data[0] |= (1<<3); /* 8 cos */
+ wb_data[0] |= (1<<5); /* STATS */
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL,
+ wb_data, 2);
+ /* Clear the force Xon */
+ wb_data[0] &= ~(1<<2);
+ } else {
+ DP(NETIF_MSG_LINK, "PFC is disabled\n");
+ /* disable PFC RX & TX & STATS and set 8 COS */
+ wb_data[0] = 0x8;
+ wb_data[1] = 0;
+ }
+
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
+
+ /*
+ * Set the time (base unit is 512 bit times) between automatic
+ * re-sending of PP packets, and enable automatic re-send of
+ * Per-Priority Packets as long as pp_gen is asserted and
+ * pp_disable is low.
+ */
+ val = 0x8000;
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+ val |= (1<<16); /* enable automatic re-send */
+
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
+ wb_data, 2);
+
+ /* mac control */
+ val = 0x3; /* Enable RX and TX */
+ if (is_lb) {
+ val |= 0x4; /* Local loopback */
+ DP(NETIF_MSG_LINK, "enable bmac loopback\n");
+ }
+ /* When PFC enabled, Pass pause frames towards the NIG. */
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+ val |= ((1<<6)|(1<<5));
+
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
+}
+
+
+/* PFC BRB internal port configuration params */
+struct bnx2x_pfc_brb_threshold_val {
+ u32 pause_xoff;
+ u32 pause_xon;
+ u32 full_xoff;
+ u32 full_xon;
+};
+
+struct bnx2x_pfc_brb_e3b0_val {
+ u32 full_lb_xoff_th;
+ u32 full_lb_xon_threshold;
+ u32 lb_guarantied;
+ u32 mac_0_class_t_guarantied;
+ u32 mac_0_class_t_guarantied_hyst;
+ u32 mac_1_class_t_guarantied;
+ u32 mac_1_class_t_guarantied_hyst;
+};
+
+struct bnx2x_pfc_brb_th_val {
+ struct bnx2x_pfc_brb_threshold_val pauseable_th;
+ struct bnx2x_pfc_brb_threshold_val non_pauseable_th;
+};
+static int bnx2x_pfc_brb_get_config_params(
+ struct link_params *params,
+ struct bnx2x_pfc_brb_th_val *config_val)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n");
+ if (CHIP_IS_E2(bp)) {
+ config_val->pauseable_th.pause_xoff =
+ PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ config_val->pauseable_th.pause_xon =
+ PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ config_val->pauseable_th.full_xoff =
+ PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ config_val->pauseable_th.full_xon =
+ PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
+ /* non-pauseable */
+ config_val->non_pauseable_th.pause_xoff =
+ PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.pause_xon =
+ PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xoff =
+ PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xon =
+ PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ } else if (CHIP_IS_E3A0(bp)) {
+ config_val->pauseable_th.pause_xoff =
+ PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ config_val->pauseable_th.pause_xon =
+ PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ config_val->pauseable_th.full_xoff =
+ PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ config_val->pauseable_th.full_xon =
+ PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
+ /* non-pauseable */
+ config_val->non_pauseable_th.pause_xoff =
+ PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.pause_xon =
+ PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xoff =
+ PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xon =
+ PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ } else if (CHIP_IS_E3B0(bp)) {
+ if (params->phy[INT_PHY].flags &
+ FLAGS_4_PORT_MODE) {
+ config_val->pauseable_th.pause_xoff =
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ config_val->pauseable_th.pause_xon =
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ config_val->pauseable_th.full_xoff =
+ PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ config_val->pauseable_th.full_xon =
+ PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
+ /* non-pauseable */
+ config_val->non_pauseable_th.pause_xoff =
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.pause_xon =
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xoff =
+ PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xon =
+ PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ } else {
+ config_val->pauseable_th.pause_xoff =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ config_val->pauseable_th.pause_xon =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ config_val->pauseable_th.full_xoff =
+ PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ config_val->pauseable_th.full_xon =
+ PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
+ /* non-pauseable */
+ config_val->non_pauseable_th.pause_xoff =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.pause_xon =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xoff =
+ PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xon =
+ PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ }
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+
+static void bnx2x_pfc_brb_get_e3b0_config_params(struct link_params *params,
+ struct bnx2x_pfc_brb_e3b0_val
+ *e3b0_val,
+ u32 cos0_pauseable,
+ u32 cos1_pauseable)
+{
+ if (params->phy[INT_PHY].flags & FLAGS_4_PORT_MODE) {
+ e3b0_val->full_lb_xoff_th =
+ PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
+ e3b0_val->full_lb_xon_threshold =
+ PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_4P_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
+ e3b0_val->mac_0_class_t_guarantied_hyst =
+ PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
+ e3b0_val->mac_1_class_t_guarantied =
+ PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
+ e3b0_val->mac_1_class_t_guarantied_hyst =
+ PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
+ } else {
+ e3b0_val->full_lb_xoff_th =
+ PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
+ e3b0_val->full_lb_xon_threshold =
+ PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
+ e3b0_val->mac_0_class_t_guarantied_hyst =
+ PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
+ e3b0_val->mac_1_class_t_guarantied =
+ PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
+ e3b0_val->mac_1_class_t_guarantied_hyst =
+ PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
+
+ if (cos0_pauseable != cos1_pauseable) {
+ /* non-pauseable = Lossy, pauseable = Lossless */
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
+ } else if (cos0_pauseable) {
+ /* Lossless + Lossless */
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
+ } else {
+ /* Lossy + Lossy */
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_NON_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
+ }
+ }
+}
+static int bnx2x_update_pfc_brb(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_nig_brb_pfc_port_params
+ *pfc_params)
+{
+ struct bnx2x *bp = params->bp;
+ struct bnx2x_pfc_brb_th_val config_val = { {0} };
+ struct bnx2x_pfc_brb_threshold_val *reg_th_config =
+ &config_val.pauseable_th;
+ struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0};
+ int set_pfc = params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED;
+ int bnx2x_status = 0;
+ u8 port = params->port;
+
+ /* default - pause configuration */
+ reg_th_config = &config_val.pauseable_th;
+ bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val);
+ if (0 != bnx2x_status)
+ return bnx2x_status;
+
+ if (set_pfc && pfc_params)
+ /* First COS */
+ if (!pfc_params->cos0_pauseable)
+ reg_th_config = &config_val.non_pauseable_th;
+ /*
+ * The number of free blocks below which the pause signal to class 0
+ * of MAC #n is asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 :
+ BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0,
+ reg_th_config->pause_xoff);
+ /*
+ * The number of free blocks above which the pause signal to class 0
+ * of MAC #n is de-asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 :
+ BRB1_REG_PAUSE_0_XON_THRESHOLD_0, reg_th_config->pause_xon);
+ /*
+ * The number of free blocks below which the full signal to class 0
+ * of MAC #n is asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 :
+ BRB1_REG_FULL_0_XOFF_THRESHOLD_0, reg_th_config->full_xoff);
+ /*
+ * The number of free blocks above which the full signal to class 0
+ * of MAC #n is de-asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
+ BRB1_REG_FULL_0_XON_THRESHOLD_0, reg_th_config->full_xon);
+
+ if (set_pfc && pfc_params) {
+ /* Second COS */
+ if (pfc_params->cos1_pauseable)
+ reg_th_config = &config_val.pauseable_th;
+ else
+ reg_th_config = &config_val.non_pauseable_th;
+ /*
+ * The number of free blocks below which the pause signal to
+ * class 1 of MAC #n is asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
+ BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
+ reg_th_config->pause_xoff);
+ /*
+ * The number of free blocks above which the pause signal to
+ * class 1 of MAC #n is de-asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
+ BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
+ reg_th_config->pause_xon);
+ /*
+ * The number of free blocks below which the full signal to
+ * class 1 of MAC #n is asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
+ BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
+ reg_th_config->full_xoff);
+ /*
+ * The number of free blocks above which the full signal to
+ * class 1 of MAC #n is de-asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
+ BRB1_REG_FULL_1_XON_THRESHOLD_0,
+ reg_th_config->full_xon);
+
+
+ if (CHIP_IS_E3B0(bp)) {
+ /* Should be done by init tool */
+ /*
+ * BRB_empty_for_dup = BRB1_REG_BRB_EMPTY_THRESHOLD
+ * reset value is 944
+ */
+
+ /*
+ * The hysteresis on the guaranteed buffer space for the LB
+ * port before signaling XON.
+ */
+ REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 80);
+
+ bnx2x_pfc_brb_get_e3b0_config_params(
+ params,
+ &e3b0_val,
+ pfc_params->cos0_pauseable,
+ pfc_params->cos1_pauseable);
+ /*
+ * The number of free blocks below which the full signal to
+ * the LB port is asserted.
+ */
+ REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
+ e3b0_val.full_lb_xoff_th);
+ /*
+ * The number of free blocks above which the full signal to
+ * the LB port is de-asserted.
+ */
+ REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
+ e3b0_val.full_lb_xon_threshold);
+
+ /* The number of blocks guaranteed for the LB port */
+ REG_WR(bp, BRB1_REG_LB_GUARANTIED,
+ e3b0_val.lb_guarantied);
+
+ /* The number of blocks guaranteed for the MAC #n port, n=0,1 */
+ REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
+ 2 * e3b0_val.mac_0_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
+ 2 * e3b0_val.mac_1_class_t_guarantied);
+ /* The number of blocks guaranteed for class #t in MAC0, t=0,1 */
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
+ e3b0_val.mac_0_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
+ e3b0_val.mac_0_class_t_guarantied);
+ /*
+ * The hysteresis on the guaranteed buffer space for class #t
+ * in MAC0, t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
+ e3b0_val.mac_0_class_t_guarantied_hyst);
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
+ e3b0_val.mac_0_class_t_guarantied_hyst);
+
+ /* The number of blocks guaranteed for class #t in MAC1, t=0,1 */
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
+ e3b0_val.mac_1_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
+ e3b0_val.mac_1_class_t_guarantied);
+ /*
+ * The hysteresis on the guaranteed buffer space for class #t
+ * in MAC1, t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
+ e3b0_val.mac_1_class_t_guarantied_hyst);
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
+ e3b0_val.mac_1_class_t_guarantied_hyst);
+
+ }
+
+ }
+
+ return bnx2x_status;
+}
+
+/******************************************************************************
+* Description:
+* This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers are
+* not contiguous, so addressing them as ARB_CREDIT_WEIGHT_0 + offset is not
+* suitable.
+******************************************************************************/
+int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
+ u8 cos_entry,
+ u32 priority_mask, u8 port)
+{
+ u32 nig_reg_rx_priority_mask_add = 0;
+
+ switch (cos_entry) {
+ case 0:
+ nig_reg_rx_priority_mask_add = (port) ?
+ NIG_REG_P1_RX_COS0_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS0_PRIORITY_MASK;
+ break;
+ case 1:
+ nig_reg_rx_priority_mask_add = (port) ?
+ NIG_REG_P1_RX_COS1_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS1_PRIORITY_MASK;
+ break;
+ case 2:
+ nig_reg_rx_priority_mask_add = (port) ?
+ NIG_REG_P1_RX_COS2_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS2_PRIORITY_MASK;
+ break;
+ case 3:
+ if (port)
+ return -EINVAL;
+ nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK;
+ break;
+ case 4:
+ if (port)
+ return -EINVAL;
+ nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK;
+ break;
+ case 5:
+ if (port)
+ return -EINVAL;
+ nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK;
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Invalid cos entry %d\n", cos_entry);
+ return -EINVAL;
+ }
+
+ REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask);
+
+ return 0;
+}
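+
+/*
+ * Usage sketch (illustrative): mapping packet priorities 0-2 to COS0
+ * and priority 3 to COS1 on port 0 would look like
+ *	bnx2x_pfc_nig_rx_priority_mask(bp, 0, 0x7, 0);
+ *	bnx2x_pfc_nig_rx_priority_mask(bp, 1, 0x8, 0);
+ * where each set bit n in the mask selects packet priority n for that
+ * COS entry.
+ */
+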
+static void bnx2x_update_mng(struct link_params *params, u32 link_status)
+{
+ struct bnx2x *bp = params->bp;
+
+ REG_WR(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ port_mb[params->port].link_status), link_status);
+}
+
+static void bnx2x_update_pfc_nig(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_nig_brb_pfc_port_params *nig_params)
+{
+ u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
+ u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0;
+ u32 pkt_priority_to_cos = 0;
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+
+ int set_pfc = params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED;
+ DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
+
+ /*
+ * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
+ * MAC control frames (that are not pause packets)
+ * will be forwarded to the XCM.
+ */
+ xcm_mask = REG_RD(bp,
+ port ? NIG_REG_LLH1_XCM_MASK :
+ NIG_REG_LLH0_XCM_MASK);
+ /*
+ * NIG params will override non-PFC params, since it's possible to
+ * do a transition from PFC to SAFC
+ */
+ if (set_pfc) {
+ pause_enable = 0;
+ llfc_out_en = 0;
+ llfc_enable = 0;
+ if (CHIP_IS_E3(bp))
+ ppp_enable = 0;
+ else
+ ppp_enable = 1;
+ xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
+ NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
+ xcm0_out_en = 0;
+ p0_hwpfc_enable = 1;
+ } else {
+ if (nig_params) {
+ llfc_out_en = nig_params->llfc_out_en;
+ llfc_enable = nig_params->llfc_enable;
+ pause_enable = nig_params->pause_enable;
+ } else /* default non-PFC mode - PAUSE */
+ pause_enable = 1;
+
+ xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
+ NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
+ xcm0_out_en = 1;
+ }
+
+ if (CHIP_IS_E3(bp))
+ REG_WR(bp, port ? NIG_REG_BRB1_PAUSE_IN_EN :
+ NIG_REG_BRB0_PAUSE_IN_EN, pause_enable);
+ REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 :
+ NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
+ REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 :
+ NIG_REG_LLFC_ENABLE_0, llfc_enable);
+ REG_WR(bp, port ? NIG_REG_PAUSE_ENABLE_1 :
+ NIG_REG_PAUSE_ENABLE_0, pause_enable);
+
+ REG_WR(bp, port ? NIG_REG_PPP_ENABLE_1 :
+ NIG_REG_PPP_ENABLE_0, ppp_enable);
+
+ REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
+ NIG_REG_LLH0_XCM_MASK, xcm_mask);
+
+ REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
+
+ /* output enable for RX_XCM # IF */
+ REG_WR(bp, NIG_REG_XCM0_OUT_EN, xcm0_out_en);
+
+ /* HW PFC TX enable */
+ REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable);
+
+ if (nig_params) {
+ u8 i = 0;
+ pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
+
+ for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++)
+ bnx2x_pfc_nig_rx_priority_mask(bp, i,
+ nig_params->rx_cos_priority_mask[i], port);
+
+ REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
+ NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
+ nig_params->llfc_high_priority_classes);
+
+ REG_WR(bp, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 :
+ NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0,
+ nig_params->llfc_low_priority_classes);
+ }
+ REG_WR(bp, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS :
+ NIG_REG_P0_PKT_PRIORITY_TO_COS,
+ pkt_priority_to_cos);
+}
+
+int bnx2x_update_pfc(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_nig_brb_pfc_port_params *pfc_params)
+{
+ /*
+ * The PFC and pause are orthogonal to one another, meaning that
+ * when PFC is enabled, pause is disabled, and when PFC is
+ * disabled, pause is set according to the pause result.
+ */
+ u32 val;
+ struct bnx2x *bp = params->bp;
+ int bnx2x_status = 0;
+ u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC);
+
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+ vars->link_status |= LINK_STATUS_PFC_ENABLED;
+ else
+ vars->link_status &= ~LINK_STATUS_PFC_ENABLED;
+
+ bnx2x_update_mng(params, vars->link_status);
+
+ /* update NIG params */
+ bnx2x_update_pfc_nig(params, vars, pfc_params);
+
+ /* update BRB params */
+ bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
+ if (0 != bnx2x_status)
+ return bnx2x_status;
+
+ if (!vars->link_up)
+ return bnx2x_status;
+
+ DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
+ if (CHIP_IS_E3(bp))
+ bnx2x_update_pfc_xmac(params, vars, 0);
+ else {
+ val = REG_RD(bp, MISC_REG_RESET_REG_2);
+ if ((val &
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
+ == 0) {
+ DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
+ bnx2x_emac_enable(params, vars, 0);
+ return bnx2x_status;
+ }
+
+ if (CHIP_IS_E2(bp))
+ bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
+ else
+ bnx2x_update_pfc_bmac1(params, vars);
+
+ val = 0;
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) ||
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val = 1;
+ REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
+ }
+ return bnx2x_status;
+}
+
+
+static int bnx2x_bmac1_enable(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ u32 wb_data[2];
+ u32 val;
+
+ DP(NETIF_MSG_LINK, "Enabling BigMAC1\n");
+
+ /* XGXS control */
+ wb_data[0] = 0x3c;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
+ wb_data, 2);
+
+ /* tx MAC SA */
+ wb_data[0] = ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ params->mac_addr[5]);
+ wb_data[1] = ((params->mac_addr[0] << 8) |
+ params->mac_addr[1]);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
+
+ /* mac control */
+ val = 0x3;
+ if (is_lb) {
+ val |= 0x4;
+ DP(NETIF_MSG_LINK, "enable bmac loopback\n");
+ }
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
+
+ /* set rx mtu */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
+
+ bnx2x_update_pfc_bmac1(params, vars);
+
+ /* set tx mtu */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
+
+ /* set cnt max size */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
+
+ /* configure safc */
+ wb_data[0] = 0x1000200;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
+ wb_data, 2);
+
+ return 0;
+}
+
+static int bnx2x_bmac2_enable(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ u32 wb_data[2];
+
+ DP(NETIF_MSG_LINK, "Enabling BigMAC2\n");
+
+ wb_data[0] = 0;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
+ udelay(30);
+
+ /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
+ wb_data[0] = 0x3c;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
+ wb_data, 2);
+
+ udelay(30);
+
+ /* tx MAC SA */
+ wb_data[0] = ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ params->mac_addr[5]);
+ wb_data[1] = ((params->mac_addr[0] << 8) |
+ params->mac_addr[1]);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
+ wb_data, 2);
+
+ udelay(30);
+
+ /* Configure SAFC */
+ wb_data[0] = 0x1000200;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
+ wb_data, 2);
+ udelay(30);
+
+ /* set rx mtu */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
+ udelay(30);
+
+ /* set tx mtu */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
+ udelay(30);
+ /* set cnt max size */
+ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
+ udelay(30);
+ bnx2x_update_pfc_bmac2(params, vars, is_lb);
+
+ return 0;
+}
+
+static int bnx2x_bmac_enable(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
+{
+ int rc = 0;
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+ u32 val;
+ /* reset and unreset the BigMac */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ msleep(1);
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
+ /* enable access for bmac registers */
+ REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
+
+ /* Enable BMAC according to BMAC type*/
+ if (CHIP_IS_E2(bp))
+ rc = bnx2x_bmac2_enable(params, vars, is_lb);
+ else
+ rc = bnx2x_bmac1_enable(params, vars, is_lb);
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
+ REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
+ val = 0;
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) ||
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val = 1;
+ REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1);
+ REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
+
+ vars->mac_type = MAC_TYPE_BMAC;
+ return rc;
+}
+
+static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
+{
+ u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ u32 wb_data[2];
+ u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
+
+ /* Only if the bmac is out of reset */
+ if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
+ nig_bmac_enable) {
+
+ if (CHIP_IS_E2(bp)) {
+ /* Clear Rx Enable bit in BMAC_CONTROL register */
+ REG_RD_DMAE(bp, bmac_addr +
+ BIGMAC2_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
+ wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
+ REG_WR_DMAE(bp, bmac_addr +
+ BIGMAC2_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
+ } else {
+ /* Clear Rx Enable bit in BMAC_CONTROL register */
+ REG_RD_DMAE(bp, bmac_addr +
+ BIGMAC_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
+ wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
+ REG_WR_DMAE(bp, bmac_addr +
+ BIGMAC_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
+ }
+ msleep(1);
+ }
+}
+
+static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
+ u32 line_speed)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 init_crd, crd;
+ u32 count = 1000;
+
+ /* disable port */
+ REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
+
+ /* wait for init credit */
+ init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
+ crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+ DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
+
+ while ((init_crd != crd) && count) {
+ msleep(5);
+
+ crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+ count--;
+ }
+ crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+ if (init_crd != crd) {
+ DP(NETIF_MSG_LINK, "BUG! init_crd 0x%x != crd 0x%x\n",
+ init_crd, crd);
+ return -EINVAL;
+ }
+
+ if (flow_ctrl & BNX2X_FLOW_CTRL_RX ||
+ line_speed == SPEED_10 ||
+ line_speed == SPEED_100 ||
+ line_speed == SPEED_1000 ||
+ line_speed == SPEED_2500) {
+ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
+ /* update threshold */
+ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
+ /* update init credit */
+ init_crd = 778; /* (800-18-4) */
+
+ } else {
+ u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
+ ETH_OVREHEAD)/16;
+ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
+ /* update threshold */
+ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
+ /* update init credit */
+ switch (line_speed) {
+ case SPEED_10000:
+ init_crd = thresh + 553 - 22;
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+ line_speed);
+ return -EINVAL;
+ }
+ }
+ REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
+ DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
+ line_speed, init_crd);
+
+ /* probe the credit changes */
+ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
+ msleep(5);
+ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
+
+ /* enable port */
+ REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
+ return 0;
+}
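+
+/*
+ * Worked example (illustrative, assuming ETH_MAX_JUMBO_PACKET_SIZE is
+ * 9600): with no RX flow control at 10G, thresh = (9600 + ETH_OVREHEAD)
+ * / 16 credit blocks and init_crd = thresh + 531; with flow control or
+ * speeds up to 2.5G, the fixed init_crd of 778 (800 - 18 - 4) is used
+ * together with a zero arbitration threshold.
+ */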
+
+/**
+ * bnx2x_get_emac_base - retrieve emac base address
+ *
+ * @bp: driver handle
+ * @mdc_mdio_access: access type
+ * @port: port id
+ *
+ * This function selects the MDC/MDIO access (through emac0 or
+ * emac1) depending on the mdc_mdio_access, port and port-swap
+ * settings. Each phy has a default access mode, which could also
+ * be overridden by nvram configuration. This parameter, whether
+ * it is the default phy configuration or the nvram override
+ * configuration, is passed here as mdc_mdio_access and selects
+ * the emac_base for the CL45 read/write operations
+ */
+static u32 bnx2x_get_emac_base(struct bnx2x *bp,
+ u32 mdc_mdio_access, u8 port)
+{
+ u32 emac_base = 0;
+ switch (mdc_mdio_access) {
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE:
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0:
+ if (REG_RD(bp, NIG_REG_PORT_SWAP))
+ emac_base = GRCBASE_EMAC1;
+ else
+ emac_base = GRCBASE_EMAC0;
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1:
+ if (REG_RD(bp, NIG_REG_PORT_SWAP))
+ emac_base = GRCBASE_EMAC0;
+ else
+ emac_base = GRCBASE_EMAC1;
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH:
+ emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED:
+ emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
+ break;
+ default:
+ break;
+ }
+ return emac_base;
+}
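+
+/*
+ * Illustrative selection table for the above (NIG_REG_PORT_SWAP clear):
+ *   ACCESS1_EMAC0 -> EMAC0          ACCESS1_EMAC1   -> EMAC1
+ *   ACCESS1_BOTH  -> EMAC<port>     ACCESS1_SWAPPED -> EMAC<1 - port>
+ * With NIG_REG_PORT_SWAP set, the EMAC0/EMAC1 cases are inverted.
+ */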
+
+/******************************************************************/
+/* CL22 access functions */
+/******************************************************************/
+static int bnx2x_cl22_write(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 reg, u16 val)
+{
+ u32 tmp, mode;
+ u8 i;
+ int rc = 0;
+ /* Switch to CL22 */
+ mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
+ mode & ~EMAC_MDIO_MODE_CLAUSE_45);
+
+ /* address */
+ tmp = ((phy->addr << 21) | (reg << 16) | val |
+ EMAC_MDIO_COMM_COMMAND_WRITE_22 |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+ udelay(5);
+ break;
+ }
+ }
+ if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "write phy register failed\n");
+ rc = -EFAULT;
+ }
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
+ return rc;
+}
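+
+/*
+ * Illustrative layout of the CL22 command word built above: for phy
+ * address 1, register 0 (the CL22 control register) and value 0x8000,
+ *	tmp = (1 << 21) | (0 << 16) | 0x8000 |
+ *	      EMAC_MDIO_COMM_COMMAND_WRITE_22 | EMAC_MDIO_COMM_START_BUSY;
+ * i.e. bits 21-25 carry the phy address, bits 16-20 the register number
+ * and bits 0-15 the data.
+ */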
+
+static int bnx2x_cl22_read(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 reg, u16 *ret_val)
+{
+ u32 val, mode;
+ u16 i;
+ int rc = 0;
+
+ /* Switch to CL22 */
+ mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
+ mode & ~EMAC_MDIO_MODE_CLAUSE_45);
+
+ /* address */
+ val = ((phy->addr << 21) | (reg << 16) |
+ EMAC_MDIO_COMM_COMMAND_READ_22 |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+ *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
+ udelay(5);
+ break;
+ }
+ }
+ if (val & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "read phy register failed\n");
+
+ *ret_val = 0;
+ rc = -EFAULT;
+ }
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
+ return rc;
+}
+
+/******************************************************************/
+/* CL45 access functions */
+/******************************************************************/
+static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 *ret_val)
+{
+ u32 val;
+ u16 i;
+ int rc = 0;
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
+ /* address */
+ val = ((phy->addr << 21) | (devad << 16) | reg |
+ EMAC_MDIO_COMM_COMMAND_ADDRESS |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+ udelay(5);
+ break;
+ }
+ }
+ if (val & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "read phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
+ *ret_val = 0;
+ rc = -EFAULT;
+ } else {
+ /* data */
+ val = ((phy->addr << 21) | (devad << 16) |
+ EMAC_MDIO_COMM_COMMAND_READ_45 |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ val = REG_RD(bp, phy->mdio_ctrl +
+ EMAC_REG_EMAC_MDIO_COMM);
+ if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+ *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
+ break;
+ }
+ }
+ if (val & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "read phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
+ *ret_val = 0;
+ rc = -EFAULT;
+ }
+ }
+ /* Work around for E3 A0 */
+ if (phy->flags & FLAGS_MDC_MDIO_WA) {
+ phy->flags ^= FLAGS_DUMMY_READ;
+ if (phy->flags & FLAGS_DUMMY_READ) {
+ u16 temp_val;
+ bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
+ }
+ }
+
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
+ return rc;
+}
+
+static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 val)
+{
+ u32 tmp;
+ u8 i;
+ int rc = 0;
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
+
+ /* address */
+
+ tmp = ((phy->addr << 21) | (devad << 16) | reg |
+ EMAC_MDIO_COMM_COMMAND_ADDRESS |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+ udelay(5);
+ break;
+ }
+ }
+ if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "write phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
+ rc = -EFAULT;
+
+ } else {
+ /* data */
+ tmp = ((phy->addr << 21) | (devad << 16) | val |
+ EMAC_MDIO_COMM_COMMAND_WRITE_45 |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ tmp = REG_RD(bp, phy->mdio_ctrl +
+ EMAC_REG_EMAC_MDIO_COMM);
+ if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+ udelay(5);
+ break;
+ }
+ }
+ if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "write phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
+ rc = -EFAULT;
+ }
+ }
+ /* Work around for E3 A0 */
+ if (phy->flags & FLAGS_MDC_MDIO_WA) {
+ phy->flags ^= FLAGS_DUMMY_READ;
+ if (phy->flags & FLAGS_DUMMY_READ) {
+ u16 temp_val;
+ bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
+ }
+ }
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
+ return rc;
+}
+
+
+/******************************************************************/
+/* BSC access functions from E3 */
+/******************************************************************/
+static void bnx2x_bsc_module_sel(struct link_params *params)
+{
+ int idx;
+ u32 board_cfg, sfp_ctrl;
+ u32 i2c_pins[I2C_SWITCH_WIDTH], i2c_val[I2C_SWITCH_WIDTH];
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ /* Read I2C output PINs */
+ board_cfg = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.shared_hw_config.board));
+ i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK;
+ i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >>
+ SHARED_HW_CFG_E3_I2C_MUX1_SHIFT;
+
+ /* Read I2C output value */
+ sfp_ctrl = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg));
+ i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0;
+ i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0;
+ DP(NETIF_MSG_LINK, "Setting BSC switch\n");
+ for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++)
+ bnx2x_set_cfg_pin(bp, i2c_pins[idx], i2c_val[idx]);
+}
+
+static int bnx2x_bsc_read(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 sl_devid,
+ u16 sl_addr,
+ u8 lc_addr,
+ u8 xfer_cnt,
+ u32 *data_array)
+{
+ u32 val, i;
+ int rc = 0;
+ struct bnx2x *bp = params->bp;
+
+ if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) {
+ DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid);
+ return -EINVAL;
+ }
+
+ if (xfer_cnt > 16) {
+ DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
+ xfer_cnt);
+ return -EINVAL;
+ }
+ bnx2x_bsc_module_sel(params);
+
+ xfer_cnt = 16 - lc_addr;
+
+ /* enable the engine */
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+ val |= MCPR_IMC_COMMAND_ENABLE;
+ REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+ /* program slave device ID */
+ val = (sl_devid << 16) | sl_addr;
+ REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val);
+
+ /* start xfer with a 0-byte write to update the address pointer */
+ val = (MCPR_IMC_COMMAND_ENABLE) |
+ (MCPR_IMC_COMMAND_WRITE_OP <<
+ MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
+ (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
+ REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+ /* poll for completion */
+ i = 0;
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+ while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
+ udelay(10);
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+ if (i++ > 1000) {
+ DP(NETIF_MSG_LINK, "wr 0 byte timed out after %d try\n",
+ i);
+ rc = -EFAULT;
+ break;
+ }
+ }
+ if (rc == -EFAULT)
+ return rc;
+
+ /* start xfer with read op */
+ val = (MCPR_IMC_COMMAND_ENABLE) |
+ (MCPR_IMC_COMMAND_READ_OP <<
+ MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
+ (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) |
+ (xfer_cnt);
+ REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+ /* poll for completion */
+ i = 0;
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+ while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
+ udelay(10);
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+ if (i++ > 1000) {
+ DP(NETIF_MSG_LINK, "rd op timed out after %d try\n", i);
+ rc = -EFAULT;
+ break;
+ }
+ }
+ if (rc == -EFAULT)
+ return rc;
+
+ for (i = (lc_addr >> 2); i < 4; i++) {
+ data_array[i] = REG_RD(bp, (MCP_REG_MCPR_IMC_DATAREG0 + i*4));
+#ifdef __BIG_ENDIAN
+ data_array[i] = ((data_array[i] & 0x000000ff) << 24) |
+ ((data_array[i] & 0x0000ff00) << 8) |
+ ((data_array[i] & 0x00ff0000) >> 8) |
+ ((data_array[i] & 0xff000000) >> 24);
+#endif
+ }
+ return rc;
+}
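+
+/*
+ * Usage sketch (illustrative): reading the first 16 bytes of an SFP
+ * EEPROM at slave device id 0xa0 into a 4-dword buffer might look like
+ *	u32 buf[4];
+ *	rc = bnx2x_bsc_read(params, phy, 0xa0, 0, 0, 16, buf);
+ * Note that the function clamps the transfer to 16 - lc_addr bytes and
+ * byte-swaps each dword on big-endian hosts.
+ */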
+
+static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 or_val)
+{
+ u16 val;
+ bnx2x_cl45_read(bp, phy, devad, reg, &val);
+ bnx2x_cl45_write(bp, phy, devad, reg, val | or_val);
+}
+
+int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 *ret_val)
+{
+ u8 phy_index;
+ /*
+ * Probe for the phy according to the given phy_addr, and execute
+ * the read request on it
+ */
+ for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
+ if (params->phy[phy_index].addr == phy_addr) {
+ return bnx2x_cl45_read(params->bp,
+ &params->phy[phy_index], devad,
+ reg, ret_val);
+ }
+ }
+ return -EINVAL;
+}
+
+int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 val)
+{
+ u8 phy_index;
+ /*
+ * Probe for the phy according to the given phy_addr, and execute
+ * the write request on it
+ */
+ for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
+ if (params->phy[phy_index].addr == phy_addr) {
+ return bnx2x_cl45_write(params->bp,
+ &params->phy[phy_index], devad,
+ reg, val);
+ }
+ }
+ return -EINVAL;
+}
+static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u8 lane = 0;
+ struct bnx2x *bp = params->bp;
+ u32 path_swap, path_swap_ovr;
+ u8 path, port;
+
+ path = BP_PATH(bp);
+ port = params->port;
+
+ if (bnx2x_is_4_port_mode(bp)) {
+ u32 port_swap, port_swap_ovr;
+
+		/* Figure out path swap value */
+ path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR);
+ if (path_swap_ovr & 0x1)
+ path_swap = (path_swap_ovr & 0x2);
+ else
+ path_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP);
+
+ if (path_swap)
+ path = path ^ 1;
+
+		/* Figure out port swap value */
+ port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR);
+ if (port_swap_ovr & 0x1)
+ port_swap = (port_swap_ovr & 0x2);
+ else
+ port_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP);
+
+ if (port_swap)
+ port = port ^ 1;
+
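+		/* Lane = 2 * port + path, after any swap overrides above */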
+ lane = (port<<1) + path;
+ } else { /* two port mode - no port swap */
+
+		/* Figure out path swap value */
+ path_swap_ovr =
+ REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR);
+ if (path_swap_ovr & 0x1) {
+ path_swap = (path_swap_ovr & 0x2);
+ } else {
+ path_swap =
+ REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP);
+ }
+ if (path_swap)
+ path = path ^ 1;
+
+		lane = path << 1;
+ }
+ return lane;
+}
+
+static void bnx2x_set_aer_mmd(struct link_params *params,
+ struct bnx2x_phy *phy)
+{
+ u32 ser_lane;
+ u16 offset, aer_val;
+ struct bnx2x *bp = params->bp;
+ ser_lane = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+
+ offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ?
+ (phy->addr + ser_lane) : 0;
+
+ if (USES_WARPCORE(bp)) {
+ aer_val = bnx2x_get_warpcore_lane(phy, params);
+ /*
+ * In Dual-lane mode, two lanes are joined together,
+ * so in order to configure them, the AER broadcast method is
+ * used here.
+ * 0x200 is the broadcast address for lanes 0,1
+ * 0x201 is the broadcast address for lanes 2,3
+ */
+ if (phy->flags & FLAGS_WC_DUAL_MODE)
+ aer_val = (aer_val >> 1) | 0x200;
+	} else if (CHIP_IS_E2(bp)) {
+		aer_val = 0x3800 + offset - 1;
+	} else {
+		aer_val = 0x3800 + offset;
+	}
+ DP(NETIF_MSG_LINK, "Set AER to 0x%x\n", aer_val);
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, aer_val);
+}
+
+/******************************************************************/
+/* Internal phy section */
+/******************************************************************/
+
+static void bnx2x_set_serdes_access(struct bnx2x *bp, u8 port)
+{
+ u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+
+ /* Set Clause 22 */
+ REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 1);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
+ udelay(500);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
+ udelay(500);
+ /* Set Clause 45 */
+ REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 0);
+}
+
+static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
+{
+ u32 val;
+
+ DP(NETIF_MSG_LINK, "bnx2x_serdes_deassert\n");
+
+ val = SERDES_RESET_BITS << (port*16);
+
+ /* reset and unreset the SerDes/XGXS */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
+ udelay(500);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
+
+ bnx2x_set_serdes_access(bp, port);
+
+ REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
+ DEFAULT_PHY_DEV_ADDR);
+}
+
+static void bnx2x_xgxs_deassert(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port;
+ u32 val;
+ DP(NETIF_MSG_LINK, "bnx2x_xgxs_deassert\n");
+ port = params->port;
+
+ val = XGXS_RESET_BITS << (port*16);
+
+ /* reset and unreset the SerDes/XGXS */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
+ udelay(500);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
+
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+ params->phy[INT_PHY].def_md_devad);
+}
+
+static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
+ struct link_params *params, u16 *ieee_fc)
+{
+ struct bnx2x *bp = params->bp;
+ *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
+	/*
+	 * Resolve pause mode and advertisement. Please refer to Table
+	 * 28B-3 of the 802.3ab-1999 spec.
+	 */
+
+ switch (phy->req_flow_ctrl) {
+ case BNX2X_FLOW_CTRL_AUTO:
+ if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ else
+ *ieee_fc |=
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ break;
+
+ case BNX2X_FLOW_CTRL_TX:
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ break;
+
+ case BNX2X_FLOW_CTRL_RX:
+ case BNX2X_FLOW_CTRL_BOTH:
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ break;
+
+ case BNX2X_FLOW_CTRL_NONE:
+ default:
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
+ break;
+ }
+ DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
+}
+
+static void set_phy_vars(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 actual_phy_idx, phy_index, link_cfg_idx;
+ u8 phy_config_swapped = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+ for (phy_index = INT_PHY; phy_index < params->num_phys;
+ phy_index++) {
+ link_cfg_idx = LINK_CONFIG_IDX(phy_index);
+ actual_phy_idx = phy_index;
+ if (phy_config_swapped) {
+ if (phy_index == EXT_PHY1)
+ actual_phy_idx = EXT_PHY2;
+ else if (phy_index == EXT_PHY2)
+ actual_phy_idx = EXT_PHY1;
+ }
+ params->phy[actual_phy_idx].req_flow_ctrl =
+ params->req_flow_ctrl[link_cfg_idx];
+
+ params->phy[actual_phy_idx].req_line_speed =
+ params->req_line_speed[link_cfg_idx];
+
+ params->phy[actual_phy_idx].speed_cap_mask =
+ params->speed_cap_mask[link_cfg_idx];
+
+ params->phy[actual_phy_idx].req_duplex =
+ params->req_duplex[link_cfg_idx];
+
+ if (params->req_line_speed[link_cfg_idx] ==
+ SPEED_AUTO_NEG)
+ vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
+
+ DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x,"
+ " speed_cap_mask %x\n",
+ params->phy[actual_phy_idx].req_flow_ctrl,
+ params->phy[actual_phy_idx].req_line_speed,
+ params->phy[actual_phy_idx].speed_cap_mask);
+ }
+}
+
+static void bnx2x_ext_phy_set_pause(struct link_params *params,
+ struct bnx2x_phy *phy,
+ struct link_vars *vars)
+{
+ u16 val;
+ struct bnx2x *bp = params->bp;
+	/* Read-modify-write the pause advertisement */
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
+
+ val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
+
+ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+ val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+ val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
+ }
+	DP(NETIF_MSG_LINK, "Ext phy AN advertise 0x%x\n", val);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
+}
+
+static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
+{ /* LD LP */
+ switch (pause_result) { /* ASYM P ASYM P */
+ case 0xb: /* 1 0 1 1 */
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
+ break;
+
+ case 0xe: /* 1 1 1 0 */
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
+ break;
+
+ case 0x5: /* 0 1 0 1 */
+ case 0x7: /* 0 1 1 1 */
+ case 0xd: /* 1 1 0 1 */
+ case 0xf: /* 1 1 1 1 */
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
+ break;
+
+ default:
+ break;
+ }
+ if (pause_result & (1<<0))
+ vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
+ if (pause_result & (1<<1))
+ vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
+}
+
+static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 ld_pause; /* local */
+ u16 lp_pause; /* link partner */
+ u16 pause_result;
+ u8 ret = 0;
+	/* Forced settings take precedence over the autoneg result */
+
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+ if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
+ vars->flow_ctrl = phy->req_flow_ctrl;
+ else if (phy->req_line_speed != SPEED_AUTO_NEG)
+ vars->flow_ctrl = params->req_fc_auto_adv;
+ else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+ ret = 1;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) {
+ bnx2x_cl22_read(bp, phy,
+ 0x4, &ld_pause);
+ bnx2x_cl22_read(bp, phy,
+ 0x5, &lp_pause);
+ } else {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+ }
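+		/* Pack LD/LP pause bits into the 4-bit code decoded by bnx2x_pause_resolve() */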
+ pause_result = (ld_pause &
+ MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
+ pause_result |= (lp_pause &
+ MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
+ DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
+ pause_result);
+ bnx2x_pause_resolve(vars, pause_result);
+ }
+ return ret;
+}
+/******************************************************************/
+/* Warpcore section */
+/******************************************************************/
+/* The init_internal_warpcore should mirror the xgxs,
+ * i.e. reset the lane (if needed), set the AER for the
+ * init configuration, and set/clear the SGMII flag. Internal
+ * phy init is done purely in the phy_init stage.
+ */
+static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
+					struct link_params *params,
+					struct link_vars *vars)
+{
+ u16 val16 = 0, lane, bam37 = 0;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
+ /* Check adding advertisement for 1G KX */
+ if (((vars->line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (vars->line_speed == SPEED_1000)) {
+ u16 sd_digital;
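+		/* Bit 5 of the AN advertisement register is the 1000BASE-KX ability bit (802.3ap Clause 73) */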
+ val16 |= (1<<5);
+
+ /* Enable CL37 1G Parallel Detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ (sd_digital | 0x1));
+
+		DP(NETIF_MSG_LINK, "Advertise 1G\n");
+ }
+ if (((vars->line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
+ (vars->line_speed == SPEED_10000)) {
+ /* Check adding advertisement for 10G KR */
+ val16 |= (1<<7);
+ /* Enable 10G Parallel Detect */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
+
+		DP(NETIF_MSG_LINK, "Advertise 10G\n");
+ }
+
+ /* Set Transmit PMD settings */
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+ (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+ (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
+ 0x03f0);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL,
+ 0x03f0);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ 0x383f);
+
+ /* Advertised speeds */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
+
+	/* Advertise and set FEC (Forward Error Correction) */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2,
+ (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY |
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ));
+
+ /* Enable CL37 BAM */
+ if (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, &bam37);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, bam37 | 1);
+ DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+ }
+
+ /* Advertise pause */
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+
+ /* Enable Autoneg */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1000);
+
+ /* Over 1G - AN local device user page 1 */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL3_UP1, 0x1f);
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC7, &val16);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100);
+}
+
+static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val;
+
+ /* Disable Autoneg */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00);
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL3_UP1, 0x1);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC7, 0xa);
+
+ /* Disable CL36 PCS Tx */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0);
+
+	/* Double Wide Single Data Rate @ PLL rate */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF);
+
+	/* Leave CL72 training enabled; it is needed for KR */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
+ 0x2);
+
+ /* Leave CL72 enabled */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ val | 0x3800);
+
+ /* Set speed via PMA/PMD register */
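+	/* 0x2040: Clause 45 control-1 speed-select bits 13 and 6 set, bits 5:2 clear, i.e. 10 Gb/s */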
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
+
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB);
+
+	/* Enable encoded forced speed */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30);
+
+	/* TX: scramble payload only (64/66 scrambler) */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX66_CONTROL, 0x9);
+
+	/* RX: scramble payload only (64/66 scrambler) */
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, 0xF9);
+
+	/* Set and clear loopback to reset the 64/66 decoder */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
+}
+
+static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 is_xfi)
+{
+ struct bnx2x *bp = params->bp;
+ u16 misc1_val, tap_val, tx_driver_val, lane, val;
+ /* Hold rxSeqStart */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000));
+
+ /* Hold tx_fifo_reset */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1));
+
+ /* Disable CL73 AN */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
+
+ /* Disable 100FX Enable and Auto-Detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL1, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA));
+
+ /* Disable 100FX Idle detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL3, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL3, (val | 0x0080));
+
+ /* Set Block address to Remote PHY & Clear forced_speed[5] */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, (val & 0xFF7F));
+
+ /* Turn off auto-detect & fiber mode */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ (val & 0xFFEE));
+
+ /* Set filter_force_link, disable_false_link and parallel_detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ ((val | 0x0006) & 0xFFFE));
+
+ /* Set XFI / SFI */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, &misc1_val);
+
+ misc1_val &= ~(0x1f);
+
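+	/* MISC1[4:0] force-speed encoding: presumably 0x5 selects 10G XFI and 0x9 selects 10G SFI */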
+ if (is_xfi) {
+ misc1_val |= 0x5;
+ tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
+ (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
+ (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
+ tx_driver_val =
+ ((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+ (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+ (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+
+ } else {
+ misc1_val |= 0x9;
+ tap_val = ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
+ (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
+ (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
+ tx_driver_val =
+ ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+ (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+ (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+ }
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
+
+ /* Set Transmit PMD settings */
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX_FIR_TAP,
+ tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ tx_driver_val);
+
+ /* Enable fiber mode, enable and invert sig_det */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd);
+
+ /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080);
+
+ /* 10G XFI Full Duplex */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100);
+
+ /* Release tx_fifo_reset */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, val & 0xFFFE);
+
+ /* Release rxSeqStart */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val & 0x7FFF));
+}
+
+static void bnx2x_warpcore_set_20G_KR2(struct bnx2x *bp,
+ struct bnx2x_phy *phy)
+{
+	DP(NETIF_MSG_LINK, "KR2 is not supported yet\n");
+}
+
+static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 lane)
+{
+ /* Rx0 anaRxControl1G */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX0_ANARXCONTROL1G, 0x90);
+
+ /* Rx2 anaRxControl1G */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX2_ANARXCONTROL1G, 0x90);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW0, 0xE070);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW1, 0xC0D0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW2, 0xA0B0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW3, 0x8090);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW1_MASK, 0xF0F0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW2_MASK, 0xF0F0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW3_MASK, 0xF0F0);
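+	/* The RX66 SCW values and masks above presumably program the 64/66 sync code-word matching used for 20G */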
+
+ /* Serdes Digital Misc1 */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6008);
+
+ /* Serdes Digital4 Misc3 */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, 0x8088);
+
+ /* Set Transmit PMD settings */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX_FIR_TAP,
+ ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
+ (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
+ (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) |
+ MDIO_WC_REG_TX_FIR_TAP_ENABLE));
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+ (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+ (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+}
+
+static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 fiber_mode)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val16, digctrl_kx1, digctrl_kx2;
+ u8 lane;
+
+ lane = bnx2x_get_warpcore_lane(phy, params);
+
+ /* Clear XFI clock comp in non-10G single lane mode. */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
+
+ if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ /* SGMII Autoneg */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
+ val16 | 0x1000);
+ DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n");
+ } else {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
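+		/* Standard MII control layout: bit 12 = AN enable, bit 13 = 100M, bit 6 = 1000M, bit 8 = full duplex */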
+ val16 &= 0xcfbf;
+ switch (phy->req_line_speed) {
+ case SPEED_10:
+ break;
+ case SPEED_100:
+ val16 |= 0x2000;
+ break;
+ case SPEED_1000:
+ val16 |= 0x0040;
+ break;
+ default:
+ DP(NETIF_MSG_LINK,
+ "Speed not supported: 0x%x\n", phy->req_line_speed);
+ return;
+ }
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ val16 |= 0x0100;
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16);
+
+ DP(NETIF_MSG_LINK, "set SGMII force speed %d\n",
+ phy->req_line_speed);
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ DP(NETIF_MSG_LINK, " (readback) %x\n", val16);
+ }
+
+ /* SGMII Slave mode and disable signal detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &digctrl_kx1);
+ if (fiber_mode)
+ digctrl_kx1 = 1;
+ else
+ digctrl_kx1 &= 0xff4a;
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ digctrl_kx1);
+
+ /* Turn off parallel detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &digctrl_kx2);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ (digctrl_kx2 & ~(1<<2)));
+
+ /* Re-enable parallel detect */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ (digctrl_kx2 | (1<<2)));
+
+ /* Enable autodet */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ (digctrl_kx1 | 0x10));
+}
+
+static void bnx2x_warpcore_reset_lane(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 reset)
+{
+ u16 val;
+	/* Set (reset == 1) or release (reset == 0) the per-lane reset */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6, &val);
+ if (reset)
+ val |= 0xC000;
+ else
+ val &= 0x3FFF;
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6, val);
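+	/* Read back, presumably to flush the write before returning */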
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6, &val);
+}
+
+/* Clear SFI/XFI link settings registers */
+static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 lane)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val16;
+
+ /* Set XFI clock comp as default. */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13));
+
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL1, 0x014a);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL3, 0x0800);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, 0x8008);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000);
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX_FIR_TAP, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140);
+ bnx2x_warpcore_reset_lane(bp, phy, 0);
+}
+
+static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
+ u32 chip_id,
+ u32 shmem_base, u8 port,
+ u8 *gpio_num, u8 *gpio_port)
+{
+ u32 cfg_pin;
+ *gpio_num = 0;
+ *gpio_port = 0;
+ if (CHIP_IS_E3(bp)) {
+ cfg_pin = (REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_MOD_ABS_MASK) >>
+ PORT_HW_CFG_E3_MOD_ABS_SHIFT;
+
+		/*
+		 * This should not happen. This function is called upon an
+		 * interrupt triggered by a GPIO (since EPIO can only
+		 * generate interrupts to the MCP). So if this function was
+		 * called and none of the GPIOs is set, the configuration is
+		 * badly broken.
+		 */
+ if ((cfg_pin < PIN_CFG_GPIO0_P0) ||
+ (cfg_pin > PIN_CFG_GPIO3_P1)) {
+ DP(NETIF_MSG_LINK,
+ "ERROR: Invalid cfg pin %x for module detect indication\n",
+ cfg_pin);
+ return -EINVAL;
+ }
+
+ *gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3;
+ *gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2;
+ } else {
+ *gpio_num = MISC_REGISTERS_GPIO_3;
+ *gpio_port = port;
+ }
+ DP(NETIF_MSG_LINK, "MOD_ABS int GPIO%d_P%d\n", *gpio_num, *gpio_port);
+ return 0;
+}
+
+static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 gpio_num, gpio_port;
+ u32 gpio_val;
+ if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id,
+ params->shmem_base, params->port,
+ &gpio_num, &gpio_port) != 0)
+ return 0;
+ gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
+
+	/* A low MOD_ABS GPIO means an SFP module is plugged in */
+	return (gpio_val == 0);
+}
+
+static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u32 serdes_net_if;
+ u8 fiber_mode;
+ u16 lane = bnx2x_get_warpcore_lane(phy, params);
+ serdes_net_if = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_NET_SERDES_IF_MASK);
+ DP(NETIF_MSG_LINK, "Begin Warpcore init, link_speed %d, "
+ "serdes_net_if = 0x%x\n",
+ vars->line_speed, serdes_net_if);
+ bnx2x_set_aer_mmd(params, phy);
+
+ vars->phy_flags |= PHY_XGXS_FLAG;
+ if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) ||
+ (phy->req_line_speed &&
+ ((phy->req_line_speed == SPEED_100) ||
+ (phy->req_line_speed == SPEED_10)))) {
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ DP(NETIF_MSG_LINK, "Setting SGMII mode\n");
+ bnx2x_warpcore_clear_regs(phy, params, lane);
+ bnx2x_warpcore_set_sgmii_speed(phy, params, 0);
+ } else {
+ switch (serdes_net_if) {
+ case PORT_HW_CFG_NET_SERDES_IF_KR:
+ /* Enable KR Auto Neg */
+ if (params->loopback_mode == LOOPBACK_NONE)
+ bnx2x_warpcore_enable_AN_KR(phy, params, vars);
+ else {
+ DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n");
+ bnx2x_warpcore_set_10G_KR(phy, params, vars);
+ }
+ break;
+
+ case PORT_HW_CFG_NET_SERDES_IF_XFI:
+ bnx2x_warpcore_clear_regs(phy, params, lane);
+ if (vars->line_speed == SPEED_10000) {
+ DP(NETIF_MSG_LINK, "Setting 10G XFI\n");
+ bnx2x_warpcore_set_10G_XFI(phy, params, 1);
+ } else {
+ if (SINGLE_MEDIA_DIRECT(params)) {
+ DP(NETIF_MSG_LINK, "1G Fiber\n");
+ fiber_mode = 1;
+ } else {
+ DP(NETIF_MSG_LINK, "10/100/1G SGMII\n");
+ fiber_mode = 0;
+ }
+ bnx2x_warpcore_set_sgmii_speed(phy,
+ params,
+ fiber_mode);
+ }
+
+ break;
+
+ case PORT_HW_CFG_NET_SERDES_IF_SFI:
+
+ bnx2x_warpcore_clear_regs(phy, params, lane);
+ if (vars->line_speed == SPEED_10000) {
+ DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
+ bnx2x_warpcore_set_10G_XFI(phy, params, 0);
+ } else if (vars->line_speed == SPEED_1000) {
+ DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
+ bnx2x_warpcore_set_sgmii_speed(phy, params, 1);
+ }
+ /* Issue Module detection */
+ if (bnx2x_is_sfp_module_plugged(phy, params))
+ bnx2x_sfp_module_detection(phy, params);
+ break;
+
+ case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
+ if (vars->line_speed != SPEED_20000) {
+ DP(NETIF_MSG_LINK, "Speed not supported yet\n");
+ return;
+ }
+ DP(NETIF_MSG_LINK, "Setting 20G DXGXS\n");
+ bnx2x_warpcore_set_20G_DXGXS(bp, phy, lane);
+ /* Issue Module detection */
+
+ bnx2x_sfp_module_detection(phy, params);
+ break;
+
+ case PORT_HW_CFG_NET_SERDES_IF_KR2:
+ if (vars->line_speed != SPEED_20000) {
+ DP(NETIF_MSG_LINK, "Speed not supported yet\n");
+ return;
+ }
+ DP(NETIF_MSG_LINK, "Setting 20G KR2\n");
+ bnx2x_warpcore_set_20G_KR2(bp, phy);
+ break;
+
+ default:
+ DP(NETIF_MSG_LINK,
+ "Unsupported Serdes Net Interface 0x%x\n",
+ serdes_net_if);
+ return;
+ }
+ }
+
+ /* Take lane out of reset after configuration is finished */
+ bnx2x_warpcore_reset_lane(bp, phy, 0);
+ DP(NETIF_MSG_LINK, "Exit config init\n");
+}
+
+static void bnx2x_sfp_e3_set_transmitter(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 tx_en)
+{
+ struct bnx2x *bp = params->bp;
+ u32 cfg_pin;
+ u8 port = params->port;
+
+ cfg_pin = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_TX_LASER_MASK;
+	/* Drive the inverse of tx_en since this pin is DISABLE_TX_LASER */
+ DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en);
+	bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1);
+	/* For 20G, the expected pin to be used is 3 pins after the current */
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
+ bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1);
+}
+
+static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val16;
+ bnx2x_sfp_e3_set_transmitter(params, phy, 0);
+ bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
+ bnx2x_set_aer_mmd(params, phy);
+ /* Global register */
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+
+ /* Clear loopback settings (if any) */
+ /* 10G & 20G */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 &
+ 0xBFFF);
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 & 0xfffe);
+
+ /* Update those 1-copy registers */
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+ /* Enable 1G MDIO (1-copy) */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ val16 & ~0x10);
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2,
+ val16 & 0xff00);
+}
+
+static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val16;
+ u32 lane;
+ DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n",
+ params->loopback_mode, phy->req_line_speed);
+
+ if (phy->req_line_speed < SPEED_10000) {
+ /* 10/100/1000 */
+
+ /* Update those 1-copy registers */
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+ /* Enable 1G MDIO (1-copy) */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ val16 | 0x10);
+ /* Set 1G loopback based on lane (1-copy) */
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2,
+ val16 | (1<<lane));
+
+ /* Switch back to 4-copy registers */
+ bnx2x_set_aer_mmd(params, phy);
+ /* Global loopback, not recommended. */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
+ 0x4000);
+ } else {
+ /* 10G & 20G */
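+		/* Bit 14 of the MII control register is the loopback enable */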
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
+ 0x4000);
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 | 0x1);
+ }
+}
+
+void bnx2x_link_status_update(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 link_10g_plus;
+ u8 port = params->port;
+ u32 sync_offset, media_types;
+ /* Update PHY configuration */
+ set_phy_vars(params, vars);
+
+ vars->link_status = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ port_mb[port].link_status));
+
+ vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
+ vars->phy_flags = PHY_XGXS_FLAG;
+ if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
+ vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
+
+ if (vars->link_up) {
+ DP(NETIF_MSG_LINK, "phy link up\n");
+
+ vars->phy_link_up = 1;
+ vars->duplex = DUPLEX_FULL;
+ switch (vars->link_status &
+ LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
+ case LINK_10THD:
+ vars->duplex = DUPLEX_HALF;
+ /* fall thru */
+ case LINK_10TFD:
+ vars->line_speed = SPEED_10;
+ break;
+
+ case LINK_100TXHD:
+ vars->duplex = DUPLEX_HALF;
+ /* fall thru */
+ case LINK_100T4:
+ case LINK_100TXFD:
+ vars->line_speed = SPEED_100;
+ break;
+
+ case LINK_1000THD:
+ vars->duplex = DUPLEX_HALF;
+ /* fall thru */
+ case LINK_1000TFD:
+ vars->line_speed = SPEED_1000;
+ break;
+
+ case LINK_2500THD:
+ vars->duplex = DUPLEX_HALF;
+ /* fall thru */
+ case LINK_2500TFD:
+ vars->line_speed = SPEED_2500;
+ break;
+
+ case LINK_10GTFD:
+ vars->line_speed = SPEED_10000;
+ break;
+ case LINK_20GTFD:
+ vars->line_speed = SPEED_20000;
+ break;
+ default:
+ break;
+ }
+ vars->flow_ctrl = 0;
+ if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
+ vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
+
+ if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
+ vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
+
+ if (!vars->flow_ctrl)
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+ if (vars->line_speed &&
+ ((vars->line_speed == SPEED_10) ||
+ (vars->line_speed == SPEED_100))) {
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ } else {
+ vars->phy_flags &= ~PHY_SGMII_FLAG;
+ }
+ if (vars->line_speed &&
+ USES_WARPCORE(bp) &&
+ (vars->line_speed == SPEED_1000))
+ vars->phy_flags |= PHY_SGMII_FLAG;
+		/* Anything 10G and over uses the BMAC (or XMAC on Warpcore) */
+ link_10g_plus = (vars->line_speed >= SPEED_10000);
+
+ if (link_10g_plus) {
+ if (USES_WARPCORE(bp))
+ vars->mac_type = MAC_TYPE_XMAC;
+ else
+ vars->mac_type = MAC_TYPE_BMAC;
+ } else {
+ if (USES_WARPCORE(bp))
+ vars->mac_type = MAC_TYPE_UMAC;
+ else
+ vars->mac_type = MAC_TYPE_EMAC;
+ }
+ } else { /* link down */
+ DP(NETIF_MSG_LINK, "phy link down\n");
+
+ vars->phy_link_up = 0;
+
+ vars->line_speed = 0;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+ /* indicate no mac active */
+ vars->mac_type = MAC_TYPE_NONE;
+ if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
+ vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ }
+
+ /* Sync media type */
+ sync_offset = params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].media_type);
+ media_types = REG_RD(bp, sync_offset);
+
+ params->phy[INT_PHY].media_type =
+ (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >>
+ PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT;
+ params->phy[EXT_PHY1].media_type =
+ (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >>
+ PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT;
+ params->phy[EXT_PHY2].media_type =
+ (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >>
+ PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT;
+ DP(NETIF_MSG_LINK, "media_types = 0x%x\n", media_types);
+
+ /* Sync AEU offset */
+ sync_offset = params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].aeu_int_mask);
+
+ vars->aeu_int_mask = REG_RD(bp, sync_offset);
+
+ /* Sync PFC status */
+ if (vars->link_status & LINK_STATUS_PFC_ENABLED)
+ params->feature_config_flags |=
+ FEATURE_CONFIG_PFC_ENABLED;
+ else
+ params->feature_config_flags &=
+ ~FEATURE_CONFIG_PFC_ENABLED;
+
+ DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n",
+ vars->link_status, vars->phy_link_up, vars->aeu_int_mask);
+ DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
+ vars->line_speed, vars->duplex, vars->flow_ctrl);
+}
+
+static void bnx2x_set_master_ln(struct link_params *params,
+ struct bnx2x_phy *phy)
+{
+ struct bnx2x *bp = params->bp;
+ u16 new_master_ln, ser_lane;
+ ser_lane = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+
+ /* set the master_ln for AN */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+ &new_master_ln);
+
+ CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+ (new_master_ln | ser_lane));
+}
+
+static int bnx2x_reset_unicore(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 set_serdes)
+{
+ struct bnx2x *bp = params->bp;
+ u16 mii_control;
+ u16 i;
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
+
+ /* reset the unicore */
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ (mii_control |
+ MDIO_COMBO_IEEO_MII_CONTROL_RESET));
+ if (set_serdes)
+ bnx2x_set_serdes_access(bp, params->port);
+
+ /* wait for the reset to self clear */
+ for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
+ udelay(5);
+
+ /* the reset erased the previous bank value */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
+
+ if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
+ udelay(5);
+ return 0;
+ }
+ }
+
+	netdev_err(bp->dev, "Warning: PHY was not initialized, Port %d\n",
+		   params->port);
+ DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
+ return -EINVAL;
+}
+
+static void bnx2x_set_swap_lanes(struct link_params *params,
+ struct bnx2x_phy *phy)
+{
+ struct bnx2x *bp = params->bp;
+	/*
+	 * Each pair of bits represents a lane number:
+	 * no swap is 0123 => 0x1b, so there is no need to enable the swap.
+	 */
+ u16 ser_lane, rx_lane_swap, tx_lane_swap;
+
+ ser_lane = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ rx_lane_swap = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
+ tx_lane_swap = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
+
+ if (rx_lane_swap != 0x1b) {
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP,
+ (rx_lane_swap |
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
+ } else {
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
+ }
+
+ if (tx_lane_swap != 0x1b) {
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP,
+ (tx_lane_swap |
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
+ } else {
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
+ }
+}
+
+static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 control2;
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+ &control2);
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+ control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
+ else
+ control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
+ DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
+ phy->speed_cap_mask, control2);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+ control2);
+
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+ DP(NETIF_MSG_LINK, "XGXS\n");
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+ &control2);
+
+ control2 |=
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+ control2);
+
+ /* Disable parallel detection of HiG */
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
+ }
+}
+
+static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u8 enable_cl73)
+{
+ struct bnx2x *bp = params->bp;
+ u16 reg_val;
+
+ /* CL37 Autoneg */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+
+ /* CL37 Autoneg Enabled */
+ if (vars->line_speed == SPEED_AUTO_NEG)
+ reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
+ else /* CL37 Autoneg Disabled */
+ reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+ MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+
+ /* Enable/Disable Autodetection */
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
+ reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
+ reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
+ if (vars->line_speed == SPEED_AUTO_NEG)
+ reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
+ else
+ reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
+
+ /* Enable TetonII and BAM autoneg */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_BAM_NEXT_PAGE,
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+ &reg_val);
+ if (vars->line_speed == SPEED_AUTO_NEG) {
+ /* Enable BAM aneg Mode and TetonII aneg Mode */
+ reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
+ } else {
+ /* TetonII and BAM Autoneg Disabled */
+ reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
+ }
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_BAM_NEXT_PAGE,
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+ reg_val);
+
+ if (enable_cl73) {
+ /* Enable Cl73 FSM status bits */
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_UCTRL,
+ 0xe);
+
+ /* Enable BAM Station Manager*/
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_BAM_CTRL1,
+ MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
+ MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
+ MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
+
+ /* Advertise CL73 link speeds */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV2,
+ &reg_val);
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+ reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV2,
+ reg_val);
+
+ /* CL73 Autoneg Enabled */
+ reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
+
+ } else /* CL73 Autoneg Disabled */
+ reg_val = 0;
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
+}
+
+/* program SerDes, forced speed */
+static void bnx2x_program_serdes(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 reg_val;
+
+	/* Program duplex, disable autoneg and SGMII */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+ reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
+ MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
+ if (phy->req_duplex == DUPLEX_FULL)
+ reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+
+ /*
+ * program speed
+ * - needed only if the speed is greater than 1G (2.5G or 10G)
+ */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_MISC1, &reg_val);
+ /* clearing the speed value before setting the right speed */
+ DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
+
+ reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
+ MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
+
+ if (!((vars->line_speed == SPEED_1000) ||
+ (vars->line_speed == SPEED_100) ||
+ (vars->line_speed == SPEED_10))) {
+
+ reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
+ MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
+ if (vars->line_speed == SPEED_10000)
+ reg_val |=
+ MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
+ }
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_MISC1, reg_val);
+
+}
+
+static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val = 0;
+
+ /* configure the 48 bits for BAM AN */
+
+ /* set extended capabilities */
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
+ val |= MDIO_OVER_1G_UP1_2_5G;
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ val |= MDIO_OVER_1G_UP1_10G;
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_UP1, val);
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_UP3, 0x400);
+}
+
+static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 ieee_fc)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val;
+ /* for AN, we are always publishing full duplex */
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1, &val);
+ val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
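+	/* CL37 pause bits (7:8) shift by 3 into the CL73 ADV1 pause field (bits 10:11) */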
+ val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1, val);
+}
+
+static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 enable_cl73)
+{
+ struct bnx2x *bp = params->bp;
+ u16 mii_control;
+
+ DP(NETIF_MSG_LINK, "bnx2x_restart_autoneg\n");
+ /* Enable and restart BAM/CL37 aneg */
+
+ if (enable_cl73) {
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ &mii_control);
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ (mii_control |
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
+ } else {
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
+ DP(NETIF_MSG_LINK,
+ "bnx2x_restart_autoneg mii_control before = 0x%x\n",
+ mii_control);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ (mii_control |
+ MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+ MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
+ }
+}
+
+static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 control1;
+
+ /* in SGMII mode, the unicore is always slave */
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+ &control1);
+ control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
+ /* set sgmii mode (and not fiber) */
+ control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+ control1);
+
+ /* if forced speed */
+	if (vars->line_speed != SPEED_AUTO_NEG) {
+ /* set speed, disable autoneg */
+ u16 mii_control;
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
+ mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
+ MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
+
+ switch (vars->line_speed) {
+ case SPEED_100:
+ mii_control |=
+ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
+ break;
+ case SPEED_1000:
+ mii_control |=
+ MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
+ break;
+ case SPEED_10:
+ /* there is nothing to set for 10M */
+ break;
+ default:
+ /* invalid speed for SGMII */
+ DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+ vars->line_speed);
+ break;
+ }
+
+ /* setting the full duplex */
+ if (phy->req_duplex == DUPLEX_FULL)
+ mii_control |=
+ MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ mii_control);
+
+ } else { /* AN mode */
+ /* enable and restart AN */
+ bnx2x_restart_autoneg(phy, params, 0);
+ }
+}
+
+/*
+ * link management
+ */
+
+static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 pd_10g, status2_1000x;
+ if (phy->req_line_speed != SPEED_AUTO_NEG)
+ return 0;
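+	/* The 1000X status2 register appears to latch; read it twice and use the second value */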
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+ &status2_1000x);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+ &status2_1000x);
+ if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
+ DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
+ params->port);
+ return 1;
+ }
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
+ &pd_10g);
+
+ if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
+ DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
+ params->port);
+ return 1;
+ }
+ return 0;
+}
+
+static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u32 gp_status)
+{
+ struct bnx2x *bp = params->bp;
+ u16 ld_pause; /* local driver */
+ u16 lp_pause; /* link partner */
+ u16 pause_result;
+
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+ /* resolve from gp_status in case of AN complete and not sgmii */
+ if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
+ vars->flow_ctrl = phy->req_flow_ctrl;
+ else if (phy->req_line_speed != SPEED_AUTO_NEG)
+ vars->flow_ctrl = params->req_fc_auto_adv;
+ else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
+ (!(vars->phy_flags & PHY_SGMII_FLAG))) {
+ if (bnx2x_direct_parallel_detect_used(phy, params)) {
+ vars->flow_ctrl = params->req_fc_auto_adv;
+ return;
+ }
+ if ((gp_status &
+ (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
+ MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) ==
+ (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
+ MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1,
+ &ld_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_LP_ADV1,
+ &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
+ >> 8;
+ pause_result |= (lp_pause &
+ MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK)
+ >> 10;
+ DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
+ pause_result);
+ } else {
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
+ &ld_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
+ &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
+ pause_result |= (lp_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
+ DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
+ pause_result);
+ }
+ bnx2x_pause_resolve(vars, pause_result);
+ }
+ DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
+}
+
+static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 rx_status, ustat_val, cl37_fsm_received;
+ DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
+ /* Step 1: Make sure signal is detected */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_RX0,
+ MDIO_RX0_RX_STATUS,
+ &rx_status);
+ if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
+ (MDIO_RX0_RX_STATUS_SIGDET)) {
+ DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73."
+ "rx_status(0x80b0) = 0x%x\n", rx_status);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
+ return;
+ }
+ /* Step 2: Check CL73 state machine */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_USTAT1,
+ &ustat_val);
+ if ((ustat_val &
+ (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
+ MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
+ (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
+ MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) {
+ DP(NETIF_MSG_LINK, "CL73 state-machine is not stable. "
+ "ustat_val(0x8371) = 0x%x\n", ustat_val);
+ return;
+ }
+ /*
+ * Step 3: Check CL37 Message Pages received to indicate LP
+ * supports only CL37
+ */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_REMOTE_PHY,
+ MDIO_REMOTE_PHY_MISC_RX_STATUS,
+ &cl37_fsm_received);
+ if ((cl37_fsm_received &
+ (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
+ MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
+ (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
+ MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) {
+		DP(NETIF_MSG_LINK, "No CL37 FSM messages were received. "
+ "misc_rx_status(0x8330) = 0x%x\n",
+ cl37_fsm_received);
+ return;
+ }
+	/*
+	 * The combined cl37/cl73 fsm state information indicates that we
+	 * are connected to a device which does not support cl73, but does
+	 * support cl37 BAM. In this case we disable cl73 and restart cl37
+	 * auto-neg.
+	 */
+
+ /* Disable CL73 */
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ 0);
+ /* Restart CL37 autoneg */
+ bnx2x_restart_autoneg(phy, params, 0);
+ DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
+}
+
+static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u32 gp_status)
+{
+ if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+
+ if (bnx2x_direct_parallel_detect_used(phy, params))
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
+}
+
+static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u16 is_link_up,
+ u16 speed_mask,
+ u16 is_duplex)
+{
+ struct bnx2x *bp = params->bp;
+ if (phy->req_line_speed == SPEED_AUTO_NEG)
+ vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
+ if (is_link_up) {
+ DP(NETIF_MSG_LINK, "phy link up\n");
+
+ vars->phy_link_up = 1;
+ vars->link_status |= LINK_STATUS_LINK_UP;
+
+ switch (speed_mask) {
+ case GP_STATUS_10M:
+ vars->line_speed = SPEED_10;
+ if (vars->duplex == DUPLEX_FULL)
+ vars->link_status |= LINK_10TFD;
+ else
+ vars->link_status |= LINK_10THD;
+ break;
+
+ case GP_STATUS_100M:
+ vars->line_speed = SPEED_100;
+ if (vars->duplex == DUPLEX_FULL)
+ vars->link_status |= LINK_100TXFD;
+ else
+ vars->link_status |= LINK_100TXHD;
+ break;
+
+ case GP_STATUS_1G:
+ case GP_STATUS_1G_KX:
+ vars->line_speed = SPEED_1000;
+ if (vars->duplex == DUPLEX_FULL)
+ vars->link_status |= LINK_1000TFD;
+ else
+ vars->link_status |= LINK_1000THD;
+ break;
+
+ case GP_STATUS_2_5G:
+ vars->line_speed = SPEED_2500;
+ if (vars->duplex == DUPLEX_FULL)
+ vars->link_status |= LINK_2500TFD;
+ else
+ vars->link_status |= LINK_2500THD;
+ break;
+
+ case GP_STATUS_5G:
+ case GP_STATUS_6G:
+ DP(NETIF_MSG_LINK,
+ "link speed unsupported gp_status 0x%x\n",
+ speed_mask);
+ return -EINVAL;
+
+ case GP_STATUS_10G_KX4:
+ case GP_STATUS_10G_HIG:
+ case GP_STATUS_10G_CX4:
+ case GP_STATUS_10G_KR:
+ case GP_STATUS_10G_SFI:
+ case GP_STATUS_10G_XFI:
+ vars->line_speed = SPEED_10000;
+ vars->link_status |= LINK_10GTFD;
+ break;
+ case GP_STATUS_20G_DXGXS:
+ vars->line_speed = SPEED_20000;
+ vars->link_status |= LINK_20GTFD;
+ break;
+ default:
+ DP(NETIF_MSG_LINK,
+ "link speed unsupported gp_status 0x%x\n",
+ speed_mask);
+ return -EINVAL;
+ }
+ } else { /* link_down */
+ DP(NETIF_MSG_LINK, "phy link down\n");
+
+ vars->phy_link_up = 0;
+
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_NONE;
+ }
+ DP(NETIF_MSG_LINK, " phy_link_up %x line_speed %d\n",
+ vars->phy_link_up, vars->line_speed);
+ return 0;
+}
+
+static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+
+ struct bnx2x *bp = params->bp;
+
+ u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask;
+ int rc = 0;
+
+ /* Read gp_status */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
+ if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
+ duplex = DUPLEX_FULL;
+ if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)
+ link_up = 1;
+ speed_mask = gp_status & GP_STATUS_SPEED_MASK;
+ DP(NETIF_MSG_LINK, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x\n",
+ gp_status, link_up, speed_mask);
+ rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, speed_mask,
+ duplex);
+ if (rc == -EINVAL)
+ return rc;
+
+ if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
+ if (SINGLE_MEDIA_DIRECT(params)) {
+ bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status);
+ if (phy->req_line_speed == SPEED_AUTO_NEG)
+ bnx2x_xgxs_an_resolve(phy, params, vars,
+ gp_status);
+ }
+ } else { /* link_down */
+ if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ SINGLE_MEDIA_DIRECT(params)) {
+ /* Check signal is detected */
+ bnx2x_check_fallback_to_cl37(phy, params);
+ }
+ }
+
+ DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
+ vars->duplex, vars->flow_ctrl, vars->link_status);
+ return rc;
+}
+
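+/*
+ * Read the Warpcore link state for this lane. Above 10G the latched
+ * PCS RX status is read twice and bit 2 reports link; otherwise the
+ * GP2 status registers are used, combining the KR and generic
+ * link-up nibbles, with AN-complete and parallel-detect checks for
+ * direct media. The lane speed field is then decoded by
+ * bnx2x_get_link_speed_duplex().
+ */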
+static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+
+ struct bnx2x *bp = params->bp;
+
+ u8 lane;
+ u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL;
+ int rc = 0;
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ /* Read gp_status */
+ if (phy->req_line_speed > SPEED_10000) {
+ u16 temp_link_up;
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ 1, &temp_link_up);
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ 1, &link_up);
+ DP(NETIF_MSG_LINK, "PCS RX link status = 0x%x-->0x%x\n",
+ temp_link_up, link_up);
+ link_up &= (1<<2);
+ if (link_up)
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ } else {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_1, &gp_status1);
+ DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1);
+ /* Check for either KR or generic link up. */
+ gp_status1 = ((gp_status1 >> 8) & 0xf) |
+ ((gp_status1 >> 12) & 0xf);
+ link_up = gp_status1 & (1 << lane);
+ if (link_up && SINGLE_MEDIA_DIRECT(params)) {
+ u16 pd, gp_status4;
+ if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ /* Check Autoneg complete */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_4,
+ &gp_status4);
+ if (gp_status4 & ((1<<12)<<lane))
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+
+ /* Check parallel detect used */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_PAR_DET_10G_STATUS,
+ &pd);
+ if (pd & (1<<15))
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
+ }
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ }
+ }
+
+ if (lane < 2) {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed);
+ } else {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed);
+ }
+ DP(NETIF_MSG_LINK, "lane %d gp_speed 0x%x\n", lane, gp_speed);
+
+ if ((lane & 1) == 0)
+ gp_speed <<= 8;
+ gp_speed &= 0x3f00;
+
+ rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
+ duplex);
+
+ DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
+ vars->duplex, vars->flow_ctrl, vars->link_status);
+ return rc;
+}
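+
+/*
+ * Copy the pre-emphasis value requested by the link partner (LP_UP2
+ * bits [10:7]) into the pre-emphasis field (bits [15:12]) of each of
+ * the four TX driver banks, touching only banks that differ.
+ */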
+static void bnx2x_set_gmii_tx_driver(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ struct bnx2x_phy *phy = &params->phy[INT_PHY];
+ u16 lp_up2;
+ u16 tx_driver;
+ u16 bank;
+
+ /* read precomp */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_LP_UP2, &lp_up2);
+
+ /* bits [10:7] at lp_up2, positioned at [15:12] */
+ lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
+ MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
+ MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
+
+ if (lp_up2 == 0)
+ return;
+
+ for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
+ bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
+ CL22_RD_OVER_CL45(bp, phy,
+ bank,
+ MDIO_TX0_TX_DRIVER, &tx_driver);
+
+ /* replace tx_driver bits [15:12] */
+ if (lp_up2 !=
+ (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
+ tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
+ tx_driver |= lp_up2;
+ CL22_WR_OVER_CL45(bp, phy,
+ bank,
+ MDIO_TX0_TX_DRIVER, tx_driver);
+ }
+ }
+}
+
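+/*
+ * Program the EMAC mode register according to the negotiated speed
+ * and duplex. The EMAC covers 10M through 2.5G only; 10G is rejected
+ * with -EINVAL.
+ */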
+static int bnx2x_emac_program(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u16 mode = 0;
+
+ DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
+ bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
+ EMAC_REG_EMAC_MODE,
+ (EMAC_MODE_25G_MODE |
+ EMAC_MODE_PORT_MII_10M |
+ EMAC_MODE_HALF_DUPLEX));
+ switch (vars->line_speed) {
+ case SPEED_10:
+ mode |= EMAC_MODE_PORT_MII_10M;
+ break;
+
+ case SPEED_100:
+ mode |= EMAC_MODE_PORT_MII;
+ break;
+
+ case SPEED_1000:
+ mode |= EMAC_MODE_PORT_GMII;
+ break;
+
+ case SPEED_2500:
+ mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
+ break;
+
+ default:
+ /* 10G not valid for EMAC */
+ DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+ vars->line_speed);
+ return -EINVAL;
+ }
+
+ if (vars->duplex == DUPLEX_HALF)
+ mode |= EMAC_MODE_HALF_DUPLEX;
+ bnx2x_bits_en(bp,
+ GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
+ mode);
+
+ bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
+ return 0;
+}
+
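+/*
+ * Apply the RX equalizer boost and TX driver pre-emphasis values
+ * configured in the phy struct to each of the four RX/TX banks.
+ */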
+static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+
+ u16 bank, i = 0;
+ struct bnx2x *bp = params->bp;
+
+ for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
+ bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
+ CL22_WR_OVER_CL45(bp, phy,
+ bank,
+ MDIO_RX0_RX_EQ_BOOST,
+ phy->rx_preemphasis[i]);
+ }
+
+ for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
+ bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
+ CL22_WR_OVER_CL45(bp, phy,
+ bank,
+ MDIO_TX0_TX_DRIVER,
+ phy->tx_preemphasis[i]);
+ }
+}
+
+static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) ||
+ (params->loopback_mode == LOOPBACK_XGXS));
+ if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
+ if (SINGLE_MEDIA_DIRECT(params) &&
+ (params->feature_config_flags &
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
+ bnx2x_set_preemphasis(phy, params);
+
+ /* forced speed requested? */
+ if (vars->line_speed != SPEED_AUTO_NEG ||
+ (SINGLE_MEDIA_DIRECT(params) &&
+ params->loopback_mode == LOOPBACK_EXT)) {
+ DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
+
+ /* disable autoneg */
+ bnx2x_set_autoneg(phy, params, vars, 0);
+
+ /* program speed and duplex */
+ bnx2x_program_serdes(phy, params, vars);
+
+ } else { /* AN_mode */
+ DP(NETIF_MSG_LINK, "not SGMII, AN\n");
+
+ /* AN enabled */
+ bnx2x_set_brcm_cl37_advertisement(phy, params);
+
+ /* program duplex & pause advertisement (for aneg) */
+ bnx2x_set_ieee_aneg_advertisement(phy, params,
+ vars->ieee_fc);
+
+ /* enable autoneg */
+ bnx2x_set_autoneg(phy, params, vars, enable_cl73);
+
+ /* enable and restart AN */
+ bnx2x_restart_autoneg(phy, params, enable_cl73);
+ }
+
+ } else { /* SGMII mode */
+ DP(NETIF_MSG_LINK, "SGMII\n");
+
+ bnx2x_initialize_sgmii_process(phy, params, vars);
+ }
+}
+
+static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ int rc;
+ vars->phy_flags |= PHY_XGXS_FLAG;
+ if ((phy->req_line_speed &&
+ ((phy->req_line_speed == SPEED_100) ||
+ (phy->req_line_speed == SPEED_10))) ||
+ (!phy->req_line_speed &&
+ (phy->speed_cap_mask >=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
+ (phy->speed_cap_mask <
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->type == PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD))
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ else
+ vars->phy_flags &= ~PHY_SGMII_FLAG;
+
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ bnx2x_set_aer_mmd(params, phy);
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+ bnx2x_set_master_ln(params, phy);
+
+	/* Reset the SerDes and wait for the reset bit to return low */
+	rc = bnx2x_reset_unicore(params, phy, 0);
+ if (rc != 0)
+ return rc;
+
+ bnx2x_set_aer_mmd(params, phy);
+	/* Set the masterLn_def again after the reset */
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
+ bnx2x_set_master_ln(params, phy);
+ bnx2x_set_swap_lanes(params, phy);
+ }
+
+ return rc;
+}
+
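+/*
+ * Poll the PHY control register until the soft-reset bit (bit 15)
+ * clears, in 1 ms steps for up to ~1 second. The BCM54618SE is
+ * accessed via clause 22, all other PHYs via clause 45. Returns the
+ * number of iterations waited.
+ */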
+static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u16 cnt, ctrl;
+ /* Wait for soft reset to get cleared up to 1 sec */
+ for (cnt = 0; cnt < 1000; cnt++) {
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
+ bnx2x_cl22_read(bp, phy,
+ MDIO_PMA_REG_CTRL, &ctrl);
+ else
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL, &ctrl);
+ if (!(ctrl & (1<<15)))
+ break;
+ msleep(1);
+ }
+
+ if (cnt == 1000)
+ netdev_err(bp->dev, "Warning: PHY was not initialized,"
+ " Port %d\n",
+ params->port);
+ DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
+ return cnt;
+}
+
+static void bnx2x_link_int_enable(struct link_params *params)
+{
+ u8 port = params->port;
+ u32 mask;
+ struct bnx2x *bp = params->bp;
+
+ /* Setting the status to report on link up for either XGXS or SerDes */
+ if (CHIP_IS_E3(bp)) {
+ mask = NIG_MASK_XGXS0_LINK_STATUS;
+ if (!(SINGLE_MEDIA_DIRECT(params)))
+ mask |= NIG_MASK_MI_INT;
+ } else if (params->switch_cfg == SWITCH_CFG_10G) {
+ mask = (NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_XGXS0_LINK_STATUS);
+ DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
+ if (!(SINGLE_MEDIA_DIRECT(params)) &&
+ params->phy[INT_PHY].type !=
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) {
+ mask |= NIG_MASK_MI_INT;
+ DP(NETIF_MSG_LINK, "enabled external phy int\n");
+ }
+
+ } else { /* SerDes */
+ mask = NIG_MASK_SERDES0_LINK_STATUS;
+ DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
+ if (!(SINGLE_MEDIA_DIRECT(params)) &&
+ params->phy[INT_PHY].type !=
+ PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) {
+ mask |= NIG_MASK_MI_INT;
+ DP(NETIF_MSG_LINK, "enabled external phy int\n");
+ }
+ }
+ bnx2x_bits_en(bp,
+ NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+ mask);
+
+ DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port,
+ (params->switch_cfg == SWITCH_CFG_10G),
+ REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
+ DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n",
+ REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
+ REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
+ REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c));
+ DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
+ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
+ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
+}
+
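+/*
+ * Set or clear the EMAC MI interrupt bit in the NIG status register
+ * according to the expected value (the latched link-change signal is
+ * XORed against it), then re-arm the latch by writing the latched
+ * value back whenever its bit 0 is up.
+ */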
+static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
+ u8 exp_mi_int)
+{
+ u32 latch_status = 0;
+
+ /*
+	 * Disable the MI INT (external phy int) by writing 1 to the
+	 * status register. The link down indication is a high-active
+	 * signal, so in this case we need to write the status to clear
+	 * the XOR.
+ */
+ /* Read Latched signals */
+ latch_status = REG_RD(bp,
+ NIG_REG_LATCH_STATUS_0 + port*8);
+ DP(NETIF_MSG_LINK, "latch_status = 0x%x\n", latch_status);
+	/* Handle only those with latched-signal=up. */
+ if (exp_mi_int)
+ bnx2x_bits_en(bp,
+ NIG_REG_STATUS_INTERRUPT_PORT0
+ + port*4,
+ NIG_STATUS_EMAC0_MI_INT);
+ else
+ bnx2x_bits_dis(bp,
+ NIG_REG_STATUS_INTERRUPT_PORT0
+ + port*4,
+ NIG_STATUS_EMAC0_MI_INT);
+
+ if (latch_status & 1) {
+
+ /* For all latched-signal=up : Re-Arm Latch signals */
+ REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
+ (latch_status & 0xfffe) | (latch_status & 1));
+ }
+	/* For all latched-signal=up, write original_signal to status */
+}
+
+static void bnx2x_link_int_ack(struct link_params *params,
+ struct link_vars *vars, u8 is_10g_plus)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 mask;
+ /*
+	 * First reset all status bits; we assume only one line will
+	 * change at a time
+ */
+ bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+ (NIG_STATUS_XGXS0_LINK10G |
+ NIG_STATUS_XGXS0_LINK_STATUS |
+ NIG_STATUS_SERDES0_LINK_STATUS));
+ if (vars->phy_link_up) {
+ if (USES_WARPCORE(bp))
+ mask = NIG_STATUS_XGXS0_LINK_STATUS;
+ else {
+ if (is_10g_plus)
+ mask = NIG_STATUS_XGXS0_LINK10G;
+ else if (params->switch_cfg == SWITCH_CFG_10G) {
+ /*
+ * Disable the link interrupt by writing 1 to
+ * the relevant lane in the status register
+ */
+ u32 ser_lane =
+ ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ mask = ((1 << ser_lane) <<
+ NIG_STATUS_XGXS0_LINK_STATUS_SIZE);
+ } else
+ mask = NIG_STATUS_SERDES0_LINK_STATUS;
+ }
+ DP(NETIF_MSG_LINK, "Ack link up interrupt with mask 0x%x\n",
+ mask);
+ bnx2x_bits_en(bp,
+ NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+ mask);
+ }
+}
+
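+/*
+ * Format a 32-bit version word as two 16-bit hex fields separated by
+ * a dot, dropping leading zeros within each field; e.g. 0x01020304
+ * becomes "102.304" (the dot is skipped entirely when the high field
+ * is zero). At least 10 bytes of buffer space are required.
+ */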
+static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
+{
+ u8 *str_ptr = str;
+ u32 mask = 0xf0000000;
+ u8 shift = 8*4;
+ u8 digit;
+ u8 remove_leading_zeros = 1;
+ if (*len < 10) {
+		/* Need more than 10 chars for this format */
+ *str_ptr = '\0';
+ (*len)--;
+ return -EINVAL;
+ }
+ while (shift > 0) {
+
+ shift -= 4;
+ digit = ((num & mask) >> shift);
+ if (digit == 0 && remove_leading_zeros) {
+ mask = mask >> 4;
+ continue;
+ } else if (digit < 0xa)
+ *str_ptr = digit + '0';
+ else
+ *str_ptr = digit - 0xa + 'a';
+ remove_leading_zeros = 0;
+ str_ptr++;
+ (*len)--;
+ mask = mask >> 4;
+ if (shift == 4*4) {
+ *str_ptr = '.';
+ str_ptr++;
+ (*len)--;
+ remove_leading_zeros = 1;
+ }
+ }
+ return 0;
+}
+
+static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+{
+ str[0] = '\0';
+ (*len)--;
+ return 0;
+}
+
+int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
+ u8 *version, u16 len)
+{
+ struct bnx2x *bp;
+ u32 spirom_ver = 0;
+ int status = 0;
+ u8 *ver_p = version;
+ u16 remain_len = len;
+ if (version == NULL || params == NULL)
+ return -EINVAL;
+ bp = params->bp;
+
+	/* Extract first external phy */
+ version[0] = '\0';
+ spirom_ver = REG_RD(bp, params->phy[EXT_PHY1].ver_addr);
+
+ if (params->phy[EXT_PHY1].format_fw_ver) {
+ status |= params->phy[EXT_PHY1].format_fw_ver(spirom_ver,
+ ver_p,
+ &remain_len);
+ ver_p += (len - remain_len);
+ }
+ if ((params->num_phys == MAX_PHYS) &&
+ (params->phy[EXT_PHY2].ver_addr != 0)) {
+ spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr);
+ if (params->phy[EXT_PHY2].format_fw_ver) {
+ *ver_p = '/';
+ ver_p++;
+ remain_len--;
+ status |= params->phy[EXT_PHY2].format_fw_ver(
+ spirom_ver,
+ ver_p,
+ &remain_len);
+ ver_p = version + (len - remain_len);
+ }
+ }
+ *ver_p = '\0';
+ return status;
+}
+
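+/*
+ * Enable XGXS loopback. For speeds other than 1G this is done via
+ * devad 5 writes to the AER and CL73 AN control registers
+ * (temporarily retargeting the NIG MDIO devad on pre-E3 chips); for
+ * 1G it simply sets the loopback bit of the combo IEEE0 MII control
+ * register.
+ */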
+static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+
+ if (phy->req_line_speed != SPEED_1000) {
+ u32 md_devad = 0;
+
+ DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
+
+ if (!CHIP_IS_E3(bp)) {
+ /* change the uni_phy_addr in the nig */
+ md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
+ port*0x18));
+
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+ 0x5);
+ }
+
+ bnx2x_cl45_write(bp, phy,
+ 5,
+ (MDIO_REG_BANK_AER_BLOCK +
+ (MDIO_AER_BLOCK_AER_REG & 0xf)),
+ 0x2800);
+
+ bnx2x_cl45_write(bp, phy,
+ 5,
+ (MDIO_REG_BANK_CL73_IEEEB0 +
+ (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
+ 0x6041);
+ msleep(200);
+ /* set aer mmd back */
+ bnx2x_set_aer_mmd(params, phy);
+
+ if (!CHIP_IS_E3(bp)) {
+ /* and md_devad */
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+ md_devad);
+ }
+ } else {
+ u16 mii_ctrl;
+ DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
+ bnx2x_cl45_read(bp, phy, 5,
+ (MDIO_REG_BANK_COMBO_IEEE0 +
+ (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
+ &mii_ctrl);
+ bnx2x_cl45_write(bp, phy, 5,
+ (MDIO_REG_BANK_COMBO_IEEE0 +
+ (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
+ mii_ctrl |
+ MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK);
+ }
+}
+
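+/*
+ * Drive the port LEDs. OFF modes force the EMAC LED override with
+ * the MAC1 scheme; ON/OPER program the NIG LED mode and a ~15.9Hz
+ * blink rate, with chip/PHY specific work-arounds (E2+8727, CL73 on
+ * direct media, and the different sub-10G scheme on E1 Ax). External
+ * PHY set_link_led callbacks are invoked first.
+ */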
+int bnx2x_set_led(struct link_params *params,
+ struct link_vars *vars, u8 mode, u32 speed)
+{
+ u8 port = params->port;
+ u16 hw_led_mode = params->hw_led_mode;
+ int rc = 0;
+ u8 phy_idx;
+ u32 tmp;
+ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
+ DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
+ speed, hw_led_mode);
+	/* Let external PHYs with a set_link_led callback handle their LEDs */
+ for (phy_idx = EXT_PHY1; phy_idx < MAX_PHYS; phy_idx++) {
+ if (params->phy[phy_idx].set_link_led) {
+ params->phy[phy_idx].set_link_led(
+ &params->phy[phy_idx], params, mode);
+ }
+ }
+
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+ SHARED_HW_CFG_LED_MAC1);
+
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
+ break;
+
+ case LED_MODE_OPER:
+ /*
+		 * For all other phys, OPER mode is the same as ON, so in case
+		 * the link is down, do nothing
+ */
+ if (!vars->link_up)
+ break;
+ case LED_MODE_ON:
+ if (((params->phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
+ (params->phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) &&
+ CHIP_IS_E2(bp) && params->num_phys == 2) {
+ /*
+			 * This is a work-around for E2+8727 configurations
+ */
+ if (mode == LED_MODE_ON ||
+			    speed == SPEED_10000) {
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+ REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ EMAC_WR(bp, EMAC_REG_EMAC_LED,
+ (tmp | EMAC_LED_OVERRIDE));
+ /*
+ * return here without enabling traffic
+ * LED blink and setting rate in ON mode.
+ * In oper mode, enabling LED blink
+ * and setting rate is needed.
+ */
+ if (mode == LED_MODE_ON)
+ return rc;
+ }
+ } else if (SINGLE_MEDIA_DIRECT(params)) {
+ /*
+ * This is a work-around for HW issue found when link
+ * is up in CL73
+ */
+ if ((!CHIP_IS_E3(bp)) ||
+ (CHIP_IS_E3(bp) &&
+ mode == LED_MODE_ON))
+ REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
+ if (CHIP_IS_E1x(bp) ||
+ CHIP_IS_E2(bp) ||
+ (mode == LED_MODE_ON))
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+ else
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+ hw_led_mode);
+ } else
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
+
+ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
+ /* Set blinking rate to ~15.9Hz */
+ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
+ LED_BLINK_RATE_VAL);
+ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
+ port*4, 1);
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE)));
+
+ if (CHIP_IS_E1(bp) &&
+ ((speed == SPEED_2500) ||
+ (speed == SPEED_1000) ||
+ (speed == SPEED_100) ||
+ (speed == SPEED_10))) {
+ /*
+			 * On Everest 1 Ax chip versions, for speeds less than
+			 * 10G, the LED scheme is different
+ */
+ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
+ + port*4, 1);
+ REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
+ port*4, 0);
+ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
+ port*4, 1);
+ }
+ break;
+
+ default:
+ rc = -EINVAL;
+ DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
+ mode);
+ break;
+ }
+	return rc;
+}
+
+/*
+ * This function reflects the actual link state, read DIRECTLY from
+ * the HW
+ */
+int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
+ u8 is_serdes)
+{
+ struct bnx2x *bp = params->bp;
+ u16 gp_status = 0, phy_index = 0;
+ u8 ext_phy_link_up = 0, serdes_phy_type;
+ struct link_vars temp_vars;
+ struct bnx2x_phy *int_phy = &params->phy[INT_PHY];
+
+ if (CHIP_IS_E3(bp)) {
+ u16 link_up;
+ if (params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)]
+ > SPEED_10000) {
+ /* Check 20G link */
+ bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+ 1, &link_up);
+ bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+ 1, &link_up);
+ link_up &= (1<<2);
+ } else {
+ /* Check 10G link and below*/
+ u8 lane = bnx2x_get_warpcore_lane(int_phy, params);
+ bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_1,
+ &gp_status);
+ gp_status = ((gp_status >> 8) & 0xf) |
+ ((gp_status >> 12) & 0xf);
+ link_up = gp_status & (1 << lane);
+ }
+ if (!link_up)
+ return -ESRCH;
+ } else {
+ CL22_RD_OVER_CL45(bp, int_phy,
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
+ /* link is up only if both local phy and external phy are up */
+ if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
+ return -ESRCH;
+ }
+ /* In XGXS loopback mode, do not check external PHY */
+ if (params->loopback_mode == LOOPBACK_XGXS)
+ return 0;
+
+ switch (params->num_phys) {
+ case 1:
+ /* No external PHY */
+ return 0;
+ case 2:
+ ext_phy_link_up = params->phy[EXT_PHY1].read_status(
+ &params->phy[EXT_PHY1],
+ params, &temp_vars);
+ break;
+ case 3: /* Dual Media */
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ serdes_phy_type = ((params->phy[phy_index].media_type ==
+ ETH_PHY_SFP_FIBER) ||
+ (params->phy[phy_index].media_type ==
+ ETH_PHY_XFP_FIBER) ||
+ (params->phy[phy_index].media_type ==
+ ETH_PHY_DA_TWINAX));
+
+ if (is_serdes != serdes_phy_type)
+ continue;
+ if (params->phy[phy_index].read_status) {
+ ext_phy_link_up |=
+ params->phy[phy_index].read_status(
+ &params->phy[phy_index],
+ params, &temp_vars);
+ }
+ }
+ break;
+ }
+ if (ext_phy_link_up)
+ return 0;
+ return -ESRCH;
+}
+
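+/*
+ * Bring up the internal (XGXS) phy and, unless this is a direct
+ * board, each required external phy; enable parallel detection where
+ * applicable, clear stale link interrupt status, and report the
+ * resulting link_status through bnx2x_update_mng().
+ */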
+static int bnx2x_link_initialize(struct link_params *params,
+ struct link_vars *vars)
+{
+ int rc = 0;
+ u8 phy_index, non_ext_phy;
+ struct bnx2x *bp = params->bp;
+ /*
+	 * In case an external phy exists, the line speed will be the
+	 * speed at which the external phy linked up. In case the board
+	 * is direct only, the line_speed during initialization will be
+	 * equal to the req_line_speed
+ */
+ vars->line_speed = params->phy[INT_PHY].req_line_speed;
+
+ /*
+ * Initialize the internal phy in case this is a direct board
+	 * (no external phys), or this board has an external phy which
+	 * requires the internal phy to be initialized first
+ */
+ if (!USES_WARPCORE(bp))
+ bnx2x_prepare_xgxs(&params->phy[INT_PHY], params, vars);
+ /* init ext phy and enable link state int */
+ non_ext_phy = (SINGLE_MEDIA_DIRECT(params) ||
+ (params->loopback_mode == LOOPBACK_XGXS));
+
+ if (non_ext_phy ||
+ (params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) ||
+ (params->loopback_mode == LOOPBACK_EXT_PHY)) {
+ struct bnx2x_phy *phy = &params->phy[INT_PHY];
+ if (vars->line_speed == SPEED_AUTO_NEG &&
+ (CHIP_IS_E1x(bp) ||
+ CHIP_IS_E2(bp)))
+ bnx2x_set_parallel_detection(phy, params);
+ if (params->phy[INT_PHY].config_init)
+ params->phy[INT_PHY].config_init(phy,
+ params,
+ vars);
+ }
+
+ /* Init external phy*/
+ if (non_ext_phy) {
+ if (params->phy[INT_PHY].supported &
+ SUPPORTED_FIBRE)
+ vars->link_status |= LINK_STATUS_SERDES_LINK;
+ } else {
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ /*
+			 * No need to initialize the second phy in case of
+			 * first-phy-only selection. In case the second phy is
+			 * selected, we still need to initialize the first phy,
+			 * since they are connected.
+ */
+ if (params->phy[phy_index].supported &
+ SUPPORTED_FIBRE)
+ vars->link_status |= LINK_STATUS_SERDES_LINK;
+
+ if (phy_index == EXT_PHY2 &&
+ (bnx2x_phy_selection(params) ==
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
+ DP(NETIF_MSG_LINK,
+ "Not initializing second phy\n");
+ continue;
+ }
+ params->phy[phy_index].config_init(
+ &params->phy[phy_index],
+ params, vars);
+ }
+ }
+ /* Reset the interrupt indication after phy was initialized */
+ bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 +
+ params->port*4,
+ (NIG_STATUS_XGXS0_LINK10G |
+ NIG_STATUS_XGXS0_LINK_STATUS |
+ NIG_STATUS_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+ bnx2x_update_mng(params, vars->link_status);
+ return rc;
+}
+
+static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ /* reset the SerDes/XGXS */
+ REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
+ (0x1ff << (params->port*16)));
+}
+
+static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 gpio_port;
+ /* HW reset */
+ if (CHIP_IS_E2(bp))
+ gpio_port = BP_PATH(bp);
+ else
+ gpio_port = params->port;
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
+ DP(NETIF_MSG_LINK, "reset external PHY\n");
+}
+
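+/*
+ * Handle a link-down transition: turn the LEDs off, clear the shared
+ * link status bits, activate the NIG drain, and stop the MAC in use
+ * (EMAC, BMAC or XMAC depending on the chip).
+ */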
+static int bnx2x_update_link_down(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+
+ DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
+ bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
+ vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG;
+ /* indicate no mac active */
+ vars->mac_type = MAC_TYPE_NONE;
+
+ /* update shared memory */
+ vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
+ LINK_STATUS_LINK_UP |
+ LINK_STATUS_PHYSICAL_LINK_FLAG |
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE |
+ LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK |
+ LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK |
+ LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK);
+ vars->line_speed = 0;
+ bnx2x_update_mng(params, vars->link_status);
+
+ /* activate nig drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+
+ /* disable emac */
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+ msleep(10);
+ /* reset BigMac/Xmac */
+ if (CHIP_IS_E1x(bp) ||
+ CHIP_IS_E2(bp)) {
+ bnx2x_bmac_rx_disable(bp, params->port);
+ REG_WR(bp, GRCBASE_MISC +
+ MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ }
+ if (CHIP_IS_E3(bp))
+ bnx2x_xmac_disable(params);
+
+ return 0;
+}
+
+static int bnx2x_update_link_up(struct link_params *params,
+ struct link_vars *vars,
+ u8 link_10g)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ int rc = 0;
+
+ vars->link_status |= (LINK_STATUS_LINK_UP |
+ LINK_STATUS_PHYSICAL_LINK_FLAG);
+ vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
+
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ vars->link_status |=
+ LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
+
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ vars->link_status |=
+ LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
+ if (USES_WARPCORE(bp)) {
+ if (link_10g) {
+ if (bnx2x_xmac_enable(params, vars, 0) ==
+ -ESRCH) {
+ DP(NETIF_MSG_LINK, "Found errors on XMAC\n");
+ vars->link_up = 0;
+ vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ vars->link_status &= ~LINK_STATUS_LINK_UP;
+ }
+ } else
+ bnx2x_umac_enable(params, vars, 0);
+ bnx2x_set_led(params, vars,
+ LED_MODE_OPER, vars->line_speed);
+ }
+ if ((CHIP_IS_E1x(bp) ||
+ CHIP_IS_E2(bp))) {
+ if (link_10g) {
+ if (bnx2x_bmac_enable(params, vars, 0) ==
+ -ESRCH) {
+ DP(NETIF_MSG_LINK, "Found errors on BMAC\n");
+ vars->link_up = 0;
+ vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ vars->link_status &= ~LINK_STATUS_LINK_UP;
+ }
+
+ bnx2x_set_led(params, vars,
+ LED_MODE_OPER, SPEED_10000);
+ } else {
+ rc = bnx2x_emac_program(params, vars);
+ bnx2x_emac_enable(params, vars, 0);
+
+ /* AN complete? */
+ if ((vars->link_status &
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
+ && (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
+ SINGLE_MEDIA_DIRECT(params))
+ bnx2x_set_gmii_tx_driver(params);
+ }
+ }
+
+ /* PBF - link up */
+ if (CHIP_IS_E1x(bp))
+ rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
+ vars->line_speed);
+
+ /* disable drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
+
+ /* update shared memory */
+ bnx2x_update_mng(params, vars->link_status);
+ msleep(20);
+ return rc;
+}
+/*
+ * The bnx2x_link_update function should be called upon link
+ * interrupt.
+ * Link is considered up as follows:
+ * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs
+ * to be up
+ * - SINGLE_MEDIA - The link between the 577xx and the external
+ * phy (XGXS) needs to be up, as well as the external link of
+ * the phy (PHY_EXT1)
+ * - DUAL_MEDIA - The link between the 577xx and the first
+ * external phy needs to be up, and at least one of the 2
+ * external phy links must be up.
+ */
+int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ struct link_vars phy_vars[MAX_PHYS];
+ u8 port = params->port;
+ u8 link_10g_plus, phy_index;
+ u8 ext_phy_link_up = 0, cur_link_up;
+ int rc = 0;
+ u8 is_mi_int = 0;
+ u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
+ u8 active_external_phy = INT_PHY;
+ vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
+ for (phy_index = INT_PHY; phy_index < params->num_phys;
+ phy_index++) {
+ phy_vars[phy_index].flow_ctrl = 0;
+ phy_vars[phy_index].link_status = 0;
+ phy_vars[phy_index].line_speed = 0;
+ phy_vars[phy_index].duplex = DUPLEX_FULL;
+ phy_vars[phy_index].phy_link_up = 0;
+ phy_vars[phy_index].link_up = 0;
+ phy_vars[phy_index].fault_detected = 0;
+ }
+
+ if (USES_WARPCORE(bp))
+ bnx2x_set_aer_mmd(params, &params->phy[INT_PHY]);
+
+ DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
+ port, (vars->phy_flags & PHY_XGXS_FLAG),
+ REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
+
+ is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
+ port*0x18) > 0);
+ DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
+ REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
+ is_mi_int,
+ REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
+
+ DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
+ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
+ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
+
+ /* disable emac */
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+ /*
+ * Step 1:
+ * Check external link change only for external phys, and apply
+ * priority selection between them in case the link on both phys
+ * is up. Note that instead of the common vars, a temporary
+ * vars argument is used since each phy may have different link/
+ * speed/duplex result
+ */
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ struct bnx2x_phy *phy = &params->phy[phy_index];
+ if (!phy->read_status)
+ continue;
+ /* Read link status and params of this ext phy */
+ cur_link_up = phy->read_status(phy, params,
+ &phy_vars[phy_index]);
+ if (cur_link_up) {
+ DP(NETIF_MSG_LINK, "phy in index %d link is up\n",
+ phy_index);
+ } else {
+ DP(NETIF_MSG_LINK, "phy in index %d link is down\n",
+ phy_index);
+ continue;
+ }
+
+ if (!ext_phy_link_up) {
+ ext_phy_link_up = 1;
+ active_external_phy = phy_index;
+ } else {
+ switch (bnx2x_phy_selection(params)) {
+ case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ /*
+ * In this option, the first PHY makes sure to pass the
+ * traffic through itself only.
+			 * It's not clear how to reset the link on the second phy
+ */
+ active_external_phy = EXT_PHY1;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ /*
+ * In this option, the first PHY makes sure to pass the
+ * traffic through the second PHY.
+ */
+ active_external_phy = EXT_PHY2;
+ break;
+ default:
+ /*
+ * Link indication on both PHYs with the following cases
+ * is invalid:
+ * - FIRST_PHY means that second phy wasn't initialized,
+ * hence its link is expected to be down
+ * - SECOND_PHY means that first phy should not be able
+ * to link up by itself (using configuration)
+			 * - DEFAULT should be overridden during initialization
+ */
+ DP(NETIF_MSG_LINK, "Invalid link indication"
+ "mpc=0x%x. DISABLING LINK !!!\n",
+ params->multi_phy_config);
+ ext_phy_link_up = 0;
+ break;
+ }
+ }
+ }
+ prev_line_speed = vars->line_speed;
+ /*
+ * Step 2:
+ * Read the status of the internal phy. In case of
+ * DIRECT_SINGLE_MEDIA board, this link is the external link,
+ * otherwise this is the link between the 577xx and the first
+ * external phy
+ */
+ if (params->phy[INT_PHY].read_status)
+ params->phy[INT_PHY].read_status(
+ &params->phy[INT_PHY],
+ params, vars);
+ /*
+	 * The INT_PHY flow control resides in the vars. This includes
+	 * the case where the speed or flow control are not set to AUTO.
+	 * Otherwise, the active external phy flow control result is set
+	 * to the vars. The ext_phy_line_speed is needed to check if the
+	 * speed is different between the internal phy and external phy.
+	 * This case may be the result of an intermediate link speed change.
+ */
+ if (active_external_phy > INT_PHY) {
+ vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
+ /*
+ * Link speed is taken from the XGXS. AN and FC result from
+ * the external phy.
+ */
+ vars->link_status |= phy_vars[active_external_phy].link_status;
+
+ /*
+		 * if active_external_phy is the first PHY and its link is up,
+		 * disable TX on the second external PHY
+ */
+ if (active_external_phy == EXT_PHY1) {
+ if (params->phy[EXT_PHY2].phy_specific_func) {
+ DP(NETIF_MSG_LINK,
+ "Disabling TX on EXT_PHY2\n");
+ params->phy[EXT_PHY2].phy_specific_func(
+ &params->phy[EXT_PHY2],
+ params, DISABLE_TX);
+ }
+ }
+
+ ext_phy_line_speed = phy_vars[active_external_phy].line_speed;
+ vars->duplex = phy_vars[active_external_phy].duplex;
+ if (params->phy[active_external_phy].supported &
+ SUPPORTED_FIBRE)
+ vars->link_status |= LINK_STATUS_SERDES_LINK;
+ else
+ vars->link_status &= ~LINK_STATUS_SERDES_LINK;
+ DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
+ active_external_phy);
+ }
+
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ if (params->phy[phy_index].flags &
+ FLAGS_REARM_LATCH_SIGNAL) {
+ bnx2x_rearm_latch_signal(bp, port,
+ phy_index ==
+ active_external_phy);
+ break;
+ }
+ }
+ DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
+ " ext_phy_line_speed = %d\n", vars->flow_ctrl,
+ vars->link_status, ext_phy_line_speed);
+ /*
+	 * Upon link speed change, set the NIG into drain mode. This
+	 * deals with a possible FIFO glitch due to the clk change when
+	 * the speed is decreased without a link down indicator
+ */
+
+ if (vars->phy_link_up) {
+ if (!(SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up &&
+ (ext_phy_line_speed != vars->line_speed)) {
+ DP(NETIF_MSG_LINK, "Internal link speed %d is"
+ " different than the external"
+ " link speed %d\n", vars->line_speed,
+ ext_phy_line_speed);
+ vars->phy_link_up = 0;
+ } else if (prev_line_speed != vars->line_speed) {
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
+ 0);
+ msleep(1);
+ }
+ }
+
+ /* anything 10 and over uses the bmac */
+ link_10g_plus = (vars->line_speed >= SPEED_10000);
+
+ bnx2x_link_int_ack(params, vars, link_10g_plus);
+
+ /*
+	 * In case the external phy link is up and the internal link is
+	 * down (probably not initialized yet, right after link
+	 * initialization), it needs to be initialized.
+	 * Note that after a link down-up as a result of a cable plug,
+	 * the xgxs link would probably become up again without the need
+	 * to initialize it
+ */
+ if (!(SINGLE_MEDIA_DIRECT(params))) {
+ DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
+ " init_preceding = %d\n", ext_phy_link_up,
+ vars->phy_link_up,
+ params->phy[EXT_PHY1].flags &
+ FLAGS_INIT_XGXS_FIRST);
+ if (!(params->phy[EXT_PHY1].flags &
+ FLAGS_INIT_XGXS_FIRST)
+ && ext_phy_link_up && !vars->phy_link_up) {
+ vars->line_speed = ext_phy_line_speed;
+ if (vars->line_speed < SPEED_1000)
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ else
+ vars->phy_flags &= ~PHY_SGMII_FLAG;
+
+ if (params->phy[INT_PHY].config_init)
+ params->phy[INT_PHY].config_init(
+ &params->phy[INT_PHY], params,
+ vars);
+ }
+ }
+ /*
+ * Link is up only if both local phy and external phy (in case of
+ * non-direct board) are up and no fault detected on active PHY.
+ */
+ vars->link_up = (vars->phy_link_up &&
+ (ext_phy_link_up ||
+ SINGLE_MEDIA_DIRECT(params)) &&
+ (phy_vars[active_external_phy].fault_detected == 0));
+
+ if (vars->link_up)
+ rc = bnx2x_update_link_up(params, vars, link_10g_plus);
+ else
+ rc = bnx2x_update_link_down(params, vars);
+
+ return rc;
+}
+
+/*****************************************************************************/
+/* External Phy section */
+/*****************************************************************************/
+void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
+{
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ msleep(1);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+}
+
+static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
+ u32 spirom_ver, u32 ver_addr)
+{
+ DP(NETIF_MSG_LINK, "FW version 0x%x:0x%x for port %d\n",
+ (u16)(spirom_ver>>16), (u16)spirom_ver, port);
+
+ if (ver_addr)
+ REG_WR(bp, ver_addr, spirom_ver);
+}
+
+static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 port)
+{
+ u16 fw_ver1, fw_ver2;
+
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2, &fw_ver2);
+ bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
+ phy->ver_addr);
+}
+
+static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ struct link_vars *vars)
+{
+ u16 val;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_STATUS, &val);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_STATUS, &val);
+ if (val & (1<<5))
+ vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+ if ((val & (1<<0)) == 0)
+ vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED;
+}
+
+/******************************************************************/
+/* common BCM8073/BCM8727 PHY SECTION */
+/******************************************************************/
+static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ if (phy->req_line_speed == SPEED_10 ||
+ phy->req_line_speed == SPEED_100) {
+ vars->flow_ctrl = phy->req_flow_ctrl;
+ return;
+ }
+
+ if (bnx2x_ext_phy_resolve_fc(phy, params, vars) &&
+ (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE)) {
+ u16 pause_result;
+ u16 ld_pause; /* local */
+ u16 lp_pause; /* link partner */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LD, &ld_pause);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LP, &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
+ pause_result |= (lp_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
+
+ bnx2x_pause_resolve(vars, pause_result);
+ DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
+ pause_result);
+ }
+}
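+
+/*
+ * Boot the 8073/8727 microcode from the external SPI ROM: pulse the
+ * EDC and microprocessor resets, wait the 100 ms the PHY spec
+ * requires, then poll ROM_VER1 (and, on the 8073, the M8051
+ * message-out register) until the firmware reports in or ~300
+ * attempts elapse.
+ */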
+static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 port)
+{
+ u32 count = 0;
+ u16 fw_ver1, fw_msgout;
+ int rc = 0;
+
+ /* Boot port from external ROM */
+ /* EDC grst */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ 0x0001);
+
+ /* ucode reboot and rst */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ 0x008c);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+
+ /* Reset internal microprocessor */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+
+ /* Release srst bit */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+
+ /* Delay 100ms per the PHY specifications */
+ msleep(100);
+
+	/* The 8073 sometimes takes longer to download */
+ do {
+ count++;
+ if (count > 300) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_8073_8727_external_rom_boot port %x:"
+ "Download failed. fw version = 0x%x\n",
+ port, fw_ver1);
+ rc = -EINVAL;
+ break;
+ }
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
+
+ msleep(1);
+ } while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
+ ((fw_msgout & 0xff) != 0x03 && (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
+
+ /* Clear ser_boot_ctl bit */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+ bnx2x_save_bcm_spirom_ver(bp, phy, port);
+
+ DP(NETIF_MSG_LINK,
+ "bnx2x_8073_8727_external_rom_boot port %x:"
+ "Download complete. fw version = 0x%x\n",
+ port, fw_ver1);
+
+ return rc;
+}
+
+/******************************************************************/
+/* BCM8073 PHY SECTION */
+/******************************************************************/
+static int bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+	/* This is required only for the 8073 A1, firmware version 0x102 */
+ u16 val;
+
+ /* Read 8073 HW revision*/
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_CHIP_REV, &val);
+
+ if (val != 1) {
+		/* The workaround applies only to the 8073 A1 */
+ return 0;
+ }
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2, &val);
+
+ /* SNR should be applied only for version 0x102 */
+ if (val != 0x102)
+ return 0;
+
+ return 1;
+}
+
+static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+	u16 val, cnt, cnt1;
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_CHIP_REV, &val);
+
+ if (val > 0) {
+		/* The XAUI workaround is needed only in 8073 A0 */
+ return 0;
+ }
+ /* XAUI workaround in 8073 A0: */
+
+ /*
+ * After loading the boot ROM and restarting Autoneg, poll
+ * Dev1, Reg $C820:
+ */
+
+ for (cnt = 0; cnt < 1000; cnt++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+ &val);
+ /*
+ * If bit [14] = 0 or bit [13] = 0, continue on with
+ * system initialization (XAUI work-around not required, as
+ * these bits indicate 2.5G or 1G link up).
+ */
+ if (!(val & (1<<14)) || !(val & (1<<13))) {
+ DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
+ return 0;
+ } else if (!(val & (1<<15))) {
+ DP(NETIF_MSG_LINK, "bit 15 went off\n");
+ /*
+			 * If bit 15 is 0, then poll Dev1, Reg $C841 until its
+ * MSB (bit15) goes to 1 (indicating that the XAUI
+ * workaround has completed), then continue on with
+ * system initialization.
+ */
+ for (cnt1 = 0; cnt1 < 1000; cnt1++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_XAUI_WA, &val);
+ if (val & (1<<15)) {
+ DP(NETIF_MSG_LINK,
+ "XAUI workaround has completed\n");
+ return 0;
+ }
+ msleep(3);
+ }
+ break;
+ }
+ msleep(3);
+ }
+ DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n");
+ return -EINVAL;
+}
+
+static void bnx2x_807x_force_10G(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+ /* Force KR or KX */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x000b);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0000);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
+}
+
+static void bnx2x_8073_set_pause_cl37(struct link_params *params,
+ struct bnx2x_phy *phy,
+ struct link_vars *vars)
+{
+ u16 cl37_val;
+ struct bnx2x *bp = params->bp;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &cl37_val);
+
+ cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ }
+ DP(NETIF_MSG_LINK,
+ "Ext phy AN advertize cl37 0x%x\n", cl37_val);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val);
+ msleep(500);
+}
+
+static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val = 0, tmp1;
+ u8 gpio_port;
+ DP(NETIF_MSG_LINK, "Init 8073\n");
+
+ if (CHIP_IS_E2(bp))
+ gpio_port = BP_PATH(bp);
+ else
+ gpio_port = params->port;
+ /* Restore normal power mode*/
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+
+ /* enable LASI */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004);
+
+ bnx2x_8073_set_pause_cl37(params, phy, vars);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
+
+ DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
+
+ /* Swap polarity if required - Must be done only in non-1G mode */
+ if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+ /* Configure the 8073 to swap _P and _N of the KR lines */
+ DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n");
+ /* 10G Rx/Tx and 1G Tx signal polarity swap */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL,
+ (val | (3<<9)));
+ }
+
+
+ /* Enable CL37 BAM */
+ if (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, &val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, val | 1);
+ DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+ }
+ if (params->loopback_mode == LOOPBACK_EXT) {
+ bnx2x_807x_force_10G(bp, phy);
+ DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
+ return 0;
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002);
+ }
+ if (phy->req_line_speed != SPEED_AUTO_NEG) {
+ if (phy->req_line_speed == SPEED_10000) {
+ val = (1<<7);
+ } else if (phy->req_line_speed == SPEED_2500) {
+ val = (1<<5);
+ /*
+ * Note that 2.5G works only when used with 1G
+ * advertisement
+ */
+ } else
+ val = (1<<5);
+ } else {
+ val = 0;
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ val |= (1<<7);
+
+ /* Note that 2.5G works only when used with 1G advertisement */
+ if (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
+ val |= (1<<5);
+ DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val);
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1);
+
+ if (((phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
+ (phy->req_line_speed == SPEED_AUTO_NEG)) ||
+ (phy->req_line_speed == SPEED_2500)) {
+ u16 phy_ver;
+ /* Allow 2.5G for A1 and above */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV,
+ &phy_ver);
+ DP(NETIF_MSG_LINK, "Add 2.5G\n");
+ if (phy_ver > 0)
+ tmp1 |= 1;
+ else
+ tmp1 &= 0xfffe;
+ } else {
+ DP(NETIF_MSG_LINK, "Disable 2.5G\n");
+ tmp1 &= 0xfffe;
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1);
+ /* Add support for CL37 (passive mode) II */
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD,
+ (tmp1 | ((phy->req_duplex == DUPLEX_FULL) ?
+ 0x20 : 0x40)));
+
+ /* Add support for CL37 (passive mode) III */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+
+ /*
+	 * The SNR will improve about 2dB by changing the BW and FFE main
+	 * tap. The rest of the commands are executed after the link is up.
+	 * Change the FFE main cursor to 5 in the EDC register
+ */
+ if (bnx2x_8073_is_snr_needed(bp, phy))
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
+ 0xFB0C);
+
+	/* Enable FEC (Forward Error Correction) Request in the AN */
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1);
+ tmp1 |= (1<<15);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1);
+
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+
+ /* Restart autoneg */
+ msleep(500);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+ DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n",
+ ((val & (1<<5)) > 0), ((val & (1<<7)) > 0));
+ return 0;
+}
+
+static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 link_up = 0;
+ u16 val1, val2;
+ u16 link_status = 0;
+ u16 an1000_status = 0;
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+
+ DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1);
+
+ /* clear the interrupt LASI status register */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", val2, val1);
+ /* Clear MSG-OUT */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
+
+ /* Check the LASI */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
+
+ DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
+
+ /* Check the link status */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+ DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ link_up = ((val1 & 4) == 4);
+ DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
+
+ if (link_up &&
+ ((phy->req_line_speed != SPEED_10000))) {
+ if (bnx2x_8073_xaui_wa(bp, phy) != 0)
+ return 0;
+ }
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
+
+ /* Check the link status on 1.1.2 */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x,"
+ "an_link_status=0x%x\n", val2, val1, an1000_status);
+
+ link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
+ if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
+ /*
+		 * The SNR will improve about 2dB by changing the BW and FFE
+		 * main tap. The 1st write to change the FFE main tap is done
+		 * before restarting AN. Change the PLL Bandwidth in the EDC
+		 * register
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
+ 0x26BC);
+
+ /* Change CDR Bandwidth in EDC register */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CDR_BANDWIDTH,
+ 0x0333);
+ }
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+ &link_status);
+
+	/* Bits 0..2 --> speed detected, bits 13..15 --> link is down */
+ if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_10000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+ params->port);
+ } else if ((link_status & (1<<1)) && (!(link_status & (1<<14)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_2500;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 2.5G\n",
+ params->port);
+ } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_1000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
+ params->port);
+ } else {
+ link_up = 0;
+ DP(NETIF_MSG_LINK, "port %x: External link is down\n",
+ params->port);
+ }
+
+ if (link_up) {
+ /* Swap polarity if required */
+ if (params->lane_config &
+ PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+ /* Configure the 8073 to swap P and N of the KR lines */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_XS_DEVAD,
+ MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
+ /*
+ * Set bit 3 to invert Rx in 1G mode and clear this bit
+			 * when it is in 10G mode.
+ */
+ if (vars->line_speed == SPEED_1000) {
+ DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
+ "the 8073\n");
+ val1 |= (1<<3);
+ } else
+ val1 &= ~(1<<3);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_XS_DEVAD,
+ MDIO_XS_REG_8073_RX_CTRL_PCIE,
+ val1);
+ }
+ bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+ bnx2x_8073_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
+ }
+ return link_up;
+}
+
+static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 gpio_port;
+ if (CHIP_IS_E2(bp))
+ gpio_port = BP_PATH(bp);
+ else
+ gpio_port = params->port;
+ DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
+ gpio_port);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
+}
+
+/******************************************************************/
+/* BCM8705 PHY SECTION */
+/******************************************************************/
+static int bnx2x_8705_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "init 8705\n");
+ /* Restore normal power mode*/
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ /* HW reset */
+ bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 0x7fbf);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CMU_PLL_BYPASS, 0x0100);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1);
+ /* BCM8705 doesn't have microcode, hence the 0 */
+ bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0);
+ return 0;
+}
+
+static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u8 link_up = 0;
+ u16 val1, rx_sd;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "read status 8705\n");
+ bnx2x_cl45_read(bp, phy,
+ MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, 0xc809, &val1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, 0xc809, &val1);
+
+ DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1);
+ link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) && ((val1 & (1<<8)) == 0));
+ if (link_up) {
+ vars->line_speed = SPEED_10000;
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ }
+ return link_up;
+}
+
+/******************************************************************/
+/* SFP+ module Section */
+/******************************************************************/
+static void bnx2x_set_disable_pmd_transmit(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 pmd_dis)
+{
+ struct bnx2x *bp = params->bp;
+ /*
+ * Disable transmitter only for bootcodes which can enable it afterwards
+ * (for D3 link)
+ */
+ if (pmd_dis) {
+ if (params->feature_config_flags &
+ FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED)
+ DP(NETIF_MSG_LINK, "Disabling PMD transmitter\n");
+ else {
+ DP(NETIF_MSG_LINK, "NOT disabling PMD transmitter\n");
+ return;
+ }
+ } else
+ DP(NETIF_MSG_LINK, "Enabling PMD transmitter\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_DISABLE, pmd_dis);
+}
+
+static u8 bnx2x_get_gpio_port(struct link_params *params)
+{
+ u8 gpio_port;
+ u32 swap_val, swap_override;
+ struct bnx2x *bp = params->bp;
+ if (CHIP_IS_E2(bp))
+ gpio_port = BP_PATH(bp);
+ else
+ gpio_port = params->port;
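+	/*
+	 * Apply the NIG port-swap strap: when both the swap value and the
+	 * strap override are set, the GPIO port is inverted.
+	 */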
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ return gpio_port ^ (swap_val && swap_override);
+}
+
+static void bnx2x_sfp_e1e2_set_transmitter(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 tx_en)
+{
+ u16 val;
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+ u32 tx_en_mode;
+
+	/* Disable/Enable transmitter (TX laser of the SFP+ module) */
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].sfp_ctrl)) &
+ PORT_HW_CFG_TX_LASER_MASK;
+ DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x "
+ "mode = %x\n", tx_en, port, tx_en_mode);
+ switch (tx_en_mode) {
+ case PORT_HW_CFG_TX_LASER_MDIO:
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ &val);
+
+ if (tx_en)
+ val &= ~(1<<15);
+ else
+ val |= (1<<15);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ val);
+ break;
+ case PORT_HW_CFG_TX_LASER_GPIO0:
+ case PORT_HW_CFG_TX_LASER_GPIO1:
+ case PORT_HW_CFG_TX_LASER_GPIO2:
+ case PORT_HW_CFG_TX_LASER_GPIO3:
+ {
+ u16 gpio_pin;
+ u8 gpio_port, gpio_mode;
+ if (tx_en)
+ gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
+ else
+ gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
+
+ gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
+ gpio_port = bnx2x_get_gpio_port(params);
+ bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+ break;
+ }
+ default:
+ DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode);
+ break;
+ }
+}
+
+static void bnx2x_sfp_set_transmitter(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 tx_en)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Setting SFP+ transmitter to %d\n", tx_en);
+ if (CHIP_IS_E3(bp))
+ bnx2x_sfp_e3_set_transmitter(params, phy, tx_en);
+ else
+ bnx2x_sfp_e1e2_set_transmitter(params, phy, tx_en);
+}
+
+static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 addr, u8 byte_cnt, u8 *o_buf)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val = 0;
+ u16 i;
+ if (byte_cnt > 16) {
+ DP(NETIF_MSG_LINK,
+		   "Reading from eeprom is limited to 16 bytes\n");
+ return -EINVAL;
+ }
+ /* Set the read command byte count */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+ (byte_cnt | 0xa000));
+
+ /* Set the read command address */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+ addr);
+
+ /* Activate read command */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ 0x2c0f);
+
+ /* Wait up to 500us for command complete status */
+ for (i = 0; i < 100; i++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
+ break;
+ udelay(5);
+ }
+
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
+ DP(NETIF_MSG_LINK,
+ "Got bad status 0x%x when reading from SFP+ EEPROM\n",
+ (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
+ return -EINVAL;
+ }
+
+ /* Read the buffer */
+ for (i = 0; i < byte_cnt; i++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
+ o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
+ }
+
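+	/* Wait until the two-wire interface returns to idle */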
+ for (i = 0; i < 100; i++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
+ return 0;
+ msleep(1);
+ }
+ return -EINVAL;
+}
+
+static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 addr, u8 byte_cnt,
+ u8 *o_buf)
+{
+ int rc = 0;
+ u8 i, j = 0, cnt = 0;
+ u32 data_array[4];
+ u16 addr32;
+ struct bnx2x *bp = params->bp;
+ if (byte_cnt > 16) {
+ DP(NETIF_MSG_LINK,
+ "Reading from eeprom is limited to 16 bytes\n");
+ return -EINVAL;
+ }
+
+ /* 4 byte aligned address */
+ addr32 = addr & (~0x3);
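+	/* Retry the read on failure, up to I2C_WA_RETRY_CNT times */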
+ do {
+ rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
+ data_array);
+ } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
+
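+	/* Copy out the requested bytes, skipping the alignment offset */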
+ if (rc == 0) {
+ for (i = (addr - addr32); i < byte_cnt + (addr - addr32); i++) {
+ o_buf[j] = *((u8 *)data_array + i);
+ j++;
+ }
+ }
+
+ return rc;
+}
+
+static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 addr, u8 byte_cnt, u8 *o_buf)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val, i;
+
+ if (byte_cnt > 16) {
+ DP(NETIF_MSG_LINK,
+		   "Reading from eeprom is limited to 16 bytes\n");
+ return -EINVAL;
+ }
+
+ /* Need to read from 1.8000 to clear it */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ &val);
+
+ /* Set the read command byte count */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+ ((byte_cnt < 2) ? 2 : byte_cnt));
+
+ /* Set the read command address */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+ addr);
+ /* Set the destination address */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ 0x8004,
+ MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
+
+ /* Activate read command */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ 0x8002);
+ /*
+ * Wait appropriate time for two-wire command to finish before
+ * polling the status register
+ */
+ msleep(1);
+
+ /* Wait up to 500us for command complete status */
+ for (i = 0; i < 100; i++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
+ break;
+ udelay(5);
+ }
+
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
+ DP(NETIF_MSG_LINK,
+ "Got bad status 0x%x when reading from SFP+ EEPROM\n",
+ (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
+ return -EFAULT;
+ }
+
+ /* Read the buffer */
+ for (i = 0; i < byte_cnt; i++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
+ o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
+ }
+
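+	/* Wait until the two-wire interface returns to idle */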
+ for (i = 0; i < 100; i++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+ MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
+ return 0;
+ msleep(1);
+ }
+
+ return -EINVAL;
+}
+
+int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
+ u8 byte_cnt, u8 *o_buf)
+{
+ int rc = -EINVAL;
+ switch (phy->type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+ rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
+ byte_cnt, o_buf);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+ rc = bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
+ byte_cnt, o_buf);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+ rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr,
+ byte_cnt, o_buf);
+ break;
+ }
+ return rc;
+}
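+
+/*
+ * Illustrative usage sketch (not part of the driver flow): callers such as
+ * bnx2x_verify_sfp_module() below fetch module fields through the
+ * dispatcher above, e.g. the vendor name; error handling elided.
+ *
+ *	u8 name[SFP_EEPROM_VENDOR_NAME_SIZE];
+ *
+ *	if (bnx2x_read_sfp_module_eeprom(phy, params,
+ *					 SFP_EEPROM_VENDOR_NAME_ADDR,
+ *					 SFP_EEPROM_VENDOR_NAME_SIZE,
+ *					 name) == 0)
+ *		DP(NETIF_MSG_LINK, "vendor: %.*s\n",
+ *		   SFP_EEPROM_VENDOR_NAME_SIZE, name);
+ */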
+
+static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 *edc_mode)
+{
+ struct bnx2x *bp = params->bp;
+ u32 sync_offset = 0, phy_idx, media_types;
+ u8 val, check_limiting_mode = 0;
+ *edc_mode = EDC_MODE_LIMITING;
+
+ phy->media_type = ETH_PHY_UNSPECIFIED;
+ /* First check for copper cable */
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
+ SFP_EEPROM_CON_TYPE_ADDR,
+ 1,
+ &val) != 0) {
+ DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
+ return -EINVAL;
+ }
+
+ switch (val) {
+ case SFP_EEPROM_CON_TYPE_VAL_COPPER:
+ {
+ u8 copper_module_type;
+ phy->media_type = ETH_PHY_DA_TWINAX;
+ /*
+		 * Check if it's an active cable (includes SFP+ module)
+		 * or a passive cable
+ */
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
+ SFP_EEPROM_FC_TX_TECH_ADDR,
+ 1,
+ &copper_module_type) != 0) {
+ DP(NETIF_MSG_LINK,
+ "Failed to read copper-cable-type"
+ " from SFP+ EEPROM\n");
+ return -EINVAL;
+ }
+
+ if (copper_module_type &
+ SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
+ DP(NETIF_MSG_LINK, "Active Copper cable detected\n");
+ check_limiting_mode = 1;
+ } else if (copper_module_type &
+ SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
+ DP(NETIF_MSG_LINK,
+ "Passive Copper cable detected\n");
+ *edc_mode =
+ EDC_MODE_PASSIVE_DAC;
+ } else {
+ DP(NETIF_MSG_LINK,
+ "Unknown copper-cable-type 0x%x !!!\n",
+ copper_module_type);
+ return -EINVAL;
+ }
+ break;
+ }
+ case SFP_EEPROM_CON_TYPE_VAL_LC:
+ phy->media_type = ETH_PHY_SFP_FIBER;
+ DP(NETIF_MSG_LINK, "Optic module detected\n");
+ check_limiting_mode = 1;
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
+ val);
+ return -EINVAL;
+ }
+ sync_offset = params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].media_type);
+ media_types = REG_RD(bp, sync_offset);
+ /* Update media type for non-PMF sync */
+ for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+ if (&(params->phy[phy_idx]) == phy) {
+ media_types &= ~(PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
+ media_types |= ((phy->media_type &
+ PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
+ break;
+ }
+ }
+ REG_WR(bp, sync_offset, media_types);
+ if (check_limiting_mode) {
+ u8 options[SFP_EEPROM_OPTIONS_SIZE];
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
+ SFP_EEPROM_OPTIONS_ADDR,
+ SFP_EEPROM_OPTIONS_SIZE,
+ options) != 0) {
+ DP(NETIF_MSG_LINK,
+ "Failed to read Option field from module EEPROM\n");
+ return -EINVAL;
+ }
+ if ((options[0] & SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK))
+ *edc_mode = EDC_MODE_LINEAR;
+ else
+ *edc_mode = EDC_MODE_LIMITING;
+ }
+ DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
+ return 0;
+}
+
+/*
+ * This function reads the relevant field from the module (SFP+) and verifies
+ * it is compliant with this board
+ */
+static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u32 val, cmd;
+ u32 fw_resp, fw_cmd_param;
+ char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE+1];
+ char vendor_pn[SFP_EEPROM_PART_NO_SIZE+1];
+ phy->flags &= ~FLAGS_SFP_NOT_APPROVED;
+ val = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].config));
+ if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT) {
+ DP(NETIF_MSG_LINK, "NOT enforcing module verification\n");
+ return 0;
+ }
+
+ if (params->feature_config_flags &
+ FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY) {
+ /* Use specific phy request */
+ cmd = DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL;
+ } else if (params->feature_config_flags &
+ FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) {
+		/* Use first phy request only in case of non-dual media */
+ if (DUAL_MEDIA(params)) {
+ DP(NETIF_MSG_LINK,
+ "FW does not support OPT MDL verification\n");
+ return -EINVAL;
+ }
+ cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL;
+ } else {
+ /* No support in OPT MDL detection */
+ DP(NETIF_MSG_LINK,
+ "FW does not support OPT MDL verification\n");
+ return -EINVAL;
+ }
+
+ fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl);
+ fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param);
+ if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {
+ DP(NETIF_MSG_LINK, "Approved module\n");
+ return 0;
+ }
+
+ /* format the warning message */
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
+ SFP_EEPROM_VENDOR_NAME_ADDR,
+ SFP_EEPROM_VENDOR_NAME_SIZE,
+ (u8 *)vendor_name))
+ vendor_name[0] = '\0';
+ else
+ vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
+ SFP_EEPROM_PART_NO_ADDR,
+ SFP_EEPROM_PART_NO_SIZE,
+ (u8 *)vendor_pn))
+ vendor_pn[0] = '\0';
+ else
+ vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
+
+ netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected,"
+ " Port %d from %s part number %s\n",
+ params->port, vendor_name, vendor_pn);
+ phy->flags |= FLAGS_SFP_NOT_APPROVED;
+ return -EINVAL;
+}
+
+static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u8 val;
+ struct bnx2x *bp = params->bp;
+ u16 timeout;
+ /*
+ * Initialization time after hot-plug may take up to 300ms for
+	 * some PHY types (e.g. JDSU)
+ */
+
+ for (timeout = 0; timeout < 60; timeout++) {
+ if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
+ == 0) {
+ DP(NETIF_MSG_LINK,
+ "SFP+ module initialization took %d ms\n",
+ timeout * 5);
+ return 0;
+ }
+ msleep(5);
+ }
+ return -EINVAL;
+}
+
+static void bnx2x_8727_power_module(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+				    u8 is_power_up)
+{
+	/* Make sure GPIOs are not used for LED mode */
+ u16 val;
+ /*
+	 * In the GPIO register, bit 4 is used to determine if the GPIOs are
+	 * operating as INPUT or as OUTPUT. A value of 1 means input, 0 means
+	 * output.
+	 * Bits 0-1 hold the GPIOs value for OUTPUT when bit 4 is 0.
+	 * Bits 8-9 hold the GPIOs value for INPUT when bit 4 is 1,
+	 * where the 1st bit is the over-current indication (input only) and
+	 * the 2nd bit is for power (output only).
+	 *
+	 * When the NOC feature is disabled and power is up, set GPIO control
+	 * as input to enable listening for the over-current indication.
+ */
+ if (phy->flags & FLAGS_NOC)
+ return;
+ if (is_power_up)
+ val = (1<<4);
+ else
+ /*
+		 * Set GPIO control to OUTPUT, and set the power bit
+		 * according to is_power_up
+ */
+ val = (1<<1);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_GPIO_CTRL,
+ val);
+}
+
+static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 edc_mode)
+{
+ u16 cur_limiting_mode;
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ &cur_limiting_mode);
+ DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
+ cur_limiting_mode);
+
+ if (edc_mode == EDC_MODE_LIMITING) {
+ DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ EDC_MODE_LIMITING);
+	} else { /* LRM mode (default) */
+
+ DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
+
+ /*
+		 * Changing to LRM mode takes quite a few seconds, so do it only
+ * if current mode is limiting (default is LRM)
+ */
+ if (cur_limiting_mode != EDC_MODE_LIMITING)
+ return 0;
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_LRM_MODE,
+ 0);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ 0x128);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL0,
+ 0x4008);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_LRM_MODE,
+ 0xaaaa);
+ }
+ return 0;
+}
+
+static int bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 edc_mode)
+{
+ u16 phy_identifier;
+ u16 rom_ver2_val;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ &phy_identifier);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ (phy_identifier & ~(1<<9)));
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ &rom_ver2_val);
+	/* Keep the upper 8 bits, and set the lower 8 bits to the edc_mode */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ (phy_identifier | (1<<9)));
+
+ return 0;
+}
+
+static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u32 action)
+{
+ struct bnx2x *bp = params->bp;
+
+ switch (action) {
+ case DISABLE_TX:
+ bnx2x_sfp_set_transmitter(params, phy, 0);
+ break;
+ case ENABLE_TX:
+ if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
+ bnx2x_sfp_set_transmitter(params, phy, 1);
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
+ action);
+ return;
+ }
+}
+
+static void bnx2x_set_e1e2_module_fault_led(struct link_params *params,
+ u8 gpio_mode)
+{
+ struct bnx2x *bp = params->bp;
+
+ u32 fault_led_gpio = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl)) &
+ PORT_HW_CFG_FAULT_MODULE_LED_MASK;
+ switch (fault_led_gpio) {
+ case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
+ return;
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
+ {
+ u8 gpio_port = bnx2x_get_gpio_port(params);
+ u16 gpio_pin = fault_led_gpio -
+ PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
+ DP(NETIF_MSG_LINK, "Set fault module-detected led "
+ "pin %x port %x mode %x\n",
+ gpio_pin, gpio_port, gpio_mode);
+ bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+ }
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n",
+ fault_led_gpio);
+ }
+}
+
+static void bnx2x_set_e3_module_fault_led(struct link_params *params,
+ u8 gpio_mode)
+{
+ u32 pin_cfg;
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+ pin_cfg = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >>
+ PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT;
+ DP(NETIF_MSG_LINK, "Setting Fault LED to %d using pin cfg %d\n",
+ gpio_mode, pin_cfg);
+ bnx2x_set_cfg_pin(bp, pin_cfg, gpio_mode);
+}
+
+static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
+ u8 gpio_mode)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode);
+ if (CHIP_IS_E3(bp)) {
+ /*
+		 * Low ==> the SFP+ module is approved;
+		 * High ==> the SFP+ module is not on the approved vendor list
+ */
+ bnx2x_set_e3_module_fault_led(params, gpio_mode);
+ } else
+ bnx2x_set_e1e2_module_fault_led(params, gpio_mode);
+}
+
+static void bnx2x_warpcore_power_module(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 power)
+{
+ u32 pin_cfg;
+ struct bnx2x *bp = params->bp;
+
+ pin_cfg = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_PWR_DIS_MASK) >>
+ PORT_HW_CFG_E3_PWR_DIS_SHIFT;
+
+ if (pin_cfg == PIN_CFG_NA)
+ return;
+ DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
+ power, pin_cfg);
+ /*
+	 * Low ==> the corresponding SFP+ module is powered;
+	 * High ==> the SFP+ module is powered down
+ */
+ bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
+}
+
+static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ bnx2x_warpcore_power_module(params, phy, 0);
+}
+
+static void bnx2x_power_sfp_module(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 power)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Setting SFP+ power to %x\n", power);
+
+ switch (phy->type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+ bnx2x_8727_power_module(params->bp, phy, power);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+ bnx2x_warpcore_power_module(params, phy, power);
+ break;
+ default:
+ break;
+ }
+}
+
+static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u16 edc_mode)
+{
+ u16 val = 0;
+ u16 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
+ struct bnx2x *bp = params->bp;
+
+ u8 lane = bnx2x_get_warpcore_lane(phy, params);
+ /* This is a global register which controls all lanes */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
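+	/* Each lane owns a 4-bit nibble of this register; clear ours */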
+ val &= ~(0xf << (lane << 2));
+
+ switch (edc_mode) {
+ case EDC_MODE_LINEAR:
+ case EDC_MODE_LIMITING:
+ mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
+ break;
+ case EDC_MODE_PASSIVE_DAC:
+ mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC;
+ break;
+ default:
+ break;
+ }
+
+ val |= (mode << (lane << 2));
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, val);
+	/* A mandatory read-back */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
+
+ /* Restart microcode to re-read the new mode */
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+ bnx2x_warpcore_reset_lane(bp, phy, 0);
+}
+
+static void bnx2x_set_limiting_mode(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u16 edc_mode)
+{
+ switch (phy->type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+ bnx2x_8726_set_limiting_mode(params->bp, phy, edc_mode);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+ bnx2x_8727_set_limiting_mode(params->bp, phy, edc_mode);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+ bnx2x_warpcore_set_limiting_mode(params, phy, edc_mode);
+ break;
+ }
+}
+
+int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 edc_mode;
+ int rc = 0;
+
+ u32 val = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].config));
+
+ DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n",
+ params->port);
+ /* Power up module */
+ bnx2x_power_sfp_module(params, phy, 1);
+ if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
+ DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
+ return -EINVAL;
+ } else if (bnx2x_verify_sfp_module(phy, params) != 0) {
+ /* check SFP+ module compatibility */
+ DP(NETIF_MSG_LINK, "Module verification failed!!\n");
+ rc = -EINVAL;
+ /* Turn on fault module-detected led */
+ bnx2x_set_sfp_module_fault_led(params,
+ MISC_REGISTERS_GPIO_HIGH);
+
+		/* Check if the SFP+ module needs to be powered down */
+ if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) {
+ DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n");
+ bnx2x_power_sfp_module(params, phy, 0);
+ return rc;
+ }
+ } else {
+ /* Turn off fault module-detected led */
+ bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
+ }
+
+ /*
+ * Check and set limiting mode / LRM mode on 8726. On 8727 it
+ * is done automatically
+ */
+ bnx2x_set_limiting_mode(params, phy, edc_mode);
+
+ /*
+ * Enable transmit for this module if the module is approved, or
+ * if unapproved modules should also enable the Tx laser
+ */
+ if (rc == 0 ||
+ (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
+ bnx2x_sfp_set_transmitter(params, phy, 1);
+ else
+ bnx2x_sfp_set_transmitter(params, phy, 0);
+
+ return rc;
+}
+
+void bnx2x_handle_module_detect_int(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ struct bnx2x_phy *phy;
+ u32 gpio_val;
+ u8 gpio_num, gpio_port;
+ if (CHIP_IS_E3(bp))
+ phy = &params->phy[INT_PHY];
+ else
+ phy = &params->phy[EXT_PHY1];
+
+ if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base,
+ params->port, &gpio_num, &gpio_port) ==
+ -EINVAL) {
+ DP(NETIF_MSG_LINK, "Failed to get MOD_ABS interrupt config\n");
+ return;
+ }
+
+ /* Set valid module led off */
+ bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
+
+ /* Get current gpio val reflecting module plugged in / out*/
+ gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
+
+ /* Call the handling function in case module is detected */
+ if (gpio_val == 0) {
+ bnx2x_power_sfp_module(params, phy, 1);
+ bnx2x_set_gpio_int(bp, gpio_num,
+ MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
+ gpio_port);
+ if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
+ bnx2x_sfp_module_detection(phy, params);
+ else
+ DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+ } else {
+ u32 val = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].
+ config));
+ bnx2x_set_gpio_int(bp, gpio_num,
+ MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
+ gpio_port);
+ /*
+ * Module was plugged out.
+ * Disable transmit for this module
+ */
+ phy->media_type = ETH_PHY_NOT_PRESENT;
+ if (((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) ||
+ CHIP_IS_E3(bp))
+ bnx2x_sfp_set_transmitter(params, phy, 0);
+ }
+}
+
+/******************************************************************/
+/* Used by 8706 and 8727 */
+/******************************************************************/
+static void bnx2x_sfp_mask_fault(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 alarm_status_offset,
+ u16 alarm_ctrl_offset)
+{
+ u16 alarm_status, val;
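+	/* Read twice to clear the latched alarm status */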
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, alarm_status_offset,
+ &alarm_status);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, alarm_status_offset,
+ &alarm_status);
+ /* Mask or enable the fault event. */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val);
+ if (alarm_status & (1<<0))
+ val &= ~(1<<0);
+ else
+ val |= (1<<0);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val);
+}
+
+/******************************************************************/
+/* common BCM8706/BCM8726 PHY SECTION */
+/******************************************************************/
+static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u8 link_up = 0;
+ u16 val1, val2, rx_sd, pcs_status;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
+ /* Clear RX Alarm*/
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
+
+ bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
+ MDIO_PMA_LASI_TXCTRL);
+
+ /* clear LASI indication*/
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
+ DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &pcs_status);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
+
+ DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
+ " link_status 0x%x\n", rx_sd, pcs_status, val2);
+ /*
+ * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
+ * are set, or if the autoneg bit 1 is set
+ */
+ link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
+ if (link_up) {
+ if (val2 & (1<<1))
+ vars->line_speed = SPEED_1000;
+ else
+ vars->line_speed = SPEED_10000;
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
+ }
+
+ /* Capture 10G link fault. Read twice to clear stale value. */
+ if (vars->line_speed == SPEED_10000) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_TXSTAT, &val1);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_TXSTAT, &val1);
+ if (val1 & (1<<0))
+ vars->fault_detected = 1;
+ }
+
+ return link_up;
+}
+
+/******************************************************************/
+/* BCM8706 PHY SECTION */
+/******************************************************************/
+static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 tx_en_mode;
+ u16 cnt, val, tmp1;
+ struct bnx2x *bp = params->bp;
+
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ /* HW reset */
+ bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ /* Wait until fw is loaded */
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val);
+ if (val)
+ break;
+ msleep(10);
+ }
+ DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt);
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+ u8 i;
+ u16 reg;
+ for (i = 0; i < 4; i++) {
+ reg = MDIO_XS_8706_REG_BANK_RX0 +
+ i*(MDIO_XS_8706_REG_BANK_RX1 -
+ MDIO_XS_8706_REG_BANK_RX0);
+ bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, reg, &val);
+ /* Clear first 3 bits of the control */
+ val &= ~0x7;
+ /* Set control bits according to configuration */
+ val |= (phy->rx_preemphasis[i] & 0x7);
+ DP(NETIF_MSG_LINK, "Setting RX Equalizer to BCM8706"
+ " reg 0x%x <-- val 0x%x\n", reg, val);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, reg, val);
+ }
+ }
+ /* Force speed */
+ if (phy->req_line_speed == SPEED_10000) {
+ DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_DIGITAL_CTRL, 0x400);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
+ 0);
+ /* Arm LASI for link and Tx fault. */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 3);
+ } else {
+ /* Force 1Gbps using autoneg with 1G advertisement */
+
+ /* Allow CL37 through CL73 */
+ DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
+
+ /* Enable Full-Duplex advertisement on CL37 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LP, 0x0020);
+ /* Enable CL37 AN */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+ /* 1G support */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_ADV, (1<<5));
+
+ /* Enable clause 73 AN */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ 0x0400);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
+ 0x0004);
+ }
+ bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+
+ /*
+	 * If the TX laser is controlled by GPIO_0, do not let the PHY go into
+	 * low power mode while the TX laser is disabled
+ */
+
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl))
+ & PORT_HW_CFG_TX_LASER_MASK;
+
+ if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+ DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1);
+ tmp1 |= 0x1;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
+ }
+
+ return 0;
+}
+
+static int bnx2x_8706_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ return bnx2x_8706_8726_read_status(phy, params, vars);
+}
+
+/******************************************************************/
+/* BCM8726 PHY SECTION */
+/******************************************************************/
+static void bnx2x_8726_config_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n");
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001);
+}
+
+static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ /* Need to wait 100ms after reset */
+ msleep(100);
+
+ /* Micro controller re-boot */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x018B);
+
+ /* Set soft reset */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+
+ /* wait for 150ms for microcode load */
+ msleep(150);
+
+ /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+
+ msleep(200);
+ bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+}
+
+static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val1;
+ u8 link_up = bnx2x_8706_8726_read_status(phy, params, vars);
+ if (link_up) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+ &val1);
+ if (val1 & (1<<15)) {
+ DP(NETIF_MSG_LINK, "Tx is disabled\n");
+ link_up = 0;
+ vars->line_speed = 0;
+ }
+ }
+ return link_up;
+}
+
+static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
+
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ bnx2x_8726_external_rom_boot(phy, params);
+
+ /*
+	 * Need to call module detection on initialization, since the
+	 * detection triggered by actual module insertion might occur before
+	 * the driver is loaded; when the driver is loaded, it resets all
+	 * registers, including the transmitter
+ */
+ bnx2x_sfp_module_detection(phy, params);
+
+ if (phy->req_line_speed == SPEED_1000) {
+ DP(NETIF_MSG_LINK, "Setting 1G force\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x5);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ 0x400);
+ } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+ /* Set Flow control */
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, 0x0020);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+ /*
+ * Enable RX-ALARM control to receive interrupt for 1G speed
+ * change
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x4);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ 0x400);
+
+ } else { /* Default 10G. Set only LASI control */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 1);
+ }
+
+ /* Set TX PreEmphasis if needed */
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+ DP(NETIF_MSG_LINK,
+ "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n",
+ phy->tx_preemphasis[0],
+ phy->tx_preemphasis[1]);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8726_TX_CTRL1,
+ phy->tx_preemphasis[0]);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8726_TX_CTRL2,
+ phy->tx_preemphasis[1]);
+ }
+
+ return 0;
+}
+
+static void bnx2x_8726_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "bnx2x_8726_link_reset port %d\n", params->port);
+ /* Set serial boot control for external load */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL, 0x0001);
+}
+
+/******************************************************************/
+/* BCM8727 PHY SECTION */
+/******************************************************************/
+
+static void bnx2x_8727_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ struct bnx2x *bp = params->bp;
+ u16 led_mode_bitmask = 0;
+ u16 gpio_pins_bitmask = 0;
+ u16 val;
+	/* Only the NOC flavor requires setting the LED specifically */
+ if (!(phy->flags & FLAGS_NOC))
+ return;
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ led_mode_bitmask = 0;
+ gpio_pins_bitmask = 0x03;
+ break;
+ case LED_MODE_ON:
+ led_mode_bitmask = 0;
+ gpio_pins_bitmask = 0x02;
+ break;
+ case LED_MODE_OPER:
+ led_mode_bitmask = 0x60;
+ gpio_pins_bitmask = 0x11;
+ break;
+ }
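+	/* Update LED mode bits [6:4] of the PCS optional control register */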
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ &val);
+ val &= 0xff8f;
+ val |= led_mode_bitmask;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ val);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_GPIO_CTRL,
+ &val);
+ val &= 0xffe0;
+ val |= gpio_pins_bitmask;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_GPIO_CTRL,
+ val);
+}
+
+static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
+				struct link_params *params)
+{
+ u32 swap_val, swap_override;
+ u8 port;
+ /*
+ * The PHY reset is controlled by GPIO 1. Fake the port number
+ * to cancel the swap done in set_gpio()
+ */
+ struct bnx2x *bp = params->bp;
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ port = (swap_val && swap_override) ^ 1;
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+}
+
+static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 tx_en_mode;
+ u16 tmp1, val, mod_abs, tmp2;
+ u16 rx_alarm_ctrl_val;
+ u16 lasi_ctrl_val;
+ struct bnx2x *bp = params->bp;
+ /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
+
+ bnx2x_wait_reset_complete(bp, phy, params);
+	rx_alarm_ctrl_val = (1<<2) | (1<<5);
+ /* Should be 0x6 to enable XS on Tx side. */
+ lasi_ctrl_val = 0x0006;
+
+ DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
+ /* enable LASI */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ rx_alarm_ctrl_val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
+ 0);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val);
+
+ /*
+	 * Initially configure MOD_ABS to interrupt when the module is
+	 * present (bit 8)
+ */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+ /*
+ * Set EDC off by setting OPTXLOS signal input to low (bit 9).
+ * When the EDC is off it locks onto a reference clock and avoids
+ * becoming 'lost'
+ */
+ mod_abs &= ~(1<<8);
+ if (!(phy->flags & FLAGS_NOC))
+ mod_abs &= ~(1<<9);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+ /* Enable/Disable PHY transmitter output */
+ bnx2x_set_disable_pmd_transmit(params, phy, 0);
+
+ /* Make MOD_ABS give interrupt on change */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ &val);
+ val |= (1<<12);
+ if (phy->flags & FLAGS_NOC)
+ val |= (3<<5);
+
+ /*
+ * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
+	 * status, which reflects SFP+ module over-current
+ */
+ if (!(phy->flags & FLAGS_NOC))
+ val &= 0xff8f; /* Reset bits 4-6 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val);
+
+ bnx2x_8727_power_module(bp, phy, 1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
+
+ /* Set option 1G speed */
+ if (phy->req_line_speed == SPEED_1000) {
+ DP(NETIF_MSG_LINK, "Setting 1G force\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
+ DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
+ /*
+ * Power down the XAUI until link is up in case of dual-media
+ * and 1G
+ */
+ if (DUAL_MEDIA(params)) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, &val);
+ val |= (3<<10);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, val);
+ }
+ } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
+ } else {
+ /*
+		 * Since the 8727 has only a single reset pin, we need to set
+		 * the 10G registers even though they are the defaults
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
+ 0x0020);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
+ 0x0008);
+ }
+
+ /*
+ * Set 2-wire transfer rate of SFP+ module EEPROM
+	 * to 100kHz, since some DACs (direct attach cables) do
+	 * not work at 400kHz.
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
+ 0xa001);
+
+ /* Set TX PreEmphasis if needed */
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+ DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n",
+ phy->tx_preemphasis[0],
+ phy->tx_preemphasis[1]);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1,
+ phy->tx_preemphasis[0]);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL2,
+ phy->tx_preemphasis[1]);
+ }
+
+ /*
+	 * If the TX laser is controlled by GPIO_0, do not let the PHY go into
+	 * low power mode while the TX laser is disabled
+ */
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl))
+ & PORT_HW_CFG_TX_LASER_MASK;
+
+ if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+
+ DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2);
+ tmp2 |= 0x1000;
+ tmp2 &= 0xFFEF;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
+ }
+
+ return 0;
+}
+
+static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 mod_abs, rx_alarm_status;
+ u32 val = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].
+ config));
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+ if (mod_abs & (1<<8)) {
+
+ /* Module is absent */
+ DP(NETIF_MSG_LINK,
+		   "MOD_ABS indication shows module is absent\n");
+ phy->media_type = ETH_PHY_NOT_PRESENT;
+ /*
+ * 1. Set mod_abs to detect next module
+ * presence event
+ * 2. Set EDC off by setting OPTXLOS signal input to low
+ * (bit 9).
+ * When the EDC is off it locks onto a reference clock and
+ * avoids becoming 'lost'.
+ */
+ mod_abs &= ~(1<<8);
+ if (!(phy->flags & FLAGS_NOC))
+ mod_abs &= ~(1<<9);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+ /*
+ * Clear RX alarm since it stays up as long as
+ * the mod_abs wasn't changed
+ */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+
+ } else {
+ /* Module is present */
+ DP(NETIF_MSG_LINK,
+		   "MOD_ABS indication shows module is present\n");
+ /*
+		 * First disable the transmitter; if the module is OK,
+		 * module detection will re-enable it.
+		 * 1. Set mod_abs to detect the next module-absent event (bit 8)
+		 * 2. Restore the default polarity of the OPRXLOS signal (bit 9)
+		 *    so that it correctly indicates the presence or absence of
+		 *    the Rx signal.
+ */
+ mod_abs |= (1<<8);
+ if (!(phy->flags & FLAGS_NOC))
+ mod_abs |= (1<<9);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+ /*
+		 * Clear RX alarm since it stays up as long as the mod_abs
+		 * hasn't changed. This needs to be done before calling
+		 * module detection, otherwise it will clear the link update
+		 * alarm
+ */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+
+ if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
+ bnx2x_sfp_set_transmitter(params, phy, 0);
+
+ if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
+ bnx2x_sfp_module_detection(phy, params);
+ else
+ DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+ }
+
+ DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
+ rx_alarm_status);
+ /* No need to check link status in case of module plugged in/out */
+}
+
+static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 link_up = 0, oc_port = params->port;
+ u16 link_status = 0;
+ u16 rx_alarm_status, lasi_ctrl, val1;
+
+ /* If PHY is not initialized, do not check link status */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
+ &lasi_ctrl);
+ if (!lasi_ctrl)
+ return 0;
+
+ /* Check the LASI on Rx */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT,
+ &rx_alarm_status);
+ vars->line_speed = 0;
+ DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", rx_alarm_status);
+
+ bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
+ MDIO_PMA_LASI_TXCTRL);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+
+ DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1);
+
+ /* Clear MSG-OUT */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
+
+ /*
+	 * If a module is present (and this is not the NOC flavor), check
+	 * for over-current
+ */
+ if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) {
+ /* Check over-current using 8727 GPIO0 input*/
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL,
+ &val1);
+
+ if ((val1 & (1<<8)) == 0) {
+ if (!CHIP_IS_E1x(bp))
+ oc_port = BP_PATH(bp) + (params->port << 1);
+ DP(NETIF_MSG_LINK,
+ "8727 Power fault has been detected on port %d\n",
+ oc_port);
+ netdev_err(bp->dev, "Error: Power fault on Port %d has"
+ " been detected and the power to "
+ "that SFP+ module has been removed"
+ " to prevent failure of the card."
+ " Please remove the SFP+ module and"
+ " restart the system to clear this"
+ " error.\n",
+ oc_port);
+ /* Disable all RX_ALARMs except for mod_abs */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_RXCTRL, (1<<5));
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
+ /* Wait for module_absent_event */
+ val1 |= (1<<8);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, val1);
+ /* Clear RX alarm */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+ return 0;
+ }
+ } /* Over current check */
+
+ /* When module absent bit is set, check module */
+ if (rx_alarm_status & (1<<5)) {
+ bnx2x_8727_handle_mod_abs(phy, params);
+ /* Enable all mod_abs and link detection bits */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ ((1<<5) | (1<<2)));
+ }
+ DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n");
+ bnx2x_8727_specific_func(phy, params, ENABLE_TX);
+ /* If transmitter is disabled, ignore false link up indication */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
+ if (val1 & (1<<15)) {
+ DP(NETIF_MSG_LINK, "Tx is disabled\n");
+ return 0;
+ }
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
+
+ /*
+ * Bits 0..2 --> speed detected,
+	 * Bits 13..15 --> link is down
+ */
+ if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_10000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+ params->port);
+ } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_1000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
+ params->port);
+ } else {
+ link_up = 0;
+ DP(NETIF_MSG_LINK, "port %x: External link is down\n",
+ params->port);
+ }
+
+ /* Capture 10G link fault. */
+ if (vars->line_speed == SPEED_10000) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_TXSTAT, &val1);
+
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_TXSTAT, &val1);
+
+		if (val1 & (1<<0))
+			vars->fault_detected = 1;
+ }
+
+ if (link_up) {
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
+ DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex);
+ }
+
+ if ((DUAL_MEDIA(params)) &&
+ (phy->req_line_speed == SPEED_1000)) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, &val1);
+ /*
+ * In case of dual-media board and 1G, power up the XAUI side,
+ * otherwise power it down. For 10G it is done automatically
+ */
+ if (link_up)
+ val1 &= ~(3<<10);
+ else
+ val1 |= (3<<10);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, val1);
+ }
+ return link_up;
+}
+
+static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+
+ /* Enable/Disable PHY transmitter output */
+ bnx2x_set_disable_pmd_transmit(params, phy, 1);
+
+ /* Disable Transmitter */
+ bnx2x_sfp_set_transmitter(params, phy, 0);
+ /* Clear LASI */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0);
+}
+
+/******************************************************************/
+/* BCM8481/BCM84823/BCM84833 PHY SECTION */
+/******************************************************************/
+static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u16 val, fw_ver1, fw_ver2, cnt;
+ u8 port;
+ struct bnx2x *bp = params->bp;
+
+ port = params->port;
+
+	/* For the 32-bit registers in 848xx, access via the MDIO2ARM interface. */
+	/* (1) Set register 0xc200_0014 (SPI_BRIDGE_CTRL_2) to 0x03000000 */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+
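+	/* Poll until the MDIO2ARM command completes (bit 0 set) */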
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ if (val & 1)
+ break;
+ udelay(5);
+ }
+ if (cnt == 100) {
+ DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n");
+ bnx2x_save_spirom_version(bp, port, 0,
+ phy->ver_addr);
+ return;
+ }
+
+	/* (2) Read register 0xc200_0000 (SPI_FW_STATUS) */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ if (val & 1)
+ break;
+ udelay(5);
+ }
+ if (cnt == 100) {
+ DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n");
+ bnx2x_save_spirom_version(bp, port, 0,
+ phy->ver_addr);
+ return;
+ }
+
+ /* lower 16 bits of the register SPI_FW_STATUS */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+ /* upper 16 bits of register SPI_FW_STATUS */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
+
+ bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
+ phy->ver_addr);
+}
+
+static void bnx2x_848xx_set_led(struct bnx2x *bp,
+ struct bnx2x_phy *phy)
+{
+ u16 val;
+
+ /* PHYC_CTL_LED_CTL */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+ val &= 0xFE00;
+ val |= 0x0092;
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x80);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x18);
+
+ /* Select activity source by Tx and Rx, as suggested by PHY AE */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0006);
+
+ /* Select the closest activity blink rate to that in 10/100/1000 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_BLINK,
+ 0);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
+	val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3 */
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
+
+ /* 'Interrupt Mask' */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ 0xFFFB, 0xFFFD);
+}
+
+static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 autoneg_val, an_1000_val, an_10_100_val;
+ u16 tmp_req_line_speed;
+
+ tmp_req_line_speed = phy->req_line_speed;
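+	/*
+	 * On the 84833, a 10G request is handled through autoneg, so treat
+	 * it as SPEED_AUTO_NEG here; the original request is restored at
+	 * the end of this function.
+	 */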
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ if (phy->req_line_speed == SPEED_10000)
+ phy->req_line_speed = SPEED_AUTO_NEG;
+
+ /*
+ * This phy uses the NIG latch mechanism since link indication
+	 * arrives through its LED4 and not via its LASI signal, so we get a
+	 * steady signal instead of clear-on-read
+ */
+ bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
+ 1 << NIG_LATCH_BC_ENABLE_MI_INT);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
+
+ bnx2x_848xx_set_led(bp, phy);
+
+ /* set 1000 speed advertisement */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
+ &an_1000_val);
+
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_AN_ADV,
+ &an_10_100_val);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL,
+ &autoneg_val);
+ /* Disable forced speed */
+ autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
+ an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8));
+
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->req_line_speed == SPEED_1000)) {
+ an_1000_val |= (1<<8);
+ autoneg_val |= (1<<9 | 1<<12);
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_1000_val |= (1<<9);
+ DP(NETIF_MSG_LINK, "Advertising 1G\n");
+ } else
+ an_1000_val &= ~((1<<8) | (1<<9));
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
+ an_1000_val);
+
+ /* set 100 speed advertisement */
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) &&
+ (phy->supported &
+ (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full)))) {
+ an_10_100_val |= (1<<7);
+ /* Enable autoneg and restart autoneg for legacy speeds */
+ autoneg_val |= (1<<9 | 1<<12);
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_10_100_val |= (1<<8);
+ DP(NETIF_MSG_LINK, "Advertising 100M\n");
+ }
+ /* set 10 speed advertisement */
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) &&
+ (phy->supported &
+ (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full)))) {
+ an_10_100_val |= (1<<5);
+ autoneg_val |= (1<<9 | 1<<12);
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_10_100_val |= (1<<6);
+ DP(NETIF_MSG_LINK, "Advertising 10M\n");
+ }
+
+ /* Only 10/100 are allowed to work in FORCE mode */
+ if ((phy->req_line_speed == SPEED_100) &&
+ (phy->supported &
+ (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full))) {
+ autoneg_val |= (1<<13);
+		/* Enable AUTO-MDIX when autoneg is disabled */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
+ (1<<15 | 1<<9 | 7<<0));
+ DP(NETIF_MSG_LINK, "Setting 100M force\n");
+ }
+ if ((phy->req_line_speed == SPEED_10) &&
+ (phy->supported &
+ (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full))) {
+		/* Enable AUTO-MDIX when autoneg is disabled */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
+ (1<<15 | 1<<9 | 7<<0));
+ DP(NETIF_MSG_LINK, "Setting 10M force\n");
+ }
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_AN_ADV,
+ an_10_100_val);
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ autoneg_val |= (1<<8);
+
+ /*
+ * Always write this if this is not 84833.
+ * For 84833, write it only when it's a forced speed.
+ */
+ if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ ((autoneg_val & (1<<12)) == 0))
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
+
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
+ (phy->req_line_speed == SPEED_10000)) {
+ DP(NETIF_MSG_LINK, "Advertising 10G\n");
+		/* Restart autoneg for 10G */
+
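+		/*
+		 * 0x3200 in the Clause 45 AN control register: AN enable
+		 * (bit 12) | restart AN (bit 9) | extended next page
+		 * (bit 13)
+		 */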
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
+ 0x3200);
+ } else
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+ 1);
+
+ /* Save spirom version */
+ bnx2x_save_848xx_spirom_version(phy, params);
+
+ phy->req_line_speed = tmp_req_line_speed;
+
+ return 0;
+}
+
+static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ /* Restore normal power mode*/
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+
+ /* HW reset */
+ bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+ return bnx2x_848xx_cmn_config_init(phy, params, vars);
+}
+
+#define PHY84833_HDSHK_WAIT 300
+static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 idx;
+ u32 pair_swap;
+ u16 val;
+ u16 data;
+ struct bnx2x *bp = params->bp;
+ /* Do pair swap */
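+	/*
+	 * The 84833 firmware exposes a mailbox in its TOP_CFG scratch
+	 * registers: write CMD_OPEN_OVERRIDE, poll until the firmware
+	 * reports OPEN_FOR_CMDS, write the argument and the command,
+	 * then poll again for COMPLETE_PASS/COMPLETE_ERROR
+	 */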
+
+ /* Check for configuration. */
+ pair_swap = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
+ PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
+
+ if (pair_swap == 0)
+ return 0;
+
+ data = (u16)pair_swap;
+
+ /* Write CMD_OPEN_OVERRIDE to STATUS reg */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2,
+ PHY84833_CMD_OPEN_OVERRIDE);
+ for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
+ if (val == PHY84833_CMD_OPEN_FOR_CMDS)
+ break;
+ msleep(1);
+ }
+ if (idx >= PHY84833_HDSHK_WAIT) {
+ DP(NETIF_MSG_LINK, "Pairswap: FW not ready.\n");
+ return -EINVAL;
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG4,
+ data);
+ /* Issue pair swap command */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG0,
+ PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE);
+ for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
+ if ((val == PHY84833_CMD_COMPLETE_PASS) ||
+ (val == PHY84833_CMD_COMPLETE_ERROR))
+ break;
+ msleep(1);
+ }
+ if ((idx >= PHY84833_HDSHK_WAIT) ||
+ (val == PHY84833_CMD_COMPLETE_ERROR)) {
+ DP(NETIF_MSG_LINK, "Pairswap: override failed.\n");
+ return -EINVAL;
+ }
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2,
+ PHY84833_CMD_CLEAR_COMPLETE);
+ DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data);
+ return 0;
+}
+
+static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 chip_id)
+{
+ u32 reset_pin[2];
+ u32 idx;
+ u8 reset_gpios;
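+	/*
+	 * Each path's shmem names the pin wired to the 84833 reset;
+	 * combine both into a single GPIO bitmask so the PHYs on both
+	 * paths can be reset together
+	 */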
+ if (CHIP_IS_E3(bp)) {
+ /* Assume that these will be GPIOs, not EPIOs. */
+ for (idx = 0; idx < 2; idx++) {
+ /* Map config param to register bit. */
+ reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[0].e3_cmn_pin_cfg));
+ reset_pin[idx] = (reset_pin[idx] &
+ PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+ PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+ reset_pin[idx] -= PIN_CFG_GPIO0_P0;
+ reset_pin[idx] = (1 << reset_pin[idx]);
+ }
+ reset_gpios = (u8)(reset_pin[0] | reset_pin[1]);
+ } else {
+		/* E2: read from a different shmem location. */
+ for (idx = 0; idx < 2; idx++) {
+ reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[0].default_cfg));
+ reset_pin[idx] &= PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK;
+ reset_pin[idx] -= PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0;
+ reset_pin[idx] >>= PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT;
+ reset_pin[idx] = (1 << reset_pin[idx]);
+ }
+ reset_gpios = (u8)(reset_pin[0] | reset_pin[1]);
+ }
+
+ return reset_gpios;
+}
+
+static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 reset_gpios;
+ u32 other_shmem_base_addr = REG_RD(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ other_shmem_base_addr));
+
+ u32 shmem_base_path[2];
+ shmem_base_path[0] = params->shmem_base;
+ shmem_base_path[1] = other_shmem_base_addr;
+
+ reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path,
+ params->chip_id);
+
+ bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ udelay(10);
+ DP(NETIF_MSG_LINK, "84833 hw reset on pin values 0x%x\n",
+ reset_gpios);
+
+ return 0;
+}
+
+static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 chip_id)
+{
+ u8 reset_gpios;
+
+ reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
+
+ bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ udelay(10);
+ bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ msleep(800);
+ DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
+ reset_gpios);
+
+ return 0;
+}
+
+#define PHY84833_CONSTANT_LATENCY 1193
+static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port, initialize = 1;
+ u16 val;
+ u16 temp;
+ u32 actual_phy_selection, cms_enable, idx;
+ int rc = 0;
+
+ msleep(1);
+
+ if (!(CHIP_IS_E1(bp)))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
+
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
+ } else {
+ /* MDIO reset */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL, 0x8000);
+ /* Bring PHY out of super isolate mode */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+ val &= ~MDIO_84833_SUPER_ISOLATE;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+ }
+
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ /* Wait for GPHY to come out of reset */
+ msleep(50);
+
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ bnx2x_84833_pair_swap_cfg(phy, params, vars);
+
+	/*
+	 * BCM84823 requires the XGXS link to come up first at 10G for
+	 * normal behavior
+	 */
+ temp = vars->line_speed;
+ vars->line_speed = SPEED_10000;
+ bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
+ bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
+ vars->line_speed = temp;
+
+	/* Set dual-media configuration according to the board configuration */
+
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_MEDIA, &val);
+ val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
+ MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
+ MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
+ MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK |
+ MDIO_CTL_REG_84823_MEDIA_FIBER_1G);
+
+ if (CHIP_IS_E3(bp)) {
+ val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
+ MDIO_CTL_REG_84823_MEDIA_LINE_MASK);
+ } else {
+ val |= (MDIO_CTL_REG_84823_CTRL_MAC_XFI |
+ MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L);
+ }
+
+ actual_phy_selection = bnx2x_phy_selection(params);
+
+ switch (actual_phy_selection) {
+ case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+		/* Do nothing; this is effectively like priority copper */
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+ /* Do nothing here. The first PHY won't be initialized at all */
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+ val |= MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN;
+ initialize = 0;
+ break;
+ }
+ if (params->phy[EXT_PHY2].req_line_speed == SPEED_1000)
+ val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
+
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_MEDIA, val);
+ DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
+ params->multi_phy_config, val);
+
+ /* AutogrEEEn */
+ if (params->feature_config_flags &
+ FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
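+		/*
+		 * AutogrEEEn uses the same scratch-register mailbox as the
+		 * pair-swap flow: wait for the firmware, program the EEE
+		 * mode and latency, issue SET_EEE_MODE, poll for completion
+		 */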
+ /* Ensure that f/w is ready */
+ for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
+ if (val == PHY84833_CMD_OPEN_FOR_CMDS)
+ break;
+ usleep_range(1000, 1000);
+ }
+ if (idx >= PHY84833_HDSHK_WAIT) {
+ DP(NETIF_MSG_LINK, "AutogrEEEn: FW not ready.\n");
+ return -EINVAL;
+ }
+
+ /* Select EEE mode */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG3,
+ 0x2);
+
+ /* Set Idle and Latency */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG4,
+ PHY84833_CONSTANT_LATENCY + 1);
+
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_DATA3_REG,
+ PHY84833_CONSTANT_LATENCY + 1);
+
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_DATA4_REG,
+ PHY84833_CONSTANT_LATENCY);
+
+ /* Send EEE instruction to command register */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG0,
+ PHY84833_DIAG_CMD_SET_EEE_MODE);
+
+ /* Ensure that the command has completed */
+ for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
+ if ((val == PHY84833_CMD_COMPLETE_PASS) ||
+ (val == PHY84833_CMD_COMPLETE_ERROR))
+ break;
+ usleep_range(1000, 1000);
+ }
+ if ((idx >= PHY84833_HDSHK_WAIT) ||
+ (val == PHY84833_CMD_COMPLETE_ERROR)) {
+ DP(NETIF_MSG_LINK, "AutogrEEEn: command failed.\n");
+ return -EINVAL;
+ }
+
+ /* Reset command handler */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2,
+ PHY84833_CMD_CLEAR_COMPLETE);
+ }
+
+ if (initialize)
+ rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
+ else
+ bnx2x_save_848xx_spirom_version(phy, params);
+	/* The 84833 handles this internally, so this setting is 84823-only. */
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
+ cms_enable = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_CMS_MASK;
+
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
+ if (cms_enable)
+ val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
+ else
+ val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_USER_CTRL_REG, val);
+ }
+
+ return rc;
+}
+
+static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val, val1, val2;
+ u8 link_up = 0;
+
+ /* Check 10G-BaseT link status */
+ /* Check PMD signal ok */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, 0xFFFA, &val1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
+ &val2);
+ DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
+
+ /* Check link 10G */
+ if (val2 & (1<<11)) {
+ vars->line_speed = SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
+ link_up = 1;
+ bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+ } else { /* Check Legacy speed link */
+ u16 legacy_status, legacy_speed;
+
+ /* Enable expansion register 0x42 (Operation mode status) */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf42);
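+		/*
+		 * 0xf42 presumably encodes "expansion register 0x42" plus an
+		 * enable field in the upper nibble of the access register
+		 */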
+
+ /* Get legacy speed operation status */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
+ &legacy_status);
+
+ DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n",
+ legacy_status);
+ link_up = ((legacy_status & (1<<11)) == (1<<11));
+ if (link_up) {
+ legacy_speed = (legacy_status & (3<<9));
+ if (legacy_speed == (0<<9))
+ vars->line_speed = SPEED_10;
+ else if (legacy_speed == (1<<9))
+ vars->line_speed = SPEED_100;
+ else if (legacy_speed == (2<<9))
+ vars->line_speed = SPEED_1000;
+ else /* Should not happen */
+ vars->line_speed = 0;
+
+ if (legacy_status & (1<<8))
+ vars->duplex = DUPLEX_FULL;
+ else
+ vars->duplex = DUPLEX_HALF;
+
+ DP(NETIF_MSG_LINK,
+ "Link is up in %dMbps, is_duplex_full= %d\n",
+ vars->line_speed,
+ (vars->duplex == DUPLEX_FULL));
+ /* Check legacy speed AN resolution */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_MII_STATUS,
+ &val);
+ if (val & (1<<5))
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_AN_EXPANSION,
+ &val);
+ if ((val & (1<<0)) == 0)
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
+ }
+ }
+ if (link_up) {
+ DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n",
+ vars->line_speed);
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ }
+
+ return link_up;
+}
+
+static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
+{
+ int status = 0;
+ u32 spirom_ver;
+ spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F);
+ status = bnx2x_format_ver(spirom_ver, str, len);
+ return status;
+}
+
+static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
+ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
+}
+
+static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ bnx2x_cl45_write(params->bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
+ bnx2x_cl45_write(params->bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1);
+}
+
+static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port;
+ u16 val16;
+
+ if (!(CHIP_IS_E1(bp)))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
+
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
+ } else {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_CTL_DEVAD,
+ 0x400f, &val16);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL, 0x800);
+ }
+}
+
+static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val;
+ u8 port;
+
+ if (!(CHIP_IS_E1(bp)))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
+
+ switch (mode) {
+ case LED_MODE_OFF:
+
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", port);
+
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+
+ /* Set LED masks */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x0);
+
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+ }
+ break;
+ case LED_MODE_FRONT_PANEL_OFF:
+
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n",
+ port);
+
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+
+ /* Set LED masks */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x20);
+
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+ }
+ break;
+ case LED_MODE_ON:
+
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", port);
+
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+ /* Set control reg */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
+ val &= 0x8000;
+ val |= 0x2492;
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ val);
+
+ /* Set LED masks */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x20);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x20);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x0);
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x20);
+ }
+ break;
+
+ case LED_MODE_OPER:
+
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", port);
+
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+
+ /* Set control reg */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
+
+ if (!((val &
+ MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
+ >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
+ DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ 0xa492);
+ }
+
+ /* Set LED masks */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x10);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x80);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x98);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x40);
+
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x80);
+
+ /* Tell LED3 to blink on source */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
+ val &= ~(7<<6);
+ val |= (1<<6); /* A83B[8:6]= 1 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ val);
+ }
+ break;
+ }
+
+ /*
+ * This is a workaround for E3+84833 until autoneg
+ * restart is fixed in f/w
+ */
+ if (CHIP_IS_E3(bp)) {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_1, &val);
+ }
+}
+
+/******************************************************************/
+/* 54618SE PHY SECTION */
+/******************************************************************/
+static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port;
+ u16 autoneg_val, an_1000_val, an_10_100_val, fc_val, temp;
+ u32 cfg_pin;
+
+ DP(NETIF_MSG_LINK, "54618SE cfg init\n");
+ usleep_range(1000, 1000);
+
+	/*
+	 * This works with E3 only, no need to check the chip
+	 * before determining the port.
+	 */
+ port = params->port;
+
+ cfg_pin = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+ PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+ PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+
+ /* Drive pin high to bring the GPHY out of reset. */
+ bnx2x_set_cfg_pin(bp, cfg_pin, 1);
+
+ /* wait for GPHY to reset */
+ msleep(50);
+
+ /* reset phy */
+ bnx2x_cl22_write(bp, phy,
+ MDIO_PMA_REG_CTRL, 0x8000);
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+	/* Wait for GPHY to reset */
+ msleep(50);
+
+ /* Configure LED4: set to INTR (0x6). */
+ /* Accessing shadow register 0xe. */
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_LED_SEL2);
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ &temp);
+ temp &= ~(0xf << 4);
+ temp |= (0x6 << 4);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+ /* Configure INTR based on link status change. */
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_INTR_MASK,
+ ~MDIO_REG_INTR_MASK_LINK_STATUS);
+
+ /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_AUTO_DET_MED);
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ &temp);
+ temp |= MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD;
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+
+ /* Set up fc */
+	/* Refer to Table 28B-3 of the 802.3ab-1999 spec. */
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ fc_val = 0;
+ if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC)
+ fc_val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+
+ if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+ fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
+
+ /* read all advertisement */
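+	/* 0x09 = MII_CTRL1000, 0x04 = MII_ADVERTISE (standard Clause 22) */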
+ bnx2x_cl22_read(bp, phy,
+ 0x09,
+ &an_1000_val);
+
+ bnx2x_cl22_read(bp, phy,
+ 0x04,
+ &an_10_100_val);
+
+ bnx2x_cl22_read(bp, phy,
+ MDIO_PMA_REG_CTRL,
+ &autoneg_val);
+
+ /* Disable forced speed */
+ autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
+ an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8) | (1<<10) |
+ (1<<11));
+
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->req_line_speed == SPEED_1000)) {
+ an_1000_val |= (1<<8);
+ autoneg_val |= (1<<9 | 1<<12);
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_1000_val |= (1<<9);
+ DP(NETIF_MSG_LINK, "Advertising 1G\n");
+ } else
+ an_1000_val &= ~((1<<8) | (1<<9));
+
+ bnx2x_cl22_write(bp, phy,
+ 0x09,
+ an_1000_val);
+ bnx2x_cl22_read(bp, phy,
+ 0x09,
+ &an_1000_val);
+
+ /* set 100 speed advertisement */
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
+ an_10_100_val |= (1<<7);
+ /* Enable autoneg and restart autoneg for legacy speeds */
+ autoneg_val |= (1<<9 | 1<<12);
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_10_100_val |= (1<<8);
+ DP(NETIF_MSG_LINK, "Advertising 100M\n");
+ }
+
+ /* set 10 speed advertisement */
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
+ an_10_100_val |= (1<<5);
+ autoneg_val |= (1<<9 | 1<<12);
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_10_100_val |= (1<<6);
+ DP(NETIF_MSG_LINK, "Advertising 10M\n");
+ }
+
+ /* Only 10/100 are allowed to work in FORCE mode */
+ if (phy->req_line_speed == SPEED_100) {
+ autoneg_val |= (1<<13);
+		/* Enable AUTO-MDIX when autoneg is disabled */
+ bnx2x_cl22_write(bp, phy,
+ 0x18,
+ (1<<15 | 1<<9 | 7<<0));
+ DP(NETIF_MSG_LINK, "Setting 100M force\n");
+ }
+ if (phy->req_line_speed == SPEED_10) {
+		/* Enable AUTO-MDIX when autoneg is disabled */
+ bnx2x_cl22_write(bp, phy,
+ 0x18,
+ (1<<15 | 1<<9 | 7<<0));
+ DP(NETIF_MSG_LINK, "Setting 10M force\n");
+ }
+
+ /* Check if we should turn on Auto-GrEEEn */
+ bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &temp);
+ if (temp == MDIO_REG_GPHY_ID_54618SE) {
+ if (params->feature_config_flags &
+ FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
+ temp = 6;
+ DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n");
+ } else {
+ temp = 0;
+ DP(NETIF_MSG_LINK, "Disabling Auto-GrEEEn\n");
+ }
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_ADDR_REG, MDIO_AN_DEVAD);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_DATA_REG,
+ MDIO_REG_GPHY_EEE_ADV);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_ADDR_REG,
+ (0x1 << 14) | MDIO_AN_DEVAD);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_DATA_REG,
+ temp);
+ }
+
+ bnx2x_cl22_write(bp, phy,
+ 0x04,
+ an_10_100_val | fc_val);
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ autoneg_val |= (1<<8);
+
+ bnx2x_cl22_write(bp, phy,
+ MDIO_PMA_REG_CTRL, autoneg_val);
+
+ return 0;
+}
+
+static void bnx2x_54618se_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "54618SE set link led (mode=%x)\n", mode);
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ case LED_MODE_OPER:
+ case LED_MODE_ON:
+ default:
+ break;
+ }
+}
+
+static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u32 cfg_pin;
+ u8 port;
+
+ /*
+ * In case of no EPIO routed to reset the GPHY, put it
+ * in low power mode.
+ */
+ bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800);
+ /*
+ * This works with E3 only, no need to check the chip
+ * before determining the port.
+ */
+ port = params->port;
+ cfg_pin = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+ PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+ PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+
+ /* Drive pin low to put GPHY in reset. */
+ bnx2x_set_cfg_pin(bp, cfg_pin, 0);
+}
+
+static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val;
+ u8 link_up = 0;
+ u16 legacy_status, legacy_speed;
+
+ /* Get speed operation status */
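+	/* 0x19 is the Broadcom auxiliary status summary register */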
+ bnx2x_cl22_read(bp, phy,
+ 0x19,
+ &legacy_status);
+ DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status);
+
+ /* Read status to clear the PHY interrupt. */
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_INTR_STATUS,
+ &val);
+
+ link_up = ((legacy_status & (1<<2)) == (1<<2));
+
+ if (link_up) {
+ legacy_speed = (legacy_status & (7<<8));
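+		/* Bits [10:8]: 1=10HD, 2=10FD, 3=100HD, 5=100FD, 6=1000HD, 7=1000FD */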
+ if (legacy_speed == (7<<8)) {
+ vars->line_speed = SPEED_1000;
+ vars->duplex = DUPLEX_FULL;
+ } else if (legacy_speed == (6<<8)) {
+ vars->line_speed = SPEED_1000;
+ vars->duplex = DUPLEX_HALF;
+ } else if (legacy_speed == (5<<8)) {
+ vars->line_speed = SPEED_100;
+ vars->duplex = DUPLEX_FULL;
+		} else if (legacy_speed == (3<<8)) {
+			/* (4<<8) would be 100Base-T4, omitted for now */
+ vars->line_speed = SPEED_100;
+ vars->duplex = DUPLEX_HALF;
+ } else if (legacy_speed == (2<<8)) {
+ vars->line_speed = SPEED_10;
+ vars->duplex = DUPLEX_FULL;
+ } else if (legacy_speed == (1<<8)) {
+ vars->line_speed = SPEED_10;
+ vars->duplex = DUPLEX_HALF;
+ } else /* Should not happen */
+ vars->line_speed = 0;
+
+ DP(NETIF_MSG_LINK,
+ "Link is up in %dMbps, is_duplex_full= %d\n",
+ vars->line_speed,
+ (vars->duplex == DUPLEX_FULL));
+
+ /* Check legacy speed AN resolution */
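+		/*
+		 * 0x01 = MII_BMSR (bit 5 = autoneg complete); 0x06 =
+		 * MII_EXPANSION (bit 0 clear implies parallel detection)
+		 */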
+ bnx2x_cl22_read(bp, phy,
+ 0x01,
+ &val);
+ if (val & (1<<5))
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+ bnx2x_cl22_read(bp, phy,
+ 0x06,
+ &val);
+ if ((val & (1<<0)) == 0)
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
+
+ DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n",
+ vars->line_speed);
+
+ /* Report whether EEE is resolved. */
+ bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &val);
+ if (val == MDIO_REG_GPHY_ID_54618SE) {
+ if (vars->link_status &
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
+ val = 0;
+ else {
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_ADDR_REG,
+ MDIO_AN_DEVAD);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_DATA_REG,
+ MDIO_REG_GPHY_EEE_RESOLVED);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_ADDR_REG,
+ (0x1 << 14) | MDIO_AN_DEVAD);
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_CL45_DATA_REG,
+ &val);
+ }
+ DP(NETIF_MSG_LINK, "EEE resolution: 0x%x\n", val);
+ }
+
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ }
+ return link_up;
+}
+
+static void bnx2x_54618se_config_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val;
+ u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+
+	DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 54618se\n");
+
+	/* Enable manual master/slave mode and set to master */
+ /* mii write 9 [bits set 11 12] */
+ bnx2x_cl22_write(bp, phy, 0x09, 3<<11);
+
+ /* forced 1G and disable autoneg */
+ /* set val [mii read 0] */
+ /* set val [expr $val & [bits clear 6 12 13]] */
+ /* set val [expr $val | [bits set 6 8]] */
+ /* mii write 0 $val */
+ bnx2x_cl22_read(bp, phy, 0x00, &val);
+ val &= ~((1<<6) | (1<<12) | (1<<13));
+ val |= (1<<6) | (1<<8);
+ bnx2x_cl22_write(bp, phy, 0x00, val);
+
+ /* Set external loopback and Tx using 6dB coding */
+ /* mii write 0x18 7 */
+ /* set val [mii read 0x18] */
+ /* mii write 0x18 [expr $val | [bits set 10 15]] */
+ bnx2x_cl22_write(bp, phy, 0x18, 7);
+ bnx2x_cl22_read(bp, phy, 0x18, &val);
+ bnx2x_cl22_write(bp, phy, 0x18, val | (1<<10) | (1<<15));
+
+ /* This register opens the gate for the UMAC despite its name */
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
+
+	/*
+	 * Maximum Frame Length (RW). Defines a 14-bit maximum frame
+	 * length used by the MAC receive logic to check frames.
+	 */
+	REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); /* 0x2710 = 10000 */
+}
+
+/******************************************************************/
+/* SFX7101 PHY SECTION */
+/******************************************************************/
+static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ /* SFX7101_XGXS_TEST1 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
+}
+
+static int bnx2x_7101_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u16 fw_ver1, fw_ver2, val;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Setting the SFX7101 LASI indication\n");
+
+ /* Restore normal power mode*/
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ /* HW reset */
+ bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1);
+ DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
+
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+ /* Restart autoneg */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val);
+ val |= 0x200;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val);
+
+ /* Save spirom version */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER1, &fw_ver1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2);
+ bnx2x_save_spirom_version(bp, params->port,
+ (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr);
+ return 0;
+}
+
+static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 link_up;
+ u16 val1, val2;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+ DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n",
+ val2, val1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
+ val2, val1);
+ link_up = ((val1 & 4) == 4);
+	/* If link is up, print the AN outcome of the SFX7101 PHY */
+ if (link_up) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
+ &val2);
+ vars->line_speed = SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
+ DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
+ val2, (val2 & (1<<14)));
+ bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ }
+ return link_up;
+}
+
+static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+{
+ if (*len < 5)
+ return -EINVAL;
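+	/* Emit the four raw version bytes LSB first, NUL-terminated */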
+ str[0] = (spirom_ver & 0xFF);
+ str[1] = (spirom_ver & 0xFF00) >> 8;
+ str[2] = (spirom_ver & 0xFF0000) >> 16;
+ str[3] = (spirom_ver & 0xFF000000) >> 24;
+ str[4] = '\0';
+ *len -= 5;
+ return 0;
+}
+
+void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+ u16 val, cnt;
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET, &val);
+
+ for (cnt = 0; cnt < 10; cnt++) {
+ msleep(50);
+ /* Writes a self-clearing reset */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET,
+ (val | (1<<15)));
+ /* Wait for clear */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET, &val);
+
+ if ((val & (1<<15)) == 0)
+ break;
+ }
+}
+
+static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
+				struct link_params *params)
+{
+ /* Low power mode is controlled by GPIO 2 */
+ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+ /* The PHY reset is controlled by GPIO 1 */
+ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+}
+
+static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ u16 val = 0;
+ struct bnx2x *bp = params->bp;
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ val = 2;
+ break;
+ case LED_MODE_ON:
+ val = 1;
+ break;
+ case LED_MODE_OPER:
+ val = 0;
+ break;
+ }
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7107_LINK_LED_CNTL,
+ val);
+}
+
+/******************************************************************/
+/* STATIC PHY DECLARATION */
+/******************************************************************/
+
+static struct bnx2x_phy phy_null = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
+ .addr = 0,
+ .def_md_devad = 0,
+ .flags = FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = 0,
+ .media_type = ETH_PHY_NOT_PRESENT,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)NULL,
+ .read_status = (read_status_t)NULL,
+ .link_reset = (link_reset_t)NULL,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_serdes = {
+ .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_xgxs_config_init,
+ .read_status = (read_status_t)bnx2x_link_settings_status,
+ .link_reset = (link_reset_t)bnx2x_int_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_xgxs = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_CX4,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_xgxs_config_init,
+ .read_status = (read_status_t)bnx2x_link_settings_status,
+ .link_reset = (link_reset_t)bnx2x_int_link_reset,
+ .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_warpcore = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_HW_LOCK_REQUIRED,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_UNSPECIFIED,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+	.req_duplex = 0,
+	.rsrv = 0,
+ .config_init = (config_init_t)bnx2x_warpcore_config_init,
+ .read_status = (read_status_t)bnx2x_warpcore_read_status,
+ .link_reset = (link_reset_t)bnx2x_warpcore_link_reset,
+ .config_loopback = (config_loopback_t)bnx2x_set_warpcore_loopback,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)bnx2x_warpcore_hw_reset,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_7101 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_7101_config_init,
+ .read_status = (read_status_t)bnx2x_7101_read_status,
+ .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
+ .config_loopback = (config_loopback_t)bnx2x_7101_config_loopback,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_7101_format_ver,
+ .hw_reset = (hw_reset_t)bnx2x_7101_hw_reset,
+ .set_link_led = (set_link_led_t)bnx2x_7101_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8073 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_HW_LOCK_REQUIRED,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_KR,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8073_config_init,
+ .read_status = (read_status_t)bnx2x_8073_read_status,
+ .link_reset = (link_reset_t)bnx2x_8073_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8705 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_XFP_FIBER,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8705_config_init,
+ .read_status = (read_status_t)bnx2x_8705_read_status,
+ .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_null_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+static struct bnx2x_phy phy_8706 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_SFP_FIBER,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8706_config_init,
+ .read_status = (read_status_t)bnx2x_8706_read_status,
+ .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8726 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = (FLAGS_HW_LOCK_REQUIRED |
+ FLAGS_INIT_XGXS_FIRST),
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_NOT_PRESENT,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8726_config_init,
+ .read_status = (read_status_t)bnx2x_8726_read_status,
+ .link_reset = (link_reset_t)bnx2x_8726_link_reset,
+ .config_loopback = (config_loopback_t)bnx2x_8726_config_loopback,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8727 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_NOT_PRESENT,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8727_config_init,
+ .read_status = (read_status_t)bnx2x_8727_read_status,
+ .link_reset = (link_reset_t)bnx2x_8727_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
+ .hw_reset = (hw_reset_t)bnx2x_8727_hw_reset,
+ .set_link_led = (set_link_led_t)bnx2x_8727_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func
+};
+
+static struct bnx2x_phy phy_8481 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8481_config_init,
+ .read_status = (read_status_t)bnx2x_848xx_read_status,
+ .link_reset = (link_reset_t)bnx2x_8481_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+ .hw_reset = (hw_reset_t)bnx2x_8481_hw_reset,
+ .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_84823 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_848x3_config_init,
+ .read_status = (read_status_t)bnx2x_848xx_read_status,
+ .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_84833 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_848x3_config_init,
+ .read_status = (read_status_t)bnx2x_848xx_read_status,
+ .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+ .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
+ .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_54618se = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+	.req_duplex = 0,
+	.rsrv = 0,
+ .config_init = (config_init_t)bnx2x_54618se_config_init,
+ .read_status = (read_status_t)bnx2x_54618se_read_status,
+ .link_reset = (link_reset_t)bnx2x_54618se_link_reset,
+ .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)bnx2x_54618se_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+/*****************************************************************/
+/* */
+/* Populate the phy according to the shmem configuration.       */
+/* Main function: bnx2x_populate_phy                            */
+/* */
+/*****************************************************************/
+
+static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
+ struct bnx2x_phy *phy, u8 port,
+ u8 phy_index)
+{
+ /* Get the 4 lanes xgxs config rx and tx */
+ u32 rx = 0, tx = 0, i;
+ for (i = 0; i < 2; i++) {
+		/*
+		 * INT_PHY and EXT_PHY1 share the same value location in the
+		 * shmem. When num_phys is greater than 1, this value
+		 * applies only to EXT_PHY1
+		 */
+ if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
+ rx = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
+
+ tx = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
+ } else {
+ rx = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+
+			tx = REG_RD(bp, shmem_base +
+				offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].xgxs_config2_tx[i<<1]));
+ }
+
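+		/* Each 32-bit shmem word packs two lanes; high half = even lane */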
+ phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
+ phy->rx_preemphasis[(i << 1) + 1] = (rx & 0xffff);
+
+ phy->tx_preemphasis[i << 1] = ((tx>>16) & 0xffff);
+ phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff);
+ }
+}
+
+static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base,
+ u8 phy_index, u8 port)
+{
+ u32 ext_phy_config = 0;
+ switch (phy_index) {
+ case EXT_PHY1:
+ ext_phy_config = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].external_phy_config));
+ break;
+ case EXT_PHY2:
+ ext_phy_config = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].external_phy_config2));
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Invalid phy_index %d\n", phy_index);
+ return -EINVAL;
+ }
+
+ return ext_phy_config;
+}
+
+static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
+ struct bnx2x_phy *phy)
+{
+ u32 phy_addr;
+ u32 chip_id;
+ u32 switch_cfg = (REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_feature_config[port].link_config)) &
+ PORT_FEATURE_CONNECTED_SWITCH_MASK);
+ chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;
+	DP(NETIF_MSG_LINK, "chip_id = 0x%x\n", chip_id);
+ if (USES_WARPCORE(bp)) {
+ u32 serdes_net_if;
+ phy_addr = REG_RD(bp,
+ MISC_REG_WC0_CTRL_PHY_ADDR);
+ *phy = phy_warpcore;
+ if (REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR) == 0x3)
+ phy->flags |= FLAGS_4_PORT_MODE;
+ else
+ phy->flags &= ~FLAGS_4_PORT_MODE;
+ /* Check Dual mode */
+ serdes_net_if = (REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[port].default_cfg)) &
+ PORT_HW_CFG_NET_SERDES_IF_MASK);
+ /*
+ * Set the appropriate supported and flags indications per
+ * interface type of the chip
+ */
+ switch (serdes_net_if) {
+ case PORT_HW_CFG_NET_SERDES_IF_SGMII:
+ phy->supported &= (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ phy->media_type = ETH_PHY_BASE_T;
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_XFI:
+ phy->media_type = ETH_PHY_XFP_FIBER;
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_SFI:
+ phy->supported &= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ phy->media_type = ETH_PHY_SFP_FIBER;
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_KR:
+ phy->media_type = ETH_PHY_KR;
+ phy->supported &= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
+ phy->media_type = ETH_PHY_KR;
+ phy->flags |= FLAGS_WC_DUAL_MODE;
+ phy->supported &= (SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_KR2:
+ phy->media_type = ETH_PHY_KR;
+ phy->flags |= FLAGS_WC_DUAL_MODE;
+ phy->supported &= (SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n",
+ serdes_net_if);
+ break;
+ }
+
+ /*
+ * Enable MDC/MDIO work-around for E3 A0 since free running MDC
+ * was not set as expected. For B0, ECO will be enabled so there
+ * won't be an issue there
+ */
+ if (CHIP_REV(bp) == CHIP_REV_Ax)
+ phy->flags |= FLAGS_MDC_MDIO_WA;
+ else
+ phy->flags |= FLAGS_MDC_MDIO_WA_B0;
+ } else {
+ switch (switch_cfg) {
+ case SWITCH_CFG_1G:
+ phy_addr = REG_RD(bp,
+ NIG_REG_SERDES0_CTRL_PHY_ADDR +
+ port * 0x10);
+ *phy = phy_serdes;
+ break;
+ case SWITCH_CFG_10G:
+ phy_addr = REG_RD(bp,
+ NIG_REG_XGXS0_CTRL_PHY_ADDR +
+ port * 0x18);
+ *phy = phy_xgxs;
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
+ return -EINVAL;
+ }
+ }
+ phy->addr = (u8)phy_addr;
+ phy->mdio_ctrl = bnx2x_get_emac_base(bp,
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH,
+ port);
+ if (CHIP_IS_E2(bp))
+ phy->def_md_devad = E2_DEFAULT_PHY_DEV_ADDR;
+ else
+ phy->def_md_devad = DEFAULT_PHY_DEV_ADDR;
+
+ DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n",
+ port, phy->addr, phy->mdio_ctrl);
+
+ bnx2x_populate_preemphasis(bp, shmem_base, phy, port, INT_PHY);
+ return 0;
+}
+
+static int bnx2x_populate_ext_phy(struct bnx2x *bp,
+ u8 phy_index,
+ u32 shmem_base,
+ u32 shmem2_base,
+ u8 port,
+ struct bnx2x_phy *phy)
+{
+ u32 ext_phy_config, phy_type, config2;
+ u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH;
+ ext_phy_config = bnx2x_get_ext_phy_config(bp, shmem_base,
+ phy_index, port);
+ phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+ /* Select the phy type */
+ switch (phy_type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED;
+ *phy = phy_8073;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+ *phy = phy_8705;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+ *phy = phy_8706;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+ *phy = phy_8726;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
+ /* BCM8727_NOC => BCM8727 no over current */
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+ *phy = phy_8727;
+ phy->flags |= FLAGS_NOC;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+ *phy = phy_8727;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
+ *phy = phy_8481;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
+ *phy = phy_84823;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+ *phy = phy_84833;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
+ *phy = phy_54618se;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+ *phy = phy_7101;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+ *phy = phy_null;
+ return -EINVAL;
+ default:
+ *phy = phy_null;
+ return 0;
+ }
+
+ phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
+ bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
+
+	/*
+	 * The shmem address of the phy version is located in different
+	 * structures. In case this structure is too old, do not set
+	 * the address
+	 */
+ config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
+ dev_info.shared_hw_config.config2));
+ if (phy_index == EXT_PHY1) {
+ phy->ver_addr = shmem_base + offsetof(struct shmem_region,
+ port_mb[port].ext_phy_fw_version);
+
+ /* Check specific mdc mdio settings */
+ if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
+ mdc_mdio_access = config2 &
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
+ } else {
+ u32 size = REG_RD(bp, shmem2_base);
+
+ if (size >
+ offsetof(struct shmem2_region, ext_phy_fw_version2)) {
+ phy->ver_addr = shmem2_base +
+ offsetof(struct shmem2_region,
+ ext_phy_fw_version2[port]);
+ }
+ /* Check specific mdc mdio settings */
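+		/*
+		 * The ACCESS2 field sits at a different bit offset; shift it
+		 * down to the ACCESS1 position so the same decoding applies
+		 */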
+ if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK)
+ mdc_mdio_access = (config2 &
+ SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) >>
+ (SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT -
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT);
+ }
+ phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
+
+	/*
+	 * In case the mdc/mdio access of the external phy differs from that
+	 * of the XGXS, a HW lock must be taken on each access to prevent one
+	 * port from interfering with another port's CL45 operations.
+	 */
+ if (mdc_mdio_access != SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH)
+ phy->flags |= FLAGS_HW_LOCK_REQUIRED;
+ DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n",
+ phy_type, port, phy_index);
+ DP(NETIF_MSG_LINK, " addr=0x%x, mdio_ctl=0x%x\n",
+ phy->addr, phy->mdio_ctrl);
+ return 0;
+}
+
+static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base,
+ u32 shmem2_base, u8 port, struct bnx2x_phy *phy)
+{
+ int status = 0;
+ phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN;
+ if (phy_index == INT_PHY)
+ return bnx2x_populate_int_phy(bp, shmem_base, port, phy);
+ status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base,
+ port, phy);
+ return status;
+}
+
+static void bnx2x_phy_def_cfg(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 phy_index)
+{
+ struct bnx2x *bp = params->bp;
+ u32 link_config;
+ /* Populate the default phy configuration for MF mode */
+ if (phy_index == EXT_PHY2) {
+ link_config = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].link_config2));
+ phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.
+ port_hw_config[params->port].speed_capability_mask2));
+ } else {
+ link_config = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].link_config));
+ phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.
+ port_hw_config[params->port].speed_capability_mask));
+ }
+ DP(NETIF_MSG_LINK,
+ "Default config phy idx %x cfg 0x%x speed_cap_mask 0x%x\n",
+ phy_index, link_config, phy->speed_cap_mask);
+
+ phy->req_duplex = DUPLEX_FULL;
+ switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+ case PORT_FEATURE_LINK_SPEED_10M_HALF:
+ phy->req_duplex = DUPLEX_HALF;
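+		/* fall through */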
+ case PORT_FEATURE_LINK_SPEED_10M_FULL:
+ phy->req_line_speed = SPEED_10;
+ break;
+ case PORT_FEATURE_LINK_SPEED_100M_HALF:
+ phy->req_duplex = DUPLEX_HALF;
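+		/* fall through */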
+ case PORT_FEATURE_LINK_SPEED_100M_FULL:
+ phy->req_line_speed = SPEED_100;
+ break;
+ case PORT_FEATURE_LINK_SPEED_1G:
+ phy->req_line_speed = SPEED_1000;
+ break;
+ case PORT_FEATURE_LINK_SPEED_2_5G:
+ phy->req_line_speed = SPEED_2500;
+ break;
+ case PORT_FEATURE_LINK_SPEED_10G_CX4:
+ phy->req_line_speed = SPEED_10000;
+ break;
+ default:
+ phy->req_line_speed = SPEED_AUTO_NEG;
+ break;
+ }
+
+ switch (link_config & PORT_FEATURE_FLOW_CONTROL_MASK) {
+ case PORT_FEATURE_FLOW_CONTROL_AUTO:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
+ break;
+ case PORT_FEATURE_FLOW_CONTROL_TX:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_TX;
+ break;
+ case PORT_FEATURE_FLOW_CONTROL_RX:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_RX;
+ break;
+ case PORT_FEATURE_FLOW_CONTROL_BOTH:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
+ break;
+ default:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ break;
+ }
+}
+
+u32 bnx2x_phy_selection(struct link_params *params)
+{
+ u32 phy_config_swapped, prio_cfg;
+ u32 return_cfg = PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT;
+
+ phy_config_swapped = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+
+ prio_cfg = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SELECTION_MASK;
+
+ if (phy_config_swapped) {
+ switch (prio_cfg) {
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ break;
+ }
+	} else {
+		return_cfg = prio_cfg;
+	}
+
+ return return_cfg;
+}
+
+int bnx2x_phy_probe(struct link_params *params)
+{
+ u8 phy_index, actual_phy_idx, link_cfg_idx;
+ u32 phy_config_swapped, sync_offset, media_types;
+ struct bnx2x *bp = params->bp;
+ struct bnx2x_phy *phy;
+ params->num_phys = 0;
+ DP(NETIF_MSG_LINK, "Begin phy probe\n");
+ phy_config_swapped = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+
+ for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+ phy_index++) {
+ link_cfg_idx = LINK_CONFIG_IDX(phy_index);
+ actual_phy_idx = phy_index;
+ if (phy_config_swapped) {
+ if (phy_index == EXT_PHY1)
+ actual_phy_idx = EXT_PHY2;
+ else if (phy_index == EXT_PHY2)
+ actual_phy_idx = EXT_PHY1;
+ }
+		DP(NETIF_MSG_LINK, "phy_config_swapped %x, phy_index %x, actual_phy_idx %x\n",
+		   phy_config_swapped, phy_index, actual_phy_idx);
+ phy = &params->phy[actual_phy_idx];
+ if (bnx2x_populate_phy(bp, phy_index, params->shmem_base,
+ params->shmem2_base, params->port,
+ phy) != 0) {
+ params->num_phys = 0;
+ DP(NETIF_MSG_LINK, "phy probe failed in phy index %d\n",
+ phy_index);
+			for (phy_index = INT_PHY;
+			     phy_index < MAX_PHYS;
+			     phy_index++)
+				params->phy[phy_index] = phy_null;
+ return -EINVAL;
+ }
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
+ break;
+
+ sync_offset = params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].media_type);
+ media_types = REG_RD(bp, sync_offset);
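+		/*
+		 * The per-phy media type fields are spaced
+		 * PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT bits apart, indexed by
+		 * actual_phy_idx.
+		 */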
+
+		/*
+		 * Update the media type for non-PMF sync only the first time.
+		 * If the media type changes afterwards, it will be updated
+		 * using the update_status function.
+		 */
+ if ((media_types & (PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
+ actual_phy_idx))) == 0) {
+ media_types |= ((phy->media_type &
+ PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
+ actual_phy_idx));
+ }
+ REG_WR(bp, sync_offset, media_types);
+
+ bnx2x_phy_def_cfg(params, phy, phy_index);
+ params->num_phys++;
+ }
+
+ DP(NETIF_MSG_LINK, "End phy probe. #phys found %x\n", params->num_phys);
+ return 0;
+}
+
+void bnx2x_init_bmac_loopback(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ vars->link_up = 1;
+ vars->line_speed = SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_BMAC;
+
+ vars->phy_flags = PHY_XGXS_FLAG;
+
+ bnx2x_xgxs_deassert(params);
+
+ /* set bmac loopback */
+ bnx2x_bmac_enable(params, vars, 1);
+
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+void bnx2x_init_emac_loopback(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ vars->link_up = 1;
+ vars->line_speed = SPEED_1000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_EMAC;
+
+ vars->phy_flags = PHY_XGXS_FLAG;
+
+ bnx2x_xgxs_deassert(params);
+	/* set emac loopback */
+ bnx2x_emac_enable(params, vars, 1);
+ bnx2x_emac_program(params, vars);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+void bnx2x_init_xmac_loopback(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ vars->link_up = 1;
+ if (!params->req_line_speed[0])
+ vars->line_speed = SPEED_10000;
+ else
+ vars->line_speed = params->req_line_speed[0];
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_XMAC;
+ vars->phy_flags = PHY_XGXS_FLAG;
+ /*
+ * Set WC to loopback mode since link is required to provide clock
+ * to the XMAC in 20G mode
+ */
+ bnx2x_set_aer_mmd(params, &params->phy[0]);
+ bnx2x_warpcore_reset_lane(bp, &params->phy[0], 0);
+ params->phy[INT_PHY].config_loopback(
+ &params->phy[INT_PHY],
+ params);
+
+ bnx2x_xmac_enable(params, vars, 1);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+void bnx2x_init_umac_loopback(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ vars->link_up = 1;
+ vars->line_speed = SPEED_1000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_UMAC;
+ vars->phy_flags = PHY_XGXS_FLAG;
+ bnx2x_umac_enable(params, vars, 1);
+
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+void bnx2x_init_xgxs_loopback(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ vars->link_up = 1;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->duplex = DUPLEX_FULL;
+ if (params->req_line_speed[0] == SPEED_1000)
+ vars->line_speed = SPEED_1000;
+ else
+ vars->line_speed = SPEED_10000;
+
+ if (!USES_WARPCORE(bp))
+ bnx2x_xgxs_deassert(params);
+ bnx2x_link_initialize(params, vars);
+
+ if (params->req_line_speed[0] == SPEED_1000) {
+		if (USES_WARPCORE(bp)) {
+			bnx2x_umac_enable(params, vars, 0);
+		} else {
+			bnx2x_emac_program(params, vars);
+			bnx2x_emac_enable(params, vars, 0);
+		}
+ } else {
+ if (USES_WARPCORE(bp))
+ bnx2x_xmac_enable(params, vars, 0);
+ else
+ bnx2x_bmac_enable(params, vars, 0);
+ }
+
+ if (params->loopback_mode == LOOPBACK_XGXS) {
+ /* set 10G XGXS loopback */
+ params->phy[INT_PHY].config_loopback(
+ &params->phy[INT_PHY],
+ params);
+
+ } else {
+ /* set external phy loopback */
+ u8 phy_index;
+ for (phy_index = EXT_PHY1;
+ phy_index < params->num_phys; phy_index++) {
+ if (params->phy[phy_index].config_loopback)
+ params->phy[phy_index].config_loopback(
+ &params->phy[phy_index],
+ params);
+ }
+ }
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+
+ bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
+}
+
+int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Phy Initialization started\n");
+ DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
+ params->req_line_speed[0], params->req_flow_ctrl[0]);
+ DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
+ params->req_line_speed[1], params->req_flow_ctrl[1]);
+ vars->link_status = 0;
+ vars->phy_link_up = 0;
+ vars->link_up = 0;
+ vars->line_speed = 0;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_NONE;
+ vars->phy_flags = 0;
+
+ /* disable attentions */
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+
+ bnx2x_emac_init(params, vars);
+
+ if (params->num_phys == 0) {
+		DP(NETIF_MSG_LINK, "No phy found for initialization!\n");
+ return -EINVAL;
+ }
+ set_phy_vars(params, vars);
+
+ DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
+ switch (params->loopback_mode) {
+ case LOOPBACK_BMAC:
+ bnx2x_init_bmac_loopback(params, vars);
+ break;
+ case LOOPBACK_EMAC:
+ bnx2x_init_emac_loopback(params, vars);
+ break;
+ case LOOPBACK_XMAC:
+ bnx2x_init_xmac_loopback(params, vars);
+ break;
+ case LOOPBACK_UMAC:
+ bnx2x_init_umac_loopback(params, vars);
+ break;
+ case LOOPBACK_XGXS:
+ case LOOPBACK_EXT_PHY:
+ bnx2x_init_xgxs_loopback(params, vars);
+ break;
+ default:
+ if (!CHIP_IS_E3(bp)) {
+ if (params->switch_cfg == SWITCH_CFG_10G)
+ bnx2x_xgxs_deassert(params);
+ else
+ bnx2x_serdes_deassert(bp, params->port);
+ }
+ bnx2x_link_initialize(params, vars);
+ msleep(30);
+ bnx2x_link_int_enable(params);
+ break;
+ }
+ return 0;
+}
+
+int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
+ u8 reset_ext_phy)
+{
+ struct bnx2x *bp = params->bp;
+ u8 phy_index, port = params->port, clear_latch_ind = 0;
+ DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
+ /* disable attentions */
+ vars->link_status = 0;
+ bnx2x_update_mng(params, vars->link_status);
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+
+ /* activate nig drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+
+ /* disable nig egress interface */
+ if (!CHIP_IS_E3(bp)) {
+ REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
+ }
+
+ /* Stop BigMac rx */
+ if (!CHIP_IS_E3(bp))
+ bnx2x_bmac_rx_disable(bp, port);
+ else
+ bnx2x_xmac_disable(params);
+ /* disable emac */
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+ msleep(10);
+	/* The PHY reset is controlled by GPIO 1.
+	 * Hold it low for now.
+	 */
+ /* clear link led */
+ bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
+
+ if (reset_ext_phy) {
+ bnx2x_set_mdio_clk(bp, params->chip_id, port);
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ if (params->phy[phy_index].link_reset) {
+ bnx2x_set_aer_mmd(params,
+ &params->phy[phy_index]);
+ params->phy[phy_index].link_reset(
+ &params->phy[phy_index],
+ params);
+ }
+ if (params->phy[phy_index].flags &
+ FLAGS_REARM_LATCH_SIGNAL)
+ clear_latch_ind = 1;
+ }
+ }
+
+ if (clear_latch_ind) {
+ /* Clear latching indication */
+ bnx2x_rearm_latch_signal(bp, port, 0);
+ bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
+ 1 << NIG_LATCH_BC_ENABLE_MI_INT);
+ }
+ if (params->phy[INT_PHY].link_reset)
+ params->phy[INT_PHY].link_reset(
+ &params->phy[INT_PHY], params);
+ /* reset BigMac */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
+ /* disable nig ingress interface */
+ if (!CHIP_IS_E3(bp)) {
+ REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
+ REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0);
+ }
+ vars->link_up = 0;
+ vars->phy_flags = 0;
+ return 0;
+}
+
+/****************************************************************************/
+/* Common function */
+/****************************************************************************/
+static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 shmem2_base_path[], u8 phy_index,
+ u32 chip_id)
+{
+ struct bnx2x_phy phy[PORT_MAX];
+ struct bnx2x_phy *phy_blk[PORT_MAX];
+ u16 val;
+ s8 port = 0;
+ s8 port_of_path = 0;
+ u32 swap_val, swap_override;
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ port ^= (swap_val && swap_override);
+ bnx2x_ext_phy_hw_reset(bp, port);
+ /* PART1 - Reset both phys */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ u32 shmem_base, shmem2_base;
+		/* In E2, the same phy is used for port 0 of both paths */
+ if (CHIP_IS_E1x(bp)) {
+ shmem_base = shmem_base_path[0];
+ shmem2_base = shmem2_base_path[0];
+ port_of_path = port;
+ } else {
+ shmem_base = shmem_base_path[port];
+ shmem2_base = shmem2_base_path[port];
+ port_of_path = 0;
+ }
+
+ /* Extract the ext phy address for the port */
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port_of_path, &phy[port]) != 0) {
+ DP(NETIF_MSG_LINK, "populate_phy failed\n");
+ return -EINVAL;
+ }
+ /* disable attentions */
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ port_of_path*4,
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+
+		/* Need to take the phy out of low power mode in order
+		   to access its registers */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
+
+ /* Reset the phy */
+ bnx2x_cl45_write(bp, &phy[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL,
+ 1<<15);
+ }
+
+ /* Add delay of 150ms after reset */
+ msleep(150);
+
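+	/*
+	 * Map the probed phys into phy_blk[]: swap the pair when port 0
+	 * holds the odd-addressed phy, so the block is handled in MDIO
+	 * address order.
+	 */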
+ if (phy[PORT_0].addr & 0x1) {
+ phy_blk[PORT_0] = &(phy[PORT_1]);
+ phy_blk[PORT_1] = &(phy[PORT_0]);
+ } else {
+ phy_blk[PORT_0] = &(phy[PORT_0]);
+ phy_blk[PORT_1] = &(phy[PORT_1]);
+ }
+
+ /* PART2 - Download firmware to both phys */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ if (CHIP_IS_E1x(bp))
+ port_of_path = port;
+ else
+ port_of_path = 0;
+
+ DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
+ phy_blk[port]->addr);
+ if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+ port_of_path))
+ return -EINVAL;
+
+ /* Only set bit 10 = 1 (Tx power down) */
+ bnx2x_cl45_read(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, &val);
+
+ /* Phase1 of TX_POWER_DOWN reset */
+ bnx2x_cl45_write(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN,
+ (val | 1<<10));
+ }
+
+ /*
+ * Toggle Transmitter: Power down and then up with 600ms delay
+ * between
+ */
+ msleep(600);
+
+ /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ /* Phase2 of POWER_DOWN_RESET */
+ /* Release bit 10 (Release Tx power down) */
+ bnx2x_cl45_read(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, &val);
+
+ bnx2x_cl45_write(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
+ msleep(15);
+
+		/* Read-modify-write the SPI-ROM version select register */
+ bnx2x_cl45_read(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_EDC_FFE_MAIN, &val);
+ bnx2x_cl45_write(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
+
+ /* set GPIO2 back to LOW */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ }
+ return 0;
+}
+
+static int bnx2x_8726_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 shmem2_base_path[], u8 phy_index,
+ u32 chip_id)
+{
+ u32 val;
+ s8 port;
+ struct bnx2x_phy phy;
+ /* Use port1 because of the static port-swap */
+ /* Enable the module detection interrupt */
+ val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
+ val |= ((1<<MISC_REGISTERS_GPIO_3)|
+ (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
+ REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
+
+ bnx2x_ext_phy_hw_reset(bp, 0);
+ msleep(5);
+ for (port = 0; port < PORT_MAX; port++) {
+ u32 shmem_base, shmem2_base;
+
+		/* In E2, the same phy is used for port 0 of both paths */
+ if (CHIP_IS_E1x(bp)) {
+ shmem_base = shmem_base_path[0];
+ shmem2_base = shmem2_base_path[0];
+ } else {
+ shmem_base = shmem_base_path[port];
+ shmem2_base = shmem2_base_path[port];
+ }
+ /* Extract the ext phy address for the port */
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port, &phy) != 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return -EINVAL;
+ }
+
+		/* Reset the phy */
+ bnx2x_cl45_write(bp, &phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);
+
+ /* Set fault module detected LED on */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+ MISC_REGISTERS_GPIO_HIGH,
+ port);
+ }
+
+ return 0;
+}
+
+static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
+ u8 *io_gpio, u8 *io_port)
+{
+ u32 phy_gpio_reset = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[PORT_0].default_cfg));
+ switch (phy_gpio_reset) {
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
+ *io_gpio = 0;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
+ *io_gpio = 1;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
+ *io_gpio = 2;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
+ *io_gpio = 3;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
+ *io_gpio = 0;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
+ *io_gpio = 1;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
+ *io_gpio = 2;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
+ *io_gpio = 3;
+ *io_port = 1;
+ break;
+ default:
+ /* Don't override the io_gpio and io_port */
+ break;
+ }
+}
+
+static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 shmem2_base_path[], u8 phy_index,
+ u32 chip_id)
+{
+ s8 port, reset_gpio;
+ u32 swap_val, swap_override;
+ struct bnx2x_phy phy[PORT_MAX];
+ struct bnx2x_phy *phy_blk[PORT_MAX];
+ s8 port_of_path;
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+
+ reset_gpio = MISC_REGISTERS_GPIO_1;
+ port = 1;
+
+	/*
+	 * Retrieve the reset gpio/port which controls the reset.
+	 * The default is GPIO1, PORT1.
+	 */
+ bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
+ (u8 *)&reset_gpio, (u8 *)&port);
+
+ /* Calculate the port based on port swap */
+ port ^= (swap_val && swap_override);
+
+	/* Initiate PHY reset */
+ bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
+ msleep(1);
+ bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
+
+ msleep(5);
+
+ /* PART1 - Reset both phys */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ u32 shmem_base, shmem2_base;
+
+		/* In E2, the same phy is used for port 0 of both paths */
+ if (CHIP_IS_E1x(bp)) {
+ shmem_base = shmem_base_path[0];
+ shmem2_base = shmem2_base_path[0];
+ port_of_path = port;
+ } else {
+ shmem_base = shmem_base_path[port];
+ shmem2_base = shmem2_base_path[port];
+ port_of_path = 0;
+ }
+
+ /* Extract the ext phy address for the port */
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port_of_path, &phy[port]) != 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return -EINVAL;
+ }
+ /* disable attentions */
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ port_of_path*4,
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+
+ /* Reset the phy */
+ bnx2x_cl45_write(bp, &phy[port],
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+ }
+
+ /* Add delay of 150ms after reset */
+ msleep(150);
+ if (phy[PORT_0].addr & 0x1) {
+ phy_blk[PORT_0] = &(phy[PORT_1]);
+ phy_blk[PORT_1] = &(phy[PORT_0]);
+ } else {
+ phy_blk[PORT_0] = &(phy[PORT_0]);
+ phy_blk[PORT_1] = &(phy[PORT_1]);
+ }
+ /* PART2 - Download firmware to both phys */
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ if (CHIP_IS_E1x(bp))
+ port_of_path = port;
+ else
+ port_of_path = 0;
+ DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
+ phy_blk[port]->addr);
+ if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+ port_of_path))
+ return -EINVAL;
+ /* Disable PHY transmitter output */
+ bnx2x_cl45_write(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_DISABLE, 1);
+ }
+ return 0;
+}
+
+static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
+ u32 shmem2_base_path[], u8 phy_index,
+ u32 ext_phy_type, u32 chip_id)
+{
+ int rc = 0;
+
+ switch (ext_phy_type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+ rc = bnx2x_8073_common_init_phy(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
+ rc = bnx2x_8727_common_init_phy(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
+ break;
+
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+		/*
+		 * GPIO1 affects both ports, so it only needs to be pulled
+		 * once rather than once per port.
+		 */
+ rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+		/*
+		 * GPIO3's are linked, and so both need to be toggled
+		 * to obtain the required 2us pulse.
+		 */
+ rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, chip_id);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+ rc = -EINVAL;
+ break;
+ default:
+ DP(NETIF_MSG_LINK,
+ "ext_phy 0x%x common init not required\n",
+ ext_phy_type);
+ break;
+ }
+
+ if (rc != 0)
+		netdev_err(bp->dev, "Warning: PHY was not initialized, Port %d\n",
+			   0);
+ return rc;
+}
+
+int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
+ u32 shmem2_base_path[], u32 chip_id)
+{
+ int rc = 0;
+ u32 phy_ver, val;
+ u8 phy_index = 0;
+ u32 ext_phy_type, ext_phy_config;
+ bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
+ bnx2x_set_mdio_clk(bp, chip_id, PORT_1);
+ DP(NETIF_MSG_LINK, "Begin common phy init\n");
+ if (CHIP_IS_E3(bp)) {
+ /* Enable EPIO */
+ val = REG_RD(bp, MISC_REG_GEN_PURP_HWG);
+ REG_WR(bp, MISC_REG_GEN_PURP_HWG, val | 1);
+ }
+ /* Check if common init was already done */
+ phy_ver = REG_RD(bp, shmem_base_path[0] +
+ offsetof(struct shmem_region,
+ port_mb[PORT_0].ext_phy_fw_version));
+ if (phy_ver) {
+ DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n",
+ phy_ver);
+ return 0;
+ }
+
+	/* Read the ext_phy_type for an arbitrary port (0) */
+ for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+ phy_index++) {
+ ext_phy_config = bnx2x_get_ext_phy_config(bp,
+ shmem_base_path[0],
+ phy_index, 0);
+ ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+ rc |= bnx2x_ext_phy_common_init(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, ext_phy_type,
+ chip_id);
+ }
+ return rc;
+}
+
+static void bnx2x_check_over_curr(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u32 cfg_pin;
+ u8 port = params->port;
+ u32 pin_val;
+
+ cfg_pin = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg1)) &
+ PORT_HW_CFG_E3_OVER_CURRENT_MASK) >>
+ PORT_HW_CFG_E3_OVER_CURRENT_SHIFT;
+
+	/* Ignore the check if no external input pin is available */
+ if (bnx2x_get_cfg_pin(bp, cfg_pin, &pin_val) != 0)
+ return;
+
+ if (!pin_val) {
+ if ((vars->phy_flags & PHY_OVER_CURRENT_FLAG) == 0) {
+			netdev_err(bp->dev, "Error: Power fault on Port %d has been detected and the power to that SFP+ module has been removed to prevent failure of the card. Please remove the SFP+ module and restart the system to clear this error.\n",
+				   params->port);
+ vars->phy_flags |= PHY_OVER_CURRENT_FLAG;
+ }
+	} else {
+		vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
+	}
+}
+
+static void bnx2x_analyze_link_error(struct link_params *params,
+ struct link_vars *vars, u32 lss_status)
+{
+ struct bnx2x *bp = params->bp;
+ /* Compare new value with previous value */
+ u8 led_mode;
+ u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0;
+
+ if ((lss_status ^ half_open_conn) == 0)
+ return;
+
+ /* If values differ */
+ DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up,
+ half_open_conn, lss_status);
+
+ /*
+ * a. Update shmem->link_status accordingly
+ * b. Update link_vars->link_up
+ */
+ if (lss_status) {
+		DP(NETIF_MSG_LINK, "Remote Fault detected!\n");
+ vars->link_status &= ~LINK_STATUS_LINK_UP;
+ vars->link_up = 0;
+ vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ /*
+ * Set LED mode to off since the PHY doesn't know about these
+ * errors
+ */
+ led_mode = LED_MODE_OFF;
+ } else {
+ DP(NETIF_MSG_LINK, "Remote Fault cleared\n");
+ vars->link_status |= LINK_STATUS_LINK_UP;
+ vars->link_up = 1;
+ vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
+ led_mode = LED_MODE_OPER;
+ }
+ /* Update the LED according to the link state */
+ bnx2x_set_led(params, vars, led_mode, SPEED_10000);
+
+ /* Update link status in the shared memory */
+ bnx2x_update_mng(params, vars->link_status);
+
+ /* C. Trigger General Attention */
+ vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
+ bnx2x_notify_link_changed(bp);
+}
+
+/******************************************************************************
+* Description:
+*	This function checks for a half-opened connection change indication.
+*	When such a change occurs, it calls bnx2x_analyze_link_error to check
+*	whether Remote Fault is set or cleared. Reception of a remote fault
+*	status message in the MAC indicates that the peer's MAC has detected a
+*	fault, for example, due to a break in the TX side of the fiber.
+*
+******************************************************************************/
+static void bnx2x_check_half_open_conn(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u32 lss_status = 0;
+ u32 mac_base;
+	/* Proceed only if the link is physically up at 10G */
+ if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
+ return;
+
+ if (CHIP_IS_E3(bp) &&
+ (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ (MISC_REGISTERS_RESET_REG_2_XMAC))) {
+ /* Check E3 XMAC */
+ /*
+ * Note that link speed cannot be queried here, since it may be
+ * zero while link is down. In case UMAC is active, LSS will
+ * simply not be set
+ */
+ mac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+		/* Clear sticky bits (requires a rising edge) */
+ REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
+ REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
+ XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
+ XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
+ if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
+ lss_status = 1;
+
+ bnx2x_analyze_link_error(params, vars, lss_status);
+ } else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
+ /* Check E1X / E2 BMAC */
+ u32 lss_status_reg;
+ u32 wb_data[2];
+ mac_base = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ /* Read BIGMAC_REGISTER_RX_LSS_STATUS */
+ if (CHIP_IS_E2(bp))
+ lss_status_reg = BIGMAC2_REGISTER_RX_LSS_STAT;
+ else
+ lss_status_reg = BIGMAC_REGISTER_RX_LSS_STATUS;
+
+ REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
+ lss_status = (wb_data[0] > 0);
+
+ bnx2x_analyze_link_error(params, vars, lss_status);
+ }
+}
+
+void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
+{
+	struct bnx2x *bp;
+	u16 phy_idx;
+
+	/* Check params before dereferencing it; DP() needs a valid bp */
+	if (!params)
+		return;
+
+	bp = params->bp;
+
+ for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+ if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
+ bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
+ bnx2x_check_half_open_conn(params, vars);
+ break;
+ }
+ }
+
+ if (CHIP_IS_E3(bp))
+ bnx2x_check_over_curr(params, vars);
+}
+
+u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base)
+{
+ u8 phy_index;
+ struct bnx2x_phy phy;
+ for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+ phy_index++) {
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+ 0, &phy) != 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return 0;
+ }
+
+ if (phy.flags & FLAGS_HW_LOCK_REQUIRED)
+ return 1;
+ }
+ return 0;
+}
+
+u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
+ u32 shmem_base,
+ u32 shmem2_base,
+ u8 port)
+{
+ u8 phy_index, fan_failure_det_req = 0;
+ struct bnx2x_phy phy;
+ for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+ phy_index++) {
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port, &phy) != 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return 0;
+ }
+ fan_failure_det_req |= (phy.flags &
+ FLAGS_FAN_FAILURE_DET_REQ);
+ }
+ return fan_failure_det_req;
+}
+
+void bnx2x_hw_reset_phy(struct link_params *params)
+{
+ u8 phy_index;
+ struct bnx2x *bp = params->bp;
+ bnx2x_update_mng(params, 0);
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+
+ for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+ phy_index++) {
+ if (params->phy[phy_index].hw_reset) {
+ params->phy[phy_index].hw_reset(
+ &params->phy[phy_index],
+ params);
+ params->phy[phy_index] = phy_null;
+ }
+ }
+}
+
+void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
+ u32 chip_id, u32 shmem_base, u32 shmem2_base,
+ u8 port)
+{
+ u8 gpio_num = 0xff, gpio_port = 0xff, phy_index;
+ u32 val;
+ u32 offset, aeu_mask, swap_val, swap_override, sync_offset;
+ if (CHIP_IS_E3(bp)) {
+ if (bnx2x_get_mod_abs_int_cfg(bp, chip_id,
+ shmem_base,
+ port,
+ &gpio_num,
+ &gpio_port) != 0)
+ return;
+ } else {
+ struct bnx2x_phy phy;
+ for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+ phy_index++) {
+			if (bnx2x_populate_phy(bp, phy_index, shmem_base,
+					       shmem2_base, port,
+					       &phy) != 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return;
+ }
+ if (phy.type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) {
+ gpio_num = MISC_REGISTERS_GPIO_3;
+ gpio_port = port;
+ break;
+ }
+ }
+ }
+
+ if (gpio_num == 0xff)
+ return;
+
+ /* Set GPIO3 to trigger SFP+ module insertion/removal */
+ bnx2x_set_gpio(bp, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z, gpio_port);
+
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ gpio_port ^= (swap_val && swap_override);
+
+ vars->aeu_int_mask = AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 <<
+ (gpio_num + (gpio_port << 2));
+
+ sync_offset = shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].aeu_int_mask);
+ REG_WR(bp, sync_offset, vars->aeu_int_mask);
+
+ DP(NETIF_MSG_LINK, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x\n",
+ gpio_num, gpio_port, vars->aeu_int_mask);
+
+ if (port == 0)
+ offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
+ else
+ offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
+
+ /* Open appropriate AEU for interrupts */
+ aeu_mask = REG_RD(bp, offset);
+ aeu_mask |= vars->aeu_int_mask;
+ REG_WR(bp, offset, aeu_mask);
+
+ /* Enable the GPIO to trigger interrupt */
+ val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
+ val |= 1 << (gpio_num + (gpio_port << 2));
+ REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
new file mode 100644
index 000000000000..c12db6da213e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -0,0 +1,493 @@
+/* Copyright 2008-2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Written by Yaniv Rosner
+ *
+ */
+
+#ifndef BNX2X_LINK_H
+#define BNX2X_LINK_H
+
+
+
+/***********************************************************/
+/* Defines */
+/***********************************************************/
+#define DEFAULT_PHY_DEV_ADDR 3
+#define E2_DEFAULT_PHY_DEV_ADDR 5
+
+
+
+#define BNX2X_FLOW_CTRL_AUTO PORT_FEATURE_FLOW_CONTROL_AUTO
+#define BNX2X_FLOW_CTRL_TX PORT_FEATURE_FLOW_CONTROL_TX
+#define BNX2X_FLOW_CTRL_RX PORT_FEATURE_FLOW_CONTROL_RX
+#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
+#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
+
+#define NET_SERDES_IF_XFI 1
+#define NET_SERDES_IF_SFI 2
+#define NET_SERDES_IF_KR 3
+#define NET_SERDES_IF_DXGXS 4
+
+#define SPEED_AUTO_NEG 0
+#define SPEED_20000 20000
+
+#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
+#define SFP_EEPROM_VENDOR_NAME_SIZE 16
+#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
+#define SFP_EEPROM_VENDOR_OUI_SIZE 3
+#define SFP_EEPROM_PART_NO_ADDR 0x28
+#define SFP_EEPROM_PART_NO_SIZE 16
+#define SFP_EEPROM_REVISION_ADDR 0x38
+#define SFP_EEPROM_REVISION_SIZE 4
+#define SFP_EEPROM_SERIAL_ADDR 0x44
+#define SFP_EEPROM_SERIAL_SIZE 16
+#define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */
+#define SFP_EEPROM_DATE_SIZE 6
+#define PWR_FLT_ERR_MSG_LEN 250
+
+#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
+ ((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
+#define XGXS_EXT_PHY_ADDR(ext_phy_config) \
+ (((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
+ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
+#define SERDES_EXT_PHY_TYPE(ext_phy_config) \
+ ((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
+
+/* Single Media Direct board is the plain 577xx board with CX4/RJ45 jacks */
+#define SINGLE_MEDIA_DIRECT(params) (params->num_phys == 1)
+/* Single Media board contains a single external phy */
+#define SINGLE_MEDIA(params) (params->num_phys == 2)
+/* Dual Media board contains two external phys with different media */
+#define DUAL_MEDIA(params) (params->num_phys == 3)
+
+#define FW_PARAM_PHY_ADDR_MASK 0x000000FF
+#define FW_PARAM_PHY_TYPE_MASK 0x0000FF00
+#define FW_PARAM_MDIO_CTRL_MASK 0xFFFF0000
+#define FW_PARAM_MDIO_CTRL_OFFSET 16
+#define FW_PARAM_PHY_ADDR(fw_param) (fw_param & \
+ FW_PARAM_PHY_ADDR_MASK)
+#define FW_PARAM_PHY_TYPE(fw_param) (fw_param & \
+ FW_PARAM_PHY_TYPE_MASK)
+#define FW_PARAM_MDIO_CTRL(fw_param) ((fw_param & \
+ FW_PARAM_MDIO_CTRL_MASK) >> \
+ FW_PARAM_MDIO_CTRL_OFFSET)
+#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
+ (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
+
+
+#define PFC_BRB_FULL_LB_XOFF_THRESHOLD 170
+#define PFC_BRB_FULL_LB_XON_THRESHOLD 250
+
+#define MAXVAL(a, b) (((a) > (b)) ? (a) : (b))
+/***********************************************************/
+/* Structs */
+/***********************************************************/
+#define INT_PHY 0
+#define EXT_PHY1 1
+#define EXT_PHY2 2
+#define MAX_PHYS 3
+
+/* Same configuration is shared between the XGXS and the first external phy */
+#define LINK_CONFIG_SIZE (MAX_PHYS - 1)
+#define LINK_CONFIG_IDX(_phy_idx) ((_phy_idx == INT_PHY) ? \
+ 0 : (_phy_idx - 1))
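+/* e.g. LINK_CONFIG_IDX(INT_PHY) == LINK_CONFIG_IDX(EXT_PHY1) == 0, while
+   LINK_CONFIG_IDX(EXT_PHY2) == 1 */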
+/***********************************************************/
+/* bnx2x_phy struct */
+/* Defines the required arguments and function per phy */
+/***********************************************************/
+struct link_vars;
+struct link_params;
+struct bnx2x_phy;
+
+typedef u8 (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
+ struct link_vars *vars);
+typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params,
+ struct link_vars *vars);
+typedef void (*link_reset_t)(struct bnx2x_phy *phy,
+ struct link_params *params);
+typedef void (*config_loopback_t)(struct bnx2x_phy *phy,
+ struct link_params *params);
+typedef u8 (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
+typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params);
+typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode);
+typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy,
+ struct link_params *params, u32 action);
+
+struct bnx2x_phy {
+ u32 type;
+
+ /* Loaded during init */
+ u8 addr;
+ u8 def_md_devad;
+ u16 flags;
+ /* Require HW lock */
+#define FLAGS_HW_LOCK_REQUIRED (1<<0)
+ /* No Over-Current detection */
+#define FLAGS_NOC (1<<1)
+ /* Fan failure detection required */
+#define FLAGS_FAN_FAILURE_DET_REQ (1<<2)
+ /* Initialize first the XGXS and only then the phy itself */
+#define FLAGS_INIT_XGXS_FIRST (1<<3)
+#define FLAGS_WC_DUAL_MODE (1<<4)
+#define FLAGS_4_PORT_MODE (1<<5)
+#define FLAGS_REARM_LATCH_SIGNAL (1<<6)
+#define FLAGS_SFP_NOT_APPROVED (1<<7)
+#define FLAGS_MDC_MDIO_WA (1<<8)
+#define FLAGS_DUMMY_READ (1<<9)
+#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
+#define FLAGS_TX_ERROR_CHECK (1<<12)
+
+ /* preemphasis values for the rx side */
+ u16 rx_preemphasis[4];
+
+ /* preemphasis values for the tx side */
+ u16 tx_preemphasis[4];
+
+	/* EMAC base address used for MDIO access */
+ u32 mdio_ctrl;
+
+ u32 supported;
+
+ u32 media_type;
+#define ETH_PHY_UNSPECIFIED 0x0
+#define ETH_PHY_SFP_FIBER 0x1
+#define ETH_PHY_XFP_FIBER 0x2
+#define ETH_PHY_DA_TWINAX 0x3
+#define ETH_PHY_BASE_T 0x4
+#define ETH_PHY_KR 0xf0
+#define ETH_PHY_CX4 0xf1
+#define ETH_PHY_NOT_PRESENT 0xff
+
+	/* The address at which the fw version is located */
+ u32 ver_addr;
+
+ u16 req_flow_ctrl;
+
+ u16 req_line_speed;
+
+ u32 speed_cap_mask;
+
+ u16 req_duplex;
+ u16 rsrv;
+ /* Called per phy/port init, and it configures LASI, speed, autoneg,
+ duplex, flow control negotiation, etc. */
+ config_init_t config_init;
+
+	/* Called due to interrupt. It determines the link and speed */
+ read_status_t read_status;
+
+ /* Called when driver is unloading. Should reset the phy */
+ link_reset_t link_reset;
+
+ /* Set the loopback configuration for the phy */
+ config_loopback_t config_loopback;
+
+ /* Format the given raw number into str up to len */
+ format_fw_ver_t format_fw_ver;
+
+ /* Reset the phy (both ports) */
+ hw_reset_t hw_reset;
+
+ /* Set link led mode (on/off/oper)*/
+ set_link_led_t set_link_led;
+
+ /* PHY Specific tasks */
+ phy_specific_func_t phy_specific_func;
+#define DISABLE_TX 1
+#define ENABLE_TX 2
+};
+
+/* Input parameters to the CLC */
+struct link_params {
+
+ u8 port;
+
+ /* Default / User Configuration */
+ u8 loopback_mode;
+#define LOOPBACK_NONE 0
+#define LOOPBACK_EMAC 1
+#define LOOPBACK_BMAC 2
+#define LOOPBACK_XGXS 3
+#define LOOPBACK_EXT_PHY 4
+#define LOOPBACK_EXT 5
+#define LOOPBACK_UMAC 6
+#define LOOPBACK_XMAC 7
+
+ /* Device parameters */
+ u8 mac_addr[6];
+
+ u16 req_duplex[LINK_CONFIG_SIZE];
+ u16 req_flow_ctrl[LINK_CONFIG_SIZE];
+
+ u16 req_line_speed[LINK_CONFIG_SIZE]; /* Also determine AutoNeg */
+
+ /* shmem parameters */
+ u32 shmem_base;
+ u32 shmem2_base;
+ u32 speed_cap_mask[LINK_CONFIG_SIZE];
+ u32 switch_cfg;
+#define SWITCH_CFG_1G PORT_FEATURE_CON_SWITCH_1G_SWITCH
+#define SWITCH_CFG_10G PORT_FEATURE_CON_SWITCH_10G_SWITCH
+#define SWITCH_CFG_AUTO_DETECT PORT_FEATURE_CON_SWITCH_AUTO_DETECT
+
+ u32 lane_config;
+
+ /* Phy register parameter */
+ u32 chip_id;
+
+ /* features */
+ u32 feature_config_flags;
+#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
+#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
+#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
+#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
+#define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9)
+#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10)
+ /* Will be populated during common init */
+ struct bnx2x_phy phy[MAX_PHYS];
+
+ /* Will be populated during common init */
+ u8 num_phys;
+
+ u8 rsrv;
+ u16 hw_led_mode; /* part of the hw_config read from the shmem */
+ u32 multi_phy_config;
+
+ /* Device pointer passed to all callback functions */
+ struct bnx2x *bp;
+ u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
+ req_flow_ctrl is set to AUTO */
+};
+
+/* Output parameters */
+struct link_vars {
+ u8 phy_flags;
+#define PHY_XGXS_FLAG (1<<0)
+#define PHY_SGMII_FLAG (1<<1)
+#define PHY_PHYSICAL_LINK_FLAG (1<<2)
+#define PHY_HALF_OPEN_CONN_FLAG (1<<3)
+#define PHY_OVER_CURRENT_FLAG (1<<4)
+
+ u8 mac_type;
+#define MAC_TYPE_NONE 0
+#define MAC_TYPE_EMAC 1
+#define MAC_TYPE_BMAC 2
+#define MAC_TYPE_UMAC 3
+#define MAC_TYPE_XMAC 4
+
+ u8 phy_link_up; /* internal phy link indication */
+ u8 link_up;
+
+ u16 line_speed;
+ u16 duplex;
+
+ u16 flow_ctrl;
+ u16 ieee_fc;
+
+ /* The same definitions as the shmem parameter */
+ u32 link_status;
+ u8 fault_detected;
+ u8 rsrv1;
+ u16 periodic_flags;
+#define PERIODIC_FLAGS_LINK_EVENT 0x0001
+
+ u32 aeu_int_mask;
+};
+
+/***********************************************************/
+/* Functions */
+/***********************************************************/
+int bnx2x_phy_init(struct link_params *params, struct link_vars *vars);
+
+/* Reset the link. Should be called when the driver or interface goes down.
+   Before calling a phy firmware upgrade, reset_ext_phy should be set
+   to 0 */
+int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
+ u8 reset_ext_phy);
+
+/* bnx2x_link_update should be called upon link interrupt */
+int bnx2x_link_update(struct link_params *params, struct link_vars *vars);
+
+/* Use the following phy functions to read/write from an external phy.
+   In order to use them to read/write internal phy registers, use
+   DEFAULT_PHY_DEV_ADDR as the devad, and (_bank + (_addr & 0xf)) as
+   the register */
+int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 *ret_val);
+
+int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 val);
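+
+/* A minimal usage sketch (hypothetical, for illustration only; it assumes a
+   'params' already filled in by bnx2x_phy_probe and that a return value of 0
+   means success):
+
+	u16 val;
+
+	if (!bnx2x_phy_read(params, params->phy[EXT_PHY1].addr,
+			    MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, &val))
+		bnx2x_phy_write(params, params->phy[EXT_PHY1].addr,
+				MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL,
+				val | (1 << 15));
+*/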
+
+/* Reads the link_status from the shmem
+   and updates the link vars accordingly */
+void bnx2x_link_status_update(struct link_params *input,
+ struct link_vars *output);
+/* Returns a string representing the fw_version of the external phy */
+int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
+ u8 *version, u16 len);
+
+/* Set/unset the led
+   Normally, the CLC takes care of the led for the link, but in case one needs
+   to set/unset the led manually, set "mode" to LED_MODE_OPER to
+   blink the led, and LED_MODE_OFF to set the led off.*/
+int bnx2x_set_led(struct link_params *params,
+ struct link_vars *vars, u8 mode, u32 speed);
+#define LED_MODE_OFF 0
+#define LED_MODE_ON 1
+#define LED_MODE_OPER 2
+#define LED_MODE_FRONT_PANEL_OFF 3
+
+/* bnx2x_handle_module_detect_int should be called upon module detection
+ interrupt */
+void bnx2x_handle_module_detect_int(struct link_params *params);
+
+/* Get the actual link status. A return value of 0 means the link is up,
+   otherwise the link is down*/
+int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
+ u8 is_serdes);
+
+/* One-time initialization for external phy after power up */
+int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
+ u32 shmem2_base_path[], u32 chip_id);
+
+/* Reset the external PHY using GPIO */
+void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
+
+/* Reset the external phy of the SFX7101 */
+void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
+
+/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
+int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
+ u8 byte_cnt, u8 *o_buf);
+
+void bnx2x_hw_reset_phy(struct link_params *params);
+
+/* Checks if HW lock is required for this phy/board type */
+u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
+ u32 shmem2_base);
+
+/* Check swap bit and adjust PHY order */
+u32 bnx2x_phy_selection(struct link_params *params);
+
+/* Probe the phys on board, and populate them in "params" */
+int bnx2x_phy_probe(struct link_params *params);
+
+/* Checks if fan failure detection is required on one of the phys on board */
+u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
+ u32 shmem2_base, u8 port);
+
+
+
+/* DCBX structs */
+
+/* Number of maximum COS per chip */
+#define DCBX_E2E3_MAX_NUM_COS (2)
+#define DCBX_E3B0_MAX_NUM_COS_PORT0 (6)
+#define DCBX_E3B0_MAX_NUM_COS_PORT1 (3)
+#define DCBX_E3B0_MAX_NUM_COS ( \
+ MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, \
+ DCBX_E3B0_MAX_NUM_COS_PORT1))
+
+#define DCBX_MAX_NUM_COS ( \
+ MAXVAL(DCBX_E3B0_MAX_NUM_COS, \
+ DCBX_E2E3_MAX_NUM_COS))
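+/* With the per-chip values above, DCBX_MAX_NUM_COS evaluates to 6 */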
+
+/* PFC port configuration params */
+struct bnx2x_nig_brb_pfc_port_params {
+ /* NIG */
+ u32 pause_enable;
+ u32 llfc_out_en;
+ u32 llfc_enable;
+ u32 pkt_priority_to_cos;
+ u8 num_of_rx_cos_priority_mask;
+ u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS];
+ u32 llfc_high_priority_classes;
+ u32 llfc_low_priority_classes;
+ /* BRB */
+ u32 cos0_pauseable;
+ u32 cos1_pauseable;
+};
+
+
+/* ETS port configuration params */
+struct bnx2x_ets_bw_params {
+ u8 bw;
+};
+
+struct bnx2x_ets_sp_params {
+	/**
+	 * Valid values are 0 - 5; 0 is the highest strict priority.
+	 * No two COSes may have the same pri.
+	 */
+ u8 pri;
+};
+
+enum bnx2x_cos_state {
+ bnx2x_cos_state_strict = 0,
+ bnx2x_cos_state_bw = 1,
+};
+
+struct bnx2x_ets_cos_params {
+	enum bnx2x_cos_state state;
+ union {
+ struct bnx2x_ets_bw_params bw_params;
+ struct bnx2x_ets_sp_params sp_params;
+ } params;
+};
+
+struct bnx2x_ets_params {
+	u8 num_of_cos; /* Number of valid COS entries */
+ struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS];
+};
+
+/**
+ * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
+ * when link is already up
+ */
+int bnx2x_update_pfc(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_nig_brb_pfc_port_params *pfc_params);
+
+
+/* Used to configure the ETS to disable */
+int bnx2x_ets_disabled(struct link_params *params,
+ struct link_vars *vars);
+
+/* Used to configure the ETS to BW limited */
+void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
+ const u32 cos1_bw);
+
+/* Used to configure the ETS to strict */
+int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
+
+
+/* Configure the COS to ETS according to BW and SP settings.*/
+int bnx2x_ets_e3b0_config(const struct link_params *params,
+ const struct link_vars *vars,
+ const struct bnx2x_ets_params *ets_params);
+/* Read pfc statistics */
+void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
+ u32 pfc_frames_sent[2],
+ u32 pfc_frames_received[2]);
+void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
+ u32 chip_id, u32 shmem_base, u32 shmem2_base,
+ u8 port);
+
+int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+ struct link_params *params);
+
+void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
+
+#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
new file mode 100644
index 000000000000..6486ab8c8fc8
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -0,0 +1,11617 @@
+/* bnx2x_main.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/device.h> /* for dev_info() */
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <linux/time.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/workqueue.h>
+#include <linux/crc32.h>
+#include <linux/crc32c.h>
+#include <linux/prefetch.h>
+#include <linux/zlib.h>
+#include <linux/io.h>
+#include <linux/stringify.h>
+#include <linux/vmalloc.h>
+
+#include "bnx2x.h"
+#include "bnx2x_init.h"
+#include "bnx2x_init_ops.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_dcb.h"
+#include "bnx2x_sp.h"
+
+#include <linux/firmware.h>
+#include "bnx2x_fw_file_hdr.h"
+/* FW files */
+#define FW_FILE_VERSION \
+ __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
+ __stringify(BCM_5710_FW_MINOR_VERSION) "." \
+ __stringify(BCM_5710_FW_REVISION_VERSION) "." \
+ __stringify(BCM_5710_FW_ENGINEERING_VERSION)
+#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
+
+/* Time in jiffies before concluding the transmitter is hung */
+#define TX_TIMEOUT (5*HZ)
+
+static char version[] __devinitdata =
+ "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
+ DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Eliezer Tamir");
+MODULE_DESCRIPTION("Broadcom NetXtreme II "
+ "BCM57710/57711/57711E/"
+ "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
+ "57840/57840_MF Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_FIRMWARE(FW_FILE_NAME_E1);
+MODULE_FIRMWARE(FW_FILE_NAME_E1H);
+MODULE_FIRMWARE(FW_FILE_NAME_E2);
+
+static int multi_mode = 1;
+module_param(multi_mode, int, 0);
+MODULE_PARM_DESC(multi_mode, " Multi queue mode "
+ "(0 Disable; 1 Enable (default))");
+
+int num_queues;
+module_param(num_queues, int, 0);
+MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
+		 " (default is the number of CPUs)");
+
+static int disable_tpa;
+module_param(disable_tpa, int, 0);
+MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
+
+#define INT_MODE_INTx 1
+#define INT_MODE_MSI 2
+static int int_mode;
+module_param(int_mode, int, 0);
+MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
+ "(1 INT#x; 2 MSI)");
+
+static int dropless_fc;
+module_param(dropless_fc, int, 0);
+MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
+
+static int poll;
+module_param(poll, int, 0);
+MODULE_PARM_DESC(poll, " Use polling (for debug)");
+
+static int mrrs = -1;
+module_param(mrrs, int, 0);
+MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
+
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, " Default debug msglevel");
+
+
+
+struct workqueue_struct *bnx2x_wq;
+
+enum bnx2x_board_type {
+ BCM57710 = 0,
+ BCM57711,
+ BCM57711E,
+ BCM57712,
+ BCM57712_MF,
+ BCM57800,
+ BCM57800_MF,
+ BCM57810,
+ BCM57810_MF,
+ BCM57840,
+ BCM57840_MF
+};
+
+/* indexed by board_type, above */
+static struct {
+ char *name;
+} board_info[] __devinitdata = {
+ { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
+ { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
+ { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
+ { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
+ { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
+ { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
+ { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57840 10/20 Gigabit "
+ "Ethernet Multi Function"}
+};
+
+#ifndef PCI_DEVICE_ID_NX2_57710
+#define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57711
+#define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57711E
+#define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57712
+#define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57712_MF
+#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57800
+#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57800_MF
+#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57810
+#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57810_MF
+#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840
+#define PCI_DEVICE_ID_NX2_57840 CHIP_NUM_57840
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_MF
+#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
+#endif
+static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
+
+/****************************************************************************
+* General service functions
+****************************************************************************/
+
+static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
+ u32 addr, dma_addr_t mapping)
+{
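+	/* A dma_addr_t is written as two 32-bit halves:
+	 * the low dword at addr, the high dword at addr + 4
+	 */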
+ REG_WR(bp, addr, U64_LO(mapping));
+ REG_WR(bp, addr + 4, U64_HI(mapping));
+}
+
+static inline void storm_memset_spq_addr(struct bnx2x *bp,
+ dma_addr_t mapping, u16 abs_fid)
+{
+ u32 addr = XSEM_REG_FAST_MEMORY +
+ XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
+
+ __storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
+ u16 pf_id)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+}
+
+static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
+ u8 enable)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+}
+
+static inline void storm_memset_eq_data(struct bnx2x *bp,
+ struct event_ring_data *eq_data,
+ u16 pfid)
+{
+ size_t size = sizeof(struct event_ring_data);
+
+ u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
+}
+
+static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
+ u16 pfid)
+{
+ u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
+ REG_WR16(bp, addr, eq_prod);
+}
+
+/* used only at init
+ * locking is done by mcp
+ */
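+/* indirect GRC access goes through the PCI config window: the target
+ * address is written to PCICFG_GRC_ADDRESS, the data moves through
+ * PCICFG_GRC_DATA, and the window is then pointed back at the vendor ID
+ * register
+ */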
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
+{
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+ PCICFG_VENDOR_ID_OFFSET);
+}
+
+static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
+{
+ u32 val;
+
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
+ pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+ PCICFG_VENDOR_ID_OFFSET);
+
+ return val;
+}
+
+#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
+#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
+#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
+#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
+#define DMAE_DP_DST_NONE "dst_addr [none]"
+
+static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
+ int msglvl)
+{
+ u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
+
+ switch (dmae->opcode & DMAE_COMMAND_DST) {
+ case DMAE_CMD_DST_PCI:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ case DMAE_CMD_DST_GRC:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ default:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
+ "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
+ "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ }
+
+}
+
+/* copy command into DMAE command memory and set DMAE command go */
+void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
+{
+ u32 cmd_offset;
+ int i;
+
+ cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
+ for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
+ REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
+
+ DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
+ idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
+ }
+ REG_WR(bp, dmae_reg_go_c[idx], 1);
+}
+
+u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
+{
+ return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
+ DMAE_CMD_C_ENABLE);
+}
+
+u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
+{
+ return opcode & ~DMAE_CMD_SRC_RESET;
+}
+
+u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
+ bool with_comp, u8 comp_type)
+{
+ u32 opcode = 0;
+
+ opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
+ (dst_type << DMAE_COMMAND_DST_SHIFT));
+
+ opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
+
+ opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
+ opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
+ (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
+ opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
+
+#ifdef __BIG_ENDIAN
+ opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
+#else
+ opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
+#endif
+ if (with_comp)
+ opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
+ return opcode;
+}
+
+static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
+ struct dmae_command *dmae,
+ u8 src_type, u8 dst_type)
+{
+ memset(dmae, 0, sizeof(struct dmae_command));
+
+ /* set the opcode */
+ dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
+ true, DMAE_COMP_PCI);
+
+ /* fill in the completion parameters */
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+}
+
+/* issue a dmae command over the init-channel and wait for completion */
+static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
+ struct dmae_command *dmae)
+{
+ u32 *wb_comp = bnx2x_sp(bp, wb_comp);
+ int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
+ int rc = 0;
+
+ DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
+ bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
+ bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
+
+ /*
+ * Lock the dmae channel. Disable BHs to prevent a dead-lock,
+ * since this code is called both from syscall context and
+ * from the ndo_set_rx_mode() flow that may run in BH context.
+ */
+ spin_lock_bh(&bp->dmae_lock);
+
+ /* reset completion */
+ *wb_comp = 0;
+
+ /* post the command on the channel used for initializations */
+ bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+
+ /* wait for completion */
+ udelay(5);
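+ /* the completion word may carry DMAE_PCI_ERR_FLAG, so mask it out
+ * when testing for DMAE_COMP_VAL; the error itself is checked below
+ */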
+ while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
+ DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
+
+ if (!cnt) {
+ BNX2X_ERR("DMAE timeout!\n");
+ rc = DMAE_TIMEOUT;
+ goto unlock;
+ }
+ cnt--;
+ udelay(50);
+ }
+ if (*wb_comp & DMAE_PCI_ERR_FLAG) {
+ BNX2X_ERR("DMAE PCI error!\n");
+ rc = DMAE_PCI_ERROR;
+ }
+
+ DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
+ bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
+ bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
+
+unlock:
+ spin_unlock_bh(&bp->dmae_lock);
+ return rc;
+}
+
+void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
+ u32 len32)
+{
+ struct dmae_command dmae;
+
+ if (!bp->dmae_ready) {
+ u32 *data = bnx2x_sp(bp, wb_data[0]);
+
+ DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
+ " using indirect\n", dst_addr, len32);
+ bnx2x_init_ind_wr(bp, dst_addr, data, len32);
+ return;
+ }
+
+ /* set opcode and fixed command fields */
+ bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
+
+ /* fill in addresses and len */
+ dmae.src_addr_lo = U64_LO(dma_addr);
+ dmae.src_addr_hi = U64_HI(dma_addr);
+ dmae.dst_addr_lo = dst_addr >> 2;
+ dmae.dst_addr_hi = 0;
+ dmae.len = len32;
+
+ bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
+
+ /* issue the command and wait for completion */
+ bnx2x_issue_dmae_with_comp(bp, &dmae);
+}
+
+void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
+{
+ struct dmae_command dmae;
+
+ if (!bp->dmae_ready) {
+ u32 *data = bnx2x_sp(bp, wb_data[0]);
+ int i;
+
+ DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
+ " using indirect\n", src_addr, len32);
+ for (i = 0; i < len32; i++)
+ data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
+ return;
+ }
+
+ /* set opcode and fixed command fields */
+ bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
+
+ /* fill in addresses and len */
+ dmae.src_addr_lo = src_addr >> 2;
+ dmae.src_addr_hi = 0;
+ dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
+ dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
+ dmae.len = len32;
+
+ bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
+
+ /* issue the command and wait for completion */
+ bnx2x_issue_dmae_with_comp(bp, &dmae);
+}
+
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+ u32 addr, u32 len)
+{
+ int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
+ int offset = 0;
+
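+ /* len is in 32-bit words, so the byte offset advances by
+ * dmae_wr_max * 4 for each chunk written
+ */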
+ while (len > dmae_wr_max) {
+ bnx2x_write_dmae(bp, phys_addr + offset,
+ addr + offset, dmae_wr_max);
+ offset += dmae_wr_max * 4;
+ len -= dmae_wr_max;
+ }
+
+ bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
+}
+
+/* used only for slowpath so not inlined */
+static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
+{
+ u32 wb_write[2];
+
+ wb_write[0] = val_hi;
+ wb_write[1] = val_lo;
+ REG_WR_DMAE(bp, reg, wb_write, 2);
+}
+
+#ifdef USE_WB_RD
+static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
+{
+ u32 wb_data[2];
+
+ REG_RD_DMAE(bp, reg, wb_data, 2);
+
+ return HILO_U64(wb_data[0], wb_data[1]);
+}
+#endif
+
+static int bnx2x_mc_assert(struct bnx2x *bp)
+{
+ char last_idx;
+ int i, rc = 0;
+ u32 row0, row1, row2, row3;
+
+ /* XSTORM */
+ last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ASSERT_LIST_INDEX_OFFSET);
+ if (last_idx)
+ BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+
+ /* print the asserts */
+ for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+
+ row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ASSERT_LIST_OFFSET(i));
+ row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ASSERT_LIST_OFFSET(i) + 4);
+ row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ASSERT_LIST_OFFSET(i) + 8);
+ row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ASSERT_LIST_OFFSET(i) + 12);
+
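+ /* an unused entry begins with the invalid-opcode marker - stop there */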
+ if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+ BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
+ " 0x%08x 0x%08x 0x%08x\n",
+ i, row3, row2, row1, row0);
+ rc++;
+ } else {
+ break;
+ }
+ }
+
+ /* TSTORM */
+ last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ASSERT_LIST_INDEX_OFFSET);
+ if (last_idx)
+ BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+
+ /* print the asserts */
+ for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+
+ row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ASSERT_LIST_OFFSET(i));
+ row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ASSERT_LIST_OFFSET(i) + 4);
+ row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ASSERT_LIST_OFFSET(i) + 8);
+ row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ASSERT_LIST_OFFSET(i) + 12);
+
+ if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+ BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
+ " 0x%08x 0x%08x 0x%08x\n",
+ i, row3, row2, row1, row0);
+ rc++;
+ } else {
+ break;
+ }
+ }
+
+ /* CSTORM */
+ last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_ASSERT_LIST_INDEX_OFFSET);
+ if (last_idx)
+ BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+
+ /* print the asserts */
+ for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+
+ row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_ASSERT_LIST_OFFSET(i));
+ row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_ASSERT_LIST_OFFSET(i) + 4);
+ row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_ASSERT_LIST_OFFSET(i) + 8);
+ row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_ASSERT_LIST_OFFSET(i) + 12);
+
+ if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+ BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
+ " 0x%08x 0x%08x 0x%08x\n",
+ i, row3, row2, row1, row0);
+ rc++;
+ } else {
+ break;
+ }
+ }
+
+ /* USTORM */
+ last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
+ USTORM_ASSERT_LIST_INDEX_OFFSET);
+ if (last_idx)
+ BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+
+ /* print the asserts */
+ for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+
+ row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
+ USTORM_ASSERT_LIST_OFFSET(i));
+ row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
+ USTORM_ASSERT_LIST_OFFSET(i) + 4);
+ row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
+ USTORM_ASSERT_LIST_OFFSET(i) + 8);
+ row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
+ USTORM_ASSERT_LIST_OFFSET(i) + 12);
+
+ if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+ BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
+ " 0x%08x 0x%08x 0x%08x\n",
+ i, row3, row2, row1, row0);
+ rc++;
+ } else {
+ break;
+ }
+ }
+
+ return rc;
+}
+
+void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
+{
+ u32 addr, val;
+ u32 mark, offset;
+ __be32 data[9];
+ int word;
+ u32 trace_shmem_base;
+
+ if (BP_NOMCP(bp)) {
+ BNX2X_ERR("NO MCP - can not dump\n");
+ return;
+ }
+ netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
+ (bp->common.bc_ver & 0xff0000) >> 16,
+ (bp->common.bc_ver & 0xff00) >> 8,
+ (bp->common.bc_ver & 0xff));
+
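+ /* read the PC twice and print it only if the value is stable */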
+ val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
+ if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
+ printk("%s" "MCP PC at 0x%x\n", lvl, val);
+
+ if (BP_PATH(bp) == 0)
+ trace_shmem_base = bp->common.shmem_base;
+ else
+ trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
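+ /* translate the trace mark into a scratchpad register offset:
+ * dword-align it, strip the 0x08000000 base and add the scratchpad
+ * start
+ */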
+ addr = trace_shmem_base - 0x0800 + 4;
+ mark = REG_RD(bp, addr);
+ mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+ + ((mark + 0x3) & ~0x3) - 0x08000000;
+ printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
+
+ printk("%s", lvl);
+ for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
+ for (word = 0; word < 8; word++)
+ data[word] = htonl(REG_RD(bp, offset + 4*word));
+ data[8] = 0x0;
+ pr_cont("%s", (char *)data);
+ }
+ for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
+ for (word = 0; word < 8; word++)
+ data[word] = htonl(REG_RD(bp, offset + 4*word));
+ data[8] = 0x0;
+ pr_cont("%s", (char *)data);
+ }
+ printk("%s" "end of fw dump\n", lvl);
+}
+
+static inline void bnx2x_fw_dump(struct bnx2x *bp)
+{
+ bnx2x_fw_dump_lvl(bp, KERN_ERR);
+}
+
+void bnx2x_panic_dump(struct bnx2x *bp)
+{
+ int i;
+ u16 j;
+ struct hc_sp_status_block_data sp_sb_data;
+ int func = BP_FUNC(bp);
+#ifdef BNX2X_STOP_ON_ERROR
+ u16 start = 0, end = 0;
+ u8 cos;
+#endif
+
+ bp->stats_state = STATS_STATE_DISABLED;
+ DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
+
+ BNX2X_ERR("begin crash dump -----------------\n");
+
+ /* Indices */
+ /* Common */
+ BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
+ " spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+ bp->def_idx, bp->def_att_idx, bp->attn_state,
+ bp->spq_prod_idx, bp->stats_counter);
+ BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
+ bp->def_status_blk->atten_status_block.attn_bits,
+ bp->def_status_blk->atten_status_block.attn_bits_ack,
+ bp->def_status_blk->atten_status_block.status_block_id,
+ bp->def_status_blk->atten_status_block.attn_bits_index);
+ BNX2X_ERR(" def (");
+ for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+ pr_cont("0x%x%s",
+ bp->def_status_blk->sp_sb.index_values[i],
+ (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
+
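+ /* fetch the slow path status block data word by word from internal
+ * memory
+ */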
+ for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
+ *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
+ i*sizeof(u32));
+
+ pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
+ sp_sb_data.igu_sb_id,
+ sp_sb_data.igu_seg_id,
+ sp_sb_data.p_func.pf_id,
+ sp_sb_data.p_func.vnic_id,
+ sp_sb_data.p_func.vf_id,
+ sp_sb_data.p_func.vf_valid,
+ sp_sb_data.state);
+
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ int loop;
+ struct hc_status_block_data_e2 sb_data_e2;
+ struct hc_status_block_data_e1x sb_data_e1x;
+ struct hc_status_block_sm *hc_sm_p =
+ CHIP_IS_E1x(bp) ?
+ sb_data_e1x.common.state_machine :
+ sb_data_e2.common.state_machine;
+ struct hc_index_data *hc_index_p =
+ CHIP_IS_E1x(bp) ?
+ sb_data_e1x.index_data :
+ sb_data_e2.index_data;
+ u8 data_size, cos;
+ u32 *sb_data_p;
+ struct bnx2x_fp_txdata txdata;
+
+ /* Rx */
+ BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
+ " rx_comp_prod(0x%x)"
+ " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
+ i, fp->rx_bd_prod, fp->rx_bd_cons,
+ fp->rx_comp_prod,
+ fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
+ BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
+ " fp_hc_idx(0x%x)\n",
+ fp->rx_sge_prod, fp->last_max_sge,
+ le16_to_cpu(fp->fp_hc_idx));
+
+ /* Tx */
+ for_each_cos_in_tx_queue(fp, cos) {
+ txdata = fp->txdata[cos];
+ BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
+ " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
+ " *tx_cons_sb(0x%x)\n",
+ i, txdata.tx_pkt_prod,
+ txdata.tx_pkt_cons, txdata.tx_bd_prod,
+ txdata.tx_bd_cons,
+ le16_to_cpu(*txdata.tx_cons_sb));
+ }
+
+ loop = CHIP_IS_E1x(bp) ?
+ HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
+
+ /* host sb data */
+
+#ifdef BCM_CNIC
+ if (IS_FCOE_FP(fp))
+ continue;
+#endif
+ BNX2X_ERR(" run indexes (");
+ for (j = 0; j < HC_SB_MAX_SM; j++)
+ pr_cont("0x%x%s",
+ fp->sb_running_index[j],
+ (j == HC_SB_MAX_SM - 1) ? ")" : " ");
+
+ BNX2X_ERR(" indexes (");
+ for (j = 0; j < loop; j++)
+ pr_cont("0x%x%s",
+ fp->sb_index_values[j],
+ (j == loop - 1) ? ")" : " ");
+ /* fw sb data */
+ data_size = CHIP_IS_E1x(bp) ?
+ sizeof(struct hc_status_block_data_e1x) :
+ sizeof(struct hc_status_block_data_e2);
+ data_size /= sizeof(u32);
+ sb_data_p = CHIP_IS_E1x(bp) ?
+ (u32 *)&sb_data_e1x :
+ (u32 *)&sb_data_e2;
+ /* copy sb data in here */
+ for (j = 0; j < data_size; j++)
+ *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
+ j * sizeof(u32));
+
+ if (!CHIP_IS_E1x(bp)) {
+ pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) "
+ "vnic_id(0x%x) same_igu_sb_1b(0x%x) "
+ "state(0x%x)\n",
+ sb_data_e2.common.p_func.pf_id,
+ sb_data_e2.common.p_func.vf_id,
+ sb_data_e2.common.p_func.vf_valid,
+ sb_data_e2.common.p_func.vnic_id,
+ sb_data_e2.common.same_igu_sb_1b,
+ sb_data_e2.common.state);
+ } else {
+ pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) "
+ "vnic_id(0x%x) same_igu_sb_1b(0x%x) "
+ "state(0x%x)\n",
+ sb_data_e1x.common.p_func.pf_id,
+ sb_data_e1x.common.p_func.vf_id,
+ sb_data_e1x.common.p_func.vf_valid,
+ sb_data_e1x.common.p_func.vnic_id,
+ sb_data_e1x.common.same_igu_sb_1b,
+ sb_data_e1x.common.state);
+ }
+
+ /* SB_SMs data */
+ for (j = 0; j < HC_SB_MAX_SM; j++) {
+ pr_cont("SM[%d] __flags (0x%x) "
+ "igu_sb_id (0x%x) igu_seg_id(0x%x) "
+ "time_to_expire (0x%x) "
+ "timer_value(0x%x)\n", j,
+ hc_sm_p[j].__flags,
+ hc_sm_p[j].igu_sb_id,
+ hc_sm_p[j].igu_seg_id,
+ hc_sm_p[j].time_to_expire,
+ hc_sm_p[j].timer_value);
+ }
+
+ /* Indices data */
+ for (j = 0; j < loop; j++) {
+ pr_cont("INDEX[%d] flags (0x%x) "
+ "timeout (0x%x)\n", j,
+ hc_index_p[j].flags,
+ hc_index_p[j].timeout);
+ }
+ }
+
+#ifdef BNX2X_STOP_ON_ERROR
+ /* Rings */
+ /* Rx */
+ for_each_rx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
+ end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
+ for (j = start; j != end; j = RX_BD(j + 1)) {
+ u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
+ struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
+
+ BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
+ i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
+ }
+
+ start = RX_SGE(fp->rx_sge_prod);
+ end = RX_SGE(fp->last_max_sge);
+ for (j = start; j != end; j = RX_SGE(j + 1)) {
+ u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
+ struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
+
+ BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
+ i, j, rx_sge[1], rx_sge[0], sw_page->page);
+ }
+
+ start = RCQ_BD(fp->rx_comp_cons - 10);
+ end = RCQ_BD(fp->rx_comp_cons + 503);
+ for (j = start; j != end; j = RCQ_BD(j + 1)) {
+ u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
+
+ BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
+ i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
+ }
+ }
+
+ /* Tx */
+ for_each_tx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ for_each_cos_in_tx_queue(fp, cos) {
+ struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+
+ start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
+ end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
+ for (j = start; j != end; j = TX_BD(j + 1)) {
+ struct sw_tx_bd *sw_bd =
+ &txdata->tx_buf_ring[j];
+
+ BNX2X_ERR("fp%d: txdata %d, "
+ "packet[%x]=[%p,%x]\n",
+ i, cos, j, sw_bd->skb,
+ sw_bd->first_bd);
+ }
+
+ start = TX_BD(txdata->tx_bd_cons - 10);
+ end = TX_BD(txdata->tx_bd_cons + 254);
+ for (j = start; j != end; j = TX_BD(j + 1)) {
+ u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
+
+ BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]="
+ "[%x:%x:%x:%x]\n",
+ i, cos, j, tx_bd[0], tx_bd[1],
+ tx_bd[2], tx_bd[3]);
+ }
+ }
+ }
+#endif
+ bnx2x_fw_dump(bp);
+ bnx2x_mc_assert(bp);
+ BNX2X_ERR("end crash dump -----------------\n");
+}
+
+/*
+ * FLR Support for E2
+ *
+ * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
+ * initialization.
+ */
+#define FLR_WAIT_USEC 10000 /* 10 milliseconds */
+#define FLR_WAIT_INTERAVAL 50 /* usec */
+#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */
+
+struct pbf_pN_buf_regs {
+ int pN;
+ u32 init_crd;
+ u32 crd;
+ u32 crd_freed;
+};
+
+struct pbf_pN_cmd_regs {
+ int pN;
+ u32 lines_occup;
+ u32 lines_freed;
+};
+
+static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
+ struct pbf_pN_buf_regs *regs,
+ u32 poll_count)
+{
+ u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
+ u32 cur_cnt = poll_count;
+
+ crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
+ crd = crd_start = REG_RD(bp, regs->crd);
+ init_crd = REG_RD(bp, regs->init_crd);
+
+ DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
+ DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
+ DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
+
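+ /* wait until either all credits have returned or the freed counter
+ * covers everything that was outstanding; SUB_S32 keeps the
+ * comparison valid across counter wrap-around
+ */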
+ while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
+ (init_crd - crd_start))) {
+ if (cur_cnt--) {
+ udelay(FLR_WAIT_INTERAVAL);
+ crd = REG_RD(bp, regs->crd);
+ crd_freed = REG_RD(bp, regs->crd_freed);
+ } else {
+ DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
+ regs->pN);
+ DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
+ regs->pN, crd);
+ DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
+ regs->pN, crd_freed);
+ break;
+ }
+ }
+ DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
+ poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
+}
+
+static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
+ struct pbf_pN_cmd_regs *regs,
+ u32 poll_count)
+{
+ u32 occup, to_free, freed, freed_start;
+ u32 cur_cnt = poll_count;
+
+ occup = to_free = REG_RD(bp, regs->lines_occup);
+ freed = freed_start = REG_RD(bp, regs->lines_freed);
+
+ DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
+ DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
+
+ while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
+ if (cur_cnt--) {
+ udelay(FLR_WAIT_INTERAVAL);
+ occup = REG_RD(bp, regs->lines_occup);
+ freed = REG_RD(bp, regs->lines_freed);
+ } else {
+ DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
+ regs->pN);
+ DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
+ regs->pN, occup);
+ DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
+ regs->pN, freed);
+ break;
+ }
+ }
+ DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
+ poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
+}
+
+static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
+ u32 expected, u32 poll_count)
+{
+ u32 cur_cnt = poll_count;
+ u32 val;
+
+ while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
+ udelay(FLR_WAIT_INTERAVAL);
+
+ return val;
+}
+
+static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+ char *msg, u32 poll_cnt)
+{
+ u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
+ if (val != 0) {
+ BNX2X_ERR("%s usage count=%d\n", msg, val);
+ return 1;
+ }
+ return 0;
+}
+
+static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
+{
+ /* adjust polling timeout */
+ if (CHIP_REV_IS_EMUL(bp))
+ return FLR_POLL_CNT * 2000;
+
+ if (CHIP_REV_IS_FPGA(bp))
+ return FLR_POLL_CNT * 120;
+
+ return FLR_POLL_CNT;
+}
+
+static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
+{
+ struct pbf_pN_cmd_regs cmd_regs[] = {
+ {0, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_OCCUPANCY_Q0 :
+ PBF_REG_P0_TQ_OCCUPANCY,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_LINES_FREED_CNT_Q0 :
+ PBF_REG_P0_TQ_LINES_FREED_CNT},
+ {1, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_OCCUPANCY_Q1 :
+ PBF_REG_P1_TQ_OCCUPANCY,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_LINES_FREED_CNT_Q1 :
+ PBF_REG_P1_TQ_LINES_FREED_CNT},
+ {4, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_OCCUPANCY_LB_Q :
+ PBF_REG_P4_TQ_OCCUPANCY,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
+ PBF_REG_P4_TQ_LINES_FREED_CNT}
+ };
+
+ struct pbf_pN_buf_regs buf_regs[] = {
+ {0, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INIT_CRD_Q0 :
+ PBF_REG_P0_INIT_CRD,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_CREDIT_Q0 :
+ PBF_REG_P0_CREDIT,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
+ PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
+ {1, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INIT_CRD_Q1 :
+ PBF_REG_P1_INIT_CRD,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_CREDIT_Q1 :
+ PBF_REG_P1_CREDIT,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
+ PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
+ {4, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INIT_CRD_LB_Q :
+ PBF_REG_P4_INIT_CRD,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_CREDIT_LB_Q :
+ PBF_REG_P4_CREDIT,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
+ PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
+ };
+
+ int i;
+
+ /* Verify the command queues are flushed P0, P1, P4 */
+ for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
+ bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
+
+ /* Verify the transmission buffers are flushed P0, P1, P4 */
+ for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
+ bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
+}
+
+#define OP_GEN_PARAM(param) \
+ (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
+
+#define OP_GEN_TYPE(type) \
+ (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
+
+#define OP_GEN_AGG_VECT(index) \
+ (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
+
+static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
+ u32 poll_cnt)
+{
+ struct sdm_op_gen op_gen = {0};
+
+ u32 comp_addr = BAR_CSTRORM_INTMEM +
+ CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
+ int ret = 0;
+
+ if (REG_RD(bp, comp_addr)) {
+ BNX2X_ERR("Cleanup complete is not 0\n");
+ return 1;
+ }
+
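+ /* assemble the operation-gen command: completion parameter and type
+ * plus the aggregated interrupt vector of the function being cleaned
+ */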
+ op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
+ op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
+ op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
+ op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
+
+ DP(BNX2X_MSG_SP, "FW Final cleanup\n");
+ REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
+
+ if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
+ BNX2X_ERR("FW final cleanup did not succeed\n");
+ ret = 1;
+ }
+ /* Zero completion for next FLR */
+ REG_WR(bp, comp_addr, 0);
+
+ return ret;
+}
+
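+/* returns true while the Device Status register still reports pending
+ * PCIe transactions
+ */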
+static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
+{
+ int pos;
+ u16 status;
+
+ pos = pci_pcie_cap(dev);
+ if (!pos)
+ return false;
+
+ pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
+ return status & PCI_EXP_DEVSTA_TRPND;
+}
+
+/* PF FLR specific routines */
+static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
+{
+ /* wait for CFC PF usage-counter to zero (includes all the VFs) */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ CFC_REG_NUM_LCIDS_INSIDE_PF,
+ "CFC PF usage counter timed out",
+ poll_cnt))
+ return 1;
+
+ /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ DORQ_REG_PF_USAGE_CNT,
+ "DQ PF usage counter timed out",
+ poll_cnt))
+ return 1;
+
+ /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
+ "QM PF usage counter timed out",
+ poll_cnt))
+ return 1;
+
+ /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
+ "Timers VNIC usage counter timed out",
+ poll_cnt))
+ return 1;
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
+ "Timers NUM_SCANS usage counter timed out",
+ poll_cnt))
+ return 1;
+
+ /* Wait for the DMAE command (go) register to clear */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ dmae_reg_go_c[INIT_DMAE_C(bp)],
+ "DMAE dommand register timed out",
+ poll_cnt))
+ return 1;
+
+ return 0;
+}
+
+static void bnx2x_hw_enable_status(struct bnx2x *bp)
+{
+ u32 val;
+
+ val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
+ DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
+
+ val = REG_RD(bp, PBF_REG_DISABLE_PF);
+ DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
+
+ val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
+ DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
+
+ val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
+ DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
+
+ val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
+ DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
+
+ val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
+ DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
+
+ val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
+ DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
+
+ val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+ DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
+ val);
+}
+
+static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
+{
+ u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
+
+ DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
+
+ /* Re-enable PF target read access */
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
+ /* Poll HW usage counters */
+ if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
+ return -EBUSY;
+
+ /* Zero the igu 'trailing edge' and 'leading edge' */
+
+ /* Send the FW cleanup command */
+ if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
+ return -EBUSY;
+
+ /* ATC cleanup */
+
+ /* Verify TX hw is flushed */
+ bnx2x_tx_hw_flushed(bp, poll_cnt);
+
+ /* Wait 100ms (not adjusted according to platform) */
+ msleep(100);
+
+ /* Verify no pending pci transactions */
+ if (bnx2x_is_pcie_pending(bp->pdev))
+ BNX2X_ERR("PCIE Transactions still pending\n");
+
+ /* Debug */
+ bnx2x_hw_enable_status(bp);
+
+ /*
+ * Master enable - needed for the WB DMAE writes performed before
+ * this register is re-initialized as part of the regular function
+ * init
+ */
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+
+ return 0;
+}
+
+static void bnx2x_hc_int_enable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+ u32 val = REG_RD(bp, addr);
+ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+ int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
+
+ if (msix) {
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0);
+ val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ } else if (msi) {
+ val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
+ val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ } else {
+ val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+ if (!CHIP_IS_E1(bp)) {
+ DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
+ val, port, addr);
+
+ REG_WR(bp, addr, val);
+
+ val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
+ }
+ }
+
+ if (CHIP_IS_E1(bp))
+ REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
+
+ DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
+ val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
+
+ REG_WR(bp, addr, val);
+ /*
+ * Ensure that HC_CONFIG is written before leading/trailing edge config
+ */
+ mmiowb();
+ barrier();
+
+ if (!CHIP_IS_E1(bp)) {
+ /* init leading/trailing edge */
+ if (IS_MF(bp)) {
+ val = (0xee0f | (1 << (BP_VN(bp) + 4)));
+ if (bp->port.pmf)
+ /* enable nig and gpio3 attention */
+ val |= 0x1100;
+ } else
+ val = 0xffff;
+
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
+ }
+
+ /* Make sure that interrupts are indeed enabled from here on */
+ mmiowb();
+}
+
+static void bnx2x_igu_int_enable(struct bnx2x *bp)
+{
+ u32 val;
+ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+ int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
+
+ val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+ if (msix) {
+ val &= ~(IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_SINGLE_ISR_EN);
+ val |= (IGU_PF_CONF_FUNC_EN |
+ IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_ATTN_BIT_EN);
+ } else if (msi) {
+ val &= ~IGU_PF_CONF_INT_LINE_EN;
+ val |= (IGU_PF_CONF_FUNC_EN |
+ IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_ATTN_BIT_EN |
+ IGU_PF_CONF_SINGLE_ISR_EN);
+ } else {
+ val &= ~IGU_PF_CONF_MSI_MSIX_EN;
+ val |= (IGU_PF_CONF_FUNC_EN |
+ IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_ATTN_BIT_EN |
+ IGU_PF_CONF_SINGLE_ISR_EN);
+ }
+
+ DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
+ val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+
+ barrier();
+
+ /* init leading/trailing edge */
+ if (IS_MF(bp)) {
+ val = (0xee0f | (1 << (BP_VN(bp) + 4)));
+ if (bp->port.pmf)
+ /* enable nig and gpio3 attention */
+ val |= 0x1100;
+ } else
+ val = 0xffff;
+
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
+
+ /* Make sure that interrupts are indeed enabled from here on */
+ mmiowb();
+}
+
+void bnx2x_int_enable(struct bnx2x *bp)
+{
+ if (bp->common.int_block == INT_BLOCK_HC)
+ bnx2x_hc_int_enable(bp);
+ else
+ bnx2x_igu_int_enable(bp);
+}
+
+static void bnx2x_hc_int_disable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+ u32 val = REG_RD(bp, addr);
+
+ /*
+ * in E1 we must use only the PCI configuration space to disable the
+ * MSI/MSI-X capability.
+ * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block
+ */
+ if (CHIP_IS_E1(bp)) {
+ /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask
+ * register to prevent the HC from sending interrupts after we
+ * exit the function
+ */
+ REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
+
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ } else
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+ DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
+ val, port, addr);
+
+ /* flush all outstanding writes */
+ mmiowb();
+
+ REG_WR(bp, addr, val);
+ if (REG_RD(bp, addr) != val)
+ BNX2X_ERR("BUG! proper val not read from IGU!\n");
+}
+
+static void bnx2x_igu_int_disable(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+ val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_ATTN_BIT_EN);
+
+ DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
+
+ /* flush all outstanding writes */
+ mmiowb();
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
+ BNX2X_ERR("BUG! proper val not read from IGU!\n");
+}
+
+void bnx2x_int_disable(struct bnx2x *bp)
+{
+ if (bp->common.int_block == INT_BLOCK_HC)
+ bnx2x_hc_int_disable(bp);
+ else
+ bnx2x_igu_int_disable(bp);
+}
+
+void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
+{
+ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+ int i, offset;
+
+ if (disable_hw)
+ /* prevent the HW from sending interrupts */
+ bnx2x_int_disable(bp);
+
+ /* make sure all ISRs are done */
+ if (msix) {
+ synchronize_irq(bp->msix_table[0].vector);
+ offset = 1;
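+ /* vector 0 is the slow path; CNIC, when built in, takes the next one */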
+#ifdef BCM_CNIC
+ offset++;
+#endif
+ for_each_eth_queue(bp, i)
+ synchronize_irq(bp->msix_table[offset++].vector);
+ } else
+ synchronize_irq(bp->pdev->irq);
+
+ /* make sure sp_task is not running */
+ cancel_delayed_work(&bp->sp_task);
+ cancel_delayed_work(&bp->period_task);
+ flush_workqueue(bnx2x_wq);
+}
+
+/* fast path */
+
+/*
+ * General service functions
+ */
+
+/* Return true if succeeded to acquire the lock */
+static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
+{
+ u32 lock_status;
+ u32 resource_bit = (1 << resource);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
+
+ DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
+
+ /* Validating that the resource is within range */
+ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+ DP(NETIF_MSG_HW,
+ "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ resource, HW_LOCK_MAX_RESOURCE_VALUE);
+ return false;
+ }
+
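+ /* each function owns an 8-byte pair of lock control registers;
+ * functions 0-5 start at MISC_REG_DRIVER_CONTROL_1, functions 6-7
+ * at MISC_REG_DRIVER_CONTROL_7
+ */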
+ if (func <= 5)
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ else
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+
+ /* Try to acquire the lock */
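+ /* writing the resource bit to the register at +4 requests the lock;
+ * the read-back shows whether it was granted
+ */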
+ REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (lock_status & resource_bit)
+ return true;
+
+ DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
+ return false;
+}
+
+/**
+ * bnx2x_get_leader_lock_resource - get the recovery leader resource id
+ *
+ * @bp: driver handle
+ *
+ * Returns the recovery leader resource id according to the engine this
+ * function belongs to. Currently only 2 engines are supported.
+ */
+static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
+{
+ if (BP_PATH(bp))
+ return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
+ else
+ return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
+}
+
+/**
+ * bnx2x_trylock_leader_lock - try to acquire a leader lock.
+ *
+ * @bp: driver handle
+ *
+ * Tries to acquire a leader lock for the current engine.
+ */
+static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
+{
+ return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
+}
+
+#ifdef BCM_CNIC
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
+#endif
+
+void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
+{
+ struct bnx2x *bp = fp->bp;
+ int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+ int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+ enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
+ struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;
+
+ DP(BNX2X_MSG_SP,
+ "fp %d cid %d got ramrod #%d state is %x type is %d\n",
+ fp->index, cid, command, bp->state,
+ rr_cqe->ramrod_cqe.ramrod_type);
+
+ switch (command) {
+ case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
+ DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
+ drv_cmd = BNX2X_Q_CMD_UPDATE;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_SETUP;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
+ DP(NETIF_MSG_IFUP, "got MULTI[%d] tx-only setup ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_HALT):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_HALT;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_TERMINATE):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] teminate ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_TERMINATE;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_EMPTY):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_EMPTY;
+ break;
+
+ default:
+ BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
+ command, fp->index);
+ return;
+ }
+
+ if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
+ q_obj->complete_cmd(bp, q_obj, drv_cmd))
+ /* q_obj->complete_cmd() failure means that this was
+ * an unexpected completion.
+ *
+ * In this case we don't want to increase bp->spq_left
+ * because apparently we haven't sent this command in the
+ * first place.
+ */
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#else
+ return;
+#endif
+
+ smp_mb__before_atomic_inc();
+ atomic_inc(&bp->cq_spq_left);
+ /* push the change in bp->spq_left and towards the memory */
+ smp_mb__after_atomic_inc();
+
+ DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
+
+ return;
+}
+
+void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
+{
+ u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;
+
+ bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
+ start);
+}
+
+irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
+{
+ struct bnx2x *bp = netdev_priv(dev_instance);
+ u16 status = bnx2x_ack_int(bp);
+ u16 mask;
+ int i;
+ u8 cos;
+
+ /* Return here if interrupt is shared and it's not for us */
+ if (unlikely(status == 0)) {
+ DP(NETIF_MSG_INTR, "not our interrupt!\n");
+ return IRQ_NONE;
+ }
+ DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return IRQ_HANDLED;
+#endif
+
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
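+ /* bit 0 of the ack word belongs to the slow path; the per-queue
+ * bits start above it, shifted past the CNIC status block when one
+ * is present
+ */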
+ mask = 0x2 << (fp->index + CNIC_PRESENT);
+ if (status & mask) {
+ /* Handle Rx or Tx according to SB id */
+ prefetch(fp->rx_cons_sb);
+ for_each_cos_in_tx_queue(fp, cos)
+ prefetch(fp->txdata[cos].tx_cons_sb);
+ prefetch(&fp->sb_running_index[SM_RX_ID]);
+ napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+ status &= ~mask;
+ }
+ }
+
+#ifdef BCM_CNIC
+ mask = 0x2;
+ if (status & (mask | 0x1)) {
+ struct cnic_ops *c_ops = NULL;
+
+ if (likely(bp->state == BNX2X_STATE_OPEN)) {
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops)
+ c_ops->cnic_handler(bp->cnic_data, NULL);
+ rcu_read_unlock();
+ }
+
+ status &= ~mask;
+ }
+#endif
+
+ if (unlikely(status & 0x1)) {
+ queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+
+ status &= ~0x1;
+ if (!status)
+ return IRQ_HANDLED;
+ }
+
+ if (unlikely(status))
+ DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+ status);
+
+ return IRQ_HANDLED;
+}
+
+/* Link */
+
+/*
+ * General service functions
+ */
+
+int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
+{
+ u32 lock_status;
+ u32 resource_bit = (1 << resource);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
+ int cnt;
+
+ /* Validating that the resource is within range */
+ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+ DP(NETIF_MSG_HW,
+ "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ resource, HW_LOCK_MAX_RESOURCE_VALUE);
+ return -EINVAL;
+ }
+
+ if (func <= 5) {
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ } else {
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+ }
+
+ /* Validating that the resource is not already taken */
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (lock_status & resource_bit) {
+ DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
+ lock_status, resource_bit);
+ return -EEXIST;
+ }
+
+ /* Try for 5 seconds every 5ms */
+ for (cnt = 0; cnt < 1000; cnt++) {
+ /* Try to acquire the lock */
+ REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (lock_status & resource_bit)
+ return 0;
+
+ msleep(5);
+ }
+ DP(NETIF_MSG_HW, "Timeout\n");
+ return -EAGAIN;
+}
+
+int bnx2x_release_leader_lock(struct bnx2x *bp)
+{
+ return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
+}
+
+int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
+{
+ u32 lock_status;
+ u32 resource_bit = (1 << resource);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
+
+ DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
+
+ /* Validating that the resource is within range */
+ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+ DP(NETIF_MSG_HW,
+ "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ resource, HW_LOCK_MAX_RESOURCE_VALUE);
+ return -EINVAL;
+ }
+
+ if (func <= 5) {
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ } else {
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+ }
+
+ /* Validating that the resource is currently taken */
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (!(lock_status & resource_bit)) {
+ DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
+ lock_status, resource_bit);
+ return -EFAULT;
+ }
+
+ REG_WR(bp, hw_lock_control_reg, resource_bit);
+ return 0;
+}
+
+
+int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
+{
+ /* The GPIO should be swapped if swap register is set and active */
+ int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+ REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+ int gpio_shift = gpio_num +
+ (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+ u32 gpio_mask = (1 << gpio_shift);
+ u32 gpio_reg;
+ int value;
+
+ if (gpio_num > MISC_REGISTERS_GPIO_3) {
+ BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+ return -EINVAL;
+ }
+
+ /* read GPIO value */
+ gpio_reg = REG_RD(bp, MISC_REG_GPIO);
+
+ /* get the requested pin value */
+ if ((gpio_reg & gpio_mask) == gpio_mask)
+ value = 1;
+ else
+ value = 0;
+
+ DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
+
+ return value;
+}
+
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
+{
+ /* The GPIO should be swapped if swap register is set and active */
+ int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+ REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+ int gpio_shift = gpio_num +
+ (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+ u32 gpio_mask = (1 << gpio_shift);
+ u32 gpio_reg;
+
+ if (gpio_num > MISC_REGISTERS_GPIO_3) {
+ BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+ return -EINVAL;
+ }
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+ /* read GPIO and mask except the float bits */
+ gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
+
+ switch (mode) {
+ case MISC_REGISTERS_GPIO_OUTPUT_LOW:
+ DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
+ gpio_num, gpio_shift);
+ /* clear FLOAT and set CLR */
+ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
+ DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
+ gpio_num, gpio_shift);
+ /* clear FLOAT and set SET */
+ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_INPUT_HI_Z:
+ DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
+ gpio_num, gpio_shift);
+ /* set FLOAT */
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+ break;
+
+ default:
+ break;
+ }
+
+ REG_WR(bp, MISC_REG_GPIO, gpio_reg);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+ return 0;
+}
+
+int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
+{
+ u32 gpio_reg = 0;
+ int rc = 0;
+
+ /* Any port swapping should be handled by caller. */
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+ /* read GPIO and mask except the float bits */
+ gpio_reg = REG_RD(bp, MISC_REG_GPIO);
+ gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
+ gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
+ gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
+
+ switch (mode) {
+ case MISC_REGISTERS_GPIO_OUTPUT_LOW:
+ DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
+ /* set CLR */
+ gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
+ DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
+ /* set SET */
+ gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_INPUT_HI_Z:
+ DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
+ /* set FLOAT */
+ gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
+ break;
+
+ default:
+ BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc == 0)
+ REG_WR(bp, MISC_REG_GPIO, gpio_reg);
+
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+ return rc;
+}
+
+int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
+{
+ /* The GPIO should be swapped if swap register is set and active */
+ int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+ REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+ int gpio_shift = gpio_num +
+ (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+ u32 gpio_mask = (1 << gpio_shift);
+ u32 gpio_reg;
+
+ if (gpio_num > MISC_REGISTERS_GPIO_3) {
+ BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+ return -EINVAL;
+ }
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+ /* read GPIO int */
+ gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
+
+ switch (mode) {
+ case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
+ DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
+ "output low\n", gpio_num, gpio_shift);
+ /* clear SET and set CLR */
+ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
+ DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
+ "output high\n", gpio_num, gpio_shift);
+ /* clear CLR and set SET */
+ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
+ break;
+
+ default:
+ break;
+ }
+
+ REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+ return 0;
+}
+
+static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
+{
+ u32 spio_mask = (1 << spio_num);
+ u32 spio_reg;
+
+ if ((spio_num < MISC_REGISTERS_SPIO_4) ||
+ (spio_num > MISC_REGISTERS_SPIO_7)) {
+ BNX2X_ERR("Invalid SPIO %d\n", spio_num);
+ return -EINVAL;
+ }
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+ /* read SPIO and mask except the float bits */
+ spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
+
+ switch (mode) {
+ case MISC_REGISTERS_SPIO_OUTPUT_LOW:
+ DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
+ /* clear FLOAT and set CLR */
+ spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+ spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
+ break;
+
+ case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
+ DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
+ /* clear FLOAT and set SET */
+ spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+ spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
+ break;
+
+ case MISC_REGISTERS_SPIO_INPUT_HI_Z:
+ DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
+ /* set FLOAT */
+ spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+ break;
+
+ default:
+ break;
+ }
+
+ REG_WR(bp, MISC_REG_SPIO, spio_reg);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+
+ return 0;
+}
+
+void bnx2x_calc_fc_adv(struct bnx2x *bp)
+{
+ u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ switch (bp->link_vars.ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
+ case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
+ bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
+ ADVERTISED_Pause);
+ break;
+
+ case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
+ bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
+ ADVERTISED_Pause);
+ break;
+
+ case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
+ bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
+ break;
+
+ default:
+ bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
+ ADVERTISED_Pause);
+ break;
+ }
+}
+
+u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
+{
+ if (!BP_NOMCP(bp)) {
+ u8 rc;
+ int cfx_idx = bnx2x_get_link_cfg_idx(bp);
+ u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
+ /*
+ * Initialize link parameters structure variables.
+ * It is recommended to turn off RX FC for jumbo frames
+ * for better performance.
+ */
+ if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
+ bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
+ else
+ bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
+
+ bnx2x_acquire_phy_lock(bp);
+
+ if (load_mode == LOAD_DIAG) {
+ struct link_params *lp = &bp->link_params;
+ lp->loopback_mode = LOOPBACK_XGXS;
+ /* do PHY loopback at 10G speed, if possible */
+ if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
+ if (lp->speed_cap_mask[cfx_idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ lp->req_line_speed[cfx_idx] =
+ SPEED_10000;
+ else
+ lp->req_line_speed[cfx_idx] =
+ SPEED_1000;
+ }
+ }
+
+ rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+
+ bnx2x_release_phy_lock(bp);
+
+ bnx2x_calc_fc_adv(bp);
+
+ if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ bnx2x_link_report(bp);
+ } else
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+ bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
+ return rc;
+ }
+ BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+ return -EINVAL;
+}
+
+void bnx2x_link_set(struct bnx2x *bp)
+{
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+ bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+ bnx2x_release_phy_lock(bp);
+
+ bnx2x_calc_fc_adv(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not set link\n");
+}
+
+static void bnx2x__link_reset(struct bnx2x *bp)
+{
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+ bnx2x_release_phy_lock(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not reset link\n");
+}
+
+u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
+{
+ u8 rc = 0;
+
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
+ is_serdes);
+ bnx2x_release_phy_lock(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not test link\n");
+
+ return rc;
+}
+
+static void bnx2x_init_port_minmax(struct bnx2x *bp)
+{
+ u32 r_param = bp->link_vars.line_speed / 8;
+ u32 fair_periodic_timeout_usec;
+ u32 t_fair;
+
+ memset(&(bp->cmng.rs_vars), 0,
+ sizeof(struct rate_shaping_vars_per_port));
+ memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
+
+ /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
+ bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
+
+ /* this is the threshold below which no timer arming will occur;
+ the 1.25 coefficient makes the threshold a little bigger than
+ the real time, to compensate for timer inaccuracy */
+ bp->cmng.rs_vars.rs_threshold =
+ (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
+
+ /* resolution of fairness timer */
+ fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
+ /* for 10G it is 1000usec. for 1G it is 10000usec. */
+ t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
+
+ /* this is the threshold below which we won't arm the timer anymore */
+ bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
+
+ /* we multiply by 1e3/8 to get bytes/msec.
+ We don't want the credits to exceed a credit of
+ t_fair*FAIR_MEM (the algorithm resolution) */
+ bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
+ /* since each tick is 4 usec */
+ bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
+}
+
+/* Calculates the sum of vn_min_rates and stores it in bp->vn_weight_sum.
+ It's needed for further normalizing of the min_rates.
+ The result is:
+ the sum of vn_min_rates,
+ or
+ 0 - if all the min_rates are 0.
+ In the latter case the fairness algorithm should be deactivated.
+ If not all min_rates are zero then those that are zero will be set to 1.
+ */
+static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
+{
+ int all_zero = 1;
+ int vn;
+
+ bp->vn_weight_sum = 0;
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+ u32 vn_cfg = bp->mf_config[vn];
+ u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
+
+ /* Skip hidden vns */
+ if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
+ continue;
+
+ /* If min rate is zero - set it to 1 */
+ if (!vn_min_rate)
+ vn_min_rate = DEF_MIN_RATE;
+ else
+ all_zero = 0;
+
+ bp->vn_weight_sum += vn_min_rate;
+ }
+
+ /* if ETS or all min rates are zeros - disable fairness */
+ if (BNX2X_IS_ETS_ENABLED(bp)) {
+ bp->cmng.flags.cmng_enables &=
+ ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+ DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
+ } else if (all_zero) {
+ bp->cmng.flags.cmng_enables &=
+ ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+ DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
+ " fairness will be disabled\n");
+ } else
+ bp->cmng.flags.cmng_enables |=
+ CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+}
+
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+ return 2 * vn + BP_PORT(bp);
+}
+
+static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
+{
+ struct rate_shaping_vars_per_vn m_rs_vn;
+ struct fairness_vars_per_vn m_fair_vn;
+ u32 vn_cfg = bp->mf_config[vn];
+ int func = func_by_vn(bp, vn);
+ u16 vn_min_rate, vn_max_rate;
+ int i;
+
+ /* If function is hidden - set min and max to zeroes */
+ if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
+ vn_min_rate = 0;
+ vn_max_rate = 0;
+
+ } else {
+ u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
+
+ vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
+ /* If fairness is enabled (not all min rates are zeroes) and
+ if current min rate is zero - set it to 1.
+ This is a requirement of the algorithm. */
+ if (bp->vn_weight_sum && (vn_min_rate == 0))
+ vn_min_rate = DEF_MIN_RATE;
+
+ if (IS_MF_SI(bp))
+ /* maxCfg in percents of linkspeed */
+ vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
+ else
+ /* maxCfg is absolute in 100Mb units */
+ vn_max_rate = maxCfg * 100;
+ }
+
+ DP(NETIF_MSG_IFUP,
+ "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
+ func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
+
+ memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
+ memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
+
+ /* global vn counter - maximal Mbps for this vn */
+ m_rs_vn.vn_counter.rate = vn_max_rate;
+
+ /* quota - number of bytes transmitted in this period */
+ m_rs_vn.vn_counter.quota =
+ (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
+
+ if (bp->vn_weight_sum) {
+ /* credit for each period of the fairness algorithm:
+ number of bytes in T_FAIR (the vn share the port rate).
+ vn_weight_sum should not be larger than 10000, thus
+ T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
+ than zero */
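+ /* the max_t() below also clamps the delta from below, so it
+ always stays above the fairness threshold */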
+ m_fair_vn.vn_credit_delta =
+ max_t(u32, (vn_min_rate * (T_FAIR_COEF /
+ (8 * bp->vn_weight_sum))),
+ (bp->cmng.fair_vars.fair_threshold +
+ MIN_ABOVE_THRESH));
+ DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
+ m_fair_vn.vn_credit_delta);
+ }
+
+ /* Store it to internal memory */
+ for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
+ REG_WR(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
+ ((u32 *)(&m_rs_vn))[i]);
+
+ for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
+ REG_WR(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
+ ((u32 *)(&m_fair_vn))[i]);
+}
+
+static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
+{
+ if (CHIP_REV_IS_SLOW(bp))
+ return CMNG_FNS_NONE;
+ if (IS_MF(bp))
+ return CMNG_FNS_MINMAX;
+
+ return CMNG_FNS_NONE;
+}
+
+void bnx2x_read_mf_cfg(struct bnx2x *bp)
+{
+ int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
+
+ if (BP_NOMCP(bp))
+ return; /* what should the default value be in this case? */
+
+ /* For 2 port configuration the absolute function number formula
+ * is:
+ * abs_func = 2 * vn + BP_PORT + BP_PATH
+ *
+ * and there are 4 functions per port
+ *
+ * For 4 port configuration it is
+ * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
+ *
+ * and there are 2 functions per port
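+ *
+ * Both cases collapse into n * (2 * vn + BP_PORT) + BP_PATH,
+ * where n is 2 for 4-port chips and 1 otherwise.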
+ */
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+ int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
+
+ if (func >= E1H_FUNC_MAX)
+ break;
+
+ bp->mf_config[vn] =
+ MF_CFG_RD(bp, func_mf_config[func].config);
+ }
+}
+
+static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
+{
+ if (cmng_type == CMNG_FNS_MINMAX) {
+ int vn;
+
+ /* clear cmng_enables */
+ bp->cmng.flags.cmng_enables = 0;
+
+ /* read mf conf from shmem */
+ if (read_cfg)
+ bnx2x_read_mf_cfg(bp);
+
+ /* Init rate shaping and fairness contexts */
+ bnx2x_init_port_minmax(bp);
+
+ /* vn_weight_sum and enable fairness if not 0 */
+ bnx2x_calc_vn_weight_sum(bp);
+
+ /* calculate and set min-max rate for each vn */
+ if (bp->port.pmf)
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
+ bnx2x_init_vn_minmax(bp, vn);
+
+ /* always enable rate shaping and fairness */
+ bp->cmng.flags.cmng_enables |=
+ CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
+ if (!bp->vn_weight_sum)
+ DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
+ " fairness will be disabled\n");
+ return;
+ }
+
+ /* rate shaping and fairness are disabled */
+ DP(NETIF_MSG_IFUP,
+ "rate shaping and fairness are disabled\n");
+}
+
+static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
+{
+ int func;
+ int vn;
+
+ /* Set the attention towards other drivers on the same port */
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+ if (vn == BP_VN(bp))
+ continue;
+
+ func = func_by_vn(bp, vn);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+ (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+ }
+}
+
+/* This function is called upon link interrupt */
+static void bnx2x_link_attn(struct bnx2x *bp)
+{
+ /* Make sure that we are synced with the current statistics */
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ bnx2x_link_update(&bp->link_params, &bp->link_vars);
+
+ if (bp->link_vars.link_up) {
+
+ /* dropless flow control */
+ if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
+ int port = BP_PORT(bp);
+ u32 pause_enabled = 0;
+
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ pause_enabled = 1;
+
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
+ pause_enabled);
+ }
+
+ if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
+ struct host_port_stats *pstats;
+
+ pstats = bnx2x_sp(bp, port_stats);
+ /* reset old mac stats */
+ memset(&(pstats->mac_stx[0]), 0,
+ sizeof(struct mac_stx));
+ }
+ if (bp->state == BNX2X_STATE_OPEN)
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ }
+
+ if (bp->link_vars.link_up && bp->link_vars.line_speed) {
+ int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
+
+ if (cmng_fns != CMNG_FNS_NONE) {
+ bnx2x_cmng_fns_init(bp, false, cmng_fns);
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+ } else
+ /* rate shaping and fairness are disabled */
+ DP(NETIF_MSG_IFUP,
+ "single function mode without fairness\n");
+ }
+
+ __bnx2x_link_report(bp);
+
+ if (IS_MF(bp))
+ bnx2x_link_sync_notify(bp);
+}
+
+void bnx2x__link_status_update(struct bnx2x *bp)
+{
+ if (bp->state != BNX2X_STATE_OPEN)
+ return;
+
+ bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
+
+ if (bp->link_vars.link_up)
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ else
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ /* indicate link status */
+ bnx2x_link_report(bp);
+}
+
+static void bnx2x_pmf_update(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 val;
+
+ bp->port.pmf = 1;
+ DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+
+ /*
+ * We need the mb() to ensure the ordering between the writing to
+ * bp->port.pmf here and reading it from the bnx2x_periodic_task().
+ */
+ smp_mb();
+
+ /* queue a periodic task */
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+
+ bnx2x_dcbx_pmf_update(bp);
+
+ /* enable nig attention */
+ val = (0xff0f | (1 << (BP_VN(bp) + 4)));
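+ /* bits 4-7 appear to select per-VN NIG attention; only our own
+ VN bit is set */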
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
+ } else if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
+ }
+
+ bnx2x_stats_handle(bp, STATS_EVENT_PMF);
+}
+
+/* end of Link */
+
+/* slow path */
+
+/*
+ * General service functions
+ */
+
+/* send the MCP a request, block until there is a reply */
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
+{
+ int mb_idx = BP_FW_MB_IDX(bp);
+ u32 seq;
+ u32 rc = 0;
+ u32 cnt = 1;
+ u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
+
+ mutex_lock(&bp->fw_mb_mutex);
+ seq = ++bp->fw_seq;
+ SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
+ SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
+
+ DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
+ (command | seq), param);
+
+ do {
+ /* let the FW do its magic ... */
+ msleep(delay);
+
+ rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
+
+ /* Give the FW up to 5 seconds (500 * 10 ms; ten times
+ longer on slow chip revisions) */
+ } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
+
+ DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+ cnt*delay, rc, seq);
+
+ /* is this a reply to our command? */
+ if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
+ rc &= FW_MSG_CODE_MASK;
+ else {
+ /* FW BUG! */
+ BNX2X_ERR("FW failed to respond!\n");
+ bnx2x_fw_dump(bp);
+ rc = 0;
+ }
+ mutex_unlock(&bp->fw_mb_mutex);
+
+ return rc;
+}
+
+static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
+{
+#ifdef BCM_CNIC
+ /* Statistics are not supported for CNIC Clients at the moment */
+ if (IS_FCOE_FP(fp))
+ return false;
+#endif
+ return true;
+}
+
+void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
+{
+ if (CHIP_IS_E1x(bp)) {
+ struct tstorm_eth_function_common_config tcfg = {0};
+
+ storm_memset_func_cfg(bp, &tcfg, p->func_id);
+ }
+
+ /* Enable the function in the FW */
+ storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
+ storm_memset_func_en(bp, p->func_id, 1);
+
+ /* spq */
+ if (p->func_flgs & FUNC_FLG_SPQ) {
+ storm_memset_spq_addr(bp, p->spq_map, p->func_id);
+ REG_WR(bp, XSEM_REG_FAST_MEMORY +
+ XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
+ }
+}
+
+/**
+ * bnx2x_get_common_flags - Return common flags
+ *
+ * @bp:		device handle
+ * @fp:		queue handle
+ * @zero_stats:	TRUE if statistics zeroing is needed
+ *
+ * Return the flags that are common for the Tx-only and the regular
+ * connections.
+ */
+static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp,
+ bool zero_stats)
+{
+ unsigned long flags = 0;
+
+ /* PF driver will always initialize the Queue to an ACTIVE state */
+ __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
+
+ /* tx only connections collect statistics (on the same index as the
+ * parent connection). The statistics are zeroed when the parent
+ * connection is initialized.
+ */
+ if (stat_counter_valid(bp, fp)) {
+ __set_bit(BNX2X_Q_FLG_STATS, &flags);
+ if (zero_stats)
+ __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+ }
+
+ return flags;
+}
+
+static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp,
+ bool leading)
+{
+ unsigned long flags = 0;
+
+ /* calculate other queue flags */
+ if (IS_MF_SD(bp))
+ __set_bit(BNX2X_Q_FLG_OV, &flags);
+
+ if (IS_FCOE_FP(fp))
+ __set_bit(BNX2X_Q_FLG_FCOE, &flags);
+
+ if (!fp->disable_tpa) {
+ __set_bit(BNX2X_Q_FLG_TPA, &flags);
+ __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
+ }
+
+ if (leading) {
+ __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
+ __set_bit(BNX2X_Q_FLG_MCAST, &flags);
+ }
+
+ /* Always set HW VLAN stripping */
+ __set_bit(BNX2X_Q_FLG_VLAN, &flags);
+
+ return flags | bnx2x_get_common_flags(bp, fp, true);
+}
+
+static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
+ u8 cos)
+{
+ gen_init->stat_id = bnx2x_stats_id(fp);
+ gen_init->spcl_id = fp->cl_id;
+
+ /* Always use mini-jumbo MTU for FCoE L2 ring */
+ if (IS_FCOE_FP(fp))
+ gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
+ else
+ gen_init->mtu = bp->dev->mtu;
+
+ gen_init->cos = cos;
+}
+
+static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
+ struct bnx2x_rxq_setup_params *rxq_init)
+{
+ u8 max_sge = 0;
+ u16 sge_sz = 0;
+ u16 tpa_agg_size = 0;
+
+ if (!fp->disable_tpa) {
+ pause->sge_th_lo = SGE_TH_LO(bp);
+ pause->sge_th_hi = SGE_TH_HI(bp);
+
+ /* validate SGE ring has enough to cross high threshold */
+ WARN_ON(bp->dropless_fc &&
+ pause->sge_th_hi + FW_PREFETCH_CNT >
+ MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
+
+ tpa_agg_size = min_t(u32,
+ (min_t(u32, 8, MAX_SKB_FRAGS) *
+ SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
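+ /* number of SGE elements needed to hold an MTU-sized frame:
+ align the MTU up to an SGE page, then round the page count
+ up to whole SGE elements of PAGES_PER_SGE pages each */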
+ max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
+ SGE_PAGE_SHIFT;
+ max_sge = ((max_sge + PAGES_PER_SGE - 1) &
+ (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
+ sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
+ 0xffff);
+ }
+
+ /* pause - not for e1 */
+ if (!CHIP_IS_E1(bp)) {
+ pause->bd_th_lo = BD_TH_LO(bp);
+ pause->bd_th_hi = BD_TH_HI(bp);
+
+ pause->rcq_th_lo = RCQ_TH_LO(bp);
+ pause->rcq_th_hi = RCQ_TH_HI(bp);
+ /*
+ * validate that rings have enough entries to cross
+ * high thresholds
+ */
+ WARN_ON(bp->dropless_fc &&
+ pause->bd_th_hi + FW_PREFETCH_CNT >
+ bp->rx_ring_size);
+ WARN_ON(bp->dropless_fc &&
+ pause->rcq_th_hi + FW_PREFETCH_CNT >
+ NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
+
+ pause->pri_map = 1;
+ }
+
+ /* rxq setup */
+ rxq_init->dscr_map = fp->rx_desc_mapping;
+ rxq_init->sge_map = fp->rx_sge_mapping;
+ rxq_init->rcq_map = fp->rx_comp_mapping;
+ rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
+
+ /* This should be the maximum number of data bytes that may be
+ * placed on the BD (not including padding).
+ */
+ rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN -
+ IP_HEADER_ALIGNMENT_PADDING;
+
+ rxq_init->cl_qzone_id = fp->cl_qzone_id;
+ rxq_init->tpa_agg_sz = tpa_agg_size;
+ rxq_init->sge_buf_sz = sge_sz;
+ rxq_init->max_sges_pkt = max_sge;
+ rxq_init->rss_engine_id = BP_FUNC(bp);
+
+ /* Maximum number of simultaneous TPA aggregations for this Queue.
+ *
+ * For PF Clients it should be the maximum available number.
+ * VF driver(s) may want to define it to a smaller value.
+ */
+ rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
+
+ rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
+ rxq_init->fw_sb_id = fp->fw_sb_id;
+
+ if (IS_FCOE_FP(fp))
+ rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
+ else
+ rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+}
+
+static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
+ u8 cos)
+{
+ txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping;
+ txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
+ txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
+ txq_init->fw_sb_id = fp->fw_sb_id;
+
+ /*
+ * set the tss leading client id for TX classification ==
+ * leading RSS client id
+ */
+ txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
+
+ if (IS_FCOE_FP(fp)) {
+ txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
+ txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
+ }
+}
+
+static void bnx2x_pf_init(struct bnx2x *bp)
+{
+ struct bnx2x_func_init_params func_init = {0};
+ struct event_ring_data eq_data = { {0} };
+ u16 flags;
+
+ if (!CHIP_IS_E1x(bp)) {
+ /* reset IGU PF statistics: MSIX + ATTN */
+ /* PF */
+ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
+ BNX2X_IGU_STAS_MSG_VF_CNT*4 +
+ (CHIP_MODE_IS_4_PORT(bp) ?
+ BP_FUNC(bp) : BP_VN(bp))*4, 0);
+ /* ATTN */
+ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
+ BNX2X_IGU_STAS_MSG_VF_CNT*4 +
+ BNX2X_IGU_STAS_MSG_PF_CNT*4 +
+ (CHIP_MODE_IS_4_PORT(bp) ?
+ BP_FUNC(bp) : BP_VN(bp))*4, 0);
+ }
+
+ /* function setup flags */
+ flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
+
+ /* This flag is relevant for E1x only.
+ * E2 doesn't have a TPA configuration in a function level.
+ */
+ flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
+
+ func_init.func_flgs = flags;
+ func_init.pf_id = BP_FUNC(bp);
+ func_init.func_id = BP_FUNC(bp);
+ func_init.spq_map = bp->spq_mapping;
+ func_init.spq_prod = bp->spq_prod_idx;
+
+ bnx2x_func_init(bp, &func_init);
+
+ memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
+
+ /*
+ * Congestion management values depend on the link rate.
+ * There is no active link yet, so the initial link rate is set to
+ * 10 Gbps. When the link comes up, the congestion management
+ * values are re-calculated according to the actual link rate.
+ */
+ bp->link_vars.line_speed = SPEED_10000;
+ bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
+
+ /* Only the PMF sets the HW */
+ if (bp->port.pmf)
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+
+ /* init Event Queue */
+ eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
+ eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
+ eq_data.producer = bp->eq_prod;
+ eq_data.index_id = HC_SP_INDEX_EQ_CONS;
+ eq_data.sb_id = DEF_SB_ID;
+ storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
+}
+
+static void bnx2x_e1h_disable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+
+ bnx2x_tx_disable(bp);
+
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+}
+
+static void bnx2x_e1h_enable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
+
+ /* Tx queues only need to be re-enabled */
+ netif_tx_wake_all_queues(bp->dev);
+
+ /*
+ * Should not call netif_carrier_on since it will be called if the link
+ * is up when checking for link state
+ */
+}
+
+/* called due to MCP event (on pmf):
+ * reread new bandwidth configuration
+ * configure FW
+ * notify other functions about the change
+ */
+static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
+{
+ if (bp->link_vars.link_up) {
+ bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
+ bnx2x_link_sync_notify(bp);
+ }
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+}
+
+static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
+{
+ bnx2x_config_mf_bw(bp);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
+}
+
+static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
+{
+ DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
+
+ if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
+
+ /*
+ * This is the only place besides the function initialization
+ * where bp->flags can change, so it is done without any
+ * locks.
+ */
+ if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
+ DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
+ bp->flags |= MF_FUNC_DIS;
+
+ bnx2x_e1h_disable(bp);
+ } else {
+ DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
+ bp->flags &= ~MF_FUNC_DIS;
+
+ bnx2x_e1h_enable(bp);
+ }
+ dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
+ }
+ if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
+ bnx2x_config_mf_bw(bp);
+ dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
+ }
+
+ /* Report results to MCP */
+ if (dcc_event)
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
+ else
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
+}
+
+/* must be called under the spq lock */
+static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
+{
+ struct eth_spe *next_spe = bp->spq_prod_bd;
+
+ if (bp->spq_prod_bd == bp->spq_last_bd) {
+ bp->spq_prod_bd = bp->spq;
+ bp->spq_prod_idx = 0;
+ DP(NETIF_MSG_TIMER, "end of spq\n");
+ } else {
+ bp->spq_prod_bd++;
+ bp->spq_prod_idx++;
+ }
+ return next_spe;
+}
+
+/* must be called under the spq lock */
+static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
+{
+ int func = BP_FUNC(bp);
+
+ /*
+ * Make sure that BD data is updated before writing the producer:
+ * BD data is written to the memory, the producer is read from the
+ * memory, thus we need a full memory barrier to ensure the ordering.
+ */
+ mb();
+
+ REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
+ bp->spq_prod_idx);
+ mmiowb();
+}
+
+/**
+ * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
+ *
+ * @cmd: command to check
+ * @cmd_type: command type
+ */
+static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
+{
+ return (cmd_type == NONE_CONNECTION_TYPE) ||
+ (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
+ (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
+ (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE);
+}
+
+/**
+ * bnx2x_sp_post - place a single command on an SP ring
+ *
+ * @bp: driver handle
+ * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
+ * @cid: SW CID the command is related to
+ * @data_hi: command private data address (high 32 bits)
+ * @data_lo: command private data address (low 32 bits)
+ * @cmd_type: command type (e.g. NONE, ETH)
+ *
+ * SP data is handled as if it's always an address pair, thus data fields are
+ * not swapped to little endian in upper functions. Instead this function swaps
+ * data as if it's two u32 fields.
+ */
+int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+ u32 data_hi, u32 data_lo, int cmd_type)
+{
+ struct eth_spe *spe;
+ u16 type;
+ bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return -EIO;
+#endif
+
+ spin_lock_bh(&bp->spq_lock);
+
+ if (common) {
+ if (!atomic_read(&bp->eq_spq_left)) {
+ BNX2X_ERR("BUG! EQ ring full!\n");
+ spin_unlock_bh(&bp->spq_lock);
+ bnx2x_panic();
+ return -EBUSY;
+ }
+ } else if (!atomic_read(&bp->cq_spq_left)) {
+ BNX2X_ERR("BUG! SPQ ring full!\n");
+ spin_unlock_bh(&bp->spq_lock);
+ bnx2x_panic();
+ return -EBUSY;
+ }
+
+ spe = bnx2x_sp_get_next(bp);
+
+ /* the CID needs the port number encoded in it */
+ spe->hdr.conn_and_cmd_data =
+ cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
+ HW_CID(bp, cid));
+
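+ /* the SPE type encodes both the connection type and the
+ originating function id */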
+ type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
+
+ type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+ SPE_HDR_FUNCTION_ID);
+
+ spe->hdr.type = cpu_to_le16(type);
+
+ spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
+ spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
+
+ /*
+ * It's ok if the actual decrement is issued towards the memory
+ * somewhere between the spin_lock and spin_unlock. Thus no
+ * more explicit memory barrier is needed.
+ */
+ if (common)
+ atomic_dec(&bp->eq_spq_left);
+ else
+ atomic_dec(&bp->cq_spq_left);
+
+ DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
+ "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) "
+ "type(0x%x) left (CQ, EQ) (%x,%x)\n",
+ bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
+ (u32)(U64_LO(bp->spq_mapping) +
+ (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
+ HW_CID(bp, cid), data_hi, data_lo, type,
+ atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
+
+ bnx2x_sp_prod_update(bp);
+ spin_unlock_bh(&bp->spq_lock);
+ return 0;
+}
+
+/* acquire split MCP access lock register */
+static int bnx2x_acquire_alr(struct bnx2x *bp)
+{
+ u32 j, val;
+ int rc = 0;
+
+ might_sleep();
+ for (j = 0; j < 1000; j++) {
+ val = (1UL << 31);
+ REG_WR(bp, GRCBASE_MCP + 0x9c, val);
+ val = REG_RD(bp, GRCBASE_MCP + 0x9c);
+ if (val & (1L << 31))
+ break;
+
+ msleep(5);
+ }
+ if (!(val & (1L << 31))) {
+ BNX2X_ERR("Cannot acquire MCP access lock register\n");
+ rc = -EBUSY;
+ }
+
+ return rc;
+}
+
+/* release split MCP access lock register */
+static void bnx2x_release_alr(struct bnx2x *bp)
+{
+ REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
+}
+
+#define BNX2X_DEF_SB_ATT_IDX 0x0001
+#define BNX2X_DEF_SB_IDX 0x0002
+
+static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
+{
+ struct host_sp_status_block *def_sb = bp->def_status_blk;
+ u16 rc = 0;
+
+ barrier(); /* status block is written to by the chip */
+ if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
+ bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
+ rc |= BNX2X_DEF_SB_ATT_IDX;
+ }
+
+ if (bp->def_idx != def_sb->sp_sb.running_index) {
+ bp->def_idx = def_sb->sp_sb.running_index;
+ rc |= BNX2X_DEF_SB_IDX;
+ }
+
+ /* Do not reorder: index reads should complete before handling */
+ barrier();
+ return rc;
+}
+
+/*
+ * slow path service functions
+ */
+
+static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
+{
+ int port = BP_PORT(bp);
+ u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0;
+ u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
+ NIG_REG_MASK_INTERRUPT_PORT0;
+ u32 aeu_mask;
+ u32 nig_mask = 0;
+ u32 reg_addr;
+
+ if (bp->attn_state & asserted)
+ BNX2X_ERR("IGU ERROR\n");
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+ aeu_mask = REG_RD(bp, aeu_addr);
+
+ DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
+ aeu_mask, asserted);
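+ /* only the ten low attention lines (0x3ff) are masked here */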
+ aeu_mask &= ~(asserted & 0x3ff);
+ DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
+
+ REG_WR(bp, aeu_addr, aeu_mask);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+
+ DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
+ bp->attn_state |= asserted;
+ DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
+
+ if (asserted & ATTN_HARD_WIRED_MASK) {
+ if (asserted & ATTN_NIG_FOR_FUNC) {
+
+ bnx2x_acquire_phy_lock(bp);
+
+ /* save nig interrupt mask */
+ nig_mask = REG_RD(bp, nig_int_mask_addr);
+
+ /* If nig_mask is not set, no need to call the update
+ * function.
+ */
+ if (nig_mask) {
+ REG_WR(bp, nig_int_mask_addr, 0);
+
+ bnx2x_link_attn(bp);
+ }
+
+ /* handle unicore attn? */
+ }
+ if (asserted & ATTN_SW_TIMER_4_FUNC)
+ DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
+
+ if (asserted & GPIO_2_FUNC)
+ DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
+
+ if (asserted & GPIO_3_FUNC)
+ DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
+
+ if (asserted & GPIO_4_FUNC)
+ DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
+
+ if (port == 0) {
+ if (asserted & ATTN_GENERAL_ATTN_1) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
+ }
+ if (asserted & ATTN_GENERAL_ATTN_2) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
+ }
+ if (asserted & ATTN_GENERAL_ATTN_3) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
+ }
+ } else {
+ if (asserted & ATTN_GENERAL_ATTN_4) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
+ }
+ if (asserted & ATTN_GENERAL_ATTN_5) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
+ }
+ if (asserted & ATTN_GENERAL_ATTN_6) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
+ }
+ }
+
+ } /* if hardwired */
+
+ if (bp->common.int_block == INT_BLOCK_HC)
+ reg_addr = (HC_REG_COMMAND_REG + port*32 +
+ COMMAND_REG_ATTN_BITS_SET);
+ else
+ reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
+
+ DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
+ (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
+ REG_WR(bp, reg_addr, asserted);
+
+ /* now set back the mask */
+ if (asserted & ATTN_NIG_FOR_FUNC) {
+ REG_WR(bp, nig_int_mask_addr, nig_mask);
+ bnx2x_release_phy_lock(bp);
+ }
+}
+
+static inline void bnx2x_fan_failure(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 ext_phy_config;
+ /* mark the failure */
+ ext_phy_config =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config);
+
+ ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
+ ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
+ SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
+ ext_phy_config);
+
+ /* log the failure */
+ netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
+ " the driver to shutdown the card to prevent permanent"
+ " damage. Please contact OEM Support for assistance\n");
+}
+
+static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
+{
+ int port = BP_PORT(bp);
+ int reg_offset;
+ u32 val;
+
+ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+
+ if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
+
+ val = REG_RD(bp, reg_offset);
+ val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
+ REG_WR(bp, reg_offset, val);
+
+ BNX2X_ERR("SPIO5 hw attention\n");
+
+ /* Fan failure attention */
+ bnx2x_hw_reset_phy(&bp->link_params);
+ bnx2x_fan_failure(bp);
+ }
+
+ if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_handle_module_detect_int(&bp->link_params);
+ bnx2x_release_phy_lock(bp);
+ }
+
+ if (attn & HW_INTERRUT_ASSERT_SET_0) {
+
+ val = REG_RD(bp, reg_offset);
+ val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
+ REG_WR(bp, reg_offset, val);
+
+ BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
+ (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
+ bnx2x_panic();
+ }
+}
+
+static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
+{
+ u32 val;
+
+ if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
+
+ val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
+ BNX2X_ERR("DB hw attention 0x%x\n", val);
+ /* DORQ discard attention */
+ if (val & 0x2)
+ BNX2X_ERR("FATAL error from DORQ\n");
+ }
+
+ if (attn & HW_INTERRUT_ASSERT_SET_1) {
+
+ int port = BP_PORT(bp);
+ int reg_offset;
+
+ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
+
+ val = REG_RD(bp, reg_offset);
+ val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
+ REG_WR(bp, reg_offset, val);
+
+ BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
+ (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
+ bnx2x_panic();
+ }
+}
+
+static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
+{
+ u32 val;
+
+ if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
+
+ val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
+ BNX2X_ERR("CFC hw attention 0x%x\n", val);
+ /* CFC error attention */
+ if (val & 0x2)
+ BNX2X_ERR("FATAL error from CFC\n");
+ }
+
+ if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
+ val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
+ BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
+ /* RQ_USDMDP_FIFO_OVERFLOW */
+ if (val & 0x18000)
+ BNX2X_ERR("FATAL error from PXP\n");
+
+ if (!CHIP_IS_E1x(bp)) {
+ val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
+ BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
+ }
+ }
+
+ if (attn & HW_INTERRUT_ASSERT_SET_2) {
+
+ int port = BP_PORT(bp);
+ int reg_offset;
+
+ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
+
+ val = REG_RD(bp, reg_offset);
+ val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
+ REG_WR(bp, reg_offset, val);
+
+ BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
+ (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
+ bnx2x_panic();
+ }
+}
+
+static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
+{
+ u32 val;
+
+ if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
+
+ if (attn & BNX2X_PMF_LINK_ASSERT) {
+ int func = BP_FUNC(bp);
+
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+ bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
+ func_mf_config[BP_ABS_FUNC(bp)].config);
+ val = SHMEM_RD(bp,
+ func_mb[BP_FW_MB_IDX(bp)].drv_status);
+ if (val & DRV_STATUS_DCC_EVENT_MASK)
+ bnx2x_dcc_event(bp,
+ (val & DRV_STATUS_DCC_EVENT_MASK));
+
+ if (val & DRV_STATUS_SET_MF_BW)
+ bnx2x_set_mf_bw(bp);
+
+ if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
+ bnx2x_pmf_update(bp);
+
+ if (bp->port.pmf &&
+ (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
+ bp->dcbx_enabled > 0)
+ /* start dcbx state machine */
+ bnx2x_dcbx_set_params(bp,
+ BNX2X_DCBX_STATE_NEG_RECEIVED);
+ if (bp->link_vars.periodic_flags &
+ PERIODIC_FLAGS_LINK_EVENT) {
+ /* sync with link */
+ bnx2x_acquire_phy_lock(bp);
+ bp->link_vars.periodic_flags &=
+ ~PERIODIC_FLAGS_LINK_EVENT;
+ bnx2x_release_phy_lock(bp);
+ if (IS_MF(bp))
+ bnx2x_link_sync_notify(bp);
+ bnx2x_link_report(bp);
+ }
+ /* Always call it here: bnx2x_link_report() will
+ * prevent duplicate link indications.
+ */
+ bnx2x__link_status_update(bp);
+ } else if (attn & BNX2X_MC_ASSERT_BITS) {
+
+ BNX2X_ERR("MC assert!\n");
+ bnx2x_mc_assert(bp);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
+ bnx2x_panic();
+
+ } else if (attn & BNX2X_MCP_ASSERT) {
+
+ BNX2X_ERR("MCP assert!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
+ bnx2x_fw_dump(bp);
+
+ } else
+ BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
+ }
+
+ if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
+ BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
+ if (attn & BNX2X_GRC_TIMEOUT) {
+ val = CHIP_IS_E1(bp) ? 0 :
+ REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
+ BNX2X_ERR("GRC time-out 0x%08x\n", val);
+ }
+ if (attn & BNX2X_GRC_RSV) {
+ val = CHIP_IS_E1(bp) ? 0 :
+ REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
+ BNX2X_ERR("GRC reserved 0x%08x\n", val);
+ }
+ REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
+ }
+}
+
+/*
+ * Bits map:
+ * 0-7  - Engine0 load counter.
+ * 8-15 - Engine1 load counter.
+ * 16   - Engine0 RESET_IN_PROGRESS bit.
+ * 17   - Engine1 RESET_IN_PROGRESS bit.
+ * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
+ *        function on the engine.
+ * 19   - Engine1 ONE_IS_LOADED.
+ * 20   - Chip reset flow bit. When set, a non-leader must wait for the
+ *        leaders of both engines to complete (check both RESET_IN_PROGRESS
+ *        bits, not just the one belonging to its own engine).
+ */
+#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
+
+#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
+#define BNX2X_PATH0_LOAD_CNT_SHIFT 0
+#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
+#define BNX2X_PATH1_LOAD_CNT_SHIFT 8
+#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
+#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
+#define BNX2X_GLOBAL_RESET_BIT 0x00040000
+
+/*
+ * Set the GLOBAL_RESET bit.
+ *
+ * Should be run under rtnl lock
+ */
+void bnx2x_set_reset_global(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * Clear the GLOBAL_RESET bit.
+ *
+ * Should be run under rtnl lock
+ */
+static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
+ barrier();
+ mmiowb();
+}
+
+/*
+ * Checks the GLOBAL_RESET bit.
+ *
+ * should be run under rtnl lock
+ */
+static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
+ return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
+}
+
+/*
+ * Clear RESET_IN_PROGRESS bit for the current engine.
+ *
+ * Should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_done(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 bit = BP_PATH(bp) ?
+ BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
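+ /* each path (engine) owns its own RESET_IN_PROGRESS bit */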
+
+ /* Clear the bit */
+ val &= ~bit;
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * Set RESET_IN_PROGRESS for the current engine.
+ *
+ * should be run under rtnl lock
+ */
+void bnx2x_set_reset_in_progress(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 bit = BP_PATH(bp) ?
+ BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+
+ /* Set the bit */
+ val |= bit;
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * Checks the RESET_IN_PROGRESS bit for the given engine.
+ * should be run under rtnl lock
+ */
+bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 bit = engine ?
+ BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+
+ /* return false if bit is set */
+ return (val & bit) ? false : true;
+}
+
+/*
+ * Increment the load counter for the current engine.
+ *
+ * should be run under rtnl lock
+ */
+void bnx2x_inc_load_cnt(struct bnx2x *bp)
+{
+ u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK;
+ u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+ BNX2X_PATH0_LOAD_CNT_SHIFT;
+
+ DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+ /* get the current counter value */
+ val1 = (val & mask) >> shift;
+
+ /* increment... */
+ val1++;
+
+ /* clear the old value */
+ val &= ~mask;
+
+ /* set the new one */
+ val |= ((val1 << shift) & mask);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+ barrier();
+ mmiowb();
+}
+
+/**
+ * bnx2x_dec_load_cnt - decrement the load counter
+ *
+ * @bp: driver handle
+ *
+ * Should be run under rtnl lock.
+ * Decrements the load counter for the current engine. Returns
+ * the new counter value.
+ */
+u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
+{
+ u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK;
+ u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+ BNX2X_PATH0_LOAD_CNT_SHIFT;
+
+ DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+ /* get the current counter value */
+ val1 = (val & mask) >> shift;
+
+ /* decrement... */
+ val1--;
+
+ /* clear the old value */
+ val &= ~mask;
+
+ /* set the new one */
+ val |= ((val1 << shift) & mask);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+ barrier();
+ mmiowb();
+
+ return val1;
+}
+
+/*
+ * Read the load counter for the current engine.
+ *
+ * should be run under rtnl lock
+ */
+static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine)
+{
+ u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK);
+ u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+ BNX2X_PATH0_LOAD_CNT_SHIFT);
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ DP(NETIF_MSG_HW, "GLOB_REG=0x%08x\n", val);
+
+ val = (val & mask) >> shift;
+
+ DP(NETIF_MSG_HW, "load_cnt for engine %d = %d\n", engine, val);
+
+ return val;
+}
+
+/*
+ * Reset the load counter for the current engine.
+ *
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
+}
+
+static inline void _print_next_block(int idx, const char *blk)
+{
+ pr_cont("%s%s", idx ? ", " : "", blk);
+}
+
+static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
+ bool print)
+{
+ int i = 0;
+ u32 cur_bit = 0;
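+ /* walk the signal word bit by bit until every asserted bit has
+ been reported and cleared */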
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "BRB");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "PARSER");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "TSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++,
+ "SEARCHER");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "TCM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "TSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "XPB");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
+ bool *global, bool print)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "PBF");
+ break;
+ case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "QM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "TM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "XSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "XCM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "XSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++,
+ "DOORBELLQ");
+ break;
+ case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "NIG");
+ break;
+ case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++,
+ "VAUX PCI CORE");
+ *global = true;
+ break;
+ case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "DEBUG");
+ break;
+ case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "USDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "UCM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "USEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "UPB");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "CSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "CCM");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
+ bool print)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "CSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "PXP");
+ break;
+ case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++,
+ "PXPPCICLOCKCLIENT");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "CFC");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "CDU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "DMAE");
+ break;
+ case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "IGU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "MISC");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
+ bool *global, bool print)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
+ if (print)
+ _print_next_block(par_num++, "MCP ROM");
+ *global = true;
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
+ if (print)
+ _print_next_block(par_num++,
+ "MCP UMP RX");
+ *global = true;
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
+ if (print)
+ _print_next_block(par_num++,
+ "MCP UMP TX");
+ *global = true;
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
+ if (print)
+ _print_next_block(par_num++,
+ "MCP SCPAD");
+ *global = true;
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
+ bool print)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "PGLUE_B");
+ break;
+ case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "ATC");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
+ u32 *sig)
+{
+ if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
+ (sig[1] & HW_PRTY_ASSERT_SET_1) ||
+ (sig[2] & HW_PRTY_ASSERT_SET_2) ||
+ (sig[3] & HW_PRTY_ASSERT_SET_3) ||
+ (sig[4] & HW_PRTY_ASSERT_SET_4)) {
+ int par_num = 0;
+ DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
+ "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x "
+ "[4]:0x%08x\n",
+ sig[0] & HW_PRTY_ASSERT_SET_0,
+ sig[1] & HW_PRTY_ASSERT_SET_1,
+ sig[2] & HW_PRTY_ASSERT_SET_2,
+ sig[3] & HW_PRTY_ASSERT_SET_3,
+ sig[4] & HW_PRTY_ASSERT_SET_4);
+ if (print)
+ netdev_err(bp->dev,
+ "Parity errors detected in blocks: ");
+ par_num = bnx2x_check_blocks_with_parity0(
+ sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
+ par_num = bnx2x_check_blocks_with_parity1(
+ sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
+ par_num = bnx2x_check_blocks_with_parity2(
+ sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
+ par_num = bnx2x_check_blocks_with_parity3(
+ sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
+ par_num = bnx2x_check_blocks_with_parity4(
+ sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
+
+ if (print)
+ pr_cont("\n");
+
+ return true;
+ } else
+ return false;
+}
+
+/**
+ * bnx2x_chk_parity_attn - checks for parity attentions.
+ *
+ * @bp: driver handle
+ * @global: true if there was a global attention
+ * @print: show parity attention in syslog
+ */
+bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
+{
+ struct attn_route attn = { {0} };
+ int port = BP_PORT(bp);
+
+ attn.sig[0] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
+ port*4);
+ attn.sig[1] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
+ port*4);
+ attn.sig[2] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
+ port*4);
+ attn.sig[3] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
+ port*4);
+
+ if (!CHIP_IS_E1x(bp))
+ attn.sig[4] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
+ port*4);
+
+ return bnx2x_parity_attn(bp, global, print, attn.sig);
+}
+
+static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
+{
+ u32 val;
+ if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
+
+ val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
+ BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "ADDRESS_ERROR\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "INCORRECT_RCV_BEHAVIOR\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "WAS_ERROR_ATTN\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "VF_LENGTH_VIOLATION_ATTN\n");
+ if (val &
+ PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "VF_GRC_SPACE_VIOLATION_ATTN\n");
+ if (val &
+ PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "VF_MSIX_BAR_VIOLATION_ATTN\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "TCPL_ERROR_ATTN\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "TCPL_IN_TWO_RCBS_ATTN\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "CSSNOOP_FIFO_OVERFLOW\n");
+ }
+ if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
+ val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
+ BNX2X_ERR("ATC hw attention 0x%x\n", val);
+ if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG"
+ "_ATC_TCPL_TO_NOT_PEND\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_"
+ "ATC_GPA_MULTIPLE_HITS\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_"
+ "ATC_RCPL_TO_EMPTY_CNT\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_"
+ "ATC_IREQ_LESS_THAN_STU\n");
+ }
+
+ if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
+ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
+ BNX2X_ERR("FATAL parity attention set4 0x%x\n",
+ (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
+ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
+ }
+
+}
+
+static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+{
+ struct attn_route attn, *group_mask;
+ int port = BP_PORT(bp);
+ int index;
+ u32 reg_addr;
+ u32 val;
+ u32 aeu_mask;
+ bool global = false;
+
+ /* need to take HW lock because MCP or other port might also
+ try to handle this event */
+ bnx2x_acquire_alr(bp);
+
+ if (bnx2x_chk_parity_attn(bp, &global, true)) {
+#ifndef BNX2X_STOP_ON_ERROR
+ bp->recovery_state = BNX2X_RECOVERY_INIT;
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ /* Disable HW interrupts */
+ bnx2x_int_disable(bp);
+ /* In case of parity errors don't handle attentions so that
+ * other function would "see" parity errors.
+ */
+#else
+ bnx2x_panic();
+#endif
+ bnx2x_release_alr(bp);
+ return;
+ }
+
+ attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
+ attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
+ attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
+ attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
+ if (!CHIP_IS_E1x(bp))
+ attn.sig[4] =
+ REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
+ else
+ attn.sig[4] = 0;
+
+ DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
+ attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
+
+ for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
+ if (deasserted & (1 << index)) {
+ group_mask = &bp->attn_group[index];
+
+ DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
+ "%08x %08x %08x\n",
+ index,
+ group_mask->sig[0], group_mask->sig[1],
+ group_mask->sig[2], group_mask->sig[3],
+ group_mask->sig[4]);
+
+ bnx2x_attn_int_deasserted4(bp,
+ attn.sig[4] & group_mask->sig[4]);
+ bnx2x_attn_int_deasserted3(bp,
+ attn.sig[3] & group_mask->sig[3]);
+ bnx2x_attn_int_deasserted1(bp,
+ attn.sig[1] & group_mask->sig[1]);
+ bnx2x_attn_int_deasserted2(bp,
+ attn.sig[2] & group_mask->sig[2]);
+ bnx2x_attn_int_deasserted0(bp,
+ attn.sig[0] & group_mask->sig[0]);
+ }
+ }
+
+ bnx2x_release_alr(bp);
+
+ if (bp->common.int_block == INT_BLOCK_HC)
+ reg_addr = (HC_REG_COMMAND_REG + port*32 +
+ COMMAND_REG_ATTN_BITS_CLR);
+ else
+ reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
+
+ val = ~deasserted;
+ DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
+ (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
+ REG_WR(bp, reg_addr, val);
+
+ if (~bp->attn_state & deasserted)
+ BNX2X_ERR("IGU ERROR\n");
+
+ reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0;
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+ aeu_mask = REG_RD(bp, reg_addr);
+
+ DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
+ aeu_mask, deasserted);
+ aeu_mask |= (deasserted & 0x3ff);
+ DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
+
+ REG_WR(bp, reg_addr, aeu_mask);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+
+ DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
+ bp->attn_state &= ~deasserted;
+ DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
+}
+
+static void bnx2x_attn_int(struct bnx2x *bp)
+{
+ /* read local copy of bits */
+ u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
+ attn_bits);
+ u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
+ attn_bits_ack);
+ u32 attn_state = bp->attn_state;
+
+ /* look for changed bits */
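+ /* newly asserted: set in HW but not yet acked and not in our state;
+ newly deasserted: cleared in HW but still acked and in our state */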
+ u32 asserted = attn_bits & ~attn_ack & ~attn_state;
+ u32 deasserted = ~attn_bits & attn_ack & attn_state;
+
+ DP(NETIF_MSG_HW,
+ "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
+ attn_bits, attn_ack, asserted, deasserted);
+
+ if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
+ BNX2X_ERR("BAD attention state\n");
+
+ /* handle bits that were raised */
+ if (asserted)
+ bnx2x_attn_int_asserted(bp, asserted);
+
+ if (deasserted)
+ bnx2x_attn_int_deasserted(bp, deasserted);
+}
+
+void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
+ u16 index, u8 op, u8 update)
+{
+ u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
+
+ bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
+ igu_addr);
+}
+
+static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
+{
+ /* No memory barriers */
+ storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
+ mmiowb(); /* keep prod updates ordered */
+}
+
+#ifdef BCM_CNIC
+static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
+ union event_ring_elem *elem)
+{
+ u8 err = elem->message.error;
+
+ if (!bp->cnic_eth_dev.starting_cid ||
+ (cid < bp->cnic_eth_dev.starting_cid &&
+ cid != bp->cnic_eth_dev.iscsi_l2_cid))
+ return 1;
+
+ DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
+
+ if (unlikely(err)) {
+
+ BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
+ cid);
+ bnx2x_panic_dump(bp);
+ }
+ bnx2x_cnic_cfc_comp(bp, cid, err);
+ return 0;
+}
+#endif
+
+static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
+{
+ struct bnx2x_mcast_ramrod_params rparam;
+ int rc;
+
+ memset(&rparam, 0, sizeof(rparam));
+
+ rparam.mcast_obj = &bp->mcast_obj;
+
+ netif_addr_lock_bh(bp->dev);
+
+ /* Clear pending state for the last command */
+ bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
+
+ /* If there are pending mcast commands - send them */
+ if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+ if (rc < 0)
+ BNX2X_ERR("Failed to send pending mcast commands: %d\n",
+ rc);
+ }
+
+ netif_addr_unlock_bh(bp->dev);
+}
+
+static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
+ union event_ring_elem *elem)
+{
+ unsigned long ramrod_flags = 0;
+ int rc = 0;
+ u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj;
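+
+ /* the echo field packs the pending command type in its high bits
+ and the SW CID in its low bits */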
+
+ /* Always push next commands out, don't wait here */
+ __set_bit(RAMROD_CONT, &ramrod_flags);
+
+ switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
+ case BNX2X_FILTER_MAC_PENDING:
+#ifdef BCM_CNIC
+ if (cid == BNX2X_ISCSI_ETH_CID)
+ vlan_mac_obj = &bp->iscsi_l2_mac_obj;
+ else
+#endif
+ vlan_mac_obj = &bp->fp[cid].mac_obj;
+
+ break;
+ case BNX2X_FILTER_MCAST_PENDING:
+ /* This is only relevant for 57710 where multicast MACs are
+ * configured as unicast MACs using the same ramrod.
+ */
+ bnx2x_handle_mcast_eqe(bp);
+ return;
+ default:
+ BNX2X_ERR("Unsupported classification command: %d\n",
+ elem->message.data.eth_event.echo);
+ return;
+ }
+
+ rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
+
+ if (rc < 0)
+ BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
+ else if (rc > 0)
+ DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
+
+}
+
+#ifdef BCM_CNIC
+static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
+#endif
+
+static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
+{
+ netif_addr_lock_bh(bp->dev);
+
+ clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
+
+ /* Send rx_mode command again if was requested */
+ if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
+ bnx2x_set_storm_rx_mode(bp);
+#ifdef BCM_CNIC
+ else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
+ &bp->sp_state))
+ bnx2x_set_iscsi_eth_rx_mode(bp, true);
+ else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
+ &bp->sp_state))
+ bnx2x_set_iscsi_eth_rx_mode(bp, false);
+#endif
+
+ netif_addr_unlock_bh(bp->dev);
+}
+
+static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
+ struct bnx2x *bp, u32 cid)
+{
+ DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
+#ifdef BCM_CNIC
+ if (cid == BNX2X_FCOE_ETH_CID)
+ return &bnx2x_fcoe(bp, q_obj);
+ else
+#endif
+ return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj);
+}
+
+static void bnx2x_eq_int(struct bnx2x *bp)
+{
+ u16 hw_cons, sw_cons, sw_prod;
+ union event_ring_elem *elem;
+ u32 cid;
+ u8 opcode;
+ int spqe_cnt = 0;
+ struct bnx2x_queue_sp_obj *q_obj;
+ struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
+ struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
+
+ hw_cons = le16_to_cpu(*bp->eq_cons_sb);
+
+ /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
+ * When we get the next-page element we need to adjust so the loop
+ * condition below will be met. The next element is the size of a
+ * regular element and hence incrementing by 1
+ */
+ if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
+ hw_cons++;
+
+ /* This function may never run in parallel with itself for a
+ * specific bp, thus there is no need for a "paired" read memory
+ * barrier here.
+ */
+ sw_cons = bp->eq_cons;
+ sw_prod = bp->eq_prod;
+
+ DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
+ hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
+
+ for (; sw_cons != hw_cons;
+ sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
+ elem = &bp->eq_ring[EQ_DESC(sw_cons)];
+
+ cid = SW_CID(elem->message.data.cfc_del_event.cid);
+ opcode = elem->message.opcode;
+
+ /* handle eq element */
+ switch (opcode) {
+ case EVENT_RING_OPCODE_STAT_QUERY:
+ DP(NETIF_MSG_TIMER, "got statistics comp event %d\n",
+ bp->stats_comp++);
+ /* nothing to do with stats comp */
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_CFC_DEL:
+ /* handle according to cid range */
+ /*
+ * we may want to verify here that the bp state is
+ * HALTING
+ */
+ DP(BNX2X_MSG_SP,
+ "got delete ramrod for MULTI[%d]\n", cid);
+#ifdef BCM_CNIC
+ if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
+ goto next_spqe;
+#endif
+ q_obj = bnx2x_cid_to_q_obj(bp, cid);
+
+ if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
+ break;
+
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_STOP_TRAFFIC:
+ DP(BNX2X_MSG_SP, "got STOP TRAFFIC\n");
+ if (f_obj->complete_cmd(bp, f_obj,
+ BNX2X_F_CMD_TX_STOP))
+ break;
+ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_START_TRAFFIC:
+ DP(BNX2X_MSG_SP, "got START TRAFFIC\n");
+ if (f_obj->complete_cmd(bp, f_obj,
+ BNX2X_F_CMD_TX_START))
+ break;
+ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
+ goto next_spqe;
+ case EVENT_RING_OPCODE_FUNCTION_START:
+ DP(BNX2X_MSG_SP, "got FUNC_START ramrod\n");
+ if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
+ break;
+
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_FUNCTION_STOP:
+ DP(BNX2X_MSG_SP, "got FUNC_STOP ramrod\n");
+ if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
+ break;
+
+ goto next_spqe;
+ }
+
+ switch (opcode | bp->state) {
+ case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+ BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+ BNX2X_STATE_OPENING_WAIT4_PORT):
+ cid = elem->message.data.eth_event.echo &
+ BNX2X_SWCID_MASK;
+ DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
+ cid);
+ rss_raw->clear_pending(rss_raw);
+ break;
+
+ case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_SET_MAC |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+ BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+ BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
+ bnx2x_handle_classification_eqe(bp, elem);
+ break;
+
+ case (EVENT_RING_OPCODE_MULTICAST_RULES |
+ BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_MULTICAST_RULES |
+ BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_MULTICAST_RULES |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ DP(BNX2X_MSG_SP, "got mcast ramrod\n");
+ bnx2x_handle_mcast_eqe(bp);
+ break;
+
+ case (EVENT_RING_OPCODE_FILTERS_RULES |
+ BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_FILTERS_RULES |
+ BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_FILTERS_RULES |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
+ bnx2x_handle_rx_mode_eqe(bp);
+ break;
+ default:
+ /* unknown event: log an error and continue */
+ BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
+ elem->message.opcode, bp->state);
+ }
+next_spqe:
+ spqe_cnt++;
+ } /* for */
+
+ smp_mb__before_atomic_inc();
+ atomic_add(spqe_cnt, &bp->eq_spq_left);
+
+ bp->eq_cons = sw_cons;
+ bp->eq_prod = sw_prod;
+ /* Make sure that the above memory writes were issued towards memory */
+ smp_wmb();
+
+ /* update producer */
+ bnx2x_update_eq_prod(bp, bp->eq_prod);
+}
+
+static void bnx2x_sp_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
+ u16 status;
+
+ status = bnx2x_update_dsb_idx(bp);
+/* if (status == 0) */
+/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
+
+ DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
+
+ /* HW attentions */
+ if (status & BNX2X_DEF_SB_ATT_IDX) {
+ bnx2x_attn_int(bp);
+ status &= ~BNX2X_DEF_SB_ATT_IDX;
+ }
+
+ /* SP events: STAT_QUERY and others */
+ if (status & BNX2X_DEF_SB_IDX) {
+#ifdef BCM_CNIC
+ struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+
+ if ((!NO_FCOE(bp)) &&
+ (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+ /*
+ * Prevent local bottom-halves from running as
+ * we are going to change the local NAPI list.
+ */
+ local_bh_disable();
+ napi_schedule(&bnx2x_fcoe(bp, napi));
+ local_bh_enable();
+ }
+#endif
+ /* Handle EQ completions */
+ bnx2x_eq_int(bp);
+
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
+ le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
+
+ status &= ~BNX2X_DEF_SB_IDX;
+ }
+
+ if (unlikely(status))
+ DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+ status);
+
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
+ le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
+}
+
+irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct bnx2x *bp = netdev_priv(dev);
+
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
+ IGU_INT_DISABLE, 0);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return IRQ_HANDLED;
+#endif
+
+#ifdef BCM_CNIC
+ {
+ struct cnic_ops *c_ops;
+
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops)
+ c_ops->cnic_handler(bp->cnic_data, NULL);
+ rcu_read_unlock();
+ }
+#endif
+ queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+
+ return IRQ_HANDLED;
+}
+
+/* end of slow path */
+
+
+void bnx2x_drv_pulse(struct bnx2x *bp)
+{
+ SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
+ bp->fw_drv_pulse_wr_seq);
+}
+
+
+static void bnx2x_timer(unsigned long data)
+{
+ u8 cos;
+ struct bnx2x *bp = (struct bnx2x *) data;
+
+ if (!netif_running(bp->dev))
+ return;
+
+ if (poll) {
+ struct bnx2x_fastpath *fp = &bp->fp[0];
+
+ for_each_cos_in_tx_queue(fp, cos)
+ bnx2x_tx_int(bp, &fp->txdata[cos]);
+ bnx2x_rx_int(fp, 1000);
+ }
+
+ if (!BP_NOMCP(bp)) {
+ int mb_idx = BP_FW_MB_IDX(bp);
+ u32 drv_pulse;
+ u32 mcp_pulse;
+
+ ++bp->fw_drv_pulse_wr_seq;
+ bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
+ /* TBD - add SYSTEM_TIME */
+ drv_pulse = bp->fw_drv_pulse_wr_seq;
+ bnx2x_drv_pulse(bp);
+
+ mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
+ MCP_PULSE_SEQ_MASK);
+ /* The delta between driver pulse and mcp response
+ * should be 1 (before mcp response) or 0 (after mcp response)
+ */
+ if ((drv_pulse != mcp_pulse) &&
+ (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
+ /* someone lost a heartbeat... */
+ BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
+ drv_pulse, mcp_pulse);
+ }
+ }
+
+ if (bp->state == BNX2X_STATE_OPEN)
+ bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
+
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
+}
+
+/* end of Statistics */
+
+/* nic init */
+
+/*
+ * nic init service functions
+ */
+
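+/* Fill 'len' bytes at 'addr' with the 'fill' pattern, using dword writes
+ * when both the address and the length are dword-aligned and byte writes
+ * otherwise.
+ */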
+static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
+{
+ u32 i;
+ if (!(len%4) && !(addr%4))
+ for (i = 0; i < len; i += 4)
+ REG_WR(bp, addr + i, fill);
+ else
+ for (i = 0; i < len; i++)
+ REG_WR8(bp, addr + i, fill);
+}
+
+/* helper: writes FP SP data to FW - data_size in dwords */
+static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
+ int fw_sb_id,
+ u32 *sb_data_p,
+ u32 data_size)
+{
+ int index;
+ for (index = 0; index < data_size; index++)
+ REG_WR(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
+ sizeof(u32)*index,
+ *(sb_data_p + index));
+}
+
+static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
+{
+ u32 *sb_data_p;
+ u32 data_size = 0;
+ struct hc_status_block_data_e2 sb_data_e2;
+ struct hc_status_block_data_e1x sb_data_e1x;
+
+ /* disable the function first */
+ if (!CHIP_IS_E1x(bp)) {
+ memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
+ sb_data_e2.common.state = SB_DISABLED;
+ sb_data_e2.common.p_func.vf_valid = false;
+ sb_data_p = (u32 *)&sb_data_e2;
+ data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+ } else {
+ memset(&sb_data_e1x, 0,
+ sizeof(struct hc_status_block_data_e1x));
+ sb_data_e1x.common.state = SB_DISABLED;
+ sb_data_e1x.common.p_func.vf_valid = false;
+ sb_data_p = (u32 *)&sb_data_e1x;
+ data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+ }
+ bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
+
+ bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
+ CSTORM_STATUS_BLOCK_SIZE);
+ bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
+ CSTORM_SYNC_BLOCK_SIZE);
+}
+
+/* helper: writes SP SB data to FW */
+static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
+ struct hc_sp_status_block_data *sp_sb_data)
+{
+ int func = BP_FUNC(bp);
+ int i;
+ for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
+ REG_WR(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
+ i*sizeof(u32),
+ *((u32 *)sp_sb_data + i));
+}
+
+static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
+{
+ int func = BP_FUNC(bp);
+ struct hc_sp_status_block_data sp_sb_data;
+ memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
+
+ sp_sb_data.state = SB_DISABLED;
+ sp_sb_data.p_func.vf_valid = false;
+
+ bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
+
+ bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
+ CSTORM_SP_STATUS_BLOCK_SIZE);
+ bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
+ CSTORM_SP_SYNC_BLOCK_SIZE);
+}
+
+
+static inline
+void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
+ int igu_sb_id, int igu_seg_id)
+{
+ hc_sm->igu_sb_id = igu_sb_id;
+ hc_sm->igu_seg_id = igu_seg_id;
+ hc_sm->timer_value = 0xFF;
+ hc_sm->time_to_expire = 0xFFFFFFFF;
+}
+
+
+/* allocates state machine ids. */
+static inline
+void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
+{
+ /* zero out state machine indices */
+ /* rx indices */
+ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+
+ /* tx indices */
+ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
+
+ /* map indices */
+ /* rx indices */
+ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
+ SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+
+ /* tx indices */
+ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+}
+
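+/* Set up a fastpath status block: zero it in the FW first, then fill the
+ * chip-specific (e2/e1x) SB data, bind the Rx/Tx state machines to the
+ * given IGU SB and write the result to the chip.
+ */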
+static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+ u8 vf_valid, int fw_sb_id, int igu_sb_id)
+{
+ int igu_seg_id;
+
+ struct hc_status_block_data_e2 sb_data_e2;
+ struct hc_status_block_data_e1x sb_data_e1x;
+ struct hc_status_block_sm *hc_sm_p;
+ int data_size;
+ u32 *sb_data_p;
+
+ if (CHIP_INT_MODE_IS_BC(bp))
+ igu_seg_id = HC_SEG_ACCESS_NORM;
+ else
+ igu_seg_id = IGU_SEG_ACCESS_NORM;
+
+ bnx2x_zero_fp_sb(bp, fw_sb_id);
+
+ if (!CHIP_IS_E1x(bp)) {
+ memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
+ sb_data_e2.common.state = SB_ENABLED;
+ sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
+ sb_data_e2.common.p_func.vf_id = vfid;
+ sb_data_e2.common.p_func.vf_valid = vf_valid;
+ sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
+ sb_data_e2.common.same_igu_sb_1b = true;
+ sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
+ sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
+ hc_sm_p = sb_data_e2.common.state_machine;
+ sb_data_p = (u32 *)&sb_data_e2;
+ data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+ bnx2x_map_sb_state_machines(sb_data_e2.index_data);
+ } else {
+ memset(&sb_data_e1x, 0,
+ sizeof(struct hc_status_block_data_e1x));
+ sb_data_e1x.common.state = SB_ENABLED;
+ sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
+ sb_data_e1x.common.p_func.vf_id = 0xff;
+ sb_data_e1x.common.p_func.vf_valid = false;
+ sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
+ sb_data_e1x.common.same_igu_sb_1b = true;
+ sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
+ sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
+ hc_sm_p = sb_data_e1x.common.state_machine;
+ sb_data_p = (u32 *)&sb_data_e1x;
+ data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+ bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
+ }
+
+ bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
+ igu_sb_id, igu_seg_id);
+ bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
+ igu_sb_id, igu_seg_id);
+
+ DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
+
+ /* write indices to HW */
+ bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
+}
+
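+/* Program interrupt coalescing for one status block: rx_usec for the Rx CQ
+ * index and tx_usec for each of the per-CoS Tx CQ indices.
+ */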
+static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
+ u16 tx_usec, u16 rx_usec)
+{
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
+ false, rx_usec);
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+ HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
+ tx_usec);
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+ HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
+ tx_usec);
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+ HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
+ tx_usec);
+}
+
+static void bnx2x_init_def_sb(struct bnx2x *bp)
+{
+ struct host_sp_status_block *def_sb = bp->def_status_blk;
+ dma_addr_t mapping = bp->def_status_blk_mapping;
+ int igu_sp_sb_index;
+ int igu_seg_id;
+ int port = BP_PORT(bp);
+ int func = BP_FUNC(bp);
+ int reg_offset, reg_offset_en5;
+ u64 section;
+ int index;
+ struct hc_sp_status_block_data sp_sb_data;
+ memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
+
+ if (CHIP_INT_MODE_IS_BC(bp)) {
+ igu_sp_sb_index = DEF_SB_IGU_ID;
+ igu_seg_id = HC_SEG_ACCESS_DEF;
+ } else {
+ igu_sp_sb_index = bp->igu_dsb_id;
+ igu_seg_id = IGU_SEG_ACCESS_DEF;
+ }
+
+ /* ATTN */
+ section = ((u64)mapping) + offsetof(struct host_sp_status_block,
+ atten_status_block);
+ def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
+
+ bp->attn_state = 0;
+
+ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+ reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
+ for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
+ int sindex;
+ /* take care of sig[0]..sig[3]; sig[4] is handled below */
+ for (sindex = 0; sindex < 4; sindex++)
+ bp->attn_group[index].sig[sindex] =
+ REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
+
+ if (!CHIP_IS_E1x(bp))
+ /*
+ * enable5 is separate from the rest of the registers,
+ * and therefore the address skip is 4
+ * and not 16 between the different groups
+ */
+ bp->attn_group[index].sig[4] = REG_RD(bp,
+ reg_offset_en5 + 0x4*index);
+ else
+ bp->attn_group[index].sig[4] = 0;
+ }
+
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
+ HC_REG_ATTN_MSG0_ADDR_L);
+
+ REG_WR(bp, reg_offset, U64_LO(section));
+ REG_WR(bp, reg_offset + 4, U64_HI(section));
+ } else if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
+ REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
+ }
+
+ section = ((u64)mapping) + offsetof(struct host_sp_status_block,
+ sp_sb);
+
+ bnx2x_zero_sp_sb(bp);
+
+ sp_sb_data.state = SB_ENABLED;
+ sp_sb_data.host_sb_addr.lo = U64_LO(section);
+ sp_sb_data.host_sb_addr.hi = U64_HI(section);
+ sp_sb_data.igu_sb_id = igu_sp_sb_index;
+ sp_sb_data.igu_seg_id = igu_seg_id;
+ sp_sb_data.p_func.pf_id = func;
+ sp_sb_data.p_func.vnic_id = BP_VN(bp);
+ sp_sb_data.p_func.vf_id = 0xff;
+
+ bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
+
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
+}
+
+void bnx2x_update_coalesce(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_eth_queue(bp, i)
+ bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
+ bp->tx_ticks, bp->rx_ticks);
+}
+
+static void bnx2x_init_sp_ring(struct bnx2x *bp)
+{
+ spin_lock_init(&bp->spq_lock);
+ atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
+
+ bp->spq_prod_idx = 0;
+ bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
+ bp->spq_prod_bd = bp->spq;
+ bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
+}
+
+static void bnx2x_init_eq_ring(struct bnx2x *bp)
+{
+ int i;
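+ /* Chain the last element of each EQ page to the beginning of the
+ * next page (wrapping back to the first page after the last one).
+ */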
+ for (i = 1; i <= NUM_EQ_PAGES; i++) {
+ union event_ring_elem *elem =
+ &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
+
+ elem->next_page.addr.hi =
+ cpu_to_le32(U64_HI(bp->eq_mapping +
+ BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
+ elem->next_page.addr.lo =
+ cpu_to_le32(U64_LO(bp->eq_mapping +
+ BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
+ }
+ bp->eq_cons = 0;
+ bp->eq_prod = NUM_EQ_DESC;
+ bp->eq_cons_sb = BNX2X_EQ_INDEX;
+ /* we want a warning message before it gets rough... */
+ atomic_set(&bp->eq_spq_left,
+ min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
+}
+
+
+/* called with netif_addr_lock_bh() */
+void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+ unsigned long rx_mode_flags,
+ unsigned long rx_accept_flags,
+ unsigned long tx_accept_flags,
+ unsigned long ramrod_flags)
+{
+ struct bnx2x_rx_mode_ramrod_params ramrod_param;
+ int rc;
+
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+ /* Prepare ramrod parameters */
+ ramrod_param.cid = 0;
+ ramrod_param.cl_id = cl_id;
+ ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
+ ramrod_param.func_id = BP_FUNC(bp);
+
+ ramrod_param.pstate = &bp->sp_state;
+ ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
+
+ ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
+ ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
+
+ set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
+
+ ramrod_param.ramrod_flags = ramrod_flags;
+ ramrod_param.rx_mode_flags = rx_mode_flags;
+
+ ramrod_param.rx_accept_flags = rx_accept_flags;
+ ramrod_param.tx_accept_flags = tx_accept_flags;
+
+ rc = bnx2x_config_rx_mode(bp, &ramrod_param);
+ if (rc < 0) {
+ BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
+ return;
+ }
+}
+
+/* called with netif_addr_lock_bh() */
+void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+{
+ unsigned long rx_mode_flags = 0, ramrod_flags = 0;
+ unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
+
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp))
+ /* Configure rx_mode of FCoE Queue */
+ __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
+#endif
+
+ switch (bp->rx_mode) {
+ case BNX2X_RX_MODE_NONE:
+ /*
+ * 'drop all' supersedes any accept flags that may have been
+ * passed to the function.
+ */
+ break;
+ case BNX2X_RX_MODE_NORMAL:
+ __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+
+ /* internal switching mode */
+ __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+
+ break;
+ case BNX2X_RX_MODE_ALLMULTI:
+ __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+
+ /* internal switching mode */
+ __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+
+ break;
+ case BNX2X_RX_MODE_PROMISC:
+ /* According to the definition of SI mode, an interface in promisc
+ * mode should receive matched and unmatched (in resolution of port)
+ * unicast packets.
+ */
+ __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+
+ /* internal switching mode */
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+
+ if (IS_MF_SI(bp))
+ __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
+ else
+ __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+
+ break;
+ default:
+ BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
+ return;
+ }
+
+ if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
+ }
+
+ __set_bit(RAMROD_RX, &ramrod_flags);
+ __set_bit(RAMROD_TX, &ramrod_flags);
+
+ bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
+ tx_accept_flags, ramrod_flags);
+}
+
+static void bnx2x_init_internal_common(struct bnx2x *bp)
+{
+ int i;
+
+ if (IS_MF_SI(bp))
+ /*
+ * In switch independent mode, the TSTORM needs to accept
+ * packets that failed classification, since approximate match
+ * mac addresses aren't written to NIG LLH
+ */
+ REG_WR8(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
+ else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
+ REG_WR8(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
+
+ /* Zero this manually as its initialization is
+ * currently missing in the initTool
+ */
+ for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_AGG_DATA_OFFSET + i * 4, 0);
+ if (!CHIP_IS_E1x(bp)) {
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
+ CHIP_INT_MODE_IS_BC(bp) ?
+ HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
+ }
+}
+
+static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
+{
+ switch (load_code) {
+ case FW_MSG_CODE_DRV_LOAD_COMMON:
+ case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
+ bnx2x_init_internal_common(bp);
+ /* no break */
+
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ /* nothing to do */
+ /* no break */
+
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ /* internal memory per function is
+ * initialized inside bnx2x_pf_init
+ */
+ break;
+
+ default:
+ BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+ break;
+ }
+}
+
+static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
+{
+ return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT;
+}
+
+static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
+{
+ return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
+}
+
+static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
+{
+ if (CHIP_IS_E1x(fp->bp))
+ return BP_L_ID(fp->bp) + fp->index;
+ else /* We want Client ID to be the same as IGU SB ID for 57712 */
+ return bnx2x_fp_igu_sb_id(fp);
+}
+
+static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
+{
+ struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
+ u8 cos;
+ unsigned long q_type = 0;
+ u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
+
+ fp->cid = fp_idx;
+ fp->cl_id = bnx2x_fp_cl_id(fp);
+ fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
+ fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
+ /* qZone id equals to FW (per path) client id */
+ fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
+
+ /* init shortcut */
+ fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
+ /* Setup SB indices */
+ fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
+
+ /* Configure Queue State object */
+ __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+ __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+ BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
+
+ /* init tx data */
+ for_each_cos_in_tx_queue(fp, cos) {
+ bnx2x_init_txdata(bp, &fp->txdata[cos],
+ CID_COS_TO_TX_ONLY_CID(fp->cid, cos),
+ FP_COS_TO_TXQ(fp, cos),
+ BNX2X_TX_SB_INDEX_BASE + cos);
+ cids[cos] = fp->txdata[cos].cid;
+ }
+
+ bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos,
+ BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_sp_mapping(bp, q_rdata), q_type);
+
+ /* Configure classification DBs: always enable Tx switching */
+ bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
+
+ DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
+ "cl_id %d fw_sb %d igu_sb %d\n",
+ fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+ fp->igu_sb_id);
+ bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
+ fp->fw_sb_id, fp->igu_sb_id);
+
+ bnx2x_update_fpsb_idx(fp);
+}
+
+void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
+{
+ int i;
+
+ for_each_eth_queue(bp, i)
+ bnx2x_init_eth_fp(bp, i);
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp))
+ bnx2x_init_fcoe_fp(bp);
+
+ bnx2x_init_sb(bp, bp->cnic_sb_mapping,
+ BNX2X_VF_ID_INVALID, false,
+ bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
+
+#endif
+
+ /* Initialize MOD_ABS interrupts */
+ bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
+ bp->common.shmem_base, bp->common.shmem2_base,
+ BP_PORT(bp));
+ /* ensure status block indices were read */
+ rmb();
+
+ bnx2x_init_def_sb(bp);
+ bnx2x_update_dsb_idx(bp);
+ bnx2x_init_rx_rings(bp);
+ bnx2x_init_tx_rings(bp);
+ bnx2x_init_sp_ring(bp);
+ bnx2x_init_eq_ring(bp);
+ bnx2x_init_internal(bp, load_code);
+ bnx2x_pf_init(bp);
+ bnx2x_stats_init(bp);
+
+ /* flush all before enabling interrupts */
+ mb();
+ mmiowb();
+
+ bnx2x_int_enable(bp);
+
+ /* Check for SPIO5 */
+ bnx2x_attn_int_deasserted0(bp,
+ REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
+ AEU_INPUTS_ATTN_BITS_SPIO5);
+}
+
+/* end of nic init */
+
+/*
+ * gzip service functions
+ */
+
+static int bnx2x_gunzip_init(struct bnx2x *bp)
+{
+ bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
+ &bp->gunzip_mapping, GFP_KERNEL);
+ if (bp->gunzip_buf == NULL)
+ goto gunzip_nomem1;
+
+ bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
+ if (bp->strm == NULL)
+ goto gunzip_nomem2;
+
+ bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
+ if (bp->strm->workspace == NULL)
+ goto gunzip_nomem3;
+
+ return 0;
+
+gunzip_nomem3:
+ kfree(bp->strm);
+ bp->strm = NULL;
+
+gunzip_nomem2:
+ dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+ bp->gunzip_mapping);
+ bp->gunzip_buf = NULL;
+
+gunzip_nomem1:
+ netdev_err(bp->dev,
+ "Cannot allocate firmware buffer for decompression\n");
+ return -ENOMEM;
+}
+
+static void bnx2x_gunzip_end(struct bnx2x *bp)
+{
+ if (bp->strm) {
+ vfree(bp->strm->workspace);
+ kfree(bp->strm);
+ bp->strm = NULL;
+ }
+
+ if (bp->gunzip_buf) {
+ dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+ bp->gunzip_mapping);
+ bp->gunzip_buf = NULL;
+ }
+}
+
+static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
+{
+ int n, rc;
+
+ /* check gzip header */
+ if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
+ BNX2X_ERR("Bad gzip header\n");
+ return -EINVAL;
+ }
+
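+ /* skip the fixed 10-byte gzip header */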
+ n = 10;
+
+#define FNAME 0x8
+
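+ /* FLG.FNAME set: a zero-terminated original file name follows the
+ * fixed header and must be skipped as well
+ */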
+ if (zbuf[3] & FNAME)
+ while ((zbuf[n++] != 0) && (n < len));
+
+ bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
+ bp->strm->avail_in = len - n;
+ bp->strm->next_out = bp->gunzip_buf;
+ bp->strm->avail_out = FW_BUF_SIZE;
+
+ rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
+ if (rc != Z_OK)
+ return rc;
+
+ rc = zlib_inflate(bp->strm, Z_FINISH);
+ if ((rc != Z_OK) && (rc != Z_STREAM_END))
+ netdev_err(bp->dev, "Firmware decompression error: %s\n",
+ bp->strm->msg);
+
+ bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
+ if (bp->gunzip_outlen & 0x3)
+ netdev_err(bp->dev, "Firmware decompression error:"
+ " gunzip_outlen (%d) not aligned\n",
+ bp->gunzip_outlen);
+ bp->gunzip_outlen >>= 2;
+
+ zlib_inflateEnd(bp->strm);
+
+ if (rc == Z_STREAM_END)
+ return 0;
+
+ return rc;
+}
+
+/* nic load/unload */
+
+/*
+ * General service functions
+ */
+
+/* send a NIG loopback debug packet */
+static void bnx2x_lb_pckt(struct bnx2x *bp)
+{
+ u32 wb_write[3];
+
+ /* Ethernet source and destination addresses */
+ wb_write[0] = 0x55555555;
+ wb_write[1] = 0x55555555;
+ wb_write[2] = 0x20; /* SOP */
+ REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
+
+ /* NON-IP protocol */
+ wb_write[0] = 0x09000000;
+ wb_write[1] = 0x55555555;
+ wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
+ REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
+}
+
+/* Some of the internal memories are not directly readable from the
+ * driver, so to test them we send debug packets.
+ */
+static int bnx2x_int_mem_test(struct bnx2x *bp)
+{
+ int factor;
+ int count, i;
+ u32 val = 0;
+
+ if (CHIP_REV_IS_FPGA(bp))
+ factor = 120;
+ else if (CHIP_REV_IS_EMUL(bp))
+ factor = 200;
+ else
+ factor = 1;
+
+ /* Disable inputs of parser neighbor blocks */
+ REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
+ REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
+ REG_WR(bp, CFC_REG_DEBUG0, 0x1);
+ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
+
+ /* Write 0 to parser credits for CFC search request */
+ REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
+
+ /* send Ethernet packet */
+ bnx2x_lb_pckt(bp);
+
+ /* TODO: should the NIG statistics be reset here? */
+ /* Wait until NIG register shows 1 packet of size 0x10 */
+ count = 1000 * factor;
+ while (count) {
+
+ bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+ val = *bnx2x_sp(bp, wb_data[0]);
+ if (val == 0x10)
+ break;
+
+ msleep(10);
+ count--;
+ }
+ if (val != 0x10) {
+ BNX2X_ERR("NIG timeout val = 0x%x\n", val);
+ return -1;
+ }
+
+ /* Wait until PRS register shows 1 packet */
+ count = 1000 * factor;
+ while (count) {
+ val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+ if (val == 1)
+ break;
+
+ msleep(10);
+ count--;
+ }
+ if (val != 0x1) {
+ BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+ return -2;
+ }
+
+ /* Reset and init BRB, PRS */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
+ msleep(50);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
+ msleep(50);
+ bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
+
+ DP(NETIF_MSG_HW, "part2\n");
+
+ /* Disable inputs of parser neighbor blocks */
+ REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
+ REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
+ REG_WR(bp, CFC_REG_DEBUG0, 0x1);
+ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
+
+ /* Write 0 to parser credits for CFC search request */
+ REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
+
+ /* send 10 Ethernet packets */
+ for (i = 0; i < 10; i++)
+ bnx2x_lb_pckt(bp);
+
+ /* Wait until NIG register shows 10 + 1
+ * packets of size 11*0x10 = 0xb0
+ */
+ count = 1000 * factor;
+ while (count) {
+
+ bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+ val = *bnx2x_sp(bp, wb_data[0]);
+ if (val == 0xb0)
+ break;
+
+ msleep(10);
+ count--;
+ }
+ if (val != 0xb0) {
+ BNX2X_ERR("NIG timeout val = 0x%x\n", val);
+ return -3;
+ }
+
+ /* Wait until PRS register shows 2 packets */
+ val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+ if (val != 2)
+ BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+
+ /* Write 1 to parser credits for CFC search request */
+ REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
+
+ /* Wait until PRS register shows 3 packets */
+ msleep(10 * factor);
+ val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+ if (val != 3)
+ BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+
+ /* clear NIG EOP FIFO */
+ for (i = 0; i < 11; i++)
+ REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
+ val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
+ if (val != 1) {
+ BNX2X_ERR("clear of NIG failed\n");
+ return -4;
+ }
+
+ /* Reset and init BRB, PRS, NIG */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
+ msleep(50);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
+ msleep(50);
+ bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
+#ifndef BCM_CNIC
+ /* set NIC mode */
+ REG_WR(bp, PRS_REG_NIC_MODE, 1);
+#endif
+
+ /* Enable inputs of parser neighbor blocks */
+ REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
+ REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
+ REG_WR(bp, CFC_REG_DEBUG0, 0x0);
+ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
+
+ DP(NETIF_MSG_HW, "done\n");
+
+ return 0; /* OK */
+}
+
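+/* Unmask the per-block interrupt sources so HW attentions can reach the
+ * AEU; writing 0 to a block's INT_MASK register leaves it fully unmasked.
+ */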
+static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
+{
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
+ else
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
+ REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
+ REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
+ /*
+ * mask read length error interrupts in brb for parser
+ * (parsing unit and 'checksum and crc' unit)
+ * these errors are legal (PU reads fixed length and CAC can cause
+ * read length error on truncated packets)
+ */
+ REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
+ REG_WR(bp, QM_REG_QM_INT_MASK, 0);
+ REG_WR(bp, TM_REG_TM_INT_MASK, 0);
+ REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
+ REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
+ REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
+/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
+/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
+ REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
+ REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
+ REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
+/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
+/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
+ REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
+ REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
+ REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
+ REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
+/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
+/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
+
+ if (CHIP_REV_IS_FPGA(bp))
+ REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
+ else if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
+ (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
+ | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
+ | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
+ | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
+ | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
+ else
+ REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
+ REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
+ REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
+ REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
+/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
+
+ if (!CHIP_IS_E1x(bp))
+ /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
+ REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
+
+ REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
+ REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
+/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
+ REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
+}
+
+static void bnx2x_reset_common(struct bnx2x *bp)
+{
+ u32 val = 0x1400;
+
+ /* reset_common */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ 0xd3ffff7f);
+
+ if (CHIP_IS_E3(bp)) {
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+ }
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
+}
+
+static void bnx2x_setup_dmae(struct bnx2x *bp)
+{
+ bp->dmae_ready = 0;
+ spin_lock_init(&bp->dmae_lock);
+}
+
+static void bnx2x_init_pxp(struct bnx2x *bp)
+{
+ u16 devctl;
+ int r_order, w_order;
+
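+ /* Derive the PXP read/write burst orders from the PCIe Device
+ * Control register: the max payload size sets the write order and
+ * the max read request size sets the read order, unless the latter
+ * is forced via the 'mrrs' module parameter.
+ */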
+ pci_read_config_word(bp->pdev,
+ pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl);
+ DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
+ w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
+ if (bp->mrrs == -1)
+ r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
+ else {
+ DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
+ r_order = bp->mrrs;
+ }
+
+ bnx2x_init_pxp_arb(bp, r_order, w_order);
+}
+
+static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
+{
+ int is_required;
+ u32 val;
+ int port;
+
+ if (BP_NOMCP(bp))
+ return;
+
+ is_required = 0;
+ val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
+ SHARED_HW_CFG_FAN_FAILURE_MASK;
+
+ if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
+ is_required = 1;
+
+ /*
+ * The fan failure mechanism is usually related to the PHY type since
+ * the power consumption of the board is affected by the PHY. Currently,
+ * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
+ */
+ else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
+ for (port = PORT_0; port < PORT_MAX; port++) {
+ is_required |=
+ bnx2x_fan_failure_det_req(
+ bp,
+ bp->common.shmem_base,
+ bp->common.shmem2_base,
+ port);
+ }
+
+ DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
+
+ if (is_required == 0)
+ return;
+
+ /* Fan failure is indicated by SPIO 5 */
+ bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
+ MISC_REGISTERS_SPIO_INPUT_HI_Z);
+
+ /* set to active low mode */
+ val = REG_RD(bp, MISC_REG_SPIO_INT);
+ val |= ((1 << MISC_REGISTERS_SPIO_5) <<
+ MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
+ REG_WR(bp, MISC_REG_SPIO_INT, val);
+
+ /* enable interrupt to signal the IGU */
+ val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
+ val |= (1 << MISC_REGISTERS_SPIO_5);
+ REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
+}
+
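+/* Make subsequent GRC accesses from this function be issued on behalf of
+ * 'pretend_func_num' by programming the per-function PGL pretend register;
+ * the read-back flushes the write so the new setting takes effect.
+ */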
+static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
+{
+ u32 offset = 0;
+
+ if (CHIP_IS_E1(bp))
+ return;
+ if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
+ return;
+
+ switch (BP_ABS_FUNC(bp)) {
+ case 0:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
+ break;
+ case 1:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
+ break;
+ case 2:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
+ break;
+ case 3:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
+ break;
+ case 4:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
+ break;
+ case 5:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
+ break;
+ case 6:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
+ break;
+ case 7:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
+ break;
+ default:
+ return;
+ }
+
+ REG_WR(bp, offset, pretend_func_num);
+ REG_RD(bp, offset);
+ DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
+}
+
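+/* Disable this PF: clear its function-enable bit in the IGU, its
+ * master-enable in PGLUE_B and its weak enable in the CFC.
+ */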
+void bnx2x_pf_disable(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+ val &= ~IGU_PF_CONF_FUNC_EN;
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+ REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
+}
+
+static inline void bnx2x__common_init_phy(struct bnx2x *bp)
+{
+ u32 shmem_base[2], shmem2_base[2];
+ shmem_base[0] = bp->common.shmem_base;
+ shmem2_base[0] = bp->common.shmem2_base;
+ if (!CHIP_IS_E1x(bp)) {
+ shmem_base[1] =
+ SHMEM2_RD(bp, other_shmem_base_addr);
+ shmem2_base[1] =
+ SHMEM2_RD(bp, other_shmem2_base_addr);
+ }
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
+ bp->common.chip_id);
+ bnx2x_release_phy_lock(bp);
+}
+
+/**
+ * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
+ *
+ * @bp: driver handle
+ */
+static int bnx2x_init_hw_common(struct bnx2x *bp)
+{
+ u32 val;
+
+ DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
+
+ /*
+ * take the UNDI lock to protect undi_unload flow from accessing
+ * registers while we're resetting the chip
+ */
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
+ bnx2x_reset_common(bp);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
+
+ val = 0xfffc;
+ if (CHIP_IS_E3(bp)) {
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+ }
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
+
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
+ bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
+
+ if (!CHIP_IS_E1x(bp)) {
+ u8 abs_func_id;
+
+ /* In 4-port or 2-port mode we need to turn off master-enable
+ * for everyone and, after that, turn it back on for self.
+ * So we disregard multi-function or not, and always disable
+ * it for all functions on the given path, meaning 0,2,4,6 for
+ * path 0 and 1,3,5,7 for path 1
+ */
+ for (abs_func_id = BP_PATH(bp);
+ abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
+ if (abs_func_id == BP_ABS_FUNC(bp)) {
+ REG_WR(bp,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
+ 1);
+ continue;
+ }
+
+ bnx2x_pretend_func(bp, abs_func_id);
+ /* clear pf enable */
+ bnx2x_pf_disable(bp);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+ }
+ }
+
+ bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
+ if (CHIP_IS_E1(bp)) {
+ /* enable HW interrupt from PXP on USDM overflow
+ * bit 16 on INT_MASK_0
+ */
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
+ }
+
+ bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
+ bnx2x_init_pxp(bp);
+
+#ifdef __BIG_ENDIAN
+ REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
+ REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
+ REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
+ REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
+ REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
+ /* make sure this value is 0 */
+ REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
+
+/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
+ REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
+ REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
+ REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
+ REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
+#endif
+
+ bnx2x_ilt_init_page_size(bp, INITOP_SET);
+
+ if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
+ REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
+
+ /* let the HW do its magic ... */
+ msleep(100);
+ /* finish PXP init */
+ val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
+ if (val != 1) {
+ BNX2X_ERR("PXP2 CFG failed\n");
+ return -EBUSY;
+ }
+ val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
+ if (val != 1) {
+ BNX2X_ERR("PXP2 RD_INIT failed\n");
+ return -EBUSY;
+ }
+
+ /* Timers bug workaround E2 only. We need to set the entire ILT to
+ * have entries with value "0" and valid bit on.
+ * This needs to be done by the first PF that is loaded in a path
+ * (i.e. common phase)
+ */
+ if (!CHIP_IS_E1x(bp)) {
+/* In E2 there is a bug in the timers block that can cause function 6 / 7
+ * (i.e. vnic3) to start even if it is marked as "scan-off".
+ * This occurs when a different function (func2,3) is being marked
+ * as "scan-off". Real-life scenario for example: if a driver is being
+ * load-unloaded while func6,7 are down. This will cause the timer to access
+ * the ilt, translate to a logical address and send a request to read/write.
+ * Since the ilt for the function that is down is not valid, this will cause
+ * a translation error which is unrecoverable.
+ * The Workaround is intended to make sure that when this happens nothing fatal
+ * will occur. The workaround:
+ * 1. First PF driver which loads on a path will:
+ * a. After taking the chip out of reset, by using pretend,
+ * it will write "0" to the following registers of
+ * the other vnics.
+ * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+ * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
+ * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
+ * And for itself it will write '1' to
+ * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
+ * dmae-operations (writing to pram for example.)
+ * note: can be done for only function 6,7 but cleaner this
+ * way.
+ * b. Write zero+valid to the entire ILT.
+ * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
+ * VNIC3 (of that port). The range allocated will be the
+ * entire ILT. This is needed to prevent ILT range error.
+ * 2. Any PF driver load flow:
+ * a. ILT update with the physical addresses of the allocated
+ * logical pages.
+ * b. Wait 20msec. - note that this timeout is needed to make
+ * sure there are no requests in one of the PXP internal
+ * queues with "old" ILT addresses.
+ * c. PF enable in the PGLC.
+ * d. Clear the was_error of the PF in the PGLC. (could have
+ * occurred while driver was down)
+ * e. PF enable in the CFC (WEAK + STRONG)
+ * f. Timers scan enable
+ * 3. PF driver unload flow:
+ * a. Clear the Timers scan_en.
+ * b. Polling for scan_on=0 for that PF.
+ * c. Clear the PF enable bit in the PXP.
+ * d. Clear the PF enable in the CFC (WEAK + STRONG)
+ * e. Write zero+valid to all ILT entries (The valid bit must
+ * stay set)
+ * f. If this is VNIC 3 of a port then also init
+ * first_timers_ilt_entry to zero and last_timers_ilt_entry
+ * to the last entry in the ILT.
+ *
+ * Notes:
+ * Currently the PF error in the PGLC is non recoverable.
+ * In the future there will be a recovery routine for this error.
+ * Currently attention is masked.
+ * Having an MCP lock on the load/unload process does not guarantee that
+ * there is no Timer disable during Func6/7 enable. This is because the
+ * Timers scan is currently being cleared by the MCP on FLR.
+ * Step 2.d can be done only for PF6/7 and the driver can also check if
+ * there is error before clearing it. But the flow above is simpler and
+ * more general.
+ * All ILT entries are written by zero+valid and not just PF6/7
+ * ILT entries since in the future the ILT entries allocation for
+ * PF-s might be dynamic.
+ */
+ struct ilt_client_info ilt_cli;
+ struct bnx2x_ilt ilt;
+ memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
+ memset(&ilt, 0, sizeof(struct bnx2x_ilt));
+
+ /* initialize dummy TM client */
+ ilt_cli.start = 0;
+ ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
+ ilt_cli.client_num = ILT_CLIENT_TM;
+
+ /* Step 1: set zeroes to all ilt page entries with valid bit on
+ * Step 2: set the timers first/last ilt entry to point
+ * to the entire range to prevent ILT range error for 3rd/4th
+ * vnic (this code assumes existence of the vnic)
+ *
+ * both steps performed by call to bnx2x_ilt_client_init_op()
+ * with dummy TM client
+ *
+ * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
+ * and its counterpart are split registers
+ */
+ bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
+ bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
+ REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
+ REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
+ }
+
+ REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
+ REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
+
+ if (!CHIP_IS_E1x(bp)) {
+ int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
+ (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
+ bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
+
+ /* let the HW do its magic ... */
+ do {
+ msleep(200);
+ val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
+ } while (factor-- && (val != 1));
+
+ if (val != 1) {
+ BNX2X_ERR("ATC_INIT failed\n");
+ return -EBUSY;
+ }
+ }
+
+ bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
+
+ /* clean the DMAE memory */
+ bp->dmae_ready = 1;
+ bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
+
+ bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
+
+ bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
+ bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
+ bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
+ bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
+
+ bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
+
+ /* QM queues pointers table */
+ bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
+
+ /* soft reset pulse */
+ REG_WR(bp, QM_REG_SOFT_RESET, 1);
+ REG_WR(bp, QM_REG_SOFT_RESET, 0);
+
+#ifdef BCM_CNIC
+ bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
+#endif
+
+ bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
+ REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
+ if (!CHIP_REV_IS_SLOW(bp))
+ /* enable hw interrupt from doorbell Q */
+ REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
+
+ bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
+ REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
+
+ if (!CHIP_IS_E1(bp))
+ REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
+
+ if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp))
+ /* Bit-map indicating which L2 hdrs may appear
+ * after the basic Ethernet header
+ */
+ REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
+ bp->path_has_ovlan ? 7 : 6);
+
+ bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
+
+ if (!CHIP_IS_E1x(bp)) {
+ /* reset VFC memories */
+ REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
+ VFC_MEMORIES_RST_REG_CAM_RST |
+ VFC_MEMORIES_RST_REG_RAM_RST);
+ REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
+ VFC_MEMORIES_RST_REG_CAM_RST |
+ VFC_MEMORIES_RST_REG_RAM_RST);
+
+ msleep(20);
+ }
+
+ bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
+
+ /* sync semi rtc */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ 0x80000000);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+ 0x80000000);
+
+ bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
+
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
+ bp->path_has_ovlan ? 7 : 6);
+
+ REG_WR(bp, SRC_REG_SOFT_RST, 1);
+
+ bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
+
+#ifdef BCM_CNIC
+ REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
+ REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
+ REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
+ REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
+ REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
+ REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
+ REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
+ REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
+ REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
+ REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
+#endif
+ REG_WR(bp, SRC_REG_SOFT_RST, 0);
+
+ if (sizeof(union cdu_context) != 1024)
+ /* we currently assume that a context is 1024 bytes */
+ dev_alert(&bp->pdev->dev, "please adjust the size "
+ "of cdu_context(%ld)\n",
+ (long)sizeof(union cdu_context));
+
+ bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
+ val = (4 << 24) + (0 << 12) + 1024;
+ REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
+
+ bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
+ REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
+ /* enable context validation interrupt from CFC */
+ REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
+
+ /* set the thresholds to prevent CFC/CDU race */
+ REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
+
+ bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
+
+ if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
+ REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
+
+ bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
+
+ /* Reset PCIE errors for debug */
+ REG_WR(bp, 0x2814, 0xffffffff);
+ REG_WR(bp, 0x3820, 0xffffffff);
+
+ if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
+ (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
+ PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
+ REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
+ (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
+ PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
+ PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
+ REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
+ (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
+ PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
+ PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
+ }
+
+ bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
+ if (!CHIP_IS_E1(bp)) {
+ /* in E3 this is done in the per-port section */
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
+ }
+ if (CHIP_IS_E1H(bp))
+ /* not applicable for E2 (and above ...) */
+ REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
+
+ if (CHIP_REV_IS_SLOW(bp))
+ msleep(200);
+
+ /* finish CFC init */
+ val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
+ if (val != 1) {
+ BNX2X_ERR("CFC LL_INIT failed\n");
+ return -EBUSY;
+ }
+ val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
+ if (val != 1) {
+ BNX2X_ERR("CFC AC_INIT failed\n");
+ return -EBUSY;
+ }
+ val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
+ if (val != 1) {
+ BNX2X_ERR("CFC CAM_INIT failed\n");
+ return -EBUSY;
+ }
+ REG_WR(bp, CFC_REG_DEBUG0, 0);
+
+ if (CHIP_IS_E1(bp)) {
+ /* read NIG statistic to see if this is our first up
+ * since powerup
+ */
+ bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+ val = *bnx2x_sp(bp, wb_data[0]);
+
+ /* do internal memory self test */
+ if ((val == 0) && bnx2x_int_mem_test(bp)) {
+ BNX2X_ERR("internal mem self test failed\n");
+ return -EBUSY;
+ }
+ }
+
+ bnx2x_setup_fan_failure_detection(bp);
+
+ /* clear PXP2 attentions */
+ REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
+
+ bnx2x_enable_blocks_attention(bp);
+ bnx2x_enable_blocks_parity(bp);
+
+ if (!BP_NOMCP(bp)) {
+ if (CHIP_IS_E1x(bp))
+ bnx2x__common_init_phy(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+
+ return 0;
+}
+
+/**
+ * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
+ *
+ * @bp: driver handle
+ */
+static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
+{
+ int rc = bnx2x_init_hw_common(bp);
+
+ if (rc)
+ return rc;
+
+ /* In E2 2-PORT mode, same ext phy is used for the two paths */
+ if (!BP_NOMCP(bp))
+ bnx2x__common_init_phy(bp);
+
+ return 0;
+}
+
+static int bnx2x_init_hw_port(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
+ u32 low, high;
+ u32 val;
+
+ bnx2x__link_reset(bp);
+
+ DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
+
+ REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
+
+ bnx2x_init_block(bp, BLOCK_MISC, init_phase);
+ bnx2x_init_block(bp, BLOCK_PXP, init_phase);
+ bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
+
+ /* Timers bug workaround: the common phase disables the pf_master
+ * bit in pglue, so we need to enable it here before any dmae access
+ * is attempted. Therefore we manually added the enable-master to the
+ * port phase (it also happens in the function phase)
+ */
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+
+ bnx2x_init_block(bp, BLOCK_ATC, init_phase);
+ bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
+ bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
+ bnx2x_init_block(bp, BLOCK_QM, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_TCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_UCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XCM, init_phase);
+
+ /* QM cid (connection) count */
+ bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
+
+#ifdef BCM_CNIC
+ bnx2x_init_block(bp, BLOCK_TM, init_phase);
+ REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
+ REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
+#endif
+
+ bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+
+ if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
+ bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
+
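+ /* BRB pause thresholds are in 256-byte units: 'low' scales with
+ * the MTU and 'high' adds a fixed 14KB (56 * 256B) of headroom
+ */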
+ if (IS_MF(bp))
+ low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
+ else if (bp->dev->mtu > 4096) {
+ if (bp->flags & ONE_PORT_FLAG)
+ low = 160;
+ else {
+ val = bp->dev->mtu;
+ /* (24*1024 + val*4)/256 */
+ low = 96 + (val/64) +
+ ((val % 64) ? 1 : 0);
+ }
+ } else
+ low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
+ high = low + 56; /* 14*1024/256 */
+ REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
+ REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
+ }
+
+ if (CHIP_MODE_IS_4_PORT(bp))
+ REG_WR(bp, (BP_PORT(bp) ?
+ BRB1_REG_MAC_GUARANTIED_1 :
+ BRB1_REG_MAC_GUARANTIED_0), 40);
+
+ bnx2x_init_block(bp, BLOCK_PRS, init_phase);
+ if (CHIP_IS_E3B0(bp))
+ /* Ovlan exists only if we are in multi-function +
+ * switch-dependent mode; in switch-independent mode
+ * there are no ovlan headers
+ */
+ REG_WR(bp, BP_PORT(bp) ?
+ PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
+ PRS_REG_HDRS_AFTER_BASIC_PORT_0,
+ (bp->path_has_ovlan ? 7 : 6));
+
+ bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_USDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_USEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_UPB, init_phase);
+ bnx2x_init_block(bp, BLOCK_XPB, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_PBF, init_phase);
+
+ if (CHIP_IS_E1x(bp)) {
+ /* configure PBF to work without PAUSE for MTU 9000 */
+ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
+
+ /* update threshold */
+ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
+ /* update init credit */
+ REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
+
+ /* probe changes */
+ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
+ udelay(50);
+ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
+ }
+
+#ifdef BCM_CNIC
+ bnx2x_init_block(bp, BLOCK_SRC, init_phase);
+#endif
+ bnx2x_init_block(bp, BLOCK_CDU, init_phase);
+ bnx2x_init_block(bp, BLOCK_CFC, init_phase);
+
+ if (CHIP_IS_E1(bp)) {
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+ }
+ bnx2x_init_block(bp, BLOCK_HC, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_IGU, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
+ /* init aeu_mask_attn_func_0/1:
+ * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
+ * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
+ * bits 4-7 are used for "per vn group attention" */
+ val = IS_MF(bp) ? 0xF7 : 0x7;
+ /* Enable DCBX attention for all but E1 */
+ val |= CHIP_IS_E1(bp) ? 0 : 0x10;
+ REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
+
+ bnx2x_init_block(bp, BLOCK_NIG, init_phase);
+
+ if (!CHIP_IS_E1x(bp)) {
+ /* Bit-map indicating which L2 hdrs may appear after the
+ * basic Ethernet header
+ */
+ REG_WR(bp, BP_PORT(bp) ?
+ NIG_REG_P1_HDRS_AFTER_BASIC :
+ NIG_REG_P0_HDRS_AFTER_BASIC,
+ IS_MF_SD(bp) ? 7 : 6);
+
+ if (CHIP_IS_E3(bp))
+ REG_WR(bp, BP_PORT(bp) ?
+ NIG_REG_LLH1_MF_MODE :
+ NIG_REG_LLH_MF_MODE, IS_MF(bp));
+ }
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
+
+ if (!CHIP_IS_E1(bp)) {
+ /* 0x2 disable mf_ov, 0x1 enable */
+ REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
+ (IS_MF_SD(bp) ? 0x1 : 0x2));
+
+ if (!CHIP_IS_E1x(bp)) {
+ val = 0;
+ switch (bp->mf_mode) {
+ case MULTI_FUNCTION_SD:
+ val = 1;
+ break;
+ case MULTI_FUNCTION_SI:
+ val = 2;
+ break;
+ }
+
+ REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
+ NIG_REG_LLH0_CLS_TYPE), val);
+ }
+ {
+ REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
+ REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
+ REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
+ }
+ }
+
+ /* If SPIO5 is set to generate interrupts, enable it for this port */
+ val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
+ if (val & (1 << MISC_REGISTERS_SPIO_5)) {
+ u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+ val = REG_RD(bp, reg_addr);
+ val |= AEU_INPUTS_ATTN_BITS_SPIO5;
+ REG_WR(bp, reg_addr, val);
+ }
+
+ return 0;
+}
+
+static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
+{
+ int reg;
+
+ if (CHIP_IS_E1(bp))
+ reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
+ else
+ reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
+
+ bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
+}
+
+static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
+{
+ bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
+}
+
+static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
+{
+ u32 i, base = FUNC_ILT_BASE(func);
+ for (i = base; i < base + ILT_PER_FUNC; i++)
+ bnx2x_ilt_wr(bp, i, 0);
+}
+
+static int bnx2x_init_hw_func(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int func = BP_FUNC(bp);
+ int init_phase = PHASE_PF0 + func;
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+ u16 cdu_ilt_start;
+ u32 addr, val;
+ u32 main_mem_base, main_mem_size, main_mem_prty_clr;
+ int i, main_mem_width;
+
+ DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
+
+	/* FLR cleanup */
+ if (!CHIP_IS_E1x(bp))
+ bnx2x_pf_flr_clnup(bp);
+
+ /* set MSI reconfigure capability */
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
+ val = REG_RD(bp, addr);
+ val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
+ REG_WR(bp, addr, val);
+ }
+
+ bnx2x_init_block(bp, BLOCK_PXP, init_phase);
+ bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
+
+ ilt = BP_ILT(bp);
+ cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
+
+ for (i = 0; i < L2_ILT_LINES(bp); i++) {
+ ilt->lines[cdu_ilt_start + i].page =
+ bp->context.vcxt + (ILT_PAGE_CIDS * i);
+ ilt->lines[cdu_ilt_start + i].page_mapping =
+ bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
+		/* CDU ILT pages are allocated manually, so there's no need
+		 * to set the size
+		 */
+ }
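+	/* In other words: ILT line (cdu_ilt_start + i) points at the i-th
+	 * CDU_ILT_PAGE_SZ-sized slice of the context array, so the whole L2
+	 * context range is mapped linearly into the CDU client window.
+	 */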
+ bnx2x_ilt_init_op(bp, INITOP_SET);
+
+#ifdef BCM_CNIC
+ bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
+
+ /* T1 hash bits value determines the T1 number of entries */
+ REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
+#endif
+
+#ifndef BCM_CNIC
+ /* set NIC mode */
+ REG_WR(bp, PRS_REG_NIC_MODE, 1);
+#endif /* BCM_CNIC */
+
+ if (!CHIP_IS_E1x(bp)) {
+ u32 pf_conf = IGU_PF_CONF_FUNC_EN;
+
+ /* Turn on a single ISR mode in IGU if driver is going to use
+ * INT#x or MSI
+ */
+ if (!(bp->flags & USING_MSIX_FLAG))
+ pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ /*
+		 * Timers bug workaround: function init part.
+		 * We need to wait 20msec after initializing ILT to make sure
+		 * there are no requests in one of the PXP internal queues
+		 * with "old" ILT addresses
+ */
+ msleep(20);
+ /*
+		 * Master enable - needed because of the WB DMAE writes
+		 * performed before this register is re-initialized as part
+		 * of the regular function init
+ */
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+ /* Enable the function in IGU */
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
+ }
+
+ bp->dmae_ready = 1;
+
+ bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
+
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
+
+ bnx2x_init_block(bp, BLOCK_ATC, init_phase);
+ bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
+ bnx2x_init_block(bp, BLOCK_NIG, init_phase);
+ bnx2x_init_block(bp, BLOCK_SRC, init_phase);
+ bnx2x_init_block(bp, BLOCK_MISC, init_phase);
+ bnx2x_init_block(bp, BLOCK_TCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_UCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_USEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
+
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, QM_REG_PF_EN, 1);
+
+ if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ }
+ bnx2x_init_block(bp, BLOCK_QM, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_TM, init_phase);
+ bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+ bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
+ bnx2x_init_block(bp, BLOCK_PRS, init_phase);
+ bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_USDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_UPB, init_phase);
+ bnx2x_init_block(bp, BLOCK_XPB, init_phase);
+ bnx2x_init_block(bp, BLOCK_PBF, init_phase);
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PBF_REG_DISABLE_PF, 0);
+
+ bnx2x_init_block(bp, BLOCK_CDU, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_CFC, init_phase);
+
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
+
+ if (IS_MF(bp)) {
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
+ REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
+ }
+
+ bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
+
+ /* HC init per function */
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ if (CHIP_IS_E1H(bp)) {
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+ }
+ bnx2x_init_block(bp, BLOCK_HC, init_phase);
+
+ } else {
+ int num_segs, sb_idx, prod_offset;
+
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+
+ if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
+ }
+
+ bnx2x_init_block(bp, BLOCK_IGU, init_phase);
+
+ if (!CHIP_IS_E1x(bp)) {
+ int dsb_idx = 0;
+ /**
+ * Producer memory:
+ * E2 mode: address 0-135 match to the mapping memory;
+ * 136 - PF0 default prod; 137 - PF1 default prod;
+ * 138 - PF2 default prod; 139 - PF3 default prod;
+ * 140 - PF0 attn prod; 141 - PF1 attn prod;
+ * 142 - PF2 attn prod; 143 - PF3 attn prod;
+ * 144-147 reserved.
+ *
+			 * E1.5 mode - in backward-compatible mode:
+			 * for non-default SBs, each even line in the memory
+			 * holds the U producer and each odd line holds
+			 * the C producer. The first 128 producers are for
+			 * NDSBs (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
+			 * producers are for the DSB of each PF.
+ * Each PF has five segments: (the order inside each
+ * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
+ * 132-135 C prods; 136-139 X prods; 140-143 T prods;
+ * 144-147 attn prods;
+ */
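+			/* Addressing sketch for the writes below: each
+			 * producer slot is 4 bytes wide, so NDSB number
+			 * (igu_base_sb + sb_idx) with num_segs segments
+			 * starts at IGU_REG_PROD_CONS_MEMORY +
+			 * (igu_base_sb + sb_idx) * num_segs * 4.
+			 */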
+ /* non-default-status-blocks */
+ num_segs = CHIP_INT_MODE_IS_BC(bp) ?
+ IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
+ for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
+ prod_offset = (bp->igu_base_sb + sb_idx) *
+ num_segs;
+
+ for (i = 0; i < num_segs; i++) {
+ addr = IGU_REG_PROD_CONS_MEMORY +
+ (prod_offset + i) * 4;
+ REG_WR(bp, addr, 0);
+ }
+ /* send consumer update with value 0 */
+ bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
+ USTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_igu_clear_sb(bp,
+ bp->igu_base_sb + sb_idx);
+ }
+
+ /* default-status-blocks */
+ num_segs = CHIP_INT_MODE_IS_BC(bp) ?
+ IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
+
+ if (CHIP_MODE_IS_4_PORT(bp))
+ dsb_idx = BP_FUNC(bp);
+ else
+ dsb_idx = BP_VN(bp);
+
+ prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
+ IGU_BC_BASE_DSB_PROD + dsb_idx :
+ IGU_NORM_BASE_DSB_PROD + dsb_idx);
+
+ /*
+			 * IGU producers come in chunks of E1HVN_MAX (4),
+			 * regardless of the current chip mode
+ */
+ for (i = 0; i < (num_segs * E1HVN_MAX);
+ i += E1HVN_MAX) {
+ addr = IGU_REG_PROD_CONS_MEMORY +
+ (prod_offset + i)*4;
+ REG_WR(bp, addr, 0);
+ }
+ /* send consumer update with 0 */
+ if (CHIP_INT_MODE_IS_BC(bp)) {
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ USTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ CSTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ XSTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ TSTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ ATTENTION_ID, 0, IGU_INT_NOP, 1);
+ } else {
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ USTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ ATTENTION_ID, 0, IGU_INT_NOP, 1);
+ }
+ bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
+
+ /* !!! these should become driver const once
+ rf-tool supports split-68 const */
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
+ REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
+ REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
+ REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
+ REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
+ }
+ }
+
+ /* Reset PCIE errors for debug */
+ REG_WR(bp, 0x2114, 0xffffffff);
+ REG_WR(bp, 0x2120, 0xffffffff);
+
+ if (CHIP_IS_E1x(bp)) {
+ main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
+ main_mem_base = HC_REG_MAIN_MEMORY +
+ BP_PORT(bp) * (main_mem_size * 4);
+ main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
+ main_mem_width = 8;
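+		/* The copy loop below walks this port's half of the HC main
+		 * memory, reading main_mem_width / 4 = 2 dwords at a time
+		 * via DMAE and writing the same data back - seemingly a pass
+		 * whose only purpose is to refresh the RAM and clear latched
+		 * parity.
+		 */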
+
+ val = REG_RD(bp, main_mem_prty_clr);
+ if (val)
+ DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
+ "block during "
+ "function init (0x%x)!\n", val);
+
+ /* Clear "false" parity errors in MSI-X table */
+ for (i = main_mem_base;
+ i < main_mem_base + main_mem_size * 4;
+ i += main_mem_width) {
+ bnx2x_read_dmae(bp, i, main_mem_width / 4);
+ bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
+ i, main_mem_width / 4);
+ }
+ /* Clear HC parity attention */
+ REG_RD(bp, main_mem_prty_clr);
+ }
+
+#ifdef BNX2X_STOP_ON_ERROR
+ /* Enable STORMs SP logging */
+ REG_WR8(bp, BAR_USTRORM_INTMEM +
+ USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+ REG_WR8(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+#endif
+
+ bnx2x_phy_probe(&bp->link_params);
+
+ return 0;
+}
+
+
+void bnx2x_free_mem(struct bnx2x *bp)
+{
+ /* fastpath */
+ bnx2x_free_fp_mem(bp);
+ /* end of fastpath */
+
+ BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
+ sizeof(struct host_sp_status_block));
+
+ BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+
+ BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
+ sizeof(struct bnx2x_slowpath));
+
+ BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
+ bp->context.size);
+
+ bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
+
+ BNX2X_FREE(bp->ilt->lines);
+
+#ifdef BCM_CNIC
+ if (!CHIP_IS_E1x(bp))
+ BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e1x));
+
+ BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
+#endif
+
+ BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
+
+ BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
+ BCM_PAGE_SIZE * NUM_EQ_PAGES);
+}
+
+static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
+{
+ int num_groups;
+
+ /* number of eth_queues */
+ u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp);
+
+ /* Total number of FW statistics requests =
+ * 1 for port stats + 1 for PF stats + num_eth_queues */
+ bp->fw_stats_num = 2 + num_queue_stats;
+
+ /* Request is built from stats_query_header and an array of
+ * stats_query_cmd_group each of which contains
+	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
+ * configured in the stats_query_header.
+ */
+ num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT +
+ (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
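+	/* This is just a ceiling division; it could equally be written
+	 * as DIV_ROUND_UP(2 + num_queue_stats, STATS_QUERY_CMD_COUNT).
+	 */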
+
+ bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
+ num_groups * sizeof(struct stats_query_cmd_group);
+
+	/* Data for statistics requests + stats_counter
+ *
+ * stats_counter holds per-STORM counters that are incremented
+ * when STORM has finished with the current request.
+ */
+ bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
+ sizeof(struct per_pf_stats) +
+ sizeof(struct per_queue_stats) * num_queue_stats +
+ sizeof(struct stats_counter);
+
+ BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+
+ /* Set shortcuts */
+ bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
+ bp->fw_stats_req_mapping = bp->fw_stats_mapping;
+
+ bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
+ ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
+
+ bp->fw_stats_data_mapping = bp->fw_stats_mapping +
+ bp->fw_stats_req_sz;
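+	/* Resulting layout of the single DMA allocation (a sketch):
+	 *
+	 *   fw_stats -> +--------------------------+
+	 *               | request                  | fw_stats_req_sz
+	 *   data ->     +--------------------------+
+	 *               | port/PF/queue stats +    |
+	 *               | stats_counter            | fw_stats_data_sz
+	 *               +--------------------------+
+	 */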
+ return 0;
+
+alloc_mem_err:
+ BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ return -ENOMEM;
+}
+
+
+int bnx2x_alloc_mem(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+ if (!CHIP_IS_E1x(bp))
+ /* size = the status block + ramrod buffers */
+ BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e1x));
+
+ /* allocate searcher T2 table */
+ BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
+#endif
+
+ BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
+ sizeof(struct host_sp_status_block));
+
+ BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
+ sizeof(struct bnx2x_slowpath));
+
+	/* Allocate memory for FW statistics */
+ if (bnx2x_alloc_fw_stats_mem(bp))
+ goto alloc_mem_err;
+
+ bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
+
+ BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
+ bp->context.size);
+
+ BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
+
+ if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
+ goto alloc_mem_err;
+
+ /* Slow path ring */
+ BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
+
+ /* EQ */
+ BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
+ BCM_PAGE_SIZE * NUM_EQ_PAGES);
+
+ /* fastpath */
+	/* must be done last, since it self-adjusts to the amount
+	 * of memory available for the RSS queues
+	 */
+ if (bnx2x_alloc_fp_mem(bp))
+ goto alloc_mem_err;
+ return 0;
+
+alloc_mem_err:
+ bnx2x_free_mem(bp);
+ return -ENOMEM;
+}
+
+/*
+ * Init service functions
+ */
+
+int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+ struct bnx2x_vlan_mac_obj *obj, bool set,
+ int mac_type, unsigned long *ramrod_flags)
+{
+ int rc;
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+ /* Fill general parameters */
+ ramrod_param.vlan_mac_obj = obj;
+ ramrod_param.ramrod_flags = *ramrod_flags;
+
+ /* Fill a user request section if needed */
+ if (!test_bit(RAMROD_CONT, ramrod_flags)) {
+ memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
+
+ __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
+
+ /* Set the command: ADD or DEL */
+ if (set)
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ else
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+ }
+
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ if (rc < 0)
+ BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
+ return rc;
+}
+
+int bnx2x_del_all_macs(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *mac_obj,
+ int mac_type, bool wait_for_comp)
+{
+ int rc;
+ unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
+
+	/* Wait for completion of the requested commands */
+ if (wait_for_comp)
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+
+ /* Set the mac type of addresses we want to clear */
+ __set_bit(mac_type, &vlan_mac_flags);
+
+ rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
+ if (rc < 0)
+ BNX2X_ERR("Failed to delete MACs: %d\n", rc);
+
+ return rc;
+}
+
+int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
+{
+ unsigned long ramrod_flags = 0;
+
+ DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
+
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ /* Eth MAC is set on RSS leading client (fp[0]) */
+ return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set,
+ BNX2X_ETH_MAC, &ramrod_flags);
+}
+
+int bnx2x_setup_leading(struct bnx2x *bp)
+{
+ return bnx2x_setup_queue(bp, &bp->fp[0], 1);
+}
+
+/**
+ * bnx2x_set_int_mode - configure interrupt mode
+ *
+ * @bp: driver handle
+ *
+ * In case of MSI-X it will also try to enable MSI-X.
+ */
+static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
+{
+ switch (int_mode) {
+ case INT_MODE_MSI:
+ bnx2x_enable_msi(bp);
+ /* falling through... */
+ case INT_MODE_INTx:
+ bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
+ DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
+ break;
+ default:
+ /* Set number of queues according to bp->multi_mode value */
+ bnx2x_set_num_queues(bp);
+
+ DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
+ bp->num_queues);
+
+ /* if we can't use MSI-X we only need one fp,
+ * so try to enable MSI-X with the requested number of fp's
+		 * and fall back to MSI or legacy INTx with one fp
+ */
+ if (bnx2x_enable_msix(bp)) {
+ /* failed to enable MSI-X */
+ if (bp->multi_mode)
+ DP(NETIF_MSG_IFUP,
+ "Multi requested but failed to "
+ "enable MSI-X (%d), "
+ "set number of queues to %d\n",
+ bp->num_queues,
+ 1 + NON_ETH_CONTEXT_USE);
+ bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
+
+ /* Try to enable MSI */
+ if (!(bp->flags & DISABLE_MSI_FLAG))
+ bnx2x_enable_msi(bp);
+ }
+ break;
+ }
+}
+
+/* must be called prior to any HW initializations */
+static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
+{
+ return L2_ILT_LINES(bp);
+}
+
+void bnx2x_ilt_set_info(struct bnx2x *bp)
+{
+ struct ilt_client_info *ilt_client;
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+ u16 line = 0;
+
+ ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
+ DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
+
+ /* CDU */
+ ilt_client = &ilt->clients[ILT_CLIENT_CDU];
+ ilt_client->client_num = ILT_CLIENT_CDU;
+ ilt_client->page_size = CDU_ILT_PAGE_SZ;
+ ilt_client->flags = ILT_CLIENT_SKIP_MEM;
+ ilt_client->start = line;
+ line += bnx2x_cid_ilt_lines(bp);
+#ifdef BCM_CNIC
+ line += CNIC_ILT_LINES;
+#endif
+ ilt_client->end = line - 1;
+
+ DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
+ "flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
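+	/* "hw psz" encodes the page size as a power of two of 4KB units:
+	 * e.g. a 64KB ILT page gives ilog2(65536 >> 12) = ilog2(16) = 4.
+	 */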
+
+ /* QM */
+ if (QM_INIT(bp->qm_cid_count)) {
+ ilt_client = &ilt->clients[ILT_CLIENT_QM];
+ ilt_client->client_num = ILT_CLIENT_QM;
+ ilt_client->page_size = QM_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+
+ /* 4 bytes for each cid */
+ line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
+ QM_ILT_PAGE_SZ);
+
+ ilt_client->end = line - 1;
+
+ DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
+ "flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
+
+ }
+ /* SRC */
+ ilt_client = &ilt->clients[ILT_CLIENT_SRC];
+#ifdef BCM_CNIC
+ ilt_client->client_num = ILT_CLIENT_SRC;
+ ilt_client->page_size = SRC_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+ line += SRC_ILT_LINES;
+ ilt_client->end = line - 1;
+
+ DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
+ "flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
+
+#else
+ ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
+#endif
+
+ /* TM */
+ ilt_client = &ilt->clients[ILT_CLIENT_TM];
+#ifdef BCM_CNIC
+ ilt_client->client_num = ILT_CLIENT_TM;
+ ilt_client->page_size = TM_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+ line += TM_ILT_LINES;
+ ilt_client->end = line - 1;
+
+ DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
+ "flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
+
+#else
+ ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
+#endif
+ BUG_ON(line > ILT_MAX_LINES);
+}
+
+/**
+ * bnx2x_pf_q_prep_init - prepare INIT transition parameters
+ *
+ * @bp: driver handle
+ * @fp: pointer to fastpath
+ * @init_params: pointer to parameters structure
+ *
+ * parameters configured:
+ * - HC configuration
+ * - Queue's CDU context
+ */
+static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
+{
+ u8 cos;
+ /* FCoE Queue uses Default SB, thus has no HC capabilities */
+ if (!IS_FCOE_FP(fp)) {
+ __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
+ __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
+
+		/* If HC is supported, enable host coalescing in the transition
+ * to INIT state.
+ */
+ __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
+ __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
+
+ /* HC rate */
+ init_params->rx.hc_rate = bp->rx_ticks ?
+ (1000000 / bp->rx_ticks) : 0;
+ init_params->tx.hc_rate = bp->tx_ticks ?
+ (1000000 / bp->tx_ticks) : 0;
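+		/* hc_rate is an updates-per-second figure: assuming the tick
+		 * values are in microseconds, e.g. rx_ticks = 50 gives
+		 * 1000000 / 50 = 20000 coalescing updates per second.
+		 */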
+
+ /* FW SB ID */
+ init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
+ fp->fw_sb_id;
+
+ /*
+		 * CQ index among the SB indices: the FCoE client uses the default
+ * SB, therefore it's different.
+ */
+ init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+ init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
+ }
+
+ /* set maximum number of COSs supported by this queue */
+ init_params->max_cos = fp->max_cos;
+
+ DP(BNX2X_MSG_SP, "fp: %d setting queue params max cos to: %d\n",
+ fp->index, init_params->max_cos);
+
+ /* set the context pointers queue object */
+ for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++)
+ init_params->cxts[cos] =
+ &bp->context.vcxt[fp->txdata[cos].cid].eth;
+}
+
+int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ struct bnx2x_queue_state_params *q_params,
+ struct bnx2x_queue_setup_tx_only_params *tx_only_params,
+ int tx_index, bool leading)
+{
+ memset(tx_only_params, 0, sizeof(*tx_only_params));
+
+ /* Set the command */
+ q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
+
+ /* Set tx-only QUEUE flags: don't zero statistics */
+ tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
+
+ /* choose the index of the cid to send the slow path on */
+ tx_only_params->cid_index = tx_index;
+
+ /* Set general TX_ONLY_SETUP parameters */
+ bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
+
+ /* Set Tx TX_ONLY_SETUP parameters */
+ bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
+
+ DP(BNX2X_MSG_SP, "preparing to send tx-only ramrod for connection:"
+ "cos %d, primary cid %d, cid %d, "
+ "client id %d, sp-client id %d, flags %lx\n",
+ tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
+ q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
+ tx_only_params->gen_params.spcl_id, tx_only_params->flags);
+
+ /* send the ramrod */
+ return bnx2x_queue_state_change(bp, q_params);
+}
+
+/**
+ * bnx2x_setup_queue - setup queue
+ *
+ * @bp: driver handle
+ * @fp: pointer to fastpath
+ * @leading: is leading
+ *
+ * This function performs 2 steps in a Queue state machine
+ * actually: 1) RESET->INIT 2) INIT->SETUP
+ */
+
+int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ bool leading)
+{
+ struct bnx2x_queue_state_params q_params = {0};
+ struct bnx2x_queue_setup_params *setup_params =
+ &q_params.params.setup;
+ struct bnx2x_queue_setup_tx_only_params *tx_only_params =
+ &q_params.params.tx_only;
+ int rc;
+ u8 tx_index;
+
+ DP(BNX2X_MSG_SP, "setting up queue %d\n", fp->index);
+
+	/* reset IGU state; skip the FCoE L2 queue */
+ if (!IS_FCOE_FP(fp))
+ bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
+ IGU_INT_ENABLE, 0);
+
+ q_params.q_obj = &fp->q_obj;
+ /* We want to wait for completion in this context */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+
+ /* Prepare the INIT parameters */
+ bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
+
+ /* Set the command */
+ q_params.cmd = BNX2X_Q_CMD_INIT;
+
+ /* Change the state to INIT */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
+ return rc;
+ }
+
+ DP(BNX2X_MSG_SP, "init complete\n");
+
+ /* Now move the Queue to the SETUP state... */
+ memset(setup_params, 0, sizeof(*setup_params));
+
+ /* Set QUEUE flags */
+ setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
+
+ /* Set general SETUP parameters */
+ bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
+ FIRST_TX_COS_INDEX);
+
+ bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
+ &setup_params->rxq_params);
+
+ bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
+ FIRST_TX_COS_INDEX);
+
+ /* Set the command */
+ q_params.cmd = BNX2X_Q_CMD_SETUP;
+
+ /* Change the state to SETUP */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
+ return rc;
+ }
+
+ /* loop through the relevant tx-only indices */
+ for (tx_index = FIRST_TX_ONLY_COS_INDEX;
+ tx_index < fp->max_cos;
+ tx_index++) {
+
+ /* prepare and send tx-only ramrod*/
+ rc = bnx2x_setup_tx_only(bp, fp, &q_params,
+ tx_only_params, tx_index, leading);
+ if (rc) {
+ BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
+ fp->index, tx_index);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static int bnx2x_stop_queue(struct bnx2x *bp, int index)
+{
+ struct bnx2x_fastpath *fp = &bp->fp[index];
+ struct bnx2x_fp_txdata *txdata;
+ struct bnx2x_queue_state_params q_params = {0};
+ int rc, tx_index;
+
+ DP(BNX2X_MSG_SP, "stopping queue %d cid %d\n", index, fp->cid);
+
+ q_params.q_obj = &fp->q_obj;
+ /* We want to wait for completion in this context */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+
+ /* close tx-only connections */
+ for (tx_index = FIRST_TX_ONLY_COS_INDEX;
+ tx_index < fp->max_cos;
+	     tx_index++) {
+
+		/* ascertain this is a normal queue */
+ txdata = &fp->txdata[tx_index];
+
+ DP(BNX2X_MSG_SP, "stopping tx-only queue %d\n",
+ txdata->txq_index);
+
+		/* send the terminate ramrod on the tx-only connection */
+ q_params.cmd = BNX2X_Q_CMD_TERMINATE;
+ memset(&q_params.params.terminate, 0,
+ sizeof(q_params.params.terminate));
+ q_params.params.terminate.cid_index = tx_index;
+
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc)
+ return rc;
+
+		/* delete the cfc entry of the tx-only connection */
+ q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
+ memset(&q_params.params.cfc_del, 0,
+ sizeof(q_params.params.cfc_del));
+ q_params.params.cfc_del.cid_index = tx_index;
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc)
+ return rc;
+ }
+ /* Stop the primary connection: */
+ /* ...halt the connection */
+ q_params.cmd = BNX2X_Q_CMD_HALT;
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc)
+ return rc;
+
+ /* ...terminate the connection */
+ q_params.cmd = BNX2X_Q_CMD_TERMINATE;
+ memset(&q_params.params.terminate, 0,
+ sizeof(q_params.params.terminate));
+ q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc)
+ return rc;
+ /* ...delete cfc entry */
+ q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
+ memset(&q_params.params.cfc_del, 0,
+ sizeof(q_params.params.cfc_del));
+ q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
+ return bnx2x_queue_state_change(bp, &q_params);
+}
+
+static void bnx2x_reset_func(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int func = BP_FUNC(bp);
+ int i;
+
+ /* Disable the function in the FW */
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
+
+ /* FP SBs */
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
+ SB_DISABLED);
+ }
+
+#ifdef BCM_CNIC
+ /* CNIC SB */
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)),
+ SB_DISABLED);
+#endif
+ /* SP SB */
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
+ SB_DISABLED);
+
+ for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
+ REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
+ 0);
+
+ /* Configure IGU */
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+ } else {
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
+ }
+
+#ifdef BCM_CNIC
+ /* Disable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+ /*
+	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
+ * complete
+ */
+ for (i = 0; i < 200; i++) {
+ msleep(10);
+ if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
+ break;
+ }
+#endif
+ /* Clear ILT */
+ bnx2x_clear_func_ilt(bp, func);
+
+	/* Timers bug workaround for E2: if this is vnic-3,
+	 * we need to set the entire ILT range for these timers.
+ */
+ if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
+ struct ilt_client_info ilt_cli;
+ /* use dummy TM client */
+ memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
+ ilt_cli.start = 0;
+ ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
+ ilt_cli.client_num = ILT_CLIENT_TM;
+
+ bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
+ }
+
+	/* this assumes that reset_port() was called before reset_func() */
+ if (!CHIP_IS_E1x(bp))
+ bnx2x_pf_disable(bp);
+
+ bp->dmae_ready = 0;
+}
+
+static void bnx2x_reset_port(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 val;
+
+ /* Reset physical Link */
+ bnx2x__link_reset(bp);
+
+ REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
+
+ /* Do not rcv packets to BRB */
+ REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
+ /* Do not direct rcv packets that are not for MCP to the BRB */
+ REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+ NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+
+ /* Configure AEU */
+ REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
+
+ msleep(100);
+ /* Check for BRB port occupancy */
+ val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
+ if (val)
+ DP(NETIF_MSG_IFDOWN,
+ "BRB1 is not empty %d blocks are occupied\n", val);
+
+ /* TODO: Close Doorbell port? */
+}
+
+static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
+{
+ struct bnx2x_func_state_params func_params = {0};
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_HW_RESET;
+
+ func_params.params.hw_init.load_phase = load_code;
+
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+static inline int bnx2x_func_stop(struct bnx2x *bp)
+{
+ struct bnx2x_func_state_params func_params = {0};
+ int rc;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_STOP;
+
+ /*
+	 * Try to stop the function the 'good way'. If it fails (in case
+	 * of a parity error during bnx2x_chip_cleanup()) and we are
+	 * not in debug mode, perform a state transaction in order to
+	 * enable a further HW_RESET transaction.
+ */
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc) {
+#ifdef BNX2X_STOP_ON_ERROR
+ return rc;
+#else
+ BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry "
+ "transaction\n");
+ __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
+ return bnx2x_func_state_change(bp, &func_params);
+#endif
+ }
+
+ return 0;
+}
+
+/**
+ * bnx2x_send_unload_req - request unload mode from the MCP.
+ *
+ * @bp: driver handle
+ * @unload_mode: requested function's unload mode
+ *
+ * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
+ */
+u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
+{
+ u32 reset_code = 0;
+ int port = BP_PORT(bp);
+
+ /* Select the UNLOAD request mode */
+ if (unload_mode == UNLOAD_NORMAL)
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+ else if (bp->flags & NO_WOL_FLAG)
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+
+ else if (bp->wol) {
+ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ u8 *mac_addr = bp->dev->dev_addr;
+ u32 val;
+ u16 pmc;
+
+ /* The mac address is written to entries 1-4 to
+ * preserve entry 0 which is used by the PMF
+ */
+ u8 entry = (BP_VN(bp) + 1)*8;
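+		/* Each MAC-match entry is 8 bytes wide, so e.g. vn 0 lands
+		 * on byte offset 8 (entry #1), leaving entry #0 (offset 0)
+		 * for the PMF as the comment above explains.
+		 */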
+
+ val = (mac_addr[0] << 8) | mac_addr[1];
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
+
+ val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5];
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
+
+ /* Enable the PME and clear the status */
+ pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
+ pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
+ pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
+
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
+
+ } else
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+ /* Send the request to the MCP */
+ if (!BP_NOMCP(bp))
+ reset_code = bnx2x_fw_command(bp, reset_code, 0);
+ else {
+ int path = BP_PATH(bp);
+
+ DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
+ "%d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ load_count[path][0]--;
+ load_count[path][1 + port]--;
+ DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
+ "%d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ if (load_count[path][0] == 0)
+ reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
+ else if (load_count[path][1 + port] == 0)
+ reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
+ else
+ reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
+ }
+
+ return reset_code;
+}
+
+/**
+ * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_send_unload_done(struct bnx2x *bp)
+{
+ /* Report UNLOAD_DONE to MCP */
+ if (!BP_NOMCP(bp))
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+}
+
+static inline int bnx2x_func_wait_started(struct bnx2x *bp)
+{
+ int tout = 50;
+ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+
+ if (!bp->port.pmf)
+ return 0;
+
+ /*
+ * (assumption: No Attention from MCP at this stage)
+	 * The PMF is probably in the middle of a TXdisable/enable transaction
+	 * 1. Sync ISR for the default SB
+	 * 2. Sync the SP queue - this guarantees us that attention handling
+	 *    started
+	 * 3. Wait until the TXdisable/enable transaction completes
+	 *
+	 * 1+2 guarantee that if the DCBx attention was scheduled it already
+	 * changed the pending bit of the transaction from STARTED-->TX_STOPPED;
+	 * if we already received completion for the transaction the state is
+	 * TX_STOPPED. The state will return to STARTED after completion of the
+	 * TX_STOPPED-->STARTED transaction.
+ */
+
+ /* make sure default SB ISR is done */
+ if (msix)
+ synchronize_irq(bp->msix_table[0].vector);
+ else
+ synchronize_irq(bp->pdev->irq);
+
+ flush_workqueue(bnx2x_wq);
+
+ while (bnx2x_func_get_state(bp, &bp->func_obj) !=
+ BNX2X_F_STATE_STARTED && tout--)
+ msleep(20);
+
+ if (bnx2x_func_get_state(bp, &bp->func_obj) !=
+ BNX2X_F_STATE_STARTED) {
+#ifdef BNX2X_STOP_ON_ERROR
+ return -EBUSY;
+#else
+ /*
+ * Failed to complete the transaction in a "good way"
+ * Force both transactions with CLR bit
+ */
+ struct bnx2x_func_state_params func_params = {0};
+
+ DP(BNX2X_MSG_SP, "Hmmm... unexpected function state! "
+ "Forcing STARTED-->TX_ST0PPED-->STARTED\n");
+
+ func_params.f_obj = &bp->func_obj;
+ __set_bit(RAMROD_DRV_CLR_ONLY,
+ &func_params.ramrod_flags);
+
+		/* STARTED-->TX_STOPPED */
+ func_params.cmd = BNX2X_F_CMD_TX_STOP;
+ bnx2x_func_state_change(bp, &func_params);
+
+		/* TX_STOPPED-->STARTED */
+ func_params.cmd = BNX2X_F_CMD_TX_START;
+ return bnx2x_func_state_change(bp, &func_params);
+#endif
+ }
+
+ return 0;
+}
+
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
+{
+ int port = BP_PORT(bp);
+ int i, rc = 0;
+ u8 cos;
+ struct bnx2x_mcast_ramrod_params rparam = {0};
+ u32 reset_code;
+
+ /* Wait until tx fastpath tasks complete */
+ for_each_tx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ for_each_cos_in_tx_queue(fp, cos)
+ rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]);
+#ifdef BNX2X_STOP_ON_ERROR
+ if (rc)
+ return;
+#endif
+ }
+
+ /* Give HW time to discard old tx messages */
+ usleep_range(1000, 1000);
+
+ /* Clean all ETH MACs */
+ rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false);
+ if (rc < 0)
+ BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
+
+ /* Clean up UC list */
+ rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
+ true);
+ if (rc < 0)
+ BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: "
+ "%d\n", rc);
+
+ /* Disable LLH */
+ if (!CHIP_IS_E1(bp))
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
+ /* Set "drop all" (stop Rx).
+ * We need to take a netif_addr_lock() here in order to prevent
+ * a race between the completion code and this code.
+ */
+ netif_addr_lock_bh(bp->dev);
+ /* Schedule the rx_mode command */
+ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
+ set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+ else
+ bnx2x_set_storm_rx_mode(bp);
+
+ /* Cleanup multicast configuration */
+ rparam.mcast_obj = &bp->mcast_obj;
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+ if (rc < 0)
+ BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
+
+ netif_addr_unlock_bh(bp->dev);
+
+ /*
+ * Send the UNLOAD_REQUEST to the MCP. This will return if
+ * this function should perform FUNC, PORT or COMMON HW
+ * reset.
+ */
+ reset_code = bnx2x_send_unload_req(bp, unload_mode);
+
+ /*
+ * (assumption: No Attention from MCP at this stage)
+ * PMF probably in the middle of TXdisable/enable transaction
+ */
+ rc = bnx2x_func_wait_started(bp);
+ if (rc) {
+ BNX2X_ERR("bnx2x_func_wait_started failed\n");
+#ifdef BNX2X_STOP_ON_ERROR
+ return;
+#endif
+ }
+
+ /* Close multi and leading connections
+ * Completions for ramrods are collected in a synchronous way
+ */
+ for_each_queue(bp, i)
+ if (bnx2x_stop_queue(bp, i))
+#ifdef BNX2X_STOP_ON_ERROR
+ return;
+#else
+ goto unload_error;
+#endif
+	/* If the SP settings didn't get completed so far - something
+	 * has gone very wrong.
+ */
+ if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
+ BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
+
+#ifndef BNX2X_STOP_ON_ERROR
+unload_error:
+#endif
+ rc = bnx2x_func_stop(bp);
+ if (rc) {
+ BNX2X_ERR("Function stop failed!\n");
+#ifdef BNX2X_STOP_ON_ERROR
+ return;
+#endif
+ }
+
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 1);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+
+ /* Reset the chip */
+ rc = bnx2x_reset_hw(bp, reset_code);
+ if (rc)
+ BNX2X_ERR("HW_RESET failed\n");
+
+ /* Report UNLOAD_DONE to MCP */
+ bnx2x_send_unload_done(bp);
+}
+
+void bnx2x_disable_close_the_gate(struct bnx2x *bp)
+{
+ u32 val;
+
+ DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
+
+ if (CHIP_IS_E1(bp)) {
+ int port = BP_PORT(bp);
+ u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0;
+
+ val = REG_RD(bp, addr);
+ val &= ~(0x300);
+ REG_WR(bp, addr, val);
+ } else {
+ val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
+ val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
+ MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
+ }
+}
+
+/* Close gates #2, #3 and #4: */
+static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
+{
+ u32 val;
+
+ /* Gates #2 and #4a are closed/opened for "not E1" only */
+ if (!CHIP_IS_E1(bp)) {
+ /* #4 */
+ REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
+ /* #2 */
+ REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
+ }
+
+ /* #3 */
+ if (CHIP_IS_E1x(bp)) {
+ /* Prevent interrupts from HC on both ports */
+ val = REG_RD(bp, HC_REG_CONFIG_1);
+ REG_WR(bp, HC_REG_CONFIG_1,
+ (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
+ (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
+
+ val = REG_RD(bp, HC_REG_CONFIG_0);
+ REG_WR(bp, HC_REG_CONFIG_0,
+ (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
+ (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
+ } else {
+		/* Prevent incoming interrupts in IGU */
+ val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
+
+ REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
+ (!close) ?
+ (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
+ (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
+ }
+
+ DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
+ close ? "closing" : "opening");
+ mmiowb();
+}
+
+#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
+
+static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
+{
+ /* Do some magic... */
+ u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+ *magic_val = val & SHARED_MF_CLP_MAGIC;
+ MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
+}
+
+/**
+ * bnx2x_clp_reset_done - restore the value of the `magic' bit.
+ *
+ * @bp: driver handle
+ * @magic_val: old value of the `magic' bit.
+ */
+static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
+{
+ /* Restore the `magic' bit value... */
+ u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+ MF_CFG_WR(bp, shared_mf_config.clp_mb,
+ (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
+}
+
+/**
+ * bnx2x_reset_mcp_prep - prepare for MCP reset.
+ *
+ * @bp: driver handle
+ * @magic_val: old value of 'magic' bit.
+ *
+ * Takes care of CLP configurations.
+ */
+static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
+{
+ u32 shmem;
+ u32 validity_offset;
+
+ DP(NETIF_MSG_HW, "Starting\n");
+
+ /* Set `magic' bit in order to save MF config */
+ if (!CHIP_IS_E1(bp))
+ bnx2x_clp_reset_prep(bp, magic_val);
+
+ /* Get shmem offset */
+ shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+ validity_offset = offsetof(struct shmem_region, validity_map[0]);
+
+ /* Clear validity map flags */
+ if (shmem > 0)
+ REG_WR(bp, shmem + validity_offset, 0);
+}
+
+#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
+#define MCP_ONE_TIMEOUT 100 /* 100 ms */
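+/* i.e. up to MCP_TIMEOUT / MCP_ONE_TIMEOUT = 50 polls of 100ms each
+ * (each poll is 10x longer on emulation/FPGA, see bnx2x_mcp_wait_one())
+ */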
+
+/**
+ * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
+ *
+ * @bp: driver handle
+ */
+static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
+{
+	/* special handling for emulation and FPGA:
+	 * wait 10 times longer
+	 */
+ if (CHIP_REV_IS_SLOW(bp))
+ msleep(MCP_ONE_TIMEOUT*10);
+ else
+ msleep(MCP_ONE_TIMEOUT);
+}
+
+/*
+ * initializes bp->common.shmem_base and waits for validity signature to appear
+ */
+static int bnx2x_init_shmem(struct bnx2x *bp)
+{
+ int cnt = 0;
+ u32 val = 0;
+
+ do {
+ bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+ if (bp->common.shmem_base) {
+ val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
+ if (val & SHR_MEM_VALIDITY_MB)
+ return 0;
+ }
+
+ bnx2x_mcp_wait_one(bp);
+
+ } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
+
+ BNX2X_ERR("BAD MCP validity signature\n");
+
+ return -ENODEV;
+}
+
+static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
+{
+ int rc = bnx2x_init_shmem(bp);
+
+ /* Restore the `magic' bit value */
+ if (!CHIP_IS_E1(bp))
+ bnx2x_clp_reset_done(bp, magic_val);
+
+ return rc;
+}
+
+static void bnx2x_pxp_prep(struct bnx2x *bp)
+{
+ if (!CHIP_IS_E1(bp)) {
+ REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
+ REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
+ mmiowb();
+ }
+}
+
+/*
+ * Reset the whole chip except for:
+ * - PCIE core
+ * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
+ * one reset bit)
+ * - IGU
+ * - MISC (including AEU)
+ * - GRC
+ * - RBCN, RBCP
+ */
+static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
+{
+ u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
+ u32 global_bits2, stay_reset2;
+
+ /*
+ * Bits that have to be set in reset_mask2 if we want to reset 'global'
+ * (per chip) blocks.
+ */
+ global_bits2 =
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
+
+ /* Don't reset the following blocks */
+ not_reset_mask1 =
+ MISC_REGISTERS_RESET_REG_1_RST_HC |
+ MISC_REGISTERS_RESET_REG_1_RST_PXPV |
+ MISC_REGISTERS_RESET_REG_1_RST_PXP;
+
+ not_reset_mask2 =
+ MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_RBCN |
+ MISC_REGISTERS_RESET_REG_2_RST_GRC |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
+ MISC_REGISTERS_RESET_REG_2_RST_ATC |
+ MISC_REGISTERS_RESET_REG_2_PGLC;
+
+ /*
+ * Keep the following blocks in reset:
+ * - all xxMACs are handled by the bnx2x_link code.
+ */
+ stay_reset2 =
+ MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
+ MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
+ MISC_REGISTERS_RESET_REG_2_UMAC0 |
+ MISC_REGISTERS_RESET_REG_2_UMAC1 |
+ MISC_REGISTERS_RESET_REG_2_XMAC |
+ MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
+
+ /* Full reset masks according to the chip */
+ reset_mask1 = 0xffffffff;
+
+ if (CHIP_IS_E1(bp))
+ reset_mask2 = 0xffff;
+ else if (CHIP_IS_E1H(bp))
+ reset_mask2 = 0x1ffff;
+ else if (CHIP_IS_E2(bp))
+ reset_mask2 = 0xfffff;
+ else /* CHIP_IS_E3 */
+ reset_mask2 = 0x3ffffff;
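+	/* These masks simply cover all the RESET_REG_2 bits each chip
+	 * generation implements: 16 (E1), 17 (E1H), 20 (E2) and 26 (E3).
+	 */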
+
+ /* Don't reset global blocks unless we need to */
+ if (!global)
+ reset_mask2 &= ~global_bits2;
+
+ /*
+ * In case of attention in the QM, we need to reset PXP
+ * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
+ * because otherwise QM reset would release 'close the gates' shortly
+ * before resetting the PXP, then the PSWRQ would send a write
+ * request to PGLUE. Then when PXP is reset, PGLUE would try to
+ * read the payload data from PSWWR, but PSWWR would not
+	 * respond. The write queue in PGLUE would get stuck, and DMAE
+	 * commands would not return. Therefore it's important to reset
+	 * the second
+ * reset register (containing the
+ * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
+ * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
+ * bit).
+ */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ reset_mask2 & (~not_reset_mask2));
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ reset_mask1 & (~not_reset_mask1));
+
+ barrier();
+ mmiowb();
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ reset_mask2 & (~stay_reset2));
+
+ barrier();
+ mmiowb();
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
+ mmiowb();
+}
+
+/**
+ * bnx2x_er_poll_igu_vq - poll for the pending writes bit
+ *
+ * @bp: driver handle
+ *
+ * The bit should get cleared in no more than 1s. Returns 0 if the
+ * pending writes bit gets cleared.
+ */
+static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
+{
+ u32 cnt = 1000;
+ u32 pend_bits = 0;
+
+ do {
+ pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
+
+ if (pend_bits == 0)
+ break;
+
+ usleep_range(1000, 1000);
+ } while (cnt-- > 0);
+
+ if (cnt <= 0) {
+ BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
+ pend_bits);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int bnx2x_process_kill(struct bnx2x *bp, bool global)
+{
+ int cnt = 1000;
+ u32 val = 0;
+ u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
+
+ /* Empty the Tetris buffer, wait for 1s */
+ do {
+ sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
+ blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
+ port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
+ port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
+ pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
+ if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
+ ((port_is_idle_0 & 0x1) == 0x1) &&
+ ((port_is_idle_1 & 0x1) == 0x1) &&
+ (pgl_exp_rom2 == 0xffffffff))
+ break;
+ usleep_range(1000, 1000);
+ } while (cnt-- > 0);
+
+ if (cnt <= 0) {
+ DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
+ " are still"
+ " outstanding read requests after 1s!\n");
+ DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
+ " port_is_idle_0=0x%08x,"
+ " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
+ sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
+ pgl_exp_rom2);
+ return -EAGAIN;
+ }
+
+ barrier();
+
+ /* Close gates #2, #3 and #4 */
+ bnx2x_set_234_gates(bp, true);
+
+ /* Poll for IGU VQs for 57712 and newer chips */
+ if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
+ return -EAGAIN;
+
+ /* TBD: Indicate that "process kill" is in progress to MCP */
+
+ /* Clear "unprepared" bit */
+ REG_WR(bp, MISC_REG_UNPREPARED, 0);
+ barrier();
+
+ /* Make sure all is written to the chip before the reset */
+ mmiowb();
+
+ /* Wait for 1ms to empty GLUE and PCI-E core queues,
+ * PSWHST, GRC and PSWRD Tetris buffer.
+ */
+ usleep_range(1000, 1000);
+
+ /* Prepare to chip reset: */
+ /* MCP */
+ if (global)
+ bnx2x_reset_mcp_prep(bp, &val);
+
+ /* PXP */
+ bnx2x_pxp_prep(bp);
+ barrier();
+
+ /* reset the chip */
+ bnx2x_process_kill_chip_reset(bp, global);
+ barrier();
+
+ /* Recover after reset: */
+ /* MCP */
+ if (global && bnx2x_reset_mcp_comp(bp, val))
+ return -EAGAIN;
+
+ /* TBD: Add resetting the NO_MCP mode DB here */
+
+ /* PXP */
+ bnx2x_pxp_prep(bp);
+
+ /* Open the gates #2, #3 and #4 */
+ bnx2x_set_234_gates(bp, false);
+
+ /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
+ * reset state, re-enable attentions. */
+
+ return 0;
+}
+
+int bnx2x_leader_reset(struct bnx2x *bp)
+{
+ int rc = 0;
+ bool global = bnx2x_reset_is_global(bp);
+
+ /* Try to recover after the failure */
+ if (bnx2x_process_kill(bp, global)) {
+ netdev_err(bp->dev, "Something bad had happen on engine %d! "
+ "Aii!\n", BP_PATH(bp));
+ rc = -EAGAIN;
+ goto exit_leader_reset;
+ }
+
+ /*
+ * Clear RESET_IN_PROGRES and RESET_GLOBAL bits and update the driver
+ * state.
+ */
+ bnx2x_set_reset_done(bp);
+ if (global)
+ bnx2x_clear_reset_global(bp);
+
+exit_leader_reset:
+ bp->is_leader = 0;
+ bnx2x_release_leader_lock(bp);
+ smp_mb();
+ return rc;
+}
+
+static inline void bnx2x_recovery_failed(struct bnx2x *bp)
+{
+ netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
+
+ /* Disconnect this device */
+ netif_device_detach(bp->dev);
+
+ /*
+ * Block ifup for all function on this engine until "process kill"
+ * or power cycle.
+ */
+ bnx2x_set_reset_in_progress(bp);
+
+ /* Shut down the power */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+
+ bp->recovery_state = BNX2X_RECOVERY_FAILED;
+
+ smp_mb();
+}
+
+/*
+ * Assumption: runs under rtnl lock. This together with the fact
+ * that it's called only from bnx2x_sp_rtnl() ensures that it
+ * will never be called when netif_running(bp->dev) is false.
+ */
+static void bnx2x_parity_recover(struct bnx2x *bp)
+{
+ bool global = false;
+
+ DP(NETIF_MSG_HW, "Handling parity\n");
+ while (1) {
+ switch (bp->recovery_state) {
+ case BNX2X_RECOVERY_INIT:
+ DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
+ bnx2x_chk_parity_attn(bp, &global, false);
+
+ /* Try to get a LEADER_LOCK HW lock */
+ if (bnx2x_trylock_leader_lock(bp)) {
+ bnx2x_set_reset_in_progress(bp);
+ /*
+ * Check if there is a global attention and if
+ * there was a global attention, set the global
+ * reset bit.
+ */
+
+ if (global)
+ bnx2x_set_reset_global(bp);
+
+ bp->is_leader = 1;
+ }
+
+ /* Stop the driver */
+ /* If interface has been removed - break */
+ if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
+ return;
+
+ bp->recovery_state = BNX2X_RECOVERY_WAIT;
+
+ /*
+ * Reset MCP command sequence number and MCP mail box
+ * sequence as we are going to reset the MCP.
+ */
+ if (global) {
+ bp->fw_seq = 0;
+ bp->fw_drv_pulse_wr_seq = 0;
+ }
+
+ /* Ensure "is_leader", MCP command sequence and
+ * "recovery_state" update values are seen on other
+ * CPUs.
+ */
+ smp_mb();
+ break;
+
+ case BNX2X_RECOVERY_WAIT:
+ DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
+ if (bp->is_leader) {
+ int other_engine = BP_PATH(bp) ? 0 : 1;
+ u32 other_load_counter =
+ bnx2x_get_load_cnt(bp, other_engine);
+ u32 load_counter =
+ bnx2x_get_load_cnt(bp, BP_PATH(bp));
+ global = bnx2x_reset_is_global(bp);
+
+ /*
+ * In case of a parity in a global block, let
+ * the first leader that performs a
+ * leader_reset() reset the global blocks in
+ * order to clear global attentions. Otherwise
+				 * the gates will remain closed for that
+ * engine.
+ */
+ if (load_counter ||
+ (global && other_load_counter)) {
+ /* Wait until all other functions get
+ * down.
+ */
+ schedule_delayed_work(&bp->sp_rtnl_task,
+ HZ/10);
+ return;
+ } else {
+ /* If all other functions got down -
+ * try to bring the chip back to
+ * normal. In any case it's an exit
+ * point for a leader.
+ */
+ if (bnx2x_leader_reset(bp)) {
+ bnx2x_recovery_failed(bp);
+ return;
+ }
+
+				/* If we are here, it means that the
+				 * leader has succeeded and doesn't
+				 * want to be a leader any more. Try
+				 * to continue as a non-leader.
+ */
+ break;
+ }
+ } else { /* non-leader */
+ if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
+ /* Try to get a LEADER_LOCK HW lock as
+ * long as a former leader may have
+ * been unloaded by the user or
+ * released a leadership by another
+ * reason.
+ */
+ if (bnx2x_trylock_leader_lock(bp)) {
+ /* I'm a leader now! Restart a
+ * switch case.
+ */
+ bp->is_leader = 1;
+ break;
+ }
+
+ schedule_delayed_work(&bp->sp_rtnl_task,
+ HZ/10);
+ return;
+
+ } else {
+ /*
+ * If there was a global attention, wait
+ * for it to be cleared.
+ */
+ if (bnx2x_reset_is_global(bp)) {
+ schedule_delayed_work(
+ &bp->sp_rtnl_task,
+ HZ/10);
+ return;
+ }
+
+ if (bnx2x_nic_load(bp, LOAD_NORMAL))
+ bnx2x_recovery_failed(bp);
+ else {
+ bp->recovery_state =
+ BNX2X_RECOVERY_DONE;
+ smp_mb();
+ }
+
+ return;
+ }
+ }
+ default:
+ return;
+ }
+ }
+}
+
+/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
+ * scheduled on a general queue in order to prevent a deadlock.
+ */
+static void bnx2x_sp_rtnl_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
+
+ rtnl_lock();
+
+ if (!netif_running(bp->dev))
+ goto sp_rtnl_exit;
+
+ /* if stop on error is defined no recovery flows should be executed */
+#ifdef BNX2X_STOP_ON_ERROR
+ BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined "
+ "so reset not done to allow debug dump,\n"
+ "you will need to reboot when done\n");
+ goto sp_rtnl_not_reset;
+#endif
+
+ if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
+ /*
+ * Clear all pending SP commands as we are going to reset the
+ * function anyway.
+ */
+ bp->sp_rtnl_state = 0;
+ smp_mb();
+
+ bnx2x_parity_recover(bp);
+
+ goto sp_rtnl_exit;
+ }
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
+ /*
+ * Clear all pending SP commands as we are going to reset the
+ * function anyway.
+ */
+ bp->sp_rtnl_state = 0;
+ smp_mb();
+
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_load(bp, LOAD_NORMAL);
+
+ goto sp_rtnl_exit;
+ }
+#ifdef BNX2X_STOP_ON_ERROR
+sp_rtnl_not_reset:
+#endif
+ if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
+ bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
+
+sp_rtnl_exit:
+ rtnl_unlock();
+}
+
+/* end of nic load/unload */
+
+static void bnx2x_period_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
+
+ if (!netif_running(bp->dev))
+ goto period_task_exit;
+
+ if (CHIP_REV_IS_SLOW(bp)) {
+ BNX2X_ERR("period task called on emulation, ignoring\n");
+ goto period_task_exit;
+ }
+
+ bnx2x_acquire_phy_lock(bp);
+ /*
+ * The barrier is needed to ensure the ordering between the writing to
+ * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
+ * the reading here.
+ */
+ smp_mb();
+ if (bp->port.pmf) {
+ bnx2x_period_func(&bp->link_params, &bp->link_vars);
+
+ /* Re-queue task in 1 sec */
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
+ }
+
+ bnx2x_release_phy_lock(bp);
+period_task_exit:
+ return;
+}
+
+/*
+ * Init service functions
+ */
+
+static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+{
+ u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
+ u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
+ return base + (BP_ABS_FUNC(bp)) * stride;
+}
+
+static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
+{
+ u32 reg = bnx2x_get_pretend_reg(bp);
+
+ /* Flush all outstanding writes */
+ mmiowb();
+
+ /* Pretend to be function 0 */
+ REG_WR(bp, reg, 0);
+ REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
+
+ /* From now we are in the "like-E1" mode */
+ bnx2x_int_disable(bp);
+
+ /* Flush all outstanding writes */
+ mmiowb();
+
+ /* Restore the original function */
+ REG_WR(bp, reg, BP_ABS_FUNC(bp));
+ REG_RD(bp, reg);
+}
+
+static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
+{
+ if (CHIP_IS_E1(bp))
+ bnx2x_int_disable(bp);
+ else
+ bnx2x_undi_int_disable_e1h(bp);
+}
+
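+/* If a pre-boot (UNDI) driver left the device initialized, perform a
+ * graceful unload handshake with the MCP on both ports if needed and
+ * reset the device before the driver takes over.
+ */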
+static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
+{
+ u32 val;
+
+ /* Check if there is any driver already loaded */
+ val = REG_RD(bp, MISC_REG_UNPREPARED);
+ if (val == 0x1) {
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+ /*
+ * Check if it is the UNDI driver: the UNDI driver initializes
+ * the CID offset for the normal doorbell to 0x7.
+ */
+ val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
+ if (val == 0x7) {
+ u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+ /* save our pf_num */
+ int orig_pf_num = bp->pf_num;
+ int port;
+ u32 swap_en, swap_val, value;
+
+ /* clear the UNDI indication */
+ REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+
+ BNX2X_DEV_INFO("UNDI is active! reset device\n");
+
+ /* try to unload UNDI on port 0 */
+ bp->pf_num = 0;
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ reset_code = bnx2x_fw_command(bp, reset_code, 0);
+
+ /* if UNDI is loaded on the other port */
+ if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+
+ /* send "DONE" for previous unload */
+ bnx2x_fw_command(bp,
+ DRV_MSG_CODE_UNLOAD_DONE, 0);
+
+ /* unload UNDI on port 1 */
+ bp->pf_num = 1;
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+ bnx2x_fw_command(bp, reset_code, 0);
+ }
+
+ bnx2x_undi_int_disable(bp);
+ port = BP_PORT(bp);
+
+ /* Close input traffic and wait for it to drain. */
+ /* Do not receive packets into the BRB */
+ REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
+ NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
+ /* Do not direct received packets that are not destined for
+ * the MCP to the BRB */
+ REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+ NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+ /* clear AEU */
+ REG_WR(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
+ msleep(10);
+
+ /* save NIG port swap info */
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ /* reset device */
+ REG_WR(bp,
+ GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ 0xd3ffffff);
+
+ value = 0x1400;
+ if (CHIP_IS_E3(bp)) {
+ value |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+ value |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+ }
+
+ REG_WR(bp,
+ GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ value);
+
+ /* take the NIG out of reset and restore swap values */
+ REG_WR(bp,
+ GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+ MISC_REGISTERS_RESET_REG_1_RST_NIG);
+ REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
+ REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
+
+ /* send unload done to the MCP */
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+
+ /* restore our func and fw_seq */
+ bp->pf_num = orig_pf_num;
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ }
+
+ /* now it's safe to release the lock */
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+ }
+}
+
+static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
+{
+ u32 val, val2, val3, val4, id;
+ u16 pmc;
+
+ /* Get the chip revision id and number. */
+ /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
+ val = REG_RD(bp, MISC_REG_CHIP_NUM);
+ id = ((val & 0xffff) << 16);
+ val = REG_RD(bp, MISC_REG_CHIP_REV);
+ id |= ((val & 0xf) << 12);
+ val = REG_RD(bp, MISC_REG_CHIP_METAL);
+ id |= ((val & 0xff) << 4);
+ val = REG_RD(bp, MISC_REG_BOND_ID);
+ id |= (val & 0xf);
+ bp->common.chip_id = id;
+
+ /* Set doorbell size */
+ bp->db_size = (1 << BNX2X_DB_SHIFT);
+
+ if (!CHIP_IS_E1x(bp)) {
+ val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+ if ((val & 1) == 0)
+ val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
+ else
+ val = (val >> 1) & 1;
+ BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
+ "2_PORT_MODE");
+ bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
+ CHIP_2_PORT_MODE;
+
+ if (CHIP_MODE_IS_4_PORT(bp))
+ bp->pfid = (bp->pf_num >> 1); /* 0..3 */
+ else
+ bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
+ } else {
+ bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
+ bp->pfid = bp->pf_num; /* 0..7 */
+ }
+
+ bp->link_params.chip_id = bp->common.chip_id;
+ BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
+
+ val = (REG_RD(bp, 0x2874) & 0x55);
+ if ((bp->common.chip_id & 0x1) ||
+ (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
+ bp->flags |= ONE_PORT_FLAG;
+ BNX2X_DEV_INFO("single port device\n");
+ }
+
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
+ bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
+ (val & MCPR_NVM_CFG4_FLASH_SIZE));
+ BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
+ bp->common.flash_size, bp->common.flash_size);
+
+ bnx2x_init_shmem(bp);
+
+ bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
+ MISC_REG_GENERIC_CR_1 :
+ MISC_REG_GENERIC_CR_0));
+
+ bp->link_params.shmem_base = bp->common.shmem_base;
+ bp->link_params.shmem2_base = bp->common.shmem2_base;
+ BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
+ bp->common.shmem_base, bp->common.shmem2_base);
+
+ if (!bp->common.shmem_base) {
+ BNX2X_DEV_INFO("MCP not active\n");
+ bp->flags |= NO_MCP_FLAG;
+ return;
+ }
+
+ bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
+ BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
+
+ bp->link_params.hw_led_mode = ((bp->common.hw_config &
+ SHARED_HW_CFG_LED_MODE_MASK) >>
+ SHARED_HW_CFG_LED_MODE_SHIFT);
+
+ bp->link_params.feature_config_flags = 0;
+ val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
+ if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
+ bp->link_params.feature_config_flags |=
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
+ else
+ bp->link_params.feature_config_flags &=
+ ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
+
+ val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
+ bp->common.bc_ver = val;
+ BNX2X_DEV_INFO("bc_ver %X\n", val);
+ if (val < BNX2X_BC_VER) {
+ /* for now only warn;
+ * later we might need to enforce this */
+ BNX2X_ERR("This driver needs bc_ver %X but found %X, "
+ "please upgrade BC\n", BNX2X_BC_VER, val);
+ }
+ bp->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
+ FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
+
+ bp->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
+ FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
+
+ bp->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
+ FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+
+ pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
+ bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
+
+ BNX2X_DEV_INFO("%sWoL capable\n",
+ (bp->flags & NO_WOL_FLAG) ? "not " : "");
+
+ val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
+ val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
+ val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
+ val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
+
+ dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
+ val, val2, val3, val4);
+}
+
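+/* Each IGU CAM entry maps a status block to its owning function (FID)
+ * and interrupt vector; the macros below extract those fields.
+ */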
+#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
+#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
+
+static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
+{
+ int pfid = BP_FUNC(bp);
+ int igu_sb_id;
+ u32 val;
+ u8 fid, igu_sb_cnt = 0;
+
+ bp->igu_base_sb = 0xff;
+ if (CHIP_INT_MODE_IS_BC(bp)) {
+ int vn = BP_VN(bp);
+ igu_sb_cnt = bp->igu_sb_cnt;
+ bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
+ FP_SB_MAX_E1x;
+
+ bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
+ (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
+
+ return;
+ }
+
+ /* IGU in normal mode - read CAM */
+ for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
+ igu_sb_id++) {
+ val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
+ if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
+ continue;
+ fid = IGU_FID(val);
+ if ((fid & IGU_FID_ENCODE_IS_PF)) {
+ if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
+ continue;
+ if (IGU_VEC(val) == 0)
+ /* default status block */
+ bp->igu_dsb_id = igu_sb_id;
+ else {
+ if (bp->igu_base_sb == 0xff)
+ bp->igu_base_sb = igu_sb_id;
+ igu_sb_cnt++;
+ }
+ }
+ }
+
+#ifdef CONFIG_PCI_MSI
+ /*
+ * It's expected that the number of CAM entries for this function
+ * equals the number evaluated based on the MSI-X table size. We
+ * want a harsh warning if these values are different!
+ */
+ WARN_ON(bp->igu_sb_cnt != igu_sb_cnt);
+#endif
+
+ if (igu_sb_cnt == 0)
+ BNX2X_ERR("CAM configuration error\n");
+}
+
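+/* Build the per-configuration "supported" masks from the PHY
+ * capabilities and then trim them according to the NVRAM
+ * speed_cap_mask.
+ */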
+static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
+ u32 switch_cfg)
+{
+ int cfg_size = 0, idx, port = BP_PORT(bp);
+
+ /* Aggregation of supported attributes of all external phys */
+ bp->port.supported[0] = 0;
+ bp->port.supported[1] = 0;
+ switch (bp->link_params.num_phys) {
+ case 1:
+ bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
+ cfg_size = 1;
+ break;
+ case 2:
+ bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
+ cfg_size = 1;
+ break;
+ case 3:
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+ bp->port.supported[1] =
+ bp->link_params.phy[EXT_PHY1].supported;
+ bp->port.supported[0] =
+ bp->link_params.phy[EXT_PHY2].supported;
+ } else {
+ bp->port.supported[0] =
+ bp->link_params.phy[EXT_PHY1].supported;
+ bp->port.supported[1] =
+ bp->link_params.phy[EXT_PHY2].supported;
+ }
+ cfg_size = 2;
+ break;
+ }
+
+ if (!(bp->port.supported[0] || bp->port.supported[1])) {
+ BNX2X_ERR("NVRAM config error. BAD phy config."
+ "PHY1 config 0x%x, PHY2 config 0x%x\n",
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config),
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config2));
+ return;
+ }
+
+ if (CHIP_IS_E3(bp))
+ bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
+ else {
+ switch (switch_cfg) {
+ case SWITCH_CFG_1G:
+ bp->port.phy_addr = REG_RD(
+ bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
+ break;
+ case SWITCH_CFG_10G:
+ bp->port.phy_addr = REG_RD(
+ bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
+ break;
+ default:
+ BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
+ bp->port.link_config[0]);
+ return;
+ }
+ }
+ BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
+ /* mask what we support according to speed_cap_mask per configuration */
+ for (idx = 0; idx < cfg_size; idx++) {
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
+ bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
+ bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
+ bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
+ bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
+ bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
+ bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
+ bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
+
+ }
+
+ BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
+ bp->port.supported[1]);
+}
+
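+/* Translate the NVRAM link_config of each PHY configuration into the
+ * requested line speed, duplex, flow control and advertised masks.
+ */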
+static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
+{
+ u32 link_config, idx, cfg_size = 0;
+ bp->port.advertising[0] = 0;
+ bp->port.advertising[1] = 0;
+ switch (bp->link_params.num_phys) {
+ case 1:
+ case 2:
+ cfg_size = 1;
+ break;
+ case 3:
+ cfg_size = 2;
+ break;
+ }
+ for (idx = 0; idx < cfg_size; idx++) {
+ bp->link_params.req_duplex[idx] = DUPLEX_FULL;
+ link_config = bp->port.link_config[idx];
+ switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+ case PORT_FEATURE_LINK_SPEED_AUTO:
+ if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_AUTO_NEG;
+ bp->port.advertising[idx] |=
+ bp->port.supported[idx];
+ } else {
+ /* force 10G, no AN */
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10000;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10000baseT_Full |
+ ADVERTISED_FIBRE);
+ continue;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_10M_FULL:
+ if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10baseT_Full |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_10M_HALF:
+ if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10;
+ bp->link_params.req_duplex[idx] =
+ DUPLEX_HALF;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10baseT_Half |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_100M_FULL:
+ if (bp->port.supported[idx] &
+ SUPPORTED_100baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_100;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_100baseT_Full |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_100M_HALF:
+ if (bp->port.supported[idx] &
+ SUPPORTED_100baseT_Half) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_100;
+ bp->link_params.req_duplex[idx] =
+ DUPLEX_HALF;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_100baseT_Half |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_1G:
+ if (bp->port.supported[idx] &
+ SUPPORTED_1000baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_1000;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_1000baseT_Full |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_2_5G:
+ if (bp->port.supported[idx] &
+ SUPPORTED_2500baseX_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_2500;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_2500baseX_Full |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_10G_CX4:
+ if (bp->port.supported[idx] &
+ SUPPORTED_10000baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10000;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10000baseT_Full |
+ ADVERTISED_FIBRE);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+ case PORT_FEATURE_LINK_SPEED_20G:
+ bp->link_params.req_line_speed[idx] = SPEED_20000;
+
+ break;
+ default:
+ BNX2X_ERR("NVRAM config error. "
+ "BAD link speed link_config 0x%x\n",
+ link_config);
+ bp->link_params.req_line_speed[idx] =
+ SPEED_AUTO_NEG;
+ bp->port.advertising[idx] =
+ bp->port.supported[idx];
+ break;
+ }
+
+ bp->link_params.req_flow_ctrl[idx] = (link_config &
+ PORT_FEATURE_FLOW_CONTROL_MASK);
+ if ((bp->link_params.req_flow_ctrl[idx] ==
+ BNX2X_FLOW_CTRL_AUTO) &&
+ !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
+ bp->link_params.req_flow_ctrl[idx] =
+ BNX2X_FLOW_CTRL_NONE;
+ }
+
+ BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
+ " 0x%x advertising 0x%x\n",
+ bp->link_params.req_line_speed[idx],
+ bp->link_params.req_duplex[idx],
+ bp->link_params.req_flow_ctrl[idx],
+ bp->port.advertising[idx]);
+ }
+}
+
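+/* Assemble a 6-byte MAC address from the 16-bit upper and 32-bit lower
+ * halves read from shmem, converting each half to big-endian (network)
+ * byte order first.
+ */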
+static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
+{
+ mac_hi = cpu_to_be16(mac_hi);
+ mac_lo = cpu_to_be32(mac_lo);
+ memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
+ memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
+}
+
+static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 config;
+ u32 ext_phy_type, ext_phy_config;
+
+ bp->link_params.bp = bp;
+ bp->link_params.port = port;
+
+ bp->link_params.lane_config =
+ SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
+
+ bp->link_params.speed_cap_mask[0] =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].speed_capability_mask);
+ bp->link_params.speed_cap_mask[1] =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].speed_capability_mask2);
+ bp->port.link_config[0] =
+ SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
+
+ bp->port.link_config[1] =
+ SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
+
+ bp->link_params.multi_phy_config =
+ SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
+ /* If the device is capable of WoL, set the default state according
+ * to the HW
+ */
+ config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
+ bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
+ (config & PORT_FEATURE_WOL_ENABLED));
+
+ BNX2X_DEV_INFO("lane_config 0x%08x "
+ "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
+ bp->link_params.lane_config,
+ bp->link_params.speed_cap_mask[0],
+ bp->port.link_config[0]);
+
+ bp->link_params.switch_cfg = (bp->port.link_config[0] &
+ PORT_FEATURE_CONNECTED_SWITCH_MASK);
+ bnx2x_phy_probe(&bp->link_params);
+ bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
+
+ bnx2x_link_settings_requested(bp);
+
+ /*
+ * If connected directly, work with the internal PHY, otherwise, work
+ * with the external PHY
+ */
+ ext_phy_config =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config);
+ ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+ if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+ bp->mdio.prtad = bp->port.phy_addr;
+
+ else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
+ (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
+ bp->mdio.prtad =
+ XGXS_EXT_PHY_ADDR(ext_phy_config);
+
+ /*
+ * Check if a HW lock is required to access the MDC/MDIO bus to the
+ * PHY(s). In MF mode it is always set, to cover the self-test cases.
+ */
+ if (IS_MF(bp))
+ bp->port.need_hw_lock = 1;
+ else
+ bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
+ bp->common.shmem_base,
+ bp->common.shmem2_base);
+}
+
+#ifdef BCM_CNIC
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int func = BP_ABS_FUNC(bp);
+
+ u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+ drv_lic_key[port].max_iscsi_conn);
+ u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+ drv_lic_key[port].max_fcoe_conn);
+
+ /* Get the number of maximum allowed iSCSI and FCoE connections */
+ bp->cnic_eth_dev.max_iscsi_conn =
+ (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
+ BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
+
+ bp->cnic_eth_dev.max_fcoe_conn =
+ (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
+ BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
+
+ /* Read the WWN: */
+ if (!IS_MF(bp)) {
+ /* Port info */
+ bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].
+ fcoe_wwn_port_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].
+ fcoe_wwn_port_name_lower);
+
+ /* Node info */
+ bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].
+ fcoe_wwn_node_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].
+ fcoe_wwn_node_name_lower);
+ } else if (!IS_MF_SD(bp)) {
+ u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
+
+ /*
+ * Read the WWN info only if the FCoE feature is enabled for
+ * this function.
+ */
+ if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+ /* Port info */
+ bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
+ MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_wwn_port_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
+ MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_wwn_port_name_lower);
+
+ /* Node info */
+ bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
+ MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_wwn_node_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
+ MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_wwn_node_name_lower);
+ }
+ }
+
+ BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
+ bp->cnic_eth_dev.max_iscsi_conn,
+ bp->cnic_eth_dev.max_fcoe_conn);
+
+ /*
+ * If maximum allowed number of connections is zero -
+ * disable the feature.
+ */
+ if (!bp->cnic_eth_dev.max_iscsi_conn)
+ bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+ if (!bp->cnic_eth_dev.max_fcoe_conn)
+ bp->flags |= NO_FCOE_FLAG;
+}
+#endif
+
+static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
+{
+ u32 val, val2;
+ int func = BP_ABS_FUNC(bp);
+ int port = BP_PORT(bp);
+#ifdef BCM_CNIC
+ u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
+ u8 *fip_mac = bp->fip_mac;
+#endif
+
+ /* Zero primary MAC configuration */
+ memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
+ if (BP_NOMCP(bp)) {
+ BNX2X_ERROR("warning: random MAC workaround active\n");
+ random_ether_addr(bp->dev->dev_addr);
+ } else if (IS_MF(bp)) {
+ val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
+ val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
+ if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
+ (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
+ bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+#ifdef BCM_CNIC
+ /* iSCSI and FCoE NPAR MACs: if either the iSCSI or the FCoE
+ * MAC is missing, the corresponding feature should be disabled.
+ */
+ if (IS_MF_SI(bp)) {
+ u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
+ if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
+ val2 = MF_CFG_RD(bp, func_ext_config[func].
+ iscsi_mac_addr_upper);
+ val = MF_CFG_RD(bp, func_ext_config[func].
+ iscsi_mac_addr_lower);
+ bnx2x_set_mac_buf(iscsi_mac, val, val2);
+ BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
+ iscsi_mac);
+ } else
+ bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+ if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+ val2 = MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_mac_addr_upper);
+ val = MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_mac_addr_lower);
+ bnx2x_set_mac_buf(fip_mac, val, val2);
+ BNX2X_DEV_INFO("Read FCoE L2 MAC to %pM\n",
+ fip_mac);
+
+ } else
+ bp->flags |= NO_FCOE_FLAG;
+ }
+#endif
+ } else {
+ /* in SF read MACs from port configuration */
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+ bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+#ifdef BCM_CNIC
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ iscsi_mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ iscsi_mac_lower);
+ bnx2x_set_mac_buf(iscsi_mac, val, val2);
+
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ fcoe_fip_mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ fcoe_fip_mac_lower);
+ bnx2x_set_mac_buf(fip_mac, val, val2);
+#endif
+ }
+
+ memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
+ memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
+
+#ifdef BCM_CNIC
+ /* Set the FCoE MAC in MF_SD mode */
+ if (!CHIP_IS_E1x(bp) && IS_MF_SD(bp))
+ memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
+
+ /* Disable iSCSI if MAC configuration is
+ * invalid.
+ */
+ if (!is_valid_ether_addr(iscsi_mac)) {
+ bp->flags |= NO_ISCSI_FLAG;
+ memset(iscsi_mac, 0, ETH_ALEN);
+ }
+
+ /* Disable FCoE if MAC configuration is
+ * invalid.
+ */
+ if (!is_valid_ether_addr(fip_mac)) {
+ bp->flags |= NO_FCOE_FLAG;
+ memset(bp->fip_mac, 0, ETH_ALEN);
+ }
+#endif
+
+ if (!is_valid_ether_addr(bp->dev->dev_addr))
+ dev_err(&bp->pdev->dev,
+ "bad Ethernet MAC address configuration: "
+ "%pM, change it manually before bringing up "
+ "the appropriate network interface\n",
+ bp->dev->dev_addr);
+}
+
+static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
+{
+ int /*abs*/func = BP_ABS_FUNC(bp);
+ int vn;
+ u32 val = 0;
+ int rc = 0;
+
+ bnx2x_get_common_hwinfo(bp);
+
+ /*
+ * initialize IGU parameters
+ */
+ if (CHIP_IS_E1x(bp)) {
+ bp->common.int_block = INT_BLOCK_HC;
+
+ bp->igu_dsb_id = DEF_SB_IGU_ID;
+ bp->igu_base_sb = 0;
+ } else {
+ bp->common.int_block = INT_BLOCK_IGU;
+
+ /* do not allow device reset during IGU info processing */
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
+ val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
+
+ if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
+ int tout = 5000;
+
+ BNX2X_DEV_INFO("FORCING Normal Mode\n");
+
+ val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
+ REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
+ REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
+
+ while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
+ tout--;
+ usleep_range(1000, 1000);
+ }
+
+ if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
+ dev_err(&bp->pdev->dev,
+ "FORCING Normal Mode failed!!!\n");
+ return -EPERM;
+ }
+ }
+
+ if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
+ BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
+ bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
+ } else
+ BNX2X_DEV_INFO("IGU Normal Mode\n");
+
+ bnx2x_get_igu_cam_info(bp);
+
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+ }
+
+ /*
+ * Set the base FW non-default (fast path) status block ID. This value
+ * is used to initialize the fw_sb_id saved on the fp/queue structure,
+ * which determines the ID used by the FW.
+ */
+ if (CHIP_IS_E1x(bp))
+ bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
+ else /*
+ * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
+ * the same queue are indicated on the same IGU SB). So we prefer
+ * FW and IGU SBs to be the same value.
+ */
+ bp->base_fw_ndsb = bp->igu_base_sb;
+
+ BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
+ "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
+ bp->igu_sb_cnt, bp->base_fw_ndsb);
+
+ /*
+ * Initialize MF configuration
+ */
+
+ bp->mf_ov = 0;
+ bp->mf_mode = 0;
+ vn = BP_VN(bp);
+
+ if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
+ BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
+ bp->common.shmem2_base, SHMEM2_RD(bp, size),
+ (u32)offsetof(struct shmem2_region, mf_cfg_addr));
+
+ if (SHMEM2_HAS(bp, mf_cfg_addr))
+ bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
+ else
+ bp->common.mf_cfg_base = bp->common.shmem_base +
+ offsetof(struct shmem_region, func_mb) +
+ E1H_FUNC_MAX * sizeof(struct drv_func_mb);
+ /*
+ * get mf configuration:
+ * 1. existence of MF configuration
+ * 2. MAC address must be legal (check only upper bytes)
+ * for Switch-Independent mode;
+ * OVLAN must be legal for Switch-Dependent mode
+ * 3. SF_MODE configures specific MF mode
+ */
+ if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
+ /* get mf configuration */
+ val = SHMEM_RD(bp,
+ dev_info.shared_feature_config.config);
+ val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
+
+ switch (val) {
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
+ val = MF_CFG_RD(bp, func_mf_config[func].
+ mac_upper);
+ /* check for legal mac (upper bytes)*/
+ if (val != 0xffff) {
+ bp->mf_mode = MULTI_FUNCTION_SI;
+ bp->mf_config[vn] = MF_CFG_RD(bp,
+ func_mf_config[func].config);
+ } else
+ BNX2X_DEV_INFO("illegal MAC address "
+ "for SI\n");
+ break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
+ /* get OV configuration */
+ val = MF_CFG_RD(bp,
+ func_mf_config[FUNC_0].e1hov_tag);
+ val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
+
+ if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+ bp->mf_mode = MULTI_FUNCTION_SD;
+ bp->mf_config[vn] = MF_CFG_RD(bp,
+ func_mf_config[func].config);
+ } else
+ BNX2X_DEV_INFO("illegal OV for SD\n");
+ break;
+ default:
+ /* Unknown configuration: reset mf_config */
+ bp->mf_config[vn] = 0;
+ BNX2X_DEV_INFO("unkown MF mode 0x%x\n", val);
+ }
+ }
+
+ BNX2X_DEV_INFO("%s function mode\n",
+ IS_MF(bp) ? "multi" : "single");
+
+ switch (bp->mf_mode) {
+ case MULTI_FUNCTION_SD:
+ val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+ FUNC_MF_CFG_E1HOV_TAG_MASK;
+ if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+ bp->mf_ov = val;
+ bp->path_has_ovlan = true;
+
+ BNX2X_DEV_INFO("MF OV for func %d is %d "
+ "(0x%04x)\n", func, bp->mf_ov,
+ bp->mf_ov);
+ } else {
+ dev_err(&bp->pdev->dev,
+ "No valid MF OV for func %d, "
+ "aborting\n", func);
+ return -EPERM;
+ }
+ break;
+ case MULTI_FUNCTION_SI:
+ BNX2X_DEV_INFO("func %d is in MF "
+ "switch-independent mode\n", func);
+ break;
+ default:
+ if (vn) {
+ dev_err(&bp->pdev->dev,
+ "VN %d is in a single function mode, "
+ "aborting\n", vn);
+ return -EPERM;
+ }
+ break;
+ }
+
+ /* Check if the other port on the path needs an OVLAN:
+ * since the MF configuration is shared between ports, the only
+ * possible mixed modes are
+ * {SF, SI}, {SF, SD}, {SD, SF} and {SI, SF}
+ */
+ if (CHIP_MODE_IS_4_PORT(bp) &&
+ !bp->path_has_ovlan &&
+ !IS_MF(bp) &&
+ bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
+ u8 other_port = !BP_PORT(bp);
+ u8 other_func = BP_PATH(bp) + 2*other_port;
+ val = MF_CFG_RD(bp,
+ func_mf_config[other_func].e1hov_tag);
+ if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
+ bp->path_has_ovlan = true;
+ }
+ }
+
+ /* adjust igu_sb_cnt to MF for E1x */
+ if (CHIP_IS_E1x(bp) && IS_MF(bp))
+ bp->igu_sb_cnt /= E1HVN_MAX;
+
+ /* port info */
+ bnx2x_get_port_hwinfo(bp);
+
+ /* Get MAC addresses */
+ bnx2x_get_mac_hwinfo(bp);
+
+#ifdef BCM_CNIC
+ bnx2x_get_cnic_info(bp);
+#endif
+
+ /* Get current FW pulse sequence */
+ if (!BP_NOMCP(bp)) {
+ int mb_idx = BP_FW_MB_IDX(bp);
+
+ bp->fw_drv_pulse_wr_seq =
+ (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK);
+ BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
+ }
+
+ return rc;
+}
+
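+/* Parse the PCI VPD read-only section: if the board's manufacturer ID
+ * matches the Dell vendor ID, copy the vendor-specific (V0) keyword
+ * into bp->fw_ver.
+ */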
+static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
+{
+ int cnt, i, block_end, rodi;
+ char vpd_data[BNX2X_VPD_LEN+1];
+ char str_id_reg[VENDOR_ID_LEN+1];
+ char str_id_cap[VENDOR_ID_LEN+1];
+ u8 len;
+
+ cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
+ memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
+
+ if (cnt < BNX2X_VPD_LEN)
+ goto out_not_found;
+
+ i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
+ PCI_VPD_LRDT_RO_DATA);
+ if (i < 0)
+ goto out_not_found;
+
+ block_end = i + PCI_VPD_LRDT_TAG_SIZE +
+ pci_vpd_lrdt_size(&vpd_data[i]);
+
+ i += PCI_VPD_LRDT_TAG_SIZE;
+
+ if (block_end > BNX2X_VPD_LEN)
+ goto out_not_found;
+
+ rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+ PCI_VPD_RO_KEYWORD_MFR_ID);
+ if (rodi < 0)
+ goto out_not_found;
+
+ len = pci_vpd_info_field_size(&vpd_data[rodi]);
+
+ if (len != VENDOR_ID_LEN)
+ goto out_not_found;
+
+ rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+ /* vendor specific info */
+ snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
+ snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
+ if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
+ !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
+
+ rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+ PCI_VPD_RO_KEYWORD_VENDOR0);
+ if (rodi >= 0) {
+ len = pci_vpd_info_field_size(&vpd_data[rodi]);
+
+ rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+ if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
+ memcpy(bp->fw_ver, &vpd_data[rodi], len);
+ bp->fw_ver[len] = ' ';
+ }
+ }
+ return;
+ }
+out_not_found:
+ return;
+}
+
+static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
+{
+ u32 flags = 0;
+
+ if (CHIP_REV_IS_FPGA(bp))
+ SET_FLAGS(flags, MODE_FPGA);
+ else if (CHIP_REV_IS_EMUL(bp))
+ SET_FLAGS(flags, MODE_EMUL);
+ else
+ SET_FLAGS(flags, MODE_ASIC);
+
+ if (CHIP_MODE_IS_4_PORT(bp))
+ SET_FLAGS(flags, MODE_PORT4);
+ else
+ SET_FLAGS(flags, MODE_PORT2);
+
+ if (CHIP_IS_E2(bp))
+ SET_FLAGS(flags, MODE_E2);
+ else if (CHIP_IS_E3(bp)) {
+ SET_FLAGS(flags, MODE_E3);
+ if (CHIP_REV(bp) == CHIP_REV_Ax)
+ SET_FLAGS(flags, MODE_E3_A0);
+ else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
+ SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
+ }
+
+ if (IS_MF(bp)) {
+ SET_FLAGS(flags, MODE_MF);
+ switch (bp->mf_mode) {
+ case MULTI_FUNCTION_SD:
+ SET_FLAGS(flags, MODE_MF_SD);
+ break;
+ case MULTI_FUNCTION_SI:
+ SET_FLAGS(flags, MODE_MF_SI);
+ break;
+ }
+ } else
+ SET_FLAGS(flags, MODE_SF);
+
+#if defined(__LITTLE_ENDIAN)
+ SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
+#else /*(__BIG_ENDIAN)*/
+ SET_FLAGS(flags, MODE_BIG_ENDIAN);
+#endif
+ INIT_MODE_FLAGS(bp) = flags;
+}
+
+static int __devinit bnx2x_init_bp(struct bnx2x *bp)
+{
+ int func;
+ int timer_interval;
+ int rc;
+
+ mutex_init(&bp->port.phy_mutex);
+ mutex_init(&bp->fw_mb_mutex);
+ spin_lock_init(&bp->stats_lock);
+#ifdef BCM_CNIC
+ mutex_init(&bp->cnic_mutex);
+#endif
+
+ INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
+ INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
+ INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
+ rc = bnx2x_get_hwinfo(bp);
+ if (rc)
+ return rc;
+
+ bnx2x_set_modes_bitmap(bp);
+
+ rc = bnx2x_alloc_mem_bp(bp);
+ if (rc)
+ return rc;
+
+ bnx2x_read_fwinfo(bp);
+
+ func = BP_FUNC(bp);
+
+ /* need to reset chip if undi was active */
+ if (!BP_NOMCP(bp))
+ bnx2x_undi_unload(bp);
+
+ /* init fw_seq after undi_unload! */
+ if (!BP_NOMCP(bp)) {
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+ }
+
+ if (CHIP_REV_IS_FPGA(bp))
+ dev_err(&bp->pdev->dev, "FPGA detected\n");
+
+ if (BP_NOMCP(bp) && (func == 0))
+ dev_err(&bp->pdev->dev, "MCP disabled, "
+ "must load devices in order!\n");
+
+ bp->multi_mode = multi_mode;
+
+ /* Set TPA flags */
+ if (disable_tpa) {
+ bp->flags &= ~TPA_ENABLE_FLAG;
+ bp->dev->features &= ~NETIF_F_LRO;
+ } else {
+ bp->flags |= TPA_ENABLE_FLAG;
+ bp->dev->features |= NETIF_F_LRO;
+ }
+ bp->disable_tpa = disable_tpa;
+
+ if (CHIP_IS_E1(bp))
+ bp->dropless_fc = 0;
+ else
+ bp->dropless_fc = dropless_fc;
+
+ bp->mrrs = mrrs;
+
+ bp->tx_ring_size = MAX_TX_AVAIL;
+
+ /* make sure that the numbers are in the right granularity */
+ bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
+ bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
+
+ timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
+ bp->current_interval = (poll ? poll : timer_interval);
+
+ init_timer(&bp->timer);
+ bp->timer.expires = jiffies + bp->current_interval;
+ bp->timer.data = (unsigned long) bp;
+ bp->timer.function = bnx2x_timer;
+
+ bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
+ bnx2x_dcbx_init_params(bp);
+
+#ifdef BCM_CNIC
+ if (CHIP_IS_E1x(bp))
+ bp->cnic_base_cl_id = FP_SB_MAX_E1x;
+ else
+ bp->cnic_base_cl_id = FP_SB_MAX_E2;
+#endif
+
+ /* multiple tx priority */
+ if (CHIP_IS_E1x(bp))
+ bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
+ if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
+ bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
+ if (CHIP_IS_E3B0(bp))
+ bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
+
+ return rc;
+}
+
+
+/****************************************************************************
+* General service functions
+****************************************************************************/
+
+/*
+ * net_device service functions
+ */
+
+/* called with rtnl_lock */
+static int bnx2x_open(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ bool global = false;
+ int other_engine = BP_PATH(bp) ? 0 : 1;
+ u32 other_load_counter, load_counter;
+
+ netif_carrier_off(dev);
+
+ bnx2x_set_power_state(bp, PCI_D0);
+
+ other_load_counter = bnx2x_get_load_cnt(bp, other_engine);
+ load_counter = bnx2x_get_load_cnt(bp, BP_PATH(bp));
+
+ /*
+ * If a parity error occurred during the unload, then attentions
+ * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
+ * want the first function loaded on the current engine to
+ * complete the recovery.
+ */
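+ /*
+ * The do { } while (0) below is used only as a breakable block:
+ * a successful recovery breaks out of it, while any failure falls
+ * through to the error path at its end.
+ */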
+ if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
+ bnx2x_chk_parity_attn(bp, &global, true))
+ do {
+ /*
+ * If there are attentions and they are in global
+ * blocks, set the GLOBAL_RESET bit regardless of
+ * whether it will be this function that will
+ * complete the recovery or not.
+ */
+ if (global)
+ bnx2x_set_reset_global(bp);
+
+ /*
+ * Only the first function on the current engine should
+ * try to recover in open. In case of attentions in
+ * global blocks only the first in the chip should try
+ * to recover.
+ */
+ if ((!load_counter &&
+ (!global || !other_load_counter)) &&
+ bnx2x_trylock_leader_lock(bp) &&
+ !bnx2x_leader_reset(bp)) {
+ netdev_info(bp->dev, "Recovered in open\n");
+ break;
+ }
+
+ /* recovery has failed... */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+ bp->recovery_state = BNX2X_RECOVERY_FAILED;
+
+ netdev_err(bp->dev, "Recovery flow hasn't been properly"
+ " completed yet. Try again later. If you still see this"
+ " message after a few retries then a power cycle is"
+ " required.\n");
+
+ return -EAGAIN;
+ } while (0);
+
+ bp->recovery_state = BNX2X_RECOVERY_DONE;
+ return bnx2x_nic_load(bp, LOAD_OPEN);
+}
+
+/* called with rtnl_lock */
+static int bnx2x_close(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ /* Unload the driver, release IRQs */
+ bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+
+ /* Power off */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+
+ return 0;
+}
+
+static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p)
+{
+ int mc_count = netdev_mc_count(bp->dev);
+ struct bnx2x_mcast_list_elem *mc_mac =
+ kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
+ struct netdev_hw_addr *ha;
+
+ if (!mc_mac)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&p->mcast_list);
+
+ netdev_for_each_mc_addr(ha, bp->dev) {
+ mc_mac->mac = bnx2x_mc_addr(ha);
+ list_add_tail(&mc_mac->link, &p->mcast_list);
+ mc_mac++;
+ }
+
+ p->mcast_list_len = mc_count;
+
+ return 0;
+}
+
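+/* The list elements were allocated as a single array in
+ * bnx2x_init_mcast_macs_list(), so freeing the first entry releases
+ * the whole list.
+ */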
+static inline void bnx2x_free_mcast_macs_list(
+ struct bnx2x_mcast_ramrod_params *p)
+{
+ struct bnx2x_mcast_list_elem *mc_mac =
+ list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
+ link);
+
+ WARN_ON(!mc_mac);
+ kfree(mc_mac);
+}
+
+/**
+ * bnx2x_set_uc_list - configure a new list of unicast MACs.
+ *
+ * @bp: driver handle
+ *
+ * We will use zero (0) as a MAC type for these MACs.
+ */
+static inline int bnx2x_set_uc_list(struct bnx2x *bp)
+{
+ int rc;
+ struct net_device *dev = bp->dev;
+ struct netdev_hw_addr *ha;
+ struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+ unsigned long ramrod_flags = 0;
+
+ /* First schedule a clean-up of the old configuration */
+ rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
+ if (rc < 0) {
+ BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
+ return rc;
+ }
+
+ netdev_for_each_uc_addr(ha, dev) {
+ rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
+ BNX2X_UC_LIST_MAC, &ramrod_flags);
+ if (rc < 0) {
+ BNX2X_ERR("Failed to schedule ADD operations: %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* Execute the pending commands */
+ __set_bit(RAMROD_CONT, &ramrod_flags);
+ return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
+ BNX2X_UC_LIST_MAC, &ramrod_flags);
+}
+
+static inline int bnx2x_set_mc_list(struct bnx2x *bp)
+{
+ struct net_device *dev = bp->dev;
+ struct bnx2x_mcast_ramrod_params rparam = {0};
+ int rc = 0;
+
+ rparam.mcast_obj = &bp->mcast_obj;
+
+ /* first, clear all configured multicast MACs */
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+ if (rc < 0) {
+ BNX2X_ERR("Failed to clear multicast "
+ "configuration: %d\n", rc);
+ return rc;
+ }
+
+ /* then, configure a new MACs list */
+ if (netdev_mc_count(dev)) {
+ rc = bnx2x_init_mcast_macs_list(bp, &rparam);
+ if (rc) {
+ BNX2X_ERR("Failed to create multicast MACs "
+ "list: %d\n", rc);
+ return rc;
+ }
+
+ /* Now add the new MACs */
+ rc = bnx2x_config_mcast(bp, &rparam,
+ BNX2X_MCAST_CMD_ADD);
+ if (rc < 0)
+ BNX2X_ERR("Failed to set a new multicast "
+ "configuration: %d\n", rc);
+
+ bnx2x_free_mcast_macs_list(&rparam);
+ }
+
+ return rc;
+}
+
+
+/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
+void bnx2x_set_rx_mode(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 rx_mode = BNX2X_RX_MODE_NORMAL;
+
+ if (bp->state != BNX2X_STATE_OPEN) {
+ DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
+ return;
+ }
+
+ DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
+
+ if (dev->flags & IFF_PROMISC)
+ rx_mode = BNX2X_RX_MODE_PROMISC;
+ else if ((dev->flags & IFF_ALLMULTI) ||
+ ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
+ CHIP_IS_E1(bp)))
+ rx_mode = BNX2X_RX_MODE_ALLMULTI;
+ else {
+ /* some multicasts */
+ if (bnx2x_set_mc_list(bp) < 0)
+ rx_mode = BNX2X_RX_MODE_ALLMULTI;
+
+ if (bnx2x_set_uc_list(bp) < 0)
+ rx_mode = BNX2X_RX_MODE_PROMISC;
+ }
+
+ bp->rx_mode = rx_mode;
+
+ /* Schedule the rx_mode command */
+ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
+ set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+ return;
+ }
+
+ bnx2x_set_storm_rx_mode(bp);
+}
+
+/* called with rtnl_lock */
+static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
+ int devad, u16 addr)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u16 value;
+ int rc;
+
+ DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
+ prtad, devad, addr);
+
+ /* The HW expects different devad if CL22 is used */
+ devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
+
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
+ bnx2x_release_phy_lock(bp);
+ DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
+
+ if (!rc)
+ rc = value;
+ return rc;
+}
+
+/* called with rtnl_lock */
+static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
+ u16 addr, u16 value)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ int rc;
+
+ DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
+ " value 0x%x\n", prtad, devad, addr, value);
+
+ /* The HW expects different devad if CL22 is used */
+ devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
+
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
+ bnx2x_release_phy_lock(bp);
+ return rc;
+}
+
+/* called with rtnl_lock */
+static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct mii_ioctl_data *mdio = if_mii(ifr);
+
+ DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
+ mdio->phy_id, mdio->reg_num, mdio->val_in);
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void poll_bnx2x(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ disable_irq(bp->pdev->irq);
+ bnx2x_interrupt(bp->pdev->irq, dev);
+ enable_irq(bp->pdev->irq);
+}
+#endif
+
+static const struct net_device_ops bnx2x_netdev_ops = {
+ .ndo_open = bnx2x_open,
+ .ndo_stop = bnx2x_close,
+ .ndo_start_xmit = bnx2x_start_xmit,
+ .ndo_select_queue = bnx2x_select_queue,
+ .ndo_set_rx_mode = bnx2x_set_rx_mode,
+ .ndo_set_mac_address = bnx2x_change_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = bnx2x_ioctl,
+ .ndo_change_mtu = bnx2x_change_mtu,
+ .ndo_fix_features = bnx2x_fix_features,
+ .ndo_set_features = bnx2x_set_features,
+ .ndo_tx_timeout = bnx2x_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = poll_bnx2x,
+#endif
+ .ndo_setup_tc = bnx2x_setup_tc,
+
+#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+ .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
+#endif
+};
+
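+/* Prefer a 64-bit DMA mask (setting USING_DAC_FLAG on success) and fall
+ * back to 32 bits; fail only if neither mask is supported.
+ */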
+static inline int bnx2x_set_coherency_mask(struct bnx2x *bp)
+{
+ struct device *dev = &bp->pdev->dev;
+
+ if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
+ bp->flags |= USING_DAC_FLAG;
+ if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
+ dev_err(dev, "dma_set_coherent_mask failed, "
+ "aborting\n");
+ return -EIO;
+ }
+ } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+ dev_err(dev, "System does not support DMA, aborting\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
+ struct net_device *dev,
+ unsigned long board_type)
+{
+ struct bnx2x *bp;
+ int rc;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ bp = netdev_priv(dev);
+
+ bp->dev = dev;
+ bp->pdev = pdev;
+ bp->flags = 0;
+ bp->pf_num = PCI_FUNC(pdev->devfn);
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&bp->pdev->dev,
+ "Cannot enable PCI device, aborting\n");
+ goto err_out;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&bp->pdev->dev,
+ "Cannot find PCI device base address, aborting\n");
+ rc = -ENODEV;
+ goto err_out_disable;
+ }
+
+ if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ dev_err(&bp->pdev->dev, "Cannot find second PCI device"
+ " base address, aborting\n");
+ rc = -ENODEV;
+ goto err_out_disable;
+ }
+
+ if (atomic_read(&pdev->enable_cnt) == 1) {
+ rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (rc) {
+ dev_err(&bp->pdev->dev,
+ "Cannot obtain PCI resources, aborting\n");
+ goto err_out_disable;
+ }
+
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+ }
+
+ bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (bp->pm_cap == 0) {
+ dev_err(&bp->pdev->dev,
+ "Cannot find power management capability, aborting\n");
+ rc = -EIO;
+ goto err_out_release;
+ }
+
+ if (!pci_is_pcie(pdev)) {
+ dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
+ rc = -EIO;
+ goto err_out_release;
+ }
+
+ rc = bnx2x_set_coherency_mask(bp);
+ if (rc)
+ goto err_out_release;
+
+ dev->mem_start = pci_resource_start(pdev, 0);
+ dev->base_addr = dev->mem_start;
+ dev->mem_end = pci_resource_end(pdev, 0);
+
+ dev->irq = pdev->irq;
+
+ bp->regview = pci_ioremap_bar(pdev, 0);
+ if (!bp->regview) {
+ dev_err(&bp->pdev->dev,
+ "Cannot map register space, aborting\n");
+ rc = -ENOMEM;
+ goto err_out_release;
+ }
+
+ bnx2x_set_power_state(bp, PCI_D0);
+
+ /* clean indirect addresses */
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+ PCICFG_VENDOR_ID_OFFSET);
+ /*
+ * Clean the following indirect addresses for all functions since they
+ * are not used by the driver.
+ */
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
+
+ if (CHIP_IS_E1x(bp)) {
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+ }
+
+ /*
+ * Enable internal target-read (in case we are probed after PF FLR).
+ * Must be done prior to any BAR read access. Only for 57712 and up
+ */
+ if (board_type != BCM57710 &&
+ board_type != BCM57711 &&
+ board_type != BCM57711E)
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
+ /* Reset the load counter */
+ bnx2x_clear_load_cnt(bp);
+
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ dev->netdev_ops = &bnx2x_netdev_ops;
+ bnx2x_set_ethtool_ops(dev);
+
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_LRO |
+ NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
+
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
+
+ dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
+ if (bp->flags & USING_DAC_FLAG)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ /* Add Loopback capability to the device */
+ dev->hw_features |= NETIF_F_LOOPBACK;
+
+#ifdef BCM_DCBNL
+ dev->dcbnl_ops = &bnx2x_dcbnl_ops;
+#endif
+
+ /* get_port_hwinfo() will set prtad and mmds properly */
+ bp->mdio.prtad = MDIO_PRTAD_NONE;
+ bp->mdio.mmds = 0;
+ bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ bp->mdio.dev = dev;
+ bp->mdio.mdio_read = bnx2x_mdio_read;
+ bp->mdio.mdio_write = bnx2x_mdio_write;
+
+ return 0;
+
+err_out_release:
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
+
+err_out_disable:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+err_out:
+ return rc;
+}
+
+static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
+ int *width, int *speed)
+{
+ u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
+
+ *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
+
+ /* return value of 1=2.5GHz 2=5GHz */
+ *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
+}
+
+static int bnx2x_check_firmware(struct bnx2x *bp)
+{
+ const struct firmware *firmware = bp->firmware;
+ struct bnx2x_fw_file_hdr *fw_hdr;
+ struct bnx2x_fw_file_section *sections;
+ u32 offset, len, num_ops;
+ u16 *ops_offsets;
+ int i;
+ const u8 *fw_ver;
+
+ if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
+ return -EINVAL;
+
+ fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
+ sections = (struct bnx2x_fw_file_section *)fw_hdr;
+
+ /* Make sure none of the offsets and sizes make us read beyond
+ * the end of the firmware data */
+ for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
+ offset = be32_to_cpu(sections[i].offset);
+ len = be32_to_cpu(sections[i].len);
+ if (offset + len > firmware->size) {
+ dev_err(&bp->pdev->dev,
+ "Section %d length is out of bounds\n", i);
+ return -EINVAL;
+ }
+ }
+
+ /* Likewise for the init_ops offsets */
+ offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
+ ops_offsets = (u16 *)(firmware->data + offset);
+ num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
+
+ for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
+ if (be16_to_cpu(ops_offsets[i]) > num_ops) {
+ dev_err(&bp->pdev->dev,
+ "Section offset %d is out of bounds\n", i);
+ return -EINVAL;
+ }
+ }
+
+ /* Check FW version */
+ offset = be32_to_cpu(fw_hdr->fw_version.offset);
+ fw_ver = firmware->data + offset;
+ if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
+ (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
+ (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
+ (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
+ dev_err(&bp->pdev->dev,
+ "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
+ fw_ver[0], fw_ver[1], fw_ver[2],
+ fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
+ BCM_5710_FW_MINOR_VERSION,
+ BCM_5710_FW_REVISION_VERSION,
+ BCM_5710_FW_ENGINEERING_VERSION);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
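+/* Helpers that convert sections of the firmware file from big-endian to
+ * CPU byte order, n bytes at a time.
+ */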
+static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+{
+ const __be32 *source = (const __be32 *)_source;
+ u32 *target = (u32 *)_target;
+ u32 i;
+
+ for (i = 0; i < n/4; i++)
+ target[i] = be32_to_cpu(source[i]);
+}
+
+/*
+ Ops array is stored in the following format:
+ {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
+ */
+static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
+{
+ const __be32 *source = (const __be32 *)_source;
+ struct raw_op *target = (struct raw_op *)_target;
+ u32 i, j, tmp;
+
+ for (i = 0, j = 0; i < n/8; i++, j += 2) {
+ tmp = be32_to_cpu(source[j]);
+ target[i].op = (tmp >> 24) & 0xff;
+ target[i].offset = tmp & 0xffffff;
+ target[i].raw_data = be32_to_cpu(source[j + 1]);
+ }
+}
+
+/**
+ * IRO array is stored in the following format:
+ * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
+ */
+static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
+{
+ const __be32 *source = (const __be32 *)_source;
+ struct iro *target = (struct iro *)_target;
+ u32 i, j, tmp;
+
+ for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
+ target[i].base = be32_to_cpu(source[j]);
+ j++;
+ tmp = be32_to_cpu(source[j]);
+ target[i].m1 = (tmp >> 16) & 0xffff;
+ target[i].m2 = tmp & 0xffff;
+ j++;
+ tmp = be32_to_cpu(source[j]);
+ target[i].m3 = (tmp >> 16) & 0xffff;
+ target[i].size = tmp & 0xffff;
+ j++;
+ }
+}
+
+static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+{
+ const __be16 *source = (const __be16 *)_source;
+ u16 *target = (u16 *)_target;
+ u32 i;
+
+ for (i = 0; i < n/2; i++)
+ target[i] = be16_to_cpu(source[i]);
+}
+
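+/* Allocate bp->arr and fill it by converting the matching section of the
+ * firmware blob with the given helper; on allocation failure jump to the
+ * supplied cleanup label.
+ */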
+#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
+do { \
+ u32 len = be32_to_cpu(fw_hdr->arr.len); \
+ bp->arr = kmalloc(len, GFP_KERNEL); \
+ if (!bp->arr) { \
+ pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
+ goto lbl; \
+ } \
+ func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
+ (u8 *)bp->arr, len); \
+} while (0)
+
+int bnx2x_init_firmware(struct bnx2x *bp)
+{
+ const char *fw_file_name;
+ struct bnx2x_fw_file_hdr *fw_hdr;
+ int rc;
+
+ if (CHIP_IS_E1(bp))
+ fw_file_name = FW_FILE_NAME_E1;
+ else if (CHIP_IS_E1H(bp))
+ fw_file_name = FW_FILE_NAME_E1H;
+ else if (!CHIP_IS_E1x(bp))
+ fw_file_name = FW_FILE_NAME_E2;
+ else {
+ BNX2X_ERR("Unsupported chip revision\n");
+ return -EINVAL;
+ }
+
+ BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
+
+ rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
+ if (rc) {
+ BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
+ goto request_firmware_exit;
+ }
+
+ rc = bnx2x_check_firmware(bp);
+ if (rc) {
+ BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
+ goto request_firmware_exit;
+ }
+
+ fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
+
+ /* Initialize the pointers to the init arrays */
+ /* Blob */
+ BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
+
+ /* Opcodes */
+ BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
+
+ /* Offsets */
+ BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
+ be16_to_cpu_n);
+
+ /* STORMs firmware */
+ INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
+ INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->tsem_pram_data.offset);
+ INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->usem_int_table_data.offset);
+ INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->usem_pram_data.offset);
+ INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
+ INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->xsem_pram_data.offset);
+ INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->csem_int_table_data.offset);
+ INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->csem_pram_data.offset);
+ /* IRO */
+ BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
+
+ return 0;
+
+iro_alloc_err:
+ kfree(bp->init_ops_offsets);
+init_offsets_alloc_err:
+ kfree(bp->init_ops);
+init_ops_alloc_err:
+ kfree(bp->init_data);
+request_firmware_exit:
+ release_firmware(bp->firmware);
+
+ return rc;
+}
+
+static void bnx2x_release_firmware(struct bnx2x *bp)
+{
+ kfree(bp->init_ops_offsets);
+ kfree(bp->init_ops);
+ kfree(bp->init_data);
+ release_firmware(bp->firmware);
+}
+
+
+static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
+ .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
+ .init_hw_cmn = bnx2x_init_hw_common,
+ .init_hw_port = bnx2x_init_hw_port,
+ .init_hw_func = bnx2x_init_hw_func,
+
+ .reset_hw_cmn = bnx2x_reset_common,
+ .reset_hw_port = bnx2x_reset_port,
+ .reset_hw_func = bnx2x_reset_func,
+
+ .gunzip_init = bnx2x_gunzip_init,
+ .gunzip_end = bnx2x_gunzip_end,
+
+ .init_fw = bnx2x_init_firmware,
+ .release_fw = bnx2x_release_firmware,
+};
+
+void bnx2x__init_func_obj(struct bnx2x *bp)
+{
+ /* Prepare DMAE related driver resources */
+ bnx2x_setup_dmae(bp);
+
+ bnx2x_init_func_obj(bp, &bp->func_obj,
+ bnx2x_sp(bp, func_rdata),
+ bnx2x_sp_mapping(bp, func_rdata),
+ &bnx2x_func_sp_drv);
+}
+
+/* must be called after sriov-enable */
+static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp)
+{
+ int cid_count = BNX2X_L2_CID_COUNT(bp);
+
+#ifdef BCM_CNIC
+ cid_count += CNIC_CID_MAX;
+#endif
+ return roundup(cid_count, QM_CID_ROUND);
+}
+
+/**
+ * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
+ *
+ * @pdev: pci device
+ */
+static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
+{
+ int pos;
+ u16 control;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+
+ /*
+ * If MSI-X is not supported - return the number of SBs needed to
+ * support one fast path queue: one FP queue + SB for CNIC
+ */
+ if (!pos)
+ return 1 + CNIC_PRESENT;
+
+ /*
+ * The value in the PCI configuration space is the index of the last
+ * entry, namely one less than the actual size of the table, which is
+ * exactly what we want to return from this function: number of all SBs
+ * without the default SB.
+ */
+ pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
+ return control & PCI_MSIX_FLAGS_QSIZE;
+}
+
+static int __devinit bnx2x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev = NULL;
+ struct bnx2x *bp;
+ int pcie_width, pcie_speed;
+ int rc, max_non_def_sbs;
+ int rx_count, tx_count, rss_count;
+	/*
+	 * An estimated maximum supported CoS number according to the chip
+	 * version.
+	 * We will try to roughly estimate the maximum number of CoSes this
+	 * chip may support in order to minimize the memory allocated for Tx
+	 * netdev_queue's. This number will be accurately calculated during
+	 * the initialization of bp->max_cos based on the chip version AND
+	 * chip revision in bnx2x_init_bp().
+	 */
+ u8 max_cos_est = 0;
+
+ switch (ent->driver_data) {
+ case BCM57710:
+ case BCM57711:
+ case BCM57711E:
+ max_cos_est = BNX2X_MULTI_TX_COS_E1X;
+ break;
+
+ case BCM57712:
+ case BCM57712_MF:
+ max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0;
+ break;
+
+ case BCM57800:
+ case BCM57800_MF:
+ case BCM57810:
+ case BCM57810_MF:
+ case BCM57840:
+ case BCM57840_MF:
+ max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
+ break;
+
+ default:
+ pr_err("Unknown board_type (%ld), aborting\n",
+ ent->driver_data);
+ return -ENODEV;
+ }
+
+ max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
+
+ /* !!! FIXME !!!
+ * Do not allow the maximum SB count to grow above 16
+	 * since Special CIDs start from 16*BNX2X_MULTI_TX_COS=48.
+ * We will use the FP_SB_MAX_E1x macro for this matter.
+ */
+ max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs);
+
+ WARN_ON(!max_non_def_sbs);
+
+ /* Maximum number of RSS queues: one IGU SB goes to CNIC */
+ rss_count = max_non_def_sbs - CNIC_PRESENT;
+
+ /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
+ rx_count = rss_count + FCOE_PRESENT;
+
+ /*
+ * Maximum number of netdev Tx queues:
+ * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
+ */
+ tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT;
+
+ /* dev zeroed in init_etherdev */
+ dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
+ if (!dev) {
+ dev_err(&pdev->dev, "Cannot allocate net device\n");
+ return -ENOMEM;
+ }
+
+ bp = netdev_priv(dev);
+
+ DP(NETIF_MSG_DRV, "Allocated netdev with %d tx and %d rx queues\n",
+ tx_count, rx_count);
+
+ bp->igu_sb_cnt = max_non_def_sbs;
+ bp->msg_enable = debug;
+ pci_set_drvdata(pdev, dev);
+
+ rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
+ if (rc < 0) {
+ free_netdev(dev);
+ return rc;
+ }
+
+ DP(NETIF_MSG_DRV, "max_non_def_sbs %d\n", max_non_def_sbs);
+
+ rc = bnx2x_init_bp(bp);
+ if (rc)
+ goto init_one_exit;
+
+ /*
+	 * Map doorbells here as we need the real value of bp->max_cos which
+ * is initialized in bnx2x_init_bp().
+ */
+ bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
+ min_t(u64, BNX2X_DB_SIZE(bp),
+ pci_resource_len(pdev, 2)));
+ if (!bp->doorbells) {
+ dev_err(&bp->pdev->dev,
+ "Cannot map doorbell space, aborting\n");
+ rc = -ENOMEM;
+ goto init_one_exit;
+ }
+
+ /* calc qm_cid_count */
+ bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
+
+#ifdef BCM_CNIC
+ /* disable FCOE L2 queue for E1x and E3*/
+ if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp))
+ bp->flags |= NO_FCOE_FLAG;
+
+#endif
+
+ /* Configure interrupt mode: try to enable MSI-X/MSI if
+ * needed, set bp->num_queues appropriately.
+ */
+ bnx2x_set_int_mode(bp);
+
+ /* Add all NAPI objects */
+ bnx2x_add_all_napi(bp);
+
+ rc = register_netdev(dev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot register net device\n");
+ goto init_one_exit;
+ }
+
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp)) {
+ /* Add storage MAC address */
+ rtnl_lock();
+ dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
+ rtnl_unlock();
+ }
+#endif
+
+ bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
+
+ netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+ board_info[ent->driver_data].name,
+ (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
+ pcie_width,
+ ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
+ (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
+ "5GHz (Gen2)" : "2.5GHz",
+ dev->base_addr, bp->pdev->irq, dev->dev_addr);
+
+ return 0;
+
+init_one_exit:
+ if (bp->regview)
+ iounmap(bp->regview);
+
+ if (bp->doorbells)
+ iounmap(bp->doorbells);
+
+ free_netdev(dev);
+
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ return rc;
+}
+
+static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp;
+
+ if (!dev) {
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+ return;
+ }
+ bp = netdev_priv(dev);
+
+#ifdef BCM_CNIC
+ /* Delete storage MAC address */
+ if (!NO_FCOE(bp)) {
+ rtnl_lock();
+ dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
+ rtnl_unlock();
+ }
+#endif
+
+#ifdef BCM_DCBNL
+ /* Delete app tlvs from dcbnl */
+ bnx2x_dcbnl_update_applist(bp, true);
+#endif
+
+ unregister_netdev(dev);
+
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
+
+ /* Power on: we can't let PCI layer write to us while we are in D3 */
+ bnx2x_set_power_state(bp, PCI_D0);
+
+ /* Disable MSI/MSI-X */
+ bnx2x_disable_msi(bp);
+
+ /* Power off */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+
+ /* Make sure RESET task is not scheduled before continuing */
+ cancel_delayed_work_sync(&bp->sp_rtnl_task);
+
+ if (bp->regview)
+ iounmap(bp->regview);
+
+ if (bp->doorbells)
+ iounmap(bp->doorbells);
+
+ bnx2x_free_mem_bp(bp);
+
+ free_netdev(dev);
+
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
+{
+ int i;
+
+ bp->state = BNX2X_STATE_ERROR;
+
+ bp->rx_mode = BNX2X_RX_MODE_NONE;
+
+#ifdef BCM_CNIC
+ bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+#endif
+ /* Stop Tx */
+ bnx2x_tx_disable(bp);
+
+ bnx2x_netif_stop(bp, 0);
+
+ del_timer_sync(&bp->timer);
+
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+
+ /* Free SKBs, SGEs, TPA pool and driver internals */
+ bnx2x_free_skbs(bp);
+
+ for_each_rx_queue(bp, i)
+ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+
+ bnx2x_free_mem(bp);
+
+ bp->state = BNX2X_STATE_CLOSED;
+
+ netif_carrier_off(bp->dev);
+
+ return 0;
+}
+
+static void bnx2x_eeh_recover(struct bnx2x *bp)
+{
+ u32 val;
+
+ mutex_init(&bp->port.phy_mutex);
+
+ bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+ bp->link_params.shmem_base = bp->common.shmem_base;
+ BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
+
+ if (!bp->common.shmem_base ||
+ (bp->common.shmem_base < 0xA0000) ||
+ (bp->common.shmem_base >= 0xC0000)) {
+ BNX2X_DEV_INFO("MCP not active\n");
+ bp->flags |= NO_MCP_FLAG;
+ return;
+ }
+
+ val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
+ if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+ != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+ BNX2X_ERR("BAD MCP validity signature\n");
+
+ if (!BP_NOMCP(bp)) {
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+ }
+}
+
+/**
+ * bnx2x_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp = netdev_priv(dev);
+
+ rtnl_lock();
+
+ netif_device_detach(dev);
+
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (netif_running(dev))
+ bnx2x_eeh_nic_unload(bp);
+
+ pci_disable_device(pdev);
+
+ rtnl_unlock();
+
+ /* Request a slot reset */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * bnx2x_io_slot_reset - called after the PCI bus has been reset
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp = netdev_priv(dev);
+
+ rtnl_lock();
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset\n");
+ rtnl_unlock();
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ if (netif_running(dev))
+ bnx2x_set_power_state(bp, PCI_D0);
+
+ rtnl_unlock();
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * bnx2x_io_resume - called when traffic can start flowing again
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * its OK to resume normal operation.
+ */
+static void bnx2x_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ netdev_err(bp->dev, "Handling parity error recovery. "
+ "Try again later\n");
+ return;
+ }
+
+ rtnl_lock();
+
+ bnx2x_eeh_recover(bp);
+
+ if (netif_running(dev))
+ bnx2x_nic_load(bp, LOAD_NORMAL);
+
+ netif_device_attach(dev);
+
+ rtnl_unlock();
+}
+
+static struct pci_error_handlers bnx2x_err_handler = {
+ .error_detected = bnx2x_io_error_detected,
+ .slot_reset = bnx2x_io_slot_reset,
+ .resume = bnx2x_io_resume,
+};
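These three callbacks are the standard PCI error-recovery (AER/EEH) contract: error_detected() quiesces the device and votes on the next step, slot_reset() re-initializes the function after the bus reset, and resume() lets traffic flow again. A stripped-down skeleton of the same shape for a hypothetical driver (the foo_ names are placeholders):

static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
					   pci_channel_state_t state)
{
	/* Quiesce; give up immediately if the link is permanently dead. */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);
	return PCI_ERS_RESULT_NEED_RESET;	/* ask the core for a reset */
}

static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
{
	/* The slot has been reset; bring the function back to life. */
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void foo_resume(struct pci_dev *pdev)
{
	/* Recovery done: reattach the netdev and restart the queues. */
}

static struct pci_error_handlers foo_err_handler = {
	.error_detected	= foo_error_detected,
	.slot_reset	= foo_slot_reset,
	.resume		= foo_resume,
};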
+
+static struct pci_driver bnx2x_pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = bnx2x_pci_tbl,
+ .probe = bnx2x_init_one,
+ .remove = __devexit_p(bnx2x_remove_one),
+ .suspend = bnx2x_suspend,
+ .resume = bnx2x_resume,
+ .err_handler = &bnx2x_err_handler,
+};
+
+static int __init bnx2x_init(void)
+{
+ int ret;
+
+ pr_info("%s", version);
+
+ bnx2x_wq = create_singlethread_workqueue("bnx2x");
+ if (bnx2x_wq == NULL) {
+ pr_err("Cannot create workqueue\n");
+ return -ENOMEM;
+ }
+
+ ret = pci_register_driver(&bnx2x_pci_driver);
+ if (ret) {
+ pr_err("Cannot register driver\n");
+ destroy_workqueue(bnx2x_wq);
+ }
+ return ret;
+}
+
+static void __exit bnx2x_cleanup(void)
+{
+ pci_unregister_driver(&bnx2x_pci_driver);
+
+ destroy_workqueue(bnx2x_wq);
+}
+
+void bnx2x_notify_link_changed(struct bnx2x *bp)
+{
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
+}
+
+module_init(bnx2x_init);
+module_exit(bnx2x_cleanup);
+
+#ifdef BCM_CNIC
+/**
+ * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
+ *
+ * @bp: driver handle
+ *
+ * This function will wait until the ramrod completion returns.
+ * Returns 0 on success, -ENODEV if the ramrod doesn't complete.
+ */
+static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
+{
+ unsigned long ramrod_flags = 0;
+
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
+ &bp->iscsi_l2_mac_obj, true,
+ BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
+}
+
+/* count denotes the number of new completions we have seen */
+static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
+{
+ struct eth_spe *spe;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return;
+#endif
+
+ spin_lock_bh(&bp->spq_lock);
+ BUG_ON(bp->cnic_spq_pending < count);
+ bp->cnic_spq_pending -= count;
+
+
+ for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
+ u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
+ & SPE_HDR_CONN_TYPE) >>
+ SPE_HDR_CONN_TYPE_SHIFT;
+ u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
+ >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
+
+ /* Set validation for iSCSI L2 client before sending SETUP
+ * ramrod
+ */
+ if (type == ETH_CONNECTION_TYPE) {
+ if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
+ bnx2x_set_ctx_validation(bp, &bp->context.
+ vcxt[BNX2X_ISCSI_ETH_CID].eth,
+ BNX2X_ISCSI_ETH_CID);
+ }
+
+		/*
+		 * No more than 8 L2 SPEs and no more than 8 L5 SPEs may be
+		 * in the air at once. We also check that the number of
+		 * outstanding COMMON ramrods does not exceed what the EQ
+		 * and SPQ can accommodate.
+		 */
+ if (type == ETH_CONNECTION_TYPE) {
+ if (!atomic_read(&bp->cq_spq_left))
+ break;
+ else
+ atomic_dec(&bp->cq_spq_left);
+ } else if (type == NONE_CONNECTION_TYPE) {
+ if (!atomic_read(&bp->eq_spq_left))
+ break;
+ else
+ atomic_dec(&bp->eq_spq_left);
+ } else if ((type == ISCSI_CONNECTION_TYPE) ||
+ (type == FCOE_CONNECTION_TYPE)) {
+ if (bp->cnic_spq_pending >=
+ bp->cnic_eth_dev.max_kwqe_pending)
+ break;
+ else
+ bp->cnic_spq_pending++;
+ } else {
+ BNX2X_ERR("Unknown SPE type: %d\n", type);
+ bnx2x_panic();
+ break;
+ }
+
+ spe = bnx2x_sp_get_next(bp);
+ *spe = *bp->cnic_kwq_cons;
+
+ DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
+ bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
+
+ if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
+ bp->cnic_kwq_cons = bp->cnic_kwq;
+ else
+ bp->cnic_kwq_cons++;
+ }
+ bnx2x_sp_prod_update(bp);
+ spin_unlock_bh(&bp->spq_lock);
+}
+
+static int bnx2x_cnic_sp_queue(struct net_device *dev,
+ struct kwqe_16 *kwqes[], u32 count)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int i;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return -EIO;
+#endif
+
+ spin_lock_bh(&bp->spq_lock);
+
+ for (i = 0; i < count; i++) {
+ struct eth_spe *spe = (struct eth_spe *)kwqes[i];
+
+ if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
+ break;
+
+ *bp->cnic_kwq_prod = *spe;
+
+ bp->cnic_kwq_pending++;
+
+ DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
+ spe->hdr.conn_and_cmd_data, spe->hdr.type,
+ spe->data.update_data_addr.hi,
+ spe->data.update_data_addr.lo,
+ bp->cnic_kwq_pending);
+
+ if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
+ bp->cnic_kwq_prod = bp->cnic_kwq;
+ else
+ bp->cnic_kwq_prod++;
+ }
+
+ spin_unlock_bh(&bp->spq_lock);
+
+ if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
+ bnx2x_cnic_sp_post(bp, 0);
+
+ return i;
+}
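Both ends of the kwq use the same bounded-ring idiom: a pending counter guards against overflow, and the producer/consumer pointer wraps back to the ring base once it reaches the last descriptor. A tiny self-contained sketch of the producer side (slot type and sizes are illustrative):

#define RING_SLOTS 8

struct ring {
	int slots[RING_SLOTS];
	int *prod;		/* next slot to fill */
	int *last;		/* final slot in the array */
	int pending;		/* occupied slots */
};

static void ring_init(struct ring *r)
{
	r->prod = r->slots;
	r->last = r->slots + RING_SLOTS - 1;
	r->pending = 0;
}

static int ring_put(struct ring *r, int v)
{
	if (r->pending == RING_SLOTS)
		return -1;	/* full, caller must retry later */

	*r->prod = v;
	r->pending++;

	/* same wrap test as the cnic_kwq producer above */
	if (r->prod == r->last)
		r->prod = r->slots;
	else
		r->prod++;
	return 0;
}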
+
+static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
+{
+ struct cnic_ops *c_ops;
+ int rc = 0;
+
+ mutex_lock(&bp->cnic_mutex);
+ c_ops = rcu_dereference_protected(bp->cnic_ops,
+ lockdep_is_held(&bp->cnic_mutex));
+ if (c_ops)
+ rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
+ mutex_unlock(&bp->cnic_mutex);
+
+ return rc;
+}
+
+static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
+{
+ struct cnic_ops *c_ops;
+ int rc = 0;
+
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops)
+ rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
+ rcu_read_unlock();
+
+ return rc;
+}
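The two senders above differ only in how they sample bp->cnic_ops: the process-context path holds cnic_mutex, so rcu_dereference_protected() with a lockdep assertion is the right accessor, while the bottom-half path uses a plain RCU read-side critical section. A condensed sketch of the two patterns, assuming kernel context and a hypothetical g_ops pointer:

struct ops { int (*fn)(void *data); };

static struct ops __rcu *g_ops;
static DEFINE_MUTEX(g_mutex);

/* Process context: the mutex (also taken by updaters) pins the pointer. */
static int call_locked(void *data)
{
	struct ops *o;
	int rc = 0;

	mutex_lock(&g_mutex);
	o = rcu_dereference_protected(g_ops, lockdep_is_held(&g_mutex));
	if (o)
		rc = o->fn(data);
	mutex_unlock(&g_mutex);
	return rc;
}

/* Softirq/atomic context: a read-side critical section is enough. */
static int call_rcu_side(void *data)
{
	struct ops *o;
	int rc = 0;

	rcu_read_lock();
	o = rcu_dereference(g_ops);	/* stable until rcu_read_unlock() */
	if (o)
		rc = o->fn(data);
	rcu_read_unlock();
	return rc;
}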
+
+/*
+ * for commands that have no data
+ */
+int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
+{
+ struct cnic_ctl_info ctl = {0};
+
+ ctl.cmd = cmd;
+
+ return bnx2x_cnic_ctl_send(bp, &ctl);
+}
+
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
+{
+ struct cnic_ctl_info ctl = {0};
+
+ /* first we tell CNIC and only then we count this as a completion */
+ ctl.cmd = CNIC_CTL_COMPLETION_CMD;
+ ctl.data.comp.cid = cid;
+ ctl.data.comp.error = err;
+
+ bnx2x_cnic_ctl_send_bh(bp, &ctl);
+ bnx2x_cnic_sp_post(bp, 0);
+}
+
+
+/* Called with netif_addr_lock_bh() taken.
+ * Sets an rx_mode config for an iSCSI ETH client.
+ * Doesn't block.
+ * Completion should be checked outside.
+ */
+static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
+{
+ unsigned long accept_flags = 0, ramrod_flags = 0;
+ u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
+ int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
+
+ if (start) {
+		/* Start accepting on the iSCSI L2 ring. Accept all
+		 * multicasts because it's the only way for the UIO Queue to
+		 * accept them (in non-promiscuous mode only one Queue per
+		 * function, the leading one in our case, will receive
+		 * multicast packets).
+		 */
+ __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+ /* Clear STOP_PENDING bit if START is requested */
+ clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
+
+ sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
+ } else
+ /* Clear START_PENDING bit if STOP is requested */
+ clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
+
+ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
+ set_bit(sched_state, &bp->sp_state);
+ else {
+ __set_bit(RAMROD_RX, &ramrod_flags);
+ bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
+ ramrod_flags);
+ }
+}
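The flag handling above mixes the non-atomic __set_bit() (safe on the on-stack accept_flags/ramrod_flags words) with the atomic set_bit()/clear_bit()/test_bit() on the shared bp->sp_state. A small sketch of that distinction, assuming kernel bitops and a hypothetical shared state word:

#include <linux/bitops.h>

#define MY_FLAG_A 0
#define MY_FLAG_B 1

static unsigned long shared_state;	/* touched from several contexts */

static void flags_demo(void)
{
	unsigned long local_flags = 0;	/* on-stack: no concurrency */

	/* The non-atomic variant is fine for data only this thread sees. */
	__set_bit(MY_FLAG_A, &local_flags);

	/* Shared words need the atomic read-modify-write variants. */
	set_bit(MY_FLAG_B, &shared_state);
	if (test_bit(MY_FLAG_B, &shared_state))
		clear_bit(MY_FLAG_B, &shared_state);
}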
+
+
+static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc = 0;
+
+ switch (ctl->cmd) {
+ case DRV_CTL_CTXTBL_WR_CMD: {
+ u32 index = ctl->data.io.offset;
+ dma_addr_t addr = ctl->data.io.dma_addr;
+
+ bnx2x_ilt_wr(bp, index, addr);
+ break;
+ }
+
+ case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
+ int count = ctl->data.credit.credit_count;
+
+ bnx2x_cnic_sp_post(bp, count);
+ break;
+ }
+
+ /* rtnl_lock is held. */
+ case DRV_CTL_START_L2_CMD: {
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+ unsigned long sp_bits = 0;
+
+ /* Configure the iSCSI classification object */
+ bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
+ cp->iscsi_l2_client_id,
+ cp->iscsi_l2_cid, BP_FUNC(bp),
+ bnx2x_sp(bp, mac_rdata),
+ bnx2x_sp_mapping(bp, mac_rdata),
+ BNX2X_FILTER_MAC_PENDING,
+ &bp->sp_state, BNX2X_OBJ_TYPE_RX,
+ &bp->macs_pool);
+
+ /* Set iSCSI MAC address */
+ rc = bnx2x_set_iscsi_eth_mac_addr(bp);
+ if (rc)
+ break;
+
+ mmiowb();
+ barrier();
+
+ /* Start accepting on iSCSI L2 ring */
+
+ netif_addr_lock_bh(dev);
+ bnx2x_set_iscsi_eth_rx_mode(bp, true);
+ netif_addr_unlock_bh(dev);
+
+ /* bits to wait on */
+ __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
+ __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
+
+ if (!bnx2x_wait_sp_comp(bp, sp_bits))
+ BNX2X_ERR("rx_mode completion timed out!\n");
+
+ break;
+ }
+
+ /* rtnl_lock is held. */
+ case DRV_CTL_STOP_L2_CMD: {
+ unsigned long sp_bits = 0;
+
+ /* Stop accepting on iSCSI L2 ring */
+ netif_addr_lock_bh(dev);
+ bnx2x_set_iscsi_eth_rx_mode(bp, false);
+ netif_addr_unlock_bh(dev);
+
+ /* bits to wait on */
+ __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
+ __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
+
+ if (!bnx2x_wait_sp_comp(bp, sp_bits))
+ BNX2X_ERR("rx_mode completion timed out!\n");
+
+ mmiowb();
+ barrier();
+
+ /* Unset iSCSI L2 MAC */
+ rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
+ BNX2X_ISCSI_ETH_MAC, true);
+ break;
+ }
+ case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
+ int count = ctl->data.credit.credit_count;
+
+ smp_mb__before_atomic_inc();
+ atomic_add(count, &bp->cq_spq_left);
+ smp_mb__after_atomic_inc();
+ break;
+ }
+
+ default:
+ BNX2X_ERR("unknown command %x\n", ctl->cmd);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
+{
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ if (bp->flags & USING_MSIX_FLAG) {
+ cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
+ cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
+ cp->irq_arr[0].vector = bp->msix_table[1].vector;
+ } else {
+ cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
+ cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
+ }
+ if (!CHIP_IS_E1x(bp))
+ cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
+ else
+ cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
+
+ cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
+ cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
+ cp->irq_arr[1].status_blk = bp->def_status_blk;
+ cp->irq_arr[1].status_blk_num = DEF_SB_ID;
+ cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
+
+ cp->num_irq = 2;
+}
+
+static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
+ void *data)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ if (ops == NULL)
+ return -EINVAL;
+
+ bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!bp->cnic_kwq)
+ return -ENOMEM;
+
+ bp->cnic_kwq_cons = bp->cnic_kwq;
+ bp->cnic_kwq_prod = bp->cnic_kwq;
+ bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
+
+ bp->cnic_spq_pending = 0;
+ bp->cnic_kwq_pending = 0;
+
+ bp->cnic_data = data;
+
+ cp->num_irq = 0;
+ cp->drv_state |= CNIC_DRV_STATE_REGD;
+ cp->iro_arr = bp->iro_arr;
+
+ bnx2x_setup_cnic_irq_info(bp);
+
+ rcu_assign_pointer(bp->cnic_ops, ops);
+
+ return 0;
+}
+
+static int bnx2x_unregister_cnic(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ mutex_lock(&bp->cnic_mutex);
+ cp->drv_state = 0;
+ rcu_assign_pointer(bp->cnic_ops, NULL);
+ mutex_unlock(&bp->cnic_mutex);
+ synchronize_rcu();
+ kfree(bp->cnic_kwq);
+ bp->cnic_kwq = NULL;
+
+ return 0;
+}
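Unregistration follows the textbook RCU retire sequence: unpublish the pointer under the writer lock, wait one grace period with synchronize_rcu() so every reader from the sketch above has left its critical section, and only then free the backing memory. Continuing the previous hypothetical example:

/* Retire the published ops structure; pairs with the readers above. */
static void ops_unregister(void)
{
	struct ops *old;

	mutex_lock(&g_mutex);
	old = rcu_dereference_protected(g_ops, lockdep_is_held(&g_mutex));
	rcu_assign_pointer(g_ops, NULL);	/* unpublish */
	mutex_unlock(&g_mutex);

	synchronize_rcu();	/* all rcu_read_lock() readers have finished */
	kfree(old);		/* no CPU can still hold the old pointer */
}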
+
+struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ /* If both iSCSI and FCoE are disabled - return NULL in
+ * order to indicate CNIC that it should not try to work
+ * with this device.
+ */
+ if (NO_ISCSI(bp) && NO_FCOE(bp))
+ return NULL;
+
+ cp->drv_owner = THIS_MODULE;
+ cp->chip_id = CHIP_ID(bp);
+ cp->pdev = bp->pdev;
+ cp->io_base = bp->regview;
+ cp->io_base2 = bp->doorbells;
+ cp->max_kwqe_pending = 8;
+ cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
+ cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
+ bnx2x_cid_ilt_lines(bp);
+ cp->ctx_tbl_len = CNIC_ILT_LINES;
+ cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
+ cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
+ cp->drv_ctl = bnx2x_drv_ctl;
+ cp->drv_register_cnic = bnx2x_register_cnic;
+ cp->drv_unregister_cnic = bnx2x_unregister_cnic;
+ cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
+ cp->iscsi_l2_client_id =
+ bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
+ cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
+
+ if (NO_ISCSI_OOO(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+
+ if (NO_ISCSI(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
+
+ if (NO_FCOE(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
+
+ DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
+ "starting cid %d\n",
+ cp->ctx_blk_size,
+ cp->ctx_tbl_offset,
+ cp->ctx_tbl_len,
+ cp->starting_cid);
+ return cp;
+}
+EXPORT_SYMBOL(bnx2x_cnic_probe);
+
+#endif /* BCM_CNIC */
+
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
new file mode 100644
index 000000000000..fc7bd0f23c0b
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -0,0 +1,7177 @@
+/* bnx2x_reg.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Each register description starts with the register access type, followed
+ * by its size in bits. For example [RW 32]. The access types are:
+ * R - Read only
+ * RC - Clear on read
+ * RW - Read/Write
+ * ST - Statistics register (clear on read)
+ * W - Write only
+ * WB - Wide bus register - the size is over 32 bits and it should be
+ * read/write in consecutive 32 bits accesses
+ * WR - Write Clear (write 1 to clear the bit)
+ *
+ */
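Of the access types listed above, WB is the one that needs care in driver code: the register is wider than 32 bits and must be accessed as consecutive 32-bit reads or writes. A hedged sketch of a 64-bit WB read, assuming an ioremap()ed BAR base and an illustrative offset (low dword first, per this sketch's own convention):

#include <linux/io.h>

/* Read a 64-bit wide-bus (WB) register as two consecutive 32-bit
 * accesses; 'regview' is the ioremap()ed register BAR base. */
static u64 wb_read64(void __iomem *regview, u32 off)
{
	u64 lo = readl(regview + off);		/* low 32 bits */
	u64 hi = readl(regview + off + 4);	/* high 32 bits */

	return lo | (hi << 32);
}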
+#ifndef BNX2X_REG_H
+#define BNX2X_REG_H
+
+#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
+#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2)
+#define ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU (0x1<<5)
+#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT (0x1<<3)
+#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR (0x1<<4)
+#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND (0x1<<1)
+/* [RW 1] Initiate the ATC array - reset all the valid bits */
+#define ATC_REG_ATC_INIT_ARRAY 0x1100b8
+/* [R 1] ATC initialization done */
+#define ATC_REG_ATC_INIT_DONE 0x1100bc
+/* [RC 6] Interrupt register #0 read clear */
+#define ATC_REG_ATC_INT_STS_CLR 0x1101c0
+/* [RW 5] Parity mask register #0 read/write */
+#define ATC_REG_ATC_PRTY_MASK 0x1101d8
+/* [RC 5] Parity register #0 read clear */
+#define ATC_REG_ATC_PRTY_STS_CLR 0x1101d0
+/* [RW 19] Interrupt mask register #0 read/write */
+#define BRB1_REG_BRB1_INT_MASK 0x60128
+/* [R 19] Interrupt register #0 read */
+#define BRB1_REG_BRB1_INT_STS 0x6011c
+/* [RW 4] Parity mask register #0 read/write */
+#define BRB1_REG_BRB1_PRTY_MASK 0x60138
+/* [R 4] Parity register #0 read */
+#define BRB1_REG_BRB1_PRTY_STS 0x6012c
+/* [RC 4] Parity register #0 read clear */
+#define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130
+/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
+ * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
+ * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
+ * following reset, the first RBC access to this register must be a write;
+ * there can be no more RBC writes after the first one; there can be any
+ * number of RBC reads following the first write; RBC accesses not following
+ * these rules will result in a hang condition. */
+#define BRB1_REG_FREE_LIST_PRS_CRDT 0x60200
+/* [RW 10] The number of free blocks below which the full signal to class 0
+ * is asserted */
+#define BRB1_REG_FULL_0_XOFF_THRESHOLD_0 0x601d0
+#define BRB1_REG_FULL_0_XOFF_THRESHOLD_1 0x60230
+/* [RW 11] The number of free blocks above which the full signal to class 0
+ * is de-asserted */
+#define BRB1_REG_FULL_0_XON_THRESHOLD_0 0x601d4
+#define BRB1_REG_FULL_0_XON_THRESHOLD_1 0x60234
+/* [RW 11] The number of free blocks below which the full signal to class 1
+ * is asserted */
+#define BRB1_REG_FULL_1_XOFF_THRESHOLD_0 0x601d8
+#define BRB1_REG_FULL_1_XOFF_THRESHOLD_1 0x60238
+/* [RW 11] The number of free blocks above which the full signal to class 1
+ * is de-asserted */
+#define BRB1_REG_FULL_1_XON_THRESHOLD_0 0x601dc
+#define BRB1_REG_FULL_1_XON_THRESHOLD_1 0x6023c
+/* [RW 11] The number of free blocks below which the full signal to the LB
+ * port is asserted */
+#define BRB1_REG_FULL_LB_XOFF_THRESHOLD 0x601e0
+/* [RW 10] The number of free blocks above which the full signal to the LB
+ * port is de-asserted */
+#define BRB1_REG_FULL_LB_XON_THRESHOLD 0x601e4
+/* [RW 10] The number of free blocks above which the High_llfc signal to
+ interface #n is de-asserted. */
+#define BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0 0x6014c
+/* [RW 10] The number of free blocks below which the High_llfc signal to
+ interface #n is asserted. */
+#define BRB1_REG_HIGH_LLFC_LOW_THRESHOLD_0 0x6013c
+/* [RW 11] The number of blocks guaranteed for the LB port */
+#define BRB1_REG_LB_GUARANTIED 0x601ec
+/* [RW 11] The hysteresis on the guaranteed buffer space for the LB port
+ * before signaling XON. */
+#define BRB1_REG_LB_GUARANTIED_HYST 0x60264
+/* [RW 24] LL RAM data. */
+#define BRB1_REG_LL_RAM 0x61000
+/* [RW 10] The number of free blocks above which the Low_llfc signal to
+ interface #n is de-asserted. */
+#define BRB1_REG_LOW_LLFC_HIGH_THRESHOLD_0 0x6016c
+/* [RW 10] The number of free blocks below which the Low_llfc signal to
+ interface #n is asserted. */
+#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c
+/* [RW 11] The number of blocks guaranteed for class 0 in MAC 0. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED 0x60244
+/* [RW 11] The hysteresis on the guaranteed buffer space for class 0 in MAC
+ * 0 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST 0x60254
+/* [RW 11] The number of blocks guaranteed for class 1 in MAC 0. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED 0x60248
+/* [RW 11] The hysteresis on the guaranteed buffer space for class 1 in MAC
+ * 0 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST 0x60258
+/* [RW 11] The number of blocks guaranteed for class 0 in MAC 1. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED 0x6024c
+/* [RW 11] The hysteresis on the guaranteed buffer space for class 0 in MAC
+ * 1 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST 0x6025c
+/* [RW 11] The number of blocks guaranteed for class 1 in MAC 1. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED 0x60250
+/* [RW 11] The hysteresis on the guaranteed buffer space for class 1 in MAC
+ * 1 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST 0x60260
+/* [RW 11] The number of blocks guaranteed for the MAC port. The register is
+ * applicable only when per_class_guaranty_mode is reset. */
+#define BRB1_REG_MAC_GUARANTIED_0 0x601e8
+#define BRB1_REG_MAC_GUARANTIED_1 0x60240
+/* [R 24] The number of full blocks. */
+#define BRB1_REG_NUM_OF_FULL_BLOCKS 0x60090
+/* [ST 32] The number of cycles that the write_full signal towards MAC #0
+ was asserted. */
+#define BRB1_REG_NUM_OF_FULL_CYCLES_0 0x600c8
+#define BRB1_REG_NUM_OF_FULL_CYCLES_1 0x600cc
+#define BRB1_REG_NUM_OF_FULL_CYCLES_4 0x600d8
+/* [ST 32] The number of cycles that the pause signal towards MAC #0 was
+ asserted. */
+#define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8
+#define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc
+/* [RW 10] The number of free blocks below which the pause signal to class 0
+ * is asserted */
+#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 0x601c0
+#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 0x60220
+/* [RW 11] The number of free blocks above which the pause signal to class 0
+ * is de-asserted */
+#define BRB1_REG_PAUSE_0_XON_THRESHOLD_0 0x601c4
+#define BRB1_REG_PAUSE_0_XON_THRESHOLD_1 0x60224
+/* [RW 11] The number of free blocks below which the pause signal to class 1
+ * is asserted */
+#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0 0x601c8
+#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 0x60228
+/* [RW 11] The number of free blocks above which the pause signal to class 1
+ * is de-asserted */
+#define BRB1_REG_PAUSE_1_XON_THRESHOLD_0 0x601cc
+#define BRB1_REG_PAUSE_1_XON_THRESHOLD_1 0x6022c
+/* [RW 10] Write client 0: De-assert pause threshold. Not Functional */
+#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078
+#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c
+/* [RW 10] Write client 0: Assert pause threshold. */
+#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068
+#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c
+/* [R 24] The number of full blocks occupied by port. */
+#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094
+/* [RW 1] Reset the design by software. */
+#define BRB1_REG_SOFT_RESET 0x600dc
+/* [R 5] Used to read the value of the XX protection CAM occupancy counter. */
+#define CCM_REG_CAM_OCCUP 0xd0188
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CCM_CFC_IFEN 0xd003c
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CCM_CQM_IFEN 0xd000c
+/* [RW 1] If set the Q index; received from the QM is inserted to event ID.
+ Otherwise 0 is inserted. */
+#define CCM_REG_CCM_CQM_USE_Q 0xd00c0
+/* [RW 11] Interrupt mask register #0 read/write */
+#define CCM_REG_CCM_INT_MASK 0xd01e4
+/* [R 11] Interrupt register #0 read */
+#define CCM_REG_CCM_INT_STS 0xd01d8
+/* [RW 27] Parity mask register #0 read/write */
+#define CCM_REG_CCM_PRTY_MASK 0xd01f4
+/* [R 27] Parity register #0 read */
+#define CCM_REG_CCM_PRTY_STS 0xd01e8
+/* [RC 27] Parity register #0 read clear */
+#define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec
+/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
+ REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+ Is used to determine the number of the AG context REG-pairs written back;
+ when the input message Reg1WbFlg isn't set. */
+#define CCM_REG_CCM_REG0_SZ 0xd00c4
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CCM_STORM0_IFEN 0xd0004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CCM_STORM1_IFEN 0xd0008
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define CCM_REG_CDU_AG_RD_IFEN 0xd0030
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+ are disregarded; all other signals are treated as usual; if 1 - normal
+ activity. */
+#define CCM_REG_CDU_AG_WR_IFEN 0xd002c
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define CCM_REG_CDU_SM_RD_IFEN 0xd0038
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+ input is disregarded; all other signals are treated as usual; if 1 -
+ normal activity. */
+#define CCM_REG_CDU_SM_WR_IFEN 0xd0034
+/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 1 at start-up. */
+#define CCM_REG_CFC_INIT_CRD 0xd0204
+/* [RW 2] Auxiliary counter flag Q number 1. */
+#define CCM_REG_CNT_AUX1_Q 0xd00c8
+/* [RW 2] Auxiliary counter flag Q number 2. */
+#define CCM_REG_CNT_AUX2_Q 0xd00cc
+/* [RW 28] The CM header value for QM request (primary). */
+#define CCM_REG_CQM_CCM_HDR_P 0xd008c
+/* [RW 28] The CM header value for QM request (secondary). */
+#define CCM_REG_CQM_CCM_HDR_S 0xd0090
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CQM_CCM_IFEN 0xd0014
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 32 at start-up. */
+#define CCM_REG_CQM_INIT_CRD 0xd020c
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_CQM_P_WEIGHT 0xd00b8
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_CQM_S_WEIGHT 0xd00bc
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CSDM_IFEN 0xd0018
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the SDM interface is detected. */
+#define CCM_REG_CSDM_LENGTH_MIS 0xd0170
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_CSDM_WEIGHT 0xd00b4
+/* [RW 28] The CM header for QM formatting in case of an error in the QM
+ inputs. */
+#define CCM_REG_ERR_CCM_HDR 0xd0094
+/* [RW 8] The Event ID in case the input message ErrorFlg is set. */
+#define CCM_REG_ERR_EVNT_ID 0xd0098
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define CCM_REG_FIC0_INIT_CRD 0xd0210
+/* [RW 8] FIC1 output initial credit. Max credit available - 255.Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define CCM_REG_FIC1_INIT_CRD 0xd0214
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+ - strict priority defined by ~ccm_registers_gr_ag_pr.gr_ag_pr;
+ ~ccm_registers_gr_ld0_pr.gr_ld0_pr and
+ ~ccm_registers_gr_ld1_pr.gr_ld1_pr. Groups are according to channels and
+ outputs to STORM: aggregation; load FIC0; load FIC1 and store. */
+#define CCM_REG_GR_ARB_TYPE 0xd015c
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is assumed that the Store channel priority is
+ the complement to 4 of the rest of the priorities - Aggregation channel;
+ Load (FIC0) channel and Load (FIC1). */
+#define CCM_REG_GR_LD0_PR 0xd0164
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is assumed that the Store channel priority is
+ the complement to 4 of the rest of the priorities - Aggregation channel;
+ Load (FIC0) channel and Load (FIC1). */
+#define CCM_REG_GR_LD1_PR 0xd0168
+/* [RW 2] General flags index. */
+#define CCM_REG_INV_DONE_Q 0xd0108
+/* [RW 4] The number of double REG-pairs(128 bits); loaded from the STORM
+ context and sent to STORM; for a specific connection type. The double
+ REG-pairs are used in order to align to STORM context row size of 128
+ bits. The offset of these data in the STORM context is always 0. Index
+ _(0..15) stands for the connection type (one of 16). */
+#define CCM_REG_N_SM_CTX_LD_0 0xd004c
+#define CCM_REG_N_SM_CTX_LD_1 0xd0050
+#define CCM_REG_N_SM_CTX_LD_2 0xd0054
+#define CCM_REG_N_SM_CTX_LD_3 0xd0058
+#define CCM_REG_N_SM_CTX_LD_4 0xd005c
+/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_PBF_IFEN 0xd0028
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the pbf interface is detected. */
+#define CCM_REG_PBF_LENGTH_MIS 0xd0180
+/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_PBF_WEIGHT 0xd00ac
+#define CCM_REG_PHYS_QNUM1_0 0xd0134
+#define CCM_REG_PHYS_QNUM1_1 0xd0138
+#define CCM_REG_PHYS_QNUM2_0 0xd013c
+#define CCM_REG_PHYS_QNUM2_1 0xd0140
+#define CCM_REG_PHYS_QNUM3_0 0xd0144
+#define CCM_REG_PHYS_QNUM3_1 0xd0148
+#define CCM_REG_QOS_PHYS_QNUM0_0 0xd0114
+#define CCM_REG_QOS_PHYS_QNUM0_1 0xd0118
+#define CCM_REG_QOS_PHYS_QNUM1_0 0xd011c
+#define CCM_REG_QOS_PHYS_QNUM1_1 0xd0120
+#define CCM_REG_QOS_PHYS_QNUM2_0 0xd0124
+#define CCM_REG_QOS_PHYS_QNUM2_1 0xd0128
+#define CCM_REG_QOS_PHYS_QNUM3_0 0xd012c
+#define CCM_REG_QOS_PHYS_QNUM3_1 0xd0130
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define CCM_REG_STORM_CCM_IFEN 0xd0010
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the STORM interface is detected. */
+#define CCM_REG_STORM_LENGTH_MIS 0xd016c
+/* [RW 3] The weight of the STORM input in the WRR (Weighted Round Robin)
+ mechanism. 0 stands for weight 8 (the most prioritised); 1 stands for
+ weight 1 (least prioritised); 2 stands for weight 2 (more prioritised);
+ etc. */
+#define CCM_REG_STORM_WEIGHT 0xd009c
+/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define CCM_REG_TSEM_IFEN 0xd001c
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the tsem interface is detected. */
+#define CCM_REG_TSEM_LENGTH_MIS 0xd0174
+/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_TSEM_WEIGHT 0xd00a0
+/* [RW 1] Input usem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define CCM_REG_USEM_IFEN 0xd0024
+/* [RC 1] Set when message length mismatch (relative to last indication) at
+ the usem interface is detected. */
+#define CCM_REG_USEM_LENGTH_MIS 0xd017c
+/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_USEM_WEIGHT 0xd00a8
+/* [RW 1] Input xsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define CCM_REG_XSEM_IFEN 0xd0020
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the xsem interface is detected. */
+#define CCM_REG_XSEM_LENGTH_MIS 0xd0178
+/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_XSEM_WEIGHT 0xd00a4
+/* [RW 19] Indirect access to the descriptor table of the XX protection
+ mechanism. The fields are: [5:0] - message length; [12:6] - message
+ pointer; [18:13] - next pointer. */
+#define CCM_REG_XX_DESCR_TABLE 0xd0300
+#define CCM_REG_XX_DESCR_TABLE_SIZE 24
+/* [R 7] Used to read the value of XX protection Free counter. */
+#define CCM_REG_XX_FREE 0xd0184
+/* [RW 6] Initial value for the credit counter; responsible for filling
+ the Input Stage XX protection buffer with the XX protection pending
+ messages. Max credit available - 127. Write writes the initial credit
+ value; read returns the current value of the credit counter. Must be
+ initialized to maximum XX protected message size - 2 at start-up. */
+#define CCM_REG_XX_INIT_CRD 0xd0220
+/* [RW 7] The maximum number of pending messages; which may be stored in XX
+ protection. At read the ~ccm_registers_xx_free.xx_free counter is read.
+ At write comprises the start value of the ~ccm_registers_xx_free.xx_free
+ counter. */
+#define CCM_REG_XX_MSG_NUM 0xd0224
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define CCM_REG_XX_OVFL_EVNT_ID 0xd0044
+/* [RW 18] Indirect access to the XX table of the XX protection mechanism.
+ The fields are: [5:0] - tail pointer; [11:6] - Link List size; [17:12] -
+ header pointer. */
+#define CCM_REG_XX_TABLE 0xd0280
+#define CDU_REG_CDU_CHK_MASK0 0x101000
+#define CDU_REG_CDU_CHK_MASK1 0x101004
+#define CDU_REG_CDU_CONTROL0 0x101008
+#define CDU_REG_CDU_DEBUG 0x101010
+#define CDU_REG_CDU_GLOBAL_PARAMS 0x101020
+/* [RW 7] Interrupt mask register #0 read/write */
+#define CDU_REG_CDU_INT_MASK 0x10103c
+/* [R 7] Interrupt register #0 read */
+#define CDU_REG_CDU_INT_STS 0x101030
+/* [RW 5] Parity mask register #0 read/write */
+#define CDU_REG_CDU_PRTY_MASK 0x10104c
+/* [R 5] Parity register #0 read */
+#define CDU_REG_CDU_PRTY_STS 0x101040
+/* [RC 5] Parity register #0 read clear */
+#define CDU_REG_CDU_PRTY_STS_CLR 0x101044
+/* [RC 32] logging of error data in case of a CDU load error:
+ {expected_cid[15:0]; expected_type[2:0]; expected_region[2:0];
+ active_error; type_error; actual_active; actual_compressed_context}; */
+#define CDU_REG_ERROR_DATA 0x101014
+/* [WB 216] L1TT ram access. Each entry has the following format:
+ {merge_regions[7:0]; offset12[5:0]...offset0[5:0];
+ length12[5:0]...length0[5:0]; id12[3:0]...id0[3:0]} */
+#define CDU_REG_L1TT 0x101800
+/* [WB 24] MATT ram access. Each entry has the following
+ format: {RegionLength[11:0]; RegionOffset[11:0]} */
+#define CDU_REG_MATT 0x101100
+/* [RW 1] when this bit is set the CDU operates in e1hmf mode */
+#define CDU_REG_MF_MODE 0x101050
+/* [R 1] indication that initializing the activity counter by the hardware
+ was done. */
+#define CFC_REG_AC_INIT_DONE 0x104078
+/* [RW 13] activity counter ram access */
+#define CFC_REG_ACTIVITY_COUNTER 0x104400
+#define CFC_REG_ACTIVITY_COUNTER_SIZE 256
+/* [R 1] indication that initializing the CAMs by the hardware was done. */
+#define CFC_REG_CAM_INIT_DONE 0x10407c
+/* [RW 2] Interrupt mask register #0 read/write */
+#define CFC_REG_CFC_INT_MASK 0x104108
+/* [R 2] Interrupt register #0 read */
+#define CFC_REG_CFC_INT_STS 0x1040fc
+/* [RC 2] Interrupt register #0 read clear */
+#define CFC_REG_CFC_INT_STS_CLR 0x104100
+/* [RW 4] Parity mask register #0 read/write */
+#define CFC_REG_CFC_PRTY_MASK 0x104118
+/* [R 4] Parity register #0 read */
+#define CFC_REG_CFC_PRTY_STS 0x10410c
+/* [RC 4] Parity register #0 read clear */
+#define CFC_REG_CFC_PRTY_STS_CLR 0x104110
+/* [RW 21] CID cam access (21:1 - Data; valid - 0) */
+#define CFC_REG_CID_CAM 0x104800
+#define CFC_REG_CONTROL0 0x104028
+#define CFC_REG_DEBUG0 0x104050
+/* [RW 14] indicates per error (in #cfc_registers_cfc_error_vector.cfc_error
+ vector) whether the cfc should be disabled upon it */
+#define CFC_REG_DISABLE_ON_ERROR 0x104044
+/* [RC 14] CFC error vector. when the CFC detects an internal error it will
+ set one of these bits. the bit description can be found in CFC
+ specifications */
+#define CFC_REG_ERROR_VECTOR 0x10403c
+/* [WB 93] LCID info ram access */
+#define CFC_REG_INFO_RAM 0x105000
+#define CFC_REG_INFO_RAM_SIZE 1024
+#define CFC_REG_INIT_REG 0x10404c
+#define CFC_REG_INTERFACES 0x104058
+/* [RW 24] {weight_load_client7[2:0] to weight_load_client0[2:0]}. this
+ field allows changing the priorities of the weighted-round-robin arbiter
+ which selects which CFC load client should be served next */
+#define CFC_REG_LCREQ_WEIGHTS 0x104084
+/* [RW 16] Link List ram access; data = {prev_lcid; next_lcid} */
+#define CFC_REG_LINK_LIST 0x104c00
+#define CFC_REG_LINK_LIST_SIZE 256
+/* [R 1] indication that initializing the link list by the hardware was done. */
+#define CFC_REG_LL_INIT_DONE 0x104074
+/* [R 9] Number of allocated LCIDs which are at empty state */
+#define CFC_REG_NUM_LCIDS_ALLOC 0x104020
+/* [R 9] Number of Arriving LCIDs in Link List Block */
+#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004
+#define CFC_REG_NUM_LCIDS_INSIDE_PF 0x104120
+/* [R 9] Number of Leaving LCIDs in Link List Block */
+#define CFC_REG_NUM_LCIDS_LEAVING 0x104018
+#define CFC_REG_WEAK_ENABLE_PF 0x104124
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define CSDM_REG_AGG_INT_EVENT_0 0xc2038
+#define CSDM_REG_AGG_INT_EVENT_10 0xc2060
+#define CSDM_REG_AGG_INT_EVENT_11 0xc2064
+#define CSDM_REG_AGG_INT_EVENT_12 0xc2068
+#define CSDM_REG_AGG_INT_EVENT_13 0xc206c
+#define CSDM_REG_AGG_INT_EVENT_14 0xc2070
+#define CSDM_REG_AGG_INT_EVENT_15 0xc2074
+#define CSDM_REG_AGG_INT_EVENT_16 0xc2078
+#define CSDM_REG_AGG_INT_EVENT_2 0xc2040
+#define CSDM_REG_AGG_INT_EVENT_3 0xc2044
+#define CSDM_REG_AGG_INT_EVENT_4 0xc2048
+#define CSDM_REG_AGG_INT_EVENT_5 0xc204c
+#define CSDM_REG_AGG_INT_EVENT_6 0xc2050
+#define CSDM_REG_AGG_INT_EVENT_7 0xc2054
+#define CSDM_REG_AGG_INT_EVENT_8 0xc2058
+#define CSDM_REG_AGG_INT_EVENT_9 0xc205c
+/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
+ or auto-mask-mode (1) */
+#define CSDM_REG_AGG_INT_MODE_10 0xc21e0
+#define CSDM_REG_AGG_INT_MODE_11 0xc21e4
+#define CSDM_REG_AGG_INT_MODE_12 0xc21e8
+#define CSDM_REG_AGG_INT_MODE_13 0xc21ec
+#define CSDM_REG_AGG_INT_MODE_14 0xc21f0
+#define CSDM_REG_AGG_INT_MODE_15 0xc21f4
+#define CSDM_REG_AGG_INT_MODE_16 0xc21f8
+#define CSDM_REG_AGG_INT_MODE_6 0xc21d0
+#define CSDM_REG_AGG_INT_MODE_7 0xc21d4
+#define CSDM_REG_AGG_INT_MODE_8 0xc21d8
+#define CSDM_REG_AGG_INT_MODE_9 0xc21dc
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define CSDM_REG_CFC_RSP_START_ADDR 0xc2008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define CSDM_REG_CMP_COUNTER_MAX0 0xc201c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define CSDM_REG_CMP_COUNTER_MAX1 0xc2020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define CSDM_REG_CMP_COUNTER_MAX2 0xc2024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define CSDM_REG_CMP_COUNTER_MAX3 0xc2028
+/* [RW 13] The start address in the internal RAM for the completion
+ counters. */
+#define CSDM_REG_CMP_COUNTER_START_ADDR 0xc200c
+/* [RW 32] Interrupt mask register #0 read/write */
+#define CSDM_REG_CSDM_INT_MASK_0 0xc229c
+#define CSDM_REG_CSDM_INT_MASK_1 0xc22ac
+/* [R 32] Interrupt register #0 read */
+#define CSDM_REG_CSDM_INT_STS_0 0xc2290
+#define CSDM_REG_CSDM_INT_STS_1 0xc22a0
+/* [RW 11] Parity mask register #0 read/write */
+#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc
+/* [R 11] Parity register #0 read */
+#define CSDM_REG_CSDM_PRTY_STS 0xc22b0
+/* [RC 11] Parity register #0 read clear */
+#define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4
+#define CSDM_REG_ENABLE_IN1 0xc2238
+#define CSDM_REG_ENABLE_IN2 0xc223c
+#define CSDM_REG_ENABLE_OUT1 0xc2240
+#define CSDM_REG_ENABLE_OUT2 0xc2244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+ interface without receiving any ACK. */
+#define CSDM_REG_INIT_CREDIT_PXP_CTRL 0xc24bc
+/* [ST 32] The number of ACK after placement messages received */
+#define CSDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc227c
+/* [ST 32] The number of packet end messages received from the parser */
+#define CSDM_REG_NUM_OF_PKT_END_MSG 0xc2274
+/* [ST 32] The number of requests received from the pxp async if */
+#define CSDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc2278
+/* [ST 32] The number of commands received in queue 0 */
+#define CSDM_REG_NUM_OF_Q0_CMD 0xc2248
+/* [ST 32] The number of commands received in queue 10 */
+#define CSDM_REG_NUM_OF_Q10_CMD 0xc226c
+/* [ST 32] The number of commands received in queue 11 */
+#define CSDM_REG_NUM_OF_Q11_CMD 0xc2270
+/* [ST 32] The number of commands received in queue 1 */
+#define CSDM_REG_NUM_OF_Q1_CMD 0xc224c
+/* [ST 32] The number of commands received in queue 3 */
+#define CSDM_REG_NUM_OF_Q3_CMD 0xc2250
+/* [ST 32] The number of commands received in queue 4 */
+#define CSDM_REG_NUM_OF_Q4_CMD 0xc2254
+/* [ST 32] The number of commands received in queue 5 */
+#define CSDM_REG_NUM_OF_Q5_CMD 0xc2258
+/* [ST 32] The number of commands received in queue 6 */
+#define CSDM_REG_NUM_OF_Q6_CMD 0xc225c
+/* [ST 32] The number of commands received in queue 7 */
+#define CSDM_REG_NUM_OF_Q7_CMD 0xc2260
+/* [ST 32] The number of commands received in queue 8 */
+#define CSDM_REG_NUM_OF_Q8_CMD 0xc2264
+/* [ST 32] The number of commands received in queue 9 */
+#define CSDM_REG_NUM_OF_Q9_CMD 0xc2268
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define CSDM_REG_Q_COUNTER_START_ADDR 0xc2010
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define CSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc2548
+/* [R 1] parser fifo empty in sdm_sync block */
+#define CSDM_REG_SYNC_PARSER_EMPTY 0xc2550
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define CSDM_REG_SYNC_SYNC_EMPTY 0xc2558
+/* [RW 32] Tick for timer counter. Applicable only when
+ ~csdm_registers_timer_tick_enable.timer_tick_enable =1 */
+#define CSDM_REG_TIMER_TICK 0xc2000
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define CSEM_REG_ARB_CYCLE_SIZE 0x200034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2 */
+#define CSEM_REG_ARB_ELEMENT0 0x200020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~csem_registers_arb_element0.arb_element0 */
+#define CSEM_REG_ARB_ELEMENT1 0x200024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~csem_registers_arb_element0.arb_element0
+ and ~csem_registers_arb_element1.arb_element1 */
+#define CSEM_REG_ARB_ELEMENT2 0x200028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.Could
+ not be equal to register ~csem_registers_arb_element0.arb_element0 and
+ ~csem_registers_arb_element1.arb_element1 and
+ ~csem_registers_arb_element2.arb_element2 */
+#define CSEM_REG_ARB_ELEMENT3 0x20002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~csem_registers_arb_element0.arb_element0
+ and ~csem_registers_arb_element1.arb_element1 and
+ ~csem_registers_arb_element2.arb_element2 and
+ ~csem_registers_arb_element3.arb_element3 */
+#define CSEM_REG_ARB_ELEMENT4 0x200030
+/* [RW 32] Interrupt mask register #0 read/write */
+#define CSEM_REG_CSEM_INT_MASK_0 0x200110
+#define CSEM_REG_CSEM_INT_MASK_1 0x200120
+/* [R 32] Interrupt register #0 read */
+#define CSEM_REG_CSEM_INT_STS_0 0x200104
+#define CSEM_REG_CSEM_INT_STS_1 0x200114
+/* [RW 32] Parity mask register #0 read/write */
+#define CSEM_REG_CSEM_PRTY_MASK_0 0x200130
+#define CSEM_REG_CSEM_PRTY_MASK_1 0x200140
+/* [R 32] Parity register #0 read */
+#define CSEM_REG_CSEM_PRTY_STS_0 0x200124
+#define CSEM_REG_CSEM_PRTY_STS_1 0x200134
+/* [RC 32] Parity register #0 read clear */
+#define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128
+#define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138
+#define CSEM_REG_ENABLE_IN 0x2000a4
+#define CSEM_REG_ENABLE_OUT 0x2000a8
+/* [RW 32] This address space contains all registers and memories that are
+ placed in SEM_FAST block. The SEM_FAST registers are described in
+ appendix B. In order to access the sem_fast registers the base address
+ ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define CSEM_REG_FAST_MEMORY 0x220000
+/* [RW 1] Disables input messages from FIC0 May be updated during run_time
+ by the microcode */
+#define CSEM_REG_FIC0_DISABLE 0x200224
+/* [RW 1] Disables input messages from FIC1 May be updated during run_time
+ by the microcode */
+#define CSEM_REG_FIC1_DISABLE 0x200234
+/* [RW 15] Interrupt table. Read and write access to it is not possible in
+ the middle of the work */
+#define CSEM_REG_INT_TABLE 0x200400
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC0 */
+#define CSEM_REG_MSG_NUM_FIC0 0x200000
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC1 */
+#define CSEM_REG_MSG_NUM_FIC1 0x200004
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC0 */
+#define CSEM_REG_MSG_NUM_FOC0 0x200008
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC1 */
+#define CSEM_REG_MSG_NUM_FOC1 0x20000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC2 */
+#define CSEM_REG_MSG_NUM_FOC2 0x200010
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC3 */
+#define CSEM_REG_MSG_NUM_FOC3 0x200014
+/* [RW 1] Disables input messages from the passive buffer May be updated
+ during run_time by the microcode */
+#define CSEM_REG_PAS_DISABLE 0x20024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define CSEM_REG_PASSIVE_BUFFER 0x202000
+/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
+#define CSEM_REG_PRAM 0x240000
+/* [R 16] Valid sleeping threads indication; one bit per thread */
+#define CSEM_REG_SLEEP_THREADS_VALID 0x20026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define CSEM_REG_SLOW_EXT_STORE_EMPTY 0x2002a0
+/* [RW 16] List of free threads. There is a bit per thread. */
+#define CSEM_REG_THREADS_LIST 0x2002e4
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define CSEM_REG_TS_0_AS 0x200038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define CSEM_REG_TS_10_AS 0x200060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define CSEM_REG_TS_11_AS 0x200064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define CSEM_REG_TS_12_AS 0x200068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define CSEM_REG_TS_13_AS 0x20006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define CSEM_REG_TS_14_AS 0x200070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define CSEM_REG_TS_15_AS 0x200074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define CSEM_REG_TS_16_AS 0x200078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define CSEM_REG_TS_17_AS 0x20007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define CSEM_REG_TS_18_AS 0x200080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define CSEM_REG_TS_1_AS 0x20003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define CSEM_REG_TS_2_AS 0x200040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define CSEM_REG_TS_3_AS 0x200044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define CSEM_REG_TS_4_AS 0x200048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define CSEM_REG_TS_5_AS 0x20004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define CSEM_REG_TS_6_AS 0x200050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define CSEM_REG_TS_7_AS 0x200054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define CSEM_REG_TS_8_AS 0x200058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define CSEM_REG_TS_9_AS 0x20005c
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset the error bit
+ * for the 64 VFs; values 64-67 reset the error bit for the 4 PFs; values
+ * 68-127 are not valid. */
+#define CSEM_REG_VFPF_ERR_NUM 0x200380
+/* [RW 1] Parity mask register #0 read/write */
+#define DBG_REG_DBG_PRTY_MASK 0xc0a8
+/* [R 1] Parity register #0 read */
+#define DBG_REG_DBG_PRTY_STS 0xc09c
+/* [RC 1] Parity register #0 read clear */
+#define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0
+/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The
+ * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0;
+ * 4.Completion function=0; 5.Error handling=0 */
+#define DMAE_REG_BACKWARD_COMP_EN 0x10207c
+/* [RW 32] Commands memory. The address of command X; row Y is calculated
+ as 14*X+Y. */
+#define DMAE_REG_CMD_MEM 0x102400
+#define DMAE_REG_CMD_MEM_SIZE 224
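+/* Illustrative sketch only; not part of the original header. Using the
+ 14*X+Y row formula above; and assuming each 32-bit row occupies 4 bytes
+ of GRC address space (224 = 14 rows * 16 commands); the address of row Y
+ of command X could be computed as below. The macro name is hypothetical. */
+#define DMAE_CMD_MEM_ADDR(cmd, row) \
+	(DMAE_REG_CMD_MEM + (14 * (cmd) + (row)) * 4)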
+/* [RW 1] If 0 - the CRC-16c initial value is all zeroes; if 1 - the CRC-16c
+ initial value is all ones. */
+#define DMAE_REG_CRC16C_INIT 0x10201c
+/* [RW 1] If 0 - the CRC-16 T10 initial value is all zeroes; if 1 - the
+ CRC-16 T10 initial value is all ones. */
+#define DMAE_REG_CRC16T10_INIT 0x102020
+/* [RW 2] Interrupt mask register #0 read/write */
+#define DMAE_REG_DMAE_INT_MASK 0x102054
+/* [RW 4] Parity mask register #0 read/write */
+#define DMAE_REG_DMAE_PRTY_MASK 0x102064
+/* [R 4] Parity register #0 read */
+#define DMAE_REG_DMAE_PRTY_STS 0x102058
+/* [RC 4] Parity register #0 read clear */
+#define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c
+/* [RW 1] Command 0 go. */
+#define DMAE_REG_GO_C0 0x102080
+/* [RW 1] Command 1 go. */
+#define DMAE_REG_GO_C1 0x102084
+/* [RW 1] Command 10 go. */
+#define DMAE_REG_GO_C10 0x102088
+/* [RW 1] Command 11 go. */
+#define DMAE_REG_GO_C11 0x10208c
+/* [RW 1] Command 12 go. */
+#define DMAE_REG_GO_C12 0x102090
+/* [RW 1] Command 13 go. */
+#define DMAE_REG_GO_C13 0x102094
+/* [RW 1] Command 14 go. */
+#define DMAE_REG_GO_C14 0x102098
+/* [RW 1] Command 15 go. */
+#define DMAE_REG_GO_C15 0x10209c
+/* [RW 1] Command 2 go. */
+#define DMAE_REG_GO_C2 0x1020a0
+/* [RW 1] Command 3 go. */
+#define DMAE_REG_GO_C3 0x1020a4
+/* [RW 1] Command 4 go. */
+#define DMAE_REG_GO_C4 0x1020a8
+/* [RW 1] Command 5 go. */
+#define DMAE_REG_GO_C5 0x1020ac
+/* [RW 1] Command 6 go. */
+#define DMAE_REG_GO_C6 0x1020b0
+/* [RW 1] Command 7 go. */
+#define DMAE_REG_GO_C7 0x1020b4
+/* [RW 1] Command 8 go. */
+#define DMAE_REG_GO_C8 0x1020b8
+/* [RW 1] Command 9 go. */
+#define DMAE_REG_GO_C9 0x1020bc
+/* [RW 1] DMAE GRC Interface (Target; master) enable. If 0 - the acknowledge
+ input is disregarded; valid is deasserted; all other signals are treated
+ as usual; if 1 - normal activity. */
+#define DMAE_REG_GRC_IFEN 0x102008
+/* [RW 1] DMAE PCI Interface (Request; read; write) enable. If 0 - the
+ acknowledge input is disregarded; valid is deasserted; full is asserted;
+ all other signals are treated as usual; if 1 - normal activity. */
+#define DMAE_REG_PCI_IFEN 0x102004
+/* [RW 4] DMAE-PCI Request Interface initial credit. A write sets the
+ initial value of the credit counter related to the address; a read
+ returns the current value of the counter. */
+#define DMAE_REG_PXP_REQ_INIT_CRD 0x1020c0
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD0 0x170060
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD1 0x170064
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD2 0x170068
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD3 0x17006c
+/* [RW 28] UCM Header. */
+#define DORQ_REG_CMHEAD_RX 0x170050
+/* [RW 32] Doorbell address for RBC doorbells (function 0). */
+#define DORQ_REG_DB_ADDR0 0x17008c
+/* [RW 5] Interrupt mask register #0 read/write */
+#define DORQ_REG_DORQ_INT_MASK 0x170180
+/* [R 5] Interrupt register #0 read */
+#define DORQ_REG_DORQ_INT_STS 0x170174
+/* [RC 5] Interrupt register #0 read clear */
+#define DORQ_REG_DORQ_INT_STS_CLR 0x170178
+/* [RW 2] Parity mask register #0 read/write */
+#define DORQ_REG_DORQ_PRTY_MASK 0x170190
+/* [R 2] Parity register #0 read */
+#define DORQ_REG_DORQ_PRTY_STS 0x170184
+/* [RC 2] Parity register #0 read clear */
+#define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188
+/* [RW 8] The address to write the DPM CID to STORM. */
+#define DORQ_REG_DPM_CID_ADDR 0x170044
+/* [RW 5] The DPM mode CID extraction offset. */
+#define DORQ_REG_DPM_CID_OFST 0x170030
+/* [RW 12] The threshold of the DQ FIFO to send the almost full interrupt. */
+#define DORQ_REG_DQ_FIFO_AFULL_TH 0x17007c
+/* [RW 12] The threshold of the DQ FIFO to send the full interrupt. */
+#define DORQ_REG_DQ_FIFO_FULL_TH 0x170078
+/* [R 13] Current value of the DQ FIFO fill level according to the following
+ pointer. The range is 0 - 256 FIFO rows; where each row stands for one
+ doorbell. */
+#define DORQ_REG_DQ_FILL_LVLF 0x1700a4
+/* [R 1] DQ FIFO full status. Set when the FIFO fill level is greater than
+ or equal to the full threshold; reset on full clear. */
+#define DORQ_REG_DQ_FULL_ST 0x1700c0
+/* [RW 28] The value sent to CM header in the case of CFC load error. */
+#define DORQ_REG_ERR_CMHEAD 0x170058
+#define DORQ_REG_IF_EN 0x170004
+#define DORQ_REG_MODE_ACT 0x170008
+/* [RW 5] The normal mode CID extraction offset. */
+#define DORQ_REG_NORM_CID_OFST 0x17002c
+/* [RW 28] TCM Header when only TCP context is loaded. */
+#define DORQ_REG_NORM_CMHEAD_TX 0x17004c
+/* [RW 3] The number of simultaneous outstanding requests to Context Fetch
+ Interface. */
+#define DORQ_REG_OUTST_REQ 0x17003c
+#define DORQ_REG_PF_USAGE_CNT 0x1701d0
+#define DORQ_REG_REGN 0x170038
+/* [R 4] Current value of response A counter credit. Initial credit is
+ configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
+ register. */
+#define DORQ_REG_RSPA_CRD_CNT 0x1700ac
+/* [R 4] Current value of response B counter credit. Initial credit is
+ configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
+ register. */
+#define DORQ_REG_RSPB_CRD_CNT 0x1700b0
+/* [RW 4] The initial credit at the Doorbell Response Interface. A write
+ loads the same initial credit into rspa_crd_cnt and rspb_crd_cnt; a read
+ returns the written value. */
+#define DORQ_REG_RSP_INIT_CRD 0x170048
+/* [RW 4] Initial activity counter value on the load request; when the
+ shortcut is done. */
+#define DORQ_REG_SHRT_ACT_CNT 0x170070
+/* [RW 28] TCM Header when both ULP and TCP context is loaded. */
+#define DORQ_REG_SHRT_CMHEAD 0x170054
+#define HC_CONFIG_0_REG_ATTN_BIT_EN_0 (0x1<<4)
+#define HC_CONFIG_0_REG_BLOCK_DISABLE_0 (0x1<<0)
+#define HC_CONFIG_0_REG_INT_LINE_EN_0 (0x1<<3)
+#define HC_CONFIG_0_REG_MSI_ATTN_EN_0 (0x1<<7)
+#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2)
+#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1)
+#define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1<<0)
+#define HC_REG_AGG_INT_0 0x108050
+#define HC_REG_AGG_INT_1 0x108054
+#define HC_REG_ATTN_BIT 0x108120
+#define HC_REG_ATTN_IDX 0x108100
+#define HC_REG_ATTN_MSG0_ADDR_L 0x108018
+#define HC_REG_ATTN_MSG1_ADDR_L 0x108020
+#define HC_REG_ATTN_NUM_P0 0x108038
+#define HC_REG_ATTN_NUM_P1 0x10803c
+#define HC_REG_COMMAND_REG 0x108180
+#define HC_REG_CONFIG_0 0x108000
+#define HC_REG_CONFIG_1 0x108004
+#define HC_REG_FUNC_NUM_P0 0x1080ac
+#define HC_REG_FUNC_NUM_P1 0x1080b0
+/* [RW 3] Parity mask register #0 read/write */
+#define HC_REG_HC_PRTY_MASK 0x1080a0
+/* [R 3] Parity register #0 read */
+#define HC_REG_HC_PRTY_STS 0x108094
+/* [RC 3] Parity register #0 read clear */
+#define HC_REG_HC_PRTY_STS_CLR 0x108098
+#define HC_REG_INT_MASK 0x108108
+#define HC_REG_LEADING_EDGE_0 0x108040
+#define HC_REG_LEADING_EDGE_1 0x108048
+#define HC_REG_MAIN_MEMORY 0x108800
+#define HC_REG_MAIN_MEMORY_SIZE 152
+#define HC_REG_P0_PROD_CONS 0x108200
+#define HC_REG_P1_PROD_CONS 0x108400
+#define HC_REG_PBA_COMMAND 0x108140
+#define HC_REG_PCI_CONFIG_0 0x108010
+#define HC_REG_PCI_CONFIG_1 0x108014
+#define HC_REG_STATISTIC_COUNTERS 0x109000
+#define HC_REG_TRAILING_EDGE_0 0x108044
+#define HC_REG_TRAILING_EDGE_1 0x10804c
+#define HC_REG_UC_RAM_ADDR_0 0x108028
+#define HC_REG_UC_RAM_ADDR_1 0x108030
+#define HC_REG_USTORM_ADDR_FOR_COALESCE 0x108068
+#define HC_REG_VQID_0 0x108008
+#define HC_REG_VQID_1 0x10800c
+#define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN (0x1<<1)
+#define IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE (0x1<<0)
+#define IGU_REG_ATTENTION_ACK_BITS 0x130108
+/* [R 4] Debug: attn_fsm */
+#define IGU_REG_ATTN_FSM 0x130054
+#define IGU_REG_ATTN_MSG_ADDR_H 0x13011c
+#define IGU_REG_ATTN_MSG_ADDR_L 0x130120
+/* [R 4] Debug: [3] - attention write done message is pending (0-no pending;
+ * 1-pending). [2:0] = PFID. Pending means the attention message was sent;
+ * but write done was not received. */
+#define IGU_REG_ATTN_WRITE_DONE_PENDING 0x130030
+#define IGU_REG_BLOCK_CONFIGURATION 0x130000
+#define IGU_REG_COMMAND_REG_32LSB_DATA 0x130124
+#define IGU_REG_COMMAND_REG_CTRL 0x13012c
+/* [WB_R 32] Cleanup bit status per SB. 1 = cleanup is set. 0 = cleanup bit
+ * is clear. The bits in this register are set and cleared via the producer
+ * command. Data is valid only in addresses 0-4; all the rest are zero. */
+#define IGU_REG_CSTORM_TYPE_0_SB_CLEANUP 0x130200
+/* [R 5] Debug: ctrl_fsm */
+#define IGU_REG_CTRL_FSM 0x130064
+/* [R 1] Data available for error memory. If this bit is clear do not read
+ * from error_handling_memory. */
+#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130
+/* [RW 11] Parity mask register #0 read/write */
+#define IGU_REG_IGU_PRTY_MASK 0x1300a8
+/* [R 11] Parity register #0 read */
+#define IGU_REG_IGU_PRTY_STS 0x13009c
+/* [RC 11] Parity register #0 read clear */
+#define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0
+/* [R 4] Debug: int_handle_fsm */
+#define IGU_REG_INT_HANDLE_FSM 0x130050
+#define IGU_REG_LEADING_EDGE_LATCH 0x130134
+/* [RW 14] mapping CAM; relevant for E2 operating mode only. [0] - valid.
+ * [6:1] - vector number; [13:7] - FID (if VF - [13] = 0; [12:7] = VF
+ * number; if PF - [13] = 1; [12:10] = 0; [9:7] = PF number); */
+#define IGU_REG_MAPPING_MEMORY 0x131000
+#define IGU_REG_MAPPING_MEMORY_SIZE 136
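+/* Illustrative sketch only; not part of the original header. A hypothetical
+ helper composing a 14-bit CAM entry per the encoding above ([0] valid;
+ [6:1] vector number; [13:7] FID). */
+#define IGU_CAM_ENTRY(valid, vector, fid) \
+	(((valid) & 0x1) | (((vector) & 0x3f) << 1) | (((fid) & 0x7f) << 7))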
+#define IGU_REG_PBA_STATUS_LSB 0x130138
+#define IGU_REG_PBA_STATUS_MSB 0x13013c
+#define IGU_REG_PCI_PF_MSI_EN 0x130140
+#define IGU_REG_PCI_PF_MSIX_EN 0x130144
+#define IGU_REG_PCI_PF_MSIX_FUNC_MASK 0x130148
+/* [WB_R 32] Each bit represents the pending bit status for that SB. 0 = no
+ * pending; 1 = pending. Pending means the interrupt was asserted and write
+ * done was not received. Data is valid only in addresses 0-4; all the rest
+ * are zero. */
+#define IGU_REG_PENDING_BITS_STATUS 0x130300
+#define IGU_REG_PF_CONFIGURATION 0x130154
+/* [RW 20] producers only. E2 mode: addresses 0-135 match the mapping
+ * memory; 136 - PF0 default prod; 137 - PF1 default prod; 138 - PF2 default
+ * prod; 139 - PF3 default prod; 140 - PF0 ATTN prod; 141 - PF1 ATTN prod;
+ * 142 - PF2 ATTN prod; 143 - PF3 ATTN prod; 144-147 reserved. E1.5 mode
+ * - In backward compatible mode; for a non default SB; each even line in
+ * the memory holds the U producer and each odd line holds the C producer.
+ * The first 128 producers are for NDSB (PF0 - 0-31; PF1 - 32-63 and so on).
+ * The last 20 producers are for the DSB of each PF. Each PF has five
+ * segments (the order inside each segment is PF0; PF1; PF2; PF3) - 128-131
+ * U prods; 132-135 C prods; 136-139 X prods; 140-143 T prods; 144-147 ATTN
+ * prods; */
+#define IGU_REG_PROD_CONS_MEMORY 0x132000
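+/* Illustrative sketch only; not part of the original header. In E2 mode;
+ assuming one producer entry per 4-byte GRC word; line N of the map above
+ (0-135 mapped lines; 136-139 PF default prods; 140-143 PF ATTN prods)
+ could be addressed as below. The macro name is hypothetical. */
+#define IGU_PROD_CONS_ADDR(line) (IGU_REG_PROD_CONS_MEMORY + (line) * 4)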
+/* [R 3] Debug: pxp_arb_fsm */
+#define IGU_REG_PXP_ARB_FSM 0x130068
+/* [RW 6] Writing a one to a bit will reset the corresponding memory. When
+ * the memory reset has finished the bit will be cleared. Bit 0 - mapping
+ * memory; Bit 1 - SB memory; Bit 2 - SB interrupt and mask register; Bit 3
+ * - MSIX memory; Bit 4 - PBA memory; Bit 5 - statistics; */
+#define IGU_REG_RESET_MEMORIES 0x130158
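+/* Illustrative sketch only; not part of the original header. Hypothetical
+ single-bit masks matching the reset-memories bit assignment above. */
+#define IGU_RESET_MEMORIES_MAPPING (0x1<<0)
+#define IGU_RESET_MEMORIES_SB (0x1<<1)
+#define IGU_RESET_MEMORIES_SB_INT_MASK (0x1<<2)
+#define IGU_RESET_MEMORIES_MSIX (0x1<<3)
+#define IGU_RESET_MEMORIES_PBA (0x1<<4)
+#define IGU_RESET_MEMORIES_STATISTICS (0x1<<5)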
+/* [R 4] Debug: sb_ctrl_fsm */
+#define IGU_REG_SB_CTRL_FSM 0x13004c
+#define IGU_REG_SB_INT_BEFORE_MASK_LSB 0x13015c
+#define IGU_REG_SB_INT_BEFORE_MASK_MSB 0x130160
+#define IGU_REG_SB_MASK_LSB 0x130164
+#define IGU_REG_SB_MASK_MSB 0x130168
+/* [RW 16] Number of commands that were dropped without causing an interrupt
+ * due to: read access to a WO BAR address; write access to a RO BAR
+ * address; any access to a reserved address; or PCI function error set
+ * while the address is not MSIX; PBA or cleanup */
+#define IGU_REG_SILENT_DROP 0x13016c
+/* [RW 10] Number of MSI/MSIX/ATTN messages sent for the function: 0-63 -
+ * number of MSIX messages per VF; 64-67 - number of MSI/MSIX messages per
+ * PF; 68-71 - number of ATTN messages per PF */
+#define IGU_REG_STATISTIC_NUM_MESSAGE_SENT 0x130800
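+/* Illustrative sketch only; not part of the original header. Assuming one
+ 32-bit counter per 4-byte word; the per-VF and per-PF counters described
+ above could be addressed as below. The macro names are hypothetical. */
+#define IGU_STAT_MSIX_VF_ADDR(vf) \
+	(IGU_REG_STATISTIC_NUM_MESSAGE_SENT + (vf) * 4)
+#define IGU_STAT_MSI_MSIX_PF_ADDR(pf) \
+	(IGU_REG_STATISTIC_NUM_MESSAGE_SENT + (64 + (pf)) * 4)
+#define IGU_STAT_ATTN_PF_ADDR(pf) \
+	(IGU_REG_STATISTIC_NUM_MESSAGE_SENT + (68 + (pf)) * 4)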
+/* [RW 32] Number of cycles the timer masks the IGU interrupt when a timer
+ * mask command arrives. The value must be bigger than 100. */
+#define IGU_REG_TIMER_MASKING_VALUE 0x13003c
+#define IGU_REG_TRAILING_EDGE_LATCH 0x130104
+#define IGU_REG_VF_CONFIGURATION 0x130170
+/* [WB_R 32] Each bit represent write done pending bits status for that SB
+ * (MSI/MSIX message was sent and write done was not received yet). 0 =
+ * clear; 1 = set. Data valid only in addresses 0-4. all the rest are zero. */
+#define IGU_REG_WRITE_DONE_PENDING 0x130480
+#define MCP_A_REG_MCPR_SCRATCH 0x3a0000
+#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c
+#define MCP_REG_MCPR_GP_INPUTS 0x800c0
+#define MCP_REG_MCPR_GP_OENABLE 0x800c8
+#define MCP_REG_MCPR_GP_OUTPUTS 0x800c4
+#define MCP_REG_MCPR_IMC_COMMAND 0x85900
+#define MCP_REG_MCPR_IMC_DATAREG0 0x85920
+#define MCP_REG_MCPR_IMC_SLAVE_CONTROL 0x85904
+#define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424
+#define MCP_REG_MCPR_NVM_ADDR 0x8640c
+#define MCP_REG_MCPR_NVM_CFG4 0x8642c
+#define MCP_REG_MCPR_NVM_COMMAND 0x86400
+#define MCP_REG_MCPR_NVM_READ 0x86410
+#define MCP_REG_MCPR_NVM_SW_ARB 0x86420
+#define MCP_REG_MCPR_NVM_WRITE 0x86408
+#define MCP_REG_MCPR_SCRATCH 0xa0000
+#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK (0x1<<1)
+#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK (0x1<<0)
+/* [R 32] read first 32 bit after inversion of function 0. mapped as
+ follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp;
+ [6] GPIO1 function 1; [7] GPIO2 function 1; [8] GPIO3 function 1; [9]
+ GPIO4 function 1; [10] PCIE glue/PXP VPD event function0; [11] PCIE
+ glue/PXP VPD event function1; [12] PCIE glue/PXP Expansion ROM event0;
+ [13] PCIE glue/PXP Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16]
+ MSI/X indication for mcp; [17] MSI/X indication for function 1; [18] BRB
+ Parity error; [19] BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw
+ interrupt; [22] SRC Parity error; [23] SRC Hw interrupt; [24] TSDM Parity
+ error; [25] TSDM Hw interrupt; [26] TCM Parity error; [27] TCM Hw
+ interrupt; [28] TSEMI Parity error; [29] TSEMI Hw interrupt; [30] PBF
+ Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 0xa42c
+#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_1 0xa430
+/* [R 32] read first 32 bit after inversion of mcp. mapped as follows: [0]
+ NIG attention for function0; [1] NIG attention for function1; [2] GPIO1
+ mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1;
+ [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10]
+ PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event
+ function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP
+ Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for
+ mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19]
+ BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC
+ Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw
+ interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI
+ Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw
+ interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_1_MCP 0xa434
+/* [R 32] read second 32 bit after inversion of function 0. mapped as
+ follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+ interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 0xa438
+#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_1 0xa43c
+/* [R 32] read second 32 bit after inversion of mcp. mapped as follows: [0]
+ PBClient Parity error; [1] PBClient Hw interrupt; [2] QM Parity error;
+ [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw interrupt;
+ [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9]
+ XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12]
+ DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity
+ error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux
+ PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt;
+ [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error;
+ [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt;
+ [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error;
+ [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_2_MCP 0xa440
+/* [R 32] read third 32 bit after inversion of function 0. mapped as
+ follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity
+ error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; [5]
+ PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+ attn1; */
+#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 0xa444
+#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_1 0xa448
+/* [R 32] read third 32 bit after inversion of mcp. mapped as follows: [0]
+ CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity error; [3] PXP
+ Hw interrupt; [4] PXPpciClockClient Parity error; [5] PXPpciClockClient
+ Hw interrupt; [6] CFC Parity error; [7] CFC Hw interrupt; [8] CDU Parity
+ error; [9] CDU Hw interrupt; [10] DMAE Parity error; [11] DMAE Hw
+ interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) Hw interrupt; [14]
+ MISC Parity error; [15] MISC Hw interrupt; [16] pxp_misc_mps_attn; [17]
+ Flash event; [18] SMB event; [19] MCP attn0; [20] MCP attn1; [21] SW
+ timers attn_1 func0; [22] SW timers attn_2 func0; [23] SW timers attn_3
+ func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW timers attn_1
+ func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 func1; [29] SW
+ timers attn_4 func1; [30] General attn0; [31] General attn1; */
+#define MISC_REG_AEU_AFTER_INVERT_3_MCP 0xa44c
+/* [R 32] read fourth 32 bit after inversion of function 0. mapped as
+ follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+ [14] General attn16; [15] General attn17; [16] General attn18; [17]
+ General attn19; [18] General attn20; [19] General attn21; [20] Main power
+ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+ Latched timeout attention; [27] GRC Latched reserved access attention;
+ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 0xa450
+#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_1 0xa454
+/* [R 32] read fourth 32 bit after inversion of mcp. mapped as follows: [0]
+ General attn2; [1] General attn3; [2] General attn4; [3] General attn5;
+ [4] General attn6; [5] General attn7; [6] General attn8; [7] General
+ attn9; [8] General attn10; [9] General attn11; [10] General attn12; [11]
+ General attn13; [12] General attn14; [13] General attn15; [14] General
+ attn16; [15] General attn17; [16] General attn18; [17] General attn19;
+ [18] General attn20; [19] General attn21; [20] Main power interrupt; [21]
+ RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN Latched attn; [24]
+ RBCU Latched attn; [25] RBCP Latched attn; [26] GRC Latched timeout
+ attention; [27] GRC Latched reserved access attention; [28] MCP Latched
+ rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched
+ ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_AFTER_INVERT_4_MCP 0xa458
+/* [R 32] Read fifth 32 bit after inversion of function 0. Mapped as
+ * follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * CNIG attention (reserved); [7] CNIG parity (reserved); [31-8] Reserved; */
+#define MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 0xa700
+/* [W 14] Writing to this register clears the latched
+ signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in
+ d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP
+ latch; one in d5 clears GRC Latched timeout attention; one in d6 clears
+ GRC Latched reserved access attention; one in d7 clears Latched
+ rom_parity; one in d8 clears Latched ump_rx_parity; one in d9 clears
+ Latched ump_tx_parity; one in d10 clears Latched scpad_parity (both
+ ports); one in d11 clears pxpv_misc_mps_attn; one in d12 clears
+ pxp_misc_exp_rom_attn0; one in d13 clears pxp_misc_exp_rom_attn1; reads
+ from this register return zero. */
+#define MISC_REG_AEU_CLR_LATCH_SIGNAL 0xa45c
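+/* Illustrative sketch only; not part of the original header. Hypothetical
+ write masks for the d0-d13 clear encoding above; e.g. writing
+ AEU_CLR_GRC_TIMEOUT_LATCH to this register would clear only the GRC
+ latched timeout attention. */
+#define AEU_CLR_RBCR_LATCH (0x1<<0)
+#define AEU_CLR_GRC_TIMEOUT_LATCH (0x1<<5)
+#define AEU_CLR_MCP_SCPAD_PARITY_LATCH (0x1<<10)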
+/* [RW 32] first 32b for enabling the output for function 0 output0. mapped
+ as follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
+ 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+ GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+ function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+ Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+ SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
+ indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+ [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+ SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+ TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+ TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0 0xa06c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1 0xa07c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2 0xa08c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_3 0xa09c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_5 0xa0bc
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_6 0xa0cc
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_7 0xa0dc
+/* [RW 32] first 32b for enabling the output for function 1 output0. mapped
+ as follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 function 1; [3] GPIO2 function 1; [4] GPIO3 function
+ 1; [5] GPIO4 function 1; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+ GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+ function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+ Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+ SPIO4; [15] SPIO5; [16] MSI/X indication for function 1; [17] MSI/X
+ indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+ [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+ SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+ TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+ TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 0xa10c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 0xa11c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 0xa12c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_3 0xa13c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_5 0xa15c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_6 0xa16c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_7 0xa17c
+/* [RW 32] first 32b for enabling the output for close the gate nig. mapped
+ as follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
+ 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+ GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+ function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+ Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+ SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
+ indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+ [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+ SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+ TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+ TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_NIG_0 0xa0ec
+#define MISC_REG_AEU_ENABLE1_NIG_1 0xa18c
+/* [RW 32] first 32b for enabling the output for close the gate pxp. mapped
+ as follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
+ 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+ GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+ function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+ Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+ SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
+ indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+ [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+ SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+ TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+ TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_PXP_0 0xa0fc
+#define MISC_REG_AEU_ENABLE1_PXP_1 0xa19c
+/* [RW 32] second 32b for enabling the output for function 0 output0. mapped
+ as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+ interrupt; */
+#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0 0xa070
+#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_1 0xa080
+/* [RW 32] second 32b for enabling the output for function 1 output0. mapped
+ as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+ interrupt; */
+#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0 0xa110
+#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_1 0xa120
+/* [RW 32] second 32b for enabling the output for close the gate nig. mapped
+ as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+ interrupt; */
+#define MISC_REG_AEU_ENABLE2_NIG_0 0xa0f0
+#define MISC_REG_AEU_ENABLE2_NIG_1 0xa190
+/* [RW 32] second 32b for enabling the output for close the gate pxp. mapped
+ as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+ interrupt; */
+#define MISC_REG_AEU_ENABLE2_PXP_0 0xa100
+#define MISC_REG_AEU_ENABLE2_PXP_1 0xa1a0
+/* [RW 32] third 32b for enabling the output for function 0 output0. mapped
+ as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+ Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+ [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+ attn1; */
+#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_0 0xa074
+#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_1 0xa084
+/* [RW 32] third 32b for enabling the output for function 1 output0. mapped
+ as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+ Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+ [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+ attn1; */
+#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_0 0xa114
+#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_1 0xa124
+/* [RW 32] third 32b for enabling the output for close the gate nig. mapped
+ as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+ Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+ [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+ attn1; */
+#define MISC_REG_AEU_ENABLE3_NIG_0 0xa0f4
+#define MISC_REG_AEU_ENABLE3_NIG_1 0xa194
+/* [RW 32] third 32b for enabling the output for close the gate pxp. mapped
+ as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+ Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+ [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+ attn1; */
+#define MISC_REG_AEU_ENABLE3_PXP_0 0xa104
+#define MISC_REG_AEU_ENABLE3_PXP_1 0xa1a4
+/* [RW 32] fourth 32b for enabling the output for function 0 output0.mapped
+ as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+ [14] General attn16; [15] General attn17; [16] General attn18; [17]
+ General attn19; [18] General attn20; [19] General attn21; [20] Main power
+ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+ Latched timeout attention; [27] GRC Latched reserved access attention;
+ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 0xa078
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_2 0xa098
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_4 0xa0b8
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_5 0xa0c8
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_6 0xa0d8
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_7 0xa0e8
+/* [RW 32] fourth 32b for enabling the output for function 1 output0.mapped
+ as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+ [14] General attn16; [15] General attn17; [16] General attn18; [17]
+ General attn19; [18] General attn20; [19] General attn21; [20] Main power
+ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+ Latched timeout attention; [27] GRC Latched reserved access attention;
+ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0 0xa118
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_2 0xa138
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_4 0xa158
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_5 0xa168
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_6 0xa178
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_7 0xa188
+/* [RW 32] fourth 32b for enabling the output for close the gate nig.mapped
+ as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+ [14] General attn16; [15] General attn17; [16] General attn18; [17]
+ General attn19; [18] General attn20; [19] General attn21; [20] Main power
+ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+ Latched timeout attention; [27] GRC Latched reserved access attention;
+ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_NIG_0 0xa0f8
+#define MISC_REG_AEU_ENABLE4_NIG_1 0xa198
+/* [RW 32] fourth 32b for enabling the output for close the gate pxp.mapped
+ as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+ [14] General attn16; [15] General attn17; [16] General attn18; [17]
+ General attn19; [18] General attn20; [19] General attn21; [20] Main power
+ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+ Latched timeout attention; [27] GRC Latched reserved access attention;
+ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_PXP_0 0xa108
+#define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8
+/* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped
+ * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
+ * parity; [31-10] Reserved; */
+#define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688
+/* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped
+ * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
+ * parity; [31-10] Reserved; */
+#define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0
+/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu
+ 128 bit vector */
+#define MISC_REG_AEU_GENERAL_ATTN_0 0xa000
+#define MISC_REG_AEU_GENERAL_ATTN_1 0xa004
+#define MISC_REG_AEU_GENERAL_ATTN_10 0xa028
+#define MISC_REG_AEU_GENERAL_ATTN_11 0xa02c
+#define MISC_REG_AEU_GENERAL_ATTN_12 0xa030
+#define MISC_REG_AEU_GENERAL_ATTN_2 0xa008
+#define MISC_REG_AEU_GENERAL_ATTN_3 0xa00c
+#define MISC_REG_AEU_GENERAL_ATTN_4 0xa010
+#define MISC_REG_AEU_GENERAL_ATTN_5 0xa014
+#define MISC_REG_AEU_GENERAL_ATTN_6 0xa018
+#define MISC_REG_AEU_GENERAL_ATTN_7 0xa01c
+#define MISC_REG_AEU_GENERAL_ATTN_8 0xa020
+#define MISC_REG_AEU_GENERAL_ATTN_9 0xa024
+#define MISC_REG_AEU_GENERAL_MASK 0xa61c
+/* [RW 32] first 32b for inverting the input for function 0; for each bit:
+ 0= do not invert; 1= invert; mapped as follows: [0] NIG attention for
+ function0; [1] NIG attention for function1; [2] GPIO1 mcp; [3] GPIO2 mcp;
+ [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1; [7] GPIO2 function 1;
+ [8] GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+ function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+ Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+ SPIO4; [15] SPIO5; [16] MSI/X indication for mcp; [17] MSI/X indication
+ for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; [20] PRS
+ Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] SRC Hw
+ interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] TCM
+ Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] TSEMI
+ Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_INVERTER_1_FUNC_0 0xa22c
+#define MISC_REG_AEU_INVERTER_1_FUNC_1 0xa23c
+/* [RW 32] second 32b for inverting the input for function 0; for each bit:
+ 0= do not invert; 1= invert. mapped as follows: [0] PBClient Parity
+ error; [1] PBClient Hw interrupt; [2] QM Parity error; [3] QM Hw
+ interrupt; [4] Timers Parity error; [5] Timers Hw interrupt; [6] XSDM
+ Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9] XCM Hw
+ interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12]
+ DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity
+ error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux
+ PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt;
+ [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error;
+ [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt;
+ [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error;
+ [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */
+#define MISC_REG_AEU_INVERTER_2_FUNC_0 0xa230
+#define MISC_REG_AEU_INVERTER_2_FUNC_1 0xa240
+/* [RW 10] [7:0] = mask 8 attention output signals toward IGU function0;
+ [9:8] = reserved. Zero = mask; one = unmask */
+#define MISC_REG_AEU_MASK_ATTN_FUNC_0 0xa060
+#define MISC_REG_AEU_MASK_ATTN_FUNC_1 0xa064
+/* [RW 1] If set; a system kill occurred */
+#define MISC_REG_AEU_SYS_KILL_OCCURRED 0xa610
+/* [RW 32] Represents the status of the input vector to the AEU when a system
+ kill occurred. The register is reset in por reset. Mapped as follows: [0]
+ NIG attention for function0; [1] NIG attention for function1; [2] GPIO1
+ mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1;
+ [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10]
+ PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event
+ function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP
+ Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for
+ mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19]
+ BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC
+ Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw
+ interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI
+ Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw
+ interrupt; */
+#define MISC_REG_AEU_SYS_KILL_STATUS_0 0xa600
+#define MISC_REG_AEU_SYS_KILL_STATUS_1 0xa604
+#define MISC_REG_AEU_SYS_KILL_STATUS_2 0xa608
+#define MISC_REG_AEU_SYS_KILL_STATUS_3 0xa60c
+/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1
+ Port. */
+#define MISC_REG_BOND_ID 0xa400
+/* [R 8] These bits indicate the metal revision of the chip. This value
+ starts at 0x00 for each all-layer tape-out and increments by one for each
+ tape-out. */
+#define MISC_REG_CHIP_METAL 0xa404
+/* [R 16] These bits indicate the part number for the chip. */
+#define MISC_REG_CHIP_NUM 0xa408
+/* [R 4] These bits indicate the base revision of the chip. This value
+ starts at 0x0 for the A0 tape-out and increments by one for each
+ all-layer tape-out. */
+#define MISC_REG_CHIP_REV 0xa40c
+/* [RW 32] The following driver registers (1...16) represent 16 drivers and
+ 32 clients. Each client can be controlled by one driver only. A one in a
+ bit means that this driver controls the corresponding client (Ex: bit 5
+ set means this driver controls client number 5). addr1 = set; addr0 =
+ clear; reads from both addresses give the same result = status. A write
+ to address 1 sets a request to control all the clients whose
+ corresponding bit (in the write command) is set. If the client is free
+ (the corresponding bit in all the other drivers is clear) a one will be
+ written to that driver register; if the client is not free the bit will
+ remain zero. If the corresponding bit is already set (the driver requests
+ to gain control of a client it already controls) the
+ ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will be asserted. A write to
+ address 0 sets a request to free all the clients whose corresponding bit
+ (in the write command) is set. If the corresponding bit is clear (the
+ driver requests to free a client it does not control) the
+ ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will be asserted. */
+#define MISC_REG_DRIVER_CONTROL_1 0xa510
+#define MISC_REG_DRIVER_CONTROL_7 0xa3c8
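+/* Illustrative sketch only; not part of the original header. Per the
+ addr1 = set / addr0 = clear scheme above; and assuming the two addresses
+ are consecutive 32-bit words; hypothetical aliases: a driver writes its
+ client mask to the SET alias to request ownership; then reads the
+ register back to learn which clients it actually won. */
+#define MISC_DRIVER_CONTROL_1_CLEAR (MISC_REG_DRIVER_CONTROL_1 + 0x0)
+#define MISC_DRIVER_CONTROL_1_SET (MISC_REG_DRIVER_CONTROL_1 + 0x4)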
+/* [RW 1] e1hmf for WOL. If clear; the WOL signal to the PXP will be sent
+ on bit 0 only. */
+#define MISC_REG_E1HMF_MODE 0xa5f8
+/* [R 1] Status of four port mode path swap input pin. */
+#define MISC_REG_FOUR_PORT_PATH_SWAP 0xa75c
+/* [RW 2] 4 port path swap overwrite. [0] - Overwrite control; if it is 0 -
+ the path_swap output is equal to 4 port mode path swap input pin; if it
+ is 1 - the path_swap output is equal to bit[1] of this register; [1] -
+ Overwrite value. If bit[0] of this register is 1 this is the value that
+ receives the path_swap output. Reset on Hard reset. */
+#define MISC_REG_FOUR_PORT_PATH_SWAP_OVWR 0xa738
+/* [R 1] Status of 4 port mode port swap input pin. */
+#define MISC_REG_FOUR_PORT_PORT_SWAP 0xa754
+/* [RW 2] 4 port port swap overwrite. [0] - Overwrite control; if it is 0 -
+ the port_swap output is equal to 4 port mode port swap input pin; if it
+ is 1 - the port_swap output is equal to bit[1] of this register; [1] -
+ Overwrite value. If bit[0] of this register is 1 this is the value that
+ receives the port_swap output. Reset on Hard reset. */
+#define MISC_REG_FOUR_PORT_PORT_SWAP_OVWR 0xa734
+/* [RW 32] Debug only: spare RW register reset by core reset */
+#define MISC_REG_GENERIC_CR_0 0xa460
+#define MISC_REG_GENERIC_CR_1 0xa464
+/* [RW 32] Debug only: spare RW register reset by por reset */
+#define MISC_REG_GENERIC_POR_1 0xa474
+/* [RW 32] Bit[0]: EPIO MODE SEL: Setting this bit to 1 will allow SW/FW to
+ use all of the 32 Extended GPIO pins. Without setting this bit; an EPIO
+ cannot be configured as an output. Each output has its output enable in
+ the MCP register space; but this bit needs to be set to make use of that.
+ Bit[3:1] spare. Bit[4]: WCVTMON_PWRDN: Powerdown for Warpcore VTMON. When
+ set to 1 - Powerdown. Bit[5]: WCVTMON_RESETB: Reset for Warpcore VTMON.
+ When set to 0 - vTMON is in reset. Bit[6]: setting this bit will change
+ the i/o to an output and will drive the TimeSync output. Bit[31:7]:
+ spare. Global register. Reset by hard reset. */
+#define MISC_REG_GEN_PURP_HWG 0xa9a0
+/* [RW 32] GPIO. [31-28] FLOAT port 1; [27-24] FLOAT port 0; When any of
+ these bits is written as a '1'; the corresponding GPIO bit will turn off
+ its drivers and become an input. This is the reset state of all GPIO
+ pins. The read value of these bits will be a '1' if the last command
+ (#SET; #CLR; or #FLOAT) for this bit was a #FLOAT. (reset value 0xff).
+ [23-20] CLR port 1; [19-16] CLR port 0; When any of these bits is written
+ as a '1'; the corresponding GPIO bit will drive low. The read value of
+ these bits will be a '1' if the last command (#SET; #CLR; or #FLOAT) for
+ this bit was a #CLR. (reset value 0). [15-12] SET port 1; [11-8] SET port
+ 0; When any of these bits is written as a '1'; the corresponding GPIO
+ bit will drive high (if it has that capability). The read value of these
+ bits will be a '1' if the last command (#SET; #CLR; or #FLOAT) for this
+ bit was a #SET. (reset value 0). [7-4] VALUE port 1; [3-0] VALUE port 0;
+ RO; These bits indicate the read value of each of the eight GPIO pins.
+ This is the result value of the pin; not the drive value. Writing these
+ bits will have no effect. */
+#define MISC_REG_GPIO 0xa490
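+/* Illustrative sketch only; not part of the original header. Hypothetical
+ field masks for the GPIO layout described above (FLOAT/CLR/SET command
+ fields and the read-only VALUE field). */
+#define MISC_GPIO_FLOAT_P1 (0xf<<28)
+#define MISC_GPIO_FLOAT_P0 (0xf<<24)
+#define MISC_GPIO_CLR_P1 (0xf<<20)
+#define MISC_GPIO_CLR_P0 (0xf<<16)
+#define MISC_GPIO_SET_P1 (0xf<<12)
+#define MISC_GPIO_SET_P0 (0xf<<8)
+#define MISC_GPIO_VALUE_P1 (0xf<<4)
+#define MISC_GPIO_VALUE_P0 (0xf<<0)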
+/* [RW 8] These bits enable the GPIO_INTs to signal an event to the
+ IGU/MCP according to the following map: [0] p0_gpio_0; [1] p0_gpio_1; [2]
+ p0_gpio_2; [3] p0_gpio_3; [4] p1_gpio_0; [5] p1_gpio_1; [6] p1_gpio_2;
+ [7] p1_gpio_3; */
+#define MISC_REG_GPIO_EVENT_EN 0xa2bc
+/* [RW 32] GPIO INT. [31-28] OLD_CLR port1; [27-24] OLD_CLR port0; Writing a
+ '1' to these bits clears the corresponding bit in the #OLD_VALUE register.
+ This will acknowledge an interrupt on the falling edge of the
+ corresponding GPIO input (reset value 0). [23-20] OLD_SET port1; [19-16]
+ OLD_SET port0; Writing a '1' to these bits sets the corresponding bit in
+ the #OLD_VALUE register. This will acknowledge an interrupt on the rising
+ edge of the corresponding GPIO input (reset value 0). [15-12] OLD_VALUE
+ port1; [11-8] OLD_VALUE port0; RO; These bits indicate the old value of
+ the GPIO input. When the ~INT_STATE bit is set; this bit indicates the
+ OLD value of the pin such that if ~INT_STATE is set and this bit is '0';
+ then the interrupt is due to a low to high edge. If ~INT_STATE is set and
+ this bit is '1'; then the interrupt is due to a high to low edge (reset
+ value 0). [7-4] INT_STATE port1; [3-0] INT_STATE port0; RO; These bits
+ indicate the current GPIO interrupt state for each GPIO pin. This bit is
+ cleared when the appropriate #OLD_SET or #OLD_CLR command bit is written.
+ This bit is set when the GPIO input does not match the current value in
+ #OLD_VALUE (reset value 0). */
+#define MISC_REG_GPIO_INT 0xa494
+/* [R 28] this field holds the last information that caused a reserved
+ attention. bits [19:0] - address; [22:20] function; [23] reserved;
+ [27:24] the master that caused the attention - according to the following
+ encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
+ dbu; 8 = dmae */
+#define MISC_REG_GRC_RSV_ATTN 0xa3c0
+/* [R 28] this field holds the last information that caused a timeout
+ attention. bits [19:0] - address; [22:20] function; [23] reserved;
+ [27:24] the master that caused the attention - according to the following
+ encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
+ dbu; 8 = dmae */
+#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4
+/* [RW 1] Setting this bit enables a timer in the GRC block to timeout any
+ access that does not finish within
+ ~misc_registers_grc_timout_val.grc_timeout_val cycles. When this bit is
+ cleared; this timeout is disabled. If this timeout occurs; the GRC shall
+ assert its attention output. */
+#define MISC_REG_GRC_TIMEOUT_EN 0xa280
+/* [RW 28] 28 LSB of LCPLL first register; reset val = 521. inside order of
+ the bits is: [2:0] OAC (reset value 001) CML output buffer bias control;
+ 111 for +40%; 011 for +20%; 001 for 0%; 000 for -20%. [5:3] Icp_ctrl
+ (reset value 001) Charge pump current control; 111 for 720u; 011 for
+ 600u; 001 for 480u and 000 for 360u. [7:6] Bias_ctrl (reset value 00)
+ Global bias control; When bit 7 is high bias current will be 10 0gh; When
+ bit 6 is high bias will be 100w; Valid values are 00; 10; 01. [10:8]
+ Pll_observe (reset value 010) Bits to control observability. bit 10 is
+ for test bias; bit 9 is for test CK; bit 8 is test Vc. [12:11] Vth_ctrl
+ (reset value 00) Comparator threshold control. 00 for 0.6V; 01 for 0.54V
+ and 10 for 0.66V. [13] pllSeqStart (reset value 0) Enables VCO tuning
+ sequencer: 1= sequencer disabled; 0= sequencer enabled (inverted
+ internally). [14] reserved (reset value 0) Reset for VCO sequencer is
+ connected to RESET input directly. [15] capRetry_en (reset value 0)
+ enable retry on cap search failure (inverted). [16] freqMonitor_e (reset
+ value 0) bit to continuously monitor vco freq (inverted). [17]
+ freqDetRestart_en (reset value 0) bit to enable restart when not freq
+ locked (inverted). [18] freqDetRetry_en (reset value 0) bit to enable
+ retry on freq det failure(inverted). [19] pllForceFdone_en (reset value
+ 0) bit to enable pllForceFdone & pllForceFpass into pllSeq. [20]
+ pllForceFdone (reset value 0) bit to force freqDone. [21] pllForceFpass
+ (reset value 0) bit to force freqPass. [22] pllForceDone_en (reset value
+ 0) bit to enable pllForceCapDone. [23] pllForceCapDone (reset value 0)
+ bit to force capDone. [24] pllForceCapPass_en (reset value 0) bit to
+ enable pllForceCapPass. [25] pllForceCapPass (reset value 0) bit to force
+ capPass. [26] capRestart (reset value 0) bit to force cap sequencer to
+ restart. [27] capSelectM_en (reset value 0) bit to enable cap select
+ register bits. */
+#define MISC_REG_LCPLL_CTRL_1 0xa2a4
+#define MISC_REG_LCPLL_CTRL_REG_2 0xa2a8
+/* [RW 4] Interrupt mask register #0 read/write */
+#define MISC_REG_MISC_INT_MASK 0xa388
+/* [RW 1] Parity mask register #0 read/write */
+#define MISC_REG_MISC_PRTY_MASK 0xa398
+/* [R 1] Parity register #0 read */
+#define MISC_REG_MISC_PRTY_STS 0xa38c
+/* [RC 1] Parity register #0 read clear */
+#define MISC_REG_MISC_PRTY_STS_CLR 0xa390
+#define MISC_REG_NIG_WOL_P0 0xa270
+#define MISC_REG_NIG_WOL_P1 0xa274
+/* [R 1] If set; indicates that the pcie_rst_b was asserted without perst
+ assertion */
+#define MISC_REG_PCIE_HOT_RESET 0xa618
+/* [RW 32] 32 LSB of storm PLL first register; reset val = 0x071d2911.
+ inside order of the bits is: [0] P1 divider[0] (reset value 1); [1] P1
+ divider[1] (reset value 0); [2] P1 divider[2] (reset value 0); [3] P1
+ divider[3] (reset value 0); [4] P2 divider[0] (reset value 1); [5] P2
+ divider[1] (reset value 0); [6] P2 divider[2] (reset value 0); [7] P2
+ divider[3] (reset value 0); [8] ph_det_dis (reset value 1); [9]
+ freq_det_dis (reset value 0); [10] Icpx[0] (reset value 0); [11] Icpx[1]
+ (reset value 1); [12] Icpx[2] (reset value 0); [13] Icpx[3] (reset value
+ 1); [14] Icpx[4] (reset value 0); [15] Icpx[5] (reset value 0); [16]
+ Rx[0] (reset value 1); [17] Rx[1] (reset value 0); [18] vc_en (reset
+ value 1); [19] vco_rng[0] (reset value 1); [20] vco_rng[1] (reset value
+ 1); [21] Kvco_xf[0] (reset value 0); [22] Kvco_xf[1] (reset value 0);
+ [23] Kvco_xf[2] (reset value 0); [24] Kvco_xs[0] (reset value 1); [25]
+ Kvco_xs[1] (reset value 1); [26] Kvco_xs[2] (reset value 1); [27]
+ testd_en (reset value 0); [28] testd_sel[0] (reset value 0); [29]
+ testd_sel[1] (reset value 0); [30] testd_sel[2] (reset value 0); [31]
+ testa_en (reset value 0); */
+#define MISC_REG_PLL_STORM_CTRL_1 0xa294
+#define MISC_REG_PLL_STORM_CTRL_2 0xa298
+#define MISC_REG_PLL_STORM_CTRL_3 0xa29c
+#define MISC_REG_PLL_STORM_CTRL_4 0xa2a0
+/* [R 1] Status of 4 port mode enable input pin. */
+#define MISC_REG_PORT4MODE_EN 0xa750
+/* [RW 2] 4 port mode enable overwrite.[0] - Overwrite control; if it is 0 -
+ * the port4mode_en output is equal to 4 port mode input pin; if it is 1 -
+ * the port4mode_en output is equal to bit[1] of this register; [1] -
+ * Overwrite value. If bit[0] of this register is 1 this is the value that
+ * receives the port4mode_en output. */
+#define MISC_REG_PORT4MODE_EN_OVWR 0xa720
+/* [RW 32] reset reg#2; write/read one = the specific block is out of reset;
+ write/read zero = the specific block is in reset; addr 0-wr - the write
+ value will be written to the register; addr 1-set - one will be written
+ to all the bits that have the value of one in the data written (bits that
+ have the value of zero will not be changed); addr 2-clear - zero will be
+ written to all the bits that have the value of one in the data written
+ (bits that have the value of zero will not be changed); addr 3-ignore;
+ read ignore from all addr except addr 00; inside order of the bits is:
+ [0] rst_bmac0; [1] rst_bmac1; [2] rst_emac0; [3] rst_emac1; [4] rst_grc;
+ [5] rst_mcp_n_reset_reg_hard_core; [6] rst_mcp_n_hard_core_rst_b; [7]
+ rst_mcp_n_reset_cmn_cpu; [8] rst_mcp_n_reset_cmn_core; [9] rst_rbcn;
+ [10] rst_dbg; [11] rst_misc_core; [12] rst_dbue (UART); [13]
+ Pci_resetmdio_n; [14] rst_emac0_hard_core; [15] rst_emac1_hard_core;
+ [16] rst_pxp_rq_rd_wr; [31:17] reserved */
+#define MISC_REG_RESET_REG_2 0xa590
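+/* Usage sketch (illustrative only; not part of the original header): the
+ * set (addr 1; base + 4) and clear (addr 2; base + 8) aliases described
+ * above allow flipping a single reset bit without a read-modify-write.
+ * REG_WR() and struct bnx2x are assumed to be the driver's register write
+ * helper and device handle.
+ */
+static inline void misc_reset2_cycle_emac0_sketch(struct bnx2x *bp)
+{
+        /* one = out of reset: write rst_emac0 (bit 2) to the set alias */
+        REG_WR(bp, MISC_REG_RESET_REG_2 + 4, 1 << 2);
+        /* zero = in reset: write the same bit to the clear alias */
+        REG_WR(bp, MISC_REG_RESET_REG_2 + 8, 1 << 2);
+}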
+/* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is
+ shared with the driver resides */
+#define MISC_REG_SHARED_MEM_ADDR 0xa2b4
+/* [RW 32] SPIO. [31-24] FLOAT When any of these bits is written as a '1';
+ the corresponding SPIO bit will turn off its drivers and become an
+ input. This is the reset state of all SPIO pins. The read value of these
+ bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this
+ bit was a #FLOAT. (reset value 0xff). [23-16] CLR When any of these bits
+ is written as a '1'; the corresponding SPIO bit will drive low. The read
+ value of these bits will be a '1' if that last command (#SET; #CLR; or
+ #FLOAT) for this bit was a #CLR. (reset value 0). [15-8] SET When any of
+ these bits is written as a '1'; the corresponding SPIO bit will drive
+ high (if it has that capability). The read value of these bits will be a
+ '1' if that last command (#SET; #CLR; or #FLOAT) for this bit was a #SET.
+ (reset value 0). [7-0] VALUE RO; These bits indicate the read value of
+ each of the eight SPIO pins. This is the result value of the pin; not the
+ drive value. Writing these bits will have no effect. Each 8 bits field
+ is divided as follows: [0] VAUX Enable; when pulsed low; enables supply
+ from VAUX. (This is an output pin only; the FLOAT field is not applicable
+ for this pin); [1] VAUX Disable; when pulsed low; disables supply from
+ VAUX. (This is an output pin only; FLOAT field is not applicable for this
+ pin); [2] SEL_VAUX_B - Control to power switching logic. Drive low to
+ select VAUX supply. (This is an output pin only; it is not controlled by
+ the SET and CLR fields; it is controlled by the Main Power SM; the FLOAT
+ field is not applicable for this pin; only the VALUE field is relevant -
+ it reflects the output value); [3] port swap; [4] spio_4; [5] spio_5; [6]
+ Bit 0 of UMP device ID select; read by UMP firmware; [7] Bit 1 of UMP
+ device ID select; read by UMP firmware. */
+#define MISC_REG_SPIO 0xa4fc
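+/* Usage sketch (illustrative only): per the field map above; driving SPIO
+ * pin 4 low means writing a '1' to its CLR bit ([23-16]); releasing it
+ * back to an input means writing its FLOAT bit ([31-24]). REG_WR() is an
+ * assumed driver helper.
+ */
+static inline void misc_spio4_pulse_low_sketch(struct bnx2x *bp)
+{
+        REG_WR(bp, MISC_REG_SPIO, 1 << (16 + 4));       /* CLR: drive low */
+        REG_WR(bp, MISC_REG_SPIO, 1 << (24 + 4));       /* FLOAT: release */
+}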
+/* [RW 8] These bits enable the SPIO_INTs to signal events to the IGU/MC.
+ according to the following map: [3:0] reserved; [4] spio_4; [5] spio_5;
+ [7:6] reserved */
+#define MISC_REG_SPIO_EVENT_EN 0xa2b8
+/* [RW 32] SPIO INT. [31-24] OLD_CLR Writing a '1' to these bits clears the
+ corresponding bit in the #OLD_VALUE register. This will acknowledge an
+ interrupt on the falling edge of corresponding SPIO input (reset value
+ 0). [23-16] OLD_SET Writing a '1' to these bits sets the corresponding bit
+ in the #OLD_VALUE register. This will acknowledge an interrupt on the
+ rising edge of corresponding SPIO input (reset value 0). [15-8] OLD_VALUE
+ RO; These bits indicate the old value of the SPIO input value. When the
+ ~INT_STATE bit is set; this bit indicates the OLD value of the pin such
+ that if ~INT_STATE is set and this bit is '0'; then the interrupt is due
+ to a low to high edge. If ~INT_STATE is set and this bit is '1'; then the
+ interrupt is due to a high to low edge (reset value 0). [7-0] INT_STATE
+ RO; These bits indicate the current SPIO interrupt state for each SPIO
+ pin. This bit is cleared when the appropriate #OLD_SET or #OLD_CLR
+ command bit is written. This bit is set when the SPIO input does not
+ match the current value in #OLD_VALUE (reset value 0). */
+#define MISC_REG_SPIO_INT 0xa500
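+/* Usage sketch (illustrative only): acknowledging a rising-edge SPIO
+ * interrupt means setting the pin's OLD_SET bit ([23-16]) so that
+ * #OLD_VALUE matches the new pin state and INT_STATE clears; a falling
+ * edge would use OLD_CLR ([31-24]) instead. REG_WR() is an assumed driver
+ * helper.
+ */
+static inline void misc_spio_int_ack_rising_sketch(struct bnx2x *bp, int pin)
+{
+        REG_WR(bp, MISC_REG_SPIO_INT, 1 << (16 + pin));
+}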
+/* [RW 32] reload value for counter 4; the value will be reloaded if the
+ counter reached zero and the reload bit
+ (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
+#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc
+/* [RW 32] the value of the counter for sw timers 1-8. there are 8 addresses
+ in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 -
+ timer 8 */
+#define MISC_REG_SW_TIMER_VAL 0xa5c0
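+/* Usage sketch (illustrative only): assuming the 8 timer values sit at
+ * consecutive dword addresses as the comment above suggests; timer N
+ * (1-based) would be read at base + 4 * (N - 1). REG_RD() is an assumed
+ * driver helper.
+ */
+static inline u32 misc_sw_timer_val_sketch(struct bnx2x *bp, int timer)
+{
+        return REG_RD(bp, MISC_REG_SW_TIMER_VAL + 4 * (timer - 1));
+}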
+/* [R 1] Status of two port mode path swap input pin. */
+#define MISC_REG_TWO_PORT_PATH_SWAP 0xa758
+/* [RW 2] 2 port swap overwrite.[0] - Overwrite control; if it is 0 - the
+ path_swap output is equal to 2 port mode path swap input pin; if it is 1
+ - the path_swap output is equal to bit[1] of this register; [1] -
+ Overwrite value. If bit[0] of this register is 1 this is the value that
+ receives the path_swap output. Reset on Hard reset. */
+#define MISC_REG_TWO_PORT_PATH_SWAP_OVWR 0xa72c
+/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
+ loaded; 0 - prepare; 1 - unprepare */
+#define MISC_REG_UNPREPARED 0xa424
+/* [RW 5] MDIO PHY Address. The WC uses this address to determine whether or
+ * not it is the recipient of the message on the MDIO interface. The value
+ * is compared to the value on ctrl_md_devad. Drives output
+ * misc_xgxs0_phy_addr. Global register. */
+#define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc
+/* [RW 2] XMAC Core port mode. Indicates the number of ports on the system
+ side. This should be less than or equal to phy_port_mode; if some of the
+ ports are not used. This enables reduction of frequency on the core side.
+ This is a strap input for the XMAC_MP core. 00 - Single Port Mode; 01 -
+ Dual Port Mode; 10 - Tri Port Mode; 11 - Quad Port Mode. This is a strap
+ input for the XMAC_MP core; and should be changed only while reset is
+ held low. Reset on Hard reset. */
+#define MISC_REG_XMAC_CORE_PORT_MODE 0xa964
+/* [RW 2] XMAC PHY port mode. Indicates the number of ports on the Warp
+ Core. This is a strap input for the XMAC_MP core. 00 - Single Port Mode;
+ 01 - Dual Port Mode; 1x - Quad Port Mode; This is a strap input for the
+ XMAC_MP core; and should be changed only while reset is held low. Reset
+ on Hard reset. */
+#define MISC_REG_XMAC_PHY_PORT_MODE 0xa960
+/* [RW 32] 1 [47] Packet Size = 64. Writing to this register writes bits
+ * 31:0. Reads from this register will clear bits 31:0. */
+#define MSTAT_REG_RX_STAT_GR64_LO 0x200
+/* [RW 32] 1 [00] Tx Good Packet Count. Writing to this register writes
+ * bits 31:0. Reads from this register will clear bits 31:0. */
+#define MSTAT_REG_TX_STAT_GTXPOK_LO 0
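+/* Usage sketch (illustrative only): since these MSTAT counters clear on
+ * read; a statistics pass accumulates each read into a software total
+ * instead of re-reading. The offsets are relative to an MSTAT block base
+ * that is not part of this excerpt; mstat_base and REG_RD() are assumed.
+ */
+static inline void mstat_acc_tx_pok_sketch(struct bnx2x *bp, u32 mstat_base,
+                                           u64 *total)
+{
+        *total += REG_RD(bp, mstat_base + MSTAT_REG_TX_STAT_GTXPOK_LO);
+}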
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST (0x1<<0)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST (0x1<<1)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3)
+#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0)
+#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS (0xf<<18)
+/* [RW 1] Input enable for RX_BMAC0 IF */
+#define NIG_REG_BMAC0_IN_EN 0x100ac
+/* [RW 1] output enable for TX_BMAC0 IF */
+#define NIG_REG_BMAC0_OUT_EN 0x100e0
+/* [RW 1] output enable for TX BMAC pause port 0 IF */
+#define NIG_REG_BMAC0_PAUSE_OUT_EN 0x10110
+/* [RW 1] output enable for RX_BMAC0_REGS IF */
+#define NIG_REG_BMAC0_REGS_OUT_EN 0x100e8
+/* [RW 1] output enable for RX BRB1 port0 IF */
+#define NIG_REG_BRB0_OUT_EN 0x100f8
+/* [RW 1] Input enable for TX BRB1 pause port 0 IF */
+#define NIG_REG_BRB0_PAUSE_IN_EN 0x100c4
+/* [RW 1] output enable for RX BRB1 port1 IF */
+#define NIG_REG_BRB1_OUT_EN 0x100fc
+/* [RW 1] Input enable for TX BRB1 pause port 1 IF */
+#define NIG_REG_BRB1_PAUSE_IN_EN 0x100c8
+/* [RW 1] output enable for RX BRB1 LP IF */
+#define NIG_REG_BRB_LB_OUT_EN 0x10100
+/* [WB_W 82] Debug packet to LP from RBC; Data spelling: [63:0] data; [64]
+ error; [67:65] eop_bvalid; [68] eop; [69] sop; [70] port_id; [71] flush;
+ [73:72] vnic_num; [81:74] sideband_info */
+#define NIG_REG_DEBUG_PACKET_LB 0x10800
+/* [RW 1] Input enable for TX Debug packet */
+#define NIG_REG_EGRESS_DEBUG_IN_EN 0x100dc
+/* [RW 1] If 1 - egress drain mode for port0 is active. In this mode all
+ packets from PBF are not forwarded to the MAC and are just deleted from
+ the FIFO. The first packet may be deleted from the middle; the last
+ packet will always be deleted to the end. */
+#define NIG_REG_EGRESS_DRAIN0_MODE 0x10060
+/* [RW 1] Output enable to EMAC0 */
+#define NIG_REG_EGRESS_EMAC0_OUT_EN 0x10120
+/* [RW 1] MAC configuration for packets of port0. If 1 - all packets output
+ to the EMAC for port0; otherwise to the BMAC for port0 */
+#define NIG_REG_EGRESS_EMAC0_PORT 0x10058
+/* [RW 1] Input enable for TX PBF user packet port0 IF */
+#define NIG_REG_EGRESS_PBF0_IN_EN 0x100cc
+/* [RW 1] Input enable for TX PBF user packet port1 IF */
+#define NIG_REG_EGRESS_PBF1_IN_EN 0x100d0
+/* [RW 1] Input enable for TX UMP management packet port0 IF */
+#define NIG_REG_EGRESS_UMP0_IN_EN 0x100d4
+/* [RW 1] Input enable for RX_EMAC0 IF */
+#define NIG_REG_EMAC0_IN_EN 0x100a4
+/* [RW 1] output enable for TX EMAC pause port 0 IF */
+#define NIG_REG_EMAC0_PAUSE_OUT_EN 0x10118
+/* [R 1] status from emac0. This bit is set when MDINT from either the
+ EXT_MDINT pin or from the Copper PHY is driven low. This condition must
+ be cleared in the attached PHY device that is driving the MDINT pin. */
+#define NIG_REG_EMAC0_STATUS_MISC_MI_INT 0x10494
+/* [WB 48] This address space contains BMAC0 registers. The BMAC registers
+ are described in appendix A. In order to access the BMAC0 registers; the
+ base address; NIG_REGISTERS_INGRESS_BMAC0_MEM; Offset: 0x10c00; should be
+ added to each BMAC register offset */
+#define NIG_REG_INGRESS_BMAC0_MEM 0x10c00
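+/* Usage sketch (illustrative only): a BMAC register is reached by adding
+ * its appendix-A offset to this base; and since the space is wide-bus
+ * ([WB]); a two-dword helper such as the driver's REG_WR_DMAE() would be
+ * used. bmac_reg_offset is a hypothetical offset; wb_data holds two dwords.
+ */
+static inline void bmac0_wb_write_sketch(struct bnx2x *bp,
+                                         u32 bmac_reg_offset, u32 *wb_data)
+{
+        REG_WR_DMAE(bp, NIG_REG_INGRESS_BMAC0_MEM + bmac_reg_offset,
+                    wb_data, 2);
+}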
+/* [WB 48] This address space contains BMAC1 registers. The BMAC registers
+ are described in appendix A. In order to access the BMAC1 registers; the
+ base address; NIG_REGISTERS_INGRESS_BMAC1_MEM; Offset: 0x11000; should be
+ added to each BMAC register offset */
+#define NIG_REG_INGRESS_BMAC1_MEM 0x11000
+/* [R 1] FIFO empty in EOP descriptor FIFO of LP in NIG_RX_EOP */
+#define NIG_REG_INGRESS_EOP_LB_EMPTY 0x104e0
+/* [RW 17] Debug only. RX_EOP_DSCR_lb_FIFO in NIG_RX_EOP. Data
+ packet_length[13:0]; mac_error[14]; trunc_error[15]; parity[16] */
+#define NIG_REG_INGRESS_EOP_LB_FIFO 0x104e4
+/* [RW 27] 0 - must be active for Everest A0; 1 - for Everest B0 when latch
+ logic for interrupts must be used. Enable per bit of interrupt of
+ ~latch_status.latch_status */
+#define NIG_REG_LATCH_BC_0 0x16210
+/* [RW 27] Latch for each interrupt from Unicore. b[0]
+ status_emac0_misc_mi_int; b[1] status_emac0_misc_mi_complete;
+ b[2]status_emac0_misc_cfg_change; b[3]status_emac0_misc_link_status;
+ b[4]status_emac0_misc_link_change; b[5]status_emac0_misc_attn;
+ b[6]status_serdes0_mac_crs; b[7]status_serdes0_autoneg_complete;
+ b[8]status_serdes0_fiber_rxact; b[9]status_serdes0_link_status;
+ b[10]status_serdes0_mr_page_rx; b[11]status_serdes0_cl73_an_complete;
+ b[12]status_serdes0_cl73_mr_page_rx; b[13]status_serdes0_rx_sigdet;
+ b[14]status_xgxs0_remotemdioreq; b[15]status_xgxs0_link10g;
+ b[16]status_xgxs0_autoneg_complete; b[17]status_xgxs0_fiber_rxact;
+ b[21:18]status_xgxs0_link_status; b[22]status_xgxs0_mr_page_rx;
+ b[23]status_xgxs0_cl73_an_complete; b[24]status_xgxs0_cl73_mr_page_rx;
+ b[25]status_xgxs0_rx_sigdet; b[26]status_xgxs0_mac_crs */
+#define NIG_REG_LATCH_STATUS_0 0x18000
+/* [RW 1] led 10g for port 0 */
+#define NIG_REG_LED_10G_P0 0x10320
+/* [RW 1] led 10g for port 1 */
+#define NIG_REG_LED_10G_P1 0x10324
+/* [RW 1] Port0: This bit is set to enable the use of the
+ ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 field
+ defined below. If this bit is cleared; then the blink rate will be about
+ 8Hz. */
+#define NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 0x10318
+/* [RW 12] Port0: Specifies the period of each blink cycle (on + off) for
+ Traffic LED in milliseconds. Must be a non-zero value. This 12-bit field
+ is reset to 0x080; giving a default blink rate of approximately 8Hz. */
+#define NIG_REG_LED_CONTROL_BLINK_RATE_P0 0x10310
+/* [RW 1] Port0: If set along with the
+ ~nig_registers_led_control_override_traffic_p0.led_control_override_traffic_p0
+ bit and ~nig_registers_led_control_traffic_p0.led_control_traffic_p0 LED
+ bit; the Traffic LED will blink with the blink rate specified in
+ ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
+ ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0
+ fields. */
+#define NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 0x10308
+/* [RW 1] Port0: If set overrides hardware control of the Traffic LED. The
+ Traffic LED will then be controlled via bit ~nig_registers_
+ led_control_traffic_p0.led_control_traffic_p0 and bit
+ ~nig_registers_led_control_blink_traffic_p0.led_control_blink_traffic_p0 */
+#define NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 0x102f8
+/* [RW 1] Port0: If set along with the led_control_override_traffic_p0 bit;
+ turns on the Traffic LED. If the led_control_blink_traffic_p0 bit is also
+ set; the LED will blink with blink rate specified in
+ ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
+ ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0
+ fields. */
+#define NIG_REG_LED_CONTROL_TRAFFIC_P0 0x10300
+/* [RW 4] led mode for port0: 0 - MAC; 1-3 - PHY1; 4 - MAC2; 5-7 - PHY4;
+ 8 - MAC3; 9-11 - PHY7; 12 - MAC4; 13-15 - PHY10; */
+#define NIG_REG_LED_MODE_P0 0x102f0
+/* [RW 3] for port0: enable for llfc ppp and pause. b0 - brb1 enable; b1 -
+ tsdm enable; b2 - usdm enable */
+#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 0x16070
+#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 0x16074
+/* [RW 1] SAFC enable for port0. This register may get 1 only when
+ ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable =0 for the same
+ port */
+#define NIG_REG_LLFC_ENABLE_0 0x16208
+#define NIG_REG_LLFC_ENABLE_1 0x1620c
+/* [RW 16] classes are high-priority for port0 */
+#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 0x16058
+#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 0x1605c
+/* [RW 16] classes are low-priority for port0 */
+#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 0x16060
+#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 0x16064
+/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */
+#define NIG_REG_LLFC_OUT_EN_0 0x160c8
+#define NIG_REG_LLFC_OUT_EN_1 0x160cc
+#define NIG_REG_LLH0_ACPI_PAT_0_CRC 0x1015c
+#define NIG_REG_LLH0_ACPI_PAT_6_LEN 0x10154
+#define NIG_REG_LLH0_BRB1_DRV_MASK 0x10244
+#define NIG_REG_LLH0_BRB1_DRV_MASK_MF 0x16048
+/* [RW 1] send to BRB1 if no match on any of RMP rules. */
+#define NIG_REG_LLH0_BRB1_NOT_MCP 0x1025c
+/* [RW 2] Determine the classification participants. 0: no classification. 1:
+ classification upon VLAN id. 2: classification upon MAC address. 3:
+ classification upon both VLAN id & MAC addr. */
+#define NIG_REG_LLH0_CLS_TYPE 0x16080
+/* [RW 32] cm header for llh0 */
+#define NIG_REG_LLH0_CM_HEADER 0x1007c
+#define NIG_REG_LLH0_DEST_IP_0_1 0x101dc
+#define NIG_REG_LLH0_DEST_MAC_0_0 0x101c0
+/* [RW 16] destination TCP address 1. The LLH will look for this address in
+ all incoming packets. */
+#define NIG_REG_LLH0_DEST_TCP_0 0x10220
+/* [RW 16] destination UDP address 1. The LLH will look for this address in
+ all incoming packets. */
+#define NIG_REG_LLH0_DEST_UDP_0 0x10214
+#define NIG_REG_LLH0_ERROR_MASK 0x1008c
+/* [RW 8] event id for llh0 */
+#define NIG_REG_LLH0_EVENT_ID 0x10084
+#define NIG_REG_LLH0_FUNC_EN 0x160fc
+#define NIG_REG_LLH0_FUNC_MEM 0x16180
+#define NIG_REG_LLH0_FUNC_MEM_ENABLE 0x16140
+#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100
+/* [RW 1] Determine the IP version to look for in
+ ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */
+#define NIG_REG_LLH0_IPV4_IPV6_0 0x10208
+/* [RW 1] t bit for llh0 */
+#define NIG_REG_LLH0_T_BIT 0x10074
+/* [RW 12] VLAN ID 1. In case of VLAN packet the LLH will look for this ID. */
+#define NIG_REG_LLH0_VLAN_ID_0 0x1022c
+/* [RW 8] init credit counter for port0 in LLH */
+#define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554
+#define NIG_REG_LLH0_XCM_MASK 0x10130
+#define NIG_REG_LLH1_BRB1_DRV_MASK 0x10248
+/* [RW 1] send to BRB1 if no match on any of RMP rules. */
+#define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc
+/* [RW 2] Determine the classification participants. 0: no classification. 1:
+ classification upon VLAN id. 2: classification upon MAC address. 3:
+ classification upon both VLAN id & MAC addr. */
+#define NIG_REG_LLH1_CLS_TYPE 0x16084
+/* [RW 32] cm header for llh1 */
+#define NIG_REG_LLH1_CM_HEADER 0x10080
+#define NIG_REG_LLH1_ERROR_MASK 0x10090
+/* [RW 8] event id for llh1 */
+#define NIG_REG_LLH1_EVENT_ID 0x10088
+#define NIG_REG_LLH1_FUNC_MEM 0x161c0
+#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160
+#define NIG_REG_LLH1_FUNC_MEM_SIZE 16
+/* [RW 1] When this bit is set; the LLH will classify the packet before
+ * sending it to the BRB or calculating WoL on it. This bit controls port 1
+ * only. The legacy llh_multi_function_mode bit controls port 0. */
+#define NIG_REG_LLH1_MF_MODE 0x18614
+/* [RW 8] init credit counter for port1 in LLH */
+#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564
+#define NIG_REG_LLH1_XCM_MASK 0x10134
+/* [RW 1] When this bit is set; the LLH will expect all packets to be with
+ e1hov */
+#define NIG_REG_LLH_E1HOV_MODE 0x160d8
+/* [RW 1] When this bit is set; the LLH will classify the packet before
+ sending it to the BRB or calculating WoL on it. */
+#define NIG_REG_LLH_MF_MODE 0x16024
+#define NIG_REG_MASK_INTERRUPT_PORT0 0x10330
+#define NIG_REG_MASK_INTERRUPT_PORT1 0x10334
+/* [RW 1] Output signal from NIG to EMAC0. When set enables the EMAC0 block. */
+#define NIG_REG_NIG_EMAC0_EN 0x1003c
+/* [RW 1] Output signal from NIG to EMAC1. When set enables the EMAC1 block. */
+#define NIG_REG_NIG_EMAC1_EN 0x10040
+/* [RW 1] Output signal from NIG to TX_EMAC0. When set indicates to the
+ EMAC0 to strip the CRC from the ingress packets. */
+#define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC 0x10044
+/* [R 32] Interrupt register #0 read */
+#define NIG_REG_NIG_INT_STS_0 0x103b0
+#define NIG_REG_NIG_INT_STS_1 0x103c0
+/* [R 32] Legacy E1 and E1H location for parity error mask register. */
+#define NIG_REG_NIG_PRTY_MASK 0x103dc
+/* [RW 32] Parity mask register #0 read/write */
+#define NIG_REG_NIG_PRTY_MASK_0 0x183c8
+#define NIG_REG_NIG_PRTY_MASK_1 0x183d8
+/* [R 32] Legacy E1 and E1H location for parity error status register. */
+#define NIG_REG_NIG_PRTY_STS 0x103d0
+/* [R 32] Parity register #0 read */
+#define NIG_REG_NIG_PRTY_STS_0 0x183bc
+#define NIG_REG_NIG_PRTY_STS_1 0x183cc
+/* [R 32] Legacy E1 and E1H location for parity error status clear register. */
+#define NIG_REG_NIG_PRTY_STS_CLR 0x103d4
+/* [RC 32] Parity register #0 read clear */
+#define NIG_REG_NIG_PRTY_STS_CLR_0 0x183c0
+#define NIG_REG_NIG_PRTY_STS_CLR_1 0x183d0
+#define MCPR_IMC_COMMAND_ENABLE (1L<<31)
+#define MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT 16
+#define MCPR_IMC_COMMAND_OPERATION_BITSHIFT 28
+#define MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT 8
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define NIG_REG_P0_HDRS_AFTER_BASIC 0x18038
+/* [RW 1] HW PFC enable bit. Set this bit to enable the PFC functionality in
+ * the NIG. Other flow control modes such as PAUSE and SAFC/LLFC should be
+ * disabled when this bit is set. */
+#define NIG_REG_P0_HWPFC_ENABLE 0x18078
+#define NIG_REG_P0_LLH_FUNC_MEM2 0x18480
+#define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440
+/* [RW 1] Input enable for RX MAC interface. */
+#define NIG_REG_P0_MAC_IN_EN 0x185ac
+/* [RW 1] Output enable for TX MAC interface */
+#define NIG_REG_P0_MAC_OUT_EN 0x185b0
+/* [RW 1] Output enable for TX PAUSE signal to the MAC. */
+#define NIG_REG_P0_MAC_PAUSE_OUT_EN 0x185b4
+/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
+ * future expansion) each priority is to be mapped to. Bits 3:0 specify the
+ * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
+ * priority field is extracted from the outer-most VLAN in receive packet.
+ * Only COS 0 and COS 1 are supported in E2. */
+#define NIG_REG_P0_PKT_PRIORITY_TO_COS 0x18054
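+/* Usage sketch (illustrative only): each VLAN priority owns a 4-bit nibble;
+ * so the COS for priority p sits at bits [4*p+3:4*p]. prio_to_cos[] is a
+ * hypothetical mapping table; REG_WR() is an assumed driver helper.
+ */
+static inline void nig_p0_prio_to_cos_sketch(struct bnx2x *bp,
+                                             const u8 prio_to_cos[8])
+{
+        u32 val = 0;
+        int p;
+
+        for (p = 0; p < 8; p++)
+                val |= (u32)(prio_to_cos[p] & 0xf) << (4 * p);
+        REG_WR(bp, NIG_REG_P0_PKT_PRIORITY_TO_COS, val);
+}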
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
+ * priority is mapped to COS 0 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS0_PRIORITY_MASK 0x18058
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
+ * priority is mapped to COS 1 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
+ * priority is mapped to COS 2 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS2_PRIORITY_MASK 0x186b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 3. A
+ * priority is mapped to COS 3 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS3_PRIORITY_MASK 0x186b4
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 4. A
+ * priority is mapped to COS 4 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS4_PRIORITY_MASK 0x186b8
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 5. A
+ * priority is mapped to COS 5 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS5_PRIORITY_MASK 0x186bc
+/* [RW 15] Specify which of the credit registers the client is to be mapped
+ * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For
+ * clients that are not subject to WFQ credit blocking - their
+ * specifications here are not used. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP 0x180f0
+/* [RW 32] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers cannot be shared between clients. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x18688
+/* [RW 4] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers cannot be shared between clients. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x1868c
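+/* Usage sketch (illustrative only): the 36-bit map is split across the
+ * _LSB (bits 31:0) and _MSB (bits 35:32) registers; so both halves are
+ * programmed from one 64-bit value. REG_WR() is an assumed driver helper.
+ */
+static inline void nig_p0_credit_map2_sketch(struct bnx2x *bp, u64 map)
+{
+        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB, (u32)map);
+        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB,
+               (u32)(map >> 32) & 0xf);
+}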
+/* [RW 5] Specify whether the client competes directly in the strict
+ * priority arbiter. The bits are mapped according to client ID (client IDs
+ * are defined in tx_arb_priority_client). Default value is set to enable
+ * strict priorities for clients 0-2 -- management and debug traffic. */
+#define NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT 0x180e8
+/* [RW 5] Specify whether the client is subject to WFQ credit blocking. The
+ * bits are mapped according to client ID (client IDs are defined in
+ * tx_arb_priority_client). Default value is 0 for not using WFQ credit
+ * blocking. */
+#define NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x180ec
+/* [RW 32] Specify the upper bound that credit register 0 is allowed to
+ * reach. */
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 0x1810c
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 0x18110
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2 0x18114
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3 0x18118
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4 0x1811c
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5 0x186a0
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6 0x186a4
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7 0x186a8
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8 0x186ac
+/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
+ * when it is time to increment. */
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 0x180f8
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 0x180fc
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2 0x18100
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3 0x18104
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4 0x18108
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5 0x18690
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6 0x18694
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7 0x18698
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8 0x1869c
+/* [RW 12] Specify the number of strict priority arbitration slots between
+ * two round-robin arbitration slots to avoid starvation. A value of 0 means
+ * no strict priority cycles - the strict priority with anti-starvation
+ * arbiter becomes a round-robin arbiter. */
+#define NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS 0x180f4
+/* [RW 15] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. Priority 0 is the highest priority. Bits [2:0]
+ * are for priority 0 client; bits [14:12] are for priority 4 client. The
+ * clients are assigned the following IDs: 0-management; 1-debug traffic
+ * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1
+ * traffic. The reset value[14:0] is set to 0x4688 (15'b100_011_010_001_000)
+ * for management at priority 0; debug traffic at priorities 1 and 2; COS0
+ * traffic at priority 3; and COS1 traffic at priority 4. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4
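+/* Worked example (illustrative only): each priority owns a 3-bit field; so
+ * the client for priority p is bits [3*p+2:3*p]. With the reset value
+ * 0x4688 (15'b100_011_010_001_000); priorities 0..4 decode to clients
+ * 0; 1; 2; 3 and 4 respectively; matching the comment above.
+ */
+static inline u8 nig_p0_arb_client_of_prio_sketch(u32 reg_val, int prio)
+{
+        return (reg_val >> (3 * prio)) & 0x7;
+}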
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c
+#define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0
+#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460
+/* [RW 32] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. This register specifies bits 31:0 of the 36-bit
+ * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ * client; bits [35-32] are for priority 8 client. The clients are assigned
+ * the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ * accommodate the 9 input clients to ETS arbiter. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB 0x18680
+/* [RW 4] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. This register specifies bits 35:32 of the 36-bit
+ * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ * client; bits [35-32] are for priority 8 client. The clients are assigned
+ * the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ * accommodate the 9 input clients to ETS arbiter. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684
+#define NIG_REG_P1_MAC_IN_EN 0x185c0
+/* [RW 1] Output enable for TX MAC interface */
+#define NIG_REG_P1_MAC_OUT_EN 0x185c4
+/* [RW 1] Output enable for TX PAUSE signal to the MAC. */
+#define NIG_REG_P1_MAC_PAUSE_OUT_EN 0x185c8
+/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
+ * future expansion) each priority is to be mapped to. Bits 3:0 specify the
+ * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
+ * priority field is extracted from the outer-most VLAN in receive packet.
+ * Only COS 0 and COS 1 are supported in E2. */
+#define NIG_REG_P1_PKT_PRIORITY_TO_COS 0x181a8
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
+ * priority is mapped to COS 0 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS0_PRIORITY_MASK 0x181ac
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
+ * priority is mapped to COS 1 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
+ * priority is mapped to COS 2 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS2_PRIORITY_MASK 0x186f8
+/* [R 1] RX FIFO for receiving data from MAC is empty. */
+#define NIG_REG_P1_RX_MACFIFO_EMPTY 0x1858c
+/* [R 1] TLLH FIFO is empty. */
+#define NIG_REG_P1_TLLH_FIFO_EMPTY 0x18338
+/* [RW 32] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers cannot be shared between clients. Note also that there are
+ * only COS0-2 in port 1 - there is a total of 6 clients in port 1. Only
+ * credit registers 0-5 are valid. This register should be configured
+ * appropriately before enabling WFQ. */
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x186e8
+/* [RW 4] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers cannot be shared between clients. Note also that there are
+ * only COS0-2 in port 1 - there is a total of 6 clients in port 1. Only
+ * credit registers 0-5 are valid. This register should be configured
+ * appropriately before enabling WFQ. */
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x186ec
+/* [RW 9] Specify whether the client competes directly in the strict
+ * priority arbiter. The bits are mapped according to client ID (client IDs
+ * are defined in tx_arb_priority_client2): 0-management; 1-debug traffic
+ * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1
+ * traffic; 5-COS2 traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic.
+ * Default value is set to enable strict priorities for all clients. */
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT 0x18234
+/* [RW 9] Specify whether the client is subject to WFQ credit blocking. The
+ * bits are mapped according to client ID (client IDs are defined in
+ * tx_arb_priority_client2): 0-management; 1-debug traffic from this port;
+ * 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2
+ * traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. Default value is
+ * 0 for not using WFQ credit blocking. */
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x18238
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 0x18258
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 0x1825c
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 0x18260
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 0x18264
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 0x18268
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 0x186f4
+/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
+ * when it is time to increment. */
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 0x18244
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 0x18248
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 0x1824c
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 0x18250
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 0x18254
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 0x186f0
+/* [RW 12] Specify the number of strict priority arbitration slots between
+ two round-robin arbitration slots to avoid starvation. A value of 0 means
+ no strict priority cycles - the strict priority with anti-starvation
+ arbiter becomes a round-robin arbiter. */
+#define NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS 0x18240
+/* [RW 32] Specify the client number to be assigned to each priority of the
+ strict priority arbiter. This register specifies bits 31:0 of the 36-bit
+ value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ client; bits [35-32] are for priority 8 client. The clients are assigned
+ the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ accommodate the 9 input clients to ETS arbiter. Note that this register
+ is the same as the one for port 0, except that port 1 only has COS 0-2
+ traffic. There is no traffic for COS 3-5 of port 1. */
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB 0x186e0
+/* [RW 4] Specify the client number to be assigned to each priority of the
+ strict priority arbiter. This register specifies bits 35:32 of the 36-bit
+ value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ client; bits [35-32] are for priority 8 client. The clients are assigned
+ the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ accommodate the 9 input clients to ETS arbiter. Note that this register
+ is the same as the one for port 0, except that port 1 only has COS 0-2
+ traffic. There is no traffic for COS 3-5 of port 1. */
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4
+/* [R 1] TX FIFO for transmitting data to MAC is empty. */
+#define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594
+/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets
+ forwarded to the host. */
+#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8
+/* [RW 1] Pause enable for port0. This register may get 1 only when
+ ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same
+ port */
+#define NIG_REG_PAUSE_ENABLE_0 0x160c0
+#define NIG_REG_PAUSE_ENABLE_1 0x160c4
+/* [RW 1] Input enable for RX PBF LP IF */
+#define NIG_REG_PBF_LB_IN_EN 0x100b4
+/* [RW 1] Value of this register will be transmitted to port swap when
+ ~nig_registers_strap_override.strap_override =1 */
+#define NIG_REG_PORT_SWAP 0x10394
+/* [RW 1] PPP enable for port0. This register may get 1 only when
+ * ~safc_enable.safc_enable = 0 and pause_enable.pause_enable =0 for the
+ * same port */
+#define NIG_REG_PPP_ENABLE_0 0x160b0
+#define NIG_REG_PPP_ENABLE_1 0x160b4
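+/* Usage sketch (illustrative only): per the comments above; PAUSE; PPP and
+ * SAFC (llfc_enable) are mutually exclusive per port; so a mode switch
+ * clears the other two enables before setting the chosen one. REG_WR() is
+ * an assumed driver helper.
+ */
+static inline void nig_p0_select_pause_sketch(struct bnx2x *bp)
+{
+        REG_WR(bp, NIG_REG_LLFC_ENABLE_0, 0);   /* SAFC off */
+        REG_WR(bp, NIG_REG_PPP_ENABLE_0, 0);    /* PPP off */
+        REG_WR(bp, NIG_REG_PAUSE_ENABLE_0, 1);  /* PAUSE on */
+}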
+/* [RW 1] output enable for RX parser descriptor IF */
+#define NIG_REG_PRS_EOP_OUT_EN 0x10104
+/* [RW 1] Input enable for RX parser request IF */
+#define NIG_REG_PRS_REQ_IN_EN 0x100b8
+/* [RW 5] control to serdes - CL45 DEVAD */
+#define NIG_REG_SERDES0_CTRL_MD_DEVAD 0x10370
+/* [RW 1] control to serdes; 0 - clause 45; 1 - clause 22 */
+#define NIG_REG_SERDES0_CTRL_MD_ST 0x1036c
+/* [RW 5] control to serdes - CL22 PHY_ADD and CL45 PRTAD */
+#define NIG_REG_SERDES0_CTRL_PHY_ADDR 0x10374
+/* [R 1] status from serdes0 that inputs to interrupt logic of link status */
+#define NIG_REG_SERDES0_STATUS_LINK_STATUS 0x10578
+/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
+ for port0 */
+#define NIG_REG_STAT0_BRB_DISCARD 0x105f0
+/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure
+ for port0 */
+#define NIG_REG_STAT0_BRB_TRUNCATE 0x105f8
+/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that are
+ between 1024 and 1522 bytes for port0 */
+#define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750
+/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that are
+ 1523 bytes and above for port0 */
+#define NIG_REG_STAT0_EGRESS_MAC_PKT1 0x10760
+/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
+ for port1 */
+#define NIG_REG_STAT1_BRB_DISCARD 0x10628
+/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that are
+ between 1024 and 1522 bytes for port1 */
+#define NIG_REG_STAT1_EGRESS_MAC_PKT0 0x107a0
+/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that are
+ 1523 bytes and above for port1 */
+#define NIG_REG_STAT1_EGRESS_MAC_PKT1 0x107b0
+/* [WB_R 64] Rx statistics : User octets received for LP */
+#define NIG_REG_STAT2_BRB_OCTET 0x107e0
+#define NIG_REG_STATUS_INTERRUPT_PORT0 0x10328
+#define NIG_REG_STATUS_INTERRUPT_PORT1 0x1032c
+/* [RW 1] port swap mux selection. If this register equals 0 then port
+ swap is equal to SPIO pin that inputs from ifmux_serdes_swap. If 1 then
+ port swap is equal to ~nig_registers_port_swap.port_swap */
+#define NIG_REG_STRAP_OVERRIDE 0x10398
+/* [RW 1] output enable for RX_XCM0 IF */
+#define NIG_REG_XCM0_OUT_EN 0x100f0
+/* [RW 1] output enable for RX_XCM1 IF */
+#define NIG_REG_XCM1_OUT_EN 0x100f4
+/* [RW 1] control to xgxs - remote PHY in-band MDIO */
+#define NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST 0x10348
+/* [RW 5] control to xgxs - CL45 DEVAD */
+#define NIG_REG_XGXS0_CTRL_MD_DEVAD 0x1033c
+/* [RW 1] control to xgxs; 0 - clause 45; 1 - clause 22 */
+#define NIG_REG_XGXS0_CTRL_MD_ST 0x10338
+/* [RW 5] control to xgxs - CL22 PHY_ADD and CL45 PRTAD */
+#define NIG_REG_XGXS0_CTRL_PHY_ADDR 0x10340
+/* [R 1] status from xgxs0 that inputs to interrupt logic of link10g. */
+#define NIG_REG_XGXS0_STATUS_LINK10G 0x10680
+/* [R 4] status from xgxs0 that inputs to interrupt logic of link status */
+#define NIG_REG_XGXS0_STATUS_LINK_STATUS 0x10684
+/* [RW 2] selection for XGXS lane of port 0 in NIG_MUX block */
+#define NIG_REG_XGXS_LANE_SEL_P0 0x102e8
+/* [RW 1] selection for port0 for NIG_MUX block : 0 = SerDes; 1 = XGXS */
+#define NIG_REG_XGXS_SERDES0_MODE_SEL 0x102e0
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT (0x1<<0)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS (0x1<<9)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G (0x1<<15)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS (0xf<<18)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */
+#define PBF_REG_COS0_UPPER_BOUND 0x15c05c
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter
+ * of port 0. */
+#define PBF_REG_COS0_UPPER_BOUND_P0 0x15c2cc
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter
+ * of port 1. */
+#define PBF_REG_COS0_UPPER_BOUND_P1 0x15c2e4
+/* [RW 31] The weight of COS0 in the ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT 0x15c054
+/* [RW 31] The weight of COS0 in port 0 ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT_P0 0x15c2a8
+/* [RW 31] The weight of COS0 in port 1 ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT_P1 0x15c2c0
+/* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */
+#define PBF_REG_COS1_UPPER_BOUND 0x15c060
+/* [RW 31] The weight of COS1 in the ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT 0x15c058
+/* [RW 31] The weight of COS1 in port 0 ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT_P0 0x15c2ac
+/* [RW 31] The weight of COS1 in port 1 ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT_P1 0x15c2c4
+/* [RW 31] The weight of COS2 in port 0 ETS command arbiter. */
+#define PBF_REG_COS2_WEIGHT_P0 0x15c2b0
+/* [RW 31] The weight of COS2 in port 1 ETS command arbiter. */
+#define PBF_REG_COS2_WEIGHT_P1 0x15c2c8
+/* [RW 31] The weight of COS3 in port 0 ETS command arbiter. */
+#define PBF_REG_COS3_WEIGHT_P0 0x15c2b4
+/* [RW 31] The weight of COS4 in port 0 ETS command arbiter. */
+#define PBF_REG_COS4_WEIGHT_P0 0x15c2b8
+/* [RW 31] The weight of COS5 in port 0 ETS command arbiter. */
+#define PBF_REG_COS5_WEIGHT_P0 0x15c2bc
+/* [R 11] Current credit for the LB queue in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_LB_Q 0x140338
+/* [R 11] Current credit for queue 0 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_Q0 0x14033c
+/* [R 11] Current credit for queue 1 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_Q1 0x140340
+/* [RW 1] Disable processing further tasks from port 0 (after ending the
+ current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c
+/* [RW 1] Disable processing further tasks from port 1 (after ending the
+ current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P1 0x140060
+/* [RW 1] Disable processing further tasks from port 4 (after ending the
+ current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
+#define PBF_REG_DISABLE_PF 0x1402e8
+/* [RW 18] For port 0: For each client that is subject to WFQ (the
+ * corresponding bit is 1); indicates to which of the credit registers this
+ * client is mapped. For clients which are not credit blocked; their mapping
+ * is don't care. */
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0 0x15c288
+/* [RW 9] For port 1: For each client that is subject to WFQ (the
+ * corresponding bit is 1); indicates to which of the credit registers this
+ * client is mapped. For clients which are not credit blocked; their mapping
+ * is don't care. */
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1 0x15c28c
+/* [RW 6] For port 0: Bit per client to indicate if the client competes in
+ * the strict priority arbiter directly (corresponding bit = 1); or first
+ * goes to the RR arbiter (corresponding bit = 0); and then competes in the
+ * lowest priority in the strict-priority arbiter. */
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 0x15c278
+/* [RW 3] For port 1: Bit per client to indicate if the client competes in
+ * the strict priority arbiter directly (corresponding bit = 1); or first
+ * goes to the RR arbiter (corresponding bit = 0); and then competes in the
+ * lowest priority in the strict-priority arbiter. */
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 0x15c27c
+/* [RW 6] For port 0: Bit per client to indicate if the client is subject to
+ * WFQ credit blocking (corresponding bit = 1). */
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 0x15c280
+/* [RW 3] For port 1: Bit per client to indicate if the client is subject to
+ * WFQ credit blocking (corresponding bit = 1). */
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 0x15c284
+/* [RW 16] For port 0: The number of strict priority arbitration slots
+ * between 2 RR arbitration slots. A value of 0 means no strict priority
+ * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR
+ * arbiter. */
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 0x15c2a0
+/* [RW 16] For port 1: The number of strict priority arbitration slots
+ * between 2 RR arbitration slots. A value of 0 means no strict priority
+ * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR
+ * arbiter. */
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 0x15c2a4
+/* [RW 18] For port 0: Indicates which client is connected to each priority
+ * in the strict-priority arbiter. Priority 0 is the highest priority, and
+ * priority 5 is the lowest; to which the RR output is connected (this is
+ * not configurable). */
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 0x15c270
+/* [RW 9] For port 1: Indicates which client is connected to each priority
+ * in the strict-priority arbiter. Priority 0 is the highest priority, and
+ * priority 5 is the lowest; to which the RR output is connected (this is
+ * not configurable). */
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 0x15c274
+/* [RW 1] Indicates that ETS is performed between the COSes in the command
+ * arbiter. If reset strict priority w/ anti-starvation will be performed
+ * w/o WFQ. */
+#define PBF_REG_ETS_ENABLED 0x15c050
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
+#define PBF_REG_HDRS_AFTER_TAG_0 0x15c0b8
+/* [R 1] Removed for E3 B0 - Indicates which COS is connected to the highest
+ * priority in the command arbiter. */
+#define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c
+#define PBF_REG_IF_ENABLE_REG 0x140044
+/* [RW 1] Init bit. When set the initial credits are copied to the credit
+ registers (except the port credits). Should be set and then reset after
+ the configuration of the block has ended. */
+#define PBF_REG_INIT 0x140000
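+/* Usage sketch (illustrative only): per the comment above; the init bit is
+ * pulsed - set once block configuration is done; then cleared. REG_WR() is
+ * an assumed driver helper.
+ */
+static inline void pbf_init_pulse_sketch(struct bnx2x *bp)
+{
+        REG_WR(bp, PBF_REG_INIT, 1);
+        REG_WR(bp, PBF_REG_INIT, 0);
+}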
+/* [RW 11] Initial credit for the LB queue in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_LB_Q 0x15c248
+/* [RW 11] Initial credit for queue 0 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_Q0 0x15c230
+/* [RW 11] Initial credit for queue 1 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_Q1 0x15c234
+/* [RW 1] Init bit for port 0. When set the initial credit of port 0 is
+ copied to the credit register. Should be set and then reset after the
+ configuration of the port has ended. */
+#define PBF_REG_INIT_P0 0x140004
+/* [RW 1] Init bit for port 1. When set the initial credit of port 1 is
+ copied to the credit register. Should be set and then reset after the
+ configuration of the port has ended. */
+#define PBF_REG_INIT_P1 0x140008
+/* [RW 1] Init bit for port 4. When set the initial credit of port 4 is
+ copied to the credit register. Should be set and then reset after the
+ configuration of the port has ended. */
+#define PBF_REG_INIT_P4 0x14000c
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * the LB queue. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q 0x140354
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * queue 0. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 0x140358
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * queue 1. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 0x14035c
+/* [RW 1] Enable for mac interface 0. */
+#define PBF_REG_MAC_IF0_ENABLE 0x140030
+/* [RW 1] Enable for mac interface 1. */
+#define PBF_REG_MAC_IF1_ENABLE 0x140034
+/* [RW 1] Enable for the loopback interface. */
+#define PBF_REG_MAC_LB_ENABLE 0x140040
+/* [RW 6] Bit-map indicating which headers must appear in the packet */
+#define PBF_REG_MUST_HAVE_HDRS 0x15c0c4
+/* [RW 16] The number of strict priority arbitration slots between 2 RR
+ * arbitration slots. A value of 0 means no strict priority cycles; i.e. the
+ * strict-priority w/ anti-starvation arbiter is a RR arbiter. */
+#define PBF_REG_NUM_STRICT_ARB_SLOTS 0x15c064
+/* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause
+ is not supported. */
+#define PBF_REG_P0_ARB_THRSH 0x1400e4
+/* [R 11] Current credit for port 0 in the tx port buffers in 16 byte lines. */
+#define PBF_REG_P0_CREDIT 0x140200
+/* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte
+ lines. */
+#define PBF_REG_P0_INIT_CRD 0x1400d0
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * port 0. Reset upon init. */
+#define PBF_REG_P0_INTERNAL_CRD_FREED_CNT 0x140308
+/* [R 1] Removed for E3 B0 - Indication that pause is enabled for port 0. */
+#define PBF_REG_P0_PAUSE_ENABLE 0x140014
+/* [R 8] Removed for E3 B0 - Number of tasks in port 0 task queue. */
+#define PBF_REG_P0_TASK_CNT 0x140204
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 0. Reset upon init. */
+#define PBF_REG_P0_TQ_LINES_FREED_CNT 0x1402f0
+/* [R 12] Number of 8 bytes lines occupied in the task queue of port 0. */
+#define PBF_REG_P0_TQ_OCCUPANCY 0x1402fc
+/* [R 11] Removed for E3 B0 - Current credit for port 1 in the tx port
+ * buffers in 16 byte lines. */
+#define PBF_REG_P1_CREDIT 0x140208
+/* [R 11] Removed for E3 B0 - Initial credit for port 1 in the tx port
+ * buffers in 16 byte lines. */
+#define PBF_REG_P1_INIT_CRD 0x1400d4
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * port 1. Reset upon init. */
+#define PBF_REG_P1_INTERNAL_CRD_FREED_CNT 0x14030c
+/* [R 8] Removed for E3 B0 - Number of tasks in port 1 task queue. */
+#define PBF_REG_P1_TASK_CNT 0x14020c
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 1. Reset upon init. */
+#define PBF_REG_P1_TQ_LINES_FREED_CNT 0x1402f4
+/* [R 12] Number of 8 bytes lines occupied in the task queue of port 1. */
+#define PBF_REG_P1_TQ_OCCUPANCY 0x140300
+/* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */
+#define PBF_REG_P4_CREDIT 0x140210
+/* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte
+ lines. */
+#define PBF_REG_P4_INIT_CRD 0x1400e0
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * port 4. Reset upon init. */
+#define PBF_REG_P4_INTERNAL_CRD_FREED_CNT 0x140310
+/* [R 8] Removed for E3 B0 - Number of tasks in port 4 task queue. */
+#define PBF_REG_P4_TASK_CNT 0x140214
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 4. Reset upon init. */
+#define PBF_REG_P4_TQ_LINES_FREED_CNT 0x1402f8
+/* [R 12] Number of 8 bytes lines occupied in the task queue of port 4. */
+#define PBF_REG_P4_TQ_OCCUPANCY 0x140304
+/* [RW 5] Interrupt mask register #0 read/write */
+#define PBF_REG_PBF_INT_MASK 0x1401d4
+/* [R 5] Interrupt register #0 read */
+#define PBF_REG_PBF_INT_STS 0x1401c8
+/* [RW 20] Parity mask register #0 read/write */
+#define PBF_REG_PBF_PRTY_MASK 0x1401e4
+/* [RC 20] Parity register #0 read clear */
+#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc
+/* [RW 16] The Ethernet type value for L2 tag 0 */
+#define PBF_REG_TAG_ETHERTYPE_0 0x15c090
+/* [RW 4] The length of the info field for L2 tag 0. The length is between
+ * 2B and 14B; in 2B granularity */
+#define PBF_REG_TAG_LEN_0 0x15c09c
+/* [R 32] Cyclic counter for number of 8 byte lines freed from the LB task
+ * queue. Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_LB_Q 0x14038c
+/* [R 32] Cyclic counter for number of 8 byte lines freed from the task
+ * queue 0. Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_Q0 0x140390
+/* [R 32] Cyclic counter for number of 8 byte lines freed from task queue 1.
+ * Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_Q1 0x140394
+/* [R 13] Number of 8 bytes lines occupied in the task queue of the LB
+ * queue. */
+#define PBF_REG_TQ_OCCUPANCY_LB_Q 0x1403a8
+/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 0. */
+#define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac
+/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. */
+#define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0
+#define PB_REG_CONTROL 0
+/* [RW 2] Interrupt mask register #0 read/write */
+#define PB_REG_PB_INT_MASK 0x28
+/* [R 2] Interrupt register #0 read */
+#define PB_REG_PB_INT_STS 0x1c
+/* [RW 4] Parity mask register #0 read/write */
+#define PB_REG_PB_PRTY_MASK 0x38
+/* [R 4] Parity register #0 read */
+#define PB_REG_PB_PRTY_STS 0x2c
+/* [RC 4] Parity register #0 read clear */
+#define PB_REG_PB_PRTY_STS_CLR 0x30
+#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN (0x1<<6)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN (0x1<<7)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN (0x1<<4)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN (0x1<<3)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN (0x1<<5)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN (0x1<<2)
+/* [R 8] Config space A attention dirty bits. Each bit indicates that the
+ * corresponding PF generates config space A attention. Set by PXP. Reset by
+ * MCP writing 1 to icfg_space_a_request_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_CFG_SPACE_A_REQUEST 0x9010
+/* [R 8] Config space B attention dirty bits. Each bit indicates that the
+ * corresponding PF generates config space B attention. Set by PXP. Reset by
+ * MCP writing 1 to icfg_space_b_request_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_CFG_SPACE_B_REQUEST 0x9014
+/* [RW 1] Type A PF enable inbound interrupt table for CSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_CSDM_INB_INT_A_PF_ENABLE 0x9194
+/* [RW 18] Type B VF inbound interrupt table for CSDM: bits [17:9]-mask;
+ * bits [8:0]-address. Bits [1:0] must be zero (DW resolution address). */
+#define PGLUE_B_REG_CSDM_INB_INT_B_VF 0x916c
+/* [RW 1] Type B VF enable inbound interrupt table for CSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_CSDM_INB_INT_B_VF_ENABLE 0x919c
+/* [RW 16] Start offset of CSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_START_OFFSET_A 0x9100
+/* [RW 16] Start offset of CSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_START_OFFSET_B 0x9108
+/* [RW 5] VF Shift of CSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_VF_SHIFT_B 0x9110
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_CSDM_ZONE_A_SIZE_PF 0x91ac
+/* [R 8] FLR request attention dirty bits for PFs 0 to 7. Each bit indicates
+ * that the FLR register of the corresponding PF was set. Set by PXP. Reset
+ * by MCP writing 1 to flr_request_pf_7_0_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_FLR_REQUEST_PF_7_0 0x9028
+/* [W 8] FLR request attention dirty bits clear for PFs 0 to 7. MCP writes 1
+ * to a bit in this register in order to clear the corresponding bit in
+ * flr_request_pf_7_0 register. Note: register contains bits from both
+ * paths. */
+#define PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR 0x9418
+/* [R 32] FLR request attention dirty bits for VFs 96 to 127. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_127_96_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_127_96 0x9024
+/* [R 32] FLR request attention dirty bits for VFs 0 to 31. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_31_0_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_31_0 0x9018
+/* [R 32] FLR request attention dirty bits for VFs 32 to 63. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_63_32_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_63_32 0x901c
+/* [R 32] FLR request attention dirty bits for VFs 64 to 95. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_95_64_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_95_64 0x9020
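+/* Usage sketch for the latched FLR attention bits above: they follow a
+ * write-1-to-clear protocol (read the dirty bits, then write 1 to the
+ * matching bit of the *_CLR register). The helper below is hypothetical
+ * and assumes the driver's REG_RD/REG_WR GRC accessors; per the comments
+ * the clear is normally performed by the MCP. */
+static inline void bnx2x_ack_pf_flr_example(struct bnx2x *bp, int pf_id)
+{
+	u32 pending = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0);
+
+	if (pending & (1U << pf_id))
+		/* writing 1 clears the latched bit for this PF */
+		REG_WR(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR, 1U << pf_id);
+}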
+/* [R 8] Each bit indicates an incorrect behavior in user RX interface. Bit
+ * 0 - Target memory read arrived with a correctable error. Bit 1 - Target
+ * memory read arrived with an uncorrectable error. Bit 2 - Configuration RW
+ * arrived with a correctable error. Bit 3 - Configuration RW arrived with
+ * an uncorrectable error. Bit 4 - Completion with Configuration Request
+ * Retry Status. Bit 5 - Expansion ROM access received with a write request.
+ * Bit 6 - Completion with pcie_rx_err of 0000; CMPL_STATUS of non-zero; and
+ * pcie_rx_last not asserted. Bit 7 - Completion with pcie_rx_err of 1010;
+ * and pcie_rx_last not asserted. */
+#define PGLUE_B_REG_INCORRECT_RCV_DETAILS 0x9068
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER 0x942c
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434
+#define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438
+/* [R 9] Interrupt register #0 read */
+#define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298
+/* [RC 9] Interrupt register #0 read clear */
+#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR 0x929c
+/* [RW 2] Parity mask register #0 read/write */
+#define PGLUE_B_REG_PGLUE_B_PRTY_MASK 0x92b4
+/* [R 2] Parity register #0 read */
+#define PGLUE_B_REG_PGLUE_B_PRTY_STS 0x92a8
+/* [RC 2] Parity register #0 read clear */
+#define PGLUE_B_REG_PGLUE_B_PRTY_STS_CLR 0x92ac
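+/* Usage sketch contrasting the STS and STS_CLR flavours above: the RC
+ * (read-clear) register returns the latched parity bits and resets them
+ * in the same access. Hypothetical helper, assuming the driver's REG_RD
+ * accessor. */
+static inline u32 bnx2x_pglue_b_prty_ack_example(struct bnx2x *bp)
+{
+	/* read-clear: returns the pending bits and clears them */
+	return REG_RD(bp, PGLUE_B_REG_PGLUE_B_PRTY_STS_CLR);
+}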
+/* [R 13] Details of first request received with error. [2:0] - PFID. [3] -
+ * VF_VALID. [9:4] - VFID. [11:10] - Error Code - 0 - Indicates Completion
+ * Timeout of a User Tx non-posted request. 1 - unsupported request. 2 -
+ * completer abort. 3 - Illegal value for this field. [12] valid - indicates
+ * if there was a completion error since the last time this register was
+ * cleared. */
+#define PGLUE_B_REG_RX_ERR_DETAILS 0x9080
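+/* Decode sketch for the layout documented above ([2:0] PFID; [3]
+ * VF_VALID; [9:4] VFID; [11:10] error code; [12] valid). The masks are
+ * derived from that comment only; the helper name is hypothetical. */
+static inline void bnx2x_log_rx_err_details_example(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, PGLUE_B_REG_RX_ERR_DETAILS);
+
+	if (val & (1U << 12))	/* valid: an error was latched */
+		pr_info("rx err: pf %u vf_valid %u vf %u code %u\n",
+			val & 0x7, (val >> 3) & 0x1,
+			(val >> 4) & 0x3f, (val >> 10) & 0x3);
+}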
+/* [R 18] Details of first ATS Translation Completion request received with
+ * error. [2:0] - PFID. [3] - VF_VALID. [9:4] - VFID. [11:10] - Error Code -
+ * 0 - Indicates Completion Timeout of a User Tx non-posted request. 1 -
+ * unsupported request. 2 - completer abort. 3 - Illegal value for this
+ * field. [16:12] - ATC OTB EntryID. [17] valid - indicates if there was a
+ * completion error since the last time this register was cleared. */
+#define PGLUE_B_REG_RX_TCPL_ERR_DETAILS 0x9084
+/* [W 8] Debug only - Shadow BME bits clear for PFs 0 to 7. MCP writes 1 to
+ * a bit in this register in order to clear the corresponding bit in
+ * shadow_bme_pf_7_0 register. MCP should never use this unless a
+ * work-around is needed. Note: register contains bits from both paths. */
+#define PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR 0x9458
+/* [R 8] SR IOV disabled attention dirty bits. Each bit indicates that the
+ * VF enable register of the corresponding PF is written to 0 and was
+ * previously 1. Set by PXP. Reset by MCP writing 1 to
+ * sr_iov_disabled_request_clr. Note: register contains bits from both
+ * paths. */
+#define PGLUE_B_REG_SR_IOV_DISABLED_REQUEST 0x9030
+/* [R 32] Indicates the status of tags 32-63. 0 - tag is used - read
+ * completion did not return yet. 1 - tag is unused. Same functionality as
+ * pxp2_registers_pgl_exp_rom_data2 for tags 0-31. */
+#define PGLUE_B_REG_TAGS_63_32 0x9244
+/* [RW 1] Type A PF enable inbound interrupt table for TSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_TSDM_INB_INT_A_PF_ENABLE 0x9170
+/* [RW 16] Start offset of TSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_START_OFFSET_A 0x90c4
+/* [RW 16] Start offset of TSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_START_OFFSET_B 0x90cc
+/* [RW 5] VF Shift of TSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_VF_SHIFT_B 0x90d4
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_TSDM_ZONE_A_SIZE_PF 0x91a0
+/* [R 32] Address [31:0] of first read request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x9098
+/* [R 32] Address [63:32] of first read request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x909c
+/* [R 31] Details of first read request not submitted due to error. [4:0]
+ * VQID. [5] TREQ. 1 - Indicates the request is a Translation Request.
+ * [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] -
+ * VFID. */
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x90a0
+/* [R 26] Details of first read request not submitted due to error. [15:0]
+ * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
+ * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
+ * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
+ * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
+ * indicates if there was a request not submitted due to error since the
+ * last time this register was cleared. */
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x90a4
+/* [R 32] Address [31:0] of first write request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x9088
+/* [R 32] Address [63:32] of first write request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x908c
+/* [R 31] Details of first write request not submitted due to error. [4:0]
+ * VQID. [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25]
+ * - VFID. */
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x9090
+/* [R 26] Details of first write request not submitted due to error. [15:0]
+ * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
+ * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
+ * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
+ * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
+ * indicates if there was a request not submitted due to error since the
+ * last time this register was cleared. */
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x9094
+/* [RW 10] Type A PF/VF inbound interrupt table for USDM: bits[9:5]-mask;
+ * bits[4:0]-address relative to start_offset_a. Bits [1:0] can have any
+ * value (Byte resolution address). */
+#define PGLUE_B_REG_USDM_INB_INT_A_0 0x9128
+#define PGLUE_B_REG_USDM_INB_INT_A_1 0x912c
+#define PGLUE_B_REG_USDM_INB_INT_A_2 0x9130
+#define PGLUE_B_REG_USDM_INB_INT_A_3 0x9134
+#define PGLUE_B_REG_USDM_INB_INT_A_4 0x9138
+#define PGLUE_B_REG_USDM_INB_INT_A_5 0x913c
+#define PGLUE_B_REG_USDM_INB_INT_A_6 0x9140
+/* [RW 1] Type A PF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_A_PF_ENABLE 0x917c
+/* [RW 1] Type A VF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_A_VF_ENABLE 0x9180
+/* [RW 1] Type B VF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_B_VF_ENABLE 0x9184
+/* [RW 16] Start offset of USDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_START_OFFSET_A 0x90d8
+/* [RW 16] Start offset of USDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_START_OFFSET_B 0x90e0
+/* [RW 5] VF Shift of USDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_VF_SHIFT_B 0x90e8
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_USDM_ZONE_A_SIZE_PF 0x91a4
+/* [R 26] Details of first target VF request accessing VF GRC space that
+ * failed permission check. [14:0] Address. [15] w_nr: 0 - Read; 1 - Write.
+ * [21:16] VFID. [24:22] - PFID. [25] valid - indicates if there was a
+ * request accessing VF GRC space that failed permission check since the
+ * last time this register was cleared. Permission checks are: function
+ * permission; R/W permission; address range permission. */
+#define PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS 0x9234
+/* [R 31] Details of first target VF request with length violation (too many
+ * DWs) accessing BAR0. [12:0] Address in DWs (bits [14:2] of byte address).
+ * [14:13] BAR. [20:15] VFID. [23:21] - PFID. [29:24] - Length in DWs. [30]
+ * valid - indicates if there was a request with length violation since the
+ * last time this register was cleared. Length violations: length of more
+ * than 2DWs; length of 2DWs and address not QW aligned; window is GRC and
+ * length is more than 1 DW. */
+#define PGLUE_B_REG_VF_LENGTH_VIOLATION_DETAILS 0x9230
+/* [R 8] Was_error indication dirty bits for PFs 0 to 7. Each bit indicates
+ * that there was a completion with uncorrectable error for the
+ * corresponding PF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_pf_7_0_clr. */
+#define PGLUE_B_REG_WAS_ERROR_PF_7_0 0x907c
+/* [W 8] Was_error indication dirty bits clear for PFs 0 to 7. MCP writes 1
+ * to a bit in this register in order to clear the corresponding bit in
+ * was_error_pf_7_0 register. */
+#define PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR 0x9470
+/* [R 32] Was_error indication dirty bits for VFs 96 to 127. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_127_96_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_127_96 0x9078
+/* [W 32] Was_error indication dirty bits clear for VFs 96 to 127. MCP
+ * writes 1 to a bit in this register in order to clear the corresponding
+ * bit in was_error_vf_127_96 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR 0x9474
+/* [R 32] Was_error indication dirty bits for VFs 0 to 31. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_31_0_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0 0x906c
+/* [W 32] Was_error indication dirty bits clear for VFs 0 to 31. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_31_0 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x9478
+/* [R 32] Was_error indication dirty bits for VFs 32 to 63. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_63_32_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_63_32 0x9070
+/* [W 32] Was_error indication dirty bits clear for VFs 32 to 63. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_63_32 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR 0x947c
+/* [R 32] Was_error indication dirty bits for VFs 64 to 95. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_95_64_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_95_64 0x9074
+/* [W 32] Was_error indication dirty bits clear for VFs 64 to 95. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_95_64 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR 0x9480
+/* [RW 1] Type A PF enable inbound interrupt table for XSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_XSDM_INB_INT_A_PF_ENABLE 0x9188
+/* [RW 16] Start offset of XSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_START_OFFSET_A 0x90ec
+/* [RW 16] Start offset of XSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_START_OFFSET_B 0x90f4
+/* [RW 5] VF Shift of XSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_VF_SHIFT_B 0x90fc
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_XSDM_ZONE_A_SIZE_PF 0x91a8
+#define PRS_REG_A_PRSU_20 0x40134
+/* [R 8] debug only: CFC load request current credit. Transaction based. */
+#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164
+/* [R 8] debug only: CFC search request current credit. Transaction based. */
+#define PRS_REG_CFC_SEARCH_CURRENT_CREDIT 0x40168
+/* [RW 6] The initial credit for the search message to the CFC interface.
+ Credit is transaction based. */
+#define PRS_REG_CFC_SEARCH_INITIAL_CREDIT 0x4011c
+/* [RW 24] CID for port 0 if no match */
+#define PRS_REG_CID_PORT_0 0x400fc
+/* [RW 32] The CM header for flush message where 'load existed' bit in CFC
+ load response is reset and packet type is 0. Used in packet start message
+ to TCM. */
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_0 0x400dc
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_1 0x400e0
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_2 0x400e4
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_3 0x400e8
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_4 0x400ec
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_5 0x400f0
+/* [RW 32] The CM header for flush message where 'load existed' bit in CFC
+ load response is set and packet type is 0. Used in packet start message
+ to TCM. */
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_0 0x400bc
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_1 0x400c0
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_2 0x400c4
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_3 0x400c8
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_4 0x400cc
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_5 0x400d0
+/* [RW 32] The CM header for a match and packet type 1 for loopback port.
+ Used in packet start message to TCM. */
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_1 0x4009c
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_2 0x400a0
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_3 0x400a4
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_4 0x400a8
+/* [RW 32] The CM header for a match and packet type 0. Used in packet start
+ message to TCM. */
+#define PRS_REG_CM_HDR_TYPE_0 0x40078
+#define PRS_REG_CM_HDR_TYPE_1 0x4007c
+#define PRS_REG_CM_HDR_TYPE_2 0x40080
+#define PRS_REG_CM_HDR_TYPE_3 0x40084
+#define PRS_REG_CM_HDR_TYPE_4 0x40088
+/* [RW 32] The CM header in case there was not a match on the connection */
+#define PRS_REG_CM_NO_MATCH_HDR 0x400b8
+/* [RW 1] Indicates if in e1hov mode. 0=non-e1hov mode; 1=e1hov mode. */
+#define PRS_REG_E1HOV_MODE 0x401c8
+/* [RW 8] The 8-bit event ID for a match and packet type 1. Used in packet
+ start message to TCM. */
+#define PRS_REG_EVENT_ID_1 0x40054
+#define PRS_REG_EVENT_ID_2 0x40058
+#define PRS_REG_EVENT_ID_3 0x4005c
+/* [RW 16] The Ethernet type value for FCoE */
+#define PRS_REG_FCOE_TYPE 0x401d0
+/* [RW 8] Context region for flush packet with packet type 0. Used in CFC
+ load request message. */
+#define PRS_REG_FLUSH_REGIONS_TYPE_0 0x40004
+#define PRS_REG_FLUSH_REGIONS_TYPE_1 0x40008
+#define PRS_REG_FLUSH_REGIONS_TYPE_2 0x4000c
+#define PRS_REG_FLUSH_REGIONS_TYPE_3 0x40010
+#define PRS_REG_FLUSH_REGIONS_TYPE_4 0x40014
+#define PRS_REG_FLUSH_REGIONS_TYPE_5 0x40018
+#define PRS_REG_FLUSH_REGIONS_TYPE_6 0x4001c
+#define PRS_REG_FLUSH_REGIONS_TYPE_7 0x40020
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define PRS_REG_HDRS_AFTER_BASIC 0x40238
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header for port 0 packets. */
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_0 0x40270
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_1 0x40290
+/* [R 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
+#define PRS_REG_HDRS_AFTER_TAG_0 0x40248
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 for
+ * port 0 packets */
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_0 0x40280
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_1 0x402a0
+/* [RW 4] The increment value to send in the CFC load request message */
+#define PRS_REG_INC_VALUE 0x40048
+/* [RW 6] Bit-map indicating which headers must appear in the packet */
+#define PRS_REG_MUST_HAVE_HDRS 0x40254
+/* [RW 6] Bit-map indicating which headers must appear in the packet for
+ * port 0 packets */
+#define PRS_REG_MUST_HAVE_HDRS_PORT_0 0x4028c
+#define PRS_REG_MUST_HAVE_HDRS_PORT_1 0x402ac
+#define PRS_REG_NIC_MODE 0x40138
+/* [RW 8] The 8-bit event ID for cases where there is no match on the
+ connection. Used in packet start message to TCM. */
+#define PRS_REG_NO_MATCH_EVENT_ID 0x40070
+/* [ST 24] The number of input CFC flush packets */
+#define PRS_REG_NUM_OF_CFC_FLUSH_MESSAGES 0x40128
+/* [ST 32] The number of cycles the Parser halted its operation because it
+   could not allocate the next serial number */
+#define PRS_REG_NUM_OF_DEAD_CYCLES 0x40130
+/* [ST 24] The number of input packets */
+#define PRS_REG_NUM_OF_PACKETS 0x40124
+/* [ST 24] The number of input transparent flush packets */
+#define PRS_REG_NUM_OF_TRANSPARENT_FLUSH_MESSAGES 0x4012c
+/* [RW 8] Context region for received Ethernet packet with a match and
+ packet type 0. Used in CFC load request message */
+#define PRS_REG_PACKET_REGIONS_TYPE_0 0x40028
+#define PRS_REG_PACKET_REGIONS_TYPE_1 0x4002c
+#define PRS_REG_PACKET_REGIONS_TYPE_2 0x40030
+#define PRS_REG_PACKET_REGIONS_TYPE_3 0x40034
+#define PRS_REG_PACKET_REGIONS_TYPE_4 0x40038
+#define PRS_REG_PACKET_REGIONS_TYPE_5 0x4003c
+#define PRS_REG_PACKET_REGIONS_TYPE_6 0x40040
+#define PRS_REG_PACKET_REGIONS_TYPE_7 0x40044
+/* [R 2] debug only: Number of pending requests for CAC on port 0. */
+#define PRS_REG_PENDING_BRB_CAC0_RQ 0x40174
+/* [R 2] debug only: Number of pending requests for header parsing. */
+#define PRS_REG_PENDING_BRB_PRS_RQ 0x40170
+/* [R 1] Interrupt register #0 read */
+#define PRS_REG_PRS_INT_STS 0x40188
+/* [RW 8] Parity mask register #0 read/write */
+#define PRS_REG_PRS_PRTY_MASK 0x401a4
+/* [R 8] Parity register #0 read */
+#define PRS_REG_PRS_PRTY_STS 0x40198
+/* [RC 8] Parity register #0 read clear */
+#define PRS_REG_PRS_PRTY_STS_CLR 0x4019c
+/* [RW 8] Context region for pure acknowledge packets. Used in CFC load
+ request message */
+#define PRS_REG_PURE_REGIONS 0x40024
+/* [R 32] debug only: Serial number status lsb 32 bits. '1' indicates this
+   serial number was released by SDM but cannot be used because a previous
+   serial number was not released. */
+#define PRS_REG_SERIAL_NUM_STATUS_LSB 0x40154
+/* [R 32] debug only: Serial number status msb 32 bits. '1' indicates this
+   serial number was released by SDM but cannot be used because a previous
+   serial number was not released. */
+#define PRS_REG_SERIAL_NUM_STATUS_MSB 0x40158
+/* [R 4] debug only: SRC current credit. Transaction based. */
+#define PRS_REG_SRC_CURRENT_CREDIT 0x4016c
+/* [RW 16] The Ethernet type value for L2 tag 0 */
+#define PRS_REG_TAG_ETHERTYPE_0 0x401d4
+/* [RW 4] The length of the info field for L2 tag 0. The length is between
+ * 2B and 14B; in 2B granularity */
+#define PRS_REG_TAG_LEN_0 0x4022c
+/* [R 8] debug only: TCM current credit. Cycle based. */
+#define PRS_REG_TCM_CURRENT_CREDIT 0x40160
+/* [R 8] debug only: TSDM current credit. Transaction based. */
+#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT (0x1<<19)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF (0x1<<20)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN (0x1<<22)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED (0x1<<23)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED (0x1<<24)
+#define PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7)
+#define PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7)
+/* [R 6] Debug only: Number of used entries in the data FIFO */
+#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c
+/* [R 7] Debug only: Number of used entries in the header FIFO */
+#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478
+#define PXP2_REG_PGL_ADDR_88_F0 0x120534
+/* [R 32] GRC address for configuration access to PCIE config address 0x88.
+ * Any write to this PCIE address will cause a GRC write access to the
+ * address that is in this register. */
+#define PXP2_REG_PGL_ADDR_88_F1 0x120544
+#define PXP2_REG_PGL_ADDR_8C_F0 0x120538
+/* [R 32] GRC address for configuration access to PCIE config address 0x8c.
+ * Any write to this PCIE address will cause a GRC write access to the
+ * address that is in this register. */
+#define PXP2_REG_PGL_ADDR_8C_F1 0x120548
+#define PXP2_REG_PGL_ADDR_90_F0 0x12053c
+/* [R 32] GRC address for configuration access to PCIE config address 0x90.
+ * Any write to this PCIE address will cause a GRC write access to the
+ * address that is in this register. */
+#define PXP2_REG_PGL_ADDR_90_F1 0x12054c
+#define PXP2_REG_PGL_ADDR_94_F0 0x120540
+/* [R 32] GRC address for configuration access to PCIE config address 0x94.
+ * Any write to this PCIE address will cause a GRC write access to the
+ * address that is in this register. */
+#define PXP2_REG_PGL_ADDR_94_F1 0x120550
+#define PXP2_REG_PGL_CONTROL0 0x120490
+#define PXP2_REG_PGL_CONTROL1 0x120514
+#define PXP2_REG_PGL_DEBUG 0x120520
+/* [RW 32] third dword data of expansion rom request. This register is
+   special: reading from it provides a vector of outstanding read requests;
+   if a bit is zero it means that a read request on the corresponding tag
+   did not finish yet (not all completions have arrived for it) */
+#define PXP2_REG_PGL_EXP_ROM2 0x120808
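+/* Usage sketch: per the comment above, reading this register returns a
+ * tag-status vector in which a zero bit marks a read request whose
+ * completions are still outstanding. Hypothetical helper, assuming the
+ * driver's REG_RD accessor and that all-ones means all 32 tags idle. */
+static inline bool bnx2x_pgl_tags_0_31_idle_example(struct bnx2x *bp)
+{
+	return REG_RD(bp, PXP2_REG_PGL_EXP_ROM2) == 0xffffffffU;
+}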
+/* [RW 32] Inbound interrupt table for CSDM: bits[31:16]-mask;
+   bits[15:0]-address */
+#define PXP2_REG_PGL_INT_CSDM_0 0x1204f4
+#define PXP2_REG_PGL_INT_CSDM_1 0x1204f8
+#define PXP2_REG_PGL_INT_CSDM_2 0x1204fc
+#define PXP2_REG_PGL_INT_CSDM_3 0x120500
+#define PXP2_REG_PGL_INT_CSDM_4 0x120504
+#define PXP2_REG_PGL_INT_CSDM_5 0x120508
+#define PXP2_REG_PGL_INT_CSDM_6 0x12050c
+#define PXP2_REG_PGL_INT_CSDM_7 0x120510
+/* [RW 32] Inbound interrupt table for TSDM: bits[31:16]-mask;
+   bits[15:0]-address */
+#define PXP2_REG_PGL_INT_TSDM_0 0x120494
+#define PXP2_REG_PGL_INT_TSDM_1 0x120498
+#define PXP2_REG_PGL_INT_TSDM_2 0x12049c
+#define PXP2_REG_PGL_INT_TSDM_3 0x1204a0
+#define PXP2_REG_PGL_INT_TSDM_4 0x1204a4
+#define PXP2_REG_PGL_INT_TSDM_5 0x1204a8
+#define PXP2_REG_PGL_INT_TSDM_6 0x1204ac
+#define PXP2_REG_PGL_INT_TSDM_7 0x1204b0
+/* [RW 32] Inbound interrupt table for USDM: bits[31:16]-mask;
+   bits[15:0]-address */
+#define PXP2_REG_PGL_INT_USDM_0 0x1204b4
+#define PXP2_REG_PGL_INT_USDM_1 0x1204b8
+#define PXP2_REG_PGL_INT_USDM_2 0x1204bc
+#define PXP2_REG_PGL_INT_USDM_3 0x1204c0
+#define PXP2_REG_PGL_INT_USDM_4 0x1204c4
+#define PXP2_REG_PGL_INT_USDM_5 0x1204c8
+#define PXP2_REG_PGL_INT_USDM_6 0x1204cc
+#define PXP2_REG_PGL_INT_USDM_7 0x1204d0
+/* [RW 32] Inbound interrupt table for XSDM: bits[31:16]-mask;
+   bits[15:0]-address */
+#define PXP2_REG_PGL_INT_XSDM_0 0x1204d4
+#define PXP2_REG_PGL_INT_XSDM_1 0x1204d8
+#define PXP2_REG_PGL_INT_XSDM_2 0x1204dc
+#define PXP2_REG_PGL_INT_XSDM_3 0x1204e0
+#define PXP2_REG_PGL_INT_XSDM_4 0x1204e4
+#define PXP2_REG_PGL_INT_XSDM_5 0x1204e8
+#define PXP2_REG_PGL_INT_XSDM_6 0x1204ec
+#define PXP2_REG_PGL_INT_XSDM_7 0x1204f0
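+/* Pack sketch for the four inbound interrupt tables above: each entry
+ * carries a 16-bit mask in its upper half and a 16-bit address in its
+ * lower half, per the comments. The helper name is hypothetical. */
+static inline u32 bnx2x_pgl_inb_int_entry_example(u16 mask, u16 addr)
+{
+	return ((u32)mask << 16) | addr;
+}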
+/* [RW 3] this field allows one function to pretend to be another function
+   when accessing any BAR mapped resource within the device. the value of
+   the field is the number of the function that will be accessed
+   effectively. after software writes to this field it must read it back in
+   order to know that the new value has been updated */
+#define PXP2_REG_PGL_PRETEND_FUNC_F0 0x120674
+#define PXP2_REG_PGL_PRETEND_FUNC_F1 0x120678
+#define PXP2_REG_PGL_PRETEND_FUNC_F2 0x12067c
+#define PXP2_REG_PGL_PRETEND_FUNC_F3 0x120680
+#define PXP2_REG_PGL_PRETEND_FUNC_F4 0x120684
+#define PXP2_REG_PGL_PRETEND_FUNC_F5 0x120688
+#define PXP2_REG_PGL_PRETEND_FUNC_F6 0x12068c
+#define PXP2_REG_PGL_PRETEND_FUNC_F7 0x120690
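+/* Usage sketch of the pretend mechanism described above: program the
+ * target function number, then read the register back so the new value
+ * is known to have taken effect before issuing any pretended access.
+ * Hypothetical helper, assuming the driver's REG_RD/REG_WR accessors. */
+static inline void bnx2x_pretend_func_f0_example(struct bnx2x *bp, u8 func)
+{
+	REG_WR(bp, PXP2_REG_PGL_PRETEND_FUNC_F0, func);
+	/* read back, as the comment above requires */
+	REG_RD(bp, PXP2_REG_PGL_PRETEND_FUNC_F0);
+}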
+/* [R 1] this bit indicates that a read request was blocked because
+   bus_master_en was deasserted */
+#define PXP2_REG_PGL_READ_BLOCKED 0x120568
+#define PXP2_REG_PGL_TAGS_LIMIT 0x1205a8
+/* [R 18] debug only */
+#define PXP2_REG_PGL_TXW_CDTS 0x12052c
+/* [R 1] this bit indicates that a write request was blocked because
+   bus_master_en was deasserted */
+#define PXP2_REG_PGL_WRITE_BLOCKED 0x120564
+#define PXP2_REG_PSWRQ_BW_ADD1 0x1201c0
+#define PXP2_REG_PSWRQ_BW_ADD10 0x1201e4
+#define PXP2_REG_PSWRQ_BW_ADD11 0x1201e8
+#define PXP2_REG_PSWRQ_BW_ADD2 0x1201c4
+#define PXP2_REG_PSWRQ_BW_ADD28 0x120228
+#define PXP2_REG_PSWRQ_BW_ADD3 0x1201c8
+#define PXP2_REG_PSWRQ_BW_ADD6 0x1201d4
+#define PXP2_REG_PSWRQ_BW_ADD7 0x1201d8
+#define PXP2_REG_PSWRQ_BW_ADD8 0x1201dc
+#define PXP2_REG_PSWRQ_BW_ADD9 0x1201e0
+#define PXP2_REG_PSWRQ_BW_CREDIT 0x12032c
+#define PXP2_REG_PSWRQ_BW_L1 0x1202b0
+#define PXP2_REG_PSWRQ_BW_L10 0x1202d4
+#define PXP2_REG_PSWRQ_BW_L11 0x1202d8
+#define PXP2_REG_PSWRQ_BW_L2 0x1202b4
+#define PXP2_REG_PSWRQ_BW_L28 0x120318
+#define PXP2_REG_PSWRQ_BW_L3 0x1202b8
+#define PXP2_REG_PSWRQ_BW_L6 0x1202c4
+#define PXP2_REG_PSWRQ_BW_L7 0x1202c8
+#define PXP2_REG_PSWRQ_BW_L8 0x1202cc
+#define PXP2_REG_PSWRQ_BW_L9 0x1202d0
+#define PXP2_REG_PSWRQ_BW_RD 0x120324
+#define PXP2_REG_PSWRQ_BW_UB1 0x120238
+#define PXP2_REG_PSWRQ_BW_UB10 0x12025c
+#define PXP2_REG_PSWRQ_BW_UB11 0x120260
+#define PXP2_REG_PSWRQ_BW_UB2 0x12023c
+#define PXP2_REG_PSWRQ_BW_UB28 0x1202a0
+#define PXP2_REG_PSWRQ_BW_UB3 0x120240
+#define PXP2_REG_PSWRQ_BW_UB6 0x12024c
+#define PXP2_REG_PSWRQ_BW_UB7 0x120250
+#define PXP2_REG_PSWRQ_BW_UB8 0x120254
+#define PXP2_REG_PSWRQ_BW_UB9 0x120258
+#define PXP2_REG_PSWRQ_BW_WR 0x120328
+#define PXP2_REG_PSWRQ_CDU0_L2P 0x120000
+#define PXP2_REG_PSWRQ_QM0_L2P 0x120038
+#define PXP2_REG_PSWRQ_SRC0_L2P 0x120054
+#define PXP2_REG_PSWRQ_TM0_L2P 0x12001c
+#define PXP2_REG_PSWRQ_TSDM0_L2P 0x1200e0
+/* [RW 32] Interrupt mask register #0 read/write */
+#define PXP2_REG_PXP2_INT_MASK_0 0x120578
+/* [R 32] Interrupt register #0 read */
+#define PXP2_REG_PXP2_INT_STS_0 0x12056c
+#define PXP2_REG_PXP2_INT_STS_1 0x120608
+/* [RC 32] Interrupt register #0 read clear */
+#define PXP2_REG_PXP2_INT_STS_CLR_0 0x120570
+/* [RW 32] Parity mask register #0 read/write */
+#define PXP2_REG_PXP2_PRTY_MASK_0 0x120588
+#define PXP2_REG_PXP2_PRTY_MASK_1 0x120598
+/* [R 32] Parity register #0 read */
+#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c
+#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c
+/* [RC 32] Parity register #0 read clear */
+#define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580
+#define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590
+/* [R 1] Debug only: The 'almost full' indication from each fifo (gives
+   an indication of backpressure) */
+#define PXP2_REG_RD_ALMOST_FULL_0 0x120424
+/* [R 8] Debug only: The blocks counter - number of unused block ids */
+#define PXP2_REG_RD_BLK_CNT 0x120418
+/* [RW 8] Debug only: Total number of available blocks in Tetris Buffer.
+ Must be bigger than 6. Normally should not be changed. */
+#define PXP2_REG_RD_BLK_NUM_CFG 0x12040c
+/* [RW 2] CDU byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_CDURD_SWAP_MODE 0x120404
+/* [RW 1] When '1'; inputs to the PSWRD block are ignored */
+#define PXP2_REG_RD_DISABLE_INPUTS 0x120374
+/* [R 1] PSWRD internal memories initialization is done */
+#define PXP2_REG_RD_INIT_DONE 0x120370
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq10 */
+#define PXP2_REG_RD_MAX_BLKS_VQ10 0x1203a0
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq11 */
+#define PXP2_REG_RD_MAX_BLKS_VQ11 0x1203a4
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq17 */
+#define PXP2_REG_RD_MAX_BLKS_VQ17 0x1203bc
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq18 */
+#define PXP2_REG_RD_MAX_BLKS_VQ18 0x1203c0
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq19 */
+#define PXP2_REG_RD_MAX_BLKS_VQ19 0x1203c4
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq22 */
+#define PXP2_REG_RD_MAX_BLKS_VQ22 0x1203d0
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq25 */
+#define PXP2_REG_RD_MAX_BLKS_VQ25 0x1203dc
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq6 */
+#define PXP2_REG_RD_MAX_BLKS_VQ6 0x120390
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq9 */
+#define PXP2_REG_RD_MAX_BLKS_VQ9 0x12039c
+/* [RW 2] PBF byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_PBF_SWAP_MODE 0x1203f4
+/* [R 1] Debug only: Indication if delivery ports are idle */
+#define PXP2_REG_RD_PORT_IS_IDLE_0 0x12041c
+#define PXP2_REG_RD_PORT_IS_IDLE_1 0x120420
+/* [RW 2] QM byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_QM_SWAP_MODE 0x1203f8
+/* [R 7] Debug only: The SR counter - number of unused sub request ids */
+#define PXP2_REG_RD_SR_CNT 0x120414
+/* [RW 2] SRC byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_SRC_SWAP_MODE 0x120400
+/* [RW 7] Debug only: Total number of available PCI read sub-requests. Must
+ be bigger than 1. Normally should not be changed. */
+#define PXP2_REG_RD_SR_NUM_CFG 0x120408
+/* [RW 1] Signals the PSWRD block to start initializing internal memories */
+#define PXP2_REG_RD_START_INIT 0x12036c
+/* [RW 2] TM byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_TM_SWAP_MODE 0x1203fc
+/* [RW 10] Bandwidth addition to VQ0 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD0 0x1201bc
+/* [RW 10] Bandwidth addition to VQ12 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD12 0x1201ec
+/* [RW 10] Bandwidth addition to VQ13 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD13 0x1201f0
+/* [RW 10] Bandwidth addition to VQ14 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD14 0x1201f4
+/* [RW 10] Bandwidth addition to VQ15 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD15 0x1201f8
+/* [RW 10] Bandwidth addition to VQ16 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD16 0x1201fc
+/* [RW 10] Bandwidth addition to VQ17 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD17 0x120200
+/* [RW 10] Bandwidth addition to VQ18 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD18 0x120204
+/* [RW 10] Bandwidth addition to VQ19 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD19 0x120208
+/* [RW 10] Bandwidth addition to VQ20 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD20 0x12020c
+/* [RW 10] Bandwidth addition to VQ22 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD22 0x120210
+/* [RW 10] Bandwidth addition to VQ23 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD23 0x120214
+/* [RW 10] Bandwidth addition to VQ24 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD24 0x120218
+/* [RW 10] Bandwidth addition to VQ25 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD25 0x12021c
+/* [RW 10] Bandwidth addition to VQ26 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD26 0x120220
+/* [RW 10] Bandwidth addition to VQ27 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD27 0x120224
+/* [RW 10] Bandwidth addition to VQ4 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD4 0x1201cc
+/* [RW 10] Bandwidth addition to VQ5 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD5 0x1201d0
+/* [RW 10] Bandwidth Typical L for VQ0 Read requests */
+#define PXP2_REG_RQ_BW_RD_L0 0x1202ac
+/* [RW 10] Bandwidth Typical L for VQ12 Read requests */
+#define PXP2_REG_RQ_BW_RD_L12 0x1202dc
+/* [RW 10] Bandwidth Typical L for VQ13 Read requests */
+#define PXP2_REG_RQ_BW_RD_L13 0x1202e0
+/* [RW 10] Bandwidth Typical L for VQ14 Read requests */
+#define PXP2_REG_RQ_BW_RD_L14 0x1202e4
+/* [RW 10] Bandwidth Typical L for VQ15 Read requests */
+#define PXP2_REG_RQ_BW_RD_L15 0x1202e8
+/* [RW 10] Bandwidth Typical L for VQ16 Read requests */
+#define PXP2_REG_RQ_BW_RD_L16 0x1202ec
+/* [RW 10] Bandwidth Typical L for VQ17 Read requests */
+#define PXP2_REG_RQ_BW_RD_L17 0x1202f0
+/* [RW 10] Bandwidth Typical L for VQ18 Read requests */
+#define PXP2_REG_RQ_BW_RD_L18 0x1202f4
+/* [RW 10] Bandwidth Typical L for VQ19 Read requests */
+#define PXP2_REG_RQ_BW_RD_L19 0x1202f8
+/* [RW 10] Bandwidth Typical L for VQ20 Read requests */
+#define PXP2_REG_RQ_BW_RD_L20 0x1202fc
+/* [RW 10] Bandwidth Typical L for VQ22 Read requests */
+#define PXP2_REG_RQ_BW_RD_L22 0x120300
+/* [RW 10] Bandwidth Typical L for VQ23 Read requests */
+#define PXP2_REG_RQ_BW_RD_L23 0x120304
+/* [RW 10] Bandwidth Typical L for VQ24 Read requests */
+#define PXP2_REG_RQ_BW_RD_L24 0x120308
+/* [RW 10] Bandwidth Typical L for VQ25 Read requests */
+#define PXP2_REG_RQ_BW_RD_L25 0x12030c
+/* [RW 10] Bandwidth Typical L for VQ26 Read requests */
+#define PXP2_REG_RQ_BW_RD_L26 0x120310
+/* [RW 10] Bandwidth Typical L for VQ27 Read requests */
+#define PXP2_REG_RQ_BW_RD_L27 0x120314
+/* [RW 10] Bandwidth Typical L for VQ4 Read requests */
+#define PXP2_REG_RQ_BW_RD_L4 0x1202bc
+/* [RW 10] Bandwidth Typical L for VQ5 Read requests - currently not used */
+#define PXP2_REG_RQ_BW_RD_L5 0x1202c0
+/* [RW 7] Bandwidth upper bound for VQ0 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND0 0x120234
+/* [RW 7] Bandwidth upper bound for VQ12 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND12 0x120264
+/* [RW 7] Bandwidth upper bound for VQ13 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND13 0x120268
+/* [RW 7] Bandwidth upper bound for VQ14 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND14 0x12026c
+/* [RW 7] Bandwidth upper bound for VQ15 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND15 0x120270
+/* [RW 7] Bandwidth upper bound for VQ16 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND16 0x120274
+/* [RW 7] Bandwidth upper bound for VQ17 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND17 0x120278
+/* [RW 7] Bandwidth upper bound for VQ18 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND18 0x12027c
+/* [RW 7] Bandwidth upper bound for VQ19 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND19 0x120280
+/* [RW 7] Bandwidth upper bound for VQ20 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND20 0x120284
+/* [RW 7] Bandwidth upper bound for VQ22 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND22 0x120288
+/* [RW 7] Bandwidth upper bound for VQ23 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND23 0x12028c
+/* [RW 7] Bandwidth upper bound for VQ24 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND24 0x120290
+/* [RW 7] Bandwidth upper bound for VQ25 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND25 0x120294
+/* [RW 7] Bandwidth upper bound for VQ26 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND26 0x120298
+/* [RW 7] Bandwidth upper bound for VQ27 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND27 0x12029c
+/* [RW 7] Bandwidth upper bound for VQ4 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND4 0x120244
+/* [RW 7] Bandwidth upper bound for VQ5 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND5 0x120248
+/* [RW 10] Bandwidth addition to VQ29 write requests */
+#define PXP2_REG_RQ_BW_WR_ADD29 0x12022c
+/* [RW 10] Bandwidth addition to VQ30 write requests */
+#define PXP2_REG_RQ_BW_WR_ADD30 0x120230
+/* [RW 10] Bandwidth Typical L for VQ29 Write requests */
+#define PXP2_REG_RQ_BW_WR_L29 0x12031c
+/* [RW 10] Bandwidth Typical L for VQ30 Write requests */
+#define PXP2_REG_RQ_BW_WR_L30 0x120320
+/* [RW 7] Bandwidth upper bound for VQ29 */
+#define PXP2_REG_RQ_BW_WR_UBOUND29 0x1202a4
+/* [RW 7] Bandwidth upper bound for VQ30 */
+#define PXP2_REG_RQ_BW_WR_UBOUND30 0x1202a8
+/* [RW 18] external first_mem_addr field in L2P table for CDU module port 0 */
+#define PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR 0x120008
+/* [RW 2] Endian mode for cdu */
+#define PXP2_REG_RQ_CDU_ENDIAN_M 0x1201a0
+#define PXP2_REG_RQ_CDU_FIRST_ILT 0x12061c
+#define PXP2_REG_RQ_CDU_LAST_ILT 0x120620
+/* [RW 3] page size in L2P table for CDU module; 0-4k; 1-8k; 2-16k; 3-32k;
+   4-64k; 5-128k */
+#define PXP2_REG_RQ_CDU_P_SIZE 0x120018
+/* [R 1] '1' indicates that the requester has finished its internal
+   configuration */
+#define PXP2_REG_RQ_CFG_DONE 0x1201b4
+/* [RW 2] Endian mode for debug */
+#define PXP2_REG_RQ_DBG_ENDIAN_M 0x1201a4
+/* [RW 1] When '1'; requests will enter input buffers but won't get out
+   towards the glue */
+#define PXP2_REG_RQ_DISABLE_INPUTS 0x120330
+/* [RW 4] Determines alignment of write SRs when a request is split into
+ * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
+ * aligned. 4 - 512B aligned. */
+#define PXP2_REG_RQ_DRAM_ALIGN 0x1205b0
+/* [RW 4] Determines alignment of read SRs when a request is split into
+ * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
+ * aligned. 4 - 512B aligned. */
+#define PXP2_REG_RQ_DRAM_ALIGN_RD 0x12092c
+/* [RW 1] when set the new alignment method (E2) will be applied; when reset
+ * the original alignment method (E1/E1H) will be applied */
+#define PXP2_REG_RQ_DRAM_ALIGN_SEL 0x120930
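+/* Helper sketch translating the alignment encoding above (0 - 8B;
+ * 1 - 64B; 2 - 128B; 3 - 256B; 4 - 512B) into a byte count; the formula
+ * merely reproduces that table. The helper name is hypothetical. */
+static inline u32 bnx2x_rq_dram_align_bytes_example(u32 field)
+{
+	return field ? (32U << field) : 8U;	/* 1..4 -> 64B..512B; 0 -> 8B */
+}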
+/* [RW 1] If 1 ILT failure will not result in ELT access; an interrupt will
+   be asserted */
+#define PXP2_REG_RQ_ELT_DISABLE 0x12066c
+/* [RW 2] Endian mode for hc */
+#define PXP2_REG_RQ_HC_ENDIAN_M 0x1201a8
+/* [RW 1] when '0' ILT logic will work as in A0; otherwise B0; for backward
+   compatibility needs; Note that different registers are used per mode */
+#define PXP2_REG_RQ_ILT_MODE 0x1205b4
+/* [WB 53] Onchip address table */
+#define PXP2_REG_RQ_ONCHIP_AT 0x122000
+/* [WB 53] Onchip address table - B0 */
+#define PXP2_REG_RQ_ONCHIP_AT_B0 0x128000
+/* [RW 13] Pending read limiter threshold; in Dwords */
+#define PXP2_REG_RQ_PDR_LIMIT 0x12033c
+/* [RW 2] Endian mode for qm */
+#define PXP2_REG_RQ_QM_ENDIAN_M 0x120194
+#define PXP2_REG_RQ_QM_FIRST_ILT 0x120634
+#define PXP2_REG_RQ_QM_LAST_ILT 0x120638
+/* [RW 3] page size in L2P table for QM module; 0-4k; 1-8k; 2-16k; 3-32k;
+   4-64k; 5-128k */
+#define PXP2_REG_RQ_QM_P_SIZE 0x120050
+/* [RW 1] '1' indicates that the RBC has finished configuring the PSWRQ */
+#define PXP2_REG_RQ_RBC_DONE 0x1201b0
+/* [RW 3] Max burst size field for read requests port 0; 000 - 128B;
+   001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K */
+#define PXP2_REG_RQ_RD_MBS0 0x120160
+/* [RW 3] Max burst size field for read requests port 1; 000 - 128B;
+   001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K */
+#define PXP2_REG_RQ_RD_MBS1 0x120168
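+/* Helper sketch for the MBS encoding above (000 - 128B; 001 - 256B;
+ * 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K): the size doubles with each
+ * step, so size = 128 << field. The helper name is hypothetical. */
+static inline u32 bnx2x_rq_mbs_bytes_example(u32 field)
+{
+	return 128U << field;	/* 0..5 -> 128B..4K */
+}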
+/* [RW 2] Endian mode for src */
+#define PXP2_REG_RQ_SRC_ENDIAN_M 0x12019c
+#define PXP2_REG_RQ_SRC_FIRST_ILT 0x12063c
+#define PXP2_REG_RQ_SRC_LAST_ILT 0x120640
+/* [RW 3] page size in L2P table for SRC module; 0-4k; 1-8k; 2-16k; 3-32k;
+   4-64k; 5-128k */
+#define PXP2_REG_RQ_SRC_P_SIZE 0x12006c
+/* [RW 2] Endian mode for tm */
+#define PXP2_REG_RQ_TM_ENDIAN_M 0x120198
+#define PXP2_REG_RQ_TM_FIRST_ILT 0x120644
+#define PXP2_REG_RQ_TM_LAST_ILT 0x120648
+/* [RW 3] page size in L2P table for TM module; 0-4k; 1-8k; 2-16k; 3-32k;
+   4-64k; 5-128k */
+#define PXP2_REG_RQ_TM_P_SIZE 0x120034
+/* [R 5] Number of entries in the ufifo; this fifo has l2p completions */
+#define PXP2_REG_RQ_UFIFO_NUM_OF_ENTRY 0x12080c
+/* [RW 18] external first_mem_addr field in L2P table for USDM module port 0 */
+#define PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR 0x120094
+/* [R 8] Number of entries occupied by vq 0 in pswrq memory */
+#define PXP2_REG_RQ_VQ0_ENTRY_CNT 0x120810
+/* [R 8] Number of entries occupied by vq 10 in pswrq memory */
+#define PXP2_REG_RQ_VQ10_ENTRY_CNT 0x120818
+/* [R 8] Number of entries occupied by vq 11 in pswrq memory */
+#define PXP2_REG_RQ_VQ11_ENTRY_CNT 0x120820
+/* [R 8] Number of entries occupied by vq 12 in pswrq memory */
+#define PXP2_REG_RQ_VQ12_ENTRY_CNT 0x120828
+/* [R 8] Number of entries occupied by vq 13 in pswrq memory */
+#define PXP2_REG_RQ_VQ13_ENTRY_CNT 0x120830
+/* [R 8] Number of entries occupied by vq 14 in pswrq memory */
+#define PXP2_REG_RQ_VQ14_ENTRY_CNT 0x120838
+/* [R 8] Number of entries occupied by vq 15 in pswrq memory */
+#define PXP2_REG_RQ_VQ15_ENTRY_CNT 0x120840
+/* [R 8] Number of entries occupied by vq 16 in pswrq memory */
+#define PXP2_REG_RQ_VQ16_ENTRY_CNT 0x120848
+/* [R 8] Number of entries occupied by vq 17 in pswrq memory */
+#define PXP2_REG_RQ_VQ17_ENTRY_CNT 0x120850
+/* [R 8] Number of entries occupied by vq 18 in pswrq memory */
+#define PXP2_REG_RQ_VQ18_ENTRY_CNT 0x120858
+/* [R 8] Number of entries occupied by vq 19 in pswrq memory */
+#define PXP2_REG_RQ_VQ19_ENTRY_CNT 0x120860
+/* [R 8] Number of entries occupied by vq 1 in pswrq memory */
+#define PXP2_REG_RQ_VQ1_ENTRY_CNT 0x120868
+/* [R 8] Number of entries occupied by vq 20 in pswrq memory */
+#define PXP2_REG_RQ_VQ20_ENTRY_CNT 0x120870
+/* [R 8] Number of entries occupied by vq 21 in pswrq memory */
+#define PXP2_REG_RQ_VQ21_ENTRY_CNT 0x120878
+/* [R 8] Number of entries occupied by vq 22 in pswrq memory */
+#define PXP2_REG_RQ_VQ22_ENTRY_CNT 0x120880
+/* [R 8] Number of entries occupied by vq 23 in pswrq memory */
+#define PXP2_REG_RQ_VQ23_ENTRY_CNT 0x120888
+/* [R 8] Number of entries occupied by vq 24 in pswrq memory */
+#define PXP2_REG_RQ_VQ24_ENTRY_CNT 0x120890
+/* [R 8] Number of entries occupied by vq 25 in pswrq memory */
+#define PXP2_REG_RQ_VQ25_ENTRY_CNT 0x120898
+/* [R 8] Number of entries occupied by vq 26 in pswrq memory */
+#define PXP2_REG_RQ_VQ26_ENTRY_CNT 0x1208a0
+/* [R 8] Number of entries occupied by vq 27 in pswrq memory */
+#define PXP2_REG_RQ_VQ27_ENTRY_CNT 0x1208a8
+/* [R 8] Number of entries occupied by vq 28 in pswrq memory */
+#define PXP2_REG_RQ_VQ28_ENTRY_CNT 0x1208b0
+/* [R 8] Number of entries occupied by vq 29 in pswrq memory */
+#define PXP2_REG_RQ_VQ29_ENTRY_CNT 0x1208b8
+/* [R 8] Number of entries occupied by vq 2 in pswrq memory */
+#define PXP2_REG_RQ_VQ2_ENTRY_CNT 0x1208c0
+/* [R 8] Number of entries occupied by vq 30 in pswrq memory */
+#define PXP2_REG_RQ_VQ30_ENTRY_CNT 0x1208c8
+/* [R 8] Number of entries occupied by vq 31 in pswrq memory */
+#define PXP2_REG_RQ_VQ31_ENTRY_CNT 0x1208d0
+/* [R 8] Number of entries occupied by vq 3 in pswrq memory */
+#define PXP2_REG_RQ_VQ3_ENTRY_CNT 0x1208d8
+/* [R 8] Number of entries occupied by vq 4 in pswrq memory */
+#define PXP2_REG_RQ_VQ4_ENTRY_CNT 0x1208e0
+/* [R 8] Number of entries occupied by vq 5 in pswrq memory */
+#define PXP2_REG_RQ_VQ5_ENTRY_CNT 0x1208e8
+/* [R 8] Number of entries occupied by vq 6 in pswrq memory */
+#define PXP2_REG_RQ_VQ6_ENTRY_CNT 0x1208f0
+/* [R 8] Number of entries occupied by vq 7 in pswrq memory */
+#define PXP2_REG_RQ_VQ7_ENTRY_CNT 0x1208f8
+/* [R 8] Number of entries occupied by vq 8 in pswrq memory */
+#define PXP2_REG_RQ_VQ8_ENTRY_CNT 0x120900
+/* [R 8] Number of entries occupied by vq 9 in pswrq memory */
+#define PXP2_REG_RQ_VQ9_ENTRY_CNT 0x120908
+/* [RW 3] Max burst size field for write requests port 0; 000 - 128B;
+   001 - 256B; 010 - 512B */
+#define PXP2_REG_RQ_WR_MBS0 0x12015c
+/* [RW 3] Max burst size field for write requests port 1; 000 - 128B;
+   001 - 256B; 010 - 512B */
+#define PXP2_REG_RQ_WR_MBS1 0x120164
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_CDU_MPS 0x1205f0
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_CSDM_MPS 0x1205d0
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_DBG_MPS 0x1205e8
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_DMAE_MPS 0x1205ec
+/* [RW 10] if the number of entries in the dmae fifo is higher than this
+   threshold then the has_payload indication will be asserted; the default
+   value should be greater than the write MBS size */
+#define PXP2_REG_WR_DMAE_TH 0x120368
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_HC_MPS 0x1205c8
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_QM_MPS 0x1205dc
+/* [RW 1] 0 - working in A0 mode; 1 - working in B0 mode */
+#define PXP2_REG_WR_REV_MODE 0x120670
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_SRC_MPS 0x1205e4
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_TM_MPS 0x1205e0
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_TSDM_MPS 0x1205d4
+/* [RW 10] if the number of entries in the usdmdp fifo is higher than this
+   threshold then the has_payload indication will be asserted; the default
+   value should be greater than the write MBS size */
+#define PXP2_REG_WR_USDMDP_TH 0x120348
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_USDM_MPS 0x1205cc
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this number has_payload will be asserted */
+#define PXP2_REG_WR_XSDM_MPS 0x1205d8
+/* [R 1] debug only: Indication if PSWHST arbiter is idle */
+#define PXP_REG_HST_ARB_IS_IDLE 0x103004
+/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means
+ this client is waiting for the arbiter. */
+#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008
+/* [RW 1] When 1; doorbells are discarded and not passed to doorbell queue
+   block. Should be used for closing the gates. */
+#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4
+/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit
+   is updated according to the 'hst_discard_doorbells' register when the
+   state machine is idle */
+#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0
+/* [RW 1] When 1; new internal writes arriving to the block are discarded.
+   Should be used for closing the gates. */
+#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8
+/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1'
+   means this PSWHST is discarding inputs from this client. Each bit is
+   updated according to the 'hst_discard_internal_writes' register when the
+   state machine is idle. */
+#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c
+/* [WB 160] Used for initialization of the inbound interrupts memory */
+#define PXP_REG_HST_INBOUND_INT 0x103800
+/* [RW 32] Interrupt mask register #0 read/write */
+#define PXP_REG_PXP_INT_MASK_0 0x103074
+#define PXP_REG_PXP_INT_MASK_1 0x103084
+/* [R 32] Interrupt register #0 read */
+#define PXP_REG_PXP_INT_STS_0 0x103068
+#define PXP_REG_PXP_INT_STS_1 0x103078
+/* [RC 32] Interrupt register #0 read clear */
+#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c
+#define PXP_REG_PXP_INT_STS_CLR_1 0x10307c
+/* [RW 27] Parity mask register #0 read/write */
+#define PXP_REG_PXP_PRTY_MASK 0x103094
+/* [R 26] Parity register #0 read */
+#define PXP_REG_PXP_PRTY_STS 0x103088
+/* [RC 27] Parity register #0 read clear */
+#define PXP_REG_PXP_PRTY_STS_CLR 0x10308c
+/* [RW 4] The activity counter initial increment value sent in the load
+ request */
+#define QM_REG_ACTCTRINITVAL_0 0x168040
+#define QM_REG_ACTCTRINITVAL_1 0x168044
+#define QM_REG_ACTCTRINITVAL_2 0x168048
+#define QM_REG_ACTCTRINITVAL_3 0x16804c
+/* [RW 32] The base logical address (in bytes) of each physical queue. The
+   index I represents the physical queue number. The 12 lsbs are ignored and
+   considered zero so practically there are only 20 bits in this register;
+   queues 63-0 */
+#define QM_REG_BASEADDR 0x168900
+/* [RW 32] The base logical address (in bytes) of each physical queue. The
+   index I represents the physical queue number. The 12 lsbs are ignored and
+   considered zero so practically there are only 20 bits in this register;
+   queues 127-64 */
+#define QM_REG_BASEADDR_EXT_A 0x16e100
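+/* Usage sketch for the two base-address tables above: the index is the
+ * physical queue number and the 12 lsbs must be zero (4KB alignment).
+ * Hypothetical helper, assuming the driver's REG_WR accessor and a
+ * 4-byte register stride. */
+static inline void bnx2x_qm_set_baseaddr_example(struct bnx2x *bp,
+						 u8 pq, u32 addr)
+{
+	u32 reg = pq < 64 ? QM_REG_BASEADDR + 4 * pq :
+			    QM_REG_BASEADDR_EXT_A + 4 * (pq - 64);
+
+	/* the 12 lsbs are ignored by the hardware; keep them zero */
+	REG_WR(bp, reg, addr & ~0xfffU);
+}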
+/* [RW 16] The byte credit cost for each task. This value is for both ports */
+#define QM_REG_BYTECRDCOST 0x168234
+/* [RW 16] The initial byte credit value for both ports. */
+#define QM_REG_BYTECRDINITVAL 0x168238
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+ queue uses port 0 else it uses port 1; queues 31-0 */
+#define QM_REG_BYTECRDPORT_LSB 0x168228
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+ queue uses port 0 else it uses port 1; queues 95-64 */
+#define QM_REG_BYTECRDPORT_LSB_EXT_A 0x16e520
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+ queue uses port 0 else it uses port 1; queues 63-32 */
+#define QM_REG_BYTECRDPORT_MSB 0x168224
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+ queue uses port 0 else it uses port 1; queues 127-96 */
+#define QM_REG_BYTECRDPORT_MSB_EXT_A 0x16e51c
+/* [RW 16] The byte credit value above which the QM is considered almost
+   full */
+#define QM_REG_BYTECREDITAFULLTHR 0x168094
+/* [RW 4] The initial credit for the interface */
+#define QM_REG_CMINITCRD_0 0x1680cc
+#define QM_REG_BYTECRDCMDQ_0 0x16e6e8
+#define QM_REG_CMINITCRD_1 0x1680d0
+#define QM_REG_CMINITCRD_2 0x1680d4
+#define QM_REG_CMINITCRD_3 0x1680d8
+#define QM_REG_CMINITCRD_4 0x1680dc
+#define QM_REG_CMINITCRD_5 0x1680e0
+#define QM_REG_CMINITCRD_6 0x1680e4
+#define QM_REG_CMINITCRD_7 0x1680e8
+/* [RW 8] A mask bit per CM interface. If this bit is 0 then this interface
+ is masked */
+#define QM_REG_CMINTEN 0x1680ec
+/* [RW 12] A bit vector which indicates which of the queues are tied to
+   interface 0 */
+#define QM_REG_CMINTVOQMASK_0 0x1681f4
+#define QM_REG_CMINTVOQMASK_1 0x1681f8
+#define QM_REG_CMINTVOQMASK_2 0x1681fc
+#define QM_REG_CMINTVOQMASK_3 0x168200
+#define QM_REG_CMINTVOQMASK_4 0x168204
+#define QM_REG_CMINTVOQMASK_5 0x168208
+#define QM_REG_CMINTVOQMASK_6 0x16820c
+#define QM_REG_CMINTVOQMASK_7 0x168210
+/* [RW 20] The number of connections divided by 16 which dictates the size
+   of each queue which belongs to an even function number. */
+#define QM_REG_CONNNUM_0 0x168020
+/* [R 6] Keeps the fill level of the fifo from write client 4 */
+#define QM_REG_CQM_WRC_FIFOLVL 0x168018
+/* [RW 8] The context regions sent in the CFC load request */
+#define QM_REG_CTXREG_0 0x168030
+#define QM_REG_CTXREG_1 0x168034
+#define QM_REG_CTXREG_2 0x168038
+#define QM_REG_CTXREG_3 0x16803c
+/* [RW 12] The VOQ mask used to select the VOQs which need to be full for
+   bypass enable */
+#define QM_REG_ENBYPVOQMASK 0x16823c
+/* [RW 32] A bit mask per each physical queue. If a bit is set then the
+ physical queue uses the byte credit; queues 31-0 */
+#define QM_REG_ENBYTECRD_LSB 0x168220
+/* [RW 32] A bit mask per each physical queue. If a bit is set then the
+ physical queue uses the byte credit; queues 95-64 */
+#define QM_REG_ENBYTECRD_LSB_EXT_A 0x16e518
+/* [RW 32] A bit mask per each physical queue. If a bit is set then the
+ physical queue uses the byte credit; queues 63-32 */
+#define QM_REG_ENBYTECRD_MSB 0x16821c
+/* [RW 32] A bit mask per each physical queue. If a bit is set then the
+ physical queue uses the byte credit; queues 127-96 */
+#define QM_REG_ENBYTECRD_MSB_EXT_A 0x16e514
+/* [RW 4] If cleared then the secondary interface will not be served by the
+ RR arbiter */
+#define QM_REG_ENSEC 0x1680f0
+/* [RW 32] NA */
+#define QM_REG_FUNCNUMSEL_LSB 0x168230
+/* [RW 32] NA */
+#define QM_REG_FUNCNUMSEL_MSB 0x16822c
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+   be used for the almost empty indication to the HW block; queues 31:0 */
+#define QM_REG_HWAEMPTYMASK_LSB 0x168218
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+   be used for the almost empty indication to the HW block; queues 95-64 */
+#define QM_REG_HWAEMPTYMASK_LSB_EXT_A 0x16e510
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+   be used for the almost empty indication to the HW block; queues 63:32 */
+#define QM_REG_HWAEMPTYMASK_MSB 0x168214
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+   be used for the almost empty indication to the HW block; queues 127-96 */
+#define QM_REG_HWAEMPTYMASK_MSB_EXT_A 0x16e50c
+/* [RW 4] The number of outstanding requests to CFC */
+#define QM_REG_OUTLDREQ 0x168804
+/* [RC 1] A flag to indicate that an overflow error occurred in one of the
+   queues. */
+#define QM_REG_OVFERROR 0x16805c
+/* [RC 7] the queue where the overflow occurred */
+#define QM_REG_OVFQNUM 0x168058
+/* [R 16] Pause state for physical queues 15-0 */
+#define QM_REG_PAUSESTATE0 0x168410
+/* [R 16] Pause state for physical queues 31-16 */
+#define QM_REG_PAUSESTATE1 0x168414
+/* [R 16] Pause state for physical queues 47-32 */
+#define QM_REG_PAUSESTATE2 0x16e684
+/* [R 16] Pause state for physical queues 63-48 */
+#define QM_REG_PAUSESTATE3 0x16e688
+/* [R 16] Pause state for physical queues 79-64 */
+#define QM_REG_PAUSESTATE4 0x16e68c
+/* [R 16] Pause state for physical queues 95-80 */
+#define QM_REG_PAUSESTATE5 0x16e690
+/* [R 16] Pause state for physical queues 111-96 */
+#define QM_REG_PAUSESTATE6 0x16e694
+/* [R 16] Pause state for physical queues 127-112 */
+#define QM_REG_PAUSESTATE7 0x16e698
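+/* Lookup sketch for the pause-state registers above: 16 queues per
+ * register, eight registers split across the two address ranges.
+ * Hypothetical helper; the 4-byte stride within each range is inferred
+ * from the addresses above. */
+static inline u32 bnx2x_q_pause_state_example(struct bnx2x *bp, u8 pq)
+{
+	u32 idx = pq / 16;
+	u32 reg = idx < 2 ? QM_REG_PAUSESTATE0 + 4 * idx :
+			    QM_REG_PAUSESTATE2 + 4 * (idx - 2);
+
+	return (REG_RD(bp, reg) >> (pq % 16)) & 1;
+}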
+/* [RW 2] The PCI attributes field used in the PCI request. */
+#define QM_REG_PCIREQAT 0x168054
+#define QM_REG_PF_EN 0x16e70c
+/* [R 24] The number of tasks stored in the QM for the PF. Only even
+ * functions are valid in E2 (odd I registers will be hard wired to 0) */
+#define QM_REG_PF_USG_CNT_0 0x16e040
+/* [R 16] NOT USED */
+#define QM_REG_PORT0BYTECRD 0x168300
+/* [R 16] The byte credit of port 1 */
+#define QM_REG_PORT1BYTECRD 0x168304
+/* [RW 3] pci function number of queues 15-0 */
+#define QM_REG_PQ2PCIFUNC_0 0x16e6bc
+#define QM_REG_PQ2PCIFUNC_1 0x16e6c0
+#define QM_REG_PQ2PCIFUNC_2 0x16e6c4
+#define QM_REG_PQ2PCIFUNC_3 0x16e6c8
+#define QM_REG_PQ2PCIFUNC_4 0x16e6cc
+#define QM_REG_PQ2PCIFUNC_5 0x16e6d0
+#define QM_REG_PQ2PCIFUNC_6 0x16e6d4
+#define QM_REG_PQ2PCIFUNC_7 0x16e6d8
+/* [WB 54] Pointer Table Memory for queues 63-0; The mapping is as follows:
+   ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read
+   bank 0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */
+#define QM_REG_PTRTBL 0x168a00
+/* [WB 54] Pointer Table Memory for queues 127-64; The mapping is as
+   follows: ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer;
+   ptrtbl[5:4] read bank 0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write
+   bank; */
+#define QM_REG_PTRTBL_EXT_A 0x16e200
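+/* Decode sketch for a ptrtbl entry using the mapping documented above;
+ * obtaining the 54-bit entry (e.g. via a wide-bus DMAE read) is assumed
+ * and not shown. The helper names are hypothetical. */
+static inline u32 bnx2x_ptrtbl_rd_ptr_example(u64 entry)
+{
+	return (u32)((entry >> 30) & 0xffffff);	/* ptrtbl[53:30]: read ptr */
+}
+
+static inline u32 bnx2x_ptrtbl_wr_ptr_example(u64 entry)
+{
+	return (u32)((entry >> 6) & 0xffffff);	/* ptrtbl[29:6]: write ptr */
+}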
+/* [RW 2] Interrupt mask register #0 read/write */
+#define QM_REG_QM_INT_MASK 0x168444
+/* [R 2] Interrupt register #0 read */
+#define QM_REG_QM_INT_STS 0x168438
+/* [RW 12] Parity mask register #0 read/write */
+#define QM_REG_QM_PRTY_MASK 0x168454
+/* [R 12] Parity register #0 read */
+#define QM_REG_QM_PRTY_STS 0x168448
+/* [RC 12] Parity register #0 read clear */
+#define QM_REG_QM_PRTY_STS_CLR 0x16844c
+/* [R 32] Current queues in pipeline: Queues from 32 to 63 */
+#define QM_REG_QSTATUS_HIGH 0x16802c
+/* [R 32] Current queues in pipeline: Queues from 96 to 127 */
+#define QM_REG_QSTATUS_HIGH_EXT_A 0x16e408
+/* [R 32] Current queues in pipeline: Queues from 0 to 31 */
+#define QM_REG_QSTATUS_LOW 0x168028
+/* [R 32] Current queues in pipeline: Queues from 64 to 95 */
+#define QM_REG_QSTATUS_LOW_EXT_A 0x16e404
+/* [R 24] The number of tasks queued for each queue; queues 63-0 */
+#define QM_REG_QTASKCTR_0 0x168308
+/* [R 24] The number of tasks queued for each queue; queues 127-64 */
+#define QM_REG_QTASKCTR_EXT_A_0 0x16e584
+/* [RW 4] Queue tied to VOQ */
+#define QM_REG_QVOQIDX_0 0x1680f4
+#define QM_REG_QVOQIDX_10 0x16811c
+#define QM_REG_QVOQIDX_100 0x16e49c
+#define QM_REG_QVOQIDX_101 0x16e4a0
+#define QM_REG_QVOQIDX_102 0x16e4a4
+#define QM_REG_QVOQIDX_103 0x16e4a8
+#define QM_REG_QVOQIDX_104 0x16e4ac
+#define QM_REG_QVOQIDX_105 0x16e4b0
+#define QM_REG_QVOQIDX_106 0x16e4b4
+#define QM_REG_QVOQIDX_107 0x16e4b8
+#define QM_REG_QVOQIDX_108 0x16e4bc
+#define QM_REG_QVOQIDX_109 0x16e4c0
+#define QM_REG_QVOQIDX_11 0x168120
+#define QM_REG_QVOQIDX_110 0x16e4c4
+#define QM_REG_QVOQIDX_111 0x16e4c8
+#define QM_REG_QVOQIDX_112 0x16e4cc
+#define QM_REG_QVOQIDX_113 0x16e4d0
+#define QM_REG_QVOQIDX_114 0x16e4d4
+#define QM_REG_QVOQIDX_115 0x16e4d8
+#define QM_REG_QVOQIDX_116 0x16e4dc
+#define QM_REG_QVOQIDX_117 0x16e4e0
+#define QM_REG_QVOQIDX_118 0x16e4e4
+#define QM_REG_QVOQIDX_119 0x16e4e8
+#define QM_REG_QVOQIDX_12 0x168124
+#define QM_REG_QVOQIDX_120 0x16e4ec
+#define QM_REG_QVOQIDX_121 0x16e4f0
+#define QM_REG_QVOQIDX_122 0x16e4f4
+#define QM_REG_QVOQIDX_123 0x16e4f8
+#define QM_REG_QVOQIDX_124 0x16e4fc
+#define QM_REG_QVOQIDX_125 0x16e500
+#define QM_REG_QVOQIDX_126 0x16e504
+#define QM_REG_QVOQIDX_127 0x16e508
+#define QM_REG_QVOQIDX_13 0x168128
+#define QM_REG_QVOQIDX_14 0x16812c
+#define QM_REG_QVOQIDX_15 0x168130
+#define QM_REG_QVOQIDX_16 0x168134
+#define QM_REG_QVOQIDX_17 0x168138
+#define QM_REG_QVOQIDX_21 0x168148
+#define QM_REG_QVOQIDX_22 0x16814c
+#define QM_REG_QVOQIDX_23 0x168150
+#define QM_REG_QVOQIDX_24 0x168154
+#define QM_REG_QVOQIDX_25 0x168158
+#define QM_REG_QVOQIDX_26 0x16815c
+#define QM_REG_QVOQIDX_27 0x168160
+#define QM_REG_QVOQIDX_28 0x168164
+#define QM_REG_QVOQIDX_29 0x168168
+#define QM_REG_QVOQIDX_30 0x16816c
+#define QM_REG_QVOQIDX_31 0x168170
+#define QM_REG_QVOQIDX_32 0x168174
+#define QM_REG_QVOQIDX_33 0x168178
+#define QM_REG_QVOQIDX_34 0x16817c
+#define QM_REG_QVOQIDX_35 0x168180
+#define QM_REG_QVOQIDX_36 0x168184
+#define QM_REG_QVOQIDX_37 0x168188
+#define QM_REG_QVOQIDX_38 0x16818c
+#define QM_REG_QVOQIDX_39 0x168190
+#define QM_REG_QVOQIDX_40 0x168194
+#define QM_REG_QVOQIDX_41 0x168198
+#define QM_REG_QVOQIDX_42 0x16819c
+#define QM_REG_QVOQIDX_43 0x1681a0
+#define QM_REG_QVOQIDX_44 0x1681a4
+#define QM_REG_QVOQIDX_45 0x1681a8
+#define QM_REG_QVOQIDX_46 0x1681ac
+#define QM_REG_QVOQIDX_47 0x1681b0
+#define QM_REG_QVOQIDX_48 0x1681b4
+#define QM_REG_QVOQIDX_49 0x1681b8
+#define QM_REG_QVOQIDX_5 0x168108
+#define QM_REG_QVOQIDX_50 0x1681bc
+#define QM_REG_QVOQIDX_51 0x1681c0
+#define QM_REG_QVOQIDX_52 0x1681c4
+#define QM_REG_QVOQIDX_53 0x1681c8
+#define QM_REG_QVOQIDX_54 0x1681cc
+#define QM_REG_QVOQIDX_55 0x1681d0
+#define QM_REG_QVOQIDX_56 0x1681d4
+#define QM_REG_QVOQIDX_57 0x1681d8
+#define QM_REG_QVOQIDX_58 0x1681dc
+#define QM_REG_QVOQIDX_59 0x1681e0
+#define QM_REG_QVOQIDX_6 0x16810c
+#define QM_REG_QVOQIDX_60 0x1681e4
+#define QM_REG_QVOQIDX_61 0x1681e8
+#define QM_REG_QVOQIDX_62 0x1681ec
+#define QM_REG_QVOQIDX_63 0x1681f0
+#define QM_REG_QVOQIDX_64 0x16e40c
+#define QM_REG_QVOQIDX_65 0x16e410
+#define QM_REG_QVOQIDX_69 0x16e420
+#define QM_REG_QVOQIDX_7 0x168110
+#define QM_REG_QVOQIDX_70 0x16e424
+#define QM_REG_QVOQIDX_71 0x16e428
+#define QM_REG_QVOQIDX_72 0x16e42c
+#define QM_REG_QVOQIDX_73 0x16e430
+#define QM_REG_QVOQIDX_74 0x16e434
+#define QM_REG_QVOQIDX_75 0x16e438
+#define QM_REG_QVOQIDX_76 0x16e43c
+#define QM_REG_QVOQIDX_77 0x16e440
+#define QM_REG_QVOQIDX_78 0x16e444
+#define QM_REG_QVOQIDX_79 0x16e448
+#define QM_REG_QVOQIDX_8 0x168114
+#define QM_REG_QVOQIDX_80 0x16e44c
+#define QM_REG_QVOQIDX_81 0x16e450
+#define QM_REG_QVOQIDX_85 0x16e460
+#define QM_REG_QVOQIDX_86 0x16e464
+#define QM_REG_QVOQIDX_87 0x16e468
+#define QM_REG_QVOQIDX_88 0x16e46c
+#define QM_REG_QVOQIDX_89 0x16e470
+#define QM_REG_QVOQIDX_9 0x168118
+#define QM_REG_QVOQIDX_90 0x16e474
+#define QM_REG_QVOQIDX_91 0x16e478
+#define QM_REG_QVOQIDX_92 0x16e47c
+#define QM_REG_QVOQIDX_93 0x16e480
+#define QM_REG_QVOQIDX_94 0x16e484
+#define QM_REG_QVOQIDX_95 0x16e488
+#define QM_REG_QVOQIDX_96 0x16e48c
+#define QM_REG_QVOQIDX_97 0x16e490
+#define QM_REG_QVOQIDX_98 0x16e494
+#define QM_REG_QVOQIDX_99 0x16e498
+/* [RW 1] Initialization command bit */
+#define QM_REG_SOFT_RESET 0x168428
+/* [RW 8] The credit cost per task in the QM. A value per each VOQ */
+#define QM_REG_TASKCRDCOST_0 0x16809c
+#define QM_REG_TASKCRDCOST_1 0x1680a0
+#define QM_REG_TASKCRDCOST_2 0x1680a4
+#define QM_REG_TASKCRDCOST_4 0x1680ac
+#define QM_REG_TASKCRDCOST_5 0x1680b0
+/* [R 6] Keeps the fill level of the fifo from write client 3 */
+#define QM_REG_TQM_WRC_FIFOLVL 0x168010
+/* [R 6] Keeps the fill level of the fifo from write client 2 */
+#define QM_REG_UQM_WRC_FIFOLVL 0x168008
+/* [RC 32] Credit update error register */
+#define QM_REG_VOQCRDERRREG 0x168408
+/* [R 16] The credit value for each VOQ */
+#define QM_REG_VOQCREDIT_0 0x1682d0
+#define QM_REG_VOQCREDIT_1 0x1682d4
+#define QM_REG_VOQCREDIT_4 0x1682e0
+/* [RW 16] The credit value above which the QM is considered almost full */
+#define QM_REG_VOQCREDITAFULLTHR 0x168090
+/* [RW 16] The init and maximum credit for each VOQ */
+#define QM_REG_VOQINITCREDIT_0 0x168060
+#define QM_REG_VOQINITCREDIT_1 0x168064
+#define QM_REG_VOQINITCREDIT_2 0x168068
+#define QM_REG_VOQINITCREDIT_4 0x168070
+#define QM_REG_VOQINITCREDIT_5 0x168074
+/* [RW 1] The port to which the VOQ belongs */
+#define QM_REG_VOQPORT_0 0x1682a0
+#define QM_REG_VOQPORT_1 0x1682a4
+#define QM_REG_VOQPORT_2 0x1682a8
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_0_LSB 0x168240
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_0_LSB_EXT_A 0x16e524
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_0_MSB 0x168244
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_0_MSB_EXT_A 0x16e528
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_10_LSB 0x168290
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_10_LSB_EXT_A 0x16e574
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_10_MSB 0x168294
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_10_MSB_EXT_A 0x16e578
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_11_LSB 0x168298
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_11_LSB_EXT_A 0x16e57c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_11_MSB 0x16829c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_11_MSB_EXT_A 0x16e580
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_1_LSB 0x168248
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_1_LSB_EXT_A 0x16e52c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_1_MSB 0x16824c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_1_MSB_EXT_A 0x16e530
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_2_LSB 0x168250
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_2_LSB_EXT_A 0x16e534
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_2_MSB 0x168254
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_2_MSB_EXT_A 0x16e538
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_3_LSB 0x168258
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_3_LSB_EXT_A 0x16e53c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_3_MSB_EXT_A 0x16e540
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_4_LSB 0x168260
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_4_LSB_EXT_A 0x16e544
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_4_MSB 0x168264
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_4_MSB_EXT_A 0x16e548
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_5_LSB 0x168268
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_5_LSB_EXT_A 0x16e54c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_5_MSB 0x16826c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_5_MSB_EXT_A 0x16e550
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_6_LSB 0x168270
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_6_LSB_EXT_A 0x16e554
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_6_MSB 0x168274
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_6_MSB_EXT_A 0x16e558
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_7_LSB 0x168278
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_7_LSB_EXT_A 0x16e55c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_7_MSB 0x16827c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_7_MSB_EXT_A 0x16e560
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_8_LSB 0x168280
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_8_LSB_EXT_A 0x16e564
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_8_MSB 0x168284
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_8_MSB_EXT_A 0x16e568
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_9_LSB 0x168288
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_9_LSB_EXT_A 0x16e56c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_9_MSB_EXT_A 0x16e570
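+/* Illustrative sketch; not part of the original file: locating the VOQQMASK
+ * register and bit for a given (voq; queue) pair. The 8-byte per-VOQ stride
+ * and the EXT_A base follow from the offsets listed above; the helper name
+ * and the out-parameter are hypothetical. */
+static inline u32 qm_voqqmask_addr(u32 voq, u32 queue, u32 *bit)
+{
+	u32 base = (queue < 64) ? QM_REG_VOQQMASK_0_LSB :
+				  QM_REG_VOQQMASK_0_LSB_EXT_A;
+	u32 msb = ((queue & 63) >= 32) ? 4 : 0;	/* MSB register sits 4 bytes up */
+
+	*bit = queue & 31;		/* bit position inside the 32-bit mask */
+	return base + 8 * voq + msb;	/* one LSB/MSB pair per VOQ */
+}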
+/* [RW 32] WRR weights */
+#define QM_REG_WRRWEIGHTS_0 0x16880c
+#define QM_REG_WRRWEIGHTS_1 0x168810
+#define QM_REG_WRRWEIGHTS_10 0x168814
+#define QM_REG_WRRWEIGHTS_11 0x168818
+#define QM_REG_WRRWEIGHTS_12 0x16881c
+#define QM_REG_WRRWEIGHTS_13 0x168820
+#define QM_REG_WRRWEIGHTS_14 0x168824
+#define QM_REG_WRRWEIGHTS_15 0x168828
+#define QM_REG_WRRWEIGHTS_16 0x16e000
+#define QM_REG_WRRWEIGHTS_17 0x16e004
+#define QM_REG_WRRWEIGHTS_18 0x16e008
+#define QM_REG_WRRWEIGHTS_19 0x16e00c
+#define QM_REG_WRRWEIGHTS_2 0x16882c
+#define QM_REG_WRRWEIGHTS_20 0x16e010
+#define QM_REG_WRRWEIGHTS_21 0x16e014
+#define QM_REG_WRRWEIGHTS_22 0x16e018
+#define QM_REG_WRRWEIGHTS_23 0x16e01c
+#define QM_REG_WRRWEIGHTS_24 0x16e020
+#define QM_REG_WRRWEIGHTS_25 0x16e024
+#define QM_REG_WRRWEIGHTS_26 0x16e028
+#define QM_REG_WRRWEIGHTS_27 0x16e02c
+#define QM_REG_WRRWEIGHTS_28 0x16e030
+#define QM_REG_WRRWEIGHTS_29 0x16e034
+#define QM_REG_WRRWEIGHTS_3 0x168830
+#define QM_REG_WRRWEIGHTS_30 0x16e038
+#define QM_REG_WRRWEIGHTS_31 0x16e03c
+#define QM_REG_WRRWEIGHTS_4 0x168834
+#define QM_REG_WRRWEIGHTS_5 0x168838
+#define QM_REG_WRRWEIGHTS_6 0x16883c
+#define QM_REG_WRRWEIGHTS_7 0x168840
+#define QM_REG_WRRWEIGHTS_8 0x168844
+#define QM_REG_WRRWEIGHTS_9 0x168848
+/* [R 6] Keeps the fill level of the fifo from write client 1 */
+#define QM_REG_XQM_WRC_FIFOLVL 0x168000
+/* [W 1] Reset the parity interrupt */
+#define SEM_FAST_REG_PARITY_RST 0x18840
+#define SRC_REG_COUNTFREE0 0x40500
+/* [RW 1] If clear the searcher is compatible with E1 A0 - supports only two
+   ports. If set the searcher supports 8 functions. */
+#define SRC_REG_E1HMF_ENABLE 0x404cc
+#define SRC_REG_FIRSTFREE0 0x40510
+#define SRC_REG_KEYRSS0_0 0x40408
+#define SRC_REG_KEYRSS0_7 0x40424
+#define SRC_REG_KEYRSS1_9 0x40454
+#define SRC_REG_KEYSEARCH_0 0x40458
+#define SRC_REG_KEYSEARCH_1 0x4045c
+#define SRC_REG_KEYSEARCH_2 0x40460
+#define SRC_REG_KEYSEARCH_3 0x40464
+#define SRC_REG_KEYSEARCH_4 0x40468
+#define SRC_REG_KEYSEARCH_5 0x4046c
+#define SRC_REG_KEYSEARCH_6 0x40470
+#define SRC_REG_KEYSEARCH_7 0x40474
+#define SRC_REG_KEYSEARCH_8 0x40478
+#define SRC_REG_KEYSEARCH_9 0x4047c
+#define SRC_REG_LASTFREE0 0x40530
+#define SRC_REG_NUMBER_HASH_BITS0 0x40400
+/* [RW 1] Reset internal state machines. */
+#define SRC_REG_SOFT_RST 0x4049c
+/* [R 3] Interrupt register #0 read */
+#define SRC_REG_SRC_INT_STS 0x404ac
+/* [RW 3] Parity mask register #0 read/write */
+#define SRC_REG_SRC_PRTY_MASK 0x404c8
+/* [R 3] Parity register #0 read */
+#define SRC_REG_SRC_PRTY_STS 0x404bc
+/* [RC 3] Parity register #0 read clear */
+#define SRC_REG_SRC_PRTY_STS_CLR 0x404c0
+/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
+#define TCM_REG_CAM_OCCUP 0x5017c
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define TCM_REG_CDU_AG_RD_IFEN 0x50034
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+ are disregarded; all other signals are treated as usual; if 1 - normal
+ activity. */
+#define TCM_REG_CDU_AG_WR_IFEN 0x50030
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define TCM_REG_CDU_SM_RD_IFEN 0x5003c
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+   input are disregarded; all other signals are treated as usual; if 1 -
+ normal activity. */
+#define TCM_REG_CDU_SM_WR_IFEN 0x50038
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 1 at start-up. */
+#define TCM_REG_CFC_INIT_CRD 0x50204
+/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_CP_WEIGHT 0x500c0
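+/* Illustrative sketch; not part of the original file: the weight encoding
+ * described above is shared by all *_WEIGHT registers in this block - the
+ * register value is the weight itself except that 0 encodes weight 8. The
+ * helper name is hypothetical. */
+static inline u32 cm_wrr_weight(u32 reg_val)
+{
+	return reg_val ? reg_val : 8;	/* 0 -> weight 8 (most prioritised) */
+}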
+/* [RW 1] Input csem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define TCM_REG_CSEM_IFEN 0x5002c
+/* [RC 1] Message length mismatch (relative to last indication) at the In#9
+ interface. */
+#define TCM_REG_CSEM_LENGTH_MIS 0x50174
+/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_CSEM_WEIGHT 0x500bc
+/* [RW 8] The Event ID in case ErrorFlg is set in the input message. */
+#define TCM_REG_ERR_EVNT_ID 0x500a0
+/* [RW 28] The CM erroneous header for QM and Timers formatting. */
+#define TCM_REG_ERR_TCM_HDR 0x5009c
+/* [RW 8] The Event ID for Timers expiration. */
+#define TCM_REG_EXPR_EVNT_ID 0x500a4
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define TCM_REG_FIC0_INIT_CRD 0x5020c
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define TCM_REG_FIC1_INIT_CRD 0x50210
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+ - strict priority defined by ~tcm_registers_gr_ag_pr.gr_ag_pr;
+ ~tcm_registers_gr_ld0_pr.gr_ld0_pr and
+ ~tcm_registers_gr_ld1_pr.gr_ld1_pr. */
+#define TCM_REG_GR_ARB_TYPE 0x50114
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Store channel is the
+   complement of the other 3 groups. */
+#define TCM_REG_GR_LD0_PR 0x5011c
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Store channel is the
+   complement of the other 3 groups. */
+#define TCM_REG_GR_LD1_PR 0x50120
+/* [RW 4] The number of double REG-pairs; loaded from the STORM context and
+ sent to STORM; for a specific connection type. The double REG-pairs are
+ used to align to STORM context row size of 128 bits. The offset of these
+ data in the STORM context is always 0. Index _i stands for the connection
+ type (one of 16). */
+#define TCM_REG_N_SM_CTX_LD_0 0x50050
+#define TCM_REG_N_SM_CTX_LD_1 0x50054
+#define TCM_REG_N_SM_CTX_LD_2 0x50058
+#define TCM_REG_N_SM_CTX_LD_3 0x5005c
+#define TCM_REG_N_SM_CTX_LD_4 0x50060
+#define TCM_REG_N_SM_CTX_LD_5 0x50064
+/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_PBF_IFEN 0x50024
+/* [RC 1] Message length mismatch (relative to last indication) at the In#7
+ interface. */
+#define TCM_REG_PBF_LENGTH_MIS 0x5016c
+/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_PBF_WEIGHT 0x500b4
+#define TCM_REG_PHYS_QNUM0_0 0x500e0
+#define TCM_REG_PHYS_QNUM0_1 0x500e4
+#define TCM_REG_PHYS_QNUM1_0 0x500e8
+#define TCM_REG_PHYS_QNUM1_1 0x500ec
+#define TCM_REG_PHYS_QNUM2_0 0x500f0
+#define TCM_REG_PHYS_QNUM2_1 0x500f4
+#define TCM_REG_PHYS_QNUM3_0 0x500f8
+#define TCM_REG_PHYS_QNUM3_1 0x500fc
+/* [RW 1] Input prs Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_PRS_IFEN 0x50020
+/* [RC 1] Message length mismatch (relative to last indication) at the In#6
+ interface. */
+#define TCM_REG_PRS_LENGTH_MIS 0x50168
+/* [RW 3] The weight of the input prs in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_PRS_WEIGHT 0x500b0
+/* [RW 8] The Event ID for Timers formatting in case of stop done. */
+#define TCM_REG_STOP_EVNT_ID 0x500a8
+/* [RC 1] Message length mismatch (relative to last indication) at the STORM
+ interface. */
+#define TCM_REG_STORM_LENGTH_MIS 0x50160
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define TCM_REG_STORM_TCM_IFEN 0x50010
+/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_STORM_WEIGHT 0x500ac
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TCM_CFC_IFEN 0x50040
+/* [RW 11] Interrupt mask register #0 read/write */
+#define TCM_REG_TCM_INT_MASK 0x501dc
+/* [R 11] Interrupt register #0 read */
+#define TCM_REG_TCM_INT_STS 0x501d0
+/* [RW 27] Parity mask register #0 read/write */
+#define TCM_REG_TCM_PRTY_MASK 0x501ec
+/* [R 27] Parity register #0 read */
+#define TCM_REG_TCM_PRTY_STS 0x501e0
+/* [RC 27] Parity register #0 read clear */
+#define TCM_REG_TCM_PRTY_STS_CLR 0x501e4
+/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
+ REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+ Is used to determine the number of the AG context REG-pairs written back;
+ when the input message Reg1WbFlg isn't set. */
+#define TCM_REG_TCM_REG0_SZ 0x500d8
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TCM_STORM0_IFEN 0x50004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TCM_STORM1_IFEN 0x50008
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TCM_TQM_IFEN 0x5000c
+/* [RW 1] If set the Q index received from the QM is inserted into the event
+   ID. */
+#define TCM_REG_TCM_TQM_USE_Q 0x500d4
+/* [RW 28] The CM header for Timers expiration command. */
+#define TCM_REG_TM_TCM_HDR 0x50098
+/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define TCM_REG_TM_TCM_IFEN 0x5001c
+/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TM_WEIGHT 0x500d0
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 32 at start-up. */
+#define TCM_REG_TQM_INIT_CRD 0x5021c
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TQM_P_WEIGHT 0x500c8
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TQM_S_WEIGHT 0x500cc
+/* [RW 28] The CM header value for QM request (primary). */
+#define TCM_REG_TQM_TCM_HDR_P 0x50090
+/* [RW 28] The CM header value for QM request (secondary). */
+#define TCM_REG_TQM_TCM_HDR_S 0x50094
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TQM_TCM_IFEN 0x50014
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TSDM_IFEN 0x50018
+/* [RC 1] Message length mismatch (relative to last indication) at the SDM
+ interface. */
+#define TCM_REG_TSDM_LENGTH_MIS 0x50164
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TSDM_WEIGHT 0x500c4
+/* [RW 1] Input usem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define TCM_REG_USEM_IFEN 0x50028
+/* [RC 1] Message length mismatch (relative to last indication) at the In#8
+ interface. */
+#define TCM_REG_USEM_LENGTH_MIS 0x50170
+/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_USEM_WEIGHT 0x500b8
+/* [RW 21] Indirect access to the descriptor table of the XX protection
+   mechanism. The fields are: [5:0] - length of the message; [15:6] -
+   message pointer; [20:16] - next pointer. */
+#define TCM_REG_XX_DESCR_TABLE 0x50280
+#define TCM_REG_XX_DESCR_TABLE_SIZE 29
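+/* Illustrative sketch; not part of the original file: unpacking one 21-bit
+ * XX descriptor per the field layout above. Helper names are hypothetical. */
+static inline u32 tcm_xx_descr_len(u32 d)      { return d & 0x3f; }         /* [5:0] */
+static inline u32 tcm_xx_descr_msg_ptr(u32 d)  { return (d >> 6) & 0x3ff; } /* [15:6] */
+static inline u32 tcm_xx_descr_next_ptr(u32 d) { return (d >> 16) & 0x1f; } /* [20:16] */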
+/* [R 6] Used to read the value of the XX protection Free counter. */
+#define TCM_REG_XX_FREE 0x50178
+/* [RW 6] Initial value for the credit counter; responsible for filling
+   the Input Stage XX protection buffer with the XX protection pending
+   messages. Max credit available - 127. Write writes the initial credit
+   value; read returns the current value of the credit counter. Must be
+   initialized to 19 at start-up. */
+#define TCM_REG_XX_INIT_CRD 0x50220
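+/* Illustrative sketch; not part of the original file: programming the TCM
+ * credit counters to the start-up values required by the comments above.
+ * reg_wr() stands for a hypothetical 32-bit register write accessor. */
+static inline void tcm_init_credits(void (*reg_wr)(u32 addr, u32 val))
+{
+	reg_wr(TCM_REG_CFC_INIT_CRD, 1);	/* must be 1 at start-up */
+	reg_wr(TCM_REG_FIC0_INIT_CRD, 64);	/* must be 64 at start-up */
+	reg_wr(TCM_REG_FIC1_INIT_CRD, 64);	/* must be 64 at start-up */
+	reg_wr(TCM_REG_TQM_INIT_CRD, 32);	/* must be 32 at start-up */
+	reg_wr(TCM_REG_XX_INIT_CRD, 19);	/* must be 19 at start-up */
+}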
+/* [RW 6] Maximum link list size (messages locked) per connection in the XX
+ protection. */
+#define TCM_REG_XX_MAX_LL_SZ 0x50044
+/* [RW 6] The maximum number of pending messages which may be stored in XX
+   protection. ~tcm_registers_xx_free.xx_free is returned on read. */
+#define TCM_REG_XX_MSG_NUM 0x50224
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define TCM_REG_XX_OVFL_EVNT_ID 0x50048
+/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
+   The fields are: [4:0] - tail pointer; [10:5] - Link List size; [15:11] -
+ header pointer. */
+#define TCM_REG_XX_TABLE 0x50240
+/* [RW 4] Load value for cfc ac credit cnt. */
+#define TM_REG_CFC_AC_CRDCNT_VAL 0x164208
+/* [RW 4] Load value for cfc cld credit cnt. */
+#define TM_REG_CFC_CLD_CRDCNT_VAL 0x164210
+/* [RW 8] Client0 context region. */
+#define TM_REG_CL0_CONT_REGION 0x164030
+/* [RW 8] Client1 context region. */
+#define TM_REG_CL1_CONT_REGION 0x164034
+/* [RW 8] Client2 context region. */
+#define TM_REG_CL2_CONT_REGION 0x164038
+/* [RW 2] High priority client number. */
+#define TM_REG_CLIN_PRIOR0_CLIENT 0x164024
+/* [RW 4] Load value for clout0 cred cnt. */
+#define TM_REG_CLOUT_CRDCNT0_VAL 0x164220
+/* [RW 4] Load value for clout1 cred cnt. */
+#define TM_REG_CLOUT_CRDCNT1_VAL 0x164228
+/* [RW 4] Load value for clout2 cred cnt. */
+#define TM_REG_CLOUT_CRDCNT2_VAL 0x164230
+/* [RW 1] Enable client0 input. */
+#define TM_REG_EN_CL0_INPUT 0x164008
+/* [RW 1] Enable client1 input. */
+#define TM_REG_EN_CL1_INPUT 0x16400c
+/* [RW 1] Enable client2 input. */
+#define TM_REG_EN_CL2_INPUT 0x164010
+#define TM_REG_EN_LINEAR0_TIMER 0x164014
+/* [RW 1] Enable real time counter. */
+#define TM_REG_EN_REAL_TIME_CNT 0x1640d8
+/* [RW 1] Enable for Timers state machines. */
+#define TM_REG_EN_TIMERS 0x164000
+/* [RW 4] Load value for expiration credit cnt. CFC max number of
+ outstanding load requests for timers (expiration) context loading. */
+#define TM_REG_EXP_CRDCNT_VAL 0x164238
+/* [RW 32] Linear0 logic address. */
+#define TM_REG_LIN0_LOGIC_ADDR 0x164240
+/* [RW 18] Linear0 Max active cid (in banks of 32 entries). */
+#define TM_REG_LIN0_MAX_ACTIVE_CID 0x164048
+/* [ST 16] Linear0 Number of scans counter. */
+#define TM_REG_LIN0_NUM_SCANS 0x1640a0
+/* [WB 64] Linear0 phy address. */
+#define TM_REG_LIN0_PHY_ADDR 0x164270
+/* [RW 1] Linear0 physical address valid. */
+#define TM_REG_LIN0_PHY_ADDR_VALID 0x164248
+#define TM_REG_LIN0_SCAN_ON 0x1640d0
+/* [RW 24] Linear0 array scan timeout. */
+#define TM_REG_LIN0_SCAN_TIME 0x16403c
+#define TM_REG_LIN0_VNIC_UC 0x164128
+/* [RW 32] Linear1 logic address. */
+#define TM_REG_LIN1_LOGIC_ADDR 0x164250
+/* [WB 64] Linear1 phy address. */
+#define TM_REG_LIN1_PHY_ADDR 0x164280
+/* [RW 1] Linear1 physical address valid. */
+#define TM_REG_LIN1_PHY_ADDR_VALID 0x164258
+/* [RW 6] Linear timer set_clear fifo threshold. */
+#define TM_REG_LIN_SETCLR_FIFO_ALFULL_THR 0x164070
+/* [RW 2] Load value for pci arbiter credit cnt. */
+#define TM_REG_PCIARB_CRDCNT_VAL 0x164260
+/* [RW 20] The number of hardware cycles for each timer tick. */
+#define TM_REG_TIMER_TICK_SIZE 0x16401c
+/* [RW 8] Timers Context region. */
+#define TM_REG_TM_CONTEXT_REGION 0x164044
+/* [RW 1] Interrupt mask register #0 read/write */
+#define TM_REG_TM_INT_MASK 0x1640fc
+/* [R 1] Interrupt register #0 read */
+#define TM_REG_TM_INT_STS 0x1640f0
+/* [RW 7] Parity mask register #0 read/write */
+#define TM_REG_TM_PRTY_MASK 0x16410c
+/* [RC 7] Parity register #0 read clear */
+#define TM_REG_TM_PRTY_STS_CLR 0x164104
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define TSDM_REG_AGG_INT_EVENT_0 0x42038
+#define TSDM_REG_AGG_INT_EVENT_1 0x4203c
+#define TSDM_REG_AGG_INT_EVENT_2 0x42040
+#define TSDM_REG_AGG_INT_EVENT_3 0x42044
+#define TSDM_REG_AGG_INT_EVENT_4 0x42048
+/* [RW 1] The T bit for aggregated interrupt 0 */
+#define TSDM_REG_AGG_INT_T_0 0x420b8
+#define TSDM_REG_AGG_INT_T_1 0x420bc
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define TSDM_REG_CFC_RSP_START_ADDR 0x42008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define TSDM_REG_CMP_COUNTER_MAX0 0x4201c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define TSDM_REG_CMP_COUNTER_MAX1 0x42020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define TSDM_REG_CMP_COUNTER_MAX2 0x42024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define TSDM_REG_CMP_COUNTER_MAX3 0x42028
+/* [RW 13] The start address in the internal RAM for the completion
+ counters. */
+#define TSDM_REG_CMP_COUNTER_START_ADDR 0x4200c
+#define TSDM_REG_ENABLE_IN1 0x42238
+#define TSDM_REG_ENABLE_IN2 0x4223c
+#define TSDM_REG_ENABLE_OUT1 0x42240
+#define TSDM_REG_ENABLE_OUT2 0x42244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+ interface without receiving any ACK. */
+#define TSDM_REG_INIT_CREDIT_PXP_CTRL 0x424bc
+/* [ST 32] The number of ACK after placement messages received */
+#define TSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x4227c
+/* [ST 32] The number of packet end messages received from the parser */
+#define TSDM_REG_NUM_OF_PKT_END_MSG 0x42274
+/* [ST 32] The number of requests received from the pxp async if */
+#define TSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x42278
+/* [ST 32] The number of commands received in queue 0 */
+#define TSDM_REG_NUM_OF_Q0_CMD 0x42248
+/* [ST 32] The number of commands received in queue 10 */
+#define TSDM_REG_NUM_OF_Q10_CMD 0x4226c
+/* [ST 32] The number of commands received in queue 11 */
+#define TSDM_REG_NUM_OF_Q11_CMD 0x42270
+/* [ST 32] The number of commands received in queue 1 */
+#define TSDM_REG_NUM_OF_Q1_CMD 0x4224c
+/* [ST 32] The number of commands received in queue 3 */
+#define TSDM_REG_NUM_OF_Q3_CMD 0x42250
+/* [ST 32] The number of commands received in queue 4 */
+#define TSDM_REG_NUM_OF_Q4_CMD 0x42254
+/* [ST 32] The number of commands received in queue 5 */
+#define TSDM_REG_NUM_OF_Q5_CMD 0x42258
+/* [ST 32] The number of commands received in queue 6 */
+#define TSDM_REG_NUM_OF_Q6_CMD 0x4225c
+/* [ST 32] The number of commands received in queue 7 */
+#define TSDM_REG_NUM_OF_Q7_CMD 0x42260
+/* [ST 32] The number of commands received in queue 8 */
+#define TSDM_REG_NUM_OF_Q8_CMD 0x42264
+/* [ST 32] The number of commands received in queue 9 */
+#define TSDM_REG_NUM_OF_Q9_CMD 0x42268
+/* [RW 13] The start address in the internal RAM for the packet end message */
+#define TSDM_REG_PCK_END_MSG_START_ADDR 0x42014
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define TSDM_REG_Q_COUNTER_START_ADDR 0x42010
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define TSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x42548
+/* [R 1] parser fifo empty in sdm_sync block */
+#define TSDM_REG_SYNC_PARSER_EMPTY 0x42550
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define TSDM_REG_SYNC_SYNC_EMPTY 0x42558
+/* [RW 32] Tick for timer counter. Applicable only when
+   ~tsdm_registers_timer_tick_enable.timer_tick_enable = 1 */
+#define TSDM_REG_TIMER_TICK 0x42000
+/* [RW 32] Interrupt mask register #0 read/write */
+#define TSDM_REG_TSDM_INT_MASK_0 0x4229c
+#define TSDM_REG_TSDM_INT_MASK_1 0x422ac
+/* [R 32] Interrupt register #0 read */
+#define TSDM_REG_TSDM_INT_STS_0 0x42290
+#define TSDM_REG_TSDM_INT_STS_1 0x422a0
+/* [RW 11] Parity mask register #0 read/write */
+#define TSDM_REG_TSDM_PRTY_MASK 0x422bc
+/* [R 11] Parity register #0 read */
+#define TSDM_REG_TSDM_PRTY_STS 0x422b0
+/* [RC 11] Parity register #0 read clear */
+#define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define TSEM_REG_ARB_CYCLE_SIZE 0x180034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+   decoding is: 0- fic0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2 */
+#define TSEM_REG_ARB_ELEMENT0 0x180020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+   decoding is: 0- fic0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Must not be equal to register ~tsem_registers_arb_element0.arb_element0 */
+#define TSEM_REG_ARB_ELEMENT1 0x180024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+   decoding is: 0- fic0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Must not be equal to register ~tsem_registers_arb_element0.arb_element0
+   and ~tsem_registers_arb_element1.arb_element1 */
+#define TSEM_REG_ARB_ELEMENT2 0x180028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+   decoding is: 0- fic0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Must not be equal to register ~tsem_registers_arb_element0.arb_element0
+   and ~tsem_registers_arb_element1.arb_element1 and
+   ~tsem_registers_arb_element2.arb_element2 */
+#define TSEM_REG_ARB_ELEMENT3 0x18002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+   decoding is: 0- fic0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Must not be equal to register ~tsem_registers_arb_element0.arb_element0
+   and ~tsem_registers_arb_element1.arb_element1 and
+   ~tsem_registers_arb_element2.arb_element2 and
+   ~tsem_registers_arb_element3.arb_element3 */
+#define TSEM_REG_ARB_ELEMENT4 0x180030
+#define TSEM_REG_ENABLE_IN 0x1800a4
+#define TSEM_REG_ENABLE_OUT 0x1800a8
+/* [RW 32] This address space contains all registers and memories that are
+ placed in SEM_FAST block. The SEM_FAST registers are described in
+ appendix B. In order to access the sem_fast registers the base address
+   ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define TSEM_REG_FAST_MEMORY 0x1a0000
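+/* Illustrative sketch; not part of the original file: a SEM_FAST register
+ * is reached by adding its offset to the block's fast_memory base; e.g. the
+ * TSEM copy of SEM_FAST_REG_PARITY_RST sits at 0x1a0000 + 0x18840. The
+ * macro name is hypothetical. */
+#define TSEM_FAST_REG(offset)	(TSEM_REG_FAST_MEMORY + (offset))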
+/* [RW 1] Disables input messages from FIC0. May be updated during run_time
+   by the microcode */
+#define TSEM_REG_FIC0_DISABLE 0x180224
+/* [RW 1] Disables input messages from FIC1. May be updated during run_time
+   by the microcode */
+#define TSEM_REG_FIC1_DISABLE 0x180234
+/* [RW 15] Interrupt table. Read and write access to it is not possible in
+   the middle of the work */
+#define TSEM_REG_INT_TABLE 0x180400
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC0 */
+#define TSEM_REG_MSG_NUM_FIC0 0x180000
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC1 */
+#define TSEM_REG_MSG_NUM_FIC1 0x180004
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC0 */
+#define TSEM_REG_MSG_NUM_FOC0 0x180008
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC1 */
+#define TSEM_REG_MSG_NUM_FOC1 0x18000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC2 */
+#define TSEM_REG_MSG_NUM_FOC2 0x180010
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC3 */
+#define TSEM_REG_MSG_NUM_FOC3 0x180014
+/* [RW 1] Disables input messages from the passive buffer. May be updated
+   during run_time by the microcode */
+#define TSEM_REG_PAS_DISABLE 0x18024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define TSEM_REG_PASSIVE_BUFFER 0x181000
+/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
+#define TSEM_REG_PRAM 0x1c0000
+/* [R 8] Valid sleeping threads indication; one bit per thread */
+#define TSEM_REG_SLEEP_THREADS_VALID 0x18026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0
+/* [RW 8] List of free threads. There is a bit per thread. */
+#define TSEM_REG_THREADS_LIST 0x1802e4
+/* [RC 32] Parity register #0 read clear */
+#define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118
+#define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define TSEM_REG_TS_0_AS 0x180038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define TSEM_REG_TS_10_AS 0x180060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define TSEM_REG_TS_11_AS 0x180064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define TSEM_REG_TS_12_AS 0x180068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define TSEM_REG_TS_13_AS 0x18006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define TSEM_REG_TS_14_AS 0x180070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define TSEM_REG_TS_15_AS 0x180074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define TSEM_REG_TS_16_AS 0x180078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define TSEM_REG_TS_17_AS 0x18007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define TSEM_REG_TS_18_AS 0x180080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define TSEM_REG_TS_1_AS 0x18003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define TSEM_REG_TS_2_AS 0x180040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define TSEM_REG_TS_3_AS 0x180044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define TSEM_REG_TS_4_AS 0x180048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define TSEM_REG_TS_5_AS 0x18004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define TSEM_REG_TS_6_AS 0x180050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define TSEM_REG_TS_7_AS 0x180054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define TSEM_REG_TS_8_AS 0x180058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define TSEM_REG_TS_9_AS 0x18005c
+/* [RW 32] Interrupt mask register #0 read/write */
+#define TSEM_REG_TSEM_INT_MASK_0 0x180100
+#define TSEM_REG_TSEM_INT_MASK_1 0x180110
+/* [R 32] Interrupt register #0 read */
+#define TSEM_REG_TSEM_INT_STS_0 0x1800f4
+#define TSEM_REG_TSEM_INT_STS_1 0x180104
+/* [RW 32] Parity mask register #0 read/write */
+#define TSEM_REG_TSEM_PRTY_MASK_0 0x180120
+#define TSEM_REG_TSEM_PRTY_MASK_1 0x180130
+/* [R 32] Parity register #0 read */
+#define TSEM_REG_TSEM_PRTY_STS_0 0x180114
+#define TSEM_REG_TSEM_PRTY_STS_1 0x180124
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset the error bit
+ * for the 64 VFs; values 64-67 reset it for the 4 PFs; values 68-127 are
+ * not valid. */
+#define TSEM_REG_VFPF_ERR_NUM 0x180380
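+/* Illustrative sketch; not part of the original file: encoding the value
+ * written to TSEM_REG_VFPF_ERR_NUM per the comment above (0-63 select a
+ * VF; 64-67 select a PF). The helper name is hypothetical. */
+static inline u32 sem_vfpf_err_num(bool is_pf, u32 id)
+{
+	return is_pf ? 64 + id : id;	/* id < 4 for a PF; id < 64 for a VF */
+}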
+/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits
+ * [10:8] of the address should be the offset within the accessed LCID
+ * context; the bits [7:0] are the accessed LCID. Example: to write to REG10
+ * of LCID100; the RBC address should be 12'ha64. */
+#define UCM_REG_AG_CTX 0xe2000
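+/* Illustrative sketch; not part of the original file: forming the indirect
+ * AG-context dword address from the worked example above (REG10 of LCID100
+ * gives 12'ha64); which implies the register offset occupies the bits just
+ * above the 8-bit LCID field. The helper name is hypothetical. */
+static inline u32 ucm_ag_ctx_rbc_addr(u32 reg_offset, u32 lcid)
+{
+	return (reg_offset << 8) | (lcid & 0xff);	/* (10 << 8) | 100 = 0xa64 */
+}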
+/* [R 5] Used to read the XX protection CAM occupancy counter. */
+#define UCM_REG_CAM_OCCUP 0xe0170
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define UCM_REG_CDU_AG_RD_IFEN 0xe0038
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+ are disregarded; all other signals are treated as usual; if 1 - normal
+ activity. */
+#define UCM_REG_CDU_AG_WR_IFEN 0xe0034
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define UCM_REG_CDU_SM_RD_IFEN 0xe0040
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+   input are disregarded; all other signals are treated as usual; if 1 -
+ normal activity. */
+#define UCM_REG_CDU_SM_WR_IFEN 0xe003c
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 1 at start-up. */
+#define UCM_REG_CFC_INIT_CRD 0xe0204
+/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_CP_WEIGHT 0xe00c4
+/* [RW 1] Input csem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_CSEM_IFEN 0xe0028
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the csem interface is detected. */
+#define UCM_REG_CSEM_LENGTH_MIS 0xe0160
+/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_CSEM_WEIGHT 0xe00b8
+/* [RW 1] Input dorq Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_DORQ_IFEN 0xe0030
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the dorq interface is detected. */
+#define UCM_REG_DORQ_LENGTH_MIS 0xe0168
+/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_DORQ_WEIGHT 0xe00c0
+/* [RW 8] The Event ID in case ErrorFlg input message bit is set. */
+#define UCM_REG_ERR_EVNT_ID 0xe00a4
+/* [RW 28] The CM erroneous header for QM and Timers formatting. */
+#define UCM_REG_ERR_UCM_HDR 0xe00a0
+/* [RW 8] The Event ID for Timers expiration. */
+#define UCM_REG_EXPR_EVNT_ID 0xe00a8
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define UCM_REG_FIC0_INIT_CRD 0xe020c
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define UCM_REG_FIC1_INIT_CRD 0xe0210
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+ - strict priority defined by ~ucm_registers_gr_ag_pr.gr_ag_pr;
+ ~ucm_registers_gr_ld0_pr.gr_ld0_pr and
+ ~ucm_registers_gr_ld1_pr.gr_ld1_pr. */
+#define UCM_REG_GR_ARB_TYPE 0xe0144
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Store channel group is
+   complement to the others. */
+#define UCM_REG_GR_LD0_PR 0xe014c
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Store channel group is
+   complement to the others. */
+#define UCM_REG_GR_LD1_PR 0xe0150
+/* [RW 2] The queue index for invalidate counter flag decision. */
+#define UCM_REG_INV_CFLG_Q 0xe00e4
+/* [RW 5] The number of double REG-pairs; loaded from the STORM context and
+ sent to STORM; for a specific connection type. the double REG-pairs are
+ used in order to align to STORM context row size of 128 bits. The offset
+ of these data in the STORM context is always 0. Index _i stands for the
+ connection type (one of 16). */
+#define UCM_REG_N_SM_CTX_LD_0 0xe0054
+#define UCM_REG_N_SM_CTX_LD_1 0xe0058
+#define UCM_REG_N_SM_CTX_LD_2 0xe005c
+#define UCM_REG_N_SM_CTX_LD_3 0xe0060
+#define UCM_REG_N_SM_CTX_LD_4 0xe0064
+#define UCM_REG_N_SM_CTX_LD_5 0xe0068
+#define UCM_REG_PHYS_QNUM0_0 0xe0110
+#define UCM_REG_PHYS_QNUM0_1 0xe0114
+#define UCM_REG_PHYS_QNUM1_0 0xe0118
+#define UCM_REG_PHYS_QNUM1_1 0xe011c
+#define UCM_REG_PHYS_QNUM2_0 0xe0120
+#define UCM_REG_PHYS_QNUM2_1 0xe0124
+#define UCM_REG_PHYS_QNUM3_0 0xe0128
+#define UCM_REG_PHYS_QNUM3_1 0xe012c
+/* [RW 8] The Event ID for Timers formatting in case of stop done. */
+#define UCM_REG_STOP_EVNT_ID 0xe00ac
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the STORM interface is detected. */
+#define UCM_REG_STORM_LENGTH_MIS 0xe0154
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_STORM_UCM_IFEN 0xe0010
+/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_STORM_WEIGHT 0xe00b0
+/* [RW 4] Timers output initial credit. Max credit available - 15. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 4 at start-up. */
+#define UCM_REG_TM_INIT_CRD 0xe021c
+/* [RW 28] The CM header for Timers expiration command. */
+#define UCM_REG_TM_UCM_HDR 0xe009c
+/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_TM_UCM_IFEN 0xe001c
+/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_TM_WEIGHT 0xe00d4
+/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_TSEM_IFEN 0xe0024
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the tsem interface is detected. */
+#define UCM_REG_TSEM_LENGTH_MIS 0xe015c
+/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_TSEM_WEIGHT 0xe00b4
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_UCM_CFC_IFEN 0xe0044
+/* [RW 11] Interrupt mask register #0 read/write */
+#define UCM_REG_UCM_INT_MASK 0xe01d4
+/* [R 11] Interrupt register #0 read */
+#define UCM_REG_UCM_INT_STS 0xe01c8
+/* [RW 27] Parity mask register #0 read/write */
+#define UCM_REG_UCM_PRTY_MASK 0xe01e4
+/* [R 27] Parity register #0 read */
+#define UCM_REG_UCM_PRTY_STS 0xe01d8
+/* [RC 27] Parity register #0 read clear */
+#define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc
+/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
+ REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+ Is used to determine the number of the AG context REG-pairs written back;
+ when the Reg1WbFlg isn't set. */
+#define UCM_REG_UCM_REG0_SZ 0xe00dc
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_UCM_STORM0_IFEN 0xe0004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_UCM_STORM1_IFEN 0xe0008
+/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_UCM_TM_IFEN 0xe0020
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_UCM_UQM_IFEN 0xe000c
+/* [RW 1] If set the Q index received from the QM is inserted into the event
+   ID. */
+#define UCM_REG_UCM_UQM_USE_Q 0xe00d8
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 32 at start-up. */
+#define UCM_REG_UQM_INIT_CRD 0xe0220
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_UQM_P_WEIGHT 0xe00cc
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_UQM_S_WEIGHT 0xe00d0
+/* [RW 28] The CM header value for QM request (primary). */
+#define UCM_REG_UQM_UCM_HDR_P 0xe0094
+/* [RW 28] The CM header value for QM request (secondary). */
+#define UCM_REG_UQM_UCM_HDR_S 0xe0098
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_UQM_UCM_IFEN 0xe0014
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_USDM_IFEN 0xe0018
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the SDM interface is detected. */
+#define UCM_REG_USDM_LENGTH_MIS 0xe0158
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_USDM_WEIGHT 0xe00c8
+/* [RW 1] Input xsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_XSEM_IFEN 0xe002c
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the xsem interface is detected. */
+#define UCM_REG_XSEM_LENGTH_MIS 0xe0164
+/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_XSEM_WEIGHT 0xe00bc
+/* [RW 20] Indirect access to the descriptor table of the XX protection
+   mechanism. The fields are: [5:0] - message length; [14:6] - message
+   pointer; [19:15] - next pointer. */
+#define UCM_REG_XX_DESCR_TABLE 0xe0280
+#define UCM_REG_XX_DESCR_TABLE_SIZE 27
+/* [R 6] Used to read the XX protection Free counter. */
+#define UCM_REG_XX_FREE 0xe016c
+/* [RW 6] Initial value for the credit counter; responsible for filling
+   the Input Stage XX protection buffer with the XX protection pending
+   messages. Write writes the initial credit value; read returns the current
+   value of the credit counter. Must be initialized to 12 at start-up. */
+#define UCM_REG_XX_INIT_CRD 0xe0224
+/* [RW 6] The maximum number of pending messages which may be stored in XX
+   protection. ~ucm_registers_xx_free.xx_free is returned on read. */
+#define UCM_REG_XX_MSG_NUM 0xe0228
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define UCM_REG_XX_OVFL_EVNT_ID 0xe004c
+/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
+   The fields are: [4:0] - tail pointer; [10:5] - Link List size; [15:11] -
+ header pointer. */
+#define UCM_REG_XX_TABLE 0xe0300
+#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE (0x1<<28)
+#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15)
+#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24)
+#define UMAC_COMMAND_CONFIG_REG_PAD_EN (0x1<<5)
+#define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE (0x1<<8)
+#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN (0x1<<4)
+#define UMAC_COMMAND_CONFIG_REG_RX_ENA (0x1<<1)
+#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13)
+#define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1<<0)
+#define UMAC_REG_COMMAND_CONFIG 0x8
+/* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers
+ * to bit 17 of the MAC address etc. */
+#define UMAC_REG_MAC_ADDR0 0xc
+/* [RW 16] Register Bit 0 refers to Bit 0 of the MAC address; Register Bit 1
+ * refers to Bit 1 of the MAC address etc. Bits 16 to 31 are reserved. */
+#define UMAC_REG_MAC_ADDR1 0x10
+/* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive
+ * logic to check frames. */
+#define UMAC_REG_MAXFR 0x14
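+/* Illustrative sketch; not part of the original file: one plausible UMAC
+ * bring-up consistent with the comments above - UMAC_REG_MAC_ADDR0 holds
+ * MAC bits 47:16 and UMAC_REG_MAC_ADDR1 bits 15:0 (assuming mac[0] carries
+ * the most significant byte); RX/TX are then enabled by a read-modify-write
+ * of the command-config register. reg_rd()/reg_wr() are hypothetical
+ * accessors taking offsets relative to the UMAC block base. */
+static inline void umac_enable_sketch(u32 (*reg_rd)(u32 off),
+				      void (*reg_wr)(u32 off, u32 val),
+				      const u8 *mac)
+{
+	u32 val;
+
+	reg_wr(UMAC_REG_MAC_ADDR0, (mac[0] << 24) | (mac[1] << 16) |
+				   (mac[2] << 8) | mac[3]);
+	reg_wr(UMAC_REG_MAC_ADDR1, (mac[4] << 8) | mac[5]);
+
+	val = reg_rd(UMAC_REG_COMMAND_CONFIG);
+	val |= UMAC_COMMAND_CONFIG_REG_RX_ENA | UMAC_COMMAND_CONFIG_REG_TX_ENA;
+	reg_wr(UMAC_REG_COMMAND_CONFIG, val);
+}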
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define USDM_REG_AGG_INT_EVENT_0 0xc4038
+#define USDM_REG_AGG_INT_EVENT_1 0xc403c
+#define USDM_REG_AGG_INT_EVENT_2 0xc4040
+#define USDM_REG_AGG_INT_EVENT_4 0xc4048
+#define USDM_REG_AGG_INT_EVENT_5 0xc404c
+#define USDM_REG_AGG_INT_EVENT_6 0xc4050
+/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
+ or auto-mask-mode (1) */
+#define USDM_REG_AGG_INT_MODE_0 0xc41b8
+#define USDM_REG_AGG_INT_MODE_1 0xc41bc
+#define USDM_REG_AGG_INT_MODE_4 0xc41c8
+#define USDM_REG_AGG_INT_MODE_5 0xc41cc
+#define USDM_REG_AGG_INT_MODE_6 0xc41d0
+/* [RW 1] The T bit for aggregated interrupt 5 */
+#define USDM_REG_AGG_INT_T_5 0xc40cc
+#define USDM_REG_AGG_INT_T_6 0xc40d0
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define USDM_REG_CFC_RSP_START_ADDR 0xc4008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define USDM_REG_CMP_COUNTER_MAX0 0xc401c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define USDM_REG_CMP_COUNTER_MAX1 0xc4020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define USDM_REG_CMP_COUNTER_MAX2 0xc4024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define USDM_REG_CMP_COUNTER_MAX3 0xc4028
+/* [RW 13] The start address in the internal RAM for the completion
+ counters. */
+#define USDM_REG_CMP_COUNTER_START_ADDR 0xc400c
+#define USDM_REG_ENABLE_IN1 0xc4238
+#define USDM_REG_ENABLE_IN2 0xc423c
+#define USDM_REG_ENABLE_OUT1 0xc4240
+#define USDM_REG_ENABLE_OUT2 0xc4244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+ interface without receiving any ACK. */
+#define USDM_REG_INIT_CREDIT_PXP_CTRL 0xc44c0
+/* [ST 32] The number of ACK after placement messages received */
+#define USDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc4280
+/* [ST 32] The number of packet end messages received from the parser */
+#define USDM_REG_NUM_OF_PKT_END_MSG 0xc4278
+/* [ST 32] The number of requests received from the pxp async if */
+#define USDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc427c
+/* [ST 32] The number of commands received in queue 0 */
+#define USDM_REG_NUM_OF_Q0_CMD 0xc4248
+/* [ST 32] The number of commands received in queue 10 */
+#define USDM_REG_NUM_OF_Q10_CMD 0xc4270
+/* [ST 32] The number of commands received in queue 11 */
+#define USDM_REG_NUM_OF_Q11_CMD 0xc4274
+/* [ST 32] The number of commands received in queue 1 */
+#define USDM_REG_NUM_OF_Q1_CMD 0xc424c
+/* [ST 32] The number of commands received in queue 2 */
+#define USDM_REG_NUM_OF_Q2_CMD 0xc4250
+/* [ST 32] The number of commands received in queue 3 */
+#define USDM_REG_NUM_OF_Q3_CMD 0xc4254
+/* [ST 32] The number of commands received in queue 4 */
+#define USDM_REG_NUM_OF_Q4_CMD 0xc4258
+/* [ST 32] The number of commands received in queue 5 */
+#define USDM_REG_NUM_OF_Q5_CMD 0xc425c
+/* [ST 32] The number of commands received in queue 6 */
+#define USDM_REG_NUM_OF_Q6_CMD 0xc4260
+/* [ST 32] The number of commands received in queue 7 */
+#define USDM_REG_NUM_OF_Q7_CMD 0xc4264
+/* [ST 32] The number of commands received in queue 8 */
+#define USDM_REG_NUM_OF_Q8_CMD 0xc4268
+/* [ST 32] The number of commands received in queue 9 */
+#define USDM_REG_NUM_OF_Q9_CMD 0xc426c
+/* [RW 13] The start address in the internal RAM for the packet end message */
+#define USDM_REG_PCK_END_MSG_START_ADDR 0xc4014
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define USDM_REG_Q_COUNTER_START_ADDR 0xc4010
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define USDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc4550
+/* [R 1] parser fifo empty in sdm_sync block */
+#define USDM_REG_SYNC_PARSER_EMPTY 0xc4558
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define USDM_REG_SYNC_SYNC_EMPTY 0xc4560
+/* [RW 32] Tick for timer counter. Applicable only when
+ ~usdm_registers_timer_tick_enable.timer_tick_enable = 1 */
+#define USDM_REG_TIMER_TICK 0xc4000
+/* [RW 32] Interrupt mask register #0 read/write */
+#define USDM_REG_USDM_INT_MASK_0 0xc42a0
+#define USDM_REG_USDM_INT_MASK_1 0xc42b0
+/* [R 32] Interrupt register #0 read */
+#define USDM_REG_USDM_INT_STS_0 0xc4294
+#define USDM_REG_USDM_INT_STS_1 0xc42a4
+/* [RW 11] Parity mask register #0 read/write */
+#define USDM_REG_USDM_PRTY_MASK 0xc42c0
+/* [R 11] Parity register #0 read */
+#define USDM_REG_USDM_PRTY_STS 0xc42b4
+/* [RC 11] Parity register #0 read clear */
+#define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define USEM_REG_ARB_CYCLE_SIZE 0x300034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2 */
+#define USEM_REG_ARB_ELEMENT0 0x300020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~usem_registers_arb_element0.arb_element0 */
+#define USEM_REG_ARB_ELEMENT1 0x300024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~usem_registers_arb_element0.arb_element0
+ and ~usem_registers_arb_element1.arb_element1 */
+#define USEM_REG_ARB_ELEMENT2 0x300028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2. Could
+ not be equal to register ~usem_registers_arb_element0.arb_element0 and
+ ~usem_registers_arb_element1.arb_element1 and
+ ~usem_registers_arb_element2.arb_element2 */
+#define USEM_REG_ARB_ELEMENT3 0x30002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~usem_registers_arb_element0.arb_element0
+ and ~usem_registers_arb_element1.arb_element1 and
+ ~usem_registers_arb_element2.arb_element2 and
+ ~usem_registers_arb_element3.arb_element3 */
+#define USEM_REG_ARB_ELEMENT4 0x300030
+#define USEM_REG_ENABLE_IN 0x3000a4
+#define USEM_REG_ENABLE_OUT 0x3000a8
+/* [RW 32] This address space contains all registers and memories that are
+ placed in SEM_FAST block. The SEM_FAST registers are described in
+ appendix B. In order to access the sem_fast registers the base address
+ ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define USEM_REG_FAST_MEMORY 0x320000
+/* [RW 1] Disables input messages from FIC0. May be updated during run_time
+ by the microcode */
+#define USEM_REG_FIC0_DISABLE 0x300224
+/* [RW 1] Disables input messages from FIC1. May be updated during run_time
+ by the microcode */
+#define USEM_REG_FIC1_DISABLE 0x300234
+/* [RW 15] Interrupt table. Read and write access to it is not possible in
+ the middle of the work */
+#define USEM_REG_INT_TABLE 0x300400
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC0 */
+#define USEM_REG_MSG_NUM_FIC0 0x300000
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC1 */
+#define USEM_REG_MSG_NUM_FIC1 0x300004
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC0 */
+#define USEM_REG_MSG_NUM_FOC0 0x300008
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC1 */
+#define USEM_REG_MSG_NUM_FOC1 0x30000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC2 */
+#define USEM_REG_MSG_NUM_FOC2 0x300010
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC3 */
+#define USEM_REG_MSG_NUM_FOC3 0x300014
+/* [RW 1] Disables input messages from the passive buffer. May be updated
+ during run_time by the microcode */
+#define USEM_REG_PAS_DISABLE 0x30024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define USEM_REG_PASSIVE_BUFFER 0x302000
+/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
+#define USEM_REG_PRAM 0x340000
+/* [R 16] Valid sleeping threads indication; one bit per thread */
+#define USEM_REG_SLEEP_THREADS_VALID 0x30026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define USEM_REG_SLOW_EXT_STORE_EMPTY 0x3002a0
+/* [RW 16] List of free threads. There is a bit per thread. */
+#define USEM_REG_THREADS_LIST 0x3002e4
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define USEM_REG_TS_0_AS 0x300038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define USEM_REG_TS_10_AS 0x300060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define USEM_REG_TS_11_AS 0x300064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define USEM_REG_TS_12_AS 0x300068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define USEM_REG_TS_13_AS 0x30006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define USEM_REG_TS_14_AS 0x300070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define USEM_REG_TS_15_AS 0x300074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define USEM_REG_TS_16_AS 0x300078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define USEM_REG_TS_17_AS 0x30007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define USEM_REG_TS_18_AS 0x300080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define USEM_REG_TS_1_AS 0x30003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define USEM_REG_TS_2_AS 0x300040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define USEM_REG_TS_3_AS 0x300044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define USEM_REG_TS_4_AS 0x300048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define USEM_REG_TS_5_AS 0x30004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define USEM_REG_TS_6_AS 0x300050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define USEM_REG_TS_7_AS 0x300054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define USEM_REG_TS_8_AS 0x300058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define USEM_REG_TS_9_AS 0x30005c
+/* [RW 32] Interrupt mask register #0 read/write */
+#define USEM_REG_USEM_INT_MASK_0 0x300110
+#define USEM_REG_USEM_INT_MASK_1 0x300120
+/* [R 32] Interrupt register #0 read */
+#define USEM_REG_USEM_INT_STS_0 0x300104
+#define USEM_REG_USEM_INT_STS_1 0x300114
+/* [RW 32] Parity mask register #0 read/write */
+#define USEM_REG_USEM_PRTY_MASK_0 0x300130
+#define USEM_REG_USEM_PRTY_MASK_1 0x300140
+/* [R 32] Parity register #0 read */
+#define USEM_REG_USEM_PRTY_STS_0 0x300124
+#define USEM_REG_USEM_PRTY_STS_1 0x300134
+/* [RC 32] Parity register #0 read clear */
+#define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128
+#define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset the error bit
+ * for one of 64 VFs; values 64-67 reset it for one of 4 PFs; values 68-127
+ * are not valid. */
+#define USEM_REG_VFPF_ERR_NUM 0x300380
+#define VFC_MEMORIES_RST_REG_CAM_RST (0x1<<0)
+#define VFC_MEMORIES_RST_REG_RAM_RST (0x1<<1)
+#define VFC_REG_MEMORIES_RST 0x1943c
+/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits
+ * [12:8] of the address should be the offset within the accessed LCID
+ * context; the bits [7:0] are the accessed LCID. Example: to write to
+ * REG10 of LCID 100; the RBC address should be 13'ha64. */
+#define XCM_REG_AG_CTX 0x28000
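+/* Illustrative sketch (not part of the original register DB; the macro
+ * name is an assumption): packing the indirect AG context address
+ * described above. For REG10 (offset 10) of LCID 100 this yields 0xa64;
+ * matching the 13'ha64 example. */
+#define XCM_AG_CTX_RBC_ADDR(reg_offset, lcid) \
+	((((reg_offset) & 0x1f) << 8) | ((lcid) & 0xff))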
+/* [RW 2] The queue index for registration on Aux1 counter flag. */
+#define XCM_REG_AUX1_Q 0x20134
+/* [RW 2] Per each decision rule the queue index to register to. */
+#define XCM_REG_AUX_CNT_FLG_Q_19 0x201b0
+/* [R 5] Used to read the XX protection CAM occupancy counter. */
+#define XCM_REG_CAM_OCCUP 0x20244
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define XCM_REG_CDU_AG_RD_IFEN 0x20044
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+ are disregarded; all other signals are treated as usual; if 1 - normal
+ activity. */
+#define XCM_REG_CDU_AG_WR_IFEN 0x20040
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define XCM_REG_CDU_SM_RD_IFEN 0x2004c
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+ input is disregarded; all other signals are treated as usual; if 1 -
+ normal activity. */
+#define XCM_REG_CDU_SM_WR_IFEN 0x20048
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 1 at start-up. */
+#define XCM_REG_CFC_INIT_CRD 0x20404
+/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_CP_WEIGHT 0x200dc
+/* [RW 1] Input csem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_CSEM_IFEN 0x20028
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the csem interface. */
+#define XCM_REG_CSEM_LENGTH_MIS 0x20228
+/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_CSEM_WEIGHT 0x200c4
+/* [RW 1] Input dorq Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_DORQ_IFEN 0x20030
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the dorq interface. */
+#define XCM_REG_DORQ_LENGTH_MIS 0x20230
+/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_DORQ_WEIGHT 0x200cc
+/* [RW 8] The Event ID in case the ErrorFlg input message bit is set. */
+#define XCM_REG_ERR_EVNT_ID 0x200b0
+/* [RW 28] The CM erroneous header for QM and Timers formatting. */
+#define XCM_REG_ERR_XCM_HDR 0x200ac
+/* [RW 8] The Event ID for Timers expiration. */
+#define XCM_REG_EXPR_EVNT_ID 0x200b4
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define XCM_REG_FIC0_INIT_CRD 0x2040c
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define XCM_REG_FIC1_INIT_CRD 0x20410
+#define XCM_REG_GLB_DEL_ACK_MAX_CNT_0 0x20118
+#define XCM_REG_GLB_DEL_ACK_MAX_CNT_1 0x2011c
+#define XCM_REG_GLB_DEL_ACK_TMR_VAL_0 0x20108
+#define XCM_REG_GLB_DEL_ACK_TMR_VAL_1 0x2010c
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+ - strict priority defined by ~xcm_registers_gr_ag_pr.gr_ag_pr;
+ ~xcm_registers_gr_ld0_pr.gr_ld0_pr and
+ ~xcm_registers_gr_ld1_pr.gr_ld1_pr. */
+#define XCM_REG_GR_ARB_TYPE 0x2020c
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Channel group is the
+ complement of the other 3 groups. */
+#define XCM_REG_GR_LD0_PR 0x20214
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Channel group is the
+ complement of the other 3 groups. */
+#define XCM_REG_GR_LD1_PR 0x20218
+/* [RW 1] Input nig0 Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_NIG0_IFEN 0x20038
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the nig0 interface. */
+#define XCM_REG_NIG0_LENGTH_MIS 0x20238
+/* [RW 3] The weight of the input nig0 in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_NIG0_WEIGHT 0x200d4
+/* [RW 1] Input nig1 Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_NIG1_IFEN 0x2003c
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the nig1 interface. */
+#define XCM_REG_NIG1_LENGTH_MIS 0x2023c
+/* [RW 5] The number of double REG-pairs; loaded from the STORM context and
+ sent to STORM; for a specific connection type. The double REG-pairs are
+ used in order to align to STORM context row size of 128 bits. The offset
+ of these data in the STORM context is always 0. Index _i stands for the
+ connection type (one of 16). */
+#define XCM_REG_N_SM_CTX_LD_0 0x20060
+#define XCM_REG_N_SM_CTX_LD_1 0x20064
+#define XCM_REG_N_SM_CTX_LD_2 0x20068
+#define XCM_REG_N_SM_CTX_LD_3 0x2006c
+#define XCM_REG_N_SM_CTX_LD_4 0x20070
+#define XCM_REG_N_SM_CTX_LD_5 0x20074
+/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_PBF_IFEN 0x20034
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the pbf interface. */
+#define XCM_REG_PBF_LENGTH_MIS 0x20234
+/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_PBF_WEIGHT 0x200d0
+#define XCM_REG_PHYS_QNUM3_0 0x20100
+#define XCM_REG_PHYS_QNUM3_1 0x20104
+/* [RW 8] The Event ID for Timers formatting in case of stop done. */
+#define XCM_REG_STOP_EVNT_ID 0x200b8
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the STORM interface. */
+#define XCM_REG_STORM_LENGTH_MIS 0x2021c
+/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_STORM_WEIGHT 0x200bc
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_STORM_XCM_IFEN 0x20010
+/* [RW 4] Timers output initial credit. Max credit available - 15. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 4 at start-up. */
+#define XCM_REG_TM_INIT_CRD 0x2041c
+/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_TM_WEIGHT 0x200ec
+/* [RW 28] The CM header for Timers expiration command. */
+#define XCM_REG_TM_XCM_HDR 0x200a8
+/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_TM_XCM_IFEN 0x2001c
+/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_TSEM_IFEN 0x20024
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the tsem interface. */
+#define XCM_REG_TSEM_LENGTH_MIS 0x20224
+/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_TSEM_WEIGHT 0x200c0
+/* [RW 2] The queue index for registration on UNA greater NXT decision rule. */
+#define XCM_REG_UNA_GT_NXT_Q 0x20120
+/* [RW 1] Input usem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_USEM_IFEN 0x2002c
+/* [RC 1] Message length mismatch (relative to last indication) at the usem
+ interface. */
+#define XCM_REG_USEM_LENGTH_MIS 0x2022c
+/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_USEM_WEIGHT 0x200c8
+#define XCM_REG_WU_DA_CNT_CMD00 0x201d4
+#define XCM_REG_WU_DA_CNT_CMD01 0x201d8
+#define XCM_REG_WU_DA_CNT_CMD10 0x201dc
+#define XCM_REG_WU_DA_CNT_CMD11 0x201e0
+#define XCM_REG_WU_DA_CNT_UPD_VAL00 0x201e4
+#define XCM_REG_WU_DA_CNT_UPD_VAL01 0x201e8
+#define XCM_REG_WU_DA_CNT_UPD_VAL10 0x201ec
+#define XCM_REG_WU_DA_CNT_UPD_VAL11 0x201f0
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00 0x201c4
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01 0x201c8
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD10 0x201cc
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD11 0x201d0
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XCM_CFC_IFEN 0x20050
+/* [RW 14] Interrupt mask register #0 read/write */
+#define XCM_REG_XCM_INT_MASK 0x202b4
+/* [R 14] Interrupt register #0 read */
+#define XCM_REG_XCM_INT_STS 0x202a8
+/* [RW 30] Parity mask register #0 read/write */
+#define XCM_REG_XCM_PRTY_MASK 0x202c4
+/* [R 30] Parity register #0 read */
+#define XCM_REG_XCM_PRTY_STS 0x202b8
+/* [RC 30] Parity register #0 read clear */
+#define XCM_REG_XCM_PRTY_STS_CLR 0x202bc
+
+/* [RW 4] The size of AG context region 0 in REG-pairs. Designates the MS
+ REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+ Is used to determine the number of the AG context REG-pairs written back
+ when the Reg1WbFlg isn't set. */
+#define XCM_REG_XCM_REG0_SZ 0x200f4
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XCM_STORM0_IFEN 0x20004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XCM_STORM1_IFEN 0x20008
+/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_XCM_TM_IFEN 0x20020
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XCM_XQM_IFEN 0x2000c
+/* [RW 1] If set; the Q index received from the QM is inserted into the event ID. */
+#define XCM_REG_XCM_XQM_USE_Q 0x200f0
+/* [RW 4] The value by which CFC updates the activity counter at QM bypass. */
+#define XCM_REG_XQM_BYP_ACT_UPD 0x200fc
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 32 at start-up. */
+#define XCM_REG_XQM_INIT_CRD 0x20420
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_XQM_P_WEIGHT 0x200e4
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_XQM_S_WEIGHT 0x200e8
+/* [RW 28] The CM header value for QM request (primary). */
+#define XCM_REG_XQM_XCM_HDR_P 0x200a0
+/* [RW 28] The CM header value for QM request (secondary). */
+#define XCM_REG_XQM_XCM_HDR_S 0x200a4
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XQM_XCM_IFEN 0x20014
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XSDM_IFEN 0x20018
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the SDM interface. */
+#define XCM_REG_XSDM_LENGTH_MIS 0x20220
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_XSDM_WEIGHT 0x200e0
+/* [RW 17] Indirect access to the descriptor table of the XX protection
+ mechanism. The fields are: [5:0] - message length; [11:6] - message
+ pointer; [16:12] - next pointer. */
+#define XCM_REG_XX_DESCR_TABLE 0x20480
+#define XCM_REG_XX_DESCR_TABLE_SIZE 32
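+/* Illustrative sketch (macro names are assumptions; not part of the
+ * original file): extracting the three descriptor fields laid out above. */
+#define XCM_XX_DESCR_MSG_LEN(d) ((d) & 0x3f)
+#define XCM_XX_DESCR_MSG_PTR(d) (((d) >> 6) & 0x3f)
+#define XCM_XX_DESCR_NEXT_PTR(d) (((d) >> 12) & 0x1f)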
+/* [R 6] Used to read the XX protection Free counter. */
+#define XCM_REG_XX_FREE 0x20240
+/* [RW 6] Initial value for the credit counter; responsible for filling
+ the Input Stage XX protection buffer with the XX protection pending
+ messages. Max credit available - 3. Write writes the initial credit value;
+ read returns the current value of the credit counter. Must be initialized
+ to 2 at start-up. */
+#define XCM_REG_XX_INIT_CRD 0x20424
+/* [RW 6] The maximum number of pending messages; which may be stored in XX
+ protection. At read the ~xcm_registers_xx_free.xx_free counter is read. */
+#define XCM_REG_XX_MSG_NUM 0x20428
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define XCM_REG_XX_OVFL_EVNT_ID 0x20058
+#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0)
+#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1)
+#define XMAC_CTRL_REG_LINE_LOCAL_LPBK (0x1<<2)
+#define XMAC_CTRL_REG_RX_EN (0x1<<1)
+#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6)
+#define XMAC_CTRL_REG_TX_EN (0x1<<0)
+#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1<<18)
+#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1<<17)
+#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN (0x1<<0)
+#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN (0x1<<3)
+#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN (0x1<<4)
+#define XMAC_PFC_CTRL_HI_REG_TX_PFC_EN (0x1<<5)
+#define XMAC_REG_CLEAR_RX_LSS_STATUS 0x60
+#define XMAC_REG_CTRL 0
+/* [RW 16] Upper 16 bits of the 48-bit ctrl_sa register. Used as the SA in
+ * PAUSE/PFC packets transmitted by the MAC */
+#define XMAC_REG_CTRL_SA_HI 0x2c
+/* [RW 32] Lower 32 bits of the 48-bit ctrl_sa register. Used as the SA in
+ * PAUSE/PFC packets transmitted by the MAC */
+#define XMAC_REG_CTRL_SA_LO 0x28
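+/* Illustrative sketch (the byte ordering is an assumption; not part of the
+ * original file): packing a 48-bit source address mac[0..5] into the two
+ * ctrl_sa registers above.
+ *
+ *	u32 sa_hi = (mac[0] << 8) | mac[1];
+ *	u32 sa_lo = (mac[2] << 24) | (mac[3] << 16) |
+ *		    (mac[4] << 8) | mac[5];
+ */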
+#define XMAC_REG_PAUSE_CTRL 0x68
+#define XMAC_REG_PFC_CTRL 0x70
+#define XMAC_REG_PFC_CTRL_HI 0x74
+#define XMAC_REG_RX_LSS_STATUS 0x58
+/* [RW 14] Maximum packet size in receive direction; exclusive of preamble &
+ * CRC in strip mode */
+#define XMAC_REG_RX_MAX_SIZE 0x40
+#define XMAC_REG_TX_CTRL 0x20
+/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
+ The fields are: [4:0] - tail pointer; [9:5] - Link List size; [14:10] -
+ header pointer. */
+#define XCM_REG_XX_TABLE 0x20500
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define XSDM_REG_AGG_INT_EVENT_0 0x166038
+#define XSDM_REG_AGG_INT_EVENT_1 0x16603c
+#define XSDM_REG_AGG_INT_EVENT_10 0x166060
+#define XSDM_REG_AGG_INT_EVENT_11 0x166064
+#define XSDM_REG_AGG_INT_EVENT_12 0x166068
+#define XSDM_REG_AGG_INT_EVENT_13 0x16606c
+#define XSDM_REG_AGG_INT_EVENT_14 0x166070
+#define XSDM_REG_AGG_INT_EVENT_2 0x166040
+#define XSDM_REG_AGG_INT_EVENT_3 0x166044
+#define XSDM_REG_AGG_INT_EVENT_4 0x166048
+#define XSDM_REG_AGG_INT_EVENT_5 0x16604c
+#define XSDM_REG_AGG_INT_EVENT_6 0x166050
+#define XSDM_REG_AGG_INT_EVENT_7 0x166054
+#define XSDM_REG_AGG_INT_EVENT_8 0x166058
+#define XSDM_REG_AGG_INT_EVENT_9 0x16605c
+/* [RW 1] Defines for each aggregated interrupt index whether the mode is
+ normal (0) or auto-mask-mode (1) */
+#define XSDM_REG_AGG_INT_MODE_0 0x1661b8
+#define XSDM_REG_AGG_INT_MODE_1 0x1661bc
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define XSDM_REG_CFC_RSP_START_ADDR 0x166008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define XSDM_REG_CMP_COUNTER_MAX0 0x16601c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define XSDM_REG_CMP_COUNTER_MAX1 0x166020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define XSDM_REG_CMP_COUNTER_MAX2 0x166024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define XSDM_REG_CMP_COUNTER_MAX3 0x166028
+/* [RW 13] The start address in the internal RAM for the completion
+ counters. */
+#define XSDM_REG_CMP_COUNTER_START_ADDR 0x16600c
+#define XSDM_REG_ENABLE_IN1 0x166238
+#define XSDM_REG_ENABLE_IN2 0x16623c
+#define XSDM_REG_ENABLE_OUT1 0x166240
+#define XSDM_REG_ENABLE_OUT2 0x166244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+ interface without receiving any ACK. */
+#define XSDM_REG_INIT_CREDIT_PXP_CTRL 0x1664bc
+/* [ST 32] The number of ACK after placement messages received */
+#define XSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x16627c
+/* [ST 32] The number of packet end messages received from the parser */
+#define XSDM_REG_NUM_OF_PKT_END_MSG 0x166274
+/* [ST 32] The number of requests received from the pxp async if */
+#define XSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x166278
+/* [ST 32] The number of commands received in queue 0 */
+#define XSDM_REG_NUM_OF_Q0_CMD 0x166248
+/* [ST 32] The number of commands received in queue 10 */
+#define XSDM_REG_NUM_OF_Q10_CMD 0x16626c
+/* [ST 32] The number of commands received in queue 11 */
+#define XSDM_REG_NUM_OF_Q11_CMD 0x166270
+/* [ST 32] The number of commands received in queue 1 */
+#define XSDM_REG_NUM_OF_Q1_CMD 0x16624c
+/* [ST 32] The number of commands received in queue 3 */
+#define XSDM_REG_NUM_OF_Q3_CMD 0x166250
+/* [ST 32] The number of commands received in queue 4 */
+#define XSDM_REG_NUM_OF_Q4_CMD 0x166254
+/* [ST 32] The number of commands received in queue 5 */
+#define XSDM_REG_NUM_OF_Q5_CMD 0x166258
+/* [ST 32] The number of commands received in queue 6 */
+#define XSDM_REG_NUM_OF_Q6_CMD 0x16625c
+/* [ST 32] The number of commands received in queue 7 */
+#define XSDM_REG_NUM_OF_Q7_CMD 0x166260
+/* [ST 32] The number of commands received in queue 8 */
+#define XSDM_REG_NUM_OF_Q8_CMD 0x166264
+/* [ST 32] The number of commands received in queue 9 */
+#define XSDM_REG_NUM_OF_Q9_CMD 0x166268
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define XSDM_REG_Q_COUNTER_START_ADDR 0x166010
+/* [W 17] Generate an operation after completion; bit-16 is
+ * AggVectIdx_valid; bits 15:8 are AggVectIdx; bits 7:5 are the TRIG and
+ * bits 4:0 are the T124Param[4:0] */
+#define XSDM_REG_OPERATION_GEN 0x1664c4
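+/* Illustrative sketch (the macro name is an assumption; not part of the
+ * original file): packing the operation-generation fields described above. */
+#define XSDM_OP_GEN(valid, agg_vect_idx, trig, t124_param) \
+	((((valid) & 0x1) << 16) | (((agg_vect_idx) & 0xff) << 8) | \
+	 (((trig) & 0x7) << 5) | ((t124_param) & 0x1f))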
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x166548
+/* [R 1] parser fifo empty in sdm_sync block */
+#define XSDM_REG_SYNC_PARSER_EMPTY 0x166550
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define XSDM_REG_SYNC_SYNC_EMPTY 0x166558
+/* [RW 32] Tick for timer counter. Applicable only when
+ ~xsdm_registers_timer_tick_enable.timer_tick_enable = 1 */
+#define XSDM_REG_TIMER_TICK 0x166000
+/* [RW 32] Interrupt mask register #0 read/write */
+#define XSDM_REG_XSDM_INT_MASK_0 0x16629c
+#define XSDM_REG_XSDM_INT_MASK_1 0x1662ac
+/* [R 32] Interrupt register #0 read */
+#define XSDM_REG_XSDM_INT_STS_0 0x166290
+#define XSDM_REG_XSDM_INT_STS_1 0x1662a0
+/* [RW 11] Parity mask register #0 read/write */
+#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc
+/* [R 11] Parity register #0 read */
+#define XSDM_REG_XSDM_PRTY_STS 0x1662b0
+/* [RC 11] Parity register #0 read clear */
+#define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define XSEM_REG_ARB_CYCLE_SIZE 0x280034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2 */
+#define XSEM_REG_ARB_ELEMENT0 0x280020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~xsem_registers_arb_element0.arb_element0 */
+#define XSEM_REG_ARB_ELEMENT1 0x280024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~xsem_registers_arb_element0.arb_element0
+ and ~xsem_registers_arb_element1.arb_element1 */
+#define XSEM_REG_ARB_ELEMENT2 0x280028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2. Could
+ not be equal to register ~xsem_registers_arb_element0.arb_element0 and
+ ~xsem_registers_arb_element1.arb_element1 and
+ ~xsem_registers_arb_element2.arb_element2 */
+#define XSEM_REG_ARB_ELEMENT3 0x28002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~xsem_registers_arb_element0.arb_element0
+ and ~xsem_registers_arb_element1.arb_element1 and
+ ~xsem_registers_arb_element2.arb_element2 and
+ ~xsem_registers_arb_element3.arb_element3 */
+#define XSEM_REG_ARB_ELEMENT4 0x280030
+#define XSEM_REG_ENABLE_IN 0x2800a4
+#define XSEM_REG_ENABLE_OUT 0x2800a8
+/* [RW 32] This address space contains all registers and memories that are
+ placed in SEM_FAST block. The SEM_FAST registers are described in
+ appendix B. In order to access the sem_fast registers the base address
+ ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define XSEM_REG_FAST_MEMORY 0x2a0000
+/* [RW 1] Disables input messages from FIC0. May be updated during run_time
+ by the microcode */
+#define XSEM_REG_FIC0_DISABLE 0x280224
+/* [RW 1] Disables input messages from FIC1. May be updated during run_time
+ by the microcode */
+#define XSEM_REG_FIC1_DISABLE 0x280234
+/* [RW 15] Interrupt table. Read and write access to it is not possible in
+ the middle of the work */
+#define XSEM_REG_INT_TABLE 0x280400
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC0 */
+#define XSEM_REG_MSG_NUM_FIC0 0x280000
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC1 */
+#define XSEM_REG_MSG_NUM_FIC1 0x280004
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC0 */
+#define XSEM_REG_MSG_NUM_FOC0 0x280008
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC1 */
+#define XSEM_REG_MSG_NUM_FOC1 0x28000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC2 */
+#define XSEM_REG_MSG_NUM_FOC2 0x280010
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC3 */
+#define XSEM_REG_MSG_NUM_FOC3 0x280014
+/* [RW 1] Disables input messages from the passive buffer. May be updated
+ during run_time by the microcode */
+#define XSEM_REG_PAS_DISABLE 0x28024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define XSEM_REG_PASSIVE_BUFFER 0x282000
+/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
+#define XSEM_REG_PRAM 0x2c0000
+/* [R 16] Valid sleeping threads indication; one bit per thread */
+#define XSEM_REG_SLEEP_THREADS_VALID 0x28026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define XSEM_REG_SLOW_EXT_STORE_EMPTY 0x2802a0
+/* [RW 16] List of free threads. There is a bit per thread. */
+#define XSEM_REG_THREADS_LIST 0x2802e4
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define XSEM_REG_TS_0_AS 0x280038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define XSEM_REG_TS_10_AS 0x280060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define XSEM_REG_TS_11_AS 0x280064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define XSEM_REG_TS_12_AS 0x280068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define XSEM_REG_TS_13_AS 0x28006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define XSEM_REG_TS_14_AS 0x280070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define XSEM_REG_TS_15_AS 0x280074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define XSEM_REG_TS_16_AS 0x280078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define XSEM_REG_TS_17_AS 0x28007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define XSEM_REG_TS_18_AS 0x280080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define XSEM_REG_TS_1_AS 0x28003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define XSEM_REG_TS_2_AS 0x280040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define XSEM_REG_TS_3_AS 0x280044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define XSEM_REG_TS_4_AS 0x280048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define XSEM_REG_TS_5_AS 0x28004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define XSEM_REG_TS_6_AS 0x280050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define XSEM_REG_TS_7_AS 0x280054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define XSEM_REG_TS_8_AS 0x280058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define XSEM_REG_TS_9_AS 0x28005c
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset the error bit
+ * for one of 64 VFs; values 64-67 reset it for one of 4 PFs; values 68-127
+ * are not valid. */
+#define XSEM_REG_VFPF_ERR_NUM 0x280380
+/* [RW 32] Interrupt mask register #0 read/write */
+#define XSEM_REG_XSEM_INT_MASK_0 0x280110
+#define XSEM_REG_XSEM_INT_MASK_1 0x280120
+/* [R 32] Interrupt register #0 read */
+#define XSEM_REG_XSEM_INT_STS_0 0x280104
+#define XSEM_REG_XSEM_INT_STS_1 0x280114
+/* [RW 32] Parity mask register #0 read/write */
+#define XSEM_REG_XSEM_PRTY_MASK_0 0x280130
+#define XSEM_REG_XSEM_PRTY_MASK_1 0x280140
+/* [R 32] Parity register #0 read */
+#define XSEM_REG_XSEM_PRTY_STS_0 0x280124
+#define XSEM_REG_XSEM_PRTY_STS_1 0x280134
+/* [RC 32] Parity register #0 read clear */
+#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128
+#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138
+#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0)
+#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
+#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
+#define MCPR_NVM_CFG4_FLASH_SIZE (0x7L<<0)
+#define MCPR_NVM_COMMAND_DOIT (1L<<4)
+#define MCPR_NVM_COMMAND_DONE (1L<<3)
+#define MCPR_NVM_COMMAND_FIRST (1L<<7)
+#define MCPR_NVM_COMMAND_LAST (1L<<8)
+#define MCPR_NVM_COMMAND_WR (1L<<5)
+#define MCPR_NVM_SW_ARB_ARB_ARB1 (1L<<9)
+#define MCPR_NVM_SW_ARB_ARB_REQ_CLR1 (1L<<5)
+#define MCPR_NVM_SW_ARB_ARB_REQ_SET1 (1L<<1)
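+/* Illustrative sketch (assumption; not part of the original file): a
+ * single-dword NVM access sets FIRST and LAST together with DOIT in the
+ * command register and then polls DONE:
+ *
+ *	u32 cmd = MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST |
+ *		  MCPR_NVM_COMMAND_DOIT;	(plus _WR for a write)
+ *	... write cmd to the NVM command register; poll MCPR_NVM_COMMAND_DONE ...
+ */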
+#define BIGMAC_REGISTER_BMAC_CONTROL (0x00<<3)
+#define BIGMAC_REGISTER_BMAC_XGXS_CONTROL (0x01<<3)
+#define BIGMAC_REGISTER_CNT_MAX_SIZE (0x05<<3)
+#define BIGMAC_REGISTER_RX_CONTROL (0x21<<3)
+#define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS (0x46<<3)
+#define BIGMAC_REGISTER_RX_LSS_STATUS (0x43<<3)
+#define BIGMAC_REGISTER_RX_MAX_SIZE (0x23<<3)
+#define BIGMAC_REGISTER_RX_STAT_GR64 (0x26<<3)
+#define BIGMAC_REGISTER_RX_STAT_GRIPJ (0x42<<3)
+#define BIGMAC_REGISTER_TX_CONTROL (0x07<<3)
+#define BIGMAC_REGISTER_TX_MAX_SIZE (0x09<<3)
+#define BIGMAC_REGISTER_TX_PAUSE_THRESHOLD (0x0A<<3)
+#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3)
+#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3)
+#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3)
+#define BIGMAC2_REGISTER_BMAC_CONTROL (0x00<<3)
+#define BIGMAC2_REGISTER_BMAC_XGXS_CONTROL (0x01<<3)
+#define BIGMAC2_REGISTER_CNT_MAX_SIZE (0x05<<3)
+#define BIGMAC2_REGISTER_PFC_CONTROL (0x06<<3)
+#define BIGMAC2_REGISTER_RX_CONTROL (0x3A<<3)
+#define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62<<3)
+#define BIGMAC2_REGISTER_RX_LSS_STAT (0x3E<<3)
+#define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GRPP (0x51<<3)
+#define BIGMAC2_REGISTER_TX_CONTROL (0x1C<<3)
+#define BIGMAC2_REGISTER_TX_MAX_SIZE (0x1E<<3)
+#define BIGMAC2_REGISTER_TX_PAUSE_CONTROL (0x20<<3)
+#define BIGMAC2_REGISTER_TX_SOURCE_ADDR (0x1D<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTBYT (0x39<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTPOK (0x22<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTPP (0x24<<3)
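+/* Note (assumption; not stated in the original file): the BigMAC register
+ * indices above are shifted left by 3 because the block is addressed with
+ * an 8-byte stride (64-bit wide registers). */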
+#define EMAC_LED_1000MB_OVERRIDE (1L<<1)
+#define EMAC_LED_100MB_OVERRIDE (1L<<2)
+#define EMAC_LED_10MB_OVERRIDE (1L<<3)
+#define EMAC_LED_2500MB_OVERRIDE (1L<<12)
+#define EMAC_LED_OVERRIDE (1L<<0)
+#define EMAC_LED_TRAFFIC (1L<<6)
+#define EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26)
+#define EMAC_MDIO_COMM_COMMAND_READ_22 (2L<<26)
+#define EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26)
+#define EMAC_MDIO_COMM_COMMAND_WRITE_22 (1L<<26)
+#define EMAC_MDIO_COMM_COMMAND_WRITE_45 (1L<<26)
+#define EMAC_MDIO_COMM_DATA (0xffffL<<0)
+#define EMAC_MDIO_COMM_START_BUSY (1L<<29)
+#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4)
+#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31)
+#define EMAC_MDIO_MODE_CLOCK_CNT (0x3ffL<<16)
+#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16
+#define EMAC_MDIO_STATUS_10MB (1L<<1)
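+/* Illustrative sketch (not part of the original file): composing a
+ * clause-22 MDIO write in EMAC_REG_EMAC_MDIO_COMM. The PHY-address and
+ * register-address shifts (21 and 16) are assumptions; only the command,
+ * data and busy fields are defined above.
+ *
+ *	u32 cmd = (phy_addr << 21) | (reg << 16) |
+ *		  (val & EMAC_MDIO_COMM_DATA) |
+ *		  EMAC_MDIO_COMM_COMMAND_WRITE_22 |
+ *		  EMAC_MDIO_COMM_START_BUSY;
+ *	... write cmd to EMAC_REG_EMAC_MDIO_COMM; poll START_BUSY ...
+ */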
+#define EMAC_MODE_25G_MODE (1L<<5)
+#define EMAC_MODE_HALF_DUPLEX (1L<<1)
+#define EMAC_MODE_PORT_GMII (2L<<2)
+#define EMAC_MODE_PORT_MII (1L<<2)
+#define EMAC_MODE_PORT_MII_10M (3L<<2)
+#define EMAC_MODE_RESET (1L<<0)
+#define EMAC_REG_EMAC_LED 0xc
+#define EMAC_REG_EMAC_MAC_MATCH 0x10
+#define EMAC_REG_EMAC_MDIO_COMM 0xac
+#define EMAC_REG_EMAC_MDIO_MODE 0xb4
+#define EMAC_REG_EMAC_MDIO_STATUS 0xb0
+#define EMAC_REG_EMAC_MODE 0x0
+#define EMAC_REG_EMAC_RX_MODE 0xc8
+#define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c
+#define EMAC_REG_EMAC_RX_STAT_AC 0x180
+#define EMAC_REG_EMAC_RX_STAT_AC_28 0x1f4
+#define EMAC_REG_EMAC_RX_STAT_AC_COUNT 23
+#define EMAC_REG_EMAC_TX_MODE 0xbc
+#define EMAC_REG_EMAC_TX_STAT_AC 0x280
+#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22
+#define EMAC_REG_RX_PFC_MODE 0x320
+#define EMAC_REG_RX_PFC_MODE_PRIORITIES (1L<<2)
+#define EMAC_REG_RX_PFC_MODE_RX_EN (1L<<1)
+#define EMAC_REG_RX_PFC_MODE_TX_EN (1L<<0)
+#define EMAC_REG_RX_PFC_PARAM 0x324
+#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT 0
+#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT 16
+#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD 0x328
+#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XOFF_SENT 0x330
+#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XON_RCVD 0x32c
+#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XON_SENT 0x334
+#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT (0xffff<<0)
+#define EMAC_RX_MODE_FLOW_EN (1L<<2)
+#define EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3)
+#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10)
+#define EMAC_RX_MODE_PROMISCUOUS (1L<<8)
+#define EMAC_RX_MODE_RESET (1L<<0)
+#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31)
+#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3)
+#define EMAC_TX_MODE_FLOW_EN (1L<<4)
+#define EMAC_TX_MODE_RESET (1L<<0)
+#define MISC_REGISTERS_GPIO_0 0
+#define MISC_REGISTERS_GPIO_1 1
+#define MISC_REGISTERS_GPIO_2 2
+#define MISC_REGISTERS_GPIO_3 3
+#define MISC_REGISTERS_GPIO_CLR_POS 16
+#define MISC_REGISTERS_GPIO_FLOAT (0xffL<<24)
+#define MISC_REGISTERS_GPIO_FLOAT_POS 24
+#define MISC_REGISTERS_GPIO_HIGH 1
+#define MISC_REGISTERS_GPIO_INPUT_HI_Z 2
+#define MISC_REGISTERS_GPIO_INT_CLR_POS 24
+#define MISC_REGISTERS_GPIO_INT_OUTPUT_CLR 0
+#define MISC_REGISTERS_GPIO_INT_OUTPUT_SET 1
+#define MISC_REGISTERS_GPIO_INT_SET_POS 16
+#define MISC_REGISTERS_GPIO_LOW 0
+#define MISC_REGISTERS_GPIO_OUTPUT_HIGH 1
+#define MISC_REGISTERS_GPIO_OUTPUT_LOW 0
+#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
+#define MISC_REGISTERS_GPIO_SET_POS 8
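+/* Illustrative sketch (macro names are assumptions; not part of the
+ * original file): driving a GPIO pin through the position fields above.
+ * Shifting the pin mask by SET_POS drives the output high; by CLR_POS
+ * drives it low; by FLOAT_POS floats the pin. */
+#define MISC_GPIO_SET_VAL(pin) ((1 << (pin)) << MISC_REGISTERS_GPIO_SET_POS)
+#define MISC_GPIO_CLR_VAL(pin) ((1 << (pin)) << MISC_REGISTERS_GPIO_CLR_POS)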
+#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29)
+#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
+#define MISC_REGISTERS_RESET_REG_1_SET 0x584
+#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
+#define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24)
+#define MISC_REGISTERS_RESET_REG_2_MSTAT1 (0x1<<25)
+#define MISC_REGISTERS_RESET_REG_2_PGLC (0x1<<19)
+#define MISC_REGISTERS_RESET_REG_2_RST_ATC (0x1<<17)
+#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_2_RST_BMAC1 (0x1<<1)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0 (0x1<<2)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1 (0x1<<3)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15)
+#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE (0x1<<8)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5)
+#define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13)
+#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11)
+#define MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO (0x1<<13)
+#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9)
+#define MISC_REGISTERS_RESET_REG_2_SET 0x594
+#define MISC_REGISTERS_RESET_REG_2_UMAC0 (0x1<<20)
+#define MISC_REGISTERS_RESET_REG_2_UMAC1 (0x1<<21)
+#define MISC_REGISTERS_RESET_REG_2_XMAC (0x1<<22)
+#define MISC_REGISTERS_RESET_REG_2_XMAC_SOFT (0x1<<23)
+#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN (0x1<<2)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD (0x1<<3)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ (0x1<<5)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW (0x1<<4)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8)
+#define MISC_REGISTERS_RESET_REG_3_SET 0x5a4
+#define MISC_REGISTERS_SPIO_4 4
+#define MISC_REGISTERS_SPIO_5 5
+#define MISC_REGISTERS_SPIO_7 7
+#define MISC_REGISTERS_SPIO_CLR_POS 16
+#define MISC_REGISTERS_SPIO_FLOAT (0xffL<<24)
+#define MISC_REGISTERS_SPIO_FLOAT_POS 24
+#define MISC_REGISTERS_SPIO_INPUT_HI_Z 2
+#define MISC_REGISTERS_SPIO_INT_OLD_SET_POS 16
+#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1
+#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
+#define MISC_REGISTERS_SPIO_SET_POS 8
+#define HW_LOCK_DRV_FLAGS 10
+#define HW_LOCK_MAX_RESOURCE_VALUE 31
+#define HW_LOCK_RESOURCE_GPIO 1
+#define HW_LOCK_RESOURCE_MDIO 0
+#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
+#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8
+#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9
+#define HW_LOCK_RESOURCE_SPIO 2
+#define HW_LOCK_RESOURCE_RESET 5
+#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
+#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (0x1<<9)
+#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (0x1<<8)
+#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT (0x1<<7)
+#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (0x1<<6)
+#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (0x1<<1)
+#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (0x1<<0)
+#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (0x1<<18)
+#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (0x1<<11)
+#define AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR (0x1<<10)
+#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (0x1<<13)
+#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (0x1<<12)
+#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (0x1<<12)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15)
+#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (0x1<<14)
+#define AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR (0x1<<14)
+#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (0x1<<20)
+#define AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR (0x1<<0)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (0x1<<22)
+#define AEU_INPUTS_ATTN_BITS_SPIO5 (0x1<<15)
+#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (0x1<<27)
+#define AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR (0x1<<26)
+#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (0x1<<25)
+#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (0x1<<24)
+#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (0x1<<23)
+#define AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR (0x1<<22)
+#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (0x1<<27)
+#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (0x1<<26)
+#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (0x1<<21)
+#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (0x1<<20)
+#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (0x1<<25)
+#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (0x1<<24)
+#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (0x1<<16)
+#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (0x1<<9)
+#define AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR (0x1<<8)
+#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (0x1<<7)
+#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (0x1<<6)
+#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (0x1<<11)
+#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (0x1<<10)
+
+#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (0x1<<9)
+
+#define RESERVED_GENERAL_ATTENTION_BIT_0 0
+
+#define EVEREST_GEN_ATTN_IN_USE_MASK 0x7ffe0
+#define EVEREST_LATCHED_ATTN_IN_USE_MASK 0xffe00000
+
+#define RESERVED_GENERAL_ATTENTION_BIT_6 6
+#define RESERVED_GENERAL_ATTENTION_BIT_7 7
+#define RESERVED_GENERAL_ATTENTION_BIT_8 8
+#define RESERVED_GENERAL_ATTENTION_BIT_9 9
+#define RESERVED_GENERAL_ATTENTION_BIT_10 10
+#define RESERVED_GENERAL_ATTENTION_BIT_11 11
+#define RESERVED_GENERAL_ATTENTION_BIT_12 12
+#define RESERVED_GENERAL_ATTENTION_BIT_13 13
+#define RESERVED_GENERAL_ATTENTION_BIT_14 14
+#define RESERVED_GENERAL_ATTENTION_BIT_15 15
+#define RESERVED_GENERAL_ATTENTION_BIT_16 16
+#define RESERVED_GENERAL_ATTENTION_BIT_17 17
+#define RESERVED_GENERAL_ATTENTION_BIT_18 18
+#define RESERVED_GENERAL_ATTENTION_BIT_19 19
+#define RESERVED_GENERAL_ATTENTION_BIT_20 20
+#define RESERVED_GENERAL_ATTENTION_BIT_21 21
+
+/* storm asserts attention bits */
+#define TSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_7
+#define USTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_8
+#define CSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_9
+#define XSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_10
+
+/* mcp error attention bit */
+#define MCP_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_11
+
+/* E1H NIG status sync attention mapped to groups 4-7 */
+#define LINK_SYNC_ATTENTION_BIT_FUNC_0 RESERVED_GENERAL_ATTENTION_BIT_12
+#define LINK_SYNC_ATTENTION_BIT_FUNC_1 RESERVED_GENERAL_ATTENTION_BIT_13
+#define LINK_SYNC_ATTENTION_BIT_FUNC_2 RESERVED_GENERAL_ATTENTION_BIT_14
+#define LINK_SYNC_ATTENTION_BIT_FUNC_3 RESERVED_GENERAL_ATTENTION_BIT_15
+#define LINK_SYNC_ATTENTION_BIT_FUNC_4 RESERVED_GENERAL_ATTENTION_BIT_16
+#define LINK_SYNC_ATTENTION_BIT_FUNC_5 RESERVED_GENERAL_ATTENTION_BIT_17
+#define LINK_SYNC_ATTENTION_BIT_FUNC_6 RESERVED_GENERAL_ATTENTION_BIT_18
+#define LINK_SYNC_ATTENTION_BIT_FUNC_7 RESERVED_GENERAL_ATTENTION_BIT_19
+
+
+#define LATCHED_ATTN_RBCR 23
+#define LATCHED_ATTN_RBCT 24
+#define LATCHED_ATTN_RBCN 25
+#define LATCHED_ATTN_RBCU 26
+#define LATCHED_ATTN_RBCP 27
+#define LATCHED_ATTN_TIMEOUT_GRC 28
+#define LATCHED_ATTN_RSVD_GRC 29
+#define LATCHED_ATTN_ROM_PARITY_MCP 30
+#define LATCHED_ATTN_UM_RX_PARITY_MCP 31
+#define LATCHED_ATTN_UM_TX_PARITY_MCP 32
+#define LATCHED_ATTN_SCPAD_PARITY_MCP 33
+
+#define GENERAL_ATTEN_WORD(atten_name) ((94 + atten_name) / 32)
+#define GENERAL_ATTEN_OFFSET(atten_name)\
+ (1UL << ((94 + atten_name) % 32))
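+/* Worked example (not part of the original file): for
+ * LINK_SYNC_ATTENTION_BIT_FUNC_0 (bit 12); GENERAL_ATTEN_WORD() yields
+ * (94 + 12) / 32 = 3 and GENERAL_ATTEN_OFFSET() yields
+ * 1UL << ((94 + 12) % 32) = 1UL << 10. */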
+/*
+ * This file defines the GRC base address for every block.
+ * This file is included by chipsim, asm microcode and cpp microcode.
+ * These values are used in Design.xml in the regBase attribute.
+ * Use the base together with the generated offsets of specific registers.
+ */
+
+#define GRCBASE_PXPCS 0x000000
+#define GRCBASE_PCICONFIG 0x002000
+#define GRCBASE_PCIREG 0x002400
+#define GRCBASE_EMAC0 0x008000
+#define GRCBASE_EMAC1 0x008400
+#define GRCBASE_DBU 0x008800
+#define GRCBASE_MISC 0x00A000
+#define GRCBASE_DBG 0x00C000
+#define GRCBASE_NIG 0x010000
+#define GRCBASE_XCM 0x020000
+#define GRCBASE_PRS 0x040000
+#define GRCBASE_SRCH 0x040400
+#define GRCBASE_TSDM 0x042000
+#define GRCBASE_TCM 0x050000
+#define GRCBASE_BRB1 0x060000
+#define GRCBASE_MCP 0x080000
+#define GRCBASE_UPB 0x0C1000
+#define GRCBASE_CSDM 0x0C2000
+#define GRCBASE_USDM 0x0C4000
+#define GRCBASE_CCM 0x0D0000
+#define GRCBASE_UCM 0x0E0000
+#define GRCBASE_CDU 0x101000
+#define GRCBASE_DMAE 0x102000
+#define GRCBASE_PXP 0x103000
+#define GRCBASE_CFC 0x104000
+#define GRCBASE_HC 0x108000
+#define GRCBASE_PXP2 0x120000
+#define GRCBASE_PBF 0x140000
+#define GRCBASE_UMAC0 0x160000
+#define GRCBASE_UMAC1 0x160400
+#define GRCBASE_XPB 0x161000
+#define GRCBASE_MSTAT0 0x162000
+#define GRCBASE_MSTAT1 0x162800
+#define GRCBASE_XMAC0 0x163000
+#define GRCBASE_XMAC1 0x163800
+#define GRCBASE_TIMERS 0x164000
+#define GRCBASE_XSDM 0x166000
+#define GRCBASE_QM 0x168000
+#define GRCBASE_DQ 0x170000
+#define GRCBASE_TSEM 0x180000
+#define GRCBASE_CSEM 0x200000
+#define GRCBASE_XSEM 0x280000
+#define GRCBASE_USEM 0x300000
+#define GRCBASE_MISC_AEU GRCBASE_MISC
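+/* Example (not part of the original file): the register offsets in this
+ * file already include the block base; e.g. XSDM_REG_AGG_INT_EVENT_0
+ * (0x166038) equals GRCBASE_XSDM (0x166000) plus the generated per-block
+ * offset 0x38. */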
+
+
+/* offset of configuration space in the pci core register */
+#define PCICFG_OFFSET 0x2000
+#define PCICFG_VENDOR_ID_OFFSET 0x00
+#define PCICFG_DEVICE_ID_OFFSET 0x02
+#define PCICFG_COMMAND_OFFSET 0x04
+#define PCICFG_COMMAND_IO_SPACE (1<<0)
+#define PCICFG_COMMAND_MEM_SPACE (1<<1)
+#define PCICFG_COMMAND_BUS_MASTER (1<<2)
+#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3)
+#define PCICFG_COMMAND_MWI_CYCLES (1<<4)
+#define PCICFG_COMMAND_VGA_SNOOP (1<<5)
+#define PCICFG_COMMAND_PERR_ENA (1<<6)
+#define PCICFG_COMMAND_STEPPING (1<<7)
+#define PCICFG_COMMAND_SERR_ENA (1<<8)
+#define PCICFG_COMMAND_FAST_B2B (1<<9)
+#define PCICFG_COMMAND_INT_DISABLE (1<<10)
+#define PCICFG_COMMAND_RESERVED (0x1f<<11)
+#define PCICFG_STATUS_OFFSET 0x06
+#define PCICFG_REVESION_ID_OFFSET 0x08
+#define PCICFG_CACHE_LINE_SIZE 0x0c
+#define PCICFG_LATENCY_TIMER 0x0d
+#define PCICFG_BAR_1_LOW 0x10
+#define PCICFG_BAR_1_HIGH 0x14
+#define PCICFG_BAR_2_LOW 0x18
+#define PCICFG_BAR_2_HIGH 0x1c
+#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c
+#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e
+#define PCICFG_INT_LINE 0x3c
+#define PCICFG_INT_PIN 0x3d
+#define PCICFG_PM_CAPABILITY 0x48
+#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16)
+#define PCICFG_PM_CAPABILITY_CLOCK (1<<19)
+#define PCICFG_PM_CAPABILITY_RESERVED (1<<20)
+#define PCICFG_PM_CAPABILITY_DSI (1<<21)
+#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22)
+#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25)
+#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26)
+#define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27)
+#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28)
+#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31)
+#define PCICFG_PM_CSR_OFFSET 0x4c
+#define PCICFG_PM_CSR_STATE (0x3<<0)
+#define PCICFG_PM_CSR_PME_ENABLE (1<<8)
+#define PCICFG_PM_CSR_PME_STATUS (1<<15)
+#define PCICFG_MSI_CAP_ID_OFFSET 0x58
+#define PCICFG_MSI_CONTROL_ENABLE (0x1<<16)
+#define PCICFG_MSI_CONTROL_MCAP (0x7<<17)
+#define PCICFG_MSI_CONTROL_MENA (0x7<<20)
+#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23)
+#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE (0x1<<24)
+#define PCICFG_GRC_ADDRESS 0x78
+#define PCICFG_GRC_DATA 0x80
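+/* Illustrative sketch (assumption; not part of the original file): the
+ * GRC address/data pair above allows indirect register access through PCI
+ * configuration space - write the GRC address to config offset 0x78 and
+ * read the register contents back from config offset 0x80:
+ *
+ *	static u32 grc_cfg_read(struct pci_dev *pdev, u32 grc_addr)
+ *	{
+ *		u32 val;
+ *
+ *		pci_write_config_dword(pdev, PCICFG_GRC_ADDRESS, grc_addr);
+ *		pci_read_config_dword(pdev, PCICFG_GRC_DATA, &val);
+ *		return val;
+ *	}
+ */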
+#define PCICFG_MSIX_CAP_ID_OFFSET 0xa0
+#define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16)
+#define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27)
+#define PCICFG_MSIX_CONTROL_FUNC_MASK (0x1<<30)
+#define PCICFG_MSIX_CONTROL_MSIX_ENABLE (0x1<<31)
+
+#define PCICFG_DEVICE_CONTROL 0xb4
+#define PCICFG_DEVICE_STATUS 0xb6
+#define PCICFG_DEVICE_STATUS_CORR_ERR_DET (1<<0)
+#define PCICFG_DEVICE_STATUS_NON_FATAL_ERR_DET (1<<1)
+#define PCICFG_DEVICE_STATUS_FATAL_ERR_DET (1<<2)
+#define PCICFG_DEVICE_STATUS_UNSUP_REQ_DET (1<<3)
+#define PCICFG_DEVICE_STATUS_AUX_PWR_DET (1<<4)
+#define PCICFG_DEVICE_STATUS_NO_PEND (1<<5)
+#define PCICFG_LINK_CONTROL 0xbc
+
+
+#define BAR_USTRORM_INTMEM 0x400000
+#define BAR_CSTRORM_INTMEM 0x410000
+#define BAR_XSTRORM_INTMEM 0x420000
+#define BAR_TSTRORM_INTMEM 0x430000
+
+/* for accessing the IGU in case of status block ACK */
+#define BAR_IGU_INTMEM 0x440000
+
+#define BAR_DOORBELL_OFFSET 0x800000
+
+#define BAR_ME_REGISTER 0x450000
+
+/* config_2 offset */
+#define GRC_CONFIG_2_SIZE_REG 0x408
+#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_256K (3L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_512K (4L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_1M (5L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_2M (6L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_4M (7L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_8M (8L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_16M (9L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_32M (10L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_64M (11L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_128M (12L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0)
+#define PCI_CONFIG_2_BAR1_64ENA (1L<<4)
+#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5)
+#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6)
+#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7)
+#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_8K (3L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_16K (4L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_32K (5L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_64K (6L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_128K (7L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_256K (8L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_512K (9L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_1M (10L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_2M (11L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_4M (12L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8)
+#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16)
+#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17)
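+/* Note on the size encodings above (an observation, not text from the
+ * original header): each step doubles the window, so a field value N in
+ * 1..15 appears to select 64K << (N - 1) for BAR1 (e.g. 5 -> 1M) and
+ * 2K << (N - 1) for the expansion ROM (e.g. 5 -> 32K).
+ */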
+
+/* config_3 offset */
+#define GRC_CONFIG_3_SIZE_REG 0x40c
+#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0)
+#define PCI_CONFIG_3_FORCE_PME (1L<<24)
+#define PCI_CONFIG_3_PME_STATUS (1L<<25)
+#define PCI_CONFIG_3_PME_ENABLE (1L<<26)
+#define PCI_CONFIG_3_PM_STATE (0x3L<<27)
+#define PCI_CONFIG_3_VAUX_PRESET (1L<<30)
+#define PCI_CONFIG_3_PCI_POWER (1L<<31)
+
+#define GRC_BAR2_CONFIG 0x4e0
+#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0)
+#define PCI_CONFIG_2_BAR2_64ENA (1L<<4)
+
+#define PCI_PM_DATA_A 0x410
+#define PCI_PM_DATA_B 0x414
+#define PCI_ID_VAL1 0x434
+#define PCI_ID_VAL2 0x438
+
+#define PXPCS_TL_CONTROL_5 0x814
+#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /* WC */
+#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN (1 << 28) /* WC */
+#define PXPCS_TL_CONTROL_5_MRRS_ERR_ATTN (1 << 27) /* WC */
+#define PXPCS_TL_CONTROL_5_MPS_ERR_ATTN (1 << 26) /* WC */
+#define PXPCS_TL_CONTROL_5_TTX_BRIDGE_FORWARD_ERR (1 << 25) /* WC */
+#define PXPCS_TL_CONTROL_5_TTX_TXINTF_OVERFLOW (1 << 24) /* WC */
+#define PXPCS_TL_CONTROL_5_PHY_ERR_ATTN (1 << 23) /* RO */
+#define PXPCS_TL_CONTROL_5_DL_ERR_ATTN (1 << 22) /* RO */
+#define PXPCS_TL_CONTROL_5_TTX_ERR_NP_TAG_IN_USE (1 << 21) /* WC */
+#define PXPCS_TL_CONTROL_5_TRX_ERR_UNEXP_RTAG (1 << 20) /* WC */
+#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT1 (1 << 19) /* WC */
+#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 (1 << 18) /* WC */
+#define PXPCS_TL_CONTROL_5_ERR_ECRC1 (1 << 17) /* WC */
+#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP1 (1 << 16) /* WC */
+#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW1 (1 << 15) /* WC */
+#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL1 (1 << 14) /* WC */
+#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT1 (1 << 13) /* WC */
+#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT1 (1 << 12) /* WC */
+#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL1 (1 << 11) /* WC */
+#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP1 (1 << 10) /* WC */
+#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT (1 << 9) /* WC */
+#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT (1 << 8) /* WC */
+#define PXPCS_TL_CONTROL_5_ERR_ECRC (1 << 7) /* WC */
+#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP (1 << 6) /* WC */
+#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW (1 << 5) /* WC */
+#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL (1 << 4) /* WC */
+#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT (1 << 3) /* WC */
+#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT (1 << 2) /* WC */
+#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL (1 << 1) /* WC */
+#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP (1 << 0) /* WC */
+
+
+#define PXPCS_TL_FUNC345_STAT 0x854
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT4 (1 << 29) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4\
+ (1 << 28) /* Unsupported Request Error Status in function 4; if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC4\
+ (1 << 27) /* ECRC Error TLP Status in function 4; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP4\
+ (1 << 26) /* Malformed TLP Status in function 4; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW4\
+ (1 << 25) /* Receiver Overflow Status in function 4; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL4\
+ (1 << 24) /* Unexpected Completion Status in function 4; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT4\
+ (1 << 23) /* Receive UR Status in function 4; if set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT4\
+ (1 << 22) /* Completer Timeout Status in function 4; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL4\
+ (1 << 21) /* Flow Control Protocol Error Status in function 4; \
+ if set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP4\
+ (1 << 20) /* Poisoned Error Status in function 4; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT3 (1 << 19) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3\
+ (1 << 18) /* Unsupported Request Error Status in function 3; if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC3\
+ (1 << 17) /* ECRC Error TLP Status in function 3; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP3\
+ (1 << 16) /* Malformed TLP Status in function 3; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW3\
+ (1 << 15) /* Receiver Overflow Status in function 3; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL3\
+ (1 << 14) /* Unexpected Completion Status in function 3; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT3\
+ (1 << 13) /* Receive UR Status in function 3; if set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3\
+ (1 << 12) /* Completer Timeout Status in function 3; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL3\
+ (1 << 11) /* Flow Control Protocol Error Status in function 3; \
+ if set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP3\
+ (1 << 10) /* Poisoned Error Status in function 3; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT2 (1 << 9) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2\
+ (1 << 8) /* Unsupported Request Error Status in function 2; if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC2\
+ (1 << 7) /* ECRC Error TLP Status in function 2; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP2\
+ (1 << 6) /* Malformed TLP Status in function 2; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW2\
+ (1 << 5) /* Receiver Overflow Status in function 2; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL2\
+ (1 << 4) /* Unexpected Completion Status in function 2; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2\
+ (1 << 3) /* Receive UR Status in function 2; if set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2\
+ (1 << 2) /* Completer Timeout Status in function 2; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL2\
+ (1 << 1) /* Flow Control Protocol Error Status in function 2; \
+ if set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP2\
+ (1 << 0) /* Poisoned Error Status in function 2; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+
+
+#define PXPCS_TL_FUNC678_STAT 0x85C
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT7 (1 << 29) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7\
+ (1 << 28) /* Unsupported Request Error Status in function 7; if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC7\
+ (1 << 27) /* ECRC Error TLP Status in function 7; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP7\
+ (1 << 26) /* Malformed TLP Status in function 7; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW7\
+ (1 << 25) /* Receiver Overflow Status in function 7; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL7\
+ (1 << 24) /* Unexpected Completion Status in function 7; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT7\
+ (1 << 23) /* Receive UR Status in function 7; if set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT7\
+ (1 << 22) /* Completer Timeout Status in function 7; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL7\
+ (1 << 21) /* Flow Control Protocol Error Status in function 7; \
+ if set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP7\
+ (1 << 20) /* Poisoned Error Status in function 7; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT6 (1 << 19) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6\
+ (1 << 18) /* Unsupported Request Error Status in function 6; if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC6\
+ (1 << 17) /* ECRC Error TLP Status in function 6; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP6\
+ (1 << 16) /* Malformed TLP Status in function 6; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW6\
+ (1 << 15) /* Receiver Overflow Status in function 6; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL6\
+ (1 << 14) /* Unexpected Completion Status in function 6; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT6\
+ (1 << 13) /* Receive UR Status in function 6; if set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT6\
+ (1 << 12) /* Completer Timeout Status in function 6; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL6\
+ (1 << 11) /* Flow Control Protocol Error Status in function 6; \
+ if set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP6\
+ (1 << 10) /* Poisoned Error Status in function 6; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT5 (1 << 9) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5\
+ (1 << 8) /* Unsupported Request Error Status in function 5; if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC5\
+ (1 << 7) /* ECRC Error TLP Status in function 5; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP5\
+ (1 << 6) /* Malformed TLP Status in function 5; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW5\
+ (1 << 5) /* Receiver Overflow Status in function 5; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL5\
+ (1 << 4) /* Unexpected Completion Status in function 5; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5\
+ (1 << 3) /* Receive UR Status in function 5; if set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5\
+ (1 << 2) /* Completer Timeout Status in function 5; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL5\
+ (1 << 1) /* Flow Control Protocol Error Status in function 5; \
+ if set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP5\
+ (1 << 0) /* Poisoned Error Status in function 5; if set, \
+ generate pcie_err_attn output when this error is seen. WC */
+
+
+#define BAR_USTRORM_INTMEM 0x400000
+#define BAR_CSTRORM_INTMEM 0x410000
+#define BAR_XSTRORM_INTMEM 0x420000
+#define BAR_TSTRORM_INTMEM 0x430000
+
+/* for accessing the IGU in case of status block ACK */
+#define BAR_IGU_INTMEM 0x440000
+
+#define BAR_DOORBELL_OFFSET 0x800000
+
+#define BAR_ME_REGISTER 0x450000
+#define ME_REG_PF_NUM_SHIFT 0
+#define ME_REG_PF_NUM\
+ (7L<<ME_REG_PF_NUM_SHIFT) /* Relative PF Num */
+#define ME_REG_VF_VALID (1<<8)
+#define ME_REG_VF_NUM_SHIFT 9
+#define ME_REG_VF_NUM_MASK (0x3f<<ME_REG_VF_NUM_SHIFT)
+#define ME_REG_VF_ERR (0x1<<3)
+#define ME_REG_ABS_PF_NUM_SHIFT 16
+#define ME_REG_ABS_PF_NUM\
+ (7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */
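+/* Illustrative decode of a dword read through the ME register window
+ * (a sketch, not part of the original header):
+ *	pf = (me & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT;
+ *	vf = (me & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
+ * The vf field is only meaningful when ME_REG_VF_VALID is set.
+ */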
+
+
+#define MDIO_REG_BANK_CL73_IEEEB0 0x0
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000
+
+#define MDIO_REG_BANK_CL73_IEEEB1 0x10
+#define MDIO_CL73_IEEEB1_AN_ADV1 0x00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE 0x0400
+#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC 0x0800
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH 0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK 0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV2 0x01
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1 0x03
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE 0x0400
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00
+
+#define MDIO_REG_BANK_RX0 0x80b0
+#define MDIO_RX0_RX_STATUS 0x10
+#define MDIO_RX0_RX_STATUS_SIGDET 0x8000
+#define MDIO_RX0_RX_STATUS_RX_SEQ_DONE 0x1000
+#define MDIO_RX0_RX_EQ_BOOST 0x1c
+#define MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX1 0x80c0
+#define MDIO_RX1_RX_EQ_BOOST 0x1c
+#define MDIO_RX1_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX1_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX2 0x80d0
+#define MDIO_RX2_RX_EQ_BOOST 0x1c
+#define MDIO_RX2_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX2_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX3 0x80e0
+#define MDIO_RX3_RX_EQ_BOOST 0x1c
+#define MDIO_RX3_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX3_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX_ALL 0x80f0
+#define MDIO_RX_ALL_RX_EQ_BOOST 0x1c
+#define MDIO_RX_ALL_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX_ALL_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_TX0 0x8060
+#define MDIO_TX0_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_TX1 0x8070
+#define MDIO_TX1_TX_DRIVER 0x17
+
+#define MDIO_REG_BANK_TX2 0x8080
+#define MDIO_TX2_TX_DRIVER 0x17
+
+#define MDIO_REG_BANK_TX3 0x8090
+#define MDIO_TX3_TX_DRIVER 0x17
+
+#define MDIO_REG_BANK_XGXS_BLOCK0 0x8000
+#define MDIO_BLOCK0_XGXS_CONTROL 0x10
+
+#define MDIO_REG_BANK_XGXS_BLOCK1 0x8010
+#define MDIO_BLOCK1_LANE_CTRL0 0x15
+#define MDIO_BLOCK1_LANE_CTRL1 0x16
+#define MDIO_BLOCK1_LANE_CTRL2 0x17
+#define MDIO_BLOCK1_LANE_PRBS 0x19
+
+#define MDIO_REG_BANK_XGXS_BLOCK2 0x8100
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP 0x10
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE 0x8000
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE 0x4000
+#define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11
+#define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE 0x8000
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G 0x14
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS 0x0001
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS 0x0010
+#define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15
+
+#define MDIO_REG_BANK_GP_STATUS 0x8120
+#define MDIO_GP_STATUS_TOP_AN_STATUS1 0x1B
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE 0x0001
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE 0x0002
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS 0x0004
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS 0x0008
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE 0x0010
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE 0x0020
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE 0x0040
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE 0x0080
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK 0x3f00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M 0x0000
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M 0x0100
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G 0x0200
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G 0x0300
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G 0x0400
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G 0x0500
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG 0x0600
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 0x0700
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG 0x0800
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G 0x0900
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G 0x0A00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G 0x0B00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR 0x0F00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 0x1B00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 0x1E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 0x1F00
+
+
+#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS 0x10
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK 0x8000
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT (0xb71<<1)
+
+#define MDIO_REG_BANK_SERDES_DIGITAL 0x8300
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1 0x10
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_TBI_IF 0x0002
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN 0x0004
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT 0x0008
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET 0x0010
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE 0x0020
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2 0x11
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SGMII 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_LINK 0x0002
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_2_5G 0x0018
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2 0x15
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED 0x0002
+#define MDIO_SERDES_DIGITAL_MISC1 0x18
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_100M 0x2000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_125M 0x4000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M 0x6000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_187_5M 0x8000
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL 0x0010
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK 0x000f
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_2_5G 0x0000
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_5G 0x0001
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_6G 0x0002
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_HIG 0x0003
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4 0x0004
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12G 0x0005
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12_5G 0x0006
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G 0x0007
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_15G 0x0008
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_16G 0x0009
+
+#define MDIO_REG_BANK_OVER_1G 0x8320
+#define MDIO_OVER_1G_DIGCTL_3_4 0x14
+#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_MASK 0xffe0
+#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_SHIFT 5
+#define MDIO_OVER_1G_UP1 0x19
+#define MDIO_OVER_1G_UP1_2_5G 0x0001
+#define MDIO_OVER_1G_UP1_5G 0x0002
+#define MDIO_OVER_1G_UP1_6G 0x0004
+#define MDIO_OVER_1G_UP1_10G 0x0010
+#define MDIO_OVER_1G_UP1_10GH 0x0008
+#define MDIO_OVER_1G_UP1_12G 0x0020
+#define MDIO_OVER_1G_UP1_12_5G 0x0040
+#define MDIO_OVER_1G_UP1_13G 0x0080
+#define MDIO_OVER_1G_UP1_15G 0x0100
+#define MDIO_OVER_1G_UP1_16G 0x0200
+#define MDIO_OVER_1G_UP2 0x1A
+#define MDIO_OVER_1G_UP2_IPREDRIVER_MASK 0x0007
+#define MDIO_OVER_1G_UP2_IDRIVER_MASK 0x0038
+#define MDIO_OVER_1G_UP2_PREEMPHASIS_MASK 0x03C0
+#define MDIO_OVER_1G_UP3 0x1B
+#define MDIO_OVER_1G_UP3_HIGIG2 0x0001
+#define MDIO_OVER_1G_LP_UP1 0x1C
+#define MDIO_OVER_1G_LP_UP2 0x1D
+#define MDIO_OVER_1G_LP_UP2_MR_ADV_OVER_1G_MASK 0x03ff
+#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK 0x0780
+#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT 7
+#define MDIO_OVER_1G_LP_UP3 0x1E
+
+#define MDIO_REG_BANK_REMOTE_PHY 0x8330
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS 0x10
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG 0x0010
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG 0x0600
+
+#define MDIO_REG_BANK_BAM_NEXT_PAGE 0x8350
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL 0x10
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE 0x0001
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN 0x0002
+
+#define MDIO_REG_BANK_CL73_USERB0 0x8370
+#define MDIO_CL73_USERB0_CL73_UCTRL 0x10
+#define MDIO_CL73_USERB0_CL73_UCTRL_USTAT1_MUXSEL 0x0002
+#define MDIO_CL73_USERB0_CL73_USTAT1 0x11
+#define MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK 0x0100
+#define MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37 0x0400
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1 0x12
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN 0x8000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN 0x4000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN 0x2000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL3 0x14
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR 0x0001
+
+#define MDIO_REG_BANK_AER_BLOCK 0xFFD0
+#define MDIO_AER_BLOCK_AER_REG 0x1E
+
+#define MDIO_REG_BANK_COMBO_IEEE0 0xFFE0
+#define MDIO_COMBO_IEEE0_MII_CONTROL 0x10
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK 0x2040
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_10 0x0000
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100 0x2000
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000 0x0040
+#define MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX 0x0100
+#define MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN 0x0200
+#define MDIO_COMBO_IEEO_MII_CONTROL_AN_EN 0x1000
+#define MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK 0x4000
+#define MDIO_COMBO_IEEO_MII_CONTROL_RESET 0x8000
+#define MDIO_COMBO_IEEE0_MII_STATUS 0x11
+#define MDIO_COMBO_IEEE0_MII_STATUS_LINK_PASS 0x0004
+#define MDIO_COMBO_IEEE0_MII_STATUS_AUTONEG_COMPLETE 0x0020
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV 0x14
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX 0x0020
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_HALF_DUPLEX 0x0040
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE 0x0000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC 0x0080
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC 0x0100
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_NEXT_PAGE 0x8000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1 0x15
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_NEXT_PAGE 0x8000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_ACK 0x4000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_MASK 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_NONE 0x0000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_BOTH 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_HALF_DUP_CAP 0x0040
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_FULL_DUP_CAP 0x0020
+/* When the link partner is in SGMII mode (bit 0 = 1), then bit 15 = link,
+ * bit 12 = duplex, bits 11:10 = speed, bit 14 = acknowledge. The other
+ * bits are reserved and should be zero.
+ */
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE 0x0001
+
+
+#define MDIO_PMA_DEVAD 0x1
+/* ieee */
+#define MDIO_PMA_REG_CTRL 0x0
+#define MDIO_PMA_REG_STATUS 0x1
+#define MDIO_PMA_REG_10G_CTRL2 0x7
+#define MDIO_PMA_REG_TX_DISABLE 0x0009
+#define MDIO_PMA_REG_RX_SD 0xa
+/* bcm */
+#define MDIO_PMA_REG_BCM_CTRL 0x0096
+#define MDIO_PMA_REG_FEC_CTRL 0x00ab
+#define MDIO_PMA_REG_PHY_IDENTIFIER 0xc800
+#define MDIO_PMA_REG_DIGITAL_CTRL 0xc808
+#define MDIO_PMA_REG_DIGITAL_STATUS 0xc809
+#define MDIO_PMA_REG_TX_POWER_DOWN 0xca02
+#define MDIO_PMA_REG_CMU_PLL_BYPASS 0xca09
+#define MDIO_PMA_REG_MISC_CTRL 0xca0a
+#define MDIO_PMA_REG_GEN_CTRL 0xca10
+#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188
+#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a
+#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12
+#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13
+#define MDIO_PMA_REG_ROM_VER1 0xca19
+#define MDIO_PMA_REG_ROM_VER2 0xca1a
+#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b
+#define MDIO_PMA_REG_PLL_BANDWIDTH 0xca1d
+#define MDIO_PMA_REG_PLL_CTRL 0xca1e
+#define MDIO_PMA_REG_MISC_CTRL0 0xca23
+#define MDIO_PMA_REG_LRM_MODE 0xca3f
+#define MDIO_PMA_REG_CDR_BANDWIDTH 0xca46
+#define MDIO_PMA_REG_MISC_CTRL1 0xca85
+
+#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL 0x8000
+#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK 0x000c
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE 0x0000
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE 0x0004
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IN_PROGRESS 0x0008
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_FAILED 0x000c
+#define MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT 0x8002
+#define MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR 0x8003
+#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF 0xc820
+#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK 0xff
+#define MDIO_PMA_REG_8726_TX_CTRL1 0xca01
+#define MDIO_PMA_REG_8726_TX_CTRL2 0xca05
+
+#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005
+#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007
+#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff
+#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02
+#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05
+#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
+#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
+#define MDIO_PMA_REG_8727_PCS_GP 0xc842
+#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4
+
+#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
+
+#define MDIO_PMA_REG_8073_CHIP_REV 0xc801
+#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820
+#define MDIO_PMA_REG_8073_XAUI_WA 0xc841
+#define MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL 0xcd08
+
+#define MDIO_PMA_REG_7101_RESET 0xc000
+#define MDIO_PMA_REG_7107_LED_CNTL 0xc007
+#define MDIO_PMA_REG_7107_LINK_LED_CNTL 0xc009
+#define MDIO_PMA_REG_7101_VER1 0xc026
+#define MDIO_PMA_REG_7101_VER2 0xc027
+
+#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811
+#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c
+#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f
+#define MDIO_PMA_REG_8481_LED3_MASK 0xa832
+#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834
+#define MDIO_PMA_REG_8481_LED5_MASK 0xa838
+#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835
+#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK 0x800
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT 11
+
+
+#define MDIO_WIS_DEVAD 0x2
+/* bcm */
+#define MDIO_WIS_REG_LASI_CNTL 0x9002
+#define MDIO_WIS_REG_LASI_STATUS 0x9005
+
+#define MDIO_PCS_DEVAD 0x3
+#define MDIO_PCS_REG_STATUS 0x0020
+#define MDIO_PCS_REG_LASI_STATUS 0x9005
+#define MDIO_PCS_REG_7101_DSP_ACCESS 0xD000
+#define MDIO_PCS_REG_7101_SPI_MUX 0xD008
+#define MDIO_PCS_REG_7101_SPI_CTRL_ADDR 0xE12A
+#define MDIO_PCS_REG_7101_SPI_RESET_BIT (5)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR 0xE02A
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD (6)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_BULK_ERASE_CMD (0xC7)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_PAGE_PROGRAM_CMD (2)
+#define MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR 0xE028
+
+
+#define MDIO_XS_DEVAD 0x4
+#define MDIO_XS_PLL_SEQUENCER 0x8000
+#define MDIO_XS_SFX7101_XGXS_TEST1 0xc00a
+
+#define MDIO_XS_8706_REG_BANK_RX0 0x80bc
+#define MDIO_XS_8706_REG_BANK_RX1 0x80cc
+#define MDIO_XS_8706_REG_BANK_RX2 0x80dc
+#define MDIO_XS_8706_REG_BANK_RX3 0x80ec
+#define MDIO_XS_8706_REG_BANK_RXA 0x80fc
+
+#define MDIO_XS_REG_8073_RX_CTRL_PCIE 0x80FA
+
+#define MDIO_AN_DEVAD 0x7
+/* ieee */
+#define MDIO_AN_REG_CTRL 0x0000
+#define MDIO_AN_REG_STATUS 0x0001
+#define MDIO_AN_REG_STATUS_AN_COMPLETE 0x0020
+#define MDIO_AN_REG_ADV_PAUSE 0x0010
+#define MDIO_AN_REG_ADV_PAUSE_PAUSE 0x0400
+#define MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC 0x0800
+#define MDIO_AN_REG_ADV_PAUSE_BOTH 0x0C00
+#define MDIO_AN_REG_ADV_PAUSE_MASK 0x0C00
+#define MDIO_AN_REG_ADV 0x0011
+#define MDIO_AN_REG_ADV2 0x0012
+#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
+#define MDIO_AN_REG_MASTER_STATUS 0x0021
+/* bcm */
+#define MDIO_AN_REG_LINK_STATUS 0x8304
+#define MDIO_AN_REG_CL37_CL73 0x8370
+#define MDIO_AN_REG_CL37_AN 0xffe0
+#define MDIO_AN_REG_CL37_FC_LD 0xffe4
+#define MDIO_AN_REG_CL37_FC_LP 0xffe5
+
+#define MDIO_AN_REG_8073_2_5G 0x8329
+#define MDIO_AN_REG_8073_BAM 0x8350
+
+#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020
+#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
+#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1
+#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
+#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6
+#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
+#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5
+#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7
+#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8
+#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc
+
+/* BCM84823 only */
+#define MDIO_CTL_DEVAD 0x1e
+#define MDIO_CTL_REG_84823_MEDIA 0x401a
+#define MDIO_CTL_REG_84823_MEDIA_MAC_MASK 0x0018
+ /* These pins configure the BCM84823 interface to MAC after reset. */
+#define MDIO_CTL_REG_84823_CTRL_MAC_XFI 0x0008
+#define MDIO_CTL_REG_84823_MEDIA_MAC_XAUI_M 0x0010
+ /* These pins configure the BCM84823 interface to Line after reset. */
+#define MDIO_CTL_REG_84823_MEDIA_LINE_MASK 0x0060
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L 0x0020
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XFI 0x0040
+ /* When this pin is active high during reset, the 10GBASE-T core is
+ * powered down; when it is active low, the 10GBASE-T core is
+ * powered up.
+ */
+#define MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN 0x0080
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK 0x0100
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
+#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
+#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
+
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
+
+/* BCM84833 only */
+#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
+#define MDIO_84833_SUPER_ISOLATE 0x8000
+/* These are the mailbox registers used by the 84833. */
+#define MDIO_84833_TOP_CFG_SCRATCH_REG0 0x4005
+#define MDIO_84833_TOP_CFG_SCRATCH_REG1 0x4006
+#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007
+#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008
+#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009
+#define MDIO_84833_TOP_CFG_DATA3_REG 0x4011
+#define MDIO_84833_TOP_CFG_DATA4_REG 0x4012
+
+/* Mailbox command set used by 84833. */
+#define PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE 0x2
+/* Mailbox status set used by 84833. */
+#define PHY84833_CMD_RECEIVED 0x0001
+#define PHY84833_CMD_IN_PROGRESS 0x0002
+#define PHY84833_CMD_COMPLETE_PASS 0x0004
+#define PHY84833_CMD_COMPLETE_ERROR 0x0008
+#define PHY84833_CMD_OPEN_FOR_CMDS 0x0010
+#define PHY84833_CMD_SYSTEM_BOOT 0x0020
+#define PHY84833_CMD_NOT_OPEN_FOR_CMDS 0x0040
+#define PHY84833_CMD_CLEAR_COMPLETE 0x0080
+#define PHY84833_CMD_OPEN_OVERRIDE 0xa5a5
+
+
+/* 84833 F/W Feature Commands */
+#define PHY84833_DIAG_CMD_GET_EEE_MODE 0x27
+#define PHY84833_DIAG_CMD_SET_EEE_MODE 0x28
+
+/* Warpcore clause 45 addressing */
+#define MDIO_WC_DEVAD 0x3
+#define MDIO_WC_REG_IEEE0BLK_MIICNTL 0x0
+#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000
+#define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96
+#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000
+#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e
+#define MDIO_WC_REG_XGXSBLK1_DESKEW 0x8010
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL0 0x8015
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL1 0x8016
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL2 0x8017
+#define MDIO_WC_REG_TX0_ANA_CTRL0 0x8061
+#define MDIO_WC_REG_TX1_ANA_CTRL0 0x8071
+#define MDIO_WC_REG_TX2_ANA_CTRL0 0x8081
+#define MDIO_WC_REG_TX3_ANA_CTRL0 0x8091
+#define MDIO_WC_REG_TX0_TX_DRIVER 0x8067
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET 0x04
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK 0x00f0
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET 0x08
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET 0x0c
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_MASK 0x7000
+#define MDIO_WC_REG_TX1_TX_DRIVER 0x8077
+#define MDIO_WC_REG_TX2_TX_DRIVER 0x8087
+#define MDIO_WC_REG_TX3_TX_DRIVER 0x8097
+#define MDIO_WC_REG_RX0_ANARXCONTROL1G 0x80b9
+#define MDIO_WC_REG_RX2_ANARXCONTROL1G 0x80d9
+#define MDIO_WC_REG_RX0_PCI_CTRL 0x80ba
+#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca
+#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da
+#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
+#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
+#define MDIO_WC_REG_XGXS_STATUS3 0x8129
+#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
+#define MDIO_WC_REG_PAR_DET_10G_CTRL 0x8131
+#define MDIO_WC_REG_XGXS_X2_CONTROL2 0x8141
+#define MDIO_WC_REG_XGXS_RX_LN_SWAP1 0x816B
+#define MDIO_WC_REG_XGXS_TX_LN_SWAP1 0x8169
+#define MDIO_WC_REG_GP2_STATUS_GP_2_0 0x81d0
+#define MDIO_WC_REG_GP2_STATUS_GP_2_1 0x81d1
+#define MDIO_WC_REG_GP2_STATUS_GP_2_2 0x81d2
+#define MDIO_WC_REG_GP2_STATUS_GP_2_3 0x81d3
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4 0x81d4
+#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP 0x81EE
+#define MDIO_WC_REG_UC_INFO_B1_VERSION 0x81F0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE 0x81F2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE0_OFFSET 0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT 0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_OPT_LR 0x1
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC 0x2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_XLAUI 0x3
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_LONG_CH_6G 0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE1_OFFSET 0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE2_OFFSET 0x8
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE3_OFFSET 0xc
+#define MDIO_WC_REG_UC_INFO_B1_CRC 0x81FE
+#define MDIO_WC_REG_DSC_SMC 0x8213
+#define MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0 0x821e
+#define MDIO_WC_REG_TX_FIR_TAP 0x82e2
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET 0x00
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK 0x000f
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET 0x04
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK 0x03f0
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET 0x0a
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK 0x7c00
+#define MDIO_WC_REG_TX_FIR_TAP_ENABLE 0x8000
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL 0x82e3
+#define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL 0x82e6
+#define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL 0x82e7
+#define MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL 0x82e8
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC4_CONTROL 0x82ec
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1 0x8300
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2 0x8301
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3 0x8302
+#define MDIO_WC_REG_SERDESDIGITAL_STATUS1000X1 0x8304
+#define MDIO_WC_REG_SERDESDIGITAL_MISC1 0x8308
+#define MDIO_WC_REG_SERDESDIGITAL_MISC2 0x8309
+#define MDIO_WC_REG_DIGITAL3_UP1 0x8329
+#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c
+#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345
+#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349
+#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e
+#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350
+#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368
+#define MDIO_WC_REG_TX66_CONTROL 0x83b0
+#define MDIO_WC_REG_RX66_CONTROL 0x83c0
+#define MDIO_WC_REG_RX66_SCW0 0x83c2
+#define MDIO_WC_REG_RX66_SCW1 0x83c3
+#define MDIO_WC_REG_RX66_SCW2 0x83c4
+#define MDIO_WC_REG_RX66_SCW3 0x83c5
+#define MDIO_WC_REG_RX66_SCW0_MASK 0x83c6
+#define MDIO_WC_REG_RX66_SCW1_MASK 0x83c7
+#define MDIO_WC_REG_RX66_SCW2_MASK 0x83c8
+#define MDIO_WC_REG_RX66_SCW3_MASK 0x83c9
+#define MDIO_WC_REG_FX100_CTRL1 0x8400
+#define MDIO_WC_REG_FX100_CTRL3 0x8402
+
+#define MDIO_WC_REG_MICROBLK_CMD 0xffc2
+#define MDIO_WC_REG_MICROBLK_DL_STATUS 0xffc5
+#define MDIO_WC_REG_MICROBLK_CMD3 0xffcc
+
+#define MDIO_WC_REG_AERBLK_AER 0xffde
+#define MDIO_WC_REG_COMBO_IEEE0_MIICTRL 0xffe0
+#define MDIO_WC_REG_COMBO_IEEE0_MIIISTAT 0xffe1
+
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET 0x810A
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_RX_BITSHIFT 0
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_TX_BITSHIFT 4
+
+#define MDIO_WC0_XGXS_BLK6_XGXS_X2_CONTROL2 0x8141
+
+#define DIGITAL5_ACTUAL_SPEED_TX_MASK 0x003f
+
+/* 54618se */
+#define MDIO_REG_GPHY_PHYID_LSB 0x3
+#define MDIO_REG_GPHY_ID_54618SE 0x5cd5
+#define MDIO_REG_GPHY_CL45_ADDR_REG 0xd
+#define MDIO_REG_GPHY_CL45_DATA_REG 0xe
+#define MDIO_REG_GPHY_EEE_ADV 0x3c
+#define MDIO_REG_GPHY_EEE_1G (0x1 << 2)
+#define MDIO_REG_GPHY_EEE_100 (0x1 << 1)
+#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
+#define MDIO_REG_INTR_STATUS 0x1a
+#define MDIO_REG_INTR_MASK 0x1b
+#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
+#define MDIO_REG_GPHY_SHADOW 0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10)
+#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15)
+#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10)
+#define MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD (0x1 << 8)
+
+#define IGU_FUNC_BASE 0x0400
+
+#define IGU_ADDR_MSIX 0x0000
+#define IGU_ADDR_INT_ACK 0x0200
+#define IGU_ADDR_PROD_UPD 0x0201
+#define IGU_ADDR_ATTN_BITS_UPD 0x0202
+#define IGU_ADDR_ATTN_BITS_SET 0x0203
+#define IGU_ADDR_ATTN_BITS_CLR 0x0204
+#define IGU_ADDR_COALESCE_NOW 0x0205
+#define IGU_ADDR_SIMD_MASK 0x0206
+#define IGU_ADDR_SIMD_NOMASK 0x0207
+#define IGU_ADDR_MSI_CTL 0x0210
+#define IGU_ADDR_MSI_ADDR_LO 0x0211
+#define IGU_ADDR_MSI_ADDR_HI 0x0212
+#define IGU_ADDR_MSI_DATA 0x0213
+
+#define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup 0
+#define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup 1
+#define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 2
+#define IGU_USE_REGISTER_cstorm_type_1_sb_cleanup 3
+
+#define COMMAND_REG_INT_ACK 0x0
+#define COMMAND_REG_PROD_UPD 0x4
+#define COMMAND_REG_ATTN_BITS_UPD 0x8
+#define COMMAND_REG_ATTN_BITS_SET 0xc
+#define COMMAND_REG_ATTN_BITS_CLR 0x10
+#define COMMAND_REG_COALESCE_NOW 0x14
+#define COMMAND_REG_SIMD_MASK 0x18
+#define COMMAND_REG_SIMD_NOMASK 0x1c
+
+
+#define IGU_MEM_BASE 0x0000
+
+#define IGU_MEM_MSIX_BASE 0x0000
+#define IGU_MEM_MSIX_UPPER 0x007f
+#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff
+
+#define IGU_MEM_PBA_MSIX_BASE 0x0200
+#define IGU_MEM_PBA_MSIX_UPPER 0x0200
+
+#define IGU_CMD_BACKWARD_COMP_PROD_UPD 0x0201
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
+
+#define IGU_CMD_INT_ACK_BASE 0x0400
+#define IGU_CMD_INT_ACK_UPPER\
+ (IGU_CMD_INT_ACK_BASE + MAX_SB_PER_PORT * NUM_OF_PORTS_PER_PATH - 1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x04ff
+
+#define IGU_CMD_E2_PROD_UPD_BASE 0x0500
+#define IGU_CMD_E2_PROD_UPD_UPPER\
+ (IGU_CMD_E2_PROD_UPD_BASE + MAX_SB_PER_PORT * NUM_OF_PORTS_PER_PATH - 1)
+#define IGU_CMD_E2_PROD_UPD_RESERVED_UPPER 0x059f
+
+#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05a0
+#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05a1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05a2
+
+#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05a3
+#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05a4
+#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05a5
+#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6
+
+#define IGU_REG_RESERVED_UPPER 0x05ff
+/* Fields of IGU PF CONFIGURATION REGISTER */
+#define IGU_PF_CONF_FUNC_EN (0x1<<0) /* function enable */
+#define IGU_PF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
+#define IGU_PF_CONF_INT_LINE_EN (0x1<<2) /* INT enable */
+#define IGU_PF_CONF_ATTN_BIT_EN (0x1<<3) /* attention enable */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE (0x1<<5) /* simd all ones mode */
+
+/* Fields of IGU VF CONFIGURATION REGISTER */
+#define IGU_VF_CONF_FUNC_EN (0x1<<0) /* function enable */
+#define IGU_VF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
+#define IGU_VF_CONF_PARENT_MASK (0x3<<2) /* Parent PF */
+#define IGU_VF_CONF_PARENT_SHIFT 2 /* Parent PF */
+#define IGU_VF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
+
+
+#define IGU_BC_DSB_NUM_SEGS 5
+#define IGU_BC_NDSB_NUM_SEGS 2
+#define IGU_NORM_DSB_NUM_SEGS 2
+#define IGU_NORM_NDSB_NUM_SEGS 1
+#define IGU_BC_BASE_DSB_PROD 128
+#define IGU_NORM_BASE_DSB_PROD 136
+
+ /* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1; \
+ [5:2] = 0; [1:0] = PF number) */
+#define IGU_FID_ENCODE_IS_PF (0x1<<6)
+#define IGU_FID_ENCODE_IS_PF_SHIFT 6
+#define IGU_FID_VF_NUM_MASK (0x3f)
+#define IGU_FID_PF_NUM_MASK (0x7)
+
+#define IGU_REG_MAPPING_MEMORY_VALID (1<<0)
+#define IGU_REG_MAPPING_MEMORY_VECTOR_MASK (0x3F<<1)
+#define IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT 1
+#define IGU_REG_MAPPING_MEMORY_FID_MASK (0x7F<<7)
+#define IGU_REG_MAPPING_MEMORY_FID_SHIFT 7
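+/* Sketch of how a mapping memory entry decomposes using the masks above
+ * (illustrative only):
+ *	valid  = entry & IGU_REG_MAPPING_MEMORY_VALID;
+ *	vector = (entry & IGU_REG_MAPPING_MEMORY_VECTOR_MASK) >>
+ *		 IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT;
+ *	fid    = (entry & IGU_REG_MAPPING_MEMORY_FID_MASK) >>
+ *		 IGU_REG_MAPPING_MEMORY_FID_SHIFT;
+ * The fid then follows the IGU_FID_* encoding defined above.
+ */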
+
+
+#define CDU_REGION_NUMBER_XCM_AG 2
+#define CDU_REGION_NUMBER_UCM_AG 4
+
+
+/**
+ * String-to-compress [31:8] = CID (all 24 bits)
+ * String-to-compress [7:4] = Region
+ * String-to-compress [3:0] = Type
+ */
+#define CDU_VALID_DATA(_cid, _region, _type)\
+ (((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf)))
+#define CDU_CRC8(_cid, _region, _type)\
+ (calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff))
+#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type)\
+ (0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f))
+#define CDU_RSRVD_VALUE_TYPE_B(_crc, _type)\
+ (0x80 | (((_type)&0xf) << 3) | ((_crc) & 0x7))
+#define CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(_val) ((_val) & ~0x80)
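+/* Worked example (illustrative): for _cid = 0x10, _region = 2 and _type = 1,
+ * CDU_VALID_DATA() packs (0x10 << 8) | (2 << 4) | 1 = 0x1021; the type A
+ * reserved value is then 0x80 OR'ed with the low 7 bits of
+ * calc_crc8(0x1021, 0xff).
+ */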
+
+/******************************************************************************
+ * Description:
+ *	Calculates crc 8 on a 32-bit word value: polynomial 0-1-2-8.
+ *	Code was translated from Verilog.
+ * Return:
+ *	The calculated CRC8 value.
+ *****************************************************************************/
+static inline u8 calc_crc8(u32 data, u8 crc)
+{
+ u8 D[32];
+ u8 NewCRC[8];
+ u8 C[8];
+ u8 crc_res;
+ u8 i;
+
+ /* split the data into 32 bits */
+ for (i = 0; i < 32; i++) {
+ D[i] = (u8)(data & 1);
+ data = data >> 1;
+ }
+
+ /* split the crc into 8 bits */
+ for (i = 0; i < 8; i++) {
+ C[i] = crc & 1;
+ crc = crc >> 1;
+ }
+
+ NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
+ D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
+ C[6] ^ C[7];
+ NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
+ D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
+ D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^
+ C[6];
+ NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
+ D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
+ C[0] ^ C[1] ^ C[4] ^ C[5];
+ NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
+ D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
+ C[1] ^ C[2] ^ C[5] ^ C[6];
+ NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
+ D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
+ C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
+ NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
+ D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
+ C[3] ^ C[4] ^ C[7];
+ NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
+ D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^
+ C[5];
+ NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
+ D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^
+ C[6];
+
+ crc_res = 0;
+ for (i = 0; i < 8; i++)
+ crc_res |= (NewCRC[i] << i);
+
+ return crc_res;
+}
+
+
+#endif /* BNX2X_REG_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
new file mode 100644
index 000000000000..0440425c83d6
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -0,0 +1,5689 @@
+/* bnx2x_sp.c: Broadcom Everest network driver.
+ *
+ * Copyright 2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Vladislav Zolotarov
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32c.h>
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_sp.h"
+
+#define BNX2X_MAX_EMUL_MULTI 16
+
+/**** Exe Queue interfaces ****/
+
+/**
+ * bnx2x_exe_queue_init - init the Exe Queue object
+ *
+ * @bp: driver handle
+ * @o: pointer to the object
+ * @exe_len: length
+ * @owner: pointer to the owner
+ * @validate: validate function pointer
+ * @optimize: optimize function pointer
+ * @exec: execute function pointer
+ * @get: get function pointer
+ */
+static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
+ struct bnx2x_exe_queue_obj *o,
+ int exe_len,
+ union bnx2x_qable_obj *owner,
+ exe_q_validate validate,
+ exe_q_optimize optimize,
+ exe_q_execute exec,
+ exe_q_get get)
+{
+ memset(o, 0, sizeof(*o));
+
+ INIT_LIST_HEAD(&o->exe_queue);
+ INIT_LIST_HEAD(&o->pending_comp);
+
+ spin_lock_init(&o->lock);
+
+ o->exe_chunk_len = exe_len;
+ o->owner = owner;
+
+ /* Owner specific callbacks */
+ o->validate = validate;
+ o->optimize = optimize;
+ o->execute = exec;
+ o->get = get;
+
+ DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk "
+ "length of %d\n", exe_len);
+}
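+
+/* A minimal usage sketch (hypothetical owner object and my_* callbacks,
+ * not defined in this patch):
+ *
+ *	bnx2x_exe_queue_init(bp, &obj->exe_queue, EXE_QUEUE_LEN, &obj->qable,
+ *			     my_validate, my_optimize, my_execute, my_get);
+ *
+ * where the my_* callbacks match the exe_q_* types used in the prototype
+ * above.
+ */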
+
+static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
+ struct bnx2x_exeq_elem *elem)
+{
+ DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
+ kfree(elem);
+}
+
+static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
+{
+ struct bnx2x_exeq_elem *elem;
+ int cnt = 0;
+
+ spin_lock_bh(&o->lock);
+
+ list_for_each_entry(elem, &o->exe_queue, link)
+ cnt++;
+
+ spin_unlock_bh(&o->lock);
+
+ return cnt;
+}
+
+/**
+ * bnx2x_exe_queue_add - add a new element to the execution queue
+ *
+ * @bp: driver handle
+ * @o: queue
+ * @elem: new command element to add
+ * @restore: true - do not optimize the command
+ *
+ * If the element is optimized away or is illegal, it is freed.
+ */
+static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
+ struct bnx2x_exe_queue_obj *o,
+ struct bnx2x_exeq_elem *elem,
+ bool restore)
+{
+ int rc;
+
+ spin_lock_bh(&o->lock);
+
+ if (!restore) {
+ /* Try to optimize this element away (e.g. by cancelling
+ * an opposite command pending in the queue)
+ */
+ rc = o->optimize(bp, o->owner, elem);
+ if (rc)
+ goto free_and_exit;
+
+ /* Check if this request is ok */
+ rc = o->validate(bp, o->owner, elem);
+ if (rc) {
+ BNX2X_ERR("Preamble failed: %d\n", rc);
+ goto free_and_exit;
+ }
+ }
+
+ /* Validation passed: add it to the execution queue */
+ list_add_tail(&elem->link, &o->exe_queue);
+
+ spin_unlock_bh(&o->lock);
+
+ return 0;
+
+free_and_exit:
+ bnx2x_exe_queue_free_elem(bp, elem);
+
+ spin_unlock_bh(&o->lock);
+
+ return rc;
+}
+
+static inline void __bnx2x_exe_queue_reset_pending(
+ struct bnx2x *bp,
+ struct bnx2x_exe_queue_obj *o)
+{
+ struct bnx2x_exeq_elem *elem;
+
+ while (!list_empty(&o->pending_comp)) {
+ elem = list_first_entry(&o->pending_comp,
+ struct bnx2x_exeq_elem, link);
+
+ list_del(&elem->link);
+ bnx2x_exe_queue_free_elem(bp, elem);
+ }
+}
+
+static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
+ struct bnx2x_exe_queue_obj *o)
+{
+ spin_lock_bh(&o->lock);
+
+ __bnx2x_exe_queue_reset_pending(bp, o);
+
+ spin_unlock_bh(&o->lock);
+}
+
+/**
+ * bnx2x_exe_queue_step - execute one execution chunk atomically
+ *
+ * @bp: driver handle
+ * @o: queue
+ * @ramrod_flags: flags
+ *
+ * (Atomicity is ensured using the exe_queue->lock.)
+ */
+static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
+ struct bnx2x_exe_queue_obj *o,
+ unsigned long *ramrod_flags)
+{
+ struct bnx2x_exeq_elem *elem, spacer;
+ int cur_len = 0, rc;
+
+ memset(&spacer, 0, sizeof(spacer));
+
+ spin_lock_bh(&o->lock);
+
+ /*
+ * Next step should not be performed until the current is finished,
+ * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
+ * properly clear object internals without sending any command to the FW
+ * which also implies there won't be any completion to clear the
+ * 'pending' list.
+ */
+ if (!list_empty(&o->pending_comp)) {
+ if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
+ DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
+ "resetting pending_comp\n");
+ __bnx2x_exe_queue_reset_pending(bp, o);
+ } else {
+ spin_unlock_bh(&o->lock);
+ return 1;
+ }
+ }
+
+ /*
+ * Run through the pending commands list and build the next
+ * execution chunk.
+ */
+ while (!list_empty(&o->exe_queue)) {
+ elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
+ link);
+ WARN_ON(!elem->cmd_len);
+
+ if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
+ cur_len += elem->cmd_len;
+ /*
+ * Prevent both lists from being empty when moving an
+ * element. This allows calling
+ * bnx2x_exe_queue_empty() without locking.
+ */
+ list_add_tail(&spacer.link, &o->pending_comp);
+ mb();
+ list_del(&elem->link);
+ list_add_tail(&elem->link, &o->pending_comp);
+ list_del(&spacer.link);
+ } else
+ break;
+ }
+
+ /* Sanity check */
+ if (!cur_len) {
+ spin_unlock_bh(&o->lock);
+ return 0;
+ }
+
+ rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
+ if (rc < 0)
+ /*
+ * In case of an error, return the commands to the queue
+ * and reset pending_comp.
+ */
+ list_splice_init(&o->pending_comp, &o->exe_queue);
+ else if (!rc)
+ /*
+ * A return value of zero means there are no outstanding
+ * pending completions and we may dismiss the pending list.
+ */
+ __bnx2x_exe_queue_reset_pending(bp, o);
+
+ spin_unlock_bh(&o->lock);
+ return rc;
+}
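+
+/* Callers typically drive the queue along these lines (a sketch, not the
+ * driver's actual flow):
+ *
+ *	rc = bnx2x_exe_queue_step(bp, o, &ramrod_flags);
+ *
+ * rc > 0 means completions for a chunk are still outstanding; rc == 0 means
+ * the chunk completed synchronously. Step again from the completion path
+ * until bnx2x_exe_queue_empty(o) returns true.
+ */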
+
+static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
+{
+ bool empty = list_empty(&o->exe_queue);
+
+ /* Don't reorder!!! */
+ mb();
+
+ return empty && list_empty(&o->pending_comp);
+}
+
+static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
+ struct bnx2x *bp)
+{
+ DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
+ return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
+}
+
+/************************ raw_obj functions ***********************************/
+static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
+{
+ return !!test_bit(o->state, o->pstate);
+}
+
+static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
+{
+ smp_mb__before_clear_bit();
+ clear_bit(o->state, o->pstate);
+ smp_mb__after_clear_bit();
+}
+
+static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
+{
+ smp_mb__before_clear_bit();
+ set_bit(o->state, o->pstate);
+ smp_mb__after_clear_bit();
+}
+
+/**
+ * bnx2x_state_wait - wait until the given bit(state) is cleared
+ *
+ * @bp: device handle
+ * @state: state which is to be cleared
+ * @pstate:	pointer to the state buffer
+ *
+ */
+static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
+ unsigned long *pstate)
+{
+ /* can take a while if any port is running */
+ int cnt = 5000;
+
+ if (CHIP_REV_IS_EMUL(bp))
+ cnt *= 20;
+
+ DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
+
+ might_sleep();
+ while (cnt--) {
+ if (!test_bit(state, pstate)) {
+#ifdef BNX2X_STOP_ON_ERROR
+ DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
+#endif
+ return 0;
+ }
+
+ usleep_range(1000, 1000);
+
+ if (bp->panic)
+ return -EIO;
+ }
+
+ /* timeout! */
+ BNX2X_ERR("timeout waiting for state %d\n", state);
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
+
+ return -EBUSY;
+}
+
+static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
+{
+ return bnx2x_state_wait(bp, raw->state, raw->pstate);
+}
+
+/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
+/* credit handling callbacks */
+static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+ WARN_ON(!mp);
+
+ return mp->get_entry(mp, offset);
+}
+
+static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+ WARN_ON(!mp);
+
+ return mp->get(mp, 1);
+}
+
+static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
+{
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ WARN_ON(!vp);
+
+ return vp->get_entry(vp, offset);
+}
+
+static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ WARN_ON(!vp);
+
+ return vp->get(vp, 1);
+}
+
+static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ if (!mp->get(mp, 1))
+ return false;
+
+ if (!vp->get(vp, 1)) {
+ mp->put(mp, 1);
+ return false;
+ }
+
+ return true;
+}
+
+static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+ return mp->put_entry(mp, offset);
+}
+
+static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+ return mp->put(mp, 1);
+}
+
+static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
+{
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ return vp->put_entry(vp, offset);
+}
+
+static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ return vp->put(vp, 1);
+}
+
+static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ if (!mp->put(mp, 1))
+ return false;
+
+ if (!vp->put(vp, 1)) {
+ mp->get(mp, 1);
+ return false;
+ }
+
+ return true;
+}
+
+/* check_add() callbacks */
+static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ if (!is_valid_ether_addr(data->mac.mac))
+ return -EINVAL;
+
+ /* Check if a requested MAC already exists */
+ list_for_each_entry(pos, &o->head, link)
+ if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+ return -EEXIST;
+
+ return 0;
+}
+
+static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ list_for_each_entry(pos, &o->head, link)
+ if (data->vlan.vlan == pos->u.vlan.vlan)
+ return -EEXIST;
+
+ return 0;
+}
+
+static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ list_for_each_entry(pos, &o->head, link)
+ if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+ (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+ ETH_ALEN)))
+ return -EEXIST;
+
+ return 0;
+}
+
+/* check_del() callbacks */
+static struct bnx2x_vlan_mac_registry_elem *
+ bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ list_for_each_entry(pos, &o->head, link)
+ if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+ return pos;
+
+ return NULL;
+}
+
+static struct bnx2x_vlan_mac_registry_elem *
+ bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ list_for_each_entry(pos, &o->head, link)
+ if (data->vlan.vlan == pos->u.vlan.vlan)
+ return pos;
+
+ return NULL;
+}
+
+static struct bnx2x_vlan_mac_registry_elem *
+ bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ list_for_each_entry(pos, &o->head, link)
+ if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+ (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+ ETH_ALEN)))
+ return pos;
+
+ return NULL;
+}
+
+/* check_move() callback */
+static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
+ struct bnx2x_vlan_mac_obj *dst_o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+ int rc;
+
+ /* Check if we can delete the requested configuration from the first
+ * object.
+ */
+ pos = src_o->check_del(src_o, data);
+
+ /* check if configuration can be added */
+ rc = dst_o->check_add(dst_o, data);
+
+ /* If this classification can not be added (is already set)
+ * or can't be deleted - return an error.
+ */
+ if (rc || !pos)
+ return false;
+
+ return true;
+}
+
+static bool bnx2x_check_move_always_err(
+ struct bnx2x_vlan_mac_obj *src_o,
+ struct bnx2x_vlan_mac_obj *dst_o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ return false;
+}
+
+static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ u8 rx_tx_flag = 0;
+
+ if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
+ (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+ rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
+
+ if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
+ (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+ rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
+
+ return rx_tx_flag;
+}
+
+/* LLH CAM line allocations */
+enum {
+ LLH_CAM_ISCSI_ETH_LINE = 0,
+ LLH_CAM_ETH_LINE,
+ LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
+};
+
+static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+ bool add, unsigned char *dev_addr, int index)
+{
+ u32 wb_data[2];
+ u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
+ NIG_REG_LLH0_FUNC_MEM;
+
+ if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
+ return;
+
+ DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
+ (add ? "ADD" : "DELETE"), index);
+
+ if (add) {
+ /* LLH_FUNC_MEM is a u64 WB register */
+ reg_offset += 8*index;
+
+ wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
+ (dev_addr[4] << 8) | dev_addr[5]);
+ wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
+
+ REG_WR_DMAE(bp, reg_offset, wb_data, 2);
+ }
+
+ REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
+ NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
+}
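+
+/*
+ * Byte-layout example (illustrative): for MAC 00:11:22:33:44:55 the wide-bus
+ * write above produces wb_data[0] = 0x22334455 (bytes 2..5 of the address)
+ * and wb_data[1] = 0x00000011 (bytes 0..1), i.e. the 64-bit LLH_FUNC_MEM
+ * entry holds the address right-aligned.
+ */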
+
+/**
+ * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
+ *
+ * @bp: device handle
+ * @o: queue for which we want to configure this rule
+ * @add: if true the command is an ADD command, DEL otherwise
+ * @opcode: CLASSIFY_RULE_OPCODE_XXX
+ * @hdr: pointer to a header to setup
+ *
+ */
+static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
+ struct eth_classify_cmd_header *hdr)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+
+ hdr->client_id = raw->cl_id;
+ hdr->func_id = raw->func_id;
+
+	/* Rx and/or Tx (internal switching) configuration? */
+ hdr->cmd_general_data |=
+ bnx2x_vlan_mac_get_rx_tx_flag(o);
+
+ if (add)
+ hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
+
+ hdr->cmd_general_data |=
+ (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
+}
+
+/**
+ * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
+ *
+ * @cid: connection id
+ * @type: BNX2X_FILTER_XXX_PENDING
+ * @hdr:	pointer to a header to setup
+ * @rule_cnt:	number of configured rules
+ *
+ * Currently we always configure one rule; the echo field is set to contain a
+ * CID and an opcode type.
+ */
+static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
+ struct eth_classify_header *hdr, int rule_cnt)
+{
+ hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
+ hdr->rule_cnt = (u8)rule_cnt;
+}
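+
+/*
+ * A sketch of the reverse mapping (assuming BNX2X_SWCID_MASK covers the low
+ * BNX2X_SWCID_SHIFT bits): a completion handler can recover both values from
+ * the echo field encoded above with
+ *
+ *	cid  = hdr->echo & BNX2X_SWCID_MASK;
+ *	type = hdr->echo >> BNX2X_SWCID_SHIFT;
+ */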
+
+/* hw_config() callbacks */
+static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem, int rule_idx,
+ int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct eth_classify_rules_ramrod_data *data =
+ (struct eth_classify_rules_ramrod_data *)(raw->rdata);
+ int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
+ union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+	bool add = (cmd == BNX2X_VLAN_MAC_ADD);
+ unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
+ u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
+
+ /*
+ * Set LLH CAM entry: currently only iSCSI and ETH macs are
+ * relevant. In addition, current implementation is tuned for a
+ * single ETH MAC.
+ *
+	 * When a PF configuration with multiple unicast ETH MACs in switch
+	 * independent mode is required (NetQ, multiple netdev MACs,
+	 * etc.), consider better utilisation of the 8 per-function MAC
+	 * entries in the LLH register. There are also
+	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that bring the
+	 * total number of CAM entries to 16.
+ *
+ * Currently we won't configure NIG for MACs other than a primary ETH
+ * MAC and iSCSI L2 MAC.
+ *
+ * If this MAC is moving from one Queue to another, no need to change
+ * NIG configuration.
+ */
+ if (cmd != BNX2X_VLAN_MAC_MOVE) {
+ if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
+ bnx2x_set_mac_in_nig(bp, add, mac,
+ LLH_CAM_ISCSI_ETH_LINE);
+ else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
+ bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
+ }
+
+ /* Reset the ramrod data buffer for the first rule */
+ if (rule_idx == 0)
+ memset(data, 0, sizeof(*data));
+
+ /* Setup a command header */
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
+ &rule_entry->mac.header);
+
+ DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
+ add ? "add" : "delete", mac, raw->cl_id);
+
+ /* Set a MAC itself */
+ bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
+ &rule_entry->mac.mac_mid,
+ &rule_entry->mac.mac_lsb, mac);
+
+ /* MOVE: Add a rule that will add this MAC to the target Queue */
+ if (cmd == BNX2X_VLAN_MAC_MOVE) {
+ rule_entry++;
+ rule_cnt++;
+
+ /* Setup ramrod data */
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
+ elem->cmd_data.vlan_mac.target_obj,
+ true, CLASSIFY_RULE_OPCODE_MAC,
+ &rule_entry->mac.header);
+
+ /* Set a MAC itself */
+ bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
+ &rule_entry->mac.mac_mid,
+ &rule_entry->mac.mac_lsb, mac);
+ }
+
+ /* Set the ramrod data header */
+	/* TODO: take this to the higher level in order to prevent multiple
+	   writes */
+ bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+ rule_cnt);
+}
+
+/**
+ * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
+ *
+ * @bp: device handle
+ * @o: queue
+ * @type:	BNX2X_FILTER_XXX_PENDING
+ * @cam_offset: offset in cam memory
+ * @hdr: pointer to a header to setup
+ *
+ * E1/E1H
+ */
+static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
+ struct mac_configuration_hdr *hdr)
+{
+ struct bnx2x_raw_obj *r = &o->raw;
+
+ hdr->length = 1;
+ hdr->offset = (u8)cam_offset;
+ hdr->client_id = 0xff;
+ hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
+}
+
+static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
+ u16 vlan_id, struct mac_configuration_entry *cfg_entry)
+{
+ struct bnx2x_raw_obj *r = &o->raw;
+ u32 cl_bit_vec = (1 << r->cl_id);
+
+ cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
+ cfg_entry->pf_id = r->func_id;
+ cfg_entry->vlan_id = cpu_to_le16(vlan_id);
+
+ if (add) {
+ SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_SET);
+ SET_FLAG(cfg_entry->flags,
+ MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
+
+ /* Set a MAC in a ramrod data */
+ bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
+ &cfg_entry->middle_mac_addr,
+ &cfg_entry->lsb_mac_addr, mac);
+ } else
+ SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_INVALIDATE);
+}
+
+static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
+ u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
+{
+ struct mac_configuration_entry *cfg_entry = &config->config_table[0];
+ struct bnx2x_raw_obj *raw = &o->raw;
+
+ bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
+ &config->hdr);
+ bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
+ cfg_entry);
+
+ DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
+ add ? "setting" : "clearing",
+ mac, raw->cl_id, cam_offset);
+}
+
+/**
+ * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
+ *
+ * @bp: device handle
+ * @o: bnx2x_vlan_mac_obj
+ * @elem: bnx2x_exeq_elem
+ * @rule_idx: rule_idx
+ * @cam_offset: cam_offset
+ */
+static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem, int rule_idx,
+ int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct mac_configuration_cmd *config =
+ (struct mac_configuration_cmd *)(raw->rdata);
+ /*
+ * 57710 and 57711 do not support MOVE command,
+ * so it's either ADD or DEL
+ */
+	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD);
+
+ /* Reset the ramrod data buffer */
+ memset(config, 0, sizeof(*config));
+
+ bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
+ cam_offset, add,
+ elem->cmd_data.vlan_mac.u.mac.mac, 0,
+ ETH_VLAN_FILTER_ANY_VLAN, config);
+}
+
+static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem, int rule_idx,
+ int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct eth_classify_rules_ramrod_data *data =
+ (struct eth_classify_rules_ramrod_data *)(raw->rdata);
+ int rule_cnt = rule_idx + 1;
+ union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+ int cmd = elem->cmd_data.vlan_mac.cmd;
+	bool add = (cmd == BNX2X_VLAN_MAC_ADD);
+ u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
+
+ /* Reset the ramrod data buffer for the first rule */
+ if (rule_idx == 0)
+ memset(data, 0, sizeof(*data));
+
+ /* Set a rule header */
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
+ &rule_entry->vlan.header);
+
+ DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
+ vlan);
+
+ /* Set a VLAN itself */
+ rule_entry->vlan.vlan = cpu_to_le16(vlan);
+
+ /* MOVE: Add a rule that will add this MAC to the target Queue */
+ if (cmd == BNX2X_VLAN_MAC_MOVE) {
+ rule_entry++;
+ rule_cnt++;
+
+ /* Setup ramrod data */
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
+ elem->cmd_data.vlan_mac.target_obj,
+ true, CLASSIFY_RULE_OPCODE_VLAN,
+ &rule_entry->vlan.header);
+
+ /* Set a VLAN itself */
+ rule_entry->vlan.vlan = cpu_to_le16(vlan);
+ }
+
+ /* Set the ramrod data header */
+	/* TODO: take this to the higher level in order to prevent multiple
+	   writes */
+ bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+ rule_cnt);
+}
+
+static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem,
+ int rule_idx, int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct eth_classify_rules_ramrod_data *data =
+ (struct eth_classify_rules_ramrod_data *)(raw->rdata);
+ int rule_cnt = rule_idx + 1;
+ union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+ int cmd = elem->cmd_data.vlan_mac.cmd;
+	bool add = (cmd == BNX2X_VLAN_MAC_ADD);
+ u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
+ u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
+
+ /* Reset the ramrod data buffer for the first rule */
+ if (rule_idx == 0)
+ memset(data, 0, sizeof(*data));
+
+ /* Set a rule header */
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
+ &rule_entry->pair.header);
+
+	/* Set the VLAN and MAC themselves */
+ rule_entry->pair.vlan = cpu_to_le16(vlan);
+ bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+ &rule_entry->pair.mac_mid,
+ &rule_entry->pair.mac_lsb, mac);
+
+ /* MOVE: Add a rule that will add this MAC to the target Queue */
+ if (cmd == BNX2X_VLAN_MAC_MOVE) {
+ rule_entry++;
+ rule_cnt++;
+
+ /* Setup ramrod data */
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
+ elem->cmd_data.vlan_mac.target_obj,
+ true, CLASSIFY_RULE_OPCODE_PAIR,
+ &rule_entry->pair.header);
+
+		/* Set the VLAN and MAC */
+ rule_entry->pair.vlan = cpu_to_le16(vlan);
+ bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+ &rule_entry->pair.mac_mid,
+ &rule_entry->pair.mac_lsb, mac);
+ }
+
+ /* Set the ramrod data header */
+	/* TODO: take this to the higher level in order to prevent multiple
+	   writes */
+ bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+ rule_cnt);
+}
+
+/**
+ * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
+ *
+ * @bp: device handle
+ * @o: bnx2x_vlan_mac_obj
+ * @elem: bnx2x_exeq_elem
+ * @rule_idx: rule_idx
+ * @cam_offset: cam_offset
+ */
+static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem,
+ int rule_idx, int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct mac_configuration_cmd *config =
+ (struct mac_configuration_cmd *)(raw->rdata);
+ /*
+ * 57710 and 57711 do not support MOVE command,
+ * so it's either ADD or DEL
+ */
+	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD);
+
+ /* Reset the ramrod data buffer */
+ memset(config, 0, sizeof(*config));
+
+ bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
+ cam_offset, add,
+ elem->cmd_data.vlan_mac.u.vlan_mac.mac,
+ elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
+ ETH_VLAN_FILTER_CLASSIFY, config);
+}
+
+#define list_next_entry(pos, member) \
+ list_entry((pos)->member.next, typeof(*(pos)), member)
+
+/**
+ * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
+ *
+ * @bp: device handle
+ * @p: command parameters
+ * @ppos:	pointer to the cookie
+ *
+ * reconfigure next MAC/VLAN/VLAN-MAC element from the
+ * previously configured elements list.
+ *
+ * From the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags
+ * is taken into account.
+ *
+ * The cookie should be given back in the next call to make the function
+ * handle the next element. If *ppos is set to NULL the iterator is restarted.
+ * If *ppos == NULL on return, the last element has been handled.
+ *
+ */
+static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p,
+ struct bnx2x_vlan_mac_registry_elem **ppos)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+ struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+
+ /* If list is empty - there is nothing to do here */
+ if (list_empty(&o->head)) {
+ *ppos = NULL;
+ return 0;
+ }
+
+ /* make a step... */
+ if (*ppos == NULL)
+ *ppos = list_first_entry(&o->head,
+ struct bnx2x_vlan_mac_registry_elem,
+ link);
+ else
+ *ppos = list_next_entry(*ppos, link);
+
+ pos = *ppos;
+
+ /* If it's the last step - return NULL */
+ if (list_is_last(&pos->link, &o->head))
+ *ppos = NULL;
+
+ /* Prepare a 'user_req' */
+ memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
+
+ /* Set the command */
+ p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
+
+ /* Set vlan_mac_flags */
+ p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
+
+ /* Set a restore bit */
+ __set_bit(RAMROD_RESTORE, &p->ramrod_flags);
+
+ return bnx2x_config_vlan_mac(bp, p);
+}
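+
+/*
+ * Typical restore iteration (an illustrative sketch): start with a NULL
+ * cookie and keep calling until the cookie comes back as NULL again:
+ *
+ *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
+ *
+ *	do {
+ *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
+ *		if (rc < 0)
+ *			return rc;
+ *	} while (pos);
+ */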
+
+/*
+ * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
+ * pointer to the element matching the given criteria, or NULL if no such
+ * element has been found.
+ */
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
+ struct bnx2x_exe_queue_obj *o,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_exeq_elem *pos;
+ struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
+
+	/* Check the commands pending for execution */
+ list_for_each_entry(pos, &o->exe_queue, link)
+ if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
+ sizeof(*data)) &&
+ (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
+ return pos;
+
+ return NULL;
+}
+
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
+ struct bnx2x_exe_queue_obj *o,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_exeq_elem *pos;
+ struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
+
+	/* Check the commands pending for execution */
+ list_for_each_entry(pos, &o->exe_queue, link)
+ if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
+ sizeof(*data)) &&
+ (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
+ return pos;
+
+ return NULL;
+}
+
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
+ struct bnx2x_exe_queue_obj *o,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_exeq_elem *pos;
+ struct bnx2x_vlan_mac_ramrod_data *data =
+ &elem->cmd_data.vlan_mac.u.vlan_mac;
+
+	/* Check the commands pending for execution */
+ list_for_each_entry(pos, &o->exe_queue, link)
+ if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
+ sizeof(*data)) &&
+ (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
+ return pos;
+
+ return NULL;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
+ *
+ * @bp: device handle
+ * @qo: bnx2x_qable_obj
+ * @elem: bnx2x_exeq_elem
+ *
+ * Checks that the requested configuration can be added. If yes and if
+ * requested, consume CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ *
+ */
+static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
+ union bnx2x_qable_obj *qo,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+ struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+ int rc;
+
+ /* Check the registry */
+ rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
+ if (rc) {
+ DP(BNX2X_MSG_SP, "ADD command is not allowed considering "
+ "current registry state\n");
+ return rc;
+ }
+
+ /*
+ * Check if there is a pending ADD command for this
+ * MAC/VLAN/VLAN-MAC. Return an error if there is.
+ */
+ if (exeq->get(exeq, elem)) {
+ DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
+ return -EEXIST;
+ }
+
+ /*
+ * TODO: Check the pending MOVE from other objects where this
+ * object is a destination object.
+ */
+
+	/* Consume the credit unless explicitly asked not to */
+ if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+ o->get_credit(o)))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
+ *
+ * @bp: device handle
+ * @qo:	qable object to check
+ * @elem: element that needs to be deleted
+ *
+ * Checks that the requested configuration can be deleted. If yes and if
+ * requested, returns a CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ */
+static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
+ union bnx2x_qable_obj *qo,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+ struct bnx2x_vlan_mac_registry_elem *pos;
+ struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+ struct bnx2x_exeq_elem query_elem;
+
+	/* If this classification cannot be deleted (doesn't exist),
+	 * return -EEXIST.
+ */
+ pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+ if (!pos) {
+ DP(BNX2X_MSG_SP, "DEL command is not allowed considering "
+ "current registry state\n");
+ return -EEXIST;
+ }
+
+ /*
+ * Check if there are pending DEL or MOVE commands for this
+ * MAC/VLAN/VLAN-MAC. Return an error if so.
+ */
+ memcpy(&query_elem, elem, sizeof(query_elem));
+
+ /* Check for MOVE commands */
+ query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
+ if (exeq->get(exeq, &query_elem)) {
+ BNX2X_ERR("There is a pending MOVE command already\n");
+ return -EINVAL;
+ }
+
+ /* Check for DEL commands */
+ if (exeq->get(exeq, elem)) {
+ DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
+ return -EEXIST;
+ }
+
+	/* Return the credit to the pool unless explicitly asked not to */
+ if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+ o->put_credit(o))) {
+ BNX2X_ERR("Failed to return a credit\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
+ *
+ * @bp: device handle
+ * @qo:	qable object to check (source)
+ * @elem: element that needs to be moved
+ *
+ * Checks that the requested configuration can be moved. If yes and if
+ * requested, returns a CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ */
+static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
+ union bnx2x_qable_obj *qo,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
+ struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
+ struct bnx2x_exeq_elem query_elem;
+ struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
+ struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
+
+ /*
+ * Check if we can perform this operation based on the current registry
+ * state.
+ */
+ if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
+ DP(BNX2X_MSG_SP, "MOVE command is not allowed considering "
+ "current registry state\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Check if there is an already pending DEL or MOVE command for the
+ * source object or ADD command for a destination object. Return an
+ * error if so.
+ */
+ memcpy(&query_elem, elem, sizeof(query_elem));
+
+ /* Check DEL on source */
+ query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
+ if (src_exeq->get(src_exeq, &query_elem)) {
+ BNX2X_ERR("There is a pending DEL command on the source "
+ "queue already\n");
+ return -EINVAL;
+ }
+
+ /* Check MOVE on source */
+ if (src_exeq->get(src_exeq, elem)) {
+ DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
+ return -EEXIST;
+ }
+
+ /* Check ADD on destination */
+ query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
+ if (dest_exeq->get(dest_exeq, &query_elem)) {
+ BNX2X_ERR("There is a pending ADD command on the "
+ "destination queue already\n");
+ return -EINVAL;
+ }
+
+	/* Consume the credit unless explicitly asked not to */
+ if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
+ &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+ dest_o->get_credit(dest_o)))
+ return -EINVAL;
+
+ if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+ src_o->put_credit(src_o))) {
+ /* return the credit taken from dest... */
+ dest_o->put_credit(dest_o);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
+ union bnx2x_qable_obj *qo,
+ struct bnx2x_exeq_elem *elem)
+{
+ switch (elem->cmd_data.vlan_mac.cmd) {
+ case BNX2X_VLAN_MAC_ADD:
+ return bnx2x_validate_vlan_mac_add(bp, qo, elem);
+ case BNX2X_VLAN_MAC_DEL:
+ return bnx2x_validate_vlan_mac_del(bp, qo, elem);
+ case BNX2X_VLAN_MAC_MOVE:
+ return bnx2x_validate_vlan_mac_move(bp, qo, elem);
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * bnx2x_wait_vlan_mac - passively wait up to 5 seconds until all work completes.
+ *
+ * @bp: device handle
+ * @o: bnx2x_vlan_mac_obj
+ *
+ */
+static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ int cnt = 5000, rc;
+ struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+ struct bnx2x_raw_obj *raw = &o->raw;
+
+ while (cnt--) {
+ /* Wait for the current command to complete */
+ rc = raw->wait_comp(bp, raw);
+ if (rc)
+ return rc;
+
+ /* Wait until there are no pending commands */
+ if (!bnx2x_exe_queue_empty(exeq))
+ usleep_range(1000, 1000);
+ else
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+/**
+ * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
+ *
+ * @bp: device handle
+ * @o: bnx2x_vlan_mac_obj
+ * @cqe:	completion element
+ * @ramrod_flags:	if RAMROD_CONT is set, the next bulk of pending
+ *			commands is executed
+ *
+ */
+static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ union event_ring_elem *cqe,
+ unsigned long *ramrod_flags)
+{
+ struct bnx2x_raw_obj *r = &o->raw;
+ int rc;
+
+ /* Reset pending list */
+ bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
+
+ /* Clear pending */
+ r->clear_pending(r);
+
+ /* If ramrod failed this is most likely a SW bug */
+ if (cqe->message.error)
+ return -EINVAL;
+
+	/* Run the next bulk of pending commands if requested */
+ if (test_bit(RAMROD_CONT, ramrod_flags)) {
+ rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+ if (rc < 0)
+ return rc;
+ }
+
+ /* If there is more work to do return PENDING */
+ if (!bnx2x_exe_queue_empty(&o->exe_queue))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
+ *
+ * @bp: device handle
+ * @qo:	bnx2x_qable_obj
+ * @elem: bnx2x_exeq_elem
+ */
+static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
+ union bnx2x_qable_obj *qo,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_exeq_elem query, *pos;
+ struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+ struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+
+ memcpy(&query, elem, sizeof(query));
+
+ switch (elem->cmd_data.vlan_mac.cmd) {
+ case BNX2X_VLAN_MAC_ADD:
+ query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
+ break;
+ case BNX2X_VLAN_MAC_DEL:
+ query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
+ break;
+ default:
+ /* Don't handle anything other than ADD or DEL */
+ return 0;
+ }
+
+ /* If we found the appropriate element - delete it */
+ pos = exeq->get(exeq, &query);
+	if (pos) {
+ /* Return the credit of the optimized command */
+ if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
+ if ((query.cmd_data.vlan_mac.cmd ==
+ BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
+ BNX2X_ERR("Failed to return the credit for the "
+ "optimized ADD command\n");
+ return -EINVAL;
+ } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
+ BNX2X_ERR("Failed to recover the credit from "
+ "the optimized DEL command\n");
+ return -EINVAL;
+ }
+ }
+
+ DP(BNX2X_MSG_SP, "Optimizing %s command\n",
+ (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+ "ADD" : "DEL");
+
+ list_del(&pos->link);
+ bnx2x_exe_queue_free_elem(bp, pos);
+ return 1;
+ }
+
+ return 0;
+}
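+
+/*
+ * Example (illustrative): if an ADD for MAC X is still sitting in the
+ * execution queue when a DEL for the same MAC X arrives, the pair cancels
+ * out - the pending ADD is removed, its CAM credit is returned and the new
+ * DEL is reported as optimized (return value 1) without ever reaching the FW.
+ */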
+
+/**
+ * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
+ *
+ * @bp: device handle
+ * @o:		vlan_mac object the element belongs to
+ * @elem:	execution queue element holding the command
+ * @restore:	true if this is a restore flow
+ * @re:		used to return the prepared registry element
+ *
+ * prepare a registry element according to the current command request.
+ */
+static inline int bnx2x_vlan_mac_get_registry_elem(
+ struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem,
+ bool restore,
+ struct bnx2x_vlan_mac_registry_elem **re)
+{
+ int cmd = elem->cmd_data.vlan_mac.cmd;
+ struct bnx2x_vlan_mac_registry_elem *reg_elem;
+
+ /* Allocate a new registry element if needed. */
+ if (!restore &&
+ ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
+ reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
+ if (!reg_elem)
+ return -ENOMEM;
+
+ /* Get a new CAM offset */
+ if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
+ /*
+			 * This should never happen, because we have checked
+			 * the CAM availability in the 'validate' step.
+ */
+ WARN_ON(1);
+ kfree(reg_elem);
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
+
+ /* Set a VLAN-MAC data */
+ memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
+ sizeof(reg_elem->u));
+
+ /* Copy the flags (needed for DEL and RESTORE flows) */
+ reg_elem->vlan_mac_flags =
+ elem->cmd_data.vlan_mac.vlan_mac_flags;
+ } else /* DEL, RESTORE */
+ reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+
+ *re = reg_elem;
+ return 0;
+}
+
+/**
+ * bnx2x_execute_vlan_mac - execute vlan mac command
+ *
+ * @bp: device handle
+ * @qo:		qable object the commands belong to
+ * @exe_chunk:	list of commands to execute
+ * @ramrod_flags:	execution flags (RAMROD_XXX)
+ *
+ * go and send a ramrod!
+ */
+static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
+ union bnx2x_qable_obj *qo,
+ struct list_head *exe_chunk,
+ unsigned long *ramrod_flags)
+{
+ struct bnx2x_exeq_elem *elem;
+ struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
+ struct bnx2x_raw_obj *r = &o->raw;
+ int rc, idx = 0;
+ bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
+ bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
+ struct bnx2x_vlan_mac_registry_elem *reg_elem;
+ int cmd;
+
+ /*
+	 * If DRIVER_ONLY execution is requested, clean up the registry
+	 * and exit. Otherwise send a ramrod to the FW.
+ */
+ if (!drv_only) {
+ WARN_ON(r->check_pending(r));
+
+ /* Set pending */
+ r->set_pending(r);
+
+		/* Fill the ramrod data */
+ list_for_each_entry(elem, exe_chunk, link) {
+ cmd = elem->cmd_data.vlan_mac.cmd;
+ /*
+ * We will add to the target object in MOVE command, so
+ * change the object for a CAM search.
+ */
+ if (cmd == BNX2X_VLAN_MAC_MOVE)
+ cam_obj = elem->cmd_data.vlan_mac.target_obj;
+ else
+ cam_obj = o;
+
+ rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
+ elem, restore,
+ &reg_elem);
+ if (rc)
+ goto error_exit;
+
+ WARN_ON(!reg_elem);
+
+ /* Push a new entry into the registry */
+ if (!restore &&
+ ((cmd == BNX2X_VLAN_MAC_ADD) ||
+ (cmd == BNX2X_VLAN_MAC_MOVE)))
+ list_add(&reg_elem->link, &cam_obj->head);
+
+ /* Configure a single command in a ramrod data buffer */
+ o->set_one_rule(bp, o, elem, idx,
+ reg_elem->cam_offset);
+
+ /* MOVE command consumes 2 entries in the ramrod data */
+ if (cmd == BNX2X_VLAN_MAC_MOVE)
+ idx += 2;
+ else
+ idx++;
+ }
+
+ /*
+		 * No need for an explicit memory barrier here: we would
+		 * anyway need to order the write to the SPQ element against
+		 * the update of the SPQ producer, which involves a memory
+		 * read, so a full memory barrier is placed there (inside
+		 * bnx2x_sp_post()).
+ */
+
+ rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
+ U64_HI(r->rdata_mapping),
+ U64_LO(r->rdata_mapping),
+ ETH_CONNECTION_TYPE);
+ if (rc)
+ goto error_exit;
+ }
+
+ /* Now, when we are done with the ramrod - clean up the registry */
+ list_for_each_entry(elem, exe_chunk, link) {
+ cmd = elem->cmd_data.vlan_mac.cmd;
+ if ((cmd == BNX2X_VLAN_MAC_DEL) ||
+ (cmd == BNX2X_VLAN_MAC_MOVE)) {
+ reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+
+ WARN_ON(!reg_elem);
+
+ o->put_cam_offset(o, reg_elem->cam_offset);
+ list_del(&reg_elem->link);
+ kfree(reg_elem);
+ }
+ }
+
+	return drv_only ? 0 : 1;
+
+error_exit:
+ r->clear_pending(r);
+
+ /* Cleanup a registry in case of a failure */
+ list_for_each_entry(elem, exe_chunk, link) {
+ cmd = elem->cmd_data.vlan_mac.cmd;
+
+ if (cmd == BNX2X_VLAN_MAC_MOVE)
+ cam_obj = elem->cmd_data.vlan_mac.target_obj;
+ else
+ cam_obj = o;
+
+ /* Delete all newly added above entries */
+ if (!restore &&
+ ((cmd == BNX2X_VLAN_MAC_ADD) ||
+ (cmd == BNX2X_VLAN_MAC_MOVE))) {
+ reg_elem = o->check_del(cam_obj,
+ &elem->cmd_data.vlan_mac.u);
+ if (reg_elem) {
+ list_del(&reg_elem->link);
+ kfree(reg_elem);
+ }
+ }
+ }
+
+ return rc;
+}
+
+static inline int bnx2x_vlan_mac_push_new_cmd(
+ struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p)
+{
+ struct bnx2x_exeq_elem *elem;
+ struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+ bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
+
+ /* Allocate the execution queue element */
+ elem = bnx2x_exe_queue_alloc_elem(bp);
+ if (!elem)
+ return -ENOMEM;
+
+ /* Set the command 'length' */
+ switch (p->user_req.cmd) {
+ case BNX2X_VLAN_MAC_MOVE:
+ elem->cmd_len = 2;
+ break;
+ default:
+ elem->cmd_len = 1;
+ }
+
+ /* Fill the object specific info */
+ memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
+
+ /* Try to add a new command to the pending list */
+ return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
+}
+
+/**
+ * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
+ *
+ * @bp: device handle
+ * @p:		command parameters
+ *
+ */
+int bnx2x_config_vlan_mac(
+ struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p)
+{
+ int rc = 0;
+ struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+ unsigned long *ramrod_flags = &p->ramrod_flags;
+ bool cont = test_bit(RAMROD_CONT, ramrod_flags);
+ struct bnx2x_raw_obj *raw = &o->raw;
+
+ /*
+ * Add new elements to the execution list for commands that require it.
+ */
+ if (!cont) {
+ rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
+ if (rc)
+ return rc;
+ }
+
+ /*
+	 * If nothing more is going to be executed in this iteration, return
+	 * PENDING if there are still pending commands.
+ */
+ if (!bnx2x_exe_queue_empty(&o->exe_queue))
+ rc = 1;
+
+ if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
+ DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
+ "clearing a pending bit.\n");
+ raw->clear_pending(raw);
+ }
+
+ /* Execute commands if required */
+ if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
+ test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
+ rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+ if (rc < 0)
+ return rc;
+ }
+
+ /*
+ * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
+	 * then the user wants to wait until the last command is done.
+ */
+ if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
+ /*
+		 * Wait at most for the current exe_queue length plus one
+		 * iterations (the extra one is for the currently pending
+		 * command).
+ */
+ int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
+
+ while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
+ max_iterations--) {
+
+ /* Wait for the current command to complete */
+ rc = raw->wait_comp(bp, raw);
+ if (rc)
+ return rc;
+
+ /* Make a next step */
+ rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
+ ramrod_flags);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+ }
+
+ return rc;
+}
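+
+/*
+ * Usage sketch (illustrative; mirrors the DEL flow in
+ * bnx2x_vlan_mac_del_all() below). 'mac_obj' and 'addr' stand for the
+ * caller's object and MAC buffer; a synchronous single-MAC ADD boils down to:
+ *
+ *	struct bnx2x_vlan_mac_ramrod_params p;
+ *
+ *	memset(&p, 0, sizeof(p));
+ *	p.vlan_mac_obj = mac_obj;
+ *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ *	memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
+ *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
+ *	rc = bnx2x_config_vlan_mac(bp, &p);
+ */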
+
+/**
+ * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
+ *
+ * @bp: device handle
+ * @o:		vlan_mac object to delete the elements from
+ * @vlan_mac_flags:	vlan_mac_flags of the elements to be deleted
+ * @ramrod_flags:	execution flags to be used for this deletion
+ *
+ * Returns 0 if the last operation has completed successfully and there are no
+ * more elements left, a positive value if the last operation has completed
+ * successfully and there are more previously configured elements, and a
+ * negative value if the current operation has failed.
+ */
+static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ unsigned long *vlan_mac_flags,
+ unsigned long *ramrod_flags)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos = NULL;
+ int rc = 0;
+ struct bnx2x_vlan_mac_ramrod_params p;
+ struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+ struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+
+ /* Clear pending commands first */
+
+ spin_lock_bh(&exeq->lock);
+
+	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
+		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
+		    *vlan_mac_flags) {
+			list_del(&exeq_pos->link);
+			/* Free the element: it is no longer needed and would
+			 * otherwise be leaked.
+			 */
+			bnx2x_exe_queue_free_elem(bp, exeq_pos);
+		}
+	}
+
+ spin_unlock_bh(&exeq->lock);
+
+ /* Prepare a command request */
+ memset(&p, 0, sizeof(p));
+ p.vlan_mac_obj = o;
+ p.ramrod_flags = *ramrod_flags;
+ p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+
+ /*
+	 * Queue a DEL command for every matching VLAN-MAC without actually
+	 * executing anything; the final call below (with RAMROD_CONT set)
+	 * will execute them.
+ */
+ __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
+ __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
+ __clear_bit(RAMROD_CONT, &p.ramrod_flags);
+
+ list_for_each_entry(pos, &o->head, link) {
+ if (pos->vlan_mac_flags == *vlan_mac_flags) {
+ p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
+ memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
+ rc = bnx2x_config_vlan_mac(bp, &p);
+ if (rc < 0) {
+ BNX2X_ERR("Failed to add a new DEL command\n");
+ return rc;
+ }
+ }
+ }
+
+ p.ramrod_flags = *ramrod_flags;
+ __set_bit(RAMROD_CONT, &p.ramrod_flags);
+
+ return bnx2x_config_vlan_mac(bp, &p);
+}
+
+static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
+ u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type)
+{
+ raw->func_id = func_id;
+ raw->cid = cid;
+ raw->cl_id = cl_id;
+ raw->rdata = rdata;
+ raw->rdata_mapping = rdata_mapping;
+ raw->state = state;
+ raw->pstate = pstate;
+ raw->obj_type = type;
+ raw->check_pending = bnx2x_raw_check_pending;
+ raw->clear_pending = bnx2x_raw_clear_pending;
+ raw->set_pending = bnx2x_raw_set_pending;
+ raw->wait_comp = bnx2x_raw_wait;
+}
+
+static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool,
+ struct bnx2x_credit_pool_obj *vlans_pool)
+{
+ INIT_LIST_HEAD(&o->head);
+
+ o->macs_pool = macs_pool;
+ o->vlans_pool = vlans_pool;
+
+ o->delete_all = bnx2x_vlan_mac_del_all;
+ o->restore = bnx2x_vlan_mac_restore;
+ o->complete = bnx2x_complete_vlan_mac;
+ o->wait = bnx2x_wait_vlan_mac;
+
+ bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
+ state, pstate, type);
+}
+
+void bnx2x_init_mac_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *mac_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool)
+{
+ union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
+
+ bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
+ rdata_mapping, state, pstate, type,
+ macs_pool, NULL);
+
+ /* CAM credit pool handling */
+ mac_obj->get_credit = bnx2x_get_credit_mac;
+ mac_obj->put_credit = bnx2x_put_credit_mac;
+ mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+ mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+ if (CHIP_IS_E1x(bp)) {
+ mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
+ mac_obj->check_del = bnx2x_check_mac_del;
+ mac_obj->check_add = bnx2x_check_mac_add;
+ mac_obj->check_move = bnx2x_check_move_always_err;
+ mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &mac_obj->exe_queue, 1, qable_obj,
+ bnx2x_validate_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_mac);
+ } else {
+ mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
+ mac_obj->check_del = bnx2x_check_mac_del;
+ mac_obj->check_add = bnx2x_check_mac_add;
+ mac_obj->check_move = bnx2x_check_move;
+ mac_obj->ramrod_cmd =
+ RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
+ qable_obj, bnx2x_validate_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_mac);
+ }
+}
+
+void bnx2x_init_vlan_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *vlans_pool)
+{
+ union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
+
+ bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
+ rdata_mapping, state, pstate, type, NULL,
+ vlans_pool);
+
+ vlan_obj->get_credit = bnx2x_get_credit_vlan;
+ vlan_obj->put_credit = bnx2x_put_credit_vlan;
+ vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
+ vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
+
+ if (CHIP_IS_E1x(bp)) {
+ BNX2X_ERR("Do not support chips others than E2 and newer\n");
+ BUG();
+ } else {
+ vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
+ vlan_obj->check_del = bnx2x_check_vlan_del;
+ vlan_obj->check_add = bnx2x_check_vlan_add;
+ vlan_obj->check_move = bnx2x_check_move;
+ vlan_obj->ramrod_cmd =
+ RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
+ qable_obj, bnx2x_validate_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_vlan);
+ }
+}
+
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool,
+ struct bnx2x_credit_pool_obj *vlans_pool)
+{
+ union bnx2x_qable_obj *qable_obj =
+ (union bnx2x_qable_obj *)vlan_mac_obj;
+
+ bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
+ rdata_mapping, state, pstate, type,
+ macs_pool, vlans_pool);
+
+ /* CAM pool handling */
+ vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
+ vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
+ /*
+ * CAM offset is relevant for 57710 and 57711 chips only which have a
+ * single CAM for both MACs and VLAN-MAC pairs. So the offset
+ * will be taken from MACs' pool object only.
+ */
+ vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+ vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+ if (CHIP_IS_E1(bp)) {
+ BNX2X_ERR("Do not support chips others than E2\n");
+ BUG();
+ } else if (CHIP_IS_E1H(bp)) {
+ vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
+ vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
+ vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
+ vlan_mac_obj->check_move = bnx2x_check_move_always_err;
+ vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &vlan_mac_obj->exe_queue, 1, qable_obj,
+ bnx2x_validate_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_vlan_mac);
+ } else {
+ vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
+ vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
+ vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
+ vlan_mac_obj->check_move = bnx2x_check_move;
+ vlan_mac_obj->ramrod_cmd =
+ RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &vlan_mac_obj->exe_queue,
+ CLASSIFY_RULES_COUNT,
+ qable_obj, bnx2x_validate_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_vlan_mac);
+ }
+}
+
+/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
+static inline void __storm_memset_mac_filters(struct bnx2x *bp,
+ struct tstorm_eth_mac_filter_config *mac_filters,
+ u16 pf_id)
+{
+ size_t size = sizeof(struct tstorm_eth_mac_filter_config);
+
+ u32 addr = BAR_TSTRORM_INTMEM +
+ TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
+}
+
+static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p)
+{
+ /* update the bp MAC filter structure */
+ u32 mask = (1 << p->cl_id);
+
+ struct tstorm_eth_mac_filter_config *mac_filters =
+ (struct tstorm_eth_mac_filter_config *)p->rdata;
+
+	/* initial setting is drop-all */
+ u8 drop_all_ucast = 1, drop_all_mcast = 1;
+ u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
+ u8 unmatched_unicast = 0;
+
+	/* In E1x we only take the Rx accept flags into account since Tx
+	 * switching isn't enabled. */
+ if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
+ /* accept matched ucast */
+ drop_all_ucast = 0;
+
+ if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
+ /* accept matched mcast */
+ drop_all_mcast = 0;
+
+ if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
+		/* accept all ucast */
+ drop_all_ucast = 0;
+ accp_all_ucast = 1;
+ }
+ if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
+ /* accept all mcast */
+ drop_all_mcast = 0;
+ accp_all_mcast = 1;
+ }
+ if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
+ /* accept (all) bcast */
+ accp_all_bcast = 1;
+ if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
+ /* accept unmatched unicasts */
+ unmatched_unicast = 1;
+
+ mac_filters->ucast_drop_all = drop_all_ucast ?
+ mac_filters->ucast_drop_all | mask :
+ mac_filters->ucast_drop_all & ~mask;
+
+ mac_filters->mcast_drop_all = drop_all_mcast ?
+ mac_filters->mcast_drop_all | mask :
+ mac_filters->mcast_drop_all & ~mask;
+
+ mac_filters->ucast_accept_all = accp_all_ucast ?
+ mac_filters->ucast_accept_all | mask :
+ mac_filters->ucast_accept_all & ~mask;
+
+ mac_filters->mcast_accept_all = accp_all_mcast ?
+ mac_filters->mcast_accept_all | mask :
+ mac_filters->mcast_accept_all & ~mask;
+
+ mac_filters->bcast_accept_all = accp_all_bcast ?
+ mac_filters->bcast_accept_all | mask :
+ mac_filters->bcast_accept_all & ~mask;
+
+ mac_filters->unmatched_unicast = unmatched_unicast ?
+ mac_filters->unmatched_unicast | mask :
+ mac_filters->unmatched_unicast & ~mask;
+
+ DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
+ "accp_mcast 0x%x\naccp_bcast 0x%x\n",
+ mac_filters->ucast_drop_all,
+ mac_filters->mcast_drop_all,
+ mac_filters->ucast_accept_all,
+ mac_filters->mcast_accept_all,
+ mac_filters->bcast_accept_all);
+
+	/* Write the MAC filter structure */
+ __storm_memset_mac_filters(bp, mac_filters, p->func_id);
+
+ /* The operation is completed */
+ clear_bit(p->state, p->pstate);
+ smp_mb__after_clear_bit();
+
+ return 0;
+}
+
+/* Setup ramrod data */
+static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
+ struct eth_classify_header *hdr,
+ u8 rule_cnt)
+{
+ hdr->echo = cid;
+ hdr->rule_cnt = rule_cnt;
+}
+
+static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
+ unsigned long accept_flags,
+ struct eth_filter_rules_cmd *cmd,
+ bool clear_accept_all)
+{
+ u16 state;
+
+ /* start with 'drop-all' */
+ state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
+ ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+
+ if (accept_flags) {
+ if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+
+ if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
+ state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+
+ if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+ state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+ }
+
+ if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
+ state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+ state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+ }
+ if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
+ state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+
+ if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+ state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
+ }
+ if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
+ state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
+ }
+
+ /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
+ if (clear_accept_all) {
+ state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+ state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
+ }
+
+ cmd->state = cpu_to_le16(state);
+}
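+
+/*
+ * Example (illustrative): a classic promiscuous configuration passes
+ * accept_flags with BNX2X_ACCEPT_ALL_UNICAST, BNX2X_ACCEPT_ALL_MULTICAST and
+ * BNX2X_ACCEPT_BROADCAST set, which yields a state with all three ACCEPT_ALL
+ * bits set and both DROP_ALL bits cleared.
+ */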
+
+static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p)
+{
+ struct eth_filter_rules_ramrod_data *data = p->rdata;
+ int rc;
+ u8 rule_idx = 0;
+
+ /* Reset the ramrod data buffer */
+ memset(data, 0, sizeof(*data));
+
+ /* Setup ramrod data */
+
+ /* Tx (internal switching) */
+ if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
+ data->rules[rule_idx].client_id = p->cl_id;
+ data->rules[rule_idx].func_id = p->func_id;
+
+ data->rules[rule_idx].cmd_general_data =
+ ETH_FILTER_RULES_CMD_TX_CMD;
+
+ bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
+ &(data->rules[rule_idx++]), false);
+ }
+
+ /* Rx */
+ if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
+ data->rules[rule_idx].client_id = p->cl_id;
+ data->rules[rule_idx].func_id = p->func_id;
+
+ data->rules[rule_idx].cmd_general_data =
+ ETH_FILTER_RULES_CMD_RX_CMD;
+
+ bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
+ &(data->rules[rule_idx++]), false);
+ }
+
+ /*
+	 * If FCoE Queue configuration has been requested, configure the Rx and
+	 * internal switching modes for this queue in separate rules.
+	 *
+	 * The FCoE queue should never be set to ACCEPT_ALL packets of any
+	 * sort: MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
+ */
+ if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
+ /* Tx (internal switching) */
+ if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
+ data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
+ data->rules[rule_idx].func_id = p->func_id;
+
+ data->rules[rule_idx].cmd_general_data =
+ ETH_FILTER_RULES_CMD_TX_CMD;
+
+ bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
+ &(data->rules[rule_idx++]),
+ true);
+ }
+
+ /* Rx */
+ if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
+ data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
+ data->rules[rule_idx].func_id = p->func_id;
+
+ data->rules[rule_idx].cmd_general_data =
+ ETH_FILTER_RULES_CMD_RX_CMD;
+
+ bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
+ &(data->rules[rule_idx++]),
+ true);
+ }
+ }
+
+ /*
+ * Set the ramrod header (most importantly - number of rules to
+ * configure).
+ */
+ bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
+
+ DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
+ "tx_accept_flags 0x%lx\n",
+ data->header.rule_cnt, p->rx_accept_flags,
+ p->tx_accept_flags);
+
+ /*
+	 * No need for an explicit memory barrier here: we would anyway need
+	 * to order the write to the SPQ element against the update of the
+	 * SPQ producer, which involves a memory read, so a full memory
+	 * barrier is placed there (inside bnx2x_sp_post()).
+ */
+
+ /* Send a ramrod */
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
+ U64_HI(p->rdata_mapping),
+ U64_LO(p->rdata_mapping),
+ ETH_CONNECTION_TYPE);
+ if (rc)
+ return rc;
+
+ /* Ramrod completion is pending */
+ return 1;
+}
+
+static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p)
+{
+ return bnx2x_state_wait(bp, p->state, p->pstate);
+}
+
+static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p)
+{
+ /* Do nothing */
+ return 0;
+}
+
+int bnx2x_config_rx_mode(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p)
+{
+ int rc;
+
+ /* Configure the new classification in the chip */
+ rc = p->rx_mode_obj->config_rx_mode(bp, p);
+ if (rc < 0)
+ return rc;
+
+ /* Wait for a ramrod completion if was requested */
+ if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
+ rc = p->rx_mode_obj->wait_comp(bp, p);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+ struct bnx2x_rx_mode_obj *o)
+{
+ if (CHIP_IS_E1x(bp)) {
+ o->wait_comp = bnx2x_empty_rx_mode_wait;
+ o->config_rx_mode = bnx2x_set_rx_mode_e1x;
+ } else {
+ o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
+ o->config_rx_mode = bnx2x_set_rx_mode_e2;
+ }
+}
+
+/********************* Multicast verbs: SET, CLEAR ****************************/
+static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
+{
+ return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
+}
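+
+/*
+ * In other words (illustrative): the bin is the top byte of the little-endian
+ * CRC32c of the MAC, so all multicast addresses fold into 256 bins; the
+ * approximate-match registry below tracks those bins as one bit each across
+ * BNX2X_MCAST_VEC_SZ 64-bit vector elements.
+ */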
+
+struct bnx2x_mcast_mac_elem {
+ struct list_head link;
+ u8 mac[ETH_ALEN];
+ u8 pad[2]; /* For a natural alignment of the following buffer */
+};
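+
+/*
+ * Note (illustrative): bnx2x_mcast_enqueue_cmd() below allocates a pending
+ * command and its MAC array in a single kzalloc(); the bnx2x_mcast_mac_elem
+ * entries live directly behind the struct bnx2x_pending_mcast_cmd, which is
+ * why the pad above keeps consecutive elements naturally aligned.
+ */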
+
+struct bnx2x_pending_mcast_cmd {
+ struct list_head link;
+ int type; /* BNX2X_MCAST_CMD_X */
+ union {
+ struct list_head macs_head;
+ u32 macs_num; /* Needed for DEL command */
+		int next_bin; /* Needed for RESTORE flow with approximate match */
+ } data;
+
+ bool done; /* set to true, when the command has been handled,
+ * practically used in 57712 handling only, where one pending
+ * command may be handled in a few operations. As long as for
+ * other chips every operation handling is completed in a
+ * single ramrod, there is no need to utilize this field.
+ */
+};
+
+static int bnx2x_mcast_wait(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o)
+{
+ if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
+ o->raw.wait_comp(bp, &o->raw))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ int total_sz;
+ struct bnx2x_pending_mcast_cmd *new_cmd;
+ struct bnx2x_mcast_mac_elem *cur_mac = NULL;
+ struct bnx2x_mcast_list_elem *pos;
+ int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
+ p->mcast_list_len : 0);
+
+ /* If the command is empty ("handle pending commands only"), break */
+ if (!p->mcast_list_len)
+ return 0;
+
+ total_sz = sizeof(*new_cmd) +
+ macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
+
+	/* Add mcast is called under spin_lock, thus we allocate with GFP_ATOMIC */
+ new_cmd = kzalloc(total_sz, GFP_ATOMIC);
+
+ if (!new_cmd)
+ return -ENOMEM;
+
+ DP(BNX2X_MSG_SP, "About to enqueue a new %d command. "
+ "macs_list_len=%d\n", cmd, macs_list_len);
+
+ INIT_LIST_HEAD(&new_cmd->data.macs_head);
+
+ new_cmd->type = cmd;
+ new_cmd->done = false;
+
+ switch (cmd) {
+ case BNX2X_MCAST_CMD_ADD:
+ cur_mac = (struct bnx2x_mcast_mac_elem *)
+ ((u8 *)new_cmd + sizeof(*new_cmd));
+
+		/* Push the MACs of the current command into the pending
+		 * command's MACs list: FIFO
+		 */
+ list_for_each_entry(pos, &p->mcast_list, link) {
+ memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
+ list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
+ cur_mac++;
+ }
+
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ new_cmd->data.macs_num = p->mcast_list_len;
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ new_cmd->data.next_bin = 0;
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* Push the new pending command to the tail of the pending list: FIFO */
+ list_add_tail(&new_cmd->link, &o->pending_cmds_head);
+
+ o->set_sched(o);
+
+ return 1;
+}
+
+/**
+ * bnx2x_mcast_get_next_bin - get the next set bin (index)
+ *
+ * @o: multicast object
+ * @last: index to start looking from (inclusive)
+ *
+ * returns the next found (set) bin or a negative value if none is found.
+ */
+static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
+{
+ int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
+
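+	/* Scan the word containing 'last' starting at that bit; all
+	 * subsequent words are scanned from bit 0 (inner_start is reset
+	 * below).
+	 */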
+ for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
+ if (o->registry.aprox_match.vec[i])
+ for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
+ int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
+ if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
+ vec, cur_bit)) {
+ return cur_bit;
+ }
+ }
+ inner_start = 0;
+ }
+
+ /* None found */
+ return -1;
+}
+
+/**
+ * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
+ *
+ * @o: multicast object
+ *
+ * returns the index of the found bin or -1 if none is found
+ */
+static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
+{
+ int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
+
+ if (cur_bit >= 0)
+ BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
+
+ return cur_bit;
+}
+
+static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ u8 rx_tx_flag = 0;
+
+ if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
+ (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+ rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
+
+ if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
+ (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+ rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
+
+ return rx_tx_flag;
+}
+
+static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, int idx,
+ union bnx2x_mcast_config_data *cfg_data,
+ int cmd)
+{
+ struct bnx2x_raw_obj *r = &o->raw;
+ struct eth_multicast_rules_ramrod_data *data =
+ (struct eth_multicast_rules_ramrod_data *)(r->rdata);
+ u8 func_id = r->func_id;
+ u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
+ int bin;
+
+ if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
+ rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
+
+ data->rules[idx].cmd_general_data |= rx_tx_add_flag;
+
+ /* Get a bin and update a bins' vector */
+ switch (cmd) {
+ case BNX2X_MCAST_CMD_ADD:
+ bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
+ BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ /* If there were no more bins to clear
+ * (bnx2x_mcast_clear_first_bin() returns -1) then we would
+ * clear any (0xff) bin.
+ * See bnx2x_mcast_validate_e2() for explanation when it may
+ * happen.
+ */
+ bin = bnx2x_mcast_clear_first_bin(o);
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ bin = cfg_data->bin;
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return;
+ }
+
+ DP(BNX2X_MSG_SP, "%s bin %d\n",
+ ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
+ "Setting" : "Clearing"), bin);
+
+ data->rules[idx].bin_id = (u8)bin;
+ data->rules[idx].func_id = func_id;
+ data->rules[idx].engine_id = o->engine_id;
+}
+
+/**
+ * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
+ *
+ * @bp: device handle
+ * @o: multicast object
+ * @start_bin: index in the registry to start from (inclusive)
+ * @rdata_idx: index in the ramrod data to start from
+ *
+ * returns last handled bin index or -1 if all bins have been handled
+ */
+static inline int bnx2x_mcast_handle_restore_cmd_e2(
+ struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
+ int *rdata_idx)
+{
+ int cur_bin, cnt = *rdata_idx;
+ union bnx2x_mcast_config_data cfg_data = {0};
+
+ /* go through the registry and configure the bins from it */
+ for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
+ cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
+
+ cfg_data.bin = (u8)cur_bin;
+ o->set_one_rule(bp, o, cnt, &cfg_data,
+ BNX2X_MCAST_CMD_RESTORE);
+
+ cnt++;
+
+ DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
+
+ /* Break if we reached the maximum number
+ * of rules.
+ */
+ if (cnt >= o->max_cmd_len)
+ break;
+ }
+
+ *rdata_idx = cnt;
+
+ return cur_bin;
+}
+
+static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+ int *line_idx)
+{
+ struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
+ int cnt = *line_idx;
+ union bnx2x_mcast_config_data cfg_data = {0};
+
+ list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
+ link) {
+
+ cfg_data.mac = &pmac_pos->mac[0];
+ o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
+
+ cnt++;
+
+ DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
+ pmac_pos->mac);
+
+ list_del(&pmac_pos->link);
+
+ /* Break if we reached the maximum number
+ * of rules.
+ */
+ if (cnt >= o->max_cmd_len)
+ break;
+ }
+
+ *line_idx = cnt;
+
+ /* if no more MACs to configure - we are done */
+ if (list_empty(&cmd_pos->data.macs_head))
+ cmd_pos->done = true;
+}
+
+static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+ int *line_idx)
+{
+ int cnt = *line_idx;
+
+ while (cmd_pos->data.macs_num) {
+ o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
+
+ cnt++;
+
+ cmd_pos->data.macs_num--;
+
+		DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
+		   cmd_pos->data.macs_num, cnt);
+
+ /* Break if we reached the maximum
+ * number of rules.
+ */
+ if (cnt >= o->max_cmd_len)
+ break;
+ }
+
+ *line_idx = cnt;
+
+	/* If there are no more MACs to delete - we are done */
+ if (!cmd_pos->data.macs_num)
+ cmd_pos->done = true;
+}
+
+static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+ int *line_idx)
+{
+ cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
+ line_idx);
+
+ if (cmd_pos->data.next_bin < 0)
+		/* If o->hdl_restore returned -1 we are done */
+ cmd_pos->done = true;
+ else
+ /* Start from the next bin next time */
+ cmd_pos->data.next_bin++;
+}
+
+static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p)
+{
+ struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
+ int cnt = 0;
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+ list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
+ link) {
+ switch (cmd_pos->type) {
+ case BNX2X_MCAST_CMD_ADD:
+ bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
+ &cnt);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
+ return -EINVAL;
+ }
+
+ /* If the command has been completed - remove it from the list
+ * and free the memory
+ */
+ if (cmd_pos->done) {
+ list_del(&cmd_pos->link);
+ kfree(cmd_pos);
+ }
+
+ /* Break if we reached the maximum number of rules */
+ if (cnt >= o->max_cmd_len)
+ break;
+ }
+
+ return cnt;
+}
+
+static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+ int *line_idx)
+{
+ struct bnx2x_mcast_list_elem *mlist_pos;
+ union bnx2x_mcast_config_data cfg_data = {0};
+ int cnt = *line_idx;
+
+ list_for_each_entry(mlist_pos, &p->mcast_list, link) {
+ cfg_data.mac = mlist_pos->mac;
+ o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
+
+ cnt++;
+
+ DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
+ mlist_pos->mac);
+ }
+
+ *line_idx = cnt;
+}
+
+static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+ int *line_idx)
+{
+ int cnt = *line_idx, i;
+
+ for (i = 0; i < p->mcast_list_len; i++) {
+ o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
+
+ cnt++;
+
+ DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
+ p->mcast_list_len - i - 1);
+ }
+
+ *line_idx = cnt;
+}
+
+/**
+ * bnx2x_mcast_handle_current_cmd - handle the current (non-pending) command
+ *
+ * @bp: device handle
+ * @p: mcast ramrod parameters
+ * @cmd: command to handle (BNX2X_MCAST_CMD_X)
+ * @start_cnt: first line in the ramrod data that may be used
+ *
+ * This function is called iff there is enough room for the current command in
+ * the ramrod data.
+ * Returns the total number of lines filled in the ramrod data.
+ */
+static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p, int cmd,
+ int start_cnt)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ int cnt = start_cnt;
+
+ DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
+
+ switch (cmd) {
+ case BNX2X_MCAST_CMD_ADD:
+ bnx2x_mcast_hdl_add(bp, o, p, &cnt);
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ bnx2x_mcast_hdl_del(bp, o, p, &cnt);
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ o->hdl_restore(bp, o, 0, &cnt);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* The current command has been handled */
+ p->mcast_list_len = 0;
+
+ return cnt;
+}
+
+static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ int reg_sz = o->get_registry_size(o);
+
+ switch (cmd) {
+ /* DEL command deletes all currently configured MACs */
+ case BNX2X_MCAST_CMD_DEL:
+ o->set_registry_size(o, 0);
+		/* Fall through */
+
+ /* RESTORE command will restore the entire multicast configuration */
+ case BNX2X_MCAST_CMD_RESTORE:
+		/* Here we set the approximate amount of work to do, which in
+		 * fact may turn out to be less, as some MACs in postponed ADD
+		 * command(s) scheduled before this command may fall into
+		 * the same bin, so the actual number of bins set in the
+		 * registry would be less than estimated here. See
+		 * bnx2x_mcast_set_one_rule_e2() for further details.
+		 */
+ p->mcast_list_len = reg_sz;
+ break;
+
+ case BNX2X_MCAST_CMD_ADD:
+ case BNX2X_MCAST_CMD_CONT:
+ /* Here we assume that all new MACs will fall into new bins.
+ * However we will correct the real registry size after we
+ * handle all pending commands.
+ */
+ o->set_registry_size(o, reg_sz + p->mcast_list_len);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+
+ }
+
+ /* Increase the total number of MACs pending to be configured */
+ o->total_pending_num += p->mcast_list_len;
+
+ return 0;
+}
+
+static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int old_num_bins)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+ o->set_registry_size(o, old_num_bins);
+ o->total_pending_num -= p->mcast_list_len;
+}
+
+/**
+ * bnx2x_mcast_set_rdata_hdr_e2 - set the header values
+ *
+ * @bp: device handle
+ * @p: mcast ramrod parameters
+ * @len: number of rules to handle
+ */
+static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ u8 len)
+{
+ struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
+ struct eth_multicast_rules_ramrod_data *data =
+ (struct eth_multicast_rules_ramrod_data *)(r->rdata);
+
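+	/* The echo field is returned by the FW in the ramrod completion and
+	 * lets us match the completion to this pending multicast command.
+	 */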
+ data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
+ (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
+ data->header.rule_cnt = len;
+}
+
+/**
+ * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
+ *
+ * @bp: device handle
+ * @o: multicast object
+ *
+ * Recalculate the actual number of set bins in the registry using Brian
+ * Kernighan's algorithm: its runtime is proportional to the number of set
+ * bins.
+ *
+ * Returns 0 to comply with bnx2x_mcast_refresh_registry_e1().
+ */
+static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o)
+{
+ int i, cnt = 0;
+ u64 elem;
+
+ for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
+ elem = o->registry.aprox_match.vec[i];
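+		/* Clearing the lowest set bit per iteration makes the loop
+		 * run once per set bin.
+		 */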
+ for (; elem; cnt++)
+ elem &= elem - 1;
+ }
+
+ o->set_registry_size(o, cnt);
+
+ return 0;
+}
+
+static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ struct eth_multicast_rules_ramrod_data *data =
+ (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
+ int cnt = 0, rc;
+
+ /* Reset the ramrod data buffer */
+ memset(data, 0, sizeof(*data));
+
+ cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
+
+ /* If there are no more pending commands - clear SCHEDULED state */
+ if (list_empty(&o->pending_cmds_head))
+ o->clear_sched(o);
+
+ /* The below may be true iff there was enough room in ramrod
+ * data for all pending commands and for the current
+ * command. Otherwise the current command would have been added
+ * to the pending commands and p->mcast_list_len would have been
+ * zeroed.
+ */
+ if (p->mcast_list_len > 0)
+ cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
+
+ /* We've pulled out some MACs - update the total number of
+ * outstanding.
+ */
+ o->total_pending_num -= cnt;
+
+ /* send a ramrod */
+ WARN_ON(o->total_pending_num < 0);
+ WARN_ON(cnt > o->max_cmd_len);
+
+ bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
+
+ /* Update a registry size if there are no more pending operations.
+ *
+ * We don't want to change the value of the registry size if there are
+ * pending operations because we want it to always be equal to the
+ * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
+ * set bins after the last requested operation in order to properly
+ * evaluate the size of the next DEL/RESTORE operation.
+ *
+ * Note that we update the registry itself during command(s) handling
+ * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
+ * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
+ * with a limited amount of update commands (per MAC/bin) and we don't
+ * know in this scope what the actual state of bins configuration is
+ * going to be after this ramrod.
+ */
+ if (!o->total_pending_num)
+ bnx2x_mcast_refresh_registry_e2(bp, o);
+
+ /*
+ * If CLEAR_ONLY was requested - don't send a ramrod and clear
+ * RAMROD_PENDING status immediately.
+ */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ raw->clear_pending(raw);
+ return 0;
+ } else {
+		/*
+		 * No need for an explicit memory barrier here: we only need
+		 * to order the write of the SPQ element against the update
+		 * of the SPQ producer, and bnx2x_sp_post() already contains
+		 * the full memory barrier required for that.
+		 */
+
+ /* Send a ramrod */
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
+ raw->cid, U64_HI(raw->rdata_mapping),
+ U64_LO(raw->rdata_mapping),
+ ETH_CONNECTION_TYPE);
+ if (rc)
+ return rc;
+
+ /* Ramrod completion is pending */
+ return 1;
+ }
+}
+
+static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ /* Mark, that there is a work to do */
+ if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
+ p->mcast_list_len = 1;
+
+ return 0;
+}
+
+static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int old_num_bins)
+{
+ /* Do nothing */
+}
+
+#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
+do { \
+ (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
+} while (0)
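+/* A worked example (illustration only): bin 70 sets filter word 2, bit 6,
+ * since 70 >> 5 == 2 and 70 & 0x1f == 6.
+ */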
+
+static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o,
+ struct bnx2x_mcast_ramrod_params *p,
+ u32 *mc_filter)
+{
+ struct bnx2x_mcast_list_elem *mlist_pos;
+ int bit;
+
+ list_for_each_entry(mlist_pos, &p->mcast_list, link) {
+ bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
+ BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
+
+ DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
+ mlist_pos->mac, bit);
+
+ /* bookkeeping... */
+ BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
+ bit);
+ }
+}
+
+static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+ u32 *mc_filter)
+{
+ int bit;
+
+ for (bit = bnx2x_mcast_get_next_bin(o, 0);
+ bit >= 0;
+ bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
+ BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
+ DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
+ }
+}
+
+/* On 57711 we write the multicast MACs' approximate match
+ * table directly into the TSTORM's internal RAM, so no
+ * special tricks are needed to make it work.
+ */
+static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ int i;
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ struct bnx2x_raw_obj *r = &o->raw;
+
+	/* If CLEAR_ONLY has been requested - only clear the registry
+	 * and the pending bit; otherwise build and write the new filter.
+	 */
+ if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ u32 mc_filter[MC_HASH_SIZE] = {0};
+
+ /* Set the multicast filter bits before writing it into
+ * the internal memory.
+ */
+ switch (cmd) {
+ case BNX2X_MCAST_CMD_ADD:
+ bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ DP(BNX2X_MSG_SP,
+ "Invalidating multicast MACs configuration\n");
+
+ /* clear the registry */
+ memset(o->registry.aprox_match.vec, 0,
+ sizeof(o->registry.aprox_match.vec));
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* Set the mcast filter in the internal memory */
+ for (i = 0; i < MC_HASH_SIZE; i++)
+ REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
+ } else
+ /* clear the registry */
+ memset(o->registry.aprox_match.vec, 0,
+ sizeof(o->registry.aprox_match.vec));
+
+ /* We are done */
+ r->clear_pending(r);
+
+ return 0;
+}
+
+static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ int reg_sz = o->get_registry_size(o);
+
+ switch (cmd) {
+ /* DEL command deletes all currently configured MACs */
+ case BNX2X_MCAST_CMD_DEL:
+ o->set_registry_size(o, 0);
+		/* Fall through */
+
+ /* RESTORE command will restore the entire multicast configuration */
+ case BNX2X_MCAST_CMD_RESTORE:
+ p->mcast_list_len = reg_sz;
+ DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
+ cmd, p->mcast_list_len);
+ break;
+
+ case BNX2X_MCAST_CMD_ADD:
+ case BNX2X_MCAST_CMD_CONT:
+ /* Multicast MACs on 57710 are configured as unicast MACs and
+ * there is only a limited number of CAM entries for that
+ * matter.
+ */
+ if (p->mcast_list_len > o->max_cmd_len) {
+			BNX2X_ERR("Can't configure more than %d multicast MACs "
+				  "on 57710\n", o->max_cmd_len);
+ return -EINVAL;
+ }
+		/* Every configured MAC should be cleared if DEL command is
+		 * called. Only the last ADD command is relevant, since every
+		 * ADD command overrides the previous configuration.
+		 */
+ DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
+ if (p->mcast_list_len > 0)
+ o->set_registry_size(o, p->mcast_list_len);
+
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+
+ }
+
+ /* We want to ensure that commands are executed one by one for 57710.
+	 * Therefore each non-empty command will consume o->max_cmd_len.
+ */
+ if (p->mcast_list_len)
+ o->total_pending_num += o->max_cmd_len;
+
+ return 0;
+}
+
+static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int old_num_macs)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+ o->set_registry_size(o, old_num_macs);
+
+	/* If the current command hasn't been handled yet, getting here
+	 * means it's meant to be dropped, so we have to update
+	 * the number of outstanding MACs accordingly.
+	 */
+ if (p->mcast_list_len)
+ o->total_pending_num -= o->max_cmd_len;
+}
+
+static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, int idx,
+ union bnx2x_mcast_config_data *cfg_data,
+ int cmd)
+{
+ struct bnx2x_raw_obj *r = &o->raw;
+ struct mac_configuration_cmd *data =
+ (struct mac_configuration_cmd *)(r->rdata);
+
+ /* copy mac */
+ if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
+ bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
+ &data->config_table[idx].middle_mac_addr,
+ &data->config_table[idx].lsb_mac_addr,
+ cfg_data->mac);
+
+ data->config_table[idx].vlan_id = 0;
+ data->config_table[idx].pf_id = r->func_id;
+ data->config_table[idx].clients_bit_vector =
+ cpu_to_le32(1 << r->cl_id);
+
+ SET_FLAG(data->config_table[idx].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_SET);
+ }
+}
+
+/**
+ * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
+ *
+ * @bp: device handle
+ * @p: mcast ramrod parameters
+ * @len: number of rules to handle
+ */
+static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ u8 len)
+{
+ struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
+ struct mac_configuration_cmd *data =
+ (struct mac_configuration_cmd *)(r->rdata);
+
+ u8 offset = (CHIP_REV_IS_SLOW(bp) ?
+ BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
+ BNX2X_MAX_MULTICAST*(1 + r->func_id));
+
+ data->hdr.offset = offset;
+ data->hdr.client_id = 0xff;
+ data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
+ (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
+ data->hdr.length = len;
+}
+
+/**
+ * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
+ *
+ * @bp: device handle
+ * @o: multicast object
+ * @start_idx: index in the registry to start from
+ * @rdata_idx: index in the ramrod data to start from
+ *
+ * The restore command for 57710 is, like all other commands, always a
+ * stand-alone command, so start_idx and rdata_idx will always be 0. This
+ * function always succeeds.
+ * Returns -1 to comply with the 57712 variant.
+ */
+static inline int bnx2x_mcast_handle_restore_cmd_e1(
+ struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
+ int *rdata_idx)
+{
+ struct bnx2x_mcast_mac_elem *elem;
+ int i = 0;
+ union bnx2x_mcast_config_data cfg_data = {0};
+
+ /* go through the registry and configure the MACs from it. */
+ list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
+ cfg_data.mac = &elem->mac[0];
+ o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
+
+ i++;
+
+ DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
+ cfg_data.mac);
+ }
+
+ *rdata_idx = i;
+
+ return -1;
+}
+
+
+static inline int bnx2x_mcast_handle_pending_cmds_e1(
+ struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
+{
+ struct bnx2x_pending_mcast_cmd *cmd_pos;
+ struct bnx2x_mcast_mac_elem *pmac_pos;
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ union bnx2x_mcast_config_data cfg_data = {0};
+ int cnt = 0;
+
+
+ /* If nothing to be done - return */
+ if (list_empty(&o->pending_cmds_head))
+ return 0;
+
+ /* Handle the first command */
+ cmd_pos = list_first_entry(&o->pending_cmds_head,
+ struct bnx2x_pending_mcast_cmd, link);
+
+ switch (cmd_pos->type) {
+ case BNX2X_MCAST_CMD_ADD:
+ list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
+ cfg_data.mac = &pmac_pos->mac[0];
+ o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
+
+ cnt++;
+
+ DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
+ pmac_pos->mac);
+ }
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ cnt = cmd_pos->data.macs_num;
+ DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ o->hdl_restore(bp, o, 0, &cnt);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
+ return -EINVAL;
+ }
+
+ list_del(&cmd_pos->link);
+ kfree(cmd_pos);
+
+ return cnt;
+}
+
+/**
+ * bnx2x_get_fw_mac_addr - reverse bnx2x_set_fw_mac_addr().
+ *
+ * @fw_hi: two most significant bytes of the MAC in FW format
+ * @fw_mid: two middle bytes of the MAC in FW format
+ * @fw_lo: two least significant bytes of the MAC in FW format
+ * @mac: buffer to write the reconstructed MAC address into
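+ *
+ * Illustration (an assumed example, not from the HSI): for MAC
+ * 00:11:22:33:44:55 the FW words hold 0x0011, 0x2233 and 0x4455 as
+ * little-endian 16-bit values.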
+ */
+static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
+ __le16 *fw_lo, u8 *mac)
+{
+ mac[1] = ((u8 *)fw_hi)[0];
+ mac[0] = ((u8 *)fw_hi)[1];
+ mac[3] = ((u8 *)fw_mid)[0];
+ mac[2] = ((u8 *)fw_mid)[1];
+ mac[5] = ((u8 *)fw_lo)[0];
+ mac[4] = ((u8 *)fw_lo)[1];
+}
+
+/**
+ * bnx2x_mcast_refresh_registry_e1 - update the registry from the ramrod data
+ *
+ * @bp: device handle
+ * @o: multicast object
+ *
+ * Check the flag of the first ramrod data entry to see if it was a DELETE or
+ * an ADD command and update the registry correspondingly: if ADD - allocate
+ * memory and add the entries to the registry (list), if DELETE - clear the
+ * registry and free the memory.
+ */
+static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct bnx2x_mcast_mac_elem *elem;
+ struct mac_configuration_cmd *data =
+ (struct mac_configuration_cmd *)(raw->rdata);
+
+ /* If first entry contains a SET bit - the command was ADD,
+ * otherwise - DEL_ALL
+ */
+ if (GET_FLAG(data->config_table[0].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
+ int i, len = data->hdr.length;
+
+		/* Nothing to do if it was a RESTORE command - the registry
+		 * is already populated
+		 */
+ if (!list_empty(&o->registry.exact_match.macs))
+ return 0;
+
+ elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
+ if (!elem) {
+ BNX2X_ERR("Failed to allocate registry memory\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < len; i++, elem++) {
+ bnx2x_get_fw_mac_addr(
+ &data->config_table[i].msb_mac_addr,
+ &data->config_table[i].middle_mac_addr,
+ &data->config_table[i].lsb_mac_addr,
+ elem->mac);
+ DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
+ elem->mac);
+ list_add_tail(&elem->link,
+ &o->registry.exact_match.macs);
+ }
+ } else {
+ elem = list_first_entry(&o->registry.exact_match.macs,
+ struct bnx2x_mcast_mac_elem, link);
+ DP(BNX2X_MSG_SP, "Deleting a registry\n");
+ kfree(elem);
+ INIT_LIST_HEAD(&o->registry.exact_match.macs);
+ }
+
+ return 0;
+}
+
+static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct mac_configuration_cmd *data =
+ (struct mac_configuration_cmd *)(raw->rdata);
+ int cnt = 0, i, rc;
+
+ /* Reset the ramrod data buffer */
+ memset(data, 0, sizeof(*data));
+
+ /* First set all entries as invalid */
+	for (i = 0; i < o->max_cmd_len; i++)
+ SET_FLAG(data->config_table[i].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_INVALIDATE);
+
+ /* Handle pending commands first */
+ cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
+
+ /* If there are no more pending commands - clear SCHEDULED state */
+ if (list_empty(&o->pending_cmds_head))
+ o->clear_sched(o);
+
+ /* The below may be true iff there were no pending commands */
+ if (!cnt)
+ cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
+
+ /* For 57710 every command has o->max_cmd_len length to ensure that
+ * commands are done one at a time.
+ */
+ o->total_pending_num -= o->max_cmd_len;
+
+ /* send a ramrod */
+
+ WARN_ON(cnt > o->max_cmd_len);
+
+ /* Set ramrod header (in particular, a number of entries to update) */
+ bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
+
+	/* Update the registry: we need the registry contents to always be
+	 * up to date in order to be able to execute a RESTORE opcode. Here
+	 * we use the fact that for 57710 we send one command at a time,
+	 * hence we may take the registry update out of the command handling
+	 * and do it in a simpler way here.
+	 */
+ rc = bnx2x_mcast_refresh_registry_e1(bp, o);
+ if (rc)
+ return rc;
+
+ /*
+ * If CLEAR_ONLY was requested - don't send a ramrod and clear
+ * RAMROD_PENDING status immediately.
+ */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ raw->clear_pending(raw);
+ return 0;
+ } else {
+		/*
+		 * No need for an explicit memory barrier here: we only need
+		 * to order the write of the SPQ element against the update
+		 * of the SPQ producer, and bnx2x_sp_post() already contains
+		 * the full memory barrier required for that.
+		 */
+
+ /* Send a ramrod */
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
+ U64_HI(raw->rdata_mapping),
+ U64_LO(raw->rdata_mapping),
+ ETH_CONNECTION_TYPE);
+ if (rc)
+ return rc;
+
+ /* Ramrod completion is pending */
+ return 1;
+ }
+
+}
+
+static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
+{
+ return o->registry.exact_match.num_macs_set;
+}
+
+static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
+{
+ return o->registry.aprox_match.num_bins_set;
+}
+
+static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
+ int n)
+{
+ o->registry.exact_match.num_macs_set = n;
+}
+
+static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
+ int n)
+{
+ o->registry.aprox_match.num_bins_set = n;
+}
+
+int bnx2x_config_mcast(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ struct bnx2x_raw_obj *r = &o->raw;
+ int rc = 0, old_reg_size;
+
+ /* This is needed to recover number of currently configured mcast macs
+ * in case of failure.
+ */
+ old_reg_size = o->get_registry_size(o);
+
+ /* Do some calculations and checks */
+ rc = o->validate(bp, p, cmd);
+ if (rc)
+ return rc;
+
+ /* Return if there is no work to do */
+ if ((!p->mcast_list_len) && (!o->check_sched(o)))
+ return 0;
+
+ DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d "
+ "o->max_cmd_len=%d\n", o->total_pending_num,
+ p->mcast_list_len, o->max_cmd_len);
+
+ /* Enqueue the current command to the pending list if we can't complete
+ * it in the current iteration
+ */
+ if (r->check_pending(r) ||
+ ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
+ rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
+ if (rc < 0)
+ goto error_exit1;
+
+		/* As the current command has been added to the pending list,
+		 * we no longer need to handle it separately.
+		 */
+ p->mcast_list_len = 0;
+ }
+
+ if (!r->check_pending(r)) {
+
+ /* Set 'pending' state */
+ r->set_pending(r);
+
+ /* Configure the new classification in the chip */
+ rc = o->config_mcast(bp, p, cmd);
+ if (rc < 0)
+ goto error_exit2;
+
+ /* Wait for a ramrod completion if was requested */
+ if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
+ rc = o->wait_comp(bp, o);
+ }
+
+ return rc;
+
+error_exit2:
+ r->clear_pending(r);
+
+error_exit1:
+ o->revert(bp, p, old_reg_size);
+
+ return rc;
+}
+
+static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
+{
+ smp_mb__before_clear_bit();
+ clear_bit(o->sched_state, o->raw.pstate);
+ smp_mb__after_clear_bit();
+}
+
+static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
+{
+ smp_mb__before_clear_bit();
+ set_bit(o->sched_state, o->raw.pstate);
+ smp_mb__after_clear_bit();
+}
+
+static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
+{
+ return !!test_bit(o->sched_state, o->raw.pstate);
+}
+
+static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
+{
+ return o->raw.check_pending(&o->raw) || o->check_sched(o);
+}
+
+void bnx2x_init_mcast_obj(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *mcast_obj,
+ u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
+ u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate, bnx2x_obj_type type)
+{
+ memset(mcast_obj, 0, sizeof(*mcast_obj));
+
+ bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
+ rdata, rdata_mapping, state, pstate, type);
+
+ mcast_obj->engine_id = engine_id;
+
+ INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
+
+ mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
+ mcast_obj->check_sched = bnx2x_mcast_check_sched;
+ mcast_obj->set_sched = bnx2x_mcast_set_sched;
+ mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
+
+ if (CHIP_IS_E1(bp)) {
+ mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
+ mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
+ mcast_obj->hdl_restore =
+ bnx2x_mcast_handle_restore_cmd_e1;
+ mcast_obj->check_pending = bnx2x_mcast_check_pending;
+
+ if (CHIP_REV_IS_SLOW(bp))
+ mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
+ else
+ mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
+
+ mcast_obj->wait_comp = bnx2x_mcast_wait;
+ mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
+ mcast_obj->validate = bnx2x_mcast_validate_e1;
+ mcast_obj->revert = bnx2x_mcast_revert_e1;
+ mcast_obj->get_registry_size =
+ bnx2x_mcast_get_registry_size_exact;
+ mcast_obj->set_registry_size =
+ bnx2x_mcast_set_registry_size_exact;
+
+ /* 57710 is the only chip that uses the exact match for mcast
+ * at the moment.
+ */
+ INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
+
+ } else if (CHIP_IS_E1H(bp)) {
+ mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
+ mcast_obj->enqueue_cmd = NULL;
+ mcast_obj->hdl_restore = NULL;
+ mcast_obj->check_pending = bnx2x_mcast_check_pending;
+
+ /* 57711 doesn't send a ramrod, so it has unlimited credit
+ * for one command.
+ */
+ mcast_obj->max_cmd_len = -1;
+ mcast_obj->wait_comp = bnx2x_mcast_wait;
+ mcast_obj->set_one_rule = NULL;
+ mcast_obj->validate = bnx2x_mcast_validate_e1h;
+ mcast_obj->revert = bnx2x_mcast_revert_e1h;
+ mcast_obj->get_registry_size =
+ bnx2x_mcast_get_registry_size_aprox;
+ mcast_obj->set_registry_size =
+ bnx2x_mcast_set_registry_size_aprox;
+ } else {
+ mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
+ mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
+ mcast_obj->hdl_restore =
+ bnx2x_mcast_handle_restore_cmd_e2;
+ mcast_obj->check_pending = bnx2x_mcast_check_pending;
+ /* TODO: There should be a proper HSI define for this number!!!
+ */
+ mcast_obj->max_cmd_len = 16;
+ mcast_obj->wait_comp = bnx2x_mcast_wait;
+ mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
+ mcast_obj->validate = bnx2x_mcast_validate_e2;
+ mcast_obj->revert = bnx2x_mcast_revert_e2;
+ mcast_obj->get_registry_size =
+ bnx2x_mcast_get_registry_size_aprox;
+ mcast_obj->set_registry_size =
+ bnx2x_mcast_set_registry_size_aprox;
+ }
+}
+
+/*************************** Credit handling **********************************/
+
+/**
+ * __atomic_add_ifless - add if the result is less than a given value.
+ *
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...if (v + a) is less than u.
+ *
+ * returns true if (v + a) was less than u, and false otherwise.
+ *
+ */
+static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
+{
+ int c, old;
+
+ c = atomic_read(v);
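+	/* Classic cmpxchg retry loop: re-read and retry if another CPU
+	 * changed the counter under us.
+	 */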
+ for (;;) {
+ if (unlikely(c + a >= u))
+ return false;
+
+ old = atomic_cmpxchg((v), c, c + a);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+
+ return true;
+}
+
+/**
+ * __atomic_dec_ifmoe - decrement if the result is greater than or equal to a
+ * given value.
+ *
+ * @v: pointer of type atomic_t
+ * @a: the amount to dec from v...
+ * @u: ...if (v - a) is greater than or equal to u.
+ *
+ * returns true if (v - a) was greater than or equal to u, and false
+ * otherwise.
+ */
+static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
+{
+ int c, old;
+
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c - a < u))
+ return false;
+
+ old = atomic_cmpxchg((v), c, c - a);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+
+ return true;
+}
+
+static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
+{
+ bool rc;
+
+ smp_mb();
+ rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
+ smp_mb();
+
+ return rc;
+}
+
+static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
+{
+ bool rc;
+
+ smp_mb();
+
+	/* Don't allow a refill if credit + cnt > pool_sz */
+ rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
+
+ smp_mb();
+
+ return rc;
+}
+
+static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
+{
+ int cur_credit;
+
+ smp_mb();
+ cur_credit = atomic_read(&o->credit);
+
+ return cur_credit;
+}
+
+static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
+ int cnt)
+{
+ return true;
+}
+
+
+static bool bnx2x_credit_pool_get_entry(
+ struct bnx2x_credit_pool_obj *o,
+ int *offset)
+{
+ int idx, vec, i;
+
+ *offset = -1;
+
+ /* Find "internal cam-offset" then add to base for this object... */
+ for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
+
+ /* Skip the current vector if there are no free entries in it */
+ if (!o->pool_mirror[vec])
+ continue;
+
+ /* If we've got here we are going to find a free entry */
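+		/* idx is a global bit index: every pool_mirror word covers
+		 * BIT_VEC64_ELEM_SZ entries.
+		 */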
+		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
+		      i < BIT_VEC64_ELEM_SZ; idx++, i++)
+
+ if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
+ /* Got one!! */
+ BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
+ *offset = o->base_pool_offset + idx;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool bnx2x_credit_pool_put_entry(
+ struct bnx2x_credit_pool_obj *o,
+ int offset)
+{
+ if (offset < o->base_pool_offset)
+ return false;
+
+ offset -= o->base_pool_offset;
+
+ if (offset >= o->pool_sz)
+ return false;
+
+ /* Return the entry to the pool */
+ BIT_VEC64_SET_BIT(o->pool_mirror, offset);
+
+ return true;
+}
+
+static bool bnx2x_credit_pool_put_entry_always_true(
+ struct bnx2x_credit_pool_obj *o,
+ int offset)
+{
+ return true;
+}
+
+static bool bnx2x_credit_pool_get_entry_always_true(
+ struct bnx2x_credit_pool_obj *o,
+ int *offset)
+{
+ *offset = -1;
+ return true;
+}
+
+/**
+ * bnx2x_init_credit_pool - initialize credit pool internals.
+ *
+ * @p: credit pool object
+ * @base: Base entry in the CAM to use.
+ * @credit: pool size.
+ *
+ * If base is negative no CAM entries handling will be performed.
+ * If credit is negative pool operations will always succeed (unlimited pool).
+ *
+ */
+static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+ int base, int credit)
+{
+ /* Zero the object first */
+ memset(p, 0, sizeof(*p));
+
+ /* Set the table to all 1s */
+ memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
+
+	/* Init the pool as full */
+	atomic_set(&p->credit, credit);
+
+	/* The total pool size */
+	p->pool_sz = credit;
+
+ p->base_pool_offset = base;
+
+ /* Commit the change */
+ smp_mb();
+
+ p->check = bnx2x_credit_pool_check;
+
+ /* if pool credit is negative - disable the checks */
+ if (credit >= 0) {
+ p->put = bnx2x_credit_pool_put;
+ p->get = bnx2x_credit_pool_get;
+ p->put_entry = bnx2x_credit_pool_put_entry;
+ p->get_entry = bnx2x_credit_pool_get_entry;
+ } else {
+ p->put = bnx2x_credit_pool_always_true;
+ p->get = bnx2x_credit_pool_always_true;
+ p->put_entry = bnx2x_credit_pool_put_entry_always_true;
+ p->get_entry = bnx2x_credit_pool_get_entry_always_true;
+ }
+
+ /* If base is negative - disable entries handling */
+ if (base < 0) {
+ p->put_entry = bnx2x_credit_pool_put_entry_always_true;
+ p->get_entry = bnx2x_credit_pool_get_entry_always_true;
+ }
+}
+
+void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
+ struct bnx2x_credit_pool_obj *p, u8 func_id,
+ u8 func_num)
+{
+/* TODO: this will be defined in consts as well... */
+#define BNX2X_CAM_SIZE_EMUL 5
+
+ int cam_sz;
+
+ if (CHIP_IS_E1(bp)) {
+		/* In E1 multicast MACs occupy CAM entries as well, so
+		 * leave room for them
+		 */
+ if (!CHIP_REV_IS_SLOW(bp))
+ cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
+ else
+ cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
+
+ bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
+
+ } else if (CHIP_IS_E1H(bp)) {
+		/* CAM credit is equally divided between all active functions
+		 * on the PORT.
+		 */
+		if (func_num > 0) {
+ if (!CHIP_REV_IS_SLOW(bp))
+ cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
+ else
+ cam_sz = BNX2X_CAM_SIZE_EMUL;
+ bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
+ } else {
+ /* this should never happen! Block MAC operations. */
+ bnx2x_init_credit_pool(p, 0, 0);
+ }
+
+ } else {
+
+		/*
+		 * CAM credit is equally divided between all active functions
+		 * on the PATH.
+		 */
+		if (func_num > 0) {
+ if (!CHIP_REV_IS_SLOW(bp))
+ cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
+ else
+ cam_sz = BNX2X_CAM_SIZE_EMUL;
+
+ /*
+ * No need for CAM entries handling for 57712 and
+ * newer.
+ */
+ bnx2x_init_credit_pool(p, -1, cam_sz);
+ } else {
+ /* this should never happen! Block MAC operations. */
+ bnx2x_init_credit_pool(p, 0, 0);
+ }
+
+ }
+}
+
+void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
+ struct bnx2x_credit_pool_obj *p,
+ u8 func_id,
+ u8 func_num)
+{
+ if (CHIP_IS_E1x(bp)) {
+ /*
+		 * There is no VLAN credit in HW on 57710 and 57711; only
+		 * MAC / MAC-VLAN can be set
+ */
+ bnx2x_init_credit_pool(p, 0, -1);
+ } else {
+ /*
+		 * CAM credit is equally divided between all active functions
+ * on the PATH.
+ */
+ if (func_num > 0) {
+ int credit = MAX_VLAN_CREDIT_E2 / func_num;
+ bnx2x_init_credit_pool(p, func_id * credit, credit);
+ } else
+ /* this should never happen! Block VLAN operations. */
+ bnx2x_init_credit_pool(p, 0, 0);
+ }
+}
+
+/****************** RSS Configuration ******************/
+/**
+ * bnx2x_debug_print_ind_table - prints the indirection table configuration.
+ *
+ * @bp: device handle
+ * @p: pointer to rss configuration
+ *
+ * Prints the table when the NETIF_MSG_IFUP debug level is configured.
+ */
+static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *p)
+{
+ int i;
+
+ DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
+ DP(BNX2X_MSG_SP, "0x0000: ");
+ for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
+ DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
+
+ /* Print 4 bytes in a line */
+ if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
+ (((i + 1) & 0x3) == 0)) {
+ DP_CONT(BNX2X_MSG_SP, "\n");
+ DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
+ }
+ }
+
+ DP_CONT(BNX2X_MSG_SP, "\n");
+}
+
+/**
+ * bnx2x_setup_rss - configure RSS
+ *
+ * @bp: device handle
+ * @p: rss configuration
+ *
+ * Sends an UPDATE ramrod for that matter.
+ */
+static int bnx2x_setup_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *p)
+{
+ struct bnx2x_rss_config_obj *o = p->rss_obj;
+ struct bnx2x_raw_obj *r = &o->raw;
+ struct eth_rss_update_ramrod_data *data =
+ (struct eth_rss_update_ramrod_data *)(r->rdata);
+ u8 rss_mode = 0;
+ int rc;
+
+ memset(data, 0, sizeof(*data));
+
+ DP(BNX2X_MSG_SP, "Configuring RSS\n");
+
+ /* Set an echo field */
+ data->echo = (r->cid & BNX2X_SWCID_MASK) |
+ (r->state << BNX2X_SWCID_SHIFT);
+
+ /* RSS mode */
+ if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
+ rss_mode = ETH_RSS_MODE_DISABLED;
+ else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
+ rss_mode = ETH_RSS_MODE_REGULAR;
+ else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
+ rss_mode = ETH_RSS_MODE_VLAN_PRI;
+ else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
+ rss_mode = ETH_RSS_MODE_E1HOV_PRI;
+ else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
+ rss_mode = ETH_RSS_MODE_IP_DSCP;
+
+ data->rss_mode = rss_mode;
+
+ DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
+
+ /* RSS capabilities */
+ if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
+
+ /* Hashing mask */
+ data->rss_result_mask = p->rss_result_mask;
+
+ /* RSS engine ID */
+ data->rss_engine_id = o->engine_id;
+
+ DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
+
+ /* Indirection table */
+ memcpy(data->indirection_table, p->ind_table,
+ T_ETH_INDIRECTION_TABLE_SIZE);
+
+ /* Remember the last configuration */
+ memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+
+ /* Print the indirection table */
+ if (netif_msg_ifup(bp))
+ bnx2x_debug_print_ind_table(bp, p);
+
+ /* RSS keys */
+ if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
+ memcpy(&data->rss_key[0], &p->rss_key[0],
+ sizeof(data->rss_key));
+ data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
+ }
+
+	/*
+	 * No need for an explicit memory barrier here: we only need to
+	 * order the write of the SPQ element against the update of the
+	 * SPQ producer, and bnx2x_sp_post() already contains the full
+	 * memory barrier required for that.
+	 */
+
+ /* Send a ramrod */
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
+ U64_HI(r->rdata_mapping),
+ U64_LO(r->rdata_mapping),
+ ETH_CONNECTION_TYPE);
+
+ if (rc < 0)
+ return rc;
+
+ return 1;
+}
+
+void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
+ u8 *ind_table)
+{
+ memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
+}
+
+int bnx2x_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *p)
+{
+ int rc;
+ struct bnx2x_rss_config_obj *o = p->rss_obj;
+ struct bnx2x_raw_obj *r = &o->raw;
+
+ /* Do nothing if only driver cleanup was requested */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
+ return 0;
+
+ r->set_pending(r);
+
+ rc = o->config_rss(bp, p);
+ if (rc < 0) {
+ r->clear_pending(r);
+ return rc;
+ }
+
+ if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
+ rc = r->wait_comp(bp, r);
+
+ return rc;
+}
+
+
+void bnx2x_init_rss_config_obj(struct bnx2x *bp,
+ struct bnx2x_rss_config_obj *rss_obj,
+ u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
+ void *rdata, dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate,
+ bnx2x_obj_type type)
+{
+ bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
+ rdata_mapping, state, pstate, type);
+
+ rss_obj->engine_id = engine_id;
+ rss_obj->config_rss = bnx2x_setup_rss;
+}
+
+/********************** Queue state object ***********************************/
+
+/**
+ * bnx2x_queue_state_change - perform Queue state change transition
+ *
+ * @bp: device handle
+ * @params: parameters to perform the transition
+ *
+ * returns 0 in case of a successfully completed transition, negative error
+ * code in case of failure, positive (EBUSY) value if there is a completion
+ * that is still pending (possible only if RAMROD_COMP_WAIT is
+ * not set in params->ramrod_flags for asynchronous commands).
+ *
+ */
+int bnx2x_queue_state_change(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ int rc, pending_bit;
+ unsigned long *pending = &o->pending;
+
+ /* Check that the requested transition is legal */
+ if (o->check_transition(bp, o, params))
+ return -EINVAL;
+
+ /* Set "pending" bit */
+ pending_bit = o->set_pending(o, params);
+
+ /* Don't send a command if only driver cleanup was requested */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
+ o->complete_cmd(bp, o, pending_bit);
+ else {
+ /* Send a ramrod */
+ rc = o->send_cmd(bp, params);
+ if (rc) {
+ o->next_state = BNX2X_Q_STATE_MAX;
+ clear_bit(pending_bit, pending);
+ smp_mb__after_clear_bit();
+ return rc;
+ }
+
+ if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
+ rc = o->wait_comp(bp, o, pending_bit);
+ if (rc)
+ return rc;
+
+ return 0;
+ }
+ }
+
+ return !!test_bit(pending_bit, pending);
+}
+
+
+static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
+ struct bnx2x_queue_state_params *params)
+{
+ enum bnx2x_queue_cmd cmd = params->cmd, bit;
+
+ /* ACTIVATE and DEACTIVATE commands are implemented on top of
+ * UPDATE command.
+ */
+ if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
+ (cmd == BNX2X_Q_CMD_DEACTIVATE))
+ bit = BNX2X_Q_CMD_UPDATE;
+ else
+ bit = cmd;
+
+ set_bit(bit, &obj->pending);
+ return bit;
+}
+
+static int bnx2x_queue_wait_comp(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ enum bnx2x_queue_cmd cmd)
+{
+ return bnx2x_state_wait(bp, cmd, &o->pending);
+}
+
+/**
+ * bnx2x_queue_comp_cmd - complete the state change command.
+ *
+ * @bp: device handle
+ * @o: queue state object
+ * @cmd: command to complete
+ *
+ * Checks that the arrived completion is expected.
+ */
+static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ enum bnx2x_queue_cmd cmd)
+{
+ unsigned long cur_pending = o->pending;
+
+ if (!test_and_clear_bit(cmd, &cur_pending)) {
+ BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
+ "pending 0x%lx, next_state %d\n", cmd,
+ o->cids[BNX2X_PRIMARY_CID_INDEX],
+ o->state, cur_pending, o->next_state);
+ return -EINVAL;
+ }
+
+ if (o->next_tx_only >= o->max_cos)
+		/* >= because tx only must always be smaller than cos since
+		 * the primary connection supports COS 0
+		 */
+ BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
+ o->next_tx_only, o->max_cos);
+
+ DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
+ "setting state to %d\n", cmd,
+ o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
+
+ if (o->next_tx_only) /* print num tx-only if any exist */
+ DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
+ o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
+
+ o->state = o->next_state;
+ o->num_tx_only = o->next_tx_only;
+ o->next_state = BNX2X_Q_STATE_MAX;
+
+ /* It's important that o->state and o->next_state are
+ * updated before o->pending.
+ */
+ wmb();
+
+ clear_bit(cmd, &o->pending);
+ smp_mb__after_clear_bit();
+
+ return 0;
+}
+
+static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *cmd_params,
+ struct client_init_ramrod_data *data)
+{
+ struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
+
+ /* Rx data */
+
+ /* IPv6 TPA supported for E2 and above only */
+ data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
+ CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
+}
+
+static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_general_setup_params *params,
+ struct client_init_general_data *gen_data,
+ unsigned long *flags)
+{
+ gen_data->client_id = o->cl_id;
+
+ if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
+ gen_data->statistics_counter_id =
+ params->stat_id;
+ gen_data->statistics_en_flg = 1;
+ gen_data->statistics_zero_flg =
+ test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
+ } else
+ gen_data->statistics_counter_id =
+ DISABLE_STATISTIC_COUNTER_ID_VALUE;
+
+ gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
+ gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
+ gen_data->sp_client_id = params->spcl_id;
+ gen_data->mtu = cpu_to_le16(params->mtu);
+ gen_data->func_id = o->func_id;
+
+
+ gen_data->cos = params->cos;
+
+ gen_data->traffic_type =
+ test_bit(BNX2X_Q_FLG_FCOE, flags) ?
+ LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
+
+ DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
+ gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
+}
+
+static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_txq_setup_params *params,
+ struct client_init_tx_data *tx_data,
+ unsigned long *flags)
+{
+ tx_data->enforce_security_flg =
+ test_bit(BNX2X_Q_FLG_TX_SEC, flags);
+ tx_data->default_vlan =
+ cpu_to_le16(params->default_vlan);
+ tx_data->default_vlan_flg =
+ test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
+ tx_data->tx_switching_flg =
+ test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
+ tx_data->anti_spoofing_flg =
+ test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
+ tx_data->tx_status_block_id = params->fw_sb_id;
+ tx_data->tx_sb_index_number = params->sb_cq_index;
+ tx_data->tss_leading_client_id = params->tss_leading_cl_id;
+
+ tx_data->tx_bd_page_base.lo =
+ cpu_to_le32(U64_LO(params->dscr_map));
+ tx_data->tx_bd_page_base.hi =
+ cpu_to_le32(U64_HI(params->dscr_map));
+
+ /* Don't configure any Tx switching mode during queue SETUP */
+ tx_data->state = 0;
+}
+
+static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
+ struct rxq_pause_params *params,
+ struct client_init_rx_data *rx_data)
+{
+ /* flow control data */
+ rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
+ rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
+ rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
+ rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
+ rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
+ rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
+ rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
+}
+
+static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_rxq_setup_params *params,
+ struct client_init_rx_data *rx_data,
+ unsigned long *flags)
+{
+ /* Rx data */
+ rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
+ CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
+ rx_data->vmqueue_mode_en_flg = 0;
+
+ rx_data->cache_line_alignment_log_size =
+ params->cache_line_log;
+ rx_data->enable_dynamic_hc =
+ test_bit(BNX2X_Q_FLG_DHC, flags);
+ rx_data->max_sges_for_packet = params->max_sges_pkt;
+ rx_data->client_qzone_id = params->cl_qzone_id;
+ rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
+
+ /* Always start in DROP_ALL mode */
+ rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
+ CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
+
+ /* We don't set drop flags */
+ rx_data->drop_ip_cs_err_flg = 0;
+ rx_data->drop_tcp_cs_err_flg = 0;
+ rx_data->drop_ttl0_flg = 0;
+ rx_data->drop_udp_cs_err_flg = 0;
+ rx_data->inner_vlan_removal_enable_flg =
+ test_bit(BNX2X_Q_FLG_VLAN, flags);
+ rx_data->outer_vlan_removal_enable_flg =
+ test_bit(BNX2X_Q_FLG_OV, flags);
+ rx_data->status_block_id = params->fw_sb_id;
+ rx_data->rx_sb_index_number = params->sb_cq_index;
+ rx_data->max_tpa_queues = params->max_tpa_queues;
+ rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
+ rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
+ rx_data->bd_page_base.lo =
+ cpu_to_le32(U64_LO(params->dscr_map));
+ rx_data->bd_page_base.hi =
+ cpu_to_le32(U64_HI(params->dscr_map));
+ rx_data->sge_page_base.lo =
+ cpu_to_le32(U64_LO(params->sge_map));
+ rx_data->sge_page_base.hi =
+ cpu_to_le32(U64_HI(params->sge_map));
+ rx_data->cqe_page_base.lo =
+ cpu_to_le32(U64_LO(params->rcq_map));
+ rx_data->cqe_page_base.hi =
+ cpu_to_le32(U64_HI(params->rcq_map));
+ rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
+
+ if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
+ rx_data->approx_mcast_engine_id = o->func_id;
+ rx_data->is_approx_mcast = 1;
+ }
+
+ rx_data->rss_engine_id = params->rss_engine_id;
+
+ /* silent vlan removal */
+ rx_data->silent_vlan_removal_flg =
+ test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
+ rx_data->silent_vlan_value =
+ cpu_to_le16(params->silent_removal_value);
+ rx_data->silent_vlan_mask =
+ cpu_to_le16(params->silent_removal_mask);
+
+}
+
+/* initialize the general, tx and rx parts of a queue object */
+static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *cmd_params,
+ struct client_init_ramrod_data *data)
+{
+ bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
+ &cmd_params->params.setup.gen_params,
+ &data->general,
+ &cmd_params->params.setup.flags);
+
+ bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
+ &cmd_params->params.setup.txq_params,
+ &data->tx,
+ &cmd_params->params.setup.flags);
+
+ bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
+ &cmd_params->params.setup.rxq_params,
+ &data->rx,
+ &cmd_params->params.setup.flags);
+
+ bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
+ &cmd_params->params.setup.pause_params,
+ &data->rx);
+}
+
+/* initialize the general and tx parts of a tx-only queue object */
+static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *cmd_params,
+ struct tx_queue_init_ramrod_data *data)
+{
+ bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
+ &cmd_params->params.tx_only.gen_params,
+ &data->general,
+ &cmd_params->params.tx_only.flags);
+
+ bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
+ &cmd_params->params.tx_only.txq_params,
+ &data->tx,
+ &cmd_params->params.tx_only.flags);
+
+	DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n",
+	   cmd_params->q_obj->cids[0],
+	   data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
+}
+
+/**
+ * bnx2x_q_init - init HW/FW queue
+ *
+ * @bp: device handle
+ * @params: queue state parameters
+ *
+ * HW/FW initial Queue configuration:
+ * - HC: Rx and Tx
+ * - CDU context validation
+ *
+ */
+static inline int bnx2x_q_init(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct bnx2x_queue_init_params *init = &params->params.init;
+ u16 hc_usec;
+ u8 cos;
+
+ /* Tx HC configuration */
+ if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
+ test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
+ hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
+
+ bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
+ init->tx.sb_cq_index,
+ !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
+ hc_usec);
+ }
+
+ /* Rx HC configuration */
+ if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
+ test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
+ hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
+
+ bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
+ init->rx.sb_cq_index,
+ !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
+ hc_usec);
+ }
+
+ /* Set CDU context validation values */
+ for (cos = 0; cos < o->max_cos; cos++) {
+ DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
+ o->cids[cos], cos);
+ DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
+ bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
+ }
+
+ /* As no ramrod is sent, complete the command immediately */
+ o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
+
+ mmiowb();
+ smp_mb();
+
+ return 0;
+}
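+
+/*
+ * Illustrative only (not part of this patch): a minimal caller sketch
+ * for the INIT command above, assuming a queue object set up with
+ * bnx2x_init_queue_obj() and the generic bnx2x_queue_state_change()
+ * wrapper; 'fp' is a hypothetical fastpath structure:
+ *
+ *	struct bnx2x_queue_state_params q_params = {0};
+ *
+ *	q_params.q_obj = &fp->q_obj;
+ *	q_params.cmd = BNX2X_Q_CMD_INIT;
+ *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ *	q_params.params.init.rx.fw_sb_id = fp->fw_sb_id;
+ *	q_params.params.init.tx.fw_sb_id = fp->fw_sb_id;
+ *	rc = bnx2x_queue_state_change(bp, &q_params);
+ */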
+
+static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct client_init_ramrod_data *rdata =
+ (struct client_init_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
+
+ /*
+ * No explicit memory barrier is needed here: the ordering of the
+ * write to the SPQ element vs. the SPQ producer update (which
+ * involves a memory read) must be ensured anyway, and the full
+ * memory barrier inside bnx2x_sp_post() takes care of both.
+ */
+
+ return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct client_init_ramrod_data *rdata =
+ (struct client_init_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
+ bnx2x_q_fill_setup_data_e2(bp, params, rdata);
+
+ /*
+ * No explicit memory barrier is needed here: the ordering of the
+ * write to the SPQ element vs. the SPQ producer update (which
+ * involves a memory read) must be ensured anyway, and the full
+ * memory barrier inside bnx2x_sp_post() takes care of both.
+ */
+
+ return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct tx_queue_init_ramrod_data *rdata =
+ (struct tx_queue_init_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
+ struct bnx2x_queue_setup_tx_only_params *tx_only_params =
+ &params->params.tx_only;
+ u8 cid_index = tx_only_params->cid_index;
+
+ if (cid_index >= o->max_cos) {
+ BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+ o->cl_id, cid_index);
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
+ tx_only_params->gen_params.cos,
+ tx_only_params->gen_params.spcl_id);
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_setup_tx_only(bp, params, rdata);
+
+ DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d,"
+ "sp-client id %d, cos %d\n",
+ o->cids[cid_index],
+ rdata->general.client_id,
+ rdata->general.sp_client_id, rdata->general.cos);
+
+ /*
+ * No explicit memory barrier is needed here: the ordering of the
+ * write to the SPQ element vs. the SPQ producer update (which
+ * involves a memory read) must be ensured anyway, and the full
+ * memory barrier inside bnx2x_sp_post() takes care of both.
+ */
+
+ return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+static void bnx2x_q_fill_update_data(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj,
+ struct bnx2x_queue_update_params *params,
+ struct client_update_ramrod_data *data)
+{
+ /* Client ID of the client to update */
+ data->client_id = obj->cl_id;
+
+ /* Function ID of the client to update */
+ data->func_id = obj->func_id;
+
+ /* Default VLAN value */
+ data->default_vlan = cpu_to_le16(params->def_vlan);
+
+ /* Inner VLAN stripping */
+ data->inner_vlan_removal_enable_flg =
+ test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
+ data->inner_vlan_removal_change_flg =
+ test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
+ &params->update_flags);
+
+ /* Outer VLAN stripping */
+ data->outer_vlan_removal_enable_flg =
+ test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
+ data->outer_vlan_removal_change_flg =
+ test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
+ &params->update_flags);
+
+ /* Drop packets that have source MAC that doesn't belong to this
+ * Queue.
+ */
+ data->anti_spoofing_enable_flg =
+ test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
+ data->anti_spoofing_change_flg =
+ test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
+
+ /* Activate/Deactivate */
+ data->activate_flg =
+ test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
+ data->activate_change_flg =
+ test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
+
+ /* Enable default VLAN */
+ data->default_vlan_enable_flg =
+ test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
+ data->default_vlan_change_flg =
+ test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ &params->update_flags);
+
+ /* silent vlan removal */
+ data->silent_vlan_change_flg =
+ test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ &params->update_flags);
+ data->silent_vlan_removal_flg =
+ test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
+ data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
+ data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
+}
+
+static inline int bnx2x_q_send_update(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct client_update_ramrod_data *rdata =
+ (struct client_update_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_queue_update_params *update_params =
+ &params->params.update;
+ u8 cid_index = update_params->cid_index;
+
+ if (cid_index >= o->max_cos) {
+ BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+ o->cl_id, cid_index);
+ return -EINVAL;
+ }
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_update_data(bp, o, update_params, rdata);
+
+ /*
+ * No explicit memory barrier is needed here: the ordering of the
+ * write to the SPQ element vs. the SPQ producer update (which
+ * involves a memory read) must be ensured anyway, and the full
+ * memory barrier inside bnx2x_sp_post() takes care of both.
+ */
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
+ o->cids[cid_index], U64_HI(data_mapping),
+ U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+/**
+ * bnx2x_q_send_deactivate - send DEACTIVATE command
+ *
+ * @bp: device handle
+ * @params:
+ *
+ * implemented using the UPDATE command.
+ */
+static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_update_params *update = &params->params.update;
+
+ memset(update, 0, sizeof(*update));
+
+ __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
+
+ return bnx2x_q_send_update(bp, params);
+}
+
+/**
+ * bnx2x_q_send_activate - send ACTIVATE command
+ *
+ * @bp: device handle
+ * @params:
+ *
+ * implemented using the UPDATE command.
+ */
+static inline int bnx2x_q_send_activate(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_update_params *update = &params->params.update;
+
+ memset(update, 0, sizeof(*update));
+
+ __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
+
+ return bnx2x_q_send_update(bp, params);
+}
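+
+/*
+ * The ACTIVATE/DEACTIVATE helpers above show the general UPDATE
+ * pattern: every property comes as a value flag plus a matching _CHNG
+ * flag, and only properties whose _CHNG bit is set are applied. An
+ * illustrative sketch (hypothetical 'vlan_id'; q_params prepared as in
+ * the INIT sketch above) enabling a default VLAN the same way:
+ *
+ *	struct bnx2x_queue_update_params *update = &q_params.params.update;
+ *
+ *	memset(update, 0, sizeof(*update));
+ *	update->def_vlan = vlan_id;
+ *	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &update->update_flags);
+ *	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, &update->update_flags);
+ *	q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ *	rc = bnx2x_queue_state_change(bp, &q_params);
+ */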
+
+static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ /* TODO: Not implemented yet. */
+ return -1;
+}
+
+static inline int bnx2x_q_send_halt(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
+ o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
+ ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ u8 cid_idx = params->params.cfc_del.cid_index;
+
+ if (cid_idx >= o->max_cos) {
+ BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+ o->cl_id, cid_idx);
+ return -EINVAL;
+ }
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
+ o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ u8 cid_index = params->params.terminate.cid_index;
+
+ if (cid_index >= o->max_cos) {
+ BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+ o->cl_id, cid_index);
+ return -EINVAL;
+ }
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
+ o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_empty(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
+ o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
+ ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ switch (params->cmd) {
+ case BNX2X_Q_CMD_INIT:
+ return bnx2x_q_init(bp, params);
+ case BNX2X_Q_CMD_SETUP_TX_ONLY:
+ return bnx2x_q_send_setup_tx_only(bp, params);
+ case BNX2X_Q_CMD_DEACTIVATE:
+ return bnx2x_q_send_deactivate(bp, params);
+ case BNX2X_Q_CMD_ACTIVATE:
+ return bnx2x_q_send_activate(bp, params);
+ case BNX2X_Q_CMD_UPDATE:
+ return bnx2x_q_send_update(bp, params);
+ case BNX2X_Q_CMD_UPDATE_TPA:
+ return bnx2x_q_send_update_tpa(bp, params);
+ case BNX2X_Q_CMD_HALT:
+ return bnx2x_q_send_halt(bp, params);
+ case BNX2X_Q_CMD_CFC_DEL:
+ return bnx2x_q_send_cfc_del(bp, params);
+ case BNX2X_Q_CMD_TERMINATE:
+ return bnx2x_q_send_terminate(bp, params);
+ case BNX2X_Q_CMD_EMPTY:
+ return bnx2x_q_send_empty(bp, params);
+ default:
+ BNX2X_ERR("Unknown command: %d\n", params->cmd);
+ return -EINVAL;
+ }
+}
+
+static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ switch (params->cmd) {
+ case BNX2X_Q_CMD_SETUP:
+ return bnx2x_q_send_setup_e1x(bp, params);
+ case BNX2X_Q_CMD_INIT:
+ case BNX2X_Q_CMD_SETUP_TX_ONLY:
+ case BNX2X_Q_CMD_DEACTIVATE:
+ case BNX2X_Q_CMD_ACTIVATE:
+ case BNX2X_Q_CMD_UPDATE:
+ case BNX2X_Q_CMD_UPDATE_TPA:
+ case BNX2X_Q_CMD_HALT:
+ case BNX2X_Q_CMD_CFC_DEL:
+ case BNX2X_Q_CMD_TERMINATE:
+ case BNX2X_Q_CMD_EMPTY:
+ return bnx2x_queue_send_cmd_cmn(bp, params);
+ default:
+ BNX2X_ERR("Unknown command: %d\n", params->cmd);
+ return -EINVAL;
+ }
+}
+
+static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ switch (params->cmd) {
+ case BNX2X_Q_CMD_SETUP:
+ return bnx2x_q_send_setup_e2(bp, params);
+ case BNX2X_Q_CMD_INIT:
+ case BNX2X_Q_CMD_SETUP_TX_ONLY:
+ case BNX2X_Q_CMD_DEACTIVATE:
+ case BNX2X_Q_CMD_ACTIVATE:
+ case BNX2X_Q_CMD_UPDATE:
+ case BNX2X_Q_CMD_UPDATE_TPA:
+ case BNX2X_Q_CMD_HALT:
+ case BNX2X_Q_CMD_CFC_DEL:
+ case BNX2X_Q_CMD_TERMINATE:
+ case BNX2X_Q_CMD_EMPTY:
+ return bnx2x_queue_send_cmd_cmn(bp, params);
+ default:
+ BNX2X_ERR("Unknown command: %d\n", params->cmd);
+ return -EINVAL;
+ }
+}
+
+/**
+ * bnx2x_queue_chk_transition - check state machine of a regular Queue
+ *
+ * @bp: device handle
+ * @o:
+ * @params:
+ *
+ * (not Forwarding)
+ * It both checks if the requested command is legal in the current
+ * state and, if it's legal, sets a `next_state' in the object
+ * that will be used in the completion flow to set the `state'
+ * of the object.
+ *
+ * returns 0 if a requested command is a legal transition,
+ * -EINVAL otherwise.
+ */
+static int bnx2x_queue_chk_transition(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_queue_state_params *params)
+{
+ enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
+ enum bnx2x_queue_cmd cmd = params->cmd;
+ struct bnx2x_queue_update_params *update_params =
+ &params->params.update;
+ u8 next_tx_only = o->num_tx_only;
+
+ /*
+ * Forget all commands pending for completion if a driver-only state
+ * transition has been requested.
+ */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+ o->pending = 0;
+ o->next_state = BNX2X_Q_STATE_MAX;
+ }
+
+ /*
+ * Don't allow a next state transition if we are in the middle of
+ * the previous one.
+ */
+ if (o->pending)
+ return -EBUSY;
+
+ switch (state) {
+ case BNX2X_Q_STATE_RESET:
+ if (cmd == BNX2X_Q_CMD_INIT)
+ next_state = BNX2X_Q_STATE_INITIALIZED;
+
+ break;
+ case BNX2X_Q_STATE_INITIALIZED:
+ if (cmd == BNX2X_Q_CMD_SETUP) {
+ if (test_bit(BNX2X_Q_FLG_ACTIVE,
+ &params->params.setup.flags))
+ next_state = BNX2X_Q_STATE_ACTIVE;
+ else
+ next_state = BNX2X_Q_STATE_INACTIVE;
+ }
+
+ break;
+ case BNX2X_Q_STATE_ACTIVE:
+ if (cmd == BNX2X_Q_CMD_DEACTIVATE)
+ next_state = BNX2X_Q_STATE_INACTIVE;
+
+ else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+ (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+ next_state = BNX2X_Q_STATE_ACTIVE;
+
+ else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+ next_tx_only = 1;
+ }
+
+ else if (cmd == BNX2X_Q_CMD_HALT)
+ next_state = BNX2X_Q_STATE_STOPPED;
+
+ else if (cmd == BNX2X_Q_CMD_UPDATE) {
+ /* If "active" state change is requested, update the
+ * state accordingly.
+ */
+ if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+ &update_params->update_flags) &&
+ !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+ &update_params->update_flags))
+ next_state = BNX2X_Q_STATE_INACTIVE;
+ else
+ next_state = BNX2X_Q_STATE_ACTIVE;
+ }
+
+ break;
+ case BNX2X_Q_STATE_MULTI_COS:
+ if (cmd == BNX2X_Q_CMD_TERMINATE)
+ next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
+
+ else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+ next_tx_only = o->num_tx_only + 1;
+ }
+
+ else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+ (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+
+ else if (cmd == BNX2X_Q_CMD_UPDATE) {
+ /* If "active" state change is requested, update the
+ * state accordingly.
+ */
+ if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+ &update_params->update_flags) &&
+ !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+ &update_params->update_flags))
+ next_state = BNX2X_Q_STATE_INACTIVE;
+ else
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+ }
+
+ break;
+ case BNX2X_Q_STATE_MCOS_TERMINATED:
+ if (cmd == BNX2X_Q_CMD_CFC_DEL) {
+ next_tx_only = o->num_tx_only - 1;
+ if (next_tx_only == 0)
+ next_state = BNX2X_Q_STATE_ACTIVE;
+ else
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+ }
+
+ break;
+ case BNX2X_Q_STATE_INACTIVE:
+ if (cmd == BNX2X_Q_CMD_ACTIVATE)
+ next_state = BNX2X_Q_STATE_ACTIVE;
+
+ else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+ (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+ next_state = BNX2X_Q_STATE_INACTIVE;
+
+ else if (cmd == BNX2X_Q_CMD_HALT)
+ next_state = BNX2X_Q_STATE_STOPPED;
+
+ else if (cmd == BNX2X_Q_CMD_UPDATE) {
+ /* If "active" state change is requested, update the
+ * state accordingly.
+ */
+ if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+ &update_params->update_flags) &&
+ test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+ &update_params->update_flags)) {
+ if (o->num_tx_only == 0)
+ next_state = BNX2X_Q_STATE_ACTIVE;
+ else /* tx only queues exist for this queue */
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+ } else
+ next_state = BNX2X_Q_STATE_INACTIVE;
+ }
+
+ break;
+ case BNX2X_Q_STATE_STOPPED:
+ if (cmd == BNX2X_Q_CMD_TERMINATE)
+ next_state = BNX2X_Q_STATE_TERMINATED;
+
+ break;
+ case BNX2X_Q_STATE_TERMINATED:
+ if (cmd == BNX2X_Q_CMD_CFC_DEL)
+ next_state = BNX2X_Q_STATE_RESET;
+
+ break;
+ default:
+ BNX2X_ERR("Illegal state: %d\n", state);
+ }
+
+ /* Transition is assured */
+ if (next_state != BNX2X_Q_STATE_MAX) {
+ DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
+ state, cmd, next_state);
+ o->next_state = next_state;
+ o->next_tx_only = next_tx_only;
+ return 0;
+ }
+
+ DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
+
+ return -EINVAL;
+}
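+
+/*
+ * Summary of the legal transitions checked above (EMPTY and UPDATE_TPA
+ * keep the current state, and UPDATE may additionally move a queue
+ * between the ACTIVE/MULTI_COS and INACTIVE states):
+ *
+ *	RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
+ *	INACTIVE --ACTIVATE--> ACTIVE --DEACTIVATE--> INACTIVE
+ *	ACTIVE/INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
+ *	TERMINATED --CFC_DEL--> RESET
+ *	ACTIVE/MULTI_COS --SETUP_TX_ONLY--> MULTI_COS
+ *	MULTI_COS --TERMINATE--> MCOS_TERMINATED --CFC_DEL--> back to
+ *	ACTIVE or MULTI_COS, depending on the remaining tx-only cids
+ */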
+
+void bnx2x_init_queue_obj(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj,
+ u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
+ void *rdata,
+ dma_addr_t rdata_mapping, unsigned long type)
+{
+ memset(obj, 0, sizeof(*obj));
+
+ /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
+ BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
+
+ memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
+ obj->max_cos = cid_cnt;
+ obj->cl_id = cl_id;
+ obj->func_id = func_id;
+ obj->rdata = rdata;
+ obj->rdata_mapping = rdata_mapping;
+ obj->type = type;
+ obj->next_state = BNX2X_Q_STATE_MAX;
+
+ if (CHIP_IS_E1x(bp))
+ obj->send_cmd = bnx2x_queue_send_cmd_e1x;
+ else
+ obj->send_cmd = bnx2x_queue_send_cmd_e2;
+
+ obj->check_transition = bnx2x_queue_chk_transition;
+
+ obj->complete_cmd = bnx2x_queue_comp_cmd;
+ obj->wait_comp = bnx2x_queue_wait_comp;
+ obj->set_pending = bnx2x_queue_set_pending;
+}
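+
+/*
+ * Illustrative initialization sketch ('fp' and the 'q_rdata' slot are
+ * hypothetical, not part of this patch): a single-CoS L2 queue object
+ * carrying both Rx and Tx could be set up as:
+ *
+ *	unsigned long q_type = 0;
+ *	u32 cids[1] = { fp->cid };
+ *
+ *	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+ *	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+ *	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, 1,
+ *			     BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ *			     bnx2x_sp_mapping(bp, q_rdata), q_type);
+ */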
+
+void bnx2x_queue_set_cos_cid(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj,
+ u32 cid, u8 index)
+{
+ obj->cids[index] = cid;
+}
+
+/********************** Function state object *********************************/
+enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o)
+{
+ /* in the middle of transaction - return INVALID state */
+ if (o->pending)
+ return BNX2X_F_STATE_MAX;
+
+ /*
+ * Ensure the order of reading of o->pending and o->state:
+ * o->pending should be read first.
+ */
+ rmb();
+
+ return o->state;
+}
+
+static int bnx2x_func_wait_comp(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ enum bnx2x_func_cmd cmd)
+{
+ return bnx2x_state_wait(bp, cmd, &o->pending);
+}
+
+/**
+ * bnx2x_func_state_change_comp - complete the state machine transition
+ *
+ * @bp: device handle
+ * @o:
+ * @cmd:
+ *
+ * Called on state change transition. Completes the state
+ * machine transition only - no HW interaction.
+ */
+static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ enum bnx2x_func_cmd cmd)
+{
+ unsigned long cur_pending = o->pending;
+
+ if (!test_and_clear_bit(cmd, &cur_pending)) {
+ BNX2X_ERR("Bad MC reply %d for func %d in state %d "
+ "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
+ o->state, cur_pending, o->next_state);
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_SP,
+ "Completing command %d for func %d, setting state to %d\n",
+ cmd, BP_FUNC(bp), o->next_state);
+
+ o->state = o->next_state;
+ o->next_state = BNX2X_F_STATE_MAX;
+
+ /* It's important that o->state and o->next_state are
+ * updated before o->pending.
+ */
+ wmb();
+
+ clear_bit(cmd, &o->pending);
+ smp_mb__after_clear_bit();
+
+ return 0;
+}
+
+/**
+ * bnx2x_func_comp_cmd - complete the state change command
+ *
+ * @bp: device handle
+ * @o:
+ * @cmd:
+ *
+ * Checks that the arrived completion is expected.
+ */
+static int bnx2x_func_comp_cmd(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ enum bnx2x_func_cmd cmd)
+{
+ /* Complete the state machine part first, check if it's a
+ * legal completion.
+ */
+ return bnx2x_func_state_change_comp(bp, o, cmd);
+}
+
+/**
+ * bnx2x_func_chk_transition - perform function state machine transition
+ *
+ * @bp: device handle
+ * @o:
+ * @params:
+ *
+ * It both checks if the requested command is legal in the current
+ * state and, if it's legal, sets a `next_state' in the object
+ * that will be used in the completion flow to set the `state'
+ * of the object.
+ *
+ * returns 0 if a requested command is a legal transition,
+ * -EINVAL otherwise.
+ */
+static int bnx2x_func_chk_transition(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ struct bnx2x_func_state_params *params)
+{
+ enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
+ enum bnx2x_func_cmd cmd = params->cmd;
+
+ /*
+ * Forget all commands pending for completion if a driver-only state
+ * transition has been requested.
+ */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+ o->pending = 0;
+ o->next_state = BNX2X_F_STATE_MAX;
+ }
+
+ /*
+ * Don't allow a next state transition if we are in the middle of
+ * the previous one.
+ */
+ if (o->pending)
+ return -EBUSY;
+
+ switch (state) {
+ case BNX2X_F_STATE_RESET:
+ if (cmd == BNX2X_F_CMD_HW_INIT)
+ next_state = BNX2X_F_STATE_INITIALIZED;
+
+ break;
+ case BNX2X_F_STATE_INITIALIZED:
+ if (cmd == BNX2X_F_CMD_START)
+ next_state = BNX2X_F_STATE_STARTED;
+
+ else if (cmd == BNX2X_F_CMD_HW_RESET)
+ next_state = BNX2X_F_STATE_RESET;
+
+ break;
+ case BNX2X_F_STATE_STARTED:
+ if (cmd == BNX2X_F_CMD_STOP)
+ next_state = BNX2X_F_STATE_INITIALIZED;
+ else if (cmd == BNX2X_F_CMD_TX_STOP)
+ next_state = BNX2X_F_STATE_TX_STOPPED;
+
+ break;
+ case BNX2X_F_STATE_TX_STOPPED:
+ if (cmd == BNX2X_F_CMD_TX_START)
+ next_state = BNX2X_F_STATE_STARTED;
+
+ break;
+ default:
+ BNX2X_ERR("Unknown state: %d\n", state);
+ }
+
+ /* Transition is assured */
+ if (next_state != BNX2X_F_STATE_MAX) {
+ DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
+ state, cmd, next_state);
+ o->next_state = next_state;
+ return 0;
+ }
+
+ DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
+ state, cmd);
+
+ return -EINVAL;
+}
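+
+/*
+ * The function state machine checked above, in summary:
+ *
+ *	RESET --HW_INIT--> INITIALIZED --START--> STARTED
+ *	STARTED --STOP--> INITIALIZED --HW_RESET--> RESET
+ *	STARTED --TX_STOP--> TX_STOPPED --TX_START--> STARTED
+ */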
+
+/**
+ * bnx2x_func_init_func - performs HW init at function stage
+ *
+ * @bp: device handle
+ * @drv:
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize FUNCTION-only
+ * HW blocks.
+ */
+static inline int bnx2x_func_init_func(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ return drv->init_hw_func(bp);
+}
+
+/**
+ * bnx2x_func_init_port - performs HW init at port stage
+ *
+ * @bp: device handle
+ * @drv:
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
+ * FUNCTION-only HW blocks.
+ *
+ */
+static inline int bnx2x_func_init_port(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ int rc = drv->init_hw_port(bp);
+ if (rc)
+ return rc;
+
+ return bnx2x_func_init_func(bp, drv);
+}
+
+/**
+ * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
+ *
+ * @bp: device handle
+ * @drv:
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
+ * PORT-only and FUNCTION-only HW blocks.
+ */
+static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ int rc = drv->init_hw_cmn_chip(bp);
+ if (rc)
+ return rc;
+
+ return bnx2x_func_init_port(bp, drv);
+}
+
+/**
+ * bnx2x_func_init_cmn - performs HW init at common stage
+ *
+ * @bp: device handle
+ * @drv:
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
+ * PORT-only and FUNCTION-only HW blocks.
+ */
+static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ int rc = drv->init_hw_cmn(bp);
+ if (rc)
+ return rc;
+
+ return bnx2x_func_init_port(bp, drv);
+}
+
+static int bnx2x_func_hw_init(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ u32 load_code = params->params.hw_init.load_phase;
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ const struct bnx2x_func_sp_drv_ops *drv = o->drv;
+ int rc = 0;
+
+ DP(BNX2X_MSG_SP, "function %d load_code %x\n",
+ BP_ABS_FUNC(bp), load_code);
+
+ /* Prepare buffers for unzipping the FW */
+ rc = drv->gunzip_init(bp);
+ if (rc)
+ return rc;
+
+ /* Prepare FW */
+ rc = drv->init_fw(bp);
+ if (rc) {
+ BNX2X_ERR("Error loading firmware\n");
+ goto fw_init_err;
+ }
+
+ /* Handle the beginning of COMMON_XXX phases separately... */
+ switch (load_code) {
+ case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
+ rc = bnx2x_func_init_cmn_chip(bp, drv);
+ if (rc)
+ goto init_hw_err;
+
+ break;
+ case FW_MSG_CODE_DRV_LOAD_COMMON:
+ rc = bnx2x_func_init_cmn(bp, drv);
+ if (rc)
+ goto init_hw_err;
+
+ break;
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ rc = bnx2x_func_init_port(bp, drv);
+ if (rc)
+ goto init_hw_err;
+
+ break;
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ rc = bnx2x_func_init_func(bp, drv);
+ if (rc)
+ goto init_hw_err;
+
+ break;
+ default:
+ BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+ rc = -EINVAL;
+ }
+
+init_hw_err:
+ drv->release_fw(bp);
+
+fw_init_err:
+ drv->gunzip_end(bp);
+
+ /* In case of success, complete the command immediately: no ramrods
+ * have been sent.
+ */
+ if (!rc)
+ o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
+
+ return rc;
+}
+
+/**
+ * bnx2x_func_reset_func - reset HW at function stage
+ *
+ * @bp: device handle
+ * @drv:
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
+ * FUNCTION-only HW blocks.
+ */
+static inline void bnx2x_func_reset_func(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ drv->reset_hw_func(bp);
+}
+
+/**
+ * bnx2x_func_reset_port - reset HW at port stage
+ *
+ * @bp: device handle
+ * @drv:
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
+ * FUNCTION-only and PORT-only HW blocks.
+ *
+ * !!!IMPORTANT!!!
+ *
+ * It's important to call reset_port before reset_func() as the last thing
+ * reset_func() does is pf_disable(), which disables PGLUE_B and thus
+ * makes any further DMAE transactions impossible.
+ */
+static inline void bnx2x_func_reset_port(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ drv->reset_hw_port(bp);
+ bnx2x_func_reset_func(bp, drv);
+}
+
+/**
+ * bnx2x_func_reset_cmn - reset HW at common stage
+ *
+ * @bp: device handle
+ * @drv:
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
+ * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
+ * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
+ */
+static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ bnx2x_func_reset_port(bp, drv);
+ drv->reset_hw_cmn(bp);
+}
+
+static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ u32 reset_phase = params->params.hw_reset.reset_phase;
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ const struct bnx2x_func_sp_drv_ops *drv = o->drv;
+
+ DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
+ reset_phase);
+
+ switch (reset_phase) {
+ case FW_MSG_CODE_DRV_UNLOAD_COMMON:
+ bnx2x_func_reset_cmn(bp, drv);
+ break;
+ case FW_MSG_CODE_DRV_UNLOAD_PORT:
+ bnx2x_func_reset_port(bp, drv);
+ break;
+ case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
+ bnx2x_func_reset_func(bp, drv);
+ break;
+ default:
+ BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
+ reset_phase);
+ break;
+ }
+
+ /* Complete the command immediately: no ramrods have been sent. */
+ o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
+
+ return 0;
+}
+
+static inline int bnx2x_func_send_start(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ struct function_start_data *rdata =
+ (struct function_start_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_func_start_params *start_params = &params->params.start;
+
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data with provided parameters */
+ rdata->function_mode = cpu_to_le16(start_params->mf_mode);
+ rdata->sd_vlan_tag = start_params->sd_vlan_tag;
+ rdata->path_id = BP_PATH(bp);
+ rdata->network_cos_mode = start_params->network_cos_mode;
+
+ /*
+ * No explicit memory barrier is needed here: the ordering of the
+ * write to the SPQ element vs. the SPQ producer update (which
+ * involves a memory read) must be ensured anyway, and the full
+ * memory barrier inside bnx2x_sp_post() takes care of both.
+ */
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_stop(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
+ NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
+ NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ struct flow_control_configuration *rdata =
+ (struct flow_control_configuration *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_func_tx_start_params *tx_start_params =
+ &params->params.tx_start;
+ int i;
+
+ memset(rdata, 0, sizeof(*rdata));
+
+ rdata->dcb_enabled = tx_start_params->dcb_enabled;
+ rdata->dcb_version = tx_start_params->dcb_version;
+ rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
+
+ for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
+ rdata->traffic_type_to_priority_cos[i] =
+ tx_start_params->traffic_type_to_priority_cos[i];
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static int bnx2x_func_send_cmd(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ switch (params->cmd) {
+ case BNX2X_F_CMD_HW_INIT:
+ return bnx2x_func_hw_init(bp, params);
+ case BNX2X_F_CMD_START:
+ return bnx2x_func_send_start(bp, params);
+ case BNX2X_F_CMD_STOP:
+ return bnx2x_func_send_stop(bp, params);
+ case BNX2X_F_CMD_HW_RESET:
+ return bnx2x_func_hw_reset(bp, params);
+ case BNX2X_F_CMD_TX_STOP:
+ return bnx2x_func_send_tx_stop(bp, params);
+ case BNX2X_F_CMD_TX_START:
+ return bnx2x_func_send_tx_start(bp, params);
+ default:
+ BNX2X_ERR("Unknown command: %d\n", params->cmd);
+ return -EINVAL;
+ }
+}
+
+void bnx2x_init_func_obj(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *obj,
+ void *rdata, dma_addr_t rdata_mapping,
+ struct bnx2x_func_sp_drv_ops *drv_iface)
+{
+ memset(obj, 0, sizeof(*obj));
+
+ mutex_init(&obj->one_pending_mutex);
+
+ obj->rdata = rdata;
+ obj->rdata_mapping = rdata_mapping;
+
+ obj->send_cmd = bnx2x_func_send_cmd;
+ obj->check_transition = bnx2x_func_chk_transition;
+ obj->complete_cmd = bnx2x_func_comp_cmd;
+ obj->wait_comp = bnx2x_func_wait_comp;
+
+ obj->drv = drv_iface;
+}
+
+/**
+ * bnx2x_func_state_change - perform Function state change transition
+ *
+ * @bp: device handle
+ * @params: parameters to perform the transaction
+ *
+ * returns 0 in case of successfully completed transition,
+ * negative error code in case of failure, positive
+ * (EBUSY) value if there is a completion that is
+ * still pending (possible only if RAMROD_COMP_WAIT is
+ * not set in params->ramrod_flags for asynchronous
+ * commands).
+ */
+int bnx2x_func_state_change(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ int rc;
+ enum bnx2x_func_cmd cmd = params->cmd;
+ unsigned long *pending = &o->pending;
+
+ mutex_lock(&o->one_pending_mutex);
+
+ /* Check that the requested transition is legal */
+ if (o->check_transition(bp, o, params)) {
+ mutex_unlock(&o->one_pending_mutex);
+ return -EINVAL;
+ }
+
+ /* Set "pending" bit */
+ set_bit(cmd, pending);
+
+ /* Don't send a command if only driver cleanup was requested */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+ bnx2x_func_state_change_comp(bp, o, cmd);
+ mutex_unlock(&o->one_pending_mutex);
+ } else {
+ /* Send a ramrod */
+ rc = o->send_cmd(bp, params);
+
+ mutex_unlock(&o->one_pending_mutex);
+
+ if (rc) {
+ o->next_state = BNX2X_F_STATE_MAX;
+ clear_bit(cmd, pending);
+ smp_mb__after_clear_bit();
+ return rc;
+ }
+
+ if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
+ rc = o->wait_comp(bp, o, cmd);
+ if (rc)
+ return rc;
+
+ return 0;
+ }
+ }
+
+ return !!test_bit(cmd, pending);
+}
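+
+/*
+ * Illustrative only: a typical HW_INIT request through the state
+ * machine above, assuming a function object initialized with
+ * bnx2x_init_func_obj() ('bp->func_obj' and 'load_code' are
+ * assumptions of this sketch):
+ *
+ *	struct bnx2x_func_state_params func_params = {0};
+ *
+ *	func_params.f_obj = &bp->func_obj;
+ *	func_params.cmd = BNX2X_F_CMD_HW_INIT;
+ *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ *	func_params.params.hw_init.load_phase = load_code;
+ *	rc = bnx2x_func_state_change(bp, &func_params);
+ */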
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
new file mode 100644
index 000000000000..9a517c2e9f1b
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -0,0 +1,1297 @@
+/* bnx2x_sp.h: Broadcom Everest network driver.
+ *
+ * Copyright 2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Vladislav Zolotarov
+ *
+ */
+#ifndef BNX2X_SP_VERBS
+#define BNX2X_SP_VERBS
+
+struct bnx2x;
+struct eth_context;
+
+/* Bits representing general command's configuration */
+enum {
+ RAMROD_TX,
+ RAMROD_RX,
+ /* Wait until all pending commands complete */
+ RAMROD_COMP_WAIT,
+ /* Don't send a ramrod, only update a registry */
+ RAMROD_DRV_CLR_ONLY,
+ /* Configure HW according to the current object state */
+ RAMROD_RESTORE,
+ /* Execute the next command now */
+ RAMROD_EXEC,
+ /*
+ * Don't add a new command and continue execution of postponed
+ * commands. If not set, a new command will be added to the
+ * pending commands list.
+ */
+ RAMROD_CONT,
+};
+
+typedef enum {
+ BNX2X_OBJ_TYPE_RX,
+ BNX2X_OBJ_TYPE_TX,
+ BNX2X_OBJ_TYPE_RX_TX,
+} bnx2x_obj_type;
+
+/* Filtering states */
+enum {
+ BNX2X_FILTER_MAC_PENDING,
+ BNX2X_FILTER_VLAN_PENDING,
+ BNX2X_FILTER_VLAN_MAC_PENDING,
+ BNX2X_FILTER_RX_MODE_PENDING,
+ BNX2X_FILTER_RX_MODE_SCHED,
+ BNX2X_FILTER_ISCSI_ETH_START_SCHED,
+ BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
+ BNX2X_FILTER_FCOE_ETH_START_SCHED,
+ BNX2X_FILTER_FCOE_ETH_STOP_SCHED,
+ BNX2X_FILTER_MCAST_PENDING,
+ BNX2X_FILTER_MCAST_SCHED,
+ BNX2X_FILTER_RSS_CONF_PENDING,
+};
+
+struct bnx2x_raw_obj {
+ u8 func_id;
+
+ /* Queue params */
+ u8 cl_id;
+ u32 cid;
+
+ /* Ramrod data buffer params */
+ void *rdata;
+ dma_addr_t rdata_mapping;
+
+ /* Ramrod state params */
+ int state; /* "ramrod is pending" state bit */
+ unsigned long *pstate; /* pointer to state buffer */
+
+ bnx2x_obj_type obj_type;
+
+ int (*wait_comp)(struct bnx2x *bp,
+ struct bnx2x_raw_obj *o);
+
+ bool (*check_pending)(struct bnx2x_raw_obj *o);
+ void (*clear_pending)(struct bnx2x_raw_obj *o);
+ void (*set_pending)(struct bnx2x_raw_obj *o);
+};
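+
+/*
+ * A raw object tracks "a ramrod is pending" as a single bit (state)
+ * inside a caller-supplied bitmap (pstate). A minimal sketch of what
+ * the pending callbacks typically look like (illustrative; the real
+ * helpers live in bnx2x_sp.c):
+ *
+ *	static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
+ *	{
+ *		return !!test_bit(o->state, o->pstate);
+ *	}
+ *
+ *	static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
+ *	{
+ *		smp_mb__before_clear_bit();
+ *		clear_bit(o->state, o->pstate);
+ *		smp_mb__after_clear_bit();
+ *	}
+ */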
+
+/************************* VLAN-MAC commands related parameters ***************/
+struct bnx2x_mac_ramrod_data {
+ u8 mac[ETH_ALEN];
+};
+
+struct bnx2x_vlan_ramrod_data {
+ u16 vlan;
+};
+
+struct bnx2x_vlan_mac_ramrod_data {
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+};
+
+union bnx2x_classification_ramrod_data {
+ struct bnx2x_mac_ramrod_data mac;
+ struct bnx2x_vlan_ramrod_data vlan;
+ struct bnx2x_vlan_mac_ramrod_data vlan_mac;
+};
+
+/* VLAN_MAC commands */
+enum bnx2x_vlan_mac_cmd {
+ BNX2X_VLAN_MAC_ADD,
+ BNX2X_VLAN_MAC_DEL,
+ BNX2X_VLAN_MAC_MOVE,
+};
+
+struct bnx2x_vlan_mac_data {
+ /* Requested command: BNX2X_VLAN_MAC_XX */
+ enum bnx2x_vlan_mac_cmd cmd;
+ /*
+ * Used to hold the data-related vlan_mac_flags bits taken from
+ * the ramrod parameters.
+ */
+ unsigned long vlan_mac_flags;
+
+ /* Needed for MOVE command */
+ struct bnx2x_vlan_mac_obj *target_obj;
+
+ union bnx2x_classification_ramrod_data u;
+};
+
+/*************************** Exe Queue obj ************************************/
+union bnx2x_exe_queue_cmd_data {
+ struct bnx2x_vlan_mac_data vlan_mac;
+
+ struct {
+ /* TODO */
+ } mcast;
+};
+
+struct bnx2x_exeq_elem {
+ struct list_head link;
+
+ /* Length of this element in the exe_chunk. */
+ int cmd_len;
+
+ union bnx2x_exe_queue_cmd_data cmd_data;
+};
+
+union bnx2x_qable_obj;
+
+union bnx2x_exeq_comp_elem {
+ union event_ring_elem *elem;
+};
+
+struct bnx2x_exe_queue_obj;
+
+typedef int (*exe_q_validate)(struct bnx2x *bp,
+ union bnx2x_qable_obj *o,
+ struct bnx2x_exeq_elem *elem);
+
+/**
+ * @return positive if the entry was optimized, 0 if not, negative
+ * in case of an error.
+ */
+typedef int (*exe_q_optimize)(struct bnx2x *bp,
+ union bnx2x_qable_obj *o,
+ struct bnx2x_exeq_elem *elem);
+typedef int (*exe_q_execute)(struct bnx2x *bp,
+ union bnx2x_qable_obj *o,
+ struct list_head *exe_chunk,
+ unsigned long *ramrod_flags);
+typedef struct bnx2x_exeq_elem *
+ (*exe_q_get)(struct bnx2x_exe_queue_obj *o,
+ struct bnx2x_exeq_elem *elem);
+
+struct bnx2x_exe_queue_obj {
+ /*
+ * Commands pending execution.
+ */
+ struct list_head exe_queue;
+
+ /*
+ * Commands pending completion.
+ */
+ struct list_head pending_comp;
+
+ spinlock_t lock;
+
+ /* Maximum length of commands' list for one execution */
+ int exe_chunk_len;
+
+ union bnx2x_qable_obj *owner;
+
+ /****** Virtual functions ******/
+ /**
+ * Called before commands execution for commands that are really
+ * going to be executed (after 'optimize').
+ *
+ * Must run under exe_queue->lock
+ */
+ exe_q_validate validate;
+
+
+ /**
+ * This will try to cancel the current pending commands list
+ * considering the new command.
+ *
+ * Must run under exe_queue->lock
+ */
+ exe_q_optimize optimize;
+
+ /**
+ * Run the next commands chunk (owner specific).
+ */
+ exe_q_execute execute;
+
+ /**
+ * Return the exe_queue element containing the specific command
+ * if any. Otherwise return NULL.
+ */
+ exe_q_get get;
+};
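+
+/*
+ * Informal summary of the command flow implied by the callbacks above:
+ * a new element is checked by 'validate' and, unless 'optimize' cancels
+ * it against an opposite pending command, queued on 'exe_queue'; chunks
+ * of up to 'exe_chunk_len' elements are then moved to 'pending_comp'
+ * and handed to 'execute'; completions drain 'pending_comp' and allow
+ * the next chunk to run.
+ */
+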
+/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
+/*
+ * Element in the VLAN_MAC registry list having all currently configured
+ * rules.
+ */
+struct bnx2x_vlan_mac_registry_elem {
+ struct list_head link;
+
+ /*
+ * Used to store the cam offset used for the mac/vlan/vlan-mac.
+ * Relevant for 57710 and 57711 only. VLANs and MACs share the
+ * same CAM for these chips.
+ */
+ int cam_offset;
+
+ /* Needed for DEL and RESTORE flows */
+ unsigned long vlan_mac_flags;
+
+ union bnx2x_classification_ramrod_data u;
+};
+
+/* Bits representing VLAN_MAC commands specific flags */
+enum {
+ BNX2X_UC_LIST_MAC,
+ BNX2X_ETH_MAC,
+ BNX2X_ISCSI_ETH_MAC,
+ BNX2X_NETQ_ETH_MAC,
+ BNX2X_DONT_CONSUME_CAM_CREDIT,
+ BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
+};
+
+struct bnx2x_vlan_mac_ramrod_params {
+ /* Object to run the command from */
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj;
+
+ /* General command flags: COMP_WAIT, etc. */
+ unsigned long ramrod_flags;
+
+ /* Command specific configuration request */
+ struct bnx2x_vlan_mac_data user_req;
+};
+
+struct bnx2x_vlan_mac_obj {
+ struct bnx2x_raw_obj raw;
+
+ /* Bookkeeping list: will prevent the addition of already existing
+ * entries.
+ */
+ struct list_head head;
+
+ /* TODO: Add its initialization in the init functions */
+ struct bnx2x_exe_queue_obj exe_queue;
+
+ /* MACs credit pool */
+ struct bnx2x_credit_pool_obj *macs_pool;
+
+ /* VLANs credit pool */
+ struct bnx2x_credit_pool_obj *vlans_pool;
+
+ /* RAMROD command to be used */
+ int ramrod_cmd;
+
+ /**
+ * Checks if ADD-ramrod with the given params may be performed.
+ *
+ * @return zero if the element may be added
+ */
+
+ int (*check_add)(struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data);
+
+ /**
+ * Checks if DEL-ramrod with the given params may be performed.
+ *
+ * @return the matching registry element if it may be deleted, NULL otherwise
+ */
+ struct bnx2x_vlan_mac_registry_elem *
+ (*check_del)(struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data);
+
+ /**
+ * Checks if MOVE-ramrod with the given params may be performed.
+ *
+ * @return true if the element may be moved
+ */
+ bool (*check_move)(struct bnx2x_vlan_mac_obj *src_o,
+ struct bnx2x_vlan_mac_obj *dst_o,
+ union bnx2x_classification_ramrod_data *data);
+
+ /**
+ * Update the relevant credit object(s) (consume/return
+ * correspondingly).
+ */
+ bool (*get_credit)(struct bnx2x_vlan_mac_obj *o);
+ bool (*put_credit)(struct bnx2x_vlan_mac_obj *o);
+ bool (*get_cam_offset)(struct bnx2x_vlan_mac_obj *o, int *offset);
+ bool (*put_cam_offset)(struct bnx2x_vlan_mac_obj *o, int offset);
+
+ /**
+ * Configures one rule in the ramrod data buffer.
+ */
+ void (*set_one_rule)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem, int rule_idx,
+ int cam_offset);
+
+ /**
+ * Delete all configured elements having the given
+ * vlan_mac_flags specification. Assumes there are no commands
+ * pending for execution. Will schedule all currently
+ * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
+ * specification for deletion and will use the given
+ * ramrod_flags for the last DEL operation.
+ *
+ * @param bp
+ * @param o
+ * @param ramrod_flags RAMROD_XX flags
+ *
+ * @return 0 if the last operation has completed successfully
+ * and there are no more elements left, positive value
+ * if there are pending for completion commands,
+ * negative value in case of failure.
+ */
+ int (*delete_all)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ unsigned long *vlan_mac_flags,
+ unsigned long *ramrod_flags);
+
+ /**
+ * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously
+ * configured elements list.
+ *
+ * @param bp
+ * @param p Command parameters (RAMROD_COMP_WAIT bit in
+ * ramrod_flags is only taken into an account)
+ * @param ppos a pointer to the cookie that should be given back in the
+ * next call to make function handle the next element. If
+ * *ppos is set to NULL it will restart the iterator.
+ * If returned *ppos == NULL this means that the last
+ * element has been handled.
+ *
+ * @return int
+ */
+ int (*restore)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p,
+ struct bnx2x_vlan_mac_registry_elem **ppos);
+
+ /**
+ * Should be called on a completion arrival.
+ *
+ * @param bp
+ * @param o
+ * @param cqe Completion element we are handling
+ * @param ramrod_flags if RAMROD_CONT is set the next bulk of
+ * pending commands will be executed.
+ * RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE
+ * may also be set if needed.
+ *
+ * @return 0 if there are neither pending nor waiting for
+ * completion commands. Positive value if there are
+ * pending for execution or for completion commands.
+ * Negative value in case of an error (including an
+ * error in the cqe).
+ */
+ int (*complete)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+ union event_ring_elem *cqe,
+ unsigned long *ramrod_flags);
+
+ /**
+ * Wait for completion of all commands. Don't schedule new ones,
+ * just wait. It assumes that the completion code will schedule
+ * for new commands.
+ */
+ int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o);
+};
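+
+/*
+ * Illustrative sketch (not part of this patch) of adding a MAC via a
+ * VLAN-MAC object, assuming the bnx2x_config_vlan_mac() wrapper this
+ * header declares; 'mac_obj' and 'mac' are hypothetical:
+ *
+ *	struct bnx2x_vlan_mac_ramrod_params p = {0};
+ *
+ *	p.vlan_mac_obj = mac_obj;
+ *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ *	memcpy(p.user_req.u.mac.mac, mac, ETH_ALEN);
+ *	__set_bit(BNX2X_ETH_MAC, &p.user_req.vlan_mac_flags);
+ *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
+ *	rc = bnx2x_config_vlan_mac(bp, &p);
+ */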
+
+/** RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
+
+/* RX_MODE ramrod special flags: set in rx_mode_flags field in
+ * a bnx2x_rx_mode_ramrod_params.
+ */
+enum {
+ BNX2X_RX_MODE_FCOE_ETH,
+ BNX2X_RX_MODE_ISCSI_ETH,
+};
+
+enum {
+ BNX2X_ACCEPT_UNICAST,
+ BNX2X_ACCEPT_MULTICAST,
+ BNX2X_ACCEPT_ALL_UNICAST,
+ BNX2X_ACCEPT_ALL_MULTICAST,
+ BNX2X_ACCEPT_BROADCAST,
+ BNX2X_ACCEPT_UNMATCHED,
+ BNX2X_ACCEPT_ANY_VLAN
+};
+
+struct bnx2x_rx_mode_ramrod_params {
+ struct bnx2x_rx_mode_obj *rx_mode_obj;
+ unsigned long *pstate;
+ int state;
+ u8 cl_id;
+ u32 cid;
+ u8 func_id;
+ unsigned long ramrod_flags;
+ unsigned long rx_mode_flags;
+
+ /*
+ * rdata is either a pointer to eth_filter_rules_ramrod_data (e2) or to
+ * a tstorm_eth_mac_filter_config (e1x).
+ */
+ void *rdata;
+ dma_addr_t rdata_mapping;
+
+ /* Rx mode settings */
+ unsigned long rx_accept_flags;
+
+ /* internal switching settings */
+ unsigned long tx_accept_flags;
+};
+
+struct bnx2x_rx_mode_obj {
+ int (*config_rx_mode)(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p);
+
+ int (*wait_comp)(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p);
+};
+
+/********************** Set multicast group ***********************************/
+
+struct bnx2x_mcast_list_elem {
+ struct list_head link;
+ u8 *mac;
+};
+
+union bnx2x_mcast_config_data {
+ u8 *mac;
+ u8 bin; /* used in a RESTORE flow */
+};
+
+struct bnx2x_mcast_ramrod_params {
+ struct bnx2x_mcast_obj *mcast_obj;
+
+ /* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */
+ unsigned long ramrod_flags;
+
+ struct list_head mcast_list; /* list of struct bnx2x_mcast_list_elem */
+ /** TODO:
+ * - rename it to macs_num.
+ * - Add a new command type for handling pending commands
+ * (remove "zero semantics").
+ *
+ * Length of mcast_list. If zero and ADD_CONT command - post
+ * pending commands.
+ */
+ int mcast_list_len;
+};
+
+enum {
+ BNX2X_MCAST_CMD_ADD,
+ BNX2X_MCAST_CMD_CONT,
+ BNX2X_MCAST_CMD_DEL,
+ BNX2X_MCAST_CMD_RESTORE,
+};
+
+struct bnx2x_mcast_obj {
+ struct bnx2x_raw_obj raw;
+
+ union {
+ struct {
+ #define BNX2X_MCAST_BINS_NUM 256
+ #define BNX2X_MCAST_VEC_SZ (BNX2X_MCAST_BINS_NUM / 64)
+ u64 vec[BNX2X_MCAST_VEC_SZ];
+
+ /** Number of BINs to clear. Should be updated
+ * immediately when a command arrives in order to
+ * properly create DEL commands.
+ */
+ int num_bins_set;
+ } aprox_match;
+
+ struct {
+ struct list_head macs;
+ int num_macs_set;
+ } exact_match;
+ } registry;
+
+ /* Pending commands */
+ struct list_head pending_cmds_head;
+
+ /* A state that is set in raw.pstate, when there are pending commands */
+ int sched_state;
+
+ /* Maximal number of mcast MACs configured in one command */
+ int max_cmd_len;
+
+ /* Total number of currently pending MACs to configure: both
+ * in the pending commands list and in the current command.
+ */
+ int total_pending_num;
+
+ u8 engine_id;
+
+ /**
+ * @param cmd command to execute (BNX2X_MCAST_CMD_X, see above)
+ */
+ int (*config_mcast)(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p, int cmd);
+
+ /**
+ * Fills the ramrod data during the RESTORE flow.
+ *
+ * @param bp
+ * @param o
+ * @param start_idx Registry index to start from
+ * @param rdata_idx Index in the ramrod data to start from
+ *
+ * @return -1 if we handled the whole registry or index of the last
+ * handled registry element.
+ */
+ int (*hdl_restore)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
+ int start_bin, int *rdata_idx);
+
+ int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
+ struct bnx2x_mcast_ramrod_params *p, int cmd);
+
+ void (*set_one_rule)(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, int idx,
+ union bnx2x_mcast_config_data *cfg_data, int cmd);
+
+ /** Checks if there are more mcast MACs to be set or a previous
+ * command is still pending.
+ */
+ bool (*check_pending)(struct bnx2x_mcast_obj *o);
+
+ /**
+ * Set/Clear/Check SCHEDULED state of the object
+ */
+ void (*set_sched)(struct bnx2x_mcast_obj *o);
+ void (*clear_sched)(struct bnx2x_mcast_obj *o);
+ bool (*check_sched)(struct bnx2x_mcast_obj *o);
+
+ /* Wait until all pending commands complete */
+ int (*wait_comp)(struct bnx2x *bp, struct bnx2x_mcast_obj *o);
+
+ /**
+ * Handle the internal object counters needed for proper
+ * commands handling. Checks that the provided parameters are
+ * feasible.
+ */
+ int (*validate)(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p, int cmd);
+
+ /**
+ * Restore the values of internal counters in case of a failure.
+ */
+ void (*revert)(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int old_num_bins);
+
+ int (*get_registry_size)(struct bnx2x_mcast_obj *o);
+ void (*set_registry_size)(struct bnx2x_mcast_obj *o, int n);
+};
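+
+/*
+ * Illustrative sketch (not part of this patch): programming a new
+ * multicast list, assuming the bnx2x_config_mcast() wrapper this
+ * header declares; 'bp->mcast_obj' and the list build-up are
+ * assumptions of this sketch:
+ *
+ *	struct bnx2x_mcast_ramrod_params rparam = {0};
+ *
+ *	rparam.mcast_obj = &bp->mcast_obj;
+ *	__set_bit(RAMROD_COMP_WAIT, &rparam.ramrod_flags);
+ *	INIT_LIST_HEAD(&rparam.mcast_list);
+ *	... add struct bnx2x_mcast_list_elem entries, set mcast_list_len ...
+ *	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_ADD);
+ */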
+
+/*************************** Credit handling **********************************/
+struct bnx2x_credit_pool_obj {
+
+ /* Current amount of credit in the pool */
+ atomic_t credit;
+
+ /* Maximum allowed credit. put() will check against it. */
+ int pool_sz;
+
+ /*
+ * Allocate a pool table statically.
+ *
+ * Currently the maximum allowed size is MAX_MAC_CREDIT_E2 (272).
+ *
+ * The set bit in the table will mean that the entry is available.
+ */
+#define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64)
+ u64 pool_mirror[BNX2X_POOL_VEC_SIZE];
+
+ /* Base pool offset (initialized differently) */
+ int base_pool_offset;
+
+ /**
+ * Get the next free pool entry.
+ *
+ * @return true if there was a free entry in the pool
+ */
+ bool (*get_entry)(struct bnx2x_credit_pool_obj *o, int *entry);
+
+ /**
+ * Return the entry back to the pool.
+ *
+ * @return true if entry is legal and has been successfully
+ * returned to the pool.
+ */
+ bool (*put_entry)(struct bnx2x_credit_pool_obj *o, int entry);
+
+ /**
+ * Get the requested amount of credit from the pool.
+ *
+ * @param cnt Amount of requested credit
+ * @return true if the operation is successful
+ */
+ bool (*get)(struct bnx2x_credit_pool_obj *o, int cnt);
+
+ /**
+ * Returns the credit to the pool.
+ *
+ * @param cnt Amount of credit to return
+ * @return true if the operation is successful
+ */
+ bool (*put)(struct bnx2x_credit_pool_obj *o, int cnt);
+
+ /**
+ * Reads the current amount of credit.
+ */
+ int (*check)(struct bnx2x_credit_pool_obj *o);
+};
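+
+/*
+ * Informal note on the intended use of the callbacks above:
+ * classification code consumes credit before issuing an ADD and
+ * returns it on DEL, e.g. (illustrative):
+ *
+ *	if (!mp->get(mp, 1))
+ *		return false;	(no CAM credit left)
+ */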
+
+/*************************** RSS configuration ********************************/
+enum {
+ /* RSS_MODE bits are mutually exclusive */
+ BNX2X_RSS_MODE_DISABLED,
+ BNX2X_RSS_MODE_REGULAR,
+ BNX2X_RSS_MODE_VLAN_PRI,
+ BNX2X_RSS_MODE_E1HOV_PRI,
+ BNX2X_RSS_MODE_IP_DSCP,
+
+ BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
+
+ BNX2X_RSS_IPV4,
+ BNX2X_RSS_IPV4_TCP,
+ BNX2X_RSS_IPV6,
+ BNX2X_RSS_IPV6_TCP,
+};
+
+struct bnx2x_config_rss_params {
+ struct bnx2x_rss_config_obj *rss_obj;
+
+ /* may have RAMROD_COMP_WAIT set only */
+ unsigned long ramrod_flags;
+
+ /* BNX2X_RSS_X bits */
+ unsigned long rss_flags;
+
+ /* Number of hash bits to take into account */
+ u8 rss_result_mask;
+
+ /* Indirection table */
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+ /* RSS hash values */
+ u32 rss_key[10];
+
+ /* valid iff BNX2X_RSS_UPDATE_TOE is set */
+ u16 toe_rss_bitmap;
+};
+
+struct bnx2x_rss_config_obj {
+ struct bnx2x_raw_obj raw;
+
+ /* RSS engine to use */
+ u8 engine_id;
+
+ /* Last configured indirection table */
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+ int (*config_rss)(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *p);
+};
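+
+/*
+ * Illustrative sketch (not part of this patch) of a regular RSS
+ * configuration, assuming the bnx2x_config_rss() wrapper this header
+ * declares; 'bp->rss_conf_obj', 'ind_table' and the 0x7f result mask
+ * are example values:
+ *
+ *	struct bnx2x_config_rss_params p = {0};
+ *
+ *	p.rss_obj = &bp->rss_conf_obj;
+ *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
+ *	__set_bit(BNX2X_RSS_MODE_REGULAR, &p.rss_flags);
+ *	__set_bit(BNX2X_RSS_IPV4, &p.rss_flags);
+ *	__set_bit(BNX2X_RSS_IPV4_TCP, &p.rss_flags);
+ *	__set_bit(BNX2X_RSS_IPV6, &p.rss_flags);
+ *	__set_bit(BNX2X_RSS_IPV6_TCP, &p.rss_flags);
+ *	p.rss_result_mask = 0x7f;
+ *	memcpy(p.ind_table, ind_table, sizeof(p.ind_table));
+ *	rc = bnx2x_config_rss(bp, &p);
+ */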
+
+/*********************** Queue state update ***********************************/
+
+/* UPDATE command options */
+enum {
+ BNX2X_Q_UPDATE_IN_VLAN_REM,
+ BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
+ BNX2X_Q_UPDATE_OUT_VLAN_REM,
+ BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
+ BNX2X_Q_UPDATE_ANTI_SPOOF,
+ BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG,
+ BNX2X_Q_UPDATE_ACTIVATE,
+ BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+ BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ BNX2X_Q_UPDATE_SILENT_VLAN_REM
+};
+
+/* Allowed Queue states */
+enum bnx2x_q_state {
+ BNX2X_Q_STATE_RESET,
+ BNX2X_Q_STATE_INITIALIZED,
+ BNX2X_Q_STATE_ACTIVE,
+ BNX2X_Q_STATE_MULTI_COS,
+ BNX2X_Q_STATE_MCOS_TERMINATED,
+ BNX2X_Q_STATE_INACTIVE,
+ BNX2X_Q_STATE_STOPPED,
+ BNX2X_Q_STATE_TERMINATED,
+ BNX2X_Q_STATE_FLRED,
+ BNX2X_Q_STATE_MAX,
+};
+
+/* Allowed commands */
+enum bnx2x_queue_cmd {
+ BNX2X_Q_CMD_INIT,
+ BNX2X_Q_CMD_SETUP,
+ BNX2X_Q_CMD_SETUP_TX_ONLY,
+ BNX2X_Q_CMD_DEACTIVATE,
+ BNX2X_Q_CMD_ACTIVATE,
+ BNX2X_Q_CMD_UPDATE,
+ BNX2X_Q_CMD_UPDATE_TPA,
+ BNX2X_Q_CMD_HALT,
+ BNX2X_Q_CMD_CFC_DEL,
+ BNX2X_Q_CMD_TERMINATE,
+ BNX2X_Q_CMD_EMPTY,
+ BNX2X_Q_CMD_MAX,
+};
+
+/* queue SETUP + INIT flags */
+enum {
+ BNX2X_Q_FLG_TPA,
+ BNX2X_Q_FLG_TPA_IPV6,
+ BNX2X_Q_FLG_STATS,
+ BNX2X_Q_FLG_ZERO_STATS,
+ BNX2X_Q_FLG_ACTIVE,
+ BNX2X_Q_FLG_OV,
+ BNX2X_Q_FLG_VLAN,
+ BNX2X_Q_FLG_COS,
+ BNX2X_Q_FLG_HC,
+ BNX2X_Q_FLG_HC_EN,
+ BNX2X_Q_FLG_DHC,
+ BNX2X_Q_FLG_FCOE,
+ BNX2X_Q_FLG_LEADING_RSS,
+ BNX2X_Q_FLG_MCAST,
+ BNX2X_Q_FLG_DEF_VLAN,
+ BNX2X_Q_FLG_TX_SWITCH,
+ BNX2X_Q_FLG_TX_SEC,
+ BNX2X_Q_FLG_ANTI_SPOOF,
+ BNX2X_Q_FLG_SILENT_VLAN_REM
+};
+
+/* Queue type options: queue type may be a combination of the values below. */
+enum bnx2x_q_type {
+ /** TODO: Consider moving both these flags into the init()
+ * ramrod params.
+ */
+ BNX2X_Q_TYPE_HAS_RX,
+ BNX2X_Q_TYPE_HAS_TX,
+};
+
+#define BNX2X_PRIMARY_CID_INDEX 0
+#define BNX2X_MULTI_TX_COS_E1X 1
+#define BNX2X_MULTI_TX_COS_E2_E3A0 2
+#define BNX2X_MULTI_TX_COS_E3B0 3
+#define BNX2X_MULTI_TX_COS BNX2X_MULTI_TX_COS_E3B0
+
+
+struct bnx2x_queue_init_params {
+ struct {
+ unsigned long flags;
+ u16 hc_rate;
+ u8 fw_sb_id;
+ u8 sb_cq_index;
+ } tx;
+
+ struct {
+ unsigned long flags;
+ u16 hc_rate;
+ u8 fw_sb_id;
+ u8 sb_cq_index;
+ } rx;
+
+ /* CID context in the host memory */
+ struct eth_context *cxts[BNX2X_MULTI_TX_COS];
+
+ /* maximum number of cos supported by hardware */
+ u8 max_cos;
+};
+
+struct bnx2x_queue_terminate_params {
+ /* index within the tx_only cids of this queue object */
+ u8 cid_index;
+};
+
+struct bnx2x_queue_cfc_del_params {
+ /* index within the tx_only cids of this queue object */
+ u8 cid_index;
+};
+
+struct bnx2x_queue_update_params {
+ unsigned long update_flags; /* BNX2X_Q_UPDATE_XX bits */
+ u16 def_vlan;
+ u16 silent_removal_value;
+ u16 silent_removal_mask;
+ /* index within the tx_only cids of this queue object */
+ u8 cid_index;
+};
+
+struct rxq_pause_params {
+ u16 bd_th_lo;
+ u16 bd_th_hi;
+ u16 rcq_th_lo;
+ u16 rcq_th_hi;
+ u16 sge_th_lo; /* valid iff BNX2X_Q_FLG_TPA */
+ u16 sge_th_hi; /* valid iff BNX2X_Q_FLG_TPA */
+ u16 pri_map;
+};
+
+/* general */
+struct bnx2x_general_setup_params {
+ /* valid iff BNX2X_Q_FLG_STATS */
+ u8 stat_id;
+
+ u8 spcl_id;
+ u16 mtu;
+ u8 cos;
+};
+
+struct bnx2x_rxq_setup_params {
+ /* dma */
+ dma_addr_t dscr_map;
+ dma_addr_t sge_map;
+ dma_addr_t rcq_map;
+ dma_addr_t rcq_np_map;
+
+ u16 drop_flags;
+ u16 buf_sz;
+ u8 fw_sb_id;
+ u8 cl_qzone_id;
+
+ /* valid iff BNX2X_Q_FLG_TPA */
+ u16 tpa_agg_sz;
+ u16 sge_buf_sz;
+ u8 max_sges_pkt;
+ u8 max_tpa_queues;
+ u8 rss_engine_id;
+
+ u8 cache_line_log;
+
+ u8 sb_cq_index;
+
+ /* valid iff BNX2X_Q_FLG_SILENT_VLAN_REM */
+ u16 silent_removal_value;
+ u16 silent_removal_mask;
+};
+
+struct bnx2x_txq_setup_params {
+ /* dma */
+ dma_addr_t dscr_map;
+
+ u8 fw_sb_id;
+ u8 sb_cq_index;
+ u8 cos; /* valid iff BNX2X_Q_FLG_COS */
+ u16 traffic_type;
+ /* equals to the leading rss client id, used for TX classification*/
+ u8 tss_leading_cl_id;
+
+ /* valid iff BNX2X_Q_FLG_DEF_VLAN */
+ u16 default_vlan;
+};
+
+struct bnx2x_queue_setup_params {
+ struct bnx2x_general_setup_params gen_params;
+ struct bnx2x_txq_setup_params txq_params;
+ struct bnx2x_rxq_setup_params rxq_params;
+ struct rxq_pause_params pause_params;
+ unsigned long flags;
+};
+
+struct bnx2x_queue_setup_tx_only_params {
+ struct bnx2x_general_setup_params gen_params;
+ struct bnx2x_txq_setup_params txq_params;
+ unsigned long flags;
+ /* index within the tx_only cids of this queue object */
+ u8 cid_index;
+};
+
+struct bnx2x_queue_state_params {
+ struct bnx2x_queue_sp_obj *q_obj;
+
+ /* Current command */
+ enum bnx2x_queue_cmd cmd;
+
+ /* may have RAMROD_COMP_WAIT set only */
+ unsigned long ramrod_flags;
+
+ /* Params according to the current command */
+ union {
+ struct bnx2x_queue_update_params update;
+ struct bnx2x_queue_setup_params setup;
+ struct bnx2x_queue_init_params init;
+ struct bnx2x_queue_setup_tx_only_params tx_only;
+ struct bnx2x_queue_terminate_params terminate;
+ struct bnx2x_queue_cfc_del_params cfc_del;
+ } params;
+};
+
+struct bnx2x_queue_sp_obj {
+ u32 cids[BNX2X_MULTI_TX_COS];
+ u8 cl_id;
+ u8 func_id;
+
+ /*
+ * Number of traffic classes supported by the queue.
+ * The primary connection of the queue supports the first traffic
+ * class. Any further traffic class is supported by a tx-only
+ * connection.
+ *
+ * Therefore max_cos is also the number of valid entries in the cids
+ * array.
+ */
+ u8 max_cos;
+ u8 num_tx_only, next_tx_only;
+
+ enum bnx2x_q_state state, next_state;
+
+ /* bits from enum bnx2x_q_type */
+ unsigned long type;
+
+	/* BNX2X_Q_CMD_XX bits. This object implements the "one
+	 * pending" paradigm, but for debug and tracing purposes it's
+	 * more convenient to have different bits for different
+	 * commands.
+	 */
+ unsigned long pending;
+
+ /* Buffer to use as a ramrod data and its mapping */
+ void *rdata;
+ dma_addr_t rdata_mapping;
+
+ /**
+ * Performs one state change according to the given parameters.
+ *
+	 * @return 0 in case of success and a negative value otherwise.
+ */
+ int (*send_cmd)(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params);
+
+ /**
+ * Sets the pending bit according to the requested transition.
+ */
+ int (*set_pending)(struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_queue_state_params *params);
+
+ /**
+ * Checks that the requested state transition is legal.
+ */
+ int (*check_transition)(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_queue_state_params *params);
+
+ /**
+ * Completes the pending command.
+ */
+ int (*complete_cmd)(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ enum bnx2x_queue_cmd);
+
+ int (*wait_comp)(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ enum bnx2x_queue_cmd cmd);
+};
+
+/********************** Function state update *********************************/
+/* Allowed Function states */
+enum bnx2x_func_state {
+ BNX2X_F_STATE_RESET,
+ BNX2X_F_STATE_INITIALIZED,
+ BNX2X_F_STATE_STARTED,
+ BNX2X_F_STATE_TX_STOPPED,
+ BNX2X_F_STATE_MAX,
+};
+
+/* Allowed Function commands */
+enum bnx2x_func_cmd {
+ BNX2X_F_CMD_HW_INIT,
+ BNX2X_F_CMD_START,
+ BNX2X_F_CMD_STOP,
+ BNX2X_F_CMD_HW_RESET,
+ BNX2X_F_CMD_TX_STOP,
+ BNX2X_F_CMD_TX_START,
+ BNX2X_F_CMD_MAX,
+};
+
+struct bnx2x_func_hw_init_params {
+ /* A load phase returned by MCP.
+ *
+ * May be:
+ * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
+ * FW_MSG_CODE_DRV_LOAD_COMMON
+ * FW_MSG_CODE_DRV_LOAD_PORT
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION
+ */
+ u32 load_phase;
+};
+
+struct bnx2x_func_hw_reset_params {
+ /* A load phase returned by MCP.
+ *
+ * May be:
+ * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
+ * FW_MSG_CODE_DRV_LOAD_COMMON
+ * FW_MSG_CODE_DRV_LOAD_PORT
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION
+ */
+ u32 reset_phase;
+};
+
+struct bnx2x_func_start_params {
+ /* Multi Function mode:
+ * - Single Function
+ * - Switch Dependent
+ * - Switch Independent
+ */
+ u16 mf_mode;
+
+ /* Switch Dependent mode outer VLAN tag */
+ u16 sd_vlan_tag;
+
+ /* Function cos mode */
+ u8 network_cos_mode;
+};
+
+struct bnx2x_func_tx_start_params {
+ struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
+ u8 dcb_enabled;
+ u8 dcb_version;
+ u8 dont_add_pri_0_en;
+};
+
+struct bnx2x_func_state_params {
+ struct bnx2x_func_sp_obj *f_obj;
+
+ /* Current command */
+ enum bnx2x_func_cmd cmd;
+
+	/* may have only RAMROD_COMP_WAIT set */
+ unsigned long ramrod_flags;
+
+ /* Params according to the current command */
+ union {
+ struct bnx2x_func_hw_init_params hw_init;
+ struct bnx2x_func_hw_reset_params hw_reset;
+ struct bnx2x_func_start_params start;
+ struct bnx2x_func_tx_start_params tx_start;
+ } params;
+};
+
+struct bnx2x_func_sp_drv_ops {
+ /* Init tool + runtime initialization:
+ * - Common Chip
+ * - Common (per Path)
+ * - Port
+ * - Function phases
+ */
+ int (*init_hw_cmn_chip)(struct bnx2x *bp);
+ int (*init_hw_cmn)(struct bnx2x *bp);
+ int (*init_hw_port)(struct bnx2x *bp);
+ int (*init_hw_func)(struct bnx2x *bp);
+
+ /* Reset Function HW: Common, Port, Function phases. */
+ void (*reset_hw_cmn)(struct bnx2x *bp);
+ void (*reset_hw_port)(struct bnx2x *bp);
+ void (*reset_hw_func)(struct bnx2x *bp);
+
+ /* Init/Free GUNZIP resources */
+ int (*gunzip_init)(struct bnx2x *bp);
+ void (*gunzip_end)(struct bnx2x *bp);
+
+ /* Prepare/Release FW resources */
+ int (*init_fw)(struct bnx2x *bp);
+ void (*release_fw)(struct bnx2x *bp);
+};
+
+struct bnx2x_func_sp_obj {
+ enum bnx2x_func_state state, next_state;
+
+	/* BNX2X_FUNC_CMD_XX bits. This object implements the "one
+	 * pending" paradigm, but for debug and tracing purposes it's
+	 * more convenient to have different bits for different
+	 * commands.
+	 */
+ unsigned long pending;
+
+ /* Buffer to use as a ramrod data and its mapping */
+ void *rdata;
+ dma_addr_t rdata_mapping;
+
+	/* This mutex ensures that, once the pending flag is taken, the next
+	 * ramrod to be sent will be the one that set the pending bit.
+	 */
+ struct mutex one_pending_mutex;
+
+ /* Driver interface */
+ struct bnx2x_func_sp_drv_ops *drv;
+
+ /**
+ * Performs one state change according to the given parameters.
+ *
+	 * @return 0 in case of success and a negative value otherwise.
+ */
+ int (*send_cmd)(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params);
+
+ /**
+ * Checks that the requested state transition is legal.
+ */
+ int (*check_transition)(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ struct bnx2x_func_state_params *params);
+
+ /**
+ * Completes the pending command.
+ */
+ int (*complete_cmd)(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ enum bnx2x_func_cmd cmd);
+
+ int (*wait_comp)(struct bnx2x *bp, struct bnx2x_func_sp_obj *o,
+ enum bnx2x_func_cmd cmd);
+};
+
+/********************** Interfaces ********************************************/
+/* Queueable objects set */
+union bnx2x_qable_obj {
+ struct bnx2x_vlan_mac_obj vlan_mac;
+};
+/************** Function state update *********/
+void bnx2x_init_func_obj(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *obj,
+ void *rdata, dma_addr_t rdata_mapping,
+ struct bnx2x_func_sp_drv_ops *drv_iface);
+
+int bnx2x_func_state_change(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params);
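+
+/*
+ * Illustrative sketch (not part of the driver): a caller drives the
+ * function state machine by filling bnx2x_func_state_params and calling
+ * bnx2x_func_state_change(). The bp->func_obj owner field is an
+ * assumption for this example.
+ *
+ *	struct bnx2x_func_state_params func_params = {0};
+ *
+ *	func_params.f_obj = &bp->func_obj;
+ *	func_params.cmd = BNX2X_F_CMD_START;
+ *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ *	func_params.params.start.mf_mode = ...;
+ *	rc = bnx2x_func_state_change(bp, &func_params);
+ */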
+
+enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o);
+/******************* Queue State **************/
+void bnx2x_init_queue_obj(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj, u8 cl_id, u32 *cids,
+ u8 cid_cnt, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, unsigned long type);
+
+int bnx2x_queue_state_change(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params);
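+
+/*
+ * Illustrative sketch (not part of the driver): setting a queue up via
+ * the state machine above. The fp->q_obj owner field and the
+ * BNX2X_Q_CMD_SETUP value are assumptions for this example; only
+ * RAMROD_COMP_WAIT may be set in ramrod_flags.
+ *
+ *	struct bnx2x_queue_state_params q_params = {0};
+ *
+ *	q_params.q_obj = &fp->q_obj;
+ *	q_params.cmd = BNX2X_Q_CMD_SETUP;
+ *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ *	(fill q_params.params.setup.{gen,txq,rxq,pause}_params, then:)
+ *	rc = bnx2x_queue_state_change(bp, &q_params);
+ */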
+
+/********************* VLAN-MAC ****************/
+void bnx2x_init_mac_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *mac_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool);
+
+void bnx2x_init_vlan_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *vlans_pool);
+
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool,
+ struct bnx2x_credit_pool_obj *vlans_pool);
+
+int bnx2x_config_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p);
+
+int bnx2x_vlan_mac_move(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p,
+ struct bnx2x_vlan_mac_obj *dest_o);
+
+/********************* RX MODE ****************/
+
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+ struct bnx2x_rx_mode_obj *o);
+
+/**
+ * Send an RX_MODE ramrod according to the provided parameters.
+ *
+ * @param bp
+ * @param p Command parameters
+ *
+ * @return 0 - if the operation was successful and there are no pending
+ *         completions,
+ *         positive number - if there are pending completions,
+ *         negative - if there were errors
+ */
+int bnx2x_config_rx_mode(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p);
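+
+/*
+ * Illustrative sketch (the bp->rx_mode_obj owner field is an assumption
+ * for this example; the accept-flag bits are declared elsewhere in this
+ * header):
+ *
+ *	struct bnx2x_rx_mode_ramrod_params rparam = {0};
+ *
+ *	rparam.rx_mode_obj = &bp->rx_mode_obj;
+ *	(set client/cid, accept flags and ramrod data, then:)
+ *	rc = bnx2x_config_rx_mode(bp, &rparam);
+ */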
+
+/****************** MULTICASTS ****************/
+
+void bnx2x_init_mcast_obj(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *mcast_obj,
+ u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
+ u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate,
+ bnx2x_obj_type type);
+
+/**
+ * Configure a multicast MACs list. May configure a new list
+ * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up
+ * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) the current
+ * configuration, or continue to execute the pending commands
+ * (BNX2X_MCAST_CMD_CONT).
+ *
+ * If the previous command is still pending, or if the number of MACs to
+ * configure is more than the maximum number of MACs in one command,
+ * the current command will be enqueued to the tail of the
+ * pending commands list.
+ *
+ * @param bp
+ * @param p
+ * @param command to execute: BNX2X_MCAST_CMD_X
+ *
+ * @return 0 if the operation was successful and there are no pending
+ *         completions, negative if there were errors, positive if there
+ *         are pending completions.
+ */
+int bnx2x_config_mcast(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p, int cmd);
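+
+/*
+ * Illustrative sketch (the bp->mcast_obj owner field is an assumption
+ * for this example; p->mcast_list is described in the comment above):
+ *
+ *	struct bnx2x_mcast_ramrod_params rparam = {0};
+ *
+ *	rparam.mcast_obj = &bp->mcast_obj;
+ *	(fill rparam.mcast_list with the MACs to configure, then:)
+ *	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_ADD);
+ */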
+
+/****************** CREDIT POOL ****************/
+void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
+ struct bnx2x_credit_pool_obj *p, u8 func_id,
+ u8 func_num);
+void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
+ struct bnx2x_credit_pool_obj *p, u8 func_id,
+ u8 func_num);
+
+/****************** RSS CONFIGURATION ****************/
+void bnx2x_init_rss_config_obj(struct bnx2x *bp,
+ struct bnx2x_rss_config_obj *rss_obj,
+ u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
+ void *rdata, dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate,
+ bnx2x_obj_type type);
+
+/**
+ * Updates the RSS configuration according to the provided parameters.
+ *
+ * @param bp
+ * @param p
+ *
+ * @return 0 in case of success
+ */
+int bnx2x_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *p);
+
+/**
+ * Return the current ind_table configuration.
+ *
+ * @param bp
+ * @param ind_table buffer to fill with the current indirection
+ * table content. Should be at least
+ * T_ETH_INDIRECTION_TABLE_SIZE bytes long.
+ */
+void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
+ u8 *ind_table);
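+
+/*
+ * Example (sketch): reading the indirection table into a caller buffer
+ * sized per the comment above; the bp->rss_conf_obj owner field is an
+ * assumption for this example.
+ *
+ *	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ *
+ *	bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
+ */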
+
+#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
new file mode 100644
index 000000000000..02ac6a771bf9
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -0,0 +1,1599 @@
+/* bnx2x_stats.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "bnx2x_stats.h"
+#include "bnx2x_cmn.h"
+
+
+/* Statistics */
+
+/*
+ * General service functions
+ */
+
+static inline long bnx2x_hilo(u32 *hiref)
+{
+ u32 lo = *(hiref + 1);
+#if (BITS_PER_LONG == 64)
+ u32 hi = *hiref;
+
+ return HILO_U64(hi, lo);
+#else
+ return lo;
+#endif
+}
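+
+/*
+ * The _hi/_lo statistics pairs are laid out with the high word first, so
+ * callers pass the address of the _hi member, as done below in
+ * bnx2x_net_stats_update():
+ *
+ *	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
+ *
+ * Note that on 32-bit hosts only the low 32 bits are returned.
+ */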
+
+/*
+ * Init service functions
+ */
+
+/* Post the next statistics ramrod. Protect it with the spin lock in
+ * order to ensure the strict ordering between statistics ramrods
+ * (each ramrod carries a sequence number passed in
+ * bp->fw_stats_req->hdr.drv_stats_counter, and ramrods must be
+ * sent in order).
+ */
+static void bnx2x_storm_stats_post(struct bnx2x *bp)
+{
+ if (!bp->stats_pending) {
+ int rc;
+
+ spin_lock_bh(&bp->stats_lock);
+
+ if (bp->stats_pending) {
+ spin_unlock_bh(&bp->stats_lock);
+ return;
+ }
+
+ bp->fw_stats_req->hdr.drv_stats_counter =
+ cpu_to_le16(bp->stats_counter++);
+
+ DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n",
+ bp->fw_stats_req->hdr.drv_stats_counter);
+
+ /* send FW stats ramrod */
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
+ U64_HI(bp->fw_stats_req_mapping),
+ U64_LO(bp->fw_stats_req_mapping),
+ NONE_CONNECTION_TYPE);
+ if (rc == 0)
+ bp->stats_pending = 1;
+
+ spin_unlock_bh(&bp->stats_lock);
+ }
+}
+
+static void bnx2x_hw_stats_post(struct bnx2x *bp)
+{
+ struct dmae_command *dmae = &bp->stats_dmae;
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ *stats_comp = DMAE_COMP_VAL;
+ if (CHIP_REV_IS_SLOW(bp))
+ return;
+
+ /* loader */
+ if (bp->executer_idx) {
+ int loader_idx = PMF_DMAE_C(bp);
+ u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+ true, DMAE_COMP_GRC);
+ opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);
+
+ memset(dmae, 0, sizeof(struct dmae_command));
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
+ dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
+ sizeof(struct dmae_command) *
+ (loader_idx + 1)) >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = sizeof(struct dmae_command) >> 2;
+ if (CHIP_IS_E1(bp))
+ dmae->len--;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ *stats_comp = 0;
+ bnx2x_post_dmae(bp, dmae, loader_idx);
+
+ } else if (bp->func_stx) {
+ *stats_comp = 0;
+ bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+ }
+}
+
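+/* Poll for the DMAE completion value; gives up after roughly 10ms.
+ * Note that callers ignore the return value, which is always 1.
+ */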
+static int bnx2x_stats_comp(struct bnx2x *bp)
+{
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+ int cnt = 10;
+
+ might_sleep();
+ while (*stats_comp != DMAE_COMP_VAL) {
+ if (!cnt) {
+			BNX2X_ERR("timeout waiting for stats to finish\n");
+ break;
+ }
+ cnt--;
+ usleep_range(1000, 1000);
+ }
+ return 1;
+}
+
+/*
+ * Statistics service functions
+ */
+
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+{
+ struct dmae_command *dmae;
+ u32 opcode;
+ int loader_idx = PMF_DMAE_C(bp);
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ /* sanity */
+ if (!IS_MF(bp) || !bp->port.pmf || !bp->port.port_stx) {
+ BNX2X_ERR("BUG!\n");
+ return;
+ }
+
+ bp->executer_idx = 0;
+
+ opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
+ dmae->src_addr_lo = bp->port.port_stx >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+ dmae->len = DMAE_LEN32_RD_MAX;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
+ dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
+ DMAE_LEN32_RD_MAX * 4);
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
+ DMAE_LEN32_RD_MAX * 4);
+ dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+ bnx2x_hw_stats_post(bp);
+ bnx2x_stats_comp(bp);
+}
+
+static void bnx2x_port_stats_init(struct bnx2x *bp)
+{
+ struct dmae_command *dmae;
+ int port = BP_PORT(bp);
+ u32 opcode;
+ int loader_idx = PMF_DMAE_C(bp);
+ u32 mac_addr;
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ /* sanity */
+ if (!bp->link_vars.link_up || !bp->port.pmf) {
+ BNX2X_ERR("BUG!\n");
+ return;
+ }
+
+ bp->executer_idx = 0;
+
+ /* MCP */
+ opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+ true, DMAE_COMP_GRC);
+
+ if (bp->port.port_stx) {
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+ dmae->dst_addr_lo = bp->port.port_stx >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = sizeof(struct host_port_stats) >> 2;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ }
+
+ if (bp->func_stx) {
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+ dmae->dst_addr_lo = bp->func_stx >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = sizeof(struct host_func_stats) >> 2;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ }
+
+ /* MAC */
+ opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
+ true, DMAE_COMP_GRC);
+
+ /* EMAC is special */
+ if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
+ mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
+
+ /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = (mac_addr +
+ EMAC_REG_EMAC_RX_STAT_AC) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
+ dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ /* EMAC_REG_EMAC_RX_STAT_AC_28 */
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = (mac_addr +
+ EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+ offsetof(struct emac_stats, rx_stat_falsecarriererrors));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+ offsetof(struct emac_stats, rx_stat_falsecarriererrors));
+ dmae->len = 1;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = (mac_addr +
+ EMAC_REG_EMAC_TX_STAT_AC) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+ offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+ offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
+ dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ } else {
+ u32 tx_src_addr_lo, rx_src_addr_lo;
+ u16 rx_len, tx_len;
+
+ /* configure the params according to MAC type */
+ switch (bp->link_vars.mac_type) {
+ case MAC_TYPE_BMAC:
+ mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM);
+
+ /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
+ BIGMAC_REGISTER_TX_STAT_GTBYT */
+ if (CHIP_IS_E1x(bp)) {
+ tx_src_addr_lo = (mac_addr +
+ BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+ tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
+ BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+ rx_src_addr_lo = (mac_addr +
+ BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+ rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
+ BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+ } else {
+ tx_src_addr_lo = (mac_addr +
+ BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
+ tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
+ BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
+ rx_src_addr_lo = (mac_addr +
+ BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
+ rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
+ BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
+ }
+ break;
+
+ case MAC_TYPE_UMAC: /* handled by MSTAT */
+ case MAC_TYPE_XMAC: /* handled by MSTAT */
+ default:
+ mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
+ tx_src_addr_lo = (mac_addr +
+ MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
+ rx_src_addr_lo = (mac_addr +
+ MSTAT_REG_RX_STAT_GR64_LO) >> 2;
+ tx_len = sizeof(bp->slowpath->
+ mac_stats.mstat_stats.stats_tx) >> 2;
+ rx_len = sizeof(bp->slowpath->
+ mac_stats.mstat_stats.stats_rx) >> 2;
+ break;
+ }
+
+ /* TX stats */
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = tx_src_addr_lo;
+ dmae->src_addr_hi = 0;
+ dmae->len = tx_len;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ /* RX stats */
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_hi = 0;
+ dmae->src_addr_lo = rx_src_addr_lo;
+ dmae->dst_addr_lo =
+ U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
+ dmae->dst_addr_hi =
+ U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
+ dmae->len = rx_len;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ }
+
+ /* NIG */
+ if (!CHIP_IS_E3(bp)) {
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
+ NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
+ offsetof(struct nig_stats, egress_mac_pkt0_lo));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
+ offsetof(struct nig_stats, egress_mac_pkt0_lo));
+ dmae->len = (2*sizeof(u32)) >> 2;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
+ NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
+ offsetof(struct nig_stats, egress_mac_pkt1_lo));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
+ offsetof(struct nig_stats, egress_mac_pkt1_lo));
+ dmae->len = (2*sizeof(u32)) >> 2;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ }
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
+ true, DMAE_COMP_PCI);
+ dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
+ NIG_REG_STAT0_BRB_DISCARD) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
+ dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
+
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+}
+
+static void bnx2x_func_stats_init(struct bnx2x *bp)
+{
+ struct dmae_command *dmae = &bp->stats_dmae;
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ /* sanity */
+ if (!bp->func_stx) {
+ BNX2X_ERR("BUG!\n");
+ return;
+ }
+
+ bp->executer_idx = 0;
+ memset(dmae, 0, sizeof(struct dmae_command));
+
+ dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+ true, DMAE_COMP_PCI);
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+ dmae->dst_addr_lo = bp->func_stx >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = sizeof(struct host_func_stats) >> 2;
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+}
+
+static void bnx2x_stats_start(struct bnx2x *bp)
+{
+ if (bp->port.pmf)
+ bnx2x_port_stats_init(bp);
+
+ else if (bp->func_stx)
+ bnx2x_func_stats_init(bp);
+
+ bnx2x_hw_stats_post(bp);
+ bnx2x_storm_stats_post(bp);
+}
+
+static void bnx2x_stats_pmf_start(struct bnx2x *bp)
+{
+ bnx2x_stats_comp(bp);
+ bnx2x_stats_pmf_update(bp);
+ bnx2x_stats_start(bp);
+}
+
+static void bnx2x_stats_restart(struct bnx2x *bp)
+{
+ bnx2x_stats_comp(bp);
+ bnx2x_stats_start(bp);
+}
+
+static void bnx2x_bmac_stats_update(struct bnx2x *bp)
+{
+ struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct {
+ u32 lo;
+ u32 hi;
+ } diff;
+
+ if (CHIP_IS_E1x(bp)) {
+ struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);
+
+ /* the macros below will use "bmac1_stats" type */
+ UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
+ UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
+ UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
+ UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
+ UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
+ UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
+ UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
+
+ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
+ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
+ UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
+ UPDATE_STAT64(tx_stat_gt127,
+ tx_stat_etherstatspkts65octetsto127octets);
+ UPDATE_STAT64(tx_stat_gt255,
+ tx_stat_etherstatspkts128octetsto255octets);
+ UPDATE_STAT64(tx_stat_gt511,
+ tx_stat_etherstatspkts256octetsto511octets);
+ UPDATE_STAT64(tx_stat_gt1023,
+ tx_stat_etherstatspkts512octetsto1023octets);
+ UPDATE_STAT64(tx_stat_gt1518,
+ tx_stat_etherstatspkts1024octetsto1522octets);
+ UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
+ UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
+ UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
+ UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
+ UPDATE_STAT64(tx_stat_gterr,
+ tx_stat_dot3statsinternalmactransmiterrors);
+ UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
+
+ } else {
+ struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);
+
+ /* the macros below will use "bmac2_stats" type */
+ UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
+ UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
+ UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
+ UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
+ UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
+ UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
+ UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
+ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
+ UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
+ UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
+ UPDATE_STAT64(tx_stat_gt127,
+ tx_stat_etherstatspkts65octetsto127octets);
+ UPDATE_STAT64(tx_stat_gt255,
+ tx_stat_etherstatspkts128octetsto255octets);
+ UPDATE_STAT64(tx_stat_gt511,
+ tx_stat_etherstatspkts256octetsto511octets);
+ UPDATE_STAT64(tx_stat_gt1023,
+ tx_stat_etherstatspkts512octetsto1023octets);
+ UPDATE_STAT64(tx_stat_gt1518,
+ tx_stat_etherstatspkts1024octetsto1522octets);
+ UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
+ UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
+ UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
+ UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
+ UPDATE_STAT64(tx_stat_gterr,
+ tx_stat_dot3statsinternalmactransmiterrors);
+ UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
+ }
+
+ estats->pause_frames_received_hi =
+ pstats->mac_stx[1].rx_stat_mac_xpf_hi;
+ estats->pause_frames_received_lo =
+ pstats->mac_stx[1].rx_stat_mac_xpf_lo;
+
+ estats->pause_frames_sent_hi =
+ pstats->mac_stx[1].tx_stat_outxoffsent_hi;
+ estats->pause_frames_sent_lo =
+ pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+}
+
+static void bnx2x_mstat_stats_update(struct bnx2x *bp)
+{
+ struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+
+ struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);
+
+ ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
+ ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
+ ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
+ ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
+ ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
+ ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
+ ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
+ ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
+ ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
+ ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
+
+ ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
+ ADD_STAT64(stats_tx.tx_gt127,
+ tx_stat_etherstatspkts65octetsto127octets);
+ ADD_STAT64(stats_tx.tx_gt255,
+ tx_stat_etherstatspkts128octetsto255octets);
+ ADD_STAT64(stats_tx.tx_gt511,
+ tx_stat_etherstatspkts256octetsto511octets);
+ ADD_STAT64(stats_tx.tx_gt1023,
+ tx_stat_etherstatspkts512octetsto1023octets);
+ ADD_STAT64(stats_tx.tx_gt1518,
+ tx_stat_etherstatspkts1024octetsto1522octets);
+ ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);
+
+ ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
+ ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
+ ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);
+
+ ADD_STAT64(stats_tx.tx_gterr,
+ tx_stat_dot3statsinternalmactransmiterrors);
+ ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);
+
+ ADD_64(estats->etherstatspkts1024octetsto1522octets_hi,
+ new->stats_tx.tx_gt1518_hi,
+ estats->etherstatspkts1024octetsto1522octets_lo,
+ new->stats_tx.tx_gt1518_lo);
+
+ ADD_64(estats->etherstatspktsover1522octets_hi,
+ new->stats_tx.tx_gt2047_hi,
+ estats->etherstatspktsover1522octets_lo,
+ new->stats_tx.tx_gt2047_lo);
+
+ ADD_64(estats->etherstatspktsover1522octets_hi,
+ new->stats_tx.tx_gt4095_hi,
+ estats->etherstatspktsover1522octets_lo,
+ new->stats_tx.tx_gt4095_lo);
+
+ ADD_64(estats->etherstatspktsover1522octets_hi,
+ new->stats_tx.tx_gt9216_hi,
+ estats->etherstatspktsover1522octets_lo,
+ new->stats_tx.tx_gt9216_lo);
+
+ ADD_64(estats->etherstatspktsover1522octets_hi,
+ new->stats_tx.tx_gt16383_hi,
+ estats->etherstatspktsover1522octets_lo,
+ new->stats_tx.tx_gt16383_lo);
+
+ estats->pause_frames_received_hi =
+ pstats->mac_stx[1].rx_stat_mac_xpf_hi;
+ estats->pause_frames_received_lo =
+ pstats->mac_stx[1].rx_stat_mac_xpf_lo;
+
+ estats->pause_frames_sent_hi =
+ pstats->mac_stx[1].tx_stat_outxoffsent_hi;
+ estats->pause_frames_sent_lo =
+ pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+}
+
+static void bnx2x_emac_stats_update(struct bnx2x *bp)
+{
+ struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
+ struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+
+ UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
+ UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
+ UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
+ UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
+ UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
+ UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
+ UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
+ UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
+ UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
+ UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
+ UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
+ UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
+ UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
+ UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
+ UPDATE_EXTEND_STAT(tx_stat_outxonsent);
+ UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
+ UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
+ UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
+ UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
+
+ estats->pause_frames_received_hi =
+ pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
+ estats->pause_frames_received_lo =
+ pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
+ ADD_64(estats->pause_frames_received_hi,
+ pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
+ estats->pause_frames_received_lo,
+ pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
+
+ estats->pause_frames_sent_hi =
+ pstats->mac_stx[1].tx_stat_outxonsent_hi;
+ estats->pause_frames_sent_lo =
+ pstats->mac_stx[1].tx_stat_outxonsent_lo;
+ ADD_64(estats->pause_frames_sent_hi,
+ pstats->mac_stx[1].tx_stat_outxoffsent_hi,
+ estats->pause_frames_sent_lo,
+ pstats->mac_stx[1].tx_stat_outxoffsent_lo);
+}
+
+static int bnx2x_hw_stats_update(struct bnx2x *bp)
+{
+ struct nig_stats *new = bnx2x_sp(bp, nig_stats);
+ struct nig_stats *old = &(bp->port.old_nig_stats);
+ struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct {
+ u32 lo;
+ u32 hi;
+ } diff;
+
+ switch (bp->link_vars.mac_type) {
+ case MAC_TYPE_BMAC:
+ bnx2x_bmac_stats_update(bp);
+ break;
+
+ case MAC_TYPE_EMAC:
+ bnx2x_emac_stats_update(bp);
+ break;
+
+ case MAC_TYPE_UMAC:
+ case MAC_TYPE_XMAC:
+ bnx2x_mstat_stats_update(bp);
+ break;
+
+ case MAC_TYPE_NONE: /* unreached */
+ DP(BNX2X_MSG_STATS,
+ "stats updated by DMAE but no MAC active\n");
+ return -1;
+
+ default: /* unreached */
+ BNX2X_ERR("Unknown MAC type\n");
+ }
+
+ ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
+ new->brb_discard - old->brb_discard);
+ ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
+ new->brb_truncate - old->brb_truncate);
+
+ if (!CHIP_IS_E3(bp)) {
+ UPDATE_STAT64_NIG(egress_mac_pkt0,
+ etherstatspkts1024octetsto1522octets);
+ UPDATE_STAT64_NIG(egress_mac_pkt1,
+ etherstatspktsover1522octets);
+ }
+
+ memcpy(old, new, sizeof(struct nig_stats));
+
+ memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
+ sizeof(struct mac_stx));
+ estats->brb_drop_hi = pstats->brb_drop_hi;
+ estats->brb_drop_lo = pstats->brb_drop_lo;
+
+ pstats->host_port_stats_start = ++pstats->host_port_stats_end;
+
+ if (!BP_NOMCP(bp)) {
+ u32 nig_timer_max =
+ SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
+ if (nig_timer_max != estats->nig_timer_max) {
+ estats->nig_timer_max = nig_timer_max;
+ BNX2X_ERR("NIG timer max (%u)\n",
+ estats->nig_timer_max);
+ }
+ }
+
+ return 0;
+}
+
+static int bnx2x_storm_stats_update(struct bnx2x *bp)
+{
+ struct tstorm_per_port_stats *tport =
+ &bp->fw_stats_data->port.tstorm_port_statistics;
+ struct tstorm_per_pf_stats *tfunc =
+ &bp->fw_stats_data->pf.tstorm_pf_statistics;
+ struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
+ int i;
+ u16 cur_stats_counter;
+
+ /* Make sure we use the value of the counter
+ * used for sending the last stats ramrod.
+ */
+ spin_lock_bh(&bp->stats_lock);
+ cur_stats_counter = bp->stats_counter - 1;
+ spin_unlock_bh(&bp->stats_lock);
+
+ /* are storm stats valid? */
+ if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
+ DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
+ " xstorm counter (0x%x) != stats_counter (0x%x)\n",
+ le16_to_cpu(counters->xstats_counter), bp->stats_counter);
+ return -EAGAIN;
+ }
+
+ if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
+ DP(BNX2X_MSG_STATS, "stats not updated by ustorm"
+ " ustorm counter (0x%x) != stats_counter (0x%x)\n",
+ le16_to_cpu(counters->ustats_counter), bp->stats_counter);
+ return -EAGAIN;
+ }
+
+ if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
+ DP(BNX2X_MSG_STATS, "stats not updated by cstorm"
+ " cstorm counter (0x%x) != stats_counter (0x%x)\n",
+ le16_to_cpu(counters->cstats_counter), bp->stats_counter);
+ return -EAGAIN;
+ }
+
+ if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
+ DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
+ " tstorm counter (0x%x) != stats_counter (0x%x)\n",
+ le16_to_cpu(counters->tstats_counter), bp->stats_counter);
+ return -EAGAIN;
+ }
+
+ memcpy(&(fstats->total_bytes_received_hi),
+ &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
+ sizeof(struct host_func_stats) - 2*sizeof(u32));
+ estats->error_bytes_received_hi = 0;
+ estats->error_bytes_received_lo = 0;
+ estats->etherstatsoverrsizepkts_hi = 0;
+ estats->etherstatsoverrsizepkts_lo = 0;
+ estats->no_buff_discard_hi = 0;
+ estats->no_buff_discard_lo = 0;
+ estats->total_tpa_aggregations_hi = 0;
+ estats->total_tpa_aggregations_lo = 0;
+ estats->total_tpa_aggregated_frames_hi = 0;
+ estats->total_tpa_aggregated_frames_lo = 0;
+ estats->total_tpa_bytes_hi = 0;
+ estats->total_tpa_bytes_lo = 0;
+
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ struct tstorm_per_queue_stats *tclient =
+ &bp->fw_stats_data->queue_stats[i].
+ tstorm_queue_statistics;
+ struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
+ struct ustorm_per_queue_stats *uclient =
+ &bp->fw_stats_data->queue_stats[i].
+ ustorm_queue_statistics;
+ struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
+ struct xstorm_per_queue_stats *xclient =
+ &bp->fw_stats_data->queue_stats[i].
+ xstorm_queue_statistics;
+ struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
+ struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+ u32 diff;
+
+ DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, "
+ "bcast_sent 0x%x mcast_sent 0x%x\n",
+ i, xclient->ucast_pkts_sent,
+ xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);
+
+ DP(BNX2X_MSG_STATS, "---------------\n");
+
+ qstats->total_broadcast_bytes_received_hi =
+ le32_to_cpu(tclient->rcv_bcast_bytes.hi);
+ qstats->total_broadcast_bytes_received_lo =
+ le32_to_cpu(tclient->rcv_bcast_bytes.lo);
+
+ qstats->total_multicast_bytes_received_hi =
+ le32_to_cpu(tclient->rcv_mcast_bytes.hi);
+ qstats->total_multicast_bytes_received_lo =
+ le32_to_cpu(tclient->rcv_mcast_bytes.lo);
+
+ qstats->total_unicast_bytes_received_hi =
+ le32_to_cpu(tclient->rcv_ucast_bytes.hi);
+ qstats->total_unicast_bytes_received_lo =
+ le32_to_cpu(tclient->rcv_ucast_bytes.lo);
+
+		/*
+		 * sum unicast/multicast/broadcast into
+		 * total_bytes_received
+		 */
+ qstats->total_bytes_received_hi =
+ qstats->total_broadcast_bytes_received_hi;
+ qstats->total_bytes_received_lo =
+ qstats->total_broadcast_bytes_received_lo;
+
+ ADD_64(qstats->total_bytes_received_hi,
+ qstats->total_multicast_bytes_received_hi,
+ qstats->total_bytes_received_lo,
+ qstats->total_multicast_bytes_received_lo);
+
+ ADD_64(qstats->total_bytes_received_hi,
+ qstats->total_unicast_bytes_received_hi,
+ qstats->total_bytes_received_lo,
+ qstats->total_unicast_bytes_received_lo);
+
+ qstats->valid_bytes_received_hi =
+ qstats->total_bytes_received_hi;
+ qstats->valid_bytes_received_lo =
+ qstats->total_bytes_received_lo;
+
+ UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
+ total_unicast_packets_received);
+ UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
+ total_multicast_packets_received);
+ UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
+ total_broadcast_packets_received);
+ UPDATE_EXTEND_TSTAT(pkts_too_big_discard,
+ etherstatsoverrsizepkts);
+ UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
+
+ SUB_EXTEND_USTAT(ucast_no_buff_pkts,
+ total_unicast_packets_received);
+ SUB_EXTEND_USTAT(mcast_no_buff_pkts,
+ total_multicast_packets_received);
+ SUB_EXTEND_USTAT(bcast_no_buff_pkts,
+ total_broadcast_packets_received);
+ UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
+ UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
+ UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
+
+ qstats->total_broadcast_bytes_transmitted_hi =
+ le32_to_cpu(xclient->bcast_bytes_sent.hi);
+ qstats->total_broadcast_bytes_transmitted_lo =
+ le32_to_cpu(xclient->bcast_bytes_sent.lo);
+
+ qstats->total_multicast_bytes_transmitted_hi =
+ le32_to_cpu(xclient->mcast_bytes_sent.hi);
+ qstats->total_multicast_bytes_transmitted_lo =
+ le32_to_cpu(xclient->mcast_bytes_sent.lo);
+
+ qstats->total_unicast_bytes_transmitted_hi =
+ le32_to_cpu(xclient->ucast_bytes_sent.hi);
+ qstats->total_unicast_bytes_transmitted_lo =
+ le32_to_cpu(xclient->ucast_bytes_sent.lo);
+		/*
+		 * sum unicast/multicast/broadcast into
+		 * total_bytes_transmitted
+		 */
+ qstats->total_bytes_transmitted_hi =
+ qstats->total_unicast_bytes_transmitted_hi;
+ qstats->total_bytes_transmitted_lo =
+ qstats->total_unicast_bytes_transmitted_lo;
+
+ ADD_64(qstats->total_bytes_transmitted_hi,
+ qstats->total_broadcast_bytes_transmitted_hi,
+ qstats->total_bytes_transmitted_lo,
+ qstats->total_broadcast_bytes_transmitted_lo);
+
+ ADD_64(qstats->total_bytes_transmitted_hi,
+ qstats->total_multicast_bytes_transmitted_hi,
+ qstats->total_bytes_transmitted_lo,
+ qstats->total_multicast_bytes_transmitted_lo);
+
+ UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
+ total_unicast_packets_transmitted);
+ UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
+ total_multicast_packets_transmitted);
+ UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
+ total_broadcast_packets_transmitted);
+
+ UPDATE_EXTEND_TSTAT(checksum_discard,
+ total_packets_received_checksum_discarded);
+ UPDATE_EXTEND_TSTAT(ttl0_discard,
+ total_packets_received_ttl0_discarded);
+
+ UPDATE_EXTEND_XSTAT(error_drop_pkts,
+ total_transmitted_dropped_packets_error);
+
+ /* TPA aggregations completed */
+ UPDATE_EXTEND_USTAT(coalesced_events, total_tpa_aggregations);
+ /* Number of network frames aggregated by TPA */
+ UPDATE_EXTEND_USTAT(coalesced_pkts,
+ total_tpa_aggregated_frames);
+ /* Total number of bytes in completed TPA aggregations */
+ qstats->total_tpa_bytes_lo =
+ le32_to_cpu(uclient->coalesced_bytes.lo);
+ qstats->total_tpa_bytes_hi =
+ le32_to_cpu(uclient->coalesced_bytes.hi);
+
+ /* TPA stats per-function */
+ ADD_64(estats->total_tpa_aggregations_hi,
+ qstats->total_tpa_aggregations_hi,
+ estats->total_tpa_aggregations_lo,
+ qstats->total_tpa_aggregations_lo);
+ ADD_64(estats->total_tpa_aggregated_frames_hi,
+ qstats->total_tpa_aggregated_frames_hi,
+ estats->total_tpa_aggregated_frames_lo,
+ qstats->total_tpa_aggregated_frames_lo);
+ ADD_64(estats->total_tpa_bytes_hi,
+ qstats->total_tpa_bytes_hi,
+ estats->total_tpa_bytes_lo,
+ qstats->total_tpa_bytes_lo);
+
+ ADD_64(fstats->total_bytes_received_hi,
+ qstats->total_bytes_received_hi,
+ fstats->total_bytes_received_lo,
+ qstats->total_bytes_received_lo);
+ ADD_64(fstats->total_bytes_transmitted_hi,
+ qstats->total_bytes_transmitted_hi,
+ fstats->total_bytes_transmitted_lo,
+ qstats->total_bytes_transmitted_lo);
+ ADD_64(fstats->total_unicast_packets_received_hi,
+ qstats->total_unicast_packets_received_hi,
+ fstats->total_unicast_packets_received_lo,
+ qstats->total_unicast_packets_received_lo);
+ ADD_64(fstats->total_multicast_packets_received_hi,
+ qstats->total_multicast_packets_received_hi,
+ fstats->total_multicast_packets_received_lo,
+ qstats->total_multicast_packets_received_lo);
+ ADD_64(fstats->total_broadcast_packets_received_hi,
+ qstats->total_broadcast_packets_received_hi,
+ fstats->total_broadcast_packets_received_lo,
+ qstats->total_broadcast_packets_received_lo);
+ ADD_64(fstats->total_unicast_packets_transmitted_hi,
+ qstats->total_unicast_packets_transmitted_hi,
+ fstats->total_unicast_packets_transmitted_lo,
+ qstats->total_unicast_packets_transmitted_lo);
+ ADD_64(fstats->total_multicast_packets_transmitted_hi,
+ qstats->total_multicast_packets_transmitted_hi,
+ fstats->total_multicast_packets_transmitted_lo,
+ qstats->total_multicast_packets_transmitted_lo);
+ ADD_64(fstats->total_broadcast_packets_transmitted_hi,
+ qstats->total_broadcast_packets_transmitted_hi,
+ fstats->total_broadcast_packets_transmitted_lo,
+ qstats->total_broadcast_packets_transmitted_lo);
+ ADD_64(fstats->valid_bytes_received_hi,
+ qstats->valid_bytes_received_hi,
+ fstats->valid_bytes_received_lo,
+ qstats->valid_bytes_received_lo);
+
+ ADD_64(estats->etherstatsoverrsizepkts_hi,
+ qstats->etherstatsoverrsizepkts_hi,
+ estats->etherstatsoverrsizepkts_lo,
+ qstats->etherstatsoverrsizepkts_lo);
+ ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
+ estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
+ }
+
+ ADD_64(fstats->total_bytes_received_hi,
+ estats->rx_stat_ifhcinbadoctets_hi,
+ fstats->total_bytes_received_lo,
+ estats->rx_stat_ifhcinbadoctets_lo);
+
+ ADD_64(fstats->total_bytes_received_hi,
+ tfunc->rcv_error_bytes.hi,
+ fstats->total_bytes_received_lo,
+ tfunc->rcv_error_bytes.lo);
+
+ memcpy(estats, &(fstats->total_bytes_received_hi),
+ sizeof(struct host_func_stats) - 2*sizeof(u32));
+
+ ADD_64(estats->error_bytes_received_hi,
+ tfunc->rcv_error_bytes.hi,
+ estats->error_bytes_received_lo,
+ tfunc->rcv_error_bytes.lo);
+
+ ADD_64(estats->etherstatsoverrsizepkts_hi,
+ estats->rx_stat_dot3statsframestoolong_hi,
+ estats->etherstatsoverrsizepkts_lo,
+ estats->rx_stat_dot3statsframestoolong_lo);
+ ADD_64(estats->error_bytes_received_hi,
+ estats->rx_stat_ifhcinbadoctets_hi,
+ estats->error_bytes_received_lo,
+ estats->rx_stat_ifhcinbadoctets_lo);
+
+ if (bp->port.pmf) {
+ estats->mac_filter_discard =
+ le32_to_cpu(tport->mac_filter_discard);
+ estats->mf_tag_discard =
+ le32_to_cpu(tport->mf_tag_discard);
+ estats->brb_truncate_discard =
+ le32_to_cpu(tport->brb_truncate_discard);
+ estats->mac_discard = le32_to_cpu(tport->mac_discard);
+ }
+
+ fstats->host_func_stats_start = ++fstats->host_func_stats_end;
+
+ bp->stats_pending = 0;
+
+ return 0;
+}
+
+static void bnx2x_net_stats_update(struct bnx2x *bp)
+{
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct net_device_stats *nstats = &bp->dev->stats;
+ unsigned long tmp;
+ int i;
+
+ nstats->rx_packets =
+ bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
+ bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
+ bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
+
+ nstats->tx_packets =
+ bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
+ bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
+ bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
+
+ nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
+
+ nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
+
+ tmp = estats->mac_discard;
+ for_each_rx_queue(bp, i)
+ tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
+ nstats->rx_dropped = tmp;
+
+ nstats->tx_dropped = 0;
+
+ nstats->multicast =
+ bnx2x_hilo(&estats->total_multicast_packets_received_hi);
+
+ nstats->collisions =
+ bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
+
+ nstats->rx_length_errors =
+ bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
+ bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
+ nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
+ bnx2x_hilo(&estats->brb_truncate_hi);
+ nstats->rx_crc_errors =
+ bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
+ nstats->rx_frame_errors =
+ bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
+ nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
+ nstats->rx_missed_errors = 0;
+
+ nstats->rx_errors = nstats->rx_length_errors +
+ nstats->rx_over_errors +
+ nstats->rx_crc_errors +
+ nstats->rx_frame_errors +
+ nstats->rx_fifo_errors +
+ nstats->rx_missed_errors;
+
+ nstats->tx_aborted_errors =
+ bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
+ bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
+ nstats->tx_carrier_errors =
+ bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
+ nstats->tx_fifo_errors = 0;
+ nstats->tx_heartbeat_errors = 0;
+ nstats->tx_window_errors = 0;
+
+ nstats->tx_errors = nstats->tx_aborted_errors +
+ nstats->tx_carrier_errors +
+ bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
+}
+
+static void bnx2x_drv_stats_update(struct bnx2x *bp)
+{
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ int i;
+
+ estats->driver_xoff = 0;
+ estats->rx_err_discard_pkt = 0;
+ estats->rx_skb_alloc_failed = 0;
+ estats->hw_csum_err = 0;
+ for_each_queue(bp, i) {
+ struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
+
+ estats->driver_xoff += qstats->driver_xoff;
+ estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
+ estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
+ estats->hw_csum_err += qstats->hw_csum_err;
+ }
+}
+
+static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
+{
+ u32 val;
+
+ if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
+ val = SHMEM2_RD(bp, edebug_driver_if[1]);
+
+ if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
+ return true;
+ }
+
+ return false;
+}
+
+static void bnx2x_stats_update(struct bnx2x *bp)
+{
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ if (bnx2x_edebug_stats_stopped(bp))
+ return;
+
+ if (*stats_comp != DMAE_COMP_VAL)
+ return;
+
+ if (bp->port.pmf)
+ bnx2x_hw_stats_update(bp);
+
+ if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
+		BNX2X_ERR("storm stats were not updated 3 times in a row\n");
+ bnx2x_panic();
+ return;
+ }
+
+ bnx2x_net_stats_update(bp);
+ bnx2x_drv_stats_update(bp);
+
+ if (netif_msg_timer(bp)) {
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ int i, cos;
+
+ netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
+ estats->brb_drop_lo, estats->brb_truncate_lo);
+
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+
+ pr_debug("%s: rx usage(%4u) *rx_cons_sb(%u) rx pkt(%lu) rx calls(%lu %lu)\n",
+ fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
+ fp->rx_comp_cons),
+ le16_to_cpu(*fp->rx_cons_sb),
+ bnx2x_hilo(&qstats->
+ total_unicast_packets_received_hi),
+ fp->rx_calls, fp->rx_pkt);
+ }
+
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ struct bnx2x_fp_txdata *txdata;
+ struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+ struct netdev_queue *txq;
+
+ pr_debug("%s: tx pkt(%lu) (Xoff events %u)",
+ fp->name,
+ bnx2x_hilo(
+ &qstats->total_unicast_packets_transmitted_hi),
+ qstats->driver_xoff);
+
+ for_each_cos_in_tx_queue(fp, cos) {
+ txdata = &fp->txdata[cos];
+ txq = netdev_get_tx_queue(bp->dev,
+ FP_COS_TO_TXQ(fp, cos));
+
+ pr_debug("%d: tx avail(%4u) *tx_cons_sb(%u) tx calls (%lu) %s\n",
+ cos,
+ bnx2x_tx_avail(bp, txdata),
+ le16_to_cpu(*txdata->tx_cons_sb),
+ txdata->tx_pkt,
+ (netif_tx_queue_stopped(txq) ?
+ "Xoff" : "Xon")
+ );
+ }
+ }
+ }
+
+ bnx2x_hw_stats_post(bp);
+ bnx2x_storm_stats_post(bp);
+}
+
+static void bnx2x_port_stats_stop(struct bnx2x *bp)
+{
+ struct dmae_command *dmae;
+ u32 opcode;
+ int loader_idx = PMF_DMAE_C(bp);
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ bp->executer_idx = 0;
+
+ opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);
+
+ if (bp->port.port_stx) {
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ if (bp->func_stx)
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(
+ opcode, DMAE_COMP_GRC);
+ else
+ dmae->opcode = bnx2x_dmae_opcode_add_comp(
+ opcode, DMAE_COMP_PCI);
+
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+ dmae->dst_addr_lo = bp->port.port_stx >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = sizeof(struct host_port_stats) >> 2;
+ if (bp->func_stx) {
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ } else {
+ dmae->comp_addr_lo =
+ U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_addr_hi =
+ U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+ }
+ }
+
+ if (bp->func_stx) {
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode =
+ bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+ dmae->dst_addr_lo = bp->func_stx >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = sizeof(struct host_func_stats) >> 2;
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+ }
+}
+
+static void bnx2x_stats_stop(struct bnx2x *bp)
+{
+ int update = 0;
+
+ bnx2x_stats_comp(bp);
+
+ if (bp->port.pmf)
+ update = (bnx2x_hw_stats_update(bp) == 0);
+
+ update |= (bnx2x_storm_stats_update(bp) == 0);
+
+ if (update) {
+ bnx2x_net_stats_update(bp);
+
+ if (bp->port.pmf)
+ bnx2x_port_stats_stop(bp);
+
+ bnx2x_hw_stats_post(bp);
+ bnx2x_stats_comp(bp);
+ }
+}
+
+static void bnx2x_stats_do_nothing(struct bnx2x *bp)
+{
+}
+
+static const struct {
+ void (*action)(struct bnx2x *bp);
+ enum bnx2x_stats_state next_state;
+} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
+/* state event */
+{
+/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
+/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
+/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
+/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
+},
+{
+/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
+/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
+/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
+/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
+}
+};
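+
+/*
+ * Example walk through the table above: in STATS_STATE_DISABLED, a
+ * LINK_UP event runs bnx2x_stats_start() and moves the machine to
+ * STATS_STATE_ENABLED; a subsequent UPDATE event then runs
+ * bnx2x_stats_update() and stays in STATS_STATE_ENABLED.
+ */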
+
+void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
+{
+ enum bnx2x_stats_state state;
+ if (unlikely(bp->panic))
+ return;
+ bnx2x_stats_stm[bp->stats_state][event].action(bp);
+ spin_lock_bh(&bp->stats_lock);
+ state = bp->stats_state;
+ bp->stats_state = bnx2x_stats_stm[state][event].next_state;
+ spin_unlock_bh(&bp->stats_lock);
+
+ if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
+ DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
+ state, event, bp->stats_state);
+}
+
+static void bnx2x_port_stats_base_init(struct bnx2x *bp)
+{
+ struct dmae_command *dmae;
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ /* sanity */
+ if (!bp->port.pmf || !bp->port.port_stx) {
+ BNX2X_ERR("BUG!\n");
+ return;
+ }
+
+ bp->executer_idx = 0;
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+ true, DMAE_COMP_PCI);
+ dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+ dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+ dmae->dst_addr_lo = bp->port.port_stx >> 2;
+ dmae->dst_addr_hi = 0;
+ dmae->len = sizeof(struct host_port_stats) >> 2;
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+ bnx2x_hw_stats_post(bp);
+ bnx2x_stats_comp(bp);
+}
+
+static void bnx2x_func_stats_base_init(struct bnx2x *bp)
+{
+ int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;
+ u32 func_stx;
+
+ /* sanity */
+ if (!bp->port.pmf || !bp->func_stx) {
+ BNX2X_ERR("BUG!\n");
+ return;
+ }
+
+ /* save our func_stx */
+ func_stx = bp->func_stx;
+
+ for (vn = VN_0; vn < vn_max; vn++) {
+ int mb_idx = BP_FW_MB_IDX_VN(bp, vn);
+
+ bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
+ bnx2x_func_stats_init(bp);
+ bnx2x_hw_stats_post(bp);
+ bnx2x_stats_comp(bp);
+ }
+
+ /* restore our func_stx */
+ bp->func_stx = func_stx;
+}
+
+static void bnx2x_func_stats_base_update(struct bnx2x *bp)
+{
+ struct dmae_command *dmae = &bp->stats_dmae;
+ u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+ /* sanity */
+ if (!bp->func_stx) {
+ BNX2X_ERR("BUG!\n");
+ return;
+ }
+
+ bp->executer_idx = 0;
+ memset(dmae, 0, sizeof(struct dmae_command));
+
+ dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
+ true, DMAE_COMP_PCI);
+ dmae->src_addr_lo = bp->func_stx >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
+ dmae->len = sizeof(struct host_func_stats) >> 2;
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+
+ *stats_comp = 0;
+ bnx2x_hw_stats_post(bp);
+ bnx2x_stats_comp(bp);
+}
+
+/**
+ * Prepare the statistics ramrod data so that, later on, we only have
+ * to increment the statistics counter and send the ramrod each time
+ * we need to.
+ *
+ * @param bp
+ */
+static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
+{
+ int i;
+ struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;
+
+ dma_addr_t cur_data_offset;
+ struct stats_query_entry *cur_query_entry;
+
+ stats_hdr->cmd_num = bp->fw_stats_num;
+ stats_hdr->drv_stats_counter = 0;
+
+	/* The storm_counters struct contains the counters of completed
+	 * statistics requests per storm, which are incremented by the FW
+	 * each time it completes handling a statistics ramrod. We will
+	 * check these counters in the timer handler and discard stale
+	 * (statistics) ramrod completions.
+	 */
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, storm_counters);
+
+ stats_hdr->stats_counters_addrs.hi =
+ cpu_to_le32(U64_HI(cur_data_offset));
+ stats_hdr->stats_counters_addrs.lo =
+ cpu_to_le32(U64_LO(cur_data_offset));
+
+ /* prepare for the first stats ramrod (which will be completed with
+ * the counters equal to zero) - init the counters to something
+ * different.
+ */
+ memset(&bp->fw_stats_data->storm_counters, 0xff,
+ sizeof(struct stats_counter));
+
+ /**** Port FW statistics data ****/
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, port);
+
+ cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];
+
+ cur_query_entry->kind = STATS_TYPE_PORT;
+ /* The index field is a don't-care for port queries */
+ cur_query_entry->index = BP_PORT(bp);
+ /* The funcID field is a don't-care for port queries */
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
+
+ /**** PF FW statistics data ****/
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, pf);
+
+ cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];
+
+ cur_query_entry->kind = STATS_TYPE_PF;
+ /* The index field is a don't-care for PF queries */
+ cur_query_entry->index = BP_PORT(bp);
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
+
+ /**** Clients' queries ****/
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, queue_stats);
+
+ for_each_eth_queue(bp, i) {
+ cur_query_entry =
+ &bp->fw_stats_req->
+ query[BNX2X_FIRST_QUEUE_QUERY_IDX + i];
+
+ cur_query_entry->kind = STATS_TYPE_QUEUE;
+ cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi =
+ cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo =
+ cpu_to_le32(U64_LO(cur_data_offset));
+
+ cur_data_offset += sizeof(struct per_queue_stats);
+ }
+}
+
+void bnx2x_stats_init(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int mb_idx = BP_FW_MB_IDX(bp);
+ int i;
+
+ bp->stats_pending = 0;
+ bp->executer_idx = 0;
+ bp->stats_counter = 0;
+
+ /* port and func stats for management */
+ if (!BP_NOMCP(bp)) {
+ bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
+ bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
+
+ } else {
+ bp->port.port_stx = 0;
+ bp->func_stx = 0;
+ }
+ DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
+ bp->port.port_stx, bp->func_stx);
+
+ /* port stats */
+ memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
+ bp->port.old_nig_stats.brb_discard =
+ REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
+ bp->port.old_nig_stats.brb_truncate =
+ REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
+ if (!CHIP_IS_E3(bp)) {
+ REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
+ &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
+ REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
+ &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
+ }
+
+ /* function stats */
+ for_each_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
+ memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
+ memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
+ memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
+ }
+
+ /* Prepare statistics ramrod data */
+ bnx2x_prep_fw_stats_req(bp);
+
+ memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
+ memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
+
+ bp->stats_state = STATS_STATE_DISABLED;
+
+ if (bp->port.pmf) {
+ if (bp->port.port_stx)
+ bnx2x_port_stats_base_init(bp);
+
+ if (bp->func_stx)
+ bnx2x_func_stats_base_init(bp);
+
+ } else if (bp->func_stx)
+ bnx2x_func_stats_base_update(bp);
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
new file mode 100644
index 000000000000..5d8ce2f6afef
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -0,0 +1,381 @@
+/* bnx2x_stats.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+#ifndef BNX2X_STATS_H
+#define BNX2X_STATS_H
+
+#include <linux/types.h>
+
+struct nig_stats {
+ u32 brb_discard;
+ u32 brb_packet;
+ u32 brb_truncate;
+ u32 flow_ctrl_discard;
+ u32 flow_ctrl_octets;
+ u32 flow_ctrl_packet;
+ u32 mng_discard;
+ u32 mng_octet_inp;
+ u32 mng_octet_out;
+ u32 mng_packet_inp;
+ u32 mng_packet_out;
+ u32 pbf_octets;
+ u32 pbf_packet;
+ u32 safc_inp;
+ u32 egress_mac_pkt0_lo;
+ u32 egress_mac_pkt0_hi;
+ u32 egress_mac_pkt1_lo;
+ u32 egress_mac_pkt1_hi;
+};
+
+
+enum bnx2x_stats_event {
+ STATS_EVENT_PMF = 0,
+ STATS_EVENT_LINK_UP,
+ STATS_EVENT_UPDATE,
+ STATS_EVENT_STOP,
+ STATS_EVENT_MAX
+};
+
+enum bnx2x_stats_state {
+ STATS_STATE_DISABLED = 0,
+ STATS_STATE_ENABLED,
+ STATS_STATE_MAX
+};
+
+struct bnx2x_eth_stats {
+ u32 total_bytes_received_hi;
+ u32 total_bytes_received_lo;
+ u32 total_bytes_transmitted_hi;
+ u32 total_bytes_transmitted_lo;
+ u32 total_unicast_packets_received_hi;
+ u32 total_unicast_packets_received_lo;
+ u32 total_multicast_packets_received_hi;
+ u32 total_multicast_packets_received_lo;
+ u32 total_broadcast_packets_received_hi;
+ u32 total_broadcast_packets_received_lo;
+ u32 total_unicast_packets_transmitted_hi;
+ u32 total_unicast_packets_transmitted_lo;
+ u32 total_multicast_packets_transmitted_hi;
+ u32 total_multicast_packets_transmitted_lo;
+ u32 total_broadcast_packets_transmitted_hi;
+ u32 total_broadcast_packets_transmitted_lo;
+ u32 valid_bytes_received_hi;
+ u32 valid_bytes_received_lo;
+
+ u32 error_bytes_received_hi;
+ u32 error_bytes_received_lo;
+ u32 etherstatsoverrsizepkts_hi;
+ u32 etherstatsoverrsizepkts_lo;
+ u32 no_buff_discard_hi;
+ u32 no_buff_discard_lo;
+
+ u32 rx_stat_ifhcinbadoctets_hi;
+ u32 rx_stat_ifhcinbadoctets_lo;
+ u32 tx_stat_ifhcoutbadoctets_hi;
+ u32 tx_stat_ifhcoutbadoctets_lo;
+ u32 rx_stat_dot3statsfcserrors_hi;
+ u32 rx_stat_dot3statsfcserrors_lo;
+ u32 rx_stat_dot3statsalignmenterrors_hi;
+ u32 rx_stat_dot3statsalignmenterrors_lo;
+ u32 rx_stat_dot3statscarriersenseerrors_hi;
+ u32 rx_stat_dot3statscarriersenseerrors_lo;
+ u32 rx_stat_falsecarriererrors_hi;
+ u32 rx_stat_falsecarriererrors_lo;
+ u32 rx_stat_etherstatsundersizepkts_hi;
+ u32 rx_stat_etherstatsundersizepkts_lo;
+ u32 rx_stat_dot3statsframestoolong_hi;
+ u32 rx_stat_dot3statsframestoolong_lo;
+ u32 rx_stat_etherstatsfragments_hi;
+ u32 rx_stat_etherstatsfragments_lo;
+ u32 rx_stat_etherstatsjabbers_hi;
+ u32 rx_stat_etherstatsjabbers_lo;
+ u32 rx_stat_maccontrolframesreceived_hi;
+ u32 rx_stat_maccontrolframesreceived_lo;
+ u32 rx_stat_bmac_xpf_hi;
+ u32 rx_stat_bmac_xpf_lo;
+ u32 rx_stat_bmac_xcf_hi;
+ u32 rx_stat_bmac_xcf_lo;
+ u32 rx_stat_xoffstateentered_hi;
+ u32 rx_stat_xoffstateentered_lo;
+ u32 rx_stat_xonpauseframesreceived_hi;
+ u32 rx_stat_xonpauseframesreceived_lo;
+ u32 rx_stat_xoffpauseframesreceived_hi;
+ u32 rx_stat_xoffpauseframesreceived_lo;
+ u32 tx_stat_outxonsent_hi;
+ u32 tx_stat_outxonsent_lo;
+ u32 tx_stat_outxoffsent_hi;
+ u32 tx_stat_outxoffsent_lo;
+ u32 tx_stat_flowcontroldone_hi;
+ u32 tx_stat_flowcontroldone_lo;
+ u32 tx_stat_etherstatscollisions_hi;
+ u32 tx_stat_etherstatscollisions_lo;
+ u32 tx_stat_dot3statssinglecollisionframes_hi;
+ u32 tx_stat_dot3statssinglecollisionframes_lo;
+ u32 tx_stat_dot3statsmultiplecollisionframes_hi;
+ u32 tx_stat_dot3statsmultiplecollisionframes_lo;
+ u32 tx_stat_dot3statsdeferredtransmissions_hi;
+ u32 tx_stat_dot3statsdeferredtransmissions_lo;
+ u32 tx_stat_dot3statsexcessivecollisions_hi;
+ u32 tx_stat_dot3statsexcessivecollisions_lo;
+ u32 tx_stat_dot3statslatecollisions_hi;
+ u32 tx_stat_dot3statslatecollisions_lo;
+ u32 tx_stat_etherstatspkts64octets_hi;
+ u32 tx_stat_etherstatspkts64octets_lo;
+ u32 tx_stat_etherstatspkts65octetsto127octets_hi;
+ u32 tx_stat_etherstatspkts65octetsto127octets_lo;
+ u32 tx_stat_etherstatspkts128octetsto255octets_hi;
+ u32 tx_stat_etherstatspkts128octetsto255octets_lo;
+ u32 tx_stat_etherstatspkts256octetsto511octets_hi;
+ u32 tx_stat_etherstatspkts256octetsto511octets_lo;
+ u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
+ u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
+ u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
+ u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
+ u32 tx_stat_etherstatspktsover1522octets_hi;
+ u32 tx_stat_etherstatspktsover1522octets_lo;
+ u32 tx_stat_bmac_2047_hi;
+ u32 tx_stat_bmac_2047_lo;
+ u32 tx_stat_bmac_4095_hi;
+ u32 tx_stat_bmac_4095_lo;
+ u32 tx_stat_bmac_9216_hi;
+ u32 tx_stat_bmac_9216_lo;
+ u32 tx_stat_bmac_16383_hi;
+ u32 tx_stat_bmac_16383_lo;
+ u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
+ u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
+ u32 tx_stat_bmac_ufl_hi;
+ u32 tx_stat_bmac_ufl_lo;
+
+ u32 pause_frames_received_hi;
+ u32 pause_frames_received_lo;
+ u32 pause_frames_sent_hi;
+ u32 pause_frames_sent_lo;
+
+ u32 etherstatspkts1024octetsto1522octets_hi;
+ u32 etherstatspkts1024octetsto1522octets_lo;
+ u32 etherstatspktsover1522octets_hi;
+ u32 etherstatspktsover1522octets_lo;
+
+ u32 brb_drop_hi;
+ u32 brb_drop_lo;
+ u32 brb_truncate_hi;
+ u32 brb_truncate_lo;
+
+ u32 mac_filter_discard;
+ u32 mf_tag_discard;
+ u32 brb_truncate_discard;
+ u32 mac_discard;
+
+ u32 driver_xoff;
+ u32 rx_err_discard_pkt;
+ u32 rx_skb_alloc_failed;
+ u32 hw_csum_err;
+
+ u32 nig_timer_max;
+
+ /* TPA */
+ u32 total_tpa_aggregations_hi;
+ u32 total_tpa_aggregations_lo;
+ u32 total_tpa_aggregated_frames_hi;
+ u32 total_tpa_aggregated_frames_lo;
+ u32 total_tpa_bytes_hi;
+ u32 total_tpa_bytes_lo;
+};
+
+
+struct bnx2x_eth_q_stats {
+ u32 total_unicast_bytes_received_hi;
+ u32 total_unicast_bytes_received_lo;
+ u32 total_broadcast_bytes_received_hi;
+ u32 total_broadcast_bytes_received_lo;
+ u32 total_multicast_bytes_received_hi;
+ u32 total_multicast_bytes_received_lo;
+ u32 total_bytes_received_hi;
+ u32 total_bytes_received_lo;
+ u32 total_unicast_bytes_transmitted_hi;
+ u32 total_unicast_bytes_transmitted_lo;
+ u32 total_broadcast_bytes_transmitted_hi;
+ u32 total_broadcast_bytes_transmitted_lo;
+ u32 total_multicast_bytes_transmitted_hi;
+ u32 total_multicast_bytes_transmitted_lo;
+ u32 total_bytes_transmitted_hi;
+ u32 total_bytes_transmitted_lo;
+ u32 total_unicast_packets_received_hi;
+ u32 total_unicast_packets_received_lo;
+ u32 total_multicast_packets_received_hi;
+ u32 total_multicast_packets_received_lo;
+ u32 total_broadcast_packets_received_hi;
+ u32 total_broadcast_packets_received_lo;
+ u32 total_unicast_packets_transmitted_hi;
+ u32 total_unicast_packets_transmitted_lo;
+ u32 total_multicast_packets_transmitted_hi;
+ u32 total_multicast_packets_transmitted_lo;
+ u32 total_broadcast_packets_transmitted_hi;
+ u32 total_broadcast_packets_transmitted_lo;
+ u32 valid_bytes_received_hi;
+ u32 valid_bytes_received_lo;
+
+ u32 etherstatsoverrsizepkts_hi;
+ u32 etherstatsoverrsizepkts_lo;
+ u32 no_buff_discard_hi;
+ u32 no_buff_discard_lo;
+
+ u32 driver_xoff;
+ u32 rx_err_discard_pkt;
+ u32 rx_skb_alloc_failed;
+ u32 hw_csum_err;
+
+ u32 total_packets_received_checksum_discarded_hi;
+ u32 total_packets_received_checksum_discarded_lo;
+ u32 total_packets_received_ttl0_discarded_hi;
+ u32 total_packets_received_ttl0_discarded_lo;
+ u32 total_transmitted_dropped_packets_error_hi;
+ u32 total_transmitted_dropped_packets_error_lo;
+
+ /* TPA */
+ u32 total_tpa_aggregations_hi;
+ u32 total_tpa_aggregations_lo;
+ u32 total_tpa_aggregated_frames_hi;
+ u32 total_tpa_aggregated_frames_lo;
+ u32 total_tpa_bytes_hi;
+ u32 total_tpa_bytes_lo;
+};
+
+/****************************************************************************
+* Macros
+****************************************************************************/
+
+/* sum[hi:lo] += add[hi:lo] */
+#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
+ do { \
+ s_lo += a_lo; \
+ s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
+ } while (0)
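+
+/* Illustrative carry case for ADD_64: with sum = 0x00000001:0xffffffff and
+ * addend = 0x00000000:0x00000001, s_lo wraps to 0, the (s_lo < a_lo) test
+ * detects the wrap and the carry bumps s_hi to 2, i.e.
+ * 0x1ffffffff + 1 = 0x200000000.
+ */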
+
+/* difference = minuend - subtrahend */
+#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
+ do { \
+ if (m_lo < s_lo) { \
+ /* underflow */ \
+ d_hi = m_hi - s_hi; \
+ if (d_hi > 0) { \
+ /* we can 'loan' 1 */ \
+ d_hi--; \
+ d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
+ } else { \
+ /* m_hi <= s_hi */ \
+ d_hi = 0; \
+ d_lo = 0; \
+ } \
+ } else { \
+ /* m_lo >= s_lo */ \
+ if (m_hi < s_hi) { \
+ d_hi = 0; \
+ d_lo = 0; \
+ } else { \
+ /* m_hi >= s_hi */ \
+ d_hi = m_hi - s_hi; \
+ d_lo = m_lo - s_lo; \
+ } \
+ } \
+ } while (0)
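+
+/* Illustrative borrow case for DIFF_64: minuend = 0x00000002:0x00000000,
+ * subtrahend = 0x00000001:0x00000001. Since m_lo < s_lo, one is "loaned"
+ * from d_hi (2 - 1 - 1 = 0) and d_lo = 0 + (0xffffffff - 1) + 1 =
+ * 0xffffffff, i.e. 0x200000000 - 0x100000001 = 0xffffffff. A negative
+ * difference clamps to 0:0 rather than wrapping.
+ */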
+
+#define UPDATE_STAT64(s, t) \
+ do { \
+ DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
+ diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
+ pstats->mac_stx[0].t##_hi = new->s##_hi; \
+ pstats->mac_stx[0].t##_lo = new->s##_lo; \
+ ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
+ pstats->mac_stx[1].t##_lo, diff.lo); \
+ } while (0)
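+
+/* In UPDATE_STAT64, mac_stx[0] holds the previous raw HW snapshot and
+ * mac_stx[1] the running 64-bit accumulator: the delta against the
+ * snapshot is computed, the snapshot is refreshed, and the delta is
+ * added to the accumulator.
+ */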
+
+#define UPDATE_STAT64_NIG(s, t) \
+ do { \
+ DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
+ diff.lo, new->s##_lo, old->s##_lo); \
+ ADD_64(estats->t##_hi, diff.hi, \
+ estats->t##_lo, diff.lo); \
+ } while (0)
+
+/* sum[hi:lo] += add */
+#define ADD_EXTEND_64(s_hi, s_lo, a) \
+ do { \
+ s_lo += a; \
+ s_hi += (s_lo < a) ? 1 : 0; \
+ } while (0)
+
+#define ADD_STAT64(diff, t) \
+ do { \
+ ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \
+ pstats->mac_stx[1].t##_lo, new->diff##_lo); \
+ } while (0)
+
+#define UPDATE_EXTEND_STAT(s) \
+ do { \
+ ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
+ pstats->mac_stx[1].s##_lo, \
+ new->s); \
+ } while (0)
+
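+/* The TSTAT/USTAT/XSTAT variants below all extend a 32-bit little-endian
+ * storm counter into a 64-bit host counter; the unsigned 32-bit
+ * subtraction yields the correct delta even across a single counter wrap.
+ */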
+#define UPDATE_EXTEND_TSTAT(s, t) \
+ do { \
+ diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
+ old_tclient->s = tclient->s; \
+ ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+ } while (0)
+
+#define UPDATE_EXTEND_USTAT(s, t) \
+ do { \
+ diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+ old_uclient->s = uclient->s; \
+ ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+ } while (0)
+
+#define UPDATE_EXTEND_XSTAT(s, t) \
+ do { \
+ diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
+ old_xclient->s = xclient->s; \
+ ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+ } while (0)
+
+/* minuend -= subtrahend */
+#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
+ do { \
+ DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
+ } while (0)
+
+/* minuend[hi:lo] -= subtrahend */
+#define SUB_EXTEND_64(m_hi, m_lo, s) \
+ do { \
+ SUB_64(m_hi, 0, m_lo, s); \
+ } while (0)
+
+#define SUB_EXTEND_USTAT(s, t) \
+ do { \
+ diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+ SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+ } while (0)
+
+
+/* forward */
+struct bnx2x;
+
+void bnx2x_stats_init(struct bnx2x *bp);
+
+void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
+
+#endif /* BNX2X_STATS_H */
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
new file mode 100644
index 000000000000..6f10c6939834
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -0,0 +1,5492 @@
+/* cnic.c: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
+ * Modified and maintained by: Michael Chan <mchan@broadcom.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/uio_driver.h>
+#include <linux/in.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/prefetch.h>
+#include <linux/random.h>
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define BCM_VLAN 1
+#endif
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/route.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/ip6_checksum.h>
+#include <scsi/iscsi_if.h>
+
+#include "cnic_if.h"
+#include "bnx2.h"
+#include "bnx2x/bnx2x_reg.h"
+#include "bnx2x/bnx2x_fw_defs.h"
+#include "bnx2x/bnx2x_hsi.h"
+#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
+#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
+#include "cnic.h"
+#include "cnic_defs.h"
+
+#define DRV_MODULE_NAME "cnic"
+
+static char version[] __devinitdata =
+ "Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
+ "Chen (zongxi@broadcom.com");
+MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(CNIC_MODULE_VERSION);
+
+/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
+static LIST_HEAD(cnic_dev_list);
+static LIST_HEAD(cnic_udev_list);
+static DEFINE_RWLOCK(cnic_dev_lock);
+static DEFINE_MUTEX(cnic_lock);
+
+static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+
+/* helper function, assuming cnic_lock is held */
+static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
+{
+ return rcu_dereference_protected(cnic_ulp_tbl[type],
+ lockdep_is_held(&cnic_lock));
+}
+
+static int cnic_service_bnx2(void *, void *);
+static int cnic_service_bnx2x(void *, void *);
+static int cnic_ctl(void *, struct cnic_ctl_info *);
+
+static struct cnic_ops cnic_bnx2_ops = {
+ .cnic_owner = THIS_MODULE,
+ .cnic_handler = cnic_service_bnx2,
+ .cnic_ctl = cnic_ctl,
+};
+
+static struct cnic_ops cnic_bnx2x_ops = {
+ .cnic_owner = THIS_MODULE,
+ .cnic_handler = cnic_service_bnx2x,
+ .cnic_ctl = cnic_ctl,
+};
+
+static struct workqueue_struct *cnic_wq;
+
+static void cnic_shutdown_rings(struct cnic_dev *);
+static void cnic_init_rings(struct cnic_dev *);
+static int cnic_cm_set_pg(struct cnic_sock *);
+
+static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
+{
+ struct cnic_uio_dev *udev = uinfo->priv;
+ struct cnic_dev *dev;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (udev->uio_dev != -1)
+ return -EBUSY;
+
+ rtnl_lock();
+ dev = udev->dev;
+
+ if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+ rtnl_unlock();
+ return -ENODEV;
+ }
+
+ udev->uio_dev = iminor(inode);
+
+ cnic_shutdown_rings(dev);
+ cnic_init_rings(dev);
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
+{
+ struct cnic_uio_dev *udev = uinfo->priv;
+
+ udev->uio_dev = -1;
+ return 0;
+}
+
+static inline void cnic_hold(struct cnic_dev *dev)
+{
+ atomic_inc(&dev->ref_count);
+}
+
+static inline void cnic_put(struct cnic_dev *dev)
+{
+ atomic_dec(&dev->ref_count);
+}
+
+static inline void csk_hold(struct cnic_sock *csk)
+{
+ atomic_inc(&csk->ref_count);
+}
+
+static inline void csk_put(struct cnic_sock *csk)
+{
+ atomic_dec(&csk->ref_count);
+}
+
+static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
+{
+ struct cnic_dev *cdev;
+
+ read_lock(&cnic_dev_lock);
+ list_for_each_entry(cdev, &cnic_dev_list, list) {
+ if (netdev == cdev->netdev) {
+ cnic_hold(cdev);
+ read_unlock(&cnic_dev_lock);
+ return cdev;
+ }
+ }
+ read_unlock(&cnic_dev_lock);
+ return NULL;
+}
+
+static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
+{
+ atomic_inc(&ulp_ops->ref_count);
+}
+
+static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
+{
+ atomic_dec(&ulp_ops->ref_count);
+}
+
+static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct drv_ctl_info info;
+ struct drv_ctl_io *io = &info.data.io;
+
+ info.cmd = DRV_CTL_CTX_WR_CMD;
+ io->cid_addr = cid_addr;
+ io->offset = off;
+ io->data = val;
+ ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct drv_ctl_info info;
+ struct drv_ctl_io *io = &info.data.io;
+
+ info.cmd = DRV_CTL_CTXTBL_WR_CMD;
+ io->offset = off;
+ io->dma_addr = addr;
+ ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct drv_ctl_info info;
+ struct drv_ctl_l2_ring *ring = &info.data.ring;
+
+ if (start)
+ info.cmd = DRV_CTL_START_L2_CMD;
+ else
+ info.cmd = DRV_CTL_STOP_L2_CMD;
+
+ ring->cid = cid;
+ ring->client_id = cl_id;
+ ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct drv_ctl_info info;
+ struct drv_ctl_io *io = &info.data.io;
+
+ info.cmd = DRV_CTL_IO_WR_CMD;
+ io->offset = off;
+ io->data = val;
+ ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct drv_ctl_info info;
+ struct drv_ctl_io *io = &info.data.io;
+
+ info.cmd = DRV_CTL_IO_RD_CMD;
+ io->offset = off;
+ ethdev->drv_ctl(dev->netdev, &info);
+ return io->data;
+}
+
+static int cnic_in_use(struct cnic_sock *csk)
+{
+ return test_bit(SK_F_INUSE, &csk->flags);
+}
+
+static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct drv_ctl_info info;
+
+ info.cmd = cmd;
+ info.data.credit.credit_count = count;
+ ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
+{
+ u32 i;
+
+ for (i = 0; i < cp->max_cid_space; i++) {
+ if (cp->ctx_tbl[i].cid == cid) {
+ *l5_cid = i;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
+ struct cnic_sock *csk)
+{
+ struct iscsi_path path_req;
+ char *buf = NULL;
+ u16 len = 0;
+ u32 msg_type = ISCSI_KEVENT_IF_DOWN;
+ struct cnic_ulp_ops *ulp_ops;
+ struct cnic_uio_dev *udev = cp->udev;
+ int rc = 0, retry = 0;
+
+ if (!udev || udev->uio_dev == -1)
+ return -ENODEV;
+
+ if (csk) {
+ len = sizeof(path_req);
+ buf = (char *) &path_req;
+ memset(&path_req, 0, len);
+
+ msg_type = ISCSI_KEVENT_PATH_REQ;
+ path_req.handle = (u64) csk->l5_cid;
+ if (test_bit(SK_F_IPV6, &csk->flags)) {
+ memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
+ sizeof(struct in6_addr));
+ path_req.ip_addr_len = 16;
+ } else {
+ memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
+ sizeof(struct in_addr));
+ path_req.ip_addr_len = 4;
+ }
+ path_req.vlan_id = csk->vlan_id;
+ path_req.pmtu = csk->mtu;
+ }
+
+ while (retry < 3) {
+ rc = 0;
+ rcu_read_lock();
+ ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
+ if (ulp_ops)
+ rc = ulp_ops->iscsi_nl_send_msg(
+ cp->ulp_handle[CNIC_ULP_ISCSI],
+ msg_type, buf, len);
+ rcu_read_unlock();
+ if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
+ break;
+
+ msleep(100);
+ retry++;
+ }
+ return rc;
+}
+
+static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
+
+static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
+ char *buf, u16 len)
+{
+ int rc = -EINVAL;
+
+ switch (msg_type) {
+ case ISCSI_UEVENT_PATH_UPDATE: {
+ struct cnic_local *cp;
+ u32 l5_cid;
+ struct cnic_sock *csk;
+ struct iscsi_path *path_resp;
+
+ if (len < sizeof(*path_resp))
+ break;
+
+ path_resp = (struct iscsi_path *) buf;
+ cp = dev->cnic_priv;
+ l5_cid = (u32) path_resp->handle;
+ if (l5_cid >= MAX_CM_SK_TBL_SZ)
+ break;
+
+ rcu_read_lock();
+ if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
+ rc = -ENODEV;
+ rcu_read_unlock();
+ break;
+ }
+ csk = &cp->csk_tbl[l5_cid];
+ csk_hold(csk);
+ if (cnic_in_use(csk) &&
+ test_bit(SK_F_CONNECT_START, &csk->flags)) {
+
+ memcpy(csk->ha, path_resp->mac_addr, 6);
+ if (test_bit(SK_F_IPV6, &csk->flags))
+ memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
+ sizeof(struct in6_addr));
+ else
+ memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
+ sizeof(struct in_addr));
+
+ if (is_valid_ether_addr(csk->ha)) {
+ cnic_cm_set_pg(csk);
+ } else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
+ !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+
+ cnic_cm_upcall(cp, csk,
+ L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
+ clear_bit(SK_F_CONNECT_START, &csk->flags);
+ }
+ }
+ csk_put(csk);
+ rcu_read_unlock();
+ rc = 0;
+ }
+ }
+
+ return rc;
+}
+
+static int cnic_offld_prep(struct cnic_sock *csk)
+{
+ if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+ return 0;
+
+ if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
+ clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+ return 0;
+ }
+
+ return 1;
+}
+
+static int cnic_close_prep(struct cnic_sock *csk)
+{
+ clear_bit(SK_F_CONNECT_START, &csk->flags);
+ smp_mb__after_clear_bit();
+
+ if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+ while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+ msleep(1);
+
+ return 1;
+ }
+ return 0;
+}
+
+static int cnic_abort_prep(struct cnic_sock *csk)
+{
+ clear_bit(SK_F_CONNECT_START, &csk->flags);
+ smp_mb__after_clear_bit();
+
+ while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+ msleep(1);
+
+ if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+ csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+ return 1;
+ }
+
+ return 0;
+}
+
+int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
+{
+ struct cnic_dev *dev;
+
+ if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
+ pr_err("%s: Bad type %d\n", __func__, ulp_type);
+ return -EINVAL;
+ }
+ mutex_lock(&cnic_lock);
+ if (cnic_ulp_tbl_prot(ulp_type)) {
+ pr_err("%s: Type %d has already been registered\n",
+ __func__, ulp_type);
+ mutex_unlock(&cnic_lock);
+ return -EBUSY;
+ }
+
+ read_lock(&cnic_dev_lock);
+ list_for_each_entry(dev, &cnic_dev_list, list) {
+ struct cnic_local *cp = dev->cnic_priv;
+
+ clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
+ }
+ read_unlock(&cnic_dev_lock);
+
+ atomic_set(&ulp_ops->ref_count, 0);
+ rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
+ mutex_unlock(&cnic_lock);
+
+ /* Prevent race conditions with netdev_event */
+ rtnl_lock();
+ list_for_each_entry(dev, &cnic_dev_list, list) {
+ struct cnic_local *cp = dev->cnic_priv;
+
+ if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
+ ulp_ops->cnic_init(dev);
+ }
+ rtnl_unlock();
+
+ return 0;
+}
+
+int cnic_unregister_driver(int ulp_type)
+{
+ struct cnic_dev *dev;
+ struct cnic_ulp_ops *ulp_ops;
+ int i = 0;
+
+ if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
+ pr_err("%s: Bad type %d\n", __func__, ulp_type);
+ return -EINVAL;
+ }
+ mutex_lock(&cnic_lock);
+ ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+ if (!ulp_ops) {
+ pr_err("%s: Type %d has not been registered\n",
+ __func__, ulp_type);
+ goto out_unlock;
+ }
+ read_lock(&cnic_dev_lock);
+ list_for_each_entry(dev, &cnic_dev_list, list) {
+ struct cnic_local *cp = dev->cnic_priv;
+
+ if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+ pr_err("%s: Type %d still has devices registered\n",
+ __func__, ulp_type);
+ read_unlock(&cnic_dev_lock);
+ goto out_unlock;
+ }
+ }
+ read_unlock(&cnic_dev_lock);
+
+ rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
+
+ mutex_unlock(&cnic_lock);
+ synchronize_rcu();
+ while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
+ msleep(100);
+ i++;
+ }
+
+ /* 'dev' is a stale list cursor here, so don't dereference it */
+ if (atomic_read(&ulp_ops->ref_count) != 0)
+ pr_warn("%s: Failed waiting for ref count to go to zero\n",
+ __func__);
+ return 0;
+
+out_unlock:
+ mutex_unlock(&cnic_lock);
+ return -EINVAL;
+}
+
+static int cnic_start_hw(struct cnic_dev *);
+static void cnic_stop_hw(struct cnic_dev *);
+
+static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
+ void *ulp_ctx)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_ulp_ops *ulp_ops;
+
+ if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
+ pr_err("%s: Bad type %d\n", __func__, ulp_type);
+ return -EINVAL;
+ }
+ mutex_lock(&cnic_lock);
+ if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
+ pr_err("%s: Driver with type %d has not been registered\n",
+ __func__, ulp_type);
+ mutex_unlock(&cnic_lock);
+ return -EAGAIN;
+ }
+ if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+ pr_err("%s: Type %d has already been registered to this device\n",
+ __func__, ulp_type);
+ mutex_unlock(&cnic_lock);
+ return -EBUSY;
+ }
+
+ clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
+ cp->ulp_handle[ulp_type] = ulp_ctx;
+ ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+ rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
+ cnic_hold(dev);
+
+ if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
+ ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
+
+ mutex_unlock(&cnic_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnic_register_driver);
+
+static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ int i = 0;
+
+ if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
+ pr_err("%s: Bad type %d\n", __func__, ulp_type);
+ return -EINVAL;
+ }
+ mutex_lock(&cnic_lock);
+ if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+ rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
+ cnic_put(dev);
+ } else {
+ pr_err("%s: device not registered to this ulp type %d\n",
+ __func__, ulp_type);
+ mutex_unlock(&cnic_lock);
+ return -EINVAL;
+ }
+ mutex_unlock(&cnic_lock);
+
+ if (ulp_type == CNIC_ULP_ISCSI)
+ cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+
+ synchronize_rcu();
+
+ while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
+ i < 20) {
+ msleep(100);
+ i++;
+ }
+ if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
+ netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
+
+ return 0;
+}
+EXPORT_SYMBOL(cnic_unregister_driver);
+
+static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
+ u32 next)
+{
+ id_tbl->start = start_id;
+ id_tbl->max = size;
+ id_tbl->next = next;
+ spin_lock_init(&id_tbl->lock);
+ id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
+ if (!id_tbl->table)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
+{
+ kfree(id_tbl->table);
+ id_tbl->table = NULL;
+}
+
+static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
+{
+ int ret = -1;
+
+ id -= id_tbl->start;
+ if (id >= id_tbl->max)
+ return ret;
+
+ spin_lock(&id_tbl->lock);
+ if (!test_bit(id, id_tbl->table)) {
+ set_bit(id, id_tbl->table);
+ ret = 0;
+ }
+ spin_unlock(&id_tbl->lock);
+ return ret;
+}
+
+/* Returns -1 if not successful */
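+/* The search is round-robin: it resumes at ->next and wraps back to the
+ * start of the bitmap; the "(id + 1) & (id_tbl->max - 1)" advance appears
+ * to assume ->max is a power of two.
+ */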
+static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
+{
+ u32 id;
+
+ spin_lock(&id_tbl->lock);
+ id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
+ if (id >= id_tbl->max) {
+ id = -1;
+ if (id_tbl->next != 0) {
+ id = find_first_zero_bit(id_tbl->table, id_tbl->next);
+ if (id >= id_tbl->next)
+ id = -1;
+ }
+ }
+
+ if (id < id_tbl->max) {
+ set_bit(id, id_tbl->table);
+ id_tbl->next = (id + 1) & (id_tbl->max - 1);
+ id += id_tbl->start;
+ }
+
+ spin_unlock(&id_tbl->lock);
+
+ return id;
+}
+
+static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
+{
+ if (id == -1)
+ return;
+
+ id -= id_tbl->start;
+ if (id >= id_tbl->max)
+ return;
+
+ clear_bit(id, id_tbl->table);
+}
+
+static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
+{
+ int i;
+
+ if (!dma->pg_arr)
+ return;
+
+ for (i = 0; i < dma->num_pages; i++) {
+ if (dma->pg_arr[i]) {
+ dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
+ dma->pg_arr[i], dma->pg_map_arr[i]);
+ dma->pg_arr[i] = NULL;
+ }
+ }
+ if (dma->pgtbl) {
+ dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
+ dma->pgtbl, dma->pgtbl_map);
+ dma->pgtbl = NULL;
+ }
+ kfree(dma->pg_arr);
+ dma->pg_arr = NULL;
+ dma->num_pages = 0;
+}
+
+static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
+{
+ int i;
+ __le32 *page_table = (__le32 *) dma->pgtbl;
+
+ for (i = 0; i < dma->num_pages; i++) {
+ /* Each 64-bit entry is written high dword first (big endian
+ * word order); the dwords themselves are little endian.
+ */
+ *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
+ page_table++;
+ *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
+ page_table++;
+ }
+}
+
+static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
+{
+ int i;
+ __le32 *page_table = (__le32 *) dma->pgtbl;
+
+ for (i = 0; i < dma->num_pages; i++) {
+ /* Each 64-bit entry is written low dword first (little endian
+ * word order).
+ */
+ *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
+ page_table++;
+ *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
+ page_table++;
+ }
+}
+
+static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
+ int pages, int use_pg_tbl)
+{
+ int i, size;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ size = pages * (sizeof(void *) + sizeof(dma_addr_t));
+ dma->pg_arr = kzalloc(size, GFP_ATOMIC);
+ if (dma->pg_arr == NULL)
+ return -ENOMEM;
+
+ dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
+ dma->num_pages = pages;
+
+ for (i = 0; i < pages; i++) {
+ dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
+ BCM_PAGE_SIZE,
+ &dma->pg_map_arr[i],
+ GFP_ATOMIC);
+ if (dma->pg_arr[i] == NULL)
+ goto error;
+ }
+ if (!use_pg_tbl)
+ return 0;
+
+ dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
+ ~(BCM_PAGE_SIZE - 1);
+ dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
+ &dma->pgtbl_map, GFP_ATOMIC);
+ if (dma->pgtbl == NULL)
+ goto error;
+
+ cp->setup_pgtbl(dev, dma);
+
+ return 0;
+
+error:
+ cnic_free_dma(dev, dma);
+ return -ENOMEM;
+}
+
+static void cnic_free_context(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ int i;
+
+ for (i = 0; i < cp->ctx_blks; i++) {
+ if (cp->ctx_arr[i].ctx) {
+ dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
+ cp->ctx_arr[i].ctx,
+ cp->ctx_arr[i].mapping);
+ cp->ctx_arr[i].ctx = NULL;
+ }
+ }
+}
+
+static void __cnic_free_uio(struct cnic_uio_dev *udev)
+{
+ uio_unregister_device(&udev->cnic_uinfo);
+
+ if (udev->l2_buf) {
+ dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
+ udev->l2_buf, udev->l2_buf_map);
+ udev->l2_buf = NULL;
+ }
+
+ if (udev->l2_ring) {
+ dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
+ udev->l2_ring, udev->l2_ring_map);
+ udev->l2_ring = NULL;
+ }
+
+ pci_dev_put(udev->pdev);
+ kfree(udev);
+}
+
+static void cnic_free_uio(struct cnic_uio_dev *udev)
+{
+ if (!udev)
+ return;
+
+ write_lock(&cnic_dev_lock);
+ list_del_init(&udev->list);
+ write_unlock(&cnic_dev_lock);
+ __cnic_free_uio(udev);
+}
+
+static void cnic_free_resc(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_uio_dev *udev = cp->udev;
+
+ if (udev) {
+ udev->dev = NULL;
+ cp->udev = NULL;
+ }
+
+ cnic_free_context(dev);
+ kfree(cp->ctx_arr);
+ cp->ctx_arr = NULL;
+ cp->ctx_blks = 0;
+
+ cnic_free_dma(dev, &cp->gbl_buf_info);
+ cnic_free_dma(dev, &cp->kwq_info);
+ cnic_free_dma(dev, &cp->kwq_16_data_info);
+ cnic_free_dma(dev, &cp->kcq2.dma);
+ cnic_free_dma(dev, &cp->kcq1.dma);
+ kfree(cp->iscsi_tbl);
+ cp->iscsi_tbl = NULL;
+ kfree(cp->ctx_tbl);
+ cp->ctx_tbl = NULL;
+
+ cnic_free_id_tbl(&cp->fcoe_cid_tbl);
+ cnic_free_id_tbl(&cp->cid_tbl);
+}
+
+static int cnic_alloc_context(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+
+ if (CHIP_NUM(cp) == CHIP_NUM_5709) {
+ int i, k, arr_size;
+
+ cp->ctx_blk_size = BCM_PAGE_SIZE;
+ cp->cids_per_blk = BCM_PAGE_SIZE / 128;
+ arr_size = BNX2_MAX_CID / cp->cids_per_blk *
+ sizeof(struct cnic_ctx);
+ cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
+ if (cp->ctx_arr == NULL)
+ return -ENOMEM;
+
+ k = 0;
+ for (i = 0; i < 2; i++) {
+ u32 j, reg, off, lo, hi;
+
+ if (i == 0)
+ off = BNX2_PG_CTX_MAP;
+ else
+ off = BNX2_ISCSI_CTX_MAP;
+
+ reg = cnic_reg_rd_ind(dev, off);
+ lo = reg >> 16;
+ hi = reg & 0xffff;
+ for (j = lo; j < hi; j += cp->cids_per_blk, k++)
+ cp->ctx_arr[k].cid = j;
+ }
+
+ cp->ctx_blks = k;
+ if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
+ cp->ctx_blks = 0;
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < cp->ctx_blks; i++) {
+ cp->ctx_arr[i].ctx =
+ dma_alloc_coherent(&dev->pcidev->dev,
+ BCM_PAGE_SIZE,
+ &cp->ctx_arr[i].mapping,
+ GFP_KERNEL);
+ if (cp->ctx_arr[i].ctx == NULL)
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+static u16 cnic_bnx2_next_idx(u16 idx)
+{
+ return idx + 1;
+}
+
+static u16 cnic_bnx2_hw_idx(u16 idx)
+{
+ return idx;
+}
+
+static u16 cnic_bnx2x_next_idx(u16 idx)
+{
+ idx++;
+ if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
+ idx++;
+
+ return idx;
+}
+
+static u16 cnic_bnx2x_hw_idx(u16 idx)
+{
+ if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
+ idx++;
+ return idx;
+}
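+
+/* On bnx2x, the last slot of each KCQ page holds a bnx2x_bd_chain_next
+ * pointer rather than a KCQE (see cnic_alloc_kcq() below), so these
+ * helpers skip any index whose low bits equal MAX_KCQE_CNT; the bnx2
+ * variants above are plain pass-throughs.
+ */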
+
+static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
+ bool use_pg_tbl)
+{
+ int err, i;
+ struct kcqe **kcq;
+
+ err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_pg_tbl ? 1 : 0);
+ if (err)
+ return err;
+
+ kcq = (struct kcqe **) info->dma.pg_arr;
+ info->kcq = kcq;
+
+ info->next_idx = cnic_bnx2_next_idx;
+ info->hw_idx = cnic_bnx2_hw_idx;
+ if (use_pg_tbl)
+ return 0;
+
+ info->next_idx = cnic_bnx2x_next_idx;
+ info->hw_idx = cnic_bnx2x_hw_idx;
+
+ for (i = 0; i < KCQ_PAGE_CNT; i++) {
+ struct bnx2x_bd_chain_next *next =
+ (struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
+ int j = i + 1;
+
+ if (j >= KCQ_PAGE_CNT)
+ j = 0;
+ next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
+ next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
+ }
+ return 0;
+}
+
+static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_uio_dev *udev;
+
+ read_lock(&cnic_dev_lock);
+ list_for_each_entry(udev, &cnic_udev_list, list) {
+ if (udev->pdev == dev->pcidev) {
+ udev->dev = dev;
+ cp->udev = udev;
+ read_unlock(&cnic_dev_lock);
+ return 0;
+ }
+ }
+ read_unlock(&cnic_dev_lock);
+
+ udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
+ if (!udev)
+ return -ENOMEM;
+
+ udev->uio_dev = -1;
+
+ udev->dev = dev;
+ udev->pdev = dev->pcidev;
+ udev->l2_ring_size = pages * BCM_PAGE_SIZE;
+ udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
+ &udev->l2_ring_map,
+ GFP_KERNEL | __GFP_COMP);
+ if (!udev->l2_ring)
+ goto err_udev;
+
+ udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
+ udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
+ udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
+ &udev->l2_buf_map,
+ GFP_KERNEL | __GFP_COMP);
+ if (!udev->l2_buf)
+ goto err_dma;
+
+ write_lock(&cnic_dev_lock);
+ list_add(&udev->list, &cnic_udev_list);
+ write_unlock(&cnic_dev_lock);
+
+ pci_dev_get(udev->pdev);
+
+ cp->udev = udev;
+
+ return 0;
+ err_dma:
+ dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
+ udev->l2_ring, udev->l2_ring_map);
+ err_udev:
+ kfree(udev);
+ return -ENOMEM;
+}
+
+static int cnic_init_uio(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_uio_dev *udev = cp->udev;
+ struct uio_info *uinfo;
+ int ret = 0;
+
+ if (!udev)
+ return -ENOMEM;
+
+ uinfo = &udev->cnic_uinfo;
+
+ uinfo->mem[0].addr = dev->netdev->base_addr;
+ uinfo->mem[0].internal_addr = dev->regview;
+ uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
+ uinfo->mem[0].memtype = UIO_MEM_PHYS;
+
+ if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+ uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
+ PAGE_MASK;
+ if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
+ uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
+ else
+ uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
+
+ uinfo->name = "bnx2_cnic";
+ } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+ uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
+ PAGE_MASK;
+ uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
+
+ uinfo->name = "bnx2x_cnic";
+ }
+
+ uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
+
+ uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
+ uinfo->mem[2].size = udev->l2_ring_size;
+ uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
+
+ uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
+ uinfo->mem[3].size = udev->l2_buf_size;
+ uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
+
+ uinfo->version = CNIC_MODULE_VERSION;
+ uinfo->irq = UIO_IRQ_CUSTOM;
+
+ uinfo->open = cnic_uio_open;
+ uinfo->release = cnic_uio_close;
+
+ if (udev->uio_dev == -1) {
+ if (!uinfo->priv) {
+ uinfo->priv = udev;
+
+ ret = uio_register_device(&udev->pdev->dev, uinfo);
+ }
+ } else {
+ cnic_init_rings(dev);
+ }
+
+ return ret;
+}
+
+static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ int ret;
+
+ ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
+ if (ret)
+ goto error;
+ cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
+
+ ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
+ if (ret)
+ goto error;
+
+ ret = cnic_alloc_context(dev);
+ if (ret)
+ goto error;
+
+ ret = cnic_alloc_uio_rings(dev, 2);
+ if (ret)
+ goto error;
+
+ ret = cnic_init_uio(dev);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ cnic_free_resc(dev);
+ return ret;
+}
+
+static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ int ctx_blk_size = cp->ethdev->ctx_blk_size;
+ int total_mem, blks, i;
+
+ total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
+ blks = total_mem / ctx_blk_size;
+ if (total_mem % ctx_blk_size)
+ blks++;
+
+ if (blks > cp->ethdev->ctx_tbl_len)
+ return -ENOMEM;
+
+ cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
+ if (cp->ctx_arr == NULL)
+ return -ENOMEM;
+
+ cp->ctx_blks = blks;
+ cp->ctx_blk_size = ctx_blk_size;
+ if (!BNX2X_CHIP_IS_57710(cp->chip_id))
+ cp->ctx_align = 0;
+ else
+ cp->ctx_align = ctx_blk_size;
+
+ cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;
+
+ for (i = 0; i < blks; i++) {
+ cp->ctx_arr[i].ctx =
+ dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
+ &cp->ctx_arr[i].mapping,
+ GFP_KERNEL);
+ if (cp->ctx_arr[i].ctx == NULL)
+ return -ENOMEM;
+
+ if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
+ if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
+ cnic_free_context(dev);
+ cp->ctx_blk_size += cp->ctx_align;
+ i = -1;
+ continue;
+ }
+ }
+ }
+ return 0;
+}
+
+static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ u32 start_cid = ethdev->starting_cid;
+ int i, j, n, ret, pages;
+ struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
+
+ cp->iro_arr = ethdev->iro_arr;
+
+ cp->max_cid_space = MAX_ISCSI_TBL_SZ;
+ cp->iscsi_start_cid = start_cid;
+ cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
+
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ cp->max_cid_space += dev->max_fcoe_conn;
+ cp->fcoe_init_cid = ethdev->fcoe_init_cid;
+ if (!cp->fcoe_init_cid)
+ cp->fcoe_init_cid = 0x10;
+ }
+
+ cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
+ GFP_KERNEL);
+ if (!cp->iscsi_tbl)
+ goto error;
+
+ cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
+ cp->max_cid_space, GFP_KERNEL);
+ if (!cp->ctx_tbl)
+ goto error;
+
+ for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
+ cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
+ cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
+ }
+
+ for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
+ cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
+
+ pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
+ PAGE_SIZE;
+
+ ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
+ if (ret)
+ return -ENOMEM;
+
+ n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
+ for (i = 0, j = 0; i < cp->max_cid_space; i++) {
+ long off = CNIC_KWQ16_DATA_SIZE * (i % n);
+
+ cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
+ cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
+ off;
+
+ if ((i % n) == (n - 1))
+ j++;
+ }
+
+ ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
+ if (ret)
+ goto error;
+
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
+ if (ret)
+ goto error;
+ }
+
+ pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
+ ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
+ if (ret)
+ goto error;
+
+ ret = cnic_alloc_bnx2x_context(dev);
+ if (ret)
+ goto error;
+
+ cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
+
+ cp->l2_rx_ring_size = 15;
+
+ ret = cnic_alloc_uio_rings(dev, 4);
+ if (ret)
+ goto error;
+
+ ret = cnic_init_uio(dev);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ cnic_free_resc(dev);
+ return -ENOMEM;
+}
+
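+/* Free slots in the kernel work queue ring: ring size minus the masked
+ * distance between the producer and consumer indices.
+ */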
+static inline u32 cnic_kwq_avail(struct cnic_local *cp)
+{
+ return cp->max_kwq_idx -
+ ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
+}
+
+static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
+ u32 num_wqes)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct kwqe *prod_qe;
+ u16 prod, sw_prod, i;
+
+ if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ return -EAGAIN; /* bnx2 is down */
+
+ spin_lock_bh(&cp->cnic_ulp_lock);
+ if (num_wqes > cnic_kwq_avail(cp) &&
+ !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
+ spin_unlock_bh(&cp->cnic_ulp_lock);
+ return -EAGAIN;
+ }
+
+ clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
+
+ prod = cp->kwq_prod_idx;
+ sw_prod = prod & MAX_KWQ_IDX;
+ for (i = 0; i < num_wqes; i++) {
+ prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
+ memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
+ prod++;
+ sw_prod = prod & MAX_KWQ_IDX;
+ }
+ cp->kwq_prod_idx = prod;
+
+ CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
+
+ spin_unlock_bh(&cp->cnic_ulp_lock);
+ return 0;
+}
+
+static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
+ union l5cm_specific_data *l5_data)
+{
+ struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+ dma_addr_t map;
+
+ map = ctx->kwqe_data_mapping;
+ l5_data->phy_address.lo = (u64) map & 0xffffffff;
+ l5_data->phy_address.hi = (u64) map >> 32;
+ return ctx->kwqe_data;
+}
+
+static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
+ u32 type, union l5cm_specific_data *l5_data)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct l5cm_spe kwqe;
+ struct kwqe_16 *kwq[1];
+ u16 type_16;
+ int ret;
+
+ kwqe.hdr.conn_and_cmd_data =
+ cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
+ BNX2X_HW_CID(cp, cid)));
+
+ type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
+ type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
+ SPE_HDR_FUNCTION_ID;
+
+ kwqe.hdr.type = cpu_to_le16(type_16);
+ kwqe.hdr.reserved1 = 0;
+ kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
+ kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
+
+ kwq[0] = (struct kwqe_16 *) &kwqe;
+
+ spin_lock_bh(&cp->cnic_ulp_lock);
+ ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
+ spin_unlock_bh(&cp->cnic_ulp_lock);
+
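+ /* drv_submit_kwqes_16() appears to return the number of WQEs it
+ * accepted; exactly one was queued above, so 1 means success.
+ */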
+ if (ret == 1)
+ return 0;
+
+ return -EBUSY;
+}
+
+static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
+ struct kcqe *cqes[], u32 num_cqes)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_ulp_ops *ulp_ops;
+
+ rcu_read_lock();
+ ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
+ if (likely(ulp_ops)) {
+ ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
+ cqes, num_cqes);
+ }
+ rcu_read_unlock();
+}
+
+static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
+ int hq_bds, pages;
+ u32 pfid = cp->pfid;
+
+ cp->num_iscsi_tasks = req1->num_tasks_per_conn;
+ cp->num_ccells = req1->num_ccells_per_conn;
+ cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
+ cp->num_iscsi_tasks;
+ cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
+ BNX2X_ISCSI_R2TQE_SIZE;
+ cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
+ pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
+ hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
+ cp->num_cqs = req1->num_cqs;
+
+ if (!dev->max_iscsi_conn)
+ return 0;
+
+ /* init Tstorm RAM */
+ CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
+ req1->rq_num_wqes);
+ CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
+ PAGE_SIZE);
+ CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+ TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+ CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
+ TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
+ req1->num_tasks_per_conn);
+
+ /* init Ustorm RAM */
+ CNIC_WR16(dev, BAR_USTRORM_INTMEM +
+ USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
+ req1->rq_buffer_size);
+ CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
+ PAGE_SIZE);
+ CNIC_WR8(dev, BAR_USTRORM_INTMEM +
+ USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+ CNIC_WR16(dev, BAR_USTRORM_INTMEM +
+ USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
+ req1->num_tasks_per_conn);
+ CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
+ req1->rq_num_wqes);
+ CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
+ req1->cq_num_wqes);
+ CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
+ cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
+
+ /* init Xstorm RAM */
+ CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
+ PAGE_SIZE);
+ CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+ XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+ CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
+ XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
+ req1->num_tasks_per_conn);
+ CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
+ hq_bds);
+ CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
+ req1->num_tasks_per_conn);
+ CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
+ cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
+
+ /* init Cstorm RAM */
+ CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
+ PAGE_SIZE);
+ CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
+ CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+ CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
+ CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
+ req1->num_tasks_per_conn);
+ CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
+ req1->cq_num_wqes);
+ CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
+ hq_bds);
+
+ return 0;
+}
+
+static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
+ struct cnic_local *cp = dev->cnic_priv;
+ u32 pfid = cp->pfid;
+ struct iscsi_kcqe kcqe;
+ struct kcqe *cqes[1];
+
+ memset(&kcqe, 0, sizeof(kcqe));
+ if (!dev->max_iscsi_conn) {
+ kcqe.completion_status =
+ ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
+ goto done;
+ }
+
+ CNIC_WR(dev, BAR_TSTRORM_INTMEM +
+ TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
+ CNIC_WR(dev, BAR_TSTRORM_INTMEM +
+ TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
+ req2->error_bit_map[1]);
+
+ CNIC_WR16(dev, BAR_USTRORM_INTMEM +
+ USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
+ CNIC_WR(dev, BAR_USTRORM_INTMEM +
+ USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
+ CNIC_WR(dev, BAR_USTRORM_INTMEM +
+ USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
+ req2->error_bit_map[1]);
+
+ CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
+ CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
+
+ kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
+
+done:
+ kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
+ cqes[0] = (struct kcqe *) &kcqe;
+ cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
+
+ return 0;
+}
+
+static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+
+ if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
+ struct cnic_iscsi *iscsi = ctx->proto.iscsi;
+
+ cnic_free_dma(dev, &iscsi->hq_info);
+ cnic_free_dma(dev, &iscsi->r2tq_info);
+ cnic_free_dma(dev, &iscsi->task_array_info);
+ cnic_free_id(&cp->cid_tbl, ctx->cid);
+ } else {
+ cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
+ }
+
+ ctx->cid = 0;
+}
+
+static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
+{
+ u32 cid;
+ int ret, pages;
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+ struct cnic_iscsi *iscsi = ctx->proto.iscsi;
+
+ if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
+ cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
+ if (cid == -1) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ ctx->cid = cid;
+ return 0;
+ }
+
+ cid = cnic_alloc_new_id(&cp->cid_tbl);
+ if (cid == -1) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ctx->cid = cid;
+ pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
+
+ ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
+ if (ret)
+ goto error;
+
+ pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
+ ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
+ if (ret)
+ goto error;
+
+ pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
+ ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ cnic_free_bnx2x_conn_resc(dev, l5_cid);
+ return ret;
+}
+
+static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
+ struct regpair *ctx_addr)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
+ int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
+ unsigned long align_off = 0;
+ dma_addr_t ctx_map;
+ void *ctx;
+
+ if (cp->ctx_align) {
+ unsigned long mask = cp->ctx_align - 1;
+
+ if (cp->ctx_arr[blk].mapping & mask)
+ align_off = cp->ctx_align -
+ (cp->ctx_arr[blk].mapping & mask);
+ }
+ ctx_map = cp->ctx_arr[blk].mapping + align_off +
+ (off * BNX2X_CONTEXT_MEM_SIZE);
+ ctx = cp->ctx_arr[blk].ctx + align_off +
+ (off * BNX2X_CONTEXT_MEM_SIZE);
+ if (init)
+ memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
+
+ ctx_addr->lo = ctx_map & 0xffffffff;
+ ctx_addr->hi = (u64) ctx_map >> 32;
+ return ctx;
+}
+
+static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
+ u32 num)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct iscsi_kwqe_conn_offload1 *req1 =
+ (struct iscsi_kwqe_conn_offload1 *) wqes[0];
+ struct iscsi_kwqe_conn_offload2 *req2 =
+ (struct iscsi_kwqe_conn_offload2 *) wqes[1];
+ struct iscsi_kwqe_conn_offload3 *req3;
+ struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
+ struct cnic_iscsi *iscsi = ctx->proto.iscsi;
+ u32 cid = ctx->cid;
+ u32 hw_cid = BNX2X_HW_CID(cp, cid);
+ struct iscsi_context *ictx;
+ struct regpair context_addr;
+ int i, j, n = 2, n_max;
+ u8 port = CNIC_PORT(cp);
+
+ ctx->ctx_flags = 0;
+ if (!req2->num_additional_wqes)
+ return -EINVAL;
+
+ n_max = req2->num_additional_wqes + 2;
+
+ ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
+ if (ictx == NULL)
+ return -ENOMEM;
+
+ req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
+
+ ictx->xstorm_ag_context.hq_prod = 1;
+
+ ictx->xstorm_st_context.iscsi.first_burst_length =
+ ISCSI_DEF_FIRST_BURST_LEN;
+ ictx->xstorm_st_context.iscsi.max_send_pdu_length =
+ ISCSI_DEF_MAX_RECV_SEG_LEN;
+ ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
+ req1->sq_page_table_addr_lo;
+ ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
+ req1->sq_page_table_addr_hi;
+ ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
+ ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
+ ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
+ iscsi->hq_info.pgtbl_map & 0xffffffff;
+ ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
+ (u64) iscsi->hq_info.pgtbl_map >> 32;
+ ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
+ iscsi->hq_info.pgtbl[0];
+ ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
+ iscsi->hq_info.pgtbl[1];
+ ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
+ iscsi->r2tq_info.pgtbl_map & 0xffffffff;
+ ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
+ (u64) iscsi->r2tq_info.pgtbl_map >> 32;
+ ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
+ iscsi->r2tq_info.pgtbl[0];
+ ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
+ iscsi->r2tq_info.pgtbl[1];
+ ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
+ iscsi->task_array_info.pgtbl_map & 0xffffffff;
+ ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
+ (u64) iscsi->task_array_info.pgtbl_map >> 32;
+ ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
+ BNX2X_ISCSI_PBL_NOT_CACHED;
+ ictx->xstorm_st_context.iscsi.flags.flags |=
+ XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
+ ictx->xstorm_st_context.iscsi.flags.flags |=
+ XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
+ ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
+ ETH_P_8021Q;
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
+ cp->port_mode == CHIP_2_PORT_MODE) {
+ port = 0;
+ }
+ ictx->xstorm_st_context.common.flags =
+ 1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
+ ictx->xstorm_st_context.common.flags |=
+ port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
+
+ ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
+ /* TSTORM requires the base address of the RQ DB, not the PTE */
+ ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
+ req2->rq_page_table_addr_lo & PAGE_MASK;
+ ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
+ req2->rq_page_table_addr_hi;
+ ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
+ ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
+ ictx->tstorm_st_context.tcp.flags2 |=
+ TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
+ ictx->tstorm_st_context.tcp.ooo_support_mode =
+ TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
+
+ ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
+
+ ictx->ustorm_st_context.ring.rq.pbl_base.lo =
+ req2->rq_page_table_addr_lo;
+ ictx->ustorm_st_context.ring.rq.pbl_base.hi =
+ req2->rq_page_table_addr_hi;
+ ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
+ ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
+ ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
+ iscsi->r2tq_info.pgtbl_map & 0xffffffff;
+ ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
+ (u64) iscsi->r2tq_info.pgtbl_map >> 32;
+ ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
+ iscsi->r2tq_info.pgtbl[0];
+ ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
+ iscsi->r2tq_info.pgtbl[1];
+ ictx->ustorm_st_context.ring.cq_pbl_base.lo =
+ req1->cq_page_table_addr_lo;
+ ictx->ustorm_st_context.ring.cq_pbl_base.hi =
+ req1->cq_page_table_addr_hi;
+ ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
+ ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
+ ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
+ ictx->ustorm_st_context.task_pbe_cache_index =
+ BNX2X_ISCSI_PBL_NOT_CACHED;
+ ictx->ustorm_st_context.task_pdu_cache_index =
+ BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
+
+ for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
+ if (j == 3) {
+ if (n >= n_max)
+ break;
+ req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
+ j = 0;
+ }
+ ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
+ ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
+ req3->qp_first_pte[j].hi;
+ ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
+ req3->qp_first_pte[j].lo;
+ }
+
+ ictx->ustorm_st_context.task_pbl_base.lo =
+ iscsi->task_array_info.pgtbl_map & 0xffffffff;
+ ictx->ustorm_st_context.task_pbl_base.hi =
+ (u64) iscsi->task_array_info.pgtbl_map >> 32;
+ ictx->ustorm_st_context.tce_phy_addr.lo =
+ iscsi->task_array_info.pgtbl[0];
+ ictx->ustorm_st_context.tce_phy_addr.hi =
+ iscsi->task_array_info.pgtbl[1];
+ ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
+ ictx->ustorm_st_context.num_cqs = cp->num_cqs;
+ ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
+ ictx->ustorm_st_context.negotiated_rx_and_flags |=
+ ISCSI_DEF_MAX_BURST_LEN;
+ ictx->ustorm_st_context.negotiated_rx |=
+ ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
+ USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
+
+ ictx->cstorm_st_context.hq_pbl_base.lo =
+ iscsi->hq_info.pgtbl_map & 0xffffffff;
+ ictx->cstorm_st_context.hq_pbl_base.hi =
+ (u64) iscsi->hq_info.pgtbl_map >> 32;
+ ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
+ ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
+ ictx->cstorm_st_context.task_pbl_base.lo =
+ iscsi->task_array_info.pgtbl_map & 0xffffffff;
+ ictx->cstorm_st_context.task_pbl_base.hi =
+ (u64) iscsi->task_array_info.pgtbl_map >> 32;
+ /* CSTORM and USTORM initialization differ: CSTORM requires the
+ * CQ DB base address, not the PTE address.
+ */
+ ictx->cstorm_st_context.cq_db_base.lo =
+ req1->cq_page_table_addr_lo & PAGE_MASK;
+ ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
+ ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
+ ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
+ for (i = 0; i < cp->num_cqs; i++) {
+ ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
+ ISCSI_INITIAL_SN;
+ ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
+ ISCSI_INITIAL_SN;
+ }
+
+ ictx->xstorm_ag_context.cdu_reserved =
+ CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
+ ISCSI_CONNECTION_TYPE);
+ ictx->ustorm_ag_context.cdu_usage =
+ CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
+ ISCSI_CONNECTION_TYPE);
+ return 0;
+}
+
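+/* Handle the OFFLOAD_CONN1 KWQE chain: validate the request, enforce the
+ * per-device iSCSI connection limit, allocate connection resources and
+ * program the firmware context.  A KCQE carrying the completion status
+ * is always posted back to the iSCSI ULP, even on failure.
+ */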
+static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
+ u32 num, int *work)
+{
+ struct iscsi_kwqe_conn_offload1 *req1;
+ struct iscsi_kwqe_conn_offload2 *req2;
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_context *ctx;
+ struct iscsi_kcqe kcqe;
+ struct kcqe *cqes[1];
+ u32 l5_cid;
+ int ret = 0;
+
+ if (num < 2) {
+ *work = num;
+ return -EINVAL;
+ }
+
+ req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
+ req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
+ if ((num - 2) < req2->num_additional_wqes) {
+ *work = num;
+ return -EINVAL;
+ }
+ *work = 2 + req2->num_additional_wqes;
+
+ l5_cid = req1->iscsi_conn_id;
+ if (l5_cid >= MAX_ISCSI_TBL_SZ)
+ return -EINVAL;
+
+ memset(&kcqe, 0, sizeof(kcqe));
+ kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
+ kcqe.iscsi_conn_id = l5_cid;
+ kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
+
+ ctx = &cp->ctx_tbl[l5_cid];
+ if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
+ kcqe.completion_status =
+ ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
+ goto done;
+ }
+
+ if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
+ atomic_dec(&cp->iscsi_conn);
+ goto done;
+ }
+ ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
+ if (ret) {
+ atomic_dec(&cp->iscsi_conn);
+ ret = 0;
+ goto done;
+ }
+ ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
+ if (ret < 0) {
+ cnic_free_bnx2x_conn_resc(dev, l5_cid);
+ atomic_dec(&cp->iscsi_conn);
+ goto done;
+ }
+
+ kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
+ kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);
+
+done:
+ cqes[0] = (struct kcqe *) &kcqe;
+ cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
+ return ret;
+}
+
+static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct iscsi_kwqe_conn_update *req =
+ (struct iscsi_kwqe_conn_update *) kwqe;
+ void *data;
+ union l5cm_specific_data l5_data;
+ u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
+ int ret;
+
+ if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
+ return -EINVAL;
+
+ data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(data, kwqe, sizeof(struct kwqe));
+
+ ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
+ req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
+ return ret;
+}
+
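+/* Issue a CFC delete ramrod for the connection and wait, with a timeout,
+ * for the completion handler to set ctx->wait_cond.  Returns -EBUSY if
+ * the completion reported a CID error.
+ */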
+static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+ union l5cm_specific_data l5_data;
+ int ret;
+ u32 hw_cid;
+
+ init_waitqueue_head(&ctx->waitq);
+ ctx->wait_cond = 0;
+ memset(&l5_data, 0, sizeof(l5_data));
+ hw_cid = BNX2X_HW_CID(cp, ctx->cid);
+
+ ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
+ hw_cid, NONE_CONNECTION_TYPE, &l5_data);
+
+ if (ret == 0) {
+ wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
+ if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct iscsi_kwqe_conn_destroy *req =
+ (struct iscsi_kwqe_conn_destroy *) kwqe;
+ u32 l5_cid = req->reserved0;
+ struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+ int ret = 0;
+ struct iscsi_kcqe kcqe;
+ struct kcqe *cqes[1];
+
+ if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+ goto skip_cfc_delete;
+
+ if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
+ unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;
+
+ if (delta > (2 * HZ))
+ delta = 0;
+
+ set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
+ queue_delayed_work(cnic_wq, &cp->delete_task, delta);
+ goto destroy_reply;
+ }
+
+ ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
+
+skip_cfc_delete:
+ cnic_free_bnx2x_conn_resc(dev, l5_cid);
+
+ if (!ret) {
+ atomic_dec(&cp->iscsi_conn);
+ clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+ }
+
+destroy_reply:
+ memset(&kcqe, 0, sizeof(kcqe));
+ kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
+ kcqe.iscsi_conn_id = l5_cid;
+ kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
+ kcqe.iscsi_conn_context_id = req->context_id;
+
+ cqes[0] = (struct kcqe *) &kcqe;
+ cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
+
+ return ret;
+}
+
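+/* Fill the XSTORM and TSTORM sections of the active-connection buffer
+ * used by the L5CM TCP_CONNECT ramrod: context address, receive buffer,
+ * Nagle and delayed-ACK flags, keep-alive parameters, and a precomputed
+ * TCP pseudo-header checksum derived from the connection addresses.
+ */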
+static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
+ struct l4_kwq_connect_req1 *kwqe1,
+ struct l4_kwq_connect_req3 *kwqe3,
+ struct l5cm_active_conn_buffer *conn_buf)
+{
+ struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
+ struct l5cm_xstorm_conn_buffer *xstorm_buf =
+ &conn_buf->xstorm_conn_buffer;
+ struct l5cm_tstorm_conn_buffer *tstorm_buf =
+ &conn_buf->tstorm_conn_buffer;
+ struct regpair context_addr;
+ u32 cid = BNX2X_SW_CID(kwqe1->cid);
+ struct in6_addr src_ip, dst_ip;
+ int i;
+ u32 *addrp;
+
+ addrp = (u32 *) &conn_addr->local_ip_addr;
+ for (i = 0; i < 4; i++, addrp++)
+ src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
+
+ addrp = (u32 *) &conn_addr->remote_ip_addr;
+ for (i = 0; i < 4; i++, addrp++)
+ dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
+
+ cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
+
+ xstorm_buf->context_addr.hi = context_addr.hi;
+ xstorm_buf->context_addr.lo = context_addr.lo;
+ xstorm_buf->mss = 0xffff;
+ xstorm_buf->rcv_buf = kwqe3->rcv_buf;
+ if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
+ xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
+ xstorm_buf->pseudo_header_checksum =
+ swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
+
+ if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
+ tstorm_buf->params |=
+ L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
+ if (kwqe3->ka_timeout) {
+ tstorm_buf->ka_enable = 1;
+ tstorm_buf->ka_timeout = kwqe3->ka_timeout;
+ tstorm_buf->ka_interval = kwqe3->ka_interval;
+ tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
+ }
+ tstorm_buf->max_rt_time = 0xffffffff;
+}
+
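+/* Program the local MAC address into XSTORM internal memory in normal
+ * byte order and into TSTORM internal memory in reversed byte order.
+ */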
+static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ u32 pfid = cp->pfid;
+ u8 *mac = dev->mac_addr;
+
+ CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+ XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
+ CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+ XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
+ CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+ XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
+ CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+ XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
+ CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+ XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
+ CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+ XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);
+
+ CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+ TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
+ CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+ TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
+ mac[4]);
+ CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+ TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
+ CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+ TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
+ mac[2]);
+ CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+ TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
+ CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+ TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
+ mac[0]);
+}
+
+static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
+ u16 tstorm_flags = 0;
+
+ if (tcp_ts) {
+ xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
+ tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
+ }
+
+ CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+ XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);
+
+ CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
+ TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
+}
+
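+/* Handle an L4 CONNECT1 KWQE chain (two WQEs for IPv4, three for IPv6):
+ * copy the peer MAC address and the IP/port tuples into the connection
+ * buffer, fill the storm sections, and submit an L5CM TCP_CONNECT
+ * ramrod.  On success the context is flagged as offload-started.
+ */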
+static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
+ u32 num, int *work)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct l4_kwq_connect_req1 *kwqe1 =
+ (struct l4_kwq_connect_req1 *) wqes[0];
+ struct l4_kwq_connect_req3 *kwqe3;
+ struct l5cm_active_conn_buffer *conn_buf;
+ struct l5cm_conn_addr_params *conn_addr;
+ union l5cm_specific_data l5_data;
+ u32 l5_cid = kwqe1->pg_cid;
+ struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
+ struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+ int ret;
+
+ if (num < 2) {
+ *work = num;
+ return -EINVAL;
+ }
+
+ if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
+ *work = 3;
+ else
+ *work = 2;
+
+ if (num < *work) {
+ *work = num;
+ return -EINVAL;
+ }
+
+ if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
+ netdev_err(dev->netdev, "conn_buf size too big\n");
+ return -ENOMEM;
+ }
+ conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+ if (!conn_buf)
+ return -ENOMEM;
+
+ memset(conn_buf, 0, sizeof(*conn_buf));
+
+ conn_addr = &conn_buf->conn_addr_buf;
+ conn_addr->remote_addr_0 = csk->ha[0];
+ conn_addr->remote_addr_1 = csk->ha[1];
+ conn_addr->remote_addr_2 = csk->ha[2];
+ conn_addr->remote_addr_3 = csk->ha[3];
+ conn_addr->remote_addr_4 = csk->ha[4];
+ conn_addr->remote_addr_5 = csk->ha[5];
+
+ if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
+ struct l4_kwq_connect_req2 *kwqe2 =
+ (struct l4_kwq_connect_req2 *) wqes[1];
+
+ conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
+ conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
+ conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;
+
+ conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
+ conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
+ conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
+ conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
+ }
+ kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
+
+ conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
+ conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
+ conn_addr->local_tcp_port = kwqe1->src_port;
+ conn_addr->remote_tcp_port = kwqe1->dst_port;
+
+ conn_addr->pmtu = kwqe3->pmtu;
+ cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
+
+ CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
+ XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);
+
+ cnic_bnx2x_set_tcp_timestamp(dev,
+ kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);
+
+ ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
+ kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
+ if (!ret)
+ set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+
+ return ret;
+}
+
+static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
+ union l5cm_specific_data l5_data;
+ int ret;
+
+ memset(&l5_data, 0, sizeof(l5_data));
+ ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
+ req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
+ return ret;
+}
+
+static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
+ union l5cm_specific_data l5_data;
+ int ret;
+
+ memset(&l5_data, 0, sizeof(l5_data));
+ ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
+ req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
+ return ret;
+}
+
+static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
+ struct l4_kcq kcqe;
+ struct kcqe *cqes[1];
+
+ memset(&kcqe, 0, sizeof(kcqe));
+ kcqe.pg_host_opaque = req->host_opaque;
+ kcqe.pg_cid = req->host_opaque;
+ kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
+ cqes[0] = (struct kcqe *) &kcqe;
+ cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
+ return 0;
+}
+
+static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
+ struct l4_kcq kcqe;
+ struct kcqe *cqes[1];
+
+ memset(&kcqe, 0, sizeof(kcqe));
+ kcqe.pg_host_opaque = req->pg_host_opaque;
+ kcqe.pg_cid = req->pg_cid;
+ kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
+ cqes[0] = (struct kcqe *) &kcqe;
+ cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
+ return 0;
+}
+
+static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct fcoe_kwqe_stat *req;
+ struct fcoe_stat_ramrod_params *fcoe_stat;
+ union l5cm_specific_data l5_data;
+ struct cnic_local *cp = dev->cnic_priv;
+ int ret;
+ u32 cid;
+
+ req = (struct fcoe_kwqe_stat *) kwqe;
+ cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+
+ fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
+ if (!fcoe_stat)
+ return -ENOMEM;
+
+ memset(fcoe_stat, 0, sizeof(*fcoe_stat));
+ memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
+
+ ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
+ FCOE_CONNECTION_TYPE, &l5_data);
+ return ret;
+}
+
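+/* Handle the three-WQE FCoE INIT chain.  The init KWQEs are copied into
+ * a single ramrod parameter block, the FCoE event queue is pointed at
+ * kcq2, and an INIT_FUNC ramrod is submitted.
+ */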
+static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
+ u32 num, int *work)
+{
+ int ret;
+ struct cnic_local *cp = dev->cnic_priv;
+ u32 cid;
+ struct fcoe_init_ramrod_params *fcoe_init;
+ struct fcoe_kwqe_init1 *req1;
+ struct fcoe_kwqe_init2 *req2;
+ struct fcoe_kwqe_init3 *req3;
+ union l5cm_specific_data l5_data;
+
+ if (num < 3) {
+ *work = num;
+ return -EINVAL;
+ }
+ req1 = (struct fcoe_kwqe_init1 *) wqes[0];
+ req2 = (struct fcoe_kwqe_init2 *) wqes[1];
+ req3 = (struct fcoe_kwqe_init3 *) wqes[2];
+ if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
+ *work = 1;
+ return -EINVAL;
+ }
+ if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
+ *work = 2;
+ return -EINVAL;
+ }
+
+ if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
+ netdev_err(dev->netdev, "fcoe_init size too big\n");
+ return -ENOMEM;
+ }
+ fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
+ if (!fcoe_init)
+ return -ENOMEM;
+
+ memset(fcoe_init, 0, sizeof(*fcoe_init));
+ memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
+ memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
+ memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
+ fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
+ fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
+ fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
+
+ fcoe_init->sb_num = cp->status_blk_num;
+ fcoe_init->eq_prod = MAX_KCQ_IDX;
+ fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
+ cp->kcq2.sw_prod_idx = 0;
+
+ cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+ ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
+ FCOE_CONNECTION_TYPE, &l5_data);
+ *work = 3;
+ return ret;
+}
+
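+/* Handle the four-WQE FCoE OFFLOAD_CONN1 chain: allocate connection
+ * resources, initialize the CDU fields of the FCoE context, copy the
+ * four KWQEs into the ramrod parameter block, and submit an OFFLOAD_CONN
+ * ramrod.  On failure an error KCQE is posted back to the FCoE ULP.
+ */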
+static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
+ u32 num, int *work)
+{
+ int ret = 0;
+ u32 cid = -1, l5_cid;
+ struct cnic_local *cp = dev->cnic_priv;
+ struct fcoe_kwqe_conn_offload1 *req1;
+ struct fcoe_kwqe_conn_offload2 *req2;
+ struct fcoe_kwqe_conn_offload3 *req3;
+ struct fcoe_kwqe_conn_offload4 *req4;
+ struct fcoe_conn_offload_ramrod_params *fcoe_offload;
+ struct cnic_context *ctx;
+ struct fcoe_context *fctx;
+ struct regpair ctx_addr;
+ union l5cm_specific_data l5_data;
+ struct fcoe_kcqe kcqe;
+ struct kcqe *cqes[1];
+
+ if (num < 4) {
+ *work = num;
+ return -EINVAL;
+ }
+ req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
+ req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
+ req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
+ req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
+
+ *work = 4;
+
+ l5_cid = req1->fcoe_conn_id;
+ if (l5_cid >= dev->max_fcoe_conn)
+ goto err_reply;
+
+ l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+ ctx = &cp->ctx_tbl[l5_cid];
+ if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+ goto err_reply;
+
+ ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
+ if (ret) {
+ ret = 0;
+ goto err_reply;
+ }
+ cid = ctx->cid;
+
+ fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
+ if (fctx) {
+ u32 hw_cid = BNX2X_HW_CID(cp, cid);
+ u32 val;
+
+ val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
+ FCOE_CONNECTION_TYPE);
+ fctx->xstorm_ag_context.cdu_reserved = val;
+ val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
+ FCOE_CONNECTION_TYPE);
+ fctx->ustorm_ag_context.cdu_usage = val;
+ }
+ if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
+ netdev_err(dev->netdev, "fcoe_offload size too big\n");
+ goto err_reply;
+ }
+ fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+ if (!fcoe_offload)
+ goto err_reply;
+
+ memset(fcoe_offload, 0, sizeof(*fcoe_offload));
+ memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
+ memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
+ memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
+ memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
+
+ cid = BNX2X_HW_CID(cp, cid);
+ ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
+ FCOE_CONNECTION_TYPE, &l5_data);
+ if (!ret)
+ set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+
+ return ret;
+
+err_reply:
+ if (cid != -1)
+ cnic_free_bnx2x_conn_resc(dev, l5_cid);
+
+ memset(&kcqe, 0, sizeof(kcqe));
+ kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
+ kcqe.fcoe_conn_id = req1->fcoe_conn_id;
+ kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
+
+ cqes[0] = (struct kcqe *) &kcqe;
+ cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
+ return ret;
+}
+
+static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct fcoe_kwqe_conn_enable_disable *req;
+ struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
+ union l5cm_specific_data l5_data;
+ int ret;
+ u32 cid, l5_cid;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
+ cid = req->context_id;
+ l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
+
+ if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
+ netdev_err(dev->netdev, "fcoe_enable size too big\n");
+ return -ENOMEM;
+ }
+ fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+ if (!fcoe_enable)
+ return -ENOMEM;
+
+ memset(fcoe_enable, 0, sizeof(*fcoe_enable));
+ memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
+ ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
+ FCOE_CONNECTION_TYPE, &l5_data);
+ return ret;
+}
+
+static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct fcoe_kwqe_conn_enable_disable *req;
+ struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
+ union l5cm_specific_data l5_data;
+ int ret;
+ u32 cid, l5_cid;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
+ cid = req->context_id;
+ l5_cid = req->conn_id;
+ if (l5_cid >= dev->max_fcoe_conn)
+ return -EINVAL;
+
+ l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+ if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
+ netdev_err(dev->netdev, "fcoe_disable size too big\n");
+ return -ENOMEM;
+ }
+ fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+ if (!fcoe_disable)
+ return -ENOMEM;
+
+ memset(fcoe_disable, 0, sizeof(*fcoe_disable));
+ memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
+ ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
+ FCOE_CONNECTION_TYPE, &l5_data);
+ return ret;
+}
+
+static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct fcoe_kwqe_conn_destroy *req;
+ union l5cm_specific_data l5_data;
+ int ret;
+ u32 cid, l5_cid;
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_context *ctx;
+ struct fcoe_kcqe kcqe;
+ struct kcqe *cqes[1];
+
+ req = (struct fcoe_kwqe_conn_destroy *) kwqe;
+ cid = req->context_id;
+ l5_cid = req->conn_id;
+ if (l5_cid >= dev->max_fcoe_conn)
+ return -EINVAL;
+
+ l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+ ctx = &cp->ctx_tbl[l5_cid];
+
+ init_waitqueue_head(&ctx->waitq);
+ ctx->wait_cond = 0;
+
+ memset(&kcqe, 0, sizeof(kcqe));
+ kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
+ memset(&l5_data, 0, sizeof(l5_data));
+ ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
+ FCOE_CONNECTION_TYPE, &l5_data);
+ if (ret == 0) {
+ wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
+ if (ctx->wait_cond)
+ kcqe.completion_status = 0;
+ }
+
+ set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
+ queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));
+
+ kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
+ kcqe.fcoe_conn_id = req->conn_id;
+ kcqe.fcoe_conn_context_id = cid;
+
+ cqes[0] = (struct kcqe *) &kcqe;
+ cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
+ return ret;
+}
+
+static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ u32 i;
+
+ for (i = start_cid; i < cp->max_cid_space; i++) {
+ struct cnic_context *ctx = &cp->ctx_tbl[i];
+ int j;
+
+ while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
+ msleep(10);
+
+ for (j = 0; j < 5; j++) {
+ if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+ break;
+ msleep(20);
+ }
+
+ if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+ netdev_warn(dev->netdev, "CID %x not deleted\n",
+ ctx->cid);
+ }
+}
+
+static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct fcoe_kwqe_destroy *req;
+ union l5cm_specific_data l5_data;
+ struct cnic_local *cp = dev->cnic_priv;
+ int ret;
+ u32 cid;
+
+ cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
+
+ req = (struct fcoe_kwqe_destroy *) kwqe;
+ cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+
+ memset(&l5_data, 0, sizeof(l5_data));
+ ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
+ FCOE_CONNECTION_TYPE, &l5_data);
+ return ret;
+}
+
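+/* Dispatch a batch of iSCSI/L4 KWQEs to the per-opcode handlers.  Each
+ * handler consumes one or more WQEs and reports the count through @work;
+ * errors are logged but do not stop processing of the remaining WQEs.
+ */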
+static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
+ struct kwqe *wqes[], u32 num_wqes)
+{
+ int i, work, ret;
+ u32 opcode;
+ struct kwqe *kwqe;
+
+ if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ return -EAGAIN; /* bnx2x is down */
+
+ for (i = 0; i < num_wqes; ) {
+ kwqe = wqes[i];
+ opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
+ work = 1;
+
+ switch (opcode) {
+ case ISCSI_KWQE_OPCODE_INIT1:
+ ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
+ break;
+ case ISCSI_KWQE_OPCODE_INIT2:
+ ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
+ break;
+ case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
+ ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
+ num_wqes - i, &work);
+ break;
+ case ISCSI_KWQE_OPCODE_UPDATE_CONN:
+ ret = cnic_bnx2x_iscsi_update(dev, kwqe);
+ break;
+ case ISCSI_KWQE_OPCODE_DESTROY_CONN:
+ ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
+ break;
+ case L4_KWQE_OPCODE_VALUE_CONNECT1:
+ ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
+ &work);
+ break;
+ case L4_KWQE_OPCODE_VALUE_CLOSE:
+ ret = cnic_bnx2x_close(dev, kwqe);
+ break;
+ case L4_KWQE_OPCODE_VALUE_RESET:
+ ret = cnic_bnx2x_reset(dev, kwqe);
+ break;
+ case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
+ ret = cnic_bnx2x_offload_pg(dev, kwqe);
+ break;
+ case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
+ ret = cnic_bnx2x_update_pg(dev, kwqe);
+ break;
+ case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
+ ret = 0;
+ break;
+ default:
+ ret = 0;
+ netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
+ opcode);
+ break;
+ }
+ if (ret < 0)
+ netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
+ opcode);
+ i += work;
+ }
+ return 0;
+}
+
+static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
+ struct kwqe *wqes[], u32 num_wqes)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ int i, work, ret;
+ u32 opcode;
+ struct kwqe *kwqe;
+
+ if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ return -EAGAIN; /* bnx2x is down */
+
+ if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+ return -EINVAL;
+
+ for (i = 0; i < num_wqes; ) {
+ kwqe = wqes[i];
+ opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
+ work = 1;
+
+ switch (opcode) {
+ case FCOE_KWQE_OPCODE_INIT1:
+ ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
+ num_wqes - i, &work);
+ break;
+ case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
+ ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
+ num_wqes - i, &work);
+ break;
+ case FCOE_KWQE_OPCODE_ENABLE_CONN:
+ ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
+ break;
+ case FCOE_KWQE_OPCODE_DISABLE_CONN:
+ ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
+ break;
+ case FCOE_KWQE_OPCODE_DESTROY_CONN:
+ ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
+ break;
+ case FCOE_KWQE_OPCODE_DESTROY:
+ ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
+ break;
+ case FCOE_KWQE_OPCODE_STAT:
+ ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
+ break;
+ default:
+ ret = 0;
+ netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
+ opcode);
+ break;
+ }
+ if (ret < 0)
+ netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
+ opcode);
+ i += work;
+ }
+ return 0;
+}
+
+static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
+ u32 num_wqes)
+{
+ int ret = -EINVAL;
+ u32 layer_code;
+
+ if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ return -EAGAIN; /* bnx2x is down */
+
+ if (!num_wqes)
+ return 0;
+
+ layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
+ switch (layer_code) {
+ case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
+ case KWQE_FLAGS_LAYER_MASK_L4:
+ case KWQE_FLAGS_LAYER_MASK_L2:
+ ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
+ break;
+
+ case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
+ ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
+ break;
+ }
+ return ret;
+}
+
+static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
+{
+ if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
+ return KCQE_FLAGS_LAYER_MASK_L4;
+
+ return opflag & KCQE_FLAGS_LAYER_MASK;
+}
+
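+/* Deliver completed KCQEs to the upper-layer drivers.  Consecutive KCQEs
+ * belonging to the same protocol layer are batched into one indication,
+ * and ramrod completions are counted so that the slow-path queue credits
+ * can be returned in a single call at the end.
+ */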
+static void service_kcqes(struct cnic_dev *dev, int num_cqes)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ int i, j, comp = 0;
+
+ i = 0;
+ j = 1;
+ while (num_cqes) {
+ struct cnic_ulp_ops *ulp_ops;
+ int ulp_type;
+ u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
+ u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
+
+ if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
+ comp++;
+
+ while (j < num_cqes) {
+ u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
+
+ if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
+ break;
+
+ if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
+ comp++;
+ j++;
+ }
+
+ if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
+ ulp_type = CNIC_ULP_RDMA;
+ else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
+ ulp_type = CNIC_ULP_ISCSI;
+ else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
+ ulp_type = CNIC_ULP_FCOE;
+ else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
+ ulp_type = CNIC_ULP_L4;
+ else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
+ goto end;
+ else {
+ netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
+ kcqe_op_flag);
+ goto end;
+ }
+
+ rcu_read_lock();
+ ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
+ if (likely(ulp_ops)) {
+ ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
+ cp->completed_kcq + i, j);
+ }
+ rcu_read_unlock();
+end:
+ num_cqes -= j;
+ i += j;
+ j = 1;
+ }
+ if (unlikely(comp))
+ cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
+}
+
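+/* Collect new KCQEs into cp->completed_kcq, stopping at the hardware
+ * producer index or after MAX_COMPLETED_KCQE entries.  Only KCQEs up to
+ * the last one without the NEXT flag are consumed, so a multi-part KCQE
+ * is never handed over half-complete.
+ */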
+static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ u16 i, ri, hw_prod, last;
+ struct kcqe *kcqe;
+ int kcqe_cnt = 0, last_cnt = 0;
+
+ i = ri = last = info->sw_prod_idx;
+ ri &= MAX_KCQ_IDX;
+ hw_prod = *info->hw_prod_idx_ptr;
+ hw_prod = info->hw_idx(hw_prod);
+
+ while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
+ kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
+ cp->completed_kcq[kcqe_cnt++] = kcqe;
+ i = info->next_idx(i);
+ ri = i & MAX_KCQ_IDX;
+ if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
+ last_cnt = kcqe_cnt;
+ last = i;
+ }
+ }
+
+ info->sw_prod_idx = last;
+ return last_cnt;
+}
+
+static int cnic_l2_completion(struct cnic_local *cp)
+{
+ u16 hw_cons, sw_cons;
+ struct cnic_uio_dev *udev = cp->udev;
+ union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
+ (udev->l2_ring + (2 * BCM_PAGE_SIZE));
+ u32 cmd;
+ int comp = 0;
+
+ if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
+ return 0;
+
+ hw_cons = *cp->rx_cons_ptr;
+ if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
+ hw_cons++;
+
+ sw_cons = cp->rx_cons;
+ while (sw_cons != hw_cons) {
+ u8 cqe_fp_flags;
+
+ cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
+ cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+ if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
+ cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
+ cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
+ if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
+ cmd == RAMROD_CMD_ID_ETH_HALT)
+ comp++;
+ }
+ sw_cons = BNX2X_NEXT_RCQE(sw_cons);
+ }
+ return comp;
+}
+
+static void cnic_chk_pkt_rings(struct cnic_local *cp)
+{
+ u16 rx_cons, tx_cons;
+ int comp = 0;
+
+ if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
+ return;
+
+ rx_cons = *cp->rx_cons_ptr;
+ tx_cons = *cp->tx_cons_ptr;
+ if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
+ if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
+ comp = cnic_l2_completion(cp);
+
+ cp->tx_cons = tx_cons;
+ cp->rx_cons = rx_cons;
+
+ if (cp->udev)
+ uio_event_notify(&cp->udev->cnic_uinfo);
+ }
+ if (comp)
+ clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
+}
+
+static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
+ int kcqe_cnt;
+
+ /* status block index must be read before reading other fields */
+ rmb();
+ cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
+
+ while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
+
+ service_kcqes(dev, kcqe_cnt);
+
+ /* Tell compiler that status_blk fields can change. */
+ barrier();
+ status_idx = (u16) *cp->kcq1.status_idx_ptr;
+ /* status block index must be read first */
+ rmb();
+ cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
+ }
+
+ CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
+
+ cnic_chk_pkt_rings(cp);
+
+ return status_idx;
+}
+
+static int cnic_service_bnx2(void *data, void *status_blk)
+{
+ struct cnic_dev *dev = data;
+
+ if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
+ struct status_block *sblk = status_blk;
+
+ return sblk->status_idx;
+ }
+
+ return cnic_service_bnx2_queues(dev);
+}
+
+static void cnic_service_bnx2_msix(unsigned long data)
+{
+ struct cnic_dev *dev = (struct cnic_dev *) data;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ cp->last_status_idx = cnic_service_bnx2_queues(dev);
+
+ CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
+}
+
+static void cnic_doirq(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+
+ if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
+ u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
+
+ prefetch(cp->status_blk.gen);
+ prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
+
+ tasklet_schedule(&cp->cnic_irq_task);
+ }
+}
+
+static irqreturn_t cnic_irq(int irq, void *dev_instance)
+{
+ struct cnic_dev *dev = dev_instance;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ if (cp->ack_int)
+ cp->ack_int(dev);
+
+ cnic_doirq(dev);
+
+ return IRQ_HANDLED;
+}
+
+static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
+ u16 index, u8 op, u8 update)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
+ COMMAND_REG_INT_ACK);
+ struct igu_ack_register igu_ack;
+
+ igu_ack.status_block_index = index;
+ igu_ack.sb_id_and_flags =
+ ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
+ (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
+ (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
+ (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
+
+ CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
+}
+
+static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
+ u16 index, u8 op, u8 update)
+{
+ struct igu_regular cmd_data;
+ u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
+
+ cmd_data.sb_id_and_flags =
+ (index << IGU_REGULAR_SB_INDEX_SHIFT) |
+ (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
+ (update << IGU_REGULAR_BUPDATE_SHIFT) |
+ (op << IGU_REGULAR_ENABLE_INT_SHIFT);
+
+ CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
+}
+
+static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+
+ cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
+ IGU_INT_DISABLE, 0);
+}
+
+static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+
+ cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
+ IGU_INT_DISABLE, 0);
+}
+
+static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
+{
+ u32 last_status = *info->status_idx_ptr;
+ int kcqe_cnt;
+
+ /* status block index must be read before reading the KCQ */
+ rmb();
+ while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
+
+ service_kcqes(dev, kcqe_cnt);
+
+ /* Tell compiler that sblk fields can change. */
+ barrier();
+
+ last_status = *info->status_idx_ptr;
+ /* status block index must be read before reading the KCQ */
+ rmb();
+ }
+ return last_status;
+}
+
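+/* Bottom-half handler for bnx2x devices.  KCQ1 is always serviced; on
+ * E2 and later chips KCQ2 is serviced as well, and the loop repeats
+ * until the status index is stable before re-enabling the IGU interrupt.
+ */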
+static void cnic_service_bnx2x_bh(unsigned long data)
+{
+ struct cnic_dev *dev = (struct cnic_dev *) data;
+ struct cnic_local *cp = dev->cnic_priv;
+ u32 status_idx, new_status_idx;
+
+ if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
+ return;
+
+ while (1) {
+ status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
+
+ CNIC_WR16(dev, cp->kcq1.io_addr,
+ cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
+
+ if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
+ status_idx, IGU_INT_ENABLE, 1);
+ break;
+ }
+
+ new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+
+ if (new_status_idx != status_idx)
+ continue;
+
+ CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
+ MAX_KCQ_IDX);
+
+ cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
+ status_idx, IGU_INT_ENABLE, 1);
+
+ break;
+ }
+}
+
+static int cnic_service_bnx2x(void *data, void *status_blk)
+{
+ struct cnic_dev *dev = data;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+ cnic_doirq(dev);
+
+ cnic_chk_pkt_rings(cp);
+
+ return 0;
+}
+
+static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
+{
+ struct cnic_ulp_ops *ulp_ops;
+
+ if (if_type == CNIC_ULP_ISCSI)
+ cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+
+ mutex_lock(&cnic_lock);
+ ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+ lockdep_is_held(&cnic_lock));
+ if (!ulp_ops) {
+ mutex_unlock(&cnic_lock);
+ return;
+ }
+ set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+ mutex_unlock(&cnic_lock);
+
+ if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
+ ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
+
+ clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+}
+
+static void cnic_ulp_stop(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ int if_type;
+
+ for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
+ cnic_ulp_stop_one(cp, if_type);
+}
+
+static void cnic_ulp_start(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ int if_type;
+
+ for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+ struct cnic_ulp_ops *ulp_ops;
+
+ mutex_lock(&cnic_lock);
+ ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+ lockdep_is_held(&cnic_lock));
+ if (!ulp_ops || !ulp_ops->cnic_start) {
+ mutex_unlock(&cnic_lock);
+ continue;
+ }
+ set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+ mutex_unlock(&cnic_lock);
+
+ if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
+ ulp_ops->cnic_start(cp->ulp_handle[if_type]);
+
+ clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+ }
+}
+
+static int cnic_ctl(void *data, struct cnic_ctl_info *info)
+{
+ struct cnic_dev *dev = data;
+
+ switch (info->cmd) {
+ case CNIC_CTL_STOP_CMD:
+ cnic_hold(dev);
+
+ cnic_ulp_stop(dev);
+ cnic_stop_hw(dev);
+
+ cnic_put(dev);
+ break;
+ case CNIC_CTL_START_CMD:
+ cnic_hold(dev);
+
+ if (!cnic_start_hw(dev))
+ cnic_ulp_start(dev);
+
+ cnic_put(dev);
+ break;
+ case CNIC_CTL_STOP_ISCSI_CMD: {
+ struct cnic_local *cp = dev->cnic_priv;
+ set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
+ queue_delayed_work(cnic_wq, &cp->delete_task, 0);
+ break;
+ }
+ case CNIC_CTL_COMPLETION_CMD: {
+ struct cnic_ctl_completion *comp = &info->data.comp;
+ u32 cid = BNX2X_SW_CID(comp->cid);
+ u32 l5_cid;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
+ struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+
+ if (unlikely(comp->error)) {
+ set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
+ netdev_err(dev->netdev,
+ "CID %x CFC delete comp error %x\n",
+ cid, comp->error);
+ }
+
+ ctx->wait_cond = 1;
+ wake_up(&ctx->waitq);
+ }
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void cnic_ulp_init(struct cnic_dev *dev)
+{
+ int i;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
+ struct cnic_ulp_ops *ulp_ops;
+
+ mutex_lock(&cnic_lock);
+ ulp_ops = cnic_ulp_tbl_prot(i);
+ if (!ulp_ops || !ulp_ops->cnic_init) {
+ mutex_unlock(&cnic_lock);
+ continue;
+ }
+ ulp_get(ulp_ops);
+ mutex_unlock(&cnic_lock);
+
+ if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
+ ulp_ops->cnic_init(dev);
+
+ ulp_put(ulp_ops);
+ }
+}
+
+static void cnic_ulp_exit(struct cnic_dev *dev)
+{
+ int i;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
+ struct cnic_ulp_ops *ulp_ops;
+
+ mutex_lock(&cnic_lock);
+ ulp_ops = cnic_ulp_tbl_prot(i);
+ if (!ulp_ops || !ulp_ops->cnic_exit) {
+ mutex_unlock(&cnic_lock);
+ continue;
+ }
+ ulp_get(ulp_ops);
+ mutex_unlock(&cnic_lock);
+
+ if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
+ ulp_ops->cnic_exit(dev);
+
+ ulp_put(ulp_ops);
+ }
+}
+
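+/* Build and submit an OFFLOAD_PG KWQE describing the L2 path to the
+ * peer: destination and source MAC addresses, an optional VLAN tag, and
+ * the starting IP id.  The l5_cid is passed as opaque data so that the
+ * completion can be matched back to this socket.
+ */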
+static int cnic_cm_offload_pg(struct cnic_sock *csk)
+{
+ struct cnic_dev *dev = csk->dev;
+ struct l4_kwq_offload_pg *l4kwqe;
+ struct kwqe *wqes[1];
+
+ l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
+ memset(l4kwqe, 0, sizeof(*l4kwqe));
+ wqes[0] = (struct kwqe *) l4kwqe;
+
+ l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
+ l4kwqe->flags =
+ L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
+ l4kwqe->l2hdr_nbytes = ETH_HLEN;
+
+ l4kwqe->da0 = csk->ha[0];
+ l4kwqe->da1 = csk->ha[1];
+ l4kwqe->da2 = csk->ha[2];
+ l4kwqe->da3 = csk->ha[3];
+ l4kwqe->da4 = csk->ha[4];
+ l4kwqe->da5 = csk->ha[5];
+
+ l4kwqe->sa0 = dev->mac_addr[0];
+ l4kwqe->sa1 = dev->mac_addr[1];
+ l4kwqe->sa2 = dev->mac_addr[2];
+ l4kwqe->sa3 = dev->mac_addr[3];
+ l4kwqe->sa4 = dev->mac_addr[4];
+ l4kwqe->sa5 = dev->mac_addr[5];
+
+ l4kwqe->etype = ETH_P_IP;
+ l4kwqe->ipid_start = DEF_IPID_START;
+ l4kwqe->host_opaque = csk->l5_cid;
+
+ if (csk->vlan_id) {
+ l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
+ l4kwqe->vlan_tag = csk->vlan_id;
+ l4kwqe->l2hdr_nbytes += 4;
+ }
+
+ return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_update_pg(struct cnic_sock *csk)
+{
+ struct cnic_dev *dev = csk->dev;
+ struct l4_kwq_update_pg *l4kwqe;
+ struct kwqe *wqes[1];
+
+ l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
+ memset(l4kwqe, 0, sizeof(*l4kwqe));
+ wqes[0] = (struct kwqe *) l4kwqe;
+
+ l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
+ l4kwqe->flags =
+ L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
+ l4kwqe->pg_cid = csk->pg_cid;
+
+ l4kwqe->da0 = csk->ha[0];
+ l4kwqe->da1 = csk->ha[1];
+ l4kwqe->da2 = csk->ha[2];
+ l4kwqe->da3 = csk->ha[3];
+ l4kwqe->da4 = csk->ha[4];
+ l4kwqe->da5 = csk->ha[5];
+
+ l4kwqe->pg_host_opaque = csk->l5_cid;
+ l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
+
+ return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_upload_pg(struct cnic_sock *csk)
+{
+ struct cnic_dev *dev = csk->dev;
+ struct l4_kwq_upload *l4kwqe;
+ struct kwqe *wqes[1];
+
+ l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
+ memset(l4kwqe, 0, sizeof(*l4kwqe));
+ wqes[0] = (struct kwqe *) l4kwqe;
+
+ l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
+ l4kwqe->flags =
+ L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
+ l4kwqe->cid = csk->pg_cid;
+
+ return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_conn_req(struct cnic_sock *csk)
+{
+ struct cnic_dev *dev = csk->dev;
+ struct l4_kwq_connect_req1 *l4kwqe1;
+ struct l4_kwq_connect_req2 *l4kwqe2;
+ struct l4_kwq_connect_req3 *l4kwqe3;
+ struct kwqe *wqes[3];
+ u8 tcp_flags = 0;
+ int num_wqes = 2;
+
+ l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
+ l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
+ l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
+ memset(l4kwqe1, 0, sizeof(*l4kwqe1));
+ memset(l4kwqe2, 0, sizeof(*l4kwqe2));
+ memset(l4kwqe3, 0, sizeof(*l4kwqe3));
+
+ l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
+ l4kwqe3->flags =
+ L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
+ l4kwqe3->ka_timeout = csk->ka_timeout;
+ l4kwqe3->ka_interval = csk->ka_interval;
+ l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
+ l4kwqe3->tos = csk->tos;
+ l4kwqe3->ttl = csk->ttl;
+ l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
+ l4kwqe3->pmtu = csk->mtu;
+ l4kwqe3->rcv_buf = csk->rcv_buf;
+ l4kwqe3->snd_buf = csk->snd_buf;
+ l4kwqe3->seed = csk->seed;
+
+ wqes[0] = (struct kwqe *) l4kwqe1;
+ if (test_bit(SK_F_IPV6, &csk->flags)) {
+ wqes[1] = (struct kwqe *) l4kwqe2;
+ wqes[2] = (struct kwqe *) l4kwqe3;
+ num_wqes = 3;
+
+ l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
+ l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
+ l4kwqe2->flags =
+ L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
+ L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
+ l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
+ l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
+ l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
+ l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
+ l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
+ l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
+ l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
+ sizeof(struct tcphdr);
+ } else {
+ wqes[1] = (struct kwqe *) l4kwqe3;
+ l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
+ sizeof(struct tcphdr);
+ }
+
+ l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
+ l4kwqe1->flags =
+ (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
+ L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
+ l4kwqe1->cid = csk->cid;
+ l4kwqe1->pg_cid = csk->pg_cid;
+ l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
+ l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
+ l4kwqe1->src_port = be16_to_cpu(csk->src_port);
+ l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
+ if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
+ tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
+ if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
+ tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
+ if (csk->tcp_flags & SK_TCP_NAGLE)
+ tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
+ if (csk->tcp_flags & SK_TCP_TIMESTAMP)
+ tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
+ if (csk->tcp_flags & SK_TCP_SACK)
+ tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
+ if (csk->tcp_flags & SK_TCP_SEG_SCALING)
+ tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
+
+ l4kwqe1->tcp_flags = tcp_flags;
+
+ return dev->submit_kwqes(dev, wqes, num_wqes);
+}
+
+static int cnic_cm_close_req(struct cnic_sock *csk)
+{
+ struct cnic_dev *dev = csk->dev;
+ struct l4_kwq_close_req *l4kwqe;
+ struct kwqe *wqes[1];
+
+ l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
+ memset(l4kwqe, 0, sizeof(*l4kwqe));
+ wqes[0] = (struct kwqe *) l4kwqe;
+
+ l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
+ l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
+ l4kwqe->cid = csk->cid;
+
+ return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_abort_req(struct cnic_sock *csk)
+{
+ struct cnic_dev *dev = csk->dev;
+ struct l4_kwq_reset_req *l4kwqe;
+ struct kwqe *wqes[1];
+
+ l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
+ memset(l4kwqe, 0, sizeof(*l4kwqe));
+ wqes[0] = (struct kwqe *) l4kwqe;
+
+ l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
+ l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
+ l4kwqe->cid = csk->cid;
+
+ return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
+ u32 l5_cid, struct cnic_sock **csk, void *context)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_sock *csk1;
+
+ if (l5_cid >= MAX_CM_SK_TBL_SZ)
+ return -EINVAL;
+
+ if (cp->ctx_tbl) {
+ struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+
+ if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+ return -EAGAIN;
+ }
+
+ csk1 = &cp->csk_tbl[l5_cid];
+ if (atomic_read(&csk1->ref_count))
+ return -EAGAIN;
+
+ if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
+ return -EBUSY;
+
+ csk1->dev = dev;
+ csk1->cid = cid;
+ csk1->l5_cid = l5_cid;
+ csk1->ulp_type = ulp_type;
+ csk1->context = context;
+
+ csk1->ka_timeout = DEF_KA_TIMEOUT;
+ csk1->ka_interval = DEF_KA_INTERVAL;
+ csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
+ csk1->tos = DEF_TOS;
+ csk1->ttl = DEF_TTL;
+ csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
+ csk1->rcv_buf = DEF_RCV_BUF;
+ csk1->snd_buf = DEF_SND_BUF;
+ csk1->seed = DEF_SEED;
+
+ *csk = csk1;
+ return 0;
+}
+
+static void cnic_cm_cleanup(struct cnic_sock *csk)
+{
+ if (csk->src_port) {
+ struct cnic_dev *dev = csk->dev;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
+ csk->src_port = 0;
+ }
+}
+
+static void cnic_close_conn(struct cnic_sock *csk)
+{
+ if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
+ cnic_cm_upload_pg(csk);
+ clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
+ }
+ cnic_cm_cleanup(csk);
+}
+
+static int cnic_cm_destroy(struct cnic_sock *csk)
+{
+ if (!cnic_in_use(csk))
+ return -EINVAL;
+
+ csk_hold(csk);
+ clear_bit(SK_F_INUSE, &csk->flags);
+ smp_mb__after_clear_bit();
+ while (atomic_read(&csk->ref_count) != 1)
+ msleep(1);
+ cnic_cm_cleanup(csk);
+
+ csk->flags = 0;
+ csk_put(csk);
+ return 0;
+}
+
+static inline u16 cnic_get_vlan(struct net_device *dev,
+ struct net_device **vlan_dev)
+{
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ *vlan_dev = vlan_dev_real_dev(dev);
+ return vlan_dev_vlan_id(dev);
+ }
+ *vlan_dev = dev;
+ return 0;
+}
+
+static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
+ struct dst_entry **dst)
+{
+#if defined(CONFIG_INET)
+ struct rtable *rt;
+
+ rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
+ if (!IS_ERR(rt)) {
+ *dst = &rt->dst;
+ return 0;
+ }
+ return PTR_ERR(rt);
+#else
+ return -ENETUNREACH;
+#endif
+}
+
+static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
+ struct dst_entry **dst)
+{
+#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
+ if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+ fl6.flowi6_oif = dst_addr->sin6_scope_id;
+
+ *dst = ip6_route_output(&init_net, NULL, &fl6);
+ if (*dst)
+ return 0;
+#endif
+
+ return -ENETUNREACH;
+}
+
+static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
+ int ulp_type)
+{
+ struct cnic_dev *dev = NULL;
+ struct dst_entry *dst;
+ struct net_device *netdev = NULL;
+ int err = -ENETUNREACH;
+
+ if (dst_addr->sin_family == AF_INET)
+ err = cnic_get_v4_route(dst_addr, &dst);
+ else if (dst_addr->sin_family == AF_INET6) {
+ struct sockaddr_in6 *dst_addr6 =
+ (struct sockaddr_in6 *) dst_addr;
+
+ err = cnic_get_v6_route(dst_addr6, &dst);
+ } else
+ return NULL;
+
+ if (err)
+ return NULL;
+
+ if (!dst->dev)
+ goto done;
+
+ cnic_get_vlan(dst->dev, &netdev);
+
+ dev = cnic_from_netdev(netdev);
+
+done:
+ dst_release(dst);
+ if (dev)
+ cnic_put(dev);
+ return dev;
+}
+
+static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+ struct cnic_dev *dev = csk->dev;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
+}
+
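+/* Resolve routing for a connection request: record the destination IP
+ * and port, pick up the VLAN id and MTU from the output device when it
+ * is the one this cnic device is bound to, and reserve a local port
+ * (the caller-supplied port if it is in range and free, otherwise a
+ * newly allocated one).
+ */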
+static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+ struct cnic_dev *dev = csk->dev;
+ struct cnic_local *cp = dev->cnic_priv;
+ int is_v6, rc = 0;
+ struct dst_entry *dst = NULL;
+ struct net_device *realdev;
+ __be16 local_port;
+ u32 port_id;
+
+ if (saddr->local.v6.sin6_family == AF_INET6 &&
+ saddr->remote.v6.sin6_family == AF_INET6)
+ is_v6 = 1;
+ else if (saddr->local.v4.sin_family == AF_INET &&
+ saddr->remote.v4.sin_family == AF_INET)
+ is_v6 = 0;
+ else
+ return -EINVAL;
+
+ clear_bit(SK_F_IPV6, &csk->flags);
+
+ if (is_v6) {
+ set_bit(SK_F_IPV6, &csk->flags);
+ cnic_get_v6_route(&saddr->remote.v6, &dst);
+
+ memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
+ sizeof(struct in6_addr));
+ csk->dst_port = saddr->remote.v6.sin6_port;
+ local_port = saddr->local.v6.sin6_port;
+
+ } else {
+ cnic_get_v4_route(&saddr->remote.v4, &dst);
+
+ csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
+ csk->dst_port = saddr->remote.v4.sin_port;
+ local_port = saddr->local.v4.sin_port;
+ }
+
+ csk->vlan_id = 0;
+ csk->mtu = dev->netdev->mtu;
+ if (dst && dst->dev) {
+ u16 vlan = cnic_get_vlan(dst->dev, &realdev);
+ if (realdev == dev->netdev) {
+ csk->vlan_id = vlan;
+ csk->mtu = dst_mtu(dst);
+ }
+ }
+
+ port_id = be16_to_cpu(local_port);
+ if (port_id >= CNIC_LOCAL_PORT_MIN &&
+ port_id < CNIC_LOCAL_PORT_MAX) {
+ if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
+ port_id = 0;
+ } else
+ port_id = 0;
+
+ if (!port_id) {
+ port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
+ if (port_id == -1) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ local_port = cpu_to_be16(port_id);
+ }
+ csk->src_port = local_port;
+
+err_out:
+ dst_release(dst);
+ return rc;
+}
+
+static void cnic_init_csk_state(struct cnic_sock *csk)
+{
+ csk->state = 0;
+ clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+ clear_bit(SK_F_CLOSING, &csk->flags);
+}
+
+static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+ struct cnic_local *cp = csk->dev->cnic_priv;
+ int err = 0;
+
+ if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
+ return -EOPNOTSUPP;
+
+ if (!cnic_in_use(csk))
+ return -EINVAL;
+
+ if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
+ return -EINVAL;
+
+ cnic_init_csk_state(csk);
+
+ err = cnic_get_route(csk, saddr);
+ if (err)
+ goto err_out;
+
+ err = cnic_resolve_addr(csk, saddr);
+ if (!err)
+ return 0;
+
+err_out:
+ clear_bit(SK_F_CONNECT_START, &csk->flags);
+ return err;
+}
+
+static int cnic_cm_abort(struct cnic_sock *csk)
+{
+ struct cnic_local *cp = csk->dev->cnic_priv;
+ u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+
+ if (!cnic_in_use(csk))
+ return -EINVAL;
+
+ if (cnic_abort_prep(csk))
+ return cnic_cm_abort_req(csk);
+
+ /* Getting here means that we haven't started connect, or
+ * connect was not successful.
+ */
+
+ cp->close_conn(csk, opcode);
+ if (csk->state != opcode)
+ return -EALREADY;
+
+ return 0;
+}
+
+static int cnic_cm_close(struct cnic_sock *csk)
+{
+ if (!cnic_in_use(csk))
+ return -EINVAL;
+
+ if (cnic_close_prep(csk)) {
+ csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
+ return cnic_cm_close_req(csk);
+ }
+
+ return -EALREADY;
+}
+
+static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
+ u8 opcode)
+{
+ struct cnic_ulp_ops *ulp_ops;
+ int ulp_type = csk->ulp_type;
+
+ rcu_read_lock();
+ ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
+ if (ulp_ops) {
+ if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
+ ulp_ops->cm_connect_complete(csk);
+ else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
+ ulp_ops->cm_close_complete(csk);
+ else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
+ ulp_ops->cm_remote_abort(csk);
+ else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
+ ulp_ops->cm_abort_complete(csk);
+ else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
+ ulp_ops->cm_remote_close(csk);
+ }
+ rcu_read_unlock();
+}
+
+static int cnic_cm_set_pg(struct cnic_sock *csk)
+{
+ if (cnic_offld_prep(csk)) {
+ if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+ cnic_cm_update_pg(csk);
+ else
+ cnic_cm_offload_pg(csk);
+ }
+ return 0;
+}
+
+static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ u32 l5_cid = kcqe->pg_host_opaque;
+ u8 opcode = kcqe->op_code;
+ struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
+
+ csk_hold(csk);
+ if (!cnic_in_use(csk))
+ goto done;
+
+ if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
+ clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+ goto done;
+ }
+ /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
+ if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
+ clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+ cnic_cm_upcall(cp, csk,
+ L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
+ goto done;
+ }
+
+ csk->pg_cid = kcqe->pg_cid;
+ set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
+ cnic_cm_conn_req(csk);
+
+done:
+ csk_put(csk);
+}
+
+static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
+ u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
+ struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+
+ ctx->timestamp = jiffies;
+ ctx->wait_cond = 1;
+ wake_up(&ctx->waitq);
+}
+
+static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
+ u8 opcode = l4kcqe->op_code;
+ u32 l5_cid;
+ struct cnic_sock *csk;
+
+ if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
+ cnic_process_fcoe_term_conn(dev, kcqe);
+ return;
+ }
+ if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
+ opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
+ cnic_cm_process_offld_pg(dev, l4kcqe);
+ return;
+ }
+
+ l5_cid = l4kcqe->conn_id;
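+	/* Ramrod-style completions (opcode bit 7 set) carry the connection
+	 * in the cid field rather than in conn_id.
+	 */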
+ if (opcode & 0x80)
+ l5_cid = l4kcqe->cid;
+ if (l5_cid >= MAX_CM_SK_TBL_SZ)
+ return;
+
+ csk = &cp->csk_tbl[l5_cid];
+ csk_hold(csk);
+
+ if (!cnic_in_use(csk)) {
+ csk_put(csk);
+ return;
+ }
+
+ switch (opcode) {
+ case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
+ if (l4kcqe->status != 0) {
+ clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+ cnic_cm_upcall(cp, csk,
+ L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
+ }
+ break;
+ case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
+ if (l4kcqe->status == 0)
+ set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
+
+ smp_mb__before_clear_bit();
+ clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+ cnic_cm_upcall(cp, csk, opcode);
+ break;
+
+ case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
+ case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
+ case L4_KCQE_OPCODE_VALUE_RESET_COMP:
+ case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
+ case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
+ cp->close_conn(csk, opcode);
+ break;
+
+ case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
+ /* after we already sent CLOSE_REQ */
+ if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
+ !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
+ csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
+ cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
+ else
+ cnic_cm_upcall(cp, csk, opcode);
+ break;
+ }
+ csk_put(csk);
+}
+
+static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
+{
+ struct cnic_dev *dev = data;
+ int i;
+
+ for (i = 0; i < num; i++)
+ cnic_cm_process_kcqe(dev, kcqe[i]);
+}
+
+static struct cnic_ulp_ops cm_ulp_ops = {
+ .indicate_kcqes = cnic_cm_indicate_kcqe,
+};
+
+static void cnic_cm_free_mem(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+
+ kfree(cp->csk_tbl);
+ cp->csk_tbl = NULL;
+ cnic_free_id_tbl(&cp->csk_port_tbl);
+}
+
+static int cnic_cm_alloc_mem(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ u32 port_id;
+
+ cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
+ GFP_KERNEL);
+ if (!cp->csk_tbl)
+ return -ENOMEM;
+
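+	/* Start the port allocator at a random offset within the range,
+	 * presumably so that local ports are less likely to be reused
+	 * back-to-back across driver reloads.
+	 */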
+ port_id = random32();
+ port_id %= CNIC_LOCAL_PORT_RANGE;
+ if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
+ CNIC_LOCAL_PORT_MIN, port_id)) {
+ cnic_cm_free_mem(dev);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
+{
+ if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+ /* Unsolicited RESET_COMP or RESET_RECEIVED */
+ opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
+ csk->state = opcode;
+ }
+
+	/* We are ready to close if:
+	 * 1. the event opcode matches the expected event in csk->state, or
+	 * 2. the expected event is CLOSE_COMP or RESET_COMP, in which case
+	 *    we accept any event, or
+	 * 3. the expected event is 0, meaning the connection was never
+	 *    established, in which case we accept the opcode from cm_abort.
+	 */
+ if (opcode == csk->state || csk->state == 0 ||
+ csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
+ csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
+ if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
+ if (csk->state == 0)
+ csk->state = opcode;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
+{
+ struct cnic_dev *dev = csk->dev;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
+ cnic_cm_upcall(cp, csk, opcode);
+ return;
+ }
+
+ clear_bit(SK_F_CONNECT_START, &csk->flags);
+ cnic_close_conn(csk);
+ csk->state = opcode;
+ cnic_cm_upcall(cp, csk, opcode);
+}
+
+static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
+{
+}
+
+static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
+{
+ u32 seed;
+
+ seed = random32();
+ cnic_ctx_wr(dev, 45, 0, seed);
+ return 0;
+}
+
+static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
+{
+ struct cnic_dev *dev = csk->dev;
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
+ union l5cm_specific_data l5_data;
+ u32 cmd = 0;
+ int close_complete = 0;
+
+ switch (opcode) {
+ case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
+ case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
+ case L4_KCQE_OPCODE_VALUE_RESET_COMP:
+ if (cnic_ready_to_close(csk, opcode)) {
+ if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+ cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
+ else
+ close_complete = 1;
+ }
+ break;
+ case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
+ cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
+ break;
+ case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
+ close_complete = 1;
+ break;
+ }
+ if (cmd) {
+ memset(&l5_data, 0, sizeof(l5_data));
+
+ cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
+ &l5_data);
+ } else if (close_complete) {
+ ctx->timestamp = jiffies;
+ cnic_close_conn(csk);
+ cnic_cm_upcall(cp, csk, csk->state);
+ }
+}
+
+static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+
+ if (!cp->ctx_tbl)
+ return;
+
+ if (!netif_running(dev->netdev))
+ return;
+
+ cnic_bnx2x_delete_wait(dev, 0);
+
+ cancel_delayed_work(&cp->delete_task);
+ flush_workqueue(cnic_wq);
+
+ if (atomic_read(&cp->iscsi_conn) != 0)
+ netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
+ atomic_read(&cp->iscsi_conn));
+}
+
+static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ u32 pfid = cp->pfid;
+ u32 port = CNIC_PORT(cp);
+
+ cnic_init_bnx2x_mac(dev);
+ cnic_bnx2x_set_tcp_timestamp(dev, 1);
+
+ CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
+ XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
+
+ CNIC_WR(dev, BAR_XSTRORM_INTMEM +
+ XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
+ CNIC_WR(dev, BAR_XSTRORM_INTMEM +
+ XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
+ DEF_MAX_DA_COUNT);
+
+ CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+ XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
+ CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+ XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
+ CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+ XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
+ CNIC_WR(dev, BAR_XSTRORM_INTMEM +
+ XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
+
+ CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
+ DEF_MAX_CWND);
+ return 0;
+}
+
+static void cnic_delete_task(struct work_struct *work)
+{
+ struct cnic_local *cp;
+ struct cnic_dev *dev;
+ u32 i;
+ int need_resched = 0;
+
+ cp = container_of(work, struct cnic_local, delete_task.work);
+ dev = cp->dev;
+
+ if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
+ struct drv_ctl_info info;
+
+ cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
+
+ info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
+ cp->ethdev->drv_ctl(dev->netdev, &info);
+ }
+
+ for (i = 0; i < cp->max_cid_space; i++) {
+ struct cnic_context *ctx = &cp->ctx_tbl[i];
+ int err;
+
+ if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
+ !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
+ continue;
+
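+		/* Give the connection a ~2 second grace period after its
+		 * last event before destroying it; until then, reschedule
+		 * this work and retry.
+		 */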
+ if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
+ need_resched = 1;
+ continue;
+ }
+
+ if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
+ continue;
+
+ err = cnic_bnx2x_destroy_ramrod(dev, i);
+
+ cnic_free_bnx2x_conn_resc(dev, i);
+ if (!err) {
+ if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
+ atomic_dec(&cp->iscsi_conn);
+
+ clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+ }
+ }
+
+ if (need_resched)
+ queue_delayed_work(cnic_wq, &cp->delete_task,
+ msecs_to_jiffies(10));
+}
+
+static int cnic_cm_open(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ int err;
+
+ err = cnic_cm_alloc_mem(dev);
+ if (err)
+ return err;
+
+ err = cp->start_cm(dev);
+
+ if (err)
+ goto err_out;
+
+ INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
+
+ dev->cm_create = cnic_cm_create;
+ dev->cm_destroy = cnic_cm_destroy;
+ dev->cm_connect = cnic_cm_connect;
+ dev->cm_abort = cnic_cm_abort;
+ dev->cm_close = cnic_cm_close;
+ dev->cm_select_dev = cnic_cm_select_dev;
+
+ cp->ulp_handle[CNIC_ULP_L4] = dev;
+ rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
+ return 0;
+
+err_out:
+ cnic_cm_free_mem(dev);
+ return err;
+}
+
+static int cnic_cm_shutdown(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ int i;
+
+ cp->stop_cm(dev);
+
+ if (!cp->csk_tbl)
+ return 0;
+
+ for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
+ struct cnic_sock *csk = &cp->csk_tbl[i];
+
+ clear_bit(SK_F_INUSE, &csk->flags);
+ cnic_cm_cleanup(csk);
+ }
+ cnic_cm_free_mem(dev);
+
+ return 0;
+}
+
+static void cnic_init_context(struct cnic_dev *dev, u32 cid)
+{
+ u32 cid_addr;
+ int i;
+
+ cid_addr = GET_CID_ADDR(cid);
+
+ for (i = 0; i < CTX_SIZE; i += 4)
+ cnic_ctx_wr(dev, cid_addr, i, 0);
+}
+
+static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ int ret = 0, i;
+ u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
+
+ if (CHIP_NUM(cp) != CHIP_NUM_5709)
+ return 0;
+
+ for (i = 0; i < cp->ctx_blks; i++) {
+ int j;
+ u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
+ u32 val;
+
+ memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
+
+ CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
+ (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
+ CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
+ (u64) cp->ctx_arr[i].mapping >> 32);
+ CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
+ BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
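+		/* Poll for the page-table write request to be acknowledged;
+		 * 10 tries with udelay(5) gives the chip roughly 50 usec.
+		 */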
+ for (j = 0; j < 10; j++) {
+ val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
+ if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
+ break;
+ udelay(5);
+ }
+ if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+ return ret;
+}
+
+static void cnic_free_irq(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+
+ if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+ cp->disable_int_sync(dev);
+ tasklet_kill(&cp->cnic_irq_task);
+ free_irq(ethdev->irq_arr[0].vector, dev);
+ }
+}
+
+static int cnic_request_irq(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ int err;
+
+ err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
+ if (err)
+ tasklet_disable(&cp->cnic_irq_task);
+
+ return err;
+}
+
+static int cnic_init_bnx2_irq(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+
+ if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+ int err, i = 0;
+ int sblk_num = cp->status_blk_num;
+ u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
+ BNX2_HC_SB_CONFIG_1;
+
+ CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
+
+ CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
+ CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
+ CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
+
+ cp->last_status_idx = cp->status_blk.bnx2->status_idx;
+ tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
+ (unsigned long) dev);
+ err = cnic_request_irq(dev);
+ if (err)
+ return err;
+
+ while (cp->status_blk.bnx2->status_completion_producer_index &&
+ i < 10) {
+ CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
+ 1 << (11 + sblk_num));
+ udelay(10);
+ i++;
+ barrier();
+ }
+ if (cp->status_blk.bnx2->status_completion_producer_index) {
+ cnic_free_irq(dev);
+ goto failed;
+ }
+
+ } else {
+ struct status_block *sblk = cp->status_blk.gen;
+ u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
+ int i = 0;
+
+ while (sblk->status_completion_producer_index && i < 10) {
+ CNIC_WR(dev, BNX2_HC_COMMAND,
+ hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+ udelay(10);
+ i++;
+ barrier();
+ }
+ if (sblk->status_completion_producer_index)
+ goto failed;
+
+ }
+ return 0;
+
+failed:
+ netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
+ return -EBUSY;
+}
+
+static void cnic_enable_bnx2_int(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+
+ if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+ return;
+
+ CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+ BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
+}
+
+static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+
+ if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+ return;
+
+ CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+ BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+ CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
+ synchronize_irq(ethdev->irq_arr[0].vector);
+}
+
+static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct cnic_uio_dev *udev = cp->udev;
+ u32 cid_addr, tx_cid, sb_id;
+ u32 val, offset0, offset1, offset2, offset3;
+ int i;
+ struct tx_bd *txbd;
+ dma_addr_t buf_map, ring_map = udev->l2_ring_map;
+ struct status_block *s_blk = cp->status_blk.gen;
+
+ sb_id = cp->status_blk_num;
+ tx_cid = 20;
+ cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
+ if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+ struct status_block_msix *sblk = cp->status_blk.bnx2;
+
+ tx_cid = TX_TSS_CID + sb_id - 1;
+ CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
+ (TX_TSS_CID << 7));
+ cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
+ }
+ cp->tx_cons = *cp->tx_cons_ptr;
+
+ cid_addr = GET_CID_ADDR(tx_cid);
+ if (CHIP_NUM(cp) == CHIP_NUM_5709) {
+ u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
+
+ for (i = 0; i < PHY_CTX_SIZE; i += 4)
+ cnic_ctx_wr(dev, cid_addr2, i, 0);
+
+ offset0 = BNX2_L2CTX_TYPE_XI;
+ offset1 = BNX2_L2CTX_CMD_TYPE_XI;
+ offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
+ offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
+ } else {
+ cnic_init_context(dev, tx_cid);
+ cnic_init_context(dev, tx_cid + 1);
+
+ offset0 = BNX2_L2CTX_TYPE;
+ offset1 = BNX2_L2CTX_CMD_TYPE;
+ offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
+ offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
+ }
+ val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
+ cnic_ctx_wr(dev, cid_addr, offset0, val);
+
+ val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
+ cnic_ctx_wr(dev, cid_addr, offset1, val);
+
+ txbd = udev->l2_ring;
+
+ buf_map = udev->l2_buf_map;
+ for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
+ txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
+ txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
+ }
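+	/* txbd now points at the last BD of the page; chain it back to the
+	 * base of the ring, since the TX ring occupies a single page.
+	 */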
+ val = (u64) ring_map >> 32;
+ cnic_ctx_wr(dev, cid_addr, offset2, val);
+ txbd->tx_bd_haddr_hi = val;
+
+ val = (u64) ring_map & 0xffffffff;
+ cnic_ctx_wr(dev, cid_addr, offset3, val);
+ txbd->tx_bd_haddr_lo = val;
+}
+
+static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct cnic_uio_dev *udev = cp->udev;
+ u32 cid_addr, sb_id, val, coal_reg, coal_val;
+ int i;
+ struct rx_bd *rxbd;
+ struct status_block *s_blk = cp->status_blk.gen;
+ dma_addr_t ring_map = udev->l2_ring_map;
+
+ sb_id = cp->status_blk_num;
+ cnic_init_context(dev, 2);
+ cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
+ coal_reg = BNX2_HC_COMMAND;
+ coal_val = CNIC_RD(dev, coal_reg);
+ if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+ struct status_block_msix *sblk = cp->status_blk.bnx2;
+
+ cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
+ coal_reg = BNX2_HC_COALESCE_NOW;
+ coal_val = 1 << (11 + sb_id);
+ }
+ i = 0;
+	while (*cp->rx_cons_ptr == 0 && i < 10) {
+ CNIC_WR(dev, coal_reg, coal_val);
+ udelay(10);
+ i++;
+ barrier();
+ }
+ cp->rx_cons = *cp->rx_cons_ptr;
+
+ cid_addr = GET_CID_ADDR(2);
+ val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
+ BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
+ cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
+
+ if (sb_id == 0)
+ val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
+ else
+ val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
+ cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
+
+ rxbd = udev->l2_ring + BCM_PAGE_SIZE;
+ for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
+ dma_addr_t buf_map;
+ int n = (i % cp->l2_rx_ring_size) + 1;
+
+ buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
+ rxbd->rx_bd_len = cp->l2_single_buf_size;
+ rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
+ rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
+ rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
+ }
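+	/* Chain the last RX BD back to the start of the RX page. */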
+ val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
+ cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
+ rxbd->rx_bd_haddr_hi = val;
+
+ val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
+ cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
+ rxbd->rx_bd_haddr_lo = val;
+
+ val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
+ cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
+}
+
+static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
+{
+ struct kwqe *wqes[1], l2kwqe;
+
+ memset(&l2kwqe, 0, sizeof(l2kwqe));
+ wqes[0] = &l2kwqe;
+ l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
+ (L2_KWQE_OPCODE_VALUE_FLUSH <<
+ KWQE_OPCODE_SHIFT) | 2;
+ dev->submit_kwqes(dev, wqes, 1);
+}
+
+static void cnic_set_bnx2_mac(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ u32 val;
+
+ val = cp->func << 2;
+
+ cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
+
+ val = cnic_reg_rd_ind(dev, cp->shmem_base +
+ BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
+ dev->mac_addr[0] = (u8) (val >> 8);
+ dev->mac_addr[1] = (u8) val;
+
+ CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
+
+ val = cnic_reg_rd_ind(dev, cp->shmem_base +
+ BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
+ dev->mac_addr[2] = (u8) (val >> 24);
+ dev->mac_addr[3] = (u8) (val >> 16);
+ dev->mac_addr[4] = (u8) (val >> 8);
+ dev->mac_addr[5] = (u8) val;
+
+ CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
+
+ val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
+ if (CHIP_NUM(cp) != CHIP_NUM_5709)
+ val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
+
+ CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
+ CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
+ CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
+}
+
+static int cnic_start_bnx2_hw(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct status_block *sblk = cp->status_blk.gen;
+ u32 val, kcq_cid_addr, kwq_cid_addr;
+ int err;
+
+ cnic_set_bnx2_mac(dev);
+
+ val = CNIC_RD(dev, BNX2_MQ_CONFIG);
+ val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
+ if (BCM_PAGE_BITS > 12)
+ val |= (12 - 8) << 4;
+ else
+ val |= (BCM_PAGE_BITS - 8) << 4;
+
+ CNIC_WR(dev, BNX2_MQ_CONFIG, val);
+
+ CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
+ CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
+ CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
+
+ err = cnic_setup_5709_context(dev, 1);
+ if (err)
+ return err;
+
+ cnic_init_context(dev, KWQ_CID);
+ cnic_init_context(dev, KCQ_CID);
+
+ kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
+ cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
+
+ cp->max_kwq_idx = MAX_KWQ_IDX;
+ cp->kwq_prod_idx = 0;
+ cp->kwq_con_idx = 0;
+ set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
+
+ if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
+ cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
+ else
+ cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
+
+ /* Initialize the kernel work queue context. */
+ val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
+ (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+ cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
+
+ val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
+ cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+
+ val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
+ cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+
+ val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
+ cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+
+ val = (u32) cp->kwq_info.pgtbl_map;
+ cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+ kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
+ cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
+
+ cp->kcq1.sw_prod_idx = 0;
+ cp->kcq1.hw_prod_idx_ptr =
+ (u16 *) &sblk->status_completion_producer_index;
+
+ cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
+
+ /* Initialize the kernel complete queue context. */
+ val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
+ (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+ cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
+
+ val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
+ cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+
+ val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
+ cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+
+ val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
+ cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+
+ val = (u32) cp->kcq1.dma.pgtbl_map;
+ cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+ cp->int_num = 0;
+ if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+ struct status_block_msix *msblk = cp->status_blk.bnx2;
+ u32 sb_id = cp->status_blk_num;
+ u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
+
+ cp->kcq1.hw_prod_idx_ptr =
+ (u16 *) &msblk->status_completion_producer_index;
+ cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
+ cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
+ cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
+ cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+ cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+ }
+
+	/* Enable Command Scheduler notification when we write to the
+	 * host producer index of the kernel contexts. */
+ CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
+
+ /* Enable Command Scheduler notification when we write to either
+ * the Send Queue or Receive Queue producer indexes of the kernel
+ * bypass contexts. */
+ CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
+ CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
+
+	/* Notify COM when the driver posts an application buffer. */
+ CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
+
+	/* Set the CP and COM doorbells.  These two processors poll the
+	 * doorbell for a non-zero value before running.  This must be done
+	 * after setting up the kernel queue contexts. */
+ cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
+ cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
+
+ cnic_init_bnx2_tx_ring(dev);
+ cnic_init_bnx2_rx_ring(dev);
+
+ err = cnic_init_bnx2_irq(dev);
+ if (err) {
+ netdev_err(dev->netdev, "cnic_init_irq failed\n");
+ cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
+ cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
+ return err;
+ }
+
+ return 0;
+}
+
+static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ u32 start_offset = ethdev->ctx_tbl_offset;
+ int i;
+
+ for (i = 0; i < cp->ctx_blks; i++) {
+ struct cnic_ctx *ctx = &cp->ctx_arr[i];
+ dma_addr_t map = ctx->mapping;
+
+ if (cp->ctx_align) {
+ unsigned long mask = cp->ctx_align - 1;
+
+ map = (map + mask) & ~mask;
+ }
+
+ cnic_ctx_tbl_wr(dev, start_offset + i, map);
+ }
+}
+
+static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ int err = 0;
+
+ tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
+ (unsigned long) dev);
+ if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
+ err = cnic_request_irq(dev);
+
+ return err;
+}
+
+static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
+ u16 sb_id, u8 sb_index,
+ u8 disable)
+{
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
+ offsetof(struct hc_status_block_data_e1x, index_data) +
+ sizeof(struct hc_index_data)*sb_index +
+ offsetof(struct hc_index_data, flags);
+ u16 flags = CNIC_RD16(dev, addr);
+ /* clear and set */
+ flags &= ~HC_INDEX_DATA_HC_ENABLED;
+ flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
+ HC_INDEX_DATA_HC_ENABLED);
+ CNIC_WR16(dev, addr, flags);
+}
+
+static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ u8 sb_id = cp->status_blk_num;
+
+ CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
+ offsetof(struct hc_status_block_data_e1x, index_data) +
+ sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
+ offsetof(struct hc_index_data, timeout), 64 / 4);
+ cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
+}
+
+static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
+{
+}
+
+static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
+ struct client_init_ramrod_data *data)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_uio_dev *udev = cp->udev;
+ union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
+ dma_addr_t buf_map, ring_map = udev->l2_ring_map;
+ struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
+ int i;
+ u32 cli = cp->ethdev->iscsi_l2_client_id;
+ u32 val;
+
+ memset(txbd, 0, BCM_PAGE_SIZE);
+
+ buf_map = udev->l2_buf_map;
+ for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
+ struct eth_tx_start_bd *start_bd = &txbd->start_bd;
+ struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
+
+ start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
+ start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
+ reg_bd->addr_hi = start_bd->addr_hi;
+ reg_bd->addr_lo = start_bd->addr_lo + 0x10;
+ start_bd->nbytes = cpu_to_le16(0x10);
+ start_bd->nbd = cpu_to_le16(3);
+ start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+ start_bd->general_data = (UNICAST_ADDRESS <<
+ ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
+ start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
+	}
+
+ val = (u64) ring_map >> 32;
+ txbd->next_bd.addr_hi = cpu_to_le32(val);
+
+ data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
+
+ val = (u64) ring_map & 0xffffffff;
+ txbd->next_bd.addr_lo = cpu_to_le32(val);
+
+ data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
+
+ /* Other ramrod params */
+ data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
+ data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
+
+ /* reset xstorm per client statistics */
+ if (cli < MAX_STAT_COUNTER_ID) {
+ data->general.statistics_zero_flg = 1;
+ data->general.statistics_en_flg = 1;
+ data->general.statistics_counter_id = cli;
+ }
+
+ cp->tx_cons_ptr =
+ &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
+}
+
+static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
+ struct client_init_ramrod_data *data)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_uio_dev *udev = cp->udev;
+ struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
+ BCM_PAGE_SIZE);
+ struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
+ (udev->l2_ring + (2 * BCM_PAGE_SIZE));
+ struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
+ int i;
+ u32 cli = cp->ethdev->iscsi_l2_client_id;
+ int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
+ u32 val;
+ dma_addr_t ring_map = udev->l2_ring_map;
+
+ /* General data */
+ data->general.client_id = cli;
+ data->general.activate_flg = 1;
+ data->general.sp_client_id = cli;
+ data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
+ data->general.func_id = cp->pfid;
+
+ for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
+ dma_addr_t buf_map;
+ int n = (i % cp->l2_rx_ring_size) + 1;
+
+ buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
+ rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
+ rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
+ }
+
+ val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
+ rxbd->addr_hi = cpu_to_le32(val);
+ data->rx.bd_page_base.hi = cpu_to_le32(val);
+
+ val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
+ rxbd->addr_lo = cpu_to_le32(val);
+ data->rx.bd_page_base.lo = cpu_to_le32(val);
+
+ rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
+ val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
+ rxcqe->addr_hi = cpu_to_le32(val);
+ data->rx.cqe_page_base.hi = cpu_to_le32(val);
+
+ val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
+ rxcqe->addr_lo = cpu_to_le32(val);
+ data->rx.cqe_page_base.lo = cpu_to_le32(val);
+
+ /* Other ramrod params */
+ data->rx.client_qzone_id = cl_qzone_id;
+ data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
+ data->rx.status_block_id = BNX2X_DEF_SB_ID;
+
+ data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
+
+ data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
+ data->rx.outer_vlan_removal_enable_flg = 1;
+ data->rx.silent_vlan_removal_flg = 1;
+ data->rx.silent_vlan_value = 0;
+ data->rx.silent_vlan_mask = 0xffff;
+
+ cp->rx_cons_ptr =
+ &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
+ cp->rx_cons = *cp->rx_cons_ptr;
+}
+
+static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ u32 pfid = cp->pfid;
+
+ cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
+ CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
+ cp->kcq1.sw_prod_idx = 0;
+
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
+
+ cp->kcq1.hw_prod_idx_ptr =
+ &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
+ cp->kcq1.status_idx_ptr =
+ &sb->sb.running_index[SM_RX_ID];
+ } else {
+ struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
+
+ cp->kcq1.hw_prod_idx_ptr =
+ &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
+ cp->kcq1.status_idx_ptr =
+ &sb->sb.running_index[SM_RX_ID];
+ }
+
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
+
+ cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
+ USTORM_FCOE_EQ_PROD_OFFSET(pfid);
+ cp->kcq2.sw_prod_idx = 0;
+ cp->kcq2.hw_prod_idx_ptr =
+ &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
+ cp->kcq2.status_idx_ptr =
+ &sb->sb.running_index[SM_RX_ID];
+ }
+}
+
+static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ int func = CNIC_FUNC(cp), ret;
+ u32 pfid;
+
+ cp->port_mode = CHIP_PORT_MODE_NONE;
+
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
+
+ if (!(val & 1))
+ val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
+ else
+ val = (val >> 1) & 1;
+
+ if (val) {
+ cp->port_mode = CHIP_4_PORT_MODE;
+ cp->pfid = func >> 1;
+ } else {
+ cp->port_mode = CHIP_2_PORT_MODE;
+ cp->pfid = func & 0x6;
+ }
+ } else {
+ cp->pfid = func;
+ }
+ pfid = cp->pfid;
+
+ ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
+ cp->iscsi_start_cid, 0);
+
+ if (ret)
+ return -ENOMEM;
+
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
+ cp->fcoe_start_cid, 0);
+
+ if (ret)
+ return -ENOMEM;
+ }
+
+ cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
+
+ cnic_init_bnx2x_kcq(dev);
+
+ /* Only 1 EQ */
+ CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
+ CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+ CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
+ CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+ CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
+ cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
+ CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+ CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
+ (u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
+ CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+ CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
+ cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
+ CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+ CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
+ (u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
+ CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
+ CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
+ CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
+ CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
+ CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
+ CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
+ HC_INDEX_ISCSI_EQ_CONS);
+
+ CNIC_WR(dev, BAR_USTRORM_INTMEM +
+ USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
+ cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
+ CNIC_WR(dev, BAR_USTRORM_INTMEM +
+ USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
+ (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
+
+ CNIC_WR(dev, BAR_TSTRORM_INTMEM +
+ TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
+
+ cnic_setup_bnx2x_context(dev);
+
+ ret = cnic_init_bnx2x_irq(dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void cnic_init_rings(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_uio_dev *udev = cp->udev;
+
+ if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
+ return;
+
+ if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+ cnic_init_bnx2_tx_ring(dev);
+ cnic_init_bnx2_rx_ring(dev);
+ set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
+ } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+ u32 cli = cp->ethdev->iscsi_l2_client_id;
+ u32 cid = cp->ethdev->iscsi_l2_cid;
+ u32 cl_qzone_id;
+ struct client_init_ramrod_data *data;
+ union l5cm_specific_data l5_data;
+ struct ustorm_eth_rx_producers rx_prods = {0};
+ u32 off, i, *cid_ptr;
+
+ rx_prods.bd_prod = 0;
+ rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
+ barrier();
+
+ cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
+
+ off = BAR_USTRORM_INTMEM +
+ (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
+ USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
+ USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
+
+ for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
+ CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
+
+ set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
+
+ data = udev->l2_buf;
+ cid_ptr = udev->l2_buf + 12;
+
+ memset(data, 0, sizeof(*data));
+
+ cnic_init_bnx2x_tx_ring(dev, data);
+ cnic_init_bnx2x_rx_ring(dev, data);
+
+ l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
+ l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
+
+ set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
+
+ cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
+ cid, ETH_CONNECTION_TYPE, &l5_data);
+
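+		/* The CLIENT_SETUP completion clears CNIC_LCL_FL_L2_WAIT;
+		 * poll for it for up to ~10 ms.
+		 */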
+ i = 0;
+ while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
+ ++i < 10)
+ msleep(1);
+
+ if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
+ netdev_err(dev->netdev,
+ "iSCSI CLIENT_SETUP did not complete\n");
+ cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
+ cnic_ring_ctl(dev, cid, cli, 1);
+ *cid_ptr = cid;
+ }
+}
+
+static void cnic_shutdown_rings(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_uio_dev *udev = cp->udev;
+ void *rx_ring;
+
+ if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
+ return;
+
+ if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+ cnic_shutdown_bnx2_rx_ring(dev);
+ } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+ u32 cli = cp->ethdev->iscsi_l2_client_id;
+ u32 cid = cp->ethdev->iscsi_l2_cid;
+ union l5cm_specific_data l5_data;
+ int i;
+
+ cnic_ring_ctl(dev, cid, cli, 0);
+
+ set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
+
+ l5_data.phy_address.lo = cli;
+ l5_data.phy_address.hi = 0;
+ cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
+ cid, ETH_CONNECTION_TYPE, &l5_data);
+ i = 0;
+ while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
+ ++i < 10)
+ msleep(1);
+
+ if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
+ netdev_err(dev->netdev,
+ "iSCSI CLIENT_HALT did not complete\n");
+ cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
+
+ memset(&l5_data, 0, sizeof(l5_data));
+ cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
+ cid, NONE_CONNECTION_TYPE, &l5_data);
+ msleep(10);
+ }
+ clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
+ rx_ring = udev->l2_ring + BCM_PAGE_SIZE;
+ memset(rx_ring, 0, BCM_PAGE_SIZE);
+}
+
+static int cnic_register_netdev(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ int err;
+
+ if (!ethdev)
+ return -ENODEV;
+
+ if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
+ return 0;
+
+ err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
+ if (err)
+ netdev_err(dev->netdev, "register_cnic failed\n");
+
+ return err;
+}
+
+static void cnic_unregister_netdev(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+
+ if (!ethdev)
+ return;
+
+ ethdev->drv_unregister_cnic(dev->netdev);
+}
+
+static int cnic_start_hw(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ int err;
+
+ if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ return -EALREADY;
+
+ dev->regview = ethdev->io_base;
+ pci_dev_get(dev->pcidev);
+ cp->func = PCI_FUNC(dev->pcidev->devfn);
+ cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
+ cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
+
+ err = cp->alloc_resc(dev);
+ if (err) {
+ netdev_err(dev->netdev, "allocate resource failure\n");
+ goto err1;
+ }
+
+ err = cp->start_hw(dev);
+ if (err)
+ goto err1;
+
+ err = cnic_cm_open(dev);
+ if (err)
+ goto err1;
+
+ set_bit(CNIC_F_CNIC_UP, &dev->flags);
+
+ cp->enable_int(dev);
+
+ return 0;
+
+err1:
+ cp->free_resc(dev);
+ pci_dev_put(dev->pcidev);
+ return err;
+}
+
+static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
+{
+ cnic_disable_bnx2_int_sync(dev);
+
+ cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
+ cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
+
+ cnic_init_context(dev, KWQ_CID);
+ cnic_init_context(dev, KCQ_CID);
+
+ cnic_setup_5709_context(dev, 0);
+ cnic_free_irq(dev);
+
+ cnic_free_resc(dev);
+}
+
+static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+
+ cnic_free_irq(dev);
+ *cp->kcq1.hw_prod_idx_ptr = 0;
+ CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+ CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
+ CNIC_WR16(dev, cp->kcq1.io_addr, 0);
+ cnic_free_resc(dev);
+}
+
+static void cnic_stop_hw(struct cnic_dev *dev)
+{
+ if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+ struct cnic_local *cp = dev->cnic_priv;
+ int i = 0;
+
+ /* Need to wait for the ring shutdown event to complete
+ * before clearing the CNIC_UP flag.
+ */
+ while (cp->udev->uio_dev != -1 && i < 15) {
+ msleep(100);
+ i++;
+ }
+ cnic_shutdown_rings(dev);
+ clear_bit(CNIC_F_CNIC_UP, &dev->flags);
+ rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
+ synchronize_rcu();
+ cnic_cm_shutdown(dev);
+ cp->stop_hw(dev);
+ pci_dev_put(dev->pcidev);
+ }
+}
+
+static void cnic_free_dev(struct cnic_dev *dev)
+{
+ int i = 0;
+
+ while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
+ msleep(100);
+ i++;
+ }
+ if (atomic_read(&dev->ref_count) != 0)
+ netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
+
+ netdev_info(dev->netdev, "Removed CNIC device\n");
+ dev_put(dev->netdev);
+ kfree(dev);
+}
+
+static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
+ struct pci_dev *pdev)
+{
+ struct cnic_dev *cdev;
+ struct cnic_local *cp;
+ int alloc_size;
+
+ alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
+
+	cdev = kzalloc(alloc_size, GFP_KERNEL);
+ if (cdev == NULL) {
+ netdev_err(dev, "allocate dev struct failure\n");
+ return NULL;
+ }
+
+ cdev->netdev = dev;
+ cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
+ cdev->register_device = cnic_register_device;
+ cdev->unregister_device = cnic_unregister_device;
+ cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
+
+ cp = cdev->cnic_priv;
+ cp->dev = cdev;
+ cp->l2_single_buf_size = 0x400;
+ cp->l2_rx_ring_size = 3;
+
+ spin_lock_init(&cp->cnic_ulp_lock);
+
+ netdev_info(dev, "Added CNIC device\n");
+
+ return cdev;
+}
+
+static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
+{
+ struct pci_dev *pdev;
+ struct cnic_dev *cdev;
+ struct cnic_local *cp;
+ struct cnic_eth_dev *ethdev = NULL;
+ struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
+
+ probe = symbol_get(bnx2_cnic_probe);
+ if (probe) {
+ ethdev = (*probe)(dev);
+ symbol_put(bnx2_cnic_probe);
+ }
+ if (!ethdev)
+ return NULL;
+
+ pdev = ethdev->pdev;
+ if (!pdev)
+ return NULL;
+
+ dev_hold(dev);
+ pci_dev_get(pdev);
+ if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
+ pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
+ (pdev->revision < 0x10)) {
+ pci_dev_put(pdev);
+ goto cnic_err;
+ }
+ pci_dev_put(pdev);
+
+ cdev = cnic_alloc_dev(dev, pdev);
+ if (cdev == NULL)
+ goto cnic_err;
+
+ set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
+ cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
+
+ cp = cdev->cnic_priv;
+ cp->ethdev = ethdev;
+ cdev->pcidev = pdev;
+ cp->chip_id = ethdev->chip_id;
+
+ cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
+
+ cp->cnic_ops = &cnic_bnx2_ops;
+ cp->start_hw = cnic_start_bnx2_hw;
+ cp->stop_hw = cnic_stop_bnx2_hw;
+ cp->setup_pgtbl = cnic_setup_page_tbl;
+ cp->alloc_resc = cnic_alloc_bnx2_resc;
+ cp->free_resc = cnic_free_resc;
+ cp->start_cm = cnic_cm_init_bnx2_hw;
+ cp->stop_cm = cnic_cm_stop_bnx2_hw;
+ cp->enable_int = cnic_enable_bnx2_int;
+ cp->disable_int_sync = cnic_disable_bnx2_int_sync;
+ cp->close_conn = cnic_close_bnx2_conn;
+ return cdev;
+
+cnic_err:
+ dev_put(dev);
+ return NULL;
+}
+
+static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
+{
+ struct pci_dev *pdev;
+ struct cnic_dev *cdev;
+ struct cnic_local *cp;
+ struct cnic_eth_dev *ethdev = NULL;
+ struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
+
+ probe = symbol_get(bnx2x_cnic_probe);
+ if (probe) {
+ ethdev = (*probe)(dev);
+ symbol_put(bnx2x_cnic_probe);
+ }
+ if (!ethdev)
+ return NULL;
+
+ pdev = ethdev->pdev;
+ if (!pdev)
+ return NULL;
+
+ dev_hold(dev);
+ cdev = cnic_alloc_dev(dev, pdev);
+ if (cdev == NULL) {
+ dev_put(dev);
+ return NULL;
+ }
+
+ set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
+ cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
+
+ cp = cdev->cnic_priv;
+ cp->ethdev = ethdev;
+ cdev->pcidev = pdev;
+ cp->chip_id = ethdev->chip_id;
+
+ if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
+ cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
+ !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+ cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
+
+ if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
+ cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
+
+ memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
+
+ cp->cnic_ops = &cnic_bnx2x_ops;
+ cp->start_hw = cnic_start_bnx2x_hw;
+ cp->stop_hw = cnic_stop_bnx2x_hw;
+ cp->setup_pgtbl = cnic_setup_page_tbl_le;
+ cp->alloc_resc = cnic_alloc_bnx2x_resc;
+ cp->free_resc = cnic_free_resc;
+ cp->start_cm = cnic_cm_init_bnx2x_hw;
+ cp->stop_cm = cnic_cm_stop_bnx2x_hw;
+ cp->enable_int = cnic_enable_bnx2x_int;
+ cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+ cp->ack_int = cnic_ack_bnx2x_e2_msix;
+ else
+ cp->ack_int = cnic_ack_bnx2x_msix;
+ cp->close_conn = cnic_close_bnx2x_conn;
+ return cdev;
+}
+
+static struct cnic_dev *is_cnic_dev(struct net_device *dev)
+{
+ struct ethtool_drvinfo drvinfo;
+ struct cnic_dev *cdev = NULL;
+
+ if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
+ memset(&drvinfo, 0, sizeof(drvinfo));
+ dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
+
+ if (!strcmp(drvinfo.driver, "bnx2"))
+ cdev = init_bnx2_cnic(dev);
+ if (!strcmp(drvinfo.driver, "bnx2x"))
+ cdev = init_bnx2x_cnic(dev);
+ if (cdev) {
+ write_lock(&cnic_dev_lock);
+ list_add(&cdev->list, &cnic_dev_list);
+ write_unlock(&cnic_dev_lock);
+ }
+ }
+ return cdev;
+}
+
+static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
+ u16 vlan_id)
+{
+ int if_type;
+
+ rcu_read_lock();
+ for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+ struct cnic_ulp_ops *ulp_ops;
+ void *ctx;
+
+ ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
+ if (!ulp_ops || !ulp_ops->indicate_netevent)
+ continue;
+
+ ctx = cp->ulp_handle[if_type];
+
+ ulp_ops->indicate_netevent(ctx, event, vlan_id);
+ }
+ rcu_read_unlock();
+}
+
+/* netdev event handler */
+static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct net_device *netdev = ptr;
+ struct cnic_dev *dev;
+ int new_dev = 0;
+
+ dev = cnic_from_netdev(netdev);
+
+ if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
+ /* Check for the hot-plug device */
+ dev = is_cnic_dev(netdev);
+ if (dev) {
+ new_dev = 1;
+ cnic_hold(dev);
+ }
+ }
+ if (dev) {
+ struct cnic_local *cp = dev->cnic_priv;
+
+ if (new_dev)
+ cnic_ulp_init(dev);
+ else if (event == NETDEV_UNREGISTER)
+ cnic_ulp_exit(dev);
+
+ if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
+ if (cnic_register_netdev(dev) != 0) {
+ cnic_put(dev);
+ goto done;
+ }
+ if (!cnic_start_hw(dev))
+ cnic_ulp_start(dev);
+ }
+
+ cnic_rcv_netevent(cp, event, 0);
+
+ if (event == NETDEV_GOING_DOWN) {
+ cnic_ulp_stop(dev);
+ cnic_stop_hw(dev);
+ cnic_unregister_netdev(dev);
+ } else if (event == NETDEV_UNREGISTER) {
+ write_lock(&cnic_dev_lock);
+ list_del_init(&dev->list);
+ write_unlock(&cnic_dev_lock);
+
+ cnic_put(dev);
+ cnic_free_dev(dev);
+ goto done;
+ }
+ cnic_put(dev);
+ } else {
+ struct net_device *realdev;
+ u16 vid;
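+		/* The event may be for a VLAN device stacked on top of a
+		 * CNIC device; if so, deliver it to the real device's ULPs
+		 * together with the VLAN id.
+		 */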
+
+ vid = cnic_get_vlan(netdev, &realdev);
+ if (realdev) {
+ dev = cnic_from_netdev(realdev);
+ if (dev) {
+ vid |= VLAN_TAG_PRESENT;
+ cnic_rcv_netevent(dev->cnic_priv, event, vid);
+ cnic_put(dev);
+ }
+ }
+ }
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cnic_netdev_notifier = {
+ .notifier_call = cnic_netdev_event
+};
+
+static void cnic_release(void)
+{
+ struct cnic_dev *dev;
+ struct cnic_uio_dev *udev;
+
+ while (!list_empty(&cnic_dev_list)) {
+ dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
+ if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+ cnic_ulp_stop(dev);
+ cnic_stop_hw(dev);
+ }
+
+ cnic_ulp_exit(dev);
+ cnic_unregister_netdev(dev);
+ list_del_init(&dev->list);
+ cnic_free_dev(dev);
+ }
+ while (!list_empty(&cnic_udev_list)) {
+ udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
+ list);
+ cnic_free_uio(udev);
+ }
+}
+
+static int __init cnic_init(void)
+{
+ int rc = 0;
+
+ pr_info("%s", version);
+
+ rc = register_netdevice_notifier(&cnic_netdev_notifier);
+ if (rc) {
+ cnic_release();
+ return rc;
+ }
+
+ cnic_wq = create_singlethread_workqueue("cnic_wq");
+ if (!cnic_wq) {
+ cnic_release();
+ unregister_netdevice_notifier(&cnic_netdev_notifier);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void __exit cnic_exit(void)
+{
+ unregister_netdevice_notifier(&cnic_netdev_notifier);
+ cnic_release();
+ destroy_workqueue(cnic_wq);
+}
+
+module_init(cnic_init);
+module_exit(cnic_exit);
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
new file mode 100644
index 000000000000..30328097f516
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -0,0 +1,480 @@
+/* cnic.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+
+#ifndef CNIC_H
+#define CNIC_H
+
+#define HC_INDEX_ISCSI_EQ_CONS 6
+
+#define HC_INDEX_FCOE_EQ_CONS 3
+
+#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
+#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
+
+#define KWQ_PAGE_CNT 4
+#define KCQ_PAGE_CNT 16
+
+#define KWQ_CID 24
+#define KCQ_CID 25
+
+/*
+ * krnlq_context definition
+ */
+#define L5_KRNLQ_FLAGS 0x00000000
+#define L5_KRNLQ_SIZE 0x00000000
+#define L5_KRNLQ_TYPE 0x00000000
+#define KRNLQ_FLAGS_PG_SZ (0xf<<0)
+#define KRNLQ_FLAGS_PG_SZ_256 (0<<0)
+#define KRNLQ_FLAGS_PG_SZ_512 (1<<0)
+#define KRNLQ_FLAGS_PG_SZ_1K (2<<0)
+#define KRNLQ_FLAGS_PG_SZ_2K (3<<0)
+#define KRNLQ_FLAGS_PG_SZ_4K (4<<0)
+#define KRNLQ_FLAGS_PG_SZ_8K (5<<0)
+#define KRNLQ_FLAGS_PG_SZ_16K (6<<0)
+#define KRNLQ_FLAGS_PG_SZ_32K (7<<0)
+#define KRNLQ_FLAGS_PG_SZ_64K (8<<0)
+#define KRNLQ_FLAGS_PG_SZ_128K (9<<0)
+#define KRNLQ_FLAGS_PG_SZ_256K (10<<0)
+#define KRNLQ_FLAGS_PG_SZ_512K (11<<0)
+#define KRNLQ_FLAGS_PG_SZ_1M (12<<0)
+#define KRNLQ_FLAGS_PG_SZ_2M (13<<0)
+#define KRNLQ_FLAGS_QE_SELF_SEQ (1<<15)
+#define KRNLQ_SIZE_TYPE_SIZE ((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16)
+#define KRNLQ_TYPE_TYPE (0xf<<28)
+#define KRNLQ_TYPE_TYPE_EMPTY (0<<28)
+#define KRNLQ_TYPE_TYPE_KRNLQ (6<<28)
+
+#define L5_KRNLQ_HOST_QIDX 0x00000004
+#define L5_KRNLQ_HOST_FW_QIDX 0x00000008
+#define L5_KRNLQ_NX_QE_SELF_SEQ 0x0000000c
+#define L5_KRNLQ_QE_SELF_SEQ_MAX 0x0000000c
+#define L5_KRNLQ_NX_QE_HADDR_HI 0x00000010
+#define L5_KRNLQ_NX_QE_HADDR_LO 0x00000014
+#define L5_KRNLQ_PGTBL_PGIDX 0x00000018
+#define L5_KRNLQ_NX_PG_QIDX 0x00000018
+#define L5_KRNLQ_PGTBL_NPAGES 0x0000001c
+#define L5_KRNLQ_QIDX_INCR 0x0000001c
+#define L5_KRNLQ_PGTBL_HADDR_HI 0x00000020
+#define L5_KRNLQ_PGTBL_HADDR_LO 0x00000024
+
+#define BNX2_PG_CTX_MAP 0x1a0034
+#define BNX2_ISCSI_CTX_MAP 0x1a0074
+
+#define MAX_COMPLETED_KCQE 64
+
+#define MAX_CNIC_L5_CONTEXT 256
+
+#define MAX_CM_SK_TBL_SZ MAX_CNIC_L5_CONTEXT
+
+#define MAX_ISCSI_TBL_SZ 256
+
+#define CNIC_LOCAL_PORT_MIN 60000
+#define CNIC_LOCAL_PORT_MAX 61024
+#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
+
+#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe))
+#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe))
+#define MAX_KWQE_CNT (KWQE_CNT - 1)
+#define MAX_KCQE_CNT (KCQE_CNT - 1)
+
+#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1)
+#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1)
+
+#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5))
+#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
+
+#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5))
+#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
+
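+/* The last entry of each KCQ page is reserved (it holds the next-page
+ * pointer), so skip over it when the index lands just before it.
+ */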
+#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \
+ (MAX_KCQE_CNT - 1)) ? \
+ (x) + 2 : (x) + 1
+
+#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp)
+#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp)
+#define BNX2X_KWQ_DATA(cp, x) \
+ &(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
+
+#define DEF_IPID_START 0x8000
+
+#define DEF_KA_TIMEOUT 10000
+#define DEF_KA_INTERVAL 300000
+#define DEF_KA_MAX_PROBE_COUNT 3
+#define DEF_TOS 0
+#define DEF_TTL 0xfe
+#define DEF_SND_SEQ_SCALE 0
+#define DEF_RCV_BUF 0xffff
+#define DEF_SND_BUF 0xffff
+#define DEF_SEED 0
+#define DEF_MAX_RT_TIME 500
+#define DEF_MAX_DA_COUNT 2
+#define DEF_SWS_TIMER 1000
+#define DEF_MAX_CWND 0xffff
+
+struct cnic_ctx {
+ u32 cid;
+ void *ctx;
+ dma_addr_t mapping;
+};
+
+#define BNX2_MAX_CID 0x2000
+
+struct cnic_dma {
+ int num_pages;
+ void **pg_arr;
+ dma_addr_t *pg_map_arr;
+ int pgtbl_size;
+ u32 *pgtbl;
+ dma_addr_t pgtbl_map;
+};
+
+struct cnic_id_tbl {
+ spinlock_t lock;
+ u32 start;
+ u32 max;
+ u32 next;
+ unsigned long *table;
+};
+
+#define CNIC_KWQ16_DATA_SIZE 128
+
+struct kwqe_16_data {
+ u8 data[CNIC_KWQ16_DATA_SIZE];
+};
+
+struct cnic_iscsi {
+ struct cnic_dma task_array_info;
+ struct cnic_dma r2tq_info;
+ struct cnic_dma hq_info;
+};
+
+struct cnic_context {
+ u32 cid;
+ struct kwqe_16_data *kwqe_data;
+ dma_addr_t kwqe_data_mapping;
+ wait_queue_head_t waitq;
+ int wait_cond;
+ unsigned long timestamp;
+ unsigned long ctx_flags;
+#define CTX_FL_OFFLD_START 0
+#define CTX_FL_DELETE_WAIT 1
+#define CTX_FL_CID_ERROR 2
+ u8 ulp_proto_id;
+ union {
+ struct cnic_iscsi *iscsi;
+ } proto;
+};
+
+struct kcq_info {
+ struct cnic_dma dma;
+ struct kcqe **kcq;
+
+ u16 *hw_prod_idx_ptr;
+ u16 sw_prod_idx;
+ u16 *status_idx_ptr;
+ u32 io_addr;
+
+ u16 (*next_idx)(u16);
+ u16 (*hw_idx)(u16);
+};
+
+struct iro {
+ u32 base;
+ u16 m1;
+ u16 m2;
+ u16 m3;
+ u16 size;
+};
+
+struct cnic_uio_dev {
+ struct uio_info cnic_uinfo;
+ u32 uio_dev;
+
+ int l2_ring_size;
+ void *l2_ring;
+ dma_addr_t l2_ring_map;
+
+ int l2_buf_size;
+ void *l2_buf;
+ dma_addr_t l2_buf_map;
+
+ struct cnic_dev *dev;
+ struct pci_dev *pdev;
+ struct list_head list;
+};
+
+struct cnic_local {
+
+ spinlock_t cnic_ulp_lock;
+ void *ulp_handle[MAX_CNIC_ULP_TYPE];
+ unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
+#define ULP_F_INIT 0
+#define ULP_F_START 1
+#define ULP_F_CALL_PENDING 2
+ struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE];
+
+ unsigned long cnic_local_flags;
+#define CNIC_LCL_FL_KWQ_INIT 0x0
+#define CNIC_LCL_FL_L2_WAIT 0x1
+#define CNIC_LCL_FL_RINGS_INITED 0x2
+#define CNIC_LCL_FL_STOP_ISCSI 0x4
+
+ struct cnic_dev *dev;
+
+ struct cnic_eth_dev *ethdev;
+
+ struct cnic_uio_dev *udev;
+
+ int l2_rx_ring_size;
+ int l2_single_buf_size;
+
+ u16 *rx_cons_ptr;
+ u16 *tx_cons_ptr;
+ u16 rx_cons;
+ u16 tx_cons;
+
+ const struct iro *iro_arr;
+#define IRO (((struct cnic_local *) dev->cnic_priv)->iro_arr)
+
+ struct cnic_dma kwq_info;
+ struct kwqe **kwq;
+
+ struct cnic_dma kwq_16_data_info;
+
+ u16 max_kwq_idx;
+
+ u16 kwq_prod_idx;
+ u32 kwq_io_addr;
+
+ u16 *kwq_con_idx_ptr;
+ u16 kwq_con_idx;
+
+ struct kcq_info kcq1;
+ struct kcq_info kcq2;
+
+ union {
+ void *gen;
+ struct status_block_msix *bnx2;
+ struct host_hc_status_block_e1x *bnx2x_e1x;
+ /* index values - which counter to update */
+ #define SM_RX_ID 0
+ #define SM_TX_ID 1
+ } status_blk;
+
+ struct host_sp_status_block *bnx2x_def_status_blk;
+
+ u32 status_blk_num;
+ u32 bnx2x_igu_sb_id;
+ u32 int_num;
+ u32 last_status_idx;
+ struct tasklet_struct cnic_irq_task;
+
+ struct kcqe *completed_kcq[MAX_COMPLETED_KCQE];
+
+ struct cnic_sock *csk_tbl;
+ struct cnic_id_tbl csk_port_tbl;
+
+ struct cnic_dma gbl_buf_info;
+
+ struct cnic_iscsi *iscsi_tbl;
+ struct cnic_context *ctx_tbl;
+ struct cnic_id_tbl cid_tbl;
+ atomic_t iscsi_conn;
+ u32 iscsi_start_cid;
+
+ u32 fcoe_init_cid;
+ u32 fcoe_start_cid;
+ struct cnic_id_tbl fcoe_cid_tbl;
+
+ u32 max_cid_space;
+
+ /* per connection parameters */
+ int num_iscsi_tasks;
+ int num_ccells;
+ int task_array_size;
+ int r2tq_size;
+ int hq_size;
+ int num_cqs;
+
+ struct delayed_work delete_task;
+
+ struct cnic_ctx *ctx_arr;
+ int ctx_blks;
+ int ctx_blk_size;
+ unsigned long ctx_align;
+ int cids_per_blk;
+
+ u32 chip_id;
+ int func;
+ u32 pfid;
+ u8 port_mode;
+#define CHIP_4_PORT_MODE 0
+#define CHIP_2_PORT_MODE 1
+#define CHIP_PORT_MODE_NONE 2
+
+ u32 shmem_base;
+
+ struct cnic_ops *cnic_ops;
+ int (*start_hw)(struct cnic_dev *);
+ void (*stop_hw)(struct cnic_dev *);
+ void (*setup_pgtbl)(struct cnic_dev *,
+ struct cnic_dma *);
+ int (*alloc_resc)(struct cnic_dev *);
+ void (*free_resc)(struct cnic_dev *);
+ int (*start_cm)(struct cnic_dev *);
+ void (*stop_cm)(struct cnic_dev *);
+ void (*enable_int)(struct cnic_dev *);
+ void (*disable_int_sync)(struct cnic_dev *);
+ void (*ack_int)(struct cnic_dev *);
+ void (*close_conn)(struct cnic_sock *, u32 opcode);
+};
+
+struct bnx2x_bd_chain_next {
+ u32 addr_lo;
+ u32 addr_hi;
+ u8 reserved[8];
+};
+
+#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1)
+
+#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN)
+#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT)
+
+#define CDU_REGION_NUMBER_XCM_AG 2
+#define CDU_REGION_NUMBER_UCM_AG 4
+
+#define CDU_VALID_DATA(_cid, _region, _type) \
+ (((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf)))
+
+#define CDU_CRC8(_cid, _region, _type) \
+ (calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff))
+
+#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type) \
+ (0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f))
+
+#define BNX2X_CONTEXT_MEM_SIZE 1024
+#define BNX2X_FCOE_CID 16
+
+#define BNX2X_ISCSI_START_CID 18
+#define BNX2X_ISCSI_NUM_CONNECTIONS 128
+#define BNX2X_ISCSI_TASK_CONTEXT_SIZE 128
+#define BNX2X_ISCSI_MAX_PENDING_R2TS 4
+#define BNX2X_ISCSI_R2TQE_SIZE 8
+#define BNX2X_ISCSI_HQ_BD_SIZE 64
+#define BNX2X_ISCSI_GLB_BUF_SIZE 64
+#define BNX2X_ISCSI_PBL_NOT_CACHED 0xff
+#define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff
+
+#define BNX2X_FCOE_NUM_CONNECTIONS 1024
+
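+/* FCoE L5 CIDs apparently start right after the iSCSI table entries. */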
+#define BNX2X_FCOE_L5_CID_BASE MAX_ISCSI_TBL_SZ
+
+#define BNX2X_CHIP_NUM_57710 0x164e
+#define BNX2X_CHIP_NUM_57711 0x164f
+#define BNX2X_CHIP_NUM_57711E 0x1650
+#define BNX2X_CHIP_NUM_57712 0x1662
+#define BNX2X_CHIP_NUM_57712E 0x1663
+#define BNX2X_CHIP_NUM_57713 0x1651
+#define BNX2X_CHIP_NUM_57713E 0x1652
+#define BNX2X_CHIP_NUM_57800 0x168a
+#define BNX2X_CHIP_NUM_57810 0x168e
+#define BNX2X_CHIP_NUM_57840 0x168d
+
+#define BNX2X_CHIP_NUM(x) ((x) >> 16)
+#define BNX2X_CHIP_IS_57710(x) \
+ (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57710)
+#define BNX2X_CHIP_IS_57711(x) \
+ (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711)
+#define BNX2X_CHIP_IS_57711E(x) \
+ (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E)
+#define BNX2X_CHIP_IS_E1H(x) \
+ (BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x))
+#define BNX2X_CHIP_IS_57712(x) \
+ (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712)
+#define BNX2X_CHIP_IS_57712E(x) \
+ (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712E)
+#define BNX2X_CHIP_IS_57713(x) \
+ (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713)
+#define BNX2X_CHIP_IS_57713E(x) \
+ (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713E)
+#define BNX2X_CHIP_IS_57800(x) \
+ (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57800)
+#define BNX2X_CHIP_IS_57810(x) \
+ (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57810)
+#define BNX2X_CHIP_IS_57840(x) \
+ (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57840)
+#define BNX2X_CHIP_IS_E2(x) \
+ (BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \
+ BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x))
+#define BNX2X_CHIP_IS_E3(x) \
+ (BNX2X_CHIP_IS_57800(x) || BNX2X_CHIP_IS_57810(x) || \
+ BNX2X_CHIP_IS_57840(x))
+#define BNX2X_CHIP_IS_E2_PLUS(x) (BNX2X_CHIP_IS_E2(x) || BNX2X_CHIP_IS_E3(x))
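+
+/*
+ * Summary of the checks above: 57710 is E1, 57711/57711E are E1H, the
+ * 57712/57713 family is E2 and the 57800/57810/57840 family is E3;
+ * "E2 plus" covers both of the newer generations.
+ */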
+
+#define IS_E1H_OFFSET BNX2X_CHIP_IS_E1H(cp->chip_id)
+
+#define BNX2X_RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
+#define BNX2X_MAX_RX_DESC_CNT (BNX2X_RX_DESC_CNT - 2)
+#define BNX2X_RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
+#define BNX2X_MAX_RCQ_DESC_CNT (BNX2X_RCQ_DESC_CNT - 1)
+
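+/*
+ * The tail of each page is reserved for a next-page pointer (struct
+ * bnx2x_bd_chain_next above is 16 bytes, apparently two rx_bd slots or
+ * one RCQ entry), hence the trimmed usable counts and the extra skip in
+ * BNX2X_NEXT_RCQE at a page boundary.
+ */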
+#define BNX2X_NEXT_RCQE(x) ((((x) & BNX2X_MAX_RCQ_DESC_CNT) == \
+ (BNX2X_MAX_RCQ_DESC_CNT - 1)) ? \
+ ((x) + 2) : ((x) + 1))
+
+#define BNX2X_DEF_SB_ID HC_SP_SB_ID
+
+#define BNX2X_SHMEM_MF_BLK_OFFSET 0x7e4
+
+#define BNX2X_SHMEM_ADDR(base, field) (base + \
+ offsetof(struct shmem_region, field))
+
+#define BNX2X_SHMEM2_ADDR(base, field) (base + \
+ offsetof(struct shmem2_region, field))
+
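+/*
+ * A shmem2 field can evidently only be trusted when the size the chip
+ * reports at the start of shmem2 extends past that field's offset.
+ */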
+#define BNX2X_SHMEM2_HAS(base, field) \
+ ((base) && \
+ (CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base, size)) > \
+ offsetof(struct shmem2_region, field)))
+
+#define BNX2X_MF_CFG_ADDR(base, field) \
+ ((base) + offsetof(struct mf_cfg, field))
+
+#ifndef ETH_MAX_RX_CLIENTS_E2
+#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
+#endif
+
+#define CNIC_PORT(cp) ((cp)->pfid & 1)
+#define CNIC_FUNC(cp) ((cp)->func)
+#define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? \
+ 0 : (CNIC_FUNC(cp) & 1))
+#define CNIC_E1HVN(cp) ((cp)->pfid >> 1)
+
+#define BNX2X_HW_CID(cp, x) ((CNIC_PORT(cp) << 23) | \
+ (CNIC_E1HVN(cp) << 17) | (x))
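+
+/*
+ * Illustrative expansion (not taken from the driver): with pfid 5,
+ * CNIC_PORT is 1 and CNIC_E1HVN is 2, so BNX2X_HW_CID(cp, 5) =
+ * (1 << 23) | (2 << 17) | 5 = 0x840005; BNX2X_SW_CID below recovers the
+ * 17-bit software value 5.
+ */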
+
+#define BNX2X_SW_CID(x) ((x) & 0x1ffff)
+
+#define BNX2X_CL_QZONE_ID(cp, cli) \
+ (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? cli : \
+ cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H))
+
+#ifndef MAX_STAT_COUNTER_ID
+#define MAX_STAT_COUNTER_ID \
+ (BNX2X_CHIP_IS_E1H((cp)->chip_id) ? MAX_STAT_COUNTER_ID_E1H : \
+ ((BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id)) ? MAX_STAT_COUNTER_ID_E2 :\
+ MAX_STAT_COUNTER_ID_E1))
+#endif
+
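+/* Ramrod completion timeout: HZ / 4 jiffies, i.e. a quarter of a second. */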
+#define CNIC_RAMROD_TMO (HZ / 4)
+
+#endif
+
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
new file mode 100644
index 000000000000..239de898f071
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -0,0 +1,5485 @@
+
+/* cnic_defs.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#ifndef CNIC_DEFS_H
+#define CNIC_DEFS_H
+
+/* KWQ (kernel work queue) request op codes */
+#define L2_KWQE_OPCODE_VALUE_FLUSH (4)
+#define L2_KWQE_OPCODE_VALUE_VM_FREE_RX_QUEUE (8)
+
+#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50)
+#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51)
+#define L4_KWQE_OPCODE_VALUE_CONNECT3 (52)
+#define L4_KWQE_OPCODE_VALUE_RESET (53)
+#define L4_KWQE_OPCODE_VALUE_CLOSE (54)
+#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET (60)
+#define L4_KWQE_OPCODE_VALUE_INIT_ULP (61)
+
+#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG (1)
+#define L4_KWQE_OPCODE_VALUE_UPDATE_PG (9)
+#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG (14)
+
+#define L5CM_RAMROD_CMD_ID_BASE (0x80)
+#define L5CM_RAMROD_CMD_ID_TCP_CONNECT (L5CM_RAMROD_CMD_ID_BASE + 3)
+#define L5CM_RAMROD_CMD_ID_CLOSE (L5CM_RAMROD_CMD_ID_BASE + 12)
+#define L5CM_RAMROD_CMD_ID_ABORT (L5CM_RAMROD_CMD_ID_BASE + 13)
+#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14)
+#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15)
+
+#define FCOE_KCQE_OPCODE_INIT_FUNC (0x10)
+#define FCOE_KCQE_OPCODE_DESTROY_FUNC (0x11)
+#define FCOE_KCQE_OPCODE_STAT_FUNC (0x12)
+#define FCOE_KCQE_OPCODE_OFFLOAD_CONN (0x15)
+#define FCOE_KCQE_OPCODE_ENABLE_CONN (0x16)
+#define FCOE_KCQE_OPCODE_DISABLE_CONN (0x17)
+#define FCOE_KCQE_OPCODE_DESTROY_CONN (0x18)
+#define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
+#define FCOE_KCQE_OPCODE_FCOE_ERROR (0x21)
+
+#define FCOE_RAMROD_CMD_ID_INIT_FUNC (FCOE_KCQE_OPCODE_INIT_FUNC)
+#define FCOE_RAMROD_CMD_ID_DESTROY_FUNC (FCOE_KCQE_OPCODE_DESTROY_FUNC)
+#define FCOE_RAMROD_CMD_ID_STAT_FUNC (FCOE_KCQE_OPCODE_STAT_FUNC)
+#define FCOE_RAMROD_CMD_ID_OFFLOAD_CONN (FCOE_KCQE_OPCODE_OFFLOAD_CONN)
+#define FCOE_RAMROD_CMD_ID_ENABLE_CONN (FCOE_KCQE_OPCODE_ENABLE_CONN)
+#define FCOE_RAMROD_CMD_ID_DISABLE_CONN (FCOE_KCQE_OPCODE_DISABLE_CONN)
+#define FCOE_RAMROD_CMD_ID_DESTROY_CONN (FCOE_KCQE_OPCODE_DESTROY_CONN)
+#define FCOE_RAMROD_CMD_ID_TERMINATE_CONN (0x81)
+
+#define FCOE_KWQE_OPCODE_INIT1 (0)
+#define FCOE_KWQE_OPCODE_INIT2 (1)
+#define FCOE_KWQE_OPCODE_INIT3 (2)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN1 (3)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN2 (4)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN3 (5)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN4 (6)
+#define FCOE_KWQE_OPCODE_ENABLE_CONN (7)
+#define FCOE_KWQE_OPCODE_DISABLE_CONN (8)
+#define FCOE_KWQE_OPCODE_DESTROY_CONN (9)
+#define FCOE_KWQE_OPCODE_DESTROY (10)
+#define FCOE_KWQE_OPCODE_STAT (11)
+
+#define FCOE_KCQE_COMPLETION_STATUS_ERROR (0x1)
+#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3)
+
+/* KCQ (kernel completion queue) response op codes */
+#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53)
+#define L4_KCQE_OPCODE_VALUE_RESET_COMP (54)
+#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE (55)
+#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE (56)
+#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED (57)
+#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED (58)
+#define L4_KCQE_OPCODE_VALUE_INIT_ULP (61)
+
+#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG (1)
+#define L4_KCQE_OPCODE_VALUE_UPDATE_PG (9)
+#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14)
+
+/* KCQ (kernel completion queue) completion status */
+#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
+#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
+
+#define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83)
+#define L4_KCQE_COMPLETION_STATUS_OFFLOADED_PG (0x89)
+
+#define L4_KCQE_OPCODE_VALUE_OOO_EVENT_NOTIFICATION (0xa0)
+#define L4_KCQE_OPCODE_VALUE_OOO_FLUSH (0xa1)
+
+#define L4_LAYER_CODE (4)
+#define L2_LAYER_CODE (2)
+
+/*
+ * L4 KCQ CQE
+ */
+struct l4_kcq {
+ u32 cid;
+ u32 pg_cid;
+ u32 conn_id;
+ u32 pg_host_opaque;
+#if defined(__BIG_ENDIAN)
+ u16 status;
+ u16 reserved1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved1;
+ u16 status;
+#endif
+ u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KCQ_RESERVED3 (0x7<<0)
+#define L4_KCQ_RESERVED3_SHIFT 0
+#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
+#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
+#define L4_KCQ_LAYER_CODE (0x7<<4)
+#define L4_KCQ_LAYER_CODE_SHIFT 4
+#define L4_KCQ_RESERVED4 (0x1<<7)
+#define L4_KCQ_RESERVED4_SHIFT 7
+ u8 op_code;
+ u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+ u16 qe_self_seq;
+ u8 op_code;
+ u8 flags;
+#define L4_KCQ_RESERVED3 (0x7<<0)
+#define L4_KCQ_RESERVED3_SHIFT 0
+#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
+#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
+#define L4_KCQ_LAYER_CODE (0x7<<4)
+#define L4_KCQ_LAYER_CODE_SHIFT 4
+#define L4_KCQ_RESERVED4 (0x1<<7)
+#define L4_KCQ_RESERVED4_SHIFT 7
+#endif
+};
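+
+/*
+ * Layout note that applies to every structure in this file: each field
+ * group and its bit masks are written out twice, byte-reversed, under
+ * __BIG_ENDIAN and __LITTLE_ENDIAN, presumably so the in-memory layout
+ * matches what the hardware expects on either host endianness.
+ */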
+
+
+/*
+ * L4 KCQ CQE PG upload
+ */
+struct l4_kcq_upload_pg {
+ u32 pg_cid;
+#if defined(__BIG_ENDIAN)
+ u16 pg_status;
+ u16 pg_ipid_count;
+#elif defined(__LITTLE_ENDIAN)
+ u16 pg_ipid_count;
+ u16 pg_status;
+#endif
+ u32 reserved1[5];
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
+#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
+#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
+ u8 op_code;
+ u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+ u16 qe_self_seq;
+ u8 op_code;
+ u8 flags;
+#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
+#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
+#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
+#endif
+};
+
+
+/*
+ * A KWQE request to gracefully close the connection
+ */
+struct l4_kwq_close_req {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_code;
+ u8 flags;
+#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
+#endif
+ u32 cid;
+ u32 reserved2[6];
+};
+
+
+/*
+ * The first request to be passed in order to establish a connection in option2
+ */
+struct l4_kwq_connect_req1 {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u8 reserved0;
+ u8 conn_flags;
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
+#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+ u8 conn_flags;
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
+#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
+ u8 reserved0;
+ u8 op_code;
+ u8 flags;
+#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
+#endif
+ u32 cid;
+ u32 pg_cid;
+ u32 src_ip;
+ u32 dst_ip;
+#if defined(__BIG_ENDIAN)
+ u16 dst_port;
+ u16 src_port;
+#elif defined(__LITTLE_ENDIAN)
+ u16 src_port;
+ u16 dst_port;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 rsrv1[3];
+ u8 tcp_flags;
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
+#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
+#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
+#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
+#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+ u8 tcp_flags;
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
+#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
+#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
+#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
+#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
+ u8 rsrv1[3];
+#endif
+ u32 rsrv2;
+};
+
+
+/*
+ * The second (optional) request to be passed in order to establish a
+ * connection in option2 - for IPv6 only
+ */
+struct l4_kwq_connect_req2 {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u8 reserved0;
+ u8 rsrv;
+#elif defined(__LITTLE_ENDIAN)
+ u8 rsrv;
+ u8 reserved0;
+ u8 op_code;
+ u8 flags;
+#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
+#endif
+ u32 reserved2;
+ u32 src_ip_v6_2;
+ u32 src_ip_v6_3;
+ u32 src_ip_v6_4;
+ u32 dst_ip_v6_2;
+ u32 dst_ip_v6_3;
+ u32 dst_ip_v6_4;
+};
+
+
+/*
+ * The third (and last) request to be passed in order to establish a
+ * connection in option2
+ */
+struct l4_kwq_connect_req3 {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_code;
+ u8 flags;
+#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
+#endif
+ u32 ka_timeout;
+ u32 ka_interval;
+#if defined(__BIG_ENDIAN)
+ u8 snd_seq_scale;
+ u8 ttl;
+ u8 tos;
+ u8 ka_max_probe_count;
+#elif defined(__LITTLE_ENDIAN)
+ u8 ka_max_probe_count;
+ u8 tos;
+ u8 ttl;
+ u8 snd_seq_scale;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 pmtu;
+ u16 mss;
+#elif defined(__LITTLE_ENDIAN)
+ u16 mss;
+ u16 pmtu;
+#endif
+ u32 rcv_buf;
+ u32 snd_buf;
+ u32 seed;
+};
+
+
+/*
+ * a KWQE request to offload a PG connection
+ */
+struct l4_kwq_offload_pg {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_code;
+ u8 flags;
+#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 l2hdr_nbytes;
+ u8 pg_flags;
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
+#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
+#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
+ u8 da0;
+ u8 da1;
+#elif defined(__LITTLE_ENDIAN)
+ u8 da1;
+ u8 da0;
+ u8 pg_flags;
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
+#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
+#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
+ u8 l2hdr_nbytes;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 da2;
+ u8 da3;
+ u8 da4;
+ u8 da5;
+#elif defined(__LITTLE_ENDIAN)
+ u8 da5;
+ u8 da4;
+ u8 da3;
+ u8 da2;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 sa0;
+ u8 sa1;
+ u8 sa2;
+ u8 sa3;
+#elif defined(__LITTLE_ENDIAN)
+ u8 sa3;
+ u8 sa2;
+ u8 sa1;
+ u8 sa0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 sa4;
+ u8 sa5;
+ u16 etype;
+#elif defined(__LITTLE_ENDIAN)
+ u16 etype;
+ u8 sa5;
+ u8 sa4;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 vlan_tag;
+ u16 ipid_start;
+#elif defined(__LITTLE_ENDIAN)
+ u16 ipid_start;
+ u16 vlan_tag;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 ipid_count;
+ u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved3;
+ u16 ipid_count;
+#endif
+ u32 host_opaque;
+};
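+
+/*
+ * In the structure above, da0..da5 and sa0..sa5 are evidently the
+ * destination and source MAC bytes of the l2hdr_nbytes-long L2 header
+ * (with etype and an optional vlan_tag) prepended to frames for
+ * connections riding on this PG.
+ */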
+
+
+/*
+ * A KWQE request to abortively close the connection
+ */
+struct l4_kwq_reset_req {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ u8 op_code;
+ u8 flags;
+#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
+#endif
+ u32 cid;
+ u32 reserved2[6];
+};
+
+
+/*
+ * a KWQE request to update a PG connection
+ */
+struct l4_kwq_update_pg {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
+ u8 opcode;
+ u16 oper16;
+#elif defined(__LITTLE_ENDIAN)
+ u16 oper16;
+ u8 opcode;
+ u8 flags;
+#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
+#endif
+ u32 pg_cid;
+ u32 pg_host_opaque;
+#if defined(__BIG_ENDIAN)
+ u8 pg_valids;
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
+#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
+#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
+#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
+#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
+ u8 pg_unused_a;
+ u16 pg_ipid_count;
+#elif defined(__LITTLE_ENDIAN)
+ u16 pg_ipid_count;
+ u8 pg_unused_a;
+ u8 pg_valids;
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
+#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
+#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
+#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
+#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 reserverd3;
+ u8 da0;
+ u8 da1;
+#elif defined(__LITTLE_ENDIAN)
+ u8 da1;
+ u8 da0;
+ u16 reserverd3;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 da2;
+ u8 da3;
+ u8 da4;
+ u8 da5;
+#elif defined(__LITTLE_ENDIAN)
+ u8 da5;
+ u8 da4;
+ u8 da3;
+ u8 da2;
+#endif
+ u32 reserved4;
+ u32 reserved5;
+};
+
+
+/*
+ * a KWQE request to upload a PG or L4 context
+ */
+struct l4_kwq_upload {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
+#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
+ u8 opcode;
+ u16 oper16;
+#elif defined(__LITTLE_ENDIAN)
+ u16 oper16;
+ u8 opcode;
+ u8 flags;
+#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
+#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
+#endif
+ u32 cid;
+ u32 reserved2[6];
+};
+
+/*
+ * bnx2x structures
+ */
+
+/*
+ * The iscsi aggregative context of Cstorm
+ */
+struct cstorm_iscsi_ag_context {
+ u32 agg_vars1;
+#define CSTORM_ISCSI_AG_CONTEXT_STATE (0xFF<<0)
+#define CSTORM_ISCSI_AG_CONTEXT_STATE_SHIFT 0
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<8)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 8
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<9)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 9
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<10)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 10
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<11)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 11
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN (0x1<<12)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<14)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 14
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16
+#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18)
+#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<19)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 19
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX2_CF_EN (0x1<<20)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX2_CF_EN_SHIFT 20
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<21)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 21
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<22)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 22
+#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23)
+#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23
+#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26)
+#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE_SHIFT 26
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52 (0x3<<28)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52_SHIFT 28
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53 (0x3<<30)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53_SHIFT 30
+#if defined(__BIG_ENDIAN)
+ u8 __aux1_th;
+ u8 __aux1_val;
+ u16 __agg_vars2;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __agg_vars2;
+ u8 __aux1_val;
+ u8 __aux1_th;
+#endif
+ u32 rel_seq;
+ u32 rel_seq_th;
+#if defined(__BIG_ENDIAN)
+ u16 hq_cons;
+ u16 hq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 hq_prod;
+ u16 hq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 __reserved62;
+ u8 __reserved61;
+ u8 __reserved60;
+ u8 __reserved59;
+#elif defined(__LITTLE_ENDIAN)
+ u8 __reserved59;
+ u8 __reserved60;
+ u8 __reserved61;
+ u8 __reserved62;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __reserved64;
+ u16 cq_u_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cq_u_prod;
+ u16 __reserved64;
+#endif
+ u32 __cq_u_prod1;
+#if defined(__BIG_ENDIAN)
+ u16 __agg_vars3;
+ u16 cq_u_pend;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cq_u_pend;
+ u16 __agg_vars3;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __aux2_th;
+ u16 aux2_val;
+#elif defined(__LITTLE_ENDIAN)
+ u16 aux2_val;
+ u16 __aux2_th;
+#endif
+};
+
+/*
+ * The fcoe extra aggregative context section of Tstorm
+ */
+struct tstorm_fcoe_extra_ag_context_section {
+ u32 __agg_val1;
+#if defined(__BIG_ENDIAN)
+ u8 __tcp_agg_vars2;
+ u8 __agg_val3;
+ u16 __agg_val2;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __agg_val2;
+ u8 __agg_val3;
+ u8 __tcp_agg_vars2;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __agg_val5;
+ u8 __agg_val6;
+ u8 __tcp_agg_vars3;
+#elif defined(__LITTLE_ENDIAN)
+ u8 __tcp_agg_vars3;
+ u8 __agg_val6;
+ u16 __agg_val5;
+#endif
+ u32 __lcq_prod;
+ u32 rtt_seq;
+ u32 rtt_time;
+ u32 __reserved66;
+ u32 wnd_right_edge;
+ u32 tcp_agg_vars1;
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN (0x1<<9)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN_SHIFT 9
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
+ u32 snd_max;
+ u32 __lcq_cons;
+ u32 __reserved2;
+};
+
+/*
+ * The fcoe aggregative context of Tstorm
+ */
+struct tstorm_fcoe_ag_context {
+#if defined(__BIG_ENDIAN)
+ u16 ulp_credit;
+ u8 agg_vars1;
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7
+ u8 state;
+#elif defined(__LITTLE_ENDIAN)
+ u8 state;
+ u8 agg_vars1;
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7
+ u16 ulp_credit;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __agg_val4;
+ u16 agg_vars2;
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+#elif defined(__LITTLE_ENDIAN)
+ u16 agg_vars2;
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+ u16 __agg_val4;
+#endif
+ struct tstorm_fcoe_extra_ag_context_section __extra_section;
+};
+
+
+
+/*
+ * The tcp aggregative context section of Tstorm
+ */
+struct tstorm_tcp_tcp_ag_context_section {
+ u32 __agg_val1;
+#if defined(__BIG_ENDIAN)
+ u8 __tcp_agg_vars2;
+ u8 __agg_val3;
+ u16 __agg_val2;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __agg_val2;
+ u8 __agg_val3;
+ u8 __tcp_agg_vars2;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __agg_val5;
+ u8 __agg_val6;
+ u8 __tcp_agg_vars3;
+#elif defined(__LITTLE_ENDIAN)
+ u8 __tcp_agg_vars3;
+ u8 __agg_val6;
+ u16 __agg_val5;
+#endif
+ u32 snd_nxt;
+ u32 rtt_seq;
+ u32 rtt_time;
+ u32 __reserved66;
+ u32 wnd_right_edge;
+ u32 tcp_agg_vars1;
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN (0x1<<9)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN_SHIFT 9
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
+ u32 snd_max;
+ u32 snd_una;
+ u32 __reserved2;
+};
+
+/*
+ * The iscsi aggregative context of Tstorm
+ */
+struct tstorm_iscsi_ag_context {
+#if defined(__BIG_ENDIAN)
+ u16 ulp_credit;
+ u8 agg_vars1;
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
+ u8 state;
+#elif defined(__LITTLE_ENDIAN)
+ u8 state;
+ u8 agg_vars1;
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
+ u16 ulp_credit;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __agg_val4;
+ u16 agg_vars2;
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+#elif defined(__LITTLE_ENDIAN)
+ u16 agg_vars2;
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+ u16 __agg_val4;
+#endif
+ struct tstorm_tcp_tcp_ag_context_section tcp;
+};
+
+
+
+/*
+ * The fcoe aggregative context of Ustorm
+ */
+struct ustorm_fcoe_ag_context {
+#if defined(__BIG_ENDIAN)
+ u8 __aux_counter_flags;
+ u8 agg_vars2;
+#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+ u8 agg_vars1;
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+ u8 state;
+#elif defined(__LITTLE_ENDIAN)
+ u8 state;
+ u8 agg_vars1;
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+ u8 agg_vars2;
+#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+ u8 __aux_counter_flags;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 cdu_usage;
+ u8 agg_misc2;
+ u16 pbf_tx_seq_ack;
+#elif defined(__LITTLE_ENDIAN)
+ u16 pbf_tx_seq_ack;
+ u8 agg_misc2;
+ u8 cdu_usage;
+#endif
+ u32 agg_misc4;
+#if defined(__BIG_ENDIAN)
+ u8 agg_val3_th;
+ u8 agg_val3;
+ u16 agg_misc3;
+#elif defined(__LITTLE_ENDIAN)
+ u16 agg_misc3;
+ u8 agg_val3;
+ u8 agg_val3_th;
+#endif
+ u32 expired_task_id;
+ u32 agg_misc4_th;
+#if defined(__BIG_ENDIAN)
+ u16 cq_prod;
+ u16 cq_cons;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cq_cons;
+ u16 cq_prod;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __reserved2;
+ u8 decision_rules;
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0)
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6)
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7
+ u8 decision_rule_enable_bits;
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4)
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5)
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+ u8 decision_rule_enable_bits;
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4)
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5)
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+ u8 decision_rules;
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0)
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6)
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7
+ u16 __reserved2;
+#endif
+};
+
+
+/*
+ * The iscsi aggregative context of Ustorm
+ */
+struct ustorm_iscsi_ag_context {
+#if defined(__BIG_ENDIAN)
+ u8 __aux_counter_flags;
+ u8 agg_vars2;
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+ u8 agg_vars1;
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+ u8 state;
+#elif defined(__LITTLE_ENDIAN)
+ u8 state;
+ u8 agg_vars1;
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+ u8 agg_vars2;
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+ u8 __aux_counter_flags;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 cdu_usage;
+ u8 agg_misc2;
+ u16 __cq_local_comp_itt_val;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __cq_local_comp_itt_val;
+ u8 agg_misc2;
+ u8 cdu_usage;
+#endif
+ u32 agg_misc4;
+#if defined(__BIG_ENDIAN)
+ u8 agg_val3_th;
+ u8 agg_val3;
+ u16 agg_misc3;
+#elif defined(__LITTLE_ENDIAN)
+ u16 agg_misc3;
+ u8 agg_val3;
+ u8 agg_val3_th;
+#endif
+ u32 agg_val1;
+ u32 agg_misc4_th;
+#if defined(__BIG_ENDIAN)
+ u16 agg_val2_th;
+ u16 agg_val2;
+#elif defined(__LITTLE_ENDIAN)
+ u16 agg_val2;
+ u16 agg_val2_th;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __reserved2;
+ u8 decision_rules;
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
+ u8 decision_rule_enable_bits;
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+ u8 decision_rule_enable_bits;
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+ u8 decision_rules;
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
+ u16 __reserved2;
+#endif
+};
+
+
+/*
+ * The fcoe aggregative context section of Xstorm
+ */
+struct xstorm_fcoe_extra_ag_context_section {
+#if defined(__BIG_ENDIAN)
+ u8 tcp_agg_vars1;
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51 (0x3<<0)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7
+ u8 __reserved_da_cnt;
+ u16 __mtu;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __mtu;
+ u8 __reserved_da_cnt;
+ u8 tcp_agg_vars1;
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51 (0x3<<0)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7
+#endif
+ u32 snd_nxt;
+ u32 tx_wnd;
+ u32 __reserved55;
+ u32 local_adv_wnd;
+#if defined(__BIG_ENDIAN)
+ u8 __agg_val8_th;
+ u8 __tx_dest;
+ u16 tcp_agg_vars2;
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57_SHIFT 0
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58 (0x1<<1)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58_SHIFT 1
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59 (0x1<<2)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59_SHIFT 2
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60 (0x1<<5)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 tcp_agg_vars2;
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57_SHIFT 0
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58 (0x1<<1)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58_SHIFT 1
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59 (0x1<<2)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59_SHIFT 2
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60 (0x1<<5)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14
+ u8 __tx_dest;
+ u8 __agg_val8_th;
+#endif
+ u32 __sq_base_addr_lo;
+ u32 __sq_base_addr_hi;
+ u32 __xfrq_base_addr_lo;
+ u32 __xfrq_base_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 __xfrq_cons;
+ u16 __xfrq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __xfrq_prod;
+ u16 __xfrq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 __tcp_agg_vars5;
+ u8 __tcp_agg_vars4;
+ u8 __tcp_agg_vars3;
+ u8 __reserved_force_pure_ack_cnt;
+#elif defined(__LITTLE_ENDIAN)
+ u8 __reserved_force_pure_ack_cnt;
+ u8 __tcp_agg_vars3;
+ u8 __tcp_agg_vars4;
+ u8 __tcp_agg_vars5;
+#endif
+ u32 __tcp_agg_vars6;
+#if defined(__BIG_ENDIAN)
+ u16 __agg_misc6;
+ u16 __tcp_agg_vars7;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __tcp_agg_vars7;
+ u16 __agg_misc6;
+#endif
+ u32 __agg_val10;
+ u32 __agg_val10_th;
+#if defined(__BIG_ENDIAN)
+ u16 __reserved3;
+ u8 __reserved2;
+ u8 __da_only_cnt;
+#elif defined(__LITTLE_ENDIAN)
+ u8 __da_only_cnt;
+ u8 __reserved2;
+ u16 __reserved3;
+#endif
+};
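The MASK/SHIFT #define pairs above follow one convention throughout this header: the mask value is pre-shifted into position, and the companion _SHIFT constant moves the sub-field down to bit 0. A minimal sketch of the resulting accessor idiom (the helper names are hypothetical, not from this file):

#include <linux/types.h>

/* Read a sub-field: mask in place, then shift down to bit 0. */
static inline u8 get_sideband_sent_cf(u8 tcp_agg_vars1)
{
	return (tcp_agg_vars1 &
		XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF) >>
	       XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT;
}

/* Write a sub-field: clear the masked bits, then OR in the new value. */
static inline u8 set_sideband_sent_cf(u8 tcp_agg_vars1, u8 val)
{
	tcp_agg_vars1 &= ~XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF;
	tcp_agg_vars1 |= (val <<
		XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT) &
		XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF;
	return tcp_agg_vars1;
}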
+
+/*
+ * The fcoe aggregative context of Xstorm
+ */
+struct xstorm_fcoe_ag_context {
+#if defined(__BIG_ENDIAN)
+ u16 agg_val1;
+ u8 agg_vars1;
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51 (0x1<<2)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51_SHIFT 2
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52 (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN_SHIFT 7
+ u8 __state;
+#elif defined(__LITTLE_ENDIAN)
+ u8 __state;
+ u8 agg_vars1;
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51 (0x1<<2)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51_SHIFT 2
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52 (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN_SHIFT 7
+ u16 agg_val1;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 cdu_reserved;
+ u8 __agg_vars4;
+ u8 agg_vars3;
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF (0x3<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF_SHIFT 6
+ u8 agg_vars2;
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF (0x3<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+ u8 agg_vars2;
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF (0x3<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+ u8 agg_vars3;
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF (0x3<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF_SHIFT 6
+ u8 __agg_vars4;
+ u8 cdu_reserved;
+#endif
+ u32 more_to_send;
+#if defined(__BIG_ENDIAN)
+ u16 agg_vars5;
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE (0x3<<14)
+#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE_SHIFT 14
+ u16 sq_cons;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sq_cons;
+ u16 agg_vars5;
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE (0x3<<14)
+#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE_SHIFT 14
+#endif
+ struct xstorm_fcoe_extra_ag_context_section __extra_section;
+#if defined(__BIG_ENDIAN)
+ u16 agg_vars7;
+#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF (0x3<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62 (0x1<<10)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62_SHIFT 10
+#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG (0x1<<15)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG_SHIFT 15
+ u8 agg_val3_th;
+ u8 agg_vars6;
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE (0x7<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE (0x3<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+ u8 agg_vars6;
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE (0x7<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE (0x3<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE_SHIFT 6
+ u8 agg_val3_th;
+ u16 agg_vars7;
+#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF (0x3<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62 (0x1<<10)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62_SHIFT 10
+#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG (0x1<<15)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG_SHIFT 15
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __agg_val11_th;
+ u16 __agg_val11;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __agg_val11;
+ u16 __agg_val11_th;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 __reserved1;
+ u8 __agg_val6_th;
+ u16 __agg_val9;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __agg_val9;
+ u8 __agg_val6_th;
+ u8 __reserved1;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 confq_cons;
+ u16 confq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 confq_prod;
+ u16 confq_cons;
+#endif
+ u32 agg_vars8;
+#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0)
+#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC2_SHIFT 0
+#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
+#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3_SHIFT 24
+#if defined(__BIG_ENDIAN)
+ u16 agg_misc0;
+ u16 sq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sq_prod;
+ u16 agg_misc0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 agg_val3;
+ u8 agg_val6;
+ u8 agg_val5_th;
+ u8 agg_val5;
+#elif defined(__LITTLE_ENDIAN)
+ u8 agg_val5;
+ u8 agg_val5_th;
+ u8 agg_val6;
+ u8 agg_val3;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __agg_misc1;
+ u16 agg_limit1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 agg_limit1;
+ u16 __agg_misc1;
+#endif
+ u32 completion_seq;
+ u32 confq_pbl_base_lo;
+ u32 confq_pbl_base_hi;
+};
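Each #if defined(__BIG_ENDIAN)/#elif defined(__LITTLE_ENDIAN) pair above lists the same members of one 32-bit word in opposite order, so the byte image the firmware sees is identical on either kind of host. One consequence is that the structure size is byte-order independent; a hypothetical compile-time sanity check a driver might carry:

#include <linux/kernel.h>

static inline void xstorm_fcoe_ag_layout_check(void)
{
	/* Both structures are built from whole 32-bit words. */
	BUILD_BUG_ON(sizeof(struct xstorm_fcoe_extra_ag_context_section) & 3);
	BUILD_BUG_ON(sizeof(struct xstorm_fcoe_ag_context) & 3);
}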
+
+
+/*
+ * The tcp aggregative context section of Xstorm
+ */
+struct xstorm_tcp_tcp_ag_context_section {
+#if defined(__BIG_ENDIAN)
+ u8 tcp_agg_vars1;
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF (0x3<<0)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN (0x1<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN_SHIFT 6
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG (0x1<<7)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG_SHIFT 7
+ u8 __da_cnt;
+ u16 mss;
+#elif defined(__LITTLE_ENDIAN)
+ u16 mss;
+ u8 __da_cnt;
+ u8 tcp_agg_vars1;
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF (0x3<<0)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN (0x1<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN_SHIFT 6
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG (0x1<<7)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG_SHIFT 7
+#endif
+ u32 snd_nxt;
+ u32 tx_wnd;
+ u32 snd_una;
+ u32 local_adv_wnd;
+#if defined(__BIG_ENDIAN)
+ u8 __agg_val8_th;
+ u8 __tx_dest;
+ u16 tcp_agg_vars2;
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 tcp_agg_vars2;
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14
+ u8 __tx_dest;
+ u8 __agg_val8_th;
+#endif
+ u32 ack_to_far_end;
+ u32 rto_timer;
+ u32 ka_timer;
+ u32 ts_to_echo;
+#if defined(__BIG_ENDIAN)
+ u16 __agg_val7_th;
+ u16 __agg_val7;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __agg_val7;
+ u16 __agg_val7_th;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 __tcp_agg_vars5;
+ u8 __tcp_agg_vars4;
+ u8 __tcp_agg_vars3;
+ u8 __force_pure_ack_cnt;
+#elif defined(__LITTLE_ENDIAN)
+ u8 __force_pure_ack_cnt;
+ u8 __tcp_agg_vars3;
+ u8 __tcp_agg_vars4;
+ u8 __tcp_agg_vars5;
+#endif
+ u32 tcp_agg_vars6;
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN (0x1<<0)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_EN (0x1<<1)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_EN_SHIFT 1
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN (0x1<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN_SHIFT 2
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<3)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 3
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG (0x1<<4)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG_SHIFT 4
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG (0x1<<5)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG_SHIFT 5
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF (0x3<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF_SHIFT 6
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF (0x3<<8)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_SHIFT 8
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF (0x3<<10)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_SHIFT 10
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF (0x3<<12)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_SHIFT 12
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF (0x3<<14)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_SHIFT 14
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF (0x3<<16)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF_SHIFT 16
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF (0x3<<18)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF_SHIFT 18
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF (0x3<<20)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF_SHIFT 20
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF (0x3<<22)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF_SHIFT 22
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF (0x3<<24)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF_SHIFT 24
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG (0x1<<26)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG_SHIFT 26
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71 (0x1<<27)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71_SHIFT 27
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY (0x1<<28)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY_SHIFT 28
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG (0x1<<29)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG_SHIFT 29
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG (0x1<<30)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG_SHIFT 30
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG (0x1<<31)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG_SHIFT 31
+#if defined(__BIG_ENDIAN)
+ u16 __agg_misc6;
+ u16 __tcp_agg_vars7;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __tcp_agg_vars7;
+ u16 __agg_misc6;
+#endif
+ u32 __agg_val10;
+ u32 __agg_val10_th;
+#if defined(__BIG_ENDIAN)
+ u16 __reserved3;
+ u8 __reserved2;
+ u8 __da_only_cnt;
+#elif defined(__LITTLE_ENDIAN)
+ u8 __da_only_cnt;
+ u8 __reserved2;
+ u16 __reserved3;
+#endif
+};
+
+/*
+ * The iscsi aggregative context of Xstorm
+ */
+struct xstorm_iscsi_ag_context {
+#if defined(__BIG_ENDIAN)
+ u16 agg_val1;
+ u8 agg_vars1;
+#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
+ u8 state;
+#elif defined(__LITTLE_ENDIAN)
+ u8 state;
+ u8 agg_vars1;
+#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
+ u16 agg_val1;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 cdu_reserved;
+ u8 __agg_vars4;
+ u8 agg_vars3;
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
+ u8 agg_vars2;
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+ u8 agg_vars2;
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+ u8 agg_vars3;
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
+ u8 __agg_vars4;
+ u8 cdu_reserved;
+#endif
+ u32 more_to_send;
+#if defined(__BIG_ENDIAN)
+ u16 agg_vars5;
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14
+ u16 sq_cons;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sq_cons;
+ u16 agg_vars5;
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14
+#endif
+ struct xstorm_tcp_tcp_ag_context_section tcp;
+#if defined(__BIG_ENDIAN)
+ u16 agg_vars7;
+#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
+#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
+ u8 agg_val3_th;
+ u8 agg_vars6;
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+ u8 agg_vars6;
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6
+ u8 agg_val3_th;
+ u16 agg_vars7;
+#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
+#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __agg_val11_th;
+ u16 __gen_data;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __gen_data;
+ u16 __agg_val11_th;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 __reserved1;
+ u8 __agg_val6_th;
+ u16 __agg_val9;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __agg_val9;
+ u8 __agg_val6_th;
+ u8 __reserved1;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 hq_prod;
+ u16 hq_cons;
+#elif defined(__LITTLE_ENDIAN)
+ u16 hq_cons;
+ u16 hq_prod;
+#endif
+ u32 agg_vars8;
+#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
+#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3_SHIFT 24
+#if defined(__BIG_ENDIAN)
+ u16 r2tq_prod;
+ u16 sq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sq_prod;
+ u16 r2tq_prod;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 agg_val3;
+ u8 agg_val6;
+ u8 agg_val5_th;
+ u8 agg_val5;
+#elif defined(__LITTLE_ENDIAN)
+ u8 agg_val5;
+ u8 agg_val5_th;
+ u8 agg_val6;
+ u8 agg_val3;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __agg_misc1;
+ u16 agg_limit1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 agg_limit1;
+ u16 __agg_misc1;
+#endif
+ u32 hq_cons_tcp_seq;
+ u32 exp_stat_sn;
+ u32 rst_seq_num;
+};
+
+
+/*
+ * The L5cm aggregative context of XStorm
+ */
+struct xstorm_l5cm_ag_context {
+#if defined(__BIG_ENDIAN)
+ u16 agg_val1;
+ u8 agg_vars1;
+#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
+ u8 state;
+#elif defined(__LITTLE_ENDIAN)
+ u8 state;
+ u8 agg_vars1;
+#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
+ u16 agg_val1;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 cdu_reserved;
+ u8 __agg_vars4;
+ u8 agg_vars3;
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
+#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
+ u8 agg_vars2;
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF (0x3<<0)
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN (0x1<<7)
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+ u8 agg_vars2;
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF (0x3<<0)
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN (0x1<<7)
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN_SHIFT 7
+ u8 agg_vars3;
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
+#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
+ u8 __agg_vars4;
+ u8 cdu_reserved;
+#endif
+ u32 more_to_send;
+#if defined(__BIG_ENDIAN)
+ u16 agg_vars5;
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2_SHIFT 14
+ u16 agg_val4_th;
+#elif defined(__LITTLE_ENDIAN)
+ u16 agg_val4_th;
+ u16 agg_vars5;
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2_SHIFT 14
+#endif
+ struct xstorm_tcp_tcp_ag_context_section tcp;
+#if defined(__BIG_ENDIAN)
+ u16 agg_vars7;
+#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
+#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
+#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
+#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
+ u8 agg_val3_th;
+ u8 agg_vars6;
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7_SHIFT 3
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+ u8 agg_vars6;
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7_SHIFT 3
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4_SHIFT 6
+ u8 agg_val3_th;
+ u16 agg_vars7;
+#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
+#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
+#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
+#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __agg_val11_th;
+ u16 __gen_data;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __gen_data;
+ u16 __agg_val11_th;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 __reserved1;
+ u8 __agg_val6_th;
+ u16 __agg_val9;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __agg_val9;
+ u8 __agg_val6_th;
+ u8 __reserved1;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 agg_val2_th;
+ u16 agg_val2;
+#elif defined(__LITTLE_ENDIAN)
+ u16 agg_val2;
+ u16 agg_val2_th;
+#endif
+ u32 agg_vars8;
+#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0)
+#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC2_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
+#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC3_SHIFT 24
+#if defined(__BIG_ENDIAN)
+ u16 agg_misc0;
+ u16 agg_val4;
+#elif defined(__LITTLE_ENDIAN)
+ u16 agg_val4;
+ u16 agg_misc0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 agg_val3;
+ u8 agg_val6;
+ u8 agg_val5_th;
+ u8 agg_val5;
+#elif defined(__LITTLE_ENDIAN)
+ u8 agg_val5;
+ u8 agg_val5_th;
+ u8 agg_val6;
+ u8 agg_val3;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __agg_misc1;
+ u16 agg_limit1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 agg_limit1;
+ u16 __agg_misc1;
+#endif
+ u32 completion_seq;
+ u32 agg_misc4;
+ u32 rst_seq_num;
+};
+
+/*
+ * ABTS info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_abts_info {
+ __le16 aborted_task_id;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+
+/*
+ * Fixed-size structure so that it can be placed in a union
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_abts_rsp_union {
+ u8 r_ctl;
+ u8 rsrv[3];
+ __le32 abts_rsp_payload[7];
+};
+
+
+/*
+ * 4 regs in size $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_bd_ctx {
+ __le32 buf_addr_hi;
+ __le32 buf_addr_lo;
+ __le16 buf_len;
+ __le16 rsrv0;
+ __le16 flags;
+ __le16 rsrv1;
+};
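Address fields in these structures are split into explicit _hi/_lo 32-bit halves, and $$KEEP_ENDIANNESS$$ marks the layout as fixed little-endian on the wire. A sketch of filling one buffer descriptor from a DMA mapping (the helper is hypothetical; upper_32_bits()/lower_32_bits() and cpu_to_le*() are the usual kernel primitives):

#include <linux/kernel.h>
#include <linux/types.h>

static void fcoe_fill_bd(struct fcoe_bd_ctx *bd, dma_addr_t addr,
			 u16 len, u16 flags)
{
	bd->buf_addr_hi = cpu_to_le32(upper_32_bits(addr));
	bd->buf_addr_lo = cpu_to_le32(lower_32_bits(addr));
	bd->buf_len = cpu_to_le16(len);
	bd->flags = cpu_to_le16(flags);
	bd->rsrv0 = 0;
	bd->rsrv1 = 0;
}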
+
+
+/*
+ * FCoE cached SGEs context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_cached_sge_ctx {
+ struct regpair cur_buf_addr;
+ __le16 cur_buf_rem;
+ __le16 second_buf_rem;
+ struct regpair second_buf_addr;
+};
+
+
+/*
+ * Cleanup info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_cleanup_info {
+ __le16 cleaned_task_id;
+ __le16 rolled_tx_seq_cnt;
+ __le32 rolled_tx_data_offset;
+};
+
+
+/*
+ * FCP RSP flags $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_rsp_flags {
+ u8 flags;
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4)
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5)
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
+};
+
+/*
+ * FCP RSP payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_rsp_payload {
+ struct regpair reserved0;
+ __le32 fcp_resid;
+ u8 scsi_status_code;
+ struct fcoe_fcp_rsp_flags fcp_flags;
+ __le16 retry_delay_timer;
+ __le32 fcp_rsp_len;
+ __le32 fcp_sns_len;
+};
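A consumer of the RSP payload would gate each optional field on the corresponding flag bit; fcp_resid, for instance, is only meaningful when one of the RESID bits is set. A hypothetical sketch:

#include <asm/byteorder.h>
#include <linux/types.h>

static u32 fcoe_fcp_resid(const struct fcoe_fcp_rsp_payload *rsp)
{
	u8 flags = rsp->fcp_flags.flags;

	if (flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER |
		     FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER))
		return le32_to_cpu(rsp->fcp_resid);
	return 0;
}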
+
+/*
+ * Fixed-size structure so that it can be placed in a union
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_rsp_union {
+ struct fcoe_fcp_rsp_payload payload;
+ struct regpair reserved0;
+};
+
+/*
+ * FC header $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fc_hdr {
+ u8 s_id[3];
+ u8 cs_ctl;
+ u8 d_id[3];
+ u8 r_ctl;
+ __le16 seq_cnt;
+ u8 df_ctl;
+ u8 seq_id;
+ u8 f_ctl[3];
+ u8 type;
+ __le32 parameters;
+ __le16 rx_id;
+ __le16 ox_id;
+};
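The s_id/d_id members are the raw 3-byte Fibre Channel addresses. Assuming standard FC addressing with the most significant byte first (an assumption, not stated in this hunk), a 24-bit ID can be recovered with:

#include <linux/types.h>

/* Hypothetical helper; byte order per standard FC addressing. */
static u32 fcoe_fc_hdr_d_id(const struct fcoe_fc_hdr *hdr)
{
	return ((u32)hdr->d_id[0] << 16) |
	       ((u32)hdr->d_id[1] << 8) |
		(u32)hdr->d_id[2];
}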
+
+/*
+ * FC header union $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_mp_rsp_union {
+ struct fcoe_fc_hdr fc_hdr;
+ __le32 mp_payload_len;
+ __le32 rsrv;
+};
+
+/*
+ * Completion information $$KEEP_ENDIANNESS$$
+ */
+union fcoe_comp_flow_info {
+ struct fcoe_fcp_rsp_union fcp_rsp;
+ struct fcoe_abts_rsp_union abts_rsp;
+ struct fcoe_mp_rsp_union mp_rsp;
+ __le32 opaque[8];
+};
+
+
+/*
+ * External ABTS info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_abts_info {
+ __le32 rsrv0[6];
+ struct fcoe_abts_info ctx;
+};
+
+
+/*
+ * External cleanup info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_cleanup_info {
+ __le32 rsrv0[6];
+ struct fcoe_cleanup_info ctx;
+};
+
+
+/*
+ * FCoE FW Tx sequence context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fw_tx_seq_ctx {
+ __le32 data_offset;
+ __le16 seq_cnt;
+ __le16 rsrv0;
+};
+
+/*
+ * FCoE external FW Tx sequence context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_fw_tx_seq_ctx {
+ __le32 rsrv0[6];
+ struct fcoe_fw_tx_seq_ctx ctx;
+};
+
+
+/*
+ * FCoE multiple SGEs context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_mul_sges_ctx {
+ struct regpair cur_sge_addr;
+ __le16 cur_sge_off;
+ u8 cur_sge_idx;
+ u8 sgl_size;
+};
+
+/*
+ * FCoE external multiple SGEs context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_mul_sges_ctx {
+ struct fcoe_mul_sges_ctx mul_sgl;
+ struct regpair rsrv0;
+};
+
+
+/*
+ * FCP CMD payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_cmd_payload {
+ __le32 opaque[8];
+};
+
+
+/*
+ * FCP XFR_RDY payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_xfr_rdy_payload {
+ __le32 burst_len;
+ __le32 data_ro;
+};
+
+
+/*
+ * FC frame $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fc_frame {
+ struct fcoe_fc_hdr fc_hdr;
+ __le32 reserved0[2];
+};
+
+
+/*
+ * FCoE KCQ CQE parameters $$KEEP_ENDIANNESS$$
+ */
+union fcoe_kcqe_params {
+ __le32 reserved0[4];
+};
+
+/*
+ * FCoE KCQ CQE $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kcqe {
+ __le32 fcoe_conn_id;
+ __le32 completion_status;
+ __le32 fcoe_conn_context_id;
+ union fcoe_kcqe_params params;
+ __le16 qe_self_seq;
+ u8 op_code;
+ u8 flags;
+#define FCOE_KCQE_RESERVED0 (0x7<<0)
+#define FCOE_KCQE_RESERVED0_SHIFT 0
+#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
+#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
+#define FCOE_KCQE_LAYER_CODE (0x7<<4)
+#define FCOE_KCQE_LAYER_CODE_SHIFT 4
+#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
+#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
+};
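The KCQE flags byte packs sub-fields using the same mask/shift convention as the context sections above; LINKED_WITH_NEXT chains multi-CQE events. A decoding sketch (helper names hypothetical):

#include <linux/types.h>

static bool fcoe_kcqe_is_ramrod(const struct fcoe_kcqe *kcqe)
{
	return !!(kcqe->flags & FCOE_KCQE_RAMROD_COMPLETION);
}

static u8 fcoe_kcqe_layer_code(const struct fcoe_kcqe *kcqe)
{
	return (kcqe->flags & FCOE_KCQE_LAYER_CODE) >>
		FCOE_KCQE_LAYER_CODE_SHIFT;
}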
+
+
+/*
+ * FCoE KWQE header $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_header {
+ u8 op_code;
+ u8 flags;
+#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
+#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
+};
+
+/*
+ * FCoE firmware init request 1 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_init1 {
+ __le16 num_tasks;
+ struct fcoe_kwqe_header hdr;
+ __le32 task_list_pbl_addr_lo;
+ __le32 task_list_pbl_addr_hi;
+ __le32 dummy_buffer_addr_lo;
+ __le32 dummy_buffer_addr_hi;
+ __le16 sq_num_wqes;
+ __le16 rq_num_wqes;
+ __le16 rq_buffer_log_size;
+ __le16 cq_num_wqes;
+ __le16 mtu;
+ u8 num_sessions_log;
+ u8 flags;
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
+#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
+};
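Populating a KWQE follows the same pattern throughout: zero the slot, split 64-bit addresses into _lo/_hi halves, and convert multi-byte fields to little-endian. A hypothetical fill routine for init request 1 (the caller would still set hdr.op_code, whose value is not defined in this hunk):

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

static void fcoe_build_init1(struct fcoe_kwqe_init1 *w,
			     dma_addr_t task_list_pbl, u16 num_tasks, u16 mtu)
{
	memset(w, 0, sizeof(*w));
	w->num_tasks = cpu_to_le16(num_tasks);
	w->task_list_pbl_addr_lo = cpu_to_le32(lower_32_bits(task_list_pbl));
	w->task_list_pbl_addr_hi = cpu_to_le32(upper_32_bits(task_list_pbl));
	w->mtu = cpu_to_le16(mtu);
}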
+
+/*
+ * FCoE firmware init request 2 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_init2 {
+ u8 hsi_major_version;
+ u8 hsi_minor_version;
+ struct fcoe_kwqe_header hdr;
+ __le32 hash_tbl_pbl_addr_lo;
+ __le32 hash_tbl_pbl_addr_hi;
+ __le32 t2_hash_tbl_addr_lo;
+ __le32 t2_hash_tbl_addr_hi;
+ __le32 t2_ptr_hash_tbl_addr_lo;
+ __le32 t2_ptr_hash_tbl_addr_hi;
+ __le32 free_list_count;
+};
+
+/*
+ * FCoE firmware init request 3 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_init3 {
+ __le16 reserved0;
+ struct fcoe_kwqe_header hdr;
+ __le32 error_bit_map_lo;
+ __le32 error_bit_map_hi;
+ u8 perf_config;
+ u8 reserved21[3];
+ __le32 reserved2[4];
+};
+
+/*
+ * FCoE connection offload request 1 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload1 {
+ __le16 fcoe_conn_id;
+ struct fcoe_kwqe_header hdr;
+ __le32 sq_addr_lo;
+ __le32 sq_addr_hi;
+ __le32 rq_pbl_addr_lo;
+ __le32 rq_pbl_addr_hi;
+ __le32 rq_first_pbe_addr_lo;
+ __le32 rq_first_pbe_addr_hi;
+ __le16 rq_prod;
+ __le16 reserved0;
+};
+
+/*
+ * FCoE connection offload request 2 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload2 {
+ __le16 tx_max_fc_pay_len;
+ struct fcoe_kwqe_header hdr;
+ __le32 cq_addr_lo;
+ __le32 cq_addr_hi;
+ __le32 xferq_addr_lo;
+ __le32 xferq_addr_hi;
+ __le32 conn_db_addr_lo;
+ __le32 conn_db_addr_hi;
+ __le32 reserved1;
+};
+
+/*
+ * FCoE connection offload request 3 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload3 {
+ __le16 vlan_tag;
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
+ struct fcoe_kwqe_header hdr;
+ u8 s_id[3];
+ u8 tx_max_conc_seqs_c3;
+ u8 d_id[3];
+ u8 flags;
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
+ __le32 reserved;
+ __le32 confq_first_pbe_addr_lo;
+ __le32 confq_first_pbe_addr_hi;
+ __le16 tx_total_conc_seqs;
+ __le16 rx_max_fc_pay_len;
+ __le16 rx_total_conc_seqs;
+ u8 rx_max_conc_seqs_c3;
+ u8 rx_open_seqs_exch_c3;
+};
+
+/*
+ * FCoE connection offload request 4 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload4 {
+ u8 e_d_tov_timer_val;
+ u8 reserved2;
+ struct fcoe_kwqe_header hdr;
+ u8 src_mac_addr_lo[2];
+ u8 src_mac_addr_mid[2];
+ u8 src_mac_addr_hi[2];
+ u8 dst_mac_addr_hi[2];
+ u8 dst_mac_addr_lo[2];
+ u8 dst_mac_addr_mid[2];
+ __le32 lcq_addr_lo;
+ __le32 lcq_addr_hi;
+ __le32 confq_pbl_base_addr_lo;
+ __le32 confq_pbl_base_addr_hi;
+};
+
+/*
+ * FCoE connection enable request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_enable_disable {
+ __le16 reserved0;
+ struct fcoe_kwqe_header hdr;
+ u8 src_mac_addr_lo[2];
+ u8 src_mac_addr_mid[2];
+ u8 src_mac_addr_hi[2];
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
+ u8 dst_mac_addr_lo[2];
+ u8 dst_mac_addr_mid[2];
+ u8 dst_mac_addr_hi[2];
+ __le16 reserved1;
+ u8 s_id[3];
+ u8 vlan_flag;
+ u8 d_id[3];
+ u8 reserved3;
+ __le32 context_id;
+ __le32 conn_id;
+ __le32 reserved4;
+};
+
+/*
+ * FCoE connection destroy request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_destroy {
+ __le16 reserved0;
+ struct fcoe_kwqe_header hdr;
+ __le32 context_id;
+ __le32 conn_id;
+ __le32 reserved1[5];
+};
+
+/*
+ * FCoE destroy request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_destroy {
+ __le16 reserved0;
+ struct fcoe_kwqe_header hdr;
+ __le32 reserved1[7];
+};
+
+/*
+ * FCoE statistics request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_stat {
+ __le16 reserved0;
+ struct fcoe_kwqe_header hdr;
+ __le32 stat_params_addr_lo;
+ __le32 stat_params_addr_hi;
+ __le32 reserved1[5];
+};
+
+/*
+ * FCoE KWQ WQE $$KEEP_ENDIANNESS$$
+ */
+union fcoe_kwqe {
+ struct fcoe_kwqe_init1 init1;
+ struct fcoe_kwqe_init2 init2;
+ struct fcoe_kwqe_init3 init3;
+ struct fcoe_kwqe_conn_offload1 conn_offload1;
+ struct fcoe_kwqe_conn_offload2 conn_offload2;
+ struct fcoe_kwqe_conn_offload3 conn_offload3;
+ struct fcoe_kwqe_conn_offload4 conn_offload4;
+ struct fcoe_kwqe_conn_enable_disable conn_enable_disable;
+ struct fcoe_kwqe_conn_destroy conn_destroy;
+ struct fcoe_kwqe_destroy destroy;
+ struct fcoe_kwqe_stat statistics;
+};
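Every member of the union begins with a 16-bit field followed by the common fcoe_kwqe_header at byte offset 2, so a dispatcher can read the opcode through any arm before selecting the right one. Sketch:

#include <linux/types.h>

static u8 fcoe_kwqe_opcode(const union fcoe_kwqe *kwqe)
{
	/* hdr sits at the same offset in every variant. */
	return kwqe->init1.hdr.op_code;
}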
+
+
+/*
+ * TX SGL context $$KEEP_ENDIANNESS$$
+ */
+union fcoe_sgl_union_ctx {
+ struct fcoe_cached_sge_ctx cached_sge;
+ struct fcoe_ext_mul_sges_ctx sgl;
+ __le32 opaque[5];
+};
+
+/*
+ * Data-In/ELS/BLS information $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_read_flow_info {
+ union fcoe_sgl_union_ctx sgl_ctx;
+ __le32 rsrv0[3];
+};
+
+
+/*
+ * FCoE stat context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_s_stat_ctx {
+ u8 flags;
+#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0)
+#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1)
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2)
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3)
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3
+#define FCOE_S_STAT_CTX_P_RJT (0x1<<4)
+#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4
+#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5)
+#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5
+#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6)
+#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6
+};
+
+/*
+ * FCoE Rx seq context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_rx_seq_ctx {
+ u8 seq_id;
+ struct fcoe_s_stat_ctx s_stat;
+ __le16 seq_cnt;
+ __le32 low_exp_ro;
+ __le32 high_exp_ro;
+};
+
+
+/*
+ * FCoE rx_wr union context $$KEEP_ENDIANNESS$$
+ */
+union fcoe_rx_wr_union_ctx {
+ struct fcoe_read_flow_info read_info;
+ union fcoe_comp_flow_info comp_info;
+ __le32 opaque[8];
+};
+
+
+/*
+ * FCoE SQ element $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_sqe {
+ __le16 wqe;
+#define FCOE_SQE_TASK_ID (0x7FFF<<0)
+#define FCOE_SQE_TASK_ID_SHIFT 0
+#define FCOE_SQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_SQE_TOGGLE_BIT_SHIFT 15
+};
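The SQ element is a single 16-bit word: 15 bits of task id plus a toggle bit, which by the usual ring convention flips on every wrap so the firmware can distinguish fresh entries from stale ones (the wrap semantics are an inference, not spelled out in this hunk). A posting sketch:

#include <asm/byteorder.h>
#include <linux/types.h>

static void fcoe_write_sqe(struct fcoe_sqe *sqe, u16 task_id, bool toggle)
{
	u16 wqe = task_id & FCOE_SQE_TASK_ID;

	if (toggle)
		wqe |= FCOE_SQE_TOGGLE_BIT;
	sqe->wqe = cpu_to_le16(wqe);
}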
+
+
+/*
+ * 14 regs $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_only {
+ union fcoe_sgl_union_ctx sgl_ctx;
+ __le32 rsrv0;
+};
+
+/*
+ * 32 bytes (8 regs) used for TX only purposes $$KEEP_ENDIANNESS$$
+ */
+union fcoe_tx_wr_rx_rd_union_ctx {
+ struct fcoe_fc_frame tx_frame;
+ struct fcoe_fcp_cmd_payload fcp_cmd;
+ struct fcoe_ext_cleanup_info cleanup;
+ struct fcoe_ext_abts_info abts;
+ struct fcoe_ext_fw_tx_seq_ctx tx_seq;
+ __le32 opaque[8];
+};
+
+/*
+ * tce_tx_wr_rx_rd_const $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_wr_rx_rd_const {
+ u8 init_flags;
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE (0x7<<0)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT 0
+#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE (0x1<<3)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT 3
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE (0x1<<4)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT 4
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE (0x3<<5)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT 5
+#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV (0x1<<7)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV_SHIFT 7
+ u8 tx_flags;
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID (0x1<<0)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID_SHIFT 0
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE (0xF<<1)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT 1
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1 (0x1<<5)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1_SHIFT 5
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT (0x1<<6)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT_SHIFT 6
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2 (0x1<<7)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2_SHIFT 7
+ __le16 rsrv3;
+ __le32 verify_tx_seq;
+};
+
+/*
+ * tce_tx_wr_rx_rd $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_wr_rx_rd {
+ union fcoe_tx_wr_rx_rd_union_ctx union_ctx;
+ struct fcoe_tce_tx_wr_rx_rd_const const_ctx;
+};
+
+/*
+ * tce_rx_wr_tx_rd_const $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd_const {
+ __le32 data_2_trns;
+ __le32 init_flags;
+#define FCOE_TCE_RX_WR_TX_RD_CONST_CID (0xFFFFFF<<0)
+#define FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT 0
+#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0 (0xFF<<24)
+#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0_SHIFT 24
+};
+
+/*
+ * tce_rx_wr_tx_rd_var $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd_var {
+ __le16 rx_flags;
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1 (0xF<<0)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1_SHIFT 0
+#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE (0x7<<4)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT 4
+#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ (0x1<<7)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ_SHIFT 7
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE (0xF<<8)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT 8
+#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME (0x1<<12)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT 12
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT (0x1<<13)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT_SHIFT 13
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2 (0x1<<14)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2_SHIFT 14
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID (0x1<<15)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID_SHIFT 15
+ __le16 rx_id;
+ struct fcoe_fcp_xfr_rdy_payload fcp_xfr_rdy;
+};
+
+/*
+ * tce_rx_wr_tx_rd $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd {
+ struct fcoe_tce_rx_wr_tx_rd_const const_ctx;
+ struct fcoe_tce_rx_wr_tx_rd_var var_ctx;
+};
+
+/*
+ * tce_rx_only $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_only {
+ struct fcoe_rx_seq_ctx rx_seq_ctx;
+ union fcoe_rx_wr_union_ctx union_ctx;
+};
+
+/*
+ * task_ctx_entry $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_task_ctx_entry {
+ struct fcoe_tce_tx_only txwr_only;
+ struct fcoe_tce_tx_wr_rx_rd txwr_rxrd;
+ struct fcoe_tce_rx_wr_tx_rd rxwr_txrd;
+ struct fcoe_tce_rx_only rxwr_only;
+};
+
+
+/*
+ * FCoE XFRQ element $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_xfrqe {
+ __le16 wqe;
+#define FCOE_XFRQE_TASK_ID (0x7FFF<<0)
+#define FCOE_XFRQE_TASK_ID_SHIFT 0
+#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * Cached SGEs $$KEEP_ENDIANNESS$$
+ */
+struct common_fcoe_sgl {
+ struct fcoe_bd_ctx sge[3];
+};
+
+
+/*
+ * FCoE SQ/XFRQ element
+ */
+struct fcoe_cached_wqe {
+ struct fcoe_sqe sqe;
+ struct fcoe_xfrqe xfrqe;
+};
+
+
+/*
+ * FCoE connection enable/disable params passed by driver to FW in FCoE enable
+ * ramrod $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_conn_enable_disable_ramrod_params {
+ struct fcoe_kwqe_conn_enable_disable enable_disable_kwqe;
+};
+
+
+/*
+ * FCoE connection offload params passed by driver to FW in FCoE offload ramrod
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_conn_offload_ramrod_params {
+ struct fcoe_kwqe_conn_offload1 offload_kwqe1;
+ struct fcoe_kwqe_conn_offload2 offload_kwqe2;
+ struct fcoe_kwqe_conn_offload3 offload_kwqe3;
+ struct fcoe_kwqe_conn_offload4 offload_kwqe4;
+};
+
+
+struct ustorm_fcoe_mng_ctx {
+#if defined(__BIG_ENDIAN)
+ u8 mid_seq_proc_flag;
+ u8 tce_in_cam_flag;
+ u8 tce_on_ior_flag;
+ u8 en_cached_tce_flag;
+#elif defined(__LITTLE_ENDIAN)
+ u8 en_cached_tce_flag;
+ u8 tce_on_ior_flag;
+ u8 tce_in_cam_flag;
+ u8 mid_seq_proc_flag;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 tce_cam_addr;
+ u8 cached_conn_flag;
+ u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rsrv0;
+ u8 cached_conn_flag;
+ u8 tce_cam_addr;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 dma_tce_ram_addr;
+ u16 tce_ram_addr;
+#elif defined(__LITTLE_ENDIAN)
+ u16 tce_ram_addr;
+ u16 dma_tce_ram_addr;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 ox_id;
+ u16 wr_done_seq;
+#elif defined(__LITTLE_ENDIAN)
+ u16 wr_done_seq;
+ u16 ox_id;
+#endif
+ struct regpair task_addr;
+};
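The mirrored __BIG_ENDIAN/__LITTLE_ENDIAN blocks above (and in every storm context that follows) exist so that each aligned 32-bit word of the context image is byte-identical on both host endiannesses: the chip sees one fixed layout, and only the compiler-visible member order changes. A sketch of the idea for the first word of ustorm_fcoe_mng_ctx; the union and its name are illustrative, not part of the patch:

union mng_word0_view {			/* hypothetical overlay */
	u32 raw;			/* the word as the chip sees it */
	struct {
#if defined(__BIG_ENDIAN)
		u8 mid_seq_proc_flag;	/* bits 31..24 */
		u8 tce_in_cam_flag;	/* bits 23..16 */
		u8 tce_on_ior_flag;	/* bits 15..8  */
		u8 en_cached_tce_flag;	/* bits  7..0  */
#elif defined(__LITTLE_ENDIAN)
		u8 en_cached_tce_flag;	/* bits  7..0  */
		u8 tce_on_ior_flag;	/* bits 15..8  */
		u8 tce_in_cam_flag;	/* bits 23..16 */
		u8 mid_seq_proc_flag;	/* bits 31..24 */
#endif
	} f;
};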
+
+/*
+ * Parameters initialized during offload according to FLOGI/PLOGI/PRLI and
+ * used in the FCoE context section
+ */
+struct ustorm_fcoe_params {
+#if defined(__BIG_ENDIAN)
+ u16 fcoe_conn_id;
+ u16 flags;
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0)
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1)
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3)
+#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3
+#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4)
+#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5)
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6)
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6
+#define USTORM_FCOE_PARAMS_RSRV0 (0x1FF<<7)
+#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+ u16 flags;
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0)
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1)
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3)
+#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3
+#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4)
+#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5)
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6)
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6
+#define USTORM_FCOE_PARAMS_RSRV0 (0x1FF<<7)
+#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 7
+ u16 fcoe_conn_id;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 hc_csdm_byte_en;
+ u8 func_id;
+ u8 port_id;
+ u8 vnic_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 vnic_id;
+ u8 port_id;
+ u8 func_id;
+ u8 hc_csdm_byte_en;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 rx_total_conc_seqs;
+ u16 rx_max_fc_pay_len;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_max_fc_pay_len;
+ u16 rx_total_conc_seqs;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 task_pbe_idx_off;
+ u8 task_in_page_log_size;
+ u16 rx_max_conc_seqs;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_max_conc_seqs;
+ u8 task_in_page_log_size;
+ u8 task_pbe_idx_off;
+#endif
+};
+
+/*
+ * FCoE 16-bit index structure
+ */
+struct fcoe_idx16_fields {
+ u16 fields;
+#define FCOE_IDX16_FIELDS_IDX (0x7FFF<<0)
+#define FCOE_IDX16_FIELDS_IDX_SHIFT 0
+#define FCOE_IDX16_FIELDS_MSB (0x1<<15)
+#define FCOE_IDX16_FIELDS_MSB_SHIFT 15
+};
+
+/*
+ * FCoE 16-bit index union
+ */
+union fcoe_idx16_field_union {
+ struct fcoe_idx16_fields fields;
+ u16 val;
+};
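A sketch of how such a union is used: it splits a raw 16-bit ring value into its 15-bit index and the separately tracked MSB. The helper name is an assumption:

static inline u16 fcoe_idx16_idx(u16 raw)
{
	union fcoe_idx16_field_union u;

	u.val = raw;
	return (u.fields.fields & FCOE_IDX16_FIELDS_IDX) >>
	       FCOE_IDX16_FIELDS_IDX_SHIFT;
}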
+
+/*
+ * Parameters required for placement according to SGL
+ */
+struct ustorm_fcoe_data_place_mng {
+#if defined(__BIG_ENDIAN)
+ u16 sge_off;
+ u8 num_sges;
+ u8 sge_idx;
+#elif defined(__LITTLE_ENDIAN)
+ u8 sge_idx;
+ u8 num_sges;
+ u16 sge_off;
+#endif
+};
+
+/*
+ * Parameters required for placement according to SGL
+ */
+struct ustorm_fcoe_data_place {
+ struct ustorm_fcoe_data_place_mng cached_mng;
+ struct fcoe_bd_ctx cached_sge[2];
+};
+
+/*
+ * TX processing shall write and RX processing shall read from this section
+ */
+union fcoe_u_tce_tx_wr_rx_rd_union {
+ struct fcoe_abts_info abts;
+ struct fcoe_cleanup_info cleanup;
+ struct fcoe_fw_tx_seq_ctx tx_seq_ctx;
+ u32 opaque[2];
+};
+
+/*
+ * TX processing shall write and RX processing shall read from this section
+ */
+struct fcoe_u_tce_tx_wr_rx_rd {
+ union fcoe_u_tce_tx_wr_rx_rd_union union_ctx;
+ struct fcoe_tce_tx_wr_rx_rd_const const_ctx;
+};
+
+struct ustorm_fcoe_tce {
+ struct fcoe_u_tce_tx_wr_rx_rd txwr_rxrd;
+ struct fcoe_tce_rx_wr_tx_rd rxwr_txrd;
+ struct fcoe_tce_rx_only rxwr;
+};
+
+struct ustorm_fcoe_cache_ctx {
+ u32 rsrv0;
+ struct ustorm_fcoe_data_place data_place;
+ struct ustorm_fcoe_tce tce;
+};
+
+/*
+ * Ustorm FCoE Storm Context
+ */
+struct ustorm_fcoe_st_context {
+ struct ustorm_fcoe_mng_ctx mng_ctx;
+ struct ustorm_fcoe_params fcoe_params;
+ struct regpair cq_base_addr;
+ struct regpair rq_pbl_base;
+ struct regpair rq_cur_page_addr;
+ struct regpair confq_pbl_base_addr;
+ struct regpair conn_db_base;
+ struct regpair xfrq_base_addr;
+ struct regpair lcq_base_addr;
+#if defined(__BIG_ENDIAN)
+ union fcoe_idx16_field_union rq_cons;
+ union fcoe_idx16_field_union rq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ union fcoe_idx16_field_union rq_prod;
+ union fcoe_idx16_field_union rq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 xfrq_prod;
+ u16 cq_cons;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cq_cons;
+ u16 xfrq_prod;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 lcq_cons;
+ u16 hc_cram_address;
+#elif defined(__LITTLE_ENDIAN)
+ u16 hc_cram_address;
+ u16 lcq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 sq_xfrq_lcq_confq_size;
+ u16 confq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 confq_prod;
+ u16 sq_xfrq_lcq_confq_size;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 hc_csdm_agg_int;
+ u8 rsrv2;
+ u8 available_rqes;
+ u8 sp_q_flush_cnt;
+#elif defined(__LITTLE_ENDIAN)
+ u8 sp_q_flush_cnt;
+ u8 available_rqes;
+ u8 rsrv2;
+ u8 hc_csdm_agg_int;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 num_pend_tasks;
+ u16 pbf_ack_ram_addr;
+#elif defined(__LITTLE_ENDIAN)
+ u16 pbf_ack_ram_addr;
+ u16 num_pend_tasks;
+#endif
+ struct ustorm_fcoe_cache_ctx cache_ctx;
+};
+
+/*
+ * The FCoE non-aggregative context of Tstorm
+ */
+struct tstorm_fcoe_st_context {
+ struct regpair reserved0;
+ struct regpair reserved1;
+};
+
+/*
+ * Ethernet context section
+ */
+struct xstorm_fcoe_eth_context_section {
+#if defined(__BIG_ENDIAN)
+ u8 remote_addr_4;
+ u8 remote_addr_5;
+ u8 local_addr_0;
+ u8 local_addr_1;
+#elif defined(__LITTLE_ENDIAN)
+ u8 local_addr_1;
+ u8 local_addr_0;
+ u8 remote_addr_5;
+ u8 remote_addr_4;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 remote_addr_0;
+ u8 remote_addr_1;
+ u8 remote_addr_2;
+ u8 remote_addr_3;
+#elif defined(__LITTLE_ENDIAN)
+ u8 remote_addr_3;
+ u8 remote_addr_2;
+ u8 remote_addr_1;
+ u8 remote_addr_0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 reserved_vlan_type;
+ u16 params;
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
+#elif defined(__LITTLE_ENDIAN)
+ u16 params;
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
+ u16 reserved_vlan_type;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 local_addr_2;
+ u8 local_addr_3;
+ u8 local_addr_4;
+ u8 local_addr_5;
+#elif defined(__LITTLE_ENDIAN)
+ u8 local_addr_5;
+ u8 local_addr_4;
+ u8 local_addr_3;
+ u8 local_addr_2;
+#endif
+};
+
+/*
+ * Flags used in the FCoE context section - 1 byte
+ */
+struct xstorm_fcoe_context_flags {
+ u8 flags;
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q (0x3<<0)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q_SHIFT 0
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ (0x1<<2)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ_SHIFT 2
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_BLOCK_SQ (0x1<<3)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_BLOCK_SQ_SHIFT 3
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT (0x1<<4)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT_SHIFT 4
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE (0x1<<5)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE_SHIFT 5
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE (0x1<<6)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE_SHIFT 6
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_VNTAG_VLAN (0x1<<7)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_VNTAG_VLAN_SHIFT 7
+};
+
+struct xstorm_fcoe_tce {
+ struct fcoe_tce_tx_only txwr;
+ struct fcoe_tce_tx_wr_rx_rd txwr_rxrd;
+};
+
+/*
+ * FCP_DATA parameters required for transmission
+ */
+struct xstorm_fcoe_fcp_data {
+ u32 io_rem;
+#if defined(__BIG_ENDIAN)
+ u16 cached_sge_off;
+ u8 cached_num_sges;
+ u8 cached_sge_idx;
+#elif defined(__LITTLE_ENDIAN)
+ u8 cached_sge_idx;
+ u8 cached_num_sges;
+ u16 cached_sge_off;
+#endif
+ u32 buf_addr_hi_0;
+ u32 buf_addr_lo_0;
+#if defined(__BIG_ENDIAN)
+ u16 num_of_pending_tasks;
+ u16 buf_len_0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 buf_len_0;
+ u16 num_of_pending_tasks;
+#endif
+ u32 buf_addr_hi_1;
+ u32 buf_addr_lo_1;
+#if defined(__BIG_ENDIAN)
+ u16 task_pbe_idx_off;
+ u16 buf_len_1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 buf_len_1;
+ u16 task_pbe_idx_off;
+#endif
+ u32 buf_addr_hi_2;
+ u32 buf_addr_lo_2;
+#if defined(__BIG_ENDIAN)
+ u16 ox_id;
+ u16 buf_len_2;
+#elif defined(__LITTLE_ENDIAN)
+ u16 buf_len_2;
+ u16 ox_id;
+#endif
+};
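The buf_addr_hi/lo_N and buf_len_N members above form three cached SGEs held directly in the context. A hedged sketch of filling entry 0 from a DMA mapping; the helper and its arguments are assumptions, not driver API:

#include <linux/kernel.h>	/* upper_32_bits()/lower_32_bits() */

static void xstorm_fcp_set_sge0(struct xstorm_fcoe_fcp_data *d,
				dma_addr_t addr, u16 len)
{
	d->buf_addr_hi_0 = upper_32_bits(addr);
	d->buf_addr_lo_0 = lower_32_bits(addr);
	d->buf_len_0 = len;
}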
+
+/*
+ * VLAN configuration
+ */
+struct xstorm_fcoe_vlan_conf {
+ u8 vlan_conf;
+#define XSTORM_FCOE_VLAN_CONF_PRIORITY (0x7<<0)
+#define XSTORM_FCOE_VLAN_CONF_PRIORITY_SHIFT 0
+#define XSTORM_FCOE_VLAN_CONF_INNER_VLAN_FLAG (0x1<<3)
+#define XSTORM_FCOE_VLAN_CONF_INNER_VLAN_FLAG_SHIFT 3
+#define XSTORM_FCOE_VLAN_CONF_RESERVED (0xF<<4)
+#define XSTORM_FCOE_VLAN_CONF_RESERVED_SHIFT 4
+};
+
+/*
+ * FCoE 16-bit VLAN structure
+ */
+struct fcoe_vlan_fields {
+ u16 fields;
+#define FCOE_VLAN_FIELDS_VID (0xFFF<<0)
+#define FCOE_VLAN_FIELDS_VID_SHIFT 0
+#define FCOE_VLAN_FIELDS_CLI (0x1<<12)
+#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
+#define FCOE_VLAN_FIELDS_PRI (0x7<<13)
+#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
+};
+
+/*
+ * FCoE 16-bit VLAN union
+ */
+union fcoe_vlan_field_union {
+ struct fcoe_vlan_fields fields;
+ u16 val;
+};
+
+/*
+ * FCoE 16-bit VLAN/VIF union
+ */
+union fcoe_vlan_vif_field_union {
+ union fcoe_vlan_field_union vlan;
+ u16 vif;
+};
+
+/*
+ * FCoE context section
+ */
+struct xstorm_fcoe_context_section {
+#if defined(__BIG_ENDIAN)
+ u8 cs_ctl;
+ u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id[3];
+ u8 cs_ctl;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 rctl;
+ u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id[3];
+ u8 rctl;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 sq_xfrq_lcq_confq_size;
+ u16 tx_max_fc_pay_len;
+#elif defined(__LITTLE_ENDIAN)
+ u16 tx_max_fc_pay_len;
+ u16 sq_xfrq_lcq_confq_size;
+#endif
+ u32 lcq_prod;
+#if defined(__BIG_ENDIAN)
+ u8 port_id;
+ u8 func_id;
+ u8 seq_id;
+ struct xstorm_fcoe_context_flags tx_flags;
+#elif defined(__LITTLE_ENDIAN)
+ struct xstorm_fcoe_context_flags tx_flags;
+ u8 seq_id;
+ u8 func_id;
+ u8 port_id;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 mtu;
+ u8 func_mode;
+ u8 vnic_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 vnic_id;
+ u8 func_mode;
+ u16 mtu;
+#endif
+ struct regpair confq_curr_page_addr;
+ struct fcoe_cached_wqe cached_wqe[8];
+ struct regpair lcq_base_addr;
+ struct xstorm_fcoe_tce tce;
+ struct xstorm_fcoe_fcp_data fcp_data;
+#if defined(__BIG_ENDIAN)
+ u8 tx_max_conc_seqs_c3;
+ u8 vlan_flag;
+ u8 dcb_val;
+ u8 data_pb_cmd_size;
+#elif defined(__LITTLE_ENDIAN)
+ u8 data_pb_cmd_size;
+ u8 dcb_val;
+ u8 vlan_flag;
+ u8 tx_max_conc_seqs_c3;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 fcoe_tx_stat_params_ram_addr;
+ u16 fcoe_tx_fc_seq_ram_addr;
+#elif defined(__LITTLE_ENDIAN)
+ u16 fcoe_tx_fc_seq_ram_addr;
+ u16 fcoe_tx_stat_params_ram_addr;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 fcp_cmd_line_credit;
+ u8 eth_hdr_size;
+ u16 pbf_addr;
+#elif defined(__LITTLE_ENDIAN)
+ u16 pbf_addr;
+ u8 eth_hdr_size;
+ u8 fcp_cmd_line_credit;
+#endif
+#if defined(__BIG_ENDIAN)
+ union fcoe_vlan_vif_field_union multi_func_val;
+ u8 page_log_size;
+ struct xstorm_fcoe_vlan_conf orig_vlan_conf;
+#elif defined(__LITTLE_ENDIAN)
+ struct xstorm_fcoe_vlan_conf orig_vlan_conf;
+ u8 page_log_size;
+ union fcoe_vlan_vif_field_union multi_func_val;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 fcp_cmd_frame_size;
+ u16 pbf_addr_ff;
+#elif defined(__LITTLE_ENDIAN)
+ u16 pbf_addr_ff;
+ u16 fcp_cmd_frame_size;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 vlan_num;
+ u8 cos;
+ u8 cache_xfrq_cons;
+ u8 cache_sq_cons;
+#elif defined(__LITTLE_ENDIAN)
+ u8 cache_sq_cons;
+ u8 cache_xfrq_cons;
+ u8 cos;
+ u8 vlan_num;
+#endif
+ u32 verify_tx_seq;
+};
+
+/*
+ * Xstorm FCoE Storm Context
+ */
+struct xstorm_fcoe_st_context {
+ struct xstorm_fcoe_eth_context_section eth;
+ struct xstorm_fcoe_context_section fcoe;
+};
+
+/*
+ * FCoE connection context
+ */
+struct fcoe_context {
+ struct ustorm_fcoe_st_context ustorm_st_context;
+ struct tstorm_fcoe_st_context tstorm_st_context;
+ struct xstorm_fcoe_ag_context xstorm_ag_context;
+ struct tstorm_fcoe_ag_context tstorm_ag_context;
+ struct ustorm_fcoe_ag_context ustorm_ag_context;
+ struct timers_block_context timers_context;
+ struct xstorm_fcoe_st_context xstorm_st_context;
+};
+
+/*
+ * FCoE init params passed by driver to FW in FCoE init ramrod
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_init_ramrod_params {
+ struct fcoe_kwqe_init1 init_kwqe1;
+ struct fcoe_kwqe_init2 init_kwqe2;
+ struct fcoe_kwqe_init3 init_kwqe3;
+ struct regpair eq_pbl_base;
+ __le32 eq_pbl_size;
+ __le32 reserved2;
+ __le16 eq_prod;
+ __le16 sb_num;
+ u8 sb_id;
+ u8 reserved0;
+ __le16 reserved1;
+};
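Unlike the endian-mirrored storm contexts, the $$KEEP_ENDIANNESS$$ ramrod buffers use explicit __le16/__le32 members, so they are filled with cpu_to_le*() regardless of host byte order. A minimal sketch, assuming the caller already allocated the EQ page table; the helper name and its parameters are illustrative, not part of the patch:

static void fcoe_init_ramrod_fill(struct fcoe_init_ramrod_params *p,
				  dma_addr_t eq_pbl_dma, u32 eq_pbl_size,
				  u16 status_blk_num, u8 status_blk_id)
{
	p->eq_pbl_base.lo = cpu_to_le32(lower_32_bits(eq_pbl_dma));
	p->eq_pbl_base.hi = cpu_to_le32(upper_32_bits(eq_pbl_dma));
	p->eq_pbl_size = cpu_to_le32(eq_pbl_size);
	p->eq_prod = cpu_to_le16(0);		/* empty event queue */
	p->sb_num = cpu_to_le16(status_blk_num);
	p->sb_id = status_blk_id;
}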
+
+/*
+ * FCoE statistics params buffer passed by driver to FW in FCoE statistics
+ * ramrod $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_stat_ramrod_params {
+ struct fcoe_kwqe_stat stat_kwqe;
+};
+
+/*
+ * CQ DB CQ producer and pending completion counter
+ */
+struct iscsi_cq_db_prod_pnd_cmpltn_cnt {
+#if defined(__BIG_ENDIAN)
+ u16 cntr;
+ u16 prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 prod;
+ u16 cntr;
+#endif
+};
+
+/*
+ * CQ DB pending completion ITT array
+ */
+struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr {
+ struct iscsi_cq_db_prod_pnd_cmpltn_cnt prod_pend_comp[8];
+};
+
+/*
+ * Cstorm CQ sequence to notify array, updated by driver
+ */
+struct iscsi_cq_db_sqn_2_notify_arr {
+ u16 sqn[8];
+};
+
+/*
+ * Cstorm iSCSI Storm Context
+ */
+struct cstorm_iscsi_st_context {
+ struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr cq_c_prod_pend_comp_ctr_arr;
+ struct iscsi_cq_db_sqn_2_notify_arr cq_c_prod_sqn_arr;
+ struct iscsi_cq_db_sqn_2_notify_arr cq_c_sqn_2_notify_arr;
+ struct regpair hq_pbl_base;
+ struct regpair hq_curr_pbe;
+ struct regpair task_pbl_base;
+ struct regpair cq_db_base;
+#if defined(__BIG_ENDIAN)
+ u16 hq_bd_itt;
+ u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 iscsi_conn_id;
+ u16 hq_bd_itt;
+#endif
+ u32 hq_bd_data_segment_len;
+ u32 hq_bd_buffer_offset;
+#if defined(__BIG_ENDIAN)
+ u8 rsrv;
+ u8 cq_proc_en_bit_map;
+ u8 cq_pend_comp_itt_valid_bit_map;
+ u8 hq_bd_opcode;
+#elif defined(__LITTLE_ENDIAN)
+ u8 hq_bd_opcode;
+ u8 cq_pend_comp_itt_valid_bit_map;
+ u8 cq_proc_en_bit_map;
+ u8 rsrv;
+#endif
+ u32 hq_tcp_seq;
+#if defined(__BIG_ENDIAN)
+ u16 flags;
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0)
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1)
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5)
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5
+ u16 hq_cons;
+#elif defined(__LITTLE_ENDIAN)
+ u16 hq_cons;
+ u16 flags;
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0)
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1)
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5)
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5
+#endif
+ struct regpair rsrv1;
+};
+
+
+/*
+ * PDU header of an iSCSI SCSI command (SCSI read/write)
+ */
+struct iscsi_cmd_pdu_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+ u8 opcode;
+ u8 op_attr;
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES (0x7<<0)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES_SHIFT 0
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x3<<3)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 3
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG (0x1<<5)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG_SHIFT 5
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG (0x1<<6)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG_SHIFT 6
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7
+ u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rsrv0;
+ u8 op_attr;
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES (0x7<<0)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES_SHIFT 0
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x3<<3)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 3
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG (0x1<<5)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG_SHIFT 5
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG (0x1<<6)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG_SHIFT 6
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7
+ u8 opcode;
+#endif
+ u32 data_fields;
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+ struct regpair lun;
+ u32 itt;
+ u32 expected_data_transfer_length;
+ u32 cmd_sn;
+ u32 exp_stat_sn;
+ u32 scsi_command_block[4];
+};
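data_fields packs the BHS TotalAHSLength and DataSegmentLength into one 32-bit word; the same pattern repeats in every PDU header below. A sketch of cracking it, with an assumed helper name:

static inline u32
iscsi_cmd_pdu_dsl(const struct iscsi_cmd_pdu_hdr_little_endian *hdr)
{
	return (hdr->data_fields &
		ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH) >>
	       ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT;
}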
+
+
+/*
+ * Buffer per connection, used in Tstorm
+ */
+struct iscsi_conn_buf {
+ struct regpair reserved[8];
+};
+
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct ustorm_iscsi_rq_db {
+ struct regpair pbl_base;
+ struct regpair curr_pbe;
+};
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct ustorm_iscsi_r2tq_db {
+ struct regpair pbl_base;
+ struct regpair curr_pbe;
+};
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct ustorm_iscsi_cq_db {
+#if defined(__BIG_ENDIAN)
+ u16 cq_sn;
+ u16 prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 prod;
+ u16 cq_sn;
+#endif
+ struct regpair curr_pbe;
+};
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct rings_db {
+ struct ustorm_iscsi_rq_db rq;
+ struct ustorm_iscsi_r2tq_db r2tq;
+ struct ustorm_iscsi_cq_db cq[8];
+#if defined(__BIG_ENDIAN)
+ u16 rq_prod;
+ u16 r2tq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 r2tq_prod;
+ u16 rq_prod;
+#endif
+ struct regpair cq_pbl_base;
+};
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct ustorm_iscsi_placement_db {
+ u32 sgl_base_lo;
+ u32 sgl_base_hi;
+ u32 local_sge_0_address_hi;
+ u32 local_sge_0_address_lo;
+#if defined(__BIG_ENDIAN)
+ u16 curr_sge_offset;
+ u16 local_sge_0_size;
+#elif defined(__LITTLE_ENDIAN)
+ u16 local_sge_0_size;
+ u16 curr_sge_offset;
+#endif
+ u32 local_sge_1_address_hi;
+ u32 local_sge_1_address_lo;
+#if defined(__BIG_ENDIAN)
+ u8 exp_padding_2b;
+ u8 nal_len_3b;
+ u16 local_sge_1_size;
+#elif defined(__LITTLE_ENDIAN)
+ u16 local_sge_1_size;
+ u8 nal_len_3b;
+ u8 exp_padding_2b;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 sgl_size;
+ u8 local_sge_index_2b;
+ u16 reserved7;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved7;
+ u8 local_sge_index_2b;
+ u8 sgl_size;
+#endif
+ u32 rem_pdu;
+ u32 place_db_bitfield_1;
+#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD (0xFFFFFF<<0)
+#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD_SHIFT 0
+#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID (0xFF<<24)
+#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID_SHIFT 24
+ u32 place_db_bitfield_2;
+#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE (0xFFFFFF<<0)
+#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE_SHIFT 0
+#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX (0xFF<<24)
+#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX_SHIFT 24
+ u32 nal;
+#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE (0xFFFFFF<<0)
+#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE_SHIFT 0
+#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B (0xFF<<24)
+#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B_SHIFT 24
+};
+
+/*
+ * Ustorm iSCSI Storm Context
+ */
+struct ustorm_iscsi_st_context {
+ u32 exp_stat_sn;
+ u32 exp_data_sn;
+ struct rings_db ring;
+ struct regpair task_pbl_base;
+ struct regpair tce_phy_addr;
+ struct ustorm_iscsi_placement_db place_db;
+ u32 reserved8;
+ u32 rem_rcv_len;
+#if defined(__BIG_ENDIAN)
+ u16 hdr_itt;
+ u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 iscsi_conn_id;
+ u16 hdr_itt;
+#endif
+ u32 nal_bytes;
+#if defined(__BIG_ENDIAN)
+ u8 hdr_second_byte_union;
+ u8 bitfield_0;
+#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0)
+#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
+#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
+#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
+#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2)
+#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3)
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3
+ u8 task_pdu_cache_index;
+ u8 task_pbe_cache_index;
+#elif defined(__LITTLE_ENDIAN)
+ u8 task_pbe_cache_index;
+ u8 task_pdu_cache_index;
+ u8 bitfield_0;
+#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0)
+#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
+#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
+#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
+#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2)
+#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3)
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3
+ u8 hdr_second_byte_union;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 reserved3;
+ u8 reserved2;
+ u8 acDecrement;
+#elif defined(__LITTLE_ENDIAN)
+ u8 acDecrement;
+ u8 reserved2;
+ u16 reserved3;
+#endif
+ u32 task_stat;
+#if defined(__BIG_ENDIAN)
+ u8 hdr_opcode;
+ u8 num_cqs;
+ u16 reserved5;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved5;
+ u8 num_cqs;
+ u8 hdr_opcode;
+#endif
+ u32 negotiated_rx;
+#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH (0xFFFFFF<<0)
+#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH_SHIFT 0
+#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS (0xFF<<24)
+#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT 24
+ u32 negotiated_rx_and_flags;
+#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH (0xFFFFFF<<0)
+#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH_SHIFT 0
+#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED (0x1<<24)
+#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED_SHIFT 24
+#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN (0x1<<25)
+#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN_SHIFT 25
+#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN (0x1<<26)
+#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN_SHIFT 26
+#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR (0x1<<27)
+#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR_SHIFT 27
+#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID (0x1<<28)
+#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID_SHIFT 28
+#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE (0x3<<29)
+#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE_SHIFT 29
+#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED (0x1<<31)
+#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED_SHIFT 31
+};
+
+/*
+ * TCP context region, shared in TOE, RDMA and iSCSI
+ */
+struct tstorm_tcp_st_context_section {
+ u32 flags1;
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT (0xFFFFFF<<0)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_SHIFT 0
+#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID (0x1<<24)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID_SHIFT 24
+#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS (0x1<<25)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS_SHIFT 25
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0 (0x1<<26)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0_SHIFT 26
+#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD (0x1<<27)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD_SHIFT 27
+#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED (0x1<<28)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED_SHIFT 28
+#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE (0x1<<29)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE_SHIFT 29
+#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN (0x1<<30)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN_SHIFT 30
+#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN (0x1<<31)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN_SHIFT 31
+ u32 flags2;
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION (0xFFFFFF<<0)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_SHIFT 0
+#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN (0x1<<24)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN_SHIFT 24
+#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN (0x1<<25)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN_SHIFT 25
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT (0x1<<26)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT_SHIFT 26
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT (0x1<<27)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT_SHIFT 27
+#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<28)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 28
+#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<29)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 29
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK (0x1<<30)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK_SHIFT 30
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK (0x1<<31)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK_SHIFT 31
+#if defined(__BIG_ENDIAN)
+ u16 mss;
+ u8 tcp_sm_state;
+ u8 rto_exp;
+#elif defined(__LITTLE_ENDIAN)
+ u8 rto_exp;
+ u8 tcp_sm_state;
+ u16 mss;
+#endif
+ u32 rcv_nxt;
+ u32 timestamp_recent;
+ u32 timestamp_recent_time;
+ u32 cwnd;
+ u32 ss_thresh;
+ u32 cwnd_accum;
+ u32 prev_seg_seq;
+ u32 expected_rel_seq;
+ u32 recover;
+#if defined(__BIG_ENDIAN)
+ u8 retransmit_count;
+ u8 ka_max_probe_count;
+ u8 persist_probe_count;
+ u8 ka_probe_count;
+#elif defined(__LITTLE_ENDIAN)
+ u8 ka_probe_count;
+ u8 persist_probe_count;
+ u8 ka_max_probe_count;
+ u8 retransmit_count;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 statistics_counter_id;
+ u8 ooo_support_mode;
+ u8 snd_wnd_scale;
+ u8 dup_ack_count;
+#elif defined(__LITTLE_ENDIAN)
+ u8 dup_ack_count;
+ u8 snd_wnd_scale;
+ u8 ooo_support_mode;
+ u8 statistics_counter_id;
+#endif
+ u32 retransmit_start_time;
+ u32 ka_timeout;
+ u32 ka_interval;
+ u32 isle_start_seq;
+ u32 isle_end_seq;
+#if defined(__BIG_ENDIAN)
+ u16 second_isle_address;
+ u16 recent_seg_wnd;
+#elif defined(__LITTLE_ENDIAN)
+ u16 recent_seg_wnd;
+ u16 second_isle_address;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 max_isles_ever_happened;
+ u8 isles_number;
+ u16 last_isle_address;
+#elif defined(__LITTLE_ENDIAN)
+ u16 last_isle_address;
+ u8 isles_number;
+ u8 max_isles_ever_happened;
+#endif
+ u32 max_rt_time;
+#if defined(__BIG_ENDIAN)
+ u16 lsb_mac_address;
+ u16 vlan_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 vlan_id;
+ u16 lsb_mac_address;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 msb_mac_address;
+ u16 mid_mac_address;
+#elif defined(__LITTLE_ENDIAN)
+ u16 mid_mac_address;
+ u16 msb_mac_address;
+#endif
+ u32 rightmost_received_seq;
+};
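flags1 and flags2 each pair a 24-bit RTT quantity with single-bit controls; testing a one-bit flag needs only its mask. A sketch with an assumed helper name:

static inline bool
tstorm_tcp_ka_enabled(const struct tstorm_tcp_st_context_section *tcp)
{
	return !!(tcp->flags1 & TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED);
}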
+
+/*
+ * Termination variables
+ */
+struct iscsi_term_vars {
+ u8 BitMap;
+#define ISCSI_TERM_VARS_TCP_STATE (0xF<<0)
+#define ISCSI_TERM_VARS_TCP_STATE_SHIFT 0
+#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT (0x1<<4)
+#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT_SHIFT 4
+#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT (0x1<<5)
+#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT_SHIFT 5
+#define ISCSI_TERM_VARS_TERM_ON_CHIP (0x1<<6)
+#define ISCSI_TERM_VARS_TERM_ON_CHIP_SHIFT 6
+#define ISCSI_TERM_VARS_RSRV (0x1<<7)
+#define ISCSI_TERM_VARS_RSRV_SHIFT 7
+};
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct tstorm_iscsi_st_context_section {
+ u32 nalPayload;
+ u32 b2nh;
+#if defined(__BIG_ENDIAN)
+ u16 rq_cons;
+ u8 flags;
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN (0x3<<5)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN_SHIFT 5
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0 (0x1<<7)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0_SHIFT 7
+ u8 hdr_bytes_2_fetch;
+#elif defined(__LITTLE_ENDIAN)
+ u8 hdr_bytes_2_fetch;
+ u8 flags;
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN (0x3<<5)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN_SHIFT 5
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0 (0x1<<7)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0_SHIFT 7
+ u16 rq_cons;
+#endif
+ struct regpair rq_db_phy_addr;
+#if defined(__BIG_ENDIAN)
+ struct iscsi_term_vars term_vars;
+ u8 rsrv1;
+ u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 iscsi_conn_id;
+ u8 rsrv1;
+ struct iscsi_term_vars term_vars;
+#endif
+ u32 process_nxt;
+};
+
+/*
+ * The iSCSI non-aggregative context of Tstorm
+ */
+struct tstorm_iscsi_st_context {
+ struct tstorm_tcp_st_context_section tcp;
+ struct tstorm_iscsi_st_context_section iscsi;
+};
+
+/*
+ * Ethernet context section, shared in TOE, RDMA and iSCSI
+ */
+struct xstorm_eth_context_section {
+#if defined(__BIG_ENDIAN)
+ u8 remote_addr_4;
+ u8 remote_addr_5;
+ u8 local_addr_0;
+ u8 local_addr_1;
+#elif defined(__LITTLE_ENDIAN)
+ u8 local_addr_1;
+ u8 local_addr_0;
+ u8 remote_addr_5;
+ u8 remote_addr_4;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 remote_addr_0;
+ u8 remote_addr_1;
+ u8 remote_addr_2;
+ u8 remote_addr_3;
+#elif defined(__LITTLE_ENDIAN)
+ u8 remote_addr_3;
+ u8 remote_addr_2;
+ u8 remote_addr_1;
+ u8 remote_addr_0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 reserved_vlan_type;
+ u16 params;
+#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
+#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
+#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12)
+#define XSTORM_ETH_CONTEXT_SECTION_CFI_SHIFT 12
+#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
+#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
+#elif defined(__LITTLE_ENDIAN)
+ u16 params;
+#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
+#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
+#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12)
+#define XSTORM_ETH_CONTEXT_SECTION_CFI_SHIFT 12
+#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
+#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
+ u16 reserved_vlan_type;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 local_addr_2;
+ u8 local_addr_3;
+ u8 local_addr_4;
+ u8 local_addr_5;
+#elif defined(__LITTLE_ENDIAN)
+ u8 local_addr_5;
+ u8 local_addr_4;
+ u8 local_addr_3;
+ u8 local_addr_2;
+#endif
+};
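Because of the per-word endian mirroring, the six MAC bytes are scattered across three 32-bit words, so drivers assign them one byte at a time instead of memcpy()ing. A hedged sketch; the helper and the 'mac' array are assumptions:

static void xstorm_eth_set_local_mac(struct xstorm_eth_context_section *e,
				     const u8 mac[6])
{
	e->local_addr_0 = mac[0];
	e->local_addr_1 = mac[1];
	e->local_addr_2 = mac[2];
	e->local_addr_3 = mac[3];
	e->local_addr_4 = mac[4];
	e->local_addr_5 = mac[5];
}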
+
+/*
+ * IPv4 context section, shared in TOE, RDMA and iSCSI
+ */
+struct xstorm_ip_v4_context_section {
+#if defined(__BIG_ENDIAN)
+ u16 __pbf_hdr_cmd_rsvd_id;
+ u16 __pbf_hdr_cmd_rsvd_flags_offset;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __pbf_hdr_cmd_rsvd_flags_offset;
+ u16 __pbf_hdr_cmd_rsvd_id;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 __pbf_hdr_cmd_rsvd_ver_ihl;
+ u8 tos;
+ u16 __pbf_hdr_cmd_rsvd_length;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __pbf_hdr_cmd_rsvd_length;
+ u8 tos;
+ u8 __pbf_hdr_cmd_rsvd_ver_ihl;
+#endif
+ u32 ip_local_addr;
+#if defined(__BIG_ENDIAN)
+ u8 ttl;
+ u8 __pbf_hdr_cmd_rsvd_protocol;
+ u16 __pbf_hdr_cmd_rsvd_csum;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __pbf_hdr_cmd_rsvd_csum;
+ u8 __pbf_hdr_cmd_rsvd_protocol;
+ u8 ttl;
+#endif
+ u32 __pbf_hdr_cmd_rsvd_1;
+ u32 ip_remote_addr;
+};
+
+/*
+ * Padded IPv4 context section, shared in TOE, RDMA and iSCSI
+ */
+struct xstorm_padded_ip_v4_context_section {
+ struct xstorm_ip_v4_context_section ip_v4;
+ u32 reserved1[4];
+};
+
+/*
+ * IPv6 context section, shared in TOE, RDMA and iSCSI
+ */
+struct xstorm_ip_v6_context_section {
+#if defined(__BIG_ENDIAN)
+ u16 pbf_hdr_cmd_rsvd_payload_len;
+ u8 pbf_hdr_cmd_rsvd_nxt_hdr;
+ u8 hop_limit;
+#elif defined(__LITTLE_ENDIAN)
+ u8 hop_limit;
+ u8 pbf_hdr_cmd_rsvd_nxt_hdr;
+ u16 pbf_hdr_cmd_rsvd_payload_len;
+#endif
+ u32 priority_flow_label;
+#define XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL (0xFFFFF<<0)
+#define XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL_SHIFT 0
+#define XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS (0xFF<<20)
+#define XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS_SHIFT 20
+#define XSTORM_IP_V6_CONTEXT_SECTION_PBF_HDR_CMD_RSVD_VER (0xF<<28)
+#define XSTORM_IP_V6_CONTEXT_SECTION_PBF_HDR_CMD_RSVD_VER_SHIFT 28
+ u32 ip_local_addr_lo_hi;
+ u32 ip_local_addr_lo_lo;
+ u32 ip_local_addr_hi_hi;
+ u32 ip_local_addr_hi_lo;
+ u32 ip_remote_addr_lo_hi;
+ u32 ip_remote_addr_lo_lo;
+ u32 ip_remote_addr_hi_hi;
+ u32 ip_remote_addr_hi_lo;
+};
+
+union xstorm_ip_context_section_types {
+ struct xstorm_padded_ip_v4_context_section padded_ip_v4;
+ struct xstorm_ip_v6_context_section ip_v6;
+};
+
+/*
+ * TCP context section, shared in TOE, RDMA and iSCSI
+ */
+struct xstorm_tcp_context_section {
+ u32 snd_max;
+#if defined(__BIG_ENDIAN)
+ u16 remote_port;
+ u16 local_port;
+#elif defined(__LITTLE_ENDIAN)
+ u16 local_port;
+ u16 remote_port;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 original_nagle_1b;
+ u8 ts_enabled;
+ u16 tcp_params;
+#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0)
+#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0
+#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT (0x1<<8)
+#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT_SHIFT 8
+#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED (0x1<<9)
+#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9
+#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10)
+#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10
+#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV (0x1<<11)
+#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV_SHIFT 11
+#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12)
+#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12
+#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13)
+#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13
+#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14)
+#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 tcp_params;
+#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0)
+#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0
+#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT (0x1<<8)
+#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT_SHIFT 8
+#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED (0x1<<9)
+#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9
+#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10)
+#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10
+#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV (0x1<<11)
+#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV_SHIFT 11
+#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12)
+#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12
+#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13)
+#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13
+#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14)
+#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14
+ u8 ts_enabled;
+ u8 original_nagle_1b;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 pseudo_csum;
+ u16 window_scaling_factor;
+#elif defined(__LITTLE_ENDIAN)
+ u16 window_scaling_factor;
+ u16 pseudo_csum;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 reserved2;
+ u8 statistics_counter_id;
+ u8 statistics_params;
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0)
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1)
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
+#define XSTORM_TCP_CONTEXT_SECTION_RESERVED (0x3F<<2)
+#define XSTORM_TCP_CONTEXT_SECTION_RESERVED_SHIFT 2
+#elif defined(__LITTLE_ENDIAN)
+ u8 statistics_params;
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0)
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1)
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
+#define XSTORM_TCP_CONTEXT_SECTION_RESERVED (0x3F<<2)
+#define XSTORM_TCP_CONTEXT_SECTION_RESERVED_SHIFT 2
+ u8 statistics_counter_id;
+ u16 reserved2;
+#endif
+ u32 ts_time_diff;
+ u32 __next_timer_expir;
+};
+
+/*
+ * Common context section, shared in TOE, RDMA and iSCSI
+ */
+struct xstorm_common_context_section {
+ struct xstorm_eth_context_section ethernet;
+ union xstorm_ip_context_section_types ip_union;
+ struct xstorm_tcp_context_section tcp;
+#if defined(__BIG_ENDIAN)
+ u8 __dcb_val;
+ u8 flags;
+#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED (0x1<<0)
+#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT 0
+#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT (0x7<<1)
+#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT 1
+#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE (0x1<<4)
+#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE_SHIFT 4
+#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY (0x7<<5)
+#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY_SHIFT 5
+ u8 reserved;
+ u8 ip_version_1b;
+#elif defined(__LITTLE_ENDIAN)
+ u8 ip_version_1b;
+ u8 reserved;
+ u8 flags;
+#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED (0x1<<0)
+#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT 0
+#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT (0x7<<1)
+#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT 1
+#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE (0x1<<4)
+#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE_SHIFT 4
+#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY (0x7<<5)
+#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY_SHIFT 5
+ u8 __dcb_val;
+#endif
+};
+
+/*
+ * Flags used in the iSCSI context section
+ */
+struct xstorm_iscsi_context_flags {
+ u8 flags;
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA (0x1<<0)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA_SHIFT 0
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T (0x1<<1)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T_SHIFT 1
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_HEADER_DIGEST (0x1<<2)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_HEADER_DIGEST_SHIFT 2
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_DATA_DIGEST (0x1<<3)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_DATA_DIGEST_SHIFT 3
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_HQ_BD_WRITTEN (0x1<<4)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_HQ_BD_WRITTEN_SHIFT 4
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_LAST_OP_SQ (0x1<<5)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_LAST_OP_SQ_SHIFT 5
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_UPDATE_SND_NXT (0x1<<6)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_UPDATE_SND_NXT_SHIFT 6
+#define XSTORM_ISCSI_CONTEXT_FLAGS_RESERVED4 (0x1<<7)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_RESERVED4_SHIFT 7
+};
+
+struct iscsi_task_context_entry_x {
+ u32 data_out_buffer_offset;
+ u32 itt;
+ u32 data_sn;
+};
+
+struct iscsi_task_context_entry_xuc_x_write_only {
+ u32 tx_r2t_sn;
+};
+
+struct iscsi_task_context_entry_xuc_xu_write_both {
+ u32 sgl_base_lo;
+ u32 sgl_base_hi;
+#if defined(__BIG_ENDIAN)
+ u8 sgl_size;
+ u8 sge_index;
+ u16 sge_offset;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sge_offset;
+ u8 sge_index;
+ u8 sgl_size;
+#endif
+};
+
+/*
+ * iSCSI context section
+ */
+struct xstorm_iscsi_context_section {
+ u32 first_burst_length;
+ u32 max_send_pdu_length;
+ struct regpair sq_pbl_base;
+ struct regpair sq_curr_pbe;
+ struct regpair hq_pbl_base;
+ struct regpair hq_curr_pbe_base;
+ struct regpair r2tq_pbl_base;
+ struct regpair r2tq_curr_pbe_base;
+ struct regpair task_pbl_base;
+#if defined(__BIG_ENDIAN)
+ u16 data_out_count;
+ struct xstorm_iscsi_context_flags flags;
+ u8 task_pbl_cache_idx;
+#elif defined(__LITTLE_ENDIAN)
+ u8 task_pbl_cache_idx;
+ struct xstorm_iscsi_context_flags flags;
+ u16 data_out_count;
+#endif
+ u32 seq_more_2_send;
+ u32 pdu_more_2_send;
+ struct iscsi_task_context_entry_x temp_tce_x;
+ struct iscsi_task_context_entry_xuc_x_write_only temp_tce_x_wr;
+ struct iscsi_task_context_entry_xuc_xu_write_both temp_tce_xu_wr;
+ struct regpair lun;
+ u32 exp_data_transfer_len_ttt;
+ u32 pdu_data_2_rxmit;
+ u32 rxmit_bytes_2_dr;
+#if defined(__BIG_ENDIAN)
+ u16 rxmit_sge_offset;
+ u16 hq_rxmit_cons;
+#elif defined(__LITTLE_ENDIAN)
+ u16 hq_rxmit_cons;
+ u16 rxmit_sge_offset;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 r2tq_cons;
+ u8 rxmit_flags;
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD (0x1<<0)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD_SHIFT 0
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR (0x1<<1)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR_SHIFT 1
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU (0x1<<2)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU_SHIFT 2
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR (0x1<<3)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR_SHIFT 3
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR (0x1<<4)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR_SHIFT 4
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING (0x3<<5)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING_SHIFT 5
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT (0x1<<7)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT_SHIFT 7
+ u8 rxmit_sge_idx;
+#elif defined(__LITTLE_ENDIAN)
+ u8 rxmit_sge_idx;
+ u8 rxmit_flags;
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD (0x1<<0)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD_SHIFT 0
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR (0x1<<1)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR_SHIFT 1
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU (0x1<<2)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU_SHIFT 2
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR (0x1<<3)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR_SHIFT 3
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR (0x1<<4)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR_SHIFT 4
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING (0x3<<5)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING_SHIFT 5
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT (0x1<<7)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT_SHIFT 7
+ u16 r2tq_cons;
+#endif
+ u32 hq_rxmit_tcp_seq;
+};
+
+/*
+ * Xstorm iSCSI Storm Context
+ */
+struct xstorm_iscsi_st_context {
+ struct xstorm_common_context_section common;
+ struct xstorm_iscsi_context_section iscsi;
+};
+
+/*
+ * iSCSI connection context
+ */
+struct iscsi_context {
+ struct ustorm_iscsi_st_context ustorm_st_context;
+ struct tstorm_iscsi_st_context tstorm_st_context;
+ struct xstorm_iscsi_ag_context xstorm_ag_context;
+ struct tstorm_iscsi_ag_context tstorm_ag_context;
+ struct cstorm_iscsi_ag_context cstorm_ag_context;
+ struct ustorm_iscsi_ag_context ustorm_ag_context;
+ struct timers_block_context timers_context;
+ struct regpair upb_context;
+ struct xstorm_iscsi_st_context xstorm_st_context;
+ struct regpair xpb_context;
+ struct cstorm_iscsi_st_context cstorm_st_context;
+};
+
+
+/*
+ * PDU header of an iSCSI DATA-OUT
+ */
+struct iscsi_data_pdu_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+ u8 opcode;
+ u8 op_attr;
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7
+ u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rsrv0;
+ u8 op_attr;
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7
+ u8 opcode;
+#endif
+ u32 data_fields;
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+ struct regpair lun;
+ u32 itt;
+ u32 ttt;
+ u32 rsrv2;
+ u32 exp_stat_sn;
+ u32 rsrv3;
+ u32 data_sn;
+ u32 buffer_offset;
+ u32 rsrv4;
+};
+
+
+/*
+ * PDU header of an iSCSI login request
+ */
+struct iscsi_login_req_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+ u8 opcode;
+ u8 op_attr;
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG (0x3<<0)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG (0x3<<2)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG_SHIFT 2
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0 (0x3<<4)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0_SHIFT 4
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT_SHIFT 7
+ u8 version_max;
+ u8 version_min;
+#elif defined(__LITTLE_ENDIAN)
+ u8 version_min;
+ u8 version_max;
+ u8 op_attr;
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG (0x3<<0)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG (0x3<<2)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG_SHIFT 2
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0 (0x3<<4)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0_SHIFT 4
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT_SHIFT 7
+ u8 opcode;
+#endif
+ u32 data_fields;
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+ u32 isid_lo;
+#if defined(__BIG_ENDIAN)
+ u16 isid_hi;
+ u16 tsih;
+#elif defined(__LITTLE_ENDIAN)
+ u16 tsih;
+ u16 isid_hi;
+#endif
+ u32 itt;
+#if defined(__BIG_ENDIAN)
+ u16 cid;
+ u16 rsrv1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rsrv1;
+ u16 cid;
+#endif
+ u32 cmd_sn;
+ u32 exp_stat_sn;
+ u32 rsrv2[4];
+};
+
+/*
+ * PDU header of an iSCSI logout request
+ */
+struct iscsi_logout_req_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+ u8 opcode;
+ u8 op_attr;
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE (0x7F<<0)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE_SHIFT 0
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
+ u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rsrv0;
+ u8 op_attr;
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE (0x7F<<0)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE_SHIFT 0
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
+ u8 opcode;
+#endif
+ u32 data_fields;
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+ u32 rsrv2[2];
+ u32 itt;
+#if defined(__BIG_ENDIAN)
+ u16 cid;
+ u16 rsrv1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rsrv1;
+ u16 cid;
+#endif
+ u32 cmd_sn;
+ u32 exp_stat_sn;
+ u32 rsrv3[4];
+};
+
+/*
+ * PDU header of an iSCSI TMF request
+ */
+struct iscsi_tmf_req_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+ u8 opcode;
+ u8 op_attr;
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
+ u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rsrv0;
+ u8 op_attr;
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
+ u8 opcode;
+#endif
+ u32 data_fields;
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+ struct regpair lun;
+ u32 itt;
+ u32 referenced_task_tag;
+ u32 cmd_sn;
+ u32 exp_stat_sn;
+ u32 ref_cmd_sn;
+ u32 exp_data_sn;
+ u32 rsrv2[2];
+};
+
+/*
+ * PDU header of an iSCSI Text request
+ */
+struct iscsi_text_req_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+ u8 opcode;
+ u8 op_attr;
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1 (0x3F<<0)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL_SHIFT 7
+ u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rsrv0;
+ u8 op_attr;
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1 (0x3F<<0)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL_SHIFT 7
+ u8 opcode;
+#endif
+ u32 data_fields;
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+ struct regpair lun;
+ u32 itt;
+ u32 ttt;
+ u32 cmd_sn;
+ u32 exp_stat_sn;
+ u32 rsrv3[4];
+};
+
+/*
+ * PDU header of an iSCSI Nop-Out
+ */
+struct iscsi_nop_out_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+ u8 opcode;
+ u8 op_attr;
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1 (0x1<<7)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1_SHIFT 7
+ u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rsrv0;
+ u8 op_attr;
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1 (0x1<<7)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1_SHIFT 7
+ u8 opcode;
+#endif
+ u32 data_fields;
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+ struct regpair lun;
+ u32 itt;
+ u32 ttt;
+ u32 cmd_sn;
+ u32 exp_stat_sn;
+ u32 rsrv3[4];
+};
+
+/*
+ * iSCSI PDU headers in little endian form.
+ */
+union iscsi_pdu_headers_little_endian {
+ u32 fullHeaderSize[12];
+ struct iscsi_cmd_pdu_hdr_little_endian command_pdu_hdr;
+ struct iscsi_data_pdu_hdr_little_endian data_out_pdu_hdr;
+ struct iscsi_login_req_hdr_little_endian login_req_pdu_hdr;
+ struct iscsi_logout_req_hdr_little_endian logout_req_pdu_hdr;
+ struct iscsi_tmf_req_hdr_little_endian tmf_req_pdu_hdr;
+ struct iscsi_text_req_hdr_little_endian text_req_pdu_hdr;
+ struct iscsi_nop_out_hdr_little_endian nop_out_pdu_hdr;
+};
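+
+/*
+ * Usage note (illustrative sketch, not part of this header): each flag
+ * word above packs several fields via paired MASK/SHIFT macros; a field
+ * is read by masking first, then shifting.  For a hypothetical header
+ * pointer 'hdr' of type struct iscsi_nop_out_hdr_little_endian:
+ *
+ *	u32 dsl = (hdr->data_fields &
+ *		   ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH) >>
+ *		ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT;
+ */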
+
+struct iscsi_hq_bd {
+ union iscsi_pdu_headers_little_endian pdu_header;
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u16 lcl_cmp_flg;
+#elif defined(__LITTLE_ENDIAN)
+ u16 lcl_cmp_flg;
+ u16 reserved1;
+#endif
+ u32 sgl_base_lo;
+ u32 sgl_base_hi;
+#if defined(__BIG_ENDIAN)
+ u8 sgl_size;
+ u8 sge_index;
+ u16 sge_offset;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sge_offset;
+ u8 sge_index;
+ u8 sgl_size;
+#endif
+};
+
+
+/*
+ * CQE data for L2 OOO connection $$KEEP_ENDIANNESS$$
+ */
+struct iscsi_l2_ooo_data {
+ __le32 iscsi_cid;
+ u8 drop_isle;
+ u8 drop_size;
+ u8 ooo_opcode;
+ u8 ooo_isle;
+ u8 reserved[8];
+};
+
+
+
+
+
+
+struct iscsi_task_context_entry_xuc_c_write_only {
+ u32 total_data_acked;
+};
+
+struct iscsi_task_context_r2t_table_entry {
+ u32 ttt;
+ u32 desired_data_len;
+};
+
+struct iscsi_task_context_entry_xuc_u_write_only {
+ u32 exp_r2t_sn;
+ struct iscsi_task_context_r2t_table_entry r2t_table[4];
+#if defined(__BIG_ENDIAN)
+ u16 data_in_count;
+ u8 cq_id;
+ u8 valid_1b;
+#elif defined(__LITTLE_ENDIAN)
+ u8 valid_1b;
+ u8 cq_id;
+ u16 data_in_count;
+#endif
+};
+
+struct iscsi_task_context_entry_xuc {
+ struct iscsi_task_context_entry_xuc_c_write_only write_c;
+ u32 exp_data_transfer_len;
+ struct iscsi_task_context_entry_xuc_x_write_only write_x;
+ u32 lun_lo;
+ struct iscsi_task_context_entry_xuc_xu_write_both write_xu;
+ u32 lun_hi;
+ struct iscsi_task_context_entry_xuc_u_write_only write_u;
+};
+
+struct iscsi_task_context_entry_u {
+ u32 exp_r2t_buff_offset;
+ u32 rem_rcv_len;
+ u32 exp_data_sn;
+};
+
+struct iscsi_task_context_entry {
+ struct iscsi_task_context_entry_x tce_x;
+#if defined(__BIG_ENDIAN)
+ u16 data_out_count;
+ u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rsrv0;
+ u16 data_out_count;
+#endif
+ struct iscsi_task_context_entry_xuc tce_xuc;
+ struct iscsi_task_context_entry_u tce_u;
+ u32 rsrv1[7];
+};
+
+
+
+
+
+
+
+
+struct iscsi_task_context_entry_xuc_x_init_only {
+ struct regpair lun;
+ u32 exp_data_transfer_len;
+};
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*
+ * ipv6 structure
+ */
+struct ip_v6_addr {
+ u32 ip_addr_lo_lo;
+ u32 ip_addr_lo_hi;
+ u32 ip_addr_hi_lo;
+ u32 ip_addr_hi_hi;
+};
+
+
+
+/*
+ * l5cm - connection identification params
+ */
+struct l5cm_conn_addr_params {
+ u32 pmtu;
+#if defined(__BIG_ENDIAN)
+ u8 remote_addr_3;
+ u8 remote_addr_2;
+ u8 remote_addr_1;
+ u8 remote_addr_0;
+#elif defined(__LITTLE_ENDIAN)
+ u8 remote_addr_0;
+ u8 remote_addr_1;
+ u8 remote_addr_2;
+ u8 remote_addr_3;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 params;
+#define L5CM_CONN_ADDR_PARAMS_IP_VERSION (0x1<<0)
+#define L5CM_CONN_ADDR_PARAMS_IP_VERSION_SHIFT 0
+#define L5CM_CONN_ADDR_PARAMS_RSRV (0x7FFF<<1)
+#define L5CM_CONN_ADDR_PARAMS_RSRV_SHIFT 1
+ u8 remote_addr_5;
+ u8 remote_addr_4;
+#elif defined(__LITTLE_ENDIAN)
+ u8 remote_addr_4;
+ u8 remote_addr_5;
+ u16 params;
+#define L5CM_CONN_ADDR_PARAMS_IP_VERSION (0x1<<0)
+#define L5CM_CONN_ADDR_PARAMS_IP_VERSION_SHIFT 0
+#define L5CM_CONN_ADDR_PARAMS_RSRV (0x7FFF<<1)
+#define L5CM_CONN_ADDR_PARAMS_RSRV_SHIFT 1
+#endif
+ struct ip_v6_addr local_ip_addr;
+ struct ip_v6_addr remote_ip_addr;
+ u32 ipv6_flow_label_20b;
+ u32 reserved1;
+#if defined(__BIG_ENDIAN)
+ u16 remote_tcp_port;
+ u16 local_tcp_port;
+#elif defined(__LITTLE_ENDIAN)
+ u16 local_tcp_port;
+ u16 remote_tcp_port;
+#endif
+};
+
+/*
+ * l5cm-xstorm connection buffer
+ */
+struct l5cm_xstorm_conn_buffer {
+#if defined(__BIG_ENDIAN)
+ u16 rsrv1;
+ u16 params;
+#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE (0x1<<0)
+#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE_SHIFT 0
+#define L5CM_XSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
+#define L5CM_XSTORM_CONN_BUFFER_RSRV_SHIFT 1
+#elif defined(__LITTLE_ENDIAN)
+ u16 params;
+#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE (0x1<<0)
+#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE_SHIFT 0
+#define L5CM_XSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
+#define L5CM_XSTORM_CONN_BUFFER_RSRV_SHIFT 1
+ u16 rsrv1;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 mss;
+ u16 pseudo_header_checksum;
+#elif defined(__LITTLE_ENDIAN)
+ u16 pseudo_header_checksum;
+ u16 mss;
+#endif
+ u32 rcv_buf;
+ u32 rsrv2;
+ struct regpair context_addr;
+};
+
+/*
+ * l5cm-tstorm connection buffer
+ */
+struct l5cm_tstorm_conn_buffer {
+ u32 rsrv1[2];
+#if defined(__BIG_ENDIAN)
+ u16 params;
+#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0)
+#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE_SHIFT 0
+#define L5CM_TSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
+#define L5CM_TSTORM_CONN_BUFFER_RSRV_SHIFT 1
+ u8 ka_max_probe_count;
+ u8 ka_enable;
+#elif defined(__LITTLE_ENDIAN)
+ u8 ka_enable;
+ u8 ka_max_probe_count;
+ u16 params;
+#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0)
+#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE_SHIFT 0
+#define L5CM_TSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
+#define L5CM_TSTORM_CONN_BUFFER_RSRV_SHIFT 1
+#endif
+ u32 ka_timeout;
+ u32 ka_interval;
+ u32 max_rt_time;
+};
+
+/*
+ * l5cm connection buffer for active side
+ */
+struct l5cm_active_conn_buffer {
+ struct l5cm_conn_addr_params conn_addr_buf;
+ struct l5cm_xstorm_conn_buffer xstorm_conn_buffer;
+ struct l5cm_tstorm_conn_buffer tstorm_conn_buffer;
+};
+
+
+
+/*
+ * The l5cm opaque buffer passed in the 'add new connection' ramrod (passive side)
+ */
+struct l5cm_hash_input_string {
+ u32 __opaque1;
+#if defined(__BIG_ENDIAN)
+ u16 __opaque3;
+ u16 __opaque2;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __opaque2;
+ u16 __opaque3;
+#endif
+ struct ip_v6_addr __opaque4;
+ struct ip_v6_addr __opaque5;
+ u32 __opaque6;
+ u32 __opaque7[5];
+};
+
+
+/*
+ * syn cookie component
+ */
+struct l5cm_syn_cookie_comp {
+ u32 __opaque;
+};
+
+/*
+ * data related to listeners of a TCP port
+ */
+struct l5cm_port_listener_data {
+ u8 params;
+#define L5CM_PORT_LISTENER_DATA_ENABLE (0x1<<0)
+#define L5CM_PORT_LISTENER_DATA_ENABLE_SHIFT 0
+#define L5CM_PORT_LISTENER_DATA_IP_INDEX (0xF<<1)
+#define L5CM_PORT_LISTENER_DATA_IP_INDEX_SHIFT 1
+#define L5CM_PORT_LISTENER_DATA_NET_FILTER (0x1<<5)
+#define L5CM_PORT_LISTENER_DATA_NET_FILTER_SHIFT 5
+#define L5CM_PORT_LISTENER_DATA_DEFFERED_MODE (0x1<<6)
+#define L5CM_PORT_LISTENER_DATA_DEFFERED_MODE_SHIFT 6
+#define L5CM_PORT_LISTENER_DATA_MPA_MODE (0x1<<7)
+#define L5CM_PORT_LISTENER_DATA_MPA_MODE_SHIFT 7
+};
+
+/*
+ * Opaque structure passed from U to X when final ack arrives
+ */
+struct l5cm_opaque_buf {
+ u32 __opaque1;
+ u32 __opaque2;
+ u32 __opaque3;
+ u32 __opaque4;
+ struct l5cm_syn_cookie_comp __opaque5;
+#if defined(__BIG_ENDIAN)
+ u16 rsrv2;
+ u8 rsrv;
+ struct l5cm_port_listener_data __opaque6;
+#elif defined(__LITTLE_ENDIAN)
+ struct l5cm_port_listener_data __opaque6;
+ u8 rsrv;
+ u16 rsrv2;
+#endif
+};
+
+
+/*
+ * l5cm slow path element
+ */
+struct l5cm_packet_size {
+ u32 size;
+ u32 rsrv;
+};
+
+
+/*
+ * The final-ack union structure in PCS entry after final ack arrived
+ */
+struct l5cm_pcse_ack {
+ struct l5cm_xstorm_conn_buffer tx_socket_params;
+ struct l5cm_opaque_buf opaque_buf;
+ struct l5cm_tstorm_conn_buffer rx_socket_params;
+};
+
+
+/*
+ * The syn union structure in PCS entry after syn arrived
+ */
+struct l5cm_pcse_syn {
+ struct l5cm_opaque_buf opaque_buf;
+ u32 rsrv[12];
+};
+
+
+/*
+ * pcs entry data for passive connections
+ */
+struct l5cm_pcs_attributes {
+#if defined(__BIG_ENDIAN)
+ u16 pcs_id;
+ u8 status;
+ u8 flags;
+#define L5CM_PCS_ATTRIBUTES_NET_FILTER (0x1<<0)
+#define L5CM_PCS_ATTRIBUTES_NET_FILTER_SHIFT 0
+#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH (0x1<<1)
+#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH_SHIFT 1
+#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT (0x1<<2)
+#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT_SHIFT 2
+#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT (0x1<<3)
+#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT_SHIFT 3
+#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC (0x1<<4)
+#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC_SHIFT 4
+#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD (0x1<<5)
+#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD_SHIFT 5
+#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET (0x1<<6)
+#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET_SHIFT 6
+#define L5CM_PCS_ATTRIBUTES_RSRV (0x1<<7)
+#define L5CM_PCS_ATTRIBUTES_RSRV_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+ u8 flags;
+#define L5CM_PCS_ATTRIBUTES_NET_FILTER (0x1<<0)
+#define L5CM_PCS_ATTRIBUTES_NET_FILTER_SHIFT 0
+#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH (0x1<<1)
+#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH_SHIFT 1
+#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT (0x1<<2)
+#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT_SHIFT 2
+#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT (0x1<<3)
+#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT_SHIFT 3
+#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC (0x1<<4)
+#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC_SHIFT 4
+#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD (0x1<<5)
+#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD_SHIFT 5
+#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET (0x1<<6)
+#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET_SHIFT 6
+#define L5CM_PCS_ATTRIBUTES_RSRV (0x1<<7)
+#define L5CM_PCS_ATTRIBUTES_RSRV_SHIFT 7
+ u8 status;
+ u16 pcs_id;
+#endif
+};
+
+
+union l5cm_seg_params {
+ struct l5cm_pcse_syn syn_seg_params;
+ struct l5cm_pcse_ack ack_seg_params;
+};
+
+/*
+ * pcs entry data for passive connections
+ */
+struct l5cm_pcs_hdr {
+ struct l5cm_hash_input_string hash_input_string;
+ struct l5cm_conn_addr_params conn_addr_buf;
+ u32 cid;
+ u32 hash_result;
+ union l5cm_seg_params seg_params;
+ struct l5cm_pcs_attributes att;
+#if defined(__BIG_ENDIAN)
+ u16 rsrv;
+ u16 rx_seg_size;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_seg_size;
+ u16 rsrv;
+#endif
+};
+
+/*
+ * pcs entry for passive connections
+ */
+struct l5cm_pcs_entry {
+ struct l5cm_pcs_hdr hdr;
+ u8 rx_segment[1516];
+};
+
+
+
+
+/*
+ * l5cm connection parameters
+ */
+union l5cm_reduce_param_union {
+ u32 opaque1;
+ u32 opaque2;
+};
+
+/*
+ * l5cm connection parameters
+ */
+struct l5cm_reduce_conn {
+ union l5cm_reduce_param_union opaque1;
+ u32 opaque2;
+};
+
+/*
+ * l5cm slow path element
+ */
+union l5cm_specific_data {
+ u8 protocol_data[8];
+ struct regpair phy_address;
+ struct l5cm_packet_size packet_size;
+ struct l5cm_reduce_conn reduced_conn;
+};
+
+/*
+ * l5 slow path element
+ */
+struct l5cm_spe {
+ struct spe_hdr hdr;
+ union l5cm_specific_data data;
+};
+
+
+
+
+/*
+ * Termination variables
+ */
+struct l5cm_term_vars {
+ u8 BitMap;
+#define L5CM_TERM_VARS_TCP_STATE (0xF<<0)
+#define L5CM_TERM_VARS_TCP_STATE_SHIFT 0
+#define L5CM_TERM_VARS_FIN_RECEIVED_SBIT (0x1<<4)
+#define L5CM_TERM_VARS_FIN_RECEIVED_SBIT_SHIFT 4
+#define L5CM_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT (0x1<<5)
+#define L5CM_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT_SHIFT 5
+#define L5CM_TERM_VARS_TERM_ON_CHIP (0x1<<6)
+#define L5CM_TERM_VARS_TERM_ON_CHIP_SHIFT 6
+#define L5CM_TERM_VARS_RSRV (0x1<<7)
+#define L5CM_TERM_VARS_RSRV_SHIFT 7
+};
+
+
+
+
+/*
+ * Tstorm TCP flags
+ */
+struct tstorm_l5cm_tcp_flags {
+ u16 flags;
+#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID (0xFFF<<0)
+#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID_SHIFT 0
+#define TSTORM_L5CM_TCP_FLAGS_RSRV0 (0x1<<12)
+#define TSTORM_L5CM_TCP_FLAGS_RSRV0_SHIFT 12
+#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<13)
+#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 13
+#define TSTORM_L5CM_TCP_FLAGS_RSRV1 (0x3<<14)
+#define TSTORM_L5CM_TCP_FLAGS_RSRV1_SHIFT 14
+};
+
+
+/*
+ * Xstorm TCP flags
+ */
+struct xstorm_l5cm_tcp_flags {
+ u8 flags;
+#define XSTORM_L5CM_TCP_FLAGS_ENC_ENABLED (0x1<<0)
+#define XSTORM_L5CM_TCP_FLAGS_ENC_ENABLED_SHIFT 0
+#define XSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<1)
+#define XSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 1
+#define XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN (0x1<<2)
+#define XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN_SHIFT 2
+#define XSTORM_L5CM_TCP_FLAGS_RSRV (0x1F<<3)
+#define XSTORM_L5CM_TCP_FLAGS_RSRV_SHIFT 3
+};
+
+
+
+/*
+ * Out-of-order events
+ */
+enum tcp_ooo_event {
+ TCP_EVENT_ADD_PEN = 0,
+ TCP_EVENT_ADD_NEW_ISLE = 1,
+ TCP_EVENT_ADD_ISLE_RIGHT = 2,
+ TCP_EVENT_ADD_ISLE_LEFT = 3,
+ TCP_EVENT_JOIN = 4,
+ TCP_EVENT_NOP = 5,
+ MAX_TCP_OOO_EVENT
+};
+
+
+/*
+ * OOO support modes
+ */
+enum tcp_tstorm_ooo {
+ TCP_TSTORM_OOO_DROP_AND_PROC_ACK = 0,
+ TCP_TSTORM_OOO_SEND_PURE_ACK = 1,
+ TCP_TSTORM_OOO_SUPPORTED = 2,
+ MAX_TCP_TSTORM_OOO
+};
+
+
+
+
+
+
+
+
+
+#endif /* __5710_HSI_CNIC_LE__ */
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
new file mode 100644
index 000000000000..79443e0dbf96
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -0,0 +1,340 @@
+/* cnic_if.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+
+#ifndef CNIC_IF_H
+#define CNIC_IF_H
+
+#define CNIC_MODULE_VERSION "2.5.7"
+#define CNIC_MODULE_RELDATE "July 20, 2011"
+
+#define CNIC_ULP_RDMA 0
+#define CNIC_ULP_ISCSI 1
+#define CNIC_ULP_FCOE 2
+#define CNIC_ULP_L4 3
+#define MAX_CNIC_ULP_TYPE_EXT 3
+#define MAX_CNIC_ULP_TYPE 4
+
+struct kwqe {
+ u32 kwqe_op_flag;
+
+#define KWQE_QID_SHIFT 8
+#define KWQE_OPCODE_MASK 0x00ff0000
+#define KWQE_OPCODE_SHIFT 16
+#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
+#define KWQE_LAYER_MASK 0x70000000
+#define KWQE_LAYER_SHIFT 28
+#define KWQE_FLAGS_LAYER_MASK_L2 (2<<28)
+#define KWQE_FLAGS_LAYER_MASK_L3 (3<<28)
+#define KWQE_FLAGS_LAYER_MASK_L4 (4<<28)
+#define KWQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28)
+#define KWQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28)
+#define KWQE_FLAGS_LAYER_MASK_L5_FCOE (7<<28)
+
+ u32 kwqe_info0;
+ u32 kwqe_info1;
+ u32 kwqe_info2;
+ u32 kwqe_info3;
+ u32 kwqe_info4;
+ u32 kwqe_info5;
+ u32 kwqe_info6;
+};
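+
+/*
+ * Note (illustrative, not part of this header): kwqe_op_flag packs the
+ * opcode and layer fields, so a hypothetical iSCSI work request with
+ * opcode 'op' would be tagged roughly as:
+ *
+ *	kwqe->kwqe_op_flag = (op << KWQE_OPCODE_SHIFT) |
+ *			     KWQE_FLAGS_LAYER_MASK_L5_ISCSI;
+ */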
+
+struct kwqe_16 {
+ u32 kwqe_info0;
+ u32 kwqe_info1;
+ u32 kwqe_info2;
+ u32 kwqe_info3;
+};
+
+struct kcqe {
+ u32 kcqe_info0;
+ u32 kcqe_info1;
+ u32 kcqe_info2;
+ u32 kcqe_info3;
+ u32 kcqe_info4;
+ u32 kcqe_info5;
+ u32 kcqe_info6;
+ u32 kcqe_op_flag;
+ #define KCQE_RAMROD_COMPLETION (0x1<<27) /* Everest */
+ #define KCQE_FLAGS_LAYER_MASK (0x7<<28)
+ #define KCQE_FLAGS_LAYER_MASK_MISC (0<<28)
+ #define KCQE_FLAGS_LAYER_MASK_L2 (2<<28)
+ #define KCQE_FLAGS_LAYER_MASK_L3 (3<<28)
+ #define KCQE_FLAGS_LAYER_MASK_L4 (4<<28)
+ #define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28)
+ #define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28)
+ #define KCQE_FLAGS_LAYER_MASK_L5_FCOE (7<<28)
+ #define KCQE_FLAGS_NEXT (1<<31)
+ #define KCQE_FLAGS_OPCODE_MASK (0xff<<16)
+ #define KCQE_FLAGS_OPCODE_SHIFT (16)
+ #define KCQE_OPCODE(op) \
+ (((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
+};
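+
+/*
+ * Note (illustrative, not part of this header): a completion is decoded
+ * by checking the layer bits, then extracting the opcode, e.g. for a
+ * hypothetical received entry 'kcqe':
+ *
+ *	if ((kcqe->kcqe_op_flag & KCQE_FLAGS_LAYER_MASK) ==
+ *	    KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
+ *		opcode = KCQE_OPCODE(kcqe->kcqe_op_flag);
+ */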
+
+#define MAX_CNIC_CTL_DATA 64
+#define MAX_DRV_CTL_DATA 64
+
+#define CNIC_CTL_STOP_CMD 1
+#define CNIC_CTL_START_CMD 2
+#define CNIC_CTL_COMPLETION_CMD 3
+#define CNIC_CTL_STOP_ISCSI_CMD 4
+
+#define DRV_CTL_IO_WR_CMD 0x101
+#define DRV_CTL_IO_RD_CMD 0x102
+#define DRV_CTL_CTX_WR_CMD 0x103
+#define DRV_CTL_CTXTBL_WR_CMD 0x104
+#define DRV_CTL_RET_L5_SPQ_CREDIT_CMD 0x105
+#define DRV_CTL_START_L2_CMD 0x106
+#define DRV_CTL_STOP_L2_CMD 0x107
+#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD 0x10c
+#define DRV_CTL_ISCSI_STOPPED_CMD 0x10d
+
+struct cnic_ctl_completion {
+ u32 cid;
+ u8 opcode;
+ u8 error;
+};
+
+struct cnic_ctl_info {
+ int cmd;
+ union {
+ struct cnic_ctl_completion comp;
+ char bytes[MAX_CNIC_CTL_DATA];
+ } data;
+};
+
+struct drv_ctl_spq_credit {
+ u32 credit_count;
+};
+
+struct drv_ctl_io {
+ u32 cid_addr;
+ u32 offset;
+ u32 data;
+ dma_addr_t dma_addr;
+};
+
+struct drv_ctl_l2_ring {
+ u32 client_id;
+ u32 cid;
+};
+
+struct drv_ctl_info {
+ int cmd;
+ union {
+ struct drv_ctl_spq_credit credit;
+ struct drv_ctl_io io;
+ struct drv_ctl_l2_ring ring;
+ char bytes[MAX_DRV_CTL_DATA];
+ } data;
+};
+
+struct cnic_ops {
+ struct module *cnic_owner;
+ /* Calls to these functions are protected by RCU. When
+ * unregistering, we wait for any calls to complete before
+ * continuing.
+ */
+ int (*cnic_handler)(void *, void *);
+ int (*cnic_ctl)(void *, struct cnic_ctl_info *);
+};
+
+#define MAX_CNIC_VEC 8
+
+struct cnic_irq {
+ unsigned int vector;
+ void *status_blk;
+ u32 status_blk_num;
+ u32 status_blk_num2;
+ u32 irq_flags;
+#define CNIC_IRQ_FL_MSIX 0x00000001
+};
+
+struct cnic_eth_dev {
+ struct module *drv_owner;
+ u32 drv_state;
+#define CNIC_DRV_STATE_REGD 0x00000001
+#define CNIC_DRV_STATE_USING_MSIX 0x00000002
+#define CNIC_DRV_STATE_NO_ISCSI_OOO 0x00000004
+#define CNIC_DRV_STATE_NO_ISCSI 0x00000008
+#define CNIC_DRV_STATE_NO_FCOE 0x00000010
+ u32 chip_id;
+ u32 max_kwqe_pending;
+ struct pci_dev *pdev;
+ void __iomem *io_base;
+ void __iomem *io_base2;
+ const void *iro_arr;
+
+ u32 ctx_tbl_offset;
+ u32 ctx_tbl_len;
+ int ctx_blk_size;
+ u32 starting_cid;
+ u32 max_iscsi_conn;
+ u32 max_fcoe_conn;
+ u32 max_rdma_conn;
+ u32 fcoe_init_cid;
+ u32 fcoe_wwn_port_name_hi;
+ u32 fcoe_wwn_port_name_lo;
+ u32 fcoe_wwn_node_name_hi;
+ u32 fcoe_wwn_node_name_lo;
+
+ u16 iscsi_l2_client_id;
+ u16 iscsi_l2_cid;
+ u8 iscsi_mac[ETH_ALEN];
+
+ int num_irq;
+ struct cnic_irq irq_arr[MAX_CNIC_VEC];
+ int (*drv_register_cnic)(struct net_device *,
+ struct cnic_ops *, void *);
+ int (*drv_unregister_cnic)(struct net_device *);
+ int (*drv_submit_kwqes_32)(struct net_device *,
+ struct kwqe *[], u32);
+ int (*drv_submit_kwqes_16)(struct net_device *,
+ struct kwqe_16 *[], u32);
+ int (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
+ unsigned long reserved1[2];
+};
+
+struct cnic_sockaddr {
+ union {
+ struct sockaddr_in v4;
+ struct sockaddr_in6 v6;
+ } local;
+ union {
+ struct sockaddr_in v4;
+ struct sockaddr_in6 v6;
+ } remote;
+};
+
+struct cnic_sock {
+ struct cnic_dev *dev;
+ void *context;
+ u32 src_ip[4];
+ u32 dst_ip[4];
+ u16 src_port;
+ u16 dst_port;
+ u16 vlan_id;
+ unsigned char old_ha[6];
+ unsigned char ha[6];
+ u32 mtu;
+ u32 cid;
+ u32 l5_cid;
+ u32 pg_cid;
+ int ulp_type;
+
+ u32 ka_timeout;
+ u32 ka_interval;
+ u8 ka_max_probe_count;
+ u8 tos;
+ u8 ttl;
+ u8 snd_seq_scale;
+ u32 rcv_buf;
+ u32 snd_buf;
+ u32 seed;
+
+ unsigned long tcp_flags;
+#define SK_TCP_NO_DELAY_ACK 0x1
+#define SK_TCP_KEEP_ALIVE 0x2
+#define SK_TCP_NAGLE 0x4
+#define SK_TCP_TIMESTAMP 0x8
+#define SK_TCP_SACK 0x10
+#define SK_TCP_SEG_SCALING 0x20
+ unsigned long flags;
+#define SK_F_INUSE 0
+#define SK_F_OFFLD_COMPLETE 1
+#define SK_F_OFFLD_SCHED 2
+#define SK_F_PG_OFFLD_COMPLETE 3
+#define SK_F_CONNECT_START 4
+#define SK_F_IPV6 5
+#define SK_F_CLOSING 7
+
+ atomic_t ref_count;
+ u32 state;
+ struct kwqe kwqe1;
+ struct kwqe kwqe2;
+ struct kwqe kwqe3;
+};
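+
+/*
+ * Note: the SK_TCP_* values above are bit masks for 'tcp_flags', while
+ * the SK_F_* values are bit numbers for 'flags' (e.g. for use with
+ * test_bit()/set_bit() on the unsigned long).
+ */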
+
+struct cnic_dev {
+ struct net_device *netdev;
+ struct pci_dev *pcidev;
+ void __iomem *regview;
+ struct list_head list;
+
+ int (*register_device)(struct cnic_dev *dev, int ulp_type,
+ void *ulp_ctx);
+ int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
+ int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
+ u32 num_wqes);
+ int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
+ u32 num_wqes);
+
+ int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
+ void *);
+ int (*cm_destroy)(struct cnic_sock *);
+ int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
+ int (*cm_abort)(struct cnic_sock *);
+ int (*cm_close)(struct cnic_sock *);
+ struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
+ int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
+ char *data, u16 data_size);
+ unsigned long flags;
+#define CNIC_F_CNIC_UP 1
+#define CNIC_F_BNX2_CLASS 3
+#define CNIC_F_BNX2X_CLASS 4
+ atomic_t ref_count;
+ u8 mac_addr[6];
+
+ int max_iscsi_conn;
+ int max_fcoe_conn;
+ int max_rdma_conn;
+
+ void *cnic_priv;
+};
+
+#define CNIC_WR(dev, off, val) writel(val, dev->regview + off)
+#define CNIC_WR16(dev, off, val) writew(val, dev->regview + off)
+#define CNIC_WR8(dev, off, val) writeb(val, dev->regview + off)
+#define CNIC_RD(dev, off) readl(dev->regview + off)
+#define CNIC_RD16(dev, off) readw(dev->regview + off)
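+
+/*
+ * Example (illustrative only): these accessors wrap MMIO on the device
+ * register window, so for a hypothetical register offset 'off':
+ *
+ *	CNIC_WR(dev, off, val);    expands to writel(val, dev->regview + off)
+ *	val = CNIC_RD(dev, off);   expands to readl(dev->regview + off)
+ */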
+
+struct cnic_ulp_ops {
+ /* Calls to these functions are protected by RCU. When
+ * unregistering, we wait for any calls to complete before
+ * continuing.
+ */
+
+ void (*cnic_init)(struct cnic_dev *dev);
+ void (*cnic_exit)(struct cnic_dev *dev);
+ void (*cnic_start)(void *ulp_ctx);
+ void (*cnic_stop)(void *ulp_ctx);
+ void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
+ u32 num_cqes);
+ void (*indicate_netevent)(void *ulp_ctx, unsigned long event, u16 vid);
+ void (*cm_connect_complete)(struct cnic_sock *);
+ void (*cm_close_complete)(struct cnic_sock *);
+ void (*cm_abort_complete)(struct cnic_sock *);
+ void (*cm_remote_close)(struct cnic_sock *);
+ void (*cm_remote_abort)(struct cnic_sock *);
+ int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type,
+ char *data, u16 data_size);
+ struct module *owner;
+ atomic_t ref_count;
+};
+
+extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
+
+extern int cnic_unregister_driver(int ulp_type);
+
+extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev);
+extern struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
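+
+/*
+ * Registration sketch (illustrative; 'my_ulp_ops' and its callbacks are
+ * hypothetical names):
+ *
+ *	static struct cnic_ulp_ops my_ulp_ops = {
+ *		.cnic_init	= my_init,
+ *		.cnic_exit	= my_exit,
+ *		.owner		= THIS_MODULE,
+ *	};
+ *
+ *	rc = cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
+ *	...
+ *	cnic_unregister_driver(CNIC_ULP_ISCSI);
+ */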
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
new file mode 100644
index 000000000000..0a1d7f279fc8
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -0,0 +1,2690 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Broadcom Corporation
+ * Copyright (c) 2006, 2007 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ *
+ * This driver is designed for the Broadcom SiByte SOC built-in
+ * Ethernet controllers. Written by Mitch Lichtenberg at Broadcom Corp.
+ *
+ * Updated to the driver model and the PHY abstraction layer
+ * by Maciej W. Rozycki.
+ */
+
+#include <linux/bug.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/prefetch.h>
+
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+
+/* Operational parameters that usually are not changed. */
+
+#define CONFIG_SBMAC_COALESCE
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2*HZ)
+
+
+MODULE_AUTHOR("Mitch Lichtenberg (Broadcom Corp.)");
+MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver");
+
+/* A few user-configurable values which may be modified when a driver
+ module is loaded. */
+
+/* Message verbosity: 0 quiet, 1 normal .. 7 verbose. */
+static int debug = 1;
+module_param(debug, int, S_IRUGO);
+MODULE_PARM_DESC(debug, "Debug messages");
+
+#ifdef CONFIG_SBMAC_COALESCE
+static int int_pktcnt_tx = 255;
+module_param(int_pktcnt_tx, int, S_IRUGO);
+MODULE_PARM_DESC(int_pktcnt_tx, "TX packet count");
+
+static int int_timeout_tx = 255;
+module_param(int_timeout_tx, int, S_IRUGO);
+MODULE_PARM_DESC(int_timeout_tx, "TX timeout value");
+
+static int int_pktcnt_rx = 64;
+module_param(int_pktcnt_rx, int, S_IRUGO);
+MODULE_PARM_DESC(int_pktcnt_rx, "RX packet count");
+
+static int int_timeout_rx = 64;
+module_param(int_timeout_rx, int, S_IRUGO);
+MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
+#endif
+
+#include <asm/sibyte/board.h>
+#include <asm/sibyte/sb1250.h>
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/bcm1480_int.h>
+#define R_MAC_DMA_OODPKTLOST_RX R_MAC_DMA_OODPKTLOST
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_int.h>
+#else
+#error invalid SiByte MAC configuration
+#endif
+#include <asm/sibyte/sb1250_scd.h>
+#include <asm/sibyte/sb1250_mac.h>
+#include <asm/sibyte/sb1250_dma.h>
+
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#define UNIT_INT(n) (K_BCM1480_INT_MAC_0 + ((n) * 2))
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+#define UNIT_INT(n) (K_INT_MAC_0 + (n))
+#else
+#error invalid SiByte MAC configuration
+#endif
+
+#ifdef K_INT_PHY
+#define SBMAC_PHY_INT K_INT_PHY
+#else
+#define SBMAC_PHY_INT PHY_POLL
+#endif
+
+/**********************************************************************
+ * Simple types
+ ********************************************************************* */
+
+enum sbmac_speed {
+ sbmac_speed_none = 0,
+ sbmac_speed_10 = SPEED_10,
+ sbmac_speed_100 = SPEED_100,
+ sbmac_speed_1000 = SPEED_1000,
+};
+
+enum sbmac_duplex {
+ sbmac_duplex_none = -1,
+ sbmac_duplex_half = DUPLEX_HALF,
+ sbmac_duplex_full = DUPLEX_FULL,
+};
+
+enum sbmac_fc {
+ sbmac_fc_none,
+ sbmac_fc_disabled,
+ sbmac_fc_frame,
+ sbmac_fc_collision,
+ sbmac_fc_carrier,
+};
+
+enum sbmac_state {
+ sbmac_state_uninit,
+ sbmac_state_off,
+ sbmac_state_on,
+ sbmac_state_broken,
+};
+
+
+/**********************************************************************
+ * Macros
+ ********************************************************************* */
+
+
+#define SBDMA_NEXTBUF(d,f) ((((d)->f+1) == (d)->sbdma_dscrtable_end) ? \
+ (d)->sbdma_dscrtable : (d)->f+1)
+
+
+#define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES)
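+
+/*
+ * Worked example (illustrative): SBDMA_NEXTBUF yields the next ring slot,
+ * wrapping from the last descriptor back to the table base, so the ring
+ * is walked with d->f = SBDMA_NEXTBUF(d, f).  NUMCACHEBLKS rounds a byte
+ * count up to whole cache lines; assuming SMP_CACHE_BYTES == 32,
+ * NUMCACHEBLKS(1518) == 48.
+ */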
+
+#define SBMAC_MAX_TXDESCR 256
+#define SBMAC_MAX_RXDESCR 256
+
+#define ETHER_ADDR_LEN 6
+#define ENET_PACKET_SIZE 1518
+/*#define ENET_PACKET_SIZE 9216 */
+
+/**********************************************************************
+ * DMA Descriptor structure
+ ********************************************************************* */
+
+struct sbdmadscr {
+ uint64_t dscr_a;
+ uint64_t dscr_b;
+};
+
+/**********************************************************************
+ * DMA Controller structure
+ ********************************************************************* */
+
+struct sbmacdma {
+
+ /*
+ * This stuff is used to identify the channel and the registers
+ * associated with it.
+ */
+ struct sbmac_softc *sbdma_eth; /* back pointer to associated
+ MAC */
+ int sbdma_channel; /* channel number */
+ int sbdma_txdir; /* direction (1=transmit) */
+ int sbdma_maxdescr; /* total # of descriptors
+ in ring */
+#ifdef CONFIG_SBMAC_COALESCE
+ int sbdma_int_pktcnt;
+ /* # descriptors rx/tx
+ before interrupt */
+ int sbdma_int_timeout;
+ /* # usec rx/tx interrupt */
+#endif
+ void __iomem *sbdma_config0; /* DMA config register 0 */
+ void __iomem *sbdma_config1; /* DMA config register 1 */
+ void __iomem *sbdma_dscrbase;
+ /* descriptor base address */
+ void __iomem *sbdma_dscrcnt; /* descriptor count register */
+ void __iomem *sbdma_curdscr; /* current descriptor
+ address */
+ void __iomem *sbdma_oodpktlost;
+ /* pkt drop (rx only) */
+
+ /*
+ * This stuff is for maintenance of the ring
+ */
+ void *sbdma_dscrtable_unaligned;
+ struct sbdmadscr *sbdma_dscrtable;
+ /* base of descriptor table */
+ struct sbdmadscr *sbdma_dscrtable_end;
+ /* end of descriptor table */
+ struct sk_buff **sbdma_ctxtable;
+ /* context table, one
+ per descr */
+ dma_addr_t sbdma_dscrtable_phys;
+ /* and also the phys addr */
+ struct sbdmadscr *sbdma_addptr; /* next dscr for sw to add */
+ struct sbdmadscr *sbdma_remptr; /* next dscr for sw
+ to remove */
+};
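+
+/*
+ * Ring convention (documenting the code below): the ring is empty when
+ * sbdma_addptr == sbdma_remptr and full when
+ * SBDMA_NEXTBUF(d, sbdma_addptr) == sbdma_remptr, so one descriptor
+ * always remains unused.
+ */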
+
+
+/**********************************************************************
+ * Ethernet softc structure
+ ********************************************************************* */
+
+struct sbmac_softc {
+
+ /*
+ * Linux-specific things
+ */
+ struct net_device *sbm_dev; /* pointer to linux device */
+ struct napi_struct napi;
+ struct phy_device *phy_dev; /* the associated PHY device */
+ struct mii_bus *mii_bus; /* the MII bus */
+ int phy_irq[PHY_MAX_ADDR];
+ spinlock_t sbm_lock; /* spin lock */
+ int sbm_devflags; /* current device flags */
+
+ /*
+ * Controller-specific things
+ */
+ void __iomem *sbm_base; /* MAC's base address */
+ enum sbmac_state sbm_state; /* current state */
+
+ void __iomem *sbm_macenable; /* MAC Enable Register */
+ void __iomem *sbm_maccfg; /* MAC Config Register */
+ void __iomem *sbm_fifocfg; /* FIFO Config Register */
+ void __iomem *sbm_framecfg; /* Frame Config Register */
+ void __iomem *sbm_rxfilter; /* Receive Filter Register */
+ void __iomem *sbm_isr; /* Interrupt Status Register */
+ void __iomem *sbm_imr; /* Interrupt Mask Register */
+ void __iomem *sbm_mdio; /* MDIO Register */
+
+ enum sbmac_speed sbm_speed; /* current speed */
+ enum sbmac_duplex sbm_duplex; /* current duplex */
+ enum sbmac_fc sbm_fc; /* cur. flow control setting */
+ int sbm_pause; /* current pause setting */
+ int sbm_link; /* current link state */
+
+ unsigned char sbm_hwaddr[ETHER_ADDR_LEN];
+
+ struct sbmacdma sbm_txdma; /* only channel 0 for now */
+ struct sbmacdma sbm_rxdma;
+ int rx_hw_checksum;
+ int sbe_idx;
+};
+
+
+/**********************************************************************
+ * Externs
+ ********************************************************************* */
+
+/**********************************************************************
+ * Prototypes
+ ********************************************************************* */
+
+static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
+ int txrx, int maxdescr);
+static void sbdma_channel_start(struct sbmacdma *d, int rxtx);
+static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
+ struct sk_buff *m);
+static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m);
+static void sbdma_emptyring(struct sbmacdma *d);
+static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d);
+static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
+ int work_to_do, int poll);
+static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
+ int poll);
+static int sbmac_initctx(struct sbmac_softc *s);
+static void sbmac_channel_start(struct sbmac_softc *s);
+static void sbmac_channel_stop(struct sbmac_softc *s);
+static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *,
+ enum sbmac_state);
+static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff);
+static uint64_t sbmac_addr2reg(unsigned char *ptr);
+static irqreturn_t sbmac_intr(int irq, void *dev_instance);
+static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev);
+static void sbmac_setmulti(struct sbmac_softc *sc);
+static int sbmac_init(struct platform_device *pldev, long long base);
+static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed);
+static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex,
+ enum sbmac_fc fc);
+
+static int sbmac_open(struct net_device *dev);
+static void sbmac_tx_timeout(struct net_device *dev);
+static void sbmac_set_rx_mode(struct net_device *dev);
+static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int sbmac_close(struct net_device *dev);
+static int sbmac_poll(struct napi_struct *napi, int budget);
+
+static void sbmac_mii_poll(struct net_device *dev);
+static int sbmac_mii_probe(struct net_device *dev);
+
+static void sbmac_mii_sync(void __iomem *sbm_mdio);
+static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data,
+ int bitcnt);
+static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx);
+static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
+ u16 val);
+
+
+/**********************************************************************
+ * Globals
+ ********************************************************************* */
+
+static char sbmac_string[] = "sb1250-mac";
+
+static char sbmac_mdio_string[] = "sb1250-mac-mdio";
+
+
+/**********************************************************************
+ * MDIO constants
+ ********************************************************************* */
+
+#define MII_COMMAND_START 0x01
+#define MII_COMMAND_READ 0x02
+#define MII_COMMAND_WRITE 0x01
+#define MII_COMMAND_ACK 0x02
+
+#define M_MAC_MDIO_DIR_OUTPUT 0 /* for clarity */
+
+#define ENABLE 1
+#define DISABLE 0
+
+/**********************************************************************
+ * SBMAC_MII_SYNC(sbm_mdio)
+ *
+ * Synchronize with the MII - send a pattern of bits to the MII
+ * that will guarantee that it is ready to accept a command.
+ *
+ * Input parameters:
+ * sbm_mdio - address of the MAC's MDIO register
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbmac_mii_sync(void __iomem *sbm_mdio)
+{
+ int cnt;
+ uint64_t bits;
+ int mac_mdio_genc;
+
+ mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;
+
+ bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT;
+
+ __raw_writeq(bits | mac_mdio_genc, sbm_mdio);
+
+ for (cnt = 0; cnt < 32; cnt++) {
+ __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio);
+ __raw_writeq(bits | mac_mdio_genc, sbm_mdio);
+ }
+}
+
+/**********************************************************************
+ * SBMAC_MII_SENDDATA(sbm_mdio, data, bitcnt)
+ *
+ * Send some bits to the MII. The bits to be sent are right-
+ * justified in the 'data' parameter.
+ *
+ * Input parameters:
+ * sbm_mdio - address of the MAC's MDIO register
+ * data - data to send
+ * bitcnt - number of bits to send
+ ********************************************************************* */
+
+static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data,
+ int bitcnt)
+{
+ int i;
+ uint64_t bits;
+ unsigned int curmask;
+ int mac_mdio_genc;
+
+ mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;
+
+ bits = M_MAC_MDIO_DIR_OUTPUT;
+ __raw_writeq(bits | mac_mdio_genc, sbm_mdio);
+
+ curmask = 1 << (bitcnt - 1);
+
+ for (i = 0; i < bitcnt; i++) {
+ if (data & curmask)
+ bits |= M_MAC_MDIO_OUT;
+		else
+			bits &= ~M_MAC_MDIO_OUT;
+ __raw_writeq(bits | mac_mdio_genc, sbm_mdio);
+ __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio);
+ __raw_writeq(bits | mac_mdio_genc, sbm_mdio);
+ curmask >>= 1;
+ }
+}
+
+
+
+/**********************************************************************
+ * SBMAC_MII_READ(bus, phyaddr, regidx)
+ * Read a PHY register.
+ *
+ * Input parameters:
+ * bus - MDIO bus handle
+ * phyaddr - PHY's address
+ * regidx - index of register to read
+ *
+ * Return value:
+ * value read, or 0xffff if an error occurred.
+ ********************************************************************* */
+
+static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
+{
+ struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv;
+ void __iomem *sbm_mdio = sc->sbm_mdio;
+ int idx;
+ int error;
+ int regval;
+ int mac_mdio_genc;
+
+ /*
+ * Synchronize ourselves so that the PHY knows the next
+ * thing coming down is a command
+ */
+ sbmac_mii_sync(sbm_mdio);
+
+ /*
+ * Send the data to the PHY. The sequence is
+ * a "start" command (2 bits)
+ * a "read" command (2 bits)
+ * the PHY addr (5 bits)
+ * the register index (5 bits)
+ */
+ sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2);
+ sbmac_mii_senddata(sbm_mdio, MII_COMMAND_READ, 2);
+ sbmac_mii_senddata(sbm_mdio, phyaddr, 5);
+ sbmac_mii_senddata(sbm_mdio, regidx, 5);
+
+ mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;
+
+ /*
+ * Switch the port around without a clock transition.
+ */
+ __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
+
+ /*
+ * Send out a clock pulse to signal we want the status
+ */
+ __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
+ sbm_mdio);
+ __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
+
+ /*
+ * If an error occurred, the PHY will signal '1' back
+ */
+ error = __raw_readq(sbm_mdio) & M_MAC_MDIO_IN;
+
+ /*
+ * Issue an 'idle' clock pulse, but keep the direction
+ * the same.
+ */
+ __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
+ sbm_mdio);
+ __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
+
+ regval = 0;
+
+ for (idx = 0; idx < 16; idx++) {
+ regval <<= 1;
+
+ if (error == 0) {
+ if (__raw_readq(sbm_mdio) & M_MAC_MDIO_IN)
+ regval |= 1;
+ }
+
+ __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
+ sbm_mdio);
+ __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
+ }
+
+ /* Switch back to output */
+ __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio);
+
+ if (error == 0)
+ return regval;
+ return 0xffff;
+}
+
+
+/**********************************************************************
+ * SBMAC_MII_WRITE(bus, phyaddr, regidx, regval)
+ *
+ * Write a value to a PHY register.
+ *
+ * Input parameters:
+ * bus - MDIO bus handle
+ * phyaddr - PHY to use
+ * regidx - register within the PHY
+ * regval - data to write to register
+ *
+ * Return value:
+ * 0 for success
+ ********************************************************************* */
+
+static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
+ u16 regval)
+{
+ struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv;
+ void __iomem *sbm_mdio = sc->sbm_mdio;
+ int mac_mdio_genc;
+
+ sbmac_mii_sync(sbm_mdio);
+
+ sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2);
+ sbmac_mii_senddata(sbm_mdio, MII_COMMAND_WRITE, 2);
+ sbmac_mii_senddata(sbm_mdio, phyaddr, 5);
+ sbmac_mii_senddata(sbm_mdio, regidx, 5);
+ sbmac_mii_senddata(sbm_mdio, MII_COMMAND_ACK, 2);
+ sbmac_mii_senddata(sbm_mdio, regval, 16);
+
+ mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;
+
+ __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio);
+
+ return 0;
+}
+
+
+
+/**********************************************************************
+ * SBDMA_INITCTX(d,s,chan,txrx,maxdescr)
+ *
+ * Initialize a DMA channel context. Since there are potentially
+ * eight DMA channels per MAC, it's nice to do this in a standard
+ * way.
+ *
+ * Input parameters:
+ * d - struct sbmacdma (DMA channel context)
+ * s - struct sbmac_softc (pointer to a MAC)
+ * chan - channel number (0..1 right now)
+ * txrx - Identifies DMA_TX or DMA_RX for channel direction
+ * maxdescr - number of descriptors
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
+ int txrx, int maxdescr)
+{
+#ifdef CONFIG_SBMAC_COALESCE
+ int int_pktcnt, int_timeout;
+#endif
+
+ /*
+ * Save away interesting stuff in the structure
+ */
+
+ d->sbdma_eth = s;
+ d->sbdma_channel = chan;
+ d->sbdma_txdir = txrx;
+
+#if 0
+ /* RMON clearing */
+ s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING;
+#endif
+
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BYTES);
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_COLLISIONS);
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_LATE_COL);
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_EX_COL);
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_FCS_ERROR);
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_ABORT);
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BAD);
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_GOOD);
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_RUNT);
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_OVERSIZE);
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BYTES);
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_MCAST);
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BCAST);
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BAD);
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_GOOD);
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_RUNT);
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_OVERSIZE);
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_FCS_ERROR);
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_LENGTH_ERROR);
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_CODE_ERROR);
+ __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_ALIGN_ERROR);
+
+ /*
+ * initialize register pointers
+ */
+
+ d->sbdma_config0 =
+ s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0);
+ d->sbdma_config1 =
+ s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1);
+ d->sbdma_dscrbase =
+ s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE);
+ d->sbdma_dscrcnt =
+ s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT);
+ d->sbdma_curdscr =
+ s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR);
+ if (d->sbdma_txdir)
+ d->sbdma_oodpktlost = NULL;
+ else
+ d->sbdma_oodpktlost =
+ s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_OODPKTLOST_RX);
+
+ /*
+ * Allocate memory for the ring
+ */
+
+ d->sbdma_maxdescr = maxdescr;
+
+ d->sbdma_dscrtable_unaligned = kcalloc(d->sbdma_maxdescr + 1,
+ sizeof(*d->sbdma_dscrtable),
+ GFP_KERNEL);
+
+ /*
+ * The descriptor table must be aligned to at least 16 bytes or the
+ * MAC will corrupt it.
+ */
+ d->sbdma_dscrtable = (struct sbdmadscr *)
+ ALIGN((unsigned long)d->sbdma_dscrtable_unaligned,
+ sizeof(*d->sbdma_dscrtable));
+
+ d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr;
+
+ d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable);
+
+ /*
+ * And context table
+ */
+
+ d->sbdma_ctxtable = kcalloc(d->sbdma_maxdescr,
+ sizeof(*d->sbdma_ctxtable), GFP_KERNEL);
+
+#ifdef CONFIG_SBMAC_COALESCE
+ /*
+ * Setup Rx/Tx DMA coalescing defaults
+ */
+
+ int_pktcnt = (txrx == DMA_TX) ? int_pktcnt_tx : int_pktcnt_rx;
+	if (int_pktcnt) {
+ d->sbdma_int_pktcnt = int_pktcnt;
+ } else {
+ d->sbdma_int_pktcnt = 1;
+ }
+
+ int_timeout = (txrx == DMA_TX) ? int_timeout_tx : int_timeout_rx;
+	if (int_timeout) {
+ d->sbdma_int_timeout = int_timeout;
+ } else {
+ d->sbdma_int_timeout = 0;
+ }
+#endif
+
+}
+
+/**********************************************************************
+ * SBDMA_CHANNEL_START(d)
+ *
+ * Initialize the hardware registers for a DMA channel.
+ *
+ * Input parameters:
+ * d - DMA channel to init (context must be previously init'd)
+ * rxtx - DMA_RX or DMA_TX depending on what type of channel
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbdma_channel_start(struct sbmacdma *d, int rxtx)
+{
+ /*
+ * Turn on the DMA channel
+ */
+
+#ifdef CONFIG_SBMAC_COALESCE
+ __raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) |
+ 0, d->sbdma_config1);
+ __raw_writeq(M_DMA_EOP_INT_EN |
+ V_DMA_RINGSZ(d->sbdma_maxdescr) |
+ V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) |
+ 0, d->sbdma_config0);
+#else
+ __raw_writeq(0, d->sbdma_config1);
+ __raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) |
+ 0, d->sbdma_config0);
+#endif
+
+ __raw_writeq(d->sbdma_dscrtable_phys, d->sbdma_dscrbase);
+
+ /*
+ * Initialize ring pointers
+ */
+
+ d->sbdma_addptr = d->sbdma_dscrtable;
+ d->sbdma_remptr = d->sbdma_dscrtable;
+}
+
+/**********************************************************************
+ * SBDMA_CHANNEL_STOP(d)
+ *
+ * Shut down the hardware registers for a DMA channel.
+ *
+ * Input parameters:
+ * d - DMA channel to stop (context must be previously init'd)
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbdma_channel_stop(struct sbmacdma *d)
+{
+ /*
+ * Turn off the DMA channel
+ */
+
+ __raw_writeq(0, d->sbdma_config1);
+
+ __raw_writeq(0, d->sbdma_dscrbase);
+
+ __raw_writeq(0, d->sbdma_config0);
+
+ /*
+ * Zero ring pointers
+ */
+
+ d->sbdma_addptr = NULL;
+ d->sbdma_remptr = NULL;
+}
+
+static inline void sbdma_align_skb(struct sk_buff *skb,
+ unsigned int power2, unsigned int offset)
+{
+ unsigned char *addr = skb->data;
+ unsigned char *newaddr = PTR_ALIGN(addr, power2);
+
+ skb_reserve(skb, newaddr - addr + offset);
+}
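+
+/*
+ * Worked example (illustrative): with power2 == SMP_CACHE_BYTES == 32 and
+ * offset == NET_IP_ALIGN == 2, data starting at ...0x05 is reserved
+ * forward to ...0x22, so the frame begins 2 bytes past a cache-line
+ * boundary and the IP header ends up word-aligned.
+ */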
+
+
+/**********************************************************************
+ * SBDMA_ADD_RCVBUFFER(d,sb)
+ *
+ * Add a buffer to the specified DMA channel. For receive channels,
+ * this queues a buffer for inbound packets.
+ *
+ * Input parameters:
+ * sc - softc structure
+ * d - DMA channel descriptor
+ * sb - sk_buff to add, or NULL if we should allocate one
+ *
+ * Return value:
+ * 0 if buffer added successfully
+ * -ENOSPC if the ring is full
+ * -ENOBUFS if an sk_buff could not be allocated
+ ********************************************************************* */
+
+
+static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
+ struct sk_buff *sb)
+{
+ struct net_device *dev = sc->sbm_dev;
+ struct sbdmadscr *dsc;
+ struct sbdmadscr *nextdsc;
+ struct sk_buff *sb_new = NULL;
+ int pktsize = ENET_PACKET_SIZE;
+
+ /* get pointer to our current place in the ring */
+
+ dsc = d->sbdma_addptr;
+ nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
+
+ /*
+ * figure out if the ring is full - if the next descriptor
+ * is the same as the one that we're going to remove from
+ * the ring, the ring is full
+ */
+
+ if (nextdsc == d->sbdma_remptr) {
+ return -ENOSPC;
+ }
+
+ /*
+ * Allocate a sk_buff if we don't already have one.
+ * If we do have an sk_buff, reset it so that it's empty.
+ *
+ * Note: sk_buffs don't seem to be guaranteed to have any sort
+ * of alignment when they are allocated. Therefore, allocate enough
+ * extra space to make sure that:
+ *
+ * 1. The data does not start in the middle of a cache line.
+ * 2. The data does not end in the middle of a cache line.
+ * 3. The buffer can be aligned such that the IP addresses are
+ * naturally aligned.
+ *
+ * Remember, the SOC's MAC writes whole cache lines at a time,
+ * without reading the old contents first. So, if the sk_buff's
+ * data portion starts in the middle of a cache line, the SOC
+ * DMA will trash the beginning (and ending) portions.
+ */
+
+ if (sb == NULL) {
+ sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
+ SMP_CACHE_BYTES * 2 +
+ NET_IP_ALIGN);
+ if (sb_new == NULL) {
+ pr_info("%s: sk_buff allocation failed\n",
+ d->sbdma_eth->sbm_dev->name);
+ return -ENOBUFS;
+ }
+
+ sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
+ }
+ else {
+ sb_new = sb;
+ /*
+ * nothing special to reinit buffer, it's already aligned
+ * and sb->data already points to a good place.
+ */
+ }
+
+ /*
+ * fill in the descriptor
+ */
+
+#ifdef CONFIG_SBMAC_COALESCE
+ /*
+ * Do not interrupt per DMA transfer.
+ */
+ dsc->dscr_a = virt_to_phys(sb_new->data) |
+ V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0;
+#else
+ dsc->dscr_a = virt_to_phys(sb_new->data) |
+ V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) |
+ M_DMA_DSCRA_INTERRUPT;
+#endif
+
+ /* receiving: no options */
+ dsc->dscr_b = 0;
+
+ /*
+ * fill in the context
+ */
+
+ d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new;
+
+ /*
+ * point at next packet
+ */
+
+ d->sbdma_addptr = nextdsc;
+
+ /*
+ * Give the buffer to the DMA engine.
+ */
+
+ __raw_writeq(1, d->sbdma_dscrcnt);
+
+ return 0; /* we did it */
+}
+
+/**********************************************************************
+ * SBDMA_ADD_TXBUFFER(d,sb)
+ *
+ * Add a transmit buffer to the specified DMA channel, causing a
+ * transmit to start.
+ *
+ * Input parameters:
+ * d - DMA channel descriptor
+ * sb - sk_buff to add
+ *
+ * Return value:
+ * 0 transmit queued successfully
+ * otherwise error code
+ ********************************************************************* */
+
+
+static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *sb)
+{
+ struct sbdmadscr *dsc;
+ struct sbdmadscr *nextdsc;
+ uint64_t phys;
+ uint64_t ncb;
+ int length;
+
+ /* get pointer to our current place in the ring */
+
+ dsc = d->sbdma_addptr;
+ nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
+
+ /*
+ * figure out if the ring is full - if the next descriptor
+ * is the same as the one that we're going to remove from
+ * the ring, the ring is full
+ */
+
+ if (nextdsc == d->sbdma_remptr) {
+ return -ENOSPC;
+ }
+
+ /*
+ * Under Linux, it's not necessary to copy/coalesce buffers
+ * like it is on NetBSD. We think they're all contiguous,
+ * but that may not be true for GBE.
+ */
+
+ length = sb->len;
+
+ /*
+ * fill in the descriptor. Note that the number of cache
+ * blocks in the descriptor is the number of blocks
+ * *spanned*, so we need to add in the offset (if any)
+ * while doing the calculation.
+ */
+
+ phys = virt_to_phys(sb->data);
+ ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1)));
+
+ dsc->dscr_a = phys |
+ V_DMA_DSCRA_A_SIZE(ncb) |
+#ifndef CONFIG_SBMAC_COALESCE
+ M_DMA_DSCRA_INTERRUPT |
+#endif
+ M_DMA_ETHTX_SOP;
+
+ /* transmitting: set outbound options and length */
+
+ dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
+ V_DMA_DSCRB_PKT_SIZE(length);
+
+ /*
+ * fill in the context
+ */
+
+ d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb;
+
+ /*
+ * point at next packet
+ */
+
+ d->sbdma_addptr = nextdsc;
+
+ /*
+ * Give the buffer to the DMA engine.
+ */
+
+ __raw_writeq(1, d->sbdma_dscrcnt);
+
+ return 0; /* we did it */
+}
+
+
+
+
+/**********************************************************************
+ * SBDMA_EMPTYRING(d)
+ *
+ * Free all allocated sk_buffs on the specified DMA channel.
+ *
+ * Input parameters:
+ * d - DMA channel
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbdma_emptyring(struct sbmacdma *d)
+{
+ int idx;
+ struct sk_buff *sb;
+
+ for (idx = 0; idx < d->sbdma_maxdescr; idx++) {
+ sb = d->sbdma_ctxtable[idx];
+ if (sb) {
+ dev_kfree_skb(sb);
+ d->sbdma_ctxtable[idx] = NULL;
+ }
+ }
+}
+
+
+/**********************************************************************
+ * SBDMA_FILLRING(d)
+ *
+ * Fill the specified DMA channel (must be receive channel)
+ * with sk_buffs
+ *
+ * Input parameters:
+ * sc - softc structure
+ * d - DMA channel
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d)
+{
+ int idx;
+
+ for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) {
+ if (sbdma_add_rcvbuffer(sc, d, NULL) != 0)
+ break;
+ }
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sbmac_netpoll(struct net_device *netdev)
+{
+ struct sbmac_softc *sc = netdev_priv(netdev);
+ int irq = sc->sbm_dev->irq;
+
+ __raw_writeq(0, sc->sbm_imr);
+
+ sbmac_intr(irq, netdev);
+
+#ifdef CONFIG_SBMAC_COALESCE
+ __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
+ ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
+ sc->sbm_imr);
+#else
+ __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
+ (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
+#endif
+}
+#endif
+
+/**********************************************************************
+ * SBDMA_RX_PROCESS(sc,d,work_to_do,poll)
+ *
+ * Process "completed" receive buffers on the specified DMA channel.
+ *
+ * Input parameters:
+ * sc - softc structure
+ * d - DMA channel context
+ * work_to_do - no. of packets to process before enabling interrupt
+ * again (for NAPI)
+ * poll - 1: using polling (for NAPI)
+ *
+ * Return value:
+ * number of packets processed
+ ********************************************************************* */
+
+static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
+ int work_to_do, int poll)
+{
+ struct net_device *dev = sc->sbm_dev;
+ int curidx;
+ int hwidx;
+ struct sbdmadscr *dsc;
+ struct sk_buff *sb;
+ int len;
+ int work_done = 0;
+ int dropped = 0;
+
+ prefetch(d);
+
+again:
+ /* Check if the HW dropped any frames */
+ dev->stats.rx_fifo_errors
+ += __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff;
+ __raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost);
+
+ while (work_to_do-- > 0) {
+ /*
+ * figure out where we are (as an index) and where
+ * the hardware is (also as an index)
+ *
+ * This could be done faster if (for example) the
+ * descriptor table was page-aligned and contiguous in
+ * both virtual and physical memory -- you could then
+ * just compare the low-order bits of the virtual address
+ * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
+ */
+
+ dsc = d->sbdma_remptr;
+ curidx = dsc - d->sbdma_dscrtable;
+
+ prefetch(dsc);
+ prefetch(&d->sbdma_ctxtable[curidx]);
+
+ hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
+ d->sbdma_dscrtable_phys) /
+ sizeof(*d->sbdma_dscrtable);
+
+ /*
+ * If they're the same, that means we've processed all
+ * of the descriptors up to (but not including) the one that
+ * the hardware is working on right now.
+ */
+
+ if (curidx == hwidx)
+ goto done;
+
+ /*
+ * Otherwise, get the packet's sk_buff ptr back
+ */
+
+ sb = d->sbdma_ctxtable[curidx];
+ d->sbdma_ctxtable[curidx] = NULL;
+
+ len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4;
+
+ /*
+ * Check packet status. If good, process it.
+ * If not, silently drop it and put it back on the
+ * receive ring.
+ */
+
+ if (likely (!(dsc->dscr_a & M_DMA_ETHRX_BAD))) {
+
+ /*
+ * Add a new buffer to replace the old one. If we fail
+ * to allocate a buffer, we're going to drop this
+ * packet and put it right back on the receive ring.
+ */
+
+ if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) ==
+ -ENOBUFS)) {
+ dev->stats.rx_dropped++;
+ /* Re-add old buffer */
+ sbdma_add_rcvbuffer(sc, d, sb);
+ /* No point in continuing at the moment */
+ printk(KERN_ERR "dropped packet (1)\n");
+ d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
+ goto done;
+ } else {
+ /*
+ * Set length into the packet
+ */
+ skb_put(sb,len);
+
+ /*
+ * Buffer has been replaced on the
+ * receive ring. Pass the buffer to
+ * the kernel
+ */
+ sb->protocol = eth_type_trans(sb,d->sbdma_eth->sbm_dev);
+ /* Check hw IPv4/TCP checksum if supported */
+ if (sc->rx_hw_checksum == ENABLE) {
+ if (!((dsc->dscr_a) & M_DMA_ETHRX_BADIP4CS) &&
+ !((dsc->dscr_a) & M_DMA_ETHRX_BADTCPCS)) {
+ sb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* don't need to set sb->csum */
+ } else {
+ skb_checksum_none_assert(sb);
+ }
+ }
+ prefetch(sb->data);
+ prefetch((const void *)(((char *)sb->data)+32));
+ if (poll)
+ dropped = netif_receive_skb(sb);
+ else
+ dropped = netif_rx(sb);
+
+ if (dropped == NET_RX_DROP) {
+ dev->stats.rx_dropped++;
+ d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
+ goto done;
+ }
+ else {
+ dev->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+ }
+ }
+ } else {
+ /*
+ * Packet was mangled somehow. Just drop it and
+ * put it back on the receive ring.
+ */
+ dev->stats.rx_errors++;
+ sbdma_add_rcvbuffer(sc, d, sb);
+ }
+
+
+ /*
+ * .. and advance to the next buffer.
+ */
+
+ d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
+ work_done++;
+ }
+ if (!poll) {
+ work_to_do = 32;
+ goto again; /* collect fifo drop statistics again */
+ }
+done:
+ return work_done;
+}
+
+/**********************************************************************
+ * SBDMA_TX_PROCESS(sc,d)
+ *
+ * Process "completed" transmit buffers on the specified DMA channel.
+ * This is normally called within the interrupt service routine.
+ * Note that this isn't really ideal for priority channels, since
+ * it processes all of the packets on a given channel before
+ * returning.
+ *
+ * Input parameters:
+ * sc - softc structure
+ * d - DMA channel context
+ * poll - 1: using polling (for NAPI)
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
+ int poll)
+{
+ struct net_device *dev = sc->sbm_dev;
+ int curidx;
+ int hwidx;
+ struct sbdmadscr *dsc;
+ struct sk_buff *sb;
+ unsigned long flags;
+ int packets_handled = 0;
+
+ spin_lock_irqsave(&(sc->sbm_lock), flags);
+
+ if (d->sbdma_remptr == d->sbdma_addptr)
+ goto end_unlock;
+
+ hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
+ d->sbdma_dscrtable_phys) / sizeof(*d->sbdma_dscrtable);
+
+ for (;;) {
+ /*
+ * figure out where we are (as an index) and where
+ * the hardware is (also as an index)
+ *
+ * This could be done faster if (for example) the
+ * descriptor table was page-aligned and contiguous in
+ * both virtual and physical memory -- you could then
+ * just compare the low-order bits of the virtual address
+ * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
+ */
+
+ curidx = d->sbdma_remptr - d->sbdma_dscrtable;
+
+ /*
+ * If they're the same, that means we've processed all
+ * of the descriptors up to (but not including) the one that
+ * the hardware is working on right now.
+ */
+
+ if (curidx == hwidx)
+ break;
+
+ /*
+ * Otherwise, get the packet's sk_buff ptr back
+ */
+
+ dsc = &(d->sbdma_dscrtable[curidx]);
+ sb = d->sbdma_ctxtable[curidx];
+ d->sbdma_ctxtable[curidx] = NULL;
+
+ /*
+ * Stats
+ */
+
+ dev->stats.tx_bytes += sb->len;
+ dev->stats.tx_packets++;
+
+ /*
+ * for transmits, we just free buffers.
+ */
+
+ dev_kfree_skb_irq(sb);
+
+ /*
+ * .. and advance to the next buffer.
+ */
+
+ d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
+
+ packets_handled++;
+
+ }
+
+ /*
+ * Decide if we should wake up the protocol or not.
+ * Other drivers seem to do this when we reach a low
+ * watermark on the transmit queue.
+ */
+
+ if (packets_handled)
+ netif_wake_queue(d->sbdma_eth->sbm_dev);
+
+end_unlock:
+ spin_unlock_irqrestore(&(sc->sbm_lock), flags);
+
+}
+
+
+
+/**********************************************************************
+ * SBMAC_INITCTX(s)
+ *
+ * Initialize an Ethernet context structure - this is called
+ * once per MAC on the 1250. Memory is allocated here, so don't
+ * call it again from inside the ioctl routines that bring the
+ * interface up/down
+ *
+ * Input parameters:
+ * s - sbmac context structure
+ *
+ * Return value:
+ * 0
+ ********************************************************************* */
+
+static int sbmac_initctx(struct sbmac_softc *s)
+{
+
+ /*
+ * figure out the addresses of some ports
+ */
+
+ s->sbm_macenable = s->sbm_base + R_MAC_ENABLE;
+ s->sbm_maccfg = s->sbm_base + R_MAC_CFG;
+ s->sbm_fifocfg = s->sbm_base + R_MAC_THRSH_CFG;
+ s->sbm_framecfg = s->sbm_base + R_MAC_FRAMECFG;
+ s->sbm_rxfilter = s->sbm_base + R_MAC_ADFILTER_CFG;
+ s->sbm_isr = s->sbm_base + R_MAC_STATUS;
+ s->sbm_imr = s->sbm_base + R_MAC_INT_MASK;
+ s->sbm_mdio = s->sbm_base + R_MAC_MDIO;
+
+ /*
+	 * Initialize the DMA channels. Right now, only one per MAC is used.
+ * Note: Only do this _once_, as it allocates memory from the kernel!
+ */
+
+ sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR);
+ sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR);
+
+ /*
+ * initial state is OFF
+ */
+
+ s->sbm_state = sbmac_state_off;
+
+ return 0;
+}
+
+
+static void sbdma_uninitctx(struct sbmacdma *d)
+{
+ if (d->sbdma_dscrtable_unaligned) {
+ kfree(d->sbdma_dscrtable_unaligned);
+ d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL;
+ }
+
+ if (d->sbdma_ctxtable) {
+ kfree(d->sbdma_ctxtable);
+ d->sbdma_ctxtable = NULL;
+ }
+}
+
+
+static void sbmac_uninitctx(struct sbmac_softc *sc)
+{
+ sbdma_uninitctx(&(sc->sbm_txdma));
+ sbdma_uninitctx(&(sc->sbm_rxdma));
+}
+
+
+/**********************************************************************
+ * SBMAC_CHANNEL_START(s)
+ *
+ * Start packet processing on this MAC.
+ *
+ * Input parameters:
+ * s - sbmac structure
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbmac_channel_start(struct sbmac_softc *s)
+{
+ uint64_t reg;
+ void __iomem *port;
+ uint64_t cfg,fifo,framecfg;
+ int idx, th_value;
+
+ /*
+ * Don't do this if running
+ */
+
+ if (s->sbm_state == sbmac_state_on)
+ return;
+
+ /*
+ * Bring the controller out of reset, but leave it off.
+ */
+
+ __raw_writeq(0, s->sbm_macenable);
+
+ /*
+ * Ignore all received packets
+ */
+
+ __raw_writeq(0, s->sbm_rxfilter);
+
+ /*
+ * Calculate values for various control registers.
+ */
+
+ cfg = M_MAC_RETRY_EN |
+ M_MAC_TX_HOLD_SOP_EN |
+ V_MAC_TX_PAUSE_CNT_16K |
+ M_MAC_AP_STAT_EN |
+ M_MAC_FAST_SYNC |
+ M_MAC_SS_EN |
+ 0;
+
+ /*
+	 * Be sure that RD_THRSH + WR_THRSH <= 32 for pass1 parts,
+	 * and that RD_THRSH + WR_THRSH <= 128 for pass2 and above.
+	 * Use a larger RD_THRSH for gigabit.
+ */
+ if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2)
+ th_value = 28;
+ else
+ th_value = 64;
+
+ fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */
+ ((s->sbm_speed == sbmac_speed_1000)
+ ? V_MAC_TX_RD_THRSH(th_value) : V_MAC_TX_RD_THRSH(4)) |
+ V_MAC_TX_RL_THRSH(4) |
+ V_MAC_RX_PL_THRSH(4) |
+ V_MAC_RX_RD_THRSH(4) | /* Must be '4' */
+ V_MAC_RX_RL_THRSH(8) |
+ 0;
+
+ framecfg = V_MAC_MIN_FRAMESZ_DEFAULT |
+ V_MAC_MAX_FRAMESZ_DEFAULT |
+ V_MAC_BACKOFF_SEL(1);
+
+ /*
+ * Clear out the hash address map
+ */
+
+ port = s->sbm_base + R_MAC_HASH_BASE;
+ for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
+ __raw_writeq(0, port);
+ port += sizeof(uint64_t);
+ }
+
+ /*
+ * Clear out the exact-match table
+ */
+
+ port = s->sbm_base + R_MAC_ADDR_BASE;
+ for (idx = 0; idx < MAC_ADDR_COUNT; idx++) {
+ __raw_writeq(0, port);
+ port += sizeof(uint64_t);
+ }
+
+ /*
+ * Clear out the DMA Channel mapping table registers
+ */
+
+ port = s->sbm_base + R_MAC_CHUP0_BASE;
+ for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
+ __raw_writeq(0, port);
+ port += sizeof(uint64_t);
+ }
+
+
+ port = s->sbm_base + R_MAC_CHLO0_BASE;
+ for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
+ __raw_writeq(0, port);
+ port += sizeof(uint64_t);
+ }
+
+ /*
+ * Program the hardware address. It goes into the hardware-address
+ * register as well as the first filter register.
+ */
+
+ reg = sbmac_addr2reg(s->sbm_hwaddr);
+
+ port = s->sbm_base + R_MAC_ADDR_BASE;
+ __raw_writeq(reg, port);
+ port = s->sbm_base + R_MAC_ETHERNET_ADDR;
+
+#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
+ /*
+ * Pass1 SOCs do not receive packets addressed to the
+ * destination address in the R_MAC_ETHERNET_ADDR register.
+ * Set the value to zero.
+ */
+ __raw_writeq(0, port);
+#else
+ __raw_writeq(reg, port);
+#endif
+
+ /*
+ * Set the receive filter for no packets, and write values
+ * to the various config registers
+ */
+
+ __raw_writeq(0, s->sbm_rxfilter);
+ __raw_writeq(0, s->sbm_imr);
+ __raw_writeq(framecfg, s->sbm_framecfg);
+ __raw_writeq(fifo, s->sbm_fifocfg);
+ __raw_writeq(cfg, s->sbm_maccfg);
+
+ /*
+ * Initialize DMA channels (rings should be ok now)
+ */
+
+ sbdma_channel_start(&(s->sbm_rxdma), DMA_RX);
+ sbdma_channel_start(&(s->sbm_txdma), DMA_TX);
+
+ /*
+ * Configure the speed, duplex, and flow control
+ */
+
+ sbmac_set_speed(s,s->sbm_speed);
+ sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc);
+
+ /*
+ * Fill the receive ring
+ */
+
+ sbdma_fillring(s, &(s->sbm_rxdma));
+
+ /*
+ * Turn on the rest of the bits in the enable register
+ */
+
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+ __raw_writeq(M_MAC_RXDMA_EN0 |
+ M_MAC_TXDMA_EN0, s->sbm_macenable);
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+ __raw_writeq(M_MAC_RXDMA_EN0 |
+ M_MAC_TXDMA_EN0 |
+ M_MAC_RX_ENABLE |
+ M_MAC_TX_ENABLE, s->sbm_macenable);
+#else
+#error invalid SiByte MAC configuration
+#endif
+
+#ifdef CONFIG_SBMAC_COALESCE
+ __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
+ ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr);
+#else
+ __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
+ (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr);
+#endif
+
+ /*
+ * Enable receiving unicasts and broadcasts
+ */
+
+ __raw_writeq(M_MAC_UCAST_EN | M_MAC_BCAST_EN, s->sbm_rxfilter);
+
+ /*
+ * we're running now.
+ */
+
+ s->sbm_state = sbmac_state_on;
+
+ /*
+ * Program multicast addresses
+ */
+
+ sbmac_setmulti(s);
+
+ /*
+ * If channel was in promiscuous mode before, turn that on
+ */
+
+ if (s->sbm_devflags & IFF_PROMISC) {
+ sbmac_promiscuous_mode(s,1);
+ }
+
+}
+
+
+/**********************************************************************
+ * SBMAC_CHANNEL_STOP(s)
+ *
+ * Stop packet processing on this MAC.
+ *
+ * Input parameters:
+ * s - sbmac structure
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbmac_channel_stop(struct sbmac_softc *s)
+{
+ /* don't do this if already stopped */
+
+ if (s->sbm_state == sbmac_state_off)
+ return;
+
+ /* don't accept any packets, disable all interrupts */
+
+ __raw_writeq(0, s->sbm_rxfilter);
+ __raw_writeq(0, s->sbm_imr);
+
+ /* Turn off ticker */
+
+ /* XXX */
+
+ /* turn off receiver and transmitter */
+
+ __raw_writeq(0, s->sbm_macenable);
+
+ /* We're stopped now. */
+
+ s->sbm_state = sbmac_state_off;
+
+ /*
+ * Stop DMA channels (rings should be ok now)
+ */
+
+ sbdma_channel_stop(&(s->sbm_rxdma));
+ sbdma_channel_stop(&(s->sbm_txdma));
+
+ /* Empty the receive and transmit rings */
+
+ sbdma_emptyring(&(s->sbm_rxdma));
+ sbdma_emptyring(&(s->sbm_txdma));
+
+}
+
+/**********************************************************************
+ * SBMAC_SET_CHANNEL_STATE(state)
+ *
+ * Set the channel's state ON or OFF
+ *
+ * Input parameters:
+ * state - new state
+ *
+ * Return value:
+ * old state
+ ********************************************************************* */
+static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *sc,
+ enum sbmac_state state)
+{
+ enum sbmac_state oldstate = sc->sbm_state;
+
+ /*
+ * If same as previous state, return
+ */
+
+ if (state == oldstate) {
+ return oldstate;
+ }
+
+ /*
+ * If new state is ON, turn channel on
+ */
+
+ if (state == sbmac_state_on) {
+ sbmac_channel_start(sc);
+ }
+ else {
+ sbmac_channel_stop(sc);
+ }
+
+ /*
+ * Return previous state
+ */
+
+ return oldstate;
+}
+
+
+/**********************************************************************
+ * SBMAC_PROMISCUOUS_MODE(sc,onoff)
+ *
+ * Turn on or off promiscuous mode
+ *
+ * Input parameters:
+ * sc - softc
+ * onoff - 1 to turn on, 0 to turn off
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff)
+{
+ uint64_t reg;
+
+ if (sc->sbm_state != sbmac_state_on)
+ return;
+
+ if (onoff) {
+ reg = __raw_readq(sc->sbm_rxfilter);
+ reg |= M_MAC_ALLPKT_EN;
+ __raw_writeq(reg, sc->sbm_rxfilter);
+ }
+ else {
+ reg = __raw_readq(sc->sbm_rxfilter);
+ reg &= ~M_MAC_ALLPKT_EN;
+ __raw_writeq(reg, sc->sbm_rxfilter);
+ }
+}
+
+/**********************************************************************
+ * SBMAC_SET_IPHDR_OFFSET(sc)
+ *
+ * Set the IP header offset to 15, assuming Ethernet encapsulation
+ *
+ * Input parameters:
+ * sc - softc
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
+{
+ uint64_t reg;
+
+	/* Hard-code the offset to 15 for now: clear the field, then set it */
+	reg = __raw_readq(sc->sbm_rxfilter);
+	reg &= ~M_MAC_IPHDR_OFFSET;
+	reg |= V_MAC_IPHDR_OFFSET(15);
+ __raw_writeq(reg, sc->sbm_rxfilter);
+
+ /* BCM1250 pass1 didn't have hardware checksum. Everything
+ later does. */
+ if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) {
+ sc->rx_hw_checksum = DISABLE;
+ } else {
+ sc->rx_hw_checksum = ENABLE;
+ }
+}
+
+
+/**********************************************************************
+ * SBMAC_ADDR2REG(ptr)
+ *
+ * Convert six bytes into the 64-bit register value that
+ * we typically write into the SBMAC's address/mcast registers
+ *
+ * Input parameters:
+ * ptr - pointer to 6 bytes
+ *
+ * Return value:
+ * register value
+ ********************************************************************* */
+
+static uint64_t sbmac_addr2reg(unsigned char *ptr)
+{
+ uint64_t reg = 0;
+
+ ptr += 6;
+
+ reg |= (uint64_t) *(--ptr);
+ reg <<= 8;
+ reg |= (uint64_t) *(--ptr);
+ reg <<= 8;
+ reg |= (uint64_t) *(--ptr);
+ reg <<= 8;
+ reg |= (uint64_t) *(--ptr);
+ reg <<= 8;
+ reg |= (uint64_t) *(--ptr);
+ reg <<= 8;
+ reg |= (uint64_t) *(--ptr);
+
+ return reg;
+}
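+
+/*
+ * Worked example (illustrative): for the address 00:11:22:33:44:55,
+ * sbmac_addr2reg() returns 0x0000554433221100 -- the first octet of
+ * the address lands in the least-significant byte of the register.
+ */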
+
+
+/**********************************************************************
+ * SBMAC_SET_SPEED(s,speed)
+ *
+ * Configure LAN speed for the specified MAC.
+ * Warning: must be called when MAC is off!
+ *
+ * Input parameters:
+ * s - sbmac structure
+ * speed - speed to set MAC to (see enum sbmac_speed)
+ *
+ * Return value:
+ * 1 if successful
+ * 0 indicates invalid parameters
+ ********************************************************************* */
+
+static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed)
+{
+ uint64_t cfg;
+ uint64_t framecfg;
+
+ /*
+ * Save new current values
+ */
+
+ s->sbm_speed = speed;
+
+ if (s->sbm_state == sbmac_state_on)
+ return 0; /* save for next restart */
+
+ /*
+ * Read current register values
+ */
+
+ cfg = __raw_readq(s->sbm_maccfg);
+ framecfg = __raw_readq(s->sbm_framecfg);
+
+ /*
+ * Mask out the stuff we want to change
+ */
+
+ cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
+ framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
+ M_MAC_SLOT_SIZE);
+
+ /*
+ * Now add in the new bits
+ */
+
+ switch (speed) {
+ case sbmac_speed_10:
+ framecfg |= V_MAC_IFG_RX_10 |
+ V_MAC_IFG_TX_10 |
+ K_MAC_IFG_THRSH_10 |
+ V_MAC_SLOT_SIZE_10;
+ cfg |= V_MAC_SPEED_SEL_10MBPS;
+ break;
+
+ case sbmac_speed_100:
+ framecfg |= V_MAC_IFG_RX_100 |
+ V_MAC_IFG_TX_100 |
+ V_MAC_IFG_THRSH_100 |
+ V_MAC_SLOT_SIZE_100;
+ cfg |= V_MAC_SPEED_SEL_100MBPS ;
+ break;
+
+ case sbmac_speed_1000:
+ framecfg |= V_MAC_IFG_RX_1000 |
+ V_MAC_IFG_TX_1000 |
+ V_MAC_IFG_THRSH_1000 |
+ V_MAC_SLOT_SIZE_1000;
+ cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
+ break;
+
+ default:
+ return 0;
+ }
+
+ /*
+ * Send the bits back to the hardware
+ */
+
+ __raw_writeq(framecfg, s->sbm_framecfg);
+ __raw_writeq(cfg, s->sbm_maccfg);
+
+ return 1;
+}
+
+/**********************************************************************
+ * SBMAC_SET_DUPLEX(s,duplex,fc)
+ *
+ * Set Ethernet duplex and flow control options for this MAC
+ * Warning: must be called when MAC is off!
+ *
+ * Input parameters:
+ * s - sbmac structure
+ * duplex - duplex setting (see enum sbmac_duplex)
+ * fc - flow control setting (see enum sbmac_fc)
+ *
+ * Return value:
+ * 1 if ok
+ * 0 if an invalid parameter combination was specified
+ ********************************************************************* */
+
+static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex,
+ enum sbmac_fc fc)
+{
+ uint64_t cfg;
+
+ /*
+ * Save new current values
+ */
+
+ s->sbm_duplex = duplex;
+ s->sbm_fc = fc;
+
+ if (s->sbm_state == sbmac_state_on)
+ return 0; /* save for next restart */
+
+ /*
+ * Read current register values
+ */
+
+ cfg = __raw_readq(s->sbm_maccfg);
+
+ /*
+ * Mask off the stuff we're about to change
+ */
+
+ cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);
+
+
+ switch (duplex) {
+ case sbmac_duplex_half:
+ switch (fc) {
+ case sbmac_fc_disabled:
+ cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
+ break;
+
+ case sbmac_fc_collision:
+ cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
+ break;
+
+ case sbmac_fc_carrier:
+ cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
+ break;
+
+ case sbmac_fc_frame: /* not valid in half duplex */
+ default: /* invalid selection */
+ return 0;
+ }
+ break;
+
+ case sbmac_duplex_full:
+ switch (fc) {
+ case sbmac_fc_disabled:
+ cfg |= V_MAC_FC_CMD_DISABLED;
+ break;
+
+ case sbmac_fc_frame:
+ cfg |= V_MAC_FC_CMD_ENABLED;
+ break;
+
+ case sbmac_fc_collision: /* not valid in full duplex */
+ case sbmac_fc_carrier: /* not valid in full duplex */
+ default:
+ return 0;
+ }
+ break;
+ default:
+ return 0;
+ }
+
+ /*
+ * Send the bits back to the hardware
+ */
+
+ __raw_writeq(cfg, s->sbm_maccfg);
+
+ return 1;
+}
+
+
+
+
+/**********************************************************************
+ * SBMAC_INTR()
+ *
+ * Interrupt handler for MAC interrupts
+ *
+ * Input parameters:
+ *  	   irq - interrupt number
+ *  	   dev_instance - net_device that raised the interrupt
+ *
+ * Return value:
+ *  	   IRQ_HANDLED if the MAC raised the interrupt, IRQ_NONE otherwise
+ ********************************************************************* */
+static irqreturn_t sbmac_intr(int irq,void *dev_instance)
+{
+ struct net_device *dev = (struct net_device *) dev_instance;
+ struct sbmac_softc *sc = netdev_priv(dev);
+ uint64_t isr;
+ int handled = 0;
+
+ /*
+ * Read the ISR (this clears the bits in the real
+ * register, except for counter addr)
+ */
+
+ isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;
+
+ if (isr == 0)
+ return IRQ_RETVAL(0);
+ handled = 1;
+
+ /*
+ * Transmits on channel 0
+ */
+
+ if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0))
+ sbdma_tx_process(sc,&(sc->sbm_txdma), 0);
+
+ if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
+ if (napi_schedule_prep(&sc->napi)) {
+ __raw_writeq(0, sc->sbm_imr);
+ __napi_schedule(&sc->napi);
+ /* Depend on the exit from poll to reenable intr */
+ }
+ else {
+ /* may leave some packets behind */
+ sbdma_rx_process(sc,&(sc->sbm_rxdma),
+ SBMAC_MAX_RXDESCR * 2, 0);
+ }
+ }
+ return IRQ_RETVAL(handled);
+}
+
+/**********************************************************************
+ * SBMAC_START_TX(skb,dev)
+ *
+ * Start output on the specified interface. Basically, we
+ * queue as many buffers as we can until the ring fills up, or
+ * we run off the end of the queue, whichever comes first.
+ *
+ * Input parameters:
+ *  	   skb - sk_buff to transmit
+ *  	   dev - net_device to transmit it on
+ *
+ * Return value:
+ *  	   NETDEV_TX_OK if queued, NETDEV_TX_BUSY if the ring is full
+ ********************************************************************* */
+static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct sbmac_softc *sc = netdev_priv(dev);
+ unsigned long flags;
+
+ /* lock eth irq */
+ spin_lock_irqsave(&sc->sbm_lock, flags);
+
+ /*
+ * Put the buffer on the transmit ring. If we
+ * don't have room, stop the queue.
+ */
+
+ if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) {
+ /* XXX save skb that we could not send */
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&sc->sbm_lock, flags);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ spin_unlock_irqrestore(&sc->sbm_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/**********************************************************************
+ * SBMAC_SETMULTI(sc)
+ *
+ * Reprogram the multicast table into the hardware, given
+ * the list of multicasts associated with the interface
+ * structure.
+ *
+ * Input parameters:
+ * sc - softc
+ *
+ * Return value:
+ * nothing
+ ********************************************************************* */
+
+static void sbmac_setmulti(struct sbmac_softc *sc)
+{
+ uint64_t reg;
+ void __iomem *port;
+ int idx;
+ struct netdev_hw_addr *ha;
+ struct net_device *dev = sc->sbm_dev;
+
+ /*
+ * Clear out entire multicast table. We do this by nuking
+ * the entire hash table and all the direct matches except
+ * the first one, which is used for our station address
+ */
+
+ for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
+ port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t));
+ __raw_writeq(0, port);
+ }
+
+ for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
+ port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t));
+ __raw_writeq(0, port);
+ }
+
+ /*
+ * Clear the filter to say we don't want any multicasts.
+ */
+
+ reg = __raw_readq(sc->sbm_rxfilter);
+ reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
+ __raw_writeq(reg, sc->sbm_rxfilter);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /*
+ * Enable ALL multicasts. Do this by inverting the
+ * multicast enable bit.
+ */
+ reg = __raw_readq(sc->sbm_rxfilter);
+ reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
+ __raw_writeq(reg, sc->sbm_rxfilter);
+ return;
+ }
+
+
+ /*
+	 * Program new multicast entries. For now, only use the
+	 * perfect filter. In the future we'll need to use the
+	 * hash filter if the perfect filter overflows.
+ */
+
+ /* XXX only using perfect filter for now, need to use hash
+ * XXX if the table overflows */
+
+ idx = 1; /* skip station address */
+ netdev_for_each_mc_addr(ha, dev) {
+ if (idx == MAC_ADDR_COUNT)
+ break;
+ reg = sbmac_addr2reg(ha->addr);
+ port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
+ __raw_writeq(reg, port);
+ idx++;
+ }
+
+ /*
+ * Enable the "accept multicast bits" if we programmed at least one
+ * multicast.
+ */
+
+ if (idx > 1) {
+ reg = __raw_readq(sc->sbm_rxfilter);
+ reg |= M_MAC_MCAST_EN;
+ __raw_writeq(reg, sc->sbm_rxfilter);
+ }
+}
+
+static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
+{
+ if (new_mtu > ENET_PACKET_SIZE)
+ return -EINVAL;
+ _dev->mtu = new_mtu;
+ pr_info("changing the mtu to %d\n", new_mtu);
+ return 0;
+}
+
+static const struct net_device_ops sbmac_netdev_ops = {
+ .ndo_open = sbmac_open,
+ .ndo_stop = sbmac_close,
+ .ndo_start_xmit = sbmac_start_tx,
+ .ndo_set_rx_mode = sbmac_set_rx_mode,
+ .ndo_tx_timeout = sbmac_tx_timeout,
+ .ndo_do_ioctl = sbmac_mii_ioctl,
+ .ndo_change_mtu = sb1250_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = sbmac_netpoll,
+#endif
+};
+
+/**********************************************************************
+ * SBMAC_INIT(pldev,base)
+ *
+ * Attach routine - init hardware and hook ourselves into Linux
+ *
+ * Input parameters:
+ *  	   pldev - platform device
+ *  	   base - base address of the MAC registers
+ *
+ * Return value:
+ *  	   status (0 on success, negative errno on failure)
+ ********************************************************************* */
+
+static int sbmac_init(struct platform_device *pldev, long long base)
+{
+ struct net_device *dev = dev_get_drvdata(&pldev->dev);
+ int idx = pldev->id;
+ struct sbmac_softc *sc = netdev_priv(dev);
+ unsigned char *eaddr;
+ uint64_t ea_reg;
+ int i;
+ int err;
+
+ sc->sbm_dev = dev;
+ sc->sbe_idx = idx;
+
+ eaddr = sc->sbm_hwaddr;
+
+ /*
+ * Read the ethernet address. The firmware left this programmed
+ * for us in the ethernet address register for each mac.
+ */
+
+ ea_reg = __raw_readq(sc->sbm_base + R_MAC_ETHERNET_ADDR);
+ __raw_writeq(0, sc->sbm_base + R_MAC_ETHERNET_ADDR);
+ for (i = 0; i < 6; i++) {
+ eaddr[i] = (uint8_t) (ea_reg & 0xFF);
+ ea_reg >>= 8;
+ }
+
+ for (i = 0; i < 6; i++) {
+ dev->dev_addr[i] = eaddr[i];
+ }
+
+ /*
+ * Initialize context (get pointers to registers and stuff), then
+ * allocate the memory for the descriptor tables.
+ */
+
+ sbmac_initctx(sc);
+
+ /*
+ * Set up Linux device callins
+ */
+
+ spin_lock_init(&(sc->sbm_lock));
+
+ dev->netdev_ops = &sbmac_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ netif_napi_add(dev, &sc->napi, sbmac_poll, 16);
+
+ dev->irq = UNIT_INT(idx);
+
+	/* This is needed for the Rx H/W checksum feature on pass2 and later parts */
+ sbmac_set_iphdr_offset(sc);
+
+ sc->mii_bus = mdiobus_alloc();
+ if (sc->mii_bus == NULL) {
+ err = -ENOMEM;
+ goto uninit_ctx;
+ }
+
+ sc->mii_bus->name = sbmac_mdio_string;
+ snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
+ sc->mii_bus->priv = sc;
+ sc->mii_bus->read = sbmac_mii_read;
+ sc->mii_bus->write = sbmac_mii_write;
+ sc->mii_bus->irq = sc->phy_irq;
+ for (i = 0; i < PHY_MAX_ADDR; ++i)
+ sc->mii_bus->irq[i] = SBMAC_PHY_INT;
+
+ sc->mii_bus->parent = &pldev->dev;
+ /*
+ * Probe PHY address
+ */
+ err = mdiobus_register(sc->mii_bus);
+ if (err) {
+ printk(KERN_ERR "%s: unable to register MDIO bus\n",
+ dev->name);
+ goto free_mdio;
+ }
+ dev_set_drvdata(&pldev->dev, sc->mii_bus);
+
+ err = register_netdev(dev);
+ if (err) {
+ printk(KERN_ERR "%s.%d: unable to register netdev\n",
+ sbmac_string, idx);
+ goto unreg_mdio;
+ }
+
+ pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name);
+
+ if (sc->rx_hw_checksum == ENABLE)
+ pr_info("%s: enabling TCP rcv checksum\n", dev->name);
+
+ /*
+ * Display Ethernet address (this is called during the config
+ * process so we need to finish off the config message that
+ * was being displayed)
+ */
+ pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n",
+ dev->name, base, eaddr);
+
+ return 0;
+unreg_mdio:
+ mdiobus_unregister(sc->mii_bus);
+ dev_set_drvdata(&pldev->dev, NULL);
+free_mdio:
+ mdiobus_free(sc->mii_bus);
+uninit_ctx:
+ sbmac_uninitctx(sc);
+ return err;
+}
+
+
+static int sbmac_open(struct net_device *dev)
+{
+ struct sbmac_softc *sc = netdev_priv(dev);
+ int err;
+
+ if (debug > 1)
+ pr_debug("%s: sbmac_open() irq %d.\n", dev->name, dev->irq);
+
+ /*
+ * map/route interrupt (clear status first, in case something
+ * weird is pending; we haven't initialized the mac registers
+ * yet)
+ */
+
+ __raw_readq(sc->sbm_isr);
+ err = request_irq(dev->irq, sbmac_intr, IRQF_SHARED, dev->name, dev);
+ if (err) {
+ printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name,
+ dev->irq);
+ goto out_err;
+ }
+
+ sc->sbm_speed = sbmac_speed_none;
+ sc->sbm_duplex = sbmac_duplex_none;
+ sc->sbm_fc = sbmac_fc_none;
+ sc->sbm_pause = -1;
+ sc->sbm_link = 0;
+
+ /*
+ * Attach to the PHY
+ */
+ err = sbmac_mii_probe(dev);
+ if (err)
+ goto out_unregister;
+
+ /*
+ * Turn on the channel
+ */
+
+ sbmac_set_channel_state(sc,sbmac_state_on);
+
+ netif_start_queue(dev);
+
+ sbmac_set_rx_mode(dev);
+
+ phy_start(sc->phy_dev);
+
+ napi_enable(&sc->napi);
+
+ return 0;
+
+out_unregister:
+ free_irq(dev->irq, dev);
+out_err:
+ return err;
+}
+
+static int sbmac_mii_probe(struct net_device *dev)
+{
+ struct sbmac_softc *sc = netdev_priv(dev);
+ struct phy_device *phy_dev;
+ int i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ phy_dev = sc->mii_bus->phy_map[i];
+ if (phy_dev)
+ break;
+ }
+ if (!phy_dev) {
+ printk(KERN_ERR "%s: no PHY found\n", dev->name);
+ return -ENXIO;
+ }
+
+ phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll, 0,
+ PHY_INTERFACE_MODE_GMII);
+ if (IS_ERR(phy_dev)) {
+ printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
+ return PTR_ERR(phy_dev);
+ }
+
+ /* Remove any features not supported by the controller */
+ phy_dev->supported &= SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_MII |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause;
+ phy_dev->advertising = phy_dev->supported;
+
+ pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ dev->name, phy_dev->drv->name,
+ dev_name(&phy_dev->dev), phy_dev->irq);
+
+ sc->phy_dev = phy_dev;
+
+ return 0;
+}
+
+
+static void sbmac_mii_poll(struct net_device *dev)
+{
+ struct sbmac_softc *sc = netdev_priv(dev);
+ struct phy_device *phy_dev = sc->phy_dev;
+ unsigned long flags;
+ enum sbmac_fc fc;
+ int link_chg, speed_chg, duplex_chg, pause_chg, fc_chg;
+
+ link_chg = (sc->sbm_link != phy_dev->link);
+ speed_chg = (sc->sbm_speed != phy_dev->speed);
+ duplex_chg = (sc->sbm_duplex != phy_dev->duplex);
+ pause_chg = (sc->sbm_pause != phy_dev->pause);
+
+ if (!link_chg && !speed_chg && !duplex_chg && !pause_chg)
+ return; /* Hmmm... */
+
+ if (!phy_dev->link) {
+ if (link_chg) {
+ sc->sbm_link = phy_dev->link;
+ sc->sbm_speed = sbmac_speed_none;
+ sc->sbm_duplex = sbmac_duplex_none;
+ sc->sbm_fc = sbmac_fc_disabled;
+ sc->sbm_pause = -1;
+ pr_info("%s: link unavailable\n", dev->name);
+ }
+ return;
+ }
+
+ if (phy_dev->duplex == DUPLEX_FULL) {
+ if (phy_dev->pause)
+ fc = sbmac_fc_frame;
+ else
+ fc = sbmac_fc_disabled;
+ } else
+ fc = sbmac_fc_collision;
+ fc_chg = (sc->sbm_fc != fc);
+
+ pr_info("%s: link available: %dbase-%cD\n", dev->name, phy_dev->speed,
+ phy_dev->duplex == DUPLEX_FULL ? 'F' : 'H');
+
+ spin_lock_irqsave(&sc->sbm_lock, flags);
+
+ sc->sbm_speed = phy_dev->speed;
+ sc->sbm_duplex = phy_dev->duplex;
+ sc->sbm_fc = fc;
+ sc->sbm_pause = phy_dev->pause;
+ sc->sbm_link = phy_dev->link;
+
+ if ((speed_chg || duplex_chg || fc_chg) &&
+ sc->sbm_state != sbmac_state_off) {
+ /*
+ * something changed, restart the channel
+ */
+ if (debug > 1)
+ pr_debug("%s: restarting channel "
+ "because PHY state changed\n", dev->name);
+ sbmac_channel_stop(sc);
+ sbmac_channel_start(sc);
+ }
+
+ spin_unlock_irqrestore(&sc->sbm_lock, flags);
+}
+
+
+static void sbmac_tx_timeout (struct net_device *dev)
+{
+ struct sbmac_softc *sc = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sc->sbm_lock, flags);
+
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ dev->stats.tx_errors++;
+
+ spin_unlock_irqrestore(&sc->sbm_lock, flags);
+
+ printk (KERN_WARNING "%s: Transmit timed out\n",dev->name);
+}
+
+
+
+
+static void sbmac_set_rx_mode(struct net_device *dev)
+{
+ unsigned long flags;
+ struct sbmac_softc *sc = netdev_priv(dev);
+
+ spin_lock_irqsave(&sc->sbm_lock, flags);
+ if ((dev->flags ^ sc->sbm_devflags) & IFF_PROMISC) {
+ /*
+ * Promiscuous changed.
+ */
+
+ if (dev->flags & IFF_PROMISC) {
+ sbmac_promiscuous_mode(sc,1);
+ }
+ else {
+ sbmac_promiscuous_mode(sc,0);
+ }
+ }
+ spin_unlock_irqrestore(&sc->sbm_lock, flags);
+
+ /*
+ * Program the multicasts. Do this every time.
+ */
+
+ sbmac_setmulti(sc);
+
+}
+
+static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct sbmac_softc *sc = netdev_priv(dev);
+
+ if (!netif_running(dev) || !sc->phy_dev)
+ return -EINVAL;
+
+ return phy_mii_ioctl(sc->phy_dev, rq, cmd);
+}
+
+static int sbmac_close(struct net_device *dev)
+{
+ struct sbmac_softc *sc = netdev_priv(dev);
+
+ napi_disable(&sc->napi);
+
+ phy_stop(sc->phy_dev);
+
+ sbmac_set_channel_state(sc, sbmac_state_off);
+
+ netif_stop_queue(dev);
+
+ if (debug > 1)
+ pr_debug("%s: Shutting down ethercard\n", dev->name);
+
+ phy_disconnect(sc->phy_dev);
+ sc->phy_dev = NULL;
+ free_irq(dev->irq, dev);
+
+ sbdma_emptyring(&(sc->sbm_txdma));
+ sbdma_emptyring(&(sc->sbm_rxdma));
+
+ return 0;
+}
+
+static int sbmac_poll(struct napi_struct *napi, int budget)
+{
+ struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi);
+ int work_done;
+
+ work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1);
+ sbdma_tx_process(sc, &(sc->sbm_txdma), 1);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+
+#ifdef CONFIG_SBMAC_COALESCE
+ __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
+ ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
+ sc->sbm_imr);
+#else
+ __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
+ (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
+#endif
+ }
+
+ return work_done;
+}
+
+
+static int __devinit sbmac_probe(struct platform_device *pldev)
+{
+ struct net_device *dev;
+ struct sbmac_softc *sc;
+ void __iomem *sbm_base;
+ struct resource *res;
+ u64 sbmac_orig_hwaddr;
+ int err;
+
+ res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
+ BUG_ON(!res);
+ sbm_base = ioremap_nocache(res->start, resource_size(res));
+ if (!sbm_base) {
+ printk(KERN_ERR "%s: unable to map device registers\n",
+ dev_name(&pldev->dev));
+ err = -ENOMEM;
+ goto out_out;
+ }
+
+ /*
+ * The R_MAC_ETHERNET_ADDR register will be set to some nonzero
+ * value for us by the firmware if we're going to use this MAC.
+ * If we find a zero, skip this MAC.
+ */
+ sbmac_orig_hwaddr = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR);
+ pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", dev_name(&pldev->dev),
+ sbmac_orig_hwaddr ? "" : "not ", (long long)res->start);
+ if (sbmac_orig_hwaddr == 0) {
+ err = 0;
+ goto out_unmap;
+ }
+
+ /*
+ * Okay, cool. Initialize this MAC.
+ */
+ dev = alloc_etherdev(sizeof(struct sbmac_softc));
+ if (!dev) {
+ printk(KERN_ERR "%s: unable to allocate etherdev\n",
+ dev_name(&pldev->dev));
+ err = -ENOMEM;
+ goto out_unmap;
+ }
+
+ dev_set_drvdata(&pldev->dev, dev);
+ SET_NETDEV_DEV(dev, &pldev->dev);
+
+ sc = netdev_priv(dev);
+ sc->sbm_base = sbm_base;
+
+ err = sbmac_init(pldev, res->start);
+ if (err)
+ goto out_kfree;
+
+ return 0;
+
+out_kfree:
+ free_netdev(dev);
+ __raw_writeq(sbmac_orig_hwaddr, sbm_base + R_MAC_ETHERNET_ADDR);
+
+out_unmap:
+ iounmap(sbm_base);
+
+out_out:
+ return err;
+}
+
+static int __exit sbmac_remove(struct platform_device *pldev)
+{
+ struct net_device *dev = dev_get_drvdata(&pldev->dev);
+ struct sbmac_softc *sc = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ sbmac_uninitctx(sc);
+ mdiobus_unregister(sc->mii_bus);
+ mdiobus_free(sc->mii_bus);
+ iounmap(sc->sbm_base);
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct platform_driver sbmac_driver = {
+ .probe = sbmac_probe,
+ .remove = __exit_p(sbmac_remove),
+ .driver = {
+ .name = sbmac_string,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init sbmac_init_module(void)
+{
+ return platform_driver_register(&sbmac_driver);
+}
+
+static void __exit sbmac_cleanup_module(void)
+{
+ platform_driver_unregister(&sbmac_driver);
+}
+
+module_init(sbmac_init_module);
+module_exit(sbmac_cleanup_module);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
new file mode 100644
index 000000000000..9dbd1af6653c
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -0,0 +1,15953 @@
+/*
+ * tg3.c: Broadcom Tigon3 ethernet driver.
+ *
+ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
+ * Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (C) 2005-2011 Broadcom Corporation.
+ *
+ * Firmware is:
+ * Derived from proprietary unpublished source code,
+ * Copyright (C) 2000-2003 Broadcom Corporation.
+ *
+ * Permission is hereby granted for the distribution of this firmware
+ * data in hexadecimal or equivalent format, provided this copyright
+ * notice is accompanying it.
+ */
+
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/stringify.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/brcmphy.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/workqueue.h>
+#include <linux/prefetch.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+
+#include <net/checksum.h>
+#include <net/ip.h>
+
+#include <asm/system.h>
+#include <linux/io.h>
+#include <asm/byteorder.h>
+#include <linux/uaccess.h>
+
+#ifdef CONFIG_SPARC
+#include <asm/idprom.h>
+#include <asm/prom.h>
+#endif
+
+#define BAR_0 0
+#define BAR_2 2
+
+#include "tg3.h"
+
+/* Functions & macros to verify TG3_FLAGS types */
+
+static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
+{
+ return test_bit(flag, bits);
+}
+
+static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
+{
+ set_bit(flag, bits);
+}
+
+static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
+{
+ clear_bit(flag, bits);
+}
+
+#define tg3_flag(tp, flag) \
+ _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
+#define tg3_flag_set(tp, flag) \
+ _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
+#define tg3_flag_clear(tp, flag) \
+ _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
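+
+/*
+ * e.g. tg3_flag(tp, ENABLE_APE) expands to
+ * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an atomic
+ * test_bit() on the device's flag bitmap.
+ */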
+
+#define DRV_MODULE_NAME "tg3"
+#define TG3_MAJ_NUM 3
+#define TG3_MIN_NUM 120
+#define DRV_MODULE_VERSION \
+ __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
+#define DRV_MODULE_RELDATE "August 18, 2011"
+
+#define RESET_KIND_SHUTDOWN 0
+#define RESET_KIND_INIT 1
+#define RESET_KIND_SUSPEND 2
+
+#define TG3_DEF_RX_MODE 0
+#define TG3_DEF_TX_MODE 0
+#define TG3_DEF_MSG_ENABLE \
+ (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+#define TG3_GRC_LCLCTL_PWRSW_DELAY 100
+
+/* length of time before we decide the hardware is borked,
+ * and dev->tx_timeout() should be called to fix the problem
+ */
+
+#define TG3_TX_TIMEOUT (5 * HZ)
+
+/* hardware minimum and maximum for a single frame's data payload */
+#define TG3_MIN_MTU 60
+#define TG3_MAX_MTU(tp) \
+ (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
+
+/* These numbers seem to be hard coded in the NIC firmware somehow.
+ * You can't change the ring sizes, but you can change where you place
+ * them in the NIC onboard memory.
+ */
+#define TG3_RX_STD_RING_SIZE(tp) \
+ (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
+ TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
+#define TG3_DEF_RX_RING_PENDING 200
+#define TG3_RX_JMB_RING_SIZE(tp) \
+ (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
+ TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
+#define TG3_DEF_RX_JUMBO_RING_PENDING 100
+#define TG3_RSS_INDIR_TBL_SIZE 128
+
+/* Do not place this n-ring entries value into the tp struct itself,
+ * we really want to expose these constants to GCC so that modulo et
+ * al. operations are done with shifts and masks instead of with
+ * hw multiply/modulo instructions. Another solution would be to
+ * replace things like '% foo' with '& (foo - 1)'.
+ */
+
+#define TG3_TX_RING_SIZE 512
+#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
+
+#define TG3_RX_STD_RING_BYTES(tp) \
+ (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
+#define TG3_RX_JMB_RING_BYTES(tp) \
+ (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
+#define TG3_RX_RCB_RING_BYTES(tp) \
+ (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
+#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
+ TG3_TX_RING_SIZE)
+#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
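+
+/*
+ * Because TG3_TX_RING_SIZE is a power of two, NEXT_TX() compiles to a
+ * simple mask rather than a divide: e.g. NEXT_TX(511) == 0.
+ */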
+
+#define TG3_DMA_BYTE_ENAB 64
+
+#define TG3_RX_STD_DMA_SZ 1536
+#define TG3_RX_JMB_DMA_SZ 9046
+
+#define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
+
+#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
+#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
+
+#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
+ (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
+
+#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
+ (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
+
+/* Due to a hardware bug, the 5701 can only DMA to memory addresses
+ * that are at least dword aligned when used in PCIX mode. The driver
+ * works around this bug by double copying the packet. This workaround
+ * is built into the normal double copy length check for efficiency.
+ *
+ * However, the double copy is only necessary on those architectures
+ * where unaligned memory accesses are inefficient. For those architectures
+ * where unaligned memory accesses incur little penalty, we can reintegrate
+ * the 5701 in the normal rx path. Doing so saves a device structure
+ * dereference by hardcoding the double copy threshold in place.
+ */
+#define TG3_RX_COPY_THRESHOLD 256
+#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
+#else
+ #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
+#endif
+
+#if (NET_IP_ALIGN != 0)
+#define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
+#else
+#define TG3_RX_OFFSET(tp) 0
+#endif
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
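+/* e.g. if tx_pending is left at its default of TG3_DEF_TX_RING_PENDING
+ * (511), the wakeup threshold works out to 511 / 4 = 127 descriptors.
+ */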
+#define TG3_TX_BD_DMA_MAX 4096
+
+#define TG3_RAW_IP_ALIGN 2
+
+#define TG3_FW_UPDATE_TIMEOUT_SEC 5
+
+#define FIRMWARE_TG3 "tigon/tg3.bin"
+#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
+#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
+
+static char version[] __devinitdata =
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
+
+MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
+MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_FIRMWARE(FIRMWARE_TG3);
+MODULE_FIRMWARE(FIRMWARE_TG3TSO);
+MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
+
+static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
+module_param(tg3_debug, int, 0);
+MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
+
+static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
+ {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
+ {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
+ {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
+ {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
+
+static const struct {
+ const char string[ETH_GSTRING_LEN];
+} ethtool_stats_keys[] = {
+ { "rx_octets" },
+ { "rx_fragments" },
+ { "rx_ucast_packets" },
+ { "rx_mcast_packets" },
+ { "rx_bcast_packets" },
+ { "rx_fcs_errors" },
+ { "rx_align_errors" },
+ { "rx_xon_pause_rcvd" },
+ { "rx_xoff_pause_rcvd" },
+ { "rx_mac_ctrl_rcvd" },
+ { "rx_xoff_entered" },
+ { "rx_frame_too_long_errors" },
+ { "rx_jabbers" },
+ { "rx_undersize_packets" },
+ { "rx_in_length_errors" },
+ { "rx_out_length_errors" },
+ { "rx_64_or_less_octet_packets" },
+ { "rx_65_to_127_octet_packets" },
+ { "rx_128_to_255_octet_packets" },
+ { "rx_256_to_511_octet_packets" },
+ { "rx_512_to_1023_octet_packets" },
+ { "rx_1024_to_1522_octet_packets" },
+ { "rx_1523_to_2047_octet_packets" },
+ { "rx_2048_to_4095_octet_packets" },
+ { "rx_4096_to_8191_octet_packets" },
+ { "rx_8192_to_9022_octet_packets" },
+
+ { "tx_octets" },
+ { "tx_collisions" },
+
+ { "tx_xon_sent" },
+ { "tx_xoff_sent" },
+ { "tx_flow_control" },
+ { "tx_mac_errors" },
+ { "tx_single_collisions" },
+ { "tx_mult_collisions" },
+ { "tx_deferred" },
+ { "tx_excessive_collisions" },
+ { "tx_late_collisions" },
+ { "tx_collide_2times" },
+ { "tx_collide_3times" },
+ { "tx_collide_4times" },
+ { "tx_collide_5times" },
+ { "tx_collide_6times" },
+ { "tx_collide_7times" },
+ { "tx_collide_8times" },
+ { "tx_collide_9times" },
+ { "tx_collide_10times" },
+ { "tx_collide_11times" },
+ { "tx_collide_12times" },
+ { "tx_collide_13times" },
+ { "tx_collide_14times" },
+ { "tx_collide_15times" },
+ { "tx_ucast_packets" },
+ { "tx_mcast_packets" },
+ { "tx_bcast_packets" },
+ { "tx_carrier_sense_errors" },
+ { "tx_discards" },
+ { "tx_errors" },
+
+ { "dma_writeq_full" },
+ { "dma_write_prioq_full" },
+ { "rxbds_empty" },
+ { "rx_discards" },
+ { "rx_errors" },
+ { "rx_threshold_hit" },
+
+ { "dma_readq_full" },
+ { "dma_read_prioq_full" },
+ { "tx_comp_queue_full" },
+
+ { "ring_set_send_prod_index" },
+ { "ring_status_update" },
+ { "nic_irqs" },
+ { "nic_avoided_irqs" },
+ { "nic_tx_threshold_hit" },
+
+ { "mbuf_lwm_thresh_hit" },
+};
+
+#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
+
+
+static const struct {
+ const char string[ETH_GSTRING_LEN];
+} ethtool_test_keys[] = {
+ { "nvram test (online) " },
+ { "link test (online) " },
+ { "register test (offline)" },
+ { "memory test (offline)" },
+ { "mac loopback test (offline)" },
+ { "phy loopback test (offline)" },
+ { "ext loopback test (offline)" },
+ { "interrupt test (offline)" },
+};
+
+#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
+
+
+static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
+{
+ writel(val, tp->regs + off);
+}
+
+static u32 tg3_read32(struct tg3 *tp, u32 off)
+{
+ return readl(tp->regs + off);
+}
+
+static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
+{
+ writel(val, tp->aperegs + off);
+}
+
+static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
+{
+ return readl(tp->aperegs + off);
+}
+
+static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->indirect_lock, flags);
+ pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
+ pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
+ spin_unlock_irqrestore(&tp->indirect_lock, flags);
+}
+
+static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
+{
+ writel(val, tp->regs + off);
+ readl(tp->regs + off);
+}
+
+static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&tp->indirect_lock, flags);
+ pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
+ pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
+ spin_unlock_irqrestore(&tp->indirect_lock, flags);
+ return val;
+}
+
+static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
+{
+ unsigned long flags;
+
+ if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
+ pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
+ TG3_64BIT_REG_LOW, val);
+ return;
+ }
+ if (off == TG3_RX_STD_PROD_IDX_REG) {
+ pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
+ TG3_64BIT_REG_LOW, val);
+ return;
+ }
+
+ spin_lock_irqsave(&tp->indirect_lock, flags);
+ pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
+ pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
+ spin_unlock_irqrestore(&tp->indirect_lock, flags);
+
+ /* In indirect mode when disabling interrupts, we also need
+ * to clear the interrupt bit in the GRC local ctrl register.
+ */
+ if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
+ (val == 0x1)) {
+ pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
+ tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
+ }
+}
+
+static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&tp->indirect_lock, flags);
+ pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
+ pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
+ spin_unlock_irqrestore(&tp->indirect_lock, flags);
+ return val;
+}
+
+/* usec_wait specifies the wait time in usec when writing to certain registers
+ * where it is unsafe to read back the register without some delay.
+ * GRC_LOCAL_CTRL is one example, when GPIOs are toggled to switch power.
+ * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
+ */
+static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
+{
+ if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
+ /* Non-posted methods */
+ tp->write32(tp, off, val);
+ else {
+ /* Posted method */
+ tg3_write32(tp, off, val);
+ if (usec_wait)
+ udelay(usec_wait);
+ tp->read32(tp, off);
+ }
+ /* Wait again after the read for the posted method to guarantee that
+ * the wait time is met.
+ */
+ if (usec_wait)
+ udelay(usec_wait);
+}
+
+static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
+{
+ tp->write32_mbox(tp, off, val);
+ if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
+ tp->read32_mbox(tp, off);
+}
+
+static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
+{
+ void __iomem *mbox = tp->regs + off;
+ writel(val, mbox);
+ if (tg3_flag(tp, TXD_MBOX_HWBUG))
+ writel(val, mbox);
+ if (tg3_flag(tp, MBOX_WRITE_REORDER))
+ readl(mbox);
+}
+
+static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
+{
+ return readl(tp->regs + off + GRCMBOX_BASE);
+}
+
+static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
+{
+ writel(val, tp->regs + off + GRCMBOX_BASE);
+}
+
+#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
+#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
+#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
+#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
+#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
+
+#define tw32(reg, val) tp->write32(tp, reg, val)
+#define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
+#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
+#define tr32(reg) tp->read32(tp, reg)
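+
+/*
+ * Typical use (illustrative): GPIO power switching goes through
+ * GRC_LOCAL_CTRL with the post-write delay honoured, e.g.
+ *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, TG3_GRC_LCLCTL_PWRSW_DELAY);
+ */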
+
+static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
+{
+ unsigned long flags;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
+ (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
+ return;
+
+ spin_lock_irqsave(&tp->indirect_lock, flags);
+ if (tg3_flag(tp, SRAM_USE_CONFIG)) {
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
+
+ /* Always leave this as zero. */
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
+ } else {
+ tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
+ tw32_f(TG3PCI_MEM_WIN_DATA, val);
+
+ /* Always leave this as zero. */
+ tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+ }
+ spin_unlock_irqrestore(&tp->indirect_lock, flags);
+}
+
+static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
+{
+ unsigned long flags;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
+ (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
+ *val = 0;
+ return;
+ }
+
+ spin_lock_irqsave(&tp->indirect_lock, flags);
+ if (tg3_flag(tp, SRAM_USE_CONFIG)) {
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
+ pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
+
+ /* Always leave this as zero. */
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
+ } else {
+ tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
+ *val = tr32(TG3PCI_MEM_WIN_DATA);
+
+ /* Always leave this as zero. */
+ tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+ }
+ spin_unlock_irqrestore(&tp->indirect_lock, flags);
+}
+
+static void tg3_ape_lock_init(struct tg3 *tp)
+{
+ int i;
+ u32 regbase, bit;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ regbase = TG3_APE_LOCK_GRANT;
+ else
+ regbase = TG3_APE_PER_LOCK_GRANT;
+
+	/* Make sure the driver doesn't hold any stale locks. */
+ for (i = 0; i < 8; i++) {
+ if (i == TG3_APE_LOCK_GPIO)
+ continue;
+ tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
+ }
+
+ /* Clear the correct bit of the GPIO lock too. */
+ if (!tp->pci_fn)
+ bit = APE_LOCK_GRANT_DRIVER;
+ else
+ bit = 1 << tp->pci_fn;
+
+ tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
+}
+
+static int tg3_ape_lock(struct tg3 *tp, int locknum)
+{
+ int i, off;
+ int ret = 0;
+ u32 status, req, gnt, bit;
+
+ if (!tg3_flag(tp, ENABLE_APE))
+ return 0;
+
+ switch (locknum) {
+ case TG3_APE_LOCK_GPIO:
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ return 0;
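+		/* fall through */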
+ case TG3_APE_LOCK_GRC:
+ case TG3_APE_LOCK_MEM:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+ req = TG3_APE_LOCK_REQ;
+ gnt = TG3_APE_LOCK_GRANT;
+ } else {
+ req = TG3_APE_PER_LOCK_REQ;
+ gnt = TG3_APE_PER_LOCK_GRANT;
+ }
+
+ off = 4 * locknum;
+
+ if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
+ bit = APE_LOCK_REQ_DRIVER;
+ else
+ bit = 1 << tp->pci_fn;
+
+ tg3_ape_write32(tp, req + off, bit);
+
+ /* Wait for up to 1 millisecond to acquire lock. */
+ for (i = 0; i < 100; i++) {
+ status = tg3_ape_read32(tp, gnt + off);
+ if (status == bit)
+ break;
+ udelay(10);
+ }
+
+ if (status != bit) {
+ /* Revoke the lock request. */
+ tg3_ape_write32(tp, gnt + off, bit);
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+static void tg3_ape_unlock(struct tg3 *tp, int locknum)
+{
+ u32 gnt, bit;
+
+ if (!tg3_flag(tp, ENABLE_APE))
+ return;
+
+ switch (locknum) {
+ case TG3_APE_LOCK_GPIO:
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ return;
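+		/* fall through */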
+ case TG3_APE_LOCK_GRC:
+ case TG3_APE_LOCK_MEM:
+ break;
+ default:
+ return;
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ gnt = TG3_APE_LOCK_GRANT;
+ else
+ gnt = TG3_APE_PER_LOCK_GRANT;
+
+ if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
+ bit = APE_LOCK_GRANT_DRIVER;
+ else
+ bit = 1 << tp->pci_fn;
+
+ tg3_ape_write32(tp, gnt + 4 * locknum, bit);
+}
+
+static void tg3_ape_send_event(struct tg3 *tp, u32 event)
+{
+ int i;
+ u32 apedata;
+
+ /* NCSI does not support APE events */
+ if (tg3_flag(tp, APE_HAS_NCSI))
+ return;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
+ if (apedata != APE_SEG_SIG_MAGIC)
+ return;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+ if (!(apedata & APE_FW_STATUS_READY))
+ return;
+
+ /* Wait for up to 1 millisecond for APE to service previous event. */
+ for (i = 0; i < 10; i++) {
+ if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
+ return;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
+
+ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+ tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
+ event | APE_EVENT_STATUS_EVENT_PENDING);
+
+ tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+
+ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+ break;
+
+ udelay(100);
+ }
+
+ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+ tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
+}
+
+static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
+{
+ u32 event;
+ u32 apedata;
+
+ if (!tg3_flag(tp, ENABLE_APE))
+ return;
+
+ switch (kind) {
+ case RESET_KIND_INIT:
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
+ APE_HOST_SEG_SIG_MAGIC);
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
+ APE_HOST_SEG_LEN_MAGIC);
+ apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
+ tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
+ tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
+ APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
+ tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
+ APE_HOST_BEHAV_NO_PHYLOCK);
+ tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
+ TG3_APE_HOST_DRVR_STATE_START);
+
+ event = APE_EVENT_STATUS_STATE_START;
+ break;
+ case RESET_KIND_SHUTDOWN:
+ /* With the interface we are currently using,
+ * APE does not track driver state. Wiping
+ * out the HOST SEGMENT SIGNATURE forces
+ * the APE to assume OS absent status.
+ */
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
+
+ if (device_may_wakeup(&tp->pdev->dev) &&
+ tg3_flag(tp, WOL_ENABLE)) {
+ tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
+ TG3_APE_HOST_WOL_SPEED_AUTO);
+ apedata = TG3_APE_HOST_DRVR_STATE_WOL;
+ } else
+ apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
+
+ tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
+
+ event = APE_EVENT_STATUS_STATE_UNLOAD;
+ break;
+ case RESET_KIND_SUSPEND:
+ event = APE_EVENT_STATUS_STATE_SUSPEND;
+ break;
+ default:
+ return;
+ }
+
+ event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
+
+ tg3_ape_send_event(tp, event);
+}
+
+static void tg3_disable_ints(struct tg3 *tp)
+{
+ int i;
+
+ tw32(TG3PCI_MISC_HOST_CTRL,
+ (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
+ for (i = 0; i < tp->irq_max; i++)
+ tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
+}
+
+static void tg3_enable_ints(struct tg3 *tp)
+{
+ int i;
+
+ tp->irq_sync = 0;
+ wmb();
+
+ tw32(TG3PCI_MISC_HOST_CTRL,
+ (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
+
+ tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
+ if (tg3_flag(tp, 1SHOT_MSI))
+ tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
+
+ tp->coal_now |= tnapi->coal_now;
+ }
+
+ /* Force an initial interrupt */
+ if (!tg3_flag(tp, TAGGED_STATUS) &&
+ (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
+ tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+ else
+ tw32(HOSTCC_MODE, tp->coal_now);
+
+ tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
+}
+
+static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
+{
+ struct tg3 *tp = tnapi->tp;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
+ unsigned int work_exists = 0;
+
+ /* check for phy events */
+ if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
+ if (sblk->status & SD_STATUS_LINK_CHG)
+ work_exists = 1;
+ }
+ /* check for RX/TX work to do */
+ if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
+ *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
+ work_exists = 1;
+
+ return work_exists;
+}
+
+/* tg3_int_reenable
+ * Similar to tg3_enable_ints(), but it accurately determines whether there
+ * is new work pending and can return without flushing the PIO write
+ * that reenables interrupts.
+ */
+static void tg3_int_reenable(struct tg3_napi *tnapi)
+{
+ struct tg3 *tp = tnapi->tp;
+
+ tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
+ mmiowb();
+
+ /* When doing tagged status, this work check is unnecessary.
+ * The last_tag we write above tells the chip which piece of
+ * work we've completed.
+ */
+ if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
+ tw32(HOSTCC_MODE, tp->coalesce_mode |
+ HOSTCC_MODE_ENABLE | tnapi->coal_now);
+}
+
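+/* Restore TG3PCI_CLOCK_CTRL to its run-time value, stepping through the
+ * intermediate alternate-clock settings the hardware requires.
+ */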
+static void tg3_switch_clocks(struct tg3 *tp)
+{
+ u32 clock_ctrl;
+ u32 orig_clock_ctrl;
+
+ if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
+ return;
+
+ clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
+
+ orig_clock_ctrl = clock_ctrl;
+ clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
+ CLOCK_CTRL_CLKRUN_OENABLE |
+ 0x1f);
+ tp->pci_clock_ctrl = clock_ctrl;
+
+ if (tg3_flag(tp, 5705_PLUS)) {
+ if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
+ tw32_wait_f(TG3PCI_CLOCK_CTRL,
+ clock_ctrl | CLOCK_CTRL_625_CORE, 40);
+ }
+ } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
+ tw32_wait_f(TG3PCI_CLOCK_CTRL,
+ clock_ctrl |
+ (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
+ 40);
+ tw32_wait_f(TG3PCI_CLOCK_CTRL,
+ clock_ctrl | (CLOCK_CTRL_ALTCLK),
+ 40);
+ }
+ tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
+}
+
+#define PHY_BUSY_LOOPS 5000
+
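+/* Issue a Clause 22 MDIO read through the MAC's MI_COM register.  PHY
+ * auto-polling is paused for the duration of the transaction and
+ * restored afterwards.
+ */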
+static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
+{
+ u32 frame_val;
+ unsigned int loops;
+ int ret;
+
+ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+ tw32_f(MAC_MI_MODE,
+ (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
+ udelay(80);
+ }
+
+ *val = 0x0;
+
+ frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
+ MI_COM_PHY_ADDR_MASK);
+ frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
+ MI_COM_REG_ADDR_MASK);
+ frame_val |= (MI_COM_CMD_READ | MI_COM_START);
+
+ tw32_f(MAC_MI_COM, frame_val);
+
+ loops = PHY_BUSY_LOOPS;
+ while (loops != 0) {
+ udelay(10);
+ frame_val = tr32(MAC_MI_COM);
+
+ if ((frame_val & MI_COM_BUSY) == 0) {
+ udelay(5);
+ frame_val = tr32(MAC_MI_COM);
+ break;
+ }
+ loops -= 1;
+ }
+
+ ret = -EBUSY;
+ if (loops != 0) {
+ *val = frame_val & MI_COM_DATA_MASK;
+ ret = 0;
+ }
+
+ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+ tw32_f(MAC_MI_MODE, tp->mi_mode);
+ udelay(80);
+ }
+
+ return ret;
+}
+
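+/* MDIO write counterpart of tg3_readphy().  On FET-style PHYs, writes to
+ * MII_CTRL1000 and the AUX control register are silently discarded.
+ */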
+static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
+{
+ u32 frame_val;
+ unsigned int loops;
+ int ret;
+
+ if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
+ (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
+ return 0;
+
+ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+ tw32_f(MAC_MI_MODE,
+ (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
+ udelay(80);
+ }
+
+ frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
+ MI_COM_PHY_ADDR_MASK);
+ frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
+ MI_COM_REG_ADDR_MASK);
+ frame_val |= (val & MI_COM_DATA_MASK);
+ frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
+
+ tw32_f(MAC_MI_COM, frame_val);
+
+ loops = PHY_BUSY_LOOPS;
+ while (loops != 0) {
+ udelay(10);
+ frame_val = tr32(MAC_MI_COM);
+ if ((frame_val & MI_COM_BUSY) == 0) {
+ udelay(5);
+ frame_val = tr32(MAC_MI_COM);
+ break;
+ }
+ loops -= 1;
+ }
+
+ ret = -EBUSY;
+ if (loops != 0)
+ ret = 0;
+
+ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+ tw32_f(MAC_MI_MODE, tp->mi_mode);
+ udelay(80);
+ }
+
+ return ret;
+}
+
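+/* Write a Clause 45 register indirectly through the Clause 22 MMD
+ * control and address/data registers.
+ */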
+static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
+{
+ int err;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
+ if (err)
+ goto done;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
+ if (err)
+ goto done;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
+ MII_TG3_MMD_CTRL_DATA_NOINC | devad);
+ if (err)
+ goto done;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
+
+done:
+ return err;
+}
+
+static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
+{
+ int err;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
+ if (err)
+ goto done;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
+ if (err)
+ goto done;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
+ MII_TG3_MMD_CTRL_DATA_NOINC | devad);
+ if (err)
+ goto done;
+
+ err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
+
+done:
+ return err;
+}
+
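+/* Access a DSP register through the PHY's address/data register pair. */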
+static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
+{
+ int err;
+
+ err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
+ if (!err)
+ err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
+
+ return err;
+}
+
+static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
+{
+ int err;
+
+ err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
+ if (!err)
+ err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
+
+ return err;
+}
+
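+/* Read one of the shadow registers multiplexed behind MII_TG3_AUX_CTRL. */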
+static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
+{
+ int err;
+
+ err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
+ (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
+ MII_TG3_AUXCTL_SHDWSEL_MISC);
+ if (!err)
+ err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
+
+ return err;
+}
+
+static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
+{
+ if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
+ set |= MII_TG3_AUXCTL_MISC_WREN;
+
+ return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
+}
+
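+/* Helpers that turn the PHY's SM DSP clock on and off around DSP
+ * register accesses.
+ */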
+#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
+ tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
+ MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
+ MII_TG3_AUXCTL_ACTL_TX_6DB)
+
+#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
+ tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
+			     MII_TG3_AUXCTL_ACTL_TX_6DB)
+
+static int tg3_bmcr_reset(struct tg3 *tp)
+{
+ u32 phy_control;
+ int limit, err;
+
+ /* OK, reset it, and poll the BMCR_RESET bit until it
+ * clears or we time out.
+ */
+ phy_control = BMCR_RESET;
+ err = tg3_writephy(tp, MII_BMCR, phy_control);
+ if (err != 0)
+ return -EBUSY;
+
+ limit = 5000;
+ while (limit--) {
+ err = tg3_readphy(tp, MII_BMCR, &phy_control);
+ if (err != 0)
+ return -EBUSY;
+
+ if ((phy_control & BMCR_RESET) == 0) {
+ udelay(40);
+ break;
+ }
+ udelay(10);
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
+{
+ struct tg3 *tp = bp->priv;
+ u32 val;
+
+ spin_lock_bh(&tp->lock);
+
+ if (tg3_readphy(tp, reg, &val))
+ val = -EIO;
+
+ spin_unlock_bh(&tp->lock);
+
+ return val;
+}
+
+static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
+{
+ struct tg3 *tp = bp->priv;
+	int ret = 0;
+
+ spin_lock_bh(&tp->lock);
+
+ if (tg3_writephy(tp, reg, val))
+ ret = -EIO;
+
+ spin_unlock_bh(&tp->lock);
+
+ return ret;
+}
+
+static int tg3_mdio_reset(struct mii_bus *bp)
+{
+ return 0;
+}
+
+static void tg3_mdio_config_5785(struct tg3 *tp)
+{
+ u32 val;
+ struct phy_device *phydev;
+
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
+ case PHY_ID_BCM50610:
+ case PHY_ID_BCM50610M:
+ val = MAC_PHYCFG2_50610_LED_MODES;
+ break;
+ case PHY_ID_BCMAC131:
+ val = MAC_PHYCFG2_AC131_LED_MODES;
+ break;
+ case PHY_ID_RTL8211C:
+ val = MAC_PHYCFG2_RTL8211C_LED_MODES;
+ break;
+ case PHY_ID_RTL8201E:
+ val = MAC_PHYCFG2_RTL8201E_LED_MODES;
+ break;
+ default:
+ return;
+ }
+
+ if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
+ tw32(MAC_PHYCFG2, val);
+
+ val = tr32(MAC_PHYCFG1);
+ val &= ~(MAC_PHYCFG1_RGMII_INT |
+ MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
+ val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
+ tw32(MAC_PHYCFG1, val);
+
+ return;
+ }
+
+ if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
+ val |= MAC_PHYCFG2_EMODE_MASK_MASK |
+ MAC_PHYCFG2_FMODE_MASK_MASK |
+ MAC_PHYCFG2_GMODE_MASK_MASK |
+ MAC_PHYCFG2_ACT_MASK_MASK |
+ MAC_PHYCFG2_QUAL_MASK_MASK |
+ MAC_PHYCFG2_INBAND_ENABLE;
+
+ tw32(MAC_PHYCFG2, val);
+
+ val = tr32(MAC_PHYCFG1);
+ val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
+ MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
+ if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
+ if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
+ val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
+ if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
+ val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
+ }
+ val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
+ MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
+ tw32(MAC_PHYCFG1, val);
+
+ val = tr32(MAC_EXT_RGMII_MODE);
+ val &= ~(MAC_RGMII_MODE_RX_INT_B |
+ MAC_RGMII_MODE_RX_QUALITY |
+ MAC_RGMII_MODE_RX_ACTIVITY |
+ MAC_RGMII_MODE_RX_ENG_DET |
+ MAC_RGMII_MODE_TX_ENABLE |
+ MAC_RGMII_MODE_TX_LOWPWR |
+ MAC_RGMII_MODE_TX_RESET);
+ if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
+ if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
+ val |= MAC_RGMII_MODE_RX_INT_B |
+ MAC_RGMII_MODE_RX_QUALITY |
+ MAC_RGMII_MODE_RX_ACTIVITY |
+ MAC_RGMII_MODE_RX_ENG_DET;
+ if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
+ val |= MAC_RGMII_MODE_TX_ENABLE |
+ MAC_RGMII_MODE_TX_LOWPWR |
+ MAC_RGMII_MODE_TX_RESET;
+ }
+ tw32(MAC_EXT_RGMII_MODE, val);
+}
+
+static void tg3_mdio_start(struct tg3 *tp)
+{
+ tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
+ tw32_f(MAC_MI_MODE, tp->mi_mode);
+ udelay(80);
+
+ if (tg3_flag(tp, MDIOBUS_INITED) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ tg3_mdio_config_5785(tp);
+}
+
+static int tg3_mdio_init(struct tg3 *tp)
+{
+ int i;
+ u32 reg;
+ struct phy_device *phydev;
+
+ if (tg3_flag(tp, 5717_PLUS)) {
+ u32 is_serdes;
+
+ tp->phy_addr = tp->pci_fn + 1;
+
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
+ is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
+ else
+ is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
+ TG3_CPMU_PHY_STRAP_IS_SERDES;
+ if (is_serdes)
+ tp->phy_addr += 7;
+ } else
+ tp->phy_addr = TG3_PHY_MII_ADDR;
+
+ tg3_mdio_start(tp);
+
+ if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
+ return 0;
+
+ tp->mdio_bus = mdiobus_alloc();
+ if (tp->mdio_bus == NULL)
+ return -ENOMEM;
+
+ tp->mdio_bus->name = "tg3 mdio bus";
+ snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
+ (tp->pdev->bus->number << 8) | tp->pdev->devfn);
+ tp->mdio_bus->priv = tp;
+ tp->mdio_bus->parent = &tp->pdev->dev;
+ tp->mdio_bus->read = &tg3_mdio_read;
+ tp->mdio_bus->write = &tg3_mdio_write;
+ tp->mdio_bus->reset = &tg3_mdio_reset;
+ tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
+ tp->mdio_bus->irq = &tp->mdio_irq[0];
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ tp->mdio_bus->irq[i] = PHY_POLL;
+
+	/* The bus registration will look for all the PHYs on the mdio bus.
+	 * Unfortunately, it does not ensure the PHY is powered up before
+	 * accessing the PHY ID registers.  A chip reset is the
+	 * quickest way to bring the device back to an operational state.
+	 */
+ if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
+ tg3_bmcr_reset(tp);
+
+ i = mdiobus_register(tp->mdio_bus);
+ if (i) {
+		dev_warn(&tp->pdev->dev, "mdiobus_register failed (0x%x)\n", i);
+ mdiobus_free(tp->mdio_bus);
+ return i;
+ }
+
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+
+ if (!phydev || !phydev->drv) {
+ dev_warn(&tp->pdev->dev, "No PHY devices\n");
+ mdiobus_unregister(tp->mdio_bus);
+ mdiobus_free(tp->mdio_bus);
+ return -ENODEV;
+ }
+
+ switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
+ case PHY_ID_BCM57780:
+ phydev->interface = PHY_INTERFACE_MODE_GMII;
+ phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
+ break;
+ case PHY_ID_BCM50610:
+ case PHY_ID_BCM50610M:
+ phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
+ PHY_BRCM_RX_REFCLK_UNUSED |
+ PHY_BRCM_DIS_TXCRXC_NOENRGY |
+ PHY_BRCM_AUTO_PWRDWN_ENABLE;
+ if (tg3_flag(tp, RGMII_INBAND_DISABLE))
+ phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
+ if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
+ phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
+ if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
+ phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
+ /* fallthru */
+ case PHY_ID_RTL8211C:
+ phydev->interface = PHY_INTERFACE_MODE_RGMII;
+ break;
+ case PHY_ID_RTL8201E:
+ case PHY_ID_BCMAC131:
+ phydev->interface = PHY_INTERFACE_MODE_MII;
+ phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
+ tp->phy_flags |= TG3_PHYFLG_IS_FET;
+ break;
+ }
+
+ tg3_flag_set(tp, MDIOBUS_INITED);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ tg3_mdio_config_5785(tp);
+
+ return 0;
+}
+
+static void tg3_mdio_fini(struct tg3 *tp)
+{
+ if (tg3_flag(tp, MDIOBUS_INITED)) {
+ tg3_flag_clear(tp, MDIOBUS_INITED);
+ mdiobus_unregister(tp->mdio_bus);
+ mdiobus_free(tp->mdio_bus);
+ }
+}
+
+/* tp->lock is held. */
+static inline void tg3_generate_fw_event(struct tg3 *tp)
+{
+ u32 val;
+
+ val = tr32(GRC_RX_CPU_EVENT);
+ val |= GRC_RX_CPU_DRIVER_EVENT;
+ tw32_f(GRC_RX_CPU_EVENT, val);
+
+ tp->last_event_jiffies = jiffies;
+}
+
+#define TG3_FW_EVENT_TIMEOUT_USEC 2500
+
+/* tp->lock is held. */
+static void tg3_wait_for_event_ack(struct tg3 *tp)
+{
+ int i;
+ unsigned int delay_cnt;
+ long time_remain;
+
+ /* If enough time has passed, no wait is necessary. */
+ time_remain = (long)(tp->last_event_jiffies + 1 +
+ usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
+ (long)jiffies;
+ if (time_remain < 0)
+ return;
+
+ /* Check if we can shorten the wait time. */
+ delay_cnt = jiffies_to_usecs(time_remain);
+ if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
+ delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
+ delay_cnt = (delay_cnt >> 3) + 1;
+
+ for (i = 0; i < delay_cnt; i++) {
+ if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
+ break;
+ udelay(8);
+ }
+}
+
+/* tp->lock is held. */
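+/* Report the current MII link state to the management firmware through
+ * the NIC SRAM command mailbox.
+ */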
+static void tg3_ump_link_report(struct tg3 *tp)
+{
+ u32 reg;
+ u32 val;
+
+ if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
+ return;
+
+ tg3_wait_for_event_ack(tp);
+
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
+
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
+
+ val = 0;
+ if (!tg3_readphy(tp, MII_BMCR, &reg))
+ val = reg << 16;
+ if (!tg3_readphy(tp, MII_BMSR, &reg))
+ val |= (reg & 0xffff);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
+
+ val = 0;
+ if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
+ val = reg << 16;
+ if (!tg3_readphy(tp, MII_LPA, &reg))
+ val |= (reg & 0xffff);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
+
+ val = 0;
+ if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
+ if (!tg3_readphy(tp, MII_CTRL1000, &reg))
+ val = reg << 16;
+ if (!tg3_readphy(tp, MII_STAT1000, &reg))
+ val |= (reg & 0xffff);
+ }
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
+
+ if (!tg3_readphy(tp, MII_PHYADDR, &reg))
+ val = reg << 16;
+ else
+ val = 0;
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
+
+ tg3_generate_fw_event(tp);
+}
+
+/* tp->lock is held. */
+static void tg3_stop_fw(struct tg3 *tp)
+{
+ if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
+ /* Wait for RX cpu to ACK the previous event. */
+ tg3_wait_for_event_ack(tp);
+
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
+
+ tg3_generate_fw_event(tp);
+
+ /* Wait for RX cpu to ACK this event. */
+ tg3_wait_for_event_ack(tp);
+ }
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
+{
+ tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
+ NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
+
+ if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
+ switch (kind) {
+ case RESET_KIND_INIT:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_START);
+ break;
+
+ case RESET_KIND_SHUTDOWN:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_UNLOAD);
+ break;
+
+ case RESET_KIND_SUSPEND:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_SUSPEND);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (kind == RESET_KIND_INIT ||
+ kind == RESET_KIND_SUSPEND)
+ tg3_ape_driver_state_change(tp, kind);
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
+{
+ if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
+ switch (kind) {
+ case RESET_KIND_INIT:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_START_DONE);
+ break;
+
+ case RESET_KIND_SHUTDOWN:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_UNLOAD_DONE);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (kind == RESET_KIND_SHUTDOWN)
+ tg3_ape_driver_state_change(tp, kind);
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
+{
+ if (tg3_flag(tp, ENABLE_ASF)) {
+ switch (kind) {
+ case RESET_KIND_INIT:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_START);
+ break;
+
+ case RESET_KIND_SHUTDOWN:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_UNLOAD);
+ break;
+
+ case RESET_KIND_SUSPEND:
+ tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+ DRV_STATE_SUSPEND);
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
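+/* Wait for the on-chip bootcode to signal that it has finished
+ * initializing after a reset.
+ */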
+static int tg3_poll_fw(struct tg3 *tp)
+{
+ int i;
+ u32 val;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ /* Wait up to 20ms for init done. */
+ for (i = 0; i < 200; i++) {
+ if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
+ return 0;
+ udelay(100);
+ }
+ return -ENODEV;
+ }
+
+ /* Wait for firmware initialization to complete. */
+ for (i = 0; i < 100000; i++) {
+ tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
+ if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+ break;
+ udelay(10);
+ }
+
+ /* Chip might not be fitted with firmware. Some Sun onboard
+ * parts are configured like that. So don't signal the timeout
+ * of the above loop as an error, but do report the lack of
+ * running firmware once.
+ */
+ if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
+ tg3_flag_set(tp, NO_FWARE_REPORTED);
+
+ netdev_info(tp->dev, "No firmware running\n");
+ }
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ /* The 57765 A0 needs a little more
+ * time to do some important work.
+ */
+ mdelay(10);
+ }
+
+ return 0;
+}
+
+static void tg3_link_report(struct tg3 *tp)
+{
+ if (!netif_carrier_ok(tp->dev)) {
+ netif_info(tp, link, tp->dev, "Link is down\n");
+ tg3_ump_link_report(tp);
+ } else if (netif_msg_link(tp)) {
+ netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
+ (tp->link_config.active_speed == SPEED_1000 ?
+ 1000 :
+ (tp->link_config.active_speed == SPEED_100 ?
+ 100 : 10)),
+ (tp->link_config.active_duplex == DUPLEX_FULL ?
+ "full" : "half"));
+
+ netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
+ (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
+ "on" : "off",
+ (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
+ "on" : "off");
+
+ if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
+ netdev_info(tp->dev, "EEE is %s\n",
+ tp->setlpicnt ? "enabled" : "disabled");
+
+ tg3_ump_link_report(tp);
+ }
+}
+
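+/* Map the FLOW_CTRL_TX/RX flags onto the copper autoneg pause
+ * advertisement bits.
+ */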
+static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
+{
+ u16 miireg;
+
+ if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
+ miireg = ADVERTISE_PAUSE_CAP;
+ else if (flow_ctrl & FLOW_CTRL_TX)
+ miireg = ADVERTISE_PAUSE_ASYM;
+ else if (flow_ctrl & FLOW_CTRL_RX)
+ miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ else
+ miireg = 0;
+
+ return miireg;
+}
+
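+/* Same mapping, for the 1000BASE-X pause advertisement bits. */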
+static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
+{
+ u16 miireg;
+
+ if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
+ miireg = ADVERTISE_1000XPAUSE;
+ else if (flow_ctrl & FLOW_CTRL_TX)
+ miireg = ADVERTISE_1000XPSE_ASYM;
+ else if (flow_ctrl & FLOW_CTRL_RX)
+ miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
+ else
+ miireg = 0;
+
+ return miireg;
+}
+
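+/* Resolve the negotiated flow control settings from the local and
+ * remote 1000BASE-X pause advertisements.
+ */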
+static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
+{
+ u8 cap = 0;
+
+ if (lcladv & ADVERTISE_1000XPAUSE) {
+ if (lcladv & ADVERTISE_1000XPSE_ASYM) {
+ if (rmtadv & LPA_1000XPAUSE)
+ cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+ else if (rmtadv & LPA_1000XPAUSE_ASYM)
+ cap = FLOW_CTRL_RX;
+ } else {
+ if (rmtadv & LPA_1000XPAUSE)
+ cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+ }
+ } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
+ if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
+ cap = FLOW_CTRL_TX;
+ }
+
+ return cap;
+}
+
+static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
+{
+ u8 autoneg;
+ u8 flowctrl = 0;
+ u32 old_rx_mode = tp->rx_mode;
+ u32 old_tx_mode = tp->tx_mode;
+
+ if (tg3_flag(tp, USE_PHYLIB))
+ autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
+ else
+ autoneg = tp->link_config.autoneg;
+
+ if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
+ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
+ flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
+ else
+ flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+ } else
+ flowctrl = tp->link_config.flowctrl;
+
+ tp->link_config.active_flowctrl = flowctrl;
+
+ if (flowctrl & FLOW_CTRL_RX)
+ tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
+ else
+ tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
+
+ if (old_rx_mode != tp->rx_mode)
+ tw32_f(MAC_RX_MODE, tp->rx_mode);
+
+ if (flowctrl & FLOW_CTRL_TX)
+ tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
+ else
+ tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
+
+ if (old_tx_mode != tp->tx_mode)
+ tw32_f(MAC_TX_MODE, tp->tx_mode);
+}
+
+static void tg3_adjust_link(struct net_device *dev)
+{
+ u8 oldflowctrl, linkmesg = 0;
+ u32 mac_mode, lcl_adv, rmt_adv;
+ struct tg3 *tp = netdev_priv(dev);
+ struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+
+ spin_lock_bh(&tp->lock);
+
+ mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
+ MAC_MODE_HALF_DUPLEX);
+
+ oldflowctrl = tp->link_config.active_flowctrl;
+
+ if (phydev->link) {
+ lcl_adv = 0;
+ rmt_adv = 0;
+
+ if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
+ mac_mode |= MAC_MODE_PORT_MODE_MII;
+ else if (phydev->speed == SPEED_1000 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
+ mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ else
+ mac_mode |= MAC_MODE_PORT_MODE_MII;
+
+ if (phydev->duplex == DUPLEX_HALF)
+ mac_mode |= MAC_MODE_HALF_DUPLEX;
+ else {
+ lcl_adv = tg3_advert_flowctrl_1000T(
+ tp->link_config.flowctrl);
+
+ if (phydev->pause)
+ rmt_adv = LPA_PAUSE_CAP;
+ if (phydev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+ }
+
+ tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
+ } else
+ mac_mode |= MAC_MODE_PORT_MODE_GMII;
+
+ if (mac_mode != tp->mac_mode) {
+ tp->mac_mode = mac_mode;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ if (phydev->speed == SPEED_10)
+ tw32(MAC_MI_STAT,
+ MAC_MI_STAT_10MBPS_MODE |
+ MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
+ else
+ tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
+ }
+
+ if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
+ tw32(MAC_TX_LENGTHS,
+ ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+ (6 << TX_LENGTHS_IPG_SHIFT) |
+ (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
+ else
+ tw32(MAC_TX_LENGTHS,
+ ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+ (6 << TX_LENGTHS_IPG_SHIFT) |
+ (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
+
+ if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
+ (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
+ phydev->speed != tp->link_config.active_speed ||
+ phydev->duplex != tp->link_config.active_duplex ||
+ oldflowctrl != tp->link_config.active_flowctrl)
+ linkmesg = 1;
+
+ tp->link_config.active_speed = phydev->speed;
+ tp->link_config.active_duplex = phydev->duplex;
+
+ spin_unlock_bh(&tp->lock);
+
+ if (linkmesg)
+ tg3_link_report(tp);
+}
+
+static int tg3_phy_init(struct tg3 *tp)
+{
+ struct phy_device *phydev;
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
+ return 0;
+
+ /* Bring the PHY back to a known state. */
+ tg3_bmcr_reset(tp);
+
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+
+ /* Attach the MAC to the PHY. */
+ phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
+ phydev->dev_flags, phydev->interface);
+ if (IS_ERR(phydev)) {
+ dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
+ return PTR_ERR(phydev);
+ }
+
+ /* Mask with MAC supported features. */
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_GMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ phydev->supported &= (PHY_GBIT_FEATURES |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ break;
+ }
+ /* fallthru */
+ case PHY_INTERFACE_MODE_MII:
+ phydev->supported &= (PHY_BASIC_FEATURES |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ break;
+ default:
+ phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+ return -EINVAL;
+ }
+
+ tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
+
+ phydev->advertising = phydev->supported;
+
+ return 0;
+}
+
+static void tg3_phy_start(struct tg3 *tp)
+{
+ struct phy_device *phydev;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+ return;
+
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+ tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
+ phydev->speed = tp->link_config.orig_speed;
+ phydev->duplex = tp->link_config.orig_duplex;
+ phydev->autoneg = tp->link_config.orig_autoneg;
+ phydev->advertising = tp->link_config.orig_advertising;
+ }
+
+ phy_start(phydev);
+
+ phy_start_aneg(phydev);
+}
+
+static void tg3_phy_stop(struct tg3 *tp)
+{
+ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+ return;
+
+ phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+}
+
+static void tg3_phy_fini(struct tg3 *tp)
+{
+ if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
+ phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+ tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
+ }
+}
+
+static int tg3_phy_set_extloopbk(struct tg3 *tp)
+{
+ int err;
+ u32 val;
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_FET)
+ return 0;
+
+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
+ /* Cannot do read-modify-write on 5401 */
+ err = tg3_phy_auxctl_write(tp,
+ MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
+ MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
+ 0x4c20);
+ goto done;
+ }
+
+ err = tg3_phy_auxctl_read(tp,
+ MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
+ if (err)
+ return err;
+
+ val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
+ err = tg3_phy_auxctl_write(tp,
+ MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
+
+done:
+ return err;
+}
+
+static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
+{
+ u32 phytest;
+
+ if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
+ u32 phy;
+
+ tg3_writephy(tp, MII_TG3_FET_TEST,
+ phytest | MII_TG3_FET_SHADOW_EN);
+ if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
+ if (enable)
+ phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
+ else
+ phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
+ tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
+ }
+ tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
+ }
+}
+
+static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
+{
+ u32 reg;
+
+ if (!tg3_flag(tp, 5705_PLUS) ||
+ (tg3_flag(tp, 5717_PLUS) &&
+ (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
+ return;
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+ tg3_phy_fet_toggle_apd(tp, enable);
+ return;
+ }
+
+ reg = MII_TG3_MISC_SHDW_WREN |
+ MII_TG3_MISC_SHDW_SCR5_SEL |
+ MII_TG3_MISC_SHDW_SCR5_LPED |
+ MII_TG3_MISC_SHDW_SCR5_DLPTLM |
+ MII_TG3_MISC_SHDW_SCR5_SDTL |
+ MII_TG3_MISC_SHDW_SCR5_C125OE;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
+ reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
+
+ tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
+
+ reg = MII_TG3_MISC_SHDW_WREN |
+ MII_TG3_MISC_SHDW_APD_SEL |
+ MII_TG3_MISC_SHDW_APD_WKTM_84MS;
+ if (enable)
+ reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
+
+ tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
+}
+
+static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
+{
+ u32 phy;
+
+ if (!tg3_flag(tp, 5705_PLUS) ||
+ (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+ return;
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+ u32 ephy;
+
+ if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
+ u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
+
+ tg3_writephy(tp, MII_TG3_FET_TEST,
+ ephy | MII_TG3_FET_SHADOW_EN);
+ if (!tg3_readphy(tp, reg, &phy)) {
+ if (enable)
+ phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
+ else
+ phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
+ tg3_writephy(tp, reg, phy);
+ }
+ tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
+ }
+ } else {
+ int ret;
+
+ ret = tg3_phy_auxctl_read(tp,
+ MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
+ if (!ret) {
+ if (enable)
+ phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
+ else
+ phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
+ tg3_phy_auxctl_write(tp,
+ MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
+ }
+ }
+}
+
+static void tg3_phy_set_wirespeed(struct tg3 *tp)
+{
+ int ret;
+ u32 val;
+
+ if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
+ return;
+
+ ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
+ if (!ret)
+ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
+ val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
+}
+
+static void tg3_phy_apply_otp(struct tg3 *tp)
+{
+ u32 otp, phy;
+
+ if (!tp->phy_otp)
+ return;
+
+ otp = tp->phy_otp;
+
+ if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
+ return;
+
+ phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
+ phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
+
+ phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
+ ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
+ tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
+
+ phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
+ phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
+ tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
+
+ phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
+ tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
+
+ phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
+ tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
+
+ phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
+ ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
+ tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
+
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+}
+
+static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
+{
+ u32 val;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
+ return;
+
+ tp->setlpicnt = 0;
+
+ if (tp->link_config.autoneg == AUTONEG_ENABLE &&
+ current_link_up == 1 &&
+ tp->link_config.active_duplex == DUPLEX_FULL &&
+ (tp->link_config.active_speed == SPEED_100 ||
+ tp->link_config.active_speed == SPEED_1000)) {
+ u32 eeectl;
+
+ if (tp->link_config.active_speed == SPEED_1000)
+ eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
+ else
+ eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
+
+ tw32(TG3_CPMU_EEE_CTRL, eeectl);
+
+ tg3_phy_cl45_read(tp, MDIO_MMD_AN,
+ TG3_CL45_D7_EEERES_STAT, &val);
+
+ if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
+ val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
+ tp->setlpicnt = 2;
+ }
+
+ if (!tp->setlpicnt) {
+ if (current_link_up == 1 &&
+ !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ }
+
+ val = tr32(TG3_CPMU_EEE_MODE);
+ tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
+ }
+}
+
+static void tg3_phy_eee_enable(struct tg3 *tp)
+{
+ u32 val;
+
+ if (tp->link_config.active_speed == SPEED_1000 &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+ !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ val = MII_TG3_DSP_TAP26_ALNOKO |
+ MII_TG3_DSP_TAP26_RMRXSTO;
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ }
+
+ val = tr32(TG3_CPMU_EEE_MODE);
+ tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
+}
+
+static int tg3_wait_macro_done(struct tg3 *tp)
+{
+ int limit = 100;
+
+ while (limit--) {
+ u32 tmp32;
+
+ if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
+ if ((tmp32 & 0x1000) == 0)
+ break;
+ }
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
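+/* Write test patterns into each DSP channel and verify them by reading
+ * back; request a fresh PHY reset via *resetp on any failure.
+ */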
+static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
+{
+ static const u32 test_pat[4][6] = {
+ { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
+ { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
+ { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
+ { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
+ };
+ int chan;
+
+ for (chan = 0; chan < 4; chan++) {
+ int i;
+
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+ (chan * 0x2000) | 0x0200);
+ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
+
+ for (i = 0; i < 6; i++)
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
+ test_pat[chan][i]);
+
+ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
+ if (tg3_wait_macro_done(tp)) {
+ *resetp = 1;
+ return -EBUSY;
+ }
+
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+ (chan * 0x2000) | 0x0200);
+ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
+ if (tg3_wait_macro_done(tp)) {
+ *resetp = 1;
+ return -EBUSY;
+ }
+
+ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
+ if (tg3_wait_macro_done(tp)) {
+ *resetp = 1;
+ return -EBUSY;
+ }
+
+ for (i = 0; i < 6; i += 2) {
+ u32 low, high;
+
+ if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
+ tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
+ tg3_wait_macro_done(tp)) {
+ *resetp = 1;
+ return -EBUSY;
+ }
+ low &= 0x7fff;
+ high &= 0x000f;
+ if (low != test_pat[chan][i] ||
+ high != test_pat[chan][i+1]) {
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
+
+ return -EBUSY;
+ }
+ }
+ }
+
+ return 0;
+}
+
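+/* Clear the DSP test pattern memory for every channel. */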
+static int tg3_phy_reset_chanpat(struct tg3 *tp)
+{
+ int chan;
+
+ for (chan = 0; chan < 4; chan++) {
+ int i;
+
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+ (chan * 0x2000) | 0x0200);
+ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
+ for (i = 0; i < 6; i++)
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
+ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
+ if (tg3_wait_macro_done(tp))
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
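+/* PHY reset workaround for the 5703/5704/5705: retrain the DSP with
+ * test patterns, retrying until they verify cleanly.
+ */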
+static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
+{
+ u32 reg32, phy9_orig;
+ int retries, do_phy_reset, err;
+
+ retries = 10;
+ do_phy_reset = 1;
+ do {
+ if (do_phy_reset) {
+ err = tg3_bmcr_reset(tp);
+ if (err)
+ return err;
+ do_phy_reset = 0;
+ }
+
+ /* Disable transmitter and interrupt. */
+ if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
+ continue;
+
+ reg32 |= 0x3000;
+ tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
+
+ /* Set full-duplex, 1000 mbps. */
+ tg3_writephy(tp, MII_BMCR,
+ BMCR_FULLDPLX | BMCR_SPEED1000);
+
+ /* Set to master mode. */
+ if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
+ continue;
+
+ tg3_writephy(tp, MII_CTRL1000,
+ CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
+
+ err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+ if (err)
+ return err;
+
+ /* Block the PHY control access. */
+ tg3_phydsp_write(tp, 0x8005, 0x0800);
+
+ err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
+ if (!err)
+ break;
+ } while (--retries);
+
+ err = tg3_phy_reset_chanpat(tp);
+ if (err)
+ return err;
+
+ tg3_phydsp_write(tp, 0x8005, 0x0000);
+
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
+ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
+
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+
+ tg3_writephy(tp, MII_CTRL1000, phy9_orig);
+
+ if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
+ reg32 &= ~0x3000;
+ tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
+ } else if (!err)
+ err = -EBUSY;
+
+ return err;
+}
+
+/* Reset the tigon3 PHY and reapply the chip- and PHY-specific
+ * workarounds afterwards.
+ */
+static int tg3_phy_reset(struct tg3 *tp)
+{
+ u32 val, cpmuctrl;
+ int err;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ val = tr32(GRC_MISC_CFG);
+ tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
+ udelay(40);
+ }
+ err = tg3_readphy(tp, MII_BMSR, &val);
+ err |= tg3_readphy(tp, MII_BMSR, &val);
+ if (err != 0)
+ return -EBUSY;
+
+ if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
+ netif_carrier_off(tp->dev);
+ tg3_link_report(tp);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ err = tg3_phy_reset_5703_4_5(tp);
+ if (err)
+ return err;
+ goto out;
+ }
+
+ cpmuctrl = 0;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
+ cpmuctrl = tr32(TG3_CPMU_CTRL);
+ if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
+ tw32(TG3_CPMU_CTRL,
+ cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
+ }
+
+ err = tg3_bmcr_reset(tp);
+ if (err)
+ return err;
+
+ if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
+ val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
+ tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
+
+ tw32(TG3_CPMU_CTRL, cpmuctrl);
+ }
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
+ GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
+ val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
+ if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
+ CPMU_LSPD_1000MB_MACCLK_12_5) {
+ val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
+ udelay(40);
+ tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
+ }
+ }
+
+ if (tg3_flag(tp, 5717_PLUS) &&
+ (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
+ return 0;
+
+ tg3_phy_apply_otp(tp);
+
+ if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
+ tg3_phy_toggle_apd(tp, true);
+ else
+ tg3_phy_toggle_apd(tp, false);
+
+out:
+ if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
+ !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ tg3_phydsp_write(tp, 0x201f, 0x2aaa);
+ tg3_phydsp_write(tp, 0x000a, 0x0323);
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ }
+
+ if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
+ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
+ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
+ }
+
+ if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
+ if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ tg3_phydsp_write(tp, 0x000a, 0x310b);
+ tg3_phydsp_write(tp, 0x201f, 0x9506);
+ tg3_phydsp_write(tp, 0x401f, 0x14e2);
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ }
+ } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
+ if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
+ if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
+ tg3_writephy(tp, MII_TG3_TEST1,
+ MII_TG3_TEST1_TRIM_EN | 0x4);
+ } else
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
+
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ }
+ }
+
+	/* Set Extended packet length bit (bit 14) on all chips that
+	 * support jumbo frames.
+	 */
+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
+ /* Cannot do read-modify-write on 5401 */
+ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
+ } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
+ /* Set bit 14 with read-modify-write to preserve other bits */
+ err = tg3_phy_auxctl_read(tp,
+ MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
+ if (!err)
+ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
+ val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
+ }
+
+	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
+	 * jumbo frame transmission.
+	 */
+ if (tg3_flag(tp, JUMBO_CAPABLE)) {
+ if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
+ tg3_writephy(tp, MII_TG3_EXT_CTRL,
+ val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ /* adjust output voltage */
+ tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
+ }
+
+ tg3_phy_toggle_automdix(tp, 1);
+ tg3_phy_set_wirespeed(tp);
+ return 0;
+}
+
+#define TG3_GPIO_MSG_DRVR_PRES 0x00000001
+#define TG3_GPIO_MSG_NEED_VAUX 0x00000002
+#define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
+ TG3_GPIO_MSG_NEED_VAUX)
+#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
+ ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
+ (TG3_GPIO_MSG_DRVR_PRES << 4) | \
+ (TG3_GPIO_MSG_DRVR_PRES << 8) | \
+ (TG3_GPIO_MSG_DRVR_PRES << 12))
+
+#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
+ ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
+ (TG3_GPIO_MSG_NEED_VAUX << 4) | \
+ (TG3_GPIO_MSG_NEED_VAUX << 8) | \
+ (TG3_GPIO_MSG_NEED_VAUX << 12))
+
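+/* Record this PCI function's power-source request bits and return the
+ * merged status of all functions.
+ */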
+static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
+{
+ u32 status, shift;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
+ else
+ status = tr32(TG3_CPMU_DRV_STATUS);
+
+ shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
+ status &= ~(TG3_GPIO_MSG_MASK << shift);
+ status |= (newstat << shift);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
+ else
+ tw32(TG3_CPMU_DRV_STATUS, status);
+
+ return status >> TG3_APE_GPIO_MSG_SHIFT;
+}
+
+static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
+{
+ if (!tg3_flag(tp, IS_NIC))
+ return 0;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
+ return -EIO;
+
+ tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
+
+ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
+ } else {
+ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+ }
+
+ return 0;
+}
+
+static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
+{
+ u32 grc_local_ctrl;
+
+ if (!tg3_flag(tp, IS_NIC) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
+ return;
+
+ grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
+
+ tw32_wait_f(GRC_LOCAL_CTRL,
+ grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ tw32_wait_f(GRC_LOCAL_CTRL,
+ grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ tw32_wait_f(GRC_LOCAL_CTRL,
+ grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+}
+
+static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
+{
+ if (!tg3_flag(tp, IS_NIC))
+ return;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+ (GRC_LCLCTRL_GPIO_OE0 |
+ GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OE2 |
+ GRC_LCLCTRL_GPIO_OUTPUT0 |
+ GRC_LCLCTRL_GPIO_OUTPUT1),
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+ } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
+ /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
+ u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
+ GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OE2 |
+ GRC_LCLCTRL_GPIO_OUTPUT0 |
+ GRC_LCLCTRL_GPIO_OUTPUT1 |
+ tp->grc_local_ctrl;
+ tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
+ tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
+ tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+ } else {
+ u32 no_gpio2;
+ u32 grc_local_ctrl = 0;
+
+		/* Workaround to prevent the part from drawing too much current. */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
+ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+ grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+ }
+
+ /* On 5753 and variants, GPIO2 cannot be used. */
+ no_gpio2 = tp->nic_sram_data_cfg &
+ NIC_SRAM_DATA_CFG_NO_GPIO2;
+
+ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
+ GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OE2 |
+ GRC_LCLCTRL_GPIO_OUTPUT1 |
+ GRC_LCLCTRL_GPIO_OUTPUT2;
+ if (no_gpio2) {
+ grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
+ GRC_LCLCTRL_GPIO_OUTPUT2);
+ }
+ tw32_wait_f(GRC_LOCAL_CTRL,
+ tp->grc_local_ctrl | grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
+
+ tw32_wait_f(GRC_LOCAL_CTRL,
+ tp->grc_local_ctrl | grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ if (!no_gpio2) {
+ grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
+ tw32_wait_f(GRC_LOCAL_CTRL,
+ tp->grc_local_ctrl | grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+ }
+ }
+}
+
+static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
+{
+ u32 msg = 0;
+
+ /* Serialize power state transitions */
+ if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
+ return;
+
+ if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
+ msg = TG3_GPIO_MSG_NEED_VAUX;
+
+ msg = tg3_set_function_status(tp, msg);
+
+ if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
+ goto done;
+
+ if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
+ tg3_pwrsrc_switch_to_vaux(tp);
+ else
+ tg3_pwrsrc_die_with_vmain(tp);
+
+done:
+ tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
+}
+
+static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
+{
+ bool need_vaux = false;
+
+ /* The GPIOs do something completely different on 57765. */
+ if (!tg3_flag(tp, IS_NIC) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ return;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ tg3_frob_aux_power_5717(tp, include_wol ?
+ tg3_flag(tp, WOL_ENABLE) != 0 : 0);
+ return;
+ }
+
+ if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
+ struct net_device *dev_peer;
+
+ dev_peer = pci_get_drvdata(tp->pdev_peer);
+
+ /* remove_one() may have been run on the peer. */
+ if (dev_peer) {
+ struct tg3 *tp_peer = netdev_priv(dev_peer);
+
+ if (tg3_flag(tp_peer, INIT_COMPLETE))
+ return;
+
+ if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
+ tg3_flag(tp_peer, ENABLE_ASF))
+ need_vaux = true;
+ }
+ }
+
+ if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
+ tg3_flag(tp, ENABLE_ASF))
+ need_vaux = true;
+
+ if (need_vaux)
+ tg3_pwrsrc_switch_to_vaux(tp);
+ else
+ tg3_pwrsrc_die_with_vmain(tp);
+}
+
+static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
+{
+ if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
+ return 1;
+ else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
+ if (speed != SPEED_10)
+ return 1;
+ } else if (speed == SPEED_10)
+ return 1;
+
+ return 0;
+}
+
+static int tg3_setup_phy(struct tg3 *, int);
+static int tg3_halt_cpu(struct tg3 *, u32);
+
+static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
+{
+ u32 val;
+
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
+ u32 serdes_cfg = tr32(MAC_SERDES_CFG);
+
+ sg_dig_ctrl |=
+ SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
+ tw32(SG_DIG_CTRL, sg_dig_ctrl);
+ tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
+ }
+ return;
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ tg3_bmcr_reset(tp);
+ val = tr32(GRC_MISC_CFG);
+ tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
+ udelay(40);
+ return;
+ } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+ u32 phytest;
+ if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
+ u32 phy;
+
+ tg3_writephy(tp, MII_ADVERTISE, 0);
+ tg3_writephy(tp, MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+
+ tg3_writephy(tp, MII_TG3_FET_TEST,
+ phytest | MII_TG3_FET_SHADOW_EN);
+ if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
+ phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
+ tg3_writephy(tp,
+ MII_TG3_FET_SHDW_AUXMODE4,
+ phy);
+ }
+ tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
+ }
+ return;
+ } else if (do_low_power) {
+ tg3_writephy(tp, MII_TG3_EXT_CTRL,
+ MII_TG3_EXT_CTRL_FORCE_LED_OFF);
+
+ val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
+ MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
+ MII_TG3_AUXCTL_PCTL_VREG_11V;
+ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
+ }
+
+ /* The PHY should not be powered down on some chips because
+ * of bugs.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
+ (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
+ return;
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
+ GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
+ val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
+ val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
+ val |= CPMU_LSPD_1000MB_MACCLK_12_5;
+ tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
+ }
+
+ tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
+}
+
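+/* Acquire the NVRAM software arbitration semaphore.  Calls may nest;
+ * nvram_lock_cnt ensures only the final unlock releases the hardware.
+ */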
+/* tp->lock is held. */
+static int tg3_nvram_lock(struct tg3 *tp)
+{
+ if (tg3_flag(tp, NVRAM)) {
+ int i;
+
+ if (tp->nvram_lock_cnt == 0) {
+ tw32(NVRAM_SWARB, SWARB_REQ_SET1);
+ for (i = 0; i < 8000; i++) {
+ if (tr32(NVRAM_SWARB) & SWARB_GNT1)
+ break;
+ udelay(20);
+ }
+ if (i == 8000) {
+ tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
+ return -ENODEV;
+ }
+ }
+ tp->nvram_lock_cnt++;
+ }
+ return 0;
+}
+
+/* tp->lock is held. */
+static void tg3_nvram_unlock(struct tg3 *tp)
+{
+ if (tg3_flag(tp, NVRAM)) {
+ if (tp->nvram_lock_cnt > 0)
+ tp->nvram_lock_cnt--;
+ if (tp->nvram_lock_cnt == 0)
+ tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
+ }
+}
+
+/* tp->lock is held. */
+static void tg3_enable_nvram_access(struct tg3 *tp)
+{
+ if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
+ u32 nvaccess = tr32(NVRAM_ACCESS);
+
+ tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
+ }
+}
+
+/* tp->lock is held. */
+static void tg3_disable_nvram_access(struct tg3 *tp)
+{
+ if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
+ u32 nvaccess = tr32(NVRAM_ACCESS);
+
+ tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
+ }
+}
+
+static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
+ u32 offset, u32 *val)
+{
+ u32 tmp;
+ int i;
+
+ if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
+ return -EINVAL;
+
+ tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
+ EEPROM_ADDR_DEVID_MASK |
+ EEPROM_ADDR_READ);
+ tw32(GRC_EEPROM_ADDR,
+ tmp |
+ (0 << EEPROM_ADDR_DEVID_SHIFT) |
+ ((offset << EEPROM_ADDR_ADDR_SHIFT) &
+ EEPROM_ADDR_ADDR_MASK) |
+ EEPROM_ADDR_READ | EEPROM_ADDR_START);
+
+ for (i = 0; i < 1000; i++) {
+ tmp = tr32(GRC_EEPROM_ADDR);
+
+ if (tmp & EEPROM_ADDR_COMPLETE)
+ break;
+ msleep(1);
+ }
+ if (!(tmp & EEPROM_ADDR_COMPLETE))
+ return -EBUSY;
+
+ tmp = tr32(GRC_EEPROM_DATA);
+
+ /*
+ * The data will always be opposite the native endian
+ * format. Perform a blind byteswap to compensate.
+ */
+ *val = swab32(tmp);
+
+ return 0;
+}
+
+#define NVRAM_CMD_TIMEOUT 10000
+
+static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
+{
+ int i;
+
+ tw32(NVRAM_CMD, nvram_cmd);
+ for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
+ udelay(10);
+ if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
+ udelay(10);
+ break;
+ }
+ }
+
+ if (i == NVRAM_CMD_TIMEOUT)
+ return -EBUSY;
+
+ return 0;
+}
+
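+/* Convert a linear NVRAM offset into the page/offset form used by
+ * Atmel AT45DB0x1B parts, whose page size is not a power of two.
+ */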
+static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
+{
+ if (tg3_flag(tp, NVRAM) &&
+ tg3_flag(tp, NVRAM_BUFFERED) &&
+ tg3_flag(tp, FLASH) &&
+ !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
+ (tp->nvram_jedecnum == JEDEC_ATMEL))
+ addr = ((addr / tp->nvram_pagesize) <<
+ ATMEL_AT45DB0X1B_PAGE_POS) +
+ (addr % tp->nvram_pagesize);
+
+ return addr;
+}
+
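+/* Inverse translation of tg3_nvram_phys_addr(). */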
+static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
+{
+ if (tg3_flag(tp, NVRAM) &&
+ tg3_flag(tp, NVRAM_BUFFERED) &&
+ tg3_flag(tp, FLASH) &&
+ !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
+ (tp->nvram_jedecnum == JEDEC_ATMEL))
+ addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
+ tp->nvram_pagesize) +
+ (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
+
+ return addr;
+}
+
+/* NOTE: Data read in from NVRAM is byteswapped according to
+ * the byteswapping settings for all other register accesses.
+ * tg3 devices are BE devices, so on a BE machine, the data
+ * returned will be exactly as it is seen in NVRAM. On a LE
+ * machine, the 32-bit value will be byteswapped.
+ */
+static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
+{
+ int ret;
+
+ if (!tg3_flag(tp, NVRAM))
+ return tg3_nvram_read_using_eeprom(tp, offset, val);
+
+ offset = tg3_nvram_phys_addr(tp, offset);
+
+ if (offset > NVRAM_ADDR_MSK)
+ return -EINVAL;
+
+ ret = tg3_nvram_lock(tp);
+ if (ret)
+ return ret;
+
+ tg3_enable_nvram_access(tp);
+
+ tw32(NVRAM_ADDR, offset);
+ ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
+ NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
+
+ if (ret == 0)
+ *val = tr32(NVRAM_RDDATA);
+
+ tg3_disable_nvram_access(tp);
+
+ tg3_nvram_unlock(tp);
+
+ return ret;
+}
+
+/* Ensures NVRAM data is in bytestream format. */
+static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
+{
+ u32 v;
+ int res = tg3_nvram_read(tp, offset, &v);
+ if (!res)
+ *val = cpu_to_be32(v);
+ return res;
+}
+
+#define RX_CPU_SCRATCH_BASE 0x30000
+#define RX_CPU_SCRATCH_SIZE 0x04000
+#define TX_CPU_SCRATCH_BASE 0x34000
+#define TX_CPU_SCRATCH_SIZE 0x04000
+
+/* tp->lock is held. */
+static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
+{
+ int i;
+
+ BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ u32 val = tr32(GRC_VCPU_EXT_CTRL);
+
+ tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
+ return 0;
+ }
+ if (offset == RX_CPU_BASE) {
+ for (i = 0; i < 10000; i++) {
+ tw32(offset + CPU_STATE, 0xffffffff);
+ tw32(offset + CPU_MODE, CPU_MODE_HALT);
+ if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
+ break;
+ }
+
+ tw32(offset + CPU_STATE, 0xffffffff);
+ tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
+ udelay(10);
+ } else {
+ for (i = 0; i < 10000; i++) {
+ tw32(offset + CPU_STATE, 0xffffffff);
+ tw32(offset + CPU_MODE, CPU_MODE_HALT);
+ if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
+ break;
+ }
+ }
+
+ if (i >= 10000) {
+ netdev_err(tp->dev, "%s timed out, %s CPU\n",
+ __func__, offset == RX_CPU_BASE ? "RX" : "TX");
+ return -ENODEV;
+ }
+
+ /* Clear firmware's nvram arbitration. */
+ if (tg3_flag(tp, NVRAM))
+ tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
+ return 0;
+}
+
+struct fw_info {
+ unsigned int fw_base;
+ unsigned int fw_len;
+ const __be32 *fw_data;
+};
+
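+/* Halt the given on-chip CPU, zero its scratch memory, and copy the
+ * firmware image into place.  The CPU is left halted; the caller
+ * starts it by writing its program counter.
+ */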
+/* tp->lock is held. */
+static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
+ u32 cpu_scratch_base, int cpu_scratch_size,
+ struct fw_info *info)
+{
+ int err, lock_err, i;
+ void (*write_op)(struct tg3 *, u32, u32);
+
+ if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
+ netdev_err(tp->dev,
+ "%s: Trying to load TX cpu firmware which is 5705\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (tg3_flag(tp, 5705_PLUS))
+ write_op = tg3_write_mem;
+ else
+ write_op = tg3_write_indirect_reg32;
+
+ /* It is possible that bootcode is still loading at this point.
+ * Take the nvram lock before halting the cpu.
+ */
+ lock_err = tg3_nvram_lock(tp);
+ err = tg3_halt_cpu(tp, cpu_base);
+ if (!lock_err)
+ tg3_nvram_unlock(tp);
+ if (err)
+ goto out;
+
+ for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
+ write_op(tp, cpu_scratch_base + i, 0);
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
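+ /* Only the low 16 bits of fw_base select the offset within the
+ * scratch window; the image is copied to cpu_scratch_base +
+ * (fw_base & 0xffff).
+ */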
+ for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
+ write_op(tp, (cpu_scratch_base +
+ (info->fw_base & 0xffff) +
+ (i * sizeof(u32))),
+ be32_to_cpu(info->fw_data[i]));
+
+ err = 0;
+
+out:
+ return err;
+}
+
+/* tp->lock is held. */
+static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
+{
+ struct fw_info info;
+ const __be32 *fw_data;
+ int err, i;
+
+ fw_data = (void *)tp->fw->data;
+
+ /* The firmware blob starts with version numbers, followed by the
+ * start address and length. The length here is the complete image
+ * length (end_address_of_bss - start_address_of_text); the
+ * remainder of the blob is loaded contiguously from the start
+ * address.
+ */
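+ /* Concretely: fw_data[0] carries the version, fw_data[1] the start
+ * address, and fw_data[2] appears to be the length word; the 12
+ * subtracted from tp->fw->size below is that three-word header, and
+ * the code derives the image length from the file size rather than
+ * trusting the header.
+ */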
+
+ info.fw_base = be32_to_cpu(fw_data[1]);
+ info.fw_len = tp->fw->size - 12;
+ info.fw_data = &fw_data[3];
+
+ err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
+ RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
+ &info);
+ if (err)
+ return err;
+
+ err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
+ TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
+ &info);
+ if (err)
+ return err;
+
+ /* Now start up only the RX cpu. */
+ tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+ tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
+
+ for (i = 0; i < 5; i++) {
+ if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
+ break;
+ tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+ tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
+ tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
+ udelay(1000);
+ }
+ if (i >= 5) {
+ netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
+ "should be %08x\n", __func__,
+ tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
+ return -ENODEV;
+ }
+ tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+ tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
+
+ return 0;
+}
+
+/* tp->lock is held. */
+static int tg3_load_tso_firmware(struct tg3 *tp)
+{
+ struct fw_info info;
+ const __be32 *fw_data;
+ unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
+ int err, i;
+
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3))
+ return 0;
+
+ fw_data = (void *)tp->fw->data;
+
+ /* The firmware blob starts with version numbers, followed by the
+ * start address and length. The length here is the complete image
+ * length (end_address_of_bss - start_address_of_text); the
+ * remainder of the blob is loaded contiguously from the start
+ * address.
+ */
+
+ info.fw_base = be32_to_cpu(fw_data[1]);
+ cpu_scratch_size = tp->fw_len;
+ info.fw_len = tp->fw->size - 12;
+ info.fw_data = &fw_data[3];
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ cpu_base = RX_CPU_BASE;
+ cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
+ } else {
+ cpu_base = TX_CPU_BASE;
+ cpu_scratch_base = TX_CPU_SCRATCH_BASE;
+ cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
+ }
+
+ err = tg3_load_firmware_cpu(tp, cpu_base,
+ cpu_scratch_base, cpu_scratch_size,
+ &info);
+ if (err)
+ return err;
+
+ /* Now start up the cpu. */
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32_f(cpu_base + CPU_PC, info.fw_base);
+
+ for (i = 0; i < 5; i++) {
+ if (tr32(cpu_base + CPU_PC) == info.fw_base)
+ break;
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
+ tw32_f(cpu_base + CPU_PC, info.fw_base);
+ udelay(1000);
+ }
+ if (i >= 5) {
+ netdev_err(tp->dev,
+ "%s fails to set CPU PC, is %08x should be %08x\n",
+ __func__, tr32(cpu_base + CPU_PC), info.fw_base);
+ return -ENODEV;
+ }
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32_f(cpu_base + CPU_MODE, 0x00000000);
+ return 0;
+}
+
+/* tp->lock is held. */
+static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
+{
+ u32 addr_high, addr_low;
+ int i;
+
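+ /* The address is split across two registers: octets 0-1 go in the
+ * low half of the _HIGH register and octets 2-5 in the _LOW one,
+ * e.g. 00:10:18:aa:bb:cc gives addr_high = 0x00000010 and
+ * addr_low = 0x18aabbcc.
+ */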
+ addr_high = ((tp->dev->dev_addr[0] << 8) |
+ tp->dev->dev_addr[1]);
+ addr_low = ((tp->dev->dev_addr[2] << 24) |
+ (tp->dev->dev_addr[3] << 16) |
+ (tp->dev->dev_addr[4] << 8) |
+ (tp->dev->dev_addr[5] << 0));
+ for (i = 0; i < 4; i++) {
+ if (i == 1 && skip_mac_1)
+ continue;
+ tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
+ tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ for (i = 0; i < 12; i++) {
+ tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
+ tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
+ }
+ }
+
+ addr_high = (tp->dev->dev_addr[0] +
+ tp->dev->dev_addr[1] +
+ tp->dev->dev_addr[2] +
+ tp->dev->dev_addr[3] +
+ tp->dev->dev_addr[4] +
+ tp->dev->dev_addr[5]) &
+ TX_BACKOFF_SEED_MASK;
+ tw32(MAC_TX_BACKOFF_SEED, addr_high);
+}
+
+static void tg3_enable_register_access(struct tg3 *tp)
+{
+ /*
+ * Make sure register accesses (indirect or otherwise) will function
+ * correctly.
+ */
+ pci_write_config_dword(tp->pdev,
+ TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
+}
+
+static int tg3_power_up(struct tg3 *tp)
+{
+ int err;
+
+ tg3_enable_register_access(tp);
+
+ err = pci_set_power_state(tp->pdev, PCI_D0);
+ if (!err) {
+ /* Switch out of Vaux if it is a NIC */
+ tg3_pwrsrc_switch_to_vmain(tp);
+ } else {
+ netdev_err(tp->dev, "Transition to D0 failed\n");
+ }
+
+ return err;
+}
+
+static int tg3_power_down_prepare(struct tg3 *tp)
+{
+ u32 misc_host_ctrl;
+ bool device_should_wake, do_low_power;
+
+ tg3_enable_register_access(tp);
+
+ /* Restore the CLKREQ setting. */
+ if (tg3_flag(tp, CLKREQ_BUG)) {
+ u16 lnkctl;
+
+ pci_read_config_word(tp->pdev,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
+ &lnkctl);
+ lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
+ pci_write_config_word(tp->pdev,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
+ lnkctl);
+ }
+
+ misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
+ tw32(TG3PCI_MISC_HOST_CTRL,
+ misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
+
+ device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
+ tg3_flag(tp, WOL_ENABLE);
+
+ if (tg3_flag(tp, USE_PHYLIB)) {
+ do_low_power = false;
+ if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
+ !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+ struct phy_device *phydev;
+ u32 phyid, advertising;
+
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+
+ tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
+
+ tp->link_config.orig_speed = phydev->speed;
+ tp->link_config.orig_duplex = phydev->duplex;
+ tp->link_config.orig_autoneg = phydev->autoneg;
+ tp->link_config.orig_advertising = phydev->advertising;
+
+ advertising = ADVERTISED_TP |
+ ADVERTISED_Pause |
+ ADVERTISED_Autoneg |
+ ADVERTISED_10baseT_Half;
+
+ if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
+ if (tg3_flag(tp, WOL_SPEED_100MB))
+ advertising |=
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Full;
+ else
+ advertising |= ADVERTISED_10baseT_Full;
+ }
+
+ phydev->advertising = advertising;
+
+ phy_start_aneg(phydev);
+
+ phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
+ if (phyid != PHY_ID_BCMAC131) {
+ phyid &= PHY_BCM_OUI_MASK;
+ if (phyid == PHY_BCM_OUI_1 ||
+ phyid == PHY_BCM_OUI_2 ||
+ phyid == PHY_BCM_OUI_3)
+ do_low_power = true;
+ }
+ }
+ } else {
+ do_low_power = true;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+ tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
+ tp->link_config.orig_speed = tp->link_config.speed;
+ tp->link_config.orig_duplex = tp->link_config.duplex;
+ tp->link_config.orig_autoneg = tp->link_config.autoneg;
+ }
+
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+ tp->link_config.speed = SPEED_10;
+ tp->link_config.duplex = DUPLEX_HALF;
+ tp->link_config.autoneg = AUTONEG_ENABLE;
+ tg3_setup_phy(tp, 0);
+ }
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ u32 val;
+
+ val = tr32(GRC_VCPU_EXT_CTRL);
+ tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
+ } else if (!tg3_flag(tp, ENABLE_ASF)) {
+ int i;
+ u32 val;
+
+ for (i = 0; i < 200; i++) {
+ tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
+ if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+ break;
+ msleep(1);
+ }
+ }
+ if (tg3_flag(tp, WOL_CAP))
+ tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
+ WOL_DRV_STATE_SHUTDOWN |
+ WOL_DRV_WOL |
+ WOL_SET_MAGIC_PKT);
+
+ if (device_should_wake) {
+ u32 mac_mode;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
+ if (do_low_power &&
+ !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
+ tg3_phy_auxctl_write(tp,
+ MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
+ MII_TG3_AUXCTL_PCTL_WOL_EN |
+ MII_TG3_AUXCTL_PCTL_100TX_LPWR |
+ MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
+ udelay(40);
+ }
+
+ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+ mac_mode = MAC_MODE_PORT_MODE_GMII;
+ else
+ mac_mode = MAC_MODE_PORT_MODE_MII;
+
+ mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+ ASIC_REV_5700) {
+ u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
+ SPEED_100 : SPEED_10;
+ if (tg3_5700_link_polarity(tp, speed))
+ mac_mode |= MAC_MODE_LINK_POLARITY;
+ else
+ mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ }
+ } else {
+ mac_mode = MAC_MODE_PORT_MODE_TBI;
+ }
+
+ if (!tg3_flag(tp, 5750_PLUS))
+ tw32(MAC_LED_CTRL, tp->led_ctrl);
+
+ mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
+ if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
+ (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
+ mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
+
+ if (tg3_flag(tp, ENABLE_APE))
+ mac_mode |= MAC_MODE_APE_TX_EN |
+ MAC_MODE_APE_RX_EN |
+ MAC_MODE_TDE_ENABLE;
+
+ tw32_f(MAC_MODE, mac_mode);
+ udelay(100);
+
+ tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
+ udelay(10);
+ }
+
+ if (!tg3_flag(tp, WOL_SPEED_100MB) &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
+ u32 base_val;
+
+ base_val = tp->pci_clock_ctrl;
+ base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
+ CLOCK_CTRL_TXCLK_DISABLE);
+
+ tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
+ CLOCK_CTRL_PWRDOWN_PLL133, 40);
+ } else if (tg3_flag(tp, 5780_CLASS) ||
+ tg3_flag(tp, CPMU_PRESENT) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ /* do nothing */
+ } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
+ u32 newbits1, newbits2;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
+ CLOCK_CTRL_TXCLK_DISABLE |
+ CLOCK_CTRL_ALTCLK);
+ newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
+ } else if (tg3_flag(tp, 5705_PLUS)) {
+ newbits1 = CLOCK_CTRL_625_CORE;
+ newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
+ } else {
+ newbits1 = CLOCK_CTRL_ALTCLK;
+ newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
+ }
+
+ tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
+ 40);
+
+ tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
+ 40);
+
+ if (!tg3_flag(tp, 5705_PLUS)) {
+ u32 newbits3;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
+ CLOCK_CTRL_TXCLK_DISABLE |
+ CLOCK_CTRL_44MHZ_CORE);
+ } else {
+ newbits3 = CLOCK_CTRL_44MHZ_CORE;
+ }
+
+ tw32_wait_f(TG3PCI_CLOCK_CTRL,
+ tp->pci_clock_ctrl | newbits3, 40);
+ }
+ }
+
+ if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
+ tg3_power_down_phy(tp, do_low_power);
+
+ tg3_frob_aux_power(tp, true);
+
+ /* Workaround for unstable PLL clock */
+ if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
+ (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
+ u32 val = tr32(0x7d00);
+
+ val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
+ tw32(0x7d00, val);
+ if (!tg3_flag(tp, ENABLE_ASF)) {
+ int err;
+
+ err = tg3_nvram_lock(tp);
+ tg3_halt_cpu(tp, RX_CPU_BASE);
+ if (!err)
+ tg3_nvram_unlock(tp);
+ }
+ }
+
+ tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
+
+ return 0;
+}
+
+static void tg3_power_down(struct tg3 *tp)
+{
+ tg3_power_down_prepare(tp);
+
+ pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
+ pci_set_power_state(tp->pdev, PCI_D3hot);
+}
+
+static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
+{
+ switch (val & MII_TG3_AUX_STAT_SPDMASK) {
+ case MII_TG3_AUX_STAT_10HALF:
+ *speed = SPEED_10;
+ *duplex = DUPLEX_HALF;
+ break;
+
+ case MII_TG3_AUX_STAT_10FULL:
+ *speed = SPEED_10;
+ *duplex = DUPLEX_FULL;
+ break;
+
+ case MII_TG3_AUX_STAT_100HALF:
+ *speed = SPEED_100;
+ *duplex = DUPLEX_HALF;
+ break;
+
+ case MII_TG3_AUX_STAT_100FULL:
+ *speed = SPEED_100;
+ *duplex = DUPLEX_FULL;
+ break;
+
+ case MII_TG3_AUX_STAT_1000HALF:
+ *speed = SPEED_1000;
+ *duplex = DUPLEX_HALF;
+ break;
+
+ case MII_TG3_AUX_STAT_1000FULL:
+ *speed = SPEED_1000;
+ *duplex = DUPLEX_FULL;
+ break;
+
+ default:
+ if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+ *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
+ SPEED_10;
+ *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
+ DUPLEX_HALF;
+ break;
+ }
+ *speed = SPEED_INVALID;
+ *duplex = DUPLEX_INVALID;
+ break;
+ }
+}
+
+static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
+{
+ int err = 0;
+ u32 val, new_adv;
+
+ new_adv = ADVERTISE_CSMA;
+ if (advertise & ADVERTISED_10baseT_Half)
+ new_adv |= ADVERTISE_10HALF;
+ if (advertise & ADVERTISED_10baseT_Full)
+ new_adv |= ADVERTISE_10FULL;
+ if (advertise & ADVERTISED_100baseT_Half)
+ new_adv |= ADVERTISE_100HALF;
+ if (advertise & ADVERTISED_100baseT_Full)
+ new_adv |= ADVERTISE_100FULL;
+
+ new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
+
+ err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
+ if (err)
+ goto done;
+
+ if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
+ goto done;
+
+ new_adv = 0;
+ if (advertise & ADVERTISED_1000baseT_Half)
+ new_adv |= ADVERTISE_1000HALF;
+ if (advertise & ADVERTISED_1000baseT_Full)
+ new_adv |= ADVERTISE_1000FULL;
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
+ new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
+
+ err = tg3_writephy(tp, MII_CTRL1000, new_adv);
+ if (err)
+ goto done;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
+ goto done;
+
+ tw32(TG3_CPMU_EEE_MODE,
+ tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
+
+ err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+ if (!err) {
+ u32 err2;
+
+ val = 0;
+ /* Advertise 100BASE-TX EEE ability */
+ if (advertise & ADVERTISED_100baseT_Full)
+ val |= MDIO_AN_EEE_ADV_100TX;
+ /* Advertise 1000BASE-T EEE ability */
+ if (advertise & ADVERTISED_1000baseT_Full)
+ val |= MDIO_AN_EEE_ADV_1000T;
+ err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
+ if (err)
+ val = 0;
+
+ switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+ case ASIC_REV_5717:
+ case ASIC_REV_57765:
+ case ASIC_REV_5719:
+ /* If we advertised any EEE modes above... */
+ if (val)
+ val = MII_TG3_DSP_TAP26_ALNOKO |
+ MII_TG3_DSP_TAP26_RMRXSTO |
+ MII_TG3_DSP_TAP26_OPCSINPT;
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
+ /* Fall through */
+ case ASIC_REV_5720:
+ if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
+ tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
+ MII_TG3_DSP_CH34TP2_HIBW01);
+ }
+
+ err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ if (!err)
+ err = err2;
+ }
+
+done:
+ return err;
+}
+
+static void tg3_phy_copper_begin(struct tg3 *tp)
+{
+ u32 new_adv;
+ int i;
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+ new_adv = ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full;
+ if (tg3_flag(tp, WOL_SPEED_100MB))
+ new_adv |= ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full;
+
+ tg3_phy_autoneg_cfg(tp, new_adv,
+ FLOW_CTRL_TX | FLOW_CTRL_RX);
+ } else if (tp->link_config.speed == SPEED_INVALID) {
+ if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
+ tp->link_config.advertising &=
+ ~(ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full);
+
+ tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
+ tp->link_config.flowctrl);
+ } else {
+ /* Asking for a specific link mode. */
+ if (tp->link_config.speed == SPEED_1000) {
+ if (tp->link_config.duplex == DUPLEX_FULL)
+ new_adv = ADVERTISED_1000baseT_Full;
+ else
+ new_adv = ADVERTISED_1000baseT_Half;
+ } else if (tp->link_config.speed == SPEED_100) {
+ if (tp->link_config.duplex == DUPLEX_FULL)
+ new_adv = ADVERTISED_100baseT_Full;
+ else
+ new_adv = ADVERTISED_100baseT_Half;
+ } else {
+ if (tp->link_config.duplex == DUPLEX_FULL)
+ new_adv = ADVERTISED_10baseT_Full;
+ else
+ new_adv = ADVERTISED_10baseT_Half;
+ }
+
+ tg3_phy_autoneg_cfg(tp, new_adv,
+ tp->link_config.flowctrl);
+ }
+
+ if (tp->link_config.autoneg == AUTONEG_DISABLE &&
+ tp->link_config.speed != SPEED_INVALID) {
+ u32 bmcr, orig_bmcr;
+
+ tp->link_config.active_speed = tp->link_config.speed;
+ tp->link_config.active_duplex = tp->link_config.duplex;
+
+ bmcr = 0;
+ switch (tp->link_config.speed) {
+ default:
+ case SPEED_10:
+ break;
+
+ case SPEED_100:
+ bmcr |= BMCR_SPEED100;
+ break;
+
+ case SPEED_1000:
+ bmcr |= BMCR_SPEED1000;
+ break;
+ }
+
+ if (tp->link_config.duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
+
+ if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
+ (bmcr != orig_bmcr)) {
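+ /* Writing BMCR_LOOPBACK first appears to force the link down
+ * so that the forced speed/duplex written below takes effect
+ * from a clean state; the loop waits for BMSR_LSTATUS to clear.
+ */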
+ tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
+ for (i = 0; i < 1500; i++) {
+ u32 tmp;
+
+ udelay(10);
+ if (tg3_readphy(tp, MII_BMSR, &tmp) ||
+ tg3_readphy(tp, MII_BMSR, &tmp))
+ continue;
+ if (!(tmp & BMSR_LSTATUS)) {
+ udelay(40);
+ break;
+ }
+ }
+ tg3_writephy(tp, MII_BMCR, bmcr);
+ udelay(40);
+ }
+ } else {
+ tg3_writephy(tp, MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+ }
+}
+
+static int tg3_init_5401phy_dsp(struct tg3 *tp)
+{
+ int err;
+
+ /* Turn off tap power management. */
+ /* Set Extended packet length bit */
+ err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
+
+ err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
+ err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
+ err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
+ err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
+ err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
+
+ udelay(40);
+
+ return err;
+}
+
+static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
+{
+ u32 adv_reg, all_mask = 0;
+
+ if (mask & ADVERTISED_10baseT_Half)
+ all_mask |= ADVERTISE_10HALF;
+ if (mask & ADVERTISED_10baseT_Full)
+ all_mask |= ADVERTISE_10FULL;
+ if (mask & ADVERTISED_100baseT_Half)
+ all_mask |= ADVERTISE_100HALF;
+ if (mask & ADVERTISED_100baseT_Full)
+ all_mask |= ADVERTISE_100FULL;
+
+ if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
+ return 0;
+
+ if ((adv_reg & ADVERTISE_ALL) != all_mask)
+ return 0;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ u32 tg3_ctrl;
+
+ all_mask = 0;
+ if (mask & ADVERTISED_1000baseT_Half)
+ all_mask |= ADVERTISE_1000HALF;
+ if (mask & ADVERTISED_1000baseT_Full)
+ all_mask |= ADVERTISE_1000FULL;
+
+ if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
+ return 0;
+
+ tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
+ if (tg3_ctrl != all_mask)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
+{
+ u32 curadv, reqadv;
+
+ if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
+ return 1;
+
+ curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+ reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
+
+ if (tp->link_config.active_duplex == DUPLEX_FULL) {
+ if (curadv != reqadv)
+ return 0;
+
+ if (tg3_flag(tp, PAUSE_AUTONEG))
+ tg3_readphy(tp, MII_LPA, rmtadv);
+ } else {
+ /* Reprogram the advertisement register, even if it
+ * does not affect the current link. If the link
+ * gets renegotiated in the future, we can save an
+ * additional renegotiation cycle by advertising
+ * it correctly in the first place.
+ */
+ if (curadv != reqadv) {
+ *lcladv &= ~(ADVERTISE_PAUSE_CAP |
+ ADVERTISE_PAUSE_ASYM);
+ tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
+ }
+ }
+
+ return 1;
+}
+
+static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
+{
+ int current_link_up;
+ u32 bmsr, val;
+ u32 lcl_adv, rmt_adv;
+ u16 current_speed;
+ u8 current_duplex;
+ int i, err;
+
+ tw32(MAC_EVENT, 0);
+
+ tw32_f(MAC_STATUS,
+ (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED |
+ MAC_STATUS_MI_COMPLETION |
+ MAC_STATUS_LNKSTATE_CHANGED));
+ udelay(40);
+
+ if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+ tw32_f(MAC_MI_MODE,
+ (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
+ udelay(80);
+ }
+
+ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
+
+ /* Some third-party PHYs need to be reset on link going
+ * down.
+ */
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
+ netif_carrier_ok(tp->dev)) {
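+ /* BMSR latches link-down events, so it is read back-to-back
+ * here (and at similar spots below) to get the current link
+ * state rather than a stale latched value.
+ */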
+ tg3_readphy(tp, MII_BMSR, &bmsr);
+ if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+ !(bmsr & BMSR_LSTATUS))
+ force_reset = 1;
+ }
+ if (force_reset)
+ tg3_phy_reset(tp);
+
+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
+ tg3_readphy(tp, MII_BMSR, &bmsr);
+ if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
+ !tg3_flag(tp, INIT_COMPLETE))
+ bmsr = 0;
+
+ if (!(bmsr & BMSR_LSTATUS)) {
+ err = tg3_init_5401phy_dsp(tp);
+ if (err)
+ return err;
+
+ tg3_readphy(tp, MII_BMSR, &bmsr);
+ for (i = 0; i < 1000; i++) {
+ udelay(10);
+ if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+ (bmsr & BMSR_LSTATUS)) {
+ udelay(40);
+ break;
+ }
+ }
+
+ if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
+ TG3_PHY_REV_BCM5401_B0 &&
+ !(bmsr & BMSR_LSTATUS) &&
+ tp->link_config.active_speed == SPEED_1000) {
+ err = tg3_phy_reset(tp);
+ if (!err)
+ err = tg3_init_5401phy_dsp(tp);
+ if (err)
+ return err;
+ }
+ }
+ } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
+ /* 5701 {A0,B0} CRC bug workaround */
+ tg3_writephy(tp, 0x15, 0x0a75);
+ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
+ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
+ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
+ }
+
+ /* Clear pending interrupts... */
+ tg3_readphy(tp, MII_TG3_ISTAT, &val);
+ tg3_readphy(tp, MII_TG3_ISTAT, &val);
+
+ if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
+ tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
+ else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
+ tg3_writephy(tp, MII_TG3_IMASK, ~0);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
+ tg3_writephy(tp, MII_TG3_EXT_CTRL,
+ MII_TG3_EXT_CTRL_LNK3_LED_MODE);
+ else
+ tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
+ }
+
+ current_link_up = 0;
+ current_speed = SPEED_INVALID;
+ current_duplex = DUPLEX_INVALID;
+
+ if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
+ err = tg3_phy_auxctl_read(tp,
+ MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
+ &val);
+ if (!err && !(val & (1 << 10))) {
+ tg3_phy_auxctl_write(tp,
+ MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
+ val | (1 << 10));
+ goto relink;
+ }
+ }
+
+ bmsr = 0;
+ for (i = 0; i < 100; i++) {
+ tg3_readphy(tp, MII_BMSR, &bmsr);
+ if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+ (bmsr & BMSR_LSTATUS))
+ break;
+ udelay(40);
+ }
+
+ if (bmsr & BMSR_LSTATUS) {
+ u32 aux_stat, bmcr;
+
+ tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
+ for (i = 0; i < 2000; i++) {
+ udelay(10);
+ if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
+ aux_stat)
+ break;
+ }
+
+ tg3_aux_stat_to_speed_duplex(tp, aux_stat,
+ &current_speed,
+ &current_duplex);
+
+ bmcr = 0;
+ for (i = 0; i < 200; i++) {
+ tg3_readphy(tp, MII_BMCR, &bmcr);
+ if (tg3_readphy(tp, MII_BMCR, &bmcr))
+ continue;
+ if (bmcr && bmcr != 0x7fff)
+ break;
+ udelay(10);
+ }
+
+ lcl_adv = 0;
+ rmt_adv = 0;
+
+ tp->link_config.active_speed = current_speed;
+ tp->link_config.active_duplex = current_duplex;
+
+ if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+ if ((bmcr & BMCR_ANENABLE) &&
+ tg3_copper_is_advertising_all(tp,
+ tp->link_config.advertising)) {
+ if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
+ &rmt_adv))
+ current_link_up = 1;
+ }
+ } else {
+ if (!(bmcr & BMCR_ANENABLE) &&
+ tp->link_config.speed == current_speed &&
+ tp->link_config.duplex == current_duplex &&
+ tp->link_config.flowctrl ==
+ tp->link_config.active_flowctrl) {
+ current_link_up = 1;
+ }
+ }
+
+ if (current_link_up == 1 &&
+ tp->link_config.active_duplex == DUPLEX_FULL)
+ tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
+ }
+
+relink:
+ if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+ tg3_phy_copper_begin(tp);
+
+ tg3_readphy(tp, MII_BMSR, &bmsr);
+ if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
+ (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
+ current_link_up = 1;
+ }
+
+ tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
+ if (current_link_up == 1) {
+ if (tp->link_config.active_speed == SPEED_100 ||
+ tp->link_config.active_speed == SPEED_10)
+ tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+ else
+ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
+ tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+ else
+ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+
+ tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
+ if (tp->link_config.active_duplex == DUPLEX_HALF)
+ tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+ if (current_link_up == 1 &&
+ tg3_5700_link_polarity(tp, tp->link_config.active_speed))
+ tp->mac_mode |= MAC_MODE_LINK_POLARITY;
+ else
+ tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ }
+
+ /* It is unclear why, but without this setting the Netgear
+ * GA302T PHY does not send/receive packets.
+ */
+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
+ tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
+ tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
+ tw32_f(MAC_MI_MODE, tp->mi_mode);
+ udelay(80);
+ }
+
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ tg3_phy_eee_adjust(tp, current_link_up);
+
+ if (tg3_flag(tp, USE_LINKCHG_REG)) {
+ /* Polled via timer. */
+ tw32_f(MAC_EVENT, 0);
+ } else {
+ tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+ }
+ udelay(40);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
+ current_link_up == 1 &&
+ tp->link_config.active_speed == SPEED_1000 &&
+ (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
+ udelay(120);
+ tw32_f(MAC_STATUS,
+ (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED));
+ udelay(40);
+ tg3_write_mem(tp,
+ NIC_SRAM_FIRMWARE_MBOX,
+ NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
+ }
+
+ /* Prevent send BD corruption. */
+ if (tg3_flag(tp, CLKREQ_BUG)) {
+ u16 oldlnkctl, newlnkctl;
+
+ pci_read_config_word(tp->pdev,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
+ &oldlnkctl);
+ if (tp->link_config.active_speed == SPEED_100 ||
+ tp->link_config.active_speed == SPEED_10)
+ newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
+ else
+ newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
+ if (newlnkctl != oldlnkctl)
+ pci_write_config_word(tp->pdev,
+ pci_pcie_cap(tp->pdev) +
+ PCI_EXP_LNKCTL, newlnkctl);
+ }
+
+ if (current_link_up != netif_carrier_ok(tp->dev)) {
+ if (current_link_up)
+ netif_carrier_on(tp->dev);
+ else
+ netif_carrier_off(tp->dev);
+ tg3_link_report(tp);
+ }
+
+ return 0;
+}
+
+struct tg3_fiber_aneginfo {
+ int state;
+#define ANEG_STATE_UNKNOWN 0
+#define ANEG_STATE_AN_ENABLE 1
+#define ANEG_STATE_RESTART_INIT 2
+#define ANEG_STATE_RESTART 3
+#define ANEG_STATE_DISABLE_LINK_OK 4
+#define ANEG_STATE_ABILITY_DETECT_INIT 5
+#define ANEG_STATE_ABILITY_DETECT 6
+#define ANEG_STATE_ACK_DETECT_INIT 7
+#define ANEG_STATE_ACK_DETECT 8
+#define ANEG_STATE_COMPLETE_ACK_INIT 9
+#define ANEG_STATE_COMPLETE_ACK 10
+#define ANEG_STATE_IDLE_DETECT_INIT 11
+#define ANEG_STATE_IDLE_DETECT 12
+#define ANEG_STATE_LINK_OK 13
+#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
+#define ANEG_STATE_NEXT_PAGE_WAIT 15
+
+ u32 flags;
+#define MR_AN_ENABLE 0x00000001
+#define MR_RESTART_AN 0x00000002
+#define MR_AN_COMPLETE 0x00000004
+#define MR_PAGE_RX 0x00000008
+#define MR_NP_LOADED 0x00000010
+#define MR_TOGGLE_TX 0x00000020
+#define MR_LP_ADV_FULL_DUPLEX 0x00000040
+#define MR_LP_ADV_HALF_DUPLEX 0x00000080
+#define MR_LP_ADV_SYM_PAUSE 0x00000100
+#define MR_LP_ADV_ASYM_PAUSE 0x00000200
+#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
+#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
+#define MR_LP_ADV_NEXT_PAGE 0x00001000
+#define MR_TOGGLE_RX 0x00002000
+#define MR_NP_RX 0x00004000
+
+#define MR_LINK_OK 0x80000000
+
+ unsigned long link_time, cur_time;
+
+ u32 ability_match_cfg;
+ int ability_match_count;
+
+ char ability_match, idle_match, ack_match;
+
+ u32 txconfig, rxconfig;
+#define ANEG_CFG_NP 0x00000080
+#define ANEG_CFG_ACK 0x00000040
+#define ANEG_CFG_RF2 0x00000020
+#define ANEG_CFG_RF1 0x00000010
+#define ANEG_CFG_PS2 0x00000001
+#define ANEG_CFG_PS1 0x00008000
+#define ANEG_CFG_HD 0x00004000
+#define ANEG_CFG_FD 0x00002000
+#define ANEG_CFG_INVAL 0x00001f06
+
+};
+#define ANEG_OK 0
+#define ANEG_DONE 1
+#define ANEG_TIMER_ENAB 2
+#define ANEG_FAILED -1
+
+#define ANEG_STATE_SETTLE_TIME 10000
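+
+/* ap->cur_time advances once per call from fiber_autoneg(), which
+ * delays 1 usec per iteration, so the settle time above corresponds
+ * to roughly 10 ms.
+ */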
+
+static int tg3_fiber_aneg_smachine(struct tg3 *tp,
+ struct tg3_fiber_aneginfo *ap)
+{
+ u16 flowctrl;
+ unsigned long delta;
+ u32 rx_cfg_reg;
+ int ret;
+
+ if (ap->state == ANEG_STATE_UNKNOWN) {
+ ap->rxconfig = 0;
+ ap->link_time = 0;
+ ap->cur_time = 0;
+ ap->ability_match_cfg = 0;
+ ap->ability_match_count = 0;
+ ap->ability_match = 0;
+ ap->idle_match = 0;
+ ap->ack_match = 0;
+ }
+ ap->cur_time++;
+
+ if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
+ rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
+
+ if (rx_cfg_reg != ap->ability_match_cfg) {
+ ap->ability_match_cfg = rx_cfg_reg;
+ ap->ability_match = 0;
+ ap->ability_match_count = 0;
+ } else {
+ if (++ap->ability_match_count > 1) {
+ ap->ability_match = 1;
+ ap->ability_match_cfg = rx_cfg_reg;
+ }
+ }
+ if (rx_cfg_reg & ANEG_CFG_ACK)
+ ap->ack_match = 1;
+ else
+ ap->ack_match = 0;
+
+ ap->idle_match = 0;
+ } else {
+ ap->idle_match = 1;
+ ap->ability_match_cfg = 0;
+ ap->ability_match_count = 0;
+ ap->ability_match = 0;
+ ap->ack_match = 0;
+
+ rx_cfg_reg = 0;
+ }
+
+ ap->rxconfig = rx_cfg_reg;
+ ret = ANEG_OK;
+
+ switch (ap->state) {
+ case ANEG_STATE_UNKNOWN:
+ if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
+ ap->state = ANEG_STATE_AN_ENABLE;
+
+ /* fallthru */
+ case ANEG_STATE_AN_ENABLE:
+ ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
+ if (ap->flags & MR_AN_ENABLE) {
+ ap->link_time = 0;
+ ap->cur_time = 0;
+ ap->ability_match_cfg = 0;
+ ap->ability_match_count = 0;
+ ap->ability_match = 0;
+ ap->idle_match = 0;
+ ap->ack_match = 0;
+
+ ap->state = ANEG_STATE_RESTART_INIT;
+ } else {
+ ap->state = ANEG_STATE_DISABLE_LINK_OK;
+ }
+ break;
+
+ case ANEG_STATE_RESTART_INIT:
+ ap->link_time = ap->cur_time;
+ ap->flags &= ~(MR_NP_LOADED);
+ ap->txconfig = 0;
+ tw32(MAC_TX_AUTO_NEG, 0);
+ tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ ret = ANEG_TIMER_ENAB;
+ ap->state = ANEG_STATE_RESTART;
+
+ /* fallthru */
+ case ANEG_STATE_RESTART:
+ delta = ap->cur_time - ap->link_time;
+ if (delta > ANEG_STATE_SETTLE_TIME)
+ ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
+ else
+ ret = ANEG_TIMER_ENAB;
+ break;
+
+ case ANEG_STATE_DISABLE_LINK_OK:
+ ret = ANEG_DONE;
+ break;
+
+ case ANEG_STATE_ABILITY_DETECT_INIT:
+ ap->flags &= ~(MR_TOGGLE_TX);
+ ap->txconfig = ANEG_CFG_FD;
+ flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+ if (flowctrl & ADVERTISE_1000XPAUSE)
+ ap->txconfig |= ANEG_CFG_PS1;
+ if (flowctrl & ADVERTISE_1000XPSE_ASYM)
+ ap->txconfig |= ANEG_CFG_PS2;
+ tw32(MAC_TX_AUTO_NEG, ap->txconfig);
+ tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ ap->state = ANEG_STATE_ABILITY_DETECT;
+ break;
+
+ case ANEG_STATE_ABILITY_DETECT:
+ if (ap->ability_match != 0 && ap->rxconfig != 0)
+ ap->state = ANEG_STATE_ACK_DETECT_INIT;
+ break;
+
+ case ANEG_STATE_ACK_DETECT_INIT:
+ ap->txconfig |= ANEG_CFG_ACK;
+ tw32(MAC_TX_AUTO_NEG, ap->txconfig);
+ tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ ap->state = ANEG_STATE_ACK_DETECT;
+
+ /* fallthru */
+ case ANEG_STATE_ACK_DETECT:
+ if (ap->ack_match != 0) {
+ if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
+ (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
+ ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
+ } else {
+ ap->state = ANEG_STATE_AN_ENABLE;
+ }
+ } else if (ap->ability_match != 0 &&
+ ap->rxconfig == 0) {
+ ap->state = ANEG_STATE_AN_ENABLE;
+ }
+ break;
+
+ case ANEG_STATE_COMPLETE_ACK_INIT:
+ if (ap->rxconfig & ANEG_CFG_INVAL) {
+ ret = ANEG_FAILED;
+ break;
+ }
+ ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
+ MR_LP_ADV_HALF_DUPLEX |
+ MR_LP_ADV_SYM_PAUSE |
+ MR_LP_ADV_ASYM_PAUSE |
+ MR_LP_ADV_REMOTE_FAULT1 |
+ MR_LP_ADV_REMOTE_FAULT2 |
+ MR_LP_ADV_NEXT_PAGE |
+ MR_TOGGLE_RX |
+ MR_NP_RX);
+ if (ap->rxconfig & ANEG_CFG_FD)
+ ap->flags |= MR_LP_ADV_FULL_DUPLEX;
+ if (ap->rxconfig & ANEG_CFG_HD)
+ ap->flags |= MR_LP_ADV_HALF_DUPLEX;
+ if (ap->rxconfig & ANEG_CFG_PS1)
+ ap->flags |= MR_LP_ADV_SYM_PAUSE;
+ if (ap->rxconfig & ANEG_CFG_PS2)
+ ap->flags |= MR_LP_ADV_ASYM_PAUSE;
+ if (ap->rxconfig & ANEG_CFG_RF1)
+ ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
+ if (ap->rxconfig & ANEG_CFG_RF2)
+ ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
+ if (ap->rxconfig & ANEG_CFG_NP)
+ ap->flags |= MR_LP_ADV_NEXT_PAGE;
+
+ ap->link_time = ap->cur_time;
+
+ ap->flags ^= (MR_TOGGLE_TX);
+ if (ap->rxconfig & 0x0008)
+ ap->flags |= MR_TOGGLE_RX;
+ if (ap->rxconfig & ANEG_CFG_NP)
+ ap->flags |= MR_NP_RX;
+ ap->flags |= MR_PAGE_RX;
+
+ ap->state = ANEG_STATE_COMPLETE_ACK;
+ ret = ANEG_TIMER_ENAB;
+ break;
+
+ case ANEG_STATE_COMPLETE_ACK:
+ if (ap->ability_match != 0 &&
+ ap->rxconfig == 0) {
+ ap->state = ANEG_STATE_AN_ENABLE;
+ break;
+ }
+ delta = ap->cur_time - ap->link_time;
+ if (delta > ANEG_STATE_SETTLE_TIME) {
+ if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
+ ap->state = ANEG_STATE_IDLE_DETECT_INIT;
+ } else {
+ if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
+ !(ap->flags & MR_NP_RX)) {
+ ap->state = ANEG_STATE_IDLE_DETECT_INIT;
+ } else {
+ ret = ANEG_FAILED;
+ }
+ }
+ }
+ break;
+
+ case ANEG_STATE_IDLE_DETECT_INIT:
+ ap->link_time = ap->cur_time;
+ tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ ap->state = ANEG_STATE_IDLE_DETECT;
+ ret = ANEG_TIMER_ENAB;
+ break;
+
+ case ANEG_STATE_IDLE_DETECT:
+ if (ap->ability_match != 0 &&
+ ap->rxconfig == 0) {
+ ap->state = ANEG_STATE_AN_ENABLE;
+ break;
+ }
+ delta = ap->cur_time - ap->link_time;
+ if (delta > ANEG_STATE_SETTLE_TIME) {
+ /* XXX another gem from the Broadcom driver :( */
+ ap->state = ANEG_STATE_LINK_OK;
+ }
+ break;
+
+ case ANEG_STATE_LINK_OK:
+ ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
+ ret = ANEG_DONE;
+ break;
+
+ case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
+ /* ??? unimplemented */
+ break;
+
+ case ANEG_STATE_NEXT_PAGE_WAIT:
+ /* ??? unimplemented */
+ break;
+
+ default:
+ ret = ANEG_FAILED;
+ break;
+ }
+
+ return ret;
+}
+
+static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
+{
+ int res = 0;
+ struct tg3_fiber_aneginfo aninfo;
+ int status = ANEG_FAILED;
+ unsigned int tick;
+ u32 tmp;
+
+ tw32_f(MAC_TX_AUTO_NEG, 0);
+
+ tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
+ tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
+ udelay(40);
+
+ tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
+ udelay(40);
+
+ memset(&aninfo, 0, sizeof(aninfo));
+ aninfo.flags |= MR_AN_ENABLE;
+ aninfo.state = ANEG_STATE_UNKNOWN;
+ aninfo.cur_time = 0;
+ tick = 0;
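+ /* Each iteration below is ~1 usec, so the state machine gets
+ * roughly 195 ms to converge before we give up.
+ */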
+ while (++tick < 195000) {
+ status = tg3_fiber_aneg_smachine(tp, &aninfo);
+ if (status == ANEG_DONE || status == ANEG_FAILED)
+ break;
+
+ udelay(1);
+ }
+
+ tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ *txflags = aninfo.txconfig;
+ *rxflags = aninfo.flags;
+
+ if (status == ANEG_DONE &&
+ (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
+ MR_LP_ADV_FULL_DUPLEX)))
+ res = 1;
+
+ return res;
+}
+
+static void tg3_init_bcm8002(struct tg3 *tp)
+{
+ u32 mac_status = tr32(MAC_STATUS);
+ int i;
+
+ /* Reset when initializing for the first time or when we have a link. */
+ if (tg3_flag(tp, INIT_COMPLETE) &&
+ !(mac_status & MAC_STATUS_PCS_SYNCED))
+ return;
+
+ /* Set PLL lock range. */
+ tg3_writephy(tp, 0x16, 0x8007);
+
+ /* SW reset */
+ tg3_writephy(tp, MII_BMCR, BMCR_RESET);
+
+ /* Wait for reset to complete. */
+ /* XXX schedule_timeout() ... */
+ for (i = 0; i < 500; i++)
+ udelay(10);
+
+ /* Config mode; select PMA/Ch 1 regs. */
+ tg3_writephy(tp, 0x10, 0x8411);
+
+ /* Enable auto-lock and comdet, select txclk for tx. */
+ tg3_writephy(tp, 0x11, 0x0a10);
+
+ tg3_writephy(tp, 0x18, 0x00a0);
+ tg3_writephy(tp, 0x16, 0x41ff);
+
+ /* Assert and deassert POR. */
+ tg3_writephy(tp, 0x13, 0x0400);
+ udelay(40);
+ tg3_writephy(tp, 0x13, 0x0000);
+
+ tg3_writephy(tp, 0x11, 0x0a50);
+ udelay(40);
+ tg3_writephy(tp, 0x11, 0x0a10);
+
+ /* Wait for signal to stabilize */
+ /* XXX schedule_timeout() ... */
+ for (i = 0; i < 15000; i++)
+ udelay(10);
+
+ /* Deselect the channel register so we can read the PHYID
+ * later.
+ */
+ tg3_writephy(tp, 0x10, 0x8011);
+}
+
+static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
+{
+ u16 flowctrl;
+ u32 sg_dig_ctrl, sg_dig_status;
+ u32 serdes_cfg, expected_sg_dig_ctrl;
+ int workaround, port_a;
+ int current_link_up;
+
+ serdes_cfg = 0;
+ expected_sg_dig_ctrl = 0;
+ workaround = 0;
+ port_a = 1;
+ current_link_up = 0;
+
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
+ workaround = 1;
+ if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
+ port_a = 0;
+
+ /* preserve bits 0-11,13,14 for signal pre-emphasis */
+ /* preserve bits 20-23 for voltage regulator */
+ serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
+ }
+
+ sg_dig_ctrl = tr32(SG_DIG_CTRL);
+
+ if (tp->link_config.autoneg != AUTONEG_ENABLE) {
+ if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
+ if (workaround) {
+ u32 val = serdes_cfg;
+
+ if (port_a)
+ val |= 0xc010000;
+ else
+ val |= 0x4010000;
+ tw32_f(MAC_SERDES_CFG, val);
+ }
+
+ tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
+ }
+ if (mac_status & MAC_STATUS_PCS_SYNCED) {
+ tg3_setup_flow_control(tp, 0, 0);
+ current_link_up = 1;
+ }
+ goto out;
+ }
+
+ /* Want auto-negotiation. */
+ expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
+
+ flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+ if (flowctrl & ADVERTISE_1000XPAUSE)
+ expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
+ if (flowctrl & ADVERTISE_1000XPSE_ASYM)
+ expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
+
+ if (sg_dig_ctrl != expected_sg_dig_ctrl) {
+ if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
+ tp->serdes_counter &&
+ ((mac_status & (MAC_STATUS_PCS_SYNCED |
+ MAC_STATUS_RCVD_CFG)) ==
+ MAC_STATUS_PCS_SYNCED)) {
+ tp->serdes_counter--;
+ current_link_up = 1;
+ goto out;
+ }
+restart_autoneg:
+ if (workaround)
+ tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
+ tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
+ udelay(5);
+ tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
+
+ tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
+ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+ } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
+ MAC_STATUS_SIGNAL_DET)) {
+ sg_dig_status = tr32(SG_DIG_STATUS);
+ mac_status = tr32(MAC_STATUS);
+
+ if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
+ (mac_status & MAC_STATUS_PCS_SYNCED)) {
+ u32 local_adv = 0, remote_adv = 0;
+
+ if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
+ local_adv |= ADVERTISE_1000XPAUSE;
+ if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
+ local_adv |= ADVERTISE_1000XPSE_ASYM;
+
+ if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
+ remote_adv |= LPA_1000XPAUSE;
+ if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
+ remote_adv |= LPA_1000XPAUSE_ASYM;
+
+ tg3_setup_flow_control(tp, local_adv, remote_adv);
+ current_link_up = 1;
+ tp->serdes_counter = 0;
+ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+ } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
+ if (tp->serdes_counter)
+ tp->serdes_counter--;
+ else {
+ if (workaround) {
+ u32 val = serdes_cfg;
+
+ if (port_a)
+ val |= 0xc010000;
+ else
+ val |= 0x4010000;
+
+ tw32_f(MAC_SERDES_CFG, val);
+ }
+
+ tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
+ udelay(40);
+
+ /* Parallel link detection: the link is up only if we
+ * have PCS_SYNC and are not receiving config code
+ * words.
+ */
+ mac_status = tr32(MAC_STATUS);
+ if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
+ !(mac_status & MAC_STATUS_RCVD_CFG)) {
+ tg3_setup_flow_control(tp, 0, 0);
+ current_link_up = 1;
+ tp->phy_flags |=
+ TG3_PHYFLG_PARALLEL_DETECT;
+ tp->serdes_counter =
+ SERDES_PARALLEL_DET_TIMEOUT;
+ } else
+ goto restart_autoneg;
+ }
+ }
+ } else {
+ tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
+ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+ }
+
+out:
+ return current_link_up;
+}
+
+static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
+{
+ int current_link_up = 0;
+
+ if (!(mac_status & MAC_STATUS_PCS_SYNCED))
+ goto out;
+
+ if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+ u32 txflags, rxflags;
+ int i;
+
+ if (fiber_autoneg(tp, &txflags, &rxflags)) {
+ u32 local_adv = 0, remote_adv = 0;
+
+ if (txflags & ANEG_CFG_PS1)
+ local_adv |= ADVERTISE_1000XPAUSE;
+ if (txflags & ANEG_CFG_PS2)
+ local_adv |= ADVERTISE_1000XPSE_ASYM;
+
+ if (rxflags & MR_LP_ADV_SYM_PAUSE)
+ remote_adv |= LPA_1000XPAUSE;
+ if (rxflags & MR_LP_ADV_ASYM_PAUSE)
+ remote_adv |= LPA_1000XPAUSE_ASYM;
+
+ tg3_setup_flow_control(tp, local_adv, remote_adv);
+
+ current_link_up = 1;
+ }
+ for (i = 0; i < 30; i++) {
+ udelay(20);
+ tw32_f(MAC_STATUS,
+ (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED));
+ udelay(40);
+ if ((tr32(MAC_STATUS) &
+ (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED)) == 0)
+ break;
+ }
+
+ mac_status = tr32(MAC_STATUS);
+ if (current_link_up == 0 &&
+ (mac_status & MAC_STATUS_PCS_SYNCED) &&
+ !(mac_status & MAC_STATUS_RCVD_CFG))
+ current_link_up = 1;
+ } else {
+ tg3_setup_flow_control(tp, 0, 0);
+
+ /* Forcing 1000FD link up. */
+ current_link_up = 1;
+
+ tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
+ udelay(40);
+
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+ }
+
+out:
+ return current_link_up;
+}
+
+static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
+{
+ u32 orig_pause_cfg;
+ u16 orig_active_speed;
+ u8 orig_active_duplex;
+ u32 mac_status;
+ int current_link_up;
+ int i;
+
+ orig_pause_cfg = tp->link_config.active_flowctrl;
+ orig_active_speed = tp->link_config.active_speed;
+ orig_active_duplex = tp->link_config.active_duplex;
+
+ if (!tg3_flag(tp, HW_AUTONEG) &&
+ netif_carrier_ok(tp->dev) &&
+ tg3_flag(tp, INIT_COMPLETE)) {
+ mac_status = tr32(MAC_STATUS);
+ mac_status &= (MAC_STATUS_PCS_SYNCED |
+ MAC_STATUS_SIGNAL_DET |
+ MAC_STATUS_CFG_CHANGED |
+ MAC_STATUS_RCVD_CFG);
+ if (mac_status == (MAC_STATUS_PCS_SYNCED |
+ MAC_STATUS_SIGNAL_DET)) {
+ tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED));
+ return 0;
+ }
+ }
+
+ tw32_f(MAC_TX_AUTO_NEG, 0);
+
+ tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
+ tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ if (tp->phy_id == TG3_PHY_ID_BCM8002)
+ tg3_init_bcm8002(tp);
+
+ /* Enable link change event even when serdes polling. */
+ tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+ udelay(40);
+
+ current_link_up = 0;
+ mac_status = tr32(MAC_STATUS);
+
+ if (tg3_flag(tp, HW_AUTONEG))
+ current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
+ else
+ current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
+
+ tp->napi[0].hw_status->status =
+ (SD_STATUS_UPDATED |
+ (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
+
+ for (i = 0; i < 100; i++) {
+ tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED));
+ udelay(5);
+ if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED |
+ MAC_STATUS_LNKSTATE_CHANGED)) == 0)
+ break;
+ }
+
+ mac_status = tr32(MAC_STATUS);
+ if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
+ current_link_up = 0;
+ if (tp->link_config.autoneg == AUTONEG_ENABLE &&
+ tp->serdes_counter == 0) {
+ tw32_f(MAC_MODE, (tp->mac_mode |
+ MAC_MODE_SEND_CONFIGS));
+ udelay(1);
+ tw32_f(MAC_MODE, tp->mac_mode);
+ }
+ }
+
+ if (current_link_up == 1) {
+ tp->link_config.active_speed = SPEED_1000;
+ tp->link_config.active_duplex = DUPLEX_FULL;
+ tw32(MAC_LED_CTRL, (tp->led_ctrl |
+ LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_1000MBPS_ON));
+ } else {
+ tp->link_config.active_speed = SPEED_INVALID;
+ tp->link_config.active_duplex = DUPLEX_INVALID;
+ tw32(MAC_LED_CTRL, (tp->led_ctrl |
+ LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_TRAFFIC_OVERRIDE));
+ }
+
+ if (current_link_up != netif_carrier_ok(tp->dev)) {
+ if (current_link_up)
+ netif_carrier_on(tp->dev);
+ else
+ netif_carrier_off(tp->dev);
+ tg3_link_report(tp);
+ } else {
+ u32 now_pause_cfg = tp->link_config.active_flowctrl;
+ if (orig_pause_cfg != now_pause_cfg ||
+ orig_active_speed != tp->link_config.active_speed ||
+ orig_active_duplex != tp->link_config.active_duplex)
+ tg3_link_report(tp);
+ }
+
+ return 0;
+}
+
+static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
+{
+ int current_link_up, err = 0;
+ u32 bmsr, bmcr;
+ u16 current_speed;
+ u8 current_duplex;
+ u32 local_adv, remote_adv;
+
+ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ tw32(MAC_EVENT, 0);
+
+ tw32_f(MAC_STATUS,
+ (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED |
+ MAC_STATUS_MI_COMPLETION |
+ MAC_STATUS_LNKSTATE_CHANGED));
+ udelay(40);
+
+ if (force_reset)
+ tg3_phy_reset(tp);
+
+ current_link_up = 0;
+ current_speed = SPEED_INVALID;
+ current_duplex = DUPLEX_INVALID;
+
+ err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+ err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+ bmsr |= BMSR_LSTATUS;
+ else
+ bmsr &= ~BMSR_LSTATUS;
+ }
+
+ err |= tg3_readphy(tp, MII_BMCR, &bmcr);
+
+ if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
+ (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
+ /* do nothing, just check for link up at the end */
+ } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+ u32 adv, new_adv;
+
+ err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
+ new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
+ ADVERTISE_1000XPAUSE |
+ ADVERTISE_1000XPSE_ASYM |
+ ADVERTISE_SLCT);
+
+ new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+
+ if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
+ new_adv |= ADVERTISE_1000XHALF;
+ if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
+ new_adv |= ADVERTISE_1000XFULL;
+
+ if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
+ tg3_writephy(tp, MII_ADVERTISE, new_adv);
+ bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
+ tg3_writephy(tp, MII_BMCR, bmcr);
+
+ tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+ tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
+ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+
+ return err;
+ }
+ } else {
+ u32 new_bmcr;
+
+ bmcr &= ~BMCR_SPEED1000;
+ new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
+
+ if (tp->link_config.duplex == DUPLEX_FULL)
+ new_bmcr |= BMCR_FULLDPLX;
+
+ if (new_bmcr != bmcr) {
+ /* BMCR_SPEED1000 is a reserved bit that needs
+ * to be set on write.
+ */
+ new_bmcr |= BMCR_SPEED1000;
+
+ /* Force a linkdown */
+ if (netif_carrier_ok(tp->dev)) {
+ u32 adv;
+
+ err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
+ adv &= ~(ADVERTISE_1000XFULL |
+ ADVERTISE_1000XHALF |
+ ADVERTISE_SLCT);
+ tg3_writephy(tp, MII_ADVERTISE, adv);
+ tg3_writephy(tp, MII_BMCR, bmcr |
+ BMCR_ANRESTART |
+ BMCR_ANENABLE);
+ udelay(10);
+ netif_carrier_off(tp->dev);
+ }
+ tg3_writephy(tp, MII_BMCR, new_bmcr);
+ bmcr = new_bmcr;
+ err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+ err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+ ASIC_REV_5714) {
+ if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+ bmsr |= BMSR_LSTATUS;
+ else
+ bmsr &= ~BMSR_LSTATUS;
+ }
+ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+ }
+ }
+
+ if (bmsr & BMSR_LSTATUS) {
+ current_speed = SPEED_1000;
+ current_link_up = 1;
+ if (bmcr & BMCR_FULLDPLX)
+ current_duplex = DUPLEX_FULL;
+ else
+ current_duplex = DUPLEX_HALF;
+
+ local_adv = 0;
+ remote_adv = 0;
+
+ if (bmcr & BMCR_ANENABLE) {
+ u32 common;
+
+ err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
+ err |= tg3_readphy(tp, MII_LPA, &remote_adv);
+ common = local_adv & remote_adv;
+ if (common & (ADVERTISE_1000XHALF |
+ ADVERTISE_1000XFULL)) {
+ if (common & ADVERTISE_1000XFULL)
+ current_duplex = DUPLEX_FULL;
+ else
+ current_duplex = DUPLEX_HALF;
+ } else if (!tg3_flag(tp, 5780_CLASS)) {
+ /* Link is up via parallel detect */
+ } else {
+ current_link_up = 0;
+ }
+ }
+ }
+
+ if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
+ tg3_setup_flow_control(tp, local_adv, remote_adv);
+
+ tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
+ if (tp->link_config.active_duplex == DUPLEX_HALF)
+ tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
+
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+
+ tp->link_config.active_speed = current_speed;
+ tp->link_config.active_duplex = current_duplex;
+
+ if (current_link_up != netif_carrier_ok(tp->dev)) {
+ if (current_link_up)
+ netif_carrier_on(tp->dev);
+ else {
+ netif_carrier_off(tp->dev);
+ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+ }
+ tg3_link_report(tp);
+ }
+ return err;
+}
+
+static void tg3_serdes_parallel_detect(struct tg3 *tp)
+{
+ if (tp->serdes_counter) {
+ /* Give autoneg time to complete. */
+ tp->serdes_counter--;
+ return;
+ }
+
+ if (!netif_carrier_ok(tp->dev) &&
+ (tp->link_config.autoneg == AUTONEG_ENABLE)) {
+ u32 bmcr;
+
+ tg3_readphy(tp, MII_BMCR, &bmcr);
+ if (bmcr & BMCR_ANENABLE) {
+ u32 phy1, phy2;
+
+ /* Select shadow register 0x1f */
+ tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
+ tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
+
+ /* Select expansion interrupt status register */
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+ MII_TG3_DSP_EXP1_INT_STAT);
+ tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
+ tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
+
+ if ((phy1 & 0x10) && !(phy2 & 0x20)) {
+ /* We have signal detect and not receiving
+ * config code words, link is up by parallel
+ * detection.
+ */
+
+ bmcr &= ~BMCR_ANENABLE;
+ bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
+ tg3_writephy(tp, MII_BMCR, bmcr);
+ tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
+ }
+ }
+ } else if (netif_carrier_ok(tp->dev) &&
+ (tp->link_config.autoneg == AUTONEG_ENABLE) &&
+ (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
+ u32 phy2;
+
+ /* Select expansion interrupt status register */
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+ MII_TG3_DSP_EXP1_INT_STAT);
+ tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
+ if (phy2 & 0x20) {
+ u32 bmcr;
+
+ /* Config code words received, turn on autoneg. */
+ tg3_readphy(tp, MII_BMCR, &bmcr);
+ tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
+
+ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+
+ }
+ }
+}
+
+static int tg3_setup_phy(struct tg3 *tp, int force_reset)
+{
+ u32 val;
+ int err;
+
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+ err = tg3_setup_fiber_phy(tp, force_reset);
+ else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+ err = tg3_setup_fiber_mii_phy(tp, force_reset);
+ else
+ err = tg3_setup_copper_phy(tp, force_reset);
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
+ u32 scale;
+
+ val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
+ if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
+ scale = 65;
+ else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
+ scale = 6;
+ else
+ scale = 12;
+
+ val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
+ val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
+ tw32(GRC_MISC_CFG, val);
+ }
+
+ val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+ (6 << TX_LENGTHS_IPG_SHIFT);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ val |= tr32(MAC_TX_LENGTHS) &
+ (TX_LENGTHS_JMB_FRM_LEN_MSK |
+ TX_LENGTHS_CNT_DWN_VAL_MSK);
+
+ if (tp->link_config.active_speed == SPEED_1000 &&
+ tp->link_config.active_duplex == DUPLEX_HALF)
+ tw32(MAC_TX_LENGTHS, val |
+ (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
+ else
+ tw32(MAC_TX_LENGTHS, val |
+ (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
+
+ if (!tg3_flag(tp, 5705_PLUS)) {
+ if (netif_carrier_ok(tp->dev)) {
+ tw32(HOSTCC_STAT_COAL_TICKS,
+ tp->coal.stats_block_coalesce_usecs);
+ } else {
+ tw32(HOSTCC_STAT_COAL_TICKS, 0);
+ }
+ }
+
+ if (tg3_flag(tp, ASPM_WORKAROUND)) {
+ val = tr32(PCIE_PWR_MGMT_THRESH);
+ if (!netif_carrier_ok(tp->dev))
+ val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
+ tp->pwrmgmt_thresh;
+ else
+ val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
+ tw32(PCIE_PWR_MGMT_THRESH, val);
+ }
+
+ return err;
+}
+
+static inline int tg3_irq_sync(struct tg3 *tp)
+{
+ return tp->irq_sync;
+}
+
+static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
+{
+ int i;
+
+ dst = (u32 *)((u8 *)dst + off);
+ for (i = 0; i < len; i += sizeof(u32))
+ *dst++ = tr32(off + i);
+}
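+
+/* The dst adjustment above makes the destination index mirror the
+ * register offset: e.g. tg3_rd32_loop(tp, regs, 0x400, 0x10) fills
+ * regs[0x100]..regs[0x103] with the registers at 0x400..0x40c, which
+ * is why tg3_dump_state() below can print i * 4 as the address.
+ */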
+
+static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
+{
+ tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
+ tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
+ tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
+ tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
+ tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
+ tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
+ tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
+ tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
+ tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
+ tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
+ tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
+ tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
+ tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
+ tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
+ tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
+ tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
+ tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
+ tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
+ tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
+
+ if (tg3_flag(tp, SUPPORT_MSIX))
+ tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
+
+ tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
+ tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
+ tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
+ tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
+ tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
+ tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
+ tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
+ tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
+
+ if (!tg3_flag(tp, 5705_PLUS)) {
+ tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
+ tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
+ tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
+ }
+
+ tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
+ tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
+ tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
+ tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
+ tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
+
+ if (tg3_flag(tp, NVRAM))
+ tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
+}
+
+static void tg3_dump_state(struct tg3 *tp)
+{
+ int i;
+ u32 *regs;
+
+ regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
+ if (!regs) {
+ netdev_err(tp->dev, "Failed allocating register dump buffer\n");
+ return;
+ }
+
+ if (tg3_flag(tp, PCI_EXPRESS)) {
+ /* Read up to but not including private PCI registers */
+ for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
+ regs[i / sizeof(u32)] = tr32(i);
+ } else
+ tg3_dump_legacy_regs(tp, regs);
+
+ for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
+ if (!regs[i + 0] && !regs[i + 1] &&
+ !regs[i + 2] && !regs[i + 3])
+ continue;
+
+ netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
+ i * 4,
+ regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
+ }
+
+ kfree(regs);
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ /* SW status block */
+ netdev_err(tp->dev,
+ "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
+ i,
+ tnapi->hw_status->status,
+ tnapi->hw_status->status_tag,
+ tnapi->hw_status->rx_jumbo_consumer,
+ tnapi->hw_status->rx_consumer,
+ tnapi->hw_status->rx_mini_consumer,
+ tnapi->hw_status->idx[0].rx_producer,
+ tnapi->hw_status->idx[0].tx_consumer);
+
+ netdev_err(tp->dev,
+ "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
+ i,
+ tnapi->last_tag, tnapi->last_irq_tag,
+ tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
+ tnapi->rx_rcb_ptr,
+ tnapi->prodring.rx_std_prod_idx,
+ tnapi->prodring.rx_std_cons_idx,
+ tnapi->prodring.rx_jmb_prod_idx,
+ tnapi->prodring.rx_jmb_cons_idx);
+ }
+}
+
+/* This is called whenever we suspect that the system chipset is re-
+ * ordering the sequence of MMIO to the tx send mailbox. The symptom
+ * is bogus tx completions. We try to recover by setting the
+ * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
+ * in the workqueue.
+ */
+static void tg3_tx_recover(struct tg3 *tp)
+{
+ BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
+ tp->write32_tx_mbox == tg3_write_indirect_mbox);
+
+ netdev_warn(tp->dev,
+ "The system may be re-ordering memory-mapped I/O "
+ "cycles to the network device, attempting to recover. "
+ "Please report the problem to the driver maintainer "
+ "and include system chipset information.\n");
+
+ spin_lock(&tp->lock);
+ tg3_flag_set(tp, TX_RECOVERY_PENDING);
+ spin_unlock(&tp->lock);
+}
+
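+/* Number of free tx descriptors: the ring budget minus the
+ * descriptors currently in flight between producer and consumer.
+ * The mask handles index wraparound on the power-of-2 ring.
+ */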
+static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
+{
+ /* Tell compiler to fetch tx indices from memory. */
+ barrier();
+ return tnapi->tx_pending -
+ ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
+}
+
+/* Tigon3 never reports partial packet sends. So we do not
+ * need special logic to handle SKBs that have not had all
+ * of their frags sent yet, like SunGEM does.
+ */
+static void tg3_tx(struct tg3_napi *tnapi)
+{
+ struct tg3 *tp = tnapi->tp;
+ u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
+ u32 sw_idx = tnapi->tx_cons;
+ struct netdev_queue *txq;
+ int index = tnapi - tp->napi;
+
+ if (tg3_flag(tp, ENABLE_TSS))
+ index--;
+
+ txq = netdev_get_tx_queue(tp->dev, index);
+
+ while (sw_idx != hw_idx) {
+ struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
+ struct sk_buff *skb = ri->skb;
+ int i, tx_bug = 0;
+
+ if (unlikely(skb == NULL)) {
+ tg3_tx_recover(tp);
+ return;
+ }
+
+ pci_unmap_single(tp->pdev,
+ dma_unmap_addr(ri, mapping),
+ skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+
+ ri->skb = NULL;
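+ /* Skip any extra descriptors that tg3_tx_frag_set() emitted
+ * when it had to split this buffer for a DMA workaround.
+ */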
+
+ while (ri->fragmented) {
+ ri->fragmented = false;
+ sw_idx = NEXT_TX(sw_idx);
+ ri = &tnapi->tx_buffers[sw_idx];
+ }
+
+ sw_idx = NEXT_TX(sw_idx);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ ri = &tnapi->tx_buffers[sw_idx];
+ if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
+ tx_bug = 1;
+
+ pci_unmap_page(tp->pdev,
+ dma_unmap_addr(ri, mapping),
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_TODEVICE);
+
+ while (ri->fragmented) {
+ ri->fragmented = false;
+ sw_idx = NEXT_TX(sw_idx);
+ ri = &tnapi->tx_buffers[sw_idx];
+ }
+
+ sw_idx = NEXT_TX(sw_idx);
+ }
+
+ dev_kfree_skb(skb);
+
+ if (unlikely(tx_bug)) {
+ tg3_tx_recover(tp);
+ return;
+ }
+ }
+
+ tnapi->tx_cons = sw_idx;
+
+ /* Need to make the tx_cons update visible to tg3_start_xmit()
+ * before checking for netif_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that tg3_start_xmit()
+ * will miss it and cause the queue to be stopped forever.
+ */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(txq) &&
+ (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
+ __netif_tx_lock(txq, smp_processor_id());
+ if (netif_tx_queue_stopped(txq) &&
+ (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock(txq);
+ }
+}
+
+static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
+{
+ if (!ri->skb)
+ return;
+
+ pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
+ map_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(ri->skb);
+ ri->skb = NULL;
+}
+
+/* Returns size of skb allocated or < 0 on error.
+ *
+ * We only need to fill in the address because the other members
+ * of the RX descriptor are invariant, see tg3_init_rings.
+ *
+ * Note the purposeful asymmetry of cpu vs. chip accesses. For
+ * posting buffers we only dirty the first cache line of the RX
+ * descriptor (containing the address). Whereas for the RX status
+ * buffers the cpu only reads the last cacheline of the RX descriptor
+ * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
+ */
+static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
+ u32 opaque_key, u32 dest_idx_unmasked)
+{
+ struct tg3_rx_buffer_desc *desc;
+ struct ring_info *map;
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ int skb_size, dest_idx;
+
+ switch (opaque_key) {
+ case RXD_OPAQUE_RING_STD:
+ dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
+ desc = &tpr->rx_std[dest_idx];
+ map = &tpr->rx_std_buffers[dest_idx];
+ skb_size = tp->rx_pkt_map_sz;
+ break;
+
+ case RXD_OPAQUE_RING_JUMBO:
+ dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
+ desc = &tpr->rx_jmb[dest_idx].std;
+ map = &tpr->rx_jmb_buffers[dest_idx];
+ skb_size = TG3_RX_JMB_MAP_SZ;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Do not overwrite any of the map or rp information
+ * until we are sure we can commit to a new buffer.
+ *
+ * Callers depend upon this behavior and assume that
+ * we leave everything unchanged if we fail.
+ */
+ skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
+ if (skb == NULL)
+ return -ENOMEM;
+
+ skb_reserve(skb, TG3_RX_OFFSET(tp));
+
+ mapping = pci_map_single(tp->pdev, skb->data, skb_size,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(tp->pdev, mapping)) {
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
+
+ map->skb = skb;
+ dma_unmap_addr_set(map, mapping, mapping);
+
+ desc->addr_hi = ((u64)mapping >> 32);
+ desc->addr_lo = ((u64)mapping & 0xffffffff);
+
+ return skb_size;
+}
+
+/* We only need to move over in the address because the other
+ * members of the RX descriptor are invariant. See notes above
+ * tg3_alloc_rx_skb for full details.
+ */
+static void tg3_recycle_rx(struct tg3_napi *tnapi,
+ struct tg3_rx_prodring_set *dpr,
+ u32 opaque_key, int src_idx,
+ u32 dest_idx_unmasked)
+{
+ struct tg3 *tp = tnapi->tp;
+ struct tg3_rx_buffer_desc *src_desc, *dest_desc;
+ struct ring_info *src_map, *dest_map;
+ struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
+ int dest_idx;
+
+ switch (opaque_key) {
+ case RXD_OPAQUE_RING_STD:
+ dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
+ dest_desc = &dpr->rx_std[dest_idx];
+ dest_map = &dpr->rx_std_buffers[dest_idx];
+ src_desc = &spr->rx_std[src_idx];
+ src_map = &spr->rx_std_buffers[src_idx];
+ break;
+
+ case RXD_OPAQUE_RING_JUMBO:
+ dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
+ dest_desc = &dpr->rx_jmb[dest_idx].std;
+ dest_map = &dpr->rx_jmb_buffers[dest_idx];
+ src_desc = &spr->rx_jmb[src_idx].std;
+ src_map = &spr->rx_jmb_buffers[src_idx];
+ break;
+
+ default:
+ return;
+ }
+
+ dest_map->skb = src_map->skb;
+ dma_unmap_addr_set(dest_map, mapping,
+ dma_unmap_addr(src_map, mapping));
+ dest_desc->addr_hi = src_desc->addr_hi;
+ dest_desc->addr_lo = src_desc->addr_lo;
+
+ /* Ensure that the update to the skb happens after the physical
+ * addresses have been transferred to the new BD location.
+ */
+ smp_wmb();
+
+ src_map->skb = NULL;
+}
+
+/* The RX ring scheme is composed of multiple rings which post fresh
+ * buffers to the chip, and one special ring the chip uses to report
+ * status back to the host.
+ *
+ * The special ring reports the status of received packets to the
+ * host. The chip does not write into the original descriptor the
+ * RX buffer was obtained from. The chip simply takes the original
+ * descriptor as provided by the host, updates the status and length
+ * field, then writes this into the next status ring entry.
+ *
+ * Each ring the host uses to post buffers to the chip is described
+ * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
+ * it is first placed into the on-chip ram. When the packet's length
+ * is known, it walks down the TG3_BDINFO entries to select the ring.
+ * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
+ * which is within the range of the new packet's length is chosen.
+ *
+ * The "separate ring for rx status" scheme may sound queer, but it makes
+ * sense from a cache coherency perspective. If only the host writes
+ * to the buffer post rings, and only the chip writes to the rx status
+ * rings, then cache lines never move beyond shared-modified state.
+ * If both the host and chip were to write into the same ring, cache line
+ * eviction could occur since both entities want it in an exclusive state.
+ */
+static int tg3_rx(struct tg3_napi *tnapi, int budget)
+{
+ struct tg3 *tp = tnapi->tp;
+ u32 work_mask, rx_std_posted = 0;
+ u32 std_prod_idx, jmb_prod_idx;
+ u32 sw_idx = tnapi->rx_rcb_ptr;
+ u16 hw_idx;
+ int received;
+ struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
+
+ hw_idx = *(tnapi->rx_rcb_prod_idx);
+ /*
+ * We need to order the read of hw_idx and the read of
+ * the opaque cookie.
+ */
+ rmb();
+ work_mask = 0;
+ received = 0;
+ std_prod_idx = tpr->rx_std_prod_idx;
+ jmb_prod_idx = tpr->rx_jmb_prod_idx;
+ while (sw_idx != hw_idx && budget > 0) {
+ struct ring_info *ri;
+ struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
+ unsigned int len;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u32 opaque_key, desc_idx, *post_ptr;
+
+ desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
+ opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
+ if (opaque_key == RXD_OPAQUE_RING_STD) {
+ ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
+ dma_addr = dma_unmap_addr(ri, mapping);
+ skb = ri->skb;
+ post_ptr = &std_prod_idx;
+ rx_std_posted++;
+ } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
+ ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
+ dma_addr = dma_unmap_addr(ri, mapping);
+ skb = ri->skb;
+ post_ptr = &jmb_prod_idx;
+ } else
+ goto next_pkt_nopost;
+
+ work_mask |= opaque_key;
+
+ if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
+ (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
+ drop_it:
+ tg3_recycle_rx(tnapi, tpr, opaque_key,
+ desc_idx, *post_ptr);
+ drop_it_no_recycle:
+ /* Other statistics are tracked by the card. */
+ tp->rx_dropped++;
+ goto next_pkt;
+ }
+
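+ /* The length reported by the hardware includes the FCS; strip it. */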
+ len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
+ ETH_FCS_LEN;
+
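+ /* Above the copy threshold, hand the DMA buffer straight to the
+ * stack and post a freshly allocated replacement. Below it, copy
+ * the frame into a small skb and recycle the original buffer.
+ */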
+ if (len > TG3_RX_COPY_THRESH(tp)) {
+ int skb_size;
+
+ skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
+ *post_ptr);
+ if (skb_size < 0)
+ goto drop_it;
+
+ pci_unmap_single(tp->pdev, dma_addr, skb_size,
+ PCI_DMA_FROMDEVICE);
+
+ /* Ensure that the update to the skb happens
+ * after the usage of the old DMA mapping.
+ */
+ smp_wmb();
+
+ ri->skb = NULL;
+
+ skb_put(skb, len);
+ } else {
+ struct sk_buff *copy_skb;
+
+ tg3_recycle_rx(tnapi, tpr, opaque_key,
+ desc_idx, *post_ptr);
+
+ copy_skb = netdev_alloc_skb(tp->dev, len +
+ TG3_RAW_IP_ALIGN);
+ if (copy_skb == NULL)
+ goto drop_it_no_recycle;
+
+ skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
+ skb_put(copy_skb, len);
+ pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+ skb_copy_from_linear_data(skb, copy_skb->data, len);
+ pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+
+ /* We'll reuse the original ring buffer. */
+ skb = copy_skb;
+ }
+
+ if ((tp->dev->features & NETIF_F_RXCSUM) &&
+ (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
+ (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
+ >> RXD_TCPCSUM_SHIFT) == 0xffff))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+
+ skb->protocol = eth_type_trans(skb, tp->dev);
+
+ if (len > (tp->dev->mtu + ETH_HLEN) &&
+ skb->protocol != htons(ETH_P_8021Q)) {
+ dev_kfree_skb(skb);
+ goto drop_it_no_recycle;
+ }
+
+ if (desc->type_flags & RXD_FLAG_VLAN &&
+ !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
+ __vlan_hwaccel_put_tag(skb,
+ desc->err_vlan & RXD_VLAN_MASK);
+
+ napi_gro_receive(&tnapi->napi, skb);
+
+ received++;
+ budget--;
+
+next_pkt:
+ (*post_ptr)++;
+
+ if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
+ tpr->rx_std_prod_idx = std_prod_idx &
+ tp->rx_std_ring_mask;
+ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+ tpr->rx_std_prod_idx);
+ work_mask &= ~RXD_OPAQUE_RING_STD;
+ rx_std_posted = 0;
+ }
+next_pkt_nopost:
+ sw_idx++;
+ sw_idx &= tp->rx_ret_ring_mask;
+
+ /* Refresh hw_idx to see if there is new work */
+ if (sw_idx == hw_idx) {
+ hw_idx = *(tnapi->rx_rcb_prod_idx);
+ rmb();
+ }
+ }
+
+ /* ACK the status ring. */
+ tnapi->rx_rcb_ptr = sw_idx;
+ tw32_rx_mbox(tnapi->consmbox, sw_idx);
+
+ /* Refill RX ring(s). */
+ if (!tg3_flag(tp, ENABLE_RSS)) {
+ if (work_mask & RXD_OPAQUE_RING_STD) {
+ tpr->rx_std_prod_idx = std_prod_idx &
+ tp->rx_std_ring_mask;
+ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+ tpr->rx_std_prod_idx);
+ }
+ if (work_mask & RXD_OPAQUE_RING_JUMBO) {
+ tpr->rx_jmb_prod_idx = jmb_prod_idx &
+ tp->rx_jmb_ring_mask;
+ tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
+ tpr->rx_jmb_prod_idx);
+ }
+ mmiowb();
+ } else if (work_mask) {
+ /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
+ * updated before the producer indices can be updated.
+ */
+ smp_wmb();
+
+ tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
+ tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
+
+ if (tnapi != &tp->napi[1])
+ napi_schedule(&tp->napi[1].napi);
+ }
+
+ return received;
+}
+
+static void tg3_poll_link(struct tg3 *tp)
+{
+ /* handle link change and other phy events */
+ if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
+ struct tg3_hw_status *sblk = tp->napi[0].hw_status;
+
+ if (sblk->status & SD_STATUS_LINK_CHG) {
+ sblk->status = SD_STATUS_UPDATED |
+ (sblk->status & ~SD_STATUS_LINK_CHG);
+ spin_lock(&tp->lock);
+ if (tg3_flag(tp, USE_PHYLIB)) {
+ tw32_f(MAC_STATUS,
+ (MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED |
+ MAC_STATUS_MI_COMPLETION |
+ MAC_STATUS_LNKSTATE_CHANGED));
+ udelay(40);
+ } else
+ tg3_setup_phy(tp, 0);
+ spin_unlock(&tp->lock);
+ }
+ }
+}
+
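+/* Transfer recycled standard and jumbo buffers from a per-vector
+ * source producer ring (spr) to the destination ring (dpr), which in
+ * RSS mode is the vector 0 ring that actually feeds the hardware.
+ * Returns -ENOSPC if a destination slot was still occupied.
+ */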
+static int tg3_rx_prodring_xfer(struct tg3 *tp,
+ struct tg3_rx_prodring_set *dpr,
+ struct tg3_rx_prodring_set *spr)
+{
+ u32 si, di, cpycnt, src_prod_idx;
+ int i, err = 0;
+
+ while (1) {
+ src_prod_idx = spr->rx_std_prod_idx;
+
+ /* Make sure updates to the rx_std_buffers[] entries and the
+ * standard producer index are seen in the correct order.
+ */
+ smp_rmb();
+
+ if (spr->rx_std_cons_idx == src_prod_idx)
+ break;
+
+ if (spr->rx_std_cons_idx < src_prod_idx)
+ cpycnt = src_prod_idx - spr->rx_std_cons_idx;
+ else
+ cpycnt = tp->rx_std_ring_mask + 1 -
+ spr->rx_std_cons_idx;
+
+ cpycnt = min(cpycnt,
+ tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
+
+ si = spr->rx_std_cons_idx;
+ di = dpr->rx_std_prod_idx;
+
+ for (i = di; i < di + cpycnt; i++) {
+ if (dpr->rx_std_buffers[i].skb) {
+ cpycnt = i - di;
+ err = -ENOSPC;
+ break;
+ }
+ }
+
+ if (!cpycnt)
+ break;
+
+ /* Ensure that updates to the rx_std_buffers ring and the
+ * shadowed hardware producer ring from tg3_recycle_skb() are
+ * ordered correctly WRT the skb check above.
+ */
+ smp_rmb();
+
+ memcpy(&dpr->rx_std_buffers[di],
+ &spr->rx_std_buffers[si],
+ cpycnt * sizeof(struct ring_info));
+
+ for (i = 0; i < cpycnt; i++, di++, si++) {
+ struct tg3_rx_buffer_desc *sbd, *dbd;
+ sbd = &spr->rx_std[si];
+ dbd = &dpr->rx_std[di];
+ dbd->addr_hi = sbd->addr_hi;
+ dbd->addr_lo = sbd->addr_lo;
+ }
+
+ spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
+ tp->rx_std_ring_mask;
+ dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
+ tp->rx_std_ring_mask;
+ }
+
+ while (1) {
+ src_prod_idx = spr->rx_jmb_prod_idx;
+
+ /* Make sure updates to the rx_jmb_buffers[] entries and
+ * the jumbo producer index are seen in the correct order.
+ */
+ smp_rmb();
+
+ if (spr->rx_jmb_cons_idx == src_prod_idx)
+ break;
+
+ if (spr->rx_jmb_cons_idx < src_prod_idx)
+ cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
+ else
+ cpycnt = tp->rx_jmb_ring_mask + 1 -
+ spr->rx_jmb_cons_idx;
+
+ cpycnt = min(cpycnt,
+ tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
+
+ si = spr->rx_jmb_cons_idx;
+ di = dpr->rx_jmb_prod_idx;
+
+ for (i = di; i < di + cpycnt; i++) {
+ if (dpr->rx_jmb_buffers[i].skb) {
+ cpycnt = i - di;
+ err = -ENOSPC;
+ break;
+ }
+ }
+
+ if (!cpycnt)
+ break;
+
+ /* Ensure that updates to the rx_jmb_buffers ring and the
+ * shadowed hardware producer ring from tg3_recycle_skb() are
+ * ordered correctly WRT the skb check above.
+ */
+ smp_rmb();
+
+ memcpy(&dpr->rx_jmb_buffers[di],
+ &spr->rx_jmb_buffers[si],
+ cpycnt * sizeof(struct ring_info));
+
+ for (i = 0; i < cpycnt; i++, di++, si++) {
+ struct tg3_rx_buffer_desc *sbd, *dbd;
+ sbd = &spr->rx_jmb[si].std;
+ dbd = &dpr->rx_jmb[di].std;
+ dbd->addr_hi = sbd->addr_hi;
+ dbd->addr_lo = sbd->addr_lo;
+ }
+
+ spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
+ tp->rx_jmb_ring_mask;
+ dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
+ tp->rx_jmb_ring_mask;
+ }
+
+ return err;
+}
+
+static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
+{
+ struct tg3 *tp = tnapi->tp;
+
+ /* run TX completion thread */
+ if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
+ tg3_tx(tnapi);
+ if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
+ return work_done;
+ }
+
+ /* run RX thread, within the bounds set by NAPI.
+ * All RX "locking" is done by ensuring outside
+ * code synchronizes with tg3->napi.poll()
+ */
+ if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
+ work_done += tg3_rx(tnapi, budget - work_done);
+
+ if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
+ struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
+ int i, err = 0;
+ u32 std_prod_idx = dpr->rx_std_prod_idx;
+ u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
+
+ for (i = 1; i < tp->irq_cnt; i++)
+ err |= tg3_rx_prodring_xfer(tp, dpr,
+ &tp->napi[i].prodring);
+
+ wmb();
+
+ if (std_prod_idx != dpr->rx_std_prod_idx)
+ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+ dpr->rx_std_prod_idx);
+
+ if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
+ tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
+ dpr->rx_jmb_prod_idx);
+
+ mmiowb();
+
+ if (err)
+ tw32_f(HOSTCC_MODE, tp->coal_now);
+ }
+
+ return work_done;
+}
+
+static int tg3_poll_msix(struct napi_struct *napi, int budget)
+{
+ struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
+ struct tg3 *tp = tnapi->tp;
+ int work_done = 0;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
+
+ while (1) {
+ work_done = tg3_poll_work(tnapi, work_done, budget);
+
+ if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
+ goto tx_recovery;
+
+ if (unlikely(work_done >= budget))
+ break;
+
+ /* tp->last_tag is used in tg3_int_reenable() below
+ * to tell the hw how much work has been processed,
+ * so we must read it before checking for more work.
+ */
+ tnapi->last_tag = sblk->status_tag;
+ tnapi->last_irq_tag = tnapi->last_tag;
+ rmb();
+
+ /* check for RX/TX work to do */
+ if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
+ *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
+ napi_complete(napi);
+ /* Reenable interrupts. */
+ tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
+ mmiowb();
+ break;
+ }
+ }
+
+ return work_done;
+
+tx_recovery:
+ /* work_done is guaranteed to be less than budget. */
+ napi_complete(napi);
+ schedule_work(&tp->reset_task);
+ return work_done;
+}
+
+static void tg3_process_error(struct tg3 *tp)
+{
+ u32 val;
+ bool real_error = false;
+
+ if (tg3_flag(tp, ERROR_PROCESSED))
+ return;
+
+ /* Check Flow Attention register */
+ val = tr32(HOSTCC_FLOW_ATTN);
+ if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
+ netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
+ real_error = true;
+ }
+
+ if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
+ netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
+ real_error = true;
+ }
+
+ if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
+ netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
+ real_error = true;
+ }
+
+ if (!real_error)
+ return;
+
+ tg3_dump_state(tp);
+
+ tg3_flag_set(tp, ERROR_PROCESSED);
+ schedule_work(&tp->reset_task);
+}
+
+static int tg3_poll(struct napi_struct *napi, int budget)
+{
+ struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
+ struct tg3 *tp = tnapi->tp;
+ int work_done = 0;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
+
+ while (1) {
+ if (sblk->status & SD_STATUS_ERROR)
+ tg3_process_error(tp);
+
+ tg3_poll_link(tp);
+
+ work_done = tg3_poll_work(tnapi, work_done, budget);
+
+ if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
+ goto tx_recovery;
+
+ if (unlikely(work_done >= budget))
+ break;
+
+ if (tg3_flag(tp, TAGGED_STATUS)) {
+ /* tp->last_tag is used in tg3_int_reenable() below
+ * to tell the hw how much work has been processed,
+ * so we must read it before checking for more work.
+ */
+ tnapi->last_tag = sblk->status_tag;
+ tnapi->last_irq_tag = tnapi->last_tag;
+ rmb();
+ } else
+ sblk->status &= ~SD_STATUS_UPDATED;
+
+ if (likely(!tg3_has_work(tnapi))) {
+ napi_complete(napi);
+ tg3_int_reenable(tnapi);
+ break;
+ }
+ }
+
+ return work_done;
+
+tx_recovery:
+ /* work_done is guaranteed to be less than budget. */
+ napi_complete(napi);
+ schedule_work(&tp->reset_task);
+ return work_done;
+}
+
+static void tg3_napi_disable(struct tg3 *tp)
+{
+ int i;
+
+ for (i = tp->irq_cnt - 1; i >= 0; i--)
+ napi_disable(&tp->napi[i].napi);
+}
+
+static void tg3_napi_enable(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < tp->irq_cnt; i++)
+ napi_enable(&tp->napi[i].napi);
+}
+
+static void tg3_napi_init(struct tg3 *tp)
+{
+ int i;
+
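+ /* 64 is the conventional NAPI poll weight. */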
+ netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
+ for (i = 1; i < tp->irq_cnt; i++)
+ netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
+}
+
+static void tg3_napi_fini(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < tp->irq_cnt; i++)
+ netif_napi_del(&tp->napi[i].napi);
+}
+
+static inline void tg3_netif_stop(struct tg3 *tp)
+{
+ tp->dev->trans_start = jiffies; /* prevent tx timeout */
+ tg3_napi_disable(tp);
+ netif_tx_disable(tp->dev);
+}
+
+static inline void tg3_netif_start(struct tg3 *tp)
+{
+ /* NOTE: unconditional netif_tx_wake_all_queues is only
+ * appropriate so long as all callers are assured to
+ * have free tx slots (such as after tg3_init_hw)
+ */
+ netif_tx_wake_all_queues(tp->dev);
+
+ tg3_napi_enable(tp);
+ tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
+ tg3_enable_ints(tp);
+}
+
+static void tg3_irq_quiesce(struct tg3 *tp)
+{
+ int i;
+
+ BUG_ON(tp->irq_sync);
+
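+ /* Publish irq_sync before waiting; handlers that observe it via
+ * tg3_irq_sync() bail out without scheduling NAPI.
+ */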
+ tp->irq_sync = 1;
+ smp_mb();
+
+ for (i = 0; i < tp->irq_cnt; i++)
+ synchronize_irq(tp->napi[i].irq_vec);
+}
+
+/* Fully shut down all tg3 driver activity elsewhere in the system.
+ * If irq_sync is non-zero, the IRQ handlers are quiesced as well;
+ * most of the time this is only necessary when shutting down the
+ * device.
+ */
+static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
+{
+ spin_lock_bh(&tp->lock);
+ if (irq_sync)
+ tg3_irq_quiesce(tp);
+}
+
+static inline void tg3_full_unlock(struct tg3 *tp)
+{
+ spin_unlock_bh(&tp->lock);
+}
+
+/* One-shot MSI handler - Chip automatically disables interrupt
+ * after sending MSI so driver doesn't have to do it.
+ */
+static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
+{
+ struct tg3_napi *tnapi = dev_id;
+ struct tg3 *tp = tnapi->tp;
+
+ prefetch(tnapi->hw_status);
+ if (tnapi->rx_rcb)
+ prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
+
+ if (likely(!tg3_irq_sync(tp)))
+ napi_schedule(&tnapi->napi);
+
+ return IRQ_HANDLED;
+}
+
+/* MSI ISR - No need to check for interrupt sharing and no need to
+ * flush status block and interrupt mailbox. PCI ordering rules
+ * guarantee that MSI will arrive after the status block.
+ */
+static irqreturn_t tg3_msi(int irq, void *dev_id)
+{
+ struct tg3_napi *tnapi = dev_id;
+ struct tg3 *tp = tnapi->tp;
+
+ prefetch(tnapi->hw_status);
+ if (tnapi->rx_rcb)
+ prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
+ /*
+ * Writing any value to intr-mbox-0 clears PCI INTA# and
+ * chip-internal interrupt pending events.
+ * Writing non-zero to intr-mbox-0 additionally tells the
+ * NIC to stop sending us irqs, engaging "in-intr-handler"
+ * event coalescing.
+ */
+ tw32_mailbox(tnapi->int_mbox, 0x00000001);
+ if (likely(!tg3_irq_sync(tp)))
+ napi_schedule(&tnapi->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tg3_interrupt(int irq, void *dev_id)
+{
+ struct tg3_napi *tnapi = dev_id;
+ struct tg3 *tp = tnapi->tp;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
+ unsigned int handled = 1;
+
+ /* In INTx mode, it is possible for the interrupt to arrive at
+ * the CPU before the status block posted prior to the interrupt
+ * is visible. Reading the PCI State register will confirm whether
+ * the interrupt is ours and will flush the status block.
+ */
+ if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
+ if (tg3_flag(tp, CHIP_RESETTING) ||
+ (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+ handled = 0;
+ goto out;
+ }
+ }
+
+ /*
+ * Writing any value to intr-mbox-0 clears PCI INTA# and
+ * chip-internal interrupt pending events.
+ * Writing non-zero to intr-mbox-0 additionally tells the
+ * NIC to stop sending us irqs, engaging "in-intr-handler"
+ * event coalescing.
+ *
+ * Flush the mailbox to de-assert the IRQ immediately to prevent
+ * spurious interrupts. The flush impacts performance but
+ * excessive spurious interrupts can be worse in some cases.
+ */
+ tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+ if (tg3_irq_sync(tp))
+ goto out;
+ sblk->status &= ~SD_STATUS_UPDATED;
+ if (likely(tg3_has_work(tnapi))) {
+ prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
+ napi_schedule(&tnapi->napi);
+ } else {
+ /* No work, shared interrupt perhaps? re-enable
+ * interrupts, and flush that PCI write
+ */
+ tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+ 0x00000000);
+ }
+out:
+ return IRQ_RETVAL(handled);
+}
+
+static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
+{
+ struct tg3_napi *tnapi = dev_id;
+ struct tg3 *tp = tnapi->tp;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
+ unsigned int handled = 1;
+
+ /* In INTx mode, it is possible for the interrupt to arrive at
+ * the CPU before the status block posted prior to the interrupt
+ * is visible. Reading the PCI State register will confirm whether
+ * the interrupt is ours and will flush the status block.
+ */
+ if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
+ if (tg3_flag(tp, CHIP_RESETTING) ||
+ (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+ handled = 0;
+ goto out;
+ }
+ }
+
+ /*
+ * Writing any value to intr-mbox-0 clears PCI INTA# and
+ * chip-internal interrupt pending events.
+ * Writing non-zero to intr-mbox-0 additionally tells the
+ * NIC to stop sending us irqs, engaging "in-intr-handler"
+ * event coalescing.
+ *
+ * Flush the mailbox to de-assert the IRQ immediately to prevent
+ * spurious interrupts. The flush impacts performance but
+ * excessive spurious interrupts can be worse in some cases.
+ */
+ tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+
+ /*
+ * In a shared interrupt configuration, sometimes other devices'
+ * interrupts will scream. We record the current status tag here
+ * so that the above check can report that the screaming interrupts
+ * are unhandled. Eventually they will be silenced.
+ */
+ tnapi->last_irq_tag = sblk->status_tag;
+
+ if (tg3_irq_sync(tp))
+ goto out;
+
+ prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
+
+ napi_schedule(&tnapi->napi);
+
+out:
+ return IRQ_RETVAL(handled);
+}
+
+/* ISR for interrupt test */
+static irqreturn_t tg3_test_isr(int irq, void *dev_id)
+{
+ struct tg3_napi *tnapi = dev_id;
+ struct tg3 *tp = tnapi->tp;
+ struct tg3_hw_status *sblk = tnapi->hw_status;
+
+ if ((sblk->status & SD_STATUS_UPDATED) ||
+ !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+ tg3_disable_ints(tp);
+ return IRQ_RETVAL(1);
+ }
+ return IRQ_RETVAL(0);
+}
+
+static int tg3_init_hw(struct tg3 *, int);
+static int tg3_halt(struct tg3 *, int, int);
+
+/* Restart hardware after configuration changes, self-test, etc.
+ * Invoked with tp->lock held.
+ */
+static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
+ __releases(tp->lock)
+ __acquires(tp->lock)
+{
+ int err;
+
+ err = tg3_init_hw(tp, reset_phy);
+ if (err) {
+ netdev_err(tp->dev,
+ "Failed to re-initialize device, aborting\n");
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ tg3_full_unlock(tp);
+ del_timer_sync(&tp->timer);
+ tp->irq_sync = 0;
+ tg3_napi_enable(tp);
+ dev_close(tp->dev);
+ tg3_full_lock(tp, 0);
+ }
+ return err;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void tg3_poll_controller(struct net_device *dev)
+{
+ int i;
+ struct tg3 *tp = netdev_priv(dev);
+
+ for (i = 0; i < tp->irq_cnt; i++)
+ tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
+}
+#endif
+
+static void tg3_reset_task(struct work_struct *work)
+{
+ struct tg3 *tp = container_of(work, struct tg3, reset_task);
+ int err;
+ unsigned int restart_timer;
+
+ tg3_full_lock(tp, 0);
+
+ if (!netif_running(tp->dev)) {
+ tg3_full_unlock(tp);
+ return;
+ }
+
+ tg3_full_unlock(tp);
+
+ tg3_phy_stop(tp);
+
+ tg3_netif_stop(tp);
+
+ tg3_full_lock(tp, 1);
+
+ restart_timer = tg3_flag(tp, RESTART_TIMER);
+ tg3_flag_clear(tp, RESTART_TIMER);
+
+ if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
+ tp->write32_tx_mbox = tg3_write32_tx_mbox;
+ tp->write32_rx_mbox = tg3_write_flush_reg32;
+ tg3_flag_set(tp, MBOX_WRITE_REORDER);
+ tg3_flag_clear(tp, TX_RECOVERY_PENDING);
+ }
+
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
+ err = tg3_init_hw(tp, 1);
+ if (err)
+ goto out;
+
+ tg3_netif_start(tp);
+
+ if (restart_timer)
+ mod_timer(&tp->timer, jiffies + 1);
+
+out:
+ tg3_full_unlock(tp);
+
+ if (!err)
+ tg3_phy_start(tp);
+}
+
+static void tg3_tx_timeout(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (netif_msg_tx_err(tp)) {
+ netdev_err(dev, "transmit timed out, resetting\n");
+ tg3_dump_state(tp);
+ }
+
+ schedule_work(&tp->reset_task);
+}
+
+/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
+static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
+{
+ u32 base = (u32) mapping & 0xffffffff;
+
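+ /* If base + len + 8 wraps below base, the buffer straddles a 4GB
+ * boundary. The base > 0xffffdcc0 pre-check only admits buffers
+ * that start within the last ~9KB below the boundary (presumably
+ * sized for a maximal jumbo frame).
+ */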
+ return (base > 0xffffdcc0) && (base + len + 8 < base);
+}
+
+/* Test for DMA addresses > 40-bit */
+static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
+ int len)
+{
+#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
+ if (tg3_flag(tp, 40BIT_DMA_BUG))
+ return ((u64) mapping + len) > DMA_BIT_MASK(40);
+ return 0;
+#else
+ return 0;
+#endif
+}
+
+static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
+ dma_addr_t mapping, u32 len, u32 flags,
+ u32 mss, u32 vlan)
+{
+ txbd->addr_hi = ((u64) mapping >> 32);
+ txbd->addr_lo = ((u64) mapping & 0xffffffff);
+ txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
+ txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
+}
+
+static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
+ dma_addr_t map, u32 len, u32 flags,
+ u32 mss, u32 vlan)
+{
+ struct tg3 *tp = tnapi->tp;
+ bool hwbug = false;
+
+ if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
+ hwbug = true;
+
+ if (tg3_4g_overflow_test(map, len))
+ hwbug = true;
+
+ if (tg3_40bit_overflow_test(tp, map, len))
+ hwbug = true;
+
+ if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
+ u32 tmp_flag = flags & ~TXD_FLAG_END;
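+ /* Split a long buffer into TG3_TX_BD_DMA_MAX-sized descriptors;
+ * only the final descriptor of the packet may carry TXD_FLAG_END,
+ * so intermediate pieces use tmp_flag with it masked off.
+ */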
+ while (len > TG3_TX_BD_DMA_MAX) {
+ u32 frag_len = TG3_TX_BD_DMA_MAX;
+ len -= TG3_TX_BD_DMA_MAX;
+
+ if (len) {
+ tnapi->tx_buffers[*entry].fragmented = true;
+ /* Avoid the 8-byte DMA problem */
+ if (len <= 8) {
+ len += TG3_TX_BD_DMA_MAX / 2;
+ frag_len = TG3_TX_BD_DMA_MAX / 2;
+ }
+ } else
+ tmp_flag = flags;
+
+ if (*budget) {
+ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+ frag_len, tmp_flag, mss, vlan);
+ (*budget)--;
+ *entry = NEXT_TX(*entry);
+ } else {
+ hwbug = true;
+ break;
+ }
+
+ map += frag_len;
+ }
+
+ if (len) {
+ if (*budget) {
+ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+ len, flags, mss, vlan);
+ (*budget)--;
+ *entry = NEXT_TX(*entry);
+ } else {
+ hwbug = true;
+ }
+ }
+ } else {
+ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+ len, flags, mss, vlan);
+ *entry = NEXT_TX(*entry);
+ }
+
+ return hwbug;
+}
+
+static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
+{
+ int i;
+ struct sk_buff *skb;
+ struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
+
+ skb = txb->skb;
+ txb->skb = NULL;
+
+ pci_unmap_single(tnapi->tp->pdev,
+ dma_unmap_addr(txb, mapping),
+ skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+
+ while (txb->fragmented) {
+ txb->fragmented = false;
+ entry = NEXT_TX(entry);
+ txb = &tnapi->tx_buffers[entry];
+ }
+
+ for (i = 0; i < last; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ entry = NEXT_TX(entry);
+ txb = &tnapi->tx_buffers[entry];
+
+ pci_unmap_page(tnapi->tp->pdev,
+ dma_unmap_addr(txb, mapping),
+ frag->size, PCI_DMA_TODEVICE);
+
+ while (txb->fragmented) {
+ txb->fragmented = false;
+ entry = NEXT_TX(entry);
+ txb = &tnapi->tx_buffers[entry];
+ }
+ }
+}
+
+/* Workaround 4GB and 40-bit hardware DMA bugs. */
+static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
+ struct sk_buff *skb,
+ u32 *entry, u32 *budget,
+ u32 base_flags, u32 mss, u32 vlan)
+{
+ struct tg3 *tp = tnapi->tp;
+ struct sk_buff *new_skb;
+ dma_addr_t new_addr = 0;
+ int ret = 0;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
+ new_skb = skb_copy(skb, GFP_ATOMIC);
+ else {
+ int more_headroom = 4 - ((unsigned long)skb->data & 3);
+
+ new_skb = skb_copy_expand(skb,
+ skb_headroom(skb) + more_headroom,
+ skb_tailroom(skb), GFP_ATOMIC);
+ }
+
+ if (!new_skb) {
+ ret = -1;
+ } else {
+ /* New SKB is guaranteed to be linear. */
+ new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
+ PCI_DMA_TODEVICE);
+ /* Make sure the mapping succeeded */
+ if (pci_dma_mapping_error(tp->pdev, new_addr)) {
+ dev_kfree_skb(new_skb);
+ ret = -1;
+ } else {
+ base_flags |= TXD_FLAG_END;
+
+ tnapi->tx_buffers[*entry].skb = new_skb;
+ dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
+ mapping, new_addr);
+
+ if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
+ new_skb->len, base_flags,
+ mss, vlan)) {
+ tg3_tx_skb_unmap(tnapi, *entry, 0);
+ dev_kfree_skb(new_skb);
+ ret = -1;
+ }
+ }
+ }
+
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
+static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
+
+/* Use GSO to work around a rare TSO bug that may be triggered when the
+ * TSO header is greater than 80 bytes.
+ */
+static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
+{
+ struct sk_buff *segs, *nskb;
+ u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
+
+ /* Estimate the number of fragments in the worst case */
+ if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
+ netif_stop_queue(tp->dev);
+
+ /* netif_tx_stop_queue() must be done before checking
+ * tx index in tg3_tx_avail() below, because in
+ * tg3_tx(), we update tx index before checking for
+ * netif_tx_queue_stopped().
+ */
+ smp_mb();
+ if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
+ return NETDEV_TX_BUSY;
+
+ netif_wake_queue(tp->dev);
+ }
+
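+ /* Segment in software with TSO masked out of the feature set, then
+ * transmit each resulting segment as an ordinary packet.
+ */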
+ segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
+ if (IS_ERR(segs))
+ goto tg3_tso_bug_end;
+
+ do {
+ nskb = segs;
+ segs = segs->next;
+ nskb->next = NULL;
+ tg3_start_xmit(nskb, tp->dev);
+ } while (segs);
+
+tg3_tso_bug_end:
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
+ * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
+ */
+static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ u32 len, entry, base_flags, mss, vlan = 0;
+ u32 budget;
+ int i = -1, would_hit_hwbug;
+ dma_addr_t mapping;
+ struct tg3_napi *tnapi;
+ struct netdev_queue *txq;
+ unsigned int last;
+
+ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+ tnapi = &tp->napi[skb_get_queue_mapping(skb)];
+ if (tg3_flag(tp, ENABLE_TSS))
+ tnapi++;
+
+ budget = tg3_tx_avail(tnapi);
+
+ /* We are running in BH disabled context with netif_tx_lock
+ * and TX reclaim runs via tp->napi.poll inside of a software
+ * interrupt. Furthermore, IRQ processing runs lockless so we have
+ * no IRQ context deadlocks to worry about either. Rejoice!
+ */
+ if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
+ if (!netif_tx_queue_stopped(txq)) {
+ netif_tx_stop_queue(txq);
+
+ /* This is a hard error, log it. */
+ netdev_err(dev,
+ "BUG! Tx Ring full when queue awake!\n");
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = tnapi->tx_prod;
+ base_flags = 0;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ base_flags |= TXD_FLAG_TCPUDP_CSUM;
+
+ mss = skb_shinfo(skb)->gso_size;
+ if (mss) {
+ struct iphdr *iph;
+ u32 tcp_opt_len, hdr_len;
+
+ if (skb_header_cloned(skb) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
+ dev_kfree_skb(skb);
+ goto out_unlock;
+ }
+
+ iph = ip_hdr(skb);
+ tcp_opt_len = tcp_optlen(skb);
+
+ if (skb_is_gso_v6(skb)) {
+ hdr_len = skb_headlen(skb) - ETH_HLEN;
+ } else {
+ u32 ip_tcp_len;
+
+ ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
+ hdr_len = ip_tcp_len + tcp_opt_len;
+
+ iph->check = 0;
+ iph->tot_len = htons(mss + hdr_len);
+ }
+
+ if (unlikely((ETH_HLEN + hdr_len) > 80) &&
+ tg3_flag(tp, TSO_BUG))
+ return tg3_tso_bug(tp, skb);
+
+ base_flags |= (TXD_FLAG_CPU_PRE_DMA |
+ TXD_FLAG_CPU_POST_DMA);
+
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3)) {
+ tcp_hdr(skb)->check = 0;
+ base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
+ } else
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+
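+ /* The hardware TSO variants want the header length encoded into
+ * spare bits of the mss field and base_flags; the exact bit
+ * layout differs per chip generation.
+ */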
+ if (tg3_flag(tp, HW_TSO_3)) {
+ mss |= (hdr_len & 0xc) << 12;
+ if (hdr_len & 0x10)
+ base_flags |= 0x00000010;
+ base_flags |= (hdr_len & 0x3e0) << 5;
+ } else if (tg3_flag(tp, HW_TSO_2))
+ mss |= hdr_len << 9;
+ else if (tg3_flag(tp, HW_TSO_1) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ if (tcp_opt_len || iph->ihl > 5) {
+ int tsflags;
+
+ tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
+ mss |= (tsflags << 11);
+ }
+ } else {
+ if (tcp_opt_len || iph->ihl > 5) {
+ int tsflags;
+
+ tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
+ base_flags |= tsflags << 12;
+ }
+ }
+ }
+
+ if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
+ !mss && skb->len > VLAN_ETH_FRAME_LEN)
+ base_flags |= TXD_FLAG_JMB_PKT;
+
+ if (vlan_tx_tag_present(skb)) {
+ base_flags |= TXD_FLAG_VLAN;
+ vlan = vlan_tx_tag_get(skb);
+ }
+
+ len = skb_headlen(skb);
+
+ mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(tp->pdev, mapping)) {
+ dev_kfree_skb(skb);
+ goto out_unlock;
+ }
+
+ tnapi->tx_buffers[entry].skb = skb;
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
+
+ would_hit_hwbug = 0;
+
+ if (tg3_flag(tp, 5701_DMA_BUG))
+ would_hit_hwbug = 1;
+
+ if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
+ ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
+ mss, vlan))
+ would_hit_hwbug = 1;
+
+ /* Now loop through additional data fragments, and queue them. */
+ if (skb_shinfo(skb)->nr_frags > 0) {
+ u32 tmp_mss = mss;
+
+ if (!tg3_flag(tp, HW_TSO_1) &&
+ !tg3_flag(tp, HW_TSO_2) &&
+ !tg3_flag(tp, HW_TSO_3))
+ tmp_mss = 0;
+
+ last = skb_shinfo(skb)->nr_frags - 1;
+ for (i = 0; i <= last; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ len = frag->size;
+ mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
+ len, DMA_TO_DEVICE);
+
+ tnapi->tx_buffers[entry].skb = NULL;
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+ mapping);
+ if (dma_mapping_error(&tp->pdev->dev, mapping))
+ goto dma_error;
+
+ if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
+ len, base_flags |
+ ((i == last) ? TXD_FLAG_END : 0),
+ tmp_mss, vlan))
+ would_hit_hwbug = 1;
+ }
+ }
+
+ if (would_hit_hwbug) {
+ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
+
+ /* If the workaround fails due to memory/mapping
+ * failure, silently drop this packet.
+ */
+ entry = tnapi->tx_prod;
+ budget = tg3_tx_avail(tnapi);
+ if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
+ base_flags, mss, vlan))
+ goto out_unlock;
+ }
+
+ skb_tx_timestamp(skb);
+
+ /* Packets are ready, update Tx producer idx local and on card. */
+ tw32_tx_mbox(tnapi->prodmbox, entry);
+
+ tnapi->tx_prod = entry;
+ if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
+ netif_tx_stop_queue(txq);
+
+ /* netif_tx_stop_queue() must be done before checking
+ * tx index in tg3_tx_avail() below, because in
+ * tg3_tx(), we update tx index before checking for
+ * netif_tx_queue_stopped().
+ */
+ smp_mb();
+ if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
+ netif_tx_wake_queue(txq);
+ }
+
+out_unlock:
+ mmiowb();
+
+ return NETDEV_TX_OK;
+
+dma_error:
+ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
+ dev_kfree_skb(skb);
+ tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
+ return NETDEV_TX_OK;
+}
+
+static void tg3_mac_loopback(struct tg3 *tp, bool enable)
+{
+ if (enable) {
+ tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
+ MAC_MODE_PORT_MODE_MASK);
+
+ tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
+
+ if (!tg3_flag(tp, 5705_PLUS))
+ tp->mac_mode |= MAC_MODE_LINK_POLARITY;
+
+ if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
+ tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+ else
+ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ } else {
+ tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
+
+ if (tg3_flag(tp, 5705_PLUS) ||
+ (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ }
+
+ tw32(MAC_MODE, tp->mac_mode);
+ udelay(40);
+}
+
+static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
+{
+ u32 val, bmcr, mac_mode, ptest = 0;
+
+ tg3_phy_toggle_apd(tp, false);
+ tg3_phy_toggle_automdix(tp, 0);
+
+ if (extlpbk && tg3_phy_set_extloopbk(tp))
+ return -EIO;
+
+ bmcr = BMCR_FULLDPLX;
+ switch (speed) {
+ case SPEED_10:
+ break;
+ case SPEED_100:
+ bmcr |= BMCR_SPEED100;
+ break;
+ case SPEED_1000:
+ default:
+ if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+ speed = SPEED_100;
+ bmcr |= BMCR_SPEED100;
+ } else {
+ speed = SPEED_1000;
+ bmcr |= BMCR_SPEED1000;
+ }
+ }
+
+ if (extlpbk) {
+ if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
+ tg3_readphy(tp, MII_CTRL1000, &val);
+ val |= CTL1000_AS_MASTER |
+ CTL1000_ENABLE_MASTER;
+ tg3_writephy(tp, MII_CTRL1000, val);
+ } else {
+ ptest = MII_TG3_FET_PTEST_TRIM_SEL |
+ MII_TG3_FET_PTEST_TRIM_2;
+ tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
+ }
+ } else
+ bmcr |= BMCR_LOOPBACK;
+
+ tg3_writephy(tp, MII_BMCR, bmcr);
+
+ /* The write needs to be flushed for the FETs */
+ if (tp->phy_flags & TG3_PHYFLG_IS_FET)
+ tg3_readphy(tp, MII_BMCR, &bmcr);
+
+ udelay(40);
+
+ if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
+ MII_TG3_FET_PTEST_FRC_TX_LINK |
+ MII_TG3_FET_PTEST_FRC_TX_LOCK);
+
+ /* The write needs to be flushed for the AC131 */
+ tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
+ }
+
+ /* Reset to prevent losing 1st rx packet intermittently */
+ if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+ tg3_flag(tp, 5780_CLASS)) {
+ tw32_f(MAC_RX_MODE, RX_MODE_RESET);
+ udelay(10);
+ tw32_f(MAC_RX_MODE, tp->rx_mode);
+ }
+
+ mac_mode = tp->mac_mode &
+ ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
+ if (speed == SPEED_1000)
+ mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ else
+ mac_mode |= MAC_MODE_PORT_MODE_MII;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+ u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
+
+ if (masked_phy_id == TG3_PHY_ID_BCM5401)
+ mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ else if (masked_phy_id == TG3_PHY_ID_BCM5411)
+ mac_mode |= MAC_MODE_LINK_POLARITY;
+
+ tg3_writephy(tp, MII_TG3_EXT_CTRL,
+ MII_TG3_EXT_CTRL_LNK3_LED_MODE);
+ }
+
+ tw32(MAC_MODE, mac_mode);
+ udelay(40);
+
+ return 0;
+}
+
+static void tg3_set_loopback(struct net_device *dev, u32 features)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (features & NETIF_F_LOOPBACK) {
+ if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
+ return;
+
+ spin_lock_bh(&tp->lock);
+ tg3_mac_loopback(tp, true);
+ netif_carrier_on(tp->dev);
+ spin_unlock_bh(&tp->lock);
+ netdev_info(dev, "Internal MAC loopback mode enabled.\n");
+ } else {
+ if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
+ return;
+
+ spin_lock_bh(&tp->lock);
+ tg3_mac_loopback(tp, false);
+ /* Force link status check */
+ tg3_setup_phy(tp, 1);
+ spin_unlock_bh(&tp->lock);
+ netdev_info(dev, "Internal MAC loopback mode disabled.\n");
+ }
+}
+
+static u32 tg3_fix_features(struct net_device *dev, u32 features)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
+ features &= ~NETIF_F_ALL_TSO;
+
+ return features;
+}
+
+static int tg3_set_features(struct net_device *dev, u32 features)
+{
+ u32 changed = dev->features ^ features;
+
+ if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
+ tg3_set_loopback(dev, features);
+
+ return 0;
+}
+
+static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
+ int new_mtu)
+{
+ dev->mtu = new_mtu;
+
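+ /* 5780-class chips cannot do TSO on jumbo frames (see
+ * tg3_fix_features), so features are re-evaluated around the
+ * TSO_CAPABLE flag change.
+ */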
+ if (new_mtu > ETH_DATA_LEN) {
+ if (tg3_flag(tp, 5780_CLASS)) {
+ netdev_update_features(dev);
+ tg3_flag_clear(tp, TSO_CAPABLE);
+ } else {
+ tg3_flag_set(tp, JUMBO_RING_ENABLE);
+ }
+ } else {
+ if (tg3_flag(tp, 5780_CLASS)) {
+ tg3_flag_set(tp, TSO_CAPABLE);
+ netdev_update_features(dev);
+ }
+ tg3_flag_clear(tp, JUMBO_RING_ENABLE);
+ }
+}
+
+static int tg3_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int err;
+
+ if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
+ return -EINVAL;
+
+ if (!netif_running(dev)) {
+ /* We'll just catch it later when the
+ * device is brought up.
+ */
+ tg3_set_mtu(dev, tp, new_mtu);
+ return 0;
+ }
+
+ tg3_phy_stop(tp);
+
+ tg3_netif_stop(tp);
+
+ tg3_full_lock(tp, 1);
+
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+
+ tg3_set_mtu(dev, tp, new_mtu);
+
+ err = tg3_restart_hw(tp, 0);
+
+ if (!err)
+ tg3_netif_start(tp);
+
+ tg3_full_unlock(tp);
+
+ if (!err)
+ tg3_phy_start(tp);
+
+ return err;
+}
+
+static void tg3_rx_prodring_free(struct tg3 *tp,
+ struct tg3_rx_prodring_set *tpr)
+{
+ int i;
+
+ if (tpr != &tp->napi[0].prodring) {
+ for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
+ i = (i + 1) & tp->rx_std_ring_mask)
+ tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+ tp->rx_pkt_map_sz);
+
+ if (tg3_flag(tp, JUMBO_CAPABLE)) {
+ for (i = tpr->rx_jmb_cons_idx;
+ i != tpr->rx_jmb_prod_idx;
+ i = (i + 1) & tp->rx_jmb_ring_mask) {
+ tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+ TG3_RX_JMB_MAP_SZ);
+ }
+ }
+
+ return;
+ }
+
+ for (i = 0; i <= tp->rx_std_ring_mask; i++)
+ tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+ tp->rx_pkt_map_sz);
+
+ if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
+ for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
+ tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+ TG3_RX_JMB_MAP_SZ);
+ }
+}
+
+/* Initialize rx rings for packet processing.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver. tp->{tx,}lock are held and thus
+ * we may not sleep.
+ */
+static int tg3_rx_prodring_alloc(struct tg3 *tp,
+ struct tg3_rx_prodring_set *tpr)
+{
+ u32 i, rx_pkt_dma_sz;
+
+ tpr->rx_std_cons_idx = 0;
+ tpr->rx_std_prod_idx = 0;
+ tpr->rx_jmb_cons_idx = 0;
+ tpr->rx_jmb_prod_idx = 0;
+
+ if (tpr != &tp->napi[0].prodring) {
+ memset(&tpr->rx_std_buffers[0], 0,
+ TG3_RX_STD_BUFF_RING_SIZE(tp));
+ if (tpr->rx_jmb_buffers)
+ memset(&tpr->rx_jmb_buffers[0], 0,
+ TG3_RX_JMB_BUFF_RING_SIZE(tp));
+ goto done;
+ }
+
+ /* Zero out all descriptors. */
+ memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
+
+ rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
+ if (tg3_flag(tp, 5780_CLASS) &&
+ tp->dev->mtu > ETH_DATA_LEN)
+ rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
+ tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
+
+ /* Initialize invariants of the rings, we only set this
+ * stuff once. This works because the card does not
+ * write into the rx buffer posting rings.
+ */
+ for (i = 0; i <= tp->rx_std_ring_mask; i++) {
+ struct tg3_rx_buffer_desc *rxd;
+
+ rxd = &tpr->rx_std[i];
+ rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
+ rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
+ rxd->opaque = (RXD_OPAQUE_RING_STD |
+ (i << RXD_OPAQUE_INDEX_SHIFT));
+ }
+
+ /* Now allocate fresh SKBs for each rx ring. */
+ for (i = 0; i < tp->rx_pending; i++) {
+ if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
+ netdev_warn(tp->dev,
+ "Using a smaller RX standard ring. Only "
+ "%d out of %d buffers were allocated "
+ "successfully\n", i, tp->rx_pending);
+ if (i == 0)
+ goto initfail;
+ tp->rx_pending = i;
+ break;
+ }
+ }
+
+ if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
+ goto done;
+
+ memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
+
+ if (!tg3_flag(tp, JUMBO_RING_ENABLE))
+ goto done;
+
+ for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
+ struct tg3_rx_buffer_desc *rxd;
+
+ rxd = &tpr->rx_jmb[i].std;
+ rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
+ rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
+ RXD_FLAG_JUMBO;
+ rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
+ (i << RXD_OPAQUE_INDEX_SHIFT));
+ }
+
+ for (i = 0; i < tp->rx_jumbo_pending; i++) {
+ if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
+ netdev_warn(tp->dev,
+ "Using a smaller RX jumbo ring. Only %d "
+ "out of %d buffers were allocated "
+ "successfully\n", i, tp->rx_jumbo_pending);
+ if (i == 0)
+ goto initfail;
+ tp->rx_jumbo_pending = i;
+ break;
+ }
+ }
+
+done:
+ return 0;
+
+initfail:
+ tg3_rx_prodring_free(tp, tpr);
+ return -ENOMEM;
+}
+
+static void tg3_rx_prodring_fini(struct tg3 *tp,
+ struct tg3_rx_prodring_set *tpr)
+{
+ kfree(tpr->rx_std_buffers);
+ tpr->rx_std_buffers = NULL;
+ kfree(tpr->rx_jmb_buffers);
+ tpr->rx_jmb_buffers = NULL;
+ if (tpr->rx_std) {
+ dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
+ tpr->rx_std, tpr->rx_std_mapping);
+ tpr->rx_std = NULL;
+ }
+ if (tpr->rx_jmb) {
+ dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
+ tpr->rx_jmb, tpr->rx_jmb_mapping);
+ tpr->rx_jmb = NULL;
+ }
+}
+
+static int tg3_rx_prodring_init(struct tg3 *tp,
+ struct tg3_rx_prodring_set *tpr)
+{
+ tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
+ GFP_KERNEL);
+ if (!tpr->rx_std_buffers)
+ return -ENOMEM;
+
+ tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_RX_STD_RING_BYTES(tp),
+ &tpr->rx_std_mapping,
+ GFP_KERNEL);
+ if (!tpr->rx_std)
+ goto err_out;
+
+ if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
+ tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
+ GFP_KERNEL);
+ if (!tpr->rx_jmb_buffers)
+ goto err_out;
+
+ tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_RX_JMB_RING_BYTES(tp),
+ &tpr->rx_jmb_mapping,
+ GFP_KERNEL);
+ if (!tpr->rx_jmb)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ tg3_rx_prodring_fini(tp, tpr);
+ return -ENOMEM;
+}
+
+/* Free up pending packets in all rx/tx rings.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver. tp->{tx,}lock is not held and we are not
+ * in an interrupt context and thus may sleep.
+ */
+static void tg3_free_rings(struct tg3 *tp)
+{
+ int i, j;
+
+ for (j = 0; j < tp->irq_cnt; j++) {
+ struct tg3_napi *tnapi = &tp->napi[j];
+
+ tg3_rx_prodring_free(tp, &tnapi->prodring);
+
+ if (!tnapi->tx_buffers)
+ continue;
+
+ for (i = 0; i < TG3_TX_RING_SIZE; i++) {
+ struct sk_buff *skb = tnapi->tx_buffers[i].skb;
+
+ if (!skb)
+ continue;
+
+ tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
+
+ dev_kfree_skb_any(skb);
+ }
+ }
+}
+
+/* Initialize tx/rx rings for packet processing.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver. tp->{tx,}lock are held and thus
+ * we may not sleep.
+ */
+static int tg3_init_rings(struct tg3 *tp)
+{
+ int i;
+
+ /* Free up all the SKBs. */
+ tg3_free_rings(tp);
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ tnapi->last_tag = 0;
+ tnapi->last_irq_tag = 0;
+ tnapi->hw_status->status = 0;
+ tnapi->hw_status->status_tag = 0;
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+
+ tnapi->tx_prod = 0;
+ tnapi->tx_cons = 0;
+ if (tnapi->tx_ring)
+ memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
+
+ tnapi->rx_rcb_ptr = 0;
+ if (tnapi->rx_rcb)
+ memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+
+ if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
+ tg3_free_rings(tp);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shut down.
+ */
+static void tg3_free_consistent(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ if (tnapi->tx_ring) {
+ dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
+ tnapi->tx_ring, tnapi->tx_desc_mapping);
+ tnapi->tx_ring = NULL;
+ }
+
+ kfree(tnapi->tx_buffers);
+ tnapi->tx_buffers = NULL;
+
+ if (tnapi->rx_rcb) {
+ dma_free_coherent(&tp->pdev->dev,
+ TG3_RX_RCB_RING_BYTES(tp),
+ tnapi->rx_rcb,
+ tnapi->rx_rcb_mapping);
+ tnapi->rx_rcb = NULL;
+ }
+
+ tg3_rx_prodring_fini(tp, &tnapi->prodring);
+
+ if (tnapi->hw_status) {
+ dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
+ tnapi->hw_status,
+ tnapi->status_mapping);
+ tnapi->hw_status = NULL;
+ }
+ }
+
+ if (tp->hw_stats) {
+ dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
+ tp->hw_stats, tp->stats_mapping);
+ tp->hw_stats = NULL;
+ }
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shut down. Can sleep.
+ */
+static int tg3_alloc_consistent(struct tg3 *tp)
+{
+ int i;
+
+ tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
+ sizeof(struct tg3_hw_stats),
+ &tp->stats_mapping,
+ GFP_KERNEL);
+ if (!tp->hw_stats)
+ goto err_out;
+
+ memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ struct tg3_hw_status *sblk;
+
+ tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_HW_STATUS_SIZE,
+ &tnapi->status_mapping,
+ GFP_KERNEL);
+ if (!tnapi->hw_status)
+ goto err_out;
+
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+ sblk = tnapi->hw_status;
+
+ if (tg3_rx_prodring_init(tp, &tnapi->prodring))
+ goto err_out;
+
+ /* If multivector TSS is enabled, vector 0 does not handle
+ * tx interrupts. Don't allocate any resources for it.
+ */
+ if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
+ (i && tg3_flag(tp, ENABLE_TSS))) {
+ tnapi->tx_buffers = kzalloc(
+ sizeof(struct tg3_tx_ring_info) *
+ TG3_TX_RING_SIZE, GFP_KERNEL);
+ if (!tnapi->tx_buffers)
+ goto err_out;
+
+ tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_TX_RING_BYTES,
+ &tnapi->tx_desc_mapping,
+ GFP_KERNEL);
+ if (!tnapi->tx_ring)
+ goto err_out;
+ }
+
+ /*
+ * When RSS is enabled, the status block format changes
+ * slightly. The "rx_jumbo_consumer", "reserved",
+ * and "rx_mini_consumer" members get mapped to the
+ * other three rx return ring producer indexes.
+ */
+ switch (i) {
+ default:
+ tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
+ break;
+ case 2:
+ tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
+ break;
+ case 3:
+ tnapi->rx_rcb_prod_idx = &sblk->reserved;
+ break;
+ case 4:
+ tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
+ break;
+ }
+
+ /*
+ * If multivector RSS is enabled, vector 0 does not handle
+ * rx or tx interrupts. Don't allocate any resources for it.
+ */
+ if (!i && tg3_flag(tp, ENABLE_RSS))
+ continue;
+
+ tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_RX_RCB_RING_BYTES(tp),
+ &tnapi->rx_rcb_mapping,
+ GFP_KERNEL);
+ if (!tnapi->rx_rcb)
+ goto err_out;
+
+ memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+ }
+
+ return 0;
+
+err_out:
+ tg3_free_consistent(tp);
+ return -ENOMEM;
+}
+
+#define MAX_WAIT_CNT 1000
+
+/* To stop a block, clear the enable bit and poll till it
+ * clears. tp->lock is held.
+ */
+static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
+{
+ unsigned int i;
+ u32 val;
+
+ if (tg3_flag(tp, 5705_PLUS)) {
+ switch (ofs) {
+ case RCVLSC_MODE:
+ case DMAC_MODE:
+ case MBFREE_MODE:
+ case BUFMGR_MODE:
+ case MEMARB_MODE:
+ /* We can't enable/disable these bits of the
+ * 5705/5750, just say success.
+ */
+ return 0;
+
+ default:
+ break;
+ }
+ }
+
+ val = tr32(ofs);
+ val &= ~enable_bit;
+ tw32_f(ofs, val);
+
+ for (i = 0; i < MAX_WAIT_CNT; i++) {
+ udelay(100);
+ val = tr32(ofs);
+ if ((val & enable_bit) == 0)
+ break;
+ }
+
+ if (i == MAX_WAIT_CNT && !silent) {
+ dev_err(&tp->pdev->dev,
+ "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
+ ofs, enable_bit);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/* tp->lock is held. */
+static int tg3_abort_hw(struct tg3 *tp, int silent)
+{
+ int i, err;
+
+ tg3_disable_ints(tp);
+
+ tp->rx_mode &= ~RX_MODE_ENABLE;
+ tw32_f(MAC_RX_MODE, tp->rx_mode);
+ udelay(10);
+
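+ /* Quiesce the receive-side blocks first, then the send-side and
+ * DMA engines; the buffer manager and memory arbiter go last.
+ */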
+ err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
+
+ err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
+
+ tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ tp->tx_mode &= ~TX_MODE_ENABLE;
+ tw32_f(MAC_TX_MODE, tp->tx_mode);
+
+ for (i = 0; i < MAX_WAIT_CNT; i++) {
+ udelay(100);
+ if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
+ break;
+ }
+ if (i >= MAX_WAIT_CNT) {
+ dev_err(&tp->pdev->dev,
+ "%s timed out, TX_MODE_ENABLE will not clear "
+ "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
+ err |= -ENODEV;
+ }
+
+ err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
+
+ tw32(FTQ_RESET, 0xffffffff);
+ tw32(FTQ_RESET, 0x00000000);
+
+ err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
+ err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ if (tnapi->hw_status)
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+ }
+ if (tp->hw_stats)
+ memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+
+ return err;
+}
+
+/* Save PCI command register before chip reset */
+static void tg3_save_pci_state(struct tg3 *tp)
+{
+ pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
+}
+
+/* Restore PCI state after chip reset */
+static void tg3_restore_pci_state(struct tg3 *tp)
+{
+ u32 val;
+
+ /* Re-enable indirect register accesses. */
+ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+ tp->misc_host_ctrl);
+
+ /* Set MAX PCI retry to zero. */
+ val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+ tg3_flag(tp, PCIX_MODE))
+ val |= PCISTATE_RETRY_SAME_DMA;
+ /* Allow reads and writes to the APE register and memory space. */
+ if (tg3_flag(tp, ENABLE_APE))
+ val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
+ PCISTATE_ALLOW_APE_SHMEM_WR |
+ PCISTATE_ALLOW_APE_PSPACE_WR;
+ pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
+
+ pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
+ if (tg3_flag(tp, PCI_EXPRESS))
+ pcie_set_readrq(tp->pdev, tp->pcie_readrq);
+ else {
+ pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
+ tp->pci_cacheline_sz);
+ pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+ tp->pci_lat_timer);
+ }
+ }
+
+ /* Make sure PCI-X relaxed ordering bit is clear. */
+ if (tg3_flag(tp, PCIX_MODE)) {
+ u16 pcix_cmd;
+
+ pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
+ &pcix_cmd);
+ pcix_cmd &= ~PCI_X_CMD_ERO;
+ pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
+ pcix_cmd);
+ }
+
+ if (tg3_flag(tp, 5780_CLASS)) {
+
+ /* Chip reset on 5780 will reset the MSI enable bit,
+ * so we need to restore it.
+ */
+ if (tg3_flag(tp, USING_MSI)) {
+ u16 ctrl;
+
+ pci_read_config_word(tp->pdev,
+ tp->msi_cap + PCI_MSI_FLAGS,
+ &ctrl);
+ pci_write_config_word(tp->pdev,
+ tp->msi_cap + PCI_MSI_FLAGS,
+ ctrl | PCI_MSI_FLAGS_ENABLE);
+ val = tr32(MSGINT_MODE);
+ tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
+ }
+ }
+}
+
+/* tp->lock is held. */
+static int tg3_chip_reset(struct tg3 *tp)
+{
+ u32 val;
+ void (*write_op)(struct tg3 *, u32, u32);
+ int i, err;
+
+ tg3_nvram_lock(tp);
+
+ tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
+
+ /* No matching tg3_nvram_unlock() after this because
+ * chip reset below will undo the nvram lock.
+ */
+ tp->nvram_lock_cnt = 0;
+
+ /* GRC_MISC_CFG core clock reset will clear the memory
+ * enable bit in PCI register 4 and the MSI enable bit
+ * on some chips, so we save relevant registers here.
+ */
+ tg3_save_pci_state(tp);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ tg3_flag(tp, 5755_PLUS))
+ tw32(GRC_FASTBOOT_PC, 0);
+
+ /*
+ * We must avoid the readl() that normally takes place.
+ * It locks up machines and causes machine checks, among
+ * other fun things. So, temporarily disable the 5701
+ * hardware workaround while we do the reset.
+ */
+ write_op = tp->write32;
+ if (write_op == tg3_write_flush_reg32)
+ tp->write32 = tg3_write32;
+
+ /* Prevent the irq handler from reading or writing PCI registers
+ * during chip reset when the memory enable bit in the PCI command
+ * register may be cleared. The chip does not generate interrupts
+ * at this time, but the irq handler may still be called due to irq
+ * sharing or irqpoll.
+ */
+ tg3_flag_set(tp, CHIP_RESETTING);
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ if (tnapi->hw_status) {
+ tnapi->hw_status->status = 0;
+ tnapi->hw_status->status_tag = 0;
+ }
+ tnapi->last_tag = 0;
+ tnapi->last_irq_tag = 0;
+ }
+ smp_mb();
+
+ for (i = 0; i < tp->irq_cnt; i++)
+ synchronize_irq(tp->napi[i].irq_vec);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
+ tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
+ }
+
+ /* do the reset */
+ val = GRC_MISC_CFG_CORECLK_RESET;
+
+ if (tg3_flag(tp, PCI_EXPRESS)) {
+ /* Force PCIe 1.0a mode */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ !tg3_flag(tp, 57765_PLUS) &&
+ tr32(TG3_PCIE_PHY_TSTCTL) ==
+ (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
+ tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
+
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
+ tw32(GRC_MISC_CFG, (1 << 29));
+ val |= (1 << 29);
+ }
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
+ tw32(GRC_VCPU_EXT_CTRL,
+ tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
+ }
+
+ /* Manage gphy power for all CPMU-absent PCIe devices. */
+ if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
+ val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
+
+ tw32(GRC_MISC_CFG, val);
+
+ /* restore 5701 hardware bug workaround write method */
+ tp->write32 = write_op;
+
+ /* Unfortunately, we have to delay before the PCI read back.
+ * Some 575X chips will not even respond to a PCI cfg access
+ * when the reset command is given to the chip.
+ *
+ * How do these hardware designers expect things to work
+ * properly if the PCI write is posted for a long period
+ * of time? It is always necessary to have some method of
+ * reading a register back in order to push out the posted
+ * write that performs the reset.
+ *
+ * For most tg3 variants the trick below has worked.
+ * Ho hum...
+ */
+ udelay(120);
+
+ /* Flush PCI posted writes. The normal MMIO registers
+ * are inaccessible at this time, so this is the only
+ * way to do this reliably (actually, this is no longer
+ * the case, see above). I tried to use indirect
+ * register read/write but this upset some 5701 variants.
+ */
+ pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
+
+ udelay(120);
+
+ if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
+ u16 val16;
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
+ int i;
+ u32 cfg_val;
+
+ /* Wait for link training to complete. */
+ for (i = 0; i < 5000; i++)
+ udelay(100);
+
+ pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
+ pci_write_config_dword(tp->pdev, 0xc4,
+ cfg_val | (1 << 15));
+ }
+
+ /* Clear the "no snoop" and "relaxed ordering" bits. */
+ pci_read_config_word(tp->pdev,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
+ &val16);
+ val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
+ PCI_EXP_DEVCTL_NOSNOOP_EN);
+ /*
+ * Older PCIe devices only support the 128 byte
+ * MPS setting. Enforce the restriction.
+ */
+ if (!tg3_flag(tp, CPMU_PRESENT))
+ val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ pci_write_config_word(tp->pdev,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
+ val16);
+
+ pcie_set_readrq(tp->pdev, tp->pcie_readrq);
+
+ /* Clear error status */
+ pci_write_config_word(tp->pdev,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
+ PCI_EXP_DEVSTA_CED |
+ PCI_EXP_DEVSTA_NFED |
+ PCI_EXP_DEVSTA_FED |
+ PCI_EXP_DEVSTA_URD);
+ }
+
+ tg3_restore_pci_state(tp);
+
+ tg3_flag_clear(tp, CHIP_RESETTING);
+ tg3_flag_clear(tp, ERROR_PROCESSED);
+
+ val = 0;
+ if (tg3_flag(tp, 5780_CLASS))
+ val = tr32(MEMARB_MODE);
+ tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
+ tg3_stop_fw(tp);
+ tw32(0x5000, 0x400);
+ }
+
+ tw32(GRC_MODE, tp->grc_mode);
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
+ val = tr32(0xc4);
+
+ tw32(0xc4, val | (1 << 15));
+ }
+
+ if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
+ tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
+ tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
+ }
+
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+ tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
+ val = tp->mac_mode;
+ } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
+ tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
+ val = tp->mac_mode;
+ } else
+ val = 0;
+
+ tw32_f(MAC_MODE, val);
+ udelay(40);
+
+ tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
+
+ err = tg3_poll_fw(tp);
+ if (err)
+ return err;
+
+ tg3_mdio_start(tp);
+
+ if (tg3_flag(tp, PCI_EXPRESS) &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ !tg3_flag(tp, 57765_PLUS)) {
+ val = tr32(0x7c00);
+
+ tw32(0x7c00, val | (1 << 25));
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ val = tr32(TG3_CPMU_CLCK_ORIDE);
+ tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
+ }
+
+ /* Reprobe ASF enable state. */
+ tg3_flag_clear(tp, ENABLE_ASF);
+ tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
+ tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
+ if (val == NIC_SRAM_DATA_SIG_MAGIC) {
+ u32 nic_cfg;
+
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
+ if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
+ tg3_flag_set(tp, ENABLE_ASF);
+ tp->last_event_jiffies = jiffies;
+ if (tg3_flag(tp, 5750_PLUS))
+ tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
+ }
+ }
+
+ return 0;
+}
+
+/* tp->lock is held. */
+static int tg3_halt(struct tg3 *tp, int kind, int silent)
+{
+ int err;
+
+ tg3_stop_fw(tp);
+
+ tg3_write_sig_pre_reset(tp, kind);
+
+ tg3_abort_hw(tp, silent);
+ err = tg3_chip_reset(tp);
+
+ __tg3_set_mac_addr(tp, 0);
+
+ tg3_write_sig_legacy(tp, kind);
+ tg3_write_sig_post_reset(tp, kind);
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tg3_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ int err = 0, skip_mac_1 = 0;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ if (!netif_running(dev))
+ return 0;
+
+ if (tg3_flag(tp, ENABLE_ASF)) {
+ u32 addr0_high, addr0_low, addr1_high, addr1_low;
+
+ addr0_high = tr32(MAC_ADDR_0_HIGH);
+ addr0_low = tr32(MAC_ADDR_0_LOW);
+ addr1_high = tr32(MAC_ADDR_1_HIGH);
+ addr1_low = tr32(MAC_ADDR_1_LOW);
+
+ /* Skip MAC addr 1 if ASF is using it. */
+ if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
+ !(addr1_high == 0 && addr1_low == 0))
+ skip_mac_1 = 1;
+ }
+ spin_lock_bh(&tp->lock);
+ __tg3_set_mac_addr(tp, skip_mac_1);
+ spin_unlock_bh(&tp->lock);
+
+ return err;
+}
+
+/* tp->lock is held. */
+static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
+ dma_addr_t mapping, u32 maxlen_flags,
+ u32 nic_addr)
+{
+ tg3_write_mem(tp,
+ (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
+ ((u64) mapping >> 32));
+ tg3_write_mem(tp,
+ (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
+ ((u64) mapping & 0xffffffff));
+ tg3_write_mem(tp,
+ (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
+ maxlen_flags);
+
+ if (!tg3_flag(tp, 5705_PLUS))
+ tg3_write_mem(tp,
+ (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
+ nic_addr);
+}
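+
+/* Each TG3_BDINFO control block written above is four 32-bit words in
+ * NIC SRAM: the high and low halves of the ring's host DMA address, a
+ * (max length << 16) | flags word, and, on pre-5705 chips only, the
+ * NIC SRAM address of the descriptors themselves.
+ */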
+
+static void __tg3_set_rx_mode(struct net_device *);
+static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+ int i;
+
+ if (!tg3_flag(tp, ENABLE_TSS)) {
+ tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
+ tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
+ tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
+ } else {
+ tw32(HOSTCC_TXCOL_TICKS, 0);
+ tw32(HOSTCC_TXMAX_FRAMES, 0);
+ tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+ }
+
+ if (!tg3_flag(tp, ENABLE_RSS)) {
+ tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
+ tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
+ tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+ } else {
+ tw32(HOSTCC_RXCOL_TICKS, 0);
+ tw32(HOSTCC_RXMAX_FRAMES, 0);
+ tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
+ }
+
+ if (!tg3_flag(tp, 5705_PLUS)) {
+ u32 val = ec->stats_block_coalesce_usecs;
+
+ tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
+ tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+
+ if (!netif_carrier_ok(tp->dev))
+ val = 0;
+
+ tw32(HOSTCC_STAT_COAL_TICKS, val);
+ }
+
+ for (i = 0; i < tp->irq_cnt - 1; i++) {
+ u32 reg;
+
+ reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
+ tw32(reg, ec->rx_coalesce_usecs);
+ reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
+ tw32(reg, ec->rx_max_coalesced_frames);
+ reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
+ tw32(reg, ec->rx_max_coalesced_frames_irq);
+
+ if (tg3_flag(tp, ENABLE_TSS)) {
+ reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
+ tw32(reg, ec->tx_coalesce_usecs);
+ reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
+ tw32(reg, ec->tx_max_coalesced_frames);
+ reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
+ tw32(reg, ec->tx_max_coalesced_frames_irq);
+ }
+ }
+
+ for (; i < tp->irq_max - 1; i++) {
+ tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+
+ if (tg3_flag(tp, ENABLE_TSS)) {
+ tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+ }
+ }
+}
+
+/* tp->lock is held. */
+static void tg3_rings_reset(struct tg3 *tp)
+{
+ int i;
+ u32 stblk, txrcb, rxrcb, limit;
+ struct tg3_napi *tnapi = &tp->napi[0];
+
+ /* Disable all transmit rings but the first. */
+ if (!tg3_flag(tp, 5705_PLUS))
+ limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
+ else if (tg3_flag(tp, 5717_PLUS))
+ limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
+ else
+ limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
+
+ for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
+ txrcb < limit; txrcb += TG3_BDINFO_SIZE)
+ tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
+ BDINFO_FLAGS_DISABLED);
+
+
+ /* Disable all receive return rings but the first. */
+ if (tg3_flag(tp, 5717_PLUS))
+ limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
+ else if (!tg3_flag(tp, 5705_PLUS))
+ limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
+ else
+ limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
+
+ for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
+ rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
+ tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
+ BDINFO_FLAGS_DISABLED);
+
+ /* Disable interrupts */
+ tw32_mailbox_f(tp->napi[0].int_mbox, 1);
+ tp->napi[0].chk_msi_cnt = 0;
+ tp->napi[0].last_rx_cons = 0;
+ tp->napi[0].last_tx_cons = 0;
+
+ /* Zero mailbox registers. */
+ if (tg3_flag(tp, SUPPORT_MSIX)) {
+ for (i = 1; i < tp->irq_max; i++) {
+ tp->napi[i].tx_prod = 0;
+ tp->napi[i].tx_cons = 0;
+ if (tg3_flag(tp, ENABLE_TSS))
+ tw32_mailbox(tp->napi[i].prodmbox, 0);
+ tw32_rx_mbox(tp->napi[i].consmbox, 0);
+ tw32_mailbox_f(tp->napi[i].int_mbox, 1);
+ tp->napi[i].chk_msi_cnt = 0;
+ tp->napi[i].last_rx_cons = 0;
+ tp->napi[i].last_tx_cons = 0;
+ }
+ if (!tg3_flag(tp, ENABLE_TSS))
+ tw32_mailbox(tp->napi[0].prodmbox, 0);
+ } else {
+ tp->napi[0].tx_prod = 0;
+ tp->napi[0].tx_cons = 0;
+ tw32_mailbox(tp->napi[0].prodmbox, 0);
+ tw32_rx_mbox(tp->napi[0].consmbox, 0);
+ }
+
+ /* Make sure the NIC-based send BD rings are disabled. */
+ if (!tg3_flag(tp, 5705_PLUS)) {
+ u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+ for (i = 0; i < 16; i++)
+ tw32_tx_mbox(mbox + i * 8, 0);
+ }
+
+ txrcb = NIC_SRAM_SEND_RCB;
+ rxrcb = NIC_SRAM_RCV_RET_RCB;
+
+ /* Clear status block in ram. */
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+
+ /* Set status block DMA address */
+ tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
+ ((u64) tnapi->status_mapping >> 32));
+ tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
+ ((u64) tnapi->status_mapping & 0xffffffff));
+
+ if (tnapi->tx_ring) {
+ tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
+ (TG3_TX_RING_SIZE <<
+ BDINFO_FLAGS_MAXLEN_SHIFT),
+ NIC_SRAM_TX_BUFFER_DESC);
+ txrcb += TG3_BDINFO_SIZE;
+ }
+
+ if (tnapi->rx_rcb) {
+ tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
+ (tp->rx_ret_ring_mask + 1) <<
+ BDINFO_FLAGS_MAXLEN_SHIFT, 0);
+ rxrcb += TG3_BDINFO_SIZE;
+ }
+
+ stblk = HOSTCC_STATBLCK_RING1;
+
+ for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
+ u64 mapping = (u64)tnapi->status_mapping;
+ tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
+ tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
+
+ /* Clear status block in ram. */
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+
+ if (tnapi->tx_ring) {
+ tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
+ (TG3_TX_RING_SIZE <<
+ BDINFO_FLAGS_MAXLEN_SHIFT),
+ NIC_SRAM_TX_BUFFER_DESC);
+ txrcb += TG3_BDINFO_SIZE;
+ }
+
+ tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
+ ((tp->rx_ret_ring_mask + 1) <<
+ BDINFO_FLAGS_MAXLEN_SHIFT), 0);
+
+ stblk += 8;
+ rxrcb += TG3_BDINFO_SIZE;
+ }
+}
+
+static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
+{
+ u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
+
+ if (!tg3_flag(tp, 5750_PLUS) ||
+ tg3_flag(tp, 5780_CLASS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+ bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
+ else
+ bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
+
+ nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
+ host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
+
+ val = min(nic_rep_thresh, host_rep_thresh);
+ tw32(RCVBDI_STD_THRESH, val);
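+
+ /* Worked example (rx_pending value assumed for illustration):
+ * with tp->rx_pending == 200, host_rep_thresh is
+ * max_t(u32, 200 / 8, 1) == 25, so the value programmed above
+ * is min(nic_rep_thresh, 25).
+ */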
+
+ if (tg3_flag(tp, 57765_PLUS))
+ tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
+
+ if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
+ return;
+
+ if (!tg3_flag(tp, 5705_PLUS))
+ bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
+ else
+ bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
+
+ host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
+
+ val = min(bdcache_maxcnt / 2, host_rep_thresh);
+ tw32(RCVBDI_JUMBO_THRESH, val);
+
+ if (tg3_flag(tp, 57765_PLUS))
+ tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
+}
+
+/* tp->lock is held. */
+static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
+{
+ u32 val, rdmac_mode;
+ int i, err, limit;
+ struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
+
+ tg3_disable_ints(tp);
+
+ tg3_stop_fw(tp);
+
+ tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
+
+ if (tg3_flag(tp, INIT_COMPLETE))
+ tg3_abort_hw(tp, 1);
+
+ /* Enable MAC control of LPI */
+ if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
+ tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
+ TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
+ TG3_CPMU_EEE_LNKIDL_UART_IDL);
+
+ tw32_f(TG3_CPMU_EEE_CTRL,
+ TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
+
+ val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
+ TG3_CPMU_EEEMD_LPI_IN_TX |
+ TG3_CPMU_EEEMD_LPI_IN_RX |
+ TG3_CPMU_EEEMD_EEE_ENABLE;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+ val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
+
+ if (tg3_flag(tp, ENABLE_APE))
+ val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
+
+ tw32_f(TG3_CPMU_EEE_MODE, val);
+
+ tw32_f(TG3_CPMU_EEE_DBTMR1,
+ TG3_CPMU_DBTMR1_PCIEXIT_2047US |
+ TG3_CPMU_DBTMR1_LNKIDLE_2047US);
+
+ tw32_f(TG3_CPMU_EEE_DBTMR2,
+ TG3_CPMU_DBTMR2_APE_TX_2047US |
+ TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
+ }
+
+ if (reset_phy)
+ tg3_phy_reset(tp);
+
+ err = tg3_chip_reset(tp);
+ if (err)
+ return err;
+
+ tg3_write_sig_legacy(tp, RESET_KIND_INIT);
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
+ val = tr32(TG3_CPMU_CTRL);
+ val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
+ tw32(TG3_CPMU_CTRL, val);
+
+ val = tr32(TG3_CPMU_LSPD_10MB_CLK);
+ val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
+ val |= CPMU_LSPD_10MB_MACCLK_6_25;
+ tw32(TG3_CPMU_LSPD_10MB_CLK, val);
+
+ val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
+ val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
+ val |= CPMU_LNK_AWARE_MACCLK_6_25;
+ tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
+
+ val = tr32(TG3_CPMU_HST_ACC);
+ val &= ~CPMU_HST_ACC_MACCLK_MASK;
+ val |= CPMU_HST_ACC_MACCLK_6_25;
+ tw32(TG3_CPMU_HST_ACC, val);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
+ val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
+ PCIE_PWR_MGMT_L1_THRESH_4MS;
+ tw32(PCIE_PWR_MGMT_THRESH, val);
+
+ val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
+ tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
+
+ tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
+
+ val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
+ tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
+ }
+
+ if (tg3_flag(tp, L1PLLPD_EN)) {
+ u32 grc_mode = tr32(GRC_MODE);
+
+ /* Access the lower 1K of PL PCIE block registers. */
+ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+ tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+
+ val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
+ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
+ val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
+
+ tw32(GRC_MODE, grc_mode);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ u32 grc_mode = tr32(GRC_MODE);
+
+ /* Access the lower 1K of PL PCIE block registers. */
+ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+ tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+
+ val = tr32(TG3_PCIE_TLDLPL_PORT +
+ TG3_PCIE_PL_LO_PHYCTL5);
+ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
+ val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
+
+ tw32(GRC_MODE, grc_mode);
+ }
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
+ u32 grc_mode = tr32(GRC_MODE);
+
+ /* Access the lower 1K of DL PCIE block registers. */
+ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+ tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
+
+ val = tr32(TG3_PCIE_TLDLPL_PORT +
+ TG3_PCIE_DL_LO_FTSMAX);
+ val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
+ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
+ val | TG3_PCIE_DL_LO_FTSMAX_VAL);
+
+ tw32(GRC_MODE, grc_mode);
+ }
+
+ val = tr32(TG3_CPMU_LSPD_10MB_CLK);
+ val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
+ val |= CPMU_LSPD_10MB_MACCLK_6_25;
+ tw32(TG3_CPMU_LSPD_10MB_CLK, val);
+ }
+
+ /* This works around an issue with Athlon chipsets on
+ * B3 tigon3 silicon. This bit has no effect on any
+ * other revision. But do not set this on PCI Express
+ * chips and don't even touch the clocks if the CPMU is present.
+ */
+ if (!tg3_flag(tp, CPMU_PRESENT)) {
+ if (!tg3_flag(tp, PCI_EXPRESS))
+ tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
+ tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
+ }
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+ tg3_flag(tp, PCIX_MODE)) {
+ val = tr32(TG3PCI_PCISTATE);
+ val |= PCISTATE_RETRY_SAME_DMA;
+ tw32(TG3PCI_PCISTATE, val);
+ }
+
+ if (tg3_flag(tp, ENABLE_APE)) {
+ /* Allow reads and writes to the
+ * APE register and memory space.
+ */
+ val = tr32(TG3PCI_PCISTATE);
+ val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
+ PCISTATE_ALLOW_APE_SHMEM_WR |
+ PCISTATE_ALLOW_APE_PSPACE_WR;
+ tw32(TG3PCI_PCISTATE, val);
+ }
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
+ /* Enable some hw fixes. */
+ val = tr32(TG3PCI_MSI_DATA);
+ val |= (1 << 26) | (1 << 28) | (1 << 29);
+ tw32(TG3PCI_MSI_DATA, val);
+ }
+
+ /* Descriptor ring init may make accesses to the
+ * NIC SRAM area to set up the TX descriptors, so we
+ * can only do this after the hardware has been
+ * successfully reset.
+ */
+ err = tg3_init_rings(tp);
+ if (err)
+ return err;
+
+ if (tg3_flag(tp, 57765_PLUS)) {
+ val = tr32(TG3PCI_DMA_RW_CTRL) &
+ ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
+ val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+ val |= DMA_RWCTRL_TAGGED_STAT_WA;
+ tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
+ /* This value is determined during the probe-time DMA
+ * engine test, tg3_test_dma.
+ */
+ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+ }
+
+ tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
+ GRC_MODE_4X_NIC_SEND_RINGS |
+ GRC_MODE_NO_TX_PHDR_CSUM |
+ GRC_MODE_NO_RX_PHDR_CSUM);
+ tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
+
+ /* Pseudo-header checksum is done by hardware logic and not
+ * the offload processors, so make the chip do the pseudo-
+ * header checksums on receive. For transmit it is more
+ * convenient to do the pseudo-header checksum in software,
+ * as Linux already does that for us in all cases.
+ */
+ tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
+
+ tw32(GRC_MODE,
+ tp->grc_mode |
+ (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
+
+ /* Set up the timer prescaler register. The clock is always 66 MHz. */
+ val = tr32(GRC_MISC_CFG);
+ val &= ~0xff;
+ val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
+ tw32(GRC_MISC_CFG, val);
+
+ /* Initialize MBUF/DESC pool. */
+ if (tg3_flag(tp, 5750_PLUS)) {
+ /* Do nothing. */
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
+ tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
+ else
+ tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
+ tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
+ tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
+ } else if (tg3_flag(tp, TSO_CAPABLE)) {
+ int fw_len;
+
+ fw_len = tp->fw_len;
+ fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
+ tw32(BUFMGR_MB_POOL_ADDR,
+ NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
+ tw32(BUFMGR_MB_POOL_SIZE,
+ NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
+ }
+
+ if (tp->dev->mtu <= ETH_DATA_LEN) {
+ tw32(BUFMGR_MB_RDMA_LOW_WATER,
+ tp->bufmgr_config.mbuf_read_dma_low_water);
+ tw32(BUFMGR_MB_MACRX_LOW_WATER,
+ tp->bufmgr_config.mbuf_mac_rx_low_water);
+ tw32(BUFMGR_MB_HIGH_WATER,
+ tp->bufmgr_config.mbuf_high_water);
+ } else {
+ tw32(BUFMGR_MB_RDMA_LOW_WATER,
+ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
+ tw32(BUFMGR_MB_MACRX_LOW_WATER,
+ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
+ tw32(BUFMGR_MB_HIGH_WATER,
+ tp->bufmgr_config.mbuf_high_water_jumbo);
+ }
+ tw32(BUFMGR_DMA_LOW_WATER,
+ tp->bufmgr_config.dma_low_water);
+ tw32(BUFMGR_DMA_HIGH_WATER,
+ tp->bufmgr_config.dma_high_water);
+
+ val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ val |= BUFMGR_MODE_NO_TX_UNDERRUN;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
+ val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
+ tw32(BUFMGR_MODE, val);
+ for (i = 0; i < 2000; i++) {
+ if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
+ break;
+ udelay(10);
+ }
+ if (i >= 2000) {
+ netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
+ return -ENODEV;
+ }
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
+ tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
+
+ tg3_setup_rxbd_thresholds(tp);
+
+ /* Initialize TG3_BDINFOs at:
+ * RCVDBDI_STD_BD: standard eth size rx ring
+ * RCVDBDI_JUMBO_BD: jumbo frame rx ring
+ * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
+ *
+ * like so:
+ * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
+ * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
+ * ring attribute flags
+ * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
+ *
+ * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
+ * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
+ *
+ * The size of each ring is fixed in the firmware, but the location is
+ * configurable.
+ */
+ tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
+ ((u64) tpr->rx_std_mapping >> 32));
+ tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
+ ((u64) tpr->rx_std_mapping & 0xffffffff));
+ if (!tg3_flag(tp, 5717_PLUS))
+ tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
+ NIC_SRAM_RX_BUFFER_DESC);
+
+ /* Disable the mini ring */
+ if (!tg3_flag(tp, 5705_PLUS))
+ tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
+ BDINFO_FLAGS_DISABLED);
+
+ /* Program the jumbo buffer descriptor ring control
+ * blocks on those devices that have them.
+ */
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+ (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
+
+ if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
+ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
+ ((u64) tpr->rx_jmb_mapping >> 32));
+ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
+ ((u64) tpr->rx_jmb_mapping & 0xffffffff));
+ val = TG3_RX_JMB_RING_SIZE(tp) <<
+ BDINFO_FLAGS_MAXLEN_SHIFT;
+ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
+ val | BDINFO_FLAGS_USE_EXT_RECV);
+ if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
+ NIC_SRAM_RX_JUMBO_BUFFER_DESC);
+ } else {
+ tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
+ BDINFO_FLAGS_DISABLED);
+ }
+
+ if (tg3_flag(tp, 57765_PLUS)) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ val = TG3_RX_STD_MAX_SIZE_5700;
+ else
+ val = TG3_RX_STD_MAX_SIZE_5717;
+ val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
+ val |= (TG3_RX_STD_DMA_SZ << 2);
+ } else
+ val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
+ } else
+ val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
+
+ tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
+
+ tpr->rx_std_prod_idx = tp->rx_pending;
+ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
+
+ tpr->rx_jmb_prod_idx =
+ tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
+ tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
+
+ tg3_rings_reset(tp);
+
+ /* Initialize MAC address and backoff seed. */
+ __tg3_set_mac_addr(tp, 0);
+
+ /* MTU + ethernet header + FCS + optional VLAN tag */
+ tw32(MAC_RX_MTU_SIZE,
+ tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+
+ /* The slot time is changed by tg3_setup_phy if we
+ * run at gigabit with half duplex.
+ */
+ val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+ (6 << TX_LENGTHS_IPG_SHIFT) |
+ (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ val |= tr32(MAC_TX_LENGTHS) &
+ (TX_LENGTHS_JMB_FRM_LEN_MSK |
+ TX_LENGTHS_CNT_DWN_VAL_MSK);
+
+ tw32(MAC_TX_LENGTHS, val);
+
+ /* Receive rules. */
+ tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
+ tw32(RCVLPC_CONFIG, 0x0181);
+
+ /* Calculate the RDMAC_MODE setting early; we need it to
+ * determine the RCVLPC_STATE_ENABLE mask.
+ */
+ rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
+ RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
+ RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
+ RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
+ RDMAC_MODE_LNGREAD_ENAB);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
+ RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
+ RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+ if (tg3_flag(tp, TSO_CAPABLE) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
+ } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
+ !tg3_flag(tp, IS_5788)) {
+ rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
+ }
+ }
+
+ if (tg3_flag(tp, PCI_EXPRESS))
+ rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
+
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3))
+ rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
+
+ if (tg3_flag(tp, 57765_PLUS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ tg3_flag(tp, 57765_PLUS)) {
+ val = tr32(TG3_RDMA_RSRVCTRL_REG);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
+ TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
+ TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
+ val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
+ TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
+ TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
+ }
+ tw32(TG3_RDMA_RSRVCTRL_REG,
+ val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
+ tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
+ TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
+ TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
+ }
+
+ /* Receive/send statistics. */
+ if (tg3_flag(tp, 5750_PLUS)) {
+ val = tr32(RCVLPC_STATS_ENABLE);
+ val &= ~RCVLPC_STATSENAB_DACK_FIX;
+ tw32(RCVLPC_STATS_ENABLE, val);
+ } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
+ tg3_flag(tp, TSO_CAPABLE)) {
+ val = tr32(RCVLPC_STATS_ENABLE);
+ val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
+ tw32(RCVLPC_STATS_ENABLE, val);
+ } else {
+ tw32(RCVLPC_STATS_ENABLE, 0xffffff);
+ }
+ tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
+ tw32(SNDDATAI_STATSENAB, 0xffffff);
+ tw32(SNDDATAI_STATSCTRL,
+ (SNDDATAI_SCTRL_ENABLE |
+ SNDDATAI_SCTRL_FASTUPD));
+
+ /* Set up the host coalescing engine. */
+ tw32(HOSTCC_MODE, 0);
+ for (i = 0; i < 2000; i++) {
+ if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
+ break;
+ udelay(10);
+ }
+
+ __tg3_set_coalesce(tp, &tp->coal);
+
+ if (!tg3_flag(tp, 5705_PLUS)) {
+ /* Status/statistics block address. See tg3_timer,
+ * the tg3_periodic_fetch_stats call there, and
+ * tg3_get_stats to see how this works for 5705/5750 chips.
+ */
+ tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
+ ((u64) tp->stats_mapping >> 32));
+ tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
+ ((u64) tp->stats_mapping & 0xffffffff));
+ tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
+
+ tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
+
+ /* Clear statistics and status block memory areas */
+ for (i = NIC_SRAM_STATS_BLK;
+ i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
+ i += sizeof(u32)) {
+ tg3_write_mem(tp, i, 0);
+ udelay(40);
+ }
+ }
+
+ tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
+
+ tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
+ tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
+ if (!tg3_flag(tp, 5705_PLUS))
+ tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
+
+ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
+ tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+ /* reset to prevent intermittently losing the 1st rx packet */
+ tw32_f(MAC_RX_MODE, RX_MODE_RESET);
+ udelay(10);
+ }
+
+ tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
+ MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
+ MAC_MODE_FHDE_ENABLE;
+ if (tg3_flag(tp, ENABLE_APE))
+ tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+ if (!tg3_flag(tp, 5705_PLUS) &&
+ !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
+ tp->mac_mode |= MAC_MODE_LINK_POLARITY;
+ tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
+ udelay(40);
+
+ /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
+ * If TG3_FLAG_IS_NIC is zero, we should read the
+ * register to preserve the GPIO settings for LOMs. The GPIOs,
+ * whether used as inputs or outputs, are set by boot code after
+ * reset.
+ */
+ if (!tg3_flag(tp, IS_NIC)) {
+ u32 gpio_mask;
+
+ gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
+ GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
+ GRC_LCLCTRL_GPIO_OUTPUT3;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
+
+ tp->grc_local_ctrl &= ~gpio_mask;
+ tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
+
+ /* GPIO1 must be driven high for eeprom write protect */
+ if (tg3_flag(tp, EEPROM_WRITE_PROT))
+ tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OUTPUT1);
+ }
+ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
+ udelay(100);
+
+ if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
+ val = tr32(MSGINT_MODE);
+ val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
+ if (!tg3_flag(tp, 1SHOT_MSI))
+ val |= MSGINT_MODE_ONE_SHOT_DISABLE;
+ tw32(MSGINT_MODE, val);
+ }
+
+ if (!tg3_flag(tp, 5705_PLUS)) {
+ tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
+ udelay(40);
+ }
+
+ val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
+ WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
+ WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
+ WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
+ WDMAC_MODE_LNGREAD_ENAB);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+ if (tg3_flag(tp, TSO_CAPABLE) &&
+ (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
+ /* nothing */
+ } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
+ !tg3_flag(tp, IS_5788)) {
+ val |= WDMAC_MODE_RX_ACCEL;
+ }
+ }
+
+ /* Enable host coalescing bug fix */
+ if (tg3_flag(tp, 5755_PLUS))
+ val |= WDMAC_MODE_STATUS_TAG_FIX;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ val |= WDMAC_MODE_BURST_ALL_DATA;
+
+ tw32_f(WDMAC_MODE, val);
+ udelay(40);
+
+ if (tg3_flag(tp, PCIX_MODE)) {
+ u16 pcix_cmd;
+
+ pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
+ &pcix_cmd);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
+ pcix_cmd &= ~PCI_X_CMD_MAX_READ;
+ pcix_cmd |= PCI_X_CMD_READ_2K;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
+ pcix_cmd |= PCI_X_CMD_READ_2K;
+ }
+ pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
+ pcix_cmd);
+ }
+
+ tw32_f(RDMAC_MODE, rdmac_mode);
+ udelay(40);
+
+ tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
+ if (!tg3_flag(tp, 5705_PLUS))
+ tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ tw32(SNDDATAC_MODE,
+ SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
+ else
+ tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
+
+ tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
+ tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
+ val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
+ if (tg3_flag(tp, LRG_PROD_RING_CAP))
+ val |= RCVDBDI_MODE_LRG_RING_SZ;
+ tw32(RCVDBDI_MODE, val);
+ tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3))
+ tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
+ val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
+ if (tg3_flag(tp, ENABLE_TSS))
+ val |= SNDBDI_MODE_MULTI_TXQ_EN;
+ tw32(SNDBDI_MODE, val);
+ tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+ err = tg3_load_5701_a0_firmware_fix(tp);
+ if (err)
+ return err;
+ }
+
+ if (tg3_flag(tp, TSO_CAPABLE)) {
+ err = tg3_load_tso_firmware(tp);
+ if (err)
+ return err;
+ }
+
+ tp->tx_mode = TX_MODE_ENABLE;
+
+ if (tg3_flag(tp, 5755_PLUS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
+ tp->tx_mode &= ~val;
+ tp->tx_mode |= tr32(MAC_TX_MODE) & val;
+ }
+
+ tw32_f(MAC_TX_MODE, tp->tx_mode);
+ udelay(100);
+
+ if (tg3_flag(tp, ENABLE_RSS)) {
+ int i = 0;
+ u32 reg = MAC_RSS_INDIR_TBL_0;
+
+ if (tp->irq_cnt == 2) {
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
+ tw32(reg, 0x0);
+ reg += 4;
+ }
+ } else {
+ u32 val;
+
+ while (i < TG3_RSS_INDIR_TBL_SIZE) {
+ val = i % (tp->irq_cnt - 1);
+ i++;
+ for (; i % 8; i++) {
+ val <<= 4;
+ val |= (i % (tp->irq_cnt - 1));
+ }
+ tw32(reg, val);
+ reg += 4;
+ }
+ }
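+
+ /* Worked example (irq_cnt assumed for illustration): with
+ * tp->irq_cnt == 5 there are four rx queues, and each 32-bit
+ * register packs eight 4-bit queue indices, so the first
+ * register written above ends up holding 0x01230123.
+ */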
+
+ /* Setup the "secret" hash key. */
+ tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
+ tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
+ tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
+ tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
+ tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
+ tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
+ tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
+ tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
+ tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
+ tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
+ }
+
+ tp->rx_mode = RX_MODE_ENABLE;
+ if (tg3_flag(tp, 5755_PLUS))
+ tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
+
+ if (tg3_flag(tp, ENABLE_RSS))
+ tp->rx_mode |= RX_MODE_RSS_ENABLE |
+ RX_MODE_RSS_ITBL_HASH_BITS_7 |
+ RX_MODE_RSS_IPV6_HASH_EN |
+ RX_MODE_RSS_TCP_IPV6_HASH_EN |
+ RX_MODE_RSS_IPV4_HASH_EN |
+ RX_MODE_RSS_TCP_IPV4_HASH_EN;
+
+ tw32_f(MAC_RX_MODE, tp->rx_mode);
+ udelay(10);
+
+ tw32(MAC_LED_CTRL, tp->led_ctrl);
+
+ tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+ tw32_f(MAC_RX_MODE, RX_MODE_RESET);
+ udelay(10);
+ }
+ tw32_f(MAC_RX_MODE, tp->rx_mode);
+ udelay(10);
+
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
+ !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
+ /* Set drive transmission level to 1.2V */
+ /* only if the signal pre-emphasis bit is not set */
+ val = tr32(MAC_SERDES_CFG);
+ val &= 0xfffff000;
+ val |= 0x880;
+ tw32(MAC_SERDES_CFG, val);
+ }
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
+ tw32(MAC_SERDES_CFG, 0x616000);
+ }
+
+ /* Prevent the chip from dropping frames when flow
+ * control is enabled.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ val = 1;
+ else
+ val = 2;
+ tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
+ (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
+ /* Use hardware link auto-negotiation */
+ tg3_flag_set(tp, HW_AUTONEG);
+ }
+
+ if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ u32 tmp;
+
+ tmp = tr32(SERDES_RX_CTRL);
+ tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
+ tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
+ tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
+ tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
+ }
+
+ if (!tg3_flag(tp, USE_PHYLIB)) {
+ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+ tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
+ tp->link_config.speed = tp->link_config.orig_speed;
+ tp->link_config.duplex = tp->link_config.orig_duplex;
+ tp->link_config.autoneg = tp->link_config.orig_autoneg;
+ }
+
+ err = tg3_setup_phy(tp, 0);
+ if (err)
+ return err;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+ !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
+ u32 tmp;
+
+ /* Clear CRC stats. */
+ if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
+ tg3_writephy(tp, MII_TG3_TEST1,
+ tmp | MII_TG3_TEST1_CRC_EN);
+ tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
+ }
+ }
+ }
+
+ __tg3_set_rx_mode(tp->dev);
+
+ /* Initialize receive rules. */
+ tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
+ tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
+ tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
+ tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
+
+ if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
+ limit = 8;
+ else
+ limit = 16;
+ if (tg3_flag(tp, ENABLE_ASF))
+ limit -= 4;
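+
+ /* Each case below deliberately falls through, clearing every
+ * rule from limit - 1 down to rule 4; rules 0-3 are left alone
+ * (0 and 1 were programmed above). The adjustment above
+ * presumably keeps the top four rules free for ASF firmware
+ * use when ASF is enabled.
+ */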
+ switch (limit) {
+ case 16:
+ tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
+ case 15:
+ tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
+ case 14:
+ tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
+ case 13:
+ tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
+ case 12:
+ tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
+ case 11:
+ tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
+ case 10:
+ tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
+ case 9:
+ tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
+ case 8:
+ tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
+ case 7:
+ tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
+ case 6:
+ tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
+ case 5:
+ tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
+ case 4:
+ /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
+ case 3:
+ /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
+ case 2:
+ case 1:
+
+ default:
+ break;
+ }
+
+ if (tg3_flag(tp, ENABLE_APE))
+ /* Write our heartbeat update interval to APE. */
+ tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
+ APE_HOST_HEARTBEAT_INT_DISABLE);
+
+ tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
+
+ return 0;
+}
+
+/* Called at device open time to get the chip ready for
+ * packet processing. Invoked with tp->lock held.
+ */
+static int tg3_init_hw(struct tg3 *tp, int reset_phy)
+{
+ tg3_switch_clocks(tp);
+
+ tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+
+ return tg3_reset_hw(tp, reset_phy);
+}
+
+#define TG3_STAT_ADD32(PSTAT, REG) \
+do { u32 __val = tr32(REG); \
+ (PSTAT)->low += __val; \
+ if ((PSTAT)->low < __val) \
+ (PSTAT)->high += 1; \
+} while (0)
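+
+/* TG3_STAT_ADD32 accumulates a 32-bit hardware counter into a 64-bit
+ * high/low software counter. The carry test relies on unsigned
+ * wraparound: e.g. if low were 0xfffffff0 and __val 0x20, low becomes
+ * 0x10, which is less than __val, so high is incremented.
+ */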
+
+static void tg3_periodic_fetch_stats(struct tg3 *tp)
+{
+ struct tg3_hw_stats *sp = tp->hw_stats;
+
+ if (!netif_carrier_ok(tp->dev))
+ return;
+
+ TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
+ TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
+ TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
+ TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
+ TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
+ TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
+ TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
+ TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
+ TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
+ TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
+ TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
+ TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
+ TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
+
+ TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
+ TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
+ TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
+ TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
+ TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
+ TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
+ TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
+ TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
+ TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
+ TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
+ TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
+ TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
+ TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
+ TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
+
+ TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
+ TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
+ } else {
+ u32 val = tr32(HOSTCC_FLOW_ATTN);
+ val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
+ if (val) {
+ tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
+ sp->rx_discards.low += val;
+ if (sp->rx_discards.low < val)
+ sp->rx_discards.high += 1;
+ }
+ sp->mbuf_lwm_thresh_hit = sp->rx_discards;
+ }
+ TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
+}
+
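+/* Timer-driven workaround for lost MSIs: if a vector still has work
+ * pending but its rx/tx consumer indices have not moved since the
+ * previous tick, assume the MSI was missed and invoke the handler by
+ * hand. chk_msi_cnt tolerates one quiet tick before forcing it.
+ */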
+static void tg3_chk_missed_msi(struct tg3 *tp)
+{
+ u32 i;
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ if (tg3_has_work(tnapi)) {
+ if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
+ tnapi->last_tx_cons == tnapi->tx_cons) {
+ if (tnapi->chk_msi_cnt < 1) {
+ tnapi->chk_msi_cnt++;
+ return;
+ }
+ tg3_msi(0, tnapi);
+ }
+ }
+ tnapi->chk_msi_cnt = 0;
+ tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
+ tnapi->last_tx_cons = tnapi->tx_cons;
+ }
+}
+
+static void tg3_timer(unsigned long __opaque)
+{
+ struct tg3 *tp = (struct tg3 *) __opaque;
+
+ if (tp->irq_sync)
+ goto restart_timer;
+
+ spin_lock(&tp->lock);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_chk_missed_msi(tp);
+
+ if (!tg3_flag(tp, TAGGED_STATUS)) {
+ /* All of this garbage is because, when using non-tagged
+ * IRQ status, the mailbox/status_block protocol the chip
+ * uses with the cpu is race prone.
+ */
+ if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
+ tw32(GRC_LOCAL_CTRL,
+ tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+ } else {
+ tw32(HOSTCC_MODE, tp->coalesce_mode |
+ HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
+ }
+
+ if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+ tg3_flag_set(tp, RESTART_TIMER);
+ spin_unlock(&tp->lock);
+ schedule_work(&tp->reset_task);
+ return;
+ }
+ }
+
+ /* This part only runs once per second. */
+ if (!--tp->timer_counter) {
+ if (tg3_flag(tp, 5705_PLUS))
+ tg3_periodic_fetch_stats(tp);
+
+ if (tp->setlpicnt && !--tp->setlpicnt)
+ tg3_phy_eee_enable(tp);
+
+ if (tg3_flag(tp, USE_LINKCHG_REG)) {
+ u32 mac_stat;
+ int phy_event;
+
+ mac_stat = tr32(MAC_STATUS);
+
+ phy_event = 0;
+ if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
+ if (mac_stat & MAC_STATUS_MI_INTERRUPT)
+ phy_event = 1;
+ } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
+ phy_event = 1;
+
+ if (phy_event)
+ tg3_setup_phy(tp, 0);
+ } else if (tg3_flag(tp, POLL_SERDES)) {
+ u32 mac_stat = tr32(MAC_STATUS);
+ int need_setup = 0;
+
+ if (netif_carrier_ok(tp->dev) &&
+ (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
+ need_setup = 1;
+ }
+ if (!netif_carrier_ok(tp->dev) &&
+ (mac_stat & (MAC_STATUS_PCS_SYNCED |
+ MAC_STATUS_SIGNAL_DET))) {
+ need_setup = 1;
+ }
+ if (need_setup) {
+ if (!tp->serdes_counter) {
+ tw32_f(MAC_MODE,
+ (tp->mac_mode &
+ ~MAC_MODE_PORT_MODE_MASK));
+ udelay(40);
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+ }
+ tg3_setup_phy(tp, 0);
+ }
+ } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+ tg3_flag(tp, 5780_CLASS)) {
+ tg3_serdes_parallel_detect(tp);
+ }
+
+ tp->timer_counter = tp->timer_multiplier;
+ }
+
+ /* Heartbeat is only sent once every 2 seconds.
+ *
+ * The heartbeat is to tell the ASF firmware that the host
+ * driver is still alive. In the event that the OS crashes,
+ * ASF needs to reset the hardware to free up the FIFO space
+ * that may be filled with rx packets destined for the host.
+ * If the FIFO is full, ASF will no longer function properly.
+ *
+ * Unintended resets have been reported on real-time kernels
+ * where the timer doesn't run on time. Netpoll will also have
+ * the same problem.
+ *
+ * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
+ * to check the ring condition when the heartbeat is expiring
+ * before doing the reset. This will prevent most unintended
+ * resets.
+ */
+ if (!--tp->asf_counter) {
+ if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
+ tg3_wait_for_event_ack(tp);
+
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
+ FWCMD_NICDRV_ALIVE3);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
+ TG3_FW_UPDATE_TIMEOUT_SEC);
+
+ tg3_generate_fw_event(tp);
+ }
+ tp->asf_counter = tp->asf_multiplier;
+ }
+
+ spin_unlock(&tp->lock);
+
+restart_timer:
+ tp->timer.expires = jiffies + tp->timer_offset;
+ add_timer(&tp->timer);
+}
+
+static int tg3_request_irq(struct tg3 *tp, int irq_num)
+{
+ irq_handler_t fn;
+ unsigned long flags;
+ char *name;
+ struct tg3_napi *tnapi = &tp->napi[irq_num];
+
+ if (tp->irq_cnt == 1)
+ name = tp->dev->name;
+ else {
+ name = &tnapi->irq_lbl[0];
+ snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
+ name[IFNAMSIZ-1] = 0;
+ }
+
+ if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
+ fn = tg3_msi;
+ if (tg3_flag(tp, 1SHOT_MSI))
+ fn = tg3_msi_1shot;
+ flags = 0;
+ } else {
+ fn = tg3_interrupt;
+ if (tg3_flag(tp, TAGGED_STATUS))
+ fn = tg3_interrupt_tagged;
+ flags = IRQF_SHARED;
+ }
+
+ return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
+}
+
+static int tg3_test_interrupt(struct tg3 *tp)
+{
+ struct tg3_napi *tnapi = &tp->napi[0];
+ struct net_device *dev = tp->dev;
+ int err, i, intr_ok = 0;
+ u32 val;
+
+ if (!netif_running(dev))
+ return -ENODEV;
+
+ tg3_disable_ints(tp);
+
+ free_irq(tnapi->irq_vec, tnapi);
+
+ /*
+ * Turn off MSI one-shot mode. Otherwise this test has no
+ * way to observe whether the interrupt was delivered.
+ */
+ if (tg3_flag(tp, 57765_PLUS)) {
+ val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
+ tw32(MSGINT_MODE, val);
+ }
+
+ err = request_irq(tnapi->irq_vec, tg3_test_isr,
+ IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
+ if (err)
+ return err;
+
+ tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
+ tg3_enable_ints(tp);
+
+ tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+ tnapi->coal_now);
+
+ for (i = 0; i < 5; i++) {
+ u32 int_mbox, misc_host_ctrl;
+
+ int_mbox = tr32_mailbox(tnapi->int_mbox);
+ misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
+
+ if ((int_mbox != 0) ||
+ (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
+ intr_ok = 1;
+ break;
+ }
+
+ if (tg3_flag(tp, 57765_PLUS) &&
+ tnapi->hw_status->status_tag != tnapi->last_tag)
+ tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
+
+ msleep(10);
+ }
+
+ tg3_disable_ints(tp);
+
+ free_irq(tnapi->irq_vec, tnapi);
+
+ err = tg3_request_irq(tp, 0);
+
+ if (err)
+ return err;
+
+ if (intr_ok) {
+ /* Re-enable MSI one-shot mode. */
+ if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
+ val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
+ tw32(MSGINT_MODE, val);
+ }
+ return 0;
+ }
+
+ return -EIO;
+}
+
+/* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
+ * is successfully restored.
+ */
+static int tg3_test_msi(struct tg3 *tp)
+{
+ int err;
+ u16 pci_cmd;
+
+ if (!tg3_flag(tp, USING_MSI))
+ return 0;
+
+ /* Turn off SERR reporting in case MSI terminates with Master
+ * Abort.
+ */
+ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+ pci_write_config_word(tp->pdev, PCI_COMMAND,
+ pci_cmd & ~PCI_COMMAND_SERR);
+
+ err = tg3_test_interrupt(tp);
+
+ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+
+ if (!err)
+ return 0;
+
+ /* other failures */
+ if (err != -EIO)
+ return err;
+
+ /* MSI test failed, go back to INTx mode */
+ netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
+ "to INTx mode. Please report this failure to the PCI "
+ "maintainer and include system chipset information\n");
+
+ free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
+
+ pci_disable_msi(tp->pdev);
+
+ tg3_flag_clear(tp, USING_MSI);
+ tp->napi[0].irq_vec = tp->pdev->irq;
+
+ err = tg3_request_irq(tp, 0);
+ if (err)
+ return err;
+
+ /* Need to reset the chip because the MSI cycle may have terminated
+ * with Master Abort.
+ */
+ tg3_full_lock(tp, 1);
+
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ err = tg3_init_hw(tp, 1);
+
+ tg3_full_unlock(tp);
+
+ if (err)
+ free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
+
+ return err;
+}
+
+static int tg3_request_firmware(struct tg3 *tp)
+{
+ const __be32 *fw_data;
+
+ if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
+ netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
+ tp->fw_needed);
+ return -ENOENT;
+ }
+
+ fw_data = (void *)tp->fw->data;
+
+	/* Firmware blob starts with version numbers, followed by
+	 * start address and _full_ length including BSS sections
+	 * (which must be longer than the actual data, of course).
+	 */
+
+ tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
+ if (tp->fw_len < (tp->fw->size - 12)) {
+ netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
+ tp->fw_len, tp->fw_needed);
+ release_firmware(tp->fw);
+ tp->fw = NULL;
+ return -EINVAL;
+ }
+
+ /* We no longer need firmware; we have it. */
+ tp->fw_needed = NULL;
+ return 0;
+}
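+
+/* Sketch of the firmware header as the length check above reads it:
+ * fw_data[0] holds the version, fw_data[1] the start address, and
+ * fw_data[2] the full image length; the payload that follows is
+ * tp->fw->size - 12 bytes, so fw_len may exceed the payload (BSS)
+ * but must never undershoot it.
+ */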
+
+static bool tg3_enable_msix(struct tg3 *tp)
+{
+ int i, rc, cpus = num_online_cpus();
+ struct msix_entry msix_ent[tp->irq_max];
+
+ if (cpus == 1)
+ /* Just fallback to the simpler MSI mode. */
+ return false;
+
+ /*
+ * We want as many rx rings enabled as there are cpus.
+ * The first MSIX vector only deals with link interrupts, etc,
+ * so we add one to the number of vectors we are requesting.
+ */
+ tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
+
+ for (i = 0; i < tp->irq_max; i++) {
+ msix_ent[i].entry = i;
+ msix_ent[i].vector = 0;
+ }
+
+ rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
+ if (rc < 0) {
+ return false;
+ } else if (rc != 0) {
+ if (pci_enable_msix(tp->pdev, msix_ent, rc))
+ return false;
+ netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
+ tp->irq_cnt, rc);
+ tp->irq_cnt = rc;
+ }
+
+ for (i = 0; i < tp->irq_max; i++)
+ tp->napi[i].irq_vec = msix_ent[i].vector;
+
+ netif_set_real_num_tx_queues(tp->dev, 1);
+ rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
+ if (netif_set_real_num_rx_queues(tp->dev, rc)) {
+ pci_disable_msix(tp->pdev);
+ return false;
+ }
+
+ if (tp->irq_cnt > 1) {
+ tg3_flag_set(tp, ENABLE_RSS);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ tg3_flag_set(tp, ENABLE_TSS);
+ netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
+ }
+ }
+
+ return true;
+}
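+
+/* Example of the vector budget above: on a 4-CPU system with
+ * irq_max = 5, tp->irq_cnt is 5 (one link/misc vector plus four rx
+ * vectors).  If the host grants only 3 vectors, the second
+ * pci_enable_msix() call retries with 3 and the device runs with
+ * two rx queues.
+ */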
+
+static void tg3_ints_init(struct tg3 *tp)
+{
+ if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
+ !tg3_flag(tp, TAGGED_STATUS)) {
+		/* All MSI-supporting chips should support tagged
+		 * status; warn and fall back to INTx if not.
+		 */
+ netdev_warn(tp->dev,
+ "MSI without TAGGED_STATUS? Not using MSI\n");
+ goto defcfg;
+ }
+
+ if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
+ tg3_flag_set(tp, USING_MSIX);
+ else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
+ tg3_flag_set(tp, USING_MSI);
+
+ if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
+ u32 msi_mode = tr32(MSGINT_MODE);
+ if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
+ msi_mode |= MSGINT_MODE_MULTIVEC_EN;
+ if (!tg3_flag(tp, 1SHOT_MSI))
+ msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
+ tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
+ }
+defcfg:
+ if (!tg3_flag(tp, USING_MSIX)) {
+ tp->irq_cnt = 1;
+ tp->napi[0].irq_vec = tp->pdev->irq;
+ netif_set_real_num_tx_queues(tp->dev, 1);
+ netif_set_real_num_rx_queues(tp->dev, 1);
+ }
+}
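+
+/* Net effect of the mode bits above: MULTIVEC_EN routes each ring's
+ * events to its own MSI-X vector, and one-shot mode stays enabled
+ * only when the 1SHOT_MSI flag is set; every non-MSI-X fallback ends
+ * up in the single-vector defcfg path.
+ */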
+
+static void tg3_ints_fini(struct tg3 *tp)
+{
+ if (tg3_flag(tp, USING_MSIX))
+ pci_disable_msix(tp->pdev);
+ else if (tg3_flag(tp, USING_MSI))
+ pci_disable_msi(tp->pdev);
+ tg3_flag_clear(tp, USING_MSI);
+ tg3_flag_clear(tp, USING_MSIX);
+ tg3_flag_clear(tp, ENABLE_RSS);
+ tg3_flag_clear(tp, ENABLE_TSS);
+}
+
+static int tg3_open(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int i, err;
+
+ if (tp->fw_needed) {
+ err = tg3_request_firmware(tp);
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+ if (err)
+ return err;
+ } else if (err) {
+ netdev_warn(tp->dev, "TSO capability disabled\n");
+ tg3_flag_clear(tp, TSO_CAPABLE);
+ } else if (!tg3_flag(tp, TSO_CAPABLE)) {
+ netdev_notice(tp->dev, "TSO capability restored\n");
+ tg3_flag_set(tp, TSO_CAPABLE);
+ }
+ }
+
+ netif_carrier_off(tp->dev);
+
+ err = tg3_power_up(tp);
+ if (err)
+ return err;
+
+ tg3_full_lock(tp, 0);
+
+ tg3_disable_ints(tp);
+ tg3_flag_clear(tp, INIT_COMPLETE);
+
+ tg3_full_unlock(tp);
+
+ /*
+ * Setup interrupts first so we know how
+ * many NAPI resources to allocate
+ */
+ tg3_ints_init(tp);
+
+ /* The placement of this call is tied
+ * to the setup and use of Host TX descriptors.
+ */
+ err = tg3_alloc_consistent(tp);
+ if (err)
+ goto err_out1;
+
+ tg3_napi_init(tp);
+
+ tg3_napi_enable(tp);
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ err = tg3_request_irq(tp, i);
+ if (err) {
+			for (i--; i >= 0; i--) {
+				tnapi = &tp->napi[i];
+				free_irq(tnapi->irq_vec, tnapi);
+			}
+ break;
+ }
+ }
+
+ if (err)
+ goto err_out2;
+
+ tg3_full_lock(tp, 0);
+
+ err = tg3_init_hw(tp, 1);
+ if (err) {
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ tg3_free_rings(tp);
+ } else {
+ if (tg3_flag(tp, TAGGED_STATUS) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
+ tp->timer_offset = HZ;
+ else
+ tp->timer_offset = HZ / 10;
+
+ BUG_ON(tp->timer_offset > HZ);
+ tp->timer_counter = tp->timer_multiplier =
+ (HZ / tp->timer_offset);
+ tp->asf_counter = tp->asf_multiplier =
+ ((HZ / tp->timer_offset) * 2);
+
+ init_timer(&tp->timer);
+ tp->timer.expires = jiffies + tp->timer_offset;
+ tp->timer.data = (unsigned long) tp;
+ tp->timer.function = tg3_timer;
+ }
+
+ tg3_full_unlock(tp);
+
+ if (err)
+ goto err_out3;
+
+ if (tg3_flag(tp, USING_MSI)) {
+ err = tg3_test_msi(tp);
+
+ if (err) {
+ tg3_full_lock(tp, 0);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ tg3_free_rings(tp);
+ tg3_full_unlock(tp);
+
+ goto err_out2;
+ }
+
+ if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
+ u32 val = tr32(PCIE_TRANSACTION_CFG);
+
+ tw32(PCIE_TRANSACTION_CFG,
+ val | PCIE_TRANS_CFG_1SHOT_MSI);
+ }
+ }
+
+ tg3_phy_start(tp);
+
+ tg3_full_lock(tp, 0);
+
+ add_timer(&tp->timer);
+ tg3_flag_set(tp, INIT_COMPLETE);
+ tg3_enable_ints(tp);
+
+ tg3_full_unlock(tp);
+
+ netif_tx_start_all_queues(dev);
+
+	/*
+	 * Reset the loopback feature if it was turned on while the device
+	 * was down; make sure it is installed properly now.
+	 */
+ if (dev->features & NETIF_F_LOOPBACK)
+ tg3_set_loopback(dev, dev->features);
+
+ return 0;
+
+err_out3:
+ for (i = tp->irq_cnt - 1; i >= 0; i--) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ free_irq(tnapi->irq_vec, tnapi);
+ }
+
+err_out2:
+ tg3_napi_disable(tp);
+ tg3_napi_fini(tp);
+ tg3_free_consistent(tp);
+
+err_out1:
+ tg3_ints_fini(tp);
+ tg3_frob_aux_power(tp, false);
+ pci_set_power_state(tp->pdev, PCI_D3hot);
+ return err;
+}
+
+static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
+ struct rtnl_link_stats64 *);
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
+
+static int tg3_close(struct net_device *dev)
+{
+ int i;
+ struct tg3 *tp = netdev_priv(dev);
+
+ tg3_napi_disable(tp);
+ cancel_work_sync(&tp->reset_task);
+
+ netif_tx_stop_all_queues(dev);
+
+ del_timer_sync(&tp->timer);
+
+ tg3_phy_stop(tp);
+
+ tg3_full_lock(tp, 1);
+
+ tg3_disable_ints(tp);
+
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ tg3_free_rings(tp);
+ tg3_flag_clear(tp, INIT_COMPLETE);
+
+ tg3_full_unlock(tp);
+
+ for (i = tp->irq_cnt - 1; i >= 0; i--) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ free_irq(tnapi->irq_vec, tnapi);
+ }
+
+ tg3_ints_fini(tp);
+
+ tg3_get_stats64(tp->dev, &tp->net_stats_prev);
+
+ memcpy(&tp->estats_prev, tg3_get_estats(tp),
+ sizeof(tp->estats_prev));
+
+ tg3_napi_fini(tp);
+
+ tg3_free_consistent(tp);
+
+ tg3_power_down(tp);
+
+ netif_carrier_off(tp->dev);
+
+ return 0;
+}
+
+static inline u64 get_stat64(tg3_stat64_t *val)
+{
+ return ((u64)val->high << 32) | ((u64)val->low);
+}
+
+static u64 calc_crc_errors(struct tg3 *tp)
+{
+ struct tg3_hw_stats *hw_stats = tp->hw_stats;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
+ u32 val;
+
+ spin_lock_bh(&tp->lock);
+ if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
+ tg3_writephy(tp, MII_TG3_TEST1,
+ val | MII_TG3_TEST1_CRC_EN);
+ tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
+ } else
+ val = 0;
+ spin_unlock_bh(&tp->lock);
+
+ tp->phy_crc_errors += val;
+
+ return tp->phy_crc_errors;
+ }
+
+ return get_stat64(&hw_stats->rx_fcs_errors);
+}
+
+#define ESTAT_ADD(member) \
+ estats->member = old_estats->member + \
+ get_stat64(&hw_stats->member)
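+
+/* ESTAT_ADD(rx_octets), for instance, expands to
+ *
+ *	estats->rx_octets = old_estats->rx_octets +
+ *			    get_stat64(&hw_stats->rx_octets);
+ *
+ * i.e. each ethtool counter is the snapshot saved at the last close
+ * plus the live hardware counter.
+ */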
+
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
+{
+ struct tg3_ethtool_stats *estats = &tp->estats;
+ struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
+ struct tg3_hw_stats *hw_stats = tp->hw_stats;
+
+ if (!hw_stats)
+ return old_estats;
+
+ ESTAT_ADD(rx_octets);
+ ESTAT_ADD(rx_fragments);
+ ESTAT_ADD(rx_ucast_packets);
+ ESTAT_ADD(rx_mcast_packets);
+ ESTAT_ADD(rx_bcast_packets);
+ ESTAT_ADD(rx_fcs_errors);
+ ESTAT_ADD(rx_align_errors);
+ ESTAT_ADD(rx_xon_pause_rcvd);
+ ESTAT_ADD(rx_xoff_pause_rcvd);
+ ESTAT_ADD(rx_mac_ctrl_rcvd);
+ ESTAT_ADD(rx_xoff_entered);
+ ESTAT_ADD(rx_frame_too_long_errors);
+ ESTAT_ADD(rx_jabbers);
+ ESTAT_ADD(rx_undersize_packets);
+ ESTAT_ADD(rx_in_length_errors);
+ ESTAT_ADD(rx_out_length_errors);
+ ESTAT_ADD(rx_64_or_less_octet_packets);
+ ESTAT_ADD(rx_65_to_127_octet_packets);
+ ESTAT_ADD(rx_128_to_255_octet_packets);
+ ESTAT_ADD(rx_256_to_511_octet_packets);
+ ESTAT_ADD(rx_512_to_1023_octet_packets);
+ ESTAT_ADD(rx_1024_to_1522_octet_packets);
+ ESTAT_ADD(rx_1523_to_2047_octet_packets);
+ ESTAT_ADD(rx_2048_to_4095_octet_packets);
+ ESTAT_ADD(rx_4096_to_8191_octet_packets);
+ ESTAT_ADD(rx_8192_to_9022_octet_packets);
+
+ ESTAT_ADD(tx_octets);
+ ESTAT_ADD(tx_collisions);
+ ESTAT_ADD(tx_xon_sent);
+ ESTAT_ADD(tx_xoff_sent);
+ ESTAT_ADD(tx_flow_control);
+ ESTAT_ADD(tx_mac_errors);
+ ESTAT_ADD(tx_single_collisions);
+ ESTAT_ADD(tx_mult_collisions);
+ ESTAT_ADD(tx_deferred);
+ ESTAT_ADD(tx_excessive_collisions);
+ ESTAT_ADD(tx_late_collisions);
+ ESTAT_ADD(tx_collide_2times);
+ ESTAT_ADD(tx_collide_3times);
+ ESTAT_ADD(tx_collide_4times);
+ ESTAT_ADD(tx_collide_5times);
+ ESTAT_ADD(tx_collide_6times);
+ ESTAT_ADD(tx_collide_7times);
+ ESTAT_ADD(tx_collide_8times);
+ ESTAT_ADD(tx_collide_9times);
+ ESTAT_ADD(tx_collide_10times);
+ ESTAT_ADD(tx_collide_11times);
+ ESTAT_ADD(tx_collide_12times);
+ ESTAT_ADD(tx_collide_13times);
+ ESTAT_ADD(tx_collide_14times);
+ ESTAT_ADD(tx_collide_15times);
+ ESTAT_ADD(tx_ucast_packets);
+ ESTAT_ADD(tx_mcast_packets);
+ ESTAT_ADD(tx_bcast_packets);
+ ESTAT_ADD(tx_carrier_sense_errors);
+ ESTAT_ADD(tx_discards);
+ ESTAT_ADD(tx_errors);
+
+ ESTAT_ADD(dma_writeq_full);
+ ESTAT_ADD(dma_write_prioq_full);
+ ESTAT_ADD(rxbds_empty);
+ ESTAT_ADD(rx_discards);
+ ESTAT_ADD(rx_errors);
+ ESTAT_ADD(rx_threshold_hit);
+
+ ESTAT_ADD(dma_readq_full);
+ ESTAT_ADD(dma_read_prioq_full);
+ ESTAT_ADD(tx_comp_queue_full);
+
+ ESTAT_ADD(ring_set_send_prod_index);
+ ESTAT_ADD(ring_status_update);
+ ESTAT_ADD(nic_irqs);
+ ESTAT_ADD(nic_avoided_irqs);
+ ESTAT_ADD(nic_tx_threshold_hit);
+
+ ESTAT_ADD(mbuf_lwm_thresh_hit);
+
+ return estats;
+}
+
+static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
+ struct tg3_hw_stats *hw_stats = tp->hw_stats;
+
+ if (!hw_stats)
+ return old_stats;
+
+ stats->rx_packets = old_stats->rx_packets +
+ get_stat64(&hw_stats->rx_ucast_packets) +
+ get_stat64(&hw_stats->rx_mcast_packets) +
+ get_stat64(&hw_stats->rx_bcast_packets);
+
+ stats->tx_packets = old_stats->tx_packets +
+ get_stat64(&hw_stats->tx_ucast_packets) +
+ get_stat64(&hw_stats->tx_mcast_packets) +
+ get_stat64(&hw_stats->tx_bcast_packets);
+
+ stats->rx_bytes = old_stats->rx_bytes +
+ get_stat64(&hw_stats->rx_octets);
+ stats->tx_bytes = old_stats->tx_bytes +
+ get_stat64(&hw_stats->tx_octets);
+
+ stats->rx_errors = old_stats->rx_errors +
+ get_stat64(&hw_stats->rx_errors);
+ stats->tx_errors = old_stats->tx_errors +
+ get_stat64(&hw_stats->tx_errors) +
+ get_stat64(&hw_stats->tx_mac_errors) +
+ get_stat64(&hw_stats->tx_carrier_sense_errors) +
+ get_stat64(&hw_stats->tx_discards);
+
+ stats->multicast = old_stats->multicast +
+ get_stat64(&hw_stats->rx_mcast_packets);
+ stats->collisions = old_stats->collisions +
+ get_stat64(&hw_stats->tx_collisions);
+
+ stats->rx_length_errors = old_stats->rx_length_errors +
+ get_stat64(&hw_stats->rx_frame_too_long_errors) +
+ get_stat64(&hw_stats->rx_undersize_packets);
+
+ stats->rx_over_errors = old_stats->rx_over_errors +
+ get_stat64(&hw_stats->rxbds_empty);
+ stats->rx_frame_errors = old_stats->rx_frame_errors +
+ get_stat64(&hw_stats->rx_align_errors);
+ stats->tx_aborted_errors = old_stats->tx_aborted_errors +
+ get_stat64(&hw_stats->tx_discards);
+ stats->tx_carrier_errors = old_stats->tx_carrier_errors +
+ get_stat64(&hw_stats->tx_carrier_sense_errors);
+
+ stats->rx_crc_errors = old_stats->rx_crc_errors +
+ calc_crc_errors(tp);
+
+ stats->rx_missed_errors = old_stats->rx_missed_errors +
+ get_stat64(&hw_stats->rx_discards);
+
+ stats->rx_dropped = tp->rx_dropped;
+
+ return stats;
+}
+
+static inline u32 calc_crc(unsigned char *buf, int len)
+{
+ u32 reg;
+ u32 tmp;
+ int j, k;
+
+ reg = 0xffffffff;
+
+ for (j = 0; j < len; j++) {
+ reg ^= buf[j];
+
+ for (k = 0; k < 8; k++) {
+ tmp = reg & 0x01;
+
+ reg >>= 1;
+
+ if (tmp)
+ reg ^= 0xedb88320;
+ }
+ }
+
+ return ~reg;
+}
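+
+/* This is the standard bit-reflected Ethernet CRC-32 (polynomial
+ * 0xedb88320, initial value 0xffffffff, final complement); e.g.
+ * calc_crc("123456789", 9) yields the well-known check value
+ * 0xcbf43926.
+ */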
+
+static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
+{
+ /* accept or reject all multicast frames */
+ tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
+ tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
+ tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
+ tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
+}
+
+static void __tg3_set_rx_mode(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ u32 rx_mode;
+
+ rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
+ RX_MODE_KEEP_VLAN_TAG);
+
+#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
+ /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
+ * flag clear.
+ */
+ if (!tg3_flag(tp, ENABLE_ASF))
+ rx_mode |= RX_MODE_KEEP_VLAN_TAG;
+#endif
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Promiscuous mode. */
+ rx_mode |= RX_MODE_PROMISC;
+ } else if (dev->flags & IFF_ALLMULTI) {
+ /* Accept all multicast. */
+ tg3_set_multi(tp, 1);
+ } else if (netdev_mc_empty(dev)) {
+ /* Reject all multicast. */
+ tg3_set_multi(tp, 0);
+ } else {
+ /* Accept one or more multicast(s). */
+ struct netdev_hw_addr *ha;
+ u32 mc_filter[4] = { 0, };
+ u32 regidx;
+ u32 bit;
+ u32 crc;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = calc_crc(ha->addr, ETH_ALEN);
+ bit = ~crc & 0x7f;
+ regidx = (bit & 0x60) >> 5;
+ bit &= 0x1f;
+ mc_filter[regidx] |= (1 << bit);
+ }
+
+ tw32(MAC_HASH_REG_0, mc_filter[0]);
+ tw32(MAC_HASH_REG_1, mc_filter[1]);
+ tw32(MAC_HASH_REG_2, mc_filter[2]);
+ tw32(MAC_HASH_REG_3, mc_filter[3]);
+ }
+
+ if (rx_mode != tp->rx_mode) {
+ tp->rx_mode = rx_mode;
+ tw32_f(MAC_RX_MODE, rx_mode);
+ udelay(10);
+ }
+}
+
+static void tg3_set_rx_mode(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return;
+
+ tg3_full_lock(tp, 0);
+ __tg3_set_rx_mode(dev);
+ tg3_full_unlock(tp);
+}
+
+static int tg3_get_regs_len(struct net_device *dev)
+{
+ return TG3_REG_BLK_SIZE;
+}
+
+static void tg3_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *_p)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ regs->version = 0;
+
+ memset(_p, 0, TG3_REG_BLK_SIZE);
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+ return;
+
+ tg3_full_lock(tp, 0);
+
+ tg3_dump_legacy_regs(tp, (u32 *)_p);
+
+ tg3_full_unlock(tp);
+}
+
+static int tg3_get_eeprom_len(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ return tp->nvram_size;
+}
+
+static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int ret;
+ u8 *pd;
+ u32 i, offset, len, b_offset, b_count;
+ __be32 val;
+
+ if (tg3_flag(tp, NO_NVRAM))
+ return -EINVAL;
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+ return -EAGAIN;
+
+ offset = eeprom->offset;
+ len = eeprom->len;
+ eeprom->len = 0;
+
+ eeprom->magic = TG3_EEPROM_MAGIC;
+
+ if (offset & 3) {
+ /* adjustments to start on required 4 byte boundary */
+ b_offset = offset & 3;
+ b_count = 4 - b_offset;
+ if (b_count > len) {
+ /* i.e. offset=1 len=2 */
+ b_count = len;
+ }
+ ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
+ if (ret)
+ return ret;
+ memcpy(data, ((char *)&val) + b_offset, b_count);
+ len -= b_count;
+ offset += b_count;
+ eeprom->len += b_count;
+ }
+
+ /* read bytes up to the last 4 byte boundary */
+ pd = &data[eeprom->len];
+ for (i = 0; i < (len - (len & 3)); i += 4) {
+ ret = tg3_nvram_read_be32(tp, offset + i, &val);
+ if (ret) {
+ eeprom->len += i;
+ return ret;
+ }
+ memcpy(pd + i, &val, 4);
+ }
+ eeprom->len += i;
+
+ if (len & 3) {
+ /* read last bytes not ending on 4 byte boundary */
+ pd = &data[eeprom->len];
+ b_count = len & 3;
+ b_offset = offset + len - b_count;
+ ret = tg3_nvram_read_be32(tp, b_offset, &val);
+ if (ret)
+ return ret;
+ memcpy(pd, &val, b_count);
+ eeprom->len += b_count;
+ }
+ return 0;
+}
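+
+/* Worked example of the alignment handling above: offset = 5, len = 10
+ * reads the word at 4 and keeps bytes 5-7, reads the aligned word at 8
+ * whole, then reads the word at 12 and keeps bytes 12-14, returning
+ * exactly 10 bytes.
+ */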
+
+static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
+
+static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int ret;
+ u32 offset, len, b_offset, odd_len;
+ u8 *buf;
+ __be32 start, end;
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+ return -EAGAIN;
+
+ if (tg3_flag(tp, NO_NVRAM) ||
+ eeprom->magic != TG3_EEPROM_MAGIC)
+ return -EINVAL;
+
+ offset = eeprom->offset;
+ len = eeprom->len;
+
+ if ((b_offset = (offset & 3))) {
+ /* adjustments to start on required 4 byte boundary */
+ ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
+ if (ret)
+ return ret;
+ len += b_offset;
+ offset &= ~3;
+ if (len < 4)
+ len = 4;
+ }
+
+ odd_len = 0;
+ if (len & 3) {
+ /* adjustments to end on required 4 byte boundary */
+ odd_len = 1;
+ len = (len + 3) & ~3;
+ ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
+ if (ret)
+ return ret;
+ }
+
+ buf = data;
+ if (b_offset || odd_len) {
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ if (b_offset)
+ memcpy(buf, &start, 4);
+ if (odd_len)
+ memcpy(buf+len-4, &end, 4);
+ memcpy(buf + b_offset, data, eeprom->len);
+ }
+
+ ret = tg3_nvram_write_block(tp, offset, len, buf);
+
+ if (buf != data)
+ kfree(buf);
+
+ return ret;
+}
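+
+/* E.g. offset = 6, len = 3: the words at 4 and 8 are read first, the
+ * bounce buffer then covers bytes 4-11 with the caller's three bytes
+ * patched in at 6-8, and a single aligned 8-byte block is written
+ * back.
+ */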
+
+static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (tg3_flag(tp, USE_PHYLIB)) {
+ struct phy_device *phydev;
+ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+ return -EAGAIN;
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ return phy_ethtool_gset(phydev, cmd);
+ }
+
+ cmd->supported = (SUPPORTED_Autoneg);
+
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
+ cmd->supported |= (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+ cmd->supported |= (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_TP);
+ cmd->port = PORT_TP;
+ } else {
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->port = PORT_FIBRE;
+ }
+
+ cmd->advertising = tp->link_config.advertising;
+ if (tg3_flag(tp, PAUSE_AUTONEG)) {
+ if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
+ if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
+ cmd->advertising |= ADVERTISED_Pause;
+ } else {
+ cmd->advertising |= ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause;
+ }
+ } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
+ cmd->advertising |= ADVERTISED_Asym_Pause;
+ }
+ }
+ if (netif_running(dev)) {
+ ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
+ cmd->duplex = tp->link_config.active_duplex;
+ } else {
+ ethtool_cmd_speed_set(cmd, SPEED_INVALID);
+ cmd->duplex = DUPLEX_INVALID;
+ }
+ cmd->phy_address = tp->phy_addr;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->autoneg = tp->link_config.autoneg;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+ return 0;
+}
+
+static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ u32 speed = ethtool_cmd_speed(cmd);
+
+ if (tg3_flag(tp, USE_PHYLIB)) {
+ struct phy_device *phydev;
+ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+ return -EAGAIN;
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ return phy_ethtool_sset(phydev, cmd);
+ }
+
+ if (cmd->autoneg != AUTONEG_ENABLE &&
+ cmd->autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_DISABLE &&
+ cmd->duplex != DUPLEX_FULL &&
+ cmd->duplex != DUPLEX_HALF)
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ u32 mask = ADVERTISED_Autoneg |
+ ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
+ mask |= ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+ mask |= ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_TP;
+ else
+ mask |= ADVERTISED_FIBRE;
+
+ if (cmd->advertising & ~mask)
+ return -EINVAL;
+
+ mask &= (ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full);
+
+ cmd->advertising &= mask;
+ } else {
+ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
+ if (speed != SPEED_1000)
+ return -EINVAL;
+
+ if (cmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ } else {
+ if (speed != SPEED_100 &&
+ speed != SPEED_10)
+ return -EINVAL;
+ }
+ }
+
+ tg3_full_lock(tp, 0);
+
+ tp->link_config.autoneg = cmd->autoneg;
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ tp->link_config.advertising = (cmd->advertising |
+ ADVERTISED_Autoneg);
+ tp->link_config.speed = SPEED_INVALID;
+ tp->link_config.duplex = DUPLEX_INVALID;
+ } else {
+ tp->link_config.advertising = 0;
+ tp->link_config.speed = speed;
+ tp->link_config.duplex = cmd->duplex;
+ }
+
+ tp->link_config.orig_speed = tp->link_config.speed;
+ tp->link_config.orig_duplex = tp->link_config.duplex;
+ tp->link_config.orig_autoneg = tp->link_config.autoneg;
+
+ if (netif_running(dev))
+ tg3_setup_phy(tp, 1);
+
+ tg3_full_unlock(tp);
+
+ return 0;
+}
+
+static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_MODULE_NAME);
+ strcpy(info->version, DRV_MODULE_VERSION);
+ strcpy(info->fw_version, tp->fw_ver);
+ strcpy(info->bus_info, pci_name(tp->pdev));
+}
+
+static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
+ wol->supported = WAKE_MAGIC;
+ else
+ wol->supported = 0;
+ wol->wolopts = 0;
+ if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
+ wol->wolopts = WAKE_MAGIC;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ struct device *dp = &tp->pdev->dev;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+ if ((wol->wolopts & WAKE_MAGIC) &&
+ !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
+ return -EINVAL;
+
+ device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
+
+ spin_lock_bh(&tp->lock);
+ if (device_may_wakeup(dp))
+ tg3_flag_set(tp, WOL_ENABLE);
+ else
+ tg3_flag_clear(tp, WOL_ENABLE);
+ spin_unlock_bh(&tp->lock);
+
+ return 0;
+}
+
+static u32 tg3_get_msglevel(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ return tp->msg_enable;
+}
+
+static void tg3_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ tp->msg_enable = value;
+}
+
+static int tg3_nway_reset(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int r;
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+ return -EINVAL;
+
+ if (tg3_flag(tp, USE_PHYLIB)) {
+ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+ return -EAGAIN;
+ r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+ } else {
+ u32 bmcr;
+
+ spin_lock_bh(&tp->lock);
+ r = -EINVAL;
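+		/* The first read is a throwaway; only the second,
+		 * back-to-back read of BMCR is checked.
+		 */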
+ tg3_readphy(tp, MII_BMCR, &bmcr);
+ if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
+ ((bmcr & BMCR_ANENABLE) ||
+ (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
+ tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
+ BMCR_ANENABLE);
+ r = 0;
+ }
+ spin_unlock_bh(&tp->lock);
+ }
+
+ return r;
+}
+
+static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ ering->rx_max_pending = tp->rx_std_ring_mask;
+ ering->rx_mini_max_pending = 0;
+ if (tg3_flag(tp, JUMBO_RING_ENABLE))
+ ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
+ else
+ ering->rx_jumbo_max_pending = 0;
+
+ ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
+
+ ering->rx_pending = tp->rx_pending;
+ ering->rx_mini_pending = 0;
+ if (tg3_flag(tp, JUMBO_RING_ENABLE))
+ ering->rx_jumbo_pending = tp->rx_jumbo_pending;
+ else
+ ering->rx_jumbo_pending = 0;
+
+ ering->tx_pending = tp->napi[0].tx_pending;
+}
+
+static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int i, irq_sync = 0, err = 0;
+
+ if ((ering->rx_pending > tp->rx_std_ring_mask) ||
+ (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
+ (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
+ (ering->tx_pending <= MAX_SKB_FRAGS) ||
+ (tg3_flag(tp, TSO_BUG) &&
+ (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
+ return -EINVAL;
+
+ if (netif_running(dev)) {
+ tg3_phy_stop(tp);
+ tg3_netif_stop(tp);
+ irq_sync = 1;
+ }
+
+ tg3_full_lock(tp, irq_sync);
+
+ tp->rx_pending = ering->rx_pending;
+
+ if (tg3_flag(tp, MAX_RXPEND_64) &&
+ tp->rx_pending > 63)
+ tp->rx_pending = 63;
+ tp->rx_jumbo_pending = ering->rx_jumbo_pending;
+
+ for (i = 0; i < tp->irq_max; i++)
+ tp->napi[i].tx_pending = ering->tx_pending;
+
+ if (netif_running(dev)) {
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ err = tg3_restart_hw(tp, 1);
+ if (!err)
+ tg3_netif_start(tp);
+ }
+
+ tg3_full_unlock(tp);
+
+ if (irq_sync && !err)
+ tg3_phy_start(tp);
+
+ return err;
+}
+
+static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
+
+ if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
+ epause->rx_pause = 1;
+ else
+ epause->rx_pause = 0;
+
+ if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
+ epause->tx_pause = 1;
+ else
+ epause->tx_pause = 0;
+}
+
+static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int err = 0;
+
+ if (tg3_flag(tp, USE_PHYLIB)) {
+ u32 newadv;
+ struct phy_device *phydev;
+
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+
+ if (!(phydev->supported & SUPPORTED_Pause) ||
+ (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+ (epause->rx_pause != epause->tx_pause)))
+ return -EINVAL;
+
+ tp->link_config.flowctrl = 0;
+ if (epause->rx_pause) {
+ tp->link_config.flowctrl |= FLOW_CTRL_RX;
+
+ if (epause->tx_pause) {
+ tp->link_config.flowctrl |= FLOW_CTRL_TX;
+ newadv = ADVERTISED_Pause;
+ } else
+ newadv = ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause;
+ } else if (epause->tx_pause) {
+ tp->link_config.flowctrl |= FLOW_CTRL_TX;
+ newadv = ADVERTISED_Asym_Pause;
+ } else
+ newadv = 0;
+
+ if (epause->autoneg)
+ tg3_flag_set(tp, PAUSE_AUTONEG);
+ else
+ tg3_flag_clear(tp, PAUSE_AUTONEG);
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
+ u32 oldadv = phydev->advertising &
+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ if (oldadv != newadv) {
+ phydev->advertising &=
+ ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ phydev->advertising |= newadv;
+ if (phydev->autoneg) {
+ /*
+ * Always renegotiate the link to
+ * inform our link partner of our
+ * flow control settings, even if the
+ * flow control is forced. Let
+ * tg3_adjust_link() do the final
+ * flow control setup.
+ */
+ return phy_start_aneg(phydev);
+ }
+ }
+
+ if (!epause->autoneg)
+ tg3_setup_flow_control(tp, 0, 0);
+ } else {
+ tp->link_config.orig_advertising &=
+ ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ tp->link_config.orig_advertising |= newadv;
+ }
+ } else {
+ int irq_sync = 0;
+
+ if (netif_running(dev)) {
+ tg3_netif_stop(tp);
+ irq_sync = 1;
+ }
+
+ tg3_full_lock(tp, irq_sync);
+
+ if (epause->autoneg)
+ tg3_flag_set(tp, PAUSE_AUTONEG);
+ else
+ tg3_flag_clear(tp, PAUSE_AUTONEG);
+ if (epause->rx_pause)
+ tp->link_config.flowctrl |= FLOW_CTRL_RX;
+ else
+ tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
+ if (epause->tx_pause)
+ tp->link_config.flowctrl |= FLOW_CTRL_TX;
+ else
+ tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
+
+ if (netif_running(dev)) {
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ err = tg3_restart_hw(tp, 1);
+ if (!err)
+ tg3_netif_start(tp);
+ }
+
+ tg3_full_unlock(tp);
+ }
+
+ return err;
+}
+
+static int tg3_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return TG3_NUM_TEST;
+ case ETH_SS_STATS:
+ return TG3_NUM_STATS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+ break;
+ case ETH_SS_TEST:
+ memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
+ break;
+ default:
+ WARN_ON(1); /* we need a WARN() */
+ break;
+ }
+}
+
+static int tg3_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (!netif_running(tp->dev))
+ return -EAGAIN;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
+
+ case ETHTOOL_ID_ON:
+ tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_1000MBPS_ON |
+ LED_CTRL_100MBPS_ON |
+ LED_CTRL_10MBPS_ON |
+ LED_CTRL_TRAFFIC_OVERRIDE |
+ LED_CTRL_TRAFFIC_BLINK |
+ LED_CTRL_TRAFFIC_LED);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_TRAFFIC_OVERRIDE);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ tw32(MAC_LED_CTRL, tp->led_ctrl);
+ break;
+ }
+
+ return 0;
+}
+
+static void tg3_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *estats, u64 *tmp_stats)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
+}
+
+static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
+{
+ int i;
+ __be32 *buf;
+ u32 offset = 0, len = 0;
+ u32 magic, val;
+
+ if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
+ return NULL;
+
+ if (magic == TG3_EEPROM_MAGIC) {
+ for (offset = TG3_NVM_DIR_START;
+ offset < TG3_NVM_DIR_END;
+ offset += TG3_NVM_DIRENT_SIZE) {
+ if (tg3_nvram_read(tp, offset, &val))
+ return NULL;
+
+ if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
+ TG3_NVM_DIRTYPE_EXTVPD)
+ break;
+ }
+
+ if (offset != TG3_NVM_DIR_END) {
+ len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
+ if (tg3_nvram_read(tp, offset + 4, &offset))
+ return NULL;
+
+ offset = tg3_nvram_logical_addr(tp, offset);
+ }
+ }
+
+ if (!offset || !len) {
+ offset = TG3_NVM_VPD_OFF;
+ len = TG3_NVM_VPD_LEN;
+ }
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (buf == NULL)
+ return NULL;
+
+ if (magic == TG3_EEPROM_MAGIC) {
+ for (i = 0; i < len; i += 4) {
+ /* The data is in little-endian format in NVRAM.
+ * Use the big-endian read routines to preserve
+ * the byte order as it exists in NVRAM.
+ */
+ if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
+ goto error;
+ }
+ } else {
+ u8 *ptr;
+ ssize_t cnt;
+ unsigned int pos = 0;
+
+ ptr = (u8 *)&buf[0];
+ for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
+ cnt = pci_read_vpd(tp->pdev, pos,
+ len - pos, ptr);
+ if (cnt == -ETIMEDOUT || cnt == -EINTR)
+ cnt = 0;
+ else if (cnt < 0)
+ goto error;
+ }
+ if (pos != len)
+ goto error;
+ }
+
+ *vpdlen = len;
+
+ return buf;
+
+error:
+ kfree(buf);
+ return NULL;
+}
+
+#define NVRAM_TEST_SIZE 0x100
+#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
+#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
+#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
+#define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
+#define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
+#define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
+#define NVRAM_SELFBOOT_HW_SIZE 0x20
+#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
+
+static int tg3_test_nvram(struct tg3 *tp)
+{
+ u32 csum, magic, len;
+ __be32 *buf;
+ int i, j, k, err = 0, size;
+
+ if (tg3_flag(tp, NO_NVRAM))
+ return 0;
+
+ if (tg3_nvram_read(tp, 0, &magic) != 0)
+ return -EIO;
+
+ if (magic == TG3_EEPROM_MAGIC)
+ size = NVRAM_TEST_SIZE;
+ else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
+ if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
+ TG3_EEPROM_SB_FORMAT_1) {
+ switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
+ case TG3_EEPROM_SB_REVISION_0:
+ size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
+ break;
+ case TG3_EEPROM_SB_REVISION_2:
+ size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
+ break;
+ case TG3_EEPROM_SB_REVISION_3:
+ size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
+ break;
+ case TG3_EEPROM_SB_REVISION_4:
+ size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
+ break;
+ case TG3_EEPROM_SB_REVISION_5:
+ size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
+ break;
+ case TG3_EEPROM_SB_REVISION_6:
+ size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
+ break;
+ default:
+ return -EIO;
+ }
+ } else
+ return 0;
+ } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
+ size = NVRAM_SELFBOOT_HW_SIZE;
+ else
+ return -EIO;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ err = -EIO;
+ for (i = 0, j = 0; i < size; i += 4, j++) {
+ err = tg3_nvram_read_be32(tp, i, &buf[j]);
+ if (err)
+ break;
+ }
+ if (i < size)
+ goto out;
+
+ /* Selfboot format */
+ magic = be32_to_cpu(buf[0]);
+ if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
+ TG3_EEPROM_MAGIC_FW) {
+ u8 *buf8 = (u8 *) buf, csum8 = 0;
+
+ if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
+ TG3_EEPROM_SB_REVISION_2) {
+ /* For rev 2, the csum doesn't include the MBA. */
+ for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
+ csum8 += buf8[i];
+ for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
+ csum8 += buf8[i];
+ } else {
+ for (i = 0; i < size; i++)
+ csum8 += buf8[i];
+ }
+
+ if (csum8 == 0) {
+ err = 0;
+ goto out;
+ }
+
+ err = -EIO;
+ goto out;
+ }
+
+ if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
+ TG3_EEPROM_MAGIC_HW) {
+ u8 data[NVRAM_SELFBOOT_DATA_SIZE];
+ u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
+ u8 *buf8 = (u8 *) buf;
+
+ /* Separate the parity bits and the data bytes. */
+ for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
+ if ((i == 0) || (i == 8)) {
+ int l;
+ u8 msk;
+
+ for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
+ parity[k++] = buf8[i] & msk;
+ i++;
+ } else if (i == 16) {
+ int l;
+ u8 msk;
+
+ for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
+ parity[k++] = buf8[i] & msk;
+ i++;
+
+ for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
+ parity[k++] = buf8[i] & msk;
+ i++;
+ }
+ data[j++] = buf8[i];
+ }
+
+ err = -EIO;
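+		/* Each data byte plus its stored parity bit must come
+		 * out odd: fail when the byte's popcount and the parity
+		 * bit are both odd or both even.
+		 */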
+ for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
+ u8 hw8 = hweight8(data[i]);
+
+ if ((hw8 & 0x1) && parity[i])
+ goto out;
+ else if (!(hw8 & 0x1) && !parity[i])
+ goto out;
+ }
+ err = 0;
+ goto out;
+ }
+
+ err = -EIO;
+
+ /* Bootstrap checksum at offset 0x10 */
+ csum = calc_crc((unsigned char *) buf, 0x10);
+ if (csum != le32_to_cpu(buf[0x10/4]))
+ goto out;
+
+ /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
+ csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
+ if (csum != le32_to_cpu(buf[0xfc/4]))
+ goto out;
+
+ kfree(buf);
+
+ buf = tg3_vpd_readblock(tp, &len);
+ if (!buf)
+ return -ENOMEM;
+
+ i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
+ if (i > 0) {
+ j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
+ if (j < 0)
+ goto out;
+
+ if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
+ goto out;
+
+ i += PCI_VPD_LRDT_TAG_SIZE;
+ j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
+ PCI_VPD_RO_KEYWORD_CHKSUM);
+ if (j > 0) {
+ u8 csum8 = 0;
+
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+ for (i = 0; i <= j; i++)
+ csum8 += ((u8 *)buf)[i];
+
+ if (csum8)
+ goto out;
+ }
+ }
+
+ err = 0;
+
+out:
+ kfree(buf);
+ return err;
+}
+
+#define TG3_SERDES_TIMEOUT_SEC 2
+#define TG3_COPPER_TIMEOUT_SEC 6
+
+static int tg3_test_link(struct tg3 *tp)
+{
+ int i, max;
+
+ if (!netif_running(tp->dev))
+ return -ENODEV;
+
+ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
+ max = TG3_SERDES_TIMEOUT_SEC;
+ else
+ max = TG3_COPPER_TIMEOUT_SEC;
+
+ for (i = 0; i < max; i++) {
+ if (netif_carrier_ok(tp->dev))
+ return 0;
+
+ if (msleep_interruptible(1000))
+ break;
+ }
+
+ return -EIO;
+}
+
+/* Only test the commonly used registers */
+static int tg3_test_registers(struct tg3 *tp)
+{
+ int i, is_5705, is_5750;
+ u32 offset, read_mask, write_mask, val, save_val, read_val;
+ static struct {
+ u16 offset;
+ u16 flags;
+#define TG3_FL_5705 0x1
+#define TG3_FL_NOT_5705 0x2
+#define TG3_FL_NOT_5788 0x4
+#define TG3_FL_NOT_5750 0x8
+ u32 read_mask;
+ u32 write_mask;
+ } reg_tbl[] = {
+ /* MAC Control Registers */
+ { MAC_MODE, TG3_FL_NOT_5705,
+ 0x00000000, 0x00ef6f8c },
+ { MAC_MODE, TG3_FL_5705,
+ 0x00000000, 0x01ef6b8c },
+ { MAC_STATUS, TG3_FL_NOT_5705,
+ 0x03800107, 0x00000000 },
+ { MAC_STATUS, TG3_FL_5705,
+ 0x03800100, 0x00000000 },
+ { MAC_ADDR_0_HIGH, 0x0000,
+ 0x00000000, 0x0000ffff },
+ { MAC_ADDR_0_LOW, 0x0000,
+ 0x00000000, 0xffffffff },
+ { MAC_RX_MTU_SIZE, 0x0000,
+ 0x00000000, 0x0000ffff },
+ { MAC_TX_MODE, 0x0000,
+ 0x00000000, 0x00000070 },
+ { MAC_TX_LENGTHS, 0x0000,
+ 0x00000000, 0x00003fff },
+ { MAC_RX_MODE, TG3_FL_NOT_5705,
+ 0x00000000, 0x000007fc },
+ { MAC_RX_MODE, TG3_FL_5705,
+ 0x00000000, 0x000007dc },
+ { MAC_HASH_REG_0, 0x0000,
+ 0x00000000, 0xffffffff },
+ { MAC_HASH_REG_1, 0x0000,
+ 0x00000000, 0xffffffff },
+ { MAC_HASH_REG_2, 0x0000,
+ 0x00000000, 0xffffffff },
+ { MAC_HASH_REG_3, 0x0000,
+ 0x00000000, 0xffffffff },
+
+ /* Receive Data and Receive BD Initiator Control Registers. */
+ { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
+ 0x00000000, 0x00000003 },
+ { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { RCVDBDI_STD_BD+0, 0x0000,
+ 0x00000000, 0xffffffff },
+ { RCVDBDI_STD_BD+4, 0x0000,
+ 0x00000000, 0xffffffff },
+ { RCVDBDI_STD_BD+8, 0x0000,
+ 0x00000000, 0xffff0002 },
+ { RCVDBDI_STD_BD+0xc, 0x0000,
+ 0x00000000, 0xffffffff },
+
+ /* Receive BD Initiator Control Registers. */
+ { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { RCVBDI_STD_THRESH, TG3_FL_5705,
+ 0x00000000, 0x000003ff },
+ { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+
+ /* Host Coalescing Control Registers. */
+ { HOSTCC_MODE, TG3_FL_NOT_5705,
+ 0x00000000, 0x00000004 },
+ { HOSTCC_MODE, TG3_FL_5705,
+ 0x00000000, 0x000000f6 },
+ { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
+ 0x00000000, 0x000003ff },
+ { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
+ 0x00000000, 0x000003ff },
+ { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
+ 0x00000000, 0x000000ff },
+ { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
+ 0x00000000, 0x000000ff },
+ { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
+ 0x00000000, 0x000000ff },
+ { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
+ 0x00000000, 0x000000ff },
+ { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
+ 0xffffffff, 0x00000000 },
+ { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
+ 0xffffffff, 0x00000000 },
+
+ /* Buffer Manager Control Registers. */
+ { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
+ 0x00000000, 0x007fff80 },
+ { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
+ 0x00000000, 0x007fffff },
+ { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
+ 0x00000000, 0x0000003f },
+ { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
+ 0x00000000, 0x000001ff },
+ { BUFMGR_MB_HIGH_WATER, 0x0000,
+ 0x00000000, 0x000001ff },
+ { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
+ 0xffffffff, 0x00000000 },
+ { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
+ 0xffffffff, 0x00000000 },
+
+ /* Mailbox Registers */
+ { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
+ 0x00000000, 0x000001ff },
+ { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
+ 0x00000000, 0x000001ff },
+ { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
+ 0x00000000, 0x000007ff },
+ { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
+ 0x00000000, 0x000001ff },
+
+ { 0xffff, 0x0000, 0x00000000, 0x00000000 },
+ };
+
+ is_5705 = is_5750 = 0;
+ if (tg3_flag(tp, 5705_PLUS)) {
+ is_5705 = 1;
+ if (tg3_flag(tp, 5750_PLUS))
+ is_5750 = 1;
+ }
+
+ for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
+ if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
+ continue;
+
+ if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
+ continue;
+
+ if (tg3_flag(tp, IS_5788) &&
+ (reg_tbl[i].flags & TG3_FL_NOT_5788))
+ continue;
+
+ if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
+ continue;
+
+ offset = (u32) reg_tbl[i].offset;
+ read_mask = reg_tbl[i].read_mask;
+ write_mask = reg_tbl[i].write_mask;
+
+ /* Save the original register content */
+ save_val = tr32(offset);
+
+ /* Determine the read-only value. */
+ read_val = save_val & read_mask;
+
+ /* Write zero to the register, then make sure the read-only bits
+ * are not changed and the read/write bits are all zeros.
+ */
+ tw32(offset, 0);
+
+ val = tr32(offset);
+
+ /* Test the read-only and read/write bits. */
+ if (((val & read_mask) != read_val) || (val & write_mask))
+ goto out;
+
+ /* Write ones to all the bits defined by RdMask and WrMask, then
+ * make sure the read-only bits are not changed and the
+ * read/write bits are all ones.
+ */
+ tw32(offset, read_mask | write_mask);
+
+ val = tr32(offset);
+
+ /* Test the read-only bits. */
+ if ((val & read_mask) != read_val)
+ goto out;
+
+ /* Test the read/write bits. */
+ if ((val & write_mask) != write_mask)
+ goto out;
+
+ tw32(offset, save_val);
+ }
+
+ return 0;
+
+out:
+ if (netif_msg_hw(tp))
+ netdev_err(tp->dev,
+ "Register test failed at offset %x\n", offset);
+ tw32(offset, save_val);
+ return -EIO;
+}
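+
+/* How a table entry drives the test above: MAC_ADDR_0_HIGH has
+ * read_mask 0 and write_mask 0x0000ffff, so its low 16 bits must read
+ * back as written for both the all-zeros and all-ones patterns, while
+ * the remaining bits are ignored.
+ */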
+
+static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
+{
+ static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
+ int i;
+ u32 j;
+
+ for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
+ for (j = 0; j < len; j += 4) {
+ u32 val;
+
+ tg3_write_mem(tp, offset + j, test_pattern[i]);
+ tg3_read_mem(tp, offset + j, &val);
+ if (val != test_pattern[i])
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+static int tg3_test_memory(struct tg3 *tp)
+{
+ static struct mem_entry {
+ u32 offset;
+ u32 len;
+ } mem_tbl_570x[] = {
+ { 0x00000000, 0x00b50},
+ { 0x00002000, 0x1c000},
+ { 0xffffffff, 0x00000}
+ }, mem_tbl_5705[] = {
+ { 0x00000100, 0x0000c},
+ { 0x00000200, 0x00008},
+ { 0x00004000, 0x00800},
+ { 0x00006000, 0x01000},
+ { 0x00008000, 0x02000},
+ { 0x00010000, 0x0e000},
+ { 0xffffffff, 0x00000}
+ }, mem_tbl_5755[] = {
+ { 0x00000200, 0x00008},
+ { 0x00004000, 0x00800},
+ { 0x00006000, 0x00800},
+ { 0x00008000, 0x02000},
+ { 0x00010000, 0x0c000},
+ { 0xffffffff, 0x00000}
+ }, mem_tbl_5906[] = {
+ { 0x00000200, 0x00008},
+ { 0x00004000, 0x00400},
+ { 0x00006000, 0x00400},
+ { 0x00008000, 0x01000},
+ { 0x00010000, 0x01000},
+ { 0xffffffff, 0x00000}
+ }, mem_tbl_5717[] = {
+ { 0x00000200, 0x00008},
+ { 0x00010000, 0x0a000},
+ { 0x00020000, 0x13c00},
+ { 0xffffffff, 0x00000}
+ }, mem_tbl_57765[] = {
+ { 0x00000200, 0x00008},
+ { 0x00004000, 0x00800},
+ { 0x00006000, 0x09800},
+ { 0x00010000, 0x0a000},
+ { 0xffffffff, 0x00000}
+ };
+ struct mem_entry *mem_tbl;
+ int err = 0;
+ int i;
+
+ if (tg3_flag(tp, 5717_PLUS))
+ mem_tbl = mem_tbl_5717;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ mem_tbl = mem_tbl_57765;
+ else if (tg3_flag(tp, 5755_PLUS))
+ mem_tbl = mem_tbl_5755;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ mem_tbl = mem_tbl_5906;
+ else if (tg3_flag(tp, 5705_PLUS))
+ mem_tbl = mem_tbl_5705;
+ else
+ mem_tbl = mem_tbl_570x;
+
+ for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
+ err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+#define TG3_TSO_MSS 500
+
+#define TG3_TSO_IP_HDR_LEN 20
+#define TG3_TSO_TCP_HDR_LEN 20
+#define TG3_TSO_TCP_OPT_LEN 12
+
+static const u8 tg3_tso_header[] = {
+0x08, 0x00,
+0x45, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x40, 0x00,
+0x40, 0x06, 0x00, 0x00,
+0x0a, 0x00, 0x00, 0x01,
+0x0a, 0x00, 0x00, 0x02,
+0x0d, 0x00, 0xe0, 0x00,
+0x00, 0x00, 0x01, 0x00,
+0x00, 0x00, 0x02, 0x00,
+0x80, 0x10, 0x10, 0x00,
+0x14, 0x09, 0x00, 0x00,
+0x01, 0x01, 0x08, 0x0a,
+0x11, 0x11, 0x11, 0x11,
+0x11, 0x11, 0x11, 0x11,
+};
+
+static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
+{
+ u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
+ u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
+ u32 budget;
+ struct sk_buff *skb, *rx_skb;
+ u8 *tx_data;
+ dma_addr_t map;
+ int num_pkts, tx_len, rx_len, i, err;
+ struct tg3_rx_buffer_desc *desc;
+ struct tg3_napi *tnapi, *rnapi;
+ struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
+
+ tnapi = &tp->napi[0];
+ rnapi = &tp->napi[0];
+ if (tp->irq_cnt > 1) {
+ if (tg3_flag(tp, ENABLE_RSS))
+ rnapi = &tp->napi[1];
+ if (tg3_flag(tp, ENABLE_TSS))
+ tnapi = &tp->napi[1];
+ }
+ coal_now = tnapi->coal_now | rnapi->coal_now;
+
+ err = -EIO;
+
+ tx_len = pktsz;
+ skb = netdev_alloc_skb(tp->dev, tx_len);
+ if (!skb)
+ return -ENOMEM;
+
+ tx_data = skb_put(skb, tx_len);
+ memcpy(tx_data, tp->dev->dev_addr, 6);
+ memset(tx_data + 6, 0x0, 8);
+
+ tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
+
+ if (tso_loopback) {
+ struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
+
+ u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
+ TG3_TSO_TCP_OPT_LEN;
+
+ memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
+ sizeof(tg3_tso_header));
+ mss = TG3_TSO_MSS;
+
+ val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
+ num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
+
+ /* Set the total length field in the IP header */
+ iph->tot_len = htons((u16)(mss + hdr_len));
+
+ base_flags = (TXD_FLAG_CPU_PRE_DMA |
+ TXD_FLAG_CPU_POST_DMA);
+
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3)) {
+ struct tcphdr *th;
+ val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
+ th = (struct tcphdr *)&tx_data[val];
+ th->check = 0;
+ } else
+ base_flags |= TXD_FLAG_TCPUDP_CSUM;
+
+ if (tg3_flag(tp, HW_TSO_3)) {
+ mss |= (hdr_len & 0xc) << 12;
+ if (hdr_len & 0x10)
+ base_flags |= 0x00000010;
+ base_flags |= (hdr_len & 0x3e0) << 5;
+ } else if (tg3_flag(tp, HW_TSO_2))
+ mss |= hdr_len << 9;
+ else if (tg3_flag(tp, HW_TSO_1) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ mss |= (TG3_TSO_TCP_OPT_LEN << 9);
+ } else {
+ base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
+ }
+
+ data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
+ } else {
+ num_pkts = 1;
+ data_off = ETH_HLEN;
+ }
+
+ for (i = data_off; i < tx_len; i++)
+ tx_data[i] = (u8) (i & 0xff);
+
+ map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(tp->pdev, map)) {
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
+
+ val = tnapi->tx_prod;
+ tnapi->tx_buffers[val].skb = skb;
+ dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
+
+ tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+ rnapi->coal_now);
+
+ udelay(10);
+
+ rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
+
+ budget = tg3_tx_avail(tnapi);
+ if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
+ base_flags | TXD_FLAG_END, mss, 0)) {
+ tnapi->tx_buffers[val].skb = NULL;
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
+
+ tnapi->tx_prod++;
+
+ tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
+ tr32_mailbox(tnapi->prodmbox);
+
+ udelay(10);
+
+ /* 350 usec to allow enough time on some 10/100 Mbps devices. */
+ for (i = 0; i < 35; i++) {
+ tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+ coal_now);
+
+ udelay(10);
+
+ tx_idx = tnapi->hw_status->idx[0].tx_consumer;
+ rx_idx = rnapi->hw_status->idx[0].rx_producer;
+ if ((tx_idx == tnapi->tx_prod) &&
+ (rx_idx == (rx_start_idx + num_pkts)))
+ break;
+ }
+
+ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
+ dev_kfree_skb(skb);
+
+ if (tx_idx != tnapi->tx_prod)
+ goto out;
+
+ if (rx_idx != rx_start_idx + num_pkts)
+ goto out;
+
+ val = data_off;
+ while (rx_idx != rx_start_idx) {
+ desc = &rnapi->rx_rcb[rx_start_idx++];
+ desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
+ opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
+
+ if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
+ (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
+ goto out;
+
+ rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
+ - ETH_FCS_LEN;
+
+ if (!tso_loopback) {
+ if (rx_len != tx_len)
+ goto out;
+
+ if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
+ if (opaque_key != RXD_OPAQUE_RING_STD)
+ goto out;
+ } else {
+ if (opaque_key != RXD_OPAQUE_RING_JUMBO)
+ goto out;
+ }
+ } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
+ (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
+ >> RXD_TCPCSUM_SHIFT != 0xffff) {
+ goto out;
+ }
+
+ if (opaque_key == RXD_OPAQUE_RING_STD) {
+ rx_skb = tpr->rx_std_buffers[desc_idx].skb;
+ map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
+ mapping);
+ } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
+ rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
+ map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
+ mapping);
+ } else
+ goto out;
+
+ pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
+ PCI_DMA_FROMDEVICE);
+
+ for (i = data_off; i < rx_len; i++, val++) {
+ if (*(rx_skb->data + i) != (u8) (val & 0xff))
+ goto out;
+ }
+ }
+
+ err = 0;
+
+ /* tg3_free_rings will unmap and free the rx_skb */
+out:
+ return err;
+}
+
+#define TG3_STD_LOOPBACK_FAILED 1
+#define TG3_JMB_LOOPBACK_FAILED 2
+#define TG3_TSO_LOOPBACK_FAILED 4
+#define TG3_LOOPBACK_FAILED \
+ (TG3_STD_LOOPBACK_FAILED | \
+ TG3_JMB_LOOPBACK_FAILED | \
+ TG3_TSO_LOOPBACK_FAILED)
+
+static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
+{
+ int err = -EIO;
+ u32 eee_cap;
+
+ eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
+ tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
+
+ if (!netif_running(tp->dev)) {
+ data[0] = TG3_LOOPBACK_FAILED;
+ data[1] = TG3_LOOPBACK_FAILED;
+ if (do_extlpbk)
+ data[2] = TG3_LOOPBACK_FAILED;
+ goto done;
+ }
+
+ err = tg3_reset_hw(tp, 1);
+ if (err) {
+ data[0] = TG3_LOOPBACK_FAILED;
+ data[1] = TG3_LOOPBACK_FAILED;
+ if (do_extlpbk)
+ data[2] = TG3_LOOPBACK_FAILED;
+ goto done;
+ }
+
+ if (tg3_flag(tp, ENABLE_RSS)) {
+ int i;
+
+ /* Reroute all rx packets to the 1st queue */
+ for (i = MAC_RSS_INDIR_TBL_0;
+ i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
+ tw32(i, 0x0);
+ }
+
+	/* HW errata - MAC loopback fails in some cases on 5780.
+	 * Normal traffic and PHY loopback are not affected by the
+	 * errata.  Also, the MAC loopback test is deprecated for
+	 * all newer ASIC revisions.
+	 */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+ !tg3_flag(tp, CPMU_PRESENT)) {
+ tg3_mac_loopback(tp, true);
+
+ if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
+ data[0] |= TG3_STD_LOOPBACK_FAILED;
+
+ if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
+ tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
+ data[0] |= TG3_JMB_LOOPBACK_FAILED;
+
+ tg3_mac_loopback(tp, false);
+ }
+
+ if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+ !tg3_flag(tp, USE_PHYLIB)) {
+ int i;
+
+ tg3_phy_lpbk_set(tp, 0, false);
+
+ /* Wait for link */
+ for (i = 0; i < 100; i++) {
+ if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+ break;
+ mdelay(1);
+ }
+
+ if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
+ data[1] |= TG3_STD_LOOPBACK_FAILED;
+ if (tg3_flag(tp, TSO_CAPABLE) &&
+ tg3_run_loopback(tp, ETH_FRAME_LEN, true))
+ data[1] |= TG3_TSO_LOOPBACK_FAILED;
+ if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
+ tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
+ data[1] |= TG3_JMB_LOOPBACK_FAILED;
+
+ if (do_extlpbk) {
+ tg3_phy_lpbk_set(tp, 0, true);
+
+ /* All link indications report up, but the hardware
+ * isn't really ready for about 20 msec. Double it
+ * to be sure.
+ */
+ mdelay(40);
+
+ if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
+ data[2] |= TG3_STD_LOOPBACK_FAILED;
+ if (tg3_flag(tp, TSO_CAPABLE) &&
+ tg3_run_loopback(tp, ETH_FRAME_LEN, true))
+ data[2] |= TG3_TSO_LOOPBACK_FAILED;
+ if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
+ tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
+ data[2] |= TG3_JMB_LOOPBACK_FAILED;
+ }
+
+ /* Re-enable gphy autopowerdown. */
+ if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
+ tg3_phy_toggle_apd(tp, true);
+ }
+
+ err = (data[0] | data[1] | data[2]) ? -EIO : 0;
+
+done:
+ tp->phy_flags |= eee_cap;
+
+ return err;
+}
+
+static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
+ u64 *data)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
+
+ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
+ tg3_power_up(tp)) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
+ return;
+ }
+
+ memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
+
+ if (tg3_test_nvram(tp) != 0) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ data[0] = 1;
+ }
+ if (!doextlpbk && tg3_test_link(tp)) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ data[1] = 1;
+ }
+ if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ int err, err2 = 0, irq_sync = 0;
+
+ if (netif_running(dev)) {
+ tg3_phy_stop(tp);
+ tg3_netif_stop(tp);
+ irq_sync = 1;
+ }
+
+ tg3_full_lock(tp, irq_sync);
+
+ tg3_halt(tp, RESET_KIND_SUSPEND, 1);
+ err = tg3_nvram_lock(tp);
+ tg3_halt_cpu(tp, RX_CPU_BASE);
+ if (!tg3_flag(tp, 5705_PLUS))
+ tg3_halt_cpu(tp, TX_CPU_BASE);
+ if (!err)
+ tg3_nvram_unlock(tp);
+
+ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+ tg3_phy_reset(tp);
+
+ if (tg3_test_registers(tp) != 0) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ data[2] = 1;
+ }
+
+ if (tg3_test_memory(tp) != 0) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ data[3] = 1;
+ }
+
+ if (doextlpbk)
+ etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+
+ if (tg3_test_loopback(tp, &data[4], doextlpbk))
+ etest->flags |= ETH_TEST_FL_FAILED;
+
+ tg3_full_unlock(tp);
+
+ if (tg3_test_interrupt(tp) != 0) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ data[7] = 1;
+ }
+
+ tg3_full_lock(tp, 0);
+
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ if (netif_running(dev)) {
+ tg3_flag_set(tp, INIT_COMPLETE);
+ err2 = tg3_restart_hw(tp, 1);
+ if (!err2)
+ tg3_netif_start(tp);
+ }
+
+ tg3_full_unlock(tp);
+
+ if (irq_sync && !err2)
+ tg3_phy_start(tp);
+ }
+ if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+ tg3_power_down(tp);
+}
+
+static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(ifr);
+ struct tg3 *tp = netdev_priv(dev);
+ int err;
+
+ if (tg3_flag(tp, USE_PHYLIB)) {
+ struct phy_device *phydev;
+ if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+ return -EAGAIN;
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ return phy_mii_ioctl(phydev, ifr, cmd);
+ }
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = tp->phy_addr;
+
+ /* fallthru */
+ case SIOCGMIIREG: {
+ u32 mii_regval;
+
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+ break; /* We have no PHY */
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ spin_lock_bh(&tp->lock);
+ err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
+ spin_unlock_bh(&tp->lock);
+
+ data->val_out = mii_regval;
+
+ return err;
+ }
+
+ case SIOCSMIIREG:
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+ break; /* We have no PHY */
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ spin_lock_bh(&tp->lock);
+ err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
+ spin_unlock_bh(&tp->lock);
+
+ return err;
+
+ default:
+ /* do nothing */
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ memcpy(ec, &tp->coal, sizeof(*ec));
+ return 0;
+}
+
+static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
+ u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
+
+ if (!tg3_flag(tp, 5705_PLUS)) {
+ max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
+ max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
+ max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
+ min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
+ }
+
+ if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
+ (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
+ (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
+ (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
+ (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
+ (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
+ (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
+ (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
+ (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
+ (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
+ return -EINVAL;
+
+ /* No rx interrupts will be generated if both are zero */
+ if ((ec->rx_coalesce_usecs == 0) &&
+ (ec->rx_max_coalesced_frames == 0))
+ return -EINVAL;
+
+ /* No tx interrupts will be generated if both are zero */
+ if ((ec->tx_coalesce_usecs == 0) &&
+ (ec->tx_max_coalesced_frames == 0))
+ return -EINVAL;
+
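+	/* For reference, these fields correspond to the standard ethtool
+	 * coalescing knobs; e.g. a command such as
+	 * "ethtool -C eth0 rx-usecs 20 rx-frames 5" arrives here as
+	 * rx_coalesce_usecs = 20 and rx_max_coalesced_frames = 5.
+	 */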
+ /* Only copy relevant parameters, ignore all others. */
+ tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
+ tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
+ tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
+ tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
+ tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
+ tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
+ tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
+ tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
+ tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
+
+ if (netif_running(dev)) {
+ tg3_full_lock(tp, 0);
+ __tg3_set_coalesce(tp, &tp->coal);
+ tg3_full_unlock(tp);
+ }
+ return 0;
+}
+
+static const struct ethtool_ops tg3_ethtool_ops = {
+ .get_settings = tg3_get_settings,
+ .set_settings = tg3_set_settings,
+ .get_drvinfo = tg3_get_drvinfo,
+ .get_regs_len = tg3_get_regs_len,
+ .get_regs = tg3_get_regs,
+ .get_wol = tg3_get_wol,
+ .set_wol = tg3_set_wol,
+ .get_msglevel = tg3_get_msglevel,
+ .set_msglevel = tg3_set_msglevel,
+ .nway_reset = tg3_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = tg3_get_eeprom_len,
+ .get_eeprom = tg3_get_eeprom,
+ .set_eeprom = tg3_set_eeprom,
+ .get_ringparam = tg3_get_ringparam,
+ .set_ringparam = tg3_set_ringparam,
+ .get_pauseparam = tg3_get_pauseparam,
+ .set_pauseparam = tg3_set_pauseparam,
+ .self_test = tg3_self_test,
+ .get_strings = tg3_get_strings,
+ .set_phys_id = tg3_set_phys_id,
+ .get_ethtool_stats = tg3_get_ethtool_stats,
+ .get_coalesce = tg3_get_coalesce,
+ .set_coalesce = tg3_set_coalesce,
+ .get_sset_count = tg3_get_sset_count,
+};
+
+static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
+{
+ u32 cursize, val, magic;
+
+ tp->nvram_size = EEPROM_CHIP_SIZE;
+
+ if (tg3_nvram_read(tp, 0, &magic) != 0)
+ return;
+
+ if ((magic != TG3_EEPROM_MAGIC) &&
+ ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
+ ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
+ return;
+
+ /*
+ * Size the chip by reading offsets at increasing powers of two.
+ * When we encounter our validation signature, we know the addressing
+ * has wrapped around, and thus have our chip size.
+ */
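+	/* Illustration: on a 128 KB (0x20000-byte) part, the reads at 0x10,
+	 * 0x20, ..., 0x10000 normally return ordinary data, while the read
+	 * at 0x20000 wraps to offset 0 and returns the magic value, so the
+	 * loop below exits with cursize == 0x20000.
+	 */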
+ cursize = 0x10;
+
+ while (cursize < tp->nvram_size) {
+ if (tg3_nvram_read(tp, cursize, &val) != 0)
+ return;
+
+ if (val == magic)
+ break;
+
+ cursize <<= 1;
+ }
+
+ tp->nvram_size = cursize;
+}
+
+static void __devinit tg3_get_nvram_size(struct tg3 *tp)
+{
+ u32 val;
+
+ if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
+ return;
+
+ /* Selfboot format */
+ if (val != TG3_EEPROM_MAGIC) {
+ tg3_get_eeprom_size(tp);
+ return;
+ }
+
+ if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
+ if (val != 0) {
+ /* This is confusing. We want to operate on the
+ * 16-bit value at offset 0xf2. The tg3_nvram_read()
+ * call will read from NVRAM and byteswap the data
+ * according to the byteswapping settings for all
+ * other register accesses. This ensures the data we
+ * want will always reside in the lower 16-bits.
+ * However, the data in NVRAM is in LE format, which
+ * means the data from the NVRAM read will always be
+ * opposite the endianness of the CPU. The 16-bit
+ * byteswap then brings the data to CPU endianness.
+ */
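+			/* Illustration: a 512 KB part stores the size-in-KB
+			 * value 0x0200 (512) at 0xf2 as LE bytes 00 02; the
+			 * low 16 bits of val then hold 0x0002, and
+			 * swab16(0x0002) == 0x0200, so nvram_size becomes
+			 * 512 * 1024.
+			 */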
+ tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
+ return;
+ }
+ }
+ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+}
+
+static void __devinit tg3_get_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+ if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
+ tg3_flag_set(tp, FLASH);
+ } else {
+ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+ tw32(NVRAM_CFG1, nvcfg1);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+ tg3_flag(tp, 5780_CLASS)) {
+ switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
+ case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ break;
+ case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
+ break;
+ case FLASH_VENDOR_ATMEL_EEPROM:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ break;
+ case FLASH_VENDOR_ST:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ break;
+ case FLASH_VENDOR_SAIFUN:
+ tp->nvram_jedecnum = JEDEC_SAIFUN;
+ tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
+ break;
+ case FLASH_VENDOR_SST_SMALL:
+ case FLASH_VENDOR_SST_LARGE:
+ tp->nvram_jedecnum = JEDEC_SST;
+ tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
+ break;
+ }
+ } else {
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ }
+}
+
+static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
+{
+ switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
+ case FLASH_5752PAGE_SIZE_256:
+ tp->nvram_pagesize = 256;
+ break;
+ case FLASH_5752PAGE_SIZE_512:
+ tp->nvram_pagesize = 512;
+ break;
+ case FLASH_5752PAGE_SIZE_1K:
+ tp->nvram_pagesize = 1024;
+ break;
+ case FLASH_5752PAGE_SIZE_2K:
+ tp->nvram_pagesize = 2048;
+ break;
+ case FLASH_5752PAGE_SIZE_4K:
+ tp->nvram_pagesize = 4096;
+ break;
+ case FLASH_5752PAGE_SIZE_264:
+ tp->nvram_pagesize = 264;
+ break;
+ case FLASH_5752PAGE_SIZE_528:
+ tp->nvram_pagesize = 528;
+ break;
+ }
+}
+
+static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+
+ /* NVRAM protection for TPM */
+ if (nvcfg1 & (1 << 27))
+ tg3_flag_set(tp, PROTECTED_NVRAM);
+
+ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+ case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
+ case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ break;
+ case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+ break;
+ case FLASH_5752VENDOR_ST_M45PE10:
+ case FLASH_5752VENDOR_ST_M45PE20:
+ case FLASH_5752VENDOR_ST_M45PE40:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+ break;
+ }
+
+ if (tg3_flag(tp, FLASH)) {
+ tg3_nvram_get_pagesize(tp, nvcfg1);
+ } else {
+ /* For eeprom, set pagesize to maximum eeprom size */
+ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+ tw32(NVRAM_CFG1, nvcfg1);
+ }
+}
+
+static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1, protect = 0;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+
+ /* NVRAM protection for TPM */
+ if (nvcfg1 & (1 << 27)) {
+ tg3_flag_set(tp, PROTECTED_NVRAM);
+ protect = 1;
+ }
+
+ nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
+ switch (nvcfg1) {
+ case FLASH_5755VENDOR_ATMEL_FLASH_1:
+ case FLASH_5755VENDOR_ATMEL_FLASH_2:
+ case FLASH_5755VENDOR_ATMEL_FLASH_3:
+ case FLASH_5755VENDOR_ATMEL_FLASH_5:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+ tp->nvram_pagesize = 264;
+ if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
+ nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
+ tp->nvram_size = (protect ? 0x3e200 :
+ TG3_NVRAM_SIZE_512KB);
+ else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
+ tp->nvram_size = (protect ? 0x1f200 :
+ TG3_NVRAM_SIZE_256KB);
+ else
+ tp->nvram_size = (protect ? 0x1f200 :
+ TG3_NVRAM_SIZE_128KB);
+ break;
+ case FLASH_5752VENDOR_ST_M45PE10:
+ case FLASH_5752VENDOR_ST_M45PE20:
+ case FLASH_5752VENDOR_ST_M45PE40:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+ tp->nvram_pagesize = 256;
+ if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
+ tp->nvram_size = (protect ?
+ TG3_NVRAM_SIZE_64KB :
+ TG3_NVRAM_SIZE_128KB);
+ else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
+ tp->nvram_size = (protect ?
+ TG3_NVRAM_SIZE_64KB :
+ TG3_NVRAM_SIZE_256KB);
+ else
+ tp->nvram_size = (protect ?
+ TG3_NVRAM_SIZE_128KB :
+ TG3_NVRAM_SIZE_512KB);
+ break;
+ }
+}
+
+static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+
+ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+ case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
+ case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
+ case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
+ case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+ tw32(NVRAM_CFG1, nvcfg1);
+ break;
+ case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+ case FLASH_5755VENDOR_ATMEL_FLASH_1:
+ case FLASH_5755VENDOR_ATMEL_FLASH_2:
+ case FLASH_5755VENDOR_ATMEL_FLASH_3:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+ tp->nvram_pagesize = 264;
+ break;
+ case FLASH_5752VENDOR_ST_M45PE10:
+ case FLASH_5752VENDOR_ST_M45PE20:
+ case FLASH_5752VENDOR_ST_M45PE40:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+ tp->nvram_pagesize = 256;
+ break;
+ }
+}
+
+static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1, protect = 0;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+
+ /* NVRAM protection for TPM */
+ if (nvcfg1 & (1 << 27)) {
+ tg3_flag_set(tp, PROTECTED_NVRAM);
+ protect = 1;
+ }
+
+ nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
+ switch (nvcfg1) {
+ case FLASH_5761VENDOR_ATMEL_ADB021D:
+ case FLASH_5761VENDOR_ATMEL_ADB041D:
+ case FLASH_5761VENDOR_ATMEL_ADB081D:
+ case FLASH_5761VENDOR_ATMEL_ADB161D:
+ case FLASH_5761VENDOR_ATMEL_MDB021D:
+ case FLASH_5761VENDOR_ATMEL_MDB041D:
+ case FLASH_5761VENDOR_ATMEL_MDB081D:
+ case FLASH_5761VENDOR_ATMEL_MDB161D:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+ tp->nvram_pagesize = 256;
+ break;
+ case FLASH_5761VENDOR_ST_A_M45PE20:
+ case FLASH_5761VENDOR_ST_A_M45PE40:
+ case FLASH_5761VENDOR_ST_A_M45PE80:
+ case FLASH_5761VENDOR_ST_A_M45PE16:
+ case FLASH_5761VENDOR_ST_M_M45PE20:
+ case FLASH_5761VENDOR_ST_M_M45PE40:
+ case FLASH_5761VENDOR_ST_M_M45PE80:
+ case FLASH_5761VENDOR_ST_M_M45PE16:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+ tp->nvram_pagesize = 256;
+ break;
+ }
+
+ if (protect) {
+ tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
+ } else {
+ switch (nvcfg1) {
+ case FLASH_5761VENDOR_ATMEL_ADB161D:
+ case FLASH_5761VENDOR_ATMEL_MDB161D:
+ case FLASH_5761VENDOR_ST_A_M45PE16:
+ case FLASH_5761VENDOR_ST_M_M45PE16:
+ tp->nvram_size = TG3_NVRAM_SIZE_2MB;
+ break;
+ case FLASH_5761VENDOR_ATMEL_ADB081D:
+ case FLASH_5761VENDOR_ATMEL_MDB081D:
+ case FLASH_5761VENDOR_ST_A_M45PE80:
+ case FLASH_5761VENDOR_ST_M_M45PE80:
+ tp->nvram_size = TG3_NVRAM_SIZE_1MB;
+ break;
+ case FLASH_5761VENDOR_ATMEL_ADB041D:
+ case FLASH_5761VENDOR_ATMEL_MDB041D:
+ case FLASH_5761VENDOR_ST_A_M45PE40:
+ case FLASH_5761VENDOR_ST_M_M45PE40:
+ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+ break;
+ case FLASH_5761VENDOR_ATMEL_ADB021D:
+ case FLASH_5761VENDOR_ATMEL_MDB021D:
+ case FLASH_5761VENDOR_ST_A_M45PE20:
+ case FLASH_5761VENDOR_ST_M_M45PE20:
+ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+ break;
+ }
+ }
+}
+
+static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
+{
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+}
+
+static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+
+ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+ case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
+ case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+ tw32(NVRAM_CFG1, nvcfg1);
+ return;
+ case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+ case FLASH_57780VENDOR_ATMEL_AT45DB011D:
+ case FLASH_57780VENDOR_ATMEL_AT45DB011B:
+ case FLASH_57780VENDOR_ATMEL_AT45DB021D:
+ case FLASH_57780VENDOR_ATMEL_AT45DB021B:
+ case FLASH_57780VENDOR_ATMEL_AT45DB041D:
+ case FLASH_57780VENDOR_ATMEL_AT45DB041B:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+
+ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+ case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+ case FLASH_57780VENDOR_ATMEL_AT45DB011D:
+ case FLASH_57780VENDOR_ATMEL_AT45DB011B:
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ break;
+ case FLASH_57780VENDOR_ATMEL_AT45DB021D:
+ case FLASH_57780VENDOR_ATMEL_AT45DB021B:
+ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+ break;
+ case FLASH_57780VENDOR_ATMEL_AT45DB041D:
+ case FLASH_57780VENDOR_ATMEL_AT45DB041B:
+ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+ break;
+ }
+ break;
+ case FLASH_5752VENDOR_ST_M45PE10:
+ case FLASH_5752VENDOR_ST_M45PE20:
+ case FLASH_5752VENDOR_ST_M45PE40:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+
+ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+ case FLASH_5752VENDOR_ST_M45PE10:
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ break;
+ case FLASH_5752VENDOR_ST_M45PE20:
+ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+ break;
+ case FLASH_5752VENDOR_ST_M45PE40:
+ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+ break;
+ }
+ break;
+ default:
+ tg3_flag_set(tp, NO_NVRAM);
+ return;
+ }
+
+ tg3_nvram_get_pagesize(tp, nvcfg1);
+ if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
+ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+}
+
+static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+
+ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+ case FLASH_5717VENDOR_ATMEL_EEPROM:
+ case FLASH_5717VENDOR_MICRO_EEPROM:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+ tw32(NVRAM_CFG1, nvcfg1);
+ return;
+ case FLASH_5717VENDOR_ATMEL_MDB011D:
+ case FLASH_5717VENDOR_ATMEL_ADB011B:
+ case FLASH_5717VENDOR_ATMEL_ADB011D:
+ case FLASH_5717VENDOR_ATMEL_MDB021D:
+ case FLASH_5717VENDOR_ATMEL_ADB021B:
+ case FLASH_5717VENDOR_ATMEL_ADB021D:
+ case FLASH_5717VENDOR_ATMEL_45USPT:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+
+ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+ case FLASH_5717VENDOR_ATMEL_MDB021D:
+			/* Detect size with tg3_get_nvram_size() */
+ break;
+ case FLASH_5717VENDOR_ATMEL_ADB021B:
+ case FLASH_5717VENDOR_ATMEL_ADB021D:
+ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+ break;
+ default:
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ break;
+ }
+ break;
+ case FLASH_5717VENDOR_ST_M_M25PE10:
+ case FLASH_5717VENDOR_ST_A_M25PE10:
+ case FLASH_5717VENDOR_ST_M_M45PE10:
+ case FLASH_5717VENDOR_ST_A_M45PE10:
+ case FLASH_5717VENDOR_ST_M_M25PE20:
+ case FLASH_5717VENDOR_ST_A_M25PE20:
+ case FLASH_5717VENDOR_ST_M_M45PE20:
+ case FLASH_5717VENDOR_ST_A_M45PE20:
+ case FLASH_5717VENDOR_ST_25USPT:
+ case FLASH_5717VENDOR_ST_45USPT:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+
+ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+ case FLASH_5717VENDOR_ST_M_M25PE20:
+ case FLASH_5717VENDOR_ST_M_M45PE20:
+			/* Detect size with tg3_get_nvram_size() */
+ break;
+ case FLASH_5717VENDOR_ST_A_M25PE20:
+ case FLASH_5717VENDOR_ST_A_M45PE20:
+ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+ break;
+ default:
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ break;
+ }
+ break;
+ default:
+ tg3_flag_set(tp, NO_NVRAM);
+ return;
+ }
+
+ tg3_nvram_get_pagesize(tp, nvcfg1);
+ if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
+ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+}
+
+static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1, nvmpinstrp;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+ nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
+
+ switch (nvmpinstrp) {
+ case FLASH_5720_EEPROM_HD:
+ case FLASH_5720_EEPROM_LD:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+
+ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+ tw32(NVRAM_CFG1, nvcfg1);
+ if (nvmpinstrp == FLASH_5720_EEPROM_HD)
+ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+ else
+ tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
+ return;
+ case FLASH_5720VENDOR_M_ATMEL_DB011D:
+ case FLASH_5720VENDOR_A_ATMEL_DB011B:
+ case FLASH_5720VENDOR_A_ATMEL_DB011D:
+ case FLASH_5720VENDOR_M_ATMEL_DB021D:
+ case FLASH_5720VENDOR_A_ATMEL_DB021B:
+ case FLASH_5720VENDOR_A_ATMEL_DB021D:
+ case FLASH_5720VENDOR_M_ATMEL_DB041D:
+ case FLASH_5720VENDOR_A_ATMEL_DB041B:
+ case FLASH_5720VENDOR_A_ATMEL_DB041D:
+ case FLASH_5720VENDOR_M_ATMEL_DB081D:
+ case FLASH_5720VENDOR_A_ATMEL_DB081D:
+ case FLASH_5720VENDOR_ATMEL_45USPT:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+
+ switch (nvmpinstrp) {
+ case FLASH_5720VENDOR_M_ATMEL_DB021D:
+ case FLASH_5720VENDOR_A_ATMEL_DB021B:
+ case FLASH_5720VENDOR_A_ATMEL_DB021D:
+ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+ break;
+ case FLASH_5720VENDOR_M_ATMEL_DB041D:
+ case FLASH_5720VENDOR_A_ATMEL_DB041B:
+ case FLASH_5720VENDOR_A_ATMEL_DB041D:
+ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+ break;
+ case FLASH_5720VENDOR_M_ATMEL_DB081D:
+ case FLASH_5720VENDOR_A_ATMEL_DB081D:
+ tp->nvram_size = TG3_NVRAM_SIZE_1MB;
+ break;
+ default:
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ break;
+ }
+ break;
+ case FLASH_5720VENDOR_M_ST_M25PE10:
+ case FLASH_5720VENDOR_M_ST_M45PE10:
+ case FLASH_5720VENDOR_A_ST_M25PE10:
+ case FLASH_5720VENDOR_A_ST_M45PE10:
+ case FLASH_5720VENDOR_M_ST_M25PE20:
+ case FLASH_5720VENDOR_M_ST_M45PE20:
+ case FLASH_5720VENDOR_A_ST_M25PE20:
+ case FLASH_5720VENDOR_A_ST_M45PE20:
+ case FLASH_5720VENDOR_M_ST_M25PE40:
+ case FLASH_5720VENDOR_M_ST_M45PE40:
+ case FLASH_5720VENDOR_A_ST_M25PE40:
+ case FLASH_5720VENDOR_A_ST_M45PE40:
+ case FLASH_5720VENDOR_M_ST_M25PE80:
+ case FLASH_5720VENDOR_M_ST_M45PE80:
+ case FLASH_5720VENDOR_A_ST_M25PE80:
+ case FLASH_5720VENDOR_A_ST_M45PE80:
+ case FLASH_5720VENDOR_ST_25USPT:
+ case FLASH_5720VENDOR_ST_45USPT:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+
+ switch (nvmpinstrp) {
+ case FLASH_5720VENDOR_M_ST_M25PE20:
+ case FLASH_5720VENDOR_M_ST_M45PE20:
+ case FLASH_5720VENDOR_A_ST_M25PE20:
+ case FLASH_5720VENDOR_A_ST_M45PE20:
+ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+ break;
+ case FLASH_5720VENDOR_M_ST_M25PE40:
+ case FLASH_5720VENDOR_M_ST_M45PE40:
+ case FLASH_5720VENDOR_A_ST_M25PE40:
+ case FLASH_5720VENDOR_A_ST_M45PE40:
+ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+ break;
+ case FLASH_5720VENDOR_M_ST_M25PE80:
+ case FLASH_5720VENDOR_M_ST_M45PE80:
+ case FLASH_5720VENDOR_A_ST_M25PE80:
+ case FLASH_5720VENDOR_A_ST_M45PE80:
+ tp->nvram_size = TG3_NVRAM_SIZE_1MB;
+ break;
+ default:
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ break;
+ }
+ break;
+ default:
+ tg3_flag_set(tp, NO_NVRAM);
+ return;
+ }
+
+ tg3_nvram_get_pagesize(tp, nvcfg1);
+ if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
+ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+}
+
+/* Chips other than 5700/5701 use the NVRAM for fetching info. */
+static void __devinit tg3_nvram_init(struct tg3 *tp)
+{
+ tw32_f(GRC_EEPROM_ADDR,
+ (EEPROM_ADDR_FSM_RESET |
+ (EEPROM_DEFAULT_CLOCK_PERIOD <<
+ EEPROM_ADDR_CLKPERD_SHIFT)));
+
+ msleep(1);
+
+ /* Enable seeprom accesses. */
+ tw32_f(GRC_LOCAL_CTRL,
+ tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
+ udelay(100);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
+ tg3_flag_set(tp, NVRAM);
+
+ if (tg3_nvram_lock(tp)) {
+ netdev_warn(tp->dev,
+ "Cannot get nvram lock, %s failed\n",
+ __func__);
+ return;
+ }
+ tg3_enable_nvram_access(tp);
+
+ tp->nvram_size = 0;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ tg3_get_5752_nvram_info(tp);
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ tg3_get_5755_nvram_info(tp);
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ tg3_get_5787_nvram_info(tp);
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ tg3_get_5761_nvram_info(tp);
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tg3_get_5906_nvram_info(tp);
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_get_57780_nvram_info(tp);
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ tg3_get_5717_nvram_info(tp);
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ tg3_get_5720_nvram_info(tp);
+ else
+ tg3_get_nvram_info(tp);
+
+ if (tp->nvram_size == 0)
+ tg3_get_nvram_size(tp);
+
+ tg3_disable_nvram_access(tp);
+ tg3_nvram_unlock(tp);
+
+ } else {
+ tg3_flag_clear(tp, NVRAM);
+ tg3_flag_clear(tp, NVRAM_BUFFERED);
+
+ tg3_get_eeprom_size(tp);
+ }
+}
+
+static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
+ u32 offset, u32 len, u8 *buf)
+{
+ int i, j, rc = 0;
+ u32 val;
+
+ for (i = 0; i < len; i += 4) {
+ u32 addr;
+ __be32 data;
+
+ addr = offset + i;
+
+ memcpy(&data, buf + i, 4);
+
+ /*
+ * The SEEPROM interface expects the data to always be opposite
+ * the native endian format. We accomplish this by reversing
+ * all the operations that would have been performed on the
+ * data from a call to tg3_nvram_read_be32().
+ */
+ tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
+
+ val = tr32(GRC_EEPROM_ADDR);
+ tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
+
+ val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
+ EEPROM_ADDR_READ);
+ tw32(GRC_EEPROM_ADDR, val |
+ (0 << EEPROM_ADDR_DEVID_SHIFT) |
+ (addr & EEPROM_ADDR_ADDR_MASK) |
+ EEPROM_ADDR_START |
+ EEPROM_ADDR_WRITE);
+
+ for (j = 0; j < 1000; j++) {
+ val = tr32(GRC_EEPROM_ADDR);
+
+ if (val & EEPROM_ADDR_COMPLETE)
+ break;
+ msleep(1);
+ }
+ if (!(val & EEPROM_ADDR_COMPLETE)) {
+ rc = -EBUSY;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
+ u8 *buf)
+{
+ int ret = 0;
+ u32 pagesize = tp->nvram_pagesize;
+ u32 pagemask = pagesize - 1;
+ u32 nvram_cmd;
+ u8 *tmp;
+
+ tmp = kmalloc(pagesize, GFP_KERNEL);
+ if (tmp == NULL)
+ return -ENOMEM;
+
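+	/* Unbuffered parts are written one full flash page at a time:
+	 * read the current page into tmp, merge in the caller's bytes,
+	 * issue a write-enable, erase the page, then write-enable again
+	 * and stream the merged page back out one word at a time.
+	 */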
+ while (len) {
+ int j;
+ u32 phy_addr, page_off, size;
+
+ phy_addr = offset & ~pagemask;
+
+ for (j = 0; j < pagesize; j += 4) {
+ ret = tg3_nvram_read_be32(tp, phy_addr + j,
+ (__be32 *) (tmp + j));
+ if (ret)
+ break;
+ }
+ if (ret)
+ break;
+
+ page_off = offset & pagemask;
+ size = pagesize;
+ if (len < size)
+ size = len;
+
+ len -= size;
+
+ memcpy(tmp + page_off, buf, size);
+
+ offset = offset + (pagesize - page_off);
+
+ tg3_enable_nvram_access(tp);
+
+ /*
+ * Before we can erase the flash page, we need
+ * to issue a special "write enable" command.
+ */
+ nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+
+ if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+ break;
+
+ /* Erase the target page */
+ tw32(NVRAM_ADDR, phy_addr);
+
+ nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
+ NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
+
+ if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+ break;
+
+ /* Issue another write enable to start the write. */
+ nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+
+ if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+ break;
+
+ for (j = 0; j < pagesize; j += 4) {
+ __be32 data;
+
+ data = *((__be32 *) (tmp + j));
+
+ tw32(NVRAM_WRDATA, be32_to_cpu(data));
+
+ tw32(NVRAM_ADDR, phy_addr + j);
+
+ nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
+ NVRAM_CMD_WR;
+
+ if (j == 0)
+ nvram_cmd |= NVRAM_CMD_FIRST;
+ else if (j == (pagesize - 4))
+ nvram_cmd |= NVRAM_CMD_LAST;
+
+ if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
+ break;
+ }
+ if (ret)
+ break;
+ }
+
+ nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+ tg3_nvram_exec_cmd(tp, nvram_cmd);
+
+ kfree(tmp);
+
+ return ret;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
+ u8 *buf)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < len; i += 4, offset += 4) {
+ u32 page_off, phy_addr, nvram_cmd;
+ __be32 data;
+
+ memcpy(&data, buf + i, 4);
+ tw32(NVRAM_WRDATA, be32_to_cpu(data));
+
+ page_off = offset % tp->nvram_pagesize;
+
+ phy_addr = tg3_nvram_phys_addr(tp, offset);
+
+ tw32(NVRAM_ADDR, phy_addr);
+
+ nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
+
+ if (page_off == 0 || i == 0)
+ nvram_cmd |= NVRAM_CMD_FIRST;
+ if (page_off == (tp->nvram_pagesize - 4))
+ nvram_cmd |= NVRAM_CMD_LAST;
+
+ if (i == (len - 4))
+ nvram_cmd |= NVRAM_CMD_LAST;
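+		/* Illustration: with a 256-byte page, the word at page_off 0
+		 * is tagged FIRST, the word at page_off 252 (pagesize - 4) is
+		 * tagged LAST, and the final word of the transfer is tagged
+		 * LAST as well so the controller closes the burst.
+		 */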
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
+ !tg3_flag(tp, 5755_PLUS) &&
+ (tp->nvram_jedecnum == JEDEC_ST) &&
+ (nvram_cmd & NVRAM_CMD_FIRST)) {
+
+			if ((ret = tg3_nvram_exec_cmd(tp,
+				NVRAM_CMD_WREN | NVRAM_CMD_GO |
+				NVRAM_CMD_DONE)))
+				break;
+ }
+ if (!tg3_flag(tp, FLASH)) {
+ /* We always do complete word writes to eeprom. */
+ nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
+ }
+
+ if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
+ break;
+ }
+ return ret;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
+{
+ int ret;
+
+ if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
+ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
+ ~GRC_LCLCTRL_GPIO_OUTPUT1);
+ udelay(40);
+ }
+
+ if (!tg3_flag(tp, NVRAM)) {
+ ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
+ } else {
+ u32 grc_mode;
+
+ ret = tg3_nvram_lock(tp);
+ if (ret)
+ return ret;
+
+ tg3_enable_nvram_access(tp);
+ if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
+ tw32(NVRAM_WRITE1, 0x406);
+
+ grc_mode = tr32(GRC_MODE);
+ tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
+
+ if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
+ ret = tg3_nvram_write_block_buffered(tp, offset, len,
+ buf);
+ } else {
+ ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
+ buf);
+ }
+
+ grc_mode = tr32(GRC_MODE);
+ tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
+
+ tg3_disable_nvram_access(tp);
+ tg3_nvram_unlock(tp);
+ }
+
+ if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
+ tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
+ udelay(40);
+ }
+
+ return ret;
+}
+
+struct subsys_tbl_ent {
+ u16 subsys_vendor, subsys_devid;
+ u32 phy_id;
+};
+
+static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
+ /* Broadcom boards. */
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
+
+ /* 3com boards. */
+ { TG3PCI_SUBVENDOR_ID_3COM,
+ TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
+ { TG3PCI_SUBVENDOR_ID_3COM,
+ TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_3COM,
+ TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
+ { TG3PCI_SUBVENDOR_ID_3COM,
+ TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_3COM,
+ TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
+
+ /* DELL boards. */
+ { TG3PCI_SUBVENDOR_ID_DELL,
+ TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
+ { TG3PCI_SUBVENDOR_ID_DELL,
+ TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
+ { TG3PCI_SUBVENDOR_ID_DELL,
+ TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
+ { TG3PCI_SUBVENDOR_ID_DELL,
+ TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
+
+ /* Compaq boards. */
+ { TG3PCI_SUBVENDOR_ID_COMPAQ,
+ TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_COMPAQ,
+ TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_COMPAQ,
+ TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
+ { TG3PCI_SUBVENDOR_ID_COMPAQ,
+ TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_COMPAQ,
+ TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
+
+ /* IBM boards. */
+ { TG3PCI_SUBVENDOR_ID_IBM,
+ TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
+};
+
+static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
+ if ((subsys_id_to_phy_id[i].subsys_vendor ==
+ tp->pdev->subsystem_vendor) &&
+ (subsys_id_to_phy_id[i].subsys_devid ==
+ tp->pdev->subsystem_device))
+ return &subsys_id_to_phy_id[i];
+ }
+ return NULL;
+}
+
+static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
+{
+ u32 val;
+
+ tp->phy_id = TG3_PHY_ID_INVALID;
+ tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+
+ /* Assume an onboard device and WOL capable by default. */
+ tg3_flag_set(tp, EEPROM_WRITE_PROT);
+ tg3_flag_set(tp, WOL_CAP);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
+ tg3_flag_clear(tp, EEPROM_WRITE_PROT);
+ tg3_flag_set(tp, IS_NIC);
+ }
+ val = tr32(VCPU_CFGSHDW);
+ if (val & VCPU_CFGSHDW_ASPM_DBNC)
+ tg3_flag_set(tp, ASPM_WORKAROUND);
+ if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
+ (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
+ tg3_flag_set(tp, WOL_ENABLE);
+ device_set_wakeup_enable(&tp->pdev->dev, true);
+ }
+ goto done;
+ }
+
+ tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
+ if (val == NIC_SRAM_DATA_SIG_MAGIC) {
+ u32 nic_cfg, led_cfg;
+ u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
+ int eeprom_phy_serdes = 0;
+
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
+ tp->nic_sram_data_cfg = nic_cfg;
+
+ tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
+ ver >>= NIC_SRAM_DATA_VER_SHIFT;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
+ (ver > 0) && (ver < 0x100))
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
+
+ if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
+ NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
+ eeprom_phy_serdes = 1;
+
+ tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
+ if (nic_phy_id != 0) {
+ u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
+ u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
+
+ eeprom_phy_id = (id1 >> 16) << 10;
+ eeprom_phy_id |= (id2 & 0xfc00) << 16;
+ eeprom_phy_id |= (id2 & 0x03ff) << 0;
+ } else
+ eeprom_phy_id = 0;
+
+ tp->phy_id = eeprom_phy_id;
+ if (eeprom_phy_serdes) {
+ if (!tg3_flag(tp, 5705_PLUS))
+ tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
+ else
+ tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
+ }
+
+ if (tg3_flag(tp, 5750_PLUS))
+ led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
+ SHASTA_EXT_LED_MODE_MASK);
+ else
+ led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
+
+ switch (led_cfg) {
+ default:
+ case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
+ tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+ break;
+
+ case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
+ tp->led_ctrl = LED_CTRL_MODE_PHY_2;
+ break;
+
+ case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
+ tp->led_ctrl = LED_CTRL_MODE_MAC;
+
+			/* Default to PHY_1_MODE if 0 (MAC_MODE) is read, as
+			 * happens with some older 5700/5701 bootcode.
+			 */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+ ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) ==
+ ASIC_REV_5701)
+ tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+
+ break;
+
+ case SHASTA_EXT_LED_SHARED:
+ tp->led_ctrl = LED_CTRL_MODE_SHARED;
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
+ tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
+ LED_CTRL_MODE_PHY_2);
+ break;
+
+ case SHASTA_EXT_LED_MAC:
+ tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
+ break;
+
+ case SHASTA_EXT_LED_COMBO:
+ tp->led_ctrl = LED_CTRL_MODE_COMBO;
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
+ tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
+ LED_CTRL_MODE_PHY_2);
+ break;
+
+ }
+
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
+ tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
+ tp->led_ctrl = LED_CTRL_MODE_PHY_2;
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
+ tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+
+ if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
+ tg3_flag_set(tp, EEPROM_WRITE_PROT);
+ if ((tp->pdev->subsystem_vendor ==
+ PCI_VENDOR_ID_ARIMA) &&
+ (tp->pdev->subsystem_device == 0x205a ||
+ tp->pdev->subsystem_device == 0x2063))
+ tg3_flag_clear(tp, EEPROM_WRITE_PROT);
+ } else {
+ tg3_flag_clear(tp, EEPROM_WRITE_PROT);
+ tg3_flag_set(tp, IS_NIC);
+ }
+
+ if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
+ tg3_flag_set(tp, ENABLE_ASF);
+ if (tg3_flag(tp, 5750_PLUS))
+ tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
+ }
+
+ if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
+ tg3_flag(tp, 5750_PLUS))
+ tg3_flag_set(tp, ENABLE_APE);
+
+ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
+ !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
+ tg3_flag_clear(tp, WOL_CAP);
+
+ if (tg3_flag(tp, WOL_CAP) &&
+ (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
+ tg3_flag_set(tp, WOL_ENABLE);
+ device_set_wakeup_enable(&tp->pdev->dev, true);
+ }
+
+ if (cfg2 & (1 << 17))
+ tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
+
+		/* SerDes signal pre-emphasis in register 0x590 is set by
+		 * the bootcode if bit 18 is set.
+		 */
+ if (cfg2 & (1 << 18))
+ tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
+
+ if ((tg3_flag(tp, 57765_PLUS) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
+ (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
+ tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
+
+ if (tg3_flag(tp, PCI_EXPRESS) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ !tg3_flag(tp, 57765_PLUS)) {
+ u32 cfg3;
+
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
+ if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
+ tg3_flag_set(tp, ASPM_WORKAROUND);
+ }
+
+ if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
+ tg3_flag_set(tp, RGMII_INBAND_DISABLE);
+ if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
+ tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
+ if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
+ tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
+ }
+done:
+ if (tg3_flag(tp, WOL_CAP))
+ device_set_wakeup_enable(&tp->pdev->dev,
+ tg3_flag(tp, WOL_ENABLE));
+ else
+ device_set_wakeup_capable(&tp->pdev->dev, false);
+}
+
+static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
+{
+ int i;
+ u32 val;
+
+ tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
+ tw32(OTP_CTRL, cmd);
+
+ /* Wait for up to 1 ms for command to execute. */
+ for (i = 0; i < 100; i++) {
+ val = tr32(OTP_STATUS);
+ if (val & OTP_STATUS_CMD_DONE)
+ break;
+ udelay(10);
+ }
+
+ return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
+}
+
+/* Read the gphy configuration from the OTP region of the chip. The gphy
+ * configuration is a 32-bit value that straddles the alignment boundary.
+ * We do two 32-bit reads and then shift and merge the results.
+ */
+static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
+{
+ u32 bhalf_otp, thalf_otp;
+
+ tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
+
+ if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
+ return 0;
+
+ tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
+
+ if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
+ return 0;
+
+ thalf_otp = tr32(OTP_READ_DATA);
+
+ tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
+
+ if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
+ return 0;
+
+ bhalf_otp = tr32(OTP_READ_DATA);
+
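+	/* Illustration: with thalf_otp == 0xAAAABBBB and bhalf_otp ==
+	 * 0xCCCCDDDD, the merge below yields 0xBBBBCCCC -- the low half
+	 * of the first read glued to the high half of the second.
+	 */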
+ return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
+}
+
+static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
+{
+ u32 adv = ADVERTISED_Autoneg |
+ ADVERTISED_Pause;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
+ adv |= ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+ adv |= ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_TP;
+ else
+ adv |= ADVERTISED_FIBRE;
+
+ tp->link_config.advertising = adv;
+ tp->link_config.speed = SPEED_INVALID;
+ tp->link_config.duplex = DUPLEX_INVALID;
+ tp->link_config.autoneg = AUTONEG_ENABLE;
+ tp->link_config.active_speed = SPEED_INVALID;
+ tp->link_config.active_duplex = DUPLEX_INVALID;
+ tp->link_config.orig_speed = SPEED_INVALID;
+ tp->link_config.orig_duplex = DUPLEX_INVALID;
+ tp->link_config.orig_autoneg = AUTONEG_INVALID;
+}
+
+static int __devinit tg3_phy_probe(struct tg3 *tp)
+{
+ u32 hw_phy_id_1, hw_phy_id_2;
+ u32 hw_phy_id, hw_phy_id_masked;
+ int err;
+
+ /* flow control autonegotiation is default behavior */
+ tg3_flag_set(tp, PAUSE_AUTONEG);
+ tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
+
+ if (tg3_flag(tp, USE_PHYLIB))
+ return tg3_phy_init(tp);
+
+ /* Reading the PHY ID register can conflict with ASF
+ * firmware access to the PHY hardware.
+ */
+ err = 0;
+ if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
+ hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
+ } else {
+ /* Now read the physical PHY_ID from the chip and verify
+		 * that it is sane. If it doesn't look good, we fall back
+		 * to the PHY ID found in the eeprom area or, failing that,
+		 * to the hard-coded subsys device table.
+ */
+ err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
+ err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
+
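+		/* Pack the two MII ID words into tg3's internal PHY ID
+		 * format: bits 25..10 take PHYSID1 (the high OUI bits),
+		 * bits 31..26 take the OUI bits from PHYSID2, and bits 9..0
+		 * keep the model and revision fields.
+		 */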
+ hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
+ hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
+ hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
+
+ hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
+ }
+
+ if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
+ tp->phy_id = hw_phy_id;
+ if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
+ tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
+ else
+ tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
+ } else {
+ if (tp->phy_id != TG3_PHY_ID_INVALID) {
+ /* Do nothing, phy ID already set up in
+ * tg3_get_eeprom_hw_cfg().
+ */
+ } else {
+ struct subsys_tbl_ent *p;
+
+ /* No eeprom signature? Try the hardcoded
+ * subsys device table.
+ */
+ p = tg3_lookup_by_subsys(tp);
+ if (!p)
+ return -ENODEV;
+
+ tp->phy_id = p->phy_id;
+ if (!tp->phy_id ||
+ tp->phy_id == TG3_PHY_ID_BCM8002)
+ tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
+ }
+ }
+
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
+ (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
+ tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
+
+ tg3_phy_init_link_config(tp);
+
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
+ !tg3_flag(tp, ENABLE_APE) &&
+ !tg3_flag(tp, ENABLE_ASF)) {
+ u32 bmsr, mask;
+
+ tg3_readphy(tp, MII_BMSR, &bmsr);
+ if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+ (bmsr & BMSR_LSTATUS))
+ goto skip_phy_reset;
+
+ err = tg3_phy_reset(tp);
+ if (err)
+ return err;
+
+ tg3_phy_set_wirespeed(tp);
+
+ mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
+ if (!tg3_copper_is_advertising_all(tp, mask)) {
+ tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
+ tp->link_config.flowctrl);
+
+ tg3_writephy(tp, MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+ }
+ }
+
+skip_phy_reset:
+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
+ err = tg3_init_5401phy_dsp(tp);
+ if (err)
+ return err;
+
+ err = tg3_init_5401phy_dsp(tp);
+ }
+
+ return err;
+}
+
+static void __devinit tg3_read_vpd(struct tg3 *tp)
+{
+ u8 *vpd_data;
+ unsigned int block_end, rosize, len;
+ u32 vpdlen;
+ int j, i = 0;
+
+ vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
+ if (!vpd_data)
+ goto out_no_vpd;
+
+ i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
+ if (i < 0)
+ goto out_not_found;
+
+ rosize = pci_vpd_lrdt_size(&vpd_data[i]);
+ block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
+ i += PCI_VPD_LRDT_TAG_SIZE;
+
+ if (block_end > vpdlen)
+ goto out_not_found;
+
+ j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+ PCI_VPD_RO_KEYWORD_MFR_ID);
+ if (j > 0) {
+ len = pci_vpd_info_field_size(&vpd_data[j]);
+
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (j + len > block_end || len != 4 ||
+ memcmp(&vpd_data[j], "1028", 4))
+ goto partno;
+
+ j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+ PCI_VPD_RO_KEYWORD_VENDOR0);
+ if (j < 0)
+ goto partno;
+
+ len = pci_vpd_info_field_size(&vpd_data[j]);
+
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (j + len > block_end)
+ goto partno;
+
+ memcpy(tp->fw_ver, &vpd_data[j], len);
+ strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
+ }
+
+partno:
+ i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+ PCI_VPD_RO_KEYWORD_PARTNO);
+ if (i < 0)
+ goto out_not_found;
+
+ len = pci_vpd_info_field_size(&vpd_data[i]);
+
+ i += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (len > TG3_BPN_SIZE ||
+ (len + i) > vpdlen)
+ goto out_not_found;
+
+ memcpy(tp->board_part_number, &vpd_data[i], len);
+
+out_not_found:
+ kfree(vpd_data);
+ if (tp->board_part_number[0])
+ return;
+
+out_no_vpd:
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
+ strcpy(tp->board_part_number, "BCM5717");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
+ strcpy(tp->board_part_number, "BCM5718");
+ else
+ goto nomatch;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
+ strcpy(tp->board_part_number, "BCM57780");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
+ strcpy(tp->board_part_number, "BCM57760");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
+ strcpy(tp->board_part_number, "BCM57790");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
+ strcpy(tp->board_part_number, "BCM57788");
+ else
+ goto nomatch;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
+ strcpy(tp->board_part_number, "BCM57761");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
+ strcpy(tp->board_part_number, "BCM57765");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
+ strcpy(tp->board_part_number, "BCM57781");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
+ strcpy(tp->board_part_number, "BCM57785");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
+ strcpy(tp->board_part_number, "BCM57791");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+ strcpy(tp->board_part_number, "BCM57795");
+ else
+ goto nomatch;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ strcpy(tp->board_part_number, "BCM95906");
+ } else {
+nomatch:
+ strcpy(tp->board_part_number, "none");
+ }
+}
+
+static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
+{
+ u32 val;
+
+ if (tg3_nvram_read(tp, offset, &val) ||
+ (val & 0xfc000000) != 0x0c000000 ||
+ tg3_nvram_read(tp, offset + 4, &val) ||
+ val != 0)
+ return 0;
+
+ return 1;
+}
+
+static void __devinit tg3_read_bc_ver(struct tg3 *tp)
+{
+ u32 val, offset, start, ver_offset;
+ int i, dst_off;
+ bool newver = false;
+
+ if (tg3_nvram_read(tp, 0xc, &offset) ||
+ tg3_nvram_read(tp, 0x4, &start))
+ return;
+
+ offset = tg3_nvram_logical_addr(tp, offset);
+
+ if (tg3_nvram_read(tp, offset, &val))
+ return;
+
+ if ((val & 0xfc000000) == 0x0c000000) {
+ if (tg3_nvram_read(tp, offset + 4, &val))
+ return;
+
+ if (val == 0)
+ newver = true;
+ }
+
+ dst_off = strlen(tp->fw_ver);
+
+ if (newver) {
+ if (TG3_VER_SIZE - dst_off < 16 ||
+ tg3_nvram_read(tp, offset + 8, &ver_offset))
+ return;
+
+ offset = offset + ver_offset - start;
+ for (i = 0; i < 16; i += 4) {
+ __be32 v;
+ if (tg3_nvram_read_be32(tp, offset + i, &v))
+ return;
+
+ memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
+ }
+ } else {
+ u32 major, minor;
+
+ if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
+ return;
+
+ major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
+ TG3_NVM_BCVER_MAJSFT;
+ minor = ver_offset & TG3_NVM_BCVER_MINMSK;
+ snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
+ "v%d.%02d", major, minor);
+ }
+}
+
+static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
+{
+ u32 val, major, minor;
+
+ /* Use native endian representation */
+ if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
+ return;
+
+ major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
+ TG3_NVM_HWSB_CFG1_MAJSFT;
+ minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
+ TG3_NVM_HWSB_CFG1_MINSFT;
+
+	snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
+}
+
+static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
+{
+ u32 offset, major, minor, build;
+
+ strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
+
+ if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
+ return;
+
+ switch (val & TG3_EEPROM_SB_REVISION_MASK) {
+ case TG3_EEPROM_SB_REVISION_0:
+ offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
+ break;
+ case TG3_EEPROM_SB_REVISION_2:
+ offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
+ break;
+ case TG3_EEPROM_SB_REVISION_3:
+ offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
+ break;
+ case TG3_EEPROM_SB_REVISION_4:
+ offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
+ break;
+ case TG3_EEPROM_SB_REVISION_5:
+ offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
+ break;
+ case TG3_EEPROM_SB_REVISION_6:
+ offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
+ break;
+ default:
+ return;
+ }
+
+ if (tg3_nvram_read(tp, offset, &val))
+ return;
+
+ build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
+ TG3_EEPROM_SB_EDH_BLD_SHFT;
+ major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
+ TG3_EEPROM_SB_EDH_MAJ_SHFT;
+ minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
+
+ if (minor > 99 || build > 26)
+ return;
+
+ offset = strlen(tp->fw_ver);
+ snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
+ " v%d.%02d", major, minor);
+
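+	/* A nonzero build number becomes a letter suffix: build 1 appends
+	 * 'a', build 26 appends 'z' (larger values were rejected above).
+	 */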
+ if (build > 0) {
+ offset = strlen(tp->fw_ver);
+ if (offset < TG3_VER_SIZE - 1)
+ tp->fw_ver[offset] = 'a' + build - 1;
+ }
+}
+
+static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
+{
+ u32 val, offset, start;
+ int i, vlen;
+
+ for (offset = TG3_NVM_DIR_START;
+ offset < TG3_NVM_DIR_END;
+ offset += TG3_NVM_DIRENT_SIZE) {
+ if (tg3_nvram_read(tp, offset, &val))
+ return;
+
+ if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
+ break;
+ }
+
+ if (offset == TG3_NVM_DIR_END)
+ return;
+
+ if (!tg3_flag(tp, 5705_PLUS))
+ start = 0x08000000;
+ else if (tg3_nvram_read(tp, offset - 4, &start))
+ return;
+
+ if (tg3_nvram_read(tp, offset + 4, &offset) ||
+ !tg3_fw_img_is_valid(tp, offset) ||
+ tg3_nvram_read(tp, offset + 8, &val))
+ return;
+
+ offset += val - start;
+
+ vlen = strlen(tp->fw_ver);
+
+ tp->fw_ver[vlen++] = ',';
+ tp->fw_ver[vlen++] = ' ';
+
+ for (i = 0; i < 4; i++) {
+ __be32 v;
+ if (tg3_nvram_read_be32(tp, offset, &v))
+ return;
+
+ offset += sizeof(v);
+
+ if (vlen > TG3_VER_SIZE - sizeof(v)) {
+ memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
+ break;
+ }
+
+ memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
+ vlen += sizeof(v);
+ }
+}
+
+static void __devinit tg3_read_dash_ver(struct tg3 *tp)
+{
+ int vlen;
+ u32 apedata;
+ char *fwtype;
+
+ if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
+ return;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
+ if (apedata != APE_SEG_SIG_MAGIC)
+ return;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+ if (!(apedata & APE_FW_STATUS_READY))
+ return;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
+
+ if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
+ tg3_flag_set(tp, APE_HAS_NCSI);
+ fwtype = "NCSI";
+ } else {
+ fwtype = "DASH";
+ }
+
+ vlen = strlen(tp->fw_ver);
+
+ snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
+ fwtype,
+ (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
+ (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
+ (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
+ (apedata & APE_FW_VERSION_BLDMSK));
+}
+
+static void __devinit tg3_read_fw_ver(struct tg3 *tp)
+{
+ u32 val;
+ bool vpd_vers = false;
+
+ if (tp->fw_ver[0] != 0)
+ vpd_vers = true;
+
+ if (tg3_flag(tp, NO_NVRAM)) {
+ strcat(tp->fw_ver, "sb");
+ return;
+ }
+
+ if (tg3_nvram_read(tp, 0, &val))
+ return;
+
+ if (val == TG3_EEPROM_MAGIC)
+ tg3_read_bc_ver(tp);
+ else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
+ tg3_read_sb_ver(tp, val);
+ else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
+ tg3_read_hwsb_ver(tp);
+ else
+ return;
+
+ if (vpd_vers)
+ goto done;
+
+ if (tg3_flag(tp, ENABLE_APE)) {
+ if (tg3_flag(tp, ENABLE_ASF))
+ tg3_read_dash_ver(tp);
+ } else if (tg3_flag(tp, ENABLE_ASF)) {
+ tg3_read_mgmtfw_ver(tp);
+ }
+
+done:
+ tp->fw_ver[TG3_VER_SIZE - 1] = 0;
+}
+
+static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
+
+static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
+{
+ if (tg3_flag(tp, LRG_PROD_RING_CAP))
+ return TG3_RX_RET_MAX_SIZE_5717;
+ else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
+ return TG3_RX_RET_MAX_SIZE_5700;
+ else
+ return TG3_RX_RET_MAX_SIZE_5705;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
+ { },
+};
+
+static int __devinit tg3_get_invariants(struct tg3 *tp)
+{
+ u32 misc_ctrl_reg;
+ u32 pci_state_reg, grc_misc_cfg;
+ u32 val;
+ u16 pci_cmd;
+ int err;
+
+ /* Force memory write invalidate off. If we leave it on,
+ * then on 5700_BX chips we have to enable a workaround.
+ * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
+	 * to match the cacheline size. The Broadcom driver has this
+	 * workaround but turns MWI off all the time, so it never uses
+	 * it. This seems to suggest that the workaround is insufficient.
+ */
+ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+ pci_cmd &= ~PCI_COMMAND_INVALIDATE;
+ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+
+ /* Important! -- Make sure register accesses are byteswapped
+ * correctly. Also, for those chips that require it, make
+ * sure that indirect register accesses are enabled before
+ * the first operation.
+ */
+ pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+ &misc_ctrl_reg);
+ tp->misc_host_ctrl |= (misc_ctrl_reg &
+ MISC_HOST_CTRL_CHIPREV);
+ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+ tp->misc_host_ctrl);
+
+ tp->pci_chip_rev_id = (misc_ctrl_reg >>
+ MISC_HOST_CTRL_CHIPREV_SHIFT);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
+ u32 prod_id_asic_rev;
+
+ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
+ pci_read_config_dword(tp->pdev,
+ TG3PCI_GEN2_PRODID_ASICREV,
+ &prod_id_asic_rev);
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+ pci_read_config_dword(tp->pdev,
+ TG3PCI_GEN15_PRODID_ASICREV,
+ &prod_id_asic_rev);
+ else
+ pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
+ &prod_id_asic_rev);
+
+ tp->pci_chip_rev_id = prod_id_asic_rev;
+ }
+
+ /* Wrong chip ID in 5752 A0. This code can be removed later
+ * as A0 is not in production.
+ */
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
+ tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
+
+ /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
+ * we need to disable memory and use config. cycles
+ * only to access all registers. The 5702/03 chips
+ * can mistakenly decode the special cycles from the
+ * ICH chipsets as memory write cycles, causing corruption
+ * of register and memory space. Only certain ICH bridges
+ * will drive special cycles with non-zero data during the
+ * address phase which can fall within the 5703's address
+ * range. This is not an ICH bug as the PCI spec allows
+ * non-zero address during special cycles. However, only
+ * these ICH bridges are known to drive non-zero addresses
+ * during special cycles.
+ *
+ * Since special cycles do not cross PCI bridges, we only
+ * enable this workaround if the 5703 is on the secondary
+ * bus of these ICH bridges.
+ */
+ if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
+ (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
+ static struct tg3_dev_id {
+ u32 vendor;
+ u32 device;
+ u32 rev;
+ } ich_chipsets[] = {
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
+ PCI_ANY_ID },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
+ PCI_ANY_ID },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
+ 0xa },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
+ PCI_ANY_ID },
+ { },
+ };
+ struct tg3_dev_id *pci_id = &ich_chipsets[0];
+ struct pci_dev *bridge = NULL;
+
+ while (pci_id->vendor != 0) {
+ bridge = pci_get_device(pci_id->vendor, pci_id->device,
+ bridge);
+ if (!bridge) {
+ pci_id++;
+ continue;
+ }
+ if (pci_id->rev != PCI_ANY_ID) {
+ if (bridge->revision > pci_id->rev)
+ continue;
+ }
+ if (bridge->subordinate &&
+ (bridge->subordinate->number ==
+ tp->pdev->bus->number)) {
+ tg3_flag_set(tp, ICH_WORKAROUND);
+ pci_dev_put(bridge);
+ break;
+ }
+ }
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ static struct tg3_dev_id {
+ u32 vendor;
+ u32 device;
+ } bridge_chipsets[] = {
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
+ { },
+ };
+ struct tg3_dev_id *pci_id = &bridge_chipsets[0];
+ struct pci_dev *bridge = NULL;
+
+ while (pci_id->vendor != 0) {
+ bridge = pci_get_device(pci_id->vendor,
+ pci_id->device,
+ bridge);
+ if (!bridge) {
+ pci_id++;
+ continue;
+ }
+ if (bridge->subordinate &&
+ (bridge->subordinate->number <=
+ tp->pdev->bus->number) &&
+ (bridge->subordinate->subordinate >=
+ tp->pdev->bus->number)) {
+ tg3_flag_set(tp, 5701_DMA_BUG);
+ pci_dev_put(bridge);
+ break;
+ }
+ }
+ }
+
+ /* The EPB bridge inside 5714, 5715, and 5780 cannot support
+	 * DMA addresses > 40-bit. This bridge may have additional
+	 * 57xx devices behind it in some 4-port NIC designs, for example.
+ * Any tg3 device found behind the bridge will also need the 40-bit
+ * DMA workaround.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ tg3_flag_set(tp, 5780_CLASS);
+ tg3_flag_set(tp, 40BIT_DMA_BUG);
+ tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
+ } else {
+ struct pci_dev *bridge = NULL;
+
+ do {
+ bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
+ PCI_DEVICE_ID_SERVERWORKS_EPB,
+ bridge);
+ if (bridge && bridge->subordinate &&
+ (bridge->subordinate->number <=
+ tp->pdev->bus->number) &&
+ (bridge->subordinate->subordinate >=
+ tp->pdev->bus->number)) {
+ tg3_flag_set(tp, 40BIT_DMA_BUG);
+ pci_dev_put(bridge);
+ break;
+ }
+ } while (bridge);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
+ tp->pdev_peer = tg3_find_peer(tp);
+
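+	/* Establish the chip family hierarchy. Each *_PLUS flag below
+	 * implies every older family flag after it (5717_PLUS implies
+	 * 57765_PLUS implies 5755_PLUS implies 5750_PLUS implies
+	 * 5705_PLUS), so later feature tests only need to check one flag.
+	 */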
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ tg3_flag_set(tp, 5717_PLUS);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
+ tg3_flag(tp, 5717_PLUS))
+ tg3_flag_set(tp, 57765_PLUS);
+
+ /* Intentionally exclude ASIC_REV_5906 */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ tg3_flag(tp, 57765_PLUS))
+ tg3_flag_set(tp, 5755_PLUS);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
+ tg3_flag(tp, 5755_PLUS) ||
+ tg3_flag(tp, 5780_CLASS))
+ tg3_flag_set(tp, 5750_PLUS);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
+ tg3_flag(tp, 5750_PLUS))
+ tg3_flag_set(tp, 5705_PLUS);
+
+ /* Determine TSO capabilities */
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
+ ; /* Do nothing. HW bug. */
+ else if (tg3_flag(tp, 57765_PLUS))
+ tg3_flag_set(tp, HW_TSO_3);
+ else if (tg3_flag(tp, 5755_PLUS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tg3_flag_set(tp, HW_TSO_2);
+ else if (tg3_flag(tp, 5750_PLUS)) {
+ tg3_flag_set(tp, HW_TSO_1);
+ tg3_flag_set(tp, TSO_BUG);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
+ tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
+ tg3_flag_clear(tp, TSO_BUG);
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+ tg3_flag_set(tp, TSO_BUG);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
+ tp->fw_needed = FIRMWARE_TG3TSO5;
+ else
+ tp->fw_needed = FIRMWARE_TG3TSO;
+ }
+
+ /* Selectively allow TSO based on operating conditions */
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3) ||
+ (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
+ tg3_flag_set(tp, TSO_CAPABLE);
+ else {
+ tg3_flag_clear(tp, TSO_CAPABLE);
+ tg3_flag_clear(tp, TSO_BUG);
+ tp->fw_needed = NULL;
+ }
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
+ tp->fw_needed = FIRMWARE_TG3;
+
+ tp->irq_max = 1;
+
+ if (tg3_flag(tp, 5750_PLUS)) {
+ tg3_flag_set(tp, SUPPORT_MSI);
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
+ GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
+ tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
+ tp->pdev_peer == tp->pdev))
+ tg3_flag_clear(tp, SUPPORT_MSI);
+
+ if (tg3_flag(tp, 5755_PLUS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ tg3_flag_set(tp, 1SHOT_MSI);
+ }
+
+ if (tg3_flag(tp, 57765_PLUS)) {
+ tg3_flag_set(tp, SUPPORT_MSIX);
+ tp->irq_max = TG3_IRQ_MAX_VECS;
+ }
+ }
+
+ if (tg3_flag(tp, 5755_PLUS))
+ tg3_flag_set(tp, SHORT_DMA_BUG);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ tg3_flag_set(tp, 4K_FIFO_LIMIT);
+
+ if (tg3_flag(tp, 5717_PLUS))
+ tg3_flag_set(tp, LRG_PROD_RING_CAP);
+
+ if (tg3_flag(tp, 57765_PLUS) &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
+ tg3_flag_set(tp, USE_JUMBO_BDFLAG);
+
+ if (!tg3_flag(tp, 5705_PLUS) ||
+ tg3_flag(tp, 5780_CLASS) ||
+ tg3_flag(tp, USE_JUMBO_BDFLAG))
+ tg3_flag_set(tp, JUMBO_CAPABLE);
+
+ pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
+ &pci_state_reg);
+
+ if (pci_is_pcie(tp->pdev)) {
+ u16 lnkctl;
+
+ tg3_flag_set(tp, PCI_EXPRESS);
+
+ tp->pcie_readrq = 4096;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ tp->pcie_readrq = 2048;
+
+ pcie_set_readrq(tp->pdev, tp->pcie_readrq);
+
+ pci_read_config_word(tp->pdev,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
+ &lnkctl);
+ if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+ ASIC_REV_5906) {
+ tg3_flag_clear(tp, HW_TSO_2);
+ tg3_flag_clear(tp, TSO_CAPABLE);
+ }
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
+ tg3_flag_set(tp, CLKREQ_BUG);
+ } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
+ tg3_flag_set(tp, L1PLLPD_EN);
+ }
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ /* BCM5785 devices are effectively PCIe devices, and should
+ * follow PCIe codepaths, but do not have a PCIe capabilities
+ * section.
+ */
+ tg3_flag_set(tp, PCI_EXPRESS);
+ } else if (!tg3_flag(tp, 5705_PLUS) ||
+ tg3_flag(tp, 5780_CLASS)) {
+ tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
+ if (!tp->pcix_cap) {
+ dev_err(&tp->pdev->dev,
+ "Cannot find PCI-X capability, aborting\n");
+ return -EIO;
+ }
+
+ if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
+ tg3_flag_set(tp, PCIX_MODE);
+ }
+
+ /* If we have an AMD 762 or VIA K8T800 chipset, write
+ * reordering to the mailbox registers done by the host
+ * controller can cause major troubles. We read back from
+ * every mailbox register write to force the writes to be
+ * posted to the chip in order.
+ */
+ if (pci_dev_present(tg3_write_reorder_chipsets) &&
+ !tg3_flag(tp, PCI_EXPRESS))
+ tg3_flag_set(tp, MBOX_WRITE_REORDER);
+
+ pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
+ &tp->pci_cacheline_sz);
+ pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+ &tp->pci_lat_timer);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
+ tp->pci_lat_timer < 64) {
+ tp->pci_lat_timer = 64;
+ pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+ tp->pci_lat_timer);
+ }
+
+ /* Important! -- It is critical that the PCI-X hw workaround
+ * situation is decided before the first MMIO register access.
+ */
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
+ /* 5700 BX chips need to have their TX producer index
+ * mailboxes written twice to workaround a bug.
+ */
+ tg3_flag_set(tp, TXD_MBOX_HWBUG);
+
+ /* If we are in PCI-X mode, enable register write workaround.
+ *
+ * The workaround is to use indirect register accesses
+ * for all chip writes not to mailbox registers.
+ */
+ if (tg3_flag(tp, PCIX_MODE)) {
+ u32 pm_reg;
+
+ tg3_flag_set(tp, PCIX_TARGET_HWBUG);
+
+			/* The chip can have its power management PCI config
+ * space registers clobbered due to this bug.
+ * So explicitly force the chip into D0 here.
+ */
+ pci_read_config_dword(tp->pdev,
+ tp->pm_cap + PCI_PM_CTRL,
+ &pm_reg);
+ pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
+ pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
+ pci_write_config_dword(tp->pdev,
+ tp->pm_cap + PCI_PM_CTRL,
+ pm_reg);
+
+ /* Also, force SERR#/PERR# in PCI command. */
+ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+ pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
+ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+ }
+ }
+
+ if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
+ tg3_flag_set(tp, PCI_HIGH_SPEED);
+ if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
+ tg3_flag_set(tp, PCI_32BIT);
+
+ /* Chip-specific fixup from Broadcom driver */
+ if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
+ (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
+ pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
+ pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
+ }
+
+ /* Default fast path register access methods */
+ tp->read32 = tg3_read32;
+ tp->write32 = tg3_write32;
+ tp->read32_mbox = tg3_read32;
+ tp->write32_mbox = tg3_write32;
+ tp->write32_tx_mbox = tg3_write32;
+ tp->write32_rx_mbox = tg3_write32;
+
+ /* Various workaround register access methods */
+ if (tg3_flag(tp, PCIX_TARGET_HWBUG))
+ tp->write32 = tg3_write_indirect_reg32;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
+ (tg3_flag(tp, PCI_EXPRESS) &&
+ tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
+ /*
+ * Back to back register writes can cause problems on these
+ * chips, the workaround is to read back all reg writes
+ * except those to mailbox regs.
+ *
+ * See tg3_write_indirect_reg32().
+ */
+ tp->write32 = tg3_write_flush_reg32;
+ }
+
+ if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
+ tp->write32_tx_mbox = tg3_write32_tx_mbox;
+ if (tg3_flag(tp, MBOX_WRITE_REORDER))
+ tp->write32_rx_mbox = tg3_write_flush_reg32;
+ }
+
+ if (tg3_flag(tp, ICH_WORKAROUND)) {
+ tp->read32 = tg3_read_indirect_reg32;
+ tp->write32 = tg3_write_indirect_reg32;
+ tp->read32_mbox = tg3_read_indirect_mbox;
+ tp->write32_mbox = tg3_write_indirect_mbox;
+ tp->write32_tx_mbox = tg3_write_indirect_mbox;
+ tp->write32_rx_mbox = tg3_write_indirect_mbox;
+
+ iounmap(tp->regs);
+ tp->regs = NULL;
+
+ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+ pci_cmd &= ~PCI_COMMAND_MEMORY;
+ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+ }
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ tp->read32_mbox = tg3_read32_mbox_5906;
+ tp->write32_mbox = tg3_write32_mbox_5906;
+ tp->write32_tx_mbox = tg3_write32_mbox_5906;
+ tp->write32_rx_mbox = tg3_write32_mbox_5906;
+ }
+
+ if (tp->write32 == tg3_write_indirect_reg32 ||
+ (tg3_flag(tp, PCIX_MODE) &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
+ tg3_flag_set(tp, SRAM_USE_CONFIG);
+
+ /* The memory arbiter has to be enabled in order for SRAM accesses
+ * to succeed. Normally on powerup the tg3 chip firmware will make
+ * sure it is enabled, but other entities such as system netboot
+ * code might disable it.
+ */
+ val = tr32(MEMARB_MODE);
+ tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
+
+ if (tg3_flag(tp, PCIX_MODE)) {
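+		/* In PCI-X mode the function number comes from the low
+		 * three bits of the PCI-X status register rather than
+		 * from devfn.
+		 */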
+ pci_read_config_dword(tp->pdev,
+ tp->pcix_cap + PCI_X_STATUS, &val);
+ tp->pci_fn = val & 0x7;
+ } else {
+ tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
+ }
+
+ /* Get eeprom hw config before calling tg3_set_power_state().
+ * In particular, the TG3_FLAG_IS_NIC flag must be
+ * determined before calling tg3_set_power_state() so that
+ * we know whether or not to switch out of Vaux power.
+ * When the flag is set, it means that GPIO1 is used for eeprom
+ * write protect and also implies that it is a LOM where GPIOs
+ * are not used to switch power.
+ */
+ tg3_get_eeprom_hw_cfg(tp);
+
+ if (tg3_flag(tp, ENABLE_APE)) {
+ /* Allow reads and writes to the
+ * APE register and memory space.
+ */
+ pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
+ PCISTATE_ALLOW_APE_SHMEM_WR |
+ PCISTATE_ALLOW_APE_PSPACE_WR;
+ pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
+ pci_state_reg);
+
+ tg3_ape_lock_init(tp);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ tg3_flag(tp, 57765_PLUS))
+ tg3_flag_set(tp, CPMU_PRESENT);
+
+ /* Set up tp->grc_local_ctrl before calling
+ * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
+ * will bring 5700's external PHY out of reset.
+ * It is also used as eeprom write protect on LOMs.
+ */
+ tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ tg3_flag(tp, EEPROM_WRITE_PROT))
+ tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OUTPUT1);
+ /* Unused GPIO3 must be driven as output on 5752 because there
+ * are no pull-up resistors on unused GPIO pins.
+ */
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
+
+ if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
+ /* Turn off the debug UART. */
+ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
+ if (tg3_flag(tp, IS_NIC))
+ /* Keep VMain power. */
+ tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
+ GRC_LCLCTRL_GPIO_OUTPUT0;
+ }
+
+ /* Switch out of Vaux if it is a NIC */
+ tg3_pwrsrc_switch_to_vmain(tp);
+
+ /* Derive initial jumbo mode from MTU assigned in
+ * ether_setup() via the alloc_etherdev() call
+ */
+ if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
+ tg3_flag_set(tp, JUMBO_RING_ENABLE);
+
+ /* Determine WakeOnLan speed to use. */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
+ tg3_flag_clear(tp, WOL_SPEED_100MB);
+ } else {
+ tg3_flag_set(tp, WOL_SPEED_100MB);
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tp->phy_flags |= TG3_PHYFLG_IS_FET;
+
+ /* A few boards don't want Ethernet@WireSpeed phy feature */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+ (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
+ (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
+ (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
+ (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+ tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
+ GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
+ tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
+ tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
+
+ if (tg3_flag(tp, 5705_PLUS) &&
+ !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
+ !tg3_flag(tp, 57765_PLUS)) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+ if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
+ tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
+ tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
+ if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
+ tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
+ } else
+ tp->phy_flags |= TG3_PHYFLG_BER_BUG;
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
+ tp->phy_otp = tg3_read_otp_phycfg(tp);
+ if (tp->phy_otp == 0)
+ tp->phy_otp = TG3_OTP_DEFAULT;
+ }
+
+ if (tg3_flag(tp, CPMU_PRESENT))
+ tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
+ else
+ tp->mi_mode = MAC_MI_MODE_BASE;
+
+ tp->coalesce_mode = 0;
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
+ tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
+
+ /* Set these bits to enable statistics workaround. */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
+ tp->coalesce_mode |= HOSTCC_MODE_ATTN;
+ tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ tg3_flag_set(tp, USE_PHYLIB);
+
+ err = tg3_mdio_init(tp);
+ if (err)
+ return err;
+
+ /* Initialize data/descriptor byte/word swapping. */
+ val = tr32(GRC_MODE);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
+ GRC_MODE_WORD_SWAP_B2HRX_DATA |
+ GRC_MODE_B2HRX_ENABLE |
+ GRC_MODE_HTX2B_ENABLE |
+ GRC_MODE_HOST_STACKUP);
+ else
+ val &= GRC_MODE_HOST_STACKUP;
+
+ tw32(GRC_MODE, val | tp->grc_mode);
+
+ tg3_switch_clocks(tp);
+
+ /* Clear this out for sanity. */
+ tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+
+ pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
+ &pci_state_reg);
+ if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
+ !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
+ u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
+
+ if (chiprevid == CHIPREV_ID_5701_A0 ||
+ chiprevid == CHIPREV_ID_5701_B0 ||
+ chiprevid == CHIPREV_ID_5701_B2 ||
+ chiprevid == CHIPREV_ID_5701_B5) {
+ void __iomem *sram_base;
+
+			/* Write some dummy words into the SRAM status block
+			 * area and see if they read back correctly. If the
+			 * readback is bad, force-enable the PCIX workaround.
+ */
+ sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
+
+ writel(0x00000000, sram_base);
+ writel(0x00000000, sram_base + 4);
+ writel(0xffffffff, sram_base + 4);
+ if (readl(sram_base) != 0x00000000)
+ tg3_flag_set(tp, PCIX_TARGET_HWBUG);
+ }
+ }
+
+ udelay(50);
+ tg3_nvram_init(tp);
+
+ grc_misc_cfg = tr32(GRC_MISC_CFG);
+ grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+ (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
+ grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
+ tg3_flag_set(tp, IS_5788);
+
+ if (!tg3_flag(tp, IS_5788) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
+ tg3_flag_set(tp, TAGGED_STATUS);
+ if (tg3_flag(tp, TAGGED_STATUS)) {
+ tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
+ HOSTCC_MODE_CLRTICK_TXBD);
+
+ tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
+ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+ tp->misc_host_ctrl);
+ }
+
+ /* Preserve the APE MAC_MODE bits */
+ if (tg3_flag(tp, ENABLE_APE))
+ tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+ else
+ tp->mac_mode = 0;
+
+ /* these are limited to 10/100 only */
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
+ (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+ tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
+ (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
+ tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
+ tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
+ (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
+ (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
+ tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
+ tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
+ (tp->phy_flags & TG3_PHYFLG_IS_FET))
+ tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
+
+ err = tg3_phy_probe(tp);
+ if (err) {
+ dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
+ /* ... but do not return immediately ... */
+ tg3_mdio_fini(tp);
+ }
+
+ tg3_read_vpd(tp);
+ tg3_read_fw_ver(tp);
+
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+ tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
+ } else {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
+ else
+ tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
+ }
+
+ /* 5700 {AX,BX} chips have a broken status block link
+ * change bit implementation, so we must use the
+ * status register in those cases.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ tg3_flag_set(tp, USE_LINKCHG_REG);
+ else
+ tg3_flag_clear(tp, USE_LINKCHG_REG);
+
+	/* The led_ctrl is set during tg3_phy_probe; here we might
+ * have to force the link status polling mechanism based
+ * upon subsystem IDs.
+ */
+ if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+ !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
+ tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
+ tg3_flag_set(tp, USE_LINKCHG_REG);
+ }
+
+ /* For all SERDES we poll the MAC status register. */
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+ tg3_flag_set(tp, POLL_SERDES);
+ else
+ tg3_flag_clear(tp, POLL_SERDES);
+
+ tp->rx_offset = NET_IP_ALIGN;
+ tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+ tg3_flag(tp, PCIX_MODE)) {
+ tp->rx_offset = 0;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ tp->rx_copy_thresh = ~(u16)0;
+#endif
+ }
+
+ tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
+ tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
+ tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
+
+ tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
+
+ /* Increment the rx prod index on the rx std ring by at most
+ * 8 for these chips to workaround hw errata.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ tp->rx_std_max_post = 8;
+
+ if (tg3_flag(tp, ASPM_WORKAROUND))
+ tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
+ PCIE_PWR_MGMT_L1_THRESH_MSK;
+
+ return err;
+}
+
+#ifdef CONFIG_SPARC
+static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
+{
+ struct net_device *dev = tp->dev;
+ struct pci_dev *pdev = tp->pdev;
+ struct device_node *dp = pci_device_to_OF_node(pdev);
+ const unsigned char *addr;
+ int len;
+
+ addr = of_get_property(dp, "local-mac-address", &len);
+ if (addr && len == 6) {
+ memcpy(dev->dev_addr, addr, 6);
+ memcpy(dev->perm_addr, dev->dev_addr, 6);
+ return 0;
+ }
+ return -ENODEV;
+}
+
+static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
+{
+ struct net_device *dev = tp->dev;
+
+ memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+ memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
+ return 0;
+}
+#endif
+
+static int __devinit tg3_get_device_address(struct tg3 *tp)
+{
+ struct net_device *dev = tp->dev;
+ u32 hi, lo, mac_offset;
+ int addr_ok = 0;
+
+#ifdef CONFIG_SPARC
+ if (!tg3_get_macaddr_sparc(tp))
+ return 0;
+#endif
+
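+	/* The NVRAM offset of the MAC address varies by chip family
+	 * and, on dual-MAC or multi-function devices, by which port or
+	 * function this device is.
+	 */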
+ mac_offset = 0x7c;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ tg3_flag(tp, 5780_CLASS)) {
+ if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
+ mac_offset = 0xcc;
+ if (tg3_nvram_lock(tp))
+ tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
+ else
+ tg3_nvram_unlock(tp);
+ } else if (tg3_flag(tp, 5717_PLUS)) {
+ if (tp->pci_fn & 1)
+ mac_offset = 0xcc;
+ if (tp->pci_fn > 1)
+ mac_offset += 0x18c;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ mac_offset = 0x10;
+
+ /* First try to get it from MAC address mailbox. */
+ tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
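+	/* 0x484b is ASCII 'H','K', the signature apparently written by
+	 * the bootcode; only trust the mailbox address when it is present.
+	 */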
+ if ((hi >> 16) == 0x484b) {
+ dev->dev_addr[0] = (hi >> 8) & 0xff;
+ dev->dev_addr[1] = (hi >> 0) & 0xff;
+
+ tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
+ dev->dev_addr[2] = (lo >> 24) & 0xff;
+ dev->dev_addr[3] = (lo >> 16) & 0xff;
+ dev->dev_addr[4] = (lo >> 8) & 0xff;
+ dev->dev_addr[5] = (lo >> 0) & 0xff;
+
+ /* Some old bootcode may report a 0 MAC address in SRAM */
+ addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
+ }
+ if (!addr_ok) {
+ /* Next, try NVRAM. */
+ if (!tg3_flag(tp, NO_NVRAM) &&
+ !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
+ !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
+ memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
+ memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
+ }
+ /* Finally just fetch it out of the MAC control regs. */
+ else {
+ hi = tr32(MAC_ADDR_0_HIGH);
+ lo = tr32(MAC_ADDR_0_LOW);
+
+ dev->dev_addr[5] = lo & 0xff;
+ dev->dev_addr[4] = (lo >> 8) & 0xff;
+ dev->dev_addr[3] = (lo >> 16) & 0xff;
+ dev->dev_addr[2] = (lo >> 24) & 0xff;
+ dev->dev_addr[1] = hi & 0xff;
+ dev->dev_addr[0] = (hi >> 8) & 0xff;
+ }
+ }
+
+ if (!is_valid_ether_addr(&dev->dev_addr[0])) {
+#ifdef CONFIG_SPARC
+ if (!tg3_get_default_macaddr_sparc(tp))
+ return 0;
+#endif
+ return -EINVAL;
+ }
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+ return 0;
+}
+
+#define BOUNDARY_SINGLE_CACHELINE 1
+#define BOUNDARY_MULTI_CACHELINE 2
+
+static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
+{
+ int cacheline_size;
+ u8 byte;
+ int goal;
+
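+	/* PCI_CACHE_LINE_SIZE is reported in 32-bit words; a value of
+	 * zero is treated here as the 1024-byte maximum.
+	 */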
+ pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
+ if (byte == 0)
+ cacheline_size = 1024;
+ else
+ cacheline_size = (int) byte * 4;
+
+ /* On 5703 and later chips, the boundary bits have no
+ * effect.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+ !tg3_flag(tp, PCI_EXPRESS))
+ goto out;
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
+ goal = BOUNDARY_MULTI_CACHELINE;
+#else
+#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
+ goal = BOUNDARY_SINGLE_CACHELINE;
+#else
+ goal = 0;
+#endif
+#endif
+
+ if (tg3_flag(tp, 57765_PLUS)) {
+ val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+ goto out;
+ }
+
+ if (!goal)
+ goto out;
+
+ /* PCI controllers on most RISC systems tend to disconnect
+ * when a device tries to burst across a cache-line boundary.
+ * Therefore, letting tg3 do so just wastes PCI bandwidth.
+ *
+ * Unfortunately, for PCI-E there are only limited
+ * write-side controls for this, and thus for reads
+ * we will still get the disconnects. We'll also waste
+ * these PCI cycles for both read and write for chips
+ * other than 5700 and 5701 which do not implement the
+ * boundary bits.
+ */
+ if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
+ switch (cacheline_size) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ if (goal == BOUNDARY_SINGLE_CACHELINE) {
+ val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
+ DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
+ } else {
+ val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+ DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+ }
+ break;
+
+ case 256:
+ val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
+ DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
+ break;
+
+ default:
+ val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+ DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+ break;
+ }
+ } else if (tg3_flag(tp, PCI_EXPRESS)) {
+ switch (cacheline_size) {
+ case 16:
+ case 32:
+ case 64:
+ if (goal == BOUNDARY_SINGLE_CACHELINE) {
+ val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+ val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
+ break;
+ }
+ /* fallthrough */
+ case 128:
+ default:
+ val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+ val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
+ break;
+ }
+ } else {
+ switch (cacheline_size) {
+ case 16:
+ if (goal == BOUNDARY_SINGLE_CACHELINE) {
+ val |= (DMA_RWCTRL_READ_BNDRY_16 |
+ DMA_RWCTRL_WRITE_BNDRY_16);
+ break;
+ }
+ /* fallthrough */
+ case 32:
+ if (goal == BOUNDARY_SINGLE_CACHELINE) {
+ val |= (DMA_RWCTRL_READ_BNDRY_32 |
+ DMA_RWCTRL_WRITE_BNDRY_32);
+ break;
+ }
+ /* fallthrough */
+ case 64:
+ if (goal == BOUNDARY_SINGLE_CACHELINE) {
+ val |= (DMA_RWCTRL_READ_BNDRY_64 |
+ DMA_RWCTRL_WRITE_BNDRY_64);
+ break;
+ }
+ /* fallthrough */
+ case 128:
+ if (goal == BOUNDARY_SINGLE_CACHELINE) {
+ val |= (DMA_RWCTRL_READ_BNDRY_128 |
+ DMA_RWCTRL_WRITE_BNDRY_128);
+ break;
+ }
+ /* fallthrough */
+ case 256:
+ val |= (DMA_RWCTRL_READ_BNDRY_256 |
+ DMA_RWCTRL_WRITE_BNDRY_256);
+ break;
+ case 512:
+ val |= (DMA_RWCTRL_READ_BNDRY_512 |
+ DMA_RWCTRL_WRITE_BNDRY_512);
+ break;
+ case 1024:
+ default:
+ val |= (DMA_RWCTRL_READ_BNDRY_1024 |
+ DMA_RWCTRL_WRITE_BNDRY_1024);
+ break;
+ }
+ }
+
+out:
+ return val;
+}
+
+static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
+{
+ struct tg3_internal_buffer_desc test_desc;
+ u32 sram_dma_descs;
+ int i, ret;
+
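+	/* Build a single internal buffer descriptor in the chip's SRAM
+	 * descriptor pool (via the PCI memory window) and hand it to the
+	 * read or write DMA engine through the corresponding FTQ.
+	 */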
+ sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
+
+ tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
+ tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
+ tw32(RDMAC_STATUS, 0);
+ tw32(WDMAC_STATUS, 0);
+
+ tw32(BUFMGR_MODE, 0);
+ tw32(FTQ_RESET, 0);
+
+ test_desc.addr_hi = ((u64) buf_dma) >> 32;
+ test_desc.addr_lo = buf_dma & 0xffffffff;
+ test_desc.nic_mbuf = 0x00002100;
+ test_desc.len = size;
+
+ /*
+	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
+ * the *second* time the tg3 driver was getting loaded after an
+ * initial scan.
+ *
+ * Broadcom tells me:
+ * ...the DMA engine is connected to the GRC block and a DMA
+ * reset may affect the GRC block in some unpredictable way...
+ * The behavior of resets to individual blocks has not been tested.
+ *
+ * Broadcom noted the GRC reset will also reset all sub-components.
+ */
+ if (to_device) {
+ test_desc.cqid_sqid = (13 << 8) | 2;
+
+ tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
+ udelay(40);
+ } else {
+ test_desc.cqid_sqid = (16 << 8) | 7;
+
+ tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
+ udelay(40);
+ }
+ test_desc.flags = 0x00000005;
+
+ for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
+ u32 val;
+
+ val = *(((u32 *)&test_desc) + i);
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
+ sram_dma_descs + (i * sizeof(u32)));
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
+ }
+ pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
+
+ if (to_device)
+ tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
+ else
+ tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
+
+ ret = -ENODEV;
+ for (i = 0; i < 40; i++) {
+ u32 val;
+
+ if (to_device)
+ val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
+ else
+ val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
+ if ((val & 0xffff) == sram_dma_descs) {
+ ret = 0;
+ break;
+ }
+
+ udelay(100);
+ }
+
+ return ret;
+}
+
+#define TEST_BUFFER_SIZE 0x2000
+
+static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
+ { },
+};
+
+static int __devinit tg3_test_dma(struct tg3 *tp)
+{
+ dma_addr_t buf_dma;
+ u32 *buf, saved_dma_rwctrl;
+ int ret = 0;
+
+ buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
+ &buf_dma, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out_nofree;
+ }
+
+ tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
+ (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
+
+ tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
+
+ if (tg3_flag(tp, 57765_PLUS))
+ goto out;
+
+ if (tg3_flag(tp, PCI_EXPRESS)) {
+ /* DMA read watermark not used on PCIE */
+ tp->dma_rwctrl |= 0x00180000;
+ } else if (!tg3_flag(tp, PCIX_MODE)) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
+ tp->dma_rwctrl |= 0x003f0000;
+ else
+ tp->dma_rwctrl |= 0x003f000f;
+ } else {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
+ u32 read_water = 0x7;
+
+ /* If the 5704 is behind the EPB bridge, we can
+ * do the less restrictive ONE_DMA workaround for
+ * better performance.
+ */
+ if (tg3_flag(tp, 40BIT_DMA_BUG) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ tp->dma_rwctrl |= 0x8000;
+ else if (ccval == 0x6 || ccval == 0x7)
+ tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
+ read_water = 4;
+ /* Set bit 23 to enable PCIX hw bug fix */
+ tp->dma_rwctrl |=
+ (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
+ (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
+ (1 << 23);
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
+ /* 5780 always in PCIX mode */
+ tp->dma_rwctrl |= 0x00144000;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ /* 5714 always in PCIX mode */
+ tp->dma_rwctrl |= 0x00148000;
+ } else {
+ tp->dma_rwctrl |= 0x001b000f;
+ }
+ }
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ tp->dma_rwctrl &= 0xfffffff0;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ /* Remove this if it causes problems for some boards. */
+ tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
+
+ /* On 5700/5701 chips, we need to set this bit.
+ * Otherwise the chip will issue cacheline transactions
+ * to streamable DMA memory with not all the byte
+ * enables turned on. This is an error on several
+ * RISC PCI controllers, in particular sparc64.
+ *
+ * On 5703/5704 chips, this bit has been reassigned
+ * a different meaning. In particular, it is used
+ * on those chips to enable a PCI-X workaround.
+ */
+ tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
+ }
+
+ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+
+#if 0
+ /* Unneeded, already done by tg3_get_invariants. */
+ tg3_switch_clocks(tp);
+#endif
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
+ goto out;
+
+	/* It is best to perform the DMA test with maximum write burst size
+ * to expose the 5700/5701 write DMA bug.
+ */
+ saved_dma_rwctrl = tp->dma_rwctrl;
+ tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+
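+	/* Fill the buffer with a counting pattern, DMA it to the chip
+	 * and back, and verify the result. On corruption, clamp the
+	 * write boundary to 16 bytes and retry once before giving up.
+	 */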
+ while (1) {
+ u32 *p = buf, i;
+
+ for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
+ p[i] = i;
+
+ /* Send the buffer to the chip. */
+ ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
+ if (ret) {
+ dev_err(&tp->pdev->dev,
+ "%s: Buffer write failed. err = %d\n",
+ __func__, ret);
+ break;
+ }
+
+#if 0
+ /* validate data reached card RAM correctly. */
+ for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
+ u32 val;
+ tg3_read_mem(tp, 0x2100 + (i*4), &val);
+ if (le32_to_cpu(val) != p[i]) {
+ dev_err(&tp->pdev->dev,
+ "%s: Buffer corrupted on device! "
+ "(%d != %d)\n", __func__, val, i);
+ /* ret = -ENODEV here? */
+ }
+ p[i] = 0;
+ }
+#endif
+ /* Now read it back. */
+ ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
+ if (ret) {
+ dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
+ "err = %d\n", __func__, ret);
+ break;
+ }
+
+ /* Verify it. */
+ for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
+ if (p[i] == i)
+ continue;
+
+ if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+ DMA_RWCTRL_WRITE_BNDRY_16) {
+ tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+ tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
+ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+ break;
+ } else {
+ dev_err(&tp->pdev->dev,
+ "%s: Buffer corrupted on read back! "
+ "(%d != %d)\n", __func__, p[i], i);
+ ret = -ENODEV;
+ goto out;
+ }
+ }
+
+ if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
+ /* Success. */
+ ret = 0;
+ break;
+ }
+ }
+ if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+ DMA_RWCTRL_WRITE_BNDRY_16) {
+ /* DMA test passed without adjusting DMA boundary,
+ * now look for chipsets that are known to expose the
+ * DMA bug without failing the test.
+ */
+ if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
+ tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+ tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
+ } else {
+ /* Safe to use the calculated DMA boundary. */
+ tp->dma_rwctrl = saved_dma_rwctrl;
+ }
+
+ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+ }
+
+out:
+ dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
+out_nofree:
+ return ret;
+}
+
+static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
+{
+ if (tg3_flag(tp, 57765_PLUS)) {
+ tp->bufmgr_config.mbuf_read_dma_low_water =
+ DEFAULT_MB_RDMA_LOW_WATER_5705;
+ tp->bufmgr_config.mbuf_mac_rx_low_water =
+ DEFAULT_MB_MACRX_LOW_WATER_57765;
+ tp->bufmgr_config.mbuf_high_water =
+ DEFAULT_MB_HIGH_WATER_57765;
+
+ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
+ DEFAULT_MB_RDMA_LOW_WATER_5705;
+ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
+ DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
+ tp->bufmgr_config.mbuf_high_water_jumbo =
+ DEFAULT_MB_HIGH_WATER_JUMBO_57765;
+ } else if (tg3_flag(tp, 5705_PLUS)) {
+ tp->bufmgr_config.mbuf_read_dma_low_water =
+ DEFAULT_MB_RDMA_LOW_WATER_5705;
+ tp->bufmgr_config.mbuf_mac_rx_low_water =
+ DEFAULT_MB_MACRX_LOW_WATER_5705;
+ tp->bufmgr_config.mbuf_high_water =
+ DEFAULT_MB_HIGH_WATER_5705;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ tp->bufmgr_config.mbuf_mac_rx_low_water =
+ DEFAULT_MB_MACRX_LOW_WATER_5906;
+ tp->bufmgr_config.mbuf_high_water =
+ DEFAULT_MB_HIGH_WATER_5906;
+ }
+
+ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
+ DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
+ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
+ DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
+ tp->bufmgr_config.mbuf_high_water_jumbo =
+ DEFAULT_MB_HIGH_WATER_JUMBO_5780;
+ } else {
+ tp->bufmgr_config.mbuf_read_dma_low_water =
+ DEFAULT_MB_RDMA_LOW_WATER;
+ tp->bufmgr_config.mbuf_mac_rx_low_water =
+ DEFAULT_MB_MACRX_LOW_WATER;
+ tp->bufmgr_config.mbuf_high_water =
+ DEFAULT_MB_HIGH_WATER;
+
+ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
+ DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
+ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
+ DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
+ tp->bufmgr_config.mbuf_high_water_jumbo =
+ DEFAULT_MB_HIGH_WATER_JUMBO;
+ }
+
+ tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
+ tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
+}
+
+static char * __devinit tg3_phy_string(struct tg3 *tp)
+{
+ switch (tp->phy_id & TG3_PHY_ID_MASK) {
+ case TG3_PHY_ID_BCM5400: return "5400";
+ case TG3_PHY_ID_BCM5401: return "5401";
+ case TG3_PHY_ID_BCM5411: return "5411";
+ case TG3_PHY_ID_BCM5701: return "5701";
+ case TG3_PHY_ID_BCM5703: return "5703";
+ case TG3_PHY_ID_BCM5704: return "5704";
+ case TG3_PHY_ID_BCM5705: return "5705";
+ case TG3_PHY_ID_BCM5750: return "5750";
+ case TG3_PHY_ID_BCM5752: return "5752";
+ case TG3_PHY_ID_BCM5714: return "5714";
+ case TG3_PHY_ID_BCM5780: return "5780";
+ case TG3_PHY_ID_BCM5755: return "5755";
+ case TG3_PHY_ID_BCM5787: return "5787";
+ case TG3_PHY_ID_BCM5784: return "5784";
+ case TG3_PHY_ID_BCM5756: return "5722/5756";
+ case TG3_PHY_ID_BCM5906: return "5906";
+ case TG3_PHY_ID_BCM5761: return "5761";
+ case TG3_PHY_ID_BCM5718C: return "5718C";
+ case TG3_PHY_ID_BCM5718S: return "5718S";
+ case TG3_PHY_ID_BCM57765: return "57765";
+ case TG3_PHY_ID_BCM5719C: return "5719C";
+ case TG3_PHY_ID_BCM5720C: return "5720C";
+ case TG3_PHY_ID_BCM8002: return "8002/serdes";
+ case 0: return "serdes";
+ default: return "unknown";
+ }
+}
+
+static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
+{
+ if (tg3_flag(tp, PCI_EXPRESS)) {
+ strcpy(str, "PCI Express");
+ return str;
+ } else if (tg3_flag(tp, PCIX_MODE)) {
+ u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
+
+ strcpy(str, "PCIX:");
+
+ if ((clock_ctrl == 7) ||
+ ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
+ GRC_MISC_CFG_BOARD_ID_5704CIOBE))
+ strcat(str, "133MHz");
+ else if (clock_ctrl == 0)
+ strcat(str, "33MHz");
+ else if (clock_ctrl == 2)
+ strcat(str, "50MHz");
+ else if (clock_ctrl == 4)
+ strcat(str, "66MHz");
+ else if (clock_ctrl == 6)
+ strcat(str, "100MHz");
+ } else {
+ strcpy(str, "PCI:");
+ if (tg3_flag(tp, PCI_HIGH_SPEED))
+ strcat(str, "66MHz");
+ else
+ strcat(str, "33MHz");
+ }
+ if (tg3_flag(tp, PCI_32BIT))
+ strcat(str, ":32-bit");
+ else
+ strcat(str, ":64-bit");
+ return str;
+}
+
+static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
+{
+ struct pci_dev *peer;
+ unsigned int func, devnr = tp->pdev->devfn & ~7;
+
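+	/* Mask the function bits off devfn to get the slot base, then
+	 * scan all eight functions looking for the other port of this
+	 * device.
+	 */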
+ for (func = 0; func < 8; func++) {
+ peer = pci_get_slot(tp->pdev->bus, devnr | func);
+ if (peer && peer != tp->pdev)
+ break;
+ pci_dev_put(peer);
+ }
+ /* 5704 can be configured in single-port mode, set peer to
+ * tp->pdev in that case.
+ */
+ if (!peer) {
+ peer = tp->pdev;
+ return peer;
+ }
+
+ /*
+ * We don't need to keep the refcount elevated; there's no way
+	 * to remove one half of this device without removing the other.
+ */
+ pci_dev_put(peer);
+
+ return peer;
+}
+
+static void __devinit tg3_init_coal(struct tg3 *tp)
+{
+ struct ethtool_coalesce *ec = &tp->coal;
+
+ memset(ec, 0, sizeof(*ec));
+ ec->cmd = ETHTOOL_GCOALESCE;
+ ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
+ ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
+ ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
+ ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
+ ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
+ ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
+ ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
+ ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
+ ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
+
+ if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
+ HOSTCC_MODE_CLRTICK_TXBD)) {
+ ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
+ ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
+ ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
+ ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
+ }
+
+ if (tg3_flag(tp, 5705_PLUS)) {
+ ec->rx_coalesce_usecs_irq = 0;
+ ec->tx_coalesce_usecs_irq = 0;
+ ec->stats_block_coalesce_usecs = 0;
+ }
+}
+
+static const struct net_device_ops tg3_netdev_ops = {
+ .ndo_open = tg3_open,
+ .ndo_stop = tg3_close,
+ .ndo_start_xmit = tg3_start_xmit,
+ .ndo_get_stats64 = tg3_get_stats64,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = tg3_set_rx_mode,
+ .ndo_set_mac_address = tg3_set_mac_addr,
+ .ndo_do_ioctl = tg3_ioctl,
+ .ndo_tx_timeout = tg3_tx_timeout,
+ .ndo_change_mtu = tg3_change_mtu,
+ .ndo_fix_features = tg3_fix_features,
+ .ndo_set_features = tg3_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = tg3_poll_controller,
+#endif
+};
+
+static int __devinit tg3_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct tg3 *tp;
+ int i, err, pm_cap;
+ u32 sndmbx, rcvmbx, intmbx;
+ char str[40];
+ u64 dma_mask, persist_dma_mask;
+ u32 features = 0;
+
+ printk_once(KERN_INFO "%s\n", version);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+ goto err_out_disable_pdev;
+ }
+
+ pci_set_master(pdev);
+
+ /* Find power-management capability. */
+ pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (pm_cap == 0) {
+ dev_err(&pdev->dev,
+ "Cannot find Power Management capability, aborting\n");
+ err = -EIO;
+ goto err_out_free_res;
+ }
+
+ err = pci_set_power_state(pdev, PCI_D0);
+ if (err) {
+ dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
+ goto err_out_free_res;
+ }
+
+ dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
+ if (!dev) {
+ dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
+ err = -ENOMEM;
+ goto err_out_power_down;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ tp = netdev_priv(dev);
+ tp->pdev = pdev;
+ tp->dev = dev;
+ tp->pm_cap = pm_cap;
+ tp->rx_mode = TG3_DEF_RX_MODE;
+ tp->tx_mode = TG3_DEF_TX_MODE;
+
+ if (tg3_debug > 0)
+ tp->msg_enable = tg3_debug;
+ else
+ tp->msg_enable = TG3_DEF_MSG_ENABLE;
+
+ /* The word/byte swap controls here control register access byte
+ * swapping. DMA data byte swapping is controlled in the GRC_MODE
+ * setting below.
+ */
+ tp->misc_host_ctrl =
+ MISC_HOST_CTRL_MASK_PCI_INT |
+ MISC_HOST_CTRL_WORD_SWAP |
+ MISC_HOST_CTRL_INDIR_ACCESS |
+ MISC_HOST_CTRL_PCISTATE_RW;
+
+ /* The NONFRM (non-frame) byte/word swap controls take effect
+ * on descriptor entries, anything which isn't packet data.
+ *
+ * The StrongARM chips on the board (one for tx, one for rx)
+ * are running in big-endian mode.
+ */
+ tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
+ GRC_MODE_WSWAP_NONFRM_DATA);
+#ifdef __BIG_ENDIAN
+ tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
+#endif
+ spin_lock_init(&tp->lock);
+ spin_lock_init(&tp->indirect_lock);
+ INIT_WORK(&tp->reset_task, tg3_reset_task);
+
+ tp->regs = pci_ioremap_bar(pdev, BAR_0);
+ if (!tp->regs) {
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
+ err = -ENOMEM;
+ goto err_out_free_dev;
+ }
+
+ if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
+ tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
+ tg3_flag_set(tp, ENABLE_APE);
+ tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
+ if (!tp->aperegs) {
+ dev_err(&pdev->dev,
+ "Cannot map APE registers, aborting\n");
+ err = -ENOMEM;
+ goto err_out_iounmap;
+ }
+ }
+
+ tp->rx_pending = TG3_DEF_RX_RING_PENDING;
+ tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
+
+ dev->ethtool_ops = &tg3_ethtool_ops;
+ dev->watchdog_timeo = TG3_TX_TIMEOUT;
+ dev->netdev_ops = &tg3_netdev_ops;
+ dev->irq = pdev->irq;
+
+ err = tg3_get_invariants(tp);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Problem fetching invariants of chip, aborting\n");
+ goto err_out_apeunmap;
+ }
+
+ /* The EPB bridge inside 5714, 5715, and 5780 and any
+ * device behind the EPB cannot support DMA addresses > 40-bit.
+ * On 64-bit systems with IOMMU, use 40-bit dma_mask.
+ * On 64-bit systems without IOMMU, use 64-bit dma_mask and
+ * do DMA address check in tg3_start_xmit().
+ */
+ if (tg3_flag(tp, IS_5788))
+ persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
+ else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
+ persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
+#ifdef CONFIG_HIGHMEM
+ dma_mask = DMA_BIT_MASK(64);
+#endif
+ } else
+ persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
+
+ /* Configure DMA attributes. */
+ if (dma_mask > DMA_BIT_MASK(32)) {
+ err = pci_set_dma_mask(pdev, dma_mask);
+ if (!err) {
+ features |= NETIF_F_HIGHDMA;
+ err = pci_set_consistent_dma_mask(pdev,
+ persist_dma_mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Unable to obtain 64 bit "
+ "DMA for consistent allocations\n");
+ goto err_out_apeunmap;
+ }
+ }
+ }
+ if (err || dma_mask == DMA_BIT_MASK(32)) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_out_apeunmap;
+ }
+ }
+
+ tg3_init_bufmgr_config(tp);
+
+ features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+
+ /* 5700 B0 chips do not support checksumming correctly due
+ * to hardware bugs.
+ */
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
+ features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+
+ if (tg3_flag(tp, 5755_PLUS))
+ features |= NETIF_F_IPV6_CSUM;
+ }
+
+ /* TSO is on by default on chips that support hardware TSO.
+ * Firmware TSO on older chips gives lower performance, so it
+ * is off by default, but can be enabled using ethtool.
+ */
+ if ((tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3)) &&
+ (features & NETIF_F_IP_CSUM))
+ features |= NETIF_F_TSO;
+ if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
+ if (features & NETIF_F_IPV6_CSUM)
+ features |= NETIF_F_TSO6;
+ if (tg3_flag(tp, HW_TSO_3) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ features |= NETIF_F_TSO_ECN;
+ }
+
+ dev->features |= features;
+ dev->vlan_features |= features;
+
+ /*
+ * Add loopback capability only for a subset of devices that support
+	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
+ * loopback for the remaining devices.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+ !tg3_flag(tp, CPMU_PRESENT))
+ /* Add the loopback capability */
+ features |= NETIF_F_LOOPBACK;
+
+ dev->hw_features |= features;
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
+ !tg3_flag(tp, TSO_CAPABLE) &&
+ !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
+ tg3_flag_set(tp, MAX_RXPEND_64);
+ tp->rx_pending = 63;
+ }
+
+ err = tg3_get_device_address(tp);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Could not obtain valid ethernet address, aborting\n");
+ goto err_out_apeunmap;
+ }
+
+ /*
+	 * Reset the chip in case the UNDI or EFI driver did not shut it
+	 * down; otherwise the DMA self test will enable WDMAC and we'll
+	 * see (spurious) pending DMA on the PCI bus at that point.
+ */
+ if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
+ (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+ tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ }
+
+ err = tg3_test_dma(tp);
+ if (err) {
+ dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
+ goto err_out_apeunmap;
+ }
+
+ intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
+ rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
+ sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
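+	/* Assign per-vector mailbox addresses. Interrupt mailboxes are
+	 * 8 bytes apart for the first vectors and 4 bytes apart beyond
+	 * that; the receive-return and send mailbox strides are applied
+	 * at the bottom of the loop.
+	 */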
+ for (i = 0; i < tp->irq_max; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ tnapi->tp = tp;
+ tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
+
+ tnapi->int_mbox = intmbx;
+ if (i <= 4)
+ intmbx += 0x8;
+ else
+ intmbx += 0x4;
+
+ tnapi->consmbox = rcvmbx;
+ tnapi->prodmbox = sndmbx;
+
+ if (i)
+ tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
+ else
+ tnapi->coal_now = HOSTCC_MODE_NOW;
+
+ if (!tg3_flag(tp, SUPPORT_MSIX))
+ break;
+
+ /*
+ * If we support MSIX, we'll be using RSS. If we're using
+ * RSS, the first vector only handles link interrupts and the
+ * remaining vectors handle rx and tx interrupts. Reuse the
+ * mailbox values for the next iteration. The values we setup
+ * above are still useful for the single vectored mode.
+ */
+ if (!i)
+ continue;
+
+ rcvmbx += 0x8;
+
+ if (sndmbx & 0x4)
+ sndmbx -= 0x4;
+ else
+ sndmbx += 0xc;
+ }
+
+ tg3_init_coal(tp);
+
+ pci_set_drvdata(pdev, dev);
+
+ if (tg3_flag(tp, 5717_PLUS)) {
+ /* Resume a low-power mode */
+ tg3_frob_aux_power(tp, false);
+ }
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting\n");
+ goto err_out_apeunmap;
+ }
+
+ netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
+ tp->board_part_number,
+ tp->pci_chip_rev_id,
+ tg3_bus_string(tp, str),
+ dev->dev_addr);
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
+ struct phy_device *phydev;
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ netdev_info(dev,
+ "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+ phydev->drv->name, dev_name(&phydev->dev));
+ } else {
+ char *ethtype;
+
+ if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
+ ethtype = "10/100Base-TX";
+ else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
+ ethtype = "1000Base-SX";
+ else
+ ethtype = "10/100/1000Base-T";
+
+ netdev_info(dev, "attached PHY is %s (%s Ethernet) "
+ "(WireSpeed[%d], EEE[%d])\n",
+ tg3_phy_string(tp), ethtype,
+ (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
+ (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
+ }
+
+ netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
+ (dev->features & NETIF_F_RXCSUM) != 0,
+ tg3_flag(tp, USE_LINKCHG_REG) != 0,
+ (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
+ tg3_flag(tp, ENABLE_ASF) != 0,
+ tg3_flag(tp, TSO_CAPABLE) != 0);
+ netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
+ tp->dma_rwctrl,
+ pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
+ ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
+
+ pci_save_state(pdev);
+
+ return 0;
+
+err_out_apeunmap:
+ if (tp->aperegs) {
+ iounmap(tp->aperegs);
+ tp->aperegs = NULL;
+ }
+
+err_out_iounmap:
+ if (tp->regs) {
+ iounmap(tp->regs);
+ tp->regs = NULL;
+ }
+
+err_out_free_dev:
+ free_netdev(dev);
+
+err_out_power_down:
+ pci_set_power_state(pdev, PCI_D3hot);
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void __devexit tg3_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (tp->fw)
+ release_firmware(tp->fw);
+
+ cancel_work_sync(&tp->reset_task);
+
+ if (!tg3_flag(tp, USE_PHYLIB)) {
+ tg3_phy_fini(tp);
+ tg3_mdio_fini(tp);
+ }
+
+ unregister_netdev(dev);
+ if (tp->aperegs) {
+ iounmap(tp->aperegs);
+ tp->aperegs = NULL;
+ }
+ if (tp->regs) {
+ iounmap(tp->regs);
+ tp->regs = NULL;
+ }
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tg3_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(dev);
+ int err;
+
+ if (!netif_running(dev))
+ return 0;
+
+ flush_work_sync(&tp->reset_task);
+ tg3_phy_stop(tp);
+ tg3_netif_stop(tp);
+
+ del_timer_sync(&tp->timer);
+
+ tg3_full_lock(tp, 1);
+ tg3_disable_ints(tp);
+ tg3_full_unlock(tp);
+
+ netif_device_detach(dev);
+
+ tg3_full_lock(tp, 0);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ tg3_flag_clear(tp, INIT_COMPLETE);
+ tg3_full_unlock(tp);
+
+ err = tg3_power_down_prepare(tp);
+ if (err) {
+ int err2;
+
+ tg3_full_lock(tp, 0);
+
+ tg3_flag_set(tp, INIT_COMPLETE);
+ err2 = tg3_restart_hw(tp, 1);
+ if (err2)
+ goto out;
+
+ tp->timer.expires = jiffies + tp->timer_offset;
+ add_timer(&tp->timer);
+
+ netif_device_attach(dev);
+ tg3_netif_start(tp);
+
+out:
+ tg3_full_unlock(tp);
+
+ if (!err2)
+ tg3_phy_start(tp);
+ }
+
+ return err;
+}
+
+static int tg3_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(dev);
+ int err;
+
+ if (!netif_running(dev))
+ return 0;
+
+ netif_device_attach(dev);
+
+ tg3_full_lock(tp, 0);
+
+ tg3_flag_set(tp, INIT_COMPLETE);
+ err = tg3_restart_hw(tp, 1);
+ if (err)
+ goto out;
+
+ tp->timer.expires = jiffies + tp->timer_offset;
+ add_timer(&tp->timer);
+
+ tg3_netif_start(tp);
+
+out:
+ tg3_full_unlock(tp);
+
+ if (!err)
+ tg3_phy_start(tp);
+
+ return err;
+}
+
+static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+#define TG3_PM_OPS (&tg3_pm_ops)
+
+#else
+
+#define TG3_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+/**
+ * tg3_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(netdev);
+ pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
+
+ netdev_info(netdev, "PCI I/O error detected\n");
+
+ rtnl_lock();
+
+ if (!netif_running(netdev))
+ goto done;
+
+ tg3_phy_stop(tp);
+
+ tg3_netif_stop(tp);
+
+ del_timer_sync(&tp->timer);
+ tg3_flag_clear(tp, RESTART_TIMER);
+
+ /* Want to make sure that the reset task doesn't run */
+ cancel_work_sync(&tp->reset_task);
+ tg3_flag_clear(tp, TX_RECOVERY_PENDING);
+ tg3_flag_clear(tp, RESTART_TIMER);
+
+ netif_device_detach(netdev);
+
+ /* Clean up software state, even if MMIO is blocked */
+ tg3_full_lock(tp, 0);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
+ tg3_full_unlock(tp);
+
+done:
+ if (state == pci_channel_io_perm_failure)
+ err = PCI_ERS_RESULT_DISCONNECT;
+ else
+ pci_disable_device(pdev);
+
+ rtnl_unlock();
+
+ return err;
+}
+
+/**
+ * tg3_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ * At this point, the card has experienced a hard reset,
+ * followed by fixups by BIOS, and has its config space
+ * set up identically to what it was at cold boot.
+ */
+static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(netdev);
+ pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+ int err;
+
+ rtnl_lock();
+
+ if (pci_enable_device(pdev)) {
+ netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
+ goto done;
+ }
+
+ pci_set_master(pdev);
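+	/* pci_restore_state() invalidates the saved copy of config
+	 * space, so re-save it to keep a fresh copy for any later
+	 * recovery pass.
+	 */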
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ if (!netif_running(netdev)) {
+ rc = PCI_ERS_RESULT_RECOVERED;
+ goto done;
+ }
+
+ err = tg3_power_up(tp);
+ if (err)
+ goto done;
+
+ rc = PCI_ERS_RESULT_RECOVERED;
+
+done:
+ rtnl_unlock();
+
+ return rc;
+}
+
+/**
+ * tg3_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells
+ * us that it's OK to resume normal operation.
+ */
+static void tg3_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(netdev);
+ int err;
+
+ rtnl_lock();
+
+ if (!netif_running(netdev))
+ goto done;
+
+ tg3_full_lock(tp, 0);
+ tg3_flag_set(tp, INIT_COMPLETE);
+ err = tg3_restart_hw(tp, 1);
+ tg3_full_unlock(tp);
+ if (err) {
+ netdev_err(netdev, "Cannot restart hardware after reset.\n");
+ goto done;
+ }
+
+ netif_device_attach(netdev);
+
+ tp->timer.expires = jiffies + tp->timer_offset;
+ add_timer(&tp->timer);
+
+ tg3_netif_start(tp);
+
+ tg3_phy_start(tp);
+
+done:
+ rtnl_unlock();
+}
+
+static struct pci_error_handlers tg3_err_handler = {
+ .error_detected = tg3_io_error_detected,
+ .slot_reset = tg3_io_slot_reset,
+ .resume = tg3_io_resume
+};
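
[Editor's note] For a recoverable error, the PCI error-recovery core walks these three callbacks in sequence. A simplified sketch of the flow they implement together (exact behaviour depends on the platform's AER support):

/* Simplified recovery sequence driven by the PCI error-recovery core:
 *
 *   1. tg3_io_error_detected()  - quiesce the device; return
 *                                 PCI_ERS_RESULT_NEED_RESET, or
 *                                 _DISCONNECT on permanent failure
 *   2. tg3_io_slot_reset()      - after the slot/link reset: re-enable
 *                                 the device, restore config space,
 *                                 return PCI_ERS_RESULT_RECOVERED
 *   3. tg3_io_resume()          - restart the hardware and let
 *                                 traffic flow again
 */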
+
+static struct pci_driver tg3_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = tg3_pci_tbl,
+ .probe = tg3_init_one,
+ .remove = __devexit_p(tg3_remove_one),
+ .err_handler = &tg3_err_handler,
+ .driver.pm = TG3_PM_OPS,
+};
+
+static int __init tg3_init(void)
+{
+ return pci_register_driver(&tg3_driver);
+}
+
+static void __exit tg3_cleanup(void)
+{
+ pci_unregister_driver(&tg3_driver);
+}
+
+module_init(tg3_init);
+module_exit(tg3_cleanup);
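
[Editor's note] Aside: later kernels provide a helper that collapses a registration-only init/exit pair like the one above into a single line. Shown purely for comparison; it was not available when this patch was written and is not part of it:

/* Equivalent one-liner on kernels that provide module_pci_driver(): */
module_pci_driver(tg3_driver);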
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
new file mode 100644
index 000000000000..d2976f39b2fc
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -0,0 +1,3186 @@
+/* $Id: tg3.h,v 1.37.2.32 2002/03/11 12:18:18 davem Exp $
+ * tg3.h: Definitions for Broadcom Tigon3 ethernet driver.
+ *
+ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
+ * Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (C) 2007-2011 Broadcom Corporation.
+ */
+
+#ifndef _T3_H
+#define _T3_H
+
+#define TG3_64BIT_REG_HIGH 0x00UL
+#define TG3_64BIT_REG_LOW 0x04UL
+
+/* Descriptor block info. */
+#define TG3_BDINFO_HOST_ADDR 0x0UL /* 64-bit */
+#define TG3_BDINFO_MAXLEN_FLAGS 0x8UL /* 32-bit */
+#define BDINFO_FLAGS_USE_EXT_RECV 0x00000001 /* ext rx_buffer_desc */
+#define BDINFO_FLAGS_DISABLED 0x00000002
+#define BDINFO_FLAGS_MAXLEN_MASK 0xffff0000
+#define BDINFO_FLAGS_MAXLEN_SHIFT 16
+#define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */
+#define TG3_BDINFO_SIZE 0x10UL
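
[Editor's note] The TG3_BDINFO_* offsets describe a 16-byte ring-descriptor control block that is replicated for each ring. A struct-shaped sketch of that layout, matching the offsets above (illustrative only; the driver accesses the block through the raw offsets, not a struct):

/* One 16-byte BDINFO block, per the offsets above: */
struct tg3_bdinfo_sketch {
	u64 host_addr;    /* 0x0: DMA address of the ring in host memory */
	u32 maxlen_flags; /* 0x8: max entries (bits 31:16) plus flags    */
	u32 nic_addr;     /* 0xc: ring address in NIC-local memory       */
};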
+
+#define TG3_RX_STD_MAX_SIZE_5700 512
+#define TG3_RX_STD_MAX_SIZE_5717 2048
+#define TG3_RX_JMB_MAX_SIZE_5700 256
+#define TG3_RX_JMB_MAX_SIZE_5717 1024
+#define TG3_RX_RET_MAX_SIZE_5700 1024
+#define TG3_RX_RET_MAX_SIZE_5705 512
+#define TG3_RX_RET_MAX_SIZE_5717 4096
+
+/* First 256 bytes are a mirror of PCI config space. */
+#define TG3PCI_VENDOR 0x00000000
+#define TG3PCI_VENDOR_BROADCOM 0x14e4
+#define TG3PCI_DEVICE 0x00000002
+#define TG3PCI_DEVICE_TIGON3_1 0x1644 /* BCM5700 */
+#define TG3PCI_DEVICE_TIGON3_2 0x1645 /* BCM5701 */
+#define TG3PCI_DEVICE_TIGON3_3 0x1646 /* BCM5702 */
+#define TG3PCI_DEVICE_TIGON3_4 0x1647 /* BCM5703 */
+#define TG3PCI_DEVICE_TIGON3_5761S 0x1688
+#define TG3PCI_DEVICE_TIGON3_5761SE 0x1689
+#define TG3PCI_DEVICE_TIGON3_57780 0x1692
+#define TG3PCI_DEVICE_TIGON3_57760 0x1690
+#define TG3PCI_DEVICE_TIGON3_57790 0x1694
+#define TG3PCI_DEVICE_TIGON3_57788 0x1691
+#define TG3PCI_DEVICE_TIGON3_5785_G 0x1699 /* GPHY */
+#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */
+#define TG3PCI_DEVICE_TIGON3_5717 0x1655
+#define TG3PCI_DEVICE_TIGON3_5718 0x1656
+#define TG3PCI_DEVICE_TIGON3_57781 0x16b1
+#define TG3PCI_DEVICE_TIGON3_57785 0x16b5
+#define TG3PCI_DEVICE_TIGON3_57761 0x16b0
+#define TG3PCI_DEVICE_TIGON3_57765 0x16b4
+#define TG3PCI_DEVICE_TIGON3_57791 0x16b2
+#define TG3PCI_DEVICE_TIGON3_57795 0x16b6
+#define TG3PCI_DEVICE_TIGON3_5719 0x1657
+#define TG3PCI_DEVICE_TIGON3_5720 0x165f
+/* 0x04 --> 0x2c unused */
+#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5 0x0001
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6 0x0002
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9 0x0003
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1 0x0005
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8 0x0006
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7 0x0007
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10 0x0008
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12 0x8008
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1 0x0009
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2 0x8009
+#define TG3PCI_SUBVENDOR_ID_3COM PCI_VENDOR_ID_3COM
+#define TG3PCI_SUBDEVICE_ID_3COM_3C996T 0x1000
+#define TG3PCI_SUBDEVICE_ID_3COM_3C996BT 0x1006
+#define TG3PCI_SUBDEVICE_ID_3COM_3C996SX 0x1004
+#define TG3PCI_SUBDEVICE_ID_3COM_3C1000T 0x1007
+#define TG3PCI_SUBDEVICE_ID_3COM_3C940BR01 0x1008
+#define TG3PCI_SUBVENDOR_ID_DELL PCI_VENDOR_ID_DELL
+#define TG3PCI_SUBDEVICE_ID_DELL_VIPER 0x00d1
+#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106
+#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109
+#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a
+#define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING 0x007d
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780 0x0085
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2 0x0099
+#define TG3PCI_SUBVENDOR_ID_IBM PCI_VENDOR_ID_IBM
+#define TG3PCI_SUBDEVICE_ID_IBM_5703SAX2 0x0281
+/* 0x30 --> 0x64 unused */
+#define TG3PCI_MSI_DATA 0x00000064
+/* 0x66 --> 0x68 unused */
+#define TG3PCI_MISC_HOST_CTRL 0x00000068
+#define MISC_HOST_CTRL_CLEAR_INT 0x00000001
+#define MISC_HOST_CTRL_MASK_PCI_INT 0x00000002
+#define MISC_HOST_CTRL_BYTE_SWAP 0x00000004
+#define MISC_HOST_CTRL_WORD_SWAP 0x00000008
+#define MISC_HOST_CTRL_PCISTATE_RW 0x00000010
+#define MISC_HOST_CTRL_CLKREG_RW 0x00000020
+#define MISC_HOST_CTRL_REGWORD_SWAP 0x00000040
+#define MISC_HOST_CTRL_INDIR_ACCESS 0x00000080
+#define MISC_HOST_CTRL_IRQ_MASK_MODE 0x00000100
+#define MISC_HOST_CTRL_TAGGED_STATUS 0x00000200
+#define MISC_HOST_CTRL_CHIPREV 0xffff0000
+#define MISC_HOST_CTRL_CHIPREV_SHIFT 16
+#define GET_CHIP_REV_ID(MISC_HOST_CTRL) \
+ (((MISC_HOST_CTRL) & MISC_HOST_CTRL_CHIPREV) >> \
+ MISC_HOST_CTRL_CHIPREV_SHIFT)
+#define CHIPREV_ID_5700_A0 0x7000
+#define CHIPREV_ID_5700_A1 0x7001
+#define CHIPREV_ID_5700_B0 0x7100
+#define CHIPREV_ID_5700_B1 0x7101
+#define CHIPREV_ID_5700_B3 0x7102
+#define CHIPREV_ID_5700_ALTIMA 0x7104
+#define CHIPREV_ID_5700_C0 0x7200
+#define CHIPREV_ID_5701_A0 0x0000
+#define CHIPREV_ID_5701_B0 0x0100
+#define CHIPREV_ID_5701_B2 0x0102
+#define CHIPREV_ID_5701_B5 0x0105
+#define CHIPREV_ID_5703_A0 0x1000
+#define CHIPREV_ID_5703_A1 0x1001
+#define CHIPREV_ID_5703_A2 0x1002
+#define CHIPREV_ID_5703_A3 0x1003
+#define CHIPREV_ID_5704_A0 0x2000
+#define CHIPREV_ID_5704_A1 0x2001
+#define CHIPREV_ID_5704_A2 0x2002
+#define CHIPREV_ID_5704_A3 0x2003
+#define CHIPREV_ID_5705_A0 0x3000
+#define CHIPREV_ID_5705_A1 0x3001
+#define CHIPREV_ID_5705_A2 0x3002
+#define CHIPREV_ID_5705_A3 0x3003
+#define CHIPREV_ID_5750_A0 0x4000
+#define CHIPREV_ID_5750_A1 0x4001
+#define CHIPREV_ID_5750_A3 0x4003
+#define CHIPREV_ID_5750_C2 0x4202
+#define CHIPREV_ID_5752_A0_HW 0x5000
+#define CHIPREV_ID_5752_A0 0x6000
+#define CHIPREV_ID_5752_A1 0x6001
+#define CHIPREV_ID_5714_A2 0x9002
+#define CHIPREV_ID_5906_A1 0xc001
+#define CHIPREV_ID_57780_A0 0x57780000
+#define CHIPREV_ID_57780_A1 0x57780001
+#define CHIPREV_ID_5717_A0 0x05717000
+#define CHIPREV_ID_57765_A0 0x57785000
+#define CHIPREV_ID_5719_A0 0x05719000
+#define CHIPREV_ID_5720_A0 0x05720000
+#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
+#define ASIC_REV_5700 0x07
+#define ASIC_REV_5701 0x00
+#define ASIC_REV_5703 0x01
+#define ASIC_REV_5704 0x02
+#define ASIC_REV_5705 0x03
+#define ASIC_REV_5750 0x04
+#define ASIC_REV_5752 0x06
+#define ASIC_REV_5780 0x08
+#define ASIC_REV_5714 0x09
+#define ASIC_REV_5755 0x0a
+#define ASIC_REV_5787 0x0b
+#define ASIC_REV_5906 0x0c
+#define ASIC_REV_USE_PROD_ID_REG 0x0f
+#define ASIC_REV_5784 0x5784
+#define ASIC_REV_5761 0x5761
+#define ASIC_REV_5785 0x5785
+#define ASIC_REV_57780 0x57780
+#define ASIC_REV_5717 0x5717
+#define ASIC_REV_57765 0x57785
+#define ASIC_REV_5719 0x5719
+#define ASIC_REV_5720 0x5720
+#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
+#define CHIPREV_5700_AX 0x70
+#define CHIPREV_5700_BX 0x71
+#define CHIPREV_5700_CX 0x72
+#define CHIPREV_5701_AX 0x00
+#define CHIPREV_5703_AX 0x10
+#define CHIPREV_5704_AX 0x20
+#define CHIPREV_5704_BX 0x21
+#define CHIPREV_5750_AX 0x40
+#define CHIPREV_5750_BX 0x41
+#define CHIPREV_5784_AX 0x57840
+#define CHIPREV_5761_AX 0x57610
+#define CHIPREV_57765_AX 0x577650
+#define GET_METAL_REV(CHIP_REV_ID) ((CHIP_REV_ID) & 0xff)
+#define METAL_REV_A0 0x00
+#define METAL_REV_A1 0x01
+#define METAL_REV_B0 0x00
+#define METAL_REV_B1 0x01
+#define METAL_REV_B2 0x02
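
[Editor's note] Putting the three accessors together: a chip revision ID packs the ASIC revision, chip revision, and metal revision into one value. A worked decode for CHIPREV_ID_5700_B0 (0x7100), using only the macros defined above:

u32 chip_rev_id = CHIPREV_ID_5700_B0;   /* 0x7100 */

u32 asic  = GET_ASIC_REV(chip_rev_id);  /* 0x7100 >> 12 = 0x07 = ASIC_REV_5700   */
u32 rev   = GET_CHIP_REV(chip_rev_id);  /* 0x7100 >>  8 = 0x71 = CHIPREV_5700_BX */
u32 metal = GET_METAL_REV(chip_rev_id); /* 0x7100 & 0xff = 0x00 = METAL_REV_B0   */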
+#define TG3PCI_DMA_RW_CTRL 0x0000006c
+#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001
+#define DMA_RWCTRL_TAGGED_STAT_WA 0x00000080
+#define DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK 0x00000380
+#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700
+#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000
+#define DMA_RWCTRL_READ_BNDRY_16 0x00000100
+#define DMA_RWCTRL_READ_BNDRY_128_PCIX 0x00000100
+#define DMA_RWCTRL_READ_BNDRY_32 0x00000200
+#define DMA_RWCTRL_READ_BNDRY_256_PCIX 0x00000200
+#define DMA_RWCTRL_READ_BNDRY_64 0x00000300
+#define DMA_RWCTRL_READ_BNDRY_384_PCIX 0x00000300
+#define DMA_RWCTRL_READ_BNDRY_128 0x00000400
+#define DMA_RWCTRL_READ_BNDRY_256 0x00000500
+#define DMA_RWCTRL_READ_BNDRY_512 0x00000600
+#define DMA_RWCTRL_READ_BNDRY_1024 0x00000700
+#define DMA_RWCTRL_WRITE_BNDRY_MASK 0x00003800
+#define DMA_RWCTRL_WRITE_BNDRY_DISAB 0x00000000
+#define DMA_RWCTRL_WRITE_BNDRY_16 0x00000800
+#define DMA_RWCTRL_WRITE_BNDRY_128_PCIX 0x00000800
+#define DMA_RWCTRL_WRITE_BNDRY_32 0x00001000
+#define DMA_RWCTRL_WRITE_BNDRY_256_PCIX 0x00001000
+#define DMA_RWCTRL_WRITE_BNDRY_64 0x00001800
+#define DMA_RWCTRL_WRITE_BNDRY_384_PCIX 0x00001800
+#define DMA_RWCTRL_WRITE_BNDRY_128 0x00002000
+#define DMA_RWCTRL_WRITE_BNDRY_256 0x00002800
+#define DMA_RWCTRL_WRITE_BNDRY_512 0x00003000
+#define DMA_RWCTRL_WRITE_BNDRY_1024 0x00003800
+#define DMA_RWCTRL_ONE_DMA 0x00004000
+#define DMA_RWCTRL_READ_WATER 0x00070000
+#define DMA_RWCTRL_READ_WATER_SHIFT 16
+#define DMA_RWCTRL_WRITE_WATER 0x00380000
+#define DMA_RWCTRL_WRITE_WATER_SHIFT 19
+#define DMA_RWCTRL_USE_MEM_READ_MULT 0x00400000
+#define DMA_RWCTRL_ASSERT_ALL_BE 0x00800000
+#define DMA_RWCTRL_PCI_READ_CMD 0x0f000000
+#define DMA_RWCTRL_PCI_READ_CMD_SHIFT 24
+#define DMA_RWCTRL_PCI_WRITE_CMD 0xf0000000
+#define DMA_RWCTRL_PCI_WRITE_CMD_SHIFT 28
+#define DMA_RWCTRL_WRITE_BNDRY_64_PCIE 0x10000000
+#define DMA_RWCTRL_WRITE_BNDRY_128_PCIE 0x30000000
+#define DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE 0x70000000
+#define TG3PCI_PCISTATE 0x00000070
+#define PCISTATE_FORCE_RESET 0x00000001
+#define PCISTATE_INT_NOT_ACTIVE 0x00000002
+#define PCISTATE_CONV_PCI_MODE 0x00000004
+#define PCISTATE_BUS_SPEED_HIGH 0x00000008
+#define PCISTATE_BUS_32BIT 0x00000010
+#define PCISTATE_ROM_ENABLE 0x00000020
+#define PCISTATE_ROM_RETRY_ENABLE 0x00000040
+#define PCISTATE_FLAT_VIEW 0x00000100
+#define PCISTATE_RETRY_SAME_DMA 0x00002000
+#define PCISTATE_ALLOW_APE_CTLSPC_WR 0x00010000
+#define PCISTATE_ALLOW_APE_SHMEM_WR 0x00020000
+#define PCISTATE_ALLOW_APE_PSPACE_WR 0x00040000
+#define TG3PCI_CLOCK_CTRL 0x00000074
+#define CLOCK_CTRL_CORECLK_DISABLE 0x00000200
+#define CLOCK_CTRL_RXCLK_DISABLE 0x00000400
+#define CLOCK_CTRL_TXCLK_DISABLE 0x00000800
+#define CLOCK_CTRL_ALTCLK 0x00001000
+#define CLOCK_CTRL_PWRDOWN_PLL133 0x00008000
+#define CLOCK_CTRL_44MHZ_CORE 0x00040000
+#define CLOCK_CTRL_625_CORE 0x00100000
+#define CLOCK_CTRL_FORCE_CLKRUN 0x00200000
+#define CLOCK_CTRL_CLKRUN_OENABLE 0x00400000
+#define CLOCK_CTRL_DELAY_PCI_GRANT 0x80000000
+#define TG3PCI_REG_BASE_ADDR 0x00000078
+#define TG3PCI_MEM_WIN_BASE_ADDR 0x0000007c
+#define TG3PCI_REG_DATA 0x00000080
+#define TG3PCI_MEM_WIN_DATA 0x00000084
+#define TG3PCI_MISC_LOCAL_CTRL 0x00000090
+/* 0x94 --> 0x98 unused */
+#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */
+#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */
+/* 0xa8 --> 0xb8 unused */
+#define TG3PCI_DUAL_MAC_CTRL 0x000000b8
+#define DUAL_MAC_CTRL_CH_MASK 0x00000003
+#define DUAL_MAC_CTRL_ID 0x00000004
+#define TG3PCI_PRODID_ASICREV 0x000000bc
+#define PROD_ID_ASIC_REV_MASK 0x0fffffff
+/* 0xc0 --> 0xf4 unused */
+
+#define TG3PCI_GEN2_PRODID_ASICREV 0x000000f4
+#define TG3PCI_GEN15_PRODID_ASICREV 0x000000fc
+/* 0x100 --> 0x110 unused */
+
+#define TG3_CORR_ERR_STAT 0x00000110
+#define TG3_CORR_ERR_STAT_CLEAR 0xffffffff
+/* 0x114 --> 0x200 unused */
+
+/* Mailbox registers */
+#define MAILBOX_INTERRUPT_0 0x00000200 /* 64-bit */
+#define MAILBOX_INTERRUPT_1 0x00000208 /* 64-bit */
+#define MAILBOX_INTERRUPT_2 0x00000210 /* 64-bit */
+#define MAILBOX_INTERRUPT_3 0x00000218 /* 64-bit */
+#define MAILBOX_GENERAL_0 0x00000220 /* 64-bit */
+#define MAILBOX_GENERAL_1 0x00000228 /* 64-bit */
+#define MAILBOX_GENERAL_2 0x00000230 /* 64-bit */
+#define MAILBOX_GENERAL_3 0x00000238 /* 64-bit */
+#define MAILBOX_GENERAL_4 0x00000240 /* 64-bit */
+#define MAILBOX_GENERAL_5 0x00000248 /* 64-bit */
+#define MAILBOX_GENERAL_6 0x00000250 /* 64-bit */
+#define MAILBOX_GENERAL_7 0x00000258 /* 64-bit */
+#define MAILBOX_RELOAD_STAT 0x00000260 /* 64-bit */
+#define MAILBOX_RCV_STD_PROD_IDX 0x00000268 /* 64-bit */
+#define TG3_RX_STD_PROD_IDX_REG (MAILBOX_RCV_STD_PROD_IDX + \
+ TG3_64BIT_REG_LOW)
+#define MAILBOX_RCV_JUMBO_PROD_IDX 0x00000270 /* 64-bit */
+#define TG3_RX_JMB_PROD_IDX_REG (MAILBOX_RCV_JUMBO_PROD_IDX + \
+ TG3_64BIT_REG_LOW)
+#define MAILBOX_RCV_MINI_PROD_IDX 0x00000278 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_0 0x00000280 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_1 0x00000288 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_2 0x00000290 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_3 0x00000298 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_4 0x000002a0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_5 0x000002a8 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_6 0x000002b0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_7 0x000002b8 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_8 0x000002c0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_9 0x000002c8 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_10 0x000002d0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_11 0x000002d8 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_12 0x000002e0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_13 0x000002e8 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_14 0x000002f0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_15 0x000002f8 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_0 0x00000300 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_1 0x00000308 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_2 0x00000310 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_3 0x00000318 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_4 0x00000320 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_5 0x00000328 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_6 0x00000330 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_7 0x00000338 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_8 0x00000340 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_9 0x00000348 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_10 0x00000350 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_11 0x00000358 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_12 0x00000360 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_13 0x00000368 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_14 0x00000370 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_15 0x00000378 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_0 0x00000380 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_1 0x00000388 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_2 0x00000390 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_3 0x00000398 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_4 0x000003a0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_5 0x000003a8 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_6 0x000003b0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_7 0x000003b8 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_8 0x000003c0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_9 0x000003c8 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_10 0x000003d0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_11 0x000003d8 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_12 0x000003e0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_13 0x000003e8 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_14 0x000003f0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_15 0x000003f8 /* 64-bit */
+
+/* MAC control registers */
+#define MAC_MODE 0x00000400
+#define MAC_MODE_RESET 0x00000001
+#define MAC_MODE_HALF_DUPLEX 0x00000002
+#define MAC_MODE_PORT_MODE_MASK 0x0000000c
+#define MAC_MODE_PORT_MODE_TBI 0x0000000c
+#define MAC_MODE_PORT_MODE_GMII 0x00000008
+#define MAC_MODE_PORT_MODE_MII 0x00000004
+#define MAC_MODE_PORT_MODE_NONE 0x00000000
+#define MAC_MODE_PORT_INT_LPBACK 0x00000010
+#define MAC_MODE_TAGGED_MAC_CTRL 0x00000080
+#define MAC_MODE_TX_BURSTING 0x00000100
+#define MAC_MODE_MAX_DEFER 0x00000200
+#define MAC_MODE_LINK_POLARITY 0x00000400
+#define MAC_MODE_RXSTAT_ENABLE 0x00000800
+#define MAC_MODE_RXSTAT_CLEAR 0x00001000
+#define MAC_MODE_RXSTAT_FLUSH 0x00002000
+#define MAC_MODE_TXSTAT_ENABLE 0x00004000
+#define MAC_MODE_TXSTAT_CLEAR 0x00008000
+#define MAC_MODE_TXSTAT_FLUSH 0x00010000
+#define MAC_MODE_SEND_CONFIGS 0x00020000
+#define MAC_MODE_MAGIC_PKT_ENABLE 0x00040000
+#define MAC_MODE_ACPI_ENABLE 0x00080000
+#define MAC_MODE_MIP_ENABLE 0x00100000
+#define MAC_MODE_TDE_ENABLE 0x00200000
+#define MAC_MODE_RDE_ENABLE 0x00400000
+#define MAC_MODE_FHDE_ENABLE 0x00800000
+#define MAC_MODE_KEEP_FRAME_IN_WOL 0x01000000
+#define MAC_MODE_APE_RX_EN 0x08000000
+#define MAC_MODE_APE_TX_EN 0x10000000
+#define MAC_STATUS 0x00000404
+#define MAC_STATUS_PCS_SYNCED 0x00000001
+#define MAC_STATUS_SIGNAL_DET 0x00000002
+#define MAC_STATUS_RCVD_CFG 0x00000004
+#define MAC_STATUS_CFG_CHANGED 0x00000008
+#define MAC_STATUS_SYNC_CHANGED 0x00000010
+#define MAC_STATUS_PORT_DEC_ERR 0x00000400
+#define MAC_STATUS_LNKSTATE_CHANGED 0x00001000
+#define MAC_STATUS_MI_COMPLETION 0x00400000
+#define MAC_STATUS_MI_INTERRUPT 0x00800000
+#define MAC_STATUS_AP_ERROR 0x01000000
+#define MAC_STATUS_ODI_ERROR 0x02000000
+#define MAC_STATUS_RXSTAT_OVERRUN 0x04000000
+#define MAC_STATUS_TXSTAT_OVERRUN 0x08000000
+#define MAC_EVENT 0x00000408
+#define MAC_EVENT_PORT_DECODE_ERR 0x00000400
+#define MAC_EVENT_LNKSTATE_CHANGED 0x00001000
+#define MAC_EVENT_MI_COMPLETION 0x00400000
+#define MAC_EVENT_MI_INTERRUPT 0x00800000
+#define MAC_EVENT_AP_ERROR 0x01000000
+#define MAC_EVENT_ODI_ERROR 0x02000000
+#define MAC_EVENT_RXSTAT_OVERRUN 0x04000000
+#define MAC_EVENT_TXSTAT_OVERRUN 0x08000000
+#define MAC_LED_CTRL 0x0000040c
+#define LED_CTRL_LNKLED_OVERRIDE 0x00000001
+#define LED_CTRL_1000MBPS_ON 0x00000002
+#define LED_CTRL_100MBPS_ON 0x00000004
+#define LED_CTRL_10MBPS_ON 0x00000008
+#define LED_CTRL_TRAFFIC_OVERRIDE 0x00000010
+#define LED_CTRL_TRAFFIC_BLINK 0x00000020
+#define LED_CTRL_TRAFFIC_LED 0x00000040
+#define LED_CTRL_1000MBPS_STATUS 0x00000080
+#define LED_CTRL_100MBPS_STATUS 0x00000100
+#define LED_CTRL_10MBPS_STATUS 0x00000200
+#define LED_CTRL_TRAFFIC_STATUS 0x00000400
+#define LED_CTRL_MODE_MAC 0x00000000
+#define LED_CTRL_MODE_PHY_1 0x00000800
+#define LED_CTRL_MODE_PHY_2 0x00001000
+#define LED_CTRL_MODE_SHASTA_MAC 0x00002000
+#define LED_CTRL_MODE_SHARED 0x00004000
+#define LED_CTRL_MODE_COMBO 0x00008000
+#define LED_CTRL_BLINK_RATE_MASK 0x7ff80000
+#define LED_CTRL_BLINK_RATE_SHIFT 19
+#define LED_CTRL_BLINK_PER_OVERRIDE 0x00080000
+#define LED_CTRL_BLINK_RATE_OVERRIDE 0x80000000
+#define MAC_ADDR_0_HIGH 0x00000410 /* upper 2 bytes */
+#define MAC_ADDR_0_LOW 0x00000414 /* lower 4 bytes */
+#define MAC_ADDR_1_HIGH 0x00000418 /* upper 2 bytes */
+#define MAC_ADDR_1_LOW 0x0000041c /* lower 4 bytes */
+#define MAC_ADDR_2_HIGH 0x00000420 /* upper 2 bytes */
+#define MAC_ADDR_2_LOW 0x00000424 /* lower 4 bytes */
+#define MAC_ADDR_3_HIGH 0x00000428 /* upper 2 bytes */
+#define MAC_ADDR_3_LOW 0x0000042c /* lower 4 bytes */
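
[Editor's note] As the comments note, each station-address slot splits a 6-byte MAC address across a HIGH register (upper 2 bytes) and a LOW register (lower 4 bytes). A sketch of programming slot 0, modelled on the driver's __tg3_set_mac_addr(); tw32() is the driver's register-write accessor, assumed here:

/* addr[] is the 6-byte MAC address. Upper two bytes go in the HIGH
 * register, the remaining four in the LOW register, big-endian within
 * each word.
 */
u32 addr_high = (addr[0] << 8)  |  addr[1];
u32 addr_low  = (addr[2] << 24) | (addr[3] << 16) |
		(addr[4] << 8)  |  addr[5];

tw32(MAC_ADDR_0_HIGH, addr_high);
tw32(MAC_ADDR_0_LOW,  addr_low);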
+#define MAC_ACPI_MBUF_PTR 0x00000430
+#define MAC_ACPI_LEN_OFFSET 0x00000434
+#define ACPI_LENOFF_LEN_MASK 0x0000ffff
+#define ACPI_LENOFF_LEN_SHIFT 0
+#define ACPI_LENOFF_OFF_MASK 0x0fff0000
+#define ACPI_LENOFF_OFF_SHIFT 16
+#define MAC_TX_BACKOFF_SEED 0x00000438
+#define TX_BACKOFF_SEED_MASK 0x000003ff
+#define MAC_RX_MTU_SIZE 0x0000043c
+#define RX_MTU_SIZE_MASK 0x0000ffff
+#define MAC_PCS_TEST 0x00000440
+#define PCS_TEST_PATTERN_MASK 0x000fffff
+#define PCS_TEST_PATTERN_SHIFT 0
+#define PCS_TEST_ENABLE 0x00100000
+#define MAC_TX_AUTO_NEG 0x00000444
+#define TX_AUTO_NEG_MASK 0x0000ffff
+#define TX_AUTO_NEG_SHIFT 0
+#define MAC_RX_AUTO_NEG 0x00000448
+#define RX_AUTO_NEG_MASK 0x0000ffff
+#define RX_AUTO_NEG_SHIFT 0
+#define MAC_MI_COM 0x0000044c
+#define MI_COM_CMD_MASK 0x0c000000
+#define MI_COM_CMD_WRITE 0x04000000
+#define MI_COM_CMD_READ 0x08000000
+#define MI_COM_READ_FAILED 0x10000000
+#define MI_COM_START 0x20000000
+#define MI_COM_BUSY 0x20000000
+#define MI_COM_PHY_ADDR_MASK 0x03e00000
+#define MI_COM_PHY_ADDR_SHIFT 21
+#define MI_COM_REG_ADDR_MASK 0x001f0000
+#define MI_COM_REG_ADDR_SHIFT 16
+#define MI_COM_DATA_MASK 0x0000ffff
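
[Editor's note] The MAC_MI_COM fields above drive MDIO transactions to the PHY. A minimal sketch of how a PHY register read is composed from them, modelled on the driver's tg3_readphy(); tr32()/tw32() are the driver's register accessors, assumed here:

/* Start an MDIO read of PHY register `reg` on PHY `phy_addr`, then
 * poll MI_COM_BUSY until the MAC completes the frame. The real code
 * bounds the polling loop and checks MI_COM_READ_FAILED.
 */
u32 frame = MI_COM_START | MI_COM_CMD_READ |
	    ((phy_addr << MI_COM_PHY_ADDR_SHIFT) & MI_COM_PHY_ADDR_MASK) |
	    ((reg << MI_COM_REG_ADDR_SHIFT) & MI_COM_REG_ADDR_MASK);

tw32(MAC_MI_COM, frame);

while (tr32(MAC_MI_COM) & MI_COM_BUSY)
	udelay(10);

u32 val = tr32(MAC_MI_COM);
if (!(val & MI_COM_READ_FAILED))
	val &= MI_COM_DATA_MASK;   /* 16-bit PHY register value */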
+#define MAC_MI_STAT 0x00000450
+#define MAC_MI_STAT_LNKSTAT_ATTN_ENAB 0x00000001
+#define MAC_MI_STAT_10MBPS_MODE 0x00000002
+#define MAC_MI_MODE 0x00000454
+#define MAC_MI_MODE_CLK_10MHZ 0x00000001
+#define MAC_MI_MODE_SHORT_PREAMBLE 0x00000002
+#define MAC_MI_MODE_AUTO_POLL 0x00000010
+#define MAC_MI_MODE_500KHZ_CONST 0x00008000
+#define MAC_MI_MODE_BASE 0x000c0000 /* XXX magic values XXX */
+#define MAC_AUTO_POLL_STATUS 0x00000458
+#define MAC_AUTO_POLL_ERROR 0x00000001
+#define MAC_TX_MODE 0x0000045c
+#define TX_MODE_RESET 0x00000001
+#define TX_MODE_ENABLE 0x00000002
+#define TX_MODE_FLOW_CTRL_ENABLE 0x00000010
+#define TX_MODE_BIG_BCKOFF_ENABLE 0x00000020
+#define TX_MODE_LONG_PAUSE_ENABLE 0x00000040
+#define TX_MODE_MBUF_LOCKUP_FIX 0x00000100
+#define TX_MODE_JMB_FRM_LEN 0x00400000
+#define TX_MODE_CNT_DN_MODE 0x00800000
+#define MAC_TX_STATUS 0x00000460
+#define TX_STATUS_XOFFED 0x00000001
+#define TX_STATUS_SENT_XOFF 0x00000002
+#define TX_STATUS_SENT_XON 0x00000004
+#define TX_STATUS_LINK_UP 0x00000008
+#define TX_STATUS_ODI_UNDERRUN 0x00000010
+#define TX_STATUS_ODI_OVERRUN 0x00000020
+#define MAC_TX_LENGTHS 0x00000464
+#define TX_LENGTHS_SLOT_TIME_MASK 0x000000ff
+#define TX_LENGTHS_SLOT_TIME_SHIFT 0
+#define TX_LENGTHS_IPG_MASK 0x00000f00
+#define TX_LENGTHS_IPG_SHIFT 8
+#define TX_LENGTHS_IPG_CRS_MASK 0x00003000
+#define TX_LENGTHS_IPG_CRS_SHIFT 12
+#define TX_LENGTHS_JMB_FRM_LEN_MSK 0x00ff0000
+#define TX_LENGTHS_CNT_DWN_VAL_MSK 0xff000000
+#define MAC_RX_MODE 0x00000468
+#define RX_MODE_RESET 0x00000001
+#define RX_MODE_ENABLE 0x00000002
+#define RX_MODE_FLOW_CTRL_ENABLE 0x00000004
+#define RX_MODE_KEEP_MAC_CTRL 0x00000008
+#define RX_MODE_KEEP_PAUSE 0x00000010
+#define RX_MODE_ACCEPT_OVERSIZED 0x00000020
+#define RX_MODE_ACCEPT_RUNTS 0x00000040
+#define RX_MODE_LEN_CHECK 0x00000080
+#define RX_MODE_PROMISC 0x00000100
+#define RX_MODE_NO_CRC_CHECK 0x00000200
+#define RX_MODE_KEEP_VLAN_TAG 0x00000400
+#define RX_MODE_RSS_IPV4_HASH_EN 0x00010000
+#define RX_MODE_RSS_TCP_IPV4_HASH_EN 0x00020000
+#define RX_MODE_RSS_IPV6_HASH_EN 0x00040000
+#define RX_MODE_RSS_TCP_IPV6_HASH_EN 0x00080000
+#define RX_MODE_RSS_ITBL_HASH_BITS_7 0x00700000
+#define RX_MODE_RSS_ENABLE 0x00800000
+#define RX_MODE_IPV6_CSUM_ENABLE 0x01000000
+#define MAC_RX_STATUS 0x0000046c
+#define RX_STATUS_REMOTE_TX_XOFFED 0x00000001
+#define RX_STATUS_XOFF_RCVD 0x00000002
+#define RX_STATUS_XON_RCVD 0x00000004
+#define MAC_HASH_REG_0 0x00000470
+#define MAC_HASH_REG_1 0x00000474
+#define MAC_HASH_REG_2 0x00000478
+#define MAC_HASH_REG_3 0x0000047c
+#define MAC_RCV_RULE_0 0x00000480
+#define MAC_RCV_VALUE_0 0x00000484
+#define MAC_RCV_RULE_1 0x00000488
+#define MAC_RCV_VALUE_1 0x0000048c
+#define MAC_RCV_RULE_2 0x00000490
+#define MAC_RCV_VALUE_2 0x00000494
+#define MAC_RCV_RULE_3 0x00000498
+#define MAC_RCV_VALUE_3 0x0000049c
+#define MAC_RCV_RULE_4 0x000004a0
+#define MAC_RCV_VALUE_4 0x000004a4
+#define MAC_RCV_RULE_5 0x000004a8
+#define MAC_RCV_VALUE_5 0x000004ac
+#define MAC_RCV_RULE_6 0x000004b0
+#define MAC_RCV_VALUE_6 0x000004b4
+#define MAC_RCV_RULE_7 0x000004b8
+#define MAC_RCV_VALUE_7 0x000004bc
+#define MAC_RCV_RULE_8 0x000004c0
+#define MAC_RCV_VALUE_8 0x000004c4
+#define MAC_RCV_RULE_9 0x000004c8
+#define MAC_RCV_VALUE_9 0x000004cc
+#define MAC_RCV_RULE_10 0x000004d0
+#define MAC_RCV_VALUE_10 0x000004d4
+#define MAC_RCV_RULE_11 0x000004d8
+#define MAC_RCV_VALUE_11 0x000004dc
+#define MAC_RCV_RULE_12 0x000004e0
+#define MAC_RCV_VALUE_12 0x000004e4
+#define MAC_RCV_RULE_13 0x000004e8
+#define MAC_RCV_VALUE_13 0x000004ec
+#define MAC_RCV_RULE_14 0x000004f0
+#define MAC_RCV_VALUE_14 0x000004f4
+#define MAC_RCV_RULE_15 0x000004f8
+#define MAC_RCV_VALUE_15 0x000004fc
+#define RCV_RULE_DISABLE_MASK 0x7fffffff
+#define MAC_RCV_RULE_CFG 0x00000500
+#define RCV_RULE_CFG_DEFAULT_CLASS 0x00000008
+#define MAC_LOW_WMARK_MAX_RX_FRAME 0x00000504
+/* 0x508 --> 0x520 unused */
+#define MAC_HASHREGU_0 0x00000520
+#define MAC_HASHREGU_1 0x00000524
+#define MAC_HASHREGU_2 0x00000528
+#define MAC_HASHREGU_3 0x0000052c
+#define MAC_EXTADDR_0_HIGH 0x00000530
+#define MAC_EXTADDR_0_LOW 0x00000534
+#define MAC_EXTADDR_1_HIGH 0x00000538
+#define MAC_EXTADDR_1_LOW 0x0000053c
+#define MAC_EXTADDR_2_HIGH 0x00000540
+#define MAC_EXTADDR_2_LOW 0x00000544
+#define MAC_EXTADDR_3_HIGH 0x00000548
+#define MAC_EXTADDR_3_LOW 0x0000054c
+#define MAC_EXTADDR_4_HIGH 0x00000550
+#define MAC_EXTADDR_4_LOW 0x00000554
+#define MAC_EXTADDR_5_HIGH 0x00000558
+#define MAC_EXTADDR_5_LOW 0x0000055c
+#define MAC_EXTADDR_6_HIGH 0x00000560
+#define MAC_EXTADDR_6_LOW 0x00000564
+#define MAC_EXTADDR_7_HIGH 0x00000568
+#define MAC_EXTADDR_7_LOW 0x0000056c
+#define MAC_EXTADDR_8_HIGH 0x00000570
+#define MAC_EXTADDR_8_LOW 0x00000574
+#define MAC_EXTADDR_9_HIGH 0x00000578
+#define MAC_EXTADDR_9_LOW 0x0000057c
+#define MAC_EXTADDR_10_HIGH 0x00000580
+#define MAC_EXTADDR_10_LOW 0x00000584
+#define MAC_EXTADDR_11_HIGH 0x00000588
+#define MAC_EXTADDR_11_LOW 0x0000058c
+#define MAC_SERDES_CFG 0x00000590
+#define MAC_SERDES_CFG_EDGE_SELECT 0x00001000
+#define MAC_SERDES_STAT 0x00000594
+/* 0x598 --> 0x5a0 unused */
+#define MAC_PHYCFG1 0x000005a0
+#define MAC_PHYCFG1_RGMII_INT 0x00000001
+#define MAC_PHYCFG1_RXCLK_TO_MASK 0x00001ff0
+#define MAC_PHYCFG1_RXCLK_TIMEOUT 0x00001000
+#define MAC_PHYCFG1_TXCLK_TO_MASK 0x01ff0000
+#define MAC_PHYCFG1_TXCLK_TIMEOUT 0x01000000
+#define MAC_PHYCFG1_RGMII_EXT_RX_DEC 0x02000000
+#define MAC_PHYCFG1_RGMII_SND_STAT_EN 0x04000000
+#define MAC_PHYCFG1_TXC_DRV 0x20000000
+#define MAC_PHYCFG2 0x000005a4
+#define MAC_PHYCFG2_INBAND_ENABLE 0x00000001
+#define MAC_PHYCFG2_EMODE_MASK_MASK 0x000001c0
+#define MAC_PHYCFG2_EMODE_MASK_AC131 0x000000c0
+#define MAC_PHYCFG2_EMODE_MASK_50610 0x00000100
+#define MAC_PHYCFG2_EMODE_MASK_RT8211 0x00000000
+#define MAC_PHYCFG2_EMODE_MASK_RT8201 0x000001c0
+#define MAC_PHYCFG2_EMODE_COMP_MASK 0x00000e00
+#define MAC_PHYCFG2_EMODE_COMP_AC131 0x00000600
+#define MAC_PHYCFG2_EMODE_COMP_50610 0x00000400
+#define MAC_PHYCFG2_EMODE_COMP_RT8211 0x00000800
+#define MAC_PHYCFG2_EMODE_COMP_RT8201 0x00000000
+#define MAC_PHYCFG2_FMODE_MASK_MASK 0x00007000
+#define MAC_PHYCFG2_FMODE_MASK_AC131 0x00006000
+#define MAC_PHYCFG2_FMODE_MASK_50610 0x00004000
+#define MAC_PHYCFG2_FMODE_MASK_RT8211 0x00000000
+#define MAC_PHYCFG2_FMODE_MASK_RT8201 0x00007000
+#define MAC_PHYCFG2_FMODE_COMP_MASK 0x00038000
+#define MAC_PHYCFG2_FMODE_COMP_AC131 0x00030000
+#define MAC_PHYCFG2_FMODE_COMP_50610 0x00008000
+#define MAC_PHYCFG2_FMODE_COMP_RT8211 0x00038000
+#define MAC_PHYCFG2_FMODE_COMP_RT8201 0x00000000
+#define MAC_PHYCFG2_GMODE_MASK_MASK 0x001c0000
+#define MAC_PHYCFG2_GMODE_MASK_AC131 0x001c0000
+#define MAC_PHYCFG2_GMODE_MASK_50610 0x00100000
+#define MAC_PHYCFG2_GMODE_MASK_RT8211 0x00000000
+#define MAC_PHYCFG2_GMODE_MASK_RT8201 0x001c0000
+#define MAC_PHYCFG2_GMODE_COMP_MASK 0x00e00000
+#define MAC_PHYCFG2_GMODE_COMP_AC131 0x00e00000
+#define MAC_PHYCFG2_GMODE_COMP_50610 0x00000000
+#define MAC_PHYCFG2_GMODE_COMP_RT8211 0x00200000
+#define MAC_PHYCFG2_GMODE_COMP_RT8201 0x00000000
+#define MAC_PHYCFG2_ACT_MASK_MASK 0x03000000
+#define MAC_PHYCFG2_ACT_MASK_AC131 0x03000000
+#define MAC_PHYCFG2_ACT_MASK_50610 0x01000000
+#define MAC_PHYCFG2_ACT_MASK_RT8211 0x03000000
+#define MAC_PHYCFG2_ACT_MASK_RT8201 0x01000000
+#define MAC_PHYCFG2_ACT_COMP_MASK 0x0c000000
+#define MAC_PHYCFG2_ACT_COMP_AC131 0x00000000
+#define MAC_PHYCFG2_ACT_COMP_50610 0x00000000
+#define MAC_PHYCFG2_ACT_COMP_RT8211 0x00000000
+#define MAC_PHYCFG2_ACT_COMP_RT8201 0x08000000
+#define MAC_PHYCFG2_QUAL_MASK_MASK 0x30000000
+#define MAC_PHYCFG2_QUAL_MASK_AC131 0x30000000
+#define MAC_PHYCFG2_QUAL_MASK_50610 0x30000000
+#define MAC_PHYCFG2_QUAL_MASK_RT8211 0x30000000
+#define MAC_PHYCFG2_QUAL_MASK_RT8201 0x30000000
+#define MAC_PHYCFG2_QUAL_COMP_MASK 0xc0000000
+#define MAC_PHYCFG2_QUAL_COMP_AC131 0x00000000
+#define MAC_PHYCFG2_QUAL_COMP_50610 0x00000000
+#define MAC_PHYCFG2_QUAL_COMP_RT8211 0x00000000
+#define MAC_PHYCFG2_QUAL_COMP_RT8201 0x00000000
+#define MAC_PHYCFG2_50610_LED_MODES \
+ (MAC_PHYCFG2_EMODE_MASK_50610 | \
+ MAC_PHYCFG2_EMODE_COMP_50610 | \
+ MAC_PHYCFG2_FMODE_MASK_50610 | \
+ MAC_PHYCFG2_FMODE_COMP_50610 | \
+ MAC_PHYCFG2_GMODE_MASK_50610 | \
+ MAC_PHYCFG2_GMODE_COMP_50610 | \
+ MAC_PHYCFG2_ACT_MASK_50610 | \
+ MAC_PHYCFG2_ACT_COMP_50610 | \
+ MAC_PHYCFG2_QUAL_MASK_50610 | \
+ MAC_PHYCFG2_QUAL_COMP_50610)
+#define MAC_PHYCFG2_AC131_LED_MODES \
+ (MAC_PHYCFG2_EMODE_MASK_AC131 | \
+ MAC_PHYCFG2_EMODE_COMP_AC131 | \
+ MAC_PHYCFG2_FMODE_MASK_AC131 | \
+ MAC_PHYCFG2_FMODE_COMP_AC131 | \
+ MAC_PHYCFG2_GMODE_MASK_AC131 | \
+ MAC_PHYCFG2_GMODE_COMP_AC131 | \
+ MAC_PHYCFG2_ACT_MASK_AC131 | \
+ MAC_PHYCFG2_ACT_COMP_AC131 | \
+ MAC_PHYCFG2_QUAL_MASK_AC131 | \
+ MAC_PHYCFG2_QUAL_COMP_AC131)
+#define MAC_PHYCFG2_RTL8211C_LED_MODES \
+ (MAC_PHYCFG2_EMODE_MASK_RT8211 | \
+ MAC_PHYCFG2_EMODE_COMP_RT8211 | \
+ MAC_PHYCFG2_FMODE_MASK_RT8211 | \
+ MAC_PHYCFG2_FMODE_COMP_RT8211 | \
+ MAC_PHYCFG2_GMODE_MASK_RT8211 | \
+ MAC_PHYCFG2_GMODE_COMP_RT8211 | \
+ MAC_PHYCFG2_ACT_MASK_RT8211 | \
+ MAC_PHYCFG2_ACT_COMP_RT8211 | \
+ MAC_PHYCFG2_QUAL_MASK_RT8211 | \
+ MAC_PHYCFG2_QUAL_COMP_RT8211)
+#define MAC_PHYCFG2_RTL8201E_LED_MODES \
+ (MAC_PHYCFG2_EMODE_MASK_RT8201 | \
+ MAC_PHYCFG2_EMODE_COMP_RT8201 | \
+ MAC_PHYCFG2_FMODE_MASK_RT8201 | \
+ MAC_PHYCFG2_FMODE_COMP_RT8201 | \
+ MAC_PHYCFG2_GMODE_MASK_RT8201 | \
+ MAC_PHYCFG2_GMODE_COMP_RT8201 | \
+ MAC_PHYCFG2_ACT_MASK_RT8201 | \
+ MAC_PHYCFG2_ACT_COMP_RT8201 | \
+ MAC_PHYCFG2_QUAL_MASK_RT8201 | \
+ MAC_PHYCFG2_QUAL_COMP_RT8201)
+#define MAC_EXT_RGMII_MODE 0x000005a8
+#define MAC_RGMII_MODE_TX_ENABLE 0x00000001
+#define MAC_RGMII_MODE_TX_LOWPWR 0x00000002
+#define MAC_RGMII_MODE_TX_RESET 0x00000004
+#define MAC_RGMII_MODE_RX_INT_B 0x00000100
+#define MAC_RGMII_MODE_RX_QUALITY 0x00000200
+#define MAC_RGMII_MODE_RX_ACTIVITY 0x00000400
+#define MAC_RGMII_MODE_RX_ENG_DET 0x00000800
+/* 0x5ac --> 0x5b0 unused */
+#define SERDES_RX_CTRL 0x000005b0 /* 5780/5714 only */
+#define SERDES_RX_SIG_DETECT 0x00000400
+#define SG_DIG_CTRL 0x000005b0
+#define SG_DIG_USING_HW_AUTONEG 0x80000000
+#define SG_DIG_SOFT_RESET 0x40000000
+#define SG_DIG_DISABLE_LINKRDY 0x20000000
+#define SG_DIG_CRC16_CLEAR_N 0x01000000
+#define SG_DIG_EN10B 0x00800000
+#define SG_DIG_CLEAR_STATUS 0x00400000
+#define SG_DIG_LOCAL_DUPLEX_STATUS 0x00200000
+#define SG_DIG_LOCAL_LINK_STATUS 0x00100000
+#define SG_DIG_SPEED_STATUS_MASK 0x000c0000
+#define SG_DIG_SPEED_STATUS_SHIFT 18
+#define SG_DIG_JUMBO_PACKET_DISABLE 0x00020000
+#define SG_DIG_RESTART_AUTONEG 0x00010000
+#define SG_DIG_FIBER_MODE 0x00008000
+#define SG_DIG_REMOTE_FAULT_MASK 0x00006000
+#define SG_DIG_PAUSE_MASK 0x00001800
+#define SG_DIG_PAUSE_CAP 0x00000800
+#define SG_DIG_ASYM_PAUSE 0x00001000
+#define SG_DIG_GBIC_ENABLE 0x00000400
+#define SG_DIG_CHECK_END_ENABLE 0x00000200
+#define SG_DIG_SGMII_AUTONEG_TIMER 0x00000100
+#define SG_DIG_CLOCK_PHASE_SELECT 0x00000080
+#define SG_DIG_GMII_INPUT_SELECT 0x00000040
+#define SG_DIG_MRADV_CRC16_SELECT 0x00000020
+#define SG_DIG_COMMA_DETECT_ENABLE 0x00000010
+#define SG_DIG_AUTONEG_TIMER_REDUCE 0x00000008
+#define SG_DIG_AUTONEG_LOW_ENABLE 0x00000004
+#define SG_DIG_REMOTE_LOOPBACK 0x00000002
+#define SG_DIG_LOOPBACK 0x00000001
+#define SG_DIG_COMMON_SETUP (SG_DIG_CRC16_CLEAR_N | \
+ SG_DIG_LOCAL_DUPLEX_STATUS | \
+ SG_DIG_LOCAL_LINK_STATUS | \
+ (0x2 << SG_DIG_SPEED_STATUS_SHIFT) | \
+ SG_DIG_FIBER_MODE | SG_DIG_GBIC_ENABLE)
+#define SG_DIG_STATUS 0x000005b4
+#define SG_DIG_CRC16_BUS_MASK 0xffff0000
+#define SG_DIG_PARTNER_FAULT_MASK 0x00600000 /* If !MRADV_CRC16_SELECT */
+#define SG_DIG_PARTNER_ASYM_PAUSE 0x00100000 /* If !MRADV_CRC16_SELECT */
+#define SG_DIG_PARTNER_PAUSE_CAPABLE 0x00080000 /* If !MRADV_CRC16_SELECT */
+#define SG_DIG_PARTNER_HALF_DUPLEX 0x00040000 /* If !MRADV_CRC16_SELECT */
+#define SG_DIG_PARTNER_FULL_DUPLEX 0x00020000 /* If !MRADV_CRC16_SELECT */
+#define SG_DIG_PARTNER_NEXT_PAGE 0x00010000 /* If !MRADV_CRC16_SELECT */
+#define SG_DIG_AUTONEG_STATE_MASK 0x00000ff0
+#define SG_DIG_IS_SERDES 0x00000100
+#define SG_DIG_COMMA_DETECTOR 0x00000008
+#define SG_DIG_MAC_ACK_STATUS 0x00000004
+#define SG_DIG_AUTONEG_COMPLETE 0x00000002
+#define SG_DIG_AUTONEG_ERROR 0x00000001
+/* 0x5b8 --> 0x600 unused */
+#define MAC_TX_MAC_STATE_BASE 0x00000600 /* 16 bytes */
+#define MAC_RX_MAC_STATE_BASE 0x00000610 /* 20 bytes */
+/* 0x624 --> 0x630 unused */
+
+#define MAC_RSS_INDIR_TBL_0 0x00000630
+
+#define MAC_RSS_HASH_KEY_0 0x00000670
+#define MAC_RSS_HASH_KEY_1 0x00000674
+#define MAC_RSS_HASH_KEY_2 0x00000678
+#define MAC_RSS_HASH_KEY_3 0x0000067c
+#define MAC_RSS_HASH_KEY_4 0x00000680
+#define MAC_RSS_HASH_KEY_5 0x00000684
+#define MAC_RSS_HASH_KEY_6 0x00000688
+#define MAC_RSS_HASH_KEY_7 0x0000068c
+#define MAC_RSS_HASH_KEY_8 0x00000690
+#define MAC_RSS_HASH_KEY_9 0x00000694
+/* 0x698 --> 0x800 unused */
+
+#define MAC_TX_STATS_OCTETS 0x00000800
+#define MAC_TX_STATS_RESV1 0x00000804
+#define MAC_TX_STATS_COLLISIONS 0x00000808
+#define MAC_TX_STATS_XON_SENT 0x0000080c
+#define MAC_TX_STATS_XOFF_SENT 0x00000810
+#define MAC_TX_STATS_RESV2 0x00000814
+#define MAC_TX_STATS_MAC_ERRORS 0x00000818
+#define MAC_TX_STATS_SINGLE_COLLISIONS 0x0000081c
+#define MAC_TX_STATS_MULT_COLLISIONS 0x00000820
+#define MAC_TX_STATS_DEFERRED 0x00000824
+#define MAC_TX_STATS_RESV3 0x00000828
+#define MAC_TX_STATS_EXCESSIVE_COL 0x0000082c
+#define MAC_TX_STATS_LATE_COL 0x00000830
+#define MAC_TX_STATS_RESV4_1 0x00000834
+#define MAC_TX_STATS_RESV4_2 0x00000838
+#define MAC_TX_STATS_RESV4_3 0x0000083c
+#define MAC_TX_STATS_RESV4_4 0x00000840
+#define MAC_TX_STATS_RESV4_5 0x00000844
+#define MAC_TX_STATS_RESV4_6 0x00000848
+#define MAC_TX_STATS_RESV4_7 0x0000084c
+#define MAC_TX_STATS_RESV4_8 0x00000850
+#define MAC_TX_STATS_RESV4_9 0x00000854
+#define MAC_TX_STATS_RESV4_10 0x00000858
+#define MAC_TX_STATS_RESV4_11 0x0000085c
+#define MAC_TX_STATS_RESV4_12 0x00000860
+#define MAC_TX_STATS_RESV4_13 0x00000864
+#define MAC_TX_STATS_RESV4_14 0x00000868
+#define MAC_TX_STATS_UCAST 0x0000086c
+#define MAC_TX_STATS_MCAST 0x00000870
+#define MAC_TX_STATS_BCAST 0x00000874
+#define MAC_TX_STATS_RESV5_1 0x00000878
+#define MAC_TX_STATS_RESV5_2 0x0000087c
+#define MAC_RX_STATS_OCTETS 0x00000880
+#define MAC_RX_STATS_RESV1 0x00000884
+#define MAC_RX_STATS_FRAGMENTS 0x00000888
+#define MAC_RX_STATS_UCAST 0x0000088c
+#define MAC_RX_STATS_MCAST 0x00000890
+#define MAC_RX_STATS_BCAST 0x00000894
+#define MAC_RX_STATS_FCS_ERRORS 0x00000898
+#define MAC_RX_STATS_ALIGN_ERRORS 0x0000089c
+#define MAC_RX_STATS_XON_PAUSE_RECVD 0x000008a0
+#define MAC_RX_STATS_XOFF_PAUSE_RECVD 0x000008a4
+#define MAC_RX_STATS_MAC_CTRL_RECVD 0x000008a8
+#define MAC_RX_STATS_XOFF_ENTERED 0x000008ac
+#define MAC_RX_STATS_FRAME_TOO_LONG 0x000008b0
+#define MAC_RX_STATS_JABBERS 0x000008b4
+#define MAC_RX_STATS_UNDERSIZE 0x000008b8
+/* 0x8bc --> 0xc00 unused */
+
+/* Send data initiator control registers */
+#define SNDDATAI_MODE 0x00000c00
+#define SNDDATAI_MODE_RESET 0x00000001
+#define SNDDATAI_MODE_ENABLE 0x00000002
+#define SNDDATAI_MODE_STAT_OFLOW_ENAB 0x00000004
+#define SNDDATAI_STATUS 0x00000c04
+#define SNDDATAI_STATUS_STAT_OFLOW 0x00000004
+#define SNDDATAI_STATSCTRL 0x00000c08
+#define SNDDATAI_SCTRL_ENABLE 0x00000001
+#define SNDDATAI_SCTRL_FASTUPD 0x00000002
+#define SNDDATAI_SCTRL_CLEAR 0x00000004
+#define SNDDATAI_SCTRL_FLUSH 0x00000008
+#define SNDDATAI_SCTRL_FORCE_ZERO 0x00000010
+#define SNDDATAI_STATSENAB 0x00000c0c
+#define SNDDATAI_STATSINCMASK 0x00000c10
+#define ISO_PKT_TX 0x00000c20
+/* 0xc24 --> 0xc80 unused */
+#define SNDDATAI_COS_CNT_0 0x00000c80
+#define SNDDATAI_COS_CNT_1 0x00000c84
+#define SNDDATAI_COS_CNT_2 0x00000c88
+#define SNDDATAI_COS_CNT_3 0x00000c8c
+#define SNDDATAI_COS_CNT_4 0x00000c90
+#define SNDDATAI_COS_CNT_5 0x00000c94
+#define SNDDATAI_COS_CNT_6 0x00000c98
+#define SNDDATAI_COS_CNT_7 0x00000c9c
+#define SNDDATAI_COS_CNT_8 0x00000ca0
+#define SNDDATAI_COS_CNT_9 0x00000ca4
+#define SNDDATAI_COS_CNT_10 0x00000ca8
+#define SNDDATAI_COS_CNT_11 0x00000cac
+#define SNDDATAI_COS_CNT_12 0x00000cb0
+#define SNDDATAI_COS_CNT_13 0x00000cb4
+#define SNDDATAI_COS_CNT_14 0x00000cb8
+#define SNDDATAI_COS_CNT_15 0x00000cbc
+#define SNDDATAI_DMA_RDQ_FULL_CNT 0x00000cc0
+#define SNDDATAI_DMA_PRIO_RDQ_FULL_CNT 0x00000cc4
+#define SNDDATAI_SDCQ_FULL_CNT 0x00000cc8
+#define SNDDATAI_NICRNG_SSND_PIDX_CNT 0x00000ccc
+#define SNDDATAI_STATS_UPDATED_CNT 0x00000cd0
+#define SNDDATAI_INTERRUPTS_CNT 0x00000cd4
+#define SNDDATAI_AVOID_INTERRUPTS_CNT 0x00000cd8
+#define SNDDATAI_SND_THRESH_HIT_CNT 0x00000cdc
+/* 0xce0 --> 0x1000 unused */
+
+/* Send data completion control registers */
+#define SNDDATAC_MODE 0x00001000
+#define SNDDATAC_MODE_RESET 0x00000001
+#define SNDDATAC_MODE_ENABLE 0x00000002
+#define SNDDATAC_MODE_CDELAY 0x00000010
+/* 0x1004 --> 0x1400 unused */
+
+/* Send BD ring selector */
+#define SNDBDS_MODE 0x00001400
+#define SNDBDS_MODE_RESET 0x00000001
+#define SNDBDS_MODE_ENABLE 0x00000002
+#define SNDBDS_MODE_ATTN_ENABLE 0x00000004
+#define SNDBDS_STATUS 0x00001404
+#define SNDBDS_STATUS_ERROR_ATTN 0x00000004
+#define SNDBDS_HWDIAG 0x00001408
+/* 0x140c --> 0x1440 unused */
+#define SNDBDS_SEL_CON_IDX_0 0x00001440
+#define SNDBDS_SEL_CON_IDX_1 0x00001444
+#define SNDBDS_SEL_CON_IDX_2 0x00001448
+#define SNDBDS_SEL_CON_IDX_3 0x0000144c
+#define SNDBDS_SEL_CON_IDX_4 0x00001450
+#define SNDBDS_SEL_CON_IDX_5 0x00001454
+#define SNDBDS_SEL_CON_IDX_6 0x00001458
+#define SNDBDS_SEL_CON_IDX_7 0x0000145c
+#define SNDBDS_SEL_CON_IDX_8 0x00001460
+#define SNDBDS_SEL_CON_IDX_9 0x00001464
+#define SNDBDS_SEL_CON_IDX_10 0x00001468
+#define SNDBDS_SEL_CON_IDX_11 0x0000146c
+#define SNDBDS_SEL_CON_IDX_12 0x00001470
+#define SNDBDS_SEL_CON_IDX_13 0x00001474
+#define SNDBDS_SEL_CON_IDX_14 0x00001478
+#define SNDBDS_SEL_CON_IDX_15 0x0000147c
+/* 0x1480 --> 0x1800 unused */
+
+/* Send BD initiator control registers */
+#define SNDBDI_MODE 0x00001800
+#define SNDBDI_MODE_RESET 0x00000001
+#define SNDBDI_MODE_ENABLE 0x00000002
+#define SNDBDI_MODE_ATTN_ENABLE 0x00000004
+#define SNDBDI_MODE_MULTI_TXQ_EN 0x00000020
+#define SNDBDI_STATUS 0x00001804
+#define SNDBDI_STATUS_ERROR_ATTN 0x00000004
+#define SNDBDI_IN_PROD_IDX_0 0x00001808
+#define SNDBDI_IN_PROD_IDX_1 0x0000180c
+#define SNDBDI_IN_PROD_IDX_2 0x00001810
+#define SNDBDI_IN_PROD_IDX_3 0x00001814
+#define SNDBDI_IN_PROD_IDX_4 0x00001818
+#define SNDBDI_IN_PROD_IDX_5 0x0000181c
+#define SNDBDI_IN_PROD_IDX_6 0x00001820
+#define SNDBDI_IN_PROD_IDX_7 0x00001824
+#define SNDBDI_IN_PROD_IDX_8 0x00001828
+#define SNDBDI_IN_PROD_IDX_9 0x0000182c
+#define SNDBDI_IN_PROD_IDX_10 0x00001830
+#define SNDBDI_IN_PROD_IDX_11 0x00001834
+#define SNDBDI_IN_PROD_IDX_12 0x00001838
+#define SNDBDI_IN_PROD_IDX_13 0x0000183c
+#define SNDBDI_IN_PROD_IDX_14 0x00001840
+#define SNDBDI_IN_PROD_IDX_15 0x00001844
+/* 0x1848 --> 0x1c00 unused */
+
+/* Send BD completion control registers */
+#define SNDBDC_MODE 0x00001c00
+#define SNDBDC_MODE_RESET 0x00000001
+#define SNDBDC_MODE_ENABLE 0x00000002
+#define SNDBDC_MODE_ATTN_ENABLE 0x00000004
+/* 0x1c04 --> 0x2000 unused */
+
+/* Receive list placement control registers */
+#define RCVLPC_MODE 0x00002000
+#define RCVLPC_MODE_RESET 0x00000001
+#define RCVLPC_MODE_ENABLE 0x00000002
+#define RCVLPC_MODE_CLASS0_ATTN_ENAB 0x00000004
+#define RCVLPC_MODE_MAPOOR_AATTN_ENAB 0x00000008
+#define RCVLPC_MODE_STAT_OFLOW_ENAB 0x00000010
+#define RCVLPC_STATUS 0x00002004
+#define RCVLPC_STATUS_CLASS0 0x00000004
+#define RCVLPC_STATUS_MAPOOR 0x00000008
+#define RCVLPC_STATUS_STAT_OFLOW 0x00000010
+#define RCVLPC_LOCK 0x00002008
+#define RCVLPC_LOCK_REQ_MASK 0x0000ffff
+#define RCVLPC_LOCK_REQ_SHIFT 0
+#define RCVLPC_LOCK_GRANT_MASK 0xffff0000
+#define RCVLPC_LOCK_GRANT_SHIFT 16
+#define RCVLPC_NON_EMPTY_BITS 0x0000200c
+#define RCVLPC_NON_EMPTY_BITS_MASK 0x0000ffff
+#define RCVLPC_CONFIG 0x00002010
+#define RCVLPC_STATSCTRL 0x00002014
+#define RCVLPC_STATSCTRL_ENABLE 0x00000001
+#define RCVLPC_STATSCTRL_FASTUPD 0x00000002
+#define RCVLPC_STATS_ENABLE 0x00002018
+#define RCVLPC_STATSENAB_ASF_FIX 0x00000002
+#define RCVLPC_STATSENAB_DACK_FIX 0x00040000
+#define RCVLPC_STATSENAB_LNGBRST_RFIX 0x00400000
+#define RCVLPC_STATS_INCMASK 0x0000201c
+/* 0x2020 --> 0x2100 unused */
+#define RCVLPC_SELLST_BASE 0x00002100 /* 16 16-byte entries */
+#define SELLST_TAIL 0x00000004
+#define SELLST_CONT 0x00000008
+#define SELLST_UNUSED 0x0000000c
+#define RCVLPC_COS_CNTL_BASE 0x00002200 /* 16 4-byte entries */
+#define RCVLPC_DROP_FILTER_CNT 0x00002240
+#define RCVLPC_DMA_WQ_FULL_CNT 0x00002244
+#define RCVLPC_DMA_HIPRIO_WQ_FULL_CNT 0x00002248
+#define RCVLPC_NO_RCV_BD_CNT 0x0000224c
+#define RCVLPC_IN_DISCARDS_CNT 0x00002250
+#define RCVLPC_IN_ERRORS_CNT 0x00002254
+#define RCVLPC_RCV_THRESH_HIT_CNT 0x00002258
+/* 0x225c --> 0x2400 unused */
+
+/* Receive Data and Receive BD Initiator Control */
+#define RCVDBDI_MODE 0x00002400
+#define RCVDBDI_MODE_RESET 0x00000001
+#define RCVDBDI_MODE_ENABLE 0x00000002
+#define RCVDBDI_MODE_JUMBOBD_NEEDED 0x00000004
+#define RCVDBDI_MODE_FRM_TOO_BIG 0x00000008
+#define RCVDBDI_MODE_INV_RING_SZ 0x00000010
+#define RCVDBDI_MODE_LRG_RING_SZ 0x00010000
+#define RCVDBDI_STATUS 0x00002404
+#define RCVDBDI_STATUS_JUMBOBD_NEEDED 0x00000004
+#define RCVDBDI_STATUS_FRM_TOO_BIG 0x00000008
+#define RCVDBDI_STATUS_INV_RING_SZ 0x00000010
+#define RCVDBDI_SPLIT_FRAME_MINSZ 0x00002408
+/* 0x240c --> 0x2440 unused */
+#define RCVDBDI_JUMBO_BD 0x00002440 /* TG3_BDINFO_... */
+#define RCVDBDI_STD_BD 0x00002450 /* TG3_BDINFO_... */
+#define RCVDBDI_MINI_BD 0x00002460 /* TG3_BDINFO_... */
+#define RCVDBDI_JUMBO_CON_IDX 0x00002470
+#define RCVDBDI_STD_CON_IDX 0x00002474
+#define RCVDBDI_MINI_CON_IDX 0x00002478
+/* 0x247c --> 0x2480 unused */
+#define RCVDBDI_BD_PROD_IDX_0 0x00002480
+#define RCVDBDI_BD_PROD_IDX_1 0x00002484
+#define RCVDBDI_BD_PROD_IDX_2 0x00002488
+#define RCVDBDI_BD_PROD_IDX_3 0x0000248c
+#define RCVDBDI_BD_PROD_IDX_4 0x00002490
+#define RCVDBDI_BD_PROD_IDX_5 0x00002494
+#define RCVDBDI_BD_PROD_IDX_6 0x00002498
+#define RCVDBDI_BD_PROD_IDX_7 0x0000249c
+#define RCVDBDI_BD_PROD_IDX_8 0x000024a0
+#define RCVDBDI_BD_PROD_IDX_9 0x000024a4
+#define RCVDBDI_BD_PROD_IDX_10 0x000024a8
+#define RCVDBDI_BD_PROD_IDX_11 0x000024ac
+#define RCVDBDI_BD_PROD_IDX_12 0x000024b0
+#define RCVDBDI_BD_PROD_IDX_13 0x000024b4
+#define RCVDBDI_BD_PROD_IDX_14 0x000024b8
+#define RCVDBDI_BD_PROD_IDX_15 0x000024bc
+#define RCVDBDI_HWDIAG 0x000024c0
+/* 0x24c4 --> 0x2800 unused */
+
+/* Receive Data Completion Control */
+#define RCVDCC_MODE 0x00002800
+#define RCVDCC_MODE_RESET 0x00000001
+#define RCVDCC_MODE_ENABLE 0x00000002
+#define RCVDCC_MODE_ATTN_ENABLE 0x00000004
+/* 0x2804 --> 0x2c00 unused */
+
+/* Receive BD Initiator Control Registers */
+#define RCVBDI_MODE 0x00002c00
+#define RCVBDI_MODE_RESET 0x00000001
+#define RCVBDI_MODE_ENABLE 0x00000002
+#define RCVBDI_MODE_RCB_ATTN_ENAB 0x00000004
+#define RCVBDI_STATUS 0x00002c04
+#define RCVBDI_STATUS_RCB_ATTN 0x00000004
+#define RCVBDI_JUMBO_PROD_IDX 0x00002c08
+#define RCVBDI_STD_PROD_IDX 0x00002c0c
+#define RCVBDI_MINI_PROD_IDX 0x00002c10
+#define RCVBDI_MINI_THRESH 0x00002c14
+#define RCVBDI_STD_THRESH 0x00002c18
+#define RCVBDI_JUMBO_THRESH 0x00002c1c
+/* 0x2c20 --> 0x2d00 unused */
+
+#define STD_REPLENISH_LWM 0x00002d00
+#define JMB_REPLENISH_LWM 0x00002d04
+/* 0x2d08 --> 0x3000 unused */
+
+/* Receive BD Completion Control Registers */
+#define RCVCC_MODE 0x00003000
+#define RCVCC_MODE_RESET 0x00000001
+#define RCVCC_MODE_ENABLE 0x00000002
+#define RCVCC_MODE_ATTN_ENABLE 0x00000004
+#define RCVCC_STATUS 0x00003004
+#define RCVCC_STATUS_ERROR_ATTN 0x00000004
+#define RCVCC_JUMP_PROD_IDX 0x00003008
+#define RCVCC_STD_PROD_IDX 0x0000300c
+#define RCVCC_MINI_PROD_IDX 0x00003010
+/* 0x3014 --> 0x3400 unused */
+
+/* Receive list selector control registers */
+#define RCVLSC_MODE 0x00003400
+#define RCVLSC_MODE_RESET 0x00000001
+#define RCVLSC_MODE_ENABLE 0x00000002
+#define RCVLSC_MODE_ATTN_ENABLE 0x00000004
+#define RCVLSC_STATUS 0x00003404
+#define RCVLSC_STATUS_ERROR_ATTN 0x00000004
+/* 0x3408 --> 0x3600 unused */
+
+#define TG3_CPMU_DRV_STATUS 0x0000344c
+
+/* CPMU registers */
+#define TG3_CPMU_CTRL 0x00003600
+#define CPMU_CTRL_LINK_IDLE_MODE 0x00000200
+#define CPMU_CTRL_LINK_AWARE_MODE 0x00000400
+#define CPMU_CTRL_LINK_SPEED_MODE 0x00004000
+#define CPMU_CTRL_GPHY_10MB_RXONLY 0x00010000
+#define TG3_CPMU_LSPD_10MB_CLK 0x00003604
+#define CPMU_LSPD_10MB_MACCLK_MASK 0x001f0000
+#define CPMU_LSPD_10MB_MACCLK_6_25 0x00130000
+/* 0x3608 --> 0x360c unused */
+
+#define TG3_CPMU_LSPD_1000MB_CLK 0x0000360c
+#define CPMU_LSPD_1000MB_MACCLK_62_5 0x00000000
+#define CPMU_LSPD_1000MB_MACCLK_12_5 0x00110000
+#define CPMU_LSPD_1000MB_MACCLK_MASK 0x001f0000
+#define TG3_CPMU_LNK_AWARE_PWRMD 0x00003610
+#define CPMU_LNK_AWARE_MACCLK_MASK 0x001f0000
+#define CPMU_LNK_AWARE_MACCLK_6_25 0x00130000
+/* 0x3614 --> 0x361c unused */
+
+#define TG3_CPMU_HST_ACC 0x0000361c
+#define CPMU_HST_ACC_MACCLK_MASK 0x001f0000
+#define CPMU_HST_ACC_MACCLK_6_25 0x00130000
+/* 0x3620 --> 0x3624 unused */
+
+#define TG3_CPMU_CLCK_ORIDE 0x00003624
+#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000
+
+#define TG3_CPMU_CLCK_STAT 0x00003630
+#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000
+#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000
+#define CPMU_CLCK_STAT_MAC_CLCK_12_5 0x00110000
+#define CPMU_CLCK_STAT_MAC_CLCK_6_25 0x00130000
+/* 0x3634 --> 0x365c unused */
+
+#define TG3_CPMU_MUTEX_REQ 0x0000365c
+#define CPMU_MUTEX_REQ_DRIVER 0x00001000
+#define TG3_CPMU_MUTEX_GNT 0x00003660
+#define CPMU_MUTEX_GNT_DRIVER 0x00001000
+#define TG3_CPMU_PHY_STRAP 0x00003664
+#define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020
+/* 0x3668 --> 0x36b0 unused */
+
+#define TG3_CPMU_EEE_MODE 0x000036b0
+#define TG3_CPMU_EEEMD_APE_TX_DET_EN 0x00000004
+#define TG3_CPMU_EEEMD_ERLY_L1_XIT_DET 0x00000008
+#define TG3_CPMU_EEEMD_SND_IDX_DET_EN 0x00000040
+#define TG3_CPMU_EEEMD_LPI_ENABLE 0x00000080
+#define TG3_CPMU_EEEMD_LPI_IN_TX 0x00000100
+#define TG3_CPMU_EEEMD_LPI_IN_RX 0x00000200
+#define TG3_CPMU_EEEMD_EEE_ENABLE 0x00100000
+#define TG3_CPMU_EEE_DBTMR1 0x000036b4
+#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000
+#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000007ff
+#define TG3_CPMU_EEE_DBTMR2 0x000036b8
+#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000
+#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000007ff
+#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc
+#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000
+#define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004
+/* 0x36c0 --> 0x36d0 unused */
+
+#define TG3_CPMU_EEE_CTRL 0x000036d0
+#define TG3_CPMU_EEE_CTRL_EXIT_16_5_US 0x0000019d
+#define TG3_CPMU_EEE_CTRL_EXIT_36_US 0x00000384
+#define TG3_CPMU_EEE_CTRL_EXIT_20_1_US 0x000001f8
+/* 0x36d4 --> 0x3800 unused */
+
+/* Mbuf cluster free registers */
+#define MBFREE_MODE 0x00003800
+#define MBFREE_MODE_RESET 0x00000001
+#define MBFREE_MODE_ENABLE 0x00000002
+#define MBFREE_STATUS 0x00003804
+/* 0x3808 --> 0x3c00 unused */
+
+/* Host coalescing control registers */
+#define HOSTCC_MODE 0x00003c00
+#define HOSTCC_MODE_RESET 0x00000001
+#define HOSTCC_MODE_ENABLE 0x00000002
+#define HOSTCC_MODE_ATTN 0x00000004
+#define HOSTCC_MODE_NOW 0x00000008
+#define HOSTCC_MODE_FULL_STATUS 0x00000000
+#define HOSTCC_MODE_64BYTE 0x00000080
+#define HOSTCC_MODE_32BYTE 0x00000100
+#define HOSTCC_MODE_CLRTICK_RXBD 0x00000200
+#define HOSTCC_MODE_CLRTICK_TXBD 0x00000400
+#define HOSTCC_MODE_NOINT_ON_NOW 0x00000800
+#define HOSTCC_MODE_NOINT_ON_FORCE 0x00001000
+#define HOSTCC_MODE_COAL_VEC1_NOW 0x00002000
+#define HOSTCC_STATUS 0x00003c04
+#define HOSTCC_STATUS_ERROR_ATTN 0x00000004
+#define HOSTCC_RXCOL_TICKS 0x00003c08
+#define LOW_RXCOL_TICKS 0x00000032
+#define LOW_RXCOL_TICKS_CLRTCKS 0x00000014
+#define DEFAULT_RXCOL_TICKS 0x00000048
+#define HIGH_RXCOL_TICKS 0x00000096
+#define MAX_RXCOL_TICKS 0x000003ff
+#define HOSTCC_TXCOL_TICKS 0x00003c0c
+#define LOW_TXCOL_TICKS 0x00000096
+#define LOW_TXCOL_TICKS_CLRTCKS 0x00000048
+#define DEFAULT_TXCOL_TICKS 0x0000012c
+#define HIGH_TXCOL_TICKS 0x00000145
+#define MAX_TXCOL_TICKS 0x000003ff
+#define HOSTCC_RXMAX_FRAMES 0x00003c10
+#define LOW_RXMAX_FRAMES 0x00000005
+#define DEFAULT_RXMAX_FRAMES 0x00000008
+#define HIGH_RXMAX_FRAMES 0x00000012
+#define MAX_RXMAX_FRAMES 0x000000ff
+#define HOSTCC_TXMAX_FRAMES 0x00003c14
+#define LOW_TXMAX_FRAMES 0x00000035
+#define DEFAULT_TXMAX_FRAMES 0x0000004b
+#define HIGH_TXMAX_FRAMES 0x00000052
+#define MAX_TXMAX_FRAMES 0x000000ff
+#define HOSTCC_RXCOAL_TICK_INT 0x00003c18
+#define DEFAULT_RXCOAL_TICK_INT 0x00000019
+#define DEFAULT_RXCOAL_TICK_INT_CLRTCKS 0x00000014
+#define MAX_RXCOAL_TICK_INT 0x000003ff
+#define HOSTCC_TXCOAL_TICK_INT 0x00003c1c
+#define DEFAULT_TXCOAL_TICK_INT 0x00000019
+#define DEFAULT_TXCOAL_TICK_INT_CLRTCKS 0x00000014
+#define MAX_TXCOAL_TICK_INT 0x000003ff
+#define HOSTCC_RXCOAL_MAXF_INT 0x00003c20
+#define DEFAULT_RXCOAL_MAXF_INT 0x00000005
+#define MAX_RXCOAL_MAXF_INT 0x000000ff
+#define HOSTCC_TXCOAL_MAXF_INT 0x00003c24
+#define DEFAULT_TXCOAL_MAXF_INT 0x00000005
+#define MAX_TXCOAL_MAXF_INT 0x000000ff
+#define HOSTCC_STAT_COAL_TICKS 0x00003c28
+#define DEFAULT_STAT_COAL_TICKS 0x000f4240
+#define MAX_STAT_COAL_TICKS 0xd693d400
+#define MIN_STAT_COAL_TICKS 0x00000064
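
[Editor's note] These host-coalescing registers are what the ethtool -C knobs ultimately program. A sketch of the mapping, modelled on the driver's coalescing setup; ec is a struct ethtool_coalesce and tw32() the register accessor, both assumed here:

/* How the ethtool coalescing parameters land in these registers: */
tw32(HOSTCC_RXCOL_TICKS,     ec->rx_coalesce_usecs);
tw32(HOSTCC_RXMAX_FRAMES,    ec->rx_max_coalesced_frames);
tw32(HOSTCC_TXCOL_TICKS,     ec->tx_coalesce_usecs);
tw32(HOSTCC_TXMAX_FRAMES,    ec->tx_max_coalesced_frames);
tw32(HOSTCC_STAT_COAL_TICKS, ec->stats_block_coalesce_usecs);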
+/* 0x3c2c --> 0x3c30 unused */
+#define HOSTCC_STATS_BLK_HOST_ADDR 0x00003c30 /* 64-bit */
+#define HOSTCC_STATUS_BLK_HOST_ADDR 0x00003c38 /* 64-bit */
+#define HOSTCC_STATS_BLK_NIC_ADDR 0x00003c40
+#define HOSTCC_STATUS_BLK_NIC_ADDR 0x00003c44
+#define HOSTCC_FLOW_ATTN 0x00003c48
+#define HOSTCC_FLOW_ATTN_MBUF_LWM 0x00000040
+/* 0x3c4c --> 0x3c50 unused */
+#define HOSTCC_JUMBO_CON_IDX 0x00003c50
+#define HOSTCC_STD_CON_IDX 0x00003c54
+#define HOSTCC_MINI_CON_IDX 0x00003c58
+/* 0x3c5c --> 0x3c80 unused */
+#define HOSTCC_RET_PROD_IDX_0 0x00003c80
+#define HOSTCC_RET_PROD_IDX_1 0x00003c84
+#define HOSTCC_RET_PROD_IDX_2 0x00003c88
+#define HOSTCC_RET_PROD_IDX_3 0x00003c8c
+#define HOSTCC_RET_PROD_IDX_4 0x00003c90
+#define HOSTCC_RET_PROD_IDX_5 0x00003c94
+#define HOSTCC_RET_PROD_IDX_6 0x00003c98
+#define HOSTCC_RET_PROD_IDX_7 0x00003c9c
+#define HOSTCC_RET_PROD_IDX_8 0x00003ca0
+#define HOSTCC_RET_PROD_IDX_9 0x00003ca4
+#define HOSTCC_RET_PROD_IDX_10 0x00003ca8
+#define HOSTCC_RET_PROD_IDX_11 0x00003cac
+#define HOSTCC_RET_PROD_IDX_12 0x00003cb0
+#define HOSTCC_RET_PROD_IDX_13 0x00003cb4
+#define HOSTCC_RET_PROD_IDX_14 0x00003cb8
+#define HOSTCC_RET_PROD_IDX_15 0x00003cbc
+#define HOSTCC_SND_CON_IDX_0 0x00003cc0
+#define HOSTCC_SND_CON_IDX_1 0x00003cc4
+#define HOSTCC_SND_CON_IDX_2 0x00003cc8
+#define HOSTCC_SND_CON_IDX_3 0x00003ccc
+#define HOSTCC_SND_CON_IDX_4 0x00003cd0
+#define HOSTCC_SND_CON_IDX_5 0x00003cd4
+#define HOSTCC_SND_CON_IDX_6 0x00003cd8
+#define HOSTCC_SND_CON_IDX_7 0x00003cdc
+#define HOSTCC_SND_CON_IDX_8 0x00003ce0
+#define HOSTCC_SND_CON_IDX_9 0x00003ce4
+#define HOSTCC_SND_CON_IDX_10 0x00003ce8
+#define HOSTCC_SND_CON_IDX_11 0x00003cec
+#define HOSTCC_SND_CON_IDX_12 0x00003cf0
+#define HOSTCC_SND_CON_IDX_13 0x00003cf4
+#define HOSTCC_SND_CON_IDX_14 0x00003cf8
+#define HOSTCC_SND_CON_IDX_15 0x00003cfc
+#define HOSTCC_STATBLCK_RING1 0x00003d00
+/* 0x3d04 --> 0x3d80 unused */
+
+#define HOSTCC_RXCOL_TICKS_VEC1 0x00003d80
+#define HOSTCC_TXCOL_TICKS_VEC1 0x00003d84
+#define HOSTCC_RXMAX_FRAMES_VEC1 0x00003d88
+#define HOSTCC_TXMAX_FRAMES_VEC1 0x00003d8c
+#define HOSTCC_RXCOAL_MAXF_INT_VEC1 0x00003d90
+#define HOSTCC_TXCOAL_MAXF_INT_VEC1 0x00003d94
+/* 0x3d98 --> 0x4000 unused */
+
+/* Memory arbiter control registers */
+#define MEMARB_MODE 0x00004000
+#define MEMARB_MODE_RESET 0x00000001
+#define MEMARB_MODE_ENABLE 0x00000002
+#define MEMARB_STATUS 0x00004004
+#define MEMARB_TRAP_ADDR_LOW 0x00004008
+#define MEMARB_TRAP_ADDR_HIGH 0x0000400c
+/* 0x4010 --> 0x4400 unused */
+
+/* Buffer manager control registers */
+#define BUFMGR_MODE 0x00004400
+#define BUFMGR_MODE_RESET 0x00000001
+#define BUFMGR_MODE_ENABLE 0x00000002
+#define BUFMGR_MODE_ATTN_ENABLE 0x00000004
+#define BUFMGR_MODE_BM_TEST 0x00000008
+#define BUFMGR_MODE_MBLOW_ATTN_ENAB 0x00000010
+#define BUFMGR_MODE_NO_TX_UNDERRUN 0x80000000
+#define BUFMGR_STATUS 0x00004404
+#define BUFMGR_STATUS_ERROR 0x00000004
+#define BUFMGR_STATUS_MBLOW 0x00000010
+#define BUFMGR_MB_POOL_ADDR 0x00004408
+#define BUFMGR_MB_POOL_SIZE 0x0000440c
+#define BUFMGR_MB_RDMA_LOW_WATER 0x00004410
+#define DEFAULT_MB_RDMA_LOW_WATER 0x00000050
+#define DEFAULT_MB_RDMA_LOW_WATER_5705 0x00000000
+#define DEFAULT_MB_RDMA_LOW_WATER_JUMBO 0x00000130
+#define DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780 0x00000000
+#define BUFMGR_MB_MACRX_LOW_WATER 0x00004414
+#define DEFAULT_MB_MACRX_LOW_WATER 0x00000020
+#define DEFAULT_MB_MACRX_LOW_WATER_5705 0x00000010
+#define DEFAULT_MB_MACRX_LOW_WATER_5906 0x00000004
+#define DEFAULT_MB_MACRX_LOW_WATER_57765 0x0000002a
+#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO 0x00000098
+#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780 0x0000004b
+#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765 0x0000007e
+#define BUFMGR_MB_HIGH_WATER 0x00004418
+#define DEFAULT_MB_HIGH_WATER 0x00000060
+#define DEFAULT_MB_HIGH_WATER_5705 0x00000060
+#define DEFAULT_MB_HIGH_WATER_5906 0x00000010
+#define DEFAULT_MB_HIGH_WATER_57765 0x000000a0
+#define DEFAULT_MB_HIGH_WATER_JUMBO 0x0000017c
+#define DEFAULT_MB_HIGH_WATER_JUMBO_5780 0x00000096
+#define DEFAULT_MB_HIGH_WATER_JUMBO_57765 0x000000ea
+#define BUFMGR_RX_MB_ALLOC_REQ 0x0000441c
+#define BUFMGR_MB_ALLOC_BIT 0x10000000
+#define BUFMGR_RX_MB_ALLOC_RESP 0x00004420
+#define BUFMGR_TX_MB_ALLOC_REQ 0x00004424
+#define BUFMGR_TX_MB_ALLOC_RESP 0x00004428
+#define BUFMGR_DMA_DESC_POOL_ADDR 0x0000442c
+#define BUFMGR_DMA_DESC_POOL_SIZE 0x00004430
+#define BUFMGR_DMA_LOW_WATER 0x00004434
+#define DEFAULT_DMA_LOW_WATER 0x00000005
+#define BUFMGR_DMA_HIGH_WATER 0x00004438
+#define DEFAULT_DMA_HIGH_WATER 0x0000000a
+#define BUFMGR_RX_DMA_ALLOC_REQ 0x0000443c
+#define BUFMGR_RX_DMA_ALLOC_RESP 0x00004440
+#define BUFMGR_TX_DMA_ALLOC_REQ 0x00004444
+#define BUFMGR_TX_DMA_ALLOC_RESP 0x00004448
+#define BUFMGR_HWDIAG_0 0x0000444c
+#define BUFMGR_HWDIAG_1 0x00004450
+#define BUFMGR_HWDIAG_2 0x00004454
+/* 0x4458 --> 0x4800 unused */
+
+/* Read DMA control registers */
+#define RDMAC_MODE 0x00004800
+#define RDMAC_MODE_RESET 0x00000001
+#define RDMAC_MODE_ENABLE 0x00000002
+#define RDMAC_MODE_TGTABORT_ENAB 0x00000004
+#define RDMAC_MODE_MSTABORT_ENAB 0x00000008
+#define RDMAC_MODE_PARITYERR_ENAB 0x00000010
+#define RDMAC_MODE_ADDROFLOW_ENAB 0x00000020
+#define RDMAC_MODE_FIFOOFLOW_ENAB 0x00000040
+#define RDMAC_MODE_FIFOURUN_ENAB 0x00000080
+#define RDMAC_MODE_FIFOOREAD_ENAB 0x00000100
+#define RDMAC_MODE_LNGREAD_ENAB 0x00000200
+#define RDMAC_MODE_SPLIT_ENABLE 0x00000800
+#define RDMAC_MODE_BD_SBD_CRPT_ENAB 0x00000800
+#define RDMAC_MODE_SPLIT_RESET 0x00001000
+#define RDMAC_MODE_MBUF_RBD_CRPT_ENAB 0x00001000
+#define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000
+#define RDMAC_MODE_FIFO_SIZE_128 0x00020000
+#define RDMAC_MODE_FIFO_LONG_BURST 0x00030000
+#define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000
+#define RDMAC_MODE_IPV4_LSO_EN 0x08000000
+#define RDMAC_MODE_IPV6_LSO_EN 0x10000000
+#define RDMAC_MODE_H2BNC_VLAN_DET 0x20000000
+#define RDMAC_STATUS 0x00004804
+#define RDMAC_STATUS_TGTABORT 0x00000004
+#define RDMAC_STATUS_MSTABORT 0x00000008
+#define RDMAC_STATUS_PARITYERR 0x00000010
+#define RDMAC_STATUS_ADDROFLOW 0x00000020
+#define RDMAC_STATUS_FIFOOFLOW 0x00000040
+#define RDMAC_STATUS_FIFOURUN 0x00000080
+#define RDMAC_STATUS_FIFOOREAD 0x00000100
+#define RDMAC_STATUS_LNGREAD 0x00000200
+/* 0x4808 --> 0x4900 unused */
+
+#define TG3_RDMA_RSRVCTRL_REG 0x00004900
+#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K 0x00000c00
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK 0x00000ff0
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K 0x000c0000
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK 0x000ff000
+#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000
+#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000
+/* 0x4904 --> 0x4910 unused */
+
+#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910
+#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K 0x00030000
+#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K 0x000c0000
+/* 0x4914 --> 0x4c00 unused */
+
+/* Write DMA control registers */
+#define WDMAC_MODE 0x00004c00
+#define WDMAC_MODE_RESET 0x00000001
+#define WDMAC_MODE_ENABLE 0x00000002
+#define WDMAC_MODE_TGTABORT_ENAB 0x00000004
+#define WDMAC_MODE_MSTABORT_ENAB 0x00000008
+#define WDMAC_MODE_PARITYERR_ENAB 0x00000010
+#define WDMAC_MODE_ADDROFLOW_ENAB 0x00000020
+#define WDMAC_MODE_FIFOOFLOW_ENAB 0x00000040
+#define WDMAC_MODE_FIFOURUN_ENAB 0x00000080
+#define WDMAC_MODE_FIFOOREAD_ENAB 0x00000100
+#define WDMAC_MODE_LNGREAD_ENAB 0x00000200
+#define WDMAC_MODE_RX_ACCEL 0x00000400
+#define WDMAC_MODE_STATUS_TAG_FIX 0x20000000
+#define WDMAC_MODE_BURST_ALL_DATA 0xc0000000
+#define WDMAC_STATUS 0x00004c04
+#define WDMAC_STATUS_TGTABORT 0x00000004
+#define WDMAC_STATUS_MSTABORT 0x00000008
+#define WDMAC_STATUS_PARITYERR 0x00000010
+#define WDMAC_STATUS_ADDROFLOW 0x00000020
+#define WDMAC_STATUS_FIFOOFLOW 0x00000040
+#define WDMAC_STATUS_FIFOURUN 0x00000080
+#define WDMAC_STATUS_FIFOOREAD 0x00000100
+#define WDMAC_STATUS_LNGREAD 0x00000200
+/* 0x4c08 --> 0x5000 unused */
+
+/* Per-cpu register offsets (arm9) */
+#define CPU_MODE 0x00000000
+#define CPU_MODE_RESET 0x00000001
+#define CPU_MODE_HALT 0x00000400
+#define CPU_STATE 0x00000004
+#define CPU_EVTMASK 0x00000008
+/* 0xc --> 0x1c reserved */
+#define CPU_PC 0x0000001c
+#define CPU_INSN 0x00000020
+#define CPU_SPAD_UFLOW 0x00000024
+#define CPU_WDOG_CLEAR 0x00000028
+#define CPU_WDOG_VECTOR 0x0000002c
+#define CPU_WDOG_PC 0x00000030
+#define CPU_HW_BP 0x00000034
+/* 0x38 --> 0x44 unused */
+#define CPU_WDOG_SAVED_STATE 0x00000044
+#define CPU_LAST_BRANCH_ADDR 0x00000048
+#define CPU_SPAD_UFLOW_SET 0x0000004c
+/* 0x50 --> 0x200 unused */
+#define CPU_R0 0x00000200
+#define CPU_R1 0x00000204
+#define CPU_R2 0x00000208
+#define CPU_R3 0x0000020c
+#define CPU_R4 0x00000210
+#define CPU_R5 0x00000214
+#define CPU_R6 0x00000218
+#define CPU_R7 0x0000021c
+#define CPU_R8 0x00000220
+#define CPU_R9 0x00000224
+#define CPU_R10 0x00000228
+#define CPU_R11 0x0000022c
+#define CPU_R12 0x00000230
+#define CPU_R13 0x00000234
+#define CPU_R14 0x00000238
+#define CPU_R15 0x0000023c
+#define CPU_R16 0x00000240
+#define CPU_R17 0x00000244
+#define CPU_R18 0x00000248
+#define CPU_R19 0x0000024c
+#define CPU_R20 0x00000250
+#define CPU_R21 0x00000254
+#define CPU_R22 0x00000258
+#define CPU_R23 0x0000025c
+#define CPU_R24 0x00000260
+#define CPU_R25 0x00000264
+#define CPU_R26 0x00000268
+#define CPU_R27 0x0000026c
+#define CPU_R28 0x00000270
+#define CPU_R29 0x00000274
+#define CPU_R30 0x00000278
+#define CPU_R31 0x0000027c
+/* 0x280 --> 0x400 unused */
+
+#define RX_CPU_BASE 0x00005000
+#define RX_CPU_MODE 0x00005000
+#define RX_CPU_STATE 0x00005004
+#define RX_CPU_PGMCTR 0x0000501c
+#define RX_CPU_HWBKPT 0x00005034
+#define TX_CPU_BASE 0x00005400
+#define TX_CPU_MODE 0x00005400
+#define TX_CPU_STATE 0x00005404
+#define TX_CPU_PGMCTR 0x0000541c
+
+#define VCPU_STATUS 0x00005100
+#define VCPU_STATUS_INIT_DONE 0x04000000
+#define VCPU_STATUS_DRV_RESET 0x08000000
+
+#define VCPU_CFGSHDW 0x00005104
+#define VCPU_CFGSHDW_WOL_ENABLE 0x00000001
+#define VCPU_CFGSHDW_WOL_MAGPKT 0x00000004
+#define VCPU_CFGSHDW_ASPM_DBNC 0x00001000
+
+/* Mailboxes */
+#define GRCMBOX_BASE 0x00005600
+#define GRCMBOX_INTERRUPT_0 0x00005800 /* 64-bit */
+#define GRCMBOX_INTERRUPT_1 0x00005808 /* 64-bit */
+#define GRCMBOX_INTERRUPT_2 0x00005810 /* 64-bit */
+#define GRCMBOX_INTERRUPT_3 0x00005818 /* 64-bit */
+#define GRCMBOX_GENERAL_0 0x00005820 /* 64-bit */
+#define GRCMBOX_GENERAL_1 0x00005828 /* 64-bit */
+#define GRCMBOX_GENERAL_2 0x00005830 /* 64-bit */
+#define GRCMBOX_GENERAL_3 0x00005838 /* 64-bit */
+#define GRCMBOX_GENERAL_4 0x00005840 /* 64-bit */
+#define GRCMBOX_GENERAL_5 0x00005848 /* 64-bit */
+#define GRCMBOX_GENERAL_6 0x00005850 /* 64-bit */
+#define GRCMBOX_GENERAL_7 0x00005858 /* 64-bit */
+#define GRCMBOX_RELOAD_STAT 0x00005860 /* 64-bit */
+#define GRCMBOX_RCVSTD_PROD_IDX 0x00005868 /* 64-bit */
+#define GRCMBOX_RCVJUMBO_PROD_IDX 0x00005870 /* 64-bit */
+#define GRCMBOX_RCVMINI_PROD_IDX 0x00005878 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_0 0x00005880 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_1 0x00005888 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_2 0x00005890 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_3 0x00005898 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_4 0x000058a0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_5 0x000058a8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_6 0x000058b0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_7 0x000058b8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_8 0x000058c0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_9 0x000058c8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_10 0x000058d0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_11 0x000058d8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_12 0x000058e0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_13 0x000058e8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_14 0x000058f0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_15 0x000058f8 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_0 0x00005900 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_1 0x00005908 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_2 0x00005910 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_3 0x00005918 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_4 0x00005920 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_5 0x00005928 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_6 0x00005930 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_7 0x00005938 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_8 0x00005940 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_9 0x00005948 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_10 0x00005950 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_11 0x00005958 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_12 0x00005960 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_13 0x00005968 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_14 0x00005970 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_15 0x00005978 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_0 0x00005980 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_1 0x00005988 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_2 0x00005990 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_3 0x00005998 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_4 0x000059a0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_5 0x000059a8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_6 0x000059b0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_7 0x000059b8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_8 0x000059c0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_9 0x000059c8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_10 0x000059d0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_11 0x000059d8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_12 0x000059e0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_13 0x000059e8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_14 0x000059f0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_15 0x000059f8 /* 64-bit */
+#define GRCMBOX_HIGH_PRIO_EV_VECTOR 0x00005a00
+#define GRCMBOX_HIGH_PRIO_EV_MASK 0x00005a04
+#define GRCMBOX_LOW_PRIO_EV_VEC 0x00005a08
+#define GRCMBOX_LOW_PRIO_EV_MASK 0x00005a0c
+/* 0x5a10 --> 0x5c00 unused */
+
+/* Flow Through queues */
+#define FTQ_RESET 0x00005c00
+/* 0x5c04 --> 0x5c10 unused */
+#define FTQ_DMA_NORM_READ_CTL 0x00005c10
+#define FTQ_DMA_NORM_READ_FULL_CNT 0x00005c14
+#define FTQ_DMA_NORM_READ_FIFO_ENQDEQ 0x00005c18
+#define FTQ_DMA_NORM_READ_WRITE_PEEK 0x00005c1c
+#define FTQ_DMA_HIGH_READ_CTL 0x00005c20
+#define FTQ_DMA_HIGH_READ_FULL_CNT 0x00005c24
+#define FTQ_DMA_HIGH_READ_FIFO_ENQDEQ 0x00005c28
+#define FTQ_DMA_HIGH_READ_WRITE_PEEK 0x00005c2c
+#define FTQ_DMA_COMP_DISC_CTL 0x00005c30
+#define FTQ_DMA_COMP_DISC_FULL_CNT 0x00005c34
+#define FTQ_DMA_COMP_DISC_FIFO_ENQDEQ 0x00005c38
+#define FTQ_DMA_COMP_DISC_WRITE_PEEK 0x00005c3c
+#define FTQ_SEND_BD_COMP_CTL 0x00005c40
+#define FTQ_SEND_BD_COMP_FULL_CNT 0x00005c44
+#define FTQ_SEND_BD_COMP_FIFO_ENQDEQ 0x00005c48
+#define FTQ_SEND_BD_COMP_WRITE_PEEK 0x00005c4c
+#define FTQ_SEND_DATA_INIT_CTL 0x00005c50
+#define FTQ_SEND_DATA_INIT_FULL_CNT 0x00005c54
+#define FTQ_SEND_DATA_INIT_FIFO_ENQDEQ 0x00005c58
+#define FTQ_SEND_DATA_INIT_WRITE_PEEK 0x00005c5c
+#define FTQ_DMA_NORM_WRITE_CTL 0x00005c60
+#define FTQ_DMA_NORM_WRITE_FULL_CNT 0x00005c64
+#define FTQ_DMA_NORM_WRITE_FIFO_ENQDEQ 0x00005c68
+#define FTQ_DMA_NORM_WRITE_WRITE_PEEK 0x00005c6c
+#define FTQ_DMA_HIGH_WRITE_CTL 0x00005c70
+#define FTQ_DMA_HIGH_WRITE_FULL_CNT 0x00005c74
+#define FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ 0x00005c78
+#define FTQ_DMA_HIGH_WRITE_WRITE_PEEK 0x00005c7c
+#define FTQ_SWTYPE1_CTL 0x00005c80
+#define FTQ_SWTYPE1_FULL_CNT 0x00005c84
+#define FTQ_SWTYPE1_FIFO_ENQDEQ 0x00005c88
+#define FTQ_SWTYPE1_WRITE_PEEK 0x00005c8c
+#define FTQ_SEND_DATA_COMP_CTL 0x00005c90
+#define FTQ_SEND_DATA_COMP_FULL_CNT 0x00005c94
+#define FTQ_SEND_DATA_COMP_FIFO_ENQDEQ 0x00005c98
+#define FTQ_SEND_DATA_COMP_WRITE_PEEK 0x00005c9c
+#define FTQ_HOST_COAL_CTL 0x00005ca0
+#define FTQ_HOST_COAL_FULL_CNT 0x00005ca4
+#define FTQ_HOST_COAL_FIFO_ENQDEQ 0x00005ca8
+#define FTQ_HOST_COAL_WRITE_PEEK 0x00005cac
+#define FTQ_MAC_TX_CTL 0x00005cb0
+#define FTQ_MAC_TX_FULL_CNT 0x00005cb4
+#define FTQ_MAC_TX_FIFO_ENQDEQ 0x00005cb8
+#define FTQ_MAC_TX_WRITE_PEEK 0x00005cbc
+#define FTQ_MB_FREE_CTL 0x00005cc0
+#define FTQ_MB_FREE_FULL_CNT 0x00005cc4
+#define FTQ_MB_FREE_FIFO_ENQDEQ 0x00005cc8
+#define FTQ_MB_FREE_WRITE_PEEK 0x00005ccc
+#define FTQ_RCVBD_COMP_CTL 0x00005cd0
+#define FTQ_RCVBD_COMP_FULL_CNT 0x00005cd4
+#define FTQ_RCVBD_COMP_FIFO_ENQDEQ 0x00005cd8
+#define FTQ_RCVBD_COMP_WRITE_PEEK 0x00005cdc
+#define FTQ_RCVLST_PLMT_CTL 0x00005ce0
+#define FTQ_RCVLST_PLMT_FULL_CNT 0x00005ce4
+#define FTQ_RCVLST_PLMT_FIFO_ENQDEQ 0x00005ce8
+#define FTQ_RCVLST_PLMT_WRITE_PEEK 0x00005cec
+#define FTQ_RCVDATA_INI_CTL 0x00005cf0
+#define FTQ_RCVDATA_INI_FULL_CNT 0x00005cf4
+#define FTQ_RCVDATA_INI_FIFO_ENQDEQ 0x00005cf8
+#define FTQ_RCVDATA_INI_WRITE_PEEK 0x00005cfc
+#define FTQ_RCVDATA_COMP_CTL 0x00005d00
+#define FTQ_RCVDATA_COMP_FULL_CNT 0x00005d04
+#define FTQ_RCVDATA_COMP_FIFO_ENQDEQ 0x00005d08
+#define FTQ_RCVDATA_COMP_WRITE_PEEK 0x00005d0c
+#define FTQ_SWTYPE2_CTL 0x00005d10
+#define FTQ_SWTYPE2_FULL_CNT 0x00005d14
+#define FTQ_SWTYPE2_FIFO_ENQDEQ 0x00005d18
+#define FTQ_SWTYPE2_WRITE_PEEK 0x00005d1c
+/* 0x5d20 --> 0x6000 unused */
+
+/* Message signaled interrupt registers */
+#define MSGINT_MODE 0x00006000
+#define MSGINT_MODE_RESET 0x00000001
+#define MSGINT_MODE_ENABLE 0x00000002
+#define MSGINT_MODE_ONE_SHOT_DISABLE 0x00000020
+#define MSGINT_MODE_MULTIVEC_EN 0x00000080
+#define MSGINT_STATUS 0x00006004
+#define MSGINT_STATUS_MSI_REQ 0x00000001
+#define MSGINT_FIFO 0x00006008
+/* 0x600c --> 0x6400 unused */
+
+/* DMA completion registers */
+#define DMAC_MODE 0x00006400
+#define DMAC_MODE_RESET 0x00000001
+#define DMAC_MODE_ENABLE 0x00000002
+/* 0x6404 --> 0x6800 unused */
+
+/* GRC registers */
+#define GRC_MODE 0x00006800
+#define GRC_MODE_UPD_ON_COAL 0x00000001
+#define GRC_MODE_BSWAP_NONFRM_DATA 0x00000002
+#define GRC_MODE_WSWAP_NONFRM_DATA 0x00000004
+#define GRC_MODE_BSWAP_DATA 0x00000010
+#define GRC_MODE_WSWAP_DATA 0x00000020
+#define GRC_MODE_BYTE_SWAP_B2HRX_DATA 0x00000040
+#define GRC_MODE_WORD_SWAP_B2HRX_DATA 0x00000080
+#define GRC_MODE_SPLITHDR 0x00000100
+#define GRC_MODE_NOFRM_CRACKING 0x00000200
+#define GRC_MODE_INCL_CRC 0x00000400
+#define GRC_MODE_ALLOW_BAD_FRMS 0x00000800
+#define GRC_MODE_NOIRQ_ON_SENDS 0x00002000
+#define GRC_MODE_NOIRQ_ON_RCV 0x00004000
+#define GRC_MODE_FORCE_PCI32BIT 0x00008000
+#define GRC_MODE_B2HRX_ENABLE 0x00008000
+#define GRC_MODE_HOST_STACKUP 0x00010000
+#define GRC_MODE_HOST_SENDBDS 0x00020000
+#define GRC_MODE_HTX2B_ENABLE 0x00040000
+#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000
+#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000
+#define GRC_MODE_PCIE_TL_SEL 0x00000000
+#define GRC_MODE_PCIE_PL_SEL 0x00400000
+#define GRC_MODE_NO_RX_PHDR_CSUM 0x00800000
+#define GRC_MODE_IRQ_ON_TX_CPU_ATTN 0x01000000
+#define GRC_MODE_IRQ_ON_RX_CPU_ATTN 0x02000000
+#define GRC_MODE_IRQ_ON_MAC_ATTN 0x04000000
+#define GRC_MODE_IRQ_ON_DMA_ATTN 0x08000000
+#define GRC_MODE_IRQ_ON_FLOW_ATTN 0x10000000
+#define GRC_MODE_4X_NIC_SEND_RINGS 0x20000000
+#define GRC_MODE_PCIE_DL_SEL 0x20000000
+#define GRC_MODE_MCAST_FRM_ENABLE 0x40000000
+#define GRC_MODE_PCIE_HI_1K_EN 0x80000000
+#define GRC_MODE_PCIE_PORT_MASK (GRC_MODE_PCIE_TL_SEL | \
+ GRC_MODE_PCIE_PL_SEL | \
+ GRC_MODE_PCIE_DL_SEL | \
+ GRC_MODE_PCIE_HI_1K_EN)
+#define GRC_MISC_CFG 0x00006804
+#define GRC_MISC_CFG_CORECLK_RESET 0x00000001
+#define GRC_MISC_CFG_PRESCALAR_MASK 0x000000fe
+#define GRC_MISC_CFG_PRESCALAR_SHIFT 1
+#define GRC_MISC_CFG_BOARD_ID_MASK 0x0001e000
+#define GRC_MISC_CFG_BOARD_ID_5700 0x0001e000
+#define GRC_MISC_CFG_BOARD_ID_5701 0x00000000
+#define GRC_MISC_CFG_BOARD_ID_5702FE 0x00004000
+#define GRC_MISC_CFG_BOARD_ID_5703 0x00000000
+#define GRC_MISC_CFG_BOARD_ID_5703S 0x00002000
+#define GRC_MISC_CFG_BOARD_ID_5704 0x00000000
+#define GRC_MISC_CFG_BOARD_ID_5704CIOBE 0x00004000
+#define GRC_MISC_CFG_BOARD_ID_5704_A2 0x00008000
+#define GRC_MISC_CFG_BOARD_ID_5788 0x00010000
+#define GRC_MISC_CFG_BOARD_ID_5788M 0x00018000
+#define GRC_MISC_CFG_BOARD_ID_AC91002A1 0x00018000
+#define GRC_MISC_CFG_EPHY_IDDQ 0x00200000
+#define GRC_MISC_CFG_KEEP_GPHY_POWER 0x04000000
+#define GRC_LOCAL_CTRL 0x00006808
+#define GRC_LCLCTRL_INT_ACTIVE 0x00000001
+#define GRC_LCLCTRL_CLEARINT 0x00000002
+#define GRC_LCLCTRL_SETINT 0x00000004
+#define GRC_LCLCTRL_INT_ON_ATTN 0x00000008
+#define GRC_LCLCTRL_GPIO_UART_SEL 0x00000010 /* 5755 only */
+#define GRC_LCLCTRL_USE_SIG_DETECT 0x00000010 /* 5714/5780 only */
+#define GRC_LCLCTRL_USE_EXT_SIG_DETECT 0x00000020 /* 5714/5780 only */
+#define GRC_LCLCTRL_GPIO_INPUT3 0x00000020
+#define GRC_LCLCTRL_GPIO_OE3 0x00000040
+#define GRC_LCLCTRL_GPIO_OUTPUT3 0x00000080
+#define GRC_LCLCTRL_GPIO_INPUT0 0x00000100
+#define GRC_LCLCTRL_GPIO_INPUT1 0x00000200
+#define GRC_LCLCTRL_GPIO_INPUT2 0x00000400
+#define GRC_LCLCTRL_GPIO_OE0 0x00000800
+#define GRC_LCLCTRL_GPIO_OE1 0x00001000
+#define GRC_LCLCTRL_GPIO_OE2 0x00002000
+#define GRC_LCLCTRL_GPIO_OUTPUT0 0x00004000
+#define GRC_LCLCTRL_GPIO_OUTPUT1 0x00008000
+#define GRC_LCLCTRL_GPIO_OUTPUT2 0x00010000
+#define GRC_LCLCTRL_EXTMEM_ENABLE 0x00020000
+#define GRC_LCLCTRL_MEMSZ_MASK 0x001c0000
+#define GRC_LCLCTRL_MEMSZ_256K 0x00000000
+#define GRC_LCLCTRL_MEMSZ_512K 0x00040000
+#define GRC_LCLCTRL_MEMSZ_1M 0x00080000
+#define GRC_LCLCTRL_MEMSZ_2M 0x000c0000
+#define GRC_LCLCTRL_MEMSZ_4M 0x00100000
+#define GRC_LCLCTRL_MEMSZ_8M 0x00140000
+#define GRC_LCLCTRL_MEMSZ_16M 0x00180000
+#define GRC_LCLCTRL_BANK_SELECT 0x00200000
+#define GRC_LCLCTRL_SSRAM_TYPE 0x00400000
+#define GRC_LCLCTRL_AUTO_SEEPROM 0x01000000
+#define GRC_TIMER 0x0000680c
+#define GRC_RX_CPU_EVENT 0x00006810
+#define GRC_RX_CPU_DRIVER_EVENT 0x00004000
+#define GRC_RX_TIMER_REF 0x00006814
+#define GRC_RX_CPU_SEM 0x00006818
+#define GRC_REMOTE_RX_CPU_ATTN 0x0000681c
+#define GRC_TX_CPU_EVENT 0x00006820
+#define GRC_TX_TIMER_REF 0x00006824
+#define GRC_TX_CPU_SEM 0x00006828
+#define GRC_REMOTE_TX_CPU_ATTN 0x0000682c
+#define GRC_MEM_POWER_UP 0x00006830 /* 64-bit */
+#define GRC_EEPROM_ADDR 0x00006838
+#define EEPROM_ADDR_WRITE 0x00000000
+#define EEPROM_ADDR_READ 0x80000000
+#define EEPROM_ADDR_COMPLETE 0x40000000
+#define EEPROM_ADDR_FSM_RESET 0x20000000
+#define EEPROM_ADDR_DEVID_MASK 0x1c000000
+#define EEPROM_ADDR_DEVID_SHIFT 26
+#define EEPROM_ADDR_START 0x02000000
+#define EEPROM_ADDR_CLKPERD_SHIFT 16
+#define EEPROM_ADDR_ADDR_MASK 0x0000ffff
+#define EEPROM_ADDR_ADDR_SHIFT 0
+#define EEPROM_DEFAULT_CLOCK_PERIOD 0x60
+#define EEPROM_CHIP_SIZE (64 * 1024)
+#define GRC_EEPROM_DATA 0x0000683c
+#define GRC_EEPROM_CTRL 0x00006840
+#define GRC_MDI_CTRL 0x00006844
+#define GRC_SEEPROM_DELAY 0x00006848
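+
+/* A minimal sketch of the EEPROM read handshake built from the
+ * GRC_EEPROM_ADDR bits above, assuming the tr32()/tw32() register
+ * accessors from tg3.c (this mirrors tg3_nvram_read_using_eeprom()):
+ *
+ *	tw32(GRC_EEPROM_ADDR,
+ *	     ((offset << EEPROM_ADDR_ADDR_SHIFT) & EEPROM_ADDR_ADDR_MASK) |
+ *	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
+ *	while (!(tr32(GRC_EEPROM_ADDR) & EEPROM_ADDR_COMPLETE))
+ *		msleep(1);		(bounded, with a timeout, in practice)
+ *	val = tr32(GRC_EEPROM_DATA);
+ */
+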
+/* 0x684c --> 0x6890 unused */
+#define GRC_VCPU_EXT_CTRL 0x00006890
+#define GRC_VCPU_EXT_CTRL_HALT_CPU 0x00400000
+#define GRC_VCPU_EXT_CTRL_DISABLE_WOL 0x20000000
+#define GRC_FASTBOOT_PC 0x00006894 /* 5752, 5755, 5787 */
+
+/* 0x6c00 --> 0x7000 unused */
+
+/* NVRAM Control registers */
+#define NVRAM_CMD 0x00007000
+#define NVRAM_CMD_RESET 0x00000001
+#define NVRAM_CMD_DONE 0x00000008
+#define NVRAM_CMD_GO 0x00000010
+#define NVRAM_CMD_WR 0x00000020
+#define NVRAM_CMD_RD 0x00000000
+#define NVRAM_CMD_ERASE 0x00000040
+#define NVRAM_CMD_FIRST 0x00000080
+#define NVRAM_CMD_LAST 0x00000100
+#define NVRAM_CMD_WREN 0x00010000
+#define NVRAM_CMD_WRDI 0x00020000
+#define NVRAM_STAT 0x00007004
+#define NVRAM_WRDATA 0x00007008
+#define NVRAM_ADDR 0x0000700c
+#define NVRAM_ADDR_MSK 0x00ffffff
+#define NVRAM_RDDATA 0x00007010
+#define NVRAM_CFG1 0x00007014
+#define NVRAM_CFG1_FLASHIF_ENAB 0x00000001
+#define NVRAM_CFG1_BUFFERED_MODE 0x00000002
+#define NVRAM_CFG1_PASS_THRU 0x00000004
+#define NVRAM_CFG1_STATUS_BITS 0x00000070
+#define NVRAM_CFG1_BIT_BANG 0x00000008
+#define NVRAM_CFG1_FLASH_SIZE 0x02000000
+#define NVRAM_CFG1_COMPAT_BYPASS 0x80000000
+#define NVRAM_CFG1_VENDOR_MASK 0x03000003
+#define FLASH_VENDOR_ATMEL_EEPROM 0x02000000
+#define FLASH_VENDOR_ATMEL_FLASH_BUFFERED 0x02000003
+#define FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED 0x00000003
+#define FLASH_VENDOR_ST 0x03000001
+#define FLASH_VENDOR_SAIFUN 0x01000003
+#define FLASH_VENDOR_SST_SMALL 0x00000001
+#define FLASH_VENDOR_SST_LARGE 0x02000001
+#define NVRAM_CFG1_5752VENDOR_MASK 0x03c00003
+#define FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ 0x00000000
+#define FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ 0x02000000
+#define FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED 0x02000003
+#define FLASH_5752VENDOR_ST_M45PE10 0x02400000
+#define FLASH_5752VENDOR_ST_M45PE20 0x02400002
+#define FLASH_5752VENDOR_ST_M45PE40 0x02400001
+#define FLASH_5755VENDOR_ATMEL_FLASH_1 0x03400001
+#define FLASH_5755VENDOR_ATMEL_FLASH_2 0x03400002
+#define FLASH_5755VENDOR_ATMEL_FLASH_3 0x03400000
+#define FLASH_5755VENDOR_ATMEL_FLASH_4 0x00000003
+#define FLASH_5755VENDOR_ATMEL_FLASH_5 0x02000003
+#define FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ 0x03c00003
+#define FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ 0x03c00002
+#define FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ 0x03000003
+#define FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ 0x03000002
+#define FLASH_5787VENDOR_MICRO_EEPROM_64KHZ 0x03000000
+#define FLASH_5787VENDOR_MICRO_EEPROM_376KHZ 0x02000000
+#define FLASH_5761VENDOR_ATMEL_MDB021D 0x00800003
+#define FLASH_5761VENDOR_ATMEL_MDB041D 0x00800000
+#define FLASH_5761VENDOR_ATMEL_MDB081D 0x00800002
+#define FLASH_5761VENDOR_ATMEL_MDB161D 0x00800001
+#define FLASH_5761VENDOR_ATMEL_ADB021D 0x00000003
+#define FLASH_5761VENDOR_ATMEL_ADB041D 0x00000000
+#define FLASH_5761VENDOR_ATMEL_ADB081D 0x00000002
+#define FLASH_5761VENDOR_ATMEL_ADB161D 0x00000001
+#define FLASH_5761VENDOR_ST_M_M45PE20 0x02800001
+#define FLASH_5761VENDOR_ST_M_M45PE40 0x02800000
+#define FLASH_5761VENDOR_ST_M_M45PE80 0x02800002
+#define FLASH_5761VENDOR_ST_M_M45PE16 0x02800003
+#define FLASH_5761VENDOR_ST_A_M45PE20 0x02000001
+#define FLASH_5761VENDOR_ST_A_M45PE40 0x02000000
+#define FLASH_5761VENDOR_ST_A_M45PE80 0x02000002
+#define FLASH_5761VENDOR_ST_A_M45PE16 0x02000003
+#define FLASH_57780VENDOR_ATMEL_AT45DB011D 0x00400000
+#define FLASH_57780VENDOR_ATMEL_AT45DB011B 0x03400000
+#define FLASH_57780VENDOR_ATMEL_AT45DB021D 0x00400002
+#define FLASH_57780VENDOR_ATMEL_AT45DB021B 0x03400002
+#define FLASH_57780VENDOR_ATMEL_AT45DB041D 0x00400001
+#define FLASH_57780VENDOR_ATMEL_AT45DB041B 0x03400001
+#define FLASH_5717VENDOR_ATMEL_EEPROM 0x02000001
+#define FLASH_5717VENDOR_MICRO_EEPROM 0x02000003
+#define FLASH_5717VENDOR_ATMEL_MDB011D 0x01000001
+#define FLASH_5717VENDOR_ATMEL_MDB021D 0x01000003
+#define FLASH_5717VENDOR_ST_M_M25PE10 0x02000000
+#define FLASH_5717VENDOR_ST_M_M25PE20 0x02000002
+#define FLASH_5717VENDOR_ST_M_M45PE10 0x00000001
+#define FLASH_5717VENDOR_ST_M_M45PE20 0x00000003
+#define FLASH_5717VENDOR_ATMEL_ADB011B 0x01400000
+#define FLASH_5717VENDOR_ATMEL_ADB021B 0x01400002
+#define FLASH_5717VENDOR_ATMEL_ADB011D 0x01400001
+#define FLASH_5717VENDOR_ATMEL_ADB021D 0x01400003
+#define FLASH_5717VENDOR_ST_A_M25PE10 0x02400000
+#define FLASH_5717VENDOR_ST_A_M25PE20 0x02400002
+#define FLASH_5717VENDOR_ST_A_M45PE10 0x02400001
+#define FLASH_5717VENDOR_ST_A_M45PE20 0x02400003
+#define FLASH_5717VENDOR_ATMEL_45USPT 0x03400000
+#define FLASH_5717VENDOR_ST_25USPT 0x03400002
+#define FLASH_5717VENDOR_ST_45USPT 0x03400001
+#define FLASH_5720_EEPROM_HD 0x00000001
+#define FLASH_5720_EEPROM_LD 0x00000003
+#define FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000
+#define FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002
+#define FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001
+#define FLASH_5720VENDOR_M_ATMEL_DB081D 0x01000003
+#define FLASH_5720VENDOR_M_ST_M25PE10 0x02000000
+#define FLASH_5720VENDOR_M_ST_M25PE20 0x02000002
+#define FLASH_5720VENDOR_M_ST_M25PE40 0x02000001
+#define FLASH_5720VENDOR_M_ST_M25PE80 0x02000003
+#define FLASH_5720VENDOR_M_ST_M45PE10 0x03000000
+#define FLASH_5720VENDOR_M_ST_M45PE20 0x03000002
+#define FLASH_5720VENDOR_M_ST_M45PE40 0x03000001
+#define FLASH_5720VENDOR_M_ST_M45PE80 0x03000003
+#define FLASH_5720VENDOR_A_ATMEL_DB011B 0x01800000
+#define FLASH_5720VENDOR_A_ATMEL_DB021B 0x01800002
+#define FLASH_5720VENDOR_A_ATMEL_DB041B 0x01800001
+#define FLASH_5720VENDOR_A_ATMEL_DB011D 0x01c00000
+#define FLASH_5720VENDOR_A_ATMEL_DB021D 0x01c00002
+#define FLASH_5720VENDOR_A_ATMEL_DB041D 0x01c00001
+#define FLASH_5720VENDOR_A_ATMEL_DB081D 0x01c00003
+#define FLASH_5720VENDOR_A_ST_M25PE10 0x02800000
+#define FLASH_5720VENDOR_A_ST_M25PE20 0x02800002
+#define FLASH_5720VENDOR_A_ST_M25PE40 0x02800001
+#define FLASH_5720VENDOR_A_ST_M25PE80 0x02800003
+#define FLASH_5720VENDOR_A_ST_M45PE10 0x02c00000
+#define FLASH_5720VENDOR_A_ST_M45PE20 0x02c00002
+#define FLASH_5720VENDOR_A_ST_M45PE40 0x02c00001
+#define FLASH_5720VENDOR_A_ST_M45PE80 0x02c00003
+#define FLASH_5720VENDOR_ATMEL_45USPT 0x03c00000
+#define FLASH_5720VENDOR_ST_25USPT 0x03c00002
+#define FLASH_5720VENDOR_ST_45USPT 0x03c00001
+#define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000
+#define FLASH_5752PAGE_SIZE_256 0x00000000
+#define FLASH_5752PAGE_SIZE_512 0x10000000
+#define FLASH_5752PAGE_SIZE_1K 0x20000000
+#define FLASH_5752PAGE_SIZE_2K 0x30000000
+#define FLASH_5752PAGE_SIZE_4K 0x40000000
+#define FLASH_5752PAGE_SIZE_264 0x50000000
+#define FLASH_5752PAGE_SIZE_528 0x60000000
+#define NVRAM_CFG2 0x00007018
+#define NVRAM_CFG3 0x0000701c
+#define NVRAM_SWARB 0x00007020
+#define SWARB_REQ_SET0 0x00000001
+#define SWARB_REQ_SET1 0x00000002
+#define SWARB_REQ_SET2 0x00000004
+#define SWARB_REQ_SET3 0x00000008
+#define SWARB_REQ_CLR0 0x00000010
+#define SWARB_REQ_CLR1 0x00000020
+#define SWARB_REQ_CLR2 0x00000040
+#define SWARB_REQ_CLR3 0x00000080
+#define SWARB_GNT0 0x00000100
+#define SWARB_GNT1 0x00000200
+#define SWARB_GNT2 0x00000400
+#define SWARB_GNT3 0x00000800
+#define SWARB_REQ0 0x00001000
+#define SWARB_REQ1 0x00002000
+#define SWARB_REQ2 0x00004000
+#define SWARB_REQ3 0x00008000
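+
+/* A minimal sketch of the software arbitration handshake performed
+ * with the bits above before touching NVRAM, assuming the tr32()/tw32()
+ * accessors from tg3.c (roughly what tg3_nvram_lock() and
+ * tg3_nvram_unlock() do on arbiter slot 1):
+ *
+ *	tw32(NVRAM_SWARB, SWARB_REQ_SET1);	   request the lock
+ *	while (!(tr32(NVRAM_SWARB) & SWARB_GNT1))
+ *		udelay(20);			   bounded loop in practice
+ *	... access NVRAM ...
+ *	tw32(NVRAM_SWARB, SWARB_REQ_CLR1);	   release the lock
+ */
+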
+#define NVRAM_ACCESS 0x00007024
+#define ACCESS_ENABLE 0x00000001
+#define ACCESS_WR_ENABLE 0x00000002
+#define NVRAM_WRITE1 0x00007028
+/* 0x702c unused */
+
+#define NVRAM_ADDR_LOCKOUT 0x00007030
+/* 0x7034 --> 0x7500 unused */
+
+#define OTP_MODE 0x00007500
+#define OTP_MODE_OTP_THRU_GRC 0x00000001
+#define OTP_CTRL 0x00007504
+#define OTP_CTRL_OTP_PROG_ENABLE 0x00200000
+#define OTP_CTRL_OTP_CMD_READ 0x00000000
+#define OTP_CTRL_OTP_CMD_INIT 0x00000008
+#define OTP_CTRL_OTP_CMD_START 0x00000001
+#define OTP_STATUS 0x00007508
+#define OTP_STATUS_CMD_DONE 0x00000001
+#define OTP_ADDRESS 0x0000750c
+#define OTP_ADDRESS_MAGIC1 0x000000a0
+#define OTP_ADDRESS_MAGIC2 0x00000080
+/* 0x7510 unused */
+
+#define OTP_READ_DATA 0x00007514
+/* 0x7518 --> 0x7c04 unused */
+
+#define PCIE_TRANSACTION_CFG 0x00007c04
+#define PCIE_TRANS_CFG_1SHOT_MSI 0x20000000
+#define PCIE_TRANS_CFG_LOM 0x00000020
+/* 0x7c08 --> 0x7d28 unused */
+
+#define PCIE_PWR_MGMT_THRESH 0x00007d28
+#define PCIE_PWR_MGMT_L1_THRESH_MSK 0x0000ff00
+#define PCIE_PWR_MGMT_L1_THRESH_4MS 0x0000ff00
+#define PCIE_PWR_MGMT_EXT_ASPM_TMR_EN 0x01000000
+/* 0x7d2c --> 0x7d54 unused */
+
+#define TG3_PCIE_LNKCTL 0x00007d54
+#define TG3_PCIE_LNKCTL_L1_PLL_PD_EN 0x00000008
+#define TG3_PCIE_LNKCTL_L1_PLL_PD_DIS 0x00000080
+/* 0x7d58 --> 0x7e70 unused */
+
+#define TG3_PCIE_PHY_TSTCTL 0x00007e2c
+#define TG3_PCIE_PHY_TSTCTL_PCIE10 0x00000040
+#define TG3_PCIE_PHY_TSTCTL_PSCRAM 0x00000020
+
+#define TG3_PCIE_EIDLE_DELAY 0x00007e70
+#define TG3_PCIE_EIDLE_DELAY_MASK 0x0000001f
+#define TG3_PCIE_EIDLE_DELAY_13_CLKS 0x0000000c
+/* 0x7e74 --> 0x8000 unused */
+
+
+/* Alternate PCIE definitions */
+#define TG3_PCIE_TLDLPL_PORT 0x00007c00
+#define TG3_PCIE_DL_LO_FTSMAX 0x0000000c
+#define TG3_PCIE_DL_LO_FTSMAX_MSK 0x000000ff
+#define TG3_PCIE_DL_LO_FTSMAX_VAL 0x0000002c
+#define TG3_PCIE_PL_LO_PHYCTL1 0x00000004
+#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000
+#define TG3_PCIE_PL_LO_PHYCTL5 0x00000014
+#define TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ 0x80000000
+
+#define TG3_REG_BLK_SIZE 0x00008000
+
+/* OTP bit definitions */
+#define TG3_OTP_AGCTGT_MASK 0x000000e0
+#define TG3_OTP_AGCTGT_SHIFT 1
+#define TG3_OTP_HPFFLTR_MASK 0x00000300
+#define TG3_OTP_HPFFLTR_SHIFT 1
+#define TG3_OTP_HPFOVER_MASK 0x00000400
+#define TG3_OTP_HPFOVER_SHIFT 1
+#define TG3_OTP_LPFDIS_MASK 0x00000800
+#define TG3_OTP_LPFDIS_SHIFT 11
+#define TG3_OTP_VDAC_MASK 0xff000000
+#define TG3_OTP_VDAC_SHIFT 24
+#define TG3_OTP_10BTAMP_MASK 0x0000f000
+#define TG3_OTP_10BTAMP_SHIFT 8
+#define TG3_OTP_ROFF_MASK 0x00e00000
+#define TG3_OTP_ROFF_SHIFT 11
+#define TG3_OTP_RCOFF_MASK 0x001c0000
+#define TG3_OTP_RCOFF_SHIFT 16
+
+#define TG3_OTP_DEFAULT 0x286c1640
+
+
+/* Hardware Legacy NVRAM layout */
+#define TG3_NVM_VPD_OFF 0x100
+#define TG3_NVM_VPD_LEN 256
+
+/* Hardware Selfboot NVRAM layout */
+#define TG3_NVM_HWSB_CFG1 0x00000004
+#define TG3_NVM_HWSB_CFG1_MAJMSK 0xf8000000
+#define TG3_NVM_HWSB_CFG1_MAJSFT 27
+#define TG3_NVM_HWSB_CFG1_MINMSK 0x07c00000
+#define TG3_NVM_HWSB_CFG1_MINSFT 22
+
+#define TG3_EEPROM_MAGIC 0x669955aa
+#define TG3_EEPROM_MAGIC_FW 0xa5000000
+#define TG3_EEPROM_MAGIC_FW_MSK 0xff000000
+#define TG3_EEPROM_SB_FORMAT_MASK 0x00e00000
+#define TG3_EEPROM_SB_FORMAT_1 0x00200000
+#define TG3_EEPROM_SB_REVISION_MASK 0x001f0000
+#define TG3_EEPROM_SB_REVISION_0 0x00000000
+#define TG3_EEPROM_SB_REVISION_2 0x00020000
+#define TG3_EEPROM_SB_REVISION_3 0x00030000
+#define TG3_EEPROM_SB_REVISION_4 0x00040000
+#define TG3_EEPROM_SB_REVISION_5 0x00050000
+#define TG3_EEPROM_SB_REVISION_6 0x00060000
+#define TG3_EEPROM_MAGIC_HW 0xabcd
+#define TG3_EEPROM_MAGIC_HW_MSK 0xffff
+
+#define TG3_NVM_DIR_START 0x18
+#define TG3_NVM_DIR_END 0x78
+#define TG3_NVM_DIRENT_SIZE 0xc
+#define TG3_NVM_DIRTYPE_SHIFT 24
+#define TG3_NVM_DIRTYPE_LENMSK 0x003fffff
+#define TG3_NVM_DIRTYPE_ASFINI 1
+#define TG3_NVM_DIRTYPE_EXTVPD 20
+#define TG3_NVM_PTREV_BCVER 0x94
+#define TG3_NVM_BCVER_MAJMSK 0x0000ff00
+#define TG3_NVM_BCVER_MAJSFT 8
+#define TG3_NVM_BCVER_MINMSK 0x000000ff
+
+#define TG3_EEPROM_SB_F1R0_EDH_OFF 0x10
+#define TG3_EEPROM_SB_F1R2_EDH_OFF 0x14
+#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10
+#define TG3_EEPROM_SB_F1R3_EDH_OFF 0x18
+#define TG3_EEPROM_SB_F1R4_EDH_OFF 0x1c
+#define TG3_EEPROM_SB_F1R5_EDH_OFF 0x20
+#define TG3_EEPROM_SB_F1R6_EDH_OFF 0x4c
+#define TG3_EEPROM_SB_EDH_MAJ_MASK 0x00000700
+#define TG3_EEPROM_SB_EDH_MAJ_SHFT 8
+#define TG3_EEPROM_SB_EDH_MIN_MASK 0x000000ff
+#define TG3_EEPROM_SB_EDH_BLD_MASK 0x0000f800
+#define TG3_EEPROM_SB_EDH_BLD_SHFT 11
+
+
+/* 32K Window into NIC internal memory */
+#define NIC_SRAM_WIN_BASE 0x00008000
+
+/* Offsets into first 32k of NIC internal memory. */
+#define NIC_SRAM_PAGE_ZERO 0x00000000
+#define NIC_SRAM_SEND_RCB 0x00000100 /* 16 * TG3_BDINFO_... */
+#define NIC_SRAM_RCV_RET_RCB 0x00000200 /* 16 * TG3_BDINFO_... */
+#define NIC_SRAM_STATS_BLK 0x00000300
+#define NIC_SRAM_STATUS_BLK 0x00000b00
+
+#define NIC_SRAM_FIRMWARE_MBOX 0x00000b50
+#define NIC_SRAM_FIRMWARE_MBOX_MAGIC1 0x4B657654
+#define NIC_SRAM_FIRMWARE_MBOX_MAGIC2 0x4861764b /* !dma on linkchg */
+
+#define NIC_SRAM_DATA_SIG 0x00000b54
+#define NIC_SRAM_DATA_SIG_MAGIC 0x4b657654 /* ascii for 'KevT' */
+
+#define NIC_SRAM_DATA_CFG 0x00000b58
+#define NIC_SRAM_DATA_CFG_LED_MODE_MASK 0x0000000c
+#define NIC_SRAM_DATA_CFG_LED_MODE_MAC 0x00000000
+#define NIC_SRAM_DATA_CFG_LED_MODE_PHY_1 0x00000004
+#define NIC_SRAM_DATA_CFG_LED_MODE_PHY_2 0x00000008
+#define NIC_SRAM_DATA_CFG_PHY_TYPE_MASK 0x00000030
+#define NIC_SRAM_DATA_CFG_PHY_TYPE_UNKNOWN 0x00000000
+#define NIC_SRAM_DATA_CFG_PHY_TYPE_COPPER 0x00000010
+#define NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER 0x00000020
+#define NIC_SRAM_DATA_CFG_WOL_ENABLE 0x00000040
+#define NIC_SRAM_DATA_CFG_ASF_ENABLE 0x00000080
+#define NIC_SRAM_DATA_CFG_EEPROM_WP 0x00000100
+#define NIC_SRAM_DATA_CFG_MINI_PCI 0x00001000
+#define NIC_SRAM_DATA_CFG_FIBER_WOL 0x00004000
+#define NIC_SRAM_DATA_CFG_NO_GPIO2 0x00100000
+#define NIC_SRAM_DATA_CFG_APE_ENABLE 0x00200000
+
+#define NIC_SRAM_DATA_VER 0x00000b5c
+#define NIC_SRAM_DATA_VER_SHIFT 16
+
+#define NIC_SRAM_DATA_PHY_ID 0x00000b74
+#define NIC_SRAM_DATA_PHY_ID1_MASK 0xffff0000
+#define NIC_SRAM_DATA_PHY_ID2_MASK 0x0000ffff
+
+#define NIC_SRAM_FW_CMD_MBOX 0x00000b78
+#define FWCMD_NICDRV_ALIVE 0x00000001
+#define FWCMD_NICDRV_PAUSE_FW 0x00000002
+#define FWCMD_NICDRV_IPV4ADDR_CHG 0x00000003
+#define FWCMD_NICDRV_IPV6ADDR_CHG 0x00000004
+#define FWCMD_NICDRV_FIX_DMAR 0x00000005
+#define FWCMD_NICDRV_FIX_DMAW 0x00000006
+#define FWCMD_NICDRV_LINK_UPDATE 0x0000000c
+#define FWCMD_NICDRV_ALIVE2 0x0000000d
+#define FWCMD_NICDRV_ALIVE3 0x0000000e
+#define NIC_SRAM_FW_CMD_LEN_MBOX 0x00000b7c
+#define NIC_SRAM_FW_CMD_DATA_MBOX 0x00000b80
+#define NIC_SRAM_FW_ASF_STATUS_MBOX 0x00000c00
+#define NIC_SRAM_FW_DRV_STATE_MBOX 0x00000c04
+#define DRV_STATE_START 0x00000001
+#define DRV_STATE_START_DONE 0x80000001
+#define DRV_STATE_UNLOAD 0x00000002
+#define DRV_STATE_UNLOAD_DONE 0x80000002
+#define DRV_STATE_WOL 0x00000003
+#define DRV_STATE_SUSPEND 0x00000004
+
+#define NIC_SRAM_FW_RESET_TYPE_MBOX 0x00000c08
+
+#define NIC_SRAM_MAC_ADDR_HIGH_MBOX 0x00000c14
+#define NIC_SRAM_MAC_ADDR_LOW_MBOX 0x00000c18
+
+#define NIC_SRAM_WOL_MBOX 0x00000d30
+#define WOL_SIGNATURE 0x474c0000
+#define WOL_DRV_STATE_SHUTDOWN 0x00000001
+#define WOL_DRV_WOL 0x00000002
+#define WOL_SET_MAGIC_PKT 0x00000004
+
+#define NIC_SRAM_DATA_CFG_2 0x00000d38
+
+#define NIC_SRAM_DATA_CFG_2_APD_EN 0x00000400
+#define SHASTA_EXT_LED_MODE_MASK 0x00018000
+#define SHASTA_EXT_LED_LEGACY 0x00000000
+#define SHASTA_EXT_LED_SHARED 0x00008000
+#define SHASTA_EXT_LED_MAC 0x00010000
+#define SHASTA_EXT_LED_COMBO 0x00018000
+
+#define NIC_SRAM_DATA_CFG_3 0x00000d3c
+#define NIC_SRAM_ASPM_DEBOUNCE 0x00000002
+
+#define NIC_SRAM_DATA_CFG_4 0x00000d60
+#define NIC_SRAM_GMII_MODE 0x00000002
+#define NIC_SRAM_RGMII_INBAND_DISABLE 0x00000004
+#define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008
+#define NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010
+
+#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000
+
+#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000
+#define NIC_SRAM_DMA_DESC_POOL_SIZE 0x00002000
+#define NIC_SRAM_TX_BUFFER_DESC 0x00004000 /* 512 entries */
+#define NIC_SRAM_RX_BUFFER_DESC 0x00006000 /* 256 entries */
+#define NIC_SRAM_RX_JUMBO_BUFFER_DESC 0x00007000 /* 256 entries */
+#define NIC_SRAM_MBUF_POOL_BASE 0x00008000
+#define NIC_SRAM_MBUF_POOL_SIZE96 0x00018000
+#define NIC_SRAM_MBUF_POOL_SIZE64 0x00010000
+#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000
+#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000
+
+#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700 128
+#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 64
+#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906 32
+
+#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700 64
+#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717 16
+
+
+/* Currently this is fixed. */
+#define TG3_PHY_MII_ADDR 0x01
+
+
+/*** Tigon3 specific PHY MII registers. ***/
+#define MII_TG3_MMD_CTRL 0x0d /* MMD Access Control register */
+#define MII_TG3_MMD_CTRL_DATA_NOINC 0x4000
+#define MII_TG3_MMD_ADDRESS 0x0e /* MMD Address Data register */
+
+#define MII_TG3_EXT_CTRL 0x10 /* Extended control register */
+#define MII_TG3_EXT_CTRL_FIFO_ELASTIC 0x0001
+#define MII_TG3_EXT_CTRL_LNK3_LED_MODE 0x0002
+#define MII_TG3_EXT_CTRL_FORCE_LED_OFF 0x0008
+#define MII_TG3_EXT_CTRL_TBI 0x8000
+
+#define MII_TG3_EXT_STAT 0x11 /* Extended status register */
+#define MII_TG3_EXT_STAT_LPASS 0x0100
+
+#define MII_TG3_RXR_COUNTERS 0x14 /* Local/Remote Receiver Counts */
+#define MII_TG3_DSP_RW_PORT 0x15 /* DSP coefficient read/write port */
+#define MII_TG3_DSP_CONTROL 0x16 /* DSP control register */
+#define MII_TG3_DSP_ADDRESS 0x17 /* DSP address register */
+
+#define MII_TG3_DSP_TAP1 0x0001
+#define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007
+#define MII_TG3_DSP_TAP26 0x001a
+#define MII_TG3_DSP_TAP26_ALNOKO 0x0001
+#define MII_TG3_DSP_TAP26_RMRXSTO 0x0002
+#define MII_TG3_DSP_TAP26_OPCSINPT 0x0004
+#define MII_TG3_DSP_AADJ1CH0 0x001f
+#define MII_TG3_DSP_CH34TP2 0x4022
+#define MII_TG3_DSP_CH34TP2_HIBW01 0x01ff
+#define MII_TG3_DSP_AADJ1CH3 0x601f
+#define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002
+#define MII_TG3_DSP_EXP1_INT_STAT 0x0f01
+#define MII_TG3_DSP_EXP8 0x0f08
+#define MII_TG3_DSP_EXP8_REJ2MHz 0x0001
+#define MII_TG3_DSP_EXP8_AEDW 0x0200
+#define MII_TG3_DSP_EXP75 0x0f75
+#define MII_TG3_DSP_EXP96 0x0f96
+#define MII_TG3_DSP_EXP97 0x0f97
+
+#define MII_TG3_AUX_CTRL 0x18 /* auxiliary control register */
+
+#define MII_TG3_AUXCTL_SHDWSEL_AUXCTL 0x0000
+#define MII_TG3_AUXCTL_ACTL_TX_6DB 0x0400
+#define MII_TG3_AUXCTL_ACTL_SMDSP_ENA 0x0800
+#define MII_TG3_AUXCTL_ACTL_EXTPKTLEN 0x4000
+#define MII_TG3_AUXCTL_ACTL_EXTLOOPBK 0x8000
+
+#define MII_TG3_AUXCTL_SHDWSEL_PWRCTL 0x0002
+#define MII_TG3_AUXCTL_PCTL_WOL_EN 0x0008
+#define MII_TG3_AUXCTL_PCTL_100TX_LPWR 0x0010
+#define MII_TG3_AUXCTL_PCTL_SPR_ISOLATE 0x0020
+#define MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC 0x0040
+#define MII_TG3_AUXCTL_PCTL_VREG_11V 0x0180
+
+#define MII_TG3_AUXCTL_SHDWSEL_MISCTEST 0x0004
+
+#define MII_TG3_AUXCTL_SHDWSEL_MISC 0x0007
+#define MII_TG3_AUXCTL_MISC_WIRESPD_EN 0x0010
+#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200
+#define MII_TG3_AUXCTL_MISC_RDSEL_SHIFT 12
+#define MII_TG3_AUXCTL_MISC_WREN 0x8000
+
+
+#define MII_TG3_AUX_STAT 0x19 /* auxiliary status register */
+#define MII_TG3_AUX_STAT_LPASS 0x0004
+#define MII_TG3_AUX_STAT_SPDMASK 0x0700
+#define MII_TG3_AUX_STAT_10HALF 0x0100
+#define MII_TG3_AUX_STAT_10FULL 0x0200
+#define MII_TG3_AUX_STAT_100HALF 0x0300
+#define MII_TG3_AUX_STAT_100_4 0x0400
+#define MII_TG3_AUX_STAT_100FULL 0x0500
+#define MII_TG3_AUX_STAT_1000HALF 0x0600
+#define MII_TG3_AUX_STAT_1000FULL 0x0700
+#define MII_TG3_AUX_STAT_100 0x0008
+#define MII_TG3_AUX_STAT_FULL 0x0001
+
+#define MII_TG3_ISTAT 0x1a /* IRQ status register */
+#define MII_TG3_IMASK 0x1b /* IRQ mask register */
+
+/* ISTAT/IMASK event bits */
+#define MII_TG3_INT_LINKCHG 0x0002
+#define MII_TG3_INT_SPEEDCHG 0x0004
+#define MII_TG3_INT_DUPLEXCHG 0x0008
+#define MII_TG3_INT_ANEG_PAGE_RX 0x0400
+
+#define MII_TG3_MISC_SHDW 0x1c
+#define MII_TG3_MISC_SHDW_WREN 0x8000
+
+#define MII_TG3_MISC_SHDW_APD_WKTM_84MS 0x0001
+#define MII_TG3_MISC_SHDW_APD_ENABLE 0x0020
+#define MII_TG3_MISC_SHDW_APD_SEL 0x2800
+
+#define MII_TG3_MISC_SHDW_SCR5_C125OE 0x0001
+#define MII_TG3_MISC_SHDW_SCR5_DLLAPD 0x0002
+#define MII_TG3_MISC_SHDW_SCR5_SDTL 0x0004
+#define MII_TG3_MISC_SHDW_SCR5_DLPTLM 0x0008
+#define MII_TG3_MISC_SHDW_SCR5_LPED 0x0010
+#define MII_TG3_MISC_SHDW_SCR5_SEL 0x1400
+
+#define MII_TG3_TEST1 0x1e
+#define MII_TG3_TEST1_TRIM_EN 0x0010
+#define MII_TG3_TEST1_CRC_EN 0x8000
+
+/* Clause 45 expansion registers */
+#define TG3_CL45_D7_EEERES_STAT 0x803e
+#define TG3_CL45_D7_EEERES_STAT_LP_100TX 0x0002
+#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004
+
+
+/* Fast Ethernet Transceiver definitions */
+#define MII_TG3_FET_PTEST 0x17
+#define MII_TG3_FET_PTEST_TRIM_SEL 0x0010
+#define MII_TG3_FET_PTEST_TRIM_2 0x0002
+#define MII_TG3_FET_PTEST_FRC_TX_LINK 0x1000
+#define MII_TG3_FET_PTEST_FRC_TX_LOCK 0x0800
+
+#define MII_TG3_FET_TEST 0x1f
+#define MII_TG3_FET_SHADOW_EN 0x0080
+
+#define MII_TG3_FET_SHDW_MISCCTRL 0x10
+#define MII_TG3_FET_SHDW_MISCCTRL_MDIX 0x4000
+
+#define MII_TG3_FET_SHDW_AUXMODE4 0x1a
+#define MII_TG3_FET_SHDW_AUXMODE4_SBPD 0x0008
+
+#define MII_TG3_FET_SHDW_AUXSTAT2 0x1b
+#define MII_TG3_FET_SHDW_AUXSTAT2_APD 0x0020
+
+
+/* APE registers. Accessible through BAR1 */
+#define TG3_APE_GPIO_MSG 0x0008
+#define TG3_APE_GPIO_MSG_SHIFT 4
+#define TG3_APE_EVENT 0x000c
+#define APE_EVENT_1 0x00000001
+#define TG3_APE_LOCK_REQ 0x002c
+#define APE_LOCK_REQ_DRIVER 0x00001000
+#define TG3_APE_LOCK_GRANT 0x004c
+#define APE_LOCK_GRANT_DRIVER 0x00001000
+#define TG3_APE_SEG_SIG 0x4000
+#define APE_SEG_SIG_MAGIC 0x41504521
+
+/* APE shared memory. Accessible through BAR1 */
+#define TG3_APE_FW_STATUS 0x400c
+#define APE_FW_STATUS_READY 0x00000100
+#define TG3_APE_FW_FEATURES 0x4010
+#define TG3_APE_FW_FEATURE_NCSI 0x00000002
+#define TG3_APE_FW_VERSION 0x4018
+#define APE_FW_VERSION_MAJMSK 0xff000000
+#define APE_FW_VERSION_MAJSFT 24
+#define APE_FW_VERSION_MINMSK 0x00ff0000
+#define APE_FW_VERSION_MINSFT 16
+#define APE_FW_VERSION_REVMSK 0x0000ff00
+#define APE_FW_VERSION_REVSFT 8
+#define APE_FW_VERSION_BLDMSK 0x000000ff
+#define TG3_APE_HOST_SEG_SIG 0x4200
+#define APE_HOST_SEG_SIG_MAGIC 0x484f5354
+#define TG3_APE_HOST_SEG_LEN 0x4204
+#define APE_HOST_SEG_LEN_MAGIC 0x00000020
+#define TG3_APE_HOST_INIT_COUNT 0x4208
+#define TG3_APE_HOST_DRIVER_ID 0x420c
+#define APE_HOST_DRIVER_ID_LINUX 0xf0000000
+#define APE_HOST_DRIVER_ID_MAGIC(maj, min) \
+ (APE_HOST_DRIVER_ID_LINUX | (maj & 0xff) << 16 | (min & 0xff) << 8)
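+
+/* For example, at init time the driver identifies itself to the APE
+ * firmware with something like the following (tg3_ape_write32() and
+ * the TG3_MAJ_NUM/TG3_MIN_NUM version constants live in tg3.c):
+ *
+ *	tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
+ *			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
+ */
+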
+#define TG3_APE_HOST_BEHAVIOR 0x4210
+#define APE_HOST_BEHAV_NO_PHYLOCK 0x00000001
+#define TG3_APE_HOST_HEARTBEAT_INT_MS 0x4214
+#define APE_HOST_HEARTBEAT_INT_DISABLE 0
+#define APE_HOST_HEARTBEAT_INT_5SEC 5000
+#define TG3_APE_HOST_HEARTBEAT_COUNT 0x4218
+#define TG3_APE_HOST_DRVR_STATE 0x421c
+#define TG3_APE_HOST_DRVR_STATE_START 0x00000001
+#define TG3_APE_HOST_DRVR_STATE_UNLOAD 0x00000002
+#define TG3_APE_HOST_DRVR_STATE_WOL 0x00000003
+#define TG3_APE_HOST_WOL_SPEED 0x4224
+#define TG3_APE_HOST_WOL_SPEED_AUTO 0x00008000
+
+#define TG3_APE_EVENT_STATUS 0x4300
+
+#define APE_EVENT_STATUS_DRIVER_EVNT 0x00000010
+#define APE_EVENT_STATUS_STATE_CHNGE 0x00000500
+#define APE_EVENT_STATUS_STATE_START 0x00010000
+#define APE_EVENT_STATUS_STATE_UNLOAD 0x00020000
+#define APE_EVENT_STATUS_STATE_WOL 0x00030000
+#define APE_EVENT_STATUS_STATE_SUSPEND 0x00040000
+#define APE_EVENT_STATUS_EVENT_PENDING 0x80000000
+
+#define TG3_APE_PER_LOCK_REQ 0x8400
+#define APE_LOCK_PER_REQ_DRIVER 0x00001000
+#define TG3_APE_PER_LOCK_GRANT 0x8420
+#define APE_PER_LOCK_GRANT_DRIVER 0x00001000
+
+/* APE convenience enumerations. */
+#define TG3_APE_LOCK_GRC 1
+#define TG3_APE_LOCK_MEM 4
+#define TG3_APE_LOCK_GPIO 7
+
+
+/* There are two ways to manage the TX descriptors on the tigon3.
+ * Either the descriptors are in host DMA'able memory, or they
+ * exist only in the card's on-chip SRAM.  All 16 send BDs operate
+ * in the same mode; they cannot be configured individually.
+ *
+ * This driver always uses host memory TX descriptors.
+ *
+ * To use host memory TX descriptors:
+ * 1) Set GRC_MODE_HOST_SENDBDS in GRC_MODE register.
+ * Make sure GRC_MODE_4X_NIC_SEND_RINGS is clear.
+ * 2) Allocate DMA'able memory.
+ * 3) In NIC_SRAM_SEND_RCB (of desired index) of on-chip SRAM:
+ * a) Set TG3_BDINFO_HOST_ADDR to DMA address of memory
+ * obtained in step 2
+ * b) Set TG3_BDINFO_NIC_ADDR to NIC_SRAM_TX_BUFFER_DESC.
+ * c) Set len field of TG3_BDINFO_MAXLEN_FLAGS to number
+ * of TX descriptors. Leave flags field clear.
+ * 4) Access TX descriptors via host memory. The chip
+ * will refetch into local SRAM as needed when producer
+ * index mailboxes are updated.
+ *
+ * To use on-chip TX descriptors:
+ * 1) Set GRC_MODE_4X_NIC_SEND_RINGS in GRC_MODE register.
+ * Make sure GRC_MODE_HOST_SENDBDS is clear.
+ * 2) In NIC_SRAM_SEND_RCB (of desired index) of on-chip SRAM:
+ * a) Set TG3_BDINFO_HOST_ADDR to zero.
+ * b) Set TG3_BDINFO_NIC_ADDR to NIC_SRAM_TX_BUFFER_DESC
+ * c) TG3_BDINFO_MAXLEN_FLAGS is don't care.
+ * 3) Access TX descriptors directly in on-chip SRAM
+ * using normal {read,write}l(). (and not using
+ * pointer dereferencing of ioremap()'d memory like
+ * the broken Broadcom driver does)
+ *
+ * Note that BDINFO_FLAGS_DISABLED should be set in the flags field of
+ * TG3_BDINFO_MAXLEN_FLAGS of all unused SEND_RCB indices.
+ */
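+
+/* A minimal sketch of the host-memory setup described above, assuming
+ * the tw32()/tg3_write_mem() accessors from tg3.c, a ring mapping
+ * obtained from dma_alloc_coherent(), and the TG3_BDINFO_*,
+ * TG3_64BIT_REG_* and TG3_TX_RING_SIZE definitions from earlier in
+ * this header:
+ *
+ *	tw32(GRC_MODE, (tp->grc_mode | GRC_MODE_HOST_SENDBDS) &
+ *		       ~GRC_MODE_4X_NIC_SEND_RINGS);
+ *	tg3_write_mem(tp, NIC_SRAM_SEND_RCB + TG3_BDINFO_HOST_ADDR +
+ *			  TG3_64BIT_REG_HIGH, (u64) mapping >> 32);
+ *	tg3_write_mem(tp, NIC_SRAM_SEND_RCB + TG3_BDINFO_HOST_ADDR +
+ *			  TG3_64BIT_REG_LOW, (u64) mapping & 0xffffffff);
+ *	tg3_write_mem(tp, NIC_SRAM_SEND_RCB + TG3_BDINFO_MAXLEN_FLAGS,
+ *		      TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
+ *	tg3_write_mem(tp, NIC_SRAM_SEND_RCB + TG3_BDINFO_NIC_ADDR,
+ *		      NIC_SRAM_TX_BUFFER_DESC);
+ *
+ * The chip then picks up new descriptors when the producer index
+ * mailbox (MAILBOX_SNDHOST_PROD_IDX_0, also defined earlier) is bumped.
+ */
+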
+struct tg3_tx_buffer_desc {
+ u32 addr_hi;
+ u32 addr_lo;
+
+ u32 len_flags;
+#define TXD_FLAG_TCPUDP_CSUM 0x0001
+#define TXD_FLAG_IP_CSUM 0x0002
+#define TXD_FLAG_END 0x0004
+#define TXD_FLAG_IP_FRAG 0x0008
+#define TXD_FLAG_JMB_PKT 0x0008
+#define TXD_FLAG_IP_FRAG_END 0x0010
+#define TXD_FLAG_VLAN 0x0040
+#define TXD_FLAG_COAL_NOW 0x0080
+#define TXD_FLAG_CPU_PRE_DMA 0x0100
+#define TXD_FLAG_CPU_POST_DMA 0x0200
+#define TXD_FLAG_ADD_SRC_ADDR 0x1000
+#define TXD_FLAG_CHOOSE_SRC_ADDR 0x6000
+#define TXD_FLAG_NO_CRC 0x8000
+#define TXD_LEN_SHIFT 16
+
+ u32 vlan_tag;
+#define TXD_VLAN_TAG_SHIFT 0
+#define TXD_MSS_SHIFT 16
+};
+
+#define TXD_ADDR 0x00UL /* 64-bit */
+#define TXD_LEN_FLAGS 0x08UL /* 32-bit (upper 16-bits are len) */
+#define TXD_VLAN_TAG 0x0cUL /* 32-bit (upper 16-bits are tag) */
+#define TXD_SIZE 0x10UL
+
+struct tg3_rx_buffer_desc {
+ u32 addr_hi;
+ u32 addr_lo;
+
+ u32 idx_len;
+#define RXD_IDX_MASK 0xffff0000
+#define RXD_IDX_SHIFT 16
+#define RXD_LEN_MASK 0x0000ffff
+#define RXD_LEN_SHIFT 0
+
+ u32 type_flags;
+#define RXD_TYPE_SHIFT 16
+#define RXD_FLAGS_SHIFT 0
+
+#define RXD_FLAG_END 0x0004
+#define RXD_FLAG_MINI 0x0800
+#define RXD_FLAG_JUMBO 0x0020
+#define RXD_FLAG_VLAN 0x0040
+#define RXD_FLAG_ERROR 0x0400
+#define RXD_FLAG_IP_CSUM 0x1000
+#define RXD_FLAG_TCPUDP_CSUM 0x2000
+#define RXD_FLAG_IS_TCP 0x4000
+
+ u32 ip_tcp_csum;
+#define RXD_IPCSUM_MASK 0xffff0000
+#define RXD_IPCSUM_SHIFT 16
+#define RXD_TCPCSUM_MASK 0x0000ffff
+#define RXD_TCPCSUM_SHIFT 0
+
+ u32 err_vlan;
+
+#define RXD_VLAN_MASK 0x0000ffff
+
+#define RXD_ERR_BAD_CRC 0x00010000
+#define RXD_ERR_COLLISION 0x00020000
+#define RXD_ERR_LINK_LOST 0x00040000
+#define RXD_ERR_PHY_DECODE 0x00080000
+#define RXD_ERR_ODD_NIBBLE_RCVD_MII 0x00100000
+#define RXD_ERR_MAC_ABRT 0x00200000
+#define RXD_ERR_TOO_SMALL 0x00400000
+#define RXD_ERR_NO_RESOURCES 0x00800000
+#define RXD_ERR_HUGE_FRAME 0x01000000
+#define RXD_ERR_MASK 0xffff0000
+
+ u32 reserved;
+ u32 opaque;
+#define RXD_OPAQUE_INDEX_MASK 0x0000ffff
+#define RXD_OPAQUE_INDEX_SHIFT 0
+#define RXD_OPAQUE_RING_STD 0x00010000
+#define RXD_OPAQUE_RING_JUMBO 0x00020000
+#define RXD_OPAQUE_RING_MINI 0x00040000
+#define RXD_OPAQUE_RING_MASK 0x00070000
+};
+
+struct tg3_ext_rx_buffer_desc {
+ struct {
+ u32 addr_hi;
+ u32 addr_lo;
+ } addrlist[3];
+ u32 len2_len1;
+ u32 resv_len3;
+ struct tg3_rx_buffer_desc std;
+};
+
+/* We only use this when testing out the DMA engine
+ * at probe time.  This is the internal format of buffer
+ * descriptors used by the chip at NIC_SRAM_DMA_DESC_POOL_BASE.
+ */
+struct tg3_internal_buffer_desc {
+ u32 addr_hi;
+ u32 addr_lo;
+ u32 nic_mbuf;
+ /* XXX FIX THIS */
+#ifdef __BIG_ENDIAN
+ u16 cqid_sqid;
+ u16 len;
+#else
+ u16 len;
+ u16 cqid_sqid;
+#endif
+ u32 flags;
+ u32 __cookie1;
+ u32 __cookie2;
+ u32 __cookie3;
+};
+
+#define TG3_HW_STATUS_SIZE 0x50
+struct tg3_hw_status {
+ u32 status;
+#define SD_STATUS_UPDATED 0x00000001
+#define SD_STATUS_LINK_CHG 0x00000002
+#define SD_STATUS_ERROR 0x00000004
+
+ u32 status_tag;
+
+#ifdef __BIG_ENDIAN
+ u16 rx_consumer;
+ u16 rx_jumbo_consumer;
+#else
+ u16 rx_jumbo_consumer;
+ u16 rx_consumer;
+#endif
+
+#ifdef __BIG_ENDIAN
+ u16 reserved;
+ u16 rx_mini_consumer;
+#else
+ u16 rx_mini_consumer;
+ u16 reserved;
+#endif
+ struct {
+#ifdef __BIG_ENDIAN
+ u16 tx_consumer;
+ u16 rx_producer;
+#else
+ u16 rx_producer;
+ u16 tx_consumer;
+#endif
+ } idx[16];
+};
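+
+/* A minimal sketch of how the interrupt path consumes this block,
+ * assuming the tw32_mailbox() accessor from tg3.c and the tg3_napi
+ * context defined later in this file:
+ *
+ *	struct tg3_hw_status *sblk = tnapi->hw_status;
+ *
+ *	if (sblk->status & SD_STATUS_UPDATED) {
+ *		sblk->status &= ~SD_STATUS_UPDATED;
+ *		tw32_mailbox(tnapi->int_mbox, 0x00000001);
+ *		... then compare idx[] against the cached consumer and
+ *		    producer indices to find new rx/tx work ...
+ *	}
+ */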
+
+typedef struct {
+ u32 high, low;
+} tg3_stat64_t;
+
+struct tg3_hw_stats {
+ u8 __reserved0[0x400-0x300];
+
+ /* Statistics maintained by Receive MAC. */
+ tg3_stat64_t rx_octets;
+ u64 __reserved1;
+ tg3_stat64_t rx_fragments;
+ tg3_stat64_t rx_ucast_packets;
+ tg3_stat64_t rx_mcast_packets;
+ tg3_stat64_t rx_bcast_packets;
+ tg3_stat64_t rx_fcs_errors;
+ tg3_stat64_t rx_align_errors;
+ tg3_stat64_t rx_xon_pause_rcvd;
+ tg3_stat64_t rx_xoff_pause_rcvd;
+ tg3_stat64_t rx_mac_ctrl_rcvd;
+ tg3_stat64_t rx_xoff_entered;
+ tg3_stat64_t rx_frame_too_long_errors;
+ tg3_stat64_t rx_jabbers;
+ tg3_stat64_t rx_undersize_packets;
+ tg3_stat64_t rx_in_length_errors;
+ tg3_stat64_t rx_out_length_errors;
+ tg3_stat64_t rx_64_or_less_octet_packets;
+ tg3_stat64_t rx_65_to_127_octet_packets;
+ tg3_stat64_t rx_128_to_255_octet_packets;
+ tg3_stat64_t rx_256_to_511_octet_packets;
+ tg3_stat64_t rx_512_to_1023_octet_packets;
+ tg3_stat64_t rx_1024_to_1522_octet_packets;
+ tg3_stat64_t rx_1523_to_2047_octet_packets;
+ tg3_stat64_t rx_2048_to_4095_octet_packets;
+ tg3_stat64_t rx_4096_to_8191_octet_packets;
+ tg3_stat64_t rx_8192_to_9022_octet_packets;
+
+ u64 __unused0[37];
+
+ /* Statistics maintained by Transmit MAC. */
+ tg3_stat64_t tx_octets;
+ u64 __reserved2;
+ tg3_stat64_t tx_collisions;
+ tg3_stat64_t tx_xon_sent;
+ tg3_stat64_t tx_xoff_sent;
+ tg3_stat64_t tx_flow_control;
+ tg3_stat64_t tx_mac_errors;
+ tg3_stat64_t tx_single_collisions;
+ tg3_stat64_t tx_mult_collisions;
+ tg3_stat64_t tx_deferred;
+ u64 __reserved3;
+ tg3_stat64_t tx_excessive_collisions;
+ tg3_stat64_t tx_late_collisions;
+ tg3_stat64_t tx_collide_2times;
+ tg3_stat64_t tx_collide_3times;
+ tg3_stat64_t tx_collide_4times;
+ tg3_stat64_t tx_collide_5times;
+ tg3_stat64_t tx_collide_6times;
+ tg3_stat64_t tx_collide_7times;
+ tg3_stat64_t tx_collide_8times;
+ tg3_stat64_t tx_collide_9times;
+ tg3_stat64_t tx_collide_10times;
+ tg3_stat64_t tx_collide_11times;
+ tg3_stat64_t tx_collide_12times;
+ tg3_stat64_t tx_collide_13times;
+ tg3_stat64_t tx_collide_14times;
+ tg3_stat64_t tx_collide_15times;
+ tg3_stat64_t tx_ucast_packets;
+ tg3_stat64_t tx_mcast_packets;
+ tg3_stat64_t tx_bcast_packets;
+ tg3_stat64_t tx_carrier_sense_errors;
+ tg3_stat64_t tx_discards;
+ tg3_stat64_t tx_errors;
+
+ u64 __unused1[31];
+
+ /* Statistics maintained by Receive List Placement. */
+ tg3_stat64_t COS_rx_packets[16];
+ tg3_stat64_t COS_rx_filter_dropped;
+ tg3_stat64_t dma_writeq_full;
+ tg3_stat64_t dma_write_prioq_full;
+ tg3_stat64_t rxbds_empty;
+ tg3_stat64_t rx_discards;
+ tg3_stat64_t rx_errors;
+ tg3_stat64_t rx_threshold_hit;
+
+ u64 __unused2[9];
+
+ /* Statistics maintained by Send Data Initiator. */
+ tg3_stat64_t COS_out_packets[16];
+ tg3_stat64_t dma_readq_full;
+ tg3_stat64_t dma_read_prioq_full;
+ tg3_stat64_t tx_comp_queue_full;
+
+ /* Statistics maintained by Host Coalescing. */
+ tg3_stat64_t ring_set_send_prod_index;
+ tg3_stat64_t ring_status_update;
+ tg3_stat64_t nic_irqs;
+ tg3_stat64_t nic_avoided_irqs;
+ tg3_stat64_t nic_tx_threshold_hit;
+
+ /* NOT a part of the hardware statistics block format.
+ * These stats are here as storage for tg3_periodic_fetch_stats().
+ */
+ tg3_stat64_t mbuf_lwm_thresh_hit;
+
+ u8 __reserved4[0xb00-0x9c8];
+};
+
+/* 'mapping' is superfluous as the chip does not write into
+ * the tx/rx post rings so we could just fetch it from there.
+ * But the cache behavior is better the way we are doing it now.
+ */
+struct ring_info {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct tg3_tx_ring_info {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ bool fragmented;
+};
+
+struct tg3_link_config {
+ /* Describes what we're trying to get. */
+ u32 advertising;
+ u16 speed;
+ u8 duplex;
+ u8 autoneg;
+ u8 flowctrl;
+
+ /* Describes what we actually have. */
+ u8 active_flowctrl;
+
+ u8 active_duplex;
+#define SPEED_INVALID 0xffff
+#define DUPLEX_INVALID 0xff
+#define AUTONEG_INVALID 0xff
+ u16 active_speed;
+
+ /* When we go in and out of low power mode we need
+ * to swap the active settings with this saved state.
+ */
+ u16 orig_speed;
+ u8 orig_duplex;
+ u8 orig_autoneg;
+ u32 orig_advertising;
+};
+
+struct tg3_bufmgr_config {
+ u32 mbuf_read_dma_low_water;
+ u32 mbuf_mac_rx_low_water;
+ u32 mbuf_high_water;
+
+ u32 mbuf_read_dma_low_water_jumbo;
+ u32 mbuf_mac_rx_low_water_jumbo;
+ u32 mbuf_high_water_jumbo;
+
+ u32 dma_low_water;
+ u32 dma_high_water;
+};
+
+struct tg3_ethtool_stats {
+ /* Statistics maintained by Receive MAC. */
+ u64 rx_octets;
+ u64 rx_fragments;
+ u64 rx_ucast_packets;
+ u64 rx_mcast_packets;
+ u64 rx_bcast_packets;
+ u64 rx_fcs_errors;
+ u64 rx_align_errors;
+ u64 rx_xon_pause_rcvd;
+ u64 rx_xoff_pause_rcvd;
+ u64 rx_mac_ctrl_rcvd;
+ u64 rx_xoff_entered;
+ u64 rx_frame_too_long_errors;
+ u64 rx_jabbers;
+ u64 rx_undersize_packets;
+ u64 rx_in_length_errors;
+ u64 rx_out_length_errors;
+ u64 rx_64_or_less_octet_packets;
+ u64 rx_65_to_127_octet_packets;
+ u64 rx_128_to_255_octet_packets;
+ u64 rx_256_to_511_octet_packets;
+ u64 rx_512_to_1023_octet_packets;
+ u64 rx_1024_to_1522_octet_packets;
+ u64 rx_1523_to_2047_octet_packets;
+ u64 rx_2048_to_4095_octet_packets;
+ u64 rx_4096_to_8191_octet_packets;
+ u64 rx_8192_to_9022_octet_packets;
+
+ /* Statistics maintained by Transmit MAC. */
+ u64 tx_octets;
+ u64 tx_collisions;
+ u64 tx_xon_sent;
+ u64 tx_xoff_sent;
+ u64 tx_flow_control;
+ u64 tx_mac_errors;
+ u64 tx_single_collisions;
+ u64 tx_mult_collisions;
+ u64 tx_deferred;
+ u64 tx_excessive_collisions;
+ u64 tx_late_collisions;
+ u64 tx_collide_2times;
+ u64 tx_collide_3times;
+ u64 tx_collide_4times;
+ u64 tx_collide_5times;
+ u64 tx_collide_6times;
+ u64 tx_collide_7times;
+ u64 tx_collide_8times;
+ u64 tx_collide_9times;
+ u64 tx_collide_10times;
+ u64 tx_collide_11times;
+ u64 tx_collide_12times;
+ u64 tx_collide_13times;
+ u64 tx_collide_14times;
+ u64 tx_collide_15times;
+ u64 tx_ucast_packets;
+ u64 tx_mcast_packets;
+ u64 tx_bcast_packets;
+ u64 tx_carrier_sense_errors;
+ u64 tx_discards;
+ u64 tx_errors;
+
+ /* Statistics maintained by Receive List Placement. */
+ u64 dma_writeq_full;
+ u64 dma_write_prioq_full;
+ u64 rxbds_empty;
+ u64 rx_discards;
+ u64 rx_errors;
+ u64 rx_threshold_hit;
+
+ /* Statistics maintained by Send Data Initiator. */
+ u64 dma_readq_full;
+ u64 dma_read_prioq_full;
+ u64 tx_comp_queue_full;
+
+ /* Statistics maintained by Host Coalescing. */
+ u64 ring_set_send_prod_index;
+ u64 ring_status_update;
+ u64 nic_irqs;
+ u64 nic_avoided_irqs;
+ u64 nic_tx_threshold_hit;
+
+ u64 mbuf_lwm_thresh_hit;
+};
+
+struct tg3_rx_prodring_set {
+ u32 rx_std_prod_idx;
+ u32 rx_std_cons_idx;
+ u32 rx_jmb_prod_idx;
+ u32 rx_jmb_cons_idx;
+ struct tg3_rx_buffer_desc *rx_std;
+ struct tg3_ext_rx_buffer_desc *rx_jmb;
+ struct ring_info *rx_std_buffers;
+ struct ring_info *rx_jmb_buffers;
+ dma_addr_t rx_std_mapping;
+ dma_addr_t rx_jmb_mapping;
+};
+
+#define TG3_IRQ_MAX_VECS_RSS 5
+#define TG3_IRQ_MAX_VECS TG3_IRQ_MAX_VECS_RSS
+
+struct tg3_napi {
+ struct napi_struct napi ____cacheline_aligned;
+ struct tg3 *tp;
+ struct tg3_hw_status *hw_status;
+
+ u32 chk_msi_cnt;
+ u32 last_tag;
+ u32 last_irq_tag;
+ u32 int_mbox;
+ u32 coal_now;
+
+ u32 consmbox ____cacheline_aligned;
+ u32 rx_rcb_ptr;
+ u32 last_rx_cons;
+ u16 *rx_rcb_prod_idx;
+ struct tg3_rx_prodring_set prodring;
+ struct tg3_rx_buffer_desc *rx_rcb;
+
+ u32 tx_prod ____cacheline_aligned;
+ u32 tx_cons;
+ u32 tx_pending;
+ u32 last_tx_cons;
+ u32 prodmbox;
+ struct tg3_tx_buffer_desc *tx_ring;
+ struct tg3_tx_ring_info *tx_buffers;
+
+ dma_addr_t status_mapping;
+ dma_addr_t rx_rcb_mapping;
+ dma_addr_t tx_desc_mapping;
+
+ char irq_lbl[IFNAMSIZ];
+ unsigned int irq_vec;
+};
+
+enum TG3_FLAGS {
+ TG3_FLAG_TAGGED_STATUS = 0,
+ TG3_FLAG_TXD_MBOX_HWBUG,
+ TG3_FLAG_USE_LINKCHG_REG,
+ TG3_FLAG_ERROR_PROCESSED,
+ TG3_FLAG_ENABLE_ASF,
+ TG3_FLAG_ASPM_WORKAROUND,
+ TG3_FLAG_POLL_SERDES,
+ TG3_FLAG_MBOX_WRITE_REORDER,
+ TG3_FLAG_PCIX_TARGET_HWBUG,
+ TG3_FLAG_WOL_SPEED_100MB,
+ TG3_FLAG_WOL_ENABLE,
+ TG3_FLAG_EEPROM_WRITE_PROT,
+ TG3_FLAG_NVRAM,
+ TG3_FLAG_NVRAM_BUFFERED,
+ TG3_FLAG_SUPPORT_MSI,
+ TG3_FLAG_SUPPORT_MSIX,
+ TG3_FLAG_PCIX_MODE,
+ TG3_FLAG_PCI_HIGH_SPEED,
+ TG3_FLAG_PCI_32BIT,
+ TG3_FLAG_SRAM_USE_CONFIG,
+ TG3_FLAG_TX_RECOVERY_PENDING,
+ TG3_FLAG_WOL_CAP,
+ TG3_FLAG_JUMBO_RING_ENABLE,
+ TG3_FLAG_PAUSE_AUTONEG,
+ TG3_FLAG_CPMU_PRESENT,
+ TG3_FLAG_40BIT_DMA_BUG,
+ TG3_FLAG_BROKEN_CHECKSUMS,
+ TG3_FLAG_JUMBO_CAPABLE,
+ TG3_FLAG_CHIP_RESETTING,
+ TG3_FLAG_INIT_COMPLETE,
+ TG3_FLAG_RESTART_TIMER,
+ TG3_FLAG_TSO_BUG,
+ TG3_FLAG_IS_5788,
+ TG3_FLAG_MAX_RXPEND_64,
+ TG3_FLAG_TSO_CAPABLE,
+ TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */
+ TG3_FLAG_ASF_NEW_HANDSHAKE,
+ TG3_FLAG_HW_AUTONEG,
+ TG3_FLAG_IS_NIC,
+ TG3_FLAG_FLASH,
+ TG3_FLAG_HW_TSO_1,
+ TG3_FLAG_5705_PLUS,
+ TG3_FLAG_5750_PLUS,
+ TG3_FLAG_HW_TSO_3,
+ TG3_FLAG_USING_MSI,
+ TG3_FLAG_USING_MSIX,
+ TG3_FLAG_ICH_WORKAROUND,
+ TG3_FLAG_5780_CLASS,
+ TG3_FLAG_HW_TSO_2,
+ TG3_FLAG_1SHOT_MSI,
+ TG3_FLAG_NO_FWARE_REPORTED,
+ TG3_FLAG_NO_NVRAM_ADDR_TRANS,
+ TG3_FLAG_ENABLE_APE,
+ TG3_FLAG_PROTECTED_NVRAM,
+ TG3_FLAG_5701_DMA_BUG,
+ TG3_FLAG_USE_PHYLIB,
+ TG3_FLAG_MDIOBUS_INITED,
+ TG3_FLAG_LRG_PROD_RING_CAP,
+ TG3_FLAG_RGMII_INBAND_DISABLE,
+ TG3_FLAG_RGMII_EXT_IBND_RX_EN,
+ TG3_FLAG_RGMII_EXT_IBND_TX_EN,
+ TG3_FLAG_CLKREQ_BUG,
+ TG3_FLAG_5755_PLUS,
+ TG3_FLAG_NO_NVRAM,
+ TG3_FLAG_ENABLE_RSS,
+ TG3_FLAG_ENABLE_TSS,
+ TG3_FLAG_SHORT_DMA_BUG,
+ TG3_FLAG_USE_JUMBO_BDFLAG,
+ TG3_FLAG_L1PLLPD_EN,
+ TG3_FLAG_57765_PLUS,
+ TG3_FLAG_APE_HAS_NCSI,
+ TG3_FLAG_5717_PLUS,
+ TG3_FLAG_4K_FIFO_LIMIT,
+
+ /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
+ TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
+};
+
+struct tg3 {
+ /* begin "general, frequently-used members" cacheline section */
+
+ /* If the IRQ handler (which runs lockless) needs to be
+ * quiesced, the following bitmask state is used. The
+ * SYNC flag is set by non-IRQ context code to initiate
+ * the quiescence.
+ *
+ * When the IRQ handler notices that SYNC is set, it
+ * disables interrupts and returns.
+ *
+ * When all outstanding IRQ handlers have returned after
+ * the SYNC flag has been set, the setter can be assured
+ * that the IRQ handler will no longer run.
+ *
+ * In this way, no SMP driver lock is ever acquired in
+ * hw IRQ context, only in sw IRQ context or lower.
+ */
+ unsigned int irq_sync;
+
+ /* SMP locking strategy:
+ *
+ * lock: Held during reset, PHY access, timer, and when
+ * updating tg3_flags.
+ *
+ * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds
+ * netif_tx_lock when it needs to call
+ * netif_wake_queue.
+ *
+ * Both of these locks are to be held with BH safety.
+ *
+ * Because the IRQ handler, tg3_poll, and tg3_start_xmit
+ * are running lockless, it is necessary to completely
+ * quiesce the chip with tg3_netif_stop and tg3_full_lock
+ * before reconfiguring the device.
+ *
+ * indirect_lock: Held when accessing registers indirectly
+ * with IRQ disabling.
+ */
+ spinlock_t lock;
+ spinlock_t indirect_lock;
+
+ u32 (*read32) (struct tg3 *, u32);
+ void (*write32) (struct tg3 *, u32, u32);
+ u32 (*read32_mbox) (struct tg3 *, u32);
+ void (*write32_mbox) (struct tg3 *, u32,
+ u32);
+ void __iomem *regs;
+ void __iomem *aperegs;
+ struct net_device *dev;
+ struct pci_dev *pdev;
+
+ u32 coal_now;
+ u32 msg_enable;
+
+ /* begin "tx thread" cacheline section */
+ void (*write32_tx_mbox) (struct tg3 *, u32,
+ u32);
+
+ /* begin "rx thread" cacheline section */
+ struct tg3_napi napi[TG3_IRQ_MAX_VECS];
+ void (*write32_rx_mbox) (struct tg3 *, u32,
+ u32);
+ u32 rx_copy_thresh;
+ u32 rx_std_ring_mask;
+ u32 rx_jmb_ring_mask;
+ u32 rx_ret_ring_mask;
+ u32 rx_pending;
+ u32 rx_jumbo_pending;
+ u32 rx_std_max_post;
+ u32 rx_offset;
+ u32 rx_pkt_map_sz;
+
+
+ /* begin "everything else" cacheline(s) section */
+ unsigned long rx_dropped;
+ struct rtnl_link_stats64 net_stats_prev;
+ struct tg3_ethtool_stats estats;
+ struct tg3_ethtool_stats estats_prev;
+
+ DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS);
+
+ union {
+ unsigned long phy_crc_errors;
+ unsigned long last_event_jiffies;
+ };
+
+ struct timer_list timer;
+ u16 timer_counter;
+ u16 timer_multiplier;
+ u32 timer_offset;
+ u16 asf_counter;
+ u16 asf_multiplier;
+
+ /* 1 second counter for transient serdes link events */
+ u32 serdes_counter;
+#define SERDES_AN_TIMEOUT_5704S 2
+#define SERDES_PARALLEL_DET_TIMEOUT 1
+#define SERDES_AN_TIMEOUT_5714S 1
+
+ struct tg3_link_config link_config;
+ struct tg3_bufmgr_config bufmgr_config;
+
+ /* cache h/w values, often passed straight to h/w */
+ u32 rx_mode;
+ u32 tx_mode;
+ u32 mac_mode;
+ u32 mi_mode;
+ u32 misc_host_ctrl;
+ u32 grc_mode;
+ u32 grc_local_ctrl;
+ u32 dma_rwctrl;
+ u32 coalesce_mode;
+ u32 pwrmgmt_thresh;
+
+ /* PCI block */
+ u32 pci_chip_rev_id;
+ u16 pci_cmd;
+ u8 pci_cacheline_sz;
+ u8 pci_lat_timer;
+
+ int pci_fn;
+ int pm_cap;
+ int msi_cap;
+ int pcix_cap;
+ int pcie_readrq;
+
+ struct mii_bus *mdio_bus;
+ int mdio_irq[PHY_MAX_ADDR];
+
+ u8 phy_addr;
+
+ /* PHY info */
+ u32 phy_id;
+#define TG3_PHY_ID_MASK 0xfffffff0
+#define TG3_PHY_ID_BCM5400 0x60008040
+#define TG3_PHY_ID_BCM5401 0x60008050
+#define TG3_PHY_ID_BCM5411 0x60008070
+#define TG3_PHY_ID_BCM5701 0x60008110
+#define TG3_PHY_ID_BCM5703 0x60008160
+#define TG3_PHY_ID_BCM5704 0x60008190
+#define TG3_PHY_ID_BCM5705 0x600081a0
+#define TG3_PHY_ID_BCM5750 0x60008180
+#define TG3_PHY_ID_BCM5752 0x60008100
+#define TG3_PHY_ID_BCM5714 0x60008340
+#define TG3_PHY_ID_BCM5780 0x60008350
+#define TG3_PHY_ID_BCM5755 0xbc050cc0
+#define TG3_PHY_ID_BCM5787 0xbc050ce0
+#define TG3_PHY_ID_BCM5756 0xbc050ed0
+#define TG3_PHY_ID_BCM5784 0xbc050fa0
+#define TG3_PHY_ID_BCM5761 0xbc050fd0
+#define TG3_PHY_ID_BCM5718C 0x5c0d8a00
+#define TG3_PHY_ID_BCM5718S 0xbc050ff0
+#define TG3_PHY_ID_BCM57765 0x5c0d8a40
+#define TG3_PHY_ID_BCM5719C 0x5c0d8a20
+#define TG3_PHY_ID_BCM5720C 0x5c0d8b60
+#define TG3_PHY_ID_BCM5906 0xdc00ac40
+#define TG3_PHY_ID_BCM8002 0x60010140
+#define TG3_PHY_ID_INVALID 0xffffffff
+
+#define PHY_ID_RTL8211C 0x001cc910
+#define PHY_ID_RTL8201E 0x00008200
+
+#define TG3_PHY_ID_REV_MASK 0x0000000f
+#define TG3_PHY_REV_BCM5401_B0 0x1
+
+ /* This macro assumes the passed PHY ID is
+ * already masked with TG3_PHY_ID_MASK.
+ */
+#define TG3_KNOWN_PHY_ID(X) \
+ ((X) == TG3_PHY_ID_BCM5400 || (X) == TG3_PHY_ID_BCM5401 || \
+ (X) == TG3_PHY_ID_BCM5411 || (X) == TG3_PHY_ID_BCM5701 || \
+ (X) == TG3_PHY_ID_BCM5703 || (X) == TG3_PHY_ID_BCM5704 || \
+ (X) == TG3_PHY_ID_BCM5705 || (X) == TG3_PHY_ID_BCM5750 || \
+ (X) == TG3_PHY_ID_BCM5752 || (X) == TG3_PHY_ID_BCM5714 || \
+ (X) == TG3_PHY_ID_BCM5780 || (X) == TG3_PHY_ID_BCM5787 || \
+ (X) == TG3_PHY_ID_BCM5755 || (X) == TG3_PHY_ID_BCM5756 || \
+ (X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \
+ (X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \
+ (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM5719C || \
+ (X) == TG3_PHY_ID_BCM8002)
+
+ u32 phy_flags;
+#define TG3_PHYFLG_IS_LOW_POWER 0x00000001
+#define TG3_PHYFLG_IS_CONNECTED 0x00000002
+#define TG3_PHYFLG_USE_MI_INTERRUPT 0x00000004
+#define TG3_PHYFLG_PHY_SERDES 0x00000010
+#define TG3_PHYFLG_MII_SERDES 0x00000020
+#define TG3_PHYFLG_ANY_SERDES (TG3_PHYFLG_PHY_SERDES | \
+ TG3_PHYFLG_MII_SERDES)
+#define TG3_PHYFLG_IS_FET 0x00000040
+#define TG3_PHYFLG_10_100_ONLY 0x00000080
+#define TG3_PHYFLG_ENABLE_APD 0x00000100
+#define TG3_PHYFLG_CAPACITIVE_COUPLING 0x00000200
+#define TG3_PHYFLG_NO_ETH_WIRE_SPEED 0x00000400
+#define TG3_PHYFLG_JITTER_BUG 0x00000800
+#define TG3_PHYFLG_ADJUST_TRIM 0x00001000
+#define TG3_PHYFLG_ADC_BUG 0x00002000
+#define TG3_PHYFLG_5704_A0_BUG 0x00004000
+#define TG3_PHYFLG_BER_BUG 0x00008000
+#define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000
+#define TG3_PHYFLG_PARALLEL_DETECT 0x00020000
+#define TG3_PHYFLG_EEE_CAP 0x00040000
+
+ u32 led_ctrl;
+ u32 phy_otp;
+ u32 setlpicnt;
+
+#define TG3_BPN_SIZE 24
+ char board_part_number[TG3_BPN_SIZE];
+#define TG3_VER_SIZE ETHTOOL_FWVERS_LEN
+ char fw_ver[TG3_VER_SIZE];
+ u32 nic_sram_data_cfg;
+ u32 pci_clock_ctrl;
+ struct pci_dev *pdev_peer;
+
+ struct tg3_hw_stats *hw_stats;
+ dma_addr_t stats_mapping;
+ struct work_struct reset_task;
+
+ int nvram_lock_cnt;
+ u32 nvram_size;
+#define TG3_NVRAM_SIZE_2KB 0x00000800
+#define TG3_NVRAM_SIZE_64KB 0x00010000
+#define TG3_NVRAM_SIZE_128KB 0x00020000
+#define TG3_NVRAM_SIZE_256KB 0x00040000
+#define TG3_NVRAM_SIZE_512KB 0x00080000
+#define TG3_NVRAM_SIZE_1MB 0x00100000
+#define TG3_NVRAM_SIZE_2MB 0x00200000
+
+ u32 nvram_pagesize;
+ u32 nvram_jedecnum;
+
+#define JEDEC_ATMEL 0x1f
+#define JEDEC_ST 0x20
+#define JEDEC_SAIFUN 0x4f
+#define JEDEC_SST 0xbf
+
+#define ATMEL_AT24C02_CHIP_SIZE TG3_NVRAM_SIZE_2KB
+#define ATMEL_AT24C02_PAGE_SIZE (8)
+
+#define ATMEL_AT24C64_CHIP_SIZE TG3_NVRAM_SIZE_64KB
+#define ATMEL_AT24C64_PAGE_SIZE (32)
+
+#define ATMEL_AT24C512_CHIP_SIZE TG3_NVRAM_SIZE_512KB
+#define ATMEL_AT24C512_PAGE_SIZE (128)
+
+#define ATMEL_AT45DB0X1B_PAGE_POS 9
+#define ATMEL_AT45DB0X1B_PAGE_SIZE 264
+
+#define ATMEL_AT25F512_PAGE_SIZE 256
+
+#define ST_M45PEX0_PAGE_SIZE 256
+
+#define SAIFUN_SA25F0XX_PAGE_SIZE 256
+
+#define SST_25VF0X0_PAGE_SIZE 4098
+
+ unsigned int irq_max;
+ unsigned int irq_cnt;
+
+ struct ethtool_coalesce coal;
+
+ /* firmware info */
+ const char *fw_needed;
+ const struct firmware *fw;
+ u32 fw_len; /* includes BSS */
+};
+
+#endif /* !(_T3_H) */
diff --git a/drivers/net/ethernet/brocade/Kconfig b/drivers/net/ethernet/brocade/Kconfig
new file mode 100644
index 000000000000..264155778857
--- /dev/null
+++ b/drivers/net/ethernet/brocade/Kconfig
@@ -0,0 +1,23 @@
+#
+# Brocade device configuration
+#
+
+config NET_VENDOR_BROCADE
+ bool "Brocade devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Brocade cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_BROCADE
+
+source "drivers/net/ethernet/brocade/bna/Kconfig"
+
+endif # NET_VENDOR_BROCADE
diff --git a/drivers/net/ethernet/brocade/Makefile b/drivers/net/ethernet/brocade/Makefile
new file mode 100644
index 000000000000..b58238d2df6a
--- /dev/null
+++ b/drivers/net/ethernet/brocade/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Brocade device drivers.
+#
+
+obj-$(CONFIG_BNA) += bna/
diff --git a/drivers/net/ethernet/brocade/bna/Kconfig b/drivers/net/ethernet/brocade/bna/Kconfig
new file mode 100644
index 000000000000..dc2eb526fbf7
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/Kconfig
@@ -0,0 +1,17 @@
+#
+# Brocade network device configuration
+#
+
+config BNA
+ tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
+ depends on PCI
+ ---help---
+ This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
+ cards.
+ To compile this driver as a module, choose M here: the module
+ will be called bna.
+
+ For general information and support, go to the Brocade support
+ website at:
+
+ <http://support.brocade.com>
diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile
new file mode 100644
index 000000000000..74d3abca1960
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/Makefile
@@ -0,0 +1,12 @@
+#
+# Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+# All rights reserved.
+#
+
+obj-$(CONFIG_BNA) += bna.o
+
+bna-objs := bnad.o bnad_ethtool.o bna_enet.o bna_tx_rx.o
+bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o
+bna-objs += cna_fwimg.o
+
+EXTRA_CFLAGS := -Idrivers/net/ethernet/brocade/bna
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
new file mode 100644
index 000000000000..8e627186507c
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -0,0 +1,299 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include "bfa_cee.h"
+#include "bfi_cna.h"
+#include "bfa_ioc.h"
+
+static void bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg);
+static void bfa_cee_format_cee_cfg(void *buffer);
+
+static void
+bfa_cee_format_cee_cfg(void *buffer)
+{
+ struct bfa_cee_attr *cee_cfg = buffer;
+ bfa_cee_format_lldp_cfg(&cee_cfg->lldp_remote);
+}
+
+static void
+bfa_cee_stats_swap(struct bfa_cee_stats *stats)
+{
+ u32 *buffer = (u32 *)stats;
+ int i;
+
+ for (i = 0; i < (sizeof(struct bfa_cee_stats) / sizeof(u32));
+ i++) {
+ buffer[i] = ntohl(buffer[i]);
+ }
+}
+
+static void
+bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg)
+{
+ lldp_cfg->time_to_live =
+ ntohs(lldp_cfg->time_to_live);
+ lldp_cfg->enabled_system_cap =
+ ntohs(lldp_cfg->enabled_system_cap);
+}
+
+/**
+ * bfa_cee_attr_meminfo()
+ *
+ * @brief Returns the size of the DMA memory needed by CEE attributes
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+static u32
+bfa_cee_attr_meminfo(void)
+{
+ return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ);
+}
+
+/**
+ * bfa_cee_stats_meminfo()
+ *
+ * @brief Returns the size of the DMA memory needed by CEE stats
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+static u32
+bfa_cee_stats_meminfo(void)
+{
+ return roundup(sizeof(struct bfa_cee_stats), BFA_DMA_ALIGN_SZ);
+}
+
+/**
+ * bfa_cee_get_attr_isr()
+ *
+ * @brief CEE ISR for get-attributes responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
+{
+ cee->get_attr_status = status;
+ if (status == BFA_STATUS_OK) {
+ memcpy(cee->attr, cee->attr_dma.kva,
+ sizeof(struct bfa_cee_attr));
+ bfa_cee_format_cee_cfg(cee->attr);
+ }
+ cee->get_attr_pending = false;
+ if (cee->cbfn.get_attr_cbfn)
+ cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
+}
+
+/**
+ * bfa_cee_get_stats_isr()
+ *
+ * @brief CEE ISR for get-stats responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status)
+{
+ cee->get_stats_status = status;
+ if (status == BFA_STATUS_OK) {
+ memcpy(cee->stats, cee->stats_dma.kva,
+ sizeof(struct bfa_cee_stats));
+ bfa_cee_stats_swap(cee->stats);
+ }
+ cee->get_stats_pending = false;
+ if (cee->cbfn.get_stats_cbfn)
+ cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
+}
+
+/**
+ * bfa_cee_reset_stats_isr()
+ *
+ * @brief CEE ISR for reset-stats responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
+{
+ cee->reset_stats_status = status;
+ cee->reset_stats_pending = false;
+ if (cee->cbfn.reset_stats_cbfn)
+ cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
+}
+
+/**
+ * bfa_nw_cee_meminfo()
+ *
+ * @brief Returns the size of the DMA memory needed by CEE module
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+u32
+bfa_nw_cee_meminfo(void)
+{
+ return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo();
+}
+
+/**
+ * bfa_nw_cee_mem_claim()
+ *
+ * @brief Initializes CEE DMA memory
+ *
+ * @param[in] cee CEE module pointer
+ * dma_kva Kernel Virtual Address of CEE DMA Memory
+ * dma_pa Physical Address of CEE DMA Memory
+ *
+ * @return void
+ */
+void
+bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
+{
+ cee->attr_dma.kva = dma_kva;
+ cee->attr_dma.pa = dma_pa;
+ cee->stats_dma.kva = dma_kva + bfa_cee_attr_meminfo();
+ cee->stats_dma.pa = dma_pa + bfa_cee_attr_meminfo();
+ cee->attr = (struct bfa_cee_attr *) dma_kva;
+ cee->stats = (struct bfa_cee_stats *)
+ (dma_kva + bfa_cee_attr_meminfo());
+}
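+
+/*
+ * Illustrative call sequence (the caller shown here is an assumption,
+ * not part of this file): the enclosing driver allocates one coherent
+ * DMA area sized by bfa_nw_cee_meminfo() and hands it to mem_claim:
+ *
+ *	len = bfa_nw_cee_meminfo();
+ *	kva = dma_alloc_coherent(&pcidev->dev, len, &pa, GFP_KERNEL);
+ *	if (kva)
+ *		bfa_nw_cee_mem_claim(cee, kva, pa);
+ */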
+
+/**
+ * bfa_cee_isr()
+ *
+ * @brief Handles Mail-box interrupts for CEE module.
+ *
+ * @param[in] cbarg - Pointer to the CEE module data structure.
+ * @param[in] m - Pointer to the mailbox message.
+ *
+ * @return void
+ */
+static void
+bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
+{
+ union bfi_cee_i2h_msg_u *msg;
+ struct bfi_cee_get_rsp *get_rsp;
+ struct bfa_cee *cee = (struct bfa_cee *) cbarg;
+ msg = (union bfi_cee_i2h_msg_u *) m;
+ get_rsp = (struct bfi_cee_get_rsp *) m;
+ switch (msg->mh.msg_id) {
+ case BFI_CEE_I2H_GET_CFG_RSP:
+ bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
+ break;
+ case BFI_CEE_I2H_GET_STATS_RSP:
+ bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
+ break;
+ case BFI_CEE_I2H_RESET_STATS_RSP:
+ bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
+ break;
+ default:
+ BUG_ON(1);
+ }
+}
+
+/**
+ * bfa_cee_notify()
+ *
+ * @brief CEE module IOC event handler.
+ *
+ * @param[in] arg - Pointer to the CEE module data structure
+ * @param[in] event - IOC event type
+ *
+ * @return void
+ */
+static void
+bfa_cee_notify(void *arg, enum bfa_ioc_event event)
+{
+	struct bfa_cee *cee = (struct bfa_cee *) arg;
+
+ switch (event) {
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+		if (cee->get_attr_pending) {
+ cee->get_attr_status = BFA_STATUS_FAILED;
+ cee->get_attr_pending = false;
+ if (cee->cbfn.get_attr_cbfn) {
+ cee->cbfn.get_attr_cbfn(
+ cee->cbfn.get_attr_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+		if (cee->get_stats_pending) {
+ cee->get_stats_status = BFA_STATUS_FAILED;
+ cee->get_stats_pending = false;
+ if (cee->cbfn.get_stats_cbfn) {
+ cee->cbfn.get_stats_cbfn(
+ cee->cbfn.get_stats_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+		if (cee->reset_stats_pending) {
+ cee->reset_stats_status = BFA_STATUS_FAILED;
+ cee->reset_stats_pending = false;
+ if (cee->cbfn.reset_stats_cbfn) {
+ cee->cbfn.reset_stats_cbfn(
+ cee->cbfn.reset_stats_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * bfa_nw_cee_attach()
+ *
+ * @brief CEE module-attach API
+ *
+ * @param[in] cee - Pointer to the CEE module data structure
+ * ioc - Pointer to the ioc module data structure
+ * dev - Pointer to the device driver module data structure
+ * The device driver specific mbox ISR functions have
+ * this pointer as one of the parameters.
+ *
+ * @return void
+ */
+void
+bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
+ void *dev)
+{
+	BUG_ON(cee == NULL);
+ cee->dev = dev;
+ cee->ioc = ioc;
+
+ bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
+ bfa_q_qe_init(&cee->ioc_notify);
+ bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee);
+ bfa_nw_ioc_notify_register(cee->ioc, &cee->ioc_notify);
+}
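+
+/*
+ * Illustrative probe-time order (assumed caller, not part of this
+ * file): attach registers the mailbox ISR and the IOC notifier, so it
+ * precedes the DMA memory claim:
+ *
+ *	bfa_nw_cee_attach(cee, ioc, bnad);
+ *	bfa_nw_cee_mem_claim(cee, kva, pa);
+ */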
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.h b/drivers/net/ethernet/brocade/bna/bfa_cee.h
new file mode 100644
index 000000000000..58d54e98d595
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.h
@@ -0,0 +1,63 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __BFA_CEE_H__
+#define __BFA_CEE_H__
+
+#include "bfa_defs_cna.h"
+#include "bfa_ioc.h"
+
+typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, enum bfa_status status);
+typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, enum bfa_status status);
+typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, enum bfa_status status);
+
+struct bfa_cee_cbfn {
+ bfa_cee_get_attr_cbfn_t get_attr_cbfn;
+ void *get_attr_cbarg;
+ bfa_cee_get_stats_cbfn_t get_stats_cbfn;
+ void *get_stats_cbarg;
+ bfa_cee_reset_stats_cbfn_t reset_stats_cbfn;
+ void *reset_stats_cbarg;
+};
+
+struct bfa_cee {
+ void *dev;
+ bool get_attr_pending;
+ bool get_stats_pending;
+ bool reset_stats_pending;
+ enum bfa_status get_attr_status;
+ enum bfa_status get_stats_status;
+ enum bfa_status reset_stats_status;
+ struct bfa_cee_cbfn cbfn;
+ struct bfa_ioc_notify ioc_notify;
+ struct bfa_cee_attr *attr;
+ struct bfa_cee_stats *stats;
+ struct bfa_dma attr_dma;
+ struct bfa_dma stats_dma;
+ struct bfa_ioc *ioc;
+ struct bfa_mbox_cmd get_cfg_mb;
+ struct bfa_mbox_cmd get_stats_mb;
+ struct bfa_mbox_cmd reset_stats_mb;
+};
+
+u32 bfa_nw_cee_meminfo(void);
+void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
+ u64 dma_pa);
+void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
+
+#endif /* __BFA_CEE_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h
new file mode 100644
index 000000000000..3da1a946ccdd
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h
@@ -0,0 +1,140 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ * @file bfa_cs.h BFA common services
+ */
+
+#ifndef __BFA_CS_H__
+#define __BFA_CS_H__
+
+#include "cna.h"
+
+/**
+ * BFA state machine interfaces
+ */
+
+typedef void (*bfa_sm_t)(void *sm, int event);
+
+/**
+ * oc - object class, e.g. bfa_ioc
+ * st - state, e.g. reset
+ * otype - object type, e.g. struct bfa_ioc
+ * etype - event type, e.g. enum ioc_event
+ */
+#define bfa_sm_state_decl(oc, st, otype, etype) \
+ static void oc ## _sm_ ## st(otype * fsm, etype event)
+
+#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
+#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
+#define bfa_sm_get_state(_sm) ((_sm)->sm)
+#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
+
+/**
+ * For converting from state machine function to state encoding.
+ */
+struct bfa_sm_table {
+ bfa_sm_t sm; /*!< state machine function */
+ int state; /*!< state machine encoding */
+ char *name; /*!< state name for display */
+};
+#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
+
+/**
+ * State machine with entry actions.
+ */
+typedef void (*bfa_fsm_t)(void *fsm, int event);
+
+/**
+ * oc - object class, e.g. bfa_ioc
+ * st - state, e.g. reset
+ * otype - object type, e.g. struct bfa_ioc
+ * etype - event type, e.g. enum ioc_event
+ */
+#define bfa_fsm_state_decl(oc, st, otype, etype) \
+ static void oc ## _sm_ ## st(otype * fsm, etype event); \
+ static void oc ## _sm_ ## st ## _entry(otype * fsm)
+
+#define bfa_fsm_set_state(_fsm, _state) do { \
+ (_fsm)->fsm = (bfa_fsm_t)(_state); \
+ _state ## _entry(_fsm); \
+} while (0)
+
+#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
+#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm)
+#define bfa_fsm_cmp_state(_fsm, _state) \
+ ((_fsm)->fsm == (bfa_fsm_t)(_state))
+
+static inline int
+bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
+{
+ int i = 0;
+
+ while (smt[i].sm && smt[i].sm != sm)
+ i++;
+ return smt[i].state;
+}
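+
+/*
+ * Usage sketch (illustrative; module "foo" and its names are
+ * hypothetical): declaring one state with an entry action and driving
+ * it through the macros above:
+ *
+ *	bfa_fsm_state_decl(foo, stopped, struct foo, enum foo_event);
+ *
+ *	static void foo_sm_stopped_entry(struct foo *foo) { }
+ *	static void foo_sm_stopped(struct foo *foo, enum foo_event ev) { }
+ *
+ *	bfa_fsm_set_state(foo, foo_sm_stopped);    runs foo_sm_stopped_entry()
+ *	bfa_fsm_send_event(foo, FOO_E_START);      dispatches to foo_sm_stopped()
+ */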
+
+/**
+ * Generic wait counter.
+ */
+
+typedef void (*bfa_wc_resume_t) (void *cbarg);
+
+struct bfa_wc {
+ bfa_wc_resume_t wc_resume;
+ void *wc_cbarg;
+ int wc_count;
+};
+
+static inline void
+bfa_wc_up(struct bfa_wc *wc)
+{
+ wc->wc_count++;
+}
+
+static inline void
+bfa_wc_down(struct bfa_wc *wc)
+{
+ wc->wc_count--;
+ if (wc->wc_count == 0)
+ wc->wc_resume(wc->wc_cbarg);
+}
+
+/**
+ * Initialize a waiting counter.
+ */
+static inline void
+bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
+{
+ wc->wc_resume = wc_resume;
+ wc->wc_cbarg = wc_cbarg;
+ wc->wc_count = 0;
+ bfa_wc_up(wc);
+}
+
+/**
+ * Wait for counter to reach zero
+ */
+static inline void
+bfa_wc_wait(struct bfa_wc *wc)
+{
+ bfa_wc_down(wc);
+}
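+
+/*
+ * Typical pattern (illustrative): bfa_wc_init() takes one implicit
+ * reference, the initiator ups the counter once per outstanding
+ * operation, then drops its own reference with bfa_wc_wait(); the
+ * resume callback fires when the last completion calls bfa_wc_down():
+ *
+ *	bfa_wc_init(&wc, foo_resume, foo);	wc_count = 1
+ *	bfa_wc_up(&wc);				once per pending op
+ *	...					completions call bfa_wc_down()
+ *	bfa_wc_wait(&wc);			drop the initial reference
+ */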
+
+#endif /* __BFA_CS_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
new file mode 100644
index 000000000000..2f12d68021d5
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -0,0 +1,296 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __BFA_DEFS_H__
+#define __BFA_DEFS_H__
+
+#include "cna.h"
+#include "bfa_defs_status.h"
+#include "bfa_defs_mfg_comm.h"
+
+#define BFA_STRING_32 32
+#define BFA_VERSION_LEN 64
+
+/**
+ * ---------------------- adapter definitions ------------
+ */
+
+/**
+ * BFA adapter level attributes.
+ */
+enum {
+	BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
+					/*!< adapter serial num length */
+ BFA_ADAPTER_MODEL_NAME_LEN = 16, /*!< model name length */
+ BFA_ADAPTER_MODEL_DESCR_LEN = 128, /*!< model description length */
+ BFA_ADAPTER_MFG_NAME_LEN = 8, /*!< manufacturer name length */
+ BFA_ADAPTER_SYM_NAME_LEN = 64, /*!< adapter symbolic name length */
+ BFA_ADAPTER_OS_TYPE_LEN = 64, /*!< adapter os type length */
+};
+
+struct bfa_adapter_attr {
+ char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
+ char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+ u32 card_type;
+ char model[BFA_ADAPTER_MODEL_NAME_LEN];
+ char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
+ u64 pwwn;
+ char node_symname[FC_SYMNAME_MAX];
+ char hw_ver[BFA_VERSION_LEN];
+ char fw_ver[BFA_VERSION_LEN];
+ char optrom_ver[BFA_VERSION_LEN];
+ char os_type[BFA_ADAPTER_OS_TYPE_LEN];
+ struct bfa_mfg_vpd vpd;
+ struct mac mac;
+
+ u8 nports;
+ u8 max_speed;
+ u8 prototype;
+ char asic_rev;
+
+ u8 pcie_gen;
+ u8 pcie_lanes_orig;
+ u8 pcie_lanes;
+ u8 cna_capable;
+
+ u8 is_mezz;
+ u8 trunk_capable;
+};
+
+/**
+ * ---------------------- IOC definitions ------------
+ */
+
+enum {
+ BFA_IOC_DRIVER_LEN = 16,
+ BFA_IOC_CHIP_REV_LEN = 8,
+};
+
+/**
+ * Driver and firmware versions.
+ */
+struct bfa_ioc_driver_attr {
+ char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */
+ char driver_ver[BFA_VERSION_LEN]; /*!< driver version */
+ char fw_ver[BFA_VERSION_LEN]; /*!< firmware version */
+ char bios_ver[BFA_VERSION_LEN]; /*!< bios version */
+ char efi_ver[BFA_VERSION_LEN]; /*!< EFI version */
+ char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */
+};
+
+/**
+ * IOC PCI device attributes
+ */
+struct bfa_ioc_pci_attr {
+ u16 vendor_id; /*!< PCI vendor ID */
+ u16 device_id; /*!< PCI device ID */
+ u16 ssid; /*!< subsystem ID */
+ u16 ssvid; /*!< subsystem vendor ID */
+ u32 pcifn; /*!< PCI device function */
+ u32 rsvd; /* padding */
+ char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */
+};
+
+/**
+ * IOC states
+ */
+enum bfa_ioc_state {
+ BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */
+ BFA_IOC_RESET = 2, /*!< IOC is in reset state */
+ BFA_IOC_SEMWAIT = 3, /*!< Waiting for IOC h/w semaphore */
+ BFA_IOC_HWINIT = 4, /*!< IOC h/w is being initialized */
+ BFA_IOC_GETATTR = 5, /*!< IOC is being configured */
+ BFA_IOC_OPERATIONAL = 6, /*!< IOC is operational */
+ BFA_IOC_INITFAIL = 7, /*!< IOC hardware failure */
+ BFA_IOC_FAIL = 8, /*!< IOC heart-beat failure */
+ BFA_IOC_DISABLING = 9, /*!< IOC is being disabled */
+ BFA_IOC_DISABLED = 10, /*!< IOC is disabled */
+ BFA_IOC_FWMISMATCH = 11, /*!< IOC f/w different from drivers */
+ BFA_IOC_ENABLING = 12, /*!< IOC is being enabled */
+ BFA_IOC_HWFAIL = 13, /*!< PCI mapping doesn't exist */
+};
+
+/**
+ * IOC firmware stats
+ */
+struct bfa_fw_ioc_stats {
+ u32 enable_reqs;
+ u32 disable_reqs;
+ u32 get_attr_reqs;
+ u32 dbg_sync;
+ u32 dbg_dump;
+ u32 unknown_reqs;
+};
+
+/**
+ * IOC driver stats
+ */
+struct bfa_ioc_drv_stats {
+ u32 ioc_isrs;
+ u32 ioc_enables;
+ u32 ioc_disables;
+ u32 ioc_hbfails;
+ u32 ioc_boots;
+ u32 stats_tmos;
+ u32 hb_count;
+ u32 disable_reqs;
+ u32 enable_reqs;
+ u32 disable_replies;
+ u32 enable_replies;
+ u32 rsvd;
+};
+
+/**
+ * IOC statistics
+ */
+struct bfa_ioc_stats {
+ struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */
+ struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */
+};
+
+enum bfa_ioc_type {
+ BFA_IOC_TYPE_FC = 1,
+ BFA_IOC_TYPE_FCoE = 2,
+ BFA_IOC_TYPE_LL = 3,
+};
+
+/**
+ * IOC attributes returned in queries
+ */
+struct bfa_ioc_attr {
+ enum bfa_ioc_type ioc_type;
+ enum bfa_ioc_state state; /*!< IOC state */
+ struct bfa_adapter_attr adapter_attr; /*!< HBA attributes */
+ struct bfa_ioc_driver_attr driver_attr; /*!< driver attr */
+ struct bfa_ioc_pci_attr pci_attr;
+ u8 port_id; /*!< port number */
+ u8 port_mode; /*!< enum bfa_mode */
+ u8 cap_bm; /*!< capability */
+ u8 port_mode_cfg; /*!< enum bfa_mode */
+ u8 rsvd[4]; /*!< 64bit align */
+};
+
+/**
+ * Adapter capability mask definition
+ */
+enum {
+ BFA_CM_HBA = 0x01,
+ BFA_CM_CNA = 0x02,
+ BFA_CM_NIC = 0x04,
+};
+
+/**
+ * ---------------------- mfg definitions ------------
+ */
+
+/**
+ * Checksum size
+ */
+#define BFA_MFG_CHKSUM_SIZE 16
+
+#define BFA_MFG_PARTNUM_SIZE 14
+#define BFA_MFG_SUPPLIER_ID_SIZE 10
+#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20
+#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
+#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
+
+#pragma pack(1)
+
+/**
+ * @brief BFA adapter manufacturing block definition.
+ *
+ * All numerical fields are in big-endian format.
+ */
+struct bfa_mfg_block {
+ u8 version; /*!< manufacturing block version */
+ u8 mfg_sig[3]; /*!< characters 'M', 'F', 'G' */
+ u16 mfgsize; /*!< mfg block size */
+ u16 u16_chksum; /*!< old u16 checksum */
+ char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+ char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
+ u8 mfg_day; /*!< manufacturing day */
+ u8 mfg_month; /*!< manufacturing month */
+ u16 mfg_year; /*!< manufacturing year */
+ u64 mfg_wwn; /*!< wwn base for this adapter */
+ u8 num_wwn; /*!< number of wwns assigned */
+ u8 mfg_speeds; /*!< speeds allowed for this adapter */
+ u8 rsv[2];
+ char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
+ char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
+	char	supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
+	char	supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
+ mac_t mfg_mac; /*!< mac address */
+ u8 num_mac; /*!< number of mac addresses */
+ u8 rsv2;
+ u32 card_type; /*!< card type */
+ char cap_nic; /*!< capability nic */
+ char cap_cna; /*!< capability cna */
+ char cap_hba; /*!< capability hba */
+ char cap_fc16g; /*!< capability fc 16g */
+ char cap_sriov; /*!< capability sriov */
+ char cap_mezz; /*!< capability mezz */
+ u8 rsv3;
+ u8 mfg_nports; /*!< number of ports */
+ char media[8]; /*!< xfi/xaui */
+ char initial_mode[8];/*!< initial mode: hba/cna/nic */
+ u8 rsv4[84];
+ u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
+};
+
+#pragma pack()
+
+/**
+ * ---------------------- pci definitions ------------
+ */
+
+/*
+ * PCI device ID information
+ */
+enum {
+ BFA_PCI_DEVICE_ID_CT2 = 0x22,
+};
+
+#define bfa_asic_id_ct(device) \
+ ((device) == PCI_DEVICE_ID_BROCADE_CT || \
+ (device) == PCI_DEVICE_ID_BROCADE_CT_FC)
+#define bfa_asic_id_ct2(device) \
+ ((device) == BFA_PCI_DEVICE_ID_CT2)
+#define bfa_asic_id_ctc(device) \
+ (bfa_asic_id_ct(device) || bfa_asic_id_ct2(device))
+
+/**
+ * PCI sub-system device and vendor ID information
+ */
+enum {
+ BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
+ BFA_PCI_CT2_SSID_FCoE = 0x22,
+ BFA_PCI_CT2_SSID_ETH = 0x23,
+ BFA_PCI_CT2_SSID_FC = 0x24,
+};
+
+enum bfa_mode {
+ BFA_MODE_HBA = 1,
+ BFA_MODE_CNA = 2,
+ BFA_MODE_NIC = 3
+};
+
+#endif /* __BFA_DEFS_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
new file mode 100644
index 000000000000..8ab33ee2c2bc
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
@@ -0,0 +1,229 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BFA_DEFS_CNA_H__
+#define __BFA_DEFS_CNA_H__
+
+#include "bfa_defs.h"
+
+/**
+ * @brief
+ * FC physical port statistics.
+ */
+struct bfa_port_fc_stats {
+	u64	secs_reset;	/*!< Seconds since stats were reset */
+ u64 tx_frames; /*!< Tx frames */
+ u64 tx_words; /*!< Tx words */
+ u64 tx_lip; /*!< Tx LIP */
+ u64 tx_nos; /*!< Tx NOS */
+ u64 tx_ols; /*!< Tx OLS */
+ u64 tx_lr; /*!< Tx LR */
+ u64 tx_lrr; /*!< Tx LRR */
+ u64 rx_frames; /*!< Rx frames */
+ u64 rx_words; /*!< Rx words */
+ u64 lip_count; /*!< Rx LIP */
+ u64 nos_count; /*!< Rx NOS */
+ u64 ols_count; /*!< Rx OLS */
+ u64 lr_count; /*!< Rx LR */
+ u64 lrr_count; /*!< Rx LRR */
+ u64 invalid_crcs; /*!< Rx CRC err frames */
+ u64 invalid_crc_gd_eof; /*!< Rx CRC err good EOF frames */
+ u64 undersized_frm; /*!< Rx undersized frames */
+ u64 oversized_frm; /*!< Rx oversized frames */
+ u64 bad_eof_frm; /*!< Rx frames with bad EOF */
+ u64 error_frames; /*!< Errored frames */
+ u64 dropped_frames; /*!< Dropped frames */
+ u64 link_failures; /*!< Link Failure (LF) count */
+ u64 loss_of_syncs; /*!< Loss of sync count */
+ u64 loss_of_signals; /*!< Loss of signal count */
+ u64 primseq_errs; /*!< Primitive sequence protocol err. */
+ u64 bad_os_count; /*!< Invalid ordered sets */
+ u64 err_enc_out; /*!< Encoding err nonframe_8b10b */
+ u64 err_enc; /*!< Encoding err frame_8b10b */
+ u64 bbsc_frames_lost; /*!< Credit Recovery-Frames Lost */
+ u64 bbsc_credits_lost; /*!< Credit Recovery-Credits Lost */
+ u64 bbsc_link_resets; /*!< Credit Recovery-Link Resets */
+};
+
+/**
+ * @brief
+ * Eth Physical Port statistics.
+ */
+struct bfa_port_eth_stats {
+	u64	secs_reset;	/*!< Seconds since stats were reset */
+ u64 frame_64; /*!< Frames 64 bytes */
+ u64 frame_65_127; /*!< Frames 65-127 bytes */
+ u64 frame_128_255; /*!< Frames 128-255 bytes */
+ u64 frame_256_511; /*!< Frames 256-511 bytes */
+ u64 frame_512_1023; /*!< Frames 512-1023 bytes */
+ u64 frame_1024_1518; /*!< Frames 1024-1518 bytes */
+ u64 frame_1519_1522; /*!< Frames 1519-1522 bytes */
+ u64 tx_bytes; /*!< Tx bytes */
+ u64 tx_packets; /*!< Tx packets */
+ u64 tx_mcast_packets; /*!< Tx multicast packets */
+ u64 tx_bcast_packets; /*!< Tx broadcast packets */
+ u64 tx_control_frame; /*!< Tx control frame */
+ u64 tx_drop; /*!< Tx drops */
+ u64 tx_jabber; /*!< Tx jabber */
+ u64 tx_fcs_error; /*!< Tx FCS errors */
+ u64 tx_fragments; /*!< Tx fragments */
+ u64 rx_bytes; /*!< Rx bytes */
+ u64 rx_packets; /*!< Rx packets */
+ u64 rx_mcast_packets; /*!< Rx multicast packets */
+ u64 rx_bcast_packets; /*!< Rx broadcast packets */
+ u64 rx_control_frames; /*!< Rx control frames */
+ u64 rx_unknown_opcode; /*!< Rx unknown opcode */
+ u64 rx_drop; /*!< Rx drops */
+ u64 rx_jabber; /*!< Rx jabber */
+ u64 rx_fcs_error; /*!< Rx FCS errors */
+ u64 rx_alignment_error; /*!< Rx alignment errors */
+ u64 rx_frame_length_error; /*!< Rx frame len errors */
+ u64 rx_code_error; /*!< Rx code errors */
+ u64 rx_fragments; /*!< Rx fragments */
+ u64 rx_pause; /*!< Rx pause */
+ u64 rx_zero_pause; /*!< Rx zero pause */
+ u64 tx_pause; /*!< Tx pause */
+ u64 tx_zero_pause; /*!< Tx zero pause */
+ u64 rx_fcoe_pause; /*!< Rx FCoE pause */
+ u64 rx_fcoe_zero_pause; /*!< Rx FCoE zero pause */
+ u64 tx_fcoe_pause; /*!< Tx FCoE pause */
+ u64 tx_fcoe_zero_pause; /*!< Tx FCoE zero pause */
+ u64 rx_iscsi_pause; /*!< Rx iSCSI pause */
+ u64 rx_iscsi_zero_pause; /*!< Rx iSCSI zero pause */
+ u64 tx_iscsi_pause; /*!< Tx iSCSI pause */
+ u64 tx_iscsi_zero_pause; /*!< Tx iSCSI zero pause */
+};
+
+/**
+ * @brief
+ * Port statistics.
+ */
+union bfa_port_stats_u {
+ struct bfa_port_fc_stats fc;
+ struct bfa_port_eth_stats eth;
+};
+
+#pragma pack(1)
+
+#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
+#define BFA_CEE_DCBX_MAX_PRIORITY (8)
+#define BFA_CEE_DCBX_MAX_PGID (8)
+
+#define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001
+#define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002
+#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004
+#define BFA_CEE_LLDP_SYS_CAP_WLAN_AP 0x0008
+#define BFA_CEE_LLDP_SYS_CAP_ROUTER 0x0010
+#define BFA_CEE_LLDP_SYS_CAP_TELEPHONE 0x0020
+#define BFA_CEE_LLDP_SYS_CAP_DOCSIS_CD 0x0040
+#define BFA_CEE_LLDP_SYS_CAP_STATION 0x0080
+#define BFA_CEE_LLDP_SYS_CAP_CVLAN 0x0100
+#define BFA_CEE_LLDP_SYS_CAP_SVLAN 0x0200
+#define BFA_CEE_LLDP_SYS_CAP_TPMR 0x0400
+
+/* LLDP string type */
+struct bfa_cee_lldp_str {
+ u8 sub_type;
+ u8 len;
+ u8 rsvd[2];
+ u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
+};
+
+/* LLDP parameters */
+struct bfa_cee_lldp_cfg {
+ struct bfa_cee_lldp_str chassis_id;
+ struct bfa_cee_lldp_str port_id;
+ struct bfa_cee_lldp_str port_desc;
+ struct bfa_cee_lldp_str sys_name;
+ struct bfa_cee_lldp_str sys_desc;
+ struct bfa_cee_lldp_str mgmt_addr;
+ u16 time_to_live;
+ u16 enabled_system_cap;
+};
+
+enum bfa_cee_dcbx_version {
+ DCBX_PROTOCOL_PRECEE = 1,
+ DCBX_PROTOCOL_CEE = 2,
+};
+
+enum bfa_cee_lls {
+ /* LLS is down because the TLV not sent by the peer */
+ CEE_LLS_DOWN_NO_TLV = 0,
+ /* LLS is down as advertised by the peer */
+ CEE_LLS_DOWN = 1,
+ CEE_LLS_UP = 2,
+};
+
+/* CEE/DCBX parameters */
+struct bfa_cee_dcbx_cfg {
+ u8 pgid[BFA_CEE_DCBX_MAX_PRIORITY];
+ u8 pg_percentage[BFA_CEE_DCBX_MAX_PGID];
+	u8 pfc_primap; /* bitmap of priorities with PFC enabled */
+	u8 fcoe_primap; /* bitmap of priorities used for FCoE traffic */
+	u8 iscsi_primap; /* bitmap of priorities used for iSCSI traffic */
+	u8 dcbx_version; /* operating version: CEE or pre-CEE */
+ u8 lls_fcoe; /* FCoE Logical Link Status */
+ u8 lls_lan; /* LAN Logical Link Status */
+ u8 rsvd[2];
+};
+
+/* CEE status */
+/* Made multi-state for the benefit of the port list command */
+enum bfa_cee_status {
+ CEE_UP = 0,
+ CEE_PHY_UP = 1,
+ CEE_LOOPBACK = 2,
+ CEE_PHY_DOWN = 3,
+};
+
+/* CEE Query */
+struct bfa_cee_attr {
+ u8 cee_status;
+ u8 error_reason;
+ struct bfa_cee_lldp_cfg lldp_remote;
+ struct bfa_cee_dcbx_cfg dcbx_remote;
+ mac_t src_mac;
+ u8 link_speed;
+ u8 nw_priority;
+ u8 filler[2];
+};
+
+/* LLDP/DCBX/CEE Statistics */
+struct bfa_cee_stats {
+ u32 lldp_tx_frames; /*!< LLDP Tx Frames */
+ u32 lldp_rx_frames; /*!< LLDP Rx Frames */
+ u32 lldp_rx_frames_invalid; /*!< LLDP Rx Frames invalid */
+ u32 lldp_rx_frames_new; /*!< LLDP Rx Frames new */
+ u32 lldp_tlvs_unrecognized; /*!< LLDP Rx unrecognized TLVs */
+ u32 lldp_rx_shutdown_tlvs; /*!< LLDP Rx shutdown TLVs */
+ u32 lldp_info_aged_out; /*!< LLDP remote info aged out */
+ u32 dcbx_phylink_ups; /*!< DCBX phy link ups */
+ u32 dcbx_phylink_downs; /*!< DCBX phy link downs */
+ u32 dcbx_rx_tlvs; /*!< DCBX Rx TLVs */
+ u32 dcbx_rx_tlvs_invalid; /*!< DCBX Rx TLVs invalid */
+ u32 dcbx_control_tlv_error; /*!< DCBX control TLV errors */
+ u32 dcbx_feature_tlv_error; /*!< DCBX feature TLV errors */
+ u32 dcbx_cee_cfg_new; /*!< DCBX new CEE cfg rcvd */
+ u32 cee_status_down; /*!< CEE status down */
+ u32 cee_status_up; /*!< CEE status up */
+ u32 cee_hw_cfg_changed; /*!< CEE hw cfg changed */
+ u32 cee_rx_invalid_cfg; /*!< CEE invalid cfg */
+};
+
+#pragma pack()
+
+#endif /* __BFA_DEFS_CNA_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
new file mode 100644
index 000000000000..6681fe87c1e1
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
@@ -0,0 +1,171 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BFA_DEFS_MFG_COMM_H__
+#define __BFA_DEFS_MFG_COMM_H__
+
+#include "bfa_defs.h"
+
+/**
+ * Manufacturing block version
+ */
+#define BFA_MFG_VERSION 3
+#define BFA_MFG_VERSION_UNINIT 0xFF
+
+/**
+ * Manufacturing block encrypted version
+ */
+#define BFA_MFG_ENC_VER 2
+
+/**
+ * Manufacturing block version 1 length
+ */
+#define BFA_MFG_VER1_LEN 128
+
+/**
+ * Manufacturing block header length
+ */
+#define BFA_MFG_HDR_LEN 4
+
+#define BFA_MFG_SERIALNUM_SIZE 11
+#define STRSZ(_n)	(((_n) + 4) & ~3) /* _n chars + NUL, 4-byte aligned */
+
+/**
+ * Manufacturing card type
+ */
+enum {
+ BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */
+ BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */
+ BFA_MFG_TYPE_FC8P1 = 815, /*!< 8G 1port FC card */
+ BFA_MFG_TYPE_FC4P2 = 425, /*!< 4G 2port FC card */
+ BFA_MFG_TYPE_FC4P1 = 415, /*!< 4G 1port FC card */
+ BFA_MFG_TYPE_CNA10P2 = 1020, /*!< 10G 2port CNA card */
+ BFA_MFG_TYPE_CNA10P1 = 1010, /*!< 10G 1port CNA card */
+ BFA_MFG_TYPE_JAYHAWK = 804, /*!< Jayhawk mezz card */
+ BFA_MFG_TYPE_WANCHESE = 1007, /*!< Wanchese mezz card */
+ BFA_MFG_TYPE_ASTRA = 807, /*!< Astra mezz card */
+ BFA_MFG_TYPE_LIGHTNING_P0 = 902, /*!< Lightning mezz card - old */
+ BFA_MFG_TYPE_LIGHTNING = 1741, /*!< Lightning mezz card */
+ BFA_MFG_TYPE_PROWLER_F = 1560, /*!< Prowler FC only cards */
+ BFA_MFG_TYPE_PROWLER_N = 1410, /*!< Prowler NIC only cards */
+ BFA_MFG_TYPE_PROWLER_C = 1710, /*!< Prowler CNA only cards */
+ BFA_MFG_TYPE_PROWLER_D = 1860, /*!< Prowler Dual cards */
+ BFA_MFG_TYPE_CHINOOK = 1867, /*!< Chinook cards */
+ BFA_MFG_TYPE_INVALID = 0, /*!< Invalid card type */
+};
+
+#pragma pack(1)
+
+/**
+ * Check if Mezz card
+ */
+#define bfa_mfg_is_mezz(type) (( \
+ (type) == BFA_MFG_TYPE_JAYHAWK || \
+ (type) == BFA_MFG_TYPE_WANCHESE || \
+ (type) == BFA_MFG_TYPE_ASTRA || \
+ (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
+ (type) == BFA_MFG_TYPE_LIGHTNING || \
+ (type) == BFA_MFG_TYPE_CHINOOK))
+
+enum {
+ CB_GPIO_TTV = (1), /*!< TTV debug capable cards */
+ CB_GPIO_FC8P2 = (2), /*!< 8G 2port FC card */
+ CB_GPIO_FC8P1 = (3), /*!< 8G 1port FC card */
+ CB_GPIO_FC4P2 = (4), /*!< 4G 2port FC card */
+ CB_GPIO_FC4P1 = (5), /*!< 4G 1port FC card */
+ CB_GPIO_DFLY = (6), /*!< 8G 2port FC mezzanine card */
+ CB_GPIO_PROTO = (1 << 7) /*!< 8G 2port FC prototypes */
+};
+
+#define bfa_mfg_adapter_prop_init_gpio(gpio, card_type, prop) \
+do { \
+ if ((gpio) & CB_GPIO_PROTO) { \
+ (prop) |= BFI_ADAPTER_PROTO; \
+ (gpio) &= ~CB_GPIO_PROTO; \
+ } \
+ switch ((gpio)) { \
+ case CB_GPIO_TTV: \
+		(prop) |= BFI_ADAPTER_TTV;	/* fall through */	\
+ case CB_GPIO_DFLY: \
+ case CB_GPIO_FC8P2: \
+ (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \
+ (card_type) = BFA_MFG_TYPE_FC8P2; \
+ break; \
+ case CB_GPIO_FC8P1: \
+ (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \
+ (card_type) = BFA_MFG_TYPE_FC8P1; \
+ break; \
+ case CB_GPIO_FC4P2: \
+ (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \
+ (card_type) = BFA_MFG_TYPE_FC4P2; \
+ break; \
+ case CB_GPIO_FC4P1: \
+ (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \
+ (card_type) = BFA_MFG_TYPE_FC4P1; \
+ break; \
+ default: \
+ (prop) |= BFI_ADAPTER_UNSUPP; \
+ (card_type) = BFA_MFG_TYPE_INVALID; \
+ } \
+} while (0)
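+
+/*
+ * Illustrative invocation (the register name is an assumption): gpio
+ * is read from the board straps and decoded in place:
+ *
+ *	u32 gpio = readl(rb + HOSTFN_GPIO_REG);
+ *	bfa_mfg_adapter_prop_init_gpio(gpio, card_type, prop);
+ */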
+
+/**
+ * VPD data length
+ */
+#define BFA_MFG_VPD_LEN 512
+#define BFA_MFG_VPD_LEN_INVALID 0
+
+#define BFA_MFG_VPD_PCI_HDR_OFF 137
+#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */
+#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */
+
+/**
+ * VPD vendor tag
+ */
+enum {
+ BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */
+ BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */
+ BFA_MFG_VPD_HP = 2, /*!< vendor HP */
+ BFA_MFG_VPD_DELL = 3, /*!< vendor DELL */
+ BFA_MFG_VPD_PCI_IBM = 0x08, /*!< PCI VPD IBM */
+ BFA_MFG_VPD_PCI_HP = 0x10, /*!< PCI VPD HP */
+ BFA_MFG_VPD_PCI_DELL = 0x20, /*!< PCI VPD DELL */
+ BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */
+};
+
+/**
+ * @brief BFA adapter flash vpd data definition.
+ *
+ * All numerical fields are in big-endian format.
+ */
+struct bfa_mfg_vpd {
+ u8 version; /*!< vpd data version */
+ u8 vpd_sig[3]; /*!< characters 'V', 'P', 'D' */
+ u8 chksum; /*!< u8 checksum */
+ u8 vendor; /*!< vendor */
+ u8 len; /*!< vpd data length excluding header */
+ u8 rsv;
+ u8 data[BFA_MFG_VPD_LEN]; /*!< vpd data */
+};
+
+#pragma pack()
+
+#endif /* __BFA_DEFS_MFG_COMM_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
new file mode 100644
index 000000000000..7c5fe6c2e80e
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
@@ -0,0 +1,216 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BFA_DEFS_STATUS_H__
+#define __BFA_DEFS_STATUS_H__
+
+/**
+ * API status return values
+ *
+ * NOTE: The error msgs are auto-generated from the comments. Only single-line
+ * comments are supported.
+ */
+enum bfa_status {
+ BFA_STATUS_OK = 0,
+ BFA_STATUS_FAILED = 1,
+ BFA_STATUS_EINVAL = 2,
+ BFA_STATUS_ENOMEM = 3,
+ BFA_STATUS_ENOSYS = 4,
+ BFA_STATUS_ETIMER = 5,
+ BFA_STATUS_EPROTOCOL = 6,
+ BFA_STATUS_ENOFCPORTS = 7,
+ BFA_STATUS_NOFLASH = 8,
+ BFA_STATUS_BADFLASH = 9,
+ BFA_STATUS_SFP_UNSUPP = 10,
+ BFA_STATUS_UNKNOWN_VFID = 11,
+ BFA_STATUS_DATACORRUPTED = 12,
+ BFA_STATUS_DEVBUSY = 13,
+ BFA_STATUS_ABORTED = 14,
+ BFA_STATUS_NODEV = 15,
+ BFA_STATUS_HDMA_FAILED = 16,
+ BFA_STATUS_FLASH_BAD_LEN = 17,
+ BFA_STATUS_UNKNOWN_LWWN = 18,
+ BFA_STATUS_UNKNOWN_RWWN = 19,
+ BFA_STATUS_FCPT_LS_RJT = 20,
+ BFA_STATUS_VPORT_EXISTS = 21,
+ BFA_STATUS_VPORT_MAX = 22,
+ BFA_STATUS_UNSUPP_SPEED = 23,
+ BFA_STATUS_INVLD_DFSZ = 24,
+ BFA_STATUS_CNFG_FAILED = 25,
+ BFA_STATUS_CMD_NOTSUPP = 26,
+ BFA_STATUS_NO_ADAPTER = 27,
+ BFA_STATUS_LINKDOWN = 28,
+ BFA_STATUS_FABRIC_RJT = 29,
+ BFA_STATUS_UNKNOWN_VWWN = 30,
+ BFA_STATUS_NSLOGIN_FAILED = 31,
+ BFA_STATUS_NO_RPORTS = 32,
+ BFA_STATUS_NSQUERY_FAILED = 33,
+ BFA_STATUS_PORT_OFFLINE = 34,
+ BFA_STATUS_RPORT_OFFLINE = 35,
+ BFA_STATUS_TGTOPEN_FAILED = 36,
+ BFA_STATUS_BAD_LUNS = 37,
+ BFA_STATUS_IO_FAILURE = 38,
+ BFA_STATUS_NO_FABRIC = 39,
+ BFA_STATUS_EBADF = 40,
+ BFA_STATUS_EINTR = 41,
+ BFA_STATUS_EIO = 42,
+ BFA_STATUS_ENOTTY = 43,
+ BFA_STATUS_ENXIO = 44,
+ BFA_STATUS_EFOPEN = 45,
+ BFA_STATUS_VPORT_WWN_BP = 46,
+ BFA_STATUS_PORT_NOT_DISABLED = 47,
+ BFA_STATUS_BADFRMHDR = 48,
+ BFA_STATUS_BADFRMSZ = 49,
+ BFA_STATUS_MISSINGFRM = 50,
+ BFA_STATUS_LINKTIMEOUT = 51,
+ BFA_STATUS_NO_FCPIM_NEXUS = 52,
+ BFA_STATUS_CHECKSUM_FAIL = 53,
+ BFA_STATUS_GZME_FAILED = 54,
+ BFA_STATUS_SCSISTART_REQD = 55,
+ BFA_STATUS_IOC_FAILURE = 56,
+ BFA_STATUS_INVALID_WWN = 57,
+ BFA_STATUS_MISMATCH = 58,
+ BFA_STATUS_IOC_ENABLED = 59,
+ BFA_STATUS_ADAPTER_ENABLED = 60,
+ BFA_STATUS_IOC_NON_OP = 61,
+ BFA_STATUS_ADDR_MAP_FAILURE = 62,
+ BFA_STATUS_SAME_NAME = 63,
+ BFA_STATUS_PENDING = 64,
+ BFA_STATUS_8G_SPD = 65,
+ BFA_STATUS_4G_SPD = 66,
+ BFA_STATUS_AD_IS_ENABLE = 67,
+ BFA_STATUS_EINVAL_TOV = 68,
+ BFA_STATUS_EINVAL_QDEPTH = 69,
+ BFA_STATUS_VERSION_FAIL = 70,
+ BFA_STATUS_DIAG_BUSY = 71,
+ BFA_STATUS_BEACON_ON = 72,
+ BFA_STATUS_BEACON_OFF = 73,
+ BFA_STATUS_LBEACON_ON = 74,
+ BFA_STATUS_LBEACON_OFF = 75,
+ BFA_STATUS_PORT_NOT_INITED = 76,
+ BFA_STATUS_RPSC_ENABLED = 77,
+ BFA_STATUS_ENOFSAVE = 78,
+ BFA_STATUS_BAD_FILE = 79,
+ BFA_STATUS_RLIM_EN = 80,
+ BFA_STATUS_RLIM_DIS = 81,
+ BFA_STATUS_IOC_DISABLED = 82,
+ BFA_STATUS_ADAPTER_DISABLED = 83,
+ BFA_STATUS_BIOS_DISABLED = 84,
+ BFA_STATUS_AUTH_ENABLED = 85,
+ BFA_STATUS_AUTH_DISABLED = 86,
+ BFA_STATUS_ERROR_TRL_ENABLED = 87,
+ BFA_STATUS_ERROR_QOS_ENABLED = 88,
+ BFA_STATUS_NO_SFP_DEV = 89,
+ BFA_STATUS_MEMTEST_FAILED = 90,
+ BFA_STATUS_INVALID_DEVID = 91,
+ BFA_STATUS_QOS_ENABLED = 92,
+ BFA_STATUS_QOS_DISABLED = 93,
+ BFA_STATUS_INCORRECT_DRV_CONFIG = 94,
+ BFA_STATUS_REG_FAIL = 95,
+ BFA_STATUS_IM_INV_CODE = 96,
+ BFA_STATUS_IM_INV_VLAN = 97,
+ BFA_STATUS_IM_INV_ADAPT_NAME = 98,
+ BFA_STATUS_IM_LOW_RESOURCES = 99,
+ BFA_STATUS_IM_VLANID_IS_PVID = 100,
+ BFA_STATUS_IM_VLANID_EXISTS = 101,
+ BFA_STATUS_IM_FW_UPDATE_FAIL = 102,
+ BFA_STATUS_PORTLOG_ENABLED = 103,
+ BFA_STATUS_PORTLOG_DISABLED = 104,
+ BFA_STATUS_FILE_NOT_FOUND = 105,
+ BFA_STATUS_QOS_FC_ONLY = 106,
+ BFA_STATUS_RLIM_FC_ONLY = 107,
+ BFA_STATUS_CT_SPD = 108,
+ BFA_STATUS_LEDTEST_OP = 109,
+ BFA_STATUS_CEE_NOT_DN = 110,
+ BFA_STATUS_10G_SPD = 111,
+ BFA_STATUS_IM_INV_TEAM_NAME = 112,
+ BFA_STATUS_IM_DUP_TEAM_NAME = 113,
+ BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114,
+ BFA_STATUS_IM_ADAPT_HAS_VLANS = 115,
+ BFA_STATUS_IM_PVID_MISMATCH = 116,
+ BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117,
+ BFA_STATUS_IM_MTU_MISMATCH = 118,
+ BFA_STATUS_IM_RSS_MISMATCH = 119,
+ BFA_STATUS_IM_HDS_MISMATCH = 120,
+ BFA_STATUS_IM_OFFLOAD_MISMATCH = 121,
+ BFA_STATUS_IM_PORT_PARAMS = 122,
+ BFA_STATUS_IM_PORT_NOT_IN_TEAM = 123,
+ BFA_STATUS_IM_CANNOT_REM_PRI = 124,
+ BFA_STATUS_IM_MAX_PORTS_REACHED = 125,
+ BFA_STATUS_IM_LAST_PORT_DELETE = 126,
+ BFA_STATUS_IM_NO_DRIVER = 127,
+ BFA_STATUS_IM_MAX_VLANS_REACHED = 128,
+ BFA_STATUS_TOMCAT_SPD_NOT_ALLOWED = 129,
+ BFA_STATUS_NO_MINPORT_DRIVER = 130,
+ BFA_STATUS_CARD_TYPE_MISMATCH = 131,
+ BFA_STATUS_BAD_ASICBLK = 132,
+ BFA_STATUS_NO_DRIVER = 133,
+ BFA_STATUS_INVALID_MAC = 134,
+ BFA_STATUS_IM_NO_VLAN = 135,
+ BFA_STATUS_IM_ETH_LB_FAILED = 136,
+ BFA_STATUS_IM_PVID_REMOVE = 137,
+ BFA_STATUS_IM_PVID_EDIT = 138,
+ BFA_STATUS_CNA_NO_BOOT = 139,
+ BFA_STATUS_IM_PVID_NON_ZERO = 140,
+ BFA_STATUS_IM_INETCFG_LOCK_FAILED = 141,
+ BFA_STATUS_IM_GET_INETCFG_FAILED = 142,
+ BFA_STATUS_IM_NOT_BOUND = 143,
+ BFA_STATUS_INSUFFICIENT_PERMS = 144,
+ BFA_STATUS_IM_INV_VLAN_NAME = 145,
+ BFA_STATUS_CMD_NOTSUPP_CNA = 146,
+ BFA_STATUS_IM_PASSTHRU_EDIT = 147,
+ BFA_STATUS_IM_BIND_FAILED = 148,
+ BFA_STATUS_IM_UNBIND_FAILED = 149,
+ BFA_STATUS_IM_PORT_IN_TEAM = 150,
+ BFA_STATUS_IM_VLAN_NOT_FOUND = 151,
+ BFA_STATUS_IM_TEAM_NOT_FOUND = 152,
+ BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153,
+ BFA_STATUS_PBC = 154,
+ BFA_STATUS_DEVID_MISSING = 155,
+ BFA_STATUS_BAD_FWCFG = 156,
+ BFA_STATUS_CREATE_FILE = 157,
+ BFA_STATUS_INVALID_VENDOR = 158,
+ BFA_STATUS_SFP_NOT_READY = 159,
+ BFA_STATUS_FLASH_UNINIT = 160,
+ BFA_STATUS_FLASH_EMPTY = 161,
+ BFA_STATUS_FLASH_CKFAIL = 162,
+ BFA_STATUS_TRUNK_UNSUPP = 163,
+ BFA_STATUS_TRUNK_ENABLED = 164,
+ BFA_STATUS_TRUNK_DISABLED = 165,
+ BFA_STATUS_TRUNK_ERROR_TRL_ENABLED = 166,
+ BFA_STATUS_BOOT_CODE_UPDATED = 167,
+ BFA_STATUS_BOOT_VERSION = 168,
+ BFA_STATUS_CARDTYPE_MISSING = 169,
+ BFA_STATUS_INVALID_CARDTYPE = 170,
+ BFA_STATUS_NO_TOPOLOGY_FOR_CNA = 171,
+ BFA_STATUS_IM_VLAN_OVER_TEAM_DELETE_FAILED = 172,
+ BFA_STATUS_ETHBOOT_ENABLED = 173,
+ BFA_STATUS_ETHBOOT_DISABLED = 174,
+ BFA_STATUS_IOPROFILE_OFF = 175,
+ BFA_STATUS_NO_PORT_INSTANCE = 176,
+ BFA_STATUS_BOOT_CODE_TIMEDOUT = 177,
+ BFA_STATUS_NO_VPORT_LOCK = 178,
+ BFA_STATUS_VPORT_NO_CNFG = 179,
+ BFA_STATUS_MAX_VAL
+};
+
+enum bfa_eproto_status {
+ BFA_EPROTO_BAD_ACCEPT = 0,
+ BFA_EPROTO_UNKNOWN_RSP = 1
+};
+
+#endif /* __BFA_DEFS_STATUS_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
new file mode 100644
index 000000000000..b0307a00a109
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -0,0 +1,2473 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include "bfa_ioc.h"
+#include "bfi_reg.h"
+#include "bfa_defs.h"
+
+/**
+ * IOC local definitions
+ */
+
+/**
+ * ASIC-specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
+ */
+
+#define bfa_ioc_firmware_lock(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
+#define bfa_ioc_firmware_unlock(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
+#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
+#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
+#define bfa_ioc_notify_fail(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_start(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
+#define bfa_ioc_sync_join(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
+#define bfa_ioc_sync_leave(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
+#define bfa_ioc_sync_ack(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
+#define bfa_ioc_sync_complete(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
+
+#define bfa_ioc_mbox_cmd_pending(__ioc) \
+ (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
+ readl((__ioc)->ioc_regs.hfn_mbox_cmd))
+
+static bool bfa_nw_auto_recover = true;
+
+/*
+ * forward declarations
+ */
+static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
+static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
+static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
+static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
+static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
+static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
+static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
+static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
+static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
+static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
+static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
+static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
+static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
+static void bfa_ioc_recover(struct bfa_ioc *ioc);
+static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
+static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
+static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
+static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
+static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
+static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
+static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
+static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
+static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
+static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
+static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
+ u32 boot_param);
+static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
+static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
+ char *serial_num);
+static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
+ char *fw_ver);
+static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
+ char *chip_rev);
+static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
+ char *optrom_ver);
+static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
+ char *manufacturer);
+static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
+static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
+
+/**
+ * IOC state machine definitions/declarations
+ */
+enum ioc_event {
+ IOC_E_RESET = 1, /*!< IOC reset request */
+ IOC_E_ENABLE = 2, /*!< IOC enable request */
+ IOC_E_DISABLE = 3, /*!< IOC disable request */
+ IOC_E_DETACH = 4, /*!< driver detach cleanup */
+ IOC_E_ENABLED = 5, /*!< f/w enabled */
+ IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */
+ IOC_E_DISABLED = 7, /*!< f/w disabled */
+ IOC_E_PFFAILED = 8, /*!< failure notice by iocpf sm */
+ IOC_E_HBFAIL = 9, /*!< heartbeat failure */
+ IOC_E_HWERROR = 10, /*!< hardware error interrupt */
+ IOC_E_TIMEOUT = 11, /*!< timeout */
+ IOC_E_HWFAILED = 12, /*!< PCI mapping failure notice */
+};
+
+bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);
+
+static struct bfa_sm_table ioc_sm_table[] = {
+ {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
+ {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
+ {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
+ {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
+ {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
+ {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
+ {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
+ {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
+ {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
+ {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
+};
+
+/*
+ * Forward declarations for the iocpf state machine
+ */
+static void bfa_iocpf_enable(struct bfa_ioc *ioc);
+static void bfa_iocpf_disable(struct bfa_ioc *ioc);
+static void bfa_iocpf_fail(struct bfa_ioc *ioc);
+static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
+static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
+static void bfa_iocpf_stop(struct bfa_ioc *ioc);
+
+/**
+ * IOCPF state machine events
+ */
+enum iocpf_event {
+ IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */
+ IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */
+ IOCPF_E_STOP = 3, /*!< stop on driver detach */
+ IOCPF_E_FWREADY = 4, /*!< f/w initialization done */
+ IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response */
+ IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response */
+ IOCPF_E_FAIL = 7, /*!< failure notice by ioc sm */
+ IOCPF_E_INITFAIL = 8, /*!< init fail notice by ioc sm */
+ IOCPF_E_GETATTRFAIL = 9, /*!< init fail notice by ioc sm */
+ IOCPF_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */
+ IOCPF_E_TIMEOUT = 11, /*!< f/w response timeout */
+ IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */
+};
+
+/**
+ * IOCPF states
+ */
+enum bfa_iocpf_state {
+ BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */
+ BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
+ BFA_IOCPF_HWINIT = 3, /*!< IOC h/w is being initialized */
+ BFA_IOCPF_READY = 4, /*!< IOCPF is initialized */
+ BFA_IOCPF_INITFAIL = 5, /*!< IOCPF failed */
+ BFA_IOCPF_FAIL = 6, /*!< IOCPF failed */
+ BFA_IOCPF_DISABLING = 7, /*!< IOCPF is being disabled */
+ BFA_IOCPF_DISABLED = 8, /*!< IOCPF is disabled */
+ BFA_IOCPF_FWMISMATCH = 9, /*!< IOC f/w different from drivers */
+};
+
+bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
+ enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
+ enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
+
+static struct bfa_sm_table iocpf_sm_table[] = {
+ {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
+ {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
+ {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
+ {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
+ {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
+ {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
+ {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
+ {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
+ {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
+ {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
+ {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
+ {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
+ {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
+ {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
+};
+
+/**
+ * IOC State Machine
+ */
+
+/**
+ * Beginning state. IOC is in uninit state.
+ */
+static void
+bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
+{
+}
+
+/**
+ * IOC is in uninit state.
+ */
+static void
+bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_RESET:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+/**
+ * Reset entry actions -- initialize state machine
+ */
+static void
+bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
+{
+ bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
+}
+
+/**
+ * IOC is in reset state.
+ */
+static void
+bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_ENABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_disable_comp(ioc);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
+{
+ bfa_iocpf_enable(ioc);
+}
+
+/**
+ * Host IOC function is being enabled, awaiting the enable completion
+ * (or failure) event from the iocpf state machine.
+ */
+static void
+bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_ENABLED:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
+ break;
+
+ case IOC_E_PFFAILED:
+ /* !!! fall through !!! */
+ case IOC_E_HWERROR:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+ if (event != IOC_E_PFFAILED)
+ bfa_iocpf_initfail(ioc);
+ break;
+
+ case IOC_E_HWFAILED:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ bfa_iocpf_stop(ioc);
+ break;
+
+ case IOC_E_ENABLE:
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+/**
+ * Send the get-attribute request to firmware and arm the IOC timer.
+ */
+static void
+bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
+{
+ mod_timer(&ioc->ioc_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_TOV));
+ bfa_ioc_send_getattr(ioc);
+}
+
+/**
+ * IOC configuration in progress. Timer is active.
+ */
+static void
+bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_FWRSP_GETATTR:
+ del_timer(&ioc->ioc_timer);
+ bfa_ioc_check_attr_wwns(ioc);
+ bfa_ioc_hb_monitor(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
+ break;
+
+ case IOC_E_PFFAILED:
+ case IOC_E_HWERROR:
+ del_timer(&ioc->ioc_timer);
+ /* fall through */
+ case IOC_E_TIMEOUT:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+ if (event != IOC_E_PFFAILED)
+ bfa_iocpf_getattrfail(ioc);
+ break;
+
+ case IOC_E_DISABLE:
+ del_timer(&ioc->ioc_timer);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_ENABLE:
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
+{
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
+ bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
+}
+
+static void
+bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_ENABLE:
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_hb_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_PFFAILED:
+ case IOC_E_HWERROR:
+ bfa_ioc_hb_stop(ioc);
+ /* !!! fall through !!! */
+ case IOC_E_HBFAIL:
+ if (ioc->iocpf.auto_recover)
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+ else
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+
+ bfa_ioc_fail_notify(ioc);
+
+ if (event != IOC_E_PFFAILED)
+ bfa_iocpf_fail(ioc);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
+{
+ bfa_iocpf_disable(ioc);
+}
+
+/**
+ * IOC is being disabled.
+ */
+static void
+bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_DISABLED:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ case IOC_E_HWERROR:
+ /*
+ * No state change. Will move to disabled state
+ * after iocpf sm completes failure processing and
+ * moves to disabled state.
+ */
+ bfa_iocpf_fail(ioc);
+ break;
+
+ case IOC_E_HWFAILED:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+ bfa_ioc_disable_comp(ioc);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+/**
+ * IOC disable completion entry.
+ */
+static void
+bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_disable_comp(ioc);
+}
+
+static void
+bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_ENABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
+ break;
+
+ case IOC_E_DISABLE:
+ ioc->cbfn->disable_cbfn(ioc->bfa);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ bfa_iocpf_stop(ioc);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
+{
+}
+
+/**
+ * Hardware initialization retry.
+ */
+static void
+bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_ENABLED:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
+ break;
+
+ case IOC_E_PFFAILED:
+ case IOC_E_HWERROR:
+ /**
+ * Initialization retry failed.
+ */
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+ if (event != IOC_E_PFFAILED)
+ bfa_iocpf_initfail(ioc);
+ break;
+
+ case IOC_E_HWFAILED:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+ break;
+
+ case IOC_E_ENABLE:
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ bfa_iocpf_stop(ioc);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
+{
+}
+
+/**
+ * IOC failure.
+ */
+static void
+bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_ENABLE:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ bfa_iocpf_stop(ioc);
+ break;
+
+ case IOC_E_HWERROR:
+ /* HB failure notification, ignore. */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
+{
+}
+
+/**
+ * IOC failure.
+ */
+static void
+bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_ENABLE:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ break;
+
+ case IOC_E_DISABLE:
+ ioc->cbfn->disable_cbfn(ioc->bfa);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+/**
+ * IOCPF State Machine
+ */
+
+/**
+ * Reset entry actions -- initialize state machine
+ */
+static void
+bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
+{
+ iocpf->fw_mismatch_notified = false;
+ iocpf->auto_recover = bfa_nw_auto_recover;
+}
+
+/**
+ * Beginning state. IOCPF is in reset state.
+ */
+static void
+bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ switch (event) {
+ case IOCPF_E_ENABLE:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
+ break;
+
+ case IOCPF_E_STOP:
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+/**
+ * Semaphore should be acquired for version check.
+ */
+static void
+bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
+{
+ bfa_ioc_hw_sem_init(iocpf->ioc);
+ bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/**
+ * Awaiting h/w semaphore to continue with version check.
+ */
+static void
+bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
+ switch (event) {
+ case IOCPF_E_SEMLOCKED:
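+		/*
+		 * With the h/w semaphore held, bfa_ioc_firmware_lock()
+		 * checks the running firmware against the driver's image.
+		 * On a match, join the inter-function sync and start h/w
+		 * initialization (retrying later if sync cannot start yet);
+		 * on a mismatch, release the semaphore and flag the mismatch.
+		 */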
+ if (bfa_ioc_firmware_lock(ioc)) {
+ if (bfa_ioc_sync_start(ioc)) {
+ bfa_ioc_sync_join(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+ } else {
+ bfa_ioc_firmware_unlock(ioc);
+ bfa_nw_ioc_hw_sem_release(ioc);
+ mod_timer(&ioc->sem_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
+ }
+ } else {
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
+ }
+ break;
+
+ case IOCPF_E_SEM_ERROR:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ bfa_ioc_pf_hwfailed(ioc);
+ break;
+
+ case IOCPF_E_DISABLE:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ bfa_ioc_pf_disabled(ioc);
+ break;
+
+ case IOCPF_E_STOP:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+/**
+ * Notify enable completion callback
+ */
+static void
+bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
+{
+ /* Call only the first time sm enters fwmismatch state. */
+	if (!iocpf->fw_mismatch_notified)
+ bfa_ioc_pf_fwmismatch(iocpf->ioc);
+
+ iocpf->fw_mismatch_notified = true;
+ mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_TOV));
+}
+
+/**
+ * Awaiting firmware version match.
+ */
+static void
+bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
+ switch (event) {
+ case IOCPF_E_TIMEOUT:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
+ break;
+
+ case IOCPF_E_DISABLE:
+ del_timer(&ioc->iocpf_timer);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ bfa_ioc_pf_disabled(ioc);
+ break;
+
+ case IOCPF_E_STOP:
+ del_timer(&ioc->iocpf_timer);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+/**
+ * Request for semaphore.
+ */
+static void
+bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
+{
+ bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/**
+ * Awaiting semaphore for h/w initialization.
+ */
+static void
+bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
+ switch (event) {
+ case IOCPF_E_SEMLOCKED:
+ if (bfa_ioc_sync_complete(ioc)) {
+ bfa_ioc_sync_join(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+ } else {
+ bfa_nw_ioc_hw_sem_release(ioc);
+ mod_timer(&ioc->sem_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
+ }
+ break;
+
+ case IOCPF_E_SEM_ERROR:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ bfa_ioc_pf_hwfailed(ioc);
+ break;
+
+ case IOCPF_E_DISABLE:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
+{
+ iocpf->poll_time = 0;
+ bfa_ioc_reset(iocpf->ioc, false);
+}
+
+/**
+ * Hardware is being initialized. Interrupts are enabled.
+ * Holding hardware semaphore lock.
+ */
+static void
+bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
+ switch (event) {
+ case IOCPF_E_FWREADY:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
+ break;
+
+ case IOCPF_E_TIMEOUT:
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_ioc_pf_failed(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
+ break;
+
+ case IOCPF_E_DISABLE:
+ del_timer(&ioc->iocpf_timer);
+ bfa_ioc_sync_leave(ioc);
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
+{
+ mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_TOV));
+ /**
+ * Enable Interrupts before sending fw IOC ENABLE cmd.
+ */
+ iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
+ bfa_ioc_send_enable(iocpf->ioc);
+}
+
+/**
+ * Host IOC function is being enabled, awaiting response from firmware.
+ * Semaphore is acquired.
+ */
+static void
+bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
+ switch (event) {
+ case IOCPF_E_FWRSP_ENABLE:
+ del_timer(&ioc->iocpf_timer);
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
+ break;
+
+ case IOCPF_E_INITFAIL:
+ del_timer(&ioc->iocpf_timer);
+ /*
+ * !!! fall through !!!
+ */
+ case IOCPF_E_TIMEOUT:
+ bfa_nw_ioc_hw_sem_release(ioc);
+ if (event == IOCPF_E_TIMEOUT)
+ bfa_ioc_pf_failed(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
+ break;
+
+ case IOCPF_E_DISABLE:
+ del_timer(&ioc->iocpf_timer);
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
+{
+ bfa_ioc_pf_enabled(iocpf->ioc);
+}
+
+static void
+bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ switch (event) {
+ case IOCPF_E_DISABLE:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
+ break;
+
+ case IOCPF_E_GETATTRFAIL:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
+ break;
+
+ case IOCPF_E_FAIL:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
+{
+ mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_TOV));
+ bfa_ioc_send_disable(iocpf->ioc);
+}
+
+/**
+ * IOC is being disabled.
+ */
+static void
+bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
+ switch (event) {
+ case IOCPF_E_FWRSP_DISABLE:
+ del_timer(&ioc->iocpf_timer);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+ break;
+
+ case IOCPF_E_FAIL:
+ del_timer(&ioc->iocpf_timer);
+ /*
+ * !!! fall through !!!
+ */
+
+ case IOCPF_E_TIMEOUT:
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+ break;
+
+ case IOCPF_E_FWRSP_ENABLE:
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
+{
+ bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/**
+ * Awaiting h/w semaphore to synchronize the IOC disable across functions.
+ */
+static void
+bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
+ switch (event) {
+ case IOCPF_E_SEMLOCKED:
+ bfa_ioc_sync_leave(ioc);
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+ break;
+
+ case IOCPF_E_SEM_ERROR:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ bfa_ioc_pf_hwfailed(ioc);
+ break;
+
+ case IOCPF_E_FAIL:
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+/**
+ * IOC disable completion entry.
+ */
+static void
+bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
+{
+ bfa_ioc_mbox_flush(iocpf->ioc);
+ bfa_ioc_pf_disabled(iocpf->ioc);
+}
+
+static void
+bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
+ switch (event) {
+ case IOCPF_E_ENABLE:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
+ break;
+
+ case IOCPF_E_STOP:
+ bfa_ioc_firmware_unlock(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
+{
+ bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/**
+ * Hardware initialization failed.
+ */
+static void
+bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
+ switch (event) {
+ case IOCPF_E_SEMLOCKED:
+ bfa_ioc_notify_fail(ioc);
+ bfa_ioc_sync_leave(ioc);
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+ break;
+
+ case IOCPF_E_SEM_ERROR:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ bfa_ioc_pf_hwfailed(ioc);
+ break;
+
+ case IOCPF_E_DISABLE:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+ break;
+
+ case IOCPF_E_STOP:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_ioc_firmware_unlock(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ break;
+
+ case IOCPF_E_FAIL:
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
+{
+}
+
+/**
+ * Hardware initialization failed.
+ */
+static void
+bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
+ switch (event) {
+ case IOCPF_E_DISABLE:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+ break;
+
+ case IOCPF_E_STOP:
+ bfa_ioc_firmware_unlock(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
+{
+ /**
+	 * Stop the firmware processors; the IOC is marked failed in h/w
+	 * once the semaphore is acquired (see the SEMLOCKED handler).
+ */
+ bfa_ioc_lpu_stop(iocpf->ioc);
+
+ /**
+ * Flush any queued up mailbox requests.
+ */
+ bfa_ioc_mbox_flush(iocpf->ioc);
+ bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/**
+ * IOC is in failed state.
+ */
+static void
+bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
+ switch (event) {
+ case IOCPF_E_SEMLOCKED:
+ bfa_ioc_sync_ack(ioc);
+ bfa_ioc_notify_fail(ioc);
+ if (!iocpf->auto_recover) {
+ bfa_ioc_sync_leave(ioc);
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ } else {
+ if (bfa_ioc_sync_complete(ioc))
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+ else {
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
+ }
+ }
+ break;
+
+ case IOCPF_E_SEM_ERROR:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ bfa_ioc_pf_hwfailed(ioc);
+ break;
+
+ case IOCPF_E_DISABLE:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+ break;
+
+ case IOCPF_E_FAIL:
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
+{
+}
+
+/**
+ * @brief
+ * IOC is in failed state.
+ */
+static void
+bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ switch (event) {
+ case IOCPF_E_DISABLE:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+/**
+ * BFA IOC private functions
+ */
+
+/**
+ * Notify common modules registered for notification.
+ */
+static void
+bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
+{
+ struct bfa_ioc_notify *notify;
+ struct list_head *qe;
+
+ list_for_each(qe, &ioc->notify_q) {
+ notify = (struct bfa_ioc_notify *)qe;
+ notify->cbfn(notify->cbarg, event);
+ }
+}
+
+static void
+bfa_ioc_disable_comp(struct bfa_ioc *ioc)
+{
+ ioc->cbfn->disable_cbfn(ioc->bfa);
+ bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
+}
+
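+/**
+ * Spin (up to BFA_SEM_SPINCNT polls, 2us apart) trying to acquire the
+ * given h/w semaphore; returns true if the semaphore was acquired.
+ */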
+bool
+bfa_nw_ioc_sem_get(void __iomem *sem_reg)
+{
+ u32 r32;
+ int cnt = 0;
+#define BFA_SEM_SPINCNT 3000
+
+ r32 = readl(sem_reg);
+
+ while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
+ cnt++;
+ udelay(2);
+ r32 = readl(sem_reg);
+ }
+
+	return !(r32 & 1);
+}
+
+void
+bfa_nw_ioc_sem_release(void __iomem *sem_reg)
+{
+ readl(sem_reg);
+ writel(1, sem_reg);
+}
+
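+/**
+ * Clear stale firmware state left over from a previous non-normal boot:
+ * if the f/w is not already uninitialized and was not booted normally,
+ * force it back to BFI_IOC_UNINIT and cycle the IOC semaphore so a stale
+ * lock does not block initialization.
+ */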
+static void
+bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_image_hdr fwhdr;
+ u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+ if (fwstate == BFI_IOC_UNINIT)
+ return;
+
+ bfa_nw_ioc_fwver_get(ioc, &fwhdr);
+
+ if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
+ return;
+
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+
+ /*
+ * Try to lock and then unlock the semaphore.
+ */
+ readl(ioc->ioc_regs.ioc_sem_reg);
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+}
+
+static void
+bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
+{
+ u32 r32;
+
+ /**
+	 * The first read of the semaphore register returns 0 (semaphore
+	 * acquired); subsequent reads return 1. The semaphore is released
+	 * by writing 1 to the register.
+ */
+ r32 = readl(ioc->ioc_regs.ioc_sem_reg);
+ if (r32 == ~0) {
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
+ return;
+ }
+ if (!(r32 & 1)) {
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
+ return;
+ }
+
+ mod_timer(&ioc->sem_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
+}
+
+void
+bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
+{
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+}
+
+static void
+bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
+{
+ del_timer(&ioc->sem_timer);
+}
+
+/**
+ * @brief
+ * Initialize LPU local memory (aka secondary memory / SRAM)
+ */
+static void
+bfa_ioc_lmem_init(struct bfa_ioc *ioc)
+{
+ u32 pss_ctl;
+ int i;
+#define PSS_LMEM_INIT_TIME 10000
+
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl &= ~__PSS_LMEM_RESET;
+ pss_ctl |= __PSS_LMEM_INIT_EN;
+
+ /*
+ * i2c workaround 12.5khz clock
+ */
+ pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+
+ /**
+ * wait for memory initialization to be complete
+ */
+ i = 0;
+ do {
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+ i++;
+ } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
+
+ /**
+ * If memory initialization is not successful, IOC timeout will catch
+ * such failures.
+ */
+ BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
+
+ pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+}
+
+static void
+bfa_ioc_lpu_start(struct bfa_ioc *ioc)
+{
+ u32 pss_ctl;
+
+ /**
+ * Take processor out of reset.
+ */
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl &= ~__PSS_LPU0_RESET;
+
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+}
+
+static void
+bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
+{
+ u32 pss_ctl;
+
+ /**
+ * Put processors in reset.
+ */
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
+
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+}
+
+/**
+ * Get driver and firmware versions.
+ */
+void
+bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
+{
+ u32 pgnum;
+ u32 loff = 0;
+ int i;
+ u32 *fwsig = (u32 *) fwhdr;
+
+ pgnum = bfa_ioc_smem_pgnum(ioc, loff);
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+ for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
+ i++) {
+ fwsig[i] =
+ swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
+ loff += sizeof(u32);
+ }
+}
+
+/**
+ * Returns true if the running firmware's MD5 checksum matches the driver's.
+ */
+bool
+bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
+{
+ struct bfi_ioc_image_hdr *drv_fwhdr;
+ int i;
+
+ drv_fwhdr = (struct bfi_ioc_image_hdr *)
+ bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
+
+ for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
+ if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Return true if current running version is valid. Firmware signature and
+ * execution context (driver/bios) must match.
+ */
+static bool
+bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
+{
+ struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
+
+ bfa_nw_ioc_fwver_get(ioc, &fwhdr);
+ drv_fwhdr = (struct bfi_ioc_image_hdr *)
+ bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
+
+ if (fwhdr.signature != drv_fwhdr->signature)
+ return false;
+
+ if (swab32(fwhdr.bootenv) != boot_env)
+ return false;
+
+ return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
+}
+
+/**
+ * Conditionally flush any pending message from firmware at start.
+ */
+static void
+bfa_ioc_msgflush(struct bfa_ioc *ioc)
+{
+ u32 r32;
+
+ r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
+ if (r32)
+ writel(1, ioc->ioc_regs.lpu_mbox_cmd);
+}
+
+/**
+ * Boot/initialization decision logic (diagrammed in ioc_init_logic.jpg).
+ */
+static void
+bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
+{
+ enum bfi_ioc_state ioc_fwstate;
+ bool fwvalid;
+ u32 boot_env;
+
+ ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+ if (force)
+ ioc_fwstate = BFI_IOC_UNINIT;
+
+ boot_env = BFI_FWBOOT_ENV_OS;
+
+ /**
+ * check if firmware is valid
+ */
+ fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
+ false : bfa_ioc_fwver_valid(ioc, boot_env);
+
+ if (!fwvalid) {
+ bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
+ bfa_ioc_poll_fwinit(ioc);
+ return;
+ }
+
+ /**
+	 * If hardware initialization is in progress (initiated by the other IOC),
+ * just wait for an initialization completion interrupt.
+ */
+ if (ioc_fwstate == BFI_IOC_INITING) {
+ bfa_ioc_poll_fwinit(ioc);
+ return;
+ }
+
+ /**
+ * If IOC function is disabled and firmware version is same,
+ * just re-enable IOC.
+ */
+ if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
+ /**
+ * When using MSI-X any pending firmware ready event should
+ * be flushed. Otherwise MSI-X interrupts are not delivered.
+ */
+ bfa_ioc_msgflush(ioc);
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
+ return;
+ }
+
+ /**
+ * Initialize the h/w for any other states.
+ */
+ bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
+ bfa_ioc_poll_fwinit(ioc);
+}
+
+void
+bfa_nw_ioc_timeout(void *ioc_arg)
+{
+ struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
+
+ bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
+}
+
+static void
+bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
+{
+ u32 *msgp = (u32 *) ioc_msg;
+ u32 i;
+
+	BUG_ON(len > BFI_IOC_MSGLEN_MAX);
+
+ /*
+ * first write msg to mailbox registers
+ */
+ for (i = 0; i < len / sizeof(u32); i++)
+ writel(cpu_to_le32(msgp[i]),
+ ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
+
+ for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
+ writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
+
+ /*
+ * write 1 to mailbox CMD to trigger LPU event
+ */
+ writel(1, ioc->ioc_regs.hfn_mbox_cmd);
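+	/* dummy readback flushes the posted write to the mailbox CMD register */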
+ (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
+}
+
+static void
+bfa_ioc_send_enable(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_ctrl_req enable_req;
+ struct timeval tv;
+
+ bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
+ bfa_ioc_portid(ioc));
+ enable_req.clscode = htons(ioc->clscode);
+ do_gettimeofday(&tv);
+	enable_req.tv_sec = htonl(tv.tv_sec);
+ bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
+}
+
+static void
+bfa_ioc_send_disable(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_ctrl_req disable_req;
+
+ bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
+ bfa_ioc_portid(ioc));
+ bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
+}
+
+static void
+bfa_ioc_send_getattr(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_getattr_req attr_req;
+
+ bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
+ bfa_ioc_portid(ioc));
+ bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
+ bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
+}
+
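+/**
+ * Heartbeat timer callback: if the f/w heartbeat counter has not advanced
+ * since the previous check, declare the IOC dead and start recovery;
+ * otherwise rearm the timer and poll the mailbox for pending commands.
+ */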
+void
+bfa_nw_ioc_hb_check(void *cbarg)
+{
+ struct bfa_ioc *ioc = cbarg;
+ u32 hb_count;
+
+ hb_count = readl(ioc->ioc_regs.heartbeat);
+	if (ioc->hb_count == hb_count) {
+		bfa_ioc_recover(ioc);
+		return;
+	}
+	ioc->hb_count = hb_count;
+
+ bfa_ioc_mbox_poll(ioc);
+ mod_timer(&ioc->hb_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_HB_TOV));
+}
+
+static void
+bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
+{
+ ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
+ mod_timer(&ioc->hb_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_HB_TOV));
+}
+
+static void
+bfa_ioc_hb_stop(struct bfa_ioc *ioc)
+{
+ del_timer(&ioc->hb_timer);
+}
+
+/**
+ * @brief
+ * Initiate a full firmware download.
+ */
+static void
+bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
+ u32 boot_env)
+{
+ u32 *fwimg;
+ u32 pgnum;
+ u32 loff = 0;
+ u32 chunkno = 0;
+ u32 i;
+ u32 asicmode;
+
+ /**
+ * Initialize LMEM first before code download
+ */
+ bfa_ioc_lmem_init(ioc);
+
+ fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
+
+ pgnum = bfa_ioc_smem_pgnum(ioc, loff);
+
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+ for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
+ if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
+ chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
+ fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+ }
+
+ /**
+ * write smem
+ */
+ writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
+ ((ioc->ioc_regs.smem_page_start) + (loff)));
+
+ loff += sizeof(u32);
+
+ /**
+ * handle page offset wrap around
+ */
+ loff = PSS_SMEM_PGOFF(loff);
+ if (loff == 0) {
+ pgnum++;
+ writel(pgnum,
+ ioc->ioc_regs.host_page_num_fn);
+ }
+ }
+
+ writel(bfa_ioc_smem_pgnum(ioc, 0),
+ ioc->ioc_regs.host_page_num_fn);
+
+ /*
+ * Set boot type, env and device mode at the end.
+ */
+ asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
+ ioc->port0_mode, ioc->port1_mode);
+ writel(asicmode, ((ioc->ioc_regs.smem_page_start)
+ + BFI_FWBOOT_DEVMODE_OFF));
+ writel(boot_type, ((ioc->ioc_regs.smem_page_start)
+ + (BFI_FWBOOT_TYPE_OFF)));
+ writel(boot_env, ((ioc->ioc_regs.smem_page_start)
+ + (BFI_FWBOOT_ENV_OFF)));
+}
+
+static void
+bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
+{
+ bfa_ioc_hwinit(ioc, force);
+}
+
+/**
+ * BFA ioc enable reply by firmware
+ */
+static void
+bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
+ u8 cap_bm)
+{
+ struct bfa_iocpf *iocpf = &ioc->iocpf;
+
+ ioc->port_mode = ioc->port_mode_cfg = port_mode;
+ ioc->ad_cap_bm = cap_bm;
+ bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
+}
+
+/**
+ * @brief
+ * Update BFA configuration from firmware configuration.
+ */
+static void
+bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_attr *attr = ioc->attr;
+
+ attr->adapter_prop = ntohl(attr->adapter_prop);
+ attr->card_type = ntohl(attr->card_type);
+ attr->maxfrsize = ntohs(attr->maxfrsize);
+
+ bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
+}
+
+/**
+ * Attach time initialization of mbox logic.
+ */
+static void
+bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ int mc;
+
+ INIT_LIST_HEAD(&mod->cmd_q);
+ for (mc = 0; mc < BFI_MC_MAX; mc++) {
+ mod->mbhdlr[mc].cbfn = NULL;
+ mod->mbhdlr[mc].cbarg = ioc->bfa;
+ }
+}
+
+/**
+ * Mbox poll timer -- restarts any pending mailbox requests.
+ */
+static void
+bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ struct bfa_mbox_cmd *cmd;
+ bfa_mbox_cmd_cbfn_t cbfn;
+ void *cbarg;
+ u32 stat;
+
+ /**
+ * If no command pending, do nothing
+ */
+ if (list_empty(&mod->cmd_q))
+ return;
+
+ /**
+ * If previous command is not yet fetched by firmware, do nothing
+ */
+ stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
+ if (stat)
+ return;
+
+ /**
+	 * Dequeue the next command and write it to the mailbox.
+ */
+ bfa_q_deq(&mod->cmd_q, &cmd);
+ bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
+
+ /**
+ * Give a callback to the client, indicating that the command is sent
+ */
+ if (cmd->cbfn) {
+ cbfn = cmd->cbfn;
+ cbarg = cmd->cbarg;
+ cmd->cbfn = NULL;
+ cbfn(cbarg);
+ }
+}
+
+/**
+ * Clean up any pending requests.
+ */
+static void
+bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ struct bfa_mbox_cmd *cmd;
+
+ while (!list_empty(&mod->cmd_q))
+ bfa_q_deq(&mod->cmd_q, &cmd);
+}
+
+static void
+bfa_ioc_fail_notify(struct bfa_ioc *ioc)
+{
+ /**
+ * Notify driver and common modules registered for notification.
+ */
+ ioc->cbfn->hbfail_cbfn(ioc->bfa);
+ bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
+}
+
+/**
+ * IOCPF to IOC interface
+ */
+static void
+bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(ioc, IOC_E_ENABLED);
+}
+
+static void
+bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(ioc, IOC_E_DISABLED);
+}
+
+static void
+bfa_ioc_pf_failed(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+}
+
+static void
+bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+}
+
+static void
+bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
+{
+ /**
+ * Provide enable completion callback and AEN notification.
+ */
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+}
+
+/**
+ * IOC public
+ */
+static enum bfa_status
+bfa_ioc_pll_init(struct bfa_ioc *ioc)
+{
+ /*
+ * Hold semaphore so that nobody can access the chip during init.
+ */
+ bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
+
+ bfa_ioc_pll_init_asic(ioc);
+
+ ioc->pllinit = true;
+ /*
+ * release semaphore.
+ */
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+
+ return BFA_STATUS_OK;
+}
+
+/**
+ * Interface used by diag module to do firmware boot with memory test
+ * as the entry vector.
+ */
+static void
+bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
+ u32 boot_env)
+{
+ bfa_ioc_stats(ioc, ioc_boots);
+
+ if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
+ return;
+
+ /**
+ * Initialize IOC state of all functions on a chip reset.
+ */
+ if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
+ writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
+ } else {
+ writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
+ }
+
+ bfa_ioc_msgflush(ioc);
+ bfa_ioc_download_fw(ioc, boot_type, boot_env);
+ bfa_ioc_lpu_start(ioc);
+}
+
+/**
+ * Enable/disable IOC failure auto recovery.
+ */
+void
+bfa_nw_ioc_auto_recover(bool auto_recover)
+{
+ bfa_nw_auto_recover = auto_recover;
+}
+
+static bool
+bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
+{
+ u32 *msgp = mbmsg;
+ u32 r32;
+ int i;
+
+ r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
+ if ((r32 & 1) == 0)
+ return false;
+
+ /**
+ * read the MBOX msg
+ */
+ for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
+ i++) {
+ r32 = readl(ioc->ioc_regs.lpu_mbox +
+ i * sizeof(u32));
+ msgp[i] = htonl(r32);
+ }
+
+ /**
+ * turn off mailbox interrupt by clearing mailbox status
+ */
+ writel(1, ioc->ioc_regs.lpu_mbox_cmd);
+ readl(ioc->ioc_regs.lpu_mbox_cmd);
+
+ return true;
+}
+
+static void
+bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
+{
+ union bfi_ioc_i2h_msg_u *msg;
+ struct bfa_iocpf *iocpf = &ioc->iocpf;
+
+ msg = (union bfi_ioc_i2h_msg_u *) m;
+
+ bfa_ioc_stats(ioc, ioc_isrs);
+
+ switch (msg->mh.msg_id) {
+ case BFI_IOC_I2H_HBEAT:
+ break;
+
+ case BFI_IOC_I2H_ENABLE_REPLY:
+ bfa_ioc_enable_reply(ioc,
+ (enum bfa_mode)msg->fw_event.port_mode,
+ msg->fw_event.cap_bm);
+ break;
+
+ case BFI_IOC_I2H_DISABLE_REPLY:
+ bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
+ break;
+
+ case BFI_IOC_I2H_GETATTR_REPLY:
+ bfa_ioc_getattr_reply(ioc);
+ break;
+
+ default:
+ BUG_ON(1);
+ }
+}
+
+/**
+ * IOC attach time initialization and setup.
+ *
+ * @param[in] ioc memory for IOC
+ * @param[in] bfa driver instance structure
+ */
+void
+bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
+{
+ ioc->bfa = bfa;
+ ioc->cbfn = cbfn;
+ ioc->fcmode = false;
+ ioc->pllinit = false;
+ ioc->dbg_fwsave_once = true;
+ ioc->iocpf.ioc = ioc;
+
+ bfa_ioc_mbox_attach(ioc);
+ INIT_LIST_HEAD(&ioc->notify_q);
+
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ bfa_fsm_send_event(ioc, IOC_E_RESET);
+}
+
+/**
+ * Driver detach time IOC cleanup.
+ */
+void
+bfa_nw_ioc_detach(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(ioc, IOC_E_DETACH);
+
+ /* Done with detach, empty the notify_q. */
+ INIT_LIST_HEAD(&ioc->notify_q);
+}
+
+/**
+ * Setup IOC PCI properties.
+ *
+ * @param[in] pcidev PCI device information for this IOC
+ */
+void
+bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
+ enum bfi_pcifn_class clscode)
+{
+ ioc->clscode = clscode;
+ ioc->pcidev = *pcidev;
+
+ /**
+ * Initialize IOC and device personality
+ */
+ ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
+ ioc->asic_mode = BFI_ASIC_MODE_FC;
+
+ switch (pcidev->device_id) {
+ case PCI_DEVICE_ID_BROCADE_CT:
+ ioc->asic_gen = BFI_ASIC_GEN_CT;
+ ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
+ ioc->asic_mode = BFI_ASIC_MODE_ETH;
+ ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
+ ioc->ad_cap_bm = BFA_CM_CNA;
+ break;
+
+ case BFA_PCI_DEVICE_ID_CT2:
+ ioc->asic_gen = BFI_ASIC_GEN_CT2;
+ if (clscode == BFI_PCIFN_CLASS_FC &&
+ pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
+ ioc->asic_mode = BFI_ASIC_MODE_FC16;
+ ioc->fcmode = true;
+ ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
+ ioc->ad_cap_bm = BFA_CM_HBA;
+ } else {
+ ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
+ ioc->asic_mode = BFI_ASIC_MODE_ETH;
+ if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
+ ioc->port_mode =
+ ioc->port_mode_cfg = BFA_MODE_CNA;
+ ioc->ad_cap_bm = BFA_CM_CNA;
+ } else {
+ ioc->port_mode =
+ ioc->port_mode_cfg = BFA_MODE_NIC;
+ ioc->ad_cap_bm = BFA_CM_NIC;
+ }
+ }
+ break;
+
+ default:
+ BUG_ON(1);
+ }
+
+ /**
+ * Set asic specific interfaces.
+ */
+ if (ioc->asic_gen == BFI_ASIC_GEN_CT)
+ bfa_nw_ioc_set_ct_hwif(ioc);
+ else {
+ WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
+ bfa_nw_ioc_set_ct2_hwif(ioc);
+ bfa_nw_ioc_ct2_poweron(ioc);
+ }
+
+ bfa_ioc_map_port(ioc);
+ bfa_ioc_reg_init(ioc);
+}
+
+/**
+ * Initialize IOC dma memory
+ *
+ * @param[in] dm_kva kernel virtual address of IOC dma memory
+ * @param[in] dm_pa physical address of IOC dma memory
+ */
+void
+bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
+{
+ /**
+ * dma memory for firmware attribute
+ */
+ ioc->attr_dma.kva = dm_kva;
+ ioc->attr_dma.pa = dm_pa;
+ ioc->attr = (struct bfi_ioc_attr *) dm_kva;
+}
+
+/**
+ * Return size of dma memory required.
+ */
+u32
+bfa_nw_ioc_meminfo(void)
+{
+ return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
+}
+
+void
+bfa_nw_ioc_enable(struct bfa_ioc *ioc)
+{
+ bfa_ioc_stats(ioc, ioc_enables);
+ ioc->dbg_fwsave_once = true;
+
+ bfa_fsm_send_event(ioc, IOC_E_ENABLE);
+}
+
+void
+bfa_nw_ioc_disable(struct bfa_ioc *ioc)
+{
+ bfa_ioc_stats(ioc, ioc_disables);
+ bfa_fsm_send_event(ioc, IOC_E_DISABLE);
+}
+
+static u32
+bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
+{
+ return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
+}
+
+/**
+ * Register mailbox message handler function, to be called by common modules
+ */
+void
+bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
+ bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+
+ mod->mbhdlr[mc].cbfn = cbfn;
+ mod->mbhdlr[mc].cbarg = cbarg;
+}
+
+/**
+ * Queue a mailbox command request to firmware. Waits if mailbox is busy.
+ * It is the caller's responsibility to serialize access.
+ *
+ * @param[in]	ioc	IOC instance
+ * @param[in]	cmd	Mailbox command
+ *
+ * Returns true if the command was queued (to be sent later), false if it
+ * was written to the mailbox immediately.
+ */
+bool
+bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
+ bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ u32 stat;
+
+ cmd->cbfn = cbfn;
+ cmd->cbarg = cbarg;
+
+ /**
+ * If a previous command is pending, queue new command
+ */
+ if (!list_empty(&mod->cmd_q)) {
+ list_add_tail(&cmd->qe, &mod->cmd_q);
+ return true;
+ }
+
+ /**
+ * If mailbox is busy, queue command for poll timer
+ */
+ stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
+ if (stat) {
+ list_add_tail(&cmd->qe, &mod->cmd_q);
+ return true;
+ }
+
+ /**
+ * mailbox is free -- queue command to firmware
+ */
+ bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
+
+ return false;
+}
+
+/**
+ * Handle mailbox interrupts
+ */
+void
+bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ struct bfi_mbmsg m;
+ int mc;
+
+ if (bfa_ioc_msgget(ioc, &m)) {
+ /**
+ * Treat IOC message class as special.
+ */
+ mc = m.mh.msg_class;
+ if (mc == BFI_MC_IOC) {
+ bfa_ioc_isr(ioc, &m);
+ return;
+ }
+
+ if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
+ return;
+
+ mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
+ }
+
+ bfa_ioc_lpu_read_stat(ioc);
+
+ /**
+ * Try to send pending mailbox commands
+ */
+ bfa_ioc_mbox_poll(ioc);
+}
+
+void
+bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
+{
+ bfa_ioc_stats(ioc, ioc_hbfails);
+ bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
+ bfa_fsm_send_event(ioc, IOC_E_HWERROR);
+}
+
+/**
+ * return true if IOC is disabled
+ */
+bool
+bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
+{
+ return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
+ bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
+}
+
+/**
+ * Add to IOC heartbeat failure notification queue. To be used by common
+ * modules such as cee, port, diag.
+ */
+void
+bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
+ struct bfa_ioc_notify *notify)
+{
+ list_add_tail(&notify->qe, &ioc->notify_q);
+}
+
+#define BFA_MFG_NAME "Brocade"
+static void
+bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
+ struct bfa_adapter_attr *ad_attr)
+{
+ struct bfi_ioc_attr *ioc_attr;
+
+ ioc_attr = ioc->attr;
+
+ bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
+ bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
+ bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
+ bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
+ memcpy(&ad_attr->vpd, &ioc_attr->vpd,
+ sizeof(struct bfa_mfg_vpd));
+
+ ad_attr->nports = bfa_ioc_get_nports(ioc);
+ ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
+
+ bfa_ioc_get_adapter_model(ioc, ad_attr->model);
+ /* For now, model descr uses same model string */
+ bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
+
+ ad_attr->card_type = ioc_attr->card_type;
+ ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
+
+ if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
+ ad_attr->prototype = 1;
+ else
+ ad_attr->prototype = 0;
+
+ ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
+ ad_attr->mac = bfa_nw_ioc_get_mac(ioc);
+
+ ad_attr->pcie_gen = ioc_attr->pcie_gen;
+ ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
+ ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
+ ad_attr->asic_rev = ioc_attr->asic_rev;
+
+ bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
+}
+
+static enum bfa_ioc_type
+bfa_ioc_get_type(struct bfa_ioc *ioc)
+{
+ if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
+ return BFA_IOC_TYPE_LL;
+
+	BUG_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
+
+ return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
+ ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
+}
+
+static void
+bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
+{
+ memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
+ memcpy(serial_num,
+ (void *)ioc->attr->brcd_serialnum,
+ BFA_ADAPTER_SERIAL_NUM_LEN);
+}
+
+static void
+bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
+{
+ memset(fw_ver, 0, BFA_VERSION_LEN);
+ memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
+}
+
+static void
+bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
+{
+	BUG_ON(!chip_rev);
+
+	memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
+
+	snprintf(chip_rev, BFA_IOC_CHIP_REV_LEN, "Rev-%c",
+		 ioc->attr->asic_rev);
+}
+
+static void
+bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
+{
+ memset(optrom_ver, 0, BFA_VERSION_LEN);
+ memcpy(optrom_ver, ioc->attr->optrom_version,
+ BFA_VERSION_LEN);
+}
+
+static void
+bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
+{
+ memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
+ memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+}
+
+static void
+bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
+{
+ struct bfi_ioc_attr *ioc_attr;
+
+	BUG_ON(!model);
+ memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
+
+ ioc_attr = ioc->attr;
+
+ snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
+ BFA_MFG_NAME, ioc_attr->card_type);
+}
+
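+/**
+ * Derive the externally visible IOC state. While enabling or in a failure
+ * state, the coarse IOC state is refined using the iocpf state (e.g. to
+ * report SEMWAIT, HWINIT or FWMISMATCH).
+ */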
+static enum bfa_ioc_state
+bfa_ioc_get_state(struct bfa_ioc *ioc)
+{
+ enum bfa_iocpf_state iocpf_st;
+ enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+
+ if (ioc_st == BFA_IOC_ENABLING ||
+ ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
+
+ iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
+
+ switch (iocpf_st) {
+ case BFA_IOCPF_SEMWAIT:
+ ioc_st = BFA_IOC_SEMWAIT;
+ break;
+
+ case BFA_IOCPF_HWINIT:
+ ioc_st = BFA_IOC_HWINIT;
+ break;
+
+ case BFA_IOCPF_FWMISMATCH:
+ ioc_st = BFA_IOC_FWMISMATCH;
+ break;
+
+ case BFA_IOCPF_FAIL:
+ ioc_st = BFA_IOC_FAIL;
+ break;
+
+ case BFA_IOCPF_INITFAIL:
+ ioc_st = BFA_IOC_INITFAIL;
+ break;
+
+ default:
+ break;
+ }
+ }
+ return ioc_st;
+}
+
+void
+bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
+{
+ memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
+
+ ioc_attr->state = bfa_ioc_get_state(ioc);
+ ioc_attr->port_id = ioc->port_id;
+ ioc_attr->port_mode = ioc->port_mode;
+
+ ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
+ ioc_attr->cap_bm = ioc->ad_cap_bm;
+
+ ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
+
+ bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
+
+ ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
+ ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
+ bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
+}
+
+/**
+ * WWN public
+ */
+static u64
+bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
+{
+ return ioc->attr->pwwn;
+}
+
+mac_t
+bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
+{
+ return ioc->attr->mac;
+}
+
+/**
+ * Firmware failure detected. Start recovery actions.
+ */
+static void
+bfa_ioc_recover(struct bfa_ioc *ioc)
+{
+ pr_crit("Heart Beat of IOC has failed\n");
+ bfa_ioc_stats(ioc, ioc_hbfails);
+ bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
+ bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
+}
+
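+/**
+ * Sanity-check the WWNs reported by firmware. There is nothing to check
+ * for Ethernet (LL) functions, and no FC-specific checks are implemented
+ * here yet.
+ */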
+static void
+bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
+{
+ if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
+ return;
+}
+
+/**
+ * @dg hal_iocpf_pvt BFA IOC PF private functions
+ * @{
+ */
+
+static void
+bfa_iocpf_enable(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
+}
+
+static void
+bfa_iocpf_disable(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
+}
+
+static void
+bfa_iocpf_fail(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
+}
+
+static void
+bfa_iocpf_initfail(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
+}
+
+static void
+bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
+}
+
+static void
+bfa_iocpf_stop(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
+}
+
+void
+bfa_nw_iocpf_timeout(void *ioc_arg)
+{
+ struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
+ enum bfa_iocpf_state iocpf_st;
+
+ iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
+
+ if (iocpf_st == BFA_IOCPF_HWINIT)
+ bfa_ioc_poll_fwinit(ioc);
+ else
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
+}
+
+void
+bfa_nw_iocpf_sem_timeout(void *ioc_arg)
+{
+ struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
+
+ bfa_ioc_hw_sem_get(ioc);
+}
+
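+/**
+ * Poll for f/w initialization to complete, rescheduling every
+ * BFA_IOC_POLL_TOV ms until BFA_IOC_TOV ms have elapsed, at which point
+ * the iocpf timeout path takes over.
+ */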
+static void
+bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
+{
+ u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+ if (fwstate == BFI_IOC_DISABLED) {
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
+ return;
+ }
+
+ if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
+ bfa_nw_iocpf_timeout(ioc);
+ } else {
+ ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
+ mod_timer(&ioc->iocpf_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_POLL_TOV));
+ }
+}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
new file mode 100644
index 000000000000..ca158d1eaef3
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -0,0 +1,325 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __BFA_IOC_H__
+#define __BFA_IOC_H__
+
+#include "bfa_cs.h"
+#include "bfi.h"
+#include "cna.h"
+
+#define BFA_IOC_TOV 3000 /* msecs */
+#define BFA_IOC_HWSEM_TOV 500 /* msecs */
+#define BFA_IOC_HB_TOV 500 /* msecs */
+#define BFA_IOC_POLL_TOV 200 /* msecs */
+
+/**
+ * PCI device information required by IOC
+ */
+struct bfa_pcidev {
+ int pci_slot;
+ u8 pci_func;
+ u16 device_id;
+ u16 ssid;
+ void __iomem *pci_bar_kva;
+};
+
+/**
+ * Structure used to remember the DMA-able memory block's KVA and Physical
+ * Address.
+ */
+struct bfa_dma {
+ void *kva; /* ! Kernel virtual address */
+ u64 pa; /* ! Physical address */
+};
+
+#define BFA_DMA_ALIGN_SZ 256
+
+/**
+ * smem size for Crossbow and Catapult
+ */
+#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
+#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
+
+/**
+ * @brief BFA dma address assignment macro. (big endian format)
+ */
+#define bfa_dma_be_addr_set(dma_addr, pa) \
+ __bfa_dma_be_addr_set(&dma_addr, (u64)pa)
+static inline void
+__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
+{
+ dma_addr->a32.addr_lo = (u32) htonl(pa);
+ dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa));
+}
+
+struct bfa_ioc_regs {
+ void __iomem *hfn_mbox_cmd;
+ void __iomem *hfn_mbox;
+ void __iomem *lpu_mbox_cmd;
+ void __iomem *lpu_mbox;
+ void __iomem *lpu_read_stat;
+ void __iomem *pss_ctl_reg;
+ void __iomem *pss_err_status_reg;
+ void __iomem *app_pll_fast_ctl_reg;
+ void __iomem *app_pll_slow_ctl_reg;
+ void __iomem *ioc_sem_reg;
+ void __iomem *ioc_usage_sem_reg;
+ void __iomem *ioc_init_sem_reg;
+ void __iomem *ioc_usage_reg;
+ void __iomem *host_page_num_fn;
+ void __iomem *heartbeat;
+ void __iomem *ioc_fwstate;
+ void __iomem *alt_ioc_fwstate;
+ void __iomem *ll_halt;
+ void __iomem *alt_ll_halt;
+ void __iomem *err_set;
+ void __iomem *ioc_fail_sync;
+ void __iomem *shirq_isr_next;
+ void __iomem *shirq_msk_next;
+ void __iomem *smem_page_start;
+ u32 smem_pg0;
+};
+
+/**
+ * IOC Mailbox structures
+ */
+typedef void (*bfa_mbox_cmd_cbfn_t)(void *cbarg);
+struct bfa_mbox_cmd {
+ struct list_head qe;
+ bfa_mbox_cmd_cbfn_t cbfn;
+ void *cbarg;
+ u32 msg[BFI_IOC_MSGSZ];
+};
+
+/**
+ * IOC mailbox module
+ */
+typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m);
+struct bfa_ioc_mbox_mod {
+ struct list_head cmd_q; /*!< pending mbox queue */
+ int nmclass; /*!< number of handlers */
+ struct {
+ bfa_ioc_mbox_mcfunc_t cbfn; /*!< message handlers */
+ void *cbarg;
+ } mbhdlr[BFI_MC_MAX];
+};
+
+/**
+ * IOC callback function interfaces
+ */
+typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
+typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
+typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
+typedef void (*bfa_ioc_reset_cbfn_t)(void *bfa);
+struct bfa_ioc_cbfn {
+ bfa_ioc_enable_cbfn_t enable_cbfn;
+ bfa_ioc_disable_cbfn_t disable_cbfn;
+ bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
+ bfa_ioc_reset_cbfn_t reset_cbfn;
+};
+
+/**
+ * IOC event notification mechanism.
+ */
+enum bfa_ioc_event {
+ BFA_IOC_E_ENABLED = 1,
+ BFA_IOC_E_DISABLED = 2,
+ BFA_IOC_E_FAILED = 3,
+};
+
+typedef void (*bfa_ioc_notify_cbfn_t)(void *, enum bfa_ioc_event);
+
+struct bfa_ioc_notify {
+ struct list_head qe;
+ bfa_ioc_notify_cbfn_t cbfn;
+ void *cbarg;
+};
+
+/**
+ * Initialize an IOC event notification structure
+ */
+#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \
+ (__notify)->cbfn = (__cbfn); \
+ (__notify)->cbarg = (__cbarg); \
+} while (0)
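+
+/*
+ * Typical usage by a common module (illustrative sketch only; the
+ * callback and cbarg names are hypothetical):
+ *
+ *	bfa_ioc_notify_init(&mod->ioc_notify, mod_ioc_notify_cb, mod);
+ *	bfa_nw_ioc_notify_register(ioc, &mod->ioc_notify);
+ */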
+
+struct bfa_iocpf {
+ bfa_fsm_t fsm;
+ struct bfa_ioc *ioc;
+ bool fw_mismatch_notified;
+ bool auto_recover;
+ u32 poll_time;
+};
+
+struct bfa_ioc {
+ bfa_fsm_t fsm;
+ struct bfa *bfa;
+ struct bfa_pcidev pcidev;
+ struct timer_list ioc_timer;
+ struct timer_list iocpf_timer;
+ struct timer_list sem_timer;
+ struct timer_list hb_timer;
+ u32 hb_count;
+ struct list_head notify_q;
+ void *dbg_fwsave;
+ int dbg_fwsave_len;
+ bool dbg_fwsave_once;
+ enum bfi_pcifn_class clscode;
+ struct bfa_ioc_regs ioc_regs;
+ struct bfa_ioc_drv_stats stats;
+ bool fcmode;
+ bool pllinit;
+ bool stats_busy; /*!< outstanding stats */
+ u8 port_id;
+
+ struct bfa_dma attr_dma;
+ struct bfi_ioc_attr *attr;
+ struct bfa_ioc_cbfn *cbfn;
+ struct bfa_ioc_mbox_mod mbox_mod;
+ const struct bfa_ioc_hwif *ioc_hwif;
+ struct bfa_iocpf iocpf;
+ enum bfi_asic_gen asic_gen;
+ enum bfi_asic_mode asic_mode;
+ enum bfi_port_mode port0_mode;
+ enum bfi_port_mode port1_mode;
+ enum bfa_mode port_mode;
+ u8 ad_cap_bm; /*!< adapter cap bit mask */
+ u8 port_mode_cfg; /*!< config port mode */
+};
+
+struct bfa_ioc_hwif {
+ enum bfa_status (*ioc_pll_init) (void __iomem *rb,
+ enum bfi_asic_mode m);
+ bool (*ioc_firmware_lock) (struct bfa_ioc *ioc);
+ void (*ioc_firmware_unlock) (struct bfa_ioc *ioc);
+ void (*ioc_reg_init) (struct bfa_ioc *ioc);
+ void (*ioc_map_port) (struct bfa_ioc *ioc);
+ void (*ioc_isr_mode_set) (struct bfa_ioc *ioc,
+ bool msix);
+ void (*ioc_notify_fail) (struct bfa_ioc *ioc);
+ void (*ioc_ownership_reset) (struct bfa_ioc *ioc);
+ bool (*ioc_sync_start) (struct bfa_ioc *ioc);
+ void (*ioc_sync_join) (struct bfa_ioc *ioc);
+ void (*ioc_sync_leave) (struct bfa_ioc *ioc);
+ void (*ioc_sync_ack) (struct bfa_ioc *ioc);
+ bool (*ioc_sync_complete) (struct bfa_ioc *ioc);
+ bool (*ioc_lpu_read_stat) (struct bfa_ioc *ioc);
+};
+
+#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
+#define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
+#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva)
+#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
+#define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen)
+#define bfa_ioc_fetch_stats(__ioc, __stats) \
+ (((__stats)->drv_stats) = (__ioc)->stats)
+#define bfa_ioc_clr_stats(__ioc) \
+ memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
+#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize)
+#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
+#define bfa_ioc_speed_sup(__ioc) \
+ BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
+#define bfa_ioc_get_nports(__ioc) \
+ BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
+
+#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
+#define bfa_ioc_stats_hb_count(_ioc, _hb_count) \
+ ((_ioc)->stats.hb_count = (_hb_count))
+#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
+#define BFA_IOC_FW_SMEM_SIZE(__ioc) \
+ ((bfa_ioc_asic_gen(__ioc) == BFI_ASIC_GEN_CB) \
+ ? BFI_SMEM_CB_SIZE : BFI_SMEM_CT_SIZE)
+#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS)
+#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
+#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
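+/*
+ * Example: with BFI_FLASH_CHUNK_SZ_WORDS words per chunk, word offset
+ * `off' lives in chunk (off / BFI_FLASH_CHUNK_SZ_WORDS) at word
+ * (off % BFI_FLASH_CHUNK_SZ_WORDS) within that chunk; see
+ * bfa_ioc_download_fw() for how these macros drive chunked image reads.
+ */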
+
+/**
+ * IOC mailbox interface
+ */
+bool bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc,
+ struct bfa_mbox_cmd *cmd,
+ bfa_mbox_cmd_cbfn_t cbfn, void *cbarg);
+void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc);
+void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
+ bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
+
+/**
+ * IOC interfaces
+ */
+
+#define bfa_ioc_pll_init_asic(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
+ (__ioc)->asic_mode))
+
+#define bfa_ioc_isr_mode_set(__ioc, __msix) do { \
+ if ((__ioc)->ioc_hwif->ioc_isr_mode_set) \
+ ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)); \
+} while (0)
+#define bfa_ioc_ownership_reset(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
+
+#define bfa_ioc_lpu_read_stat(__ioc) do { \
+ if ((__ioc)->ioc_hwif->ioc_lpu_read_stat) \
+ ((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc)); \
+} while (0)
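+
+/*
+ * bfa_ioc_isr_mode_set() and bfa_ioc_lpu_read_stat() check the hook
+ * for NULL before calling: not every ASIC generation implements these
+ * operations (the ct2 hwif leaves ioc_isr_mode_set NULL, the ct hwif
+ * leaves ioc_lpu_read_stat NULL), so the wrappers degrade to no-ops
+ * instead of dereferencing a NULL pointer.
+ */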
+
+void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc);
+void bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc);
+void bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc);
+
+void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa,
+ struct bfa_ioc_cbfn *cbfn);
+void bfa_nw_ioc_auto_recover(bool auto_recover);
+void bfa_nw_ioc_detach(struct bfa_ioc *ioc);
+void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
+ enum bfi_pcifn_class clscode);
+u32 bfa_nw_ioc_meminfo(void);
+void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa);
+void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
+void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
+
+void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
+bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc);
+void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
+void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
+ struct bfa_ioc_notify *notify);
+bool bfa_nw_ioc_sem_get(void __iomem *sem_reg);
+void bfa_nw_ioc_sem_release(void __iomem *sem_reg);
+void bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc);
+void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
+ struct bfi_ioc_image_hdr *fwhdr);
+bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
+ struct bfi_ioc_image_hdr *fwhdr);
+mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
+
+/*
+ * Timeout APIs
+ */
+void bfa_nw_ioc_timeout(void *ioc);
+void bfa_nw_ioc_hb_check(void *ioc);
+void bfa_nw_iocpf_timeout(void *ioc);
+void bfa_nw_iocpf_sem_timeout(void *ioc);
+
+/*
+ * F/W Image Size & Chunk
+ */
+u32 *bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off);
+u32 bfa_cb_image_get_size(enum bfi_asic_gen asic_gen);
+
+#endif /* __BFA_IOC_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
new file mode 100644
index 000000000000..348479bbfa3a
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -0,0 +1,878 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include "bfa_ioc.h"
+#include "cna.h"
+#include "bfi.h"
+#include "bfi_reg.h"
+#include "bfa_defs.h"
+
+#define bfa_ioc_ct_sync_pos(__ioc) \
+ ((u32) (1 << bfa_ioc_pcifn(__ioc)))
+#define BFA_IOC_SYNC_REQD_SH 16
+#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
+#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
+#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
+#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
+ (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
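+
+/*
+ * The ioc_fail_sync register is treated as two 16-bit bitmasks with
+ * one bit per PCI function: bits [15:0] hold the "sync acked" mask and
+ * bits [31:16] the "sync required" mask.  E.g. for PCI function 2,
+ *	bfa_ioc_ct_sync_pos(ioc)	== 0x00000004
+ *	bfa_ioc_ct_sync_reqd_pos(ioc)	== 0x00040000
+ */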
+
+/*
+ * forward declarations
+ */
+static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
+static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
+static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
+static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
+static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
+static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
+static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
+ enum bfi_asic_mode asic_mode);
+static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
+ enum bfi_asic_mode asic_mode);
+static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);
+
+static const struct bfa_ioc_hwif nw_hwif_ct = {
+ .ioc_pll_init = bfa_ioc_ct_pll_init,
+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
+ .ioc_reg_init = bfa_ioc_ct_reg_init,
+ .ioc_map_port = bfa_ioc_ct_map_port,
+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
+ .ioc_sync_start = bfa_ioc_ct_sync_start,
+ .ioc_sync_join = bfa_ioc_ct_sync_join,
+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
+ .ioc_sync_complete = bfa_ioc_ct_sync_complete,
+};
+
+static const struct bfa_ioc_hwif nw_hwif_ct2 = {
+ .ioc_pll_init = bfa_ioc_ct2_pll_init,
+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
+ .ioc_reg_init = bfa_ioc_ct2_reg_init,
+ .ioc_map_port = bfa_ioc_ct2_map_port,
+ .ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat,
+ .ioc_isr_mode_set = NULL,
+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
+ .ioc_sync_start = bfa_ioc_ct_sync_start,
+ .ioc_sync_join = bfa_ioc_ct_sync_join,
+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
+ .ioc_sync_complete = bfa_ioc_ct_sync_complete,
+};
+
+/**
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
+{
+ ioc->ioc_hwif = &nw_hwif_ct;
+}
+
+void
+bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
+{
+ ioc->ioc_hwif = &nw_hwif_ct2;
+}
+
+/**
+ * Return true if firmware of current driver matches the running firmware.
+ */
+static bool
+bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
+{
+ enum bfi_ioc_state ioc_fwstate;
+ u32 usecnt;
+ struct bfi_ioc_image_hdr fwhdr;
+
+ /**
+	 * If BIOS boot (flash based), do not increment the usage count
+ */
+ if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
+ BFA_IOC_FWIMG_MINSZ)
+ return true;
+
+ bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
+
+ /**
+ * If usage count is 0, always return TRUE.
+ */
+ if (usecnt == 0) {
+ writel(1, ioc->ioc_regs.ioc_usage_reg);
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ writel(0, ioc->ioc_regs.ioc_fail_sync);
+ return true;
+ }
+
+ ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+	/**
+	 * The use count cannot be non-zero while the chip is still in
+	 * the uninitialized state.
+	 */
+	BUG_ON(ioc_fwstate == BFI_IOC_UNINIT);
+
+ /**
+ * Check if another driver with a different firmware is active
+ */
+ bfa_nw_ioc_fwver_get(ioc, &fwhdr);
+ if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ return false;
+ }
+
+ /**
+ * Same firmware version. Increment the reference count.
+ */
+ usecnt++;
+ writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ return true;
+}
+
+static void
+bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
+{
+ u32 usecnt;
+
+ /**
+	 * If BIOS boot (flash based), do not decrement the usage count
+ */
+ if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
+ BFA_IOC_FWIMG_MINSZ)
+ return;
+
+ /**
+ * decrement usage count
+ */
+ bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
+	BUG_ON(usecnt == 0);
+
+ usecnt--;
+ writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
+
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+}
+
+/**
+ * Notify other functions on HB failure.
+ */
+static void
+bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
+{
+ writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
+ writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
+ /* Wait for halt to take effect */
+ readl(ioc->ioc_regs.ll_halt);
+ readl(ioc->ioc_regs.alt_ll_halt);
+}
+
+/**
+ * Host to LPU mailbox message addresses
+ */
+static const struct {
+ u32 hfn_mbox;
+ u32 lpu_mbox;
+ u32 hfn_pgn;
+} ct_fnreg[] = {
+ { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
+ { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
+ { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
+ { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
+};
+
+/**
+ * Host <-> LPU mailbox command/status registers - port 0
+ */
+static const struct {
+ u32 hfn;
+ u32 lpu;
+} ct_p0reg[] = {
+ { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
+ { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
+ { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
+ { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
+};
+
+/**
+ * Host <-> LPU mailbox command/status registers - port 1
+ */
+static const struct {
+ u32 hfn;
+ u32 lpu;
+} ct_p1reg[] = {
+ { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
+ { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
+ { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
+ { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
+};
+
+static const struct {
+ u32 hfn_mbox;
+ u32 lpu_mbox;
+ u32 hfn_pgn;
+ u32 hfn;
+ u32 lpu;
+ u32 lpu_read;
+} ct2_reg[] = {
+ { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
+ CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
+ CT2_HOSTFN_LPU0_READ_STAT},
+ { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
+ CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
+ CT2_HOSTFN_LPU1_READ_STAT},
+};
+
+static void
+bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
+{
+ void __iomem *rb;
+ int pcifn = bfa_ioc_pcifn(ioc);
+
+ rb = bfa_ioc_bar0(ioc);
+
+ ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
+ ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
+ ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
+
+ if (ioc->port_id == 0) {
+ ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
+ ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
+ ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+ ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
+ } else {
+ ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
+ ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
+ ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+ ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
+ }
+
+ /*
+ * PSS control registers
+ */
+ ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
+ ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
+ ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
+ ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;
+
+ /*
+ * IOC semaphore registers and serialization
+ */
+ ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
+ ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
+ ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
+ ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
+ ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;
+
+ /**
+ * sram memory access
+ */
+ ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
+ ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
+
+ /*
+ * err set reg : for notification of hb failure in fcmode
+ */
+ ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
+}
+
+static void
+bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
+{
+ void __iomem *rb;
+ int port = bfa_ioc_portid(ioc);
+
+ rb = bfa_ioc_bar0(ioc);
+
+ ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
+ ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
+ ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
+ ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
+ ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;
+
+ if (port == 0) {
+ ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
+ ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+ ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
+ } else {
+ ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
+ ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
+ ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+ ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
+ }
+
+ /*
+ * PSS control registers
+ */
+ ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
+ ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
+ ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
+ ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;
+
+ /*
+ * IOC semaphore registers and serialization
+ */
+ ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
+ ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
+ ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
+ ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
+ ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;
+
+ /**
+ * sram memory access
+ */
+ ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
+ ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
+
+ /*
+ * err set reg : for notification of hb failure in fcmode
+ */
+ ioc->ioc_regs.err_set = rb + ERR_SET_REG;
+}
+
+/**
+ * Initialize IOC to port mapping.
+ */
+
+#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
+static void
+bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
+{
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+ u32 r32;
+
+ /**
+ * For catapult, base port id on personality register and IOC type
+ */
+ r32 = readl(rb + FNC_PERS_REG);
+ r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
+ ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
+}
+
+static void
+bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
+{
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+ u32 r32;
+
+ r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
+ ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
+}
+
+/**
+ * Set interrupt mode for a function: INTX or MSIX
+ */
+static void
+bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
+{
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+ u32 r32, mode;
+
+ r32 = readl(rb + FNC_PERS_REG);
+
+ mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
+ __F0_INTX_STATUS;
+
+ /**
+ * If already in desired mode, do not change anything
+ */
+ if ((!msix && mode) || (msix && !mode))
+ return;
+
+ if (msix)
+ mode = __F0_INTX_STATUS_MSIX;
+ else
+ mode = __F0_INTX_STATUS_INTA;
+
+ r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
+ r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
+
+ writel(r32, rb + FNC_PERS_REG);
+}
+
+static bool
+bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
+{
+ u32 r32;
+
+ r32 = readl(ioc->ioc_regs.lpu_read_stat);
+ if (r32) {
+ writel(1, ioc->ioc_regs.lpu_read_stat);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * MSI-X resource allocation for 1860 with no asic block
+ */
+#define HOSTFN_MSIX_DEFAULT 64
+#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138
+#define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c
+#define __MSIX_VT_NUMVT__MK 0x003ff800
+#define __MSIX_VT_NUMVT__SH 11
+#define __MSIX_VT_NUMVT_(_v) ((_v) << __MSIX_VT_NUMVT__SH)
+#define __MSIX_VT_OFST_ 0x000007ff
+void
+bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
+{
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+ u32 r32;
+
+ r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
+ if (r32 & __MSIX_VT_NUMVT__MK) {
+ writel(r32 & __MSIX_VT_OFST_,
+ rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
+ return;
+ }
+
+ writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
+ HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
+ rb + HOSTFN_MSIX_VT_OFST_NUMVT);
+ writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
+ rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
+}
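+
+/*
+ * Worked example of the carve-up above: with HOSTFN_MSIX_DEFAULT == 64,
+ * PCI function 1 claims vectors 64..127 -- NUMVT is programmed with 63
+ * (vector count minus one), and the function's vector offset of 64 is
+ * also used as the mailbox-error vector index.
+ */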
+
+/**
+ * Cleanup hw semaphore and usecnt registers
+ */
+static void
+bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
+{
+ bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ writel(0, ioc->ioc_regs.ioc_usage_reg);
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+
+ /*
+ * Read the hw sem reg to make sure that it is locked
+ * before we clear it. If it is not locked, writing 1
+ * will lock it instead of clearing it.
+ */
+ readl(ioc->ioc_regs.ioc_sem_reg);
+ bfa_nw_ioc_hw_sem_release(ioc);
+}
+
+/**
+ * Synchronized IOC failure processing routines
+ */
+static bool
+bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+
+ /*
+ * Driver load time. If the sync required bit for this PCI fn
+ * is set, it is due to an unclean exit by the driver for this
+ * PCI fn in the previous incarnation. Whoever comes here first
+ * should clean it up, no matter which PCI fn.
+ */
+
+ if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
+ writel(0, ioc->ioc_regs.ioc_fail_sync);
+ writel(1, ioc->ioc_regs.ioc_usage_reg);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+ return true;
+ }
+
+ return bfa_ioc_ct_sync_complete(ioc);
+}
+
+/**
+ * Synchronized IOC failure processing routines
+ */
+static void
+bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
+
+ writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static void
+bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
+ bfa_ioc_ct_sync_pos(ioc);
+
+ writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static void
+bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+
+ writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static bool
+bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+ u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
+ u32 tmp_ackd;
+
+ if (sync_ackd == 0)
+ return true;
+
+ /**
+ * The check below is to see whether any other PCI fn
+ * has reinitialized the ASIC (reset sync_ackd bits)
+ * and failed again while this IOC was waiting for hw
+ * semaphore (in bfa_iocpf_sm_semwait()).
+ */
+ tmp_ackd = sync_ackd;
+ if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
+ !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
+ sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
+
+ if (sync_reqd == sync_ackd) {
+ writel(bfa_ioc_ct_clear_sync_ackd(r32),
+ ioc->ioc_regs.ioc_fail_sync);
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
+ return true;
+ }
+
+ /**
+ * If another PCI fn reinitialized and failed again while
+ * this IOC was waiting for hw sem, the sync_ackd bit for
+	 * this IOC needs to be set again to allow reinitialization.
+ */
+ if (tmp_ackd != sync_ackd)
+ writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
+
+ return false;
+}
+
+static enum bfa_status
+bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
+{
+ u32 pll_sclk, pll_fclk, r32;
+ bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);
+
+ pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
+ __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
+ __APP_PLL_SCLK_JITLMT0_1(3U) |
+ __APP_PLL_SCLK_CNTLMT0_1(1U);
+ pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
+ __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
+ __APP_PLL_LCLK_JITLMT0_1(3U) |
+ __APP_PLL_LCLK_CNTLMT0_1(1U);
+
+ if (fcmode) {
+ writel(0, (rb + OP_MODE));
+ writel(__APP_EMS_CMLCKSEL |
+ __APP_EMS_REFCKBUFEN2 |
+ __APP_EMS_CHANNEL_SEL,
+ (rb + ETH_MAC_SER_REG));
+ } else {
+ writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
+ writel(__APP_EMS_REFCKBUFEN1,
+ (rb + ETH_MAC_SER_REG));
+ }
+ writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
+ writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+ writel(pll_sclk |
+ __APP_PLL_SCLK_LOGIC_SOFT_RESET,
+ rb + APP_PLL_SCLK_CTL_REG);
+ writel(pll_fclk |
+ __APP_PLL_LCLK_LOGIC_SOFT_RESET,
+ rb + APP_PLL_LCLK_CTL_REG);
+ writel(pll_sclk |
+ __APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE,
+ rb + APP_PLL_SCLK_CTL_REG);
+ writel(pll_fclk |
+ __APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE,
+ rb + APP_PLL_LCLK_CTL_REG);
+ readl(rb + HOSTFN0_INT_MSK);
+ udelay(2000);
+ writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+ writel(pll_sclk |
+ __APP_PLL_SCLK_ENABLE,
+ rb + APP_PLL_SCLK_CTL_REG);
+ writel(pll_fclk |
+ __APP_PLL_LCLK_ENABLE,
+ rb + APP_PLL_LCLK_CTL_REG);
+
+ if (!fcmode) {
+ writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
+ writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
+ }
+ r32 = readl((rb + PSS_CTL_REG));
+ r32 &= ~__PSS_LMEM_RESET;
+ writel(r32, (rb + PSS_CTL_REG));
+ udelay(1000);
+ if (!fcmode) {
+ writel(0, (rb + PMM_1T_RESET_REG_P0));
+ writel(0, (rb + PMM_1T_RESET_REG_P1));
+ }
+
+ writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
+ udelay(1000);
+ r32 = readl((rb + MBIST_STAT_REG));
+ writel(0, (rb + MBIST_CTL_REG));
+ return BFA_STATUS_OK;
+}
+
+static void
+bfa_ioc_ct2_sclk_init(void __iomem *rb)
+{
+ u32 r32;
+
+ /*
+ * put s_clk PLL and PLL FSM in reset
+ */
+ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
+ r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
+ __APP_PLL_SCLK_LOGIC_SOFT_RESET);
+ writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ /*
+ * Ignore mode and program for the max clock (which is FC16)
+	 * Firmware/NFC will do the PLL init appropriately
+ */
+ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
+ writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ /*
+	 * While doing PLL init, don't clock-gate the ethernet subsystem
+ */
+ r32 = readl((rb + CT2_CHIP_MISC_PRG));
+ writel((r32 | __ETH_CLK_ENABLE_PORT0),
+ (rb + CT2_CHIP_MISC_PRG));
+
+ r32 = readl((rb + CT2_PCIE_MISC_REG));
+ writel((r32 | __ETH_CLK_ENABLE_PORT1),
+ (rb + CT2_PCIE_MISC_REG));
+
+ /*
+ * set sclk value
+ */
+ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
+ __APP_PLL_SCLK_CLK_DIV2);
+ writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ /*
+ * poll for s_clk lock or delay 1ms
+ */
+ udelay(1000);
+
+ /*
+	 * Don't do clock gating for the ethernet subsystem; the
+	 * firmware/NFC will do this appropriately
+ */
+}
+
+static void
+bfa_ioc_ct2_lclk_init(void __iomem *rb)
+{
+ u32 r32;
+
+ /*
+ * put l_clk PLL and PLL FSM in reset
+ */
+ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
+ r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
+ __APP_PLL_LCLK_LOGIC_SOFT_RESET);
+ writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+ /*
+ * set LPU speed (set for FC16 which will work for other modes)
+ */
+ r32 = readl((rb + CT2_CHIP_MISC_PRG));
+ writel(r32, (rb + CT2_CHIP_MISC_PRG));
+
+ /*
+ * set LPU half speed (set for FC16 which will work for other modes)
+ */
+ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+ /*
+ * set lclk for mode (set for FC16)
+ */
+ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
+ r32 |= 0x20c1731b;
+ writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+ /*
+	 * poll for l_clk lock or delay 1ms
+ */
+ udelay(1000);
+}
+
+static void
+bfa_ioc_ct2_mem_init(void __iomem *rb)
+{
+ u32 r32;
+
+ r32 = readl((rb + PSS_CTL_REG));
+ r32 &= ~__PSS_LMEM_RESET;
+ writel(r32, (rb + PSS_CTL_REG));
+ udelay(1000);
+
+ writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
+ udelay(1000);
+ writel(0, (rb + CT2_MBIST_CTL_REG));
+}
+
+static void
+bfa_ioc_ct2_mac_reset(void __iomem *rb)
+{
+ volatile u32 r32;
+
+ bfa_ioc_ct2_sclk_init(rb);
+ bfa_ioc_ct2_lclk_init(rb);
+
+ /*
+	 * release soft reset on s_clk
+ */
+ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
+ (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ /*
+	 * release soft reset on l_clk
+ */
+ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET),
+ (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+ /* put port0, port1 MAC & AHB in reset */
+ writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
+ (rb + CT2_CSI_MAC_CONTROL_REG(0)));
+ writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
+ (rb + CT2_CSI_MAC_CONTROL_REG(1)));
+}
+
+#define CT2_NFC_MAX_DELAY 1000
+static enum bfa_status
+bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
+{
+ volatile u32 wgn, r32;
+ int i;
+
+ /*
+ * Initialize PLL if not already done by NFC
+ */
+ wgn = readl(rb + CT2_WGN_STATUS);
+ if (!(wgn & __GLBL_PF_VF_CFG_RDY)) {
+ writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
+ for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
+ r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+ if (r32 & __NFC_CONTROLLER_HALTED)
+ break;
+ udelay(1000);
+ }
+ }
+
+ /*
+ * Mask the interrupts and clear any
+ * pending interrupts left by BIOS/EFI
+ */
+
+ writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
+ writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
+
+ r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+ if (r32 == 1) {
+ writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
+ readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+ }
+ r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+ if (r32 == 1) {
+ writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
+ readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+ }
+
+ bfa_ioc_ct2_mac_reset(rb);
+ bfa_ioc_ct2_sclk_init(rb);
+ bfa_ioc_ct2_lclk_init(rb);
+
+ /*
+	 * release soft reset on s_clk
+ */
+ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
+ (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ /*
+	 * release soft reset on l_clk
+ */
+ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+ (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+ /*
+	 * Announce flash device presence if the flash was corrupted.
+ */
+ if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
+ r32 = readl((rb + PSS_GPIO_OUT_REG));
+ writel((r32 & ~1), (rb + PSS_GPIO_OUT_REG));
+ r32 = readl((rb + PSS_GPIO_OE_REG));
+ writel((r32 | 1), (rb + PSS_GPIO_OE_REG));
+ }
+
+ bfa_ioc_ct2_mem_init(rb);
+
+ writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
+ writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
+ return BFA_STATUS_OK;
+}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
new file mode 100644
index 000000000000..dd36427f4752
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
@@ -0,0 +1,669 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ * @file bfa_msgq.c MSGQ module source file.
+ */
+
+#include "bfi.h"
+#include "bfa_msgq.h"
+#include "bfa_ioc.h"
+
+#define call_cmdq_ent_cbfn(_cmdq_ent, _status) \
+{ \
+ bfa_msgq_cmdcbfn_t cbfn; \
+ void *cbarg; \
+ cbfn = (_cmdq_ent)->cbfn; \
+ cbarg = (_cmdq_ent)->cbarg; \
+ (_cmdq_ent)->cbfn = NULL; \
+ (_cmdq_ent)->cbarg = NULL; \
+ if (cbfn) { \
+ cbfn(cbarg, (_status)); \
+ } \
+}
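+
+/*
+ * Note that the callback pointer is snapshotted and cleared *before*
+ * it is invoked, so the entry can safely be re-posted (or freed) from
+ * within the callback without the completion firing twice.
+ */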
+
+static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
+static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);
+
+enum cmdq_event {
+ CMDQ_E_START = 1,
+ CMDQ_E_STOP = 2,
+ CMDQ_E_FAIL = 3,
+ CMDQ_E_POST = 4,
+ CMDQ_E_INIT_RESP = 5,
+ CMDQ_E_DB_READY = 6,
+};
+
+bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
+bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
+bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
+bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
+ enum cmdq_event);
+
+static void
+cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
+{
+ struct bfa_msgq_cmd_entry *cmdq_ent;
+
+ cmdq->producer_index = 0;
+ cmdq->consumer_index = 0;
+ cmdq->flags = 0;
+ cmdq->token = 0;
+ cmdq->offset = 0;
+ cmdq->bytes_to_copy = 0;
+ while (!list_empty(&cmdq->pending_q)) {
+ bfa_q_deq(&cmdq->pending_q, &cmdq_ent);
+ bfa_q_qe_init(&cmdq_ent->qe);
+ call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
+ }
+}
+
+static void
+cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
+{
+ switch (event) {
+ case CMDQ_E_START:
+ bfa_fsm_set_state(cmdq, cmdq_sm_init_wait);
+ break;
+
+ case CMDQ_E_STOP:
+ case CMDQ_E_FAIL:
+ /* No-op */
+ break;
+
+ case CMDQ_E_POST:
+ cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq *cmdq)
+{
+ bfa_wc_down(&cmdq->msgq->init_wc);
+}
+
+static void
+cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
+{
+ switch (event) {
+ case CMDQ_E_STOP:
+ case CMDQ_E_FAIL:
+ bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
+ break;
+
+ case CMDQ_E_POST:
+ cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
+ break;
+
+ case CMDQ_E_INIT_RESP:
+ if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
+ cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
+ bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
+ } else
+ bfa_fsm_set_state(cmdq, cmdq_sm_ready);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq)
+{
+}
+
+static void
+cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
+{
+ switch (event) {
+ case CMDQ_E_STOP:
+ case CMDQ_E_FAIL:
+ bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
+ break;
+
+ case CMDQ_E_POST:
+ bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq)
+{
+ bfa_msgq_cmdq_dbell(cmdq);
+}
+
+static void
+cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
+{
+ switch (event) {
+ case CMDQ_E_STOP:
+ case CMDQ_E_FAIL:
+ bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
+ break;
+
+ case CMDQ_E_POST:
+ cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
+ break;
+
+ case CMDQ_E_DB_READY:
+ if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
+ cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
+ bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
+ } else
+ bfa_fsm_set_state(cmdq, cmdq_sm_ready);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bfa_msgq_cmdq_dbell_ready(void *arg)
+{
+ struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;
+ bfa_fsm_send_event(cmdq, CMDQ_E_DB_READY);
+}
+
+static void
+bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq)
+{
+ struct bfi_msgq_h2i_db *dbell =
+ (struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]);
+
+ memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
+ bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_PI, 0);
+ dbell->mh.mtag.i2htok = 0;
+ dbell->idx.cmdq_pi = htons(cmdq->producer_index);
+
+ if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb,
+ bfa_msgq_cmdq_dbell_ready, cmdq)) {
+ bfa_msgq_cmdq_dbell_ready(cmdq);
+ }
+}
+
+static void
+__cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
+{
+ size_t len = cmd->msg_size;
+ int num_entries = 0;
+ size_t to_copy;
+ u8 *src, *dst;
+
+ src = (u8 *)cmd->msg_hdr;
+ dst = (u8 *)cmdq->addr.kva;
+ dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
+
+ while (len) {
+ to_copy = (len < BFI_MSGQ_CMD_ENTRY_SIZE) ?
+ len : BFI_MSGQ_CMD_ENTRY_SIZE;
+ memcpy(dst, src, to_copy);
+ len -= to_copy;
+ src += BFI_MSGQ_CMD_ENTRY_SIZE;
+ BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);
+ dst = (u8 *)cmdq->addr.kva;
+ dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
+ num_entries++;
+ }
+
+}
+
+static void
+bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
+{
+ struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
+ struct bfa_msgq_cmd_entry *cmd;
+ int posted = 0;
+
+ cmdq->consumer_index = ntohs(dbell->idx.cmdq_ci);
+
+ /* Walk through pending list to see if the command can be posted */
+ while (!list_empty(&cmdq->pending_q)) {
+ cmd =
+ (struct bfa_msgq_cmd_entry *)bfa_q_first(&cmdq->pending_q);
+ if (ntohs(cmd->msg_hdr->num_entries) <=
+ BFA_MSGQ_FREE_CNT(cmdq)) {
+ list_del(&cmd->qe);
+ __cmd_copy(cmdq, cmd);
+ posted = 1;
+ call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
+ } else {
+ break;
+ }
+ }
+
+ if (posted)
+ bfa_fsm_send_event(cmdq, CMDQ_E_POST);
+}
+
+static void
+bfa_msgq_cmdq_copy_next(void *arg)
+{
+ struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;
+
+ if (cmdq->bytes_to_copy)
+ bfa_msgq_cmdq_copy_rsp(cmdq);
+}
+
+static void
+bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
+{
+ struct bfi_msgq_i2h_cmdq_copy_req *req =
+ (struct bfi_msgq_i2h_cmdq_copy_req *)mb;
+
+ cmdq->token = 0;
+ cmdq->offset = ntohs(req->offset);
+ cmdq->bytes_to_copy = ntohs(req->len);
+ bfa_msgq_cmdq_copy_rsp(cmdq);
+}
+
+static void
+bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq)
+{
+ struct bfi_msgq_h2i_cmdq_copy_rsp *rsp =
+ (struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0];
+ int copied;
+ u8 *addr = (u8 *)cmdq->addr.kva;
+
+ memset(rsp, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp));
+ bfi_h2i_set(rsp->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_CMDQ_COPY_RSP, 0);
+ rsp->mh.mtag.i2htok = htons(cmdq->token);
+ copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? BFI_CMD_COPY_SZ :
+ cmdq->bytes_to_copy;
+ addr += cmdq->offset;
+ memcpy(rsp->data, addr, copied);
+
+ cmdq->token++;
+ cmdq->offset += copied;
+ cmdq->bytes_to_copy -= copied;
+
+ if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb,
+ bfa_msgq_cmdq_copy_next, cmdq)) {
+ bfa_msgq_cmdq_copy_next(cmdq);
+ }
+}
+
+static void
+bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq *msgq)
+{
+ cmdq->depth = BFA_MSGQ_CMDQ_NUM_ENTRY;
+ INIT_LIST_HEAD(&cmdq->pending_q);
+ cmdq->msgq = msgq;
+ bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
+}
+
+static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);
+
+enum rspq_event {
+ RSPQ_E_START = 1,
+ RSPQ_E_STOP = 2,
+ RSPQ_E_FAIL = 3,
+ RSPQ_E_RESP = 4,
+ RSPQ_E_INIT_RESP = 5,
+ RSPQ_E_DB_READY = 6,
+};
+
+bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
+bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq,
+ enum rspq_event);
+bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
+bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq,
+ enum rspq_event);
+
+static void
+rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq)
+{
+ rspq->producer_index = 0;
+ rspq->consumer_index = 0;
+ rspq->flags = 0;
+}
+
+static void
+rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event)
+{
+ switch (event) {
+ case RSPQ_E_START:
+ bfa_fsm_set_state(rspq, rspq_sm_init_wait);
+ break;
+
+ case RSPQ_E_STOP:
+ case RSPQ_E_FAIL:
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq)
+{
+ bfa_wc_down(&rspq->msgq->init_wc);
+}
+
+static void
+rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
+{
+ switch (event) {
+ case RSPQ_E_FAIL:
+ case RSPQ_E_STOP:
+ bfa_fsm_set_state(rspq, rspq_sm_stopped);
+ break;
+
+ case RSPQ_E_INIT_RESP:
+ bfa_fsm_set_state(rspq, rspq_sm_ready);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)
+{
+}
+
+static void
+rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event)
+{
+ switch (event) {
+ case RSPQ_E_STOP:
+ case RSPQ_E_FAIL:
+ bfa_fsm_set_state(rspq, rspq_sm_stopped);
+ break;
+
+ case RSPQ_E_RESP:
+ bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq)
+{
+ if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc))
+ bfa_msgq_rspq_dbell(rspq);
+}
+
+static void
+rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
+{
+ switch (event) {
+ case RSPQ_E_STOP:
+ case RSPQ_E_FAIL:
+ bfa_fsm_set_state(rspq, rspq_sm_stopped);
+ break;
+
+ case RSPQ_E_RESP:
+ rspq->flags |= BFA_MSGQ_RSPQ_F_DB_UPDATE;
+ break;
+
+ case RSPQ_E_DB_READY:
+ if (rspq->flags & BFA_MSGQ_RSPQ_F_DB_UPDATE) {
+ rspq->flags &= ~BFA_MSGQ_RSPQ_F_DB_UPDATE;
+ bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
+ } else
+ bfa_fsm_set_state(rspq, rspq_sm_ready);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bfa_msgq_rspq_dbell_ready(void *arg)
+{
+ struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg;
+ bfa_fsm_send_event(rspq, RSPQ_E_DB_READY);
+}
+
+static void
+bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq)
+{
+ struct bfi_msgq_h2i_db *dbell =
+ (struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]);
+
+ memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
+ bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_CI, 0);
+ dbell->mh.mtag.i2htok = 0;
+ dbell->idx.rspq_ci = htons(rspq->consumer_index);
+
+ if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb,
+ bfa_msgq_rspq_dbell_ready, rspq)) {
+ bfa_msgq_rspq_dbell_ready(rspq);
+ }
+}
+
+static void
+bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb)
+{
+ struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
+ struct bfi_msgq_mhdr *msghdr;
+ int num_entries;
+ int mc;
+ u8 *rspq_qe;
+
+ rspq->producer_index = ntohs(dbell->idx.rspq_pi);
+
+ while (rspq->consumer_index != rspq->producer_index) {
+ rspq_qe = (u8 *)rspq->addr.kva;
+ rspq_qe += (rspq->consumer_index * BFI_MSGQ_RSP_ENTRY_SIZE);
+ msghdr = (struct bfi_msgq_mhdr *)rspq_qe;
+
+ mc = msghdr->msg_class;
+ num_entries = ntohs(msghdr->num_entries);
+
+ if ((mc >= BFI_MC_MAX) || (rspq->rsphdlr[mc].cbfn == NULL))
+ break;
+
+ (rspq->rsphdlr[mc].cbfn)(rspq->rsphdlr[mc].cbarg, msghdr);
+
+ BFA_MSGQ_INDX_ADD(rspq->consumer_index, num_entries,
+ rspq->depth);
+ }
+
+ bfa_fsm_send_event(rspq, RSPQ_E_RESP);
+}
+
+static void
+bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq)
+{
+ rspq->depth = BFA_MSGQ_RSPQ_NUM_ENTRY;
+ rspq->msgq = msgq;
+ bfa_fsm_set_state(rspq, rspq_sm_stopped);
+}
+
+static void
+bfa_msgq_init_rsp(struct bfa_msgq *msgq,
+ struct bfi_mbmsg *mb)
+{
+ bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_INIT_RESP);
+ bfa_fsm_send_event(&msgq->rspq, RSPQ_E_INIT_RESP);
+}
+
+static void
+bfa_msgq_init(void *arg)
+{
+ struct bfa_msgq *msgq = (struct bfa_msgq *)arg;
+ struct bfi_msgq_cfg_req *msgq_cfg =
+ (struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0];
+
+ memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req));
+ bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0);
+ msgq_cfg->mh.mtag.i2htok = 0;
+
+ bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa);
+ msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
+ bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa);
+ msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);
+
+ bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL);
+}
+
+static void
+bfa_msgq_isr(void *cbarg, struct bfi_mbmsg *msg)
+{
+ struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;
+
+ switch (msg->mh.msg_id) {
+ case BFI_MSGQ_I2H_INIT_RSP:
+ bfa_msgq_init_rsp(msgq, msg);
+ break;
+
+ case BFI_MSGQ_I2H_DOORBELL_PI:
+ bfa_msgq_rspq_pi_update(&msgq->rspq, msg);
+ break;
+
+ case BFI_MSGQ_I2H_DOORBELL_CI:
+ bfa_msgq_cmdq_ci_update(&msgq->cmdq, msg);
+ break;
+
+ case BFI_MSGQ_I2H_CMDQ_COPY_REQ:
+ bfa_msgq_cmdq_copy_req(&msgq->cmdq, msg);
+ break;
+
+ default:
+ BUG_ON(1);
+ }
+}
+
+static void
+bfa_msgq_notify(void *cbarg, enum bfa_ioc_event event)
+{
+ struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;
+
+ switch (event) {
+ case BFA_IOC_E_ENABLED:
+ bfa_wc_init(&msgq->init_wc, bfa_msgq_init, msgq);
+ bfa_wc_up(&msgq->init_wc);
+ bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_START);
+ bfa_wc_up(&msgq->init_wc);
+ bfa_fsm_send_event(&msgq->rspq, RSPQ_E_START);
+ bfa_wc_wait(&msgq->init_wc);
+ break;
+
+ case BFA_IOC_E_DISABLED:
+ bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_STOP);
+ bfa_fsm_send_event(&msgq->rspq, RSPQ_E_STOP);
+ break;
+
+ case BFA_IOC_E_FAILED:
+ bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_FAIL);
+ bfa_fsm_send_event(&msgq->rspq, RSPQ_E_FAIL);
+ break;
+
+ default:
+ break;
+ }
+}
+
+u32
+bfa_msgq_meminfo(void)
+{
+ return roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ) +
+ roundup(BFA_MSGQ_RSPQ_SIZE, BFA_DMA_ALIGN_SZ);
+}
+
+void
+bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa)
+{
+ msgq->cmdq.addr.kva = kva;
+ msgq->cmdq.addr.pa = pa;
+
+ kva += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);
+ pa += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);
+
+ msgq->rspq.addr.kva = kva;
+ msgq->rspq.addr.pa = pa;
+}
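+
+/*
+ * The single DMA block claimed above is split in place: the command
+ * queue takes the first roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ)
+ * bytes and the response queue the remainder, mirroring the total
+ * returned by bfa_msgq_meminfo().
+ */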
+
+void
+bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
+{
+ msgq->ioc = ioc;
+
+ bfa_msgq_cmdq_attach(&msgq->cmdq, msgq);
+ bfa_msgq_rspq_attach(&msgq->rspq, msgq);
+
+ bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
+ bfa_q_qe_init(&msgq->ioc_notify);
+ bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
+ bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
+}
+
+void
+bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc,
+ bfa_msgq_mcfunc_t cbfn, void *cbarg)
+{
+ msgq->rspq.rsphdlr[mc].cbfn = cbfn;
+ msgq->rspq.rsphdlr[mc].cbarg = cbarg;
+}
+
+void
+bfa_msgq_cmd_post(struct bfa_msgq *msgq, struct bfa_msgq_cmd_entry *cmd)
+{
+ if (ntohs(cmd->msg_hdr->num_entries) <=
+ BFA_MSGQ_FREE_CNT(&msgq->cmdq)) {
+ __cmd_copy(&msgq->cmdq, cmd);
+ call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
+ bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_POST);
+ } else {
+ list_add_tail(&cmd->qe, &msgq->cmdq.pending_q);
+ }
+}
+
+void
+bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len)
+{
+ struct bfa_msgq_rspq *rspq = &msgq->rspq;
+ size_t len = buf_len;
+ size_t to_copy;
+ int ci;
+ u8 *src, *dst;
+
+ ci = rspq->consumer_index;
+ src = (u8 *)rspq->addr.kva;
+ src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
+ dst = buf;
+
+ while (len) {
+ to_copy = (len < BFI_MSGQ_RSP_ENTRY_SIZE) ?
+ len : BFI_MSGQ_RSP_ENTRY_SIZE;
+ memcpy(dst, src, to_copy);
+ len -= to_copy;
+ dst += BFI_MSGQ_RSP_ENTRY_SIZE;
+ BFA_MSGQ_INDX_ADD(ci, 1, rspq->depth);
+ src = (u8 *)rspq->addr.kva;
+ src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
+ }
+}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.h b/drivers/net/ethernet/brocade/bna/bfa_msgq.h
new file mode 100644
index 000000000000..a6a565a366dc
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.h
@@ -0,0 +1,130 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __BFA_MSGQ_H__
+#define __BFA_MSGQ_H__
+
+#include "bfa_defs.h"
+#include "bfi.h"
+#include "bfa_ioc.h"
+#include "bfa_cs.h"
+
+#define BFA_MSGQ_FREE_CNT(_q) \
+ (((_q)->consumer_index - (_q)->producer_index - 1) & ((_q)->depth - 1))
+
+#define BFA_MSGQ_INDX_ADD(_q_indx, _qe_num, _q_depth) \
+ ((_q_indx) = (((_q_indx) + (_qe_num)) & ((_q_depth) - 1)))
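+
+/*
+ * Both queues are power-of-two rings, so wrap-around and the free
+ * count reduce to a mask.  Worked example with depth == 128,
+ * producer_index == 5 and consumer_index == 3:
+ *	BFA_MSGQ_FREE_CNT(q) == (3 - 5 - 1) & 127 == 125
+ * and BFA_MSGQ_INDX_ADD of 2 onto an index of 127 wraps it to 1.
+ */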
+
+#define BFA_MSGQ_CMDQ_NUM_ENTRY 128
+#define BFA_MSGQ_CMDQ_SIZE \
+ (BFI_MSGQ_CMD_ENTRY_SIZE * BFA_MSGQ_CMDQ_NUM_ENTRY)
+
+#define BFA_MSGQ_RSPQ_NUM_ENTRY 128
+#define BFA_MSGQ_RSPQ_SIZE \
+ (BFI_MSGQ_RSP_ENTRY_SIZE * BFA_MSGQ_RSPQ_NUM_ENTRY)
+
+#define bfa_msgq_cmd_set(_cmd, _cbfn, _cbarg, _msg_size, _msg_hdr) \
+do { \
+ (_cmd)->cbfn = (_cbfn); \
+ (_cmd)->cbarg = (_cbarg); \
+ (_cmd)->msg_size = (_msg_size); \
+ (_cmd)->msg_hdr = (_msg_hdr); \
+} while (0)
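+
+/*
+ * A minimal posting sketch (the callback names are illustrative only):
+ *
+ *	struct bfa_msgq_cmd_entry cmd;
+ *
+ *	bfa_msgq_cmd_set(&cmd, my_cbfn, my_cbarg, msg_size, msg_hdr);
+ *	bfa_msgq_cmd_post(msgq, &cmd);
+ *
+ * bfa_msgq_cmd_post() copies the message into the command queue
+ * immediately if there is room, otherwise the entry is parked on the
+ * pending queue until the firmware consumes enough entries.
+ */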
+
+struct bfa_msgq;
+
+typedef void (*bfa_msgq_cmdcbfn_t)(void *cbarg, enum bfa_status status);
+
+struct bfa_msgq_cmd_entry {
+ struct list_head qe;
+ bfa_msgq_cmdcbfn_t cbfn;
+ void *cbarg;
+ size_t msg_size;
+ struct bfi_msgq_mhdr *msg_hdr;
+};
+
+enum bfa_msgq_cmdq_flags {
+ BFA_MSGQ_CMDQ_F_DB_UPDATE = 1,
+};
+
+struct bfa_msgq_cmdq {
+ bfa_fsm_t fsm;
+ enum bfa_msgq_cmdq_flags flags;
+
+ u16 producer_index;
+ u16 consumer_index;
+ u16 depth; /* FW Q depth is 16 bits */
+ struct bfa_dma addr;
+ struct bfa_mbox_cmd dbell_mb;
+
+ u16 token;
+ int offset;
+ int bytes_to_copy;
+ struct bfa_mbox_cmd copy_mb;
+
+ struct list_head pending_q; /* pending command queue */
+
+ struct bfa_msgq *msgq;
+};
+
+enum bfa_msgq_rspq_flags {
+ BFA_MSGQ_RSPQ_F_DB_UPDATE = 1,
+};
+
+typedef void (*bfa_msgq_mcfunc_t)(void *cbarg, struct bfi_msgq_mhdr *mhdr);
+
+struct bfa_msgq_rspq {
+ bfa_fsm_t fsm;
+ enum bfa_msgq_rspq_flags flags;
+
+ u16 producer_index;
+ u16 consumer_index;
+ u16 depth; /* FW Q depth is 16 bits */
+ struct bfa_dma addr;
+ struct bfa_mbox_cmd dbell_mb;
+
+ int nmclass;
+ struct {
+ bfa_msgq_mcfunc_t cbfn;
+ void *cbarg;
+ } rsphdlr[BFI_MC_MAX];
+
+ struct bfa_msgq *msgq;
+};
+
+struct bfa_msgq {
+ struct bfa_msgq_cmdq cmdq;
+ struct bfa_msgq_rspq rspq;
+
+ struct bfa_wc init_wc;
+ struct bfa_mbox_cmd init_mb;
+
+ struct bfa_ioc_notify ioc_notify;
+ struct bfa_ioc *ioc;
+};
+
+u32 bfa_msgq_meminfo(void);
+void bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa);
+void bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc);
+void bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc,
+ bfa_msgq_mcfunc_t cbfn, void *cbarg);
+void bfa_msgq_cmd_post(struct bfa_msgq *msgq,
+ struct bfa_msgq_cmd_entry *cmd);
+void bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len);
+
+#endif
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
new file mode 100644
index 000000000000..7a1393aabd43
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -0,0 +1,481 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BFI_H__
+#define __BFI_H__
+
+#include "bfa_defs.h"
+
+#pragma pack(1)
+
+/**
+ * BFI FW image type
+ */
+#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
+#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
+
+/**
+ * Msg header common to all msgs
+ */
+struct bfi_mhdr {
+ u8 msg_class; /*!< @ref enum bfi_mclass */
+ u8 msg_id; /*!< msg opcode with in the class */
+ union {
+ struct {
+ u8 qid;
+ u8 fn_lpu; /*!< msg destination */
+ } h2i;
+ u16 i2htok; /*!< token in msgs to host */
+ } mtag;
+};
+
+#define bfi_fn_lpu(__fn, __lpu) ((__fn) << 1 | (__lpu))
+#define bfi_mhdr_2_fn(_mh) ((_mh)->mtag.h2i.fn_lpu >> 1)
+#define bfi_mhdr_2_qid(_mh) ((_mh)->mtag.h2i.qid)
+
+#define bfi_h2i_set(_mh, _mc, _op, _fn_lpu) do { \
+ (_mh).msg_class = (_mc); \
+ (_mh).msg_id = (_op); \
+ (_mh).mtag.h2i.fn_lpu = (_fn_lpu); \
+} while (0)
+
+#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \
+ (_mh).msg_class = (_mc); \
+ (_mh).msg_id = (_op); \
+ (_mh).mtag.i2htok = (_i2htok); \
+} while (0)
+
+/*
+ * Message opcodes: 0-127 to firmware, 128-255 to host
+ */
+#define BFI_I2H_OPCODE_BASE 128
+#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
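+
+/*
+ * E.g. a host-to-firmware opcode of 1 pairs with the firmware-to-host
+ * reply opcode BFA_I2HM(1) == 129, as in the BFI_MSGQ_H2I_INIT_REQ /
+ * BFI_MSGQ_I2H_INIT_RSP pair below.
+ */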
+
+/**
+ ****************************************************************************
+ *
+ * Scatter Gather Element and Page definition
+ *
+ ****************************************************************************
+ */
+
+/**
+ * DMA addresses
+ */
+union bfi_addr_u {
+ struct {
+ u32 addr_lo;
+ u32 addr_hi;
+ } a32;
+};
+
+/*
+ * Large Message structure - 128 Bytes size Msgs
+ */
+#define BFI_LMSG_SZ 128
+#define BFI_LMSG_PL_WSZ \
+ ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4)
+
+/**
+ * Mailbox message structure
+ */
+#define BFI_MBMSG_SZ 7
+struct bfi_mbmsg {
+ struct bfi_mhdr mh;
+ u32 pl[BFI_MBMSG_SZ];
+};
+
+/**
+ * Supported PCI function class codes (personality)
+ */
+enum bfi_pcifn_class {
+ BFI_PCIFN_CLASS_FC = 0x0c04,
+ BFI_PCIFN_CLASS_ETH = 0x0200,
+};
+
+/**
+ * Message Classes
+ */
+enum bfi_mclass {
+ BFI_MC_IOC = 1, /*!< IO Controller (IOC) */
+ BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */
+ BFI_MC_FLASH = 3, /*!< Flash message class */
+ BFI_MC_CEE = 4, /*!< CEE */
+ BFI_MC_FCPORT = 5, /*!< FC port */
+ BFI_MC_IOCFC = 6, /*!< FC - IO Controller (IOC) */
+ BFI_MC_LL = 7, /*!< Link Layer */
+ BFI_MC_UF = 8, /*!< Unsolicited frame receive */
+ BFI_MC_FCXP = 9, /*!< FC Transport */
+ BFI_MC_LPS = 10, /*!< lport fc login services */
+ BFI_MC_RPORT = 11, /*!< Remote port */
+ BFI_MC_ITNIM = 12, /*!< I-T nexus (Initiator mode) */
+ BFI_MC_IOIM_READ = 13, /*!< read IO (Initiator mode) */
+ BFI_MC_IOIM_WRITE = 14, /*!< write IO (Initiator mode) */
+ BFI_MC_IOIM_IO = 15, /*!< IO (Initiator mode) */
+ BFI_MC_IOIM = 16, /*!< IO (Initiator mode) */
+ BFI_MC_IOIM_IOCOM = 17, /*!< good IO completion */
+ BFI_MC_TSKIM = 18, /*!< Initiator Task management */
+ BFI_MC_SBOOT = 19, /*!< SAN boot services */
+ BFI_MC_IPFC = 20, /*!< IP over FC Msgs */
+ BFI_MC_PORT = 21, /*!< Physical port */
+ BFI_MC_SFP = 22, /*!< SFP module */
+ BFI_MC_MSGQ = 23, /*!< MSGQ */
+ BFI_MC_ENET = 24, /*!< ENET commands/responses */
+ BFI_MC_PHY = 25, /*!< External PHY message class */
+ BFI_MC_NBOOT = 26, /*!< Network Boot */
+ BFI_MC_TIO_READ = 27, /*!< read IO (Target mode) */
+ BFI_MC_TIO_WRITE = 28, /*!< write IO (Target mode) */
+ BFI_MC_TIO_DATA_XFERED = 29, /*!< ds transferred (target mode) */
+ BFI_MC_TIO_IO = 30, /*!< IO (Target mode) */
+ BFI_MC_TIO = 31, /*!< IO (target mode) */
+ BFI_MC_MFG = 32, /*!< MFG/ASIC block commands */
+ BFI_MC_EDMA = 33, /*!< EDMA copy commands */
+ BFI_MC_MAX = 34
+};
+
+#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
+
+#define BFI_FWBOOT_ENV_OS 0
+
+/**
+ *----------------------------------------------------------------------
+ * IOC
+ *----------------------------------------------------------------------
+ */
+
+/**
+ * Different asic generations
+ */
+enum bfi_asic_gen {
+ BFI_ASIC_GEN_CB = 1,
+ BFI_ASIC_GEN_CT = 2,
+ BFI_ASIC_GEN_CT2 = 3,
+};
+
+enum bfi_asic_mode {
+	BFI_ASIC_MODE_FC	= 1,	/* FC up to 8G speed	*/
+	BFI_ASIC_MODE_FC16	= 2,	/* FC up to 16G speed	*/
+ BFI_ASIC_MODE_ETH = 3, /* Ethernet ports */
+ BFI_ASIC_MODE_COMBO = 4, /* FC 16G and Ethernet 10G port */
+};
+
+enum bfi_ioc_h2i_msgs {
+ BFI_IOC_H2I_ENABLE_REQ = 1,
+ BFI_IOC_H2I_DISABLE_REQ = 2,
+ BFI_IOC_H2I_GETATTR_REQ = 3,
+ BFI_IOC_H2I_DBG_SYNC = 4,
+ BFI_IOC_H2I_DBG_DUMP = 5,
+};
+
+enum bfi_ioc_i2h_msgs {
+ BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1),
+ BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
+ BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
+ BFI_IOC_I2H_HBEAT = BFA_I2HM(4),
+};
+
+/**
+ * BFI_IOC_H2I_GETATTR_REQ message
+ */
+struct bfi_ioc_getattr_req {
+ struct bfi_mhdr mh;
+ union bfi_addr_u attr_addr;
+};
+
+struct bfi_ioc_attr {
+ u64 mfg_pwwn; /*!< Mfg port wwn */
+ u64 mfg_nwwn; /*!< Mfg node wwn */
+ mac_t mfg_mac; /*!< Mfg mac */
+ u8 port_mode; /* enum bfi_port_mode */
+ u8 rsvd_a;
+ u64 pwwn;
+ u64 nwwn;
+ mac_t mac; /*!< PBC or Mfg mac */
+ u16 rsvd_b;
+ mac_t fcoe_mac;
+ u16 rsvd_c;
+ char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+ u8 pcie_gen;
+ u8 pcie_lanes_orig;
+ u8 pcie_lanes;
+ u8 rx_bbcredit; /*!< receive buffer credits */
+ u32 adapter_prop; /*!< adapter properties */
+ u16 maxfrsize; /*!< max receive frame size */
+ char asic_rev;
+ u8 rsvd_d;
+ char fw_version[BFA_VERSION_LEN];
+ char optrom_version[BFA_VERSION_LEN];
+ struct bfa_mfg_vpd vpd;
+ u32 card_type; /*!< card type */
+};
+
+/**
+ * BFI_IOC_I2H_GETATTR_REPLY message
+ */
+struct bfi_ioc_getattr_reply {
+ struct bfi_mhdr mh; /*!< Common msg header */
+ u8 status; /*!< cfg reply status */
+ u8 rsvd[3];
+};
+
+/**
+ * Firmware memory page offsets
+ */
+#define BFI_IOC_SMEM_PG0_CB (0x40)
+#define BFI_IOC_SMEM_PG0_CT (0x180)
+
+/**
+ * Firmware statistic offset
+ */
+#define BFI_IOC_FWSTATS_OFF (0x6B40)
+#define BFI_IOC_FWSTATS_SZ (4096)
+
+/**
+ * Firmware trace offset
+ */
+#define BFI_IOC_TRC_OFF (0x4b00)
+#define BFI_IOC_TRC_ENTS 256
+
+#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
+#define BFI_IOC_MD5SUM_SZ 4
+struct bfi_ioc_image_hdr {
+ u32 signature; /*!< constant signature */
+ u8 asic_gen; /*!< asic generation */
+ u8 asic_mode;
+ u8 port0_mode; /*!< device mode for port 0 */
+ u8 port1_mode; /*!< device mode for port 1 */
+ u32 exec; /*!< exec vector */
+ u32 bootenv; /*!< firmware boot env */
+ u32 rsvd_b[4];
+ u32 md5sum[BFI_IOC_MD5SUM_SZ];
+};
+
+#define BFI_FWBOOT_DEVMODE_OFF 4
+#define BFI_FWBOOT_TYPE_OFF 8
+#define BFI_FWBOOT_ENV_OFF 12
+#define BFI_FWBOOT_DEVMODE(__asic_gen, __asic_mode, __p0_mode, __p1_mode) \
+ (((u32)(__asic_gen)) << 24 | \
+ ((u32)(__asic_mode)) << 16 | \
+ ((u32)(__p0_mode)) << 8 | \
+ ((u32)(__p1_mode)))
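+
+/*
+ * DEVMODE packs four one-byte fields into a u32; e.g.
+ *	BFI_FWBOOT_DEVMODE(BFI_ASIC_GEN_CT2, BFI_ASIC_MODE_ETH,
+ *			   BFI_PORT_MODE_ETH, BFI_PORT_MODE_ETH)
+ * yields 0x03030202: ASIC gen 3, ASIC mode 3, both ports in ETH mode.
+ */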
+
+enum bfi_fwboot_type {
+ BFI_FWBOOT_TYPE_NORMAL = 0,
+ BFI_FWBOOT_TYPE_FLASH = 1,
+ BFI_FWBOOT_TYPE_MEMTEST = 2,
+};
+
+enum bfi_port_mode {
+ BFI_PORT_MODE_FC = 1,
+ BFI_PORT_MODE_ETH = 2,
+};
+
+struct bfi_ioc_hbeat {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u32 hb_count; /*!< current heart beat count */
+};
+
+/**
+ * IOC hardware/firmware state
+ */
+enum bfi_ioc_state {
+ BFI_IOC_UNINIT = 0, /*!< not initialized */
+ BFI_IOC_INITING = 1, /*!< h/w is being initialized */
+ BFI_IOC_HWINIT = 2, /*!< h/w is initialized */
+ BFI_IOC_CFG = 3, /*!< IOC configuration in progress */
+ BFI_IOC_OP = 4, /*!< IOC is operational */
+ BFI_IOC_DISABLING = 5, /*!< IOC is being disabled */
+ BFI_IOC_DISABLED = 6, /*!< IOC is disabled */
+ BFI_IOC_CFG_DISABLED = 7, /*!< IOC is being disabled;transient */
+ BFI_IOC_FAIL = 8, /*!< IOC heart-beat failure */
+ BFI_IOC_MEMTEST = 9, /*!< IOC is doing memtest */
+};
+
+#define BFI_IOC_ENDIAN_SIG 0x12345678
+
+enum {
+ BFI_ADAPTER_TYPE_FC = 0x01, /*!< FC adapters */
+ BFI_ADAPTER_TYPE_MK = 0x0f0000, /*!< adapter type mask */
+ BFI_ADAPTER_TYPE_SH = 16, /*!< adapter type shift */
+ BFI_ADAPTER_NPORTS_MK = 0xff00, /*!< number of ports mask */
+ BFI_ADAPTER_NPORTS_SH = 8, /*!< number of ports shift */
+ BFI_ADAPTER_SPEED_MK = 0xff, /*!< adapter speed mask */
+ BFI_ADAPTER_SPEED_SH = 0, /*!< adapter speed shift */
+	BFI_ADAPTER_PROTO	= 0x100000,	/*!< prototype adapters */
+ BFI_ADAPTER_TTV = 0x200000, /*!< TTV debug capable */
+ BFI_ADAPTER_UNSUPP = 0x400000, /*!< unknown adapter type */
+};
+
+#define BFI_ADAPTER_GETP(__prop, __adap_prop) \
+ (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \
+ BFI_ADAPTER_ ## __prop ## _SH)
+#define BFI_ADAPTER_SETP(__prop, __val) \
+ ((__val) << BFI_ADAPTER_ ## __prop ## _SH)
+#define BFI_ADAPTER_IS_PROTO(__adap_type) \
+ ((__adap_type) & BFI_ADAPTER_PROTO)
+#define BFI_ADAPTER_IS_TTV(__adap_type) \
+ ((__adap_type) & BFI_ADAPTER_TTV)
+#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \
+ ((__adap_type) & BFI_ADAPTER_UNSUPP)
+#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \
+ ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
+ BFI_ADAPTER_UNSUPP))
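+
+/*
+ * adapter_prop encodes the speed in bits [7:0] and the port count in
+ * bits [15:8]; e.g. for adapter_prop == 0x020a,
+ *	BFI_ADAPTER_GETP(NPORTS, 0x020a) == 2
+ *	BFI_ADAPTER_GETP(SPEED, 0x020a)	 == 10
+ * i.e. a two-port 10G adapter.
+ */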
+
+/**
+ * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
+ */
+struct bfi_ioc_ctrl_req {
+ struct bfi_mhdr mh;
+ u16 clscode;
+ u16 rsvd;
+ u32 tv_sec;
+};
+
+/**
+ * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
+ */
+struct bfi_ioc_ctrl_reply {
+ struct bfi_mhdr mh; /*!< Common msg header */
+ u8 status; /*!< enable/disable status */
+ u8 port_mode; /*!< enum bfa_mode */
+ u8 cap_bm; /*!< capability bit mask */
+ u8 rsvd;
+};
+
+#define BFI_IOC_MSGSZ 8
+/**
+ * H2I Messages
+ */
+union bfi_ioc_h2i_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_ioc_ctrl_req enable_req;
+ struct bfi_ioc_ctrl_req disable_req;
+ struct bfi_ioc_getattr_req getattr_req;
+ u32 mboxmsg[BFI_IOC_MSGSZ];
+};
+
+/**
+ * I2H Messages
+ */
+union bfi_ioc_i2h_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_ioc_ctrl_reply fw_event;
+ u32 mboxmsg[BFI_IOC_MSGSZ];
+};
+
+/**
+ *----------------------------------------------------------------------
+ * MSGQ
+ *----------------------------------------------------------------------
+ */
+
+enum bfi_msgq_h2i_msgs {
+ BFI_MSGQ_H2I_INIT_REQ = 1,
+ BFI_MSGQ_H2I_DOORBELL_PI = 2,
+ BFI_MSGQ_H2I_DOORBELL_CI = 3,
+ BFI_MSGQ_H2I_CMDQ_COPY_RSP = 4,
+};
+
+enum bfi_msgq_i2h_msgs {
+ BFI_MSGQ_I2H_INIT_RSP = BFA_I2HM(BFI_MSGQ_H2I_INIT_REQ),
+ BFI_MSGQ_I2H_DOORBELL_PI = BFA_I2HM(BFI_MSGQ_H2I_DOORBELL_PI),
+ BFI_MSGQ_I2H_DOORBELL_CI = BFA_I2HM(BFI_MSGQ_H2I_DOORBELL_CI),
+ BFI_MSGQ_I2H_CMDQ_COPY_REQ = BFA_I2HM(BFI_MSGQ_H2I_CMDQ_COPY_RSP),
+};
+
+/* Messages (commands/responses/AENs) will have the following header */
+struct bfi_msgq_mhdr {
+ u8 msg_class;
+ u8 msg_id;
+ u16 msg_token;
+ u16 num_entries;
+ u8 enet_id;
+ u8 rsvd[1];
+};
+
+#define bfi_msgq_mhdr_set(_mh, _mc, _mid, _tok, _enet_id) do { \
+ (_mh).msg_class = (_mc); \
+ (_mh).msg_id = (_mid); \
+ (_mh).msg_token = (_tok); \
+ (_mh).enet_id = (_enet_id); \
+} while (0)
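+
+/*
+ * A minimal usage sketch (msg_class, msg_id and token are placeholder
+ * variables, not constants from this header):
+ *
+ * struct bfi_msgq_mhdr mh;
+ * bfi_msgq_mhdr_set(mh, msg_class, msg_id, token, 0);
+ *
+ * Note that num_entries is not stamped by the macro; the caller must
+ * fill it in separately.
+ */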
+
+/*
+ * Mailbox for messaging interface
+ */
+#define BFI_MSGQ_CMD_ENTRY_SIZE (64) /* TBD */
+#define BFI_MSGQ_RSP_ENTRY_SIZE (64) /* TBD */
+
+#define bfi_msgq_num_cmd_entries(_size) \
+ (((_size) + BFI_MSGQ_CMD_ENTRY_SIZE - 1) / BFI_MSGQ_CMD_ENTRY_SIZE)
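+
+/*
+ * Worked example: a 100-byte command occupies
+ * bfi_msgq_num_cmd_entries(100) = (100 + 63) / 64 = 2 command-queue
+ * entries at the current BFI_MSGQ_CMD_ENTRY_SIZE of 64 bytes.
+ */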
+
+struct bfi_msgq {
+ union bfi_addr_u addr;
+ u16 q_depth; /* Total num of entries in the queue */
+ u8 rsvd[2];
+};
+
+/* BFI_ENET_MSGQ_CFG_REQ TBD init or cfg? */
+struct bfi_msgq_cfg_req {
+ struct bfi_mhdr mh;
+ struct bfi_msgq cmdq;
+ struct bfi_msgq rspq;
+};
+
+/* BFI_ENET_MSGQ_CFG_RSP */
+struct bfi_msgq_cfg_rsp {
+ struct bfi_mhdr mh;
+ u8 cmd_status;
+ u8 rsvd[3];
+};
+
+/* BFI_MSGQ_H2I_DOORBELL */
+struct bfi_msgq_h2i_db {
+ struct bfi_mhdr mh;
+ union {
+ u16 cmdq_pi;
+ u16 rspq_ci;
+ } idx;
+};
+
+/* BFI_MSGQ_I2H_DOORBELL */
+struct bfi_msgq_i2h_db {
+ struct bfi_mhdr mh;
+ union {
+ u16 rspq_pi;
+ u16 cmdq_ci;
+ } idx;
+};
+
+#define BFI_CMD_COPY_SZ 28
+
+/* BFI_MSGQ_H2I_CMDQ_COPY_RSP */
+struct bfi_msgq_h2i_cmdq_copy_rsp {
+ struct bfi_mhdr mh;
+ u8 data[BFI_CMD_COPY_SZ];
+};
+
+/* BFI_MSGQ_I2H_CMDQ_COPY_REQ */
+struct bfi_msgq_i2h_cmdq_copy_req {
+ struct bfi_mhdr mh;
+ u16 offset;
+ u16 len;
+};
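+
+/*
+ * Copy-handshake sketch, as implied by the two structures above:
+ * firmware requests an (offset, len) slice of a queued command with
+ * BFI_MSGQ_I2H_CMDQ_COPY_REQ and the host returns that slice in the
+ * data[] field of BFI_MSGQ_H2I_CMDQ_COPY_RSP, presumably repeating in
+ * chunks of at most BFI_CMD_COPY_SZ bytes for longer commands.
+ */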
+
+#pragma pack()
+
+#endif /* __BFI_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h
new file mode 100644
index 000000000000..4eecabea397b
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h
@@ -0,0 +1,199 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BFI_CNA_H__
+#define __BFI_CNA_H__
+
+#include "bfi.h"
+#include "bfa_defs_cna.h"
+
+#pragma pack(1)
+
+enum bfi_port_h2i {
+ BFI_PORT_H2I_ENABLE_REQ = (1),
+ BFI_PORT_H2I_DISABLE_REQ = (2),
+ BFI_PORT_H2I_GET_STATS_REQ = (3),
+ BFI_PORT_H2I_CLEAR_STATS_REQ = (4),
+};
+
+enum bfi_port_i2h {
+ BFI_PORT_I2H_ENABLE_RSP = BFA_I2HM(1),
+ BFI_PORT_I2H_DISABLE_RSP = BFA_I2HM(2),
+ BFI_PORT_I2H_GET_STATS_RSP = BFA_I2HM(3),
+ BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
+};
+
+/**
+ * Generic REQ type
+ */
+struct bfi_port_generic_req {
+ struct bfi_mhdr mh; /*!< msg header */
+ u32 msgtag; /*!< msgtag for reply */
+ u32 rsvd;
+};
+
+/**
+ * Generic RSP type
+ */
+struct bfi_port_generic_rsp {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 status; /*!< port enable status */
+ u8 rsvd[3];
+ u32 msgtag; /*!< msgtag for reply */
+};
+
+/**
+ * @todo
+ * BFI_PORT_H2I_ENABLE_REQ
+ */
+
+/**
+ * @todo
+ * BFI_PORT_I2H_ENABLE_RSP
+ */
+
+/**
+ * BFI_PORT_H2I_DISABLE_REQ
+ */
+
+/**
+ * BFI_PORT_I2H_DISABLE_RSP
+ */
+
+/**
+ * BFI_PORT_H2I_GET_STATS_REQ
+ */
+struct bfi_port_get_stats_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ union bfi_addr_u dma_addr;
+};
+
+/**
+ * BFI_PORT_I2H_GET_STATS_RSP
+ */
+
+/**
+ * BFI_PORT_H2I_CLEAR_STATS_REQ
+ */
+
+/**
+ * BFI_PORT_I2H_CLEAR_STATS_RSP
+ */
+
+union bfi_port_h2i_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_port_generic_req enable_req;
+ struct bfi_port_generic_req disable_req;
+ struct bfi_port_get_stats_req getstats_req;
+ struct bfi_port_generic_req clearstats_req;
+};
+
+union bfi_port_i2h_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_port_generic_rsp enable_rsp;
+ struct bfi_port_generic_rsp disable_rsp;
+ struct bfi_port_generic_rsp getstats_rsp;
+ struct bfi_port_generic_rsp clearstats_rsp;
+};
+
+/* @brief Mailbox commands from host to (DCBX/LLDP) firmware */
+enum bfi_cee_h2i_msgs {
+ BFI_CEE_H2I_GET_CFG_REQ = 1,
+ BFI_CEE_H2I_RESET_STATS = 2,
+ BFI_CEE_H2I_GET_STATS_REQ = 3,
+};
+
+/* @brief Mailbox reply and AEN messages from DCBX/LLDP firmware to host */
+enum bfi_cee_i2h_msgs {
+ BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1),
+ BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2),
+ BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3),
+};
+
+/* Data structures */
+
+/*
+ * @brief H2I command structure for resetting the stats.
+ * BFI_CEE_H2I_RESET_STATS
+ */
+struct bfi_lldp_reset_stats {
+ struct bfi_mhdr mh;
+};
+
+/*
+ * @brief H2I command structure for resetting the stats.
+ * BFI_CEE_H2I_RESET_STATS
+ */
+struct bfi_cee_reset_stats {
+ struct bfi_mhdr mh;
+};
+
+/*
+ * @brief get configuration command from host
+ * BFI_CEE_H2I_GET_CFG_REQ
+ */
+struct bfi_cee_get_req {
+ struct bfi_mhdr mh;
+ union bfi_addr_u dma_addr;
+};
+
+/*
+ * @brief reply message from firmware
+ * BFI_CEE_I2H_GET_CFG_RSP
+ */
+struct bfi_cee_get_rsp {
+ struct bfi_mhdr mh;
+ u8 cmd_status;
+ u8 rsvd[3];
+};
+
+/*
+ * @brief get configuration command from host
+ * BFI_CEE_H2I_GET_STATS_REQ
+ */
+struct bfi_cee_stats_req {
+ struct bfi_mhdr mh;
+ union bfi_addr_u dma_addr;
+};
+
+/*
+ * @brief reply message from firmware
+ * BFI_CEE_I2H_GET_STATS_RSP
+ */
+struct bfi_cee_stats_rsp {
+ struct bfi_mhdr mh;
+ u8 cmd_status;
+ u8 rsvd[3];
+};
+
+/* @brief mailbox command structures from host to firmware */
+union bfi_cee_h2i_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_cee_get_req get_req;
+ struct bfi_cee_stats_req stats_req;
+};
+
+/* @brief mailbox message structures from firmware to host */
+union bfi_cee_i2h_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_cee_get_rsp get_rsp;
+ struct bfi_cee_stats_rsp stats_rsp;
+};
+
+#pragma pack()
+
+#endif /* __BFI_CNA_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
new file mode 100644
index 000000000000..a90f1cf46b41
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -0,0 +1,901 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ * @file bfi_enet.h BNA Hardware and Firmware Interface
+ */
+
+/**
+ * Skipping statistics collection to avoid clutter.
+ * The following commands are no longer needed:
+ * MTU
+ * TxQ Stop
+ * RxQ Stop
+ * RxF Enable/Disable
+ *
+ * The HDS-off request is dynamic.
+ * Keep structures as multiples of 32-bit fields for alignment.
+ * All values must be written in big-endian order.
+ */
+#ifndef __BFI_ENET_H__
+#define __BFI_ENET_H__
+
+#include "bfa_defs.h"
+#include "bfi.h"
+
+#pragma pack(1)
+
+#define BFI_ENET_CFG_MAX 32 /* Max resources per PF */
+
+#define BFI_ENET_TXQ_PRIO_MAX 8
+#define BFI_ENET_RX_QSET_MAX 16
+#define BFI_ENET_TXQ_WI_VECT_MAX 4
+
+#define BFI_ENET_VLAN_ID_MAX 4096
+#define BFI_ENET_VLAN_BLOCK_SIZE 512 /* in bits */
+#define BFI_ENET_VLAN_BLOCKS_MAX \
+ (BFI_ENET_VLAN_ID_MAX / BFI_ENET_VLAN_BLOCK_SIZE)
+#define BFI_ENET_VLAN_WORD_SIZE 32 /* in bits */
+#define BFI_ENET_VLAN_WORDS_MAX \
+ (BFI_ENET_VLAN_BLOCK_SIZE / BFI_ENET_VLAN_WORD_SIZE)
+
+#define BFI_ENET_RSS_RIT_MAX 64 /* entries */
+#define BFI_ENET_RSS_KEY_LEN 10 /* 32-bit words */
+
+union bfi_addr_be_u {
+ struct {
+ u32 addr_hi; /* Most Significant 32-bits */
+ u32 addr_lo; /* Least Significant 32-bits */
+ } a32;
+};
+
+/**
+ * T X Q U E U E D E F I N E S
+ */
+/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
+/* TxQ Entry Opcodes */
+#define BFI_ENET_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
+#define BFI_ENET_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */
+#define BFI_ENET_TXQ_WI_EXTENSION (0x104) /* Extension WI */
+
+/* TxQ Entry Control Flags */
+#define BFI_ENET_TXQ_WI_CF_FCOE_CRC (1 << 8)
+#define BFI_ENET_TXQ_WI_CF_IPID_MODE (1 << 5)
+#define BFI_ENET_TXQ_WI_CF_INS_PRIO (1 << 4)
+#define BFI_ENET_TXQ_WI_CF_INS_VLAN (1 << 3)
+#define BFI_ENET_TXQ_WI_CF_UDP_CKSUM (1 << 2)
+#define BFI_ENET_TXQ_WI_CF_TCP_CKSUM (1 << 1)
+#define BFI_ENET_TXQ_WI_CF_IP_CKSUM (1 << 0)
+
+struct bfi_enet_txq_wi_base {
+ u8 reserved;
+ u8 num_vectors; /* number of vectors present */
+ u16 opcode;
+ /* BFI_ENET_TXQ_WI_SEND or BFI_ENET_TXQ_WI_SEND_LSO */
+ u16 flags; /* OR of all the flags */
+ u16 l4_hdr_size_n_offset;
+ u16 vlan_tag;
+ u16 lso_mss; /* Only 14 LSB are valid */
+ u32 frame_length; /* Only 24 LSB are valid */
+};
+
+struct bfi_enet_txq_wi_ext {
+ u16 reserved;
+ u16 opcode; /* BFI_ENET_TXQ_WI_EXTENSION */
+ u32 reserved2[3];
+};
+
+struct bfi_enet_txq_wi_vector { /* Tx Buffer Descriptor */
+ u16 reserved;
+ u16 length; /* Only 14 LSB are valid */
+ union bfi_addr_be_u addr;
+};
+
+/**
+ * TxQ Entry Structure
+ *
+ */
+struct bfi_enet_txq_entry {
+ union {
+ struct bfi_enet_txq_wi_base base;
+ struct bfi_enet_txq_wi_ext ext;
+ } wi;
+ struct bfi_enet_txq_wi_vector vector[BFI_ENET_TXQ_WI_VECT_MAX];
+};
+
+#define wi_hdr wi.base
+#define wi_ext_hdr wi.ext
+
+#define BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
+ (((_hdr_size) << 10) | ((_offset) & 0x3FF))
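+
+/*
+ * Worked example: a 20-byte TCP header at frame offset 34 encodes as
+ * BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET(20, 34) = (20 << 10) | 34 = 0x5022.
+ */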
+
+/**
+ * R X Q U E U E D E F I N E S
+ */
+struct bfi_enet_rxq_entry {
+ union bfi_addr_be_u rx_buffer;
+};
+
+/**
+ * R X C O M P L E T I O N Q U E U E D E F I N E S
+ */
+/* CQ Entry Flags */
+#define BFI_ENET_CQ_EF_MAC_ERROR (1 << 0)
+#define BFI_ENET_CQ_EF_FCS_ERROR (1 << 1)
+#define BFI_ENET_CQ_EF_TOO_LONG (1 << 2)
+#define BFI_ENET_CQ_EF_FC_CRC_OK (1 << 3)
+
+#define BFI_ENET_CQ_EF_RSVD1 (1 << 4)
+#define BFI_ENET_CQ_EF_L4_CKSUM_OK (1 << 5)
+#define BFI_ENET_CQ_EF_L3_CKSUM_OK (1 << 6)
+#define BFI_ENET_CQ_EF_HDS_HEADER (1 << 7)
+
+#define BFI_ENET_CQ_EF_UDP (1 << 8)
+#define BFI_ENET_CQ_EF_TCP (1 << 9)
+#define BFI_ENET_CQ_EF_IP_OPTIONS (1 << 10)
+#define BFI_ENET_CQ_EF_IPV6 (1 << 11)
+
+#define BFI_ENET_CQ_EF_IPV4 (1 << 12)
+#define BFI_ENET_CQ_EF_VLAN (1 << 13)
+#define BFI_ENET_CQ_EF_RSS (1 << 14)
+#define BFI_ENET_CQ_EF_RSVD2 (1 << 15)
+
+#define BFI_ENET_CQ_EF_MCAST_MATCH (1 << 16)
+#define BFI_ENET_CQ_EF_MCAST (1 << 17)
+#define BFI_ENET_CQ_EF_BCAST (1 << 18)
+#define BFI_ENET_CQ_EF_REMOTE (1 << 19)
+
+#define BFI_ENET_CQ_EF_LOCAL (1 << 20)
+
+/* CQ Entry Structure */
+struct bfi_enet_cq_entry {
+ u32 flags;
+ u16 vlan_tag;
+ u16 length;
+ u32 rss_hash;
+ u8 valid;
+ u8 reserved1;
+ u8 reserved2;
+ u8 rxq_id;
+};
+
+/**
+ * E N E T C O N T R O L P A T H C O M M A N D S
+ */
+struct bfi_enet_q {
+ union bfi_addr_u pg_tbl;
+ union bfi_addr_u first_entry;
+ u16 pages; /* # of pages */
+ u16 page_sz;
+};
+
+struct bfi_enet_txq {
+ struct bfi_enet_q q;
+ u8 priority;
+ u8 rsvd[3];
+};
+
+struct bfi_enet_rxq {
+ struct bfi_enet_q q;
+ u16 rx_buffer_size;
+ u16 rsvd;
+};
+
+struct bfi_enet_cq {
+ struct bfi_enet_q q;
+};
+
+struct bfi_enet_ib_cfg {
+ u8 int_pkt_dma;
+ u8 int_enabled;
+ u8 int_pkt_enabled;
+ u8 continuous_coalescing;
+ u8 msix;
+ u8 rsvd[3];
+ u32 coalescing_timeout;
+ u32 inter_pkt_timeout;
+ u8 inter_pkt_count;
+ u8 rsvd1[3];
+};
+
+struct bfi_enet_ib {
+ union bfi_addr_u index_addr;
+ union {
+ u16 msix_index;
+ u16 intx_bitmask;
+ } intr;
+ u16 rsvd;
+};
+
+/**
+ * ENET command messages
+ */
+enum bfi_enet_h2i_msgs {
+ /* Rx Commands */
+ BFI_ENET_H2I_RX_CFG_SET_REQ = 1,
+ BFI_ENET_H2I_RX_CFG_CLR_REQ = 2,
+
+ BFI_ENET_H2I_RIT_CFG_REQ = 3,
+ BFI_ENET_H2I_RSS_CFG_REQ = 4,
+ BFI_ENET_H2I_RSS_ENABLE_REQ = 5,
+ BFI_ENET_H2I_RX_PROMISCUOUS_REQ = 6,
+ BFI_ENET_H2I_RX_DEFAULT_REQ = 7,
+
+ BFI_ENET_H2I_MAC_UCAST_SET_REQ = 8,
+ BFI_ENET_H2I_MAC_UCAST_CLR_REQ = 9,
+ BFI_ENET_H2I_MAC_UCAST_ADD_REQ = 10,
+ BFI_ENET_H2I_MAC_UCAST_DEL_REQ = 11,
+
+ BFI_ENET_H2I_MAC_MCAST_ADD_REQ = 12,
+ BFI_ENET_H2I_MAC_MCAST_DEL_REQ = 13,
+ BFI_ENET_H2I_MAC_MCAST_FILTER_REQ = 14,
+
+ BFI_ENET_H2I_RX_VLAN_SET_REQ = 15,
+ BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ = 16,
+
+ /* Tx Commands */
+ BFI_ENET_H2I_TX_CFG_SET_REQ = 17,
+ BFI_ENET_H2I_TX_CFG_CLR_REQ = 18,
+
+ /* Port Commands */
+ BFI_ENET_H2I_PORT_ADMIN_UP_REQ = 19,
+ BFI_ENET_H2I_SET_PAUSE_REQ = 20,
+ BFI_ENET_H2I_DIAG_LOOPBACK_REQ = 21,
+
+ /* Get Attributes Command */
+ BFI_ENET_H2I_GET_ATTR_REQ = 22,
+
+ /* Statistics Commands */
+ BFI_ENET_H2I_STATS_GET_REQ = 23,
+ BFI_ENET_H2I_STATS_CLR_REQ = 24,
+
+ BFI_ENET_H2I_WOL_MAGIC_REQ = 25,
+ BFI_ENET_H2I_WOL_FRAME_REQ = 26,
+
+ BFI_ENET_H2I_MAX = 27,
+};
+
+enum bfi_enet_i2h_msgs {
+ /* Rx Responses */
+ BFI_ENET_I2H_RX_CFG_SET_RSP =
+ BFA_I2HM(BFI_ENET_H2I_RX_CFG_SET_REQ),
+ BFI_ENET_I2H_RX_CFG_CLR_RSP =
+ BFA_I2HM(BFI_ENET_H2I_RX_CFG_CLR_REQ),
+
+ BFI_ENET_I2H_RIT_CFG_RSP =
+ BFA_I2HM(BFI_ENET_H2I_RIT_CFG_REQ),
+ BFI_ENET_I2H_RSS_CFG_RSP =
+ BFA_I2HM(BFI_ENET_H2I_RSS_CFG_REQ),
+ BFI_ENET_I2H_RSS_ENABLE_RSP =
+ BFA_I2HM(BFI_ENET_H2I_RSS_ENABLE_REQ),
+ BFI_ENET_I2H_RX_PROMISCUOUS_RSP =
+ BFA_I2HM(BFI_ENET_H2I_RX_PROMISCUOUS_REQ),
+ BFI_ENET_I2H_RX_DEFAULT_RSP =
+ BFA_I2HM(BFI_ENET_H2I_RX_DEFAULT_REQ),
+
+ BFI_ENET_I2H_MAC_UCAST_SET_RSP =
+ BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_SET_REQ),
+ BFI_ENET_I2H_MAC_UCAST_CLR_RSP =
+ BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_CLR_REQ),
+ BFI_ENET_I2H_MAC_UCAST_ADD_RSP =
+ BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_ADD_REQ),
+ BFI_ENET_I2H_MAC_UCAST_DEL_RSP =
+ BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_DEL_REQ),
+
+ BFI_ENET_I2H_MAC_MCAST_ADD_RSP =
+ BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_ADD_REQ),
+ BFI_ENET_I2H_MAC_MCAST_DEL_RSP =
+ BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_DEL_REQ),
+ BFI_ENET_I2H_MAC_MCAST_FILTER_RSP =
+ BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_FILTER_REQ),
+
+ BFI_ENET_I2H_RX_VLAN_SET_RSP =
+ BFA_I2HM(BFI_ENET_H2I_RX_VLAN_SET_REQ),
+
+ BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP =
+ BFA_I2HM(BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ),
+
+ /* Tx Responses */
+ BFI_ENET_I2H_TX_CFG_SET_RSP =
+ BFA_I2HM(BFI_ENET_H2I_TX_CFG_SET_REQ),
+ BFI_ENET_I2H_TX_CFG_CLR_RSP =
+ BFA_I2HM(BFI_ENET_H2I_TX_CFG_CLR_REQ),
+
+ /* Port Responses */
+ BFI_ENET_I2H_PORT_ADMIN_RSP =
+ BFA_I2HM(BFI_ENET_H2I_PORT_ADMIN_UP_REQ),
+
+ BFI_ENET_I2H_SET_PAUSE_RSP =
+ BFA_I2HM(BFI_ENET_H2I_SET_PAUSE_REQ),
+ BFI_ENET_I2H_DIAG_LOOPBACK_RSP =
+ BFA_I2HM(BFI_ENET_H2I_DIAG_LOOPBACK_REQ),
+
+ /* Attributes Response */
+ BFI_ENET_I2H_GET_ATTR_RSP =
+ BFA_I2HM(BFI_ENET_H2I_GET_ATTR_REQ),
+
+ /* Statistics Responses */
+ BFI_ENET_I2H_STATS_GET_RSP =
+ BFA_I2HM(BFI_ENET_H2I_STATS_GET_REQ),
+ BFI_ENET_I2H_STATS_CLR_RSP =
+ BFA_I2HM(BFI_ENET_H2I_STATS_CLR_REQ),
+
+ BFI_ENET_I2H_WOL_MAGIC_RSP =
+ BFA_I2HM(BFI_ENET_H2I_WOL_MAGIC_REQ),
+ BFI_ENET_I2H_WOL_FRAME_RSP =
+ BFA_I2HM(BFI_ENET_H2I_WOL_FRAME_REQ),
+
+ /* AENs */
+ BFI_ENET_I2H_LINK_DOWN_AEN = BFA_I2HM(BFI_ENET_H2I_MAX),
+ BFI_ENET_I2H_LINK_UP_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 1),
+
+ BFI_ENET_I2H_PORT_ENABLE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 2),
+ BFI_ENET_I2H_PORT_DISABLE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 3),
+
+ BFI_ENET_I2H_BW_UPDATE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 4),
+};
+
+/**
+ * The following error codes can be returned by the enet commands
+ */
+enum bfi_enet_err {
+ BFI_ENET_CMD_OK = 0,
+ BFI_ENET_CMD_FAIL = 1,
+ BFI_ENET_CMD_DUP_ENTRY = 2, /* !< Duplicate entry in CAM */
+ BFI_ENET_CMD_CAM_FULL = 3, /* !< CAM is full */
+ BFI_ENET_CMD_NOT_OWNER = 4, /* !< Not permitted because not owner */
+ BFI_ENET_CMD_NOT_EXEC = 5, /* !< Was not sent to f/w at all */
+ BFI_ENET_CMD_WAITING = 6, /* !< Waiting for completion */
+ BFI_ENET_CMD_PORT_DISABLED = 7, /* !< port in disabled state */
+};
+
+/**
+ * Generic Request
+ *
+ * bfi_enet_req is used by:
+ * BFI_ENET_H2I_RX_CFG_CLR_REQ
+ * BFI_ENET_H2I_TX_CFG_CLR_REQ
+ */
+struct bfi_enet_req {
+ struct bfi_msgq_mhdr mh;
+};
+
+/**
+ * Enable/Disable Request
+ *
+ * bfi_enet_enable_req is used by:
+ * BFI_ENET_H2I_RSS_ENABLE_REQ (enet_id must be zero)
+ * BFI_ENET_H2I_RX_PROMISCUOUS_REQ (enet_id must be zero)
+ * BFI_ENET_H2I_RX_DEFAULT_REQ (enet_id must be zero)
+ * BFI_ENET_H2I_MAC_MCAST_FILTER_REQ
+ * BFI_ENET_H2I_PORT_ADMIN_UP_REQ (enet_id must be zero)
+ */
+struct bfi_enet_enable_req {
+ struct bfi_msgq_mhdr mh;
+ u8 enable; /* 1 = enable; 0 = disable */
+ u8 rsvd[3];
+};
+
+/**
+ * Generic Response
+ */
+struct bfi_enet_rsp {
+ struct bfi_msgq_mhdr mh;
+ u8 error; /*!< if error see cmd_offset */
+ u8 rsvd;
+ u16 cmd_offset; /*!< offset to invalid parameter */
+};
+
+/**
+ * GLOBAL CONFIGURATION
+ */
+
+/**
+ * bfi_enet_attr_req is used by:
+ * BFI_ENET_H2I_GET_ATTR_REQ
+ */
+struct bfi_enet_attr_req {
+ struct bfi_msgq_mhdr mh;
+};
+
+/**
+ * bfi_enet_attr_rsp is used by:
+ * BFI_ENET_I2H_GET_ATTR_RSP
+ */
+struct bfi_enet_attr_rsp {
+ struct bfi_msgq_mhdr mh;
+ u8 error; /*!< if error see cmd_offset */
+ u8 rsvd;
+ u16 cmd_offset; /*!< offset to invalid parameter */
+ u32 max_cfg;
+ u32 max_ucmac;
+ u32 rit_size;
+};
+
+/**
+ * Tx Configuration
+ *
+ * bfi_enet_tx_cfg is used by:
+ * BFI_ENET_H2I_TX_CFG_SET_REQ
+ */
+enum bfi_enet_tx_vlan_mode {
+ BFI_ENET_TX_VLAN_NOP = 0,
+ BFI_ENET_TX_VLAN_INS = 1,
+ BFI_ENET_TX_VLAN_WI = 2,
+};
+
+struct bfi_enet_tx_cfg {
+ u8 vlan_mode; /*!< processing mode */
+ u8 rsvd;
+ u16 vlan_id;
+ u8 admit_tagged_frame;
+ u8 apply_vlan_filter;
+ u8 add_to_vswitch;
+ u8 rsvd1[1];
+};
+
+struct bfi_enet_tx_cfg_req {
+ struct bfi_msgq_mhdr mh;
+ u8 num_queues; /* # of Tx Queues */
+ u8 rsvd[3];
+
+ struct {
+ struct bfi_enet_txq q;
+ struct bfi_enet_ib ib;
+ } q_cfg[BFI_ENET_TXQ_PRIO_MAX];
+
+ struct bfi_enet_ib_cfg ib_cfg;
+
+ struct bfi_enet_tx_cfg tx_cfg;
+};
+
+struct bfi_enet_tx_cfg_rsp {
+ struct bfi_msgq_mhdr mh;
+ u8 error;
+ u8 hw_id; /* For debugging */
+ u8 rsvd[2];
+ struct {
+ u32 q_dbell; /* PCI base address offset */
+ u32 i_dbell; /* PCI base address offset */
+ u8 hw_qid; /* For debugging */
+ u8 rsvd[3];
+ } q_handles[BFI_ENET_TXQ_PRIO_MAX];
+};
+
+/**
+ * Rx Configuration
+ *
+ * bfi_enet_rx_cfg is used by:
+ * BFI_ENET_H2I_RX_CFG_SET_REQ
+ */
+enum bfi_enet_rxq_type {
+ BFI_ENET_RXQ_SINGLE = 1,
+ BFI_ENET_RXQ_LARGE_SMALL = 2,
+ BFI_ENET_RXQ_HDS = 3,
+ BFI_ENET_RXQ_HDS_OPT_BASED = 4,
+};
+
+enum bfi_enet_hds_type {
+ BFI_ENET_HDS_FORCED = 0x01,
+ BFI_ENET_HDS_IPV6_UDP = 0x02,
+ BFI_ENET_HDS_IPV6_TCP = 0x04,
+ BFI_ENET_HDS_IPV4_TCP = 0x08,
+ BFI_ENET_HDS_IPV4_UDP = 0x10,
+};
+
+struct bfi_enet_rx_cfg {
+ u8 rxq_type;
+ u8 rsvd[3];
+
+ struct {
+ u8 max_header_size;
+ u8 force_offset;
+ u8 type;
+ u8 rsvd1;
+ } hds;
+
+ u8 multi_buffer;
+ u8 strip_vlan;
+ u8 drop_untagged;
+ u8 rsvd2;
+};
+
+/*
+ * Multicast frames are received on the ql of q-set index zero.
+ * On the completion queue, an even RxQ ID indicates a large/data buffer
+ * queue and an odd RxQ ID indicates a small/header buffer queue.
+ */
+struct bfi_enet_rx_cfg_req {
+ struct bfi_msgq_mhdr mh;
+ u8 num_queue_sets; /* # of Rx Queue Sets */
+ u8 rsvd[3];
+
+ struct {
+ struct bfi_enet_rxq ql; /* large/data/single buffers */
+ struct bfi_enet_rxq qs; /* small/header buffers */
+ struct bfi_enet_cq cq;
+ struct bfi_enet_ib ib;
+ } q_cfg[BFI_ENET_RX_QSET_MAX];
+
+ struct bfi_enet_ib_cfg ib_cfg;
+
+ struct bfi_enet_rx_cfg rx_cfg;
+};
+
+struct bfi_enet_rx_cfg_rsp {
+ struct bfi_msgq_mhdr mh;
+ u8 error;
+ u8 hw_id; /* For debugging */
+ u8 rsvd[2];
+ struct {
+ u32 ql_dbell; /* PCI base address offset */
+ u32 qs_dbell; /* PCI base address offset */
+ u32 i_dbell; /* PCI base address offset */
+ u8 hw_lqid; /* For debugging */
+ u8 hw_sqid; /* For debugging */
+ u8 hw_cqid; /* For debugging */
+ u8 rsvd;
+ } q_handles[BFI_ENET_RX_QSET_MAX];
+};
+
+/**
+ * RIT
+ *
+ * bfi_enet_rit_req is used by:
+ * BFI_ENET_H2I_RIT_CFG_REQ
+ */
+struct bfi_enet_rit_req {
+ struct bfi_msgq_mhdr mh;
+ u16 size; /* number of table-entries used */
+ u8 rsvd[2];
+ u8 table[BFI_ENET_RSS_RIT_MAX];
+};
+
+/**
+ * RSS
+ *
+ * bfi_enet_rss_cfg_req is used by:
+ * BFI_ENET_H2I_RSS_CFG_REQ
+ */
+enum bfi_enet_rss_type {
+ BFI_ENET_RSS_IPV6 = 0x01,
+ BFI_ENET_RSS_IPV6_TCP = 0x02,
+ BFI_ENET_RSS_IPV4 = 0x04,
+ BFI_ENET_RSS_IPV4_TCP = 0x08
+};
+
+struct bfi_enet_rss_cfg {
+ u8 type;
+ u8 mask;
+ u8 rsvd[2];
+ u32 key[BFI_ENET_RSS_KEY_LEN];
+};
+
+struct bfi_enet_rss_cfg_req {
+ struct bfi_msgq_mhdr mh;
+ struct bfi_enet_rss_cfg cfg;
+};
+
+/**
+ * MAC Unicast
+ *
+ * bfi_enet_ucast_req is used by:
+ * BFI_ENET_H2I_MAC_UCAST_SET_REQ
+ * BFI_ENET_H2I_MAC_UCAST_CLR_REQ
+ * BFI_ENET_H2I_MAC_UCAST_ADD_REQ
+ * BFI_ENET_H2I_MAC_UCAST_DEL_REQ
+ */
+struct bfi_enet_ucast_req {
+ struct bfi_msgq_mhdr mh;
+ mac_t mac_addr;
+ u8 rsvd[2];
+};
+
+/**
+ * MAC Unicast + VLAN
+ */
+struct bfi_enet_mac_n_vlan_req {
+ struct bfi_msgq_mhdr mh;
+ u16 vlan_id;
+ mac_t mac_addr;
+};
+
+/**
+ * MAC Multicast
+ *
+ * bfi_enet_mcast_add_req is used by:
+ * BFI_ENET_H2I_MAC_MCAST_ADD_REQ
+ */
+struct bfi_enet_mcast_add_req {
+ struct bfi_msgq_mhdr mh;
+ mac_t mac_addr;
+ u8 rsvd[2];
+};
+
+/**
+ * bfi_enet_mcast_add_rsp is used by:
+ * BFI_ENET_I2H_MAC_MCAST_ADD_RSP
+ */
+struct bfi_enet_mcast_add_rsp {
+ struct bfi_msgq_mhdr mh;
+ u8 error;
+ u8 rsvd;
+ u16 cmd_offset;
+ u16 handle;
+ u8 rsvd1[2];
+};
+
+/**
+ * bfi_enet_mcast_del_req is used by:
+ * BFI_ENET_H2I_MAC_MCAST_DEL_REQ
+ */
+struct bfi_enet_mcast_del_req {
+ struct bfi_msgq_mhdr mh;
+ u16 handle;
+ u8 rsvd[2];
+};
+
+/**
+ * VLAN
+ *
+ * bfi_enet_rx_vlan_req is used by:
+ * BFI_ENET_H2I_RX_VLAN_SET_REQ
+ */
+struct bfi_enet_rx_vlan_req {
+ struct bfi_msgq_mhdr mh;
+ u8 block_idx;
+ u8 rsvd[3];
+ u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX];
+};
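+
+/*
+ * Sketch of how a VLAN id maps into this request, derived from the
+ * BFI_ENET_VLAN_* constants above (variable names are illustrative):
+ *
+ * block_idx = vlan_id / BFI_ENET_VLAN_BLOCK_SIZE;
+ * word = (vlan_id % BFI_ENET_VLAN_BLOCK_SIZE) / BFI_ENET_VLAN_WORD_SIZE;
+ * bit_mask[word] |= 1 << (vlan_id % BFI_ENET_VLAN_WORD_SIZE);
+ *
+ * e.g. VLAN id 100 lands in block 0, word 3, bit 4.
+ */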
+
+/**
+ * PAUSE
+ *
+ * bfi_enet_set_pause_req is used by:
+ * BFI_ENET_H2I_SET_PAUSE_REQ
+ */
+struct bfi_enet_set_pause_req {
+ struct bfi_msgq_mhdr mh;
+ u8 rsvd[2];
+ u8 tx_pause; /* 1 = enable; 0 = disable */
+ u8 rx_pause; /* 1 = enable; 0 = disable */
+};
+
+/**
+ * DIAGNOSTICS
+ *
+ * bfi_enet_diag_lb_req is used by:
+ * BFI_ENET_H2I_DIAG_LOOPBACK_REQ
+ */
+struct bfi_enet_diag_lb_req {
+ struct bfi_msgq_mhdr mh;
+ u8 rsvd[2];
+ u8 mode; /* cable or Serdes */
+ u8 enable; /* 1 = enable; 0 = disable */
+};
+
+/**
+ * enum for Loopback opmodes
+ */
+enum {
+ BFI_ENET_DIAG_LB_OPMODE_EXT = 0,
+ BFI_ENET_DIAG_LB_OPMODE_CBL = 1,
+};
+
+/**
+ * STATISTICS
+ *
+ * bfi_enet_stats_req is used by:
+ * BFI_ENET_H2I_STATS_GET_REQ
+ * BFI_ENET_H2I_STATS_CLR_REQ
+ */
+struct bfi_enet_stats_req {
+ struct bfi_msgq_mhdr mh;
+ u16 stats_mask;
+ u8 rsvd[2];
+ u32 rx_enet_mask;
+ u32 tx_enet_mask;
+ union bfi_addr_u host_buffer;
+};
+
+/**
+ * defines for "stats_mask" above.
+ */
+#define BFI_ENET_STATS_MAC (1 << 0) /* !< MAC Statistics */
+#define BFI_ENET_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */
+#define BFI_ENET_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */
+#define BFI_ENET_STATS_RX_FC (1 << 3) /* !< Rx FC Stats from RxA */
+#define BFI_ENET_STATS_TX_FC (1 << 4) /* !< Tx FC Stats from TxA */
+
+#define BFI_ENET_STATS_ALL 0x1f
+
+/* TxF Frame Statistics */
+struct bfi_enet_stats_txf {
+ u64 ucast_octets;
+ u64 ucast;
+ u64 ucast_vlan;
+
+ u64 mcast_octets;
+ u64 mcast;
+ u64 mcast_vlan;
+
+ u64 bcast_octets;
+ u64 bcast;
+ u64 bcast_vlan;
+
+ u64 errors;
+ u64 filter_vlan; /* frames filtered due to VLAN */
+ u64 filter_mac_sa; /* frames filtered due to SA check */
+};
+
+/* RxF Frame Statistics */
+struct bfi_enet_stats_rxf {
+ u64 ucast_octets;
+ u64 ucast;
+ u64 ucast_vlan;
+
+ u64 mcast_octets;
+ u64 mcast;
+ u64 mcast_vlan;
+
+ u64 bcast_octets;
+ u64 bcast;
+ u64 bcast_vlan;
+ u64 frame_drops;
+};
+
+/* FC Tx Frame Statistics */
+struct bfi_enet_stats_fc_tx {
+ u64 txf_ucast_octets;
+ u64 txf_ucast;
+ u64 txf_ucast_vlan;
+
+ u64 txf_mcast_octets;
+ u64 txf_mcast;
+ u64 txf_mcast_vlan;
+
+ u64 txf_bcast_octets;
+ u64 txf_bcast;
+ u64 txf_bcast_vlan;
+
+ u64 txf_parity_errors;
+ u64 txf_timeout;
+ u64 txf_fid_parity_errors;
+};
+
+/* FC Rx Frame Statistics */
+struct bfi_enet_stats_fc_rx {
+ u64 rxf_ucast_octets;
+ u64 rxf_ucast;
+ u64 rxf_ucast_vlan;
+
+ u64 rxf_mcast_octets;
+ u64 rxf_mcast;
+ u64 rxf_mcast_vlan;
+
+ u64 rxf_bcast_octets;
+ u64 rxf_bcast;
+ u64 rxf_bcast_vlan;
+};
+
+/* RAD Frame Statistics */
+struct bfi_enet_stats_rad {
+ u64 rx_frames;
+ u64 rx_octets;
+ u64 rx_vlan_frames;
+
+ u64 rx_ucast;
+ u64 rx_ucast_octets;
+ u64 rx_ucast_vlan;
+
+ u64 rx_mcast;
+ u64 rx_mcast_octets;
+ u64 rx_mcast_vlan;
+
+ u64 rx_bcast;
+ u64 rx_bcast_octets;
+ u64 rx_bcast_vlan;
+
+ u64 rx_drops;
+};
+
+/* BPC Tx/Rx Registers */
+struct bfi_enet_stats_bpc {
+ /* transmit stats */
+ u64 tx_pause[8];
+ u64 tx_zero_pause[8]; /*!< Pause cancellation */
+ u64 tx_first_pause[8]; /*!< Pause initiation rather than retention */
+
+ /* receive stats */
+ u64 rx_pause[8];
+ u64 rx_zero_pause[8]; /*!< Pause cancellation */
+ u64 rx_first_pause[8]; /*!< Pause initiation rather than retention */
+};
+
+/* MAC Statistics (Rx and Tx) */
+struct bfi_enet_stats_mac {
+ u64 frame_64; /* both rx and tx counter */
+ u64 frame_65_127; /* both rx and tx counter */
+ u64 frame_128_255; /* both rx and tx counter */
+ u64 frame_256_511; /* both rx and tx counter */
+ u64 frame_512_1023; /* both rx and tx counter */
+ u64 frame_1024_1518; /* both rx and tx counter */
+ u64 frame_1519_1522; /* both rx and tx counter */
+
+ /* receive stats */
+ u64 rx_bytes;
+ u64 rx_packets;
+ u64 rx_fcs_error;
+ u64 rx_multicast;
+ u64 rx_broadcast;
+ u64 rx_control_frames;
+ u64 rx_pause;
+ u64 rx_unknown_opcode;
+ u64 rx_alignment_error;
+ u64 rx_frame_length_error;
+ u64 rx_code_error;
+ u64 rx_carrier_sense_error;
+ u64 rx_undersize;
+ u64 rx_oversize;
+ u64 rx_fragments;
+ u64 rx_jabber;
+ u64 rx_drop;
+
+ /* transmit stats */
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 tx_multicast;
+ u64 tx_broadcast;
+ u64 tx_pause;
+ u64 tx_deferral;
+ u64 tx_excessive_deferral;
+ u64 tx_single_collision;
+ u64 tx_muliple_collision;
+ u64 tx_late_collision;
+ u64 tx_excessive_collision;
+ u64 tx_total_collision;
+ u64 tx_pause_honored;
+ u64 tx_drop;
+ u64 tx_jabber;
+ u64 tx_fcs_error;
+ u64 tx_control_frame;
+ u64 tx_oversize;
+ u64 tx_undersize;
+ u64 tx_fragments;
+};
+
+/**
+ * Complete statistics, DMAed from fw to host followed by
+ * BFI_ENET_I2H_STATS_GET_RSP
+ */
+struct bfi_enet_stats {
+ struct bfi_enet_stats_mac mac_stats;
+ struct bfi_enet_stats_bpc bpc_stats;
+ struct bfi_enet_stats_rad rad_stats;
+ struct bfi_enet_stats_rad rlb_stats;
+ struct bfi_enet_stats_fc_rx fc_rx_stats;
+ struct bfi_enet_stats_fc_tx fc_tx_stats;
+ struct bfi_enet_stats_rxf rxf_stats[BFI_ENET_CFG_MAX];
+ struct bfi_enet_stats_txf txf_stats[BFI_ENET_CFG_MAX];
+};
+
+#pragma pack()
+
+#endif /* __BFI_ENET_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h
new file mode 100644
index 000000000000..efacff3ab51d
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h
@@ -0,0 +1,452 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/*
+ * bfi_reg.h ASIC register defines for all Brocade adapter ASICs
+ */
+
+#ifndef __BFI_REG_H__
+#define __BFI_REG_H__
+
+#define HOSTFN0_INT_STATUS 0x00014000 /* cb/ct */
+#define HOSTFN1_INT_STATUS 0x00014100 /* cb/ct */
+#define HOSTFN2_INT_STATUS 0x00014300 /* ct */
+#define HOSTFN3_INT_STATUS 0x00014400 /* ct */
+#define HOSTFN0_INT_MSK 0x00014004 /* cb/ct */
+#define HOSTFN1_INT_MSK 0x00014104 /* cb/ct */
+#define HOSTFN2_INT_MSK 0x00014304 /* ct */
+#define HOSTFN3_INT_MSK 0x00014404 /* ct */
+
+#define HOST_PAGE_NUM_FN0 0x00014008 /* cb/ct */
+#define HOST_PAGE_NUM_FN1 0x00014108 /* cb/ct */
+#define HOST_PAGE_NUM_FN2 0x00014308 /* ct */
+#define HOST_PAGE_NUM_FN3 0x00014408 /* ct */
+
+#define APP_PLL_LCLK_CTL_REG 0x00014204 /* cb/ct */
+#define __P_LCLK_PLL_LOCK 0x80000000
+#define __APP_PLL_LCLK_SRAM_USE_100MHZ 0x00100000
+#define __APP_PLL_LCLK_RESET_TIMER_MK 0x000e0000
+#define __APP_PLL_LCLK_RESET_TIMER_SH 17
+#define __APP_PLL_LCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_LCLK_RESET_TIMER_SH)
+#define __APP_PLL_LCLK_LOGIC_SOFT_RESET 0x00010000
+#define __APP_PLL_LCLK_CNTLMT0_1_MK 0x0000c000
+#define __APP_PLL_LCLK_CNTLMT0_1_SH 14
+#define __APP_PLL_LCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_LCLK_CNTLMT0_1_SH)
+#define __APP_PLL_LCLK_JITLMT0_1_MK 0x00003000
+#define __APP_PLL_LCLK_JITLMT0_1_SH 12
+#define __APP_PLL_LCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_LCLK_JITLMT0_1_SH)
+#define __APP_PLL_LCLK_HREF 0x00000800
+#define __APP_PLL_LCLK_HDIV 0x00000400
+#define __APP_PLL_LCLK_P0_1_MK 0x00000300
+#define __APP_PLL_LCLK_P0_1_SH 8
+#define __APP_PLL_LCLK_P0_1(_v) ((_v) << __APP_PLL_LCLK_P0_1_SH)
+#define __APP_PLL_LCLK_Z0_2_MK 0x000000e0
+#define __APP_PLL_LCLK_Z0_2_SH 5
+#define __APP_PLL_LCLK_Z0_2(_v) ((_v) << __APP_PLL_LCLK_Z0_2_SH)
+#define __APP_PLL_LCLK_RSEL200500 0x00000010
+#define __APP_PLL_LCLK_ENARST 0x00000008
+#define __APP_PLL_LCLK_BYPASS 0x00000004
+#define __APP_PLL_LCLK_LRESETN 0x00000002
+#define __APP_PLL_LCLK_ENABLE 0x00000001
+#define APP_PLL_SCLK_CTL_REG 0x00014208 /* cb/ct */
+#define __P_SCLK_PLL_LOCK 0x80000000
+#define __APP_PLL_SCLK_RESET_TIMER_MK 0x000e0000
+#define __APP_PLL_SCLK_RESET_TIMER_SH 17
+#define __APP_PLL_SCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_SCLK_RESET_TIMER_SH)
+#define __APP_PLL_SCLK_LOGIC_SOFT_RESET 0x00010000
+#define __APP_PLL_SCLK_CNTLMT0_1_MK 0x0000c000
+#define __APP_PLL_SCLK_CNTLMT0_1_SH 14
+#define __APP_PLL_SCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_SCLK_CNTLMT0_1_SH)
+#define __APP_PLL_SCLK_JITLMT0_1_MK 0x00003000
+#define __APP_PLL_SCLK_JITLMT0_1_SH 12
+#define __APP_PLL_SCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_SCLK_JITLMT0_1_SH)
+#define __APP_PLL_SCLK_HREF 0x00000800
+#define __APP_PLL_SCLK_HDIV 0x00000400
+#define __APP_PLL_SCLK_P0_1_MK 0x00000300
+#define __APP_PLL_SCLK_P0_1_SH 8
+#define __APP_PLL_SCLK_P0_1(_v) ((_v) << __APP_PLL_SCLK_P0_1_SH)
+#define __APP_PLL_SCLK_Z0_2_MK 0x000000e0
+#define __APP_PLL_SCLK_Z0_2_SH 5
+#define __APP_PLL_SCLK_Z0_2(_v) ((_v) << __APP_PLL_SCLK_Z0_2_SH)
+#define __APP_PLL_SCLK_RSEL200500 0x00000010
+#define __APP_PLL_SCLK_ENARST 0x00000008
+#define __APP_PLL_SCLK_BYPASS 0x00000004
+#define __APP_PLL_SCLK_LRESETN 0x00000002
+#define __APP_PLL_SCLK_ENABLE 0x00000001
+#define __ENABLE_MAC_AHB_1 0x00800000 /* ct */
+#define __ENABLE_MAC_AHB_0 0x00400000 /* ct */
+#define __ENABLE_MAC_1 0x00200000 /* ct */
+#define __ENABLE_MAC_0 0x00100000 /* ct */
+
+#define HOST_SEM0_REG 0x00014230 /* cb/ct */
+#define HOST_SEM1_REG 0x00014234 /* cb/ct */
+#define HOST_SEM2_REG 0x00014238 /* cb/ct */
+#define HOST_SEM3_REG 0x0001423c /* cb/ct */
+#define HOST_SEM4_REG 0x00014610 /* cb/ct */
+#define HOST_SEM5_REG 0x00014614 /* cb/ct */
+#define HOST_SEM6_REG 0x00014618 /* cb/ct */
+#define HOST_SEM7_REG 0x0001461c /* cb/ct */
+#define HOST_SEM0_INFO_REG 0x00014240 /* cb/ct */
+#define HOST_SEM1_INFO_REG 0x00014244 /* cb/ct */
+#define HOST_SEM2_INFO_REG 0x00014248 /* cb/ct */
+#define HOST_SEM3_INFO_REG 0x0001424c /* cb/ct */
+#define HOST_SEM4_INFO_REG 0x00014620 /* cb/ct */
+#define HOST_SEM5_INFO_REG 0x00014624 /* cb/ct */
+#define HOST_SEM6_INFO_REG 0x00014628 /* cb/ct */
+#define HOST_SEM7_INFO_REG 0x0001462c /* cb/ct */
+
+#define HOSTFN0_LPU0_CMD_STAT 0x00019000 /* cb/ct */
+#define HOSTFN0_LPU1_CMD_STAT 0x00019004 /* cb/ct */
+#define HOSTFN1_LPU0_CMD_STAT 0x00019010 /* cb/ct */
+#define HOSTFN1_LPU1_CMD_STAT 0x00019014 /* cb/ct */
+#define HOSTFN2_LPU0_CMD_STAT 0x00019150 /* ct */
+#define HOSTFN2_LPU1_CMD_STAT 0x00019154 /* ct */
+#define HOSTFN3_LPU0_CMD_STAT 0x00019160 /* ct */
+#define HOSTFN3_LPU1_CMD_STAT 0x00019164 /* ct */
+#define LPU0_HOSTFN0_CMD_STAT 0x00019008 /* cb/ct */
+#define LPU1_HOSTFN0_CMD_STAT 0x0001900c /* cb/ct */
+#define LPU0_HOSTFN1_CMD_STAT 0x00019018 /* cb/ct */
+#define LPU1_HOSTFN1_CMD_STAT 0x0001901c /* cb/ct */
+#define LPU0_HOSTFN2_CMD_STAT 0x00019158 /* ct */
+#define LPU1_HOSTFN2_CMD_STAT 0x0001915c /* ct */
+#define LPU0_HOSTFN3_CMD_STAT 0x00019168 /* ct */
+#define LPU1_HOSTFN3_CMD_STAT 0x0001916c /* ct */
+
+#define PSS_CTL_REG 0x00018800 /* cb/ct */
+#define __PSS_I2C_CLK_DIV_MK 0x007f0000
+#define __PSS_I2C_CLK_DIV_SH 16
+#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
+#define __PSS_LMEM_INIT_DONE 0x00001000
+#define __PSS_LMEM_RESET 0x00000200
+#define __PSS_LMEM_INIT_EN 0x00000100
+#define __PSS_LPU1_RESET 0x00000002
+#define __PSS_LPU0_RESET 0x00000001
+#define PSS_ERR_STATUS_REG 0x00018810 /* cb/ct */
+#define ERR_SET_REG 0x00018818 /* cb/ct */
+#define PSS_GPIO_OUT_REG 0x000188c0 /* cb/ct */
+#define __PSS_GPIO_OUT_REG 0x00000fff
+#define PSS_GPIO_OE_REG 0x000188c8 /* cb/ct */
+#define __PSS_GPIO_OE_REG 0x000000ff
+
+#define HOSTFN0_LPU_MBOX0_0 0x00019200 /* cb/ct */
+#define HOSTFN1_LPU_MBOX0_8 0x00019260 /* cb/ct */
+#define LPU_HOSTFN0_MBOX0_0 0x00019280 /* cb/ct */
+#define LPU_HOSTFN1_MBOX0_8 0x000192e0 /* cb/ct */
+#define HOSTFN2_LPU_MBOX0_0 0x00019400 /* ct */
+#define HOSTFN3_LPU_MBOX0_8 0x00019460 /* ct */
+#define LPU_HOSTFN2_MBOX0_0 0x00019480 /* ct */
+#define LPU_HOSTFN3_MBOX0_8 0x000194e0 /* ct */
+
+#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c /* ct */
+#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c /* ct */
+#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c /* ct */
+#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c /* ct */
+
+#define MBIST_CTL_REG 0x00014220 /* ct */
+#define __EDRAM_BISTR_START 0x00000004
+#define MBIST_STAT_REG 0x00014224 /* ct */
+#define ETH_MAC_SER_REG 0x00014288 /* ct */
+#define __APP_EMS_CKBUFAMPIN 0x00000020
+#define __APP_EMS_REFCLKSEL 0x00000010
+#define __APP_EMS_CMLCKSEL 0x00000008
+#define __APP_EMS_REFCKBUFEN2 0x00000004
+#define __APP_EMS_REFCKBUFEN1 0x00000002
+#define __APP_EMS_CHANNEL_SEL 0x00000001
+#define FNC_PERS_REG 0x00014604 /* ct */
+#define __F3_FUNCTION_ACTIVE 0x80000000
+#define __F3_FUNCTION_MODE 0x40000000
+#define __F3_PORT_MAP_MK 0x30000000
+#define __F3_PORT_MAP_SH 28
+#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
+#define __F3_VM_MODE 0x08000000
+#define __F3_INTX_STATUS_MK 0x07000000
+#define __F3_INTX_STATUS_SH 24
+#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
+#define __F2_FUNCTION_ACTIVE 0x00800000
+#define __F2_FUNCTION_MODE 0x00400000
+#define __F2_PORT_MAP_MK 0x00300000
+#define __F2_PORT_MAP_SH 20
+#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
+#define __F2_VM_MODE 0x00080000
+#define __F2_INTX_STATUS_MK 0x00070000
+#define __F2_INTX_STATUS_SH 16
+#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
+#define __F1_FUNCTION_ACTIVE 0x00008000
+#define __F1_FUNCTION_MODE 0x00004000
+#define __F1_PORT_MAP_MK 0x00003000
+#define __F1_PORT_MAP_SH 12
+#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
+#define __F1_VM_MODE 0x00000800
+#define __F1_INTX_STATUS_MK 0x00000700
+#define __F1_INTX_STATUS_SH 8
+#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
+#define __F0_FUNCTION_ACTIVE 0x00000080
+#define __F0_FUNCTION_MODE 0x00000040
+#define __F0_PORT_MAP_MK 0x00000030
+#define __F0_PORT_MAP_SH 4
+#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
+#define __F0_VM_MODE 0x00000008
+#define __F0_INTX_STATUS 0x00000007
+enum {
+ __F0_INTX_STATUS_MSIX = 0x0,
+ __F0_INTX_STATUS_INTA = 0x1,
+ __F0_INTX_STATUS_INTB = 0x2,
+ __F0_INTX_STATUS_INTC = 0x3,
+ __F0_INTX_STATUS_INTD = 0x4,
+};
+
+#define OP_MODE 0x0001460c
+#define __APP_ETH_CLK_LOWSPEED 0x00000004
+#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
+#define __GLOBAL_FCOE_MODE 0x00000001
+#define FW_INIT_HALT_P0 0x000191ac
+#define __FW_INIT_HALT_P 0x00000001
+#define FW_INIT_HALT_P1 0x000191bc
+#define PMM_1T_RESET_REG_P0 0x0002381c
+#define __PMM_1T_RESET_P 0x00000001
+#define PMM_1T_RESET_REG_P1 0x00023c1c
+
+/**
+ * Brocade 1860 Adapter specific defines
+ */
+#define CT2_PCI_CPQ_BASE 0x00030000
+#define CT2_PCI_APP_BASE 0x00030100
+#define CT2_PCI_ETH_BASE 0x00030400
+
+/*
+ * APP block registers
+ */
+#define CT2_HOSTFN_INT_STATUS (CT2_PCI_APP_BASE + 0x00)
+#define CT2_HOSTFN_INTR_MASK (CT2_PCI_APP_BASE + 0x04)
+#define CT2_HOSTFN_PERSONALITY0 (CT2_PCI_APP_BASE + 0x08)
+#define __PME_STATUS_ 0x00200000
+#define __PF_VF_BAR_SIZE_MODE__MK 0x00180000
+#define __PF_VF_BAR_SIZE_MODE__SH 19
+#define __PF_VF_BAR_SIZE_MODE_(_v) ((_v) << __PF_VF_BAR_SIZE_MODE__SH)
+#define __FC_LL_PORT_MAP__MK 0x00060000
+#define __FC_LL_PORT_MAP__SH 17
+#define __FC_LL_PORT_MAP_(_v) ((_v) << __FC_LL_PORT_MAP__SH)
+#define __PF_VF_ACTIVE_ 0x00010000
+#define __PF_VF_CFG_RDY_ 0x00008000
+#define __PF_VF_ENABLE_ 0x00004000
+#define __PF_DRIVER_ACTIVE_ 0x00002000
+#define __PF_PME_SEND_ENABLE_ 0x00001000
+#define __PF_EXROM_OFFSET__MK 0x00000ff0
+#define __PF_EXROM_OFFSET__SH 4
+#define __PF_EXROM_OFFSET_(_v) ((_v) << __PF_EXROM_OFFSET__SH)
+#define __FC_LL_MODE_ 0x00000008
+#define __PF_INTX_PIN_ 0x00000007
+#define CT2_HOSTFN_PERSONALITY1 (CT2_PCI_APP_BASE + 0x0C)
+#define __PF_NUM_QUEUES1__MK 0xff000000
+#define __PF_NUM_QUEUES1__SH 24
+#define __PF_NUM_QUEUES1_(_v) ((_v) << __PF_NUM_QUEUES1__SH)
+#define __PF_VF_QUE_OFFSET1__MK 0x00ff0000
+#define __PF_VF_QUE_OFFSET1__SH 16
+#define __PF_VF_QUE_OFFSET1_(_v) ((_v) << __PF_VF_QUE_OFFSET1__SH)
+#define __PF_VF_NUM_QUEUES__MK 0x0000ff00
+#define __PF_VF_NUM_QUEUES__SH 8
+#define __PF_VF_NUM_QUEUES_(_v) ((_v) << __PF_VF_NUM_QUEUES__SH)
+#define __PF_VF_QUE_OFFSET_ 0x000000ff
+#define CT2_HOSTFN_PAGE_NUM (CT2_PCI_APP_BASE + 0x18)
+#define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR (CT2_PCI_APP_BASE + 0x38)
+
+/*
+ * Brocade 1860 adapter CPQ block registers
+ */
+#define CT2_HOSTFN_LPU0_MBOX0 (CT2_PCI_CPQ_BASE + 0x00)
+#define CT2_HOSTFN_LPU1_MBOX0 (CT2_PCI_CPQ_BASE + 0x20)
+#define CT2_LPU0_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x40)
+#define CT2_LPU1_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x60)
+#define CT2_HOSTFN_LPU0_CMD_STAT (CT2_PCI_CPQ_BASE + 0x80)
+#define CT2_HOSTFN_LPU1_CMD_STAT (CT2_PCI_CPQ_BASE + 0x84)
+#define CT2_LPU0_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x88)
+#define CT2_LPU1_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x8c)
+#define CT2_HOSTFN_LPU0_READ_STAT (CT2_PCI_CPQ_BASE + 0x90)
+#define CT2_HOSTFN_LPU1_READ_STAT (CT2_PCI_CPQ_BASE + 0x94)
+#define CT2_LPU0_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x98)
+#define CT2_LPU1_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x9C)
+#define CT2_HOST_SEM0_REG 0x000148f0
+#define CT2_HOST_SEM1_REG 0x000148f4
+#define CT2_HOST_SEM2_REG 0x000148f8
+#define CT2_HOST_SEM3_REG 0x000148fc
+#define CT2_HOST_SEM4_REG 0x00014900
+#define CT2_HOST_SEM5_REG 0x00014904
+#define CT2_HOST_SEM6_REG 0x00014908
+#define CT2_HOST_SEM7_REG 0x0001490c
+#define CT2_HOST_SEM0_INFO_REG 0x000148b0
+#define CT2_HOST_SEM1_INFO_REG 0x000148b4
+#define CT2_HOST_SEM2_INFO_REG 0x000148b8
+#define CT2_HOST_SEM3_INFO_REG 0x000148bc
+#define CT2_HOST_SEM4_INFO_REG 0x000148c0
+#define CT2_HOST_SEM5_INFO_REG 0x000148c4
+#define CT2_HOST_SEM6_INFO_REG 0x000148c8
+#define CT2_HOST_SEM7_INFO_REG 0x000148cc
+
+#define CT2_APP_PLL_LCLK_CTL_REG 0x00014808
+#define __APP_LPUCLK_HALFSPEED 0x40000000
+#define __APP_PLL_LCLK_LOAD 0x20000000
+#define __APP_PLL_LCLK_FBCNT_MK 0x1fe00000
+#define __APP_PLL_LCLK_FBCNT_SH 21
+#define __APP_PLL_LCLK_FBCNT(_v) ((_v) << __APP_PLL_LCLK_FBCNT_SH)
+enum {
+ __APP_PLL_LCLK_FBCNT_425_MHZ = 6,
+ __APP_PLL_LCLK_FBCNT_468_MHZ = 4,
+};
+#define __APP_PLL_LCLK_EXTFB 0x00000800
+#define __APP_PLL_LCLK_ENOUTS 0x00000400
+#define __APP_PLL_LCLK_RATE 0x00000010
+#define CT2_APP_PLL_SCLK_CTL_REG 0x0001480c
+#define __P_SCLK_PLL_LOCK 0x80000000
+#define __APP_PLL_SCLK_REFCLK_SEL 0x40000000
+#define __APP_PLL_SCLK_CLK_DIV2 0x20000000
+#define __APP_PLL_SCLK_LOAD 0x10000000
+#define __APP_PLL_SCLK_FBCNT_MK 0x0ff00000
+#define __APP_PLL_SCLK_FBCNT_SH 20
+#define __APP_PLL_SCLK_FBCNT(_v) ((_v) << __APP_PLL_SCLK_FBCNT_SH)
+enum {
+ __APP_PLL_SCLK_FBCNT_NORM = 6,
+ __APP_PLL_SCLK_FBCNT_10G_FC = 10,
+};
+#define __APP_PLL_SCLK_EXTFB 0x00000800
+#define __APP_PLL_SCLK_ENOUTS 0x00000400
+#define __APP_PLL_SCLK_RATE 0x00000010
+#define CT2_PCIE_MISC_REG 0x00014804
+#define __ETH_CLK_ENABLE_PORT1 0x00000010
+#define CT2_CHIP_MISC_PRG 0x000148a4
+#define __ETH_CLK_ENABLE_PORT0 0x00004000
+#define __APP_LPU_SPEED 0x00000002
+#define CT2_MBIST_STAT_REG 0x00014818
+#define CT2_MBIST_CTL_REG 0x0001481c
+#define CT2_PMM_1T_CONTROL_REG_P0 0x0002381c
+#define __PMM_1T_PNDB_P 0x00000002
+#define CT2_PMM_1T_CONTROL_REG_P1 0x00023c1c
+#define CT2_WGN_STATUS 0x00014990
+#define __A2T_AHB_LOAD 0x00000800
+#define __WGN_READY 0x00000400
+#define __GLBL_PF_VF_CFG_RDY 0x00000200
+#define CT2_NFC_CSR_SET_REG 0x00027424
+#define __HALT_NFC_CONTROLLER 0x00000002
+#define __NFC_CONTROLLER_HALTED 0x00001000
+
+#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0
+#define __CSI_MAC_RESET 0x00000010
+#define __CSI_MAC_AHB_RESET 0x00000008
+#define CT2_CSI_MAC1_CONTROL_REG 0x000270d4
+#define CT2_CSI_MAC_CONTROL_REG(__n) \
+ (CT2_CSI_MAC0_CONTROL_REG + \
+ (__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG))
+
+/*
+ * Name semaphore registers based on usage
+ */
+#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
+#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
+#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
+#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
+#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
+#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG
+
+/*
+ * CT2 semaphore register locations changed
+ */
+#define CT2_BFA_IOC0_HBEAT_REG CT2_HOST_SEM0_INFO_REG
+#define CT2_BFA_IOC0_STATE_REG CT2_HOST_SEM1_INFO_REG
+#define CT2_BFA_IOC1_HBEAT_REG CT2_HOST_SEM2_INFO_REG
+#define CT2_BFA_IOC1_STATE_REG CT2_HOST_SEM3_INFO_REG
+#define CT2_BFA_FW_USE_COUNT CT2_HOST_SEM4_INFO_REG
+#define CT2_BFA_IOC_FAIL_SYNC CT2_HOST_SEM5_INFO_REG
+
+#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
+#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
+
+/*
+ * And corresponding host interrupt status bit field defines
+ */
+#define __HFN_INT_CPE_Q0 0x00000001U
+#define __HFN_INT_CPE_Q1 0x00000002U
+#define __HFN_INT_CPE_Q2 0x00000004U
+#define __HFN_INT_CPE_Q3 0x00000008U
+#define __HFN_INT_CPE_Q4 0x00000010U
+#define __HFN_INT_CPE_Q5 0x00000020U
+#define __HFN_INT_CPE_Q6 0x00000040U
+#define __HFN_INT_CPE_Q7 0x00000080U
+#define __HFN_INT_RME_Q0 0x00000100U
+#define __HFN_INT_RME_Q1 0x00000200U
+#define __HFN_INT_RME_Q2 0x00000400U
+#define __HFN_INT_RME_Q3 0x00000800U
+#define __HFN_INT_RME_Q4 0x00001000U
+#define __HFN_INT_RME_Q5 0x00002000U
+#define __HFN_INT_RME_Q6 0x00004000U
+#define __HFN_INT_RME_Q7 0x00008000U
+#define __HFN_INT_ERR_EMC 0x00010000U
+#define __HFN_INT_ERR_LPU0 0x00020000U
+#define __HFN_INT_ERR_LPU1 0x00040000U
+#define __HFN_INT_ERR_PSS 0x00080000U
+#define __HFN_INT_MBOX_LPU0 0x00100000U
+#define __HFN_INT_MBOX_LPU1 0x00200000U
+#define __HFN_INT_MBOX1_LPU0 0x00400000U
+#define __HFN_INT_MBOX1_LPU1 0x00800000U
+#define __HFN_INT_LL_HALT 0x01000000U
+#define __HFN_INT_CPE_MASK 0x000000ffU
+#define __HFN_INT_RME_MASK 0x0000ff00U
+#define __HFN_INT_ERR_MASK \
+ (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | \
+ __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT)
+#define __HFN_INT_FN0_MASK \
+ (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
+ __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
+ __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0)
+#define __HFN_INT_FN1_MASK \
+ (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
+ __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
+ __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1)
+
+/*
+ * Host interrupt status defines for 1860
+ */
+#define __HFN_INT_MBOX_LPU0_CT2 0x00010000U
+#define __HFN_INT_MBOX_LPU1_CT2 0x00020000U
+#define __HFN_INT_ERR_PSS_CT2 0x00040000U
+#define __HFN_INT_ERR_LPU0_CT2 0x00080000U
+#define __HFN_INT_ERR_LPU1_CT2 0x00100000U
+#define __HFN_INT_CPQ_HALT_CT2 0x00200000U
+#define __HFN_INT_ERR_WGN_CT2 0x00400000U
+#define __HFN_INT_ERR_LEHRX_CT2 0x00800000U
+#define __HFN_INT_ERR_LEHTX_CT2 0x01000000U
+#define __HFN_INT_ERR_MASK_CT2 \
+ (__HFN_INT_ERR_PSS_CT2 | __HFN_INT_ERR_LPU0_CT2 | \
+ __HFN_INT_ERR_LPU1_CT2 | __HFN_INT_CPQ_HALT_CT2 | \
+ __HFN_INT_ERR_WGN_CT2 | __HFN_INT_ERR_LEHRX_CT2 | \
+ __HFN_INT_ERR_LEHTX_CT2)
+#define __HFN_INT_FN0_MASK_CT2 \
+ (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
+ __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
+ __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0_CT2)
+#define __HFN_INT_FN1_MASK_CT2 \
+ (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
+ __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
+ __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1_CT2)
+
+/*
+ * ASIC memory map.
+ */
+#define PSS_SMEM_PAGE_START 0x8000
+#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
+#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
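+
+/*
+ * Worked example: shared memory is windowed in 32 KB (1 << 15) pages,
+ * so an smem offset of 0x12000 maps to PSS_SMEM_PGNUM(_pg0, 0x12000) =
+ * _pg0 + 2 with PSS_SMEM_PGOFF(0x12000) = 0x2000.
+ */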
+
+#endif /* __BFI_REG_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
new file mode 100644
index 000000000000..4d7a5de08e12
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -0,0 +1,575 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BNA_H__
+#define __BNA_H__
+
+#include "bfa_defs.h"
+#include "bfa_ioc.h"
+#include "bfi_enet.h"
+#include "bna_types.h"
+
+extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
+
+/**
+ *
+ * Macros and constants
+ *
+ */
+
+#define BNA_IOC_TIMER_FREQ 200
+
+/* Log string size */
+#define BNA_MESSAGE_SIZE 256
+
+#define bna_is_small_rxq(_id) ((_id) & 0x1)
+
+#define BNA_MAC_IS_EQUAL(_mac1, _mac2) \
+ (!memcmp((_mac1), (_mac2), sizeof(mac_t)))
+
+#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)
+
+#define BNA_TO_POWER_OF_2(x) \
+do { \
+ int _shift = 0; \
+ while ((x) && (x) != 1) { \
+ (x) >>= 1; \
+ _shift++; \
+ } \
+ (x) <<= _shift; \
+} while (0)
+
+#define BNA_TO_POWER_OF_2_HIGH(x) \
+do { \
+ int n = 1; \
+ while (n < (x)) \
+ n <<= 1; \
+ (x) = n; \
+} while (0)
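+
+/*
+ * Worked example: BNA_TO_POWER_OF_2 rounds down, so x = 12 becomes 8,
+ * while BNA_TO_POWER_OF_2_HIGH rounds up, so x = 12 becomes 16; values
+ * that are already powers of two are left unchanged by both.
+ */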
+
+/*
+ * input : _addr-> os dma addr in host endian format,
+ * output : _bna_dma_addr-> pointer to hw dma addr
+ */
+#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr) \
+do { \
+ u64 tmp_addr = \
+ cpu_to_be64((u64)(_addr)); \
+ (_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \
+ (_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \
+} while (0)
+
+/*
+ * input : _bna_dma_addr-> pointer to hw dma addr
+ * output : _addr-> os dma addr in host endian format
+ */
+#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr) \
+do { \
+ (_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32) \
+ | ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff)); \
+} while (0)
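+
+/*
+ * Round-trip sketch (illustrative local variables): writing a host
+ * address with BNA_SET_DMA_ADDR(dma, &hw_addr) and reading it back
+ * with BNA_GET_DMA_ADDR(&hw_addr, check) leaves check == dma, since
+ * the cpu_to_be64() on the way in is undone by the two ntohl()s on
+ * the way out.
+ */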
+
+#define containing_rec(addr, type, field) \
+ ((type *)((unsigned char *)(addr) - \
+ (unsigned char *)(&((type *)0)->field)))
+
+#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2)
+
+/* TxQ element is 64 bytes */
+#define BNA_TXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 6)
+#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 6)
+
+#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
+{ \
+ unsigned int page_index; /* index within a page */ \
+ void *page_addr; \
+ page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
+ (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
+ page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
+ (_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
+}
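+
+/*
+ * Worked example for 4 KB pages: BNA_TXQ_PAGE_INDEX_MAX = 4096 >> 6 =
+ * 64 entries per page, so queue element 70 resolves to entry 6 of the
+ * second page in _qpt_ptr, with a contiguous range of 58 entries left
+ * on that page.
+ */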
+
+/* RxQ element is 8 bytes */
+#define BNA_RXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 3)
+#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 3)
+
+#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
+{ \
+ unsigned int page_index; /* index within a page */ \
+ void *page_addr; \
+ page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1); \
+ (_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index); \
+ page_addr = (_qpt_ptr)[((_qe_idx) >> \
+ BNA_RXQ_PAGE_INDEX_MAX_SHIFT)]; \
+ (_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
+}
+
+/* CQ element is 16 bytes */
+#define BNA_CQ_PAGE_INDEX_MAX (PAGE_SIZE >> 4)
+#define BNA_CQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 4)
+
+#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
+{ \
+ unsigned int page_index; /* index within a page */ \
+ void *page_addr; \
+ \
+ page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1); \
+ (_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index); \
+ page_addr = (_qpt_ptr)[((_qe_idx) >> \
+ BNA_CQ_PAGE_INDEX_MAX_SHIFT)]; \
+ (_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\
+}
+
+#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base) \
+ (&((_cast *)(_q_base))[(_qe_idx)])
+
+#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))
+
+#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \
+ ((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))
+
+#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth) \
+ (((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
+
+#define BNA_QE_FREE_CNT(_q_ptr, _q_depth) \
+ (((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) & \
+ ((_q_depth) - 1))
+
+#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth) \
+ ((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) & \
+ (_q_depth - 1))
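+
+/*
+ * Worked example (q_depth must be a power of two for this index math):
+ * with q_depth = 64, producer_index = 5 and consumer_index = 62,
+ * BNA_QE_IN_USE_CNT yields (5 - 62) & 63 = 7 and BNA_QE_FREE_CNT yields
+ * (62 - 5 - 1) & 63 = 56; one slot is always kept empty, so the two
+ * counts sum to q_depth - 1.
+ */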
+
+#define BNA_Q_GET_CI(_q_ptr) ((_q_ptr)->q.consumer_index)
+
+#define BNA_Q_GET_PI(_q_ptr) ((_q_ptr)->q.producer_index)
+
+#define BNA_Q_PI_ADD(_q_ptr, _num) \
+ (_q_ptr)->q.producer_index = \
+ (((_q_ptr)->q.producer_index + (_num)) & \
+ ((_q_ptr)->q.q_depth - 1))
+
+#define BNA_Q_CI_ADD(_q_ptr, _num) \
+ (_q_ptr)->q.consumer_index = \
+ (((_q_ptr)->q.consumer_index + (_num)) \
+ & ((_q_ptr)->q.q_depth - 1))
+
+#define BNA_Q_FREE_COUNT(_q_ptr) \
+ (BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))
+
+#define BNA_Q_IN_USE_COUNT(_q_ptr) \
+ (BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
+
+#define BNA_LARGE_PKT_SIZE 1000
+
+#define BNA_UPDATE_PKT_CNT(_pkt, _len) \
+do { \
+ if ((_len) > BNA_LARGE_PKT_SIZE) { \
+ (_pkt)->large_pkt_cnt++; \
+ } else { \
+ (_pkt)->small_pkt_cnt++; \
+ } \
+} while (0)
+
+#define call_rxf_stop_cbfn(rxf) \
+do { \
+ if ((rxf)->stop_cbfn) { \
+ void (*cbfn)(struct bna_rx *); \
+ struct bna_rx *cbarg; \
+ cbfn = (rxf)->stop_cbfn; \
+ cbarg = (rxf)->stop_cbarg; \
+ (rxf)->stop_cbfn = NULL; \
+ (rxf)->stop_cbarg = NULL; \
+ cbfn(cbarg); \
+ } \
+} while (0)
+
+#define call_rxf_start_cbfn(rxf) \
+do { \
+ if ((rxf)->start_cbfn) { \
+ void (*cbfn)(struct bna_rx *); \
+ struct bna_rx *cbarg; \
+ cbfn = (rxf)->start_cbfn; \
+ cbarg = (rxf)->start_cbarg; \
+ (rxf)->start_cbfn = NULL; \
+ (rxf)->start_cbarg = NULL; \
+ cbfn(cbarg); \
+ } \
+} while (0)
+
+#define call_rxf_cam_fltr_cbfn(rxf) \
+do { \
+ if ((rxf)->cam_fltr_cbfn) { \
+ void (*cbfn)(struct bnad *, struct bna_rx *); \
+ struct bnad *cbarg; \
+ cbfn = (rxf)->cam_fltr_cbfn; \
+ cbarg = (rxf)->cam_fltr_cbarg; \
+ (rxf)->cam_fltr_cbfn = NULL; \
+ (rxf)->cam_fltr_cbarg = NULL; \
+ cbfn(cbarg, rxf->rx); \
+ } \
+} while (0)
+
+#define call_rxf_pause_cbfn(rxf) \
+do { \
+ if ((rxf)->oper_state_cbfn) { \
+ void (*cbfn)(struct bnad *, struct bna_rx *); \
+ struct bnad *cbarg; \
+ cbfn = (rxf)->oper_state_cbfn; \
+ cbarg = (rxf)->oper_state_cbarg; \
+ (rxf)->oper_state_cbfn = NULL; \
+ (rxf)->oper_state_cbarg = NULL; \
+ cbfn(cbarg, rxf->rx); \
+ } \
+} while (0)
+
+#define call_rxf_resume_cbfn(rxf) call_rxf_pause_cbfn(rxf)
+
+#define is_xxx_enable(mode, bitmask, xxx) (((bitmask) & (xxx)) && ((mode) & (xxx)))
+
+#define is_xxx_disable(mode, bitmask, xxx) (((bitmask) & (xxx)) && !((mode) & (xxx)))
+
+#define xxx_enable(mode, bitmask, xxx) \
+do { \
+ (bitmask) |= (xxx); \
+ (mode) |= (xxx); \
+} while (0)
+
+#define xxx_disable(mode, bitmask, xxx) \
+do { \
+ (bitmask) |= (xxx); \
+ (mode) &= ~(xxx); \
+} while (0)
+
+#define xxx_inactive(mode, bitmask, xxx) \
+do { \
+ (bitmask) &= ~(xxx); \
+ (mode) &= ~(xxx); \
+} while (0)
+
+#define is_promisc_enable(mode, bitmask) \
+ is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define is_promisc_disable(mode, bitmask) \
+ is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define promisc_enable(mode, bitmask) \
+ xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define promisc_disable(mode, bitmask) \
+ xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define promisc_inactive(mode, bitmask) \
+ xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define is_default_enable(mode, bitmask) \
+ is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define is_default_disable(mode, bitmask) \
+ is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define default_enable(mode, bitmask) \
+ xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define default_disable(mode, bitmask) \
+ xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define default_inactive(mode, bitmask) \
+ xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define is_allmulti_enable(mode, bitmask) \
+ is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define is_allmulti_disable(mode, bitmask) \
+ is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define allmulti_enable(mode, bitmask) \
+ xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define allmulti_disable(mode, bitmask) \
+ xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define allmulti_inactive(mode, bitmask) \
+ xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define GET_RXQS(rxp, q0, q1) do { \
+ switch ((rxp)->type) { \
+ case BNA_RXP_SINGLE: \
+ (q0) = rxp->rxq.single.only; \
+ (q1) = NULL; \
+ break; \
+ case BNA_RXP_SLR: \
+ (q0) = rxp->rxq.slr.large; \
+ (q1) = rxp->rxq.slr.small; \
+ break; \
+ case BNA_RXP_HDS: \
+ (q0) = rxp->rxq.hds.data; \
+ (q1) = rxp->rxq.hds.hdr; \
+ break; \
+ } \
+} while (0)
+
+#define bna_tx_rid_mask(_bna) ((_bna)->tx_mod.rid_mask)
+
+#define bna_rx_rid_mask(_bna) ((_bna)->rx_mod.rid_mask)
+
+#define bna_tx_from_rid(_bna, _rid, _tx) \
+do { \
+ struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod; \
+ struct bna_tx *__tx; \
+ struct list_head *qe; \
+ _tx = NULL; \
+ list_for_each(qe, &__tx_mod->tx_active_q) { \
+ __tx = (struct bna_tx *)qe; \
+ if (__tx->rid == (_rid)) { \
+ (_tx) = __tx; \
+ break; \
+ } \
+ } \
+} while (0)
+
+#define bna_rx_from_rid(_bna, _rid, _rx) \
+do { \
+ struct bna_rx_mod *__rx_mod = &(_bna)->rx_mod; \
+ struct bna_rx *__rx; \
+ struct list_head *qe; \
+ _rx = NULL; \
+ list_for_each(qe, &__rx_mod->rx_active_q) { \
+ __rx = (struct bna_rx *)qe; \
+ if (__rx->rid == (_rid)) { \
+ (_rx) = __rx; \
+ break; \
+ } \
+ } \
+} while (0)
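+
+/*
+ * bna_tx_from_rid()/bna_rx_from_rid() walk the active list linearly and
+ * leave _tx/_rx NULL on a miss; callers (e.g. bna_msgq_rsp_handler() in
+ * bna_enet.c) must NULL-check the result before dispatching.
+ */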
+
+/**
+ *
+ * Inline functions
+ *
+ */
+
+static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
+{
+ struct bna_mac *mac = NULL;
+ struct list_head *qe;
+ list_for_each(qe, q) {
+ if (BNA_MAC_IS_EQUAL(((struct bna_mac *)qe)->addr, addr)) {
+ mac = (struct bna_mac *)qe;
+ break;
+ }
+ }
+ return mac;
+}
+
+#define bna_attr(_bna) (&(_bna)->ioceth.attr)
+
+/**
+ *
+ * Function prototypes
+ *
+ */
+
+/**
+ * BNA
+ */
+
+/* FW response handlers */
+void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr);
+
+/* APIs for BNAD */
+void bna_res_req(struct bna_res_info *res_info);
+void bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info);
+void bna_init(struct bna *bna, struct bnad *bnad,
+ struct bfa_pcidev *pcidev,
+ struct bna_res_info *res_info);
+void bna_mod_init(struct bna *bna, struct bna_res_info *res_info);
+void bna_uninit(struct bna *bna);
+int bna_num_txq_set(struct bna *bna, int num_txq);
+int bna_num_rxp_set(struct bna *bna, int num_rxp);
+void bna_hw_stats_get(struct bna *bna);
+
+/* APIs for RxF */
+struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
+void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
+ struct bna_mac *mac);
+struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
+void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
+ struct bna_mac *mac);
+struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
+void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
+ struct bna_mcam_handle *handle);
+
+/**
+ * MBOX
+ */
+
+/* API for BNAD */
+void bna_mbox_handler(struct bna *bna, u32 intr_status);
+
+/**
+ * ETHPORT
+ */
+
+/* Callbacks for RX */
+void bna_ethport_cb_rx_started(struct bna_ethport *ethport);
+void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport);
+
+/**
+ * TX MODULE AND TX
+ */
+/* FW response handlers */
+void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx,
+ struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx,
+ struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod);
+
+/* APIs for BNA */
+void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
+ struct bna_res_info *res_info);
+void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);
+
+/* APIs for ENET */
+void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
+void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
+void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);
+
+/* APIs for BNAD */
+void bna_tx_res_req(int num_txq, int txq_depth,
+ struct bna_res_info *res_info);
+struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
+ struct bna_tx_config *tx_cfg,
+ const struct bna_tx_event_cbfn *tx_cbfn,
+ struct bna_res_info *res_info, void *priv);
+void bna_tx_destroy(struct bna_tx *tx);
+void bna_tx_enable(struct bna_tx *tx);
+void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
+ void (*cbfn)(void *, struct bna_tx *));
+void bna_tx_cleanup_complete(struct bna_tx *tx);
+void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
+
+/**
+ * RX MODULE, RX, RXF
+ */
+
+/* FW response handlers */
+void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx,
+ struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx,
+ struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
+ struct bfi_msgq_mhdr *msghdr);
+
+/* APIs for BNA */
+void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
+ struct bna_res_info *res_info);
+void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod);
+
+/* APIs for ENET */
+void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
+void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
+void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);
+
+/* APIs for BNAD */
+void bna_rx_res_req(struct bna_rx_config *rx_config,
+ struct bna_res_info *res_info);
+struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
+ struct bna_rx_config *rx_cfg,
+ const struct bna_rx_event_cbfn *rx_cbfn,
+ struct bna_res_info *res_info, void *priv);
+void bna_rx_destroy(struct bna_rx *rx);
+void bna_rx_enable(struct bna_rx *rx);
+void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
+ void (*cbfn)(void *, struct bna_rx *));
+void bna_rx_cleanup_complete(struct bna_rx *rx);
+void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
+void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
+void bna_rx_dim_update(struct bna_ccb *ccb);
+enum bna_cb_status
+bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
+bna_rx_ucast_add(struct bna_rx *rx, u8* ucmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
+bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
+bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
+bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
+bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
+ enum bna_rxmode bitmask,
+ void (*cbfn)(struct bnad *, struct bna_rx *));
+void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
+void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
+void bna_rx_vlanfilter_enable(struct bna_rx *rx);
+/**
+ * ENET
+ */
+
+/* API for RX */
+int bna_enet_mtu_get(struct bna_enet *enet);
+
+/* Callbacks for TX, RX */
+void bna_enet_cb_tx_stopped(struct bna_enet *enet);
+void bna_enet_cb_rx_stopped(struct bna_enet *enet);
+
+/* API for BNAD */
+void bna_enet_enable(struct bna_enet *enet);
+void bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
+ void (*cbfn)(void *));
+void bna_enet_pause_config(struct bna_enet *enet,
+ struct bna_pause_config *pause_config,
+ void (*cbfn)(struct bnad *));
+void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
+ void (*cbfn)(struct bnad *));
+void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);
+
+/**
+ * IOCETH
+ */
+
+/* APIs for BNAD */
+void bna_ioceth_enable(struct bna_ioceth *ioceth);
+void bna_ioceth_disable(struct bna_ioceth *ioceth,
+ enum bna_cleanup_type type);
+
+/**
+ * BNAD
+ */
+
+/* Callbacks for ENET */
+void bnad_cb_ethport_link_status(struct bnad *bnad,
+ enum bna_link_status status);
+
+/* Callbacks for IOCETH */
+void bnad_cb_ioceth_ready(struct bnad *bnad);
+void bnad_cb_ioceth_failed(struct bnad *bnad);
+void bnad_cb_ioceth_disabled(struct bnad *bnad);
+void bnad_cb_mbox_intr_enable(struct bnad *bnad);
+void bnad_cb_mbox_intr_disable(struct bnad *bnad);
+
+/* Callbacks for BNA */
+void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
+ struct bna_stats *stats);
+
+#endif /* __BNA_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
new file mode 100644
index 000000000000..26f5c5abfd1f
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -0,0 +1,2144 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include "bna.h"
+
+static inline int
+ethport_can_be_up(struct bna_ethport *ethport)
+{
+ int ready = 0;
+ if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
+ ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
+ (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
+ (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
+ else
+ ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
+ (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
+ !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
+ return ready;
+}
+
+#define ethport_is_up ethport_can_be_up
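+
+/*
+ * Note the asymmetry in ethport_can_be_up(): a regular enet needs
+ * BNA_ETHPORT_F_PORT_ENABLED set, while the loopback types need it
+ * clear, presumably because diagnostic loopback runs with the physical
+ * port disabled.
+ */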
+
+enum bna_ethport_event {
+ ETHPORT_E_START = 1,
+ ETHPORT_E_STOP = 2,
+ ETHPORT_E_FAIL = 3,
+ ETHPORT_E_UP = 4,
+ ETHPORT_E_DOWN = 5,
+ ETHPORT_E_FWRESP_UP_OK = 6,
+ ETHPORT_E_FWRESP_DOWN = 7,
+ ETHPORT_E_FWRESP_UP_FAIL = 8,
+};
+
+enum bna_enet_event {
+ ENET_E_START = 1,
+ ENET_E_STOP = 2,
+ ENET_E_FAIL = 3,
+ ENET_E_PAUSE_CFG = 4,
+ ENET_E_MTU_CFG = 5,
+ ENET_E_FWRESP_PAUSE = 6,
+ ENET_E_CHLD_STOPPED = 7,
+};
+
+enum bna_ioceth_event {
+ IOCETH_E_ENABLE = 1,
+ IOCETH_E_DISABLE = 2,
+ IOCETH_E_IOC_RESET = 3,
+ IOCETH_E_IOC_FAILED = 4,
+ IOCETH_E_IOC_READY = 5,
+ IOCETH_E_ENET_ATTR_RESP = 6,
+ IOCETH_E_ENET_STOPPED = 7,
+ IOCETH_E_IOC_DISABLED = 8,
+};
+
+#define bna_stats_copy(_name, _type) \
+do { \
+ count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64); \
+ stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats; \
+ stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats; \
+ for (i = 0; i < count; i++) \
+ stats_dst[i] = be64_to_cpu(stats_src[i]); \
+} while (0)
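+
+/*
+ * As a sketch, bna_stats_copy(mac, mac) expands to a be64-to-cpu copy of
+ * struct bfi_enet_stats_mac out of the DMA'd firmware block:
+ *
+ *	count = sizeof(struct bfi_enet_stats_mac) / sizeof(u64);
+ *	stats_src = (u64 *)&bna->stats.hw_stats_kva->mac_stats;
+ *	stats_dst = (u64 *)&bna->stats.hw_stats.mac_stats;
+ *	for (i = 0; i < count; i++)
+ *		stats_dst[i] = be64_to_cpu(stats_src[i]);
+ */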
+
+/*
+ * FW response handlers
+ */
+
+static void
+bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
+
+ if (ethport_can_be_up(ethport))
+ bfa_fsm_send_event(ethport, ETHPORT_E_UP);
+}
+
+static void
+bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ int ethport_up = ethport_is_up(ethport);
+
+ ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
+
+ if (ethport_up)
+ bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
+}
+
+static void
+bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ struct bfi_enet_enable_req *admin_req =
+ &ethport->bfi_enet_cmd.admin_req;
+ struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;
+
+ switch (admin_req->enable) {
+ case BNA_STATUS_T_ENABLED:
+ if (rsp->error == BFI_ENET_CMD_OK)
+ bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
+ else {
+ ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
+ bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
+ }
+ break;
+
+ case BNA_STATUS_T_DISABLED:
+ bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
+ ethport->link_status = BNA_LINK_DOWN;
+ ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
+ break;
+ }
+}
+
+static void
+bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ struct bfi_enet_diag_lb_req *diag_lb_req =
+ &ethport->bfi_enet_cmd.lpbk_req;
+ struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;
+
+ switch (diag_lb_req->enable) {
+ case BNA_STATUS_T_ENABLED:
+ if (rsp->error == BFI_ENET_CMD_OK)
+ bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
+ else {
+ ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
+ bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
+ }
+ break;
+
+ case BNA_STATUS_T_DISABLED:
+ bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
+ break;
+ }
+}
+
+static void
+bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
+{
+ bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
+}
+
+static void
+bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ struct bfi_enet_attr_rsp *rsp = (struct bfi_enet_attr_rsp *)msghdr;
+
+ /**
+ * Store only if not set earlier, since BNAD can override the HW
+ * attributes
+ */
+ if (!ioceth->attr.fw_query_complete) {
+ ioceth->attr.num_txq = ntohl(rsp->max_cfg);
+ ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
+ ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
+ ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
+ ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
+ ioceth->attr.fw_query_complete = true;
+ }
+
+ bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
+}
+
+static void
+bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
+{
+ struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
+ u64 *stats_src;
+ u64 *stats_dst;
+ u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
+ u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
+ int count;
+ int i;
+
+ bna_stats_copy(mac, mac);
+ bna_stats_copy(bpc, bpc);
+ bna_stats_copy(rad, rad);
+ bna_stats_copy(rlb, rad);
+ bna_stats_copy(fc_rx, fc_rx);
+ bna_stats_copy(fc_tx, fc_tx);
+
+ stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);
+
+ /* Copy Rxf stats to SW area, scatter them while copying */
+ for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
+ stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
+ memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
+ if (rx_enet_mask & ((u32)(1 << i))) {
+ int k;
+ count = sizeof(struct bfi_enet_stats_rxf) /
+ sizeof(u64);
+ for (k = 0; k < count; k++) {
+ stats_dst[k] = be64_to_cpu(*stats_src);
+ stats_src++;
+ }
+ }
+ }
+
+ /* Copy Txf stats to SW area, scatter them while copying */
+ for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
+ stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
+ memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
+ if (tx_enet_mask & ((u32)(1 << i))) {
+ int k;
+ count = sizeof(struct bfi_enet_stats_txf) /
+ sizeof(u64);
+ for (k = 0; k < count; k++) {
+ stats_dst[k] = be64_to_cpu(*stats_src);
+ stats_src++;
+ }
+ }
+ }
+
+ bna->stats_mod.stats_get_busy = false;
+ bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
+}
+
+static void
+bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ ethport->link_status = BNA_LINK_UP;
+
+ /* Dispatch events */
+ ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
+}
+
+static void
+bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ ethport->link_status = BNA_LINK_DOWN;
+
+ /* Dispatch events */
+ ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
+}
+
+static void
+bna_err_handler(struct bna *bna, u32 intr_status)
+{
+ if (BNA_IS_HALT_INTR(bna, intr_status))
+ bna_halt_clear(bna);
+
+ bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
+}
+
+void
+bna_mbox_handler(struct bna *bna, u32 intr_status)
+{
+ if (BNA_IS_ERR_INTR(bna, intr_status)) {
+ bna_err_handler(bna, intr_status);
+ return;
+ }
+ if (BNA_IS_MBOX_INTR(bna, intr_status))
+ bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
+}
+
+static void
+bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
+{
+ struct bna *bna = (struct bna *)arg;
+ struct bna_tx *tx;
+ struct bna_rx *rx;
+
+ switch (msghdr->msg_id) {
+ case BFI_ENET_I2H_RX_CFG_SET_RSP:
+ bna_rx_from_rid(bna, msghdr->enet_id, rx);
+ if (rx)
+ bna_bfi_rx_enet_start_rsp(rx, msghdr);
+ break;
+
+ case BFI_ENET_I2H_RX_CFG_CLR_RSP:
+ bna_rx_from_rid(bna, msghdr->enet_id, rx);
+ if (rx)
+ bna_bfi_rx_enet_stop_rsp(rx, msghdr);
+ break;
+
+ case BFI_ENET_I2H_RIT_CFG_RSP:
+ case BFI_ENET_I2H_RSS_CFG_RSP:
+ case BFI_ENET_I2H_RSS_ENABLE_RSP:
+ case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
+ case BFI_ENET_I2H_RX_DEFAULT_RSP:
+ case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
+ case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
+ case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
+ case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
+ case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
+ case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
+ case BFI_ENET_I2H_RX_VLAN_SET_RSP:
+ case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
+ bna_rx_from_rid(bna, msghdr->enet_id, rx);
+ if (rx)
+ bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
+ break;
+
+ case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
+ bna_rx_from_rid(bna, msghdr->enet_id, rx);
+ if (rx)
+ bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
+ break;
+
+ case BFI_ENET_I2H_TX_CFG_SET_RSP:
+ bna_tx_from_rid(bna, msghdr->enet_id, tx);
+ if (tx)
+ bna_bfi_tx_enet_start_rsp(tx, msghdr);
+ break;
+
+ case BFI_ENET_I2H_TX_CFG_CLR_RSP:
+ bna_tx_from_rid(bna, msghdr->enet_id, tx);
+ if (tx)
+ bna_bfi_tx_enet_stop_rsp(tx, msghdr);
+ break;
+
+ case BFI_ENET_I2H_PORT_ADMIN_RSP:
+ bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
+ break;
+
+ case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
+ bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
+ break;
+
+ case BFI_ENET_I2H_SET_PAUSE_RSP:
+ bna_bfi_pause_set_rsp(&bna->enet, msghdr);
+ break;
+
+ case BFI_ENET_I2H_GET_ATTR_RSP:
+ bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
+ break;
+
+ case BFI_ENET_I2H_STATS_GET_RSP:
+ bna_bfi_stats_get_rsp(bna, msghdr);
+ break;
+
+ case BFI_ENET_I2H_STATS_CLR_RSP:
+ /* No-op */
+ break;
+
+ case BFI_ENET_I2H_LINK_UP_AEN:
+ bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
+ break;
+
+ case BFI_ENET_I2H_LINK_DOWN_AEN:
+ bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
+ break;
+
+ case BFI_ENET_I2H_PORT_ENABLE_AEN:
+ bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
+ break;
+
+ case BFI_ENET_I2H_PORT_DISABLE_AEN:
+ bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
+ break;
+
+ case BFI_ENET_I2H_BW_UPDATE_AEN:
+ bna_bfi_bw_update_aen(&bna->tx_mod);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * ETHPORT
+ */
+#define call_ethport_stop_cbfn(_ethport) \
+do { \
+ if ((_ethport)->stop_cbfn) { \
+ void (*cbfn)(struct bna_enet *); \
+ cbfn = (_ethport)->stop_cbfn; \
+ (_ethport)->stop_cbfn = NULL; \
+ cbfn(&(_ethport)->bna->enet); \
+ } \
+} while (0)
+
+#define call_ethport_adminup_cbfn(ethport, status) \
+do { \
+ if ((ethport)->adminup_cbfn) { \
+ void (*cbfn)(struct bnad *, enum bna_cb_status); \
+ cbfn = (ethport)->adminup_cbfn; \
+ (ethport)->adminup_cbfn = NULL; \
+ cbfn((ethport)->bna->bnad, status); \
+ } \
+} while (0)
+
+static void
+bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
+{
+ struct bfi_enet_enable_req *admin_up_req =
+ &ethport->bfi_enet_cmd.admin_req;
+
+ bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
+ admin_up_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
+ admin_up_req->enable = BNA_STATUS_T_ENABLED;
+
+ bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
+ bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
+}
+
+static void
+bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
+{
+ struct bfi_enet_enable_req *admin_down_req =
+ &ethport->bfi_enet_cmd.admin_req;
+
+ bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
+ admin_down_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
+ admin_down_req->enable = BNA_STATUS_T_DISABLED;
+
+ bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
+ bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
+}
+
+static void
+bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
+{
+ struct bfi_enet_diag_lb_req *lpbk_up_req =
+ &ethport->bfi_enet_cmd.lpbk_req;
+
+ bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
+ lpbk_up_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
+ lpbk_up_req->mode = (ethport->bna->enet.type ==
+ BNA_ENET_T_LOOPBACK_INTERNAL) ?
+ BFI_ENET_DIAG_LB_OPMODE_EXT :
+ BFI_ENET_DIAG_LB_OPMODE_CBL;
+ lpbk_up_req->enable = BNA_STATUS_T_ENABLED;
+
+ bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
+ bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
+}
+
+static void
+bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
+{
+ struct bfi_enet_diag_lb_req *lpbk_down_req =
+ &ethport->bfi_enet_cmd.lpbk_req;
+
+ bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
+ lpbk_down_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
+ lpbk_down_req->enable = BNA_STATUS_T_DISABLED;
+
+ bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
+ bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
+}
+
+static void
+bna_bfi_ethport_up(struct bna_ethport *ethport)
+{
+ if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
+ bna_bfi_ethport_admin_up(ethport);
+ else
+ bna_bfi_ethport_lpbk_up(ethport);
+}
+
+static void
+bna_bfi_ethport_down(struct bna_ethport *ethport)
+{
+ if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
+ bna_bfi_ethport_admin_down(ethport);
+ else
+ bna_bfi_ethport_lpbk_down(ethport);
+}
+
+bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
+ enum bna_ethport_event);
+bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
+ enum bna_ethport_event);
+bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
+ enum bna_ethport_event);
+bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
+ enum bna_ethport_event);
+bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
+ enum bna_ethport_event);
+bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
+ enum bna_ethport_event);
+
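+/*
+ * Each bfa_fsm_state_decl() above forward-declares the state's entry
+ * action and event handler, e.g. bna_ethport_sm_down_entry() and
+ * bna_ethport_sm_down() defined below; transitions are made with
+ * bfa_fsm_set_state() and events are injected via bfa_fsm_send_event().
+ */
+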
+static void
+bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
+{
+ call_ethport_stop_cbfn(ethport);
+}
+
+static void
+bna_ethport_sm_stopped(struct bna_ethport *ethport,
+ enum bna_ethport_event event)
+{
+ switch (event) {
+ case ETHPORT_E_START:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_down);
+ break;
+
+ case ETHPORT_E_STOP:
+ call_ethport_stop_cbfn(ethport);
+ break;
+
+ case ETHPORT_E_FAIL:
+ /* No-op */
+ break;
+
+ case ETHPORT_E_DOWN:
+ /* This event is received due to Rx objects failing */
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ethport_sm_down_entry(struct bna_ethport *ethport)
+{
+}
+
+static void
+bna_ethport_sm_down(struct bna_ethport *ethport,
+ enum bna_ethport_event event)
+{
+ switch (event) {
+ case ETHPORT_E_STOP:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ case ETHPORT_E_FAIL:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ case ETHPORT_E_UP:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
+ bna_bfi_ethport_up(ethport);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
+{
+}
+
+static void
+bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
+ enum bna_ethport_event event)
+{
+ switch (event) {
+ case ETHPORT_E_STOP:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
+ break;
+
+ case ETHPORT_E_FAIL:
+ call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ case ETHPORT_E_DOWN:
+ call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
+ bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
+ break;
+
+ case ETHPORT_E_FWRESP_UP_OK:
+ call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
+ bfa_fsm_set_state(ethport, bna_ethport_sm_up);
+ break;
+
+ case ETHPORT_E_FWRESP_UP_FAIL:
+ call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
+ bfa_fsm_set_state(ethport, bna_ethport_sm_down);
+ break;
+
+ case ETHPORT_E_FWRESP_DOWN:
+ /* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
+ bna_bfi_ethport_up(ethport);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
+{
+ /**
+	 * NOTE: Do not call bna_bfi_ethport_down() here. The up request
+	 * posted from up_resp_wait is still outstanding, so posting another
+	 * command on the up_resp_wait -> down_resp_wait transition (event
+	 * ETHPORT_E_DOWN) would overrun the mbox; the down request goes out
+	 * once ETHPORT_E_FWRESP_UP_OK arrives.
+ */
+}
+
+static void
+bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
+ enum bna_ethport_event event)
+{
+ switch (event) {
+ case ETHPORT_E_STOP:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
+ break;
+
+ case ETHPORT_E_FAIL:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ case ETHPORT_E_UP:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
+ break;
+
+ case ETHPORT_E_FWRESP_UP_OK:
+ /* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
+ bna_bfi_ethport_down(ethport);
+ break;
+
+ case ETHPORT_E_FWRESP_UP_FAIL:
+ case ETHPORT_E_FWRESP_DOWN:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_down);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ethport_sm_up_entry(struct bna_ethport *ethport)
+{
+}
+
+static void
+bna_ethport_sm_up(struct bna_ethport *ethport,
+ enum bna_ethport_event event)
+{
+ switch (event) {
+ case ETHPORT_E_STOP:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
+ bna_bfi_ethport_down(ethport);
+ break;
+
+ case ETHPORT_E_FAIL:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ case ETHPORT_E_DOWN:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
+ bna_bfi_ethport_down(ethport);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
+{
+}
+
+static void
+bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
+ enum bna_ethport_event event)
+{
+ switch (event) {
+ case ETHPORT_E_FAIL:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ case ETHPORT_E_DOWN:
+ /**
+ * This event is received due to Rx objects stopping in
+ * parallel to ethport
+ */
+ /* No-op */
+ break;
+
+ case ETHPORT_E_FWRESP_UP_OK:
+		/* up_resp_wait->last_resp_wait transition on ETHPORT_E_STOP */
+ bna_bfi_ethport_down(ethport);
+ break;
+
+ case ETHPORT_E_FWRESP_UP_FAIL:
+ case ETHPORT_E_FWRESP_DOWN:
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
+{
+ ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
+ ethport->bna = bna;
+
+ ethport->link_status = BNA_LINK_DOWN;
+ ethport->link_cbfn = bnad_cb_ethport_link_status;
+
+ ethport->rx_started_count = 0;
+
+ ethport->stop_cbfn = NULL;
+ ethport->adminup_cbfn = NULL;
+
+ bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
+}
+
+static void
+bna_ethport_uninit(struct bna_ethport *ethport)
+{
+ ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
+ ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
+
+ ethport->bna = NULL;
+}
+
+static void
+bna_ethport_start(struct bna_ethport *ethport)
+{
+ bfa_fsm_send_event(ethport, ETHPORT_E_START);
+}
+
+static void
+bna_enet_cb_ethport_stopped(struct bna_enet *enet)
+{
+ bfa_wc_down(&enet->chld_stop_wc);
+}
+
+static void
+bna_ethport_stop(struct bna_ethport *ethport)
+{
+ ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
+ bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
+}
+
+static void
+bna_ethport_fail(struct bna_ethport *ethport)
+{
+ /* Reset the physical port status to enabled */
+ ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
+
+ if (ethport->link_status != BNA_LINK_DOWN) {
+ ethport->link_status = BNA_LINK_DOWN;
+ ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
+ }
+ bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
+}
+
+/* Should be called only when ethport is disabled */
+void
+bna_ethport_cb_rx_started(struct bna_ethport *ethport)
+{
+ ethport->rx_started_count++;
+
+ if (ethport->rx_started_count == 1) {
+ ethport->flags |= BNA_ETHPORT_F_RX_STARTED;
+
+ if (ethport_can_be_up(ethport))
+ bfa_fsm_send_event(ethport, ETHPORT_E_UP);
+ }
+}
+
+void
+bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
+{
+ int ethport_up = ethport_is_up(ethport);
+
+ ethport->rx_started_count--;
+
+ if (ethport->rx_started_count == 0) {
+ ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;
+
+ if (ethport_up)
+ bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
+ }
+}
+
+/**
+ * ENET
+ */
+#define bna_enet_chld_start(enet) \
+do { \
+ enum bna_tx_type tx_type = \
+ ((enet)->type == BNA_ENET_T_REGULAR) ? \
+ BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK; \
+ enum bna_rx_type rx_type = \
+ ((enet)->type == BNA_ENET_T_REGULAR) ? \
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
+ bna_ethport_start(&(enet)->bna->ethport); \
+ bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type); \
+ bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type); \
+} while (0)
+
+#define bna_enet_chld_stop(enet) \
+do { \
+ enum bna_tx_type tx_type = \
+ ((enet)->type == BNA_ENET_T_REGULAR) ? \
+ BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK; \
+ enum bna_rx_type rx_type = \
+ ((enet)->type == BNA_ENET_T_REGULAR) ? \
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
+ bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
+ bfa_wc_up(&(enet)->chld_stop_wc); \
+ bna_ethport_stop(&(enet)->bna->ethport); \
+ bfa_wc_up(&(enet)->chld_stop_wc); \
+ bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type); \
+ bfa_wc_up(&(enet)->chld_stop_wc); \
+ bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type); \
+ bfa_wc_wait(&(enet)->chld_stop_wc); \
+} while (0)
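+
+/*
+ * bna_enet_chld_stop() leans on the bfa_wc wait counter: bfa_wc_init()
+ * takes an initial reference and registers bna_enet_cb_chld_stopped(),
+ * one bfa_wc_up() is taken per child before its stop is issued, each
+ * child's stopped callback does bfa_wc_down(), and bfa_wc_wait() drops
+ * the initial reference; ENET_E_CHLD_STOPPED therefore fires only once
+ * ethport, Tx and Rx have all reported stopped.
+ */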
+
+#define bna_enet_chld_fail(enet) \
+do { \
+ bna_ethport_fail(&(enet)->bna->ethport); \
+ bna_tx_mod_fail(&(enet)->bna->tx_mod); \
+ bna_rx_mod_fail(&(enet)->bna->rx_mod); \
+} while (0)
+
+#define bna_enet_rx_start(enet) \
+do { \
+ enum bna_rx_type rx_type = \
+ ((enet)->type == BNA_ENET_T_REGULAR) ? \
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
+ bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type); \
+} while (0)
+
+#define bna_enet_rx_stop(enet) \
+do { \
+ enum bna_rx_type rx_type = \
+ ((enet)->type == BNA_ENET_T_REGULAR) ? \
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
+ bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
+ bfa_wc_up(&(enet)->chld_stop_wc); \
+ bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type); \
+ bfa_wc_wait(&(enet)->chld_stop_wc); \
+} while (0)
+
+#define call_enet_stop_cbfn(enet) \
+do { \
+ if ((enet)->stop_cbfn) { \
+ void (*cbfn)(void *); \
+ void *cbarg; \
+ cbfn = (enet)->stop_cbfn; \
+ cbarg = (enet)->stop_cbarg; \
+ (enet)->stop_cbfn = NULL; \
+ (enet)->stop_cbarg = NULL; \
+ cbfn(cbarg); \
+ } \
+} while (0)
+
+#define call_enet_pause_cbfn(enet) \
+do { \
+ if ((enet)->pause_cbfn) { \
+ void (*cbfn)(struct bnad *); \
+ cbfn = (enet)->pause_cbfn; \
+ (enet)->pause_cbfn = NULL; \
+ cbfn((enet)->bna->bnad); \
+ } \
+} while (0)
+
+#define call_enet_mtu_cbfn(enet) \
+do { \
+ if ((enet)->mtu_cbfn) { \
+ void (*cbfn)(struct bnad *); \
+ cbfn = (enet)->mtu_cbfn; \
+ (enet)->mtu_cbfn = NULL; \
+ cbfn((enet)->bna->bnad); \
+ } \
+} while (0)
+
+static void bna_enet_cb_chld_stopped(void *arg);
+static void bna_bfi_pause_set(struct bna_enet *enet);
+
+bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
+ enum bna_enet_event);
+bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
+ enum bna_enet_event);
+bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
+ enum bna_enet_event);
+bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
+ enum bna_enet_event);
+bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
+ enum bna_enet_event);
+bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
+ enum bna_enet_event);
+bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
+ enum bna_enet_event);
+
+static void
+bna_enet_sm_stopped_entry(struct bna_enet *enet)
+{
+ call_enet_pause_cbfn(enet);
+ call_enet_mtu_cbfn(enet);
+ call_enet_stop_cbfn(enet);
+}
+
+static void
+bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_START:
+ bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
+ break;
+
+ case ENET_E_STOP:
+ call_enet_stop_cbfn(enet);
+ break;
+
+ case ENET_E_FAIL:
+ /* No-op */
+ break;
+
+ case ENET_E_PAUSE_CFG:
+ call_enet_pause_cbfn(enet);
+ break;
+
+ case ENET_E_MTU_CFG:
+ call_enet_mtu_cbfn(enet);
+ break;
+
+ case ENET_E_CHLD_STOPPED:
+ /**
+ * This event is received due to Ethport, Tx and Rx objects
+ * failing
+ */
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
+{
+ bna_bfi_pause_set(enet);
+}
+
+static void
+bna_enet_sm_pause_init_wait(struct bna_enet *enet,
+ enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_STOP:
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
+ break;
+
+ case ENET_E_FAIL:
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ break;
+
+ case ENET_E_PAUSE_CFG:
+ enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
+ break;
+
+ case ENET_E_MTU_CFG:
+ /* No-op */
+ break;
+
+ case ENET_E_FWRESP_PAUSE:
+ if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ bna_bfi_pause_set(enet);
+ } else {
+ bfa_fsm_set_state(enet, bna_enet_sm_started);
+ bna_enet_chld_start(enet);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
+{
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+}
+
+static void
+bna_enet_sm_last_resp_wait(struct bna_enet *enet,
+ enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_FAIL:
+ case ENET_E_FWRESP_PAUSE:
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_enet_sm_started_entry(struct bna_enet *enet)
+{
+ /**
+ * NOTE: Do not call bna_enet_chld_start() here, since it will be
+ * inadvertently called during cfg_wait->started transition as well
+ */
+ call_enet_pause_cbfn(enet);
+ call_enet_mtu_cbfn(enet);
+}
+
+static void
+bna_enet_sm_started(struct bna_enet *enet,
+ enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_STOP:
+ bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
+ break;
+
+ case ENET_E_FAIL:
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ bna_enet_chld_fail(enet);
+ break;
+
+ case ENET_E_PAUSE_CFG:
+ bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
+ bna_bfi_pause_set(enet);
+ break;
+
+ case ENET_E_MTU_CFG:
+ bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
+ bna_enet_rx_stop(enet);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
+{
+}
+
+static void
+bna_enet_sm_cfg_wait(struct bna_enet *enet,
+ enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_STOP:
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
+ bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
+ break;
+
+ case ENET_E_FAIL:
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ bna_enet_chld_fail(enet);
+ break;
+
+ case ENET_E_PAUSE_CFG:
+ enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
+ break;
+
+ case ENET_E_MTU_CFG:
+ enet->flags |= BNA_ENET_F_MTU_CHANGED;
+ break;
+
+ case ENET_E_CHLD_STOPPED:
+ bna_enet_rx_start(enet);
+ /* Fall through */
+ case ENET_E_FWRESP_PAUSE:
+ if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ bna_bfi_pause_set(enet);
+ } else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
+ enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
+ bna_enet_rx_stop(enet);
+ } else {
+ bfa_fsm_set_state(enet, bna_enet_sm_started);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
+{
+ enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
+ enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
+}
+
+static void
+bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
+ enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_FAIL:
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ bna_enet_chld_fail(enet);
+ break;
+
+ case ENET_E_FWRESP_PAUSE:
+ case ENET_E_CHLD_STOPPED:
+ bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
+{
+ bna_enet_chld_stop(enet);
+}
+
+static void
+bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
+ enum bna_enet_event event)
+{
+ switch (event) {
+ case ENET_E_FAIL:
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ bna_enet_chld_fail(enet);
+ break;
+
+ case ENET_E_CHLD_STOPPED:
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_bfi_pause_set(struct bna_enet *enet)
+{
+ struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;
+
+ bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
+ pause_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
+ pause_req->tx_pause = enet->pause_config.tx_pause;
+ pause_req->rx_pause = enet->pause_config.rx_pause;
+
+ bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
+ bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
+}
+
+static void
+bna_enet_cb_chld_stopped(void *arg)
+{
+ struct bna_enet *enet = (struct bna_enet *)arg;
+
+ bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
+}
+
+static void
+bna_enet_init(struct bna_enet *enet, struct bna *bna)
+{
+ enet->bna = bna;
+ enet->flags = 0;
+ enet->mtu = 0;
+ enet->type = BNA_ENET_T_REGULAR;
+
+ enet->stop_cbfn = NULL;
+ enet->stop_cbarg = NULL;
+
+ enet->pause_cbfn = NULL;
+
+ enet->mtu_cbfn = NULL;
+
+ bfa_fsm_set_state(enet, bna_enet_sm_stopped);
+}
+
+static void
+bna_enet_uninit(struct bna_enet *enet)
+{
+ enet->flags = 0;
+
+ enet->bna = NULL;
+}
+
+static void
+bna_enet_start(struct bna_enet *enet)
+{
+ enet->flags |= BNA_ENET_F_IOCETH_READY;
+ if (enet->flags & BNA_ENET_F_ENABLED)
+ bfa_fsm_send_event(enet, ENET_E_START);
+}
+
+static void
+bna_ioceth_cb_enet_stopped(void *arg)
+{
+ struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
+
+ bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
+}
+
+static void
+bna_enet_stop(struct bna_enet *enet)
+{
+ enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
+ enet->stop_cbarg = &enet->bna->ioceth;
+
+ enet->flags &= ~BNA_ENET_F_IOCETH_READY;
+ bfa_fsm_send_event(enet, ENET_E_STOP);
+}
+
+static void
+bna_enet_fail(struct bna_enet *enet)
+{
+ enet->flags &= ~BNA_ENET_F_IOCETH_READY;
+ bfa_fsm_send_event(enet, ENET_E_FAIL);
+}
+
+void
+bna_enet_cb_tx_stopped(struct bna_enet *enet)
+{
+ bfa_wc_down(&enet->chld_stop_wc);
+}
+
+void
+bna_enet_cb_rx_stopped(struct bna_enet *enet)
+{
+ bfa_wc_down(&enet->chld_stop_wc);
+}
+
+int
+bna_enet_mtu_get(struct bna_enet *enet)
+{
+ return enet->mtu;
+}
+
+void
+bna_enet_enable(struct bna_enet *enet)
+{
+ if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
+ return;
+
+ enet->flags |= BNA_ENET_F_ENABLED;
+
+ if (enet->flags & BNA_ENET_F_IOCETH_READY)
+ bfa_fsm_send_event(enet, ENET_E_START);
+}
+
+void
+bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
+ void (*cbfn)(void *))
+{
+ if (type == BNA_SOFT_CLEANUP) {
+ (*cbfn)(enet->bna->bnad);
+ return;
+ }
+
+ enet->stop_cbfn = cbfn;
+ enet->stop_cbarg = enet->bna->bnad;
+
+ enet->flags &= ~BNA_ENET_F_ENABLED;
+
+ bfa_fsm_send_event(enet, ENET_E_STOP);
+}
+
+void
+bna_enet_pause_config(struct bna_enet *enet,
+ struct bna_pause_config *pause_config,
+ void (*cbfn)(struct bnad *))
+{
+ enet->pause_config = *pause_config;
+
+ enet->pause_cbfn = cbfn;
+
+ bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
+}
+
+void
+bna_enet_mtu_set(struct bna_enet *enet, int mtu,
+ void (*cbfn)(struct bnad *))
+{
+ enet->mtu = mtu;
+
+ enet->mtu_cbfn = cbfn;
+
+ bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
+}
+
+void
+bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
+{
+ *mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
+}
+
+/**
+ * IOCETH
+ */
+#define enable_mbox_intr(_ioceth) \
+do { \
+ u32 intr_status; \
+ bna_intr_status_get((_ioceth)->bna, intr_status); \
+ bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad); \
+ bna_mbox_intr_enable((_ioceth)->bna); \
+} while (0)
+
+#define disable_mbox_intr(_ioceth) \
+do { \
+ bna_mbox_intr_disable((_ioceth)->bna); \
+ bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad); \
+} while (0)
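+
+/*
+ * Ordering note: enable_mbox_intr() samples intr_status before enabling
+ * (presumably to consume any stale status) and enables the host-side
+ * handler before the hardware bit; disable_mbox_intr() reverses that,
+ * masking the hardware first.
+ */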
+
+#define call_ioceth_stop_cbfn(_ioceth) \
+do { \
+ if ((_ioceth)->stop_cbfn) { \
+ void (*cbfn)(struct bnad *); \
+ struct bnad *cbarg; \
+ cbfn = (_ioceth)->stop_cbfn; \
+ cbarg = (_ioceth)->stop_cbarg; \
+ (_ioceth)->stop_cbfn = NULL; \
+ (_ioceth)->stop_cbarg = NULL; \
+ cbfn(cbarg); \
+ } \
+} while (0)
+
+#define bna_stats_mod_uninit(_stats_mod) \
+do { \
+} while (0)
+
+#define bna_stats_mod_start(_stats_mod) \
+do { \
+ (_stats_mod)->ioc_ready = true; \
+} while (0)
+
+#define bna_stats_mod_stop(_stats_mod) \
+do { \
+ (_stats_mod)->ioc_ready = false; \
+} while (0)
+
+#define bna_stats_mod_fail(_stats_mod) \
+do { \
+ (_stats_mod)->ioc_ready = false; \
+ (_stats_mod)->stats_get_busy = false; \
+ (_stats_mod)->stats_clr_busy = false; \
+} while (0)
+
+static void bna_bfi_attr_get(struct bna_ioceth *ioceth);
+
+bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
+ enum bna_ioceth_event);
+bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
+ enum bna_ioceth_event);
+bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
+ enum bna_ioceth_event);
+bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
+ enum bna_ioceth_event);
+bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
+ enum bna_ioceth_event);
+bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
+ enum bna_ioceth_event);
+bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
+ enum bna_ioceth_event);
+bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
+ enum bna_ioceth_event);
+
+static void
+bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
+{
+ call_ioceth_stop_cbfn(ioceth);
+}
+
+static void
+bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
+ enum bna_ioceth_event event)
+{
+ switch (event) {
+ case IOCETH_E_ENABLE:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
+ bfa_nw_ioc_enable(&ioceth->ioc);
+ break;
+
+ case IOCETH_E_DISABLE:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
+ break;
+
+ case IOCETH_E_IOC_RESET:
+ enable_mbox_intr(ioceth);
+ break;
+
+ case IOCETH_E_IOC_FAILED:
+ disable_mbox_intr(ioceth);
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
+{
+ /**
+ * Do not call bfa_nw_ioc_enable() here. It must be called in the
+ * previous state due to failed -> ioc_ready_wait transition.
+ */
+}
+
+static void
+bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
+ enum bna_ioceth_event event)
+{
+ switch (event) {
+ case IOCETH_E_DISABLE:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
+ bfa_nw_ioc_disable(&ioceth->ioc);
+ break;
+
+ case IOCETH_E_IOC_RESET:
+ enable_mbox_intr(ioceth);
+ break;
+
+ case IOCETH_E_IOC_FAILED:
+ disable_mbox_intr(ioceth);
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
+ break;
+
+ case IOCETH_E_IOC_READY:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
+{
+ bna_bfi_attr_get(ioceth);
+}
+
+static void
+bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
+ enum bna_ioceth_event event)
+{
+ switch (event) {
+ case IOCETH_E_DISABLE:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
+ break;
+
+ case IOCETH_E_IOC_FAILED:
+ disable_mbox_intr(ioceth);
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
+ break;
+
+ case IOCETH_E_ENET_ATTR_RESP:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
+{
+ bna_enet_start(&ioceth->bna->enet);
+ bna_stats_mod_start(&ioceth->bna->stats_mod);
+ bnad_cb_ioceth_ready(ioceth->bna->bnad);
+}
+
+static void
+bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
+{
+ switch (event) {
+ case IOCETH_E_DISABLE:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
+ break;
+
+ case IOCETH_E_IOC_FAILED:
+ disable_mbox_intr(ioceth);
+ bna_enet_fail(&ioceth->bna->enet);
+ bna_stats_mod_fail(&ioceth->bna->stats_mod);
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
+{
+}
+
+static void
+bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
+ enum bna_ioceth_event event)
+{
+ switch (event) {
+ case IOCETH_E_IOC_FAILED:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
+ disable_mbox_intr(ioceth);
+ bfa_nw_ioc_disable(&ioceth->ioc);
+ break;
+
+ case IOCETH_E_ENET_ATTR_RESP:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
+ bfa_nw_ioc_disable(&ioceth->ioc);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
+{
+ bna_stats_mod_stop(&ioceth->bna->stats_mod);
+ bna_enet_stop(&ioceth->bna->enet);
+}
+
+static void
+bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
+ enum bna_ioceth_event event)
+{
+ switch (event) {
+ case IOCETH_E_IOC_FAILED:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
+ disable_mbox_intr(ioceth);
+ bna_enet_fail(&ioceth->bna->enet);
+ bna_stats_mod_fail(&ioceth->bna->stats_mod);
+ bfa_nw_ioc_disable(&ioceth->ioc);
+ break;
+
+ case IOCETH_E_ENET_STOPPED:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
+ bfa_nw_ioc_disable(&ioceth->ioc);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
+{
+}
+
+static void
+bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
+ enum bna_ioceth_event event)
+{
+ switch (event) {
+ case IOCETH_E_IOC_DISABLED:
+ disable_mbox_intr(ioceth);
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
+ break;
+
+ case IOCETH_E_ENET_STOPPED:
+ /* This event is received due to enet failing */
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
+{
+ bnad_cb_ioceth_failed(ioceth->bna->bnad);
+}
+
+static void
+bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
+ enum bna_ioceth_event event)
+{
+ switch (event) {
+ case IOCETH_E_DISABLE:
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
+ bfa_nw_ioc_disable(&ioceth->ioc);
+ break;
+
+ case IOCETH_E_IOC_RESET:
+ enable_mbox_intr(ioceth);
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
+ break;
+
+ case IOCETH_E_IOC_FAILED:
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_bfi_attr_get(struct bna_ioceth *ioceth)
+{
+ struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;
+
+ bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
+ attr_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
+ bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_attr_req), &attr_req->mh);
+ bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
+}
+
+/* IOC callback functions */
+
+static void
+bna_cb_ioceth_enable(void *arg, enum bfa_status error)
+{
+ struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
+
+ if (error)
+ bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
+ else
+ bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
+}
+
+static void
+bna_cb_ioceth_disable(void *arg)
+{
+ struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
+
+ bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
+}
+
+static void
+bna_cb_ioceth_hbfail(void *arg)
+{
+ struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
+
+ bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
+}
+
+static void
+bna_cb_ioceth_reset(void *arg)
+{
+ struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
+
+ bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
+}
+
+static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
+ bna_cb_ioceth_enable,
+ bna_cb_ioceth_disable,
+ bna_cb_ioceth_hbfail,
+ bna_cb_ioceth_reset
+};
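+
+/*
+ * The positional initializers above follow the struct bfa_ioc_cbfn
+ * layout: enable_cbfn, disable_cbfn, hbfail_cbfn, reset_cbfn.
+ */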
+
+static void bna_attr_init(struct bna_ioceth *ioceth)
+{
+ ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
+ ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
+ ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
+ ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
+ ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
+ ioceth->attr.fw_query_complete = false;
+}
+
+static void
+bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ u64 dma;
+ u8 *kva;
+
+ ioceth->bna = bna;
+
+ /**
+ * Attach IOC and claim:
+ * 1. DMA memory for IOC attributes
+ * 2. Kernel memory for FW trace
+ */
+ bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
+ bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);
+
+ BNA_GET_DMA_ADDR(
+ &res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
+ kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
+ bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);
+
+ kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
+
+ /**
+ * Attach common modules (Diag, SFP, CEE, Port) and claim respective
+ * DMA memory.
+ */
+ BNA_GET_DMA_ADDR(
+ &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
+ kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
+ bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
+ bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
+ kva += bfa_nw_cee_meminfo();
+ dma += bfa_nw_cee_meminfo();
+
+ bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
+ bfa_msgq_memclaim(&bna->msgq, kva, dma);
+ bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
+ kva += bfa_msgq_meminfo();
+ dma += bfa_msgq_meminfo();
+
+ ioceth->stop_cbfn = NULL;
+ ioceth->stop_cbarg = NULL;
+
+ bna_attr_init(ioceth);
+
+ bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
+}
+
+static void
+bna_ioceth_uninit(struct bna_ioceth *ioceth)
+{
+ bfa_nw_ioc_detach(&ioceth->ioc);
+
+ ioceth->bna = NULL;
+}
+
+void
+bna_ioceth_enable(struct bna_ioceth *ioceth)
+{
+ if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
+ bnad_cb_ioceth_ready(ioceth->bna->bnad);
+ return;
+ }
+
+ if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
+ bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
+}
+
+void
+bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
+{
+ if (type == BNA_SOFT_CLEANUP) {
+ bnad_cb_ioceth_disabled(ioceth->bna->bnad);
+ return;
+ }
+
+ ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
+ ioceth->stop_cbarg = ioceth->bna->bnad;
+
+ bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
+}
+
+static void
+bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int i;
+
+ ucam_mod->ucmac = (struct bna_mac *)
+ res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ INIT_LIST_HEAD(&ucam_mod->free_q);
+ for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
+ bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
+ list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
+ }
+
+ ucam_mod->bna = bna;
+}
+
+static void
+bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
+{
+ struct list_head *qe;
+ int i = 0;
+
+ list_for_each(qe, &ucam_mod->free_q)
+ i++;
+
+ ucam_mod->bna = NULL;
+}
+
+static void
+bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int i;
+
+ mcam_mod->mcmac = (struct bna_mac *)
+ res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ INIT_LIST_HEAD(&mcam_mod->free_q);
+ for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
+ bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
+ list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
+ }
+
+ mcam_mod->mchandle = (struct bna_mcam_handle *)
+ res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ INIT_LIST_HEAD(&mcam_mod->free_handle_q);
+ for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
+ bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
+ list_add_tail(&mcam_mod->mchandle[i].qe,
+ &mcam_mod->free_handle_q);
+ }
+
+ mcam_mod->bna = bna;
+}
+
+static void
+bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
+{
+ struct list_head *qe;
+ int i;
+
+ i = 0;
+	list_for_each(qe, &mcam_mod->free_q)
+		i++;
+
+ i = 0;
+	list_for_each(qe, &mcam_mod->free_handle_q)
+		i++;
+
+ mcam_mod->bna = NULL;
+}
+
+static void
+bna_bfi_stats_get(struct bna *bna)
+{
+ struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
+
+ bna->stats_mod.stats_get_busy = true;
+
+ bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
+ stats_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
+ stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
+ stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
+ stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
+ stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
+ stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;
+
+ bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_stats_req), &stats_req->mh);
+ bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
+}
+
+void
+bna_res_req(struct bna_res_info *res_info)
+{
+ /* DMA memory for COMMON_MODULE */
+ res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
+ res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
+ (bfa_nw_cee_meminfo() +
+ bfa_msgq_meminfo()), PAGE_SIZE);
+
+ /* DMA memory for retrieving IOC attributes */
+ res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
+ res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
+ ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);
+
+	/* Virtual memory for retrieving fw_trc */
+ res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;
+
+	/* DMA memory for retrieving stats */
+ res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
+ ALIGN(sizeof(struct bfi_enet_stats),
+ PAGE_SIZE);
+}
+
+void
+bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
+{
+ struct bna_attr *attr = &bna->ioceth.attr;
+
+ /* Virtual memory for Tx objects - stored by Tx module */
+ res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
+ attr->num_txq * sizeof(struct bna_tx);
+
+ /* Virtual memory for TxQ - stored by Tx module */
+ res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
+ attr->num_txq * sizeof(struct bna_txq);
+
+ /* Virtual memory for Rx objects - stored by Rx module */
+ res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
+ attr->num_rxp * sizeof(struct bna_rx);
+
+ /* Virtual memory for RxPath - stored by Rx module */
+ res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
+ attr->num_rxp * sizeof(struct bna_rxp);
+
+ /* Virtual memory for RxQ - stored by Rx module */
+ res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
+ (attr->num_rxp * 2) * sizeof(struct bna_rxq);
+
+ /* Virtual memory for Unicast MAC address - stored by ucam module */
+ res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
+ attr->num_ucmac * sizeof(struct bna_mac);
+
+ /* Virtual memory for Multicast MAC address - stored by mcam module */
+ res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
+ attr->num_mcmac * sizeof(struct bna_mac);
+
+ /* Virtual memory for Multicast handle - stored by mcam module */
+ res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
+ attr->num_mcmac * sizeof(struct bna_mcam_handle);
+}
+
+void
+bna_init(struct bna *bna, struct bnad *bnad,
+ struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
+{
+ bna->bnad = bnad;
+ bna->pcidev = *pcidev;
+
+ bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
+ bna->stats.hw_stats_dma.msb =
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
+ bna->stats.hw_stats_dma.lsb =
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;
+
+ bna_reg_addr_init(bna, &bna->pcidev);
+
+ /* Also initializes diag, cee, sfp, phy_port, msgq */
+ bna_ioceth_init(&bna->ioceth, bna, res_info);
+
+ bna_enet_init(&bna->enet, bna);
+ bna_ethport_init(&bna->ethport, bna);
+}
+
+void
+bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
+{
+ bna_tx_mod_init(&bna->tx_mod, bna, res_info);
+
+ bna_rx_mod_init(&bna->rx_mod, bna, res_info);
+
+ bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);
+
+ bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
+
+ bna->default_mode_rid = BFI_INVALID_RID;
+ bna->promisc_rid = BFI_INVALID_RID;
+
+ bna->mod_flags |= BNA_MOD_F_INIT_DONE;
+}
+
+void
+bna_uninit(struct bna *bna)
+{
+ if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
+ bna_mcam_mod_uninit(&bna->mcam_mod);
+ bna_ucam_mod_uninit(&bna->ucam_mod);
+ bna_rx_mod_uninit(&bna->rx_mod);
+ bna_tx_mod_uninit(&bna->tx_mod);
+ bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
+ }
+
+ bna_stats_mod_uninit(&bna->stats_mod);
+ bna_ethport_uninit(&bna->ethport);
+ bna_enet_uninit(&bna->enet);
+
+ bna_ioceth_uninit(&bna->ioceth);
+
+ bna->bnad = NULL;
+}
+
+int
+bna_num_txq_set(struct bna *bna, int num_txq)
+{
+ if (bna->ioceth.attr.fw_query_complete &&
+ (num_txq <= bna->ioceth.attr.num_txq)) {
+ bna->ioceth.attr.num_txq = num_txq;
+ return BNA_CB_SUCCESS;
+ }
+
+ return BNA_CB_FAIL;
+}
+
+int
+bna_num_rxp_set(struct bna *bna, int num_rxp)
+{
+ if (bna->ioceth.attr.fw_query_complete &&
+ (num_rxp <= bna->ioceth.attr.num_rxp)) {
+ bna->ioceth.attr.num_rxp = num_rxp;
+ return BNA_CB_SUCCESS;
+ }
+
+ return BNA_CB_FAIL;
+}
+
+struct bna_mac *
+bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
+{
+ struct list_head *qe;
+
+ if (list_empty(&ucam_mod->free_q))
+ return NULL;
+
+ bfa_q_deq(&ucam_mod->free_q, &qe);
+
+ return (struct bna_mac *)qe;
+}
+
+void
+bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
+{
+ list_add_tail(&mac->qe, &ucam_mod->free_q);
+}
+
+struct bna_mac *
+bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
+{
+ struct list_head *qe;
+
+ if (list_empty(&mcam_mod->free_q))
+ return NULL;
+
+ bfa_q_deq(&mcam_mod->free_q, &qe);
+
+ return (struct bna_mac *)qe;
+}
+
+void
+bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
+{
+ list_add_tail(&mac->qe, &mcam_mod->free_q);
+}
+
+struct bna_mcam_handle *
+bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
+{
+ struct list_head *qe;
+
+ if (list_empty(&mcam_mod->free_handle_q))
+ return NULL;
+
+ bfa_q_deq(&mcam_mod->free_handle_q, &qe);
+
+ return (struct bna_mcam_handle *)qe;
+}
+
+void
+bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
+ struct bna_mcam_handle *handle)
+{
+ list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
+}
+
+void
+bna_hw_stats_get(struct bna *bna)
+{
+ if (!bna->stats_mod.ioc_ready) {
+ bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
+ return;
+ }
+ if (bna->stats_mod.stats_get_busy) {
+ bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
+ return;
+ }
+
+ bna_bfi_stats_get(bna);
+}
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
new file mode 100644
index 000000000000..4c6aab2a9534
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -0,0 +1,422 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ * File for hardware register definitions, interrupt and doorbell
+ * macros, and TxQ/RxQ/CQ entry structures
+ */
+
+#ifndef __BNA_HW_DEFS_H__
+#define __BNA_HW_DEFS_H__
+
+#include "bfi_reg.h"
+
+/**
+ *
+ * SW imposed limits
+ *
+ */
+#define BFI_ENET_DEF_TXQ 1
+#define BFI_ENET_DEF_RXP 1
+#define BFI_ENET_DEF_UCAM 1
+#define BFI_ENET_DEF_RITSZ 1
+
+#define BFI_ENET_MAX_MCAM 256
+
+#define BFI_INVALID_RID -1
+
+#define BFI_IBIDX_SIZE 4
+
+#define BFI_VLAN_WORD_SHIFT 5 /* 32 bits */
+#define BFI_VLAN_WORD_MASK 0x1F
+#define BFI_VLAN_BLOCK_SHIFT 9 /* 512 bits */
+#define BFI_VLAN_BMASK_ALL 0xFF
+
+#define BFI_COALESCING_TIMER_UNIT 5 /* 5us */
+#define BFI_MAX_COALESCING_TIMEO 0xFF /* in 5us units */
+#define BFI_MAX_INTERPKT_COUNT 0xFF
+#define BFI_MAX_INTERPKT_TIMEO 0xF /* in 0.5us units */
+#define BFI_TX_COALESCING_TIMEO 20 /* 20 * 5 = 100us */
+#define BFI_TX_INTERPKT_COUNT 32
+#define BFI_RX_COALESCING_TIMEO 12 /* 12 * 5 = 60us */
+#define BFI_RX_INTERPKT_COUNT 6 /* Pkt Cnt = 6 */
+#define BFI_RX_INTERPKT_TIMEO 3 /* 3 * 0.5 = 1.5us */
+
+#define BFI_TXQ_WI_SIZE 64 /* bytes */
+#define BFI_RXQ_WI_SIZE 8 /* bytes */
+#define BFI_CQ_WI_SIZE 16 /* bytes */
+#define BFI_TX_MAX_WRR_QUOTA 0xFFF
+
+#define BFI_TX_MAX_VECTORS_PER_WI 4
+#define BFI_TX_MAX_VECTORS_PER_PKT 0xFF
+#define BFI_TX_MAX_DATA_PER_VECTOR 0xFFFF
+#define BFI_TX_MAX_DATA_PER_PKT 0xFFFFFF
+
+/* Small Q buffer size */
+#define BFI_SMALL_RXBUF_SIZE 128
+
+#define BFI_TX_MAX_PRIO 8
+#define BFI_TX_PRIO_MAP_ALL 0xFF
+
+/*
+ *
+ * Register definitions and macros
+ *
+ */
+
+#define BNA_PCI_REG_CT_ADDRSZ (0x40000)
+
+#define ct_reg_addr_init(_bna, _pcidev) \
+{ \
+ struct bna_reg_offset reg_offset[] = \
+ {{HOSTFN0_INT_STATUS, HOSTFN0_INT_MSK}, \
+ {HOSTFN1_INT_STATUS, HOSTFN1_INT_MSK}, \
+ {HOSTFN2_INT_STATUS, HOSTFN2_INT_MSK}, \
+ {HOSTFN3_INT_STATUS, HOSTFN3_INT_MSK} }; \
+ \
+ (_bna)->regs.fn_int_status = (_pcidev)->pci_bar_kva + \
+ reg_offset[(_pcidev)->pci_func].fn_int_status;\
+ (_bna)->regs.fn_int_mask = (_pcidev)->pci_bar_kva + \
+ reg_offset[(_pcidev)->pci_func].fn_int_mask;\
+}
+
+#define ct_bit_defn_init(_bna, _pcidev) \
+{ \
+ (_bna)->bits.mbox_status_bits = (__HFN_INT_MBOX_LPU0 | \
+ __HFN_INT_MBOX_LPU1); \
+ (_bna)->bits.mbox_mask_bits = (__HFN_INT_MBOX_LPU0 | \
+ __HFN_INT_MBOX_LPU1); \
+ (_bna)->bits.error_status_bits = (__HFN_INT_ERR_MASK); \
+ (_bna)->bits.error_mask_bits = (__HFN_INT_ERR_MASK); \
+ (_bna)->bits.halt_status_bits = __HFN_INT_LL_HALT; \
+ (_bna)->bits.halt_mask_bits = __HFN_INT_LL_HALT; \
+}
+
+#define ct2_reg_addr_init(_bna, _pcidev) \
+{ \
+ (_bna)->regs.fn_int_status = (_pcidev)->pci_bar_kva + \
+ CT2_HOSTFN_INT_STATUS; \
+ (_bna)->regs.fn_int_mask = (_pcidev)->pci_bar_kva + \
+ CT2_HOSTFN_INTR_MASK; \
+}
+
+#define ct2_bit_defn_init(_bna, _pcidev) \
+{ \
+ (_bna)->bits.mbox_status_bits = (__HFN_INT_MBOX_LPU0_CT2 | \
+ __HFN_INT_MBOX_LPU1_CT2); \
+ (_bna)->bits.mbox_mask_bits = (__HFN_INT_MBOX_LPU0_CT2 | \
+ __HFN_INT_MBOX_LPU1_CT2); \
+ (_bna)->bits.error_status_bits = (__HFN_INT_ERR_MASK_CT2); \
+ (_bna)->bits.error_mask_bits = (__HFN_INT_ERR_MASK_CT2); \
+ (_bna)->bits.halt_status_bits = __HFN_INT_CPQ_HALT_CT2; \
+ (_bna)->bits.halt_mask_bits = __HFN_INT_CPQ_HALT_CT2; \
+}
+
+#define bna_reg_addr_init(_bna, _pcidev) \
+{ \
+ switch ((_pcidev)->device_id) { \
+ case PCI_DEVICE_ID_BROCADE_CT: \
+ ct_reg_addr_init((_bna), (_pcidev)); \
+ ct_bit_defn_init((_bna), (_pcidev)); \
+ break; \
+ case BFA_PCI_DEVICE_ID_CT2: \
+ ct2_reg_addr_init((_bna), (_pcidev)); \
+ ct2_bit_defn_init((_bna), (_pcidev)); \
+ break; \
+ } \
+}
+
+#define bna_port_id_get(_bna) ((_bna)->ioceth.ioc.port_id)
+/**
+ *
+ * Interrupt related bits, flags and macros
+ *
+ */
+
+#define IB_STATUS_BITS 0x0000ffff
+
+#define BNA_IS_MBOX_INTR(_bna, _intr_status) \
+ ((_intr_status) & (_bna)->bits.mbox_status_bits)
+
+#define BNA_IS_HALT_INTR(_bna, _intr_status) \
+ ((_intr_status) & (_bna)->bits.halt_status_bits)
+
+#define BNA_IS_ERR_INTR(_bna, _intr_status) \
+ ((_intr_status) & (_bna)->bits.error_status_bits)
+
+#define BNA_IS_MBOX_ERR_INTR(_bna, _intr_status) \
+ (BNA_IS_MBOX_INTR(_bna, _intr_status) | \
+ BNA_IS_ERR_INTR(_bna, _intr_status))
+
+#define BNA_IS_INTX_DATA_INTR(_intr_status) \
+ ((_intr_status) & IB_STATUS_BITS)
+
+#define bna_halt_clear(_bna) \
+do { \
+ u32 init_halt; \
+ init_halt = readl((_bna)->ioceth.ioc.ioc_regs.ll_halt); \
+ init_halt &= ~__FW_INIT_HALT_P; \
+ writel(init_halt, (_bna)->ioceth.ioc.ioc_regs.ll_halt); \
+ init_halt = readl((_bna)->ioceth.ioc.ioc_regs.ll_halt); \
+} while (0)
+
+#define bna_intx_disable(_bna, _cur_mask) \
+{ \
+ (_cur_mask) = readl((_bna)->regs.fn_int_mask); \
+ writel(0xffffffff, (_bna)->regs.fn_int_mask); \
+}
+
+#define bna_intx_enable(bna, new_mask) \
+ writel((new_mask), (bna)->regs.fn_int_mask)
+#define bna_mbox_intr_disable(bna) \
+do { \
+ u32 mask; \
+ mask = readl((bna)->regs.fn_int_mask); \
+ writel((mask | (bna)->bits.mbox_mask_bits | \
+ (bna)->bits.error_mask_bits), (bna)->regs.fn_int_mask); \
+ mask = readl((bna)->regs.fn_int_mask); \
+} while (0)
+
+#define bna_mbox_intr_enable(bna) \
+do { \
+ u32 mask; \
+ mask = readl((bna)->regs.fn_int_mask); \
+ writel((mask & ~((bna)->bits.mbox_mask_bits | \
+ (bna)->bits.error_mask_bits)), (bna)->regs.fn_int_mask);\
+ mask = readl((bna)->regs.fn_int_mask); \
+} while (0)
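+
+/*
+ * Note: the trailing readl() in the two macros above flushes the posted
+ * writel() to the interrupt mask register before execution continues.
+ */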
+
+#define bna_intr_status_get(_bna, _status) \
+{ \
+ (_status) = readl((_bna)->regs.fn_int_status); \
+ if (_status) { \
+ writel(((_status) & ~(_bna)->bits.mbox_status_bits), \
+ (_bna)->regs.fn_int_status); \
+ } \
+}
+
+/*
+ * MAX ACK EVENTS: the number of acks that the driver may accumulate
+ * before acking to h/w. The doorbell register provides 16 bits for the
+ * event count, but we limit ourselves to 15 bits (32K). Near the 64K
+ * boundary, a single poll could push the accumulated ACK counter past
+ * 64K, and acking with a value greater than 64K fails. 15 bits leaves
+ * ample headroom: the maximum acked to h/w is 32K plus the max poll
+ * weight (currently 64).
+ */
+#define BNA_IB_MAX_ACK_EVENTS (1 << 15)
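+
+/*
+ * Worked example: with the limit above and a poll weight of 64, a poll
+ * that starts just below the limit can accumulate at most
+ * 32768 + 64 = 32832 events before acking, still well below the 64K
+ * doorbell ceiling.
+ */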
+
+/* These macros build the data portion of the TxQ/RxQ doorbell */
+#define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi))
+#define BNA_DOORBELL_Q_STOP (0x40000000)
+
+/* These macros build the data portion of the IB doorbell */
+#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \
+ (0x80000000 | ((_timeout) << 16) | (_events))
+#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
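+
+/*
+ * Example: BNA_DOORBELL_IB_INT_ACK(BFI_RX_COALESCING_TIMEO, 6) encodes as
+ * 0x80000000 | (12 << 16) | 6 = 0x800c0006, i.e. ack 6 events and re-arm
+ * the coalescing timer for 12 * 5 = 60us.
+ */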
+
+/* Set the coalescing timer for the given ib */
+#define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer) \
+ ((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0))
+
+/* Acknowledges 'events' events for a given ib while disabling interrupts */
+#define bna_ib_ack_disable_irq(_i_dbell, _events) \
+ (writel(BNA_DOORBELL_IB_INT_ACK(0, (_events)), \
+ (_i_dbell)->doorbell_addr))
+
+/* Acknowledges 'events' events for a given ib */
+#define bna_ib_ack(_i_dbell, _events) \
+ (writel(((_i_dbell)->doorbell_ack | (_events)), \
+ (_i_dbell)->doorbell_addr))
+
+#define bna_ib_start(_bna, _ib, _is_regular) \
+{ \
+ u32 intx_mask; \
+ struct bna_ib *ib = _ib; \
+ if (ib->intr_type == BNA_INTR_T_INTX) { \
+ bna_intx_disable((_bna), intx_mask); \
+ intx_mask &= ~(ib->intr_vector); \
+ bna_intx_enable((_bna), intx_mask); \
+ } \
+ bna_ib_coalescing_timer_set(&ib->door_bell, \
+ ib->coalescing_timeo); \
+ if (_is_regular) \
+ bna_ib_ack(&ib->door_bell, 0); \
+}
+
+#define bna_ib_stop(_bna, _ib) \
+{ \
+ u32 intx_mask; \
+ struct bna_ib *ib = _ib; \
+ writel(BNA_DOORBELL_IB_INT_DISABLE, \
+ ib->door_bell.doorbell_addr); \
+ if (ib->intr_type == BNA_INTR_T_INTX) { \
+ bna_intx_disable((_bna), intx_mask); \
+ intx_mask |= ib->intr_vector; \
+ bna_intx_enable((_bna), intx_mask); \
+ } \
+}
+
+#define bna_txq_prod_indx_doorbell(_tcb) \
+ (writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index), \
+ (_tcb)->q_dbell))
+
+#define bna_rxq_prod_indx_doorbell(_rcb) \
+ (writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \
+ (_rcb)->q_dbell))
+
+/**
+ *
+ * TxQ, RxQ, CQ related bits, offsets, macros
+ *
+ */
+
+/* TxQ Entry Opcodes */
+#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
+#define BNA_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */
+#define BNA_TXQ_WI_EXTENSION (0x104) /* Extension WI */
+
+/* TxQ Entry Control Flags */
+#define BNA_TXQ_WI_CF_FCOE_CRC (1 << 8)
+#define BNA_TXQ_WI_CF_IPID_MODE (1 << 5)
+#define BNA_TXQ_WI_CF_INS_PRIO (1 << 4)
+#define BNA_TXQ_WI_CF_INS_VLAN (1 << 3)
+#define BNA_TXQ_WI_CF_UDP_CKSUM (1 << 2)
+#define BNA_TXQ_WI_CF_TCP_CKSUM (1 << 1)
+#define BNA_TXQ_WI_CF_IP_CKSUM (1 << 0)
+
+#define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
+ (((_hdr_size) << 10) | ((_offset) & 0x3FF))
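+
+/*
+ * Example: BNA_TXQ_WI_L4_HDR_N_OFFSET(10, 34) = (10 << 10) | 34 = 0x2822;
+ * the L4 header size goes in the upper bits, the offset in the low 10 bits.
+ */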
+
+/*
+ * Completion Q defines
+ */
+/* CQ Entry Flags */
+#define BNA_CQ_EF_MAC_ERROR (1 << 0)
+#define BNA_CQ_EF_FCS_ERROR (1 << 1)
+#define BNA_CQ_EF_TOO_LONG (1 << 2)
+#define BNA_CQ_EF_FC_CRC_OK (1 << 3)
+
+#define BNA_CQ_EF_RSVD1 (1 << 4)
+#define BNA_CQ_EF_L4_CKSUM_OK (1 << 5)
+#define BNA_CQ_EF_L3_CKSUM_OK (1 << 6)
+#define BNA_CQ_EF_HDS_HEADER (1 << 7)
+
+#define BNA_CQ_EF_UDP (1 << 8)
+#define BNA_CQ_EF_TCP (1 << 9)
+#define BNA_CQ_EF_IP_OPTIONS (1 << 10)
+#define BNA_CQ_EF_IPV6 (1 << 11)
+
+#define BNA_CQ_EF_IPV4 (1 << 12)
+#define BNA_CQ_EF_VLAN (1 << 13)
+#define BNA_CQ_EF_RSS (1 << 14)
+#define BNA_CQ_EF_RSVD2 (1 << 15)
+
+#define BNA_CQ_EF_MCAST_MATCH (1 << 16)
+#define BNA_CQ_EF_MCAST (1 << 17)
+#define BNA_CQ_EF_BCAST (1 << 18)
+#define BNA_CQ_EF_REMOTE (1 << 19)
+
+#define BNA_CQ_EF_LOCAL (1 << 20)
+
+/**
+ *
+ * Data structures
+ *
+ */
+
+struct bna_reg_offset {
+ u32 fn_int_status;
+ u32 fn_int_mask;
+};
+
+struct bna_bit_defn {
+ u32 mbox_status_bits;
+ u32 mbox_mask_bits;
+ u32 error_status_bits;
+ u32 error_mask_bits;
+ u32 halt_status_bits;
+ u32 halt_mask_bits;
+};
+
+struct bna_reg {
+ void __iomem *fn_int_status;
+ void __iomem *fn_int_mask;
+};
+
+/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
+struct bna_dma_addr {
+ u32 msb;
+ u32 lsb;
+};
+
+struct bna_txq_wi_vector {
+ u16 reserved;
+ u16 length; /* Only 14 LSB are valid */
+ struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */
+};
+
+/**
+ * TxQ Entry Structure
+ *
+ * BEWARE: Load values into this structure with correct endianness.
+ */
+struct bna_txq_entry {
+ union {
+ struct {
+ u8 reserved;
+ u8 num_vectors; /* number of vectors present */
+ u16 opcode; /* Either */
+ /* BNA_TXQ_WI_SEND or */
+ /* BNA_TXQ_WI_SEND_LSO */
+ u16 flags; /* OR of all the flags */
+ u16 l4_hdr_size_n_offset;
+ u16 vlan_tag;
+ u16 lso_mss; /* Only 14 LSB are valid */
+ u32 frame_length; /* Only 24 LSB are valid */
+ } wi;
+
+ struct {
+ u16 reserved;
+ u16 opcode; /* Must be */
+ /* BNA_TXQ_WI_EXTENSION */
+ u32 reserved2[3]; /* Placeholder for */
+ /* removed vector (12 bytes) */
+ } wi_ext;
+ } hdr;
+ struct bna_txq_wi_vector vector[4];
+};
+
+/* RxQ Entry Structure */
+struct bna_rxq_entry { /* Rx-Buffer */
+ struct bna_dma_addr host_addr; /* Rx-Buffer DMA address */
+};
+
+/* CQ Entry Structure */
+struct bna_cq_entry {
+ u32 flags;
+ u16 vlan_tag;
+ u16 length;
+ u32 rss_hash;
+ u8 valid;
+ u8 reserved1;
+ u8 reserved2;
+ u8 rxq_id;
+};
+
+#endif /* __BNA_HW_DEFS_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
new file mode 100644
index 000000000000..276fcb589f4b
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -0,0 +1,3798 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include "bna.h"
+#include "bfi.h"
+
+/**
+ * IB
+ */
+static void
+bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
+{
+ ib->coalescing_timeo = coalescing_timeo;
+ ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
+ (u32)ib->coalescing_timeo, 0);
+}
+
+/**
+ * RXF
+ */
+
+#define bna_rxf_vlan_cfg_soft_reset(rxf) \
+do { \
+ (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; \
+ (rxf)->vlan_strip_pending = true; \
+} while (0)
+
+#define bna_rxf_rss_cfg_soft_reset(rxf) \
+do { \
+ if ((rxf)->rss_status == BNA_STATUS_T_ENABLED) \
+ (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING | \
+ BNA_RSS_F_CFG_PENDING | \
+ BNA_RSS_F_STATUS_PENDING); \
+} while (0)
+
+static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
+static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
+static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
+static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
+static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
+static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
+static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
+static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
+ enum bna_cleanup_type cleanup);
+static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
+ enum bna_cleanup_type cleanup);
+static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
+ enum bna_cleanup_type cleanup);
+
+bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
+ enum bna_rxf_event);
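+
+/*
+ * RXF state machine summary (derived from the handlers below):
+ *
+ * stopped --START--> cfg_wait (or paused, if BNA_RXF_F_PAUSED is set)
+ * cfg_wait --FW_RESP, no pending config--> started
+ * started --CONFIG--> cfg_wait
+ * cfg_wait --STOP--> last_resp_wait --FW_RESP/FAIL--> stopped
+ * started --PAUSE--> fltr_clr_wait --FW_RESP, filters cleared--> paused
+ * paused --RESUME--> cfg_wait
+ */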
+
+static void
+bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
+{
+ call_rxf_stop_cbfn(rxf);
+}
+
+static void
+bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_START:
+ if (rxf->flags & BNA_RXF_F_PAUSED) {
+ bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
+ call_rxf_start_cbfn(rxf);
+ } else
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
+ break;
+
+ case RXF_E_STOP:
+ call_rxf_stop_cbfn(rxf);
+ break;
+
+ case RXF_E_FAIL:
+ /* No-op */
+ break;
+
+ case RXF_E_CONFIG:
+ call_rxf_cam_fltr_cbfn(rxf);
+ break;
+
+ case RXF_E_PAUSE:
+ rxf->flags |= BNA_RXF_F_PAUSED;
+ call_rxf_pause_cbfn(rxf);
+ break;
+
+ case RXF_E_RESUME:
+ rxf->flags &= ~BNA_RXF_F_PAUSED;
+ call_rxf_resume_cbfn(rxf);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
+{
+ call_rxf_pause_cbfn(rxf);
+}
+
+static void
+bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_STOP:
+ case RXF_E_FAIL:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_CONFIG:
+ call_rxf_cam_fltr_cbfn(rxf);
+ break;
+
+ case RXF_E_RESUME:
+ rxf->flags &= ~BNA_RXF_F_PAUSED;
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
+{
+ if (!bna_rxf_cfg_apply(rxf)) {
+ /* No more pending config updates */
+ bfa_fsm_set_state(rxf, bna_rxf_sm_started);
+ }
+}
+
+static void
+bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_STOP:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
+ break;
+
+ case RXF_E_FAIL:
+ bna_rxf_cfg_reset(rxf);
+ call_rxf_start_cbfn(rxf);
+ call_rxf_cam_fltr_cbfn(rxf);
+ call_rxf_resume_cbfn(rxf);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_CONFIG:
+ /* No-op */
+ break;
+
+ case RXF_E_PAUSE:
+ rxf->flags |= BNA_RXF_F_PAUSED;
+ call_rxf_start_cbfn(rxf);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
+ break;
+
+ case RXF_E_FW_RESP:
+ if (!bna_rxf_cfg_apply(rxf)) {
+ /* No more pending config updates */
+ bfa_fsm_set_state(rxf, bna_rxf_sm_started);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_rxf_sm_started_entry(struct bna_rxf *rxf)
+{
+ call_rxf_start_cbfn(rxf);
+ call_rxf_cam_fltr_cbfn(rxf);
+ call_rxf_resume_cbfn(rxf);
+}
+
+static void
+bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_STOP:
+ case RXF_E_FAIL:
+ bna_rxf_cfg_reset(rxf);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_CONFIG:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
+ break;
+
+ case RXF_E_PAUSE:
+ rxf->flags |= BNA_RXF_F_PAUSED;
+ if (!bna_rxf_fltr_clear(rxf))
+ bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
+ else
+ bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
+{
+}
+
+static void
+bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_FAIL:
+ bna_rxf_cfg_reset(rxf);
+ call_rxf_pause_cbfn(rxf);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_FW_RESP:
+ if (!bna_rxf_fltr_clear(rxf)) {
+ /* No more pending CAM entries to clear */
+ bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
+{
+}
+
+static void
+bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_FAIL:
+ case RXF_E_FW_RESP:
+ bna_rxf_cfg_reset(rxf);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
+ enum bfi_enet_h2i_msgs req_type)
+{
+ struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
+ memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
+ bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_ucast_req), &req->mh);
+ bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
+}
+
+static void
+bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
+{
+ struct bfi_enet_mcast_add_req *req =
+ &rxf->bfi_enet_cmd.mcast_add_req;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
+ 0, rxf->rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
+ memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
+ bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_mcast_add_req), &req->mh);
+ bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
+}
+
+static void
+bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
+{
+ struct bfi_enet_mcast_del_req *req =
+ &rxf->bfi_enet_cmd.mcast_del_req;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
+ 0, rxf->rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
+ req->handle = htons(handle);
+ bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_mcast_del_req), &req->mh);
+ bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
+}
+
+static void
+bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
+{
+ struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
+ req->enable = status;
+ bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_enable_req), &req->mh);
+ bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
+}
+
+static void
+bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
+{
+ struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
+ req->enable = status;
+ bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_enable_req), &req->mh);
+ bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
+}
+
+static void
+bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
+{
+ struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
+ int i;
+ int j;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
+ req->block_idx = block_idx;
+ for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
+ j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
+ if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
+ req->bit_mask[i] =
+ htonl(rxf->vlan_filter_table[j]);
+ else
+ req->bit_mask[i] = 0xFFFFFFFF;
+ }
+ bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
+ bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
+}
+
+static void
+bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
+{
+ struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
+ req->enable = rxf->vlan_strip_status;
+ bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_enable_req), &req->mh);
+ bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
+}
+
+static void
+bna_bfi_rit_cfg(struct bna_rxf *rxf)
+{
+ struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
+ req->size = htons(rxf->rit_size);
+ memcpy(&req->table[0], rxf->rit, rxf->rit_size);
+ bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_rit_req), &req->mh);
+ bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
+}
+
+static void
+bna_bfi_rss_cfg(struct bna_rxf *rxf)
+{
+ struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
+ int i;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
+ req->cfg.type = rxf->rss_cfg.hash_type;
+ req->cfg.mask = rxf->rss_cfg.hash_mask;
+ for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
+ req->cfg.key[i] =
+ htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
+ bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
+ bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
+}
+
+static void
+bna_bfi_rss_enable(struct bna_rxf *rxf)
+{
+ struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
+ req->enable = rxf->rss_status;
+ bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_enable_req), &req->mh);
+ bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
+}
+
+/* This function gets the multicast MAC that has already been added to CAM */
+static struct bna_mac *
+bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
+{
+ struct bna_mac *mac;
+ struct list_head *qe;
+
+ list_for_each(qe, &rxf->mcast_active_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
+ return mac;
+ }
+
+ list_for_each(qe, &rxf->mcast_pending_del_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
+ return mac;
+ }
+
+ return NULL;
+}
+
+static struct bna_mcam_handle *
+bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
+{
+ struct bna_mcam_handle *mchandle;
+ struct list_head *qe;
+
+ list_for_each(qe, &rxf->mcast_handle_q) {
+ mchandle = (struct bna_mcam_handle *)qe;
+ if (mchandle->handle == handle)
+ return mchandle;
+ }
+
+ return NULL;
+}
+
+static void
+bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
+{
+ struct bna_mac *mcmac;
+ struct bna_mcam_handle *mchandle;
+
+ mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
+ mchandle = bna_rxf_mchandle_get(rxf, handle);
+ if (mchandle == NULL) {
+ mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
+ mchandle->handle = handle;
+ mchandle->refcnt = 0;
+ list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
+ }
+ mchandle->refcnt++;
+ mcmac->handle = mchandle;
+}
+
+static int
+bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
+ enum bna_cleanup_type cleanup)
+{
+ struct bna_mcam_handle *mchandle;
+ int ret = 0;
+
+ mchandle = mac->handle;
+ if (mchandle == NULL)
+ return ret;
+
+ mchandle->refcnt--;
+ if (mchandle->refcnt == 0) {
+ if (cleanup == BNA_HARD_CLEANUP) {
+ bna_bfi_mcast_del_req(rxf, mchandle->handle);
+ ret = 1;
+ }
+ list_del(&mchandle->qe);
+ bfa_q_qe_init(&mchandle->qe);
+ bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
+ }
+ mac->handle = NULL;
+
+ return ret;
+}
+
+static int
+bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac = NULL;
+ struct list_head *qe;
+ int ret;
+
+ /* Delete multicast entries previously added */
+ while (!list_empty(&rxf->mcast_pending_del_q)) {
+ bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ if (ret)
+ return ret;
+ }
+
+ /* Add multicast entries */
+ if (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ list_add_tail(&mac->qe, &rxf->mcast_active_q);
+ bna_bfi_mcast_add_req(rxf, mac);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
+{
+ u8 vlan_pending_bitmask;
+ int block_idx = 0;
+
+ if (rxf->vlan_pending_bitmask) {
+ vlan_pending_bitmask = rxf->vlan_pending_bitmask;
+ while (!(vlan_pending_bitmask & 0x1)) {
+ block_idx++;
+ vlan_pending_bitmask >>= 1;
+ }
+ rxf->vlan_pending_bitmask &= ~(1 << block_idx);
+ bna_bfi_rx_vlan_filter_set(rxf, block_idx);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
+{
+ struct list_head *qe;
+ struct bna_mac *mac;
+ int ret;
+
+ /* Throw away delete pending mcast entries */
+ while (!list_empty(&rxf->mcast_pending_del_q)) {
+ bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ ret = bna_rxf_mcast_del(rxf, mac, cleanup);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ if (ret)
+ return ret;
+ }
+
+ /* Move active mcast entries to pending_add_q */
+ while (!list_empty(&rxf->mcast_active_q)) {
+ bfa_q_deq(&rxf->mcast_active_q, &qe);
+ bfa_q_qe_init(qe);
+ list_add_tail(qe, &rxf->mcast_pending_add_q);
+ mac = (struct bna_mac *)qe;
+ if (bna_rxf_mcast_del(rxf, mac, cleanup))
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
+{
+ if (rxf->rss_pending) {
+ if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
+ rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
+ bna_bfi_rit_cfg(rxf);
+ return 1;
+ }
+
+ if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
+ rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
+ bna_bfi_rss_cfg(rxf);
+ return 1;
+ }
+
+ if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
+ rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
+ bna_bfi_rss_enable(rxf);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+bna_rxf_cfg_apply(struct bna_rxf *rxf)
+{
+ if (bna_rxf_ucast_cfg_apply(rxf))
+ return 1;
+
+ if (bna_rxf_mcast_cfg_apply(rxf))
+ return 1;
+
+ if (bna_rxf_promisc_cfg_apply(rxf))
+ return 1;
+
+ if (bna_rxf_allmulti_cfg_apply(rxf))
+ return 1;
+
+ if (bna_rxf_vlan_cfg_apply(rxf))
+ return 1;
+
+ if (bna_rxf_vlan_strip_cfg_apply(rxf))
+ return 1;
+
+ if (bna_rxf_rss_cfg_apply(rxf))
+ return 1;
+
+ return 0;
+}
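+
+/*
+ * The *_cfg_apply() helpers post at most one f/w request per call and
+ * return 1; a return of 0 means nothing is pending. The cfg_wait state
+ * re-runs bna_rxf_cfg_apply() on every RXF_E_FW_RESP until it returns 0.
+ */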
+
+/* Hard cleanup: clears filter configuration from the h/w CAM */
+static int
+bna_rxf_fltr_clear(struct bna_rxf *rxf)
+{
+ if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
+ return 1;
+
+ if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
+ return 1;
+
+ if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
+ return 1;
+
+ if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
+ return 1;
+
+ return 0;
+}
+
+static void
+bna_rxf_cfg_reset(struct bna_rxf *rxf)
+{
+ bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
+ bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
+ bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
+ bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
+ bna_rxf_vlan_cfg_soft_reset(rxf);
+ bna_rxf_rss_cfg_soft_reset(rxf);
+}
+
+static void
+bna_rit_init(struct bna_rxf *rxf, int rit_size)
+{
+ struct bna_rx *rx = rxf->rx;
+ struct bna_rxp *rxp;
+ struct list_head *qe;
+ int offset = 0;
+
+ rxf->rit_size = rit_size;
+ list_for_each(qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe;
+ rxf->rit[offset] = rxp->cq.ccb->id;
+ offset++;
+ }
+}
+
+void
+bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
+{
+ bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
+}
+
+void
+bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
+ struct bfi_msgq_mhdr *msghdr)
+{
+ struct bfi_enet_mcast_add_req *req =
+ &rxf->bfi_enet_cmd.mcast_add_req;
+ struct bfi_enet_mcast_add_rsp *rsp =
+ (struct bfi_enet_mcast_add_rsp *)msghdr;
+
+ bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
+ ntohs(rsp->handle));
+ bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
+}
+
+static void
+bna_rxf_init(struct bna_rxf *rxf,
+ struct bna_rx *rx,
+ struct bna_rx_config *q_config,
+ struct bna_res_info *res_info)
+{
+ rxf->rx = rx;
+
+ INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
+ INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
+ rxf->ucast_pending_set = 0;
+ rxf->ucast_active_set = 0;
+ INIT_LIST_HEAD(&rxf->ucast_active_q);
+ rxf->ucast_pending_mac = NULL;
+
+ INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
+ INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
+ INIT_LIST_HEAD(&rxf->mcast_active_q);
+ INIT_LIST_HEAD(&rxf->mcast_handle_q);
+
+ if (q_config->paused)
+ rxf->flags |= BNA_RXF_F_PAUSED;
+
+ rxf->rit = (u8 *)
+ res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
+ bna_rit_init(rxf, q_config->num_paths);
+
+ rxf->rss_status = q_config->rss_status;
+ if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
+ rxf->rss_cfg = q_config->rss_config;
+ rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
+ rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
+ rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
+ }
+
+ rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
+ memset(rxf->vlan_filter_table, 0,
+ (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
+ rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
+ rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
+
+ rxf->vlan_strip_status = q_config->vlan_strip_status;
+
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+}
+
+static void
+bna_rxf_uninit(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac;
+
+ rxf->ucast_pending_set = 0;
+ rxf->ucast_active_set = 0;
+
+ while (!list_empty(&rxf->ucast_pending_add_q)) {
+ bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
+ bfa_q_qe_init(&mac->qe);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ }
+
+ if (rxf->ucast_pending_mac) {
+ bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
+ rxf->ucast_pending_mac);
+ rxf->ucast_pending_mac = NULL;
+ }
+
+ while (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
+ bfa_q_qe_init(&mac->qe);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ }
+
+ rxf->rxmode_pending = 0;
+ rxf->rxmode_pending_bitmask = 0;
+ if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
+ rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
+ if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
+ rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;
+
+ rxf->rss_pending = 0;
+ rxf->vlan_strip_pending = false;
+
+ rxf->flags = 0;
+
+ rxf->rx = NULL;
+}
+
+static void
+bna_rx_cb_rxf_started(struct bna_rx *rx)
+{
+ bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
+}
+
+static void
+bna_rxf_start(struct bna_rxf *rxf)
+{
+ rxf->start_cbfn = bna_rx_cb_rxf_started;
+ rxf->start_cbarg = rxf->rx;
+ bfa_fsm_send_event(rxf, RXF_E_START);
+}
+
+static void
+bna_rx_cb_rxf_stopped(struct bna_rx *rx)
+{
+ bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
+}
+
+static void
+bna_rxf_stop(struct bna_rxf *rxf)
+{
+ rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
+ rxf->stop_cbarg = rxf->rx;
+ bfa_fsm_send_event(rxf, RXF_E_STOP);
+}
+
+static void
+bna_rxf_fail(struct bna_rxf *rxf)
+{
+ bfa_fsm_send_event(rxf, RXF_E_FAIL);
+}
+
+enum bna_cb_status
+bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->ucast_pending_mac == NULL) {
+ rxf->ucast_pending_mac =
+ bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
+ if (rxf->ucast_pending_mac == NULL)
+ return BNA_CB_UCAST_CAM_FULL;
+ bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
+ }
+
+ memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
+ rxf->ucast_pending_set = 1;
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+
+ return BNA_CB_SUCCESS;
+}
+
+enum bna_cb_status
+bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
+ void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct bna_mac *mac;
+
+ /* Check if already added or pending addition */
+ if (bna_mac_find(&rxf->mcast_active_q, addr) ||
+ bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
+ if (cbfn)
+ cbfn(rx->bna->bnad, rx);
+ return BNA_CB_SUCCESS;
+ }
+
+ mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+ if (mac == NULL)
+ return BNA_CB_MCAST_LIST_FULL;
+ bfa_q_qe_init(&mac->qe);
+ memcpy(mac->addr, addr, ETH_ALEN);
+ list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
+
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+
+ return BNA_CB_SUCCESS;
+}
+
+enum bna_cb_status
+bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
+ void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head list_head;
+ struct list_head *qe;
+ u8 *mcaddr;
+ struct bna_mac *mac;
+ int i;
+
+ /* Allocate nodes */
+ INIT_LIST_HEAD(&list_head);
+ for (i = 0, mcaddr = mclist; i < count; i++) {
+ mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+ if (mac == NULL)
+ goto err_return;
+ bfa_q_qe_init(&mac->qe);
+ memcpy(mac->addr, mcaddr, ETH_ALEN);
+ list_add_tail(&mac->qe, &list_head);
+
+ mcaddr += ETH_ALEN;
+ }
+
+ /* Purge the pending_add_q */
+ while (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ }
+
+ /* Schedule active_q entries for deletion */
+ while (!list_empty(&rxf->mcast_active_q)) {
+ bfa_q_deq(&rxf->mcast_active_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
+ }
+
+ /* Add the new entries */
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
+ }
+
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+
+ return BNA_CB_SUCCESS;
+
+err_return:
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ }
+
+ return BNA_CB_MCAST_LIST_FULL;
+}
+
+void
+bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
+ int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
+ int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
+
+ rxf->vlan_filter_table[index] |= bit;
+ if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
+ rxf->vlan_pending_bitmask |= (1 << group_id);
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ }
+}
+
+void
+bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
+ int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
+ int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
+
+ rxf->vlan_filter_table[index] &= ~bit;
+ if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
+ rxf->vlan_pending_bitmask |= (1 << group_id);
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ }
+}
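+
+/*
+ * Example: for vlan_id 1234, index = 1234 >> 5 = 38, bit = 1 << (1234 &
+ * 0x1F) = 1 << 18, group_id = 1234 >> 9 = 2: bit 18 of word 38 of the
+ * filter table, within 512-VLAN block 2.
+ */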
+
+static int
+bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac = NULL;
+ struct list_head *qe;
+
+ /* Delete MAC addresses previously added */
+ if (!list_empty(&rxf->ucast_pending_del_q)) {
+ bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ return 1;
+ }
+
+ /* Set default unicast MAC */
+ if (rxf->ucast_pending_set) {
+ rxf->ucast_pending_set = 0;
+ memcpy(rxf->ucast_active_mac.addr,
+ rxf->ucast_pending_mac->addr, ETH_ALEN);
+ rxf->ucast_active_set = 1;
+ bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
+ BFI_ENET_H2I_MAC_UCAST_SET_REQ);
+ return 1;
+ }
+
+ /* Add additional MAC entries */
+ if (!list_empty(&rxf->ucast_pending_add_q)) {
+ bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ list_add_tail(&mac->qe, &rxf->ucast_active_q);
+ bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
+{
+ struct list_head *qe;
+ struct bna_mac *mac;
+
+ /* Throw away delete pending ucast entries */
+ while (!list_empty(&rxf->ucast_pending_del_q)) {
+ bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ if (cleanup == BNA_SOFT_CLEANUP)
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ else {
+ bna_bfi_ucast_req(rxf, mac,
+ BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ return 1;
+ }
+ }
+
+ /* Move active ucast entries to pending_add_q */
+ while (!list_empty(&rxf->ucast_active_q)) {
+ bfa_q_deq(&rxf->ucast_active_q, &qe);
+ bfa_q_qe_init(qe);
+ list_add_tail(qe, &rxf->ucast_pending_add_q);
+ if (cleanup == BNA_HARD_CLEANUP) {
+ mac = (struct bna_mac *)qe;
+ bna_bfi_ucast_req(rxf, mac,
+ BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
+ return 1;
+ }
+ }
+
+ if (rxf->ucast_active_set) {
+ rxf->ucast_pending_set = 1;
+ rxf->ucast_active_set = 0;
+ if (cleanup == BNA_HARD_CLEANUP) {
+ bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
+ BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* Enable/disable promiscuous mode */
+ if (is_promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move promisc configuration from pending -> active */
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active |= BNA_RXMODE_PROMISC;
+ bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
+ return 1;
+ } else if (is_promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move promisc configuration from pending -> active */
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+ bna->promisc_rid = BFI_INVALID_RID;
+ bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* Clear pending promisc mode disable */
+ if (is_promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+ bna->promisc_rid = BFI_INVALID_RID;
+ if (cleanup == BNA_HARD_CLEANUP) {
+ bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+ }
+
+ /* Move promisc mode config from active -> pending */
+ if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
+ promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+ if (cleanup == BNA_HARD_CLEANUP) {
+ bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
+{
+ /* Enable/disable allmulti mode */
+ if (is_allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move allmulti configuration from pending -> active */
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
+ bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
+ return 1;
+ } else if (is_allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move allmulti configuration from pending -> active */
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+ bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
+{
+ /* Clear pending allmulti mode disable */
+ if (is_allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+ if (cleanup == BNA_HARD_CLEANUP) {
+ bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
+ return 1;
+ }
+ }
+
+ /* Move allmulti mode config from active -> pending */
+ if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
+ allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+ if (cleanup == BNA_HARD_CLEANUP) {
+ bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+bna_rxf_promisc_enable(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+ int ret = 0;
+
+ if (is_promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask) ||
+ (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
+ /* Do nothing if pending enable or already enabled */
+ } else if (is_promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* Turn off pending disable command */
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ } else {
+ /* Schedule enable */
+ promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ bna->promisc_rid = rxf->rx->rid;
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static int
+bna_rxf_promisc_disable(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+ int ret = 0;
+
+ if (is_promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask) ||
+ (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
+ /* Do nothing if pending disable or already disabled */
+ } else if (is_promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* Turn off pending enable command */
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ bna->promisc_rid = BFI_INVALID_RID;
+ } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
+ /* Schedule disable */
+ promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static int
+bna_rxf_allmulti_enable(struct bna_rxf *rxf)
+{
+ int ret = 0;
+
+ if (is_allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask) ||
+ (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
+ /* Do nothing if pending enable or already enabled */
+ } else if (is_allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* Turn off pending disable command */
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ } else {
+ /* Schedule enable */
+ allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static int
+bna_rxf_allmulti_disable(struct bna_rxf *rxf)
+{
+ int ret = 0;
+
+ if (is_allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask) ||
+ (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
+ /* Do nothing if pending disable or already disabled */
+ } else if (is_allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* Turn off pending enable command */
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
+ /* Schedule disable */
+ allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static int
+bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
+{
+ if (rxf->vlan_strip_pending) {
+ rxf->vlan_strip_pending = false;
+ bna_bfi_vlan_strip_enable(rxf);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * RX
+ */
+
+#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
+ (qcfg)->num_paths : ((qcfg)->num_paths * 2))
+
+#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + \
+ ((((size) & (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
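+
+/*
+ * Example: with 4K pages, SIZE_TO_PAGES(6000) = 1 + 1 = 2; sizes are
+ * rounded up to a whole number of pages.
+ */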
+
+#define call_rx_stop_cbfn(rx) \
+do { \
+ if ((rx)->stop_cbfn) { \
+ void (*cbfn)(void *, struct bna_rx *); \
+ void *cbarg; \
+ cbfn = (rx)->stop_cbfn; \
+ cbarg = (rx)->stop_cbarg; \
+ (rx)->stop_cbfn = NULL; \
+ (rx)->stop_cbarg = NULL; \
+ cbfn(cbarg, rx); \
+ } \
+} while (0)
+
+#define call_rx_stall_cbfn(rx) \
+do { \
+ if ((rx)->rx_stall_cbfn) \
+ (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx)); \
+} while (0)
+
+#define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \
+do { \
+ struct bna_dma_addr cur_q_addr = \
+ *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr)); \
+ (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb; \
+ (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb; \
+ (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb; \
+ (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb; \
+ (bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \
+ (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\
+} while (0)
+
+static void bna_bfi_rx_enet_start(struct bna_rx *rx);
+static void bna_rx_enet_stop(struct bna_rx *rx);
+static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
+
+bfa_fsm_state_decl(bna_rx, stopped,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, start_wait,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, rxf_start_wait,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, started,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, stop_wait,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, cleanup_wait,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, failed,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, quiesce_wait,
+ struct bna_rx, enum bna_rx_event);
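+
+/*
+ * RX state machine summary (derived from the handlers below):
+ *
+ * stopped --START--> start_wait --STARTED--> rxf_start_wait
+ * rxf_start_wait --RXF_STARTED--> started
+ * started --STOP--> rxf_stop_wait --RXF_STOPPED--> stop_wait
+ * stop_wait --STOPPED--> cleanup_wait --CLEANUP_DONE--> stopped
+ * started/rxf_start_wait --FAIL--> failed --START--> quiesce_wait
+ * quiesce_wait --CLEANUP_DONE--> start_wait
+ */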
+
+static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
+{
+ call_rx_stop_cbfn(rx);
+}
+
+static void bna_rx_sm_stopped(struct bna_rx *rx,
+ enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_START:
+ bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
+ break;
+
+ case RX_E_STOP:
+ call_rx_stop_cbfn(rx);
+ break;
+
+ case RX_E_FAIL:
+ /* no-op */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ break;
+ }
+}
+
+static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
+{
+ bna_bfi_rx_enet_start(rx);
+}
+
+static void
+bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
+{
+}
+
+static void
+bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_FAIL:
+ case RX_E_STOPPED:
+ bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
+ rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
+ break;
+
+ case RX_E_STARTED:
+ bna_rx_enet_stop(rx);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ break;
+ }
+}
+
+static void bna_rx_sm_start_wait(struct bna_rx *rx,
+ enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_STOP:
+ bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
+ break;
+
+ case RX_E_FAIL:
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+ break;
+
+ case RX_E_STARTED:
+ bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ break;
+ }
+}
+
+static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
+{
+ rx->rx_post_cbfn(rx->bna->bnad, rx);
+ bna_rxf_start(&rx->rxf);
+}
+
+static void
+bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
+{
+}
+
+static void
+bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_FAIL:
+ bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
+ bna_rxf_fail(&rx->rxf);
+ call_rx_stall_cbfn(rx);
+ rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
+ break;
+
+ case RX_E_RXF_STARTED:
+ bna_rxf_stop(&rx->rxf);
+ break;
+
+ case RX_E_RXF_STOPPED:
+ bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
+ call_rx_stall_cbfn(rx);
+ bna_rx_enet_stop(rx);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ break;
+ }
+}
+
+static void
+bna_rx_sm_started_entry(struct bna_rx *rx)
+{
+ struct bna_rxp *rxp;
+ struct list_head *qe_rxp;
+ int is_regular = (rx->type == BNA_RX_T_REGULAR);
+
+ /* Start IB */
+ list_for_each(qe_rxp, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe_rxp;
+ bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
+ }
+
+ bna_ethport_cb_rx_started(&rx->bna->ethport);
+}
+
+static void
+bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_STOP:
+ bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
+ bna_ethport_cb_rx_stopped(&rx->bna->ethport);
+ bna_rxf_stop(&rx->rxf);
+ break;
+
+ case RX_E_FAIL:
+ bfa_fsm_set_state(rx, bna_rx_sm_failed);
+ bna_ethport_cb_rx_stopped(&rx->bna->ethport);
+ bna_rxf_fail(&rx->rxf);
+ call_rx_stall_cbfn(rx);
+ rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ break;
+ }
+}
+
+static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
+ enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_STOP:
+ bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
+ break;
+
+ case RX_E_FAIL:
+ bfa_fsm_set_state(rx, bna_rx_sm_failed);
+ bna_rxf_fail(&rx->rxf);
+ call_rx_stall_cbfn(rx);
+ rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
+ break;
+
+ case RX_E_RXF_STARTED:
+ bfa_fsm_set_state(rx, bna_rx_sm_started);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ break;
+ }
+}
+
+static void
+bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
+{
+}
+
+static void
+bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_FAIL:
+ case RX_E_RXF_STOPPED:
+ /* No-op */
+ break;
+
+ case RX_E_CLEANUP_DONE:
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ break;
+ }
+}
+
+static void
+bna_rx_sm_failed_entry(struct bna_rx *rx)
+{
+}
+
+static void
+bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_START:
+ bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
+ break;
+
+ case RX_E_STOP:
+ bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
+ break;
+
+ case RX_E_FAIL:
+ case RX_E_RXF_STARTED:
+ case RX_E_RXF_STOPPED:
+ /* No-op */
+ break;
+
+ case RX_E_CLEANUP_DONE:
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ break;
+ }
+}
+
+static void
+bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
+{
+}
+
+static void
+bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_STOP:
+ bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
+ break;
+
+ case RX_E_FAIL:
+ bfa_fsm_set_state(rx, bna_rx_sm_failed);
+ break;
+
+ case RX_E_CLEANUP_DONE:
+ bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ break;
+ }
+}
+
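+/*
+ * Build a BFI_ENET_H2I_RX_CFG_SET_REQ covering every Rx path (CQ, large/data
+ * RxQ and, for SLR/HDS, a small/header RxQ) plus the IB settings, and post
+ * it to the firmware message queue.
+ */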
+static void
+bna_bfi_rx_enet_start(struct bna_rx *rx)
+{
+ struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
+ struct bna_rxp *rxp = NULL;
+ struct bna_rxq *q0 = NULL, *q1 = NULL;
+ struct list_head *rxp_qe;
+ int i;
+
+ bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
+ cfg_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
+
+ cfg_req->num_queue_sets = rx->num_paths;
+ for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
+ i < rx->num_paths;
+ i++, rxp_qe = bfa_q_next(rxp_qe)) {
+ rxp = (struct bna_rxp *)rxp_qe;
+
+ GET_RXQS(rxp, q0, q1);
+ switch (rxp->type) {
+ case BNA_RXP_SLR:
+ case BNA_RXP_HDS:
+ /* Small RxQ */
+ bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
+ &q1->qpt);
+ cfg_req->q_cfg[i].qs.rx_buffer_size =
+ htons((u16)q1->buffer_size);
+ /* Fall through */
+
+ case BNA_RXP_SINGLE:
+ /* Large/Single RxQ */
+ bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
+ &q0->qpt);
+ q0->buffer_size =
+ bna_enet_mtu_get(&rx->bna->enet);
+ cfg_req->q_cfg[i].ql.rx_buffer_size =
+ htons((u16)q0->buffer_size);
+ break;
+
+ default:
+ BUG_ON(1);
+ }
+
+ bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
+ &rxp->cq.qpt);
+
+ cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
+ rxp->cq.ib.ib_seg_host_addr.lsb;
+ cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
+ rxp->cq.ib.ib_seg_host_addr.msb;
+ cfg_req->q_cfg[i].ib.intr.msix_index =
+ htons((u16)rxp->cq.ib.intr_vector);
+ }
+
+ cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
+ cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
+ cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
+ cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
+ cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
+ ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
+ cfg_req->ib_cfg.coalescing_timeout =
+ htonl((u32)rxp->cq.ib.coalescing_timeo);
+ cfg_req->ib_cfg.inter_pkt_timeout =
+ htonl((u32)rxp->cq.ib.interpkt_timeo);
+ cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;
+
+ switch (rxp->type) {
+ case BNA_RXP_SLR:
+ cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
+ break;
+
+ case BNA_RXP_HDS:
+ cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
+ cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
+ cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
+ cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
+ break;
+
+ case BNA_RXP_SINGLE:
+ cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
+ break;
+
+ default:
+ BUG_ON(1);
+ }
+ cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;
+
+ bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
+ bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
+}
+
+static void
+bna_bfi_rx_enet_stop(struct bna_rx *rx)
+{
+ struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
+ bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
+ &req->mh);
+ bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
+}
+
+static void
+bna_rx_enet_stop(struct bna_rx *rx)
+{
+ struct bna_rxp *rxp;
+ struct list_head *qe_rxp;
+
+ /* Stop IB */
+ list_for_each(qe_rxp, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe_rxp;
+ bna_ib_stop(rx->bna, &rxp->cq.ib);
+ }
+
+ bna_bfi_rx_enet_stop(rx);
+}
+
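+/*
+ * Check that enough Rx objects are free for this configuration: one RxP per
+ * path, and one RxQ per path for single-queue paths or two RxQs per path
+ * for SLR/HDS.
+ */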
+static int
+bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
+{
+ if ((rx_mod->rx_free_count == 0) ||
+ (rx_mod->rxp_free_count == 0) ||
+ (rx_mod->rxq_free_count == 0))
+ return 0;
+
+ if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
+ if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
+ (rx_mod->rxq_free_count < rx_cfg->num_paths))
+ return 0;
+ } else {
+ if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
+ (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct bna_rxq *
+bna_rxq_get(struct bna_rx_mod *rx_mod)
+{
+ struct bna_rxq *rxq = NULL;
+ struct list_head *qe = NULL;
+
+ bfa_q_deq(&rx_mod->rxq_free_q, &qe);
+ rx_mod->rxq_free_count--;
+ rxq = (struct bna_rxq *)qe;
+ bfa_q_qe_init(&rxq->qe);
+
+ return rxq;
+}
+
+static void
+bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
+{
+ bfa_q_qe_init(&rxq->qe);
+ list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
+ rx_mod->rxq_free_count++;
+}
+
+static struct bna_rxp *
+bna_rxp_get(struct bna_rx_mod *rx_mod)
+{
+ struct list_head *qe = NULL;
+ struct bna_rxp *rxp = NULL;
+
+ bfa_q_deq(&rx_mod->rxp_free_q, &qe);
+ rx_mod->rxp_free_count--;
+ rxp = (struct bna_rxp *)qe;
+ bfa_q_qe_init(&rxp->qe);
+
+ return rxp;
+}
+
+static void
+bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
+{
+ bfa_q_qe_init(&rxp->qe);
+ list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
+ rx_mod->rxp_free_count++;
+}
+
+static struct bna_rx *
+bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
+{
+ struct list_head *qe = NULL;
+ struct bna_rx *rx = NULL;
+
+ if (type == BNA_RX_T_REGULAR)
+ bfa_q_deq(&rx_mod->rx_free_q, &qe);
+ else
+ bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
+
+ rx_mod->rx_free_count--;
+ rx = (struct bna_rx *)qe;
+ bfa_q_qe_init(&rx->qe);
+ list_add_tail(&rx->qe, &rx_mod->rx_active_q);
+ rx->type = type;
+
+ return rx;
+}
+
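+/* Return an Rx object to the free list, keeping the list sorted by rid */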
+static void
+bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
+{
+ struct list_head *prev_qe = NULL;
+ struct list_head *qe;
+
+ bfa_q_qe_init(&rx->qe);
+
+ list_for_each(qe, &rx_mod->rx_free_q) {
+ if (((struct bna_rx *)qe)->rid < rx->rid)
+ prev_qe = qe;
+ else
+ break;
+ }
+
+ if (prev_qe == NULL) {
+ /* This is the first entry */
+ bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
+ } else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
+ /* This is the last entry */
+ list_add_tail(&rx->qe, &rx_mod->rx_free_q);
+ } else {
+ /* Somewhere in the middle */
+ bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
+ bfa_q_prev(&rx->qe) = prev_qe;
+ bfa_q_next(prev_qe) = &rx->qe;
+ bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
+ }
+
+ rx_mod->rx_free_count++;
+}
+
+static void
+bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
+ struct bna_rxq *q1)
+{
+ switch (rxp->type) {
+ case BNA_RXP_SINGLE:
+ rxp->rxq.single.only = q0;
+ rxp->rxq.single.reserved = NULL;
+ break;
+ case BNA_RXP_SLR:
+ rxp->rxq.slr.large = q0;
+ rxp->rxq.slr.small = q1;
+ break;
+ case BNA_RXP_HDS:
+ rxp->rxq.hds.data = q0;
+ rxp->rxq.hds.hdr = q1;
+ break;
+ default:
+ break;
+ }
+}
+
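+/*
+ * Set up the RxQ page table: record each queue page's DMA address in the
+ * h/w QPT and its kernel virtual address in the s/w shadow QPT.
+ */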
+static void
+bna_rxq_qpt_setup(struct bna_rxq *rxq,
+ struct bna_rxp *rxp,
+ u32 page_count,
+ u32 page_size,
+ struct bna_mem_descr *qpt_mem,
+ struct bna_mem_descr *swqpt_mem,
+ struct bna_mem_descr *page_mem)
+{
+ int i;
+
+ rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
+ rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
+ rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
+ rxq->qpt.page_count = page_count;
+ rxq->qpt.page_size = page_size;
+
+ rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
+
+ for (i = 0; i < rxq->qpt.page_count; i++) {
+ rxq->rcb->sw_qpt[i] = page_mem[i].kva;
+ ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
+ page_mem[i].dma.lsb;
+ ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
+ page_mem[i].dma.msb;
+ }
+}
+
+static void
+bna_rxp_cqpt_setup(struct bna_rxp *rxp,
+ u32 page_count,
+ u32 page_size,
+ struct bna_mem_descr *qpt_mem,
+ struct bna_mem_descr *swqpt_mem,
+ struct bna_mem_descr *page_mem)
+{
+ int i;
+
+ rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
+ rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
+ rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
+ rxp->cq.qpt.page_count = page_count;
+ rxp->cq.qpt.page_size = page_size;
+
+ rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
+
+ for (i = 0; i < rxp->cq.qpt.page_count; i++) {
+ rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
+
+ ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
+ page_mem[i].dma.lsb;
+ ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
+ page_mem[i].dma.msb;
+ }
+}
+
+static void
+bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
+{
+ struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
+
+ bfa_wc_down(&rx_mod->rx_stop_wc);
+}
+
+static void
+bna_rx_mod_cb_rx_stopped_all(void *arg)
+{
+ struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
+
+ if (rx_mod->stop_cbfn)
+ rx_mod->stop_cbfn(&rx_mod->bna->enet);
+ rx_mod->stop_cbfn = NULL;
+}
+
+static void
+bna_rx_start(struct bna_rx *rx)
+{
+ rx->rx_flags |= BNA_RX_F_ENET_STARTED;
+ if (rx->rx_flags & BNA_RX_F_ENABLED)
+ bfa_fsm_send_event(rx, RX_E_START);
+}
+
+static void
+bna_rx_stop(struct bna_rx *rx)
+{
+ rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
+ if (rx->fsm == (bfa_fsm_t)bna_rx_sm_stopped) {
+ bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
+ } else {
+ rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
+ rx->stop_cbarg = &rx->bna->rx_mod;
+ bfa_fsm_send_event(rx, RX_E_STOP);
+ }
+}
+
+static void
+bna_rx_fail(struct bna_rx *rx)
+{
+ /* Indicate Enet is not enabled, and failed */
+ rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
+ bfa_fsm_send_event(rx, RX_E_FAIL);
+}
+
+void
+bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
+{
+ struct bna_rx *rx;
+ struct list_head *qe;
+
+ rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
+ if (type == BNA_RX_T_LOOPBACK)
+ rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
+
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ if (rx->type == type)
+ bna_rx_start(rx);
+ }
+}
+
+void
+bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
+{
+ struct bna_rx *rx;
+ struct list_head *qe;
+
+ rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
+ rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
+
+ rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;
+
+ bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
+
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ if (rx->type == type) {
+ bfa_wc_up(&rx_mod->rx_stop_wc);
+ bna_rx_stop(rx);
+ }
+ }
+
+ bfa_wc_wait(&rx_mod->rx_stop_wc);
+}
+
+void
+bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
+{
+ struct bna_rx *rx;
+ struct list_head *qe;
+
+ rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
+ rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
+
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ bna_rx_fail(rx);
+ }
+}
+
+void
+bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int index;
+ struct bna_rx *rx_ptr;
+ struct bna_rxp *rxp_ptr;
+ struct bna_rxq *rxq_ptr;
+
+ rx_mod->bna = bna;
+ rx_mod->flags = 0;
+
+ rx_mod->rx = (struct bna_rx *)
+ res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
+ rx_mod->rxp = (struct bna_rxp *)
+ res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
+ rx_mod->rxq = (struct bna_rxq *)
+ res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ /* Initialize the queues */
+ INIT_LIST_HEAD(&rx_mod->rx_free_q);
+ rx_mod->rx_free_count = 0;
+ INIT_LIST_HEAD(&rx_mod->rxq_free_q);
+ rx_mod->rxq_free_count = 0;
+ INIT_LIST_HEAD(&rx_mod->rxp_free_q);
+ rx_mod->rxp_free_count = 0;
+ INIT_LIST_HEAD(&rx_mod->rx_active_q);
+
+ /* Build RX queues */
+ for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
+ rx_ptr = &rx_mod->rx[index];
+
+ bfa_q_qe_init(&rx_ptr->qe);
+ INIT_LIST_HEAD(&rx_ptr->rxp_q);
+ rx_ptr->bna = NULL;
+ rx_ptr->rid = index;
+ rx_ptr->stop_cbfn = NULL;
+ rx_ptr->stop_cbarg = NULL;
+
+ list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
+ rx_mod->rx_free_count++;
+ }
+
+ /* Build RX-path queue */
+ for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
+ rxp_ptr = &rx_mod->rxp[index];
+ bfa_q_qe_init(&rxp_ptr->qe);
+ list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
+ rx_mod->rxp_free_count++;
+ }
+
+ /* Build RXQ queue */
+ for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
+ rxq_ptr = &rx_mod->rxq[index];
+ bfa_q_qe_init(&rxq_ptr->qe);
+ list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
+ rx_mod->rxq_free_count++;
+ }
+}
+
+void
+bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
+{
+ struct list_head *qe;
+ int i;
+
+ i = 0;
+ list_for_each(qe, &rx_mod->rx_free_q)
+ i++;
+
+ i = 0;
+ list_for_each(qe, &rx_mod->rxp_free_q)
+ i++;
+
+ i = 0;
+ list_for_each(qe, &rx_mod->rxq_free_q)
+ i++;
+
+ rx_mod->bna = NULL;
+}
+
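+/*
+ * F/w response to RX_CFG_SET: record the h/w queue ids, derive doorbell
+ * addresses within the mapped PCI BAR, and reset the producer/consumer
+ * indexes before moving the FSM forward.
+ */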
+void
+bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
+{
+ struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
+ struct bna_rxp *rxp = NULL;
+ struct bna_rxq *q0 = NULL, *q1 = NULL;
+ struct list_head *rxp_qe;
+ int i;
+
+ bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
+ sizeof(struct bfi_enet_rx_cfg_rsp));
+
+ rx->hw_id = cfg_rsp->hw_id;
+
+ for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
+ i < rx->num_paths;
+ i++, rxp_qe = bfa_q_next(rxp_qe)) {
+ rxp = (struct bna_rxp *)rxp_qe;
+ GET_RXQS(rxp, q0, q1);
+
+ /* Setup doorbells */
+ rxp->cq.ccb->i_dbell->doorbell_addr =
+ rx->bna->pcidev.pci_bar_kva
+ + ntohl(cfg_rsp->q_handles[i].i_dbell);
+ rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
+ q0->rcb->q_dbell =
+ rx->bna->pcidev.pci_bar_kva
+ + ntohl(cfg_rsp->q_handles[i].ql_dbell);
+ q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
+ if (q1) {
+ q1->rcb->q_dbell =
+ rx->bna->pcidev.pci_bar_kva
+ + ntohl(cfg_rsp->q_handles[i].qs_dbell);
+ q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
+ }
+
+ /* Initialize producer/consumer indexes */
+ (*rxp->cq.ccb->hw_producer_index) = 0;
+ rxp->cq.ccb->producer_index = 0;
+ q0->rcb->producer_index = q0->rcb->consumer_index = 0;
+ if (q1)
+ q1->rcb->producer_index = q1->rcb->consumer_index = 0;
+ }
+
+ bfa_fsm_send_event(rx, RX_E_STARTED);
+}
+
+void
+bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
+{
+ bfa_fsm_send_event(rx, RX_E_STOPPED);
+}
+
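+/*
+ * Translate an Rx configuration into memory and interrupt requirements.
+ * Queue depths are rounded up to a power of 2 and queue sizes to a multiple
+ * of PAGE_SIZE before the page counts are derived.
+ */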
+void
+bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
+{
+ u32 cq_size, hq_size, dq_size;
+ u32 cpage_count, hpage_count, dpage_count;
+ struct bna_mem_info *mem_info;
+ u32 cq_depth;
+ u32 hq_depth;
+ u32 dq_depth;
+
+ dq_depth = q_cfg->q_depth;
+ hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
+ cq_depth = dq_depth + hq_depth;
+
+ BNA_TO_POWER_OF_2_HIGH(cq_depth);
+ cq_size = cq_depth * BFI_CQ_WI_SIZE;
+ cq_size = ALIGN(cq_size, PAGE_SIZE);
+ cpage_count = SIZE_TO_PAGES(cq_size);
+
+ BNA_TO_POWER_OF_2_HIGH(dq_depth);
+ dq_size = dq_depth * BFI_RXQ_WI_SIZE;
+ dq_size = ALIGN(dq_size, PAGE_SIZE);
+ dpage_count = SIZE_TO_PAGES(dq_size);
+
+ if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
+ BNA_TO_POWER_OF_2_HIGH(hq_depth);
+ hq_size = hq_depth * BFI_RXQ_WI_SIZE;
+ hq_size = ALIGN(hq_size, PAGE_SIZE);
+ hpage_count = SIZE_TO_PAGES(hq_size);
+ } else {
+ hpage_count = 0;
+ }
+
+ res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = sizeof(struct bna_ccb);
+ mem_info->num = q_cfg->num_paths;
+
+ res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = sizeof(struct bna_rcb);
+ mem_info->num = BNA_GET_RXQS(q_cfg);
+
+ res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
+ mem_info->num = q_cfg->num_paths;
+
+ res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = cpage_count * sizeof(void *);
+ mem_info->num = q_cfg->num_paths;
+
+ res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = PAGE_SIZE;
+ mem_info->num = cpage_count * q_cfg->num_paths;
+
+ res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
+ mem_info->num = q_cfg->num_paths;
+
+ res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = dpage_count * sizeof(void *);
+ mem_info->num = q_cfg->num_paths;
+
+ res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = PAGE_SIZE;
+ mem_info->num = dpage_count * q_cfg->num_paths;
+
+ res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
+ mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
+
+ res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = hpage_count * sizeof(void *);
+ mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
+
+ res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = (hpage_count ? PAGE_SIZE : 0);
+ mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);
+
+ res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = BFI_IBIDX_SIZE;
+ mem_info->num = q_cfg->num_paths;
+
+ res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = BFI_ENET_RSS_RIT_MAX;
+ mem_info->num = 1;
+
+ res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
+ res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
+ res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
+}
+
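+/*
+ * Create an Rx object from the reserved resources: carve out RxPs and RxQs,
+ * wire up the RCBs, CCBs, IBs and queue page tables for every path,
+ * initialize the Rx filter, and leave the FSM in the stopped state.
+ */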
+struct bna_rx *
+bna_rx_create(struct bna *bna, struct bnad *bnad,
+ struct bna_rx_config *rx_cfg,
+ const struct bna_rx_event_cbfn *rx_cbfn,
+ struct bna_res_info *res_info,
+ void *priv)
+{
+ struct bna_rx_mod *rx_mod = &bna->rx_mod;
+ struct bna_rx *rx;
+ struct bna_rxp *rxp;
+ struct bna_rxq *q0;
+ struct bna_rxq *q1;
+ struct bna_intr_info *intr_info;
+ u32 page_count;
+ struct bna_mem_descr *ccb_mem;
+ struct bna_mem_descr *rcb_mem;
+ struct bna_mem_descr *unmapq_mem;
+ struct bna_mem_descr *cqpt_mem;
+ struct bna_mem_descr *cswqpt_mem;
+ struct bna_mem_descr *cpage_mem;
+ struct bna_mem_descr *hqpt_mem;
+ struct bna_mem_descr *dqpt_mem;
+ struct bna_mem_descr *hsqpt_mem;
+ struct bna_mem_descr *dsqpt_mem;
+ struct bna_mem_descr *hpage_mem;
+ struct bna_mem_descr *dpage_mem;
+ int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0;
+ int dpage_count, hpage_count, rcb_idx;
+
+ if (!bna_rx_res_check(rx_mod, rx_cfg))
+ return NULL;
+
+ intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
+ ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
+ rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
+ unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
+ cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
+ cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
+ cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
+ hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
+ dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
+ hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
+ dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
+ hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
+ dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
+
+ page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
+ rx_cfg->num_paths;
+
+ dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
+ rx_cfg->num_paths;
+
+ hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
+ rx_cfg->num_paths;
+
+ rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
+ rx->bna = bna;
+ rx->rx_flags = 0;
+ INIT_LIST_HEAD(&rx->rxp_q);
+ rx->stop_cbfn = NULL;
+ rx->stop_cbarg = NULL;
+ rx->priv = priv;
+
+ rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
+ rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
+ rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
+ rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
+ rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
+ /* Following callbacks are mandatory */
+ rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
+ rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
+
+ if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
+ switch (rx->type) {
+ case BNA_RX_T_REGULAR:
+ if (!(rx->bna->rx_mod.flags &
+ BNA_RX_MOD_F_ENET_LOOPBACK))
+ rx->rx_flags |= BNA_RX_F_ENET_STARTED;
+ break;
+ case BNA_RX_T_LOOPBACK:
+ if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
+ rx->rx_flags |= BNA_RX_F_ENET_STARTED;
+ break;
+ }
+ }
+
+ rx->num_paths = rx_cfg->num_paths;
+ for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
+ rxp = bna_rxp_get(rx_mod);
+ list_add_tail(&rxp->qe, &rx->rxp_q);
+ rxp->type = rx_cfg->rxp_type;
+ rxp->rx = rx;
+ rxp->cq.rx = rx;
+
+ q0 = bna_rxq_get(rx_mod);
+ if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
+ q1 = NULL;
+ else
+ q1 = bna_rxq_get(rx_mod);
+
+ if (1 == intr_info->num)
+ rxp->vector = intr_info->idl[0].vector;
+ else
+ rxp->vector = intr_info->idl[i].vector;
+
+ /* Setup IB */
+
+ rxp->cq.ib.ib_seg_host_addr.lsb =
+ res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
+ rxp->cq.ib.ib_seg_host_addr.msb =
+ res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
+ rxp->cq.ib.ib_seg_host_addr_kva =
+ res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
+ rxp->cq.ib.intr_type = intr_info->intr_type;
+ if (intr_info->intr_type == BNA_INTR_T_MSIX)
+ rxp->cq.ib.intr_vector = rxp->vector;
+ else
+ rxp->cq.ib.intr_vector = (1 << rxp->vector);
+ rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
+ rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
+ rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
+
+ bna_rxp_add_rxqs(rxp, q0, q1);
+
+ /* Setup large Q */
+
+ q0->rx = rx;
+ q0->rxp = rxp;
+
+ q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
+ q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
+ rcb_idx++;
+ q0->rcb->q_depth = rx_cfg->q_depth;
+ q0->rcb->rxq = q0;
+ q0->rcb->bnad = bna->bnad;
+ q0->rcb->id = 0;
+ q0->rx_packets = q0->rx_bytes = 0;
+ q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
+
+ bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
+ &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
+ q0->rcb->page_idx = dpage_idx;
+ q0->rcb->page_count = dpage_count;
+ dpage_idx += dpage_count;
+
+ if (rx->rcb_setup_cbfn)
+ rx->rcb_setup_cbfn(bnad, q0->rcb);
+
+ /* Setup small Q */
+
+ if (q1) {
+ q1->rx = rx;
+ q1->rxp = rxp;
+
+ q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
+ q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
+ rcb_idx++;
+ q1->rcb->q_depth = rx_cfg->q_depth;
+ q1->rcb->rxq = q1;
+ q1->rcb->bnad = bna->bnad;
+ q1->rcb->id = 1;
+ q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
+ rx_cfg->hds_config.forced_offset
+ : rx_cfg->small_buff_size;
+ q1->rx_packets = q1->rx_bytes = 0;
+ q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
+
+ bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
+ &hqpt_mem[i], &hsqpt_mem[i],
+ &hpage_mem[hpage_idx]);
+ q1->rcb->page_idx = hpage_idx;
+ q1->rcb->page_count = hpage_count;
+ hpage_idx += hpage_count;
+
+ if (rx->rcb_setup_cbfn)
+ rx->rcb_setup_cbfn(bnad, q1->rcb);
+ }
+
+ /* Setup CQ */
+
+ rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
+ rxp->cq.ccb->q_depth = rx_cfg->q_depth +
+ ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
+ 0 : rx_cfg->q_depth);
+ rxp->cq.ccb->cq = &rxp->cq;
+ rxp->cq.ccb->rcb[0] = q0->rcb;
+ q0->rcb->ccb = rxp->cq.ccb;
+ if (q1) {
+ rxp->cq.ccb->rcb[1] = q1->rcb;
+ q1->rcb->ccb = rxp->cq.ccb;
+ }
+ rxp->cq.ccb->hw_producer_index =
+ (u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
+ rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
+ rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
+ rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
+ rxp->cq.ccb->rx_coalescing_timeo =
+ rxp->cq.ib.coalescing_timeo;
+ rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
+ rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
+ rxp->cq.ccb->bnad = bna->bnad;
+ rxp->cq.ccb->id = i;
+
+ bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
+ &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
+ rxp->cq.ccb->page_idx = cpage_idx;
+ rxp->cq.ccb->page_count = page_count;
+ cpage_idx += page_count;
+
+ if (rx->ccb_setup_cbfn)
+ rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
+ }
+
+ rx->hds_cfg = rx_cfg->hds_config;
+
+ bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);
+
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+
+ rx_mod->rid_mask |= (1 << rx->rid);
+
+ return rx;
+}
+
+void
+bna_rx_destroy(struct bna_rx *rx)
+{
+ struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
+ struct bna_rxq *q0 = NULL;
+ struct bna_rxq *q1 = NULL;
+ struct bna_rxp *rxp;
+ struct list_head *qe;
+
+ bna_rxf_uninit(&rx->rxf);
+
+ while (!list_empty(&rx->rxp_q)) {
+ bfa_q_deq(&rx->rxp_q, &rxp);
+ GET_RXQS(rxp, q0, q1);
+ if (rx->rcb_destroy_cbfn)
+ rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
+ q0->rcb = NULL;
+ q0->rxp = NULL;
+ q0->rx = NULL;
+ bna_rxq_put(rx_mod, q0);
+
+ if (q1) {
+ if (rx->rcb_destroy_cbfn)
+ rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
+ q1->rcb = NULL;
+ q1->rxp = NULL;
+ q1->rx = NULL;
+ bna_rxq_put(rx_mod, q1);
+ }
+ rxp->rxq.slr.large = NULL;
+ rxp->rxq.slr.small = NULL;
+
+ if (rx->ccb_destroy_cbfn)
+ rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
+ rxp->cq.ccb = NULL;
+ rxp->rx = NULL;
+ bna_rxp_put(rx_mod, rxp);
+ }
+
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ if (qe == &rx->qe) {
+ list_del(&rx->qe);
+ bfa_q_qe_init(&rx->qe);
+ break;
+ }
+ }
+
+ rx_mod->rid_mask &= ~(1 << rx->rid);
+
+ rx->bna = NULL;
+ rx->priv = NULL;
+ bna_rx_put(rx_mod, rx);
+}
+
+void
+bna_rx_enable(struct bna_rx *rx)
+{
+ if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
+ return;
+
+ rx->rx_flags |= BNA_RX_F_ENABLED;
+ if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
+ bfa_fsm_send_event(rx, RX_E_START);
+}
+
+void
+bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
+ void (*cbfn)(void *, struct bna_rx *))
+{
+ if (type == BNA_SOFT_CLEANUP) {
+ /* h/w should not be accessed. Treat as if we are stopped */
+ (*cbfn)(rx->bna->bnad, rx);
+ } else {
+ rx->stop_cbfn = cbfn;
+ rx->stop_cbarg = rx->bna->bnad;
+
+ rx->rx_flags &= ~BNA_RX_F_ENABLED;
+
+ bfa_fsm_send_event(rx, RX_E_STOP);
+ }
+}
+
+void
+bna_rx_cleanup_complete(struct bna_rx *rx)
+{
+ bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
+}
+
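+/*
+ * Apply promiscuous/default/allmulti mode changes. Promiscuous and default
+ * modes are mutually exclusive system-wide; h/w is reconfigured only when
+ * the effective filter state actually changes.
+ */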
+enum bna_cb_status
+bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
+ enum bna_rxmode bitmask,
+ void (*cbfn)(struct bnad *, struct bna_rx *))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ int need_hw_config = 0;
+
+ /* Error checks */
+
+ if (is_promisc_enable(new_mode, bitmask)) {
+ /* If promisc mode is already enabled elsewhere in the system */
+ if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
+ (rx->bna->promisc_rid != rxf->rx->rid))
+ goto err_return;
+
+ /* If default mode is already enabled in the system */
+ if (rx->bna->default_mode_rid != BFI_INVALID_RID)
+ goto err_return;
+
+ /* Trying to enable promiscuous and default mode together */
+ if (is_default_enable(new_mode, bitmask))
+ goto err_return;
+ }
+
+ if (is_default_enable(new_mode, bitmask)) {
+ /* If default mode is already enabled elsewhere in the system */
+ if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
+ (rx->bna->default_mode_rid != rxf->rx->rid)) {
+ goto err_return;
+ }
+
+ /* If promiscuous mode is already enabled in the system */
+ if (rx->bna->promisc_rid != BFI_INVALID_RID)
+ goto err_return;
+ }
+
+ /* Process the commands */
+
+ if (is_promisc_enable(new_mode, bitmask)) {
+ if (bna_rxf_promisc_enable(rxf))
+ need_hw_config = 1;
+ } else if (is_promisc_disable(new_mode, bitmask)) {
+ if (bna_rxf_promisc_disable(rxf))
+ need_hw_config = 1;
+ }
+
+ if (is_allmulti_enable(new_mode, bitmask)) {
+ if (bna_rxf_allmulti_enable(rxf))
+ need_hw_config = 1;
+ } else if (is_allmulti_disable(new_mode, bitmask)) {
+ if (bna_rxf_allmulti_disable(rxf))
+ need_hw_config = 1;
+ }
+
+ /* Trigger h/w if needed */
+
+ if (need_hw_config) {
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ } else if (cbfn) {
+ (*cbfn)(rx->bna->bnad, rx);
+ }
+
+ return BNA_CB_SUCCESS;
+
+err_return:
+ return BNA_CB_FAIL;
+}
+
+void
+bna_rx_vlanfilter_enable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
+ rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
+ rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
+ bfa_fsm_send_event(rxf, RXF_E_CONFIG);
+ }
+}
+
+void
+bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
+{
+ struct bna_rxp *rxp;
+ struct list_head *qe;
+
+ list_for_each(qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe;
+ rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
+ bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
+ }
+}
+
+void
+bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
+{
+ int i, j;
+
+ for (i = 0; i < BNA_LOAD_T_MAX; i++)
+ for (j = 0; j < BNA_BIAS_T_MAX; j++)
+ bna->rx_mod.dim_vector[i][j] = vector[i][j];
+}
+
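+/*
+ * Dynamic interrupt moderation: bucket the sampled packet rate into a load
+ * level, bias it by the small/large packet mix, and program the resulting
+ * coalescing timeout from the DIM vector into the IB.
+ */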
+void
+bna_rx_dim_update(struct bna_ccb *ccb)
+{
+ struct bna *bna = ccb->cq->rx->bna;
+ u32 load, bias;
+ u32 pkt_rt, small_rt, large_rt;
+ u8 coalescing_timeo;
+
+ if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
+ (ccb->pkt_rate.large_pkt_cnt == 0))
+ return;
+
+ /* Arrive at preconfigured coalescing timeo value based on pkt rate */
+
+ small_rt = ccb->pkt_rate.small_pkt_cnt;
+ large_rt = ccb->pkt_rate.large_pkt_cnt;
+
+ pkt_rt = small_rt + large_rt;
+
+ if (pkt_rt < BNA_PKT_RATE_10K)
+ load = BNA_LOAD_T_LOW_4;
+ else if (pkt_rt < BNA_PKT_RATE_20K)
+ load = BNA_LOAD_T_LOW_3;
+ else if (pkt_rt < BNA_PKT_RATE_30K)
+ load = BNA_LOAD_T_LOW_2;
+ else if (pkt_rt < BNA_PKT_RATE_40K)
+ load = BNA_LOAD_T_LOW_1;
+ else if (pkt_rt < BNA_PKT_RATE_50K)
+ load = BNA_LOAD_T_HIGH_1;
+ else if (pkt_rt < BNA_PKT_RATE_60K)
+ load = BNA_LOAD_T_HIGH_2;
+ else if (pkt_rt < BNA_PKT_RATE_80K)
+ load = BNA_LOAD_T_HIGH_3;
+ else
+ load = BNA_LOAD_T_HIGH_4;
+
+ if (small_rt > (large_rt << 1))
+ bias = 0;
+ else
+ bias = 1;
+
+ ccb->pkt_rate.small_pkt_cnt = 0;
+ ccb->pkt_rate.large_pkt_cnt = 0;
+
+ coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
+ ccb->rx_coalescing_timeo = coalescing_timeo;
+
+ /* Set it to IB */
+ bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
+}
+
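+/* Coalescing timeout table indexed by [load][bias]; see bna_rx_dim_update() */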
+const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
+ {12, 12},
+ {6, 10},
+ {5, 10},
+ {4, 8},
+ {3, 6},
+ {3, 6},
+ {2, 4},
+ {1, 2},
+};
+
+/**
+ * TX
+ */
+#define call_tx_stop_cbfn(tx) \
+do { \
+ if ((tx)->stop_cbfn) { \
+ void (*cbfn)(void *, struct bna_tx *); \
+ void *cbarg; \
+ cbfn = (tx)->stop_cbfn; \
+ cbarg = (tx)->stop_cbarg; \
+ (tx)->stop_cbfn = NULL; \
+ (tx)->stop_cbarg = NULL; \
+ cbfn(cbarg, (tx)); \
+ } \
+} while (0)
+
+#define call_tx_prio_change_cbfn(tx) \
+do { \
+ if ((tx)->prio_change_cbfn) { \
+ void (*cbfn)(struct bnad *, struct bna_tx *); \
+ cbfn = (tx)->prio_change_cbfn; \
+ (tx)->prio_change_cbfn = NULL; \
+ cbfn((tx)->bna->bnad, (tx)); \
+ } \
+} while (0)
+
+static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
+static void bna_bfi_tx_enet_start(struct bna_tx *tx);
+static void bna_tx_enet_stop(struct bna_tx *tx);
+
+enum bna_tx_event {
+ TX_E_START = 1,
+ TX_E_STOP = 2,
+ TX_E_FAIL = 3,
+ TX_E_STARTED = 4,
+ TX_E_STOPPED = 5,
+ TX_E_PRIO_CHANGE = 6,
+ TX_E_CLEANUP_DONE = 7,
+ TX_E_BW_UPDATE = 8,
+};
+
+bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
+ enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
+ enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
+ enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
+ enum bna_tx_event);
+
+static void
+bna_tx_sm_stopped_entry(struct bna_tx *tx)
+{
+ call_tx_stop_cbfn(tx);
+}
+
+static void
+bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_START:
+ bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
+ break;
+
+ case TX_E_STOP:
+ call_tx_stop_cbfn(tx);
+ break;
+
+ case TX_E_FAIL:
+ /* No-op */
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ call_tx_prio_change_cbfn(tx);
+ break;
+
+ case TX_E_BW_UPDATE:
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_tx_sm_start_wait_entry(struct bna_tx *tx)
+{
+ bna_bfi_tx_enet_start(tx);
+}
+
+static void
+bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_STOP:
+ tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
+ bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
+ break;
+
+ case TX_E_FAIL:
+ tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ case TX_E_STARTED:
+ if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
+ tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
+ BNA_TX_F_BW_UPDATED);
+ bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
+ } else {
+ bfa_fsm_set_state(tx, bna_tx_sm_started);
+ }
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ tx->flags |= BNA_TX_F_PRIO_CHANGED;
+ break;
+
+ case TX_E_BW_UPDATE:
+ tx->flags |= BNA_TX_F_BW_UPDATED;
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_tx_sm_started_entry(struct bna_tx *tx)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+ int is_regular = (tx->type == BNA_TX_T_REGULAR);
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ txq->tcb->priority = txq->priority;
+ /* Start IB */
+ bna_ib_start(tx->bna, &txq->ib, is_regular);
+ }
+ tx->tx_resume_cbfn(tx->bna->bnad, tx);
+}
+
+static void
+bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_STOP:
+ bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
+ tx->tx_stall_cbfn(tx->bna->bnad, tx);
+ bna_tx_enet_stop(tx);
+ break;
+
+ case TX_E_FAIL:
+ bfa_fsm_set_state(tx, bna_tx_sm_failed);
+ tx->tx_stall_cbfn(tx->bna->bnad, tx);
+ tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ case TX_E_BW_UPDATE:
+ bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
+{
+}
+
+static void
+bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_FAIL:
+ case TX_E_STOPPED:
+ bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
+ tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
+ break;
+
+ case TX_E_STARTED:
+ /**
+ * We are here due to start_wait -> stop_wait transition on
+ * TX_E_STOP event
+ */
+ bna_tx_enet_stop(tx);
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ case TX_E_BW_UPDATE:
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
+{
+}
+
+static void
+bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_FAIL:
+ case TX_E_PRIO_CHANGE:
+ case TX_E_BW_UPDATE:
+ /* No-op */
+ break;
+
+ case TX_E_CLEANUP_DONE:
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
+{
+ tx->tx_stall_cbfn(tx->bna->bnad, tx);
+ bna_tx_enet_stop(tx);
+}
+
+static void
+bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_STOP:
+ bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
+ break;
+
+ case TX_E_FAIL:
+ bfa_fsm_set_state(tx, bna_tx_sm_failed);
+ call_tx_prio_change_cbfn(tx);
+ tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
+ break;
+
+ case TX_E_STOPPED:
+ bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ case TX_E_BW_UPDATE:
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
+{
+ call_tx_prio_change_cbfn(tx);
+ tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
+}
+
+static void
+bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_STOP:
+ bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
+ break;
+
+ case TX_E_FAIL:
+ bfa_fsm_set_state(tx, bna_tx_sm_failed);
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ case TX_E_BW_UPDATE:
+ /* No-op */
+ break;
+
+ case TX_E_CLEANUP_DONE:
+ bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_tx_sm_failed_entry(struct bna_tx *tx)
+{
+}
+
+static void
+bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_START:
+ bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
+ break;
+
+ case TX_E_STOP:
+ bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
+ break;
+
+ case TX_E_FAIL:
+ /* No-op */
+ break;
+
+ case TX_E_CLEANUP_DONE:
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
+static void
+bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
+{
+}
+
+static void
+bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_STOP:
+ bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
+ break;
+
+ case TX_E_FAIL:
+ bfa_fsm_set_state(tx, bna_tx_sm_failed);
+ break;
+
+ case TX_E_CLEANUP_DONE:
+ bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
+ break;
+
+ case TX_E_BW_UPDATE:
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(event);
+ }
+}
+
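+/*
+ * Build a BFI_ENET_H2I_TX_CFG_SET_REQ covering every TxQ and its IB, and
+ * post it to the firmware message queue.
+ */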
+static void
+bna_bfi_tx_enet_start(struct bna_tx *tx)
+{
+ struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
+ struct bna_txq *txq = NULL;
+ struct list_head *qe;
+ int i;
+
+ bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
+ cfg_req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
+
+ cfg_req->num_queues = tx->num_txq;
+ for (i = 0, qe = bfa_q_first(&tx->txq_q);
+ i < tx->num_txq;
+ i++, qe = bfa_q_next(qe)) {
+ txq = (struct bna_txq *)qe;
+
+ bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
+ cfg_req->q_cfg[i].q.priority = txq->priority;
+
+ cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
+ txq->ib.ib_seg_host_addr.lsb;
+ cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
+ txq->ib.ib_seg_host_addr.msb;
+ cfg_req->q_cfg[i].ib.intr.msix_index =
+ htons((u16)txq->ib.intr_vector);
+ }
+
+ cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
+ cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
+ cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
+ cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
+ cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
+ ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
+ cfg_req->ib_cfg.coalescing_timeout =
+ htonl((u32)txq->ib.coalescing_timeo);
+ cfg_req->ib_cfg.inter_pkt_timeout =
+ htonl((u32)txq->ib.interpkt_timeo);
+ cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;
+
+ cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
+ cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
+ cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED;
+ cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;
+
+ bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
+ sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
+ bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
+}
+
+static void
+bna_bfi_tx_enet_stop(struct bna_tx *tx)
+{
+ struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;
+
+ bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
+ BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
+ req->mh.num_entries = htons(
+ bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
+ bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
+ &req->mh);
+ bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
+}
+
+static void
+bna_tx_enet_stop(struct bna_tx *tx)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ /* Stop IB */
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_stop(tx->bna, &txq->ib);
+ }
+
+ bna_bfi_tx_enet_stop(tx);
+}
+
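+/* Fill the TxQ h/w page table and its s/w shadow, one entry per page */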
+static void
+bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
+ struct bna_mem_descr *qpt_mem,
+ struct bna_mem_descr *swqpt_mem,
+ struct bna_mem_descr *page_mem)
+{
+ int i;
+
+ txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
+ txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
+ txq->qpt.kv_qpt_ptr = qpt_mem->kva;
+ txq->qpt.page_count = page_count;
+ txq->qpt.page_size = page_size;
+
+ txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
+
+ for (i = 0; i < page_count; i++) {
+ txq->tcb->sw_qpt[i] = page_mem[i].kva;
+
+ ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
+ page_mem[i].dma.lsb;
+ ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
+ page_mem[i].dma.msb;
+ }
+}
+
+static struct bna_tx *
+bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
+{
+ struct list_head *qe = NULL;
+ struct bna_tx *tx = NULL;
+
+ if (list_empty(&tx_mod->tx_free_q))
+ return NULL;
+ if (type == BNA_TX_T_REGULAR)
+ bfa_q_deq(&tx_mod->tx_free_q, &qe);
+ else
+ bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
+ tx = (struct bna_tx *)qe;
+ bfa_q_qe_init(&tx->qe);
+ tx->type = type;
+
+ return tx;
+}
+
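+/*
+ * Release a Tx object: return its TxQs to the free list, unlink it from the
+ * active list, and reinsert it into the free list sorted by rid.
+ */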
+static void
+bna_tx_free(struct bna_tx *tx)
+{
+ struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
+ struct bna_txq *txq;
+ struct list_head *prev_qe;
+ struct list_head *qe;
+
+ while (!list_empty(&tx->txq_q)) {
+ bfa_q_deq(&tx->txq_q, &txq);
+ bfa_q_qe_init(&txq->qe);
+ txq->tcb = NULL;
+ txq->tx = NULL;
+ list_add_tail(&txq->qe, &tx_mod->txq_free_q);
+ }
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ if (qe == &tx->qe) {
+ list_del(&tx->qe);
+ bfa_q_qe_init(&tx->qe);
+ break;
+ }
+ }
+
+ tx->bna = NULL;
+ tx->priv = NULL;
+
+ prev_qe = NULL;
+ list_for_each(qe, &tx_mod->tx_free_q) {
+ if (((struct bna_tx *)qe)->rid < tx->rid)
+ prev_qe = qe;
+ else
+ break;
+ }
+
+ if (prev_qe == NULL) {
+ /* This is the first entry */
+ bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
+ } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
+ /* This is the last entry */
+ list_add_tail(&tx->qe, &tx_mod->tx_free_q);
+ } else {
+ /* Somewhere in the middle */
+ bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
+ bfa_q_prev(&tx->qe) = prev_qe;
+ bfa_q_next(prev_qe) = &tx->qe;
+ bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
+ }
+}
+
+static void
+bna_tx_start(struct bna_tx *tx)
+{
+ tx->flags |= BNA_TX_F_ENET_STARTED;
+ if (tx->flags & BNA_TX_F_ENABLED)
+ bfa_fsm_send_event(tx, TX_E_START);
+}
+
+static void
+bna_tx_stop(struct bna_tx *tx)
+{
+ tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
+ tx->stop_cbarg = &tx->bna->tx_mod;
+
+ tx->flags &= ~BNA_TX_F_ENET_STARTED;
+ bfa_fsm_send_event(tx, TX_E_STOP);
+}
+
+static void
+bna_tx_fail(struct bna_tx *tx)
+{
+ tx->flags &= ~BNA_TX_F_ENET_STARTED;
+ bfa_fsm_send_event(tx, TX_E_FAIL);
+}
+
+void
+bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
+{
+ struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
+ struct bna_txq *txq = NULL;
+ struct list_head *qe;
+ int i;
+
+ bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
+ sizeof(struct bfi_enet_tx_cfg_rsp));
+
+ tx->hw_id = cfg_rsp->hw_id;
+
+ for (i = 0, qe = bfa_q_first(&tx->txq_q);
+ i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
+ txq = (struct bna_txq *)qe;
+
+ /* Setup doorbells */
+ txq->tcb->i_dbell->doorbell_addr =
+ tx->bna->pcidev.pci_bar_kva
+ + ntohl(cfg_rsp->q_handles[i].i_dbell);
+ txq->tcb->q_dbell =
+ tx->bna->pcidev.pci_bar_kva
+ + ntohl(cfg_rsp->q_handles[i].q_dbell);
+ txq->hw_id = cfg_rsp->q_handles[i].hw_qid;
+
+ /* Initialize producer/consumer indexes */
+ (*txq->tcb->hw_consumer_index) = 0;
+ txq->tcb->producer_index = txq->tcb->consumer_index = 0;
+ }
+
+ bfa_fsm_send_event(tx, TX_E_STARTED);
+}
+
+void
+bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
+{
+ bfa_fsm_send_event(tx, TX_E_STOPPED);
+}
+
+void
+bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
+ }
+}
+
+void
+bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
+{
+ u32 q_size;
+ u32 page_count;
+ struct bna_mem_info *mem_info;
+
+ res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = sizeof(struct bna_tcb);
+ mem_info->num = num_txq;
+
+ q_size = txq_depth * BFI_TXQ_WI_SIZE;
+ q_size = ALIGN(q_size, PAGE_SIZE);
+ page_count = q_size >> PAGE_SHIFT;
+
+ res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = page_count * sizeof(struct bna_dma_addr);
+ mem_info->num = num_txq;
+
+ res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = page_count * sizeof(void *);
+ mem_info->num = num_txq;
+
+ res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = PAGE_SIZE;
+ mem_info->num = num_txq * page_count;
+
+ res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = BFI_IBIDX_SIZE;
+ mem_info->num = num_txq;
+
+ res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
+ res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
+ BNA_INTR_T_MSIX;
+ res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
+}
+
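+/*
+ * Create a Tx object: allocate TxQs from the free list, set up the per-queue
+ * IBs, TCBs and page tables, assign priorities, and leave the FSM in the
+ * stopped state.
+ */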
+struct bna_tx *
+bna_tx_create(struct bna *bna, struct bnad *bnad,
+ struct bna_tx_config *tx_cfg,
+ const struct bna_tx_event_cbfn *tx_cbfn,
+ struct bna_res_info *res_info, void *priv)
+{
+ struct bna_intr_info *intr_info;
+ struct bna_tx_mod *tx_mod = &bna->tx_mod;
+ struct bna_tx *tx;
+ struct bna_txq *txq;
+ struct list_head *qe;
+ int page_count;
+ int page_size;
+ int page_idx;
+ int i;
+
+ intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
+ page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
+ tx_cfg->num_txq;
+ page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;
+
+ /**
+ * Get resources
+ */
+
+ if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
+ return NULL;
+
+ /* Tx */
+
+ tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
+ if (!tx)
+ return NULL;
+ tx->bna = bna;
+ tx->priv = priv;
+
+ /* TxQs */
+
+ INIT_LIST_HEAD(&tx->txq_q);
+ for (i = 0; i < tx_cfg->num_txq; i++) {
+ if (list_empty(&tx_mod->txq_free_q))
+ goto err_return;
+
+ bfa_q_deq(&tx_mod->txq_free_q, &txq);
+ bfa_q_qe_init(&txq->qe);
+ list_add_tail(&txq->qe, &tx->txq_q);
+ txq->tx = tx;
+ }
+
+ /*
+ * Initialize
+ */
+
+ /* Tx */
+
+ tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
+ tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
+ /* Following callbacks are mandatory */
+ tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
+ tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
+ tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
+
+ list_add_tail(&tx->qe, &tx_mod->tx_active_q);
+
+ tx->num_txq = tx_cfg->num_txq;
+
+ tx->flags = 0;
+ if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
+ switch (tx->type) {
+ case BNA_TX_T_REGULAR:
+ if (!(tx->bna->tx_mod.flags &
+ BNA_TX_MOD_F_ENET_LOOPBACK))
+ tx->flags |= BNA_TX_F_ENET_STARTED;
+ break;
+ case BNA_TX_T_LOOPBACK:
+ if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
+ tx->flags |= BNA_TX_F_ENET_STARTED;
+ break;
+ }
+ }
+
+ /* TxQ */
+
+ i = 0;
+ page_idx = 0;
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ txq->tcb = (struct bna_tcb *)
+ res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
+ txq->tx_packets = 0;
+ txq->tx_bytes = 0;
+
+ /* IB */
+ txq->ib.ib_seg_host_addr.lsb =
+ res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
+ txq->ib.ib_seg_host_addr.msb =
+ res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
+ txq->ib.ib_seg_host_addr_kva =
+ res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
+ txq->ib.intr_type = intr_info->intr_type;
+ txq->ib.intr_vector = (intr_info->num == 1) ?
+ intr_info->idl[0].vector :
+ intr_info->idl[i].vector;
+ if (intr_info->intr_type == BNA_INTR_T_INTX)
+ txq->ib.intr_vector = (1 << txq->ib.intr_vector);
+ txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
+ txq->ib.interpkt_timeo = 0; /* Not used */
+ txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
+
+ /* TCB */
+
+ txq->tcb->q_depth = tx_cfg->txq_depth;
+ txq->tcb->unmap_q = (void *)
+ res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
+ txq->tcb->hw_consumer_index =
+ (u32 *)txq->ib.ib_seg_host_addr_kva;
+ txq->tcb->i_dbell = &txq->ib.door_bell;
+ txq->tcb->intr_type = txq->ib.intr_type;
+ txq->tcb->intr_vector = txq->ib.intr_vector;
+ txq->tcb->txq = txq;
+ txq->tcb->bnad = bnad;
+ txq->tcb->id = i;
+
+ /* QPT, SWQPT, Pages */
+ bna_txq_qpt_setup(txq, page_count, page_size,
+ &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
+ &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
+ &res_info[BNA_TX_RES_MEM_T_PAGE].
+ res_u.mem_info.mdl[page_idx]);
+ txq->tcb->page_idx = page_idx;
+ txq->tcb->page_count = page_count;
+ page_idx += page_count;
+
+ /* Callback to bnad for setting up TCB */
+ if (tx->tcb_setup_cbfn)
+ (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
+
+ if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
+ txq->priority = txq->tcb->id;
+ else
+ txq->priority = tx_mod->default_prio;
+
+ i++;
+ }
+
+ tx->txf_vlan_id = 0;
+
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+
+ tx_mod->rid_mask |= (1 << tx->rid);
+
+ return tx;
+
+err_return:
+ bna_tx_free(tx);
+ return NULL;
+}
+
+void
+bna_tx_destroy(struct bna_tx *tx)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ if (tx->tcb_destroy_cbfn)
+ (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
+ }
+
+ tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
+ bna_tx_free(tx);
+}
+
+void
+bna_tx_enable(struct bna_tx *tx)
+{
+ if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
+ return;
+
+ tx->flags |= BNA_TX_F_ENABLED;
+
+ if (tx->flags & BNA_TX_F_ENET_STARTED)
+ bfa_fsm_send_event(tx, TX_E_START);
+}
+
+void
+bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
+ void (*cbfn)(void *, struct bna_tx *))
+{
+ if (type == BNA_SOFT_CLEANUP) {
+ (*cbfn)(tx->bna->bnad, tx);
+ return;
+ }
+
+ tx->stop_cbfn = cbfn;
+ tx->stop_cbarg = tx->bna->bnad;
+
+ tx->flags &= ~BNA_TX_F_ENABLED;
+
+ bfa_fsm_send_event(tx, TX_E_STOP);
+}
+
+void
+bna_tx_cleanup_complete(struct bna_tx *tx)
+{
+ bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
+}
+
+static void
+bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
+{
+ struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
+
+ bfa_wc_down(&tx_mod->tx_stop_wc);
+}
+
+static void
+bna_tx_mod_cb_tx_stopped_all(void *arg)
+{
+ struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
+
+ if (tx_mod->stop_cbfn)
+ tx_mod->stop_cbfn(&tx_mod->bna->enet);
+ tx_mod->stop_cbfn = NULL;
+}
+
+void
+bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int i;
+
+ tx_mod->bna = bna;
+ tx_mod->flags = 0;
+
+ tx_mod->tx = (struct bna_tx *)
+ res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
+ tx_mod->txq = (struct bna_txq *)
+ res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ INIT_LIST_HEAD(&tx_mod->tx_free_q);
+ INIT_LIST_HEAD(&tx_mod->tx_active_q);
+
+ INIT_LIST_HEAD(&tx_mod->txq_free_q);
+
+ for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
+ tx_mod->tx[i].rid = i;
+ bfa_q_qe_init(&tx_mod->tx[i].qe);
+ list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
+ bfa_q_qe_init(&tx_mod->txq[i].qe);
+ list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
+ }
+
+ tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
+ tx_mod->default_prio = 0;
+ tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
+ tx_mod->iscsi_prio = -1;
+}
+
+void
+bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
+{
+ struct list_head *qe;
+ int i;
+
+ i = 0;
+ list_for_each(qe, &tx_mod->tx_free_q)
+ i++;
+
+ i = 0;
+ list_for_each(qe, &tx_mod->txq_free_q)
+ i++;
+
+ tx_mod->bna = NULL;
+}
+
+void
+bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
+ if (type == BNA_TX_T_LOOPBACK)
+ tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ if (tx->type == type)
+ bna_tx_start(tx);
+ }
+}
+
+void
+bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
+ tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
+
+ tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;
+
+ bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ if (tx->type == type) {
+ bfa_wc_up(&tx_mod->tx_stop_wc);
+ bna_tx_stop(tx);
+ }
+ }
+
+ bfa_wc_wait(&tx_mod->tx_stop_wc);
+}
+
+void
+bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
+ tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ bna_tx_fail(tx);
+ }
+}
+
+void
+bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
+ }
+}
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
new file mode 100644
index 000000000000..d090fbfb12fa
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -0,0 +1,987 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BNA_TYPES_H__
+#define __BNA_TYPES_H__
+
+#include "cna.h"
+#include "bna_hw_defs.h"
+#include "bfa_cee.h"
+#include "bfa_msgq.h"
+
+/**
+ *
+ * Forward declarations
+ *
+ */
+
+struct bna_mcam_handle;
+struct bna_txq;
+struct bna_tx;
+struct bna_rxq;
+struct bna_cq;
+struct bna_rx;
+struct bna_rxf;
+struct bna_enet;
+struct bna;
+struct bnad;
+
+/**
+ *
+ * Enums, primitive data types
+ *
+ */
+
+enum bna_status {
+ BNA_STATUS_T_DISABLED = 0,
+ BNA_STATUS_T_ENABLED = 1
+};
+
+enum bna_cleanup_type {
+ BNA_HARD_CLEANUP = 0,
+ BNA_SOFT_CLEANUP = 1
+};
+
+enum bna_cb_status {
+ BNA_CB_SUCCESS = 0,
+ BNA_CB_FAIL = 1,
+ BNA_CB_INTERRUPT = 2,
+ BNA_CB_BUSY = 3,
+ BNA_CB_INVALID_MAC = 4,
+ BNA_CB_MCAST_LIST_FULL = 5,
+ BNA_CB_UCAST_CAM_FULL = 6,
+ BNA_CB_WAITING = 7,
+ BNA_CB_NOT_EXEC = 8
+};
+
+enum bna_res_type {
+ BNA_RES_T_MEM = 1,
+ BNA_RES_T_INTR = 2
+};
+
+enum bna_mem_type {
+ BNA_MEM_T_KVA = 1,
+ BNA_MEM_T_DMA = 2
+};
+
+enum bna_intr_type {
+ BNA_INTR_T_INTX = 1,
+ BNA_INTR_T_MSIX = 2
+};
+
+enum bna_res_req_type {
+ BNA_RES_MEM_T_COM = 0,
+ BNA_RES_MEM_T_ATTR = 1,
+ BNA_RES_MEM_T_FWTRC = 2,
+ BNA_RES_MEM_T_STATS = 3,
+ BNA_RES_T_MAX
+};
+
+enum bna_mod_res_req_type {
+ BNA_MOD_RES_MEM_T_TX_ARRAY = 0,
+ BNA_MOD_RES_MEM_T_TXQ_ARRAY = 1,
+ BNA_MOD_RES_MEM_T_RX_ARRAY = 2,
+ BNA_MOD_RES_MEM_T_RXP_ARRAY = 3,
+ BNA_MOD_RES_MEM_T_RXQ_ARRAY = 4,
+ BNA_MOD_RES_MEM_T_UCMAC_ARRAY = 5,
+ BNA_MOD_RES_MEM_T_MCMAC_ARRAY = 6,
+ BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY = 7,
+ BNA_MOD_RES_T_MAX
+};
+
+enum bna_tx_res_req_type {
+ BNA_TX_RES_MEM_T_TCB = 0,
+ BNA_TX_RES_MEM_T_UNMAPQ = 1,
+ BNA_TX_RES_MEM_T_QPT = 2,
+ BNA_TX_RES_MEM_T_SWQPT = 3,
+ BNA_TX_RES_MEM_T_PAGE = 4,
+ BNA_TX_RES_MEM_T_IBIDX = 5,
+ BNA_TX_RES_INTR_T_TXCMPL = 6,
+ BNA_TX_RES_T_MAX,
+};
+
+enum bna_rx_mem_type {
+	BNA_RX_RES_MEM_T_CCB = 0,	/* CQ context */
+	BNA_RX_RES_MEM_T_RCB = 1,	/* RxQ context */
+	BNA_RX_RES_MEM_T_UNMAPQ = 2,	/* UnmapQ for RxQs */
+	BNA_RX_RES_MEM_T_CQPT = 3,	/* CQ QPT */
+	BNA_RX_RES_MEM_T_CSWQPT = 4,	/* CQ s/w QPT */
+	BNA_RX_RES_MEM_T_CQPT_PAGE = 5,	/* CQPT page */
+	BNA_RX_RES_MEM_T_HQPT = 6,	/* Header RxQ QPT */
+	BNA_RX_RES_MEM_T_DQPT = 7,	/* Data RxQ QPT */
+	BNA_RX_RES_MEM_T_HSWQPT = 8,	/* Header RxQ s/w QPT */
+	BNA_RX_RES_MEM_T_DSWQPT = 9,	/* Data RxQ s/w QPT */
+	BNA_RX_RES_MEM_T_DPAGE = 10,	/* Data RxQ QPT pages */
+	BNA_RX_RES_MEM_T_HPAGE = 11,	/* Header RxQ QPT pages */
+ BNA_RX_RES_MEM_T_IBIDX = 12,
+ BNA_RX_RES_MEM_T_RIT = 13,
+ BNA_RX_RES_T_INTR = 14, /* Rx interrupts */
+ BNA_RX_RES_T_MAX = 15
+};
+
+enum bna_tx_type {
+ BNA_TX_T_REGULAR = 0,
+ BNA_TX_T_LOOPBACK = 1,
+};
+
+enum bna_tx_flags {
+ BNA_TX_F_ENET_STARTED = 1,
+ BNA_TX_F_ENABLED = 2,
+ BNA_TX_F_PRIO_CHANGED = 4,
+ BNA_TX_F_BW_UPDATED = 8,
+};
+
+enum bna_tx_mod_flags {
+ BNA_TX_MOD_F_ENET_STARTED = 1,
+ BNA_TX_MOD_F_ENET_LOOPBACK = 2,
+};
+
+enum bna_rx_type {
+ BNA_RX_T_REGULAR = 0,
+ BNA_RX_T_LOOPBACK = 1,
+};
+
+enum bna_rxp_type {
+ BNA_RXP_SINGLE = 1,
+ BNA_RXP_SLR = 2,
+ BNA_RXP_HDS = 3
+};
+
+enum bna_rxmode {
+ BNA_RXMODE_PROMISC = 1,
+ BNA_RXMODE_DEFAULT = 2,
+ BNA_RXMODE_ALLMULTI = 4
+};
+
+enum bna_rx_event {
+ RX_E_START = 1,
+ RX_E_STOP = 2,
+ RX_E_FAIL = 3,
+ RX_E_STARTED = 4,
+ RX_E_STOPPED = 5,
+ RX_E_RXF_STARTED = 6,
+ RX_E_RXF_STOPPED = 7,
+ RX_E_CLEANUP_DONE = 8,
+};
+
+enum bna_rx_flags {
+ BNA_RX_F_ENET_STARTED = 1,
+ BNA_RX_F_ENABLED = 2,
+};
+
+enum bna_rx_mod_flags {
+ BNA_RX_MOD_F_ENET_STARTED = 1,
+ BNA_RX_MOD_F_ENET_LOOPBACK = 2,
+};
+
+enum bna_rxf_flags {
+ BNA_RXF_F_PAUSED = 1,
+};
+
+enum bna_rxf_event {
+ RXF_E_START = 1,
+ RXF_E_STOP = 2,
+ RXF_E_FAIL = 3,
+ RXF_E_CONFIG = 4,
+ RXF_E_PAUSE = 5,
+ RXF_E_RESUME = 6,
+ RXF_E_FW_RESP = 7,
+};
+
+enum bna_enet_type {
+ BNA_ENET_T_REGULAR = 0,
+ BNA_ENET_T_LOOPBACK_INTERNAL = 1,
+ BNA_ENET_T_LOOPBACK_EXTERNAL = 2,
+};
+
+enum bna_link_status {
+ BNA_LINK_DOWN = 0,
+ BNA_LINK_UP = 1,
+ BNA_CEE_UP = 2
+};
+
+enum bna_ethport_flags {
+ BNA_ETHPORT_F_ADMIN_UP = 1,
+ BNA_ETHPORT_F_PORT_ENABLED = 2,
+ BNA_ETHPORT_F_RX_STARTED = 4,
+};
+
+enum bna_enet_flags {
+ BNA_ENET_F_IOCETH_READY = 1,
+ BNA_ENET_F_ENABLED = 2,
+ BNA_ENET_F_PAUSE_CHANGED = 4,
+ BNA_ENET_F_MTU_CHANGED = 8
+};
+
+enum bna_rss_flags {
+ BNA_RSS_F_RIT_PENDING = 1,
+ BNA_RSS_F_CFG_PENDING = 2,
+ BNA_RSS_F_STATUS_PENDING = 4,
+};
+
+enum bna_mod_flags {
+ BNA_MOD_F_INIT_DONE = 1,
+};
+
+enum bna_pkt_rates {
+ BNA_PKT_RATE_10K = 10000,
+ BNA_PKT_RATE_20K = 20000,
+ BNA_PKT_RATE_30K = 30000,
+ BNA_PKT_RATE_40K = 40000,
+ BNA_PKT_RATE_50K = 50000,
+ BNA_PKT_RATE_60K = 60000,
+ BNA_PKT_RATE_70K = 70000,
+ BNA_PKT_RATE_80K = 80000,
+};
+
+enum bna_dim_load_types {
+ BNA_LOAD_T_HIGH_4 = 0, /* 80K <= r */
+ BNA_LOAD_T_HIGH_3 = 1, /* 60K <= r < 80K */
+ BNA_LOAD_T_HIGH_2 = 2, /* 50K <= r < 60K */
+ BNA_LOAD_T_HIGH_1 = 3, /* 40K <= r < 50K */
+ BNA_LOAD_T_LOW_1 = 4, /* 30K <= r < 40K */
+ BNA_LOAD_T_LOW_2 = 5, /* 20K <= r < 30K */
+ BNA_LOAD_T_LOW_3 = 6, /* 10K <= r < 20K */
+ BNA_LOAD_T_LOW_4 = 7, /* r < 10K */
+ BNA_LOAD_T_MAX = 8
+};
+
+enum bna_dim_bias_types {
+ BNA_BIAS_T_SMALL = 0, /* small pkts > (large pkts * 2) */
+ BNA_BIAS_T_LARGE = 1, /* Not BNA_BIAS_T_SMALL */
+ BNA_BIAS_T_MAX = 2
+};
+
+#define BNA_MAX_NAME_SIZE 64
+struct bna_ident {
+ int id;
+ char name[BNA_MAX_NAME_SIZE];
+};
+
+struct bna_mac {
+ /* This should be the first one */
+ struct list_head qe;
+ u8 addr[ETH_ALEN];
+ struct bna_mcam_handle *handle;
+};
+
+struct bna_mem_descr {
+ u32 len;
+ void *kva;
+ struct bna_dma_addr dma;
+};
+
+struct bna_mem_info {
+ enum bna_mem_type mem_type;
+ u32 len;
+ u32 num;
+ u32 align_sz; /* 0/1 = no alignment */
+ struct bna_mem_descr *mdl;
+ void *cookie; /* For bnad to unmap dma later */
+};
+
+struct bna_intr_descr {
+ int vector;
+};
+
+struct bna_intr_info {
+ enum bna_intr_type intr_type;
+ int num;
+ struct bna_intr_descr *idl;
+};
+
+union bna_res_u {
+ struct bna_mem_info mem_info;
+ struct bna_intr_info intr_info;
+};
+
+struct bna_res_info {
+ enum bna_res_type res_type;
+ union bna_res_u res_u;
+};
+
+/* HW QPT */
+struct bna_qpt {
+ struct bna_dma_addr hw_qpt_ptr;
+ void *kv_qpt_ptr;
+ u32 page_count;
+ u32 page_size;
+};
+
+struct bna_attr {
+ bool fw_query_complete;
+ int num_txq;
+ int num_rxp;
+ int num_ucmac;
+ int num_mcmac;
+ int max_rit_size;
+};
+
+/* IOCEth */
+
+struct bna_ioceth {
+ bfa_fsm_t fsm;
+ struct bfa_ioc ioc;
+
+ struct bna_attr attr;
+ struct bfa_msgq_cmd_entry msgq_cmd;
+ struct bfi_enet_attr_req attr_req;
+
+ void (*stop_cbfn)(struct bnad *bnad);
+ struct bnad *stop_cbarg;
+
+ struct bna *bna;
+};
+
+/* Enet */
+
+/* Pause configuration */
+struct bna_pause_config {
+ enum bna_status tx_pause;
+ enum bna_status rx_pause;
+};
+
+struct bna_enet {
+ bfa_fsm_t fsm;
+ enum bna_enet_flags flags;
+
+ enum bna_enet_type type;
+
+ struct bna_pause_config pause_config;
+ int mtu;
+
+ /* Callback for bna_enet_disable(), enet_stop() */
+ void (*stop_cbfn)(void *);
+ void *stop_cbarg;
+
+ /* Callback for bna_enet_pause_config() */
+ void (*pause_cbfn)(struct bnad *);
+
+ /* Callback for bna_enet_mtu_set() */
+ void (*mtu_cbfn)(struct bnad *);
+
+ struct bfa_wc chld_stop_wc;
+
+ struct bfa_msgq_cmd_entry msgq_cmd;
+ struct bfi_enet_set_pause_req pause_req;
+
+ struct bna *bna;
+};
+
+/* Ethport */
+
+struct bna_ethport {
+ bfa_fsm_t fsm;
+ enum bna_ethport_flags flags;
+
+ enum bna_link_status link_status;
+
+ int rx_started_count;
+
+ void (*stop_cbfn)(struct bna_enet *);
+
+ void (*adminup_cbfn)(struct bnad *, enum bna_cb_status);
+
+ void (*link_cbfn)(struct bnad *, enum bna_link_status);
+
+ struct bfa_msgq_cmd_entry msgq_cmd;
+ union {
+ struct bfi_enet_enable_req admin_req;
+ struct bfi_enet_diag_lb_req lpbk_req;
+ } bfi_enet_cmd;
+
+ struct bna *bna;
+};
+
+/* Interrupt Block */
+
+/* Doorbell structure */
+struct bna_ib_dbell {
+	void __iomem *doorbell_addr;
+ u32 doorbell_ack;
+};
+
+/* IB structure */
+struct bna_ib {
+ struct bna_dma_addr ib_seg_host_addr;
+ void *ib_seg_host_addr_kva;
+
+ struct bna_ib_dbell door_bell;
+
+ enum bna_intr_type intr_type;
+ int intr_vector;
+
+ u8 coalescing_timeo; /* Unit is 5usec. */
+
+ int interpkt_count;
+ int interpkt_timeo;
+};
+
+/* Tx object */
+
+/* Tx datapath control structure */
+#define BNA_Q_NAME_SIZE 16
+struct bna_tcb {
+ /* Fast path */
+ void **sw_qpt;
+ void *unmap_q;
+ u32 producer_index;
+ u32 consumer_index;
+ volatile u32 *hw_consumer_index;
+ u32 q_depth;
+	void __iomem *q_dbell;
+ struct bna_ib_dbell *i_dbell;
+ int page_idx;
+ int page_count;
+ /* Control path */
+ struct bna_txq *txq;
+ struct bnad *bnad;
+ void *priv; /* BNAD's cookie */
+ enum bna_intr_type intr_type;
+ int intr_vector;
+ u8 priority; /* Current priority */
+ unsigned long flags; /* Used by bnad as required */
+ int id;
+ char name[BNA_Q_NAME_SIZE];
+};
+
+/* TxQ QPT and configuration */
+struct bna_txq {
+ /* This should be the first one */
+ struct list_head qe;
+
+ u8 priority;
+
+ struct bna_qpt qpt;
+ struct bna_tcb *tcb;
+ struct bna_ib ib;
+
+ struct bna_tx *tx;
+
+ int hw_id;
+
+ u64 tx_packets;
+ u64 tx_bytes;
+};
+
+/* Tx object */
+struct bna_tx {
+ /* This should be the first one */
+ struct list_head qe;
+ int rid;
+ int hw_id;
+
+ bfa_fsm_t fsm;
+ enum bna_tx_flags flags;
+
+ enum bna_tx_type type;
+ int num_txq;
+
+ struct list_head txq_q;
+ u16 txf_vlan_id;
+
+ /* Tx event handlers */
+ void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tx_stall_cbfn)(struct bnad *, struct bna_tx *);
+ void (*tx_resume_cbfn)(struct bnad *, struct bna_tx *);
+ void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tx *);
+
+ /* callback for bna_tx_disable(), bna_tx_stop() */
+ void (*stop_cbfn)(void *arg, struct bna_tx *tx);
+ void *stop_cbarg;
+
+ /* callback for bna_tx_prio_set() */
+ void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx);
+
+ struct bfa_msgq_cmd_entry msgq_cmd;
+ union {
+ struct bfi_enet_tx_cfg_req cfg_req;
+ struct bfi_enet_req req;
+ struct bfi_enet_tx_cfg_rsp cfg_rsp;
+ } bfi_enet_cmd;
+
+ struct bna *bna;
+ void *priv; /* bnad's cookie */
+};
+
+/* Tx object configuration used during creation */
+struct bna_tx_config {
+ int num_txq;
+ int txq_depth;
+ int coalescing_timeo;
+ enum bna_tx_type tx_type;
+};
+
+struct bna_tx_event_cbfn {
+ /* Optional */
+ void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
+ /* Mandatory */
+ void (*tx_stall_cbfn)(struct bnad *, struct bna_tx *);
+ void (*tx_resume_cbfn)(struct bnad *, struct bna_tx *);
+ void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tx *);
+};
+
+/* Tx module - keeps track of free, active tx objects */
+struct bna_tx_mod {
+ struct bna_tx *tx; /* BFI_MAX_TXQ entries */
+ struct bna_txq *txq; /* BFI_MAX_TXQ entries */
+
+ struct list_head tx_free_q;
+ struct list_head tx_active_q;
+
+ struct list_head txq_free_q;
+
+ /* callback for bna_tx_mod_stop() */
+ void (*stop_cbfn)(struct bna_enet *enet);
+
+ struct bfa_wc tx_stop_wc;
+
+ enum bna_tx_mod_flags flags;
+
+ u8 prio_map;
+ int default_prio;
+ int iscsi_over_cee;
+ int iscsi_prio;
+ int prio_reconfigured;
+
+ u32 rid_mask;
+
+ struct bna *bna;
+};
+
+/* Rx object */
+
+/* Rx datapath control structure */
+struct bna_rcb {
+ /* Fast path */
+ void **sw_qpt;
+ void *unmap_q;
+ u32 producer_index;
+ u32 consumer_index;
+ u32 q_depth;
+	void __iomem *q_dbell;
+ int page_idx;
+ int page_count;
+ /* Control path */
+ struct bna_rxq *rxq;
+ struct bna_ccb *ccb;
+ struct bnad *bnad;
+ void *priv; /* BNAD's cookie */
+ unsigned long flags;
+ int id;
+};
+
+/* RxQ structure - QPT, configuration */
+struct bna_rxq {
+ struct list_head qe;
+
+ int buffer_size;
+ int q_depth;
+
+ struct bna_qpt qpt;
+ struct bna_rcb *rcb;
+
+ struct bna_rxp *rxp;
+ struct bna_rx *rx;
+
+ int hw_id;
+
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_packets_with_error;
+ u64 rxbuf_alloc_failed;
+};
+
+/* RxQ pair: hdr/data (HDS), small/large (SLR), or a single queue */
+union bna_rxq_u {
+ struct {
+ struct bna_rxq *hdr;
+ struct bna_rxq *data;
+ } hds;
+ struct {
+ struct bna_rxq *small;
+ struct bna_rxq *large;
+ } slr;
+ struct {
+ struct bna_rxq *only;
+ struct bna_rxq *reserved;
+ } single;
+};
+
+/* Packet rate for Dynamic Interrupt Moderation */
+struct bna_pkt_rate {
+ u32 small_pkt_cnt;
+ u32 large_pkt_cnt;
+};
+
+/* Completion control structure */
+struct bna_ccb {
+ /* Fast path */
+ void **sw_qpt;
+ u32 producer_index;
+ volatile u32 *hw_producer_index;
+ u32 q_depth;
+ struct bna_ib_dbell *i_dbell;
+ struct bna_rcb *rcb[2];
+ void *ctrl; /* For bnad */
+ struct bna_pkt_rate pkt_rate;
+ int page_idx;
+ int page_count;
+
+ /* Control path */
+ struct bna_cq *cq;
+ struct bnad *bnad;
+ void *priv; /* BNAD's cookie */
+ enum bna_intr_type intr_type;
+ int intr_vector;
+ u8 rx_coalescing_timeo; /* For NAPI */
+ int id;
+ char name[BNA_Q_NAME_SIZE];
+};
+
+/* CQ QPT, configuration */
+struct bna_cq {
+ struct bna_qpt qpt;
+ struct bna_ccb *ccb;
+
+ struct bna_ib ib;
+
+ struct bna_rx *rx;
+};
+
+struct bna_rss_config {
+ enum bfi_enet_rss_type hash_type;
+ u8 hash_mask;
+ u32 toeplitz_hash_key[BFI_ENET_RSS_KEY_LEN];
+};
+
+struct bna_hds_config {
+ enum bfi_enet_hds_type hdr_type;
+ int forced_offset;
+};
+
+/* Rx object configuration used during creation */
+struct bna_rx_config {
+ enum bna_rx_type rx_type;
+ int num_paths;
+ enum bna_rxp_type rxp_type;
+ int paused;
+ int q_depth;
+ int coalescing_timeo;
+ /*
+ * Small/Large (or Header/Data) buffer size to be configured
+ * for SLR and HDS queue type. Large buffer size comes from
+ * enet->mtu.
+ */
+ int small_buff_size;
+
+ enum bna_status rss_status;
+ struct bna_rss_config rss_config;
+
+ struct bna_hds_config hds_config;
+
+ enum bna_status vlan_strip_status;
+};
+
+/* Rx Path structure - one per MSIX vector/CPU */
+struct bna_rxp {
+ /* This should be the first one */
+ struct list_head qe;
+
+ enum bna_rxp_type type;
+ union bna_rxq_u rxq;
+ struct bna_cq cq;
+
+ struct bna_rx *rx;
+
+ /* MSI-x vector number for configuring RSS */
+ int vector;
+ int hw_id;
+};
+
+/* RxF structure (hardware Rx Function) */
+struct bna_rxf {
+ bfa_fsm_t fsm;
+ enum bna_rxf_flags flags;
+
+ struct bfa_msgq_cmd_entry msgq_cmd;
+ union {
+ struct bfi_enet_enable_req req;
+ struct bfi_enet_rss_cfg_req rss_req;
+ struct bfi_enet_rit_req rit_req;
+ struct bfi_enet_rx_vlan_req vlan_req;
+ struct bfi_enet_mcast_add_req mcast_add_req;
+ struct bfi_enet_mcast_del_req mcast_del_req;
+ struct bfi_enet_ucast_req ucast_req;
+ } bfi_enet_cmd;
+
+ /* callback for bna_rxf_start() */
+ void (*start_cbfn) (struct bna_rx *rx);
+ struct bna_rx *start_cbarg;
+
+ /* callback for bna_rxf_stop() */
+ void (*stop_cbfn) (struct bna_rx *rx);
+ struct bna_rx *stop_cbarg;
+
+ /* callback for bna_rx_receive_pause() / bna_rx_receive_resume() */
+ void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx);
+ struct bnad *oper_state_cbarg;
+
+	/*
+ * callback for:
+ * bna_rxf_ucast_set()
+ * bna_rxf_{ucast/mcast}_add(),
+ * bna_rxf_{ucast/mcast}_del(),
+ * bna_rxf_mode_set()
+ */
+ void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx);
+ struct bnad *cam_fltr_cbarg;
+
+ /* List of unicast addresses yet to be applied to h/w */
+ struct list_head ucast_pending_add_q;
+ struct list_head ucast_pending_del_q;
+ struct bna_mac *ucast_pending_mac;
+ int ucast_pending_set;
+ /* ucast addresses applied to the h/w */
+ struct list_head ucast_active_q;
+ struct bna_mac ucast_active_mac;
+ int ucast_active_set;
+
+ /* List of multicast addresses yet to be applied to h/w */
+ struct list_head mcast_pending_add_q;
+ struct list_head mcast_pending_del_q;
+ /* multicast addresses applied to the h/w */
+ struct list_head mcast_active_q;
+ struct list_head mcast_handle_q;
+
+ /* Rx modes yet to be applied to h/w */
+ enum bna_rxmode rxmode_pending;
+ enum bna_rxmode rxmode_pending_bitmask;
+ /* Rx modes applied to h/w */
+ enum bna_rxmode rxmode_active;
+
+ u8 vlan_pending_bitmask;
+ enum bna_status vlan_filter_status;
+ u32 vlan_filter_table[(BFI_ENET_VLAN_ID_MAX) / 32];
+ bool vlan_strip_pending;
+ enum bna_status vlan_strip_status;
+
+ enum bna_rss_flags rss_pending;
+ enum bna_status rss_status;
+ struct bna_rss_config rss_cfg;
+ u8 *rit;
+ int rit_size;
+
+ struct bna_rx *rx;
+};
+
+/* Rx object */
+struct bna_rx {
+ /* This should be the first one */
+ struct list_head qe;
+ int rid;
+ int hw_id;
+
+ bfa_fsm_t fsm;
+
+ enum bna_rx_type type;
+
+ int num_paths;
+ struct list_head rxp_q;
+
+ struct bna_hds_config hds_cfg;
+
+ struct bna_rxf rxf;
+
+ enum bna_rx_flags rx_flags;
+
+ struct bfa_msgq_cmd_entry msgq_cmd;
+ union {
+ struct bfi_enet_rx_cfg_req cfg_req;
+ struct bfi_enet_req req;
+ struct bfi_enet_rx_cfg_rsp cfg_rsp;
+ } bfi_enet_cmd;
+
+ /* Rx event handlers */
+ void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
+ void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
+ void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*rx_stall_cbfn)(struct bnad *, struct bna_rx *);
+ void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *);
+ void (*rx_post_cbfn)(struct bnad *, struct bna_rx *);
+
+ /* callback for bna_rx_disable(), bna_rx_stop() */
+ void (*stop_cbfn)(void *arg, struct bna_rx *rx);
+ void *stop_cbarg;
+
+ struct bna *bna;
+ void *priv; /* bnad's cookie */
+};
+
+struct bna_rx_event_cbfn {
+ /* Optional */
+ void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
+ void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
+ void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*rx_stall_cbfn)(struct bnad *, struct bna_rx *);
+ /* Mandatory */
+ void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *);
+ void (*rx_post_cbfn)(struct bnad *, struct bna_rx *);
+};
+
+/* Rx module - keeps track of free, active rx objects */
+struct bna_rx_mod {
+ struct bna *bna; /* back pointer to parent */
+ struct bna_rx *rx; /* BFI_MAX_RXQ entries */
+ struct bna_rxp *rxp; /* BFI_MAX_RXQ entries */
+ struct bna_rxq *rxq; /* BFI_MAX_RXQ entries */
+
+ struct list_head rx_free_q;
+ struct list_head rx_active_q;
+ int rx_free_count;
+
+ struct list_head rxp_free_q;
+ int rxp_free_count;
+
+ struct list_head rxq_free_q;
+ int rxq_free_count;
+
+ enum bna_rx_mod_flags flags;
+
+ /* callback for bna_rx_mod_stop() */
+ void (*stop_cbfn)(struct bna_enet *enet);
+
+ struct bfa_wc rx_stop_wc;
+ u32 dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX];
+ u32 rid_mask;
+};
+
+/* CAM */
+
+struct bna_ucam_mod {
+ struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */
+ struct list_head free_q;
+
+ struct bna *bna;
+};
+
+struct bna_mcam_handle {
+ /* This should be the first one */
+ struct list_head qe;
+ int handle;
+ int refcnt;
+};
+
+struct bna_mcam_mod {
+ struct bna_mac *mcmac; /* BFI_MAX_MCMAC entries */
+ struct bna_mcam_handle *mchandle; /* BFI_MAX_MCMAC entries */
+ struct list_head free_q;
+ struct list_head free_handle_q;
+
+ struct bna *bna;
+};
+
+/* Statistics */
+
+struct bna_stats {
+ struct bna_dma_addr hw_stats_dma;
+ struct bfi_enet_stats *hw_stats_kva;
+ struct bfi_enet_stats hw_stats;
+};
+
+struct bna_stats_mod {
+ bool ioc_ready;
+ bool stats_get_busy;
+ bool stats_clr_busy;
+ struct bfa_msgq_cmd_entry stats_get_cmd;
+ struct bfa_msgq_cmd_entry stats_clr_cmd;
+ struct bfi_enet_stats_req stats_get;
+ struct bfi_enet_stats_req stats_clr;
+};
+
+/* BNA */
+
+struct bna {
+ struct bna_ident ident;
+ struct bfa_pcidev pcidev;
+
+ struct bna_reg regs;
+ struct bna_bit_defn bits;
+
+ struct bna_stats stats;
+
+ struct bna_ioceth ioceth;
+ struct bfa_cee cee;
+ struct bfa_msgq msgq;
+
+ struct bna_ethport ethport;
+ struct bna_enet enet;
+ struct bna_stats_mod stats_mod;
+
+ struct bna_tx_mod tx_mod;
+ struct bna_rx_mod rx_mod;
+ struct bna_ucam_mod ucam_mod;
+ struct bna_mcam_mod mcam_mod;
+
+ enum bna_mod_flags mod_flags;
+
+ int default_mode_rid;
+ int promisc_rid;
+
+ struct bnad *bnad;
+};
+#endif /* __BNA_TYPES_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
new file mode 100644
index 000000000000..2f4ced66612a
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -0,0 +1,3510 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include <linux/bitops.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/prefetch.h>
+
+#include "bnad.h"
+#include "bna.h"
+#include "cna.h"
+
+static DEFINE_MUTEX(bnad_fwimg_mutex);
+
+/*
+ * Module params
+ */
+static uint bnad_msix_disable;
+module_param(bnad_msix_disable, uint, 0444);
+MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
+
+static uint bnad_ioc_auto_recover = 1;
+module_param(bnad_ioc_auto_recover, uint, 0444);
+MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
+
+/*
+ * Global variables
+ */
+u32 bnad_rxqs_per_cq = 2;
+
+static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+/*
+ * Local macros
+ */
+#define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
+
+#define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
+
+#define BNAD_GET_MBOX_IRQ(_bnad) \
+ (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
+ ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
+ ((_bnad)->pcidev->irq))
+
+#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
+do { \
+ (_res_info)->res_type = BNA_RES_T_MEM; \
+ (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
+ (_res_info)->res_u.mem_info.num = (_num); \
+ (_res_info)->res_u.mem_info.len = \
+ sizeof(struct bnad_unmap_q) + \
+ (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
+} while (0)
+
+#define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */
+
+/*
+ * Reinitialize completions in CQ, once Rx is taken down
+ */
+static void
+bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ struct bna_cq_entry *cmpl, *next_cmpl;
+ unsigned int wi_range, wis = 0, ccb_prod = 0;
+ int i;
+
+ BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
+ wi_range);
+
+ for (i = 0; i < ccb->q_depth; i++) {
+ wis++;
+		if (likely(--wi_range)) {
+			next_cmpl = cmpl + 1;
+		} else {
+			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
+			wis = 0;
+			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
+						next_cmpl, wi_range);
+		}
+ cmpl->valid = 0;
+ cmpl = next_cmpl;
+ }
+}
+
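+/*
+ * Unmap one transmitted skb: the head fragment first, then each page
+ * fragment, advancing (and wrapping) the unmap-array index as it goes.
+ * Returns the updated index.
+ */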
+static u32
+bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
+ u32 index, u32 depth, struct sk_buff *skb, u32 frag)
+{
+ int j;
+ array[index].skb = NULL;
+
+ dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
+ skb_headlen(skb), DMA_TO_DEVICE);
+ dma_unmap_addr_set(&array[index], dma_addr, 0);
+ BNA_QE_INDX_ADD(index, 1, depth);
+
+ for (j = 0; j < frag; j++) {
+ dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
+ skb_shinfo(skb)->frags[j].size, DMA_TO_DEVICE);
+ dma_unmap_addr_set(&array[index], dma_addr, 0);
+ BNA_QE_INDX_ADD(index, 1, depth);
+ }
+
+ return index;
+}
+
+/*
+ * Frees all pending Tx Bufs
+ * At this point no activity is expected on the Q,
+ * so DMA unmap & freeing is fine.
+ */
+static void
+bnad_free_all_txbufs(struct bnad *bnad,
+ struct bna_tcb *tcb)
+{
+ u32 unmap_cons;
+ struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+ struct bnad_skb_unmap *unmap_array;
+ struct sk_buff *skb = NULL;
+ int q;
+
+ unmap_array = unmap_q->unmap_array;
+
+ for (q = 0; q < unmap_q->q_depth; q++) {
+ skb = unmap_array[q].skb;
+ if (!skb)
+ continue;
+
+ unmap_cons = q;
+ unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
+ unmap_cons, unmap_q->q_depth, skb,
+ skb_shinfo(skb)->nr_frags);
+
+ dev_kfree_skb_any(skb);
+ }
+}
+
+/* Data Path Handlers */
+
+/*
+ * bnad_free_txbufs : Frees the Tx bufs on Tx completion
+ * Can be called in a) Interrupt context
+ * b) Sending context
+ * c) Tasklet context
+ */
+static u32
+bnad_free_txbufs(struct bnad *bnad,
+ struct bna_tcb *tcb)
+{
+ u32 unmap_cons, sent_packets = 0, sent_bytes = 0;
+ u16 wis, updated_hw_cons;
+ struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+ struct bnad_skb_unmap *unmap_array;
+ struct sk_buff *skb;
+
+	/*
+	 * Just return if TX is stopped. This check is useful
+	 * when bnad_free_txbufs() runs from a tasklet that was
+	 * scheduled before bnad_cb_tx_cleanup() cleared the
+	 * BNAD_TXQ_TX_STARTED bit, but actually executes after
+	 * the cleanup has run.
+	 */
+ if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
+ return 0;
+
+ updated_hw_cons = *(tcb->hw_consumer_index);
+
+ wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
+ updated_hw_cons, tcb->q_depth);
+
+	BUG_ON(wis > BNA_QE_IN_USE_CNT(tcb, tcb->q_depth));
+
+ unmap_array = unmap_q->unmap_array;
+ unmap_cons = unmap_q->consumer_index;
+
+ prefetch(&unmap_array[unmap_cons + 1]);
+ while (wis) {
+ skb = unmap_array[unmap_cons].skb;
+
+ sent_packets++;
+ sent_bytes += skb->len;
+ wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
+
+ unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
+ unmap_cons, unmap_q->q_depth, skb,
+ skb_shinfo(skb)->nr_frags);
+
+ dev_kfree_skb_any(skb);
+ }
+
+ /* Update consumer pointers. */
+ tcb->consumer_index = updated_hw_cons;
+ unmap_q->consumer_index = unmap_cons;
+
+ tcb->txq->tx_packets += sent_packets;
+ tcb->txq->tx_bytes += sent_bytes;
+
+ return sent_packets;
+}
+
+/*
+ * Tx free tasklet: frees Tx buffers for all the tcb's in all the Tx's.
+ * Scheduled from the sending context, so that the fat Tx lock is not
+ * held for too long in the sending context.
+ */
+static void
+bnad_tx_free_tasklet(unsigned long bnad_ptr)
+{
+ struct bnad *bnad = (struct bnad *)bnad_ptr;
+ struct bna_tcb *tcb;
+ u32 acked = 0;
+ int i, j;
+
+ for (i = 0; i < bnad->num_tx; i++) {
+ for (j = 0; j < bnad->num_txq_per_tx; j++) {
+ tcb = bnad->tx_info[i].tcb[j];
+ if (!tcb)
+ continue;
+ if (((u16) (*tcb->hw_consumer_index) !=
+ tcb->consumer_index) &&
+ (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
+ &tcb->flags))) {
+ acked = bnad_free_txbufs(bnad, tcb);
+ if (likely(test_bit(BNAD_TXQ_TX_STARTED,
+ &tcb->flags)))
+ bna_ib_ack(tcb->i_dbell, acked);
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+ }
+ if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
+ &tcb->flags)))
+ continue;
+ if (netif_queue_stopped(bnad->netdev)) {
+ if (acked && netif_carrier_ok(bnad->netdev) &&
+ BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
+ BNAD_NETIF_WAKE_THRESHOLD) {
+ netif_wake_queue(bnad->netdev);
+ /* TODO */
+ /* Counters for individual TxQs? */
+ BNAD_UPDATE_CTR(bnad,
+ netif_queue_wakeup);
+ }
+ }
+ }
+ }
+}
+
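+/*
+ * Process Tx completions for a single TxQ under the BNAD_TXQ_FREE_SENT
+ * guard bit, wake the netdev queue if enough entries were freed, then
+ * ack the interrupt block with the number of completions processed.
+ */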
+static u32
+bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ struct net_device *netdev = bnad->netdev;
+ u32 sent = 0;
+
+ if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+ return 0;
+
+ sent = bnad_free_txbufs(bnad, tcb);
+ if (sent) {
+ if (netif_queue_stopped(netdev) &&
+ netif_carrier_ok(netdev) &&
+ BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
+ BNAD_NETIF_WAKE_THRESHOLD) {
+ if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
+ netif_wake_queue(netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ }
+ }
+ }
+
+ if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
+ bna_ib_ack(tcb->i_dbell, sent);
+
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+
+ return sent;
+}
+
+/* MSIX Tx Completion Handler */
+static irqreturn_t
+bnad_msix_tx(int irq, void *data)
+{
+ struct bna_tcb *tcb = (struct bna_tcb *)data;
+ struct bnad *bnad = tcb->bnad;
+
+ bnad_tx(bnad, tcb);
+
+ return IRQ_HANDLED;
+}
+
+static void
+bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+
+ rcb->producer_index = 0;
+ rcb->consumer_index = 0;
+
+ unmap_q->producer_index = 0;
+ unmap_q->consumer_index = 0;
+}
+
+static void
+bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ struct bnad_unmap_q *unmap_q;
+ struct bnad_skb_unmap *unmap_array;
+ struct sk_buff *skb;
+ int unmap_cons;
+
+ unmap_q = rcb->unmap_q;
+ unmap_array = unmap_q->unmap_array;
+ for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
+ skb = unmap_array[unmap_cons].skb;
+ if (!skb)
+ continue;
+ unmap_array[unmap_cons].skb = NULL;
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
+ dma_addr),
+ rcb->rxq->buffer_size,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ }
+ bnad_reset_rcb(bnad, rcb);
+}
+
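+/*
+ * Allocate skbs for the free slots in the RxQ, DMA-map them and write
+ * their addresses into the queue entries; ring the RxQ doorbell if at
+ * least one buffer was posted.
+ */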
+static void
+bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ u16 to_alloc, alloced, unmap_prod, wi_range;
+ struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+ struct bnad_skb_unmap *unmap_array;
+ struct bna_rxq_entry *rxent;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ alloced = 0;
+ to_alloc =
+ BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
+
+ unmap_array = unmap_q->unmap_array;
+ unmap_prod = unmap_q->producer_index;
+
+ BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
+
+ while (to_alloc--) {
+ if (!wi_range)
+ BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
+ wi_range);
+ skb = netdev_alloc_skb_ip_align(bnad->netdev,
+ rcb->rxq->buffer_size);
+ if (unlikely(!skb)) {
+ BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
+ rcb->rxq->rxbuf_alloc_failed++;
+ goto finishing;
+ }
+ unmap_array[unmap_prod].skb = skb;
+ dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+ rcb->rxq->buffer_size,
+ DMA_FROM_DEVICE);
+ dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+ dma_addr);
+ BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
+ BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+
+ rxent++;
+ wi_range--;
+ alloced++;
+ }
+
+finishing:
+ if (likely(alloced)) {
+ unmap_q->producer_index = unmap_prod;
+ rcb->producer_index = unmap_prod;
+ smp_mb();
+ if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
+ bna_rxq_prod_indx_doorbell(rcb);
+ }
+}
+
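+/*
+ * Refill the RxQ once it has drained past the refill threshold; the
+ * BNAD_RXQ_REFILL bit ensures only one context refills at a time.
+ */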
+static inline void
+bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+
+ if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
+ if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
+ >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+ bnad_alloc_n_post_rxbufs(bnad, rcb);
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
+ }
+}
+
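+/*
+ * Rx completion processing: consume CQ entries while their 'valid' bit
+ * is set, up to the NAPI budget; drop error frames, set the checksum
+ * state, hand good frames to the stack, then ack the IB and refill the
+ * RxQ(s).
+ */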
+static u32
+bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
+{
+ struct bna_cq_entry *cmpl, *next_cmpl;
+ struct bna_rcb *rcb = NULL;
+ unsigned int wi_range, packets = 0, wis = 0;
+ struct bnad_unmap_q *unmap_q;
+ struct bnad_skb_unmap *unmap_array;
+ struct sk_buff *skb;
+ u32 flags, unmap_cons;
+ struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
+ struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
+
+ set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
+
+ if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
+ clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
+ return 0;
+ }
+
+ prefetch(bnad->netdev);
+ BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
+ wi_range);
+	BUG_ON(wi_range > ccb->q_depth);
+ while (cmpl->valid && packets < budget) {
+ packets++;
+ BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
+
+ if (bna_is_small_rxq(cmpl->rxq_id))
+ rcb = ccb->rcb[1];
+ else
+ rcb = ccb->rcb[0];
+
+ unmap_q = rcb->unmap_q;
+ unmap_array = unmap_q->unmap_array;
+ unmap_cons = unmap_q->consumer_index;
+
+ skb = unmap_array[unmap_cons].skb;
+		BUG_ON(!skb);
+ unmap_array[unmap_cons].skb = NULL;
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
+ dma_addr),
+ rcb->rxq->buffer_size,
+ DMA_FROM_DEVICE);
+ BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+
+		/* TODO: could be more efficient by batching these index updates */
+ BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
+
+ wis++;
+		if (likely(--wi_range)) {
+			next_cmpl = cmpl + 1;
+		} else {
+			BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
+			wis = 0;
+			BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
+						next_cmpl, wi_range);
+			BUG_ON(wi_range > ccb->q_depth);
+		}
+ prefetch(next_cmpl);
+
+ flags = ntohl(cmpl->flags);
+ if (unlikely
+ (flags &
+ (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
+ BNA_CQ_EF_TOO_LONG))) {
+ dev_kfree_skb_any(skb);
+ rcb->rxq->rx_packets_with_error++;
+ goto next;
+ }
+
+ skb_put(skb, ntohs(cmpl->length));
+ if (likely
+ ((bnad->netdev->features & NETIF_F_RXCSUM) &&
+ (((flags & BNA_CQ_EF_IPV4) &&
+ (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
+ (flags & BNA_CQ_EF_IPV6)) &&
+ (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
+ (flags & BNA_CQ_EF_L4_CKSUM_OK)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+
+ rcb->rxq->rx_packets++;
+ rcb->rxq->rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, bnad->netdev);
+
+ if (flags & BNA_CQ_EF_VLAN)
+ __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
+
+		if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+			napi_gro_receive(&rx_ctrl->napi, skb);
+		else
+			netif_receive_skb(skb);
+
+next:
+ cmpl->valid = 0;
+ cmpl = next_cmpl;
+ }
+
+ BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
+
+ if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
+ bna_ib_ack_disable_irq(ccb->i_dbell, packets);
+
+ bnad_refill_rxq(bnad, ccb->rcb[0]);
+ if (ccb->rcb[1])
+ bnad_refill_rxq(bnad, ccb->rcb[1]);
+
+ clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
+
+ return packets;
+}
+
+static void
+bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
+ struct napi_struct *napi = &rx_ctrl->napi;
+
+ if (likely(napi_schedule_prep(napi))) {
+ __napi_schedule(napi);
+ rx_ctrl->rx_schedule++;
+ }
+}
+
+/* MSIX Rx Path Handler */
+static irqreturn_t
+bnad_msix_rx(int irq, void *data)
+{
+ struct bna_ccb *ccb = (struct bna_ccb *)data;
+
+ if (ccb) {
+ ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
+ bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handlers */
+
+/* Mbox Interrupt Handlers */
+static irqreturn_t
+bnad_msix_mbox_handler(int irq, void *data)
+{
+ u32 intr_status;
+ unsigned long flags;
+ struct bnad *bnad = (struct bnad *)data;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ bna_intr_status_get(&bnad->bna, intr_status);
+
+ if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
+ bna_mbox_handler(&bnad->bna, intr_status);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+bnad_isr(int irq, void *data)
+{
+ int i, j;
+ u32 intr_status;
+ unsigned long flags;
+ struct bnad *bnad = (struct bnad *)data;
+ struct bnad_rx_info *rx_info;
+ struct bnad_rx_ctrl *rx_ctrl;
+ struct bna_tcb *tcb = NULL;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return IRQ_NONE;
+ }
+
+ bna_intr_status_get(&bnad->bna, intr_status);
+
+ if (unlikely(!intr_status)) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return IRQ_NONE;
+ }
+
+ if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
+ bna_mbox_handler(&bnad->bna, intr_status);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ if (!BNA_IS_INTX_DATA_INTR(intr_status))
+ return IRQ_HANDLED;
+
+ /* Process data interrupts */
+ /* Tx processing */
+ for (i = 0; i < bnad->num_tx; i++) {
+ for (j = 0; j < bnad->num_txq_per_tx; j++) {
+ tcb = bnad->tx_info[i].tcb[j];
+ if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
+ bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
+ }
+ }
+ /* Rx processing */
+ for (i = 0; i < bnad->num_rx; i++) {
+ rx_info = &bnad->rx_info[i];
+ if (!rx_info->rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ rx_ctrl = &rx_info->rx_ctrl[j];
+ if (rx_ctrl->ccb)
+ bnad_netif_rx_schedule_poll(bnad,
+ rx_ctrl->ccb);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/*
+ * Called in interrupt / callback context
+ * with bna_lock held, so cfg_flags access is OK
+ */
+static void
+bnad_enable_mbox_irq(struct bnad *bnad)
+{
+ clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
+
+ BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
+}
+
+/*
+ * Called with bnad->bna_lock held because of
+ * bnad->cfg_flags access.
+ */
+static void
+bnad_disable_mbox_irq(struct bnad *bnad)
+{
+ set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
+
+ BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
+}
+
+static void
+bnad_set_netdev_perm_addr(struct bnad *bnad)
+{
+ struct net_device *netdev = bnad->netdev;
+
+ memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
+ if (is_zero_ether_addr(netdev->dev_addr))
+ memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
+}
+
+/* Control Path Handlers */
+
+/* Callbacks */
+void
+bnad_cb_mbox_intr_enable(struct bnad *bnad)
+{
+ bnad_enable_mbox_irq(bnad);
+}
+
+void
+bnad_cb_mbox_intr_disable(struct bnad *bnad)
+{
+ bnad_disable_mbox_irq(bnad);
+}
+
+void
+bnad_cb_ioceth_ready(struct bnad *bnad)
+{
+ bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
+ complete(&bnad->bnad_completions.ioc_comp);
+}
+
+void
+bnad_cb_ioceth_failed(struct bnad *bnad)
+{
+ bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
+ complete(&bnad->bnad_completions.ioc_comp);
+}
+
+void
+bnad_cb_ioceth_disabled(struct bnad *bnad)
+{
+ bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
+ complete(&bnad->bnad_completions.ioc_comp);
+}
+
+static void
+bnad_cb_enet_disabled(void *arg)
+{
+ struct bnad *bnad = (struct bnad *)arg;
+
+ netif_carrier_off(bnad->netdev);
+ complete(&bnad->bnad_completions.enet_comp);
+}
+
+void
+bnad_cb_ethport_link_status(struct bnad *bnad,
+ enum bna_link_status link_status)
+{
+	bool link_up;
+
+ link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
+
+ if (link_status == BNA_CEE_UP) {
+ if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
+ BNAD_UPDATE_CTR(bnad, cee_toggle);
+ set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
+ } else {
+ if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
+ BNAD_UPDATE_CTR(bnad, cee_toggle);
+ clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
+ }
+
+ if (link_up) {
+ if (!netif_carrier_ok(bnad->netdev)) {
+ uint tx_id, tcb_id;
+ printk(KERN_WARNING "bna: %s link up\n",
+ bnad->netdev->name);
+ netif_carrier_on(bnad->netdev);
+ BNAD_UPDATE_CTR(bnad, link_toggle);
+ for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
+ for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
+ tcb_id++) {
+ struct bna_tcb *tcb =
+ bnad->tx_info[tx_id].tcb[tcb_id];
+ u32 txq_id;
+ if (!tcb)
+ continue;
+
+ txq_id = tcb->id;
+
+ if (test_bit(BNAD_TXQ_TX_STARTED,
+ &tcb->flags)) {
+ /*
+ * Force an immediate
+					 * Transmit Schedule.
+					 */
+ printk(KERN_INFO "bna: %s %d "
+ "TXQ_STARTED\n",
+ bnad->netdev->name,
+ txq_id);
+ netif_wake_subqueue(
+ bnad->netdev,
+ txq_id);
+ BNAD_UPDATE_CTR(bnad,
+ netif_queue_wakeup);
+ } else {
+ netif_stop_subqueue(
+ bnad->netdev,
+ txq_id);
+ BNAD_UPDATE_CTR(bnad,
+ netif_queue_stop);
+ }
+ }
+ }
+ }
+ } else {
+ if (netif_carrier_ok(bnad->netdev)) {
+ printk(KERN_WARNING "bna: %s link down\n",
+ bnad->netdev->name);
+ netif_carrier_off(bnad->netdev);
+ BNAD_UPDATE_CTR(bnad, link_toggle);
+ }
+ }
+}
+
+static void
+bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
+{
+ struct bnad *bnad = (struct bnad *)arg;
+
+ complete(&bnad->bnad_completions.tx_comp);
+}
+
+static void
+bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ struct bnad_tx_info *tx_info =
+ (struct bnad_tx_info *)tcb->txq->tx->priv;
+ struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+
+ tx_info->tcb[tcb->id] = tcb;
+ unmap_q->producer_index = 0;
+ unmap_q->consumer_index = 0;
+ unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
+}
+
+static void
+bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ struct bnad_tx_info *tx_info =
+ (struct bnad_tx_info *)tcb->txq->tx->priv;
+ struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+
+ while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+ cpu_relax();
+
+ bnad_free_all_txbufs(bnad, tcb);
+
+ unmap_q->producer_index = 0;
+ unmap_q->consumer_index = 0;
+
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+
+ tx_info->tcb[tcb->id] = NULL;
+}
+
+static void
+bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+
+ unmap_q->producer_index = 0;
+ unmap_q->consumer_index = 0;
+ unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
+}
+
+static void
+bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ bnad_free_all_rxbufs(bnad, rcb);
+}
+
+static void
+bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ struct bnad_rx_info *rx_info =
+ (struct bnad_rx_info *)ccb->cq->rx->priv;
+
+ rx_info->rx_ctrl[ccb->id].ccb = ccb;
+ ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
+}
+
+static void
+bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ struct bnad_rx_info *rx_info =
+ (struct bnad_rx_info *)ccb->cq->rx->priv;
+
+ rx_info->rx_ctrl[ccb->id].ccb = NULL;
+}
+
+static void
+bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
+{
+ struct bnad_tx_info *tx_info =
+ (struct bnad_tx_info *)tx->priv;
+ struct bna_tcb *tcb;
+ u32 txq_id;
+ int i;
+
+ for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
+ tcb = tx_info->tcb[i];
+ if (!tcb)
+ continue;
+ txq_id = tcb->id;
+ clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
+ netif_stop_subqueue(bnad->netdev, txq_id);
+ printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
+ bnad->netdev->name, txq_id);
+ }
+}
+
+static void
+bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
+{
+ struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
+ struct bna_tcb *tcb;
+ struct bnad_unmap_q *unmap_q;
+ u32 txq_id;
+ int i;
+
+ for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
+ tcb = tx_info->tcb[i];
+ if (!tcb)
+ continue;
+ txq_id = tcb->id;
+
+ unmap_q = tcb->unmap_q;
+
+ if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
+ continue;
+
+ while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+ cpu_relax();
+
+ bnad_free_all_txbufs(bnad, tcb);
+
+ unmap_q->producer_index = 0;
+ unmap_q->consumer_index = 0;
+
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+
+ set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
+
+ if (netif_carrier_ok(bnad->netdev)) {
+ printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
+ bnad->netdev->name, txq_id);
+ netif_wake_subqueue(bnad->netdev, txq_id);
+ BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ }
+ }
+
+ /*
+	 * Workaround: if the first ioceth enable fails, we can be
+	 * left with a zero MAC address; try to fetch the MAC
+	 * address again here.
+ */
+ if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
+ bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
+ bnad_set_netdev_perm_addr(bnad);
+ }
+}
+
+static void
+bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
+{
+	/* wait briefly for in-flight datapath activity to quiesce */
+ mdelay(BNAD_TXRX_SYNC_MDELAY);
+ bna_tx_cleanup_complete(tx);
+}
+
+static void
+bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
+{
+ struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
+ struct bna_ccb *ccb;
+ struct bnad_rx_ctrl *rx_ctrl;
+ int i;
+
+ for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
+ rx_ctrl = &rx_info->rx_ctrl[i];
+ ccb = rx_ctrl->ccb;
+ if (!ccb)
+ continue;
+
+ clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
+
+ if (ccb->rcb[1])
+ clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
+ }
+}
+
+static void
+bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
+{
+ struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
+ struct bna_ccb *ccb;
+ struct bnad_rx_ctrl *rx_ctrl;
+ int i;
+
+ mdelay(BNAD_TXRX_SYNC_MDELAY);
+
+ for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
+ rx_ctrl = &rx_info->rx_ctrl[i];
+ ccb = rx_ctrl->ccb;
+ if (!ccb)
+ continue;
+
+ clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
+
+ if (ccb->rcb[1])
+ clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
+
+ while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
+ cpu_relax();
+ }
+
+ bna_rx_cleanup_complete(rx);
+}
+
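+/*
+ * Rx post callback: reinitialize each CQ and RxQ owned by this Rx and
+ * repopulate the RxQs with fresh buffers before Rx is (re)started.
+ */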
+static void
+bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
+{
+ struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
+ struct bna_ccb *ccb;
+ struct bna_rcb *rcb;
+ struct bnad_rx_ctrl *rx_ctrl;
+ struct bnad_unmap_q *unmap_q;
+ int i;
+ int j;
+
+ for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
+ rx_ctrl = &rx_info->rx_ctrl[i];
+ ccb = rx_ctrl->ccb;
+ if (!ccb)
+ continue;
+
+ bnad_cq_cmpl_init(bnad, ccb);
+
+ for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
+ rcb = ccb->rcb[j];
+ if (!rcb)
+ continue;
+ bnad_free_all_rxbufs(bnad, rcb);
+
+ set_bit(BNAD_RXQ_STARTED, &rcb->flags);
+ set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
+ unmap_q = rcb->unmap_q;
+
+			/*
+			 * Now allocate & post buffers for this RCB;
+			 * allocation happens in callback context.
+			 */
+ if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
+ if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
+ >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+ bnad_alloc_n_post_rxbufs(bnad, rcb);
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
+ }
+ }
+ }
+}
+
+static void
+bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
+{
+ struct bnad *bnad = (struct bnad *)arg;
+
+ complete(&bnad->bnad_completions.rx_comp);
+}
+
+static void
+bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
+{
+ bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
+ complete(&bnad->bnad_completions.mcast_comp);
+}
+
+void
+bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
+ struct bna_stats *stats)
+{
+ if (status == BNA_CB_SUCCESS)
+ BNAD_UPDATE_CTR(bnad, hw_stats_updates);
+
+ if (!netif_running(bnad->netdev) ||
+ !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
+ return;
+
+ mod_timer(&bnad->stats_timer,
+ jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
+}
+
+static void
+bnad_cb_enet_mtu_set(struct bnad *bnad)
+{
+ bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
+ complete(&bnad->bnad_completions.mtu_comp);
+}
+
+/* Resource allocation, free functions */
+
+static void
+bnad_mem_free(struct bnad *bnad,
+ struct bna_mem_info *mem_info)
+{
+ int i;
+ dma_addr_t dma_pa;
+
+ if (mem_info->mdl == NULL)
+ return;
+
+ for (i = 0; i < mem_info->num; i++) {
+ if (mem_info->mdl[i].kva != NULL) {
+ if (mem_info->mem_type == BNA_MEM_T_DMA) {
+ BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
+ dma_pa);
+ dma_free_coherent(&bnad->pcidev->dev,
+ mem_info->mdl[i].len,
+ mem_info->mdl[i].kva, dma_pa);
+ } else
+ kfree(mem_info->mdl[i].kva);
+ }
+ }
+ kfree(mem_info->mdl);
+ mem_info->mdl = NULL;
+}
+
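+/*
+ * Allocate mem_info->num buffers of mem_info->len bytes each, either
+ * DMA-coherent or plain kernel memory depending on mem_type; on any
+ * failure, everything allocated so far is freed.
+ */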
+static int
+bnad_mem_alloc(struct bnad *bnad,
+ struct bna_mem_info *mem_info)
+{
+ int i;
+ dma_addr_t dma_pa;
+
+ if ((mem_info->num == 0) || (mem_info->len == 0)) {
+ mem_info->mdl = NULL;
+ return 0;
+ }
+
+ mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
+ GFP_KERNEL);
+ if (mem_info->mdl == NULL)
+ return -ENOMEM;
+
+ if (mem_info->mem_type == BNA_MEM_T_DMA) {
+ for (i = 0; i < mem_info->num; i++) {
+ mem_info->mdl[i].len = mem_info->len;
+ mem_info->mdl[i].kva =
+ dma_alloc_coherent(&bnad->pcidev->dev,
+ mem_info->len, &dma_pa,
+ GFP_KERNEL);
+
+ if (mem_info->mdl[i].kva == NULL)
+ goto err_return;
+
+ BNA_SET_DMA_ADDR(dma_pa,
+ &(mem_info->mdl[i].dma));
+ }
+ } else {
+ for (i = 0; i < mem_info->num; i++) {
+ mem_info->mdl[i].len = mem_info->len;
+ mem_info->mdl[i].kva = kzalloc(mem_info->len,
+ GFP_KERNEL);
+ if (mem_info->mdl[i].kva == NULL)
+ goto err_return;
+ }
+ }
+
+ return 0;
+
+err_return:
+ bnad_mem_free(bnad, mem_info);
+ return -ENOMEM;
+}
+
+/* Free IRQ for Mailbox */
+static void
+bnad_mbox_irq_free(struct bnad *bnad)
+{
+ int irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad_disable_mbox_irq(bnad);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ irq = BNAD_GET_MBOX_IRQ(bnad);
+ free_irq(irq, bnad);
+}
+
+/*
+ * Allocates the IRQ for the Mailbox, but keeps it disabled.
+ * It will be enabled once we get the mbox enable callback
+ * from bna.
+ */
+static int
+bnad_mbox_irq_alloc(struct bnad *bnad)
+{
+ int err = 0;
+ unsigned long irq_flags, flags;
+ u32 irq;
+ irq_handler_t irq_handler;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (bnad->cfg_flags & BNAD_CF_MSIX) {
+ irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
+ irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
+ irq_flags = 0;
+ } else {
+ irq_handler = (irq_handler_t)bnad_isr;
+ irq = bnad->pcidev->irq;
+ irq_flags = IRQF_SHARED;
+ }
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
+
+ /*
+ * Set the Mbox IRQ disable flag, so that the IRQ handler
+	 * called from request_irq() for SHARED IRQs does not execute
+ */
+ set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
+
+ BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
+
+ err = request_irq(irq, irq_handler, irq_flags,
+ bnad->mbox_irq_name, bnad);
+
+ return err;
+}
+
+static void
+bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
+{
+ kfree(intr_info->idl);
+ intr_info->idl = NULL;
+}
+
+/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
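+/*
+ * MSIX vector layout, as implied by the vector_start computation
+ * below: mailbox vector(s) first, then one vector per TxQ across all
+ * Tx objects, then one vector per Rx path.
+ */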
+static int
+bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
+ u32 txrx_id, struct bna_intr_info *intr_info)
+{
+ int i, vector_start = 0;
+ u32 cfg_flags;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ cfg_flags = bnad->cfg_flags;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ if (cfg_flags & BNAD_CF_MSIX) {
+ intr_info->intr_type = BNA_INTR_T_MSIX;
+ intr_info->idl = kcalloc(intr_info->num,
+ sizeof(struct bna_intr_descr),
+ GFP_KERNEL);
+ if (!intr_info->idl)
+ return -ENOMEM;
+
+ switch (src) {
+ case BNAD_INTR_TX:
+ vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
+ break;
+
+ case BNAD_INTR_RX:
+ vector_start = BNAD_MAILBOX_MSIX_VECTORS +
+ (bnad->num_tx * bnad->num_txq_per_tx) +
+ txrx_id;
+ break;
+
+ default:
+ BUG();
+ }
+
+ for (i = 0; i < intr_info->num; i++)
+ intr_info->idl[i].vector = vector_start + i;
+ } else {
+ intr_info->intr_type = BNA_INTR_T_INTX;
+ intr_info->num = 1;
+ intr_info->idl = kcalloc(intr_info->num,
+ sizeof(struct bna_intr_descr),
+ GFP_KERNEL);
+ if (!intr_info->idl)
+ return -ENOMEM;
+
+ switch (src) {
+ case BNAD_INTR_TX:
+ intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
+ break;
+
+ case BNAD_INTR_RX:
+ intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
+ break;
+ }
+ }
+ return 0;
+}
+
+/*
+ * NOTE: Should be called for MSIX only
+ * Unregisters Tx MSIX vector(s) from the kernel
+ */
+static void
+bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
+ int num_txqs)
+{
+ int i;
+ int vector_num;
+
+ for (i = 0; i < num_txqs; i++) {
+ if (tx_info->tcb[i] == NULL)
+ continue;
+
+ vector_num = tx_info->tcb[i]->intr_vector;
+ free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
+ }
+}
+
+/*
+ * NOTE: Should be called for MSIX only
+ * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
+ */
+static int
+bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
+ u32 tx_id, int num_txqs)
+{
+ int i;
+ int err;
+ int vector_num;
+
+ for (i = 0; i < num_txqs; i++) {
+ vector_num = tx_info->tcb[i]->intr_vector;
+ sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
+ tx_id + tx_info->tcb[i]->id);
+ err = request_irq(bnad->msix_table[vector_num].vector,
+ (irq_handler_t)bnad_msix_tx, 0,
+ tx_info->tcb[i]->name,
+ tx_info->tcb[i]);
+ if (err)
+ goto err_return;
+ }
+
+ return 0;
+
+err_return:
+ if (i > 0)
+		bnad_tx_msix_unregister(bnad, tx_info, i);
+ return -1;
+}
+
+/*
+ * NOTE: Should be called for MSIX only
+ * Unregisters Rx MSIX vector(s) from the kernel
+ */
+static void
+bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
+ int num_rxps)
+{
+ int i;
+ int vector_num;
+
+ for (i = 0; i < num_rxps; i++) {
+ if (rx_info->rx_ctrl[i].ccb == NULL)
+ continue;
+
+ vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
+ free_irq(bnad->msix_table[vector_num].vector,
+ rx_info->rx_ctrl[i].ccb);
+ }
+}
+
+/*
+ * NOTE: Should be called for MSIX only
+ * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
+ */
+static int
+bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
+ u32 rx_id, int num_rxps)
+{
+ int i;
+ int err;
+ int vector_num;
+
+ for (i = 0; i < num_rxps; i++) {
+ vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
+ sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
+ bnad->netdev->name,
+ rx_id + rx_info->rx_ctrl[i].ccb->id);
+ err = request_irq(bnad->msix_table[vector_num].vector,
+ (irq_handler_t)bnad_msix_rx, 0,
+ rx_info->rx_ctrl[i].ccb->name,
+ rx_info->rx_ctrl[i].ccb);
+ if (err)
+ goto err_return;
+ }
+
+ return 0;
+
+err_return:
+ if (i > 0)
+		bnad_rx_msix_unregister(bnad, rx_info, i);
+ return -1;
+}
+
+/* Free Tx object Resources */
+static void
+bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
+{
+ int i;
+
+ for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
+ else if (res_info[i].res_type == BNA_RES_T_INTR)
+ bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
+ }
+}
+
+/* Allocates memory and interrupt resources for Tx object */
+static int
+bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
+ u32 tx_id)
+{
+ int i, err = 0;
+
+ for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ err = bnad_mem_alloc(bnad,
+ &res_info[i].res_u.mem_info);
+ else if (res_info[i].res_type == BNA_RES_T_INTR)
+ err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
+ &res_info[i].res_u.intr_info);
+ if (err)
+ goto err_return;
+ }
+ return 0;
+
+err_return:
+ bnad_tx_res_free(bnad, res_info);
+ return err;
+}
+
+/* Free Rx object Resources */
+static void
+bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
+{
+ int i;
+
+ for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
+ else if (res_info[i].res_type == BNA_RES_T_INTR)
+ bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
+ }
+}
+
+/* Allocates memory and interrupt resources for Rx object */
+static int
+bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
+ uint rx_id)
+{
+ int i, err = 0;
+
+ /* All memory needs to be allocated before setup_ccbs */
+ for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ err = bnad_mem_alloc(bnad,
+ &res_info[i].res_u.mem_info);
+ else if (res_info[i].res_type == BNA_RES_T_INTR)
+ err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
+ &res_info[i].res_u.intr_info);
+ if (err)
+ goto err_return;
+ }
+ return 0;
+
+err_return:
+ bnad_rx_res_free(bnad, res_info);
+ return err;
+}
+
+/* Timer callbacks */
+/* a) IOC timer */
+static void
+bnad_ioc_timeout(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static void
+bnad_ioc_hb_check(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static void
+bnad_iocpf_timeout(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static void
+bnad_iocpf_sem_timeout(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+/*
+ * All timer routines use bnad->bna_lock to protect against
+ * the following race, which may occur in case of no locking:
+ * Time CPU m CPU n
+ * 0 1 = test_bit
+ * 1 clear_bit
+ * 2 del_timer_sync
+ * 3 mod_timer
+ */
+
+/* b) Dynamic Interrupt Moderation Timer */
+static void
+bnad_dim_timeout(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ struct bnad_rx_info *rx_info;
+ struct bnad_rx_ctrl *rx_ctrl;
+ int i, j;
+ unsigned long flags;
+
+ if (!netif_carrier_ok(bnad->netdev))
+ return;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ for (i = 0; i < bnad->num_rx; i++) {
+ rx_info = &bnad->rx_info[i];
+ if (!rx_info->rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ rx_ctrl = &rx_info->rx_ctrl[j];
+ if (!rx_ctrl->ccb)
+ continue;
+ bna_rx_dim_update(rx_ctrl->ccb);
+ }
+ }
+
+	/* Check for BNAD_RF_DIM_TIMER_RUNNING; does not eliminate a race */
+ if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
+ mod_timer(&bnad->dim_timer,
+ jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+/* c) Statistics Timer */
+static void
+bnad_stats_timeout(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ unsigned long flags;
+
+ if (!netif_running(bnad->netdev) ||
+ !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
+ return;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_hw_stats_get(&bnad->bna);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+/*
+ * Set up timer for DIM
+ * Called with bnad->bna_lock held
+ */
+void
+bnad_dim_timer_start(struct bnad *bnad)
+{
+ if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
+ !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
+ setup_timer(&bnad->dim_timer, bnad_dim_timeout,
+ (unsigned long)bnad);
+ set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
+ mod_timer(&bnad->dim_timer,
+ jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
+ }
+}
+
+/*
+ * Set up timer for statistics
+ * Called with mutex_lock(&bnad->conf_mutex) held
+ */
+static void
+bnad_stats_timer_start(struct bnad *bnad)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
+ setup_timer(&bnad->stats_timer, bnad_stats_timeout,
+ (unsigned long)bnad);
+ mod_timer(&bnad->stats_timer,
+ jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+/*
+ * Stops the stats timer
+ * Called with mutex_lock(&bnad->conf_mutex) held
+ */
+static void
+bnad_stats_timer_stop(struct bnad *bnad)
+{
+ int to_del = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
+ to_del = 1;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (to_del)
+ del_timer_sync(&bnad->stats_timer);
+}
+
+/* Utilities */
+
+static void
+bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
+{
+ int i = 1; /* Index 0 has broadcast address */
+ struct netdev_hw_addr *mc_addr;
+
+ netdev_for_each_mc_addr(mc_addr, netdev) {
+ memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
+ ETH_ALEN);
+ i++;
+ }
+}
+
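+/*
+ * NAPI poll handler. Per the NAPI contract, returning rcvd == budget
+ * keeps this queue on the poll list; returning less after
+ * napi_complete() allows interrupts to be re-enabled for the CCB.
+ */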
+static int
+bnad_napi_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct bnad_rx_ctrl *rx_ctrl =
+ container_of(napi, struct bnad_rx_ctrl, napi);
+ struct bnad *bnad = rx_ctrl->bnad;
+ int rcvd = 0;
+
+ rx_ctrl->rx_poll_ctr++;
+
+ if (!netif_carrier_ok(bnad->netdev))
+ goto poll_exit;
+
+ rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
+ if (rcvd >= budget)
+ return rcvd;
+
+poll_exit:
+ napi_complete(napi);
+
+ rx_ctrl->rx_complete++;
+
+ if (rx_ctrl->ccb)
+ bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
+
+ return rcvd;
+}
+
+#define BNAD_NAPI_POLL_QUOTA 64
+static void
+bnad_napi_init(struct bnad *bnad, u32 rx_id)
+{
+ struct bnad_rx_ctrl *rx_ctrl;
+ int i;
+
+ /* Initialize & enable NAPI */
+ for (i = 0; i < bnad->num_rxp_per_rx; i++) {
+ rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
+ netif_napi_add(bnad->netdev, &rx_ctrl->napi,
+ bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
+ }
+}
+
+static void
+bnad_napi_enable(struct bnad *bnad, u32 rx_id)
+{
+ struct bnad_rx_ctrl *rx_ctrl;
+ int i;
+
+ /* Initialize & enable NAPI */
+ for (i = 0; i < bnad->num_rxp_per_rx; i++) {
+ rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
+
+ napi_enable(&rx_ctrl->napi);
+ }
+}
+
+static void
+bnad_napi_disable(struct bnad *bnad, u32 rx_id)
+{
+ int i;
+
+ /* First disable and then clean up */
+ for (i = 0; i < bnad->num_rxp_per_rx; i++) {
+ napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
+ netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
+ }
+}
+
+/* Should be called with conf_lock held */
+void
+bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
+{
+ struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
+ struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
+ unsigned long flags;
+
+ if (!tx_info->tx)
+ return;
+
+ init_completion(&bnad->bnad_completions.tx_comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&bnad->bnad_completions.tx_comp);
+
+ if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
+ bnad_tx_msix_unregister(bnad, tx_info,
+ bnad->num_txq_per_tx);
+
+ if (tx_id == 0)
+ tasklet_kill(&bnad->tx_free_tasklet);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_destroy(tx_info->tx);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ tx_info->tx = NULL;
+ tx_info->tx_id = 0;
+
+ bnad_tx_res_free(bnad, res_info);
+}
+
+/* Should be called with conf_lock held */
+int
+bnad_setup_tx(struct bnad *bnad, u32 tx_id)
+{
+ int err;
+ struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
+ struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
+ struct bna_intr_info *intr_info =
+ &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
+ struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
+ static const struct bna_tx_event_cbfn tx_cbfn = {
+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
+ .tx_stall_cbfn = bnad_cb_tx_stall,
+ .tx_resume_cbfn = bnad_cb_tx_resume,
+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
+ };
+
+ struct bna_tx *tx;
+ unsigned long flags;
+
+ tx_info->tx_id = tx_id;
+
+ /* Initialize the Tx object configuration */
+ tx_config->num_txq = bnad->num_txq_per_tx;
+ tx_config->txq_depth = bnad->txq_depth;
+ tx_config->tx_type = BNA_TX_T_REGULAR;
+ tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
+
+ /* Get BNA's resource requirement for one tx object */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_res_req(bnad->num_txq_per_tx,
+ bnad->txq_depth, res_info);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Fill Unmap Q memory requirements */
+ BNAD_FILL_UNMAPQ_MEM_REQ(
+ &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
+ bnad->num_txq_per_tx,
+ BNAD_TX_UNMAPQ_DEPTH);
+
+ /* Allocate resources */
+ err = bnad_tx_res_alloc(bnad, res_info, tx_id);
+ if (err)
+ return err;
+
+ /* Ask BNA to create one Tx object, supplying required resources */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
+ tx_info);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (!tx)
+ goto err_return;
+ tx_info->tx = tx;
+
+ /* Register ISR for the Tx object */
+ if (intr_info->intr_type == BNA_INTR_T_MSIX) {
+ err = bnad_tx_msix_register(bnad, tx_info,
+ tx_id, bnad->num_txq_per_tx);
+ if (err)
+ goto err_return;
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_enable(tx);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return 0;
+
+err_return:
+ bnad_tx_res_free(bnad, res_info);
+ return err;
+}
+
+/* Setup the rx config for bna_rx_create */
+/* bnad decides the configuration */
+static void
+bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
+{
+ rx_config->rx_type = BNA_RX_T_REGULAR;
+ rx_config->num_paths = bnad->num_rxp_per_rx;
+ rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
+
+ if (bnad->num_rxp_per_rx > 1) {
+ rx_config->rss_status = BNA_STATUS_T_ENABLED;
+ rx_config->rss_config.hash_type =
+ (BFI_ENET_RSS_IPV6 |
+ BFI_ENET_RSS_IPV6_TCP |
+ BFI_ENET_RSS_IPV4 |
+ BFI_ENET_RSS_IPV4_TCP);
+ rx_config->rss_config.hash_mask =
+ bnad->num_rxp_per_rx - 1;
+ get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
+ sizeof(rx_config->rss_config.toeplitz_hash_key));
+ } else {
+ rx_config->rss_status = BNA_STATUS_T_DISABLED;
+ memset(&rx_config->rss_config, 0,
+ sizeof(rx_config->rss_config));
+ }
+ rx_config->rxp_type = BNA_RXP_SLR;
+ rx_config->q_depth = bnad->rxq_depth;
+
+ rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
+
+ rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
+}
+
+static void
+bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
+{
+ struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
+ int i;
+
+ for (i = 0; i < bnad->num_rxp_per_rx; i++)
+ rx_info->rx_ctrl[i].bnad = bnad;
+}
+
+/* Called with mutex_lock(&bnad->conf_mutex) held */
+void
+bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
+{
+ struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
+ struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
+ struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
+ unsigned long flags;
+ int to_del = 0;
+
+ if (!rx_info->rx)
+ return;
+
+ if (rx_id == 0) {
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
+ test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
+ clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
+ to_del = 1;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (to_del)
+ del_timer_sync(&bnad->dim_timer);
+ }
+
+ init_completion(&bnad->bnad_completions.rx_comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&bnad->bnad_completions.rx_comp);
+
+ if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
+ bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
+
+ bnad_napi_disable(bnad, rx_id);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_destroy(rx_info->rx);
+
+ rx_info->rx = NULL;
+ rx_info->rx_id = 0;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ bnad_rx_res_free(bnad, res_info);
+}
+
+/* Called with mutex_lock(&bnad->conf_mutex) held */
+int
+bnad_setup_rx(struct bnad *bnad, u32 rx_id)
+{
+ int err;
+ struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
+ struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
+ struct bna_intr_info *intr_info =
+ &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
+ struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
+ static const struct bna_rx_event_cbfn rx_cbfn = {
+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
+ .rx_stall_cbfn = bnad_cb_rx_stall,
+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
+ .rx_post_cbfn = bnad_cb_rx_post,
+ };
+ struct bna_rx *rx;
+ unsigned long flags;
+
+ rx_info->rx_id = rx_id;
+
+ /* Initialize the Rx object configuration */
+ bnad_init_rx_config(bnad, rx_config);
+
+ /* Get BNA's resource requirement for one Rx object */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_res_req(rx_config, res_info);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Fill Unmap Q memory requirements */
+ BNAD_FILL_UNMAPQ_MEM_REQ(
+ &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
+ rx_config->num_paths +
+ ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
+ rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
+
+ /* Allocate resource */
+ err = bnad_rx_res_alloc(bnad, res_info, rx_id);
+ if (err)
+ return err;
+
+ bnad_rx_ctrl_init(bnad, rx_id);
+
+ /* Ask BNA to create one Rx object, supplying required resources */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
+ rx_info);
+ if (!rx) {
+ err = -ENOMEM;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ goto err_return;
+ }
+ rx_info->rx = rx;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /*
+ * Init NAPI: its state starts as NAPI_STATE_SCHED, so the IRQ
+ * handler cannot schedule NAPI at this point.
+ */
+ bnad_napi_init(bnad, rx_id);
+
+ /* Register ISR for the Rx object */
+ if (intr_info->intr_type == BNA_INTR_T_MSIX) {
+ err = bnad_rx_msix_register(bnad, rx_info, rx_id,
+ rx_config->num_paths);
+ if (err)
+ goto err_return;
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (rx_id == 0) {
+ /* Set up Dynamic Interrupt Moderation Vector */
+ if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
+ bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
+
+ /* Enable VLAN filtering only on the default Rx */
+ bna_rx_vlanfilter_enable(rx);
+
+ /* Start the DIM timer */
+ bnad_dim_timer_start(bnad);
+ }
+
+ bna_rx_enable(rx);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Enable scheduling of NAPI */
+ bnad_napi_enable(bnad, rx_id);
+
+ return 0;
+
+err_return:
+ bnad_cleanup_rx(bnad, rx_id);
+ return err;
+}
+
+/* Called with conf_lock & bnad->bna_lock held */
+void
+bnad_tx_coalescing_timeo_set(struct bnad *bnad)
+{
+ struct bnad_tx_info *tx_info;
+
+ tx_info = &bnad->tx_info[0];
+ if (!tx_info->tx)
+ return;
+
+ bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
+}
+
+/* Called with conf_lock & bnad->bna_lock held */
+void
+bnad_rx_coalescing_timeo_set(struct bnad *bnad)
+{
+ struct bnad_rx_info *rx_info;
+ int i;
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ rx_info = &bnad->rx_info[i];
+ if (!rx_info->rx)
+ continue;
+ bna_rx_coalescing_timeo_set(rx_info->rx,
+ bnad->rx_coalescing_timeo);
+ }
+}
+
+/*
+ * Called with bnad->bna_lock held
+ */
+int
+bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
+{
+ int ret;
+
+ if (!is_valid_ether_addr(mac_addr))
+ return -EADDRNOTAVAIL;
+
+ /* If datapath is down, pretend everything went through */
+ if (!bnad->rx_info[0].rx)
+ return 0;
+
+ ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
+ if (ret != BNA_CB_SUCCESS)
+ return -EADDRNOTAVAIL;
+
+ return 0;
+}
+
+/* Should be called with conf_lock held */
+int
+bnad_enable_default_bcast(struct bnad *bnad)
+{
+ struct bnad_rx_info *rx_info = &bnad->rx_info[0];
+ int ret;
+ unsigned long flags;
+
+ init_completion(&bnad->bnad_completions.mcast_comp);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
+ bnad_cb_rx_mcast_add);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ if (ret == BNA_CB_SUCCESS)
+ wait_for_completion(&bnad->bnad_completions.mcast_comp);
+ else
+ return -ENODEV;
+
+ if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
+ return -ENODEV;
+
+ return 0;
+}
+
+/* Called with mutex_lock(&bnad->conf_mutex) held */
+void
+bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
+{
+ u16 vid;
+ unsigned long flags;
+
+ for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ }
+}
+
+/* Statistics utilities */
+void
+bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
+{
+ int i, j;
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ struct bna_ccb *ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
+
+ if (!ccb)
+ continue;
+ stats->rx_packets += ccb->rcb[0]->rxq->rx_packets;
+ stats->rx_bytes += ccb->rcb[0]->rxq->rx_bytes;
+ if (ccb->rcb[1] && ccb->rcb[1]->rxq) {
+ stats->rx_packets += ccb->rcb[1]->rxq->rx_packets;
+ stats->rx_bytes += ccb->rcb[1]->rxq->rx_bytes;
+ }
+ }
+ }
+ for (i = 0; i < bnad->num_tx; i++) {
+ for (j = 0; j < bnad->num_txq_per_tx; j++) {
+ if (bnad->tx_info[i].tcb[j]) {
+ stats->tx_packets +=
+ bnad->tx_info[i].tcb[j]->txq->tx_packets;
+ stats->tx_bytes +=
+ bnad->tx_info[i].tcb[j]->txq->tx_bytes;
+ }
+ }
+ }
+}
+
+/*
+ * Must be called with the bna_lock held.
+ */
+void
+bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
+{
+ struct bfi_enet_stats_mac *mac_stats;
+ u32 bmap;
+ int i;
+
+ mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
+ stats->rx_errors =
+ mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
+ mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
+ mac_stats->rx_undersize;
+ stats->tx_errors = mac_stats->tx_fcs_error +
+ mac_stats->tx_undersize;
+ stats->rx_dropped = mac_stats->rx_drop;
+ stats->tx_dropped = mac_stats->tx_drop;
+ stats->multicast = mac_stats->rx_multicast;
+ stats->collisions = mac_stats->tx_total_collision;
+
+ stats->rx_length_errors = mac_stats->rx_frame_length_error;
+
+ /* receive ring buffer overflow ?? */
+
+ stats->rx_crc_errors = mac_stats->rx_fcs_error;
+ stats->rx_frame_errors = mac_stats->rx_alignment_error;
+ /* Receiver FIFO overrun: report frame drops from the first active RxF */
+ bmap = bna_rx_rid_mask(&bnad->bna);
+ for (i = 0; bmap; i++) {
+ if (bmap & 1) {
+ stats->rx_fifo_errors +=
+ bnad->stats.bna_stats->
+ hw_stats.rxf_stats[i].frame_drops;
+ break;
+ }
+ bmap >>= 1;
+ }
+}
+
+static void
+bnad_mbox_irq_sync(struct bnad *bnad)
+{
+ u32 irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (bnad->cfg_flags & BNAD_CF_MSIX)
+ irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
+ else
+ irq = bnad->pcidev->irq;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ synchronize_irq(irq);
+}
+
+/* Utility used by bnad_start_xmit, for doing TSO */
+static int
+bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
+{
+ int err;
+
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err) {
+ BNAD_UPDATE_CTR(bnad, tso_err);
+ return err;
+ }
+ }
+
+ /*
+ * For TSO, the TCP checksum field is seeded with pseudo-header sum
+ * excluding the length field.
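+ * The length fields are zeroed here, presumably so the hardware can
+ * fill in the per-segment length when it finalizes each segment's
+ * checksum during segmentation.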
+ */
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+
+ /* Do we really need these? */
+ iph->tot_len = 0;
+ iph->check = 0;
+
+ tcp_hdr(skb)->check =
+ ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
+ IPPROTO_TCP, 0);
+ BNAD_UPDATE_CTR(bnad, tso4);
+ } else {
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+ ipv6h->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
+ IPPROTO_TCP, 0);
+ BNAD_UPDATE_CTR(bnad, tso6);
+ }
+
+ return 0;
+}
+
+/*
+ * Initialize Q numbers depending on Rx Paths
+ * Called with bnad->bna_lock held, because of cfg_flags
+ * access.
+ */
+static void
+bnad_q_num_init(struct bnad *bnad)
+{
+ int rxps;
+
+ rxps = min((uint)num_online_cpus(),
+ (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
+
+ if (!(bnad->cfg_flags & BNAD_CF_MSIX))
+ rxps = 1; /* INTx */
+
+ bnad->num_rx = 1;
+ bnad->num_tx = 1;
+ bnad->num_rxp_per_rx = rxps;
+ bnad->num_txq_per_tx = BNAD_TXQ_NUM;
+}
+
+/*
+ * Adjusts the Q numbers, given the number of MSI-X vectors.
+ * Gives preference to RSS over Tx priority queues; in that case
+ * only 1 TxQ is used.
+ * Called with bnad->bna_lock held because of cfg_flags access.
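+ * E.g. msix_vectors = 8 with one TxQ and one mailbox vector leaves
+ * 8 - 1 - 1 = 6 vectors, so num_rxp_per_rx becomes 6.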
+ */
+static void
+bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
+{
+ bnad->num_txq_per_tx = 1;
+ if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
+ bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
+ (bnad->cfg_flags & BNAD_CF_MSIX)) {
+ bnad->num_rxp_per_rx = msix_vectors -
+ (bnad->num_tx * bnad->num_txq_per_tx) -
+ BNAD_MAILBOX_MSIX_VECTORS;
+ } else
+ bnad->num_rxp_per_rx = 1;
+}
+
+/* Enable / disable ioceth */
+static int
+bnad_ioceth_disable(struct bnad *bnad)
+{
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ init_completion(&bnad->bnad_completions.ioc_comp);
+ bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
+ msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
+
+ err = bnad->bnad_completions.ioc_comp_status;
+ return err;
+}
+
+static int
+bnad_ioceth_enable(struct bnad *bnad)
+{
+ int err = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ init_completion(&bnad->bnad_completions.ioc_comp);
+ bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
+ bna_ioceth_enable(&bnad->bna.ioceth);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
+ msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
+
+ err = bnad->bnad_completions.ioc_comp_status;
+
+ return err;
+}
+
+/* Free BNA resources */
+static void
+bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
+ u32 res_val_max)
+{
+ int i;
+
+ for (i = 0; i < res_val_max; i++)
+ bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
+}
+
+/* Allocates memory and interrupt resources for BNA */
+static int
+bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
+ u32 res_val_max)
+{
+ int i, err;
+
+ for (i = 0; i < res_val_max; i++) {
+ err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
+ if (err)
+ goto err_return;
+ }
+ return 0;
+
+err_return:
+ bnad_res_free(bnad, res_info, res_val_max);
+ return err;
+}
+
+/* Interrupt enable / disable */
+static void
+bnad_enable_msix(struct bnad *bnad)
+{
+ int i, ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ if (bnad->msix_table)
+ return;
+
+ bnad->msix_table =
+ kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
+
+ if (!bnad->msix_table)
+ goto intx_mode;
+
+ for (i = 0; i < bnad->msix_num; i++)
+ bnad->msix_table[i].entry = i;
+
+ ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
+ if (ret > 0) {
+ /* Not enough MSI-X vectors. */
+ pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
+ ret, bnad->msix_num);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ /* ret = #of vectors that we got */
+ bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
+ (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
+ BNAD_MAILBOX_MSIX_VECTORS;
+
+ if (bnad->msix_num > ret)
+ goto intx_mode;
+
+ /* Try once more with adjusted numbers */
+ /* If this fails, fall back to INTx */
+ ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
+ bnad->msix_num);
+ if (ret)
+ goto intx_mode;
+
+ } else if (ret < 0)
+ goto intx_mode;
+
+ pci_intx(bnad->pcidev, 0);
+
+ return;
+
+intx_mode:
+ pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
+
+ kfree(bnad->msix_table);
+ bnad->msix_table = NULL;
+ bnad->msix_num = 0;
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad->cfg_flags &= ~BNAD_CF_MSIX;
+ bnad_q_num_init(bnad);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static void
+bnad_disable_msix(struct bnad *bnad)
+{
+ u32 cfg_flags;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ cfg_flags = bnad->cfg_flags;
+ if (bnad->cfg_flags & BNAD_CF_MSIX)
+ bnad->cfg_flags &= ~BNAD_CF_MSIX;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ if (cfg_flags & BNAD_CF_MSIX) {
+ pci_disable_msix(bnad->pcidev);
+ kfree(bnad->msix_table);
+ bnad->msix_table = NULL;
+ }
+}
+
+/* Netdev entry points */
+static int
+bnad_open(struct net_device *netdev)
+{
+ int err;
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bna_pause_config pause_config;
+ int mtu;
+ unsigned long flags;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ /* Tx */
+ err = bnad_setup_tx(bnad, 0);
+ if (err)
+ goto err_return;
+
+ /* Rx */
+ err = bnad_setup_rx(bnad, 0);
+ if (err)
+ goto cleanup_tx;
+
+ /* Port */
+ pause_config.tx_pause = 0;
+ pause_config.rx_pause = 0;
+
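+ /* Frame size on the wire: L2 header + one VLAN tag + MTU + FCS */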
+ mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
+ bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
+ bna_enet_enable(&bnad->bna.enet);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Enable broadcast */
+ bnad_enable_default_bcast(bnad);
+
+ /* Restore VLANs, if any */
+ bnad_restore_vlans(bnad, 0);
+
+ /* Set the UCAST address */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Start the stats timer */
+ bnad_stats_timer_start(bnad);
+
+ mutex_unlock(&bnad->conf_mutex);
+
+ return 0;
+
+cleanup_tx:
+ bnad_cleanup_tx(bnad, 0);
+
+err_return:
+ mutex_unlock(&bnad->conf_mutex);
+ return err;
+}
+
+static int
+bnad_stop(struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ /* Stop the stats timer */
+ bnad_stats_timer_stop(bnad);
+
+ init_completion(&bnad->bnad_completions.enet_comp);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
+ bnad_cb_enet_disabled);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ wait_for_completion(&bnad->bnad_completions.enet_comp);
+
+ bnad_cleanup_tx(bnad, 0);
+ bnad_cleanup_rx(bnad, 0);
+
+ /* Synchronize mailbox IRQ */
+ bnad_mbox_irq_sync(bnad);
+
+ mutex_unlock(&bnad->conf_mutex);
+
+ return 0;
+}
+
+/* TX */
+/*
+ * bnad_start_xmit : Netdev entry point for Transmit
+ * Called with the netdev Tx lock held
+ */
+static netdev_tx_t
+bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ u32 txq_id = 0;
+ struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
+
+ u16 txq_prod, vlan_tag = 0;
+ u32 unmap_prod, wis, wis_used, wi_range;
+ u32 vectors, vect_id, i, acked;
+ int err;
+ unsigned int len;
+ u32 gso_size;
+
+ struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+ dma_addr_t dma_addr;
+ struct bna_txq_entry *txqent;
+ u16 flags;
+
+ if (unlikely(skb->len <= ETH_HLEN)) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
+ return NETDEV_TX_OK;
+ }
+ if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
+ return NETDEV_TX_OK;
+ }
+ if (unlikely(skb_headlen(skb) == 0)) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
+ return NETDEV_TX_OK;
+ }
+
+ /*
+ */
+ if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
+ return NETDEV_TX_OK;
+ }
+
+ vectors = 1 + skb_shinfo(skb)->nr_frags;
+ if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
+ return NETDEV_TX_OK;
+ }
+ wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
+ acked = 0;
+ if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
+ vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+ if ((u16) (*tcb->hw_consumer_index) !=
+ tcb->consumer_index &&
+ !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
+ acked = bnad_free_txbufs(bnad, tcb);
+ if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
+ bna_ib_ack(tcb->i_dbell, acked);
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+ } else {
+ netif_stop_queue(netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_stop);
+ }
+
+ smp_mb();
+ /*
+ * Check again to deal with race condition between
+ * netif_stop_queue here, and netif_wake_queue in
+ * interrupt handler which is not inside netif tx lock.
+ */
+ if (likely
+ (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
+ vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+ BNAD_UPDATE_CTR(bnad, netif_queue_stop);
+ return NETDEV_TX_BUSY;
+ } else {
+ netif_wake_queue(netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ }
+ }
+
+ unmap_prod = unmap_q->producer_index;
+ flags = 0;
+
+ txq_prod = tcb->producer_index;
+ BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
+ txqent->hdr.wi.reserved = 0;
+ txqent->hdr.wi.num_vectors = vectors;
+
+ if (vlan_tx_tag_present(skb)) {
+ vlan_tag = (u16) vlan_tx_tag_get(skb);
+ flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+ }
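+ /*
+ * With CEE running, rebuild the 802.1Q TCI: the CEE priority goes
+ * into the PCP field (bits 15:13) and the original CFI/VID stay in
+ * the low 13 bits.
+ */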
+ if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
+ vlan_tag =
+ (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
+ flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+ }
+
+ txqent->hdr.wi.vlan_tag = htons(vlan_tag);
+
+ if (skb_is_gso(skb)) {
+ gso_size = skb_shinfo(skb)->gso_size;
+
+ if (unlikely(gso_size > netdev->mtu)) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
+ return NETDEV_TX_OK;
+ }
+ if (unlikely((gso_size + skb_transport_offset(skb) +
+ tcp_hdrlen(skb)) >= skb->len)) {
+ txqent->hdr.wi.opcode =
+ __constant_htons(BNA_TXQ_WI_SEND);
+ txqent->hdr.wi.lso_mss = 0;
+ BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
+ } else {
+ txqent->hdr.wi.opcode =
+ __constant_htons(BNA_TXQ_WI_SEND_LSO);
+ txqent->hdr.wi.lso_mss = htons(gso_size);
+ }
+
+ err = bnad_tso_prepare(bnad, skb);
+ if (unlikely(err)) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
+ return NETDEV_TX_OK;
+ }
+ flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
+ txqent->hdr.wi.l4_hdr_size_n_offset =
+ htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+ (tcp_hdrlen(skb) >> 2,
+ skb_transport_offset(skb)));
+ } else {
+ txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
+ txqent->hdr.wi.lso_mss = 0;
+
+ if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 proto = 0;
+
+ if (skb->protocol == __constant_htons(ETH_P_IP))
+ proto = ip_hdr(skb)->protocol;
+ else if (skb->protocol ==
+ __constant_htons(ETH_P_IPV6)) {
+ /* nexthdr may not be TCP immediately. */
+ proto = ipv6_hdr(skb)->nexthdr;
+ }
+ if (proto == IPPROTO_TCP) {
+ flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
+ txqent->hdr.wi.l4_hdr_size_n_offset =
+ htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+ (0, skb_transport_offset(skb)));
+
+ BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
+
+ if (unlikely(skb_headlen(skb) <
+ skb_transport_offset(skb) + tcp_hdrlen(skb))) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
+ return NETDEV_TX_OK;
+ }
+
+ } else if (proto == IPPROTO_UDP) {
+ flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
+ txqent->hdr.wi.l4_hdr_size_n_offset =
+ htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+ (0, skb_transport_offset(skb)));
+
+ BNAD_UPDATE_CTR(bnad, udpcsum_offload);
+ if (unlikely(skb_headlen(skb) <
+ skb_transport_offset(skb) +
+ sizeof(struct udphdr))) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
+ return NETDEV_TX_OK;
+ }
+ } else {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
+ return NETDEV_TX_OK;
+ }
+ } else {
+ txqent->hdr.wi.l4_hdr_size_n_offset = 0;
+ }
+ }
+
+ txqent->hdr.wi.flags = htons(flags);
+
+ txqent->hdr.wi.frame_length = htonl(skb->len);
+
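+ /* Vector 0 carries the linear (header) portion of the skb */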
+ unmap_q->unmap_array[unmap_prod].skb = skb;
+ len = skb_headlen(skb);
+ txqent->vector[0].length = htons(len);
+ dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+ dma_addr);
+
+ BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
+ BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+
+ vect_id = 0;
+ wis_used = 1;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ u16 size = frag->size;
+
+ if (unlikely(size == 0)) {
+ unmap_prod = unmap_q->producer_index;
+
+ unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
+ unmap_q->unmap_array,
+ unmap_prod, unmap_q->q_depth, skb,
+ i);
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
+ return NETDEV_TX_OK;
+ }
+
+ len += size;
+
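+ /*
+ * Each work item holds up to BFI_TX_MAX_VECTORS_PER_WI vectors;
+ * when one fills up, continue in the next queue entry and mark
+ * it as an extension WI.
+ */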
+ if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
+ vect_id = 0;
+ if (--wi_range)
+ txqent++;
+ else {
+ BNA_QE_INDX_ADD(txq_prod, wis_used,
+ tcb->q_depth);
+ wis_used = 0;
+ BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
+ txqent, wi_range);
+ }
+ wis_used++;
+ txqent->hdr.wi_ext.opcode =
+ __constant_htons(BNA_TXQ_WI_EXTENSION);
+ }
+
+ BUG_ON(size > BFI_TX_MAX_DATA_PER_VECTOR);
+ txqent->vector[vect_id].length = htons(size);
+ dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
+ 0, size, DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+ dma_addr);
+ BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+ BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+ }
+
+ if (unlikely(len != skb->len)) {
+ unmap_prod = unmap_q->producer_index;
+
+ unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
+ unmap_q->unmap_array, unmap_prod,
+ unmap_q->q_depth, skb,
+ skb_shinfo(skb)->nr_frags);
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
+ return NETDEV_TX_OK;
+ }
+
+ unmap_q->producer_index = unmap_prod;
+ BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
+ tcb->producer_index = txq_prod;
+
+ smp_mb();
+
+ if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
+ return NETDEV_TX_OK;
+
+ bna_txq_prod_indx_doorbell(tcb);
+ smp_mb();
+
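+ /* If hardware has already consumed entries, kick the free tasklet */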
+ if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
+ tasklet_schedule(&bnad->tx_free_tasklet);
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Uses bna_lock to synchronize reading of the stats structures,
+ * which are written by BNA under the same lock.
+ */
+static struct rtnl_link_stats64 *
+bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ bnad_netdev_qstats_fill(bnad, stats);
+ bnad_netdev_hwstats_fill(bnad, stats);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return stats;
+}
+
+void
+bnad_set_rx_mode(struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ u32 new_mask, valid_mask;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ new_mask = valid_mask = 0;
+
+ if (netdev->flags & IFF_PROMISC) {
+ if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
+ new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
+ valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
+ bnad->cfg_flags |= BNAD_CF_PROMISC;
+ }
+ } else {
+ if (bnad->cfg_flags & BNAD_CF_PROMISC) {
+ new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
+ valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
+ bnad->cfg_flags &= ~BNAD_CF_PROMISC;
+ }
+ }
+
+ if (netdev->flags & IFF_ALLMULTI) {
+ if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
+ new_mask |= BNA_RXMODE_ALLMULTI;
+ valid_mask |= BNA_RXMODE_ALLMULTI;
+ bnad->cfg_flags |= BNAD_CF_ALLMULTI;
+ }
+ } else {
+ if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
+ new_mask &= ~BNA_RXMODE_ALLMULTI;
+ valid_mask |= BNA_RXMODE_ALLMULTI;
+ bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
+ }
+ }
+
+ if (!bnad->rx_info[0].rx)
+ goto unlock;
+
+ bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
+
+ if (!netdev_mc_empty(netdev)) {
+ u8 *mcaddr_list;
+ int mc_count = netdev_mc_count(netdev);
+
+ /* Index 0 holds the broadcast address */
+ mcaddr_list =
+ kzalloc((mc_count + 1) * ETH_ALEN,
+ GFP_ATOMIC);
+ if (!mcaddr_list)
+ goto unlock;
+
+ memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+
+ /* Copy rest of the MC addresses */
+ bnad_netdev_mc_list_get(netdev, mcaddr_list);
+
+ bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
+ mcaddr_list, NULL);
+
+ /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
+ kfree(mcaddr_list);
+ }
+unlock:
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+/*
+ * bna_lock is used to sync writes to netdev->addr
+ * conf_lock cannot be used since this call may be made
+ * in a non-blocking context.
+ */
+static int
+bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
+{
+ int err;
+ struct bnad *bnad = netdev_priv(netdev);
+ struct sockaddr *sa = (struct sockaddr *)mac_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
+
+ if (!err)
+ memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return err;
+}
+
+static int
+bnad_mtu_set(struct bnad *bnad, int mtu)
+{
+ unsigned long flags;
+
+ init_completion(&bnad->bnad_completions.mtu_comp);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ wait_for_completion(&bnad->bnad_completions.mtu_comp);
+
+ return bnad->bnad_completions.mtu_comp_status;
+}
+
+static int
+bnad_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ int err, mtu = netdev->mtu;
+ struct bnad *bnad = netdev_priv(netdev);
+
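+ /* Keep the frame at least ETH_ZLEN bytes and at most jumbo sized */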
+ if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
+ return -EINVAL;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ netdev->mtu = new_mtu;
+
+ mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
+ err = bnad_mtu_set(bnad, mtu);
+ if (err)
+ err = -EBUSY;
+
+ mutex_unlock(&bnad->conf_mutex);
+ return err;
+}
+
+static void
+bnad_vlan_rx_add_vid(struct net_device *netdev,
+ unsigned short vid)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+ if (!bnad->rx_info[0].rx)
+ return;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
+ set_bit(vid, bnad->active_vlans);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+static void
+bnad_vlan_rx_kill_vid(struct net_device *netdev,
+ unsigned short vid)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+ if (!bnad->rx_info[0].rx)
+ return;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ clear_bit(vid, bnad->active_vlans);
+ bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void
+bnad_netpoll(struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bnad_rx_info *rx_info;
+ struct bnad_rx_ctrl *rx_ctrl;
+ u32 curr_mask;
+ int i, j;
+
+ if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
+ bna_intx_disable(&bnad->bna, curr_mask);
+ bnad_isr(bnad->pcidev->irq, netdev);
+ bna_intx_enable(&bnad->bna, curr_mask);
+ } else {
+ /*
+ * Tx processing may happen in sending context, so no need
+ * to explicitly process completions here
+ */
+
+ /* Rx processing */
+ for (i = 0; i < bnad->num_rx; i++) {
+ rx_info = &bnad->rx_info[i];
+ if (!rx_info->rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ rx_ctrl = &rx_info->rx_ctrl[j];
+ if (rx_ctrl->ccb)
+ bnad_netif_rx_schedule_poll(bnad,
+ rx_ctrl->ccb);
+ }
+ }
+ }
+}
+#endif
+
+static const struct net_device_ops bnad_netdev_ops = {
+ .ndo_open = bnad_open,
+ .ndo_stop = bnad_stop,
+ .ndo_start_xmit = bnad_start_xmit,
+ .ndo_get_stats64 = bnad_get_stats64,
+ .ndo_set_rx_mode = bnad_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = bnad_set_mac_address,
+ .ndo_change_mtu = bnad_change_mtu,
+ .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = bnad_netpoll
+#endif
+};
+
+static void
+bnad_netdev_init(struct bnad *bnad, bool using_dac)
+{
+ struct net_device *netdev = bnad->netdev;
+
+ netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
+
+ netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
+
+ netdev->features |= netdev->hw_features |
+ NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+
+ if (using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->mem_start = bnad->mmio_start;
+ netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
+
+ netdev->netdev_ops = &bnad_netdev_ops;
+ bnad_set_ethtool_ops(netdev);
+}
+
+/*
+ * 1. Initialize the bnad structure
+ * 2. Setup netdev pointer in pci_dev
+ * 3. Initialize Tx free tasklet
+ * 4. Initialize the number of TxQs, CQs & MSI-X vectors
+ */
+static int
+bnad_init(struct bnad *bnad,
+ struct pci_dev *pdev, struct net_device *netdev)
+{
+ unsigned long flags;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ pci_set_drvdata(pdev, netdev);
+
+ bnad->netdev = netdev;
+ bnad->pcidev = pdev;
+ bnad->mmio_start = pci_resource_start(pdev, 0);
+ bnad->mmio_len = pci_resource_len(pdev, 0);
+ bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
+ if (!bnad->bar0) {
+ dev_err(&pdev->dev, "ioremap for bar0 failed\n");
+ pci_set_drvdata(pdev, NULL);
+ return -ENOMEM;
+ }
+ pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
+ (unsigned long long) bnad->mmio_len);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (!bnad_msix_disable)
+ bnad->cfg_flags = BNAD_CF_MSIX;
+
+ bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
+
+ bnad_q_num_init(bnad);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
+ (bnad->num_rx * bnad->num_rxp_per_rx) +
+ BNAD_MAILBOX_MSIX_VECTORS;
+
+ bnad->txq_depth = BNAD_TXQ_DEPTH;
+ bnad->rxq_depth = BNAD_RXQ_DEPTH;
+
+ bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
+ bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
+
+ tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
+ (unsigned long)bnad);
+
+ return 0;
+}
+
+/*
+ * Must be called after bnad_pci_uninit()
+ * so that iounmap() and pci_set_drvdata(NULL)
+ * happens only after PCI uninitialization.
+ */
+static void
+bnad_uninit(struct bnad *bnad)
+{
+ if (bnad->bar0)
+ iounmap(bnad->bar0);
+ pci_set_drvdata(bnad->pcidev, NULL);
+}
+
+/*
+ * Initialize locks
+ * a) Per-ioceth mutex used for serializing configuration
+ *    changes from the OS interface
+ * b) Spinlock used to protect the bna state machine
+ */
+static void
+bnad_lock_init(struct bnad *bnad)
+{
+ spin_lock_init(&bnad->bna_lock);
+ mutex_init(&bnad->conf_mutex);
+}
+
+static void
+bnad_lock_uninit(struct bnad *bnad)
+{
+ mutex_destroy(&bnad->conf_mutex);
+}
+
+/* PCI Initialization */
+static int
+bnad_pci_init(struct bnad *bnad,
+ struct pci_dev *pdev, bool *using_dac)
+{
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+ err = pci_request_regions(pdev, BNAD_NAME);
+ if (err)
+ goto disable_device;
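+ /* Prefer a 64-bit DMA mask; fall back to 32-bit if unavailable */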
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ *using_dac = 1;
+ } else {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (err)
+ goto release_regions;
+ }
+ *using_dac = 0;
+ }
+ pci_set_master(pdev);
+ return 0;
+
+release_regions:
+ pci_release_regions(pdev);
+disable_device:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void
+bnad_pci_uninit(struct pci_dev *pdev)
+{
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static int __devinit
+bnad_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pcidev_id)
+{
+ bool using_dac;
+ int err;
+ struct bnad *bnad;
+ struct bna *bna;
+ struct net_device *netdev;
+ struct bfa_pcidev pcidev_info;
+ unsigned long flags;
+
+ pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
+ pdev, pcidev_id, PCI_FUNC(pdev->devfn));
+
+ mutex_lock(&bnad_fwimg_mutex);
+ if (!cna_get_firmware_buf(pdev)) {
+ mutex_unlock(&bnad_fwimg_mutex);
+ pr_warn("Failed to load Firmware Image!\n");
+ return -ENODEV;
+ }
+ mutex_unlock(&bnad_fwimg_mutex);
+
+ /*
+ * Allocates sizeof(struct net_device) + sizeof(struct bnad);
+ * bnad = netdev_priv(netdev)
+ */
+ netdev = alloc_etherdev(sizeof(struct bnad));
+ if (!netdev) {
+ dev_err(&pdev->dev, "netdev allocation failed\n");
+ err = -ENOMEM;
+ return err;
+ }
+ bnad = netdev_priv(netdev);
+
+ bnad_lock_init(bnad);
+
+ mutex_lock(&bnad->conf_mutex);
+ /*
+ * PCI initialization
+ * Output : using_dac = 1 for 64 bit DMA
+ * = 0 for 32 bit DMA
+ */
+ err = bnad_pci_init(bnad, pdev, &using_dac);
+ if (err)
+ goto unlock_mutex;
+
+ /*
+ * Initialize bnad structure
+ * Setup relation between pci_dev & netdev
+ * Init Tx free tasklet
+ */
+ err = bnad_init(bnad, pdev, netdev);
+ if (err)
+ goto pci_uninit;
+
+ /* Initialize netdev structure, set up ethtool ops */
+ bnad_netdev_init(bnad, using_dac);
+
+ /* Set link to down state */
+ netif_carrier_off(netdev);
+
+ /* Get resource requirement from bna */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_res_req(&bnad->res_info[0]);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Allocate resources from bna */
+ err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
+ if (err)
+ goto drv_uninit;
+
+ bna = &bnad->bna;
+
+ /* Setup pcidev_info for bna_init() */
+ pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
+ pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
+ pcidev_info.device_id = bnad->pcidev->device;
+ pcidev_info.pci_bar_kva = bnad->bar0;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ bnad->stats.bna_stats = &bna->stats;
+
+ bnad_enable_msix(bnad);
+ err = bnad_mbox_irq_alloc(bnad);
+ if (err)
+ goto res_free;
+
+
+ /* Set up timers */
+ setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
+ ((unsigned long)bnad));
+ setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
+ ((unsigned long)bnad));
+ setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
+ ((unsigned long)bnad));
+ setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
+ ((unsigned long)bnad));
+
+ /* Now start the timer before calling IOC */
+ mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
+ jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
+
+ /*
+ * Start the chip
+ * If the callback comes back with an error, we bail out:
+ * this is a catastrophic error.
+ */
+ err = bnad_ioceth_enable(bnad);
+ if (err) {
+ pr_err("BNA: Initialization failed err=%d\n",
+ err);
+ goto probe_success;
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
+ bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
+ bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
+ bna_attr(bna)->num_rxp - 1);
+ if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
+ bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
+ err = -EIO;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (err)
+ goto disable_ioceth;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
+ if (err) {
+ err = -EIO;
+ goto disable_ioceth;
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Get the burnt-in mac */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
+ bnad_set_netdev_perm_addr(bnad);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+
+ /* Finally, register with the net_device layer */
+ err = register_netdev(netdev);
+ if (err) {
+ pr_err("BNA : Registering with netdev failed\n");
+ goto probe_uninit;
+ }
+ set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
+
+ return 0;
+
+probe_success:
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+
+probe_uninit:
+ mutex_lock(&bnad->conf_mutex);
+ bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
+disable_ioceth:
+ bnad_ioceth_disable(bnad);
+ del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
+ del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
+ del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_uninit(bna);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ bnad_mbox_irq_free(bnad);
+ bnad_disable_msix(bnad);
+res_free:
+ bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
+drv_uninit:
+ bnad_uninit(bnad);
+pci_uninit:
+ bnad_pci_uninit(pdev);
+unlock_mutex:
+ mutex_unlock(&bnad->conf_mutex);
+ bnad_lock_uninit(bnad);
+ free_netdev(netdev);
+ return err;
+}
+
+static void __devexit
+bnad_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct bnad *bnad;
+ struct bna *bna;
+ unsigned long flags;
+
+ if (!netdev)
+ return;
+
+ pr_info("%s bnad_pci_remove\n", netdev->name);
+ bnad = netdev_priv(netdev);
+ bna = &bnad->bna;
+
+ if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
+ unregister_netdev(netdev);
+
+ mutex_lock(&bnad->conf_mutex);
+ bnad_ioceth_disable(bnad);
+ del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
+ del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
+ del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_uninit(bna);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
+ bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
+ bnad_mbox_irq_free(bnad);
+ bnad_disable_msix(bnad);
+ bnad_pci_uninit(pdev);
+ mutex_unlock(&bnad->conf_mutex);
+ bnad_lock_uninit(bnad);
+ bnad_uninit(bnad);
+ free_netdev(netdev);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
+ PCI_DEVICE_ID_BROCADE_CT),
+ .class = PCI_CLASS_NETWORK_ETHERNET << 8,
+ .class_mask = 0xffff00
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
+ BFA_PCI_DEVICE_ID_CT2),
+ .class = PCI_CLASS_NETWORK_ETHERNET << 8,
+ .class_mask = 0xffff00
+ },
+ {0, },
+};
+
+MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
+
+static struct pci_driver bnad_pci_driver = {
+ .name = BNAD_NAME,
+ .id_table = bnad_pci_id_table,
+ .probe = bnad_pci_probe,
+ .remove = __devexit_p(bnad_pci_remove),
+};
+
+static int __init
+bnad_module_init(void)
+{
+ int err;
+
+ pr_info("Brocade 10G Ethernet driver - version: %s\n",
+ BNAD_VERSION);
+
+ bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
+
+ err = pci_register_driver(&bnad_pci_driver);
+ if (err < 0) {
+ pr_err("bna : PCI registration failed in module init "
+ "(%d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit
+bnad_module_exit(void)
+{
+ pci_unregister_driver(&bnad_pci_driver);
+
+ if (bfi_fw)
+ release_firmware(bfi_fw);
+}
+
+module_init(bnad_module_init);
+module_exit(bnad_module_exit);
+
+MODULE_AUTHOR("Brocade");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
+MODULE_VERSION(BNAD_VERSION);
+MODULE_FIRMWARE(CNA_FW_FILE_CT);
+MODULE_FIRMWARE(CNA_FW_FILE_CT2);
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
new file mode 100644
index 000000000000..5487ca42d018
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -0,0 +1,380 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BNAD_H__
+#define __BNAD_H__
+
+#include <linux/rtnetlink.h>
+#include <linux/workqueue.h>
+#include <linux/ipv6.h>
+#include <linux/etherdevice.h>
+#include <linux/mutex.h>
+#include <linux/firmware.h>
+#include <linux/if_vlan.h>
+
+/* Fix for IA64 */
+#include <asm/checksum.h>
+#include <net/ip6_checksum.h>
+
+#include <net/ip.h>
+#include <net/tcp.h>
+
+#include "bna.h"
+
+#define BNAD_TXQ_DEPTH 2048
+#define BNAD_RXQ_DEPTH 2048
+
+#define BNAD_MAX_TX 1
+#define BNAD_MAX_TXQ_PER_TX 8 /* 8 priority queues */
+#define BNAD_TXQ_NUM 1
+
+#define BNAD_MAX_RX 1
+#define BNAD_MAX_RXP_PER_RX 16
+#define BNAD_MAX_RXQ_PER_RXP 2
+
+/*
+ * Control structure pointed to by ccb->ctrl, which
+ * determines the NAPI / LRO behavior of the CCB.
+ * There is a 1:1 correspondence between ccb & ctrl.
+ */
+struct bnad_rx_ctrl {
+ struct bna_ccb *ccb;
+ struct bnad *bnad;
+ unsigned long flags;
+ struct napi_struct napi;
+ u64 rx_intr_ctr;
+ u64 rx_poll_ctr;
+ u64 rx_schedule;
+ u64 rx_keep_poll;
+ u64 rx_complete;
+};
+
+#define BNAD_RXMODE_PROMISC_DEFAULT BNA_RXMODE_PROMISC
+
+/*
+ * GLOBAL #defines (CONSTANTS)
+ */
+#define BNAD_NAME "bna"
+#define BNAD_NAME_LEN 64
+
+#define BNAD_VERSION "3.0.2.2"
+
+#define BNAD_MAILBOX_MSIX_INDEX 0
+#define BNAD_MAILBOX_MSIX_VECTORS 1
+#define BNAD_INTX_TX_IB_BITMASK 0x1
+#define BNAD_INTX_RX_IB_BITMASK 0x2
+
+#define BNAD_STATS_TIMER_FREQ 1000 /* in msecs */
+#define BNAD_DIM_TIMER_FREQ 1000 /* in msecs */
+
+#define BNAD_IOCETH_TIMEOUT 10000
+
+#define BNAD_MAX_Q_DEPTH 0x10000
+#define BNAD_MIN_Q_DEPTH 0x200
+
+#define BNAD_MAX_RXQ_DEPTH (BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq)
+/* keeping MAX TX and RX Q depth equal */
+#define BNAD_MAX_TXQ_DEPTH BNAD_MAX_RXQ_DEPTH
+
+#define BNAD_JUMBO_MTU 9000
+
+#define BNAD_NETIF_WAKE_THRESHOLD 8
+
+#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT 3
+
+/* Bit positions for tcb->flags */
+#define BNAD_TXQ_FREE_SENT 0
+#define BNAD_TXQ_TX_STARTED 1
+
+/* Bit positions for rcb->flags */
+#define BNAD_RXQ_REFILL 0
+#define BNAD_RXQ_STARTED 1
+#define BNAD_RXQ_POST_OK 2
+
+/* Resource limits */
+#define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx)
+#define BNAD_NUM_RXP (bnad->num_rx * bnad->num_rxp_per_rx)
+
+/*
+ * DATA STRUCTURES
+ */
+
+/* enums */
+enum bnad_intr_source {
+ BNAD_INTR_TX = 1,
+ BNAD_INTR_RX = 2
+};
+
+enum bnad_link_state {
+ BNAD_LS_DOWN = 0,
+ BNAD_LS_UP = 1
+};
+
+struct bnad_completion {
+ struct completion ioc_comp;
+ struct completion ucast_comp;
+ struct completion mcast_comp;
+ struct completion tx_comp;
+ struct completion rx_comp;
+ struct completion stats_comp;
+ struct completion enet_comp;
+ struct completion mtu_comp;
+
+ u8 ioc_comp_status;
+ u8 ucast_comp_status;
+ u8 mcast_comp_status;
+ u8 tx_comp_status;
+ u8 rx_comp_status;
+ u8 stats_comp_status;
+ u8 port_comp_status;
+ u8 mtu_comp_status;
+};
+
+/* Tx Rx Control Stats */
+struct bnad_drv_stats {
+ u64 netif_queue_stop;
+ u64 netif_queue_wakeup;
+ u64 netif_queue_stopped;
+ u64 tso4;
+ u64 tso6;
+ u64 tso_err;
+ u64 tcpcsum_offload;
+ u64 udpcsum_offload;
+ u64 csum_help;
+ u64 tx_skb_too_short;
+ u64 tx_skb_stopping;
+ u64 tx_skb_max_vectors;
+ u64 tx_skb_mss_too_long;
+ u64 tx_skb_tso_too_short;
+ u64 tx_skb_tso_prepare;
+ u64 tx_skb_non_tso_too_long;
+ u64 tx_skb_tcp_hdr;
+ u64 tx_skb_udp_hdr;
+ u64 tx_skb_csum_err;
+ u64 tx_skb_headlen_too_long;
+ u64 tx_skb_headlen_zero;
+ u64 tx_skb_frag_zero;
+ u64 tx_skb_len_mismatch;
+
+ u64 hw_stats_updates;
+ u64 netif_rx_dropped;
+
+ u64 link_toggle;
+ u64 cee_toggle;
+
+ u64 rxp_info_alloc_failed;
+ u64 mbox_intr_disabled;
+ u64 mbox_intr_enabled;
+ u64 tx_unmap_q_alloc_failed;
+ u64 rx_unmap_q_alloc_failed;
+
+ u64 rxbuf_alloc_failed;
+};
+
+/* Complete driver stats */
+struct bnad_stats {
+ struct bnad_drv_stats drv_stats;
+ struct bna_stats *bna_stats;
+};
+
+/* Tx / Rx Resources */
+struct bnad_tx_res_info {
+ struct bna_res_info res_info[BNA_TX_RES_T_MAX];
+};
+
+struct bnad_rx_res_info {
+ struct bna_res_info res_info[BNA_RX_RES_T_MAX];
+};
+
+struct bnad_tx_info {
+ struct bna_tx *tx; /* 1:1 between tx_info & tx */
+ struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
+ u32 tx_id;
+} ____cacheline_aligned;
+
+struct bnad_rx_info {
+ struct bna_rx *rx; /* 1:1 between rx_info & rx */
+
+ struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX];
+ u32 rx_id;
+} ____cacheline_aligned;
+
+/* Unmap queues for Tx / Rx cleanup */
+struct bnad_skb_unmap {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+};
+
+struct bnad_unmap_q {
+ u32 producer_index;
+ u32 consumer_index;
+ u32 q_depth;
+ /* This should be the last one */
+ struct bnad_skb_unmap unmap_array[1];
+};
+
+/* Bit mask values for bnad->cfg_flags */
+#define BNAD_CF_DIM_ENABLED 0x01 /* DIM */
+#define BNAD_CF_PROMISC 0x02
+#define BNAD_CF_ALLMULTI 0x04
+#define BNAD_CF_MSIX 0x08 /* If in MSIx mode */
+
+/* Defines for run_flags bit-mask */
+/* Set, tested & cleared using xxx_bit() functions */
+/* Values indicate bit positions */
+#define BNAD_RF_CEE_RUNNING 0
+#define BNAD_RF_MTU_SET 1
+#define BNAD_RF_MBOX_IRQ_DISABLED 2
+#define BNAD_RF_NETDEV_REGISTERED 3
+#define BNAD_RF_DIM_TIMER_RUNNING 4
+#define BNAD_RF_STATS_TIMER_RUNNING 5
+#define BNAD_RF_TX_PRIO_SET 6
+
+
+/* Define for Fast Path flags */
+/* Defined as bit positions */
+#define BNAD_FP_IN_RX_PATH 0
+
+struct bnad {
+ struct net_device *netdev;
+
+ /* Data path */
+ struct bnad_tx_info tx_info[BNAD_MAX_TX];
+ struct bnad_rx_info rx_info[BNAD_MAX_RX];
+
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ /*
+ * These Q numbers are global only because they are used to
+ * calculate the number of MSI-X vectors; the actual number of
+ * queues is per Tx/Rx object.
+ */
+ u32 num_tx;
+ u32 num_rx;
+ u32 num_txq_per_tx;
+ u32 num_rxp_per_rx;
+
+ u32 txq_depth;
+ u32 rxq_depth;
+
+ u8 tx_coalescing_timeo;
+ u8 rx_coalescing_timeo;
+
+ struct bna_rx_config rx_config[BNAD_MAX_RX];
+ struct bna_tx_config tx_config[BNAD_MAX_TX];
+
+ void __iomem *bar0; /* BAR0 address */
+
+ struct bna bna;
+
+ u32 cfg_flags;
+ unsigned long run_flags;
+
+ struct pci_dev *pcidev;
+ u64 mmio_start;
+ u64 mmio_len;
+
+ u32 msix_num;
+ struct msix_entry *msix_table;
+
+ struct mutex conf_mutex;
+ spinlock_t bna_lock ____cacheline_aligned;
+
+ /* Timers */
+ struct timer_list ioc_timer;
+ struct timer_list dim_timer;
+ struct timer_list stats_timer;
+
+ /* Control path resources, memory & irq */
+ struct bna_res_info res_info[BNA_RES_T_MAX];
+ struct bna_res_info mod_res_info[BNA_MOD_RES_T_MAX];
+ struct bnad_tx_res_info tx_res_info[BNAD_MAX_TX];
+ struct bnad_rx_res_info rx_res_info[BNAD_MAX_RX];
+
+ struct bnad_completion bnad_completions;
+
+ /* Burnt in MAC address */
+ mac_t perm_addr;
+
+ struct tasklet_struct tx_free_tasklet;
+
+ /* Statistics */
+ struct bnad_stats stats;
+
+ struct bnad_diag *diag;
+
+ char adapter_name[BNAD_NAME_LEN];
+ char port_name[BNAD_NAME_LEN];
+ char mbox_irq_name[BNAD_NAME_LEN];
+};
+
+/*
+ * EXTERN VARIABLES
+ */
+extern struct firmware *bfi_fw;
+extern u32 bnad_rxqs_per_cq;
+
+/*
+ * EXTERN PROTOTYPES
+ */
+extern u32 *cna_get_firmware_buf(struct pci_dev *pdev);
+/* Netdev entry point prototypes */
+extern void bnad_set_rx_mode(struct net_device *netdev);
+extern struct net_device_stats *bnad_get_netdev_stats(
+ struct net_device *netdev);
+extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
+extern int bnad_enable_default_bcast(struct bnad *bnad);
+extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
+extern void bnad_set_ethtool_ops(struct net_device *netdev);
+
+/* Configuration & setup */
+extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
+extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
+
+extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
+extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
+extern void bnad_cleanup_tx(struct bnad *bnad, u32 tx_id);
+extern void bnad_cleanup_rx(struct bnad *bnad, u32 rx_id);
+
+/* Timer start/stop protos */
+extern void bnad_dim_timer_start(struct bnad *bnad);
+
+/* Statistics */
+extern void bnad_netdev_qstats_fill(struct bnad *bnad,
+ struct rtnl_link_stats64 *stats);
+extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
+ struct rtnl_link_stats64 *stats);
+
+/*
+ * MACROS
+ */
+/* To set & get the stats counters */
+#define BNAD_UPDATE_CTR(_bnad, _ctr) \
+ (((_bnad)->stats.drv_stats._ctr)++)
+
+#define BNAD_GET_CTR(_bnad, _ctr) ((_bnad)->stats.drv_stats._ctr)
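+
+/* Example: BNAD_UPDATE_CTR(bnad, tx_skb_stopping) increments
+ * bnad->stats.drv_stats.tx_skb_stopping by one.
+ */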
+
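+/*
+ * The _unsafe suffix indicates that the caller is expected to provide any
+ * locking or synchronization needed while re-arming the interrupt doorbell
+ * (an assumption based on the naming convention used in this driver).
+ */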
+#define bnad_enable_rx_irq_unsafe(_ccb) \
+{ \
+ if (likely(test_bit(BNAD_RXQ_STARTED, &(_ccb)->rcb[0]->flags))) {\
+ bna_ib_coalescing_timer_set((_ccb)->i_dbell, \
+ (_ccb)->rx_coalescing_timeo); \
+ bna_ib_ack((_ccb)->i_dbell, 0); \
+ } \
+}
+
+#endif /* __BNAD_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
new file mode 100644
index 000000000000..ac6b49561bf0
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -0,0 +1,962 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include "cna.h"
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+
+#include "bna.h"
+
+#include "bnad.h"
+
+#define BNAD_NUM_TXF_COUNTERS 12
+#define BNAD_NUM_RXF_COUNTERS 10
+#define BNAD_NUM_CQ_COUNTERS (3 + 5)
+#define BNAD_NUM_RXQ_COUNTERS 6
+#define BNAD_NUM_TXQ_COUNTERS 5
+
+#define BNAD_ETHTOOL_STATS_NUM \
+ (sizeof(struct rtnl_link_stats64) / sizeof(u64) + \
+ sizeof(struct bnad_drv_stats) / sizeof(u64) + \
+ offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
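+
+/*
+ * BNAD_ETHTOOL_STATS_NUM counts the fixed-position u64 counters: all
+ * rtnl_link_stats64 fields, all bnad_drv_stats counters, and the hardware
+ * counters that precede the per-function rxf/txf blocks. The names below
+ * must stay in exactly that order.
+ */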
+
+static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
+ "rx_packets",
+ "tx_packets",
+ "rx_bytes",
+ "tx_bytes",
+ "rx_errors",
+ "tx_errors",
+ "rx_dropped",
+ "tx_dropped",
+ "multicast",
+ "collisions",
+
+ "rx_length_errors",
+ "rx_over_errors",
+ "rx_crc_errors",
+ "rx_frame_errors",
+ "rx_fifo_errors",
+ "rx_missed_errors",
+
+ "tx_aborted_errors",
+ "tx_carrier_errors",
+ "tx_fifo_errors",
+ "tx_heartbeat_errors",
+ "tx_window_errors",
+
+ "rx_compressed",
+ "tx_compressed",
+
+ "netif_queue_stop",
+ "netif_queue_wakeup",
+ "netif_queue_stopped",
+ "tso4",
+ "tso6",
+ "tso_err",
+ "tcpcsum_offload",
+ "udpcsum_offload",
+ "csum_help",
+ "tx_skb_too_short",
+ "tx_skb_stopping",
+ "tx_skb_max_vectors",
+ "tx_skb_mss_too_long",
+ "tx_skb_tso_too_short",
+ "tx_skb_tso_prepare",
+ "tx_skb_non_tso_too_long",
+ "tx_skb_tcp_hdr",
+ "tx_skb_udp_hdr",
+ "tx_skb_csum_err",
+ "tx_skb_headlen_too_long",
+ "tx_skb_headlen_zero",
+ "tx_skb_frag_zero",
+ "tx_skb_len_mismatch",
+ "hw_stats_updates",
+ "netif_rx_dropped",
+
+ "link_toggle",
+ "cee_toggle",
+
+ "rxp_info_alloc_failed",
+ "mbox_intr_disabled",
+ "mbox_intr_enabled",
+ "tx_unmap_q_alloc_failed",
+ "rx_unmap_q_alloc_failed",
+ "rxbuf_alloc_failed",
+
+ "mac_frame_64",
+ "mac_frame_65_127",
+ "mac_frame_128_255",
+ "mac_frame_256_511",
+ "mac_frame_512_1023",
+ "mac_frame_1024_1518",
+ "mac_frame_1518_1522",
+ "mac_rx_bytes",
+ "mac_rx_packets",
+ "mac_rx_fcs_error",
+ "mac_rx_multicast",
+ "mac_rx_broadcast",
+ "mac_rx_control_frames",
+ "mac_rx_pause",
+ "mac_rx_unknown_opcode",
+ "mac_rx_alignment_error",
+ "mac_rx_frame_length_error",
+ "mac_rx_code_error",
+ "mac_rx_carrier_sense_error",
+ "mac_rx_undersize",
+ "mac_rx_oversize",
+ "mac_rx_fragments",
+ "mac_rx_jabber",
+ "mac_rx_drop",
+
+ "mac_tx_bytes",
+ "mac_tx_packets",
+ "mac_tx_multicast",
+ "mac_tx_broadcast",
+ "mac_tx_pause",
+ "mac_tx_deferral",
+ "mac_tx_excessive_deferral",
+ "mac_tx_single_collision",
+ "mac_tx_muliple_collision",
+ "mac_tx_late_collision",
+ "mac_tx_excessive_collision",
+ "mac_tx_total_collision",
+ "mac_tx_pause_honored",
+ "mac_tx_drop",
+ "mac_tx_jabber",
+ "mac_tx_fcs_error",
+ "mac_tx_control_frame",
+ "mac_tx_oversize",
+ "mac_tx_undersize",
+ "mac_tx_fragments",
+
+ "bpc_tx_pause_0",
+ "bpc_tx_pause_1",
+ "bpc_tx_pause_2",
+ "bpc_tx_pause_3",
+ "bpc_tx_pause_4",
+ "bpc_tx_pause_5",
+ "bpc_tx_pause_6",
+ "bpc_tx_pause_7",
+ "bpc_tx_zero_pause_0",
+ "bpc_tx_zero_pause_1",
+ "bpc_tx_zero_pause_2",
+ "bpc_tx_zero_pause_3",
+ "bpc_tx_zero_pause_4",
+ "bpc_tx_zero_pause_5",
+ "bpc_tx_zero_pause_6",
+ "bpc_tx_zero_pause_7",
+ "bpc_tx_first_pause_0",
+ "bpc_tx_first_pause_1",
+ "bpc_tx_first_pause_2",
+ "bpc_tx_first_pause_3",
+ "bpc_tx_first_pause_4",
+ "bpc_tx_first_pause_5",
+ "bpc_tx_first_pause_6",
+ "bpc_tx_first_pause_7",
+
+ "bpc_rx_pause_0",
+ "bpc_rx_pause_1",
+ "bpc_rx_pause_2",
+ "bpc_rx_pause_3",
+ "bpc_rx_pause_4",
+ "bpc_rx_pause_5",
+ "bpc_rx_pause_6",
+ "bpc_rx_pause_7",
+ "bpc_rx_zero_pause_0",
+ "bpc_rx_zero_pause_1",
+ "bpc_rx_zero_pause_2",
+ "bpc_rx_zero_pause_3",
+ "bpc_rx_zero_pause_4",
+ "bpc_rx_zero_pause_5",
+ "bpc_rx_zero_pause_6",
+ "bpc_rx_zero_pause_7",
+ "bpc_rx_first_pause_0",
+ "bpc_rx_first_pause_1",
+ "bpc_rx_first_pause_2",
+ "bpc_rx_first_pause_3",
+ "bpc_rx_first_pause_4",
+ "bpc_rx_first_pause_5",
+ "bpc_rx_first_pause_6",
+ "bpc_rx_first_pause_7",
+
+ "rad_rx_frames",
+ "rad_rx_octets",
+ "rad_rx_vlan_frames",
+ "rad_rx_ucast",
+ "rad_rx_ucast_octets",
+ "rad_rx_ucast_vlan",
+ "rad_rx_mcast",
+ "rad_rx_mcast_octets",
+ "rad_rx_mcast_vlan",
+ "rad_rx_bcast",
+ "rad_rx_bcast_octets",
+ "rad_rx_bcast_vlan",
+ "rad_rx_drops",
+
+ "rlb_rad_rx_frames",
+ "rlb_rad_rx_octets",
+ "rlb_rad_rx_vlan_frames",
+ "rlb_rad_rx_ucast",
+ "rlb_rad_rx_ucast_octets",
+ "rlb_rad_rx_ucast_vlan",
+ "rlb_rad_rx_mcast",
+ "rlb_rad_rx_mcast_octets",
+ "rlb_rad_rx_mcast_vlan",
+ "rlb_rad_rx_bcast",
+ "rlb_rad_rx_bcast_octets",
+ "rlb_rad_rx_bcast_vlan",
+ "rlb_rad_rx_drops",
+
+ "fc_rx_ucast_octets",
+ "fc_rx_ucast",
+ "fc_rx_ucast_vlan",
+ "fc_rx_mcast_octets",
+ "fc_rx_mcast",
+ "fc_rx_mcast_vlan",
+ "fc_rx_bcast_octets",
+ "fc_rx_bcast",
+ "fc_rx_bcast_vlan",
+
+ "fc_tx_ucast_octets",
+ "fc_tx_ucast",
+ "fc_tx_ucast_vlan",
+ "fc_tx_mcast_octets",
+ "fc_tx_mcast",
+ "fc_tx_mcast_vlan",
+ "fc_tx_bcast_octets",
+ "fc_tx_bcast",
+ "fc_tx_bcast_vlan",
+ "fc_tx_parity_errors",
+ "fc_tx_timeout",
+ "fc_tx_fid_parity_errors",
+};
+
+static int
+bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ cmd->supported = SUPPORTED_10000baseT_Full;
+ cmd->advertising = ADVERTISED_10000baseT_Full;
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_FIBRE;
+ cmd->port = PORT_FIBRE;
+ cmd->phy_address = 0;
+
+ if (netif_carrier_ok(netdev)) {
+ ethtool_cmd_speed_set(cmd, SPEED_10000);
+ cmd->duplex = DUPLEX_FULL;
+ } else {
+ ethtool_cmd_speed_set(cmd, -1);
+ cmd->duplex = -1;
+ }
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+
+ return 0;
+}
+
+static int
+bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ /* 10G full duplex setting supported only */
+ if (cmd->autoneg == AUTONEG_ENABLE)
+		return -EOPNOTSUPP;
+
+	if (ethtool_cmd_speed(cmd) == SPEED_10000 &&
+	    cmd->duplex == DUPLEX_FULL)
+		return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static void
+bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bfa_ioc_attr *ioc_attr;
+ unsigned long flags;
+
+ strcpy(drvinfo->driver, BNAD_NAME);
+ strcpy(drvinfo->version, BNAD_VERSION);
+
+ ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
+ if (ioc_attr) {
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
+ sizeof(drvinfo->fw_version) - 1);
+ kfree(ioc_attr);
+ }
+
+ strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
+}
+
+static void
+bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
+{
+ wolinfo->supported = 0;
+ wolinfo->wolopts = 0;
+}
+
+static int
+bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+	/* bna_lock is required to read bnad->cfg_flags consistently */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ coalesce->use_adaptive_rx_coalesce =
+ (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
+ BFI_COALESCING_TIMER_UNIT;
+ coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
+ BFI_COALESCING_TIMER_UNIT;
+ coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;
+
+ return 0;
+}
+
+static int
+bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+ int to_del = 0;
+
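+	/*
+	 * Timeouts are programmed in units of BFI_COALESCING_TIMER_UNIT
+	 * microseconds; reject zero and anything beyond the largest value
+	 * the coalescing timer can hold.
+	 */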
+ if (coalesce->rx_coalesce_usecs == 0 ||
+ coalesce->rx_coalesce_usecs >
+ BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
+ return -EINVAL;
+
+ if (coalesce->tx_coalesce_usecs == 0 ||
+ coalesce->tx_coalesce_usecs >
+ BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
+ return -EINVAL;
+
+ mutex_lock(&bnad->conf_mutex);
+ /*
+	 * No need to store rx_coalesce_usecs here: whenever DIM is
+	 * disabled, the current value can be obtained from the stack.
+ */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (coalesce->use_adaptive_rx_coalesce) {
+ if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
+ bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
+ bnad_dim_timer_start(bnad);
+ }
+ } else {
+ if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
+ bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
+			if (test_bit(BNAD_RF_DIM_TIMER_RUNNING,
+				     &bnad->run_flags)) {
+ clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
+ &bnad->run_flags);
+ to_del = 1;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (to_del)
+ del_timer_sync(&bnad->dim_timer);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad_rx_coalescing_timeo_set(bnad);
+ }
+ }
+ if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
+ BFI_COALESCING_TIMER_UNIT) {
+ bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
+ BFI_COALESCING_TIMER_UNIT;
+ bnad_tx_coalescing_timeo_set(bnad);
+ }
+
+ if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
+ BFI_COALESCING_TIMER_UNIT) {
+ bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
+ BFI_COALESCING_TIMER_UNIT;
+
+ if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
+ bnad_rx_coalescing_timeo_set(bnad);
+
+ }
+
+ /* Add Tx Inter-pkt DMA count? */
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+}
+
+static void
+bnad_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ringparam)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ ringparam->rx_max_pending = BNAD_MAX_RXQ_DEPTH;
+ ringparam->rx_mini_max_pending = 0;
+ ringparam->rx_jumbo_max_pending = 0;
+ ringparam->tx_max_pending = BNAD_MAX_TXQ_DEPTH;
+
+ ringparam->rx_pending = bnad->rxq_depth;
+	ringparam->rx_mini_pending = 0;
+	ringparam->rx_jumbo_pending = 0;
+ ringparam->tx_pending = bnad->txq_depth;
+}
+
+static int
+bnad_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ringparam)
+{
+ int i, current_err, err = 0;
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+ mutex_lock(&bnad->conf_mutex);
+ if (ringparam->rx_pending == bnad->rxq_depth &&
+ ringparam->tx_pending == bnad->txq_depth) {
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+ }
+
+ if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
+ ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH ||
+ !BNA_POWER_OF_2(ringparam->rx_pending)) {
+ mutex_unlock(&bnad->conf_mutex);
+ return -EINVAL;
+ }
+ if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
+ ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH ||
+ !BNA_POWER_OF_2(ringparam->tx_pending)) {
+ mutex_unlock(&bnad->conf_mutex);
+ return -EINVAL;
+ }
+
+ if (ringparam->rx_pending != bnad->rxq_depth) {
+ bnad->rxq_depth = ringparam->rx_pending;
+ if (!netif_running(netdev)) {
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+ }
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ bnad_cleanup_rx(bnad, i);
+ current_err = bnad_setup_rx(bnad, i);
+ if (current_err && !err)
+ err = current_err;
+ }
+
+ if (!err && bnad->rx_info[0].rx) {
+ /* restore rx configuration */
+ bnad_restore_vlans(bnad, 0);
+ bnad_enable_default_bcast(bnad);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI |
+ BNAD_CF_PROMISC);
+ bnad_set_rx_mode(netdev);
+ }
+ }
+ if (ringparam->tx_pending != bnad->txq_depth) {
+ bnad->txq_depth = ringparam->tx_pending;
+ if (!netif_running(netdev)) {
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+ }
+
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ bnad_cleanup_tx(bnad, i);
+ current_err = bnad_setup_tx(bnad, i);
+ if (current_err && !err)
+ err = current_err;
+ }
+ }
+
+ mutex_unlock(&bnad->conf_mutex);
+ return err;
+}
+
+static void
+bnad_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ pauseparam->autoneg = 0;
+ pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause;
+ pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause;
+}
+
+static int
+bnad_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bna_pause_config pause_config;
+ unsigned long flags;
+
+ if (pauseparam->autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+
+ mutex_lock(&bnad->conf_mutex);
+ if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause ||
+ pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) {
+ pause_config.rx_pause = pauseparam->rx_pause;
+ pause_config.tx_pause = pauseparam->tx_pause;
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ }
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+}
+
+static void
+bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ int i, j, q_num;
+ u32 bmap;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
+			BUG_ON(strlen(bnad_net_stats_strings[i]) >=
+			       ETH_GSTRING_LEN);
+ memcpy(string, bnad_net_stats_strings[i],
+ ETH_GSTRING_LEN);
+ string += ETH_GSTRING_LEN;
+ }
+ bmap = bna_tx_rid_mask(&bnad->bna);
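+		/* Walk the Tx function id bitmask: bit i set => txf i is active */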
+ for (i = 0; bmap; i++) {
+ if (bmap & 1) {
+ sprintf(string, "txf%d_ucast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_ucast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_ucast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_mcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_mcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_mcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_bcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_bcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_bcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_errors", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_filter_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_filter_mac_sa", i);
+ string += ETH_GSTRING_LEN;
+ }
+ bmap >>= 1;
+ }
+
+ bmap = bna_rx_rid_mask(&bnad->bna);
+ for (i = 0; bmap; i++) {
+ if (bmap & 1) {
+ sprintf(string, "rxf%d_ucast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_ucast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_ucast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_mcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_mcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_mcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_bcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_bcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_bcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_frame_drops", i);
+ string += ETH_GSTRING_LEN;
+ }
+ bmap >>= 1;
+ }
+
+ q_num = 0;
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ sprintf(string, "cq%d_producer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_consumer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_hw_producer_index",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_intr", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_poll", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_schedule", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_keep_poll", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_complete", q_num);
+ string += ETH_GSTRING_LEN;
+ q_num++;
+ }
+ }
+
+ q_num = 0;
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ sprintf(string, "rxq%d_packets", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_bytes", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_packets_with_error",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_allocbuf_failed", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_producer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_consumer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ q_num++;
+ if (bnad->rx_info[i].rx_ctrl[j].ccb &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[1] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[1]->rxq) {
+ sprintf(string, "rxq%d_packets", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_bytes", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string,
+ "rxq%d_packets_with_error", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_allocbuf_failed",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_producer_index",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_consumer_index",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ q_num++;
+ }
+ }
+ }
+
+ q_num = 0;
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ for (j = 0; j < bnad->num_txq_per_tx; j++) {
+ sprintf(string, "txq%d_packets", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_bytes", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_producer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_consumer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_hw_consumer_index",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ q_num++;
+ }
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+static int
+bnad_get_stats_count_locked(struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ int i, j, count = 0, rxf_active_num = 0, txf_active_num = 0;
+ u32 bmap;
+
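+	/*
+	 * The count computed here must match the strings emitted by
+	 * bnad_get_strings() and the values filled in by
+	 * bnad_get_ethtool_stats(); ethtool pairs names and values by index.
+	 */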
+ bmap = bna_tx_rid_mask(&bnad->bna);
+ for (i = 0; bmap; i++) {
+ if (bmap & 1)
+ txf_active_num++;
+ bmap >>= 1;
+ }
+ bmap = bna_rx_rid_mask(&bnad->bna);
+ for (i = 0; bmap; i++) {
+ if (bmap & 1)
+ rxf_active_num++;
+ bmap >>= 1;
+ }
+ count = BNAD_ETHTOOL_STATS_NUM +
+ txf_active_num * BNAD_NUM_TXF_COUNTERS +
+ rxf_active_num * BNAD_NUM_RXF_COUNTERS;
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
+ count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++)
+ if (bnad->rx_info[i].rx_ctrl[j].ccb &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
+ count += BNAD_NUM_RXQ_COUNTERS;
+ }
+
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
+ }
+ return count;
+}
+
+static int
+bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
+{
+ int i, j;
+ struct bna_rcb *rcb = NULL;
+ struct bna_tcb *tcb = NULL;
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++)
+ if (bnad->rx_info[i].rx_ctrl[j].ccb &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
+ buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
+ ccb->producer_index;
+ buf[bi++] = 0; /* ccb->consumer_index */
+ buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
+ ccb->hw_producer_index);
+
+ buf[bi++] = bnad->rx_info[i].
+ rx_ctrl[j].rx_intr_ctr;
+ buf[bi++] = bnad->rx_info[i].
+ rx_ctrl[j].rx_poll_ctr;
+ buf[bi++] = bnad->rx_info[i].
+ rx_ctrl[j].rx_schedule;
+ buf[bi++] = bnad->rx_info[i].
+ rx_ctrl[j].rx_keep_poll;
+ buf[bi++] = bnad->rx_info[i].
+ rx_ctrl[j].rx_complete;
+ }
+ }
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++)
+ if (bnad->rx_info[i].rx_ctrl[j].ccb) {
+ if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[0]->rxq) {
+ rcb = bnad->rx_info[i].rx_ctrl[j].
+ ccb->rcb[0];
+ buf[bi++] = rcb->rxq->rx_packets;
+ buf[bi++] = rcb->rxq->rx_bytes;
+ buf[bi++] = rcb->rxq->
+ rx_packets_with_error;
+ buf[bi++] = rcb->rxq->
+ rxbuf_alloc_failed;
+ buf[bi++] = rcb->producer_index;
+ buf[bi++] = rcb->consumer_index;
+ }
+ if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[1]->rxq) {
+ rcb = bnad->rx_info[i].rx_ctrl[j].
+ ccb->rcb[1];
+ buf[bi++] = rcb->rxq->rx_packets;
+ buf[bi++] = rcb->rxq->rx_bytes;
+ buf[bi++] = rcb->rxq->
+ rx_packets_with_error;
+ buf[bi++] = rcb->rxq->
+ rxbuf_alloc_failed;
+ buf[bi++] = rcb->producer_index;
+ buf[bi++] = rcb->consumer_index;
+ }
+ }
+ }
+
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ for (j = 0; j < bnad->num_txq_per_tx; j++)
+ if (bnad->tx_info[i].tcb[j] &&
+ bnad->tx_info[i].tcb[j]->txq) {
+ tcb = bnad->tx_info[i].tcb[j];
+ buf[bi++] = tcb->txq->tx_packets;
+ buf[bi++] = tcb->txq->tx_bytes;
+ buf[bi++] = tcb->producer_index;
+ buf[bi++] = tcb->consumer_index;
+ buf[bi++] = *(tcb->hw_consumer_index);
+ }
+ }
+
+ return bi;
+}
+
+static void
+bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
+ u64 *buf)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ int i, j, bi;
+ unsigned long flags;
+ struct rtnl_link_stats64 *net_stats64;
+ u64 *stats64;
+ u32 bmap;
+
+ mutex_lock(&bnad->conf_mutex);
+ if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
+ mutex_unlock(&bnad->conf_mutex);
+ return;
+ }
+
+ /*
+	 * bna_lock is used to synchronize reads from bna_stats, which is
+	 * written under the same lock.
+ */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bi = 0;
+ memset(buf, 0, stats->n_stats * sizeof(u64));
+
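+	/*
+	 * Fill order mirrors bnad_net_stats_strings[]: rtnl stats, driver
+	 * stats, fixed hardware stats, then per-txf/rxf and per-queue
+	 * counters.
+	 */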
+ net_stats64 = (struct rtnl_link_stats64 *)buf;
+ bnad_netdev_qstats_fill(bnad, net_stats64);
+ bnad_netdev_hwstats_fill(bnad, net_stats64);
+
+ bi = sizeof(*net_stats64) / sizeof(u64);
+
+ /* Get netif_queue_stopped from stack */
+ bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);
+
+ /* Fill driver stats into ethtool buffers */
+ stats64 = (u64 *)&bnad->stats.drv_stats;
+ for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
+ buf[bi++] = stats64[i];
+
+ /* Fill hardware stats excluding the rxf/txf into ethtool bufs */
+ stats64 = (u64 *) &bnad->stats.bna_stats->hw_stats;
+ for (i = 0;
+ i < offsetof(struct bfi_enet_stats, rxf_stats[0]) /
+ sizeof(u64);
+ i++)
+ buf[bi++] = stats64[i];
+
+ /* Fill txf stats into ethtool buffers */
+ bmap = bna_tx_rid_mask(&bnad->bna);
+ for (i = 0; bmap; i++) {
+ if (bmap & 1) {
+ stats64 = (u64 *)&bnad->stats.bna_stats->
+ hw_stats.txf_stats[i];
+ for (j = 0; j < sizeof(struct bfi_enet_stats_txf) /
+ sizeof(u64); j++)
+ buf[bi++] = stats64[j];
+ }
+ bmap >>= 1;
+ }
+
+ /* Fill rxf stats into ethtool buffers */
+ bmap = bna_rx_rid_mask(&bnad->bna);
+ for (i = 0; bmap; i++) {
+ if (bmap & 1) {
+ stats64 = (u64 *)&bnad->stats.bna_stats->
+ hw_stats.rxf_stats[i];
+ for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) /
+ sizeof(u64); j++)
+ buf[bi++] = stats64[j];
+ }
+ bmap >>= 1;
+ }
+
+ /* Fill per Q stats into ethtool buffers */
+ bi = bnad_per_q_stats_fill(bnad, buf, bi);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+static int
+bnad_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return bnad_get_stats_count_locked(netdev);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct ethtool_ops bnad_ethtool_ops = {
+ .get_settings = bnad_get_settings,
+ .set_settings = bnad_set_settings,
+ .get_drvinfo = bnad_get_drvinfo,
+ .get_wol = bnad_get_wol,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = bnad_get_coalesce,
+ .set_coalesce = bnad_set_coalesce,
+ .get_ringparam = bnad_get_ringparam,
+ .set_ringparam = bnad_set_ringparam,
+ .get_pauseparam = bnad_get_pauseparam,
+ .set_pauseparam = bnad_set_pauseparam,
+ .get_strings = bnad_get_strings,
+ .get_ethtool_stats = bnad_get_ethtool_stats,
+ .get_sset_count = bnad_get_sset_count
+};
+
+void
+bnad_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
new file mode 100644
index 000000000000..1b3e90dfbd9a
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -0,0 +1,107 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __CNA_H__
+#define __CNA_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+
+#define bfa_sm_fault(__event) do { \
+ pr_err("SM Assertion failure: %s: %d: event = %d\n", \
+ __FILE__, __LINE__, __event); \
+} while (0)
+
+extern char bfa_version[];
+
+#define CNA_FW_FILE_CT "ctfw.bin"
+#define CNA_FW_FILE_CT2 "ct2fw.bin"
+#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
+
+#pragma pack(1)
+
+#define MAC_ADDRLEN (6)
+typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t;
+
+#pragma pack()
+
+#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
+#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
+#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
+
+/*
+ * bfa_q_qe_init - to initialize a queue element
+ */
+#define bfa_q_qe_init(_qe) { \
+ bfa_q_next(_qe) = (struct list_head *) NULL; \
+ bfa_q_prev(_qe) = (struct list_head *) NULL; \
+}
+
+/*
+ * bfa_q_deq - dequeue an element from head of the queue
+ */
+#define bfa_q_deq(_q, _qe) { \
+ if (!list_empty(_q)) { \
+ (*((struct list_head **) (_qe))) = bfa_q_next(_q); \
+ bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
+ (struct list_head *) (_q); \
+ bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
+ bfa_q_qe_init(*((struct list_head **) _qe)); \
+ } else { \
+ *((struct list_head **)(_qe)) = NULL; \
+ } \
+}
+
+/*
+ * bfa_q_deq_tail - dequeue an element from tail of the queue
+ */
+#define bfa_q_deq_tail(_q, _qe) { \
+ if (!list_empty(_q)) { \
+ *((struct list_head **) (_qe)) = bfa_q_prev(_q); \
+ bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \
+ (struct list_head *) (_q); \
+ bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
+ bfa_q_qe_init(*((struct list_head **) _qe)); \
+ } else { \
+ *((struct list_head **) (_qe)) = (struct list_head *) NULL; \
+ } \
+}
+
+/*
+ * bfa_add_tail_head - enqueue an element at the head of queue
+ */
+#define bfa_q_enq_head(_q, _qe) { \
+	if (!((bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL)))	\
+ pr_err("Assertion failure: %s:%d: %d", \
+ __FILE__, __LINE__, \
+ (bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL));\
+ bfa_q_next(_qe) = bfa_q_next(_q); \
+ bfa_q_prev(_qe) = (struct list_head *) (_q); \
+ bfa_q_prev(bfa_q_next(_q)) = (struct list_head *) (_qe); \
+ bfa_q_next(_q) = (struct list_head *) (_qe); \
+}
+
+#endif /* __CNA_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
new file mode 100644
index 000000000000..725b9fff337f
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -0,0 +1,92 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include <linux/firmware.h>
+#include "bfi.h"
+#include "cna.h"
+
+const struct firmware *bfi_fw;
+static u32 *bfi_image_ct_cna, *bfi_image_ct2_cna;
+static u32 bfi_image_ct_cna_size, bfi_image_ct2_cna_size;
+
+static u32 *
+cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
+ u32 *bfi_image_size, char *fw_name)
+{
+ const struct firmware *fw;
+
+ if (request_firmware(&fw, fw_name, &pdev->dev)) {
+ pr_alert("Can't locate firmware %s\n", fw_name);
+ goto error;
+ }
+
+ *bfi_image = (u32 *)fw->data;
+ *bfi_image_size = fw->size/sizeof(u32);
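+	/* The size is kept in 32-bit words to match the chunk offsets
+	 * used by bfa_cb_image_get_chunk().
+	 */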
+ bfi_fw = fw;
+
+ return *bfi_image;
+error:
+ return NULL;
+}
+
+u32 *
+cna_get_firmware_buf(struct pci_dev *pdev)
+{
+ if (pdev->device == BFA_PCI_DEVICE_ID_CT2) {
+ if (bfi_image_ct2_cna_size == 0)
+ cna_read_firmware(pdev, &bfi_image_ct2_cna,
+ &bfi_image_ct2_cna_size, CNA_FW_FILE_CT2);
+ return bfi_image_ct2_cna;
+ } else if (bfa_asic_id_ct(pdev->device)) {
+ if (bfi_image_ct_cna_size == 0)
+ cna_read_firmware(pdev, &bfi_image_ct_cna,
+ &bfi_image_ct_cna_size, CNA_FW_FILE_CT);
+ return bfi_image_ct_cna;
+ }
+
+ return NULL;
+}
+
+u32 *
+bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off)
+{
+ switch (asic_gen) {
+ case BFI_ASIC_GEN_CT:
+		return (u32 *)(bfi_image_ct_cna + off);
+	case BFI_ASIC_GEN_CT2:
+		return (u32 *)(bfi_image_ct2_cna + off);
+ default:
+ return NULL;
+ }
+}
+
+u32
+bfa_cb_image_get_size(enum bfi_asic_gen asic_gen)
+{
+ switch (asic_gen) {
+ case BFI_ASIC_GEN_CT:
+		return bfi_image_ct_cna_size;
+	case BFI_ASIC_GEN_CT2:
+		return bfi_image_ct2_cna_size;
+ default:
+ return 0;
+ }
+}
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
new file mode 100644
index 000000000000..98849a1fc749
--- /dev/null
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -0,0 +1,45 @@
+#
+# Atmel device configuration
+#
+
+config HAVE_NET_MACB
+ bool
+
+config NET_ATMEL
+ bool "Atmel devices"
+ depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y.
+ Make sure you know the name of your card. Read the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ If unsure, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the remaining Atmel network card questions. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+if NET_ATMEL
+
+config ARM_AT91_ETHER
+ tristate "AT91RM9200 Ethernet support"
+ depends on ARM && ARCH_AT91RM9200
+ select NET_CORE
+ select MII
+ ---help---
+ If you wish to compile a kernel for the AT91RM9200 and enable
+ ethernet support, then you should always answer Y to this.
+
+config MACB
+ tristate "Atmel MACB support"
+ depends on HAVE_NET_MACB
+ select PHYLIB
+ ---help---
+ The Atmel MACB ethernet interface is found on many AT32 and AT91
+ parts. Say Y to include support for the MACB chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called macb.
+
+endif # NET_ATMEL
diff --git a/drivers/net/ethernet/cadence/Makefile b/drivers/net/ethernet/cadence/Makefile
new file mode 100644
index 000000000000..9068b8331ed1
--- /dev/null
+++ b/drivers/net/ethernet/cadence/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Atmel network device drivers.
+#
+
+obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o
+obj-$(CONFIG_MACB) += macb.o
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
new file mode 100644
index 000000000000..1b0ba8c819f7
--- /dev/null
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -0,0 +1,1254 @@
+/*
+ * Ethernet driver for the Atmel AT91RM9200 (Thunder)
+ *
+ * Copyright (C) 2003 SAN People (Pty) Ltd
+ *
+ * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
+ * Initial version by Rick Bronson 01/11/2003
+ *
+ * Intel LXT971A PHY support by Christopher Bahns & David Knickerbocker
+ * (Polaroid Corporation)
+ *
+ * Realtek RTL8201(B)L PHY support by Roman Avramenko <roman@imsystems.ru>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/gfp.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/mach-types.h>
+
+#include <mach/at91rm9200_emac.h>
+#include <mach/gpio.h>
+#include <mach/board.h>
+
+#include "at91_ether.h"
+
+#define DRV_NAME "at91_ether"
+#define DRV_VERSION "1.0"
+
+#define LINK_POLL_INTERVAL (HZ)
+
+/* ..................................................................... */
+
+/*
+ * Read from an EMAC register.
+ */
+static inline unsigned long at91_emac_read(unsigned int reg)
+{
+ void __iomem *emac_base = (void __iomem *)AT91_VA_BASE_EMAC;
+
+ return __raw_readl(emac_base + reg);
+}
+
+/*
+ * Write to an EMAC register.
+ */
+static inline void at91_emac_write(unsigned int reg, unsigned long value)
+{
+ void __iomem *emac_base = (void __iomem *)AT91_VA_BASE_EMAC;
+
+ __raw_writel(value, emac_base + reg);
+}
+
+/* ........................... PHY INTERFACE ........................... */
+
+/*
+ * Enable the MDIO bit in MAC control register
+ * When not called from an interrupt-handler, access to the PHY must be
+ * protected by a spinlock.
+ */
+static void enable_mdi(void)
+{
+ unsigned long ctl;
+
+ ctl = at91_emac_read(AT91_EMAC_CTL);
+ at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_MPE); /* enable management port */
+}
+
+/*
+ * Disable the MDIO bit in the MAC control register
+ */
+static void disable_mdi(void)
+{
+ unsigned long ctl;
+
+ ctl = at91_emac_read(AT91_EMAC_CTL);
+ at91_emac_write(AT91_EMAC_CTL, ctl & ~AT91_EMAC_MPE); /* disable management port */
+}
+
+/*
+ * Wait until the PHY operation is complete.
+ */
+static inline void at91_phy_wait(void)
+{
+ unsigned long timeout = jiffies + 2;
+
+ while (!(at91_emac_read(AT91_EMAC_SR) & AT91_EMAC_SR_IDLE)) {
+ if (time_after(jiffies, timeout)) {
+			printk("at91_ether: MDIO timeout\n");
+ break;
+ }
+ cpu_relax();
+ }
+}
+
+/*
+ * Write a value to a PHY register.
+ * Note: MDI interface is assumed to already have been enabled.
+ */
+static void write_phy(unsigned char phy_addr, unsigned char address, unsigned int value)
+{
+ at91_emac_write(AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_W
+ | ((phy_addr & 0x1f) << 23) | (address << 18) | (value & AT91_EMAC_DATA));
+
+ /* Wait until IDLE bit in Network Status register is cleared */
+ at91_phy_wait();
+}
+
+/*
+ * Read value stored in a PHY register.
+ * Note: MDI interface is assumed to already have been enabled.
+ */
+static void read_phy(unsigned char phy_addr, unsigned char address, unsigned int *value)
+{
+ at91_emac_write(AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_R
+ | ((phy_addr & 0x1f) << 23) | (address << 18));
+
+ /* Wait until IDLE bit in Network Status register is cleared */
+ at91_phy_wait();
+
+ *value = at91_emac_read(AT91_EMAC_MAN) & AT91_EMAC_DATA;
+}
+
+/* ........................... PHY MANAGEMENT .......................... */
+
+/*
+ * Access the PHY to determine the current link speed and mode, and update the
+ * MAC accordingly.
+ * If there is no link, or if auto-negotiation is still in progress, no
+ * changes are made.
+ */
+static void update_linkspeed(struct net_device *dev, int silent)
+{
+ struct at91_private *lp = netdev_priv(dev);
+ unsigned int bmsr, bmcr, lpa, mac_cfg;
+ unsigned int speed, duplex;
+
+ if (!mii_link_ok(&lp->mii)) { /* no link */
+ netif_carrier_off(dev);
+ if (!silent)
+ printk(KERN_INFO "%s: Link down.\n", dev->name);
+ return;
+ }
+
+ /* Link up, or auto-negotiation still in progress */
+ read_phy(lp->phy_address, MII_BMSR, &bmsr);
+ read_phy(lp->phy_address, MII_BMCR, &bmcr);
+ if (bmcr & BMCR_ANENABLE) { /* AutoNegotiation is enabled */
+ if (!(bmsr & BMSR_ANEGCOMPLETE))
+			return;			/* Do nothing - another interrupt will fire when negotiation completes */
+
+ read_phy(lp->phy_address, MII_LPA, &lpa);
+		if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF))
+			speed = SPEED_100;
+		else
+			speed = SPEED_10;
+		if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL))
+			duplex = DUPLEX_FULL;
+		else
+			duplex = DUPLEX_HALF;
+ } else {
+ speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
+ duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+ }
+
+ /* Update the MAC */
+ mac_cfg = at91_emac_read(AT91_EMAC_CFG) & ~(AT91_EMAC_SPD | AT91_EMAC_FD);
+ if (speed == SPEED_100) {
+ if (duplex == DUPLEX_FULL) /* 100 Full Duplex */
+ mac_cfg |= AT91_EMAC_SPD | AT91_EMAC_FD;
+ else /* 100 Half Duplex */
+ mac_cfg |= AT91_EMAC_SPD;
+ } else {
+ if (duplex == DUPLEX_FULL) /* 10 Full Duplex */
+ mac_cfg |= AT91_EMAC_FD;
+		/* else: 10 Half Duplex needs no extra flags */
+ }
+ at91_emac_write(AT91_EMAC_CFG, mac_cfg);
+
+ if (!silent)
+ printk(KERN_INFO "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
+ netif_carrier_on(dev);
+}
+
+/*
+ * Handle interrupts from the PHY
+ */
+static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct at91_private *lp = netdev_priv(dev);
+ unsigned int phy;
+
+ /*
+	 * This handler is triggered on both edges, but the PHY chips expect
+ * level-triggering. We therefore have to check if the PHY actually has
+ * an IRQ pending.
+ */
+ enable_mdi();
+ if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
+ read_phy(lp->phy_address, MII_DSINTR_REG, &phy); /* ack interrupt in Davicom PHY */
+ if (!(phy & (1 << 0)))
+ goto done;
+ }
+ else if (lp->phy_type == MII_LXT971A_ID) {
+ read_phy(lp->phy_address, MII_ISINTS_REG, &phy); /* ack interrupt in Intel PHY */
+ if (!(phy & (1 << 2)))
+ goto done;
+ }
+ else if (lp->phy_type == MII_BCM5221_ID) {
+ read_phy(lp->phy_address, MII_BCMINTR_REG, &phy); /* ack interrupt in Broadcom PHY */
+ if (!(phy & (1 << 0)))
+ goto done;
+ }
+ else if (lp->phy_type == MII_KS8721_ID) {
+ read_phy(lp->phy_address, MII_TPISTATUS, &phy); /* ack interrupt in Micrel PHY */
+ if (!(phy & ((1 << 2) | 1)))
+ goto done;
+ }
+ else if (lp->phy_type == MII_T78Q21x3_ID) { /* ack interrupt in Teridian PHY */
+ read_phy(lp->phy_address, MII_T78Q21INT_REG, &phy);
+ if (!(phy & ((1 << 2) | 1)))
+ goto done;
+ }
+ else if (lp->phy_type == MII_DP83848_ID) {
+ read_phy(lp->phy_address, MII_DPPHYSTS_REG, &phy); /* ack interrupt in DP83848 PHY */
+ if (!(phy & (1 << 7)))
+ goto done;
+ }
+
+ update_linkspeed(dev, 0);
+
+done:
+ disable_mdi();
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Initialize and enable the PHY interrupt for link-state changes
+ */
+static void enable_phyirq(struct net_device *dev)
+{
+ struct at91_private *lp = netdev_priv(dev);
+ unsigned int dsintr, irq_number;
+ int status;
+
+ irq_number = lp->board_data.phy_irq_pin;
+ if (!irq_number) {
+ /*
+ * PHY doesn't have an IRQ pin (RTL8201, DP83847, AC101L),
+ * or board does not have it connected.
+ */
+ mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
+ return;
+ }
+
+ status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev);
+ if (status) {
+ printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status);
+ return;
+ }
+
+ spin_lock_irq(&lp->lock);
+ enable_mdi();
+
+ if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */
+ read_phy(lp->phy_address, MII_DSINTR_REG, &dsintr);
+ dsintr = dsintr & ~0xf00; /* clear bits 8..11 */
+ write_phy(lp->phy_address, MII_DSINTR_REG, dsintr);
+ }
+ else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */
+ read_phy(lp->phy_address, MII_ISINTE_REG, &dsintr);
+ dsintr = dsintr | 0xf2; /* set bits 1, 4..7 */
+ write_phy(lp->phy_address, MII_ISINTE_REG, dsintr);
+ }
+ else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */
+ dsintr = (1 << 15) | ( 1 << 14);
+ write_phy(lp->phy_address, MII_BCMINTR_REG, dsintr);
+ }
+ else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */
+ dsintr = (1 << 10) | ( 1 << 8);
+ write_phy(lp->phy_address, MII_TPISTATUS, dsintr);
+ }
+ else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */
+ read_phy(lp->phy_address, MII_T78Q21INT_REG, &dsintr);
+ dsintr = dsintr | 0x500; /* set bits 8, 10 */
+ write_phy(lp->phy_address, MII_T78Q21INT_REG, dsintr);
+ }
+ else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */
+ read_phy(lp->phy_address, MII_DPMISR_REG, &dsintr);
+ dsintr = dsintr | 0x3c; /* set bits 2..5 */
+ write_phy(lp->phy_address, MII_DPMISR_REG, dsintr);
+ read_phy(lp->phy_address, MII_DPMICR_REG, &dsintr);
+ dsintr = dsintr | 0x3; /* set bits 0,1 */
+ write_phy(lp->phy_address, MII_DPMICR_REG, dsintr);
+ }
+
+ disable_mdi();
+ spin_unlock_irq(&lp->lock);
+}
+
+/*
+ * Disable the PHY interrupt
+ */
+static void disable_phyirq(struct net_device *dev)
+{
+ struct at91_private *lp = netdev_priv(dev);
+ unsigned int dsintr;
+ unsigned int irq_number;
+
+ irq_number = lp->board_data.phy_irq_pin;
+ if (!irq_number) {
+ del_timer_sync(&lp->check_timer);
+ return;
+ }
+
+ spin_lock_irq(&lp->lock);
+ enable_mdi();
+
+ if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */
+ read_phy(lp->phy_address, MII_DSINTR_REG, &dsintr);
+ dsintr = dsintr | 0xf00; /* set bits 8..11 */
+ write_phy(lp->phy_address, MII_DSINTR_REG, dsintr);
+ }
+ else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */
+ read_phy(lp->phy_address, MII_ISINTE_REG, &dsintr);
+ dsintr = dsintr & ~0xf2; /* clear bits 1, 4..7 */
+ write_phy(lp->phy_address, MII_ISINTE_REG, dsintr);
+ }
+ else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */
+ read_phy(lp->phy_address, MII_BCMINTR_REG, &dsintr);
+ dsintr = ~(1 << 14);
+ write_phy(lp->phy_address, MII_BCMINTR_REG, dsintr);
+ }
+ else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */
+ read_phy(lp->phy_address, MII_TPISTATUS, &dsintr);
+ dsintr = ~((1 << 10) | (1 << 8));
+ write_phy(lp->phy_address, MII_TPISTATUS, dsintr);
+ }
+ else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */
+ read_phy(lp->phy_address, MII_T78Q21INT_REG, &dsintr);
+ dsintr = dsintr & ~0x500; /* clear bits 8, 10 */
+ write_phy(lp->phy_address, MII_T78Q21INT_REG, dsintr);
+ }
+ else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */
+ read_phy(lp->phy_address, MII_DPMICR_REG, &dsintr);
+ dsintr = dsintr & ~0x3; /* clear bits 0, 1 */
+ write_phy(lp->phy_address, MII_DPMICR_REG, dsintr);
+ read_phy(lp->phy_address, MII_DPMISR_REG, &dsintr);
+ dsintr = dsintr & ~0x3c; /* clear bits 2..5 */
+ write_phy(lp->phy_address, MII_DPMISR_REG, dsintr);
+ }
+
+ disable_mdi();
+ spin_unlock_irq(&lp->lock);
+
+ free_irq(irq_number, dev); /* Free interrupt handler */
+}
+
+/*
+ * Perform a software reset of the PHY.
+ */
+#if 0
+static void reset_phy(struct net_device *dev)
+{
+ struct at91_private *lp = netdev_priv(dev);
+ unsigned int bmcr;
+
+ spin_lock_irq(&lp->lock);
+ enable_mdi();
+
+ /* Perform PHY reset */
+ write_phy(lp->phy_address, MII_BMCR, BMCR_RESET);
+
+ /* Wait until PHY reset is complete */
+ do {
+ read_phy(lp->phy_address, MII_BMCR, &bmcr);
+ } while (!(bmcr & BMCR_RESET));
+
+ disable_mdi();
+ spin_unlock_irq(&lp->lock);
+}
+#endif
+
+static void at91ether_check_link(unsigned long dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct at91_private *lp = netdev_priv(dev);
+
+ enable_mdi();
+ update_linkspeed(dev, 1);
+ disable_mdi();
+
+ mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
+}
+
+/* ......................... ADDRESS MANAGEMENT ........................ */
+
+/*
+ * NOTE: Your bootloader must always set the MAC address correctly before
+ * booting into Linux.
+ *
+ * - It must always set the MAC address after reset, even if it doesn't
+ * happen to access the Ethernet while it's booting. Some versions of
+ * U-Boot on the AT91RM9200-DK do not do this.
+ *
+ * - Likewise it must store the addresses in the correct byte order.
+ * MicroMonitor (uMon) on the CSB337 does this incorrectly (and
+ * continues to do so, for bug-compatibility).
+ */
+
+static short __init unpack_mac_address(struct net_device *dev, unsigned int hi, unsigned int lo)
+{
+ char addr[6];
+
+ if (machine_is_csb337()) {
+ addr[5] = (lo & 0xff); /* The CSB337 bootloader stores the MAC the wrong-way around */
+ addr[4] = (lo & 0xff00) >> 8;
+ addr[3] = (lo & 0xff0000) >> 16;
+ addr[2] = (lo & 0xff000000) >> 24;
+ addr[1] = (hi & 0xff);
+ addr[0] = (hi & 0xff00) >> 8;
+ }
+ else {
+ addr[0] = (lo & 0xff);
+ addr[1] = (lo & 0xff00) >> 8;
+ addr[2] = (lo & 0xff0000) >> 16;
+ addr[3] = (lo & 0xff000000) >> 24;
+ addr[4] = (hi & 0xff);
+ addr[5] = (hi & 0xff00) >> 8;
+ }
+
+ if (is_valid_ether_addr(addr)) {
+ memcpy(dev->dev_addr, &addr, 6);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Set the ethernet MAC address in dev->dev_addr
+ */
+static void __init get_mac_address(struct net_device *dev)
+{
+ /* Check Specific-Address 1 */
+ if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA1H), at91_emac_read(AT91_EMAC_SA1L)))
+ return;
+ /* Check Specific-Address 2 */
+ if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA2H), at91_emac_read(AT91_EMAC_SA2L)))
+ return;
+ /* Check Specific-Address 3 */
+ if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA3H), at91_emac_read(AT91_EMAC_SA3L)))
+ return;
+ /* Check Specific-Address 4 */
+ if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA4H), at91_emac_read(AT91_EMAC_SA4L)))
+ return;
+
+ printk(KERN_ERR "at91_ether: Your bootloader did not configure a MAC address.\n");
+}
+
+/*
+ * Program the hardware MAC address from dev->dev_addr.
+ */
+static void update_mac_address(struct net_device *dev)
+{
+ at91_emac_write(AT91_EMAC_SA1L, (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | (dev->dev_addr[1] << 8) | (dev->dev_addr[0]));
+ at91_emac_write(AT91_EMAC_SA1H, (dev->dev_addr[5] << 8) | (dev->dev_addr[4]));
+
+ at91_emac_write(AT91_EMAC_SA2L, 0);
+ at91_emac_write(AT91_EMAC_SA2H, 0);
+}
+
+/*
+ * Store the new hardware address in dev->dev_addr, and update the MAC.
+ */
+static int set_mac_address(struct net_device *dev, void* addr)
+{
+ struct sockaddr *address = addr;
+
+ if (!is_valid_ether_addr(address->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
+ update_mac_address(dev);
+
+	printk(KERN_INFO "%s: Setting MAC address to %pM\n", dev->name,
+ dev->dev_addr);
+
+ return 0;
+}
+
+static inline int hash_bit_value(int bitnr, __u8 *addr)
+{
+ if (addr[bitnr / 8] & (1 << (bitnr % 8)))
+ return 1;
+ return 0;
+}
+
+/*
+ * The hash address register is 64 bits long and takes up two locations in the memory map.
+ * The least significant bits are stored in EMAC_HSL and the most significant
+ * bits in EMAC_HSH.
+ *
+ * The unicast hash enable and the multicast hash enable bits in the network configuration
+ * register enable the reception of hash matched frames. The destination address is
+ * reduced to a 6 bit index into the 64 bit hash register using the following hash function.
+ * The hash function is an exclusive or of every sixth bit of the destination address.
+ * hash_index[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
+ * hash_index[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
+ * hash_index[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
+ * hash_index[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
+ * hash_index[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
+ * hash_index[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
+ * da[0] represents the least significant bit of the first byte received, that is, the multicast/
+ * unicast indicator, and da[47] represents the most significant bit of the last byte
+ * received.
+ * If the hash index points to a bit that is set in the hash register then the frame will be
+ * matched according to whether the frame is multicast or unicast.
+ * A multicast match will be signalled if the multicast hash enable bit is set, da[0] is 1 and
+ * the hash index points to a bit set in the hash register.
+ * A unicast match will be signalled if the unicast hash enable bit is set, da[0] is 0 and the
+ * hash index points to a bit set in the hash register.
+ * To receive all multicast frames, the hash register should be set with all ones and the
+ * multicast hash enable bit should be set in the network configuration register.
+ */
+
+/*
+ * Return the hash index value for the specified address.
+ */
+static int hash_get_index(__u8 *addr)
+{
+ int i, j, bitval;
+ int hash_index = 0;
+
+ for (j = 0; j < 6; j++) {
+ for (i = 0, bitval = 0; i < 8; i++)
+ bitval ^= hash_bit_value(i*6 + j, addr);
+
+ hash_index |= (bitval << j);
+ }
+
+ return hash_index;
+}
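+
+/*
+ * Worked example: for the broadcast address ff:ff:ff:ff:ff:ff every da[]
+ * bit is 1, so each index bit XORs eight ones down to 0 and
+ * hash_get_index() returns 0; with the multicast hash enable bit set,
+ * bit 0 of the hash register then matches that address.
+ */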
+
+/*
+ * Add multicast addresses to the internal multicast-hash table.
+ */
+static void at91ether_sethashtable(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ unsigned long mc_filter[2];
+ unsigned int bitnr;
+
+ mc_filter[0] = mc_filter[1] = 0;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ bitnr = hash_get_index(ha->addr);
+ mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
+ }
+
+ at91_emac_write(AT91_EMAC_HSL, mc_filter[0]);
+ at91_emac_write(AT91_EMAC_HSH, mc_filter[1]);
+}
+
+/*
+ * Enable/Disable promiscuous and multicast modes.
+ */
+static void at91ether_set_multicast_list(struct net_device *dev)
+{
+ unsigned long cfg;
+
+ cfg = at91_emac_read(AT91_EMAC_CFG);
+
+ if (dev->flags & IFF_PROMISC) /* Enable promiscuous mode */
+ cfg |= AT91_EMAC_CAF;
+	else					/* Disable promiscuous mode */
+ cfg &= ~AT91_EMAC_CAF;
+
+ if (dev->flags & IFF_ALLMULTI) { /* Enable all multicast mode */
+ at91_emac_write(AT91_EMAC_HSH, -1);
+ at91_emac_write(AT91_EMAC_HSL, -1);
+ cfg |= AT91_EMAC_MTI;
+ } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */
+ at91ether_sethashtable(dev);
+ cfg |= AT91_EMAC_MTI;
+	} else {				/* Disable all multicast mode */
+ at91_emac_write(AT91_EMAC_HSH, 0);
+ at91_emac_write(AT91_EMAC_HSL, 0);
+ cfg &= ~AT91_EMAC_MTI;
+ }
+
+ at91_emac_write(AT91_EMAC_CFG, cfg);
+}
+
+/* ......................... ETHTOOL SUPPORT ........................... */
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ unsigned int value;
+
+ read_phy(phy_id, location, &value);
+ return value;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ write_phy(phy_id, location, value);
+}
+
+static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct at91_private *lp = netdev_priv(dev);
+ int ret;
+
+ spin_lock_irq(&lp->lock);
+ enable_mdi();
+
+ ret = mii_ethtool_gset(&lp->mii, cmd);
+
+ disable_mdi();
+ spin_unlock_irq(&lp->lock);
+
+ if (lp->phy_media == PORT_FIBRE) { /* override media type since mii.c doesn't know */
+ cmd->supported = SUPPORTED_FIBRE;
+ cmd->port = PORT_FIBRE;
+ }
+
+ return ret;
+}
+
+static int at91ether_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct at91_private *lp = netdev_priv(dev);
+ int ret;
+
+ spin_lock_irq(&lp->lock);
+ enable_mdi();
+
+ ret = mii_ethtool_sset(&lp->mii, cmd);
+
+ disable_mdi();
+ spin_unlock_irq(&lp->lock);
+
+ return ret;
+}
+
+static int at91ether_nwayreset(struct net_device *dev)
+{
+ struct at91_private *lp = netdev_priv(dev);
+ int ret;
+
+ spin_lock_irq(&lp->lock);
+ enable_mdi();
+
+ ret = mii_nway_restart(&lp->mii);
+
+ disable_mdi();
+ spin_unlock_irq(&lp->lock);
+
+ return ret;
+}
+
+static void at91ether_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops at91ether_ethtool_ops = {
+ .get_settings = at91ether_get_settings,
+ .set_settings = at91ether_set_settings,
+ .get_drvinfo = at91ether_get_drvinfo,
+ .nway_reset = at91ether_nwayreset,
+ .get_link = ethtool_op_get_link,
+};
+
+static int at91ether_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct at91_private *lp = netdev_priv(dev);
+ int res;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irq(&lp->lock);
+ enable_mdi();
+ res = generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL);
+ disable_mdi();
+ spin_unlock_irq(&lp->lock);
+
+ return res;
+}
+
+/* ................................ MAC ................................ */
+
+/*
+ * Initialize and start the Receiver and Transmit subsystems
+ */
+static void at91ether_start(struct net_device *dev)
+{
+ struct at91_private *lp = netdev_priv(dev);
+ struct recv_desc_bufs *dlist, *dlist_phys;
+ int i;
+ unsigned long ctl;
+
+ dlist = lp->dlist;
+ dlist_phys = lp->dlist_phys;
+
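+ /* Bit 0 of each descriptor's address word is the ownership flag
+ * (EMAC_DESC_DONE) and bit 1 the wrap flag (EMAC_DESC_WRAP), so the
+ * receive buffers pointed to must be at least word-aligned. */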
+ for (i = 0; i < MAX_RX_DESCR; i++) {
+ dlist->descriptors[i].addr = (unsigned int) &dlist_phys->recv_buf[i][0];
+ dlist->descriptors[i].size = 0;
+ }
+
+ /* Set the Wrap bit on the last descriptor */
+ dlist->descriptors[i-1].addr |= EMAC_DESC_WRAP;
+
+ /* Reset buffer index */
+ lp->rxBuffIndex = 0;
+
+ /* Program address of descriptor list in Rx Buffer Queue register */
+ at91_emac_write(AT91_EMAC_RBQP, (unsigned long) dlist_phys);
+
+ /* Enable Receive and Transmit */
+ ctl = at91_emac_read(AT91_EMAC_CTL);
+ at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_RE | AT91_EMAC_TE);
+}
+
+/*
+ * Open the ethernet interface
+ */
+static int at91ether_open(struct net_device *dev)
+{
+ struct at91_private *lp = netdev_priv(dev);
+ unsigned long ctl;
+
+ if (!is_valid_ether_addr(dev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ clk_enable(lp->ether_clk); /* Re-enable Peripheral clock */
+
+ /* Clear internal statistics */
+ ctl = at91_emac_read(AT91_EMAC_CTL);
+ at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_CSR);
+
+ /* Update the MAC address (in case the user has changed it) */
+ update_mac_address(dev);
+
+ /* Enable PHY interrupt */
+ enable_phyirq(dev);
+
+ /* Enable MAC interrupts */
+ at91_emac_write(AT91_EMAC_IER, AT91_EMAC_RCOM | AT91_EMAC_RBNA
+ | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM
+ | AT91_EMAC_ROVR | AT91_EMAC_ABT);
+
+ /* Determine current link speed */
+ spin_lock_irq(&lp->lock);
+ enable_mdi();
+ update_linkspeed(dev, 0);
+ disable_mdi();
+ spin_unlock_irq(&lp->lock);
+
+ at91ether_start(dev);
+ netif_start_queue(dev);
+ return 0;
+}
+
+/*
+ * Close the interface
+ */
+static int at91ether_close(struct net_device *dev)
+{
+ struct at91_private *lp = netdev_priv(dev);
+ unsigned long ctl;
+
+ /* Disable Receiver and Transmitter */
+ ctl = at91_emac_read(AT91_EMAC_CTL);
+ at91_emac_write(AT91_EMAC_CTL, ctl & ~(AT91_EMAC_TE | AT91_EMAC_RE));
+
+ /* Disable PHY interrupt */
+ disable_phyirq(dev);
+
+ /* Disable MAC interrupts */
+ at91_emac_write(AT91_EMAC_IDR, AT91_EMAC_RCOM | AT91_EMAC_RBNA
+ | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM
+ | AT91_EMAC_ROVR | AT91_EMAC_ABT);
+
+ netif_stop_queue(dev);
+
+ clk_disable(lp->ether_clk); /* Disable Peripheral clock */
+
+ return 0;
+}
+
+/*
+ * Transmit packet.
+ */
+static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct at91_private *lp = netdev_priv(dev);
+
+ if (at91_emac_read(AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) {
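+ /* BNQ set means the transmit buffer is free: the EMAC queues only
+ * one frame at a time, so stop the queue now and wake it again
+ * from the TCOM interrupt once transmission completes. */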
+ netif_stop_queue(dev);
+
+ /* Store packet information (to free when Tx completed) */
+ lp->skb = skb;
+ lp->skb_length = skb->len;
+ lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE);
+ dev->stats.tx_bytes += skb->len;
+
+ /* Set address of the data in the Transmit Address register */
+ at91_emac_write(AT91_EMAC_TAR, lp->skb_physaddr);
+ /* Set length of the packet in the Transmit Control register */
+ at91_emac_write(AT91_EMAC_TCR, skb->len);
+
+ } else {
+ printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n");
+ return NETDEV_TX_BUSY; /* The skb is not consumed here; returning
+ NETDEV_TX_BUSY tells the networking core to
+ requeue it and retry the transmission later */
+ }
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Update the current statistics from the internal statistics registers.
+ */
+static struct net_device_stats *at91ether_stats(struct net_device *dev)
+{
+ int ale, lenerr, seqe, lcol, ecol;
+
+ if (netif_running(dev)) {
+ dev->stats.rx_packets += at91_emac_read(AT91_EMAC_OK); /* Good frames received */
+ ale = at91_emac_read(AT91_EMAC_ALE);
+ dev->stats.rx_frame_errors += ale; /* Alignment errors */
+ lenerr = at91_emac_read(AT91_EMAC_ELR) + at91_emac_read(AT91_EMAC_USF);
+ dev->stats.rx_length_errors += lenerr; /* Excessive Length or Undersize Frame error */
+ seqe = at91_emac_read(AT91_EMAC_SEQE);
+ dev->stats.rx_crc_errors += seqe; /* CRC error */
+ dev->stats.rx_fifo_errors += at91_emac_read(AT91_EMAC_DRFC); /* Receive buffer not available */
+ dev->stats.rx_errors += (ale + lenerr + seqe
+ + at91_emac_read(AT91_EMAC_CDE) + at91_emac_read(AT91_EMAC_RJB));
+
+ dev->stats.tx_packets += at91_emac_read(AT91_EMAC_FRA); /* Frames successfully transmitted */
+ dev->stats.tx_fifo_errors += at91_emac_read(AT91_EMAC_TUE); /* Transmit FIFO underruns */
+ dev->stats.tx_carrier_errors += at91_emac_read(AT91_EMAC_CSE); /* Carrier Sense errors */
+ dev->stats.tx_heartbeat_errors += at91_emac_read(AT91_EMAC_SQEE);/* Heartbeat error */
+
+ lcol = at91_emac_read(AT91_EMAC_LCOL);
+ ecol = at91_emac_read(AT91_EMAC_ECOL);
+ dev->stats.tx_window_errors += lcol; /* Late collisions */
+ dev->stats.tx_aborted_errors += ecol; /* 16 collisions */
+
+ dev->stats.collisions += (at91_emac_read(AT91_EMAC_SCOL) + at91_emac_read(AT91_EMAC_MCOL) + lcol + ecol);
+ }
+ return &dev->stats;
+}
+
+/*
+ * Extract received frames from the buffer descriptors and send them to the upper layers.
+ * (Called from interrupt context)
+ */
+static void at91ether_rx(struct net_device *dev)
+{
+ struct at91_private *lp = netdev_priv(dev);
+ struct recv_desc_bufs *dlist;
+ unsigned char *p_recv;
+ struct sk_buff *skb;
+ unsigned int pktlen;
+
+ dlist = lp->dlist;
+ while (dlist->descriptors[lp->rxBuffIndex].addr & EMAC_DESC_DONE) {
+ p_recv = dlist->recv_buf[lp->rxBuffIndex];
+ pktlen = dlist->descriptors[lp->rxBuffIndex].size & 0x7ff; /* Length of frame including FCS */
+ skb = dev_alloc_skb(pktlen + 2);
+ if (skb != NULL) {
+ skb_reserve(skb, 2);
+ memcpy(skb_put(skb, pktlen), p_recv, pktlen);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ dev->stats.rx_bytes += pktlen;
+ netif_rx(skb);
+ } else {
+ dev->stats.rx_dropped++;
+ printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
+ }
+
+ if (dlist->descriptors[lp->rxBuffIndex].size & EMAC_MULTICAST)
+ dev->stats.multicast++;
+
+ dlist->descriptors[lp->rxBuffIndex].addr &= ~EMAC_DESC_DONE; /* reset ownership bit */
+ if (lp->rxBuffIndex == MAX_RX_DESCR-1) /* wrap after last buffer */
+ lp->rxBuffIndex = 0;
+ else
+ lp->rxBuffIndex++;
+ }
+}
+
+/*
+ * MAC interrupt handler
+ */
+static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct at91_private *lp = netdev_priv(dev);
+ unsigned long intstatus, ctl;
+
+ /* The MAC Interrupt Status register indicates which interrupts are
+ * pending; it is automatically cleared once read. */
+ intstatus = at91_emac_read(AT91_EMAC_ISR);
+
+ if (intstatus & AT91_EMAC_RCOM) /* Receive complete */
+ at91ether_rx(dev);
+
+ if (intstatus & AT91_EMAC_TCOM) { /* Transmit complete */
+ /* The TCOM bit is set even if the transmission failed. */
+ if (intstatus & (AT91_EMAC_TUND | AT91_EMAC_RTRY))
+ dev->stats.tx_errors += 1;
+
+ if (lp->skb) {
+ dev_kfree_skb_irq(lp->skb);
+ lp->skb = NULL;
+ dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE);
+ }
+ netif_wake_queue(dev);
+ }
+
+ /* Work-around for Errata #11 */
+ if (intstatus & AT91_EMAC_RBNA) {
+ ctl = at91_emac_read(AT91_EMAC_CTL);
+ at91_emac_write(AT91_EMAC_CTL, ctl & ~AT91_EMAC_RE);
+ at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_RE);
+ }
+
+ if (intstatus & AT91_EMAC_ROVR)
+ printk("%s: ROVR error\n", dev->name);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void at91ether_poll_controller(struct net_device *dev)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ at91ether_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+}
+#endif
+
+static const struct net_device_ops at91ether_netdev_ops = {
+ .ndo_open = at91ether_open,
+ .ndo_stop = at91ether_close,
+ .ndo_start_xmit = at91ether_start_xmit,
+ .ndo_get_stats = at91ether_stats,
+ .ndo_set_rx_mode = at91ether_set_multicast_list,
+ .ndo_set_mac_address = set_mac_address,
+ .ndo_do_ioctl = at91ether_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = at91ether_poll_controller,
+#endif
+};
+
+/*
+ * Initialize the ethernet interface
+ */
+static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_address,
+ struct platform_device *pdev, struct clk *ether_clk)
+{
+ struct at91_eth_data *board_data = pdev->dev.platform_data;
+ struct net_device *dev;
+ struct at91_private *lp;
+ unsigned int val;
+ int res;
+
+ dev = alloc_etherdev(sizeof(struct at91_private));
+ if (!dev)
+ return -ENOMEM;
+
+ dev->base_addr = AT91_VA_BASE_EMAC;
+ dev->irq = AT91RM9200_ID_EMAC;
+
+ /* Install the interrupt handler */
+ if (request_irq(dev->irq, at91ether_interrupt, 0, dev->name, dev)) {
+ free_netdev(dev);
+ return -EBUSY;
+ }
+
+ /* Allocate memory for DMA Receive descriptors */
+ lp = netdev_priv(dev);
+ lp->dlist = (struct recv_desc_bufs *) dma_alloc_coherent(NULL, sizeof(struct recv_desc_bufs), (dma_addr_t *) &lp->dlist_phys, GFP_KERNEL);
+ if (lp->dlist == NULL) {
+ free_irq(dev->irq, dev);
+ free_netdev(dev);
+ return -ENOMEM;
+ }
+ lp->board_data = *board_data;
+ lp->ether_clk = ether_clk;
+ platform_set_drvdata(pdev, dev);
+
+ spin_lock_init(&lp->lock);
+
+ ether_setup(dev);
+ dev->netdev_ops = &at91ether_netdev_ops;
+ dev->ethtool_ops = &at91ether_ethtool_ops;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ get_mac_address(dev); /* Get ethernet address and store it in dev->dev_addr */
+ update_mac_address(dev); /* Program ethernet address into MAC */
+
+ at91_emac_write(AT91_EMAC_CTL, 0);
+
+ if (lp->board_data.is_rmii)
+ at91_emac_write(AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG | AT91_EMAC_RMII);
+ else
+ at91_emac_write(AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG);
+
+ /* Perform PHY-specific initialization */
+ spin_lock_irq(&lp->lock);
+ enable_mdi();
+ if ((phy_type == MII_DM9161_ID) || (phy_type == MII_DM9161A_ID)) {
+ read_phy(phy_address, MII_DSCR_REG, &val);
+ if ((val & (1 << 10)) == 0) /* DSCR bit 10 is 0 -- fiber mode */
+ lp->phy_media = PORT_FIBRE;
+ } else if (machine_is_csb337()) {
+ /* mix link activity status into LED2 link state */
+ write_phy(phy_address, MII_LEDCTRL_REG, 0x0d22);
+ } else if (machine_is_ecbat91())
+ write_phy(phy_address, MII_LEDCTRL_REG, 0x156A);
+
+ disable_mdi();
+ spin_unlock_irq(&lp->lock);
+
+ lp->mii.dev = dev; /* Support for ethtool */
+ lp->mii.mdio_read = mdio_read;
+ lp->mii.mdio_write = mdio_write;
+ lp->mii.phy_id = phy_address;
+ lp->mii.phy_id_mask = 0x1f;
+ lp->mii.reg_num_mask = 0x1f;
+
+ lp->phy_type = phy_type; /* Type of PHY connected */
+ lp->phy_address = phy_address; /* MDI address of PHY */
+
+ /* Register the network interface */
+ res = register_netdev(dev);
+ if (res) {
+ free_irq(dev->irq, dev);
+ free_netdev(dev);
+ dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
+ return res;
+ }
+
+ /* Determine current link speed */
+ spin_lock_irq(&lp->lock);
+ enable_mdi();
+ update_linkspeed(dev, 0);
+ disable_mdi();
+ spin_unlock_irq(&lp->lock);
+ netif_carrier_off(dev); /* will be enabled in open() */
+
+ /* If board has no PHY IRQ, use a timer to poll the PHY */
+ if (!lp->board_data.phy_irq_pin) {
+ init_timer(&lp->check_timer);
+ lp->check_timer.data = (unsigned long)dev;
+ lp->check_timer.function = at91ether_check_link;
+ } else if (lp->board_data.phy_irq_pin >= 32)
+ gpio_request(lp->board_data.phy_irq_pin, "ethernet_phy");
+
+ /* Display ethernet banner */
+ printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%pM)\n",
+ dev->name, (uint) dev->base_addr, dev->irq,
+ at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-",
+ at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex",
+ dev->dev_addr);
+ if ((phy_type == MII_DM9161_ID) || (phy_type == MII_DM9161A_ID))
+ printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)");
+ else if (phy_type == MII_LXT971A_ID)
+ printk(KERN_INFO "%s: Intel LXT971A PHY\n", dev->name);
+ else if (phy_type == MII_RTL8201_ID)
+ printk(KERN_INFO "%s: Realtek RTL8201(B)L PHY\n", dev->name);
+ else if (phy_type == MII_BCM5221_ID)
+ printk(KERN_INFO "%s: Broadcom BCM5221 PHY\n", dev->name);
+ else if (phy_type == MII_DP83847_ID)
+ printk(KERN_INFO "%s: National Semiconductor DP83847 PHY\n", dev->name);
+ else if (phy_type == MII_DP83848_ID)
+ printk(KERN_INFO "%s: National Semiconductor DP83848 PHY\n", dev->name);
+ else if (phy_type == MII_AC101L_ID)
+ printk(KERN_INFO "%s: Altima AC101L PHY\n", dev->name);
+ else if (phy_type == MII_KS8721_ID)
+ printk(KERN_INFO "%s: Micrel KS8721 PHY\n", dev->name);
+ else if (phy_type == MII_T78Q21x3_ID)
+ printk(KERN_INFO "%s: Teridian 78Q21x3 PHY\n", dev->name);
+ else if (phy_type == MII_LAN83C185_ID)
+ printk(KERN_INFO "%s: SMSC LAN83C185 PHY\n", dev->name);
+
+ return 0;
+}
+
+/*
+ * Detect MAC and PHY and perform initialization
+ */
+static int __init at91ether_probe(struct platform_device *pdev)
+{
+ unsigned int phyid1, phyid2;
+ int detected = -ENODEV;
+ unsigned long phy_id;
+ unsigned short phy_address = 0;
+ struct clk *ether_clk;
+
+ ether_clk = clk_get(&pdev->dev, "ether_clk");
+ if (IS_ERR(ether_clk)) {
+ printk(KERN_ERR "at91_ether: no clock defined\n");
+ return -ENODEV;
+ }
+ clk_enable(ether_clk); /* Enable Peripheral clock */
+
+ while ((detected != 0) && (phy_address < 32)) {
+ /* Read the PHY ID registers */
+ enable_mdi();
+ read_phy(phy_address, MII_PHYSID1, &phyid1);
+ read_phy(phy_address, MII_PHYSID2, &phyid2);
+ disable_mdi();
+
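+ /* Compose the 32-bit PHY ID; the low 4 bits of PHYSID2 hold the
+ * silicon revision and are masked off so the switch below matches
+ * any revision of a given PHY. */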
+ phy_id = (phyid1 << 16) | (phyid2 & 0xfff0);
+ switch (phy_id) {
+ case MII_DM9161_ID: /* Davicom 9161: PHY_ID1 = 0x181, PHY_ID2 = B881 */
+ case MII_DM9161A_ID: /* Davicom 9161A: PHY_ID1 = 0x181, PHY_ID2 = B8A0 */
+ case MII_LXT971A_ID: /* Intel LXT971A: PHY_ID1 = 0x13, PHY_ID2 = 78E0 */
+ case MII_RTL8201_ID: /* Realtek RTL8201: PHY_ID1 = 0, PHY_ID2 = 0x8201 */
+ case MII_BCM5221_ID: /* Broadcom BCM5221: PHY_ID1 = 0x40, PHY_ID2 = 0x61e0 */
+ case MII_DP83847_ID: /* National Semiconductor DP83847: */
+ case MII_DP83848_ID: /* National Semiconductor DP83848: */
+ case MII_AC101L_ID: /* Altima AC101L: PHY_ID1 = 0x22, PHY_ID2 = 0x5520 */
+ case MII_KS8721_ID: /* Micrel KS8721: PHY_ID1 = 0x22, PHY_ID2 = 0x1610 */
+ case MII_T78Q21x3_ID: /* Teridian 78Q21x3: PHY_ID1 = 0x0E, PHY_ID2 = 7237 */
+ case MII_LAN83C185_ID: /* SMSC LAN83C185: PHY_ID1 = 0x0007, PHY_ID2 = 0xC0A1 */
+ detected = at91ether_setup(phy_id, phy_address, pdev, ether_clk);
+ break;
+ }
+
+ phy_address++;
+ }
+
+ clk_disable(ether_clk); /* Disable Peripheral clock */
+
+ return detected;
+}
+
+static int __devexit at91ether_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct at91_private *lp = netdev_priv(dev);
+
+ if (lp->board_data.phy_irq_pin >= 32)
+ gpio_free(lp->board_data.phy_irq_pin);
+
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
+ clk_put(lp->ether_clk);
+
+ platform_set_drvdata(pdev, NULL);
+ free_netdev(dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ struct net_device *net_dev = platform_get_drvdata(pdev);
+ struct at91_private *lp = netdev_priv(net_dev);
+ int phy_irq = lp->board_data.phy_irq_pin;
+
+ if (netif_running(net_dev)) {
+ if (phy_irq)
+ disable_irq(phy_irq);
+
+ netif_stop_queue(net_dev);
+ netif_device_detach(net_dev);
+
+ clk_disable(lp->ether_clk);
+ }
+ return 0;
+}
+
+static int at91ether_resume(struct platform_device *pdev)
+{
+ struct net_device *net_dev = platform_get_drvdata(pdev);
+ struct at91_private *lp = netdev_priv(net_dev);
+ int phy_irq = lp->board_data.phy_irq_pin;
+
+ if (netif_running(net_dev)) {
+ clk_enable(lp->ether_clk);
+
+ netif_device_attach(net_dev);
+ netif_start_queue(net_dev);
+
+ if (phy_irq)
+ enable_irq(phy_irq);
+ }
+ return 0;
+}
+
+#else
+#define at91ether_suspend NULL
+#define at91ether_resume NULL
+#endif
+
+static struct platform_driver at91ether_driver = {
+ .remove = __devexit_p(at91ether_remove),
+ .suspend = at91ether_suspend,
+ .resume = at91ether_resume,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init at91ether_init(void)
+{
+ return platform_driver_probe(&at91ether_driver, at91ether_probe);
+}
+
+static void __exit at91ether_exit(void)
+{
+ platform_driver_unregister(&at91ether_driver);
+}
+
+module_init(at91ether_init);
+module_exit(at91ether_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
+MODULE_AUTHOR("Andrew Victor");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/net/ethernet/cadence/at91_ether.h b/drivers/net/ethernet/cadence/at91_ether.h
new file mode 100644
index 000000000000..353f4dab62be
--- /dev/null
+++ b/drivers/net/ethernet/cadence/at91_ether.h
@@ -0,0 +1,109 @@
+/*
+ * Ethernet driver for the Atmel AT91RM9200 (Thunder)
+ *
+ * Copyright (C) SAN People (Pty) Ltd
+ *
+ * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
+ * Initial version by Rick Bronson.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef AT91_ETHERNET
+#define AT91_ETHERNET
+
+
+/* Davicom 9161 PHY */
+#define MII_DM9161_ID 0x0181b880
+#define MII_DM9161A_ID 0x0181b8a0
+#define MII_DSCR_REG 16
+#define MII_DSCSR_REG 17
+#define MII_DSINTR_REG 21
+
+/* Intel LXT971A PHY */
+#define MII_LXT971A_ID 0x001378E0
+#define MII_ISINTE_REG 18
+#define MII_ISINTS_REG 19
+#define MII_LEDCTRL_REG 20
+
+/* Realtek RTL8201 PHY */
+#define MII_RTL8201_ID 0x00008200
+
+/* Broadcom BCM5221 PHY */
+#define MII_BCM5221_ID 0x004061e0
+#define MII_BCMINTR_REG 26
+
+/* National Semiconductor DP83847 */
+#define MII_DP83847_ID 0x20005c30
+
+/* National Semiconductor DP83848 */
+#define MII_DP83848_ID 0x20005c90
+#define MII_DPPHYSTS_REG 16
+#define MII_DPMICR_REG 17
+#define MII_DPMISR_REG 18
+
+/* Altima AC101L PHY */
+#define MII_AC101L_ID 0x00225520
+
+/* Micrel KS8721 PHY */
+#define MII_KS8721_ID 0x00221610
+
+/* Teridian 78Q2123/78Q2133 */
+#define MII_T78Q21x3_ID 0x000e7230
+#define MII_T78Q21INT_REG 17
+
+/* SMSC LAN83C185 */
+#define MII_LAN83C185_ID 0x0007C0A0
+
+/* ........................................................................ */
+
+#define MAX_RBUFF_SZ 0x600 /* 1518 rounded up */
+#define MAX_RX_DESCR 9 /* max number of receive buffers */
+
+#define EMAC_DESC_DONE 0x00000001 /* bit for if DMA is done */
+#define EMAC_DESC_WRAP 0x00000002 /* bit for wrap */
+
+#define EMAC_BROADCAST 0x80000000 /* broadcast address */
+#define EMAC_MULTICAST 0x40000000 /* multicast address */
+#define EMAC_UNICAST 0x20000000 /* unicast address */
+
+struct rbf_t
+{
+ unsigned int addr;
+ unsigned long size;
+};
+
+struct recv_desc_bufs
+{
+ struct rbf_t descriptors[MAX_RX_DESCR]; /* must be on sizeof (rbf_t) boundary */
+ char recv_buf[MAX_RX_DESCR][MAX_RBUFF_SZ]; /* must be on long boundary */
+};
+
+struct at91_private
+{
+ struct mii_if_info mii; /* ethtool support */
+ struct at91_eth_data board_data; /* board-specific configuration */
+ struct clk *ether_clk; /* clock */
+
+ /* PHY */
+ unsigned long phy_type; /* type of PHY (PHY_ID) */
+ spinlock_t lock; /* lock for MDI interface */
+ short phy_media; /* media interface type */
+ unsigned short phy_address; /* 5-bit MDI address of PHY (0..31) */
+ struct timer_list check_timer; /* Poll link status */
+
+ /* Transmit */
+ struct sk_buff *skb; /* holds skb until xmit interrupt completes */
+ dma_addr_t skb_physaddr; /* phys addr from pci_map_single */
+ int skb_length; /* saved skb length for pci_unmap_single */
+
+ /* Receive */
+ int rxBuffIndex; /* index into receive descriptor list */
+ struct recv_desc_bufs *dlist; /* descriptor list address */
+ struct recv_desc_bufs *dlist_phys; /* descriptor list physical address */
+};
+
+#endif
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
new file mode 100644
index 000000000000..a437b46e5490
--- /dev/null
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -0,0 +1,1366 @@
+/*
+ * Atmel MACB Ethernet Controller driver
+ *
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+
+#include <mach/board.h>
+#include <mach/cpu.h>
+
+#include "macb.h"
+
+#define RX_BUFFER_SIZE 128
+#define RX_RING_SIZE 512
+#define RX_RING_BYTES (sizeof(struct dma_desc) * RX_RING_SIZE)
+
+/* Make the IP header word-aligned (the ethernet header is 14 bytes) */
+#define RX_OFFSET 2
+
+#define TX_RING_SIZE 128
+#define DEF_TX_RING_PENDING (TX_RING_SIZE - 1)
+#define TX_RING_BYTES (sizeof(struct dma_desc) * TX_RING_SIZE)
+
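+/*
+ * TX ring accounting: only bp->tx_pending of the TX_RING_SIZE slots may
+ * be outstanding at once. TX_RING_GAP() is the reserved slack and
+ * TX_BUFFS_AVAIL() the number of free slots between tx_tail (consumer)
+ * and tx_head (producer). Illustrative example: with tx_pending = 127,
+ * tx_head = 5 and tx_tail = 3, TX_BUFFS_AVAIL() = 3 + 127 - 5 = 125.
+ */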
+#define TX_RING_GAP(bp) \
+ (TX_RING_SIZE - (bp)->tx_pending)
+#define TX_BUFFS_AVAIL(bp) \
+ (((bp)->tx_tail <= (bp)->tx_head) ? \
+ (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head : \
+ (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp))
+#define NEXT_TX(n) (((n) + 1) & (TX_RING_SIZE - 1))
+
+#define NEXT_RX(n) (((n) + 1) & (RX_RING_SIZE - 1))
+
+/* minimum number of free TX descriptors before waking up TX process */
+#define MACB_TX_WAKEUP_THRESH (TX_RING_SIZE / 4)
+
+#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
+ | MACB_BIT(ISR_ROVR))
+
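+/* Program specific-address register pair 1: SA1B takes the first four
+ * bytes of the MAC address and SA1T the remaining two. */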
+static void __macb_set_hwaddr(struct macb *bp)
+{
+ u32 bottom;
+ u16 top;
+
+ bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
+ macb_writel(bp, SA1B, bottom);
+ top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
+ macb_writel(bp, SA1T, top);
+}
+
+static void __init macb_get_hwaddr(struct macb *bp)
+{
+ u32 bottom;
+ u16 top;
+ u8 addr[6];
+
+ bottom = macb_readl(bp, SA1B);
+ top = macb_readl(bp, SA1T);
+
+ addr[0] = bottom & 0xff;
+ addr[1] = (bottom >> 8) & 0xff;
+ addr[2] = (bottom >> 16) & 0xff;
+ addr[3] = (bottom >> 24) & 0xff;
+ addr[4] = top & 0xff;
+ addr[5] = (top >> 8) & 0xff;
+
+ if (is_valid_ether_addr(addr)) {
+ memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+ } else {
+ dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
+ random_ether_addr(bp->dev->dev_addr);
+ }
+}
+
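+/* Issue a clause-22 MDIO management frame through the MAN register and
+ * busy-wait on the NSR IDLE bit. Per the MAN bitfields in macb.h the
+ * frame is: SOF (01), RW opcode (10 = read, 01 = write), 5-bit PHY
+ * address, 5-bit register address, CODE (10), 16-bit data. */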
+static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct macb *bp = bus->priv;
+ int value;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
+ | MACB_BF(RW, MACB_MAN_READ)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, regnum)
+ | MACB_BF(CODE, MACB_MAN_CODE)));
+
+ /* wait for end of transfer */
+ while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
+ cpu_relax();
+
+ value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
+
+ return value;
+}
+
+static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ struct macb *bp = bus->priv;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
+ | MACB_BF(RW, MACB_MAN_WRITE)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, regnum)
+ | MACB_BF(CODE, MACB_MAN_CODE)
+ | MACB_BF(DATA, value)));
+
+ /* wait for end of transfer */
+ while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
+ cpu_relax();
+
+ return 0;
+}
+
+static int macb_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+static void macb_handle_link_change(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct phy_device *phydev = bp->phy_dev;
+ unsigned long flags;
+
+ int status_change = 0;
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ if (phydev->link) {
+ if ((bp->speed != phydev->speed) ||
+ (bp->duplex != phydev->duplex)) {
+ u32 reg;
+
+ reg = macb_readl(bp, NCFGR);
+ reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
+
+ if (phydev->duplex)
+ reg |= MACB_BIT(FD);
+ if (phydev->speed == SPEED_100)
+ reg |= MACB_BIT(SPD);
+
+ macb_writel(bp, NCFGR, reg);
+
+ bp->speed = phydev->speed;
+ bp->duplex = phydev->duplex;
+ status_change = 1;
+ }
+ }
+
+ if (phydev->link != bp->link) {
+ if (!phydev->link) {
+ bp->speed = 0;
+ bp->duplex = -1;
+ }
+ bp->link = phydev->link;
+
+ status_change = 1;
+ }
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ if (status_change) {
+ if (phydev->link)
+ printk(KERN_INFO "%s: link up (%d/%s)\n",
+ dev->name, phydev->speed,
+ DUPLEX_FULL == phydev->duplex ? "Full":"Half");
+ else
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ }
+}
+
+/* based on au1000_eth.c */
+static int macb_mii_probe(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct phy_device *phydev;
+ struct eth_platform_data *pdata;
+ int ret;
+
+ phydev = phy_find_first(bp->mii_bus);
+ if (!phydev) {
+ printk(KERN_ERR "%s: no PHY found\n", dev->name);
+ return -ENXIO;
+ }
+
+ pdata = bp->pdev->dev.platform_data;
+ /* TODO : add pin_irq */
+
+ /* attach the mac to the phy */
+ ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
+ pdata && pdata->is_rmii ?
+ PHY_INTERFACE_MODE_RMII :
+ PHY_INTERFACE_MODE_MII);
+ if (ret) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return ret;
+ }
+
+ /* mask with MAC supported features */
+ phydev->supported &= PHY_BASIC_FEATURES;
+
+ phydev->advertising = phydev->supported;
+
+ bp->link = 0;
+ bp->speed = 0;
+ bp->duplex = -1;
+ bp->phy_dev = phydev;
+
+ return 0;
+}
+
+static int macb_mii_init(struct macb *bp)
+{
+ struct eth_platform_data *pdata;
+ int err = -ENXIO, i;
+
+ /* Enable management port */
+ macb_writel(bp, NCR, MACB_BIT(MPE));
+
+ bp->mii_bus = mdiobus_alloc();
+ if (bp->mii_bus == NULL) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ bp->mii_bus->name = "MACB_mii_bus";
+ bp->mii_bus->read = &macb_mdio_read;
+ bp->mii_bus->write = &macb_mdio_write;
+ bp->mii_bus->reset = &macb_mdio_reset;
+ snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", bp->pdev->id);
+ bp->mii_bus->priv = bp;
+ bp->mii_bus->parent = &bp->dev->dev;
+ pdata = bp->pdev->dev.platform_data;
+
+ if (pdata)
+ bp->mii_bus->phy_mask = pdata->phy_mask;
+
+ bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ if (!bp->mii_bus->irq) {
+ err = -ENOMEM;
+ goto err_out_free_mdiobus;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ bp->mii_bus->irq[i] = PHY_POLL;
+
+ dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
+
+ if (mdiobus_register(bp->mii_bus))
+ goto err_out_free_mdio_irq;
+
+ err = macb_mii_probe(bp->dev);
+ if (err)
+ goto err_out_unregister_bus;
+
+ return 0;
+
+err_out_unregister_bus:
+ mdiobus_unregister(bp->mii_bus);
+err_out_free_mdio_irq:
+ kfree(bp->mii_bus->irq);
+err_out_free_mdiobus:
+ mdiobus_free(bp->mii_bus);
+err_out:
+ return err;
+}
+
+static void macb_update_stats(struct macb *bp)
+{
+ u32 __iomem *reg = bp->regs + MACB_PFR;
+ u32 *p = &bp->hw_stats.rx_pause_frames;
+ u32 *end = &bp->hw_stats.tx_pause_frames + 1;
+
+ WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
+
+ for (; p < end; p++, reg++)
+ *p += __raw_readl(reg);
+}
+
+static void macb_tx(struct macb *bp)
+{
+ unsigned int tail;
+ unsigned int head;
+ u32 status;
+
+ status = macb_readl(bp, TSR);
+ macb_writel(bp, TSR, status);
+
+ dev_dbg(&bp->pdev->dev, "macb_tx status = %02lx\n",
+ (unsigned long)status);
+
+ if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) {
+ int i;
+ printk(KERN_ERR "%s: TX %s, resetting buffers\n",
+ bp->dev->name, status & MACB_BIT(UND) ?
+ "underrun" : "retry limit exceeded");
+
+ /* Transfer ongoing, disable transmitter, to avoid confusion */
+ if (status & MACB_BIT(TGO))
+ macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE));
+
+ head = bp->tx_head;
+
+ /* Mark all the buffers as used to avoid sending a lost buffer */
+ for (i = 0; i < TX_RING_SIZE; i++)
+ bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+
+ /* Add wrap bit */
+ bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+
+ /* Free the transmit buffers back to the upper layer */
+ for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
+ struct ring_info *rp = &bp->tx_skb[tail];
+ struct sk_buff *skb = rp->skb;
+
+ BUG_ON(skb == NULL);
+
+ rmb();
+
+ dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
+ DMA_TO_DEVICE);
+ rp->skb = NULL;
+ dev_kfree_skb_irq(skb);
+ }
+
+ bp->tx_head = bp->tx_tail = 0;
+
+ /* Enable the transmitter again */
+ if (status & MACB_BIT(TGO))
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
+ }
+
+ if (!(status & MACB_BIT(COMP)))
+ /*
+ * This may happen when a buffer becomes complete
+ * between reading the ISR and scanning the
+ * descriptors. Nothing to worry about.
+ */
+ return;
+
+ head = bp->tx_head;
+ for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
+ struct ring_info *rp = &bp->tx_skb[tail];
+ struct sk_buff *skb = rp->skb;
+ u32 bufstat;
+
+ BUG_ON(skb == NULL);
+
+ rmb();
+ bufstat = bp->tx_ring[tail].ctrl;
+
+ if (!(bufstat & MACB_BIT(TX_USED)))
+ break;
+
+ dev_dbg(&bp->pdev->dev, "skb %u (data %p) TX complete\n",
+ tail, skb->data);
+ dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
+ DMA_TO_DEVICE);
+ bp->stats.tx_packets++;
+ bp->stats.tx_bytes += skb->len;
+ rp->skb = NULL;
+ dev_kfree_skb_irq(skb);
+ }
+
+ bp->tx_tail = tail;
+ if (netif_queue_stopped(bp->dev) &&
+ TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH)
+ netif_wake_queue(bp->dev);
+}
+
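+/*
+ * Copy one received frame out of the DMA ring. The controller splits
+ * frames into RX_BUFFER_SIZE (128-byte) buffers, so a frame occupies
+ * descriptors first_frag..last_frag and is reassembled into a single
+ * skb here. Returns 0 on success, 1 if the allocation failed and the
+ * frame had to be dropped.
+ */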
+static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
+ unsigned int last_frag)
+{
+ unsigned int len;
+ unsigned int frag;
+ unsigned int offset = 0;
+ struct sk_buff *skb;
+
+ len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);
+
+ dev_dbg(&bp->pdev->dev, "macb_rx_frame frags %u - %u (len %u)\n",
+ first_frag, last_frag, len);
+
+ skb = dev_alloc_skb(len + RX_OFFSET);
+ if (!skb) {
+ bp->stats.rx_dropped++;
+ for (frag = first_frag; ; frag = NEXT_RX(frag)) {
+ bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+ if (frag == last_frag)
+ break;
+ }
+ wmb();
+ return 1;
+ }
+
+ skb_reserve(skb, RX_OFFSET);
+ skb_checksum_none_assert(skb);
+ skb_put(skb, len);
+
+ for (frag = first_frag; ; frag = NEXT_RX(frag)) {
+ unsigned int frag_len = RX_BUFFER_SIZE;
+
+ if (offset + frag_len > len) {
+ BUG_ON(frag != last_frag);
+ frag_len = len - offset;
+ }
+ skb_copy_to_linear_data_offset(skb, offset,
+ (bp->rx_buffers +
+ (RX_BUFFER_SIZE * frag)),
+ frag_len);
+ offset += RX_BUFFER_SIZE;
+ bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+ wmb();
+
+ if (frag == last_frag)
+ break;
+ }
+
+ skb->protocol = eth_type_trans(skb, bp->dev);
+
+ bp->stats.rx_packets++;
+ bp->stats.rx_bytes += len;
+ dev_dbg(&bp->pdev->dev, "received skb of length %u, csum: %08x\n",
+ skb->len, skb->csum);
+ netif_receive_skb(skb);
+
+ return 0;
+}
+
+/* Mark DMA descriptors from begin up to and not including end as unused */
+static void discard_partial_frame(struct macb *bp, unsigned int begin,
+ unsigned int end)
+{
+ unsigned int frag;
+
+ for (frag = begin; frag != end; frag = NEXT_RX(frag))
+ bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+ wmb();
+
+ /*
+ * When this happens, the hardware stats registers for whatever
+ * caused the discard are already updated, so we don't have to
+ * record anything here.
+ */
+}
+
+static int macb_rx(struct macb *bp, int budget)
+{
+ int received = 0;
+ unsigned int tail = bp->rx_tail;
+ int first_frag = -1;
+
+ for (; budget > 0; tail = NEXT_RX(tail)) {
+ u32 addr, ctrl;
+
+ rmb();
+ addr = bp->rx_ring[tail].addr;
+ ctrl = bp->rx_ring[tail].ctrl;
+
+ if (!(addr & MACB_BIT(RX_USED)))
+ break;
+
+ if (ctrl & MACB_BIT(RX_SOF)) {
+ if (first_frag != -1)
+ discard_partial_frame(bp, first_frag, tail);
+ first_frag = tail;
+ }
+
+ if (ctrl & MACB_BIT(RX_EOF)) {
+ int dropped;
+ BUG_ON(first_frag == -1);
+
+ dropped = macb_rx_frame(bp, first_frag, tail);
+ first_frag = -1;
+ if (!dropped) {
+ received++;
+ budget--;
+ }
+ }
+ }
+
+ if (first_frag != -1)
+ bp->rx_tail = first_frag;
+ else
+ bp->rx_tail = tail;
+
+ return received;
+}
+
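+/*
+ * NAPI poll handler: receive up to @budget frames in softirq context.
+ * The RX interrupts were masked in macb_interrupt() when the poll was
+ * scheduled and are unmasked below only once the ring has been drained
+ * within budget.
+ */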
+static int macb_poll(struct napi_struct *napi, int budget)
+{
+ struct macb *bp = container_of(napi, struct macb, napi);
+ int work_done;
+ u32 status;
+
+ status = macb_readl(bp, RSR);
+ macb_writel(bp, RSR, status);
+
+ work_done = 0;
+
+ dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n",
+ (unsigned long)status, budget);
+
+ work_done = macb_rx(bp, budget);
+ if (work_done < budget) {
+ napi_complete(napi);
+
+ /*
+ * We've done what we can to clean the buffers. Make sure we
+ * get notified when new packets arrive.
+ */
+ macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+ }
+
+ /* TODO: Handle errors */
+
+ return work_done;
+}
+
+static irqreturn_t macb_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct macb *bp = netdev_priv(dev);
+ u32 status;
+
+ status = macb_readl(bp, ISR);
+
+ if (unlikely(!status))
+ return IRQ_NONE;
+
+ spin_lock(&bp->lock);
+
+ while (status) {
+ /* close possible race with dev_close */
+ if (unlikely(!netif_running(dev))) {
+ macb_writel(bp, IDR, ~0UL);
+ break;
+ }
+
+ if (status & MACB_RX_INT_FLAGS) {
+ /*
+ * There's no point taking any more interrupts
+ * until we have processed the buffers. The
+ * scheduling call may fail if the poll routine
+ * is already scheduled, so disable interrupts
+ * now.
+ */
+ macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
+
+ if (napi_schedule_prep(&bp->napi)) {
+ dev_dbg(&bp->pdev->dev,
+ "scheduling RX softirq\n");
+ __napi_schedule(&bp->napi);
+ }
+ }
+
+ if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND) |
+ MACB_BIT(ISR_RLE)))
+ macb_tx(bp);
+
+ /*
+ * Link change detection isn't possible with RMII, so we'll
+ * add that if/when we get our hands on a full-blown MII PHY.
+ */
+
+ if (status & MACB_BIT(ISR_ROVR)) {
+ /* We missed at least one packet */
+ bp->hw_stats.rx_overruns++;
+ }
+
+ if (status & MACB_BIT(HRESP)) {
+ /*
+ * TODO: Reset the hardware, and maybe move the printk
+ * to a lower-priority context as well (work queue?)
+ */
+ printk(KERN_ERR "%s: DMA bus error: HRESP not OK\n",
+ dev->name);
+ }
+
+ status = macb_readl(bp, ISR);
+ }
+
+ spin_unlock(&bp->lock);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void macb_poll_controller(struct net_device *dev)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ macb_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+}
+#endif
+
+static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+ dma_addr_t mapping;
+ unsigned int len, entry;
+ u32 ctrl;
+ unsigned long flags;
+
+#ifdef DEBUG
+ int i;
+ dev_dbg(&bp->pdev->dev,
+ "start_xmit: len %u head %p data %p tail %p end %p\n",
+ skb->len, skb->head, skb->data,
+ skb_tail_pointer(skb), skb_end_pointer(skb));
+ dev_dbg(&bp->pdev->dev,
+ "data:");
+ for (i = 0; i < 16; i++)
+ printk(" %02x", (unsigned int)skb->data[i]);
+ printk("\n");
+#endif
+
+ len = skb->len;
+ spin_lock_irqsave(&bp->lock, flags);
+
+ /* This is a hard error, log it. */
+ if (TX_BUFFS_AVAIL(bp) < 1) {
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&bp->lock, flags);
+ dev_err(&bp->pdev->dev,
+ "BUG! Tx Ring full when queue awake!\n");
+ dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
+ bp->tx_head, bp->tx_tail);
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = bp->tx_head;
+ dev_dbg(&bp->pdev->dev, "Allocated ring entry %u\n", entry);
+ mapping = dma_map_single(&bp->pdev->dev, skb->data,
+ len, DMA_TO_DEVICE);
+ bp->tx_skb[entry].skb = skb;
+ bp->tx_skb[entry].mapping = mapping;
+ dev_dbg(&bp->pdev->dev, "Mapped skb data %p to DMA addr %08lx\n",
+ skb->data, (unsigned long)mapping);
+
+ ctrl = MACB_BF(TX_FRMLEN, len);
+ ctrl |= MACB_BIT(TX_LAST);
+ if (entry == (TX_RING_SIZE - 1))
+ ctrl |= MACB_BIT(TX_WRAP);
+
+ bp->tx_ring[entry].addr = mapping;
+ bp->tx_ring[entry].ctrl = ctrl;
+ wmb();
+
+ entry = NEXT_TX(entry);
+ bp->tx_head = entry;
+
+ skb_tx_timestamp(skb);
+
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+
+ if (TX_BUFFS_AVAIL(bp) < 1)
+ netif_stop_queue(dev);
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static void macb_free_consistent(struct macb *bp)
+{
+ if (bp->tx_skb) {
+ kfree(bp->tx_skb);
+ bp->tx_skb = NULL;
+ }
+ if (bp->rx_ring) {
+ dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
+ bp->rx_ring, bp->rx_ring_dma);
+ bp->rx_ring = NULL;
+ }
+ if (bp->tx_ring) {
+ dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
+ bp->tx_ring, bp->tx_ring_dma);
+ bp->tx_ring = NULL;
+ }
+ if (bp->rx_buffers) {
+ dma_free_coherent(&bp->pdev->dev,
+ RX_RING_SIZE * RX_BUFFER_SIZE,
+ bp->rx_buffers, bp->rx_buffers_dma);
+ bp->rx_buffers = NULL;
+ }
+}
+
+static int macb_alloc_consistent(struct macb *bp)
+{
+ int size;
+
+ size = TX_RING_SIZE * sizeof(struct ring_info);
+ bp->tx_skb = kmalloc(size, GFP_KERNEL);
+ if (!bp->tx_skb)
+ goto out_err;
+
+ size = RX_RING_BYTES;
+ bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+ &bp->rx_ring_dma, GFP_KERNEL);
+ if (!bp->rx_ring)
+ goto out_err;
+ dev_dbg(&bp->pdev->dev,
+ "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
+
+ size = TX_RING_BYTES;
+ bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+ &bp->tx_ring_dma, GFP_KERNEL);
+ if (!bp->tx_ring)
+ goto out_err;
+ dev_dbg(&bp->pdev->dev,
+ "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
+
+ size = RX_RING_SIZE * RX_BUFFER_SIZE;
+ bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
+ &bp->rx_buffers_dma, GFP_KERNEL);
+ if (!bp->rx_buffers)
+ goto out_err;
+ dev_dbg(&bp->pdev->dev,
+ "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+
+ return 0;
+
+out_err:
+ macb_free_consistent(bp);
+ return -ENOMEM;
+}
+
+static void macb_init_rings(struct macb *bp)
+{
+ int i;
+ dma_addr_t addr;
+
+ addr = bp->rx_buffers_dma;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ bp->rx_ring[i].addr = addr;
+ bp->rx_ring[i].ctrl = 0;
+ addr += RX_BUFFER_SIZE;
+ }
+ bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ bp->tx_ring[i].addr = 0;
+ bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ }
+ bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+
+ bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
+}
+
+static void macb_reset_hw(struct macb *bp)
+{
+ /* Make sure we have the write buffer for ourselves */
+ wmb();
+
+ /*
+ * Disable RX and TX (XXX: Should we halt the transmission
+ * more gracefully?)
+ */
+ macb_writel(bp, NCR, 0);
+
+ /* Clear the stats registers (XXX: Update stats first?) */
+ macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
+
+ /* Clear all status flags */
+ macb_writel(bp, TSR, ~0UL);
+ macb_writel(bp, RSR, ~0UL);
+
+ /* Disable all interrupts */
+ macb_writel(bp, IDR, ~0UL);
+ macb_readl(bp, ISR);
+}
+
+static void macb_init_hw(struct macb *bp)
+{
+ u32 config;
+
+ macb_reset_hw(bp);
+ __macb_set_hwaddr(bp);
+
+ config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L);
+ config |= MACB_BIT(PAE); /* PAuse Enable */
+ config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
+ config |= MACB_BIT(BIG); /* Receive oversized frames */
+ if (bp->dev->flags & IFF_PROMISC)
+ config |= MACB_BIT(CAF); /* Copy All Frames */
+ if (!(bp->dev->flags & IFF_BROADCAST))
+ config |= MACB_BIT(NBC); /* No BroadCast */
+ macb_writel(bp, NCFGR, config);
+
+ /* Initialize TX and RX buffers */
+ macb_writel(bp, RBQP, bp->rx_ring_dma);
+ macb_writel(bp, TBQP, bp->tx_ring_dma);
+
+ /* Enable TX and RX */
+ macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
+
+ /* Enable interrupts */
+ macb_writel(bp, IER, (MACB_BIT(RCOMP)
+ | MACB_BIT(RXUBR)
+ | MACB_BIT(ISR_TUND)
+ | MACB_BIT(ISR_RLE)
+ | MACB_BIT(TXERR)
+ | MACB_BIT(TCOMP)
+ | MACB_BIT(ISR_ROVR)
+ | MACB_BIT(HRESP)));
+}
+
+/*
+ * The hash address register is 64 bits long and takes up two
+ * locations in the memory map. The least significant bits are stored
+ * in EMAC_HSL and the most significant bits in EMAC_HSH.
+ *
+ * The unicast hash enable and the multicast hash enable bits in the
+ * network configuration register enable the reception of hash matched
+ * frames. The destination address is reduced to a 6 bit index into
+ * the 64 bit hash register using the following hash function. The
+ * hash function is an exclusive or of every sixth bit of the
+ * destination address.
+ *
+ * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
+ * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
+ * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
+ * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
+ * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
+ * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
+ *
+ * da[0] represents the least significant bit of the first byte
+ * received, that is, the multicast/unicast indicator, and da[47]
+ * represents the most significant bit of the last byte received. If
+ * the hash index, hi[n], points to a bit that is set in the hash
+ * register then the frame will be matched according to whether the
+ * frame is multicast or unicast. A multicast match will be signalled
+ * if the multicast hash enable bit is set, da[0] is 1 and the hash
+ * index points to a bit set in the hash register. A unicast match
+ * will be signalled if the unicast hash enable bit is set, da[0] is 0
+ * and the hash index points to a bit set in the hash register. To
+ * receive all multicast frames, the hash register should be set with
+ * all ones and the multicast hash enable bit should be set in the
+ * network configuration register.
+ */
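+
+/*
+ * Worked example (illustrative, not from the datasheet): for the
+ * multicast address 01:00:5e:00:00:01, da[0] = 1 and the six XOR sums
+ * above give hi = 100110b = 38, so the frame is accepted when bit 38
+ * of the hash register -- bit 6 of MACB_HRT -- is set, which is
+ * exactly the bit macb_sethashtable() below sets for that address.
+ */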
+
+static inline int hash_bit_value(int bitnr, __u8 *addr)
+{
+ if (addr[bitnr / 8] & (1 << (bitnr % 8)))
+ return 1;
+ return 0;
+}
+
+/*
+ * Return the hash index value for the specified address.
+ */
+static int hash_get_index(__u8 *addr)
+{
+ int i, j, bitval;
+ int hash_index = 0;
+
+ for (j = 0; j < 6; j++) {
+ for (i = 0, bitval = 0; i < 8; i++)
+ bitval ^= hash_bit_value(i*6 + j, addr);
+
+ hash_index |= (bitval << j);
+ }
+
+ return hash_index;
+}
+
+/*
+ * Add multicast addresses to the internal multicast-hash table.
+ */
+static void macb_sethashtable(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ unsigned long mc_filter[2];
+ unsigned int bitnr;
+ struct macb *bp = netdev_priv(dev);
+
+ mc_filter[0] = mc_filter[1] = 0;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ bitnr = hash_get_index(ha->addr);
+ mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
+ }
+
+ macb_writel(bp, HRB, mc_filter[0]);
+ macb_writel(bp, HRT, mc_filter[1]);
+}
+
+/*
+ * Enable/Disable promiscuous and multicast modes.
+ */
+static void macb_set_rx_mode(struct net_device *dev)
+{
+ unsigned long cfg;
+ struct macb *bp = netdev_priv(dev);
+
+ cfg = macb_readl(bp, NCFGR);
+
+ if (dev->flags & IFF_PROMISC)
+ /* Enable promiscuous mode */
+ cfg |= MACB_BIT(CAF);
+ else
+ /* Disable promiscuous mode */
+ cfg &= ~MACB_BIT(CAF);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /* Enable all multicast mode */
+ macb_writel(bp, HRB, -1);
+ macb_writel(bp, HRT, -1);
+ cfg |= MACB_BIT(NCFGR_MTI);
+ } else if (!netdev_mc_empty(dev)) {
+ /* Enable specific multicasts */
+ macb_sethashtable(dev);
+ cfg |= MACB_BIT(NCFGR_MTI);
+ } else {
+ /* Disable all multicast mode */
+ macb_writel(bp, HRB, 0);
+ macb_writel(bp, HRT, 0);
+ cfg &= ~MACB_BIT(NCFGR_MTI);
+ }
+
+ macb_writel(bp, NCFGR, cfg);
+}
+
+static int macb_open(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+ int err;
+
+ dev_dbg(&bp->pdev->dev, "open\n");
+
+ /* If the PHY is not yet registered, retry later */
+ if (!bp->phy_dev)
+ return -EAGAIN;
+
+ if (!is_valid_ether_addr(dev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ err = macb_alloc_consistent(bp);
+ if (err) {
+ printk(KERN_ERR
+ "%s: Unable to allocate DMA memory (error %d)\n",
+ dev->name, err);
+ return err;
+ }
+
+ napi_enable(&bp->napi);
+
+ macb_init_rings(bp);
+ macb_init_hw(bp);
+
+ /* schedule a link state check */
+ phy_start(bp->phy_dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int macb_close(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+ napi_disable(&bp->napi);
+
+ if (bp->phy_dev)
+ phy_stop(bp->phy_dev);
+
+ spin_lock_irqsave(&bp->lock, flags);
+ macb_reset_hw(bp);
+ netif_carrier_off(dev);
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ macb_free_consistent(bp);
+
+ return 0;
+}
+
+static struct net_device_stats *macb_get_stats(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct net_device_stats *nstat = &bp->stats;
+ struct macb_stats *hwstat = &bp->hw_stats;
+
+ /* read stats from hardware */
+ macb_update_stats(bp);
+
+ /* Convert HW stats into netdevice stats */
+ nstat->rx_errors = (hwstat->rx_fcs_errors +
+ hwstat->rx_align_errors +
+ hwstat->rx_resource_errors +
+ hwstat->rx_overruns +
+ hwstat->rx_oversize_pkts +
+ hwstat->rx_jabbers +
+ hwstat->rx_undersize_pkts +
+ hwstat->sqe_test_errors +
+ hwstat->rx_length_mismatch);
+ nstat->tx_errors = (hwstat->tx_late_cols +
+ hwstat->tx_excessive_cols +
+ hwstat->tx_underruns +
+ hwstat->tx_carrier_errors);
+ nstat->collisions = (hwstat->tx_single_cols +
+ hwstat->tx_multiple_cols +
+ hwstat->tx_excessive_cols);
+ nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
+ hwstat->rx_jabbers +
+ hwstat->rx_undersize_pkts +
+ hwstat->rx_length_mismatch);
+ nstat->rx_over_errors = hwstat->rx_resource_errors +
+ hwstat->rx_overruns;
+ nstat->rx_crc_errors = hwstat->rx_fcs_errors;
+ nstat->rx_frame_errors = hwstat->rx_align_errors;
+ nstat->rx_fifo_errors = hwstat->rx_overruns;
+ /* XXX: What does "missed" mean? */
+ nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
+ nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
+ nstat->tx_fifo_errors = hwstat->tx_underruns;
+ /* Don't know about heartbeat or window errors... */
+
+ return nstat;
+}
+
+static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct phy_device *phydev = bp->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(phydev, cmd);
+}
+
+static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct phy_device *phydev = bp->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, cmd);
+}
+
+static void macb_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct macb *bp = netdev_priv(dev);
+
+ strlcpy(info->driver, bp->pdev->dev.driver->name, sizeof(info->driver));
+ strlcpy(info->version, "$Revision: 1.14 $", sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(&bp->pdev->dev), sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops macb_ethtool_ops = {
+ .get_settings = macb_get_settings,
+ .set_settings = macb_set_settings,
+ .get_drvinfo = macb_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct phy_device *phydev = bp->phy_dev;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(phydev, rq, cmd);
+}
+
+static const struct net_device_ops macb_netdev_ops = {
+ .ndo_open = macb_open,
+ .ndo_stop = macb_close,
+ .ndo_start_xmit = macb_start_xmit,
+ .ndo_set_rx_mode = macb_set_rx_mode,
+ .ndo_get_stats = macb_get_stats,
+ .ndo_do_ioctl = macb_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = macb_poll_controller,
+#endif
+};
+
+static int __init macb_probe(struct platform_device *pdev)
+{
+ struct eth_platform_data *pdata;
+ struct resource *regs;
+ struct net_device *dev;
+ struct macb *bp;
+ struct phy_device *phydev;
+ unsigned long pclk_hz;
+ u32 config;
+ int err = -ENXIO;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "no mmio resource defined\n");
+ goto err_out;
+ }
+
+ err = -ENOMEM;
+ dev = alloc_etherdev(sizeof(*bp));
+ if (!dev) {
+ dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
+ goto err_out;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* TODO: Actually, we have some interesting features... */
+ dev->features |= 0;
+
+ bp = netdev_priv(dev);
+ bp->pdev = pdev;
+ bp->dev = dev;
+
+ spin_lock_init(&bp->lock);
+
+#if defined(CONFIG_ARCH_AT91)
+ bp->pclk = clk_get(&pdev->dev, "macb_clk");
+ if (IS_ERR(bp->pclk)) {
+ dev_err(&pdev->dev, "failed to get macb_clk\n");
+ goto err_out_free_dev;
+ }
+ clk_enable(bp->pclk);
+#else
+ bp->pclk = clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(bp->pclk)) {
+ dev_err(&pdev->dev, "failed to get pclk\n");
+ goto err_out_free_dev;
+ }
+ bp->hclk = clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(bp->hclk)) {
+ dev_err(&pdev->dev, "failed to get hclk\n");
+ goto err_out_put_pclk;
+ }
+
+ clk_enable(bp->pclk);
+ clk_enable(bp->hclk);
+#endif
+
+ bp->regs = ioremap(regs->start, resource_size(regs));
+ if (!bp->regs) {
+ dev_err(&pdev->dev, "failed to map registers, aborting.\n");
+ err = -ENOMEM;
+ goto err_out_disable_clocks;
+ }
+
+ dev->irq = platform_get_irq(pdev, 0);
+ err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
+ if (err) {
+ printk(KERN_ERR
+ "%s: Unable to request IRQ %d (error %d)\n",
+ dev->name, dev->irq, err);
+ goto err_out_iounmap;
+ }
+
+ dev->netdev_ops = &macb_netdev_ops;
+ netif_napi_add(dev, &bp->napi, macb_poll, 64);
+ dev->ethtool_ops = &macb_ethtool_ops;
+
+ dev->base_addr = regs->start;
+
+ /* Set MII management clock divider */
+ pclk_hz = clk_get_rate(bp->pclk);
+ if (pclk_hz <= 20000000)
+ config = MACB_BF(CLK, MACB_CLK_DIV8);
+ else if (pclk_hz <= 40000000)
+ config = MACB_BF(CLK, MACB_CLK_DIV16);
+ else if (pclk_hz <= 80000000)
+ config = MACB_BF(CLK, MACB_CLK_DIV32);
+ else
+ config = MACB_BF(CLK, MACB_CLK_DIV64);
+ macb_writel(bp, NCFGR, config);
+
+ macb_get_hwaddr(bp);
+ pdata = pdev->dev.platform_data;
+
+ if (pdata && pdata->is_rmii)
+#if defined(CONFIG_ARCH_AT91)
+ macb_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN)) );
+#else
+ macb_writel(bp, USRIO, 0);
+#endif
+ else
+#if defined(CONFIG_ARCH_AT91)
+ macb_writel(bp, USRIO, MACB_BIT(CLKEN));
+#else
+ macb_writel(bp, USRIO, MACB_BIT(MII));
+#endif
+
+ bp->tx_pending = DEF_TX_RING_PENDING;
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_free_irq;
+ }
+
+ err = macb_mii_init(bp);
+ if (err)
+ goto err_out_unregister_netdev;
+
+ platform_set_drvdata(pdev, dev);
+
+ printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d (%pM)\n",
+ dev->name, dev->base_addr, dev->irq, dev->dev_addr);
+
+ phydev = bp->phy_dev;
+ printk(KERN_INFO "%s: attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
+ phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
+
+ return 0;
+
+err_out_unregister_netdev:
+ unregister_netdev(dev);
+err_out_free_irq:
+ free_irq(dev->irq, dev);
+err_out_iounmap:
+ iounmap(bp->regs);
+err_out_disable_clocks:
+#ifndef CONFIG_ARCH_AT91
+ clk_disable(bp->hclk);
+ clk_put(bp->hclk);
+#endif
+ clk_disable(bp->pclk);
+#ifndef CONFIG_ARCH_AT91
+err_out_put_pclk:
+#endif
+ clk_put(bp->pclk);
+err_out_free_dev:
+ free_netdev(dev);
+err_out:
+ platform_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static int __exit macb_remove(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct macb *bp;
+
+ dev = platform_get_drvdata(pdev);
+
+ if (dev) {
+ bp = netdev_priv(dev);
+ if (bp->phy_dev)
+ phy_disconnect(bp->phy_dev);
+ mdiobus_unregister(bp->mii_bus);
+ kfree(bp->mii_bus->irq);
+ mdiobus_free(bp->mii_bus);
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ iounmap(bp->regs);
+#ifndef CONFIG_ARCH_AT91
+ clk_disable(bp->hclk);
+ clk_put(bp->hclk);
+#endif
+ clk_disable(bp->pclk);
+ clk_put(bp->pclk);
+ free_netdev(dev);
+ platform_set_drvdata(pdev, NULL);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int macb_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+ struct macb *bp = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+#ifndef CONFIG_ARCH_AT91
+ clk_disable(bp->hclk);
+#endif
+ clk_disable(bp->pclk);
+
+ return 0;
+}
+
+static int macb_resume(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+ struct macb *bp = netdev_priv(netdev);
+
+ clk_enable(bp->pclk);
+#ifndef CONFIG_ARCH_AT91
+ clk_enable(bp->hclk);
+#endif
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+#else
+#define macb_suspend NULL
+#define macb_resume NULL
+#endif
+
+static struct platform_driver macb_driver = {
+ .remove = __exit_p(macb_remove),
+ .suspend = macb_suspend,
+ .resume = macb_resume,
+ .driver = {
+ .name = "macb",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init macb_init(void)
+{
+ return platform_driver_probe(&macb_driver, macb_probe);
+}
+
+static void __exit macb_exit(void)
+{
+ platform_driver_unregister(&macb_driver);
+}
+
+module_init(macb_init);
+module_exit(macb_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Atmel MACB Ethernet driver");
+MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
+MODULE_ALIAS("platform:macb");
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
new file mode 100644
index 000000000000..d3212f6db703
--- /dev/null
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -0,0 +1,394 @@
+/*
+ * Atmel MACB Ethernet Controller driver
+ *
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _MACB_H
+#define _MACB_H
+
+/* MACB register offsets */
+#define MACB_NCR 0x0000
+#define MACB_NCFGR 0x0004
+#define MACB_NSR 0x0008
+#define MACB_TSR 0x0014
+#define MACB_RBQP 0x0018
+#define MACB_TBQP 0x001c
+#define MACB_RSR 0x0020
+#define MACB_ISR 0x0024
+#define MACB_IER 0x0028
+#define MACB_IDR 0x002c
+#define MACB_IMR 0x0030
+#define MACB_MAN 0x0034
+#define MACB_PTR 0x0038
+#define MACB_PFR 0x003c
+#define MACB_FTO 0x0040
+#define MACB_SCF 0x0044
+#define MACB_MCF 0x0048
+#define MACB_FRO 0x004c
+#define MACB_FCSE 0x0050
+#define MACB_ALE 0x0054
+#define MACB_DTF 0x0058
+#define MACB_LCOL 0x005c
+#define MACB_EXCOL 0x0060
+#define MACB_TUND 0x0064
+#define MACB_CSE 0x0068
+#define MACB_RRE 0x006c
+#define MACB_ROVR 0x0070
+#define MACB_RSE 0x0074
+#define MACB_ELE 0x0078
+#define MACB_RJA 0x007c
+#define MACB_USF 0x0080
+#define MACB_STE 0x0084
+#define MACB_RLE 0x0088
+#define MACB_TPF 0x008c
+#define MACB_HRB 0x0090
+#define MACB_HRT 0x0094
+#define MACB_SA1B 0x0098
+#define MACB_SA1T 0x009c
+#define MACB_SA2B 0x00a0
+#define MACB_SA2T 0x00a4
+#define MACB_SA3B 0x00a8
+#define MACB_SA3T 0x00ac
+#define MACB_SA4B 0x00b0
+#define MACB_SA4T 0x00b4
+#define MACB_TID 0x00b8
+#define MACB_TPQ 0x00bc
+#define MACB_USRIO 0x00c0
+#define MACB_WOL 0x00c4
+
+/* Bitfields in NCR */
+#define MACB_LB_OFFSET 0
+#define MACB_LB_SIZE 1
+#define MACB_LLB_OFFSET 1
+#define MACB_LLB_SIZE 1
+#define MACB_RE_OFFSET 2
+#define MACB_RE_SIZE 1
+#define MACB_TE_OFFSET 3
+#define MACB_TE_SIZE 1
+#define MACB_MPE_OFFSET 4
+#define MACB_MPE_SIZE 1
+#define MACB_CLRSTAT_OFFSET 5
+#define MACB_CLRSTAT_SIZE 1
+#define MACB_INCSTAT_OFFSET 6
+#define MACB_INCSTAT_SIZE 1
+#define MACB_WESTAT_OFFSET 7
+#define MACB_WESTAT_SIZE 1
+#define MACB_BP_OFFSET 8
+#define MACB_BP_SIZE 1
+#define MACB_TSTART_OFFSET 9
+#define MACB_TSTART_SIZE 1
+#define MACB_THALT_OFFSET 10
+#define MACB_THALT_SIZE 1
+#define MACB_NCR_TPF_OFFSET 11
+#define MACB_NCR_TPF_SIZE 1
+#define MACB_TZQ_OFFSET 12
+#define MACB_TZQ_SIZE 1
+
+/* Bitfields in NCFGR */
+#define MACB_SPD_OFFSET 0
+#define MACB_SPD_SIZE 1
+#define MACB_FD_OFFSET 1
+#define MACB_FD_SIZE 1
+#define MACB_BIT_RATE_OFFSET 2
+#define MACB_BIT_RATE_SIZE 1
+#define MACB_JFRAME_OFFSET 3
+#define MACB_JFRAME_SIZE 1
+#define MACB_CAF_OFFSET 4
+#define MACB_CAF_SIZE 1
+#define MACB_NBC_OFFSET 5
+#define MACB_NBC_SIZE 1
+#define MACB_NCFGR_MTI_OFFSET 6
+#define MACB_NCFGR_MTI_SIZE 1
+#define MACB_UNI_OFFSET 7
+#define MACB_UNI_SIZE 1
+#define MACB_BIG_OFFSET 8
+#define MACB_BIG_SIZE 1
+#define MACB_EAE_OFFSET 9
+#define MACB_EAE_SIZE 1
+#define MACB_CLK_OFFSET 10
+#define MACB_CLK_SIZE 2
+#define MACB_RTY_OFFSET 12
+#define MACB_RTY_SIZE 1
+#define MACB_PAE_OFFSET 13
+#define MACB_PAE_SIZE 1
+#define MACB_RBOF_OFFSET 14
+#define MACB_RBOF_SIZE 2
+#define MACB_RLCE_OFFSET 16
+#define MACB_RLCE_SIZE 1
+#define MACB_DRFCS_OFFSET 17
+#define MACB_DRFCS_SIZE 1
+#define MACB_EFRHD_OFFSET 18
+#define MACB_EFRHD_SIZE 1
+#define MACB_IRXFCS_OFFSET 19
+#define MACB_IRXFCS_SIZE 1
+
+/* Bitfields in NSR */
+#define MACB_NSR_LINK_OFFSET 0
+#define MACB_NSR_LINK_SIZE 1
+#define MACB_MDIO_OFFSET 1
+#define MACB_MDIO_SIZE 1
+#define MACB_IDLE_OFFSET 2
+#define MACB_IDLE_SIZE 1
+
+/* Bitfields in TSR */
+#define MACB_UBR_OFFSET 0
+#define MACB_UBR_SIZE 1
+#define MACB_COL_OFFSET 1
+#define MACB_COL_SIZE 1
+#define MACB_TSR_RLE_OFFSET 2
+#define MACB_TSR_RLE_SIZE 1
+#define MACB_TGO_OFFSET 3
+#define MACB_TGO_SIZE 1
+#define MACB_BEX_OFFSET 4
+#define MACB_BEX_SIZE 1
+#define MACB_COMP_OFFSET 5
+#define MACB_COMP_SIZE 1
+#define MACB_UND_OFFSET 6
+#define MACB_UND_SIZE 1
+
+/* Bitfields in RSR */
+#define MACB_BNA_OFFSET 0
+#define MACB_BNA_SIZE 1
+#define MACB_REC_OFFSET 1
+#define MACB_REC_SIZE 1
+#define MACB_OVR_OFFSET 2
+#define MACB_OVR_SIZE 1
+
+/* Bitfields in ISR/IER/IDR/IMR */
+#define MACB_MFD_OFFSET 0
+#define MACB_MFD_SIZE 1
+#define MACB_RCOMP_OFFSET 1
+#define MACB_RCOMP_SIZE 1
+#define MACB_RXUBR_OFFSET 2
+#define MACB_RXUBR_SIZE 1
+#define MACB_TXUBR_OFFSET 3
+#define MACB_TXUBR_SIZE 1
+#define MACB_ISR_TUND_OFFSET 4
+#define MACB_ISR_TUND_SIZE 1
+#define MACB_ISR_RLE_OFFSET 5
+#define MACB_ISR_RLE_SIZE 1
+#define MACB_TXERR_OFFSET 6
+#define MACB_TXERR_SIZE 1
+#define MACB_TCOMP_OFFSET 7
+#define MACB_TCOMP_SIZE 1
+#define MACB_ISR_LINK_OFFSET 9
+#define MACB_ISR_LINK_SIZE 1
+#define MACB_ISR_ROVR_OFFSET 10
+#define MACB_ISR_ROVR_SIZE 1
+#define MACB_HRESP_OFFSET 11
+#define MACB_HRESP_SIZE 1
+#define MACB_PFR_OFFSET 12
+#define MACB_PFR_SIZE 1
+#define MACB_PTZ_OFFSET 13
+#define MACB_PTZ_SIZE 1
+
+/* Bitfields in MAN */
+#define MACB_DATA_OFFSET 0
+#define MACB_DATA_SIZE 16
+#define MACB_CODE_OFFSET 16
+#define MACB_CODE_SIZE 2
+#define MACB_REGA_OFFSET 18
+#define MACB_REGA_SIZE 5
+#define MACB_PHYA_OFFSET 23
+#define MACB_PHYA_SIZE 5
+#define MACB_RW_OFFSET 28
+#define MACB_RW_SIZE 2
+#define MACB_SOF_OFFSET 30
+#define MACB_SOF_SIZE 2
+
+/* Bitfields in USRIO (AVR32) */
+#define MACB_MII_OFFSET 0
+#define MACB_MII_SIZE 1
+#define MACB_EAM_OFFSET 1
+#define MACB_EAM_SIZE 1
+#define MACB_TX_PAUSE_OFFSET 2
+#define MACB_TX_PAUSE_SIZE 1
+#define MACB_TX_PAUSE_ZERO_OFFSET 3
+#define MACB_TX_PAUSE_ZERO_SIZE 1
+
+/* Bitfields in USRIO (AT91) */
+#define MACB_RMII_OFFSET 0
+#define MACB_RMII_SIZE 1
+#define MACB_CLKEN_OFFSET 1
+#define MACB_CLKEN_SIZE 1
+
+/* Bitfields in WOL */
+#define MACB_IP_OFFSET 0
+#define MACB_IP_SIZE 16
+#define MACB_MAG_OFFSET 16
+#define MACB_MAG_SIZE 1
+#define MACB_ARP_OFFSET 17
+#define MACB_ARP_SIZE 1
+#define MACB_SA1_OFFSET 18
+#define MACB_SA1_SIZE 1
+#define MACB_WOL_MTI_OFFSET 19
+#define MACB_WOL_MTI_SIZE 1
+
+/* Constants for CLK */
+#define MACB_CLK_DIV8 0
+#define MACB_CLK_DIV16 1
+#define MACB_CLK_DIV32 2
+#define MACB_CLK_DIV64 3
+
+/* Constants for MAN register */
+#define MACB_MAN_SOF 1
+#define MACB_MAN_WRITE 1
+#define MACB_MAN_READ 2
+#define MACB_MAN_CODE 2
+
+/* Bit manipulation macros */
+#define MACB_BIT(name) \
+ (1 << MACB_##name##_OFFSET)
+#define MACB_BF(name,value) \
+ (((value) & ((1 << MACB_##name##_SIZE) - 1)) \
+ << MACB_##name##_OFFSET)
+#define MACB_BFEXT(name,value)\
+ (((value) >> MACB_##name##_OFFSET) \
+ & ((1 << MACB_##name##_SIZE) - 1))
+#define MACB_BFINS(name,value,old) \
+ (((old) & ~(((1 << MACB_##name##_SIZE) - 1) \
+ << MACB_##name##_OFFSET)) \
+ | MACB_BF(name,value))
+
+/* Register access macros */
+#define macb_readl(port,reg) \
+ __raw_readl((port)->regs + MACB_##reg)
+#define macb_writel(port,reg,value) \
+ __raw_writel((value), (port)->regs + MACB_##reg)
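+
+/*
+ * Illustrative sketch (not part of the original file): a PHY register
+ * read via the MAN register, composed from the macros and constants
+ * above. "bp" is assumed to be the driver's struct macb (declared
+ * further down); "phy_id" and "regnum" are the usual MDIO addresses.
+ *
+ *	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
+ *			      | MACB_BF(RW, MACB_MAN_READ)
+ *			      | MACB_BF(PHYA, phy_id)
+ *			      | MACB_BF(REGA, regnum)
+ *			      | MACB_BF(CODE, MACB_MAN_CODE)));
+ *
+ * Once the IDLE bit in NSR reads back as set, the result is
+ * MACB_BFEXT(DATA, macb_readl(bp, MAN)).
+ */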
+
+struct dma_desc {
+ u32 addr;
+ u32 ctrl;
+};
+
+/* DMA descriptor bitfields */
+#define MACB_RX_USED_OFFSET 0
+#define MACB_RX_USED_SIZE 1
+#define MACB_RX_WRAP_OFFSET 1
+#define MACB_RX_WRAP_SIZE 1
+#define MACB_RX_WADDR_OFFSET 2
+#define MACB_RX_WADDR_SIZE 30
+
+#define MACB_RX_FRMLEN_OFFSET 0
+#define MACB_RX_FRMLEN_SIZE 12
+#define MACB_RX_OFFSET_OFFSET 12
+#define MACB_RX_OFFSET_SIZE 2
+#define MACB_RX_SOF_OFFSET 14
+#define MACB_RX_SOF_SIZE 1
+#define MACB_RX_EOF_OFFSET 15
+#define MACB_RX_EOF_SIZE 1
+#define MACB_RX_CFI_OFFSET 16
+#define MACB_RX_CFI_SIZE 1
+#define MACB_RX_VLAN_PRI_OFFSET 17
+#define MACB_RX_VLAN_PRI_SIZE 3
+#define MACB_RX_PRI_TAG_OFFSET 20
+#define MACB_RX_PRI_TAG_SIZE 1
+#define MACB_RX_VLAN_TAG_OFFSET 21
+#define MACB_RX_VLAN_TAG_SIZE 1
+#define MACB_RX_TYPEID_MATCH_OFFSET 22
+#define MACB_RX_TYPEID_MATCH_SIZE 1
+#define MACB_RX_SA4_MATCH_OFFSET 23
+#define MACB_RX_SA4_MATCH_SIZE 1
+#define MACB_RX_SA3_MATCH_OFFSET 24
+#define MACB_RX_SA3_MATCH_SIZE 1
+#define MACB_RX_SA2_MATCH_OFFSET 25
+#define MACB_RX_SA2_MATCH_SIZE 1
+#define MACB_RX_SA1_MATCH_OFFSET 26
+#define MACB_RX_SA1_MATCH_SIZE 1
+#define MACB_RX_EXT_MATCH_OFFSET 28
+#define MACB_RX_EXT_MATCH_SIZE 1
+#define MACB_RX_UHASH_MATCH_OFFSET 29
+#define MACB_RX_UHASH_MATCH_SIZE 1
+#define MACB_RX_MHASH_MATCH_OFFSET 30
+#define MACB_RX_MHASH_MATCH_SIZE 1
+#define MACB_RX_BROADCAST_OFFSET 31
+#define MACB_RX_BROADCAST_SIZE 1
+
+#define MACB_TX_FRMLEN_OFFSET 0
+#define MACB_TX_FRMLEN_SIZE 11
+#define MACB_TX_LAST_OFFSET 15
+#define MACB_TX_LAST_SIZE 1
+#define MACB_TX_NOCRC_OFFSET 16
+#define MACB_TX_NOCRC_SIZE 1
+#define MACB_TX_BUF_EXHAUSTED_OFFSET 27
+#define MACB_TX_BUF_EXHAUSTED_SIZE 1
+#define MACB_TX_UNDERRUN_OFFSET 28
+#define MACB_TX_UNDERRUN_SIZE 1
+#define MACB_TX_ERROR_OFFSET 29
+#define MACB_TX_ERROR_SIZE 1
+#define MACB_TX_WRAP_OFFSET 30
+#define MACB_TX_WRAP_SIZE 1
+#define MACB_TX_USED_OFFSET 31
+#define MACB_TX_USED_SIZE 1
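+
+/*
+ * Illustrative sketch (not part of the original file): decoding one
+ * received descriptor with the fields above. The hardware sets the
+ * RX_USED bit in the address word when it hands a buffer back to
+ * software; "desc" is assumed to point into the RX ring.
+ *
+ *	if (desc->addr & MACB_BIT(RX_USED)) {
+ *		unsigned int len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
+ *		int sof = !!(desc->ctrl & MACB_BIT(RX_SOF));
+ *		...
+ *	}
+ */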
+
+struct ring_info {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+};
+
+/*
+ * Hardware-collected statistics, read by a periodic timer to update
+ * the network device stats.
+ */
+struct macb_stats {
+ u32 rx_pause_frames;
+ u32 tx_ok;
+ u32 tx_single_cols;
+ u32 tx_multiple_cols;
+ u32 rx_ok;
+ u32 rx_fcs_errors;
+ u32 rx_align_errors;
+ u32 tx_deferred;
+ u32 tx_late_cols;
+ u32 tx_excessive_cols;
+ u32 tx_underruns;
+ u32 tx_carrier_errors;
+ u32 rx_resource_errors;
+ u32 rx_overruns;
+ u32 rx_symbol_errors;
+ u32 rx_oversize_pkts;
+ u32 rx_jabbers;
+ u32 rx_undersize_pkts;
+ u32 sqe_test_errors;
+ u32 rx_length_mismatch;
+ u32 tx_pause_frames;
+};
+
+struct macb {
+ void __iomem *regs;
+
+ unsigned int rx_tail;
+ struct dma_desc *rx_ring;
+ void *rx_buffers;
+
+ unsigned int tx_head, tx_tail;
+ struct dma_desc *tx_ring;
+ struct ring_info *tx_skb;
+
+ spinlock_t lock;
+ struct platform_device *pdev;
+ struct clk *pclk;
+ struct clk *hclk;
+ struct net_device *dev;
+ struct napi_struct napi;
+ struct net_device_stats stats;
+ struct macb_stats hw_stats;
+
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+ dma_addr_t rx_buffers_dma;
+
+ unsigned int rx_pending, tx_pending;
+
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ unsigned int link;
+ unsigned int speed;
+ unsigned int duplex;
+};
+
+#endif /* _MACB_H */
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
new file mode 100644
index 000000000000..2de50f95798f
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -0,0 +1,107 @@
+#
+# Chelsio device configuration
+#
+
+config NET_VENDOR_CHELSIO
+ bool "Chelsio devices"
+ default y
+ depends on PCI || INET
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Chelsio devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_CHELSIO
+
+config CHELSIO_T1
+ tristate "Chelsio 10Gb Ethernet support"
+ depends on PCI
+ select CRC32
+ select MDIO
+ ---help---
+ This driver supports Chelsio gigabit and 10-gigabit
+ Ethernet cards. More information about adapter features and
+ performance tuning is in <file:Documentation/networking/cxgb.txt>.
+
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
+
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.html>.
+
+ Please send feedback to <linux-bugs@chelsio.com>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called cxgb.
+
+config CHELSIO_T1_1G
+ bool "Chelsio gigabit Ethernet support"
+ depends on CHELSIO_T1
+ ---help---
+ Enables support for Chelsio's gigabit Ethernet PCI cards. If you
+ are using only 10G cards say 'N' here.
+
+config CHELSIO_T3
+ tristate "Chelsio Communications T3 10Gb Ethernet support"
+ depends on PCI && INET
+ select FW_LOADER
+ select MDIO
+ ---help---
+ This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
+ adapters.
+
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
+
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.html>.
+
+ Please send feedback to <linux-bugs@chelsio.com>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called cxgb3.
+
+config CHELSIO_T4
+ tristate "Chelsio Communications T4 Ethernet support"
+ depends on PCI
+ select FW_LOADER
+ select MDIO
+ ---help---
+ This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
+ adapters.
+
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
+
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.html>.
+
+ Please send feedback to <linux-bugs@chelsio.com>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called cxgb4.
+
+config CHELSIO_T4VF
+ tristate "Chelsio Communications T4 Virtual Function Ethernet support"
+ depends on PCI
+ ---help---
+ This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
+ adapters with PCI-E SR-IOV Virtual Functions.
+
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
+
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.html>.
+
+ Please send feedback to <linux-bugs@chelsio.com>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called cxgb4vf.
+
+endif # NET_VENDOR_CHELSIO
diff --git a/drivers/net/ethernet/chelsio/Makefile b/drivers/net/ethernet/chelsio/Makefile
new file mode 100644
index 000000000000..390510b5e90f
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Chelsio network device drivers.
+#
+
+obj-$(CONFIG_CHELSIO_T1) += cxgb/
+obj-$(CONFIG_CHELSIO_T3) += cxgb3/
+obj-$(CONFIG_CHELSIO_T4) += cxgb4/
+obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf/
diff --git a/drivers/net/ethernet/chelsio/cxgb/Makefile b/drivers/net/ethernet/chelsio/cxgb/Makefile
new file mode 100644
index 000000000000..57a4b262fd3f
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/Makefile
@@ -0,0 +1,9 @@
+#
+# Chelsio T1 driver
+#
+
+obj-$(CONFIG_CHELSIO_T1) += cxgb.o
+
+cxgb-$(CONFIG_CHELSIO_T1_1G) += mv88e1xxx.o vsc7326.o
+cxgb-objs := cxgb2.o espi.o tp.o pm3393.o sge.o subr.o \
+ mv88x201x.o my3126.o $(cxgb-y)
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
new file mode 100644
index 000000000000..5ccbed1784d2
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -0,0 +1,353 @@
+/*****************************************************************************
+ * *
+ * File: common.h *
+ * $Revision: 1.21 $ *
+ * $Date: 2005/06/22 00:43:25 $ *
+ * Description: *
+ * part of the Chelsio 10Gb Ethernet Driver. *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License, version 2, as *
+ * published by the Free Software Foundation. *
+ * *
+ * You should have received a copy of the GNU General Public License along *
+ * with this program; if not, write to the Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
+ * *
+ * http://www.chelsio.com *
+ * *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
+ * All rights reserved. *
+ * *
+ * Maintainers: maintainers@chelsio.com *
+ * *
+ * Authors: Dimitrios Michailidis <dm@chelsio.com> *
+ * Tina Yang <tainay@chelsio.com> *
+ * Felix Marti <felix@chelsio.com> *
+ * Scott Bardone <sbardone@chelsio.com> *
+ * Kurt Ottaway <kottaway@chelsio.com> *
+ * Frank DiMambro <frank@chelsio.com> *
+ * *
+ * History: *
+ * *
+ ****************************************************************************/
+
+#define pr_fmt(fmt) "cxgb: " fmt
+
+#ifndef _CXGB_COMMON_H_
+#define _CXGB_COMMON_H_
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/mdio.h>
+#include <linux/crc32.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <linux/pci_ids.h>
+
+#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
+#define DRV_NAME "cxgb"
+#define DRV_VERSION "2.2"
+
+#define CH_DEVICE(devid, ssid, idx) \
+ { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
+
+#define SUPPORTED_PAUSE (1 << 13)
+#define SUPPORTED_LOOPBACK (1 << 15)
+
+#define ADVERTISED_PAUSE (1 << 13)
+#define ADVERTISED_ASYM_PAUSE (1 << 14)
+
+typedef struct adapter adapter_t;
+
+struct t1_rx_mode {
+ struct net_device *dev;
+};
+
+#define t1_rx_mode_promisc(rm) (rm->dev->flags & IFF_PROMISC)
+#define t1_rx_mode_allmulti(rm) (rm->dev->flags & IFF_ALLMULTI)
+#define t1_rx_mode_mc_cnt(rm) (netdev_mc_count(rm->dev))
+#define t1_get_netdev(rm) (rm->dev)
+
+#define MAX_NPORTS 4
+#define PORT_MASK ((1 << MAX_NPORTS) - 1)
+#define NMTUS 8
+#define TCB_SIZE 128
+
+#define SPEED_INVALID 0xffff
+#define DUPLEX_INVALID 0xff
+
+enum {
+ CHBT_BOARD_N110,
+ CHBT_BOARD_N210,
+ CHBT_BOARD_7500,
+ CHBT_BOARD_8000,
+ CHBT_BOARD_CHT101,
+ CHBT_BOARD_CHT110,
+ CHBT_BOARD_CHT210,
+ CHBT_BOARD_CHT204,
+ CHBT_BOARD_CHT204V,
+ CHBT_BOARD_CHT204E,
+ CHBT_BOARD_CHN204,
+ CHBT_BOARD_COUGAR,
+ CHBT_BOARD_6800,
+ CHBT_BOARD_SIMUL,
+};
+
+enum {
+ CHBT_TERM_FPGA,
+ CHBT_TERM_T1,
+ CHBT_TERM_T2,
+ CHBT_TERM_T3
+};
+
+enum {
+ CHBT_MAC_CHELSIO_A,
+ CHBT_MAC_IXF1010,
+ CHBT_MAC_PM3393,
+ CHBT_MAC_VSC7321,
+ CHBT_MAC_DUMMY
+};
+
+enum {
+ CHBT_PHY_88E1041,
+ CHBT_PHY_88E1111,
+ CHBT_PHY_88X2010,
+ CHBT_PHY_XPAK,
+ CHBT_PHY_MY3126,
+ CHBT_PHY_8244,
+ CHBT_PHY_DUMMY
+};
+
+enum {
+ PAUSE_RX = 1 << 0,
+ PAUSE_TX = 1 << 1,
+ PAUSE_AUTONEG = 1 << 2
+};
+
+/* Revisions of T1 chip */
+enum {
+ TERM_T1A = 0,
+ TERM_T1B = 1,
+ TERM_T2 = 3
+};
+
+struct sge_params {
+ unsigned int cmdQ_size[2];
+ unsigned int freelQ_size[2];
+ unsigned int large_buf_capacity;
+ unsigned int rx_coalesce_usecs;
+ unsigned int last_rx_coalesce_raw;
+ unsigned int default_rx_coalesce_usecs;
+ unsigned int sample_interval_usecs;
+ unsigned int coalesce_enable;
+ unsigned int polling;
+};
+
+struct chelsio_pci_params {
+ unsigned short speed;
+ unsigned char width;
+ unsigned char is_pcix;
+};
+
+struct tp_params {
+ unsigned int pm_size;
+ unsigned int cm_size;
+ unsigned int pm_rx_base;
+ unsigned int pm_tx_base;
+ unsigned int pm_rx_pg_size;
+ unsigned int pm_tx_pg_size;
+ unsigned int pm_rx_num_pgs;
+ unsigned int pm_tx_num_pgs;
+ unsigned int rx_coalescing_size;
+ unsigned int use_5tuple_mode;
+};
+
+struct mc5_params {
+ unsigned int mode; /* selects MC5 width */
+ unsigned int nservers; /* size of server region */
+ unsigned int nroutes; /* size of routing region */
+};
+
+/* Default MC5 region sizes */
+#define DEFAULT_SERVER_REGION_LEN 256
+#define DEFAULT_RT_REGION_LEN 1024
+
+struct adapter_params {
+ struct sge_params sge;
+ struct mc5_params mc5;
+ struct tp_params tp;
+ struct chelsio_pci_params pci;
+
+ const struct board_info *brd_info;
+
+ unsigned short mtus[NMTUS];
+ unsigned int nports; /* # of ethernet ports */
+ unsigned int stats_update_period;
+ unsigned short chip_revision;
+ unsigned char chip_version;
+ unsigned char is_asic;
+ unsigned char has_msi;
+};
+
+struct link_config {
+ unsigned int supported; /* link capabilities */
+ unsigned int advertising; /* advertised capabilities */
+ unsigned short requested_speed; /* speed user has requested */
+ unsigned short speed; /* actual link speed */
+ unsigned char requested_duplex; /* duplex user has requested */
+ unsigned char duplex; /* actual link duplex */
+ unsigned char requested_fc; /* flow control user has requested */
+ unsigned char fc; /* actual link flow control */
+ unsigned char autoneg; /* autonegotiating? */
+};
+
+struct cmac;
+struct cphy;
+
+struct port_info {
+ struct net_device *dev;
+ struct cmac *mac;
+ struct cphy *phy;
+ struct link_config link_config;
+ struct net_device_stats netstats;
+};
+
+struct sge;
+struct peespi;
+
+struct adapter {
+ u8 __iomem *regs;
+ struct pci_dev *pdev;
+ unsigned long registered_device_map;
+ unsigned long open_device_map;
+ unsigned long flags;
+
+ const char *name;
+ int msg_enable;
+ u32 mmio_len;
+
+ struct work_struct ext_intr_handler_task;
+ struct adapter_params params;
+
+ /* Terminator modules. */
+ struct sge *sge;
+ struct peespi *espi;
+ struct petp *tp;
+
+ struct napi_struct napi;
+ struct port_info port[MAX_NPORTS];
+ struct delayed_work stats_update_task;
+ struct timer_list stats_update_timer;
+
+ spinlock_t tpi_lock;
+ spinlock_t work_lock;
+ spinlock_t mac_lock;
+
+ /* guards async operations */
+ spinlock_t async_lock ____cacheline_aligned;
+ u32 slow_intr_mask;
+ int t1powersave;
+};
+
+enum { /* adapter flags */
+ FULL_INIT_DONE = 1 << 0,
+};
+
+struct mdio_ops;
+struct gmac;
+struct gphy;
+
+struct board_info {
+ unsigned char board;
+ unsigned char port_number;
+ unsigned long caps;
+ unsigned char chip_term;
+ unsigned char chip_mac;
+ unsigned char chip_phy;
+ unsigned int clock_core;
+ unsigned int clock_mc3;
+ unsigned int clock_mc4;
+ unsigned int espi_nports;
+ unsigned int clock_elmer0;
+ unsigned char mdio_mdien;
+ unsigned char mdio_mdiinv;
+ unsigned char mdio_mdc;
+ unsigned char mdio_phybaseaddr;
+ const struct gmac *gmac;
+ const struct gphy *gphy;
+ const struct mdio_ops *mdio_ops;
+ const char *desc;
+};
+
+static inline int t1_is_asic(const adapter_t *adapter)
+{
+ return adapter->params.is_asic;
+}
+
+extern const struct pci_device_id t1_pci_tbl[];
+
+static inline int adapter_matches_type(const adapter_t *adapter,
+ int version, int revision)
+{
+ return adapter->params.chip_version == version &&
+ adapter->params.chip_revision == revision;
+}
+
+#define t1_is_T1B(adap) adapter_matches_type(adap, CHBT_TERM_T1, TERM_T1B)
+#define is_T2(adap) adapter_matches_type(adap, CHBT_TERM_T2, TERM_T2)
+
+/* Returns true if an adapter supports VLAN acceleration and TSO */
+static inline int vlan_tso_capable(const adapter_t *adapter)
+{
+ return !t1_is_T1B(adapter);
+}
+
+#define for_each_port(adapter, iter) \
+ for (iter = 0; iter < (adapter)->params.nports; ++iter)
+
+#define board_info(adapter) ((adapter)->params.brd_info)
+#define is_10G(adapter) (board_info(adapter)->caps & SUPPORTED_10000baseT_Full)
+
+static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
+{
+ return board_info(adap)->clock_core / 1000000;
+}
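+
+/*
+ * Usage sketch for the helpers above (illustrative only; "i" and
+ * "ticks" are made-up locals):
+ *
+ *	int i;
+ *
+ *	for_each_port(adapter, i)
+ *		netif_carrier_off(adapter->port[i].dev);
+ *
+ *	if (is_10G(adapter))
+ *		ticks = 10 * core_ticks_per_usec(adapter);
+ */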
+
+extern int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
+extern int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
+
+extern void t1_interrupts_enable(adapter_t *adapter);
+extern void t1_interrupts_disable(adapter_t *adapter);
+extern void t1_interrupts_clear(adapter_t *adapter);
+extern int t1_elmer0_ext_intr_handler(adapter_t *adapter);
+extern void t1_elmer0_ext_intr(adapter_t *adapter);
+extern int t1_slow_intr_handler(adapter_t *adapter);
+
+extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
+extern const struct board_info *t1_get_board_info(unsigned int board_id);
+extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
+ unsigned short ssid);
+extern int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data);
+extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
+ struct adapter_params *p);
+extern int t1_init_hw_modules(adapter_t *adapter);
+extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
+extern void t1_free_sw_modules(adapter_t *adapter);
+extern void t1_fatal_err(adapter_t *adapter);
+extern void t1_link_changed(adapter_t *adapter, int port_id);
+extern void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
+ int speed, int duplex, int pause);
+#endif /* _CXGB_COMMON_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb/cphy.h b/drivers/net/ethernet/chelsio/cxgb/cphy.h
new file mode 100644
index 000000000000..1f095a9fc739
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/cphy.h
@@ -0,0 +1,175 @@
+/*****************************************************************************
+ * *
+ * File: cphy.h *
+ * $Revision: 1.7 $ *
+ * $Date: 2005/06/21 18:29:47 $ *
+ * Description: *
+ * part of the Chelsio 10Gb Ethernet Driver. *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License, version 2, as *
+ * published by the Free Software Foundation. *
+ * *
+ * You should have received a copy of the GNU General Public License along *
+ * with this program; if not, write to the Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
+ * *
+ * http://www.chelsio.com *
+ * *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
+ * All rights reserved. *
+ * *
+ * Maintainers: maintainers@chelsio.com *
+ * *
+ * Authors: Dimitrios Michailidis <dm@chelsio.com> *
+ * Tina Yang <tainay@chelsio.com> *
+ * Felix Marti <felix@chelsio.com> *
+ * Scott Bardone <sbardone@chelsio.com> *
+ * Kurt Ottaway <kottaway@chelsio.com> *
+ * Frank DiMambro <frank@chelsio.com> *
+ * *
+ * History: *
+ * *
+ ****************************************************************************/
+
+#ifndef _CXGB_CPHY_H_
+#define _CXGB_CPHY_H_
+
+#include "common.h"
+
+struct mdio_ops {
+ void (*init)(adapter_t *adapter, const struct board_info *bi);
+ int (*read)(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr);
+ int (*write)(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr, u16 val);
+ unsigned mode_support;
+};
+
+/* PHY interrupt types */
+enum {
+ cphy_cause_link_change = 0x1,
+ cphy_cause_error = 0x2,
+ cphy_cause_fifo_error = 0x3
+};
+
+enum {
+ PHY_LINK_UP = 0x1,
+ PHY_AUTONEG_RDY = 0x2,
+ PHY_AUTONEG_EN = 0x4
+};
+
+struct cphy;
+
+/* PHY operations */
+struct cphy_ops {
+ void (*destroy)(struct cphy *);
+ int (*reset)(struct cphy *, int wait);
+
+ int (*interrupt_enable)(struct cphy *);
+ int (*interrupt_disable)(struct cphy *);
+ int (*interrupt_clear)(struct cphy *);
+ int (*interrupt_handler)(struct cphy *);
+
+ int (*autoneg_enable)(struct cphy *);
+ int (*autoneg_disable)(struct cphy *);
+ int (*autoneg_restart)(struct cphy *);
+
+ int (*advertise)(struct cphy *phy, unsigned int advertise_map);
+ int (*set_loopback)(struct cphy *, int on);
+ int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
+ int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
+ int *duplex, int *fc);
+
+ u32 mmds;
+};
+
+/* A PHY instance */
+struct cphy {
+ int state; /* Link status state machine */
+ adapter_t *adapter; /* associated adapter */
+
+ struct delayed_work phy_update;
+
+ u16 bmsr;
+ int count;
+ int act_count;
+ int act_on;
+
+ u32 elmer_gpo;
+
+ const struct cphy_ops *ops; /* PHY operations */
+ struct mdio_if_info mdio;
+ struct cphy_instance *instance;
+};
+
+/* Convenience MDIO read/write wrappers */
+static inline int cphy_mdio_read(struct cphy *cphy, int mmd, int reg,
+ unsigned int *valp)
+{
+ int rc = cphy->mdio.mdio_read(cphy->mdio.dev, cphy->mdio.prtad, mmd,
+ reg);
+ *valp = (rc >= 0) ? rc : -1;
+ return (rc >= 0) ? 0 : rc;
+}
+
+static inline int cphy_mdio_write(struct cphy *cphy, int mmd, int reg,
+ unsigned int val)
+{
+ return cphy->mdio.mdio_write(cphy->mdio.dev, cphy->mdio.prtad, mmd,
+ reg, val);
+}
+
+static inline int simple_mdio_read(struct cphy *cphy, int reg,
+ unsigned int *valp)
+{
+ return cphy_mdio_read(cphy, MDIO_DEVAD_NONE, reg, valp);
+}
+
+static inline int simple_mdio_write(struct cphy *cphy, int reg,
+ unsigned int val)
+{
+ return cphy_mdio_write(cphy, MDIO_DEVAD_NONE, reg, val);
+}
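+
+/*
+ * Illustrative use of the wrappers above (a sketch, not driver code):
+ * a Clause-22 PHY implementation can poll link state through BMSR,
+ * with MII_BMSR and BMSR_LSTATUS from <linux/mii.h>.
+ *
+ *	unsigned int bmsr;
+ *
+ *	if (!simple_mdio_read(cphy, MII_BMSR, &bmsr))
+ *		link_ok = bmsr & BMSR_LSTATUS;
+ */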
+
+/* Convenience initializer */
+static inline void cphy_init(struct cphy *phy, struct net_device *dev,
+ int phy_addr, struct cphy_ops *phy_ops,
+ const struct mdio_ops *mdio_ops)
+{
+ struct adapter *adapter = netdev_priv(dev);
+ phy->adapter = adapter;
+ phy->ops = phy_ops;
+ if (mdio_ops) {
+ phy->mdio.prtad = phy_addr;
+ phy->mdio.mmds = phy_ops->mmds;
+ phy->mdio.mode_support = mdio_ops->mode_support;
+ phy->mdio.mdio_read = mdio_ops->read;
+ phy->mdio.mdio_write = mdio_ops->write;
+ }
+ phy->mdio.dev = dev;
+}
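+
+/*
+ * Illustrative only: a PHY driver's create() hook (see struct gphy
+ * below) typically allocates its cphy and wires it up with cphy_init();
+ * "my_phy_ops" stands in for that driver's cphy_ops.
+ *
+ *	struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
+ *
+ *	if (cphy)
+ *		cphy_init(cphy, dev, phy_addr, &my_phy_ops, mdio_ops);
+ */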
+
+/* Operations of the PHY-instance factory */
+struct gphy {
+ /* Construct a PHY instance with the given PHY address */
+ struct cphy *(*create)(struct net_device *dev, int phy_addr,
+ const struct mdio_ops *mdio_ops);
+
+ /*
+ * Reset the PHY chip. This resets the whole PHY chip, not individual
+ * ports.
+ */
+ int (*reset)(adapter_t *adapter);
+};
+
+extern const struct gphy t1_my3126_ops;
+extern const struct gphy t1_mv88e1xxx_ops;
+extern const struct gphy t1_vsc8244_ops;
+extern const struct gphy t1_mv88x201x_ops;
+
+#endif /* _CXGB_CPHY_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
new file mode 100644
index 000000000000..e36d45b78cc7
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
@@ -0,0 +1,639 @@
+/*****************************************************************************
+ * *
+ * File: cpl5_cmd.h *
+ * $Revision: 1.6 $ *
+ * $Date: 2005/06/21 18:29:47 $ *
+ * Description: *
+ * part of the Chelsio 10Gb Ethernet Driver. *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License, version 2, as *
+ * published by the Free Software Foundation. *
+ * *
+ * You should have received a copy of the GNU General Public License along *
+ * with this program; if not, write to the Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
+ * *
+ * http://www.chelsio.com *
+ * *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
+ * All rights reserved. *
+ * *
+ * Maintainers: maintainers@chelsio.com *
+ * *
+ * Authors: Dimitrios Michailidis <dm@chelsio.com> *
+ * Tina Yang <tainay@chelsio.com> *
+ * Felix Marti <felix@chelsio.com> *
+ * Scott Bardone <sbardone@chelsio.com> *
+ * Kurt Ottaway <kottaway@chelsio.com> *
+ * Frank DiMambro <frank@chelsio.com> *
+ * *
+ * History: *
+ * *
+ ****************************************************************************/
+
+#ifndef _CXGB_CPL5_CMD_H_
+#define _CXGB_CPL5_CMD_H_
+
+#include <asm/byteorder.h>
+
+#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
+#error "Adjust your <asm/byteorder.h> defines"
+#endif
+
+enum CPL_opcode {
+ CPL_PASS_OPEN_REQ = 0x1,
+ CPL_PASS_OPEN_RPL = 0x2,
+ CPL_PASS_ESTABLISH = 0x3,
+ CPL_PASS_ACCEPT_REQ = 0xE,
+ CPL_PASS_ACCEPT_RPL = 0x4,
+ CPL_ACT_OPEN_REQ = 0x5,
+ CPL_ACT_OPEN_RPL = 0x6,
+ CPL_CLOSE_CON_REQ = 0x7,
+ CPL_CLOSE_CON_RPL = 0x8,
+ CPL_CLOSE_LISTSRV_REQ = 0x9,
+ CPL_CLOSE_LISTSRV_RPL = 0xA,
+ CPL_ABORT_REQ = 0xB,
+ CPL_ABORT_RPL = 0xC,
+ CPL_PEER_CLOSE = 0xD,
+ CPL_ACT_ESTABLISH = 0x17,
+
+ CPL_GET_TCB = 0x24,
+ CPL_GET_TCB_RPL = 0x25,
+ CPL_SET_TCB = 0x26,
+ CPL_SET_TCB_FIELD = 0x27,
+ CPL_SET_TCB_RPL = 0x28,
+ CPL_PCMD = 0x29,
+
+ CPL_PCMD_READ = 0x31,
+ CPL_PCMD_READ_RPL = 0x32,
+
+
+ CPL_RX_DATA = 0xA0,
+ CPL_RX_DATA_DDP = 0xA1,
+ CPL_RX_DATA_ACK = 0xA3,
+ CPL_RX_PKT = 0xAD,
+ CPL_RX_ISCSI_HDR = 0xAF,
+ CPL_TX_DATA_ACK = 0xB0,
+ CPL_TX_DATA = 0xB1,
+ CPL_TX_PKT = 0xB2,
+ CPL_TX_PKT_LSO = 0xB6,
+
+ CPL_RTE_DELETE_REQ = 0xC0,
+ CPL_RTE_DELETE_RPL = 0xC1,
+ CPL_RTE_WRITE_REQ = 0xC2,
+ CPL_RTE_WRITE_RPL = 0xD3,
+ CPL_RTE_READ_REQ = 0xC3,
+ CPL_RTE_READ_RPL = 0xC4,
+ CPL_L2T_WRITE_REQ = 0xC5,
+ CPL_L2T_WRITE_RPL = 0xD4,
+ CPL_L2T_READ_REQ = 0xC6,
+ CPL_L2T_READ_RPL = 0xC7,
+ CPL_SMT_WRITE_REQ = 0xC8,
+ CPL_SMT_WRITE_RPL = 0xD5,
+ CPL_SMT_READ_REQ = 0xC9,
+ CPL_SMT_READ_RPL = 0xCA,
+ CPL_ARP_MISS_REQ = 0xCD,
+ CPL_ARP_MISS_RPL = 0xCE,
+ CPL_MIGRATE_C2T_REQ = 0xDC,
+ CPL_MIGRATE_C2T_RPL = 0xDD,
+ CPL_ERROR = 0xD7,
+
+ /* internal: driver -> TOM */
+ CPL_MSS_CHANGE = 0xE1
+};
+
+#define NUM_CPL_CMDS 256
+
+enum CPL_error {
+ CPL_ERR_NONE = 0,
+ CPL_ERR_TCAM_PARITY = 1,
+ CPL_ERR_TCAM_FULL = 3,
+ CPL_ERR_CONN_RESET = 20,
+ CPL_ERR_CONN_EXIST = 22,
+ CPL_ERR_ARP_MISS = 23,
+ CPL_ERR_BAD_SYN = 24,
+ CPL_ERR_CONN_TIMEDOUT = 30,
+ CPL_ERR_XMIT_TIMEDOUT = 31,
+ CPL_ERR_PERSIST_TIMEDOUT = 32,
+ CPL_ERR_FINWAIT2_TIMEDOUT = 33,
+ CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
+ CPL_ERR_ABORT_FAILED = 42,
+ CPL_ERR_GENERAL = 99
+};
+
+enum {
+ CPL_CONN_POLICY_AUTO = 0,
+ CPL_CONN_POLICY_ASK = 1,
+ CPL_CONN_POLICY_DENY = 3
+};
+
+enum {
+ ULP_MODE_NONE = 0,
+ ULP_MODE_TCPDDP = 1,
+ ULP_MODE_ISCSI = 2,
+ ULP_MODE_IWARP = 3,
+ ULP_MODE_SSL = 4
+};
+
+enum {
+ CPL_PASS_OPEN_ACCEPT,
+ CPL_PASS_OPEN_REJECT
+};
+
+enum {
+ CPL_ABORT_SEND_RST = 0,
+ CPL_ABORT_NO_RST,
+ CPL_ABORT_POST_CLOSE_REQ = 2
+};
+
+enum { /* TX_PKT_LSO ethernet types */
+ CPL_ETH_II,
+ CPL_ETH_II_VLAN,
+ CPL_ETH_802_3,
+ CPL_ETH_802_3_VLAN
+};
+
+union opcode_tid {
+ u32 opcode_tid;
+ u8 opcode;
+};
+
+#define S_OPCODE 24
+#define V_OPCODE(x) ((x) << S_OPCODE)
+#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
+#define G_TID(x) ((x) & 0xFFFFFF)
+
+/* tid is assumed to be 24-bits */
+#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
+
+#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
+
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
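+
+/*
+ * Illustrative sketch: the opcode/TID word is big-endian on the wire,
+ * so senders build it with htonl() while GET_TID() above already
+ * performs the matching ntohl() on receive.
+ *
+ *	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
+ *	...
+ *	unsigned int tid = GET_TID(rpl);
+ */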
+
+struct tcp_options {
+ u16 mss;
+ u8 wsf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 rsvd:4;
+ u8 ecn:1;
+ u8 sack:1;
+ u8 tstamp:1;
+#else
+ u8 tstamp:1;
+ u8 sack:1;
+ u8 ecn:1;
+ u8 rsvd:4;
+#endif
+};
+
+struct cpl_pass_open_req {
+ union opcode_tid ot;
+ u16 local_port;
+ u16 peer_port;
+ u32 local_ip;
+ u32 peer_ip;
+ u32 opt0h;
+ u32 opt0l;
+ u32 peer_netmask;
+ u32 opt1;
+};
+
+struct cpl_pass_open_rpl {
+ union opcode_tid ot;
+ u16 local_port;
+ u16 peer_port;
+ u32 local_ip;
+ u32 peer_ip;
+ u8 resvd[7];
+ u8 status;
+};
+
+struct cpl_pass_establish {
+ union opcode_tid ot;
+ u16 local_port;
+ u16 peer_port;
+ u32 local_ip;
+ u32 peer_ip;
+ u32 tos_tid;
+ u8 l2t_idx;
+ u8 rsvd[3];
+ u32 snd_isn;
+ u32 rcv_isn;
+};
+
+struct cpl_pass_accept_req {
+ union opcode_tid ot;
+ u16 local_port;
+ u16 peer_port;
+ u32 local_ip;
+ u32 peer_ip;
+ u32 tos_tid;
+ struct tcp_options tcp_options;
+ u8 dst_mac[6];
+ u16 vlan_tag;
+ u8 src_mac[6];
+ u8 rsvd[2];
+ u32 rcv_isn;
+ u32 unknown_tcp_options;
+};
+
+struct cpl_pass_accept_rpl {
+ union opcode_tid ot;
+ u32 rsvd0;
+ u32 rsvd1;
+ u32 peer_ip;
+ u32 opt0h;
+ union {
+ u32 opt0l;
+ struct {
+ u8 rsvd[3];
+ u8 status;
+ };
+ };
+};
+
+struct cpl_act_open_req {
+ union opcode_tid ot;
+ u16 local_port;
+ u16 peer_port;
+ u32 local_ip;
+ u32 peer_ip;
+ u32 opt0h;
+ u32 opt0l;
+ u32 iff_vlantag;
+ u32 rsvd;
+};
+
+struct cpl_act_open_rpl {
+ union opcode_tid ot;
+ u16 local_port;
+ u16 peer_port;
+ u32 local_ip;
+ u32 peer_ip;
+ u32 new_tid;
+ u8 rsvd[3];
+ u8 status;
+};
+
+struct cpl_act_establish {
+ union opcode_tid ot;
+ u16 local_port;
+ u16 peer_port;
+ u32 local_ip;
+ u32 peer_ip;
+ u32 tos_tid;
+ u32 rsvd;
+ u32 snd_isn;
+ u32 rcv_isn;
+};
+
+struct cpl_get_tcb {
+ union opcode_tid ot;
+ u32 rsvd;
+};
+
+struct cpl_get_tcb_rpl {
+ union opcode_tid ot;
+ u16 len;
+ u8 rsvd;
+ u8 status;
+};
+
+struct cpl_set_tcb {
+ union opcode_tid ot;
+ u16 len;
+ u16 rsvd;
+};
+
+struct cpl_set_tcb_field {
+ union opcode_tid ot;
+ u8 rsvd[3];
+ u8 offset;
+ u32 mask;
+ u32 val;
+};
+
+struct cpl_set_tcb_rpl {
+ union opcode_tid ot;
+ u8 rsvd[3];
+ u8 status;
+};
+
+struct cpl_pcmd {
+ union opcode_tid ot;
+ u16 dlen_in;
+ u16 dlen_out;
+ u32 pcmd_parm[2];
+};
+
+struct cpl_pcmd_read {
+ union opcode_tid ot;
+ u32 rsvd1;
+ u16 rsvd2;
+ u32 addr;
+ u16 len;
+};
+
+struct cpl_pcmd_read_rpl {
+ union opcode_tid ot;
+ u16 len;
+};
+
+struct cpl_close_con_req {
+ union opcode_tid ot;
+ u32 rsvd;
+};
+
+struct cpl_close_con_rpl {
+ union opcode_tid ot;
+ u8 rsvd[3];
+ u8 status;
+ u32 snd_nxt;
+ u32 rcv_nxt;
+};
+
+struct cpl_close_listserv_req {
+ union opcode_tid ot;
+ u32 rsvd;
+};
+
+struct cpl_close_listserv_rpl {
+ union opcode_tid ot;
+ u8 rsvd[3];
+ u8 status;
+};
+
+struct cpl_abort_req {
+ union opcode_tid ot;
+ u32 rsvd0;
+ u8 rsvd1;
+ u8 cmd;
+ u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl {
+ union opcode_tid ot;
+ u32 rsvd0;
+ u8 rsvd1;
+ u8 status;
+ u8 rsvd2[6];
+};
+
+struct cpl_peer_close {
+ union opcode_tid ot;
+ u32 rsvd;
+};
+
+struct cpl_tx_data {
+ union opcode_tid ot;
+ u32 len;
+ u32 rsvd0;
+ u16 urg;
+ u16 flags;
+};
+
+struct cpl_tx_data_ack {
+ union opcode_tid ot;
+ u32 ack_seq;
+};
+
+struct cpl_rx_data {
+ union opcode_tid ot;
+ u32 len;
+ u32 seq;
+ u16 urg;
+ u8 rsvd;
+ u8 status;
+};
+
+struct cpl_rx_data_ack {
+ union opcode_tid ot;
+ u32 credit;
+};
+
+struct cpl_rx_data_ddp {
+ union opcode_tid ot;
+ u32 len;
+ u32 seq;
+ u32 nxt_seq;
+ u32 ulp_crc;
+ u16 ddp_status;
+ u8 rsvd;
+ u8 status;
+};
+
+/*
+ * We want this header's alignment to be no more stringent than 2-byte aligned.
+ * All fields are u8 or u16 except for the length. However, that field is
+ * not used, so we break it into 2 16-bit parts to easily meet our
+ * alignment needs.
+ */
+struct cpl_tx_pkt {
+ u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 iff:4;
+ u8 ip_csum_dis:1;
+ u8 l4_csum_dis:1;
+ u8 vlan_valid:1;
+ u8 rsvd:1;
+#else
+ u8 rsvd:1;
+ u8 vlan_valid:1;
+ u8 l4_csum_dis:1;
+ u8 ip_csum_dis:1;
+ u8 iff:4;
+#endif
+ u16 vlan;
+ u16 len_hi;
+ u16 len_lo;
+};
+
+struct cpl_tx_pkt_lso {
+ u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 iff:4;
+ u8 ip_csum_dis:1;
+ u8 l4_csum_dis:1;
+ u8 vlan_valid:1;
+ u8 :1;
+#else
+ u8 :1;
+ u8 vlan_valid:1;
+ u8 l4_csum_dis:1;
+ u8 ip_csum_dis:1;
+ u8 iff:4;
+#endif
+ u16 vlan;
+ __be32 len;
+
+ u8 rsvd[5];
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 tcp_hdr_words:4;
+ u8 ip_hdr_words:4;
+#else
+ u8 ip_hdr_words:4;
+ u8 tcp_hdr_words:4;
+#endif
+ __be16 eth_type_mss;
+};
+
+struct cpl_rx_pkt {
+ u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 iff:4;
+ u8 csum_valid:1;
+ u8 bad_pkt:1;
+ u8 vlan_valid:1;
+ u8 rsvd:1;
+#else
+ u8 rsvd:1;
+ u8 vlan_valid:1;
+ u8 bad_pkt:1;
+ u8 csum_valid:1;
+ u8 iff:4;
+#endif
+ u16 csum;
+ u16 vlan;
+ u16 len;
+};
+
+struct cpl_l2t_write_req {
+ union opcode_tid ot;
+ u32 params;
+ u8 rsvd1[2];
+ u8 dst_mac[6];
+};
+
+struct cpl_l2t_write_rpl {
+ union opcode_tid ot;
+ u8 status;
+ u8 rsvd[3];
+};
+
+struct cpl_l2t_read_req {
+ union opcode_tid ot;
+ u8 rsvd[3];
+ u8 l2t_idx;
+};
+
+struct cpl_l2t_read_rpl {
+ union opcode_tid ot;
+ u32 params;
+ u8 rsvd1[2];
+ u8 dst_mac[6];
+};
+
+struct cpl_smt_write_req {
+ union opcode_tid ot;
+ u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 rsvd1:1;
+ u8 mtu_idx:3;
+ u8 iff:4;
+#else
+ u8 iff:4;
+ u8 mtu_idx:3;
+ u8 rsvd1:1;
+#endif
+ u16 rsvd2;
+ u16 rsvd3;
+ u8 src_mac1[6];
+ u16 rsvd4;
+ u8 src_mac0[6];
+};
+
+struct cpl_smt_write_rpl {
+ union opcode_tid ot;
+ u8 status;
+ u8 rsvd[3];
+};
+
+struct cpl_smt_read_req {
+ union opcode_tid ot;
+ u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 rsvd1:4;
+ u8 iff:4;
+#else
+ u8 iff:4;
+ u8 rsvd1:4;
+#endif
+ u16 rsvd2;
+};
+
+struct cpl_smt_read_rpl {
+ union opcode_tid ot;
+ u8 status;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 rsvd1:1;
+ u8 mtu_idx:3;
+ u8 rsvd0:4;
+#else
+ u8 rsvd0:4;
+ u8 mtu_idx:3;
+ u8 rsvd1:1;
+#endif
+ u16 rsvd2;
+ u16 rsvd3;
+ u8 src_mac1[6];
+ u16 rsvd4;
+ u8 src_mac0[6];
+};
+
+struct cpl_rte_delete_req {
+ union opcode_tid ot;
+ u32 params;
+};
+
+struct cpl_rte_delete_rpl {
+ union opcode_tid ot;
+ u8 status;
+ u8 rsvd[3];
+};
+
+struct cpl_rte_write_req {
+ union opcode_tid ot;
+ u32 params;
+ u32 netmask;
+ u32 faddr;
+};
+
+struct cpl_rte_write_rpl {
+ union opcode_tid ot;
+ u8 status;
+ u8 rsvd[3];
+};
+
+struct cpl_rte_read_req {
+ union opcode_tid ot;
+ u32 params;
+};
+
+struct cpl_rte_read_rpl {
+ union opcode_tid ot;
+ u8 status;
+ u8 rsvd0[2];
+ u8 l2t_idx;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 rsvd1:7;
+ u8 select:1;
+#else
+ u8 select:1;
+ u8 rsvd1:7;
+#endif
+ u8 rsvd2[3];
+ u32 addr;
+};
+
+struct cpl_mss_change {
+ union opcode_tid ot;
+ u32 mss;
+};
+
+#endif /* _CXGB_CPL5_CMD_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
new file mode 100644
index 000000000000..9993f4f15433
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -0,0 +1,1379 @@
+/*****************************************************************************
+ * *
+ * File: cxgb2.c *
+ * $Revision: 1.25 $ *
+ * $Date: 2005/06/22 00:43:25 $ *
+ * Description: *
+ * Chelsio 10Gb Ethernet Driver. *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License, version 2, as *
+ * published by the Free Software Foundation. *
+ * *
+ * You should have received a copy of the GNU General Public License along *
+ * with this program; if not, write to the Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
+ * *
+ * http://www.chelsio.com *
+ * *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
+ * All rights reserved. *
+ * *
+ * Maintainers: maintainers@chelsio.com *
+ * *
+ * Authors: Dimitrios Michailidis <dm@chelsio.com> *
+ * Tina Yang <tainay@chelsio.com> *
+ * Felix Marti <felix@chelsio.com> *
+ * Scott Bardone <sbardone@chelsio.com> *
+ * Kurt Ottaway <kottaway@chelsio.com> *
+ * Frank DiMambro <frank@chelsio.com> *
+ * *
+ * History: *
+ * *
+ ****************************************************************************/
+
+#include "common.h"
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/mii.h>
+#include <linux/sockios.h>
+#include <linux/dma-mapping.h>
+#include <asm/uaccess.h>
+
+#include "cpl5_cmd.h"
+#include "regs.h"
+#include "gmac.h"
+#include "cphy.h"
+#include "sge.h"
+#include "tp.h"
+#include "espi.h"
+#include "elmer0.h"
+
+#include <linux/workqueue.h>
+
+static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
+{
+ schedule_delayed_work(&ap->stats_update_task, secs * HZ);
+}
+
+static inline void cancel_mac_stats_update(struct adapter *ap)
+{
+ cancel_delayed_work(&ap->stats_update_task);
+}
+
+#define MAX_CMDQ_ENTRIES 16384
+#define MAX_CMDQ1_ENTRIES 1024
+#define MAX_RX_BUFFERS 16384
+#define MAX_RX_JUMBO_BUFFERS 16384
+#define MAX_TX_BUFFERS_HIGH 16384U
+#define MAX_TX_BUFFERS_LOW 1536U
+#define MAX_TX_BUFFERS 1460U
+#define MIN_FL_ENTRIES 32
+
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+ NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+/*
+ * The EEPROM is actually bigger, but only the first few bytes are used,
+ * so we only report those.
+ */
+#define EEPROM_SIZE 32
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("GPL");
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+module_param(dflt_msg_enable, int, 0);
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
+
+#define HCLOCK 0x0
+#define LCLOCK 0x1
+
+/* T1 cards powersave mode */
+static int t1_clock(struct adapter *adapter, int mode);
+static int t1powersave = 1; /* HW default is powersave mode. */
+
+module_param(t1powersave, int, 0);
+MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");
+
+static int disable_msi = 0;
+module_param(disable_msi, int, 0);
+MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
+
+static const char pci_speed[][4] = {
+ "33", "66", "100", "133"
+};
+
+/*
+ * Set up the MAC to receive the types of packets we want.
+ */
+static void t1_set_rxmode(struct net_device *dev)
+{
+ struct adapter *adapter = dev->ml_priv;
+ struct cmac *mac = adapter->port[dev->if_port].mac;
+ struct t1_rx_mode rm;
+
+ rm.dev = dev;
+ mac->ops->set_rx_mode(mac, &rm);
+}
+
+static void link_report(struct port_info *p)
+{
+ if (!netif_carrier_ok(p->dev))
+ printk(KERN_INFO "%s: link down\n", p->dev->name);
+ else {
+ const char *s = "10Mbps";
+
+ switch (p->link_config.speed) {
+ case SPEED_10000: s = "10Gbps"; break;
+ case SPEED_1000: s = "1000Mbps"; break;
+ case SPEED_100: s = "100Mbps"; break;
+ }
+
+ printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
+ p->dev->name, s,
+ p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
+ }
+}
+
+void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
+ int speed, int duplex, int pause)
+{
+ struct port_info *p = &adapter->port[port_id];
+
+ if (link_stat != netif_carrier_ok(p->dev)) {
+ if (link_stat)
+ netif_carrier_on(p->dev);
+ else
+ netif_carrier_off(p->dev);
+ link_report(p);
+
+ /* multi-ports: inform toe */
+ if ((speed > 0) && (adapter->params.nports > 1)) {
+ unsigned int sched_speed = 10;
+ switch (speed) {
+ case SPEED_1000:
+ sched_speed = 1000;
+ break;
+ case SPEED_100:
+ sched_speed = 100;
+ break;
+ case SPEED_10:
+ sched_speed = 10;
+ break;
+ }
+ t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
+ }
+ }
+}
+
+static void link_start(struct port_info *p)
+{
+ struct cmac *mac = p->mac;
+
+ mac->ops->reset(mac);
+ if (mac->ops->macaddress_set)
+ mac->ops->macaddress_set(mac, p->dev->dev_addr);
+ t1_set_rxmode(p->dev);
+ t1_link_start(p->phy, mac, &p->link_config);
+ mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+}
+
+static void enable_hw_csum(struct adapter *adapter)
+{
+ if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
+ t1_tp_set_ip_checksum_offload(adapter->tp, 1); /* for TSO only */
+ t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
+}
+
+/*
+ * Things to do upon first use of a card.
+ * This must run with the rtnl lock held.
+ */
+static int cxgb_up(struct adapter *adapter)
+{
+ int err = 0;
+
+ if (!(adapter->flags & FULL_INIT_DONE)) {
+ err = t1_init_hw_modules(adapter);
+ if (err)
+ goto out_err;
+
+ enable_hw_csum(adapter);
+ adapter->flags |= FULL_INIT_DONE;
+ }
+
+ t1_interrupts_clear(adapter);
+
+ adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
+ err = request_irq(adapter->pdev->irq, t1_interrupt,
+ adapter->params.has_msi ? 0 : IRQF_SHARED,
+ adapter->name, adapter);
+ if (err) {
+ if (adapter->params.has_msi)
+ pci_disable_msi(adapter->pdev);
+
+ goto out_err;
+ }
+
+ t1_sge_start(adapter->sge);
+ t1_interrupts_enable(adapter);
+out_err:
+ return err;
+}
+
+/*
+ * Release resources when all the ports have been stopped.
+ */
+static void cxgb_down(struct adapter *adapter)
+{
+ t1_sge_stop(adapter->sge);
+ t1_interrupts_disable(adapter);
+ free_irq(adapter->pdev->irq, adapter);
+ if (adapter->params.has_msi)
+ pci_disable_msi(adapter->pdev);
+}
+
+static int cxgb_open(struct net_device *dev)
+{
+ int err;
+ struct adapter *adapter = dev->ml_priv;
+ int other_ports = adapter->open_device_map & PORT_MASK;
+
+ napi_enable(&adapter->napi);
+ if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
+ napi_disable(&adapter->napi);
+ return err;
+ }
+
+ __set_bit(dev->if_port, &adapter->open_device_map);
+ link_start(&adapter->port[dev->if_port]);
+ netif_start_queue(dev);
+ if (!other_ports && adapter->params.stats_update_period)
+ schedule_mac_stats_update(adapter,
+ adapter->params.stats_update_period);
+
+ t1_vlan_mode(adapter, dev->features);
+ return 0;
+}
+
+static int cxgb_close(struct net_device *dev)
+{
+ struct adapter *adapter = dev->ml_priv;
+ struct port_info *p = &adapter->port[dev->if_port];
+ struct cmac *mac = p->mac;
+
+ netif_stop_queue(dev);
+ napi_disable(&adapter->napi);
+ mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
+ netif_carrier_off(dev);
+
+ clear_bit(dev->if_port, &adapter->open_device_map);
+ if (adapter->params.stats_update_period &&
+ !(adapter->open_device_map & PORT_MASK)) {
+ /* Stop statistics accumulation. */
+ smp_mb__after_clear_bit();
+ spin_lock(&adapter->work_lock); /* sync with update task */
+ spin_unlock(&adapter->work_lock);
+ cancel_mac_stats_update(adapter);
+ }
+
+ if (!adapter->open_device_map)
+ cxgb_down(adapter);
+ return 0;
+}
+
+static struct net_device_stats *t1_get_stats(struct net_device *dev)
+{
+ struct adapter *adapter = dev->ml_priv;
+ struct port_info *p = &adapter->port[dev->if_port];
+ struct net_device_stats *ns = &p->netstats;
+ const struct cmac_statistics *pstats;
+
+ /* Do a full update of the MAC stats */
+ pstats = p->mac->ops->statistics_update(p->mac,
+ MAC_STATS_UPDATE_FULL);
+
+ ns->tx_packets = pstats->TxUnicastFramesOK +
+ pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
+
+ ns->rx_packets = pstats->RxUnicastFramesOK +
+ pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
+
+ ns->tx_bytes = pstats->TxOctetsOK;
+ ns->rx_bytes = pstats->RxOctetsOK;
+
+ ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
+ pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
+ ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
+ pstats->RxFCSErrors + pstats->RxAlignErrors +
+ pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
+ pstats->RxSymbolErrors + pstats->RxRuntErrors;
+
+ ns->multicast = pstats->RxMulticastFramesOK;
+ ns->collisions = pstats->TxTotalCollisions;
+
+ /* detailed rx_errors */
+ ns->rx_length_errors = pstats->RxFrameTooLongErrors +
+ pstats->RxJabberErrors;
+ ns->rx_over_errors = 0;
+ ns->rx_crc_errors = pstats->RxFCSErrors;
+ ns->rx_frame_errors = pstats->RxAlignErrors;
+ ns->rx_fifo_errors = 0;
+ ns->rx_missed_errors = 0;
+
+ /* detailed tx_errors */
+ ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
+ ns->tx_carrier_errors = 0;
+ ns->tx_fifo_errors = pstats->TxUnderrun;
+ ns->tx_heartbeat_errors = 0;
+ ns->tx_window_errors = pstats->TxLateCollisions;
+ return ns;
+}
+
+static u32 get_msglevel(struct net_device *dev)
+{
+ struct adapter *adapter = dev->ml_priv;
+
+ return adapter->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+ struct adapter *adapter = dev->ml_priv;
+
+ adapter->msg_enable = val;
+}
+
+static char stats_strings[][ETH_GSTRING_LEN] = {
+ "TxOctetsOK",
+ "TxOctetsBad",
+ "TxUnicastFramesOK",
+ "TxMulticastFramesOK",
+ "TxBroadcastFramesOK",
+ "TxPauseFrames",
+ "TxFramesWithDeferredXmissions",
+ "TxLateCollisions",
+ "TxTotalCollisions",
+ "TxFramesAbortedDueToXSCollisions",
+ "TxUnderrun",
+ "TxLengthErrors",
+ "TxInternalMACXmitError",
+ "TxFramesWithExcessiveDeferral",
+ "TxFCSErrors",
+ "TxJumboFramesOk",
+ "TxJumboOctetsOk",
+
+ "RxOctetsOK",
+ "RxOctetsBad",
+ "RxUnicastFramesOK",
+ "RxMulticastFramesOK",
+ "RxBroadcastFramesOK",
+ "RxPauseFrames",
+ "RxFCSErrors",
+ "RxAlignErrors",
+ "RxSymbolErrors",
+ "RxDataErrors",
+ "RxSequenceErrors",
+ "RxRuntErrors",
+ "RxJabberErrors",
+ "RxInternalMACRcvError",
+ "RxInRangeLengthErrors",
+ "RxOutOfRangeLengthField",
+ "RxFrameTooLongErrors",
+ "RxJumboFramesOk",
+ "RxJumboOctetsOk",
+
+ /* Port stats */
+ "RxCsumGood",
+ "TxCsumOffload",
+ "TxTso",
+ "RxVlan",
+ "TxVlan",
+ "TxNeedHeadroom",
+
+ /* Interrupt stats */
+ "rx drops",
+ "pure_rsps",
+ "unhandled irqs",
+ "respQ_empty",
+ "respQ_overflow",
+ "freelistQ_empty",
+ "pkt_too_big",
+ "pkt_mismatch",
+ "cmdQ_full0",
+ "cmdQ_full1",
+
+ "espi_DIP2ParityErr",
+ "espi_DIP4Err",
+ "espi_RxDrops",
+ "espi_TxDrops",
+ "espi_RxOvfl",
+ "espi_ParityErr"
+};
+
+#define T2_REGMAP_SIZE (3 * 1024)
+
+static int get_regs_len(struct net_device *dev)
+{
+ return T2_REGMAP_SIZE;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct adapter *adapter = dev->ml_priv;
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->fw_version, "N/A");
+ strcpy(info->bus_info, pci_name(adapter->pdev));
+}
+
+static int get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(stats_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ if (stringset == ETH_SS_STATS)
+ memcpy(data, stats_strings, sizeof(stats_strings));
+}
+
+static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct adapter *adapter = dev->ml_priv;
+ struct cmac *mac = adapter->port[dev->if_port].mac;
+ const struct cmac_statistics *s;
+ const struct sge_intr_counts *t;
+ struct sge_port_stats ss;
+
+ s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
+ t = t1_sge_get_intr_counts(adapter->sge);
+ t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
+
+ *data++ = s->TxOctetsOK;
+ *data++ = s->TxOctetsBad;
+ *data++ = s->TxUnicastFramesOK;
+ *data++ = s->TxMulticastFramesOK;
+ *data++ = s->TxBroadcastFramesOK;
+ *data++ = s->TxPauseFrames;
+ *data++ = s->TxFramesWithDeferredXmissions;
+ *data++ = s->TxLateCollisions;
+ *data++ = s->TxTotalCollisions;
+ *data++ = s->TxFramesAbortedDueToXSCollisions;
+ *data++ = s->TxUnderrun;
+ *data++ = s->TxLengthErrors;
+ *data++ = s->TxInternalMACXmitError;
+ *data++ = s->TxFramesWithExcessiveDeferral;
+ *data++ = s->TxFCSErrors;
+ *data++ = s->TxJumboFramesOK;
+ *data++ = s->TxJumboOctetsOK;
+
+ *data++ = s->RxOctetsOK;
+ *data++ = s->RxOctetsBad;
+ *data++ = s->RxUnicastFramesOK;
+ *data++ = s->RxMulticastFramesOK;
+ *data++ = s->RxBroadcastFramesOK;
+ *data++ = s->RxPauseFrames;
+ *data++ = s->RxFCSErrors;
+ *data++ = s->RxAlignErrors;
+ *data++ = s->RxSymbolErrors;
+ *data++ = s->RxDataErrors;
+ *data++ = s->RxSequenceErrors;
+ *data++ = s->RxRuntErrors;
+ *data++ = s->RxJabberErrors;
+ *data++ = s->RxInternalMACRcvError;
+ *data++ = s->RxInRangeLengthErrors;
+ *data++ = s->RxOutOfRangeLengthField;
+ *data++ = s->RxFrameTooLongErrors;
+ *data++ = s->RxJumboFramesOK;
+ *data++ = s->RxJumboOctetsOK;
+
+ *data++ = ss.rx_cso_good;
+ *data++ = ss.tx_cso;
+ *data++ = ss.tx_tso;
+ *data++ = ss.vlan_xtract;
+ *data++ = ss.vlan_insert;
+ *data++ = ss.tx_need_hdrroom;
+
+ *data++ = t->rx_drops;
+ *data++ = t->pure_rsps;
+ *data++ = t->unhandled_irqs;
+ *data++ = t->respQ_empty;
+ *data++ = t->respQ_overflow;
+ *data++ = t->freelistQ_empty;
+ *data++ = t->pkt_too_big;
+ *data++ = t->pkt_mismatch;
+ *data++ = t->cmdQ_full[0];
+ *data++ = t->cmdQ_full[1];
+
+ if (adapter->espi) {
+ const struct espi_intr_counts *e;
+
+ e = t1_espi_get_intr_counts(adapter->espi);
+ *data++ = e->DIP2_parity_err;
+ *data++ = e->DIP4_err;
+ *data++ = e->rx_drops;
+ *data++ = e->tx_drops;
+ *data++ = e->rx_ovflw;
+ *data++ = e->parity_err;
+ }
+}
+
+static inline void reg_block_dump(struct adapter *ap, void *buf,
+ unsigned int start, unsigned int end)
+{
+ u32 *p = buf + start;
+
+ for ( ; start <= end; start += sizeof(u32))
+ *p++ = readl(ap->regs + start);
+}
+
+static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *buf)
+{
+ struct adapter *ap = dev->ml_priv;
+
+ /*
+ * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
+ */
+ regs->version = 2;
+
+ memset(buf, 0, T2_REGMAP_SIZE);
+ reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
+ reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
+ reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
+ reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
+ reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
+ reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
+ reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
+ reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
+ reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
+ reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
+}
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct adapter *adapter = dev->ml_priv;
+ struct port_info *p = &adapter->port[dev->if_port];
+
+ cmd->supported = p->link_config.supported;
+ cmd->advertising = p->link_config.advertising;
+
+ if (netif_carrier_ok(dev)) {
+ ethtool_cmd_speed_set(cmd, p->link_config.speed);
+ cmd->duplex = p->link_config.duplex;
+ } else {
+ ethtool_cmd_speed_set(cmd, -1);
+ cmd->duplex = -1;
+ }
+
+ cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+ cmd->phy_address = p->phy->mdio.prtad;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->autoneg = p->link_config.autoneg;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+ return 0;
+}
+
+static int speed_duplex_to_caps(int speed, int duplex)
+{
+ int cap = 0;
+
+ switch (speed) {
+ case SPEED_10:
+ if (duplex == DUPLEX_FULL)
+ cap = SUPPORTED_10baseT_Full;
+ else
+ cap = SUPPORTED_10baseT_Half;
+ break;
+ case SPEED_100:
+ if (duplex == DUPLEX_FULL)
+ cap = SUPPORTED_100baseT_Full;
+ else
+ cap = SUPPORTED_100baseT_Half;
+ break;
+ case SPEED_1000:
+ if (duplex == DUPLEX_FULL)
+ cap = SUPPORTED_1000baseT_Full;
+ else
+ cap = SUPPORTED_1000baseT_Half;
+ break;
+ case SPEED_10000:
+ if (duplex == DUPLEX_FULL)
+ cap = SUPPORTED_10000baseT_Full;
+ }
+ return cap;
+}
+
+#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
+ ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
+ ADVERTISED_10000baseT_Full)
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct adapter *adapter = dev->ml_priv;
+ struct port_info *p = &adapter->port[dev->if_port];
+ struct link_config *lc = &p->link_config;
+
+ if (!(lc->supported & SUPPORTED_Autoneg))
+ return -EOPNOTSUPP; /* can't change speed/duplex */
+
+ if (cmd->autoneg == AUTONEG_DISABLE) {
+ u32 speed = ethtool_cmd_speed(cmd);
+ int cap = speed_duplex_to_caps(speed, cmd->duplex);
+
+ if (!(lc->supported & cap) || (speed == SPEED_1000))
+ return -EINVAL;
+ lc->requested_speed = speed;
+ lc->requested_duplex = cmd->duplex;
+ lc->advertising = 0;
+ } else {
+ cmd->advertising &= ADVERTISED_MASK;
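+ /* x & (x - 1) is non-zero iff more than one advertising bit is set */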
+ if (cmd->advertising & (cmd->advertising - 1))
+ cmd->advertising = lc->supported;
+ cmd->advertising &= lc->supported;
+ if (!cmd->advertising)
+ return -EINVAL;
+ lc->requested_speed = SPEED_INVALID;
+ lc->requested_duplex = DUPLEX_INVALID;
+ lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
+ }
+ lc->autoneg = cmd->autoneg;
+ if (netif_running(dev))
+ t1_link_start(p->phy, p->mac, lc);
+ return 0;
+}
+
+static void get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct adapter *adapter = dev->ml_priv;
+ struct port_info *p = &adapter->port[dev->if_port];
+
+ epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
+ epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
+ epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
+}
+
+static int set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct adapter *adapter = dev->ml_priv;
+ struct port_info *p = &adapter->port[dev->if_port];
+ struct link_config *lc = &p->link_config;
+
+ if (epause->autoneg == AUTONEG_DISABLE)
+ lc->requested_fc = 0;
+ else if (lc->supported & SUPPORTED_Autoneg)
+ lc->requested_fc = PAUSE_AUTONEG;
+ else
+ return -EINVAL;
+
+ if (epause->rx_pause)
+ lc->requested_fc |= PAUSE_RX;
+ if (epause->tx_pause)
+ lc->requested_fc |= PAUSE_TX;
+ if (lc->autoneg == AUTONEG_ENABLE) {
+ if (netif_running(dev))
+ t1_link_start(p->phy, p->mac, lc);
+ } else {
+ lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+ if (netif_running(dev))
+ p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
+ lc->fc);
+ }
+ return 0;
+}
+
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+ struct adapter *adapter = dev->ml_priv;
+ int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
+
+ e->rx_max_pending = MAX_RX_BUFFERS;
+ e->rx_mini_max_pending = 0;
+ e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
+ e->tx_max_pending = MAX_CMDQ_ENTRIES;
+
+ e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
+ e->rx_mini_pending = 0;
+ e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
+ e->tx_pending = adapter->params.sge.cmdQ_size[0];
+}
+
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+ struct adapter *adapter = dev->ml_priv;
+ int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
+
+ if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
+ e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
+ e->tx_pending > MAX_CMDQ_ENTRIES ||
+ e->rx_pending < MIN_FL_ENTRIES ||
+ e->rx_jumbo_pending < MIN_FL_ENTRIES ||
+ e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
+ return -EINVAL;
+
+ if (adapter->flags & FULL_INIT_DONE)
+ return -EBUSY;
+
+ adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
+ adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
+ adapter->params.sge.cmdQ_size[0] = e->tx_pending;
+ adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
+ MAX_CMDQ1_ENTRIES : e->tx_pending;
+ return 0;
+}
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+ struct adapter *adapter = dev->ml_priv;
+
+ adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
+ adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
+ adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
+ t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
+ return 0;
+}
+
+static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+ struct adapter *adapter = dev->ml_priv;
+
+ c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
+ c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
+ c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
+ return 0;
+}
+
+static int get_eeprom_len(struct net_device *dev)
+{
+ struct adapter *adapter = dev->ml_priv;
+
+ return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
+}
+
+#define EEPROM_MAGIC(ap) \
+ (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
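+
+/*
+ * Editor's sketch (not from the original patch): EEPROM_MAGIC packs the
+ * PCI vendor ID into the low 16 bits and the chip version above bit 16.
+ * Stand-alone demo; 0x1425 is the Chelsio PCI vendor ID, the chip
+ * version value is illustrative.
+ */
+#if 0
+#include <stdio.h>
+#include <stdint.h>
+
+int main(void)
+{
+        uint32_t vendor = 0x1425; /* PCI_VENDOR_ID_CHELSIO */
+        uint32_t chip_version = 2; /* illustrative */
+        uint32_t magic = vendor | (chip_version << 16);
+
+        printf("magic = 0x%08x\n", magic); /* 0x00021425 */
+        return 0;
+}
+#endif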
+
+static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
+ u8 *data)
+{
+ int i;
+ u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
+ struct adapter *adapter = dev->ml_priv;
+
+ e->magic = EEPROM_MAGIC(adapter);
+ for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
+ t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
+ memcpy(data, buf + e->offset, e->len);
+ return 0;
+}
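+
+/*
+ * Editor's sketch (not from the original patch): get_eeprom() above
+ * reads whole 32-bit words starting at the rounded-down offset and then
+ * copies out only the requested byte range. The same windowing
+ * arithmetic, rehearsed in user space:
+ */
+#if 0
+#include <stdio.h>
+#include <string.h>
+
+int main(void)
+{
+        char eeprom[32], buf[32], out[8];
+        unsigned int offset = 6, len = 5, i;
+
+        for (i = 0; i < sizeof(eeprom); i++)
+                eeprom[i] = (char)i;
+
+        /* 4-byte aligned reads covering [offset, offset + len) */
+        for (i = offset & ~3; i < offset + len; i += 4)
+                memcpy(&buf[i], &eeprom[i], 4);
+
+        memcpy(out, buf + offset, len);
+        for (i = 0; i < len; i++)
+                printf("%d ", out[i]); /* 6 7 8 9 10 */
+        printf("\n");
+        return 0;
+}
+#endif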
+
+static const struct ethtool_ops t1_ethtool_ops = {
+ .get_settings = get_settings,
+ .set_settings = set_settings,
+ .get_drvinfo = get_drvinfo,
+ .get_msglevel = get_msglevel,
+ .set_msglevel = set_msglevel,
+ .get_ringparam = get_sge_param,
+ .set_ringparam = set_sge_param,
+ .get_coalesce = get_coalesce,
+ .set_coalesce = set_coalesce,
+ .get_eeprom_len = get_eeprom_len,
+ .get_eeprom = get_eeprom,
+ .get_pauseparam = get_pauseparam,
+ .set_pauseparam = set_pauseparam,
+ .get_link = ethtool_op_get_link,
+ .get_strings = get_strings,
+ .get_sset_count = get_sset_count,
+ .get_ethtool_stats = get_stats,
+ .get_regs_len = get_regs_len,
+ .get_regs = get_regs,
+};
+
+static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ struct adapter *adapter = dev->ml_priv;
+ struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;
+
+ return mdio_mii_ioctl(mdio, if_mii(req), cmd);
+}
+
+static int t1_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int ret;
+ struct adapter *adapter = dev->ml_priv;
+ struct cmac *mac = adapter->port[dev->if_port].mac;
+
+ if (!mac->ops->set_mtu)
+ return -EOPNOTSUPP;
+ if (new_mtu < 68)
+ return -EINVAL; /* 68 is the minimum IPv4 MTU */
+ if ((ret = mac->ops->set_mtu(mac, new_mtu)))
+ return ret;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static int t1_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct adapter *adapter = dev->ml_priv;
+ struct cmac *mac = adapter->port[dev->if_port].mac;
+ struct sockaddr *addr = p;
+
+ if (!mac->ops->macaddress_set)
+ return -EOPNOTSUPP;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ mac->ops->macaddress_set(mac, dev->dev_addr);
+ return 0;
+}
+
+static u32 t1_fix_features(struct net_device *dev, u32 features)
+{
+ /*
+ * Since there is no support for separate rx/tx vlan accel
+ * enable/disable, make sure the tx flag is always in the same
+ * state as the rx flag.
+ */
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ return features;
+}
+
+static int t1_set_features(struct net_device *dev, u32 features)
+{
+ u32 changed = dev->features ^ features;
+ struct adapter *adapter = dev->ml_priv;
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ t1_vlan_mode(adapter, features);
+
+ return 0;
+}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void t1_netpoll(struct net_device *dev)
+{
+ unsigned long flags;
+ struct adapter *adapter = dev->ml_priv;
+
+ local_irq_save(flags);
+ t1_interrupt(adapter->pdev->irq, adapter);
+ local_irq_restore(flags);
+}
+#endif
+
+/*
+ * Periodic accumulation of MAC statistics. This is used only if the MAC
+ * does not have any other way to prevent stats counter overflow.
+ */
+static void mac_stats_task(struct work_struct *work)
+{
+ int i;
+ struct adapter *adapter =
+ container_of(work, struct adapter, stats_update_task.work);
+
+ for_each_port(adapter, i) {
+ struct port_info *p = &adapter->port[i];
+
+ if (netif_running(p->dev))
+ p->mac->ops->statistics_update(p->mac,
+ MAC_STATS_UPDATE_FAST);
+ }
+
+ /* Schedule the next statistics update if any port is active. */
+ spin_lock(&adapter->work_lock);
+ if (adapter->open_device_map & PORT_MASK)
+ schedule_mac_stats_update(adapter,
+ adapter->params.stats_update_period);
+ spin_unlock(&adapter->work_lock);
+}
+
+/*
+ * Processes elmer0 external interrupts in process context.
+ */
+static void ext_intr_task(struct work_struct *work)
+{
+ struct adapter *adapter =
+ container_of(work, struct adapter, ext_intr_handler_task);
+
+ t1_elmer0_ext_intr_handler(adapter);
+
+ /* Now reenable external interrupts */
+ spin_lock_irq(&adapter->async_lock);
+ adapter->slow_intr_mask |= F_PL_INTR_EXT;
+ writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
+ writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+ adapter->regs + A_PL_ENABLE);
+ spin_unlock_irq(&adapter->async_lock);
+}
+
+/*
+ * Interrupt-context handler for elmer0 external interrupts.
+ */
+void t1_elmer0_ext_intr(struct adapter *adapter)
+{
+ /*
+ * Schedule a task to handle external interrupts as we require
+ * a process context. We disable EXT interrupts in the interim
+ * and let the task reenable them when it's done.
+ */
+ adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
+ writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+ adapter->regs + A_PL_ENABLE);
+ schedule_work(&adapter->ext_intr_handler_task);
+}
+
+void t1_fatal_err(struct adapter *adapter)
+{
+ if (adapter->flags & FULL_INIT_DONE) {
+ t1_sge_stop(adapter->sge);
+ t1_interrupts_disable(adapter);
+ }
+ pr_alert("%s: encountered fatal error, operation suspended\n",
+ adapter->name);
+}
+
+static const struct net_device_ops cxgb_netdev_ops = {
+ .ndo_open = cxgb_open,
+ .ndo_stop = cxgb_close,
+ .ndo_start_xmit = t1_start_xmit,
+ .ndo_get_stats = t1_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = t1_set_rxmode,
+ .ndo_do_ioctl = t1_ioctl,
+ .ndo_change_mtu = t1_change_mtu,
+ .ndo_set_mac_address = t1_set_mac_addr,
+ .ndo_fix_features = t1_fix_features,
+ .ndo_set_features = t1_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = t1_netpoll,
+#endif
+};
+
+static int __devinit init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int version_printed;
+
+ int i, err, pci_using_dac = 0;
+ unsigned long mmio_start, mmio_len;
+ const struct board_info *bi;
+ struct adapter *adapter = NULL;
+ struct port_info *pi;
+
+ if (!version_printed) {
+ printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
+ DRV_VERSION);
+ ++version_printed;
+ }
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ pr_err("%s: cannot find PCI device memory base address\n",
+ pci_name(pdev));
+ err = -ENODEV;
+ goto out_disable_pdev;
+ }
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ pci_using_dac = 1;
+
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ pr_err("%s: unable to obtain 64-bit DMA for "
+ "consistent allocations\n", pci_name(pdev));
+ err = -ENODEV;
+ goto out_disable_pdev;
+ }
+
+ } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
+ pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
+ goto out_disable_pdev;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
+ goto out_disable_pdev;
+ }
+
+ pci_set_master(pdev);
+
+ mmio_start = pci_resource_start(pdev, 0);
+ mmio_len = pci_resource_len(pdev, 0);
+ bi = t1_get_board_info(ent->driver_data);
+
+ for (i = 0; i < bi->port_number; ++i) {
+ struct net_device *netdev;
+
+ netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
+ if (!netdev) {
+ err = -ENOMEM;
+ goto out_free_dev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ if (!adapter) {
+ adapter = netdev_priv(netdev);
+ adapter->pdev = pdev;
+ adapter->port[0].dev = netdev; /* so we don't leak it */
+
+ adapter->regs = ioremap(mmio_start, mmio_len);
+ if (!adapter->regs) {
+ pr_err("%s: cannot map device registers\n",
+ pci_name(pdev));
+ err = -ENOMEM;
+ goto out_free_dev;
+ }
+
+ if (t1_get_board_rev(adapter, bi, &adapter->params)) {
+ err = -ENODEV; /* Can't handle this chip rev */
+ goto out_free_dev;
+ }
+
+ adapter->name = pci_name(pdev);
+ adapter->msg_enable = dflt_msg_enable;
+ adapter->mmio_len = mmio_len;
+
+ spin_lock_init(&adapter->tpi_lock);
+ spin_lock_init(&adapter->work_lock);
+ spin_lock_init(&adapter->async_lock);
+ spin_lock_init(&adapter->mac_lock);
+
+ INIT_WORK(&adapter->ext_intr_handler_task,
+ ext_intr_task);
+ INIT_DELAYED_WORK(&adapter->stats_update_task,
+ mac_stats_task);
+
+ pci_set_drvdata(pdev, netdev);
+ }
+
+ pi = &adapter->port[i];
+ pi->dev = netdev;
+ netif_carrier_off(netdev);
+ netdev->irq = pdev->irq;
+ netdev->if_port = i;
+ netdev->mem_start = mmio_start;
+ netdev->mem_end = mmio_start + mmio_len - 1;
+ netdev->ml_priv = adapter;
+ netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_RXCSUM;
+ netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_LLTX;
+
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+ if (vlan_tso_capable(adapter)) {
+ netdev->features |=
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ netdev->hw_features |= NETIF_F_HW_VLAN_RX;
+
+ /* T204: disable TSO */
+ if (!(is_T2(adapter)) || bi->port_number != 4) {
+ netdev->hw_features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO;
+ }
+ }
+
+ netdev->netdev_ops = &cxgb_netdev_ops;
+ netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
+ sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
+
+ netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
+
+ SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
+ }
+
+ if (t1_init_sw_modules(adapter, bi) < 0) {
+ err = -ENODEV;
+ goto out_free_dev;
+ }
+
+ /*
+ * The card is now ready to go. If any errors occur during device
+ * registration we do not fail the whole card but rather proceed only
+ * with the ports we manage to register successfully. However, we must
+ * register at least one net device.
+ */
+ for (i = 0; i < bi->port_number; ++i) {
+ err = register_netdev(adapter->port[i].dev);
+ if (err)
+ pr_warning("%s: cannot register net device %s, skipping\n",
+ pci_name(pdev), adapter->port[i].dev->name);
+ else {
+ /*
+ * Change the name we use for messages to the name of
+ * the first successfully registered interface.
+ */
+ if (!adapter->registered_device_map)
+ adapter->name = adapter->port[i].dev->name;
+
+ __set_bit(i, &adapter->registered_device_map);
+ }
+ }
+ if (!adapter->registered_device_map) {
+ pr_err("%s: could not register any net devices\n",
+ pci_name(pdev));
+ goto out_release_adapter_res;
+ }
+
+ printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
+ bi->desc, adapter->params.chip_revision,
+ adapter->params.pci.is_pcix ? "PCIX" : "PCI",
+ adapter->params.pci.speed, adapter->params.pci.width);
+
+ /*
+ * Set the T1B ASIC and memory clocks.
+ */
+ if (t1powersave)
+ adapter->t1powersave = LCLOCK; /* HW default is powersave mode. */
+ else
+ adapter->t1powersave = HCLOCK;
+ if (t1_is_T1B(adapter))
+ t1_clock(adapter, t1powersave);
+
+ return 0;
+
+out_release_adapter_res:
+ t1_free_sw_modules(adapter);
+out_free_dev:
+ if (adapter) {
+ if (adapter->regs)
+ iounmap(adapter->regs);
+ for (i = bi->port_number - 1; i >= 0; --i)
+ if (adapter->port[i].dev)
+ free_netdev(adapter->port[i].dev);
+ }
+ pci_release_regions(pdev);
+out_disable_pdev:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+/*
+ * Serially shift out the low 'nbits' bits of 'bitdata', MSB first, on
+ * the ELMER0 GPO pins, pulsing the clock line once per bit.
+ */
+static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
+{
+ int data;
+ int i;
+ u32 val;
+
+ enum {
+ S_CLOCK = 1 << 3,
+ S_DATA = 1 << 4
+ };
+
+ for (i = (nbits - 1); i > -1; i--) {
+
+ udelay(50);
+
+ data = ((bitdata >> i) & 0x1);
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+
+ if (data)
+ val |= S_DATA;
+ else
+ val &= ~S_DATA;
+
+ udelay(50);
+
+ /* Set SCLOCK low */
+ val &= ~S_CLOCK;
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+ udelay(50);
+
+ /* Write SCLOCK high */
+ val |= S_CLOCK;
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+ }
+}
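+
+/*
+ * Editor's sketch (not from the original patch): bit_bang() above shifts
+ * bitdata out MSB first, setting the data line before pulsing the clock
+ * low and then high. The bit ordering, rehearsed on a plain integer:
+ */
+#if 0
+#include <stdio.h>
+
+int main(void)
+{
+        int bitdata = 0x14, nbits = 9, i; /* M_CORE_VAL used for HCLOCK */
+
+        for (i = nbits - 1; i > -1; i--)
+                printf("%d", (bitdata >> i) & 0x1);
+        printf("\n"); /* prints 000010100 */
+        return 0;
+}
+#endif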
+
+static int t1_clock(struct adapter *adapter, int mode)
+{
+ u32 val;
+ int M_CORE_VAL;
+ int M_MEM_VAL;
+
+ enum {
+ M_CORE_BITS = 9,
+ T_CORE_VAL = 0,
+ T_CORE_BITS = 2,
+ N_CORE_VAL = 0,
+ N_CORE_BITS = 2,
+ M_MEM_BITS = 9,
+ T_MEM_VAL = 0,
+ T_MEM_BITS = 2,
+ N_MEM_VAL = 0,
+ N_MEM_BITS = 2,
+ NP_LOAD = 1 << 17,
+ S_LOAD_MEM = 1 << 5,
+ S_LOAD_CORE = 1 << 6,
+ S_CLOCK = 1 << 3
+ };
+
+ if (!t1_is_T1B(adapter))
+ return -ENODEV; /* Can't re-clock this chip. */
+
+ if (mode & 2)
+ return 0; /* show current mode. */
+
+ if ((adapter->t1powersave & 1) == (mode & 1))
+ return -EALREADY; /* ASIC already running in mode. */
+
+ if ((mode & 1) == HCLOCK) {
+ M_CORE_VAL = 0x14;
+ M_MEM_VAL = 0x18;
+ adapter->t1powersave = HCLOCK; /* overclock */
+ } else {
+ M_CORE_VAL = 0xe;
+ M_MEM_VAL = 0x10;
+ adapter->t1powersave = LCLOCK; /* underclock */
+ }
+
+ /* Don't interrupt this serial stream! */
+ spin_lock(&adapter->tpi_lock);
+
+ /* Initialize for ASIC core */
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val |= NP_LOAD;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val &= ~S_LOAD_CORE;
+ val &= ~S_CLOCK;
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+
+ /* Serial program the ASIC clock synthesizer */
+ bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
+ bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
+ bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
+ udelay(50);
+
+ /* Finish ASIC core */
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val |= S_LOAD_CORE;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val &= ~S_LOAD_CORE;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+
+ /* Initialize for memory */
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val |= NP_LOAD;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val &= ~S_LOAD_MEM;
+ val &= ~S_CLOCK;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+
+ /* Serial program the memory clock synthesizer */
+ bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
+ bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
+ bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
+ udelay(50);
+
+ /* Finish memory */
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val |= S_LOAD_MEM;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(50);
+ __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val &= ~S_LOAD_MEM;
+ udelay(50);
+ __t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+ spin_unlock(&adapter->tpi_lock);
+
+ return 0;
+}
+
+static inline void t1_sw_reset(struct pci_dev *pdev)
+{
+ /* cycle the chip through PCI power states D3hot and D0 to reset it */
+ pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
+ pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
+}
+
+static void __devexit remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct adapter *adapter = dev->ml_priv;
+ int i;
+
+ for_each_port(adapter, i) {
+ if (test_bit(i, &adapter->registered_device_map))
+ unregister_netdev(adapter->port[i].dev);
+ }
+
+ t1_free_sw_modules(adapter);
+ iounmap(adapter->regs);
+
+ while (--i >= 0) {
+ if (adapter->port[i].dev)
+ free_netdev(adapter->port[i].dev);
+ }
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ t1_sw_reset(pdev);
+}
+
+static struct pci_driver driver = {
+ .name = DRV_NAME,
+ .id_table = t1_pci_tbl,
+ .probe = init_one,
+ .remove = __devexit_p(remove_one),
+};
+
+static int __init t1_init_module(void)
+{
+ return pci_register_driver(&driver);
+}
+
+static void __exit t1_cleanup_module(void)
+{
+ pci_unregister_driver(&driver);
+}
+
+module_init(t1_init_module);
+module_exit(t1_cleanup_module);
diff --git a/drivers/net/ethernet/chelsio/cxgb/elmer0.h b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
new file mode 100644
index 000000000000..eef655c827d9
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
@@ -0,0 +1,158 @@
+/*****************************************************************************
+ * *
+ * File: elmer0.h *
+ * $Revision: 1.6 $ *
+ * $Date: 2005/06/21 22:49:43 $ *
+ * Description: *
+ * part of the Chelsio 10Gb Ethernet Driver. *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License, version 2, as *
+ * published by the Free Software Foundation. *
+ * *
+ * You should have received a copy of the GNU General Public License along *
+ * with this program; if not, write to the Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
+ * *
+ * http://www.chelsio.com *
+ * *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
+ * All rights reserved. *
+ * *
+ * Maintainers: maintainers@chelsio.com *
+ * *
+ * Authors: Dimitrios Michailidis <dm@chelsio.com> *
+ * Tina Yang <tainay@chelsio.com> *
+ * Felix Marti <felix@chelsio.com> *
+ * Scott Bardone <sbardone@chelsio.com> *
+ * Kurt Ottaway <kottaway@chelsio.com> *
+ * Frank DiMambro <frank@chelsio.com> *
+ * *
+ * History: *
+ * *
+ ****************************************************************************/
+
+#ifndef _CXGB_ELMER0_H_
+#define _CXGB_ELMER0_H_
+
+/* ELMER0 flavors */
+enum {
+ ELMER0_XC2S300E_6FT256_C,
+ ELMER0_XC2S100E_6TQ144_C
+};
+
+/* ELMER0 registers */
+#define A_ELMER0_VERSION 0x100000
+#define A_ELMER0_PHY_CFG 0x100004
+#define A_ELMER0_INT_ENABLE 0x100008
+#define A_ELMER0_INT_CAUSE 0x10000c
+#define A_ELMER0_GPI_CFG 0x100010
+#define A_ELMER0_GPI_STAT 0x100014
+#define A_ELMER0_GPO 0x100018
+#define A_ELMER0_PORT0_MI1_CFG 0x400000
+
+#define S_MI1_MDI_ENABLE 0
+#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE)
+#define F_MI1_MDI_ENABLE V_MI1_MDI_ENABLE(1U)
+
+#define S_MI1_MDI_INVERT 1
+#define V_MI1_MDI_INVERT(x) ((x) << S_MI1_MDI_INVERT)
+#define F_MI1_MDI_INVERT V_MI1_MDI_INVERT(1U)
+
+#define S_MI1_PREAMBLE_ENABLE 2
+#define V_MI1_PREAMBLE_ENABLE(x) ((x) << S_MI1_PREAMBLE_ENABLE)
+#define F_MI1_PREAMBLE_ENABLE V_MI1_PREAMBLE_ENABLE(1U)
+
+#define S_MI1_SOF 3
+#define M_MI1_SOF 0x3
+#define V_MI1_SOF(x) ((x) << S_MI1_SOF)
+#define G_MI1_SOF(x) (((x) >> S_MI1_SOF) & M_MI1_SOF)
+
+#define S_MI1_CLK_DIV 5
+#define M_MI1_CLK_DIV 0xff
+#define V_MI1_CLK_DIV(x) ((x) << S_MI1_CLK_DIV)
+#define G_MI1_CLK_DIV(x) (((x) >> S_MI1_CLK_DIV) & M_MI1_CLK_DIV)
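+
+/*
+ * Editor's note (not from the original patch): the S_/M_/V_/G_ macros in
+ * this file follow one pattern -- S_* is the field's bit offset, M_* its
+ * width mask, V_*(x) packs a value into the field and G_*(x) extracts it.
+ * Stand-alone demo using the MI1_CLK_DIV field:
+ */
+#if 0
+#include <stdio.h>
+#include <stdint.h>
+
+#define S_MI1_CLK_DIV 5
+#define M_MI1_CLK_DIV 0xff
+#define V_MI1_CLK_DIV(x) ((x) << S_MI1_CLK_DIV)
+#define G_MI1_CLK_DIV(x) (((x) >> S_MI1_CLK_DIV) & M_MI1_CLK_DIV)
+
+int main(void)
+{
+        uint32_t reg = V_MI1_CLK_DIV(0x2a); /* pack the field */
+
+        printf("reg = 0x%x, clk_div = 0x%x\n", reg, G_MI1_CLK_DIV(reg));
+        /* reg = 0x540, clk_div = 0x2a */
+        return 0;
+}
+#endif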
+
+#define A_ELMER0_PORT0_MI1_ADDR 0x400004
+
+#define S_MI1_REG_ADDR 0
+#define M_MI1_REG_ADDR 0x1f
+#define V_MI1_REG_ADDR(x) ((x) << S_MI1_REG_ADDR)
+#define G_MI1_REG_ADDR(x) (((x) >> S_MI1_REG_ADDR) & M_MI1_REG_ADDR)
+
+#define S_MI1_PHY_ADDR 5
+#define M_MI1_PHY_ADDR 0x1f
+#define V_MI1_PHY_ADDR(x) ((x) << S_MI1_PHY_ADDR)
+#define G_MI1_PHY_ADDR(x) (((x) >> S_MI1_PHY_ADDR) & M_MI1_PHY_ADDR)
+
+#define A_ELMER0_PORT0_MI1_DATA 0x400008
+
+#define S_MI1_DATA 0
+#define M_MI1_DATA 0xffff
+#define V_MI1_DATA(x) ((x) << S_MI1_DATA)
+#define G_MI1_DATA(x) (((x) >> S_MI1_DATA) & M_MI1_DATA)
+
+#define A_ELMER0_PORT0_MI1_OP 0x40000c
+
+#define S_MI1_OP 0
+#define M_MI1_OP 0x3
+#define V_MI1_OP(x) ((x) << S_MI1_OP)
+#define G_MI1_OP(x) (((x) >> S_MI1_OP) & M_MI1_OP)
+
+#define S_MI1_ADDR_AUTOINC 2
+#define V_MI1_ADDR_AUTOINC(x) ((x) << S_MI1_ADDR_AUTOINC)
+#define F_MI1_ADDR_AUTOINC V_MI1_ADDR_AUTOINC(1U)
+
+#define S_MI1_OP_BUSY 31
+#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY)
+#define F_MI1_OP_BUSY V_MI1_OP_BUSY(1U)
+
+#define A_ELMER0_PORT1_MI1_CFG 0x500000
+#define A_ELMER0_PORT1_MI1_ADDR 0x500004
+#define A_ELMER0_PORT1_MI1_DATA 0x500008
+#define A_ELMER0_PORT1_MI1_OP 0x50000c
+#define A_ELMER0_PORT2_MI1_CFG 0x600000
+#define A_ELMER0_PORT2_MI1_ADDR 0x600004
+#define A_ELMER0_PORT2_MI1_DATA 0x600008
+#define A_ELMER0_PORT2_MI1_OP 0x60000c
+#define A_ELMER0_PORT3_MI1_CFG 0x700000
+#define A_ELMER0_PORT3_MI1_ADDR 0x700004
+#define A_ELMER0_PORT3_MI1_DATA 0x700008
+#define A_ELMER0_PORT3_MI1_OP 0x70000c
+
+/* Simple bit definitions for the GPI and GPO registers. */
+#define ELMER0_GP_BIT0 0x0001
+#define ELMER0_GP_BIT1 0x0002
+#define ELMER0_GP_BIT2 0x0004
+#define ELMER0_GP_BIT3 0x0008
+#define ELMER0_GP_BIT4 0x0010
+#define ELMER0_GP_BIT5 0x0020
+#define ELMER0_GP_BIT6 0x0040
+#define ELMER0_GP_BIT7 0x0080
+#define ELMER0_GP_BIT8 0x0100
+#define ELMER0_GP_BIT9 0x0200
+#define ELMER0_GP_BIT10 0x0400
+#define ELMER0_GP_BIT11 0x0800
+#define ELMER0_GP_BIT12 0x1000
+#define ELMER0_GP_BIT13 0x2000
+#define ELMER0_GP_BIT14 0x4000
+#define ELMER0_GP_BIT15 0x8000
+#define ELMER0_GP_BIT16 0x10000
+#define ELMER0_GP_BIT17 0x20000
+#define ELMER0_GP_BIT18 0x40000
+#define ELMER0_GP_BIT19 0x80000
+
+#define MI1_OP_DIRECT_WRITE 1
+#define MI1_OP_DIRECT_READ 2
+
+#define MI1_OP_INDIRECT_ADDRESS 0
+#define MI1_OP_INDIRECT_WRITE 1
+#define MI1_OP_INDIRECT_READ_INC 2
+#define MI1_OP_INDIRECT_READ 3
+
+#endif /* _CXGB_ELMER0_H_ */
+
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.c b/drivers/net/ethernet/chelsio/cxgb/espi.c
new file mode 100644
index 000000000000..639ff1955739
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.c
@@ -0,0 +1,373 @@
+/*****************************************************************************
+ * *
+ * File: espi.c *
+ * $Revision: 1.14 $ *
+ * $Date: 2005/05/14 00:59:32 $ *
+ * Description: *
+ * Ethernet SPI functionality. *
+ * part of the Chelsio 10Gb Ethernet Driver. *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License, version 2, as *
+ * published by the Free Software Foundation. *
+ * *
+ * You should have received a copy of the GNU General Public License along *
+ * with this program; if not, write to the Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
+ * *
+ * http://www.chelsio.com *
+ * *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
+ * All rights reserved. *
+ * *
+ * Maintainers: maintainers@chelsio.com *
+ * *
+ * Authors: Dimitrios Michailidis <dm@chelsio.com> *
+ * Tina Yang <tainay@chelsio.com> *
+ * Felix Marti <felix@chelsio.com> *
+ * Scott Bardone <sbardone@chelsio.com> *
+ * Kurt Ottaway <kottaway@chelsio.com> *
+ * Frank DiMambro <frank@chelsio.com> *
+ * *
+ * History: *
+ * *
+ ****************************************************************************/
+
+#include "common.h"
+#include "regs.h"
+#include "espi.h"
+
+struct peespi {
+ adapter_t *adapter;
+ struct espi_intr_counts intr_cnt;
+ u32 misc_ctrl;
+ spinlock_t lock;
+};
+
+#define ESPI_INTR_MASK (F_DIP4ERR | F_RXDROP | F_TXDROP | F_RXOVERFLOW | \
+ F_RAMPARITYERR | F_DIP2PARITYERR)
+#define MON_MASK (V_MONITORED_PORT_NUM(3) | F_MONITORED_DIRECTION \
+ | F_MONITORED_INTERFACE)
+
+#define TRICN_CNFG 14
+#define TRICN_CMD_READ 0x11
+#define TRICN_CMD_WRITE 0x21
+#define TRICN_CMD_ATTEMPTS 10
+
+static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr,
+ int ch_addr, int reg_offset, u32 wr_data)
+{
+ int busy, attempts = TRICN_CMD_ATTEMPTS;
+
+ writel(V_WRITE_DATA(wr_data) |
+ V_REGISTER_OFFSET(reg_offset) |
+ V_CHANNEL_ADDR(ch_addr) | V_MODULE_ADDR(module_addr) |
+ V_BUNDLE_ADDR(bundle_addr) |
+ V_SPI4_COMMAND(TRICN_CMD_WRITE),
+ adapter->regs + A_ESPI_CMD_ADDR);
+ writel(0, adapter->regs + A_ESPI_GOSTAT);
+
+ do {
+ busy = readl(adapter->regs + A_ESPI_GOSTAT) & F_ESPI_CMD_BUSY;
+ } while (busy && --attempts);
+
+ if (busy)
+ pr_err("%s: TRICN write timed out\n", adapter->name);
+
+ return busy;
+}
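+
+/*
+ * Editor's sketch (not from the original patch): tricn_write() above
+ * uses a bounded busy-wait -- poll a status bit, give up after a fixed
+ * number of attempts, and report whether the bit ever cleared. The
+ * skeleton of that pattern, with a counter standing in for the register
+ * read:
+ */
+#if 0
+#include <stdio.h>
+
+static int read_busy(void) /* stand-in for readl() of the status reg */
+{
+        static int countdown = 3; /* busy for the first three polls */
+        return countdown-- > 0;
+}
+
+int main(void)
+{
+        int busy, attempts = 10; /* TRICN_CMD_ATTEMPTS in the driver */
+
+        do {
+                busy = read_busy();
+        } while (busy && --attempts);
+
+        if (busy)
+                printf("timed out\n");
+        else
+                printf("done, %d attempts left\n", attempts);
+        return 0;
+}
+#endif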
+
+static int tricn_init(adapter_t *adapter)
+{
+ int i, sme = 1;
+
+ if (!(readl(adapter->regs + A_ESPI_RX_RESET) & F_RX_CLK_STATUS)) {
+ pr_err("%s: ESPI clock not ready\n", adapter->name);
+ return -1;
+ }
+
+ writel(F_ESPI_RX_CORE_RST, adapter->regs + A_ESPI_RX_RESET);
+
+ if (sme) {
+ tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81);
+ tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81);
+ tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
+ }
+ for (i = 1; i <= 8; i++)
+ tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
+ for (i = 1; i <= 2; i++)
+ tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
+ for (i = 1; i <= 3; i++)
+ tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
+ tricn_write(adapter, 0, 2, 4, TRICN_CNFG, 0xf1);
+ tricn_write(adapter, 0, 2, 5, TRICN_CNFG, 0xe1);
+ tricn_write(adapter, 0, 2, 6, TRICN_CNFG, 0xf1);
+ tricn_write(adapter, 0, 2, 7, TRICN_CNFG, 0x80);
+ tricn_write(adapter, 0, 2, 8, TRICN_CNFG, 0xf1);
+
+ writel(F_ESPI_RX_CORE_RST | F_ESPI_RX_LNK_RST,
+ adapter->regs + A_ESPI_RX_RESET);
+
+ return 0;
+}
+
+void t1_espi_intr_enable(struct peespi *espi)
+{
+ u32 enable, pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
+
+ /*
+ * Cannot enable ESPI interrupts on T1B because the HW asserts the
+ * interrupt incorrectly, namely the driver gets ESPI interrupts
+ * but no data is actually dropped (one can verify this by reading
+ * the ESPI drop registers). Also, once the ESPI interrupt is
+ * asserted it cannot be cleared (HW bug).
+ */
+ enable = t1_is_T1B(espi->adapter) ? 0 : ESPI_INTR_MASK;
+ writel(enable, espi->adapter->regs + A_ESPI_INTR_ENABLE);
+ writel(pl_intr | F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
+}
+
+void t1_espi_intr_clear(struct peespi *espi)
+{
+ readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
+ writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS);
+ writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE);
+}
+
+void t1_espi_intr_disable(struct peespi *espi)
+{
+ u32 pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
+
+ writel(0, espi->adapter->regs + A_ESPI_INTR_ENABLE);
+ writel(pl_intr & ~F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
+}
+
+int t1_espi_intr_handler(struct peespi *espi)
+{
+ u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
+
+ if (status & F_DIP4ERR)
+ espi->intr_cnt.DIP4_err++;
+ if (status & F_RXDROP)
+ espi->intr_cnt.rx_drops++;
+ if (status & F_TXDROP)
+ espi->intr_cnt.tx_drops++;
+ if (status & F_RXOVERFLOW)
+ espi->intr_cnt.rx_ovflw++;
+ if (status & F_RAMPARITYERR)
+ espi->intr_cnt.parity_err++;
+ if (status & F_DIP2PARITYERR) {
+ espi->intr_cnt.DIP2_parity_err++;
+
+ /*
+ * Must read the error count to clear the interrupt
+ * that it causes.
+ */
+ readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
+ }
+
+ /*
+ * For T1B we need to write 1 to clear ESPI interrupts. For T2+ we
+ * write the status as is.
+ */
+ if (status && t1_is_T1B(espi->adapter))
+ status = 1;
+ writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
+ return 0;
+}
+
+const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi)
+{
+ return &espi->intr_cnt;
+}
+
+static void espi_setup_for_pm3393(adapter_t *adapter)
+{
+ u32 wmark = t1_is_T1B(adapter) ? 0x4000 : 0x3200;
+
+ writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
+ writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN1);
+ writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
+ writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN3);
+ writel(0x100, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+ writel(wmark, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+ writel(3, adapter->regs + A_ESPI_CALENDAR_LENGTH);
+ writel(0x08000008, adapter->regs + A_ESPI_TRAIN);
+ writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG);
+}
+
+static void espi_setup_for_vsc7321(adapter_t *adapter)
+{
+ writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
+ writel(0x1f401f4, adapter->regs + A_ESPI_SCH_TOKEN1);
+ writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
+ writel(0xa00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+ writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+ writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH);
+ writel(V_RX_NPORTS(4) | V_TX_NPORTS(4), adapter->regs + A_PORT_CONFIG);
+
+ writel(0x08000008, adapter->regs + A_ESPI_TRAIN);
+}
+
+/*
+ * Note that T1B requires at least 2 ports for IXF1010 due to a HW bug.
+ */
+static void espi_setup_for_ixf1010(adapter_t *adapter, int nports)
+{
+ writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH);
+ if (nports == 4) {
+ if (is_T2(adapter)) {
+ writel(0xf00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+ writel(0x3c0, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+ } else {
+ writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+ writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+ }
+ } else {
+ writel(0x1fff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+ writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+ }
+ writel(V_RX_NPORTS(nports) | V_TX_NPORTS(nports), adapter->regs + A_PORT_CONFIG);
+}
+
+int t1_espi_init(struct peespi *espi, int mac_type, int nports)
+{
+ u32 status_enable_extra = 0;
+ adapter_t *adapter = espi->adapter;
+
+ /* Disable ESPI training. MACs that can handle it enable it below. */
+ writel(0, adapter->regs + A_ESPI_TRAIN);
+
+ if (is_T2(adapter)) {
+ writel(V_OUT_OF_SYNC_COUNT(4) |
+ V_DIP2_PARITY_ERR_THRES(3) |
+ V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL);
+ writel(nports == 4 ? 0x200040 : 0x1000080,
+ adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
+ } else
+ writel(0x800100, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
+
+ if (mac_type == CHBT_MAC_PM3393)
+ espi_setup_for_pm3393(adapter);
+ else if (mac_type == CHBT_MAC_VSC7321)
+ espi_setup_for_vsc7321(adapter);
+ else if (mac_type == CHBT_MAC_IXF1010) {
+ status_enable_extra = F_INTEL1010MODE;
+ espi_setup_for_ixf1010(adapter, nports);
+ } else
+ return -1;
+
+ writel(status_enable_extra | F_RXSTATUSENABLE,
+ adapter->regs + A_ESPI_FIFO_STATUS_ENABLE);
+
+ if (is_T2(adapter)) {
+ tricn_init(adapter);
+ /*
+ * Always position the control at the 1st port egress IN
+ * (sop,eop) counter to reduce PIOs for T/N210 workaround.
+ */
+ espi->misc_ctrl = readl(adapter->regs + A_ESPI_MISC_CONTROL);
+ espi->misc_ctrl &= ~MON_MASK;
+ espi->misc_ctrl |= F_MONITORED_DIRECTION;
+ if (adapter->params.nports == 1)
+ espi->misc_ctrl |= F_MONITORED_INTERFACE;
+ writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+ spin_lock_init(&espi->lock);
+ }
+
+ return 0;
+}
+
+void t1_espi_destroy(struct peespi *espi)
+{
+ kfree(espi);
+}
+
+struct peespi *t1_espi_create(adapter_t *adapter)
+{
+ struct peespi *espi = kzalloc(sizeof(*espi), GFP_KERNEL);
+
+ if (espi)
+ espi->adapter = adapter;
+ return espi;
+}
+
+#if 0
+void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
+{
+ struct peespi *espi = adapter->espi;
+
+ if (!is_T2(adapter))
+ return;
+ spin_lock(&espi->lock);
+ espi->misc_ctrl = (val & ~MON_MASK) |
+ (espi->misc_ctrl & MON_MASK);
+ writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+ spin_unlock(&espi->lock);
+}
+#endif /* 0 */
+
+u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
+{
+ struct peespi *espi = adapter->espi;
+ u32 sel;
+
+ if (!is_T2(adapter))
+ return 0;
+
+ sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2);
+ if (!wait) {
+ if (!spin_trylock(&espi->lock))
+ return 0;
+ } else
+ spin_lock(&espi->lock);
+
+ if ((sel != (espi->misc_ctrl & MON_MASK))) {
+ writel(((espi->misc_ctrl & ~MON_MASK) | sel),
+ adapter->regs + A_ESPI_MISC_CONTROL);
+ sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
+ writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+ } else
+ sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
+ spin_unlock(&espi->lock);
+ return sel;
+}
+
+/*
+ * This function is for T204 only. Compared with t1_espi_get_mon(), it
+ * reads espiInTxSop[0..3] in one shot, since there is no per-port
+ * counter on the out side.
+ */
+int t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait)
+{
+ struct peespi *espi = adapter->espi;
+ u8 i, nport = (u8)adapter->params.nports;
+
+ if (!wait) {
+ if (!spin_trylock(&espi->lock))
+ return -1;
+ } else
+ spin_lock(&espi->lock);
+
+ if ((espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION) {
+ espi->misc_ctrl = (espi->misc_ctrl & ~MON_MASK) |
+ F_MONITORED_DIRECTION;
+ writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+ }
+ for (i = 0 ; i < nport; i++, valp++) {
+ if (i) {
+ writel(espi->misc_ctrl | V_MONITORED_PORT_NUM(i),
+ adapter->regs + A_ESPI_MISC_CONTROL);
+ }
+ *valp = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
+ }
+
+ writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+ spin_unlock(&espi->lock);
+ return 0;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.h b/drivers/net/ethernet/chelsio/cxgb/espi.h
new file mode 100644
index 000000000000..5694aad4fbc0
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.h
@@ -0,0 +1,68 @@
+/*****************************************************************************
+ * *
+ * File: espi.h *
+ * $Revision: 1.7 $ *
+ * $Date: 2005/06/21 18:29:47 $ *
+ * Description: *
+ * part of the Chelsio 10Gb Ethernet Driver. *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License, version 2, as *
+ * published by the Free Software Foundation. *
+ * *
+ * You should have received a copy of the GNU General Public License along *
+ * with this program; if not, write to the Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
+ * *
+ * http://www.chelsio.com *
+ * *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
+ * All rights reserved. *
+ * *
+ * Maintainers: maintainers@chelsio.com *
+ * *
+ * Authors: Dimitrios Michailidis <dm@chelsio.com> *
+ * Tina Yang <tainay@chelsio.com> *
+ * Felix Marti <felix@chelsio.com> *
+ * Scott Bardone <sbardone@chelsio.com> *
+ * Kurt Ottaway <kottaway@chelsio.com> *
+ * Frank DiMambro <frank@chelsio.com> *
+ * *
+ * History: *
+ * *
+ ****************************************************************************/
+
+#ifndef _CXGB_ESPI_H_
+#define _CXGB_ESPI_H_
+
+#include "common.h"
+
+struct espi_intr_counts {
+ unsigned int DIP4_err;
+ unsigned int rx_drops;
+ unsigned int tx_drops;
+ unsigned int rx_ovflw;
+ unsigned int parity_err;
+ unsigned int DIP2_parity_err;
+};
+
+struct peespi;
+
+struct peespi *t1_espi_create(adapter_t *adapter);
+void t1_espi_destroy(struct peespi *espi);
+int t1_espi_init(struct peespi *espi, int mac_type, int nports);
+
+void t1_espi_intr_enable(struct peespi *);
+void t1_espi_intr_clear(struct peespi *);
+void t1_espi_intr_disable(struct peespi *);
+int t1_espi_intr_handler(struct peespi *);
+const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi);
+
+u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait);
+int t1_espi_get_mon_t204(adapter_t *, u32 *, u8);
+
+#endif /* _CXGB_ESPI_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb/fpga_defs.h b/drivers/net/ethernet/chelsio/cxgb/fpga_defs.h
new file mode 100644
index 000000000000..ccdb2bc9ae98
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/fpga_defs.h
@@ -0,0 +1,232 @@
+/* $Date: 2005/03/07 23:59:05 $ $RCSfile: fpga_defs.h,v $ $Revision: 1.4 $ */
+
+/*
+ * FPGA specific definitions
+ */
+
+#ifndef __CHELSIO_FPGA_DEFS_H__
+#define __CHELSIO_FPGA_DEFS_H__
+
+#define FPGA_PCIX_ADDR_VERSION 0xA08
+#define FPGA_PCIX_ADDR_STAT 0xA0C
+
+/* FPGA master interrupt Cause/Enable bits */
+#define FPGA_PCIX_INTERRUPT_SGE_ERROR 0x1
+#define FPGA_PCIX_INTERRUPT_SGE_DATA 0x2
+#define FPGA_PCIX_INTERRUPT_TP 0x4
+#define FPGA_PCIX_INTERRUPT_MC3 0x8
+#define FPGA_PCIX_INTERRUPT_GMAC 0x10
+#define FPGA_PCIX_INTERRUPT_PCIX 0x20
+
+/* TP interrupt register addresses */
+#define FPGA_TP_ADDR_INTERRUPT_ENABLE 0xA10
+#define FPGA_TP_ADDR_INTERRUPT_CAUSE 0xA14
+#define FPGA_TP_ADDR_VERSION 0xA18
+
+/* TP interrupt Cause/Enable bits */
+#define FPGA_TP_INTERRUPT_MC4 0x1
+#define FPGA_TP_INTERRUPT_MC5 0x2
+
+/*
+ * MC3 interrupt register addresses
+ */
+#define FPGA_MC3_REG_INTRENABLE 0xA20
+#define FPGA_MC3_REG_INTRCAUSE 0xA24
+#define FPGA_MC3_REG_VERSION 0xA28
+
+/*
+ * GMAC interrupt register addresses
+ */
+#define FPGA_GMAC_ADDR_INTERRUPT_ENABLE 0xA30
+#define FPGA_GMAC_ADDR_INTERRUPT_CAUSE 0xA34
+#define FPGA_GMAC_ADDR_VERSION 0xA38
+
+/* GMAC Cause/Enable bits */
+#define FPGA_GMAC_INTERRUPT_PORT0 0x1
+#define FPGA_GMAC_INTERRUPT_PORT1 0x2
+#define FPGA_GMAC_INTERRUPT_PORT2 0x4
+#define FPGA_GMAC_INTERRUPT_PORT3 0x8
+
+/* MI0 registers */
+#define A_MI0_CLK 0xb00
+
+#define S_MI0_CLK_DIV 0
+#define M_MI0_CLK_DIV 0xff
+#define V_MI0_CLK_DIV(x) ((x) << S_MI0_CLK_DIV)
+#define G_MI0_CLK_DIV(x) (((x) >> S_MI0_CLK_DIV) & M_MI0_CLK_DIV)
+
+#define S_MI0_CLK_CNT 8
+#define M_MI0_CLK_CNT 0xff
+#define V_MI0_CLK_CNT(x) ((x) << S_MI0_CLK_CNT)
+#define G_MI0_CLK_CNT(x) (((x) >> S_MI0_CLK_CNT) & M_MI0_CLK_CNT)
+
+#define A_MI0_CSR 0xb04
+
+#define S_MI0_CSR_POLL 0
+#define V_MI0_CSR_POLL(x) ((x) << S_MI0_CSR_POLL)
+#define F_MI0_CSR_POLL V_MI0_CSR_POLL(1U)
+
+#define S_MI0_PREAMBLE 1
+#define V_MI0_PREAMBLE(x) ((x) << S_MI0_PREAMBLE)
+#define F_MI0_PREAMBLE V_MI0_PREAMBLE(1U)
+
+#define S_MI0_INTR_ENABLE 2
+#define V_MI0_INTR_ENABLE(x) ((x) << S_MI0_INTR_ENABLE)
+#define F_MI0_INTR_ENABLE V_MI0_INTR_ENABLE(1U)
+
+#define S_MI0_BUSY 3
+#define V_MI0_BUSY(x) ((x) << S_MI0_BUSY)
+#define F_MI0_BUSY V_MI0_BUSY(1U)
+
+#define S_MI0_MDIO 4
+#define V_MI0_MDIO(x) ((x) << S_MI0_MDIO)
+#define F_MI0_MDIO V_MI0_MDIO(1U)
+
+#define A_MI0_ADDR 0xb08
+
+#define S_MI0_PHY_REG_ADDR 0
+#define M_MI0_PHY_REG_ADDR 0x1f
+#define V_MI0_PHY_REG_ADDR(x) ((x) << S_MI0_PHY_REG_ADDR)
+#define G_MI0_PHY_REG_ADDR(x) (((x) >> S_MI0_PHY_REG_ADDR) & M_MI0_PHY_REG_ADDR)
+
+#define S_MI0_PHY_ADDR 5
+#define M_MI0_PHY_ADDR 0x1f
+#define V_MI0_PHY_ADDR(x) ((x) << S_MI0_PHY_ADDR)
+#define G_MI0_PHY_ADDR(x) (((x) >> S_MI0_PHY_ADDR) & M_MI0_PHY_ADDR)
+
+#define A_MI0_DATA_EXT 0xb0c
+#define A_MI0_DATA_INT 0xb10
+
+/* GMAC registers */
+#define A_GMAC_MACID_LO 0x28
+#define A_GMAC_MACID_HI 0x2c
+#define A_GMAC_CSR 0x30
+
+#define S_INTERFACE 0
+#define M_INTERFACE 0x3
+#define V_INTERFACE(x) ((x) << S_INTERFACE)
+#define G_INTERFACE(x) (((x) >> S_INTERFACE) & M_INTERFACE)
+
+#define S_MAC_TX_ENABLE 2
+#define V_MAC_TX_ENABLE(x) ((x) << S_MAC_TX_ENABLE)
+#define F_MAC_TX_ENABLE V_MAC_TX_ENABLE(1U)
+
+#define S_MAC_RX_ENABLE 3
+#define V_MAC_RX_ENABLE(x) ((x) << S_MAC_RX_ENABLE)
+#define F_MAC_RX_ENABLE V_MAC_RX_ENABLE(1U)
+
+#define S_MAC_LB_ENABLE 4
+#define V_MAC_LB_ENABLE(x) ((x) << S_MAC_LB_ENABLE)
+#define F_MAC_LB_ENABLE V_MAC_LB_ENABLE(1U)
+
+#define S_MAC_SPEED 5
+#define M_MAC_SPEED 0x3
+#define V_MAC_SPEED(x) ((x) << S_MAC_SPEED)
+#define G_MAC_SPEED(x) (((x) >> S_MAC_SPEED) & M_MAC_SPEED)
+
+#define S_MAC_HD_FC_ENABLE 7
+#define V_MAC_HD_FC_ENABLE(x) ((x) << S_MAC_HD_FC_ENABLE)
+#define F_MAC_HD_FC_ENABLE V_MAC_HD_FC_ENABLE(1U)
+
+#define S_MAC_HALF_DUPLEX 8
+#define V_MAC_HALF_DUPLEX(x) ((x) << S_MAC_HALF_DUPLEX)
+#define F_MAC_HALF_DUPLEX V_MAC_HALF_DUPLEX(1U)
+
+#define S_MAC_PROMISC 9
+#define V_MAC_PROMISC(x) ((x) << S_MAC_PROMISC)
+#define F_MAC_PROMISC V_MAC_PROMISC(1U)
+
+#define S_MAC_MC_ENABLE 10
+#define V_MAC_MC_ENABLE(x) ((x) << S_MAC_MC_ENABLE)
+#define F_MAC_MC_ENABLE V_MAC_MC_ENABLE(1U)
+
+#define S_MAC_RESET 11
+#define V_MAC_RESET(x) ((x) << S_MAC_RESET)
+#define F_MAC_RESET V_MAC_RESET(1U)
+
+#define S_MAC_RX_PAUSE_ENABLE 12
+#define V_MAC_RX_PAUSE_ENABLE(x) ((x) << S_MAC_RX_PAUSE_ENABLE)
+#define F_MAC_RX_PAUSE_ENABLE V_MAC_RX_PAUSE_ENABLE(1U)
+
+#define S_MAC_TX_PAUSE_ENABLE 13
+#define V_MAC_TX_PAUSE_ENABLE(x) ((x) << S_MAC_TX_PAUSE_ENABLE)
+#define F_MAC_TX_PAUSE_ENABLE V_MAC_TX_PAUSE_ENABLE(1U)
+
+#define S_MAC_LWM_ENABLE 14
+#define V_MAC_LWM_ENABLE(x) ((x) << S_MAC_LWM_ENABLE)
+#define F_MAC_LWM_ENABLE V_MAC_LWM_ENABLE(1U)
+
+#define S_MAC_MAGIC_PKT_ENABLE 15
+#define V_MAC_MAGIC_PKT_ENABLE(x) ((x) << S_MAC_MAGIC_PKT_ENABLE)
+#define F_MAC_MAGIC_PKT_ENABLE V_MAC_MAGIC_PKT_ENABLE(1U)
+
+#define S_MAC_ISL_ENABLE 16
+#define V_MAC_ISL_ENABLE(x) ((x) << S_MAC_ISL_ENABLE)
+#define F_MAC_ISL_ENABLE V_MAC_ISL_ENABLE(1U)
+
+#define S_MAC_JUMBO_ENABLE 17
+#define V_MAC_JUMBO_ENABLE(x) ((x) << S_MAC_JUMBO_ENABLE)
+#define F_MAC_JUMBO_ENABLE V_MAC_JUMBO_ENABLE(1U)
+
+#define S_MAC_RX_PAD_ENABLE 18
+#define V_MAC_RX_PAD_ENABLE(x) ((x) << S_MAC_RX_PAD_ENABLE)
+#define F_MAC_RX_PAD_ENABLE V_MAC_RX_PAD_ENABLE(1U)
+
+#define S_MAC_RX_CRC_ENABLE 19
+#define V_MAC_RX_CRC_ENABLE(x) ((x) << S_MAC_RX_CRC_ENABLE)
+#define F_MAC_RX_CRC_ENABLE V_MAC_RX_CRC_ENABLE(1U)
+
+#define A_GMAC_IFS 0x34
+
+#define S_MAC_IFS2 0
+#define M_MAC_IFS2 0x3f
+#define V_MAC_IFS2(x) ((x) << S_MAC_IFS2)
+#define G_MAC_IFS2(x) (((x) >> S_MAC_IFS2) & M_MAC_IFS2)
+
+#define S_MAC_IFS1 8
+#define M_MAC_IFS1 0x7f
+#define V_MAC_IFS1(x) ((x) << S_MAC_IFS1)
+#define G_MAC_IFS1(x) (((x) >> S_MAC_IFS1) & M_MAC_IFS1)
+
+#define A_GMAC_JUMBO_FRAME_LEN 0x38
+#define A_GMAC_LNK_DLY 0x3c
+#define A_GMAC_PAUSETIME 0x40
+#define A_GMAC_MCAST_LO 0x44
+#define A_GMAC_MCAST_HI 0x48
+#define A_GMAC_MCAST_MASK_LO 0x4c
+#define A_GMAC_MCAST_MASK_HI 0x50
+#define A_GMAC_RMT_CNT 0x54
+#define A_GMAC_RMT_DATA 0x58
+#define A_GMAC_BACKOFF_SEED 0x5c
+#define A_GMAC_TXF_THRES 0x60
+
+#define S_TXF_READ_THRESHOLD 0
+#define M_TXF_READ_THRESHOLD 0xff
+#define V_TXF_READ_THRESHOLD(x) ((x) << S_TXF_READ_THRESHOLD)
+#define G_TXF_READ_THRESHOLD(x) (((x) >> S_TXF_READ_THRESHOLD) & M_TXF_READ_THRESHOLD)
+
+#define S_TXF_WRITE_THRESHOLD 16
+#define M_TXF_WRITE_THRESHOLD 0xff
+#define V_TXF_WRITE_THRESHOLD(x) ((x) << S_TXF_WRITE_THRESHOLD)
+#define G_TXF_WRITE_THRESHOLD(x) (((x) >> S_TXF_WRITE_THRESHOLD) & M_TXF_WRITE_THRESHOLD)
+
+#define MAC_REG_BASE 0x600
+#define MAC_REG_ADDR(idx, reg) (MAC_REG_BASE + (idx) * 128 + (reg))
+
+#define MAC_REG_IDLO(idx) MAC_REG_ADDR(idx, A_GMAC_MACID_LO)
+#define MAC_REG_IDHI(idx) MAC_REG_ADDR(idx, A_GMAC_MACID_HI)
+#define MAC_REG_CSR(idx) MAC_REG_ADDR(idx, A_GMAC_CSR)
+#define MAC_REG_IFS(idx) MAC_REG_ADDR(idx, A_GMAC_IFS)
+#define MAC_REG_LARGEFRAMELENGTH(idx) MAC_REG_ADDR(idx, A_GMAC_JUMBO_FRAME_LEN)
+#define MAC_REG_LINKDLY(idx) MAC_REG_ADDR(idx, A_GMAC_LNK_DLY)
+#define MAC_REG_PAUSETIME(idx) MAC_REG_ADDR(idx, A_GMAC_PAUSETIME)
+#define MAC_REG_CASTLO(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_LO)
+#define MAC_REG_MCASTHI(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_HI)
+#define MAC_REG_CASTMASKLO(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_MASK_LO)
+#define MAC_REG_MCASTMASKHI(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_MASK_HI)
+#define MAC_REG_RMCNT(idx) MAC_REG_ADDR(idx, A_GMAC_RMT_CNT)
+#define MAC_REG_RMDATA(idx) MAC_REG_ADDR(idx, A_GMAC_RMT_DATA)
+#define MAC_REG_GMRANDBACKOFFSEED(idx) MAC_REG_ADDR(idx, A_GMAC_BACKOFF_SEED)
+#define MAC_REG_TXFTHRESHOLDS(idx) MAC_REG_ADDR(idx, A_GMAC_TXF_THRES)
+
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb/gmac.h b/drivers/net/ethernet/chelsio/cxgb/gmac.h
new file mode 100644
index 000000000000..d42337457cf7
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/gmac.h
@@ -0,0 +1,142 @@
+/*****************************************************************************
+ * *
+ * File: gmac.h *
+ * $Revision: 1.6 $ *
+ * $Date: 2005/06/21 18:29:47 $ *
+ * Description: *
+ * Generic MAC functionality. *
+ * part of the Chelsio 10Gb Ethernet Driver. *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License, version 2, as *
+ * published by the Free Software Foundation. *
+ * *
+ * You should have received a copy of the GNU General Public License along *
+ * with this program; if not, write to the Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
+ * *
+ * http://www.chelsio.com *
+ * *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
+ * All rights reserved. *
+ * *
+ * Maintainers: maintainers@chelsio.com *
+ * *
+ * Authors: Dimitrios Michailidis <dm@chelsio.com> *
+ * Tina Yang <tainay@chelsio.com> *
+ * Felix Marti <felix@chelsio.com> *
+ * Scott Bardone <sbardone@chelsio.com> *
+ * Kurt Ottaway <kottaway@chelsio.com> *
+ * Frank DiMambro <frank@chelsio.com> *
+ * *
+ * History: *
+ * *
+ ****************************************************************************/
+
+#ifndef _CXGB_GMAC_H_
+#define _CXGB_GMAC_H_
+
+#include "common.h"
+
+enum {
+ MAC_STATS_UPDATE_FAST,
+ MAC_STATS_UPDATE_FULL
+};
+
+enum {
+ MAC_DIRECTION_RX = 1,
+ MAC_DIRECTION_TX = 2
+};
+
+struct cmac_statistics {
+ /* Transmit */
+ u64 TxOctetsOK;
+ u64 TxOctetsBad;
+ u64 TxUnicastFramesOK;
+ u64 TxMulticastFramesOK;
+ u64 TxBroadcastFramesOK;
+ u64 TxPauseFrames;
+ u64 TxFramesWithDeferredXmissions;
+ u64 TxLateCollisions;
+ u64 TxTotalCollisions;
+ u64 TxFramesAbortedDueToXSCollisions;
+ u64 TxUnderrun;
+ u64 TxLengthErrors;
+ u64 TxInternalMACXmitError;
+ u64 TxFramesWithExcessiveDeferral;
+ u64 TxFCSErrors;
+ u64 TxJumboFramesOK;
+ u64 TxJumboOctetsOK;
+
+ /* Receive */
+ u64 RxOctetsOK;
+ u64 RxOctetsBad;
+ u64 RxUnicastFramesOK;
+ u64 RxMulticastFramesOK;
+ u64 RxBroadcastFramesOK;
+ u64 RxPauseFrames;
+ u64 RxFCSErrors;
+ u64 RxAlignErrors;
+ u64 RxSymbolErrors;
+ u64 RxDataErrors;
+ u64 RxSequenceErrors;
+ u64 RxRuntErrors;
+ u64 RxJabberErrors;
+ u64 RxInternalMACRcvError;
+ u64 RxInRangeLengthErrors;
+ u64 RxOutOfRangeLengthField;
+ u64 RxFrameTooLongErrors;
+ u64 RxJumboFramesOK;
+ u64 RxJumboOctetsOK;
+};
+
+struct cmac_ops {
+ void (*destroy)(struct cmac *);
+ int (*reset)(struct cmac *);
+ int (*interrupt_enable)(struct cmac *);
+ int (*interrupt_disable)(struct cmac *);
+ int (*interrupt_clear)(struct cmac *);
+ int (*interrupt_handler)(struct cmac *);
+
+ int (*enable)(struct cmac *, int);
+ int (*disable)(struct cmac *, int);
+
+ int (*loopback_enable)(struct cmac *);
+ int (*loopback_disable)(struct cmac *);
+
+ int (*set_mtu)(struct cmac *, int mtu);
+ int (*set_rx_mode)(struct cmac *, struct t1_rx_mode *rm);
+
+ int (*set_speed_duplex_fc)(struct cmac *, int speed, int duplex, int fc);
+ int (*get_speed_duplex_fc)(struct cmac *, int *speed, int *duplex,
+ int *fc);
+
+ const struct cmac_statistics *(*statistics_update)(struct cmac *, int);
+
+ int (*macaddress_get)(struct cmac *, u8 mac_addr[6]);
+ int (*macaddress_set)(struct cmac *, u8 mac_addr[6]);
+};
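+
+/*
+ * Editor's sketch (not from the original patch): cmac_ops is a classic
+ * method table -- callers dispatch through the function pointers and
+ * treat a NULL slot as "operation not supported", as t1_change_mtu()
+ * does in cxgb2.c. A minimal stand-alone rehearsal of that convention:
+ */
+#if 0
+#include <stdio.h>
+
+struct mac;
+
+struct mac_ops {
+        int (*set_mtu)(struct mac *, int mtu);
+};
+
+struct mac {
+        const struct mac_ops *ops;
+};
+
+static int demo_set_mtu(struct mac *m, int mtu)
+{
+        (void)m; /* unused in this demo */
+        printf("mtu set to %d\n", mtu);
+        return 0;
+}
+
+static const struct mac_ops demo_ops = { .set_mtu = demo_set_mtu };
+
+int main(void)
+{
+        struct mac m = { .ops = &demo_ops };
+
+        if (!m.ops->set_mtu)
+                return 1; /* the driver returns -EOPNOTSUPP here */
+        return m.ops->set_mtu(&m, 1500);
+}
+#endif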
+
+typedef struct _cmac_instance cmac_instance;
+
+struct cmac {
+ struct cmac_statistics stats;
+ adapter_t *adapter;
+ const struct cmac_ops *ops;
+ cmac_instance *instance;
+};
+
+struct gmac {
+ unsigned int stats_update_period;
+ struct cmac *(*create)(adapter_t *adapter, int index);
+ int (*reset)(adapter_t *);
+};
+
+extern const struct gmac t1_pm3393_ops;
+extern const struct gmac t1_vsc7326_ops;
+
+#endif /* _CXGB_GMAC_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c
new file mode 100644
index 000000000000..71018a4fdf15
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c
@@ -0,0 +1,397 @@
+/* $Date: 2005/10/24 23:18:13 $ $RCSfile: mv88e1xxx.c,v $ $Revision: 1.49 $ */
+#include "common.h"
+#include "mv88e1xxx.h"
+#include "cphy.h"
+#include "elmer0.h"
+
+/* MV88E1XXX MDI crossover register values */
+#define CROSSOVER_MDI 0
+#define CROSSOVER_MDIX 1
+#define CROSSOVER_AUTO 3
+
+#define INTR_ENABLE_MASK 0x6CA0
+
+/*
+ * Set the bits given by 'bitval' in PHY register 'reg'.
+ */
+static void mdio_set_bit(struct cphy *cphy, int reg, u32 bitval)
+{
+ u32 val;
+
+ (void) simple_mdio_read(cphy, reg, &val);
+ (void) simple_mdio_write(cphy, reg, val | bitval);
+}
+
+/*
+ * Clear the bits given by 'bitval' in PHY register 'reg'.
+ */
+static void mdio_clear_bit(struct cphy *cphy, int reg, u32 bitval)
+{
+ u32 val;
+
+ (void) simple_mdio_read(cphy, reg, &val);
+ (void) simple_mdio_write(cphy, reg, val & ~bitval);
+}
+
+/*
+ * NAME: mv88e1xxx_reset
+ *
+ * DESC: Reset the given PHY's port. NOTE: This is not a global
+ * chip reset.
+ *
+ * PARAMS: cphy - Pointer to PHY instance data.
+ * wait - Currently unused.
+ *
+ * RETURN: 0 - Successful reset.
+ * -1 - Timeout.
+ */
+static int mv88e1xxx_reset(struct cphy *cphy, int wait)
+{
+ u32 ctl;
+ int time_out = 1000;
+
+ mdio_set_bit(cphy, MII_BMCR, BMCR_RESET);
+
+ do {
+ (void) simple_mdio_read(cphy, MII_BMCR, &ctl);
+ ctl &= BMCR_RESET;
+ if (ctl)
+ udelay(1);
+ } while (ctl && --time_out);
+
+ return ctl ? -1 : 0;
+}
+
+static int mv88e1xxx_interrupt_enable(struct cphy *cphy)
+{
+ /* Enable PHY interrupts. */
+ (void) simple_mdio_write(cphy, MV88E1XXX_INTERRUPT_ENABLE_REGISTER,
+ INTR_ENABLE_MASK);
+
+ /* Enable Marvell interrupts through Elmer0. */
+ if (t1_is_asic(cphy->adapter)) {
+ u32 elmer;
+
+ t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+ elmer |= ELMER0_GP_BIT1;
+ if (is_T2(cphy->adapter))
+ elmer |= ELMER0_GP_BIT2 | ELMER0_GP_BIT3 | ELMER0_GP_BIT4;
+ t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+ }
+ return 0;
+}
+
+static int mv88e1xxx_interrupt_disable(struct cphy *cphy)
+{
+ /* Disable all phy interrupts. */
+ (void) simple_mdio_write(cphy, MV88E1XXX_INTERRUPT_ENABLE_REGISTER, 0);
+
+ /* Disable Marvell interrupts through Elmer0. */
+ if (t1_is_asic(cphy->adapter)) {
+ u32 elmer;
+
+ t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+ elmer &= ~ELMER0_GP_BIT1;
+ if (is_T2(cphy->adapter))
+ elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4);
+ t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+ }
+ return 0;
+}
+
+static int mv88e1xxx_interrupt_clear(struct cphy *cphy)
+{
+ u32 elmer;
+
+ /* Clear PHY interrupts by reading the register. */
+ (void) simple_mdio_read(cphy,
+ MV88E1XXX_INTERRUPT_STATUS_REGISTER, &elmer);
+
+ /* Clear Marvell interrupts through Elmer0. */
+ if (t1_is_asic(cphy->adapter)) {
+ t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
+ elmer |= ELMER0_GP_BIT1;
+ if (is_T2(cphy->adapter))
+ elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
+ t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
+ }
+ return 0;
+}
+
+/*
+ * Set the PHY speed and duplex. This also disables auto-negotiation, except
+ * for 1Gb/s, where auto-negotiation is mandatory.
+ */
+static int mv88e1xxx_set_speed_duplex(struct cphy *phy, int speed, int duplex)
+{
+ u32 ctl;
+
+ (void) simple_mdio_read(phy, MII_BMCR, &ctl);
+ if (speed >= 0) {
+ ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
+ if (speed == SPEED_100)
+ ctl |= BMCR_SPEED100;
+ else if (speed == SPEED_1000)
+ ctl |= BMCR_SPEED1000;
+ }
+ if (duplex >= 0) {
+ ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
+ if (duplex == DUPLEX_FULL)
+ ctl |= BMCR_FULLDPLX;
+ }
+ if (ctl & BMCR_SPEED1000) /* auto-negotiation required for 1Gb/s */
+ ctl |= BMCR_ANENABLE;
+ (void) simple_mdio_write(phy, MII_BMCR, ctl);
+ return 0;
+}
+
+static int mv88e1xxx_crossover_set(struct cphy *cphy, int crossover)
+{
+ u32 data32;
+
+ (void) simple_mdio_read(cphy,
+ MV88E1XXX_SPECIFIC_CNTRL_REGISTER, &data32);
+ data32 &= ~V_PSCR_MDI_XOVER_MODE(M_PSCR_MDI_XOVER_MODE);
+ data32 |= V_PSCR_MDI_XOVER_MODE(crossover);
+ (void) simple_mdio_write(cphy,
+ MV88E1XXX_SPECIFIC_CNTRL_REGISTER, data32);
+ return 0;
+}
+
+static int mv88e1xxx_autoneg_enable(struct cphy *cphy)
+{
+ u32 ctl;
+
+ (void) mv88e1xxx_crossover_set(cphy, CROSSOVER_AUTO);
+
+ (void) simple_mdio_read(cphy, MII_BMCR, &ctl);
+ /* restart autoneg for change to take effect */
+ ctl |= BMCR_ANENABLE | BMCR_ANRESTART;
+ (void) simple_mdio_write(cphy, MII_BMCR, ctl);
+ return 0;
+}
+
+static int mv88e1xxx_autoneg_disable(struct cphy *cphy)
+{
+ u32 ctl;
+
+ /*
+ * Crossover *must* be set to manual in order to disable auto-neg.
+ * The Alaska FAQs document highlights this point.
+ */
+ (void) mv88e1xxx_crossover_set(cphy, CROSSOVER_MDI);
+
+ /*
+ * Must include autoneg reset when disabling auto-neg. This
+ * is described in the Alaska FAQ document.
+ */
+ (void) simple_mdio_read(cphy, MII_BMCR, &ctl);
+ ctl &= ~BMCR_ANENABLE;
+ (void) simple_mdio_write(cphy, MII_BMCR, ctl | BMCR_ANRESTART);
+ return 0;
+}
+
+static int mv88e1xxx_autoneg_restart(struct cphy *cphy)
+{
+ mdio_set_bit(cphy, MII_BMCR, BMCR_ANRESTART);
+ return 0;
+}
+
+static int mv88e1xxx_advertise(struct cphy *phy, unsigned int advertise_map)
+{
+ u32 val = 0;
+
+ if (advertise_map &
+ (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
+ (void) simple_mdio_read(phy, MII_GBCR, &val);
+ val &= ~(GBCR_ADV_1000HALF | GBCR_ADV_1000FULL);
+ if (advertise_map & ADVERTISED_1000baseT_Half)
+ val |= GBCR_ADV_1000HALF;
+ if (advertise_map & ADVERTISED_1000baseT_Full)
+ val |= GBCR_ADV_1000FULL;
+ }
+ (void) simple_mdio_write(phy, MII_GBCR, val);
+
+ val = 1; /* IEEE 802.3 selector field (ADVERTISE_CSMA) */
+ if (advertise_map & ADVERTISED_10baseT_Half)
+ val |= ADVERTISE_10HALF;
+ if (advertise_map & ADVERTISED_10baseT_Full)
+ val |= ADVERTISE_10FULL;
+ if (advertise_map & ADVERTISED_100baseT_Half)
+ val |= ADVERTISE_100HALF;
+ if (advertise_map & ADVERTISED_100baseT_Full)
+ val |= ADVERTISE_100FULL;
+ if (advertise_map & ADVERTISED_PAUSE)
+ val |= ADVERTISE_PAUSE;
+ if (advertise_map & ADVERTISED_ASYM_PAUSE)
+ val |= ADVERTISE_PAUSE_ASYM;
+ (void) simple_mdio_write(phy, MII_ADVERTISE, val);
+ return 0;
+}
+
+static int mv88e1xxx_set_loopback(struct cphy *cphy, int on)
+{
+ if (on)
+ mdio_set_bit(cphy, MII_BMCR, BMCR_LOOPBACK);
+ else
+ mdio_clear_bit(cphy, MII_BMCR, BMCR_LOOPBACK);
+ return 0;
+}
+
+static int mv88e1xxx_get_link_status(struct cphy *cphy, int *link_ok,
+ int *speed, int *duplex, int *fc)
+{
+ u32 status;
+ int sp = -1, dplx = -1, pause = 0;
+
+ (void) simple_mdio_read(cphy,
+ MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status);
+ if ((status & V_PSSR_STATUS_RESOLVED) != 0) {
+ if (status & V_PSSR_RX_PAUSE)
+ pause |= PAUSE_RX;
+ if (status & V_PSSR_TX_PAUSE)
+ pause |= PAUSE_TX;
+ dplx = (status & V_PSSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
+ sp = G_PSSR_SPEED(status);
+ if (sp == 0)
+ sp = SPEED_10;
+ else if (sp == 1)
+ sp = SPEED_100;
+ else
+ sp = SPEED_1000;
+ }
+ if (link_ok)
+ *link_ok = (status & V_PSSR_LINK) != 0;
+ if (speed)
+ *speed = sp;
+ if (duplex)
+ *duplex = dplx;
+ if (fc)
+ *fc = pause;
+ return 0;
+}
+
+static int mv88e1xxx_downshift_set(struct cphy *cphy, int downshift_enable)
+{
+ u32 val;
+
+ (void) simple_mdio_read(cphy,
+ MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER, &val);
+
+ /*
+ * Set the downshift counter to 2 so we try to establish Gb link
+ * twice before downshifting.
+ */
+ val &= ~(V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(M_DOWNSHIFT_CNT));
+
+ if (downshift_enable)
+ val |= V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(2);
+ (void) simple_mdio_write(cphy,
+ MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER, val);
+ return 0;
+}
+
+static int mv88e1xxx_interrupt_handler(struct cphy *cphy)
+{
+ int cphy_cause = 0;
+ u32 status;
+
+ /*
+ * Loop until cause reads zero. Need to handle bouncing interrupts.
+ */
+ while (1) {
+ u32 cause;
+
+ (void) simple_mdio_read(cphy,
+ MV88E1XXX_INTERRUPT_STATUS_REGISTER,
+ &cause);
+ cause &= INTR_ENABLE_MASK;
+ if (!cause)
+ break;
+
+ if (cause & MV88E1XXX_INTR_LINK_CHNG) {
+ (void) simple_mdio_read(cphy,
+ MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status);
+
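+			/* MV88E1XXX_INTR_LINK_CHNG (0x400) doubles as V_PSSR_LINK here */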
+ if (status & MV88E1XXX_INTR_LINK_CHNG)
+ cphy->state |= PHY_LINK_UP;
+ else {
+ cphy->state &= ~PHY_LINK_UP;
+ if (cphy->state & PHY_AUTONEG_EN)
+ cphy->state &= ~PHY_AUTONEG_RDY;
+ cphy_cause |= cphy_cause_link_change;
+ }
+ }
+
+ if (cause & MV88E1XXX_INTR_AUTONEG_DONE)
+ cphy->state |= PHY_AUTONEG_RDY;
+
+ if ((cphy->state & (PHY_LINK_UP | PHY_AUTONEG_RDY)) ==
+ (PHY_LINK_UP | PHY_AUTONEG_RDY))
+ cphy_cause |= cphy_cause_link_change;
+ }
+ return cphy_cause;
+}
+
+static void mv88e1xxx_destroy(struct cphy *cphy)
+{
+ kfree(cphy);
+}
+
+static struct cphy_ops mv88e1xxx_ops = {
+ .destroy = mv88e1xxx_destroy,
+ .reset = mv88e1xxx_reset,
+ .interrupt_enable = mv88e1xxx_interrupt_enable,
+ .interrupt_disable = mv88e1xxx_interrupt_disable,
+ .interrupt_clear = mv88e1xxx_interrupt_clear,
+ .interrupt_handler = mv88e1xxx_interrupt_handler,
+ .autoneg_enable = mv88e1xxx_autoneg_enable,
+ .autoneg_disable = mv88e1xxx_autoneg_disable,
+ .autoneg_restart = mv88e1xxx_autoneg_restart,
+ .advertise = mv88e1xxx_advertise,
+ .set_loopback = mv88e1xxx_set_loopback,
+ .set_speed_duplex = mv88e1xxx_set_speed_duplex,
+ .get_link_status = mv88e1xxx_get_link_status,
+};
+
+static struct cphy *mv88e1xxx_phy_create(struct net_device *dev, int phy_addr,
+ const struct mdio_ops *mdio_ops)
+{
+ struct adapter *adapter = netdev_priv(dev);
+ struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
+
+ if (!cphy)
+ return NULL;
+
+ cphy_init(cphy, dev, phy_addr, &mv88e1xxx_ops, mdio_ops);
+
+	/* Configure particular PHYs to run in a different mode. */
+ if ((board_info(adapter)->caps & SUPPORTED_TP) &&
+ board_info(adapter)->chip_phy == CHBT_PHY_88E1111) {
+ /*
+ * Configure the PHY transmitter as class A to reduce EMI.
+ */
+ (void) simple_mdio_write(cphy,
+ MV88E1XXX_EXTENDED_ADDR_REGISTER, 0xB);
+ (void) simple_mdio_write(cphy,
+ MV88E1XXX_EXTENDED_REGISTER, 0x8004);
+ }
+ (void) mv88e1xxx_downshift_set(cphy, 1); /* Enable downshift */
+
+ /* LED */
+ if (is_T2(adapter)) {
+ (void) simple_mdio_write(cphy,
+ MV88E1XXX_LED_CONTROL_REGISTER, 0x1);
+ }
+
+ return cphy;
+}
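+
+/*
+ * Lifecycle sketch (assumed from the ops tables in this file, not an
+ * actual call site): board code creates the PHY through
+ * t1_mv88e1xxx_ops.create() and later tears it down via the instance's
+ * destroy hook:
+ *
+ *	struct cphy *phy = t1_mv88e1xxx_ops.create(dev, addr, mdio_ops);
+ *	...
+ *	phy->ops->destroy(phy);
+ */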
+
+static int mv88e1xxx_phy_reset(adapter_t *adapter)
+{
+ return 0;
+}
+
+const struct gphy t1_mv88e1xxx_ops = {
+ .create = mv88e1xxx_phy_create,
+ .reset = mv88e1xxx_phy_reset
+};
diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h
new file mode 100644
index 000000000000..967cc4286359
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h
@@ -0,0 +1,127 @@
+/* $Date: 2005/03/07 23:59:05 $ $RCSfile: mv88e1xxx.h,v $ $Revision: 1.13 $ */
+#ifndef CHELSIO_MV8E1XXX_H
+#define CHELSIO_MV8E1XXX_H
+
+#ifndef BMCR_SPEED1000
+# define BMCR_SPEED1000 0x40
+#endif
+
+#ifndef ADVERTISE_PAUSE
+# define ADVERTISE_PAUSE 0x400
+#endif
+#ifndef ADVERTISE_PAUSE_ASYM
+# define ADVERTISE_PAUSE_ASYM 0x800
+#endif
+
+/* Gigabit MII registers */
+#define MII_GBCR 9 /* 1000Base-T control register */
+#define MII_GBSR 10 /* 1000Base-T status register */
+
+/* 1000Base-T control register fields */
+#define GBCR_ADV_1000HALF 0x100
+#define GBCR_ADV_1000FULL 0x200
+#define GBCR_PREFER_MASTER 0x400
+#define GBCR_MANUAL_AS_MASTER 0x800
+#define GBCR_MANUAL_CONFIG_ENABLE 0x1000
+
+/* 1000Base-T status register fields */
+#define GBSR_LP_1000HALF 0x400
+#define GBSR_LP_1000FULL 0x800
+#define GBSR_REMOTE_OK 0x1000
+#define GBSR_LOCAL_OK 0x2000
+#define GBSR_LOCAL_MASTER 0x4000
+#define GBSR_MASTER_FAULT 0x8000
+
+/* Marvell PHY interrupt status bits. */
+#define MV88E1XXX_INTR_JABBER 0x0001
+#define MV88E1XXX_INTR_POLARITY_CHNG 0x0002
+#define MV88E1XXX_INTR_ENG_DETECT_CHNG 0x0010
+#define MV88E1XXX_INTR_DOWNSHIFT 0x0020
+#define MV88E1XXX_INTR_MDI_XOVER_CHNG 0x0040
+#define MV88E1XXX_INTR_FIFO_OVER_UNDER 0x0080
+#define MV88E1XXX_INTR_FALSE_CARRIER 0x0100
+#define MV88E1XXX_INTR_SYMBOL_ERROR 0x0200
+#define MV88E1XXX_INTR_LINK_CHNG 0x0400
+#define MV88E1XXX_INTR_AUTONEG_DONE 0x0800
+#define MV88E1XXX_INTR_PAGE_RECV 0x1000
+#define MV88E1XXX_INTR_DUPLEX_CHNG 0x2000
+#define MV88E1XXX_INTR_SPEED_CHNG 0x4000
+#define MV88E1XXX_INTR_AUTONEG_ERR 0x8000
+
+/* Marvell PHY specific registers. */
+#define MV88E1XXX_SPECIFIC_CNTRL_REGISTER 16
+#define MV88E1XXX_SPECIFIC_STATUS_REGISTER 17
+#define MV88E1XXX_INTERRUPT_ENABLE_REGISTER 18
+#define MV88E1XXX_INTERRUPT_STATUS_REGISTER 19
+#define MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER 20
+#define MV88E1XXX_RECV_ERR_CNTR_REGISTER 21
+#define MV88E1XXX_RES_REGISTER 22
+#define MV88E1XXX_GLOBAL_STATUS_REGISTER 23
+#define MV88E1XXX_LED_CONTROL_REGISTER 24
+#define MV88E1XXX_MANUAL_LED_OVERRIDE_REGISTER 25
+#define MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_2_REGISTER 26
+#define MV88E1XXX_EXT_PHY_SPECIFIC_STATUS_REGISTER 27
+#define MV88E1XXX_VIRTUAL_CABLE_TESTER_REGISTER 28
+#define MV88E1XXX_EXTENDED_ADDR_REGISTER 29
+#define MV88E1XXX_EXTENDED_REGISTER 30
+
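+/*
+ * The register field macros below follow the driver-wide convention:
+ *	S_<FIELD>	bit offset of the field
+ *	M_<FIELD>	right-justified field mask
+ *	V_<FIELD>(x)	value x shifted into field position
+ *	G_<FIELD>(x)	field extracted from a full register value
+ * For example, G_PSSR_SPEED(0x4400) == 1, which the link status code
+ * decodes as 100 Mb/s.
+ */
+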
+/* PHY specific control register fields */
+#define S_PSCR_MDI_XOVER_MODE 5
+#define M_PSCR_MDI_XOVER_MODE 0x3
+#define V_PSCR_MDI_XOVER_MODE(x) ((x) << S_PSCR_MDI_XOVER_MODE)
+#define G_PSCR_MDI_XOVER_MODE(x) (((x) >> S_PSCR_MDI_XOVER_MODE) & M_PSCR_MDI_XOVER_MODE)
+
+/* Extended PHY specific control register fields */
+#define S_DOWNSHIFT_ENABLE 8
+#define V_DOWNSHIFT_ENABLE (1 << S_DOWNSHIFT_ENABLE)
+
+#define S_DOWNSHIFT_CNT 9
+#define M_DOWNSHIFT_CNT 0x7
+#define V_DOWNSHIFT_CNT(x) ((x) << S_DOWNSHIFT_CNT)
+#define G_DOWNSHIFT_CNT(x) (((x) >> S_DOWNSHIFT_CNT) & M_DOWNSHIFT_CNT)
+
+/* PHY specific status register fields */
+#define S_PSSR_JABBER 0
+#define V_PSSR_JABBER (1 << S_PSSR_JABBER)
+
+#define S_PSSR_POLARITY 1
+#define V_PSSR_POLARITY (1 << S_PSSR_POLARITY)
+
+#define S_PSSR_RX_PAUSE 2
+#define V_PSSR_RX_PAUSE (1 << S_PSSR_RX_PAUSE)
+
+#define S_PSSR_TX_PAUSE 3
+#define V_PSSR_TX_PAUSE (1 << S_PSSR_TX_PAUSE)
+
+#define S_PSSR_ENERGY_DETECT 4
+#define V_PSSR_ENERGY_DETECT (1 << S_PSSR_ENERGY_DETECT)
+
+#define S_PSSR_DOWNSHIFT_STATUS 5
+#define V_PSSR_DOWNSHIFT_STATUS (1 << S_PSSR_DOWNSHIFT_STATUS)
+
+#define S_PSSR_MDI 6
+#define V_PSSR_MDI (1 << S_PSSR_MDI)
+
+#define S_PSSR_CABLE_LEN 7
+#define M_PSSR_CABLE_LEN 0x7
+#define V_PSSR_CABLE_LEN(x) ((x) << S_PSSR_CABLE_LEN)
+#define G_PSSR_CABLE_LEN(x) (((x) >> S_PSSR_CABLE_LEN) & M_PSSR_CABLE_LEN)
+
+#define S_PSSR_LINK 10
+#define V_PSSR_LINK (1 << S_PSSR_LINK)
+
+#define S_PSSR_STATUS_RESOLVED 11
+#define V_PSSR_STATUS_RESOLVED (1 << S_PSSR_STATUS_RESOLVED)
+
+#define S_PSSR_PAGE_RECEIVED 12
+#define V_PSSR_PAGE_RECEIVED (1 << S_PSSR_PAGE_RECEIVED)
+
+#define S_PSSR_DUPLEX 13
+#define V_PSSR_DUPLEX (1 << S_PSSR_DUPLEX)
+
+#define S_PSSR_SPEED 14
+#define M_PSSR_SPEED 0x3
+#define V_PSSR_SPEED(x) ((x) << S_PSSR_SPEED)
+#define G_PSSR_SPEED(x) (((x) >> S_PSSR_SPEED) & M_PSSR_SPEED)
+
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
new file mode 100644
index 000000000000..f7136b2fd1e5
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
@@ -0,0 +1,260 @@
+/*****************************************************************************
+ * *
+ * File: mv88x201x.c *
+ * $Revision: 1.12 $ *
+ * $Date: 2005/04/15 19:27:14 $ *
+ * Description: *
+ * Marvell PHY (mv88x201x) functionality. *
+ * part of the Chelsio 10Gb Ethernet Driver. *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License, version 2, as *
+ * published by the Free Software Foundation. *
+ * *
+ * You should have received a copy of the GNU General Public License along *
+ * with this program; if not, write to the Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
+ * *
+ * http://www.chelsio.com *
+ * *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
+ * All rights reserved. *
+ * *
+ * Maintainers: maintainers@chelsio.com *
+ * *
+ * Authors: Dimitrios Michailidis <dm@chelsio.com> *
+ * Tina Yang <tainay@chelsio.com> *
+ * Felix Marti <felix@chelsio.com> *
+ * Scott Bardone <sbardone@chelsio.com> *
+ * Kurt Ottaway <kottaway@chelsio.com> *
+ * Frank DiMambro <frank@chelsio.com> *
+ * *
+ * History: *
+ * *
+ ****************************************************************************/
+
+#include "cphy.h"
+#include "elmer0.h"
+
+/*
+ * The 88x2010 Rev C. requires some link status registers to be read
+ * twice in order to get the right values.  Future revisions will fix
+ * this problem and then this macro can disappear.
+ */
+#define MV88x2010_LINK_STATUS_BUGS 1
+
+static int led_init(struct cphy *cphy)
+{
+	/* Set up the LED registers so we can turn the LEDs on/off.
+	 * Writing these bits maps LED control to another
+	 * register: mmd(0x1) addr(0x7).
+	 */
+ cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8304, 0xdddd);
+ return 0;
+}
+
+static int led_link(struct cphy *cphy, u32 do_enable)
+{
+ u32 led = 0;
+#define LINK_ENABLE_BIT 0x1
+
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, &led);
+
+ if (do_enable & LINK_ENABLE_BIT) {
+ led |= LINK_ENABLE_BIT;
+ cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, led);
+ } else {
+ led &= ~LINK_ENABLE_BIT;
+ cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, led);
+ }
+ return 0;
+}
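+
+/*
+ * After the remap done in led_init(), bit 0 of PMA/PMD register 7
+ * (MDIO_CTRL2) appears to drive the link LED; led_link() simply sets
+ * or clears that bit.
+ */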
+
+/* Port Reset */
+static int mv88x201x_reset(struct cphy *cphy, int wait)
+{
+ /* This can be done through registers. It is not required since
+ * a full chip reset is used.
+ */
+ return 0;
+}
+
+static int mv88x201x_interrupt_enable(struct cphy *cphy)
+{
+ /* Enable PHY LASI interrupts. */
+ cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
+ MDIO_PMA_LASI_LSALARM);
+
+ /* Enable Marvell interrupts through Elmer0. */
+ if (t1_is_asic(cphy->adapter)) {
+ u32 elmer;
+
+ t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+ elmer |= ELMER0_GP_BIT6;
+ t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+ }
+ return 0;
+}
+
+static int mv88x201x_interrupt_disable(struct cphy *cphy)
+{
+ /* Disable PHY LASI interrupts. */
+ cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0x0);
+
+ /* Disable Marvell interrupts through Elmer0. */
+ if (t1_is_asic(cphy->adapter)) {
+ u32 elmer;
+
+ t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+ elmer &= ~ELMER0_GP_BIT6;
+ t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+ }
+ return 0;
+}
+
+static int mv88x201x_interrupt_clear(struct cphy *cphy)
+{
+ u32 elmer;
+ u32 val;
+
+#ifdef MV88x2010_LINK_STATUS_BUGS
+	/* Required to read twice before the clear takes effect. */
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val);
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val);
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
+
+	/* Read this register after the others above it, or else
+	 * it doesn't clear correctly.
+	 */
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
+#endif
+
+ /* Clear link status. */
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
+ /* Clear PHY LASI interrupts. */
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
+
+#ifdef MV88x2010_LINK_STATUS_BUGS
+ /* Do it again. */
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val);
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val);
+#endif
+
+ /* Clear Marvell interrupts through Elmer0. */
+ if (t1_is_asic(cphy->adapter)) {
+ t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
+ elmer |= ELMER0_GP_BIT6;
+ t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
+ }
+ return 0;
+}
+
+static int mv88x201x_interrupt_handler(struct cphy *cphy)
+{
+ /* Clear interrupts */
+ mv88x201x_interrupt_clear(cphy);
+
+ /* We have only enabled link change interrupts and so
+ * cphy_cause must be a link change interrupt.
+ */
+ return cphy_cause_link_change;
+}
+
+static int mv88x201x_set_loopback(struct cphy *cphy, int on)
+{
+ return 0;
+}
+
+static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok,
+ int *speed, int *duplex, int *fc)
+{
+ u32 val = 0;
+
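+	/*
+	 * Note: the MDIO_STAT1 link bit is latching-low, so a single read
+	 * also reports any link drop since the previous read.
+	 */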
+ if (link_ok) {
+ /* Read link status. */
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
+ val &= MDIO_STAT1_LSTATUS;
+ *link_ok = (val == MDIO_STAT1_LSTATUS);
+ /* Turn on/off Link LED */
+ led_link(cphy, *link_ok);
+ }
+ if (speed)
+ *speed = SPEED_10000;
+ if (duplex)
+ *duplex = DUPLEX_FULL;
+ if (fc)
+ *fc = PAUSE_RX | PAUSE_TX;
+ return 0;
+}
+
+static void mv88x201x_destroy(struct cphy *cphy)
+{
+ kfree(cphy);
+}
+
+static struct cphy_ops mv88x201x_ops = {
+ .destroy = mv88x201x_destroy,
+ .reset = mv88x201x_reset,
+ .interrupt_enable = mv88x201x_interrupt_enable,
+ .interrupt_disable = mv88x201x_interrupt_disable,
+ .interrupt_clear = mv88x201x_interrupt_clear,
+ .interrupt_handler = mv88x201x_interrupt_handler,
+ .get_link_status = mv88x201x_get_link_status,
+ .set_loopback = mv88x201x_set_loopback,
+ .mmds = (MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS |
+ MDIO_DEVS_PHYXS | MDIO_DEVS_WIS),
+};
+
+static struct cphy *mv88x201x_phy_create(struct net_device *dev, int phy_addr,
+ const struct mdio_ops *mdio_ops)
+{
+ u32 val;
+ struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
+
+ if (!cphy)
+ return NULL;
+
+ cphy_init(cphy, dev, phy_addr, &mv88x201x_ops, mdio_ops);
+
+	/* Command the PHY to enable the XFP's clock. */
+ cphy_mdio_read(cphy, MDIO_MMD_PCS, 0x8300, &val);
+ cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8300, val | 1);
+
+ /* Clear link status. Required because of a bug in the PHY. */
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT2, &val);
+ cphy_mdio_read(cphy, MDIO_MMD_PCS, MDIO_STAT2, &val);
+
+	/* Allows the Link and Ack LEDs to be turned on/off. */
+ led_init(cphy);
+ return cphy;
+}
+
+/* Chip Reset */
+static int mv88x201x_phy_reset(adapter_t *adapter)
+{
+ u32 val;
+
+ t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val &= ~4;
+ t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ msleep(100);
+
+ t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
+ msleep(1000);
+
+	/* Now let's enable the laser. Delay 100us. */
+ t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val |= 0x8000;
+ t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(100);
+ return 0;
+}
+
+const struct gphy t1_mv88x201x_ops = {
+ .create = mv88x201x_phy_create,
+ .reset = mv88x201x_phy_reset
+};
diff --git a/drivers/net/ethernet/chelsio/cxgb/my3126.c b/drivers/net/ethernet/chelsio/cxgb/my3126.c
new file mode 100644
index 000000000000..a683fd3bb624
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/my3126.c
@@ -0,0 +1,209 @@
+/* $Date: 2005/11/12 02:13:49 $ $RCSfile: my3126.c,v $ $Revision: 1.15 $ */
+#include "cphy.h"
+#include "elmer0.h"
+#include "suni1x10gexp_regs.h"
+
+/* Port Reset */
+static int my3126_reset(struct cphy *cphy, int wait)
+{
+ /*
+ * This can be done through registers. It is not required since
+ * a full chip reset is used.
+ */
+ return 0;
+}
+
+static int my3126_interrupt_enable(struct cphy *cphy)
+{
+ schedule_delayed_work(&cphy->phy_update, HZ/30);
+ t1_tpi_read(cphy->adapter, A_ELMER0_GPO, &cphy->elmer_gpo);
+ return 0;
+}
+
+static int my3126_interrupt_disable(struct cphy *cphy)
+{
+ cancel_delayed_work_sync(&cphy->phy_update);
+ return 0;
+}
+
+static int my3126_interrupt_clear(struct cphy *cphy)
+{
+ return 0;
+}
+
+#define OFFSET(REG_ADDR) (REG_ADDR << 2)
+
+static int my3126_interrupt_handler(struct cphy *cphy)
+{
+ u32 val;
+ u16 val16;
+ u16 status;
+ u32 act_count;
+ adapter_t *adapter;
+ adapter = cphy->adapter;
+
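+	/* run the link check only on every 50th pass (~1.7s at the HZ/30 cadence) */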
+ if (cphy->count == 50) {
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
+ val16 = (u16) val;
+ status = cphy->bmsr ^ val16;
+
+ if (status & MDIO_STAT1_LSTATUS)
+ t1_link_changed(adapter, 0);
+ cphy->bmsr = val16;
+
+		/*
+		 * We have only enabled link change interrupts, so a link
+		 * state change must be what happened here.
+		 */
+ cphy->count = 0;
+ }
+
+ t1_tpi_write(adapter, OFFSET(SUNI1x10GEXP_REG_MSTAT_CONTROL),
+ SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
+ t1_tpi_read(adapter,
+ OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW), &act_count);
+ t1_tpi_read(adapter,
+ OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW), &val);
+ act_count += val;
+
+ /* Populate elmer_gpo with the register value */
+ t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ cphy->elmer_gpo = val;
+
+	if ((val & (1 << 8)) || (val & (1 << 19)) ||
+	    (cphy->act_count == act_count) || cphy->act_on) {
+ if (is_T2(adapter))
+ val |= (1 << 9);
+ else if (t1_is_T1B(adapter))
+ val |= (1 << 20);
+ cphy->act_on = 0;
+ } else {
+ if (is_T2(adapter))
+ val &= ~(1 << 9);
+ else if (t1_is_T1B(adapter))
+ val &= ~(1 << 20);
+ cphy->act_on = 1;
+ }
+
+ t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+ cphy->elmer_gpo = val;
+ cphy->act_count = act_count;
+ cphy->count++;
+
+ return cphy_cause_link_change;
+}
+
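+/*
+ * There is no hardware interrupt line for this PHY: interrupt_enable()
+ * above just schedules the phy_update delayed work, and this worker
+ * funnels each polling pass through the "interrupt" handler.
+ */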
+static void my3126_poll(struct work_struct *work)
+{
+ struct cphy *cphy = container_of(work, struct cphy, phy_update.work);
+
+ my3126_interrupt_handler(cphy);
+}
+
+static int my3126_set_loopback(struct cphy *cphy, int on)
+{
+ return 0;
+}
+
+/* Check the link status and drive the link LED to match. */
+static int my3126_get_link_status(struct cphy *cphy,
+ int *link_ok, int *speed, int *duplex, int *fc)
+{
+ u32 val;
+ u16 val16;
+ adapter_t *adapter;
+
+ adapter = cphy->adapter;
+ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
+ val16 = (u16) val;
+
+ /* Populate elmer_gpo with the register value */
+ t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ cphy->elmer_gpo = val;
+
+ *link_ok = (val16 & MDIO_STAT1_LSTATUS);
+
+ if (*link_ok) {
+ /* Turn on the LED. */
+ if (is_T2(adapter))
+ val &= ~(1 << 8);
+ else if (t1_is_T1B(adapter))
+ val &= ~(1 << 19);
+ } else {
+ /* Turn off the LED. */
+ if (is_T2(adapter))
+ val |= (1 << 8);
+ else if (t1_is_T1B(adapter))
+ val |= (1 << 19);
+ }
+
+ t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ cphy->elmer_gpo = val;
+ *speed = SPEED_10000;
+ *duplex = DUPLEX_FULL;
+
+ /* need to add flow control */
+ if (fc)
+ *fc = PAUSE_RX | PAUSE_TX;
+
+ return 0;
+}
+
+static void my3126_destroy(struct cphy *cphy)
+{
+ kfree(cphy);
+}
+
+static struct cphy_ops my3126_ops = {
+ .destroy = my3126_destroy,
+ .reset = my3126_reset,
+ .interrupt_enable = my3126_interrupt_enable,
+ .interrupt_disable = my3126_interrupt_disable,
+ .interrupt_clear = my3126_interrupt_clear,
+ .interrupt_handler = my3126_interrupt_handler,
+ .get_link_status = my3126_get_link_status,
+ .set_loopback = my3126_set_loopback,
+ .mmds = (MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS |
+ MDIO_DEVS_PHYXS),
+};
+
+static struct cphy *my3126_phy_create(struct net_device *dev,
+ int phy_addr, const struct mdio_ops *mdio_ops)
+{
+	struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
+
+ if (!cphy)
+ return NULL;
+
+ cphy_init(cphy, dev, phy_addr, &my3126_ops, mdio_ops);
+	INIT_DELAYED_WORK(&cphy->phy_update, my3126_poll);
+ cphy->bmsr = 0;
+
+ return cphy;
+}
+
+/* Chip Reset */
+static int my3126_phy_reset(adapter_t *adapter)
+{
+ u32 val;
+
+ t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val &= ~4;
+ t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ msleep(100);
+
+ t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
+ msleep(1000);
+
+	/* Now let's enable the laser. Delay 100us. */
+ t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val |= 0x8000;
+ t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(100);
+ return 0;
+}
+
+const struct gphy t1_my3126_ops = {
+ .create = my3126_phy_create,
+ .reset = my3126_phy_reset
+};
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
new file mode 100644
index 000000000000..40c7b93ababc
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -0,0 +1,796 @@
+/*****************************************************************************
+ * *
+ * File: pm3393.c *
+ * $Revision: 1.16 $ *
+ * $Date: 2005/05/14 00:59:32 $ *
+ * Description: *
+ * PMC/SIERRA (pm3393) MAC-PHY functionality. *
+ * part of the Chelsio 10Gb Ethernet Driver. *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License, version 2, as *
+ * published by the Free Software Foundation. *
+ * *
+ * You should have received a copy of the GNU General Public License along *
+ * with this program; if not, write to the Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
+ * *
+ * http://www.chelsio.com *
+ * *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
+ * All rights reserved. *
+ * *
+ * Maintainers: maintainers@chelsio.com *
+ * *
+ * Authors: Dimitrios Michailidis <dm@chelsio.com> *
+ * Tina Yang <tainay@chelsio.com> *
+ * Felix Marti <felix@chelsio.com> *
+ * Scott Bardone <sbardone@chelsio.com> *
+ * Kurt Ottaway <kottaway@chelsio.com> *
+ * Frank DiMambro <frank@chelsio.com> *
+ * *
+ * History: *
+ * *
+ ****************************************************************************/
+
+#include "common.h"
+#include "regs.h"
+#include "gmac.h"
+#include "elmer0.h"
+#include "suni1x10gexp_regs.h"
+
+#include <linux/crc32.h>
+#include <linux/slab.h>
+
+#define OFFSET(REG_ADDR) ((REG_ADDR) << 2)
+
+/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
+#define MAX_FRAME_SIZE 9600
+
+#define IPG 12
+#define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \
+ SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \
+ SUNI1x10GEXP_BITMSK_TXXG_PADEN)
+#define RXXG_CONF1_VAL (SUNI1x10GEXP_BITMSK_RXXG_PUREP | 0x14 | \
+ SUNI1x10GEXP_BITMSK_RXXG_FLCHK | SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP)
+
+/* Update statistics every 15 minutes */
+#define STATS_TICK_SECS (15 * 60)
+
+enum { /* RMON registers */
+ RxOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW,
+ RxUnicastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW,
+ RxMulticastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW,
+ RxBroadcastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW,
+ RxPAUSEMACCtrlFramesReceived = SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW,
+ RxFrameCheckSequenceErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW,
+ RxFramesLostDueToInternalMACErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW,
+ RxSymbolErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW,
+ RxInRangeLengthErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW,
+ RxFramesTooLongErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW,
+ RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW,
+ RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW,
+ RxUndersizedFrames = SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW,
+ RxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW,
+ RxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW,
+
+ TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW,
+ TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW,
+ TxTransmitSystemError = SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW,
+ TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW,
+ TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW,
+ TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW,
+ TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW,
+ TxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW,
+ TxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW
+};
+
+struct _cmac_instance {
+ u8 enabled;
+ u8 fc;
+ u8 mac_addr[6];
+};
+
+static int pmread(struct cmac *cmac, u32 reg, u32 *data32)
+{
+ t1_tpi_read(cmac->adapter, OFFSET(reg), data32);
+ return 0;
+}
+
+static int pmwrite(struct cmac *cmac, u32 reg, u32 data32)
+{
+ t1_tpi_write(cmac->adapter, OFFSET(reg), data32);
+ return 0;
+}
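+
+/*
+ * PM3393 registers are numbered as in the datasheet; OFFSET() scales the
+ * number by four to form the TPI address that t1_tpi_read()/t1_tpi_write()
+ * expect, and pmread()/pmwrite() are thin wrappers that apply it.
+ */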
+
+/* Port reset. */
+static int pm3393_reset(struct cmac *cmac)
+{
+ return 0;
+}
+
+/*
+ * Enable interrupts for the PM3393
+ *
+ * 1. Enable PM3393 BLOCK interrupts.
+ * 2. Enable PM3393 Master Interrupt bit(INTE)
+ * 3. Enable ELMER's PM3393 bit.
+ * 4. Enable Terminator external interrupt.
+ */
+static int pm3393_interrupt_enable(struct cmac *cmac)
+{
+ u32 pl_intr;
+
+ /* PM3393 - Enabling all hardware block interrupts.
+ */
+ pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0xffff);
+ pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0xffff);
+ pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0xffff);
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0xffff);
+
+ /* Don't interrupt on statistics overflow, we are polling */
+ pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
+ pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
+ pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
+ pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
+
+ pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0xffff);
+ pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0xffff);
+ pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0xffff);
+ pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0xffff);
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0xffff);
+ pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0xffff);
+ pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0xffff);
+ pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0xffff);
+ pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0xffff);
+
+ /* PM3393 - Global interrupt enable
+ */
+ /* TBD XXX Disable for now until we figure out why error interrupts keep asserting. */
+ pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
+ 0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
+
+	/* TERMINATOR - PL_INTERRUPTS_EXT */
+ pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
+ pl_intr |= F_PL_INTR_EXT;
+ writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
+ return 0;
+}
+
+static int pm3393_interrupt_disable(struct cmac *cmac)
+{
+ u32 elmer;
+
+	/* PM3393 - Disabling HW interrupt blocks. */
+ pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0);
+ pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0);
+ pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0);
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0);
+ pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
+ pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
+ pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
+ pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
+ pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0);
+ pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0);
+ pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0);
+ pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0);
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0);
+ pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0);
+ pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0);
+ pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0);
+ pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0);
+
+ /* PM3393 - Global interrupt enable */
+ pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, 0);
+
+ /* ELMER - External chip interrupts. */
+ t1_tpi_read(cmac->adapter, A_ELMER0_INT_ENABLE, &elmer);
+ elmer &= ~ELMER0_GP_BIT1;
+ t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
+
+	/* TERMINATOR - PL_INTERRUPTS_EXT */
+ /* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
+ * COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
+ */
+
+ return 0;
+}
+
+static int pm3393_interrupt_clear(struct cmac *cmac)
+{
+ u32 elmer;
+ u32 pl_intr;
+ u32 val32;
+
+ /* PM3393 - Clearing HW interrupt blocks. Note, this assumes
+ * bit WCIMODE=0 for a clear-on-read.
+ */
+ pmread(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS, &val32);
+ pmread(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS, &val32);
+ pmread(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS, &val32);
+ pmread(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS, &val32);
+ pmread(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT, &val32);
+ pmread(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS, &val32);
+ pmread(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT, &val32);
+ pmread(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS, &val32);
+ pmread(cmac, SUNI1x10GEXP_REG_RXXG_INTERRUPT, &val32);
+ pmread(cmac, SUNI1x10GEXP_REG_TXXG_INTERRUPT, &val32);
+ pmread(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT, &val32);
+ pmread(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION,
+ &val32);
+ pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS, &val32);
+ pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE, &val32);
+
+ /* PM3393 - Global interrupt status
+ */
+ pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, &val32);
+
+ /* ELMER - External chip interrupts.
+ */
+ t1_tpi_read(cmac->adapter, A_ELMER0_INT_CAUSE, &elmer);
+ elmer |= ELMER0_GP_BIT1;
+ t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);
+
+	/* TERMINATOR - PL_INTERRUPTS_EXT
+ */
+ pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
+ pl_intr |= F_PL_INTR_EXT;
+ writel(pl_intr, cmac->adapter->regs + A_PL_CAUSE);
+
+ return 0;
+}
+
+/* Interrupt handler */
+static int pm3393_interrupt_handler(struct cmac *cmac)
+{
+ u32 master_intr_status;
+
+ /* Read the master interrupt status register. */
+ pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
+ &master_intr_status);
+ if (netif_msg_intr(cmac->adapter))
+ dev_dbg(&cmac->adapter->pdev->dev, "PM3393 intr cause 0x%x\n",
+ master_intr_status);
+
+	/* TBD XXX Let's just clear everything for now */
+ pm3393_interrupt_clear(cmac);
+
+ return 0;
+}
+
+static int pm3393_enable(struct cmac *cmac, int which)
+{
+ if (which & MAC_DIRECTION_RX)
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1,
+ (RXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_RXXG_RXEN));
+
+ if (which & MAC_DIRECTION_TX) {
+ u32 val = TXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_TXXG_TXEN0;
+
+ if (cmac->instance->fc & PAUSE_RX)
+ val |= SUNI1x10GEXP_BITMSK_TXXG_FCRX;
+ if (cmac->instance->fc & PAUSE_TX)
+ val |= SUNI1x10GEXP_BITMSK_TXXG_FCTX;
+ pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, val);
+ }
+
+ cmac->instance->enabled |= which;
+ return 0;
+}
+
+static int pm3393_enable_port(struct cmac *cmac, int which)
+{
+ /* Clear port statistics */
+ pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
+ SUNI1x10GEXP_BITMSK_MSTAT_CLEAR);
+ udelay(2);
+ memset(&cmac->stats, 0, sizeof(struct cmac_statistics));
+
+ pm3393_enable(cmac, which);
+
+ /*
+ * XXX This should be done by the PHY and preferably not at all.
+ * The PHY doesn't give us link status indication on its own so have
+ * the link management code query it instead.
+ */
+ t1_link_changed(cmac->adapter, 0);
+ return 0;
+}
+
+static int pm3393_disable(struct cmac *cmac, int which)
+{
+ if (which & MAC_DIRECTION_RX)
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, RXXG_CONF1_VAL);
+ if (which & MAC_DIRECTION_TX)
+ pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, TXXG_CONF1_VAL);
+
+ /*
+ * The disable is graceful. Give the PM3393 time. Can't wait very
+ * long here, we may be holding locks.
+ */
+ udelay(20);
+
+ cmac->instance->enabled &= ~which;
+ return 0;
+}
+
+static int pm3393_loopback_enable(struct cmac *cmac)
+{
+ return 0;
+}
+
+static int pm3393_loopback_disable(struct cmac *cmac)
+{
+ return 0;
+}
+
+static int pm3393_set_mtu(struct cmac *cmac, int mtu)
+{
+ int enabled = cmac->instance->enabled;
+
+ /* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */
+ mtu += 14 + 4;
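+	/* e.g. the standard 1500-byte MTU yields a 1518-byte maximum frame */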
+ if (mtu > MAX_FRAME_SIZE)
+ return -EINVAL;
+
+ /* Disable Rx/Tx MAC before configuring it. */
+ if (enabled)
+ pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH, mtu);
+ pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE, mtu);
+
+ if (enabled)
+ pm3393_enable(cmac, enabled);
+ return 0;
+}
+
+static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
+{
+ int enabled = cmac->instance->enabled & MAC_DIRECTION_RX;
+ u32 rx_mode;
+
+ /* Disable MAC RX before reconfiguring it */
+ if (enabled)
+ pm3393_disable(cmac, MAC_DIRECTION_RX);
+
+ pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, &rx_mode);
+ rx_mode &= ~(SUNI1x10GEXP_BITMSK_RXXG_PMODE |
+ SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN);
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2,
+ (u16)rx_mode);
+
+ if (t1_rx_mode_promisc(rm)) {
+ /* Promiscuous mode. */
+ rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_PMODE;
+ }
+ if (t1_rx_mode_allmulti(rm)) {
+ /* Accept all multicast. */
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, 0xffff);
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, 0xffff);
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, 0xffff);
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, 0xffff);
+ rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
+ } else if (t1_rx_mode_mc_cnt(rm)) {
+ /* Accept one or more multicast(s). */
+ struct netdev_hw_addr *ha;
+ int bit;
+ u16 mc_filter[4] = { 0, };
+
+ netdev_for_each_mc_addr(ha, t1_get_netdev(rm)) {
+			/* CRC bits [28:23] select one of 64 multicast hash bins */
+ bit = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x3f;
+ mc_filter[bit >> 4] |= 1 << (bit & 0xf);
+ }
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, mc_filter[1]);
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, mc_filter[2]);
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, mc_filter[3]);
+ rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
+ }
+
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, (u16)rx_mode);
+
+ if (enabled)
+ pm3393_enable(cmac, MAC_DIRECTION_RX);
+
+ return 0;
+}
+
+static int pm3393_get_speed_duplex_fc(struct cmac *cmac, int *speed,
+ int *duplex, int *fc)
+{
+ if (speed)
+ *speed = SPEED_10000;
+ if (duplex)
+ *duplex = DUPLEX_FULL;
+ if (fc)
+ *fc = cmac->instance->fc;
+ return 0;
+}
+
+static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex,
+ int fc)
+{
+ if (speed >= 0 && speed != SPEED_10000)
+ return -1;
+ if (duplex >= 0 && duplex != DUPLEX_FULL)
+ return -1;
+ if (fc & ~(PAUSE_TX | PAUSE_RX))
+ return -1;
+
+ if (fc != cmac->instance->fc) {
+ cmac->instance->fc = (u8) fc;
+ if (cmac->instance->enabled & MAC_DIRECTION_TX)
+ pm3393_enable(cmac, MAC_DIRECTION_TX);
+ }
+ return 0;
+}
+
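+/*
+ * Each MSTAT counter is 40 bits wide in hardware, read as three TPI words
+ * (16 + 16 + 8 significant bits).  The rollover bits latched in 'ro'
+ * flag counters that wrapped since the last snapshot; for those, carry a
+ * one into bit 40 of the 64-bit software accumulator.
+ */
+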
+#define RMON_UPDATE(mac, name, stat_name) \
+{ \
+ t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \
+ t1_tpi_read((mac)->adapter, OFFSET((name)+1), &val1); \
+ t1_tpi_read((mac)->adapter, OFFSET((name)+2), &val2); \
+ (mac)->stats.stat_name = (u64)(val0 & 0xffff) | \
+ ((u64)(val1 & 0xffff) << 16) | \
+ ((u64)(val2 & 0xff) << 32) | \
+ ((mac)->stats.stat_name & \
+ 0xffffff0000000000ULL); \
+ if (ro & \
+ (1ULL << ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \
+ (mac)->stats.stat_name += 1ULL << 40; \
+}
+
+static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
+ int flag)
+{
+ u64 ro;
+ u32 val0, val1, val2, val3;
+
+ /* Snap the counters */
+ pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
+ SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
+
+ /* Counter rollover, clear on read */
+ pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0, &val0);
+ pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1, &val1);
+ pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2, &val2);
+ pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3, &val3);
+ ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
+ (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
+
+ /* Rx stats */
+ RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK);
+ RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK);
+ RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK);
+ RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK);
+ RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames);
+ RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors);
+ RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors,
+ RxInternalMACRcvError);
+ RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
+ RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
+ RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
+ RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
+ RMON_UPDATE(mac, RxFragments, RxRuntErrors);
+ RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
+ RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK);
+ RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK);
+
+ /* Tx stats */
+ RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
+ RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
+ TxInternalMACXmitError);
+ RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
+ RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
+ RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
+ RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
+ RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
+ RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK);
+ RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK);
+
+ return &mac->stats;
+}
+
+static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
+{
+ memcpy(mac_addr, cmac->instance->mac_addr, 6);
+ return 0;
+}
+
+static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
+{
+ u32 val, lo, mid, hi, enabled = cmac->instance->enabled;
+
+ /*
+ * MAC addr: 00:07:43:00:13:09
+ *
+ * ma[5] = 0x09
+ * ma[4] = 0x13
+ * ma[3] = 0x00
+ * ma[2] = 0x43
+ * ma[1] = 0x07
+ * ma[0] = 0x00
+ *
+ * The PM3393 requires byte swapping and reverse order entry
+ * when programming MAC addresses:
+ *
+ * low_bits[15:0] = ma[1]:ma[0]
+ * mid_bits[31:16] = ma[3]:ma[2]
+ * high_bits[47:32] = ma[5]:ma[4]
+ */
+
+ /* Store local copy */
+ memcpy(cmac->instance->mac_addr, ma, 6);
+
+ lo = ((u32) ma[1] << 8) | (u32) ma[0];
+ mid = ((u32) ma[3] << 8) | (u32) ma[2];
+ hi = ((u32) ma[5] << 8) | (u32) ma[4];
+
+ /* Disable Rx/Tx MAC before configuring it. */
+ if (enabled)
+ pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+
+ /* Set RXXG Station Address */
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_15_0, lo);
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_31_16, mid);
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_47_32, hi);
+
+ /* Set TXXG Station Address */
+ pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_15_0, lo);
+ pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_31_16, mid);
+ pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_47_32, hi);
+
+ /* Setup Exact Match Filter 1 with our MAC address
+ *
+ * Must disable exact match filter before configuring it.
+ */
+ pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, &val);
+ val &= 0xff0f;
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
+
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW, lo);
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID, mid);
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH, hi);
+
+ val |= 0x0090;
+ pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
+
+ if (enabled)
+ pm3393_enable(cmac, enabled);
+ return 0;
+}
+
+static void pm3393_destroy(struct cmac *cmac)
+{
+ kfree(cmac);
+}
+
+static struct cmac_ops pm3393_ops = {
+ .destroy = pm3393_destroy,
+ .reset = pm3393_reset,
+ .interrupt_enable = pm3393_interrupt_enable,
+ .interrupt_disable = pm3393_interrupt_disable,
+ .interrupt_clear = pm3393_interrupt_clear,
+ .interrupt_handler = pm3393_interrupt_handler,
+ .enable = pm3393_enable_port,
+ .disable = pm3393_disable,
+ .loopback_enable = pm3393_loopback_enable,
+ .loopback_disable = pm3393_loopback_disable,
+ .set_mtu = pm3393_set_mtu,
+ .set_rx_mode = pm3393_set_rx_mode,
+ .get_speed_duplex_fc = pm3393_get_speed_duplex_fc,
+ .set_speed_duplex_fc = pm3393_set_speed_duplex_fc,
+ .statistics_update = pm3393_update_statistics,
+ .macaddress_get = pm3393_macaddress_get,
+ .macaddress_set = pm3393_macaddress_set
+};
+
+static struct cmac *pm3393_mac_create(adapter_t *adapter, int index)
+{
+ struct cmac *cmac;
+
+ cmac = kzalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL);
+ if (!cmac)
+ return NULL;
+
+ cmac->ops = &pm3393_ops;
+ cmac->instance = (cmac_instance *) (cmac + 1);
+ cmac->adapter = adapter;
+ cmac->instance->fc = PAUSE_TX | PAUSE_RX;
+
+ t1_tpi_write(adapter, OFFSET(0x0001), 0x00008000);
+ t1_tpi_write(adapter, OFFSET(0x0001), 0x00000000);
+ t1_tpi_write(adapter, OFFSET(0x2308), 0x00009800);
+ t1_tpi_write(adapter, OFFSET(0x2305), 0x00001001); /* PL4IO Enable */
+ t1_tpi_write(adapter, OFFSET(0x2320), 0x00008800);
+ t1_tpi_write(adapter, OFFSET(0x2321), 0x00008800);
+ t1_tpi_write(adapter, OFFSET(0x2322), 0x00008800);
+ t1_tpi_write(adapter, OFFSET(0x2323), 0x00008800);
+ t1_tpi_write(adapter, OFFSET(0x2324), 0x00008800);
+ t1_tpi_write(adapter, OFFSET(0x2325), 0x00008800);
+ t1_tpi_write(adapter, OFFSET(0x2326), 0x00008800);
+ t1_tpi_write(adapter, OFFSET(0x2327), 0x00008800);
+ t1_tpi_write(adapter, OFFSET(0x2328), 0x00008800);
+ t1_tpi_write(adapter, OFFSET(0x2329), 0x00008800);
+ t1_tpi_write(adapter, OFFSET(0x232a), 0x00008800);
+ t1_tpi_write(adapter, OFFSET(0x232b), 0x00008800);
+ t1_tpi_write(adapter, OFFSET(0x232c), 0x00008800);
+ t1_tpi_write(adapter, OFFSET(0x232d), 0x00008800);
+ t1_tpi_write(adapter, OFFSET(0x232e), 0x00008800);
+ t1_tpi_write(adapter, OFFSET(0x232f), 0x00008800);
+ t1_tpi_write(adapter, OFFSET(0x230d), 0x00009c00);
+ t1_tpi_write(adapter, OFFSET(0x2304), 0x00000202); /* PL4IO Calendar Repetitions */
+
+ t1_tpi_write(adapter, OFFSET(0x3200), 0x00008080); /* EFLX Enable */
+ t1_tpi_write(adapter, OFFSET(0x3210), 0x00000000); /* EFLX Channel Deprovision */
+ t1_tpi_write(adapter, OFFSET(0x3203), 0x00000000); /* EFLX Low Limit */
+ t1_tpi_write(adapter, OFFSET(0x3204), 0x00000040); /* EFLX High Limit */
+ t1_tpi_write(adapter, OFFSET(0x3205), 0x000002cc); /* EFLX Almost Full */
+ t1_tpi_write(adapter, OFFSET(0x3206), 0x00000199); /* EFLX Almost Empty */
+ t1_tpi_write(adapter, OFFSET(0x3207), 0x00000240); /* EFLX Cut Through Threshold */
+ t1_tpi_write(adapter, OFFSET(0x3202), 0x00000000); /* EFLX Indirect Register Update */
+ t1_tpi_write(adapter, OFFSET(0x3210), 0x00000001); /* EFLX Channel Provision */
+ t1_tpi_write(adapter, OFFSET(0x3208), 0x0000ffff); /* EFLX Undocumented */
+ t1_tpi_write(adapter, OFFSET(0x320a), 0x0000ffff); /* EFLX Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x320c), 0x0000ffff); /* EFLX enable overflow interrupt. The other bits are undocumented */
+ t1_tpi_write(adapter, OFFSET(0x320e), 0x0000ffff); /* EFLX Undocumented */
+
+ t1_tpi_write(adapter, OFFSET(0x2200), 0x0000c000); /* IFLX Configuration - enable */
+ t1_tpi_write(adapter, OFFSET(0x2201), 0x00000000); /* IFLX Channel Deprovision */
+ t1_tpi_write(adapter, OFFSET(0x220e), 0x00000000); /* IFLX Low Limit */
+ t1_tpi_write(adapter, OFFSET(0x220f), 0x00000100); /* IFLX High Limit */
+ t1_tpi_write(adapter, OFFSET(0x2210), 0x00000c00); /* IFLX Almost Full Limit */
+ t1_tpi_write(adapter, OFFSET(0x2211), 0x00000599); /* IFLX Almost Empty Limit */
+ t1_tpi_write(adapter, OFFSET(0x220d), 0x00000000); /* IFLX Indirect Register Update */
+ t1_tpi_write(adapter, OFFSET(0x2201), 0x00000001); /* IFLX Channel Provision */
+ t1_tpi_write(adapter, OFFSET(0x2203), 0x0000ffff); /* IFLX Undocumented */
+ t1_tpi_write(adapter, OFFSET(0x2205), 0x0000ffff); /* IFLX Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x2209), 0x0000ffff); /* IFLX Enable overflow interrupt. The other bits are undocumented */
+
+ t1_tpi_write(adapter, OFFSET(0x2241), 0xfffffffe); /* PL4MOS Undocumented */
+ t1_tpi_write(adapter, OFFSET(0x2242), 0x0000ffff); /* PL4MOS Undocumented */
+ t1_tpi_write(adapter, OFFSET(0x2243), 0x00000008); /* PL4MOS Starving Burst Size */
+ t1_tpi_write(adapter, OFFSET(0x2244), 0x00000008); /* PL4MOS Hungry Burst Size */
+ t1_tpi_write(adapter, OFFSET(0x2245), 0x00000008); /* PL4MOS Transfer Size */
+ t1_tpi_write(adapter, OFFSET(0x2240), 0x00000005); /* PL4MOS Disable */
+
+ t1_tpi_write(adapter, OFFSET(0x2280), 0x00002103); /* PL4ODP Training Repeat and SOP rule */
+ t1_tpi_write(adapter, OFFSET(0x2284), 0x00000000); /* PL4ODP MAX_T setting */
+
+ t1_tpi_write(adapter, OFFSET(0x3280), 0x00000087); /* PL4IDU Enable data forward, port state machine. Set ALLOW_NON_ZERO_OLB */
+ t1_tpi_write(adapter, OFFSET(0x3282), 0x0000001f); /* PL4IDU Enable Dip4 check error interrupts */
+
+ t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32); /* # TXXG Config */
+ /* For T1 use timer based Mac flow control. */
+ t1_tpi_write(adapter, OFFSET(0x304d), 0x8000);
+ t1_tpi_write(adapter, OFFSET(0x2040), 0x059c); /* # RXXG Config */
+ t1_tpi_write(adapter, OFFSET(0x2049), 0x0001); /* # RXXG Cut Through */
+ t1_tpi_write(adapter, OFFSET(0x2070), 0x0000); /* # Disable promiscuous mode */
+
+ /* Setup Exact Match Filter 0 to allow broadcast packets.
+ */
+ t1_tpi_write(adapter, OFFSET(0x206e), 0x0000); /* # Disable Match Enable bit */
+ t1_tpi_write(adapter, OFFSET(0x204a), 0xffff); /* # low addr */
+ t1_tpi_write(adapter, OFFSET(0x204b), 0xffff); /* # mid addr */
+ t1_tpi_write(adapter, OFFSET(0x204c), 0xffff); /* # high addr */
+ t1_tpi_write(adapter, OFFSET(0x206e), 0x0009); /* # Enable Match Enable bit */
+
+ t1_tpi_write(adapter, OFFSET(0x0003), 0x0000); /* # NO SOP/ PAD_EN setup */
+ t1_tpi_write(adapter, OFFSET(0x0100), 0x0ff0); /* # RXEQB disabled */
+ t1_tpi_write(adapter, OFFSET(0x0101), 0x0f0f); /* # No Preemphasis */
+
+ return cmac;
+}
+
+static int pm3393_mac_reset(adapter_t *adapter)
+{
+ u32 val;
+ u32 x;
+ u32 is_pl4_reset_finished;
+ u32 is_pl4_outof_lock;
+ u32 is_xaui_mabc_pll_locked;
+ u32 successful_reset;
+ int i;
+
+ /* The following steps are required to properly reset
+ * the PM3393. This information is provided in the
+ * PM3393 datasheet (Issue 2: November 2002)
+ * section 13.1 -- Device Reset.
+ *
+ * The PM3393 has three types of components that are
+ * individually reset:
+ *
+ * DRESETB - Digital circuitry
+ * PL4_ARESETB - PL4 analog circuitry
+ * XAUI_ARESETB - XAUI bus analog circuitry
+ *
+ * Steps to reset PM3393 using RSTB pin:
+ *
+	 * 1. Assert RSTB pin low (write 0).
+	 * 2. Wait at least 1ms to initiate a complete initialization of the
+	 *    device.
+	 * 3. Wait until all external clocks and REFSEL are stable.
+	 * 4. Wait a minimum of 1ms (after external clocks and REFSEL are
+	 *    stable).
+	 * 5. De-assert RSTB (write 1).
+	 * 6. Wait until the internal timers expire, after ~14ms.
+	 *    - Allows the analog clock synthesizer (PL4CSU) to stabilize to
+	 *      the selected reference frequency before allowing the digital
+	 *      portion of the device to operate.
+	 * 7. Wait at least 200us for the XAUI interface to stabilize.
+	 * 8. Verify the PM3393 came out of reset successfully.
+	 *    Set the successful-reset flag if everything worked, else try
+	 *    again a few more times.
+ */
+
+ successful_reset = 0;
+ for (i = 0; i < 3 && !successful_reset; i++) {
+ /* 1 */
+ t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val &= ~1;
+ t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+ /* 2 */
+ msleep(1);
+
+ /* 3 */
+ msleep(1);
+
+ /* 4 */
+ msleep(2 /*1 extra ms for safety */ );
+
+ /* 5 */
+ val |= 1;
+ t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+ /* 6 */
+ msleep(15 /*1 extra ms for safety */ );
+
+ /* 7 */
+ msleep(1);
+
+ /* 8 */
+
+ /* Has PL4 analog block come out of reset correctly? */
+ t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_DEVICE_STATUS), &val);
+ is_pl4_reset_finished = (val & SUNI1x10GEXP_BITMSK_TOP_EXPIRED);
+
+		/* TBD XXX SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL gets locked
+		 * later in the init sequence; figure out why. */
+
+ /* Have all PL4 block clocks locked? */
+ x = (SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL
+ /*| SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL */ |
+ SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL |
+ SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL |
+ SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL);
+ is_pl4_outof_lock = (val & x);
+
+		/* ??? If this fails, we might be able to software-reset the
+		 * XAUI part and try to recover, saving us another HW reset. */
+		/* Has the XAUI MABC PLL circuitry stabilized? */
+ is_xaui_mabc_pll_locked =
+ (val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED);
+
+ successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
+ && is_xaui_mabc_pll_locked);
+
+ if (netif_msg_hw(adapter))
+ dev_dbg(&adapter->pdev->dev,
+ "PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, "
+ "is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n",
+ i, is_pl4_reset_finished, val,
+ is_pl4_outof_lock, is_xaui_mabc_pll_locked);
+ }
+ return successful_reset ? 0 : 1;
+}
+
+const struct gmac t1_pm3393_ops = {
+ .stats_update_period = STATS_TICK_SECS,
+ .create = pm3393_mac_create,
+ .reset = pm3393_mac_reset,
+};
diff --git a/drivers/net/ethernet/chelsio/cxgb/regs.h b/drivers/net/ethernet/chelsio/cxgb/regs.h
new file mode 100644
index 000000000000..c80bf4d6d0a6
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/regs.h
@@ -0,0 +1,2168 @@
+/*****************************************************************************
+ * *
+ * File: regs.h *
+ * $Revision: 1.8 $ *
+ * $Date: 2005/06/21 18:29:48 $ *
+ * Description: *
+ * part of the Chelsio 10Gb Ethernet Driver. *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License, version 2, as *
+ * published by the Free Software Foundation. *
+ * *
+ * You should have received a copy of the GNU General Public License along *
+ * with this program; if not, write to the Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
+ * *
+ * http://www.chelsio.com *
+ * *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
+ * All rights reserved. *
+ * *
+ * Maintainers: maintainers@chelsio.com *
+ * *
+ * Authors: Dimitrios Michailidis <dm@chelsio.com> *
+ * Tina Yang <tainay@chelsio.com> *
+ * Felix Marti <felix@chelsio.com> *
+ * Scott Bardone <sbardone@chelsio.com> *
+ * Kurt Ottaway <kottaway@chelsio.com> *
+ * Frank DiMambro <frank@chelsio.com> *
+ * *
+ * History: *
+ * *
+ ****************************************************************************/
+
+#ifndef _CXGB_REGS_H_
+#define _CXGB_REGS_H_
+
+/* SGE registers */
+#define A_SG_CONTROL 0x0
+
+#define S_CMDQ0_ENABLE 0
+#define V_CMDQ0_ENABLE(x) ((x) << S_CMDQ0_ENABLE)
+#define F_CMDQ0_ENABLE V_CMDQ0_ENABLE(1U)
+
+#define S_CMDQ1_ENABLE 1
+#define V_CMDQ1_ENABLE(x) ((x) << S_CMDQ1_ENABLE)
+#define F_CMDQ1_ENABLE V_CMDQ1_ENABLE(1U)
+
+#define S_FL0_ENABLE 2
+#define V_FL0_ENABLE(x) ((x) << S_FL0_ENABLE)
+#define F_FL0_ENABLE V_FL0_ENABLE(1U)
+
+#define S_FL1_ENABLE 3
+#define V_FL1_ENABLE(x) ((x) << S_FL1_ENABLE)
+#define F_FL1_ENABLE V_FL1_ENABLE(1U)
+
+#define S_CPL_ENABLE 4
+#define V_CPL_ENABLE(x) ((x) << S_CPL_ENABLE)
+#define F_CPL_ENABLE V_CPL_ENABLE(1U)
+
+#define S_RESPONSE_QUEUE_ENABLE 5
+#define V_RESPONSE_QUEUE_ENABLE(x) ((x) << S_RESPONSE_QUEUE_ENABLE)
+#define F_RESPONSE_QUEUE_ENABLE V_RESPONSE_QUEUE_ENABLE(1U)
+
+#define S_CMDQ_PRIORITY 6
+#define M_CMDQ_PRIORITY 0x3
+#define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY)
+#define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY)
+
+#define S_DISABLE_CMDQ0_GTS 8
+#define V_DISABLE_CMDQ0_GTS(x) ((x) << S_DISABLE_CMDQ0_GTS)
+#define F_DISABLE_CMDQ0_GTS V_DISABLE_CMDQ0_GTS(1U)
+
+#define S_DISABLE_CMDQ1_GTS 9
+#define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS)
+#define F_DISABLE_CMDQ1_GTS V_DISABLE_CMDQ1_GTS(1U)
+
+#define S_DISABLE_FL0_GTS 10
+#define V_DISABLE_FL0_GTS(x) ((x) << S_DISABLE_FL0_GTS)
+#define F_DISABLE_FL0_GTS V_DISABLE_FL0_GTS(1U)
+
+#define S_DISABLE_FL1_GTS 11
+#define V_DISABLE_FL1_GTS(x) ((x) << S_DISABLE_FL1_GTS)
+#define F_DISABLE_FL1_GTS V_DISABLE_FL1_GTS(1U)
+
+#define S_ENABLE_BIG_ENDIAN 12
+#define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN)
+#define F_ENABLE_BIG_ENDIAN V_ENABLE_BIG_ENDIAN(1U)
+
+#define S_FL_SELECTION_CRITERIA 13
+#define V_FL_SELECTION_CRITERIA(x) ((x) << S_FL_SELECTION_CRITERIA)
+#define F_FL_SELECTION_CRITERIA V_FL_SELECTION_CRITERIA(1U)
+
+#define S_ISCSI_COALESCE 14
+#define V_ISCSI_COALESCE(x) ((x) << S_ISCSI_COALESCE)
+#define F_ISCSI_COALESCE V_ISCSI_COALESCE(1U)
+
+#define S_RX_PKT_OFFSET 15
+#define M_RX_PKT_OFFSET 0x7
+#define V_RX_PKT_OFFSET(x) ((x) << S_RX_PKT_OFFSET)
+#define G_RX_PKT_OFFSET(x) (((x) >> S_RX_PKT_OFFSET) & M_RX_PKT_OFFSET)
+
+#define S_VLAN_XTRACT 18
+#define V_VLAN_XTRACT(x) ((x) << S_VLAN_XTRACT)
+#define F_VLAN_XTRACT V_VLAN_XTRACT(1U)
+
+#define A_SG_DOORBELL 0x4
+#define A_SG_CMD0BASELWR 0x8
+#define A_SG_CMD0BASEUPR 0xc
+#define A_SG_CMD1BASELWR 0x10
+#define A_SG_CMD1BASEUPR 0x14
+#define A_SG_FL0BASELWR 0x18
+#define A_SG_FL0BASEUPR 0x1c
+#define A_SG_FL1BASELWR 0x20
+#define A_SG_FL1BASEUPR 0x24
+#define A_SG_CMD0SIZE 0x28
+
+#define S_CMDQ0_SIZE 0
+#define M_CMDQ0_SIZE 0x1ffff
+#define V_CMDQ0_SIZE(x) ((x) << S_CMDQ0_SIZE)
+#define G_CMDQ0_SIZE(x) (((x) >> S_CMDQ0_SIZE) & M_CMDQ0_SIZE)
+
+#define A_SG_FL0SIZE 0x2c
+
+#define S_FL0_SIZE 0
+#define M_FL0_SIZE 0x1ffff
+#define V_FL0_SIZE(x) ((x) << S_FL0_SIZE)
+#define G_FL0_SIZE(x) (((x) >> S_FL0_SIZE) & M_FL0_SIZE)
+
+#define A_SG_RSPSIZE 0x30
+
+#define S_RESPQ_SIZE 0
+#define M_RESPQ_SIZE 0x1ffff
+#define V_RESPQ_SIZE(x) ((x) << S_RESPQ_SIZE)
+#define G_RESPQ_SIZE(x) (((x) >> S_RESPQ_SIZE) & M_RESPQ_SIZE)
+
+#define A_SG_RSPBASELWR 0x34
+#define A_SG_RSPBASEUPR 0x38
+#define A_SG_FLTHRESHOLD 0x3c
+
+#define S_FL_THRESHOLD 0
+#define M_FL_THRESHOLD 0xffff
+#define V_FL_THRESHOLD(x) ((x) << S_FL_THRESHOLD)
+#define G_FL_THRESHOLD(x) (((x) >> S_FL_THRESHOLD) & M_FL_THRESHOLD)
+
+#define A_SG_RSPQUEUECREDIT 0x40
+
+#define S_RESPQ_CREDIT 0
+#define M_RESPQ_CREDIT 0x1ffff
+#define V_RESPQ_CREDIT(x) ((x) << S_RESPQ_CREDIT)
+#define G_RESPQ_CREDIT(x) (((x) >> S_RESPQ_CREDIT) & M_RESPQ_CREDIT)
+
+#define A_SG_SLEEPING 0x48
+
+#define S_SLEEPING 0
+#define M_SLEEPING 0xffff
+#define V_SLEEPING(x) ((x) << S_SLEEPING)
+#define G_SLEEPING(x) (((x) >> S_SLEEPING) & M_SLEEPING)
+
+#define A_SG_INTRTIMER 0x4c
+
+#define S_INTERRUPT_TIMER_COUNT 0
+#define M_INTERRUPT_TIMER_COUNT 0xffffff
+#define V_INTERRUPT_TIMER_COUNT(x) ((x) << S_INTERRUPT_TIMER_COUNT)
+#define G_INTERRUPT_TIMER_COUNT(x) (((x) >> S_INTERRUPT_TIMER_COUNT) & M_INTERRUPT_TIMER_COUNT)
+
+#define A_SG_CMD0PTR 0x50
+
+#define S_CMDQ0_POINTER 0
+#define M_CMDQ0_POINTER 0xffff
+#define V_CMDQ0_POINTER(x) ((x) << S_CMDQ0_POINTER)
+#define G_CMDQ0_POINTER(x) (((x) >> S_CMDQ0_POINTER) & M_CMDQ0_POINTER)
+
+#define S_CURRENT_GENERATION_BIT 16
+#define V_CURRENT_GENERATION_BIT(x) ((x) << S_CURRENT_GENERATION_BIT)
+#define F_CURRENT_GENERATION_BIT V_CURRENT_GENERATION_BIT(1U)
+
+#define A_SG_CMD1PTR 0x54
+
+#define S_CMDQ1_POINTER 0
+#define M_CMDQ1_POINTER 0xffff
+#define V_CMDQ1_POINTER(x) ((x) << S_CMDQ1_POINTER)
+#define G_CMDQ1_POINTER(x) (((x) >> S_CMDQ1_POINTER) & M_CMDQ1_POINTER)
+
+#define A_SG_FL0PTR 0x58
+
+#define S_FL0_POINTER 0
+#define M_FL0_POINTER 0xffff
+#define V_FL0_POINTER(x) ((x) << S_FL0_POINTER)
+#define G_FL0_POINTER(x) (((x) >> S_FL0_POINTER) & M_FL0_POINTER)
+
+#define A_SG_FL1PTR 0x5c
+
+#define S_FL1_POINTER 0
+#define M_FL1_POINTER 0xffff
+#define V_FL1_POINTER(x) ((x) << S_FL1_POINTER)
+#define G_FL1_POINTER(x) (((x) >> S_FL1_POINTER) & M_FL1_POINTER)
+
+#define A_SG_VERSION 0x6c
+
+#define S_DAY 0
+#define M_DAY 0x1f
+#define V_DAY(x) ((x) << S_DAY)
+#define G_DAY(x) (((x) >> S_DAY) & M_DAY)
+
+#define S_MONTH 5
+#define M_MONTH 0xf
+#define V_MONTH(x) ((x) << S_MONTH)
+#define G_MONTH(x) (((x) >> S_MONTH) & M_MONTH)
+
+#define A_SG_CMD1SIZE 0xb0
+
+#define S_CMDQ1_SIZE 0
+#define M_CMDQ1_SIZE 0x1ffff
+#define V_CMDQ1_SIZE(x) ((x) << S_CMDQ1_SIZE)
+#define G_CMDQ1_SIZE(x) (((x) >> S_CMDQ1_SIZE) & M_CMDQ1_SIZE)
+
+#define A_SG_FL1SIZE 0xb4
+
+#define S_FL1_SIZE 0
+#define M_FL1_SIZE 0x1ffff
+#define V_FL1_SIZE(x) ((x) << S_FL1_SIZE)
+#define G_FL1_SIZE(x) (((x) >> S_FL1_SIZE) & M_FL1_SIZE)
+
+#define A_SG_INT_ENABLE 0xb8
+
+#define S_RESPQ_EXHAUSTED 0
+#define V_RESPQ_EXHAUSTED(x) ((x) << S_RESPQ_EXHAUSTED)
+#define F_RESPQ_EXHAUSTED V_RESPQ_EXHAUSTED(1U)
+
+#define S_RESPQ_OVERFLOW 1
+#define V_RESPQ_OVERFLOW(x) ((x) << S_RESPQ_OVERFLOW)
+#define F_RESPQ_OVERFLOW V_RESPQ_OVERFLOW(1U)
+
+#define S_FL_EXHAUSTED 2
+#define V_FL_EXHAUSTED(x) ((x) << S_FL_EXHAUSTED)
+#define F_FL_EXHAUSTED V_FL_EXHAUSTED(1U)
+
+#define S_PACKET_TOO_BIG 3
+#define V_PACKET_TOO_BIG(x) ((x) << S_PACKET_TOO_BIG)
+#define F_PACKET_TOO_BIG V_PACKET_TOO_BIG(1U)
+
+#define S_PACKET_MISMATCH 4
+#define V_PACKET_MISMATCH(x) ((x) << S_PACKET_MISMATCH)
+#define F_PACKET_MISMATCH V_PACKET_MISMATCH(1U)
+
+#define A_SG_INT_CAUSE 0xbc
+#define A_SG_RESPACCUTIMER 0xc0
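+
+/*
+ * A possible SGE interrupt bring-up, assuming the usual pairing in which
+ * A_SG_INT_CAUSE is write-1-to-clear and A_SG_INT_ENABLE unmasks the same
+ * bits (a sketch, not lifted from a driver):
+ *
+ *	u32 mask = F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | F_FL_EXHAUSTED |
+ *		   F_PACKET_TOO_BIG | F_PACKET_MISMATCH;
+ *	writel(mask, adapter->regs + A_SG_INT_CAUSE);
+ *	writel(mask, adapter->regs + A_SG_INT_ENABLE);
+ */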
+
+/* MC3 registers */
+#define A_MC3_CFG 0x100
+
+#define S_CLK_ENABLE 0
+#define V_CLK_ENABLE(x) ((x) << S_CLK_ENABLE)
+#define F_CLK_ENABLE V_CLK_ENABLE(1U)
+
+#define S_READY 1
+#define V_READY(x) ((x) << S_READY)
+#define F_READY V_READY(1U)
+
+#define S_READ_TO_WRITE_DELAY 2
+#define M_READ_TO_WRITE_DELAY 0x7
+#define V_READ_TO_WRITE_DELAY(x) ((x) << S_READ_TO_WRITE_DELAY)
+#define G_READ_TO_WRITE_DELAY(x) (((x) >> S_READ_TO_WRITE_DELAY) & M_READ_TO_WRITE_DELAY)
+
+#define S_WRITE_TO_READ_DELAY 5
+#define M_WRITE_TO_READ_DELAY 0x7
+#define V_WRITE_TO_READ_DELAY(x) ((x) << S_WRITE_TO_READ_DELAY)
+#define G_WRITE_TO_READ_DELAY(x) (((x) >> S_WRITE_TO_READ_DELAY) & M_WRITE_TO_READ_DELAY)
+
+#define S_MC3_BANK_CYCLE 8
+#define M_MC3_BANK_CYCLE 0xf
+#define V_MC3_BANK_CYCLE(x) ((x) << S_MC3_BANK_CYCLE)
+#define G_MC3_BANK_CYCLE(x) (((x) >> S_MC3_BANK_CYCLE) & M_MC3_BANK_CYCLE)
+
+#define S_REFRESH_CYCLE 12
+#define M_REFRESH_CYCLE 0xf
+#define V_REFRESH_CYCLE(x) ((x) << S_REFRESH_CYCLE)
+#define G_REFRESH_CYCLE(x) (((x) >> S_REFRESH_CYCLE) & M_REFRESH_CYCLE)
+
+#define S_PRECHARGE_CYCLE 16
+#define M_PRECHARGE_CYCLE 0x3
+#define V_PRECHARGE_CYCLE(x) ((x) << S_PRECHARGE_CYCLE)
+#define G_PRECHARGE_CYCLE(x) (((x) >> S_PRECHARGE_CYCLE) & M_PRECHARGE_CYCLE)
+
+#define S_ACTIVE_TO_READ_WRITE_DELAY 18
+#define V_ACTIVE_TO_READ_WRITE_DELAY(x) ((x) << S_ACTIVE_TO_READ_WRITE_DELAY)
+#define F_ACTIVE_TO_READ_WRITE_DELAY V_ACTIVE_TO_READ_WRITE_DELAY(1U)
+
+#define S_ACTIVE_TO_PRECHARGE_DELAY 19
+#define M_ACTIVE_TO_PRECHARGE_DELAY 0x7
+#define V_ACTIVE_TO_PRECHARGE_DELAY(x) ((x) << S_ACTIVE_TO_PRECHARGE_DELAY)
+#define G_ACTIVE_TO_PRECHARGE_DELAY(x) (((x) >> S_ACTIVE_TO_PRECHARGE_DELAY) & M_ACTIVE_TO_PRECHARGE_DELAY)
+
+#define S_WRITE_RECOVERY_DELAY 22
+#define M_WRITE_RECOVERY_DELAY 0x3
+#define V_WRITE_RECOVERY_DELAY(x) ((x) << S_WRITE_RECOVERY_DELAY)
+#define G_WRITE_RECOVERY_DELAY(x) (((x) >> S_WRITE_RECOVERY_DELAY) & M_WRITE_RECOVERY_DELAY)
+
+#define S_DENSITY 24
+#define M_DENSITY 0x3
+#define V_DENSITY(x) ((x) << S_DENSITY)
+#define G_DENSITY(x) (((x) >> S_DENSITY) & M_DENSITY)
+
+#define S_ORGANIZATION 26
+#define V_ORGANIZATION(x) ((x) << S_ORGANIZATION)
+#define F_ORGANIZATION V_ORGANIZATION(1U)
+
+#define S_BANKS 27
+#define V_BANKS(x) ((x) << S_BANKS)
+#define F_BANKS V_BANKS(1U)
+
+#define S_UNREGISTERED 28
+#define V_UNREGISTERED(x) ((x) << S_UNREGISTERED)
+#define F_UNREGISTERED V_UNREGISTERED(1U)
+
+#define S_MC3_WIDTH 29
+#define M_MC3_WIDTH 0x3
+#define V_MC3_WIDTH(x) ((x) << S_MC3_WIDTH)
+#define G_MC3_WIDTH(x) (((x) >> S_MC3_WIDTH) & M_MC3_WIDTH)
+
+#define S_MC3_SLOW 31
+#define V_MC3_SLOW(x) ((x) << S_MC3_SLOW)
+#define F_MC3_SLOW V_MC3_SLOW(1U)
+
+#define A_MC3_MODE 0x104
+
+#define S_MC3_MODE 0
+#define M_MC3_MODE 0x3fff
+#define V_MC3_MODE(x) ((x) << S_MC3_MODE)
+#define G_MC3_MODE(x) (((x) >> S_MC3_MODE) & M_MC3_MODE)
+
+#define S_BUSY 31
+#define V_BUSY(x) ((x) << S_BUSY)
+#define F_BUSY V_BUSY(1U)
+
+#define A_MC3_EXT_MODE 0x108
+
+#define S_MC3_EXTENDED_MODE 0
+#define M_MC3_EXTENDED_MODE 0x3fff
+#define V_MC3_EXTENDED_MODE(x) ((x) << S_MC3_EXTENDED_MODE)
+#define G_MC3_EXTENDED_MODE(x) (((x) >> S_MC3_EXTENDED_MODE) & M_MC3_EXTENDED_MODE)
+
+#define A_MC3_PRECHARG 0x10c
+#define A_MC3_REFRESH 0x110
+
+#define S_REFRESH_ENABLE 0
+#define V_REFRESH_ENABLE(x) ((x) << S_REFRESH_ENABLE)
+#define F_REFRESH_ENABLE V_REFRESH_ENABLE(1U)
+
+#define S_REFRESH_DIVISOR 1
+#define M_REFRESH_DIVISOR 0x3fff
+#define V_REFRESH_DIVISOR(x) ((x) << S_REFRESH_DIVISOR)
+#define G_REFRESH_DIVISOR(x) (((x) >> S_REFRESH_DIVISOR) & M_REFRESH_DIVISOR)
+
+#define A_MC3_STROBE 0x114
+
+#define S_MASTER_DLL_RESET 0
+#define V_MASTER_DLL_RESET(x) ((x) << S_MASTER_DLL_RESET)
+#define F_MASTER_DLL_RESET V_MASTER_DLL_RESET(1U)
+
+#define S_MASTER_DLL_TAP_COUNT 1
+#define M_MASTER_DLL_TAP_COUNT 0xff
+#define V_MASTER_DLL_TAP_COUNT(x) ((x) << S_MASTER_DLL_TAP_COUNT)
+#define G_MASTER_DLL_TAP_COUNT(x) (((x) >> S_MASTER_DLL_TAP_COUNT) & M_MASTER_DLL_TAP_COUNT)
+
+#define S_MASTER_DLL_LOCKED 9
+#define V_MASTER_DLL_LOCKED(x) ((x) << S_MASTER_DLL_LOCKED)
+#define F_MASTER_DLL_LOCKED V_MASTER_DLL_LOCKED(1U)
+
+#define S_MASTER_DLL_MAX_TAP_COUNT 10
+#define V_MASTER_DLL_MAX_TAP_COUNT(x) ((x) << S_MASTER_DLL_MAX_TAP_COUNT)
+#define F_MASTER_DLL_MAX_TAP_COUNT V_MASTER_DLL_MAX_TAP_COUNT(1U)
+
+#define S_MASTER_DLL_TAP_COUNT_OFFSET 11
+#define M_MASTER_DLL_TAP_COUNT_OFFSET 0x3f
+#define V_MASTER_DLL_TAP_COUNT_OFFSET(x) ((x) << S_MASTER_DLL_TAP_COUNT_OFFSET)
+#define G_MASTER_DLL_TAP_COUNT_OFFSET(x) (((x) >> S_MASTER_DLL_TAP_COUNT_OFFSET) & M_MASTER_DLL_TAP_COUNT_OFFSET)
+
+#define S_SLAVE_DLL_RESET 11
+#define V_SLAVE_DLL_RESET(x) ((x) << S_SLAVE_DLL_RESET)
+#define F_SLAVE_DLL_RESET V_SLAVE_DLL_RESET(1U)
+
+#define S_SLAVE_DLL_DELTA 12
+#define M_SLAVE_DLL_DELTA 0xf
+#define V_SLAVE_DLL_DELTA(x) ((x) << S_SLAVE_DLL_DELTA)
+#define G_SLAVE_DLL_DELTA(x) (((x) >> S_SLAVE_DLL_DELTA) & M_SLAVE_DLL_DELTA)
+
+#define S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT 17
+#define M_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT 0x3f
+#define V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT(x) ((x) << S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT)
+#define G_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT(x) (((x) >> S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT) & M_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT)
+
+#define S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE 23
+#define V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE(x) ((x) << S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE)
+#define F_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE(1U)
+
+#define S_SLAVE_DELAY_LINE_TAP_COUNT 24
+#define M_SLAVE_DELAY_LINE_TAP_COUNT 0x3f
+#define V_SLAVE_DELAY_LINE_TAP_COUNT(x) ((x) << S_SLAVE_DELAY_LINE_TAP_COUNT)
+#define G_SLAVE_DELAY_LINE_TAP_COUNT(x) (((x) >> S_SLAVE_DELAY_LINE_TAP_COUNT) & M_SLAVE_DELAY_LINE_TAP_COUNT)
+
+#define A_MC3_ECC_CNTL 0x118
+
+#define S_ECC_GENERATION_ENABLE 0
+#define V_ECC_GENERATION_ENABLE(x) ((x) << S_ECC_GENERATION_ENABLE)
+#define F_ECC_GENERATION_ENABLE V_ECC_GENERATION_ENABLE(1U)
+
+#define S_ECC_CHECK_ENABLE 1
+#define V_ECC_CHECK_ENABLE(x) ((x) << S_ECC_CHECK_ENABLE)
+#define F_ECC_CHECK_ENABLE V_ECC_CHECK_ENABLE(1U)
+
+#define S_CORRECTABLE_ERROR_COUNT 2
+#define M_CORRECTABLE_ERROR_COUNT 0xff
+#define V_CORRECTABLE_ERROR_COUNT(x) ((x) << S_CORRECTABLE_ERROR_COUNT)
+#define G_CORRECTABLE_ERROR_COUNT(x) (((x) >> S_CORRECTABLE_ERROR_COUNT) & M_CORRECTABLE_ERROR_COUNT)
+
+#define S_UNCORRECTABLE_ERROR_COUNT 10
+#define M_UNCORRECTABLE_ERROR_COUNT 0xff
+#define V_UNCORRECTABLE_ERROR_COUNT(x) ((x) << S_UNCORRECTABLE_ERROR_COUNT)
+#define G_UNCORRECTABLE_ERROR_COUNT(x) (((x) >> S_UNCORRECTABLE_ERROR_COUNT) & M_UNCORRECTABLE_ERROR_COUNT)
+
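+/*
+ * Reading the MC3 ECC error counters is one register read plus the G_
+ * extractors (sketch under the same adapter->regs assumption):
+ *
+ *	u32 cntl = readl(adapter->regs + A_MC3_ECC_CNTL);
+ *	unsigned int ce = G_CORRECTABLE_ERROR_COUNT(cntl);
+ *	unsigned int ue = G_UNCORRECTABLE_ERROR_COUNT(cntl);
+ */
+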
+#define A_MC3_CE_ADDR 0x11c
+
+#define S_MC3_CE_ADDR 4
+#define M_MC3_CE_ADDR 0xfffffff
+#define V_MC3_CE_ADDR(x) ((x) << S_MC3_CE_ADDR)
+#define G_MC3_CE_ADDR(x) (((x) >> S_MC3_CE_ADDR) & M_MC3_CE_ADDR)
+
+#define A_MC3_CE_DATA0 0x120
+#define A_MC3_CE_DATA1 0x124
+#define A_MC3_CE_DATA2 0x128
+#define A_MC3_CE_DATA3 0x12c
+#define A_MC3_CE_DATA4 0x130
+#define A_MC3_UE_ADDR 0x134
+
+#define S_MC3_UE_ADDR 4
+#define M_MC3_UE_ADDR 0xfffffff
+#define V_MC3_UE_ADDR(x) ((x) << S_MC3_UE_ADDR)
+#define G_MC3_UE_ADDR(x) (((x) >> S_MC3_UE_ADDR) & M_MC3_UE_ADDR)
+
+#define A_MC3_UE_DATA0 0x138
+#define A_MC3_UE_DATA1 0x13c
+#define A_MC3_UE_DATA2 0x140
+#define A_MC3_UE_DATA3 0x144
+#define A_MC3_UE_DATA4 0x148
+#define A_MC3_BD_ADDR 0x14c
+#define A_MC3_BD_DATA0 0x150
+#define A_MC3_BD_DATA1 0x154
+#define A_MC3_BD_DATA2 0x158
+#define A_MC3_BD_DATA3 0x15c
+#define A_MC3_BD_DATA4 0x160
+#define A_MC3_BD_OP 0x164
+
+#define S_BACK_DOOR_OPERATION 0
+#define V_BACK_DOOR_OPERATION(x) ((x) << S_BACK_DOOR_OPERATION)
+#define F_BACK_DOOR_OPERATION V_BACK_DOOR_OPERATION(1U)
+
+#define A_MC3_BIST_ADDR_BEG 0x168
+#define A_MC3_BIST_ADDR_END 0x16c
+#define A_MC3_BIST_DATA 0x170
+#define A_MC3_BIST_OP 0x174
+
+#define S_OP 0
+#define V_OP(x) ((x) << S_OP)
+#define F_OP V_OP(1U)
+
+#define S_DATA_PATTERN 1
+#define M_DATA_PATTERN 0x3
+#define V_DATA_PATTERN(x) ((x) << S_DATA_PATTERN)
+#define G_DATA_PATTERN(x) (((x) >> S_DATA_PATTERN) & M_DATA_PATTERN)
+
+#define S_CONTINUOUS 3
+#define V_CONTINUOUS(x) ((x) << S_CONTINUOUS)
+#define F_CONTINUOUS V_CONTINUOUS(1U)
+
+#define A_MC3_INT_ENABLE 0x178
+
+#define S_MC3_CORR_ERR 0
+#define V_MC3_CORR_ERR(x) ((x) << S_MC3_CORR_ERR)
+#define F_MC3_CORR_ERR V_MC3_CORR_ERR(1U)
+
+#define S_MC3_UNCORR_ERR 1
+#define V_MC3_UNCORR_ERR(x) ((x) << S_MC3_UNCORR_ERR)
+#define F_MC3_UNCORR_ERR V_MC3_UNCORR_ERR(1U)
+
+#define S_MC3_PARITY_ERR 2
+#define M_MC3_PARITY_ERR 0xff
+#define V_MC3_PARITY_ERR(x) ((x) << S_MC3_PARITY_ERR)
+#define G_MC3_PARITY_ERR(x) (((x) >> S_MC3_PARITY_ERR) & M_MC3_PARITY_ERR)
+
+#define S_MC3_ADDR_ERR 10
+#define V_MC3_ADDR_ERR(x) ((x) << S_MC3_ADDR_ERR)
+#define F_MC3_ADDR_ERR V_MC3_ADDR_ERR(1U)
+
+#define A_MC3_INT_CAUSE 0x17c
+
+/* MC4 registers */
+#define A_MC4_CFG 0x180
+
+#define S_POWER_UP 0
+#define V_POWER_UP(x) ((x) << S_POWER_UP)
+#define F_POWER_UP V_POWER_UP(1U)
+
+#define S_MC4_BANK_CYCLE 8
+#define M_MC4_BANK_CYCLE 0x7
+#define V_MC4_BANK_CYCLE(x) ((x) << S_MC4_BANK_CYCLE)
+#define G_MC4_BANK_CYCLE(x) (((x) >> S_MC4_BANK_CYCLE) & M_MC4_BANK_CYCLE)
+
+#define S_MC4_NARROW 24
+#define V_MC4_NARROW(x) ((x) << S_MC4_NARROW)
+#define F_MC4_NARROW V_MC4_NARROW(1U)
+
+#define S_MC4_SLOW 25
+#define V_MC4_SLOW(x) ((x) << S_MC4_SLOW)
+#define F_MC4_SLOW V_MC4_SLOW(1U)
+
+#define S_MC4A_WIDTH 24
+#define M_MC4A_WIDTH 0x3
+#define V_MC4A_WIDTH(x) ((x) << S_MC4A_WIDTH)
+#define G_MC4A_WIDTH(x) (((x) >> S_MC4A_WIDTH) & M_MC4A_WIDTH)
+
+#define S_MC4A_SLOW 26
+#define V_MC4A_SLOW(x) ((x) << S_MC4A_SLOW)
+#define F_MC4A_SLOW V_MC4A_SLOW(1U)
+
+#define A_MC4_MODE 0x184
+
+#define S_MC4_MODE 0
+#define M_MC4_MODE 0x7fff
+#define V_MC4_MODE(x) ((x) << S_MC4_MODE)
+#define G_MC4_MODE(x) (((x) >> S_MC4_MODE) & M_MC4_MODE)
+
+#define A_MC4_EXT_MODE 0x188
+
+#define S_MC4_EXTENDED_MODE 0
+#define M_MC4_EXTENDED_MODE 0x7fff
+#define V_MC4_EXTENDED_MODE(x) ((x) << S_MC4_EXTENDED_MODE)
+#define G_MC4_EXTENDED_MODE(x) (((x) >> S_MC4_EXTENDED_MODE) & M_MC4_EXTENDED_MODE)
+
+#define A_MC4_REFRESH 0x190
+#define A_MC4_STROBE 0x194
+#define A_MC4_ECC_CNTL 0x198
+#define A_MC4_CE_ADDR 0x19c
+
+#define S_MC4_CE_ADDR 4
+#define M_MC4_CE_ADDR 0xffffff
+#define V_MC4_CE_ADDR(x) ((x) << S_MC4_CE_ADDR)
+#define G_MC4_CE_ADDR(x) (((x) >> S_MC4_CE_ADDR) & M_MC4_CE_ADDR)
+
+#define A_MC4_CE_DATA0 0x1a0
+#define A_MC4_CE_DATA1 0x1a4
+#define A_MC4_CE_DATA2 0x1a8
+#define A_MC4_CE_DATA3 0x1ac
+#define A_MC4_CE_DATA4 0x1b0
+#define A_MC4_UE_ADDR 0x1b4
+
+#define S_MC4_UE_ADDR 4
+#define M_MC4_UE_ADDR 0xffffff
+#define V_MC4_UE_ADDR(x) ((x) << S_MC4_UE_ADDR)
+#define G_MC4_UE_ADDR(x) (((x) >> S_MC4_UE_ADDR) & M_MC4_UE_ADDR)
+
+#define A_MC4_UE_DATA0 0x1b8
+#define A_MC4_UE_DATA1 0x1bc
+#define A_MC4_UE_DATA2 0x1c0
+#define A_MC4_UE_DATA3 0x1c4
+#define A_MC4_UE_DATA4 0x1c8
+#define A_MC4_BD_ADDR 0x1cc
+
+#define S_MC4_BACK_DOOR_ADDR 0
+#define M_MC4_BACK_DOOR_ADDR 0xfffffff
+#define V_MC4_BACK_DOOR_ADDR(x) ((x) << S_MC4_BACK_DOOR_ADDR)
+#define G_MC4_BACK_DOOR_ADDR(x) (((x) >> S_MC4_BACK_DOOR_ADDR) & M_MC4_BACK_DOOR_ADDR)
+
+#define A_MC4_BD_DATA0 0x1d0
+#define A_MC4_BD_DATA1 0x1d4
+#define A_MC4_BD_DATA2 0x1d8
+#define A_MC4_BD_DATA3 0x1dc
+#define A_MC4_BD_DATA4 0x1e0
+#define A_MC4_BD_OP 0x1e4
+
+#define S_OPERATION 0
+#define V_OPERATION(x) ((x) << S_OPERATION)
+#define F_OPERATION V_OPERATION(1U)
+
+#define A_MC4_BIST_ADDR_BEG 0x1e8
+#define A_MC4_BIST_ADDR_END 0x1ec
+#define A_MC4_BIST_DATA 0x1f0
+#define A_MC4_BIST_OP 0x1f4
+#define A_MC4_INT_ENABLE 0x1f8
+
+#define S_MC4_CORR_ERR 0
+#define V_MC4_CORR_ERR(x) ((x) << S_MC4_CORR_ERR)
+#define F_MC4_CORR_ERR V_MC4_CORR_ERR(1U)
+
+#define S_MC4_UNCORR_ERR 1
+#define V_MC4_UNCORR_ERR(x) ((x) << S_MC4_UNCORR_ERR)
+#define F_MC4_UNCORR_ERR V_MC4_UNCORR_ERR(1U)
+
+#define S_MC4_ADDR_ERR 2
+#define V_MC4_ADDR_ERR(x) ((x) << S_MC4_ADDR_ERR)
+#define F_MC4_ADDR_ERR V_MC4_ADDR_ERR(1U)
+
+#define A_MC4_INT_CAUSE 0x1fc
+
+/* TPI registers */
+#define A_TPI_ADDR 0x280
+
+#define S_TPI_ADDRESS 0
+#define M_TPI_ADDRESS 0xffffff
+#define V_TPI_ADDRESS(x) ((x) << S_TPI_ADDRESS)
+#define G_TPI_ADDRESS(x) (((x) >> S_TPI_ADDRESS) & M_TPI_ADDRESS)
+
+#define A_TPI_WR_DATA 0x284
+#define A_TPI_RD_DATA 0x288
+#define A_TPI_CSR 0x28c
+
+#define S_TPIWR 0
+#define V_TPIWR(x) ((x) << S_TPIWR)
+#define F_TPIWR V_TPIWR(1U)
+
+#define S_TPIRDY 1
+#define V_TPIRDY(x) ((x) << S_TPIRDY)
+#define F_TPIRDY V_TPIRDY(1U)
+
+#define S_INT_DIR 31
+#define V_INT_DIR(x) ((x) << S_INT_DIR)
+#define F_INT_DIR V_INT_DIR(1U)
+
+#define A_TPI_PAR 0x29c
+
+#define S_TPIPAR 0
+#define M_TPIPAR 0x7f
+#define V_TPIPAR(x) ((x) << S_TPIPAR)
+#define G_TPIPAR(x) (((x) >> S_TPIPAR) & M_TPIPAR)
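+
+/*
+ * The TPI block appears to be an indirect access window: load A_TPI_ADDR
+ * (and A_TPI_WR_DATA for a write), kick the transfer via F_TPIWR in
+ * A_TPI_CSR, then poll F_TPIRDY for completion.  Hedged sketch of a
+ * write, with addr/val as caller-supplied values:
+ *
+ *	writel(addr, adapter->regs + A_TPI_ADDR);
+ *	writel(val, adapter->regs + A_TPI_WR_DATA);
+ *	writel(F_TPIWR, adapter->regs + A_TPI_CSR);
+ *	while (!(readl(adapter->regs + A_TPI_CSR) & F_TPIRDY))
+ *		cpu_relax();
+ */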
+
+/* TP registers */
+#define A_TP_IN_CONFIG 0x300
+
+#define S_TP_IN_CSPI_TUNNEL 0
+#define V_TP_IN_CSPI_TUNNEL(x) ((x) << S_TP_IN_CSPI_TUNNEL)
+#define F_TP_IN_CSPI_TUNNEL V_TP_IN_CSPI_TUNNEL(1U)
+
+#define S_TP_IN_CSPI_ETHERNET 1
+#define V_TP_IN_CSPI_ETHERNET(x) ((x) << S_TP_IN_CSPI_ETHERNET)
+#define F_TP_IN_CSPI_ETHERNET V_TP_IN_CSPI_ETHERNET(1U)
+
+#define S_TP_IN_CSPI_CPL 3
+#define V_TP_IN_CSPI_CPL(x) ((x) << S_TP_IN_CSPI_CPL)
+#define F_TP_IN_CSPI_CPL V_TP_IN_CSPI_CPL(1U)
+
+#define S_TP_IN_CSPI_POS 4
+#define V_TP_IN_CSPI_POS(x) ((x) << S_TP_IN_CSPI_POS)
+#define F_TP_IN_CSPI_POS V_TP_IN_CSPI_POS(1U)
+
+#define S_TP_IN_CSPI_CHECK_IP_CSUM 5
+#define V_TP_IN_CSPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_IP_CSUM)
+#define F_TP_IN_CSPI_CHECK_IP_CSUM V_TP_IN_CSPI_CHECK_IP_CSUM(1U)
+
+#define S_TP_IN_CSPI_CHECK_TCP_CSUM 6
+#define V_TP_IN_CSPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_TCP_CSUM)
+#define F_TP_IN_CSPI_CHECK_TCP_CSUM V_TP_IN_CSPI_CHECK_TCP_CSUM(1U)
+
+#define S_TP_IN_ESPI_TUNNEL 7
+#define V_TP_IN_ESPI_TUNNEL(x) ((x) << S_TP_IN_ESPI_TUNNEL)
+#define F_TP_IN_ESPI_TUNNEL V_TP_IN_ESPI_TUNNEL(1U)
+
+#define S_TP_IN_ESPI_ETHERNET 8
+#define V_TP_IN_ESPI_ETHERNET(x) ((x) << S_TP_IN_ESPI_ETHERNET)
+#define F_TP_IN_ESPI_ETHERNET V_TP_IN_ESPI_ETHERNET(1U)
+
+#define S_TP_IN_ESPI_CPL 10
+#define V_TP_IN_ESPI_CPL(x) ((x) << S_TP_IN_ESPI_CPL)
+#define F_TP_IN_ESPI_CPL V_TP_IN_ESPI_CPL(1U)
+
+#define S_TP_IN_ESPI_POS 11
+#define V_TP_IN_ESPI_POS(x) ((x) << S_TP_IN_ESPI_POS)
+#define F_TP_IN_ESPI_POS V_TP_IN_ESPI_POS(1U)
+
+#define S_TP_IN_ESPI_CHECK_IP_CSUM 12
+#define V_TP_IN_ESPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_IP_CSUM)
+#define F_TP_IN_ESPI_CHECK_IP_CSUM V_TP_IN_ESPI_CHECK_IP_CSUM(1U)
+
+#define S_TP_IN_ESPI_CHECK_TCP_CSUM 13
+#define V_TP_IN_ESPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_TCP_CSUM)
+#define F_TP_IN_ESPI_CHECK_TCP_CSUM V_TP_IN_ESPI_CHECK_TCP_CSUM(1U)
+
+#define S_OFFLOAD_DISABLE 14
+#define V_OFFLOAD_DISABLE(x) ((x) << S_OFFLOAD_DISABLE)
+#define F_OFFLOAD_DISABLE V_OFFLOAD_DISABLE(1U)
+
+#define A_TP_OUT_CONFIG 0x304
+
+#define S_TP_OUT_C_ETH 0
+#define V_TP_OUT_C_ETH(x) ((x) << S_TP_OUT_C_ETH)
+#define F_TP_OUT_C_ETH V_TP_OUT_C_ETH(1U)
+
+#define S_TP_OUT_CSPI_CPL 2
+#define V_TP_OUT_CSPI_CPL(x) ((x) << S_TP_OUT_CSPI_CPL)
+#define F_TP_OUT_CSPI_CPL V_TP_OUT_CSPI_CPL(1U)
+
+#define S_TP_OUT_CSPI_POS 3
+#define V_TP_OUT_CSPI_POS(x) ((x) << S_TP_OUT_CSPI_POS)
+#define F_TP_OUT_CSPI_POS V_TP_OUT_CSPI_POS(1U)
+
+#define S_TP_OUT_CSPI_GENERATE_IP_CSUM 4
+#define V_TP_OUT_CSPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_CSPI_GENERATE_IP_CSUM)
+#define F_TP_OUT_CSPI_GENERATE_IP_CSUM V_TP_OUT_CSPI_GENERATE_IP_CSUM(1U)
+
+#define S_TP_OUT_CSPI_GENERATE_TCP_CSUM 5
+#define V_TP_OUT_CSPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_CSPI_GENERATE_TCP_CSUM)
+#define F_TP_OUT_CSPI_GENERATE_TCP_CSUM V_TP_OUT_CSPI_GENERATE_TCP_CSUM(1U)
+
+#define S_TP_OUT_ESPI_ETHERNET 6
+#define V_TP_OUT_ESPI_ETHERNET(x) ((x) << S_TP_OUT_ESPI_ETHERNET)
+#define F_TP_OUT_ESPI_ETHERNET V_TP_OUT_ESPI_ETHERNET(1U)
+
+#define S_TP_OUT_ESPI_TAG_ETHERNET 7
+#define V_TP_OUT_ESPI_TAG_ETHERNET(x) ((x) << S_TP_OUT_ESPI_TAG_ETHERNET)
+#define F_TP_OUT_ESPI_TAG_ETHERNET V_TP_OUT_ESPI_TAG_ETHERNET(1U)
+
+#define S_TP_OUT_ESPI_CPL 8
+#define V_TP_OUT_ESPI_CPL(x) ((x) << S_TP_OUT_ESPI_CPL)
+#define F_TP_OUT_ESPI_CPL V_TP_OUT_ESPI_CPL(1U)
+
+#define S_TP_OUT_ESPI_POS 9
+#define V_TP_OUT_ESPI_POS(x) ((x) << S_TP_OUT_ESPI_POS)
+#define F_TP_OUT_ESPI_POS V_TP_OUT_ESPI_POS(1U)
+
+#define S_TP_OUT_ESPI_GENERATE_IP_CSUM 10
+#define V_TP_OUT_ESPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_IP_CSUM)
+#define F_TP_OUT_ESPI_GENERATE_IP_CSUM V_TP_OUT_ESPI_GENERATE_IP_CSUM(1U)
+
+#define S_TP_OUT_ESPI_GENERATE_TCP_CSUM 11
+#define V_TP_OUT_ESPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_TCP_CSUM)
+#define F_TP_OUT_ESPI_GENERATE_TCP_CSUM V_TP_OUT_ESPI_GENERATE_TCP_CSUM(1U)
+
+#define A_TP_GLOBAL_CONFIG 0x308
+
+#define S_IP_TTL 0
+#define M_IP_TTL 0xff
+#define V_IP_TTL(x) ((x) << S_IP_TTL)
+#define G_IP_TTL(x) (((x) >> S_IP_TTL) & M_IP_TTL)
+
+#define S_TCAM_SERVER_REGION_USAGE 8
+#define M_TCAM_SERVER_REGION_USAGE 0x3
+#define V_TCAM_SERVER_REGION_USAGE(x) ((x) << S_TCAM_SERVER_REGION_USAGE)
+#define G_TCAM_SERVER_REGION_USAGE(x) (((x) >> S_TCAM_SERVER_REGION_USAGE) & M_TCAM_SERVER_REGION_USAGE)
+
+#define S_QOS_MAPPING 10
+#define V_QOS_MAPPING(x) ((x) << S_QOS_MAPPING)
+#define F_QOS_MAPPING V_QOS_MAPPING(1U)
+
+#define S_TCP_CSUM 11
+#define V_TCP_CSUM(x) ((x) << S_TCP_CSUM)
+#define F_TCP_CSUM V_TCP_CSUM(1U)
+
+#define S_UDP_CSUM 12
+#define V_UDP_CSUM(x) ((x) << S_UDP_CSUM)
+#define F_UDP_CSUM V_UDP_CSUM(1U)
+
+#define S_IP_CSUM 13
+#define V_IP_CSUM(x) ((x) << S_IP_CSUM)
+#define F_IP_CSUM V_IP_CSUM(1U)
+
+#define S_IP_ID_SPLIT 14
+#define V_IP_ID_SPLIT(x) ((x) << S_IP_ID_SPLIT)
+#define F_IP_ID_SPLIT V_IP_ID_SPLIT(1U)
+
+#define S_PATH_MTU 15
+#define V_PATH_MTU(x) ((x) << S_PATH_MTU)
+#define F_PATH_MTU V_PATH_MTU(1U)
+
+#define S_5TUPLE_LOOKUP 17
+#define M_5TUPLE_LOOKUP 0x3
+#define V_5TUPLE_LOOKUP(x) ((x) << S_5TUPLE_LOOKUP)
+#define G_5TUPLE_LOOKUP(x) (((x) >> S_5TUPLE_LOOKUP) & M_5TUPLE_LOOKUP)
+
+#define S_IP_FRAGMENT_DROP 19
+#define V_IP_FRAGMENT_DROP(x) ((x) << S_IP_FRAGMENT_DROP)
+#define F_IP_FRAGMENT_DROP V_IP_FRAGMENT_DROP(1U)
+
+#define S_PING_DROP 20
+#define V_PING_DROP(x) ((x) << S_PING_DROP)
+#define F_PING_DROP V_PING_DROP(1U)
+
+#define S_PROTECT_MODE 21
+#define V_PROTECT_MODE(x) ((x) << S_PROTECT_MODE)
+#define F_PROTECT_MODE V_PROTECT_MODE(1U)
+
+#define S_SYN_COOKIE_ALGORITHM 22
+#define V_SYN_COOKIE_ALGORITHM(x) ((x) << S_SYN_COOKIE_ALGORITHM)
+#define F_SYN_COOKIE_ALGORITHM V_SYN_COOKIE_ALGORITHM(1U)
+
+#define S_ATTACK_FILTER 23
+#define V_ATTACK_FILTER(x) ((x) << S_ATTACK_FILTER)
+#define F_ATTACK_FILTER V_ATTACK_FILTER(1U)
+
+#define S_INTERFACE_TYPE 24
+#define V_INTERFACE_TYPE(x) ((x) << S_INTERFACE_TYPE)
+#define F_INTERFACE_TYPE V_INTERFACE_TYPE(1U)
+
+#define S_DISABLE_RX_FLOW_CONTROL 25
+#define V_DISABLE_RX_FLOW_CONTROL(x) ((x) << S_DISABLE_RX_FLOW_CONTROL)
+#define F_DISABLE_RX_FLOW_CONTROL V_DISABLE_RX_FLOW_CONTROL(1U)
+
+#define S_SYN_COOKIE_PARAMETER 26
+#define M_SYN_COOKIE_PARAMETER 0x3f
+#define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER)
+#define G_SYN_COOKIE_PARAMETER(x) (((x) >> S_SYN_COOKIE_PARAMETER) & M_SYN_COOKIE_PARAMETER)
+
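+/*
+ * Checksum offload and TTL policy are gathered in A_TP_GLOBAL_CONFIG; a
+ * minimal sketch enabling the offload bits defined above (values purely
+ * illustrative):
+ *
+ *	writel(V_IP_TTL(64) | F_TCP_CSUM | F_UDP_CSUM | F_IP_CSUM |
+ *	       F_PATH_MTU, adapter->regs + A_TP_GLOBAL_CONFIG);
+ */
+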
+#define A_TP_GLOBAL_RX_CREDITS 0x30c
+#define A_TP_CM_SIZE 0x310
+#define A_TP_CM_MM_BASE 0x314
+
+#define S_CM_MEMMGR_BASE 0
+#define M_CM_MEMMGR_BASE 0xfffffff
+#define V_CM_MEMMGR_BASE(x) ((x) << S_CM_MEMMGR_BASE)
+#define G_CM_MEMMGR_BASE(x) (((x) >> S_CM_MEMMGR_BASE) & M_CM_MEMMGR_BASE)
+
+#define A_TP_CM_TIMER_BASE 0x318
+
+#define S_CM_TIMER_BASE 0
+#define M_CM_TIMER_BASE 0xfffffff
+#define V_CM_TIMER_BASE(x) ((x) << S_CM_TIMER_BASE)
+#define G_CM_TIMER_BASE(x) (((x) >> S_CM_TIMER_BASE) & M_CM_TIMER_BASE)
+
+#define A_TP_PM_SIZE 0x31c
+#define A_TP_PM_TX_BASE 0x320
+#define A_TP_PM_DEFRAG_BASE 0x324
+#define A_TP_PM_RX_BASE 0x328
+#define A_TP_PM_RX_PG_SIZE 0x32c
+#define A_TP_PM_RX_MAX_PGS 0x330
+#define A_TP_PM_TX_PG_SIZE 0x334
+#define A_TP_PM_TX_MAX_PGS 0x338
+#define A_TP_TCP_OPTIONS 0x340
+
+#define S_TIMESTAMP 0
+#define M_TIMESTAMP 0x3
+#define V_TIMESTAMP(x) ((x) << S_TIMESTAMP)
+#define G_TIMESTAMP(x) (((x) >> S_TIMESTAMP) & M_TIMESTAMP)
+
+#define S_WINDOW_SCALE 2
+#define M_WINDOW_SCALE 0x3
+#define V_WINDOW_SCALE(x) ((x) << S_WINDOW_SCALE)
+#define G_WINDOW_SCALE(x) (((x) >> S_WINDOW_SCALE) & M_WINDOW_SCALE)
+
+#define S_SACK 4
+#define M_SACK 0x3
+#define V_SACK(x) ((x) << S_SACK)
+#define G_SACK(x) (((x) >> S_SACK) & M_SACK)
+
+#define S_ECN 6
+#define M_ECN 0x3
+#define V_ECN(x) ((x) << S_ECN)
+#define G_ECN(x) (((x) >> S_ECN) & M_ECN)
+
+#define S_SACK_ALGORITHM 8
+#define M_SACK_ALGORITHM 0x3
+#define V_SACK_ALGORITHM(x) ((x) << S_SACK_ALGORITHM)
+#define G_SACK_ALGORITHM(x) (((x) >> S_SACK_ALGORITHM) & M_SACK_ALGORITHM)
+
+#define S_MSS 10
+#define V_MSS(x) ((x) << S_MSS)
+#define F_MSS V_MSS(1U)
+
+#define S_DEFAULT_PEER_MSS 16
+#define M_DEFAULT_PEER_MSS 0xffff
+#define V_DEFAULT_PEER_MSS(x) ((x) << S_DEFAULT_PEER_MSS)
+#define G_DEFAULT_PEER_MSS(x) (((x) >> S_DEFAULT_PEER_MSS) & M_DEFAULT_PEER_MSS)
+
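+/*
+ * The two-bit TCP option fields use an encoding that is not spelled out
+ * in this file; assuming 1 means "enabled", a configuration write might
+ * look like (sketch only):
+ *
+ *	writel(V_TIMESTAMP(1) | V_WINDOW_SCALE(1) | V_SACK(1) |
+ *	       V_DEFAULT_PEER_MSS(1460),
+ *	       adapter->regs + A_TP_TCP_OPTIONS);
+ */
+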
+#define A_TP_DACK_CONFIG 0x344
+
+#define S_DACK_MODE 0
+#define V_DACK_MODE(x) ((x) << S_DACK_MODE)
+#define F_DACK_MODE V_DACK_MODE(1U)
+
+#define S_DACK_AUTO_MGMT 1
+#define V_DACK_AUTO_MGMT(x) ((x) << S_DACK_AUTO_MGMT)
+#define F_DACK_AUTO_MGMT V_DACK_AUTO_MGMT(1U)
+
+#define S_DACK_AUTO_CAREFUL 2
+#define V_DACK_AUTO_CAREFUL(x) ((x) << S_DACK_AUTO_CAREFUL)
+#define F_DACK_AUTO_CAREFUL V_DACK_AUTO_CAREFUL(1U)
+
+#define S_DACK_MSS_SELECTOR 3
+#define M_DACK_MSS_SELECTOR 0x3
+#define V_DACK_MSS_SELECTOR(x) ((x) << S_DACK_MSS_SELECTOR)
+#define G_DACK_MSS_SELECTOR(x) (((x) >> S_DACK_MSS_SELECTOR) & M_DACK_MSS_SELECTOR)
+
+#define S_DACK_BYTE_THRESHOLD 5
+#define M_DACK_BYTE_THRESHOLD 0xfffff
+#define V_DACK_BYTE_THRESHOLD(x) ((x) << S_DACK_BYTE_THRESHOLD)
+#define G_DACK_BYTE_THRESHOLD(x) (((x) >> S_DACK_BYTE_THRESHOLD) & M_DACK_BYTE_THRESHOLD)
+
+#define A_TP_PC_CONFIG 0x348
+
+#define S_TP_ACCESS_LATENCY 0
+#define M_TP_ACCESS_LATENCY 0xf
+#define V_TP_ACCESS_LATENCY(x) ((x) << S_TP_ACCESS_LATENCY)
+#define G_TP_ACCESS_LATENCY(x) (((x) >> S_TP_ACCESS_LATENCY) & M_TP_ACCESS_LATENCY)
+
+#define S_HELD_FIN_DISABLE 4
+#define V_HELD_FIN_DISABLE(x) ((x) << S_HELD_FIN_DISABLE)
+#define F_HELD_FIN_DISABLE V_HELD_FIN_DISABLE(1U)
+
+#define S_DDP_FC_ENABLE 5
+#define V_DDP_FC_ENABLE(x) ((x) << S_DDP_FC_ENABLE)
+#define F_DDP_FC_ENABLE V_DDP_FC_ENABLE(1U)
+
+#define S_RDMA_ERR_ENABLE 6
+#define V_RDMA_ERR_ENABLE(x) ((x) << S_RDMA_ERR_ENABLE)
+#define F_RDMA_ERR_ENABLE V_RDMA_ERR_ENABLE(1U)
+
+#define S_FAST_PDU_DELIVERY 7
+#define V_FAST_PDU_DELIVERY(x) ((x) << S_FAST_PDU_DELIVERY)
+#define F_FAST_PDU_DELIVERY V_FAST_PDU_DELIVERY(1U)
+
+#define S_CLEAR_FIN 8
+#define V_CLEAR_FIN(x) ((x) << S_CLEAR_FIN)
+#define F_CLEAR_FIN V_CLEAR_FIN(1U)
+
+#define S_DIS_TX_FILL_WIN_PUSH 12
+#define V_DIS_TX_FILL_WIN_PUSH(x) ((x) << S_DIS_TX_FILL_WIN_PUSH)
+#define F_DIS_TX_FILL_WIN_PUSH V_DIS_TX_FILL_WIN_PUSH(1U)
+
+#define S_TP_PC_REV 30
+#define M_TP_PC_REV 0x3
+#define V_TP_PC_REV(x) ((x) << S_TP_PC_REV)
+#define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV)
+
+#define A_TP_BACKOFF0 0x350
+
+#define S_ELEMENT0 0
+#define M_ELEMENT0 0xff
+#define V_ELEMENT0(x) ((x) << S_ELEMENT0)
+#define G_ELEMENT0(x) (((x) >> S_ELEMENT0) & M_ELEMENT0)
+
+#define S_ELEMENT1 8
+#define M_ELEMENT1 0xff
+#define V_ELEMENT1(x) ((x) << S_ELEMENT1)
+#define G_ELEMENT1(x) (((x) >> S_ELEMENT1) & M_ELEMENT1)
+
+#define S_ELEMENT2 16
+#define M_ELEMENT2 0xff
+#define V_ELEMENT2(x) ((x) << S_ELEMENT2)
+#define G_ELEMENT2(x) (((x) >> S_ELEMENT2) & M_ELEMENT2)
+
+#define S_ELEMENT3 24
+#define M_ELEMENT3 0xff
+#define V_ELEMENT3(x) ((x) << S_ELEMENT3)
+#define G_ELEMENT3(x) (((x) >> S_ELEMENT3) & M_ELEMENT3)
+
+#define A_TP_BACKOFF1 0x354
+#define A_TP_BACKOFF2 0x358
+#define A_TP_BACKOFF3 0x35c
+#define A_TP_PARA_REG0 0x360
+
+#define S_VAR_MULT 0
+#define M_VAR_MULT 0xf
+#define V_VAR_MULT(x) ((x) << S_VAR_MULT)
+#define G_VAR_MULT(x) (((x) >> S_VAR_MULT) & M_VAR_MULT)
+
+#define S_VAR_GAIN 4
+#define M_VAR_GAIN 0xf
+#define V_VAR_GAIN(x) ((x) << S_VAR_GAIN)
+#define G_VAR_GAIN(x) (((x) >> S_VAR_GAIN) & M_VAR_GAIN)
+
+#define S_SRTT_GAIN 8
+#define M_SRTT_GAIN 0xf
+#define V_SRTT_GAIN(x) ((x) << S_SRTT_GAIN)
+#define G_SRTT_GAIN(x) (((x) >> S_SRTT_GAIN) & M_SRTT_GAIN)
+
+#define S_RTTVAR_INIT 12
+#define M_RTTVAR_INIT 0xf
+#define V_RTTVAR_INIT(x) ((x) << S_RTTVAR_INIT)
+#define G_RTTVAR_INIT(x) (((x) >> S_RTTVAR_INIT) & M_RTTVAR_INIT)
+
+#define S_DUP_THRESH 20
+#define M_DUP_THRESH 0xf
+#define V_DUP_THRESH(x) ((x) << S_DUP_THRESH)
+#define G_DUP_THRESH(x) (((x) >> S_DUP_THRESH) & M_DUP_THRESH)
+
+#define S_INIT_CONG_WIN 24
+#define M_INIT_CONG_WIN 0x7
+#define V_INIT_CONG_WIN(x) ((x) << S_INIT_CONG_WIN)
+#define G_INIT_CONG_WIN(x) (((x) >> S_INIT_CONG_WIN) & M_INIT_CONG_WIN)
+
+#define A_TP_PARA_REG1 0x364
+
+#define S_INITIAL_SLOW_START_THRESHOLD 0
+#define M_INITIAL_SLOW_START_THRESHOLD 0xffff
+#define V_INITIAL_SLOW_START_THRESHOLD(x) ((x) << S_INITIAL_SLOW_START_THRESHOLD)
+#define G_INITIAL_SLOW_START_THRESHOLD(x) (((x) >> S_INITIAL_SLOW_START_THRESHOLD) & M_INITIAL_SLOW_START_THRESHOLD)
+
+#define S_RECEIVE_BUFFER_SIZE 16
+#define M_RECEIVE_BUFFER_SIZE 0xffff
+#define V_RECEIVE_BUFFER_SIZE(x) ((x) << S_RECEIVE_BUFFER_SIZE)
+#define G_RECEIVE_BUFFER_SIZE(x) (((x) >> S_RECEIVE_BUFFER_SIZE) & M_RECEIVE_BUFFER_SIZE)
+
+#define A_TP_PARA_REG2 0x368
+
+#define S_RX_COALESCE_SIZE 0
+#define M_RX_COALESCE_SIZE 0xffff
+#define V_RX_COALESCE_SIZE(x) ((x) << S_RX_COALESCE_SIZE)
+#define G_RX_COALESCE_SIZE(x) (((x) >> S_RX_COALESCE_SIZE) & M_RX_COALESCE_SIZE)
+
+#define S_MAX_RX_SIZE 16
+#define M_MAX_RX_SIZE 0xffff
+#define V_MAX_RX_SIZE(x) ((x) << S_MAX_RX_SIZE)
+#define G_MAX_RX_SIZE(x) (((x) >> S_MAX_RX_SIZE) & M_MAX_RX_SIZE)
+
+#define A_TP_PARA_REG3 0x36c
+
+#define S_RX_COALESCING_PSH_DELIVER 0
+#define V_RX_COALESCING_PSH_DELIVER(x) ((x) << S_RX_COALESCING_PSH_DELIVER)
+#define F_RX_COALESCING_PSH_DELIVER V_RX_COALESCING_PSH_DELIVER(1U)
+
+#define S_RX_COALESCING_ENABLE 1
+#define V_RX_COALESCING_ENABLE(x) ((x) << S_RX_COALESCING_ENABLE)
+#define F_RX_COALESCING_ENABLE V_RX_COALESCING_ENABLE(1U)
+
+#define S_TAHOE_ENABLE 2
+#define V_TAHOE_ENABLE(x) ((x) << S_TAHOE_ENABLE)
+#define F_TAHOE_ENABLE V_TAHOE_ENABLE(1U)
+
+#define S_MAX_REORDER_FRAGMENTS 12
+#define M_MAX_REORDER_FRAGMENTS 0x7
+#define V_MAX_REORDER_FRAGMENTS(x) ((x) << S_MAX_REORDER_FRAGMENTS)
+#define G_MAX_REORDER_FRAGMENTS(x) (((x) >> S_MAX_REORDER_FRAGMENTS) & M_MAX_REORDER_FRAGMENTS)
+
+#define A_TP_TIMER_RESOLUTION 0x390
+
+#define S_DELAYED_ACK_TIMER_RESOLUTION 0
+#define M_DELAYED_ACK_TIMER_RESOLUTION 0x3f
+#define V_DELAYED_ACK_TIMER_RESOLUTION(x) ((x) << S_DELAYED_ACK_TIMER_RESOLUTION)
+#define G_DELAYED_ACK_TIMER_RESOLUTION(x) (((x) >> S_DELAYED_ACK_TIMER_RESOLUTION) & M_DELAYED_ACK_TIMER_RESOLUTION)
+
+#define S_GENERIC_TIMER_RESOLUTION 16
+#define M_GENERIC_TIMER_RESOLUTION 0x3f
+#define V_GENERIC_TIMER_RESOLUTION(x) ((x) << S_GENERIC_TIMER_RESOLUTION)
+#define G_GENERIC_TIMER_RESOLUTION(x) (((x) >> S_GENERIC_TIMER_RESOLUTION) & M_GENERIC_TIMER_RESOLUTION)
+
+#define A_TP_2MSL 0x394
+
+#define S_2MSL 0
+#define M_2MSL 0x3fffffff
+#define V_2MSL(x) ((x) << S_2MSL)
+#define G_2MSL(x) (((x) >> S_2MSL) & M_2MSL)
+
+#define A_TP_RXT_MIN 0x398
+
+#define S_RETRANSMIT_TIMER_MIN 0
+#define M_RETRANSMIT_TIMER_MIN 0xffff
+#define V_RETRANSMIT_TIMER_MIN(x) ((x) << S_RETRANSMIT_TIMER_MIN)
+#define G_RETRANSMIT_TIMER_MIN(x) (((x) >> S_RETRANSMIT_TIMER_MIN) & M_RETRANSMIT_TIMER_MIN)
+
+#define A_TP_RXT_MAX 0x39c
+
+#define S_RETRANSMIT_TIMER_MAX 0
+#define M_RETRANSMIT_TIMER_MAX 0x3fffffff
+#define V_RETRANSMIT_TIMER_MAX(x) ((x) << S_RETRANSMIT_TIMER_MAX)
+#define G_RETRANSMIT_TIMER_MAX(x) (((x) >> S_RETRANSMIT_TIMER_MAX) & M_RETRANSMIT_TIMER_MAX)
+
+#define A_TP_PERS_MIN 0x3a0
+
+#define S_PERSIST_TIMER_MIN 0
+#define M_PERSIST_TIMER_MIN 0xffff
+#define V_PERSIST_TIMER_MIN(x) ((x) << S_PERSIST_TIMER_MIN)
+#define G_PERSIST_TIMER_MIN(x) (((x) >> S_PERSIST_TIMER_MIN) & M_PERSIST_TIMER_MIN)
+
+#define A_TP_PERS_MAX 0x3a4
+
+#define S_PERSIST_TIMER_MAX 0
+#define M_PERSIST_TIMER_MAX 0x3fffffff
+#define V_PERSIST_TIMER_MAX(x) ((x) << S_PERSIST_TIMER_MAX)
+#define G_PERSIST_TIMER_MAX(x) (((x) >> S_PERSIST_TIMER_MAX) & M_PERSIST_TIMER_MAX)
+
+#define A_TP_KEEP_IDLE 0x3ac
+
+#define S_KEEP_ALIVE_IDLE_TIME 0
+#define M_KEEP_ALIVE_IDLE_TIME 0x3fffffff
+#define V_KEEP_ALIVE_IDLE_TIME(x) ((x) << S_KEEP_ALIVE_IDLE_TIME)
+#define G_KEEP_ALIVE_IDLE_TIME(x) (((x) >> S_KEEP_ALIVE_IDLE_TIME) & M_KEEP_ALIVE_IDLE_TIME)
+
+#define A_TP_KEEP_INTVL 0x3b0
+
+#define S_KEEP_ALIVE_INTERVAL_TIME 0
+#define M_KEEP_ALIVE_INTERVAL_TIME 0x3fffffff
+#define V_KEEP_ALIVE_INTERVAL_TIME(x) ((x) << S_KEEP_ALIVE_INTERVAL_TIME)
+#define G_KEEP_ALIVE_INTERVAL_TIME(x) (((x) >> S_KEEP_ALIVE_INTERVAL_TIME) & M_KEEP_ALIVE_INTERVAL_TIME)
+
+#define A_TP_INIT_SRTT 0x3b4
+
+#define S_INITIAL_SRTT 0
+#define M_INITIAL_SRTT 0xffff
+#define V_INITIAL_SRTT(x) ((x) << S_INITIAL_SRTT)
+#define G_INITIAL_SRTT(x) (((x) >> S_INITIAL_SRTT) & M_INITIAL_SRTT)
+
+#define A_TP_DACK_TIME 0x3b8
+
+#define S_DELAYED_ACK_TIME 0
+#define M_DELAYED_ACK_TIME 0x7ff
+#define V_DELAYED_ACK_TIME(x) ((x) << S_DELAYED_ACK_TIME)
+#define G_DELAYED_ACK_TIME(x) (((x) >> S_DELAYED_ACK_TIME) & M_DELAYED_ACK_TIME)
+
+#define A_TP_FINWAIT2_TIME 0x3bc
+
+#define S_FINWAIT2_TIME 0
+#define M_FINWAIT2_TIME 0x3fffffff
+#define V_FINWAIT2_TIME(x) ((x) << S_FINWAIT2_TIME)
+#define G_FINWAIT2_TIME(x) (((x) >> S_FINWAIT2_TIME) & M_FINWAIT2_TIME)
+
+#define A_TP_FAST_FINWAIT2_TIME 0x3c0
+
+#define S_FAST_FINWAIT2_TIME 0
+#define M_FAST_FINWAIT2_TIME 0x3fffffff
+#define V_FAST_FINWAIT2_TIME(x) ((x) << S_FAST_FINWAIT2_TIME)
+#define G_FAST_FINWAIT2_TIME(x) (((x) >> S_FAST_FINWAIT2_TIME) & M_FAST_FINWAIT2_TIME)
+
+#define A_TP_SHIFT_CNT 0x3c4
+
+#define S_KEEPALIVE_MAX 0
+#define M_KEEPALIVE_MAX 0xff
+#define V_KEEPALIVE_MAX(x) ((x) << S_KEEPALIVE_MAX)
+#define G_KEEPALIVE_MAX(x) (((x) >> S_KEEPALIVE_MAX) & M_KEEPALIVE_MAX)
+
+#define S_WINDOWPROBE_MAX 8
+#define M_WINDOWPROBE_MAX 0xff
+#define V_WINDOWPROBE_MAX(x) ((x) << S_WINDOWPROBE_MAX)
+#define G_WINDOWPROBE_MAX(x) (((x) >> S_WINDOWPROBE_MAX) & M_WINDOWPROBE_MAX)
+
+#define S_RETRANSMISSION_MAX 16
+#define M_RETRANSMISSION_MAX 0xff
+#define V_RETRANSMISSION_MAX(x) ((x) << S_RETRANSMISSION_MAX)
+#define G_RETRANSMISSION_MAX(x) (((x) >> S_RETRANSMISSION_MAX) & M_RETRANSMISSION_MAX)
+
+#define S_SYN_MAX 24
+#define M_SYN_MAX 0xff
+#define V_SYN_MAX(x) ((x) << S_SYN_MAX)
+#define G_SYN_MAX(x) (((x) >> S_SYN_MAX) & M_SYN_MAX)
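+
+/*
+ * The shift-count register packs one retry limit per byte, so a full
+ * configuration is a single OR of V_ macros (illustrative values):
+ *
+ *	writel(V_KEEPALIVE_MAX(9) | V_WINDOWPROBE_MAX(15) |
+ *	       V_RETRANSMISSION_MAX(15) | V_SYN_MAX(5),
+ *	       adapter->regs + A_TP_SHIFT_CNT);
+ */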
+
+#define A_TP_QOS_REG0 0x3e0
+
+#define S_L3_VALUE 0
+#define M_L3_VALUE 0x3f
+#define V_L3_VALUE(x) ((x) << S_L3_VALUE)
+#define G_L3_VALUE(x) (((x) >> S_L3_VALUE) & M_L3_VALUE)
+
+#define A_TP_QOS_REG1 0x3e4
+#define A_TP_QOS_REG2 0x3e8
+#define A_TP_QOS_REG3 0x3ec
+#define A_TP_QOS_REG4 0x3f0
+#define A_TP_QOS_REG5 0x3f4
+#define A_TP_QOS_REG6 0x3f8
+#define A_TP_QOS_REG7 0x3fc
+#define A_TP_MTU_REG0 0x404
+#define A_TP_MTU_REG1 0x408
+#define A_TP_MTU_REG2 0x40c
+#define A_TP_MTU_REG3 0x410
+#define A_TP_MTU_REG4 0x414
+#define A_TP_MTU_REG5 0x418
+#define A_TP_MTU_REG6 0x41c
+#define A_TP_MTU_REG7 0x420
+#define A_TP_RESET 0x44c
+
+#define S_TP_RESET 0
+#define V_TP_RESET(x) ((x) << S_TP_RESET)
+#define F_TP_RESET V_TP_RESET(1U)
+
+#define S_CM_MEMMGR_INIT 1
+#define V_CM_MEMMGR_INIT(x) ((x) << S_CM_MEMMGR_INIT)
+#define F_CM_MEMMGR_INIT V_CM_MEMMGR_INIT(1U)
+
+#define A_TP_MIB_INDEX 0x450
+#define A_TP_MIB_DATA 0x454
+#define A_TP_SYNC_TIME_HI 0x458
+#define A_TP_SYNC_TIME_LO 0x45c
+#define A_TP_CM_MM_RX_FLST_BASE 0x460
+
+#define S_CM_MEMMGR_RX_FREE_LIST_BASE 0
+#define M_CM_MEMMGR_RX_FREE_LIST_BASE 0xfffffff
+#define V_CM_MEMMGR_RX_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_RX_FREE_LIST_BASE)
+#define G_CM_MEMMGR_RX_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_RX_FREE_LIST_BASE) & M_CM_MEMMGR_RX_FREE_LIST_BASE)
+
+#define A_TP_CM_MM_TX_FLST_BASE 0x464
+
+#define S_CM_MEMMGR_TX_FREE_LIST_BASE 0
+#define M_CM_MEMMGR_TX_FREE_LIST_BASE 0xfffffff
+#define V_CM_MEMMGR_TX_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_TX_FREE_LIST_BASE)
+#define G_CM_MEMMGR_TX_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_TX_FREE_LIST_BASE) & M_CM_MEMMGR_TX_FREE_LIST_BASE)
+
+#define A_TP_CM_MM_P_FLST_BASE 0x468
+
+#define S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE 0
+#define M_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE 0xfffffff
+#define V_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE)
+#define G_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE) & M_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE)
+
+#define A_TP_CM_MM_MAX_P 0x46c
+
+#define S_CM_MEMMGR_MAX_PSTRUCT 0
+#define M_CM_MEMMGR_MAX_PSTRUCT 0xfffffff
+#define V_CM_MEMMGR_MAX_PSTRUCT(x) ((x) << S_CM_MEMMGR_MAX_PSTRUCT)
+#define G_CM_MEMMGR_MAX_PSTRUCT(x) (((x) >> S_CM_MEMMGR_MAX_PSTRUCT) & M_CM_MEMMGR_MAX_PSTRUCT)
+
+#define A_TP_INT_ENABLE 0x470
+
+#define S_TX_FREE_LIST_EMPTY 0
+#define V_TX_FREE_LIST_EMPTY(x) ((x) << S_TX_FREE_LIST_EMPTY)
+#define F_TX_FREE_LIST_EMPTY V_TX_FREE_LIST_EMPTY(1U)
+
+#define S_RX_FREE_LIST_EMPTY 1
+#define V_RX_FREE_LIST_EMPTY(x) ((x) << S_RX_FREE_LIST_EMPTY)
+#define F_RX_FREE_LIST_EMPTY V_RX_FREE_LIST_EMPTY(1U)
+
+#define A_TP_INT_CAUSE 0x474
+#define A_TP_TIMER_SEPARATOR 0x4a4
+
+#define S_DISABLE_PAST_TIMER_INSERTION 0
+#define V_DISABLE_PAST_TIMER_INSERTION(x) ((x) << S_DISABLE_PAST_TIMER_INSERTION)
+#define F_DISABLE_PAST_TIMER_INSERTION V_DISABLE_PAST_TIMER_INSERTION(1U)
+
+#define S_MODULATION_TIMER_SEPARATOR 1
+#define M_MODULATION_TIMER_SEPARATOR 0x7fff
+#define V_MODULATION_TIMER_SEPARATOR(x) ((x) << S_MODULATION_TIMER_SEPARATOR)
+#define G_MODULATION_TIMER_SEPARATOR(x) (((x) >> S_MODULATION_TIMER_SEPARATOR) & M_MODULATION_TIMER_SEPARATOR)
+
+#define S_GLOBAL_TIMER_SEPARATOR 16
+#define M_GLOBAL_TIMER_SEPARATOR 0xffff
+#define V_GLOBAL_TIMER_SEPARATOR(x) ((x) << S_GLOBAL_TIMER_SEPARATOR)
+#define G_GLOBAL_TIMER_SEPARATOR(x) (((x) >> S_GLOBAL_TIMER_SEPARATOR) & M_GLOBAL_TIMER_SEPARATOR)
+
+#define A_TP_CM_FC_MODE 0x4b0
+#define A_TP_PC_CONGESTION_CNTL 0x4b4
+#define A_TP_TX_DROP_CONFIG 0x4b8
+
+#define S_ENABLE_TX_DROP 31
+#define V_ENABLE_TX_DROP(x) ((x) << S_ENABLE_TX_DROP)
+#define F_ENABLE_TX_DROP V_ENABLE_TX_DROP(1U)
+
+#define S_ENABLE_TX_ERROR 30
+#define V_ENABLE_TX_ERROR(x) ((x) << S_ENABLE_TX_ERROR)
+#define F_ENABLE_TX_ERROR V_ENABLE_TX_ERROR(1U)
+
+#define S_DROP_TICKS_CNT 4
+#define M_DROP_TICKS_CNT 0x3ffffff
+#define V_DROP_TICKS_CNT(x) ((x) << S_DROP_TICKS_CNT)
+#define G_DROP_TICKS_CNT(x) (((x) >> S_DROP_TICKS_CNT) & M_DROP_TICKS_CNT)
+
+#define S_NUM_PKTS_DROPPED 0
+#define M_NUM_PKTS_DROPPED 0xf
+#define V_NUM_PKTS_DROPPED(x) ((x) << S_NUM_PKTS_DROPPED)
+#define G_NUM_PKTS_DROPPED(x) (((x) >> S_NUM_PKTS_DROPPED) & M_NUM_PKTS_DROPPED)
+
+#define A_TP_TX_DROP_COUNT 0x4bc
+
+/* RAT registers */
+#define A_RAT_ROUTE_CONTROL 0x580
+
+#define S_USE_ROUTE_TABLE 0
+#define V_USE_ROUTE_TABLE(x) ((x) << S_USE_ROUTE_TABLE)
+#define F_USE_ROUTE_TABLE V_USE_ROUTE_TABLE(1U)
+
+#define S_ENABLE_CSPI 1
+#define V_ENABLE_CSPI(x) ((x) << S_ENABLE_CSPI)
+#define F_ENABLE_CSPI V_ENABLE_CSPI(1U)
+
+#define S_ENABLE_PCIX 2
+#define V_ENABLE_PCIX(x) ((x) << S_ENABLE_PCIX)
+#define F_ENABLE_PCIX V_ENABLE_PCIX(1U)
+
+#define A_RAT_ROUTE_TABLE_INDEX 0x584
+
+#define S_ROUTE_TABLE_INDEX 0
+#define M_ROUTE_TABLE_INDEX 0xf
+#define V_ROUTE_TABLE_INDEX(x) ((x) << S_ROUTE_TABLE_INDEX)
+#define G_ROUTE_TABLE_INDEX(x) (((x) >> S_ROUTE_TABLE_INDEX) & M_ROUTE_TABLE_INDEX)
+
+#define A_RAT_ROUTE_TABLE_DATA 0x588
+#define A_RAT_NO_ROUTE 0x58c
+
+#define S_CPL_OPCODE 0
+#define M_CPL_OPCODE 0xff
+#define V_CPL_OPCODE(x) ((x) << S_CPL_OPCODE)
+#define G_CPL_OPCODE(x) (((x) >> S_CPL_OPCODE) & M_CPL_OPCODE)
+
+#define A_RAT_INTR_ENABLE 0x590
+
+#define S_ZEROROUTEERROR 0
+#define V_ZEROROUTEERROR(x) ((x) << S_ZEROROUTEERROR)
+#define F_ZEROROUTEERROR V_ZEROROUTEERROR(1U)
+
+#define S_CSPIFRAMINGERROR 1
+#define V_CSPIFRAMINGERROR(x) ((x) << S_CSPIFRAMINGERROR)
+#define F_CSPIFRAMINGERROR V_CSPIFRAMINGERROR(1U)
+
+#define S_SGEFRAMINGERROR 2
+#define V_SGEFRAMINGERROR(x) ((x) << S_SGEFRAMINGERROR)
+#define F_SGEFRAMINGERROR V_SGEFRAMINGERROR(1U)
+
+#define S_TPFRAMINGERROR 3
+#define V_TPFRAMINGERROR(x) ((x) << S_TPFRAMINGERROR)
+#define F_TPFRAMINGERROR V_TPFRAMINGERROR(1U)
+
+#define A_RAT_INTR_CAUSE 0x594
+
+/* CSPI registers */
+#define A_CSPI_RX_AE_WM 0x810
+#define A_CSPI_RX_AF_WM 0x814
+#define A_CSPI_CALENDAR_LEN 0x818
+
+#define S_CALENDARLENGTH 0
+#define M_CALENDARLENGTH 0xffff
+#define V_CALENDARLENGTH(x) ((x) << S_CALENDARLENGTH)
+#define G_CALENDARLENGTH(x) (((x) >> S_CALENDARLENGTH) & M_CALENDARLENGTH)
+
+#define A_CSPI_FIFO_STATUS_ENABLE 0x820
+
+#define S_FIFOSTATUSENABLE 0
+#define V_FIFOSTATUSENABLE(x) ((x) << S_FIFOSTATUSENABLE)
+#define F_FIFOSTATUSENABLE V_FIFOSTATUSENABLE(1U)
+
+#define A_CSPI_MAXBURST1_MAXBURST2 0x828
+
+#define S_MAXBURST1 0
+#define M_MAXBURST1 0xffff
+#define V_MAXBURST1(x) ((x) << S_MAXBURST1)
+#define G_MAXBURST1(x) (((x) >> S_MAXBURST1) & M_MAXBURST1)
+
+#define S_MAXBURST2 16
+#define M_MAXBURST2 0xffff
+#define V_MAXBURST2(x) ((x) << S_MAXBURST2)
+#define G_MAXBURST2(x) (((x) >> S_MAXBURST2) & M_MAXBURST2)
+
+#define A_CSPI_TRAIN 0x82c
+
+#define S_CSPI_TRAIN_ALPHA 0
+#define M_CSPI_TRAIN_ALPHA 0xffff
+#define V_CSPI_TRAIN_ALPHA(x) ((x) << S_CSPI_TRAIN_ALPHA)
+#define G_CSPI_TRAIN_ALPHA(x) (((x) >> S_CSPI_TRAIN_ALPHA) & M_CSPI_TRAIN_ALPHA)
+
+#define S_CSPI_TRAIN_DATA_MAXT 16
+#define M_CSPI_TRAIN_DATA_MAXT 0xffff
+#define V_CSPI_TRAIN_DATA_MAXT(x) ((x) << S_CSPI_TRAIN_DATA_MAXT)
+#define G_CSPI_TRAIN_DATA_MAXT(x) (((x) >> S_CSPI_TRAIN_DATA_MAXT) & M_CSPI_TRAIN_DATA_MAXT)
+
+#define A_CSPI_INTR_STATUS 0x848
+
+#define S_DIP4ERR 0
+#define V_DIP4ERR(x) ((x) << S_DIP4ERR)
+#define F_DIP4ERR V_DIP4ERR(1U)
+
+#define S_RXDROP 1
+#define V_RXDROP(x) ((x) << S_RXDROP)
+#define F_RXDROP V_RXDROP(1U)
+
+#define S_TXDROP 2
+#define V_TXDROP(x) ((x) << S_TXDROP)
+#define F_TXDROP V_TXDROP(1U)
+
+#define S_RXOVERFLOW 3
+#define V_RXOVERFLOW(x) ((x) << S_RXOVERFLOW)
+#define F_RXOVERFLOW V_RXOVERFLOW(1U)
+
+#define S_RAMPARITYERR 4
+#define V_RAMPARITYERR(x) ((x) << S_RAMPARITYERR)
+#define F_RAMPARITYERR V_RAMPARITYERR(1U)
+
+#define A_CSPI_INTR_ENABLE 0x84c
+
+/* ESPI registers */
+#define A_ESPI_SCH_TOKEN0 0x880
+
+#define S_SCHTOKEN0 0
+#define M_SCHTOKEN0 0xffff
+#define V_SCHTOKEN0(x) ((x) << S_SCHTOKEN0)
+#define G_SCHTOKEN0(x) (((x) >> S_SCHTOKEN0) & M_SCHTOKEN0)
+
+#define A_ESPI_SCH_TOKEN1 0x884
+
+#define S_SCHTOKEN1 0
+#define M_SCHTOKEN1 0xffff
+#define V_SCHTOKEN1(x) ((x) << S_SCHTOKEN1)
+#define G_SCHTOKEN1(x) (((x) >> S_SCHTOKEN1) & M_SCHTOKEN1)
+
+#define A_ESPI_SCH_TOKEN2 0x888
+
+#define S_SCHTOKEN2 0
+#define M_SCHTOKEN2 0xffff
+#define V_SCHTOKEN2(x) ((x) << S_SCHTOKEN2)
+#define G_SCHTOKEN2(x) (((x) >> S_SCHTOKEN2) & M_SCHTOKEN2)
+
+#define A_ESPI_SCH_TOKEN3 0x88c
+
+#define S_SCHTOKEN3 0
+#define M_SCHTOKEN3 0xffff
+#define V_SCHTOKEN3(x) ((x) << S_SCHTOKEN3)
+#define G_SCHTOKEN3(x) (((x) >> S_SCHTOKEN3) & M_SCHTOKEN3)
+
+#define A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK 0x890
+
+#define S_ALMOSTEMPTY 0
+#define M_ALMOSTEMPTY 0xffff
+#define V_ALMOSTEMPTY(x) ((x) << S_ALMOSTEMPTY)
+#define G_ALMOSTEMPTY(x) (((x) >> S_ALMOSTEMPTY) & M_ALMOSTEMPTY)
+
+#define A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK 0x894
+
+#define S_ALMOSTFULL 0
+#define M_ALMOSTFULL 0xffff
+#define V_ALMOSTFULL(x) ((x) << S_ALMOSTFULL)
+#define G_ALMOSTFULL(x) (((x) >> S_ALMOSTFULL) & M_ALMOSTFULL)
+
+#define A_ESPI_CALENDAR_LENGTH 0x898
+#define A_PORT_CONFIG 0x89c
+
+#define S_RX_NPORTS 0
+#define M_RX_NPORTS 0xff
+#define V_RX_NPORTS(x) ((x) << S_RX_NPORTS)
+#define G_RX_NPORTS(x) (((x) >> S_RX_NPORTS) & M_RX_NPORTS)
+
+#define S_TX_NPORTS 8
+#define M_TX_NPORTS 0xff
+#define V_TX_NPORTS(x) ((x) << S_TX_NPORTS)
+#define G_TX_NPORTS(x) (((x) >> S_TX_NPORTS) & M_TX_NPORTS)
+
+#define A_ESPI_FIFO_STATUS_ENABLE 0x8a0
+
+#define S_RXSTATUSENABLE 0
+#define V_RXSTATUSENABLE(x) ((x) << S_RXSTATUSENABLE)
+#define F_RXSTATUSENABLE V_RXSTATUSENABLE(1U)
+
+#define S_TXDROPENABLE 1
+#define V_TXDROPENABLE(x) ((x) << S_TXDROPENABLE)
+#define F_TXDROPENABLE V_TXDROPENABLE(1U)
+
+#define S_RXENDIANMODE 2
+#define V_RXENDIANMODE(x) ((x) << S_RXENDIANMODE)
+#define F_RXENDIANMODE V_RXENDIANMODE(1U)
+
+#define S_TXENDIANMODE 3
+#define V_TXENDIANMODE(x) ((x) << S_TXENDIANMODE)
+#define F_TXENDIANMODE V_TXENDIANMODE(1U)
+
+#define S_INTEL1010MODE 4
+#define V_INTEL1010MODE(x) ((x) << S_INTEL1010MODE)
+#define F_INTEL1010MODE V_INTEL1010MODE(1U)
+
+#define A_ESPI_MAXBURST1_MAXBURST2 0x8a8
+#define A_ESPI_TRAIN 0x8ac
+
+#define S_MAXTRAINALPHA 0
+#define M_MAXTRAINALPHA 0xffff
+#define V_MAXTRAINALPHA(x) ((x) << S_MAXTRAINALPHA)
+#define G_MAXTRAINALPHA(x) (((x) >> S_MAXTRAINALPHA) & M_MAXTRAINALPHA)
+
+#define S_MAXTRAINDATA 16
+#define M_MAXTRAINDATA 0xffff
+#define V_MAXTRAINDATA(x) ((x) << S_MAXTRAINDATA)
+#define G_MAXTRAINDATA(x) (((x) >> S_MAXTRAINDATA) & M_MAXTRAINDATA)
+
+#define A_RAM_STATUS 0x8b0
+
+#define S_RXFIFOPARITYERROR 0
+#define M_RXFIFOPARITYERROR 0x3ff
+#define V_RXFIFOPARITYERROR(x) ((x) << S_RXFIFOPARITYERROR)
+#define G_RXFIFOPARITYERROR(x) (((x) >> S_RXFIFOPARITYERROR) & M_RXFIFOPARITYERROR)
+
+#define S_TXFIFOPARITYERROR 10
+#define M_TXFIFOPARITYERROR 0x3ff
+#define V_TXFIFOPARITYERROR(x) ((x) << S_TXFIFOPARITYERROR)
+#define G_TXFIFOPARITYERROR(x) (((x) >> S_TXFIFOPARITYERROR) & M_TXFIFOPARITYERROR)
+
+#define S_RXFIFOOVERFLOW 20
+#define M_RXFIFOOVERFLOW 0x3ff
+#define V_RXFIFOOVERFLOW(x) ((x) << S_RXFIFOOVERFLOW)
+#define G_RXFIFOOVERFLOW(x) (((x) >> S_RXFIFOOVERFLOW) & M_RXFIFOOVERFLOW)
+
+#define A_TX_DROP_COUNT0 0x8b4
+
+#define S_TXPORT0DROPCNT 0
+#define M_TXPORT0DROPCNT 0xffff
+#define V_TXPORT0DROPCNT(x) ((x) << S_TXPORT0DROPCNT)
+#define G_TXPORT0DROPCNT(x) (((x) >> S_TXPORT0DROPCNT) & M_TXPORT0DROPCNT)
+
+#define S_TXPORT1DROPCNT 16
+#define M_TXPORT1DROPCNT 0xffff
+#define V_TXPORT1DROPCNT(x) ((x) << S_TXPORT1DROPCNT)
+#define G_TXPORT1DROPCNT(x) (((x) >> S_TXPORT1DROPCNT) & M_TXPORT1DROPCNT)
+
+#define A_TX_DROP_COUNT1 0x8b8
+
+#define S_TXPORT2DROPCNT 0
+#define M_TXPORT2DROPCNT 0xffff
+#define V_TXPORT2DROPCNT(x) ((x) << S_TXPORT2DROPCNT)
+#define G_TXPORT2DROPCNT(x) (((x) >> S_TXPORT2DROPCNT) & M_TXPORT2DROPCNT)
+
+#define S_TXPORT3DROPCNT 16
+#define M_TXPORT3DROPCNT 0xffff
+#define V_TXPORT3DROPCNT(x) ((x) << S_TXPORT3DROPCNT)
+#define G_TXPORT3DROPCNT(x) (((x) >> S_TXPORT3DROPCNT) & M_TXPORT3DROPCNT)
+
+#define A_RX_DROP_COUNT0 0x8bc
+
+#define S_RXPORT0DROPCNT 0
+#define M_RXPORT0DROPCNT 0xffff
+#define V_RXPORT0DROPCNT(x) ((x) << S_RXPORT0DROPCNT)
+#define G_RXPORT0DROPCNT(x) (((x) >> S_RXPORT0DROPCNT) & M_RXPORT0DROPCNT)
+
+#define S_RXPORT1DROPCNT 16
+#define M_RXPORT1DROPCNT 0xffff
+#define V_RXPORT1DROPCNT(x) ((x) << S_RXPORT1DROPCNT)
+#define G_RXPORT1DROPCNT(x) (((x) >> S_RXPORT1DROPCNT) & M_RXPORT1DROPCNT)
+
+#define A_RX_DROP_COUNT1 0x8c0
+
+#define S_RXPORT2DROPCNT 0
+#define M_RXPORT2DROPCNT 0xffff
+#define V_RXPORT2DROPCNT(x) ((x) << S_RXPORT2DROPCNT)
+#define G_RXPORT2DROPCNT(x) (((x) >> S_RXPORT2DROPCNT) & M_RXPORT2DROPCNT)
+
+#define S_RXPORT3DROPCNT 16
+#define M_RXPORT3DROPCNT 0xffff
+#define V_RXPORT3DROPCNT(x) ((x) << S_RXPORT3DROPCNT)
+#define G_RXPORT3DROPCNT(x) (((x) >> S_RXPORT3DROPCNT) & M_RXPORT3DROPCNT)
+
+#define A_DIP4_ERROR_COUNT 0x8c4
+
+#define S_DIP4ERRORCNT 0
+#define M_DIP4ERRORCNT 0xfff
+#define V_DIP4ERRORCNT(x) ((x) << S_DIP4ERRORCNT)
+#define G_DIP4ERRORCNT(x) (((x) >> S_DIP4ERRORCNT) & M_DIP4ERRORCNT)
+
+#define S_DIP4ERRORCNTSHADOW 12
+#define M_DIP4ERRORCNTSHADOW 0xfff
+#define V_DIP4ERRORCNTSHADOW(x) ((x) << S_DIP4ERRORCNTSHADOW)
+#define G_DIP4ERRORCNTSHADOW(x) (((x) >> S_DIP4ERRORCNTSHADOW) & M_DIP4ERRORCNTSHADOW)
+
+#define S_TRICN_RX_TRAIN_ERR 24
+#define V_TRICN_RX_TRAIN_ERR(x) ((x) << S_TRICN_RX_TRAIN_ERR)
+#define F_TRICN_RX_TRAIN_ERR V_TRICN_RX_TRAIN_ERR(1U)
+
+#define S_TRICN_RX_TRAINING 25
+#define V_TRICN_RX_TRAINING(x) ((x) << S_TRICN_RX_TRAINING)
+#define F_TRICN_RX_TRAINING V_TRICN_RX_TRAINING(1U)
+
+#define S_TRICN_RX_TRAIN_OK 26
+#define V_TRICN_RX_TRAIN_OK(x) ((x) << S_TRICN_RX_TRAIN_OK)
+#define F_TRICN_RX_TRAIN_OK V_TRICN_RX_TRAIN_OK(1U)
+
+#define A_ESPI_INTR_STATUS 0x8c8
+
+#define S_DIP2PARITYERR 5
+#define V_DIP2PARITYERR(x) ((x) << S_DIP2PARITYERR)
+#define F_DIP2PARITYERR V_DIP2PARITYERR(1U)
+
+#define A_ESPI_INTR_ENABLE 0x8cc
+#define A_RX_DROP_THRESHOLD 0x8d0
+#define A_ESPI_RX_RESET 0x8ec
+
+#define S_ESPI_RX_LNK_RST 0
+#define V_ESPI_RX_LNK_RST(x) ((x) << S_ESPI_RX_LNK_RST)
+#define F_ESPI_RX_LNK_RST V_ESPI_RX_LNK_RST(1U)
+
+#define S_ESPI_RX_CORE_RST 1
+#define V_ESPI_RX_CORE_RST(x) ((x) << S_ESPI_RX_CORE_RST)
+#define F_ESPI_RX_CORE_RST V_ESPI_RX_CORE_RST(1U)
+
+#define S_RX_CLK_STATUS 2
+#define V_RX_CLK_STATUS(x) ((x) << S_RX_CLK_STATUS)
+#define F_RX_CLK_STATUS V_RX_CLK_STATUS(1U)
+
+#define A_ESPI_MISC_CONTROL 0x8f0
+
+#define S_OUT_OF_SYNC_COUNT 0
+#define M_OUT_OF_SYNC_COUNT 0xf
+#define V_OUT_OF_SYNC_COUNT(x) ((x) << S_OUT_OF_SYNC_COUNT)
+#define G_OUT_OF_SYNC_COUNT(x) (((x) >> S_OUT_OF_SYNC_COUNT) & M_OUT_OF_SYNC_COUNT)
+
+#define S_DIP2_COUNT_MODE_ENABLE 4
+#define V_DIP2_COUNT_MODE_ENABLE(x) ((x) << S_DIP2_COUNT_MODE_ENABLE)
+#define F_DIP2_COUNT_MODE_ENABLE V_DIP2_COUNT_MODE_ENABLE(1U)
+
+#define S_DIP2_PARITY_ERR_THRES 5
+#define M_DIP2_PARITY_ERR_THRES 0xf
+#define V_DIP2_PARITY_ERR_THRES(x) ((x) << S_DIP2_PARITY_ERR_THRES)
+#define G_DIP2_PARITY_ERR_THRES(x) (((x) >> S_DIP2_PARITY_ERR_THRES) & M_DIP2_PARITY_ERR_THRES)
+
+#define S_DIP4_THRES 9
+#define M_DIP4_THRES 0xfff
+#define V_DIP4_THRES(x) ((x) << S_DIP4_THRES)
+#define G_DIP4_THRES(x) (((x) >> S_DIP4_THRES) & M_DIP4_THRES)
+
+#define S_DIP4_THRES_ENABLE 21
+#define V_DIP4_THRES_ENABLE(x) ((x) << S_DIP4_THRES_ENABLE)
+#define F_DIP4_THRES_ENABLE V_DIP4_THRES_ENABLE(1U)
+
+#define S_FORCE_DISABLE_STATUS 22
+#define V_FORCE_DISABLE_STATUS(x) ((x) << S_FORCE_DISABLE_STATUS)
+#define F_FORCE_DISABLE_STATUS V_FORCE_DISABLE_STATUS(1U)
+
+#define S_DYNAMIC_DESKEW 23
+#define V_DYNAMIC_DESKEW(x) ((x) << S_DYNAMIC_DESKEW)
+#define F_DYNAMIC_DESKEW V_DYNAMIC_DESKEW(1U)
+
+#define S_MONITORED_PORT_NUM 25
+#define M_MONITORED_PORT_NUM 0x3
+#define V_MONITORED_PORT_NUM(x) ((x) << S_MONITORED_PORT_NUM)
+#define G_MONITORED_PORT_NUM(x) (((x) >> S_MONITORED_PORT_NUM) & M_MONITORED_PORT_NUM)
+
+#define S_MONITORED_DIRECTION 27
+#define V_MONITORED_DIRECTION(x) ((x) << S_MONITORED_DIRECTION)
+#define F_MONITORED_DIRECTION V_MONITORED_DIRECTION(1U)
+
+#define S_MONITORED_INTERFACE 28
+#define V_MONITORED_INTERFACE(x) ((x) << S_MONITORED_INTERFACE)
+#define F_MONITORED_INTERFACE V_MONITORED_INTERFACE(1U)
+
+#define A_ESPI_DIP2_ERR_COUNT 0x8f4
+
+#define S_DIP2_ERR_CNT 0
+#define M_DIP2_ERR_CNT 0xf
+#define V_DIP2_ERR_CNT(x) ((x) << S_DIP2_ERR_CNT)
+#define G_DIP2_ERR_CNT(x) (((x) >> S_DIP2_ERR_CNT) & M_DIP2_ERR_CNT)
+
+#define A_ESPI_CMD_ADDR 0x8f8
+
+#define S_WRITE_DATA 0
+#define M_WRITE_DATA 0xff
+#define V_WRITE_DATA(x) ((x) << S_WRITE_DATA)
+#define G_WRITE_DATA(x) (((x) >> S_WRITE_DATA) & M_WRITE_DATA)
+
+#define S_REGISTER_OFFSET 8
+#define M_REGISTER_OFFSET 0xf
+#define V_REGISTER_OFFSET(x) ((x) << S_REGISTER_OFFSET)
+#define G_REGISTER_OFFSET(x) (((x) >> S_REGISTER_OFFSET) & M_REGISTER_OFFSET)
+
+#define S_CHANNEL_ADDR 12
+#define M_CHANNEL_ADDR 0xf
+#define V_CHANNEL_ADDR(x) ((x) << S_CHANNEL_ADDR)
+#define G_CHANNEL_ADDR(x) (((x) >> S_CHANNEL_ADDR) & M_CHANNEL_ADDR)
+
+#define S_MODULE_ADDR 16
+#define M_MODULE_ADDR 0x3
+#define V_MODULE_ADDR(x) ((x) << S_MODULE_ADDR)
+#define G_MODULE_ADDR(x) (((x) >> S_MODULE_ADDR) & M_MODULE_ADDR)
+
+#define S_BUNDLE_ADDR 20
+#define M_BUNDLE_ADDR 0x3
+#define V_BUNDLE_ADDR(x) ((x) << S_BUNDLE_ADDR)
+#define G_BUNDLE_ADDR(x) (((x) >> S_BUNDLE_ADDR) & M_BUNDLE_ADDR)
+
+#define S_SPI4_COMMAND 24
+#define M_SPI4_COMMAND 0xff
+#define V_SPI4_COMMAND(x) ((x) << S_SPI4_COMMAND)
+#define G_SPI4_COMMAND(x) (((x) >> S_SPI4_COMMAND) & M_SPI4_COMMAND)
+
+#define A_ESPI_GOSTAT 0x8fc
+
+#define S_READ_DATA 0
+#define M_READ_DATA 0xff
+#define V_READ_DATA(x) ((x) << S_READ_DATA)
+#define G_READ_DATA(x) (((x) >> S_READ_DATA) & M_READ_DATA)
+
+#define S_ESPI_CMD_BUSY 8
+#define V_ESPI_CMD_BUSY(x) ((x) << S_ESPI_CMD_BUSY)
+#define F_ESPI_CMD_BUSY V_ESPI_CMD_BUSY(1U)
+
+#define S_ERROR_ACK 9
+#define V_ERROR_ACK(x) ((x) << S_ERROR_ACK)
+#define F_ERROR_ACK V_ERROR_ACK(1U)
+
+#define S_UNMAPPED_ERR 10
+#define V_UNMAPPED_ERR(x) ((x) << S_UNMAPPED_ERR)
+#define F_UNMAPPED_ERR V_UNMAPPED_ERR(1U)
+
+#define S_TRANSACTION_TIMER 16
+#define M_TRANSACTION_TIMER 0xff
+#define V_TRANSACTION_TIMER(x) ((x) << S_TRANSACTION_TIMER)
+#define G_TRANSACTION_TIMER(x) (((x) >> S_TRANSACTION_TIMER) & M_TRANSACTION_TIMER)
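+
+/*
+ * A_ESPI_CMD_ADDR and A_ESPI_GOSTAT form a small command mailbox: the
+ * opcode, addressing fields and write data are packed into A_ESPI_CMD_ADDR,
+ * completion is signalled when F_ESPI_CMD_BUSY clears in A_ESPI_GOSTAT,
+ * and read results come back through G_READ_DATA.  Sketch only; the
+ * SPI-4 opcodes themselves are not defined in this file:
+ *
+ *	writel(V_SPI4_COMMAND(cmd) | V_MODULE_ADDR(mod) |
+ *	       V_CHANNEL_ADDR(ch) | V_REGISTER_OFFSET(off),
+ *	       adapter->regs + A_ESPI_CMD_ADDR);
+ *	while (readl(adapter->regs + A_ESPI_GOSTAT) & F_ESPI_CMD_BUSY)
+ *		cpu_relax();
+ */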
+
+/* ULP registers */
+#define A_ULP_ULIMIT 0x980
+#define A_ULP_TAGMASK 0x984
+#define A_ULP_HREG_INDEX 0x988
+#define A_ULP_HREG_DATA 0x98c
+#define A_ULP_INT_ENABLE 0x990
+#define A_ULP_INT_CAUSE 0x994
+
+#define S_HREG_PAR_ERR 0
+#define V_HREG_PAR_ERR(x) ((x) << S_HREG_PAR_ERR)
+#define F_HREG_PAR_ERR V_HREG_PAR_ERR(1U)
+
+#define S_EGRS_DATA_PAR_ERR 1
+#define V_EGRS_DATA_PAR_ERR(x) ((x) << S_EGRS_DATA_PAR_ERR)
+#define F_EGRS_DATA_PAR_ERR V_EGRS_DATA_PAR_ERR(1U)
+
+#define S_INGRS_DATA_PAR_ERR 2
+#define V_INGRS_DATA_PAR_ERR(x) ((x) << S_INGRS_DATA_PAR_ERR)
+#define F_INGRS_DATA_PAR_ERR V_INGRS_DATA_PAR_ERR(1U)
+
+#define S_PM_INTR 3
+#define V_PM_INTR(x) ((x) << S_PM_INTR)
+#define F_PM_INTR V_PM_INTR(1U)
+
+#define S_PM_E2C_SYNC_ERR 4
+#define V_PM_E2C_SYNC_ERR(x) ((x) << S_PM_E2C_SYNC_ERR)
+#define F_PM_E2C_SYNC_ERR V_PM_E2C_SYNC_ERR(1U)
+
+#define S_PM_C2E_SYNC_ERR 5
+#define V_PM_C2E_SYNC_ERR(x) ((x) << S_PM_C2E_SYNC_ERR)
+#define F_PM_C2E_SYNC_ERR V_PM_C2E_SYNC_ERR(1U)
+
+#define S_PM_E2C_EMPTY_ERR 6
+#define V_PM_E2C_EMPTY_ERR(x) ((x) << S_PM_E2C_EMPTY_ERR)
+#define F_PM_E2C_EMPTY_ERR V_PM_E2C_EMPTY_ERR(1U)
+
+#define S_PM_C2E_EMPTY_ERR 7
+#define V_PM_C2E_EMPTY_ERR(x) ((x) << S_PM_C2E_EMPTY_ERR)
+#define F_PM_C2E_EMPTY_ERR V_PM_C2E_EMPTY_ERR(1U)
+
+#define S_PM_PAR_ERR 8
+#define M_PM_PAR_ERR 0xffff
+#define V_PM_PAR_ERR(x) ((x) << S_PM_PAR_ERR)
+#define G_PM_PAR_ERR(x) (((x) >> S_PM_PAR_ERR) & M_PM_PAR_ERR)
+
+#define S_PM_E2C_WRT_FULL 24
+#define V_PM_E2C_WRT_FULL(x) ((x) << S_PM_E2C_WRT_FULL)
+#define F_PM_E2C_WRT_FULL V_PM_E2C_WRT_FULL(1U)
+
+#define S_PM_C2E_WRT_FULL 25
+#define V_PM_C2E_WRT_FULL(x) ((x) << S_PM_C2E_WRT_FULL)
+#define F_PM_C2E_WRT_FULL V_PM_C2E_WRT_FULL(1U)
+
+#define A_ULP_PIO_CTRL 0x998
+
+/* PL registers */
+#define A_PL_ENABLE 0xa00
+
+#define S_PL_INTR_SGE_ERR 0
+#define V_PL_INTR_SGE_ERR(x) ((x) << S_PL_INTR_SGE_ERR)
+#define F_PL_INTR_SGE_ERR V_PL_INTR_SGE_ERR(1U)
+
+#define S_PL_INTR_SGE_DATA 1
+#define V_PL_INTR_SGE_DATA(x) ((x) << S_PL_INTR_SGE_DATA)
+#define F_PL_INTR_SGE_DATA V_PL_INTR_SGE_DATA(1U)
+
+#define S_PL_INTR_MC3 2
+#define V_PL_INTR_MC3(x) ((x) << S_PL_INTR_MC3)
+#define F_PL_INTR_MC3 V_PL_INTR_MC3(1U)
+
+#define S_PL_INTR_MC4 3
+#define V_PL_INTR_MC4(x) ((x) << S_PL_INTR_MC4)
+#define F_PL_INTR_MC4 V_PL_INTR_MC4(1U)
+
+#define S_PL_INTR_MC5 4
+#define V_PL_INTR_MC5(x) ((x) << S_PL_INTR_MC5)
+#define F_PL_INTR_MC5 V_PL_INTR_MC5(1U)
+
+#define S_PL_INTR_RAT 5
+#define V_PL_INTR_RAT(x) ((x) << S_PL_INTR_RAT)
+#define F_PL_INTR_RAT V_PL_INTR_RAT(1U)
+
+#define S_PL_INTR_TP 6
+#define V_PL_INTR_TP(x) ((x) << S_PL_INTR_TP)
+#define F_PL_INTR_TP V_PL_INTR_TP(1U)
+
+#define S_PL_INTR_ULP 7
+#define V_PL_INTR_ULP(x) ((x) << S_PL_INTR_ULP)
+#define F_PL_INTR_ULP V_PL_INTR_ULP(1U)
+
+#define S_PL_INTR_ESPI 8
+#define V_PL_INTR_ESPI(x) ((x) << S_PL_INTR_ESPI)
+#define F_PL_INTR_ESPI V_PL_INTR_ESPI(1U)
+
+#define S_PL_INTR_CSPI 9
+#define V_PL_INTR_CSPI(x) ((x) << S_PL_INTR_CSPI)
+#define F_PL_INTR_CSPI V_PL_INTR_CSPI(1U)
+
+#define S_PL_INTR_PCIX 10
+#define V_PL_INTR_PCIX(x) ((x) << S_PL_INTR_PCIX)
+#define F_PL_INTR_PCIX V_PL_INTR_PCIX(1U)
+
+#define S_PL_INTR_EXT 11
+#define V_PL_INTR_EXT(x) ((x) << S_PL_INTR_EXT)
+#define F_PL_INTR_EXT V_PL_INTR_EXT(1U)
+
+#define A_PL_CAUSE 0xa04
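+
+/*
+ * A_PL_ENABLE/A_PL_CAUSE act as the top-level interrupt funnel; a
+ * dispatcher would read the cause word once, test the per-block F_ bits,
+ * and acknowledge (sketch; the handler names are hypothetical):
+ *
+ *	u32 cause = readl(adapter->regs + A_PL_CAUSE);
+ *	if (cause & F_PL_INTR_SGE_DATA)
+ *		handle_sge_data(adapter);
+ *	if (cause & F_PL_INTR_ESPI)
+ *		handle_espi(adapter);
+ *	writel(cause, adapter->regs + A_PL_CAUSE);
+ */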
+
+/* MC5 registers */
+#define A_MC5_CONFIG 0xc04
+
+#define S_MODE 0
+#define V_MODE(x) ((x) << S_MODE)
+#define F_MODE V_MODE(1U)
+
+#define S_TCAM_RESET 1
+#define V_TCAM_RESET(x) ((x) << S_TCAM_RESET)
+#define F_TCAM_RESET V_TCAM_RESET(1U)
+
+#define S_TCAM_READY 2
+#define V_TCAM_READY(x) ((x) << S_TCAM_READY)
+#define F_TCAM_READY V_TCAM_READY(1U)
+
+#define S_DBGI_ENABLE 4
+#define V_DBGI_ENABLE(x) ((x) << S_DBGI_ENABLE)
+#define F_DBGI_ENABLE V_DBGI_ENABLE(1U)
+
+#define S_M_BUS_ENABLE 5
+#define V_M_BUS_ENABLE(x) ((x) << S_M_BUS_ENABLE)
+#define F_M_BUS_ENABLE V_M_BUS_ENABLE(1U)
+
+#define S_PARITY_ENABLE 6
+#define V_PARITY_ENABLE(x) ((x) << S_PARITY_ENABLE)
+#define F_PARITY_ENABLE V_PARITY_ENABLE(1U)
+
+#define S_SYN_ISSUE_MODE 7
+#define M_SYN_ISSUE_MODE 0x3
+#define V_SYN_ISSUE_MODE(x) ((x) << S_SYN_ISSUE_MODE)
+#define G_SYN_ISSUE_MODE(x) (((x) >> S_SYN_ISSUE_MODE) & M_SYN_ISSUE_MODE)
+
+#define S_BUILD 16
+#define V_BUILD(x) ((x) << S_BUILD)
+#define F_BUILD V_BUILD(1U)
+
+#define S_COMPRESSION_ENABLE 17
+#define V_COMPRESSION_ENABLE(x) ((x) << S_COMPRESSION_ENABLE)
+#define F_COMPRESSION_ENABLE V_COMPRESSION_ENABLE(1U)
+
+#define S_NUM_LIP 18
+#define M_NUM_LIP 0x3f
+#define V_NUM_LIP(x) ((x) << S_NUM_LIP)
+#define G_NUM_LIP(x) (((x) >> S_NUM_LIP) & M_NUM_LIP)
+
+#define S_TCAM_PART_CNT 24
+#define M_TCAM_PART_CNT 0x3
+#define V_TCAM_PART_CNT(x) ((x) << S_TCAM_PART_CNT)
+#define G_TCAM_PART_CNT(x) (((x) >> S_TCAM_PART_CNT) & M_TCAM_PART_CNT)
+
+#define S_TCAM_PART_TYPE 26
+#define M_TCAM_PART_TYPE 0x3
+#define V_TCAM_PART_TYPE(x) ((x) << S_TCAM_PART_TYPE)
+#define G_TCAM_PART_TYPE(x) (((x) >> S_TCAM_PART_TYPE) & M_TCAM_PART_TYPE)
+
+#define S_TCAM_PART_SIZE 28
+#define M_TCAM_PART_SIZE 0x3
+#define V_TCAM_PART_SIZE(x) ((x) << S_TCAM_PART_SIZE)
+#define G_TCAM_PART_SIZE(x) (((x) >> S_TCAM_PART_SIZE) & M_TCAM_PART_SIZE)
+
+#define S_TCAM_PART_TYPE_HI 30
+#define V_TCAM_PART_TYPE_HI(x) ((x) << S_TCAM_PART_TYPE_HI)
+#define F_TCAM_PART_TYPE_HI V_TCAM_PART_TYPE_HI(1U)
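+
+/*
+ * Probing the attached TCAM reduces to decoding A_MC5_CONFIG with the
+ * extractors above (sketch; the meaning of the encoded part type/size
+ * values is TCAM specific and not given here):
+ *
+ *	u32 cfg = readl(adapter->regs + A_MC5_CONFIG);
+ *	unsigned int parts = G_TCAM_PART_CNT(cfg);
+ *	unsigned int size = G_TCAM_PART_SIZE(cfg);
+ */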
+
+#define A_MC5_SIZE 0xc08
+
+#define S_SIZE 0
+#define M_SIZE 0x3fffff
+#define V_SIZE(x) ((x) << S_SIZE)
+#define G_SIZE(x) (((x) >> S_SIZE) & M_SIZE)
+
+#define A_MC5_ROUTING_TABLE_INDEX 0xc0c
+
+#define S_START_OF_ROUTING_TABLE 0
+#define M_START_OF_ROUTING_TABLE 0x3fffff
+#define V_START_OF_ROUTING_TABLE(x) ((x) << S_START_OF_ROUTING_TABLE)
+#define G_START_OF_ROUTING_TABLE(x) (((x) >> S_START_OF_ROUTING_TABLE) & M_START_OF_ROUTING_TABLE)
+
+#define A_MC5_SERVER_INDEX 0xc14
+
+#define S_START_OF_SERVER_INDEX 0
+#define M_START_OF_SERVER_INDEX 0x3fffff
+#define V_START_OF_SERVER_INDEX(x) ((x) << S_START_OF_SERVER_INDEX)
+#define G_START_OF_SERVER_INDEX(x) (((x) >> S_START_OF_SERVER_INDEX) & M_START_OF_SERVER_INDEX)
+
+#define A_MC5_LIP_RAM_ADDR 0xc18
+
+#define S_LOCAL_IP_RAM_ADDR 0
+#define M_LOCAL_IP_RAM_ADDR 0x3f
+#define V_LOCAL_IP_RAM_ADDR(x) ((x) << S_LOCAL_IP_RAM_ADDR)
+#define G_LOCAL_IP_RAM_ADDR(x) (((x) >> S_LOCAL_IP_RAM_ADDR) & M_LOCAL_IP_RAM_ADDR)
+
+#define S_RAM_WRITE_ENABLE 8
+#define V_RAM_WRITE_ENABLE(x) ((x) << S_RAM_WRITE_ENABLE)
+#define F_RAM_WRITE_ENABLE V_RAM_WRITE_ENABLE(1U)
+
+#define A_MC5_LIP_RAM_DATA 0xc1c
+#define A_MC5_RSP_LATENCY 0xc20
+
+#define S_SEARCH_RESPONSE_LATENCY 0
+#define M_SEARCH_RESPONSE_LATENCY 0x1f
+#define V_SEARCH_RESPONSE_LATENCY(x) ((x) << S_SEARCH_RESPONSE_LATENCY)
+#define G_SEARCH_RESPONSE_LATENCY(x) (((x) >> S_SEARCH_RESPONSE_LATENCY) & M_SEARCH_RESPONSE_LATENCY)
+
+#define S_LEARN_RESPONSE_LATENCY 8
+#define M_LEARN_RESPONSE_LATENCY 0x1f
+#define V_LEARN_RESPONSE_LATENCY(x) ((x) << S_LEARN_RESPONSE_LATENCY)
+#define G_LEARN_RESPONSE_LATENCY(x) (((x) >> S_LEARN_RESPONSE_LATENCY) & M_LEARN_RESPONSE_LATENCY)
+
+#define A_MC5_PARITY_LATENCY 0xc24
+
+#define S_SRCHLAT 0
+#define M_SRCHLAT 0x1f
+#define V_SRCHLAT(x) ((x) << S_SRCHLAT)
+#define G_SRCHLAT(x) (((x) >> S_SRCHLAT) & M_SRCHLAT)
+
+#define S_PARLAT 8
+#define M_PARLAT 0x1f
+#define V_PARLAT(x) ((x) << S_PARLAT)
+#define G_PARLAT(x) (((x) >> S_PARLAT) & M_PARLAT)
+
+#define A_MC5_WR_LRN_VERIFY 0xc28
+
+#define S_POVEREN 0
+#define V_POVEREN(x) ((x) << S_POVEREN)
+#define F_POVEREN V_POVEREN(1U)
+
+#define S_LRNVEREN 1
+#define V_LRNVEREN(x) ((x) << S_LRNVEREN)
+#define F_LRNVEREN V_LRNVEREN(1U)
+
+#define S_VWVEREN 2
+#define V_VWVEREN(x) ((x) << S_VWVEREN)
+#define F_VWVEREN V_VWVEREN(1U)
+
+#define A_MC5_PART_ID_INDEX 0xc2c
+
+#define S_IDINDEX 0
+#define M_IDINDEX 0xf
+#define V_IDINDEX(x) ((x) << S_IDINDEX)
+#define G_IDINDEX(x) (((x) >> S_IDINDEX) & M_IDINDEX)
+
+#define A_MC5_RESET_MAX 0xc30
+
+#define S_RSTMAX 0
+#define M_RSTMAX 0x1ff
+#define V_RSTMAX(x) ((x) << S_RSTMAX)
+#define G_RSTMAX(x) (((x) >> S_RSTMAX) & M_RSTMAX)
+
+#define A_MC5_INT_ENABLE 0xc40
+
+#define S_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR 0
+#define V_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR(x) ((x) << S_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR)
+#define F_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR V_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR(1U)
+
+#define S_MC5_INT_HIT_IN_ACTIVE_REGION_ERR 1
+#define V_MC5_INT_HIT_IN_ACTIVE_REGION_ERR(x) ((x) << S_MC5_INT_HIT_IN_ACTIVE_REGION_ERR)
+#define F_MC5_INT_HIT_IN_ACTIVE_REGION_ERR V_MC5_INT_HIT_IN_ACTIVE_REGION_ERR(1U)
+
+#define S_MC5_INT_HIT_IN_RT_REGION_ERR 2
+#define V_MC5_INT_HIT_IN_RT_REGION_ERR(x) ((x) << S_MC5_INT_HIT_IN_RT_REGION_ERR)
+#define F_MC5_INT_HIT_IN_RT_REGION_ERR V_MC5_INT_HIT_IN_RT_REGION_ERR(1U)
+
+#define S_MC5_INT_MISS_ERR 3
+#define V_MC5_INT_MISS_ERR(x) ((x) << S_MC5_INT_MISS_ERR)
+#define F_MC5_INT_MISS_ERR V_MC5_INT_MISS_ERR(1U)
+
+#define S_MC5_INT_LIP0_ERR 4
+#define V_MC5_INT_LIP0_ERR(x) ((x) << S_MC5_INT_LIP0_ERR)
+#define F_MC5_INT_LIP0_ERR V_MC5_INT_LIP0_ERR(1U)
+
+#define S_MC5_INT_LIP_MISS_ERR 5
+#define V_MC5_INT_LIP_MISS_ERR(x) ((x) << S_MC5_INT_LIP_MISS_ERR)
+#define F_MC5_INT_LIP_MISS_ERR V_MC5_INT_LIP_MISS_ERR(1U)
+
+#define S_MC5_INT_PARITY_ERR 6
+#define V_MC5_INT_PARITY_ERR(x) ((x) << S_MC5_INT_PARITY_ERR)
+#define F_MC5_INT_PARITY_ERR V_MC5_INT_PARITY_ERR(1U)
+
+#define S_MC5_INT_ACTIVE_REGION_FULL 7
+#define V_MC5_INT_ACTIVE_REGION_FULL(x) ((x) << S_MC5_INT_ACTIVE_REGION_FULL)
+#define F_MC5_INT_ACTIVE_REGION_FULL V_MC5_INT_ACTIVE_REGION_FULL(1U)
+
+#define S_MC5_INT_NFA_SRCH_ERR 8
+#define V_MC5_INT_NFA_SRCH_ERR(x) ((x) << S_MC5_INT_NFA_SRCH_ERR)
+#define F_MC5_INT_NFA_SRCH_ERR V_MC5_INT_NFA_SRCH_ERR(1U)
+
+#define S_MC5_INT_SYN_COOKIE 9
+#define V_MC5_INT_SYN_COOKIE(x) ((x) << S_MC5_INT_SYN_COOKIE)
+#define F_MC5_INT_SYN_COOKIE V_MC5_INT_SYN_COOKIE(1U)
+
+#define S_MC5_INT_SYN_COOKIE_BAD 10
+#define V_MC5_INT_SYN_COOKIE_BAD(x) ((x) << S_MC5_INT_SYN_COOKIE_BAD)
+#define F_MC5_INT_SYN_COOKIE_BAD V_MC5_INT_SYN_COOKIE_BAD(1U)
+
+#define S_MC5_INT_SYN_COOKIE_OFF 11
+#define V_MC5_INT_SYN_COOKIE_OFF(x) ((x) << S_MC5_INT_SYN_COOKIE_OFF)
+#define F_MC5_INT_SYN_COOKIE_OFF V_MC5_INT_SYN_COOKIE_OFF(1U)
+
+#define S_MC5_INT_UNKNOWN_CMD 15
+#define V_MC5_INT_UNKNOWN_CMD(x) ((x) << S_MC5_INT_UNKNOWN_CMD)
+#define F_MC5_INT_UNKNOWN_CMD V_MC5_INT_UNKNOWN_CMD(1U)
+
+#define S_MC5_INT_REQUESTQ_PARITY_ERR 16
+#define V_MC5_INT_REQUESTQ_PARITY_ERR(x) ((x) << S_MC5_INT_REQUESTQ_PARITY_ERR)
+#define F_MC5_INT_REQUESTQ_PARITY_ERR V_MC5_INT_REQUESTQ_PARITY_ERR(1U)
+
+#define S_MC5_INT_DISPATCHQ_PARITY_ERR 17
+#define V_MC5_INT_DISPATCHQ_PARITY_ERR(x) ((x) << S_MC5_INT_DISPATCHQ_PARITY_ERR)
+#define F_MC5_INT_DISPATCHQ_PARITY_ERR V_MC5_INT_DISPATCHQ_PARITY_ERR(1U)
+
+#define S_MC5_INT_DEL_ACT_EMPTY 18
+#define V_MC5_INT_DEL_ACT_EMPTY(x) ((x) << S_MC5_INT_DEL_ACT_EMPTY)
+#define F_MC5_INT_DEL_ACT_EMPTY V_MC5_INT_DEL_ACT_EMPTY(1U)
+
+#define A_MC5_INT_CAUSE 0xc44
+#define A_MC5_INT_TID 0xc48
+#define A_MC5_INT_PTID 0xc4c
+#define A_MC5_DBGI_CONFIG 0xc74
+#define A_MC5_DBGI_REQ_CMD 0xc78
+
+#define S_CMDMODE 0
+#define M_CMDMODE 0x7
+#define V_CMDMODE(x) ((x) << S_CMDMODE)
+#define G_CMDMODE(x) (((x) >> S_CMDMODE) & M_CMDMODE)
+
+#define S_SADRSEL 4
+#define V_SADRSEL(x) ((x) << S_SADRSEL)
+#define F_SADRSEL V_SADRSEL(1U)
+
+#define S_WRITE_BURST_SIZE 22
+#define M_WRITE_BURST_SIZE 0x3ff
+#define V_WRITE_BURST_SIZE(x) ((x) << S_WRITE_BURST_SIZE)
+#define G_WRITE_BURST_SIZE(x) (((x) >> S_WRITE_BURST_SIZE) & M_WRITE_BURST_SIZE)
+
+#define A_MC5_DBGI_REQ_ADDR0 0xc7c
+#define A_MC5_DBGI_REQ_ADDR1 0xc80
+#define A_MC5_DBGI_REQ_ADDR2 0xc84
+#define A_MC5_DBGI_REQ_DATA0 0xc88
+#define A_MC5_DBGI_REQ_DATA1 0xc8c
+#define A_MC5_DBGI_REQ_DATA2 0xc90
+#define A_MC5_DBGI_REQ_DATA3 0xc94
+#define A_MC5_DBGI_REQ_DATA4 0xc98
+#define A_MC5_DBGI_REQ_MASK0 0xc9c
+#define A_MC5_DBGI_REQ_MASK1 0xca0
+#define A_MC5_DBGI_REQ_MASK2 0xca4
+#define A_MC5_DBGI_REQ_MASK3 0xca8
+#define A_MC5_DBGI_REQ_MASK4 0xcac
+#define A_MC5_DBGI_RSP_STATUS 0xcb0
+
+#define S_DBGI_RSP_VALID 0
+#define V_DBGI_RSP_VALID(x) ((x) << S_DBGI_RSP_VALID)
+#define F_DBGI_RSP_VALID V_DBGI_RSP_VALID(1U)
+
+#define S_DBGI_RSP_HIT 1
+#define V_DBGI_RSP_HIT(x) ((x) << S_DBGI_RSP_HIT)
+#define F_DBGI_RSP_HIT V_DBGI_RSP_HIT(1U)
+
+#define S_DBGI_RSP_ERR 2
+#define V_DBGI_RSP_ERR(x) ((x) << S_DBGI_RSP_ERR)
+#define F_DBGI_RSP_ERR V_DBGI_RSP_ERR(1U)
+
+#define S_DBGI_RSP_ERR_REASON 8
+#define M_DBGI_RSP_ERR_REASON 0x7
+#define V_DBGI_RSP_ERR_REASON(x) ((x) << S_DBGI_RSP_ERR_REASON)
+#define G_DBGI_RSP_ERR_REASON(x) (((x) >> S_DBGI_RSP_ERR_REASON) & M_DBGI_RSP_ERR_REASON)
+
+#define A_MC5_DBGI_RSP_DATA0 0xcb4
+#define A_MC5_DBGI_RSP_DATA1 0xcb8
+#define A_MC5_DBGI_RSP_DATA2 0xcbc
+#define A_MC5_DBGI_RSP_DATA3 0xcc0
+#define A_MC5_DBGI_RSP_DATA4 0xcc4
+#define A_MC5_DBGI_RSP_LAST_CMD 0xcc8
+#define A_MC5_POPEN_DATA_WR_CMD 0xccc
+#define A_MC5_POPEN_MASK_WR_CMD 0xcd0
+#define A_MC5_AOPEN_SRCH_CMD 0xcd4
+#define A_MC5_AOPEN_LRN_CMD 0xcd8
+#define A_MC5_SYN_SRCH_CMD 0xcdc
+#define A_MC5_SYN_LRN_CMD 0xce0
+#define A_MC5_ACK_SRCH_CMD 0xce4
+#define A_MC5_ACK_LRN_CMD 0xce8
+#define A_MC5_ILOOKUP_CMD 0xcec
+#define A_MC5_ELOOKUP_CMD 0xcf0
+#define A_MC5_DATA_WRITE_CMD 0xcf4
+#define A_MC5_DATA_READ_CMD 0xcf8
+#define A_MC5_MASK_WRITE_CMD 0xcfc
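+
+/*
+ * A note on the field-accessor idiom used throughout this file (an
+ * illustrative sketch, not code the driver executes): S_* is a field's
+ * bit offset, M_* its width mask, V_*() places a value at that offset,
+ * F_* is the one-bit flag form, and G_*() extracts a field from a
+ * register word.  For example:
+ *
+ *	u32 cfg = F_TCAM_RESET | V_SYN_ISSUE_MODE(2) | V_NUM_LIP(4);
+ *	unsigned int nlip = G_NUM_LIP(cfg);	yields 4
+ */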
+
+/* PCICFG registers */
+#define A_PCICFG_PM_CSR 0x44
+#define A_PCICFG_VPD_ADDR 0x4a
+
+#define S_VPD_ADDR 0
+#define M_VPD_ADDR 0x7fff
+#define V_VPD_ADDR(x) ((x) << S_VPD_ADDR)
+#define G_VPD_ADDR(x) (((x) >> S_VPD_ADDR) & M_VPD_ADDR)
+
+#define S_VPD_OP_FLAG 15
+#define V_VPD_OP_FLAG(x) ((x) << S_VPD_OP_FLAG)
+#define F_VPD_OP_FLAG V_VPD_OP_FLAG(1U)
+
+#define A_PCICFG_VPD_DATA 0x4c
+#define A_PCICFG_PCIX_CMD 0x60
+#define A_PCICFG_INTR_ENABLE 0xf4
+
+#define S_MASTER_PARITY_ERR 0
+#define V_MASTER_PARITY_ERR(x) ((x) << S_MASTER_PARITY_ERR)
+#define F_MASTER_PARITY_ERR V_MASTER_PARITY_ERR(1U)
+
+#define S_SIG_TARGET_ABORT 1
+#define V_SIG_TARGET_ABORT(x) ((x) << S_SIG_TARGET_ABORT)
+#define F_SIG_TARGET_ABORT V_SIG_TARGET_ABORT(1U)
+
+#define S_RCV_TARGET_ABORT 2
+#define V_RCV_TARGET_ABORT(x) ((x) << S_RCV_TARGET_ABORT)
+#define F_RCV_TARGET_ABORT V_RCV_TARGET_ABORT(1U)
+
+#define S_RCV_MASTER_ABORT 3
+#define V_RCV_MASTER_ABORT(x) ((x) << S_RCV_MASTER_ABORT)
+#define F_RCV_MASTER_ABORT V_RCV_MASTER_ABORT(1U)
+
+#define S_SIG_SYS_ERR 4
+#define V_SIG_SYS_ERR(x) ((x) << S_SIG_SYS_ERR)
+#define F_SIG_SYS_ERR V_SIG_SYS_ERR(1U)
+
+#define S_DET_PARITY_ERR 5
+#define V_DET_PARITY_ERR(x) ((x) << S_DET_PARITY_ERR)
+#define F_DET_PARITY_ERR V_DET_PARITY_ERR(1U)
+
+#define S_PIO_PARITY_ERR 6
+#define V_PIO_PARITY_ERR(x) ((x) << S_PIO_PARITY_ERR)
+#define F_PIO_PARITY_ERR V_PIO_PARITY_ERR(1U)
+
+#define S_WF_PARITY_ERR 7
+#define V_WF_PARITY_ERR(x) ((x) << S_WF_PARITY_ERR)
+#define F_WF_PARITY_ERR V_WF_PARITY_ERR(1U)
+
+#define S_RF_PARITY_ERR 8
+#define M_RF_PARITY_ERR 0x3
+#define V_RF_PARITY_ERR(x) ((x) << S_RF_PARITY_ERR)
+#define G_RF_PARITY_ERR(x) (((x) >> S_RF_PARITY_ERR) & M_RF_PARITY_ERR)
+
+#define S_CF_PARITY_ERR 10
+#define M_CF_PARITY_ERR 0x3
+#define V_CF_PARITY_ERR(x) ((x) << S_CF_PARITY_ERR)
+#define G_CF_PARITY_ERR(x) (((x) >> S_CF_PARITY_ERR) & M_CF_PARITY_ERR)
+
+#define A_PCICFG_INTR_CAUSE 0xf8
+#define A_PCICFG_MODE 0xfc
+
+#define S_PCI_MODE_64BIT 0
+#define V_PCI_MODE_64BIT(x) ((x) << S_PCI_MODE_64BIT)
+#define F_PCI_MODE_64BIT V_PCI_MODE_64BIT(1U)
+
+#define S_PCI_MODE_66MHZ 1
+#define V_PCI_MODE_66MHZ(x) ((x) << S_PCI_MODE_66MHZ)
+#define F_PCI_MODE_66MHZ V_PCI_MODE_66MHZ(1U)
+
+#define S_PCI_MODE_PCIX_INITPAT 2
+#define M_PCI_MODE_PCIX_INITPAT 0x7
+#define V_PCI_MODE_PCIX_INITPAT(x) ((x) << S_PCI_MODE_PCIX_INITPAT)
+#define G_PCI_MODE_PCIX_INITPAT(x) (((x) >> S_PCI_MODE_PCIX_INITPAT) & M_PCI_MODE_PCIX_INITPAT)
+
+#define S_PCI_MODE_PCIX 5
+#define V_PCI_MODE_PCIX(x) ((x) << S_PCI_MODE_PCIX)
+#define F_PCI_MODE_PCIX V_PCI_MODE_PCIX(1U)
+
+#define S_PCI_MODE_CLK 6
+#define M_PCI_MODE_CLK 0x3
+#define V_PCI_MODE_CLK(x) ((x) << S_PCI_MODE_CLK)
+#define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK)
+
+#endif /* _CXGB_REGS_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
new file mode 100644
index 000000000000..0a511c4a0472
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -0,0 +1,2139 @@
+/*****************************************************************************
+ * *
+ * File: sge.c *
+ * $Revision: 1.26 $ *
+ * $Date: 2005/06/21 18:29:48 $ *
+ * Description: *
+ * DMA engine. *
+ * part of the Chelsio 10Gb Ethernet Driver. *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License, version 2, as *
+ * published by the Free Software Foundation. *
+ * *
+ * You should have received a copy of the GNU General Public License along *
+ * with this program; if not, write to the Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
+ * *
+ * http://www.chelsio.com *
+ * *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
+ * All rights reserved. *
+ * *
+ * Maintainers: maintainers@chelsio.com *
+ * *
+ * Authors: Dimitrios Michailidis <dm@chelsio.com> *
+ * Tina Yang <tainay@chelsio.com> *
+ * Felix Marti <felix@chelsio.com> *
+ * Scott Bardone <sbardone@chelsio.com> *
+ * Kurt Ottaway <kottaway@chelsio.com> *
+ * Frank DiMambro <frank@chelsio.com> *
+ * *
+ * History: *
+ * *
+ ****************************************************************************/
+
+#include "common.h"
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/ktime.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+
+#include "cpl5_cmd.h"
+#include "sge.h"
+#include "regs.h"
+#include "espi.h"
+
+/* This belongs in if_ether.h */
+#define ETH_P_CPL5 0xf
+
+#define SGE_CMDQ_N 2
+#define SGE_FREELQ_N 2
+#define SGE_CMDQ0_E_N 1024
+#define SGE_CMDQ1_E_N 128
+#define SGE_FREEL_SIZE 4096
+#define SGE_JUMBO_FREEL_SIZE 512
+#define SGE_FREEL_REFILL_THRESH 16
+#define SGE_RESPQ_E_N 1024
+#define SGE_INTRTIMER_NRES 1000
+#define SGE_RX_SM_BUF_SIZE 1536
+#define SGE_TX_DESC_MAX_PLEN 16384
+
+#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
+
+/*
+ * Period of the TX buffer reclaim timer. This timer does not need to run
+ * frequently as TX buffers are usually reclaimed by new TX packets.
+ */
+#define TX_RECLAIM_PERIOD (HZ / 4)
+
+#define M_CMD_LEN 0x7fffffff
+#define V_CMD_LEN(v) (v)
+#define G_CMD_LEN(v) ((v) & M_CMD_LEN)
+#define V_CMD_GEN1(v) ((v) << 31)
+#define V_CMD_GEN2(v) (v)
+#define F_CMD_DATAVALID (1 << 1)
+#define F_CMD_SOP (1 << 2)
+#define V_CMD_EOP(v) ((v) << 3)
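+
+/*
+ * Layout sketch (illustrative): CMD_LEN occupies the low 31 bits of a
+ * descriptor's len_gen word with CMD_GEN1 in its top bit, while GEN2,
+ * DATAVALID, SOP and EOP share the flags/gen2 word.  An entry belongs
+ * to the hardware once both generation bits equal the ring's current
+ * generation.
+ */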
+
+/*
+ * Command queue, receive buffer list, and response queue descriptors.
+ */
+#if defined(__BIG_ENDIAN_BITFIELD)
+struct cmdQ_e {
+ u32 addr_lo;
+ u32 len_gen;
+ u32 flags;
+ u32 addr_hi;
+};
+
+struct freelQ_e {
+ u32 addr_lo;
+ u32 len_gen;
+ u32 gen2;
+ u32 addr_hi;
+};
+
+struct respQ_e {
+ u32 Qsleeping : 4;
+ u32 Cmdq1CreditReturn : 5;
+ u32 Cmdq1DmaComplete : 5;
+ u32 Cmdq0CreditReturn : 5;
+ u32 Cmdq0DmaComplete : 5;
+ u32 FreelistQid : 2;
+ u32 CreditValid : 1;
+ u32 DataValid : 1;
+ u32 Offload : 1;
+ u32 Eop : 1;
+ u32 Sop : 1;
+ u32 GenerationBit : 1;
+ u32 BufferLength;
+};
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+struct cmdQ_e {
+ u32 len_gen;
+ u32 addr_lo;
+ u32 addr_hi;
+ u32 flags;
+};
+
+struct freelQ_e {
+ u32 len_gen;
+ u32 addr_lo;
+ u32 addr_hi;
+ u32 gen2;
+};
+
+struct respQ_e {
+ u32 BufferLength;
+ u32 GenerationBit : 1;
+ u32 Sop : 1;
+ u32 Eop : 1;
+ u32 Offload : 1;
+ u32 DataValid : 1;
+ u32 CreditValid : 1;
+ u32 FreelistQid : 2;
+ u32 Cmdq0DmaComplete : 5;
+ u32 Cmdq0CreditReturn : 5;
+ u32 Cmdq1DmaComplete : 5;
+ u32 Cmdq1CreditReturn : 5;
+ u32 Qsleeping : 4;
+};
+#endif
+
+/*
+ * SW Context Command and Freelist Queue Descriptors
+ */
+struct cmdQ_ce {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+struct freelQ_ce {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+/*
+ * SW command, freelist and response rings
+ */
+struct cmdQ {
+ unsigned long status; /* HW DMA fetch status */
+ unsigned int in_use; /* # of in-use command descriptors */
+ unsigned int size; /* # of descriptors */
+ unsigned int processed; /* total # of descs HW has processed */
+ unsigned int cleaned; /* total # of descs SW has reclaimed */
+ unsigned int stop_thres; /* SW TX queue suspend threshold */
+ u16 pidx; /* producer index (SW) */
+ u16 cidx; /* consumer index (HW) */
+ u8 genbit; /* current generation (=valid) bit */
+ u8 sop; /* is next entry start of packet? */
+ struct cmdQ_e *entries; /* HW command descriptor Q */
+ struct cmdQ_ce *centries; /* SW command context descriptor Q */
+ dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
+ spinlock_t lock; /* Lock to protect cmdQ enqueuing */
+};
+
+struct freelQ {
+ unsigned int credits; /* # of available RX buffers */
+ unsigned int size; /* free list capacity */
+ u16 pidx; /* producer index (SW) */
+ u16 cidx; /* consumer index (HW) */
+ u16 rx_buffer_size; /* Buffer size on this free list */
+ u16 dma_offset; /* DMA offset to align IP headers */
+ u16 recycleq_idx; /* skb recycle q to use */
+ u8 genbit; /* current generation (=valid) bit */
+ struct freelQ_e *entries; /* HW freelist descriptor Q */
+ struct freelQ_ce *centries; /* SW freelist context descriptor Q */
+ dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */
+};
+
+struct respQ {
+ unsigned int credits; /* credits to be returned to SGE */
+ unsigned int size; /* # of response Q descriptors */
+ u16 cidx; /* consumer index (SW) */
+ u8 genbit; /* current generation(=valid) bit */
+ struct respQ_e *entries; /* HW response descriptor Q */
+ dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */
+};
+
+/* Bit flags for cmdQ.status */
+enum {
+ CMDQ_STAT_RUNNING = 1, /* fetch engine is running */
+ CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */
+};
+
+/* T204 TX SW scheduler */
+
+/* Per T204 TX port */
+struct sched_port {
+ unsigned int avail; /* available bits - quota */
+ unsigned int drain_bits_per_1024ns; /* drain rate */
+ unsigned int speed; /* drain rate, mbps */
+ unsigned int mtu; /* mtu size */
+ struct sk_buff_head skbq; /* pending skbs */
+};
+
+/* Per T204 device */
+struct sched {
+ ktime_t last_updated; /* last time quotas were computed */
+ unsigned int max_avail; /* max bits to be sent to any port */
+ unsigned int port; /* port index (round robin ports) */
+ unsigned int num; /* num skbs in per port queues */
+ struct sched_port p[MAX_NPORTS];
+ struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
+};
+static void restart_sched(unsigned long);
+
+
+/*
+ * Main SGE data structure
+ *
+ * Interrupts are handled by a single CPU and it is likely that on an MP
+ * system the application is migrated to another CPU. In that scenario,
+ * we try to separate the RX (in irq context) and TX state in order to
+ * decrease memory contention.
+ */
+struct sge {
+ struct adapter *adapter; /* adapter backpointer */
+ struct net_device *netdev; /* netdevice backpointer */
+ struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
+ struct respQ respQ; /* response Q */
+ unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
+ unsigned int rx_pkt_pad; /* RX padding for L2 packets */
+ unsigned int jumbo_fl; /* jumbo freelist Q index */
+ unsigned int intrtimer_nres; /* no-resource interrupt timer */
+ unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */
+ struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
+ struct timer_list espibug_timer;
+ unsigned long espibug_timeout;
+ struct sk_buff *espibug_skb[MAX_NPORTS];
+ u32 sge_control; /* shadow value of sge control reg */
+ struct sge_intr_counts stats;
+ struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
+ struct sched *tx_sched;
+ struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
+};
+
+static const u8 ch_mac_addr[ETH_ALEN] = {
+ 0x0, 0x7, 0x43, 0x0, 0x0, 0x0
+};
+
+/*
+ * stop tasklet and free all pending skb's
+ */
+static void tx_sched_stop(struct sge *sge)
+{
+ struct sched *s = sge->tx_sched;
+ int i;
+
+ tasklet_kill(&s->sched_tsk);
+
+ for (i = 0; i < MAX_NPORTS; i++)
+ __skb_queue_purge(&s->p[i].skbq);
+}
+
+/*
+ * t1_sched_update_parms() is called when the MTU or link speed changes. It
+ * re-computes the scheduler parameters to cope with the change.
+ */
+unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
+ unsigned int mtu, unsigned int speed)
+{
+ struct sched *s = sge->tx_sched;
+ struct sched_port *p = &s->p[port];
+ unsigned int max_avail_segs;
+
+ pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed);
+ if (speed)
+ p->speed = speed;
+ if (mtu)
+ p->mtu = mtu;
+
+ if (speed || mtu) {
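+ /*
+ * Approximate payload drain rate: a port at `speed' Mbps moves
+ * `speed' bits per 1000 ns, so the 1024/1000 factor rescales
+ * that to bits per 1024 ns, and (mtu - 40) / (mtu + 50) is
+ * roughly the TCP payload fraction of each frame (40 bytes of
+ * IP + TCP headers, ~50 bytes of total per-frame overhead).
+ */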
+ unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
+ do_div(drain, (p->mtu + 50) * 1000);
+ p->drain_bits_per_1024ns = (unsigned int) drain;
+
+ if (p->speed < 1000)
+ p->drain_bits_per_1024ns =
+ 90 * p->drain_bits_per_1024ns / 100;
+ }
+
+ if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
+ p->drain_bits_per_1024ns -= 16;
+ s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
+ max_avail_segs = max(1U, 4096 / (p->mtu - 40));
+ } else {
+ s->max_avail = 16384;
+ max_avail_segs = max(1U, 9000 / (p->mtu - 40));
+ }
+
+ pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
+ "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
+ p->speed, s->max_avail, max_avail_segs,
+ p->drain_bits_per_1024ns);
+
+ return max_avail_segs * (p->mtu - 40);
+}
+
+#if 0
+
+/*
+ * t1_sched_set_max_avail_bytes() tells the scheduler the maximum amount of
+ * data that can be pushed per port.
+ */
+void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
+{
+ struct sched *s = sge->tx_sched;
+ unsigned int i;
+
+ s->max_avail = val;
+ for (i = 0; i < MAX_NPORTS; i++)
+ t1_sched_update_parms(sge, i, 0, 0);
+}
+
+/*
+ * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
+ * is draining.
+ */
+void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
+ unsigned int val)
+{
+ struct sched *s = sge->tx_sched;
+ struct sched_port *p = &s->p[port];
+ p->drain_bits_per_1024ns = val * 1024 / 1000;
+ t1_sched_update_parms(sge, port, 0, 0);
+}
+
+#endif /* 0 */
+
+
+/*
+ * get_clock() implements a ns clock (see ktime_get)
+ */
+static inline ktime_t get_clock(void)
+{
+ struct timespec ts;
+
+ ktime_get_ts(&ts);
+ return timespec_to_ktime(ts);
+}
+
+/*
+ * tx_sched_init() allocates resources and does basic initialization.
+ */
+static int tx_sched_init(struct sge *sge)
+{
+ struct sched *s;
+ int i;
+
+ s = kzalloc(sizeof(struct sched), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ pr_debug("tx_sched_init\n");
+ tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
+ sge->tx_sched = s;
+
+ for (i = 0; i < MAX_NPORTS; i++) {
+ skb_queue_head_init(&s->p[i].skbq);
+ t1_sched_update_parms(sge, i, 1500, 1000);
+ }
+
+ return 0;
+}
+
+/*
+ * sched_update_avail() computes the delta since the last time it was called
+ * and updates the per-port quota (the number of bits that may be sent to
+ * each port).
+ */
+static inline int sched_update_avail(struct sge *sge)
+{
+ struct sched *s = sge->tx_sched;
+ ktime_t now = get_clock();
+ unsigned int i;
+ long long delta_time_ns;
+
+ delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));
+
+ pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
+ if (delta_time_ns < 15000)
+ return 0;
+
+ for (i = 0; i < MAX_NPORTS; i++) {
+ struct sched_port *p = &s->p[i];
+ unsigned int delta_avail;
+
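+ /*
+ * The product is bits scaled by 2^10 (the rate is per 1024 ns);
+ * the extra >> 3 converts bits to bytes, so p->avail is a byte
+ * quota that sched_skb() compares against skb->len.
+ */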
+ delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
+ p->avail = min(p->avail + delta_avail, s->max_avail);
+ }
+
+ s->last_updated = now;
+
+ return 1;
+}
+
+/*
+ * sched_skb() is called from two different places. In the tx path, any
+ * packet generating load on an output port will call sched_skb()
+ * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
+ * context (skb == NULL).
+ * The scheduler only returns a skb (which will then be sent) if the
+ * length of the skb is <= the current quota of the output port.
+ */
+static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
+ unsigned int credits)
+{
+ struct sched *s = sge->tx_sched;
+ struct sk_buff_head *skbq;
+ unsigned int i, len, update = 1;
+
+ pr_debug("sched_skb %p\n", skb);
+ if (!skb) {
+ if (!s->num)
+ return NULL;
+ } else {
+ skbq = &s->p[skb->dev->if_port].skbq;
+ __skb_queue_tail(skbq, skb);
+ s->num++;
+ skb = NULL;
+ }
+
+ if (credits < MAX_SKB_FRAGS + 1)
+ goto out;
+
+again:
+ for (i = 0; i < MAX_NPORTS; i++) {
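+ /* round-robin port pick; relies on MAX_NPORTS being a power of 2 */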
+ s->port = (s->port + 1) & (MAX_NPORTS - 1);
+ skbq = &s->p[s->port].skbq;
+
+ skb = skb_peek(skbq);
+
+ if (!skb)
+ continue;
+
+ len = skb->len;
+ if (len <= s->p[s->port].avail) {
+ s->p[s->port].avail -= len;
+ s->num--;
+ __skb_unlink(skb, skbq);
+ goto out;
+ }
+ skb = NULL;
+ }
+
+ if (update-- && sched_update_avail(sge))
+ goto again;
+
+out:
+ /* If there are more pending skbs, we use the hardware to schedule us
+ * again.
+ */
+ if (s->num && !skb) {
+ struct cmdQ *q = &sge->cmdQ[0];
+ clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+ if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+ set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+ writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
+ }
+ }
+ pr_debug("sched_skb ret %p\n", skb);
+
+ return skb;
+}
+
+/*
+ * PIO to indicate that memory mapped Q contains valid descriptor(s).
+ */
+static inline void doorbell_pio(struct adapter *adapter, u32 val)
+{
+ wmb();
+ writel(val, adapter->regs + A_SG_DOORBELL);
+}
+
+/*
+ * Frees all RX buffers on the freelist Q. The caller must make sure that
+ * the SGE is turned off before calling this function.
+ */
+static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
+{
+ unsigned int cidx = q->cidx;
+
+ while (q->credits--) {
+ struct freelQ_ce *ce = &q->centries[cidx];
+
+ pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(ce->skb);
+ ce->skb = NULL;
+ if (++cidx == q->size)
+ cidx = 0;
+ }
+}
+
+/*
+ * Free RX free list and response queue resources.
+ */
+static void free_rx_resources(struct sge *sge)
+{
+ struct pci_dev *pdev = sge->adapter->pdev;
+ unsigned int size, i;
+
+ if (sge->respQ.entries) {
+ size = sizeof(struct respQ_e) * sge->respQ.size;
+ pci_free_consistent(pdev, size, sge->respQ.entries,
+ sge->respQ.dma_addr);
+ }
+
+ for (i = 0; i < SGE_FREELQ_N; i++) {
+ struct freelQ *q = &sge->freelQ[i];
+
+ if (q->centries) {
+ free_freelQ_buffers(pdev, q);
+ kfree(q->centries);
+ }
+ if (q->entries) {
+ size = sizeof(struct freelQ_e) * q->size;
+ pci_free_consistent(pdev, size, q->entries,
+ q->dma_addr);
+ }
+ }
+}
+
+/*
+ * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
+ * response queue.
+ */
+static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
+{
+ struct pci_dev *pdev = sge->adapter->pdev;
+ unsigned int size, i;
+
+ for (i = 0; i < SGE_FREELQ_N; i++) {
+ struct freelQ *q = &sge->freelQ[i];
+
+ q->genbit = 1;
+ q->size = p->freelQ_size[i];
+ q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
+ size = sizeof(struct freelQ_e) * q->size;
+ q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
+ if (!q->entries)
+ goto err_no_mem;
+
+ size = sizeof(struct freelQ_ce) * q->size;
+ q->centries = kzalloc(size, GFP_KERNEL);
+ if (!q->centries)
+ goto err_no_mem;
+ }
+
+ /*
+ * Calculate the buffer sizes for the two free lists. FL0 accommodates
+ * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
+ * including all the sk_buff overhead.
+ *
+ * Note: For T2 FL0 and FL1 are reversed.
+ */
+ sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
+ sizeof(struct cpl_rx_data) +
+ sge->freelQ[!sge->jumbo_fl].dma_offset;
+
+ size = (16 * 1024) -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
+
+ /*
+ * Setup which skb recycle Q should be used when recycling buffers from
+ * each free list.
+ */
+ sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
+ sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
+
+ sge->respQ.genbit = 1;
+ sge->respQ.size = SGE_RESPQ_E_N;
+ sge->respQ.credits = 0;
+ size = sizeof(struct respQ_e) * sge->respQ.size;
+ sge->respQ.entries =
+ pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
+ if (!sge->respQ.entries)
+ goto err_no_mem;
+ return 0;
+
+err_no_mem:
+ free_rx_resources(sge);
+ return -ENOMEM;
+}
+
+/*
+ * Reclaims n TX descriptors and frees the buffers associated with them.
+ */
+static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
+{
+ struct cmdQ_ce *ce;
+ struct pci_dev *pdev = sge->adapter->pdev;
+ unsigned int cidx = q->cidx;
+
+ q->in_use -= n;
+ ce = &q->centries[cidx];
+ while (n--) {
+ if (likely(dma_unmap_len(ce, dma_len))) {
+ pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
+ PCI_DMA_TODEVICE);
+ if (q->sop)
+ q->sop = 0;
+ }
+ if (ce->skb) {
+ dev_kfree_skb_any(ce->skb);
+ q->sop = 1;
+ }
+ ce++;
+ if (++cidx == q->size) {
+ cidx = 0;
+ ce = q->centries;
+ }
+ }
+ q->cidx = cidx;
+}
+
+/*
+ * Free TX resources.
+ *
+ * Assumes that SGE is stopped and all interrupts are disabled.
+ */
+static void free_tx_resources(struct sge *sge)
+{
+ struct pci_dev *pdev = sge->adapter->pdev;
+ unsigned int size, i;
+
+ for (i = 0; i < SGE_CMDQ_N; i++) {
+ struct cmdQ *q = &sge->cmdQ[i];
+
+ if (q->centries) {
+ if (q->in_use)
+ free_cmdQ_buffers(sge, q, q->in_use);
+ kfree(q->centries);
+ }
+ if (q->entries) {
+ size = sizeof(struct cmdQ_e) * q->size;
+ pci_free_consistent(pdev, size, q->entries,
+ q->dma_addr);
+ }
+ }
+}
+
+/*
+ * Allocates basic TX resources, consisting of memory mapped command Qs.
+ */
+static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
+{
+ struct pci_dev *pdev = sge->adapter->pdev;
+ unsigned int size, i;
+
+ for (i = 0; i < SGE_CMDQ_N; i++) {
+ struct cmdQ *q = &sge->cmdQ[i];
+
+ q->genbit = 1;
+ q->sop = 1;
+ q->size = p->cmdQ_size[i];
+ q->in_use = 0;
+ q->status = 0;
+ q->processed = q->cleaned = 0;
+ q->stop_thres = 0;
+ spin_lock_init(&q->lock);
+ size = sizeof(struct cmdQ_e) * q->size;
+ q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
+ if (!q->entries)
+ goto err_no_mem;
+
+ size = sizeof(struct cmdQ_ce) * q->size;
+ q->centries = kzalloc(size, GFP_KERNEL);
+ if (!q->centries)
+ goto err_no_mem;
+ }
+
+ /*
+ * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
+ * only. For queue 0 set the stop threshold so we can handle one more
+ * packet from each port (MAX_SKB_FRAGS + 1 descriptors per packet).
+ * Queue 1 never suspends, nor do we reserve space on it for Ethernet
+ * packets.
+ */
+ sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
+ (MAX_SKB_FRAGS + 1);
+ return 0;
+
+err_no_mem:
+ free_tx_resources(sge);
+ return -ENOMEM;
+}
+
+static inline void setup_ring_params(struct adapter *adapter, u64 addr,
+ u32 size, int base_reg_lo,
+ int base_reg_hi, int size_reg)
+{
+ writel((u32)addr, adapter->regs + base_reg_lo);
+ writel(addr >> 32, adapter->regs + base_reg_hi);
+ writel(size, adapter->regs + size_reg);
+}
+
+/*
+ * Enable/disable VLAN acceleration.
+ */
+void t1_vlan_mode(struct adapter *adapter, u32 features)
+{
+ struct sge *sge = adapter->sge;
+
+ if (features & NETIF_F_HW_VLAN_RX)
+ sge->sge_control |= F_VLAN_XTRACT;
+ else
+ sge->sge_control &= ~F_VLAN_XTRACT;
+ if (adapter->open_device_map) {
+ writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
+ readl(adapter->regs + A_SG_CONTROL); /* flush */
+ }
+}
+
+/*
+ * Programs the various SGE registers. The engine is not yet enabled at
+ * this point, but sge->sge_control is set up and ready to go.
+ */
+static void configure_sge(struct sge *sge, struct sge_params *p)
+{
+ struct adapter *ap = sge->adapter;
+
+ writel(0, ap->regs + A_SG_CONTROL);
+ setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
+ A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
+ setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
+ A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
+ setup_ring_params(ap, sge->freelQ[0].dma_addr,
+ sge->freelQ[0].size, A_SG_FL0BASELWR,
+ A_SG_FL0BASEUPR, A_SG_FL0SIZE);
+ setup_ring_params(ap, sge->freelQ[1].dma_addr,
+ sge->freelQ[1].size, A_SG_FL1BASELWR,
+ A_SG_FL1BASEUPR, A_SG_FL1SIZE);
+
+ /* The threshold comparison uses <. */
+ writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
+
+ setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
+ A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
+ writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
+
+ sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
+ F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
+ V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
+ V_RX_PKT_OFFSET(sge->rx_pkt_pad);
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ sge->sge_control |= F_ENABLE_BIG_ENDIAN;
+#endif
+
+ /* Initialize no-resource timer */
+ sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
+
+ t1_sge_set_coalesce_params(sge, p);
+}
+
+/*
+ * Return the payload capacity of the jumbo free-list buffers.
+ */
+static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
+{
+ return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
+ sge->freelQ[sge->jumbo_fl].dma_offset -
+ sizeof(struct cpl_rx_data);
+}
+
+/*
+ * Frees all SGE related resources and the sge structure itself
+ */
+void t1_sge_destroy(struct sge *sge)
+{
+ int i;
+
+ for_each_port(sge->adapter, i)
+ free_percpu(sge->port_stats[i]);
+
+ kfree(sge->tx_sched);
+ free_tx_resources(sge);
+ free_rx_resources(sge);
+ kfree(sge);
+}
+
+/*
+ * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
+ * context Q) until the Q is full or alloc_skb fails.
+ *
+ * It is possible that the generation bits already match, indicating that the
+ * buffer is already valid and nothing needs to be done. This happens when we
+ * copied a received buffer into a new sk_buff during the interrupt processing.
+ *
+ * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
+ * we specify an RX_OFFSET in order to make sure that the IP header is 4B
+ * aligned.
+ */
+static void refill_free_list(struct sge *sge, struct freelQ *q)
+{
+ struct pci_dev *pdev = sge->adapter->pdev;
+ struct freelQ_ce *ce = &q->centries[q->pidx];
+ struct freelQ_e *e = &q->entries[q->pidx];
+ unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
+
+ while (q->credits < q->size) {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+
+ skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
+ if (!skb)
+ break;
+
+ skb_reserve(skb, q->dma_offset);
+ mapping = pci_map_single(pdev, skb->data, dma_len,
+ PCI_DMA_FROMDEVICE);
+ skb_reserve(skb, sge->rx_pkt_pad);
+
+ ce->skb = skb;
+ dma_unmap_addr_set(ce, dma_addr, mapping);
+ dma_unmap_len_set(ce, dma_len, dma_len);
+ e->addr_lo = (u32)mapping;
+ e->addr_hi = (u64)mapping >> 32;
+ e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
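+ /*
+ * Publish gen2 only after the barrier below; the entry becomes
+ * hardware-owned once both generation bits match, so this
+ * ordering ensures the HW never sees a half-written descriptor.
+ */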
+ wmb();
+ e->gen2 = V_CMD_GEN2(q->genbit);
+
+ e++;
+ ce++;
+ if (++q->pidx == q->size) {
+ q->pidx = 0;
+ q->genbit ^= 1;
+ ce = q->centries;
+ e = q->entries;
+ }
+ q->credits++;
+ }
+}
+
+/*
+ * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
+ * of both rings, we go into 'few interrupt mode' in order to give the system
+ * time to free up resources.
+ */
+static void freelQs_empty(struct sge *sge)
+{
+ struct adapter *adapter = sge->adapter;
+ u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
+ u32 irqholdoff_reg;
+
+ refill_free_list(sge, &sge->freelQ[0]);
+ refill_free_list(sge, &sge->freelQ[1]);
+
+ if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
+ sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
+ irq_reg |= F_FL_EXHAUSTED;
+ irqholdoff_reg = sge->fixed_intrtimer;
+ } else {
+ /* Clear the F_FL_EXHAUSTED interrupts for now */
+ irq_reg &= ~F_FL_EXHAUSTED;
+ irqholdoff_reg = sge->intrtimer_nres;
+ }
+ writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
+ writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
+
+ /* We reenable the Qs to force a freelist GTS interrupt later */
+ doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
+}
+
+#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
+#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
+#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
+ F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
+
+/*
+ * Disable SGE Interrupts
+ */
+void t1_sge_intr_disable(struct sge *sge)
+{
+ u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
+
+ writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
+ writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
+}
+
+/*
+ * Enable SGE interrupts.
+ */
+void t1_sge_intr_enable(struct sge *sge)
+{
+ u32 en = SGE_INT_ENABLE;
+ u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
+
+ if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
+ en &= ~F_PACKET_TOO_BIG;
+ writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
+ writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
+}
+
+/*
+ * Clear SGE interrupts.
+ */
+void t1_sge_intr_clear(struct sge *sge)
+{
+ writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
+ writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
+}
+
+/*
+ * SGE 'Error' interrupt handler
+ */
+int t1_sge_intr_error_handler(struct sge *sge)
+{
+ struct adapter *adapter = sge->adapter;
+ u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
+
+ if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
+ cause &= ~F_PACKET_TOO_BIG;
+ if (cause & F_RESPQ_EXHAUSTED)
+ sge->stats.respQ_empty++;
+ if (cause & F_RESPQ_OVERFLOW) {
+ sge->stats.respQ_overflow++;
+ pr_alert("%s: SGE response queue overflow\n",
+ adapter->name);
+ }
+ if (cause & F_FL_EXHAUSTED) {
+ sge->stats.freelistQ_empty++;
+ freelQs_empty(sge);
+ }
+ if (cause & F_PACKET_TOO_BIG) {
+ sge->stats.pkt_too_big++;
+ pr_alert("%s: SGE max packet size exceeded\n",
+ adapter->name);
+ }
+ if (cause & F_PACKET_MISMATCH) {
+ sge->stats.pkt_mismatch++;
+ pr_alert("%s: SGE packet mismatch\n", adapter->name);
+ }
+ if (cause & SGE_INT_FATAL)
+ t1_fatal_err(adapter);
+
+ writel(cause, adapter->regs + A_SG_INT_CAUSE);
+ return 0;
+}
+
+const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
+{
+ return &sge->stats;
+}
+
+void t1_sge_get_port_stats(const struct sge *sge, int port,
+ struct sge_port_stats *ss)
+{
+ int cpu;
+
+ memset(ss, 0, sizeof(*ss));
+ for_each_possible_cpu(cpu) {
+ struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
+
+ ss->rx_cso_good += st->rx_cso_good;
+ ss->tx_cso += st->tx_cso;
+ ss->tx_tso += st->tx_tso;
+ ss->tx_need_hdrroom += st->tx_need_hdrroom;
+ ss->vlan_xtract += st->vlan_xtract;
+ ss->vlan_insert += st->vlan_insert;
+ }
+}
+
+/**
+ * recycle_fl_buf - recycle a free list buffer
+ * @fl: the free list
+ * @idx: index of buffer to recycle
+ *
+ * Recycles the specified buffer on the given free list by adding it at
+ * the next available slot on the list.
+ */
+static void recycle_fl_buf(struct freelQ *fl, int idx)
+{
+ struct freelQ_e *from = &fl->entries[idx];
+ struct freelQ_e *to = &fl->entries[fl->pidx];
+
+ fl->centries[fl->pidx] = fl->centries[idx];
+ to->addr_lo = from->addr_lo;
+ to->addr_hi = from->addr_hi;
+ to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
+ wmb();
+ to->gen2 = V_CMD_GEN2(fl->genbit);
+ fl->credits++;
+
+ if (++fl->pidx == fl->size) {
+ fl->pidx = 0;
+ fl->genbit ^= 1;
+ }
+}
+
+static int copybreak __read_mostly = 256;
+module_param(copybreak, int, 0);
+MODULE_PARM_DESC(copybreak, "Receive copy threshold");
+
+/**
+ * get_packet - return the next ingress packet buffer
+ * @pdev: the PCI device that received the packet
+ * @fl: the SGE free list holding the packet
+ * @len: the actual packet length, excluding any SGE padding
+ *
+ * Get the next packet from a free list and complete setup of the
+ * sk_buff. If the packet is small we make a copy and recycle the
+ * original buffer, otherwise we use the original buffer itself. The
+ * packet is dropped and its buffer recycled when the free list is nearly
+ * exhausted (fewer than two credits remain) and the packet cannot be
+ * copied.
+ */
+static inline struct sk_buff *get_packet(struct pci_dev *pdev,
+ struct freelQ *fl, unsigned int len)
+{
+ struct sk_buff *skb;
+ const struct freelQ_ce *ce = &fl->centries[fl->cidx];
+
+ if (len < copybreak) {
+ skb = alloc_skb(len + 2, GFP_ATOMIC);
+ if (!skb)
+ goto use_orig_buf;
+
+ skb_reserve(skb, 2); /* align IP header */
+ skb_put(skb, len);
+ pci_dma_sync_single_for_cpu(pdev,
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
+ PCI_DMA_FROMDEVICE);
+ skb_copy_from_linear_data(ce->skb, skb->data, len);
+ pci_dma_sync_single_for_device(pdev,
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
+ PCI_DMA_FROMDEVICE);
+ recycle_fl_buf(fl, fl->cidx);
+ return skb;
+ }
+
+use_orig_buf:
+ if (fl->credits < 2) {
+ recycle_fl_buf(fl, fl->cidx);
+ return NULL;
+ }
+
+ pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+ skb = ce->skb;
+ prefetch(skb->data);
+
+ skb_put(skb, len);
+ return skb;
+}
+
+/**
+ * unexpected_offload - handle an unexpected offload packet
+ * @adapter: the adapter
+ * @fl: the free list that received the packet
+ *
+ * Called when we receive an unexpected offload packet (e.g., the TOE
+ * function is disabled or the card is a NIC). Prints a message and
+ * recycles the buffer.
+ */
+static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
+{
+ struct freelQ_ce *ce = &fl->centries[fl->cidx];
+ struct sk_buff *skb = ce->skb;
+
+ pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+ pr_err("%s: unexpected offload packet, cmd %u\n",
+ adapter->name, *skb->data);
+ recycle_fl_buf(fl, fl->cidx);
+}
+
+/*
+ * T1/T2 SGE limits the maximum DMA size per TX descriptor to
+ * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
+ * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
+ * Note that the *_large_page_tx_descs stuff will be optimized out when
+ * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
+ *
+ * compute_large_page_tx_descs() computes how many additional descriptors are
+ * required to break down the stack's request.
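+ *
+ * Illustration: with 64KB pages, a 20KB contiguous piece needs one
+ * additional descriptor (16KB + 4KB).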
+ */
+static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
+{
+ unsigned int count = 0;
+
+ if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
+ unsigned int nfrags = skb_shinfo(skb)->nr_frags;
+ unsigned int i, len = skb_headlen(skb);
+ while (len > SGE_TX_DESC_MAX_PLEN) {
+ count++;
+ len -= SGE_TX_DESC_MAX_PLEN;
+ }
+ for (i = 0; nfrags--; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ len = frag->size;
+ while (len > SGE_TX_DESC_MAX_PLEN) {
+ count++;
+ len -= SGE_TX_DESC_MAX_PLEN;
+ }
+ }
+ }
+ return count;
+}
+
+/*
+ * Write a cmdQ entry.
+ *
+ * Since this function writes the 'flags' field, it must not be used to
+ * write the first cmdQ entry.
+ */
+static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
+ unsigned int len, unsigned int gen,
+ unsigned int eop)
+{
+ BUG_ON(len > SGE_TX_DESC_MAX_PLEN);
+
+ e->addr_lo = (u32)mapping;
+ e->addr_hi = (u64)mapping >> 32;
+ e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
+ e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
+}
+
+/*
+ * See comment for previous function.
+ *
+ * write_large_page_tx_descs() writes additional SGE tx descriptors if
+ * *desc_len exceeds HW's capability.
+ */
+static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
+ struct cmdQ_e **e,
+ struct cmdQ_ce **ce,
+ unsigned int *gen,
+ dma_addr_t *desc_mapping,
+ unsigned int *desc_len,
+ unsigned int nfrags,
+ struct cmdQ *q)
+{
+ if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
+ struct cmdQ_e *e1 = *e;
+ struct cmdQ_ce *ce1 = *ce;
+
+ while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
+ *desc_len -= SGE_TX_DESC_MAX_PLEN;
+ write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
+ *gen, nfrags == 0 && *desc_len == 0);
+ ce1->skb = NULL;
+ dma_unmap_len_set(ce1, dma_len, 0);
+ *desc_mapping += SGE_TX_DESC_MAX_PLEN;
+ if (*desc_len) {
+ ce1++;
+ e1++;
+ if (++pidx == q->size) {
+ pidx = 0;
+ *gen ^= 1;
+ ce1 = q->centries;
+ e1 = q->entries;
+ }
+ }
+ }
+ *e = e1;
+ *ce = ce1;
+ }
+ return pidx;
+}
+
+/*
+ * Write the command descriptors to transmit the given skb starting at
+ * descriptor pidx with the given generation.
+ */
+static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
+ unsigned int pidx, unsigned int gen,
+ struct cmdQ *q)
+{
+ dma_addr_t mapping, desc_mapping;
+ struct cmdQ_e *e, *e1;
+ struct cmdQ_ce *ce;
+ unsigned int i, flags, first_desc_len, desc_len,
+ nfrags = skb_shinfo(skb)->nr_frags;
+
+ e = e1 = &q->entries[pidx];
+ ce = &q->centries[pidx];
+
+ mapping = pci_map_single(adapter->pdev, skb->data,
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+
+ desc_mapping = mapping;
+ desc_len = skb_headlen(skb);
+
+ flags = F_CMD_DATAVALID | F_CMD_SOP |
+ V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
+ V_CMD_GEN2(gen);
+ first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
+ desc_len : SGE_TX_DESC_MAX_PLEN;
+ e->addr_lo = (u32)desc_mapping;
+ e->addr_hi = (u64)desc_mapping >> 32;
+ e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
+ ce->skb = NULL;
+ dma_unmap_len_set(ce, dma_len, 0);
+
+ if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
+ desc_len > SGE_TX_DESC_MAX_PLEN) {
+ desc_mapping += first_desc_len;
+ desc_len -= first_desc_len;
+ e1++;
+ ce++;
+ if (++pidx == q->size) {
+ pidx = 0;
+ gen ^= 1;
+ e1 = q->entries;
+ ce = q->centries;
+ }
+ pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
+ &desc_mapping, &desc_len,
+ nfrags, q);
+
+ if (likely(desc_len))
+ write_tx_desc(e1, desc_mapping, desc_len, gen,
+ nfrags == 0);
+ }
+
+ ce->skb = NULL;
+ dma_unmap_addr_set(ce, dma_addr, mapping);
+ dma_unmap_len_set(ce, dma_len, skb_headlen(skb));
+
+ for (i = 0; nfrags--; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ e1++;
+ ce++;
+ if (++pidx == q->size) {
+ pidx = 0;
+ gen ^= 1;
+ e1 = q->entries;
+ ce = q->centries;
+ }
+
+ mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0,
+ frag->size, DMA_TO_DEVICE);
+ desc_mapping = mapping;
+ desc_len = frag->size;
+
+ pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
+ &desc_mapping, &desc_len,
+ nfrags, q);
+ if (likely(desc_len))
+ write_tx_desc(e1, desc_mapping, desc_len, gen,
+ nfrags == 0);
+ ce->skb = NULL;
+ dma_unmap_addr_set(ce, dma_addr, mapping);
+ dma_unmap_len_set(ce, dma_len, frag->size);
+ }
+ ce->skb = skb;
+ wmb();
+ e->flags = flags;
+}
+
+/*
+ * Clean up completed Tx buffers.
+ */
+static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
+{
+ unsigned int reclaim = q->processed - q->cleaned;
+
+ if (reclaim) {
+ pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
+ q->processed, q->cleaned);
+ free_cmdQ_buffers(sge, q, reclaim);
+ q->cleaned += reclaim;
+ }
+}
+
+/*
+ * Called from tasklet. Checks the scheduler for any
+ * pending skbs that can be sent.
+ */
+static void restart_sched(unsigned long arg)
+{
+ struct sge *sge = (struct sge *) arg;
+ struct adapter *adapter = sge->adapter;
+ struct cmdQ *q = &sge->cmdQ[0];
+ struct sk_buff *skb;
+ unsigned int credits, queued_skb = 0;
+
+ spin_lock(&q->lock);
+ reclaim_completed_tx(sge, q);
+
+ credits = q->size - q->in_use;
+ pr_debug("restart_sched credits=%d\n", credits);
+ while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
+ unsigned int genbit, pidx, count;
+ count = 1 + skb_shinfo(skb)->nr_frags;
+ count += compute_large_page_tx_descs(skb);
+ q->in_use += count;
+ genbit = q->genbit;
+ pidx = q->pidx;
+ q->pidx += count;
+ if (q->pidx >= q->size) {
+ q->pidx -= q->size;
+ q->genbit ^= 1;
+ }
+ write_tx_descs(adapter, skb, pidx, genbit, q);
+ credits = q->size - q->in_use;
+ queued_skb = 1;
+ }
+
+ if (queued_skb) {
+ clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+ if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+ set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+ writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+ }
+ }
+ spin_unlock(&q->lock);
+}
+
+/**
+ * sge_rx - process an ingress ethernet packet
+ * @sge: the sge structure
+ * @fl: the free list that contains the packet buffer
+ * @len: the packet length
+ *
+ * Process an ingress ethernet packet and deliver it to the stack.
+ */
+static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
+{
+ struct sk_buff *skb;
+ const struct cpl_rx_pkt *p;
+ struct adapter *adapter = sge->adapter;
+ struct sge_port_stats *st;
+ struct net_device *dev;
+
+ skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
+ if (unlikely(!skb)) {
+ sge->stats.rx_drops++;
+ return;
+ }
+
+ p = (const struct cpl_rx_pkt *) skb->data;
+ if (p->iff >= adapter->params.nports) {
+ kfree_skb(skb);
+ return;
+ }
+ __skb_pull(skb, sizeof(*p));
+
+ st = this_cpu_ptr(sge->port_stats[p->iff]);
+ dev = adapter->port[p->iff].dev;
+
+ skb->protocol = eth_type_trans(skb, dev);
+ if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
+ skb->protocol == htons(ETH_P_IP) &&
+ (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
+ ++st->rx_cso_good;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else
+ skb_checksum_none_assert(skb);
+
+ if (p->vlan_valid) {
+ st->vlan_xtract++;
+ __vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
+ }
+ netif_receive_skb(skb);
+}
+
+/*
+ * Returns true if a command queue has enough available descriptors that
+ * we can resume Tx operation after temporarily disabling its packet queue.
+ */
+static inline int enough_free_Tx_descs(const struct cmdQ *q)
+{
+ unsigned int r = q->processed - q->cleaned;
+
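+ /*
+ * r is work the HW has completed but SW has not yet reclaimed;
+ * resume only if, net of that, under half the ring is in use.
+ */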
+ return q->in_use - r < (q->size >> 1);
+}
+
+/*
+ * Called when sufficient space has become available in the SGE command queues
+ * after the Tx packet schedulers have been suspended to restart the Tx path.
+ */
+static void restart_tx_queues(struct sge *sge)
+{
+ struct adapter *adap = sge->adapter;
+ int i;
+
+ if (!enough_free_Tx_descs(&sge->cmdQ[0]))
+ return;
+
+ for_each_port(adap, i) {
+ struct net_device *nd = adap->port[i].dev;
+
+ if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
+ netif_running(nd)) {
+ sge->stats.cmdQ_restarted[2]++;
+ netif_wake_queue(nd);
+ }
+ }
+}
+
+/*
+ * update_tx_info is called from the interrupt handler/NAPI to process the
+ * returned cmdQ0 credits and, when needed, restart suspended Tx queues.
+ */
+static unsigned int update_tx_info(struct adapter *adapter,
+ unsigned int flags,
+ unsigned int pr0)
+{
+ struct sge *sge = adapter->sge;
+ struct cmdQ *cmdq = &sge->cmdQ[0];
+
+ cmdq->processed += pr0;
+ if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
+ freelQs_empty(sge);
+ flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
+ }
+ if (flags & F_CMDQ0_ENABLE) {
+ clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
+
+ if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
+ !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
+ set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
+ writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+ }
+ if (sge->tx_sched)
+ tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
+
+ flags &= ~F_CMDQ0_ENABLE;
+ }
+
+ if (unlikely(sge->stopped_tx_queues != 0))
+ restart_tx_queues(sge);
+
+ return flags;
+}
+
+/*
+ * Process SGE responses, up to the supplied budget. Returns the number of
+ * responses processed (note that a non-positive budget processes nothing).
+ */
+static int process_responses(struct adapter *adapter, int budget)
+{
+ struct sge *sge = adapter->sge;
+ struct respQ *q = &sge->respQ;
+ struct respQ_e *e = &q->entries[q->cidx];
+ int done = 0;
+ unsigned int flags = 0;
+ unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
+
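+ /* An entry is HW-valid while its generation bit matches ours. */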
+ while (done < budget && e->GenerationBit == q->genbit) {
+ flags |= e->Qsleeping;
+
+ cmdq_processed[0] += e->Cmdq0CreditReturn;
+ cmdq_processed[1] += e->Cmdq1CreditReturn;
+
+ /* We batch updates to the TX side to avoid cacheline
+ * ping-pong of TX state information on MP where the sender
+ * might run on a different CPU than this function...
+ */
+ if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
+ flags = update_tx_info(adapter, flags, cmdq_processed[0]);
+ cmdq_processed[0] = 0;
+ }
+
+ if (unlikely(cmdq_processed[1] > 16)) {
+ sge->cmdQ[1].processed += cmdq_processed[1];
+ cmdq_processed[1] = 0;
+ }
+
+ if (likely(e->DataValid)) {
+ struct freelQ *fl = &sge->freelQ[e->FreelistQid];
+
+ BUG_ON(!e->Sop || !e->Eop);
+ if (unlikely(e->Offload))
+ unexpected_offload(adapter, fl);
+ else
+ sge_rx(sge, fl, e->BufferLength);
+
+ ++done;
+
+ /*
+ * Note: this depends on each packet consuming a
+ * single free-list buffer; cf. the BUG above.
+ */
+ if (++fl->cidx == fl->size)
+ fl->cidx = 0;
+ prefetch(fl->centries[fl->cidx].skb);
+
+ if (unlikely(--fl->credits <
+ fl->size - SGE_FREEL_REFILL_THRESH))
+ refill_free_list(sge, fl);
+ } else
+ sge->stats.pure_rsps++;
+
+ e++;
+ if (unlikely(++q->cidx == q->size)) {
+ q->cidx = 0;
+ q->genbit ^= 1;
+ e = q->entries;
+ }
+ prefetch(e);
+
+ if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
+ writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
+ q->credits = 0;
+ }
+ }
+
+ flags = update_tx_info(adapter, flags, cmdq_processed[0]);
+ sge->cmdQ[1].processed += cmdq_processed[1];
+
+ return done;
+}
+
+static inline int responses_pending(const struct adapter *adapter)
+{
+ const struct respQ *Q = &adapter->sge->respQ;
+ const struct respQ_e *e = &Q->entries[Q->cidx];
+
+ return e->GenerationBit == Q->genbit;
+}
+
+/*
+ * A simpler version of process_responses() that handles only pure (i.e.,
+ * non data-carrying) responses. Such responses are too lightweight to justify
+ * calling a softirq when using NAPI, so we handle them specially in hard
+ * interrupt context. The function is called with a pointer to a response,
+ * which the caller must ensure is a valid pure response. Returns 1 if it
+ * encounters a valid data-carrying response, 0 otherwise.
+ */
+static int process_pure_responses(struct adapter *adapter)
+{
+ struct sge *sge = adapter->sge;
+ struct respQ *q = &sge->respQ;
+ struct respQ_e *e = &q->entries[q->cidx];
+ const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
+ unsigned int flags = 0;
+ unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
+
+ prefetch(fl->centries[fl->cidx].skb);
+ if (e->DataValid)
+ return 1;
+
+ do {
+ flags |= e->Qsleeping;
+
+ cmdq_processed[0] += e->Cmdq0CreditReturn;
+ cmdq_processed[1] += e->Cmdq1CreditReturn;
+
+ e++;
+ if (unlikely(++q->cidx == q->size)) {
+ q->cidx = 0;
+ q->genbit ^= 1;
+ e = q->entries;
+ }
+ prefetch(e);
+
+ if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
+ writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
+ q->credits = 0;
+ }
+ sge->stats.pure_rsps++;
+ } while (e->GenerationBit == q->genbit && !e->DataValid);
+
+ flags = update_tx_info(adapter, flags, cmdq_processed[0]);
+ sge->cmdQ[1].processed += cmdq_processed[1];
+
+ return e->GenerationBit == q->genbit;
+}
+
+/*
+ * Handler for new data events when using NAPI. This does not need any locking
+ * or protection from interrupts as data interrupts are off at this point and
+ * other adapter interrupts do not interfere.
+ */
+int t1_poll(struct napi_struct *napi, int budget)
+{
+ struct adapter *adapter = container_of(napi, struct adapter, napi);
+ int work_done = process_responses(adapter, budget);
+
+ if (likely(work_done < budget)) {
+ napi_complete(napi);
+ writel(adapter->sge->respQ.cidx,
+ adapter->regs + A_SG_SLEEPING);
+ }
+ return work_done;
+}
+
+irqreturn_t t1_interrupt(int irq, void *data)
+{
+ struct adapter *adapter = data;
+ struct sge *sge = adapter->sge;
+ int handled;
+
+ if (likely(responses_pending(adapter))) {
+ writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+
+ if (napi_schedule_prep(&adapter->napi)) {
+ if (process_pure_responses(adapter))
+ __napi_schedule(&adapter->napi);
+ else {
+ /* no data, no NAPI needed */
+ writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
+ /* undo schedule_prep */
+ napi_enable(&adapter->napi);
+ }
+ }
+ return IRQ_HANDLED;
+ }
+
+ spin_lock(&adapter->async_lock);
+ handled = t1_slow_intr_handler(adapter);
+ spin_unlock(&adapter->async_lock);
+
+ if (!handled)
+ sge->stats.unhandled_irqs++;
+
+ return IRQ_RETVAL(handled != 0);
+}
+
+/*
+ * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
+ *
+ * The code figures out how many entries the sk_buff will require in the
+ * cmdQ and updates the cmdQ data structure with the state once the enqueue
+ * has completed. Then it doesn't access the global structure anymore, but
+ * uses the corresponding fields on the stack. In conjunction with a spinlock
+ * around that code, we can make the function reentrant without holding the
+ * lock when we actually enqueue (which might be expensive, especially on
+ * architectures with IO MMUs).
+ *
+ * This runs with softirqs disabled.
+ */
+static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
+ unsigned int qid, struct net_device *dev)
+{
+ struct sge *sge = adapter->sge;
+ struct cmdQ *q = &sge->cmdQ[qid];
+ unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
+
+ if (!spin_trylock(&q->lock))
+ return NETDEV_TX_LOCKED;
+
+ reclaim_completed_tx(sge, q);
+
+ pidx = q->pidx;
+ credits = q->size - q->in_use;
+ count = 1 + skb_shinfo(skb)->nr_frags;
+ count += compute_large_page_tx_descs(skb);
+
+ /* Ethernet packet */
+ if (unlikely(credits < count)) {
+ if (!netif_queue_stopped(dev)) {
+ netif_stop_queue(dev);
+ set_bit(dev->if_port, &sge->stopped_tx_queues);
+ sge->stats.cmdQ_full[2]++;
+ pr_err("%s: Tx ring full while queue awake!\n",
+ adapter->name);
+ }
+ spin_unlock(&q->lock);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(credits - count < q->stop_thres)) {
+ netif_stop_queue(dev);
+ set_bit(dev->if_port, &sge->stopped_tx_queues);
+ sge->stats.cmdQ_full[2]++;
+ }
+
+ /* T204 cmdQ0 skbs that are destined for a certain port have to go
+ * through the scheduler.
+ */
+ if (sge->tx_sched && !qid && skb->dev) {
+use_sched:
+ use_sched_skb = 1;
+ /* Note that the scheduler might return a different skb than
+ * the one passed in.
+ */
+ skb = sched_skb(sge, skb, credits);
+ if (!skb) {
+ spin_unlock(&q->lock);
+ return NETDEV_TX_OK;
+ }
+ pidx = q->pidx;
+ count = 1 + skb_shinfo(skb)->nr_frags;
+ count += compute_large_page_tx_descs(skb);
+ }
+
+ q->in_use += count;
+ genbit = q->genbit;
+ pidx = q->pidx;
+ q->pidx += count;
+ if (q->pidx >= q->size) {
+ q->pidx -= q->size;
+ q->genbit ^= 1;
+ }
+ spin_unlock(&q->lock);
+
+ write_tx_descs(adapter, skb, pidx, genbit, q);
+
+ /*
+ * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
+ * the doorbell if the Q is asleep. There is a natural race: the hardware
+ * may go to sleep just after we checked, but in that case the interrupt
+ * handler will detect the outstanding TX packet and ring the doorbell
+ * for us.
+ */
+ if (qid)
+ doorbell_pio(adapter, F_CMDQ1_ENABLE);
+ else {
+ clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+ if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+ set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+ writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+ }
+ }
+
+ if (use_sched_skb) {
+ if (spin_trylock(&q->lock)) {
+ credits = q->size - q->in_use;
+ skb = NULL;
+ goto use_sched;
+ }
+ }
+ return NETDEV_TX_OK;
+}
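+
+/*
+ * Editorial sketch (not driver code): the descriptor-reservation step that
+ * t1_sge_tx performs under q->lock, reduced to simplified types.  The
+ * struct and function names below are illustrative only.
+ */
+#if 0
+struct ring { unsigned int size, in_use, pidx, genbit; };
+
+/* Reserve @count descriptors; report where to write and with which genbit. */
+static int ring_reserve(struct ring *q, unsigned int count,
+                        unsigned int *pidx, unsigned int *genbit)
+{
+        if (q->size - q->in_use < count)
+                return -1;                      /* out of credits */
+        q->in_use += count;
+        *pidx = q->pidx;                        /* descriptors start here */
+        *genbit = q->genbit;                    /* ... with this generation */
+        q->pidx += count;
+        if (q->pidx >= q->size) {               /* wrap: flip generation bit */
+                q->pidx -= q->size;
+                q->genbit ^= 1;
+        }
+        return 0;
+}
+#endif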
+
+#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
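+
+/*
+ * Editorial example (assuming CPL_ETH_II is 0 and CPL_ETH_II_VLAN is 1): an
+ * untagged frame with MSS 1460 packs to (1460 & 0x3FFF) | (0 << 14) =
+ * 0x05b4, while the VLAN-tagged variant additionally sets bit 14, giving
+ * 0x45b4.
+ */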
+
+/*
+ * eth_hdr_len - return the length of an Ethernet header
+ * @data: pointer to the start of the Ethernet header
+ *
+ * Returns the length of an Ethernet header, including optional VLAN tag.
+ */
+static inline int eth_hdr_len(const void *data)
+{
+ const struct ethhdr *e = data;
+
+ return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
+}
+
+/*
+ * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
+ */
+netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct adapter *adapter = dev->ml_priv;
+ struct sge *sge = adapter->sge;
+ struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
+ struct cpl_tx_pkt *cpl;
+ struct sk_buff *orig_skb = skb;
+ int ret;
+
+ if (skb->protocol == htons(ETH_P_CPL5))
+ goto send;
+
+ /*
+ * We are using a non-standard hard_header_len.
+ * Allocate more header room in the rare case that it is not big enough.
+ */
+ if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
+ skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
+ ++st->tx_need_hdrroom;
+ dev_kfree_skb_any(orig_skb);
+ if (!skb)
+ return NETDEV_TX_OK;
+ }
+
+ if (skb_shinfo(skb)->gso_size) {
+ int eth_type;
+ struct cpl_tx_pkt_lso *hdr;
+
+ ++st->tx_tso;
+
+ eth_type = skb_network_offset(skb) == ETH_HLEN ?
+ CPL_ETH_II : CPL_ETH_II_VLAN;
+
+ hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
+ hdr->opcode = CPL_TX_PKT_LSO;
+ hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
+ hdr->ip_hdr_words = ip_hdr(skb)->ihl;
+ hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
+ hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
+ skb_shinfo(skb)->gso_size));
+ hdr->len = htonl(skb->len - sizeof(*hdr));
+ cpl = (struct cpl_tx_pkt *)hdr;
+ } else {
+ /*
+ * Packets shorter than ETH_HLEN can break the MAC; drop them
+ * early. We may also get oversized packets because some parts
+ * of the kernel don't handle our unusual hard_header_len
+ * correctly; drop those too.
+ */
+ if (unlikely(skb->len < ETH_HLEN ||
+ skb->len > dev->mtu + eth_hdr_len(skb->data))) {
+ pr_debug("%s: packet size %d hdr %d mtu%d\n", dev->name,
+ skb->len, eth_hdr_len(skb->data), dev->mtu);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ ip_hdr(skb)->protocol == IPPROTO_UDP) {
+ if (unlikely(skb_checksum_help(skb))) {
+ pr_debug("%s: unable to do udp checksum\n", dev->name);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ /* We assume this catches the gratuitous ARP... and we'll use
+ * it to flush out stuck ESPI packets...
+ */
+ if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
+ if (skb->protocol == htons(ETH_P_ARP) &&
+ arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
+ adapter->sge->espibug_skb[dev->if_port] = skb;
+ /* We want to re-use this skb later. We
+ * simply bump the reference count and it
+ * will not be freed...
+ */
+ skb = skb_get(skb);
+ }
+ }
+
+ cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
+ cpl->opcode = CPL_TX_PKT;
+ cpl->ip_csum_dis = 1; /* SW calculates IP csum */
+ cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
+ /* the length field isn't used so don't bother setting it */
+
+ st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
+ }
+ cpl->iff = dev->if_port;
+
+ if (vlan_tx_tag_present(skb)) {
+ cpl->vlan_valid = 1;
+ cpl->vlan = htons(vlan_tx_tag_get(skb));
+ st->vlan_insert++;
+ } else
+ cpl->vlan_valid = 0;
+
+send:
+ ret = t1_sge_tx(skb, adapter, 0, dev);
+
+ /* If the transmit was busy and we reallocated the skb due to the
+ * headroom limit, silently discard it to avoid a leak.
+ */
+ if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
+ dev_kfree_skb_any(skb);
+ ret = NETDEV_TX_OK;
+ }
+ return ret;
+}
+
+/*
+ * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
+ */
+static void sge_tx_reclaim_cb(unsigned long data)
+{
+ int i;
+ struct sge *sge = (struct sge *)data;
+
+ for (i = 0; i < SGE_CMDQ_N; ++i) {
+ struct cmdQ *q = &sge->cmdQ[i];
+
+ if (!spin_trylock(&q->lock))
+ continue;
+
+ reclaim_completed_tx(sge, q);
+ if (i == 0 && q->in_use) { /* flush pending credits */
+ writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
+ }
+ spin_unlock(&q->lock);
+ }
+ mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+}
+
+/*
+ * Propagate changes of the SGE coalescing parameters to the HW.
+ */
+int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
+{
+ sge->fixed_intrtimer = p->rx_coalesce_usecs *
+ core_ticks_per_usec(sge->adapter);
+ writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
+ return 0;
+}
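+
+/*
+ * Editorial example (assuming core_ticks_per_usec() returns clock_core
+ * divided by 1,000,000): on a 125 MHz core, rx_coalesce_usecs = 50 programs
+ * 50 * 125 = 6250 core ticks into A_SG_INTRTIMER.
+ */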
+
+/*
+ * Allocates both RX and TX resources and configures the SGE. However,
+ * the hardware is not enabled yet.
+ */
+int t1_sge_configure(struct sge *sge, struct sge_params *p)
+{
+ if (alloc_rx_resources(sge, p))
+ return -ENOMEM;
+ if (alloc_tx_resources(sge, p)) {
+ free_rx_resources(sge);
+ return -ENOMEM;
+ }
+ configure_sge(sge, p);
+
+ /*
+ * Now that we have sized the free lists, calculate the payload
+ * capacity of the large buffers. Other parts of the driver use
+ * this to set the max offload coalescing size so that RX packets
+ * do not overflow our large buffers.
+ */
+ p->large_buf_capacity = jumbo_payload_capacity(sge);
+ return 0;
+}
+
+/*
+ * Disables the DMA engine.
+ */
+void t1_sge_stop(struct sge *sge)
+{
+ int i;
+ writel(0, sge->adapter->regs + A_SG_CONTROL);
+ readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+
+ if (is_T2(sge->adapter))
+ del_timer_sync(&sge->espibug_timer);
+
+ del_timer_sync(&sge->tx_reclaim_timer);
+ if (sge->tx_sched)
+ tx_sched_stop(sge);
+
+ for (i = 0; i < MAX_NPORTS; i++)
+ kfree_skb(sge->espibug_skb[i]);
+}
+
+/*
+ * Enables the DMA engine.
+ */
+void t1_sge_start(struct sge *sge)
+{
+ refill_free_list(sge, &sge->freelQ[0]);
+ refill_free_list(sge, &sge->freelQ[1]);
+
+ writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
+ doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
+ readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+
+ mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+
+ if (is_T2(sge->adapter))
+ mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
+}
+
+/*
+ * Callback for the T2 ESPI 'stuck packet feature' workaround.
+ */
+static void espibug_workaround_t204(unsigned long data)
+{
+ struct adapter *adapter = (struct adapter *)data;
+ struct sge *sge = adapter->sge;
+ unsigned int nports = adapter->params.nports;
+ u32 seop[MAX_NPORTS];
+
+ if (adapter->open_device_map & PORT_MASK) {
+ int i;
+
+ if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
+ return;
+
+ for (i = 0; i < nports; i++) {
+ struct sk_buff *skb = sge->espibug_skb[i];
+
+ if (!netif_running(adapter->port[i].dev) ||
+ netif_queue_stopped(adapter->port[i].dev) ||
+ !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
+ continue;
+
+ if (!skb->cb[0]) {
+ skb_copy_to_linear_data_offset(skb,
+ sizeof(struct cpl_tx_pkt),
+ ch_mac_addr,
+ ETH_ALEN);
+ skb_copy_to_linear_data_offset(skb,
+ skb->len - 10,
+ ch_mac_addr,
+ ETH_ALEN);
+ skb->cb[0] = 0xff;
+ }
+
+ /* bump the reference count so the skb is
+ * not freed once the DMA has completed.
+ */
+ skb = skb_get(skb);
+ t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
+ }
+ }
+ mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
+}
+
+static void espibug_workaround(unsigned long data)
+{
+ struct adapter *adapter = (struct adapter *)data;
+ struct sge *sge = adapter->sge;
+
+ if (netif_running(adapter->port[0].dev)) {
+ struct sk_buff *skb = sge->espibug_skb[0];
+ u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
+
+ if ((seop & 0xfff0fff) == 0xfff && skb) {
+ if (!skb->cb[0]) {
+ skb_copy_to_linear_data_offset(skb,
+ sizeof(struct cpl_tx_pkt),
+ ch_mac_addr,
+ ETH_ALEN);
+ skb_copy_to_linear_data_offset(skb,
+ skb->len - 10,
+ ch_mac_addr,
+ ETH_ALEN);
+ skb->cb[0] = 0xff;
+ }
+
+ /* bump the reference count so the skb is
+ * not freed once the DMA has completed.
+ */
+ skb = skb_get(skb);
+ t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
+ }
+ }
+ mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
+}
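+
+/*
+ * Editorial note: both workarounds recycle a cached gratuitous-ARP skb per
+ * port (captured in t1_start_xmit), restamp it with ch_mac_addr, and resend
+ * it through t1_sge_tx to unstick the ESPI block; skb_get() keeps the
+ * buffer alive across DMA completions so it can be reused on the next
+ * timer tick.
+ */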
+
+/*
+ * Creates a t1_sge structure and returns suggested resource parameters.
+ */
+struct sge * __devinit t1_sge_create(struct adapter *adapter,
+ struct sge_params *p)
+{
+ struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
+ int i;
+
+ if (!sge)
+ return NULL;
+
+ sge->adapter = adapter;
+ sge->netdev = adapter->port[0].dev;
+ sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
+ sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
+
+ for_each_port(adapter, i) {
+ sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
+ if (!sge->port_stats[i])
+ goto nomem_port;
+ }
+
+ init_timer(&sge->tx_reclaim_timer);
+ sge->tx_reclaim_timer.data = (unsigned long)sge;
+ sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
+
+ if (is_T2(sge->adapter)) {
+ init_timer(&sge->espibug_timer);
+
+ if (adapter->params.nports > 1) {
+ tx_sched_init(sge);
+ sge->espibug_timer.function = espibug_workaround_t204;
+ } else
+ sge->espibug_timer.function = espibug_workaround;
+ sge->espibug_timer.data = (unsigned long)sge->adapter;
+
+ sge->espibug_timeout = 1;
+ /* for T204, every 10ms */
+ if (adapter->params.nports > 1)
+ sge->espibug_timeout = HZ/100;
+ }
+
+ p->cmdQ_size[0] = SGE_CMDQ0_E_N;
+ p->cmdQ_size[1] = SGE_CMDQ1_E_N;
+ p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
+ p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
+ if (sge->tx_sched) {
+ if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
+ p->rx_coalesce_usecs = 15;
+ else
+ p->rx_coalesce_usecs = 50;
+ } else
+ p->rx_coalesce_usecs = 50;
+
+ p->coalesce_enable = 0;
+ p->sample_interval_usecs = 0;
+
+ return sge;
+nomem_port:
+ while (i >= 0) {
+ free_percpu(sge->port_stats[i]);
+ --i;
+ }
+ kfree(sge);
+ return NULL;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h
new file mode 100644
index 000000000000..e03980bcdd65
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.h
@@ -0,0 +1,94 @@
+/*****************************************************************************
+ * *
+ * File: sge.h *
+ * $Revision: 1.11 $ *
+ * $Date: 2005/06/21 22:10:55 $ *
+ * Description: *
+ * part of the Chelsio 10Gb Ethernet Driver. *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License, version 2, as *
+ * published by the Free Software Foundation. *
+ * *
+ * You should have received a copy of the GNU General Public License along *
+ * with this program; if not, write to the Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
+ * *
+ * http://www.chelsio.com *
+ * *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
+ * All rights reserved. *
+ * *
+ * Maintainers: maintainers@chelsio.com *
+ * *
+ * Authors: Dimitrios Michailidis <dm@chelsio.com> *
+ * Tina Yang <tainay@chelsio.com> *
+ * Felix Marti <felix@chelsio.com> *
+ * Scott Bardone <sbardone@chelsio.com> *
+ * Kurt Ottaway <kottaway@chelsio.com> *
+ * Frank DiMambro <frank@chelsio.com> *
+ * *
+ * History: *
+ * *
+ ****************************************************************************/
+
+#ifndef _CXGB_SGE_H_
+#define _CXGB_SGE_H_
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <asm/byteorder.h>
+
+struct sge_intr_counts {
+ unsigned int rx_drops; /* # of packets dropped due to no mem */
+ unsigned int pure_rsps; /* # of non-payload responses */
+ unsigned int unhandled_irqs; /* # of unhandled interrupts */
+ unsigned int respQ_empty; /* # times respQ empty */
+ unsigned int respQ_overflow; /* # respQ overflow (fatal) */
+ unsigned int freelistQ_empty; /* # times freelist empty */
+ unsigned int pkt_too_big; /* packet too large (fatal) */
+ unsigned int pkt_mismatch;
+ unsigned int cmdQ_full[3]; /* not HW IRQ, host cmdQ[] full */
+ unsigned int cmdQ_restarted[3];/* # of times cmdQ X was restarted */
+};
+
+struct sge_port_stats {
+ u64 rx_cso_good; /* # of successful RX csum offloads */
+ u64 tx_cso; /* # of TX checksum offloads */
+ u64 tx_tso; /* # of TSO requests */
+ u64 vlan_xtract; /* # of VLAN tag extractions */
+ u64 vlan_insert; /* # of VLAN tag insertions */
+ u64 tx_need_hdrroom; /* # of TX skbs in need of more header room */
+};
+
+struct sk_buff;
+struct net_device;
+struct adapter;
+struct sge_params;
+struct sge;
+
+struct sge *t1_sge_create(struct adapter *, struct sge_params *);
+int t1_sge_configure(struct sge *, struct sge_params *);
+int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
+void t1_sge_destroy(struct sge *);
+irqreturn_t t1_interrupt(int irq, void *cookie);
+int t1_poll(struct napi_struct *, int);
+
+netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void t1_vlan_mode(struct adapter *adapter, u32 features);
+void t1_sge_start(struct sge *);
+void t1_sge_stop(struct sge *);
+int t1_sge_intr_error_handler(struct sge *);
+void t1_sge_intr_enable(struct sge *);
+void t1_sge_intr_disable(struct sge *);
+void t1_sge_intr_clear(struct sge *);
+const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge);
+void t1_sge_get_port_stats(const struct sge *sge, int port, struct sge_port_stats *);
+unsigned int t1_sched_update_parms(struct sge *, unsigned int, unsigned int,
+ unsigned int);
+
+#endif /* _CXGB_SGE_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
new file mode 100644
index 000000000000..8a43c7e19701
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -0,0 +1,1130 @@
+/*****************************************************************************
+ * *
+ * File: subr.c *
+ * $Revision: 1.27 $ *
+ * $Date: 2005/06/22 01:08:36 $ *
+ * Description: *
+ * Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. *
+ * part of the Chelsio 10Gb Ethernet Driver. *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License, version 2, as *
+ * published by the Free Software Foundation. *
+ * *
+ * You should have received a copy of the GNU General Public License along *
+ * with this program; if not, write to the Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
+ * *
+ * http://www.chelsio.com *
+ * *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
+ * All rights reserved. *
+ * *
+ * Maintainers: maintainers@chelsio.com *
+ * *
+ * Authors: Dimitrios Michailidis <dm@chelsio.com> *
+ * Tina Yang <tainay@chelsio.com> *
+ * Felix Marti <felix@chelsio.com> *
+ * Scott Bardone <sbardone@chelsio.com> *
+ * Kurt Ottaway <kottaway@chelsio.com> *
+ * Frank DiMambro <frank@chelsio.com> *
+ * *
+ * History: *
+ * *
+ ****************************************************************************/
+
+#include "common.h"
+#include "elmer0.h"
+#include "regs.h"
+#include "gmac.h"
+#include "cphy.h"
+#include "sge.h"
+#include "tp.h"
+#include "espi.h"
+
+/**
+ * t1_wait_op_done - wait until an operation is completed
+ * @adapter: the adapter performing the operation
+ * @reg: the register to check for completion
+ * @mask: a single-bit field within @reg that indicates completion
+ * @polarity: the value of the field when the operation is completed
+ * @attempts: number of check iterations
+ * @delay: delay in usecs between iterations
+ *
+ * Wait until an operation is completed by checking a bit in a register
+ * up to @attempts times. Returns %0 if the operation completes and %1
+ * otherwise.
+ */
+static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity,
+ int attempts, int delay)
+{
+ while (1) {
+ u32 val = readl(adapter->regs + reg) & mask;
+
+ if (!!val == polarity)
+ return 0;
+ if (--attempts == 0)
+ return 1;
+ if (delay)
+ udelay(delay);
+ }
+}
+
+#define TPI_ATTEMPTS 50
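+
+/* Editorial note: combined with the 3 us poll delay used below, TPI_ATTEMPTS
+ * gives a TPI access roughly 150 us to complete before it is reported as
+ * failed.
+ */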
+
+/*
+ * Write a register over the TPI interface (unlocked and locked versions).
+ */
+int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
+{
+ int tpi_busy;
+
+ writel(addr, adapter->regs + A_TPI_ADDR);
+ writel(value, adapter->regs + A_TPI_WR_DATA);
+ writel(F_TPIWR, adapter->regs + A_TPI_CSR);
+
+ tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
+ TPI_ATTEMPTS, 3);
+ if (tpi_busy)
+ pr_alert("%s: TPI write to 0x%x failed\n",
+ adapter->name, addr);
+ return tpi_busy;
+}
+
+int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
+{
+ int ret;
+
+ spin_lock(&adapter->tpi_lock);
+ ret = __t1_tpi_write(adapter, addr, value);
+ spin_unlock(&adapter->tpi_lock);
+ return ret;
+}
+
+/*
+ * Read a register over the TPI interface (unlocked and locked versions).
+ */
+int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
+{
+ int tpi_busy;
+
+ writel(addr, adapter->regs + A_TPI_ADDR);
+ writel(0, adapter->regs + A_TPI_CSR);
+
+ tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
+ TPI_ATTEMPTS, 3);
+ if (tpi_busy)
+ pr_alert("%s: TPI read from 0x%x failed\n",
+ adapter->name, addr);
+ else
+ *valp = readl(adapter->regs + A_TPI_RD_DATA);
+ return tpi_busy;
+}
+
+int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
+{
+ int ret;
+
+ spin_lock(&adapter->tpi_lock);
+ ret = __t1_tpi_read(adapter, addr, valp);
+ spin_unlock(&adapter->tpi_lock);
+ return ret;
+}
+
+/*
+ * Set a TPI parameter.
+ */
+static void t1_tpi_par(adapter_t *adapter, u32 value)
+{
+ writel(V_TPIPAR(value), adapter->regs + A_TPI_PAR);
+}
+
+/*
+ * Called when a port's link settings change to propagate the new values to the
+ * associated PHY and MAC. After performing the common tasks it invokes an
+ * OS-specific handler.
+ */
+void t1_link_changed(adapter_t *adapter, int port_id)
+{
+ int link_ok, speed, duplex, fc;
+ struct cphy *phy = adapter->port[port_id].phy;
+ struct link_config *lc = &adapter->port[port_id].link_config;
+
+ phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
+
+ lc->speed = speed < 0 ? SPEED_INVALID : speed;
+ lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
+ if (!(lc->requested_fc & PAUSE_AUTONEG))
+ fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+
+ if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
+ /* Set MAC speed, duplex, and flow control to match PHY. */
+ struct cmac *mac = adapter->port[port_id].mac;
+
+ mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc);
+ lc->fc = (unsigned char)fc;
+ }
+ t1_link_negotiated(adapter, port_id, link_ok, speed, duplex, fc);
+}
+
+static int t1_pci_intr_handler(adapter_t *adapter)
+{
+ u32 pcix_cause;
+
+ pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause);
+
+ if (pcix_cause) {
+ pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE,
+ pcix_cause);
+ t1_fatal_err(adapter); /* PCI errors are fatal */
+ }
+ return 0;
+}
+
+#ifdef CONFIG_CHELSIO_T1_1G
+#include "fpga_defs.h"
+
+/*
+ * PHY interrupt handler for FPGA boards.
+ */
+static int fpga_phy_intr_handler(adapter_t *adapter)
+{
+ int p;
+ u32 cause = readl(adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE);
+
+ for_each_port(adapter, p)
+ if (cause & (1 << p)) {
+ struct cphy *phy = adapter->port[p].phy;
+ int phy_cause = phy->ops->interrupt_handler(phy);
+
+ if (phy_cause & cphy_cause_link_change)
+ t1_link_changed(adapter, p);
+ }
+ writel(cause, adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE);
+ return 0;
+}
+
+/*
+ * Slow path interrupt handler for FPGAs.
+ */
+static int fpga_slow_intr(adapter_t *adapter)
+{
+ u32 cause = readl(adapter->regs + A_PL_CAUSE);
+
+ cause &= ~F_PL_INTR_SGE_DATA;
+ if (cause & F_PL_INTR_SGE_ERR)
+ t1_sge_intr_error_handler(adapter->sge);
+
+ if (cause & FPGA_PCIX_INTERRUPT_GMAC)
+ fpga_phy_intr_handler(adapter);
+
+ if (cause & FPGA_PCIX_INTERRUPT_TP) {
+ /*
+ * FPGA doesn't support MC4 interrupts and it requires
+ * this odd layer of indirection for MC5.
+ */
+ u32 tp_cause = readl(adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE);
+
+ /* Clear TP interrupt */
+ writel(tp_cause, adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE);
+ }
+ if (cause & FPGA_PCIX_INTERRUPT_PCIX)
+ t1_pci_intr_handler(adapter);
+
+ /* Clear the interrupts just processed. */
+ if (cause)
+ writel(cause, adapter->regs + A_PL_CAUSE);
+
+ return cause != 0;
+}
+#endif
+
+/*
+ * Wait until Elmer's MI1 interface is ready for new operations.
+ */
+static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg)
+{
+ int attempts = 100, busy;
+
+ do {
+ u32 val;
+
+ __t1_tpi_read(adapter, mi1_reg, &val);
+ busy = val & F_MI1_OP_BUSY;
+ if (busy)
+ udelay(10);
+ } while (busy && --attempts);
+ if (busy)
+ pr_alert("%s: MDIO operation timed out\n", adapter->name);
+ return busy;
+}
+
+/*
+ * MI1 MDIO initialization.
+ */
+static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi)
+{
+ u32 clkdiv = bi->clock_elmer0 / (2 * bi->mdio_mdc) - 1;
+ u32 val = F_MI1_PREAMBLE_ENABLE | V_MI1_MDI_INVERT(bi->mdio_mdiinv) |
+ V_MI1_MDI_ENABLE(bi->mdio_mdien) | V_MI1_CLK_DIV(clkdiv);
+
+ if (!(bi->caps & SUPPORTED_10000baseT_Full))
+ val |= V_MI1_SOF(1);
+ t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val);
+}
+
+#if defined(CONFIG_CHELSIO_T1_1G)
+/*
+ * Elmer MI1 MDIO read/write operations.
+ */
+static int mi1_mdio_read(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr)
+{
+ struct adapter *adapter = dev->ml_priv;
+ u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr);
+ unsigned int val;
+
+ spin_lock(&adapter->tpi_lock);
+ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
+ __t1_tpi_write(adapter,
+ A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_READ);
+ mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+ __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, &val);
+ spin_unlock(&adapter->tpi_lock);
+ return val;
+}
+
+static int mi1_mdio_write(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr, u16 val)
+{
+ struct adapter *adapter = dev->ml_priv;
+ u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr);
+
+ spin_lock(&adapter->tpi_lock);
+ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
+ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val);
+ __t1_tpi_write(adapter,
+ A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_WRITE);
+ mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+ spin_unlock(&adapter->tpi_lock);
+ return 0;
+}
+
+static const struct mdio_ops mi1_mdio_ops = {
+ .init = mi1_mdio_init,
+ .read = mi1_mdio_read,
+ .write = mi1_mdio_write,
+ .mode_support = MDIO_SUPPORTS_C22
+};
+
+#endif
+
+static int mi1_mdio_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr)
+{
+ struct adapter *adapter = dev->ml_priv;
+ u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
+ unsigned int val;
+
+ spin_lock(&adapter->tpi_lock);
+
+ /* Write the address we want. */
+ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
+ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
+ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
+ MI1_OP_INDIRECT_ADDRESS);
+ mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+
+ /* Write the operation we want. */
+ __t1_tpi_write(adapter,
+ A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ);
+ mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+
+ /* Read the data. */
+ __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, &val);
+ spin_unlock(&adapter->tpi_lock);
+ return val;
+}
+
+static int mi1_mdio_ext_write(struct net_device *dev, int phy_addr,
+ int mmd_addr, u16 reg_addr, u16 val)
+{
+ struct adapter *adapter = dev->ml_priv;
+ u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
+
+ spin_lock(&adapter->tpi_lock);
+
+ /* Write the address we want. */
+ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
+ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
+ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
+ MI1_OP_INDIRECT_ADDRESS);
+ mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+
+ /* Write the data. */
+ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val);
+ __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE);
+ mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+ spin_unlock(&adapter->tpi_lock);
+ return 0;
+}
+
+static const struct mdio_ops mi1_mdio_ext_ops = {
+ .init = mi1_mdio_init,
+ .read = mi1_mdio_ext_read,
+ .write = mi1_mdio_ext_write,
+ .mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
+};
+
+enum {
+ CH_BRD_T110_1CU,
+ CH_BRD_N110_1F,
+ CH_BRD_N210_1F,
+ CH_BRD_T210_1F,
+ CH_BRD_T210_1CU,
+ CH_BRD_N204_4CU,
+};
+
+static const struct board_info t1_board[] = {
+ {
+ .board = CHBT_BOARD_CHT110,
+ .port_number = 1,
+ .caps = SUPPORTED_10000baseT_Full,
+ .chip_term = CHBT_TERM_T1,
+ .chip_mac = CHBT_MAC_PM3393,
+ .chip_phy = CHBT_PHY_MY3126,
+ .clock_core = 125000000,
+ .clock_mc3 = 150000000,
+ .clock_mc4 = 125000000,
+ .espi_nports = 1,
+ .clock_elmer0 = 44,
+ .mdio_mdien = 1,
+ .mdio_mdiinv = 1,
+ .mdio_mdc = 1,
+ .mdio_phybaseaddr = 1,
+ .gmac = &t1_pm3393_ops,
+ .gphy = &t1_my3126_ops,
+ .mdio_ops = &mi1_mdio_ext_ops,
+ .desc = "Chelsio T110 1x10GBase-CX4 TOE",
+ },
+
+ {
+ .board = CHBT_BOARD_N110,
+ .port_number = 1,
+ .caps = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE,
+ .chip_term = CHBT_TERM_T1,
+ .chip_mac = CHBT_MAC_PM3393,
+ .chip_phy = CHBT_PHY_88X2010,
+ .clock_core = 125000000,
+ .espi_nports = 1,
+ .clock_elmer0 = 44,
+ .mdio_mdien = 0,
+ .mdio_mdiinv = 0,
+ .mdio_mdc = 1,
+ .mdio_phybaseaddr = 0,
+ .gmac = &t1_pm3393_ops,
+ .gphy = &t1_mv88x201x_ops,
+ .mdio_ops = &mi1_mdio_ext_ops,
+ .desc = "Chelsio N110 1x10GBaseX NIC",
+ },
+
+ {
+ .board = CHBT_BOARD_N210,
+ .port_number = 1,
+ .caps = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE,
+ .chip_term = CHBT_TERM_T2,
+ .chip_mac = CHBT_MAC_PM3393,
+ .chip_phy = CHBT_PHY_88X2010,
+ .clock_core = 125000000,
+ .espi_nports = 1,
+ .clock_elmer0 = 44,
+ .mdio_mdien = 0,
+ .mdio_mdiinv = 0,
+ .mdio_mdc = 1,
+ .mdio_phybaseaddr = 0,
+ .gmac = &t1_pm3393_ops,
+ .gphy = &t1_mv88x201x_ops,
+ .mdio_ops = &mi1_mdio_ext_ops,
+ .desc = "Chelsio N210 1x10GBaseX NIC",
+ },
+
+ {
+ .board = CHBT_BOARD_CHT210,
+ .port_number = 1,
+ .caps = SUPPORTED_10000baseT_Full,
+ .chip_term = CHBT_TERM_T2,
+ .chip_mac = CHBT_MAC_PM3393,
+ .chip_phy = CHBT_PHY_88X2010,
+ .clock_core = 125000000,
+ .clock_mc3 = 133000000,
+ .clock_mc4 = 125000000,
+ .espi_nports = 1,
+ .clock_elmer0 = 44,
+ .mdio_mdien = 0,
+ .mdio_mdiinv = 0,
+ .mdio_mdc = 1,
+ .mdio_phybaseaddr = 0,
+ .gmac = &t1_pm3393_ops,
+ .gphy = &t1_mv88x201x_ops,
+ .mdio_ops = &mi1_mdio_ext_ops,
+ .desc = "Chelsio T210 1x10GBaseX TOE",
+ },
+
+ {
+ .board = CHBT_BOARD_CHT210,
+ .port_number = 1,
+ .caps = SUPPORTED_10000baseT_Full,
+ .chip_term = CHBT_TERM_T2,
+ .chip_mac = CHBT_MAC_PM3393,
+ .chip_phy = CHBT_PHY_MY3126,
+ .clock_core = 125000000,
+ .clock_mc3 = 133000000,
+ .clock_mc4 = 125000000,
+ .espi_nports = 1,
+ .clock_elmer0 = 44,
+ .mdio_mdien = 1,
+ .mdio_mdiinv = 1,
+ .mdio_mdc = 1,
+ .mdio_phybaseaddr = 1,
+ .gmac = &t1_pm3393_ops,
+ .gphy = &t1_my3126_ops,
+ .mdio_ops = &mi1_mdio_ext_ops,
+ .desc = "Chelsio T210 1x10GBase-CX4 TOE",
+ },
+
+#ifdef CONFIG_CHELSIO_T1_1G
+ {
+ .board = CHBT_BOARD_CHN204,
+ .port_number = 4,
+ .caps = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
+ | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full
+ | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+ SUPPORTED_PAUSE | SUPPORTED_TP,
+ .chip_term = CHBT_TERM_T2,
+ .chip_mac = CHBT_MAC_VSC7321,
+ .chip_phy = CHBT_PHY_88E1111,
+ .clock_core = 100000000,
+ .espi_nports = 4,
+ .clock_elmer0 = 44,
+ .mdio_mdien = 0,
+ .mdio_mdiinv = 0,
+ .mdio_mdc = 0,
+ .mdio_phybaseaddr = 4,
+ .gmac = &t1_vsc7326_ops,
+ .gphy = &t1_mv88e1xxx_ops,
+ .mdio_ops = &mi1_mdio_ops,
+ .desc = "Chelsio N204 4x100/1000BaseT NIC",
+ },
+#endif
+
+};
+
+DEFINE_PCI_DEVICE_TABLE(t1_pci_tbl) = {
+ CH_DEVICE(8, 0, CH_BRD_T110_1CU),
+ CH_DEVICE(8, 1, CH_BRD_T110_1CU),
+ CH_DEVICE(7, 0, CH_BRD_N110_1F),
+ CH_DEVICE(10, 1, CH_BRD_N210_1F),
+ CH_DEVICE(11, 1, CH_BRD_T210_1F),
+ CH_DEVICE(14, 1, CH_BRD_T210_1CU),
+ CH_DEVICE(16, 1, CH_BRD_N204_4CU),
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, t1_pci_tbl);
+
+/*
+ * Return the board_info structure with a given index. Out-of-range indices
+ * return NULL.
+ */
+const struct board_info *t1_get_board_info(unsigned int board_id)
+{
+ return board_id < ARRAY_SIZE(t1_board) ? &t1_board[board_id] : NULL;
+}
+
+struct chelsio_vpd_t {
+ u32 format_version;
+ u8 serial_number[16];
+ u8 mac_base_address[6];
+ u8 pad[2]; /* make multiple-of-4 size requirement explicit */
+};
+
+#define EEPROMSIZE (8 * 1024)
+#define EEPROM_MAX_POLL 4
+
+/*
+ * Read the SEEPROM. A zero is written to the flag register when the address
+ * is written to the control register, and the hardware sets the flag to one
+ * once 4 bytes have been transferred to the data register.
+ */
+int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data)
+{
+ int i = EEPROM_MAX_POLL;
+ u16 val;
+ u32 v;
+
+ if (addr >= EEPROMSIZE || (addr & 3))
+ return -EINVAL;
+
+ pci_write_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, (u16)addr);
+ do {
+ udelay(50);
+ pci_read_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, &val);
+ } while (!(val & F_VPD_OP_FLAG) && --i);
+
+ if (!(val & F_VPD_OP_FLAG)) {
+ pr_err("%s: reading EEPROM address 0x%x failed\n",
+ adapter->name, addr);
+ return -EIO;
+ }
+ pci_read_config_dword(adapter->pdev, A_PCICFG_VPD_DATA, &v);
+ *data = cpu_to_le32(v);
+ return 0;
+}
+
+static int t1_eeprom_vpd_get(adapter_t *adapter, struct chelsio_vpd_t *vpd)
+{
+ int addr, ret = 0;
+
+ for (addr = 0; !ret && addr < sizeof(*vpd); addr += sizeof(u32))
+ ret = t1_seeprom_read(adapter, addr,
+ (__le32 *)((u8 *)vpd + addr));
+
+ return ret;
+}
+
+/*
+ * Read a port's MAC address from the VPD ROM.
+ */
+static int vpd_macaddress_get(adapter_t *adapter, int index, u8 mac_addr[])
+{
+ struct chelsio_vpd_t vpd;
+
+ if (t1_eeprom_vpd_get(adapter, &vpd))
+ return 1;
+ memcpy(mac_addr, vpd.mac_base_address, 5);
+ mac_addr[5] = vpd.mac_base_address[5] + index;
+ return 0;
+}
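+
+/*
+ * Editorial example (hypothetical address): with a VPD base address of
+ * 00:07:43:00:00:10, port index 2 yields 00:07:43:00:00:12; only the final
+ * byte is offset, so the base address must leave room for all ports.
+ */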
+
+/*
+ * Set up the MAC/PHY according to the requested link settings.
+ *
+ * If the PHY can auto-negotiate, first decide what to advertise, then
+ * enable/disable auto-negotiation as desired and reset.
+ *
+ * If the PHY does not auto-negotiate we just reset it.
+ *
+ * If auto-negotiation is off, set the MAC to the proper speed/duplex/FC;
+ * otherwise do it later, based on the outcome of auto-negotiation.
+ */
+int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
+{
+ unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+
+ if (lc->supported & SUPPORTED_Autoneg) {
+ lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE);
+ if (fc) {
+ if (fc == ((PAUSE_RX | PAUSE_TX) &
+ (mac->adapter->params.nports < 2)))
+ lc->advertising |= ADVERTISED_PAUSE;
+ else {
+ lc->advertising |= ADVERTISED_ASYM_PAUSE;
+ if (fc == PAUSE_RX)
+ lc->advertising |= ADVERTISED_PAUSE;
+ }
+ }
+ phy->ops->advertise(phy, lc->advertising);
+
+ if (lc->autoneg == AUTONEG_DISABLE) {
+ lc->speed = lc->requested_speed;
+ lc->duplex = lc->requested_duplex;
+ lc->fc = (unsigned char)fc;
+ mac->ops->set_speed_duplex_fc(mac, lc->speed,
+ lc->duplex, fc);
+ /* Also disables autoneg */
+ phy->state = PHY_AUTONEG_RDY;
+ phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
+ phy->ops->reset(phy, 0);
+ } else {
+ phy->state = PHY_AUTONEG_EN;
+ phy->ops->autoneg_enable(phy); /* also resets PHY */
+ }
+ } else {
+ phy->state = PHY_AUTONEG_RDY;
+ mac->ops->set_speed_duplex_fc(mac, -1, -1, fc);
+ lc->fc = (unsigned char)fc;
+ phy->ops->reset(phy, 0);
+ }
+ return 0;
+}
+
+/*
+ * External interrupt handler for boards using elmer0.
+ */
+int t1_elmer0_ext_intr_handler(adapter_t *adapter)
+{
+ struct cphy *phy;
+ int phy_cause;
+ u32 cause;
+
+ t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause);
+
+ switch (board_info(adapter)->board) {
+#ifdef CONFIG_CHELSIO_T1_1G
+ case CHBT_BOARD_CHT204:
+ case CHBT_BOARD_CHT204E:
+ case CHBT_BOARD_CHN204:
+ case CHBT_BOARD_CHT204V: {
+ int i, port_bit;
+ for_each_port(adapter, i) {
+ port_bit = i + 1;
+ if (!(cause & (1 << port_bit)))
+ continue;
+
+ phy = adapter->port[i].phy;
+ phy_cause = phy->ops->interrupt_handler(phy);
+ if (phy_cause & cphy_cause_link_change)
+ t1_link_changed(adapter, i);
+ }
+ break;
+ }
+ case CHBT_BOARD_CHT101:
+ if (cause & ELMER0_GP_BIT1) { /* Marvell 88E1111 interrupt */
+ phy = adapter->port[0].phy;
+ phy_cause = phy->ops->interrupt_handler(phy);
+ if (phy_cause & cphy_cause_link_change)
+ t1_link_changed(adapter, 0);
+ }
+ break;
+ case CHBT_BOARD_7500: {
+ int p;
+ /*
+ * Elmer0's interrupt cause isn't useful here because there is
+ * only one bit that can be set for all 4 ports. This means
+ * we are forced to check every PHY's interrupt status
+ * register to see who initiated the interrupt.
+ */
+ for_each_port(adapter, p) {
+ phy = adapter->port[p].phy;
+ phy_cause = phy->ops->interrupt_handler(phy);
+ if (phy_cause & cphy_cause_link_change)
+ t1_link_changed(adapter, p);
+ }
+ break;
+ }
+#endif
+ case CHBT_BOARD_CHT210:
+ case CHBT_BOARD_N210:
+ case CHBT_BOARD_N110:
+ if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */
+ phy = adapter->port[0].phy;
+ phy_cause = phy->ops->interrupt_handler(phy);
+ if (phy_cause & cphy_cause_link_change)
+ t1_link_changed(adapter, 0);
+ }
+ break;
+ case CHBT_BOARD_8000:
+ case CHBT_BOARD_CHT110:
+ if (netif_msg_intr(adapter))
+ dev_dbg(&adapter->pdev->dev,
+ "External interrupt cause 0x%x\n", cause);
+ if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */
+ struct cmac *mac = adapter->port[0].mac;
+
+ mac->ops->interrupt_handler(mac);
+ }
+ if (cause & ELMER0_GP_BIT5) { /* XPAK MOD_DETECT */
+ u32 mod_detect;
+
+ t1_tpi_read(adapter,
+ A_ELMER0_GPI_STAT, &mod_detect);
+ if (netif_msg_link(adapter))
+ dev_info(&adapter->pdev->dev, "XPAK %s\n",
+ mod_detect ? "removed" : "inserted");
+ }
+ break;
+ }
+ t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause);
+ return 0;
+}
+
+/* Enables all interrupts. */
+void t1_interrupts_enable(adapter_t *adapter)
+{
+ unsigned int i;
+
+ adapter->slow_intr_mask = F_PL_INTR_SGE_ERR | F_PL_INTR_TP;
+
+ t1_sge_intr_enable(adapter->sge);
+ t1_tp_intr_enable(adapter->tp);
+ if (adapter->espi) {
+ adapter->slow_intr_mask |= F_PL_INTR_ESPI;
+ t1_espi_intr_enable(adapter->espi);
+ }
+
+ /* Enable MAC/PHY interrupts for each port. */
+ for_each_port(adapter, i) {
+ adapter->port[i].mac->ops->interrupt_enable(adapter->port[i].mac);
+ adapter->port[i].phy->ops->interrupt_enable(adapter->port[i].phy);
+ }
+
+ /* Enable PCIX & external chip interrupts on ASIC boards. */
+ if (t1_is_asic(adapter)) {
+ u32 pl_intr = readl(adapter->regs + A_PL_ENABLE);
+
+ /* PCI-X interrupts */
+ pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE,
+ 0xffffffff);
+
+ adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
+ pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
+ writel(pl_intr, adapter->regs + A_PL_ENABLE);
+ }
+}
+
+/* Disables all interrupts. */
+void t1_interrupts_disable(adapter_t* adapter)
+{
+ unsigned int i;
+
+ t1_sge_intr_disable(adapter->sge);
+ t1_tp_intr_disable(adapter->tp);
+ if (adapter->espi)
+ t1_espi_intr_disable(adapter->espi);
+
+ /* Disable MAC/PHY interrupts for each port. */
+ for_each_port(adapter, i) {
+ adapter->port[i].mac->ops->interrupt_disable(adapter->port[i].mac);
+ adapter->port[i].phy->ops->interrupt_disable(adapter->port[i].phy);
+ }
+
+ /* Disable PCIX & external chip interrupts. */
+ if (t1_is_asic(adapter))
+ writel(0, adapter->regs + A_PL_ENABLE);
+
+ /* PCI-X interrupts */
+ pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0);
+
+ adapter->slow_intr_mask = 0;
+}
+
+/* Clears all interrupts */
+void t1_interrupts_clear(adapter_t* adapter)
+{
+ unsigned int i;
+
+ t1_sge_intr_clear(adapter->sge);
+ t1_tp_intr_clear(adapter->tp);
+ if (adapter->espi)
+ t1_espi_intr_clear(adapter->espi);
+
+ /* Clear MAC/PHY interrupts for each port. */
+ for_each_port(adapter, i) {
+ adapter->port[i].mac->ops->interrupt_clear(adapter->port[i].mac);
+ adapter->port[i].phy->ops->interrupt_clear(adapter->port[i].phy);
+ }
+
+ /* Enable interrupts for external devices. */
+ if (t1_is_asic(adapter)) {
+ u32 pl_intr = readl(adapter->regs + A_PL_CAUSE);
+
+ writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX,
+ adapter->regs + A_PL_CAUSE);
+ }
+
+ /* PCI-X interrupts */
+ pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff);
+}
+
+/*
+ * Slow path interrupt handler for ASICs.
+ */
+static int asic_slow_intr(adapter_t *adapter)
+{
+ u32 cause = readl(adapter->regs + A_PL_CAUSE);
+
+ cause &= adapter->slow_intr_mask;
+ if (!cause)
+ return 0;
+ if (cause & F_PL_INTR_SGE_ERR)
+ t1_sge_intr_error_handler(adapter->sge);
+ if (cause & F_PL_INTR_TP)
+ t1_tp_intr_handler(adapter->tp);
+ if (cause & F_PL_INTR_ESPI)
+ t1_espi_intr_handler(adapter->espi);
+ if (cause & F_PL_INTR_PCIX)
+ t1_pci_intr_handler(adapter);
+ if (cause & F_PL_INTR_EXT)
+ t1_elmer0_ext_intr(adapter);
+
+ /* Clear the interrupts just processed. */
+ writel(cause, adapter->regs + A_PL_CAUSE);
+ readl(adapter->regs + A_PL_CAUSE); /* flush writes */
+ return 1;
+}
+
+int t1_slow_intr_handler(adapter_t *adapter)
+{
+#ifdef CONFIG_CHELSIO_T1_1G
+ if (!t1_is_asic(adapter))
+ return fpga_slow_intr(adapter);
+#endif
+ return asic_slow_intr(adapter);
+}
+
+/* Power sequencing is a work-around for Intel's XPAKs. */
+static void power_sequence_xpak(adapter_t* adapter)
+{
+ u32 mod_detect;
+ u32 gpo;
+
+ /* Check for XPAK */
+ t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect);
+ if (!(ELMER0_GP_BIT5 & mod_detect)) {
+ /* XPAK is present */
+ t1_tpi_read(adapter, A_ELMER0_GPO, &gpo);
+ gpo |= ELMER0_GP_BIT18;
+ t1_tpi_write(adapter, A_ELMER0_GPO, gpo);
+ }
+}
+
+int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
+ struct adapter_params *p)
+{
+ p->chip_version = bi->chip_term;
+ p->is_asic = (p->chip_version != CHBT_TERM_FPGA);
+ if (p->chip_version == CHBT_TERM_T1 ||
+ p->chip_version == CHBT_TERM_T2 ||
+ p->chip_version == CHBT_TERM_FPGA) {
+ u32 val = readl(adapter->regs + A_TP_PC_CONFIG);
+
+ val = G_TP_PC_REV(val);
+ if (val == 2)
+ p->chip_revision = TERM_T1B;
+ else if (val == 3)
+ p->chip_revision = TERM_T2;
+ else
+ return -1;
+ } else
+ return -1;
+ return 0;
+}
+
+/*
+ * Enable board components other than the Chelsio chip, such as external MAC
+ * and PHY.
+ */
+static int board_init(adapter_t *adapter, const struct board_info *bi)
+{
+ switch (bi->board) {
+ case CHBT_BOARD_8000:
+ case CHBT_BOARD_N110:
+ case CHBT_BOARD_N210:
+ case CHBT_BOARD_CHT210:
+ t1_tpi_par(adapter, 0xf);
+ t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
+ break;
+ case CHBT_BOARD_CHT110:
+ t1_tpi_par(adapter, 0xf);
+ t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800);
+
+ /* TBD XXX Might not need. This fixes a problem
+ * described in the Intel SR XPAK errata.
+ */
+ power_sequence_xpak(adapter);
+ break;
+#ifdef CONFIG_CHELSIO_T1_1G
+ case CHBT_BOARD_CHT204E:
+ /* add config space write here */
+ case CHBT_BOARD_CHT204:
+ case CHBT_BOARD_CHT204V:
+ case CHBT_BOARD_CHN204:
+ t1_tpi_par(adapter, 0xf);
+ t1_tpi_write(adapter, A_ELMER0_GPO, 0x804);
+ break;
+ case CHBT_BOARD_CHT101:
+ case CHBT_BOARD_7500:
+ t1_tpi_par(adapter, 0xf);
+ t1_tpi_write(adapter, A_ELMER0_GPO, 0x1804);
+ break;
+#endif
+ }
+ return 0;
+}
+
+/*
+ * Initialize and configure the Terminator HW modules. Note that external
+ * MAC and PHYs are initialized separately.
+ */
+int t1_init_hw_modules(adapter_t *adapter)
+{
+ int err = -EIO;
+ const struct board_info *bi = board_info(adapter);
+
+ if (!bi->clock_mc4) {
+ u32 val = readl(adapter->regs + A_MC4_CFG);
+
+ writel(val | F_READY | F_MC4_SLOW, adapter->regs + A_MC4_CFG);
+ writel(F_M_BUS_ENABLE | F_TCAM_RESET,
+ adapter->regs + A_MC5_CONFIG);
+ }
+
+ if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac,
+ bi->espi_nports))
+ goto out_err;
+
+ if (t1_tp_reset(adapter->tp, &adapter->params.tp, bi->clock_core))
+ goto out_err;
+
+ err = t1_sge_configure(adapter->sge, &adapter->params.sge);
+ if (err)
+ goto out_err;
+
+ err = 0;
+out_err:
+ return err;
+}
+
+/*
+ * Determine a card's PCI mode.
+ */
+static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p)
+{
+ static const unsigned short speed_map[] = { 33, 66, 100, 133 };
+ u32 pci_mode;
+
+ pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode);
+ p->speed = speed_map[G_PCI_MODE_CLK(pci_mode)];
+ p->width = (pci_mode & F_PCI_MODE_64BIT) ? 64 : 32;
+ p->is_pcix = (pci_mode & F_PCI_MODE_PCIX) != 0;
+}
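+
+/*
+ * Editorial example: a mode word whose clock code indexes speed_map[3] with
+ * both F_PCI_MODE_64BIT and F_PCI_MODE_PCIX set reports a 64-bit, 133 MHz
+ * PCI-X bus.
+ */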
+
+/*
+ * Release the structures holding the SW per-Terminator-HW-module state.
+ */
+void t1_free_sw_modules(adapter_t *adapter)
+{
+ unsigned int i;
+
+ for_each_port(adapter, i) {
+ struct cmac *mac = adapter->port[i].mac;
+ struct cphy *phy = adapter->port[i].phy;
+
+ if (mac)
+ mac->ops->destroy(mac);
+ if (phy)
+ phy->ops->destroy(phy);
+ }
+
+ if (adapter->sge)
+ t1_sge_destroy(adapter->sge);
+ if (adapter->tp)
+ t1_tp_destroy(adapter->tp);
+ if (adapter->espi)
+ t1_espi_destroy(adapter->espi);
+}
+
+static void __devinit init_link_config(struct link_config *lc,
+ const struct board_info *bi)
+{
+ lc->supported = bi->caps;
+ lc->requested_speed = lc->speed = SPEED_INVALID;
+ lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
+ lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+ if (lc->supported & SUPPORTED_Autoneg) {
+ lc->advertising = lc->supported;
+ lc->autoneg = AUTONEG_ENABLE;
+ lc->requested_fc |= PAUSE_AUTONEG;
+ } else {
+ lc->advertising = 0;
+ lc->autoneg = AUTONEG_DISABLE;
+ }
+}
+
+/*
+ * Allocate and initialize the data structures that hold the SW state of
+ * the Terminator HW modules.
+ */
+int __devinit t1_init_sw_modules(adapter_t *adapter,
+ const struct board_info *bi)
+{
+ unsigned int i;
+
+ adapter->params.brd_info = bi;
+ adapter->params.nports = bi->port_number;
+ adapter->params.stats_update_period = bi->gmac->stats_update_period;
+
+ adapter->sge = t1_sge_create(adapter, &adapter->params.sge);
+ if (!adapter->sge) {
+ pr_err("%s: SGE initialization failed\n",
+ adapter->name);
+ goto error;
+ }
+
+ if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) {
+ pr_err("%s: ESPI initialization failed\n",
+ adapter->name);
+ goto error;
+ }
+
+ adapter->tp = t1_tp_create(adapter, &adapter->params.tp);
+ if (!adapter->tp) {
+ pr_err("%s: TP initialization failed\n",
+ adapter->name);
+ goto error;
+ }
+
+ board_init(adapter, bi);
+ bi->mdio_ops->init(adapter, bi);
+ if (bi->gphy->reset)
+ bi->gphy->reset(adapter);
+ if (bi->gmac->reset)
+ bi->gmac->reset(adapter);
+
+ for_each_port(adapter, i) {
+ u8 hw_addr[6];
+ struct cmac *mac;
+ int phy_addr = bi->mdio_phybaseaddr + i;
+
+ adapter->port[i].phy = bi->gphy->create(adapter->port[i].dev,
+ phy_addr, bi->mdio_ops);
+ if (!adapter->port[i].phy) {
+ pr_err("%s: PHY %d initialization failed\n",
+ adapter->name, i);
+ goto error;
+ }
+
+ adapter->port[i].mac = mac = bi->gmac->create(adapter, i);
+ if (!mac) {
+ pr_err("%s: MAC %d initialization failed\n",
+ adapter->name, i);
+ goto error;
+ }
+
+ /*
+ * Get the port's MAC address either from the EEPROM, if one
+ * exists, or from the address hardcoded in the MAC.
+ */
+ if (!t1_is_asic(adapter) || bi->chip_mac == CHBT_MAC_DUMMY)
+ mac->ops->macaddress_get(mac, hw_addr);
+ else if (vpd_macaddress_get(adapter, i, hw_addr)) {
+ pr_err("%s: could not read MAC address from VPD ROM\n",
+ adapter->port[i].dev->name);
+ goto error;
+ }
+ memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN);
+ init_link_config(&adapter->port[i].link_config, bi);
+ }
+
+ get_pci_mode(adapter, &adapter->params.pci);
+ t1_interrupts_clear(adapter);
+ return 0;
+
+error:
+ t1_free_sw_modules(adapter);
+ return -1;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
new file mode 100644
index 000000000000..d0f87d82566a
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
@@ -0,0 +1,1643 @@
+/*****************************************************************************
+ * *
+ * File: suni1x10gexp_regs.h *
+ * $Revision: 1.9 $ *
+ * $Date: 2005/06/22 00:17:04 $ *
+ * Description: *
+ * PMC/SIERRA (pm3393) MAC-PHY functionality. *
+ * part of the Chelsio 10Gb Ethernet Driver. *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License, version 2, as *
+ * published by the Free Software Foundation. *
+ * *
+ * You should have received a copy of the GNU General Public License along *
+ * with this program; if not, write to the Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
+ * *
+ * http://www.chelsio.com *
+ * *
+ * Maintainers: maintainers@chelsio.com *
+ * *
+ * Authors: PMC/SIERRA *
+ * *
+ * History: *
+ * *
+ ****************************************************************************/
+
+#ifndef _CXGB_SUNI1x10GEXP_REGS_H_
+#define _CXGB_SUNI1x10GEXP_REGS_H_
+
+/*
+** Space allocated for each Exact Match Filter
+** There are 8 filter configurations
+*/
+#define SUNI1x10GEXP_REG_SIZEOF_MAC_FILTER 0x0003
+
+#define mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId) ( (filterId) * SUNI1x10GEXP_REG_SIZEOF_MAC_FILTER )
+
+/*
+** Space allocated for VLAN-Id Filter
+** There are 8 filter configurations
+*/
+#define SUNI1x10GEXP_REG_SIZEOF_MAC_VID_FILTER 0x0001
+
+#define mSUNI1x10GEXP_MAC_VID_FILTER_OFFSET(filterId) ( (filterId) * SUNI1x10GEXP_REG_SIZEOF_MAC_VID_FILTER )
+
+/*
+** Space allocated for each MSTAT Counter
+*/
+#define SUNI1x10GEXP_REG_SIZEOF_MSTAT_COUNT 0x0004
+
+#define mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId) ( (countId) * SUNI1x10GEXP_REG_SIZEOF_MSTAT_COUNT )
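+
+/*
+ * Editorial example: with three registers per exact-match filter,
+ * mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_LOW(3) = 0x204A + 3 * 0x0003 =
+ * 0x2053, matching SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_LOW defined
+ * below; likewise mSUNI1x10GEXP_REG_MSTAT_COUNTER_LOW(2) = 0x2110 + 2 *
+ * 0x0004 = 0x2118.
+ */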
+
+
+/******************************************************************************/
+/** S/UNI-1x10GE-XP REGISTER ADDRESS MAP **/
+/******************************************************************************/
+/* Refer to the Register Bit Masks below for the naming of each register and */
+/* to the S/UNI-1x10GE-XP Data Sheet for the meaning of each bit */
+/******************************************************************************/
+
+
+#define SUNI1x10GEXP_REG_IDENTIFICATION 0x0000
+#define SUNI1x10GEXP_REG_PRODUCT_REVISION 0x0001
+#define SUNI1x10GEXP_REG_CONFIG_AND_RESET_CONTROL 0x0002
+#define SUNI1x10GEXP_REG_LOOPBACK_MISC_CTRL 0x0003
+#define SUNI1x10GEXP_REG_DEVICE_STATUS 0x0004
+#define SUNI1x10GEXP_REG_GLOBAL_PERFORMANCE_MONITOR_UPDATE 0x0005
+
+#define SUNI1x10GEXP_REG_MDIO_COMMAND 0x0006
+#define SUNI1x10GEXP_REG_MDIO_INTERRUPT_ENABLE 0x0007
+#define SUNI1x10GEXP_REG_MDIO_INTERRUPT_STATUS 0x0008
+#define SUNI1x10GEXP_REG_MMD_PHY_ADDRESS 0x0009
+#define SUNI1x10GEXP_REG_MMD_CONTROL_ADDRESS_DATA 0x000A
+#define SUNI1x10GEXP_REG_MDIO_READ_STATUS_DATA 0x000B
+
+#define SUNI1x10GEXP_REG_OAM_INTF_CTRL 0x000C
+#define SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS 0x000D
+#define SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE 0x000E
+#define SUNI1x10GEXP_REG_FREE 0x000F
+
+#define SUNI1x10GEXP_REG_XTEF_MISC_CTRL 0x0010
+#define SUNI1x10GEXP_REG_XRF_MISC_CTRL 0x0011
+
+#define SUNI1x10GEXP_REG_SERDES_3125_CONFIG_1 0x0100
+#define SUNI1x10GEXP_REG_SERDES_3125_CONFIG_2 0x0101
+#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE 0x0102
+#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_VISIBLE 0x0103
+#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS 0x0104
+#define SUNI1x10GEXP_REG_SERDES_3125_TEST_CONFIG 0x0107
+
+#define SUNI1x10GEXP_REG_RXXG_CONFIG_1 0x2040
+#define SUNI1x10GEXP_REG_RXXG_CONFIG_2 0x2041
+#define SUNI1x10GEXP_REG_RXXG_CONFIG_3 0x2042
+#define SUNI1x10GEXP_REG_RXXG_INTERRUPT 0x2043
+#define SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH 0x2045
+#define SUNI1x10GEXP_REG_RXXG_SA_15_0 0x2046
+#define SUNI1x10GEXP_REG_RXXG_SA_31_16 0x2047
+#define SUNI1x10GEXP_REG_RXXG_SA_47_32 0x2048
+#define SUNI1x10GEXP_REG_RXXG_RECEIVE_FIFO_THRESHOLD 0x2049
+#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_LOW(filterId) (0x204A + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId))
+#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_MID(filterId) (0x204B + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId))
+#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_HIGH(filterId)(0x204C + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId))
+#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID(filterId) (0x2062 + mSUNI1x10GEXP_MAC_VID_FILTER_OFFSET(filterId))
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_LOW 0x204A
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_MID 0x204B
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_HIGH 0x204C
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW 0x204D
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID 0x204E
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH 0x204F
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_LOW 0x2050
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_MID 0x2051
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_HIGH 0x2052
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_LOW 0x2053
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_MID 0x2054
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_HIGH 0x2055
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_LOW 0x2056
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_MID 0x2057
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_HIGH 0x2058
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_LOW 0x2059
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_MID 0x205A
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_HIGH 0x205B
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_LOW 0x205C
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_MID 0x205D
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_HIGH 0x205E
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_LOW 0x205F
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_MID 0x2060
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_HIGH 0x2061
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_0 0x2062
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_1 0x2063
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_2 0x2064
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_3 0x2065
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_4 0x2066
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_5 0x2067
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_6 0x2068
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_7 0x2069
+#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW 0x206A
+#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW 0x206B
+#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH 0x206C
+#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH 0x206D
+#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0 0x206E
+#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_1 0x206F
+#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2 0x2070
+
+#define SUNI1x10GEXP_REG_XRF_PATTERN_GEN_CTRL 0x2081
+#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_0 0x2084
+#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_1 0x2085
+#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_2 0x2086
+#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_3 0x2087
+#define SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE 0x2088
+#define SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS 0x2089
+#define SUNI1x10GEXP_REG_XRF_ERR_STATUS 0x208A
+#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE 0x208B
+#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS 0x208C
+#define SUNI1x10GEXP_REG_XRF_CODE_ERR_THRES 0x2092
+
+#define SUNI1x10GEXP_REG_RXOAM_CONFIG 0x20C0
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_CONFIG 0x20C1
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_CONFIG 0x20C2
+#define SUNI1x10GEXP_REG_RXOAM_CONFIG_2 0x20C3
+#define SUNI1x10GEXP_REG_RXOAM_HEC_CONFIG 0x20C4
+#define SUNI1x10GEXP_REG_RXOAM_HEC_ERR_THRES 0x20C5
+#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE 0x20C7
+#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS 0x20C8
+#define SUNI1x10GEXP_REG_RXOAM_STATUS 0x20C9
+#define SUNI1x10GEXP_REG_RXOAM_HEC_ERR_COUNT 0x20CA
+#define SUNI1x10GEXP_REG_RXOAM_FIFO_OVERFLOW_COUNT 0x20CB
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_COUNT_LSB 0x20CC
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_COUNT_MSB 0x20CD
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_MISMATCH_COUNT_LSB 0x20CE
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_MISMATCH_COUNT_MSB 0x20CF
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_MISMATCH_COUNT_LSB 0x20D0
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_MISMATCH_COUNT_MSB 0x20D1
+#define SUNI1x10GEXP_REG_RXOAM_OAM_EXTRACT_COUNT_LSB 0x20D2
+#define SUNI1x10GEXP_REG_RXOAM_OAM_EXTRACT_COUNT_MSB 0x20D3
+#define SUNI1x10GEXP_REG_RXOAM_MINI_PACKET_COUNT_LSB 0x20D4
+#define SUNI1x10GEXP_REG_RXOAM_MINI_PACKET_COUNT_MSB 0x20D5
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_THRES_LSB 0x20D6
+#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_THRES_MSB 0x20D7
+
+#define SUNI1x10GEXP_REG_MSTAT_CONTROL 0x2100
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0 0x2101
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1 0x2102
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2 0x2103
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3 0x2104
+#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0 0x2105
+#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1 0x2106
+#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2 0x2107
+#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3 0x2108
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_ADDRESS 0x2109
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_LOW 0x210A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_MIDDLE 0x210B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_HIGH 0x210C
+#define mSUNI1x10GEXP_REG_MSTAT_COUNTER_LOW(countId) (0x2110 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId))
+#define mSUNI1x10GEXP_REG_MSTAT_COUNTER_MID(countId) (0x2111 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId))
+#define mSUNI1x10GEXP_REG_MSTAT_COUNTER_HIGH(countId) (0x2112 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId))
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW 0x2110
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_MID 0x2111
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_HIGH 0x2112
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_RESVD 0x2113
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW 0x2114
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_MID 0x2115
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_HIGH 0x2116
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_RESVD 0x2117
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_LOW 0x2118
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_MID 0x2119
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_HIGH 0x211A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_RESVD 0x211B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_LOW 0x211C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_MID 0x211D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_HIGH 0x211E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_RESVD 0x211F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW 0x2120
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_MID 0x2121
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_HIGH 0x2122
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_RESVD 0x2123
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW 0x2124
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_MID 0x2125
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_HIGH 0x2126
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_RESVD 0x2127
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW 0x2128
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_MID 0x2129
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_HIGH 0x212A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_RESVD 0x212B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_LOW 0x212C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_MID 0x212D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_HIGH 0x212E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_RESVD 0x212F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW 0x2130
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_MID 0x2131
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_HIGH 0x2132
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_RESVD 0x2133
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_LOW 0x2134
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_MID 0x2135
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_HIGH 0x2136
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_RESVD 0x2137
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW 0x2138
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_MID 0x2139
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_HIGH 0x213A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_RESVD 0x213B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW 0x213C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_MID 0x213D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_HIGH 0x213E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_RESVD 0x213F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW 0x2140
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_MID 0x2141
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_HIGH 0x2142
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_RESVD 0x2143
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW 0x2144
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_MID 0x2145
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_HIGH 0x2146
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_RESVD 0x2147
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_LOW 0x2148
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_MID 0x2149
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_HIGH 0x214A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_RESVD 0x214B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW 0x214C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_MID 0x214D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_HIGH 0x214E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_RESVD 0x214F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW 0x2150
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_MID 0x2151
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_HIGH 0x2152
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_RESVD 0x2153
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW 0x2154
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_MID 0x2155
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_HIGH 0x2156
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_RESVD 0x2157
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW 0x2158
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_MID 0x2159
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_HIGH 0x215A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_RESVD 0x215B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_LOW 0x215C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_MID 0x215D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_HIGH 0x215E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_RESVD 0x215F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_LOW 0x2160
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_MID 0x2161
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_HIGH 0x2162
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_RESVD 0x2163
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_LOW 0x2164
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_MID 0x2165
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_HIGH 0x2166
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_RESVD 0x2167
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_LOW 0x2168
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_MID 0x2169
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_HIGH 0x216A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_RESVD 0x216B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_LOW 0x216C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_MID 0x216D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_HIGH 0x216E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_RESVD 0x216F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_LOW 0x2170
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_MID 0x2171
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_HIGH 0x2172
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_RESVD 0x2173
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW 0x2174
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_MID 0x2175
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_HIGH 0x2176
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_RESVD 0x2177
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW 0x2178
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_MID 0x2179
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_HIGH 0x217A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_RESVD 0x217B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_LOW 0x217C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_MID 0x217D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_HIGH 0x217E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_RESVD 0x217F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_LOW 0x2180
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_MID 0x2181
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_HIGH 0x2182
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_RESVD 0x2183
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_LOW 0x2184
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_MID 0x2185
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_HIGH 0x2186
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_RESVD 0x2187
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_LOW 0x2188
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_MID 0x2189
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_HIGH 0x218A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_RESVD 0x218B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_LOW 0x218C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_MID 0x218D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_HIGH 0x218E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_RESVD 0x218F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_LOW 0x2190
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_MID 0x2191
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_HIGH 0x2192
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_RESVD 0x2193
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW 0x2194
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_MID 0x2195
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_HIGH 0x2196
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_RESVD 0x2197
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_LOW 0x2198
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_MID 0x2199
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_HIGH 0x219A
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_RESVD 0x219B
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW 0x219C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_MID 0x219D
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_HIGH 0x219E
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_RESVD 0x219F
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW 0x21A0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_MID 0x21A1
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_HIGH 0x21A2
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_RESVD 0x21A3
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_LOW 0x21A4
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_MID 0x21A5
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_HIGH 0x21A6
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_RESVD 0x21A7
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW 0x21A8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_MID 0x21A9
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_HIGH 0x21AA
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_RESVD 0x21AB
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_LOW 0x21AC
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_MID 0x21AD
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_HIGH 0x21AE
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_RESVD 0x21AF
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW 0x21B0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_MID 0x21B1
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_HIGH 0x21B2
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_RESVD 0x21B3
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_LOW 0x21B4
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_MID 0x21B5
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_HIGH 0x21B6
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_RESVD 0x21B7
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW 0x21B8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_MID 0x21B9
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_HIGH 0x21BA
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_RESVD 0x21BB
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW 0x21BC
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_MID 0x21BD
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_HIGH 0x21BE
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_RESVD 0x21BF
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_LOW 0x21C0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_MID 0x21C1
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_HIGH 0x21C2
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_RESVD 0x21C3
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_LOW 0x21C4
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_MID 0x21C5
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_HIGH 0x21C6
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_RESVD 0x21C7
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_LOW 0x21C8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_MID 0x21C9
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_HIGH 0x21CA
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_RESVD 0x21CB
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_LOW 0x21CC
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_MID 0x21CD
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_HIGH 0x21CE
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_RESVD 0x21CF
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_LOW 0x21D0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_MID 0x21D1
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_HIGH 0x21D2
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_RESVD 0x21D3
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_LOW 0x21D4
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_MID 0x21D5
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_HIGH 0x21D6
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_RESVD 0x21D7
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_LOW 0x21D8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_MID 0x21D9
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_HIGH 0x21DA
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_RESVD 0x21DB
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW 0x21DC
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_MID 0x21DD
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_HIGH 0x21DE
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_RESVD 0x21DF
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW 0x21E0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_MID 0x21E1
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_HIGH 0x21E2
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_RESVD 0x21E3
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_LOW 0x21E4
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_MID 0x21E5
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_HIGH 0x21E6
+#define SUNI1x10GEXP_CNTR_MAC_ETHERNET_NUM 51
+
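+/*
+ * Usage sketch (illustrative only, not part of the register map): each
+ * MSTAT counter occupies three consecutive 16-bit registers (LOW, MID,
+ * HIGH) plus a reserved word, so consecutive counters sit four addresses
+ * apart.  Assuming a hypothetical accessor suni_read_reg(dev, addr) that
+ * returns the 16-bit register at addr, a counter could be assembled as:
+ *
+ *     u64 lo  = suni_read_reg(dev, mSUNI1x10GEXP_REG_MSTAT_COUNTER_LOW(id));
+ *     u64 mid = suni_read_reg(dev, mSUNI1x10GEXP_REG_MSTAT_COUNTER_MID(id));
+ *     u64 hi  = suni_read_reg(dev, mSUNI1x10GEXP_REG_MSTAT_COUNTER_HIGH(id));
+ *     u64 count = lo | (mid << 16) | (hi << 32);
+ *
+ * The full 48-bit concatenation is shown; individual counters may
+ * implement fewer bits.
+ */
+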
+#define SUNI1x10GEXP_REG_IFLX_GLOBAL_CONFIG 0x2200
+#define SUNI1x10GEXP_REG_IFLX_CHANNEL_PROVISION 0x2201
+#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE 0x2209
+#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT 0x220A
+#define SUNI1x10GEXP_REG_IFLX_INDIR_CHANNEL_ADDRESS 0x220D
+#define SUNI1x10GEXP_REG_IFLX_INDIR_LOGICAL_FIFO_LOW_LIMIT_PROVISION 0x220E
+#define SUNI1x10GEXP_REG_IFLX_INDIR_LOGICAL_FIFO_HIGH_LIMIT 0x220F
+#define SUNI1x10GEXP_REG_IFLX_INDIR_FULL_ALMOST_FULL_STATUS_LIMIT 0x2210
+#define SUNI1x10GEXP_REG_IFLX_INDIR_EMPTY_ALMOST_EMPTY_STATUS_LIMIT 0x2211
+
+#define SUNI1x10GEXP_REG_PL4MOS_CONFIG 0x2240
+#define SUNI1x10GEXP_REG_PL4MOS_MASK 0x2241
+#define SUNI1x10GEXP_REG_PL4MOS_FAIRNESS_MASKING 0x2242
+#define SUNI1x10GEXP_REG_PL4MOS_MAXBURST1 0x2243
+#define SUNI1x10GEXP_REG_PL4MOS_MAXBURST2 0x2244
+#define SUNI1x10GEXP_REG_PL4MOS_TRANSFER_SIZE 0x2245
+
+#define SUNI1x10GEXP_REG_PL4ODP_CONFIG 0x2280
+#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK 0x2282
+#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT 0x2283
+#define SUNI1x10GEXP_REG_PL4ODP_CONFIG_MAX_T 0x2284
+
+#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS 0x2300
+#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE 0x2301
+#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK 0x2302
+#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_LIMITS 0x2303
+#define SUNI1x10GEXP_REG_PL4IO_CALENDAR_REPETITIONS 0x2304
+#define SUNI1x10GEXP_REG_PL4IO_CONFIG 0x2305
+
+#define SUNI1x10GEXP_REG_TXXG_CONFIG_1 0x3040
+#define SUNI1x10GEXP_REG_TXXG_CONFIG_2 0x3041
+#define SUNI1x10GEXP_REG_TXXG_CONFIG_3 0x3042
+#define SUNI1x10GEXP_REG_TXXG_INTERRUPT 0x3043
+#define SUNI1x10GEXP_REG_TXXG_STATUS 0x3044
+#define SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE 0x3045
+#define SUNI1x10GEXP_REG_TXXG_MIN_FRAME_SIZE 0x3046
+#define SUNI1x10GEXP_REG_TXXG_SA_15_0 0x3047
+#define SUNI1x10GEXP_REG_TXXG_SA_31_16 0x3048
+#define SUNI1x10GEXP_REG_TXXG_SA_47_32 0x3049
+#define SUNI1x10GEXP_REG_TXXG_PAUSE_TIMER 0x304D
+#define SUNI1x10GEXP_REG_TXXG_PAUSE_TIMER_INTERVAL 0x304E
+#define SUNI1x10GEXP_REG_TXXG_FILTER_ERROR_COUNTER 0x3051
+#define SUNI1x10GEXP_REG_TXXG_PAUSE_QUANTUM_CONFIG 0x3052
+
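+/*
+ * Usage sketch (illustrative only): the 48-bit transmit station address is
+ * split across the three TXXG_SA registers above, 16 bits each, as the
+ * _15_0/_31_16/_47_32 suffixes indicate.  Assuming a hypothetical
+ * suni_write_reg(dev, addr, val) accessor and addr[0] holding address
+ * bits 7:0 (confirm the byte ordering against the data sheet):
+ *
+ *     suni_write_reg(dev, SUNI1x10GEXP_REG_TXXG_SA_15_0,
+ *                    addr[0] | (addr[1] << 8));
+ *     suni_write_reg(dev, SUNI1x10GEXP_REG_TXXG_SA_31_16,
+ *                    addr[2] | (addr[3] << 8));
+ *     suni_write_reg(dev, SUNI1x10GEXP_REG_TXXG_SA_47_32,
+ *                    addr[4] | (addr[5] << 8));
+ */
+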
+#define SUNI1x10GEXP_REG_XTEF_CTRL 0x3080
+#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS 0x3084
+#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE 0x3085
+#define SUNI1x10GEXP_REG_XTEF_VISIBILITY 0x3086
+
+#define SUNI1x10GEXP_REG_TXOAM_OAM_CONFIG 0x30C0
+#define SUNI1x10GEXP_REG_TXOAM_MINI_RATE_CONFIG 0x30C1
+#define SUNI1x10GEXP_REG_TXOAM_MINI_GAP_FIFO_CONFIG 0x30C2
+#define SUNI1x10GEXP_REG_TXOAM_P1P2_STATIC_VALUES 0x30C3
+#define SUNI1x10GEXP_REG_TXOAM_P3P4_STATIC_VALUES 0x30C4
+#define SUNI1x10GEXP_REG_TXOAM_P5P6_STATIC_VALUES 0x30C5
+#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE 0x30C6
+#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS 0x30C7
+#define SUNI1x10GEXP_REG_TXOAM_INSERT_COUNT_LSB 0x30C8
+#define SUNI1x10GEXP_REG_TXOAM_INSERT_COUNT_MSB 0x30C9
+#define SUNI1x10GEXP_REG_TXOAM_OAM_MINI_COUNT_LSB 0x30CA
+#define SUNI1x10GEXP_REG_TXOAM_OAM_MINI_COUNT_MSB 0x30CB
+#define SUNI1x10GEXP_REG_TXOAM_P1P2_MINI_MASK 0x30CC
+#define SUNI1x10GEXP_REG_TXOAM_P3P4_MINI_MASK 0x30CD
+#define SUNI1x10GEXP_REG_TXOAM_P5P6_MINI_MASK 0x30CE
+#define SUNI1x10GEXP_REG_TXOAM_COSET 0x30CF
+#define SUNI1x10GEXP_REG_TXOAM_EMPTY_FIFO_INS_OP_CNT_LSB 0x30D0
+#define SUNI1x10GEXP_REG_TXOAM_EMPTY_FIFO_INS_OP_CNT_MSB 0x30D1
+#define SUNI1x10GEXP_REG_TXOAM_STATIC_VALUE_MINI_COUNT_LSB 0x30D2
+#define SUNI1x10GEXP_REG_TXOAM_STATIC_VALUE_MINI_COUNT_MSB 0x30D3
+
+#define SUNI1x10GEXP_REG_EFLX_GLOBAL_CONFIG 0x3200
+#define SUNI1x10GEXP_REG_EFLX_ERCU_GLOBAL_STATUS 0x3201
+#define SUNI1x10GEXP_REG_EFLX_INDIR_CHANNEL_ADDRESS 0x3202
+#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_LOW_LIMIT 0x3203
+#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_HIGH_LIMIT 0x3204
+#define SUNI1x10GEXP_REG_EFLX_INDIR_FULL_ALMOST_FULL_STATUS_AND_LIMIT 0x3205
+#define SUNI1x10GEXP_REG_EFLX_INDIR_EMPTY_ALMOST_EMPTY_STATUS_AND_LIMIT 0x3206
+#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_CUT_THROUGH_THRESHOLD 0x3207
+#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE 0x320C
+#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION 0x320D
+#define SUNI1x10GEXP_REG_EFLX_CHANNEL_PROVISION 0x3210
+
+#define SUNI1x10GEXP_REG_PL4IDU_CONFIG 0x3280
+#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK 0x3282
+#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT 0x3283
+
+/*----------------------------------------*/
+#define SUNI1x10GEXP_REG_MAX_OFFSET 0x3480
+
+/******************************************************************************/
+/* -- End register offset definitions -- */
+/******************************************************************************/
+
+/******************************************************************************/
+/** SUNI-1x10GE-XP REGISTER BIT MASKS **/
+/******************************************************************************/
+
+#define SUNI1x10GEXP_BITMSK_BITS_1 0x00001
+#define SUNI1x10GEXP_BITMSK_BITS_2 0x00003
+#define SUNI1x10GEXP_BITMSK_BITS_3 0x00007
+#define SUNI1x10GEXP_BITMSK_BITS_4 0x0000f
+#define SUNI1x10GEXP_BITMSK_BITS_5 0x0001f
+#define SUNI1x10GEXP_BITMSK_BITS_6 0x0003f
+#define SUNI1x10GEXP_BITMSK_BITS_7 0x0007f
+#define SUNI1x10GEXP_BITMSK_BITS_8 0x000ff
+#define SUNI1x10GEXP_BITMSK_BITS_9 0x001ff
+#define SUNI1x10GEXP_BITMSK_BITS_10 0x003ff
+#define SUNI1x10GEXP_BITMSK_BITS_11 0x007ff
+#define SUNI1x10GEXP_BITMSK_BITS_12 0x00fff
+#define SUNI1x10GEXP_BITMSK_BITS_13 0x01fff
+#define SUNI1x10GEXP_BITMSK_BITS_14 0x03fff
+#define SUNI1x10GEXP_BITMSK_BITS_15 0x07fff
+#define SUNI1x10GEXP_BITMSK_BITS_16 0x0ffff
+
+#define mSUNI1x10GEXP_CLR_MSBITS_1(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_15)
+#define mSUNI1x10GEXP_CLR_MSBITS_2(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_14)
+#define mSUNI1x10GEXP_CLR_MSBITS_3(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_13)
+#define mSUNI1x10GEXP_CLR_MSBITS_4(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_12)
+#define mSUNI1x10GEXP_CLR_MSBITS_5(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_11)
+#define mSUNI1x10GEXP_CLR_MSBITS_6(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_10)
+#define mSUNI1x10GEXP_CLR_MSBITS_7(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_9)
+#define mSUNI1x10GEXP_CLR_MSBITS_8(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_8)
+#define mSUNI1x10GEXP_CLR_MSBITS_9(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_7)
+#define mSUNI1x10GEXP_CLR_MSBITS_10(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_6)
+#define mSUNI1x10GEXP_CLR_MSBITS_11(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_5)
+#define mSUNI1x10GEXP_CLR_MSBITS_12(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_4)
+#define mSUNI1x10GEXP_CLR_MSBITS_13(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_3)
+#define mSUNI1x10GEXP_CLR_MSBITS_14(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_2)
+#define mSUNI1x10GEXP_CLR_MSBITS_15(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_1)
+
+#define mSUNI1x10GEXP_GET_BIT(val, bitMsk) (((val)&(bitMsk)) ? 1:0)
+
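+/*
+ * Usage sketch (illustrative only): the helpers above extract a single
+ * flag or truncate a value to its low-order bits.  For example, with a
+ * 16-bit value read from the Device Status register (0x0004):
+ *
+ *     int paused = mSUNI1x10GEXP_GET_BIT(status,
+ *                                        SUNI1x10GEXP_BITMSK_TOP_PAUSED);
+ *     u16 low12  = mSUNI1x10GEXP_CLR_MSBITS_4(value);
+ *
+ * mSUNI1x10GEXP_CLR_MSBITS_n() clears the n most significant bits of a
+ * 16-bit quantity, i.e. keeps the low (16 - n) bits.
+ */
+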
+/*----------------------------------------------------------------------------
+ * Register 0x0001: S/UNI-1x10GE-XP Product Revision
+ * Bit 3-0 REVISION
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_REVISION 0x000F
+
+/*----------------------------------------------------------------------------
+ * Register 0x0002: S/UNI-1x10GE-XP Configuration and Reset Control
+ * Bit 2 XAUI_ARESETB
+ * Bit 1 PL4_ARESETB
+ * Bit 0 DRESETB
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_XAUI_ARESET 0x0004
+#define SUNI1x10GEXP_BITMSK_PL4_ARESET 0x0002
+#define SUNI1x10GEXP_BITMSK_DRESETB 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0003: S/UNI-1x10GE-XP Loop Back and Miscellaneous Control
+ * Bit 11 PL4IO_OUTCLKSEL
+ * Bit 9 SYSPCSLB
+ * Bit 8 LINEPCSLB
+ * Bit 7 MSTAT_BYPASS
+ * Bit 6 RXXG_BYPASS
+ * Bit 5 TXXG_BYPASS
+ * Bit 4 SOP_PAD_EN
+ * Bit 1 LOS_INV
+ * Bit 0 OVERRIDE_LOS
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUTCLKSEL 0x0800
+#define SUNI1x10GEXP_BITMSK_SYSPCSLB 0x0200
+#define SUNI1x10GEXP_BITMSK_LINEPCSLB 0x0100
+#define SUNI1x10GEXP_BITMSK_MSTAT_BYPASS 0x0080
+#define SUNI1x10GEXP_BITMSK_RXXG_BYPASS 0x0040
+#define SUNI1x10GEXP_BITMSK_TXXG_BYPASS 0x0020
+#define SUNI1x10GEXP_BITMSK_SOP_PAD_EN 0x0010
+#define SUNI1x10GEXP_BITMSK_LOS_INV 0x0002
+#define SUNI1x10GEXP_BITMSK_OVERRIDE_LOS 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0004: S/UNI-1x10GE-XP Device Status
+ * Bit 9 TOP_SXRA_EXPIRED
+ * Bit 8 TOP_MDIO_BUSY
+ * Bit 7 TOP_DTRB
+ * Bit 6 TOP_EXPIRED
+ * Bit 5 TOP_PAUSED
+ * Bit 4 TOP_PL4_ID_DOOL
+ * Bit 3 TOP_PL4_IS_DOOL
+ * Bit 2 TOP_PL4_ID_ROOL
+ * Bit 1 TOP_PL4_IS_ROOL
+ * Bit 0 TOP_PL4_OUT_ROOL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED 0x0200
+#define SUNI1x10GEXP_BITMSK_TOP_MDIO_BUSY 0x0100
+#define SUNI1x10GEXP_BITMSK_TOP_DTRB 0x0080
+#define SUNI1x10GEXP_BITMSK_TOP_EXPIRED 0x0040
+#define SUNI1x10GEXP_BITMSK_TOP_PAUSED 0x0020
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL 0x0010
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL 0x0008
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL 0x0004
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL 0x0002
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0005: Global Performance Update and Clock Monitors
+ * Bit 15 TIP
+ * Bit 8 XAUI_REF_CLKA
+ * Bit 7 RXLANE3CLKA
+ * Bit 6 RXLANE2CLKA
+ * Bit 5 RXLANE1CLKA
+ * Bit 4 RXLANE0CLKA
+ * Bit 3 CSUCLKA
+ * Bit 2 TDCLKA
+ * Bit 1 RSCLKA
+ * Bit 0 RDCLKA
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TIP 0x8000
+#define SUNI1x10GEXP_BITMSK_XAUI_REF_CLKA 0x0100
+#define SUNI1x10GEXP_BITMSK_RXLANE3CLKA 0x0080
+#define SUNI1x10GEXP_BITMSK_RXLANE2CLKA 0x0040
+#define SUNI1x10GEXP_BITMSK_RXLANE1CLKA 0x0020
+#define SUNI1x10GEXP_BITMSK_RXLANE0CLKA 0x0010
+#define SUNI1x10GEXP_BITMSK_CSUCLKA 0x0008
+#define SUNI1x10GEXP_BITMSK_TDCLKA 0x0004
+#define SUNI1x10GEXP_BITMSK_RSCLKA 0x0002
+#define SUNI1x10GEXP_BITMSK_RDCLKA 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0006: MDIO Command
+ * Bit 4 MDIO_RDINC
+ * Bit 3 MDIO_RSTAT
+ * Bit 2 MDIO_LCTLD
+ * Bit 1 MDIO_LCTLA
+ * Bit 0 MDIO_SPRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MDIO_RDINC 0x0010
+#define SUNI1x10GEXP_BITMSK_MDIO_RSTAT 0x0008
+#define SUNI1x10GEXP_BITMSK_MDIO_LCTLD 0x0004
+#define SUNI1x10GEXP_BITMSK_MDIO_LCTLA 0x0002
+#define SUNI1x10GEXP_BITMSK_MDIO_SPRE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0007: MDIO Interrupt Enable
+ * Bit 0 MDIO_BUSY_EN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MDIO_BUSY_EN 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0008: MDIO Interrupt Status
+ * Bit 0 MDIO_BUSYI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MDIO_BUSYI 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0009: MMD PHY Address
+ * Bit 12-8 MDIO_DEVADR
+ * Bit 4-0 MDIO_PRTADR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MDIO_DEVADR 0x1F00
+#define SUNI1x10GEXP_BITOFF_MDIO_DEVADR 8
+#define SUNI1x10GEXP_BITMSK_MDIO_PRTADR 0x001F
+#define SUNI1x10GEXP_BITOFF_MDIO_PRTADR 0
+
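+/*
+ * Usage sketch (illustrative only): composing the MMD PHY Address
+ * register value from a device address and a port address with the
+ * mask/offset pairs above:
+ *
+ *     u16 v = ((devad << SUNI1x10GEXP_BITOFF_MDIO_DEVADR) &
+ *              SUNI1x10GEXP_BITMSK_MDIO_DEVADR) |
+ *             ((prtad << SUNI1x10GEXP_BITOFF_MDIO_PRTADR) &
+ *              SUNI1x10GEXP_BITMSK_MDIO_PRTADR);
+ */
+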
+/*----------------------------------------------------------------------------
+ * Register 0x000C: OAM Interface Control
+ * Bit 6 MDO_OD_ENB
+ * Bit 5 MDI_INV
+ * Bit 4 MDI_SEL
+ * Bit 3 RXOAMEN
+ * Bit 2 RXOAMCLKEN
+ * Bit 1 TXOAMEN
+ * Bit 0 TXOAMCLKEN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MDO_OD_ENB 0x0040
+#define SUNI1x10GEXP_BITMSK_MDI_INV 0x0020
+#define SUNI1x10GEXP_BITMSK_MDI_SEL 0x0010
+#define SUNI1x10GEXP_BITMSK_RXOAMEN 0x0008
+#define SUNI1x10GEXP_BITMSK_RXOAMCLKEN 0x0004
+#define SUNI1x10GEXP_BITMSK_TXOAMEN 0x0002
+#define SUNI1x10GEXP_BITMSK_TXOAMCLKEN 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x000D: S/UNI-1x10GE-XP Master Interrupt Status
+ * Bit 15 TOP_PL4IO_INT
+ * Bit 14 TOP_IRAM_INT
+ * Bit 13 TOP_ERAM_INT
+ * Bit 12 TOP_XAUI_INT
+ * Bit 11 TOP_MSTAT_INT
+ * Bit 10 TOP_RXXG_INT
+ * Bit 9 TOP_TXXG_INT
+ * Bit 8 TOP_XRF_INT
+ * Bit 7 TOP_XTEF_INT
+ * Bit 6 TOP_MDIO_BUSY_INT
+ * Bit 5 TOP_RXOAM_INT
+ * Bit 4 TOP_TXOAM_INT
+ * Bit 3 TOP_IFLX_INT
+ * Bit 2 TOP_EFLX_INT
+ * Bit 1 TOP_PL4ODP_INT
+ * Bit 0 TOP_PL4IDU_INT
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TOP_PL4IO_INT 0x8000
+#define SUNI1x10GEXP_BITMSK_TOP_IRAM_INT 0x4000
+#define SUNI1x10GEXP_BITMSK_TOP_ERAM_INT 0x2000
+#define SUNI1x10GEXP_BITMSK_TOP_XAUI_INT 0x1000
+#define SUNI1x10GEXP_BITMSK_TOP_MSTAT_INT 0x0800
+#define SUNI1x10GEXP_BITMSK_TOP_RXXG_INT 0x0400
+#define SUNI1x10GEXP_BITMSK_TOP_TXXG_INT 0x0200
+#define SUNI1x10GEXP_BITMSK_TOP_XRF_INT 0x0100
+#define SUNI1x10GEXP_BITMSK_TOP_XTEF_INT 0x0080
+#define SUNI1x10GEXP_BITMSK_TOP_MDIO_BUSY_INT 0x0040
+#define SUNI1x10GEXP_BITMSK_TOP_RXOAM_INT 0x0020
+#define SUNI1x10GEXP_BITMSK_TOP_TXOAM_INT 0x0010
+#define SUNI1x10GEXP_BITMSK_TOP_IFLX_INT 0x0008
+#define SUNI1x10GEXP_BITMSK_TOP_EFLX_INT 0x0004
+#define SUNI1x10GEXP_BITMSK_TOP_PL4ODP_INT 0x0002
+#define SUNI1x10GEXP_BITMSK_TOP_PL4IDU_INT 0x0001
+
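+/*
+ * Usage sketch (illustrative only): the Master Interrupt Status register
+ * summarizes the per-block interrupt sources, so a top-level handler can
+ * read it once and fan out.  suni_read_reg() and the handle_*() helpers
+ * are hypothetical driver code, not part of this header:
+ *
+ *     u16 mis = suni_read_reg(dev, 0x000D);
+ *
+ *     if (mis & SUNI1x10GEXP_BITMSK_TOP_RXXG_INT)
+ *             handle_rxxg(dev);
+ *     if (mis & SUNI1x10GEXP_BITMSK_TOP_TXXG_INT)
+ *             handle_txxg(dev);
+ *     if (mis & SUNI1x10GEXP_BITMSK_TOP_MSTAT_INT)
+ *             handle_mstat(dev);
+ */
+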
+/*----------------------------------------------------------------------------
+ * Register 0x000E: PM3393 Global Interrupt Enable
+ * Bit 15 TOP_INTE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TOP_INTE 0x8000
+
+/*----------------------------------------------------------------------------
+ * Register 0x0010: XTEF Miscellaneous Control
+ * Bit 7 RF_VAL
+ * Bit 6 RF_OVERRIDE
+ * Bit 5 LF_VAL
+ * Bit 4 LF_OVERRIDE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RF_VAL 0x0080
+#define SUNI1x10GEXP_BITMSK_RF_OVERRIDE 0x0040
+#define SUNI1x10GEXP_BITMSK_LF_VAL 0x0020
+#define SUNI1x10GEXP_BITMSK_LF_OVERRIDE 0x0010
+#define SUNI1x10GEXP_BITMSK_LFRF_OVERRIDE_VAL 0x00F0
+
+/*----------------------------------------------------------------------------
+ * Register 0x0011: XRF Miscellaneous Control
+ * Bit 6-4 EN_IDLE_REP
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EN_IDLE_REP 0x0070
+
+/*----------------------------------------------------------------------------
+ * Register 0x0100: SERDES 3125 Configuration Register 1
+ * Bit 10 RXEQB_3
+ * Bit 8 RXEQB_2
+ * Bit 6 RXEQB_1
+ * Bit 4 RXEQB_0
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXEQB 0x0FF0
+#define SUNI1x10GEXP_BITOFF_RXEQB_3 10
+#define SUNI1x10GEXP_BITOFF_RXEQB_2 8
+#define SUNI1x10GEXP_BITOFF_RXEQB_1 6
+#define SUNI1x10GEXP_BITOFF_RXEQB_0 4
+
+/*----------------------------------------------------------------------------
+ * Register 0x0101: SERDES 3125 Configuration Register 2
+ * Bit 12 YSEL
+ * Bit 7 PRE_EMPH_3
+ * Bit 6 PRE_EMPH_2
+ * Bit 5 PRE_EMPH_1
+ * Bit 4 PRE_EMPH_0
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_YSEL 0x1000
+#define SUNI1x10GEXP_BITMSK_PRE_EMPH 0x00F0
+#define SUNI1x10GEXP_BITMSK_PRE_EMPH_3 0x0080
+#define SUNI1x10GEXP_BITMSK_PRE_EMPH_2 0x0040
+#define SUNI1x10GEXP_BITMSK_PRE_EMPH_1 0x0020
+#define SUNI1x10GEXP_BITMSK_PRE_EMPH_0 0x0010
+
+/*----------------------------------------------------------------------------
+ * Register 0x0102: SERDES 3125 Interrupt Enable Register
+ * Bit 3 LASIE
+ * Bit 2 SPLL_RAE
+ * Bit 1 MPLL_RAE
+ * Bit 0 PLL_LOCKE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LASIE 0x0008
+#define SUNI1x10GEXP_BITMSK_SPLL_RAE 0x0004
+#define SUNI1x10GEXP_BITMSK_MPLL_RAE 0x0002
+#define SUNI1x10GEXP_BITMSK_PLL_LOCKE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0103: SERDES 3125 Interrupt Visibility Register
+ * Bit 3 LASIV
+ * Bit 2 SPLL_RAV
+ * Bit 1 MPLL_RAV
+ * Bit 0 PLL_LOCKV
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LASIV 0x0008
+#define SUNI1x10GEXP_BITMSK_SPLL_RAV 0x0004
+#define SUNI1x10GEXP_BITMSK_MPLL_RAV 0x0002
+#define SUNI1x10GEXP_BITMSK_PLL_LOCKV 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0104: SERDES 3125 Interrupt Status Register
+ * Bit 3 LASII
+ * Bit 2 SPLL_RAI
+ * Bit 1 MPLL_RAI
+ * Bit 0 PLL_LOCKI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LASII 0x0008
+#define SUNI1x10GEXP_BITMSK_SPLL_RAI 0x0004
+#define SUNI1x10GEXP_BITMSK_MPLL_RAI 0x0002
+#define SUNI1x10GEXP_BITMSK_PLL_LOCKI 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x0107: SERDES 3125 Test Configuration
+ * Bit 12 DUALTX
+ * Bit 10 HC_1
+ * Bit 9 HC_0
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_DUALTX 0x1000
+#define SUNI1x10GEXP_BITMSK_HC 0x0600
+#define SUNI1x10GEXP_BITOFF_HC_0 9
+
+/*----------------------------------------------------------------------------
+ * Register 0x2040: RXXG Configuration 1
+ * Bit 15 RXXG_RXEN
+ * Bit 14 RXXG_ROCF
+ * Bit 13 RXXG_PAD_STRIP
+ * Bit 10 RXXG_PUREP
+ * Bit 9 RXXG_LONGP
+ * Bit 8 RXXG_PARF
+ * Bit 7 RXXG_FLCHK
+ * Bit 5 RXXG_PASS_CTRL
+ * Bit 3 RXXG_CRC_STRIP
+ * Bit 2-0 RXXG_MIFG
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_RXEN 0x8000
+#define SUNI1x10GEXP_BITMSK_RXXG_ROCF 0x4000
+#define SUNI1x10GEXP_BITMSK_RXXG_PAD_STRIP 0x2000
+#define SUNI1x10GEXP_BITMSK_RXXG_PUREP 0x0400
+#define SUNI1x10GEXP_BITMSK_RXXG_LONGP 0x0200
+#define SUNI1x10GEXP_BITMSK_RXXG_PARF 0x0100
+#define SUNI1x10GEXP_BITMSK_RXXG_FLCHK 0x0080
+#define SUNI1x10GEXP_BITMSK_RXXG_PASS_CTRL 0x0020
+#define SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP 0x0008
+
+/*----------------------------------------------------------------------------
+ * Register 0x2041: RXXG Configuration 2
+ * Bit 7-0 RXXG_HDRSIZE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_HDRSIZE 0x00FF
+
+/*----------------------------------------------------------------------------
+ * Register 0x2042: RXXG Configuration 3
+ * Bit 15 RXXG_MIN_LERRE
+ * Bit 14 RXXG_MAX_LERRE
+ * Bit 12 RXXG_LINE_ERRE
+ * Bit 10 RXXG_RX_OVRE
+ * Bit 9 RXXG_ADR_FILTERE
+ * Bit 8 RXXG_ERR_FILTERE
+ * Bit 5 RXXG_PRMB_ERRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_MIN_LERRE 0x8000
+#define SUNI1x10GEXP_BITMSK_RXXG_MAX_LERRE 0x4000
+#define SUNI1x10GEXP_BITMSK_RXXG_LINE_ERRE 0x1000
+#define SUNI1x10GEXP_BITMSK_RXXG_RX_OVRE 0x0400
+#define SUNI1x10GEXP_BITMSK_RXXG_ADR_FILTERE 0x0200
+#define SUNI1x10GEXP_BITMSK_RXXG_ERR_FILTERE 0x0100
+#define SUNI1x10GEXP_BITMSK_RXXG_PRMB_ERRE 0x0020
+
+/*----------------------------------------------------------------------------
+ * Register 0x2043: RXXG Interrupt
+ * Bit 15 RXXG_MIN_LERRI
+ * Bit 14 RXXG_MAX_LERRI
+ * Bit 12 RXXG_LINE_ERRI
+ * Bit 10 RXXG_RX_OVRI
+ * Bit 9 RXXG_ADR_FILTERI
+ * Bit 8 RXXG_ERR_FILTERI
+ * Bit 5 RXXG_PRMB_ERRI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_MIN_LERRI 0x8000
+#define SUNI1x10GEXP_BITMSK_RXXG_MAX_LERRI 0x4000
+#define SUNI1x10GEXP_BITMSK_RXXG_LINE_ERRI 0x1000
+#define SUNI1x10GEXP_BITMSK_RXXG_RX_OVRI 0x0400
+#define SUNI1x10GEXP_BITMSK_RXXG_ADR_FILTERI 0x0200
+#define SUNI1x10GEXP_BITMSK_RXXG_ERR_FILTERI 0x0100
+#define SUNI1x10GEXP_BITMSK_RXXG_PRMB_ERRI 0x0020
+
+/*----------------------------------------------------------------------------
+ * Register 0x2049: RXXG Receive FIFO Threshold
+ * Bit 2-0 RXXG_CUT_THRU
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_CUT_THRU 0x0007
+#define SUNI1x10GEXP_BITOFF_RXXG_CUT_THRU 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2062 - 0x2069: RXXG Exact Match VID
+ * Bit 11-0 RXXG_VID_MATCH
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_VID_MATCH 0x0FFF
+#define SUNI1x10GEXP_BITOFF_RXXG_VID_MATCH 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x206E - 0x206F: RXXG Address Filter Control
+ * Bit 3 RXXG_FORWARD_ENABLE
+ * Bit 2 RXXG_VLAN_ENABLE
+ * Bit 1 RXXG_SRC_ADDR
+ * Bit 0 RXXG_MATCH_ENABLE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_FORWARD_ENABLE 0x0008
+#define SUNI1x10GEXP_BITMSK_RXXG_VLAN_ENABLE 0x0004
+#define SUNI1x10GEXP_BITMSK_RXXG_SRC_ADDR 0x0002
+#define SUNI1x10GEXP_BITMSK_RXXG_MATCH_ENABLE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2070: RXXG Address Filter Control 2
+ * Bit 1 RXXG_PMODE
+ * Bit 0 RXXG_MHASH_EN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_PMODE 0x0002
+#define SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2081: XRF Control Register 2
+ * Bit 6 EN_PKT_GEN
+ * Bit 4-2 PATT
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EN_PKT_GEN 0x0040
+#define SUNI1x10GEXP_BITMSK_PATT 0x001C
+#define SUNI1x10GEXP_BITOFF_PATT 2
+
+/*----------------------------------------------------------------------------
+ * Register 0x2088: XRF Interrupt Enable
+ * Bit 12-9 LANE_HICERE
+ * Bit 8-5 HS_SD_LANEE
+ * Bit 4 ALIGN_STATUS_ERRE
+ * Bit 3-0 LANE_SYNC_STAT_ERRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LANE_HICERE 0x1E00
+#define SUNI1x10GEXP_BITOFF_LANE_HICERE 9
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANEE 0x01E0
+#define SUNI1x10GEXP_BITOFF_HS_SD_LANEE 5
+#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERRE 0x0010
+#define SUNI1x10GEXP_BITMSK_LANE_SYNC_STAT_ERRE 0x000F
+#define SUNI1x10GEXP_BITOFF_LANE_SYNC_STAT_ERRE 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2089: XRF Interrupt Status
+ * Bit 12-9 LANE_HICERI
+ * Bit 8-5 HS_SD_LANEI
+ * Bit 4 ALIGN_STATUS_ERRI
+ * Bit 3-0 LANE_SYNC_STAT_ERRI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LANE_HICERI 0x1E00
+#define SUNI1x10GEXP_BITOFF_LANE_HICERI 9
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANEI 0x01E0
+#define SUNI1x10GEXP_BITOFF_HS_SD_LANEI 5
+#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERRI 0x0010
+#define SUNI1x10GEXP_BITMSK_LANE_SYNC_STAT_ERRI 0x000F
+#define SUNI1x10GEXP_BITOFF_LANE_SYNC_STAT_ERRI 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x208A: XRF Error Status
+ * Bit 8-5 HS_SD_LANE
+ * Bit 4 ALIGN_STATUS_ERR
+ * Bit 3-0 LANE_SYNC_STAT_ERR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANE3 0x0100
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANE2 0x0080
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANE1 0x0040
+#define SUNI1x10GEXP_BITMSK_HS_SD_LANE0 0x0020
+#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERR 0x0010
+#define SUNI1x10GEXP_BITMSK_LANE3_SYNC_STAT_ERR 0x0008
+#define SUNI1x10GEXP_BITMSK_LANE2_SYNC_STAT_ERR 0x0004
+#define SUNI1x10GEXP_BITMSK_LANE1_SYNC_STAT_ERR 0x0002
+#define SUNI1x10GEXP_BITMSK_LANE0_SYNC_STAT_ERR 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x208B: XRF Diagnostic Interrupt Enable
+ * Bit 7-4 LANE_OVERRUNE
+ * Bit 3-0 LANE_UNDERRUNE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LANE_OVERRUNE 0x00F0
+#define SUNI1x10GEXP_BITOFF_LANE_OVERRUNE 4
+#define SUNI1x10GEXP_BITMSK_LANE_UNDERRUNE 0x000F
+#define SUNI1x10GEXP_BITOFF_LANE_UNDERRUNE 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x208C: XRF Diagnostic Interrupt Status
+ * Bit 7-4 LANE_OVERRUNI
+ * Bit 3-0 LANE_UNDERRUNI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_LANE_OVERRUNI 0x00F0
+#define SUNI1x10GEXP_BITOFF_LANE_OVERRUNI 4
+#define SUNI1x10GEXP_BITMSK_LANE_UNDERRUNI 0x000F
+#define SUNI1x10GEXP_BITOFF_LANE_UNDERRUNI 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C0: RXOAM Configuration
+ * Bit 15 RXOAM_BUSY
+ * Bit 14-12 RXOAM_F2_SEL
+ * Bit 10-8 RXOAM_F1_SEL
+ * Bit 7-6 RXOAM_FILTER_CTRL
+ * Bit 5-0 RXOAM_PX_EN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_BUSY 0x8000
+#define SUNI1x10GEXP_BITMSK_RXOAM_F2_SEL 0x7000
+#define SUNI1x10GEXP_BITOFF_RXOAM_F2_SEL 12
+#define SUNI1x10GEXP_BITMSK_RXOAM_F1_SEL 0x0700
+#define SUNI1x10GEXP_BITOFF_RXOAM_F1_SEL 8
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_CTRL 0x00C0
+#define SUNI1x10GEXP_BITOFF_RXOAM_FILTER_CTRL 6
+#define SUNI1x10GEXP_BITMSK_RXOAM_PX_EN 0x003F
+#define SUNI1x10GEXP_BITOFF_RXOAM_PX_EN 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C1,0x20C2: RXOAM Filter Configuration
+ * Bit 15-8 RXOAM_FX_MASK
+ * Bit 7-0 RXOAM_FX_VAL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_FX_MASK 0xFF00
+#define SUNI1x10GEXP_BITOFF_RXOAM_FX_MASK 8
+#define SUNI1x10GEXP_BITMSK_RXOAM_FX_VAL 0x00FF
+#define SUNI1x10GEXP_BITOFF_RXOAM_FX_VAL 0
+
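+/*
+ * Usage sketch (illustrative only): each filter register carries an
+ * 8-bit mask in the upper byte and an 8-bit match value in the lower
+ * byte, so a filter word can be packed as:
+ *
+ *     u16 f = ((msk << SUNI1x10GEXP_BITOFF_RXOAM_FX_MASK) &
+ *              SUNI1x10GEXP_BITMSK_RXOAM_FX_MASK) |
+ *             (val & SUNI1x10GEXP_BITMSK_RXOAM_FX_VAL);
+ */
+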
+/*----------------------------------------------------------------------------
+ * Register 0x20C3: RXOAM Configuration Register 2
+ * Bit 13 RXOAM_REC_BYTE_VAL
+ * Bit 11-10 RXOAM_BYPASS_MODE
+ * Bit 5-0 RXOAM_PX_CLEAR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_REC_BYTE_VAL 0x2000
+#define SUNI1x10GEXP_BITMSK_RXOAM_BYPASS_MODE 0x0C00
+#define SUNI1x10GEXP_BITOFF_RXOAM_BYPASS_MODE 10
+#define SUNI1x10GEXP_BITMSK_RXOAM_PX_CLEAR 0x003F
+#define SUNI1x10GEXP_BITOFF_RXOAM_PX_CLEAR 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C4: RXOAM HEC Configuration
+ * Bit 15-8 RXOAM_COSET
+ * Bit 2 RXOAM_HEC_ERR_PKT
+ * Bit 0 RXOAM_HEC_EN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_COSET 0xFF00
+#define SUNI1x10GEXP_BITOFF_RXOAM_COSET 8
+#define SUNI1x10GEXP_BITMSK_RXOAM_HEC_ERR_PKT 0x0004
+#define SUNI1x10GEXP_BITMSK_RXOAM_HEC_EN 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C7: RXOAM Interrupt Enable
+ * Bit 10 RXOAM_FILTER_THRSHE
+ * Bit 9 RXOAM_OAM_ERRE
+ * Bit 8 RXOAM_HECE_THRSHE
+ * Bit 7 RXOAM_SOPE
+ * Bit 6 RXOAM_RFE
+ * Bit 5 RXOAM_LFE
+ * Bit 4 RXOAM_DV_ERRE
+ * Bit 3 RXOAM_DATA_INVALIDE
+ * Bit 2 RXOAM_FILTER_DROPE
+ * Bit 1 RXOAM_HECE
+ * Bit 0 RXOAM_OFLE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHE 0x0400
+#define SUNI1x10GEXP_BITMSK_RXOAM_OAM_ERRE 0x0200
+#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHE 0x0100
+#define SUNI1x10GEXP_BITMSK_RXOAM_SOPE 0x0080
+#define SUNI1x10GEXP_BITMSK_RXOAM_RFE 0x0040
+#define SUNI1x10GEXP_BITMSK_RXOAM_LFE 0x0020
+#define SUNI1x10GEXP_BITMSK_RXOAM_DV_ERRE 0x0010
+#define SUNI1x10GEXP_BITMSK_RXOAM_DATA_INVALIDE 0x0008
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_DROPE 0x0004
+#define SUNI1x10GEXP_BITMSK_RXOAM_HECE 0x0002
+#define SUNI1x10GEXP_BITMSK_RXOAM_OFLE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C8: RXOAM Interrupt Status
+ * Bit 10 RXOAM_FILTER_THRSHI
+ * Bit 9 RXOAM_OAM_ERRI
+ * Bit 8 RXOAM_HECE_THRSHI
+ * Bit 7 RXOAM_SOPI
+ * Bit 6 RXOAM_RFI
+ * Bit 5 RXOAM_LFI
+ * Bit 4 RXOAM_DV_ERRI
+ * Bit 3 RXOAM_DATA_INVALIDI
+ * Bit 2 RXOAM_FILTER_DROPI
+ * Bit 1 RXOAM_HECI
+ * Bit 0 RXOAM_OFLI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHI 0x0400
+#define SUNI1x10GEXP_BITMSK_RXOAM_OAM_ERRI 0x0200
+#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHI 0x0100
+#define SUNI1x10GEXP_BITMSK_RXOAM_SOPI 0x0080
+#define SUNI1x10GEXP_BITMSK_RXOAM_RFI 0x0040
+#define SUNI1x10GEXP_BITMSK_RXOAM_LFI 0x0020
+#define SUNI1x10GEXP_BITMSK_RXOAM_DV_ERRI 0x0010
+#define SUNI1x10GEXP_BITMSK_RXOAM_DATA_INVALIDI 0x0008
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_DROPI 0x0004
+#define SUNI1x10GEXP_BITMSK_RXOAM_HECI 0x0002
+#define SUNI1x10GEXP_BITMSK_RXOAM_OFLI 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x20C9: RXOAM Status
+ * Bit 10 RXOAM_FILTER_THRSHV
+ * Bit 8 RXOAM_HECE_THRSHV
+ * Bit 6 RXOAM_RFV
+ * Bit 5 RXOAM_LFV
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHV 0x0400
+#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHV 0x0100
+#define SUNI1x10GEXP_BITMSK_RXOAM_RFV 0x0040
+#define SUNI1x10GEXP_BITMSK_RXOAM_LFV 0x0020
+
+/*----------------------------------------------------------------------------
+ * Register 0x2100: MSTAT Control
+ * Bit 2 MSTAT_WRITE
+ * Bit 1 MSTAT_CLEAR
+ * Bit 0 MSTAT_SNAP
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MSTAT_WRITE 0x0004
+#define SUNI1x10GEXP_BITMSK_MSTAT_CLEAR 0x0002
+#define SUNI1x10GEXP_BITMSK_MSTAT_SNAP 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2109: MSTAT Counter Write Address
+ * Bit 5-0 MSTAT_WRITE_ADDRESS
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MSTAT_WRITE_ADDRESS 0x003F
+#define SUNI1x10GEXP_BITOFF_MSTAT_WRITE_ADDRESS 0
+
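+/*
+ * Usage sketch (illustrative only): MSTAT_SNAP latches the live counters
+ * into their readable registers and MSTAT_CLEAR zeroes them; a plausible
+ * collection sequence (suni_write_reg() is a hypothetical accessor, and
+ * the exact latching behaviour should be confirmed against the data
+ * sheet) is:
+ *
+ *     suni_write_reg(dev, SUNI1x10GEXP_REG_MSTAT_CONTROL,
+ *                    SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
+ *     ... then read the MSTAT counter registers ...
+ */
+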
+/*----------------------------------------------------------------------------
+ * Register 0x2200: IFLX Global Configuration Register
+ * Bit 15 IFLX_IRCU_ENABLE
+ * Bit 14 IFLX_IDSWT_ENABLE
+ * Bit 13-0 IFLX_IFD_CNT
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_IRCU_ENABLE 0x8000
+#define SUNI1x10GEXP_BITMSK_IFLX_IDSWT_ENABLE 0x4000
+#define SUNI1x10GEXP_BITMSK_IFLX_IFD_CNT 0x3FFF
+#define SUNI1x10GEXP_BITOFF_IFLX_IFD_CNT 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2209: IFLX FIFO Overflow Enable
+ * Bit 0 IFLX_OVFE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_OVFE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x220A: IFLX FIFO Overflow Interrupt
+ * Bit 0 IFLX_OVFI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_OVFI 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x220D: IFLX Indirect Channel Address
+ * Bit 15 IFLX_BUSY
+ * Bit 14 IFLX_RWB
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_BUSY 0x8000
+#define SUNI1x10GEXP_BITMSK_IFLX_RWB 0x4000
+
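+/*
+ * Usage sketch (illustrative only): the IFLX per-channel registers are
+ * reached indirectly.  A plausible read sequence (confirm against the
+ * data sheet; suni_read_reg()/suni_write_reg() are hypothetical
+ * accessors) selects the channel with IFLX_RWB set for a read, polls
+ * IFLX_BUSY, then reads the shadowed limit registers:
+ *
+ *     suni_write_reg(dev, SUNI1x10GEXP_REG_IFLX_INDIR_CHANNEL_ADDRESS,
+ *                    SUNI1x10GEXP_BITMSK_IFLX_RWB | channel);
+ *     while (suni_read_reg(dev, SUNI1x10GEXP_REG_IFLX_INDIR_CHANNEL_ADDRESS) &
+ *            SUNI1x10GEXP_BITMSK_IFLX_BUSY)
+ *             cpu_relax();
+ *     lolim = suni_read_reg(dev,
+ *             SUNI1x10GEXP_REG_IFLX_INDIR_LOGICAL_FIFO_LOW_LIMIT_PROVISION) &
+ *             SUNI1x10GEXP_BITMSK_IFLX_LOLIM;
+ */
+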
+/*----------------------------------------------------------------------------
+ * Register 0x220E: IFLX Indirect Logical FIFO Low Limit & Provision
+ * Bit 9-0 IFLX_LOLIM
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_LOLIM 0x03FF
+#define SUNI1x10GEXP_BITOFF_IFLX_LOLIM 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x220F: IFLX Indirect Logical FIFO High Limit
+ * Bit 9-0 IFLX_HILIM
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_HILIM 0x03FF
+#define SUNI1x10GEXP_BITOFF_IFLX_HILIM 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2210: IFLX Indirect Full/Almost Full Status & Limit
+ * Bit 15 IFLX_FULL
+ * Bit 14 IFLX_AFULL
+ * Bit 13-0 IFLX_AFTH
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_FULL 0x8000
+#define SUNI1x10GEXP_BITMSK_IFLX_AFULL 0x4000
+#define SUNI1x10GEXP_BITMSK_IFLX_AFTH 0x3FFF
+#define SUNI1x10GEXP_BITOFF_IFLX_AFTH 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2211: IFLX Indirect Empty/Almost Empty Status & Limit
+ * Bit 15 IFLX_EMPTY
+ * Bit 14 IFLX_AEMPTY
+ * Bit 13-0 IFLX_AETH
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_IFLX_EMPTY 0x8000
+#define SUNI1x10GEXP_BITMSK_IFLX_AEMPTY 0x4000
+#define SUNI1x10GEXP_BITMSK_IFLX_AETH 0x3FFF
+#define SUNI1x10GEXP_BITOFF_IFLX_AETH 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2240: PL4MOS Configuration Register
+ * Bit 3 PL4MOS_RE_INIT
+ * Bit 2 PL4MOS_EN
+ * Bit 1 PL4MOS_NO_STATUS
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4MOS_RE_INIT 0x0008
+#define SUNI1x10GEXP_BITMSK_PL4MOS_EN 0x0004
+#define SUNI1x10GEXP_BITMSK_PL4MOS_NO_STATUS 0x0002
+
+/*----------------------------------------------------------------------------
+ * Register 0x2243: PL4MOS MaxBurst1 Register
+ * Bit 11-0 PL4MOS_MAX_BURST1
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_BURST1 0x0FFF
+#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_BURST1 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2244: PL4MOS MaxBurst2 Register
+ * Bit 11-0 PL4MOS_MAX_BURST2
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_BURST2 0x0FFF
+#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_BURST2 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2245: PL4MOS Transfer Size Register
+ * Bit 7-0 PL4MOS_MAX_TRANSFER
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_TRANSFER 0x00FF
+#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_TRANSFER 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2280: PL4ODP Configuration
+ * Bit 15-12 PL4ODP_REPEAT_T
+ * Bit 8 PL4ODP_SOP_RULE
+ * Bit 1 PL4ODP_EN_PORTS
+ * Bit 0 PL4ODP_EN_DFWD
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4ODP_REPEAT_T 0xF000
+#define SUNI1x10GEXP_BITOFF_PL4ODP_REPEAT_T 12
+#define SUNI1x10GEXP_BITMSK_PL4ODP_SOP_RULE 0x0100
+#define SUNI1x10GEXP_BITMSK_PL4ODP_EN_PORTS 0x0002
+#define SUNI1x10GEXP_BITMSK_PL4ODP_EN_DFWD 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2282: PL4ODP Interrupt Mask
+ * Bit 7 PL4ODP_PPE_EOPEOBE
+ * Bit 6 PL4ODP_PPE_ERREOPE
+ * Bit 3 PL4ODP_PPE_MEOPE
+ * Bit 2 PL4ODP_PPE_MSOPE
+ * Bit 1 PL4ODP_ES_OVRE
+ * Bit 0 PL4ODP_OUT_DISE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_EOPEOBE 0x0080
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_ERREOPE 0x0040
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MEOPE 0x0008
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MSOPE 0x0004
+#define SUNI1x10GEXP_BITMSK_PL4ODP_ES_OVRE 0x0002
+#define SUNI1x10GEXP_BITMSK_PL4ODP_OUT_DISE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2283: PL4ODP Interrupt
+ * Bit 7 PL4ODP_PPE_EOPEOBI
+ * Bit 6 PL4ODP_PPE_ERREOPI
+ * Bit 3 PL4ODP_PPE_MEOPI
+ * Bit 2 PL4ODP_PPE_MSOPI
+ * Bit 1 PL4ODP_ES_OVRI
+ * Bit 0 PL4ODP_OUT_DISI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_EOPEOBI 0x0080
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_ERREOPI 0x0040
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MEOPI 0x0008
+#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MSOPI 0x0004
+#define SUNI1x10GEXP_BITMSK_PL4ODP_ES_OVRI 0x0002
+#define SUNI1x10GEXP_BITMSK_PL4ODP_OUT_DISI 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2300: PL4IO Lock Detect Status
+ * Bit 15 PL4IO_OUT_ROOLV
+ * Bit 12 PL4IO_IS_ROOLV
+ * Bit 11 PL4IO_DIP2_ERRV
+ * Bit 8 PL4IO_ID_ROOLV
+ * Bit 4 PL4IO_IS_DOOLV
+ * Bit 0 PL4IO_ID_DOOLV
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLV 0x8000
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLV 0x1000
+#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRV 0x0800
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLV 0x0100
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLV 0x0010
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLV 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2301: PL4IO Lock Detect Change
+ * Bit 15 PL4IO_OUT_ROOLI
+ * Bit 12 PL4IO_IS_ROOLI
+ * Bit 11 PL4IO_DIP2_ERRI
+ * Bit 8 PL4IO_ID_ROOLI
+ * Bit 4 PL4IO_IS_DOOLI
+ * Bit 0 PL4IO_ID_DOOLI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLI 0x8000
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLI 0x1000
+#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRI 0x0800
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLI 0x0100
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLI 0x0010
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLI 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2302: PL4IO Lock Detect Mask
+ * Bit 15 PL4IO_OUT_ROOLE
+ * Bit 12 PL4IO_IS_ROOLE
+ * Bit 11 PL4IO_DIP2_ERRE
+ * Bit 8 PL4IO_ID_ROOLE
+ * Bit 4 PL4IO_IS_DOOLE
+ * Bit 0 PL4IO_ID_DOOLE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLE 0x8000
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLE 0x1000
+#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRE 0x0800
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLE 0x0100
+#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLE 0x0010
+#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2303: PL4IO Lock Detect Limits
+ * Bit 15-8 PL4IO_REF_LIMIT
+ * Bit 7-0 PL4IO_TRAN_LIMIT
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_REF_LIMIT 0xFF00
+#define SUNI1x10GEXP_BITOFF_PL4IO_REF_LIMIT 8
+#define SUNI1x10GEXP_BITMSK_PL4IO_TRAN_LIMIT 0x00FF
+#define SUNI1x10GEXP_BITOFF_PL4IO_TRAN_LIMIT 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2304: PL4IO Calendar Repetitions
+ * Bit 15-8 PL4IO_IN_MUL
+ * Bit 7-0 PL4IO_OUT_MUL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_IN_MUL 0xFF00
+#define SUNI1x10GEXP_BITOFF_PL4IO_IN_MUL 8
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_MUL 0x00FF
+#define SUNI1x10GEXP_BITOFF_PL4IO_OUT_MUL 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x2305: PL4IO Configuration
+ * Bit 15 PL4IO_DIP2_ERR_CHK
+ * Bit 11 PL4IO_ODAT_DIS
+ * Bit 10 PL4IO_TRAIN_DIS
+ * Bit 9 PL4IO_OSTAT_DIS
+ * Bit 8 PL4IO_ISTAT_DIS
+ * Bit 7 PL4IO_NO_ISTAT
+ * Bit 6 PL4IO_STAT_OUTSEL
+ * Bit 5 PL4IO_INSEL
+ * Bit 4 PL4IO_DLSEL
+ * Bit 1-0 PL4IO_OUTSEL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERR_CHK 0x8000
+#define SUNI1x10GEXP_BITMSK_PL4IO_ODAT_DIS 0x0800
+#define SUNI1x10GEXP_BITMSK_PL4IO_TRAIN_DIS 0x0400
+#define SUNI1x10GEXP_BITMSK_PL4IO_OSTAT_DIS 0x0200
+#define SUNI1x10GEXP_BITMSK_PL4IO_ISTAT_DIS 0x0100
+#define SUNI1x10GEXP_BITMSK_PL4IO_NO_ISTAT 0x0080
+#define SUNI1x10GEXP_BITMSK_PL4IO_STAT_OUTSEL 0x0040
+#define SUNI1x10GEXP_BITMSK_PL4IO_INSEL 0x0020
+#define SUNI1x10GEXP_BITMSK_PL4IO_DLSEL 0x0010
+#define SUNI1x10GEXP_BITMSK_PL4IO_OUTSEL 0x0003
+#define SUNI1x10GEXP_BITOFF_PL4IO_OUTSEL 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3040: TXXG Configuration Register 1
+ * Bit 15 TXXG_TXEN0
+ * Bit 13 TXXG_HOSTPAUSE
+ * Bit 12-7 TXXG_IPGT
+ * Bit 5 TXXG_32BIT_ALIGN
+ * Bit 4 TXXG_CRCEN
+ * Bit 3 TXXG_FCTX
+ * Bit 2 TXXG_FCRX
+ * Bit 1 TXXG_PADEN
+ * Bit 0 TXXG_SPRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_TXEN0 0x8000
+#define SUNI1x10GEXP_BITMSK_TXXG_HOSTPAUSE 0x2000
+#define SUNI1x10GEXP_BITMSK_TXXG_IPGT 0x1F80
+#define SUNI1x10GEXP_BITOFF_TXXG_IPGT 7
+#define SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN 0x0020
+#define SUNI1x10GEXP_BITMSK_TXXG_CRCEN 0x0010
+#define SUNI1x10GEXP_BITMSK_TXXG_FCTX 0x0008
+#define SUNI1x10GEXP_BITMSK_TXXG_FCRX 0x0004
+#define SUNI1x10GEXP_BITMSK_TXXG_PADEN 0x0002
+#define SUNI1x10GEXP_BITMSK_TXXG_SPRE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3041: TXXG Configuration Register 2
+ * Bit 7-0 TXXG_HDRSIZE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_HDRSIZE 0x00FF
+
+/*----------------------------------------------------------------------------
+ * Register 0x3042: TXXG Configuration Register 3
+ * Bit 15 TXXG_FIFO_ERRE
+ * Bit 14 TXXG_FIFO_UDRE
+ * Bit 13 TXXG_MAX_LERRE
+ * Bit 12 TXXG_MIN_LERRE
+ * Bit 11 TXXG_XFERE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_ERRE 0x8000
+#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_UDRE 0x4000
+#define SUNI1x10GEXP_BITMSK_TXXG_MAX_LERRE 0x2000
+#define SUNI1x10GEXP_BITMSK_TXXG_MIN_LERRE 0x1000
+#define SUNI1x10GEXP_BITMSK_TXXG_XFERE 0x0800
+
+/*----------------------------------------------------------------------------
+ * Register 0x3043: TXXG Interrupt
+ * Bit 15 TXXG_FIFO_ERRI
+ * Bit 14 TXXG_FIFO_UDRI
+ * Bit 13 TXXG_MAX_LERRI
+ * Bit 12 TXXG_MIN_LERRI
+ * Bit 11 TXXG_XFERI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_ERRI 0x8000
+#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_UDRI 0x4000
+#define SUNI1x10GEXP_BITMSK_TXXG_MAX_LERRI 0x2000
+#define SUNI1x10GEXP_BITMSK_TXXG_MIN_LERRI 0x1000
+#define SUNI1x10GEXP_BITMSK_TXXG_XFERI 0x0800
+
+/*----------------------------------------------------------------------------
+ * Register 0x3044: TXXG Status Register
+ * Bit 1 TXXG_TXACTIVE
+ * Bit 0 TXXG_PAUSED
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_TXACTIVE 0x0002
+#define SUNI1x10GEXP_BITMSK_TXXG_PAUSED 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3046: TXXG TX_MINFR - Transmit Min Frame Size Register
+ * Bit 7-0 TXXG_TX_MINFR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_TX_MINFR 0x00FF
+#define SUNI1x10GEXP_BITOFF_TXXG_TX_MINFR 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3052: TXXG Pause Quantum Value Configuration Register
+ * Bit 7-0 TXXG_FC_PAUSE_QNTM
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_FC_PAUSE_QNTM 0x00FF
+#define SUNI1x10GEXP_BITOFF_TXXG_FC_PAUSE_QNTM 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3080: XTEF Control
+ * Bit 3-0 XTEF_FORCE_PARITY_ERR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_XTEF_FORCE_PARITY_ERR 0x000F
+#define SUNI1x10GEXP_BITOFF_XTEF_FORCE_PARITY_ERR 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3084: XTEF Interrupt Event Register
+ * Bit 0 XTEF_LOST_SYNCI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCI 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3085: XTEF Interrupt Enable Register
+ * Bit 0 XTEF_LOST_SYNCE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3086: XTEF Visibility Register
+ * Bit 0 XTEF_LOST_SYNCV
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCV 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x30C0: TXOAM OAM Configuration
+ * Bit 15 TXOAM_HEC_EN
+ * Bit 14 TXOAM_EMPTYCODE_EN
+ * Bit 13 TXOAM_FORCE_IDLE
+ * Bit 12 TXOAM_IGNORE_IDLE
+ * Bit 11-6 TXOAM_PX_OVERWRITE
+ * Bit 5-0 TXOAM_PX_SEL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_HEC_EN 0x8000
+#define SUNI1x10GEXP_BITMSK_TXOAM_EMPTYCODE_EN 0x4000
+#define SUNI1x10GEXP_BITMSK_TXOAM_FORCE_IDLE 0x2000
+#define SUNI1x10GEXP_BITMSK_TXOAM_IGNORE_IDLE 0x1000
+#define SUNI1x10GEXP_BITMSK_TXOAM_PX_OVERWRITE 0x0FC0
+#define SUNI1x10GEXP_BITOFF_TXOAM_PX_OVERWRITE 6
+#define SUNI1x10GEXP_BITMSK_TXOAM_PX_SEL 0x003F
+#define SUNI1x10GEXP_BITOFF_TXOAM_PX_SEL 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x30C1: TXOAM Mini-Packet Rate Configuration
+ * Bit 15 TXOAM_MINIDIS
+ * Bit 14 TXOAM_BUSY
+ * Bit 13 TXOAM_TRANS_EN
+ * Bit 10-0 TXOAM_MINIRATE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_MINIDIS 0x8000
+#define SUNI1x10GEXP_BITMSK_TXOAM_BUSY 0x4000
+#define SUNI1x10GEXP_BITMSK_TXOAM_TRANS_EN 0x2000
+#define SUNI1x10GEXP_BITMSK_TXOAM_MINIRATE 0x07FF
+
+/*----------------------------------------------------------------------------
+ * Register 0x30C2: TXOAM Mini-Packet Gap and FIFO Configuration
+ * Bit 13-10 TXOAM_FTHRESH
+ * Bit 9-6 TXOAM_MINIPOST
+ * Bit 5-0 TXOAM_MINIPRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_FTHRESH 0x3C00
+#define SUNI1x10GEXP_BITOFF_TXOAM_FTHRESH 10
+#define SUNI1x10GEXP_BITMSK_TXOAM_MINIPOST 0x03C0
+#define SUNI1x10GEXP_BITOFF_TXOAM_MINIPOST 6
+#define SUNI1x10GEXP_BITMSK_TXOAM_MINIPRE 0x003F
+
+/*----------------------------------------------------------------------------
+ * Register 0x30C6: TXOAM Interrupt Enable
+ * Bit 2 TXOAM_SOP_ERRE
+ * Bit 1 TXOAM_OFLE
+ * Bit 0 TXOAM_ERRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_SOP_ERRE 0x0004
+#define SUNI1x10GEXP_BITMSK_TXOAM_OFLE 0x0002
+#define SUNI1x10GEXP_BITMSK_TXOAM_ERRE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x30C7: TXOAM Interrupt Status
+ * Bit 2 TXOAM_SOP_ERRI
+ * Bit 1 TXOAM_OFLI
+ * Bit 0 TXOAM_ERRI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_SOP_ERRI 0x0004
+#define SUNI1x10GEXP_BITMSK_TXOAM_OFLI 0x0002
+#define SUNI1x10GEXP_BITMSK_TXOAM_ERRI 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x30CF: TXOAM Coset
+ * Bit 7-0 TXOAM_COSET
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXOAM_COSET 0x00FF
+
+/*----------------------------------------------------------------------------
+ * Register 0x3200: EFLX Global Configuration
+ * Bit 15 EFLX_ERCU_EN
+ * Bit 7 EFLX_EN_EDSWT
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_ERCU_EN 0x8000
+#define SUNI1x10GEXP_BITMSK_EFLX_EN_EDSWT 0x0080
+
+/*----------------------------------------------------------------------------
+ * Register 0x3201: EFLX ERCU Global Status
+ * Bit 13 EFLX_OVF_ERR
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_OVF_ERR 0x2000
+
+/*----------------------------------------------------------------------------
+ * Register 0x3202: EFLX Indirect Channel Address
+ * Bit 15 EFLX_BUSY
+ * Bit 14 EFLX_RDWRB
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_BUSY 0x8000
+#define SUNI1x10GEXP_BITMSK_EFLX_RDWRB 0x4000
+
+/*----------------------------------------------------------------------------
+ * Register 0x3203: EFLX Indirect Logical FIFO Low Limit
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_LOLIM 0x03FF
+#define SUNI1x10GEXP_BITOFF_EFLX_LOLIM 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3204: EFLX Indirect Logical FIFO High Limit
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_HILIM 0x03FF
+#define SUNI1x10GEXP_BITOFF_EFLX_HILIM 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3205: EFLX Indirect Full/Almost-Full Status and Limit
+ * Bit 15 EFLX_FULL
+ * Bit 14 EFLX_AFULL
+ * Bit 13-0 EFLX_AFTH
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_FULL 0x8000
+#define SUNI1x10GEXP_BITMSK_EFLX_AFULL 0x4000
+#define SUNI1x10GEXP_BITMSK_EFLX_AFTH 0x3FFF
+#define SUNI1x10GEXP_BITOFF_EFLX_AFTH 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3206: EFLX Indirect Empty/Almost-Empty Status and Limit
+ * Bit 15 EFLX_EMPTY
+ * Bit 14 EFLX_AEMPTY
+ * Bit 13-0 EFLX_AETH
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_EMPTY 0x8000
+#define SUNI1x10GEXP_BITMSK_EFLX_AEMPTY 0x4000
+#define SUNI1x10GEXP_BITMSK_EFLX_AETH 0x3FFF
+#define SUNI1x10GEXP_BITOFF_EFLX_AETH 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x3207: EFLX Indirect FIFO Cut-Through Threshold
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_CUT_THRU 0x3FFF
+#define SUNI1x10GEXP_BITOFF_EFLX_CUT_THRU 0
+
+/*----------------------------------------------------------------------------
+ * Register 0x320C: EFLX FIFO Overflow Error Enable
+ * Bit 0 EFLX_OVFE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_OVFE 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x320D: EFLX FIFO Overflow Error Indication
+ * Bit 0 EFLX_OVFI
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_OVFI 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3210: EFLX Channel Provision
+ * Bit 0 EFLX_PROV
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_EFLX_PROV 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3280: PL4IDU Configuration
+ * Bit 2 PL4IDU_SYNCH_ON_TRAIN
+ * Bit 1 PL4IDU_EN_PORTS
+ * Bit 0 PL4IDU_EN_DFWD
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IDU_SYNCH_ON_TRAIN 0x0004
+#define SUNI1x10GEXP_BITMSK_PL4IDU_EN_PORTS 0x0002
+#define SUNI1x10GEXP_BITMSK_PL4IDU_EN_DFWD 0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3282: PL4IDU Interrupt Mask
+ * Bit 1 PL4IDU_DIP4E
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IDU_DIP4E 0x0002
+
+/*----------------------------------------------------------------------------
+ * Register 0x3283: PL4IDU Interrupt
+ * Bit 1 PL4IDU_DIP4I
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_PL4IDU_DIP4I 0x0002
+
+#endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */
+
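The BITMSK/BITOFF pairs above are meant to be used together: the mask isolates a field inside the 16-bit register and the offset shifts it down to bit 0. A minimal sketch of that usage in plain C (the helper names are illustrative, not part of the driver):

#include <stdint.h>

/* Isolate a field and shift it down to bit 0. */
static inline uint16_t suni_get_field(uint16_t reg, uint16_t mask,
                                      unsigned int off)
{
	return (reg & mask) >> off;
}

/* Clear a field, then OR in a new value at its offset. */
static inline uint16_t suni_set_field(uint16_t reg, uint16_t mask,
                                      unsigned int off, uint16_t val)
{
	return (reg & ~mask) | ((uint16_t)(val << off) & mask);
}

/* e.g. pull TXOAM_PX_SEL (bits 5:0) out of register 0x30C0:
 *   sel = suni_get_field(v, SUNI1x10GEXP_BITMSK_TXOAM_PX_SEL,
 *                        SUNI1x10GEXP_BITOFF_TXOAM_PX_SEL);
 */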
diff --git a/drivers/net/ethernet/chelsio/cxgb/tp.c b/drivers/net/ethernet/chelsio/cxgb/tp.c
new file mode 100644
index 000000000000..8bed4a59e65f
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/tp.c
@@ -0,0 +1,171 @@
+/* $Date: 2006/02/07 04:21:54 $ $RCSfile: tp.c,v $ $Revision: 1.73 $ */
+#include "common.h"
+#include "regs.h"
+#include "tp.h"
+#ifdef CONFIG_CHELSIO_T1_1G
+#include "fpga_defs.h"
+#endif
+
+struct petp {
+ adapter_t *adapter;
+};
+
+/* Pause deadlock avoidance parameters */
+#define DROP_MSEC 16
+#define DROP_PKTS_CNT 1
+
+static void tp_init(adapter_t *ap, const struct tp_params *p,
+ unsigned int tp_clk)
+{
+ u32 val;
+
+ if (!t1_is_asic(ap))
+ return;
+
+ val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
+ F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
+ if (!p->pm_size)
+ val |= F_OFFLOAD_DISABLE;
+ else
+ val |= F_TP_IN_ESPI_CHECK_IP_CSUM | F_TP_IN_ESPI_CHECK_TCP_CSUM;
+ writel(val, ap->regs + A_TP_IN_CONFIG);
+ writel(F_TP_OUT_CSPI_CPL |
+ F_TP_OUT_ESPI_ETHERNET |
+ F_TP_OUT_ESPI_GENERATE_IP_CSUM |
+ F_TP_OUT_ESPI_GENERATE_TCP_CSUM, ap->regs + A_TP_OUT_CONFIG);
+ writel(V_IP_TTL(64) |
+ F_PATH_MTU /* IP DF bit */ |
+ V_5TUPLE_LOOKUP(p->use_5tuple_mode) |
+ V_SYN_COOKIE_PARAMETER(29), ap->regs + A_TP_GLOBAL_CONFIG);
+ /*
+ * Enable pause frame deadlock prevention.
+ */
+ if (is_T2(ap) && ap->params.nports > 1) {
+ u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
+
+ writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
+ V_DROP_TICKS_CNT(drop_ticks) |
+ V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
+ ap->regs + A_TP_TX_DROP_CONFIG);
+ }
+}
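
The deadlock window above is expressed in core-clock ticks. A worked example, assuming tp_clk is the TP core clock in Hz (the 200 MHz figure is ours, for illustration only):

/* drop_ticks = DROP_MSEC * (tp_clk / 1000)
 *            = 16 * (200000000 / 1000)
 *            = 3200000 ticks,
 * i.e. after 16 ms of sustained pause the hardware may drop up to
 * DROP_PKTS_CNT (= 1) packet to break a pause-frame deadlock.
 */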
+
+void t1_tp_destroy(struct petp *tp)
+{
+ kfree(tp);
+}
+
+struct petp *__devinit t1_tp_create(adapter_t *adapter, struct tp_params *p)
+{
+ struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL);
+
+ if (!tp)
+ return NULL;
+
+ tp->adapter = adapter;
+
+ return tp;
+}
+
+void t1_tp_intr_enable(struct petp *tp)
+{
+ u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE);
+
+#ifdef CONFIG_CHELSIO_T1_1G
+ if (!t1_is_asic(tp->adapter)) {
+ /* FPGA */
+ writel(0xffffffff,
+ tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_ENABLE);
+ writel(tp_intr | FPGA_PCIX_INTERRUPT_TP,
+ tp->adapter->regs + A_PL_ENABLE);
+ } else
+#endif
+ {
+ /* We don't use any TP interrupts */
+ writel(0, tp->adapter->regs + A_TP_INT_ENABLE);
+ writel(tp_intr | F_PL_INTR_TP,
+ tp->adapter->regs + A_PL_ENABLE);
+ }
+}
+
+void t1_tp_intr_disable(struct petp *tp)
+{
+ u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE);
+
+#ifdef CONFIG_CHELSIO_T1_1G
+ if (!t1_is_asic(tp->adapter)) {
+ /* FPGA */
+ writel(0, tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_ENABLE);
+ writel(tp_intr & ~FPGA_PCIX_INTERRUPT_TP,
+ tp->adapter->regs + A_PL_ENABLE);
+ } else
+#endif
+ {
+ writel(0, tp->adapter->regs + A_TP_INT_ENABLE);
+ writel(tp_intr & ~F_PL_INTR_TP,
+ tp->adapter->regs + A_PL_ENABLE);
+ }
+}
+
+void t1_tp_intr_clear(struct petp *tp)
+{
+#ifdef CONFIG_CHELSIO_T1_1G
+ if (!t1_is_asic(tp->adapter)) {
+ writel(0xffffffff,
+ tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE);
+ writel(FPGA_PCIX_INTERRUPT_TP, tp->adapter->regs + A_PL_CAUSE);
+ return;
+ }
+#endif
+ writel(0xffffffff, tp->adapter->regs + A_TP_INT_CAUSE);
+ writel(F_PL_INTR_TP, tp->adapter->regs + A_PL_CAUSE);
+}
+
+int t1_tp_intr_handler(struct petp *tp)
+{
+ u32 cause;
+
+#ifdef CONFIG_CHELSIO_T1_1G
+ /* FPGA doesn't support TP interrupts. */
+ if (!t1_is_asic(tp->adapter))
+ return 1;
+#endif
+
+ cause = readl(tp->adapter->regs + A_TP_INT_CAUSE);
+ writel(cause, tp->adapter->regs + A_TP_INT_CAUSE);
+ return 0;
+}
+
+static void set_csum_offload(struct petp *tp, u32 csum_bit, int enable)
+{
+ u32 val = readl(tp->adapter->regs + A_TP_GLOBAL_CONFIG);
+
+ if (enable)
+ val |= csum_bit;
+ else
+ val &= ~csum_bit;
+ writel(val, tp->adapter->regs + A_TP_GLOBAL_CONFIG);
+}
+
+void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable)
+{
+ set_csum_offload(tp, F_IP_CSUM, enable);
+}
+
+void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable)
+{
+ set_csum_offload(tp, F_TCP_CSUM, enable);
+}
+
+/*
+ * Initialize TP state. tp_params contains initial settings for some TP
+ * parameters, particularly the one-time PM and CM settings.
+ */
+int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk)
+{
+ adapter_t *adapter = tp->adapter;
+
+ tp_init(adapter, p, tp_clk);
+ writel(F_TP_RESET, adapter->regs + A_TP_RESET);
+ return 0;
+}
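
Taken together, the TP object has a simple create/reset/teardown lifecycle. A hypothetical bring-up sketch (error handling elided; my_adapter, my_tp_params and core_clk are placeholders, not driver symbols):

struct petp *tp = t1_tp_create(my_adapter, &my_tp_params);

if (tp) {
	t1_tp_reset(tp, &my_tp_params, core_clk); /* program and reset TP */
	t1_tp_intr_enable(tp);                    /* route TP into PL interrupts */
	/* ... normal operation ... */
	t1_tp_intr_disable(tp);
	t1_tp_destroy(tp);
}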
diff --git a/drivers/net/ethernet/chelsio/cxgb/tp.h b/drivers/net/ethernet/chelsio/cxgb/tp.h
new file mode 100644
index 000000000000..dfd8ce25106a
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/tp.h
@@ -0,0 +1,72 @@
+/* $Date: 2005/03/07 23:59:05 $ $RCSfile: tp.h,v $ $Revision: 1.20 $ */
+#ifndef CHELSIO_TP_H
+#define CHELSIO_TP_H
+
+#include "common.h"
+
+#define TP_MAX_RX_COALESCING_SIZE 16224U
+
+struct tp_mib_statistics {
+
+ /* IP */
+ u32 ipInReceive_hi;
+ u32 ipInReceive_lo;
+ u32 ipInHdrErrors_hi;
+ u32 ipInHdrErrors_lo;
+ u32 ipInAddrErrors_hi;
+ u32 ipInAddrErrors_lo;
+ u32 ipInUnknownProtos_hi;
+ u32 ipInUnknownProtos_lo;
+ u32 ipInDiscards_hi;
+ u32 ipInDiscards_lo;
+ u32 ipInDelivers_hi;
+ u32 ipInDelivers_lo;
+ u32 ipOutRequests_hi;
+ u32 ipOutRequests_lo;
+ u32 ipOutDiscards_hi;
+ u32 ipOutDiscards_lo;
+ u32 ipOutNoRoutes_hi;
+ u32 ipOutNoRoutes_lo;
+ u32 ipReasmTimeout;
+ u32 ipReasmReqds;
+ u32 ipReasmOKs;
+ u32 ipReasmFails;
+
+ u32 reserved[8];
+
+ /* TCP */
+ u32 tcpActiveOpens;
+ u32 tcpPassiveOpens;
+ u32 tcpAttemptFails;
+ u32 tcpEstabResets;
+ u32 tcpOutRsts;
+ u32 tcpCurrEstab;
+ u32 tcpInSegs_hi;
+ u32 tcpInSegs_lo;
+ u32 tcpOutSegs_hi;
+ u32 tcpOutSegs_lo;
+ u32 tcpRetransSeg_hi;
+ u32 tcpRetransSeg_lo;
+ u32 tcpInErrs_hi;
+ u32 tcpInErrs_lo;
+ u32 tcpRtoMin;
+ u32 tcpRtoMax;
+};
+
+struct petp;
+struct tp_params;
+
+struct petp *t1_tp_create(adapter_t *adapter, struct tp_params *p);
+void t1_tp_destroy(struct petp *tp);
+
+void t1_tp_intr_disable(struct petp *tp);
+void t1_tp_intr_enable(struct petp *tp);
+void t1_tp_intr_clear(struct petp *tp);
+int t1_tp_intr_handler(struct petp *tp);
+
+void t1_tp_get_mib_statistics(adapter_t *adap, struct tp_mib_statistics *tps);
+void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable);
+void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable);
+int t1_tp_set_coalescing_size(struct petp *tp, unsigned int size);
+int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk);
+#endif
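
The 64-bit MIB counters in struct tp_mib_statistics are split into _hi/_lo u32 halves; a reader recombines them as below (the helper name is illustrative):

#include <stdint.h>

static inline uint64_t tp_mib64(uint32_t hi, uint32_t lo)
{
	/* e.g. tp_mib64(tps->ipInReceive_hi, tps->ipInReceive_lo) */
	return ((uint64_t)hi << 32) | lo;
}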
diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
new file mode 100644
index 000000000000..b0cb388f5e12
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
@@ -0,0 +1,730 @@
+/* $Date: 2006/04/28 19:20:06 $ $RCSfile: vsc7326.c,v $ $Revision: 1.19 $ */
+
+/* Driver for Vitesse VSC7326 (Schaumburg) MAC */
+
+#include "gmac.h"
+#include "elmer0.h"
+#include "vsc7326_reg.h"
+
+/* Update fast-changing statistics every 15 seconds */
+#define STATS_TICK_SECS 15
+/* 30 minutes for full statistics update */
+#define MAJOR_UPDATE_TICKS (1800 / STATS_TICK_SECS)
+
+#define MAX_MTU 9600
+
+/* The egress WM value 0x01a01fff should be used only when the
+ * interface is down (MAC port disabled). This is a workaround
+ * for disabling the T2/MAC flow-control. When the interface is
+ * enabled, the WM value should be set to 0x014a03F0.
+ */
+#define WM_DISABLE 0x01a01fff
+#define WM_ENABLE 0x014a03F0
+
+struct init_table {
+ u32 addr;
+ u32 data;
+};
+
+struct _cmac_instance {
+ u32 index;
+ u32 ticks;
+};
+
+#define INITBLOCK_SLEEP 0xffffffff
+
+static void vsc_read(adapter_t *adapter, u32 addr, u32 *val)
+{
+ u32 status, vlo, vhi;
+ int i;
+
+ spin_lock_bh(&adapter->mac_lock);
+ t1_tpi_read(adapter, (addr << 2) + 4, &vlo);
+ i = 0;
+ do {
+ t1_tpi_read(adapter, (REG_LOCAL_STATUS << 2) + 4, &vlo);
+ t1_tpi_read(adapter, REG_LOCAL_STATUS << 2, &vhi);
+ status = (vhi << 16) | vlo;
+ i++;
+ } while (((status & 1) == 0) && (i < 50));
+ if (i == 50)
+ pr_err("Invalid tpi read from MAC, breaking loop.\n");
+
+ t1_tpi_read(adapter, (REG_LOCAL_DATA << 2) + 4, &vlo);
+ t1_tpi_read(adapter, REG_LOCAL_DATA << 2, &vhi);
+
+ *val = (vhi << 16) | vlo;
+
+ /* pr_err("rd: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n",
+ ((addr&0xe000)>>13), ((addr&0x1e00)>>9),
+ ((addr&0x01fe)>>1), *val); */
+ spin_unlock_bh(&adapter->mac_lock);
+}
+
+static void vsc_write(adapter_t *adapter, u32 addr, u32 data)
+{
+ spin_lock_bh(&adapter->mac_lock);
+ t1_tpi_write(adapter, (addr << 2) + 4, data & 0xFFFF);
+ t1_tpi_write(adapter, addr << 2, (data >> 16) & 0xFFFF);
+ /* pr_err("wr: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n",
+ ((addr&0xe000)>>13), ((addr&0x1e00)>>9),
+ ((addr&0x01fe)>>1), data); */
+ spin_unlock_bh(&adapter->mac_lock);
+}
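
As the commented-out trace lines hint, the CRA-encoded address packs block, sub-block and register fields (see vsc7326_reg.h), and each 32-bit MAC register moves as two 16-bit TPI halves: low halfword at (addr << 2) + 4, high halfword at addr << 2. A small decode sketch (the helper name is ours):

static void vsc_decode_addr(u32 addr, u32 *blk, u32 *sub, u32 *reg)
{
	*blk = (addr & 0xe000) >> 13;	/* 3-bit block id */
	*sub = (addr & 0x1e00) >> 9;	/* 4-bit sub-block */
	*reg = (addr & 0x01fe) >> 1;	/* 8-bit register */
}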
+
+/* Hard reset the MAC. This wipes out *all* configuration. */
+static void vsc7326_full_reset(adapter_t *adapter)
+{
+ u32 val;
+ u32 result = 0xffff;
+
+ t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+ val &= ~1;
+ t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ udelay(2);
+ val |= 0x1; /* Enable the MAC itself */
+ val |= 0x800; /* Turn off the red LED */
+ t1_tpi_write(adapter, A_ELMER0_GPO, val);
+ mdelay(1);
+ vsc_write(adapter, REG_SW_RESET, 0x80000001);
+ do {
+ mdelay(1);
+ vsc_read(adapter, REG_SW_RESET, &result);
+ } while (result != 0x0);
+}
+
+static struct init_table vsc7326_reset[] = {
+ { REG_IFACE_MODE, 0x00000000 },
+ { REG_CRC_CFG, 0x00000020 },
+ { REG_PLL_CLK_SPEED, 0x00050c00 },
+ { REG_PLL_CLK_SPEED, 0x00050c00 },
+ { REG_MSCH, 0x00002f14 },
+ { REG_SPI4_MISC, 0x00040409 },
+ { REG_SPI4_DESKEW, 0x00080000 },
+ { REG_SPI4_ING_SETUP2, 0x08080004 },
+ { REG_SPI4_ING_SETUP0, 0x04111004 },
+ { REG_SPI4_EGR_SETUP0, 0x80001a04 },
+ { REG_SPI4_ING_SETUP1, 0x02010000 },
+ { REG_AGE_INC(0), 0x00000000 },
+ { REG_AGE_INC(1), 0x00000000 },
+ { REG_ING_CONTROL, 0x0a200011 },
+ { REG_EGR_CONTROL, 0xa0010091 },
+};
+
+static struct init_table vsc7326_portinit[4][22] = {
+ { /* Port 0 */
+ /* FIFO setup */
+ { REG_DBG(0), 0x000004f0 },
+ { REG_HDX(0), 0x00073101 },
+ { REG_TEST(0,0), 0x00000022 },
+ { REG_TEST(1,0), 0x00000022 },
+ { REG_TOP_BOTTOM(0,0), 0x003f0000 },
+ { REG_TOP_BOTTOM(1,0), 0x00120000 },
+ { REG_HIGH_LOW_WM(0,0), 0x07460757 },
+ { REG_HIGH_LOW_WM(1,0), WM_DISABLE },
+ { REG_CT_THRHLD(0,0), 0x00000000 },
+ { REG_CT_THRHLD(1,0), 0x00000000 },
+ { REG_BUCKE(0), 0x0002ffff },
+ { REG_BUCKI(0), 0x0002ffff },
+ { REG_TEST(0,0), 0x00000020 },
+ { REG_TEST(1,0), 0x00000020 },
+ /* Port config */
+ { REG_MAX_LEN(0), 0x00002710 },
+ { REG_PORT_FAIL(0), 0x00000002 },
+ { REG_NORMALIZER(0), 0x00000a64 },
+ { REG_DENORM(0), 0x00000010 },
+ { REG_STICK_BIT(0), 0x03baa370 },
+ { REG_DEV_SETUP(0), 0x00000083 },
+ { REG_DEV_SETUP(0), 0x00000082 },
+ { REG_MODE_CFG(0), 0x0200259f },
+ },
+ { /* Port 1 */
+ /* FIFO setup */
+ { REG_DBG(1), 0x000004f0 },
+ { REG_HDX(1), 0x00073101 },
+ { REG_TEST(0,1), 0x00000022 },
+ { REG_TEST(1,1), 0x00000022 },
+ { REG_TOP_BOTTOM(0,1), 0x007e003f },
+ { REG_TOP_BOTTOM(1,1), 0x00240012 },
+ { REG_HIGH_LOW_WM(0,1), 0x07460757 },
+ { REG_HIGH_LOW_WM(1,1), WM_DISABLE },
+ { REG_CT_THRHLD(0,1), 0x00000000 },
+ { REG_CT_THRHLD(1,1), 0x00000000 },
+ { REG_BUCKE(1), 0x0002ffff },
+ { REG_BUCKI(1), 0x0002ffff },
+ { REG_TEST(0,1), 0x00000020 },
+ { REG_TEST(1,1), 0x00000020 },
+ /* Port config */
+ { REG_MAX_LEN(1), 0x00002710 },
+ { REG_PORT_FAIL(1), 0x00000002 },
+ { REG_NORMALIZER(1), 0x00000a64 },
+ { REG_DENORM(1), 0x00000010 },
+ { REG_STICK_BIT(1), 0x03baa370 },
+ { REG_DEV_SETUP(1), 0x00000083 },
+ { REG_DEV_SETUP(1), 0x00000082 },
+ { REG_MODE_CFG(1), 0x0200259f },
+ },
+ { /* Port 2 */
+ /* FIFO setup */
+ { REG_DBG(2), 0x000004f0 },
+ { REG_HDX(2), 0x00073101 },
+ { REG_TEST(0,2), 0x00000022 },
+ { REG_TEST(1,2), 0x00000022 },
+ { REG_TOP_BOTTOM(0,2), 0x00bd007e },
+ { REG_TOP_BOTTOM(1,2), 0x00360024 },
+ { REG_HIGH_LOW_WM(0,2), 0x07460757 },
+ { REG_HIGH_LOW_WM(1,2), WM_DISABLE },
+ { REG_CT_THRHLD(0,2), 0x00000000 },
+ { REG_CT_THRHLD(1,2), 0x00000000 },
+ { REG_BUCKE(2), 0x0002ffff },
+ { REG_BUCKI(2), 0x0002ffff },
+ { REG_TEST(0,2), 0x00000020 },
+ { REG_TEST(1,2), 0x00000020 },
+ /* Port config */
+ { REG_MAX_LEN(2), 0x00002710 },
+ { REG_PORT_FAIL(2), 0x00000002 },
+ { REG_NORMALIZER(2), 0x00000a64 },
+ { REG_DENORM(2), 0x00000010 },
+ { REG_STICK_BIT(2), 0x03baa370 },
+ { REG_DEV_SETUP(2), 0x00000083 },
+ { REG_DEV_SETUP(2), 0x00000082 },
+ { REG_MODE_CFG(2), 0x0200259f },
+ },
+ { /* Port 3 */
+ /* FIFO setup */
+ { REG_DBG(3), 0x000004f0 },
+ { REG_HDX(3), 0x00073101 },
+ { REG_TEST(0,3), 0x00000022 },
+ { REG_TEST(1,3), 0x00000022 },
+ { REG_TOP_BOTTOM(0,3), 0x00fc00bd },
+ { REG_TOP_BOTTOM(1,3), 0x00480036 },
+ { REG_HIGH_LOW_WM(0,3), 0x07460757 },
+ { REG_HIGH_LOW_WM(1,3), WM_DISABLE },
+ { REG_CT_THRHLD(0,3), 0x00000000 },
+ { REG_CT_THRHLD(1,3), 0x00000000 },
+ { REG_BUCKE(3), 0x0002ffff },
+ { REG_BUCKI(3), 0x0002ffff },
+ { REG_TEST(0,3), 0x00000020 },
+ { REG_TEST(1,3), 0x00000020 },
+ /* Port config */
+ { REG_MAX_LEN(3), 0x00002710 },
+ { REG_PORT_FAIL(3), 0x00000002 },
+ { REG_NORMALIZER(3), 0x00000a64 },
+ { REG_DENORM(3), 0x00000010 },
+ { REG_STICK_BIT(3), 0x03baa370 },
+ { REG_DEV_SETUP(3), 0x00000083 },
+ { REG_DEV_SETUP(3), 0x00000082 },
+ { REG_MODE_CFG(3), 0x0200259f },
+ },
+};
+
+static void run_table(adapter_t *adapter, struct init_table *ib, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (ib[i].addr == INITBLOCK_SLEEP) {
+ udelay(ib[i].data);
+ pr_err("sleep %d us\n", ib[i].data);
+ } else
+ vsc_write(adapter, ib[i].addr, ib[i].data);
+ }
+}
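
A table may interleave register writes with delays by using INITBLOCK_SLEEP as the address, in which case the data field is a delay in microseconds. An illustrative table (not one the driver ships):

static struct init_table example_tbl[] = {
	{ REG_SW_RESET,    0x80000001 },	/* kick off soft reset */
	{ INITBLOCK_SLEEP, 100 },		/* wait 100 us */
	{ REG_IFACE_MODE,  0x00000000 },	/* then configure */
};

/* run_table(adapter, example_tbl, ARRAY_SIZE(example_tbl)); */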
+
+static int bist_rd(adapter_t *adapter, int moduleid, int address)
+{
+ int data = 0;
+ u32 result = 0;
+
+ if ((address != 0x0) &&
+ (address != 0x1) &&
+ (address != 0x2) &&
+ (address != 0xd) &&
+ (address != 0xe))
+ pr_err("No bist address: 0x%x\n", address);
+
+ data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) |
+ ((moduleid & 0xff) << 0));
+ vsc_write(adapter, REG_RAM_BIST_CMD, data);
+
+ udelay(10);
+
+ vsc_read(adapter, REG_RAM_BIST_RESULT, &result);
+ if ((result & (1 << 9)) != 0x0)
+ pr_err("Still in bist read: 0x%x\n", result);
+ else if ((result & (1 << 8)) != 0x0)
+ pr_err("bist read error: 0x%x\n", result);
+
+ return result & 0xff;
+}
+
+static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
+{
+ int data = 0;
+ u32 result = 0;
+
+ if ((address != 0x0) &&
+ (address != 0x1) &&
+ (address != 0x2) &&
+ (address != 0xd) &&
+ (address != 0xe))
+ pr_err("No bist address: 0x%x\n", address);
+
+ if (value > 255)
+ pr_err("Suspicious write out of range value: 0x%x\n", value);
+
+ data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) |
+ ((moduleid & 0xff) << 0));
+ vsc_write(adapter, REG_RAM_BIST_CMD, data);
+
+ udelay(5);
+
+ vsc_read(adapter, REG_RAM_BIST_CMD, &result);
+ if ((result & (1 << 27)) != 0x0)
+ pr_err("Still in bist write: 0x%x\n", result);
+ else if ((result & (1 << 26)) != 0x0)
+ pr_err("bist write error: 0x%x\n", result);
+
+ return 0;
+}
+
+static int run_bist(adapter_t *adapter, int moduleid)
+{
+ /* run BIST */
+ (void) bist_wr(adapter, moduleid, 0x00, 0x02);
+ (void) bist_wr(adapter, moduleid, 0x01, 0x01);
+
+ return 0;
+}
+
+static int check_bist(adapter_t *adapter, int moduleid)
+{
+ int result = 0;
+ int column = 0;
+ /* check BIST result */
+ result = bist_rd(adapter, moduleid, 0x02);
+ column = ((bist_rd(adapter, moduleid, 0x0e) << 8) +
+ (bist_rd(adapter, moduleid, 0x0d)));
+ if ((result & 3) != 0x3)
+ pr_err("Result: 0x%x BIST error in ram %d, column: 0x%04x\n",
+ result, moduleid, column);
+ return 0;
+}
+
+static int enable_mem(adapter_t *adapter, int moduleid)
+{
+ /* enable memory */
+ (void) bist_wr(adapter, moduleid, 0x00, 0x00);
+ return 0;
+}
+
+static int run_bist_all(adapter_t *adapter)
+{
+ int port = 0;
+ u32 val = 0;
+
+ vsc_write(adapter, REG_MEM_BIST, 0x5);
+ vsc_read(adapter, REG_MEM_BIST, &val);
+
+ for (port = 0; port < 12; port++)
+ vsc_write(adapter, REG_DEV_SETUP(port), 0x0);
+
+ udelay(300);
+ vsc_write(adapter, REG_SPI4_MISC, 0x00040409);
+ udelay(300);
+
+ (void) run_bist(adapter, 13);
+ (void) run_bist(adapter, 14);
+ (void) run_bist(adapter, 20);
+ (void) run_bist(adapter, 21);
+ mdelay(200);
+ (void) check_bist(adapter, 13);
+ (void) check_bist(adapter, 14);
+ (void) check_bist(adapter, 20);
+ (void) check_bist(adapter, 21);
+ udelay(100);
+ (void) enable_mem(adapter, 13);
+ (void) enable_mem(adapter, 14);
+ (void) enable_mem(adapter, 20);
+ (void) enable_mem(adapter, 21);
+ udelay(300);
+ vsc_write(adapter, REG_SPI4_MISC, 0x60040400);
+ udelay(300);
+ for (port = 0; port < 12; port++)
+ vsc_write(adapter, REG_DEV_SETUP(port), 0x1);
+
+ udelay(300);
+ vsc_write(adapter, REG_MEM_BIST, 0x0);
+ mdelay(10);
+ return 0;
+}
+
+static int mac_intr_handler(struct cmac *mac)
+{
+ return 0;
+}
+
+static int mac_intr_enable(struct cmac *mac)
+{
+ return 0;
+}
+
+static int mac_intr_disable(struct cmac *mac)
+{
+ return 0;
+}
+
+static int mac_intr_clear(struct cmac *mac)
+{
+ return 0;
+}
+
+/* Expect MAC address to be in network byte order. */
+static int mac_set_address(struct cmac* mac, u8 addr[6])
+{
+ u32 val;
+ int port = mac->instance->index;
+
+ vsc_write(mac->adapter, REG_MAC_LOW_ADDR(port),
+ (addr[3] << 16) | (addr[4] << 8) | addr[5]);
+ vsc_write(mac->adapter, REG_MAC_HIGH_ADDR(port),
+ (addr[0] << 16) | (addr[1] << 8) | addr[2]);
+
+ vsc_read(mac->adapter, REG_ING_FFILT_UM_EN, &val);
+ val &= ~0xf0000000;
+ vsc_write(mac->adapter, REG_ING_FFILT_UM_EN, val | (port << 28));
+
+ vsc_write(mac->adapter, REG_ING_FFILT_MASK0,
+ 0xffff0000 | (addr[4] << 8) | addr[5]);
+ vsc_write(mac->adapter, REG_ING_FFILT_MASK1,
+ 0xffff0000 | (addr[2] << 8) | addr[3]);
+ vsc_write(mac->adapter, REG_ING_FFILT_MASK2,
+ 0xffff0000 | (addr[0] << 8) | addr[1]);
+ return 0;
+}
+
+static int mac_get_address(struct cmac *mac, u8 addr[6])
+{
+ u32 addr_lo, addr_hi;
+ int port = mac->instance->index;
+
+ vsc_read(mac->adapter, REG_MAC_LOW_ADDR(port), &addr_lo);
+ vsc_read(mac->adapter, REG_MAC_HIGH_ADDR(port), &addr_hi);
+
+ addr[0] = (u8) (addr_hi >> 16);
+ addr[1] = (u8) (addr_hi >> 8);
+ addr[2] = (u8) addr_hi;
+ addr[3] = (u8) (addr_lo >> 16);
+ addr[4] = (u8) (addr_lo >> 8);
+ addr[5] = (u8) addr_lo;
+ return 0;
+}
+
+/* This is intended to reset a port, not the whole MAC */
+static int mac_reset(struct cmac *mac)
+{
+ int index = mac->instance->index;
+
+ run_table(mac->adapter, vsc7326_portinit[index],
+ ARRAY_SIZE(vsc7326_portinit[index]));
+
+ return 0;
+}
+
+static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm)
+{
+ u32 v;
+ int port = mac->instance->index;
+
+ vsc_read(mac->adapter, REG_ING_FFILT_UM_EN, &v);
+ v |= 1 << 12;
+
+ if (t1_rx_mode_promisc(rm))
+ v &= ~(1 << (port + 16));
+ else
+ v |= 1 << (port + 16);
+
+ vsc_write(mac->adapter, REG_ING_FFILT_UM_EN, v);
+ return 0;
+}
+
+static int mac_set_mtu(struct cmac *mac, int mtu)
+{
+ int port = mac->instance->index;
+
+ if (mtu > MAX_MTU)
+ return -EINVAL;
+
+ /* max_len includes header and FCS */
+ vsc_write(mac->adapter, REG_MAX_LEN(port), mtu + 14 + 4);
+ return 0;
+}
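
A worked example of the max_len arithmetic above (numbers are ours):

/* MTU 1500  ->  REG_MAX_LEN = 1500 + 14 + 4 = 1518 octets
 * (14-byte Ethernet header + payload + 4-byte FCS; a 4-byte VLAN
 * tag is not budgeted for here).
 */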
+
+static int mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex,
+ int fc)
+{
+ u32 v;
+ int enable, port = mac->instance->index;
+
+ if (speed >= 0 && speed != SPEED_10 && speed != SPEED_100 &&
+ speed != SPEED_1000)
+ return -1;
+ if (duplex > 0 && duplex != DUPLEX_FULL)
+ return -1;
+
+ if (speed >= 0) {
+ vsc_read(mac->adapter, REG_MODE_CFG(port), &v);
+ enable = v & 3; /* save tx/rx enables */
+ v &= ~0xf;
+ v |= 4; /* full duplex */
+ if (speed == SPEED_1000)
+ v |= 8; /* GigE */
+ enable |= v;
+ vsc_write(mac->adapter, REG_MODE_CFG(port), v);
+
+ if (speed == SPEED_1000)
+ v = 0x82;
+ else if (speed == SPEED_100)
+ v = 0x84;
+ else /* SPEED_10 */
+ v = 0x86;
+ vsc_write(mac->adapter, REG_DEV_SETUP(port), v | 1); /* reset */
+ vsc_write(mac->adapter, REG_DEV_SETUP(port), v);
+ vsc_read(mac->adapter, REG_DBG(port), &v);
+ v &= ~0xff00;
+ if (speed == SPEED_1000)
+ v |= 0x400;
+ else if (speed == SPEED_100)
+ v |= 0x2000;
+ else /* SPEED_10 */
+ v |= 0xff00;
+ vsc_write(mac->adapter, REG_DBG(port), v);
+
+ vsc_write(mac->adapter, REG_TX_IFG(port),
+ speed == SPEED_1000 ? 5 : 0x11);
+ if (duplex == DUPLEX_HALF)
+ enable = 0x0; /* 100 or 10 */
+ else if (speed == SPEED_1000)
+ enable = 0xc;
+ else /* SPEED_100 or 10 */
+ enable = 0x4;
+ enable |= 0x9 << 10; /* IFG1 */
+ enable |= 0x6 << 6; /* IFG2 */
+ enable |= 0x1 << 4; /* VLAN */
+ enable |= 0x3; /* RX/TX EN */
+ vsc_write(mac->adapter, REG_MODE_CFG(port), enable);
+ }
+
+ vsc_read(mac->adapter, REG_PAUSE_CFG(port), &v);
+ v &= 0xfff0ffff;
+ v |= 0x20000; /* xon/xoff */
+ if (fc & PAUSE_RX)
+ v |= 0x40000;
+ if (fc & PAUSE_TX)
+ v |= 0x80000;
+ if (fc == (PAUSE_RX | PAUSE_TX))
+ v |= 0x10000;
+ vsc_write(mac->adapter, REG_PAUSE_CFG(port), v);
+ return 0;
+}
+
+static int mac_enable(struct cmac *mac, int which)
+{
+ u32 val;
+ int port = mac->instance->index;
+
+ /* Write the correct WM value when the port is enabled. */
+ vsc_write(mac->adapter, REG_HIGH_LOW_WM(1,port), WM_ENABLE);
+
+ vsc_read(mac->adapter, REG_MODE_CFG(port), &val);
+ if (which & MAC_DIRECTION_RX)
+ val |= 0x2;
+ if (which & MAC_DIRECTION_TX)
+ val |= 1;
+ vsc_write(mac->adapter, REG_MODE_CFG(port), val);
+ return 0;
+}
+
+static int mac_disable(struct cmac *mac, int which)
+{
+ u32 val;
+ int i, port = mac->instance->index;
+
+ /* Reset the port, this also writes the correct WM value */
+ mac_reset(mac);
+
+ vsc_read(mac->adapter, REG_MODE_CFG(port), &val);
+ if (which & MAC_DIRECTION_RX)
+ val &= ~0x2;
+ if (which & MAC_DIRECTION_TX)
+ val &= ~0x1;
+ vsc_write(mac->adapter, REG_MODE_CFG(port), val);
+ vsc_read(mac->adapter, REG_MODE_CFG(port), &val);
+
+ /* Clear stats */
+ for (i = 0; i <= 0x3a; ++i)
+ vsc_write(mac->adapter, CRA(4, port, i), 0);
+
+ /* Clear software counters */
+ memset(&mac->stats, 0, sizeof(struct cmac_statistics));
+
+ return 0;
+}
+
+static void rmon_update(struct cmac *mac, unsigned int addr, u64 *stat)
+{
+ u32 v, lo;
+
+ vsc_read(mac->adapter, addr, &v);
+ lo = *stat;
+ *stat = *stat - lo + v;
+
+ if (v == 0)
+ return;
+
+ if (v < lo)
+ *stat += (1ULL << 32);
+}
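
A worked example of the wrap handling above (illustrative numbers): the soft counter holds 0x1fffffff0 and the 32-bit hardware counter has rolled over to 0x10:

/* lo    = 0xfffffff0                (low 32 bits of *stat)
 * *stat = *stat - lo + v            ->  0x100000010
 * v (0x10) < lo (0xfffffff0), so
 * *stat += 1ULL << 32               ->  0x200000010
 * The accumulator counts monotonically across the rollover; the
 * v == 0 early return avoids misreading a cleared counter as a wrap.
 */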
+
+static void port_stats_update(struct cmac *mac)
+{
+ struct {
+ unsigned int reg;
+ unsigned int offset;
+ } hw_stats[] = {
+
+#define HW_STAT(reg, stat_name) \
+ { reg, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
+
+ /* Rx stats */
+ HW_STAT(RxUnicast, RxUnicastFramesOK),
+ HW_STAT(RxMulticast, RxMulticastFramesOK),
+ HW_STAT(RxBroadcast, RxBroadcastFramesOK),
+ HW_STAT(Crc, RxFCSErrors),
+ HW_STAT(RxAlignment, RxAlignErrors),
+ HW_STAT(RxOversize, RxFrameTooLongErrors),
+ HW_STAT(RxPause, RxPauseFrames),
+ HW_STAT(RxJabbers, RxJabberErrors),
+ HW_STAT(RxFragments, RxRuntErrors),
+ HW_STAT(RxUndersize, RxRuntErrors),
+ HW_STAT(RxSymbolCarrier, RxSymbolErrors),
+ HW_STAT(RxSize1519ToMax, RxJumboFramesOK),
+
+ /* Tx stats (skip collision stats as we are full-duplex only) */
+ HW_STAT(TxUnicast, TxUnicastFramesOK),
+ HW_STAT(TxMulticast, TxMulticastFramesOK),
+ HW_STAT(TxBroadcast, TxBroadcastFramesOK),
+ HW_STAT(TxPause, TxPauseFrames),
+ HW_STAT(TxUnderrun, TxUnderrun),
+ HW_STAT(TxSize1519ToMax, TxJumboFramesOK),
+ }, *p = hw_stats;
+ unsigned int port = mac->instance->index;
+ u64 *stats = (u64 *)&mac->stats;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(hw_stats); i++, p++)
+ rmon_update(mac, CRA(0x4, port, p->reg), stats + p->offset);
+
+ rmon_update(mac, REG_TX_OK_BYTES(port), &mac->stats.TxOctetsOK);
+ rmon_update(mac, REG_RX_OK_BYTES(port), &mac->stats.RxOctetsOK);
+ rmon_update(mac, REG_RX_BAD_BYTES(port), &mac->stats.RxOctetsBad);
+}
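
The HW_STAT macro above computes the u64 index of a field inside struct cmac_statistics via pointer arithmetic on a NULL base. The same index can be written with offsetof, assuming the stats fields are all u64 (a sketch, not the driver's macro):

#include <stddef.h>

#define HW_STAT_IDX(stat_name) \
	(offsetof(struct cmac_statistics, stat_name) / sizeof(u64))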
+
+/*
+ * This function is called periodically to accumulate the current values of the
+ * RMON counters into the port statistics. Since the counters are only 32 bits
+ * some of them can overflow in less than a minute at GigE speeds, so this
+ * function should be called every 30 seconds or so.
+ *
+ * To cut down on reading costs we update only the octet counters at each tick
+ * and do a full update at major ticks, which can be every 30 minutes or more.
+ */
+static const struct cmac_statistics *mac_update_statistics(struct cmac *mac,
+ int flag)
+{
+ if (flag == MAC_STATS_UPDATE_FULL ||
+ mac->instance->ticks >= MAJOR_UPDATE_TICKS) {
+ port_stats_update(mac);
+ mac->instance->ticks = 0;
+ } else {
+ int port = mac->instance->index;
+
+ rmon_update(mac, REG_RX_OK_BYTES(port),
+ &mac->stats.RxOctetsOK);
+ rmon_update(mac, REG_RX_BAD_BYTES(port),
+ &mac->stats.RxOctetsBad);
+ rmon_update(mac, REG_TX_OK_BYTES(port),
+ &mac->stats.TxOctetsOK);
+ mac->instance->ticks++;
+ }
+ return &mac->stats;
+}
+
+static void mac_destroy(struct cmac *mac)
+{
+ kfree(mac);
+}
+
+static struct cmac_ops vsc7326_ops = {
+ .destroy = mac_destroy,
+ .reset = mac_reset,
+ .interrupt_handler = mac_intr_handler,
+ .interrupt_enable = mac_intr_enable,
+ .interrupt_disable = mac_intr_disable,
+ .interrupt_clear = mac_intr_clear,
+ .enable = mac_enable,
+ .disable = mac_disable,
+ .set_mtu = mac_set_mtu,
+ .set_rx_mode = mac_set_rx_mode,
+ .set_speed_duplex_fc = mac_set_speed_duplex_fc,
+ .statistics_update = mac_update_statistics,
+ .macaddress_get = mac_get_address,
+ .macaddress_set = mac_set_address,
+};
+
+static struct cmac *vsc7326_mac_create(adapter_t *adapter, int index)
+{
+ struct cmac *mac;
+ u32 val;
+ int i;
+
+ mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL);
+ if (!mac)
+ return NULL;
+
+ mac->ops = &vsc7326_ops;
+ mac->instance = (cmac_instance *)(mac + 1);
+ mac->adapter = adapter;
+
+ mac->instance->index = index;
+ mac->instance->ticks = 0;
+
+ i = 0;
+ do {
+ u32 vhi, vlo;
+
+ vhi = vlo = 0;
+ t1_tpi_read(adapter, (REG_LOCAL_STATUS << 2) + 4, &vlo);
+ udelay(1);
+ t1_tpi_read(adapter, REG_LOCAL_STATUS << 2, &vhi);
+ udelay(5);
+ val = (vhi << 16) | vlo;
+ } while ((++i < 10000) && (val == 0xffffffff));
+
+ return mac;
+}
+
+static int vsc7326_mac_reset(adapter_t *adapter)
+{
+ vsc7326_full_reset(adapter);
+ (void) run_bist_all(adapter);
+ run_table(adapter, vsc7326_reset, ARRAY_SIZE(vsc7326_reset));
+ return 0;
+}
+
+const struct gmac t1_vsc7326_ops = {
+ .stats_update_period = STATS_TICK_SECS,
+ .create = vsc7326_mac_create,
+ .reset = vsc7326_mac_reset,
+};
diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h b/drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h
new file mode 100644
index 000000000000..479edbcabe68
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h
@@ -0,0 +1,297 @@
+/* $Date: 2006/04/28 19:20:17 $ $RCSfile: vsc7326_reg.h,v $ $Revision: 1.5 $ */
+#ifndef _VSC7321_REG_H_
+#define _VSC7321_REG_H_
+
+/* Register definitions for Vitesse VSC7321 (Meigs II) MAC
+ *
+ * Straight off the data sheet, VMDS-10038 Rev 2.0 and
+ * PD0011-01-14-Meigs-II 2002-12-12
+ */
+
+/* Just 'cause it's in here doesn't mean it's used. */
+
+#define CRA(blk,sub,adr) ((((blk) & 0x7) << 13) | (((sub) & 0xf) << 9) | (((adr) & 0xff) << 1))
+
+/* System and CPU comm's registers */
+#define REG_CHIP_ID CRA(0x7,0xf,0x00) /* Chip ID */
+#define REG_BLADE_ID CRA(0x7,0xf,0x01) /* Blade ID */
+#define REG_SW_RESET CRA(0x7,0xf,0x02) /* Global Soft Reset */
+#define REG_MEM_BIST CRA(0x7,0xf,0x04) /* mem */
+#define REG_IFACE_MODE CRA(0x7,0xf,0x07) /* Interface mode */
+#define REG_MSCH CRA(0x7,0x2,0x06) /* CRC error count */
+#define REG_CRC_CNT CRA(0x7,0x2,0x0a) /* CRC error count */
+#define REG_CRC_CFG CRA(0x7,0x2,0x0b) /* CRC config */
+#define REG_SI_TRANSFER_SEL CRA(0x7,0xf,0x18) /* SI Transfer Select */
+#define REG_PLL_CLK_SPEED CRA(0x7,0xf,0x19) /* Clock Speed Selection */
+#define REG_SYS_CLK_SELECT CRA(0x7,0xf,0x1c) /* System Clock Select */
+#define REG_GPIO_CTRL CRA(0x7,0xf,0x1d) /* GPIO Control */
+#define REG_GPIO_OUT CRA(0x7,0xf,0x1e) /* GPIO Out */
+#define REG_GPIO_IN CRA(0x7,0xf,0x1f) /* GPIO In */
+#define REG_CPU_TRANSFER_SEL CRA(0x7,0xf,0x20) /* CPU Transfer Select */
+#define REG_LOCAL_DATA CRA(0x7,0xf,0xfe) /* Local CPU Data Register */
+#define REG_LOCAL_STATUS CRA(0x7,0xf,0xff) /* Local CPU Status Register */
+
+/* Aggregator registers */
+#define REG_AGGR_SETUP CRA(0x7,0x1,0x00) /* Aggregator Setup */
+#define REG_PMAP_TABLE CRA(0x7,0x1,0x01) /* Port map table */
+#define REG_MPLS_BIT0 CRA(0x7,0x1,0x08) /* MPLS bit0 position */
+#define REG_MPLS_BIT1 CRA(0x7,0x1,0x09) /* MPLS bit1 position */
+#define REG_MPLS_BIT2 CRA(0x7,0x1,0x0a) /* MPLS bit2 position */
+#define REG_MPLS_BIT3 CRA(0x7,0x1,0x0b) /* MPLS bit3 position */
+#define REG_MPLS_BITMASK CRA(0x7,0x1,0x0c) /* MPLS bit mask */
+#define REG_PRE_BIT0POS CRA(0x7,0x1,0x10) /* Preamble bit0 position */
+#define REG_PRE_BIT1POS CRA(0x7,0x1,0x11) /* Preamble bit1 position */
+#define REG_PRE_BIT2POS CRA(0x7,0x1,0x12) /* Preamble bit2 position */
+#define REG_PRE_BIT3POS CRA(0x7,0x1,0x13) /* Preamble bit3 position */
+#define REG_PRE_ERR_CNT CRA(0x7,0x1,0x14) /* Preamble parity error count */
+
+/* BIST registers */
+/*#define REG_RAM_BIST_CMD CRA(0x7,0x2,0x00)*/ /* RAM BIST Command Register */
+/*#define REG_RAM_BIST_RESULT CRA(0x7,0x2,0x01)*/ /* RAM BIST Read Status/Result */
+#define REG_RAM_BIST_CMD CRA(0x7,0x1,0x00) /* RAM BIST Command Register */
+#define REG_RAM_BIST_RESULT CRA(0x7,0x1,0x01) /* RAM BIST Read Status/Result */
+#define BIST_PORT_SELECT 0x00 /* BIST port select */
+#define BIST_COMMAND 0x01 /* BIST enable/disable */
+#define BIST_STATUS 0x02 /* BIST operation status */
+#define BIST_ERR_CNT_LSB 0x03 /* BIST error count lo 8b */
+#define BIST_ERR_CNT_MSB 0x04 /* BIST error count hi 8b */
+#define BIST_ERR_SEL_LSB 0x05 /* BIST error select lo 8b */
+#define BIST_ERR_SEL_MSB 0x06 /* BIST error select hi 8b */
+#define BIST_ERROR_STATE 0x07 /* BIST engine internal state */
+#define BIST_ERR_ADR0 0x08 /* BIST error address lo 8b */
+#define BIST_ERR_ADR1 0x09 /* BIST error address lomid 8b */
+#define BIST_ERR_ADR2 0x0a /* BIST error address himid 8b */
+#define BIST_ERR_ADR3 0x0b /* BIST error address hi 8b */
+
+/* FIFO registers
+ * ie = 0 for ingress, 1 for egress
+ * fn = FIFO number, 0-9
+ */
+#define REG_TEST(ie,fn) CRA(0x2,ie&1,0x00+fn) /* Mode & Test Register */
+#define REG_TOP_BOTTOM(ie,fn) CRA(0x2,ie&1,0x10+fn) /* FIFO Buffer Top & Bottom */
+#define REG_TAIL(ie,fn) CRA(0x2,ie&1,0x20+fn) /* FIFO Write Pointer */
+#define REG_HEAD(ie,fn) CRA(0x2,ie&1,0x30+fn) /* FIFO Read Pointer */
+#define REG_HIGH_LOW_WM(ie,fn) CRA(0x2,ie&1,0x40+fn) /* Flow Control Water Marks */
+#define REG_CT_THRHLD(ie,fn) CRA(0x2,ie&1,0x50+fn) /* Cut Through Threshold */
+#define REG_FIFO_DROP_CNT(ie,fn) CRA(0x2,ie&1,0x60+fn) /* Drop & CRC Error Counter */
+#define REG_DEBUG_BUF_CNT(ie,fn) CRA(0x2,ie&1,0x70+fn) /* Input Side Debug Counter */
+#define REG_BUCKI(fn) CRA(0x2,2,0x20+fn) /* Ingress traffic-shaper bucket */
+#define REG_BUCKE(fn) CRA(0x2,3,0x20+fn) /* Egress traffic-shaper bucket */
+
+/* Traffic shaper buckets
+ * ie = 0 for ingress, 1 for egress
+ * bn = bucket number 0-10 (yes, 11 buckets)
+ */
+/* OK, this one's kinda ugly. Some hardware designers are perverse. */
+#define REG_TRAFFIC_SHAPER_BUCKET(ie,bn) CRA(0x2,ie&1,(0x0a + (bn>7)) | ((bn&7)<<4))
+#define REG_TRAFFIC_SHAPER_CONTROL(ie) CRA(0x2,ie&1,0x3b)
+
+#define REG_SRAM_ADR(ie) CRA(0x2,ie&1,0x0e) /* FIFO SRAM address */
+#define REG_SRAM_WR_STRB(ie) CRA(0x2,ie&1,0x1e) /* FIFO SRAM write strobe */
+#define REG_SRAM_RD_STRB(ie) CRA(0x2,ie&1,0x2e) /* FIFO SRAM read strobe */
+#define REG_SRAM_DATA_0(ie) CRA(0x2,ie&1,0x3e) /* FIFO SRAM data lo 8b */
+#define REG_SRAM_DATA_1(ie) CRA(0x2,ie&1,0x4e) /* FIFO SRAM data lomid 8b */
+#define REG_SRAM_DATA_2(ie) CRA(0x2,ie&1,0x5e) /* FIFO SRAM data himid 8b */
+#define REG_SRAM_DATA_3(ie) CRA(0x2,ie&1,0x6e) /* FIFO SRAM data hi 8b */
+#define REG_SRAM_DATA_BLK_TYPE(ie) CRA(0x2,ie&1,0x7e) /* FIFO SRAM tag */
+/* REG_ING_CONTROL equals REG_CONTROL with ie = 0, likewise REG_EGR_CONTROL is ie = 1 */
+#define REG_CONTROL(ie) CRA(0x2,ie&1,0x0f) /* FIFO control */
+#define REG_ING_CONTROL CRA(0x2,0x0,0x0f) /* Ingress control (alias) */
+#define REG_EGR_CONTROL CRA(0x2,0x1,0x0f) /* Egress control (alias) */
+#define REG_AGE_TIMER(ie) CRA(0x2,ie&1,0x1f) /* Aging timer */
+#define REG_AGE_INC(ie) CRA(0x2,ie&1,0x2f) /* Aging increment */
+#define DEBUG_OUT(ie) CRA(0x2,ie&1,0x3f) /* Output debug counter control */
+#define DEBUG_CNT(ie) CRA(0x2,ie&1,0x4f) /* Output debug counter */
+
+/* SPI4 interface */
+#define REG_SPI4_MISC CRA(0x5,0x0,0x00) /* Misc Register */
+#define REG_SPI4_STATUS CRA(0x5,0x0,0x01) /* CML Status */
+#define REG_SPI4_ING_SETUP0 CRA(0x5,0x0,0x02) /* Ingress Status Channel Setup */
+#define REG_SPI4_ING_SETUP1 CRA(0x5,0x0,0x03) /* Ingress Data Training Setup */
+#define REG_SPI4_ING_SETUP2 CRA(0x5,0x0,0x04) /* Ingress Data Burst Size Setup */
+#define REG_SPI4_EGR_SETUP0 CRA(0x5,0x0,0x05) /* Egress Status Channel Setup */
+#define REG_SPI4_DBG_CNT(n) CRA(0x5,0x0,0x10+n) /* Debug counters 0-9 */
+#define REG_SPI4_DBG_SETUP CRA(0x5,0x0,0x1A) /* Debug counters setup */
+#define REG_SPI4_TEST CRA(0x5,0x0,0x20) /* Test Setup Register */
+#define REG_TPGEN_UP0 CRA(0x5,0x0,0x21) /* Test Pattern generator user pattern 0 */
+#define REG_TPGEN_UP1 CRA(0x5,0x0,0x22) /* Test Pattern generator user pattern 1 */
+#define REG_TPCHK_UP0 CRA(0x5,0x0,0x23) /* Test Pattern checker user pattern 0 */
+#define REG_TPCHK_UP1 CRA(0x5,0x0,0x24) /* Test Pattern checker user pattern 1 */
+#define REG_TPSAM_P0 CRA(0x5,0x0,0x25) /* Sampled pattern 0 */
+#define REG_TPSAM_P1 CRA(0x5,0x0,0x26) /* Sampled pattern 1 */
+#define REG_TPERR_CNT CRA(0x5,0x0,0x27) /* Pattern checker error counter */
+#define REG_SPI4_STICKY CRA(0x5,0x0,0x30) /* Sticky bits register */
+#define REG_SPI4_DBG_INH CRA(0x5,0x0,0x31) /* Core egress & ingress inhibit */
+#define REG_SPI4_DBG_STATUS CRA(0x5,0x0,0x32) /* Sampled ingress status */
+#define REG_SPI4_DBG_GRANT CRA(0x5,0x0,0x33) /* Ingress granted credit value */
+
+#define REG_SPI4_DESKEW CRA(0x5,0x0,0x43) /* Ingress data deskew */
+
+/* 10GbE MAC Block Registers */
+/* Note that those registers that are exactly the same for 10GbE as for
+ * tri-speed are only defined with the version that needs a port number.
+ * Pass 0xa in those cases.
+ *
+ * Also note that despite the presence of a MAC address register, this part
+ * does no ingress MAC address filtering. That register is used only for
+ * pause frame detection and generation.
+ */
+/* 10GbE specific, and different from tri-speed */
+#define REG_MISC_10G CRA(0x1,0xa,0x00) /* Misc 10GbE setup */
+#define REG_PAUSE_10G CRA(0x1,0xa,0x01) /* Pause register */
+#define REG_NORMALIZER_10G CRA(0x1,0xa,0x05) /* 10G normalizer */
+#define REG_STICKY_RX CRA(0x1,0xa,0x06) /* RX debug register */
+#define REG_DENORM_10G CRA(0x1,0xa,0x07) /* Denormalizer */
+#define REG_STICKY_TX CRA(0x1,0xa,0x08) /* TX sticky bits */
+#define REG_MAX_RXHIGH CRA(0x1,0xa,0x0a) /* XGMII lane 0-3 debug */
+#define REG_MAX_RXLOW CRA(0x1,0xa,0x0b) /* XGMII lane 4-7 debug */
+#define REG_MAC_TX_STICKY CRA(0x1,0xa,0x0c) /* MAC Tx state sticky debug */
+#define REG_MAC_TX_RUNNING CRA(0x1,0xa,0x0d) /* MAC Tx state running debug */
+#define REG_TX_ABORT_AGE CRA(0x1,0xa,0x14) /* Aged Tx frames discarded */
+#define REG_TX_ABORT_SHORT CRA(0x1,0xa,0x15) /* Short Tx frames discarded */
+#define REG_TX_ABORT_TAXI CRA(0x1,0xa,0x16) /* Taxi error frames discarded */
+#define REG_TX_ABORT_UNDERRUN CRA(0x1,0xa,0x17) /* Tx Underrun abort counter */
+#define REG_TX_DENORM_DISCARD CRA(0x1,0xa,0x18) /* Tx denormalizer discards */
+#define REG_XAUI_STAT_A CRA(0x1,0xa,0x20) /* XAUI status A */
+#define REG_XAUI_STAT_B CRA(0x1,0xa,0x21) /* XAUI status B */
+#define REG_XAUI_STAT_C CRA(0x1,0xa,0x22) /* XAUI status C */
+#define REG_XAUI_CONF_A CRA(0x1,0xa,0x23) /* XAUI configuration A */
+#define REG_XAUI_CONF_B CRA(0x1,0xa,0x24) /* XAUI configuration B */
+#define REG_XAUI_CODE_GRP_CNT CRA(0x1,0xa,0x25) /* XAUI code group error count */
+#define REG_XAUI_CONF_TEST_A CRA(0x1,0xa,0x26) /* XAUI test register A */
+#define REG_PDERRCNT CRA(0x1,0xa,0x27) /* XAUI test register B */
+
+/* pn = port number 0-9 for tri-speed, 10 for 10GbE */
+/* Both tri-speed and 10GbE */
+#define REG_MAX_LEN(pn) CRA(0x1,pn,0x02) /* Max length */
+#define REG_MAC_HIGH_ADDR(pn) CRA(0x1,pn,0x03) /* Upper 24 bits of MAC addr */
+#define REG_MAC_LOW_ADDR(pn) CRA(0x1,pn,0x04) /* Lower 24 bits of MAC addr */
+
+/* tri-speed only
+ * pn = port number, 0-9
+ */
+#define REG_MODE_CFG(pn) CRA(0x1,pn,0x00) /* Mode configuration */
+#define REG_PAUSE_CFG(pn) CRA(0x1,pn,0x01) /* Pause configuration */
+#define REG_NORMALIZER(pn) CRA(0x1,pn,0x05) /* Normalizer */
+#define REG_TBI_STATUS(pn) CRA(0x1,pn,0x06) /* TBI status */
+#define REG_PCS_STATUS_DBG(pn) CRA(0x1,pn,0x07) /* PCS status debug */
+#define REG_PCS_CTRL(pn) CRA(0x1,pn,0x08) /* PCS control */
+#define REG_TBI_CONFIG(pn) CRA(0x1,pn,0x09) /* TBI configuration */
+#define REG_STICK_BIT(pn) CRA(0x1,pn,0x0a) /* Sticky bits */
+#define REG_DEV_SETUP(pn) CRA(0x1,pn,0x0b) /* MAC clock/reset setup */
+#define REG_DROP_CNT(pn) CRA(0x1,pn,0x0c) /* Drop counter */
+#define REG_PORT_POS(pn) CRA(0x1,pn,0x0d) /* Preamble port position */
+#define REG_PORT_FAIL(pn) CRA(0x1,pn,0x0e) /* Port fail handling */
+#define REG_SERDES_CONF(pn) CRA(0x1,pn,0x0f) /* SerDes configuration */
+#define REG_SERDES_TEST(pn) CRA(0x1,pn,0x10) /* SerDes test */
+#define REG_SERDES_STAT(pn) CRA(0x1,pn,0x11) /* SerDes status */
+#define REG_SERDES_COM_CNT(pn) CRA(0x1,pn,0x12) /* SerDes comma counter */
+#define REG_DENORM(pn) CRA(0x1,pn,0x15) /* Frame denormalization */
+#define REG_DBG(pn) CRA(0x1,pn,0x16) /* Device 1G debug */
+#define REG_TX_IFG(pn) CRA(0x1,pn,0x18) /* Tx IFG config */
+#define REG_HDX(pn) CRA(0x1,pn,0x19) /* Half-duplex config */
+
+/* Statistics */
+/* CRA(0x4,pn,reg) */
+/* reg below */
+/* pn = port number, 0-a, a = 10GbE */
+
+enum {
+ RxInBytes = 0x00, // # Rx in octets
+ RxSymbolCarrier = 0x01, // Frames w/ symbol errors
+ RxPause = 0x02, // # pause frames received
+ RxUnsupOpcode = 0x03, // # control frames with unsupported opcode
+ RxOkBytes = 0x04, // # octets in good frames
+ RxBadBytes = 0x05, // # octets in bad frames
+ RxUnicast = 0x06, // # good unicast frames
+ RxMulticast = 0x07, // # good multicast frames
+ RxBroadcast = 0x08, // # good broadcast frames
+ Crc = 0x09, // # frames w/ bad CRC only
+ RxAlignment = 0x0a, // # frames w/ alignment err
+ RxUndersize = 0x0b, // # frames undersize
+ RxFragments = 0x0c, // # frames undersize w/ crc err
+ RxInRangeLengthError = 0x0d, // # frames with length error
+ RxOutOfRangeError = 0x0e, // # frames with illegal length field
+ RxOversize = 0x0f, // # frames oversize
+ RxJabbers = 0x10, // # frames oversize w/ crc err
+ RxSize64 = 0x11, // # frames 64 octets long
+ RxSize65To127 = 0x12, // # frames 65-127 octets
+ RxSize128To255 = 0x13, // # frames 128-255
+ RxSize256To511 = 0x14, // # frames 256-511
+ RxSize512To1023 = 0x15, // # frames 512-1023
+ RxSize1024To1518 = 0x16, // # frames 1024-1518
+ RxSize1519ToMax = 0x17, // # frames 1519-max
+
+ TxOutBytes = 0x18, // # octets tx
+ TxPause = 0x19, // # pause frames sent
+ TxOkBytes = 0x1a, // # octets tx OK
+ TxUnicast = 0x1b, // # frames unicast
+ TxMulticast = 0x1c, // # frames multicast
+ TxBroadcast = 0x1d, // # frames broadcast
+ TxMultipleColl = 0x1e, // # frames tx after multiple collisions
+ TxLateColl = 0x1f, // # late collisions detected
+ TxXcoll = 0x20, // # frames lost, excessive collisions
+ TxDefer = 0x21, // # frames deferred on first tx attempt
+ TxXdefer = 0x22, // # frames excessively deferred
+ TxCsense = 0x23, // carrier sense errors at frame end
+ TxSize64 = 0x24, // # frames 64 octets long
+ TxSize65To127 = 0x25, // # frames 65-127 octets
+ TxSize128To255 = 0x26, // # frames 128-255
+ TxSize256To511 = 0x27, // # frames 256-511
+ TxSize512To1023 = 0x28, // # frames 512-1023
+ TxSize1024To1518 = 0x29, // # frames 1024-1518
+ TxSize1519ToMax = 0x2a, // # frames 1519-max
+ TxSingleColl = 0x2b, // # frames tx after single collision
+ TxBackoff2 = 0x2c, // # frames tx ok after 2 backoffs/collisions
+ TxBackoff3 = 0x2d, // after 3 backoffs/collisions
+ TxBackoff4 = 0x2e, // after 4
+ TxBackoff5 = 0x2f, // after 5
+ TxBackoff6 = 0x30, // after 6
+ TxBackoff7 = 0x31, // after 7
+ TxBackoff8 = 0x32, // after 8
+ TxBackoff9 = 0x33, // after 9
+ TxBackoff10 = 0x34, // after 10
+ TxBackoff11 = 0x35, // after 11
+ TxBackoff12 = 0x36, // after 12
+ TxBackoff13 = 0x37, // after 13
+ TxBackoff14 = 0x38, // after 14
+ TxBackoff15 = 0x39, // after 15
+ TxUnderrun = 0x3a, // # frames dropped from underrun
+ // Hole. See REG_RX_XGMII_PROT_ERR below.
+ RxIpgShrink = 0x3c, // # of IPG shrinks detected
+ // Duplicate. See REG_STAT_STICKY10G below.
+ StatSticky1G = 0x3e, // tri-speed sticky bits
+ StatInit = 0x3f // Clear all statistics
+};
+
+#define REG_RX_XGMII_PROT_ERR CRA(0x4,0xa,0x3b) /* # protocol errors detected on XGMII interface */
+#define REG_STAT_STICKY10G CRA(0x4,0xa,StatSticky1G) /* 10GbE sticky bits */
+
+#define REG_RX_OK_BYTES(pn) CRA(0x4,pn,RxOkBytes)
+#define REG_RX_BAD_BYTES(pn) CRA(0x4,pn,RxBadBytes)
+#define REG_TX_OK_BYTES(pn) CRA(0x4,pn,TxOkBytes)
+
+/* MII-Management Block registers */
+/* These are for MII-M interface 0, which is the bidirectional LVTTL one. If
+ * we hooked up to the one with separate directions, the middle 0x0 needs to
+ * change to 0x1. And the current errata states that MII-M 1 doesn't work.
+ */
+
+#define REG_MIIM_STATUS CRA(0x3,0x0,0x00) /* MII-M Status */
+#define REG_MIIM_CMD CRA(0x3,0x0,0x01) /* MII-M Command */
+#define REG_MIIM_DATA CRA(0x3,0x0,0x02) /* MII-M Data */
+#define REG_MIIM_PRESCALE CRA(0x3,0x0,0x03) /* MII-M MDC Prescale */
+
+#define REG_ING_FFILT_UM_EN CRA(0x2, 0, 0xd)
+#define REG_ING_FFILT_BE_EN CRA(0x2, 0, 0x1d)
+#define REG_ING_FFILT_VAL0 CRA(0x2, 0, 0x2d)
+#define REG_ING_FFILT_VAL1 CRA(0x2, 0, 0x3d)
+#define REG_ING_FFILT_MASK0 CRA(0x2, 0, 0x4d)
+#define REG_ING_FFILT_MASK1 CRA(0x2, 0, 0x5d)
+#define REG_ING_FFILT_MASK2 CRA(0x2, 0, 0x6d)
+#define REG_ING_FFILT_ETYPE CRA(0x2, 0, 0x7d)
+
+
+/* Whew. */
+
+#endif
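
Two CRA() encodings worked out by hand, for illustration:

/* REG_CHIP_ID    = CRA(0x7,0xf,0x00) = (7 << 13) | (15 << 9) | (0 << 1) = 0xfe00
 * REG_MAX_LEN(3) = CRA(0x1,3,0x02)   = (1 << 13) | (3 << 9)  | (2 << 1) = 0x2604
 */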
diff --git a/drivers/net/ethernet/chelsio/cxgb3/Makefile b/drivers/net/ethernet/chelsio/cxgb3/Makefile
new file mode 100644
index 000000000000..29aff78c7820
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/Makefile
@@ -0,0 +1,8 @@
+#
+# Chelsio T3 driver
+#
+
+obj-$(CONFIG_CHELSIO_T3) += cxgb3.o
+
+cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \
+ xgmac.o sge.o l2t.o cxgb3_offload.o aq100x.o
diff --git a/drivers/net/ethernet/chelsio/cxgb3/adapter.h b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
new file mode 100644
index 000000000000..8b395b537330
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This file should not be included directly. Include common.h instead. */
+
+#ifndef __T3_ADAPTER_H__
+#define __T3_ADAPTER_H__
+
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/cache.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include "t3cdev.h"
+#include <asm/io.h>
+
+struct adapter;
+struct sge_qset;
+struct port_info;
+
+enum mac_idx_types {
+ LAN_MAC_IDX = 0,
+ SAN_MAC_IDX,
+
+ MAX_MAC_IDX
+};
+
+struct iscsi_config {
+ __u8 mac_addr[ETH_ALEN];
+ __u32 flags;
+ int (*send)(struct port_info *pi, struct sk_buff **skb);
+ int (*recv)(struct port_info *pi, struct sk_buff *skb);
+};
+
+struct port_info {
+ struct adapter *adapter;
+ struct sge_qset *qs;
+ u8 port_id;
+ u8 nqsets;
+ u8 first_qset;
+ struct cphy phy;
+ struct cmac mac;
+ struct link_config link_config;
+ struct net_device_stats netstats;
+ int activity;
+ __be32 iscsi_ipv4addr;
+ struct iscsi_config iscsic;
+
+ int link_fault; /* link fault was detected */
+};
+
+enum { /* adapter flags */
+ FULL_INIT_DONE = (1 << 0),
+ USING_MSI = (1 << 1),
+ USING_MSIX = (1 << 2),
+ QUEUES_BOUND = (1 << 3),
+ TP_PARITY_INIT = (1 << 4),
+ NAPI_INIT = (1 << 5),
+};
+
+struct fl_pg_chunk {
+ struct page *page;
+ void *va;
+ unsigned int offset;
+ unsigned long *p_cnt;
+ dma_addr_t mapping;
+};
+
+struct rx_desc;
+struct rx_sw_desc;
+
+struct sge_fl { /* SGE per free-buffer list state */
+ unsigned int buf_size; /* size of each Rx buffer */
+ unsigned int credits; /* # of available Rx buffers */
+ unsigned int pend_cred; /* new buffers since last FL DB ring */
+ unsigned int size; /* capacity of free list */
+ unsigned int cidx; /* consumer index */
+ unsigned int pidx; /* producer index */
+ unsigned int gen; /* free list generation */
+ struct fl_pg_chunk pg_chunk;/* page chunk cache */
+ unsigned int use_pages; /* whether FL uses pages or sk_buffs */
+ unsigned int order; /* order of page allocations */
+ unsigned int alloc_size; /* size of allocated buffer */
+ struct rx_desc *desc; /* address of HW Rx descriptor ring */
+ struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
+ dma_addr_t phys_addr; /* physical address of HW ring start */
+ unsigned int cntxt_id; /* SGE context id for the free list */
+ unsigned long empty; /* # of times queue ran out of buffers */
+ unsigned long alloc_failed; /* # of times buffer allocation failed */
+};
+
+/*
+ * Bundle size for grouping offload RX packets for delivery to the stack.
+ * Don't make this too big as we do prefetch on each packet in a bundle.
+ */
+#define RX_BUNDLE_SIZE 8
+
+struct rsp_desc;
+
+struct sge_rspq { /* state for an SGE response queue */
+ unsigned int credits; /* # of pending response credits */
+ unsigned int size; /* capacity of response queue */
+ unsigned int cidx; /* consumer index */
+ unsigned int gen; /* current generation bit */
+ unsigned int polling; /* is the queue serviced through NAPI? */
+ unsigned int holdoff_tmr; /* interrupt holdoff timer in 100ns */
+ unsigned int next_holdoff; /* holdoff time for next interrupt */
+ unsigned int rx_recycle_buf; /* whether recycling occurred
+ within current sop-eop */
+ struct rsp_desc *desc; /* address of HW response ring */
+ dma_addr_t phys_addr; /* physical address of the ring */
+ unsigned int cntxt_id; /* SGE context id for the response q */
+ spinlock_t lock; /* guards response processing */
+ struct sk_buff_head rx_queue; /* offload packet receive queue */
+ struct sk_buff *pg_skb; /* used to build frag list in napi handler */
+
+ unsigned long offload_pkts;
+ unsigned long offload_bundles;
+ unsigned long eth_pkts; /* # of ethernet packets */
+ unsigned long pure_rsps; /* # of pure (non-data) responses */
+ unsigned long imm_data; /* responses with immediate data */
+ unsigned long rx_drops; /* # of packets dropped due to no mem */
+ unsigned long async_notif; /* # of asynchronous notification events */
+ unsigned long empty; /* # of times queue ran out of credits */
+ unsigned long nomem; /* # of responses deferred due to no mem */
+ unsigned long unhandled_irqs; /* # of spurious intrs */
+ unsigned long starved;
+ unsigned long restarted;
+};
+
+struct tx_desc;
+struct tx_sw_desc;
+
+struct sge_txq { /* state for an SGE Tx queue */
+ unsigned long flags; /* HW DMA fetch status */
+ unsigned int in_use; /* # of in-use Tx descriptors */
+ unsigned int size; /* # of descriptors */
+ unsigned int processed; /* total # of descs HW has processed */
+ unsigned int cleaned; /* total # of descs SW has reclaimed */
+ unsigned int stop_thres; /* SW TX queue suspend threshold */
+ unsigned int cidx; /* consumer index */
+ unsigned int pidx; /* producer index */
+ unsigned int gen; /* current value of generation bit */
+ unsigned int unacked; /* Tx descriptors used since last COMPL */
+ struct tx_desc *desc; /* address of HW Tx descriptor ring */
+ struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
+ spinlock_t lock; /* guards enqueueing of new packets */
+ unsigned int token; /* WR token */
+ dma_addr_t phys_addr; /* physical address of the ring */
+ struct sk_buff_head sendq; /* List of backpressured offload packets */
+ struct tasklet_struct qresume_tsk; /* restarts the queue */
+ unsigned int cntxt_id; /* SGE context id for the Tx q */
+ unsigned long stops; /* # of times q has been stopped */
+ unsigned long restarts; /* # of queue restarts */
+};
+
+enum { /* per port SGE statistics */
+ SGE_PSTAT_TSO, /* # of TSO requests */
+ SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */
+ SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
+ SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
+ SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
+
+ SGE_PSTAT_MAX /* must be last */
+};
+
+struct napi_gro_fraginfo;
+
+struct sge_qset { /* an SGE queue set */
+ struct adapter *adap;
+ struct napi_struct napi;
+ struct sge_rspq rspq;
+ struct sge_fl fl[SGE_RXQ_PER_SET];
+ struct sge_txq txq[SGE_TXQ_PER_SET];
+ int nomem;
+ void *lro_va;
+ struct net_device *netdev;
+ struct netdev_queue *tx_q; /* associated netdev TX queue */
+ unsigned long txq_stopped; /* which Tx queues are stopped */
+ struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
+ struct timer_list rx_reclaim_timer; /* reclaims RX buffers */
+ unsigned long port_stats[SGE_PSTAT_MAX];
+} ____cacheline_aligned;
+
+struct sge {
+ struct sge_qset qs[SGE_QSETS];
+ spinlock_t reg_lock; /* guards non-atomic SGE registers (eg context) */
+};
+
+struct adapter {
+ struct t3cdev tdev;
+ struct list_head adapter_list;
+ void __iomem *regs;
+ struct pci_dev *pdev;
+ unsigned long registered_device_map;
+ unsigned long open_device_map;
+ unsigned long flags;
+
+ const char *name;
+ int msg_enable;
+ unsigned int mmio_len;
+
+ struct adapter_params params;
+ unsigned int slow_intr_mask;
+ unsigned long irq_stats[IRQ_NUM_STATS];
+
+ int msix_nvectors;
+ struct {
+ unsigned short vec;
+ char desc[22];
+ } msix_info[SGE_QSETS + 1];
+
+ /* T3 modules */
+ struct sge sge;
+ struct mc7 pmrx;
+ struct mc7 pmtx;
+ struct mc7 cm;
+ struct mc5 mc5;
+
+ struct net_device *port[MAX_NPORTS];
+ unsigned int check_task_cnt;
+ struct delayed_work adap_check_task;
+ struct work_struct ext_intr_handler_task;
+ struct work_struct fatal_error_handler_task;
+ struct work_struct link_fault_handler_task;
+
+ struct work_struct db_full_task;
+ struct work_struct db_empty_task;
+ struct work_struct db_drop_task;
+
+ struct dentry *debugfs_root;
+
+ struct mutex mdio_lock;
+ spinlock_t stats_lock;
+ spinlock_t work_lock;
+
+ struct sk_buff *nofail_skb;
+};
+
+static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
+{
+ u32 val = readl(adapter->regs + reg_addr);
+
+ CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr, val);
+ return val;
+}
+
+static inline void t3_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
+{
+ CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr, val);
+ writel(val, adapter->regs + reg_addr);
+}
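+
+/*
+ * Illustrative only, not driver code: a read-modify-write built from the
+ * two accessors above, along the lines of the t3_set_reg_field() helper
+ * declared in common.h:
+ *
+ *	static inline void t3_rmw_reg(struct adapter *adap, u32 addr,
+ *				      u32 mask, u32 val)
+ *	{
+ *		t3_write_reg(adap, addr,
+ *			     (t3_read_reg(adap, addr) & ~mask) | val);
+ *	}
+ */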
+
+static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
+{
+ return netdev_priv(adap->port[idx]);
+}
+
+static inline int phy2portid(struct cphy *phy)
+{
+ struct adapter *adap = phy->adapter;
+ struct port_info *port0 = adap2pinfo(adap, 0);
+
+ return &port0->phy == phy ? 0 : 1;
+}
+
+#define OFFLOAD_DEVMAP_BIT 15
+
+#define tdev2adap(d) container_of(d, struct adapter, tdev)
+
+static inline int offload_running(struct adapter *adapter)
+{
+ return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
+}
+
+int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb);
+
+void t3_os_ext_intr_handler(struct adapter *adapter);
+void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status,
+ int speed, int duplex, int fc);
+void t3_os_phymod_changed(struct adapter *adap, int port_id);
+void t3_os_link_fault(struct adapter *adapter, int port_id, int state);
+void t3_os_link_fault_handler(struct adapter *adapter, int port_id);
+
+void t3_sge_start(struct adapter *adap);
+void t3_sge_stop(struct adapter *adap);
+void t3_start_sge_timers(struct adapter *adap);
+void t3_stop_sge_timers(struct adapter *adap);
+void t3_free_sge_resources(struct adapter *adap);
+void t3_sge_err_intr_handler(struct adapter *adapter);
+irq_handler_t t3_intr_handler(struct adapter *adap, int polling);
+netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev);
+int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
+void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
+int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
+ int irq_vec_idx, const struct qset_params *p,
+ int ntxq, struct net_device *dev,
+ struct netdev_queue *netdevq);
+extern struct workqueue_struct *cxgb3_wq;
+
+int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size);
+
+#endif /* __T3_ADAPTER_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/ael1002.c b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c
new file mode 100644
index 000000000000..2028da95afa1
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c
@@ -0,0 +1,941 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "common.h"
+#include "regs.h"
+
+enum {
+ AEL100X_TX_CONFIG1 = 0xc002,
+ AEL1002_PWR_DOWN_HI = 0xc011,
+ AEL1002_PWR_DOWN_LO = 0xc012,
+ AEL1002_XFI_EQL = 0xc015,
+ AEL1002_LB_EN = 0xc017,
+ AEL_OPT_SETTINGS = 0xc017,
+ AEL_I2C_CTRL = 0xc30a,
+ AEL_I2C_DATA = 0xc30b,
+ AEL_I2C_STAT = 0xc30c,
+ AEL2005_GPIO_CTRL = 0xc214,
+ AEL2005_GPIO_STAT = 0xc215,
+
+ AEL2020_GPIO_INTR = 0xc103, /* Latch High (LH) */
+ AEL2020_GPIO_CTRL = 0xc108, /* Store Clear (SC) */
+ AEL2020_GPIO_STAT = 0xc10c, /* Read Only (RO) */
+ AEL2020_GPIO_CFG = 0xc110, /* Read Write (RW) */
+
+ AEL2020_GPIO_SDA = 0, /* IN: i2c serial data */
+ AEL2020_GPIO_MODDET = 1, /* IN: Module Detect */
+ AEL2020_GPIO_0 = 3, /* IN: unassigned */
+ AEL2020_GPIO_1 = 2, /* OUT: unassigned */
+ AEL2020_GPIO_LSTAT = AEL2020_GPIO_1, /* wired to link status LED */
+};
+
+enum { edc_none, edc_sr, edc_twinax };
+
+/* PHY module I2C device address */
+enum {
+ MODULE_DEV_ADDR = 0xa0,
+ SFF_DEV_ADDR = 0xa2,
+};
+
+/* PHY transceiver type */
+enum {
+ phy_transtype_unknown = 0,
+ phy_transtype_sfp = 3,
+ phy_transtype_xfp = 6,
+};
+
+#define AEL2005_MODDET_IRQ 4
+
+struct reg_val {
+ unsigned short mmd_addr;
+ unsigned short reg_addr;
+ unsigned short clear_bits;
+ unsigned short set_bits;
+};
+
+static int set_phy_regs(struct cphy *phy, const struct reg_val *rv)
+{
+ int err;
+
+ for (err = 0; rv->mmd_addr && !err; rv++) {
+ if (rv->clear_bits == 0xffff)
+ err = t3_mdio_write(phy, rv->mmd_addr, rv->reg_addr,
+ rv->set_bits);
+ else
+ err = t3_mdio_change_bits(phy, rv->mmd_addr,
+ rv->reg_addr, rv->clear_bits,
+ rv->set_bits);
+ }
+ return err;
+}
+
+static void ael100x_txon(struct cphy *phy)
+{
+ int tx_on_gpio =
+ phy->mdio.prtad == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL;
+
+ msleep(100);
+ t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio);
+ msleep(30);
+}
+
+/*
+ * Read an 8-bit word from a device attached to the PHY's i2c bus.
+ */
+static int ael_i2c_rd(struct cphy *phy, int dev_addr, int word_addr)
+{
+ int i, err;
+ unsigned int stat, data;
+
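+ /*
+ * The control word below packs the i2c device address into the high
+ * byte, what appears to be a read-start flag into bit 8, and the word
+ * address into the low byte; inferred from usage, not a datasheet.
+ */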
+ err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL_I2C_CTRL,
+ (dev_addr << 8) | (1 << 8) | word_addr);
+ if (err)
+ return err;
+
+ for (i = 0; i < 200; i++) {
+ msleep(1);
+ err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_STAT, &stat);
+ if (err)
+ return err;
+ if ((stat & 3) == 1) {
+ err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_DATA,
+ &data);
+ if (err)
+ return err;
+ return data >> 8;
+ }
+ }
+ CH_WARN(phy->adapter, "PHY %u i2c read of dev.addr %#x.%#x timed out\n",
+ phy->mdio.prtad, dev_addr, word_addr);
+ return -ETIMEDOUT;
+}
+
+static int ael1002_power_down(struct cphy *phy, int enable)
+{
+ int err;
+
+ err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_TXDIS, !!enable);
+ if (!err)
+ err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
+ MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER, enable);
+ return err;
+}
+
+static int ael1002_reset(struct cphy *phy, int wait)
+{
+ int err;
+
+ if ((err = ael1002_power_down(phy, 0)) ||
+ (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL100X_TX_CONFIG1, 1)) ||
+ (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_PWR_DOWN_HI, 0)) ||
+ (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_PWR_DOWN_LO, 0)) ||
+ (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_XFI_EQL, 0x18)) ||
+ (err = t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, AEL1002_LB_EN,
+ 0, 1 << 5)))
+ return err;
+ return 0;
+}
+
+static int ael1002_intr_noop(struct cphy *phy)
+{
+ return 0;
+}
+
+/*
+ * Get link status for a 10GBASE-R device.
+ */
+static int get_link_status_r(struct cphy *phy, int *link_ok, int *speed,
+ int *duplex, int *fc)
+{
+ if (link_ok) {
+ unsigned int stat0, stat1, stat2;
+ int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD,
+ MDIO_PMA_RXDET, &stat0);
+
+ if (!err)
+ err = t3_mdio_read(phy, MDIO_MMD_PCS,
+ MDIO_PCS_10GBRT_STAT1, &stat1);
+ if (!err)
+ err = t3_mdio_read(phy, MDIO_MMD_PHYXS,
+ MDIO_PHYXS_LNSTAT, &stat2);
+ if (err)
+ return err;
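+ /*
+ * Report link up only when all three layers agree: PMA receive
+ * signal detect, PCS 10GBASE-R status, and PHY XS lane alignment
+ * (bit 12 of the lane status register).
+ */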
+ *link_ok = (stat0 & stat1 & (stat2 >> 12)) & 1;
+ }
+ if (speed)
+ *speed = SPEED_10000;
+ if (duplex)
+ *duplex = DUPLEX_FULL;
+ return 0;
+}
+
+static struct cphy_ops ael1002_ops = {
+ .reset = ael1002_reset,
+ .intr_enable = ael1002_intr_noop,
+ .intr_disable = ael1002_intr_noop,
+ .intr_clear = ael1002_intr_noop,
+ .intr_handler = ael1002_intr_noop,
+ .get_link_status = get_link_status_r,
+ .power_down = ael1002_power_down,
+ .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
+};
+
+int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops)
+{
+ cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE,
+ "10GBASE-R");
+ ael100x_txon(phy);
+ return 0;
+}
+
+static int ael1006_reset(struct cphy *phy, int wait)
+{
+ return t3_phy_reset(phy, MDIO_MMD_PMAPMD, wait);
+}
+
+static struct cphy_ops ael1006_ops = {
+ .reset = ael1006_reset,
+ .intr_enable = t3_phy_lasi_intr_enable,
+ .intr_disable = t3_phy_lasi_intr_disable,
+ .intr_clear = t3_phy_lasi_intr_clear,
+ .intr_handler = t3_phy_lasi_intr_handler,
+ .get_link_status = get_link_status_r,
+ .power_down = ael1002_power_down,
+ .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
+};
+
+int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops)
+{
+ cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE,
+ "10GBASE-SR");
+ ael100x_txon(phy);
+ return 0;
+}
+
+/*
+ * Decode our module type.
+ */
+static int ael2xxx_get_module_type(struct cphy *phy, int delay_ms)
+{
+ int v;
+
+ if (delay_ms)
+ msleep(delay_ms);
+
+ /* see SFF-8472 for below */
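+ /*
+ * Byte 3: 10G Ethernet compliance codes (0x10 = SR, 0x20 = LR,
+ * 0x40 = LRM). Byte 6: 1000BASE compliance (4 = 1000BASE-CX).
+ * Byte 10 bit 7: twinaxial media. Byte 18: copper cable length in
+ * meters, used below to tell long twinax (>10 m) from short.
+ */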
+ v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 3);
+ if (v < 0)
+ return v;
+
+ if (v == 0x10)
+ return phy_modtype_sr;
+ if (v == 0x20)
+ return phy_modtype_lr;
+ if (v == 0x40)
+ return phy_modtype_lrm;
+
+ v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 6);
+ if (v < 0)
+ return v;
+ if (v != 4)
+ goto unknown;
+
+ v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 10);
+ if (v < 0)
+ return v;
+
+ if (v & 0x80) {
+ v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 0x12);
+ if (v < 0)
+ return v;
+ return v > 10 ? phy_modtype_twinax_long : phy_modtype_twinax;
+ }
+unknown:
+ return phy_modtype_unknown;
+}
+
+/*
+ * Code to support the Aeluros/NetLogic 2005 10Gb PHY.
+ */
+static int ael2005_setup_sr_edc(struct cphy *phy)
+{
+ static const struct reg_val regs[] = {
+ { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x181 },
+ { MDIO_MMD_PMAPMD, 0xc010, 0xffff, 0x448a },
+ { MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5200 },
+ { 0, 0, 0, 0 }
+ };
+
+ int i, err;
+
+ err = set_phy_regs(phy, regs);
+ if (err)
+ return err;
+
+ msleep(50);
+
+ if (phy->priv != edc_sr)
+ err = t3_get_edc_fw(phy, EDC_OPT_AEL2005,
+ EDC_OPT_AEL2005_SIZE);
+ if (err)
+ return err;
+
+ for (i = 0; i < EDC_OPT_AEL2005_SIZE / sizeof(u16) && !err; i += 2)
+ err = t3_mdio_write(phy, MDIO_MMD_PMAPMD,
+ phy->phy_cache[i],
+ phy->phy_cache[i + 1]);
+ if (!err)
+ phy->priv = edc_sr;
+ return err;
+}
+
+static int ael2005_setup_twinax_edc(struct cphy *phy, int modtype)
+{
+ static const struct reg_val regs[] = {
+ { MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5a00 },
+ { 0, 0, 0, 0 }
+ };
+ static const struct reg_val preemphasis[] = {
+ { MDIO_MMD_PMAPMD, 0xc014, 0xffff, 0xfe16 },
+ { MDIO_MMD_PMAPMD, 0xc015, 0xffff, 0xa000 },
+ { 0, 0, 0, 0 }
+ };
+ int i, err;
+
+ err = set_phy_regs(phy, regs);
+ if (!err && modtype == phy_modtype_twinax_long)
+ err = set_phy_regs(phy, preemphasis);
+ if (err)
+ return err;
+
+ msleep(50);
+
+ if (phy->priv != edc_twinax)
+ err = t3_get_edc_fw(phy, EDC_TWX_AEL2005,
+ EDC_TWX_AEL2005_SIZE);
+ if (err)
+ return err;
+
+ for (i = 0; i < EDC_TWX_AEL2005_SIZE / sizeof(u16) && !err; i += 2)
+ err = t3_mdio_write(phy, MDIO_MMD_PMAPMD,
+ phy->phy_cache[i],
+ phy->phy_cache[i + 1]);
+ if (!err)
+ phy->priv = edc_twinax;
+ return err;
+}
+
+static int ael2005_get_module_type(struct cphy *phy, int delay_ms)
+{
+ int v;
+ unsigned int stat;
+
+ v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, &stat);
+ if (v)
+ return v;
+
+ if (stat & (1 << 8)) /* module absent */
+ return phy_modtype_none;
+
+ return ael2xxx_get_module_type(phy, delay_ms);
+}
+
+static int ael2005_intr_enable(struct cphy *phy)
+{
+ int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0x200);
+ return err ? err : t3_phy_lasi_intr_enable(phy);
+}
+
+static int ael2005_intr_disable(struct cphy *phy)
+{
+ int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0x100);
+ return err ? err : t3_phy_lasi_intr_disable(phy);
+}
+
+static int ael2005_intr_clear(struct cphy *phy)
+{
+ int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0xd00);
+ return err ? err : t3_phy_lasi_intr_clear(phy);
+}
+
+static int ael2005_reset(struct cphy *phy, int wait)
+{
+ static const struct reg_val regs0[] = {
+ { MDIO_MMD_PMAPMD, 0xc001, 0, 1 << 5 },
+ { MDIO_MMD_PMAPMD, 0xc017, 0, 1 << 5 },
+ { MDIO_MMD_PMAPMD, 0xc013, 0xffff, 0xf341 },
+ { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8000 },
+ { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8100 },
+ { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8000 },
+ { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0 },
+ { 0, 0, 0, 0 }
+ };
+ static const struct reg_val regs1[] = {
+ { MDIO_MMD_PMAPMD, 0xca00, 0xffff, 0x0080 },
+ { MDIO_MMD_PMAPMD, 0xca12, 0xffff, 0 },
+ { 0, 0, 0, 0 }
+ };
+
+ int err;
+ unsigned int lasi_ctrl;
+
+ err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
+ &lasi_ctrl);
+ if (err)
+ return err;
+
+ err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 0);
+ if (err)
+ return err;
+
+ msleep(125);
+ phy->priv = edc_none;
+ err = set_phy_regs(phy, regs0);
+ if (err)
+ return err;
+
+ msleep(50);
+
+ err = ael2005_get_module_type(phy, 0);
+ if (err < 0)
+ return err;
+ phy->modtype = err;
+
+ if (err == phy_modtype_twinax || err == phy_modtype_twinax_long)
+ err = ael2005_setup_twinax_edc(phy, err);
+ else
+ err = ael2005_setup_sr_edc(phy);
+ if (err)
+ return err;
+
+ err = set_phy_regs(phy, regs1);
+ if (err)
+ return err;
+
+ /* reset wipes out interrupts, reenable them if they were on */
+ if (lasi_ctrl & 1)
+ err = ael2005_intr_enable(phy);
+ return err;
+}
+
+static int ael2005_intr_handler(struct cphy *phy)
+{
+ unsigned int stat;
+ int ret, edc_needed, cause = 0;
+
+ ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_STAT, &stat);
+ if (ret)
+ return ret;
+
+ if (stat & AEL2005_MODDET_IRQ) {
+ ret = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL,
+ 0xd00);
+ if (ret)
+ return ret;
+
+ /* modules have max 300 ms init time after hot plug */
+ ret = ael2005_get_module_type(phy, 300);
+ if (ret < 0)
+ return ret;
+
+ phy->modtype = ret;
+ if (ret == phy_modtype_none)
+ edc_needed = phy->priv; /* on unplug retain EDC */
+ else if (ret == phy_modtype_twinax ||
+ ret == phy_modtype_twinax_long)
+ edc_needed = edc_twinax;
+ else
+ edc_needed = edc_sr;
+
+ if (edc_needed != phy->priv) {
+ ret = ael2005_reset(phy, 0);
+ return ret ? ret : cphy_cause_module_change;
+ }
+ cause = cphy_cause_module_change;
+ }
+
+ ret = t3_phy_lasi_intr_handler(phy);
+ if (ret < 0)
+ return ret;
+
+ ret |= cause;
+ return ret ? ret : cphy_cause_link_change;
+}
+
+static struct cphy_ops ael2005_ops = {
+ .reset = ael2005_reset,
+ .intr_enable = ael2005_intr_enable,
+ .intr_disable = ael2005_intr_disable,
+ .intr_clear = ael2005_intr_clear,
+ .intr_handler = ael2005_intr_handler,
+ .get_link_status = get_link_status_r,
+ .power_down = ael1002_power_down,
+ .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
+};
+
+int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops)
+{
+ cphy_init(phy, adapter, phy_addr, &ael2005_ops, mdio_ops,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE |
+ SUPPORTED_IRQ, "10GBASE-R");
+ msleep(125);
+ return t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, AEL_OPT_SETTINGS, 0,
+ 1 << 5);
+}
+
+/*
+ * Set up EDC and other parameters for operation with an optical module.
+ */
+static int ael2020_setup_sr_edc(struct cphy *phy)
+{
+ static const struct reg_val regs[] = {
+ /* set CDR offset to 10 */
+ { MDIO_MMD_PMAPMD, 0xcc01, 0xffff, 0x488a },
+
+ /* adjust 10G RX bias current */
+ { MDIO_MMD_PMAPMD, 0xcb1b, 0xffff, 0x0200 },
+ { MDIO_MMD_PMAPMD, 0xcb1c, 0xffff, 0x00f0 },
+ { MDIO_MMD_PMAPMD, 0xcc06, 0xffff, 0x00e0 },
+
+ /* end */
+ { 0, 0, 0, 0 }
+ };
+ int err;
+
+ err = set_phy_regs(phy, regs);
+ msleep(50);
+ if (err)
+ return err;
+
+ phy->priv = edc_sr;
+ return 0;
+}
+
+/*
+ * Set up EDC and other parameters for operation with a TWINAX module.
+ */
+static int ael2020_setup_twinax_edc(struct cphy *phy, int modtype)
+{
+ /* set uC to 40MHz */
+ static const struct reg_val uCclock40MHz[] = {
+ { MDIO_MMD_PMAPMD, 0xff28, 0xffff, 0x4001 },
+ { MDIO_MMD_PMAPMD, 0xff2a, 0xffff, 0x0002 },
+ { 0, 0, 0, 0 }
+ };
+
+ /* activate uC clock */
+ static const struct reg_val uCclockActivate[] = {
+ { MDIO_MMD_PMAPMD, 0xd000, 0xffff, 0x5200 },
+ { 0, 0, 0, 0 }
+ };
+
+ /* set PC to start of SRAM and activate uC */
+ static const struct reg_val uCactivate[] = {
+ { MDIO_MMD_PMAPMD, 0xd080, 0xffff, 0x0100 },
+ { MDIO_MMD_PMAPMD, 0xd092, 0xffff, 0x0000 },
+ { 0, 0, 0, 0 }
+ };
+ int i, err;
+
+ /* set uC clock and activate it */
+ err = set_phy_regs(phy, uCclock40MHz);
+ msleep(500);
+ if (err)
+ return err;
+ err = set_phy_regs(phy, uCclockActivate);
+ msleep(500);
+ if (err)
+ return err;
+
+ if (phy->priv != edc_twinax)
+ err = t3_get_edc_fw(phy, EDC_TWX_AEL2020,
+ EDC_TWX_AEL2020_SIZE);
+ if (err)
+ return err;
+
+ for (i = 0; i < EDC_TWX_AEL2020_SIZE / sizeof(u16) && !err; i += 2)
+ err = t3_mdio_write(phy, MDIO_MMD_PMAPMD,
+ phy->phy_cache[i],
+ phy->phy_cache[i + 1]);
+ /* activate uC; don't let its success mask an earlier write error */
+ if (!err)
+ err = set_phy_regs(phy, uCactivate);
+ if (!err)
+ phy->priv = edc_twinax;
+ return err;
+}
+
+/*
+ * Return the module type.
+ */
+static int ael2020_get_module_type(struct cphy *phy, int delay_ms)
+{
+ int v;
+ unsigned int stat;
+
+ v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_STAT, &stat);
+ if (v)
+ return v;
+
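+ /*
+ * Each GPIO appears to occupy a 4-bit field in the AEL2020 GPIO
+ * registers; bit 0 of the MODDET nibble is high when the module is
+ * absent. Inferred from the shifts used in this file.
+ */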
+ if (stat & (0x1 << (AEL2020_GPIO_MODDET*4))) {
+ /* module absent */
+ return phy_modtype_none;
+ }
+
+ return ael2xxx_get_module_type(phy, delay_ms);
+}
+
+/*
+ * Enable PHY interrupts. We enable "Module Detection" interrupts (on any
+ * state transition) and then the generic Link Alarm Status Interrupt (LASI).
+ */
+static int ael2020_intr_enable(struct cphy *phy)
+{
+ static const struct reg_val regs[] = {
+ /* output Module's Loss Of Signal (LOS) to LED */
+ { MDIO_MMD_PMAPMD, AEL2020_GPIO_CFG+AEL2020_GPIO_LSTAT,
+ 0xffff, 0x4 },
+ { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
+ 0xffff, 0x8 << (AEL2020_GPIO_LSTAT*4) },
+
+ /* enable module detect status change interrupts */
+ { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
+ 0xffff, 0x2 << (AEL2020_GPIO_MODDET*4) },
+
+ /* end */
+ { 0, 0, 0, 0 }
+ };
+ int err, link_ok = 0;
+
+ /* set up "link status" LED and enable module change interrupts */
+ err = set_phy_regs(phy, regs);
+ if (err)
+ return err;
+
+ err = get_link_status_r(phy, &link_ok, NULL, NULL, NULL);
+ if (err)
+ return err;
+ if (link_ok)
+ t3_link_changed(phy->adapter,
+ phy2portid(phy));
+
+ err = t3_phy_lasi_intr_enable(phy);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/*
+ * Disable PHY interrupts. The mirror of the above ...
+ */
+static int ael2020_intr_disable(struct cphy *phy)
+{
+ static const struct reg_val regs[] = {
+ /* reset "link status" LED to "off" */
+ { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
+ 0xffff, 0xb << (AEL2020_GPIO_LSTAT*4) },
+
+ /* disable module detect status change interrupts */
+ { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
+ 0xffff, 0x1 << (AEL2020_GPIO_MODDET*4) },
+
+ /* end */
+ { 0, 0, 0, 0 }
+ };
+ int err;
+
+ /* turn off "link status" LED and disable module change interrupts */
+ err = set_phy_regs(phy, regs);
+ if (err)
+ return err;
+
+ return t3_phy_lasi_intr_disable(phy);
+}
+
+/*
+ * Clear PHY interrupt state.
+ */
+static int ael2020_intr_clear(struct cphy *phy)
+{
+ /*
+ * The GPIO Interrupt register on the AEL2020 is a "Latching High"
+ * (LH) register which is cleared to the current state when it's read.
+ * Thus, we simply read the register and discard the result.
+ */
+ unsigned int stat;
+ int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat);
+ return err ? err : t3_phy_lasi_intr_clear(phy);
+}
+
+static const struct reg_val ael2020_reset_regs[] = {
+ /* Erratum #2: CDRLOL asserted, causing PMA link down status */
+ { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x3101 },
+
+ /* force XAUI to send LF when RX_LOS is asserted */
+ { MDIO_MMD_PMAPMD, 0xcd40, 0xffff, 0x0001 },
+
+ /* allow writes to transceiver module EEPROM on i2c bus */
+ { MDIO_MMD_PMAPMD, 0xff02, 0xffff, 0x0023 },
+ { MDIO_MMD_PMAPMD, 0xff03, 0xffff, 0x0000 },
+ { MDIO_MMD_PMAPMD, 0xff04, 0xffff, 0x0000 },
+
+ /* end */
+ { 0, 0, 0, 0 }
+};
+/*
+ * Reset the PHY and put it into a canonical operating state.
+ */
+static int ael2020_reset(struct cphy *phy, int wait)
+{
+ int err;
+ unsigned int lasi_ctrl;
+
+ /* grab current interrupt state */
+ err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
+ &lasi_ctrl);
+ if (err)
+ return err;
+
+ err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 125);
+ if (err)
+ return err;
+ msleep(100);
+
+ /* basic initialization for all module types */
+ phy->priv = edc_none;
+ err = set_phy_regs(phy, ael2020_reset_regs);
+ if (err)
+ return err;
+
+ /* determine module type and perform appropriate initialization */
+ err = ael2020_get_module_type(phy, 0);
+ if (err < 0)
+ return err;
+ phy->modtype = (u8)err;
+ if (err == phy_modtype_twinax || err == phy_modtype_twinax_long)
+ err = ael2020_setup_twinax_edc(phy, err);
+ else
+ err = ael2020_setup_sr_edc(phy);
+ if (err)
+ return err;
+
+ /* reset wipes out interrupts, reenable them if they were on */
+ if (lasi_ctrl & 1)
+ err = ael2005_intr_enable(phy);
+ return err;
+}
+
+/*
+ * Handle a PHY interrupt.
+ */
+static int ael2020_intr_handler(struct cphy *phy)
+{
+ unsigned int stat;
+ int ret, edc_needed, cause = 0;
+
+ ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat);
+ if (ret)
+ return ret;
+
+ if (stat & (0x1 << AEL2020_GPIO_MODDET)) {
+ /* modules have max 300 ms init time after hot plug */
+ ret = ael2020_get_module_type(phy, 300);
+ if (ret < 0)
+ return ret;
+
+ phy->modtype = (u8)ret;
+ if (ret == phy_modtype_none)
+ edc_needed = phy->priv; /* on unplug retain EDC */
+ else if (ret == phy_modtype_twinax ||
+ ret == phy_modtype_twinax_long)
+ edc_needed = edc_twinax;
+ else
+ edc_needed = edc_sr;
+
+ if (edc_needed != phy->priv) {
+ ret = ael2020_reset(phy, 0);
+ return ret ? ret : cphy_cause_module_change;
+ }
+ cause = cphy_cause_module_change;
+ }
+
+ ret = t3_phy_lasi_intr_handler(phy);
+ if (ret < 0)
+ return ret;
+
+ ret |= cause;
+ return ret ? ret : cphy_cause_link_change;
+}
+
+static struct cphy_ops ael2020_ops = {
+ .reset = ael2020_reset,
+ .intr_enable = ael2020_intr_enable,
+ .intr_disable = ael2020_intr_disable,
+ .intr_clear = ael2020_intr_clear,
+ .intr_handler = ael2020_intr_handler,
+ .get_link_status = get_link_status_r,
+ .power_down = ael1002_power_down,
+ .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
+};
+
+int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops)
+{
+ int err;
+
+ cphy_init(phy, adapter, phy_addr, &ael2020_ops, mdio_ops,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE |
+ SUPPORTED_IRQ, "10GBASE-R");
+ msleep(125);
+
+ err = set_phy_regs(phy, ael2020_reset_regs);
+ if (err)
+ return err;
+ return 0;
+}
+
+/*
+ * Get link status for a 10GBASE-X device.
+ */
+static int get_link_status_x(struct cphy *phy, int *link_ok, int *speed,
+ int *duplex, int *fc)
+{
+ if (link_ok) {
+ unsigned int stat0, stat1, stat2;
+ int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD,
+ MDIO_PMA_RXDET, &stat0);
+
+ if (!err)
+ err = t3_mdio_read(phy, MDIO_MMD_PCS,
+ MDIO_PCS_10GBX_STAT1, &stat1);
+ if (!err)
+ err = t3_mdio_read(phy, MDIO_MMD_PHYXS,
+ MDIO_PHYXS_LNSTAT, &stat2);
+ if (err)
+ return err;
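+ /*
+ * For 10GBASE-X, both the PCS status and PHY XS lane status words
+ * carry their alignment bit in bit 12, hence the two shifts.
+ */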
+ *link_ok = (stat0 & (stat1 >> 12) & (stat2 >> 12)) & 1;
+ }
+ if (speed)
+ *speed = SPEED_10000;
+ if (duplex)
+ *duplex = DUPLEX_FULL;
+ return 0;
+}
+
+static struct cphy_ops qt2045_ops = {
+ .reset = ael1006_reset,
+ .intr_enable = t3_phy_lasi_intr_enable,
+ .intr_disable = t3_phy_lasi_intr_disable,
+ .intr_clear = t3_phy_lasi_intr_clear,
+ .intr_handler = t3_phy_lasi_intr_handler,
+ .get_link_status = get_link_status_x,
+ .power_down = ael1002_power_down,
+ .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
+};
+
+int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops)
+{
+ unsigned int stat;
+
+ cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
+ "10GBASE-CX4");
+
+ /*
+ * Some cards where the PHY is supposed to be at address 0 actually
+ * have it at 1.
+ */
+ if (!phy_addr &&
+ !t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &stat) &&
+ stat == 0xffff)
+ phy->mdio.prtad = 1;
+ return 0;
+}
+
+static int xaui_direct_reset(struct cphy *phy, int wait)
+{
+ return 0;
+}
+
+static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok,
+ int *speed, int *duplex, int *fc)
+{
+ if (link_ok) {
+ unsigned int status;
+ int prtad = phy->mdio.prtad;
+
+ status = t3_read_reg(phy->adapter,
+ XGM_REG(A_XGM_SERDES_STAT0, prtad)) |
+ t3_read_reg(phy->adapter,
+ XGM_REG(A_XGM_SERDES_STAT1, prtad)) |
+ t3_read_reg(phy->adapter,
+ XGM_REG(A_XGM_SERDES_STAT2, prtad)) |
+ t3_read_reg(phy->adapter,
+ XGM_REG(A_XGM_SERDES_STAT3, prtad));
+ *link_ok = !(status & F_LOWSIG0);
+ }
+ if (speed)
+ *speed = SPEED_10000;
+ if (duplex)
+ *duplex = DUPLEX_FULL;
+ return 0;
+}
+
+static int xaui_direct_power_down(struct cphy *phy, int enable)
+{
+ return 0;
+}
+
+static struct cphy_ops xaui_direct_ops = {
+ .reset = xaui_direct_reset,
+ .intr_enable = ael1002_intr_noop,
+ .intr_disable = ael1002_intr_noop,
+ .intr_clear = ael1002_intr_noop,
+ .intr_handler = ael1002_intr_noop,
+ .get_link_status = xaui_direct_get_link_status,
+ .power_down = xaui_direct_power_down,
+};
+
+int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops)
+{
+ cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
+ "10GBASE-CX4");
+ return 0;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/aq100x.c b/drivers/net/ethernet/chelsio/cxgb3/aq100x.c
new file mode 100644
index 000000000000..341b7ef1508f
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/aq100x.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "common.h"
+#include "regs.h"
+
+enum {
+ /* MDIO_DEV_PMA_PMD registers */
+ AQ_LINK_STAT = 0xe800,
+ AQ_IMASK_PMA = 0xf000,
+
+ /* MDIO_DEV_XGXS registers */
+ AQ_XAUI_RX_CFG = 0xc400,
+ AQ_XAUI_TX_CFG = 0xe400,
+
+ /* MDIO_DEV_ANEG registers */
+ AQ_1G_CTRL = 0xc400,
+ AQ_ANEG_STAT = 0xc800,
+
+ /* MDIO_DEV_VEND1 registers */
+ AQ_FW_VERSION = 0x0020,
+ AQ_IFLAG_GLOBAL = 0xfc00,
+ AQ_IMASK_GLOBAL = 0xff00,
+};
+
+enum {
+ IMASK_PMA = 1 << 2,
+ IMASK_GLOBAL = 1 << 15,
+ ADV_1G_FULL = 1 << 15,
+ ADV_1G_HALF = 1 << 14,
+ ADV_10G_FULL = 1 << 12,
+ AQ_RESET = (1 << 14) | (1 << 15),
+ AQ_LOWPOWER = 1 << 12,
+};
+
+static int aq100x_reset(struct cphy *phy, int wait)
+{
+ /*
+ * Ignore the caller-specified wait time; always wait for the reset to
+ * complete. Can take up to 3s.
+ */
+ int err = t3_phy_reset(phy, MDIO_MMD_VEND1, 3000);
+
+ if (err)
+ CH_WARN(phy->adapter, "PHY%d: reset failed (0x%x).\n",
+ phy->mdio.prtad, err);
+
+ return err;
+}
+
+static int aq100x_intr_enable(struct cphy *phy)
+{
+ int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AQ_IMASK_PMA, IMASK_PMA);
+ if (err)
+ return err;
+
+ err = t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, IMASK_GLOBAL);
+ return err;
+}
+
+static int aq100x_intr_disable(struct cphy *phy)
+{
+ return t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, 0);
+}
+
+static int aq100x_intr_clear(struct cphy *phy)
+{
+ unsigned int v;
+
+ t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &v);
+ t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v);
+
+ return 0;
+}
+
+static int aq100x_intr_handler(struct cphy *phy)
+{
+ int err;
+ unsigned int cause, v;
+
+ err = t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &cause);
+ if (err)
+ return err;
+
+ /* Read (and reset) the latching version of the status */
+ t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v);
+
+ return cphy_cause_link_change;
+}
+
+static int aq100x_power_down(struct cphy *phy, int off)
+{
+ return mdio_set_flag(&phy->mdio, phy->mdio.prtad,
+ MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER, off);
+}
+
+static int aq100x_autoneg_enable(struct cphy *phy)
+{
+ int err;
+
+ err = aq100x_power_down(phy, 0);
+ if (!err)
+ err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
+ MDIO_MMD_AN, MDIO_CTRL1,
+ BMCR_ANENABLE | BMCR_ANRESTART, 1);
+
+ return err;
+}
+
+static int aq100x_autoneg_restart(struct cphy *phy)
+{
+ int err;
+
+ err = aq100x_power_down(phy, 0);
+ if (!err)
+ err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
+ MDIO_MMD_AN, MDIO_CTRL1,
+ BMCR_ANENABLE | BMCR_ANRESTART, 1);
+
+ return err;
+}
+
+static int aq100x_advertise(struct cphy *phy, unsigned int advertise_map)
+{
+ unsigned int adv;
+ int err;
+
+ /* 10G advertisement */
+ adv = 0;
+ if (advertise_map & ADVERTISED_10000baseT_Full)
+ adv |= ADV_10G_FULL;
+ err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ ADV_10G_FULL, adv);
+ if (err)
+ return err;
+
+ /* 1G advertisement */
+ adv = 0;
+ if (advertise_map & ADVERTISED_1000baseT_Full)
+ adv |= ADV_1G_FULL;
+ if (advertise_map & ADVERTISED_1000baseT_Half)
+ adv |= ADV_1G_HALF;
+ err = t3_mdio_change_bits(phy, MDIO_MMD_AN, AQ_1G_CTRL,
+ ADV_1G_FULL | ADV_1G_HALF, adv);
+ if (err)
+ return err;
+
+ /* 100M, pause advertisement */
+ adv = 0;
+ if (advertise_map & ADVERTISED_100baseT_Half)
+ adv |= ADVERTISE_100HALF;
+ if (advertise_map & ADVERTISED_100baseT_Full)
+ adv |= ADVERTISE_100FULL;
+ if (advertise_map & ADVERTISED_Pause)
+ adv |= ADVERTISE_PAUSE_CAP;
+ if (advertise_map & ADVERTISED_Asym_Pause)
+ adv |= ADVERTISE_PAUSE_ASYM;
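+ /* 0xfe0 clears all 10/100 and pause ability bits (ANAR bits 5..11) */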
+ err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
+ 0xfe0, adv);
+
+ return err;
+}
+
+static int aq100x_set_loopback(struct cphy *phy, int mmd, int dir, int enable)
+{
+ return mdio_set_flag(&phy->mdio, phy->mdio.prtad,
+ MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ BMCR_LOOPBACK, enable);
+}
+
+static int aq100x_set_speed_duplex(struct cphy *phy, int speed, int duplex)
+{
+ /* manual speed/duplex selection is not supported; autoneg only */
+ return -1;
+}
+
+static int aq100x_get_link_status(struct cphy *phy, int *link_ok,
+ int *speed, int *duplex, int *fc)
+{
+ int err;
+ unsigned int v;
+
+ if (link_ok) {
+ err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AQ_LINK_STAT, &v);
+ if (err)
+ return err;
+
+ *link_ok = v & 1;
+ if (!*link_ok)
+ return 0;
+ }
+
+ err = t3_mdio_read(phy, MDIO_MMD_AN, AQ_ANEG_STAT, &v);
+ if (err)
+ return err;
+
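+ /* AQ_ANEG_STAT, as decoded here: bits 2:1 = negotiated speed, bit 0 = duplex */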
+ if (speed) {
+ switch (v & 0x6) {
+ case 0x6:
+ *speed = SPEED_10000;
+ break;
+ case 0x4:
+ *speed = SPEED_1000;
+ break;
+ case 0x2:
+ *speed = SPEED_100;
+ break;
+ case 0x0:
+ *speed = SPEED_10;
+ break;
+ }
+ }
+
+ if (duplex)
+ *duplex = v & 1 ? DUPLEX_FULL : DUPLEX_HALF;
+
+ return 0;
+}
+
+static struct cphy_ops aq100x_ops = {
+ .reset = aq100x_reset,
+ .intr_enable = aq100x_intr_enable,
+ .intr_disable = aq100x_intr_disable,
+ .intr_clear = aq100x_intr_clear,
+ .intr_handler = aq100x_intr_handler,
+ .autoneg_enable = aq100x_autoneg_enable,
+ .autoneg_restart = aq100x_autoneg_restart,
+ .advertise = aq100x_advertise,
+ .set_loopback = aq100x_set_loopback,
+ .set_speed_duplex = aq100x_set_speed_duplex,
+ .get_link_status = aq100x_get_link_status,
+ .power_down = aq100x_power_down,
+ .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
+};
+
+int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops)
+{
+ unsigned int v, v2, gpio, wait;
+ int err;
+
+ cphy_init(phy, adapter, phy_addr, &aq100x_ops, mdio_ops,
+ SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_AUI,
+ "1000/10GBASE-T");
+
+ /*
+ * The PHY has been out of reset ever since the system powered up, so
+ * force a hard reset here to bring it into a known state.
+ */
+ gpio = phy_addr ? F_GPIO10_OUT_VAL : F_GPIO6_OUT_VAL;
+ t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, 0);
+ msleep(1);
+ t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, gpio);
+
+ /*
+ * Give it enough time to load the firmware and get ready for MDIO.
+ */
+ msleep(1000);
+ wait = 500; /* in 10ms increments */
+ do {
+ err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v);
+ if (err || v == 0xffff) {
+
+ /* Allow prep_adapter to succeed even when 0xffff is read */
+
+ CH_WARN(adapter, "PHY%d: reset failed (0x%x, 0x%x).\n",
+ phy_addr, err, v);
+ goto done;
+ }
+
+ v &= AQ_RESET;
+ if (v)
+ msleep(10);
+ } while (v && --wait);
+ if (v) {
+ CH_WARN(adapter, "PHY%d: reset timed out (0x%x).\n",
+ phy_addr, v);
+
+ goto done; /* let prep_adapter succeed */
+ }
+
+ /* Datasheet says 3s max, but longer resets have been observed */
+ wait = (500 - wait) * 10 + 1000;
+ if (wait > 3000)
+ CH_WARN(adapter, "PHY%d: reset took %ums\n", phy_addr, wait);
+
+ /* Firmware version check. */
+ t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_FW_VERSION, &v);
+ if (v != 101)
+ CH_WARN(adapter, "PHY%d: unsupported firmware %d\n",
+ phy_addr, v);
+
+ /*
+ * The PHY should start in really-low-power mode. Prepare it for normal
+ * operations.
+ */
+ err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v);
+ if (err)
+ return err;
+ if (v & AQ_LOWPOWER) {
+ err = t3_mdio_change_bits(phy, MDIO_MMD_VEND1, MDIO_CTRL1,
+ AQ_LOWPOWER, 0);
+ if (err)
+ return err;
+ msleep(10);
+ } else
+ CH_WARN(adapter, "PHY%d does not start in low power mode.\n",
+ phy_addr);
+
+ /*
+ * Verify XAUI settings, but let prep succeed no matter what.
+ */
+ v = v2 = 0;
+ t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_RX_CFG, &v);
+ t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_TX_CFG, &v2);
+ if (v != 0x1b || v2 != 0x1b)
+ CH_WARN(adapter,
+ "PHY%d: incorrect XAUI settings (0x%x, 0x%x).\n",
+ phy_addr, v, v2);
+
+done:
+ return err;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
new file mode 100644
index 000000000000..df01b6343241
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -0,0 +1,775 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __CHELSIO_COMMON_H
+#define __CHELSIO_COMMON_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include "version.h"
+
+#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
+#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__)
+#define CH_ALERT(adap, fmt, ...) \
+ dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)
+
+/*
+ * More powerful macro that selectively prints messages based on msg_enable.
+ * For info and debugging messages.
+ */
+#define CH_MSG(adapter, level, category, fmt, ...) do { \
+ if ((adapter)->msg_enable & NETIF_MSG_##category) \
+ dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \
+ ## __VA_ARGS__); \
+} while (0)
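+/* e.g. CH_MSG(adapter, INFO, LINK, "port %d link up\n", port_id) */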
+
+#ifdef DEBUG
+# define CH_DBG(adapter, category, fmt, ...) \
+ CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
+#else
+# define CH_DBG(adapter, category, fmt, ...)
+#endif
+
+/* Additional NETIF_MSG_* categories */
+#define NETIF_MSG_MMIO 0x8000000
+
+enum {
+ MAX_NPORTS = 2, /* max # of ports */
+ MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */
+ EEPROMSIZE = 8192, /* Serial EEPROM size */
+ SERNUM_LEN = 16, /* Serial # length */
+ RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */
+ TCB_SIZE = 128, /* TCB size */
+ NMTUS = 16, /* size of MTU table */
+ NCCTRL_WIN = 32, /* # of congestion control windows */
+ PROTO_SRAM_LINES = 128, /* size of TP sram */
+};
+
+#define MAX_RX_COALESCING_LEN 12288U
+
+enum {
+ PAUSE_RX = 1 << 0,
+ PAUSE_TX = 1 << 1,
+ PAUSE_AUTONEG = 1 << 2
+};
+
+enum {
+ SUPPORTED_IRQ = 1 << 24
+};
+
+enum { /* adapter interrupt-maintained statistics */
+ STAT_ULP_CH0_PBL_OOB,
+ STAT_ULP_CH1_PBL_OOB,
+ STAT_PCI_CORR_ECC,
+
+ IRQ_NUM_STATS /* keep last */
+};
+
+#define TP_VERSION_MAJOR 1
+#define TP_VERSION_MINOR 1
+#define TP_VERSION_MICRO 0
+
+#define S_TP_VERSION_MAJOR 16
+#define M_TP_VERSION_MAJOR 0xFF
+#define V_TP_VERSION_MAJOR(x) ((x) << S_TP_VERSION_MAJOR)
+#define G_TP_VERSION_MAJOR(x) \
+ (((x) >> S_TP_VERSION_MAJOR) & M_TP_VERSION_MAJOR)
+
+#define S_TP_VERSION_MINOR 8
+#define M_TP_VERSION_MINOR 0xFF
+#define V_TP_VERSION_MINOR(x) ((x) << S_TP_VERSION_MINOR)
+#define G_TP_VERSION_MINOR(x) \
+ (((x) >> S_TP_VERSION_MINOR) & M_TP_VERSION_MINOR)
+
+#define S_TP_VERSION_MICRO 0
+#define M_TP_VERSION_MICRO 0xFF
+#define V_TP_VERSION_MICRO(x) ((x) << S_TP_VERSION_MICRO)
+#define G_TP_VERSION_MICRO(x) \
+ (((x) >> S_TP_VERSION_MICRO) & M_TP_VERSION_MICRO)
+
+enum {
+ SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */
+ SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */
+ SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */
+};
+
+enum sge_context_type { /* SGE egress context types */
+ SGE_CNTXT_RDMA = 0,
+ SGE_CNTXT_ETH = 2,
+ SGE_CNTXT_OFLD = 4,
+ SGE_CNTXT_CTRL = 5
+};
+
+enum {
+ AN_PKT_SIZE = 32, /* async notification packet size */
+ IMMED_PKT_SIZE = 48 /* packet size for immediate data */
+};
+
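+/* Each sg_ent packs two (length, address) buffer pairs, big-endian for HW */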
+struct sg_ent { /* SGE scatter/gather entry */
+ __be32 len[2];
+ __be64 addr[2];
+};
+
+#ifndef SGE_NUM_GENBITS
+/* Must be 1 or 2 */
+# define SGE_NUM_GENBITS 2
+#endif
+
+#define TX_DESC_FLITS 16U
+#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS)
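+
+/*
+ * A flit is 8 bytes in Chelsio parlance, so a Tx descriptor is 16 * 8 =
+ * 128 bytes; WR_FLITS is the flit budget left to a work request once the
+ * descriptor's generation bits are accounted for.
+ */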
+
+struct cphy;
+struct adapter;
+
+struct mdio_ops {
+ int (*read)(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr);
+ int (*write)(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr, u16 val);
+ unsigned mode_support;
+};
+
+struct adapter_info {
+ unsigned char nports0; /* # of ports on channel 0 */
+ unsigned char nports1; /* # of ports on channel 1 */
+ unsigned char phy_base_addr; /* MDIO PHY base address */
+ unsigned int gpio_out; /* GPIO output settings */
+ unsigned char gpio_intr[MAX_NPORTS]; /* GPIO PHY IRQ pins */
+ unsigned long caps; /* adapter capabilities */
+ const struct mdio_ops *mdio_ops; /* MDIO operations */
+ const char *desc; /* product description */
+};
+
+struct mc5_stats {
+ unsigned long parity_err;
+ unsigned long active_rgn_full;
+ unsigned long nfa_srch_err;
+ unsigned long unknown_cmd;
+ unsigned long reqq_parity_err;
+ unsigned long dispq_parity_err;
+ unsigned long del_act_empty;
+};
+
+struct mc7_stats {
+ unsigned long corr_err;
+ unsigned long uncorr_err;
+ unsigned long parity_err;
+ unsigned long addr_err;
+};
+
+struct mac_stats {
+ u64 tx_octets; /* total # of octets in good frames */
+ u64 tx_octets_bad; /* total # of octets in error frames */
+ u64 tx_frames; /* all good frames */
+ u64 tx_mcast_frames; /* good multicast frames */
+ u64 tx_bcast_frames; /* good broadcast frames */
+ u64 tx_pause; /* # of transmitted pause frames */
+ u64 tx_deferred; /* frames with deferred transmissions */
+ u64 tx_late_collisions; /* # of late collisions */
+ u64 tx_total_collisions; /* # of total collisions */
+ u64 tx_excess_collisions; /* frame errors from excessive collisions */
+ u64 tx_underrun; /* # of Tx FIFO underruns */
+ u64 tx_len_errs; /* # of Tx length errors */
+ u64 tx_mac_internal_errs; /* # of internal MAC errors on Tx */
+ u64 tx_excess_deferral; /* # of frames with excessive deferral */
+ u64 tx_fcs_errs; /* # of frames with bad FCS */
+
+ u64 tx_frames_64; /* # of Tx frames in a particular range */
+ u64 tx_frames_65_127;
+ u64 tx_frames_128_255;
+ u64 tx_frames_256_511;
+ u64 tx_frames_512_1023;
+ u64 tx_frames_1024_1518;
+ u64 tx_frames_1519_max;
+
+ u64 rx_octets; /* total # of octets in good frames */
+ u64 rx_octets_bad; /* total # of octets in error frames */
+ u64 rx_frames; /* all good frames */
+ u64 rx_mcast_frames; /* good multicast frames */
+ u64 rx_bcast_frames; /* good broadcast frames */
+ u64 rx_pause; /* # of received pause frames */
+ u64 rx_fcs_errs; /* # of received frames with bad FCS */
+ u64 rx_align_errs; /* alignment errors */
+ u64 rx_symbol_errs; /* symbol errors */
+ u64 rx_data_errs; /* data errors */
+ u64 rx_sequence_errs; /* sequence errors */
+ u64 rx_runt; /* # of runt frames */
+ u64 rx_jabber; /* # of jabber frames */
+ u64 rx_short; /* # of short frames */
+ u64 rx_too_long; /* # of oversized frames */
+ u64 rx_mac_internal_errs; /* # of internal MAC errors on Rx */
+
+ u64 rx_frames_64; /* # of Rx frames in a particular range */
+ u64 rx_frames_65_127;
+ u64 rx_frames_128_255;
+ u64 rx_frames_256_511;
+ u64 rx_frames_512_1023;
+ u64 rx_frames_1024_1518;
+ u64 rx_frames_1519_max;
+
+ u64 rx_cong_drops; /* # of Rx drops due to SGE congestion */
+
+ unsigned long tx_fifo_parity_err;
+ unsigned long rx_fifo_parity_err;
+ unsigned long tx_fifo_urun;
+ unsigned long rx_fifo_ovfl;
+ unsigned long serdes_signal_loss;
+ unsigned long xaui_pcs_ctc_err;
+ unsigned long xaui_pcs_align_change;
+
+ unsigned long num_toggled; /* # times toggled TxEn due to stuck TX */
+ unsigned long num_resets; /* # times reset due to stuck TX */
+
+ unsigned long link_faults; /* # detected link faults */
+};
+
+struct tp_mib_stats {
+ u32 ipInReceive_hi;
+ u32 ipInReceive_lo;
+ u32 ipInHdrErrors_hi;
+ u32 ipInHdrErrors_lo;
+ u32 ipInAddrErrors_hi;
+ u32 ipInAddrErrors_lo;
+ u32 ipInUnknownProtos_hi;
+ u32 ipInUnknownProtos_lo;
+ u32 ipInDiscards_hi;
+ u32 ipInDiscards_lo;
+ u32 ipInDelivers_hi;
+ u32 ipInDelivers_lo;
+ u32 ipOutRequests_hi;
+ u32 ipOutRequests_lo;
+ u32 ipOutDiscards_hi;
+ u32 ipOutDiscards_lo;
+ u32 ipOutNoRoutes_hi;
+ u32 ipOutNoRoutes_lo;
+ u32 ipReasmTimeout;
+ u32 ipReasmReqds;
+ u32 ipReasmOKs;
+ u32 ipReasmFails;
+
+ u32 reserved[8];
+
+ u32 tcpActiveOpens;
+ u32 tcpPassiveOpens;
+ u32 tcpAttemptFails;
+ u32 tcpEstabResets;
+ u32 tcpOutRsts;
+ u32 tcpCurrEstab;
+ u32 tcpInSegs_hi;
+ u32 tcpInSegs_lo;
+ u32 tcpOutSegs_hi;
+ u32 tcpOutSegs_lo;
+ u32 tcpRetransSeg_hi;
+ u32 tcpRetransSeg_lo;
+ u32 tcpInErrs_hi;
+ u32 tcpInErrs_lo;
+ u32 tcpRtoMin;
+ u32 tcpRtoMax;
+};
+
+struct tp_params {
+ unsigned int nchan; /* # of channels */
+ unsigned int pmrx_size; /* total PMRX capacity */
+ unsigned int pmtx_size; /* total PMTX capacity */
+ unsigned int cm_size; /* total CM capacity */
+ unsigned int chan_rx_size; /* per channel Rx size */
+ unsigned int chan_tx_size; /* per channel Tx size */
+ unsigned int rx_pg_size; /* Rx page size */
+ unsigned int tx_pg_size; /* Tx page size */
+ unsigned int rx_num_pgs; /* # of Rx pages */
+ unsigned int tx_num_pgs; /* # of Tx pages */
+ unsigned int ntimer_qs; /* # of timer queues */
+};
+
+struct qset_params { /* SGE queue set parameters */
+ unsigned int polling; /* polling/interrupt service for rspq */
+ unsigned int coalesce_usecs; /* irq coalescing timer */
+ unsigned int rspq_size; /* # of entries in response queue */
+ unsigned int fl_size; /* # of entries in regular free list */
+ unsigned int jumbo_size; /* # of entries in jumbo free list */
+ unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */
+ unsigned int cong_thres; /* FL congestion threshold */
+ unsigned int vector; /* Interrupt (line or vector) number */
+};
+
+struct sge_params {
+ unsigned int max_pkt_size; /* max offload pkt size */
+ struct qset_params qset[SGE_QSETS];
+};
+
+struct mc5_params {
+ unsigned int mode; /* selects MC5 width */
+ unsigned int nservers; /* size of server region */
+ unsigned int nfilters; /* size of filter region */
+ unsigned int nroutes; /* size of routing region */
+};
+
+/* Default MC5 region sizes */
+enum {
+ DEFAULT_NSERVERS = 512,
+ DEFAULT_NFILTERS = 128
+};
+
+/* MC5 modes, these must be non-0 */
+enum {
+ MC5_MODE_144_BIT = 1,
+ MC5_MODE_72_BIT = 2
+};
+
+/* MC5 min active region size */
+enum { MC5_MIN_TIDS = 16 };
+
+struct vpd_params {
+ unsigned int cclk;
+ unsigned int mclk;
+ unsigned int uclk;
+ unsigned int mdc;
+ unsigned int mem_timing;
+ u8 sn[SERNUM_LEN + 1];
+ u8 eth_base[6];
+ u8 port_type[MAX_NPORTS];
+ unsigned short xauicfg[2];
+};
+
+struct pci_params {
+ unsigned int vpd_cap_addr;
+ unsigned short speed;
+ unsigned char width;
+ unsigned char variant;
+};
+
+enum {
+ PCI_VARIANT_PCI,
+ PCI_VARIANT_PCIX_MODE1_PARITY,
+ PCI_VARIANT_PCIX_MODE1_ECC,
+ PCI_VARIANT_PCIX_266_MODE2,
+ PCI_VARIANT_PCIE
+};
+
+struct adapter_params {
+ struct sge_params sge;
+ struct mc5_params mc5;
+ struct tp_params tp;
+ struct vpd_params vpd;
+ struct pci_params pci;
+
+ const struct adapter_info *info;
+
+ unsigned short mtus[NMTUS];
+ unsigned short a_wnd[NCCTRL_WIN];
+ unsigned short b_wnd[NCCTRL_WIN];
+
+ unsigned int nports; /* # of ethernet ports */
+ unsigned int chan_map; /* bitmap of in-use Tx channels */
+ unsigned int stats_update_period; /* MAC stats accumulation period */
+ unsigned int linkpoll_period; /* link poll period in 0.1s */
+ unsigned int rev; /* chip revision */
+ unsigned int offload;
+};
+
+enum { /* chip revisions */
+ T3_REV_A = 0,
+ T3_REV_B = 2,
+ T3_REV_B2 = 3,
+ T3_REV_C = 4,
+};
+
+struct trace_params {
+ u32 sip;
+ u32 sip_mask;
+ u32 dip;
+ u32 dip_mask;
+ u16 sport;
+ u16 sport_mask;
+ u16 dport;
+ u16 dport_mask;
+ u32 vlan:12;
+ u32 vlan_mask:12;
+ u32 intf:4;
+ u32 intf_mask:4;
+ u8 proto;
+ u8 proto_mask;
+};
+
+struct link_config {
+ unsigned int supported; /* link capabilities */
+ unsigned int advertising; /* advertised capabilities */
+ unsigned short requested_speed; /* speed user has requested */
+ unsigned short speed; /* actual link speed */
+ unsigned char requested_duplex; /* duplex user has requested */
+ unsigned char duplex; /* actual link duplex */
+ unsigned char requested_fc; /* flow control user has requested */
+ unsigned char fc; /* actual link flow control */
+ unsigned char autoneg; /* autonegotiating? */
+ unsigned int link_ok; /* link up? */
+};
+
+#define SPEED_INVALID 0xffff
+#define DUPLEX_INVALID 0xff
+
+struct mc5 {
+ struct adapter *adapter;
+ unsigned int tcam_size;
+ unsigned char part_type;
+ unsigned char parity_enabled;
+ unsigned char mode;
+ struct mc5_stats stats;
+};
+
+static inline unsigned int t3_mc5_size(const struct mc5 *p)
+{
+ return p->tcam_size;
+}
+
+struct mc7 {
+ struct adapter *adapter; /* backpointer to adapter */
+ unsigned int size; /* memory size in bytes */
+ unsigned int width; /* MC7 interface width */
+ unsigned int offset; /* register address offset for MC7 instance */
+ const char *name; /* name of MC7 instance */
+ struct mc7_stats stats; /* MC7 statistics */
+};
+
+static inline unsigned int t3_mc7_size(const struct mc7 *p)
+{
+ return p->size;
+}
+
+struct cmac {
+ struct adapter *adapter;
+ unsigned int offset;
+ unsigned int nucast; /* # of address filters for unicast MACs */
+ unsigned int tx_tcnt;
+ unsigned int tx_xcnt;
+ u64 tx_mcnt;
+ unsigned int rx_xcnt;
+ unsigned int rx_ocnt;
+ u64 rx_mcnt;
+ unsigned int toggle_cnt;
+ unsigned int txen;
+ u64 rx_pause;
+ struct mac_stats stats;
+};
+
+enum {
+ MAC_DIRECTION_RX = 1,
+ MAC_DIRECTION_TX = 2,
+ MAC_RXFIFO_SIZE = 32768
+};
+
+/* PHY loopback direction */
+enum {
+ PHY_LOOPBACK_TX = 1,
+ PHY_LOOPBACK_RX = 2
+};
+
+/* PHY interrupt types */
+enum {
+ cphy_cause_link_change = 1,
+ cphy_cause_fifo_error = 2,
+ cphy_cause_module_change = 4,
+};
+
+/* PHY module types */
+enum {
+ phy_modtype_none,
+ phy_modtype_sr,
+ phy_modtype_lr,
+ phy_modtype_lrm,
+ phy_modtype_twinax,
+ phy_modtype_twinax_long,
+ phy_modtype_unknown
+};
+
+/* PHY operations */
+struct cphy_ops {
+ int (*reset)(struct cphy *phy, int wait);
+
+ int (*intr_enable)(struct cphy *phy);
+ int (*intr_disable)(struct cphy *phy);
+ int (*intr_clear)(struct cphy *phy);
+ int (*intr_handler)(struct cphy *phy);
+
+ int (*autoneg_enable)(struct cphy *phy);
+ int (*autoneg_restart)(struct cphy *phy);
+
+ int (*advertise)(struct cphy *phy, unsigned int advertise_map);
+ int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable);
+ int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
+ int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
+ int *duplex, int *fc);
+ int (*power_down)(struct cphy *phy, int enable);
+
+ u32 mmds;
+};
+enum {
+ EDC_OPT_AEL2005 = 0,
+ EDC_OPT_AEL2005_SIZE = 1084,
+ EDC_TWX_AEL2005 = 1,
+ EDC_TWX_AEL2005_SIZE = 1464,
+ EDC_TWX_AEL2020 = 2,
+ EDC_TWX_AEL2020_SIZE = 1628,
+ EDC_MAX_SIZE = EDC_TWX_AEL2020_SIZE, /* Max cache size */
+};
+
+/* A PHY instance */
+struct cphy {
+ u8 modtype; /* PHY module type */
+ short priv; /* scratch pad */
+ unsigned int caps; /* PHY capabilities */
+ struct adapter *adapter; /* associated adapter */
+ const char *desc; /* PHY description */
+ unsigned long fifo_errors; /* FIFO over/under-flows */
+ const struct cphy_ops *ops; /* PHY operations */
+ struct mdio_if_info mdio;
+ u16 phy_cache[EDC_MAX_SIZE]; /* EDC cache */
+};
+
+/* Convenience MDIO read/write wrappers */
+static inline int t3_mdio_read(struct cphy *phy, int mmd, int reg,
+ unsigned int *valp)
+{
+ int rc = phy->mdio.mdio_read(phy->mdio.dev, phy->mdio.prtad, mmd, reg);
+ *valp = (rc >= 0) ? rc : -1;
+ return (rc >= 0) ? 0 : rc;
+}
+
+static inline int t3_mdio_write(struct cphy *phy, int mmd, int reg,
+ unsigned int val)
+{
+ return phy->mdio.mdio_write(phy->mdio.dev, phy->mdio.prtad, mmd,
+ reg, val);
+}
+
+/* Convenience initializer */
+static inline void cphy_init(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, struct cphy_ops *phy_ops,
+ const struct mdio_ops *mdio_ops,
+ unsigned int caps, const char *desc)
+{
+ phy->caps = caps;
+ phy->adapter = adapter;
+ phy->desc = desc;
+ phy->ops = phy_ops;
+ if (mdio_ops) {
+ phy->mdio.prtad = phy_addr;
+ phy->mdio.mmds = phy_ops->mmds;
+ phy->mdio.mode_support = mdio_ops->mode_support;
+ phy->mdio.mdio_read = mdio_ops->read;
+ phy->mdio.mdio_write = mdio_ops->write;
+ }
+}
+
+/* Accumulate MAC statistics every 180 seconds. For 1G we multiply by 10. */
+#define MAC_STATS_ACCUM_SECS 180
+
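+/* address of register reg_addr within the idx'th XGMAC's register block */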
+#define XGM_REG(reg_addr, idx) \
+ ((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR))
+
+struct addr_val_pair {
+ unsigned int reg_addr;
+ unsigned int val;
+};
+
+#include "adapter.h"
+
+#ifndef PCI_VENDOR_ID_CHELSIO
+# define PCI_VENDOR_ID_CHELSIO 0x1425
+#endif
+
+#define for_each_port(adapter, iter) \
+ for (iter = 0; iter < (adapter)->params.nports; ++iter)
+
+#define adapter_info(adap) ((adap)->params.info)
+
+static inline int uses_xaui(const struct adapter *adap)
+{
+ return adapter_info(adap)->caps & SUPPORTED_AUI;
+}
+
+static inline int is_10G(const struct adapter *adap)
+{
+ return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full;
+}
+
+static inline int is_offload(const struct adapter *adap)
+{
+ return adap->params.offload;
+}
+
+static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
+{
+ return adap->params.vpd.cclk / 1000;
+}
+
+static inline unsigned int is_pcie(const struct adapter *adap)
+{
+ return adap->params.pci.variant == PCI_VARIANT_PCIE;
+}
+
+void t3_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
+ u32 val);
+void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
+ int n, unsigned int offset);
+int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay, u32 *valp);
+static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay)
+{
+ return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
+ delay, NULL);
+}
+int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
+ unsigned int set);
+int t3_phy_reset(struct cphy *phy, int mmd, int wait);
+int t3_phy_advertise(struct cphy *phy, unsigned int advert);
+int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert);
+int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);
+int t3_phy_lasi_intr_enable(struct cphy *phy);
+int t3_phy_lasi_intr_disable(struct cphy *phy);
+int t3_phy_lasi_intr_clear(struct cphy *phy);
+int t3_phy_lasi_intr_handler(struct cphy *phy);
+
+void t3_intr_enable(struct adapter *adapter);
+void t3_intr_disable(struct adapter *adapter);
+void t3_intr_clear(struct adapter *adapter);
+void t3_xgm_intr_enable(struct adapter *adapter, int idx);
+void t3_xgm_intr_disable(struct adapter *adapter, int idx);
+void t3_port_intr_enable(struct adapter *adapter, int idx);
+void t3_port_intr_disable(struct adapter *adapter, int idx);
+int t3_slow_intr_handler(struct adapter *adapter);
+int t3_phy_intr_handler(struct adapter *adapter);
+
+void t3_link_changed(struct adapter *adapter, int port_id);
+void t3_link_fault(struct adapter *adapter, int port_id);
+int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
+const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
+int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data);
+int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data);
+int t3_seeprom_wp(struct adapter *adapter, int enable);
+int t3_get_tp_version(struct adapter *adapter, u32 *vers);
+int t3_check_tpsram_version(struct adapter *adapter);
+int t3_check_tpsram(struct adapter *adapter, const u8 *tp_ram,
+ unsigned int size);
+int t3_set_proto_sram(struct adapter *adap, const u8 *data);
+int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
+int t3_get_fw_version(struct adapter *adapter, u32 *vers);
+int t3_check_fw_version(struct adapter *adapter);
+int t3_init_hw(struct adapter *adapter, u32 fw_params);
+int t3_reset_adapter(struct adapter *adapter);
+int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
+ int reset);
+int t3_replay_prep_adapter(struct adapter *adapter);
+void t3_led_ready(struct adapter *adapter);
+void t3_fatal_err(struct adapter *adapter);
+void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
+void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
+ const u8 * cpus, const u16 *rspq);
+int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
+ unsigned int n, unsigned int *valp);
+int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
+ u64 *buf);
+
+int t3_mac_reset(struct cmac *mac);
+void t3b_pcs_reset(struct cmac *mac);
+void t3_mac_disable_exact_filters(struct cmac *mac);
+void t3_mac_enable_exact_filters(struct cmac *mac);
+int t3_mac_enable(struct cmac *mac, int which);
+int t3_mac_disable(struct cmac *mac, int which);
+int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
+int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev);
+int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
+int t3_mac_set_num_ucast(struct cmac *mac, int n);
+const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
+int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
+int t3b2_mac_watchdog_task(struct cmac *mac);
+
+void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
+int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
+ unsigned int nroutes);
+void t3_mc5_intr_handler(struct mc5 *mc5);
+
+void t3_tp_set_offload_mode(struct adapter *adap, int enable);
+void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps);
+void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
+ unsigned short alpha[NCCTRL_WIN],
+ unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap);
+void t3_config_trace_filter(struct adapter *adapter,
+ const struct trace_params *tp, int filter_index,
+ int invert, int enable);
+int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);
+
+void t3_sge_prep(struct adapter *adap, struct sge_params *p);
+void t3_sge_init(struct adapter *adap, struct sge_params *p);
+int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
+ enum sge_context_type type, int respq, u64 base_addr,
+ unsigned int size, unsigned int token, int gen,
+ unsigned int cidx);
+int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
+ int gts_enable, u64 base_addr, unsigned int size,
+ unsigned int esize, unsigned int cong_thres, int gen,
+ unsigned int cidx);
+int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
+ int irq_vec_idx, u64 base_addr, unsigned int size,
+ unsigned int fl_thres, int gen, unsigned int cidx);
+int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
+ unsigned int size, int rspq, int ovfl_mode,
+ unsigned int credits, unsigned int credit_thres);
+int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable);
+int t3_sge_disable_fl(struct adapter *adapter, unsigned int id);
+int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id);
+int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id);
+int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
+ unsigned int credits);
+
+int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops);
+int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops);
+int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops);
+int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops);
+int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops);
+int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops);
+int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops);
+int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops);
+#endif /* __CHELSIO_COMMON_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h
new file mode 100644
index 000000000000..369fe711fd7f
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CXGB3_OFFLOAD_CTL_DEFS_H
+#define _CXGB3_OFFLOAD_CTL_DEFS_H
+
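+/*
+ * Opcodes for the offload control operation; each opcode determines the
+ * type of the data argument that accompanies it.
+ */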
+enum {
+ GET_MAX_OUTSTANDING_WR = 0,
+ GET_TX_MAX_CHUNK = 1,
+ GET_TID_RANGE = 2,
+ GET_STID_RANGE = 3,
+ GET_RTBL_RANGE = 4,
+ GET_L2T_CAPACITY = 5,
+ GET_MTUS = 6,
+ GET_WR_LEN = 7,
+ GET_IFF_FROM_MAC = 8,
+ GET_DDP_PARAMS = 9,
+ GET_PORTS = 10,
+
+ ULP_ISCSI_GET_PARAMS = 11,
+ ULP_ISCSI_SET_PARAMS = 12,
+
+ RDMA_GET_PARAMS = 13,
+ RDMA_CQ_OP = 14,
+ RDMA_CQ_SETUP = 15,
+ RDMA_CQ_DISABLE = 16,
+ RDMA_CTRL_QP_SETUP = 17,
+ RDMA_GET_MEM = 18,
+ RDMA_GET_MIB = 19,
+
+ GET_RX_PAGE_INFO = 50,
+ GET_ISCSI_IPV4ADDR = 51,
+
+ GET_EMBEDDED_INFO = 70,
+};
+
+/*
+ * Structure used to describe a TID range. Valid TIDs are [base, base+num).
+ */
+struct tid_range {
+ unsigned int base; /* first TID */
+ unsigned int num; /* number of TIDs in range */
+};
+
+/*
+ * Structure used to request the size and contents of the MTU table.
+ */
+struct mtutab {
+ unsigned int size; /* # of entries in the MTU table */
+ const unsigned short *mtus; /* the MTU table values */
+};
+
+struct net_device;
+
+/*
+ * Structure used to request the adapter net_device owning a given MAC address.
+ */
+struct iff_mac {
+ struct net_device *dev; /* the net_device */
+ const unsigned char *mac_addr; /* MAC address to lookup */
+ u16 vlan_tag;
+};
+
+/* Structure used to request a port's iSCSI IPv4 address */
+struct iscsi_ipv4addr {
+ struct net_device *dev; /* the net_device */
+ __be32 ipv4addr; /* the returned iSCSI IPv4 address */
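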
+};
+
+struct pci_dev;
+
+/*
+ * Structure used to request the TCP DDP parameters.
+ */
+struct ddp_params {
+ unsigned int llimit; /* TDDP region start address */
+ unsigned int ulimit; /* TDDP region end address */
+ unsigned int tag_mask; /* TDDP tag mask */
+ struct pci_dev *pdev;
+};
+
+struct adap_ports {
+ unsigned int nports; /* number of ports on this adapter */
+ struct net_device *lldevs[2];
+};
+
+/*
+ * Structure used to return information to the iscsi layer.
+ */
+struct ulp_iscsi_info {
+ unsigned int offset;
+ unsigned int llimit;
+ unsigned int ulimit;
+ unsigned int tagmask;
+ u8 pgsz_factor[4];
+ unsigned int max_rxsz;
+ unsigned int max_txsz;
+ struct pci_dev *pdev;
+};
+
+/*
+ * Structure used to return information to the RDMA layer.
+ */
+struct rdma_info {
+ unsigned int tpt_base; /* TPT base address */
+ unsigned int tpt_top; /* TPT last entry address */
+ unsigned int pbl_base; /* PBL base address */
+ unsigned int pbl_top; /* PBL last entry address */
+ unsigned int rqt_base; /* RQT base address */
+ unsigned int rqt_top; /* RQT last entry address */
+ unsigned int udbell_len; /* user doorbell region length */
+ unsigned long udbell_physbase; /* user doorbell physical start addr */
+ void __iomem *kdb_addr; /* kernel doorbell register address */
+ struct pci_dev *pdev; /* associated PCI device */
+};
+
+/*
+ * Structure used to request an operation on an RDMA completion queue.
+ */
+struct rdma_cq_op {
+ unsigned int id;
+ unsigned int op;
+ unsigned int credits;
+};
+
+/*
+ * Structure used to setup RDMA completion queues.
+ */
+struct rdma_cq_setup {
+ unsigned int id;
+ unsigned long long base_addr;
+ unsigned int size;
+ unsigned int credits;
+ unsigned int credit_thres;
+ unsigned int ovfl_mode;
+};
+
+/*
+ * Structure used to setup the RDMA control egress context.
+ */
+struct rdma_ctrlqp_setup {
+ unsigned long long base_addr;
+ unsigned int size;
+};
+
+/*
+ * Offload TX/RX page information.
+ */
+struct ofld_page_info {
+ unsigned int page_size; /* Page size, should be a power of 2 */
+ unsigned int num; /* Number of pages */
+};
+
+/*
+ * Structure used to get firmware and protocol engine versions.
+ */
+struct ch_embedded_info {
+ u32 fw_vers;
+ u32 tp_vers;
+};
+#endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
new file mode 100644
index 000000000000..920d918ed193
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CHELSIO_DEFS_H
+#define _CHELSIO_DEFS_H
+
+#include <linux/skbuff.h>
+#include <net/tcp.h>
+
+#include "t3cdev.h"
+
+#include "cxgb3_offload.h"
+
+#define VALIDATE_TID 1
+
+void *cxgb_alloc_mem(unsigned long size);
+void cxgb_free_mem(void *addr);
+
+/*
+ * Map an ATID or a STID to its entry in the corresponding TID table.
+ */
+static inline union active_open_entry *atid2entry(const struct tid_info *t,
+ unsigned int atid)
+{
+ return &t->atid_tab[atid - t->atid_base];
+}
+
+static inline union listen_entry *stid2entry(const struct tid_info *t,
+ unsigned int stid)
+{
+ return &t->stid_tab[stid - t->stid_base];
+}
+
+/*
+ * Find the connection corresponding to a TID.
+ */
+static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
+ unsigned int tid)
+{
+ struct t3c_tid_entry *t3c_tid = tid < t->ntids ?
+ &(t->tid_tab[tid]) : NULL;
+
+ return (t3c_tid && t3c_tid->client) ? t3c_tid : NULL;
+}
+
+/*
+ * Find the connection corresponding to a server TID.
+ */
+static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t,
+ unsigned int tid)
+{
+ union listen_entry *e;
+
+ if (tid < t->stid_base || tid >= t->stid_base + t->nstids)
+ return NULL;
+
+ e = stid2entry(t, tid);
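+ /* A free entry's next pointer aims back into the TID tables, so a
+ * pointer in that range means the stid is not an active server. */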
+ if ((void *)e->next >= (void *)t->tid_tab &&
+ (void *)e->next < (void *)&t->atid_tab[t->natids])
+ return NULL;
+
+ return &e->t3c_tid;
+}
+
+/*
+ * Find the connection corresponding to an active-open TID.
+ */
+static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t,
+ unsigned int tid)
+{
+ union active_open_entry *e;
+
+ if (tid < t->atid_base || tid >= t->atid_base + t->natids)
+ return NULL;
+
+ e = atid2entry(t, tid);
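+ /* same free-list test as in lookup_stid() */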
+ if ((void *)e->next >= (void *)t->tid_tab &&
+ (void *)e->next < (void *)&t->atid_tab[t->natids])
+ return NULL;
+
+ return &e->t3c_tid;
+}
+
+int attach_t3cdev(struct t3cdev *dev);
+void detach_t3cdev(struct t3cdev *dev);
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h
new file mode 100644
index 000000000000..b19e4376ba76
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __CHIOCTL_H__
+#define __CHIOCTL_H__
+
+/*
+ * Ioctl commands specific to this driver.
+ */
+enum {
+ CHELSIO_GETMTUTAB = 1029,
+ CHELSIO_SETMTUTAB = 1030,
+ CHELSIO_SET_PM = 1032,
+ CHELSIO_GET_PM = 1033,
+ CHELSIO_GET_MEM = 1038,
+ CHELSIO_LOAD_FW = 1041,
+ CHELSIO_SET_TRACE_FILTER = 1044,
+ CHELSIO_SET_QSET_PARAMS = 1045,
+ CHELSIO_GET_QSET_PARAMS = 1046,
+ CHELSIO_SET_QSET_NUM = 1047,
+ CHELSIO_GET_QSET_NUM = 1048,
+};
+
+struct ch_reg {
+ uint32_t cmd;
+ uint32_t addr;
+ uint32_t val;
+};
+
+struct ch_cntxt {
+ uint32_t cmd;
+ uint32_t cntxt_type;
+ uint32_t cntxt_id;
+ uint32_t data[4];
+};
+
+/* context types */
+enum { CNTXT_TYPE_EGRESS, CNTXT_TYPE_FL, CNTXT_TYPE_RSP, CNTXT_TYPE_CQ };
+
+struct ch_desc {
+ uint32_t cmd;
+ uint32_t queue_num;
+ uint32_t idx;
+ uint32_t size;
+ uint8_t data[128];
+};
+
+struct ch_mem_range {
+ uint32_t cmd;
+ uint32_t mem_id;
+ uint32_t addr;
+ uint32_t len;
+ uint32_t version;
+ uint8_t buf[0];
+};
+
+struct ch_qset_params {
+ uint32_t cmd;
+ uint32_t qset_idx;
+ int32_t txq_size[3];
+ int32_t rspq_size;
+ int32_t fl_size[2];
+ int32_t intr_lat;
+ int32_t polling;
+ int32_t lro;
+ int32_t cong_thres;
+ int32_t vector;
+ int32_t qnum;
+};
+
+struct ch_pktsched_params {
+ uint32_t cmd;
+ uint8_t sched;
+ uint8_t idx;
+ uint8_t min;
+ uint8_t max;
+ uint8_t binding;
+};
+
+#ifndef TCB_SIZE
+# define TCB_SIZE 128
+#endif
+
+/* TCB size in 32-bit words */
+#define TCB_WORDS (TCB_SIZE / 4)
+
+enum { MEM_CM, MEM_PMRX, MEM_PMTX }; /* ch_mem_range.mem_id values */
+
+struct ch_mtus {
+ uint32_t cmd;
+ uint32_t nmtus;
+ uint16_t mtus[NMTUS];
+};
+
+struct ch_pm {
+ uint32_t cmd;
+ uint32_t tx_pg_sz;
+ uint32_t tx_num_pg;
+ uint32_t rx_pg_sz;
+ uint32_t rx_num_pg;
+ uint32_t pm_total;
+};
+
+struct ch_tcam {
+ uint32_t cmd;
+ uint32_t tcam_size;
+ uint32_t nservers;
+ uint32_t nroutes;
+ uint32_t nfilters;
+};
+
+struct ch_tcb {
+ uint32_t cmd;
+ uint32_t tcb_index;
+ uint32_t tcb_data[TCB_WORDS];
+};
+
+struct ch_tcam_word {
+ uint32_t cmd;
+ uint32_t addr;
+ uint32_t buf[3];
+};
+
+struct ch_trace {
+ uint32_t cmd;
+ uint32_t sip;
+ uint32_t sip_mask;
+ uint32_t dip;
+ uint32_t dip_mask;
+ uint16_t sport;
+ uint16_t sport_mask;
+ uint16_t dport;
+ uint16_t dport_mask;
+ uint32_t vlan:12;
+ uint32_t vlan_mask:12;
+ uint32_t intf:4;
+ uint32_t intf_mask:4;
+ uint8_t proto;
+ uint8_t proto_mask;
+ uint8_t invert_match:1;
+ uint8_t config_tx:1;
+ uint8_t config_rx:1;
+ uint8_t trace_tx:1;
+ uint8_t trace_rx:1;
+};
+
+#define SIOCCHIOCTL SIOCDEVPRIVATE
+
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
new file mode 100644
index 000000000000..29e0e4243231
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -0,0 +1,3448 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/mdio.h>
+#include <linux/sockios.h>
+#include <linux/workqueue.h>
+#include <linux/proc_fs.h>
+#include <linux/rtnetlink.h>
+#include <linux/firmware.h>
+#include <linux/log2.h>
+#include <linux/stringify.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+
+#include "common.h"
+#include "cxgb3_ioctl.h"
+#include "regs.h"
+#include "cxgb3_offload.h"
+#include "version.h"
+
+#include "cxgb3_ctl_defs.h"
+#include "t3_cpl.h"
+#include "firmware_exports.h"
+
+enum {
+ MAX_TXQ_ENTRIES = 16384,
+ MAX_CTRL_TXQ_ENTRIES = 1024,
+ MAX_RSPQ_ENTRIES = 16384,
+ MAX_RX_BUFFERS = 16384,
+ MAX_RX_JUMBO_BUFFERS = 16384,
+ MIN_TXQ_ENTRIES = 4,
+ MIN_CTRL_TXQ_ENTRIES = 4,
+ MIN_RSPQ_ENTRIES = 32,
+ MIN_FL_ENTRIES = 32
+};
+
+#define PORT_MASK ((1 << MAX_NPORTS) - 1)
+
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+ NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+#define EEPROM_MAGIC 0x38E2F10C
+
+#define CH_DEVICE(devid, idx) \
+ { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
+
+static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
+ CH_DEVICE(0x20, 0), /* PE9000 */
+ CH_DEVICE(0x21, 1), /* T302E */
+ CH_DEVICE(0x22, 2), /* T310E */
+ CH_DEVICE(0x23, 3), /* T320X */
+ CH_DEVICE(0x24, 1), /* T302X */
+ CH_DEVICE(0x25, 3), /* T320E */
+ CH_DEVICE(0x26, 2), /* T310X */
+ CH_DEVICE(0x30, 2), /* T3B10 */
+ CH_DEVICE(0x31, 3), /* T3B20 */
+ CH_DEVICE(0x32, 1), /* T3B02 */
+ CH_DEVICE(0x35, 6), /* T3C20-derived T3C10 */
+ CH_DEVICE(0x36, 3), /* S320E-CR */
+ CH_DEVICE(0x37, 7), /* N320E-G2 */
+ {0,}
+};
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+module_param(dflt_msg_enable, int, 0644);
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
+
+/*
+ * The driver uses the best interrupt scheme available on a platform in the
+ * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
+ * of these schemes the driver may consider as follows:
+ *
+ * msi = 2: choose from among all three options
+ * msi = 1: only consider MSI and pin interrupts
+ * msi = 0: force pin interrupts
+ */
+static int msi = 2;
+
+module_param(msi, int, 0644);
+MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
+
+/*
+ * The driver enables offload as a default.
+ * To disable it, use ofld_disable = 1.
+ */
+
+static int ofld_disable = 0;
+
+module_param(ofld_disable, int, 0644);
+MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
+
+/*
+ * We have work elements that we need to cancel when an interface is taken
+ * down. Normally the work elements would be executed by keventd but that
+ * can deadlock because of linkwatch. If our close method takes the rtnl
+ * lock and linkwatch is ahead of our work elements in keventd, linkwatch
+ * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
+ * for our work to complete. Get our own work queue to solve this.
+ */
+struct workqueue_struct *cxgb3_wq;
+
+/**
+ * link_report - show link status and link speed/duplex
+ * @dev: the net device whose link settings are to be reported
+ *
+ * Shows the link status, speed, and duplex of a port.
+ */
+static void link_report(struct net_device *dev)
+{
+ if (!netif_carrier_ok(dev))
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ else {
+ const char *s = "10Mbps";
+ const struct port_info *p = netdev_priv(dev);
+
+ switch (p->link_config.speed) {
+ case SPEED_10000:
+ s = "10Gbps";
+ break;
+ case SPEED_1000:
+ s = "1000Mbps";
+ break;
+ case SPEED_100:
+ s = "100Mbps";
+ break;
+ }
+
+ printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
+ p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
+ }
+}
+
+static void enable_tx_fifo_drain(struct adapter *adapter,
+ struct port_info *pi)
+{
+ t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
+ F_ENDROPPKT);
+ t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
+ t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
+ t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
+}
+
+static void disable_tx_fifo_drain(struct adapter *adapter,
+ struct port_info *pi)
+{
+ t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
+ F_ENDROPPKT, 0);
+}
+
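+/*
+ * OS callback for MAC link-fault state changes: bring the carrier up or
+ * down and, while the link is faulted, let the TX FIFO drain by dropping
+ * packets at the MAC.
+ */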
+void t3_os_link_fault(struct adapter *adap, int port_id, int state)
+{
+ struct net_device *dev = adap->port[port_id];
+ struct port_info *pi = netdev_priv(dev);
+
+ if (state == netif_carrier_ok(dev))
+ return;
+
+ if (state) {
+ struct cmac *mac = &pi->mac;
+
+ netif_carrier_on(dev);
+
+ disable_tx_fifo_drain(adap, pi);
+
+ /* Clear local faults */
+ t3_xgm_intr_disable(adap, pi->port_id);
+ t3_read_reg(adap, A_XGM_INT_STATUS +
+ pi->mac.offset);
+ t3_write_reg(adap,
+ A_XGM_INT_CAUSE + pi->mac.offset,
+ F_XGM_INT);
+
+ t3_set_reg_field(adap,
+ A_XGM_INT_ENABLE +
+ pi->mac.offset,
+ F_XGM_INT, F_XGM_INT);
+ t3_xgm_intr_enable(adap, pi->port_id);
+
+ t3_mac_enable(mac, MAC_DIRECTION_TX);
+ } else {
+ netif_carrier_off(dev);
+
+ /* Flush TX FIFO */
+ enable_tx_fifo_drain(adap, pi);
+ }
+ link_report(dev);
+}
+
+/**
+ * t3_os_link_changed - handle link status changes
+ * @adapter: the adapter associated with the link change
+ * @port_id: the port index whose link status has changed
+ * @link_stat: the new status of the link
+ * @speed: the new speed setting
+ * @duplex: the new duplex setting
+ * @pause: the new flow-control setting
+ *
+ * This is the OS-dependent handler for link status changes. The OS
+ * neutral handler takes care of most of the processing for these events,
+ * then calls this handler for any OS-specific processing.
+ */
+void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
+ int speed, int duplex, int pause)
+{
+ struct net_device *dev = adapter->port[port_id];
+ struct port_info *pi = netdev_priv(dev);
+ struct cmac *mac = &pi->mac;
+
+ /* Skip changes from disabled ports. */
+ if (!netif_running(dev))
+ return;
+
+ if (link_stat != netif_carrier_ok(dev)) {
+ if (link_stat) {
+ disable_tx_fifo_drain(adapter, pi);
+
+ t3_mac_enable(mac, MAC_DIRECTION_RX);
+
+ /* Clear local faults */
+ t3_xgm_intr_disable(adapter, pi->port_id);
+ t3_read_reg(adapter, A_XGM_INT_STATUS +
+ pi->mac.offset);
+ t3_write_reg(adapter,
+ A_XGM_INT_CAUSE + pi->mac.offset,
+ F_XGM_INT);
+
+ t3_set_reg_field(adapter,
+ A_XGM_INT_ENABLE + pi->mac.offset,
+ F_XGM_INT, F_XGM_INT);
+ t3_xgm_intr_enable(adapter, pi->port_id);
+
+ netif_carrier_on(dev);
+ } else {
+ netif_carrier_off(dev);
+
+ t3_xgm_intr_disable(adapter, pi->port_id);
+ t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
+ t3_set_reg_field(adapter,
+ A_XGM_INT_ENABLE + pi->mac.offset,
+ F_XGM_INT, 0);
+
+ if (is_10G(adapter))
+ pi->phy.ops->power_down(&pi->phy, 1);
+
+ t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
+ t3_mac_disable(mac, MAC_DIRECTION_RX);
+ t3_link_start(&pi->phy, mac, &pi->link_config);
+
+ /* Flush TX FIFO */
+ enable_tx_fifo_drain(adapter, pi);
+ }
+
+ link_report(dev);
+ }
+}
+
+/**
+ * t3_os_phymod_changed - handle PHY module changes
+ * @adap: the adapter whose PHY module changed
+ * @port_id: the port index of the changed PHY
+ *
+ * This is the OS-dependent handler for PHY module changes. It is
+ * invoked when a PHY module is removed or inserted to perform any
+ * OS-specific processing.
+ */
+void t3_os_phymod_changed(struct adapter *adap, int port_id)
+{
+ static const char *mod_str[] = {
+ NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
+ };
+
+ const struct net_device *dev = adap->port[port_id];
+ const struct port_info *pi = netdev_priv(dev);
+
+ if (pi->phy.modtype == phy_modtype_none)
+ printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
+ else
+ printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
+ mod_str[pi->phy.modtype]);
+}
+
+static void cxgb_set_rxmode(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+
+ t3_mac_set_rx_mode(&pi->mac, dev);
+}
+
+/**
+ * link_start - enable a port
+ * @dev: the device to enable
+ *
+ * Performs the MAC and PHY actions needed to enable a port.
+ */
+static void link_start(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct cmac *mac = &pi->mac;
+
+ t3_mac_reset(mac);
+ t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
+ t3_mac_set_mtu(mac, dev->mtu);
+ t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
+ t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
+ t3_mac_set_rx_mode(mac, dev);
+ t3_link_start(&pi->phy, mac, &pi->link_config);
+ t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+}
+
+static inline void cxgb_disable_msi(struct adapter *adapter)
+{
+ if (adapter->flags & USING_MSIX) {
+ pci_disable_msix(adapter->pdev);
+ adapter->flags &= ~USING_MSIX;
+ } else if (adapter->flags & USING_MSI) {
+ pci_disable_msi(adapter->pdev);
+ adapter->flags &= ~USING_MSI;
+ }
+}
+
+/*
+ * Interrupt handler for asynchronous events used with MSI-X.
+ */
+static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
+{
+ t3_slow_intr_handler(cookie);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Name the MSI-X interrupts.
+ */
+static void name_msix_vecs(struct adapter *adap)
+{
+ int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
+
+ snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
+ adap->msix_info[0].desc[n] = 0;
+
+ for_each_port(adap, j) {
+ struct net_device *d = adap->port[j];
+ const struct port_info *pi = netdev_priv(d);
+
+ for (i = 0; i < pi->nqsets; i++, msi_idx++) {
+ snprintf(adap->msix_info[msi_idx].desc, n,
+ "%s-%d", d->name, pi->first_qset + i);
+ adap->msix_info[msi_idx].desc[n] = 0;
+ }
+ }
+}
+
+static int request_msix_data_irqs(struct adapter *adap)
+{
+ int i, j, err, qidx = 0;
+
+ for_each_port(adap, i) {
+ int nqsets = adap2pinfo(adap, i)->nqsets;
+
+ for (j = 0; j < nqsets; ++j) {
+ err = request_irq(adap->msix_info[qidx + 1].vec,
+ t3_intr_handler(adap,
+ adap->sge.qs[qidx].
+ rspq.polling), 0,
+ adap->msix_info[qidx + 1].desc,
+ &adap->sge.qs[qidx]);
+ if (err) {
+ while (--qidx >= 0)
+ free_irq(adap->msix_info[qidx + 1].vec,
+ &adap->sge.qs[qidx]);
+ return err;
+ }
+ qidx++;
+ }
+ }
+ return 0;
+}
+
+static void free_irq_resources(struct adapter *adapter)
+{
+ if (adapter->flags & USING_MSIX) {
+ int i, n = 0;
+
+ free_irq(adapter->msix_info[0].vec, adapter);
+ for_each_port(adapter, i)
+ n += adap2pinfo(adapter, i)->nqsets;
+
+ for (i = 0; i < n; ++i)
+ free_irq(adapter->msix_info[i + 1].vec,
+ &adapter->sge.qs[i]);
+ } else
+ free_irq(adapter->pdev->irq, adapter);
+}
+
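+/*
+ * Poll, with a bounded number of attempts, until at least n management
+ * replies beyond init_cnt have arrived on response queue 0.
+ */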
+static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
+ unsigned long n)
+{
+ int attempts = 10;
+
+ while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
+ if (!--attempts)
+ return -ETIMEDOUT;
+ msleep(10);
+ }
+ return 0;
+}
+
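+/*
+ * Initialize parity in the TP's on-chip memories by writing every SMT,
+ * L2T, and routing-table entry once, then touching a TCB field, so that
+ * all entries carry valid parity before offload traffic starts.
+ */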
+static int init_tp_parity(struct adapter *adap)
+{
+ int i;
+ struct sk_buff *skb;
+ struct cpl_set_tcb_field *greq;
+ unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
+
+ t3_tp_set_offload_mode(adap, 1);
+
+ for (i = 0; i < 16; i++) {
+ struct cpl_smt_write_req *req;
+
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ skb = adap->nofail_skb;
+ if (!skb)
+ goto alloc_skb_fail;
+
+ req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
+ memset(req, 0, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
+ req->mtu_idx = NMTUS - 1;
+ req->iff = i;
+ t3_mgmt_tx(adap, skb);
+ if (skb == adap->nofail_skb) {
+ await_mgmt_replies(adap, cnt, i + 1);
+ adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+ if (!adap->nofail_skb)
+ goto alloc_skb_fail;
+ }
+ }
+
+ for (i = 0; i < 2048; i++) {
+ struct cpl_l2t_write_req *req;
+
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ skb = adap->nofail_skb;
+ if (!skb)
+ goto alloc_skb_fail;
+
+ req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
+ memset(req, 0, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
+ req->params = htonl(V_L2T_W_IDX(i));
+ t3_mgmt_tx(adap, skb);
+ if (skb == adap->nofail_skb) {
+ await_mgmt_replies(adap, cnt, 16 + i + 1);
+ adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+ if (!adap->nofail_skb)
+ goto alloc_skb_fail;
+ }
+ }
+
+ for (i = 0; i < 2048; i++) {
+ struct cpl_rte_write_req *req;
+
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ skb = adap->nofail_skb;
+ if (!skb)
+ goto alloc_skb_fail;
+
+ req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
+ memset(req, 0, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
+ req->l2t_idx = htonl(V_L2T_W_IDX(i));
+ t3_mgmt_tx(adap, skb);
+ if (skb == adap->nofail_skb) {
+ await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
+ adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+ if (!adap->nofail_skb)
+ goto alloc_skb_fail;
+ }
+ }
+
+ skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+ if (!skb)
+ skb = adap->nofail_skb;
+ if (!skb)
+ goto alloc_skb_fail;
+
+ greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
+ memset(greq, 0, sizeof(*greq));
+ greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
+ greq->mask = cpu_to_be64(1);
+ t3_mgmt_tx(adap, skb);
+
+ i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
+ if (skb == adap->nofail_skb) {
+ i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
+ adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
+ }
+
+ t3_tp_set_offload_mode(adap, 0);
+ return i;
+
+alloc_skb_fail:
+ t3_tp_set_offload_mode(adap, 0);
+ return -ENOMEM;
+}
+
+/**
+ * setup_rss - configure RSS
+ * @adap: the adapter
+ *
+ * Sets up RSS to distribute packets to multiple receive queues. We
+ * configure the RSS CPU lookup table to distribute to the number of HW
+ * receive queues, and the response queue lookup table to narrow that
+ * down to the response queues actually configured for each port.
+ * We always configure the RSS mapping for two ports since the mapping
+ * table has plenty of entries.
+ */
+static void setup_rss(struct adapter *adap)
+{
+ int i;
+ unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
+ unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
+ u8 cpus[SGE_QSETS + 1];
+ u16 rspq_map[RSS_TABLE_SIZE];
+
+ for (i = 0; i < SGE_QSETS; ++i)
+ cpus[i] = i;
+ cpus[SGE_QSETS] = 0xff; /* terminator */
+
+ for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
+ rspq_map[i] = i % nq0;
+ rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
+ }
+
+ t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
+ F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
+ V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
+}
+
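+/* Ring the doorbell of every egress (TX) context of each active queue set */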
+static void ring_dbs(struct adapter *adap)
+{
+ int i, j;
+
+ for (i = 0; i < SGE_QSETS; i++) {
+ struct sge_qset *qs = &adap->sge.qs[i];
+
+ if (qs->adap)
+ for (j = 0; j < SGE_TXQ_PER_SET; j++)
+ t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
+ V_EGRCNTX(qs->txq[j].cntxt_id));
+ }
+}
+
+static void init_napi(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < SGE_QSETS; i++) {
+ struct sge_qset *qs = &adap->sge.qs[i];
+
+ if (qs->adap)
+ netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
+ 64);
+ }
+
+ /*
+ * netif_napi_add() can be called only once per napi_struct because it
+ * adds each new napi_struct to a list. Record that NAPI has been set
+ * up so it is not called a second time, e.g., during EEH recovery.
+ */
+ adap->flags |= NAPI_INIT;
+}
+
+/*
+ * Wait until all NAPI handlers are descheduled. This includes the handlers of
+ * both netdevices representing interfaces and the dummy ones for the extra
+ * queues.
+ */
+static void quiesce_rx(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < SGE_QSETS; i++)
+ if (adap->sge.qs[i].adap)
+ napi_disable(&adap->sge.qs[i].napi);
+}
+
+static void enable_all_napi(struct adapter *adap)
+{
+ int i;
+ for (i = 0; i < SGE_QSETS; i++)
+ if (adap->sge.qs[i].adap)
+ napi_enable(&adap->sge.qs[i].napi);
+}
+
+/**
+ * setup_sge_qsets - configure SGE Tx/Rx/response queues
+ * @adap: the adapter
+ *
+ * Determines how many sets of SGE queues to use and initializes them.
+ * We support multiple queue sets per port if we have MSI-X, otherwise
+ * just one queue set per port.
+ */
+static int setup_sge_qsets(struct adapter *adap)
+{
+ int i, j, err, irq_idx = 0, qset_idx = 0;
+ unsigned int ntxq = SGE_TXQ_PER_SET;
+
+ if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
+ irq_idx = -1;
+
+ for_each_port(adap, i) {
+ struct net_device *dev = adap->port[i];
+ struct port_info *pi = netdev_priv(dev);
+
+ pi->qs = &adap->sge.qs[pi->first_qset];
+ for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
+ err = t3_sge_alloc_qset(adap, qset_idx, 1,
+ (adap->flags & USING_MSIX) ? qset_idx + 1 :
+ irq_idx,
+ &adap->params.sge.qset[qset_idx], ntxq, dev,
+ netdev_get_tx_queue(dev, j));
+ if (err) {
+ t3_free_sge_resources(adap);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t attr_show(struct device *d, char *buf,
+ ssize_t(*format) (struct net_device *, char *))
+{
+ ssize_t len;
+
+ /* Synchronize with ioctls that may shut down the device */
+ rtnl_lock();
+ len = (*format) (to_net_dev(d), buf);
+ rtnl_unlock();
+ return len;
+}
+
+static ssize_t attr_store(struct device *d,
+ const char *buf, size_t len,
+ ssize_t(*set) (struct net_device *, unsigned int),
+ unsigned int min_val, unsigned int max_val)
+{
+ char *endp;
+ ssize_t ret;
+ unsigned int val;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ val = simple_strtoul(buf, &endp, 0);
+ if (endp == buf || val < min_val || val > max_val)
+ return -EINVAL;
+
+ rtnl_lock();
+ ret = (*set) (to_net_dev(d), val);
+ if (!ret)
+ ret = len;
+ rtnl_unlock();
+ return ret;
+}
+
+#define CXGB3_SHOW(name, val_expr) \
+static ssize_t format_##name(struct net_device *dev, char *buf) \
+{ \
+ struct port_info *pi = netdev_priv(dev); \
+ struct adapter *adap = pi->adapter; \
+ return sprintf(buf, "%u\n", val_expr); \
+} \
+static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return attr_show(d, buf, format_##name); \
+}
+
+static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
+
+ if (adap->flags & FULL_INIT_DONE)
+ return -EBUSY;
+ if (val && adap->params.rev == 0)
+ return -EINVAL;
+ if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
+ min_tids)
+ return -EINVAL;
+ adap->params.mc5.nfilters = val;
+ return 0;
+}
+
+static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return attr_store(d, buf, len, set_nfilters, 0, ~0);
+}
+
+static ssize_t set_nservers(struct net_device *dev, unsigned int val)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+
+ if (adap->flags & FULL_INIT_DONE)
+ return -EBUSY;
+ if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
+ MC5_MIN_TIDS)
+ return -EINVAL;
+ adap->params.mc5.nservers = val;
+ return 0;
+}
+
+static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return attr_store(d, buf, len, set_nservers, 0, ~0);
+}
+
+#define CXGB3_ATTR_R(name, val_expr) \
+CXGB3_SHOW(name, val_expr) \
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
+
+#define CXGB3_ATTR_RW(name, val_expr, store_method) \
+CXGB3_SHOW(name, val_expr) \
+static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
+
+CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
+CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
+CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
+
+static struct attribute *cxgb3_attrs[] = {
+ &dev_attr_cam_size.attr,
+ &dev_attr_nfilters.attr,
+ &dev_attr_nservers.attr,
+ NULL
+};
+
+static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
+
+static ssize_t tm_attr_show(struct device *d,
+ char *buf, int sched)
+{
+ struct port_info *pi = netdev_priv(to_net_dev(d));
+ struct adapter *adap = pi->adapter;
+ unsigned int v, addr, bpt, cpt;
+ ssize_t len;
+
+ addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
+ rtnl_lock();
+ t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
+ v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
+ if (sched & 1)
+ v >>= 16;
+ bpt = (v >> 8) & 0xff;
+ cpt = v & 0xff;
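+ /* bpt is bytes per scheduler tick, cpt core clocks per tick; the
+ * rate in Kbps works out to bpt * (cclk_Hz / cpt) / 125. */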
+ if (!cpt)
+ len = sprintf(buf, "disabled\n");
+ else {
+ v = (adap->params.vpd.cclk * 1000) / cpt;
+ len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
+ }
+ rtnl_unlock();
+ return len;
+}
+
+static ssize_t tm_attr_store(struct device *d,
+ const char *buf, size_t len, int sched)
+{
+ struct port_info *pi = netdev_priv(to_net_dev(d));
+ struct adapter *adap = pi->adapter;
+ unsigned int val;
+ char *endp;
+ ssize_t ret;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ val = simple_strtoul(buf, &endp, 0);
+ if (endp == buf || val > 10000000)
+ return -EINVAL;
+
+ rtnl_lock();
+ ret = t3_config_sched(adap, val, sched);
+ if (!ret)
+ ret = len;
+ rtnl_unlock();
+ return ret;
+}
+
+#define TM_ATTR(name, sched) \
+static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return tm_attr_show(d, buf, sched); \
+} \
+static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ return tm_attr_store(d, buf, len, sched); \
+} \
+static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
+
+TM_ATTR(sched0, 0);
+TM_ATTR(sched1, 1);
+TM_ATTR(sched2, 2);
+TM_ATTR(sched3, 3);
+TM_ATTR(sched4, 4);
+TM_ATTR(sched5, 5);
+TM_ATTR(sched6, 6);
+TM_ATTR(sched7, 7);
+
+static struct attribute *offload_attrs[] = {
+ &dev_attr_sched0.attr,
+ &dev_attr_sched1.attr,
+ &dev_attr_sched2.attr,
+ &dev_attr_sched3.attr,
+ &dev_attr_sched4.attr,
+ &dev_attr_sched5.attr,
+ &dev_attr_sched6.attr,
+ &dev_attr_sched7.attr,
+ NULL
+};
+
+static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
+
+/*
+ * Sends an sk_buff to an offload queue driver
+ * after dealing with any active network taps.
+ */
+static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
+{
+ int ret;
+
+ local_bh_disable();
+ ret = t3_offload_tx(tdev, skb);
+ local_bh_enable();
+ return ret;
+}
+
+static int write_smt_entry(struct adapter *adapter, int idx)
+{
+ struct cpl_smt_write_req *req;
+ struct port_info *pi = netdev_priv(adapter->port[idx]);
+ struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+
+ if (!skb)
+ return -ENOMEM;
+
+ req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
+ req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
+ req->iff = idx;
+ memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
+ memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
+ skb->priority = 1;
+ offload_tx(&adapter->tdev, skb);
+ return 0;
+}
+
+static int init_smt(struct adapter *adapter)
+{
+ int i;
+
+ for_each_port(adapter, i)
+ write_smt_entry(adapter, i);
+ return 0;
+}
+
+static void init_port_mtus(struct adapter *adapter)
+{
+ unsigned int mtus = adapter->port[0]->mtu;
+
+ if (adapter->port[1])
+ mtus |= adapter->port[1]->mtu << 16;
+ t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
+}
+
+static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
+ int hi, int port)
+{
+ struct sk_buff *skb;
+ struct mngt_pktsched_wr *req;
+ int ret;
+
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ skb = adap->nofail_skb;
+ if (!skb)
+ return -ENOMEM;
+
+ req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
+ req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
+ req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
+ req->sched = sched;
+ req->idx = qidx;
+ req->min = lo;
+ req->max = hi;
+ req->binding = port;
+ ret = t3_mgmt_tx(adap, skb);
+ if (skb == adap->nofail_skb) {
+ adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
+ GFP_KERNEL);
+ if (!adap->nofail_skb)
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+
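+/*
+ * Bind each port's queue sets to that port's packet scheduler via firmware
+ * management commands.
+ */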
+static int bind_qsets(struct adapter *adap)
+{
+ int i, j, err = 0;
+
+ for_each_port(adap, i) {
+ const struct port_info *pi = adap2pinfo(adap, i);
+
+ for (j = 0; j < pi->nqsets; ++j) {
+ int ret = send_pktsched_cmd(adap, 1,
+ pi->first_qset + j, -1,
+ -1, i);
+ if (ret)
+ err = ret;
+ }
+ }
+
+ return err;
+}
+
+#define FW_VERSION __stringify(FW_VERSION_MAJOR) "." \
+ __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
+#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
+#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "." \
+ __stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
+#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
+#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
+#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
+#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
+MODULE_FIRMWARE(FW_FNAME);
+MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
+MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
+MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
+MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
+MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);
+
+static inline const char *get_edc_fw_name(int edc_idx)
+{
+ const char *fw_name = NULL;
+
+ switch (edc_idx) {
+ case EDC_OPT_AEL2005:
+ fw_name = AEL2005_OPT_EDC_NAME;
+ break;
+ case EDC_TWX_AEL2005:
+ fw_name = AEL2005_TWX_EDC_NAME;
+ break;
+ case EDC_TWX_AEL2020:
+ fw_name = AEL2020_TWX_EDC_NAME;
+ break;
+ }
+ return fw_name;
+}
+
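+/*
+ * Fetch a PHY EDC (electronic dispersion compensation) microcode image,
+ * verify its size and checksum, and unpack its 32-bit words into the
+ * PHY's 16-bit register cache.
+ */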
+int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
+{
+ struct adapter *adapter = phy->adapter;
+ const struct firmware *fw;
+ char buf[64];
+ u32 csum;
+ const __be32 *p;
+ u16 *cache = phy->phy_cache;
+ int i, ret;
+
+ snprintf(buf, sizeof(buf), "%s", get_edc_fw_name(edc_idx));
+
+ ret = request_firmware(&fw, buf, &adapter->pdev->dev);
+ if (ret < 0) {
+ dev_err(&adapter->pdev->dev,
+ "could not upgrade firmware: unable to load %s\n",
+ buf);
+ return ret;
+ }
+
+ /* check size, taking the trailing checksum word into account */
+ if (fw->size > size + 4) {
+ CH_ERR(adapter, "firmware image too large %u, expected %d\n",
+ (unsigned int)fw->size, size + 4);
+ ret = -EINVAL;
+ }
+
+ /* compute checksum */
+ p = (const __be32 *)fw->data;
+ for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
+ csum += ntohl(p[i]);
+
+ if (csum != 0xffffffff) {
+ CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
+ csum);
+ ret = -EINVAL;
+ }
+
+ for (i = 0; i < size / 4 ; i++) {
+ *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
+ *cache++ = be32_to_cpu(p[i]) & 0xffff;
+ }
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int upgrade_fw(struct adapter *adap)
+{
+ int ret;
+ const struct firmware *fw;
+ struct device *dev = &adap->pdev->dev;
+
+ ret = request_firmware(&fw, FW_FNAME, dev);
+ if (ret < 0) {
+ dev_err(dev, "could not upgrade firmware: unable to load %s\n",
+ FW_FNAME);
+ return ret;
+ }
+ ret = t3_load_fw(adap, fw->data, fw->size);
+ release_firmware(fw);
+
+ if (ret == 0)
+ dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
+ FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
+ else
+ dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
+ FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
+
+ return ret;
+}
+
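+/* Map the chip revision to the character used in TP SRAM image names */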
+static inline char t3rev2char(struct adapter *adapter)
+{
+ char rev = 0;
+
+ switch(adapter->params.rev) {
+ case T3_REV_B:
+ case T3_REV_B2:
+ rev = 'b';
+ break;
+ case T3_REV_C:
+ rev = 'c';
+ break;
+ }
+ return rev;
+}
+
+static int update_tpsram(struct adapter *adap)
+{
+ const struct firmware *tpsram;
+ char buf[64];
+ struct device *dev = &adap->pdev->dev;
+ int ret;
+ char rev;
+
+ rev = t3rev2char(adap);
+ if (!rev)
+ return 0;
+
+ snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);
+
+ ret = request_firmware(&tpsram, buf, dev);
+ if (ret < 0) {
+ dev_err(dev, "could not load TP SRAM: unable to load %s\n",
+ buf);
+ return ret;
+ }
+
+ ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
+ if (ret)
+ goto release_tpsram;
+
+ ret = t3_set_proto_sram(adap, tpsram->data);
+ if (ret == 0)
+ dev_info(dev,
+ "successful update of protocol engine "
+ "to %d.%d.%d\n",
+ TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
+ else
+ dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
+ TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
+ if (ret)
+ dev_err(dev, "loading protocol SRAM failed\n");
+
+release_tpsram:
+ release_firmware(tpsram);
+
+ return ret;
+}
+
+/**
+ * cxgb_up - enable the adapter
+ * @adap: adapter being enabled
+ *
+ * Called when the first port is enabled, this function performs the
+ * actions necessary to make an adapter operational, such as completing
+ * the initialization of HW modules, and enabling interrupts.
+ *
+ * Must be called with the rtnl lock held.
+ */
+static int cxgb_up(struct adapter *adap)
+{
+ int err;
+
+ if (!(adap->flags & FULL_INIT_DONE)) {
+ err = t3_check_fw_version(adap);
+ if (err == -EINVAL) {
+ err = upgrade_fw(adap);
+ CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
+ FW_VERSION_MAJOR, FW_VERSION_MINOR,
+ FW_VERSION_MICRO, err ? "failed" : "succeeded");
+ }
+
+ err = t3_check_tpsram_version(adap);
+ if (err == -EINVAL) {
+ err = update_tpsram(adap);
+ CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
+ TP_VERSION_MAJOR, TP_VERSION_MINOR,
+ TP_VERSION_MICRO, err ? "failed" : "succeeded");
+ }
+
+ /*
+ * Clear interrupts now to catch errors if t3_init_hw fails.
+ * We clear them again later as initialization may trigger
+ * conditions that can interrupt.
+ */
+ t3_intr_clear(adap);
+
+ err = t3_init_hw(adap, 0);
+ if (err)
+ goto out;
+
+ t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
+ t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
+
+ err = setup_sge_qsets(adap);
+ if (err)
+ goto out;
+
+ setup_rss(adap);
+ if (!(adap->flags & NAPI_INIT))
+ init_napi(adap);
+
+ t3_start_sge_timers(adap);
+ adap->flags |= FULL_INIT_DONE;
+ }
+
+ t3_intr_clear(adap);
+
+ if (adap->flags & USING_MSIX) {
+ name_msix_vecs(adap);
+ err = request_irq(adap->msix_info[0].vec,
+ t3_async_intr_handler, 0,
+ adap->msix_info[0].desc, adap);
+ if (err)
+ goto irq_err;
+
+ err = request_msix_data_irqs(adap);
+ if (err) {
+ free_irq(adap->msix_info[0].vec, adap);
+ goto irq_err;
+ }
+ } else if ((err = request_irq(adap->pdev->irq,
+ t3_intr_handler(adap,
+ adap->sge.qs[0].rspq.
+ polling),
+ (adap->flags & USING_MSI) ?
+ 0 : IRQF_SHARED,
+ adap->name, adap)))
+ goto irq_err;
+
+ enable_all_napi(adap);
+ t3_sge_start(adap);
+ t3_intr_enable(adap);
+
+ if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
+ is_offload(adap) && init_tp_parity(adap) == 0)
+ adap->flags |= TP_PARITY_INIT;
+
+ if (adap->flags & TP_PARITY_INIT) {
+ t3_write_reg(adap, A_TP_INT_CAUSE,
+ F_CMCACHEPERR | F_ARPLUTPERR);
+ t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
+ }
+
+ if (!(adap->flags & QUEUES_BOUND)) {
+ int ret = bind_qsets(adap);
+
+ if (ret < 0) {
+ CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
+ t3_intr_disable(adap);
+ free_irq_resources(adap);
+ err = ret;
+ goto out;
+ }
+ adap->flags |= QUEUES_BOUND;
+ }
+
+out:
+ return err;
+irq_err:
+ CH_ERR(adap, "request_irq failed, err %d\n", err);
+ goto out;
+}
+
+/*
+ * Release resources when all the ports and offloading have been stopped.
+ */
+static void cxgb_down(struct adapter *adapter, int on_wq)
+{
+ t3_sge_stop(adapter);
+ spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
+ t3_intr_disable(adapter);
+ spin_unlock_irq(&adapter->work_lock);
+
+ free_irq_resources(adapter);
+ quiesce_rx(adapter);
+ t3_sge_stop(adapter);
+ if (!on_wq)
+ flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */
+}
+
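+/*
+ * Schedule the periodic adapter check task; the link poll period is in
+ * tenths of a second, with the statistics update period as the fallback.
+ */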
+static void schedule_chk_task(struct adapter *adap)
+{
+ unsigned int timeo;
+
+ timeo = adap->params.linkpoll_period ?
+ (HZ * adap->params.linkpoll_period) / 10 :
+ adap->params.stats_update_period * HZ;
+ if (timeo)
+ queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
+}
+
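+/*
+ * Bring up the offload side of the adapter: enable TP offload mode,
+ * activate the offload module, program port MTUs and SMT entries, and
+ * notify the registered offload clients.
+ */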
+static int offload_open(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ struct t3cdev *tdev = dev2t3cdev(dev);
+ int adap_up = adapter->open_device_map & PORT_MASK;
+ int err;
+
+ if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
+ return 0;
+
+ if (!adap_up && (err = cxgb_up(adapter)) < 0)
+ goto out;
+
+ t3_tp_set_offload_mode(adapter, 1);
+ tdev->lldev = adapter->port[0];
+ err = cxgb3_offload_activate(adapter);
+ if (err)
+ goto out;
+
+ init_port_mtus(adapter);
+ t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
+ adapter->params.b_wnd,
+ adapter->params.rev == 0 ?
+ adapter->port[0]->mtu : 0xffff);
+ init_smt(adapter);
+
+ if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
+ dev_dbg(&dev->dev, "cannot create sysfs group\n");
+
+ /* Call back all registered clients */
+ cxgb3_add_clients(tdev);
+
+out:
+ /* restore them in case the offload module has changed them */
+ if (err) {
+ t3_tp_set_offload_mode(adapter, 0);
+ clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
+ cxgb3_set_dummy_ops(tdev);
+ }
+ return err;
+}
+
+static int offload_close(struct t3cdev *tdev)
+{
+ struct adapter *adapter = tdev2adap(tdev);
+ struct t3c_data *td = T3C_DATA(tdev);
+
+ if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
+ return 0;
+
+ /* Call back all registered clients */
+ cxgb3_remove_clients(tdev);
+
+ sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
+
+ /* Flush work scheduled while releasing TIDs */
+ flush_work_sync(&td->tid_release_task);
+
+ tdev->lldev = NULL;
+ cxgb3_set_dummy_ops(tdev);
+ t3_tp_set_offload_mode(adapter, 0);
+ clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
+
+ if (!adapter->open_device_map)
+ cxgb_down(adapter, 0);
+
+ cxgb3_offload_deactivate(adapter);
+ return 0;
+}
+
+static int cxgb_open(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ int other_ports = adapter->open_device_map & PORT_MASK;
+ int err;
+
+ if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
+ return err;
+
+ set_bit(pi->port_id, &adapter->open_device_map);
+ if (is_offload(adapter) && !ofld_disable) {
+ err = offload_open(dev);
+ if (err)
+ printk(KERN_WARNING
+ "Could not initialize offload capabilities\n");
+ }
+
+ netif_set_real_num_tx_queues(dev, pi->nqsets);
+ err = netif_set_real_num_rx_queues(dev, pi->nqsets);
+ if (err)
+ return err;
+ link_start(dev);
+ t3_port_intr_enable(adapter, pi->port_id);
+ netif_tx_start_all_queues(dev);
+ if (!other_ports)
+ schedule_chk_task(adapter);
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
+ return 0;
+}
+
+static int __cxgb_close(struct net_device *dev, int on_wq)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+
+ if (!adapter->open_device_map)
+ return 0;
+
+ /* Stop link fault interrupts */
+ t3_xgm_intr_disable(adapter, pi->port_id);
+ t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
+
+ t3_port_intr_disable(adapter, pi->port_id);
+ netif_tx_stop_all_queues(dev);
+ pi->phy.ops->power_down(&pi->phy, 1);
+ netif_carrier_off(dev);
+ t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
+
+ spin_lock_irq(&adapter->work_lock); /* sync with update task */
+ clear_bit(pi->port_id, &adapter->open_device_map);
+ spin_unlock_irq(&adapter->work_lock);
+
+ if (!(adapter->open_device_map & PORT_MASK))
+ cancel_delayed_work_sync(&adapter->adap_check_task);
+
+ if (!adapter->open_device_map)
+ cxgb_down(adapter, on_wq);
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
+ return 0;
+}
+
+static int cxgb_close(struct net_device *dev)
+{
+ return __cxgb_close(dev, 0);
+}
+
+static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ struct net_device_stats *ns = &pi->netstats;
+ const struct mac_stats *pstats;
+
+ spin_lock(&adapter->stats_lock);
+ pstats = t3_mac_update_stats(&pi->mac);
+ spin_unlock(&adapter->stats_lock);
+
+ ns->tx_bytes = pstats->tx_octets;
+ ns->tx_packets = pstats->tx_frames;
+ ns->rx_bytes = pstats->rx_octets;
+ ns->rx_packets = pstats->rx_frames;
+ ns->multicast = pstats->rx_mcast_frames;
+
+ ns->tx_errors = pstats->tx_underrun;
+ ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
+ pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
+ pstats->rx_fifo_ovfl;
+
+ /* detailed rx_errors */
+ ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
+ ns->rx_over_errors = 0;
+ ns->rx_crc_errors = pstats->rx_fcs_errs;
+ ns->rx_frame_errors = pstats->rx_symbol_errs;
+ ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
+ ns->rx_missed_errors = pstats->rx_cong_drops;
+
+ /* detailed tx_errors */
+ ns->tx_aborted_errors = 0;
+ ns->tx_carrier_errors = 0;
+ ns->tx_fifo_errors = pstats->tx_underrun;
+ ns->tx_heartbeat_errors = 0;
+ ns->tx_window_errors = 0;
+ return ns;
+}
+
+static u32 get_msglevel(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ return adapter->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ adapter->msg_enable = val;
+}
+
+static char stats_strings[][ETH_GSTRING_LEN] = {
+ "TxOctetsOK ",
+ "TxFramesOK ",
+ "TxMulticastFramesOK",
+ "TxBroadcastFramesOK",
+ "TxPauseFrames ",
+ "TxUnderrun ",
+ "TxExtUnderrun ",
+
+ "TxFrames64 ",
+ "TxFrames65To127 ",
+ "TxFrames128To255 ",
+ "TxFrames256To511 ",
+ "TxFrames512To1023 ",
+ "TxFrames1024To1518 ",
+ "TxFrames1519ToMax ",
+
+ "RxOctetsOK ",
+ "RxFramesOK ",
+ "RxMulticastFramesOK",
+ "RxBroadcastFramesOK",
+ "RxPauseFrames ",
+ "RxFCSErrors ",
+ "RxSymbolErrors ",
+ "RxShortErrors ",
+ "RxJabberErrors ",
+ "RxLengthErrors ",
+ "RxFIFOoverflow ",
+
+ "RxFrames64 ",
+ "RxFrames65To127 ",
+ "RxFrames128To255 ",
+ "RxFrames256To511 ",
+ "RxFrames512To1023 ",
+ "RxFrames1024To1518 ",
+ "RxFrames1519ToMax ",
+
+ "PhyFIFOErrors ",
+ "TSO ",
+ "VLANextractions ",
+ "VLANinsertions ",
+ "TxCsumOffload ",
+ "RxCsumGood ",
+ "LroAggregated ",
+ "LroFlushed ",
+ "LroNoDesc ",
+ "RxDrops ",
+
+ "CheckTXEnToggled ",
+ "CheckResets ",
+
+ "LinkFaults ",
+};
+
+static int get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(stats_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+#define T3_REGMAP_SIZE (3 * 1024)
+
+static int get_regs_len(struct net_device *dev)
+{
+ return T3_REGMAP_SIZE;
+}
+
+static int get_eeprom_len(struct net_device *dev)
+{
+ return EEPROMSIZE;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ u32 fw_vers = 0;
+ u32 tp_vers = 0;
+
+ spin_lock(&adapter->stats_lock);
+ t3_get_fw_version(adapter, &fw_vers);
+ t3_get_tp_version(adapter, &tp_vers);
+ spin_unlock(&adapter->stats_lock);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(adapter->pdev));
+ if (!fw_vers)
+ strcpy(info->fw_version, "N/A");
+ else {
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%s %u.%u.%u TP %u.%u.%u",
+ G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
+ G_FW_VERSION_MAJOR(fw_vers),
+ G_FW_VERSION_MINOR(fw_vers),
+ G_FW_VERSION_MICRO(fw_vers),
+ G_TP_VERSION_MAJOR(tp_vers),
+ G_TP_VERSION_MINOR(tp_vers),
+ G_TP_VERSION_MICRO(tp_vers));
+ }
+}
+
+static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ if (stringset == ETH_SS_STATS)
+ memcpy(data, stats_strings, sizeof(stats_strings));
+}
+
+static unsigned long collect_sge_port_stats(struct adapter *adapter,
+ struct port_info *p, int idx)
+{
+ int i;
+ unsigned long tot = 0;
+
+ for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
+ tot += adapter->sge.qs[i].port_stats[idx];
+ return tot;
+}
+
+static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ const struct mac_stats *s;
+
+ spin_lock(&adapter->stats_lock);
+ s = t3_mac_update_stats(&pi->mac);
+ spin_unlock(&adapter->stats_lock);
+
+ *data++ = s->tx_octets;
+ *data++ = s->tx_frames;
+ *data++ = s->tx_mcast_frames;
+ *data++ = s->tx_bcast_frames;
+ *data++ = s->tx_pause;
+ *data++ = s->tx_underrun;
+ *data++ = s->tx_fifo_urun;
+
+ *data++ = s->tx_frames_64;
+ *data++ = s->tx_frames_65_127;
+ *data++ = s->tx_frames_128_255;
+ *data++ = s->tx_frames_256_511;
+ *data++ = s->tx_frames_512_1023;
+ *data++ = s->tx_frames_1024_1518;
+ *data++ = s->tx_frames_1519_max;
+
+ *data++ = s->rx_octets;
+ *data++ = s->rx_frames;
+ *data++ = s->rx_mcast_frames;
+ *data++ = s->rx_bcast_frames;
+ *data++ = s->rx_pause;
+ *data++ = s->rx_fcs_errs;
+ *data++ = s->rx_symbol_errs;
+ *data++ = s->rx_short;
+ *data++ = s->rx_jabber;
+ *data++ = s->rx_too_long;
+ *data++ = s->rx_fifo_ovfl;
+
+ *data++ = s->rx_frames_64;
+ *data++ = s->rx_frames_65_127;
+ *data++ = s->rx_frames_128_255;
+ *data++ = s->rx_frames_256_511;
+ *data++ = s->rx_frames_512_1023;
+ *data++ = s->rx_frames_1024_1518;
+ *data++ = s->rx_frames_1519_max;
+
+ *data++ = pi->phy.fifo_errors;
+
+ *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
+ *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
+ *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
+ *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
+ *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
+ *data++ = 0;
+ *data++ = 0;
+ *data++ = 0;
+ *data++ = s->rx_cong_drops;
+
+ *data++ = s->num_toggled;
+ *data++ = s->num_resets;
+
+ *data++ = s->link_faults;
+}
+
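+/*
+ * Copy the contiguous register range [start, end] into the dump buffer at
+ * the offset matching the register addresses, preserving the chip layout.
+ */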
+static inline void reg_block_dump(struct adapter *ap, void *buf,
+ unsigned int start, unsigned int end)
+{
+ u32 *p = buf + start;
+
+ for (; start <= end; start += sizeof(u32))
+ *p++ = t3_read_reg(ap, start);
+}
+
+static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *buf)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *ap = pi->adapter;
+
+ /*
+ * Version scheme:
+ * bits 0..9: chip version
+ * bits 10..15: chip revision
+ * bit 31: set for PCIe cards
+ */
+ regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
+
+ /*
+ * We skip the MAC statistics registers because they are clear-on-read.
+ * Also reading multi-register stats would need to synchronize with the
+ * periodic mac stats accumulation. Hard to justify the complexity.
+ */
+ memset(buf, 0, T3_REGMAP_SIZE);
+ reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
+ reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
+ reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
+ reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
+ reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
+ reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
+ XGM_REG(A_XGM_SERDES_STAT3, 1));
+ reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
+ XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
+}
+
+static int restart_autoneg(struct net_device *dev)
+{
+ struct port_info *p = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+ if (p->link_config.autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+ p->phy.ops->autoneg_restart(&p->phy);
+ return 0;
+}
+
+static int set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
+
+ case ETHTOOL_ID_OFF:
+ t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
+ break;
+
+ case ETHTOOL_ID_ON:
+ case ETHTOOL_ID_INACTIVE:
+ t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+ F_GPIO0_OUT_VAL);
+ }
+
+ return 0;
+}
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct port_info *p = netdev_priv(dev);
+
+ cmd->supported = p->link_config.supported;
+ cmd->advertising = p->link_config.advertising;
+
+ if (netif_carrier_ok(dev)) {
+ ethtool_cmd_speed_set(cmd, p->link_config.speed);
+ cmd->duplex = p->link_config.duplex;
+ } else {
+ ethtool_cmd_speed_set(cmd, -1);
+ cmd->duplex = -1;
+ }
+
+ cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+ cmd->phy_address = p->phy.mdio.prtad;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->autoneg = p->link_config.autoneg;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+ return 0;
+}
+
+static int speed_duplex_to_caps(int speed, int duplex)
+{
+ int cap = 0;
+
+ switch (speed) {
+ case SPEED_10:
+ if (duplex == DUPLEX_FULL)
+ cap = SUPPORTED_10baseT_Full;
+ else
+ cap = SUPPORTED_10baseT_Half;
+ break;
+ case SPEED_100:
+ if (duplex == DUPLEX_FULL)
+ cap = SUPPORTED_100baseT_Full;
+ else
+ cap = SUPPORTED_100baseT_Half;
+ break;
+ case SPEED_1000:
+ if (duplex == DUPLEX_FULL)
+ cap = SUPPORTED_1000baseT_Full;
+ else
+ cap = SUPPORTED_1000baseT_Half;
+ break;
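+	/* 10G is full duplex only; half duplex maps to no capability */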
+ case SPEED_10000:
+ if (duplex == DUPLEX_FULL)
+ cap = SUPPORTED_10000baseT_Full;
+ }
+ return cap;
+}
+
+#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
+ ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
+ ADVERTISED_10000baseT_Full)
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct port_info *p = netdev_priv(dev);
+ struct link_config *lc = &p->link_config;
+
+ if (!(lc->supported & SUPPORTED_Autoneg)) {
+ /*
+ * PHY offers a single speed/duplex. See if that's what's
+ * being requested.
+ */
+ if (cmd->autoneg == AUTONEG_DISABLE) {
+ u32 speed = ethtool_cmd_speed(cmd);
+ int cap = speed_duplex_to_caps(speed, cmd->duplex);
+ if (lc->supported & cap)
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ if (cmd->autoneg == AUTONEG_DISABLE) {
+ u32 speed = ethtool_cmd_speed(cmd);
+ int cap = speed_duplex_to_caps(speed, cmd->duplex);
+
+ if (!(lc->supported & cap) || (speed == SPEED_1000))
+ return -EINVAL;
+ lc->requested_speed = speed;
+ lc->requested_duplex = cmd->duplex;
+ lc->advertising = 0;
+ } else {
+ cmd->advertising &= ADVERTISED_MASK;
+ cmd->advertising &= lc->supported;
+ if (!cmd->advertising)
+ return -EINVAL;
+ lc->requested_speed = SPEED_INVALID;
+ lc->requested_duplex = DUPLEX_INVALID;
+ lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
+ }
+ lc->autoneg = cmd->autoneg;
+ if (netif_running(dev))
+ t3_link_start(&p->phy, &p->mac, lc);
+ return 0;
+}
+
+static void get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct port_info *p = netdev_priv(dev);
+
+ epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
+ epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
+ epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
+}
+
+static int set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct port_info *p = netdev_priv(dev);
+ struct link_config *lc = &p->link_config;
+
+ if (epause->autoneg == AUTONEG_DISABLE)
+ lc->requested_fc = 0;
+ else if (lc->supported & SUPPORTED_Autoneg)
+ lc->requested_fc = PAUSE_AUTONEG;
+ else
+ return -EINVAL;
+
+ if (epause->rx_pause)
+ lc->requested_fc |= PAUSE_RX;
+ if (epause->tx_pause)
+ lc->requested_fc |= PAUSE_TX;
+ if (lc->autoneg == AUTONEG_ENABLE) {
+ if (netif_running(dev))
+ t3_link_start(&p->phy, &p->mac, lc);
+ } else {
+ lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+ if (netif_running(dev))
+ t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
+ }
+ return 0;
+}
+
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
+
+ e->rx_max_pending = MAX_RX_BUFFERS;
+ e->rx_mini_max_pending = 0;
+ e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
+ e->tx_max_pending = MAX_TXQ_ENTRIES;
+
+ e->rx_pending = q->fl_size;
+ e->rx_mini_pending = q->rspq_size;
+ e->rx_jumbo_pending = q->jumbo_size;
+ e->tx_pending = q->txq_size[0];
+}
+
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ struct qset_params *q;
+ int i;
+
+ if (e->rx_pending > MAX_RX_BUFFERS ||
+ e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
+ e->tx_pending > MAX_TXQ_ENTRIES ||
+ e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
+ e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
+ e->rx_pending < MIN_FL_ENTRIES ||
+ e->rx_jumbo_pending < MIN_FL_ENTRIES ||
+ e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
+ return -EINVAL;
+
+ if (adapter->flags & FULL_INIT_DONE)
+ return -EBUSY;
+
+ q = &adapter->params.sge.qset[pi->first_qset];
+ for (i = 0; i < pi->nqsets; ++i, ++q) {
+ q->rspq_size = e->rx_mini_pending;
+ q->fl_size = e->rx_pending;
+ q->jumbo_size = e->rx_jumbo_pending;
+ q->txq_size[0] = e->tx_pending;
+ q->txq_size[1] = e->tx_pending;
+ q->txq_size[2] = e->tx_pending;
+ }
+ return 0;
+}
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ struct qset_params *qsp;
+ struct sge_qset *qs;
+ int i;
+
+ if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
+ return -EINVAL;
+
+ for (i = 0; i < pi->nqsets; i++) {
+ qsp = &adapter->params.sge.qset[i];
+ qs = &adapter->sge.qs[i];
+ qsp->coalesce_usecs = c->rx_coalesce_usecs;
+ t3_update_qset_coalesce(qs, qsp);
+ }
+
+ return 0;
+}
+
+static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ struct qset_params *q = adapter->params.sge.qset;
+
+ c->rx_coalesce_usecs = q->coalesce_usecs;
+ return 0;
+}
+
+static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
+		      u8 *data)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ int i, err = 0;
+
+ u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
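+	/*
+	 * The SEEPROM is accessed 32 bits at a time, so read whole words
+	 * covering the requested window and copy out only the bytes asked for.
+	 */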
+ e->magic = EEPROM_MAGIC;
+ for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
+		err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
+
+ if (!err)
+ memcpy(data, buf + e->offset, e->len);
+ kfree(buf);
+ return err;
+}
+
+static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+		      u8 *data)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ u32 aligned_offset, aligned_len;
+ __le32 *p;
+ u8 *buf;
+ int err;
+
+ if (eeprom->magic != EEPROM_MAGIC)
+ return -EINVAL;
+
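+	/*
+	 * Writes are 32 bits wide as well, so an unaligned request needs a
+	 * read-modify-write of the partial words at either end of the range.
+	 */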
+ aligned_offset = eeprom->offset & ~3;
+ aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
+
+ if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
+ buf = kmalloc(aligned_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
+ if (!err && aligned_len > 4)
+ err = t3_seeprom_read(adapter,
+ aligned_offset + aligned_len - 4,
+					      (__le32 *)&buf[aligned_len - 4]);
+ if (err)
+ goto out;
+ memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
+ } else
+ buf = data;
+
+ err = t3_seeprom_wp(adapter, 0);
+ if (err)
+ goto out;
+
+ for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
+ err = t3_seeprom_write(adapter, aligned_offset, *p);
+ aligned_offset += 4;
+ }
+
+ if (!err)
+ err = t3_seeprom_wp(adapter, 1);
+out:
+ if (buf != data)
+ kfree(buf);
+ return err;
+}
+
+static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static const struct ethtool_ops cxgb_ethtool_ops = {
+ .get_settings = get_settings,
+ .set_settings = set_settings,
+ .get_drvinfo = get_drvinfo,
+ .get_msglevel = get_msglevel,
+ .set_msglevel = set_msglevel,
+ .get_ringparam = get_sge_param,
+ .set_ringparam = set_sge_param,
+ .get_coalesce = get_coalesce,
+ .set_coalesce = set_coalesce,
+ .get_eeprom_len = get_eeprom_len,
+ .get_eeprom = get_eeprom,
+ .set_eeprom = set_eeprom,
+ .get_pauseparam = get_pauseparam,
+ .set_pauseparam = set_pauseparam,
+ .get_link = ethtool_op_get_link,
+ .get_strings = get_strings,
+ .set_phys_id = set_phys_id,
+ .nway_reset = restart_autoneg,
+ .get_sset_count = get_sset_count,
+ .get_ethtool_stats = get_stats,
+ .get_regs_len = get_regs_len,
+ .get_regs = get_regs,
+ .get_wol = get_wol,
+};
+
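+/*
+ * Range check helper for the extension ioctl below: a negative value means
+ * the parameter was not supplied, which passes the check and leaves the
+ * current setting unchanged.
+ */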
+static int in_range(int val, int lo, int hi)
+{
+ return val < 0 || (val <= hi && val >= lo);
+}
+
+static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ u32 cmd;
+ int ret;
+
+ if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case CHELSIO_SET_QSET_PARAMS:{
+ int i;
+ struct qset_params *q;
+ struct ch_qset_params t;
+ int q1 = pi->first_qset;
+ int nqsets = pi->nqsets;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&t, useraddr, sizeof(t)))
+ return -EFAULT;
+ if (t.qset_idx >= SGE_QSETS)
+ return -EINVAL;
+ if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
+ !in_range(t.cong_thres, 0, 255) ||
+ !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
+ MAX_TXQ_ENTRIES) ||
+ !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
+ MAX_TXQ_ENTRIES) ||
+ !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
+ MAX_CTRL_TXQ_ENTRIES) ||
+ !in_range(t.fl_size[0], MIN_FL_ENTRIES,
+ MAX_RX_BUFFERS) ||
+ !in_range(t.fl_size[1], MIN_FL_ENTRIES,
+ MAX_RX_JUMBO_BUFFERS) ||
+ !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
+ MAX_RSPQ_ENTRIES))
+ return -EINVAL;
+
+ if ((adapter->flags & FULL_INIT_DONE) &&
+ (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
+ t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
+ t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
+ t.polling >= 0 || t.cong_thres >= 0))
+ return -EBUSY;
+
+ /* Allow setting of any available qset when offload enabled */
+ if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
+ q1 = 0;
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ nqsets += pi->first_qset + pi->nqsets;
+ }
+ }
+
+ if (t.qset_idx < q1)
+ return -EINVAL;
+ if (t.qset_idx > q1 + nqsets - 1)
+ return -EINVAL;
+
+ q = &adapter->params.sge.qset[t.qset_idx];
+
+ if (t.rspq_size >= 0)
+ q->rspq_size = t.rspq_size;
+ if (t.fl_size[0] >= 0)
+ q->fl_size = t.fl_size[0];
+ if (t.fl_size[1] >= 0)
+ q->jumbo_size = t.fl_size[1];
+ if (t.txq_size[0] >= 0)
+ q->txq_size[0] = t.txq_size[0];
+ if (t.txq_size[1] >= 0)
+ q->txq_size[1] = t.txq_size[1];
+ if (t.txq_size[2] >= 0)
+ q->txq_size[2] = t.txq_size[2];
+ if (t.cong_thres >= 0)
+ q->cong_thres = t.cong_thres;
+ if (t.intr_lat >= 0) {
+ struct sge_qset *qs =
+ &adapter->sge.qs[t.qset_idx];
+
+ q->coalesce_usecs = t.intr_lat;
+ t3_update_qset_coalesce(qs, q);
+ }
+ if (t.polling >= 0) {
+ if (adapter->flags & USING_MSIX)
+ q->polling = t.polling;
+ else {
+ /* No polling with INTx for T3A */
+ if (adapter->params.rev == 0 &&
+ !(adapter->flags & USING_MSI))
+ t.polling = 0;
+
+ for (i = 0; i < SGE_QSETS; i++) {
+					q = &adapter->params.sge.qset[i];
+ q->polling = t.polling;
+ }
+ }
+ }
+
+ if (t.lro >= 0) {
+ if (t.lro)
+ dev->wanted_features |= NETIF_F_GRO;
+ else
+ dev->wanted_features &= ~NETIF_F_GRO;
+ netdev_update_features(dev);
+ }
+
+ break;
+ }
+ case CHELSIO_GET_QSET_PARAMS:{
+ struct qset_params *q;
+ struct ch_qset_params t;
+ int q1 = pi->first_qset;
+ int nqsets = pi->nqsets;
+ int i;
+
+ if (copy_from_user(&t, useraddr, sizeof(t)))
+ return -EFAULT;
+
+ /* Display qsets for all ports when offload enabled */
+ if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
+ q1 = 0;
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ nqsets = pi->first_qset + pi->nqsets;
+ }
+ }
+
+ if (t.qset_idx >= nqsets)
+ return -EINVAL;
+
+ q = &adapter->params.sge.qset[q1 + t.qset_idx];
+ t.rspq_size = q->rspq_size;
+ t.txq_size[0] = q->txq_size[0];
+ t.txq_size[1] = q->txq_size[1];
+ t.txq_size[2] = q->txq_size[2];
+ t.fl_size[0] = q->fl_size;
+ t.fl_size[1] = q->jumbo_size;
+ t.polling = q->polling;
+ t.lro = !!(dev->features & NETIF_F_GRO);
+ t.intr_lat = q->coalesce_usecs;
+ t.cong_thres = q->cong_thres;
+ t.qnum = q1;
+
+ if (adapter->flags & USING_MSIX)
+ t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
+ else
+ t.vector = adapter->pdev->irq;
+
+ if (copy_to_user(useraddr, &t, sizeof(t)))
+ return -EFAULT;
+ break;
+ }
+ case CHELSIO_SET_QSET_NUM:{
+ struct ch_reg edata;
+ unsigned int i, first_qset = 0, other_qsets = 0;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (adapter->flags & FULL_INIT_DONE)
+ return -EBUSY;
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+ if (edata.val < 1 ||
+ (edata.val > 1 && !(adapter->flags & USING_MSIX)))
+ return -EINVAL;
+
+ for_each_port(adapter, i)
+ if (adapter->port[i] && adapter->port[i] != dev)
+ other_qsets += adap2pinfo(adapter, i)->nqsets;
+
+ if (edata.val + other_qsets > SGE_QSETS)
+ return -EINVAL;
+
+ pi->nqsets = edata.val;
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]) {
+ pi = adap2pinfo(adapter, i);
+ pi->first_qset = first_qset;
+ first_qset += pi->nqsets;
+ }
+ break;
+ }
+ case CHELSIO_GET_QSET_NUM:{
+ struct ch_reg edata;
+
+ memset(&edata, 0, sizeof(struct ch_reg));
+
+ edata.cmd = CHELSIO_GET_QSET_NUM;
+ edata.val = pi->nqsets;
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ break;
+ }
+ case CHELSIO_LOAD_FW:{
+ u8 *fw_data;
+ struct ch_mem_range t;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ if (copy_from_user(&t, useraddr, sizeof(t)))
+ return -EFAULT;
+ /* Check t.len sanity ? */
+ fw_data = memdup_user(useraddr + sizeof(t), t.len);
+ if (IS_ERR(fw_data))
+ return PTR_ERR(fw_data);
+
+ ret = t3_load_fw(adapter, fw_data, t.len);
+ kfree(fw_data);
+ if (ret)
+ return ret;
+ break;
+ }
+ case CHELSIO_SETMTUTAB:{
+ struct ch_mtus m;
+ int i;
+
+ if (!is_offload(adapter))
+ return -EOPNOTSUPP;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (offload_running(adapter))
+ return -EBUSY;
+ if (copy_from_user(&m, useraddr, sizeof(m)))
+ return -EFAULT;
+ if (m.nmtus != NMTUS)
+ return -EINVAL;
+ if (m.mtus[0] < 81) /* accommodate SACK */
+ return -EINVAL;
+
+ /* MTUs must be in ascending order */
+ for (i = 1; i < NMTUS; ++i)
+ if (m.mtus[i] < m.mtus[i - 1])
+ return -EINVAL;
+
+ memcpy(adapter->params.mtus, m.mtus,
+ sizeof(adapter->params.mtus));
+ break;
+ }
+ case CHELSIO_GET_PM:{
+ struct tp_params *p = &adapter->params.tp;
+ struct ch_pm m = {.cmd = CHELSIO_GET_PM };
+
+ if (!is_offload(adapter))
+ return -EOPNOTSUPP;
+ m.tx_pg_sz = p->tx_pg_size;
+ m.tx_num_pg = p->tx_num_pgs;
+ m.rx_pg_sz = p->rx_pg_size;
+ m.rx_num_pg = p->rx_num_pgs;
+ m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
+ if (copy_to_user(useraddr, &m, sizeof(m)))
+ return -EFAULT;
+ break;
+ }
+ case CHELSIO_SET_PM:{
+ struct ch_pm m;
+ struct tp_params *p = &adapter->params.tp;
+
+ if (!is_offload(adapter))
+ return -EOPNOTSUPP;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (adapter->flags & FULL_INIT_DONE)
+ return -EBUSY;
+ if (copy_from_user(&m, useraddr, sizeof(m)))
+ return -EFAULT;
+ if (!is_power_of_2(m.rx_pg_sz) ||
+ !is_power_of_2(m.tx_pg_sz))
+ return -EINVAL; /* not power of 2 */
+ if (!(m.rx_pg_sz & 0x14000))
+ return -EINVAL; /* not 16KB or 64KB */
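+			/* tx page size must be a power of 4 from 16KB to 16MB */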
+ if (!(m.tx_pg_sz & 0x1554000))
+ return -EINVAL;
+ if (m.tx_num_pg == -1)
+ m.tx_num_pg = p->tx_num_pgs;
+ if (m.rx_num_pg == -1)
+ m.rx_num_pg = p->rx_num_pgs;
+ if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
+ return -EINVAL;
+ if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
+ m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
+ return -EINVAL;
+ p->rx_pg_size = m.rx_pg_sz;
+ p->tx_pg_size = m.tx_pg_sz;
+ p->rx_num_pgs = m.rx_num_pg;
+ p->tx_num_pgs = m.tx_num_pg;
+ break;
+ }
+ case CHELSIO_GET_MEM:{
+ struct ch_mem_range t;
+ struct mc7 *mem;
+ u64 buf[32];
+
+ if (!is_offload(adapter))
+ return -EOPNOTSUPP;
+ if (!(adapter->flags & FULL_INIT_DONE))
+ return -EIO; /* need the memory controllers */
+ if (copy_from_user(&t, useraddr, sizeof(t)))
+ return -EFAULT;
+ if ((t.addr & 7) || (t.len & 7))
+ return -EINVAL;
+ if (t.mem_id == MEM_CM)
+ mem = &adapter->cm;
+ else if (t.mem_id == MEM_PMRX)
+ mem = &adapter->pmrx;
+ else if (t.mem_id == MEM_PMTX)
+ mem = &adapter->pmtx;
+ else
+ return -EINVAL;
+
+ /*
+ * Version scheme:
+ * bits 0..9: chip version
+ * bits 10..15: chip revision
+ */
+ t.version = 3 | (adapter->params.rev << 10);
+ if (copy_to_user(useraddr, &t, sizeof(t)))
+ return -EFAULT;
+
+ /*
+ * Read 256 bytes at a time as len can be large and we don't
+ * want to use huge intermediate buffers.
+ */
+ useraddr += sizeof(t); /* advance to start of buffer */
+ while (t.len) {
+ unsigned int chunk =
+ min_t(unsigned int, t.len, sizeof(buf));
+
+			ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
+ if (ret)
+ return ret;
+ if (copy_to_user(useraddr, buf, chunk))
+ return -EFAULT;
+ useraddr += chunk;
+ t.addr += chunk;
+ t.len -= chunk;
+ }
+ break;
+ }
+ case CHELSIO_SET_TRACE_FILTER:{
+ struct ch_trace t;
+ const struct trace_params *tp;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (!offload_running(adapter))
+ return -EAGAIN;
+ if (copy_from_user(&t, useraddr, sizeof(t)))
+ return -EFAULT;
+
+ tp = (const struct trace_params *)&t.sip;
+ if (t.config_tx)
+ t3_config_trace_filter(adapter, tp, 0,
+ t.invert_match,
+ t.trace_tx);
+ if (t.config_rx)
+ t3_config_trace_filter(adapter, tp, 1,
+ t.invert_match,
+ t.trace_rx);
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(req);
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ switch (cmd) {
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ /* Convert phy_id from older PRTAD/DEVAD format */
+ if (is_10G(adapter) &&
+ !mdio_phy_id_is_c45(data->phy_id) &&
+ (data->phy_id & 0x1f00) &&
+ !(data->phy_id & 0xe0e0))
+ data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
+ data->phy_id & 0x1f);
+ /* FALLTHRU */
+ case SIOCGMIIPHY:
+ return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
+ case SIOCCHIOCTL:
+ return cxgb_extension_ioctl(dev, req->ifr_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ int ret;
+
+ if (new_mtu < 81) /* accommodate SACK */
+ return -EINVAL;
+ if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
+ return ret;
+ dev->mtu = new_mtu;
+ init_port_mtus(adapter);
+ if (adapter->params.rev == 0 && offload_running(adapter))
+ t3_load_mtus(adapter, adapter->params.mtus,
+ adapter->params.a_wnd, adapter->params.b_wnd,
+ adapter->port[0]->mtu);
+ return 0;
+}
+
+static int cxgb_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
+ if (offload_running(adapter))
+ write_smt_entry(adapter, pi->port_id);
+ return 0;
+}
+
+/**
+ * t3_synchronize_rx - wait for current Rx processing on a port to complete
+ * @adap: the adapter
+ * @p: the port
+ *
+ * Ensures that current Rx processing on any of the queues associated with
+ * the given port completes before returning. We do this by acquiring and
+ * releasing the locks of the response queues associated with the port.
+ */
+static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
+{
+ int i;
+
+ for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
+ struct sge_rspq *q = &adap->sge.qs[i].rspq;
+
+ spin_lock_irq(&q->lock);
+ spin_unlock_irq(&q->lock);
+ }
+}
+
+static void cxgb_vlan_mode(struct net_device *dev, u32 features)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (adapter->params.rev > 0) {
+ t3_set_vlan_accel(adapter, 1 << pi->port_id,
+ features & NETIF_F_HW_VLAN_RX);
+ } else {
+ /* single control for all ports */
+ unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX;
+
+ for_each_port(adapter, i)
+ have_vlans |=
+ adapter->port[i]->features & NETIF_F_HW_VLAN_RX;
+
+ t3_set_vlan_accel(adapter, 1, have_vlans);
+ }
+ t3_synchronize_rx(adapter, pi);
+}
+
+static u32 cxgb_fix_features(struct net_device *dev, u32 features)
+{
+ /*
+ * Since there is no support for separate rx/tx vlan accel
+ * enable/disable make sure tx flag is always in same state as rx.
+ */
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ return features;
+}
+
+static int cxgb_set_features(struct net_device *dev, u32 features)
+{
+ u32 changed = dev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ cxgb_vlan_mode(dev, features);
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cxgb_netpoll(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ int qidx;
+
+ for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
+ struct sge_qset *qs = &adapter->sge.qs[qidx];
+ void *source;
+
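+		/*
+		 * MSI-X handlers take the queue set as their cookie, while
+		 * the MSI/INTx handler takes the adapter.
+		 */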
+ if (adapter->flags & USING_MSIX)
+ source = qs;
+ else
+ source = adapter;
+
+ t3_intr_handler(adapter, qs->rspq.polling) (0, source);
+ }
+}
+#endif
+
+/*
+ * Periodic accumulation of MAC statistics.
+ */
+static void mac_stats_update(struct adapter *adapter)
+{
+ int i;
+
+ for_each_port(adapter, i) {
+ struct net_device *dev = adapter->port[i];
+ struct port_info *p = netdev_priv(dev);
+
+ if (netif_running(dev)) {
+ spin_lock(&adapter->stats_lock);
+ t3_mac_update_stats(&p->mac);
+ spin_unlock(&adapter->stats_lock);
+ }
+ }
+}
+
+static void check_link_status(struct adapter *adapter)
+{
+ int i;
+
+ for_each_port(adapter, i) {
+ struct net_device *dev = adapter->port[i];
+ struct port_info *p = netdev_priv(dev);
+ int link_fault;
+
+ spin_lock_irq(&adapter->work_lock);
+ link_fault = p->link_fault;
+ spin_unlock_irq(&adapter->work_lock);
+
+ if (link_fault) {
+ t3_link_fault(adapter, i);
+ continue;
+ }
+
+ if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
+ t3_xgm_intr_disable(adapter, i);
+ t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
+
+ t3_link_changed(adapter, i);
+ t3_xgm_intr_enable(adapter, i);
+ }
+ }
+}
+
+static void check_t3b2_mac(struct adapter *adapter)
+{
+ int i;
+
+ if (!rtnl_trylock()) /* synchronize with ifdown */
+ return;
+
+ for_each_port(adapter, i) {
+ struct net_device *dev = adapter->port[i];
+ struct port_info *p = netdev_priv(dev);
+ int status;
+
+ if (!netif_running(dev))
+ continue;
+
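+		/*
+		 * The watchdog returns 1 if it toggled the MAC's TX enable
+		 * and 2 if the MAC is hung and needs full reinitialization.
+		 */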
+ status = 0;
+ if (netif_running(dev) && netif_carrier_ok(dev))
+ status = t3b2_mac_watchdog_task(&p->mac);
+ if (status == 1)
+ p->mac.stats.num_toggled++;
+ else if (status == 2) {
+ struct cmac *mac = &p->mac;
+
+ t3_mac_set_mtu(mac, dev->mtu);
+ t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
+ cxgb_set_rxmode(dev);
+ t3_link_start(&p->phy, mac, &p->link_config);
+ t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+ t3_port_intr_enable(adapter, p->port_id);
+ p->mac.stats.num_resets++;
+ }
+ }
+ rtnl_unlock();
+}
+
+static void t3_adap_check_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ adap_check_task.work);
+ const struct adapter_params *p = &adapter->params;
+ int port;
+ unsigned int v, status, reset;
+
+ adapter->check_task_cnt++;
+
+ check_link_status(adapter);
+
+ /* Accumulate MAC stats if needed */
+ if (!p->linkpoll_period ||
+ (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
+ p->stats_update_period) {
+ mac_stats_update(adapter);
+ adapter->check_task_cnt = 0;
+ }
+
+ if (p->rev == T3_REV_B2)
+ check_t3b2_mac(adapter);
+
+ /*
+	 * Scan the XGMACs to check for various conditions which we want to
+ * monitor in a periodic polling manner rather than via an interrupt
+ * condition. This is used for conditions which would otherwise flood
+ * the system with interrupts and we only really need to know that the
+ * conditions are "happening" ... For each condition we count the
+ * detection of the condition and reset it for the next polling loop.
+ */
+ for_each_port(adapter, port) {
+ struct cmac *mac = &adap2pinfo(adapter, port)->mac;
+ u32 cause;
+
+ cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
+ reset = 0;
+ if (cause & F_RXFIFO_OVERFLOW) {
+ mac->stats.rx_fifo_ovfl++;
+ reset |= F_RXFIFO_OVERFLOW;
+ }
+
+ t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
+ }
+
+ /*
+ * We do the same as above for FL_EMPTY interrupts.
+ */
+ status = t3_read_reg(adapter, A_SG_INT_CAUSE);
+ reset = 0;
+
+ if (status & F_FLEMPTY) {
+ struct sge_qset *qs = &adapter->sge.qs[0];
+ int i = 0;
+
+ reset |= F_FLEMPTY;
+
+ v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
+ 0xffff;
+
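+		/*
+		 * One status bit per free list; each qset owns two (fl[0]
+		 * and fl[1]), so alternate between them and advance to the
+		 * next qset every other bit.
+		 */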
+ while (v) {
+ qs->fl[i].empty += (v & 1);
+ if (i)
+ qs++;
+ i ^= 1;
+ v >>= 1;
+ }
+ }
+
+ t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
+
+ /* Schedule the next check update if any port is active. */
+ spin_lock_irq(&adapter->work_lock);
+ if (adapter->open_device_map & PORT_MASK)
+ schedule_chk_task(adapter);
+ spin_unlock_irq(&adapter->work_lock);
+}
+
+static void db_full_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ db_full_task);
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
+}
+
+static void db_empty_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ db_empty_task);
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
+}
+
+static void db_drop_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ db_drop_task);
+ unsigned long delay = 1000;
+ unsigned short r;
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
+
+ /*
+ * Sleep a while before ringing the driver qset dbs.
+	 * The delay is between 1000 and 2023 usecs.
+ */
+ get_random_bytes(&r, 2);
+ delay += r & 1023;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(delay));
+ ring_dbs(adapter);
+}
+
+/*
+ * Processes external (PHY) interrupts in process context.
+ */
+static void ext_intr_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ ext_intr_handler_task);
+ int i;
+
+ /* Disable link fault interrupts */
+ for_each_port(adapter, i) {
+ struct net_device *dev = adapter->port[i];
+ struct port_info *p = netdev_priv(dev);
+
+ t3_xgm_intr_disable(adapter, i);
+ t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
+ }
+
+ /* Re-enable link fault interrupts */
+ t3_phy_intr_handler(adapter);
+
+ for_each_port(adapter, i)
+ t3_xgm_intr_enable(adapter, i);
+
+ /* Now reenable external interrupts */
+ spin_lock_irq(&adapter->work_lock);
+ if (adapter->slow_intr_mask) {
+ adapter->slow_intr_mask |= F_T3DBG;
+ t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
+ t3_write_reg(adapter, A_PL_INT_ENABLE0,
+ adapter->slow_intr_mask);
+ }
+ spin_unlock_irq(&adapter->work_lock);
+}
+
+/*
+ * Interrupt-context handler for external (PHY) interrupts.
+ */
+void t3_os_ext_intr_handler(struct adapter *adapter)
+{
+ /*
+ * Schedule a task to handle external interrupts as they may be slow
+ * and we use a mutex to protect MDIO registers. We disable PHY
+ * interrupts in the meantime and let the task reenable them when
+ * it's done.
+ */
+ spin_lock(&adapter->work_lock);
+ if (adapter->slow_intr_mask) {
+ adapter->slow_intr_mask &= ~F_T3DBG;
+ t3_write_reg(adapter, A_PL_INT_ENABLE0,
+ adapter->slow_intr_mask);
+ queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
+ }
+ spin_unlock(&adapter->work_lock);
+}
+
+void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
+{
+ struct net_device *netdev = adapter->port[port_id];
+ struct port_info *pi = netdev_priv(netdev);
+
+ spin_lock(&adapter->work_lock);
+ pi->link_fault = 1;
+ spin_unlock(&adapter->work_lock);
+}
+
+static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
+{
+ int i, ret = 0;
+
+ if (is_offload(adapter) &&
+ test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
+ offload_close(&adapter->tdev);
+ }
+
+ /* Stop all ports */
+ for_each_port(adapter, i) {
+ struct net_device *netdev = adapter->port[i];
+
+ if (netif_running(netdev))
+ __cxgb_close(netdev, on_wq);
+ }
+
+ /* Stop SGE timers */
+ t3_stop_sge_timers(adapter);
+
+ adapter->flags &= ~FULL_INIT_DONE;
+
+ if (reset)
+ ret = t3_reset_adapter(adapter);
+
+ pci_disable_device(adapter->pdev);
+
+ return ret;
+}
+
+static int t3_reenable_adapter(struct adapter *adapter)
+{
+ if (pci_enable_device(adapter->pdev)) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ goto err;
+ }
+ pci_set_master(adapter->pdev);
+ pci_restore_state(adapter->pdev);
+ pci_save_state(adapter->pdev);
+
+ /* Free sge resources */
+ t3_free_sge_resources(adapter);
+
+ if (t3_replay_prep_adapter(adapter))
+ goto err;
+
+ return 0;
+err:
+ return -1;
+}
+
+static void t3_resume_ports(struct adapter *adapter)
+{
+ int i;
+
+ /* Restart the ports */
+ for_each_port(adapter, i) {
+ struct net_device *netdev = adapter->port[i];
+
+ if (netif_running(netdev)) {
+ if (cxgb_open(netdev)) {
+ dev_err(&adapter->pdev->dev,
+ "can't bring device back up"
+ " after reset\n");
+ continue;
+ }
+ }
+ }
+
+ if (is_offload(adapter) && !ofld_disable)
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
+}
+
+/*
+ * Processes a fatal error: bring the ports down, reset the chip, and bring
+ * the ports back up.
+ */
+static void fatal_error_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ fatal_error_handler_task);
+ int err = 0;
+
+ rtnl_lock();
+ err = t3_adapter_error(adapter, 1, 1);
+ if (!err)
+ err = t3_reenable_adapter(adapter);
+ if (!err)
+ t3_resume_ports(adapter);
+
+ CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
+ rtnl_unlock();
+}
+
+void t3_fatal_err(struct adapter *adapter)
+{
+ unsigned int fw_status[4];
+
+ if (adapter->flags & FULL_INIT_DONE) {
+ t3_sge_stop(adapter);
+ t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
+ t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
+ t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
+ t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
+
+ spin_lock(&adapter->work_lock);
+ t3_intr_disable(adapter);
+ queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
+ spin_unlock(&adapter->work_lock);
+ }
+ CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
+ if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
+ CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
+ fw_status[0], fw_status[1],
+ fw_status[2], fw_status[3]);
+}
+
+/**
+ * t3_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ t3_adapter_error(adapter, 0, 0);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * t3_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+
+ if (!t3_reenable_adapter(adapter))
+ return PCI_ERS_RESULT_RECOVERED;
+
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+/**
+ * t3_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation.
+ */
+static void t3_io_resume(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+
+ CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
+ t3_read_reg(adapter, A_PCIE_PEX_ERR));
+
+ t3_resume_ports(adapter);
+}
+
+static struct pci_error_handlers t3_err_handler = {
+ .error_detected = t3_io_error_detected,
+ .slot_reset = t3_io_slot_reset,
+ .resume = t3_io_resume,
+};
+
+/*
+ * Set the number of qsets based on the number of CPUs and the number of ports,
+ * not to exceed the number of available qsets, assuming there are enough qsets
+ * per port in HW.
+ */
+static void set_nqsets(struct adapter *adap)
+{
+ int i, j = 0;
+ int num_cpus = num_online_cpus();
+ int hwports = adap->params.nports;
+ int nqsets = adap->msix_nvectors - 1;
+
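+	/* MSI-X vector 0 is reserved for slow-path interrupts */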
+ if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
+ if (hwports == 2 &&
+ (hwports * nqsets > SGE_QSETS ||
+ num_cpus >= nqsets / hwports))
+ nqsets /= hwports;
+ if (nqsets > num_cpus)
+ nqsets = num_cpus;
+ if (nqsets < 1 || hwports == 4)
+ nqsets = 1;
+ } else
+ nqsets = 1;
+
+ for_each_port(adap, i) {
+ struct port_info *pi = adap2pinfo(adap, i);
+
+ pi->first_qset = j;
+ pi->nqsets = nqsets;
+ j = pi->first_qset + nqsets;
+
+ dev_info(&adap->pdev->dev,
+ "Port %d using %d queue sets.\n", i, nqsets);
+ }
+}
+
+static int __devinit cxgb_enable_msix(struct adapter *adap)
+{
+ struct msix_entry entries[SGE_QSETS + 1];
+ int vectors;
+ int i, err;
+
+ vectors = ARRAY_SIZE(entries);
+ for (i = 0; i < vectors; ++i)
+ entries[i].entry = i;
+
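+	/*
+	 * pci_enable_msix() returns the number of vectors actually available
+	 * when it cannot satisfy the request, so retry with smaller counts
+	 * until it either succeeds or reports a hard failure.
+	 */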
+ while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
+ vectors = err;
+
+ if (err < 0)
+ pci_disable_msix(adap->pdev);
+
+ if (!err && vectors < (adap->params.nports + 1)) {
+ pci_disable_msix(adap->pdev);
+ err = -1;
+ }
+
+ if (!err) {
+ for (i = 0; i < vectors; ++i)
+ adap->msix_info[i].vec = entries[i].vector;
+ adap->msix_nvectors = vectors;
+ }
+
+ return err;
+}
+
+static void __devinit print_port_info(struct adapter *adap,
+ const struct adapter_info *ai)
+{
+ static const char *pci_variant[] = {
+ "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
+ };
+
+ int i;
+ char buf[80];
+
+ if (is_pcie(adap))
+ snprintf(buf, sizeof(buf), "%s x%d",
+ pci_variant[adap->params.pci.variant],
+ adap->params.pci.width);
+ else
+ snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
+ pci_variant[adap->params.pci.variant],
+ adap->params.pci.speed, adap->params.pci.width);
+
+ for_each_port(adap, i) {
+ struct net_device *dev = adap->port[i];
+ const struct port_info *pi = netdev_priv(dev);
+
+ if (!test_bit(i, &adap->registered_device_map))
+ continue;
+ printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
+ dev->name, ai->desc, pi->phy.desc,
+ is_offload(adap) ? "R" : "", adap->params.rev, buf,
+ (adap->flags & USING_MSIX) ? " MSI-X" :
+ (adap->flags & USING_MSI) ? " MSI" : "");
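+		/*
+		 * adap->name aliases the name of the first successfully
+		 * registered port, so the adapter-wide summary below prints
+		 * exactly once.
+		 */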
+ if (adap->name == dev->name && adap->params.vpd.mclk)
+ printk(KERN_INFO
+ "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
+ adap->name, t3_mc7_size(&adap->cm) >> 20,
+ t3_mc7_size(&adap->pmtx) >> 20,
+ t3_mc7_size(&adap->pmrx) >> 20,
+ adap->params.vpd.sn);
+ }
+}
+
+static const struct net_device_ops cxgb_netdev_ops = {
+ .ndo_open = cxgb_open,
+ .ndo_stop = cxgb_close,
+ .ndo_start_xmit = t3_eth_xmit,
+ .ndo_get_stats = cxgb_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = cxgb_set_rxmode,
+ .ndo_do_ioctl = cxgb_ioctl,
+ .ndo_change_mtu = cxgb_change_mtu,
+ .ndo_set_mac_address = cxgb_set_mac_addr,
+ .ndo_fix_features = cxgb_fix_features,
+ .ndo_set_features = cxgb_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cxgb_netpoll,
+#endif
+};
+
+static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+
+ memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
+ pi->iscsic.mac_addr[3] |= 0x80;
+}
+
+static int __devinit init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int version_printed;
+
+ int i, err, pci_using_dac = 0;
+ resource_size_t mmio_start, mmio_len;
+ const struct adapter_info *ai;
+ struct adapter *adapter = NULL;
+ struct port_info *pi;
+
+ if (!version_printed) {
+ printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
+ ++version_printed;
+ }
+
+ if (!cxgb3_wq) {
+ cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
+ if (!cxgb3_wq) {
+ printk(KERN_ERR DRV_NAME
+ ": cannot initialize work queue\n");
+ return -ENOMEM;
+ }
+ }
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
+ goto out;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ /* Just info, some other driver may have claimed the device. */
+ dev_info(&pdev->dev, "cannot obtain PCI resources\n");
+ goto out_disable_device;
+ }
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ pci_using_dac = 1;
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
+ "coherent allocations\n");
+ goto out_release_regions;
+ }
+ } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto out_release_regions;
+ }
+
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+
+ mmio_start = pci_resource_start(pdev, 0);
+ mmio_len = pci_resource_len(pdev, 0);
+ ai = t3_get_adapter_info(ent->driver_data);
+
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter) {
+ err = -ENOMEM;
+ goto out_release_regions;
+ }
+
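+	/*
+	 * Pre-allocate a small skb so critical control-plane messages can
+	 * still be sent when regular allocation fails under memory pressure.
+	 */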
+ adapter->nofail_skb =
+ alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
+ if (!adapter->nofail_skb) {
+ dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+
+ adapter->regs = ioremap_nocache(mmio_start, mmio_len);
+ if (!adapter->regs) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+
+ adapter->pdev = pdev;
+ adapter->name = pci_name(pdev);
+ adapter->msg_enable = dflt_msg_enable;
+ adapter->mmio_len = mmio_len;
+
+ mutex_init(&adapter->mdio_lock);
+ spin_lock_init(&adapter->work_lock);
+ spin_lock_init(&adapter->stats_lock);
+
+ INIT_LIST_HEAD(&adapter->adapter_list);
+ INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
+ INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
+
+ INIT_WORK(&adapter->db_full_task, db_full_task);
+ INIT_WORK(&adapter->db_empty_task, db_empty_task);
+ INIT_WORK(&adapter->db_drop_task, db_drop_task);
+
+ INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
+
+ for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
+ struct net_device *netdev;
+
+ netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto out_free_dev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter->port[i] = netdev;
+ pi = netdev_priv(netdev);
+ pi->adapter = adapter;
+ pi->port_id = i;
+ netif_carrier_off(netdev);
+ netdev->irq = pdev->irq;
+ netdev->mem_start = mmio_start;
+ netdev->mem_end = mmio_start + mmio_len - 1;
+ netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX;
+ netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_TX;
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->netdev_ops = &cxgb_netdev_ops;
+ SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+ }
+
+ pci_set_drvdata(pdev, adapter);
+ if (t3_prep_adapter(adapter, ai, 1) < 0) {
+ err = -ENODEV;
+ goto out_free_dev;
+ }
+
+ /*
+ * The card is now ready to go. If any errors occur during device
+ * registration we do not fail the whole card but rather proceed only
+ * with the ports we manage to register successfully. However we must
+ * register at least one net device.
+ */
+ for_each_port(adapter, i) {
+ err = register_netdev(adapter->port[i]);
+ if (err)
+ dev_warn(&pdev->dev,
+ "cannot register net device %s, skipping\n",
+ adapter->port[i]->name);
+ else {
+ /*
+ * Change the name we use for messages to the name of
+ * the first successfully registered interface.
+ */
+ if (!adapter->registered_device_map)
+ adapter->name = adapter->port[i]->name;
+
+ __set_bit(i, &adapter->registered_device_map);
+ }
+ }
+ if (!adapter->registered_device_map) {
+ dev_err(&pdev->dev, "could not register any net devices\n");
+ goto out_free_dev;
+ }
+
+ for_each_port(adapter, i)
+ cxgb3_init_iscsi_mac(adapter->port[i]);
+
+ /* Driver's ready. Reflect it on LEDs */
+ t3_led_ready(adapter);
+
+ if (is_offload(adapter)) {
+ __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
+ cxgb3_adapter_ofld(adapter);
+ }
+
+ /* See what interrupts we'll be using */
+ if (msi > 1 && cxgb_enable_msix(adapter) == 0)
+ adapter->flags |= USING_MSIX;
+ else if (msi > 0 && pci_enable_msi(pdev) == 0)
+ adapter->flags |= USING_MSI;
+
+ set_nqsets(adapter);
+
+ err = sysfs_create_group(&adapter->port[0]->dev.kobj,
+ &cxgb3_attr_group);
+
+ for_each_port(adapter, i)
+ cxgb_vlan_mode(adapter->port[i], adapter->port[i]->features);
+
+ print_port_info(adapter, ai);
+ return 0;
+
+out_free_dev:
+ iounmap(adapter->regs);
+ for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
+ if (adapter->port[i])
+ free_netdev(adapter->port[i]);
+
+out_free_adapter:
+ kfree(adapter);
+
+out_release_regions:
+ pci_release_regions(pdev);
+out_disable_device:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+out:
+ return err;
+}
+
+static void __devexit remove_one(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+
+ if (adapter) {
+ int i;
+
+ t3_sge_stop(adapter);
+ sysfs_remove_group(&adapter->port[0]->dev.kobj,
+ &cxgb3_attr_group);
+
+ if (is_offload(adapter)) {
+ cxgb3_adapter_unofld(adapter);
+ if (test_bit(OFFLOAD_DEVMAP_BIT,
+ &adapter->open_device_map))
+ offload_close(&adapter->tdev);
+ }
+
+ for_each_port(adapter, i)
+ if (test_bit(i, &adapter->registered_device_map))
+ unregister_netdev(adapter->port[i]);
+
+ t3_stop_sge_timers(adapter);
+ t3_free_sge_resources(adapter);
+ cxgb_disable_msi(adapter);
+
+ for_each_port(adapter, i)
+ if (adapter->port[i])
+ free_netdev(adapter->port[i]);
+
+ iounmap(adapter->regs);
+ if (adapter->nofail_skb)
+ kfree_skb(adapter->nofail_skb);
+ kfree(adapter);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+static struct pci_driver driver = {
+ .name = DRV_NAME,
+ .id_table = cxgb3_pci_tbl,
+ .probe = init_one,
+ .remove = __devexit_p(remove_one),
+ .err_handler = &t3_err_handler,
+};
+
+static int __init cxgb3_init_module(void)
+{
+ int ret;
+
+ cxgb3_offload_init();
+
+ ret = pci_register_driver(&driver);
+ return ret;
+}
+
+static void __exit cxgb3_cleanup_module(void)
+{
+ pci_unregister_driver(&driver);
+ if (cxgb3_wq)
+ destroy_workqueue(cxgb3_wq);
+}
+
+module_init(cxgb3_init_module);
+module_exit(cxgb3_cleanup_module);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
new file mode 100644
index 000000000000..da5a5d9b8aff
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -0,0 +1,1431 @@
+/*
+ * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <net/neighbour.h>
+#include <linux/notifier.h>
+#include <linux/atomic.h>
+#include <linux/proc_fs.h>
+#include <linux/if_vlan.h>
+#include <net/netevent.h>
+#include <linux/highmem.h>
+#include <linux/vmalloc.h>
+
+#include "common.h"
+#include "regs.h"
+#include "cxgb3_ioctl.h"
+#include "cxgb3_ctl_defs.h"
+#include "cxgb3_defs.h"
+#include "l2t.h"
+#include "firmware_exports.h"
+#include "cxgb3_offload.h"
+
+static LIST_HEAD(client_list);
+static LIST_HEAD(ofld_dev_list);
+static DEFINE_MUTEX(cxgb3_db_lock);
+
+static DEFINE_RWLOCK(adapter_list_lock);
+static LIST_HEAD(adapter_list);
+
+static const unsigned int MAX_ATIDS = 64 * 1024;
+static const unsigned int ATID_BASE = 0x10000;
+
+static void cxgb_neigh_update(struct neighbour *neigh);
+static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);
+
+static inline int offload_activated(struct t3cdev *tdev)
+{
+ const struct adapter *adapter = tdev2adap(tdev);
+
+ return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
+}
+
+/**
+ * cxgb3_register_client - register an offload client
+ * @client: the client
+ *
+ * Adds the client to the client list and invokes its add callback for each
+ * activated offload device.
+ */
+void cxgb3_register_client(struct cxgb3_client *client)
+{
+ struct t3cdev *tdev;
+
+ mutex_lock(&cxgb3_db_lock);
+ list_add_tail(&client->client_list, &client_list);
+
+ if (client->add) {
+ list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
+ if (offload_activated(tdev))
+ client->add(tdev);
+ }
+ }
+ mutex_unlock(&cxgb3_db_lock);
+}
+EXPORT_SYMBOL(cxgb3_register_client);
+
+/**
+ * cxgb3_unregister_client - unregister an offload client
+ * @client: the client
+ *
+ * Removes the client from the client list and invokes its remove callback
+ * for each activated offload device.
+ */
+void cxgb3_unregister_client(struct cxgb3_client *client)
+{
+ struct t3cdev *tdev;
+
+ mutex_lock(&cxgb3_db_lock);
+ list_del(&client->client_list);
+
+ if (client->remove) {
+ list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
+ if (offload_activated(tdev))
+ client->remove(tdev);
+ }
+ }
+ mutex_unlock(&cxgb3_db_lock);
+}
+EXPORT_SYMBOL(cxgb3_unregister_client);
+
+/**
+ * cxgb3_add_clients - activate registered clients for an offload device
+ * @tdev: the offload device
+ *
+ * Calls back all registered clients when an offload device is activated.
+ */
+void cxgb3_add_clients(struct t3cdev *tdev)
+{
+ struct cxgb3_client *client;
+
+ mutex_lock(&cxgb3_db_lock);
+ list_for_each_entry(client, &client_list, client_list) {
+ if (client->add)
+ client->add(tdev);
+ }
+ mutex_unlock(&cxgb3_db_lock);
+}
+
+/**
+ * cxgb3_remove_clients - deactivate registered clients for an offload device
+ * @tdev: the offload device
+ *
+ * Calls back all registered clients when an offload device is deactivated.
+ */
+void cxgb3_remove_clients(struct t3cdev *tdev)
+{
+ struct cxgb3_client *client;
+
+ mutex_lock(&cxgb3_db_lock);
+ list_for_each_entry(client, &client_list, client_list) {
+ if (client->remove)
+ client->remove(tdev);
+ }
+ mutex_unlock(&cxgb3_db_lock);
+}
+
+void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
+{
+ struct cxgb3_client *client;
+
+ mutex_lock(&cxgb3_db_lock);
+ list_for_each_entry(client, &client_list, client_list) {
+ if (client->event_handler)
+ client->event_handler(tdev, event, port);
+ }
+ mutex_unlock(&cxgb3_db_lock);
+}
+
+static struct net_device *get_iff_from_mac(struct adapter *adapter,
+ const unsigned char *mac,
+ unsigned int vlan)
+{
+ int i;
+
+ for_each_port(adapter, i) {
+ struct net_device *dev = adapter->port[i];
+
+ if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
+ if (vlan && vlan != VLAN_VID_MASK) {
+ rcu_read_lock();
+ dev = __vlan_find_dev_deep(dev, vlan);
+ rcu_read_unlock();
+ } else if (netif_is_bond_slave(dev)) {
+ while (dev->master)
+ dev = dev->master;
+ }
+ return dev;
+ }
+ }
+ return NULL;
+}
+
+static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
+ void *data)
+{
+ int i;
+ int ret = 0;
+ unsigned int val = 0;
+ struct ulp_iscsi_info *uiip = data;
+
+ switch (req) {
+ case ULP_ISCSI_GET_PARAMS:
+ uiip->pdev = adapter->pdev;
+ uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
+ uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
+ uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
+
+ val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ);
+ for (i = 0; i < 4; i++, val >>= 8)
+ uiip->pgsz_factor[i] = val & 0xFF;
+
+ val = t3_read_reg(adapter, A_TP_PARA_REG7);
+ uiip->max_txsz =
+ uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0) & M_PMMAXXFERLEN0,
+ (val >> S_PMMAXXFERLEN1) & M_PMMAXXFERLEN1);
+ /*
+ * On tx, the iscsi pdu has to be <= tx page size and has to
+ * fit into the Tx PM FIFO.
+ */
+ val = min(adapter->params.tp.tx_pg_size,
+ t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
+ uiip->max_txsz = min(val, uiip->max_txsz);
+
+ /* set MaxRxData to 16224 */
+ val = t3_read_reg(adapter, A_TP_PARA_REG2);
+ if ((val >> S_MAXRXDATA) != 0x3f60) {
+ val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
+ val |= V_MAXRXDATA(0x3f60);
+ printk(KERN_INFO
+ "%s, iscsi set MaxRxData to 16224 (0x%x).\n",
+ adapter->name, val);
+ t3_write_reg(adapter, A_TP_PARA_REG2, val);
+ }
+
+ /*
+ * on rx, the iscsi pdu has to be < rx page size and the
+ * max rx data length programmed in TP
+ */
+ val = min(adapter->params.tp.rx_pg_size,
+ ((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
+ S_MAXRXDATA) & M_MAXRXDATA);
+ uiip->max_rxsz = min(val, uiip->max_rxsz);
+ break;
+ case ULP_ISCSI_SET_PARAMS:
+ t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
+ /* program the ddp page sizes */
+ for (i = 0; i < 4; i++)
+ val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
+ if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
+ printk(KERN_INFO
+ "%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u.\n",
+ adapter->name, val, uiip->pgsz_factor[0],
+ uiip->pgsz_factor[1], uiip->pgsz_factor[2],
+ uiip->pgsz_factor[3]);
+ t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
+ }
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+/* Response queue used for RDMA events. */
+#define ASYNC_NOTIF_RSPQ 0
+
+static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
+{
+ int ret = 0;
+
+ switch (req) {
+ case RDMA_GET_PARAMS: {
+ struct rdma_info *rdma = data;
+ struct pci_dev *pdev = adapter->pdev;
+
+ rdma->udbell_physbase = pci_resource_start(pdev, 2);
+ rdma->udbell_len = pci_resource_len(pdev, 2);
+ rdma->tpt_base =
+ t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
+ rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
+ rdma->pbl_base =
+ t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
+ rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
+ rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
+ rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
+ rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
+ rdma->pdev = pdev;
+ break;
+ }
+ case RDMA_CQ_OP:{
+ unsigned long flags;
+ struct rdma_cq_op *rdma = data;
+
+ /* may be called in any context */
+ spin_lock_irqsave(&adapter->sge.reg_lock, flags);
+ ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
+ rdma->credits);
+ spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
+ break;
+ }
+ case RDMA_GET_MEM:{
+ struct ch_mem_range *t = data;
+ struct mc7 *mem;
+
+ if ((t->addr & 7) || (t->len & 7))
+ return -EINVAL;
+ if (t->mem_id == MEM_CM)
+ mem = &adapter->cm;
+ else if (t->mem_id == MEM_PMRX)
+ mem = &adapter->pmrx;
+ else if (t->mem_id == MEM_PMTX)
+ mem = &adapter->pmtx;
+ else
+ return -EINVAL;
+
+ ret =
+ t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
+ (u64 *) t->buf);
+ if (ret)
+ return ret;
+ break;
+ }
+ case RDMA_CQ_SETUP:{
+ struct rdma_cq_setup *rdma = data;
+
+ spin_lock_irq(&adapter->sge.reg_lock);
+ ret =
+ t3_sge_init_cqcntxt(adapter, rdma->id,
+ rdma->base_addr, rdma->size,
+ ASYNC_NOTIF_RSPQ,
+ rdma->ovfl_mode, rdma->credits,
+ rdma->credit_thres);
+ spin_unlock_irq(&adapter->sge.reg_lock);
+ break;
+ }
+ case RDMA_CQ_DISABLE:
+ spin_lock_irq(&adapter->sge.reg_lock);
+ ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
+ spin_unlock_irq(&adapter->sge.reg_lock);
+ break;
+ case RDMA_CTRL_QP_SETUP:{
+ struct rdma_ctrlqp_setup *rdma = data;
+
+ spin_lock_irq(&adapter->sge.reg_lock);
+ ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
+ SGE_CNTXT_RDMA,
+ ASYNC_NOTIF_RSPQ,
+ rdma->base_addr, rdma->size,
+ FW_RI_TID_START, 1, 0);
+ spin_unlock_irq(&adapter->sge.reg_lock);
+ break;
+ }
+ case RDMA_GET_MIB: {
+ spin_lock(&adapter->stats_lock);
+ t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data);
+ spin_unlock(&adapter->stats_lock);
+ break;
+ }
+ default:
+ ret = -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
+{
+ struct adapter *adapter = tdev2adap(tdev);
+ struct tid_range *tid;
+ struct mtutab *mtup;
+ struct iff_mac *iffmacp;
+ struct ddp_params *ddpp;
+ struct adap_ports *ports;
+ struct ofld_page_info *rx_page_info;
+ struct tp_params *tp = &adapter->params.tp;
+ int i;
+
+ switch (req) {
+ case GET_MAX_OUTSTANDING_WR:
+ *(unsigned int *)data = FW_WR_NUM;
+ break;
+ case GET_WR_LEN:
+ *(unsigned int *)data = WR_FLITS;
+ break;
+ case GET_TX_MAX_CHUNK:
+ *(unsigned int *)data = 1 << 20; /* 1MB */
+ break;
+ case GET_TID_RANGE:
+ tid = data;
+ tid->num = t3_mc5_size(&adapter->mc5) -
+ adapter->params.mc5.nroutes -
+ adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
+ tid->base = 0;
+ break;
+ case GET_STID_RANGE:
+ tid = data;
+ tid->num = adapter->params.mc5.nservers;
+ tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
+ adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
+ break;
+ case GET_L2T_CAPACITY:
+ *(unsigned int *)data = 2048;
+ break;
+ case GET_MTUS:
+ mtup = data;
+ mtup->size = NMTUS;
+ mtup->mtus = adapter->params.mtus;
+ break;
+ case GET_IFF_FROM_MAC:
+ iffmacp = data;
+ iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
+ iffmacp->vlan_tag &
+ VLAN_VID_MASK);
+ break;
+ case GET_DDP_PARAMS:
+ ddpp = data;
+ ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
+ ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
+ ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
+ break;
+ case GET_PORTS:
+ ports = data;
+ ports->nports = adapter->params.nports;
+ for_each_port(adapter, i)
+ ports->lldevs[i] = adapter->port[i];
+ break;
+ case ULP_ISCSI_GET_PARAMS:
+ case ULP_ISCSI_SET_PARAMS:
+ if (!offload_running(adapter))
+ return -EAGAIN;
+ return cxgb_ulp_iscsi_ctl(adapter, req, data);
+ case RDMA_GET_PARAMS:
+ case RDMA_CQ_OP:
+ case RDMA_CQ_SETUP:
+ case RDMA_CQ_DISABLE:
+ case RDMA_CTRL_QP_SETUP:
+ case RDMA_GET_MEM:
+ case RDMA_GET_MIB:
+ if (!offload_running(adapter))
+ return -EAGAIN;
+ return cxgb_rdma_ctl(adapter, req, data);
+ case GET_RX_PAGE_INFO:
+ rx_page_info = data;
+ rx_page_info->page_size = tp->rx_pg_size;
+ rx_page_info->num = tp->rx_num_pgs;
+ break;
+ case GET_ISCSI_IPV4ADDR: {
+ struct iscsi_ipv4addr *p = data;
+ struct port_info *pi = netdev_priv(p->dev);
+ p->ipv4addr = pi->iscsi_ipv4addr;
+ break;
+ }
+ case GET_EMBEDDED_INFO: {
+ struct ch_embedded_info *e = data;
+
+ spin_lock(&adapter->stats_lock);
+ t3_get_fw_version(adapter, &e->fw_vers);
+ t3_get_tp_version(adapter, &e->tp_vers);
+ spin_unlock(&adapter->stats_lock);
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+/*
+ * Dummy handler for Rx offload packets in case we get an offload packet before
+ * proper processing is set up. This silently drops the packet, as it isn't
+ * normal to get offload packets at this stage.
+ */
+static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
+ int n)
+{
+ while (n--)
+ dev_kfree_skb_any(skbs[n]);
+ return 0;
+}
+
+static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
+{
+}
+
+void cxgb3_set_dummy_ops(struct t3cdev *dev)
+{
+ dev->recv = rx_offload_blackhole;
+ dev->neigh_update = dummy_neigh_update;
+}
+
+/*
+ * Free an active-open TID.
+ */
+void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
+{
+ struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+ union active_open_entry *p = atid2entry(t, atid);
+ void *ctx = p->t3c_tid.ctx;
+
+ spin_lock_bh(&t->atid_lock);
+ p->next = t->afree;
+ t->afree = p;
+ t->atids_in_use--;
+ spin_unlock_bh(&t->atid_lock);
+
+ return ctx;
+}
+
+EXPORT_SYMBOL(cxgb3_free_atid);
+
+/*
+ * Free a server TID and return it to the free pool.
+ */
+void cxgb3_free_stid(struct t3cdev *tdev, int stid)
+{
+ struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+ union listen_entry *p = stid2entry(t, stid);
+
+ spin_lock_bh(&t->stid_lock);
+ p->next = t->sfree;
+ t->sfree = p;
+ t->stids_in_use--;
+ spin_unlock_bh(&t->stid_lock);
+}
+
+EXPORT_SYMBOL(cxgb3_free_stid);
+
+void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
+ void *ctx, unsigned int tid)
+{
+ struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+
+ t->tid_tab[tid].client = client;
+ t->tid_tab[tid].ctx = ctx;
+ atomic_inc(&t->tids_in_use);
+}
+
+EXPORT_SYMBOL(cxgb3_insert_tid);
+
+/*
+ * Populate a TID_RELEASE WR. The skb must already be properly sized.
+ */
+static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
+{
+ struct cpl_tid_release *req;
+
+ skb->priority = CPL_PRIORITY_SETUP;
+ req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
+}
+
+static void t3_process_tid_release_list(struct work_struct *work)
+{
+ struct t3c_data *td = container_of(work, struct t3c_data,
+ tid_release_task);
+ struct sk_buff *skb;
+ struct t3cdev *tdev = td->dev;
+
+ spin_lock_bh(&td->tid_release_lock);
+ while (td->tid_release_list) {
+ struct t3c_tid_entry *p = td->tid_release_list;
+
+ td->tid_release_list = p->ctx;
+ spin_unlock_bh(&td->tid_release_lock);
+
+ skb = alloc_skb(sizeof(struct cpl_tid_release),
+ GFP_KERNEL);
+ if (!skb)
+ skb = td->nofail_skb;
+ if (!skb) {
+ spin_lock_bh(&td->tid_release_lock);
+ p->ctx = (void *)td->tid_release_list;
+ td->tid_release_list = p;
+ break;
+ }
+ mk_tid_release(skb, p - td->tid_maps.tid_tab);
+ cxgb3_ofld_send(tdev, skb);
+ p->ctx = NULL;
+ if (skb == td->nofail_skb)
+ td->nofail_skb =
+ alloc_skb(sizeof(struct cpl_tid_release),
+ GFP_KERNEL);
+ spin_lock_bh(&td->tid_release_lock);
+ }
+ td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
+ spin_unlock_bh(&td->tid_release_lock);
+
+ if (!td->nofail_skb)
+ td->nofail_skb =
+ alloc_skb(sizeof(struct cpl_tid_release),
+ GFP_KERNEL);
+}
+
+/* use ctx as a next pointer in the tid release list */
+void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
+{
+ struct t3c_data *td = T3C_DATA(tdev);
+ struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];
+
+ spin_lock_bh(&td->tid_release_lock);
+ p->ctx = (void *)td->tid_release_list;
+ p->client = NULL;
+ td->tid_release_list = p;
+ if (!p->ctx || td->release_list_incomplete)
+ schedule_work(&td->tid_release_task);
+ spin_unlock_bh(&td->tid_release_lock);
+}
+
+EXPORT_SYMBOL(cxgb3_queue_tid_release);
+
+/*
+ * Remove a tid from the TID table. A client may defer processing its last
+ * CPL message if it is locked at the time it arrives, and while the message
+ * sits in the client's backlog the TID may be reused for another connection.
+ * To handle this we atomically switch the TID association if it still points
+ * to the original client context.
+ */
+void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
+{
+ struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+
+ BUG_ON(tid >= t->ntids);
+ if (tdev->type == T3A)
+ (void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
+ else {
+ struct sk_buff *skb;
+
+ skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
+ if (likely(skb)) {
+ mk_tid_release(skb, tid);
+ cxgb3_ofld_send(tdev, skb);
+ t->tid_tab[tid].ctx = NULL;
+ } else
+ cxgb3_queue_tid_release(tdev, tid);
+ }
+ atomic_dec(&t->tids_in_use);
+}
+
+EXPORT_SYMBOL(cxgb3_remove_tid);
+
+int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
+ void *ctx)
+{
+ int atid = -1;
+ struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+
+ spin_lock_bh(&t->atid_lock);
+ if (t->afree &&
+ t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
+ t->ntids) {
+ union active_open_entry *p = t->afree;
+
+ atid = (p - t->atid_tab) + t->atid_base;
+ t->afree = p->next;
+ p->t3c_tid.ctx = ctx;
+ p->t3c_tid.client = client;
+ t->atids_in_use++;
+ }
+ spin_unlock_bh(&t->atid_lock);
+ return atid;
+}
+
+EXPORT_SYMBOL(cxgb3_alloc_atid);
+
+int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
+ void *ctx)
+{
+ int stid = -1;
+ struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+
+ spin_lock_bh(&t->stid_lock);
+ if (t->sfree) {
+ union listen_entry *p = t->sfree;
+
+ stid = (p - t->stid_tab) + t->stid_base;
+ t->sfree = p->next;
+ p->t3c_tid.ctx = ctx;
+ p->t3c_tid.client = client;
+ t->stids_in_use++;
+ }
+ spin_unlock_bh(&t->stid_lock);
+ return stid;
+}
+
+EXPORT_SYMBOL(cxgb3_alloc_stid);
+
+/* Get the t3cdev associated with a net_device */
+struct t3cdev *dev2t3cdev(struct net_device *dev)
+{
+ const struct port_info *pi = netdev_priv(dev);
+
+ return (struct t3cdev *)pi->adapter;
+}
+
+EXPORT_SYMBOL(dev2t3cdev);
+
+static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_smt_write_rpl *rpl = cplhdr(skb);
+
+ if (rpl->status != CPL_ERR_NONE)
+ printk(KERN_ERR
+ "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
+ rpl->status, GET_TID(rpl));
+
+ return CPL_RET_BUF_DONE;
+}
+
+static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_l2t_write_rpl *rpl = cplhdr(skb);
+
+ if (rpl->status != CPL_ERR_NONE)
+ printk(KERN_ERR
+ "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
+ rpl->status, GET_TID(rpl));
+
+ return CPL_RET_BUF_DONE;
+}
+
+static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_rte_write_rpl *rpl = cplhdr(skb);
+
+ if (rpl->status != CPL_ERR_NONE)
+ printk(KERN_ERR
+ "Unexpected RTE_WRITE_RPL status %u for entry %u\n",
+ rpl->status, GET_TID(rpl));
+
+ return CPL_RET_BUF_DONE;
+}
+
+static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_act_open_rpl *rpl = cplhdr(skb);
+ unsigned int atid = G_TID(ntohl(rpl->atid));
+ struct t3c_tid_entry *t3c_tid;
+
+ t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
+ t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
+ return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL](dev, skb,
+ t3c_tid->ctx);
+ } else {
+ printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ dev->name, CPL_ACT_OPEN_RPL);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+ }
+}
+
+static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ union opcode_tid *p = cplhdr(skb);
+ unsigned int stid = G_TID(ntohl(p->opcode_tid));
+ struct t3c_tid_entry *t3c_tid;
+
+ t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[p->opcode]) {
+ return t3c_tid->client->handlers[p->opcode] (dev, skb,
+ t3c_tid->ctx);
+ } else {
+ printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ dev->name, p->opcode);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+ }
+}
+
+static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ union opcode_tid *p = cplhdr(skb);
+ unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
+ struct t3c_tid_entry *t3c_tid;
+
+ t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[p->opcode]) {
+ return t3c_tid->client->handlers[p->opcode]
+ (dev, skb, t3c_tid->ctx);
+ } else {
+ printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ dev->name, p->opcode);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+ }
+}
+
+static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_pass_accept_req *req = cplhdr(skb);
+ unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
+ struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
+ struct t3c_tid_entry *t3c_tid;
+ unsigned int tid = GET_TID(req);
+
+ if (unlikely(tid >= t->ntids)) {
+ printk("%s: passive open TID %u too large\n",
+ dev->name, tid);
+ t3_fatal_err(tdev2adap(dev));
+ return CPL_RET_BUF_DONE;
+ }
+
+ t3c_tid = lookup_stid(t, stid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
+ return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
+ (dev, skb, t3c_tid->ctx);
+ } else {
+ printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ dev->name, CPL_PASS_ACCEPT_REQ);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+ }
+}
+
+/*
+ * Returns an sk_buff for a reply CPL message of size len. If the input
+ * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
+ * is allocated. The input skb must be of size at least len. Note that this
+ * operation does not destroy the original skb data even if it decides to reuse
+ * the buffer.
+ */
+static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
+ gfp_t gfp)
+{
+ if (likely(!skb_cloned(skb))) {
+ BUG_ON(skb->len < len);
+ __skb_trim(skb, len);
+ skb_get(skb);
+ } else {
+ skb = alloc_skb(len, gfp);
+ if (skb)
+ __skb_put(skb, len);
+ }
+ return skb;
+}
+
+static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
+{
+ union opcode_tid *p = cplhdr(skb);
+ unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
+ struct t3c_tid_entry *t3c_tid;
+
+ t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[p->opcode]) {
+ return t3c_tid->client->handlers[p->opcode]
+ (dev, skb, t3c_tid->ctx);
+ } else {
+ struct cpl_abort_req_rss *req = cplhdr(skb);
+ struct cpl_abort_rpl *rpl;
+ struct sk_buff *reply_skb;
+ unsigned int tid = GET_TID(req);
+ u8 cmd = req->status;
+
+ if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
+ req->status == CPL_ERR_PERSIST_NEG_ADVICE)
+ goto out;
+
+ reply_skb = cxgb3_get_cpl_reply_skb(skb,
+ sizeof(struct cpl_abort_rpl),
+ GFP_ATOMIC);
+
+ if (!reply_skb) {
+ printk("do_abort_req_rss: couldn't get skb!\n");
+ goto out;
+ }
+ reply_skb->priority = CPL_PRIORITY_DATA;
+ __skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
+ rpl = cplhdr(reply_skb);
+ rpl->wr.wr_hi =
+ htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
+ rpl->wr.wr_lo = htonl(V_WR_TID(tid));
+ OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+ rpl->cmd = cmd;
+ cxgb3_ofld_send(dev, reply_skb);
+out:
+ return CPL_RET_BUF_DONE;
+ }
+}
+
+static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_act_establish *req = cplhdr(skb);
+ unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
+ struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
+ struct t3c_tid_entry *t3c_tid;
+ unsigned int tid = GET_TID(req);
+
+ if (unlikely(tid >= t->ntids)) {
+ printk("%s: active establish TID %u too large\n",
+ dev->name, tid);
+ t3_fatal_err(tdev2adap(dev));
+ return CPL_RET_BUF_DONE;
+ }
+
+ t3c_tid = lookup_atid(t, atid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
+ return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
+ (dev, skb, t3c_tid->ctx);
+ } else {
+ printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ dev->name, CPL_ACT_ESTABLISH);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+ }
+}
+
+static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_trace_pkt *p = cplhdr(skb);
+
+ skb->protocol = htons(0xffff);
+ skb->dev = dev->lldev;
+ skb_pull(skb, sizeof(*p));
+ skb_reset_mac_header(skb);
+ netif_receive_skb(skb);
+ return 0;
+}
+
+/*
+ * That skb would better have come from process_responses() where we abuse
+ * ->priority and ->csum to carry our data. NB: if we get to per-arch
+ * ->csum, things might get really interesting here.
+ */
+
+static inline u32 get_hwtid(struct sk_buff *skb)
+{
+ return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
+}
+
+static inline u32 get_opcode(struct sk_buff *skb)
+{
+ return G_OPCODE(ntohl((__force __be32)skb->csum));
+}
+
+static int do_term(struct t3cdev *dev, struct sk_buff *skb)
+{
+ unsigned int hwtid = get_hwtid(skb);
+ unsigned int opcode = get_opcode(skb);
+ struct t3c_tid_entry *t3c_tid;
+
+ t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[opcode]) {
+ return t3c_tid->client->handlers[opcode] (dev, skb,
+ t3c_tid->ctx);
+ } else {
+ printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ dev->name, opcode);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+ }
+}
+
+static int nb_callback(struct notifier_block *self, unsigned long event,
+ void *ctx)
+{
+ switch (event) {
+ case (NETEVENT_NEIGH_UPDATE):{
+ cxgb_neigh_update((struct neighbour *)ctx);
+ break;
+ }
+ case (NETEVENT_REDIRECT):{
+ struct netevent_redirect *nr = ctx;
+ cxgb_redirect(nr->old, nr->new);
+ cxgb_neigh_update(dst_get_neighbour(nr->new));
+ break;
+ }
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block nb = {
+ .notifier_call = nb_callback
+};
+
+/*
+ * Process a received packet with an unknown/unexpected CPL opcode.
+ */
+static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
+ *skb->data);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+}
+
+/*
+ * Handlers for each CPL opcode
+ */
+static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];
+
+/*
+ * Add a new handler to the CPL dispatch table. A NULL handler may be supplied
+ * to unregister an existing handler.
+ */
+void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
+{
+ if (opcode < NUM_CPL_CMDS)
+ cpl_handlers[opcode] = h ? h : do_bad_cpl;
+ else
+ printk(KERN_ERR "T3C: handler registration for "
+ "opcode %x failed\n", opcode);
+}
+
+EXPORT_SYMBOL(t3_register_cpl_handler);
+
+/*
+ * T3CDEV's receive method.
+ */
+static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
+{
+ while (n--) {
+ struct sk_buff *skb = *skbs++;
+ unsigned int opcode = get_opcode(skb);
+ int ret = cpl_handlers[opcode] (dev, skb);
+
+#if VALIDATE_TID
+ if (ret & CPL_RET_UNKNOWN_TID) {
+ union opcode_tid *p = cplhdr(skb);
+
+ printk(KERN_ERR "%s: CPL message (opcode %u) had "
+ "unknown TID %u\n", dev->name, opcode,
+ G_TID(ntohl(p->opcode_tid)));
+ }
+#endif
+ if (ret & CPL_RET_BUF_DONE)
+ kfree_skb(skb);
+ }
+ return 0;
+}
+
+/*
+ * Sends an sk_buff to a T3C driver after dealing with any active network taps.
+ */
+int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
+{
+ int r;
+
+ local_bh_disable();
+ r = dev->send(dev, skb);
+ local_bh_enable();
+ return r;
+}
+
+EXPORT_SYMBOL(cxgb3_ofld_send);
+
+static int is_offloading(struct net_device *dev)
+{
+ struct adapter *adapter;
+ int i;
+
+ read_lock_bh(&adapter_list_lock);
+ list_for_each_entry(adapter, &adapter_list, adapter_list) {
+ for_each_port(adapter, i) {
+ if (dev == adapter->port[i]) {
+ read_unlock_bh(&adapter_list_lock);
+ return 1;
+ }
+ }
+ }
+ read_unlock_bh(&adapter_list_lock);
+ return 0;
+}
+
+static void cxgb_neigh_update(struct neighbour *neigh)
+{
+ struct net_device *dev = neigh->dev;
+
+ if (dev && (is_offloading(dev))) {
+ struct t3cdev *tdev = dev2t3cdev(dev);
+
+ BUG_ON(!tdev);
+ t3_l2t_update(tdev, neigh);
+ }
+}
+
+static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
+{
+ struct sk_buff *skb;
+ struct cpl_set_tcb_field *req;
+
+ skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_ERR "%s: cannot allocate skb!\n", __func__);
+ return;
+ }
+ skb->priority = CPL_PRIORITY_CONTROL;
+ req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
+ req->reply = 0;
+ req->cpu_idx = 0;
+ req->word = htons(W_TCB_L2T_IX);
+ req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
+ req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
+ tdev->send(tdev, skb);
+}
+
+static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
+{
+ struct net_device *olddev, *newdev;
+ struct tid_info *ti;
+ struct t3cdev *tdev;
+ u32 tid;
+ int update_tcb;
+ struct l2t_entry *e;
+ struct t3c_tid_entry *te;
+
+ olddev = dst_get_neighbour(old)->dev;
+ newdev = dst_get_neighbour(new)->dev;
+ if (!is_offloading(olddev))
+ return;
+ if (!is_offloading(newdev)) {
+ printk(KERN_WARNING "%s: Redirect to non-offload "
+ "device ignored.\n", __func__);
+ return;
+ }
+ tdev = dev2t3cdev(olddev);
+ BUG_ON(!tdev);
+ if (tdev != dev2t3cdev(newdev)) {
+ printk(KERN_WARNING "%s: Redirect to different "
+ "offload device ignored.\n", __func__);
+ return;
+ }
+
+ /* Add new L2T entry */
+ e = t3_l2t_get(tdev, dst_get_neighbour(new), newdev);
+ if (!e) {
+ printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
+ __func__);
+ return;
+ }
+
+ /* Walk tid table and notify clients of dst change. */
+ ti = &(T3C_DATA(tdev))->tid_maps;
+ for (tid = 0; tid < ti->ntids; tid++) {
+ te = lookup_tid(ti, tid);
+ BUG_ON(!te);
+ if (te && te->ctx && te->client && te->client->redirect) {
+ update_tcb = te->client->redirect(te->ctx, old, new, e);
+ if (update_tcb) {
+ rcu_read_lock();
+ l2t_hold(L2DATA(tdev), e);
+ rcu_read_unlock();
+ set_l2t_ix(tdev, tid, e);
+ }
+ }
+ }
+ l2t_release(tdev, e);
+}
+
+/*
+ * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
+ * The allocated memory is cleared.
+ */
+void *cxgb_alloc_mem(unsigned long size)
+{
+ void *p = kzalloc(size, GFP_KERNEL);
+
+ if (!p)
+ p = vzalloc(size);
+ return p;
+}
+
+/*
+ * Free memory allocated through cxgb_alloc_mem().
+ */
+void cxgb_free_mem(void *addr)
+{
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+}
+
+/*
+ * Allocate and initialize the TID tables. Returns 0 on success.
+ */
+static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
+ unsigned int natids, unsigned int nstids,
+ unsigned int atid_base, unsigned int stid_base)
+{
+ unsigned long size = ntids * sizeof(*t->tid_tab) +
+ natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);
+
+ t->tid_tab = cxgb_alloc_mem(size);
+ if (!t->tid_tab)
+ return -ENOMEM;
+
+ t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
+ t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
+ t->ntids = ntids;
+ t->nstids = nstids;
+ t->stid_base = stid_base;
+ t->sfree = NULL;
+ t->natids = natids;
+ t->atid_base = atid_base;
+ t->afree = NULL;
+ t->stids_in_use = t->atids_in_use = 0;
+ atomic_set(&t->tids_in_use, 0);
+ spin_lock_init(&t->stid_lock);
+ spin_lock_init(&t->atid_lock);
+
+ /*
+ * Setup the free lists for stid_tab and atid_tab.
+ */
+ if (nstids) {
+ while (--nstids)
+ t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
+ t->sfree = t->stid_tab;
+ }
+ if (natids) {
+ while (--natids)
+ t->atid_tab[natids - 1].next = &t->atid_tab[natids];
+ t->afree = t->atid_tab;
+ }
+ return 0;
+}
+
+static void free_tid_maps(struct tid_info *t)
+{
+ cxgb_free_mem(t->tid_tab);
+}
+
+static inline void add_adapter(struct adapter *adap)
+{
+ write_lock_bh(&adapter_list_lock);
+ list_add_tail(&adap->adapter_list, &adapter_list);
+ write_unlock_bh(&adapter_list_lock);
+}
+
+static inline void remove_adapter(struct adapter *adap)
+{
+ write_lock_bh(&adapter_list_lock);
+ list_del(&adap->adapter_list);
+ write_unlock_bh(&adapter_list_lock);
+}
+
+int cxgb3_offload_activate(struct adapter *adapter)
+{
+ struct t3cdev *dev = &adapter->tdev;
+ int natids, err;
+ struct t3c_data *t;
+ struct tid_range stid_range, tid_range;
+ struct mtutab mtutab;
+ unsigned int l2t_capacity;
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ return -ENOMEM;
+
+ err = -EOPNOTSUPP;
+ if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
+ dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
+ dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
+ dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
+ dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
+ dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
+ goto out_free;
+
+ err = -ENOMEM;
+ RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity));
+ if (!L2DATA(dev))
+ goto out_free;
+
+ natids = min(tid_range.num / 2, MAX_ATIDS);
+ err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
+ stid_range.num, ATID_BASE, stid_range.base);
+ if (err)
+ goto out_free_l2t;
+
+ t->mtus = mtutab.mtus;
+ t->nmtus = mtutab.size;
+
+ INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
+ spin_lock_init(&t->tid_release_lock);
+ INIT_LIST_HEAD(&t->list_node);
+ t->dev = dev;
+
+ T3C_DATA(dev) = t;
+ dev->recv = process_rx;
+ dev->neigh_update = t3_l2t_update;
+
+ /* Register netevent handler once */
+ if (list_empty(&adapter_list))
+ register_netevent_notifier(&nb);
+
+ t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
+ t->release_list_incomplete = 0;
+
+ add_adapter(adapter);
+ return 0;
+
+out_free_l2t:
+ t3_free_l2t(L2DATA(dev));
+ rcu_assign_pointer(dev->l2opt, NULL);
+out_free:
+ kfree(t);
+ return err;
+}
+
+static void clean_l2_data(struct rcu_head *head)
+{
+ struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
+ t3_free_l2t(d);
+}
+
+void cxgb3_offload_deactivate(struct adapter *adapter)
+{
+ struct t3cdev *tdev = &adapter->tdev;
+ struct t3c_data *t = T3C_DATA(tdev);
+ struct l2t_data *d;
+
+ remove_adapter(adapter);
+ if (list_empty(&adapter_list))
+ unregister_netevent_notifier(&nb);
+
+ free_tid_maps(&t->tid_maps);
+ T3C_DATA(tdev) = NULL;
+ rcu_read_lock();
+ d = L2DATA(tdev);
+ rcu_read_unlock();
+ rcu_assign_pointer(tdev->l2opt, NULL);
+ call_rcu(&d->rcu_head, clean_l2_data);
+ if (t->nofail_skb)
+ kfree_skb(t->nofail_skb);
+ kfree(t);
+}
+
+static inline void register_tdev(struct t3cdev *tdev)
+{
+ static int unit;
+
+ mutex_lock(&cxgb3_db_lock);
+ snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
+ list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
+ mutex_unlock(&cxgb3_db_lock);
+}
+
+static inline void unregister_tdev(struct t3cdev *tdev)
+{
+ mutex_lock(&cxgb3_db_lock);
+ list_del(&tdev->ofld_dev_list);
+ mutex_unlock(&cxgb3_db_lock);
+}
+
+static inline int adap2type(struct adapter *adapter)
+{
+ int type = 0;
+
+ switch (adapter->params.rev) {
+ case T3_REV_A:
+ type = T3A;
+ break;
+ case T3_REV_B:
+ case T3_REV_B2:
+ type = T3B;
+ break;
+ case T3_REV_C:
+ type = T3C;
+ break;
+ }
+ return type;
+}
+
+void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
+{
+ struct t3cdev *tdev = &adapter->tdev;
+
+ INIT_LIST_HEAD(&tdev->ofld_dev_list);
+
+ cxgb3_set_dummy_ops(tdev);
+ tdev->send = t3_offload_tx;
+ tdev->ctl = cxgb_offload_ctl;
+ tdev->type = adap2type(adapter);
+
+ register_tdev(tdev);
+}
+
+void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
+{
+ struct t3cdev *tdev = &adapter->tdev;
+
+ tdev->recv = NULL;
+ tdev->neigh_update = NULL;
+
+ unregister_tdev(tdev);
+}
+
+void __init cxgb3_offload_init(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_CPL_CMDS; ++i)
+ cpl_handlers[i] = do_bad_cpl;
+
+ t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
+ t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
+ t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
+ t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
+ t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
+ t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
+ t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
+ t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
+ t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
+ t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
+ t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
+ t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h
new file mode 100644
index 000000000000..929c298115ca
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CXGB3_OFFLOAD_H
+#define _CXGB3_OFFLOAD_H
+
+#include <linux/list.h>
+#include <linux/skbuff.h>
+
+#include "l2t.h"
+
+#include "t3cdev.h"
+#include "t3_cpl.h"
+
+struct adapter;
+
+void cxgb3_offload_init(void);
+
+void cxgb3_adapter_ofld(struct adapter *adapter);
+void cxgb3_adapter_unofld(struct adapter *adapter);
+int cxgb3_offload_activate(struct adapter *adapter);
+void cxgb3_offload_deactivate(struct adapter *adapter);
+
+void cxgb3_set_dummy_ops(struct t3cdev *dev);
+
+struct t3cdev *dev2t3cdev(struct net_device *dev);
+
+/*
+ * Client registration. Users of the T3 driver must register themselves.
+ * The T3 driver will call the add function of every client for each T3
+ * adapter activated, passing up the t3cdev ptr. Each client fills out an
+ * array of callback functions to process CPL messages.
+ */
+
+void cxgb3_register_client(struct cxgb3_client *client);
+void cxgb3_unregister_client(struct cxgb3_client *client);
+void cxgb3_add_clients(struct t3cdev *tdev);
+void cxgb3_remove_clients(struct t3cdev *tdev);
+void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port);
+
+typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
+ struct sk_buff *skb, void *ctx);
+
+enum {
+ OFFLOAD_STATUS_UP,
+ OFFLOAD_STATUS_DOWN,
+ OFFLOAD_PORT_DOWN,
+ OFFLOAD_PORT_UP,
+ OFFLOAD_DB_FULL,
+ OFFLOAD_DB_EMPTY,
+ OFFLOAD_DB_DROP
+};
+
+struct cxgb3_client {
+ char *name;
+ void (*add) (struct t3cdev *);
+ void (*remove) (struct t3cdev *);
+ cxgb3_cpl_handler_func *handlers;
+ int (*redirect)(void *ctx, struct dst_entry *old,
+ struct dst_entry *new, struct l2t_entry *l2t);
+ struct list_head client_list;
+ void (*event_handler)(struct t3cdev *tdev, u32 event, u32 port);
+};
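+
+/*
+ * For illustration only, a minimal client might look like the following
+ * sketch (the my_* names are hypothetical; handlers is indexed by CPL
+ * opcode and unhandled opcodes may be left NULL):
+ *
+ *	static int my_rx_data(struct t3cdev *dev, struct sk_buff *skb,
+ *			      void *ctx)
+ *	{
+ *		return CPL_RET_BUF_DONE;
+ *	}
+ *
+ *	static cxgb3_cpl_handler_func my_handlers[NUM_CPL_CMDS] = {
+ *		[CPL_RX_DATA] = my_rx_data,
+ *	};
+ *
+ *	static struct cxgb3_client my_client = {
+ *		.name     = "my_client",
+ *		.add      = my_add,
+ *		.remove   = my_remove,
+ *		.handlers = my_handlers,
+ *	};
+ *
+ *	cxgb3_register_client(&my_client);
+ */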
+
+/*
+ * TID allocation services.
+ */
+int cxgb3_alloc_atid(struct t3cdev *dev, struct cxgb3_client *client,
+ void *ctx);
+int cxgb3_alloc_stid(struct t3cdev *dev, struct cxgb3_client *client,
+ void *ctx);
+void *cxgb3_free_atid(struct t3cdev *dev, int atid);
+void cxgb3_free_stid(struct t3cdev *dev, int stid);
+void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client,
+ void *ctx, unsigned int tid);
+void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid);
+void cxgb3_remove_tid(struct t3cdev *dev, void *ctx, unsigned int tid);
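+
+/*
+ * A typical active-open lifecycle, sketched for illustration (my_client is
+ * hypothetical and error handling is omitted): allocate an atid for the
+ * connection request, then, once the hardware TID is known, move the
+ * context over and release the atid.
+ *
+ *	atid = cxgb3_alloc_atid(tdev, &my_client, ctx);
+ *	...					send the active-open request
+ *	cxgb3_insert_tid(tdev, &my_client, ctx, hwtid);	on establish
+ *	cxgb3_free_atid(tdev, atid);
+ *	...
+ *	cxgb3_remove_tid(tdev, ctx, hwtid);		on teardown
+ */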
+
+struct t3c_tid_entry {
+ struct cxgb3_client *client;
+ void *ctx;
+};
+
+/* CPL message priority levels */
+enum {
+ CPL_PRIORITY_DATA = 0, /* data messages */
+ CPL_PRIORITY_SETUP = 1, /* connection setup messages */
+ CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
+ CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */
+ CPL_PRIORITY_ACK = 1, /* RX ACK messages */
+ CPL_PRIORITY_CONTROL = 1 /* offload control messages */
+};
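+
+/*
+ * Senders tag each CPL skb with one of these levels before handing it to
+ * cxgb3_ofld_send(), e.g.
+ *
+ *	skb->priority = CPL_PRIORITY_SETUP;
+ */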
+
+/* Flags for return value of CPL message handlers */
+enum {
+ CPL_RET_BUF_DONE = 1, /* buffer processing done, buffer may be freed */
+ CPL_RET_BAD_MSG = 2, /* bad CPL message (e.g., unknown opcode) */
+ CPL_RET_UNKNOWN_TID = 4 /* unexpected unknown TID */
+};
+
+typedef int (*cpl_handler_func)(struct t3cdev *dev, struct sk_buff *skb);
+
+/*
+ * Returns a pointer to the first byte of the CPL header in an sk_buff that
+ * contains a CPL message.
+ */
+static inline void *cplhdr(struct sk_buff *skb)
+{
+ return skb->data;
+}
+
+void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h);
+
+union listen_entry {
+ struct t3c_tid_entry t3c_tid;
+ union listen_entry *next;
+};
+
+union active_open_entry {
+ struct t3c_tid_entry t3c_tid;
+ union active_open_entry *next;
+};
+
+/*
+ * Holds the size, base address, free list start, etc. of the TID, server TID,
+ * and active-open TID tables for an offload device.
+ * The tables themselves are allocated dynamically.
+ */
+struct tid_info {
+ struct t3c_tid_entry *tid_tab;
+ unsigned int ntids;
+ atomic_t tids_in_use;
+
+ union listen_entry *stid_tab;
+ unsigned int nstids;
+ unsigned int stid_base;
+
+ union active_open_entry *atid_tab;
+ unsigned int natids;
+ unsigned int atid_base;
+
+ /*
+ * The following members are accessed R/W so we put them in their own
+ * cache lines.
+ *
+ * XXX We could combine the atid fields above with the lock here since
+ * atids are used once (unlike other tids). OTOH the above fields are
+ * usually in cache due to tid_tab.
+ */
+ spinlock_t atid_lock ____cacheline_aligned_in_smp;
+ union active_open_entry *afree;
+ unsigned int atids_in_use;
+
+ spinlock_t stid_lock ____cacheline_aligned;
+ union listen_entry *sfree;
+ unsigned int stids_in_use;
+};
+
+struct t3c_data {
+ struct list_head list_node;
+ struct t3cdev *dev;
+ unsigned int tx_max_chunk; /* max payload for TX_DATA */
+ unsigned int max_wrs; /* max in-flight WRs per connection */
+ unsigned int nmtus;
+ const unsigned short *mtus;
+ struct tid_info tid_maps;
+
+ struct t3c_tid_entry *tid_release_list;
+ spinlock_t tid_release_lock;
+ struct work_struct tid_release_task;
+
+ struct sk_buff *nofail_skb;
+ unsigned int release_list_incomplete;
+};
+
+/*
+ * t3cdev -> t3c_data accessor
+ */
+#define T3C_DATA(dev) (*(struct t3c_data **)&(dev)->l4opt)
+
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h b/drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h
new file mode 100644
index 000000000000..0d9b0e6dccff
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2004-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FIRMWARE_EXPORTS_H_
+#define _FIRMWARE_EXPORTS_H_
+
+/* WR OPCODES supported by the firmware.
+ */
+#define FW_WROPCODE_FORWARD 0x01
+#define FW_WROPCODE_BYPASS 0x05
+
+#define FW_WROPCODE_TUNNEL_TX_PKT 0x03
+
+#define FW_WROPOCDE_ULPTX_DATA_SGL 0x00
+#define FW_WROPCODE_ULPTX_MEM_READ 0x02
+#define FW_WROPCODE_ULPTX_PKT 0x04
+#define FW_WROPCODE_ULPTX_INVALIDATE 0x06
+
+#define FW_WROPCODE_TUNNEL_RX_PKT 0x07
+
+#define FW_WROPCODE_OFLD_GETTCB_RPL 0x08
+#define FW_WROPCODE_OFLD_CLOSE_CON 0x09
+#define FW_WROPCODE_OFLD_TP_ABORT_CON_REQ 0x0A
+#define FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL 0x0F
+#define FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ 0x0B
+#define FW_WROPCODE_OFLD_TP_ABORT_CON_RPL 0x0C
+#define FW_WROPCODE_OFLD_TX_DATA 0x0D
+#define FW_WROPCODE_OFLD_TX_DATA_ACK 0x0E
+
+#define FW_WROPCODE_RI_RDMA_INIT 0x10
+#define FW_WROPCODE_RI_RDMA_WRITE 0x11
+#define FW_WROPCODE_RI_RDMA_READ_REQ 0x12
+#define FW_WROPCODE_RI_RDMA_READ_RESP 0x13
+#define FW_WROPCODE_RI_SEND 0x14
+#define FW_WROPCODE_RI_TERMINATE 0x15
+#define FW_WROPCODE_RI_RDMA_READ 0x16
+#define FW_WROPCODE_RI_RECEIVE 0x17
+#define FW_WROPCODE_RI_BIND_MW 0x18
+#define FW_WROPCODE_RI_FASTREGISTER_MR 0x19
+#define FW_WROPCODE_RI_LOCAL_INV 0x1A
+#define FW_WROPCODE_RI_MODIFY_QP 0x1B
+#define FW_WROPCODE_RI_BYPASS 0x1C
+
+#define FW_WROPOCDE_RSVD 0x1E
+
+#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR 0x1F
+
+#define FW_WROPCODE_MNGT 0x1D
+#define FW_MNGTOPCODE_PKTSCHED_SET 0x00
+
+/* Maximum size of a WR sent from the host, limited by the SGE.
+ *
+ * Note: WRs coming from ULP or TP are limited only by the CIM.
+ */
+#define FW_WR_SIZE 128
+
+/* Maximum number of outstanding WRs sent from the host. Value must be
+ * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by
+ * offload modules to limit the number of WRs per connection.
+ */
+#define FW_T3_WR_NUM 16
+#define FW_N3_WR_NUM 7
+
+#ifndef N3
+# define FW_WR_NUM FW_T3_WR_NUM
+#else
+# define FW_WR_NUM FW_N3_WR_NUM
+#endif
+
+/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These
+ * queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must
+ * start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START.
+ *
+ * Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent
+ * to RESP Queue[i].
+ */
+#define FW_TUNNEL_NUM 8
+#define FW_TUNNEL_SGEEC_START 8
+#define FW_TUNNEL_TID_START 65544
+
+/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues
+ * must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID'
+ * (or 'uP Token') FW_CTRL_TID_START.
+ *
+ * Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i].
+ */
+#define FW_CTRL_NUM 8
+#define FW_CTRL_SGEEC_START 65528
+#define FW_CTRL_TID_START 65536
+
+/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These
+ * queues must start at SGE Egress Context FW_OFLD_SGEEC_START.
+ *
+ * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for
+ * OFFLOAD Queues, as the host is responsible for providing the correct TID in
+ * every WR.
+ *
+ * Ingress Traffic for OFFLOAD Queue[i] is sent to RESP Queue[i].
+ */
+#define FW_OFLD_NUM 8
+#define FW_OFLD_SGEEC_START 0
+
+/* FW_RI_NUM corresponds to the number of supported RI (RDMA Interface)
+ * Queues. These queues must start at SGE Egress Context FW_RI_SGEEC_START
+ * and at 'TID' FW_RI_TID_START.
+ */
+#define FW_RI_NUM 1
+#define FW_RI_SGEEC_START 65527
+#define FW_RI_TID_START 65552
+
+/*
+ * The RX_PKT queue count and its starting 'TID'.
+ */
+#define FW_RX_PKT_NUM 1
+#define FW_RX_PKT_TID_START 65553
+
+/* FW_WRC_NUM corresponds to the number of Work Request Contexts supported
+ * by the firmware.
+ */
+#define FW_WRC_NUM \
+ (65536 + FW_TUNNEL_NUM + FW_CTRL_NUM + FW_RI_NUM + FW_RX_PKT_NUM)
+
+/*
+ * FW type and version.
+ */
+#define S_FW_VERSION_TYPE 28
+#define M_FW_VERSION_TYPE 0xF
+#define V_FW_VERSION_TYPE(x) ((x) << S_FW_VERSION_TYPE)
+#define G_FW_VERSION_TYPE(x) \
+ (((x) >> S_FW_VERSION_TYPE) & M_FW_VERSION_TYPE)
+
+#define S_FW_VERSION_MAJOR 16
+#define M_FW_VERSION_MAJOR 0xFFF
+#define V_FW_VERSION_MAJOR(x) ((x) << S_FW_VERSION_MAJOR)
+#define G_FW_VERSION_MAJOR(x) \
+ (((x) >> S_FW_VERSION_MAJOR) & M_FW_VERSION_MAJOR)
+
+#define S_FW_VERSION_MINOR 8
+#define M_FW_VERSION_MINOR 0xFF
+#define V_FW_VERSION_MINOR(x) ((x) << S_FW_VERSION_MINOR)
+#define G_FW_VERSION_MINOR(x) \
+ (((x) >> S_FW_VERSION_MINOR) & M_FW_VERSION_MINOR)
+
+#define S_FW_VERSION_MICRO 0
+#define M_FW_VERSION_MICRO 0xFF
+#define V_FW_VERSION_MICRO(x) ((x) << S_FW_VERSION_MICRO)
+#define G_FW_VERSION_MICRO(x) \
+ (((x) >> S_FW_VERSION_MICRO) & M_FW_VERSION_MICRO)
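+
+/*
+ * For example, a firmware version word can be decoded with the G_*
+ * accessors above:
+ *
+ *	type  = G_FW_VERSION_TYPE(vers);
+ *	major = G_FW_VERSION_MAJOR(vers);
+ *	minor = G_FW_VERSION_MINOR(vers);
+ *	micro = G_FW_VERSION_MICRO(vers);
+ */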
+
+#endif /* _FIRMWARE_EXPORTS_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
new file mode 100644
index 000000000000..41540978a173
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -0,0 +1,454 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <net/neighbour.h>
+#include "common.h"
+#include "t3cdev.h"
+#include "cxgb3_defs.h"
+#include "l2t.h"
+#include "t3_cpl.h"
+#include "firmware_exports.h"
+
+#define VLAN_NONE 0xfff
+
+/*
+ * Module locking notes: There is a RW lock protecting the L2 table as a
+ * whole plus a spinlock per L2T entry. Entry lookups and allocations happen
+ * under the protection of the table lock; individual entry changes happen
+ * while holding that entry's spinlock. The table lock nests outside the
+ * entry locks. Allocations of new entries take the table lock as writers so
+ * no other lookups can happen while allocating new entries. Entry updates
+ * take the table lock as readers so multiple entries can be updated in
+ * parallel. An L2T entry is dropped by decrementing its reference count;
+ * dropping can therefore happen in parallel with entry allocation, but no
+ * entry can change state or increment its ref count during allocation as
+ * both of these perform lookups.
+ */
+
+static inline unsigned int vlan_prio(const struct l2t_entry *e)
+{
+ return e->vlan >> 13;
+}
+
+static inline unsigned int arp_hash(u32 key, int ifindex,
+ const struct l2t_data *d)
+{
+ return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
+}
+
+static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
+{
+ neigh_hold(n);
+ if (e->neigh)
+ neigh_release(e->neigh);
+ e->neigh = n;
+}
+
+/*
+ * Set up an L2T entry and send any packets waiting in the arp queue. The
+ * supplied skb is used for the CPL_L2T_WRITE_REQ. Must be called with the
+ * entry locked.
+ */
+static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
+ struct l2t_entry *e)
+{
+ struct cpl_l2t_write_req *req;
+ struct sk_buff *tmp;
+
+ if (!skb) {
+ skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+ }
+
+ req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
+ req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
+ V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
+ V_L2T_W_PRIO(vlan_prio(e)));
+ memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
+ memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
+ skb->priority = CPL_PRIORITY_CONTROL;
+ cxgb3_ofld_send(dev, skb);
+
+ skb_queue_walk_safe(&e->arpq, skb, tmp) {
+ __skb_unlink(skb, &e->arpq);
+ cxgb3_ofld_send(dev, skb);
+ }
+ e->state = L2T_STATE_VALID;
+
+ return 0;
+}
+
+/*
+ * Add a packet to an L2T entry's queue of packets awaiting resolution.
+ * Must be called with the entry's lock held.
+ */
+static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
+{
+ __skb_queue_tail(&e->arpq, skb);
+}
+
+int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
+ struct l2t_entry *e)
+{
+again:
+ switch (e->state) {
+ case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
+ neigh_event_send(e->neigh, NULL);
+ spin_lock_bh(&e->lock);
+ if (e->state == L2T_STATE_STALE)
+ e->state = L2T_STATE_VALID;
+ spin_unlock_bh(&e->lock);
+ /* fall through */
+ case L2T_STATE_VALID: /* fast-path, send the packet on */
+ return cxgb3_ofld_send(dev, skb);
+ case L2T_STATE_RESOLVING:
+ spin_lock_bh(&e->lock);
+ if (e->state != L2T_STATE_RESOLVING) {
+ /* ARP already completed */
+ spin_unlock_bh(&e->lock);
+ goto again;
+ }
+ arpq_enqueue(e, skb);
+ spin_unlock_bh(&e->lock);
+
+ /*
+ * Only the first packet added to the arpq should kick off
+ * resolution. However, because the alloc_skb below can fail,
+ * we allow each packet added to the arpq to retry resolution
+ * as a way of recovering from transient memory exhaustion.
+ * A better way would be to use a work request to retry L2T
+ * entries when there's no memory.
+ */
+ if (!neigh_event_send(e->neigh, NULL)) {
+ skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
+ GFP_ATOMIC);
+ if (!skb)
+ break;
+
+ spin_lock_bh(&e->lock);
+ if (!skb_queue_empty(&e->arpq))
+ setup_l2e_send_pending(dev, skb, e);
+ else /* we lost the race */
+ __kfree_skb(skb);
+ spin_unlock_bh(&e->lock);
+ }
+ }
+ return 0;
+}
+
+EXPORT_SYMBOL(t3_l2t_send_slow);
+
+void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
+{
+again:
+ switch (e->state) {
+ case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
+ neigh_event_send(e->neigh, NULL);
+ spin_lock_bh(&e->lock);
+ if (e->state == L2T_STATE_STALE) {
+ e->state = L2T_STATE_VALID;
+ }
+ spin_unlock_bh(&e->lock);
+ return;
+ case L2T_STATE_VALID: /* fast-path, send the packet on */
+ return;
+ case L2T_STATE_RESOLVING:
+ spin_lock_bh(&e->lock);
+ if (e->state != L2T_STATE_RESOLVING) {
+ /* ARP already completed */
+ spin_unlock_bh(&e->lock);
+ goto again;
+ }
+ spin_unlock_bh(&e->lock);
+
+ /*
+ * Only the first packet added to the arpq should kick off
+ * resolution. However, because the alloc_skb below can fail,
+ * we allow each packet added to the arpq to retry resolution
+ * as a way of recovering from transient memory exhaustion.
+ * A better way would be to use a work request to retry L2T
+ * entries when there's no memory.
+ */
+ neigh_event_send(e->neigh, NULL);
+ }
+}
+
+EXPORT_SYMBOL(t3_l2t_send_event);
+
+/*
+ * Allocate a free L2T entry. Must be called with l2t_data.lock held.
+ */
+static struct l2t_entry *alloc_l2e(struct l2t_data *d)
+{
+ struct l2t_entry *end, *e, **p;
+
+ if (!atomic_read(&d->nfree))
+ return NULL;
+
+ /* there's definitely a free entry */
+ for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
+ if (atomic_read(&e->refcnt) == 0)
+ goto found;
+
+ for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ;
+found:
+ d->rover = e + 1;
+ atomic_dec(&d->nfree);
+
+ /*
+ * The entry we found may be an inactive entry that is
+ * presently in the hash table. We need to remove it.
+ */
+ if (e->state != L2T_STATE_UNUSED) {
+ int hash = arp_hash(e->addr, e->ifindex, d);
+
+ for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
+ if (*p == e) {
+ *p = e->next;
+ break;
+ }
+ e->state = L2T_STATE_UNUSED;
+ }
+ return e;
+}
+
+/*
+ * Called when an L2T entry has no more users. The entry is left in the hash
+ * table since it is likely to be reused but we bump nfree to indicate
+ * that the entry can be reallocated for a different neighbor. We also drop
+ * the existing neighbor reference in case the neighbor is going away and is
+ * waiting on our reference.
+ *
+ * Because entries can be reallocated to other neighbors once their ref count
+ * drops to 0 we need to take the entry's lock to avoid races with a new
+ * incarnation.
+ */
+void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
+{
+ spin_lock_bh(&e->lock);
+ if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
+ if (e->neigh) {
+ neigh_release(e->neigh);
+ e->neigh = NULL;
+ }
+ }
+ spin_unlock_bh(&e->lock);
+ atomic_inc(&d->nfree);
+}
+
+EXPORT_SYMBOL(t3_l2e_free);
+
+/*
+ * Update an L2T entry that was previously used for the same next hop as neigh.
+ * Must be called with softirqs disabled.
+ */
+static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
+{
+ unsigned int nud_state;
+
+ spin_lock(&e->lock); /* avoid race with t3_l2t_free */
+
+ if (neigh != e->neigh)
+ neigh_replace(e, neigh);
+ nud_state = neigh->nud_state;
+ if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
+ !(nud_state & NUD_VALID))
+ e->state = L2T_STATE_RESOLVING;
+ else if (nud_state & NUD_CONNECTED)
+ e->state = L2T_STATE_VALID;
+ else
+ e->state = L2T_STATE_STALE;
+ spin_unlock(&e->lock);
+}
+
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+ struct net_device *dev)
+{
+ struct l2t_entry *e = NULL;
+ struct l2t_data *d;
+ int hash;
+ u32 addr = *(u32 *) neigh->primary_key;
+ int ifidx = neigh->dev->ifindex;
+ struct port_info *p = netdev_priv(dev);
+ int smt_idx = p->port_id;
+
+ rcu_read_lock();
+ d = L2DATA(cdev);
+ if (!d)
+ goto done_rcu;
+
+ hash = arp_hash(addr, ifidx, d);
+
+ write_lock_bh(&d->lock);
+ for (e = d->l2tab[hash].first; e; e = e->next)
+ if (e->addr == addr && e->ifindex == ifidx &&
+ e->smt_idx == smt_idx) {
+ l2t_hold(d, e);
+ if (atomic_read(&e->refcnt) == 1)
+ reuse_entry(e, neigh);
+ goto done;
+ }
+
+ /* Need to allocate a new entry */
+ e = alloc_l2e(d);
+ if (e) {
+ spin_lock(&e->lock); /* avoid race with t3_l2t_free */
+ e->next = d->l2tab[hash].first;
+ d->l2tab[hash].first = e;
+ e->state = L2T_STATE_RESOLVING;
+ e->addr = addr;
+ e->ifindex = ifidx;
+ e->smt_idx = smt_idx;
+ atomic_set(&e->refcnt, 1);
+ neigh_replace(e, neigh);
+ if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
+ e->vlan = vlan_dev_vlan_id(neigh->dev);
+ else
+ e->vlan = VLAN_NONE;
+ spin_unlock(&e->lock);
+ }
+done:
+ write_unlock_bh(&d->lock);
+done_rcu:
+ rcu_read_unlock();
+ return e;
+}
+
+EXPORT_SYMBOL(t3_l2t_get);
+
+/*
+ * Called when address resolution fails for an L2T entry to handle packets
+ * on the arpq head. If a packet specifies a failure handler it is invoked,
+ * otherwise the packet is sent to the offload device.
+ *
+ * XXX: maybe we should abandon the latter behavior and just require a failure
+ * handler.
+ */
+static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff_head *arpq)
+{
+ struct sk_buff *skb, *tmp;
+
+ skb_queue_walk_safe(arpq, skb, tmp) {
+ struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
+
+ __skb_unlink(skb, arpq);
+ if (cb->arp_failure_handler)
+ cb->arp_failure_handler(dev, skb);
+ else
+ cxgb3_ofld_send(dev, skb);
+ }
+}
+
+/*
+ * Called when the host's ARP layer makes a change to some entry that is
+ * loaded into the HW L2 table.
+ */
+void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
+{
+ struct sk_buff_head arpq;
+ struct l2t_entry *e;
+ struct l2t_data *d = L2DATA(dev);
+ u32 addr = *(u32 *) neigh->primary_key;
+ int ifidx = neigh->dev->ifindex;
+ int hash = arp_hash(addr, ifidx, d);
+
+ read_lock_bh(&d->lock);
+ for (e = d->l2tab[hash].first; e; e = e->next)
+ if (e->addr == addr && e->ifindex == ifidx) {
+ spin_lock(&e->lock);
+ goto found;
+ }
+ read_unlock_bh(&d->lock);
+ return;
+
+found:
+ __skb_queue_head_init(&arpq);
+
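+ /*
+ * Note the asymmetric locking: BHs were disabled by read_lock_bh()
+ * above and remain disabled while we hold the entry's spinlock, so
+ * the plain read_unlock() here pairs with spin_unlock_bh() below.
+ */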
+ read_unlock(&d->lock);
+ if (atomic_read(&e->refcnt)) {
+ if (neigh != e->neigh)
+ neigh_replace(e, neigh);
+
+ if (e->state == L2T_STATE_RESOLVING) {
+ if (neigh->nud_state & NUD_FAILED) {
+ skb_queue_splice_init(&e->arpq, &arpq);
+ } else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
+ setup_l2e_send_pending(dev, NULL, e);
+ } else {
+ e->state = neigh->nud_state & NUD_CONNECTED ?
+ L2T_STATE_VALID : L2T_STATE_STALE;
+ if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
+ setup_l2e_send_pending(dev, NULL, e);
+ }
+ }
+ spin_unlock_bh(&e->lock);
+
+ if (!skb_queue_empty(&arpq))
+ handle_failed_resolution(dev, &arpq);
+}
+
+struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
+{
+ struct l2t_data *d;
+ int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
+
+ d = cxgb_alloc_mem(size);
+ if (!d)
+ return NULL;
+
+ d->nentries = l2t_capacity;
+ d->rover = &d->l2tab[1]; /* entry 0 is not used */
+ atomic_set(&d->nfree, l2t_capacity - 1);
+ rwlock_init(&d->lock);
+
+ for (i = 0; i < l2t_capacity; ++i) {
+ d->l2tab[i].idx = i;
+ d->l2tab[i].state = L2T_STATE_UNUSED;
+ __skb_queue_head_init(&d->l2tab[i].arpq);
+ spin_lock_init(&d->l2tab[i].lock);
+ atomic_set(&d->l2tab[i].refcnt, 0);
+ }
+ return d;
+}
+
+void t3_free_l2t(struct l2t_data *d)
+{
+ cxgb_free_mem(d);
+}
+
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
new file mode 100644
index 000000000000..c5f54796e2cb
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CHELSIO_L2T_H
+#define _CHELSIO_L2T_H
+
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include "t3cdev.h"
+
+enum {
+ L2T_STATE_VALID, /* entry is up to date */
+ L2T_STATE_STALE, /* entry may be used but needs revalidation */
+ L2T_STATE_RESOLVING, /* entry needs address resolution */
+ L2T_STATE_UNUSED /* entry not in use */
+};
+
+struct neighbour;
+struct sk_buff;
+
+/*
+ * Each L2T entry plays multiple roles. First of all, it keeps state for the
+ * corresponding entry of the HW L2 table and maintains a queue of offload
+ * packets awaiting address resolution. Second, it is a node of a hash table
+ * chain, where the nodes of the chain are linked together through their next
+ * pointer. Finally, each node is a bucket of a hash table, pointing to the
+ * first element in its chain through its first pointer.
+ */
+struct l2t_entry {
+ u16 state; /* entry state */
+ u16 idx; /* entry index */
+ u32 addr; /* dest IP address */
+ int ifindex; /* neighbor's net_device's ifindex */
+ u16 smt_idx; /* SMT index */
+ u16 vlan; /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
+ struct neighbour *neigh; /* associated neighbour */
+ struct l2t_entry *first; /* start of hash chain */
+ struct l2t_entry *next; /* next l2t_entry on chain */
+ struct sk_buff_head arpq; /* queue of packets awaiting resolution */
+ spinlock_t lock;
+ atomic_t refcnt; /* entry reference count */
+ u8 dmac[6]; /* neighbour's MAC address */
+};
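+
+/*
+ * Illustrative picture of the combined bucket/node layout described above:
+ * l2tab[hash].first heads a chain of entries linked through ->next:
+ *
+ * l2tab[hash].first --> entry A --> entry B --> NULL
+ */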
+
+struct l2t_data {
+ unsigned int nentries; /* number of entries */
+ struct l2t_entry *rover; /* starting point for next allocation */
+ atomic_t nfree; /* number of free entries */
+ rwlock_t lock;
+ struct rcu_head rcu_head; /* to handle rcu cleanup */
+ struct l2t_entry l2tab[0]; /* variable-size table, must be last */
+};
+
+typedef void (*arp_failure_handler_func)(struct t3cdev *dev,
+ struct sk_buff *skb);
+
+/*
+ * Callback stored in an skb to handle address resolution failure.
+ */
+struct l2t_skb_cb {
+ arp_failure_handler_func arp_failure_handler;
+};
+
+#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
+
+static inline void set_arp_failure_handler(struct sk_buff *skb,
+ arp_failure_handler_func hnd)
+{
+ L2T_SKB_CB(skb)->arp_failure_handler = hnd;
+}
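+
+/*
+ * Illustrative use with a hypothetical handler name: install a callback
+ * that runs if address resolution for this skb's L2 entry fails.
+ *
+ * static void my_arp_fail(struct t3cdev *dev, struct sk_buff *skb)
+ * {
+ *         kfree_skb(skb);
+ * }
+ *
+ * set_arp_failure_handler(skb, my_arp_fail);
+ */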
+
+/*
+ * Getting to the L2 data from an offload device. Callers must hold
+ * rcu_read_lock() across the dereference and any use of the result.
+ */
+#define L2DATA(cdev) (rcu_dereference((cdev)->l2opt))
+
+#define W_TCB_L2T_IX 0
+#define S_TCB_L2T_IX 7
+#define M_TCB_L2T_IX 0x7ffULL
+#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
+
+void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
+void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+ struct net_device *dev);
+int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
+ struct l2t_entry *e);
+void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
+struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
+void t3_free_l2t(struct l2t_data *d);
+
+int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
+
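+/*
+ * Fast path: transmit directly once the entry is VALID; any other state
+ * takes the slow path so resolution can be triggered or awaited first.
+ */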
+static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
+ struct l2t_entry *e)
+{
+ if (likely(e->state == L2T_STATE_VALID))
+ return cxgb3_ofld_send(dev, skb);
+ return t3_l2t_send_slow(dev, skb, e);
+}
+
+static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e)
+{
+ struct l2t_data *d;
+
+ rcu_read_lock();
+ d = L2DATA(t);
+
+ if (atomic_dec_and_test(&e->refcnt) && d)
+ t3_l2e_free(d, e);
+
+ rcu_read_unlock();
+}
+
+static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
+{
+ if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
+ atomic_dec(&d->nfree);
+}
+
+#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/mc5.c b/drivers/net/ethernet/chelsio/cxgb3/mc5.c
new file mode 100644
index 000000000000..e13b7fe9d082
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/mc5.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "common.h"
+#include "regs.h"
+
+enum {
+ IDT75P52100 = 4,
+ IDT75N43102 = 5
+};
+
+/* DBGI command mode */
+enum {
+ DBGI_MODE_MBUS = 0,
+ DBGI_MODE_IDT52100 = 5
+};
+
+/* IDT 75P52100 commands */
+#define IDT_CMD_READ 0
+#define IDT_CMD_WRITE 1
+#define IDT_CMD_SEARCH 2
+#define IDT_CMD_LEARN 3
+
+/* IDT LAR register address and value for 144-bit mode (low 32 bits) */
+#define IDT_LAR_ADR0 0x180006
+#define IDT_LAR_MODE144 0xffff0000
+
+/* IDT SCR and SSR addresses (low 32 bits) */
+#define IDT_SCR_ADR0 0x180000
+#define IDT_SSR0_ADR0 0x180002
+#define IDT_SSR1_ADR0 0x180004
+
+/* IDT GMR base address (low 32 bits) */
+#define IDT_GMR_BASE_ADR0 0x180020
+
+/* IDT data and mask array base addresses (low 32 bits) */
+#define IDT_DATARY_BASE_ADR0 0
+#define IDT_MSKARY_BASE_ADR0 0x80000
+
+/* IDT 75N43102 commands */
+#define IDT4_CMD_SEARCH144 3
+#define IDT4_CMD_WRITE 4
+#define IDT4_CMD_READ 5
+
+/* IDT 75N43102 SCR address (low 32 bits) */
+#define IDT4_SCR_ADR0 0x3
+
+/* IDT 75N43102 GMR base addresses (low 32 bits) */
+#define IDT4_GMR_BASE0 0x10
+#define IDT4_GMR_BASE1 0x20
+#define IDT4_GMR_BASE2 0x30
+
+/* IDT 75N43102 data and mask array base addresses (low 32 bits) */
+#define IDT4_DATARY_BASE_ADR0 0x1000000
+#define IDT4_MSKARY_BASE_ADR0 0x2000000
+
+#define MAX_WRITE_ATTEMPTS 5
+
+#define MAX_ROUTES 2048
+
+/*
+ * Issue a command to the TCAM and wait for its completion. The address and
+ * any data required by the command must have been set up by the caller.
+ */
+static int mc5_cmd_write(struct adapter *adapter, u32 cmd)
+{
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd);
+ return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS,
+ F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
+}
+
+static inline void dbgi_wr_addr3(struct adapter *adapter, u32 v1, u32 v2,
+ u32 v3)
+{
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1);
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2);
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3);
+}
+
+static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
+ u32 v3)
+{
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA0, v1);
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA1, v2);
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
+}
+
+static inline void dbgi_rd_rsp3(struct adapter *adapter, u32 *v1, u32 *v2,
+ u32 *v3)
+{
+ *v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0);
+ *v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1);
+ *v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2);
+}
+
+/*
+ * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
+ * command cmd. The data to be written must have been set up by the caller.
+ * Returns -1 on failure, 0 on success.
+ */
+static int mc5_write(struct adapter *adapter, u32 addr_lo, u32 cmd)
+{
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo);
+ if (mc5_cmd_write(adapter, cmd) == 0)
+ return 0;
+ CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n",
+ addr_lo);
+ return -1;
+}
+
+static int init_mask_data_array(struct mc5 *mc5, u32 mask_array_base,
+ u32 data_array_base, u32 write_cmd,
+ int addr_shift)
+{
+ unsigned int i;
+ struct adapter *adap = mc5->adapter;
+
+ /*
+ * We need the size of the TCAM data and mask arrays in terms of
+ * 72-bit entries.
+ */
+ unsigned int size72 = mc5->tcam_size;
+ unsigned int server_base = t3_read_reg(adap, A_MC5_DB_SERVER_INDEX);
+
+ if (mc5->mode == MC5_MODE_144_BIT) {
+ size72 *= 2; /* 1 144-bit entry is 2 72-bit entries */
+ server_base *= 2;
+ }
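+ /* e.g. a 32K-entry TCAM in 144-bit mode yields size72 = 64K words */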
+
+ /* Clear the data array */
+ dbgi_wr_data3(adap, 0, 0, 0);
+ for (i = 0; i < size72; i++)
+ if (mc5_write(adap, data_array_base + (i << addr_shift),
+ write_cmd))
+ return -1;
+
+ /* Initialize the mask array. */
+ dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
+ for (i = 0; i < size72; i++) {
+ if (i == server_base) /* entering server or routing region */
+ t3_write_reg(adap, A_MC5_DB_DBGI_REQ_DATA0,
+ mc5->mode == MC5_MODE_144_BIT ?
+ 0xfffffff9 : 0xfffffffd);
+ if (mc5_write(adap, mask_array_base + (i << addr_shift),
+ write_cmd))
+ return -1;
+ }
+ return 0;
+}
+
+static int init_idt52100(struct mc5 *mc5)
+{
+ int i;
+ struct adapter *adap = mc5->adapter;
+
+ t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
+ V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15));
+ t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 2);
+
+ /*
+ * Use GMRs 14-15 for ELOOKUP, GMRs 12-13 for SYN lookups, and
+ * GMRs 8-9 for ACK- and AOPEN searches.
+ */
+ t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT_CMD_WRITE);
+ t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT_CMD_WRITE);
+ t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, IDT_CMD_SEARCH);
+ t3_write_reg(adap, A_MC5_DB_AOPEN_LRN_CMD, IDT_CMD_LEARN);
+ t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT_CMD_SEARCH | 0x6000);
+ t3_write_reg(adap, A_MC5_DB_SYN_LRN_CMD, IDT_CMD_LEARN);
+ t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT_CMD_SEARCH);
+ t3_write_reg(adap, A_MC5_DB_ACK_LRN_CMD, IDT_CMD_LEARN);
+ t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT_CMD_SEARCH);
+ t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT_CMD_SEARCH | 0x7000);
+ t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT_CMD_WRITE);
+ t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT_CMD_READ);
+
+ /* Set DBGI command mode for IDT TCAM. */
+ t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
+
+ /* Set up LAR */
+ dbgi_wr_data3(adap, IDT_LAR_MODE144, 0, 0);
+ if (mc5_write(adap, IDT_LAR_ADR0, IDT_CMD_WRITE))
+ goto err;
+
+ /* Set up SSRs */
+ dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0);
+ if (mc5_write(adap, IDT_SSR0_ADR0, IDT_CMD_WRITE) ||
+ mc5_write(adap, IDT_SSR1_ADR0, IDT_CMD_WRITE))
+ goto err;
+
+ /* Set up GMRs */
+ for (i = 0; i < 32; ++i) {
+ if (i >= 12 && i < 15)
+ dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
+ else if (i == 15)
+ dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
+ else
+ dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
+
+ if (mc5_write(adap, IDT_GMR_BASE_ADR0 + i, IDT_CMD_WRITE))
+ goto err;
+ }
+
+ /* Set up SCR */
+ dbgi_wr_data3(adap, 1, 0, 0);
+ if (mc5_write(adap, IDT_SCR_ADR0, IDT_CMD_WRITE))
+ goto err;
+
+ return init_mask_data_array(mc5, IDT_MSKARY_BASE_ADR0,
+ IDT_DATARY_BASE_ADR0, IDT_CMD_WRITE, 0);
+err:
+ return -EIO;
+}
+
+static int init_idt43102(struct mc5 *mc5)
+{
+ int i;
+ struct adapter *adap = mc5->adapter;
+
+ t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
+ adap->params.rev == 0 ? V_RDLAT(0xd) | V_SRCHLAT(0x11) :
+ V_RDLAT(0xd) | V_SRCHLAT(0x12));
+
+ /*
+ * Use GMRs 24-25 for ELOOKUP, GMRs 20-21 for SYN lookups, and no mask
+ * for ACK- and AOPEN searches.
+ */
+ t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT4_CMD_WRITE);
+ t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT4_CMD_WRITE);
+ t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD,
+ IDT4_CMD_SEARCH144 | 0x3800);
+ t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT4_CMD_SEARCH144);
+ t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT4_CMD_SEARCH144 | 0x3800);
+ t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x3800);
+ t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x800);
+ t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT4_CMD_WRITE);
+ t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT4_CMD_READ);
+
+ t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 3);
+
+ /* Set DBGI command mode for IDT TCAM. */
+ t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
+
+ /* Set up GMRs */
+ dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
+ for (i = 0; i < 7; ++i)
+ if (mc5_write(adap, IDT4_GMR_BASE0 + i, IDT4_CMD_WRITE))
+ goto err;
+
+ for (i = 0; i < 4; ++i)
+ if (mc5_write(adap, IDT4_GMR_BASE2 + i, IDT4_CMD_WRITE))
+ goto err;
+
+ dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
+ if (mc5_write(adap, IDT4_GMR_BASE1, IDT4_CMD_WRITE) ||
+ mc5_write(adap, IDT4_GMR_BASE1 + 1, IDT4_CMD_WRITE) ||
+ mc5_write(adap, IDT4_GMR_BASE1 + 4, IDT4_CMD_WRITE))
+ goto err;
+
+ dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
+ if (mc5_write(adap, IDT4_GMR_BASE1 + 5, IDT4_CMD_WRITE))
+ goto err;
+
+ /* Set up SCR */
+ dbgi_wr_data3(adap, 0xf0000000, 0, 0);
+ if (mc5_write(adap, IDT4_SCR_ADR0, IDT4_CMD_WRITE))
+ goto err;
+
+ return init_mask_data_array(mc5, IDT4_MSKARY_BASE_ADR0,
+ IDT4_DATARY_BASE_ADR0, IDT4_CMD_WRITE, 1);
+err:
+ return -EIO;
+}
+
+/* Put MC5 in DBGI mode. */
+static inline void mc5_dbgi_mode_enable(const struct mc5 *mc5)
+{
+ t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
+ V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_DBGIEN);
+}
+
+/* Put MC5 in M-Bus mode. */
+static void mc5_dbgi_mode_disable(const struct mc5 *mc5)
+{
+ t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
+ V_TMMODE(mc5->mode == MC5_MODE_72_BIT) |
+ V_COMPEN(mc5->mode == MC5_MODE_72_BIT) |
+ V_PRTYEN(mc5->parity_enabled) | F_MBUSEN);
+}
+
+/*
+ * Initialization that requires the OS and protocol layers to already
+ * be initialized goes here.
+ */
+int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
+ unsigned int nroutes)
+{
+ u32 cfg;
+ int err;
+ unsigned int tcam_size = mc5->tcam_size;
+ struct adapter *adap = mc5->adapter;
+
+ if (!tcam_size)
+ return 0;
+
+ if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
+ return -EINVAL;
+
+ /* Reset the TCAM */
+ cfg = t3_read_reg(adap, A_MC5_DB_CONFIG) & ~F_TMMODE;
+ cfg |= V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_TMRST;
+ t3_write_reg(adap, A_MC5_DB_CONFIG, cfg);
+ if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) {
+ CH_ERR(adap, "TCAM reset timed out\n");
+ return -1;
+ }
+
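+ /*
+ * Partition the TCAM from the top down: routes occupy the highest
+ * indices, filters come next, and servers sit below the filters.
+ */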
+ t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes);
+ t3_write_reg(adap, A_MC5_DB_FILTER_TABLE,
+ tcam_size - nroutes - nfilters);
+ t3_write_reg(adap, A_MC5_DB_SERVER_INDEX,
+ tcam_size - nroutes - nfilters - nservers);
+
+ mc5->parity_enabled = 1;
+
+ /* All the TCAM addresses we access have only the low 32 bits non-zero */
+ t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0);
+ t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0);
+
+ mc5_dbgi_mode_enable(mc5);
+
+ switch (mc5->part_type) {
+ case IDT75P52100:
+ err = init_idt52100(mc5);
+ break;
+ case IDT75N43102:
+ err = init_idt43102(mc5);
+ break;
+ default:
+ CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type);
+ err = -EINVAL;
+ break;
+ }
+
+ mc5_dbgi_mode_disable(mc5);
+ return err;
+}
+
+#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR)
+
+/*
+ * MC5 interrupt handler
+ */
+void t3_mc5_intr_handler(struct mc5 *mc5)
+{
+ struct adapter *adap = mc5->adapter;
+ u32 cause = t3_read_reg(adap, A_MC5_DB_INT_CAUSE);
+
+ if ((cause & F_PARITYERR) && mc5->parity_enabled) {
+ CH_ALERT(adap, "MC5 parity error\n");
+ mc5->stats.parity_err++;
+ }
+
+ if (cause & F_REQQPARERR) {
+ CH_ALERT(adap, "MC5 request queue parity error\n");
+ mc5->stats.reqq_parity_err++;
+ }
+
+ if (cause & F_DISPQPARERR) {
+ CH_ALERT(adap, "MC5 dispatch queue parity error\n");
+ mc5->stats.dispq_parity_err++;
+ }
+
+ if (cause & F_ACTRGNFULL)
+ mc5->stats.active_rgn_full++;
+ if (cause & F_NFASRCHFAIL)
+ mc5->stats.nfa_srch_err++;
+ if (cause & F_UNKNOWNCMD)
+ mc5->stats.unknown_cmd++;
+ if (cause & F_DELACTEMPTY)
+ mc5->stats.del_act_empty++;
+ if (cause & MC5_INT_FATAL)
+ t3_fatal_err(adap);
+
+ t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
+}
+
+void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
+{
+#define K * 1024
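+ /* so that "64 K" below expands to "64 * 1024" */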
+
+ static unsigned int tcam_part_size[] = { /* in K 72-bit entries */
+ 64 K, 128 K, 256 K, 32 K
+ };
+
+#undef K
+
+ u32 cfg = t3_read_reg(adapter, A_MC5_DB_CONFIG);
+
+ mc5->adapter = adapter;
+ mc5->mode = (unsigned char)mode;
+ mc5->part_type = (unsigned char)G_TMTYPE(cfg);
+ if (cfg & F_TMTYPEHI)
+ mc5->part_type |= 4;
+
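+ /*
+ * e.g. G_TMPARTSIZE(cfg) == 2 selects 256K 72-bit entries, halved
+ * below when the TCAM runs in 144-bit mode.
+ */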
+ mc5->tcam_size = tcam_part_size[G_TMPARTSIZE(cfg)];
+ if (mode == MC5_MODE_144_BIT)
+ mc5->tcam_size /= 2;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/regs.h b/drivers/net/ethernet/chelsio/cxgb3/regs.h
new file mode 100644
index 000000000000..6990f6c65221
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/regs.h
@@ -0,0 +1,2598 @@
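+/*
+ * Field helper naming used throughout this file, observable from the
+ * definitions below:
+ * S_FOO    - bit position (shift) of field FOO
+ * M_FOO    - right-justified mask of field FOO
+ * V_FOO(x) - value x shifted into field FOO's position
+ * F_FOO    - single-bit field FOO set to 1
+ * G_FOO(x) - extract field FOO from a register word x
+ *
+ * Illustrative composition, not from the driver:
+ * u32 v = F_GLOBALENABLE | V_PKTSHIFT(2);
+ */
+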
+#define A_SG_CONTROL 0x0
+
+#define S_CONGMODE 29
+#define V_CONGMODE(x) ((x) << S_CONGMODE)
+#define F_CONGMODE V_CONGMODE(1U)
+
+#define S_TNLFLMODE 28
+#define V_TNLFLMODE(x) ((x) << S_TNLFLMODE)
+#define F_TNLFLMODE V_TNLFLMODE(1U)
+
+#define S_FATLPERREN 27
+#define V_FATLPERREN(x) ((x) << S_FATLPERREN)
+#define F_FATLPERREN V_FATLPERREN(1U)
+
+#define S_DROPPKT 20
+#define V_DROPPKT(x) ((x) << S_DROPPKT)
+#define F_DROPPKT V_DROPPKT(1U)
+
+#define S_EGRGENCTRL 19
+#define V_EGRGENCTRL(x) ((x) << S_EGRGENCTRL)
+#define F_EGRGENCTRL V_EGRGENCTRL(1U)
+
+#define S_USERSPACESIZE 14
+#define M_USERSPACESIZE 0x1f
+#define V_USERSPACESIZE(x) ((x) << S_USERSPACESIZE)
+
+#define S_HOSTPAGESIZE 11
+#define M_HOSTPAGESIZE 0x7
+#define V_HOSTPAGESIZE(x) ((x) << S_HOSTPAGESIZE)
+
+#define S_FLMODE 9
+#define V_FLMODE(x) ((x) << S_FLMODE)
+#define F_FLMODE V_FLMODE(1U)
+
+#define S_PKTSHIFT 6
+#define M_PKTSHIFT 0x7
+#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT)
+
+#define S_ONEINTMULTQ 5
+#define V_ONEINTMULTQ(x) ((x) << S_ONEINTMULTQ)
+#define F_ONEINTMULTQ V_ONEINTMULTQ(1U)
+
+#define S_BIGENDIANINGRESS 2
+#define V_BIGENDIANINGRESS(x) ((x) << S_BIGENDIANINGRESS)
+#define F_BIGENDIANINGRESS V_BIGENDIANINGRESS(1U)
+
+#define S_ISCSICOALESCING 1
+#define V_ISCSICOALESCING(x) ((x) << S_ISCSICOALESCING)
+#define F_ISCSICOALESCING V_ISCSICOALESCING(1U)
+
+#define S_GLOBALENABLE 0
+#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE)
+#define F_GLOBALENABLE V_GLOBALENABLE(1U)
+
+#define S_AVOIDCQOVFL 24
+#define V_AVOIDCQOVFL(x) ((x) << S_AVOIDCQOVFL)
+#define F_AVOIDCQOVFL V_AVOIDCQOVFL(1U)
+
+#define S_OPTONEINTMULTQ 23
+#define V_OPTONEINTMULTQ(x) ((x) << S_OPTONEINTMULTQ)
+#define F_OPTONEINTMULTQ V_OPTONEINTMULTQ(1U)
+
+#define S_CQCRDTCTRL 22
+#define V_CQCRDTCTRL(x) ((x) << S_CQCRDTCTRL)
+#define F_CQCRDTCTRL V_CQCRDTCTRL(1U)
+
+#define A_SG_KDOORBELL 0x4
+
+#define S_SELEGRCNTX 31
+#define V_SELEGRCNTX(x) ((x) << S_SELEGRCNTX)
+#define F_SELEGRCNTX V_SELEGRCNTX(1U)
+
+#define S_EGRCNTX 0
+#define M_EGRCNTX 0xffff
+#define V_EGRCNTX(x) ((x) << S_EGRCNTX)
+
+#define A_SG_GTS 0x8
+
+#define S_RSPQ 29
+#define M_RSPQ 0x7
+#define V_RSPQ(x) ((x) << S_RSPQ)
+#define G_RSPQ(x) (((x) >> S_RSPQ) & M_RSPQ)
+
+#define S_NEWTIMER 16
+#define M_NEWTIMER 0x1fff
+#define V_NEWTIMER(x) ((x) << S_NEWTIMER)
+
+#define S_NEWINDEX 0
+#define M_NEWINDEX 0xffff
+#define V_NEWINDEX(x) ((x) << S_NEWINDEX)
+
+#define A_SG_CONTEXT_CMD 0xc
+
+#define S_CONTEXT_CMD_OPCODE 28
+#define M_CONTEXT_CMD_OPCODE 0xf
+#define V_CONTEXT_CMD_OPCODE(x) ((x) << S_CONTEXT_CMD_OPCODE)
+
+#define S_CONTEXT_CMD_BUSY 27
+#define V_CONTEXT_CMD_BUSY(x) ((x) << S_CONTEXT_CMD_BUSY)
+#define F_CONTEXT_CMD_BUSY V_CONTEXT_CMD_BUSY(1U)
+
+#define S_CQ_CREDIT 20
+#define M_CQ_CREDIT 0x7f
+#define V_CQ_CREDIT(x) ((x) << S_CQ_CREDIT)
+#define G_CQ_CREDIT(x) (((x) >> S_CQ_CREDIT) & M_CQ_CREDIT)
+
+#define S_CQ 19
+#define V_CQ(x) ((x) << S_CQ)
+#define F_CQ V_CQ(1U)
+
+#define S_RESPONSEQ 18
+#define V_RESPONSEQ(x) ((x) << S_RESPONSEQ)
+#define F_RESPONSEQ V_RESPONSEQ(1U)
+
+#define S_EGRESS 17
+#define V_EGRESS(x) ((x) << S_EGRESS)
+#define F_EGRESS V_EGRESS(1U)
+
+#define S_FREELIST 16
+#define V_FREELIST(x) ((x) << S_FREELIST)
+#define F_FREELIST V_FREELIST(1U)
+
+#define S_CONTEXT 0
+#define M_CONTEXT 0xffff
+#define V_CONTEXT(x) ((x) << S_CONTEXT)
+
+#define G_CONTEXT(x) (((x) >> S_CONTEXT) & M_CONTEXT)
+
+#define A_SG_CONTEXT_DATA0 0x10
+
+#define A_SG_CONTEXT_DATA1 0x14
+
+#define A_SG_CONTEXT_DATA2 0x18
+
+#define A_SG_CONTEXT_DATA3 0x1c
+
+#define A_SG_CONTEXT_MASK0 0x20
+
+#define A_SG_CONTEXT_MASK1 0x24
+
+#define A_SG_CONTEXT_MASK2 0x28
+
+#define A_SG_CONTEXT_MASK3 0x2c
+
+#define A_SG_RSPQ_CREDIT_RETURN 0x30
+
+#define S_CREDITS 0
+#define M_CREDITS 0xffff
+#define V_CREDITS(x) ((x) << S_CREDITS)
+
+#define A_SG_DATA_INTR 0x34
+
+#define S_ERRINTR 31
+#define V_ERRINTR(x) ((x) << S_ERRINTR)
+#define F_ERRINTR V_ERRINTR(1U)
+
+#define A_SG_HI_DRB_HI_THRSH 0x38
+
+#define A_SG_HI_DRB_LO_THRSH 0x3c
+
+#define A_SG_LO_DRB_HI_THRSH 0x40
+
+#define A_SG_LO_DRB_LO_THRSH 0x44
+
+#define A_SG_RSPQ_FL_STATUS 0x4c
+
+#define S_RSPQ0DISABLED 8
+
+#define S_FL0EMPTY 16
+#define V_FL0EMPTY(x) ((x) << S_FL0EMPTY)
+#define F_FL0EMPTY V_FL0EMPTY(1U)
+
+#define A_SG_EGR_RCQ_DRB_THRSH 0x54
+
+#define S_HIRCQDRBTHRSH 16
+#define M_HIRCQDRBTHRSH 0x7ff
+#define V_HIRCQDRBTHRSH(x) ((x) << S_HIRCQDRBTHRSH)
+
+#define S_LORCQDRBTHRSH 0
+#define M_LORCQDRBTHRSH 0x7ff
+#define V_LORCQDRBTHRSH(x) ((x) << S_LORCQDRBTHRSH)
+
+#define A_SG_EGR_CNTX_BADDR 0x58
+
+#define A_SG_INT_CAUSE 0x5c
+
+#define S_HIRCQPARITYERROR 31
+#define V_HIRCQPARITYERROR(x) ((x) << S_HIRCQPARITYERROR)
+#define F_HIRCQPARITYERROR V_HIRCQPARITYERROR(1U)
+
+#define S_LORCQPARITYERROR 30
+#define V_LORCQPARITYERROR(x) ((x) << S_LORCQPARITYERROR)
+#define F_LORCQPARITYERROR V_LORCQPARITYERROR(1U)
+
+#define S_HIDRBPARITYERROR 29
+#define V_HIDRBPARITYERROR(x) ((x) << S_HIDRBPARITYERROR)
+#define F_HIDRBPARITYERROR V_HIDRBPARITYERROR(1U)
+
+#define S_LODRBPARITYERROR 28
+#define V_LODRBPARITYERROR(x) ((x) << S_LODRBPARITYERROR)
+#define F_LODRBPARITYERROR V_LODRBPARITYERROR(1U)
+
+#define S_FLPARITYERROR 22
+#define M_FLPARITYERROR 0x3f
+#define V_FLPARITYERROR(x) ((x) << S_FLPARITYERROR)
+#define G_FLPARITYERROR(x) (((x) >> S_FLPARITYERROR) & M_FLPARITYERROR)
+
+#define S_ITPARITYERROR 20
+#define M_ITPARITYERROR 0x3
+#define V_ITPARITYERROR(x) ((x) << S_ITPARITYERROR)
+#define G_ITPARITYERROR(x) (((x) >> S_ITPARITYERROR) & M_ITPARITYERROR)
+
+#define S_IRPARITYERROR 19
+#define V_IRPARITYERROR(x) ((x) << S_IRPARITYERROR)
+#define F_IRPARITYERROR V_IRPARITYERROR(1U)
+
+#define S_RCPARITYERROR 18
+#define V_RCPARITYERROR(x) ((x) << S_RCPARITYERROR)
+#define F_RCPARITYERROR V_RCPARITYERROR(1U)
+
+#define S_OCPARITYERROR 17
+#define V_OCPARITYERROR(x) ((x) << S_OCPARITYERROR)
+#define F_OCPARITYERROR V_OCPARITYERROR(1U)
+
+#define S_CPPARITYERROR 16
+#define V_CPPARITYERROR(x) ((x) << S_CPPARITYERROR)
+#define F_CPPARITYERROR V_CPPARITYERROR(1U)
+
+#define S_R_REQ_FRAMINGERROR 15
+#define V_R_REQ_FRAMINGERROR(x) ((x) << S_R_REQ_FRAMINGERROR)
+#define F_R_REQ_FRAMINGERROR V_R_REQ_FRAMINGERROR(1U)
+
+#define S_UC_REQ_FRAMINGERROR 14
+#define V_UC_REQ_FRAMINGERROR(x) ((x) << S_UC_REQ_FRAMINGERROR)
+#define F_UC_REQ_FRAMINGERROR V_UC_REQ_FRAMINGERROR(1U)
+
+#define S_HICTLDRBDROPERR 13
+#define V_HICTLDRBDROPERR(x) ((x) << S_HICTLDRBDROPERR)
+#define F_HICTLDRBDROPERR V_HICTLDRBDROPERR(1U)
+
+#define S_LOCTLDRBDROPERR 12
+#define V_LOCTLDRBDROPERR(x) ((x) << S_LOCTLDRBDROPERR)
+#define F_LOCTLDRBDROPERR V_LOCTLDRBDROPERR(1U)
+
+#define S_HIPIODRBDROPERR 11
+#define V_HIPIODRBDROPERR(x) ((x) << S_HIPIODRBDROPERR)
+#define F_HIPIODRBDROPERR V_HIPIODRBDROPERR(1U)
+
+#define S_LOPIODRBDROPERR 10
+#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR)
+#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U)
+
+#define S_HIPRIORITYDBFULL 7
+#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL)
+#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U)
+
+#define S_HIPRIORITYDBEMPTY 6
+#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY)
+#define F_HIPRIORITYDBEMPTY V_HIPRIORITYDBEMPTY(1U)
+
+#define S_LOPRIORITYDBFULL 5
+#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL)
+#define F_LOPRIORITYDBFULL V_LOPRIORITYDBFULL(1U)
+
+#define S_LOPRIORITYDBEMPTY 4
+#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY)
+#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U)
+
+#define S_RSPQDISABLED 3
+#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
+#define F_RSPQDISABLED V_RSPQDISABLED(1U)
+
+#define S_RSPQCREDITOVERFOW 2
+#define V_RSPQCREDITOVERFOW(x) ((x) << S_RSPQCREDITOVERFOW)
+#define F_RSPQCREDITOVERFOW V_RSPQCREDITOVERFOW(1U)
+
+#define S_FLEMPTY 1
+#define V_FLEMPTY(x) ((x) << S_FLEMPTY)
+#define F_FLEMPTY V_FLEMPTY(1U)
+
+#define A_SG_INT_ENABLE 0x60
+
+#define A_SG_CMDQ_CREDIT_TH 0x64
+
+#define S_TIMEOUT 8
+#define M_TIMEOUT 0xffffff
+#define V_TIMEOUT(x) ((x) << S_TIMEOUT)
+
+#define S_THRESHOLD 0
+#define M_THRESHOLD 0xff
+#define V_THRESHOLD(x) ((x) << S_THRESHOLD)
+
+#define A_SG_TIMER_TICK 0x68
+
+#define A_SG_CQ_CONTEXT_BADDR 0x6c
+
+#define A_SG_OCO_BASE 0x70
+
+#define S_BASE1 16
+#define M_BASE1 0xffff
+#define V_BASE1(x) ((x) << S_BASE1)
+
+#define A_SG_DRB_PRI_THRESH 0x74
+
+#define A_PCIX_INT_ENABLE 0x80
+
+#define S_MSIXPARERR 22
+#define M_MSIXPARERR 0x7
+#define V_MSIXPARERR(x) ((x) << S_MSIXPARERR)
+
+#define S_CFPARERR 18
+#define M_CFPARERR 0xf
+#define V_CFPARERR(x) ((x) << S_CFPARERR)
+
+#define S_RFPARERR 14
+#define M_RFPARERR 0xf
+#define V_RFPARERR(x) ((x) << S_RFPARERR)
+
+#define S_WFPARERR 12
+#define M_WFPARERR 0x3
+#define V_WFPARERR(x) ((x) << S_WFPARERR)
+
+#define S_PIOPARERR 11
+#define V_PIOPARERR(x) ((x) << S_PIOPARERR)
+#define F_PIOPARERR V_PIOPARERR(1U)
+
+#define S_DETUNCECCERR 10
+#define V_DETUNCECCERR(x) ((x) << S_DETUNCECCERR)
+#define F_DETUNCECCERR V_DETUNCECCERR(1U)
+
+#define S_DETCORECCERR 9
+#define V_DETCORECCERR(x) ((x) << S_DETCORECCERR)
+#define F_DETCORECCERR V_DETCORECCERR(1U)
+
+#define S_RCVSPLCMPERR 8
+#define V_RCVSPLCMPERR(x) ((x) << S_RCVSPLCMPERR)
+#define F_RCVSPLCMPERR V_RCVSPLCMPERR(1U)
+
+#define S_UNXSPLCMP 7
+#define V_UNXSPLCMP(x) ((x) << S_UNXSPLCMP)
+#define F_UNXSPLCMP V_UNXSPLCMP(1U)
+
+#define S_SPLCMPDIS 6
+#define V_SPLCMPDIS(x) ((x) << S_SPLCMPDIS)
+#define F_SPLCMPDIS V_SPLCMPDIS(1U)
+
+#define S_DETPARERR 5
+#define V_DETPARERR(x) ((x) << S_DETPARERR)
+#define F_DETPARERR V_DETPARERR(1U)
+
+#define S_SIGSYSERR 4
+#define V_SIGSYSERR(x) ((x) << S_SIGSYSERR)
+#define F_SIGSYSERR V_SIGSYSERR(1U)
+
+#define S_RCVMSTABT 3
+#define V_RCVMSTABT(x) ((x) << S_RCVMSTABT)
+#define F_RCVMSTABT V_RCVMSTABT(1U)
+
+#define S_RCVTARABT 2
+#define V_RCVTARABT(x) ((x) << S_RCVTARABT)
+#define F_RCVTARABT V_RCVTARABT(1U)
+
+#define S_SIGTARABT 1
+#define V_SIGTARABT(x) ((x) << S_SIGTARABT)
+#define F_SIGTARABT V_SIGTARABT(1U)
+
+#define S_MSTDETPARERR 0
+#define V_MSTDETPARERR(x) ((x) << S_MSTDETPARERR)
+#define F_MSTDETPARERR V_MSTDETPARERR(1U)
+
+#define A_PCIX_INT_CAUSE 0x84
+
+#define A_PCIX_CFG 0x88
+
+#define S_DMASTOPEN 19
+#define V_DMASTOPEN(x) ((x) << S_DMASTOPEN)
+#define F_DMASTOPEN V_DMASTOPEN(1U)
+
+#define S_CLIDECEN 18
+#define V_CLIDECEN(x) ((x) << S_CLIDECEN)
+#define F_CLIDECEN V_CLIDECEN(1U)
+
+#define A_PCIX_MODE 0x8c
+
+#define S_PCLKRANGE 6
+#define M_PCLKRANGE 0x3
+#define V_PCLKRANGE(x) ((x) << S_PCLKRANGE)
+#define G_PCLKRANGE(x) (((x) >> S_PCLKRANGE) & M_PCLKRANGE)
+
+#define S_PCIXINITPAT 2
+#define M_PCIXINITPAT 0xf
+#define V_PCIXINITPAT(x) ((x) << S_PCIXINITPAT)
+#define G_PCIXINITPAT(x) (((x) >> S_PCIXINITPAT) & M_PCIXINITPAT)
+
+#define S_64BIT 0
+#define V_64BIT(x) ((x) << S_64BIT)
+#define F_64BIT V_64BIT(1U)
+
+#define A_PCIE_INT_ENABLE 0x80
+
+#define S_BISTERR 15
+#define M_BISTERR 0xff
+#define V_BISTERR(x) ((x) << S_BISTERR)
+
+#define S_TXPARERR 18
+#define V_TXPARERR(x) ((x) << S_TXPARERR)
+#define F_TXPARERR V_TXPARERR(1U)
+
+#define S_RXPARERR 17
+#define V_RXPARERR(x) ((x) << S_RXPARERR)
+#define F_RXPARERR V_RXPARERR(1U)
+
+#define S_RETRYLUTPARERR 16
+#define V_RETRYLUTPARERR(x) ((x) << S_RETRYLUTPARERR)
+#define F_RETRYLUTPARERR V_RETRYLUTPARERR(1U)
+
+#define S_RETRYBUFPARERR 15
+#define V_RETRYBUFPARERR(x) ((x) << S_RETRYBUFPARERR)
+#define F_RETRYBUFPARERR V_RETRYBUFPARERR(1U)
+
+#define S_PCIE_MSIXPARERR 12
+#define M_PCIE_MSIXPARERR 0x7
+#define V_PCIE_MSIXPARERR(x) ((x) << S_PCIE_MSIXPARERR)
+
+#define S_PCIE_CFPARERR 11
+#define V_PCIE_CFPARERR(x) ((x) << S_PCIE_CFPARERR)
+#define F_PCIE_CFPARERR V_PCIE_CFPARERR(1U)
+
+#define S_PCIE_RFPARERR 10
+#define V_PCIE_RFPARERR(x) ((x) << S_PCIE_RFPARERR)
+#define F_PCIE_RFPARERR V_PCIE_RFPARERR(1U)
+
+#define S_PCIE_WFPARERR 9
+#define V_PCIE_WFPARERR(x) ((x) << S_PCIE_WFPARERR)
+#define F_PCIE_WFPARERR V_PCIE_WFPARERR(1U)
+
+#define S_PCIE_PIOPARERR 8
+#define V_PCIE_PIOPARERR(x) ((x) << S_PCIE_PIOPARERR)
+#define F_PCIE_PIOPARERR V_PCIE_PIOPARERR(1U)
+
+#define S_UNXSPLCPLERRC 7
+#define V_UNXSPLCPLERRC(x) ((x) << S_UNXSPLCPLERRC)
+#define F_UNXSPLCPLERRC V_UNXSPLCPLERRC(1U)
+
+#define S_UNXSPLCPLERRR 6
+#define V_UNXSPLCPLERRR(x) ((x) << S_UNXSPLCPLERRR)
+#define F_UNXSPLCPLERRR V_UNXSPLCPLERRR(1U)
+
+#define S_PEXERR 0
+#define V_PEXERR(x) ((x) << S_PEXERR)
+#define F_PEXERR V_PEXERR(1U)
+
+#define A_PCIE_INT_CAUSE 0x84
+
+#define S_PCIE_DMASTOPEN 24
+#define V_PCIE_DMASTOPEN(x) ((x) << S_PCIE_DMASTOPEN)
+#define F_PCIE_DMASTOPEN V_PCIE_DMASTOPEN(1U)
+
+#define A_PCIE_CFG 0x88
+
+#define S_ENABLELINKDWNDRST 21
+#define V_ENABLELINKDWNDRST(x) ((x) << S_ENABLELINKDWNDRST)
+#define F_ENABLELINKDWNDRST V_ENABLELINKDWNDRST(1U)
+
+#define S_ENABLELINKDOWNRST 20
+#define V_ENABLELINKDOWNRST(x) ((x) << S_ENABLELINKDOWNRST)
+#define F_ENABLELINKDOWNRST V_ENABLELINKDOWNRST(1U)
+
+#define S_PCIE_CLIDECEN 16
+#define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
+#define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U)
+
+#define S_CRSTWRMMODE 0
+#define V_CRSTWRMMODE(x) ((x) << S_CRSTWRMMODE)
+#define F_CRSTWRMMODE V_CRSTWRMMODE(1U)
+
+#define A_PCIE_MODE 0x8c
+
+#define S_NUMFSTTRNSEQRX 10
+#define M_NUMFSTTRNSEQRX 0xff
+#define V_NUMFSTTRNSEQRX(x) ((x) << S_NUMFSTTRNSEQRX)
+#define G_NUMFSTTRNSEQRX(x) (((x) >> S_NUMFSTTRNSEQRX) & M_NUMFSTTRNSEQRX)
+
+#define A_PCIE_PEX_CTRL0 0x98
+
+#define S_NUMFSTTRNSEQ 22
+#define M_NUMFSTTRNSEQ 0xff
+#define V_NUMFSTTRNSEQ(x) ((x) << S_NUMFSTTRNSEQ)
+#define G_NUMFSTTRNSEQ(x) (((x) >> S_NUMFSTTRNSEQ) & M_NUMFSTTRNSEQ)
+
+#define S_REPLAYLMT 2
+#define M_REPLAYLMT 0xfffff
+#define V_REPLAYLMT(x) ((x) << S_REPLAYLMT)
+
+#define A_PCIE_PEX_CTRL1 0x9c
+
+#define S_T3A_ACKLAT 0
+#define M_T3A_ACKLAT 0x7ff
+#define V_T3A_ACKLAT(x) ((x) << S_T3A_ACKLAT)
+
+#define S_ACKLAT 0
+#define M_ACKLAT 0x1fff
+#define V_ACKLAT(x) ((x) << S_ACKLAT)
+
+#define A_PCIE_PEX_ERR 0xa4
+
+#define A_T3DBG_GPIO_EN 0xd0
+
+#define S_GPIO11_OEN 27
+#define V_GPIO11_OEN(x) ((x) << S_GPIO11_OEN)
+#define F_GPIO11_OEN V_GPIO11_OEN(1U)
+
+#define S_GPIO10_OEN 26
+#define V_GPIO10_OEN(x) ((x) << S_GPIO10_OEN)
+#define F_GPIO10_OEN V_GPIO10_OEN(1U)
+
+#define S_GPIO7_OEN 23
+#define V_GPIO7_OEN(x) ((x) << S_GPIO7_OEN)
+#define F_GPIO7_OEN V_GPIO7_OEN(1U)
+
+#define S_GPIO6_OEN 22
+#define V_GPIO6_OEN(x) ((x) << S_GPIO6_OEN)
+#define F_GPIO6_OEN V_GPIO6_OEN(1U)
+
+#define S_GPIO5_OEN 21
+#define V_GPIO5_OEN(x) ((x) << S_GPIO5_OEN)
+#define F_GPIO5_OEN V_GPIO5_OEN(1U)
+
+#define S_GPIO4_OEN 20
+#define V_GPIO4_OEN(x) ((x) << S_GPIO4_OEN)
+#define F_GPIO4_OEN V_GPIO4_OEN(1U)
+
+#define S_GPIO2_OEN 18
+#define V_GPIO2_OEN(x) ((x) << S_GPIO2_OEN)
+#define F_GPIO2_OEN V_GPIO2_OEN(1U)
+
+#define S_GPIO1_OEN 17
+#define V_GPIO1_OEN(x) ((x) << S_GPIO1_OEN)
+#define F_GPIO1_OEN V_GPIO1_OEN(1U)
+
+#define S_GPIO0_OEN 16
+#define V_GPIO0_OEN(x) ((x) << S_GPIO0_OEN)
+#define F_GPIO0_OEN V_GPIO0_OEN(1U)
+
+#define S_GPIO10_OUT_VAL 10
+#define V_GPIO10_OUT_VAL(x) ((x) << S_GPIO10_OUT_VAL)
+#define F_GPIO10_OUT_VAL V_GPIO10_OUT_VAL(1U)
+
+#define S_GPIO7_OUT_VAL 7
+#define V_GPIO7_OUT_VAL(x) ((x) << S_GPIO7_OUT_VAL)
+#define F_GPIO7_OUT_VAL V_GPIO7_OUT_VAL(1U)
+
+#define S_GPIO6_OUT_VAL 6
+#define V_GPIO6_OUT_VAL(x) ((x) << S_GPIO6_OUT_VAL)
+#define F_GPIO6_OUT_VAL V_GPIO6_OUT_VAL(1U)
+
+#define S_GPIO5_OUT_VAL 5
+#define V_GPIO5_OUT_VAL(x) ((x) << S_GPIO5_OUT_VAL)
+#define F_GPIO5_OUT_VAL V_GPIO5_OUT_VAL(1U)
+
+#define S_GPIO4_OUT_VAL 4
+#define V_GPIO4_OUT_VAL(x) ((x) << S_GPIO4_OUT_VAL)
+#define F_GPIO4_OUT_VAL V_GPIO4_OUT_VAL(1U)
+
+#define S_GPIO2_OUT_VAL 2
+#define V_GPIO2_OUT_VAL(x) ((x) << S_GPIO2_OUT_VAL)
+#define F_GPIO2_OUT_VAL V_GPIO2_OUT_VAL(1U)
+
+#define S_GPIO1_OUT_VAL 1
+#define V_GPIO1_OUT_VAL(x) ((x) << S_GPIO1_OUT_VAL)
+#define F_GPIO1_OUT_VAL V_GPIO1_OUT_VAL(1U)
+
+#define S_GPIO0_OUT_VAL 0
+#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL)
+#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U)
+
+#define A_T3DBG_INT_ENABLE 0xd8
+
+#define S_GPIO11 11
+#define V_GPIO11(x) ((x) << S_GPIO11)
+#define F_GPIO11 V_GPIO11(1U)
+
+#define S_GPIO10 10
+#define V_GPIO10(x) ((x) << S_GPIO10)
+#define F_GPIO10 V_GPIO10(1U)
+
+#define S_GPIO9 9
+#define V_GPIO9(x) ((x) << S_GPIO9)
+#define F_GPIO9 V_GPIO9(1U)
+
+#define S_GPIO7 7
+#define V_GPIO7(x) ((x) << S_GPIO7)
+#define F_GPIO7 V_GPIO7(1U)
+
+#define S_GPIO6 6
+#define V_GPIO6(x) ((x) << S_GPIO6)
+#define F_GPIO6 V_GPIO6(1U)
+
+#define S_GPIO5 5
+#define V_GPIO5(x) ((x) << S_GPIO5)
+#define F_GPIO5 V_GPIO5(1U)
+
+#define S_GPIO4 4
+#define V_GPIO4(x) ((x) << S_GPIO4)
+#define F_GPIO4 V_GPIO4(1U)
+
+#define S_GPIO3 3
+#define V_GPIO3(x) ((x) << S_GPIO3)
+#define F_GPIO3 V_GPIO3(1U)
+
+#define S_GPIO2 2
+#define V_GPIO2(x) ((x) << S_GPIO2)
+#define F_GPIO2 V_GPIO2(1U)
+
+#define S_GPIO1 1
+#define V_GPIO1(x) ((x) << S_GPIO1)
+#define F_GPIO1 V_GPIO1(1U)
+
+#define S_GPIO0 0
+#define V_GPIO0(x) ((x) << S_GPIO0)
+#define F_GPIO0 V_GPIO0(1U)
+
+#define A_T3DBG_INT_CAUSE 0xdc
+
+#define A_T3DBG_GPIO_ACT_LOW 0xf0
+
+#define MC7_PMRX_BASE_ADDR 0x100
+
+#define A_MC7_CFG 0x100
+
+#define S_IFEN 13
+#define V_IFEN(x) ((x) << S_IFEN)
+#define F_IFEN V_IFEN(1U)
+
+#define S_TERM150 11
+#define V_TERM150(x) ((x) << S_TERM150)
+#define F_TERM150 V_TERM150(1U)
+
+#define S_SLOW 10
+#define V_SLOW(x) ((x) << S_SLOW)
+#define F_SLOW V_SLOW(1U)
+
+#define S_WIDTH 8
+#define M_WIDTH 0x3
+#define V_WIDTH(x) ((x) << S_WIDTH)
+#define G_WIDTH(x) (((x) >> S_WIDTH) & M_WIDTH)
+
+#define S_BKS 6
+#define V_BKS(x) ((x) << S_BKS)
+#define F_BKS V_BKS(1U)
+
+#define S_ORG 5
+#define V_ORG(x) ((x) << S_ORG)
+#define F_ORG V_ORG(1U)
+
+#define S_DEN 2
+#define M_DEN 0x7
+#define V_DEN(x) ((x) << S_DEN)
+#define G_DEN(x) (((x) >> S_DEN) & M_DEN)
+
+#define S_RDY 1
+#define V_RDY(x) ((x) << S_RDY)
+#define F_RDY V_RDY(1U)
+
+#define S_CLKEN 0
+#define V_CLKEN(x) ((x) << S_CLKEN)
+#define F_CLKEN V_CLKEN(1U)
+
+#define A_MC7_MODE 0x104
+
+#define S_BUSY 31
+#define V_BUSY(x) ((x) << S_BUSY)
+#define F_BUSY V_BUSY(1U)
+
+#define A_MC7_EXT_MODE1 0x108
+
+#define A_MC7_EXT_MODE2 0x10c
+
+#define A_MC7_EXT_MODE3 0x110
+
+#define A_MC7_PRE 0x114
+
+#define A_MC7_REF 0x118
+
+#define S_PREREFDIV 1
+#define M_PREREFDIV 0x3fff
+#define V_PREREFDIV(x) ((x) << S_PREREFDIV)
+
+#define S_PERREFEN 0
+#define V_PERREFEN(x) ((x) << S_PERREFEN)
+#define F_PERREFEN V_PERREFEN(1U)
+
+#define A_MC7_DLL 0x11c
+
+#define S_DLLENB 1
+#define V_DLLENB(x) ((x) << S_DLLENB)
+#define F_DLLENB V_DLLENB(1U)
+
+#define S_DLLRST 0
+#define V_DLLRST(x) ((x) << S_DLLRST)
+#define F_DLLRST V_DLLRST(1U)
+
+#define A_MC7_PARM 0x120
+
+#define S_ACTTOPREDLY 26
+#define M_ACTTOPREDLY 0xf
+#define V_ACTTOPREDLY(x) ((x) << S_ACTTOPREDLY)
+
+#define S_ACTTORDWRDLY 23
+#define M_ACTTORDWRDLY 0x7
+#define V_ACTTORDWRDLY(x) ((x) << S_ACTTORDWRDLY)
+
+#define S_PRECYC 20
+#define M_PRECYC 0x7
+#define V_PRECYC(x) ((x) << S_PRECYC)
+
+#define S_REFCYC 13
+#define M_REFCYC 0x7f
+#define V_REFCYC(x) ((x) << S_REFCYC)
+
+#define S_BKCYC 8
+#define M_BKCYC 0x1f
+#define V_BKCYC(x) ((x) << S_BKCYC)
+
+#define S_WRTORDDLY 4
+#define M_WRTORDDLY 0xf
+#define V_WRTORDDLY(x) ((x) << S_WRTORDDLY)
+
+#define S_RDTOWRDLY 0
+#define M_RDTOWRDLY 0xf
+#define V_RDTOWRDLY(x) ((x) << S_RDTOWRDLY)
+
+#define A_MC7_CAL 0x128
+
+#define S_BUSY 31
+#define V_BUSY(x) ((x) << S_BUSY)
+#define F_BUSY V_BUSY(1U)
+
+#define S_CAL_FAULT 30
+#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT)
+#define F_CAL_FAULT V_CAL_FAULT(1U)
+
+#define S_SGL_CAL_EN 20
+#define V_SGL_CAL_EN(x) ((x) << S_SGL_CAL_EN)
+#define F_SGL_CAL_EN V_SGL_CAL_EN(1U)
+
+#define A_MC7_ERR_ADDR 0x12c
+
+#define A_MC7_ECC 0x130
+
+#define S_ECCCHKEN 1
+#define V_ECCCHKEN(x) ((x) << S_ECCCHKEN)
+#define F_ECCCHKEN V_ECCCHKEN(1U)
+
+#define S_ECCGENEN 0
+#define V_ECCGENEN(x) ((x) << S_ECCGENEN)
+#define F_ECCGENEN V_ECCGENEN(1U)
+
+#define A_MC7_CE_ADDR 0x134
+
+#define A_MC7_CE_DATA0 0x138
+
+#define A_MC7_CE_DATA1 0x13c
+
+#define A_MC7_CE_DATA2 0x140
+
+#define S_DATA 0
+#define M_DATA 0xff
+#define G_DATA(x) (((x) >> S_DATA) & M_DATA)
+
+#define A_MC7_UE_ADDR 0x144
+
+#define A_MC7_UE_DATA0 0x148
+
+#define A_MC7_UE_DATA1 0x14c
+
+#define A_MC7_UE_DATA2 0x150
+
+#define A_MC7_BD_ADDR 0x154
+
+#define S_ADDR 3
+#define M_ADDR 0x1fffffff
+
+#define A_MC7_BD_DATA0 0x158
+
+#define A_MC7_BD_DATA1 0x15c
+
+#define A_MC7_BD_OP 0x164
+
+#define S_OP 0
+#define V_OP(x) ((x) << S_OP)
+#define F_OP V_OP(1U)
+
+#define A_SF_OP 0x6dc
+
+#define A_MC7_BIST_ADDR_BEG 0x168
+
+#define A_MC7_BIST_ADDR_END 0x16c
+
+#define A_MC7_BIST_DATA 0x170
+
+#define A_MC7_BIST_OP 0x174
+
+#define S_CONT 3
+#define V_CONT(x) ((x) << S_CONT)
+#define F_CONT V_CONT(1U)
+
+#define A_MC7_INT_ENABLE 0x178
+
+#define S_AE 17
+#define V_AE(x) ((x) << S_AE)
+#define F_AE V_AE(1U)
+
+#define S_PE 2
+#define M_PE 0x7fff
+#define V_PE(x) ((x) << S_PE)
+#define G_PE(x) (((x) >> S_PE) & M_PE)
+
+#define S_UE 1
+#define V_UE(x) ((x) << S_UE)
+#define F_UE V_UE(1U)
+
+#define S_CE 0
+#define V_CE(x) ((x) << S_CE)
+#define F_CE V_CE(1U)
+
+#define A_MC7_INT_CAUSE 0x17c
+
+#define MC7_PMTX_BASE_ADDR 0x180
+
+#define MC7_CM_BASE_ADDR 0x200
+
+#define A_CIM_BOOT_CFG 0x280
+
+#define S_BOOTADDR 2
+#define M_BOOTADDR 0x3fffffff
+#define V_BOOTADDR(x) ((x) << S_BOOTADDR)
+
+#define A_CIM_SDRAM_BASE_ADDR 0x28c
+
+#define A_CIM_SDRAM_ADDR_SIZE 0x290
+
+#define A_CIM_HOST_INT_ENABLE 0x298
+
+#define S_DTAGPARERR 28
+#define V_DTAGPARERR(x) ((x) << S_DTAGPARERR)
+#define F_DTAGPARERR V_DTAGPARERR(1U)
+
+#define S_ITAGPARERR 27
+#define V_ITAGPARERR(x) ((x) << S_ITAGPARERR)
+#define F_ITAGPARERR V_ITAGPARERR(1U)
+
+#define S_IBQTPPARERR 26
+#define V_IBQTPPARERR(x) ((x) << S_IBQTPPARERR)
+#define F_IBQTPPARERR V_IBQTPPARERR(1U)
+
+#define S_IBQULPPARERR 25
+#define V_IBQULPPARERR(x) ((x) << S_IBQULPPARERR)
+#define F_IBQULPPARERR V_IBQULPPARERR(1U)
+
+#define S_IBQSGEHIPARERR 24
+#define V_IBQSGEHIPARERR(x) ((x) << S_IBQSGEHIPARERR)
+#define F_IBQSGEHIPARERR V_IBQSGEHIPARERR(1U)
+
+#define S_IBQSGELOPARERR 23
+#define V_IBQSGELOPARERR(x) ((x) << S_IBQSGELOPARERR)
+#define F_IBQSGELOPARERR V_IBQSGELOPARERR(1U)
+
+#define S_OBQULPLOPARERR 22
+#define V_OBQULPLOPARERR(x) ((x) << S_OBQULPLOPARERR)
+#define F_OBQULPLOPARERR V_OBQULPLOPARERR(1U)
+
+#define S_OBQULPHIPARERR 21
+#define V_OBQULPHIPARERR(x) ((x) << S_OBQULPHIPARERR)
+#define F_OBQULPHIPARERR V_OBQULPHIPARERR(1U)
+
+#define S_OBQSGEPARERR 20
+#define V_OBQSGEPARERR(x) ((x) << S_OBQSGEPARERR)
+#define F_OBQSGEPARERR V_OBQSGEPARERR(1U)
+
+#define S_DCACHEPARERR 19
+#define V_DCACHEPARERR(x) ((x) << S_DCACHEPARERR)
+#define F_DCACHEPARERR V_DCACHEPARERR(1U)
+
+#define S_ICACHEPARERR 18
+#define V_ICACHEPARERR(x) ((x) << S_ICACHEPARERR)
+#define F_ICACHEPARERR V_ICACHEPARERR(1U)
+
+#define S_DRAMPARERR 17
+#define V_DRAMPARERR(x) ((x) << S_DRAMPARERR)
+#define F_DRAMPARERR V_DRAMPARERR(1U)
+
+#define A_CIM_HOST_INT_CAUSE 0x29c
+
+#define S_BLKWRPLINT 12
+#define V_BLKWRPLINT(x) ((x) << S_BLKWRPLINT)
+#define F_BLKWRPLINT V_BLKWRPLINT(1U)
+
+#define S_BLKRDPLINT 11
+#define V_BLKRDPLINT(x) ((x) << S_BLKRDPLINT)
+#define F_BLKRDPLINT V_BLKRDPLINT(1U)
+
+#define S_BLKWRCTLINT 10
+#define V_BLKWRCTLINT(x) ((x) << S_BLKWRCTLINT)
+#define F_BLKWRCTLINT V_BLKWRCTLINT(1U)
+
+#define S_BLKRDCTLINT 9
+#define V_BLKRDCTLINT(x) ((x) << S_BLKRDCTLINT)
+#define F_BLKRDCTLINT V_BLKRDCTLINT(1U)
+
+#define S_BLKWRFLASHINT 8
+#define V_BLKWRFLASHINT(x) ((x) << S_BLKWRFLASHINT)
+#define F_BLKWRFLASHINT V_BLKWRFLASHINT(1U)
+
+#define S_BLKRDFLASHINT 7
+#define V_BLKRDFLASHINT(x) ((x) << S_BLKRDFLASHINT)
+#define F_BLKRDFLASHINT V_BLKRDFLASHINT(1U)
+
+#define S_SGLWRFLASHINT 6
+#define V_SGLWRFLASHINT(x) ((x) << S_SGLWRFLASHINT)
+#define F_SGLWRFLASHINT V_SGLWRFLASHINT(1U)
+
+#define S_WRBLKFLASHINT 5
+#define V_WRBLKFLASHINT(x) ((x) << S_WRBLKFLASHINT)
+#define F_WRBLKFLASHINT V_WRBLKFLASHINT(1U)
+
+#define S_BLKWRBOOTINT 4
+#define V_BLKWRBOOTINT(x) ((x) << S_BLKWRBOOTINT)
+#define F_BLKWRBOOTINT V_BLKWRBOOTINT(1U)
+
+#define S_FLASHRANGEINT 2
+#define V_FLASHRANGEINT(x) ((x) << S_FLASHRANGEINT)
+#define F_FLASHRANGEINT V_FLASHRANGEINT(1U)
+
+#define S_SDRAMRANGEINT 1
+#define V_SDRAMRANGEINT(x) ((x) << S_SDRAMRANGEINT)
+#define F_SDRAMRANGEINT V_SDRAMRANGEINT(1U)
+
+#define S_RSVDSPACEINT 0
+#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT)
+#define F_RSVDSPACEINT V_RSVDSPACEINT(1U)
+
+#define A_CIM_HOST_ACC_CTRL 0x2b0
+
+#define S_HOSTBUSY 17
+#define V_HOSTBUSY(x) ((x) << S_HOSTBUSY)
+#define F_HOSTBUSY V_HOSTBUSY(1U)
+
+#define A_CIM_HOST_ACC_DATA 0x2b4
+
+#define A_CIM_IBQ_DBG_CFG 0x2c0
+
+#define S_IBQDBGADDR 16
+#define M_IBQDBGADDR 0x1ff
+#define V_IBQDBGADDR(x) ((x) << S_IBQDBGADDR)
+#define G_IBQDBGADDR(x) (((x) >> S_IBQDBGADDR) & M_IBQDBGADDR)
+
+#define S_IBQDBGQID 3
+#define M_IBQDBGQID 0x3
+#define V_IBQDBGQID(x) ((x) << S_IBQDBGQID)
+#define G_IBQDBGQID(x) (((x) >> S_IBQDBGQID) & M_IBQDBGQID)
+
+#define S_IBQDBGWR 2
+#define V_IBQDBGWR(x) ((x) << S_IBQDBGWR)
+#define F_IBQDBGWR V_IBQDBGWR(1U)
+
+#define S_IBQDBGBUSY 1
+#define V_IBQDBGBUSY(x) ((x) << S_IBQDBGBUSY)
+#define F_IBQDBGBUSY V_IBQDBGBUSY(1U)
+
+#define S_IBQDBGEN 0
+#define V_IBQDBGEN(x) ((x) << S_IBQDBGEN)
+#define F_IBQDBGEN V_IBQDBGEN(1U)
+
+#define A_CIM_IBQ_DBG_DATA 0x2c8
+
+#define A_TP_IN_CONFIG 0x300
+
+#define S_RXFBARBPRIO 25
+#define V_RXFBARBPRIO(x) ((x) << S_RXFBARBPRIO)
+#define F_RXFBARBPRIO V_RXFBARBPRIO(1U)
+
+#define S_TXFBARBPRIO 24
+#define V_TXFBARBPRIO(x) ((x) << S_TXFBARBPRIO)
+#define F_TXFBARBPRIO V_TXFBARBPRIO(1U)
+
+#define S_NICMODE 14
+#define V_NICMODE(x) ((x) << S_NICMODE)
+#define F_NICMODE V_NICMODE(1U)
+
+#define S_IPV6ENABLE 15
+#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
+#define F_IPV6ENABLE V_IPV6ENABLE(1U)
+
+#define A_TP_OUT_CONFIG 0x304
+
+#define S_VLANEXTRACTIONENABLE 12
+
+#define A_TP_GLOBAL_CONFIG 0x308
+
+#define S_TXPACINGENABLE 24
+#define V_TXPACINGENABLE(x) ((x) << S_TXPACINGENABLE)
+#define F_TXPACINGENABLE V_TXPACINGENABLE(1U)
+
+#define S_PATHMTU 15
+#define V_PATHMTU(x) ((x) << S_PATHMTU)
+#define F_PATHMTU V_PATHMTU(1U)
+
+#define S_IPCHECKSUMOFFLOAD 13
+#define V_IPCHECKSUMOFFLOAD(x) ((x) << S_IPCHECKSUMOFFLOAD)
+#define F_IPCHECKSUMOFFLOAD V_IPCHECKSUMOFFLOAD(1U)
+
+#define S_UDPCHECKSUMOFFLOAD 12
+#define V_UDPCHECKSUMOFFLOAD(x) ((x) << S_UDPCHECKSUMOFFLOAD)
+#define F_UDPCHECKSUMOFFLOAD V_UDPCHECKSUMOFFLOAD(1U)
+
+#define S_TCPCHECKSUMOFFLOAD 11
+#define V_TCPCHECKSUMOFFLOAD(x) ((x) << S_TCPCHECKSUMOFFLOAD)
+#define F_TCPCHECKSUMOFFLOAD V_TCPCHECKSUMOFFLOAD(1U)
+
+#define S_IPTTL 0
+#define M_IPTTL 0xff
+#define V_IPTTL(x) ((x) << S_IPTTL)
+
+#define A_TP_CMM_MM_BASE 0x314
+
+#define A_TP_CMM_TIMER_BASE 0x318
+
+#define S_CMTIMERMAXNUM 28
+#define M_CMTIMERMAXNUM 0x3
+#define V_CMTIMERMAXNUM(x) ((x) << S_CMTIMERMAXNUM)
+
+#define A_TP_PMM_SIZE 0x31c
+
+#define A_TP_PMM_TX_BASE 0x320
+
+#define A_TP_PMM_RX_BASE 0x328
+
+#define A_TP_PMM_RX_PAGE_SIZE 0x32c
+
+#define A_TP_PMM_RX_MAX_PAGE 0x330
+
+#define A_TP_PMM_TX_PAGE_SIZE 0x334
+
+#define A_TP_PMM_TX_MAX_PAGE 0x338
+
+#define A_TP_TCP_OPTIONS 0x340
+
+#define S_MTUDEFAULT 16
+#define M_MTUDEFAULT 0xffff
+#define V_MTUDEFAULT(x) ((x) << S_MTUDEFAULT)
+
+#define S_MTUENABLE 10
+#define V_MTUENABLE(x) ((x) << S_MTUENABLE)
+#define F_MTUENABLE V_MTUENABLE(1U)
+
+#define S_SACKRX 8
+#define V_SACKRX(x) ((x) << S_SACKRX)
+#define F_SACKRX V_SACKRX(1U)
+
+#define S_SACKMODE 4
+#define M_SACKMODE 0x3
+#define V_SACKMODE(x) ((x) << S_SACKMODE)
+
+#define S_WINDOWSCALEMODE 2
+#define M_WINDOWSCALEMODE 0x3
+#define V_WINDOWSCALEMODE(x) ((x) << S_WINDOWSCALEMODE)
+
+#define S_TIMESTAMPSMODE 0
+#define M_TIMESTAMPSMODE 0x3
+#define V_TIMESTAMPSMODE(x) ((x) << S_TIMESTAMPSMODE)
+
+#define A_TP_DACK_CONFIG 0x344
+
+#define S_AUTOSTATE3 30
+#define M_AUTOSTATE3 0x3
+#define V_AUTOSTATE3(x) ((x) << S_AUTOSTATE3)
+
+#define S_AUTOSTATE2 28
+#define M_AUTOSTATE2 0x3
+#define V_AUTOSTATE2(x) ((x) << S_AUTOSTATE2)
+
+#define S_AUTOSTATE1 26
+#define M_AUTOSTATE1 0x3
+#define V_AUTOSTATE1(x) ((x) << S_AUTOSTATE1)
+
+#define S_BYTETHRESHOLD 5
+#define M_BYTETHRESHOLD 0xfffff
+#define V_BYTETHRESHOLD(x) ((x) << S_BYTETHRESHOLD)
+
+#define S_MSSTHRESHOLD 3
+#define M_MSSTHRESHOLD 0x3
+#define V_MSSTHRESHOLD(x) ((x) << S_MSSTHRESHOLD)
+
+#define S_AUTOCAREFUL 2
+#define V_AUTOCAREFUL(x) ((x) << S_AUTOCAREFUL)
+#define F_AUTOCAREFUL V_AUTOCAREFUL(1U)
+
+#define S_AUTOENABLE 1
+#define V_AUTOENABLE(x) ((x) << S_AUTOENABLE)
+#define F_AUTOENABLE V_AUTOENABLE(1U)
+
+#define S_DACK_MODE 0
+#define V_DACK_MODE(x) ((x) << S_DACK_MODE)
+#define F_DACK_MODE V_DACK_MODE(1U)
+
+#define A_TP_PC_CONFIG 0x348
+
+#define S_TXTOSQUEUEMAPMODE 26
+#define V_TXTOSQUEUEMAPMODE(x) ((x) << S_TXTOSQUEUEMAPMODE)
+#define F_TXTOSQUEUEMAPMODE V_TXTOSQUEUEMAPMODE(1U)
+
+#define S_ENABLEEPCMDAFULL 23
+#define V_ENABLEEPCMDAFULL(x) ((x) << S_ENABLEEPCMDAFULL)
+#define F_ENABLEEPCMDAFULL V_ENABLEEPCMDAFULL(1U)
+
+#define S_MODULATEUNIONMODE 22
+#define V_MODULATEUNIONMODE(x) ((x) << S_MODULATEUNIONMODE)
+#define F_MODULATEUNIONMODE V_MODULATEUNIONMODE(1U)
+
+#define S_TXDEFERENABLE 20
+#define V_TXDEFERENABLE(x) ((x) << S_TXDEFERENABLE)
+#define F_TXDEFERENABLE V_TXDEFERENABLE(1U)
+
+#define S_RXCONGESTIONMODE 19
+#define V_RXCONGESTIONMODE(x) ((x) << S_RXCONGESTIONMODE)
+#define F_RXCONGESTIONMODE V_RXCONGESTIONMODE(1U)
+
+#define S_HEARBEATDACK 16
+#define V_HEARBEATDACK(x) ((x) << S_HEARBEATDACK)
+#define F_HEARBEATDACK V_HEARBEATDACK(1U)
+
+#define S_TXCONGESTIONMODE 15
+#define V_TXCONGESTIONMODE(x) ((x) << S_TXCONGESTIONMODE)
+#define F_TXCONGESTIONMODE V_TXCONGESTIONMODE(1U)
+
+#define S_ENABLEOCSPIFULL 30
+#define V_ENABLEOCSPIFULL(x) ((x) << S_ENABLEOCSPIFULL)
+#define F_ENABLEOCSPIFULL V_ENABLEOCSPIFULL(1U)
+
+#define S_LOCKTID 28
+#define V_LOCKTID(x) ((x) << S_LOCKTID)
+#define F_LOCKTID V_LOCKTID(1U)
+
+#define S_TABLELATENCYDELTA 0
+#define M_TABLELATENCYDELTA 0xf
+#define V_TABLELATENCYDELTA(x) ((x) << S_TABLELATENCYDELTA)
+#define G_TABLELATENCYDELTA(x) \
+ (((x) >> S_TABLELATENCYDELTA) & M_TABLELATENCYDELTA)
+
+#define A_TP_PC_CONFIG2 0x34c
+
+#define S_DISBLEDAPARBIT0 15
+#define V_DISBLEDAPARBIT0(x) ((x) << S_DISBLEDAPARBIT0)
+#define F_DISBLEDAPARBIT0 V_DISBLEDAPARBIT0(1U)
+
+#define S_ENABLEARPMISS 13
+#define V_ENABLEARPMISS(x) ((x) << S_ENABLEARPMISS)
+#define F_ENABLEARPMISS V_ENABLEARPMISS(1U)
+
+#define S_ENABLENONOFDTNLSYN 12
+#define V_ENABLENONOFDTNLSYN(x) ((x) << S_ENABLENONOFDTNLSYN)
+#define F_ENABLENONOFDTNLSYN V_ENABLENONOFDTNLSYN(1U)
+
+#define S_ENABLEIPV6RSS 11
+#define V_ENABLEIPV6RSS(x) ((x) << S_ENABLEIPV6RSS)
+#define F_ENABLEIPV6RSS V_ENABLEIPV6RSS(1U)
+
+#define S_CHDRAFULL 4
+#define V_CHDRAFULL(x) ((x) << S_CHDRAFULL)
+#define F_CHDRAFULL V_CHDRAFULL(1U)
+
+#define A_TP_TCP_BACKOFF_REG0 0x350
+
+#define A_TP_TCP_BACKOFF_REG1 0x354
+
+#define A_TP_TCP_BACKOFF_REG2 0x358
+
+#define A_TP_TCP_BACKOFF_REG3 0x35c
+
+#define A_TP_PARA_REG2 0x368
+
+#define S_MAXRXDATA 16
+#define M_MAXRXDATA 0xffff
+#define V_MAXRXDATA(x) ((x) << S_MAXRXDATA)
+
+#define S_RXCOALESCESIZE 0
+#define M_RXCOALESCESIZE 0xffff
+#define V_RXCOALESCESIZE(x) ((x) << S_RXCOALESCESIZE)
+
+#define A_TP_PARA_REG3 0x36c
+
+#define S_TXDATAACKIDX 16
+#define M_TXDATAACKIDX 0xf
+#define V_TXDATAACKIDX(x) ((x) << S_TXDATAACKIDX)
+
+#define S_TXPACEAUTOSTRICT 10
+#define V_TXPACEAUTOSTRICT(x) ((x) << S_TXPACEAUTOSTRICT)
+#define F_TXPACEAUTOSTRICT V_TXPACEAUTOSTRICT(1U)
+
+#define S_TXPACEFIXED 9
+#define V_TXPACEFIXED(x) ((x) << S_TXPACEFIXED)
+#define F_TXPACEFIXED V_TXPACEFIXED(1U)
+
+#define S_TXPACEAUTO 8
+#define V_TXPACEAUTO(x) ((x) << S_TXPACEAUTO)
+#define F_TXPACEAUTO V_TXPACEAUTO(1U)
+
+#define S_RXCOALESCEENABLE 1
+#define V_RXCOALESCEENABLE(x) ((x) << S_RXCOALESCEENABLE)
+#define F_RXCOALESCEENABLE V_RXCOALESCEENABLE(1U)
+
+#define S_RXCOALESCEPSHEN 0
+#define V_RXCOALESCEPSHEN(x) ((x) << S_RXCOALESCEPSHEN)
+#define F_RXCOALESCEPSHEN V_RXCOALESCEPSHEN(1U)
+
+#define A_TP_PARA_REG4 0x370
+
+#define A_TP_PARA_REG5 0x374
+
+#define S_RXDDPOFFINIT 3
+#define V_RXDDPOFFINIT(x) ((x) << S_RXDDPOFFINIT)
+#define F_RXDDPOFFINIT V_RXDDPOFFINIT(1U)
+
+#define A_TP_PARA_REG6 0x378
+
+#define S_T3A_ENABLEESND 13
+#define V_T3A_ENABLEESND(x) ((x) << S_T3A_ENABLEESND)
+#define F_T3A_ENABLEESND V_T3A_ENABLEESND(1U)
+
+#define S_ENABLEESND 11
+#define V_ENABLEESND(x) ((x) << S_ENABLEESND)
+#define F_ENABLEESND V_ENABLEESND(1U)
+
+#define A_TP_PARA_REG7 0x37c
+
+#define S_PMMAXXFERLEN1 16
+#define M_PMMAXXFERLEN1 0xffff
+#define V_PMMAXXFERLEN1(x) ((x) << S_PMMAXXFERLEN1)
+
+#define S_PMMAXXFERLEN0 0
+#define M_PMMAXXFERLEN0 0xffff
+#define V_PMMAXXFERLEN0(x) ((x) << S_PMMAXXFERLEN0)
+
+#define A_TP_TIMER_RESOLUTION 0x390
+
+#define S_TIMERRESOLUTION 16
+#define M_TIMERRESOLUTION 0xff
+#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION)
+
+#define S_TIMESTAMPRESOLUTION 8
+#define M_TIMESTAMPRESOLUTION 0xff
+#define V_TIMESTAMPRESOLUTION(x) ((x) << S_TIMESTAMPRESOLUTION)
+
+#define S_DELAYEDACKRESOLUTION 0
+#define M_DELAYEDACKRESOLUTION 0xff
+#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
+
+#define A_TP_MSL 0x394
+
+#define A_TP_RXT_MIN 0x398
+
+#define A_TP_RXT_MAX 0x39c
+
+#define A_TP_PERS_MIN 0x3a0
+
+#define A_TP_PERS_MAX 0x3a4
+
+#define A_TP_KEEP_IDLE 0x3a8
+
+#define A_TP_KEEP_INTVL 0x3ac
+
+#define A_TP_INIT_SRTT 0x3b0
+
+#define A_TP_DACK_TIMER 0x3b4
+
+#define A_TP_FINWAIT2_TIMER 0x3b8
+
+#define A_TP_SHIFT_CNT 0x3c0
+
+#define S_SYNSHIFTMAX 24
+#define M_SYNSHIFTMAX 0xff
+#define V_SYNSHIFTMAX(x) ((x) << S_SYNSHIFTMAX)
+
+#define S_RXTSHIFTMAXR1 20
+#define M_RXTSHIFTMAXR1 0xf
+#define V_RXTSHIFTMAXR1(x) ((x) << S_RXTSHIFTMAXR1)
+
+#define S_RXTSHIFTMAXR2 16
+#define M_RXTSHIFTMAXR2 0xf
+#define V_RXTSHIFTMAXR2(x) ((x) << S_RXTSHIFTMAXR2)
+
+#define S_PERSHIFTBACKOFFMAX 12
+#define M_PERSHIFTBACKOFFMAX 0xf
+#define V_PERSHIFTBACKOFFMAX(x) ((x) << S_PERSHIFTBACKOFFMAX)
+
+#define S_PERSHIFTMAX 8
+#define M_PERSHIFTMAX 0xf
+#define V_PERSHIFTMAX(x) ((x) << S_PERSHIFTMAX)
+
+#define S_KEEPALIVEMAX 0
+#define M_KEEPALIVEMAX 0xff
+#define V_KEEPALIVEMAX(x) ((x) << S_KEEPALIVEMAX)
+
+#define A_TP_MTU_PORT_TABLE 0x3d0
+
+#define A_TP_CCTRL_TABLE 0x3dc
+
+#define A_TP_MTU_TABLE 0x3e4
+
+#define A_TP_RSS_MAP_TABLE 0x3e8
+
+#define A_TP_RSS_LKP_TABLE 0x3ec
+
+#define A_TP_RSS_CONFIG 0x3f0
+
+#define S_TNL4TUPEN 29
+#define V_TNL4TUPEN(x) ((x) << S_TNL4TUPEN)
+#define F_TNL4TUPEN V_TNL4TUPEN(1U)
+
+#define S_TNL2TUPEN 28
+#define V_TNL2TUPEN(x) ((x) << S_TNL2TUPEN)
+#define F_TNL2TUPEN V_TNL2TUPEN(1U)
+
+#define S_TNLPRTEN 26
+#define V_TNLPRTEN(x) ((x) << S_TNLPRTEN)
+#define F_TNLPRTEN V_TNLPRTEN(1U)
+
+#define S_TNLMAPEN 25
+#define V_TNLMAPEN(x) ((x) << S_TNLMAPEN)
+#define F_TNLMAPEN V_TNLMAPEN(1U)
+
+#define S_TNLLKPEN 24
+#define V_TNLLKPEN(x) ((x) << S_TNLLKPEN)
+#define F_TNLLKPEN V_TNLLKPEN(1U)
+
+#define S_RRCPLMAPEN 7
+#define V_RRCPLMAPEN(x) ((x) << S_RRCPLMAPEN)
+#define F_RRCPLMAPEN V_RRCPLMAPEN(1U)
+
+#define S_RRCPLCPUSIZE 4
+#define M_RRCPLCPUSIZE 0x7
+#define V_RRCPLCPUSIZE(x) ((x) << S_RRCPLCPUSIZE)
+
+#define S_RQFEEDBACKENABLE 3
+#define V_RQFEEDBACKENABLE(x) ((x) << S_RQFEEDBACKENABLE)
+#define F_RQFEEDBACKENABLE V_RQFEEDBACKENABLE(1U)
+
+#define S_HASHTOEPLITZ 2
+#define V_HASHTOEPLITZ(x) ((x) << S_HASHTOEPLITZ)
+#define F_HASHTOEPLITZ V_HASHTOEPLITZ(1U)
+
+#define S_DISABLE 0
+
+#define A_TP_TM_PIO_ADDR 0x418
+
+#define A_TP_TM_PIO_DATA 0x41c
+
+#define A_TP_TX_MOD_QUE_TABLE 0x420
+
+#define A_TP_TX_RESOURCE_LIMIT 0x424
+
+#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x428
+
+#define S_TX_MOD_QUEUE_REQ_MAP 0
+#define M_TX_MOD_QUEUE_REQ_MAP 0xff
+#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
+
+#define A_TP_TX_MOD_QUEUE_WEIGHT1 0x42c
+
+#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x430
+
+#define A_TP_MOD_CHANNEL_WEIGHT 0x434
+
+#define A_TP_MOD_RATE_LIMIT 0x438
+
+#define A_TP_PIO_ADDR 0x440
+
+#define A_TP_PIO_DATA 0x444
+
+#define A_TP_RESET 0x44c
+
+#define S_FLSTINITENABLE 1
+#define V_FLSTINITENABLE(x) ((x) << S_FLSTINITENABLE)
+#define F_FLSTINITENABLE V_FLSTINITENABLE(1U)
+
+#define S_TPRESET 0
+#define V_TPRESET(x) ((x) << S_TPRESET)
+#define F_TPRESET V_TPRESET(1U)
+
+#define A_TP_CMM_MM_RX_FLST_BASE 0x460
+
+#define A_TP_CMM_MM_TX_FLST_BASE 0x464
+
+#define A_TP_CMM_MM_PS_FLST_BASE 0x468
+
+#define A_TP_MIB_INDEX 0x450
+
+#define A_TP_MIB_RDATA 0x454
+
+#define A_TP_CMM_MM_MAX_PSTRUCT 0x46c
+
+#define A_TP_INT_ENABLE 0x470
+
+#define S_FLMTXFLSTEMPTY 30
+#define V_FLMTXFLSTEMPTY(x) ((x) << S_FLMTXFLSTEMPTY)
+#define F_FLMTXFLSTEMPTY V_FLMTXFLSTEMPTY(1U)
+
+#define S_FLMRXFLSTEMPTY 29
+#define V_FLMRXFLSTEMPTY(x) ((x) << S_FLMRXFLSTEMPTY)
+#define F_FLMRXFLSTEMPTY V_FLMRXFLSTEMPTY(1U)
+
+#define S_ARPLUTPERR 26
+#define V_ARPLUTPERR(x) ((x) << S_ARPLUTPERR)
+#define F_ARPLUTPERR V_ARPLUTPERR(1U)
+
+#define S_CMCACHEPERR 24
+#define V_CMCACHEPERR(x) ((x) << S_CMCACHEPERR)
+#define F_CMCACHEPERR V_CMCACHEPERR(1U)
+
+#define A_TP_INT_CAUSE 0x474
+
+#define A_TP_TX_MOD_Q1_Q0_RATE_LIMIT 0x8
+
+#define A_TP_TX_DROP_CFG_CH0 0x12b
+
+#define A_TP_TX_DROP_MODE 0x12f
+
+#define A_TP_EGRESS_CONFIG 0x145
+
+#define S_REWRITEFORCETOSIZE 0
+#define V_REWRITEFORCETOSIZE(x) ((x) << S_REWRITEFORCETOSIZE)
+#define F_REWRITEFORCETOSIZE V_REWRITEFORCETOSIZE(1U)
+
+#define A_TP_TX_TRC_KEY0 0x20
+
+#define A_TP_RX_TRC_KEY0 0x120
+
+#define A_TP_TX_DROP_CNT_CH0 0x12d
+
+#define S_TXDROPCNTCH0RCVD 0
+#define M_TXDROPCNTCH0RCVD 0xffff
+#define V_TXDROPCNTCH0RCVD(x) ((x) << S_TXDROPCNTCH0RCVD)
+#define G_TXDROPCNTCH0RCVD(x) (((x) >> S_TXDROPCNTCH0RCVD) & \
+ M_TXDROPCNTCH0RCVD)
+
+#define A_TP_PROXY_FLOW_CNTL 0x4b0
+
+#define A_TP_EMBED_OP_FIELD0 0x4e8
+#define A_TP_EMBED_OP_FIELD1 0x4ec
+#define A_TP_EMBED_OP_FIELD2 0x4f0
+#define A_TP_EMBED_OP_FIELD3 0x4f4
+#define A_TP_EMBED_OP_FIELD4 0x4f8
+#define A_TP_EMBED_OP_FIELD5 0x4fc
+
+#define A_ULPRX_CTL 0x500
+
+#define S_ROUND_ROBIN 4
+#define V_ROUND_ROBIN(x) ((x) << S_ROUND_ROBIN)
+#define F_ROUND_ROBIN V_ROUND_ROBIN(1U)
+
+#define A_ULPRX_INT_ENABLE 0x504
+
+#define S_DATASELFRAMEERR0 7
+#define V_DATASELFRAMEERR0(x) ((x) << S_DATASELFRAMEERR0)
+#define F_DATASELFRAMEERR0 V_DATASELFRAMEERR0(1U)
+
+#define S_DATASELFRAMEERR1 6
+#define V_DATASELFRAMEERR1(x) ((x) << S_DATASELFRAMEERR1)
+#define F_DATASELFRAMEERR1 V_DATASELFRAMEERR1(1U)
+
+#define S_PCMDMUXPERR 5
+#define V_PCMDMUXPERR(x) ((x) << S_PCMDMUXPERR)
+#define F_PCMDMUXPERR V_PCMDMUXPERR(1U)
+
+#define S_ARBFPERR 4
+#define V_ARBFPERR(x) ((x) << S_ARBFPERR)
+#define F_ARBFPERR V_ARBFPERR(1U)
+
+#define S_ARBPF0PERR 3
+#define V_ARBPF0PERR(x) ((x) << S_ARBPF0PERR)
+#define F_ARBPF0PERR V_ARBPF0PERR(1U)
+
+#define S_ARBPF1PERR 2
+#define V_ARBPF1PERR(x) ((x) << S_ARBPF1PERR)
+#define F_ARBPF1PERR V_ARBPF1PERR(1U)
+
+#define S_PARERRPCMD 1
+#define V_PARERRPCMD(x) ((x) << S_PARERRPCMD)
+#define F_PARERRPCMD V_PARERRPCMD(1U)
+
+#define S_PARERRDATA 0
+#define V_PARERRDATA(x) ((x) << S_PARERRDATA)
+#define F_PARERRDATA V_PARERRDATA(1U)
+
+#define A_ULPRX_INT_CAUSE 0x508
+
+#define A_ULPRX_ISCSI_LLIMIT 0x50c
+
+#define A_ULPRX_ISCSI_ULIMIT 0x510
+
+#define A_ULPRX_ISCSI_TAGMASK 0x514
+
+#define A_ULPRX_ISCSI_PSZ 0x518
+
+#define A_ULPRX_TDDP_LLIMIT 0x51c
+
+#define A_ULPRX_TDDP_ULIMIT 0x520
+#define A_ULPRX_TDDP_PSZ 0x528
+
+#define S_HPZ0 0
+#define M_HPZ0 0xf
+#define V_HPZ0(x) ((x) << S_HPZ0)
+#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0)
+
+#define A_ULPRX_STAG_LLIMIT 0x52c
+
+#define A_ULPRX_STAG_ULIMIT 0x530
+
+#define A_ULPRX_RQ_LLIMIT 0x534
+
+#define A_ULPRX_RQ_ULIMIT 0x538
+
+#define A_ULPRX_PBL_LLIMIT 0x53c
+
+#define A_ULPRX_PBL_ULIMIT 0x540
+
+#define A_ULPRX_TDDP_TAGMASK 0x524
+
+#define A_ULPTX_CONFIG 0x580
+
+#define S_CFG_CQE_SOP_MASK 1
+#define V_CFG_CQE_SOP_MASK(x) ((x) << S_CFG_CQE_SOP_MASK)
+#define F_CFG_CQE_SOP_MASK V_CFG_CQE_SOP_MASK(1U)
+
+#define S_CFG_RR_ARB 0
+#define V_CFG_RR_ARB(x) ((x) << S_CFG_RR_ARB)
+#define F_CFG_RR_ARB V_CFG_RR_ARB(1U)
+
+#define A_ULPTX_INT_ENABLE 0x584
+
+#define S_PBL_BOUND_ERR_CH1 1
+#define V_PBL_BOUND_ERR_CH1(x) ((x) << S_PBL_BOUND_ERR_CH1)
+#define F_PBL_BOUND_ERR_CH1 V_PBL_BOUND_ERR_CH1(1U)
+
+#define S_PBL_BOUND_ERR_CH0 0
+#define V_PBL_BOUND_ERR_CH0(x) ((x) << S_PBL_BOUND_ERR_CH0)
+#define F_PBL_BOUND_ERR_CH0 V_PBL_BOUND_ERR_CH0(1U)
+
+#define A_ULPTX_INT_CAUSE 0x588
+
+#define A_ULPTX_TPT_LLIMIT 0x58c
+
+#define A_ULPTX_TPT_ULIMIT 0x590
+
+#define A_ULPTX_PBL_LLIMIT 0x594
+
+#define A_ULPTX_PBL_ULIMIT 0x598
+
+#define A_ULPTX_DMA_WEIGHT 0x5ac
+
+#define S_D1_WEIGHT 16
+#define M_D1_WEIGHT 0xffff
+#define V_D1_WEIGHT(x) ((x) << S_D1_WEIGHT)
+
+#define S_D0_WEIGHT 0
+#define M_D0_WEIGHT 0xffff
+#define V_D0_WEIGHT(x) ((x) << S_D0_WEIGHT)
+
+#define A_PM1_RX_CFG 0x5c0
+#define A_PM1_RX_MODE 0x5c4
+
+#define A_PM1_RX_INT_ENABLE 0x5d8
+
+#define S_ZERO_E_CMD_ERROR 18
+#define V_ZERO_E_CMD_ERROR(x) ((x) << S_ZERO_E_CMD_ERROR)
+#define F_ZERO_E_CMD_ERROR V_ZERO_E_CMD_ERROR(1U)
+
+#define S_IESPI0_FIFO2X_RX_FRAMING_ERROR 17
+#define V_IESPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_FIFO2X_RX_FRAMING_ERROR)
+#define F_IESPI0_FIFO2X_RX_FRAMING_ERROR V_IESPI0_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI1_FIFO2X_RX_FRAMING_ERROR 16
+#define V_IESPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_FIFO2X_RX_FRAMING_ERROR)
+#define F_IESPI1_FIFO2X_RX_FRAMING_ERROR V_IESPI1_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI0_RX_FRAMING_ERROR 15
+#define V_IESPI0_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_RX_FRAMING_ERROR)
+#define F_IESPI0_RX_FRAMING_ERROR V_IESPI0_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI1_RX_FRAMING_ERROR 14
+#define V_IESPI1_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_RX_FRAMING_ERROR)
+#define F_IESPI1_RX_FRAMING_ERROR V_IESPI1_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI0_TX_FRAMING_ERROR 13
+#define V_IESPI0_TX_FRAMING_ERROR(x) ((x) << S_IESPI0_TX_FRAMING_ERROR)
+#define F_IESPI0_TX_FRAMING_ERROR V_IESPI0_TX_FRAMING_ERROR(1U)
+
+#define S_IESPI1_TX_FRAMING_ERROR 12
+#define V_IESPI1_TX_FRAMING_ERROR(x) ((x) << S_IESPI1_TX_FRAMING_ERROR)
+#define F_IESPI1_TX_FRAMING_ERROR V_IESPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI0_RX_FRAMING_ERROR 11
+#define V_OCSPI0_RX_FRAMING_ERROR(x) ((x) << S_OCSPI0_RX_FRAMING_ERROR)
+#define F_OCSPI0_RX_FRAMING_ERROR V_OCSPI0_RX_FRAMING_ERROR(1U)
+
+#define S_OCSPI1_RX_FRAMING_ERROR 10
+#define V_OCSPI1_RX_FRAMING_ERROR(x) ((x) << S_OCSPI1_RX_FRAMING_ERROR)
+#define F_OCSPI1_RX_FRAMING_ERROR V_OCSPI1_RX_FRAMING_ERROR(1U)
+
+#define S_OCSPI0_TX_FRAMING_ERROR 9
+#define V_OCSPI0_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_TX_FRAMING_ERROR)
+#define F_OCSPI0_TX_FRAMING_ERROR V_OCSPI0_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI1_TX_FRAMING_ERROR 8
+#define V_OCSPI1_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_TX_FRAMING_ERROR)
+#define F_OCSPI1_TX_FRAMING_ERROR V_OCSPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR 7
+#define V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR 6
+#define V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_IESPI_PAR_ERROR 3
+#define M_IESPI_PAR_ERROR 0x7
+#define V_IESPI_PAR_ERROR(x) ((x) << S_IESPI_PAR_ERROR)
+
+#define S_OCSPI_PAR_ERROR 0
+#define M_OCSPI_PAR_ERROR 0x7
+#define V_OCSPI_PAR_ERROR(x) ((x) << S_OCSPI_PAR_ERROR)
+
+#define A_PM1_RX_INT_CAUSE 0x5dc
+
+#define A_PM1_TX_CFG 0x5e0
+#define A_PM1_TX_MODE 0x5e4
+
+#define A_PM1_TX_INT_ENABLE 0x5f8
+
+#define S_ZERO_C_CMD_ERROR 18
+#define V_ZERO_C_CMD_ERROR(x) ((x) << S_ZERO_C_CMD_ERROR)
+#define F_ZERO_C_CMD_ERROR V_ZERO_C_CMD_ERROR(1U)
+
+#define S_ICSPI0_FIFO2X_RX_FRAMING_ERROR 17
+#define V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_FIFO2X_RX_FRAMING_ERROR)
+#define F_ICSPI0_FIFO2X_RX_FRAMING_ERROR V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI1_FIFO2X_RX_FRAMING_ERROR 16
+#define V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_FIFO2X_RX_FRAMING_ERROR)
+#define F_ICSPI1_FIFO2X_RX_FRAMING_ERROR V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI0_RX_FRAMING_ERROR 15
+#define V_ICSPI0_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_RX_FRAMING_ERROR)
+#define F_ICSPI0_RX_FRAMING_ERROR V_ICSPI0_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI1_RX_FRAMING_ERROR 14
+#define V_ICSPI1_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_RX_FRAMING_ERROR)
+#define F_ICSPI1_RX_FRAMING_ERROR V_ICSPI1_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI0_TX_FRAMING_ERROR 13
+#define V_ICSPI0_TX_FRAMING_ERROR(x) ((x) << S_ICSPI0_TX_FRAMING_ERROR)
+#define F_ICSPI0_TX_FRAMING_ERROR V_ICSPI0_TX_FRAMING_ERROR(1U)
+
+#define S_ICSPI1_TX_FRAMING_ERROR 12
+#define V_ICSPI1_TX_FRAMING_ERROR(x) ((x) << S_ICSPI1_TX_FRAMING_ERROR)
+#define F_ICSPI1_TX_FRAMING_ERROR V_ICSPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI0_RX_FRAMING_ERROR 11
+#define V_OESPI0_RX_FRAMING_ERROR(x) ((x) << S_OESPI0_RX_FRAMING_ERROR)
+#define F_OESPI0_RX_FRAMING_ERROR V_OESPI0_RX_FRAMING_ERROR(1U)
+
+#define S_OESPI1_RX_FRAMING_ERROR 10
+#define V_OESPI1_RX_FRAMING_ERROR(x) ((x) << S_OESPI1_RX_FRAMING_ERROR)
+#define F_OESPI1_RX_FRAMING_ERROR V_OESPI1_RX_FRAMING_ERROR(1U)
+
+#define S_OESPI0_TX_FRAMING_ERROR 9
+#define V_OESPI0_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_TX_FRAMING_ERROR)
+#define F_OESPI0_TX_FRAMING_ERROR V_OESPI0_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI1_TX_FRAMING_ERROR 8
+#define V_OESPI1_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_TX_FRAMING_ERROR)
+#define F_OESPI1_TX_FRAMING_ERROR V_OESPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI0_OFIFO2X_TX_FRAMING_ERROR 7
+#define V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OESPI0_OFIFO2X_TX_FRAMING_ERROR V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI1_OFIFO2X_TX_FRAMING_ERROR 6
+#define V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OESPI1_OFIFO2X_TX_FRAMING_ERROR V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_ICSPI_PAR_ERROR 3
+#define M_ICSPI_PAR_ERROR 0x7
+#define V_ICSPI_PAR_ERROR(x) ((x) << S_ICSPI_PAR_ERROR)
+
+#define S_OESPI_PAR_ERROR 0
+#define M_OESPI_PAR_ERROR 0x7
+#define V_OESPI_PAR_ERROR(x) ((x) << S_OESPI_PAR_ERROR)
+
+#define A_PM1_TX_INT_CAUSE 0x5fc
+
+#define A_MPS_CFG 0x600
+
+#define S_TPRXPORTEN 4
+#define V_TPRXPORTEN(x) ((x) << S_TPRXPORTEN)
+#define F_TPRXPORTEN V_TPRXPORTEN(1U)
+
+#define S_TPTXPORT1EN 3
+#define V_TPTXPORT1EN(x) ((x) << S_TPTXPORT1EN)
+#define F_TPTXPORT1EN V_TPTXPORT1EN(1U)
+
+#define S_TPTXPORT0EN 2
+#define V_TPTXPORT0EN(x) ((x) << S_TPTXPORT0EN)
+#define F_TPTXPORT0EN V_TPTXPORT0EN(1U)
+
+#define S_PORT1ACTIVE 1
+#define V_PORT1ACTIVE(x) ((x) << S_PORT1ACTIVE)
+#define F_PORT1ACTIVE V_PORT1ACTIVE(1U)
+
+#define S_PORT0ACTIVE 0
+#define V_PORT0ACTIVE(x) ((x) << S_PORT0ACTIVE)
+#define F_PORT0ACTIVE V_PORT0ACTIVE(1U)
+
+#define S_ENFORCEPKT 11
+#define V_ENFORCEPKT(x) ((x) << S_ENFORCEPKT)
+#define F_ENFORCEPKT V_ENFORCEPKT(1U)
+
+#define A_MPS_INT_ENABLE 0x61c
+
+#define S_MCAPARERRENB 6
+#define M_MCAPARERRENB 0x7
+#define V_MCAPARERRENB(x) ((x) << S_MCAPARERRENB)
+
+#define S_RXTPPARERRENB 4
+#define M_RXTPPARERRENB 0x3
+#define V_RXTPPARERRENB(x) ((x) << S_RXTPPARERRENB)
+
+#define S_TX1TPPARERRENB 2
+#define M_TX1TPPARERRENB 0x3
+#define V_TX1TPPARERRENB(x) ((x) << S_TX1TPPARERRENB)
+
+#define S_TX0TPPARERRENB 0
+#define M_TX0TPPARERRENB 0x3
+#define V_TX0TPPARERRENB(x) ((x) << S_TX0TPPARERRENB)
+
+#define A_MPS_INT_CAUSE 0x620
+
+#define S_MCAPARERR 6
+#define M_MCAPARERR 0x7
+#define V_MCAPARERR(x) ((x) << S_MCAPARERR)
+
+#define S_RXTPPARERR 4
+#define M_RXTPPARERR 0x3
+#define V_RXTPPARERR(x) ((x) << S_RXTPPARERR)
+
+#define S_TX1TPPARERR 2
+#define M_TX1TPPARERR 0x3
+#define V_TX1TPPARERR(x) ((x) << S_TX1TPPARERR)
+
+#define S_TX0TPPARERR 0
+#define M_TX0TPPARERR 0x3
+#define V_TX0TPPARERR(x) ((x) << S_TX0TPPARERR)
+
+#define A_CPL_SWITCH_CNTRL 0x640
+
+#define A_CPL_INTR_ENABLE 0x650
+
+#define S_CIM_OP_MAP_PERR 5
+#define V_CIM_OP_MAP_PERR(x) ((x) << S_CIM_OP_MAP_PERR)
+#define F_CIM_OP_MAP_PERR V_CIM_OP_MAP_PERR(1U)
+
+#define S_CIM_OVFL_ERROR 4
+#define V_CIM_OVFL_ERROR(x) ((x) << S_CIM_OVFL_ERROR)
+#define F_CIM_OVFL_ERROR V_CIM_OVFL_ERROR(1U)
+
+#define S_TP_FRAMING_ERROR 3
+#define V_TP_FRAMING_ERROR(x) ((x) << S_TP_FRAMING_ERROR)
+#define F_TP_FRAMING_ERROR V_TP_FRAMING_ERROR(1U)
+
+#define S_SGE_FRAMING_ERROR 2
+#define V_SGE_FRAMING_ERROR(x) ((x) << S_SGE_FRAMING_ERROR)
+#define F_SGE_FRAMING_ERROR V_SGE_FRAMING_ERROR(1U)
+
+#define S_CIM_FRAMING_ERROR 1
+#define V_CIM_FRAMING_ERROR(x) ((x) << S_CIM_FRAMING_ERROR)
+#define F_CIM_FRAMING_ERROR V_CIM_FRAMING_ERROR(1U)
+
+#define S_ZERO_SWITCH_ERROR 0
+#define V_ZERO_SWITCH_ERROR(x) ((x) << S_ZERO_SWITCH_ERROR)
+#define F_ZERO_SWITCH_ERROR V_ZERO_SWITCH_ERROR(1U)
+
+#define A_CPL_INTR_CAUSE 0x654
+
+#define A_CPL_MAP_TBL_DATA 0x65c
+
+#define A_SMB_GLOBAL_TIME_CFG 0x660
+
+#define A_I2C_CFG 0x6a0
+
+#define S_I2C_CLKDIV 0
+#define M_I2C_CLKDIV 0xfff
+#define V_I2C_CLKDIV(x) ((x) << S_I2C_CLKDIV)
+
+#define A_MI1_CFG 0x6b0
+
+#define S_CLKDIV 5
+#define M_CLKDIV 0xff
+#define V_CLKDIV(x) ((x) << S_CLKDIV)
+
+#define S_ST 3
+#define M_ST 0x3
+#define V_ST(x) ((x) << S_ST)
+#define G_ST(x) (((x) >> S_ST) & M_ST)
+
+#define S_PREEN 2
+#define V_PREEN(x) ((x) << S_PREEN)
+#define F_PREEN V_PREEN(1U)
+
+#define S_MDIINV 1
+#define V_MDIINV(x) ((x) << S_MDIINV)
+#define F_MDIINV V_MDIINV(1U)
+
+#define S_MDIEN 0
+#define V_MDIEN(x) ((x) << S_MDIEN)
+#define F_MDIEN V_MDIEN(1U)
+
+#define A_MI1_ADDR 0x6b4
+
+#define S_PHYADDR 5
+#define M_PHYADDR 0x1f
+#define V_PHYADDR(x) ((x) << S_PHYADDR)
+
+#define S_REGADDR 0
+#define M_REGADDR 0x1f
+#define V_REGADDR(x) ((x) << S_REGADDR)
+
+#define A_MI1_DATA 0x6b8
+
+#define A_MI1_OP 0x6bc
+
+#define S_MDI_OP 0
+#define M_MDI_OP 0x3
+#define V_MDI_OP(x) ((x) << S_MDI_OP)
+
+#define A_SF_DATA 0x6d8
+
+#define A_SF_OP 0x6dc
+
+#define S_BYTECNT 1
+#define M_BYTECNT 0x3
+#define V_BYTECNT(x) ((x) << S_BYTECNT)
+
+#define A_PL_INT_ENABLE0 0x6e0
+
+#define S_T3DBG 23
+#define V_T3DBG(x) ((x) << S_T3DBG)
+#define F_T3DBG V_T3DBG(1U)
+
+#define S_XGMAC0_1 20
+#define V_XGMAC0_1(x) ((x) << S_XGMAC0_1)
+#define F_XGMAC0_1 V_XGMAC0_1(1U)
+
+#define S_XGMAC0_0 19
+#define V_XGMAC0_0(x) ((x) << S_XGMAC0_0)
+#define F_XGMAC0_0 V_XGMAC0_0(1U)
+
+#define S_MC5A 18
+#define V_MC5A(x) ((x) << S_MC5A)
+#define F_MC5A V_MC5A(1U)
+
+#define S_CPL_SWITCH 12
+#define V_CPL_SWITCH(x) ((x) << S_CPL_SWITCH)
+#define F_CPL_SWITCH V_CPL_SWITCH(1U)
+
+#define S_MPS0 11
+#define V_MPS0(x) ((x) << S_MPS0)
+#define F_MPS0 V_MPS0(1U)
+
+#define S_PM1_TX 10
+#define V_PM1_TX(x) ((x) << S_PM1_TX)
+#define F_PM1_TX V_PM1_TX(1U)
+
+#define S_PM1_RX 9
+#define V_PM1_RX(x) ((x) << S_PM1_RX)
+#define F_PM1_RX V_PM1_RX(1U)
+
+#define S_ULP2_TX 8
+#define V_ULP2_TX(x) ((x) << S_ULP2_TX)
+#define F_ULP2_TX V_ULP2_TX(1U)
+
+#define S_ULP2_RX 7
+#define V_ULP2_RX(x) ((x) << S_ULP2_RX)
+#define F_ULP2_RX V_ULP2_RX(1U)
+
+#define S_TP1 6
+#define V_TP1(x) ((x) << S_TP1)
+#define F_TP1 V_TP1(1U)
+
+#define S_CIM 5
+#define V_CIM(x) ((x) << S_CIM)
+#define F_CIM V_CIM(1U)
+
+#define S_MC7_CM 4
+#define V_MC7_CM(x) ((x) << S_MC7_CM)
+#define F_MC7_CM V_MC7_CM(1U)
+
+#define S_MC7_PMTX 3
+#define V_MC7_PMTX(x) ((x) << S_MC7_PMTX)
+#define F_MC7_PMTX V_MC7_PMTX(1U)
+
+#define S_MC7_PMRX 2
+#define V_MC7_PMRX(x) ((x) << S_MC7_PMRX)
+#define F_MC7_PMRX V_MC7_PMRX(1U)
+
+#define S_PCIM0 1
+#define V_PCIM0(x) ((x) << S_PCIM0)
+#define F_PCIM0 V_PCIM0(1U)
+
+#define S_SGE3 0
+#define V_SGE3(x) ((x) << S_SGE3)
+#define F_SGE3 V_SGE3(1U)
+
+#define A_PL_INT_CAUSE0 0x6e4
+
+#define A_PL_RST 0x6f0
+
+#define S_FATALPERREN 4
+#define V_FATALPERREN(x) ((x) << S_FATALPERREN)
+#define F_FATALPERREN V_FATALPERREN(1U)
+
+#define S_CRSTWRM 1
+#define V_CRSTWRM(x) ((x) << S_CRSTWRM)
+#define F_CRSTWRM V_CRSTWRM(1U)
+
+#define A_PL_REV 0x6f4
+
+#define A_PL_CLI 0x6f8
+
+#define A_MC5_DB_CONFIG 0x704
+
+#define S_TMTYPEHI 30
+#define V_TMTYPEHI(x) ((x) << S_TMTYPEHI)
+#define F_TMTYPEHI V_TMTYPEHI(1U)
+
+#define S_TMPARTSIZE 28
+#define M_TMPARTSIZE 0x3
+#define V_TMPARTSIZE(x) ((x) << S_TMPARTSIZE)
+#define G_TMPARTSIZE(x) (((x) >> S_TMPARTSIZE) & M_TMPARTSIZE)
+
+#define S_TMTYPE 26
+#define M_TMTYPE 0x3
+#define V_TMTYPE(x) ((x) << S_TMTYPE)
+#define G_TMTYPE(x) (((x) >> S_TMTYPE) & M_TMTYPE)
+
+#define S_COMPEN 17
+#define V_COMPEN(x) ((x) << S_COMPEN)
+#define F_COMPEN V_COMPEN(1U)
+
+#define S_PRTYEN 6
+#define V_PRTYEN(x) ((x) << S_PRTYEN)
+#define F_PRTYEN V_PRTYEN(1U)
+
+#define S_MBUSEN 5
+#define V_MBUSEN(x) ((x) << S_MBUSEN)
+#define F_MBUSEN V_MBUSEN(1U)
+
+#define S_DBGIEN 4
+#define V_DBGIEN(x) ((x) << S_DBGIEN)
+#define F_DBGIEN V_DBGIEN(1U)
+
+#define S_TMRDY 2
+#define V_TMRDY(x) ((x) << S_TMRDY)
+#define F_TMRDY V_TMRDY(1U)
+
+#define S_TMRST 1
+#define V_TMRST(x) ((x) << S_TMRST)
+#define F_TMRST V_TMRST(1U)
+
+#define S_TMMODE 0
+#define V_TMMODE(x) ((x) << S_TMMODE)
+#define F_TMMODE V_TMMODE(1U)
+
+#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c
+
+#define A_MC5_DB_FILTER_TABLE 0x710
+
+#define A_MC5_DB_SERVER_INDEX 0x714
+
+#define A_MC5_DB_RSP_LATENCY 0x720
+
+#define S_RDLAT 16
+#define M_RDLAT 0x1f
+#define V_RDLAT(x) ((x) << S_RDLAT)
+
+#define S_LRNLAT 8
+#define M_LRNLAT 0x1f
+#define V_LRNLAT(x) ((x) << S_LRNLAT)
+
+#define S_SRCHLAT 0
+#define M_SRCHLAT 0x1f
+#define V_SRCHLAT(x) ((x) << S_SRCHLAT)
+
+#define A_MC5_DB_PART_ID_INDEX 0x72c
+
+#define A_MC5_DB_INT_ENABLE 0x740
+
+#define S_DELACTEMPTY 18
+#define V_DELACTEMPTY(x) ((x) << S_DELACTEMPTY)
+#define F_DELACTEMPTY V_DELACTEMPTY(1U)
+
+#define S_DISPQPARERR 17
+#define V_DISPQPARERR(x) ((x) << S_DISPQPARERR)
+#define F_DISPQPARERR V_DISPQPARERR(1U)
+
+#define S_REQQPARERR 16
+#define V_REQQPARERR(x) ((x) << S_REQQPARERR)
+#define F_REQQPARERR V_REQQPARERR(1U)
+
+#define S_UNKNOWNCMD 15
+#define V_UNKNOWNCMD(x) ((x) << S_UNKNOWNCMD)
+#define F_UNKNOWNCMD V_UNKNOWNCMD(1U)
+
+#define S_NFASRCHFAIL 8
+#define V_NFASRCHFAIL(x) ((x) << S_NFASRCHFAIL)
+#define F_NFASRCHFAIL V_NFASRCHFAIL(1U)
+
+#define S_ACTRGNFULL 7
+#define V_ACTRGNFULL(x) ((x) << S_ACTRGNFULL)
+#define F_ACTRGNFULL V_ACTRGNFULL(1U)
+
+#define S_PARITYERR 6
+#define V_PARITYERR(x) ((x) << S_PARITYERR)
+#define F_PARITYERR V_PARITYERR(1U)
+
+#define A_MC5_DB_INT_CAUSE 0x744
+
+#define A_MC5_DB_DBGI_CONFIG 0x774
+
+#define A_MC5_DB_DBGI_REQ_CMD 0x778
+
+#define A_MC5_DB_DBGI_REQ_ADDR0 0x77c
+
+#define A_MC5_DB_DBGI_REQ_ADDR1 0x780
+
+#define A_MC5_DB_DBGI_REQ_ADDR2 0x784
+
+#define A_MC5_DB_DBGI_REQ_DATA0 0x788
+
+#define A_MC5_DB_DBGI_REQ_DATA1 0x78c
+
+#define A_MC5_DB_DBGI_REQ_DATA2 0x790
+
+#define A_MC5_DB_DBGI_RSP_STATUS 0x7b0
+
+#define S_DBGIRSPVALID 0
+#define V_DBGIRSPVALID(x) ((x) << S_DBGIRSPVALID)
+#define F_DBGIRSPVALID V_DBGIRSPVALID(1U)
+
+#define A_MC5_DB_DBGI_RSP_DATA0 0x7b4
+
+#define A_MC5_DB_DBGI_RSP_DATA1 0x7b8
+
+#define A_MC5_DB_DBGI_RSP_DATA2 0x7bc
+
+#define A_MC5_DB_POPEN_DATA_WR_CMD 0x7cc
+
+#define A_MC5_DB_POPEN_MASK_WR_CMD 0x7d0
+
+#define A_MC5_DB_AOPEN_SRCH_CMD 0x7d4
+
+#define A_MC5_DB_AOPEN_LRN_CMD 0x7d8
+
+#define A_MC5_DB_SYN_SRCH_CMD 0x7dc
+
+#define A_MC5_DB_SYN_LRN_CMD 0x7e0
+
+#define A_MC5_DB_ACK_SRCH_CMD 0x7e4
+
+#define A_MC5_DB_ACK_LRN_CMD 0x7e8
+
+#define A_MC5_DB_ILOOKUP_CMD 0x7ec
+
+#define A_MC5_DB_ELOOKUP_CMD 0x7f0
+
+#define A_MC5_DB_DATA_WRITE_CMD 0x7f4
+
+#define A_MC5_DB_DATA_READ_CMD 0x7f8
+
+#define XGMAC0_0_BASE_ADDR 0x800
+
+#define A_XGM_TX_CTRL 0x800
+
+#define S_TXEN 0
+#define V_TXEN(x) ((x) << S_TXEN)
+#define F_TXEN V_TXEN(1U)
+
+#define A_XGM_TX_CFG 0x804
+
+#define S_TXPAUSEEN 0
+#define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN)
+#define F_TXPAUSEEN V_TXPAUSEEN(1U)
+
+#define A_XGM_TX_PAUSE_QUANTA 0x808
+
+#define A_XGM_RX_CTRL 0x80c
+
+#define S_RXEN 0
+#define V_RXEN(x) ((x) << S_RXEN)
+#define F_RXEN V_RXEN(1U)
+
+#define A_XGM_RX_CFG 0x810
+
+#define S_DISPAUSEFRAMES 9
+#define V_DISPAUSEFRAMES(x) ((x) << S_DISPAUSEFRAMES)
+#define F_DISPAUSEFRAMES V_DISPAUSEFRAMES(1U)
+
+#define S_EN1536BFRAMES 8
+#define V_EN1536BFRAMES(x) ((x) << S_EN1536BFRAMES)
+#define F_EN1536BFRAMES V_EN1536BFRAMES(1U)
+
+#define S_ENJUMBO 7
+#define V_ENJUMBO(x) ((x) << S_ENJUMBO)
+#define F_ENJUMBO V_ENJUMBO(1U)
+
+#define S_RMFCS 6
+#define V_RMFCS(x) ((x) << S_RMFCS)
+#define F_RMFCS V_RMFCS(1U)
+
+#define S_ENHASHMCAST 2
+#define V_ENHASHMCAST(x) ((x) << S_ENHASHMCAST)
+#define F_ENHASHMCAST V_ENHASHMCAST(1U)
+
+#define S_COPYALLFRAMES 0
+#define V_COPYALLFRAMES(x) ((x) << S_COPYALLFRAMES)
+#define F_COPYALLFRAMES V_COPYALLFRAMES(1U)
+
+#define S_DISBCAST 1
+#define V_DISBCAST(x) ((x) << S_DISBCAST)
+#define F_DISBCAST V_DISBCAST(1U)
+
+#define A_XGM_RX_HASH_LOW 0x814
+
+#define A_XGM_RX_HASH_HIGH 0x818
+
+#define A_XGM_RX_EXACT_MATCH_LOW_1 0x81c
+
+#define A_XGM_RX_EXACT_MATCH_HIGH_1 0x820
+
+#define A_XGM_RX_EXACT_MATCH_LOW_2 0x824
+
+#define A_XGM_RX_EXACT_MATCH_LOW_3 0x82c
+
+#define A_XGM_RX_EXACT_MATCH_LOW_4 0x834
+
+#define A_XGM_RX_EXACT_MATCH_LOW_5 0x83c
+
+#define A_XGM_RX_EXACT_MATCH_LOW_6 0x844
+
+#define A_XGM_RX_EXACT_MATCH_LOW_7 0x84c
+
+#define A_XGM_RX_EXACT_MATCH_LOW_8 0x854
+
+#define A_XGM_INT_STATUS 0x86c
+
+#define S_LINKFAULTCHANGE 9
+#define V_LINKFAULTCHANGE(x) ((x) << S_LINKFAULTCHANGE)
+#define F_LINKFAULTCHANGE V_LINKFAULTCHANGE(1U)
+
+#define A_XGM_XGM_INT_ENABLE 0x874
+#define A_XGM_XGM_INT_DISABLE 0x878
+
+#define A_XGM_STAT_CTRL 0x880
+
+#define S_CLRSTATS 2
+#define V_CLRSTATS(x) ((x) << S_CLRSTATS)
+#define F_CLRSTATS V_CLRSTATS(1U)
+
+#define A_XGM_RXFIFO_CFG 0x884
+
+#define S_RXFIFO_EMPTY 31
+#define V_RXFIFO_EMPTY(x) ((x) << S_RXFIFO_EMPTY)
+#define F_RXFIFO_EMPTY V_RXFIFO_EMPTY(1U)
+
+#define S_RXFIFOPAUSEHWM 17
+#define M_RXFIFOPAUSEHWM 0xfff
+#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM)
+#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM)
+
+#define S_RXFIFOPAUSELWM 5
+#define M_RXFIFOPAUSELWM 0xfff
+#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM)
+#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM)
+
+#define S_RXSTRFRWRD 1
+#define V_RXSTRFRWRD(x) ((x) << S_RXSTRFRWRD)
+#define F_RXSTRFRWRD V_RXSTRFRWRD(1U)
+
+#define S_DISERRFRAMES 0
+#define V_DISERRFRAMES(x) ((x) << S_DISERRFRAMES)
+#define F_DISERRFRAMES V_DISERRFRAMES(1U)
+
+#define A_XGM_TXFIFO_CFG 0x888
+
+#define S_UNDERUNFIX 22
+#define V_UNDERUNFIX(x) ((x) << S_UNDERUNFIX)
+#define F_UNDERUNFIX V_UNDERUNFIX(1U)
+
+#define S_TXIPG 13
+#define M_TXIPG 0xff
+#define V_TXIPG(x) ((x) << S_TXIPG)
+#define G_TXIPG(x) (((x) >> S_TXIPG) & M_TXIPG)
+
+#define S_TXFIFOTHRESH 4
+#define M_TXFIFOTHRESH 0x1ff
+#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH)
+
+#define S_ENDROPPKT 21
+#define V_ENDROPPKT(x) ((x) << S_ENDROPPKT)
+#define F_ENDROPPKT V_ENDROPPKT(1U)
+
+#define A_XGM_SERDES_CTRL 0x890
+
+#define S_SERDESRESET_ 24
+#define V_SERDESRESET_(x) ((x) << S_SERDESRESET_)
+#define F_SERDESRESET_ V_SERDESRESET_(1U)
+
+#define S_RXENABLE 4
+#define V_RXENABLE(x) ((x) << S_RXENABLE)
+#define F_RXENABLE V_RXENABLE(1U)
+
+#define S_TXENABLE 3
+#define V_TXENABLE(x) ((x) << S_TXENABLE)
+#define F_TXENABLE V_TXENABLE(1U)
+
+#define A_XGM_PAUSE_TIMER 0x890
+
+#define A_XGM_RGMII_IMP 0x89c
+
+#define S_XGM_IMPSETUPDATE 6
+#define V_XGM_IMPSETUPDATE(x) ((x) << S_XGM_IMPSETUPDATE)
+#define F_XGM_IMPSETUPDATE V_XGM_IMPSETUPDATE(1U)
+
+#define S_RGMIIIMPPD 3
+#define M_RGMIIIMPPD 0x7
+#define V_RGMIIIMPPD(x) ((x) << S_RGMIIIMPPD)
+
+#define S_RGMIIIMPPU 0
+#define M_RGMIIIMPPU 0x7
+#define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU)
+
+#define S_CALRESET 8
+#define V_CALRESET(x) ((x) << S_CALRESET)
+#define F_CALRESET V_CALRESET(1U)
+
+#define S_CALUPDATE 7
+#define V_CALUPDATE(x) ((x) << S_CALUPDATE)
+#define F_CALUPDATE V_CALUPDATE(1U)
+
+#define A_XGM_XAUI_IMP 0x8a0
+
+#define S_CALBUSY 31
+#define V_CALBUSY(x) ((x) << S_CALBUSY)
+#define F_CALBUSY V_CALBUSY(1U)
+
+#define S_XGM_CALFAULT 29
+#define V_XGM_CALFAULT(x) ((x) << S_XGM_CALFAULT)
+#define F_XGM_CALFAULT V_XGM_CALFAULT(1U)
+
+#define S_CALIMP 24
+#define M_CALIMP 0x1f
+#define V_CALIMP(x) ((x) << S_CALIMP)
+#define G_CALIMP(x) (((x) >> S_CALIMP) & M_CALIMP)
+
+#define S_XAUIIMP 0
+#define M_XAUIIMP 0x7
+#define V_XAUIIMP(x) ((x) << S_XAUIIMP)
+
+#define A_XGM_RX_MAX_PKT_SIZE 0x8a8
+
+#define S_RXMAXFRAMERSIZE 17
+#define M_RXMAXFRAMERSIZE 0x3fff
+#define V_RXMAXFRAMERSIZE(x) ((x) << S_RXMAXFRAMERSIZE)
+#define G_RXMAXFRAMERSIZE(x) (((x) >> S_RXMAXFRAMERSIZE) & M_RXMAXFRAMERSIZE)
+
+#define S_RXENFRAMER 14
+#define V_RXENFRAMER(x) ((x) << S_RXENFRAMER)
+#define F_RXENFRAMER V_RXENFRAMER(1U)
+
+#define S_RXMAXPKTSIZE 0
+#define M_RXMAXPKTSIZE 0x3fff
+#define V_RXMAXPKTSIZE(x) ((x) << S_RXMAXPKTSIZE)
+#define G_RXMAXPKTSIZE(x) (((x) >> S_RXMAXPKTSIZE) & M_RXMAXPKTSIZE)
+
+#define A_XGM_RESET_CTRL 0x8ac
+
+#define S_XGMAC_STOP_EN 4
+#define V_XGMAC_STOP_EN(x) ((x) << S_XGMAC_STOP_EN)
+#define F_XGMAC_STOP_EN V_XGMAC_STOP_EN(1U)
+
+#define S_XG2G_RESET_ 3
+#define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_)
+#define F_XG2G_RESET_ V_XG2G_RESET_(1U)
+
+#define S_RGMII_RESET_ 2
+#define V_RGMII_RESET_(x) ((x) << S_RGMII_RESET_)
+#define F_RGMII_RESET_ V_RGMII_RESET_(1U)
+
+#define S_PCS_RESET_ 1
+#define V_PCS_RESET_(x) ((x) << S_PCS_RESET_)
+#define F_PCS_RESET_ V_PCS_RESET_(1U)
+
+#define S_MAC_RESET_ 0
+#define V_MAC_RESET_(x) ((x) << S_MAC_RESET_)
+#define F_MAC_RESET_ V_MAC_RESET_(1U)
+
+#define A_XGM_PORT_CFG 0x8b8
+
+#define S_CLKDIVRESET_ 3
+#define V_CLKDIVRESET_(x) ((x) << S_CLKDIVRESET_)
+#define F_CLKDIVRESET_ V_CLKDIVRESET_(1U)
+
+#define S_PORTSPEED 1
+#define M_PORTSPEED 0x3
+#define V_PORTSPEED(x) ((x) << S_PORTSPEED)
+
+#define S_ENRGMII 0
+#define V_ENRGMII(x) ((x) << S_ENRGMII)
+#define F_ENRGMII V_ENRGMII(1U)
+
+#define A_XGM_INT_ENABLE 0x8d4
+
+#define S_TXFIFO_PRTY_ERR 17
+#define M_TXFIFO_PRTY_ERR 0x7
+#define V_TXFIFO_PRTY_ERR(x) ((x) << S_TXFIFO_PRTY_ERR)
+
+#define S_RXFIFO_PRTY_ERR 14
+#define M_RXFIFO_PRTY_ERR 0x7
+#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR)
+
+#define S_TXFIFO_UNDERRUN 13
+#define V_TXFIFO_UNDERRUN(x) ((x) << S_TXFIFO_UNDERRUN)
+#define F_TXFIFO_UNDERRUN V_TXFIFO_UNDERRUN(1U)
+
+#define S_RXFIFO_OVERFLOW 12
+#define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW)
+#define F_RXFIFO_OVERFLOW V_RXFIFO_OVERFLOW(1U)
+
+#define S_SERDES_LOS 4
+#define M_SERDES_LOS 0xf
+#define V_SERDES_LOS(x) ((x) << S_SERDES_LOS)
+
+#define S_XAUIPCSCTCERR 3
+#define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR)
+#define F_XAUIPCSCTCERR V_XAUIPCSCTCERR(1U)
+
+#define S_XAUIPCSALIGNCHANGE 2
+#define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE)
+#define F_XAUIPCSALIGNCHANGE V_XAUIPCSALIGNCHANGE(1U)
+
+#define S_XGM_INT 0
+#define V_XGM_INT(x) ((x) << S_XGM_INT)
+#define F_XGM_INT V_XGM_INT(1U)
+
+#define A_XGM_INT_CAUSE 0x8d8
+
+#define A_XGM_XAUI_ACT_CTRL 0x8dc
+
+#define S_TXACTENABLE 1
+#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE)
+#define F_TXACTENABLE V_TXACTENABLE(1U)
+
+#define A_XGM_SERDES_CTRL0 0x8e0
+
+#define S_RESET3 23
+#define V_RESET3(x) ((x) << S_RESET3)
+#define F_RESET3 V_RESET3(1U)
+
+#define S_RESET2 22
+#define V_RESET2(x) ((x) << S_RESET2)
+#define F_RESET2 V_RESET2(1U)
+
+#define S_RESET1 21
+#define V_RESET1(x) ((x) << S_RESET1)
+#define F_RESET1 V_RESET1(1U)
+
+#define S_RESET0 20
+#define V_RESET0(x) ((x) << S_RESET0)
+#define F_RESET0 V_RESET0(1U)
+
+#define S_PWRDN3 19
+#define V_PWRDN3(x) ((x) << S_PWRDN3)
+#define F_PWRDN3 V_PWRDN3(1U)
+
+#define S_PWRDN2 18
+#define V_PWRDN2(x) ((x) << S_PWRDN2)
+#define F_PWRDN2 V_PWRDN2(1U)
+
+#define S_PWRDN1 17
+#define V_PWRDN1(x) ((x) << S_PWRDN1)
+#define F_PWRDN1 V_PWRDN1(1U)
+
+#define S_PWRDN0 16
+#define V_PWRDN0(x) ((x) << S_PWRDN0)
+#define F_PWRDN0 V_PWRDN0(1U)
+
+#define S_RESETPLL23 15
+#define V_RESETPLL23(x) ((x) << S_RESETPLL23)
+#define F_RESETPLL23 V_RESETPLL23(1U)
+
+#define S_RESETPLL01 14
+#define V_RESETPLL01(x) ((x) << S_RESETPLL01)
+#define F_RESETPLL01 V_RESETPLL01(1U)
+
+#define A_XGM_SERDES_STAT0 0x8f0
+#define A_XGM_SERDES_STAT1 0x8f4
+#define A_XGM_SERDES_STAT2 0x8f8
+
+#define S_LOWSIG0 0
+#define V_LOWSIG0(x) ((x) << S_LOWSIG0)
+#define F_LOWSIG0 V_LOWSIG0(1U)
+
+#define A_XGM_SERDES_STAT3 0x8fc
+
+#define A_XGM_STAT_TX_BYTE_LOW 0x900
+
+#define A_XGM_STAT_TX_BYTE_HIGH 0x904
+
+#define A_XGM_STAT_TX_FRAME_LOW 0x908
+
+#define A_XGM_STAT_TX_FRAME_HIGH 0x90c
+
+#define A_XGM_STAT_TX_BCAST 0x910
+
+#define A_XGM_STAT_TX_MCAST 0x914
+
+#define A_XGM_STAT_TX_PAUSE 0x918
+
+#define A_XGM_STAT_TX_64B_FRAMES 0x91c
+
+#define A_XGM_STAT_TX_65_127B_FRAMES 0x920
+
+#define A_XGM_STAT_TX_128_255B_FRAMES 0x924
+
+#define A_XGM_STAT_TX_256_511B_FRAMES 0x928
+
+#define A_XGM_STAT_TX_512_1023B_FRAMES 0x92c
+
+#define A_XGM_STAT_TX_1024_1518B_FRAMES 0x930
+
+#define A_XGM_STAT_TX_1519_MAXB_FRAMES 0x934
+
+#define A_XGM_STAT_TX_ERR_FRAMES 0x938
+
+#define A_XGM_STAT_RX_BYTES_LOW 0x93c
+
+#define A_XGM_STAT_RX_BYTES_HIGH 0x940
+
+#define A_XGM_STAT_RX_FRAMES_LOW 0x944
+
+#define A_XGM_STAT_RX_FRAMES_HIGH 0x948
+
+#define A_XGM_STAT_RX_BCAST_FRAMES 0x94c
+
+#define A_XGM_STAT_RX_MCAST_FRAMES 0x950
+
+#define A_XGM_STAT_RX_PAUSE_FRAMES 0x954
+
+#define A_XGM_STAT_RX_64B_FRAMES 0x958
+
+#define A_XGM_STAT_RX_65_127B_FRAMES 0x95c
+
+#define A_XGM_STAT_RX_128_255B_FRAMES 0x960
+
+#define A_XGM_STAT_RX_256_511B_FRAMES 0x964
+
+#define A_XGM_STAT_RX_512_1023B_FRAMES 0x968
+
+#define A_XGM_STAT_RX_1024_1518B_FRAMES 0x96c
+
+#define A_XGM_STAT_RX_1519_MAXB_FRAMES 0x970
+
+#define A_XGM_STAT_RX_SHORT_FRAMES 0x974
+
+#define A_XGM_STAT_RX_OVERSIZE_FRAMES 0x978
+
+#define A_XGM_STAT_RX_JABBER_FRAMES 0x97c
+
+#define A_XGM_STAT_RX_CRC_ERR_FRAMES 0x980
+
+#define A_XGM_STAT_RX_LENGTH_ERR_FRAMES 0x984
+
+#define A_XGM_STAT_RX_SYM_CODE_ERR_FRAMES 0x988
+
+#define A_XGM_SERDES_STATUS0 0x98c
+
+#define A_XGM_SERDES_STATUS1 0x990
+
+#define S_CMULOCK 31
+#define V_CMULOCK(x) ((x) << S_CMULOCK)
+#define F_CMULOCK V_CMULOCK(1U)
+
+#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
+
+#define A_XGM_TX_SPI4_SOP_EOP_CNT 0x9a8
+
+#define S_TXSPI4SOPCNT 16
+#define M_TXSPI4SOPCNT 0xffff
+#define V_TXSPI4SOPCNT(x) ((x) << S_TXSPI4SOPCNT)
+#define G_TXSPI4SOPCNT(x) (((x) >> S_TXSPI4SOPCNT) & M_TXSPI4SOPCNT)
+
+#define A_XGM_RX_SPI4_SOP_EOP_CNT 0x9ac
+
+#define XGMAC0_1_BASE_ADDR 0xa00
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
new file mode 100644
index 000000000000..2f46b37e5d16
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -0,0 +1,3303 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+#include <net/arp.h>
+#include "common.h"
+#include "regs.h"
+#include "sge_defs.h"
+#include "t3_cpl.h"
+#include "firmware_exports.h"
+#include "cxgb3_offload.h"
+
+#define USE_GTS 0
+
+#define SGE_RX_SM_BUF_SIZE 1536
+
+#define SGE_RX_COPY_THRES 256
+#define SGE_RX_PULL_LEN 128
+
+#define SGE_PG_RSVD SMP_CACHE_BYTES
+/*
+ * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
+ * It must be a divisor of PAGE_SIZE. If set to 0, FL0 will use sk_buffs
+ * directly.
+ */
+#define FL0_PG_CHUNK_SIZE 2048
+#define FL0_PG_ORDER 0
+#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
+#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
+#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
+#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
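+
+/*
+ * Illustrative sizing (assuming 4KB pages): FL0 carves each order-0 page
+ * into two 2KB chunks (FL0_PG_ALLOC_SIZE / FL0_PG_CHUNK_SIZE == 2), while
+ * FL1 hands out one 8KB chunk per order-1 allocation. On platforms with
+ * pages larger than 8KB, FL1 instead takes 16KB chunks from order-0 pages.
+ */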
+
+#define SGE_RX_DROP_THRES 16
+#define RX_RECLAIM_PERIOD (HZ/4)
+
+/*
+ * Max number of Rx buffers we replenish at a time.
+ */
+#define MAX_RX_REFILL 16U
+/*
+ * Period of the Tx buffer reclaim timer. This timer does not need to run
+ * frequently as Tx buffers are usually reclaimed by new Tx packets.
+ */
+#define TX_RECLAIM_PERIOD (HZ / 4)
+#define TX_RECLAIM_TIMER_CHUNK 64U
+#define TX_RECLAIM_CHUNK 16U
+
+/* WR size in bytes */
+#define WR_LEN (WR_FLITS * 8)
+
+/*
+ * Types of Tx queues in each queue set. Order here matters, do not change.
+ */
+enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
+
+/* Values for sge_txq.flags */
+enum {
+ TXQ_RUNNING = 1 << 0, /* fetch engine is running */
+ TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
+};
+
+struct tx_desc {
+ __be64 flit[TX_DESC_FLITS];
+};
+
+struct rx_desc {
+ __be32 addr_lo;
+ __be32 len_gen;
+ __be32 gen2;
+ __be32 addr_hi;
+};
+
+struct tx_sw_desc { /* SW state per Tx descriptor */
+ struct sk_buff *skb;
+ u8 eop; /* set if last descriptor for packet */
+ u8 addr_idx; /* buffer index of first SGL entry in descriptor */
+ u8 fragidx; /* first page fragment associated with descriptor */
+ s8 sflit; /* start flit of first SGL entry in descriptor */
+};
+
+struct rx_sw_desc { /* SW state per Rx descriptor */
+ union {
+ struct sk_buff *skb;
+ struct fl_pg_chunk pg_chunk;
+ };
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+};
+
+struct rsp_desc { /* response queue descriptor */
+ struct rss_header rss_hdr;
+ __be32 flags;
+ __be32 len_cq;
+ u8 imm_data[47];
+ u8 intr_gen;
+};
+
+/*
+ * Holds unmapping information for Tx packets that need deferred unmapping.
+ * This structure lives at skb->head and must be allocated by callers.
+ */
+struct deferred_unmap_info {
+ struct pci_dev *pdev;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
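+
+/*
+ * Illustrative sketch (not from this patch): a sender needing deferred
+ * unmapping would reserve this structure at the head of the skb, e.g.
+ *
+ *	struct deferred_unmap_info *di;
+ *
+ *	di = (struct deferred_unmap_info *)skb->head;
+ *	di->pdev = pdev;
+ */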
+
+/*
+ * Maps a number of flits to the number of Tx descriptors that can hold them.
+ * The formula is
+ *
+ * desc = 1 + (flits - 2) / (WR_FLITS - 1).
+ *
+ * HW allows up to 4 descriptors to be combined into a WR.
+ */
+static u8 flit_desc_map[] = {
+ 0,
+#if SGE_NUM_GENBITS == 1
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
+#elif SGE_NUM_GENBITS == 2
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+#else
+# error "SGE_NUM_GENBITS must be 1 or 2"
+#endif
+};
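+
+/*
+ * Worked example for the formula above (illustrative; assumes WR_FLITS is
+ * 15 when SGE_NUM_GENBITS == 2, as the table implies): 15 flits fit in a
+ * single descriptor (1 + 13/14 == 1) while 16 flits need two descriptors
+ * (1 + 14/14 == 2), matching the table entries above.
+ */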
+
+static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
+{
+ return container_of(q, struct sge_qset, fl[qidx]);
+}
+
+static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
+{
+ return container_of(q, struct sge_qset, rspq);
+}
+
+static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
+{
+ return container_of(q, struct sge_qset, txq[qidx]);
+}
+
+/**
+ * refill_rspq - replenish an SGE response queue
+ * @adapter: the adapter
+ * @q: the response queue to replenish
+ * @credits: how many new responses to make available
+ *
+ * Replenishes a response queue by making the supplied number of responses
+ * available to HW.
+ */
+static inline void refill_rspq(struct adapter *adapter,
+ const struct sge_rspq *q, unsigned int credits)
+{
+ rmb();
+ t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
+ V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
+}
+
+/**
+ * need_skb_unmap - does the platform need unmapping of sk_buffs?
+ *
+ * Returns true if the platform needs sk_buff unmapping. The result is a
+ * compile-time constant, so the compiler optimizes away the unmapping
+ * code entirely when this returns false.
+ */
+static inline int need_skb_unmap(void)
+{
+#ifdef CONFIG_NEED_DMA_MAP_STATE
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+/**
+ * unmap_skb - unmap a packet main body and its page fragments
+ * @skb: the packet
+ * @q: the Tx queue containing Tx descriptors for the packet
+ * @cidx: index of Tx descriptor
+ * @pdev: the PCI device
+ *
+ * Unmap the main body of an sk_buff and its page fragments, if any.
+ * Because of the fairly complicated structure of our SGLs and the desire
+ * to conserve space for metadata, the information necessary to unmap an
+ * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
+ * descriptors (the physical addresses of the various data buffers), and
+ * the SW descriptor state (assorted indices). The send functions
+ * initialize the indices for the first packet descriptor so we can unmap
+ * the buffers held in the first Tx descriptor here, and we have enough
+ * information at this point to set the state for the next Tx descriptor.
+ *
+ * Note that it is possible to clean up the first descriptor of a packet
+ * before the send routines have written the next descriptors, but this
+ * race does not cause any problem. We just end up writing the unmapping
+ * info for the descriptor first.
+ */
+static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
+ unsigned int cidx, struct pci_dev *pdev)
+{
+ const struct sg_ent *sgp;
+ struct tx_sw_desc *d = &q->sdesc[cidx];
+ int nfrags, frag_idx, curflit, j = d->addr_idx;
+
+ sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
+ frag_idx = d->fragidx;
+
+ if (frag_idx == 0 && skb_headlen(skb)) {
+ pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ j = 1;
+ }
+
+ curflit = d->sflit + 1 + j;
+ nfrags = skb_shinfo(skb)->nr_frags;
+
+ while (frag_idx < nfrags && curflit < WR_FLITS) {
+ pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
+ skb_shinfo(skb)->frags[frag_idx].size,
+ PCI_DMA_TODEVICE);
+ j ^= 1;
+ if (j == 0) {
+ sgp++;
+ curflit++;
+ }
+ curflit++;
+ frag_idx++;
+ }
+
+ if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
+ d = cidx + 1 == q->size ? q->sdesc : d + 1;
+ d->fragidx = frag_idx;
+ d->addr_idx = j;
+ d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
+ }
+}
+
+/**
+ * free_tx_desc - reclaims Tx descriptors and their buffers
+ * @adapter: the adapter
+ * @q: the Tx queue to reclaim descriptors from
+ * @n: the number of descriptors to reclaim
+ *
+ * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
+ * Tx buffers. Called with the Tx queue lock held.
+ */
+static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
+ unsigned int n)
+{
+ struct tx_sw_desc *d;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned int cidx = q->cidx;
+
+ const int need_unmap = need_skb_unmap() &&
+ q->cntxt_id >= FW_TUNNEL_SGEEC_START;
+
+ d = &q->sdesc[cidx];
+ while (n--) {
+ if (d->skb) { /* an SGL is present */
+ if (need_unmap)
+ unmap_skb(d->skb, q, cidx, pdev);
+ if (d->eop) {
+ kfree_skb(d->skb);
+ d->skb = NULL;
+ }
+ }
+ ++d;
+ if (++cidx == q->size) {
+ cidx = 0;
+ d = q->sdesc;
+ }
+ }
+ q->cidx = cidx;
+}
+
+/**
+ * reclaim_completed_tx - reclaims completed Tx descriptors
+ * @adapter: the adapter
+ * @q: the Tx queue to reclaim completed descriptors from
+ * @chunk: maximum number of descriptors to reclaim
+ *
+ * Reclaims Tx descriptors that the SGE has indicated it has processed,
+ * and frees the associated buffers if possible. Called with the Tx
+ * queue's lock held.
+ */
+static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
+ struct sge_txq *q,
+ unsigned int chunk)
+{
+ unsigned int reclaim = q->processed - q->cleaned;
+
+ reclaim = min(chunk, reclaim);
+ if (reclaim) {
+ free_tx_desc(adapter, q, reclaim);
+ q->cleaned += reclaim;
+ q->in_use -= reclaim;
+ }
+ return q->processed - q->cleaned;
+}
+
+/**
+ * should_restart_tx - are there enough resources to restart a Tx queue?
+ * @q: the Tx queue
+ *
+ * Checks if there are enough descriptors to restart a suspended Tx queue.
+ */
+static inline int should_restart_tx(const struct sge_txq *q)
+{
+ unsigned int r = q->processed - q->cleaned;
+
+ return q->in_use - r < (q->size >> 1);
+}
+
+static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
+ struct rx_sw_desc *d)
+{
+ if (q->use_pages && d->pg_chunk.page) {
+ (*d->pg_chunk.p_cnt)--;
+ if (!*d->pg_chunk.p_cnt)
+ pci_unmap_page(pdev,
+ d->pg_chunk.mapping,
+ q->alloc_size, PCI_DMA_FROMDEVICE);
+
+ put_page(d->pg_chunk.page);
+ d->pg_chunk.page = NULL;
+ } else {
+ pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
+ q->buf_size, PCI_DMA_FROMDEVICE);
+ kfree_skb(d->skb);
+ d->skb = NULL;
+ }
+}
+
+/**
+ * free_rx_bufs - free the Rx buffers on an SGE free list
+ * @pdev: the PCI device associated with the adapter
+ * @q: the SGE free list to clean up
+ *
+ * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
+ * this queue should be stopped before calling this function.
+ */
+static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
+{
+ unsigned int cidx = q->cidx;
+
+ while (q->credits--) {
+ struct rx_sw_desc *d = &q->sdesc[cidx];
+
+ clear_rx_desc(pdev, q, d);
+ if (++cidx == q->size)
+ cidx = 0;
+ }
+
+ if (q->pg_chunk.page) {
+ __free_pages(q->pg_chunk.page, q->order);
+ q->pg_chunk.page = NULL;
+ }
+}
+
+/**
+ * add_one_rx_buf - add a packet buffer to a free-buffer list
+ * @va: buffer start VA
+ * @len: the buffer length
+ * @d: the HW Rx descriptor to write
+ * @sd: the SW Rx descriptor to write
+ * @gen: the generation bit value
+ * @pdev: the PCI device associated with the adapter
+ *
+ * Add a buffer of the given length to the supplied HW and SW Rx
+ * descriptors.
+ */
+static inline int add_one_rx_buf(void *va, unsigned int len,
+ struct rx_desc *d, struct rx_sw_desc *sd,
+ unsigned int gen, struct pci_dev *pdev)
+{
+ dma_addr_t mapping;
+
+ mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(pdev, mapping)))
+ return -ENOMEM;
+
+ dma_unmap_addr_set(sd, dma_addr, mapping);
+
+ d->addr_lo = cpu_to_be32(mapping);
+ d->addr_hi = cpu_to_be32((u64) mapping >> 32);
+ wmb();
+ d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
+ d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
+ return 0;
+}
+
+static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
+ unsigned int gen)
+{
+ d->addr_lo = cpu_to_be32(mapping);
+ d->addr_hi = cpu_to_be32((u64) mapping >> 32);
+ wmb();
+ d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
+ d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
+ return 0;
+}
+
+static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
+ struct rx_sw_desc *sd, gfp_t gfp,
+ unsigned int order)
+{
+ if (!q->pg_chunk.page) {
+ dma_addr_t mapping;
+
+ q->pg_chunk.page = alloc_pages(gfp, order);
+ if (unlikely(!q->pg_chunk.page))
+ return -ENOMEM;
+ q->pg_chunk.va = page_address(q->pg_chunk.page);
+ q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
+ SGE_PG_RSVD;
+ q->pg_chunk.offset = 0;
+ mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
+ 0, q->alloc_size, PCI_DMA_FROMDEVICE);
+ q->pg_chunk.mapping = mapping;
+ }
+ sd->pg_chunk = q->pg_chunk;
+
+ prefetch(sd->pg_chunk.p_cnt);
+
+ q->pg_chunk.offset += q->buf_size;
+ if (q->pg_chunk.offset == (PAGE_SIZE << order))
+ q->pg_chunk.page = NULL;
+ else {
+ q->pg_chunk.va += q->buf_size;
+ get_page(q->pg_chunk.page);
+ }
+
+ if (sd->pg_chunk.offset == 0)
+ *sd->pg_chunk.p_cnt = 1;
+ else
+ *sd->pg_chunk.p_cnt += 1;
+
+ return 0;
+}
+
+static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
+{
+ if (q->pend_cred >= q->credits / 4) {
+ q->pend_cred = 0;
+ wmb();
+ t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+ }
+}
+
+/**
+ * refill_fl - refill an SGE free-buffer list
+ * @adapter: the adapter
+ * @q: the free-list to refill
+ * @n: the number of new buffers to allocate
+ * @gfp: the gfp flags for allocating new buffers
+ *
+ * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
+ * allocated with the supplied gfp flags. The caller must assure that
+ * @n does not exceed the queue's capacity.
+ */
+static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
+{
+ struct rx_sw_desc *sd = &q->sdesc[q->pidx];
+ struct rx_desc *d = &q->desc[q->pidx];
+ unsigned int count = 0;
+
+ while (n--) {
+ dma_addr_t mapping;
+ int err;
+
+ if (q->use_pages) {
+ if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
+ q->order))) {
+nomem: q->alloc_failed++;
+ break;
+ }
+ mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
+ dma_unmap_addr_set(sd, dma_addr, mapping);
+
+ add_one_rx_chunk(mapping, d, q->gen);
+ pci_dma_sync_single_for_device(adap->pdev, mapping,
+ q->buf_size - SGE_PG_RSVD,
+ PCI_DMA_FROMDEVICE);
+ } else {
+ void *buf_start;
+
+ struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
+ if (!skb)
+ goto nomem;
+
+ sd->skb = skb;
+ buf_start = skb->data;
+ err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
+ q->gen, adap->pdev);
+ if (unlikely(err)) {
+ clear_rx_desc(adap->pdev, q, sd);
+ break;
+ }
+ }
+
+ d++;
+ sd++;
+ if (++q->pidx == q->size) {
+ q->pidx = 0;
+ q->gen ^= 1;
+ sd = q->sdesc;
+ d = q->desc;
+ }
+ count++;
+ }
+
+ q->credits += count;
+ q->pend_cred += count;
+ ring_fl_db(adap, q);
+
+ return count;
+}
+
+static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
+{
+ refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
+ GFP_ATOMIC | __GFP_COMP);
+}
+
+/**
+ * recycle_rx_buf - recycle a receive buffer
+ * @adap: the adapter
+ * @q: the SGE free list
+ * @idx: index of buffer to recycle
+ *
+ * Recycles the specified buffer on the given free list by adding it at
+ * the next available slot on the list.
+ */
+static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
+ unsigned int idx)
+{
+ struct rx_desc *from = &q->desc[idx];
+ struct rx_desc *to = &q->desc[q->pidx];
+
+ q->sdesc[q->pidx] = q->sdesc[idx];
+ to->addr_lo = from->addr_lo; /* already big endian */
+ to->addr_hi = from->addr_hi; /* likewise */
+ wmb();
+ to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
+ to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
+
+ if (++q->pidx == q->size) {
+ q->pidx = 0;
+ q->gen ^= 1;
+ }
+
+ q->credits++;
+ q->pend_cred++;
+ ring_fl_db(adap, q);
+}
+
+/**
+ * alloc_ring - allocate resources for an SGE descriptor ring
+ * @pdev: the PCI device
+ * @nelem: the number of descriptors
+ * @elem_size: the size of each descriptor
+ * @sw_size: the size of the SW state associated with each ring element
+ * @phys: the physical address of the allocated ring
+ * @metadata: address of the array holding the SW state for the ring
+ *
+ * Allocates resources for an SGE descriptor ring, such as Tx queues,
+ * free buffer lists, or response queues. Each SGE ring requires
+ * space for its HW descriptors plus, optionally, space for the SW state
+ * associated with each HW entry (the metadata). The function returns
+ * three values: the virtual address for the HW ring (the return value
+ * of the function), the physical address of the HW ring, and the address
+ * of the SW ring.
+ */
+static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
+ size_t sw_size, dma_addr_t * phys, void *metadata)
+{
+ size_t len = nelem * elem_size;
+ void *s = NULL;
+ void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
+
+ if (!p)
+ return NULL;
+ if (sw_size && metadata) {
+ s = kcalloc(nelem, sw_size, GFP_KERNEL);
+
+ if (!s) {
+ dma_free_coherent(&pdev->dev, len, p, *phys);
+ return NULL;
+ }
+ *(void **)metadata = s;
+ }
+ memset(p, 0, len);
+ return p;
+}
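+
+/*
+ * Illustrative call (a sketch of how queue-setup code might use this; the
+ * exact field names here are hypothetical):
+ *
+ *	q->desc = alloc_ring(adap->pdev, q->size, sizeof(struct rx_desc),
+ *			     sizeof(struct rx_sw_desc), &q->phys_addr,
+ *			     &q->sdesc);
+ */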
+
+/**
+ * t3_reset_qset - reset an SGE queue set
+ * @q: the queue set
+ *
+ * Reset the qset structure. The NAPI structure is preserved in the event
+ * of the qset's reincarnation, for example during EEH recovery.
+ */
+static void t3_reset_qset(struct sge_qset *q)
+{
+ if (q->adap && !(q->adap->flags & NAPI_INIT)) {
+ memset(q, 0, sizeof(*q));
+ return;
+ }
+
+ q->adap = NULL;
+ memset(&q->rspq, 0, sizeof(q->rspq));
+ memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
+ memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
+ q->txq_stopped = 0;
+ q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
+ q->rx_reclaim_timer.function = NULL;
+ q->nomem = 0;
+ napi_free_frags(&q->napi);
+}
+
+/**
+ * t3_free_qset - free the resources of an SGE queue set
+ * @adapter: the adapter owning the queue set
+ * @q: the queue set
+ *
+ * Release the HW and SW resources associated with an SGE queue set, such
+ * as HW contexts, packet buffers, and descriptor rings. Traffic to the
+ * queue set must be quiesced prior to calling this.
+ */
+static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
+{
+ int i;
+ struct pci_dev *pdev = adapter->pdev;
+
+ for (i = 0; i < SGE_RXQ_PER_SET; ++i)
+ if (q->fl[i].desc) {
+ spin_lock_irq(&adapter->sge.reg_lock);
+ t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
+ spin_unlock_irq(&adapter->sge.reg_lock);
+ free_rx_bufs(pdev, &q->fl[i]);
+ kfree(q->fl[i].sdesc);
+ dma_free_coherent(&pdev->dev,
+ q->fl[i].size *
+ sizeof(struct rx_desc), q->fl[i].desc,
+ q->fl[i].phys_addr);
+ }
+
+ for (i = 0; i < SGE_TXQ_PER_SET; ++i)
+ if (q->txq[i].desc) {
+ spin_lock_irq(&adapter->sge.reg_lock);
+ t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
+ spin_unlock_irq(&adapter->sge.reg_lock);
+ if (q->txq[i].sdesc) {
+ free_tx_desc(adapter, &q->txq[i],
+ q->txq[i].in_use);
+ kfree(q->txq[i].sdesc);
+ }
+ dma_free_coherent(&pdev->dev,
+ q->txq[i].size *
+ sizeof(struct tx_desc),
+ q->txq[i].desc, q->txq[i].phys_addr);
+ __skb_queue_purge(&q->txq[i].sendq);
+ }
+
+ if (q->rspq.desc) {
+ spin_lock_irq(&adapter->sge.reg_lock);
+ t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
+ spin_unlock_irq(&adapter->sge.reg_lock);
+ dma_free_coherent(&pdev->dev,
+ q->rspq.size * sizeof(struct rsp_desc),
+ q->rspq.desc, q->rspq.phys_addr);
+ }
+
+ t3_reset_qset(q);
+}
+
+/**
+ * init_qset_cntxt - initialize an SGE queue set context info
+ * @qs: the queue set
+ * @id: the queue set id
+ *
+ * Initializes the TIDs and context ids for the queues of a queue set.
+ */
+static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
+{
+ qs->rspq.cntxt_id = id;
+ qs->fl[0].cntxt_id = 2 * id;
+ qs->fl[1].cntxt_id = 2 * id + 1;
+ qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
+ qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
+ qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
+ qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
+ qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
+}
+
+/**
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ *
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
+ return (3 * n) / 2 + (n & 1);
+}
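+
+/*
+ * Why (3 * n) / 2 + (n & 1) (illustrative note): each pair of SGL entries
+ * shares one flit of two 32-bit lengths plus one 64-bit address flit per
+ * entry, i.e. 3 flits per 2 entries; a lone final entry costs 2 flits.
+ * E.g. n == 3 gives 4 + 1 == 5 flits.
+ */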
+
+/**
+ * flits_to_desc - returns the num of Tx descriptors for the given flits
+ * @n: the number of flits
+ *
+ * Calculates the number of Tx descriptors needed for the supplied number
+ * of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+ BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
+ return flit_desc_map[n];
+}
+
+/**
+ * get_packet - return the next ingress packet buffer from a free list
+ * @adap: the adapter that received the packet
+ * @fl: the SGE free list holding the packet
+ * @len: the packet length including any SGE padding
+ * @drop_thres: # of remaining buffers before we start dropping packets
+ *
+ * Get the next packet from a free list and complete setup of the
+ * sk_buff. If the packet is small we make a copy and recycle the
+ * original buffer, otherwise we use the original buffer itself. If a
+ * positive drop threshold is supplied packets are dropped and their
+ * buffers recycled if (a) the number of remaining buffers is under the
+ * threshold and the packet is too big to copy, or (b) the packet should
+ * be copied but there is no memory for the copy.
+ */
+static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
+ unsigned int len, unsigned int drop_thres)
+{
+ struct sk_buff *skb = NULL;
+ struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+ prefetch(sd->skb->data);
+ fl->credits--;
+
+ if (len <= SGE_RX_COPY_THRES) {
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (likely(skb != NULL)) {
+ __skb_put(skb, len);
+ pci_dma_sync_single_for_cpu(adap->pdev,
+ dma_unmap_addr(sd, dma_addr), len,
+ PCI_DMA_FROMDEVICE);
+ memcpy(skb->data, sd->skb->data, len);
+ pci_dma_sync_single_for_device(adap->pdev,
+ dma_unmap_addr(sd, dma_addr), len,
+ PCI_DMA_FROMDEVICE);
+ } else if (!drop_thres)
+ goto use_orig_buf;
+recycle:
+ recycle_rx_buf(adap, fl, fl->cidx);
+ return skb;
+ }
+
+ if (unlikely(fl->credits < drop_thres) &&
+ refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
+ GFP_ATOMIC | __GFP_COMP) == 0)
+ goto recycle;
+
+use_orig_buf:
+ pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
+ fl->buf_size, PCI_DMA_FROMDEVICE);
+ skb = sd->skb;
+ skb_put(skb, len);
+ __refill_fl(adap, fl);
+ return skb;
+}
+
+/**
+ * get_packet_pg - return the next ingress packet buffer from a free list
+ * @adap: the adapter that received the packet
+ * @fl: the SGE free list holding the packet
+ * @q: the response queue
+ * @len: the packet length including any SGE padding
+ * @drop_thres: # of remaining buffers before we start dropping packets
+ *
+ * Get the next packet from a free list populated with page chunks.
+ * If the packet is small we make a copy and recycle the original buffer,
+ * otherwise we attach the original buffer as a page fragment to a fresh
+ * sk_buff. If a positive drop threshold is supplied packets are dropped
+ * and their buffers recycled if (a) the number of remaining buffers is
+ * under the threshold and the packet is too big to copy, or (b) there's
+ * no system memory.
+ *
+ * Note: this function is similar to @get_packet but deals with Rx buffers
+ * that are page chunks rather than sk_buffs.
+ */
+static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
+ struct sge_rspq *q, unsigned int len,
+ unsigned int drop_thres)
+{
+ struct sk_buff *newskb, *skb;
+ struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+ dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
+
+ newskb = skb = q->pg_skb;
+ if (!skb && (len <= SGE_RX_COPY_THRES)) {
+ newskb = alloc_skb(len, GFP_ATOMIC);
+ if (likely(newskb != NULL)) {
+ __skb_put(newskb, len);
+ pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
+ PCI_DMA_FROMDEVICE);
+ memcpy(newskb->data, sd->pg_chunk.va, len);
+ pci_dma_sync_single_for_device(adap->pdev, dma_addr,
+ len,
+ PCI_DMA_FROMDEVICE);
+ } else if (!drop_thres)
+ return NULL;
+recycle:
+ fl->credits--;
+ recycle_rx_buf(adap, fl, fl->cidx);
+ q->rx_recycle_buf++;
+ return newskb;
+ }
+
+ if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
+ goto recycle;
+
+ prefetch(sd->pg_chunk.p_cnt);
+
+ if (!skb)
+ newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
+
+ if (unlikely(!newskb)) {
+ if (!drop_thres)
+ return NULL;
+ goto recycle;
+ }
+
+ pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
+ PCI_DMA_FROMDEVICE);
+ (*sd->pg_chunk.p_cnt)--;
+ if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
+ pci_unmap_page(adap->pdev,
+ sd->pg_chunk.mapping,
+ fl->alloc_size,
+ PCI_DMA_FROMDEVICE);
+ if (!skb) {
+ __skb_put(newskb, SGE_RX_PULL_LEN);
+ memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
+ skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
+ sd->pg_chunk.offset + SGE_RX_PULL_LEN,
+ len - SGE_RX_PULL_LEN);
+ newskb->len = len;
+ newskb->data_len = len - SGE_RX_PULL_LEN;
+ newskb->truesize += newskb->data_len;
+ } else {
+ skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
+ sd->pg_chunk.page,
+ sd->pg_chunk.offset, len);
+ newskb->len += len;
+ newskb->data_len += len;
+ newskb->truesize += len;
+ }
+
+ fl->credits--;
+ /*
+ * We do not refill FLs here, we let the caller do it to overlap a
+ * prefetch.
+ */
+ return newskb;
+}
+
+/**
+ * get_imm_packet - return the next ingress packet buffer from a response
+ * @resp: the response descriptor containing the packet data
+ *
+ * Return a packet containing the immediate data of the given response.
+ */
+static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
+{
+ struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
+
+ if (skb) {
+ __skb_put(skb, IMMED_PKT_SIZE);
+ skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
+ }
+ return skb;
+}
+
+/**
+ * calc_tx_descs - calculate the number of Tx descriptors for a packet
+ * @skb: the packet
+ *
+ * Returns the number of Tx descriptors needed for the given Ethernet
+ * packet. Ethernet packets require addition of WR and CPL headers.
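+ *
+ * For example, assuming sgl_len(n) evaluates to 3 * n / 2 + (n & 1),
+ * a packet with a linear part and 2 page fragments needs
+ * sgl_len(3) + 2 = 7 flits (8 with TSO) before flits_to_desc().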
+ */
+static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
+{
+ unsigned int flits;
+
+ if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
+ return 1;
+
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
+ if (skb_shinfo(skb)->gso_size)
+ flits++;
+ return flits_to_desc(flits);
+}
+
+/**
+ * make_sgl - populate a scatter/gather list for a packet
+ * @skb: the packet
+ * @sgp: the SGL to populate
+ * @start: start address of skb main body data to include in the SGL
+ * @len: length of skb main body data to include in the SGL
+ * @pdev: the PCI device
+ *
+ * Generates a scatter/gather list for the buffers that make up a packet
+ * and returns the SGL size in 8-byte words. The caller must size the SGL
+ * appropriately.
+ */
+static inline unsigned int make_sgl(const struct sk_buff *skb,
+ struct sg_ent *sgp, unsigned char *start,
+ unsigned int len, struct pci_dev *pdev)
+{
+ dma_addr_t mapping;
+ unsigned int i, j = 0, nfrags;
+
+ if (len) {
+ mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
+ sgp->len[0] = cpu_to_be32(len);
+ sgp->addr[0] = cpu_to_be64(mapping);
+ j = 1;
+ }
+
+ nfrags = skb_shinfo(skb)->nr_frags;
+ for (i = 0; i < nfrags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ mapping = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size,
+ DMA_TO_DEVICE);
+ sgp->len[j] = cpu_to_be32(frag->size);
+ sgp->addr[j] = cpu_to_be64(mapping);
+ j ^= 1;
+ if (j == 0)
+ ++sgp;
+ }
+ if (j)
+ sgp->len[j] = 0;
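+ /*
+ * Each sg_ent packs two buffer addresses into 3 flits, so for
+ * example 3 buffers (a non-empty linear part plus 2 fragments)
+ * yield 3 * 3 / 2 + 1 = 5 flits: one full entry plus a half-used
+ * trailing entry.
+ */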
+ return ((nfrags + (len != 0)) * 3) / 2 + j;
+}
+
+/**
+ * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
+ * @adap: the adapter
+ * @q: the Tx queue
+ *
+ * Ring the doorbell if a Tx queue is asleep. There is a natural race
+ * where the HW may go to sleep just after we check; in that case the
+ * interrupt handler will detect the outstanding Tx packet and ring the
+ * doorbell for us.
+ *
+ * When GTS is disabled we unconditionally ring the doorbell.
+ */
+static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
+{
+#if USE_GTS
+ clear_bit(TXQ_LAST_PKT_DB, &q->flags);
+ if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
+ set_bit(TXQ_LAST_PKT_DB, &q->flags);
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+ }
+#else
+ wmb(); /* write descriptors before telling HW */
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+#endif
+}
+
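+/*
+ * With two generation bits, the last flit of each Tx descriptor carries a
+ * copy of the generation value so the HW can verify that the descriptor
+ * was written in its entirety before acting on it.
+ */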
+static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
+{
+#if SGE_NUM_GENBITS == 2
+ d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
+#endif
+}
+
+/**
+ * write_wr_hdr_sgl - write a WR header and, optionally, SGL
+ * @ndesc: number of Tx descriptors spanned by the SGL
+ * @skb: the packet corresponding to the WR
+ * @d: first Tx descriptor to be written
+ * @pidx: index of above descriptors
+ * @q: the SGE Tx queue
+ * @sgl: the SGL
+ * @flits: number of flits to the start of the SGL in the first descriptor
+ * @sgl_flits: the SGL size in flits
+ * @gen: the Tx descriptor generation
+ * @wr_hi: top 32 bits of WR header based on WR type (big endian)
+ * @wr_lo: low 32 bits of WR header based on WR type (big endian)
+ *
+ * Write a work request header and an associated SGL. If the SGL is
+ * small enough to fit into one Tx descriptor it has already been written
+ * and we just need to write the WR header. Otherwise we distribute the
+ * SGL across the number of descriptors it spans.
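+ *
+ * For example, when the SGL spans several descriptors the first one
+ * holds WR_FLITS - @flits SGL flits and each following descriptor
+ * holds up to WR_FLITS - 1 SGL flits after its own WR header.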
+ */
+static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
+ struct tx_desc *d, unsigned int pidx,
+ const struct sge_txq *q,
+ const struct sg_ent *sgl,
+ unsigned int flits, unsigned int sgl_flits,
+ unsigned int gen, __be32 wr_hi,
+ __be32 wr_lo)
+{
+ struct work_request_hdr *wrp = (struct work_request_hdr *)d;
+ struct tx_sw_desc *sd = &q->sdesc[pidx];
+
+ sd->skb = skb;
+ if (need_skb_unmap()) {
+ sd->fragidx = 0;
+ sd->addr_idx = 0;
+ sd->sflit = flits;
+ }
+
+ if (likely(ndesc == 1)) {
+ sd->eop = 1;
+ wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
+ V_WR_SGLSFLT(flits)) | wr_hi;
+ wmb();
+ wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
+ V_WR_GEN(gen)) | wr_lo;
+ wr_gen2(d, gen);
+ } else {
+ unsigned int ogen = gen;
+ const u64 *fp = (const u64 *)sgl;
+ struct work_request_hdr *wp = wrp;
+
+ wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
+ V_WR_SGLSFLT(flits)) | wr_hi;
+
+ while (sgl_flits) {
+ unsigned int avail = WR_FLITS - flits;
+
+ if (avail > sgl_flits)
+ avail = sgl_flits;
+ memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
+ sgl_flits -= avail;
+ ndesc--;
+ if (!sgl_flits)
+ break;
+
+ fp += avail;
+ d++;
+ sd->eop = 0;
+ sd++;
+ if (++pidx == q->size) {
+ pidx = 0;
+ gen ^= 1;
+ d = q->desc;
+ sd = q->sdesc;
+ }
+
+ sd->skb = skb;
+ wrp = (struct work_request_hdr *)d;
+ wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
+ V_WR_SGLSFLT(1)) | wr_hi;
+ wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
+ sgl_flits + 1)) |
+ V_WR_GEN(gen)) | wr_lo;
+ wr_gen2(d, gen);
+ flits = 1;
+ }
+ sd->eop = 1;
+ wrp->wr_hi |= htonl(F_WR_EOP);
+ wmb();
+ wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
+ wr_gen2((struct tx_desc *)wp, ogen);
+ WARN_ON(ndesc != 0);
+ }
+}
+
+/**
+ * write_tx_pkt_wr - write a TX_PKT work request
+ * @adap: the adapter
+ * @skb: the packet to send
+ * @pi: the egress interface
+ * @pidx: index of the first Tx descriptor to write
+ * @gen: the generation value to use
+ * @q: the Tx queue
+ * @ndesc: number of descriptors the packet will occupy
+ * @compl: the value of the COMPL bit to use
+ *
+ * Generate a TX_PKT work request to send the supplied packet.
+ */
+static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
+ const struct port_info *pi,
+ unsigned int pidx, unsigned int gen,
+ struct sge_txq *q, unsigned int ndesc,
+ unsigned int compl)
+{
+ unsigned int flits, sgl_flits, cntrl, tso_info;
+ struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
+ struct tx_desc *d = &q->desc[pidx];
+ struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
+
+ cpl->len = htonl(skb->len);
+ cntrl = V_TXPKT_INTF(pi->port_id);
+
+ if (vlan_tx_tag_present(skb))
+ cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
+
+ tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
+ if (tso_info) {
+ int eth_type;
+ struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
+
+ d->flit[2] = 0;
+ cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
+ hdr->cntrl = htonl(cntrl);
+ eth_type = skb_network_offset(skb) == ETH_HLEN ?
+ CPL_ETH_II : CPL_ETH_II_VLAN;
+ tso_info |= V_LSO_ETH_TYPE(eth_type) |
+ V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
+ V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
+ hdr->lso_info = htonl(tso_info);
+ flits = 3;
+ } else {
+ cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
+ cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
+ cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
+ cpl->cntrl = htonl(cntrl);
+
+ if (skb->len <= WR_LEN - sizeof(*cpl)) {
+ q->sdesc[pidx].skb = NULL;
+ if (!skb->data_len)
+ skb_copy_from_linear_data(skb, &d->flit[2],
+ skb->len);
+ else
+ skb_copy_bits(skb, 0, &d->flit[2], skb->len);
+
+ flits = (skb->len + 7) / 8 + 2;
+ cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
+ V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
+ | F_WR_SOP | F_WR_EOP | compl);
+ wmb();
+ cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
+ V_WR_TID(q->token));
+ wr_gen2(d, gen);
+ kfree_skb(skb);
+ return;
+ }
+
+ flits = 2;
+ }
+
+ sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
+ sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
+
+ write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
+ htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
+ htonl(V_WR_TID(q->token)));
+}
+
+static inline void t3_stop_tx_queue(struct netdev_queue *txq,
+ struct sge_qset *qs, struct sge_txq *q)
+{
+ netif_tx_stop_queue(txq);
+ set_bit(TXQ_ETH, &qs->txq_stopped);
+ q->stops++;
+}
+
+/**
+ * t3_eth_xmit - add a packet to the Ethernet Tx queue
+ * @skb: the packet
+ * @dev: the egress net device
+ *
+ * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
+ */
+netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ int qidx;
+ unsigned int ndesc, pidx, credits, gen, compl;
+ const struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ struct netdev_queue *txq;
+ struct sge_qset *qs;
+ struct sge_txq *q;
+
+ /*
+ * The chip's minimum packet length is 9 octets, but we play it
+ * safe and reject anything shorter than an Ethernet header.
+ */
+ if (unlikely(skb->len < ETH_HLEN)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ qidx = skb_get_queue_mapping(skb);
+ qs = &pi->qs[qidx];
+ q = &qs->txq[TXQ_ETH];
+ txq = netdev_get_tx_queue(dev, qidx);
+
+ reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
+
+ credits = q->size - q->in_use;
+ ndesc = calc_tx_descs(skb);
+
+ if (unlikely(credits < ndesc)) {
+ t3_stop_tx_queue(txq, qs, q);
+ dev_err(&adap->pdev->dev,
+ "%s: Tx ring %u full while queue awake!\n",
+ dev->name, q->cntxt_id & 7);
+ return NETDEV_TX_BUSY;
+ }
+
+ q->in_use += ndesc;
+ if (unlikely(credits - ndesc < q->stop_thres)) {
+ t3_stop_tx_queue(txq, qs, q);
+
+ if (should_restart_tx(q) &&
+ test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
+ q->restarts++;
+ netif_tx_start_queue(txq);
+ }
+ }
+
+ gen = q->gen;
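+ /*
+ * Request a Tx completion roughly once every 8 descriptors: bit 3
+ * of the unacked count is shifted into the COMPL position and the
+ * count then keeps only its low 3 bits.
+ */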
+ q->unacked += ndesc;
+ compl = (q->unacked & 8) << (S_WR_COMPL - 3);
+ q->unacked &= 7;
+ pidx = q->pidx;
+ q->pidx += ndesc;
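+ /*
+ * On a ring wrap the generation bit flips so the HW can tell
+ * freshly written descriptors from stale ones.
+ */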
+ if (q->pidx >= q->size) {
+ q->pidx -= q->size;
+ q->gen ^= 1;
+ }
+
+ /* update port statistics */
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ qs->port_stats[SGE_PSTAT_TX_CSUM]++;
+ if (skb_shinfo(skb)->gso_size)
+ qs->port_stats[SGE_PSTAT_TSO]++;
+ if (vlan_tx_tag_present(skb))
+ qs->port_stats[SGE_PSTAT_VLANINS]++;
+
+ /*
+ * We do not use Tx completion interrupts to free DMAd Tx packets.
+ * This is good for performance but means that we rely on new Tx
+ * packets arriving to run the destructors of completed packets,
+ * which open up space in their sockets' send queues. Sometimes
+ * we do not get such new packets, causing Tx to stall; a single
+ * UDP transmitter is a good example of this situation. We have
+ * a cleanup timer that periodically reclaims completed packets,
+ * but it doesn't run often enough (nor do we want it to) to prevent
+ * lengthy stalls. A solution to this problem is to run the
+ * destructor early, after the packet is queued but before it's DMAd.
+ * A downside is that we lie to socket memory accounting, but the
+ * amount of extra memory is reasonable (limited by the number of Tx
+ * descriptors), the packets almost always do get freed quickly by
+ * new packets, and for protocols like TCP that wait for acks to
+ * really free up the data the extra memory is even less.
+ * On the positive side we run the destructors on the sending CPU
+ * rather than on a potentially different completing CPU, usually a
+ * good thing. We also run them without holding our Tx queue lock,
+ * unlike what reclaim_completed_tx() would otherwise do.
+ *
+ * Run the destructor before telling the DMA engine about the packet
+ * to make sure it doesn't complete and get freed prematurely.
+ */
+ if (likely(!skb_shared(skb)))
+ skb_orphan(skb);
+
+ write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
+ check_ring_tx_db(adap, q);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * write_imm - write a packet into a Tx descriptor as immediate data
+ * @d: the Tx descriptor to write
+ * @skb: the packet
+ * @len: the length of packet data to write as immediate data
+ * @gen: the generation bit value to write
+ *
+ * Writes a packet as immediate data into a Tx descriptor. The packet
+ * contains a work request at its beginning. We must write the packet
+ * carefully so the SGE doesn't read it accidentally before it's written
+ * in its entirety.
+ */
+static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
+ unsigned int len, unsigned int gen)
+{
+ struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
+ struct work_request_hdr *to = (struct work_request_hdr *)d;
+
+ if (likely(!skb->data_len))
+ memcpy(&to[1], &from[1], len - sizeof(*from));
+ else
+ skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
+
+ to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
+ V_WR_BCNTLFLT(len & 7));
+ wmb();
+ to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
+ V_WR_LEN((len + 7) / 8));
+ wr_gen2(d, gen);
+ kfree_skb(skb);
+}
+
+/**
+ * check_desc_avail - check descriptor availability on a send queue
+ * @adap: the adapter
+ * @q: the send queue
+ * @skb: the packet needing the descriptors
+ * @ndesc: the number of Tx descriptors needed
+ * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
+ *
+ * Checks if the requested number of Tx descriptors is available on an
+ * SGE send queue. If the queue is already suspended or not enough
+ * descriptors are available the packet is queued for later transmission.
+ * Must be called with the Tx queue locked.
+ *
+ * Returns 0 if enough descriptors are available, 1 if there aren't
+ * enough descriptors and the packet has been queued, and 2 if the caller
+ * needs to retry because there weren't enough descriptors at the
+ * beginning of the call but some freed up in the meantime.
+ */
+static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
+ struct sk_buff *skb, unsigned int ndesc,
+ unsigned int qid)
+{
+ if (unlikely(!skb_queue_empty(&q->sendq))) {
+addq_exit:
+ __skb_queue_tail(&q->sendq, skb);
+ return 1;
+ }
+ if (unlikely(q->size - q->in_use < ndesc)) {
+ struct sge_qset *qs = txq_to_qset(q, qid);
+
+ set_bit(qid, &qs->txq_stopped);
+ smp_mb__after_clear_bit();
+
+ if (should_restart_tx(q) &&
+ test_and_clear_bit(qid, &qs->txq_stopped))
+ return 2;
+
+ q->stops++;
+ goto addq_exit;
+ }
+ return 0;
+}
+
+/**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+ *
+ * This is a variant of reclaim_completed_tx() that is used for Tx queues
+ * that send only immediate data (presently just the control queues) and
+ * thus do not have any sk_buffs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+ unsigned int reclaim = q->processed - q->cleaned;
+
+ q->in_use -= reclaim;
+ q->cleaned += reclaim;
+}
+
+static inline int immediate(const struct sk_buff *skb)
+{
+ return skb->len <= WR_LEN;
+}
+
+/**
+ * ctrl_xmit - send a packet through an SGE control Tx queue
+ * @adap: the adapter
+ * @q: the control queue
+ * @skb: the packet
+ *
+ * Send a packet through an SGE control Tx queue. Packets sent through
+ * a control queue must fit entirely as immediate data in a single Tx
+ * descriptor and have no page fragments.
+ */
+static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
+ struct sk_buff *skb)
+{
+ int ret;
+ struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
+
+ if (unlikely(!immediate(skb))) {
+ WARN_ON(1);
+ dev_kfree_skb(skb);
+ return NET_XMIT_SUCCESS;
+ }
+
+ wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
+ wrp->wr_lo = htonl(V_WR_TID(q->token));
+
+ spin_lock(&q->lock);
+again:
+ reclaim_completed_tx_imm(q);
+
+ ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
+ if (unlikely(ret)) {
+ if (ret == 1) {
+ spin_unlock(&q->lock);
+ return NET_XMIT_CN;
+ }
+ goto again;
+ }
+
+ write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
+
+ q->in_use++;
+ if (++q->pidx >= q->size) {
+ q->pidx = 0;
+ q->gen ^= 1;
+ }
+ spin_unlock(&q->lock);
+ wmb();
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+ return NET_XMIT_SUCCESS;
+}
+
+/**
+ * restart_ctrlq - restart a suspended control queue
+ * @data: the queue set containing the control queue (as an unsigned long)
+ *
+ * Resumes transmission on a suspended Tx control queue.
+ */
+static void restart_ctrlq(unsigned long data)
+{
+ struct sk_buff *skb;
+ struct sge_qset *qs = (struct sge_qset *)data;
+ struct sge_txq *q = &qs->txq[TXQ_CTRL];
+
+ spin_lock(&q->lock);
+again:
+ reclaim_completed_tx_imm(q);
+
+ while (q->in_use < q->size &&
+ (skb = __skb_dequeue(&q->sendq)) != NULL) {
+
+ write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
+
+ if (++q->pidx >= q->size) {
+ q->pidx = 0;
+ q->gen ^= 1;
+ }
+ q->in_use++;
+ }
+
+ if (!skb_queue_empty(&q->sendq)) {
+ set_bit(TXQ_CTRL, &qs->txq_stopped);
+ smp_mb__after_clear_bit();
+
+ if (should_restart_tx(q) &&
+ test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
+ goto again;
+ q->stops++;
+ }
+
+ spin_unlock(&q->lock);
+ wmb();
+ t3_write_reg(qs->adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+}
+
+/*
+ * Send a management message through control queue 0
+ */
+int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
+{
+ int ret;
+ local_bh_disable();
+ ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
+ local_bh_enable();
+
+ return ret;
+}
+
+/**
+ * deferred_unmap_destructor - unmap a packet when it is freed
+ * @skb: the packet
+ *
+ * This is the packet destructor used for Tx packets that need to remain
+ * mapped until they are freed rather than until their Tx descriptors are
+ * freed.
+ */
+static void deferred_unmap_destructor(struct sk_buff *skb)
+{
+ int i;
+ const dma_addr_t *p;
+ const struct skb_shared_info *si;
+ const struct deferred_unmap_info *dui;
+
+ dui = (struct deferred_unmap_info *)skb->head;
+ p = dui->addr;
+
+ if (skb->tail - skb->transport_header)
+ pci_unmap_single(dui->pdev, *p++,
+ skb->tail - skb->transport_header,
+ PCI_DMA_TODEVICE);
+
+ si = skb_shinfo(skb);
+ for (i = 0; i < si->nr_frags; i++)
+ pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
+ PCI_DMA_TODEVICE);
+}
+
+static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
+ const struct sg_ent *sgl, int sgl_flits)
+{
+ dma_addr_t *p;
+ struct deferred_unmap_info *dui;
+
+ dui = (struct deferred_unmap_info *)skb->head;
+ dui->pdev = pdev;
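+ /*
+ * Each full sg_ent spans 3 flits and holds two DMA addresses; a
+ * trailing odd buffer leaves a single address in a final,
+ * half-used sg_ent.
+ */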
+ for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
+ *p++ = be64_to_cpu(sgl->addr[0]);
+ *p++ = be64_to_cpu(sgl->addr[1]);
+ }
+ if (sgl_flits)
+ *p = be64_to_cpu(sgl->addr[0]);
+}
+
+/**
+ * write_ofld_wr - write an offload work request
+ * @adap: the adapter
+ * @skb: the packet to send
+ * @q: the Tx queue
+ * @pidx: index of the first Tx descriptor to write
+ * @gen: the generation value to use
+ * @ndesc: number of descriptors the packet will occupy
+ *
+ * Write an offload work request to send the supplied packet. The packet
+ * data already carry the work request with most fields populated.
+ */
+static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
+ struct sge_txq *q, unsigned int pidx,
+ unsigned int gen, unsigned int ndesc)
+{
+ unsigned int sgl_flits, flits;
+ struct work_request_hdr *from;
+ struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
+ struct tx_desc *d = &q->desc[pidx];
+
+ if (immediate(skb)) {
+ q->sdesc[pidx].skb = NULL;
+ write_imm(d, skb, skb->len, gen);
+ return;
+ }
+
+ /* Only TX_DATA builds SGLs */
+
+ from = (struct work_request_hdr *)skb->data;
+ memcpy(&d->flit[1], &from[1],
+ skb_transport_offset(skb) - sizeof(*from));
+
+ flits = skb_transport_offset(skb) / 8;
+ sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
+ sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+ skb->tail - skb->transport_header,
+ adap->pdev);
+ if (need_skb_unmap()) {
+ setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
+ skb->destructor = deferred_unmap_destructor;
+ }
+
+ write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
+ gen, from->wr_hi, from->wr_lo);
+}
+
+/**
+ * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
+ * @skb: the packet
+ *
+ * Returns the number of Tx descriptors needed for the given offload
+ * packet. These packets are already fully constructed.
+ */
+static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
+{
+ unsigned int flits, cnt;
+
+ if (skb->len <= WR_LEN)
+ return 1; /* packet fits as immediate data */
+
+ flits = skb_transport_offset(skb) / 8; /* headers */
+ cnt = skb_shinfo(skb)->nr_frags;
+ if (skb->tail != skb->transport_header)
+ cnt++;
+ return flits_to_desc(flits + sgl_len(cnt));
+}
+
+/**
+ * ofld_xmit - send a packet through an offload queue
+ * @adap: the adapter
+ * @q: the Tx offload queue
+ * @skb: the packet
+ *
+ * Send an offload packet through an SGE offload queue.
+ */
+static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
+ struct sk_buff *skb)
+{
+ int ret;
+ unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
+
+ spin_lock(&q->lock);
+again:
+ reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
+
+ ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
+ if (unlikely(ret)) {
+ if (ret == 1) {
+ skb->priority = ndesc; /* save for restart */
+ spin_unlock(&q->lock);
+ return NET_XMIT_CN;
+ }
+ goto again;
+ }
+
+ gen = q->gen;
+ q->in_use += ndesc;
+ pidx = q->pidx;
+ q->pidx += ndesc;
+ if (q->pidx >= q->size) {
+ q->pidx -= q->size;
+ q->gen ^= 1;
+ }
+ spin_unlock(&q->lock);
+
+ write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
+ check_ring_tx_db(adap, q);
+ return NET_XMIT_SUCCESS;
+}
+
+/**
+ * restart_offloadq - restart a suspended offload queue
+ * @data: the queue set containing the offload queue (as an unsigned long)
+ *
+ * Resumes transmission on a suspended Tx offload queue.
+ */
+static void restart_offloadq(unsigned long data)
+{
+ struct sk_buff *skb;
+ struct sge_qset *qs = (struct sge_qset *)data;
+ struct sge_txq *q = &qs->txq[TXQ_OFLD];
+ const struct port_info *pi = netdev_priv(qs->netdev);
+ struct adapter *adap = pi->adapter;
+
+ spin_lock(&q->lock);
+again:
+ reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
+
+ while ((skb = skb_peek(&q->sendq)) != NULL) {
+ unsigned int gen, pidx;
+ unsigned int ndesc = skb->priority;
+
+ if (unlikely(q->size - q->in_use < ndesc)) {
+ set_bit(TXQ_OFLD, &qs->txq_stopped);
+ smp_mb__after_clear_bit();
+
+ if (should_restart_tx(q) &&
+ test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
+ goto again;
+ q->stops++;
+ break;
+ }
+
+ gen = q->gen;
+ q->in_use += ndesc;
+ pidx = q->pidx;
+ q->pidx += ndesc;
+ if (q->pidx >= q->size) {
+ q->pidx -= q->size;
+ q->gen ^= 1;
+ }
+ __skb_unlink(skb, &q->sendq);
+ spin_unlock(&q->lock);
+
+ write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
+ spin_lock(&q->lock);
+ }
+ spin_unlock(&q->lock);
+
+#if USE_GTS
+ set_bit(TXQ_RUNNING, &q->flags);
+ set_bit(TXQ_LAST_PKT_DB, &q->flags);
+#endif
+ wmb();
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+}
+
+/**
+ * queue_set - return the queue set a packet should use
+ * @skb: the packet
+ *
+ * Maps a packet to the SGE queue set it should use. The desired queue
+ * set is carried in bits 1-3 in the packet's priority.
+ */
+static inline int queue_set(const struct sk_buff *skb)
+{
+ return skb->priority >> 1;
+}
+
+/**
+ * is_ctrl_pkt - return whether an offload packet is a control packet
+ * @skb: the packet
+ *
+ * Determines whether an offload packet should use an OFLD or a CTRL
+ * Tx queue. This is indicated by bit 0 in the packet's priority.
+ */
+static inline int is_ctrl_pkt(const struct sk_buff *skb)
+{
+ return skb->priority & 1;
+}
+
+/**
+ * t3_offload_tx - send an offload packet
+ * @tdev: the offload device to send to
+ * @skb: the packet
+ *
+ * Sends an offload packet. We use the packet priority to select the
+ * appropriate Tx queue as follows: bit 0 indicates whether the packet
+ * should be sent as regular or control, bits 1-3 select the queue set.
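+ *
+ * For example, a priority of 5 (binary 101) selects queue set 2 and,
+ * since bit 0 is set, sends the packet through the CTRL queue.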
+ */
+int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
+{
+ struct adapter *adap = tdev2adap(tdev);
+ struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
+
+ if (unlikely(is_ctrl_pkt(skb)))
+ return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
+
+ return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
+}
+
+/**
+ * offload_enqueue - add an offload packet to an SGE offload receive queue
+ * @q: the SGE response queue
+ * @skb: the packet
+ *
+ * Add a new offload packet to an SGE response queue's offload packet
+ * queue. If the packet is the first on the queue it schedules the RX
+ * softirq to process the queue.
+ */
+static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
+{
+ int was_empty = skb_queue_empty(&q->rx_queue);
+
+ __skb_queue_tail(&q->rx_queue, skb);
+
+ if (was_empty) {
+ struct sge_qset *qs = rspq_to_qset(q);
+
+ napi_schedule(&qs->napi);
+ }
+}
+
+/**
+ * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
+ * @tdev: the offload device that will be receiving the packets
+ * @q: the SGE response queue that assembled the bundle
+ * @skbs: the partial bundle
+ * @n: the number of packets in the bundle
+ *
+ * Delivers a (partial) bundle of Rx offload packets to an offload device.
+ */
+static inline void deliver_partial_bundle(struct t3cdev *tdev,
+ struct sge_rspq *q,
+ struct sk_buff *skbs[], int n)
+{
+ if (n) {
+ q->offload_bundles++;
+ tdev->recv(tdev, skbs, n);
+ }
+}
+
+/**
+ * ofld_poll - NAPI handler for offload packets in interrupt mode
+ * @napi: the NAPI instance for this queue set
+ * @budget: polling budget
+ *
+ * The NAPI handler for offload packets when a response queue is serviced
+ * by the hard interrupt handler, i.e., when it's operating in non-polling
+ * mode. Creates small packet batches and sends them through the offload
+ * receive handler. Batches need to be of modest size as we do prefetches
+ * on the packets in each.
+ */
+static int ofld_poll(struct napi_struct *napi, int budget)
+{
+ struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
+ struct sge_rspq *q = &qs->rspq;
+ struct adapter *adapter = qs->adap;
+ int work_done = 0;
+
+ while (work_done < budget) {
+ struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
+ struct sk_buff_head queue;
+ int ngathered;
+
+ spin_lock_irq(&q->lock);
+ __skb_queue_head_init(&queue);
+ skb_queue_splice_init(&q->rx_queue, &queue);
+ if (skb_queue_empty(&queue)) {
+ napi_complete(napi);
+ spin_unlock_irq(&q->lock);
+ return work_done;
+ }
+ spin_unlock_irq(&q->lock);
+
+ ngathered = 0;
+ skb_queue_walk_safe(&queue, skb, tmp) {
+ if (work_done >= budget)
+ break;
+ work_done++;
+
+ __skb_unlink(skb, &queue);
+ prefetch(skb->data);
+ skbs[ngathered] = skb;
+ if (++ngathered == RX_BUNDLE_SIZE) {
+ q->offload_bundles++;
+ adapter->tdev.recv(&adapter->tdev, skbs,
+ ngathered);
+ ngathered = 0;
+ }
+ }
+ if (!skb_queue_empty(&queue)) {
+ /* splice remaining packets back onto Rx queue */
+ spin_lock_irq(&q->lock);
+ skb_queue_splice(&queue, &q->rx_queue);
+ spin_unlock_irq(&q->lock);
+ }
+ deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
+ }
+
+ return work_done;
+}
+
+/**
+ * rx_offload - process a received offload packet
+ * @tdev: the offload device receiving the packet
+ * @rq: the response queue that received the packet
+ * @skb: the packet
+ * @rx_gather: a gather list of packets if we are building a bundle
+ * @gather_idx: index of the next available slot in the bundle
+ *
+ * Process an ingress offload packet and add it to the offload ingress
+ * queue. Returns the index of the next available slot in the bundle.
+ */
+static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
+ struct sk_buff *skb, struct sk_buff *rx_gather[],
+ unsigned int gather_idx)
+{
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ if (rq->polling) {
+ rx_gather[gather_idx++] = skb;
+ if (gather_idx == RX_BUNDLE_SIZE) {
+ tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
+ gather_idx = 0;
+ rq->offload_bundles++;
+ }
+ } else
+ offload_enqueue(rq, skb);
+
+ return gather_idx;
+}
+
+/**
+ * restart_tx - check whether to restart suspended Tx queues
+ * @qs: the queue set to resume
+ *
+ * Restarts suspended Tx queues of an SGE queue set if they have enough
+ * free resources to resume operation.
+ */
+static void restart_tx(struct sge_qset *qs)
+{
+ if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
+ should_restart_tx(&qs->txq[TXQ_ETH]) &&
+ test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
+ qs->txq[TXQ_ETH].restarts++;
+ if (netif_running(qs->netdev))
+ netif_tx_wake_queue(qs->tx_q);
+ }
+
+ if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
+ should_restart_tx(&qs->txq[TXQ_OFLD]) &&
+ test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
+ qs->txq[TXQ_OFLD].restarts++;
+ tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
+ }
+ if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
+ should_restart_tx(&qs->txq[TXQ_CTRL]) &&
+ test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
+ qs->txq[TXQ_CTRL].restarts++;
+ tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
+ }
+}
+
+/**
+ * cxgb3_arp_process - process an ARP request probing a private IP address
+ * @pi: the port info of the interface that received the request
+ * @skb: the skbuff containing the ARP request
+ *
+ * Check if the ARP request is probing the private IP address
+ * dedicated to iSCSI and generate an ARP reply if so.
+ */
+static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ struct arphdr *arp;
+ unsigned char *arp_ptr;
+ unsigned char *sha;
+ __be32 sip, tip;
+
+ if (!dev)
+ return;
+
+ skb_reset_network_header(skb);
+ arp = arp_hdr(skb);
+
+ if (arp->ar_op != htons(ARPOP_REQUEST))
+ return;
+
+ arp_ptr = (unsigned char *)(arp + 1);
+ sha = arp_ptr;
+ arp_ptr += dev->addr_len;
+ memcpy(&sip, arp_ptr, sizeof(sip));
+ arp_ptr += sizeof(sip);
+ arp_ptr += dev->addr_len;
+ memcpy(&tip, arp_ptr, sizeof(tip));
+
+ if (tip != pi->iscsi_ipv4addr)
+ return;
+
+ arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
+ pi->iscsic.mac_addr, sha);
+}
+
+static inline int is_arp(struct sk_buff *skb)
+{
+ return skb->protocol == htons(ETH_P_ARP);
+}
+
+static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
+ struct sk_buff *skb)
+{
+ if (is_arp(skb)) {
+ cxgb3_arp_process(pi, skb);
+ return;
+ }
+
+ if (pi->iscsic.recv)
+ pi->iscsic.recv(pi, skb);
+}
+
+/**
+ * rx_eth - process an ingress ethernet packet
+ * @adap: the adapter
+ * @rq: the response queue that received the packet
+ * @skb: the packet
+ * @pad: amount of padding at the start of the buffer
+ * @lro: whether the packet may be handed to GRO
+ *
+ * Process an ingress ethernet packet and deliver it to the stack.
+ * The padding is 2 if the packet was delivered in an Rx buffer and 0
+ * if it was immediate data in a response.
+ */
+static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
+ struct sk_buff *skb, int pad, int lro)
+{
+ struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
+ struct sge_qset *qs = rspq_to_qset(rq);
+ struct port_info *pi;
+
+ skb_pull(skb, sizeof(*p) + pad);
+ skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
+ pi = netdev_priv(skb->dev);
+ if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
+ p->csum == htons(0xffff) && !p->fragment) {
+ qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else
+ skb_checksum_none_assert(skb);
+ skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
+
+ if (p->vlan_valid) {
+ qs->port_stats[SGE_PSTAT_VLANEX]++;
+ __vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
+ }
+ if (rq->polling) {
+ if (lro)
+ napi_gro_receive(&qs->napi, skb);
+ else {
+ if (unlikely(pi->iscsic.flags))
+ cxgb3_process_iscsi_prov_pack(pi, skb);
+ netif_receive_skb(skb);
+ }
+ } else
+ netif_rx(skb);
+}
+
+static inline int is_eth_tcp(u32 rss)
+{
+ return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
+}
+
+/**
+ * lro_add_page - add a page chunk to an LRO session
+ * @adap: the adapter
+ * @qs: the associated queue set
+ * @fl: the free list containing the page chunk to add
+ * @len: packet length
+ * @complete: Indicates the last fragment of a frame
+ *
+ * Add a received packet contained in a page chunk to an existing LRO
+ * session.
+ */
+static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
+ struct sge_fl *fl, int len, int complete)
+{
+ struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+ struct port_info *pi = netdev_priv(qs->netdev);
+ struct sk_buff *skb = NULL;
+ struct cpl_rx_pkt *cpl;
+ struct skb_frag_struct *rx_frag;
+ int nr_frags;
+ int offset = 0;
+
+ if (!qs->nomem) {
+ skb = napi_get_frags(&qs->napi);
+ qs->nomem = !skb;
+ }
+
+ fl->credits--;
+
+ pci_dma_sync_single_for_cpu(adap->pdev,
+ dma_unmap_addr(sd, dma_addr),
+ fl->buf_size - SGE_PG_RSVD,
+ PCI_DMA_FROMDEVICE);
+
+ (*sd->pg_chunk.p_cnt)--;
+ if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
+ pci_unmap_page(adap->pdev,
+ sd->pg_chunk.mapping,
+ fl->alloc_size,
+ PCI_DMA_FROMDEVICE);
+
+ if (!skb) {
+ put_page(sd->pg_chunk.page);
+ if (complete)
+ qs->nomem = 0;
+ return;
+ }
+
+ rx_frag = skb_shinfo(skb)->frags;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (!nr_frags) {
+ offset = 2 + sizeof(struct cpl_rx_pkt);
+ cpl = qs->lro_va = sd->pg_chunk.va + 2;
+
+ if ((qs->netdev->features & NETIF_F_RXCSUM) &&
+ cpl->csum_valid && cpl->csum == htons(0xffff)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+ } else
+ skb->ip_summed = CHECKSUM_NONE;
+ } else
+ cpl = qs->lro_va;
+
+ len -= offset;
+
+ rx_frag += nr_frags;
+ __skb_frag_set_page(rx_frag, sd->pg_chunk.page);
+ rx_frag->page_offset = sd->pg_chunk.offset + offset;
+ rx_frag->size = len;
+
+ skb->len += len;
+ skb->data_len += len;
+ skb->truesize += len;
+ skb_shinfo(skb)->nr_frags++;
+
+ if (!complete)
+ return;
+
+ skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
+
+ if (cpl->vlan_valid)
+ __vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan));
+ napi_gro_frags(&qs->napi);
+}
+
+/**
+ * handle_rsp_cntrl_info - handles control information in a response
+ * @qs: the queue set corresponding to the response
+ * @flags: the response control flags
+ *
+ * Handles the control information of an SGE response, such as GTS
+ * indications and completion credits for the queue set's Tx queues.
+ * The HW coalesces credits; we don't do any extra SW coalescing.
+ */
+static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
+{
+ unsigned int credits;
+
+#if USE_GTS
+ if (flags & F_RSPD_TXQ0_GTS)
+ clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
+#endif
+
+ credits = G_RSPD_TXQ0_CR(flags);
+ if (credits)
+ qs->txq[TXQ_ETH].processed += credits;
+
+ credits = G_RSPD_TXQ2_CR(flags);
+ if (credits)
+ qs->txq[TXQ_CTRL].processed += credits;
+
+# if USE_GTS
+ if (flags & F_RSPD_TXQ1_GTS)
+ clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
+# endif
+ credits = G_RSPD_TXQ1_CR(flags);
+ if (credits)
+ qs->txq[TXQ_OFLD].processed += credits;
+}
+
+/**
+ * check_ring_db - check if we need to ring any doorbells
+ * @adap: the adapter
+ * @qs: the queue set whose Tx queues are to be examined
+ * @sleeping: indicates which Tx queue sent GTS
+ *
+ * Checks if some of a queue set's Tx queues need to ring their doorbells
+ * to resume transmission after idling while they still have unprocessed
+ * descriptors.
+ */
+static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
+ unsigned int sleeping)
+{
+ if (sleeping & F_RSPD_TXQ0_GTS) {
+ struct sge_txq *txq = &qs->txq[TXQ_ETH];
+
+ if (txq->cleaned + txq->in_use != txq->processed &&
+ !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
+ set_bit(TXQ_RUNNING, &txq->flags);
+ t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
+ V_EGRCNTX(txq->cntxt_id));
+ }
+ }
+
+ if (sleeping & F_RSPD_TXQ1_GTS) {
+ struct sge_txq *txq = &qs->txq[TXQ_OFLD];
+
+ if (txq->cleaned + txq->in_use != txq->processed &&
+ !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
+ set_bit(TXQ_RUNNING, &txq->flags);
+ t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
+ V_EGRCNTX(txq->cntxt_id));
+ }
+ }
+}
+
+/**
+ * is_new_response - check if a response is newly written
+ * @r: the response descriptor
+ * @q: the response queue
+ *
+ * Returns true if a response descriptor contains a yet unprocessed
+ * response.
+ */
+static inline int is_new_response(const struct rsp_desc *r,
+ const struct sge_rspq *q)
+{
+ return (r->intr_gen & F_RSPD_GEN2) == q->gen;
+}
+
+static inline void clear_rspq_bufstate(struct sge_rspq * const q)
+{
+ q->pg_skb = NULL;
+ q->rx_recycle_buf = 0;
+}
+
+#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
+#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
+ V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
+ V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
+ V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
+
+/*
+ * How long to delay the next interrupt in case of memory shortage, in
+ * units of 0.1 us (2500 => 250 us).
+ */
+#define NOMEM_INTR_DELAY 2500
+
+/**
+ * process_responses - process responses from an SGE response queue
+ * @adap: the adapter
+ * @qs: the queue set to which the response queue belongs
+ * @budget: how many responses can be processed in this round
+ *
+ * Process responses from an SGE response queue up to the supplied budget.
+ * Responses include received packets as well as credits and other events
+ * for the queues that belong to the response queue's queue set.
+ * A negative budget is effectively unlimited.
+ *
+ * Additionally choose the interrupt holdoff time for the next interrupt
+ * on this queue. If the system is short of memory we use a fairly
+ * long delay to help recovery.
+ */
+static int process_responses(struct adapter *adap, struct sge_qset *qs,
+ int budget)
+{
+ struct sge_rspq *q = &qs->rspq;
+ struct rsp_desc *r = &q->desc[q->cidx];
+ int budget_left = budget;
+ unsigned int sleeping = 0;
+ struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
+ int ngathered = 0;
+
+ q->next_holdoff = q->holdoff_tmr;
+
+ while (likely(budget_left && is_new_response(r, q))) {
+ int packet_complete, eth, ethpad = 2;
+ int lro = !!(qs->netdev->features & NETIF_F_GRO);
+ struct sk_buff *skb = NULL;
+ u32 len, flags;
+ __be32 rss_hi, rss_lo;
+
+ rmb();
+ eth = r->rss_hdr.opcode == CPL_RX_PKT;
+ rss_hi = *(const __be32 *)r;
+ rss_lo = r->rss_hdr.rss_hash_val;
+ flags = ntohl(r->flags);
+
+ if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
+ skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
+ if (!skb)
+ goto no_mem;
+
+ memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
+ skb->data[0] = CPL_ASYNC_NOTIF;
+ rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
+ q->async_notif++;
+ } else if (flags & F_RSPD_IMM_DATA_VALID) {
+ skb = get_imm_packet(r);
+ if (unlikely(!skb)) {
+no_mem:
+ q->next_holdoff = NOMEM_INTR_DELAY;
+ q->nomem++;
+ /* consume one credit since we tried */
+ budget_left--;
+ break;
+ }
+ q->imm_data++;
+ ethpad = 0;
+ } else if ((len = ntohl(r->len_cq)) != 0) {
+ struct sge_fl *fl;
+
+ lro &= eth && is_eth_tcp(rss_hi);
+
+ fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
+ if (fl->use_pages) {
+ void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
+
+ prefetch(addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(addr + L1_CACHE_BYTES);
+#endif
+ __refill_fl(adap, fl);
+ if (lro > 0) {
+ lro_add_page(adap, qs, fl,
+ G_RSPD_LEN(len),
+ flags & F_RSPD_EOP);
+ goto next_fl;
+ }
+
+ skb = get_packet_pg(adap, fl, q,
+ G_RSPD_LEN(len),
+ eth ?
+ SGE_RX_DROP_THRES : 0);
+ q->pg_skb = skb;
+ } else
+ skb = get_packet(adap, fl, G_RSPD_LEN(len),
+ eth ? SGE_RX_DROP_THRES : 0);
+ if (unlikely(!skb)) {
+ if (!eth)
+ goto no_mem;
+ q->rx_drops++;
+ } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
+ __skb_pull(skb, 2);
+next_fl:
+ if (++fl->cidx == fl->size)
+ fl->cidx = 0;
+ } else
+ q->pure_rsps++;
+
+ if (flags & RSPD_CTRL_MASK) {
+ sleeping |= flags & RSPD_GTS_MASK;
+ handle_rsp_cntrl_info(qs, flags);
+ }
+
+ r++;
+ if (unlikely(++q->cidx == q->size)) {
+ q->cidx = 0;
+ q->gen ^= 1;
+ r = q->desc;
+ }
+ prefetch(r);
+
+ if (++q->credits >= (q->size / 4)) {
+ refill_rspq(adap, q, q->credits);
+ q->credits = 0;
+ }
+
+ packet_complete = flags &
+ (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
+ F_RSPD_ASYNC_NOTIF);
+
+ if (skb != NULL && packet_complete) {
+ if (eth)
+ rx_eth(adap, q, skb, ethpad, lro);
+ else {
+ q->offload_pkts++;
+ /* Preserve the RSS info in csum & priority */
+ skb->csum = rss_hi;
+ skb->priority = rss_lo;
+ ngathered = rx_offload(&adap->tdev, q, skb,
+ offload_skbs,
+ ngathered);
+ }
+
+ if (flags & F_RSPD_EOP)
+ clear_rspq_bufstate(q);
+ }
+ --budget_left;
+ }
+
+ deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
+
+ if (sleeping)
+ check_ring_db(adap, qs, sleeping);
+
+ smp_mb(); /* commit Tx queue .processed updates */
+ if (unlikely(qs->txq_stopped != 0))
+ restart_tx(qs);
+
+ budget -= budget_left;
+ return budget;
+}
+
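+/*
+ * A response is "pure" if it carries neither an async notification,
+ * immediate data, nor a free-list buffer reference, i.e., it conveys
+ * only control information such as Tx credits.
+ */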
+static inline int is_pure_response(const struct rsp_desc *r)
+{
+ __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
+
+ return (n | r->len_cq) == 0;
+}
+
+/**
+ * napi_rx_handler - the NAPI handler for Rx processing
+ * @napi: the napi instance
+ * @budget: how many packets we can process in this round
+ *
+ * Handler for new data events when using NAPI.
+ */
+static int napi_rx_handler(struct napi_struct *napi, int budget)
+{
+ struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
+ struct adapter *adap = qs->adap;
+ int work_done = process_responses(adap, qs, budget);
+
+ if (likely(work_done < budget)) {
+ napi_complete(napi);
+
+ /*
+ * Because we don't atomically flush the following
+ * write it is possible that in very rare cases it can
+ * reach the device in a way that races with a new
+ * response being written plus an error interrupt
+ * causing the NAPI interrupt handler below to return
+ * unhandled status to the OS. Guarding against
+ * this would require flushing the write and doing
+ * both the write and the flush with interrupts off,
+ * which is far too expensive and unjustifiable
+ * given the rarity of the race.
+ *
+ * The race cannot happen at all with MSI-X.
+ */
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
+ V_NEWTIMER(qs->rspq.next_holdoff) |
+ V_NEWINDEX(qs->rspq.cidx));
+ }
+ return work_done;
+}
+
+/*
+ * Returns true if the device is already scheduled for polling.
+ */
+static inline int napi_is_scheduled(struct napi_struct *napi)
+{
+ return test_bit(NAPI_STATE_SCHED, &napi->state);
+}
+
+/**
+ * process_pure_responses - process pure responses from a response queue
+ * @adap: the adapter
+ * @qs: the queue set owning the response queue
+ * @r: the first pure response to process
+ *
+ * A simpler version of process_responses() that handles only pure (i.e.,
+ * non-data-carrying) responses. Such responses are too light-weight to
+ * justify calling a softirq under NAPI, so we handle them specially in
+ * the interrupt handler. The function is called with a pointer to a
+ * response, which the caller must ensure is a valid pure response.
+ *
+ * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
+ */
+static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
+ struct rsp_desc *r)
+{
+ struct sge_rspq *q = &qs->rspq;
+ unsigned int sleeping = 0;
+
+ do {
+ u32 flags = ntohl(r->flags);
+
+ r++;
+ if (unlikely(++q->cidx == q->size)) {
+ q->cidx = 0;
+ q->gen ^= 1;
+ r = q->desc;
+ }
+ prefetch(r);
+
+ if (flags & RSPD_CTRL_MASK) {
+ sleeping |= flags & RSPD_GTS_MASK;
+ handle_rsp_cntrl_info(qs, flags);
+ }
+
+ q->pure_rsps++;
+ if (++q->credits >= (q->size / 4)) {
+ refill_rspq(adap, q, q->credits);
+ q->credits = 0;
+ }
+ if (!is_new_response(r, q))
+ break;
+ rmb();
+ } while (is_pure_response(r));
+
+ if (sleeping)
+ check_ring_db(adap, qs, sleeping);
+
+ smp_mb(); /* commit Tx queue .processed updates */
+ if (unlikely(qs->txq_stopped != 0))
+ restart_tx(qs);
+
+ return is_new_response(r, q);
+}
+
+/**
+ * handle_responses - decide what to do with new responses in NAPI mode
+ * @adap: the adapter
+ * @q: the response queue
+ *
+ * This is used by the NAPI interrupt handlers to decide what to do with
+ * new SGE responses. If there are no new responses it returns -1. If
+ * there are new responses and they are pure (i.e., non-data carrying)
+ * it handles them straight in hard interrupt context as they are very
+ * cheap and don't deliver any packets. Finally, if there are any data
+ * signaling responses it schedules the NAPI handler. Returns 1 if it
+ * schedules NAPI, 0 if all new responses were pure.
+ *
+ * The caller must ascertain NAPI is not already running.
+ */
+static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
+{
+ struct sge_qset *qs = rspq_to_qset(q);
+ struct rsp_desc *r = &q->desc[q->cidx];
+
+ if (!is_new_response(r, q))
+ return -1;
+ rmb();
+ if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
+ V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
+ return 0;
+ }
+ napi_schedule(&qs->napi);
+ return 1;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
+ * (i.e., response queue serviced in hard interrupt).
+ */
+static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
+{
+ struct sge_qset *qs = cookie;
+ struct adapter *adap = qs->adap;
+ struct sge_rspq *q = &qs->rspq;
+
+ spin_lock(&q->lock);
+ if (process_responses(adap, qs, -1) == 0)
+ q->unhandled_irqs++;
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
+ V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
+ spin_unlock(&q->lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue for the NAPI case
+ * (i.e., response queue serviced by NAPI polling).
+ */
+static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
+{
+ struct sge_qset *qs = cookie;
+ struct sge_rspq *q = &qs->rspq;
+
+ spin_lock(&q->lock);
+
+ if (handle_responses(qs->adap, q) < 0)
+ q->unhandled_irqs++;
+ spin_unlock(&q->lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * The non-NAPI MSI interrupt handler. This needs to handle data events from
+ * SGE response queues as well as error and other async events as they all use
+ * the same MSI vector. We use one SGE response queue per port in this mode
+ * and protect all response queues with queue 0's lock.
+ */
+static irqreturn_t t3_intr_msi(int irq, void *cookie)
+{
+ int new_packets = 0;
+ struct adapter *adap = cookie;
+ struct sge_rspq *q = &adap->sge.qs[0].rspq;
+
+ spin_lock(&q->lock);
+
+ if (process_responses(adap, &adap->sge.qs[0], -1)) {
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
+ V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
+ new_packets = 1;
+ }
+
+ if (adap->params.nports == 2 &&
+ process_responses(adap, &adap->sge.qs[1], -1)) {
+ struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
+
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
+ V_NEWTIMER(q1->next_holdoff) |
+ V_NEWINDEX(q1->cidx));
+ new_packets = 1;
+ }
+
+ if (!new_packets && t3_slow_intr_handler(adap) == 0)
+ q->unhandled_irqs++;
+
+ spin_unlock(&q->lock);
+ return IRQ_HANDLED;
+}
+
+static int rspq_check_napi(struct sge_qset *qs)
+{
+ struct sge_rspq *q = &qs->rspq;
+
+ if (!napi_is_scheduled(&qs->napi) &&
+ is_new_response(&q->desc[q->cidx], q)) {
+ napi_schedule(&qs->napi);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
+ * by NAPI polling). Handles data events from SGE response queues as well as
+ * error and other async events as they all use the same MSI vector. We use
+ * one SGE response queue per port in this mode and protect all response
+ * queues with queue 0's lock.
+ */
+static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
+{
+ int new_packets;
+ struct adapter *adap = cookie;
+ struct sge_rspq *q = &adap->sge.qs[0].rspq;
+
+ spin_lock(&q->lock);
+
+ new_packets = rspq_check_napi(&adap->sge.qs[0]);
+ if (adap->params.nports == 2)
+ new_packets += rspq_check_napi(&adap->sge.qs[1]);
+ if (!new_packets && t3_slow_intr_handler(adap) == 0)
+ q->unhandled_irqs++;
+
+ spin_unlock(&q->lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * A helper function that processes responses and issues GTS.
+ */
+static inline int process_responses_gts(struct adapter *adap,
+ struct sge_rspq *rq)
+{
+ int work;
+
+ work = process_responses(adap, rspq_to_qset(rq), -1);
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
+ V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
+ return work;
+}
+
+/*
+ * The legacy INTx interrupt handler. This needs to handle data events from
+ * SGE response queues as well as error and other async events as they all use
+ * the same interrupt pin. We use one SGE response queue per port in this mode
+ * and protect all response queues with queue 0's lock.
+ */
+static irqreturn_t t3_intr(int irq, void *cookie)
+{
+ int work_done, w0, w1;
+ struct adapter *adap = cookie;
+ struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
+ struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
+
+ spin_lock(&q0->lock);
+
+ w0 = is_new_response(&q0->desc[q0->cidx], q0);
+ w1 = adap->params.nports == 2 &&
+ is_new_response(&q1->desc[q1->cidx], q1);
+
+ if (likely(w0 | w1)) {
+ t3_write_reg(adap, A_PL_CLI, 0);
+ t3_read_reg(adap, A_PL_CLI); /* flush */
+
+ if (likely(w0))
+ process_responses_gts(adap, q0);
+
+ if (w1)
+ process_responses_gts(adap, q1);
+
+ work_done = w0 | w1;
+ } else
+ work_done = t3_slow_intr_handler(adap);
+
+ spin_unlock(&q0->lock);
+ return IRQ_RETVAL(work_done != 0);
+}
+
+/*
+ * Interrupt handler for legacy INTx interrupts for T3B-based cards.
+ * Handles data events from SGE response queues as well as error and other
+ * async events as they all use the same interrupt pin. We use one SGE
+ * response queue per port in this mode and protect all response queues with
+ * queue 0's lock.
+ */
+static irqreturn_t t3b_intr(int irq, void *cookie)
+{
+ u32 map;
+ struct adapter *adap = cookie;
+ struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
+
+ t3_write_reg(adap, A_PL_CLI, 0);
+ map = t3_read_reg(adap, A_SG_DATA_INTR);
+
+ if (unlikely(!map)) /* shared interrupt, most likely */
+ return IRQ_NONE;
+
+ spin_lock(&q0->lock);
+
+ if (unlikely(map & F_ERRINTR))
+ t3_slow_intr_handler(adap);
+
+ if (likely(map & 1))
+ process_responses_gts(adap, q0);
+
+ if (map & 2)
+ process_responses_gts(adap, &adap->sge.qs[1].rspq);
+
+ spin_unlock(&q0->lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
+ * Handles data events from SGE response queues as well as error and other
+ * async events as they all use the same interrupt pin. We use one SGE
+ * response queue per port in this mode and protect all response queues with
+ * queue 0's lock.
+ */
+static irqreturn_t t3b_intr_napi(int irq, void *cookie)
+{
+ u32 map;
+ struct adapter *adap = cookie;
+ struct sge_qset *qs0 = &adap->sge.qs[0];
+ struct sge_rspq *q0 = &qs0->rspq;
+
+ t3_write_reg(adap, A_PL_CLI, 0);
+ map = t3_read_reg(adap, A_SG_DATA_INTR);
+
+ if (unlikely(!map)) /* shared interrupt, most likely */
+ return IRQ_NONE;
+
+ spin_lock(&q0->lock);
+
+ if (unlikely(map & F_ERRINTR))
+ t3_slow_intr_handler(adap);
+
+ if (likely(map & 1))
+ napi_schedule(&qs0->napi);
+
+ if (map & 2)
+ napi_schedule(&adap->sge.qs[1].napi);
+
+ spin_unlock(&q0->lock);
+ return IRQ_HANDLED;
+}
+
+/**
+ * t3_intr_handler - select the top-level interrupt handler
+ * @adap: the adapter
+ * @polling: whether using NAPI to service response queues
+ *
+ * Selects the top-level interrupt handler based on the type of interrupts
+ * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
+ * response queues.
+ */
+irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
+{
+ if (adap->flags & USING_MSIX)
+ return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
+ if (adap->flags & USING_MSI)
+ return polling ? t3_intr_msi_napi : t3_intr_msi;
+ if (adap->params.rev > 0)
+ return polling ? t3b_intr_napi : t3b_intr;
+ return t3_intr;
+}
+
+#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
+ F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
+ V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
+ F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
+ F_HIRCQPARITYERROR)
+#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
+#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
+ F_RSPQDISABLED)
+
+/**
+ * t3_sge_err_intr_handler - SGE async event interrupt handler
+ * @adapter: the adapter
+ *
+ * Interrupt handler for SGE asynchronous (non-data) events.
+ */
+void t3_sge_err_intr_handler(struct adapter *adapter)
+{
+ unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
+ ~F_FLEMPTY;
+
+ if (status & SGE_PARERR)
+ CH_ALERT(adapter, "SGE parity error (0x%x)\n",
+ status & SGE_PARERR);
+ if (status & SGE_FRAMINGERR)
+ CH_ALERT(adapter, "SGE framing error (0x%x)\n",
+ status & SGE_FRAMINGERR);
+
+ if (status & F_RSPQCREDITOVERFOW)
+ CH_ALERT(adapter, "SGE response queue credit overflow\n");
+
+ if (status & F_RSPQDISABLED) {
+ v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
+
+ CH_ALERT(adapter,
+ "packet delivered to disabled response queue "
+ "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
+ }
+
+ if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
+ queue_work(cxgb3_wq, &adapter->db_drop_task);
+
+ if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
+ queue_work(cxgb3_wq, &adapter->db_full_task);
+
+ if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
+ queue_work(cxgb3_wq, &adapter->db_empty_task);
+
+ t3_write_reg(adapter, A_SG_INT_CAUSE, status);
+ if (status & SGE_FATALERR)
+ t3_fatal_err(adapter);
+}
+
+/**
+ * sge_timer_tx - perform periodic maintenance of an SGE qset
+ * @data: the SGE queue set to maintain
+ *
+ * Runs periodically from a timer to perform maintenance of an SGE queue
+ * set. It performs two tasks:
+ *
+ * Cleans up any completed Tx descriptors that may still be pending.
+ * Normal descriptor cleanup happens when new packets are added to a Tx
+ * queue so this timer is relatively infrequent and does any cleanup only
+ * if the Tx queue has not seen any new packets in a while. We make a
+ * best effort attempt to reclaim descriptors, in that we don't wait
+ * around if we cannot get a queue's lock (which most likely is because
+ * someone else is queueing new packets and so will also handle the clean
+ * up). Since control queues use immediate data exclusively we don't
+ * bother cleaning them up here.
+ *
+ */
+static void sge_timer_tx(unsigned long data)
+{
+ struct sge_qset *qs = (struct sge_qset *)data;
+ struct port_info *pi = netdev_priv(qs->netdev);
+ struct adapter *adap = pi->adapter;
+ unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
+ unsigned long next_period;
+
+ if (__netif_tx_trylock(qs->tx_q)) {
+ tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
+ TX_RECLAIM_TIMER_CHUNK);
+ __netif_tx_unlock(qs->tx_q);
+ }
+
+ if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
+ tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
+ TX_RECLAIM_TIMER_CHUNK);
+ spin_unlock(&qs->txq[TXQ_OFLD].lock);
+ }
+
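+ /*
+ * Reschedule sooner if we reclaimed a lot: each full
+ * TX_RECLAIM_TIMER_CHUNK of reclaimed descriptors halves the next
+ * reclaim period.
+ */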
+ next_period = TX_RECLAIM_PERIOD >>
+ (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
+ TX_RECLAIM_TIMER_CHUNK);
+ mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
+}
+
+/*
+ * sge_timer_rx - perform periodic maintenance of an SGE qset
+ * @data: the SGE queue set to maintain
+ *
+ * a) Replenishes Rx queues that have run out due to memory shortage.
+ * Normally new Rx buffers are added when existing ones are consumed but
+ * when out of memory a queue can become empty. We try to add only a few
+ * buffers here, the queue will be replenished fully as these new buffers
+ * are used up if memory shortage has subsided.
+ *
+ * b) Returns coalesced response queue credits in case a response queue is
+ * starved.
+ *
+ */
+static void sge_timer_rx(unsigned long data)
+{
+ spinlock_t *lock;
+ struct sge_qset *qs = (struct sge_qset *)data;
+ struct port_info *pi = netdev_priv(qs->netdev);
+ struct adapter *adap = pi->adapter;
+ u32 status;
+
+ lock = adap->params.rev > 0 ?
+ &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
+
+ if (!spin_trylock_irq(lock))
+ goto out;
+
+ if (napi_is_scheduled(&qs->napi))
+ goto unlock;
+
+ if (adap->params.rev < 4) {
+ status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
+
+ if (status & (1 << qs->rspq.cntxt_id)) {
+ qs->rspq.starved++;
+ if (qs->rspq.credits) {
+ qs->rspq.credits--;
+ refill_rspq(adap, &qs->rspq, 1);
+ qs->rspq.restarted++;
+ t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
+ 1 << qs->rspq.cntxt_id);
+ }
+ }
+ }
+
+ if (qs->fl[0].credits < qs->fl[0].size)
+ __refill_fl(adap, &qs->fl[0]);
+ if (qs->fl[1].credits < qs->fl[1].size)
+ __refill_fl(adap, &qs->fl[1]);
+
+unlock:
+ spin_unlock_irq(lock);
+out:
+ mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
+}
+
+/**
+ * t3_update_qset_coalesce - update coalescing settings for a queue set
+ * @qs: the SGE queue set
+ * @p: new queue set parameters
+ *
+ * Update the coalescing settings for an SGE queue set. Nothing is done
+ * if the queue set is not initialized yet.
+ */
+void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
+{
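+ /* coalesce_usecs * 10: SGE timer ticks are 0.1 us, see t3_sge_init() */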
+ qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */
+ qs->rspq.polling = p->polling;
+ qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
+}
+
+/**
+ * t3_sge_alloc_qset - initialize an SGE queue set
+ * @adapter: the adapter
+ * @id: the queue set id
+ * @nports: how many Ethernet ports will be using this queue set
+ * @irq_vec_idx: the IRQ vector index for response queue interrupts
+ * @p: configuration parameters for this queue set
+ * @ntxq: number of Tx queues for the queue set
+ * @dev: net device associated with this queue set
+ * @netdevq: net device TX queue associated with this queue set
+ *
+ * Allocate resources and initialize an SGE queue set. A queue set
+ * comprises a response queue, two Rx free-buffer queues, and up to 3
+ * Tx queues. The Tx queues are assigned roles in the order Ethernet
+ * queue, offload queue, and control queue.
+ */
+int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
+ int irq_vec_idx, const struct qset_params *p,
+ int ntxq, struct net_device *dev,
+ struct netdev_queue *netdevq)
+{
+ int i, avail, ret = -ENOMEM;
+ struct sge_qset *q = &adapter->sge.qs[id];
+
+ init_qset_cntxt(q, id);
+ setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
+ setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);
+
+ q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
+ sizeof(struct rx_desc),
+ sizeof(struct rx_sw_desc),
+ &q->fl[0].phys_addr, &q->fl[0].sdesc);
+ if (!q->fl[0].desc)
+ goto err;
+
+ q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
+ sizeof(struct rx_desc),
+ sizeof(struct rx_sw_desc),
+ &q->fl[1].phys_addr, &q->fl[1].sdesc);
+ if (!q->fl[1].desc)
+ goto err;
+
+ q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
+ sizeof(struct rsp_desc), 0,
+ &q->rspq.phys_addr, NULL);
+ if (!q->rspq.desc)
+ goto err;
+
+ for (i = 0; i < ntxq; ++i) {
+ /*
+ * The control queue always uses immediate data so does not
+ * need to keep track of any sk_buffs.
+ */
+ size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
+
+ q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
+ sizeof(struct tx_desc), sz,
+ &q->txq[i].phys_addr,
+ &q->txq[i].sdesc);
+ if (!q->txq[i].desc)
+ goto err;
+
+ q->txq[i].gen = 1;
+ q->txq[i].size = p->txq_size[i];
+ spin_lock_init(&q->txq[i].lock);
+ skb_queue_head_init(&q->txq[i].sendq);
+ }
+
+ tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
+ (unsigned long)q);
+ tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
+ (unsigned long)q);
+
+ q->fl[0].gen = q->fl[1].gen = 1;
+ q->fl[0].size = p->fl_size;
+ q->fl[1].size = p->jumbo_size;
+
+ q->rspq.gen = 1;
+ q->rspq.size = p->rspq_size;
+ spin_lock_init(&q->rspq.lock);
+ skb_queue_head_init(&q->rspq.rx_queue);
+
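+ /*
+ * Stop the Ethernet Tx queue while there is still room for one
+ * worst-case packet per port: an SGL covering MAX_SKB_FRAGS + 1
+ * buffers plus 3 header flits.
+ */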
+ q->txq[TXQ_ETH].stop_thres = nports *
+ flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
+
+#if FL0_PG_CHUNK_SIZE > 0
+ q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
+#else
+ q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
+#endif
+#if FL1_PG_CHUNK_SIZE > 0
+ q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
+#else
+ q->fl[1].buf_size = is_offload(adapter) ?
+ (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
+#endif
+
+ q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
+ q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
+ q->fl[0].order = FL0_PG_ORDER;
+ q->fl[1].order = FL1_PG_ORDER;
+ q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
+ q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
+
+ spin_lock_irq(&adapter->sge.reg_lock);
+
+ /* FL threshold comparison uses < */
+ ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
+ q->rspq.phys_addr, q->rspq.size,
+ q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
+ if (ret)
+ goto err_unlock;
+
+ for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
+ ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
+ q->fl[i].phys_addr, q->fl[i].size,
+ q->fl[i].buf_size - SGE_PG_RSVD,
+ p->cong_thres, 1, 0);
+ if (ret)
+ goto err_unlock;
+ }
+
+ ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
+ SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
+ q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
+ 1, 0);
+ if (ret)
+ goto err_unlock;
+
+ if (ntxq > 1) {
+ ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
+ USE_GTS, SGE_CNTXT_OFLD, id,
+ q->txq[TXQ_OFLD].phys_addr,
+ q->txq[TXQ_OFLD].size, 0, 1, 0);
+ if (ret)
+ goto err_unlock;
+ }
+
+ if (ntxq > 2) {
+ ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
+ SGE_CNTXT_CTRL, id,
+ q->txq[TXQ_CTRL].phys_addr,
+ q->txq[TXQ_CTRL].size,
+ q->txq[TXQ_CTRL].token, 1, 0);
+ if (ret)
+ goto err_unlock;
+ }
+
+ spin_unlock_irq(&adapter->sge.reg_lock);
+
+ q->adap = adapter;
+ q->netdev = dev;
+ q->tx_q = netdevq;
+ t3_update_qset_coalesce(q, p);
+
+ avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
+ GFP_KERNEL | __GFP_COMP);
+ if (!avail) {
+ CH_ALERT(adapter, "free list queue 0 initialization failed\n");
+ goto err;
+ }
+ if (avail < q->fl[0].size)
+ CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
+ avail);
+
+ avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
+ GFP_KERNEL | __GFP_COMP);
+ if (avail < q->fl[1].size)
+ CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
+ avail);
+ refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
+
+ t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
+ V_NEWTIMER(q->rspq.holdoff_tmr));
+
+ return 0;
+
+err_unlock:
+ spin_unlock_irq(&adapter->sge.reg_lock);
+err:
+ t3_free_qset(adapter, q);
+ return ret;
+}
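+
+/*
+ * Usage sketch (illustrative, with hypothetical locals): the driver's
+ * open path typically allocates one queue set per port, e.g.
+ *
+ * err = t3_sge_alloc_qset(adap, qset_idx, 1, irq_idx,
+ * &adap->params.sge.qset[qset_idx], ntxq,
+ * dev, netdev_get_tx_queue(dev, 0));
+ */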
+
+/**
+ * t3_start_sge_timers - start SGE timer callbacks
+ * @adap: the adapter
+ *
+ * Starts each SGE queue set's timer callbacks.
+ */
+void t3_start_sge_timers(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < SGE_QSETS; ++i) {
+ struct sge_qset *q = &adap->sge.qs[i];
+
+ if (q->tx_reclaim_timer.function)
+ mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+
+ if (q->rx_reclaim_timer.function)
+ mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
+ }
+}
+
+/**
+ * t3_stop_sge_timers - stop SGE timer callbacks
+ * @adap: the adapter
+ *
+ * Stops each SGE queue set's timer callbacks.
+ */
+void t3_stop_sge_timers(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < SGE_QSETS; ++i) {
+ struct sge_qset *q = &adap->sge.qs[i];
+
+ if (q->tx_reclaim_timer.function)
+ del_timer_sync(&q->tx_reclaim_timer);
+ if (q->rx_reclaim_timer.function)
+ del_timer_sync(&q->rx_reclaim_timer);
+ }
+}
+
+/**
+ * t3_free_sge_resources - free SGE resources
+ * @adap: the adapter
+ *
+ * Frees resources used by the SGE queue sets.
+ */
+void t3_free_sge_resources(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < SGE_QSETS; ++i)
+ t3_free_qset(adap, &adap->sge.qs[i]);
+}
+
+/**
+ * t3_sge_start - enable SGE
+ * @adap: the adapter
+ *
+ * Enables the SGE for DMAs. This is the last step in starting packet
+ * transfers.
+ */
+void t3_sge_start(struct adapter *adap)
+{
+ t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
+}
+
+/**
+ * t3_sge_stop - disable SGE operation
+ * @adap: the adapter
+ *
+ * Disables the DMA engine. This can be called in emergencies (e.g.,
+ * from error interrupts) or from normal process context. In the latter
+ * case it also disables any pending queue restart tasklets. Note that
+ * if it is called in interrupt context it cannot disable the restart
+ * tasklets as it cannot wait; however, the tasklets will have no effect
+ * since the doorbells are disabled and the driver will call this again
+ * later from process context, at which time the tasklets will be
+ * stopped if they are still running.
+ */
+void t3_sge_stop(struct adapter *adap)
+{
+ t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
+ if (!in_interrupt()) {
+ int i;
+
+ for (i = 0; i < SGE_QSETS; ++i) {
+ struct sge_qset *qs = &adap->sge.qs[i];
+
+ tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
+ tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
+ }
+ }
+}
+
+/**
+ * t3_sge_init - initialize SGE
+ * @adap: the adapter
+ * @p: the SGE parameters
+ *
+ * Performs SGE initialization needed every time after a chip reset.
+ * We do not initialize any of the queue sets here; instead the driver's
+ * top level must request those individually. We also do not enable DMA
+ * here; that should be done after the queues have been set up.
+ */
+void t3_sge_init(struct adapter *adap, struct sge_params *p)
+{
+ unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
+
+ ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
+ F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
+ V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
+ V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
+#if SGE_NUM_GENBITS == 1
+ ctrl |= F_EGRGENCTRL;
+#endif
+ if (adap->params.rev > 0) {
+ if (!(adap->flags & (USING_MSIX | USING_MSI)))
+ ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
+ }
+ t3_write_reg(adap, A_SG_CONTROL, ctrl);
+ t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
+ V_LORCQDRBTHRSH(512));
+ t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
+ t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
+ V_TIMEOUT(200 * core_ticks_per_usec(adap)));
+ t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
+ adap->params.rev < T3_REV_C ? 1000 : 500);
+ t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
+ t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
+ t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
+ t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
+ t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
+}
+
+/**
+ * t3_sge_prep - one-time SGE initialization
+ * @adap: the associated adapter
+ * @p: SGE parameters
+ *
+ * Performs one-time initialization of SGE SW state. Includes determining
+ * defaults for the assorted SGE parameters, which admins can change until
+ * they are used to initialize the SGE.
+ */
+void t3_sge_prep(struct adapter *adap, struct sge_params *p)
+{
+ int i;
+
+ p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ for (i = 0; i < SGE_QSETS; ++i) {
+ struct qset_params *q = p->qset + i;
+
+ q->polling = adap->params.rev > 0;
+ q->coalesce_usecs = 5;
+ q->rspq_size = 1024;
+ q->fl_size = 1024;
+ q->jumbo_size = 512;
+ q->txq_size[TXQ_ETH] = 1024;
+ q->txq_size[TXQ_OFLD] = 1024;
+ q->txq_size[TXQ_CTRL] = 256;
+ q->cong_thres = 0;
+ }
+
+ spin_lock_init(&adap->sge.reg_lock);
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h b/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h
new file mode 100644
index 000000000000..29b6c800b238
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h
@@ -0,0 +1,255 @@
+/*
+ * This file is automatically generated --- any changes will be lost.
+ */
+
+#ifndef _SGE_DEFS_H
+#define _SGE_DEFS_H
+
+#define S_EC_CREDITS 0
+#define M_EC_CREDITS 0x7FFF
+#define V_EC_CREDITS(x) ((x) << S_EC_CREDITS)
+#define G_EC_CREDITS(x) (((x) >> S_EC_CREDITS) & M_EC_CREDITS)
+
+#define S_EC_GTS 15
+#define V_EC_GTS(x) ((x) << S_EC_GTS)
+#define F_EC_GTS V_EC_GTS(1U)
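+
+/*
+ * Naming convention used throughout this file: S_x is a field's bit
+ * offset, M_x its mask, V_x(v) places a value in the field, G_x(w)
+ * extracts it, and F_x is a single-bit flag. Illustrative example:
+ *
+ * u32 w = V_EC_CREDITS(100) | F_EC_GTS; w == 0x8064
+ * unsigned int credits = G_EC_CREDITS(w); credits == 100
+ */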
+
+#define S_EC_INDEX 16
+#define M_EC_INDEX 0xFFFF
+#define V_EC_INDEX(x) ((x) << S_EC_INDEX)
+#define G_EC_INDEX(x) (((x) >> S_EC_INDEX) & M_EC_INDEX)
+
+#define S_EC_SIZE 0
+#define M_EC_SIZE 0xFFFF
+#define V_EC_SIZE(x) ((x) << S_EC_SIZE)
+#define G_EC_SIZE(x) (((x) >> S_EC_SIZE) & M_EC_SIZE)
+
+#define S_EC_BASE_LO 16
+#define M_EC_BASE_LO 0xFFFF
+#define V_EC_BASE_LO(x) ((x) << S_EC_BASE_LO)
+#define G_EC_BASE_LO(x) (((x) >> S_EC_BASE_LO) & M_EC_BASE_LO)
+
+#define S_EC_BASE_HI 0
+#define M_EC_BASE_HI 0xF
+#define V_EC_BASE_HI(x) ((x) << S_EC_BASE_HI)
+#define G_EC_BASE_HI(x) (((x) >> S_EC_BASE_HI) & M_EC_BASE_HI)
+
+#define S_EC_RESPQ 4
+#define M_EC_RESPQ 0x7
+#define V_EC_RESPQ(x) ((x) << S_EC_RESPQ)
+#define G_EC_RESPQ(x) (((x) >> S_EC_RESPQ) & M_EC_RESPQ)
+
+#define S_EC_TYPE 7
+#define M_EC_TYPE 0x7
+#define V_EC_TYPE(x) ((x) << S_EC_TYPE)
+#define G_EC_TYPE(x) (((x) >> S_EC_TYPE) & M_EC_TYPE)
+
+#define S_EC_GEN 10
+#define V_EC_GEN(x) ((x) << S_EC_GEN)
+#define F_EC_GEN V_EC_GEN(1U)
+
+#define S_EC_UP_TOKEN 11
+#define M_EC_UP_TOKEN 0xFFFFF
+#define V_EC_UP_TOKEN(x) ((x) << S_EC_UP_TOKEN)
+#define G_EC_UP_TOKEN(x) (((x) >> S_EC_UP_TOKEN) & M_EC_UP_TOKEN)
+
+#define S_EC_VALID 31
+#define V_EC_VALID(x) ((x) << S_EC_VALID)
+#define F_EC_VALID V_EC_VALID(1U)
+
+#define S_RQ_MSI_VEC 20
+#define M_RQ_MSI_VEC 0x3F
+#define V_RQ_MSI_VEC(x) ((x) << S_RQ_MSI_VEC)
+#define G_RQ_MSI_VEC(x) (((x) >> S_RQ_MSI_VEC) & M_RQ_MSI_VEC)
+
+#define S_RQ_INTR_EN 26
+#define V_RQ_INTR_EN(x) ((x) << S_RQ_INTR_EN)
+#define F_RQ_INTR_EN V_RQ_INTR_EN(1U)
+
+#define S_RQ_GEN 28
+#define V_RQ_GEN(x) ((x) << S_RQ_GEN)
+#define F_RQ_GEN V_RQ_GEN(1U)
+
+#define S_CQ_INDEX 0
+#define M_CQ_INDEX 0xFFFF
+#define V_CQ_INDEX(x) ((x) << S_CQ_INDEX)
+#define G_CQ_INDEX(x) (((x) >> S_CQ_INDEX) & M_CQ_INDEX)
+
+#define S_CQ_SIZE 16
+#define M_CQ_SIZE 0xFFFF
+#define V_CQ_SIZE(x) ((x) << S_CQ_SIZE)
+#define G_CQ_SIZE(x) (((x) >> S_CQ_SIZE) & M_CQ_SIZE)
+
+#define S_CQ_BASE_HI 0
+#define M_CQ_BASE_HI 0xFFFFF
+#define V_CQ_BASE_HI(x) ((x) << S_CQ_BASE_HI)
+#define G_CQ_BASE_HI(x) (((x) >> S_CQ_BASE_HI) & M_CQ_BASE_HI)
+
+#define S_CQ_RSPQ 20
+#define M_CQ_RSPQ 0x3F
+#define V_CQ_RSPQ(x) ((x) << S_CQ_RSPQ)
+#define G_CQ_RSPQ(x) (((x) >> S_CQ_RSPQ) & M_CQ_RSPQ)
+
+#define S_CQ_ASYNC_NOTIF 26
+#define V_CQ_ASYNC_NOTIF(x) ((x) << S_CQ_ASYNC_NOTIF)
+#define F_CQ_ASYNC_NOTIF V_CQ_ASYNC_NOTIF(1U)
+
+#define S_CQ_ARMED 27
+#define V_CQ_ARMED(x) ((x) << S_CQ_ARMED)
+#define F_CQ_ARMED V_CQ_ARMED(1U)
+
+#define S_CQ_ASYNC_NOTIF_SOL 28
+#define V_CQ_ASYNC_NOTIF_SOL(x) ((x) << S_CQ_ASYNC_NOTIF_SOL)
+#define F_CQ_ASYNC_NOTIF_SOL V_CQ_ASYNC_NOTIF_SOL(1U)
+
+#define S_CQ_GEN 29
+#define V_CQ_GEN(x) ((x) << S_CQ_GEN)
+#define F_CQ_GEN V_CQ_GEN(1U)
+
+#define S_CQ_ERR 30
+#define V_CQ_ERR(x) ((x) << S_CQ_ERR)
+#define F_CQ_ERR V_CQ_ERR(1U)
+
+#define S_CQ_OVERFLOW_MODE 31
+#define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE)
+#define F_CQ_OVERFLOW_MODE V_CQ_OVERFLOW_MODE(1U)
+
+#define S_CQ_CREDITS 0
+#define M_CQ_CREDITS 0xFFFF
+#define V_CQ_CREDITS(x) ((x) << S_CQ_CREDITS)
+#define G_CQ_CREDITS(x) (((x) >> S_CQ_CREDITS) & M_CQ_CREDITS)
+
+#define S_CQ_CREDIT_THRES 16
+#define M_CQ_CREDIT_THRES 0x1FFF
+#define V_CQ_CREDIT_THRES(x) ((x) << S_CQ_CREDIT_THRES)
+#define G_CQ_CREDIT_THRES(x) (((x) >> S_CQ_CREDIT_THRES) & M_CQ_CREDIT_THRES)
+
+#define S_FL_BASE_HI 0
+#define M_FL_BASE_HI 0xFFFFF
+#define V_FL_BASE_HI(x) ((x) << S_FL_BASE_HI)
+#define G_FL_BASE_HI(x) (((x) >> S_FL_BASE_HI) & M_FL_BASE_HI)
+
+#define S_FL_INDEX_LO 20
+#define M_FL_INDEX_LO 0xFFF
+#define V_FL_INDEX_LO(x) ((x) << S_FL_INDEX_LO)
+#define G_FL_INDEX_LO(x) (((x) >> S_FL_INDEX_LO) & M_FL_INDEX_LO)
+
+#define S_FL_INDEX_HI 0
+#define M_FL_INDEX_HI 0xF
+#define V_FL_INDEX_HI(x) ((x) << S_FL_INDEX_HI)
+#define G_FL_INDEX_HI(x) (((x) >> S_FL_INDEX_HI) & M_FL_INDEX_HI)
+
+#define S_FL_SIZE 4
+#define M_FL_SIZE 0xFFFF
+#define V_FL_SIZE(x) ((x) << S_FL_SIZE)
+#define G_FL_SIZE(x) (((x) >> S_FL_SIZE) & M_FL_SIZE)
+
+#define S_FL_GEN 20
+#define V_FL_GEN(x) ((x) << S_FL_GEN)
+#define F_FL_GEN V_FL_GEN(1U)
+
+#define S_FL_ENTRY_SIZE_LO 21
+#define M_FL_ENTRY_SIZE_LO 0x7FF
+#define V_FL_ENTRY_SIZE_LO(x) ((x) << S_FL_ENTRY_SIZE_LO)
+#define G_FL_ENTRY_SIZE_LO(x) (((x) >> S_FL_ENTRY_SIZE_LO) & M_FL_ENTRY_SIZE_LO)
+
+#define S_FL_ENTRY_SIZE_HI 0
+#define M_FL_ENTRY_SIZE_HI 0x1FFFFF
+#define V_FL_ENTRY_SIZE_HI(x) ((x) << S_FL_ENTRY_SIZE_HI)
+#define G_FL_ENTRY_SIZE_HI(x) (((x) >> S_FL_ENTRY_SIZE_HI) & M_FL_ENTRY_SIZE_HI)
+
+#define S_FL_CONG_THRES 21
+#define M_FL_CONG_THRES 0x3FF
+#define V_FL_CONG_THRES(x) ((x) << S_FL_CONG_THRES)
+#define G_FL_CONG_THRES(x) (((x) >> S_FL_CONG_THRES) & M_FL_CONG_THRES)
+
+#define S_FL_GTS 31
+#define V_FL_GTS(x) ((x) << S_FL_GTS)
+#define F_FL_GTS V_FL_GTS(1U)
+
+#define S_FLD_GEN1 31
+#define V_FLD_GEN1(x) ((x) << S_FLD_GEN1)
+#define F_FLD_GEN1 V_FLD_GEN1(1U)
+
+#define S_FLD_GEN2 0
+#define V_FLD_GEN2(x) ((x) << S_FLD_GEN2)
+#define F_FLD_GEN2 V_FLD_GEN2(1U)
+
+#define S_RSPD_TXQ1_CR 0
+#define M_RSPD_TXQ1_CR 0x7F
+#define V_RSPD_TXQ1_CR(x) ((x) << S_RSPD_TXQ1_CR)
+#define G_RSPD_TXQ1_CR(x) (((x) >> S_RSPD_TXQ1_CR) & M_RSPD_TXQ1_CR)
+
+#define S_RSPD_TXQ1_GTS 7
+#define V_RSPD_TXQ1_GTS(x) ((x) << S_RSPD_TXQ1_GTS)
+#define F_RSPD_TXQ1_GTS V_RSPD_TXQ1_GTS(1U)
+
+#define S_RSPD_TXQ2_CR 8
+#define M_RSPD_TXQ2_CR 0x7F
+#define V_RSPD_TXQ2_CR(x) ((x) << S_RSPD_TXQ2_CR)
+#define G_RSPD_TXQ2_CR(x) (((x) >> S_RSPD_TXQ2_CR) & M_RSPD_TXQ2_CR)
+
+#define S_RSPD_TXQ2_GTS 15
+#define V_RSPD_TXQ2_GTS(x) ((x) << S_RSPD_TXQ2_GTS)
+#define F_RSPD_TXQ2_GTS V_RSPD_TXQ2_GTS(1U)
+
+#define S_RSPD_TXQ0_CR 16
+#define M_RSPD_TXQ0_CR 0x7F
+#define V_RSPD_TXQ0_CR(x) ((x) << S_RSPD_TXQ0_CR)
+#define G_RSPD_TXQ0_CR(x) (((x) >> S_RSPD_TXQ0_CR) & M_RSPD_TXQ0_CR)
+
+#define S_RSPD_TXQ0_GTS 23
+#define V_RSPD_TXQ0_GTS(x) ((x) << S_RSPD_TXQ0_GTS)
+#define F_RSPD_TXQ0_GTS V_RSPD_TXQ0_GTS(1U)
+
+#define S_RSPD_EOP 24
+#define V_RSPD_EOP(x) ((x) << S_RSPD_EOP)
+#define F_RSPD_EOP V_RSPD_EOP(1U)
+
+#define S_RSPD_SOP 25
+#define V_RSPD_SOP(x) ((x) << S_RSPD_SOP)
+#define F_RSPD_SOP V_RSPD_SOP(1U)
+
+#define S_RSPD_ASYNC_NOTIF 26
+#define V_RSPD_ASYNC_NOTIF(x) ((x) << S_RSPD_ASYNC_NOTIF)
+#define F_RSPD_ASYNC_NOTIF V_RSPD_ASYNC_NOTIF(1U)
+
+#define S_RSPD_FL0_GTS 27
+#define V_RSPD_FL0_GTS(x) ((x) << S_RSPD_FL0_GTS)
+#define F_RSPD_FL0_GTS V_RSPD_FL0_GTS(1U)
+
+#define S_RSPD_FL1_GTS 28
+#define V_RSPD_FL1_GTS(x) ((x) << S_RSPD_FL1_GTS)
+#define F_RSPD_FL1_GTS V_RSPD_FL1_GTS(1U)
+
+#define S_RSPD_IMM_DATA_VALID 29
+#define V_RSPD_IMM_DATA_VALID(x) ((x) << S_RSPD_IMM_DATA_VALID)
+#define F_RSPD_IMM_DATA_VALID V_RSPD_IMM_DATA_VALID(1U)
+
+#define S_RSPD_OFFLOAD 30
+#define V_RSPD_OFFLOAD(x) ((x) << S_RSPD_OFFLOAD)
+#define F_RSPD_OFFLOAD V_RSPD_OFFLOAD(1U)
+
+#define S_RSPD_GEN1 31
+#define V_RSPD_GEN1(x) ((x) << S_RSPD_GEN1)
+#define F_RSPD_GEN1 V_RSPD_GEN1(1U)
+
+#define S_RSPD_LEN 0
+#define M_RSPD_LEN 0x7FFFFFFF
+#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN)
+#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN)
+
+#define S_RSPD_FLQ 31
+#define V_RSPD_FLQ(x) ((x) << S_RSPD_FLQ)
+#define F_RSPD_FLQ V_RSPD_FLQ(1U)
+
+#define S_RSPD_GEN2 0
+#define V_RSPD_GEN2(x) ((x) << S_RSPD_GEN2)
+#define F_RSPD_GEN2 V_RSPD_GEN2(1U)
+
+#define S_RSPD_INR_VEC 1
+#define M_RSPD_INR_VEC 0x7F
+#define V_RSPD_INR_VEC(x) ((x) << S_RSPD_INR_VEC)
+#define G_RSPD_INR_VEC(x) (((x) >> S_RSPD_INR_VEC) & M_RSPD_INR_VEC)
+
+#endif /* _SGE_DEFS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h b/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h
new file mode 100644
index 000000000000..852c399a8b0a
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h
@@ -0,0 +1,1495 @@
+/*
+ * Copyright (c) 2004-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef T3_CPL_H
+#define T3_CPL_H
+
+#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
+# include <asm/byteorder.h>
+#endif
+
+enum CPL_opcode {
+ CPL_PASS_OPEN_REQ = 0x1,
+ CPL_PASS_ACCEPT_RPL = 0x2,
+ CPL_ACT_OPEN_REQ = 0x3,
+ CPL_SET_TCB = 0x4,
+ CPL_SET_TCB_FIELD = 0x5,
+ CPL_GET_TCB = 0x6,
+ CPL_PCMD = 0x7,
+ CPL_CLOSE_CON_REQ = 0x8,
+ CPL_CLOSE_LISTSRV_REQ = 0x9,
+ CPL_ABORT_REQ = 0xA,
+ CPL_ABORT_RPL = 0xB,
+ CPL_TX_DATA = 0xC,
+ CPL_RX_DATA_ACK = 0xD,
+ CPL_TX_PKT = 0xE,
+ CPL_RTE_DELETE_REQ = 0xF,
+ CPL_RTE_WRITE_REQ = 0x10,
+ CPL_RTE_READ_REQ = 0x11,
+ CPL_L2T_WRITE_REQ = 0x12,
+ CPL_L2T_READ_REQ = 0x13,
+ CPL_SMT_WRITE_REQ = 0x14,
+ CPL_SMT_READ_REQ = 0x15,
+ CPL_TX_PKT_LSO = 0x16,
+ CPL_PCMD_READ = 0x17,
+ CPL_BARRIER = 0x18,
+ CPL_TID_RELEASE = 0x1A,
+
+ CPL_CLOSE_LISTSRV_RPL = 0x20,
+ CPL_ERROR = 0x21,
+ CPL_GET_TCB_RPL = 0x22,
+ CPL_L2T_WRITE_RPL = 0x23,
+ CPL_PCMD_READ_RPL = 0x24,
+ CPL_PCMD_RPL = 0x25,
+ CPL_PEER_CLOSE = 0x26,
+ CPL_RTE_DELETE_RPL = 0x27,
+ CPL_RTE_WRITE_RPL = 0x28,
+ CPL_RX_DDP_COMPLETE = 0x29,
+ CPL_RX_PHYS_ADDR = 0x2A,
+ CPL_RX_PKT = 0x2B,
+ CPL_RX_URG_NOTIFY = 0x2C,
+ CPL_SET_TCB_RPL = 0x2D,
+ CPL_SMT_WRITE_RPL = 0x2E,
+ CPL_TX_DATA_ACK = 0x2F,
+
+ CPL_ABORT_REQ_RSS = 0x30,
+ CPL_ABORT_RPL_RSS = 0x31,
+ CPL_CLOSE_CON_RPL = 0x32,
+ CPL_ISCSI_HDR = 0x33,
+ CPL_L2T_READ_RPL = 0x34,
+ CPL_RDMA_CQE = 0x35,
+ CPL_RDMA_CQE_READ_RSP = 0x36,
+ CPL_RDMA_CQE_ERR = 0x37,
+ CPL_RTE_READ_RPL = 0x38,
+ CPL_RX_DATA = 0x39,
+
+ CPL_ACT_OPEN_RPL = 0x40,
+ CPL_PASS_OPEN_RPL = 0x41,
+ CPL_RX_DATA_DDP = 0x42,
+ CPL_SMT_READ_RPL = 0x43,
+
+ CPL_ACT_ESTABLISH = 0x50,
+ CPL_PASS_ESTABLISH = 0x51,
+
+ CPL_PASS_ACCEPT_REQ = 0x70,
+
+ CPL_ASYNC_NOTIF = 0x80, /* fake opcode for async notifications */
+
+ CPL_TX_DMA_ACK = 0xA0,
+ CPL_RDMA_READ_REQ = 0xA1,
+ CPL_RDMA_TERMINATE = 0xA2,
+ CPL_TRACE_PKT = 0xA3,
+ CPL_RDMA_EC_STATUS = 0xA5,
+
+ NUM_CPL_CMDS /* must be last and previous entries must be sorted */
+};
+
+enum CPL_error {
+ CPL_ERR_NONE = 0,
+ CPL_ERR_TCAM_PARITY = 1,
+ CPL_ERR_TCAM_FULL = 3,
+ CPL_ERR_CONN_RESET = 20,
+ CPL_ERR_CONN_EXIST = 22,
+ CPL_ERR_ARP_MISS = 23,
+ CPL_ERR_BAD_SYN = 24,
+ CPL_ERR_CONN_TIMEDOUT = 30,
+ CPL_ERR_XMIT_TIMEDOUT = 31,
+ CPL_ERR_PERSIST_TIMEDOUT = 32,
+ CPL_ERR_FINWAIT2_TIMEDOUT = 33,
+ CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
+ CPL_ERR_RTX_NEG_ADVICE = 35,
+ CPL_ERR_PERSIST_NEG_ADVICE = 36,
+ CPL_ERR_ABORT_FAILED = 42,
+ CPL_ERR_GENERAL = 99
+};
+
+enum {
+ CPL_CONN_POLICY_AUTO = 0,
+ CPL_CONN_POLICY_ASK = 1,
+ CPL_CONN_POLICY_DENY = 3
+};
+
+enum {
+ ULP_MODE_NONE = 0,
+ ULP_MODE_ISCSI = 2,
+ ULP_MODE_RDMA = 4,
+ ULP_MODE_TCPDDP = 5
+};
+
+enum {
+ ULP_CRC_HEADER = 1 << 0,
+ ULP_CRC_DATA = 1 << 1
+};
+
+enum {
+ CPL_PASS_OPEN_ACCEPT,
+ CPL_PASS_OPEN_REJECT
+};
+
+enum {
+ CPL_ABORT_SEND_RST = 0,
+ CPL_ABORT_NO_RST,
+ CPL_ABORT_POST_CLOSE_REQ = 2
+};
+
+enum { /* TX_PKT_LSO ethernet types */
+ CPL_ETH_II,
+ CPL_ETH_II_VLAN,
+ CPL_ETH_802_3,
+ CPL_ETH_802_3_VLAN
+};
+
+enum { /* TCP congestion control algorithms */
+ CONG_ALG_RENO,
+ CONG_ALG_TAHOE,
+ CONG_ALG_NEWRENO,
+ CONG_ALG_HIGHSPEED
+};
+
+enum { /* RSS hash type */
+ RSS_HASH_NONE = 0,
+ RSS_HASH_2_TUPLE = 1,
+ RSS_HASH_4_TUPLE = 2,
+ RSS_HASH_TCPV6 = 3
+};
+
+union opcode_tid {
+ __be32 opcode_tid;
+ __u8 opcode;
+};
+
+#define S_OPCODE 24
+#define V_OPCODE(x) ((x) << S_OPCODE)
+#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
+#define G_TID(x) ((x) & 0xFFFFFF)
+
+#define S_QNUM 0
+#define G_QNUM(x) (((x) >> S_QNUM) & 0xFFFF)
+
+#define S_HASHTYPE 22
+#define M_HASHTYPE 0x3
+#define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE)
+
+/* tid is assumed to be 24-bits */
+#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
+
+#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
+
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
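+
+/*
+ * Illustrative example: MK_OPCODE_TID(CPL_TX_PKT, 5) packs opcode 0xE
+ * and TID 5 into 0x0e000005; GET_TID() on a received command recovers
+ * the 24-bit TID (here 5) after the ntohl() byte-order conversion.
+ */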
+
+struct tcp_options {
+ __be16 mss;
+ __u8 wsf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8:5;
+ __u8 ecn:1;
+ __u8 sack:1;
+ __u8 tstamp:1;
+#else
+ __u8 tstamp:1;
+ __u8 sack:1;
+ __u8 ecn:1;
+ __u8:5;
+#endif
+};
+
+struct rss_header {
+ __u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 cpu_idx:6;
+ __u8 hash_type:2;
+#else
+ __u8 hash_type:2;
+ __u8 cpu_idx:6;
+#endif
+ __be16 cq_idx;
+ __be32 rss_hash_val;
+};
+
+#ifndef CHELSIO_FW
+struct work_request_hdr {
+ __be32 wr_hi;
+ __be32 wr_lo;
+};
+
+/* wr_hi fields */
+#define S_WR_SGE_CREDITS 0
+#define M_WR_SGE_CREDITS 0xFF
+#define V_WR_SGE_CREDITS(x) ((x) << S_WR_SGE_CREDITS)
+#define G_WR_SGE_CREDITS(x) (((x) >> S_WR_SGE_CREDITS) & M_WR_SGE_CREDITS)
+
+#define S_WR_SGLSFLT 8
+#define M_WR_SGLSFLT 0xFF
+#define V_WR_SGLSFLT(x) ((x) << S_WR_SGLSFLT)
+#define G_WR_SGLSFLT(x) (((x) >> S_WR_SGLSFLT) & M_WR_SGLSFLT)
+
+#define S_WR_BCNTLFLT 16
+#define M_WR_BCNTLFLT 0xF
+#define V_WR_BCNTLFLT(x) ((x) << S_WR_BCNTLFLT)
+#define G_WR_BCNTLFLT(x) (((x) >> S_WR_BCNTLFLT) & M_WR_BCNTLFLT)
+
+#define S_WR_DATATYPE 20
+#define V_WR_DATATYPE(x) ((x) << S_WR_DATATYPE)
+#define F_WR_DATATYPE V_WR_DATATYPE(1U)
+
+#define S_WR_COMPL 21
+#define V_WR_COMPL(x) ((x) << S_WR_COMPL)
+#define F_WR_COMPL V_WR_COMPL(1U)
+
+#define S_WR_EOP 22
+#define V_WR_EOP(x) ((x) << S_WR_EOP)
+#define F_WR_EOP V_WR_EOP(1U)
+
+#define S_WR_SOP 23
+#define V_WR_SOP(x) ((x) << S_WR_SOP)
+#define F_WR_SOP V_WR_SOP(1U)
+
+#define S_WR_OP 24
+#define M_WR_OP 0xFF
+#define V_WR_OP(x) ((x) << S_WR_OP)
+#define G_WR_OP(x) (((x) >> S_WR_OP) & M_WR_OP)
+
+/* wr_lo fields */
+#define S_WR_LEN 0
+#define M_WR_LEN 0xFF
+#define V_WR_LEN(x) ((x) << S_WR_LEN)
+#define G_WR_LEN(x) (((x) >> S_WR_LEN) & M_WR_LEN)
+
+#define S_WR_TID 8
+#define M_WR_TID 0xFFFFF
+#define V_WR_TID(x) ((x) << S_WR_TID)
+#define G_WR_TID(x) (((x) >> S_WR_TID) & M_WR_TID)
+
+#define S_WR_CR_FLUSH 30
+#define V_WR_CR_FLUSH(x) ((x) << S_WR_CR_FLUSH)
+#define F_WR_CR_FLUSH V_WR_CR_FLUSH(1U)
+
+#define S_WR_GEN 31
+#define V_WR_GEN(x) ((x) << S_WR_GEN)
+#define F_WR_GEN V_WR_GEN(1U)
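+
+/*
+ * Illustrative sketch: senders compose wr_hi/wr_lo from the fields
+ * above, roughly (FW_WROPCODE_FORWARD is a firmware opcode from
+ * firmware_exports.h; credits, len and gen are caller state):
+ *
+ * wrp->wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD) | F_WR_SOP |
+ * F_WR_EOP | V_WR_SGE_CREDITS(credits));
+ * wrp->wr_lo = htonl(V_WR_LEN(len) | V_WR_GEN(gen));
+ */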
+
+# define WR_HDR struct work_request_hdr wr
+# define RSS_HDR
+#else
+# define WR_HDR
+# define RSS_HDR struct rss_header rss_hdr;
+#endif
+
+/* option 0 lower-half fields */
+#define S_CPL_STATUS 0
+#define M_CPL_STATUS 0xFF
+#define V_CPL_STATUS(x) ((x) << S_CPL_STATUS)
+#define G_CPL_STATUS(x) (((x) >> S_CPL_STATUS) & M_CPL_STATUS)
+
+#define S_INJECT_TIMER 6
+#define V_INJECT_TIMER(x) ((x) << S_INJECT_TIMER)
+#define F_INJECT_TIMER V_INJECT_TIMER(1U)
+
+#define S_NO_OFFLOAD 7
+#define V_NO_OFFLOAD(x) ((x) << S_NO_OFFLOAD)
+#define F_NO_OFFLOAD V_NO_OFFLOAD(1U)
+
+#define S_ULP_MODE 8
+#define M_ULP_MODE 0xF
+#define V_ULP_MODE(x) ((x) << S_ULP_MODE)
+#define G_ULP_MODE(x) (((x) >> S_ULP_MODE) & M_ULP_MODE)
+
+#define S_RCV_BUFSIZ 12
+#define M_RCV_BUFSIZ 0x3FFF
+#define V_RCV_BUFSIZ(x) ((x) << S_RCV_BUFSIZ)
+#define G_RCV_BUFSIZ(x) (((x) >> S_RCV_BUFSIZ) & M_RCV_BUFSIZ)
+
+#define S_TOS 26
+#define M_TOS 0x3F
+#define V_TOS(x) ((x) << S_TOS)
+#define G_TOS(x) (((x) >> S_TOS) & M_TOS)
+
+/* option 0 upper-half fields */
+#define S_DELACK 0
+#define V_DELACK(x) ((x) << S_DELACK)
+#define F_DELACK V_DELACK(1U)
+
+#define S_NO_CONG 1
+#define V_NO_CONG(x) ((x) << S_NO_CONG)
+#define F_NO_CONG V_NO_CONG(1U)
+
+#define S_SRC_MAC_SEL 2
+#define M_SRC_MAC_SEL 0x3
+#define V_SRC_MAC_SEL(x) ((x) << S_SRC_MAC_SEL)
+#define G_SRC_MAC_SEL(x) (((x) >> S_SRC_MAC_SEL) & M_SRC_MAC_SEL)
+
+#define S_L2T_IDX 4
+#define M_L2T_IDX 0x7FF
+#define V_L2T_IDX(x) ((x) << S_L2T_IDX)
+#define G_L2T_IDX(x) (((x) >> S_L2T_IDX) & M_L2T_IDX)
+
+#define S_TX_CHANNEL 15
+#define V_TX_CHANNEL(x) ((x) << S_TX_CHANNEL)
+#define F_TX_CHANNEL V_TX_CHANNEL(1U)
+
+#define S_TCAM_BYPASS 16
+#define V_TCAM_BYPASS(x) ((x) << S_TCAM_BYPASS)
+#define F_TCAM_BYPASS V_TCAM_BYPASS(1U)
+
+#define S_NAGLE 17
+#define V_NAGLE(x) ((x) << S_NAGLE)
+#define F_NAGLE V_NAGLE(1U)
+
+#define S_WND_SCALE 18
+#define M_WND_SCALE 0xF
+#define V_WND_SCALE(x) ((x) << S_WND_SCALE)
+#define G_WND_SCALE(x) (((x) >> S_WND_SCALE) & M_WND_SCALE)
+
+#define S_KEEP_ALIVE 22
+#define V_KEEP_ALIVE(x) ((x) << S_KEEP_ALIVE)
+#define F_KEEP_ALIVE V_KEEP_ALIVE(1U)
+
+#define S_MAX_RETRANS 23
+#define M_MAX_RETRANS 0xF
+#define V_MAX_RETRANS(x) ((x) << S_MAX_RETRANS)
+#define G_MAX_RETRANS(x) (((x) >> S_MAX_RETRANS) & M_MAX_RETRANS)
+
+#define S_MAX_RETRANS_OVERRIDE 27
+#define V_MAX_RETRANS_OVERRIDE(x) ((x) << S_MAX_RETRANS_OVERRIDE)
+#define F_MAX_RETRANS_OVERRIDE V_MAX_RETRANS_OVERRIDE(1U)
+
+#define S_MSS_IDX 28
+#define M_MSS_IDX 0xF
+#define V_MSS_IDX(x) ((x) << S_MSS_IDX)
+#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
+
+/* option 1 fields */
+#define S_RSS_ENABLE 0
+#define V_RSS_ENABLE(x) ((x) << S_RSS_ENABLE)
+#define F_RSS_ENABLE V_RSS_ENABLE(1U)
+
+#define S_RSS_MASK_LEN 1
+#define M_RSS_MASK_LEN 0x7
+#define V_RSS_MASK_LEN(x) ((x) << S_RSS_MASK_LEN)
+#define G_RSS_MASK_LEN(x) (((x) >> S_RSS_MASK_LEN) & M_RSS_MASK_LEN)
+
+#define S_CPU_IDX 4
+#define M_CPU_IDX 0x3F
+#define V_CPU_IDX(x) ((x) << S_CPU_IDX)
+#define G_CPU_IDX(x) (((x) >> S_CPU_IDX) & M_CPU_IDX)
+
+#define S_MAC_MATCH_VALID 18
+#define V_MAC_MATCH_VALID(x) ((x) << S_MAC_MATCH_VALID)
+#define F_MAC_MATCH_VALID V_MAC_MATCH_VALID(1U)
+
+#define S_CONN_POLICY 19
+#define M_CONN_POLICY 0x3
+#define V_CONN_POLICY(x) ((x) << S_CONN_POLICY)
+#define G_CONN_POLICY(x) (((x) >> S_CONN_POLICY) & M_CONN_POLICY)
+
+#define S_SYN_DEFENSE 21
+#define V_SYN_DEFENSE(x) ((x) << S_SYN_DEFENSE)
+#define F_SYN_DEFENSE V_SYN_DEFENSE(1U)
+
+#define S_VLAN_PRI 22
+#define M_VLAN_PRI 0x3
+#define V_VLAN_PRI(x) ((x) << S_VLAN_PRI)
+#define G_VLAN_PRI(x) (((x) >> S_VLAN_PRI) & M_VLAN_PRI)
+
+#define S_VLAN_PRI_VALID 24
+#define V_VLAN_PRI_VALID(x) ((x) << S_VLAN_PRI_VALID)
+#define F_VLAN_PRI_VALID V_VLAN_PRI_VALID(1U)
+
+#define S_PKT_TYPE 25
+#define M_PKT_TYPE 0x3
+#define V_PKT_TYPE(x) ((x) << S_PKT_TYPE)
+#define G_PKT_TYPE(x) (((x) >> S_PKT_TYPE) & M_PKT_TYPE)
+
+#define S_MAC_MATCH 27
+#define M_MAC_MATCH 0x1F
+#define V_MAC_MATCH(x) ((x) << S_MAC_MATCH)
+#define G_MAC_MATCH(x) (((x) >> S_MAC_MATCH) & M_MAC_MATCH)
+
+/* option 2 fields */
+#define S_CPU_INDEX 0
+#define M_CPU_INDEX 0x7F
+#define V_CPU_INDEX(x) ((x) << S_CPU_INDEX)
+#define G_CPU_INDEX(x) (((x) >> S_CPU_INDEX) & M_CPU_INDEX)
+
+#define S_CPU_INDEX_VALID 7
+#define V_CPU_INDEX_VALID(x) ((x) << S_CPU_INDEX_VALID)
+#define F_CPU_INDEX_VALID V_CPU_INDEX_VALID(1U)
+
+#define S_RX_COALESCE 8
+#define M_RX_COALESCE 0x3
+#define V_RX_COALESCE(x) ((x) << S_RX_COALESCE)
+#define G_RX_COALESCE(x) (((x) >> S_RX_COALESCE) & M_RX_COALESCE)
+
+#define S_RX_COALESCE_VALID 10
+#define V_RX_COALESCE_VALID(x) ((x) << S_RX_COALESCE_VALID)
+#define F_RX_COALESCE_VALID V_RX_COALESCE_VALID(1U)
+
+#define S_CONG_CONTROL_FLAVOR 11
+#define M_CONG_CONTROL_FLAVOR 0x3
+#define V_CONG_CONTROL_FLAVOR(x) ((x) << S_CONG_CONTROL_FLAVOR)
+#define G_CONG_CONTROL_FLAVOR(x) (((x) >> S_CONG_CONTROL_FLAVOR) & M_CONG_CONTROL_FLAVOR)
+
+#define S_PACING_FLAVOR 13
+#define M_PACING_FLAVOR 0x3
+#define V_PACING_FLAVOR(x) ((x) << S_PACING_FLAVOR)
+#define G_PACING_FLAVOR(x) (((x) >> S_PACING_FLAVOR) & M_PACING_FLAVOR)
+
+#define S_FLAVORS_VALID 15
+#define V_FLAVORS_VALID(x) ((x) << S_FLAVORS_VALID)
+#define F_FLAVORS_VALID V_FLAVORS_VALID(1U)
+
+#define S_RX_FC_DISABLE 16
+#define V_RX_FC_DISABLE(x) ((x) << S_RX_FC_DISABLE)
+#define F_RX_FC_DISABLE V_RX_FC_DISABLE(1U)
+
+#define S_RX_FC_VALID 17
+#define V_RX_FC_VALID(x) ((x) << S_RX_FC_VALID)
+#define F_RX_FC_VALID V_RX_FC_VALID(1U)
+
+struct cpl_pass_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be32 opt0h;
+ __be32 opt0l;
+ __be32 peer_netmask;
+ __be32 opt1;
+};
+
+struct cpl_pass_open_rpl {
+ RSS_HDR union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __u8 resvd[7];
+ __u8 status;
+};
+
+struct cpl_pass_establish {
+ RSS_HDR union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be32 tos_tid;
+ __be16 l2t_idx;
+ __be16 tcp_opt;
+ __be32 snd_isn;
+ __be32 rcv_isn;
+};
+
+/* cpl_pass_establish.tos_tid fields */
+#define S_PASS_OPEN_TID 0
+#define M_PASS_OPEN_TID 0xFFFFFF
+#define V_PASS_OPEN_TID(x) ((x) << S_PASS_OPEN_TID)
+#define G_PASS_OPEN_TID(x) (((x) >> S_PASS_OPEN_TID) & M_PASS_OPEN_TID)
+
+#define S_PASS_OPEN_TOS 24
+#define M_PASS_OPEN_TOS 0xFF
+#define V_PASS_OPEN_TOS(x) ((x) << S_PASS_OPEN_TOS)
+#define G_PASS_OPEN_TOS(x) (((x) >> S_PASS_OPEN_TOS) & M_PASS_OPEN_TOS)
+
+/* cpl_pass_establish.l2t_idx fields */
+#define S_L2T_IDX16 5
+#define M_L2T_IDX16 0x7FF
+#define V_L2T_IDX16(x) ((x) << S_L2T_IDX16)
+#define G_L2T_IDX16(x) (((x) >> S_L2T_IDX16) & M_L2T_IDX16)
+
+/* cpl_pass_establish.tcp_opt fields (also applies to cpl_act_establish) */
+#define G_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
+#define G_TCPOPT_SACK(x) (((x) >> 6) & 1)
+#define G_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
+#define G_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
+#define G_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
+
+struct cpl_pass_accept_req {
+ RSS_HDR union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be32 tos_tid;
+ struct tcp_options tcp_options;
+ __u8 dst_mac[6];
+ __be16 vlan_tag;
+ __u8 src_mac[6];
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8:3;
+ __u8 addr_idx:3;
+ __u8 port_idx:1;
+ __u8 exact_match:1;
+#else
+ __u8 exact_match:1;
+ __u8 port_idx:1;
+ __u8 addr_idx:3;
+ __u8:3;
+#endif
+ __u8 rsvd;
+ __be32 rcv_isn;
+ __be32 rsvd2;
+};
+
+struct cpl_pass_accept_rpl {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 opt2;
+ __be32 rsvd;
+ __be32 peer_ip;
+ __be32 opt0h;
+ __be32 opt0l_status;
+};
+
+struct cpl_act_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be32 opt0h;
+ __be32 opt0l;
+ __be32 params;
+ __be32 opt2;
+};
+
+/* cpl_act_open_req.params fields */
+#define S_AOPEN_VLAN_PRI 9
+#define M_AOPEN_VLAN_PRI 0x3
+#define V_AOPEN_VLAN_PRI(x) ((x) << S_AOPEN_VLAN_PRI)
+#define G_AOPEN_VLAN_PRI(x) (((x) >> S_AOPEN_VLAN_PRI) & M_AOPEN_VLAN_PRI)
+
+#define S_AOPEN_VLAN_PRI_VALID 11
+#define V_AOPEN_VLAN_PRI_VALID(x) ((x) << S_AOPEN_VLAN_PRI_VALID)
+#define F_AOPEN_VLAN_PRI_VALID V_AOPEN_VLAN_PRI_VALID(1U)
+
+#define S_AOPEN_PKT_TYPE 12
+#define M_AOPEN_PKT_TYPE 0x3
+#define V_AOPEN_PKT_TYPE(x) ((x) << S_AOPEN_PKT_TYPE)
+#define G_AOPEN_PKT_TYPE(x) (((x) >> S_AOPEN_PKT_TYPE) & M_AOPEN_PKT_TYPE)
+
+#define S_AOPEN_MAC_MATCH 14
+#define M_AOPEN_MAC_MATCH 0x1F
+#define V_AOPEN_MAC_MATCH(x) ((x) << S_AOPEN_MAC_MATCH)
+#define G_AOPEN_MAC_MATCH(x) (((x) >> S_AOPEN_MAC_MATCH) & M_AOPEN_MAC_MATCH)
+
+#define S_AOPEN_MAC_MATCH_VALID 19
+#define V_AOPEN_MAC_MATCH_VALID(x) ((x) << S_AOPEN_MAC_MATCH_VALID)
+#define F_AOPEN_MAC_MATCH_VALID V_AOPEN_MAC_MATCH_VALID(1U)
+
+#define S_AOPEN_IFF_VLAN 20
+#define M_AOPEN_IFF_VLAN 0xFFF
+#define V_AOPEN_IFF_VLAN(x) ((x) << S_AOPEN_IFF_VLAN)
+#define G_AOPEN_IFF_VLAN(x) (((x) >> S_AOPEN_IFF_VLAN) & M_AOPEN_IFF_VLAN)
+
+struct cpl_act_open_rpl {
+ RSS_HDR union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be32 atid;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct cpl_act_establish {
+ RSS_HDR union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be32 tos_tid;
+ __be16 l2t_idx;
+ __be16 tcp_opt;
+ __be32 snd_isn;
+ __be32 rcv_isn;
+};
+
+struct cpl_get_tcb {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 cpuno;
+ __be16 rsvd;
+};
+
+struct cpl_get_tcb_rpl {
+ RSS_HDR union opcode_tid ot;
+ __u8 rsvd;
+ __u8 status;
+ __be16 len;
+};
+
+struct cpl_set_tcb {
+ WR_HDR;
+ union opcode_tid ot;
+ __u8 reply;
+ __u8 cpu_idx;
+ __be16 len;
+};
+
+/* cpl_set_tcb.reply fields */
+#define S_NO_REPLY 7
+#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
+#define F_NO_REPLY V_NO_REPLY(1U)
+
+struct cpl_set_tcb_field {
+ WR_HDR;
+ union opcode_tid ot;
+ __u8 reply;
+ __u8 cpu_idx;
+ __be16 word;
+ __be64 mask;
+ __be64 val;
+};
+
+struct cpl_set_tcb_rpl {
+ RSS_HDR union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct cpl_pcmd {
+ WR_HDR;
+ union opcode_tid ot;
+ __u8 rsvd[3];
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 src:1;
+ __u8 bundle:1;
+ __u8 channel:1;
+ __u8:5;
+#else
+ __u8:5;
+ __u8 channel:1;
+ __u8 bundle:1;
+ __u8 src:1;
+#endif
+ __be32 pcmd_parm[2];
+};
+
+struct cpl_pcmd_reply {
+ RSS_HDR union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd;
+ __be16 len;
+};
+
+struct cpl_close_con_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd;
+};
+
+struct cpl_close_con_rpl {
+ RSS_HDR union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+ __be32 snd_nxt;
+ __be32 rcv_nxt;
+};
+
+struct cpl_close_listserv_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __u8 rsvd0;
+ __u8 cpu_idx;
+ __be16 rsvd1;
+};
+
+struct cpl_close_listserv_rpl {
+ RSS_HDR union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct cpl_abort_req_rss {
+ RSS_HDR union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 status;
+ __u8 rsvd2[6];
+};
+
+struct cpl_abort_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 cmd;
+ __u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl_rss {
+ RSS_HDR union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 status;
+ __u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 cmd;
+ __u8 rsvd2[6];
+};
+
+struct cpl_peer_close {
+ RSS_HDR union opcode_tid ot;
+ __be32 rcv_nxt;
+};
+
+struct tx_data_wr {
+ __be32 wr_hi;
+ __be32 wr_lo;
+ __be32 len;
+ __be32 flags;
+ __be32 sndseq;
+ __be32 param;
+};
+
+/* tx_data_wr.flags fields */
+#define S_TX_ACK_PAGES 21
+#define M_TX_ACK_PAGES 0x7
+#define V_TX_ACK_PAGES(x) ((x) << S_TX_ACK_PAGES)
+#define G_TX_ACK_PAGES(x) (((x) >> S_TX_ACK_PAGES) & M_TX_ACK_PAGES)
+
+/* tx_data_wr.param fields */
+#define S_TX_PORT 0
+#define M_TX_PORT 0x7
+#define V_TX_PORT(x) ((x) << S_TX_PORT)
+#define G_TX_PORT(x) (((x) >> S_TX_PORT) & M_TX_PORT)
+
+#define S_TX_MSS 4
+#define M_TX_MSS 0xF
+#define V_TX_MSS(x) ((x) << S_TX_MSS)
+#define G_TX_MSS(x) (((x) >> S_TX_MSS) & M_TX_MSS)
+
+#define S_TX_QOS 8
+#define M_TX_QOS 0xFF
+#define V_TX_QOS(x) ((x) << S_TX_QOS)
+#define G_TX_QOS(x) (((x) >> S_TX_QOS) & M_TX_QOS)
+
+#define S_TX_SNDBUF 16
+#define M_TX_SNDBUF 0xFFFF
+#define V_TX_SNDBUF(x) ((x) << S_TX_SNDBUF)
+#define G_TX_SNDBUF(x) (((x) >> S_TX_SNDBUF) & M_TX_SNDBUF)
+
+struct cpl_tx_data {
+ union opcode_tid ot;
+ __be32 len;
+ __be32 rsvd;
+ __be16 urg;
+ __be16 flags;
+};
+
+/* cpl_tx_data.flags fields */
+#define S_TX_ULP_SUBMODE 6
+#define M_TX_ULP_SUBMODE 0xF
+#define V_TX_ULP_SUBMODE(x) ((x) << S_TX_ULP_SUBMODE)
+#define G_TX_ULP_SUBMODE(x) (((x) >> S_TX_ULP_SUBMODE) & M_TX_ULP_SUBMODE)
+
+#define S_TX_ULP_MODE 10
+#define M_TX_ULP_MODE 0xF
+#define V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE)
+#define G_TX_ULP_MODE(x) (((x) >> S_TX_ULP_MODE) & M_TX_ULP_MODE)
+
+#define S_TX_SHOVE 14
+#define V_TX_SHOVE(x) ((x) << S_TX_SHOVE)
+#define F_TX_SHOVE V_TX_SHOVE(1U)
+
+#define S_TX_MORE 15
+#define V_TX_MORE(x) ((x) << S_TX_MORE)
+#define F_TX_MORE V_TX_MORE(1U)
+
+/* additional tx_data_wr.flags fields */
+#define S_TX_CPU_IDX 0
+#define M_TX_CPU_IDX 0x3F
+#define V_TX_CPU_IDX(x) ((x) << S_TX_CPU_IDX)
+#define G_TX_CPU_IDX(x) (((x) >> S_TX_CPU_IDX) & M_TX_CPU_IDX)
+
+#define S_TX_URG 16
+#define V_TX_URG(x) ((x) << S_TX_URG)
+#define F_TX_URG V_TX_URG(1U)
+
+#define S_TX_CLOSE 17
+#define V_TX_CLOSE(x) ((x) << S_TX_CLOSE)
+#define F_TX_CLOSE V_TX_CLOSE(1U)
+
+#define S_TX_INIT 18
+#define V_TX_INIT(x) ((x) << S_TX_INIT)
+#define F_TX_INIT V_TX_INIT(1U)
+
+#define S_TX_IMM_ACK 19
+#define V_TX_IMM_ACK(x) ((x) << S_TX_IMM_ACK)
+#define F_TX_IMM_ACK V_TX_IMM_ACK(1U)
+
+#define S_TX_IMM_DMA 20
+#define V_TX_IMM_DMA(x) ((x) << S_TX_IMM_DMA)
+#define F_TX_IMM_DMA V_TX_IMM_DMA(1U)
+
+struct cpl_tx_data_ack {
+ RSS_HDR union opcode_tid ot;
+ __be32 ack_seq;
+};
+
+struct cpl_wr_ack {
+ RSS_HDR union opcode_tid ot;
+ __be16 credits;
+ __be16 rsvd;
+ __be32 snd_nxt;
+ __be32 snd_una;
+};
+
+struct cpl_rdma_ec_status {
+ RSS_HDR union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct mngt_pktsched_wr {
+ __be32 wr_hi;
+ __be32 wr_lo;
+ __u8 mngt_opcode;
+ __u8 rsvd[7];
+ __u8 sched;
+ __u8 idx;
+ __u8 min;
+ __u8 max;
+ __u8 binding;
+ __u8 rsvd1[3];
+};
+
+struct cpl_iscsi_hdr {
+ RSS_HDR union opcode_tid ot;
+ __be16 pdu_len_ddp;
+ __be16 len;
+ __be32 seq;
+ __be16 urg;
+ __u8 rsvd;
+ __u8 status;
+};
+
+/* cpl_iscsi_hdr.pdu_len_ddp fields */
+#define S_ISCSI_PDU_LEN 0
+#define M_ISCSI_PDU_LEN 0x7FFF
+#define V_ISCSI_PDU_LEN(x) ((x) << S_ISCSI_PDU_LEN)
+#define G_ISCSI_PDU_LEN(x) (((x) >> S_ISCSI_PDU_LEN) & M_ISCSI_PDU_LEN)
+
+#define S_ISCSI_DDP 15
+#define V_ISCSI_DDP(x) ((x) << S_ISCSI_DDP)
+#define F_ISCSI_DDP V_ISCSI_DDP(1U)
+
+struct cpl_rx_data {
+ RSS_HDR union opcode_tid ot;
+ __be16 rsvd;
+ __be16 len;
+ __be32 seq;
+ __be16 urg;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 dack_mode:2;
+ __u8 psh:1;
+ __u8 heartbeat:1;
+ __u8:4;
+#else
+ __u8:4;
+ __u8 heartbeat:1;
+ __u8 psh:1;
+ __u8 dack_mode:2;
+#endif
+ __u8 status;
+};
+
+struct cpl_rx_data_ack {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 credit_dack;
+};
+
+/* cpl_rx_data_ack.credit_dack fields */
+#define S_RX_CREDITS 0
+#define M_RX_CREDITS 0x7FFFFFF
+#define V_RX_CREDITS(x) ((x) << S_RX_CREDITS)
+#define G_RX_CREDITS(x) (((x) >> S_RX_CREDITS) & M_RX_CREDITS)
+
+#define S_RX_MODULATE 27
+#define V_RX_MODULATE(x) ((x) << S_RX_MODULATE)
+#define F_RX_MODULATE V_RX_MODULATE(1U)
+
+#define S_RX_FORCE_ACK 28
+#define V_RX_FORCE_ACK(x) ((x) << S_RX_FORCE_ACK)
+#define F_RX_FORCE_ACK V_RX_FORCE_ACK(1U)
+
+#define S_RX_DACK_MODE 29
+#define M_RX_DACK_MODE 0x3
+#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
+#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
+
+#define S_RX_DACK_CHANGE 31
+#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
+#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
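+
+/*
+ * Illustrative sketch: returning receive-window credits while switching
+ * the delayed-ACK mode might look like
+ *
+ * req->credit_dack = htonl(F_RX_DACK_CHANGE | V_RX_DACK_MODE(1) |
+ * V_RX_CREDITS(credits));
+ */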
+
+struct cpl_rx_urg_notify {
+ RSS_HDR union opcode_tid ot;
+ __be32 seq;
+};
+
+struct cpl_rx_ddp_complete {
+ RSS_HDR union opcode_tid ot;
+ __be32 ddp_report;
+};
+
+struct cpl_rx_data_ddp {
+ RSS_HDR union opcode_tid ot;
+ __be16 urg;
+ __be16 len;
+ __be32 seq;
+ union {
+ __be32 nxt_seq;
+ __be32 ddp_report;
+ };
+ __be32 ulp_crc;
+ __be32 ddpvld_status;
+};
+
+/* cpl_rx_data_ddp.ddpvld_status fields */
+#define S_DDP_STATUS 0
+#define M_DDP_STATUS 0xFF
+#define V_DDP_STATUS(x) ((x) << S_DDP_STATUS)
+#define G_DDP_STATUS(x) (((x) >> S_DDP_STATUS) & M_DDP_STATUS)
+
+#define S_DDP_VALID 15
+#define M_DDP_VALID 0x1FFFF
+#define V_DDP_VALID(x) ((x) << S_DDP_VALID)
+#define G_DDP_VALID(x) (((x) >> S_DDP_VALID) & M_DDP_VALID)
+
+#define S_DDP_PPOD_MISMATCH 15
+#define V_DDP_PPOD_MISMATCH(x) ((x) << S_DDP_PPOD_MISMATCH)
+#define F_DDP_PPOD_MISMATCH V_DDP_PPOD_MISMATCH(1U)
+
+#define S_DDP_PDU 16
+#define V_DDP_PDU(x) ((x) << S_DDP_PDU)
+#define F_DDP_PDU V_DDP_PDU(1U)
+
+#define S_DDP_LLIMIT_ERR 17
+#define V_DDP_LLIMIT_ERR(x) ((x) << S_DDP_LLIMIT_ERR)
+#define F_DDP_LLIMIT_ERR V_DDP_LLIMIT_ERR(1U)
+
+#define S_DDP_PPOD_PARITY_ERR 18
+#define V_DDP_PPOD_PARITY_ERR(x) ((x) << S_DDP_PPOD_PARITY_ERR)
+#define F_DDP_PPOD_PARITY_ERR V_DDP_PPOD_PARITY_ERR(1U)
+
+#define S_DDP_PADDING_ERR 19
+#define V_DDP_PADDING_ERR(x) ((x) << S_DDP_PADDING_ERR)
+#define F_DDP_PADDING_ERR V_DDP_PADDING_ERR(1U)
+
+#define S_DDP_HDRCRC_ERR 20
+#define V_DDP_HDRCRC_ERR(x) ((x) << S_DDP_HDRCRC_ERR)
+#define F_DDP_HDRCRC_ERR V_DDP_HDRCRC_ERR(1U)
+
+#define S_DDP_DATACRC_ERR 21
+#define V_DDP_DATACRC_ERR(x) ((x) << S_DDP_DATACRC_ERR)
+#define F_DDP_DATACRC_ERR V_DDP_DATACRC_ERR(1U)
+
+#define S_DDP_INVALID_TAG 22
+#define V_DDP_INVALID_TAG(x) ((x) << S_DDP_INVALID_TAG)
+#define F_DDP_INVALID_TAG V_DDP_INVALID_TAG(1U)
+
+#define S_DDP_ULIMIT_ERR 23
+#define V_DDP_ULIMIT_ERR(x) ((x) << S_DDP_ULIMIT_ERR)
+#define F_DDP_ULIMIT_ERR V_DDP_ULIMIT_ERR(1U)
+
+#define S_DDP_OFFSET_ERR 24
+#define V_DDP_OFFSET_ERR(x) ((x) << S_DDP_OFFSET_ERR)
+#define F_DDP_OFFSET_ERR V_DDP_OFFSET_ERR(1U)
+
+#define S_DDP_COLOR_ERR 25
+#define V_DDP_COLOR_ERR(x) ((x) << S_DDP_COLOR_ERR)
+#define F_DDP_COLOR_ERR V_DDP_COLOR_ERR(1U)
+
+#define S_DDP_TID_MISMATCH 26
+#define V_DDP_TID_MISMATCH(x) ((x) << S_DDP_TID_MISMATCH)
+#define F_DDP_TID_MISMATCH V_DDP_TID_MISMATCH(1U)
+
+#define S_DDP_INVALID_PPOD 27
+#define V_DDP_INVALID_PPOD(x) ((x) << S_DDP_INVALID_PPOD)
+#define F_DDP_INVALID_PPOD V_DDP_INVALID_PPOD(1U)
+
+#define S_DDP_ULP_MODE 28
+#define M_DDP_ULP_MODE 0xF
+#define V_DDP_ULP_MODE(x) ((x) << S_DDP_ULP_MODE)
+#define G_DDP_ULP_MODE(x) (((x) >> S_DDP_ULP_MODE) & M_DDP_ULP_MODE)
+
+/* cpl_rx_data_ddp.ddp_report fields */
+#define S_DDP_OFFSET 0
+#define M_DDP_OFFSET 0x3FFFFF
+#define V_DDP_OFFSET(x) ((x) << S_DDP_OFFSET)
+#define G_DDP_OFFSET(x) (((x) >> S_DDP_OFFSET) & M_DDP_OFFSET)
+
+#define S_DDP_URG 24
+#define V_DDP_URG(x) ((x) << S_DDP_URG)
+#define F_DDP_URG V_DDP_URG(1U)
+
+#define S_DDP_PSH 25
+#define V_DDP_PSH(x) ((x) << S_DDP_PSH)
+#define F_DDP_PSH V_DDP_PSH(1U)
+
+#define S_DDP_BUF_COMPLETE 26
+#define V_DDP_BUF_COMPLETE(x) ((x) << S_DDP_BUF_COMPLETE)
+#define F_DDP_BUF_COMPLETE V_DDP_BUF_COMPLETE(1U)
+
+#define S_DDP_BUF_TIMED_OUT 27
+#define V_DDP_BUF_TIMED_OUT(x) ((x) << S_DDP_BUF_TIMED_OUT)
+#define F_DDP_BUF_TIMED_OUT V_DDP_BUF_TIMED_OUT(1U)
+
+#define S_DDP_BUF_IDX 28
+#define V_DDP_BUF_IDX(x) ((x) << S_DDP_BUF_IDX)
+#define F_DDP_BUF_IDX V_DDP_BUF_IDX(1U)
+
+struct cpl_tx_pkt {
+ WR_HDR;
+ __be32 cntrl;
+ __be32 len;
+};
+
+struct cpl_tx_pkt_lso {
+ WR_HDR;
+ __be32 cntrl;
+ __be32 len;
+
+ __be32 rsvd;
+ __be32 lso_info;
+};
+
+/* cpl_tx_pkt*.cntrl fields */
+#define S_TXPKT_VLAN 0
+#define M_TXPKT_VLAN 0xFFFF
+#define V_TXPKT_VLAN(x) ((x) << S_TXPKT_VLAN)
+#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN)
+
+#define S_TXPKT_INTF 16
+#define M_TXPKT_INTF 0xF
+#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF)
+#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF)
+
+#define S_TXPKT_IPCSUM_DIS 20
+#define V_TXPKT_IPCSUM_DIS(x) ((x) << S_TXPKT_IPCSUM_DIS)
+#define F_TXPKT_IPCSUM_DIS V_TXPKT_IPCSUM_DIS(1U)
+
+#define S_TXPKT_L4CSUM_DIS 21
+#define V_TXPKT_L4CSUM_DIS(x) ((x) << S_TXPKT_L4CSUM_DIS)
+#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1U)
+
+#define S_TXPKT_VLAN_VLD 22
+#define V_TXPKT_VLAN_VLD(x) ((x) << S_TXPKT_VLAN_VLD)
+#define F_TXPKT_VLAN_VLD V_TXPKT_VLAN_VLD(1U)
+
+#define S_TXPKT_LOOPBACK 23
+#define V_TXPKT_LOOPBACK(x) ((x) << S_TXPKT_LOOPBACK)
+#define F_TXPKT_LOOPBACK V_TXPKT_LOOPBACK(1U)
+
+#define S_TXPKT_OPCODE 24
+#define M_TXPKT_OPCODE 0xFF
+#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE)
+#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE)
+
+/* cpl_tx_pkt_lso.lso_info fields */
+#define S_LSO_MSS 0
+#define M_LSO_MSS 0x3FFF
+#define V_LSO_MSS(x) ((x) << S_LSO_MSS)
+#define G_LSO_MSS(x) (((x) >> S_LSO_MSS) & M_LSO_MSS)
+
+#define S_LSO_ETH_TYPE 14
+#define M_LSO_ETH_TYPE 0x3
+#define V_LSO_ETH_TYPE(x) ((x) << S_LSO_ETH_TYPE)
+#define G_LSO_ETH_TYPE(x) (((x) >> S_LSO_ETH_TYPE) & M_LSO_ETH_TYPE)
+
+#define S_LSO_TCPHDR_WORDS 16
+#define M_LSO_TCPHDR_WORDS 0xF
+#define V_LSO_TCPHDR_WORDS(x) ((x) << S_LSO_TCPHDR_WORDS)
+#define G_LSO_TCPHDR_WORDS(x) (((x) >> S_LSO_TCPHDR_WORDS) & M_LSO_TCPHDR_WORDS)
+
+#define S_LSO_IPHDR_WORDS 20
+#define M_LSO_IPHDR_WORDS 0xF
+#define V_LSO_IPHDR_WORDS(x) ((x) << S_LSO_IPHDR_WORDS)
+#define G_LSO_IPHDR_WORDS(x) (((x) >> S_LSO_IPHDR_WORDS) & M_LSO_IPHDR_WORDS)
+
+#define S_LSO_IPV6 24
+#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6)
+#define F_LSO_IPV6 V_LSO_IPV6(1U)
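+
+/*
+ * Illustrative sketch: the Tx path composes cntrl and lso_info roughly
+ * as (mss, eth_type and port_id are caller state):
+ *
+ * hdr->cntrl = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_LSO) |
+ * V_TXPKT_INTF(port_id));
+ * hdr->lso_info = htonl(V_LSO_MSS(mss) | V_LSO_ETH_TYPE(eth_type) |
+ * V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
+ * V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff));
+ */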
+
+struct cpl_trace_pkt {
+#ifdef CHELSIO_FW
+ __u8 rss_opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 err:1;
+ __u8:7;
+#else
+ __u8:7;
+ __u8 err:1;
+#endif
+ __u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 qid:4;
+ __u8:4;
+#else
+ __u8:4;
+ __u8 qid:4;
+#endif
+ __be32 tstamp;
+#endif /* CHELSIO_FW */
+
+ __u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 iff:4;
+ __u8:4;
+#else
+ __u8:4;
+ __u8 iff:4;
+#endif
+ __u8 rsvd[4];
+ __be16 len;
+};
+
+struct cpl_rx_pkt {
+ RSS_HDR __u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 iff:4;
+ __u8 csum_valid:1;
+ __u8 ipmi_pkt:1;
+ __u8 vlan_valid:1;
+ __u8 fragment:1;
+#else
+ __u8 fragment:1;
+ __u8 vlan_valid:1;
+ __u8 ipmi_pkt:1;
+ __u8 csum_valid:1;
+ __u8 iff:4;
+#endif
+ __be16 csum;
+ __be16 vlan;
+ __be16 len;
+};
+
+struct cpl_l2t_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 params;
+ __u8 rsvd[2];
+ __u8 dst_mac[6];
+};
+
+/* cpl_l2t_write_req.params fields */
+#define S_L2T_W_IDX 0
+#define M_L2T_W_IDX 0x7FF
+#define V_L2T_W_IDX(x) ((x) << S_L2T_W_IDX)
+#define G_L2T_W_IDX(x) (((x) >> S_L2T_W_IDX) & M_L2T_W_IDX)
+
+#define S_L2T_W_VLAN 11
+#define M_L2T_W_VLAN 0xFFF
+#define V_L2T_W_VLAN(x) ((x) << S_L2T_W_VLAN)
+#define G_L2T_W_VLAN(x) (((x) >> S_L2T_W_VLAN) & M_L2T_W_VLAN)
+
+#define S_L2T_W_IFF 23
+#define M_L2T_W_IFF 0xF
+#define V_L2T_W_IFF(x) ((x) << S_L2T_W_IFF)
+#define G_L2T_W_IFF(x) (((x) >> S_L2T_W_IFF) & M_L2T_W_IFF)
+
+#define S_L2T_W_PRIO 27
+#define M_L2T_W_PRIO 0x7
+#define V_L2T_W_PRIO(x) ((x) << S_L2T_W_PRIO)
+#define G_L2T_W_PRIO(x) (((x) >> S_L2T_W_PRIO) & M_L2T_W_PRIO)
+
+struct cpl_l2t_write_rpl {
+ RSS_HDR union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd[3];
+};
+
+struct cpl_l2t_read_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 rsvd;
+ __be16 l2t_idx;
+};
+
+struct cpl_l2t_read_rpl {
+ RSS_HDR union opcode_tid ot;
+ __be32 params;
+ __u8 rsvd[2];
+ __u8 dst_mac[6];
+};
+
+/* cpl_l2t_read_rpl.params fields */
+#define S_L2T_R_PRIO 0
+#define M_L2T_R_PRIO 0x7
+#define V_L2T_R_PRIO(x) ((x) << S_L2T_R_PRIO)
+#define G_L2T_R_PRIO(x) (((x) >> S_L2T_R_PRIO) & M_L2T_R_PRIO)
+
+#define S_L2T_R_VLAN 8
+#define M_L2T_R_VLAN 0xFFF
+#define V_L2T_R_VLAN(x) ((x) << S_L2T_R_VLAN)
+#define G_L2T_R_VLAN(x) (((x) >> S_L2T_R_VLAN) & M_L2T_R_VLAN)
+
+#define S_L2T_R_IFF 20
+#define M_L2T_R_IFF 0xF
+#define V_L2T_R_IFF(x) ((x) << S_L2T_R_IFF)
+#define G_L2T_R_IFF(x) (((x) >> S_L2T_R_IFF) & M_L2T_R_IFF)
+
+#define S_L2T_STATUS 24
+#define M_L2T_STATUS 0xFF
+#define V_L2T_STATUS(x) ((x) << S_L2T_STATUS)
+#define G_L2T_STATUS(x) (((x) >> S_L2T_STATUS) & M_L2T_STATUS)
+
+struct cpl_smt_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 mtu_idx:4;
+ __u8 iff:4;
+#else
+ __u8 iff:4;
+ __u8 mtu_idx:4;
+#endif
+ __be16 rsvd2;
+ __be16 rsvd3;
+ __u8 src_mac1[6];
+ __be16 rsvd4;
+ __u8 src_mac0[6];
+};
+
+struct cpl_smt_write_rpl {
+ RSS_HDR union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd[3];
+};
+
+struct cpl_smt_read_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8:4;
+ __u8 iff:4;
+#else
+ __u8 iff:4;
+ __u8:4;
+#endif
+ __be16 rsvd2;
+};
+
+struct cpl_smt_read_rpl {
+ RSS_HDR union opcode_tid ot;
+ __u8 status;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 mtu_idx:4;
+ __u8:4;
+#else
+ __u8:4;
+ __u8 mtu_idx:4;
+#endif
+ __be16 rsvd2;
+ __be16 rsvd3;
+ __u8 src_mac1[6];
+ __be16 rsvd4;
+ __u8 src_mac0[6];
+};
+
+struct cpl_rte_delete_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 params;
+};
+
+/* { cpl_rte_delete_req, cpl_rte_read_req }.params fields */
+#define S_RTE_REQ_LUT_IX 8
+#define M_RTE_REQ_LUT_IX 0x7FF
+#define V_RTE_REQ_LUT_IX(x) ((x) << S_RTE_REQ_LUT_IX)
+#define G_RTE_REQ_LUT_IX(x) (((x) >> S_RTE_REQ_LUT_IX) & M_RTE_REQ_LUT_IX)
+
+#define S_RTE_REQ_LUT_BASE 19
+#define M_RTE_REQ_LUT_BASE 0x7FF
+#define V_RTE_REQ_LUT_BASE(x) ((x) << S_RTE_REQ_LUT_BASE)
+#define G_RTE_REQ_LUT_BASE(x) (((x) >> S_RTE_REQ_LUT_BASE) & M_RTE_REQ_LUT_BASE)
+
+#define S_RTE_READ_REQ_SELECT 31
+#define V_RTE_READ_REQ_SELECT(x) ((x) << S_RTE_READ_REQ_SELECT)
+#define F_RTE_READ_REQ_SELECT V_RTE_READ_REQ_SELECT(1U)
+
+struct cpl_rte_delete_rpl {
+ RSS_HDR union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd[3];
+};
+
+struct cpl_rte_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8:6;
+ __u8 write_tcam:1;
+ __u8 write_l2t_lut:1;
+#else
+ __u8 write_l2t_lut:1;
+ __u8 write_tcam:1;
+ __u8:6;
+#endif
+ __u8 rsvd[3];
+ __be32 lut_params;
+ __be16 rsvd2;
+ __be16 l2t_idx;
+ __be32 netmask;
+ __be32 faddr;
+};
+
+/* cpl_rte_write_req.lut_params fields */
+#define S_RTE_WRITE_REQ_LUT_IX 10
+#define M_RTE_WRITE_REQ_LUT_IX 0x7FF
+#define V_RTE_WRITE_REQ_LUT_IX(x) ((x) << S_RTE_WRITE_REQ_LUT_IX)
+#define G_RTE_WRITE_REQ_LUT_IX(x) (((x) >> S_RTE_WRITE_REQ_LUT_IX) & M_RTE_WRITE_REQ_LUT_IX)
+
+#define S_RTE_WRITE_REQ_LUT_BASE 21
+#define M_RTE_WRITE_REQ_LUT_BASE 0x7FF
+#define V_RTE_WRITE_REQ_LUT_BASE(x) ((x) << S_RTE_WRITE_REQ_LUT_BASE)
+#define G_RTE_WRITE_REQ_LUT_BASE(x) (((x) >> S_RTE_WRITE_REQ_LUT_BASE) & M_RTE_WRITE_REQ_LUT_BASE)
+
+struct cpl_rte_write_rpl {
+ RSS_HDR union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd[3];
+};
+
+struct cpl_rte_read_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 params;
+};
+
+struct cpl_rte_read_rpl {
+ RSS_HDR union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd0;
+ __be16 l2t_idx;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8:7;
+ __u8 select:1;
+#else
+ __u8 select:1;
+ __u8:7;
+#endif
+ __u8 rsvd2[3];
+ __be32 addr;
+};
+
+struct cpl_tid_release {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd;
+};
+
+struct cpl_barrier {
+ WR_HDR;
+ __u8 opcode;
+ __u8 rsvd[7];
+};
+
+struct cpl_rdma_read_req {
+ __u8 opcode;
+ __u8 rsvd[15];
+};
+
+struct cpl_rdma_terminate {
+#ifdef CHELSIO_FW
+ __u8 opcode;
+ __u8 rsvd[2];
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 rspq:3;
+ __u8:5;
+#else
+ __u8:5;
+ __u8 rspq:3;
+#endif
+ __be32 tid_len;
+#endif
+ __be32 msn;
+ __be32 mo;
+ __u8 data[0];
+};
+
+/* cpl_rdma_terminate.tid_len fields */
+#define S_FLIT_CNT 0
+#define M_FLIT_CNT 0xFF
+#define V_FLIT_CNT(x) ((x) << S_FLIT_CNT)
+#define G_FLIT_CNT(x) (((x) >> S_FLIT_CNT) & M_FLIT_CNT)
+
+#define S_TERM_TID 8
+#define M_TERM_TID 0xFFFFF
+#define V_TERM_TID(x) ((x) << S_TERM_TID)
+#define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID)
+
+/* ULP_TX opcodes */
+enum { ULP_MEM_READ = 2, ULP_MEM_WRITE = 3, ULP_TXPKT = 4 };
+
+#define S_ULPTX_CMD 28
+#define M_ULPTX_CMD 0xF
+#define V_ULPTX_CMD(x) ((x) << S_ULPTX_CMD)
+
+#define S_ULPTX_NFLITS 0
+#define M_ULPTX_NFLITS 0xFF
+#define V_ULPTX_NFLITS(x) ((x) << S_ULPTX_NFLITS)
+
+struct ulp_mem_io {
+ WR_HDR;
+ __be32 cmd_lock_addr;
+ __be32 len;
+};
+
+/* ulp_mem_io.cmd_lock_addr fields */
+#define S_ULP_MEMIO_ADDR 0
+#define M_ULP_MEMIO_ADDR 0x7FFFFFF
+#define V_ULP_MEMIO_ADDR(x) ((x) << S_ULP_MEMIO_ADDR)
+#define S_ULP_MEMIO_LOCK 27
+#define V_ULP_MEMIO_LOCK(x) ((x) << S_ULP_MEMIO_LOCK)
+#define F_ULP_MEMIO_LOCK V_ULP_MEMIO_LOCK(1U)
+
+/* ulp_mem_io.len fields */
+#define S_ULP_MEMIO_DATA_LEN 28
+#define M_ULP_MEMIO_DATA_LEN 0xF
+#define V_ULP_MEMIO_DATA_LEN(x) ((x) << S_ULP_MEMIO_DATA_LEN)
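+
+/*
+ * Illustrative sketch (addr, ndata and nflits are caller-supplied): a
+ * ULP memory write request is built from the fields above, e.g.
+ *
+ * req->cmd_lock_addr = htonl(V_ULPTX_CMD(ULP_MEM_WRITE) |
+ * V_ULP_MEMIO_ADDR(addr));
+ * req->len = htonl(V_ULP_MEMIO_DATA_LEN(ndata) | V_ULPTX_NFLITS(nflits));
+ */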
+
+#endif /* T3_CPL_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
new file mode 100644
index 000000000000..44ac2f40b644
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -0,0 +1,3785 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "common.h"
+#include "regs.h"
+#include "sge_defs.h"
+#include "firmware_exports.h"
+
+static void t3_port_intr_clear(struct adapter *adapter, int idx);
+
+/**
+ * t3_wait_op_done_val - wait until an operation is completed
+ * @adapter: the adapter performing the operation
+ * @reg: the register to check for completion
+ * @mask: a single-bit field within @reg that indicates completion
+ * @polarity: the value of the field when the operation is completed
+ * @attempts: number of check iterations
+ * @delay: delay in usecs between iterations
+ * @valp: where to store the value of the register at completion time
+ *
+ * Wait until an operation is completed by checking a bit in a register
+ * up to @attempts times. If @valp is not NULL the value of the register
+ * at the time it indicated completion is stored there. Returns 0 if the
+ * operation completes and -EAGAIN otherwise.
+ */
+int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay, u32 *valp)
+{
+ while (1) {
+ u32 val = t3_read_reg(adapter, reg);
+
+ if (!!(val & mask) == polarity) {
+ if (valp)
+ *valp = val;
+ return 0;
+ }
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ udelay(delay);
+ }
+}
+
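+/*
+ * Usage sketch (hypothetical caller): poll the serial-flash busy bit for
+ * up to 5 attempts, 10 usecs apart, capturing the register value observed
+ * at completion:
+ *
+ *    u32 v;
+ *
+ *    if (t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0, 5, 10, &v))
+ *        CH_ERR(adapter, "SF_OP stayed busy\n");
+ */
+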
+/**
+ * t3_write_regs - write a bunch of registers
+ * @adapter: the adapter to program
+ * @p: an array of register address/register value pairs
+ * @n: the number of address/value pairs
+ * @offset: register address offset
+ *
+ * Takes an array of register address/register value pairs and writes each
+ * value to the corresponding register. Register addresses are adjusted
+ * by the supplied offset.
+ */
+void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
+ int n, unsigned int offset)
+{
+ while (n--) {
+ t3_write_reg(adapter, p->reg_addr + offset, p->val);
+ p++;
+ }
+}
+
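+/*
+ * Usage sketch (hypothetical table): program several registers in one call.
+ * t3_intr_enable() at the end of this file uses exactly this pattern.
+ *
+ *    static const struct addr_val_pair avp[] = {
+ *        {A_SG_INT_ENABLE, SGE_INTR_MASK},
+ *        {A_MPS_INT_ENABLE, MPS_INTR_MASK},
+ *    };
+ *
+ *    t3_write_regs(adapter, avp, ARRAY_SIZE(avp), 0);
+ */
+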
+/**
+ * t3_set_reg_field - set a register field to a value
+ * @adapter: the adapter to program
+ * @addr: the register address
+ * @mask: specifies the portion of the register to modify
+ * @val: the new value for the register field
+ *
+ * Sets a register field specified by the supplied mask to the
+ * given value.
+ */
+void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
+ u32 val)
+{
+ u32 v = t3_read_reg(adapter, addr) & ~mask;
+
+ t3_write_reg(adapter, addr, v | val);
+ t3_read_reg(adapter, addr); /* flush */
+}
+
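+/*
+ * Usage sketch: update only the ST field of MI1_CFG, leaving the other
+ * bits intact; the MDIO helpers below do exactly this:
+ *
+ *    t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
+ */
+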
+/**
+ * t3_read_indirect - read indirectly addressed registers
+ * @adap: the adapter
+ * @addr_reg: register holding the indirect address
+ * @data_reg: register holding the value of the indirect register
+ * @vals: where the read register values are stored
+ * @nregs: how many indirect registers to read
+ * @start_idx: index of first indirect register to read
+ *
+ * Reads registers that are accessed indirectly through an address/data
+ * register pair.
+ */
+static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals,
+ unsigned int nregs, unsigned int start_idx)
+{
+ while (nregs--) {
+ t3_write_reg(adap, addr_reg, start_idx);
+ *vals++ = t3_read_reg(adap, data_reg);
+ start_idx++;
+ }
+}
+
+/**
+ * t3_mc7_bd_read - read from MC7 through backdoor accesses
+ * @mc7: identifies MC7 to read from
+ * @start: index of first 64-bit word to read
+ * @n: number of 64-bit words to read
+ * @buf: where to store the read result
+ *
+ * Reads @n 64-bit words from MC7 starting at word @start, using backdoor
+ * accesses.
+ */
+int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
+ u64 *buf)
+{
+ static const int shift[] = { 0, 0, 16, 24 };
+ static const int step[] = { 0, 32, 16, 8 };
+
+ unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
+ struct adapter *adap = mc7->adapter;
+
+ if (start >= size64 || start + n > size64)
+ return -EINVAL;
+
+ start *= (8 << mc7->width);
+ while (n--) {
+ int i;
+ u64 val64 = 0;
+
+ for (i = (1 << mc7->width) - 1; i >= 0; --i) {
+ int attempts = 10;
+ u32 val;
+
+ t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
+ t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
+ val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
+ while ((val & F_BUSY) && attempts--)
+ val = t3_read_reg(adap,
+ mc7->offset + A_MC7_BD_OP);
+ if (val & F_BUSY)
+ return -EIO;
+
+ val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
+ if (mc7->width == 0) {
+ val64 = t3_read_reg(adap,
+ mc7->offset +
+ A_MC7_BD_DATA0);
+ val64 |= (u64) val << 32;
+ } else {
+ if (mc7->width > 1)
+ val >>= shift[mc7->width];
+ val64 |= (u64) val << (step[mc7->width] * i);
+ }
+ start += 8;
+ }
+ *buf++ = val64;
+ }
+ return 0;
+}
+
+/*
+ * Initialize MI1.
+ */
+static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
+{
+ u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
+ u32 val = F_PREEN | V_CLKDIV(clkdiv);
+
+ t3_write_reg(adap, A_MI1_CFG, val);
+}
+
+#define MDIO_ATTEMPTS 20
+
+/*
+ * MI1 read/write operations for clause 22 PHYs.
+ */
+static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ int ret;
+ u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
+
+ mutex_lock(&adapter->mdio_lock);
+ t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
+ t3_write_reg(adapter, A_MI1_ADDR, addr);
+ t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
+ ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
+ if (!ret)
+ ret = t3_read_reg(adapter, A_MI1_DATA);
+ mutex_unlock(&adapter->mdio_lock);
+ return ret;
+}
+
+static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr, u16 val)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ int ret;
+ u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
+
+ mutex_lock(&adapter->mdio_lock);
+ t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
+ t3_write_reg(adapter, A_MI1_ADDR, addr);
+ t3_write_reg(adapter, A_MI1_DATA, val);
+ t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
+ ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
+ mutex_unlock(&adapter->mdio_lock);
+ return ret;
+}
+
+static const struct mdio_ops mi1_mdio_ops = {
+ .read = t3_mi1_read,
+ .write = t3_mi1_write,
+ .mode_support = MDIO_SUPPORTS_C22
+};
+
+/*
+ * Performs the address cycle for clause 45 PHYs.
+ * Must be called with the adapter's mdio_lock mutex held.
+ */
+static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
+ int reg_addr)
+{
+ u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
+
+ t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
+ t3_write_reg(adapter, A_MI1_ADDR, addr);
+ t3_write_reg(adapter, A_MI1_DATA, reg_addr);
+ t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
+ return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
+ MDIO_ATTEMPTS, 10);
+}
+
+/*
+ * MI1 read/write operations for indirect-addressed PHYs.
+ */
+static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ int ret;
+
+ mutex_lock(&adapter->mdio_lock);
+ ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
+ if (!ret) {
+ t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
+ ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
+ MDIO_ATTEMPTS, 10);
+ if (!ret)
+ ret = t3_read_reg(adapter, A_MI1_DATA);
+ }
+ mutex_unlock(&adapter->mdio_lock);
+ return ret;
+}
+
+static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
+ u16 reg_addr, u16 val)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ int ret;
+
+ mutex_lock(&adapter->mdio_lock);
+ ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
+ if (!ret) {
+ t3_write_reg(adapter, A_MI1_DATA, val);
+ t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
+ ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
+ MDIO_ATTEMPTS, 10);
+ }
+ mutex_unlock(&adapter->mdio_lock);
+ return ret;
+}
+
+static const struct mdio_ops mi1_mdio_ext_ops = {
+ .read = mi1_ext_read,
+ .write = mi1_ext_write,
+ .mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
+};
+
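+/*
+ * Note on the two ops structures above: clause 22 PHYs take the register
+ * address directly in MI1_ADDR, while clause 45 PHYs need an extra address
+ * cycle (mi1_wr_addr()) that first latches the 16-bit register address
+ * through MI1_DATA before the actual read or write is issued.
+ */
+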
+/**
+ * t3_mdio_change_bits - modify the value of a PHY register
+ * @phy: the PHY to operate on
+ * @mmd: the device address
+ * @reg: the register address
+ * @clear: what part of the register value to mask off
+ * @set: what part of the register value to set
+ *
+ * Changes the value of a PHY register by applying a mask to its current
+ * value and ORing the result with a new value.
+ */
+int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
+ unsigned int set)
+{
+ int ret;
+ unsigned int val;
+
+ ret = t3_mdio_read(phy, mmd, reg, &val);
+ if (!ret) {
+ val &= ~clear;
+ ret = t3_mdio_write(phy, mmd, reg, val | set);
+ }
+ return ret;
+}
+
+/**
+ * t3_phy_reset - reset a PHY block
+ * @phy: the PHY to operate on
+ * @mmd: the device address of the PHY block to reset
+ * @wait: how long to wait for the reset to complete in 1ms increments
+ *
+ * Resets a PHY block and optionally waits for the reset to complete.
+ * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
+ * for 10G PHYs.
+ */
+int t3_phy_reset(struct cphy *phy, int mmd, int wait)
+{
+ int err;
+ unsigned int ctl;
+
+ err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
+ MDIO_CTRL1_RESET);
+ if (err || !wait)
+ return err;
+
+ do {
+ err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
+ if (err)
+ return err;
+ ctl &= MDIO_CTRL1_RESET;
+ if (ctl)
+ msleep(1);
+ } while (ctl && --wait);
+
+ return ctl ? -1 : 0;
+}
+
+/**
+ * t3_phy_advertise - set the PHY advertisement registers for autoneg
+ * @phy: the PHY to operate on
+ * @advert: bitmap of capabilities the PHY should advertise
+ *
+ * Sets a 10/100/1000 PHY's advertisement registers to advertise the
+ * requested capabilities.
+ */
+int t3_phy_advertise(struct cphy *phy, unsigned int advert)
+{
+ int err;
+ unsigned int val = 0;
+
+ err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
+ if (err)
+ return err;
+
+ val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
+ if (advert & ADVERTISED_1000baseT_Half)
+ val |= ADVERTISE_1000HALF;
+ if (advert & ADVERTISED_1000baseT_Full)
+ val |= ADVERTISE_1000FULL;
+
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
+ if (err)
+ return err;
+
+ val = 1;
+ if (advert & ADVERTISED_10baseT_Half)
+ val |= ADVERTISE_10HALF;
+ if (advert & ADVERTISED_10baseT_Full)
+ val |= ADVERTISE_10FULL;
+ if (advert & ADVERTISED_100baseT_Half)
+ val |= ADVERTISE_100HALF;
+ if (advert & ADVERTISED_100baseT_Full)
+ val |= ADVERTISE_100FULL;
+ if (advert & ADVERTISED_Pause)
+ val |= ADVERTISE_PAUSE_CAP;
+ if (advert & ADVERTISED_Asym_Pause)
+ val |= ADVERTISE_PAUSE_ASYM;
+ return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
+}
+
+/**
+ * t3_phy_advertise_fiber - set fiber PHY advertisement register
+ * @phy: the PHY to operate on
+ * @advert: bitmap of capabilities the PHY should advertise
+ *
+ * Sets a fiber PHY's advertisement register to advertise the
+ * requested capabilities.
+ */
+int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
+{
+ unsigned int val = 0;
+
+ if (advert & ADVERTISED_1000baseT_Half)
+ val |= ADVERTISE_1000XHALF;
+ if (advert & ADVERTISED_1000baseT_Full)
+ val |= ADVERTISE_1000XFULL;
+ if (advert & ADVERTISED_Pause)
+ val |= ADVERTISE_1000XPAUSE;
+ if (advert & ADVERTISED_Asym_Pause)
+ val |= ADVERTISE_1000XPSE_ASYM;
+ return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
+}
+
+/**
+ * t3_set_phy_speed_duplex - force PHY speed and duplex
+ * @phy: the PHY to operate on
+ * @speed: requested PHY speed
+ * @duplex: requested PHY duplex
+ *
+ * Force a 10/100/1000 PHY's speed and duplex. This also disables
+ * auto-negotiation except for GigE, where auto-negotiation is mandatory.
+ */
+int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
+{
+ int err;
+ unsigned int ctl;
+
+ err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
+ if (err)
+ return err;
+
+ if (speed >= 0) {
+ ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
+ if (speed == SPEED_100)
+ ctl |= BMCR_SPEED100;
+ else if (speed == SPEED_1000)
+ ctl |= BMCR_SPEED1000;
+ }
+ if (duplex >= 0) {
+ ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
+ if (duplex == DUPLEX_FULL)
+ ctl |= BMCR_FULLDPLX;
+ }
+ if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
+ ctl |= BMCR_ANENABLE;
+ return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
+}
+
+int t3_phy_lasi_intr_enable(struct cphy *phy)
+{
+ return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
+ MDIO_PMA_LASI_LSALARM);
+}
+
+int t3_phy_lasi_intr_disable(struct cphy *phy)
+{
+ return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
+}
+
+int t3_phy_lasi_intr_clear(struct cphy *phy)
+{
+ u32 val;
+
+ return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
+}
+
+int t3_phy_lasi_intr_handler(struct cphy *phy)
+{
+ unsigned int status;
+ int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
+ &status);
+
+ if (err)
+ return err;
+ return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
+}
+
+static const struct adapter_info t3_adap_info[] = {
+ {1, 1, 0,
+ F_GPIO2_OEN | F_GPIO4_OEN |
+ F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
+ &mi1_mdio_ops, "Chelsio PE9000"},
+ {1, 1, 0,
+ F_GPIO2_OEN | F_GPIO4_OEN |
+ F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
+ &mi1_mdio_ops, "Chelsio T302"},
+ {1, 0, 0,
+ F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
+ F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
+ { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
+ &mi1_mdio_ext_ops, "Chelsio T310"},
+ {1, 1, 0,
+ F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
+ F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
+ F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
+ { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
+ &mi1_mdio_ext_ops, "Chelsio T320"},
+ {},
+ {},
+ {1, 0, 0,
+ F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
+ F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
+ { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
+ &mi1_mdio_ext_ops, "Chelsio T310" },
+ {1, 0, 0,
+ F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
+ F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
+ { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
+ &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
+};
+
+/*
+ * Return the adapter_info structure for a given index. Out-of-range indices
+ * return NULL.
+ */
+const struct adapter_info *t3_get_adapter_info(unsigned int id)
+{
+ return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
+}
+
+struct port_type_info {
+ int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *ops);
+};
+
+static const struct port_type_info port_types[] = {
+ { NULL },
+ { t3_ael1002_phy_prep },
+ { t3_vsc8211_phy_prep },
+ { NULL },
+ { t3_xaui_direct_phy_prep },
+ { t3_ael2005_phy_prep },
+ { t3_qt2045_phy_prep },
+ { t3_ael1006_phy_prep },
+ { NULL },
+ { t3_aq100x_phy_prep },
+ { t3_ael2020_phy_prep },
+};
+
+#define VPD_ENTRY(name, len) \
+ u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
+
+/*
+ * Partial EEPROM Vital Product Data structure. Includes only the ID and
+ * VPD-R sections.
+ */
+struct t3_vpd {
+ u8 id_tag;
+ u8 id_len[2];
+ u8 id_data[16];
+ u8 vpdr_tag;
+ u8 vpdr_len[2];
+ VPD_ENTRY(pn, 16); /* part number */
+ VPD_ENTRY(ec, 16); /* EC level */
+ VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
+ VPD_ENTRY(na, 12); /* MAC address base */
+ VPD_ENTRY(cclk, 6); /* core clock */
+ VPD_ENTRY(mclk, 6); /* mem clock */
+ VPD_ENTRY(uclk, 6); /* uP clk */
+ VPD_ENTRY(mdc, 6); /* MDIO clk */
+ VPD_ENTRY(mt, 2); /* mem timing */
+ VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
+ VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
+ VPD_ENTRY(port0, 2); /* PHY0 complex */
+ VPD_ENTRY(port1, 2); /* PHY1 complex */
+ VPD_ENTRY(port2, 2); /* PHY2 complex */
+ VPD_ENTRY(port3, 2); /* PHY3 complex */
+ VPD_ENTRY(rv, 1); /* csum */
+ u32 pad; /* for multiple-of-4 sizing and alignment */
+};
+
+#define EEPROM_MAX_POLL 40
+#define EEPROM_STAT_ADDR 0x4000
+#define VPD_BASE 0xc00
+
+/**
+ * t3_seeprom_read - read a VPD EEPROM location
+ * @adapter: adapter to read
+ * @addr: EEPROM address
+ * @data: where to store the read data
+ *
+ * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
+ * VPD ROM capability. A zero is written to the flag bit when the
+ * address is written to the control register. The hardware device will
+ * set the flag to 1 when 4 bytes have been read into the data register.
+ */
+int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
+{
+ u16 val;
+ int attempts = EEPROM_MAX_POLL;
+ u32 v;
+ unsigned int base = adapter->params.pci.vpd_cap_addr;
+
+ if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
+ return -EINVAL;
+
+ pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
+ do {
+ udelay(10);
+ pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
+ } while (!(val & PCI_VPD_ADDR_F) && --attempts);
+
+ if (!(val & PCI_VPD_ADDR_F)) {
+ CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
+ return -EIO;
+ }
+ pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
+ *data = cpu_to_le32(v);
+ return 0;
+}
+
+/**
+ * t3_seeprom_write - write a VPD EEPROM location
+ * @adapter: adapter to write
+ * @addr: EEPROM address
+ * @data: value to write
+ *
+ * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
+ * VPD ROM capability.
+ */
+int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
+{
+ u16 val;
+ int attempts = EEPROM_MAX_POLL;
+ unsigned int base = adapter->params.pci.vpd_cap_addr;
+
+ if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
+ return -EINVAL;
+
+ pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
+ le32_to_cpu(data));
+ pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
+ addr | PCI_VPD_ADDR_F);
+ do {
+ msleep(1);
+ pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
+ } while ((val & PCI_VPD_ADDR_F) && --attempts);
+
+ if (val & PCI_VPD_ADDR_F) {
+ CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
+ return -EIO;
+ }
+ return 0;
+}
+
+/**
+ * t3_seeprom_wp - enable/disable EEPROM write protection
+ * @adapter: the adapter
+ * @enable: 1 to enable write protection, 0 to disable it
+ *
+ * Enables or disables write protection on the serial EEPROM.
+ */
+int t3_seeprom_wp(struct adapter *adapter, int enable)
+{
+ return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
+}
+
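+/*
+ * Usage sketch (hypothetical caller): a VPD update typically brackets the
+ * write with write-protect toggles:
+ *
+ *    t3_seeprom_wp(adapter, 0);
+ *    err = t3_seeprom_write(adapter, addr, data);
+ *    t3_seeprom_wp(adapter, 1);
+ */
+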
+/**
+ * get_vpd_params - read VPD parameters from VPD EEPROM
+ * @adapter: adapter to read
+ * @p: where to store the parameters
+ *
+ * Reads card parameters stored in VPD EEPROM.
+ */
+static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+{
+ int i, addr, ret;
+ struct t3_vpd vpd;
+
+ /*
+ * Card information is normally at VPD_BASE but some early cards had
+ * it at 0.
+ */
+ ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
+ if (ret)
+ return ret;
+ addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
+
+ for (i = 0; i < sizeof(vpd); i += 4) {
+ ret = t3_seeprom_read(adapter, addr + i,
+ (__le32 *)((u8 *)&vpd + i));
+ if (ret)
+ return ret;
+ }
+
+ p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
+ p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
+ p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
+ p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
+ p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
+ memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
+
+ /* Old eeproms didn't have port information */
+ if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
+ p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
+ p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
+ } else {
+ p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
+ p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
+ p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
+ p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
+ }
+
+ for (i = 0; i < 6; i++)
+ p->eth_base[i] = hex_to_bin(vpd.na_data[2 * i]) * 16 +
+ hex_to_bin(vpd.na_data[2 * i + 1]);
+ return 0;
+}
+
+/* serial flash and firmware constants */
+enum {
+ SF_ATTEMPTS = 5, /* max retries for SF1 operations */
+ SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
+ SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
+
+ /* flash command opcodes */
+ SF_PROG_PAGE = 2, /* program page */
+ SF_WR_DISABLE = 4, /* disable writes */
+ SF_RD_STATUS = 5, /* read status register */
+ SF_WR_ENABLE = 6, /* enable writes */
+ SF_RD_DATA_FAST = 0xb, /* read flash */
+ SF_ERASE_SECTOR = 0xd8, /* erase sector */
+
+ FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
+ FW_VERS_ADDR = 0x7fffc, /* flash address holding FW version */
+ FW_MIN_SIZE = 8 /* at least version and csum */
+};
+
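+/*
+ * Worked example of the layout above: with 64KB sectors the firmware boot
+ * address 0x70000 falls in sector 0x70000 / 0x10000 = 7, which is why
+ * t3_load_fw() below erases sector FW_FLASH_BOOT_ADDR >> 16 before
+ * programming, and the version word at 0x7fffc occupies the last 4 bytes
+ * of the 512KB part.
+ */
+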
+/**
+ * sf1_read - read data from the serial flash
+ * @adapter: the adapter
+ * @byte_cnt: number of bytes to read
+ * @cont: whether another operation will be chained
+ * @valp: where to store the read data
+ *
+ * Reads up to 4 bytes of data from the serial flash. The location of
+ * the read needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
+ u32 *valp)
+{
+ int ret;
+
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
+ return -EBUSY;
+ t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
+ ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
+ if (!ret)
+ *valp = t3_read_reg(adapter, A_SF_DATA);
+ return ret;
+}
+
+/**
+ * sf1_write - write data to the serial flash
+ * @adapter: the adapter
+ * @byte_cnt: number of bytes to write
+ * @cont: whether another operation will be chained
+ * @val: value to write
+ *
+ * Writes up to 4 bytes of data to the serial flash. The location of
+ * the write needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
+ u32 val)
+{
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
+ return -EBUSY;
+ t3_write_reg(adapter, A_SF_DATA, val);
+ t3_write_reg(adapter, A_SF_OP,
+ V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
+ return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
+}
+
+/**
+ * flash_wait_op - wait for a flash operation to complete
+ * @adapter: the adapter
+ * @attempts: max number of polls of the status register
+ * @delay: delay between polls in ms
+ *
+ * Wait for a flash operation to complete by polling the status register.
+ */
+static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
+{
+ int ret;
+ u32 status;
+
+ while (1) {
+ if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
+ (ret = sf1_read(adapter, 1, 0, &status)) != 0)
+ return ret;
+ if (!(status & 1))
+ return 0;
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ msleep(delay);
+ }
+}
+
+/**
+ * t3_read_flash - read words from serial flash
+ * @adapter: the adapter
+ * @addr: the start address for the read
+ * @nwords: how many 32-bit words to read
+ * @data: where to store the read data
+ * @byte_oriented: whether to store data as bytes or as words
+ *
+ * Read the specified number of 32-bit words from the serial flash.
+ * If @byte_oriented is set the read data is stored as a byte array
+ * (i.e., big-endian), otherwise as 32-bit words in the platform's
+ * natural endianness.
+ */
+static int t3_read_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int nwords, u32 *data, int byte_oriented)
+{
+ int ret;
+
+ if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
+ return -EINVAL;
+
+ addr = swab32(addr) | SF_RD_DATA_FAST;
+
+ if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
+ (ret = sf1_read(adapter, 1, 1, data)) != 0)
+ return ret;
+
+ for (; nwords; nwords--, data++) {
+ ret = sf1_read(adapter, 4, nwords > 1, data);
+ if (ret)
+ return ret;
+ if (byte_oriented)
+ *data = htonl(*data);
+ }
+ return 0;
+}
+
+/**
+ * t3_write_flash - write up to a page of data to the serial flash
+ * @adapter: the adapter
+ * @addr: the start address to write
+ * @n: length of data to write
+ * @data: the data to write
+ *
+ * Writes up to a page of data (256 bytes) to the serial flash starting
+ * at the given address.
+ */
+static int t3_write_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int n, const u8 *data)
+{
+ int ret;
+ u32 buf[64];
+ unsigned int i, c, left, val, offset = addr & 0xff;
+
+ if (addr + n > SF_SIZE || offset + n > 256)
+ return -EINVAL;
+
+ val = swab32(addr) | SF_PROG_PAGE;
+
+ if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
+ (ret = sf1_write(adapter, 4, 1, val)) != 0)
+ return ret;
+
+ for (left = n; left; left -= c) {
+ c = min(left, 4U);
+ for (val = 0, i = 0; i < c; ++i)
+ val = (val << 8) + *data++;
+
+ ret = sf1_write(adapter, c, c != left, val);
+ if (ret)
+ return ret;
+ }
+ if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
+ return ret;
+
+ /* Read the page to verify the write succeeded */
+ ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+ if (ret)
+ return ret;
+
+ if (memcmp(data - n, (u8 *) buf + offset, n))
+ return -EIO;
+ return 0;
+}
+
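+/*
+ * Worked example of the page constraint above: a program-page command must
+ * not cross a 256-byte page boundary, so writing 300 bytes at flash offset
+ * 0x100 takes two calls, t3_write_flash(adap, 0x100, 256, p) followed by
+ * t3_write_flash(adap, 0x200, 44, p + 256); a single 300-byte call would
+ * fail the offset + n > 256 check.
+ */
+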
+/**
+ * t3_get_tp_version - read the TP SRAM version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the protocol SRAM version currently loaded in SRAM.
+ */
+int t3_get_tp_version(struct adapter *adapter, u32 *vers)
+{
+ int ret;
+
+ /* Get version loaded in SRAM */
+ t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
+ ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
+ 1, 1, 5, 1);
+ if (ret)
+ return ret;
+
+ *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
+
+ return 0;
+}
+
+/**
+ * t3_check_tpsram_version - check the TP SRAM version
+ * @adapter: the adapter
+ *
+ * Reads the protocol SRAM version currently loaded in SRAM and checks
+ * it against the version the driver was compiled for.
+ */
+int t3_check_tpsram_version(struct adapter *adapter)
+{
+ int ret;
+ u32 vers;
+ unsigned int major, minor;
+
+ if (adapter->params.rev == T3_REV_A)
+ return 0;
+
+ ret = t3_get_tp_version(adapter, &vers);
+ if (ret)
+ return ret;
+
+ major = G_TP_VERSION_MAJOR(vers);
+ minor = G_TP_VERSION_MINOR(vers);
+
+ if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
+ return 0;
+
+ CH_ERR(adapter, "found wrong TP version (%u.%u), "
+ "driver compiled for version %u.%u\n", major, minor,
+ TP_VERSION_MAJOR, TP_VERSION_MINOR);
+ return -EINVAL;
+}
+
+/**
+ * t3_check_tpsram - check if a protocol SRAM image is compatible with this driver
+ * @adapter: the adapter
+ * @tp_sram: the protocol SRAM image to check
+ * @size: image size
+ *
+ * Checks if an adapter's tp sram is compatible with the driver.
+ * Returns 0 if the versions are compatible, a negative error otherwise.
+ */
+int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
+ unsigned int size)
+{
+ u32 csum;
+ unsigned int i;
+ const __be32 *p = (const __be32 *)tp_sram;
+
+ /* Verify checksum */
+ for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+ csum += ntohl(p[i]);
+ if (csum != 0xffffffff) {
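+/*
+ * Illustrative sketch (hypothetical values): a locked ULP memory write
+ * command word combines the fields above as
+ *
+ *    cmd_lock_addr = htonl(V_ULPTX_CMD(ULP_MEM_WRITE) |
+ *                          F_ULP_MEMIO_LOCK |
+ *                          V_ULP_MEMIO_ADDR(addr));
+ *
+ * where addr is a 27-bit device address supplied by the caller.
+ */
+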
+ CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
+ csum);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+enum fw_version_type {
+ FW_VERSION_N3,
+ FW_VERSION_T3
+};
+
+/**
+ * t3_get_fw_version - read the firmware version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the FW version from flash.
+ */
+int t3_get_fw_version(struct adapter *adapter, u32 *vers)
+{
+ return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
+}
+
+/**
+ * t3_check_fw_version - check if the FW is compatible with this driver
+ * @adapter: the adapter
+ *
+ * Checks if an adapter's FW is compatible with the driver. Returns 0
+ * if the versions are compatible, a negative error otherwise.
+ */
+int t3_check_fw_version(struct adapter *adapter)
+{
+ int ret;
+ u32 vers;
+ unsigned int type, major, minor;
+
+ ret = t3_get_fw_version(adapter, &vers);
+ if (ret)
+ return ret;
+
+ type = G_FW_VERSION_TYPE(vers);
+ major = G_FW_VERSION_MAJOR(vers);
+ minor = G_FW_VERSION_MINOR(vers);
+
+ if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
+ minor == FW_VERSION_MINOR)
+ return 0;
+ else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
+ CH_WARN(adapter, "found old FW minor version(%u.%u), "
+ "driver compiled for version %u.%u\n", major, minor,
+ FW_VERSION_MAJOR, FW_VERSION_MINOR);
+ else {
+ CH_WARN(adapter, "found newer FW version(%u.%u), "
+ "driver compiled for version %u.%u\n", major, minor,
+ FW_VERSION_MAJOR, FW_VERSION_MINOR);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/**
+ * t3_flash_erase_sectors - erase a range of flash sectors
+ * @adapter: the adapter
+ * @start: the first sector to erase
+ * @end: the last sector to erase
+ *
+ * Erases the sectors in the given range.
+ */
+static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
+{
+ while (start <= end) {
+ int ret;
+
+ if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
+ (ret = sf1_write(adapter, 4, 0,
+ SF_ERASE_SECTOR | (start << 8))) != 0 ||
+ (ret = flash_wait_op(adapter, 5, 500)) != 0)
+ return ret;
+ start++;
+ }
+ return 0;
+}
+
+/**
+ * t3_load_fw - download firmware
+ * @adapter: the adapter
+ * @fw_data: the firmware image to write
+ * @size: image size
+ *
+ * Write the supplied firmware image to the card's serial flash.
+ * The FW image has the following sections: @size - 8 bytes of code and
+ * data, followed by 4 bytes of FW version, followed by the 32-bit
+ * 1's complement checksum of the whole image.
+ */
+int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
+{
+ u32 csum;
+ unsigned int i;
+ const __be32 *p = (const __be32 *)fw_data;
+ int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
+
+ if ((size & 3) || size < FW_MIN_SIZE)
+ return -EINVAL;
+ if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
+ return -EFBIG;
+
+ for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+ csum += ntohl(p[i]);
+ if (csum != 0xffffffff) {
+ CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
+ csum);
+ return -EINVAL;
+ }
+
+ ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
+ if (ret)
+ goto out;
+
+ size -= 8; /* trim off version and checksum */
+ for (addr = FW_FLASH_BOOT_ADDR; size;) {
+ unsigned int chunk_size = min(size, 256U);
+
+ ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
+ if (ret)
+ goto out;
+
+ addr += chunk_size;
+ fw_data += chunk_size;
+ size -= chunk_size;
+ }
+
+ ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
+out:
+ if (ret)
+ CH_ERR(adapter, "firmware download failed, error %d\n", ret);
+ return ret;
+}
+
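+/*
+ * Caller sketch (hypothetical; the firmware file name is assumed): the OS
+ * driver is expected to fetch a complete image and hand it to t3_load_fw():
+ *
+ *    const struct firmware *fw;
+ *
+ *    if (!request_firmware(&fw, "t3fw.bin", &adapter->pdev->dev)) {
+ *        ret = t3_load_fw(adapter, fw->data, fw->size);
+ *        release_firmware(fw);
+ *    }
+ */
+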
+#define CIM_CTL_BASE 0x2000
+
+/**
+ * t3_cim_ctl_blk_read - read a block from CIM control region
+ * @adap: the adapter
+ * @addr: the start address within the CIM control region
+ * @n: number of words to read
+ * @valp: where to store the result
+ *
+ * Reads a block of 4-byte words from the CIM control region.
+ */
+int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
+ unsigned int n, unsigned int *valp)
+{
+ int ret = 0;
+
+ if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
+ return -EBUSY;
+
+ for ( ; !ret && n--; addr += 4) {
+ t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
+ ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
+ 0, 5, 2);
+ if (!ret)
+ *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
+ }
+ return ret;
+}
+
+static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
+ u32 *rx_hash_high, u32 *rx_hash_low)
+{
+ /* stop Rx unicast traffic */
+ t3_mac_disable_exact_filters(mac);
+
+ /* stop broadcast, multicast, promiscuous mode traffic */
+ *rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
+ t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
+ F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
+ F_DISBCAST);
+
+ *rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
+ t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);
+
+ *rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
+ t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);
+
+ /* Leave time to drain max RX fifo */
+ msleep(1);
+}
+
+static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
+ u32 rx_hash_high, u32 rx_hash_low)
+{
+ t3_mac_enable_exact_filters(mac);
+ t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
+ F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
+ rx_cfg);
+ t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
+ t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
+}
+
+/**
+ * t3_link_changed - handle interface link changes
+ * @adapter: the adapter
+ * @port_id: the port index that changed link state
+ *
+ * Called when a port's link settings change to propagate the new values
+ * to the associated PHY and MAC. After performing the common tasks it
+ * invokes an OS-specific handler.
+ */
+void t3_link_changed(struct adapter *adapter, int port_id)
+{
+ int link_ok, speed, duplex, fc;
+ struct port_info *pi = adap2pinfo(adapter, port_id);
+ struct cphy *phy = &pi->phy;
+ struct cmac *mac = &pi->mac;
+ struct link_config *lc = &pi->link_config;
+
+ phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
+
+ if (!lc->link_ok && link_ok) {
+ u32 rx_cfg, rx_hash_high, rx_hash_low;
+ u32 status;
+
+ t3_xgm_intr_enable(adapter, port_id);
+ t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
+ t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
+ t3_mac_enable(mac, MAC_DIRECTION_RX);
+
+ status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
+ if (status & F_LINKFAULTCHANGE) {
+ mac->stats.link_faults++;
+ pi->link_fault = 1;
+ }
+ t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
+ }
+
+ if (lc->requested_fc & PAUSE_AUTONEG)
+ fc &= lc->requested_fc;
+ else
+ fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+
+ if (link_ok == lc->link_ok && speed == lc->speed &&
+ duplex == lc->duplex && fc == lc->fc)
+ return; /* nothing changed */
+
+ if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
+ uses_xaui(adapter)) {
+ if (link_ok)
+ t3b_pcs_reset(mac);
+ t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
+ link_ok ? F_TXACTENABLE | F_RXEN : 0);
+ }
+ lc->link_ok = link_ok;
+ lc->speed = speed < 0 ? SPEED_INVALID : speed;
+ lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
+
+ if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
+ /* Set MAC speed, duplex, and flow control to match PHY. */
+ t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
+ lc->fc = fc;
+ }
+
+ t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
+ speed, duplex, fc);
+}
+
+void t3_link_fault(struct adapter *adapter, int port_id)
+{
+ struct port_info *pi = adap2pinfo(adapter, port_id);
+ struct cmac *mac = &pi->mac;
+ struct cphy *phy = &pi->phy;
+ struct link_config *lc = &pi->link_config;
+ int link_ok, speed, duplex, fc, link_fault;
+ u32 rx_cfg, rx_hash_high, rx_hash_low;
+
+ t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
+
+ if (adapter->params.rev > 0 && uses_xaui(adapter))
+ t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
+
+ t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
+ t3_mac_enable(mac, MAC_DIRECTION_RX);
+
+ t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
+
+ link_fault = t3_read_reg(adapter,
+ A_XGM_INT_STATUS + mac->offset);
+ link_fault &= F_LINKFAULTCHANGE;
+
+ link_ok = lc->link_ok;
+ speed = lc->speed;
+ duplex = lc->duplex;
+ fc = lc->fc;
+
+ phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
+
+ if (link_fault) {
+ lc->link_ok = 0;
+ lc->speed = SPEED_INVALID;
+ lc->duplex = DUPLEX_INVALID;
+
+ t3_os_link_fault(adapter, port_id, 0);
+
+ /* Account link faults only when the phy reports a link up */
+ if (link_ok)
+ mac->stats.link_faults++;
+ } else {
+ if (link_ok)
+ t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
+ F_TXACTENABLE | F_RXEN);
+
+ pi->link_fault = 0;
+ lc->link_ok = (unsigned char)link_ok;
+ lc->speed = speed < 0 ? SPEED_INVALID : speed;
+ lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
+ t3_os_link_fault(adapter, port_id, link_ok);
+ }
+}
+
+/**
+ * t3_link_start - apply link configuration to MAC/PHY
+ * @phy: the PHY to setup
+ * @mac: the MAC to setup
+ * @lc: the requested link configuration
+ *
+ * Set up a port's MAC and PHY according to a desired link configuration.
+ * - If the PHY can auto-negotiate first decide what to advertise, then
+ * enable/disable auto-negotiation as desired, and reset.
+ * - If the PHY does not auto-negotiate just reset it.
+ * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ * otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
+{
+ unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+
+ lc->link_ok = 0;
+ if (lc->supported & SUPPORTED_Autoneg) {
+ lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
+ if (fc) {
+ lc->advertising |= ADVERTISED_Asym_Pause;
+ if (fc & PAUSE_RX)
+ lc->advertising |= ADVERTISED_Pause;
+ }
+ phy->ops->advertise(phy, lc->advertising);
+
+ if (lc->autoneg == AUTONEG_DISABLE) {
+ lc->speed = lc->requested_speed;
+ lc->duplex = lc->requested_duplex;
+ lc->fc = (unsigned char)fc;
+ t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
+ fc);
+ /* Also disables autoneg */
+ phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
+ } else
+ phy->ops->autoneg_enable(phy);
+ } else {
+ t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
+ lc->fc = (unsigned char)fc;
+ phy->ops->reset(phy, 0);
+ }
+ return 0;
+}
+
+/**
+ * t3_set_vlan_accel - control HW VLAN extraction
+ * @adapter: the adapter
+ * @ports: bitmap of adapter ports to operate on
+ * @on: enable (1) or disable (0) HW VLAN extraction
+ *
+ * Enables or disables HW extraction of VLAN tags for the given ports.
+ */
+void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
+{
+ t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
+ ports << S_VLANEXTRACTIONENABLE,
+ on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
+}
+
+struct intr_info {
+ unsigned int mask; /* bits to check in interrupt status */
+ const char *msg; /* message to print or NULL */
+ short stat_idx; /* stat counter to increment or -1 */
+ unsigned short fatal; /* whether the condition reported is fatal */
+};
+
+/**
+ * t3_handle_intr_status - table driven interrupt handler
+ * @adapter: the adapter that generated the interrupt
+ * @reg: the interrupt status register to process
+ * @mask: a mask to apply to the interrupt status
+ * @acts: table of interrupt actions
+ * @stats: statistics counters tracking interrupt occurrences
+ *
+ * A table driven interrupt handler that applies a set of masks to an
+ * interrupt status word and performs the corresponding actions if the
+ * interrupts described by the mask have occurred. The actions include
+ * optionally printing a warning or alert message, and optionally
+ * incrementing a stat counter. The table is terminated by an entry
+ * specifying mask 0. Returns the number of fatal interrupt conditions.
+ */
+static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
+ unsigned int mask,
+ const struct intr_info *acts,
+ unsigned long *stats)
+{
+ int fatal = 0;
+ unsigned int status = t3_read_reg(adapter, reg) & mask;
+
+ for (; acts->mask; ++acts) {
+ if (!(status & acts->mask))
+ continue;
+ if (acts->fatal) {
+ fatal++;
+ CH_ALERT(adapter, "%s (0x%x)\n",
+ acts->msg, status & acts->mask);
+ status &= ~acts->mask;
+ } else if (acts->msg)
+ CH_WARN(adapter, "%s (0x%x)\n",
+ acts->msg, status & acts->mask);
+ if (acts->stat_idx >= 0)
+ stats[acts->stat_idx]++;
+ }
+ if (status) /* clear processed interrupts */
+ t3_write_reg(adapter, reg, status);
+ return fatal;
+}
+
+#define SGE_INTR_MASK (F_RSPQDISABLED | \
+ F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
+ F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
+ F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
+ V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
+ F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
+ F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
+ F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
+ F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
+ F_LOPIODRBDROPERR)
+#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
+ F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
+ F_NFASRCHFAIL)
+#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
+#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
+ V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
+ F_TXFIFO_UNDERRUN)
+#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
+ F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
+ F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
+ F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
+ V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
+ V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
+#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
+ F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
+ /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
+ F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
+ F_TXPARERR | V_BISTERR(M_BISTERR))
+#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
+ F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
+ F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
+#define ULPTX_INTR_MASK 0xfc
+#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
+ F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
+ F_ZERO_SWITCH_ERROR)
+#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
+ F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
+ F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
+ F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
+ F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
+ F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
+ F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
+ F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
+#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
+ V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
+ V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
+#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
+ V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
+ V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
+#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
+ V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
+ V_RXTPPARERRENB(M_RXTPPARERRENB) | \
+ V_MCAPARERRENB(M_MCAPARERRENB))
+#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
+#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
+ F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
+ F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
+ F_MPS0 | F_CPL_SWITCH)
+
+/*
+ * Interrupt handler for the PCIX1 module.
+ */
+static void pci_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info pcix1_intr_info[] = {
+ {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
+ {F_SIGTARABT, "PCI signaled target abort", -1, 1},
+ {F_RCVTARABT, "PCI received target abort", -1, 1},
+ {F_RCVMSTABT, "PCI received master abort", -1, 1},
+ {F_SIGSYSERR, "PCI signaled system error", -1, 1},
+ {F_DETPARERR, "PCI detected parity error", -1, 1},
+ {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
+ {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
+ {F_RCVSPLCMPERR, "PCI received split completion error", -1,
+ 1},
+ {F_DETCORECCERR, "PCI correctable ECC error",
+ STAT_PCI_CORR_ECC, 0},
+ {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
+ {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
+ {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
+ 1},
+ {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
+ 1},
+ {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
+ 1},
+ {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
+ "error", -1, 1},
+ {0}
+ };
+
+ if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
+ pcix1_intr_info, adapter->irq_stats))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * Interrupt handler for the PCIE module.
+ */
+static void pcie_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info pcie_intr_info[] = {
+ {F_PEXERR, "PCI PEX error", -1, 1},
+ {F_UNXSPLCPLERRR,
+ "PCI unexpected split completion DMA read error", -1, 1},
+ {F_UNXSPLCPLERRC,
+ "PCI unexpected split completion DMA command error", -1, 1},
+ {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
+ {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
+ {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
+ {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
+ {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
+ "PCI MSI-X table/PBA parity error", -1, 1},
+ {F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
+ {F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
+ {F_RXPARERR, "PCI Rx parity error", -1, 1},
+ {F_TXPARERR, "PCI Tx parity error", -1, 1},
+ {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
+ {0}
+ };
+
+ if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
+ CH_ALERT(adapter, "PEX error code 0x%x\n",
+ t3_read_reg(adapter, A_PCIE_PEX_ERR));
+
+ if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
+ pcie_intr_info, adapter->irq_stats))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * TP interrupt handler.
+ */
+static void tp_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info tp_intr_info[] = {
+ {0xffffff, "TP parity error", -1, 1},
+ {0x1000000, "TP out of Rx pages", -1, 1},
+ {0x2000000, "TP out of Tx pages", -1, 1},
+ {0}
+ };
+
+ static const struct intr_info tp_intr_info_t3c[] = {
+ {0x1fffffff, "TP parity error", -1, 1},
+ {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
+ {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
+ {0}
+ };
+
+ if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
+ adapter->params.rev < T3_REV_C ?
+ tp_intr_info : tp_intr_info_t3c, NULL))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * CIM interrupt handler.
+ */
+static void cim_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info cim_intr_info[] = {
+ {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
+ {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
+ {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
+ {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
+ {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
+ {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
+ {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
+ {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
+ {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
+ {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
+ {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
+ {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
+ {F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
+ {F_ICACHEPARERR, "CIM icache parity error", -1, 1},
+ {F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
+ {F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
+ {F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
+ {F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
+ {F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
+ {F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
+ {F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
+ {F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
+ {F_ITAGPARERR, "CIM itag parity error", -1, 1},
+ {F_DTAGPARERR, "CIM dtag parity error", -1, 1},
+ {0}
+ };
+
+ if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
+ cim_intr_info, NULL))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * ULP RX interrupt handler.
+ */
+static void ulprx_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info ulprx_intr_info[] = {
+ {F_PARERRDATA, "ULP RX data parity error", -1, 1},
+ {F_PARERRPCMD, "ULP RX command parity error", -1, 1},
+ {F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
+ {F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
+ {F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
+ {F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
+ {F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
+ {F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
+ {0}
+ };
+
+ if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
+ ulprx_intr_info, NULL))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * ULP TX interrupt handler.
+ */
+static void ulptx_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info ulptx_intr_info[] = {
+ {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
+ STAT_ULP_CH0_PBL_OOB, 0},
+ {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
+ STAT_ULP_CH1_PBL_OOB, 0},
+ {0xfc, "ULP TX parity error", -1, 1},
+ {0}
+ };
+
+ if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
+ ulptx_intr_info, adapter->irq_stats))
+ t3_fatal_err(adapter);
+}
+
+#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
+ F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
+ F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
+ F_ICSPI1_TX_FRAMING_ERROR)
+#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
+ F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
+ F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
+ F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
+
+/*
+ * PM TX interrupt handler.
+ */
+static void pmtx_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info pmtx_intr_info[] = {
+ {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
+ {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
+ {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
+ {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
+ "PMTX ispi parity error", -1, 1},
+ {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
+ "PMTX ospi parity error", -1, 1},
+ {0}
+ };
+
+ if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
+ pmtx_intr_info, NULL))
+ t3_fatal_err(adapter);
+}
+
+#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
+ F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
+ F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
+ F_IESPI1_TX_FRAMING_ERROR)
+#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
+ F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
+ F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
+ F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
+
+/*
+ * PM RX interrupt handler.
+ */
+static void pmrx_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info pmrx_intr_info[] = {
+ {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
+ {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
+ {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
+ {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
+ "PMRX ispi parity error", -1, 1},
+ {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
+ "PMRX ospi parity error", -1, 1},
+ {0}
+ };
+
+ if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
+ pmrx_intr_info, NULL))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * CPL switch interrupt handler.
+ */
+static void cplsw_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info cplsw_intr_info[] = {
+ {F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
+ {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
+ {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
+ {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
+ {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
+ {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
+ {0}
+ };
+
+ if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
+ cplsw_intr_info, NULL))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * MPS interrupt handler.
+ */
+static void mps_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info mps_intr_info[] = {
+ {0x1ff, "MPS parity error", -1, 1},
+ {0}
+ };
+
+ if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
+ mps_intr_info, NULL))
+ t3_fatal_err(adapter);
+}
+
+#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
+
+/*
+ * MC7 interrupt handler.
+ */
+static void mc7_intr_handler(struct mc7 *mc7)
+{
+ struct adapter *adapter = mc7->adapter;
+ u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
+
+ if (cause & F_CE) {
+ mc7->stats.corr_err++;
+ CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
+ "data 0x%x 0x%x 0x%x\n", mc7->name,
+ t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
+ t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
+ t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
+ t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
+ }
+
+ if (cause & F_UE) {
+ mc7->stats.uncorr_err++;
+ CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
+ "data 0x%x 0x%x 0x%x\n", mc7->name,
+ t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
+ t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
+ t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
+ t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
+ }
+
+ if (G_PE(cause)) {
+ mc7->stats.parity_err++;
+ CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
+ mc7->name, G_PE(cause));
+ }
+
+ if (cause & F_AE) {
+ u32 addr = 0;
+
+ if (adapter->params.rev > 0)
+ addr = t3_read_reg(adapter,
+ mc7->offset + A_MC7_ERR_ADDR);
+ mc7->stats.addr_err++;
+ CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
+ mc7->name, addr);
+ }
+
+ if (cause & MC7_INTR_FATAL)
+ t3_fatal_err(adapter);
+
+ t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
+}
+
+#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
+ V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
+/*
+ * XGMAC interrupt handler.
+ */
+static int mac_intr_handler(struct adapter *adap, unsigned int idx)
+{
+ struct cmac *mac = &adap2pinfo(adap, idx)->mac;
+ /*
+ * We mask out interrupt causes for which we're not taking interrupts.
+ * This allows us to use polling logic to monitor some of the other
+ * conditions when taking interrupts would impose too much load on the
+ * system.
+ */
+ u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
+ ~F_RXFIFO_OVERFLOW;
+
+ if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
+ mac->stats.tx_fifo_parity_err++;
+ CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
+ }
+ if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
+ mac->stats.rx_fifo_parity_err++;
+ CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
+ }
+ if (cause & F_TXFIFO_UNDERRUN)
+ mac->stats.tx_fifo_urun++;
+ if (cause & F_RXFIFO_OVERFLOW)
+ mac->stats.rx_fifo_ovfl++;
+ if (cause & V_SERDES_LOS(M_SERDES_LOS))
+ mac->stats.serdes_signal_loss++;
+ if (cause & F_XAUIPCSCTCERR)
+ mac->stats.xaui_pcs_ctc_err++;
+ if (cause & F_XAUIPCSALIGNCHANGE)
+ mac->stats.xaui_pcs_align_change++;
+ if (cause & F_XGM_INT) {
+ t3_set_reg_field(adap,
+ A_XGM_INT_ENABLE + mac->offset,
+ F_XGM_INT, 0);
+ mac->stats.link_faults++;
+
+ t3_os_link_fault_handler(adap, idx);
+ }
+
+ if (cause & XGM_INTR_FATAL)
+ t3_fatal_err(adap);
+
+ t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
+ return cause != 0;
+}
+
+/*
+ * Interrupt handler for PHY events.
+ */
+int t3_phy_intr_handler(struct adapter *adapter)
+{
+ u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
+
+ for_each_port(adapter, i) {
+ struct port_info *p = adap2pinfo(adapter, i);
+
+ if (!(p->phy.caps & SUPPORTED_IRQ))
+ continue;
+
+ if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
+ int phy_cause = p->phy.ops->intr_handler(&p->phy);
+
+ if (phy_cause & cphy_cause_link_change)
+ t3_link_changed(adapter, i);
+ if (phy_cause & cphy_cause_fifo_error)
+ p->phy.fifo_errors++;
+ if (phy_cause & cphy_cause_module_change)
+ t3_os_phymod_changed(adapter, i);
+ }
+ }
+
+ t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
+ return 0;
+}
+
+/*
+ * T3 slow path (non-data) interrupt handler.
+ */
+int t3_slow_intr_handler(struct adapter *adapter)
+{
+ u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
+
+ cause &= adapter->slow_intr_mask;
+ if (!cause)
+ return 0;
+ if (cause & F_PCIM0) {
+ if (is_pcie(adapter))
+ pcie_intr_handler(adapter);
+ else
+ pci_intr_handler(adapter);
+ }
+ if (cause & F_SGE3)
+ t3_sge_err_intr_handler(adapter);
+ if (cause & F_MC7_PMRX)
+ mc7_intr_handler(&adapter->pmrx);
+ if (cause & F_MC7_PMTX)
+ mc7_intr_handler(&adapter->pmtx);
+ if (cause & F_MC7_CM)
+ mc7_intr_handler(&adapter->cm);
+ if (cause & F_CIM)
+ cim_intr_handler(adapter);
+ if (cause & F_TP1)
+ tp_intr_handler(adapter);
+ if (cause & F_ULP2_RX)
+ ulprx_intr_handler(adapter);
+ if (cause & F_ULP2_TX)
+ ulptx_intr_handler(adapter);
+ if (cause & F_PM1_RX)
+ pmrx_intr_handler(adapter);
+ if (cause & F_PM1_TX)
+ pmtx_intr_handler(adapter);
+ if (cause & F_CPL_SWITCH)
+ cplsw_intr_handler(adapter);
+ if (cause & F_MPS0)
+ mps_intr_handler(adapter);
+ if (cause & F_MC5A)
+ t3_mc5_intr_handler(&adapter->mc5);
+ if (cause & F_XGMAC0_0)
+ mac_intr_handler(adapter, 0);
+ if (cause & F_XGMAC0_1)
+ mac_intr_handler(adapter, 1);
+ if (cause & F_T3DBG)
+ t3_os_ext_intr_handler(adapter);
+
+ /* Clear the interrupts just processed. */
+ t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
+ t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
+ return 1;
+}
+
+static unsigned int calc_gpio_intr(struct adapter *adap)
+{
+ unsigned int i, gpi_intr = 0;
+
+ for_each_port(adap, i)
+ if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
+ adapter_info(adap)->gpio_intr[i])
+ gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
+ return gpi_intr;
+}
+
+/**
+ * t3_intr_enable - enable interrupts
+ * @adapter: the adapter whose interrupts should be enabled
+ *
+ * Enable interrupts by setting the interrupt enable registers of the
+ * various HW modules and then enabling the top-level interrupt
+ * concentrator.
+ */
+void t3_intr_enable(struct adapter *adapter)
+{
+ static const struct addr_val_pair intr_en_avp[] = {
+ {A_SG_INT_ENABLE, SGE_INTR_MASK},
+ {A_MC7_INT_ENABLE, MC7_INTR_MASK},
+ {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
+ MC7_INTR_MASK},
+ {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
+ MC7_INTR_MASK},
+ {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
+ {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
+ {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
+ {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
+ {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
+ {A_MPS_INT_ENABLE, MPS_INTR_MASK},
+ };
+
+ adapter->slow_intr_mask = PL_INTR_MASK;
+
+ t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
+ t3_write_reg(adapter, A_TP_INT_ENABLE,
+ adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
+
+ if (adapter->params.rev > 0) {
+ t3_write_reg(adapter, A_CPL_INTR_ENABLE,
+ CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
+ t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
+ ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
+ F_PBL_BOUND_ERR_CH1);
+ } else {
+ t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
+ t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
+ }
+
+ t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
+
+ if (is_pcie(adapter))
+ t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
+ else
+ t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
+ t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
+ t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
+}
+
+/**
+ * t3_intr_disable - disable a card's interrupts
+ * @adapter: the adapter whose interrupts should be disabled
+ *
+ * Disable interrupts. We only disable the top-level interrupt
+ * concentrator and the SGE data interrupts.
+ */
+void t3_intr_disable(struct adapter *adapter)
+{
+ t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
+ t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
+ adapter->slow_intr_mask = 0;
+}
+
+/**
+ * t3_intr_clear - clear all interrupts
+ * @adapter: the adapter whose interrupts should be cleared
+ *
+ * Clears all interrupts.
+ */
+void t3_intr_clear(struct adapter *adapter)
+{
+ static const unsigned int cause_reg_addr[] = {
+ A_SG_INT_CAUSE,
+ A_SG_RSPQ_FL_STATUS,
+ A_PCIX_INT_CAUSE,
+ A_MC7_INT_CAUSE,
+ A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
+ A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
+ A_CIM_HOST_INT_CAUSE,
+ A_TP_INT_CAUSE,
+ A_MC5_DB_INT_CAUSE,
+ A_ULPRX_INT_CAUSE,
+ A_ULPTX_INT_CAUSE,
+ A_CPL_INTR_CAUSE,
+ A_PM1_TX_INT_CAUSE,
+ A_PM1_RX_INT_CAUSE,
+ A_MPS_INT_CAUSE,
+ A_T3DBG_INT_CAUSE,
+ };
+ unsigned int i;
+
+ /* Clear PHY and MAC interrupts for each port. */
+ for_each_port(adapter, i)
+ t3_port_intr_clear(adapter, i);
+
+ for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
+ t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
+
+ if (is_pcie(adapter))
+ t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
+ t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
+ t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
+}
+
+void t3_xgm_intr_enable(struct adapter *adapter, int idx)
+{
+ struct port_info *pi = adap2pinfo(adapter, idx);
+
+ t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
+ XGM_EXTRA_INTR_MASK);
+}
+
+void t3_xgm_intr_disable(struct adapter *adapter, int idx)
+{
+ struct port_info *pi = adap2pinfo(adapter, idx);
+
+ t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
+ 0x7ff);
+}
+
+/**
+ * t3_port_intr_enable - enable port-specific interrupts
+ * @adapter: associated adapter
+ * @idx: index of port whose interrupts should be enabled
+ *
+ * Enable port-specific (i.e., MAC and PHY) interrupts for the given
+ * adapter port.
+ */
+void t3_port_intr_enable(struct adapter *adapter, int idx)
+{
+ struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
+
+ t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
+ t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
+ phy->ops->intr_enable(phy);
+}
+
+/**
+ * t3_port_intr_disable - disable port-specific interrupts
+ * @adapter: associated adapter
+ * @idx: index of port whose interrupts should be disabled
+ *
+ * Disable port-specific (i.e., MAC and PHY) interrupts for the given
+ * adapter port.
+ */
+void t3_port_intr_disable(struct adapter *adapter, int idx)
+{
+ struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
+
+ t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
+ t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
+ phy->ops->intr_disable(phy);
+}
+
+/**
+ * t3_port_intr_clear - clear port-specific interrupts
+ * @adapter: associated adapter
+ * @idx: index of port whose interrupts to clear
+ *
+ * Clear port-specific (i.e., MAC and PHY) interrupts for the given
+ * adapter port.
+ */
+static void t3_port_intr_clear(struct adapter *adapter, int idx)
+{
+ struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
+
+ t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
+ t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
+ phy->ops->intr_clear(phy);
+}
+
+#define SG_CONTEXT_CMD_ATTEMPTS 100
+
+/**
+ * t3_sge_write_context - write an SGE context
+ * @adapter: the adapter
+ * @id: the context id
+ * @type: the context type
+ *
+ * Program an SGE context with the values already loaded in the
+ * CONTEXT_DATA? registers.
+ */
+static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
+ unsigned int type)
+{
+ if (type == F_RESPONSEQ) {
+ /*
+ * Can't write the Response Queue Context bits for
+ * Interrupt Armed or the Reserve bits after the chip
+ * has been initialized out of reset. Writing to these
+ * bits can confuse the hardware.
+ */
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
+ } else {
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
+ }
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
+ return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
+}
+
+/**
+ * clear_sge_ctxt - completely clear an SGE context
+ * @adap: the adapter
+ * @id: the context id
+ * @type: the context type
+ *
+ * Completely clear an SGE context. Used predominantly at post-reset
+ * initialization. Note in particular that we don't skip writing to any
+ * "sensitive bits" in the contexts the way that t3_sge_write_context()
+ * does ...
+ */
+static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
+ unsigned int type)
+{
+ t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
+ t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
+ t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
+ t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
+ t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
+ t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
+ t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
+ t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
+ t3_write_reg(adap, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
+ return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
+}
+
+/**
+ * t3_sge_init_ecntxt - initialize an SGE egress context
+ * @adapter: the adapter to configure
+ * @id: the context id
+ * @gts_enable: whether to enable GTS for the context
+ * @type: the egress context type
+ * @respq: associated response queue
+ * @base_addr: base address of queue
+ * @size: number of queue entries
+ * @token: uP token
+ * @gen: initial generation value for the context
+ * @cidx: consumer pointer
+ *
+ * Initialize an SGE egress context and make it ready for use. If the
+ * platform allows concurrent context operations, the caller is
+ * responsible for appropriate locking.
+ */
+int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
+ enum sge_context_type type, int respq, u64 base_addr,
+ unsigned int size, unsigned int token, int gen,
+ unsigned int cidx)
+{
+ unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
+
+ if (base_addr & 0xfff) /* must be 4K aligned */
+ return -EINVAL;
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
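+ /* The context stores the queue's base address in units of 4KB pages. */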
+ base_addr >>= 12;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
+ V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
+ V_EC_BASE_LO(base_addr & 0xffff));
+ base_addr >>= 16;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
+ base_addr >>= 32;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
+ V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
+ V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
+ F_EC_VALID);
+ return t3_sge_write_context(adapter, id, F_EGRESS);
+}
+
+/**
+ * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
+ * @adapter: the adapter to configure
+ * @id: the context id
+ * @gts_enable: whether to enable GTS for the context
+ * @base_addr: base address of queue
+ * @size: number of queue entries
+ * @bsize: size of each buffer for this queue
+ * @cong_thres: threshold to signal congestion to upstream producers
+ * @gen: initial generation value for the context
+ * @cidx: consumer pointer
+ *
+ * Initialize an SGE free list context and make it ready for use. The
+ * caller is responsible for ensuring only one context operation occurs
+ * at a time.
+ */
+int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
+ int gts_enable, u64 base_addr, unsigned int size,
+ unsigned int bsize, unsigned int cong_thres, int gen,
+ unsigned int cidx)
+{
+ if (base_addr & 0xfff) /* must be 4K aligned */
+ return -EINVAL;
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ base_addr >>= 12;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
+ base_addr >>= 32;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
+ V_FL_BASE_HI((u32) base_addr) |
+ V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
+ V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
+ V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
+ V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
+ V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
+ return t3_sge_write_context(adapter, id, F_FREELIST);
+}
+
+/**
+ * t3_sge_init_rspcntxt - initialize an SGE response queue context
+ * @adapter: the adapter to configure
+ * @id: the context id
+ * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
+ * @base_addr: base address of queue
+ * @size: number of queue entries
+ * @fl_thres: threshold for selecting the normal or jumbo free list
+ * @gen: initial generation value for the context
+ * @cidx: consumer pointer
+ *
+ * Initialize an SGE response queue context and make it ready for use.
+ * The caller is responsible for ensuring only one context operation
+ * occurs at a time.
+ */
+int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
+ int irq_vec_idx, u64 base_addr, unsigned int size,
+ unsigned int fl_thres, int gen, unsigned int cidx)
+{
+ unsigned int intr = 0;
+
+ if (base_addr & 0xfff) /* must be 4K aligned */
+ return -EINVAL;
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ base_addr >>= 12;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
+ V_CQ_INDEX(cidx));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
+ base_addr >>= 32;
+ if (irq_vec_idx >= 0)
+ intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
+ V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
+ return t3_sge_write_context(adapter, id, F_RESPONSEQ);
+}
+
+/**
+ * t3_sge_init_cqcntxt - initialize an SGE completion queue context
+ * @adapter: the adapter to configure
+ * @id: the context id
+ * @base_addr: base address of queue
+ * @size: number of queue entries
+ * @rspq: response queue for async notifications
+ * @ovfl_mode: CQ overflow mode
+ * @credits: completion queue credits
+ * @credit_thres: the credit threshold
+ *
+ * Initialize an SGE completion queue context and make it ready for use.
+ * The caller is responsible for ensuring only one context operation
+ * occurs at a time.
+ */
+int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
+ unsigned int size, int rspq, int ovfl_mode,
+ unsigned int credits, unsigned int credit_thres)
+{
+ if (base_addr & 0xfff) /* must be 4K aligned */
+ return -EINVAL;
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ base_addr >>= 12;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
+ base_addr >>= 32;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
+ V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
+ V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
+ V_CQ_ERR(ovfl_mode));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
+ V_CQ_CREDIT_THRES(credit_thres));
+ return t3_sge_write_context(adapter, id, F_CQ);
+}
+
+/**
+ * t3_sge_enable_ecntxt - enable/disable an SGE egress context
+ * @adapter: the adapter
+ * @id: the egress context id
+ * @enable: enable (1) or disable (0) the context
+ *
+ * Enable or disable an SGE egress context. The caller is responsible for
+ * ensuring only one context operation occurs at a time.
+ */
+int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
+{
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
+ return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
+}
+
+/**
+ * t3_sge_disable_fl - disable an SGE free-buffer list
+ * @adapter: the adapter
+ * @id: the free list context id
+ *
+ * Disable an SGE free-buffer list. The caller is responsible for
+ * ensuring only one context operation occurs at a time.
+ */
+int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
+{
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
+ return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
+}
+
+/**
+ * t3_sge_disable_rspcntxt - disable an SGE response queue
+ * @adapter: the adapter
+ * @id: the response queue context id
+ *
+ * Disable an SGE response queue. The caller is responsible for
+ * ensuring only one context operation occurs at a time.
+ */
+int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
+{
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
+ return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
+}
+
+/**
+ * t3_sge_disable_cqcntxt - disable an SGE completion queue
+ * @adapter: the adapter
+ * @id: the completion queue context id
+ *
+ * Disable an SGE completion queue. The caller is responsible for
+ * ensuring only one context operation occurs at a time.
+ */
+int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
+{
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
+ return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
+}
+
+/**
+ * t3_sge_cqcntxt_op - perform an operation on a completion queue context
+ * @adapter: the adapter
+ * @id: the context id
+ * @op: the operation to perform
+ *
+ * Perform the selected operation on an SGE completion queue context.
+ * The caller is responsible for ensuring only one context operation
+ * occurs at a time.
+ */
+int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
+ unsigned int credits)
+{
+ u32 val;
+
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
+ V_CONTEXT(id) | F_CQ);
+ if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
+ return -EIO;
+
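+ /*
+ * Operations 2-6 return the queue's current index: revisions above 0
+ * report it directly, while older parts need an explicit context read
+ * to fetch it.
+ */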
+ if (op >= 2 && op < 7) {
+ if (adapter->params.rev > 0)
+ return G_CQ_INDEX(val);
+
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
+ if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
+ F_CONTEXT_CMD_BUSY, 0,
+ SG_CONTEXT_CMD_ATTEMPTS, 1))
+ return -EIO;
+ return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
+ }
+ return 0;
+}
+
+/**
+ * t3_config_rss - configure Rx packet steering
+ * @adapter: the adapter
+ * @rss_config: RSS settings (written to TP_RSS_CONFIG)
+ * @cpus: values for the CPU lookup table (0xff terminated)
+ * @rspq: values for the response queue lookup table (0xffff terminated)
+ *
+ * Programs the receive packet steering logic. @cpus and @rspq provide
+ * the values for the CPU and response queue lookup tables. If they
+ * provide fewer values than the size of the tables the supplied values
+ * are used repeatedly until the tables are fully populated.
+ */
+void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
+ const u8 *cpus, const u16 *rspq)
+{
+ int i, j, cpu_idx = 0, q_idx = 0;
+
+ if (cpus)
+ for (i = 0; i < RSS_TABLE_SIZE; ++i) {
+ u32 val = i << 16;
+
+ for (j = 0; j < 2; ++j) {
+ val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
+ if (cpus[cpu_idx] == 0xff)
+ cpu_idx = 0;
+ }
+ t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
+ }
+
+ if (rspq)
+ for (i = 0; i < RSS_TABLE_SIZE; ++i) {
+ t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
+ (i << 16) | rspq[q_idx++]);
+ if (rspq[q_idx] == 0xffff)
+ q_idx = 0;
+ }
+
+ t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
+}
+
+/**
+ * t3_tp_set_offload_mode - put TP in NIC/offload mode
+ * @adap: the adapter
+ * @enable: 1 to select offload mode, 0 for regular NIC
+ *
+ * Switches TP to NIC/offload mode.
+ */
+void t3_tp_set_offload_mode(struct adapter *adap, int enable)
+{
+ if (is_offload(adap) || !enable)
+ t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
+ V_NICMODE(!enable));
+}
+
+/**
+ * pm_num_pages - calculate the number of pages of the payload memory
+ * @mem_size: the size of the payload memory
+ * @pg_size: the size of each payload memory page
+ *
+ * Calculate the number of pages, each of the given size, that fit in a
+ * memory of the specified size, respecting the HW requirement that the
+ * number of pages must be a multiple of 24.
+ */
+static inline unsigned int pm_num_pages(unsigned int mem_size,
+ unsigned int pg_size)
+{
+ unsigned int n = mem_size / pg_size;
+
+ return n - n % 24;
+}
+
+#define mem_region(adap, start, size, reg) \
+ t3_write_reg((adap), A_ ## reg, (start)); \
+ start += size
+
+/**
+ * partition_mem - partition memory and configure TP memory settings
+ * @adap: the adapter
+ * @p: the TP parameters
+ *
+ * Partitions context and payload memory and configures TP's memory
+ * registers.
+ */
+static void partition_mem(struct adapter *adap, const struct tp_params *p)
+{
+ unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
+ unsigned int timers = 0, timers_shift = 22;
+
+ if (adap->params.rev > 0) {
+ if (tids <= 16 * 1024) {
+ timers = 1;
+ timers_shift = 16;
+ } else if (tids <= 64 * 1024) {
+ timers = 2;
+ timers_shift = 18;
+ } else if (tids <= 256 * 1024) {
+ timers = 3;
+ timers_shift = 20;
+ }
+ }
+
+ t3_write_reg(adap, A_TP_PMM_SIZE,
+ p->chan_rx_size | (p->chan_tx_size >> 16));
+
+ t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
+ t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
+ t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
+ t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
+ V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
+
+ t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
+ t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
+ t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
+
+ pstructs = p->rx_num_pgs + p->tx_num_pgs;
+ /* Add a bit of headroom and make multiple of 24 */
+ pstructs += 48;
+ pstructs -= pstructs % 24;
+ t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
+
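+ /*
+ * The TCBs occupy the start of CM memory; carve the remaining regions
+ * out sequentially behind them.
+ */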
+ m = tids * TCB_SIZE;
+ mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
+ mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
+ t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
+ m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
+ mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
+ mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
+ mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
+ mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
+
+ m = (m + 4095) & ~0xfff;
+ t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
+ t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
+
+ tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
+ m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
+ adap->params.mc5.nfilters - adap->params.mc5.nroutes;
+ if (tids < m)
+ adap->params.mc5.nservers += m - tids;
+}
+
+static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
+ u32 val)
+{
+ t3_write_reg(adap, A_TP_PIO_ADDR, addr);
+ t3_write_reg(adap, A_TP_PIO_DATA, val);
+}
+
+static void tp_config(struct adapter *adap, const struct tp_params *p)
+{
+ t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
+ F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
+ F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
+ t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
+ F_MTUENABLE | V_WINDOWSCALEMODE(1) |
+ V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
+ t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
+ V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
+ V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
+ F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
+ t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
+ F_IPV6ENABLE | F_NICMODE);
+ t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
+ t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
+ t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
+ adap->params.rev > 0 ? F_ENABLEESND :
+ F_T3A_ENABLEESND);
+
+ t3_set_reg_field(adap, A_TP_PC_CONFIG,
+ F_ENABLEEPCMDAFULL,
+ F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
+ F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
+ t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
+ F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
+ F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
+ t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
+ t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
+
+ if (adap->params.rev > 0) {
+ tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
+ t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
+ F_TXPACEAUTO);
+ t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
+ t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
+ } else
+ t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
+
+ if (adap->params.rev == T3_REV_C)
+ t3_set_reg_field(adap, A_TP_PC_CONFIG,
+ V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
+ V_TABLELATENCYDELTA(4));
+
+ t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
+ t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
+ t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
+ t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
+}
+
+/* Desired TP timer resolution in usec */
+#define TP_TMR_RES 50
+
+/* TCP timer values in ms */
+#define TP_DACK_TIMER 50
+#define TP_RTO_MIN 250
+
+/**
+ * tp_set_timers - set TP timing parameters
+ * @adap: the adapter to set
+ * @core_clk: the core clock frequency in Hz
+ *
+ * Set TP's timing parameters, such as the various timer resolutions and
+ * the TCP timer values.
+ */
+static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
+{
+ unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
+ unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
+ unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
+ unsigned int tps = core_clk >> tre;
+
+ t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
+ V_DELAYEDACKRESOLUTION(dack_re) |
+ V_TIMESTAMPRESOLUTION(tstamp_re));
+ t3_write_reg(adap, A_TP_DACK_TIMER,
+ (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
+ t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
+ t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
+ t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
+ t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
+ t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
+ V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
+ V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
+ V_KEEPALIVEMAX(9));
+
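+/* "n SECONDS" expands to "n * tps", converting seconds to timer ticks. */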
+#define SECONDS * tps
+
+ t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
+ t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
+ t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
+ t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
+ t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
+ t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
+ t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
+ t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
+ t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
+
+#undef SECONDS
+}
+
+/**
+ * t3_tp_set_coalescing_size - set receive coalescing size
+ * @adap: the adapter
+ * @size: the receive coalescing size
+ * @psh: whether a set PSH bit should deliver coalesced data
+ *
+ * Set the receive coalescing size and PSH bit handling.
+ */
+static int t3_tp_set_coalescing_size(struct adapter *adap,
+ unsigned int size, int psh)
+{
+ u32 val;
+
+ if (size > MAX_RX_COALESCING_LEN)
+ return -EINVAL;
+
+ val = t3_read_reg(adap, A_TP_PARA_REG3);
+ val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
+
+ if (size) {
+ val |= F_RXCOALESCEENABLE;
+ if (psh)
+ val |= F_RXCOALESCEPSHEN;
+ size = min(MAX_RX_COALESCING_LEN, size);
+ t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
+ V_MAXRXDATA(MAX_RX_COALESCING_LEN));
+ }
+ t3_write_reg(adap, A_TP_PARA_REG3, val);
+ return 0;
+}
+
+/**
+ * t3_tp_set_max_rxsize - set the max receive size
+ * @adap: the adapter
+ * @size: the max receive size
+ *
+ * Set TP's max receive size. This is the limit that applies when
+ * receive coalescing is disabled.
+ */
+static void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
+{
+ t3_write_reg(adap, A_TP_PARA_REG7,
+ V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
+}
+
+static void init_mtus(unsigned short mtus[])
+{
+ /*
+ * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
+ * it can accommodate max size TCP/IP headers when SACK and timestamps
+ * are enabled and still have at least 8 bytes of payload.
+ */
+ mtus[0] = 88;
+ mtus[1] = 88;
+ mtus[2] = 256;
+ mtus[3] = 512;
+ mtus[4] = 576;
+ mtus[5] = 1024;
+ mtus[6] = 1280;
+ mtus[7] = 1492;
+ mtus[8] = 1500;
+ mtus[9] = 2002;
+ mtus[10] = 2048;
+ mtus[11] = 4096;
+ mtus[12] = 4352;
+ mtus[13] = 8192;
+ mtus[14] = 9000;
+ mtus[15] = 9600;
+}
+
+/*
+ * Initial congestion control parameters.
+ */
+static void init_cong_ctrl(unsigned short *a, unsigned short *b)
+{
+ a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
+ a[9] = 2;
+ a[10] = 3;
+ a[11] = 4;
+ a[12] = 5;
+ a[13] = 6;
+ a[14] = 7;
+ a[15] = 8;
+ a[16] = 9;
+ a[17] = 10;
+ a[18] = 14;
+ a[19] = 17;
+ a[20] = 21;
+ a[21] = 25;
+ a[22] = 30;
+ a[23] = 35;
+ a[24] = 45;
+ a[25] = 60;
+ a[26] = 80;
+ a[27] = 100;
+ a[28] = 200;
+ a[29] = 300;
+ a[30] = 400;
+ a[31] = 500;
+
+ b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
+ b[9] = b[10] = 1;
+ b[11] = b[12] = 2;
+ b[13] = b[14] = b[15] = b[16] = 3;
+ b[17] = b[18] = b[19] = b[20] = b[21] = 4;
+ b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
+ b[28] = b[29] = 6;
+ b[30] = b[31] = 7;
+}
+
+/* The minimum additive increment value for the congestion control table */
+#define CC_MIN_INCR 2U
+
+/**
+ * t3_load_mtus - write the MTU and congestion control HW tables
+ * @adap: the adapter
+ * @mtus: the unrestricted values for the MTU table
+ * @alpha: the values for the congestion control alpha parameter
+ * @beta: the values for the congestion control beta parameter
+ * @mtu_cap: the maximum permitted effective MTU
+ *
+ * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
+ * Update the high-speed congestion control table with the supplied alpha,
+ * beta, and MTUs.
+ */
+void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
+ unsigned short alpha[NCCTRL_WIN],
+ unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
+{
+ static const unsigned int avg_pkts[NCCTRL_WIN] = {
+ 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
+ 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
+ 28672, 40960, 57344, 81920, 114688, 163840, 229376
+ };
+
+ unsigned int i, w;
+
+ for (i = 0; i < NMTUS; ++i) {
+ unsigned int mtu = min(mtus[i], mtu_cap);
+ unsigned int log2 = fls(mtu);
+
+ if (!(mtu & ((1 << log2) >> 2))) /* round */
+ log2--;
+ t3_write_reg(adap, A_TP_MTU_TABLE,
+ (i << 24) | (log2 << 16) | mtu);
+
+ for (w = 0; w < NCCTRL_WIN; ++w) {
+ unsigned int inc;
+
+ inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
+ CC_MIN_INCR);
+
+ t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
+ (w << 16) | (beta[w] << 13) | inc);
+ }
+ }
+}
+
+/**
+ * t3_tp_get_mib_stats - read TP's MIB counters
+ * @adap: the adapter
+ * @tps: holds the returned counter values
+ *
+ * Returns the values of TP's MIB counters.
+ */
+void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
+{
+ t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
+ sizeof(*tps) / sizeof(u32), 0);
+}
+
+#define ulp_region(adap, name, start, len) \
+ t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
+ t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
+ (start) + (len) - 1); \
+ start += len
+
+#define ulptx_region(adap, name, start, len) \
+ t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
+ t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
+ (start) + (len) - 1)
+
+static void ulp_config(struct adapter *adap, const struct tp_params *p)
+{
+ unsigned int m = p->chan_rx_size;
+
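+ /* The ULP regions are carved out of the memory above the Rx channel buffers. */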
+ ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
+ ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
+ ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
+ ulp_region(adap, STAG, m, p->chan_rx_size / 4);
+ ulp_region(adap, RQ, m, p->chan_rx_size / 4);
+ ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
+ ulp_region(adap, PBL, m, p->chan_rx_size / 4);
+ t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
+}
+
+/**
+ * t3_set_proto_sram - set the contents of the protocol sram
+ * @adap: the adapter
+ * @data: the protocol image
+ *
+ * Write the contents of the protocol SRAM.
+ */
+int t3_set_proto_sram(struct adapter *adap, const u8 *data)
+{
+ int i;
+ const __be32 *buf = (const __be32 *)data;
+
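+ /*
+ * Each SRAM line is loaded as five 32-bit words and committed by
+ * writing the line index with bit 31 set.
+ */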
+ for (i = 0; i < PROTO_SRAM_LINES; i++) {
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
+
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
+ if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
+ return -EIO;
+ }
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
+
+ return 0;
+}
+
+void t3_config_trace_filter(struct adapter *adapter,
+ const struct trace_params *tp, int filter_index,
+ int invert, int enable)
+{
+ u32 addr, key[4], mask[4];
+
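+ /*
+ * Pack the match key and mask: source/destination IP and port,
+ * protocol, VLAN, and interface.
+ */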
+ key[0] = tp->sport | (tp->sip << 16);
+ key[1] = (tp->sip >> 16) | (tp->dport << 16);
+ key[2] = tp->dip;
+ key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
+
+ mask[0] = tp->sport_mask | (tp->sip_mask << 16);
+ mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
+ mask[2] = tp->dip_mask;
+ mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
+
+ if (invert)
+ key[3] |= (1 << 29);
+ if (enable)
+ key[3] |= (1 << 28);
+
+ addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
+ tp_wr_indirect(adapter, addr++, key[0]);
+ tp_wr_indirect(adapter, addr++, mask[0]);
+ tp_wr_indirect(adapter, addr++, key[1]);
+ tp_wr_indirect(adapter, addr++, mask[1]);
+ tp_wr_indirect(adapter, addr++, key[2]);
+ tp_wr_indirect(adapter, addr++, mask[2]);
+ tp_wr_indirect(adapter, addr++, key[3]);
+ tp_wr_indirect(adapter, addr, mask[3]);
+ t3_read_reg(adapter, A_TP_PIO_DATA);
+}
+
+/**
+ * t3_config_sched - configure a HW traffic scheduler
+ * @adap: the adapter
+ * @kbps: target rate in Kbps
+ * @sched: the scheduler index
+ *
+ * Configure a HW scheduler for the target rate
+ */
+int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
+{
+ unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
+ unsigned int clk = adap->params.vpd.cclk * 1000;
+ unsigned int selected_cpt = 0, selected_bpt = 0;
+
+ if (kbps > 0) {
+ kbps *= 125; /* -> bytes */
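+ /*
+ * Search all cycles-per-tick values for the bytes-per-tick setting
+ * whose resulting rate best approximates the target.
+ */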
+ for (cpt = 1; cpt <= 255; cpt++) {
+ tps = clk / cpt;
+ bpt = (kbps + tps / 2) / tps;
+ if (bpt > 0 && bpt <= 255) {
+ v = bpt * tps;
+ delta = v >= kbps ? v - kbps : kbps - v;
+ if (delta <= mindelta) {
+ mindelta = delta;
+ selected_cpt = cpt;
+ selected_bpt = bpt;
+ }
+ } else if (selected_cpt)
+ break;
+ }
+ if (!selected_cpt)
+ return -EINVAL;
+ }
+ t3_write_reg(adap, A_TP_TM_PIO_ADDR,
+ A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
+ v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
+ if (sched & 1)
+ v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
+ else
+ v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
+ t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
+ return 0;
+}
+
+static int tp_init(struct adapter *adap, const struct tp_params *p)
+{
+ int busy = 0;
+
+ tp_config(adap, p);
+ t3_set_vlan_accel(adap, 3, 0);
+
+ if (is_offload(adap)) {
+ tp_set_timers(adap, adap->params.vpd.cclk * 1000);
+ t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
+ busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
+ 0, 1000, 5);
+ if (busy)
+ CH_ERR(adap, "TP initialization timed out\n");
+ }
+
+ if (!busy)
+ t3_write_reg(adap, A_TP_RESET, F_TPRESET);
+ return busy;
+}
+
+/*
+ * Perform the bits of HW initialization that are dependent on the Tx
+ * channels being used.
+ */
+static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
+{
+ int i;
+
+ if (chan_map != 3) { /* one channel */
+ t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
+ t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
+ t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
+ (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
+ F_TPTXPORT1EN | F_PORT1ACTIVE));
+ t3_write_reg(adap, A_PM1_TX_CFG,
+ chan_map == 1 ? 0xffffffff : 0);
+ } else { /* two channels */
+ t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
+ t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
+ t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
+ V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
+ t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
+ F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
+ F_ENFORCEPKT);
+ t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
+ t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
+ t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
+ V_TX_MOD_QUEUE_REQ_MAP(0xaa));
+ for (i = 0; i < 16; i++)
+ t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
+ (i << 16) | 0x1010);
+ }
+}
+
+static int calibrate_xgm(struct adapter *adapter)
+{
+ if (uses_xaui(adapter)) {
+ unsigned int v, i;
+
+ for (i = 0; i < 5; ++i) {
+ t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
+ t3_read_reg(adapter, A_XGM_XAUI_IMP);
+ msleep(1);
+ v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
+ if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
+ t3_write_reg(adapter, A_XGM_XAUI_IMP,
+ V_XAUIIMP(G_CALIMP(v) >> 2));
+ return 0;
+ }
+ }
+ CH_ERR(adapter, "MAC calibration failed\n");
+ return -1;
+ } else {
+ t3_write_reg(adapter, A_XGM_RGMII_IMP,
+ V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
+ t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
+ F_XGM_IMPSETUPDATE);
+ }
+ return 0;
+}
+
+static void calibrate_xgm_t3b(struct adapter *adapter)
+{
+ if (!uses_xaui(adapter)) {
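+ /*
+ * Pulse the calibration reset and update strobes to latch the new
+ * RGMII impedance values.
+ */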
+ t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
+ F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
+ t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
+ t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
+ F_XGM_IMPSETUPDATE);
+ t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
+ 0);
+ t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
+ t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
+ }
+}
+
+struct mc7_timing_params {
+ unsigned char ActToPreDly;
+ unsigned char ActToRdWrDly;
+ unsigned char PreCyc;
+ unsigned char RefCyc[5];
+ unsigned char BkCyc;
+ unsigned char WrToRdDly;
+ unsigned char RdToWrDly;
+};
+
+/*
+ * Write a value to a register and check that the write completed. These
+ * writes normally complete in a cycle or two, so one read should suffice.
+ * The very first read exists to flush the posted write to the device.
+ */
+static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
+{
+ t3_write_reg(adapter, addr, val);
+ t3_read_reg(adapter, addr); /* flush */
+ if (!(t3_read_reg(adapter, addr) & F_BUSY))
+ return 0;
+ CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
+ return -EIO;
+}
+
+static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
+{
+ static const unsigned int mc7_mode[] = {
+ 0x632, 0x642, 0x652, 0x432, 0x442
+ };
+ static const struct mc7_timing_params mc7_timings[] = {
+ {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
+ {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
+ {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
+ {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
+ {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
+ };
+
+ u32 val;
+ unsigned int width, density, slow, attempts;
+ struct adapter *adapter = mc7->adapter;
+ const struct mc7_timing_params *p = &mc7_timings[mem_type];
+
+ if (!mc7->size)
+ return 0;
+
+ val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
+ slow = val & F_SLOW;
+ width = G_WIDTH(val);
+ density = G_DEN(val);
+
+ t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
+ val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
+ msleep(1);
+
+ if (!slow) {
+ t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
+ t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
+ msleep(1);
+ if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
+ (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
+ CH_ERR(adapter, "%s MC7 calibration timed out\n",
+ mc7->name);
+ goto out_fail;
+ }
+ }
+
+ t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
+ V_ACTTOPREDLY(p->ActToPreDly) |
+ V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
+ V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
+ V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
+
+ t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
+ val | F_CLKEN | F_TERM150);
+ t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
+
+ if (!slow)
+ t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
+ F_DLLENB);
+ udelay(1);
+
+ val = slow ? 3 : 6;
+ if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
+ goto out_fail;
+
+ if (!slow) {
+ t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
+ t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
+ udelay(5);
+ }
+
+ if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
+ mc7_mode[mem_type]) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
+ goto out_fail;
+
+ /* clock value is in KHz */
+ mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
+ mc7_clock /= 1000000; /* KHz->MHz, ns->us */
+
+ t3_write_reg(adapter, mc7->offset + A_MC7_REF,
+ F_PERREFEN | V_PREREFDIV(mc7_clock));
+ t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
+
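+ /*
+ * Enable ECC and use the BIST engine to initialize the entire memory
+ * with zero data.
+ */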
+ t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
+ t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
+ t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
+ t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
+ (mc7->size << width) - 1);
+ t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
+ t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
+
+ attempts = 50;
+ do {
+ msleep(250);
+ val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
+ } while ((val & F_BUSY) && --attempts);
+ if (val & F_BUSY) {
+ CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
+ goto out_fail;
+ }
+
+ /* Enable normal memory accesses. */
+ t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
+ return 0;
+
+out_fail:
+ return -1;
+}
+
+static void config_pcie(struct adapter *adap)
+{
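+ /*
+ * ACK latency and replay timer limits, indexed by
+ * [log2(link width)][max payload size].
+ */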
+ static const u16 ack_lat[4][6] = {
+ {237, 416, 559, 1071, 2095, 4143},
+ {128, 217, 289, 545, 1057, 2081},
+ {73, 118, 154, 282, 538, 1050},
+ {67, 107, 86, 150, 278, 534}
+ };
+ static const u16 rpl_tmr[4][6] = {
+ {711, 1248, 1677, 3213, 6285, 12429},
+ {384, 651, 867, 1635, 3171, 6243},
+ {219, 354, 462, 846, 1614, 3150},
+ {201, 321, 258, 450, 834, 1602}
+ };
+
+ u16 val, devid;
+ unsigned int log2_width, pldsize;
+ unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
+
+ pci_read_config_word(adap->pdev,
+ adap->pdev->pcie_cap + PCI_EXP_DEVCTL,
+ &val);
+ pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
+
+ pci_read_config_word(adap->pdev, 0x2, &devid);
+ if (devid == 0x37) {
+ pci_write_config_word(adap->pdev,
+ adap->pdev->pcie_cap + PCI_EXP_DEVCTL,
+ val & ~PCI_EXP_DEVCTL_READRQ &
+ ~PCI_EXP_DEVCTL_PAYLOAD);
+ pldsize = 0;
+ }
+
+ pci_read_config_word(adap->pdev, adap->pdev->pcie_cap + PCI_EXP_LNKCTL,
+ &val);
+
+ fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
+ fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
+ G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
+ log2_width = fls(adap->params.pci.width) - 1;
+ acklat = ack_lat[log2_width][pldsize];
+ if (val & 1) /* check LOsEnable */
+ acklat += fst_trn_tx * 4;
+ rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
+
+ if (adap->params.rev == 0)
+ t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
+ V_T3A_ACKLAT(M_T3A_ACKLAT),
+ V_T3A_ACKLAT(acklat));
+ else
+ t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
+ V_ACKLAT(acklat));
+
+ t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
+ V_REPLAYLMT(rpllmt));
+
+ t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
+ t3_set_reg_field(adap, A_PCIE_CFG, 0,
+ F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
+ F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
+}
+
+/*
+ * Initialize and configure T3 HW modules. This performs the
+ * initialization steps that need to be done once after a card is reset.
+ * MAC and PHY initialization is handled separately whenever a port is enabled.
+ *
+ * fw_params are passed to FW and their value is platform dependent. Only the
+ * top 8 bits are available for use, the rest must be 0.
+ */
+int t3_init_hw(struct adapter *adapter, u32 fw_params)
+{
+ int err = -EIO, attempts, i;
+ const struct vpd_params *vpd = &adapter->params.vpd;
+
+ if (adapter->params.rev > 0)
+ calibrate_xgm_t3b(adapter);
+ else if (calibrate_xgm(adapter))
+ goto out_err;
+
+ if (vpd->mclk) {
+ partition_mem(adapter, &adapter->params.tp);
+
+ if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
+ mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
+ mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
+ t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
+ adapter->params.mc5.nfilters,
+ adapter->params.mc5.nroutes))
+ goto out_err;
+
+ for (i = 0; i < 32; i++)
+ if (clear_sge_ctxt(adapter, i, F_CQ))
+ goto out_err;
+ }
+
+ if (tp_init(adapter, &adapter->params.tp))
+ goto out_err;
+
+ t3_tp_set_coalescing_size(adapter,
+ min(adapter->params.sge.max_pkt_size,
+ MAX_RX_COALESCING_LEN), 1);
+ t3_tp_set_max_rxsize(adapter,
+ min(adapter->params.sge.max_pkt_size, 16384U));
+ ulp_config(adapter, &adapter->params.tp);
+
+ if (is_pcie(adapter))
+ config_pcie(adapter);
+ else
+ t3_set_reg_field(adapter, A_PCIX_CFG, 0,
+ F_DMASTOPEN | F_CLIDECEN);
+
+ if (adapter->params.rev == T3_REV_C)
+ t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
+ F_CFG_CQE_SOP_MASK);
+
+ t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
+ t3_write_reg(adapter, A_PM1_RX_MODE, 0);
+ t3_write_reg(adapter, A_PM1_TX_MODE, 0);
+ chan_init_hw(adapter, adapter->params.chan_map);
+ t3_sge_init(adapter, &adapter->params.sge);
+ t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);
+
+ t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
+
+ t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
+ t3_write_reg(adapter, A_CIM_BOOT_CFG,
+ V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
+ t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
+
+ attempts = 100;
+ do { /* wait for uP to initialize */
+ msleep(20);
+ } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
+ if (!attempts) {
+ CH_ERR(adapter, "uP initialization timed out\n");
+ goto out_err;
+ }
+
+ err = 0;
+out_err:
+ return err;
+}
+
+/**
+ * get_pci_mode - determine a card's PCI mode
+ * @adapter: the adapter
+ * @p: where to store the PCI settings
+ *
+ * Determines a card's PCI mode and associated parameters, such as speed
+ * and width.
+ */
+static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
+{
+ static unsigned short speed_map[] = { 33, 66, 100, 133 };
+ u32 pci_mode, pcie_cap;
+
+ pcie_cap = pci_pcie_cap(adapter->pdev);
+ if (pcie_cap) {
+ u16 val;
+
+ p->variant = PCI_VARIANT_PCIE;
+ pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
+ &val);
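+ /* Bits 9:4 of the link status register hold the negotiated width. */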
+ p->width = (val >> 4) & 0x3f;
+ return;
+ }
+
+ pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
+ p->speed = speed_map[G_PCLKRANGE(pci_mode)];
+ p->width = (pci_mode & F_64BIT) ? 64 : 32;
+ pci_mode = G_PCIXINITPAT(pci_mode);
+ if (pci_mode == 0)
+ p->variant = PCI_VARIANT_PCI;
+ else if (pci_mode < 4)
+ p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
+ else if (pci_mode < 8)
+ p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
+ else
+ p->variant = PCI_VARIANT_PCIX_266_MODE2;
+}
+
+/**
+ * init_link_config - initialize a link's SW state
+ * @lc: structure holding the link state
+ * @caps: bitmap of the link's capabilities (SUPPORTED_* values)
+ *
+ * Initializes the SW state maintained for each link, including the link's
+ * capabilities and default speed/duplex/flow-control/autonegotiation
+ * settings.
+ */
+static void init_link_config(struct link_config *lc, unsigned int caps)
+{
+ lc->supported = caps;
+ lc->requested_speed = lc->speed = SPEED_INVALID;
+ lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
+ lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+ if (lc->supported & SUPPORTED_Autoneg) {
+ lc->advertising = lc->supported;
+ lc->autoneg = AUTONEG_ENABLE;
+ lc->requested_fc |= PAUSE_AUTONEG;
+ } else {
+ lc->advertising = 0;
+ lc->autoneg = AUTONEG_DISABLE;
+ }
+}
+
+/**
+ * mc7_calc_size - calculate MC7 memory size
+ * @cfg: the MC7 configuration
+ *
+ * Calculates the size of an MC7 memory in bytes from the value of its
+ * configuration register.
+ */
+static unsigned int mc7_calc_size(u32 cfg)
+{
+ unsigned int width = G_WIDTH(cfg);
+ unsigned int banks = !!(cfg & F_BKS) + 1;
+ unsigned int org = !!(cfg & F_ORG) + 1;
+ unsigned int density = G_DEN(cfg);
+ unsigned int MBs = ((256 << density) * banks) / (org << width);
+
+ return MBs << 20;
+}
+
+static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
+ unsigned int base_addr, const char *name)
+{
+ u32 cfg;
+
+ mc7->adapter = adapter;
+ mc7->name = name;
+ mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
+ cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
+ mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
+ mc7->width = G_WIDTH(cfg);
+}
+
+static void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
+{
+ u16 devid;
+
+ mac->adapter = adapter;
+ pci_read_config_word(adapter->pdev, 0x2, &devid);
+
+ if (devid == 0x37 && !adapter->params.vpd.xauicfg[1])
+ index = 0;
+ mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
+ mac->nucast = 1;
+
+ if (adapter->params.rev == 0 && uses_xaui(adapter)) {
+ t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
+ is_10G(adapter) ? 0x2901c04 : 0x2301c04);
+ t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
+ F_ENRGMII, 0);
+ }
+}
+
+static void early_hw_init(struct adapter *adapter,
+ const struct adapter_info *ai)
+{
+ u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
+
+ mi1_init(adapter, ai);
+ t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
+ V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
+ t3_write_reg(adapter, A_T3DBG_GPIO_EN,
+ ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
+ t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
+ t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
+
+ if (adapter->params.rev == 0 || !uses_xaui(adapter))
+ val |= F_ENRGMII;
+
+ /* Enable MAC clocks so we can access the registers */
+ t3_write_reg(adapter, A_XGM_PORT_CFG, val);
+ t3_read_reg(adapter, A_XGM_PORT_CFG);
+
+ val |= F_CLKDIVRESET_;
+ t3_write_reg(adapter, A_XGM_PORT_CFG, val);
+ t3_read_reg(adapter, A_XGM_PORT_CFG);
+ t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
+ t3_read_reg(adapter, A_XGM_PORT_CFG);
+}
+
+/*
+ * Reset the adapter.
+ * Older PCIe cards lose their config space during reset, PCI-X
+ * ones don't.
+ */
+int t3_reset_adapter(struct adapter *adapter)
+{
+ int i, save_and_restore_pcie =
+ adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
+ uint16_t devid = 0;
+
+ if (save_and_restore_pcie)
+ pci_save_state(adapter->pdev);
+ t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
+
+ /*
+ * Delay: give the device some time to reset fully.
+ * XXX The delay time should be tuned.
+ */
+ for (i = 0; i < 10; i++) {
+ msleep(50);
+ pci_read_config_word(adapter->pdev, 0x00, &devid);
+ if (devid == 0x1425)
+ break;
+ }
+
+ if (devid != 0x1425)
+ return -1;
+
+ if (save_and_restore_pcie)
+ pci_restore_state(adapter->pdev);
+ return 0;
+}
+
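+/*
+ * Initialize the parity of various HW memories by writing benign values:
+ * clear a set of SGE contexts and walk each CIM IBQ through its debug
+ * interface.
+ */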
+static int init_parity(struct adapter *adap)
+{
+ int i, err, addr;
+
+ if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ for (err = i = 0; !err && i < 16; i++)
+ err = clear_sge_ctxt(adap, i, F_EGRESS);
+ for (i = 0xfff0; !err && i <= 0xffff; i++)
+ err = clear_sge_ctxt(adap, i, F_EGRESS);
+ for (i = 0; !err && i < SGE_QSETS; i++)
+ err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
+ if (err)
+ return err;
+
+ t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
+ for (i = 0; i < 4; i++)
+ for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
+ t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
+ F_IBQDBGWR | V_IBQDBGQID(i) |
+ V_IBQDBGADDR(addr));
+ err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
+ F_IBQDBGBUSY, 0, 2, 1);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/*
+ * Initialize adapter SW state for the various HW modules, set initial values
+ * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
+ * interface.
+ */
+int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
+ int reset)
+{
+ int ret;
+ unsigned int i, j = -1;
+
+ get_pci_mode(adapter, &adapter->params.pci);
+
+ adapter->params.info = ai;
+ adapter->params.nports = ai->nports0 + ai->nports1;
+ adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
+ adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
+ /*
+ * We used to only run the "adapter check task" once a second if
+ * we had PHYs which didn't support interrupts (we would check
+ * their link status once a second). Now we check other conditions
+ * in that routine which could potentially impose a very high
+ * interrupt load on the system. As such, we now always scan the
+ * adapter state once a second ...
+ */
+ adapter->params.linkpoll_period = 10;
+ adapter->params.stats_update_period = is_10G(adapter) ?
+ MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
+ adapter->params.pci.vpd_cap_addr =
+ pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
+ ret = get_vpd_params(adapter, &adapter->params.vpd);
+ if (ret < 0)
+ return ret;
+
+ if (reset && t3_reset_adapter(adapter))
+ return -1;
+
+ t3_sge_prep(adapter, &adapter->params.sge);
+
+ if (adapter->params.vpd.mclk) {
+ struct tp_params *p = &adapter->params.tp;
+
+ mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
+ mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
+ mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
+
+ p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
+ p->pmrx_size = t3_mc7_size(&adapter->pmrx);
+ p->pmtx_size = t3_mc7_size(&adapter->pmtx);
+ p->cm_size = t3_mc7_size(&adapter->cm);
+ p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
+ p->chan_tx_size = p->pmtx_size / p->nchan;
+ p->rx_pg_size = 64 * 1024;
+ p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
+ p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
+ p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
+ p->ntimer_qs = p->cm_size >= (128 << 20) ||
+ adapter->params.rev > 0 ? 12 : 6;
+ }
+
+ adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
+ t3_mc7_size(&adapter->pmtx) &&
+ t3_mc7_size(&adapter->cm);
+
+ if (is_offload(adapter)) {
+ adapter->params.mc5.nservers = DEFAULT_NSERVERS;
+ adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
+ DEFAULT_NFILTERS : 0;
+ adapter->params.mc5.nroutes = 0;
+ t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
+
+ init_mtus(adapter->params.mtus);
+ init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
+ }
+
+ early_hw_init(adapter, ai);
+ ret = init_parity(adapter);
+ if (ret)
+ return ret;
+
+ for_each_port(adapter, i) {
+ u8 hw_addr[6];
+ const struct port_type_info *pti;
+ struct port_info *p = adap2pinfo(adapter, i);
+
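+		/*
+		 * Advance j to the next non-zero VPD port-type entry; j is
+		 * declared unsigned and initialized to -1 above, so the
+		 * first pre-increment examines entry 0.
+		 */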
+ while (!adapter->params.vpd.port_type[++j])
+ ;
+
+ pti = &port_types[adapter->params.vpd.port_type[j]];
+ if (!pti->phy_prep) {
+ CH_ALERT(adapter, "Invalid port type index %d\n",
+ adapter->params.vpd.port_type[j]);
+ return -EINVAL;
+ }
+
+ p->phy.mdio.dev = adapter->port[i];
+ ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
+ ai->mdio_ops);
+ if (ret)
+ return ret;
+ mac_prep(&p->mac, adapter, j);
+
+ /*
+ * The VPD EEPROM stores the base Ethernet address for the
+ * card. A port's address is derived from the base by adding
+ * the port's index to the base's low octet.
+ */
+ memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
+ hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
+
+		memcpy(adapter->port[i]->dev_addr, hw_addr, ETH_ALEN);
+		memcpy(adapter->port[i]->perm_addr, hw_addr, ETH_ALEN);
+ init_link_config(&p->link_config, p->phy.caps);
+ p->phy.ops->power_down(&p->phy, 1);
+
+ /*
+ * If the PHY doesn't support interrupts for link status
+ * changes, schedule a scan of the adapter links at least
+ * once a second.
+ */
+ if (!(p->phy.caps & SUPPORTED_IRQ) &&
+ adapter->params.linkpoll_period > 10)
+ adapter->params.linkpoll_period = 10;
+ }
+
+ return 0;
+}
+
+void t3_led_ready(struct adapter *adapter)
+{
+ t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+ F_GPIO0_OUT_VAL);
+}
+
+int t3_replay_prep_adapter(struct adapter *adapter)
+{
+ const struct adapter_info *ai = adapter->params.info;
+ unsigned int i, j = -1;
+ int ret;
+
+ early_hw_init(adapter, ai);
+ ret = init_parity(adapter);
+ if (ret)
+ return ret;
+
+ for_each_port(adapter, i) {
+ const struct port_type_info *pti;
+ struct port_info *p = adap2pinfo(adapter, i);
+
+ while (!adapter->params.vpd.port_type[++j])
+ ;
+
+ pti = &port_types[adapter->params.vpd.port_type[j]];
+ ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
+ if (ret)
+ return ret;
+ p->phy.ops->power_down(&p->phy, 1);
+ }
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h b/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h
new file mode 100644
index 000000000000..705713b56636
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2006-2008 Chelsio Communications. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _T3CDEV_H_
+#define _T3CDEV_H_
+
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/netdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <net/neighbour.h>
+
+#define T3CNAMSIZ 16
+
+struct cxgb3_client;
+
+enum t3ctype {
+ T3A = 0,
+ T3B,
+ T3C,
+};
+
+struct t3cdev {
+ char name[T3CNAMSIZ]; /* T3C device name */
+ enum t3ctype type;
+ struct list_head ofld_dev_list; /* for list linking */
+ struct net_device *lldev; /* LL dev associated with T3C messages */
+ struct proc_dir_entry *proc_dir; /* root of proc dir for this T3C */
+ int (*send)(struct t3cdev *dev, struct sk_buff *skb);
+ int (*recv)(struct t3cdev *dev, struct sk_buff **skb, int n);
+ int (*ctl)(struct t3cdev *dev, unsigned int req, void *data);
+ void (*neigh_update)(struct t3cdev *dev, struct neighbour *neigh);
+ void *priv; /* driver private data */
+ void *l2opt; /* optional layer 2 data */
+ void *l3opt; /* optional layer 3 data */
+ void *l4opt; /* optional layer 4 data */
+ void *ulp; /* ulp stuff */
+ void *ulp_iscsi; /* ulp iscsi */
+};
+
+#endif /* _T3CDEV_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/version.h b/drivers/net/ethernet/chelsio/cxgb3/version.h
new file mode 100644
index 000000000000..8bda06e366c8
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/version.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/* $Date: 2006/10/31 18:57:51 $ $RCSfile: version.h,v $ $Revision: 1.3 $ */
+#ifndef __CHELSIO_VERSION_H
+#define __CHELSIO_VERSION_H
+#define DRV_DESC "Chelsio T3 Network Driver"
+#define DRV_NAME "cxgb3"
+/* Driver version */
+#define DRV_VERSION "1.1.4-ko"
+
+/* Firmware version */
+#define FW_VERSION_MAJOR 7
+#define FW_VERSION_MINOR 10
+#define FW_VERSION_MICRO 0
+#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c b/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c
new file mode 100644
index 000000000000..4f9a1c2724f4
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "common.h"
+
+/* VSC8211 PHY specific registers. */
+enum {
+ VSC8211_SIGDET_CTRL = 19,
+ VSC8211_EXT_CTRL = 23,
+ VSC8211_INTR_ENABLE = 25,
+ VSC8211_INTR_STATUS = 26,
+ VSC8211_LED_CTRL = 27,
+ VSC8211_AUX_CTRL_STAT = 28,
+ VSC8211_EXT_PAGE_AXS = 31,
+};
+
+enum {
+ VSC_INTR_RX_ERR = 1 << 0,
+ VSC_INTR_MS_ERR = 1 << 1, /* master/slave resolution error */
+ VSC_INTR_CABLE = 1 << 2, /* cable impairment */
+ VSC_INTR_FALSE_CARR = 1 << 3, /* false carrier */
+ VSC_INTR_MEDIA_CHG = 1 << 4, /* AMS media change */
+ VSC_INTR_RX_FIFO = 1 << 5, /* Rx FIFO over/underflow */
+ VSC_INTR_TX_FIFO = 1 << 6, /* Tx FIFO over/underflow */
+ VSC_INTR_DESCRAMBL = 1 << 7, /* descrambler lock-lost */
+ VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */
+ VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */
+ VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */
+ VSC_INTR_DPLX_CHG = 1 << 12, /* duplex change */
+ VSC_INTR_LINK_CHG = 1 << 13, /* link change */
+ VSC_INTR_SPD_CHG = 1 << 14, /* speed change */
+ VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */
+};
+
+enum {
+ VSC_CTRL_CLAUSE37_VIEW = 1 << 4, /* Switch to Clause 37 view */
+ VSC_CTRL_MEDIA_MODE_HI = 0xf000 /* High part of media mode select */
+};
+
+#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
+ VSC_INTR_DPLX_CHG | VSC_INTR_SPD_CHG | \
+ VSC_INTR_NEG_DONE)
+#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
+ VSC_INTR_ENABLE)
+
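+/*
+ * The register-field helper macros below follow the convention used
+ * throughout these drivers: S_<field> is the bit offset, M_<field> the
+ * field mask, V_<field>(x) positions a value, G_<field>(x) extracts one,
+ * and F_<field> is a single-bit flag.
+ */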
+/* PHY specific auxiliary control & status register fields */
+#define S_ACSR_ACTIPHY_TMR 0
+#define M_ACSR_ACTIPHY_TMR 0x3
+#define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR)
+
+#define S_ACSR_SPEED 3
+#define M_ACSR_SPEED 0x3
+#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED)
+
+#define S_ACSR_DUPLEX 5
+#define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX)
+
+#define S_ACSR_ACTIPHY 6
+#define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY)
+
+/*
+ * Reset the PHY. This PHY completes reset immediately so we never wait.
+ */
+static int vsc8211_reset(struct cphy *cphy, int wait)
+{
+ return t3_phy_reset(cphy, MDIO_DEVAD_NONE, 0);
+}
+
+static int vsc8211_intr_enable(struct cphy *cphy)
+{
+ return t3_mdio_write(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_ENABLE,
+ INTR_MASK);
+}
+
+static int vsc8211_intr_disable(struct cphy *cphy)
+{
+ return t3_mdio_write(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_ENABLE, 0);
+}
+
+static int vsc8211_intr_clear(struct cphy *cphy)
+{
+ u32 val;
+
+ /* Clear PHY interrupts by reading the register. */
+ return t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_STATUS, &val);
+}
+
+static int vsc8211_autoneg_enable(struct cphy *cphy)
+{
+ return t3_mdio_change_bits(cphy, MDIO_DEVAD_NONE, MII_BMCR,
+ BMCR_PDOWN | BMCR_ISOLATE,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+}
+
+static int vsc8211_autoneg_restart(struct cphy *cphy)
+{
+ return t3_mdio_change_bits(cphy, MDIO_DEVAD_NONE, MII_BMCR,
+ BMCR_PDOWN | BMCR_ISOLATE,
+ BMCR_ANRESTART);
+}
+
+static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok,
+ int *speed, int *duplex, int *fc)
+{
+ unsigned int bmcr, status, lpa, adv;
+ int err, sp = -1, dplx = -1, pause = 0;
+
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMCR, &bmcr);
+ if (!err)
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, &status);
+ if (err)
+ return err;
+
+ if (link_ok) {
+ /*
+ * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
+ * once more to get the current link state.
+ */
+ if (!(status & BMSR_LSTATUS))
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR,
+ &status);
+ if (err)
+ return err;
+ *link_ok = (status & BMSR_LSTATUS) != 0;
+ }
+ if (!(bmcr & BMCR_ANENABLE)) {
+ dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+ if (bmcr & BMCR_SPEED1000)
+ sp = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ sp = SPEED_100;
+ else
+ sp = SPEED_10;
+ } else if (status & BMSR_ANEGCOMPLETE) {
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_AUX_CTRL_STAT,
+ &status);
+ if (err)
+ return err;
+
+ dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
+ sp = G_ACSR_SPEED(status);
+ if (sp == 0)
+ sp = SPEED_10;
+ else if (sp == 1)
+ sp = SPEED_100;
+ else
+ sp = SPEED_1000;
+
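+		/*
+		 * Resolve flow control from the advertised PAUSE bits:
+		 * symmetric pause if both sides advertise pause capability,
+		 * otherwise one direction only, per the asymmetric-pause
+		 * resolution of IEEE 802.3 Annex 28B.
+		 */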
+ if (fc && dplx == DUPLEX_FULL) {
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_LPA,
+ &lpa);
+ if (!err)
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE,
+ MII_ADVERTISE, &adv);
+ if (err)
+ return err;
+
+ if (lpa & adv & ADVERTISE_PAUSE_CAP)
+ pause = PAUSE_RX | PAUSE_TX;
+ else if ((lpa & ADVERTISE_PAUSE_CAP) &&
+ (lpa & ADVERTISE_PAUSE_ASYM) &&
+ (adv & ADVERTISE_PAUSE_ASYM))
+ pause = PAUSE_TX;
+ else if ((lpa & ADVERTISE_PAUSE_ASYM) &&
+ (adv & ADVERTISE_PAUSE_CAP))
+ pause = PAUSE_RX;
+ }
+ }
+ if (speed)
+ *speed = sp;
+ if (duplex)
+ *duplex = dplx;
+ if (fc)
+ *fc = pause;
+ return 0;
+}
+
+static int vsc8211_get_link_status_fiber(struct cphy *cphy, int *link_ok,
+ int *speed, int *duplex, int *fc)
+{
+ unsigned int bmcr, status, lpa, adv;
+ int err, sp = -1, dplx = -1, pause = 0;
+
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMCR, &bmcr);
+ if (!err)
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, &status);
+ if (err)
+ return err;
+
+ if (link_ok) {
+ /*
+ * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
+ * once more to get the current link state.
+ */
+ if (!(status & BMSR_LSTATUS))
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR,
+ &status);
+ if (err)
+ return err;
+ *link_ok = (status & BMSR_LSTATUS) != 0;
+ }
+ if (!(bmcr & BMCR_ANENABLE)) {
+ dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+ if (bmcr & BMCR_SPEED1000)
+ sp = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ sp = SPEED_100;
+ else
+ sp = SPEED_10;
+ } else if (status & BMSR_ANEGCOMPLETE) {
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_LPA, &lpa);
+ if (!err)
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_ADVERTISE,
+ &adv);
+ if (err)
+ return err;
+
+ if (adv & lpa & ADVERTISE_1000XFULL) {
+ dplx = DUPLEX_FULL;
+ sp = SPEED_1000;
+ } else if (adv & lpa & ADVERTISE_1000XHALF) {
+ dplx = DUPLEX_HALF;
+ sp = SPEED_1000;
+ }
+
+ if (fc && dplx == DUPLEX_FULL) {
+ if (lpa & adv & ADVERTISE_1000XPAUSE)
+ pause = PAUSE_RX | PAUSE_TX;
+ else if ((lpa & ADVERTISE_1000XPAUSE) &&
+ (adv & lpa & ADVERTISE_1000XPSE_ASYM))
+ pause = PAUSE_TX;
+ else if ((lpa & ADVERTISE_1000XPSE_ASYM) &&
+ (adv & ADVERTISE_1000XPAUSE))
+ pause = PAUSE_RX;
+ }
+ }
+ if (speed)
+ *speed = sp;
+ if (duplex)
+ *duplex = dplx;
+ if (fc)
+ *fc = pause;
+ return 0;
+}
+
+#ifdef UNUSED
+/*
+ * Enable/disable auto MDI/MDI-X in forced link speed mode.
+ */
+static int vsc8211_set_automdi(struct cphy *phy, int enable)
+{
+ int err;
+
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0x52b5);
+ if (err)
+ return err;
+
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 18, 0x12);
+ if (err)
+ return err;
+
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 17, enable ? 0x2803 : 0x3003);
+ if (err)
+ return err;
+
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 16, 0x87fa);
+ if (err)
+ return err;
+
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int vsc8211_set_speed_duplex(struct cphy *phy, int speed, int duplex)
+{
+ int err;
+
+ err = t3_set_phy_speed_duplex(phy, speed, duplex);
+ if (!err)
+ err = vsc8211_set_automdi(phy, 1);
+ return err;
+}
+#endif /* UNUSED */
+
+static int vsc8211_power_down(struct cphy *cphy, int enable)
+{
+ return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN,
+ enable ? BMCR_PDOWN : 0);
+}
+
+static int vsc8211_intr_handler(struct cphy *cphy)
+{
+ unsigned int cause;
+ int err, cphy_cause = 0;
+
+ err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_STATUS, &cause);
+ if (err)
+ return err;
+
+ cause &= INTR_MASK;
+ if (cause & CFG_CHG_INTR_MASK)
+ cphy_cause |= cphy_cause_link_change;
+ if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO))
+ cphy_cause |= cphy_cause_fifo_error;
+ return cphy_cause;
+}
+
+static struct cphy_ops vsc8211_ops = {
+ .reset = vsc8211_reset,
+ .intr_enable = vsc8211_intr_enable,
+ .intr_disable = vsc8211_intr_disable,
+ .intr_clear = vsc8211_intr_clear,
+ .intr_handler = vsc8211_intr_handler,
+ .autoneg_enable = vsc8211_autoneg_enable,
+ .autoneg_restart = vsc8211_autoneg_restart,
+ .advertise = t3_phy_advertise,
+ .set_speed_duplex = t3_set_phy_speed_duplex,
+ .get_link_status = vsc8211_get_link_status,
+ .power_down = vsc8211_power_down,
+};
+
+static struct cphy_ops vsc8211_fiber_ops = {
+ .reset = vsc8211_reset,
+ .intr_enable = vsc8211_intr_enable,
+ .intr_disable = vsc8211_intr_disable,
+ .intr_clear = vsc8211_intr_clear,
+ .intr_handler = vsc8211_intr_handler,
+ .autoneg_enable = vsc8211_autoneg_enable,
+ .autoneg_restart = vsc8211_autoneg_restart,
+ .advertise = t3_phy_advertise_fiber,
+ .set_speed_duplex = t3_set_phy_speed_duplex,
+ .get_link_status = vsc8211_get_link_status_fiber,
+ .power_down = vsc8211_power_down,
+};
+
+int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
+ int phy_addr, const struct mdio_ops *mdio_ops)
+{
+ int err;
+ unsigned int val;
+
+ cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops,
+ SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII |
+ SUPPORTED_TP | SUPPORTED_IRQ, "10/100/1000BASE-T");
+ msleep(20); /* PHY needs ~10ms to start responding to MDIO */
+
+ err = t3_mdio_read(phy, MDIO_DEVAD_NONE, VSC8211_EXT_CTRL, &val);
+ if (err)
+ return err;
+ if (val & VSC_CTRL_MEDIA_MODE_HI) {
+ /* copper interface, just need to configure the LEDs */
+ return t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_LED_CTRL,
+ 0x100);
+ }
+
+ phy->caps = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+ SUPPORTED_MII | SUPPORTED_FIBRE | SUPPORTED_IRQ;
+ phy->desc = "1000BASE-X";
+ phy->ops = &vsc8211_fiber_ops;
+
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 1);
+ if (err)
+ return err;
+
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_SIGDET_CTRL, 1);
+ if (err)
+ return err;
+
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0);
+ if (err)
+ return err;
+
+ err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_CTRL,
+ val | VSC_CTRL_CLAUSE37_VIEW);
+ if (err)
+ return err;
+
+ err = vsc8211_reset(phy, 0);
+ if (err)
+ return err;
+
+ udelay(5); /* delay after reset before next SMI */
+ return 0;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/xgmac.c b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c
new file mode 100644
index 000000000000..3af19a550372
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c
@@ -0,0 +1,657 @@
+/*
+ * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "common.h"
+#include "regs.h"
+
+/*
+ * # of exact address filters. The first one is used for the station address,
+ * the rest are available for multicast addresses.
+ */
+#define EXACT_ADDR_FILTERS 8
+
+static inline int macidx(const struct cmac *mac)
+{
+ return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
+}
+
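+/*
+ * Reset the XAUI SERDES: assert all lane resets, power-downs and PLL
+ * resets at once, then release them in the staged order given by clear[],
+ * with a short settling delay between steps.
+ */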
+static void xaui_serdes_reset(struct cmac *mac)
+{
+ static const unsigned int clear[] = {
+ F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
+ F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
+ };
+
+ int i;
+ struct adapter *adap = mac->adapter;
+ u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;
+
+ t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
+ F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
+ F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
+ F_RESETPLL23 | F_RESETPLL01);
+ t3_read_reg(adap, ctrl);
+ udelay(15);
+
+ for (i = 0; i < ARRAY_SIZE(clear); i++) {
+ t3_set_reg_field(adap, ctrl, clear[i], 0);
+ udelay(15);
+ }
+}
+
+void t3b_pcs_reset(struct cmac *mac)
+{
+ t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
+ F_PCS_RESET_, 0);
+ udelay(20);
+ t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
+ F_PCS_RESET_);
+}
+
+int t3_mac_reset(struct cmac *mac)
+{
+ static const struct addr_val_pair mac_reset_avp[] = {
+ {A_XGM_TX_CTRL, 0},
+ {A_XGM_RX_CTRL, 0},
+ {A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
+ F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
+ {A_XGM_RX_HASH_LOW, 0},
+ {A_XGM_RX_HASH_HIGH, 0},
+ {A_XGM_RX_EXACT_MATCH_LOW_1, 0},
+ {A_XGM_RX_EXACT_MATCH_LOW_2, 0},
+ {A_XGM_RX_EXACT_MATCH_LOW_3, 0},
+ {A_XGM_RX_EXACT_MATCH_LOW_4, 0},
+ {A_XGM_RX_EXACT_MATCH_LOW_5, 0},
+ {A_XGM_RX_EXACT_MATCH_LOW_6, 0},
+ {A_XGM_RX_EXACT_MATCH_LOW_7, 0},
+ {A_XGM_RX_EXACT_MATCH_LOW_8, 0},
+ {A_XGM_STAT_CTRL, F_CLRSTATS}
+ };
+ u32 val;
+ struct adapter *adap = mac->adapter;
+ unsigned int oft = mac->offset;
+
+ t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
+ t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
+
+ t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
+ t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
+ F_RXSTRFRWRD | F_DISERRFRAMES,
+ uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
+ t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0, F_UNDERUNFIX);
+
+ if (uses_xaui(adap)) {
+ if (adap->params.rev == 0) {
+ t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
+ F_RXENABLE | F_TXENABLE);
+ if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
+ F_CMULOCK, 1, 5, 2)) {
+ CH_ERR(adap,
+ "MAC %d XAUI SERDES CMU lock failed\n",
+ macidx(mac));
+ return -1;
+ }
+ t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
+ F_SERDESRESET_);
+ } else
+ xaui_serdes_reset(mac);
+ }
+
+ t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + oft,
+ V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE),
+ V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE) | F_RXENFRAMER);
+ val = F_MAC_RESET_ | F_XGMAC_STOP_EN;
+
+ if (is_10G(adap))
+ val |= F_PCS_RESET_;
+ else if (uses_xaui(adap))
+ val |= F_PCS_RESET_ | F_XG2G_RESET_;
+ else
+ val |= F_RGMII_RESET_ | F_XG2G_RESET_;
+ t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
+ t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
+ if ((val & F_PCS_RESET_) && adap->params.rev) {
+ msleep(1);
+ t3b_pcs_reset(mac);
+ }
+
+ memset(&mac->stats, 0, sizeof(mac->stats));
+ return 0;
+}
+
+static int t3b2_mac_reset(struct cmac *mac)
+{
+ struct adapter *adap = mac->adapter;
+ unsigned int oft = mac->offset, store;
+ int idx = macidx(mac);
+ u32 val;
+
+ if (!macidx(mac))
+ t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
+ else
+ t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);
+
+ /* Stop NIC traffic to reduce the number of TXTOGGLES */
+ t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 0);
+ /* Ensure TX drains */
+ t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, 0);
+
+ t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
+ t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
+
+ /* Store A_TP_TX_DROP_CFG_CH0 */
+ t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+ store = t3_read_reg(adap, A_TP_TX_DROP_CFG_CH0 + idx);
+
+ msleep(10);
+
+ /* Change DROP_CFG to 0xc0000011 */
+ t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+ t3_write_reg(adap, A_TP_PIO_DATA, 0xc0000011);
+
+ /* Check for xgm Rx fifo empty */
+	/* Loop up to 1000 times (was 5) to cover the 1G and 100Mbps cases */
+ if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
+ 0x80000000, 1, 1000, 2)) {
+ CH_ERR(adap, "MAC %d Rx fifo drain failed\n",
+ macidx(mac));
+ return -1;
+ }
+
+ t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0);
+ t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
+
+ val = F_MAC_RESET_;
+ if (is_10G(adap))
+ val |= F_PCS_RESET_;
+ else if (uses_xaui(adap))
+ val |= F_PCS_RESET_ | F_XG2G_RESET_;
+ else
+ val |= F_RGMII_RESET_ | F_XG2G_RESET_;
+ t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
+ t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
+ if ((val & F_PCS_RESET_) && adap->params.rev) {
+ msleep(1);
+ t3b_pcs_reset(mac);
+ }
+ t3_write_reg(adap, A_XGM_RX_CFG + oft,
+ F_DISPAUSEFRAMES | F_EN1536BFRAMES |
+ F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);
+
+ /* Restore the DROP_CFG */
+ t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+ t3_write_reg(adap, A_TP_PIO_DATA, store);
+
+ if (!idx)
+ t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
+ else
+ t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);
+
+	/* Re-enable NIC traffic */
+	t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 1);
+
+ return 0;
+}
+
+/*
+ * Set the exact match register 'idx' to recognize the given Ethernet address.
+ */
+static void set_addr_filter(struct cmac *mac, int idx, const u8 *addr)
+{
+ u32 addr_lo, addr_hi;
+ unsigned int oft = mac->offset + idx * 8;
+
+ addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ addr_hi = (addr[5] << 8) | addr[4];
+
+ t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
+ t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
+}
+
+/* Set one of the station's unicast MAC addresses. */
+int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
+{
+ if (idx >= mac->nucast)
+ return -EINVAL;
+ set_addr_filter(mac, idx, addr);
+ return 0;
+}
+
+/*
+ * Specify the number of exact address filters that should be reserved for
+ * unicast addresses. Caller should reload the unicast and multicast addresses
+ * after calling this.
+ */
+int t3_mac_set_num_ucast(struct cmac *mac, int n)
+{
+ if (n > EXACT_ADDR_FILTERS)
+ return -EINVAL;
+ mac->nucast = n;
+ return 0;
+}
+
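+/*
+ * The next two functions disarm and re-arm the exact-match filters by
+ * rewriting each register half with its current contents: writing the LOW
+ * half of an entry is presumed to invalidate it until its HIGH half is
+ * rewritten, so the programmed addresses survive the round trip.  (This
+ * reading of the XGMAC exact-match semantics is an editor's inference from
+ * how t3_mac_set_mtu brackets the Rx FIFO drain with these calls.)
+ */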
+void t3_mac_disable_exact_filters(struct cmac *mac)
+{
+ unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_LOW_1;
+
+ for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
+ u32 v = t3_read_reg(mac->adapter, reg);
+ t3_write_reg(mac->adapter, reg, v);
+ }
+ t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */
+}
+
+void t3_mac_enable_exact_filters(struct cmac *mac)
+{
+ unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_HIGH_1;
+
+ for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
+ u32 v = t3_read_reg(mac->adapter, reg);
+ t3_write_reg(mac->adapter, reg, v);
+ }
+ t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */
+}
+
+/* Calculate the RX hash filter index of an Ethernet address */
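+/*
+ * The 48 address bits are XOR-folded into 6 bits, selecting one of the 64
+ * hash-table positions split across the RX_HASH_LOW/RX_HASH_HIGH registers
+ * (see t3_mac_set_rx_mode below).
+ */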
+static int hash_hw_addr(const u8 *addr)
+{
+ int hash = 0, octet, bit, i = 0, c;
+
+ for (octet = 0; octet < 6; ++octet)
+ for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
+ hash ^= (c & 1) << i;
+ if (++i == 6)
+ i = 0;
+ }
+ return hash;
+}
+
+int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev)
+{
+ u32 val, hash_lo, hash_hi;
+ struct adapter *adap = mac->adapter;
+ unsigned int oft = mac->offset;
+
+ val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
+ if (dev->flags & IFF_PROMISC)
+ val |= F_COPYALLFRAMES;
+ t3_write_reg(adap, A_XGM_RX_CFG + oft, val);
+
+	if (dev->flags & IFF_ALLMULTI) {
+		hash_lo = hash_hi = 0xffffffff;
+	} else {
+ struct netdev_hw_addr *ha;
+ int exact_addr_idx = mac->nucast;
+
+ hash_lo = hash_hi = 0;
+		netdev_for_each_mc_addr(ha, dev) {
+			if (exact_addr_idx < EXACT_ADDR_FILTERS) {
+				set_addr_filter(mac, exact_addr_idx++,
+						ha->addr);
+			} else {
+				int hash = hash_hw_addr(ha->addr);
+
+				if (hash < 32)
+					hash_lo |= (1 << hash);
+				else
+					hash_hi |= (1 << (hash - 32));
+			}
+		}
+ }
+
+ t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
+ t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
+ return 0;
+}
+
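+/*
+ * Choose the Rx FIFO high-water mark for sending PAUSE frames: leave room
+ * for roughly three maximum-size frames, but never let the mark drop below
+ * 38% of the FIFO or rise to within 8 KB of its top.  E.g., assuming a
+ * 32 KB MAC_RXFIFO_SIZE and mtu = 1500: max(32768 - 4500, 12451) = 28268,
+ * capped to 32768 - 8192 = 24576.
+ */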
+static int rx_fifo_hwm(int mtu)
+{
+ int hwm;
+
+ hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, (MAC_RXFIFO_SIZE * 38) / 100);
+ return min(hwm, MAC_RXFIFO_SIZE - 8192);
+}
+
+int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
+{
+ int hwm, lwm, divisor;
+ int ipg;
+ unsigned int thres, v, reg;
+ struct adapter *adap = mac->adapter;
+
+	/*
+	 * MAX_FRAME_SIZE includes header + FCS, mtu doesn't.  The HW max
+	 * packet size register includes header, but not FCS.
+	 */
+ mtu += 14;
+ if (mtu > 1536)
+ mtu += 4;
+
+ if (mtu > MAX_FRAME_SIZE - 4)
+ return -EINVAL;
+ t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
+
+ if (adap->params.rev >= T3_REV_B2 &&
+ (t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) {
+ t3_mac_disable_exact_filters(mac);
+ v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset);
+ t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset,
+ F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST);
+
+ reg = adap->params.rev == T3_REV_B2 ?
+ A_XGM_RX_MAX_PKT_SIZE_ERR_CNT : A_XGM_RXFIFO_CFG;
+
+ /* drain RX FIFO */
+ if (t3_wait_op_done(adap, reg + mac->offset,
+ F_RXFIFO_EMPTY, 1, 20, 5)) {
+ t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
+ t3_mac_enable_exact_filters(mac);
+ return -EIO;
+ }
+ t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
+ V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
+ V_RXMAXPKTSIZE(mtu));
+ t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
+ t3_mac_enable_exact_filters(mac);
+ } else
+ t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
+ V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
+ V_RXMAXPKTSIZE(mtu));
+
+ /*
+ * Adjust the PAUSE frame watermarks. We always set the LWM, and the
+ * HWM only if flow-control is enabled.
+ */
+ hwm = rx_fifo_hwm(mtu);
+ lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);
+ v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
+ v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
+ v |= V_RXFIFOPAUSELWM(lwm / 8);
+ if (G_RXFIFOPAUSEHWM(v))
+ v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
+ V_RXFIFOPAUSEHWM(hwm / 8);
+
+ t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);
+
+ /* Adjust the TX FIFO threshold based on the MTU */
+ thres = (adap->params.vpd.cclk * 1000) / 15625;
+ thres = (thres * mtu) / 1000;
+ if (is_10G(adap))
+ thres /= 10;
+ thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
+ thres = max(thres, 8U); /* need at least 8 */
+ ipg = (adap->params.rev == T3_REV_C) ? 0 : 1;
+ t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
+ V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG),
+ V_TXFIFOTHRESH(thres) | V_TXIPG(ipg));
+
+ if (adap->params.rev > 0) {
+ divisor = (adap->params.rev == T3_REV_C) ? 64 : 8;
+ t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
+ (hwm - lwm) * 4 / divisor);
+ }
+ t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
+ MAC_RXFIFO_SIZE * 4 * 8 / 512);
+ return 0;
+}
+
+int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
+{
+ u32 val;
+ struct adapter *adap = mac->adapter;
+ unsigned int oft = mac->offset;
+
+ if (duplex >= 0 && duplex != DUPLEX_FULL)
+ return -EINVAL;
+ if (speed >= 0) {
+ if (speed == SPEED_10)
+ val = V_PORTSPEED(0);
+ else if (speed == SPEED_100)
+ val = V_PORTSPEED(1);
+ else if (speed == SPEED_1000)
+ val = V_PORTSPEED(2);
+ else if (speed == SPEED_10000)
+ val = V_PORTSPEED(3);
+ else
+ return -EINVAL;
+
+ t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
+ V_PORTSPEED(M_PORTSPEED), val);
+ }
+
+ val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
+ val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
+ if (fc & PAUSE_TX) {
+ u32 rx_max_pkt_size =
+ G_RXMAXPKTSIZE(t3_read_reg(adap,
+ A_XGM_RX_MAX_PKT_SIZE + oft));
+ val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(rx_max_pkt_size) / 8);
+ }
+ t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
+
+ t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
+ (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
+ return 0;
+}
+
+int t3_mac_enable(struct cmac *mac, int which)
+{
+ int idx = macidx(mac);
+ struct adapter *adap = mac->adapter;
+ unsigned int oft = mac->offset;
+ struct mac_stats *s = &mac->stats;
+
+ if (which & MAC_DIRECTION_TX) {
+ t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+ t3_write_reg(adap, A_TP_PIO_DATA,
+ adap->params.rev == T3_REV_C ?
+ 0xc4ffff01 : 0xc0ede401);
+ t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
+ t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx,
+ adap->params.rev == T3_REV_C ? 0 : 1 << idx);
+
+ t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
+
+ t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx);
+ mac->tx_mcnt = s->tx_frames;
+ mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
+ A_TP_PIO_DATA)));
+ mac->tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
+ A_XGM_TX_SPI4_SOP_EOP_CNT +
+ oft)));
+ mac->rx_mcnt = s->rx_frames;
+ mac->rx_pause = s->rx_pause;
+ mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
+ A_XGM_RX_SPI4_SOP_EOP_CNT +
+ oft)));
+ mac->rx_ocnt = s->rx_fifo_ovfl;
+ mac->txen = F_TXEN;
+ mac->toggle_cnt = 0;
+ }
+ if (which & MAC_DIRECTION_RX)
+ t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
+ return 0;
+}
+
+int t3_mac_disable(struct cmac *mac, int which)
+{
+ struct adapter *adap = mac->adapter;
+
+ if (which & MAC_DIRECTION_TX) {
+ t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
+ mac->txen = 0;
+ }
+ if (which & MAC_DIRECTION_RX) {
+ int val = F_MAC_RESET_;
+
+ t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
+ F_PCS_RESET_, 0);
+ msleep(100);
+ t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
+ if (is_10G(adap))
+ val |= F_PCS_RESET_;
+ else if (uses_xaui(adap))
+ val |= F_PCS_RESET_ | F_XG2G_RESET_;
+ else
+ val |= F_RGMII_RESET_ | F_XG2G_RESET_;
+ t3_write_reg(mac->adapter, A_XGM_RESET_CTRL + mac->offset, val);
+ }
+ return 0;
+}
+
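+/*
+ * Periodic T3B2 MAC health check.  Returns 0 when Tx is making progress,
+ * 1 after toggling Tx enable to nudge a stalled MAC, and 2 after falling
+ * back to a full t3b2_mac_reset() once several toggles have not helped
+ * (a summary of the status codes assigned below).
+ */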
+int t3b2_mac_watchdog_task(struct cmac *mac)
+{
+ struct adapter *adap = mac->adapter;
+ struct mac_stats *s = &mac->stats;
+ unsigned int tx_tcnt, tx_xcnt;
+ u64 tx_mcnt = s->tx_frames;
+ int status;
+
+ status = 0;
+ tx_xcnt = 1; /* By default tx_xcnt is making progress */
+ tx_tcnt = mac->tx_tcnt; /* If tx_mcnt is progressing ignore tx_tcnt */
+ if (tx_mcnt == mac->tx_mcnt && mac->rx_pause == s->rx_pause) {
+ tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
+ A_XGM_TX_SPI4_SOP_EOP_CNT +
+ mac->offset)));
+ if (tx_xcnt == 0) {
+ t3_write_reg(adap, A_TP_PIO_ADDR,
+ A_TP_TX_DROP_CNT_CH0 + macidx(mac));
+ tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
+ A_TP_PIO_DATA)));
+ } else {
+ goto out;
+ }
+ } else {
+ mac->toggle_cnt = 0;
+ goto out;
+ }
+
+ if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) {
+ if (mac->toggle_cnt > 4) {
+ status = 2;
+ goto out;
+ } else {
+ status = 1;
+ goto out;
+ }
+ } else {
+ mac->toggle_cnt = 0;
+ goto out;
+ }
+
+out:
+ mac->tx_tcnt = tx_tcnt;
+ mac->tx_xcnt = tx_xcnt;
+ mac->tx_mcnt = s->tx_frames;
+ mac->rx_pause = s->rx_pause;
+ if (status == 1) {
+ t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
+ t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */
+ t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, mac->txen);
+ t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */
+ mac->toggle_cnt++;
+ } else if (status == 2) {
+ t3b2_mac_reset(mac);
+ mac->toggle_cnt = 0;
+ }
+ return status;
+}
+
+/*
+ * This function is called periodically to accumulate the current values of the
+ * RMON counters into the port statistics.  Since the packet counters are only
+ * 32 bits they can overflow in ~286 secs at 10G (2^32 frames at the ~14.9M
+ * minimum-size frames/sec line rate), so the function should be called more
+ * frequently than that.  The byte counters are 45 bits wide; at 1.25 GB/s
+ * they would overflow in ~7.8 hours.
+ */
+const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
+{
+#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
+#define RMON_UPDATE(mac, name, reg) \
+ (mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
+#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
+ (mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
+ ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)
+
+ u32 v, lo;
+
+ RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
+ RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
+ RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
+ RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
+ RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
+ RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
+ RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
+ RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
+ RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);
+
+ RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);
+
+ v = RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
+ if (mac->adapter->params.rev == T3_REV_B2)
+ v &= 0x7fffffff;
+ mac->stats.rx_too_long += v;
+
+ RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);
+
+ RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
+ RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
+ RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
+ RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
+ RMON_UPDATE(mac, tx_pause, TX_PAUSE);
+ /* This counts error frames in general (bad FCS, underrun, etc). */
+ RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);
+
+ RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);
+
+ /* The next stat isn't clear-on-read. */
+ t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
+ v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
+ lo = (u32) mac->stats.rx_cong_drops;
+ mac->stats.rx_cong_drops += (u64) (v - lo);
+
+ return &mac->stats;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
new file mode 100644
index 000000000000..498667487f52
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -0,0 +1,7 @@
+#
+# Chelsio T4 driver
+#
+
+obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
+
+cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
new file mode 100644
index 000000000000..223a7f72343b
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -0,0 +1,722 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_H__
+#define __CXGB4_H__
+
+#include <linux/bitops.h>
+#include <linux/cache.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <asm/io.h>
+#include "cxgb4_uld.h"
+#include "t4_hw.h"
+
+#define FW_VERSION_MAJOR 1
+#define FW_VERSION_MINOR 1
+#define FW_VERSION_MICRO 0
+
+enum {
+ MAX_NPORTS = 4, /* max # of ports */
+ SERNUM_LEN = 24, /* Serial # length */
+ EC_LEN = 16, /* E/C length */
+ ID_LEN = 16, /* ID length */
+};
+
+enum {
+ MEM_EDC0,
+ MEM_EDC1,
+ MEM_MC
+};
+
+enum dev_master {
+ MASTER_CANT,
+ MASTER_MAY,
+ MASTER_MUST
+};
+
+enum dev_state {
+ DEV_STATE_UNINIT,
+ DEV_STATE_INIT,
+ DEV_STATE_ERR
+};
+
+enum {
+ PAUSE_RX = 1 << 0,
+ PAUSE_TX = 1 << 1,
+ PAUSE_AUTONEG = 1 << 2
+};
+
+struct port_stats {
+ u64 tx_octets; /* total # of octets in good frames */
+ u64 tx_frames; /* all good frames */
+ u64 tx_bcast_frames; /* all broadcast frames */
+ u64 tx_mcast_frames; /* all multicast frames */
+ u64 tx_ucast_frames; /* all unicast frames */
+ u64 tx_error_frames; /* all error frames */
+
+ u64 tx_frames_64; /* # of Tx frames in a particular range */
+ u64 tx_frames_65_127;
+ u64 tx_frames_128_255;
+ u64 tx_frames_256_511;
+ u64 tx_frames_512_1023;
+ u64 tx_frames_1024_1518;
+ u64 tx_frames_1519_max;
+
+ u64 tx_drop; /* # of dropped Tx frames */
+ u64 tx_pause; /* # of transmitted pause frames */
+ u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */
+ u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */
+ u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */
+ u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */
+ u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */
+ u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */
+ u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */
+ u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */
+
+ u64 rx_octets; /* total # of octets in good frames */
+ u64 rx_frames; /* all good frames */
+ u64 rx_bcast_frames; /* all broadcast frames */
+ u64 rx_mcast_frames; /* all multicast frames */
+ u64 rx_ucast_frames; /* all unicast frames */
+ u64 rx_too_long; /* # of frames exceeding MTU */
+ u64 rx_jabber; /* # of jabber frames */
+ u64 rx_fcs_err; /* # of received frames with bad FCS */
+ u64 rx_len_err; /* # of received frames with length error */
+ u64 rx_symbol_err; /* symbol errors */
+ u64 rx_runt; /* # of short frames */
+
+ u64 rx_frames_64; /* # of Rx frames in a particular range */
+ u64 rx_frames_65_127;
+ u64 rx_frames_128_255;
+ u64 rx_frames_256_511;
+ u64 rx_frames_512_1023;
+ u64 rx_frames_1024_1518;
+ u64 rx_frames_1519_max;
+
+ u64 rx_pause; /* # of received pause frames */
+ u64 rx_ppp0; /* # of received PPP prio 0 frames */
+ u64 rx_ppp1; /* # of received PPP prio 1 frames */
+ u64 rx_ppp2; /* # of received PPP prio 2 frames */
+ u64 rx_ppp3; /* # of received PPP prio 3 frames */
+ u64 rx_ppp4; /* # of received PPP prio 4 frames */
+ u64 rx_ppp5; /* # of received PPP prio 5 frames */
+ u64 rx_ppp6; /* # of received PPP prio 6 frames */
+ u64 rx_ppp7; /* # of received PPP prio 7 frames */
+
+ u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */
+ u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */
+ u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */
+ u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */
+ u64 rx_trunc0; /* buffer-group 0 truncated packets */
+ u64 rx_trunc1; /* buffer-group 1 truncated packets */
+ u64 rx_trunc2; /* buffer-group 2 truncated packets */
+ u64 rx_trunc3; /* buffer-group 3 truncated packets */
+};
+
+struct lb_port_stats {
+ u64 octets;
+ u64 frames;
+ u64 bcast_frames;
+ u64 mcast_frames;
+ u64 ucast_frames;
+ u64 error_frames;
+
+ u64 frames_64;
+ u64 frames_65_127;
+ u64 frames_128_255;
+ u64 frames_256_511;
+ u64 frames_512_1023;
+ u64 frames_1024_1518;
+ u64 frames_1519_max;
+
+ u64 drop;
+
+ u64 ovflow0;
+ u64 ovflow1;
+ u64 ovflow2;
+ u64 ovflow3;
+ u64 trunc0;
+ u64 trunc1;
+ u64 trunc2;
+ u64 trunc3;
+};
+
+struct tp_tcp_stats {
+ u32 tcpOutRsts;
+ u64 tcpInSegs;
+ u64 tcpOutSegs;
+ u64 tcpRetransSegs;
+};
+
+struct tp_err_stats {
+ u32 macInErrs[4];
+ u32 hdrInErrs[4];
+ u32 tcpInErrs[4];
+ u32 tnlCongDrops[4];
+ u32 ofldChanDrops[4];
+ u32 tnlTxDrops[4];
+ u32 ofldVlanDrops[4];
+ u32 tcp6InErrs[4];
+ u32 ofldNoNeigh;
+ u32 ofldCongDefer;
+};
+
+struct tp_params {
+ unsigned int ntxchan; /* # of Tx channels */
+ unsigned int tre; /* log2 of core clocks per TP tick */
+};
+
+struct vpd_params {
+ unsigned int cclk;
+ u8 ec[EC_LEN + 1];
+ u8 sn[SERNUM_LEN + 1];
+ u8 id[ID_LEN + 1];
+};
+
+struct pci_params {
+ unsigned char speed;
+ unsigned char width;
+};
+
+struct adapter_params {
+ struct tp_params tp;
+ struct vpd_params vpd;
+ struct pci_params pci;
+
+ unsigned int sf_size; /* serial flash size in bytes */
+ unsigned int sf_nsec; /* # of flash sectors */
+ unsigned int sf_fw_start; /* start of FW image in flash */
+
+ unsigned int fw_vers;
+ unsigned int tp_vers;
+ u8 api_vers[7];
+
+ unsigned short mtus[NMTUS];
+ unsigned short a_wnd[NCCTRL_WIN];
+ unsigned short b_wnd[NCCTRL_WIN];
+
+ unsigned char nports; /* # of ethernet ports */
+ unsigned char portvec;
+ unsigned char rev; /* chip revision */
+ unsigned char offload;
+
+ unsigned int ofldq_wr_cred;
+};
+
+struct trace_params {
+ u32 data[TRACE_LEN / 4];
+ u32 mask[TRACE_LEN / 4];
+ unsigned short snap_len;
+ unsigned short min_len;
+ unsigned char skip_ofst;
+ unsigned char skip_len;
+ unsigned char invert;
+ unsigned char port;
+};
+
+struct link_config {
+ unsigned short supported; /* link capabilities */
+ unsigned short advertising; /* advertised capabilities */
+ unsigned short requested_speed; /* speed user has requested */
+ unsigned short speed; /* actual link speed */
+ unsigned char requested_fc; /* flow control user has requested */
+ unsigned char fc; /* actual link flow control */
+ unsigned char autoneg; /* autonegotiating? */
+ unsigned char link_ok; /* link up? */
+};
+
+#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
+
+enum {
+ MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */
+ MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */
+ MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
+ MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
+};
+
+enum {
+ MAX_EGRQ = 128, /* max # of egress queues, including FLs */
+ MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */
+};
+
+struct adapter;
+struct sge_rspq;
+
+struct port_info {
+ struct adapter *adapter;
+ u16 viid;
+ s16 xact_addr_filt; /* index of exact MAC address filter */
+ u16 rss_size; /* size of VI's RSS table slice */
+ s8 mdio_addr;
+ u8 port_type;
+ u8 mod_type;
+ u8 port_id;
+ u8 tx_chan;
+ u8 lport; /* associated offload logical port */
+ u8 nqsets; /* # of qsets */
+ u8 first_qset; /* index of first qset */
+ u8 rss_mode;
+ struct link_config link_cfg;
+ u16 *rss;
+};
+
+struct dentry;
+struct work_struct;
+
+enum { /* adapter flags */
+ FULL_INIT_DONE = (1 << 0),
+ USING_MSI = (1 << 1),
+ USING_MSIX = (1 << 2),
+ FW_OK = (1 << 4),
+};
+
+struct rx_sw_desc;
+
+struct sge_fl { /* SGE free-buffer queue state */
+ unsigned int avail; /* # of available Rx buffers */
+ unsigned int pend_cred; /* new buffers since last FL DB ring */
+ unsigned int cidx; /* consumer index */
+ unsigned int pidx; /* producer index */
+ unsigned long alloc_failed; /* # of times buffer allocation failed */
+ unsigned long large_alloc_failed;
+ unsigned long starving;
+ /* RO fields */
+ unsigned int cntxt_id; /* SGE context id for the free list */
+ unsigned int size; /* capacity of free list */
+ struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
+ __be64 *desc; /* address of HW Rx descriptor ring */
+ dma_addr_t addr; /* bus address of HW ring start */
+};
+
+/* A packet gather list */
+struct pkt_gl {
+ skb_frag_t frags[MAX_SKB_FRAGS];
+ void *va; /* virtual address of first byte */
+ unsigned int nfrags; /* # of fragments */
+ unsigned int tot_len; /* total length of fragments */
+};
+
+typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *gl);
+
+struct sge_rspq { /* state for an SGE response queue */
+ struct napi_struct napi;
+ const __be64 *cur_desc; /* current descriptor in queue */
+ unsigned int cidx; /* consumer index */
+ u8 gen; /* current generation bit */
+ u8 intr_params; /* interrupt holdoff parameters */
+ u8 next_intr_params; /* holdoff params for next interrupt */
+ u8 pktcnt_idx; /* interrupt packet threshold */
+ u8 uld; /* ULD handling this queue */
+ u8 idx; /* queue index within its group */
+ int offset; /* offset into current Rx buffer */
+ u16 cntxt_id; /* SGE context id for the response q */
+ u16 abs_id; /* absolute SGE id for the response q */
+ __be64 *desc; /* address of HW response ring */
+ dma_addr_t phys_addr; /* physical address of the ring */
+ unsigned int iqe_len; /* entry size */
+ unsigned int size; /* capacity of response queue */
+ struct adapter *adap;
+ struct net_device *netdev; /* associated net device */
+ rspq_handler_t handler;
+};
+
+struct sge_eth_stats { /* Ethernet queue statistics */
+ unsigned long pkts; /* # of ethernet packets */
+ unsigned long lro_pkts; /* # of LRO super packets */
+ unsigned long lro_merged; /* # of wire packets merged by LRO */
+ unsigned long rx_cso; /* # of Rx checksum offloads */
+ unsigned long vlan_ex; /* # of Rx VLAN extractions */
+ unsigned long rx_drops; /* # of packets dropped due to no mem */
+};
+
+struct sge_eth_rxq { /* SW Ethernet Rx queue */
+ struct sge_rspq rspq;
+ struct sge_fl fl;
+ struct sge_eth_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct sge_ofld_stats { /* offload queue statistics */
+ unsigned long pkts; /* # of packets */
+ unsigned long imm; /* # of immediate-data packets */
+ unsigned long an; /* # of asynchronous notifications */
+ unsigned long nomem; /* # of responses deferred due to no mem */
+};
+
+struct sge_ofld_rxq { /* SW offload Rx queue */
+ struct sge_rspq rspq;
+ struct sge_fl fl;
+ struct sge_ofld_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct tx_desc {
+ __be64 flit[8];
+};
+
+struct tx_sw_desc;
+
+struct sge_txq {
+ unsigned int in_use; /* # of in-use Tx descriptors */
+ unsigned int size; /* # of descriptors */
+ unsigned int cidx; /* SW consumer index */
+ unsigned int pidx; /* producer index */
+ unsigned long stops; /* # of times q has been stopped */
+ unsigned long restarts; /* # of queue restarts */
+ unsigned int cntxt_id; /* SGE context id for the Tx q */
+ struct tx_desc *desc; /* address of HW Tx descriptor ring */
+ struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
+ struct sge_qstat *stat; /* queue status entry */
+ dma_addr_t phys_addr; /* physical address of the ring */
+};
+
+struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
+ struct sge_txq q;
+ struct netdev_queue *txq; /* associated netdev TX queue */
+ unsigned long tso; /* # of TSO requests */
+ unsigned long tx_cso; /* # of Tx checksum offloads */
+ unsigned long vlan_ins; /* # of Tx VLAN insertions */
+ unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
+} ____cacheline_aligned_in_smp;
+
+struct sge_ofld_txq { /* state for an SGE offload Tx queue */
+ struct sge_txq q;
+ struct adapter *adap;
+ struct sk_buff_head sendq; /* list of backpressured packets */
+ struct tasklet_struct qresume_tsk; /* restarts the queue */
+ u8 full; /* the Tx ring is full */
+ unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
+} ____cacheline_aligned_in_smp;
+
+struct sge_ctrl_txq { /* state for an SGE control Tx queue */
+ struct sge_txq q;
+ struct adapter *adap;
+ struct sk_buff_head sendq; /* list of backpressured packets */
+ struct tasklet_struct qresume_tsk; /* restarts the queue */
+ u8 full; /* the Tx ring is full */
+} ____cacheline_aligned_in_smp;
+
+struct sge {
+ struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
+ struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
+ struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
+
+ struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
+ struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
+ struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
+ struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
+
+ struct sge_rspq intrq ____cacheline_aligned_in_smp;
+ spinlock_t intrq_lock;
+
+ u16 max_ethqsets; /* # of available Ethernet queue sets */
+ u16 ethqsets; /* # of active Ethernet queue sets */
+ u16 ethtxq_rover; /* Tx queue to clean up next */
+ u16 ofldqsets; /* # of active offload queue sets */
+ u16 rdmaqs; /* # of available RDMA Rx queues */
+ u16 ofld_rxq[MAX_OFLD_QSETS];
+ u16 rdma_rxq[NCHAN];
+ u16 timer_val[SGE_NTIMERS];
+ u8 counter_val[SGE_NCOUNTERS];
+ unsigned int starve_thres;
+ u8 idma_state[2];
+ unsigned int egr_start;
+ unsigned int ingr_start;
+ void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */
+ struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
+ DECLARE_BITMAP(starving_fl, MAX_EGRQ);
+ DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
+ struct timer_list rx_timer; /* refills starving FLs */
+ struct timer_list tx_timer; /* checks Tx queues */
+};
+
+#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
+#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
+#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
+
+struct l2t_data;
+
+struct adapter {
+ void __iomem *regs;
+ struct pci_dev *pdev;
+ struct device *pdev_dev;
+ unsigned int fn;
+ unsigned int flags;
+
+ int msg_enable;
+
+ struct adapter_params params;
+ struct cxgb4_virt_res vres;
+ unsigned int swintr;
+
+ unsigned int wol;
+
+ struct {
+ unsigned short vec;
+ char desc[IFNAMSIZ + 10];
+ } msix_info[MAX_INGQ + 1];
+
+ struct sge sge;
+
+ struct net_device *port[MAX_NPORTS];
+ u8 chan_map[NCHAN]; /* channel -> port map */
+
+ struct l2t_data *l2t;
+ void *uld_handle[CXGB4_ULD_MAX];
+ struct list_head list_node;
+
+ struct tid_info tids;
+ void **tid_release_head;
+ spinlock_t tid_release_lock;
+ struct work_struct tid_release_task;
+ bool tid_release_task_busy;
+
+ struct dentry *debugfs_root;
+
+ spinlock_t stats_lock;
+};
+
+static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
+{
+ return readl(adap->regs + reg_addr);
+}
+
+static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
+{
+ writel(val, adap->regs + reg_addr);
+}
+
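+/*
+ * Fallback 64-bit MMIO accessors for platforms without native readq/writeq:
+ * each 64-bit access is split into two 32-bit ones, which is not atomic and
+ * is assumed acceptable for the registers this driver accesses 64 bits wide.
+ */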
+#ifndef readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+ return readl(addr) + ((u64)readl(addr + 4) << 32);
+}
+
+static inline void writeq(u64 val, volatile void __iomem *addr)
+{
+ writel(val, addr);
+ writel(val >> 32, addr + 4);
+}
+#endif
+
+static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
+{
+ return readq(adap->regs + reg_addr);
+}
+
+static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
+{
+ writeq(val, adap->regs + reg_addr);
+}
+
+/**
+ * netdev2pinfo - return the port_info structure associated with a net_device
+ * @dev: the netdev
+ *
+ * Return the struct port_info associated with a net_device
+ */
+static inline struct port_info *netdev2pinfo(const struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+/**
+ * adap2pinfo - return the port_info of a port
+ * @adap: the adapter
+ * @idx: the port index
+ *
+ * Return the port_info structure for the port of the given index.
+ */
+static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
+{
+ return netdev_priv(adap->port[idx]);
+}
+
+/**
+ * netdev2adap - return the adapter structure associated with a net_device
+ * @dev: the netdev
+ *
+ * Return the struct adapter associated with a net_device
+ */
+static inline struct adapter *netdev2adap(const struct net_device *dev)
+{
+ return netdev2pinfo(dev)->adapter;
+}
+
+void t4_os_portmod_changed(const struct adapter *adap, int port_id);
+void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
+
+void *t4_alloc_mem(size_t size);
+
+void t4_free_sge_resources(struct adapter *adap);
+irq_handler_t t4_intr_handler(struct adapter *adap);
+netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
+int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *gl);
+int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
+int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
+int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
+ struct net_device *dev, int intr_idx,
+ struct sge_fl *fl, rspq_handler_t hnd);
+int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
+ struct net_device *dev, struct netdev_queue *netdevq,
+ unsigned int iqid);
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+ struct net_device *dev, unsigned int iqid,
+ unsigned int cmplqid);
+int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
+ struct net_device *dev, unsigned int iqid);
+irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
+void t4_sge_init(struct adapter *adap);
+void t4_sge_start(struct adapter *adap);
+void t4_sge_stop(struct adapter *adap);
+
+#define for_each_port(adapter, iter) \
+ for (iter = 0; iter < (adapter)->params.nports; ++iter)
+
+static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
+{
+ return adap->params.vpd.cclk / 1000;
+}
+
+static inline unsigned int us_to_core_ticks(const struct adapter *adap,
+ unsigned int us)
+{
+ return (us * adap->params.vpd.cclk) / 1000;
+}
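+
+/*
+ * Illustrative numbers (the cclk value is assumed, not taken from this
+ * driver): with vpd.cclk = 250000, i.e. a 250 MHz core clock expressed in
+ * kHz as the division by 1000 implies, core_ticks_per_usec() returns 250
+ * and us_to_core_ticks(adap, 8) returns 2000.
+ */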
+
+void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
+ u32 val);
+
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+ void *rpl, bool sleep_ok);
+
+static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl)
+{
+ return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
+}
+
+static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl)
+{
+ return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
+}
+
+void t4_intr_enable(struct adapter *adapter);
+void t4_intr_disable(struct adapter *adapter);
+int t4_slow_intr_handler(struct adapter *adapter);
+
+int t4_wait_dev_ready(struct adapter *adap);
+int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+ struct link_config *lc);
+int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
+int t4_seeprom_wp(struct adapter *adapter, bool enable);
+int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
+int t4_check_fw_version(struct adapter *adapter);
+int t4_prep_adapter(struct adapter *adapter);
+int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
+void t4_fatal_err(struct adapter *adapter);
+int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
+ int start, int n, const u16 *rspq, unsigned int nrspq);
+int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
+ unsigned int flags);
+int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
+int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
+ u64 *parity);
+
+void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
+void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
+void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
+ struct tp_tcp_stats *v6);
+void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
+ const unsigned short *alpha, const unsigned short *beta);
+
+void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
+ const u8 *addr);
+int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
+ u64 mask0, u64 mask1, unsigned int crc, bool enable);
+
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+ enum dev_master master, enum dev_state *state);
+int t4_fw_bye(struct adapter *adap, unsigned int mbox);
+int t4_early_init(struct adapter *adap, unsigned int mbox);
+int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val);
+int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ const u32 *val);
+int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
+ unsigned int rxqi, unsigned int rxq, unsigned int tc,
+ unsigned int vi, unsigned int cmask, unsigned int pmask,
+ unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+ unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+ unsigned int *rss_size);
+int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int mtu, int promisc, int all_multi, int bcast, int vlanex,
+ bool sleep_ok);
+int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
+ unsigned int viid, bool free, unsigned int naddr,
+ const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
+int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int idx, const u8 *addr, bool persist, bool add_smt);
+int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ bool ucast, u64 vec, bool sleep_ok);
+int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ bool rx_en, bool tx_en);
+int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ unsigned int nblinks);
+int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+ unsigned int mmd, unsigned int reg, u16 *valp);
+int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+ unsigned int mmd, unsigned int reg, u16 val);
+int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id);
+int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid);
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid);
+int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid);
+int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
+#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
new file mode 100644
index 000000000000..4c8f42afa3c6
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -0,0 +1,3812 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/crc32.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/rtnetlink.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/sockios.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include <net/neighbour.h>
+#include <net/netevent.h>
+#include <asm/uaccess.h>
+
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "t4fw_api.h"
+#include "l2t.h"
+
+#define DRV_VERSION "1.3.0-ko"
+#define DRV_DESC "Chelsio T4 Network Driver"
+
+/*
+ * Max interrupt hold-off timer value in us. Queues fall back to this value
+ * under extreme memory pressure so it's largish to give the system time to
+ * recover.
+ */
+#define MAX_SGE_TIMERVAL 200U
+
+#ifdef CONFIG_PCI_IOV
+/*
+ * Virtual Function provisioning constants. We need two extra Ingress Queues
+ * with Interrupt capability to serve as the VF's Firmware Event Queue and
+ * Forwarded Interrupt Queue (when using MSI mode); neither will have Free
+ * Lists associated with them. For each Ethernet/Control Egress Queue and
+ * for each Free List, we need an Egress Context.
+ */
+enum {
+ VFRES_NPORTS = 1, /* # of "ports" per VF */
+ VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
+
+ VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
+ VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
+ VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
+ VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
+ VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
+ VFRES_TC = 0, /* PCI-E traffic class */
+ VFRES_NEXACTF = 16, /* # of exact MPS filters */
+
+ VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
+ VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
+};
+
+/*
+ * Provide a Port Access Rights Mask for the specified PF/VF. This is very
+ * static and likely not to be useful in the long run. We really need to
+ * implement some form of persistent configuration which the firmware
+ * controls.
+ */
+static unsigned int pfvfres_pmask(struct adapter *adapter,
+ unsigned int pf, unsigned int vf)
+{
+ unsigned int portn, portvec;
+
+ /*
+ * Give PF's access to all of the ports.
+ */
+ if (vf == 0)
+ return FW_PFVF_CMD_PMASK_MASK;
+
+ /*
+ * For VFs, we'll assign them access to the ports based purely on the
+ * PF. We assign active ports in order, wrapping around if there are
+ * fewer active ports than PFs: e.g. active port[pf % nports].
+ * Unfortunately the adapter's port_info structs haven't been
+ * initialized yet so we have to compute this.
+ */
+ if (adapter->params.nports == 0)
+ return 0;
+
+ portn = pf % adapter->params.nports;
+ portvec = adapter->params.portvec;
+ for (;;) {
+ /*
+ * Isolate the lowest set bit in the port vector. If we're at
+ * the port number that we want, return that as the pmask;
+ * otherwise mask that bit out of the port vector and
+ * decrement our port number ...
+ */
+ unsigned int pmask = portvec ^ (portvec & (portvec-1));
+ if (portn == 0)
+ return pmask;
+ portn--;
+ portvec &= ~pmask;
+ }
+ /*NOTREACHED*/
+}
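+
+/*
+ * Worked example of the loop above: with portvec = 0xa (ports 1 and 3
+ * active) and portn = 1, the first pass isolates and consumes pmask = 0x2,
+ * and the second pass returns pmask = 0x8, granting the VF access to the
+ * second active port.
+ */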
+#endif
+
+enum {
+ MEMWIN0_APERTURE = 65536,
+ MEMWIN0_BASE = 0x30000,
+ MEMWIN1_APERTURE = 32768,
+ MEMWIN1_BASE = 0x28000,
+ MEMWIN2_APERTURE = 2048,
+ MEMWIN2_BASE = 0x1b800,
+};
+
+enum {
+ MAX_TXQ_ENTRIES = 16384,
+ MAX_CTRL_TXQ_ENTRIES = 1024,
+ MAX_RSPQ_ENTRIES = 16384,
+ MAX_RX_BUFFERS = 16384,
+ MIN_TXQ_ENTRIES = 32,
+ MIN_CTRL_TXQ_ENTRIES = 32,
+ MIN_RSPQ_ENTRIES = 128,
+ MIN_FL_ENTRIES = 16
+};
+
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+ NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
+
+static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
+ CH_DEVICE(0xa000, 0), /* PE10K */
+ CH_DEVICE(0x4001, -1),
+ CH_DEVICE(0x4002, -1),
+ CH_DEVICE(0x4003, -1),
+ CH_DEVICE(0x4004, -1),
+ CH_DEVICE(0x4005, -1),
+ CH_DEVICE(0x4006, -1),
+ CH_DEVICE(0x4007, -1),
+ CH_DEVICE(0x4008, -1),
+ CH_DEVICE(0x4009, -1),
+ CH_DEVICE(0x400a, -1),
+ CH_DEVICE(0x4401, 4),
+ CH_DEVICE(0x4402, 4),
+ CH_DEVICE(0x4403, 4),
+ CH_DEVICE(0x4404, 4),
+ CH_DEVICE(0x4405, 4),
+ CH_DEVICE(0x4406, 4),
+ CH_DEVICE(0x4407, 4),
+ CH_DEVICE(0x4408, 4),
+ CH_DEVICE(0x4409, 4),
+ CH_DEVICE(0x440a, 4),
+ { 0, }
+};
+
+#define FW_FNAME "cxgb4/t4fw.bin"
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
+MODULE_FIRMWARE(FW_FNAME);
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+module_param(dflt_msg_enable, int, 0644);
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
+
+/*
+ * The driver uses the best interrupt scheme available on a platform in the
+ * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
+ * of these schemes the driver may consider as follows:
+ *
+ * msi = 2: choose from among all three options
+ * msi = 1: only consider MSI and INTx interrupts
+ * msi = 0: force INTx interrupts
+ */
+static int msi = 2;
+
+module_param(msi, int, 0644);
+MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
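+
+/*
+ * For example (hypothetical invocation), loading the module with
+ * "modprobe cxgb4 msi=1" makes the driver skip MSI-X and try MSI first,
+ * falling back to INTx if MSI cannot be allocated.
+ */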
+
+/*
+ * Queue interrupt hold-off timer values. Queues default to the first of these
+ * upon creation.
+ */
+static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
+
+module_param_array(intr_holdoff, uint, NULL, 0644);
+MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
+ "0..4 in microseconds");
+
+static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
+
+module_param_array(intr_cnt, uint, NULL, 0644);
+MODULE_PARM_DESC(intr_cnt,
+ "thresholds 1..3 for queue interrupt packet counters");
+
+static int vf_acls;
+
+#ifdef CONFIG_PCI_IOV
+module_param(vf_acls, bool, 0644);
+MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
+
+static unsigned int num_vf[4];
+
+module_param_array(num_vf, uint, NULL, 0644);
+MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
+#endif
+
+static struct dentry *cxgb4_debugfs_root;
+
+static LIST_HEAD(adapter_list);
+static DEFINE_MUTEX(uld_mutex);
+static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
+static const char *uld_str[] = { "RDMA", "iSCSI" };
+
+static void link_report(struct net_device *dev)
+{
+ if (!netif_carrier_ok(dev))
+ netdev_info(dev, "link down\n");
+ else {
+ static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
+
+ const char *s = "10Mbps";
+ const struct port_info *p = netdev_priv(dev);
+
+ switch (p->link_cfg.speed) {
+ case SPEED_10000:
+ s = "10Gbps";
+ break;
+ case SPEED_1000:
+ s = "1000Mbps";
+ break;
+ case SPEED_100:
+ s = "100Mbps";
+ break;
+ }
+
+ netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
+ fc[p->link_cfg.fc]);
+ }
+}
+
+void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
+{
+ struct net_device *dev = adapter->port[port_id];
+
+ /* Skip changes from disabled ports. */
+ if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
+ if (link_stat)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ link_report(dev);
+ }
+}
+
+void t4_os_portmod_changed(const struct adapter *adap, int port_id)
+{
+ static const char *mod_str[] = {
+ NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
+ };
+
+ const struct net_device *dev = adap->port[port_id];
+ const struct port_info *pi = netdev_priv(dev);
+
+ if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
+ netdev_info(dev, "port module unplugged\n");
+ else if (pi->mod_type < ARRAY_SIZE(mod_str))
+ netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
+}
+
+/*
+ * Configure the exact and hash address filters to handle a port's multicast
+ * and secondary unicast MAC addresses. Addresses are programmed into the
+ * exact filters in batches of up to 7 (the size of the local addr[] array);
+ * whatever cannot be given an exact filter is folded into the 64-bit hash
+ * filter vector that is programmed at the end.
+ */
+static int set_addr_filters(const struct net_device *dev, bool sleep)
+{
+ u64 mhash = 0;
+ u64 uhash = 0;
+ bool free = true;
+ u16 filt_idx[7];
+ const u8 *addr[7];
+ int ret, naddr = 0;
+ const struct netdev_hw_addr *ha;
+ int uc_cnt = netdev_uc_count(dev);
+ int mc_cnt = netdev_mc_count(dev);
+ const struct port_info *pi = netdev_priv(dev);
+ unsigned int mb = pi->adapter->fn;
+
+ /* first do the secondary unicast addresses */
+ netdev_for_each_uc_addr(ha, dev) {
+ addr[naddr++] = ha->addr;
+ if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
+ ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
+ naddr, addr, filt_idx, &uhash, sleep);
+ if (ret < 0)
+ return ret;
+
+ free = false;
+ naddr = 0;
+ }
+ }
+
+ /* next set up the multicast addresses */
+ netdev_for_each_mc_addr(ha, dev) {
+ addr[naddr++] = ha->addr;
+ if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
+ ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
+ naddr, addr, filt_idx, &mhash, sleep);
+ if (ret < 0)
+ return ret;
+
+ free = false;
+ naddr = 0;
+ }
+ }
+
+ return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
+ uhash | mhash, sleep);
+}
+
+/*
+ * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
+ * If @mtu is -1 it is left unchanged.
+ */
+static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+
+ ret = set_addr_filters(dev, sleep_ok);
+ if (ret == 0)
+ ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
+ (dev->flags & IFF_PROMISC) ? 1 : 0,
+ (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
+ sleep_ok);
+ return ret;
+}
+
+/**
+ * link_start - enable a port
+ * @dev: the port to enable
+ *
+ * Performs the MAC and PHY actions needed to enable a port.
+ */
+static int link_start(struct net_device *dev)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+ unsigned int mb = pi->adapter->fn;
+
+ /*
+ * We do not set address filters and promiscuity here, the stack does
+ * that step explicitly.
+ */
+ ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
+ !!(dev->features & NETIF_F_HW_VLAN_RX), true);
+ if (ret == 0) {
+ ret = t4_change_mac(pi->adapter, mb, pi->viid,
+ pi->xact_addr_filt, dev->dev_addr, true,
+ true);
+ if (ret >= 0) {
+ pi->xact_addr_filt = ret;
+ ret = 0;
+ }
+ }
+ if (ret == 0)
+ ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
+ &pi->link_cfg);
+ if (ret == 0)
+ ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
+ return ret;
+}
+
+/*
+ * Response queue handler for the FW event queue.
+ */
+static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *gl)
+{
+ u8 opcode = ((const struct rss_header *)rsp)->opcode;
+
+ rsp++; /* skip RSS header */
+ if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
+ const struct cpl_sge_egr_update *p = (void *)rsp;
+ unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
+ struct sge_txq *txq;
+
+ txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
+ txq->restarts++;
+ /* Ethernet Tx queues precede ofldtxq[] within struct sge */
+ if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
+ struct sge_eth_txq *eq;
+
+ eq = container_of(txq, struct sge_eth_txq, q);
+ netif_tx_wake_queue(eq->txq);
+ } else {
+ struct sge_ofld_txq *oq;
+
+ oq = container_of(txq, struct sge_ofld_txq, q);
+ tasklet_schedule(&oq->qresume_tsk);
+ }
+ } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
+ const struct cpl_fw6_msg *p = (void *)rsp;
+
+ if (p->type == 0)
+ t4_handle_fw_rpl(q->adap, p->data);
+ } else if (opcode == CPL_L2T_WRITE_RPL) {
+ const struct cpl_l2t_write_rpl *p = (void *)rsp;
+
+ do_l2t_write_rpl(q->adap, p);
+ } else
+ dev_err(q->adap->pdev_dev,
+ "unexpected CPL %#x on FW event queue\n", opcode);
+ return 0;
+}
+
+/**
+ * uldrx_handler - response queue handler for ULD queues
+ * @q: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the offload message
+ * @gl: the gather list of packet fragments
+ *
+ * Deliver an ingress offload packet to a ULD. All processing is done by
+ * the ULD, we just maintain statistics.
+ */
+static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *gl)
+{
+ struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
+
+ if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
+ rxq->stats.nomem++;
+ return -1;
+ }
+ if (gl == NULL)
+ rxq->stats.imm++;
+ else if (gl == CXGB4_MSG_AN)
+ rxq->stats.an++;
+ else
+ rxq->stats.pkts++;
+ return 0;
+}
+
+static void disable_msi(struct adapter *adapter)
+{
+ if (adapter->flags & USING_MSIX) {
+ pci_disable_msix(adapter->pdev);
+ adapter->flags &= ~USING_MSIX;
+ } else if (adapter->flags & USING_MSI) {
+ pci_disable_msi(adapter->pdev);
+ adapter->flags &= ~USING_MSI;
+ }
+}
+
+/*
+ * Interrupt handler for non-data events used with MSI-X.
+ */
+static irqreturn_t t4_nondata_intr(int irq, void *cookie)
+{
+ struct adapter *adap = cookie;
+
+ u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
+ if (v & PFSW) {
+ adap->swintr = 1;
+ t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
+ }
+ t4_slow_intr_handler(adap);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Name the MSI-X interrupts.
+ */
+static void name_msix_vecs(struct adapter *adap)
+{
+ int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
+
+ /* non-data interrupts */
+ snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
+
+ /* FW events */
+ snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
+ adap->port[0]->name);
+
+ /* Ethernet queues */
+ for_each_port(adap, j) {
+ struct net_device *d = adap->port[j];
+ const struct port_info *pi = netdev_priv(d);
+
+ for (i = 0; i < pi->nqsets; i++, msi_idx++)
+ snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
+ d->name, i);
+ }
+
+ /* offload queues */
+ for_each_ofldrxq(&adap->sge, i)
+ snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
+ adap->port[0]->name, i);
+
+ for_each_rdmarxq(&adap->sge, i)
+ snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
+ adap->port[0]->name, i);
+}
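+
+/*
+ * Illustrative result, assuming a single port named "eth2" with two queue
+ * sets: vector 0 is named "eth2", vector 1 "eth2-FWeventq", followed by
+ * "eth2-Rx0" and "eth2-Rx1", then any offload entries such as "eth2-ofld0"
+ * and "eth2-rdma0".
+ */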
+
+static int request_msix_queue_irqs(struct adapter *adap)
+{
+ struct sge *s = &adap->sge;
+ int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;
+
+ err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
+ adap->msix_info[1].desc, &s->fw_evtq);
+ if (err)
+ return err;
+
+ for_each_ethrxq(s, ethqidx) {
+ err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
+ adap->msix_info[msi].desc,
+ &s->ethrxq[ethqidx].rspq);
+ if (err)
+ goto unwind;
+ msi++;
+ }
+ for_each_ofldrxq(s, ofldqidx) {
+ err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
+ adap->msix_info[msi].desc,
+ &s->ofldrxq[ofldqidx].rspq);
+ if (err)
+ goto unwind;
+ msi++;
+ }
+ for_each_rdmarxq(s, rdmaqidx) {
+ err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
+ adap->msix_info[msi].desc,
+ &s->rdmarxq[rdmaqidx].rspq);
+ if (err)
+ goto unwind;
+ msi++;
+ }
+ return 0;
+
+unwind:
+ while (--rdmaqidx >= 0)
+ free_irq(adap->msix_info[--msi].vec,
+ &s->rdmarxq[rdmaqidx].rspq);
+ while (--ofldqidx >= 0)
+ free_irq(adap->msix_info[--msi].vec,
+ &s->ofldrxq[ofldqidx].rspq);
+ while (--ethqidx >= 0)
+ free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
+ free_irq(adap->msix_info[1].vec, &s->fw_evtq);
+ return err;
+}
+
+static void free_msix_queue_irqs(struct adapter *adap)
+{
+ int i, msi = 2;
+ struct sge *s = &adap->sge;
+
+ free_irq(adap->msix_info[1].vec, &s->fw_evtq);
+ for_each_ethrxq(s, i)
+ free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
+ for_each_ofldrxq(s, i)
+ free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
+ for_each_rdmarxq(s, i)
+ free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
+}
+
+/**
+ * write_rss - write the RSS table for a given port
+ * @pi: the port
+ * @queues: array of queue indices for RSS
+ *
+ * Sets up the portion of the HW RSS table for the port's VI to distribute
+ * packets to the Rx queues in @queues.
+ */
+static int write_rss(const struct port_info *pi, const u16 *queues)
+{
+ u16 *rss;
+ int i, err;
+ const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
+
+ rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
+ if (!rss)
+ return -ENOMEM;
+
+ /* map the queue indices to queue ids */
+ for (i = 0; i < pi->rss_size; i++, queues++)
+ rss[i] = q[*queues].rspq.abs_id;
+
+ err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
+ pi->rss_size, rss, pi->rss_size);
+ kfree(rss);
+ return err;
+}
+
+/**
+ * setup_rss - configure RSS
+ * @adap: the adapter
+ *
+ * Sets up RSS for each port.
+ */
+static int setup_rss(struct adapter *adap)
+{
+ int i, err;
+
+ for_each_port(adap, i) {
+ const struct port_info *pi = adap2pinfo(adap, i);
+
+ err = write_rss(pi, pi->rss);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/*
+ * Return the channel of the ingress queue with the given qid.
+ */
+static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
+{
+ qid -= p->ingr_start;
+ return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
+}
+
+/*
+ * Wait until all NAPI handlers are descheduled.
+ */
+static void quiesce_rx(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+ struct sge_rspq *q = adap->sge.ingr_map[i];
+
+ if (q && q->handler)
+ napi_disable(&q->napi);
+ }
+}
+
+/*
+ * Enable NAPI scheduling and interrupt generation for all Rx queues.
+ */
+static void enable_rx(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+ struct sge_rspq *q = adap->sge.ingr_map[i];
+
+ if (!q)
+ continue;
+ if (q->handler)
+ napi_enable(&q->napi);
+ /* 0-increment GTS to start the timer and enable interrupts */
+ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
+ SEINTARM(q->intr_params) |
+ INGRESSQID(q->cntxt_id));
+ }
+}
+
+/**
+ * setup_sge_queues - configure SGE Tx/Rx/response queues
+ * @adap: the adapter
+ *
+ * Determines how many sets of SGE queues to use and initializes them.
+ * We support multiple queue sets per port if we have MSI-X, otherwise
+ * just one queue set per port.
+ */
+static int setup_sge_queues(struct adapter *adap)
+{
+ int err, msi_idx, i, j;
+ struct sge *s = &adap->sge;
+
+ bitmap_zero(s->starving_fl, MAX_EGRQ);
+ bitmap_zero(s->txq_maperr, MAX_EGRQ);
+
+ if (adap->flags & USING_MSIX)
+ msi_idx = 1; /* vector 0 is for non-queue interrupts */
+ else {
+ err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
+ NULL, NULL);
+ if (err)
+ return err;
+ msi_idx = -((int)s->intrq.abs_id + 1);
+ }
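+ /*
+ * A negative msi_idx is presumably the convention by which
+ * t4_sge_alloc_rxq() is told to forward a queue's interrupts to the
+ * INTx/MSI interrupt queue instead of assigning a dedicated MSI-X
+ * vector; encoding it as -(abs_id + 1) keeps queue id 0 representable.
+ */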
+
+ err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
+ msi_idx, NULL, fwevtq_handler);
+ if (err) {
+freeout: t4_free_sge_resources(adap);
+ return err;
+ }
+
+ for_each_port(adap, i) {
+ struct net_device *dev = adap->port[i];
+ struct port_info *pi = netdev_priv(dev);
+ struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
+ struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
+
+ for (j = 0; j < pi->nqsets; j++, q++) {
+ if (msi_idx > 0)
+ msi_idx++;
+ err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
+ msi_idx, &q->fl,
+ t4_ethrx_handler);
+ if (err)
+ goto freeout;
+ q->rspq.idx = j;
+ memset(&q->stats, 0, sizeof(q->stats));
+ }
+ for (j = 0; j < pi->nqsets; j++, t++) {
+ err = t4_sge_alloc_eth_txq(adap, t, dev,
+ netdev_get_tx_queue(dev, j),
+ s->fw_evtq.cntxt_id);
+ if (err)
+ goto freeout;
+ }
+ }
+
+ j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
+ for_each_ofldrxq(s, i) {
+ struct sge_ofld_rxq *q = &s->ofldrxq[i];
+ struct net_device *dev = adap->port[i / j];
+
+ if (msi_idx > 0)
+ msi_idx++;
+ err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
+ &q->fl, uldrx_handler);
+ if (err)
+ goto freeout;
+ memset(&q->stats, 0, sizeof(q->stats));
+ s->ofld_rxq[i] = q->rspq.abs_id;
+ err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
+ s->fw_evtq.cntxt_id);
+ if (err)
+ goto freeout;
+ }
+
+ for_each_rdmarxq(s, i) {
+ struct sge_ofld_rxq *q = &s->rdmarxq[i];
+
+ if (msi_idx > 0)
+ msi_idx++;
+ err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
+ msi_idx, &q->fl, uldrx_handler);
+ if (err)
+ goto freeout;
+ memset(&q->stats, 0, sizeof(q->stats));
+ s->rdma_rxq[i] = q->rspq.abs_id;
+ }
+
+ for_each_port(adap, i) {
+ /*
+ * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
+ * have RDMA queues, and that's the right value.
+ */
+ err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
+ s->fw_evtq.cntxt_id,
+ s->rdmarxq[i].rspq.cntxt_id);
+ if (err)
+ goto freeout;
+ }
+
+ t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
+ RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
+ QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
+ return 0;
+}
+
+/*
+ * Returns 0 if new FW was successfully loaded or no upgrade was needed, a
+ * positive errno if a load was started but failed, and a negative errno if
+ * the flash load couldn't start.
+ */
+static int upgrade_fw(struct adapter *adap)
+{
+ int ret;
+ u32 vers;
+ const struct fw_hdr *hdr;
+ const struct firmware *fw;
+ struct device *dev = adap->pdev_dev;
+
+ ret = request_firmware(&fw, FW_FNAME, dev);
+ if (ret < 0) {
+ dev_err(dev, "unable to load firmware image " FW_FNAME
+ ", error %d\n", ret);
+ return ret;
+ }
+
+ hdr = (const struct fw_hdr *)fw->data;
+ vers = ntohl(hdr->fw_ver);
+ if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
+ ret = -EINVAL; /* wrong major version, won't do */
+ goto out;
+ }
+
+ /*
+ * If the flash FW is unusable or we found something newer, load it.
+ */
+ if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
+ vers > adap->params.fw_vers) {
+ ret = -t4_load_fw(adap, fw->data, fw->size);
+ if (!ret)
+ dev_info(dev, "firmware upgraded to version %pI4 from "
+ FW_FNAME "\n", &hdr->fw_ver);
+ }
+out: release_firmware(fw);
+ return ret;
+}
+
+/*
+ * Allocate a chunk of memory using kzalloc or, if that fails, vzalloc.
+ * The allocated memory is cleared either way.
+ */
+void *t4_alloc_mem(size_t size)
+{
+ void *p = kzalloc(size, GFP_KERNEL);
+
+ if (!p)
+ p = vzalloc(size);
+ return p;
+}
+
+/*
+ * Free memory allocated through t4_alloc_mem().
+ */
+static void t4_free_mem(void *addr)
+{
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+}
+
+static inline int is_offload(const struct adapter *adap)
+{
+ return adap->params.offload;
+}
+
+/*
+ * Implementation of ethtool operations.
+ */
+
+static u32 get_msglevel(struct net_device *dev)
+{
+ return netdev2adap(dev)->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+ netdev2adap(dev)->msg_enable = val;
+}
+
+static char stats_strings[][ETH_GSTRING_LEN] = {
+ "TxOctetsOK ",
+ "TxFramesOK ",
+ "TxBroadcastFrames ",
+ "TxMulticastFrames ",
+ "TxUnicastFrames ",
+ "TxErrorFrames ",
+
+ "TxFrames64 ",
+ "TxFrames65To127 ",
+ "TxFrames128To255 ",
+ "TxFrames256To511 ",
+ "TxFrames512To1023 ",
+ "TxFrames1024To1518 ",
+ "TxFrames1519ToMax ",
+
+ "TxFramesDropped ",
+ "TxPauseFrames ",
+ "TxPPP0Frames ",
+ "TxPPP1Frames ",
+ "TxPPP2Frames ",
+ "TxPPP3Frames ",
+ "TxPPP4Frames ",
+ "TxPPP5Frames ",
+ "TxPPP6Frames ",
+ "TxPPP7Frames ",
+
+ "RxOctetsOK ",
+ "RxFramesOK ",
+ "RxBroadcastFrames ",
+ "RxMulticastFrames ",
+ "RxUnicastFrames ",
+
+ "RxFramesTooLong ",
+ "RxJabberErrors ",
+ "RxFCSErrors ",
+ "RxLengthErrors ",
+ "RxSymbolErrors ",
+ "RxRuntFrames ",
+
+ "RxFrames64 ",
+ "RxFrames65To127 ",
+ "RxFrames128To255 ",
+ "RxFrames256To511 ",
+ "RxFrames512To1023 ",
+ "RxFrames1024To1518 ",
+ "RxFrames1519ToMax ",
+
+ "RxPauseFrames ",
+ "RxPPP0Frames ",
+ "RxPPP1Frames ",
+ "RxPPP2Frames ",
+ "RxPPP3Frames ",
+ "RxPPP4Frames ",
+ "RxPPP5Frames ",
+ "RxPPP6Frames ",
+ "RxPPP7Frames ",
+
+ "RxBG0FramesDropped ",
+ "RxBG1FramesDropped ",
+ "RxBG2FramesDropped ",
+ "RxBG3FramesDropped ",
+ "RxBG0FramesTrunc ",
+ "RxBG1FramesTrunc ",
+ "RxBG2FramesTrunc ",
+ "RxBG3FramesTrunc ",
+
+ "TSO ",
+ "TxCsumOffload ",
+ "RxCsumGood ",
+ "VLANextractions ",
+ "VLANinsertions ",
+ "GROpackets ",
+ "GROmerged ",
+};
+
+static int get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(stats_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+#define T4_REGMAP_SIZE (160 * 1024)
+
+static int get_regs_len(struct net_device *dev)
+{
+ return T4_REGMAP_SIZE;
+}
+
+static int get_eeprom_len(struct net_device *dev)
+{
+ return EEPROMSIZE;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct adapter *adapter = netdev2adap(dev);
+
+ strcpy(info->driver, KBUILD_MODNAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(adapter->pdev));
+
+ if (!adapter->params.fw_vers)
+ strcpy(info->fw_version, "N/A");
+ else
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%u.%u.%u.%u, TP %u.%u.%u.%u",
+ FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
+ FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
+ FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
+ FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
+ FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
+}
+
+static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ if (stringset == ETH_SS_STATS)
+ memcpy(data, stats_strings, sizeof(stats_strings));
+}
+
+/*
+ * Port stats maintained per queue of the port. They must be kept in the
+ * same order as the final entries of stats_strings above.
+ */
+struct queue_port_stats {
+ u64 tso;
+ u64 tx_csum;
+ u64 rx_csum;
+ u64 vlan_ex;
+ u64 vlan_ins;
+ u64 gro_pkts;
+ u64 gro_merged;
+};
+
+static void collect_sge_port_stats(const struct adapter *adap,
+ const struct port_info *p, struct queue_port_stats *s)
+{
+ int i;
+ const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
+ const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
+
+ memset(s, 0, sizeof(*s));
+ for (i = 0; i < p->nqsets; i++, rx++, tx++) {
+ s->tso += tx->tso;
+ s->tx_csum += tx->tx_cso;
+ s->rx_csum += rx->stats.rx_cso;
+ s->vlan_ex += rx->stats.vlan_ex;
+ s->vlan_ins += tx->vlan_ins;
+ s->gro_pkts += rx->stats.lro_pkts;
+ s->gro_merged += rx->stats.lro_merged;
+ }
+}
+
+static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
+
+ data += sizeof(struct port_stats) / sizeof(u64);
+ collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
+}
+
+/*
+ * Return a version number to identify the type of adapter. The scheme is:
+ * - bits 0..9: chip version
+ * - bits 10..15: chip revision
+ * - bits 16..23: register dump version
+ */
+static inline unsigned int mk_adap_vers(const struct adapter *ap)
+{
+ return 4 | (ap->params.rev << 10) | (1 << 16);
+}
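+
+/*
+ * Example: for a T4 chip at revision 1 this evaluates to
+ * 4 | (1 << 10) | (1 << 16) = 0x10404.
+ */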
+
+static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
+ unsigned int end)
+{
+ u32 *p = buf + start;
+
+ for ( ; start <= end; start += sizeof(u32))
+ *p++ = t4_read_reg(ap, start);
+}
+
+static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *buf)
+{
+ static const unsigned int reg_ranges[] = {
+ 0x1008, 0x1108,
+ 0x1180, 0x11b4,
+ 0x11fc, 0x123c,
+ 0x1300, 0x173c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x30d8,
+ 0x30e0, 0x5924,
+ 0x5960, 0x59d4,
+ 0x5a00, 0x5af8,
+ 0x6000, 0x6098,
+ 0x6100, 0x6150,
+ 0x6200, 0x6208,
+ 0x6240, 0x6248,
+ 0x6280, 0x6338,
+ 0x6370, 0x638c,
+ 0x6400, 0x643c,
+ 0x6500, 0x6524,
+ 0x6a00, 0x6a38,
+ 0x6a60, 0x6a78,
+ 0x6b00, 0x6b84,
+ 0x6bf0, 0x6c84,
+ 0x6cf0, 0x6d84,
+ 0x6df0, 0x6e84,
+ 0x6ef0, 0x6f84,
+ 0x6ff0, 0x7084,
+ 0x70f0, 0x7184,
+ 0x71f0, 0x7284,
+ 0x72f0, 0x7384,
+ 0x73f0, 0x7450,
+ 0x7500, 0x7530,
+ 0x7600, 0x761c,
+ 0x7680, 0x76cc,
+ 0x7700, 0x7798,
+ 0x77c0, 0x77fc,
+ 0x7900, 0x79fc,
+ 0x7b00, 0x7c38,
+ 0x7d00, 0x7efc,
+ 0x8dc0, 0x8e1c,
+ 0x8e30, 0x8e78,
+ 0x8ea0, 0x8f6c,
+ 0x8fc0, 0x9074,
+ 0x90fc, 0x90fc,
+ 0x9400, 0x9458,
+ 0x9600, 0x96bc,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0x9fec,
+ 0xd004, 0xd03c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0xea7c,
+ 0xf000, 0x11190,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x19124,
+ 0x19150, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x1924c,
+ 0x193f8, 0x19474,
+ 0x19490, 0x194f8,
+ 0x19800, 0x19f30,
+ 0x1a000, 0x1a06c,
+ 0x1a0b0, 0x1a120,
+ 0x1a128, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e040, 0x1e04c,
+ 0x1e284, 0x1e28c,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e440, 0x1e44c,
+ 0x1e684, 0x1e68c,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e840, 0x1e84c,
+ 0x1ea84, 0x1ea8c,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec40, 0x1ec4c,
+ 0x1ee84, 0x1ee8c,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f040, 0x1f04c,
+ 0x1f284, 0x1f28c,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f440, 0x1f44c,
+ 0x1f684, 0x1f68c,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f840, 0x1f84c,
+ 0x1fa84, 0x1fa8c,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc40, 0x1fc4c,
+ 0x1fe84, 0x1fe8c,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x20000, 0x2002c,
+ 0x20100, 0x2013c,
+ 0x20190, 0x201c8,
+ 0x20200, 0x20318,
+ 0x20400, 0x20528,
+ 0x20540, 0x20614,
+ 0x21000, 0x21040,
+ 0x2104c, 0x21060,
+ 0x210c0, 0x210ec,
+ 0x21200, 0x21268,
+ 0x21270, 0x21284,
+ 0x212fc, 0x21388,
+ 0x21400, 0x21404,
+ 0x21500, 0x21518,
+ 0x2152c, 0x2153c,
+ 0x21550, 0x21554,
+ 0x21600, 0x21600,
+ 0x21608, 0x21628,
+ 0x21630, 0x2163c,
+ 0x21700, 0x2171c,
+ 0x21780, 0x2178c,
+ 0x21800, 0x21c38,
+ 0x21c80, 0x21d7c,
+ 0x21e00, 0x21e04,
+ 0x22000, 0x2202c,
+ 0x22100, 0x2213c,
+ 0x22190, 0x221c8,
+ 0x22200, 0x22318,
+ 0x22400, 0x22528,
+ 0x22540, 0x22614,
+ 0x23000, 0x23040,
+ 0x2304c, 0x23060,
+ 0x230c0, 0x230ec,
+ 0x23200, 0x23268,
+ 0x23270, 0x23284,
+ 0x232fc, 0x23388,
+ 0x23400, 0x23404,
+ 0x23500, 0x23518,
+ 0x2352c, 0x2353c,
+ 0x23550, 0x23554,
+ 0x23600, 0x23600,
+ 0x23608, 0x23628,
+ 0x23630, 0x2363c,
+ 0x23700, 0x2371c,
+ 0x23780, 0x2378c,
+ 0x23800, 0x23c38,
+ 0x23c80, 0x23d7c,
+ 0x23e00, 0x23e04,
+ 0x24000, 0x2402c,
+ 0x24100, 0x2413c,
+ 0x24190, 0x241c8,
+ 0x24200, 0x24318,
+ 0x24400, 0x24528,
+ 0x24540, 0x24614,
+ 0x25000, 0x25040,
+ 0x2504c, 0x25060,
+ 0x250c0, 0x250ec,
+ 0x25200, 0x25268,
+ 0x25270, 0x25284,
+ 0x252fc, 0x25388,
+ 0x25400, 0x25404,
+ 0x25500, 0x25518,
+ 0x2552c, 0x2553c,
+ 0x25550, 0x25554,
+ 0x25600, 0x25600,
+ 0x25608, 0x25628,
+ 0x25630, 0x2563c,
+ 0x25700, 0x2571c,
+ 0x25780, 0x2578c,
+ 0x25800, 0x25c38,
+ 0x25c80, 0x25d7c,
+ 0x25e00, 0x25e04,
+ 0x26000, 0x2602c,
+ 0x26100, 0x2613c,
+ 0x26190, 0x261c8,
+ 0x26200, 0x26318,
+ 0x26400, 0x26528,
+ 0x26540, 0x26614,
+ 0x27000, 0x27040,
+ 0x2704c, 0x27060,
+ 0x270c0, 0x270ec,
+ 0x27200, 0x27268,
+ 0x27270, 0x27284,
+ 0x272fc, 0x27388,
+ 0x27400, 0x27404,
+ 0x27500, 0x27518,
+ 0x2752c, 0x2753c,
+ 0x27550, 0x27554,
+ 0x27600, 0x27600,
+ 0x27608, 0x27628,
+ 0x27630, 0x2763c,
+ 0x27700, 0x2771c,
+ 0x27780, 0x2778c,
+ 0x27800, 0x27c38,
+ 0x27c80, 0x27d7c,
+ 0x27e00, 0x27e04
+ };
+
+ int i;
+ struct adapter *ap = netdev2adap(dev);
+
+ regs->version = mk_adap_vers(ap);
+
+ memset(buf, 0, T4_REGMAP_SIZE);
+ for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
+ reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
+}
+
+static int restart_autoneg(struct net_device *dev)
+{
+ struct port_info *p = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+ if (p->link_cfg.autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+ t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
+ return 0;
+}
+
+static int identify_port(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ unsigned int val;
+ struct adapter *adap = netdev2adap(dev);
+
+ if (state == ETHTOOL_ID_ACTIVE)
+ val = 0xffff;
+ else if (state == ETHTOOL_ID_INACTIVE)
+ val = 0;
+ else
+ return -EINVAL;
+
+ return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
+}
+
+static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
+{
+ unsigned int v = 0;
+
+ if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
+ type == FW_PORT_TYPE_BT_XAUI) {
+ v |= SUPPORTED_TP;
+ if (caps & FW_PORT_CAP_SPEED_100M)
+ v |= SUPPORTED_100baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseT_Full;
+ } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
+ v |= SUPPORTED_Backplane;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseKX_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseKX4_Full;
+ } else if (type == FW_PORT_TYPE_KR)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
+ else if (type == FW_PORT_TYPE_BP_AP)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+ SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
+ else if (type == FW_PORT_TYPE_BP4_AP)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+ SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
+ SUPPORTED_10000baseKX4_Full;
+ else if (type == FW_PORT_TYPE_FIBER_XFI ||
+ type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
+ v |= SUPPORTED_FIBRE;
+
+ if (caps & FW_PORT_CAP_ANEG)
+ v |= SUPPORTED_Autoneg;
+ return v;
+}
+
+static unsigned int to_fw_linkcaps(unsigned int caps)
+{
+ unsigned int v = 0;
+
+ if (caps & ADVERTISED_100baseT_Full)
+ v |= FW_PORT_CAP_SPEED_100M;
+ if (caps & ADVERTISED_1000baseT_Full)
+ v |= FW_PORT_CAP_SPEED_1G;
+ if (caps & ADVERTISED_10000baseT_Full)
+ v |= FW_PORT_CAP_SPEED_10G;
+ return v;
+}
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ const struct port_info *p = netdev_priv(dev);
+
+ if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
+ p->port_type == FW_PORT_TYPE_BT_XFI ||
+ p->port_type == FW_PORT_TYPE_BT_XAUI)
+ cmd->port = PORT_TP;
+ else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
+ p->port_type == FW_PORT_TYPE_FIBER_XAUI)
+ cmd->port = PORT_FIBRE;
+ else if (p->port_type == FW_PORT_TYPE_SFP) {
+ if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+ p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+ cmd->port = PORT_DA;
+ else
+ cmd->port = PORT_FIBRE;
+ } else
+ cmd->port = PORT_OTHER;
+
+ if (p->mdio_addr >= 0) {
+ cmd->phy_address = p->mdio_addr;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
+ MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
+ } else {
+ cmd->phy_address = 0; /* not really, but no better option */
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->mdio_support = 0;
+ }
+
+ cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
+ cmd->advertising = from_fw_linkcaps(p->port_type,
+ p->link_cfg.advertising);
+ ethtool_cmd_speed_set(cmd,
+ netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
+ cmd->duplex = DUPLEX_FULL;
+ cmd->autoneg = p->link_cfg.autoneg;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+ return 0;
+}
+
+static unsigned int speed_to_caps(int speed)
+{
+ if (speed == SPEED_100)
+ return FW_PORT_CAP_SPEED_100M;
+ if (speed == SPEED_1000)
+ return FW_PORT_CAP_SPEED_1G;
+ if (speed == SPEED_10000)
+ return FW_PORT_CAP_SPEED_10G;
+ return 0;
+}
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ unsigned int cap;
+ struct port_info *p = netdev_priv(dev);
+ struct link_config *lc = &p->link_cfg;
+ u32 speed = ethtool_cmd_speed(cmd);
+
+ if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
+ return -EINVAL;
+
+ if (!(lc->supported & FW_PORT_CAP_ANEG)) {
+ /*
+ * PHY offers a single speed. See if that's what's
+ * being requested.
+ */
+ if (cmd->autoneg == AUTONEG_DISABLE &&
+ (lc->supported & speed_to_caps(speed)))
+ return 0;
+ return -EINVAL;
+ }
+
+ if (cmd->autoneg == AUTONEG_DISABLE) {
+ cap = speed_to_caps(speed);
+
+ /* only 100Mb/s can be forced; 1Gb/s and 10Gb/s require autoneg */
+ if (!(lc->supported & cap) || (speed == SPEED_1000) ||
+ (speed == SPEED_10000))
+ return -EINVAL;
+ lc->requested_speed = cap;
+ lc->advertising = 0;
+ } else {
+ cap = to_fw_linkcaps(cmd->advertising);
+ if (!(lc->supported & cap))
+ return -EINVAL;
+ lc->requested_speed = 0;
+ lc->advertising = cap | FW_PORT_CAP_ANEG;
+ }
+ lc->autoneg = cmd->autoneg;
+
+ if (netif_running(dev))
+ return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+ lc);
+ return 0;
+}
+
+static void get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct port_info *p = netdev_priv(dev);
+
+ epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
+ epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
+ epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
+}
+
+static int set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct port_info *p = netdev_priv(dev);
+ struct link_config *lc = &p->link_cfg;
+
+ if (epause->autoneg == AUTONEG_DISABLE)
+ lc->requested_fc = 0;
+ else if (lc->supported & FW_PORT_CAP_ANEG)
+ lc->requested_fc = PAUSE_AUTONEG;
+ else
+ return -EINVAL;
+
+ if (epause->rx_pause)
+ lc->requested_fc |= PAUSE_RX;
+ if (epause->tx_pause)
+ lc->requested_fc |= PAUSE_TX;
+ if (netif_running(dev))
+ return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+ lc);
+ return 0;
+}
+
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ const struct sge *s = &pi->adapter->sge;
+
+ e->rx_max_pending = MAX_RX_BUFFERS;
+ e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
+ e->rx_jumbo_max_pending = 0;
+ e->tx_max_pending = MAX_TXQ_ENTRIES;
+
+ e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
+ e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
+ e->rx_jumbo_pending = 0;
+ e->tx_pending = s->ethtxq[pi->first_qset].q.size;
+}
+
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+ int i;
+ const struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+
+ if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
+ e->tx_pending > MAX_TXQ_ENTRIES ||
+ e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
+ e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
+ e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
+ return -EINVAL;
+
+ if (adapter->flags & FULL_INIT_DONE)
+ return -EBUSY;
+
+ for (i = 0; i < pi->nqsets; ++i) {
+ s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
+ s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
+ s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
+ }
+ return 0;
+}
+
+static int closest_timer(const struct sge *s, int time)
+{
+ int i, delta, match = 0, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
+ delta = time - s->timer_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ match = i;
+ }
+ }
+ return match;
+}
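+
+/*
+ * Example: with the intr_holdoff defaults {5, 10, 20, 50, 100} in the first
+ * timer slots, a request of 15us ties between 10 and 20; the strict "<"
+ * comparison keeps the first best match, so index 1 (10us) is returned.
+ */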
+
+static int closest_thres(const struct sge *s, int thres)
+{
+ int i, delta, match = 0, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
+ delta = thres - s->counter_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ match = i;
+ }
+ }
+ return match;
+}
+
+/*
+ * Return a queue's interrupt hold-off time in us. 0 means no timer.
+ */
+static unsigned int qtimer_val(const struct adapter *adap,
+ const struct sge_rspq *q)
+{
+ unsigned int idx = q->intr_params >> 1;
+
+ return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
+}
+
+/**
+ * set_rxq_intr_params - set a queue's interrupt holdoff parameters
+ * @adap: the adapter
+ * @q: the Rx queue
+ * @us: the hold-off time in us, or 0 to disable timer
+ * @cnt: the hold-off packet count, or 0 to disable counter
+ *
+ * Sets an Rx queue's interrupt hold-off time and packet count. At least
+ * one of the two needs to be enabled for the queue to generate interrupts.
+ */
+static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
+ unsigned int us, unsigned int cnt)
+{
+ if ((us | cnt) == 0)
+ cnt = 1;
+
+ if (cnt) {
+ int err;
+ u32 v, new_idx;
+
+ new_idx = closest_thres(&adap->sge, cnt);
+ if (q->desc && q->pktcnt_idx != new_idx) {
+ /* the queue has already been created, update it */
+ v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
+ FW_PARAMS_PARAM_YZ(q->cntxt_id);
+ err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
+ &new_idx);
+ if (err)
+ return err;
+ }
+ q->pktcnt_idx = new_idx;
+ }
+
+ us = us == 0 ? 6 : closest_timer(&adap->sge, us);
+ q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
+ return 0;
+}
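+
+/*
+ * Illustrative encodings: with the default hold-off values, us = 10 and
+ * cnt = 0 yields intr_params = QINTR_TIMER_IDX(1) with QINTR_CNT_EN clear;
+ * us = 0 and cnt = 0 forces cnt to 1, so the counter is enabled and the
+ * out-of-range timer index 6 (meaning "no timer", cf. qtimer_val()) is used.
+ */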
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+
+ return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
+ c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
+}
+
+static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ const struct adapter *adap = pi->adapter;
+ const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
+
+ c->rx_coalesce_usecs = qtimer_val(adap, rq);
+ c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
+ adap->sge.counter_val[rq->pktcnt_idx] : 0;
+ return 0;
+}
+
+/**
+ * eeprom_ptov - translate a physical EEPROM address to virtual
+ * @phys_addr: the physical EEPROM address
+ * @fn: the PCI function number
+ * @sz: size of function-specific area
+ *
+ * Translate a physical EEPROM address to virtual. The first 1K is
+ * accessed through virtual addresses starting at 31K; the rest is
+ * accessed through virtual addresses starting at 0.
+ *
+ * The mapping is as follows:
+ * [0..1K) -> [31K..32K)
+ * [1K..1K+A) -> [31K-A..31K)
+ * [1K+A..ES) -> [0..ES-A-1K)
+ *
+ * where A = @fn * @sz, and ES = EEPROM size.
+ */
+static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
+{
+ fn *= sz;
+ if (phys_addr < 1024)
+ return phys_addr + (31 << 10);
+ if (phys_addr < 1024 + fn)
+ return 31744 - fn + phys_addr - 1024;
+ if (phys_addr < EEPROMSIZE)
+ return phys_addr - 1024 - fn;
+ return -EINVAL;
+}
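+
+/*
+ * Worked example, assuming EEPROMPFSIZE is 1K and fn = 1 (so A = 1K):
+ * physical 0x000 maps to virtual 31K (0x7c00), physical 0x400 (the start of
+ * this function's private area) maps to 30K (0x7800), and physical 0x800
+ * maps to virtual 0.
+ */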
+
+/*
+ * The next two routines implement eeprom read/write from physical addresses.
+ */
+static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
+{
+ int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
+
+ if (vaddr >= 0)
+ vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
+ return vaddr < 0 ? vaddr : 0;
+}
+
+static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
+{
+ int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
+
+ if (vaddr >= 0)
+ vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
+ return vaddr < 0 ? vaddr : 0;
+}
+
+#define EEPROM_MAGIC 0x38E2F10C
+
+static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
+ u8 *data)
+{
+ int i, err = 0;
+ struct adapter *adapter = netdev2adap(dev);
+
+ u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ e->magic = EEPROM_MAGIC;
+ for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
+ err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
+
+ if (!err)
+ memcpy(data, buf + e->offset, e->len);
+ kfree(buf);
+ return err;
+}
+
+static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ u8 *buf;
+ int err = 0;
+ u32 aligned_offset, aligned_len, *p;
+ struct adapter *adapter = netdev2adap(dev);
+
+ if (eeprom->magic != EEPROM_MAGIC)
+ return -EINVAL;
+
+ aligned_offset = eeprom->offset & ~3;
+ aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
+
+ if (adapter->fn > 0) {
+ u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
+
+ if (aligned_offset < start ||
+ aligned_offset + aligned_len > start + EEPROMPFSIZE)
+ return -EPERM;
+ }
+
+ if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
+ /*
+ * RMW possibly needed for first or last words.
+ */
+ buf = kmalloc(aligned_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
+ if (!err && aligned_len > 4)
+ err = eeprom_rd_phys(adapter,
+ aligned_offset + aligned_len - 4,
+ (u32 *)&buf[aligned_len - 4]);
+ if (err)
+ goto out;
+ memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
+ } else
+ buf = data;
+
+ err = t4_seeprom_wp(adapter, false);
+ if (err)
+ goto out;
+
+ for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
+ err = eeprom_wr_phys(adapter, aligned_offset, *p);
+ aligned_offset += 4;
+ }
+
+ if (!err)
+ err = t4_seeprom_wp(adapter, true);
+out:
+ if (buf != data)
+ kfree(buf);
+ return err;
+}
+
+static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
+{
+ int ret;
+ const struct firmware *fw;
+ struct adapter *adap = netdev2adap(netdev);
+
+ ef->data[sizeof(ef->data) - 1] = '\0';
+ ret = request_firmware(&fw, ef->data, adap->pdev_dev);
+ if (ret < 0)
+ return ret;
+
+ ret = t4_load_fw(adap, fw->data, fw->size);
+ release_firmware(fw);
+ if (!ret)
+ dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
+ return ret;
+}
+
+#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
+#define BCAST_CRC 0xa0ccc1a6
+
+static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ wol->supported = WAKE_BCAST | WAKE_MAGIC;
+ wol->wolopts = netdev2adap(dev)->wol;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ int err = 0;
+ struct port_info *pi = netdev_priv(dev);
+
+ if (wol->wolopts & ~WOL_SUPPORTED)
+ return -EINVAL;
+ t4_wol_magic_enable(pi->adapter, pi->tx_chan,
+ (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
+ if (wol->wolopts & WAKE_BCAST) {
+ err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
+ ~0ULL, 0, false);
+ if (!err)
+ err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
+ ~6ULL, ~0ULL, BCAST_CRC, true);
+ } else
+ t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
+ return err;
+}
+
+static int cxgb_set_features(struct net_device *dev, u32 features)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ u32 changed = dev->features ^ features;
+ int err;
+
+ if (!(changed & NETIF_F_HW_VLAN_RX))
+ return 0;
+
+ err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
+ -1, -1, -1,
+ !!(features & NETIF_F_HW_VLAN_RX), true);
+ if (unlikely(err))
+ dev->features = features ^ NETIF_F_HW_VLAN_RX;
+ return err;
+}
+
+static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ unsigned int n = min_t(unsigned int, p->size, pi->rss_size);
+
+ p->size = pi->rss_size;
+ while (n--)
+ p->ring_index[n] = pi->rss[n];
+ return 0;
+}
+
+static int set_rss_table(struct net_device *dev,
+ const struct ethtool_rxfh_indir *p)
+{
+ unsigned int i;
+ struct port_info *pi = netdev_priv(dev);
+
+ if (p->size != pi->rss_size)
+ return -EINVAL;
+ for (i = 0; i < p->size; i++)
+ if (p->ring_index[i] >= pi->nqsets)
+ return -EINVAL;
+ for (i = 0; i < p->size; i++)
+ pi->rss[i] = p->ring_index[i];
+ if (pi->adapter->flags & FULL_INIT_DONE)
+ return write_rss(pi, pi->rss);
+ return 0;
+}
+
+static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rules)
+{
+ const struct port_info *pi = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXFH: {
+ unsigned int v = pi->rss_mode;
+
+ info->data = 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V4_FLOW:
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V6_FLOW:
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ }
+ return 0;
+ }
+ case ETHTOOL_GRXRINGS:
+ info->data = pi->nqsets;
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+static struct ethtool_ops cxgb_ethtool_ops = {
+ .get_settings = get_settings,
+ .set_settings = set_settings,
+ .get_drvinfo = get_drvinfo,
+ .get_msglevel = get_msglevel,
+ .set_msglevel = set_msglevel,
+ .get_ringparam = get_sge_param,
+ .set_ringparam = set_sge_param,
+ .get_coalesce = get_coalesce,
+ .set_coalesce = set_coalesce,
+ .get_eeprom_len = get_eeprom_len,
+ .get_eeprom = get_eeprom,
+ .set_eeprom = set_eeprom,
+ .get_pauseparam = get_pauseparam,
+ .set_pauseparam = set_pauseparam,
+ .get_link = ethtool_op_get_link,
+ .get_strings = get_strings,
+ .set_phys_id = identify_port,
+ .nway_reset = restart_autoneg,
+ .get_sset_count = get_sset_count,
+ .get_ethtool_stats = get_stats,
+ .get_regs_len = get_regs_len,
+ .get_regs = get_regs,
+ .get_wol = get_wol,
+ .set_wol = set_wol,
+ .get_rxnfc = get_rxnfc,
+ .get_rxfh_indir = get_rss_table,
+ .set_rxfh_indir = set_rss_table,
+ .flash_device = set_flash,
+};
+
+/*
+ * debugfs support
+ */
+
+static int mem_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ loff_t pos = *ppos;
+ loff_t avail = file->f_path.dentry->d_inode->i_size;
+ unsigned int mem = (uintptr_t)file->private_data & 3;
+ struct adapter *adap = file->private_data - mem;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= avail)
+ return 0;
+ if (count > avail - pos)
+ count = avail - pos;
+
+ while (count) {
+ size_t len;
+ int ret, ofst;
+ __be32 data[16];
+
+ if (mem == MEM_MC)
+ ret = t4_mc_read(adap, pos, data, NULL);
+ else
+ ret = t4_edc_read(adap, mem, pos, data, NULL);
+ if (ret)
+ return ret;
+
+ ofst = pos % sizeof(data);
+ len = min(count, sizeof(data) - ofst);
+ if (copy_to_user(buf, (u8 *)data + ofst, len))
+ return -EFAULT;
+
+ buf += len;
+ pos += len;
+ count -= len;
+ }
+ count = pos - *ppos;
+ *ppos = pos;
+ return count;
+}
+
+static const struct file_operations mem_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = mem_open,
+ .read = mem_read,
+ .llseek = default_llseek,
+};
+
+static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
+ unsigned int idx, unsigned int size_mb)
+{
+ struct dentry *de;
+
+ de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
+ (void *)adap + idx, &mem_debugfs_fops);
+ if (de && de->d_inode)
+ de->d_inode->i_size = size_mb << 20;
+}
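+
+/*
+ * The debugfs private data set up above is the adapter pointer with the
+ * memory index (MEM_EDC0, MEM_EDC1 or MEM_MC) folded into its low two
+ * bits, which are always zero for a kmalloc'ed adapter; mem_read()
+ * recovers both halves.  A minimal sketch of the round trip, for
+ * illustration only:
+ */
+#if 0
+static void memtag_example(struct adapter *adap)
+{
+	void *tagged = (void *)adap + MEM_MC;		/* encode */
+	unsigned int mem = (uintptr_t)tagged & 3;	/* decode the index */
+	struct adapter *orig = tagged - mem;		/* recover the pointer */
+
+	BUG_ON(mem != MEM_MC || orig != adap);
+}
+#endif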
+
+static int __devinit setup_debugfs(struct adapter *adap)
+{
+ int i;
+
+ if (IS_ERR_OR_NULL(adap->debugfs_root))
+ return -1;
+
+ i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
+ if (i & EDRAM0_ENABLE)
+ add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
+ if (i & EDRAM1_ENABLE)
+ add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
+ if (i & EXT_MEM_ENABLE)
+ add_debugfs_mem(adap, "mc", MEM_MC,
+ EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
+ if (adap->l2t)
+ debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
+ &t4_l2t_fops);
+ return 0;
+}
+
+/*
+ * upper-layer driver support
+ */
+
+/*
+ * Allocate an active-open TID and set it to the supplied value.
+ */
+int cxgb4_alloc_atid(struct tid_info *t, void *data)
+{
+ int atid = -1;
+
+ spin_lock_bh(&t->atid_lock);
+ if (t->afree) {
+ union aopen_entry *p = t->afree;
+
+ atid = p - t->atid_tab;
+ t->afree = p->next;
+ p->data = data;
+ t->atids_in_use++;
+ }
+ spin_unlock_bh(&t->atid_lock);
+ return atid;
+}
+EXPORT_SYMBOL(cxgb4_alloc_atid);
+
+/*
+ * Release an active-open TID.
+ */
+void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
+{
+ union aopen_entry *p = &t->atid_tab[atid];
+
+ spin_lock_bh(&t->atid_lock);
+ p->next = t->afree;
+ t->afree = p;
+ t->atids_in_use--;
+ spin_unlock_bh(&t->atid_lock);
+}
+EXPORT_SYMBOL(cxgb4_free_atid);
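+
+/*
+ * A minimal sketch of the active-open TID lifecycle from a ULD's point of
+ * view (illustrative only; example_send_act_open_req() is a hypothetical
+ * helper standing in for building and sending the CPL active-open request):
+ */
+#if 0
+int example_send_act_open_req(unsigned int atid);	/* hypothetical */
+
+static int example_active_open(struct tid_info *t, void *conn_ctx)
+{
+	int atid = cxgb4_alloc_atid(t, conn_ctx);
+
+	if (atid < 0)
+		return -ENOMEM;			/* free list exhausted */
+
+	if (example_send_act_open_req(atid)) {
+		cxgb4_free_atid(t, atid);	/* return the TID on failure */
+		return -EIO;
+	}
+	return 0;
+}
+#endif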
+
+/*
+ * Allocate a server TID and set it to the supplied value.
+ */
+int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
+{
+ int stid;
+
+ spin_lock_bh(&t->stid_lock);
+ if (family == PF_INET) {
+ stid = find_first_zero_bit(t->stid_bmap, t->nstids);
+ if (stid < t->nstids)
+ __set_bit(stid, t->stid_bmap);
+ else
+ stid = -1;
+ } else {
+ stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
+ if (stid < 0)
+ stid = -1;
+ }
+ if (stid >= 0) {
+ t->stid_tab[stid].data = data;
+ stid += t->stid_base;
+ t->stids_in_use++;
+ }
+ spin_unlock_bh(&t->stid_lock);
+ return stid;
+}
+EXPORT_SYMBOL(cxgb4_alloc_stid);
+
+/*
+ * Release a server TID.
+ */
+void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
+{
+ stid -= t->stid_base;
+ spin_lock_bh(&t->stid_lock);
+ if (family == PF_INET)
+ __clear_bit(stid, t->stid_bmap);
+ else
+ bitmap_release_region(t->stid_bmap, stid, 2);
+ t->stid_tab[stid].data = NULL;
+ t->stids_in_use--;
+ spin_unlock_bh(&t->stid_lock);
+}
+EXPORT_SYMBOL(cxgb4_free_stid);
+
+/*
+ * Populate a TID_RELEASE WR. Caller must properly size the skb.
+ */
+static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
+ unsigned int tid)
+{
+ struct cpl_tid_release *req;
+
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
+ req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
+ INIT_TP_WR(req, tid);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
+}
+
+/*
+ * Queue a TID release request and, if necessary, schedule a work item to
+ * process it.
+ */
+static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
+ unsigned int tid)
+{
+ void **p = &t->tid_tab[tid];
+ struct adapter *adap = container_of(t, struct adapter, tids);
+
+ spin_lock_bh(&adap->tid_release_lock);
+ *p = adap->tid_release_head;
+ /* Low 2 bits encode the Tx channel number */
+ adap->tid_release_head = (void **)((uintptr_t)p | chan);
+ if (!adap->tid_release_task_busy) {
+ adap->tid_release_task_busy = true;
+ schedule_work(&adap->tid_release_task);
+ }
+ spin_unlock_bh(&adap->tid_release_lock);
+}
+
+/*
+ * Process the list of pending TID release requests.
+ */
+static void process_tid_release_list(struct work_struct *work)
+{
+ struct sk_buff *skb;
+ struct adapter *adap;
+
+ adap = container_of(work, struct adapter, tid_release_task);
+
+ spin_lock_bh(&adap->tid_release_lock);
+ while (adap->tid_release_head) {
+ void **p = adap->tid_release_head;
+ unsigned int chan = (uintptr_t)p & 3;
+ p = (void *)p - chan;
+
+ adap->tid_release_head = *p;
+ *p = NULL;
+ spin_unlock_bh(&adap->tid_release_lock);
+
+ while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
+ GFP_KERNEL)))
+ schedule_timeout_uninterruptible(1);
+
+ mk_tid_release(skb, chan, p - adap->tids.tid_tab);
+ t4_ofld_send(adap, skb);
+ spin_lock_bh(&adap->tid_release_lock);
+ }
+ adap->tid_release_task_busy = false;
+ spin_unlock_bh(&adap->tid_release_lock);
+}
+
+/*
+ * Release a TID and inform HW.  If we are unable to allocate the release
+ * message we defer it to a work queue.
+ */
+void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
+{
+ void *old;
+ struct sk_buff *skb;
+ struct adapter *adap = container_of(t, struct adapter, tids);
+
+ old = t->tid_tab[tid];
+ skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
+ if (likely(skb)) {
+ t->tid_tab[tid] = NULL;
+ mk_tid_release(skb, chan, tid);
+ t4_ofld_send(adap, skb);
+ } else
+ cxgb4_queue_tid_release(t, chan, tid);
+ if (old)
+ atomic_dec(&t->tids_in_use);
+}
+EXPORT_SYMBOL(cxgb4_remove_tid);
+
+/*
+ * Allocate and initialize the TID tables. Returns 0 on success.
+ */
+static int tid_init(struct tid_info *t)
+{
+ size_t size;
+ unsigned int natids = t->natids;
+
+ size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
+ t->nstids * sizeof(*t->stid_tab) +
+ BITS_TO_LONGS(t->nstids) * sizeof(long);
+ t->tid_tab = t4_alloc_mem(size);
+ if (!t->tid_tab)
+ return -ENOMEM;
+
+ t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
+ t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
+ t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
+ spin_lock_init(&t->stid_lock);
+ spin_lock_init(&t->atid_lock);
+
+ t->stids_in_use = 0;
+ t->afree = NULL;
+ t->atids_in_use = 0;
+ atomic_set(&t->tids_in_use, 0);
+
+	/* Set up the free list for atid_tab and clear the stid bitmap. */
+ if (natids) {
+ while (--natids)
+ t->atid_tab[natids - 1].next = &t->atid_tab[natids];
+ t->afree = t->atid_tab;
+ }
+ bitmap_zero(t->stid_bmap, t->nstids);
+ return 0;
+}
+
+/**
+ * cxgb4_create_server - create an IP server
+ * @dev: the device
+ * @stid: the server TID
+ * @sip: local IP address to bind server to
+ * @sport: the server's TCP port
+ * @queue: queue to direct messages from this server to
+ *
+ * Create an IP server for the given port and address.
+ * Returns <0 on error and one of the %NET_XMIT_* values on success.
+ */
+int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
+ __be32 sip, __be16 sport, unsigned int queue)
+{
+ unsigned int chan;
+ struct sk_buff *skb;
+ struct adapter *adap;
+ struct cpl_pass_open_req *req;
+
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ adap = netdev2adap(dev);
+ req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
+ req->local_port = sport;
+ req->peer_port = htons(0);
+ req->local_ip = sip;
+ req->peer_ip = htonl(0);
+ chan = rxq_to_chan(&adap->sge, queue);
+ req->opt0 = cpu_to_be64(TX_CHAN(chan));
+ req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
+ SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+ return t4_mgmt_tx(adap, skb);
+}
+EXPORT_SYMBOL(cxgb4_create_server);
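+
+/*
+ * A minimal sketch of setting up a listening server (illustrative only):
+ * allocate a server TID, then ask HW to steer incoming SYNs for TCP port
+ * 80 on any local address to Rx queue 0.
+ */
+#if 0
+static int example_listen(struct net_device *dev, struct tid_info *t,
+			  void *listen_ctx)
+{
+	int stid = cxgb4_alloc_stid(t, PF_INET, listen_ctx);
+
+	if (stid < 0)
+		return -ENOMEM;
+	/* returns <0 on error, a NET_XMIT_* value on success */
+	return cxgb4_create_server(dev, stid, htonl(INADDR_ANY),
+				   htons(80), 0);
+}
+#endif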
+
+/**
+ * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
+ * @mtus: the HW MTU table
+ * @mtu: the target MTU
+ * @idx: index of selected entry in the MTU table
+ *
+ * Returns the index and the value in the HW MTU table that is closest to
+ * but does not exceed @mtu, unless @mtu is smaller than any value in the
+ * table, in which case that smallest available value is selected.
+ */
+unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
+ unsigned int *idx)
+{
+ unsigned int i = 0;
+
+ while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
+ ++i;
+ if (idx)
+ *idx = i;
+ return mtus[i];
+}
+EXPORT_SYMBOL(cxgb4_best_mtu);
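+
+/*
+ * For example, picking the largest HW MTU that does not exceed a standard
+ * 1500-byte Ethernet MTU (illustrative only):
+ */
+#if 0
+static unsigned int example_pick_mtu(const struct adapter *adap)
+{
+	unsigned int idx;
+
+	/* returns the table value; its index comes back through idx */
+	return cxgb4_best_mtu(adap->params.mtus, 1500, &idx);
+}
+#endif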
+
+/**
+ * cxgb4_port_chan - get the HW channel of a port
+ * @dev: the net device for the port
+ *
+ * Return the HW Tx channel of the given port.
+ */
+unsigned int cxgb4_port_chan(const struct net_device *dev)
+{
+ return netdev2pinfo(dev)->tx_chan;
+}
+EXPORT_SYMBOL(cxgb4_port_chan);
+
+/**
+ * cxgb4_port_viid - get the VI id of a port
+ * @dev: the net device for the port
+ *
+ * Return the VI id of the given port.
+ */
+unsigned int cxgb4_port_viid(const struct net_device *dev)
+{
+ return netdev2pinfo(dev)->viid;
+}
+EXPORT_SYMBOL(cxgb4_port_viid);
+
+/**
+ * cxgb4_port_idx - get the index of a port
+ * @dev: the net device for the port
+ *
+ * Return the index of the given port.
+ */
+unsigned int cxgb4_port_idx(const struct net_device *dev)
+{
+ return netdev2pinfo(dev)->port_id;
+}
+EXPORT_SYMBOL(cxgb4_port_idx);
+
+void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
+ struct tp_tcp_stats *v6)
+{
+ struct adapter *adap = pci_get_drvdata(pdev);
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_tcp_stats(adap, v4, v6);
+ spin_unlock(&adap->stats_lock);
+}
+EXPORT_SYMBOL(cxgb4_get_tcp_stats);
+
+void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
+ const unsigned int *pgsz_order)
+{
+ struct adapter *adap = netdev2adap(dev);
+
+ t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
+ t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
+ HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
+ HPZ3(pgsz_order[3]));
+}
+EXPORT_SYMBOL(cxgb4_iscsi_init);
+
+static struct pci_driver cxgb4_driver;
+
+static void check_neigh_update(struct neighbour *neigh)
+{
+ const struct device *parent;
+ const struct net_device *netdev = neigh->dev;
+
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ netdev = vlan_dev_real_dev(netdev);
+ parent = netdev->dev.parent;
+ if (parent && parent->driver == &cxgb4_driver.driver)
+ t4_l2t_update(dev_get_drvdata(parent), neigh);
+}
+
+static int netevent_cb(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ check_neigh_update(data);
+ break;
+ case NETEVENT_REDIRECT:
+ default:
+ break;
+ }
+ return 0;
+}
+
+static bool netevent_registered;
+static struct notifier_block cxgb4_netevent_nb = {
+ .notifier_call = netevent_cb
+};
+
+static void uld_attach(struct adapter *adap, unsigned int uld)
+{
+ void *handle;
+ struct cxgb4_lld_info lli;
+
+ lli.pdev = adap->pdev;
+ lli.l2t = adap->l2t;
+ lli.tids = &adap->tids;
+ lli.ports = adap->port;
+ lli.vr = &adap->vres;
+ lli.mtus = adap->params.mtus;
+ if (uld == CXGB4_ULD_RDMA) {
+ lli.rxq_ids = adap->sge.rdma_rxq;
+ lli.nrxq = adap->sge.rdmaqs;
+ } else if (uld == CXGB4_ULD_ISCSI) {
+ lli.rxq_ids = adap->sge.ofld_rxq;
+ lli.nrxq = adap->sge.ofldqsets;
+ }
+ lli.ntxq = adap->sge.ofldqsets;
+ lli.nchan = adap->params.nports;
+ lli.nports = adap->params.nports;
+ lli.wr_cred = adap->params.ofldq_wr_cred;
+ lli.adapter_type = adap->params.rev;
+ lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
+ lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
+ t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
+ (adap->fn * 4));
+ lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
+ t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
+ (adap->fn * 4));
+ lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
+ lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
+ lli.fw_vers = adap->params.fw_vers;
+
+ handle = ulds[uld].add(&lli);
+ if (IS_ERR(handle)) {
+ dev_warn(adap->pdev_dev,
+ "could not attach to the %s driver, error %ld\n",
+ uld_str[uld], PTR_ERR(handle));
+ return;
+ }
+
+ adap->uld_handle[uld] = handle;
+
+ if (!netevent_registered) {
+ register_netevent_notifier(&cxgb4_netevent_nb);
+ netevent_registered = true;
+ }
+
+ if (adap->flags & FULL_INIT_DONE)
+ ulds[uld].state_change(handle, CXGB4_STATE_UP);
+}
+
+static void attach_ulds(struct adapter *adap)
+{
+ unsigned int i;
+
+ mutex_lock(&uld_mutex);
+ list_add_tail(&adap->list_node, &adapter_list);
+ for (i = 0; i < CXGB4_ULD_MAX; i++)
+ if (ulds[i].add)
+ uld_attach(adap, i);
+ mutex_unlock(&uld_mutex);
+}
+
+static void detach_ulds(struct adapter *adap)
+{
+ unsigned int i;
+
+ mutex_lock(&uld_mutex);
+ list_del(&adap->list_node);
+ for (i = 0; i < CXGB4_ULD_MAX; i++)
+ if (adap->uld_handle[i]) {
+ ulds[i].state_change(adap->uld_handle[i],
+ CXGB4_STATE_DETACH);
+ adap->uld_handle[i] = NULL;
+ }
+ if (netevent_registered && list_empty(&adapter_list)) {
+ unregister_netevent_notifier(&cxgb4_netevent_nb);
+ netevent_registered = false;
+ }
+ mutex_unlock(&uld_mutex);
+}
+
+static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
+{
+ unsigned int i;
+
+ mutex_lock(&uld_mutex);
+ for (i = 0; i < CXGB4_ULD_MAX; i++)
+ if (adap->uld_handle[i])
+ ulds[i].state_change(adap->uld_handle[i], new_state);
+ mutex_unlock(&uld_mutex);
+}
+
+/**
+ * cxgb4_register_uld - register an upper-layer driver
+ * @type: the ULD type
+ * @p: the ULD methods
+ *
+ * Registers an upper-layer driver with this driver and notifies the ULD
+ * about any presently available devices that support its type. Returns
+ * %-EBUSY if a ULD of the same type is already registered.
+ */
+int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
+{
+ int ret = 0;
+ struct adapter *adap;
+
+ if (type >= CXGB4_ULD_MAX)
+ return -EINVAL;
+ mutex_lock(&uld_mutex);
+ if (ulds[type].add) {
+ ret = -EBUSY;
+ goto out;
+ }
+ ulds[type] = *p;
+ list_for_each_entry(adap, &adapter_list, list_node)
+ uld_attach(adap, type);
+out: mutex_unlock(&uld_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(cxgb4_register_uld);
+
+/**
+ * cxgb4_unregister_uld - unregister an upper-layer driver
+ * @type: the ULD type
+ *
+ * Unregisters an existing upper-layer driver.
+ */
+int cxgb4_unregister_uld(enum cxgb4_uld type)
+{
+ struct adapter *adap;
+
+ if (type >= CXGB4_ULD_MAX)
+ return -EINVAL;
+ mutex_lock(&uld_mutex);
+ list_for_each_entry(adap, &adapter_list, list_node)
+ adap->uld_handle[type] = NULL;
+ ulds[type].add = NULL;
+ mutex_unlock(&uld_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(cxgb4_unregister_uld);
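+
+/*
+ * A minimal sketch of a ULD registering itself (illustrative only; the
+ * example_* callbacks are hypothetical and would live in the ULD module).
+ * Note that add() must return an ERR_PTR() on failure, as uld_attach()
+ * above checks the handle with IS_ERR().
+ */
+#if 0
+void *example_add(const struct cxgb4_lld_info *lli);		/* hypothetical */
+int example_rx_handler(void *handle, const __be64 *rsp,
+		       const struct pkt_gl *gl);		/* hypothetical */
+int example_state_change(void *handle, enum cxgb4_state s);	/* hypothetical */
+
+static const struct cxgb4_uld_info example_uld_info = {
+	.name = "example",
+	.add = example_add,
+	.rx_handler = example_rx_handler,
+	.state_change = example_state_change,
+};
+
+static int __init example_init(void)
+{
+	return cxgb4_register_uld(CXGB4_ULD_ISCSI, &example_uld_info);
+}
+#endif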
+
+/**
+ * cxgb_up - enable the adapter
+ * @adap: adapter being enabled
+ *
+ * Called when the first port is enabled, this function performs the
+ * actions necessary to make an adapter operational, such as completing
+ * the initialization of HW modules, and enabling interrupts.
+ *
+ * Must be called with the rtnl lock held.
+ */
+static int cxgb_up(struct adapter *adap)
+{
+ int err;
+
+ err = setup_sge_queues(adap);
+ if (err)
+ goto out;
+ err = setup_rss(adap);
+ if (err)
+ goto freeq;
+
+ if (adap->flags & USING_MSIX) {
+ name_msix_vecs(adap);
+ err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
+ adap->msix_info[0].desc, adap);
+ if (err)
+ goto irq_err;
+
+ err = request_msix_queue_irqs(adap);
+ if (err) {
+ free_irq(adap->msix_info[0].vec, adap);
+ goto irq_err;
+ }
+ } else {
+ err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
+ (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
+ adap->port[0]->name, adap);
+ if (err)
+ goto irq_err;
+ }
+ enable_rx(adap);
+ t4_sge_start(adap);
+ t4_intr_enable(adap);
+ adap->flags |= FULL_INIT_DONE;
+ notify_ulds(adap, CXGB4_STATE_UP);
+ out:
+ return err;
+ irq_err:
+ dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
+ freeq:
+ t4_free_sge_resources(adap);
+ goto out;
+}
+
+static void cxgb_down(struct adapter *adapter)
+{
+ t4_intr_disable(adapter);
+ cancel_work_sync(&adapter->tid_release_task);
+ adapter->tid_release_task_busy = false;
+ adapter->tid_release_head = NULL;
+
+ if (adapter->flags & USING_MSIX) {
+ free_msix_queue_irqs(adapter);
+ free_irq(adapter->msix_info[0].vec, adapter);
+ } else
+ free_irq(adapter->pdev->irq, adapter);
+ quiesce_rx(adapter);
+ t4_sge_stop(adapter);
+ t4_free_sge_resources(adapter);
+ adapter->flags &= ~FULL_INIT_DONE;
+}
+
+/*
+ * net_device operations
+ */
+static int cxgb_open(struct net_device *dev)
+{
+ int err;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ netif_carrier_off(dev);
+
+ if (!(adapter->flags & FULL_INIT_DONE)) {
+ err = cxgb_up(adapter);
+ if (err < 0)
+ return err;
+ }
+
+ err = link_start(dev);
+ if (!err)
+ netif_tx_start_all_queues(dev);
+ return err;
+}
+
+static int cxgb_close(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
+ return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
+}
+
+static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *ns)
+{
+ struct port_stats stats;
+ struct port_info *p = netdev_priv(dev);
+ struct adapter *adapter = p->adapter;
+
+ spin_lock(&adapter->stats_lock);
+ t4_get_port_stats(adapter, p->tx_chan, &stats);
+ spin_unlock(&adapter->stats_lock);
+
+ ns->tx_bytes = stats.tx_octets;
+ ns->tx_packets = stats.tx_frames;
+ ns->rx_bytes = stats.rx_octets;
+ ns->rx_packets = stats.rx_frames;
+ ns->multicast = stats.rx_mcast_frames;
+
+ /* detailed rx_errors */
+ ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
+ stats.rx_runt;
+ ns->rx_over_errors = 0;
+ ns->rx_crc_errors = stats.rx_fcs_err;
+ ns->rx_frame_errors = stats.rx_symbol_err;
+ ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
+ stats.rx_ovflow2 + stats.rx_ovflow3 +
+ stats.rx_trunc0 + stats.rx_trunc1 +
+ stats.rx_trunc2 + stats.rx_trunc3;
+ ns->rx_missed_errors = 0;
+
+ /* detailed tx_errors */
+ ns->tx_aborted_errors = 0;
+ ns->tx_carrier_errors = 0;
+ ns->tx_fifo_errors = 0;
+ ns->tx_heartbeat_errors = 0;
+ ns->tx_window_errors = 0;
+
+ ns->tx_errors = stats.tx_error_frames;
+ ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
+ ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
+ return ns;
+}
+
+static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ unsigned int mbox;
+ int ret = 0, prtad, devad;
+ struct port_info *pi = netdev_priv(dev);
+ struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ if (pi->mdio_addr < 0)
+ return -EOPNOTSUPP;
+ data->phy_id = pi->mdio_addr;
+ break;
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ if (mdio_phy_id_is_c45(data->phy_id)) {
+ prtad = mdio_phy_id_prtad(data->phy_id);
+ devad = mdio_phy_id_devad(data->phy_id);
+ } else if (data->phy_id < 32) {
+ prtad = data->phy_id;
+ devad = 0;
+ data->reg_num &= 0x1f;
+ } else
+ return -EINVAL;
+
+ mbox = pi->adapter->fn;
+ if (cmd == SIOCGMIIREG)
+ ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
+ data->reg_num, &data->val_out);
+ else
+ ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
+ data->reg_num, data->val_in);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static void cxgb_set_rxmode(struct net_device *dev)
+{
+ /* unfortunately we can't return errors to the stack */
+ set_rxmode(dev, -1, false);
+}
+
+static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+
+ if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
+ return -EINVAL;
+ ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
+ -1, -1, -1, true);
+ if (!ret)
+ dev->mtu = new_mtu;
+ return ret;
+}
+
+static int cxgb_set_mac_addr(struct net_device *dev, void *p)
+{
+ int ret;
+ struct sockaddr *addr = p;
+ struct port_info *pi = netdev_priv(dev);
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
+ pi->xact_addr_filt, addr->sa_data, true, true);
+ if (ret < 0)
+ return ret;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ pi->xact_addr_filt = ret;
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cxgb_netpoll(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+
+ if (adap->flags & USING_MSIX) {
+ int i;
+ struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
+
+ for (i = pi->nqsets; i; i--, rx++)
+ t4_sge_intr_msix(0, &rx->rspq);
+ } else
+ t4_intr_handler(adap)(0, adap);
+}
+#endif
+
+static const struct net_device_ops cxgb4_netdev_ops = {
+ .ndo_open = cxgb_open,
+ .ndo_stop = cxgb_close,
+ .ndo_start_xmit = t4_eth_xmit,
+ .ndo_get_stats64 = cxgb_get_stats,
+ .ndo_set_rx_mode = cxgb_set_rxmode,
+ .ndo_set_mac_address = cxgb_set_mac_addr,
+ .ndo_set_features = cxgb_set_features,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = cxgb_ioctl,
+ .ndo_change_mtu = cxgb_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cxgb_netpoll,
+#endif
+};
+
+void t4_fatal_err(struct adapter *adap)
+{
+ t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
+ t4_intr_disable(adap);
+ dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
+}
+
+static void setup_memwin(struct adapter *adap)
+{
+ u32 bar0;
+
+ bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
+ t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
+ (bar0 + MEMWIN0_BASE) | BIR(0) |
+ WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
+ t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
+ (bar0 + MEMWIN1_BASE) | BIR(0) |
+ WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
+ t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
+ (bar0 + MEMWIN2_BASE) | BIR(0) |
+ WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
+ if (adap->vres.ocq.size) {
+ unsigned int start, sz_kb;
+
+ start = pci_resource_start(adap->pdev, 2) +
+ OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
+ sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
+ start | BIR(1) | WINDOW(ilog2(sz_kb)));
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
+ adap->vres.ocq.start);
+ t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
+ }
+}
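+
+/*
+ * Note: the WINDOW field programmed above encodes the aperture size as
+ * log2(bytes) - 10, i.e. in units where 0 means 1KB.  For example, a
+ * 64KB aperture is encoded as ilog2(65536) - 10 = 16 - 10 = 6.
+ */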
+
+static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
+{
+ u32 v;
+ int ret;
+
+ /* get device capabilities */
+ memset(c, 0, sizeof(*c));
+ c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST | FW_CMD_READ);
+ c->retval_len16 = htonl(FW_LEN16(*c));
+ ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
+ if (ret < 0)
+ return ret;
+
+ /* select capabilities we'll be using */
+ if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
+ if (!vf_acls)
+ c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
+ else
+ c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
+ } else if (vf_acls) {
+ dev_err(adap->pdev_dev, "virtualization ACLs not supported");
+ return ret;
+ }
+ c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = t4_config_glbl_rss(adap, adap->fn,
+ FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
+ FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
+ FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
+ if (ret < 0)
+ return ret;
+
+ ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
+ 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
+ if (ret < 0)
+ return ret;
+
+ t4_sge_init(adap);
+
+ /* tweak some settings */
+ t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
+ t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
+ t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
+ v = t4_read_reg(adap, TP_PIO_DATA);
+ t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
+
+ /* get basic stuff going */
+ return t4_early_init(adap, adap->fn);
+}
+
+/*
+ * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
+ */
+#define MAX_ATIDS 8192U
+
+/*
+ * Phase 0 of initialization: contact FW, obtain config, perform basic init.
+ */
+static int adap_init0(struct adapter *adap)
+{
+ int ret;
+ u32 v, port_vec;
+ enum dev_state state;
+ u32 params[7], val[7];
+ struct fw_caps_config_cmd c;
+
+ ret = t4_check_fw_version(adap);
+ if (ret == -EINVAL || ret > 0) {
+ if (upgrade_fw(adap) >= 0) /* recache FW version */
+ ret = t4_check_fw_version(adap);
+ }
+ if (ret < 0)
+ return ret;
+
+ /* contact FW, request master */
+ ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state);
+ if (ret < 0) {
+ dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
+ ret);
+ return ret;
+ }
+
+ /* reset device */
+ ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST);
+ if (ret < 0)
+ goto bye;
+
+ for (v = 0; v < SGE_NTIMERS - 1; v++)
+ adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
+ adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
+ adap->sge.counter_val[0] = 1;
+ for (v = 1; v < SGE_NCOUNTERS; v++)
+ adap->sge.counter_val[v] = min(intr_cnt[v - 1],
+ THRESHOLD_3_MASK);
+#define FW_PARAM_DEV(param) \
+ (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+
+ params[0] = FW_PARAM_DEV(CCLK);
+ ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->params.vpd.cclk = val[0];
+
+ ret = adap_init1(adap, &c);
+ if (ret < 0)
+ goto bye;
+
+#define FW_PARAM_PFVF(param) \
+ (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
+ FW_PARAMS_PARAM_Y(adap->fn))
+
+ params[0] = FW_PARAM_DEV(PORTVEC);
+ params[1] = FW_PARAM_PFVF(L2T_START);
+ params[2] = FW_PARAM_PFVF(L2T_END);
+ params[3] = FW_PARAM_PFVF(FILTER_START);
+ params[4] = FW_PARAM_PFVF(FILTER_END);
+ params[5] = FW_PARAM_PFVF(IQFLINT_START);
+ params[6] = FW_PARAM_PFVF(EQ_START);
+ ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
+ if (ret < 0)
+ goto bye;
+ port_vec = val[0];
+ adap->tids.ftid_base = val[3];
+ adap->tids.nftids = val[4] - val[3] + 1;
+ adap->sge.ingr_start = val[5];
+ adap->sge.egr_start = val[6];
+
+ if (c.ofldcaps) {
+ /* query offload-related parameters */
+ params[0] = FW_PARAM_DEV(NTID);
+ params[1] = FW_PARAM_PFVF(SERVER_START);
+ params[2] = FW_PARAM_PFVF(SERVER_END);
+ params[3] = FW_PARAM_PFVF(TDDP_START);
+ params[4] = FW_PARAM_PFVF(TDDP_END);
+ params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
+ ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
+ val);
+ if (ret < 0)
+ goto bye;
+ adap->tids.ntids = val[0];
+ adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
+ adap->tids.stid_base = val[1];
+ adap->tids.nstids = val[2] - val[1] + 1;
+ adap->vres.ddp.start = val[3];
+ adap->vres.ddp.size = val[4] - val[3] + 1;
+ adap->params.ofldq_wr_cred = val[5];
+ adap->params.offload = 1;
+ }
+ if (c.rdmacaps) {
+ params[0] = FW_PARAM_PFVF(STAG_START);
+ params[1] = FW_PARAM_PFVF(STAG_END);
+ params[2] = FW_PARAM_PFVF(RQ_START);
+ params[3] = FW_PARAM_PFVF(RQ_END);
+ params[4] = FW_PARAM_PFVF(PBL_START);
+ params[5] = FW_PARAM_PFVF(PBL_END);
+ ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
+ val);
+ if (ret < 0)
+ goto bye;
+ adap->vres.stag.start = val[0];
+ adap->vres.stag.size = val[1] - val[0] + 1;
+ adap->vres.rq.start = val[2];
+ adap->vres.rq.size = val[3] - val[2] + 1;
+ adap->vres.pbl.start = val[4];
+ adap->vres.pbl.size = val[5] - val[4] + 1;
+
+ params[0] = FW_PARAM_PFVF(SQRQ_START);
+ params[1] = FW_PARAM_PFVF(SQRQ_END);
+ params[2] = FW_PARAM_PFVF(CQ_START);
+ params[3] = FW_PARAM_PFVF(CQ_END);
+ params[4] = FW_PARAM_PFVF(OCQ_START);
+ params[5] = FW_PARAM_PFVF(OCQ_END);
+ ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
+ val);
+ if (ret < 0)
+ goto bye;
+ adap->vres.qp.start = val[0];
+ adap->vres.qp.size = val[1] - val[0] + 1;
+ adap->vres.cq.start = val[2];
+ adap->vres.cq.size = val[3] - val[2] + 1;
+ adap->vres.ocq.start = val[4];
+ adap->vres.ocq.size = val[5] - val[4] + 1;
+ }
+ if (c.iscsicaps) {
+ params[0] = FW_PARAM_PFVF(ISCSI_START);
+ params[1] = FW_PARAM_PFVF(ISCSI_END);
+ ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params,
+ val);
+ if (ret < 0)
+ goto bye;
+ adap->vres.iscsi.start = val[0];
+ adap->vres.iscsi.size = val[1] - val[0] + 1;
+ }
+#undef FW_PARAM_PFVF
+#undef FW_PARAM_DEV
+
+ adap->params.nports = hweight32(port_vec);
+ adap->params.portvec = port_vec;
+ adap->flags |= FW_OK;
+
+	/* These are finalized by FW initialization; load their values now */
+ v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
+ adap->params.tp.tre = TIMERRESOLUTION_GET(v);
+ t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
+ t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
+ adap->params.b_wnd);
+
+#ifdef CONFIG_PCI_IOV
+	/*
+	 * Provision resource limits for Virtual Functions.  We currently
+	 * grant them all the same static resource limits except for the Port
+	 * Access Rights Mask, which we assign based on the PF.  All of the
+	 * static provisioning for both the PF and VFs really needs to be
+	 * managed persistently for each device the firmware controls.
+	 */
+ {
+ int pf, vf;
+
+ for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
+ if (num_vf[pf] <= 0)
+ continue;
+
+ /* VF numbering starts at 1! */
+ for (vf = 1; vf <= num_vf[pf]; vf++) {
+ ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
+ VFRES_NEQ, VFRES_NETHCTRL,
+ VFRES_NIQFLINT, VFRES_NIQ,
+ VFRES_TC, VFRES_NVI,
+ FW_PFVF_CMD_CMASK_MASK,
+ pfvfres_pmask(adap, pf, vf),
+ VFRES_NEXACTF,
+ VFRES_R_CAPS, VFRES_WX_CAPS);
+ if (ret < 0)
+ dev_warn(adap->pdev_dev, "failed to "
+ "provision pf/vf=%d/%d; "
+ "err=%d\n", pf, vf, ret);
+ }
+ }
+ }
+#endif
+
+ setup_memwin(adap);
+ return 0;
+
+	/*
+	 * If a command timed out or failed with EIO, the FW is not operating
+	 * within its spec or something catastrophic happened to the HW/FW.
+	 * Either way, stop issuing commands.
+	 */
+bye: if (ret != -ETIMEDOUT && ret != -EIO)
+ t4_fw_bye(adap, adap->fn);
+ return ret;
+}
+
+/* EEH callbacks */
+
+static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ int i;
+ struct adapter *adap = pci_get_drvdata(pdev);
+
+ if (!adap)
+ goto out;
+
+ rtnl_lock();
+ adap->flags &= ~FW_OK;
+ notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
+ for_each_port(adap, i) {
+ struct net_device *dev = adap->port[i];
+
+ netif_device_detach(dev);
+ netif_carrier_off(dev);
+ }
+ if (adap->flags & FULL_INIT_DONE)
+ cxgb_down(adap);
+ rtnl_unlock();
+ pci_disable_device(pdev);
+out: return state == pci_channel_io_perm_failure ?
+ PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
+{
+ int i, ret;
+ struct fw_caps_config_cmd c;
+ struct adapter *adap = pci_get_drvdata(pdev);
+
+ if (!adap) {
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ if (t4_wait_dev_ready(adap) < 0)
+ return PCI_ERS_RESULT_DISCONNECT;
+ if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
+ return PCI_ERS_RESULT_DISCONNECT;
+ adap->flags |= FW_OK;
+ if (adap_init1(adap, &c))
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ for_each_port(adap, i) {
+ struct port_info *p = adap2pinfo(adap, i);
+
+ ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
+ NULL, NULL);
+ if (ret < 0)
+ return PCI_ERS_RESULT_DISCONNECT;
+ p->viid = ret;
+ p->xact_addr_filt = -1;
+ }
+
+ t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
+ adap->params.b_wnd);
+ setup_memwin(adap);
+ if (cxgb_up(adap))
+ return PCI_ERS_RESULT_DISCONNECT;
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void eeh_resume(struct pci_dev *pdev)
+{
+ int i;
+ struct adapter *adap = pci_get_drvdata(pdev);
+
+ if (!adap)
+ return;
+
+ rtnl_lock();
+ for_each_port(adap, i) {
+ struct net_device *dev = adap->port[i];
+
+ if (netif_running(dev)) {
+ link_start(dev);
+ cxgb_set_rxmode(dev);
+ }
+ netif_device_attach(dev);
+ }
+ rtnl_unlock();
+}
+
+static struct pci_error_handlers cxgb4_eeh = {
+ .error_detected = eeh_err_detected,
+ .slot_reset = eeh_slot_reset,
+ .resume = eeh_resume,
+};
+
+static inline bool is_10g_port(const struct link_config *lc)
+{
+ return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
+}
+
+static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
+ unsigned int size, unsigned int iqe_size)
+{
+ q->intr_params = QINTR_TIMER_IDX(timer_idx) |
+ (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
+ q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
+ q->iqe_len = iqe_size;
+ q->size = size;
+}
+
+/*
+ * Perform default configuration of DMA queues depending on the number and type
+ * of ports we found and the number of available CPUs. Most settings can be
+ * modified by the admin prior to actual use.
+ */
+static void __devinit cfg_queues(struct adapter *adap)
+{
+ struct sge *s = &adap->sge;
+ int i, q10g = 0, n10g = 0, qidx = 0;
+
+ for_each_port(adap, i)
+ n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
+
+	/*
+	 * We default to one queue set per non-10G port and up to one queue
+	 * set per online CPU for each 10G port.
+	 */
+ if (n10g)
+ q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
+ if (q10g > num_online_cpus())
+ q10g = num_online_cpus();
+
+ for_each_port(adap, i) {
+ struct port_info *pi = adap2pinfo(adap, i);
+
+ pi->first_qset = qidx;
+ pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
+ qidx += pi->nqsets;
+ }
+
+ s->ethqsets = qidx;
+ s->max_ethqsets = qidx; /* MSI-X may lower it later */
+
+ if (is_offload(adap)) {
+		/*
+		 * For offload we use one queue per channel if all ports are
+		 * at most 1G; otherwise we divide all available queues among
+		 * the channels, capped by the number of available cores.
+		 */
+ if (n10g) {
+ i = min_t(int, ARRAY_SIZE(s->ofldrxq),
+ num_online_cpus());
+ s->ofldqsets = roundup(i, adap->params.nports);
+ } else
+ s->ofldqsets = adap->params.nports;
+ /* For RDMA one Rx queue per channel suffices */
+ s->rdmaqs = adap->params.nports;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
+ struct sge_eth_rxq *r = &s->ethrxq[i];
+
+ init_rspq(&r->rspq, 0, 0, 1024, 64);
+ r->fl.size = 72;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
+ s->ethtxq[i].q.size = 1024;
+
+ for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
+ s->ctrlq[i].q.size = 512;
+
+ for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
+ s->ofldtxq[i].q.size = 1024;
+
+ for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
+ struct sge_ofld_rxq *r = &s->ofldrxq[i];
+
+ init_rspq(&r->rspq, 0, 0, 1024, 64);
+ r->rspq.uld = CXGB4_ULD_ISCSI;
+ r->fl.size = 72;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
+ struct sge_ofld_rxq *r = &s->rdmarxq[i];
+
+ init_rspq(&r->rspq, 0, 0, 511, 64);
+ r->rspq.uld = CXGB4_ULD_RDMA;
+ r->fl.size = 72;
+ }
+
+ init_rspq(&s->fw_evtq, 6, 0, 512, 64);
+ init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
+}
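+
+/*
+ * Worked example of the split above (numbers hypothetical): with two 10G
+ * and two 1G ports and MAX_ETH_QSETS == 32, each 1G port gets one queue
+ * set and q10g = (32 - 2) / 2 = 15, which is then capped at
+ * num_online_cpus(); on an 8-CPU machine each 10G port ends up with 8
+ * queue sets.
+ */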
+
+/*
+ * Reduce the number of Ethernet queues across all ports to at most n.
+ * The caller guarantees that n is large enough to provide at least one
+ * queue per port.
+ */
+static void __devinit reduce_ethqs(struct adapter *adap, int n)
+{
+ int i;
+ struct port_info *pi;
+
+ while (n < adap->sge.ethqsets)
+ for_each_port(adap, i) {
+ pi = adap2pinfo(adap, i);
+ if (pi->nqsets > 1) {
+ pi->nqsets--;
+ adap->sge.ethqsets--;
+ if (adap->sge.ethqsets <= n)
+ break;
+ }
+ }
+
+ n = 0;
+ for_each_port(adap, i) {
+ pi = adap2pinfo(adap, i);
+ pi->first_qset = n;
+ n += pi->nqsets;
+ }
+}
+
+/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
+#define EXTRA_VECS 2
+
+static int __devinit enable_msix(struct adapter *adap)
+{
+ int ofld_need = 0;
+ int i, err, want, need;
+ struct sge *s = &adap->sge;
+ unsigned int nchan = adap->params.nports;
+ struct msix_entry entries[MAX_INGQ + 1];
+
+ for (i = 0; i < ARRAY_SIZE(entries); ++i)
+ entries[i].entry = i;
+
+ want = s->max_ethqsets + EXTRA_VECS;
+ if (is_offload(adap)) {
+ want += s->rdmaqs + s->ofldqsets;
+ /* need nchan for each possible ULD */
+ ofld_need = 2 * nchan;
+ }
+ need = adap->params.nports + EXTRA_VECS + ofld_need;
+
+ while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
+ want = err;
+
+ if (!err) {
+		/*
+		 * Distribute the available vectors to the various queue
+		 * groups.  Every group gets its minimum requirement and the
+		 * NIC gets top priority for any leftovers.
+		 */
+ i = want - EXTRA_VECS - ofld_need;
+ if (i < s->max_ethqsets) {
+ s->max_ethqsets = i;
+ if (i < s->ethqsets)
+ reduce_ethqs(adap, i);
+ }
+ if (is_offload(adap)) {
+ i = want - EXTRA_VECS - s->max_ethqsets;
+ i -= ofld_need - nchan;
+ s->ofldqsets = (i / nchan) * nchan; /* round down */
+ }
+ for (i = 0; i < want; ++i)
+ adap->msix_info[i].vec = entries[i].vector;
+ } else if (err > 0)
+ dev_info(adap->pdev_dev,
+ "only %d MSI-X vectors left, not using MSI-X\n", err);
+ return err;
+}
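+
+/*
+ * Note the retry idiom above: pci_enable_msix() returns 0 on success, a
+ * negative errno on hard failure, and a positive count of the vectors
+ * actually available when the request is too large.  The loop keeps
+ * retrying with that smaller count for as long as it still covers the
+ * minimum in 'need'.
+ */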
+
+#undef EXTRA_VECS
+
+static int __devinit init_rss(struct adapter *adap)
+{
+ unsigned int i, j;
+
+ for_each_port(adap, i) {
+ struct port_info *pi = adap2pinfo(adap, i);
+
+ pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
+ if (!pi->rss)
+ return -ENOMEM;
+ for (j = 0; j < pi->rss_size; j++)
+ pi->rss[j] = j % pi->nqsets;
+ }
+ return 0;
+}
+
+static void __devinit print_port_info(const struct net_device *dev)
+{
+ static const char *base[] = {
+ "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
+ "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
+ };
+
+ char buf[80];
+ char *bufp = buf;
+ const char *spd = "";
+ const struct port_info *pi = netdev_priv(dev);
+ const struct adapter *adap = pi->adapter;
+
+ if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
+ spd = " 2.5 GT/s";
+ else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
+ spd = " 5 GT/s";
+
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
+ bufp += sprintf(bufp, "100/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
+ bufp += sprintf(bufp, "1000/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
+ bufp += sprintf(bufp, "10G/");
+ if (bufp != buf)
+ --bufp;
+ sprintf(bufp, "BASE-%s", base[pi->port_type]);
+
+ netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
+ adap->params.vpd.id, adap->params.rev, buf,
+ is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
+ (adap->flags & USING_MSIX) ? " MSI-X" :
+ (adap->flags & USING_MSI) ? " MSI" : "");
+ netdev_info(dev, "S/N: %s, E/C: %s\n",
+ adap->params.vpd.sn, adap->params.vpd.ec);
+}
+
+static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
+{
+ u16 v;
+ int pos;
+
+ pos = pci_pcie_cap(dev);
+ if (pos > 0) {
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v);
+ v |= PCI_EXP_DEVCTL_RELAX_EN;
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v);
+ }
+}
+
+/*
+ * Free the following resources:
+ * - memory used for tables
+ * - MSI/MSI-X
+ * - net devices
+ * - resources FW is holding for us
+ */
+static void free_some_resources(struct adapter *adapter)
+{
+ unsigned int i;
+
+ t4_free_mem(adapter->l2t);
+ t4_free_mem(adapter->tids.tid_tab);
+ disable_msi(adapter);
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]) {
+ kfree(adap2pinfo(adapter, i)->rss);
+ free_netdev(adapter->port[i]);
+ }
+ if (adapter->flags & FW_OK)
+ t4_fw_bye(adapter, adapter->fn);
+}
+
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
+ NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
+
+static int __devinit init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int func, i, err;
+ struct port_info *pi;
+ unsigned int highdma = 0;
+ struct adapter *adapter = NULL;
+
+ printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
+
+ err = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (err) {
+ /* Just info, some other driver may have claimed the device. */
+ dev_info(&pdev->dev, "cannot obtain PCI resources\n");
+ return err;
+ }
+
+ /* We control everything through one PF */
+ func = PCI_FUNC(pdev->devfn);
+ if (func != ent->driver_data) {
+ pci_save_state(pdev); /* to restore SR-IOV later */
+ goto sriov;
+ }
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
+ goto out_release_regions;
+ }
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ highdma = NETIF_F_HIGHDMA;
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
+ "coherent allocations\n");
+ goto out_disable_device;
+ }
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto out_disable_device;
+ }
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ enable_pcie_relaxed_ordering(pdev);
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter) {
+ err = -ENOMEM;
+ goto out_disable_device;
+ }
+
+ adapter->regs = pci_ioremap_bar(pdev, 0);
+ if (!adapter->regs) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+
+ adapter->pdev = pdev;
+ adapter->pdev_dev = &pdev->dev;
+ adapter->fn = func;
+ adapter->msg_enable = dflt_msg_enable;
+ memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
+
+ spin_lock_init(&adapter->stats_lock);
+ spin_lock_init(&adapter->tid_release_lock);
+
+ INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
+
+ err = t4_prep_adapter(adapter);
+ if (err)
+ goto out_unmap_bar;
+ err = adap_init0(adapter);
+ if (err)
+ goto out_unmap_bar;
+
+ for_each_port(adapter, i) {
+ struct net_device *netdev;
+
+ netdev = alloc_etherdev_mq(sizeof(struct port_info),
+ MAX_ETH_QSETS);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto out_free_dev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter->port[i] = netdev;
+ pi = netdev_priv(netdev);
+ pi->adapter = adapter;
+ pi->xact_addr_filt = -1;
+ pi->port_id = i;
+ netdev->irq = pdev->irq;
+
+ netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_RXHASH |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ netdev->features |= netdev->hw_features | highdma;
+ netdev->vlan_features = netdev->features & VLAN_FEAT;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netdev->netdev_ops = &cxgb4_netdev_ops;
+ SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+ }
+
+ pci_set_drvdata(pdev, adapter);
+
+ if (adapter->flags & FW_OK) {
+ err = t4_port_init(adapter, func, func, 0);
+ if (err)
+ goto out_free_dev;
+ }
+
+	/*
+	 * Configure queues and allocate tables now; they can be needed as
+	 * soon as the first register_netdev completes.
+	 */
+ cfg_queues(adapter);
+
+ adapter->l2t = t4_init_l2t();
+ if (!adapter->l2t) {
+ /* We tolerate a lack of L2T, giving up some functionality */
+ dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
+ adapter->params.offload = 0;
+ }
+
+ if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
+ dev_warn(&pdev->dev, "could not allocate TID table, "
+ "continuing\n");
+ adapter->params.offload = 0;
+ }
+
+ /* See what interrupts we'll be using */
+ if (msi > 1 && enable_msix(adapter) == 0)
+ adapter->flags |= USING_MSIX;
+ else if (msi > 0 && pci_enable_msi(pdev) == 0)
+ adapter->flags |= USING_MSI;
+
+ err = init_rss(adapter);
+ if (err)
+ goto out_free_dev;
+
+	/*
+	 * The card is now ready to go.  If any errors occur during device
+	 * registration we do not fail the whole card but rather proceed only
+	 * with the ports we manage to register successfully.  However, we
+	 * must register at least one net device.
+	 */
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
+ netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
+
+ err = register_netdev(adapter->port[i]);
+ if (err)
+ break;
+ adapter->chan_map[pi->tx_chan] = i;
+ print_port_info(adapter->port[i]);
+ }
+ if (i == 0) {
+ dev_err(&pdev->dev, "could not register any net devices\n");
+ goto out_free_dev;
+ }
+ if (err) {
+ dev_warn(&pdev->dev, "only %d net devices registered\n", i);
+ err = 0;
+ }
+
+ if (cxgb4_debugfs_root) {
+ adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
+ cxgb4_debugfs_root);
+ setup_debugfs(adapter);
+ }
+
+ /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+ pdev->needs_freset = 1;
+
+ if (is_offload(adapter))
+ attach_ulds(adapter);
+
+sriov:
+#ifdef CONFIG_PCI_IOV
+ if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
+ if (pci_enable_sriov(pdev, num_vf[func]) == 0)
+ dev_info(&pdev->dev,
+ "instantiated %u virtual functions\n",
+ num_vf[func]);
+#endif
+ return 0;
+
+ out_free_dev:
+ free_some_resources(adapter);
+ out_unmap_bar:
+ iounmap(adapter->regs);
+ out_free_adapter:
+ kfree(adapter);
+ out_disable_device:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ out_release_regions:
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void __devexit remove_one(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+
+ pci_disable_sriov(pdev);
+
+ if (adapter) {
+ int i;
+
+ if (is_offload(adapter))
+ detach_ulds(adapter);
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+ unregister_netdev(adapter->port[i]);
+
+ if (adapter->debugfs_root)
+ debugfs_remove_recursive(adapter->debugfs_root);
+
+ if (adapter->flags & FULL_INIT_DONE)
+ cxgb_down(adapter);
+
+ free_some_resources(adapter);
+ iounmap(adapter->regs);
+ kfree(adapter);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ } else
+ pci_release_regions(pdev);
+}
+
+static struct pci_driver cxgb4_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = cxgb4_pci_tbl,
+ .probe = init_one,
+ .remove = __devexit_p(remove_one),
+ .err_handler = &cxgb4_eeh,
+};
+
+static int __init cxgb4_init_module(void)
+{
+ int ret;
+
+	/* Debugfs support is optional; just warn if this fails */
+ cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!cxgb4_debugfs_root)
+ pr_warning("could not create debugfs entry, continuing\n");
+
+ ret = pci_register_driver(&cxgb4_driver);
+ if (ret < 0)
+ debugfs_remove(cxgb4_debugfs_root);
+ return ret;
+}
+
+static void __exit cxgb4_cleanup_module(void)
+{
+ pci_unregister_driver(&cxgb4_driver);
+ debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
+}
+
+module_init(cxgb4_init_module);
+module_exit(cxgb4_cleanup_module);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
new file mode 100644
index 000000000000..b1d39b8d141a
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -0,0 +1,239 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_OFLD_H
+#define __CXGB4_OFLD_H
+
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/atomic.h>
+
+/* CPL message priority levels */
+enum {
+ CPL_PRIORITY_DATA = 0, /* data messages */
+ CPL_PRIORITY_SETUP = 1, /* connection setup messages */
+ CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
+ CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */
+ CPL_PRIORITY_ACK = 1, /* RX ACK messages */
+ CPL_PRIORITY_CONTROL = 1 /* control messages */
+};
+
+#define INIT_TP_WR(w, tid) do { \
+ (w)->wr.wr_hi = htonl(FW_WR_OP(FW_TP_WR) | \
+ FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \
+ (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \
+ FW_WR_FLOWID(tid)); \
+ (w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
+
+#define INIT_TP_WR_CPL(w, cpl, tid) do { \
+ INIT_TP_WR(w, tid); \
+ OPCODE_TID(w) = htonl(MK_OPCODE_TID(cpl, tid)); \
+} while (0)
+
+#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \
+ (w)->wr.wr_hi = htonl(FW_WR_OP(FW_ULPTX_WR) | FW_WR_ATOMIC(atomic)); \
+ (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \
+ FW_WR_FLOWID(tid)); \
+ (w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
+
+/* Special asynchronous notification message */
+#define CXGB4_MSG_AN ((void *)1)
+
+struct serv_entry {
+ void *data;
+};
+
+union aopen_entry {
+ void *data;
+ union aopen_entry *next;
+};
+
+/*
+ * Holds the size, base address, free list start, etc. of the TID, server TID,
+ * and active-open TID tables.  The tables themselves are allocated
+ * dynamically.
+ */
+struct tid_info {
+ void **tid_tab;
+ unsigned int ntids;
+
+ struct serv_entry *stid_tab;
+ unsigned long *stid_bmap;
+ unsigned int nstids;
+ unsigned int stid_base;
+
+ union aopen_entry *atid_tab;
+ unsigned int natids;
+
+ unsigned int nftids;
+ unsigned int ftid_base;
+
+ spinlock_t atid_lock ____cacheline_aligned_in_smp;
+ union aopen_entry *afree;
+ unsigned int atids_in_use;
+
+ spinlock_t stid_lock;
+ unsigned int stids_in_use;
+
+ atomic_t tids_in_use;
+};
+
+static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
+{
+ return tid < t->ntids ? t->tid_tab[tid] : NULL;
+}
+
+static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
+{
+ return atid < t->natids ? t->atid_tab[atid].data : NULL;
+}
+
+static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
+{
+ stid -= t->stid_base;
+ return stid < t->nstids ? t->stid_tab[stid].data : NULL;
+}
+
+static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
+ unsigned int tid)
+{
+ t->tid_tab[tid] = data;
+ atomic_inc(&t->tids_in_use);
+}
+
+int cxgb4_alloc_atid(struct tid_info *t, void *data);
+int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
+void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
+void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
+void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
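+
+/*
+ * A minimal sketch of the fully-open TID lifecycle (illustrative only):
+ * install the connection state, look it up when ingress CPLs arrive, and
+ * remove it on close, which also releases the TID to HW.
+ */
+#if 0
+static void example_tid_lifecycle(struct tid_info *t, unsigned int chan,
+				  unsigned int tid, void *conn_state)
+{
+	cxgb4_insert_tid(t, conn_state, tid);
+	BUG_ON(lookup_tid(t, tid) != conn_state);
+	cxgb4_remove_tid(t, chan, tid);	/* sends CPL_TID_RELEASE to HW */
+}
+#endif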
+
+struct in6_addr;
+
+int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
+ __be32 sip, __be16 sport, unsigned int queue);
+
+static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
+{
+ skb_set_queue_mapping(skb, (queue << 1) | prio);
+}
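+
+/*
+ * The mapping above packs (queue, priority) into the skb's queue mapping
+ * as (queue << 1) | prio.  For example, a control-priority message bound
+ * for Tx channel 2 gets queue mapping (2 << 1) | 1 == 5.
+ */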
+
+enum cxgb4_uld {
+ CXGB4_ULD_RDMA,
+ CXGB4_ULD_ISCSI,
+ CXGB4_ULD_MAX
+};
+
+enum cxgb4_state {
+ CXGB4_STATE_UP,
+ CXGB4_STATE_START_RECOVERY,
+ CXGB4_STATE_DOWN,
+ CXGB4_STATE_DETACH
+};
+
+struct pci_dev;
+struct l2t_data;
+struct net_device;
+struct pkt_gl;
+struct tp_tcp_stats;
+
+struct cxgb4_range {
+ unsigned int start;
+ unsigned int size;
+};
+
+struct cxgb4_virt_res { /* virtualized HW resources */
+ struct cxgb4_range ddp;
+ struct cxgb4_range iscsi;
+ struct cxgb4_range stag;
+ struct cxgb4_range rq;
+ struct cxgb4_range pbl;
+ struct cxgb4_range qp;
+ struct cxgb4_range cq;
+ struct cxgb4_range ocq;
+};
+
+#define OCQ_WIN_OFFSET(pdev, vres) \
+ (pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size))
+
+/*
+ * Block of information the LLD provides to ULDs attaching to a device.
+ */
+struct cxgb4_lld_info {
+ struct pci_dev *pdev; /* associated PCI device */
+ struct l2t_data *l2t; /* L2 table */
+ struct tid_info *tids; /* TID table */
+ struct net_device **ports; /* device ports */
+ const struct cxgb4_virt_res *vr; /* assorted HW resources */
+ const unsigned short *mtus; /* MTU table */
+ const unsigned short *rxq_ids; /* the ULD's Rx queue ids */
+ unsigned short nrxq; /* # of Rx queues */
+ unsigned short ntxq; /* # of Tx queues */
+ unsigned char nchan:4; /* # of channels */
+ unsigned char nports:4; /* # of ports */
+ unsigned char wr_cred; /* WR 16-byte credits */
+ unsigned char adapter_type; /* type of adapter */
+ unsigned char fw_api_ver; /* FW API version */
+ unsigned int fw_vers; /* FW version */
+ unsigned int iscsi_iolen; /* iSCSI max I/O length */
+ unsigned short udb_density; /* # of user DB/page */
+ unsigned short ucq_density; /* # of user CQs/page */
+ void __iomem *gts_reg; /* address of GTS register */
+ void __iomem *db_reg; /* address of kernel doorbell */
+};
+
+struct cxgb4_uld_info {
+ const char *name;
+ void *(*add)(const struct cxgb4_lld_info *p);
+ int (*rx_handler)(void *handle, const __be64 *rsp,
+ const struct pkt_gl *gl);
+ int (*state_change)(void *handle, enum cxgb4_state new_state);
+};
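+
+/*
+ * Registration sketch (hypothetical ULD; the callback names are invented):
+ *
+ *	static const struct cxgb4_uld_info my_uld_info = {
+ *		.name		= "my_uld",
+ *		.add		= my_add,		called once per adapter
+ *		.rx_handler	= my_rx_handler,
+ *		.state_change	= my_state_change,
+ *	};
+ *
+ *	err = cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
+ */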
+
+int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
+int cxgb4_unregister_uld(enum cxgb4_uld type);
+int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
+unsigned int cxgb4_port_chan(const struct net_device *dev);
+unsigned int cxgb4_port_viid(const struct net_device *dev);
+unsigned int cxgb4_port_idx(const struct net_device *dev);
+unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
+ unsigned int *idx);
+void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
+ struct tp_tcp_stats *v6);
+void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
+ const unsigned int *pgsz_order);
+struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
+ unsigned int skb_len, unsigned int pull_len);
+#endif /* !__CXGB4_OFLD_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
new file mode 100644
index 000000000000..a2d323c473f8
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -0,0 +1,597 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/jhash.h>
+#include <net/neighbour.h>
+#include "cxgb4.h"
+#include "l2t.h"
+#include "t4_msg.h"
+#include "t4fw_api.h"
+
+#define VLAN_NONE 0xfff
+
+/* identifies sync vs async L2T_WRITE_REQs */
+#define F_SYNC_WR (1 << 12)
+
+enum {
+ L2T_STATE_VALID, /* entry is up to date */
+ L2T_STATE_STALE, /* entry may be used but needs revalidation */
+ L2T_STATE_RESOLVING, /* entry needs address resolution */
+ L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
+
+ /* when state is one of the below the entry is not hashed */
+ L2T_STATE_SWITCHING, /* entry is being used by a switching filter */
+ L2T_STATE_UNUSED /* entry not in use */
+};
+
+struct l2t_data {
+ rwlock_t lock;
+ atomic_t nfree; /* number of free entries */
+ struct l2t_entry *rover; /* starting point for next allocation */
+ struct l2t_entry l2tab[L2T_SIZE];
+};
+
+static inline unsigned int vlan_prio(const struct l2t_entry *e)
+{
+ return e->vlan >> 13;
+}
+
+static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
+{
+ if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
+ atomic_dec(&d->nfree);
+}
+
+/*
+ * To avoid having to check address families we do not allow v4 and v6
+ * neighbors to be on the same hash chain. We keep v4 entries in the first
+ * half of available hash buckets and v6 in the second.
+ */
+enum {
+ L2T_SZ_HALF = L2T_SIZE / 2,
+ L2T_HASH_MASK = L2T_SZ_HALF - 1
+};
+
+static inline unsigned int arp_hash(const u32 *key, int ifindex)
+{
+ return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
+}
+
+static inline unsigned int ipv6_hash(const u32 *key, int ifindex)
+{
+ u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
+
+ return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
+}
+
+static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex)
+{
+ return addr_len == 4 ? arp_hash(addr, ifindex) :
+ ipv6_hash(addr, ifindex);
+}
+
+/*
+ * Checks if an L2T entry is for the given IP/IPv6 address. It does not check
+ * whether the L2T entry and the address are of the same address family.
+ * Callers ensure an address is only checked against L2T entries of the same
+ * family, something made trivial by the separation of IP and IPv6 hash chains
+ * mentioned above. Returns 0 if there's a match.
+ */
+static int addreq(const struct l2t_entry *e, const u32 *addr)
+{
+ if (e->v6)
+ return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
+ (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
+ return e->addr[0] ^ addr[0];
+}
+
+static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
+{
+ neigh_hold(n);
+ if (e->neigh)
+ neigh_release(e->neigh);
+ e->neigh = n;
+}
+
+/*
+ * Write an L2T entry. Must be called with the entry locked.
+ * The write may be synchronous or asynchronous.
+ */
+static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
+{
+ struct sk_buff *skb;
+ struct cpl_l2t_write_req *req;
+
+ skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
+ INIT_TP_WR(req, 0);
+
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
+ e->idx | (sync ? F_SYNC_WR : 0) |
+ TID_QID(adap->sge.fw_evtq.abs_id)));
+ req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
+ req->l2t_idx = htons(e->idx);
+ req->vlan = htons(e->vlan);
+ if (e->neigh)
+ memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
+ memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
+
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+ t4_ofld_send(adap, skb);
+
+ if (sync && e->state != L2T_STATE_SWITCHING)
+ e->state = L2T_STATE_SYNC_WRITE;
+ return 0;
+}
+
+/*
+ * Send packets waiting in an L2T entry's ARP queue. Must be called with the
+ * entry locked.
+ */
+static void send_pending(struct adapter *adap, struct l2t_entry *e)
+{
+ while (e->arpq_head) {
+ struct sk_buff *skb = e->arpq_head;
+
+ e->arpq_head = skb->next;
+ skb->next = NULL;
+ t4_ofld_send(adap, skb);
+ }
+ e->arpq_tail = NULL;
+}
+
+/*
+ * Process a CPL_L2T_WRITE_RPL. If the reply completes a synchronous
+ * L2T_WRITE, send any packets waiting on the entry's ARP queue. Note that
+ * the TID in the reply is really the L2T index it refers to.
+ */
+void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
+{
+ unsigned int tid = GET_TID(rpl);
+ unsigned int idx = tid & (L2T_SIZE - 1);
+
+ if (unlikely(rpl->status != CPL_ERR_NONE)) {
+ dev_err(adap->pdev_dev,
+ "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
+ rpl->status, idx);
+ return;
+ }
+
+ if (tid & F_SYNC_WR) {
+ struct l2t_entry *e = &adap->l2t->l2tab[idx];
+
+ spin_lock(&e->lock);
+ if (e->state != L2T_STATE_SWITCHING) {
+ send_pending(adap, e);
+ e->state = (e->neigh->nud_state & NUD_STALE) ?
+ L2T_STATE_STALE : L2T_STATE_VALID;
+ }
+ spin_unlock(&e->lock);
+ }
+}
+
+/*
+ * Add a packet to an L2T entry's queue of packets awaiting resolution.
+ * Must be called with the entry's lock held.
+ */
+static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
+{
+ skb->next = NULL;
+ if (e->arpq_head)
+ e->arpq_tail->next = skb;
+ else
+ e->arpq_head = skb;
+ e->arpq_tail = skb;
+}
+
+int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
+ struct l2t_entry *e)
+{
+ struct adapter *adap = netdev2adap(dev);
+
+again:
+ switch (e->state) {
+ case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
+ neigh_event_send(e->neigh, NULL);
+ spin_lock_bh(&e->lock);
+ if (e->state == L2T_STATE_STALE)
+ e->state = L2T_STATE_VALID;
+ spin_unlock_bh(&e->lock);
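+ /* fall through */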
+ case L2T_STATE_VALID: /* fast-path, send the packet on */
+ return t4_ofld_send(adap, skb);
+ case L2T_STATE_RESOLVING:
+ case L2T_STATE_SYNC_WRITE:
+ spin_lock_bh(&e->lock);
+ if (e->state != L2T_STATE_SYNC_WRITE &&
+ e->state != L2T_STATE_RESOLVING) {
+ spin_unlock_bh(&e->lock);
+ goto again;
+ }
+ arpq_enqueue(e, skb);
+ spin_unlock_bh(&e->lock);
+
+ if (e->state == L2T_STATE_RESOLVING &&
+ !neigh_event_send(e->neigh, NULL)) {
+ spin_lock_bh(&e->lock);
+ if (e->state == L2T_STATE_RESOLVING && e->arpq_head)
+ write_l2e(adap, e, 1);
+ spin_unlock_bh(&e->lock);
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(cxgb4_l2t_send);
+
+/*
+ * Allocate a free L2T entry. Must be called with l2t_data.lock held.
+ */
+static struct l2t_entry *alloc_l2e(struct l2t_data *d)
+{
+ struct l2t_entry *end, *e, **p;
+
+ if (!atomic_read(&d->nfree))
+ return NULL;
+
+ /* there's definitely a free entry */
+ for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
+ if (atomic_read(&e->refcnt) == 0)
+ goto found;
+
+ for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
+ ;
+found:
+ d->rover = e + 1;
+ atomic_dec(&d->nfree);
+
+ /*
+ * The entry we found may be an inactive entry that is
+ * presently in the hash table. We need to remove it.
+ */
+ if (e->state < L2T_STATE_SWITCHING)
+ for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
+ if (*p == e) {
+ *p = e->next;
+ e->next = NULL;
+ break;
+ }
+
+ e->state = L2T_STATE_UNUSED;
+ return e;
+}
+
+/*
+ * Called when an L2T entry has no more users.
+ */
+static void t4_l2e_free(struct l2t_entry *e)
+{
+ struct l2t_data *d;
+
+ spin_lock_bh(&e->lock);
+ if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
+ if (e->neigh) {
+ neigh_release(e->neigh);
+ e->neigh = NULL;
+ }
+ while (e->arpq_head) {
+ struct sk_buff *skb = e->arpq_head;
+
+ e->arpq_head = skb->next;
+ kfree_skb(skb);
+ }
+ e->arpq_tail = NULL;
+ }
+ spin_unlock_bh(&e->lock);
+
+ d = container_of(e, struct l2t_data, l2tab[e->idx]);
+ atomic_inc(&d->nfree);
+}
+
+void cxgb4_l2t_release(struct l2t_entry *e)
+{
+ if (atomic_dec_and_test(&e->refcnt))
+ t4_l2e_free(e);
+}
+EXPORT_SYMBOL(cxgb4_l2t_release);
+
+/*
+ * Update an L2T entry that was previously used for the same next hop as neigh.
+ * Must be called with softirqs disabled.
+ */
+static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
+{
+ unsigned int nud_state;
+
+ spin_lock(&e->lock); /* avoid race with t4_l2t_free */
+ if (neigh != e->neigh)
+ neigh_replace(e, neigh);
+ nud_state = neigh->nud_state;
+ if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
+ !(nud_state & NUD_VALID))
+ e->state = L2T_STATE_RESOLVING;
+ else if (nud_state & NUD_CONNECTED)
+ e->state = L2T_STATE_VALID;
+ else
+ e->state = L2T_STATE_STALE;
+ spin_unlock(&e->lock);
+}
+
+struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
+ const struct net_device *physdev,
+ unsigned int priority)
+{
+ u8 lport;
+ u16 vlan;
+ struct l2t_entry *e;
+ int addr_len = neigh->tbl->key_len;
+ u32 *addr = (u32 *)neigh->primary_key;
+ int ifidx = neigh->dev->ifindex;
+ int hash = addr_hash(addr, addr_len, ifidx);
+
+ if (neigh->dev->flags & IFF_LOOPBACK)
+ lport = netdev2pinfo(physdev)->tx_chan + 4;
+ else
+ lport = netdev2pinfo(physdev)->lport;
+
+ if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
+ vlan = vlan_dev_vlan_id(neigh->dev);
+ else
+ vlan = VLAN_NONE;
+
+ write_lock_bh(&d->lock);
+ for (e = d->l2tab[hash].first; e; e = e->next)
+ if (!addreq(e, addr) && e->ifindex == ifidx &&
+ e->vlan == vlan && e->lport == lport) {
+ l2t_hold(d, e);
+ if (atomic_read(&e->refcnt) == 1)
+ reuse_entry(e, neigh);
+ goto done;
+ }
+
+ /* Need to allocate a new entry */
+ e = alloc_l2e(d);
+ if (e) {
+ spin_lock(&e->lock); /* avoid race with t4_l2t_free */
+ e->state = L2T_STATE_RESOLVING;
+ memcpy(e->addr, addr, addr_len);
+ e->ifindex = ifidx;
+ e->hash = hash;
+ e->lport = lport;
+ e->v6 = addr_len == 16;
+ atomic_set(&e->refcnt, 1);
+ neigh_replace(e, neigh);
+ e->vlan = vlan;
+ e->next = d->l2tab[hash].first;
+ d->l2tab[hash].first = e;
+ spin_unlock(&e->lock);
+ }
+done:
+ write_unlock_bh(&d->lock);
+ return e;
+}
+EXPORT_SYMBOL(cxgb4_l2t_get);
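+
+/*
+ * Typical usage sketch (error handling elided; "egress_dev" is a
+ * hypothetical name for the physical egress net_device):
+ *
+ *	e = cxgb4_l2t_get(adap->l2t, neigh, egress_dev, 0);
+ *	if (!e)
+ *		...	L2 table is full	...
+ *	cxgb4_l2t_send(dev, skb, e);	queues skb while e is resolving
+ *	...
+ *	cxgb4_l2t_release(e);		drop the reference when done
+ */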
+
+/*
+ * Called when address resolution fails for an L2T entry, to dispose of the
+ * packets waiting on the entry's arpq. If a packet specifies a failure
+ * handler it is invoked; otherwise the packet is sent to the device.
+ */
+static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq)
+{
+ while (arpq) {
+ struct sk_buff *skb = arpq;
+ const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
+
+ arpq = skb->next;
+ skb->next = NULL;
+ if (cb->arp_err_handler)
+ cb->arp_err_handler(cb->handle, skb);
+ else
+ t4_ofld_send(adap, skb);
+ }
+}
+
+/*
+ * Called when the host's neighbor layer makes a change to some entry that is
+ * loaded into the HW L2 table.
+ */
+void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
+{
+ struct l2t_entry *e;
+ struct sk_buff *arpq = NULL;
+ struct l2t_data *d = adap->l2t;
+ int addr_len = neigh->tbl->key_len;
+ u32 *addr = (u32 *) neigh->primary_key;
+ int ifidx = neigh->dev->ifindex;
+ int hash = addr_hash(addr, addr_len, ifidx);
+
+ read_lock_bh(&d->lock);
+ for (e = d->l2tab[hash].first; e; e = e->next)
+ if (!addreq(e, addr) && e->ifindex == ifidx) {
+ spin_lock(&e->lock);
+ if (atomic_read(&e->refcnt))
+ goto found;
+ spin_unlock(&e->lock);
+ break;
+ }
+ read_unlock_bh(&d->lock);
+ return;
+
+ found:
+ read_unlock(&d->lock);
+
+ if (neigh != e->neigh)
+ neigh_replace(e, neigh);
+
+ if (e->state == L2T_STATE_RESOLVING) {
+ if (neigh->nud_state & NUD_FAILED) {
+ arpq = e->arpq_head;
+ e->arpq_head = e->arpq_tail = NULL;
+ } else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) &&
+ e->arpq_head) {
+ write_l2e(adap, e, 1);
+ }
+ } else {
+ e->state = neigh->nud_state & NUD_CONNECTED ?
+ L2T_STATE_VALID : L2T_STATE_STALE;
+ if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
+ write_l2e(adap, e, 0);
+ }
+
+ spin_unlock_bh(&e->lock);
+
+ if (arpq)
+ handle_failed_resolution(adap, arpq);
+}
+
+struct l2t_data *t4_init_l2t(void)
+{
+ int i;
+ struct l2t_data *d;
+
+ d = t4_alloc_mem(sizeof(*d));
+ if (!d)
+ return NULL;
+
+ d->rover = d->l2tab;
+ atomic_set(&d->nfree, L2T_SIZE);
+ rwlock_init(&d->lock);
+
+ for (i = 0; i < L2T_SIZE; ++i) {
+ d->l2tab[i].idx = i;
+ d->l2tab[i].state = L2T_STATE_UNUSED;
+ spin_lock_init(&d->l2tab[i].lock);
+ atomic_set(&d->l2tab[i].refcnt, 0);
+ }
+ return d;
+}
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
+{
+ struct l2t_entry *l2tab = seq->private;
+
+ return pos >= L2T_SIZE ? NULL : &l2tab[pos];
+}
+
+static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+}
+
+static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ v = l2t_get_idx(seq, *pos);
+ if (v)
+ ++*pos;
+ return v;
+}
+
+static void l2t_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static char l2e_state(const struct l2t_entry *e)
+{
+ switch (e->state) {
+ case L2T_STATE_VALID: return 'V';
+ case L2T_STATE_STALE: return 'S';
+ case L2T_STATE_SYNC_WRITE: return 'W';
+ case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R';
+ case L2T_STATE_SWITCHING: return 'X';
+ default:
+ return 'U';
+ }
+}
+
+static int l2t_seq_show(struct seq_file *seq, void *v)
+{
+ if (v == SEQ_START_TOKEN)
+ seq_puts(seq, " Idx IP address "
+ "Ethernet address VLAN/P LP State Users Port\n");
+ else {
+ char ip[60];
+ struct l2t_entry *e = v;
+
+ spin_lock_bh(&e->lock);
+ if (e->state == L2T_STATE_SWITCHING)
+ ip[0] = '\0';
+ else
+ sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
+ seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n",
+ e->idx, ip, e->dmac,
+ e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
+ l2e_state(e), atomic_read(&e->refcnt),
+ e->neigh ? e->neigh->dev->name : "");
+ spin_unlock_bh(&e->lock);
+ }
+ return 0;
+}
+
+static const struct seq_operations l2t_seq_ops = {
+ .start = l2t_seq_start,
+ .next = l2t_seq_next,
+ .stop = l2t_seq_stop,
+ .show = l2t_seq_show
+};
+
+static int l2t_seq_open(struct inode *inode, struct file *file)
+{
+ int rc = seq_open(file, &l2t_seq_ops);
+
+ if (!rc) {
+ struct adapter *adap = inode->i_private;
+ struct seq_file *seq = file->private_data;
+
+ seq->private = adap->l2t->l2tab;
+ }
+ return rc;
+}
+
+const struct file_operations t4_l2t_fops = {
+ .owner = THIS_MODULE,
+ .open = l2t_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
new file mode 100644
index 000000000000..02b31d0c6410
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -0,0 +1,107 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_L2T_H
+#define __CXGB4_L2T_H
+
+#include <linux/spinlock.h>
+#include <linux/if_ether.h>
+#include <linux/atomic.h>
+
+struct adapter;
+struct l2t_data;
+struct neighbour;
+struct net_device;
+struct file_operations;
+struct cpl_l2t_write_rpl;
+
+/*
+ * Each L2T entry plays multiple roles. First of all, it keeps state for the
+ * corresponding entry of the HW L2 table and maintains a queue of offload
+ * packets awaiting address resolution. Second, it is a node of a hash table
+ * chain, where the nodes of the chain are linked together through their next
+ * pointer. Finally, each node is a bucket of a hash table, pointing to the
+ * first element in its chain through its first pointer.
+ */
+struct l2t_entry {
+ u16 state; /* entry state */
+ u16 idx; /* entry index */
+ u32 addr[4]; /* next hop IP or IPv6 address */
+ int ifindex; /* neighbor's net_device's ifindex */
+ struct neighbour *neigh; /* associated neighbour */
+ struct l2t_entry *first; /* start of hash chain */
+ struct l2t_entry *next; /* next l2t_entry on chain */
+ struct sk_buff *arpq_head; /* queue of packets awaiting resolution */
+ struct sk_buff *arpq_tail;
+ spinlock_t lock;
+ atomic_t refcnt; /* entry reference count */
+ u16 hash; /* hash bucket the entry is on */
+ u16 vlan; /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
+ u8 v6; /* whether entry is for IPv6 */
+ u8 lport; /* associated offload logical interface */
+ u8 dmac[ETH_ALEN]; /* neighbour's MAC address */
+};
+
+typedef void (*arp_err_handler_t)(void *handle, struct sk_buff *skb);
+
+/*
+ * Callback stored in an skb to handle address resolution failure.
+ */
+struct l2t_skb_cb {
+ void *handle;
+ arp_err_handler_t arp_err_handler;
+};
+
+#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
+
+static inline void t4_set_arp_err_handler(struct sk_buff *skb, void *handle,
+ arp_err_handler_t handler)
+{
+ L2T_SKB_CB(skb)->handle = handle;
+ L2T_SKB_CB(skb)->arp_err_handler = handler;
+}
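+
+/*
+ * Sketch only ("my_conn" and "my_arp_failure" are hypothetical): a ULD that
+ * wants to be told when address resolution fails for a queued WR would do
+ *
+ *	t4_set_arp_err_handler(skb, my_conn, my_arp_failure);
+ *	cxgb4_l2t_send(dev, skb, e);
+ *
+ * If the neighbour enters NUD_FAILED, my_arp_failure() is handed the skb
+ * and becomes responsible for it.
+ */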
+
+void cxgb4_l2t_release(struct l2t_entry *e);
+int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
+ struct l2t_entry *e);
+struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
+ const struct net_device *physdev,
+ unsigned int priority);
+
+void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
+struct l2t_data *t4_init_l2t(void);
+void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
+
+extern const struct file_operations t4_l2t_fops;
+#endif /* __CXGB4_L2T_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
new file mode 100644
index 000000000000..56adf448b9fe
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -0,0 +1,2442 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/dma-mapping.h>
+#include <linux/jiffies.h>
+#include <linux/prefetch.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "t4fw_api.h"
+
+/*
+ * Rx buffer size. We use largish buffers if possible but settle for single
+ * pages under memory shortage.
+ */
+#if PAGE_SHIFT >= 16
+# define FL_PG_ORDER 0
+#else
+# define FL_PG_ORDER (16 - PAGE_SHIFT)
+#endif
+
+/* RX_PULL_LEN should be <= RX_COPY_THRES */
+#define RX_COPY_THRES 256
+#define RX_PULL_LEN 128
+
+/*
+ * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
+ * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
+ */
+#define RX_PKT_SKB_LEN 512
+
+/* Ethernet header padding prepended to RX_PKTs */
+#define RX_PKT_PAD 2
+
+/*
+ * Max number of Tx descriptors we clean up at a time. Should be modest as
+ * freeing skbs isn't cheap and it happens while holding locks. We just need
+ * to free packets faster than they arrive, we eventually catch up and keep
+ * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES.
+ */
+#define MAX_TX_RECLAIM 16
+
+/*
+ * Max number of Rx buffers we replenish at a time. Again keep this modest,
+ * allocating buffers isn't cheap either.
+ */
+#define MAX_RX_REFILL 16U
+
+/*
+ * Period of the Rx queue check timer. This timer is infrequent as it has
+ * something to do only when the system experiences severe memory shortage.
+ */
+#define RX_QCHECK_PERIOD (HZ / 2)
+
+/*
+ * Period of the Tx queue check timer.
+ */
+#define TX_QCHECK_PERIOD (HZ / 2)
+
+/*
+ * Max number of Tx descriptors to be reclaimed by the Tx timer.
+ */
+#define MAX_TIMER_TX_RECLAIM 100
+
+/*
+ * Timer index used when backing off due to memory shortage.
+ */
+#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
+
+/*
+ * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
+ * attempt to refill it.
+ */
+#define FL_STARVE_THRES 4
+
+/*
+ * Suspend an Ethernet Tx queue with fewer available descriptors than this.
+ * This is the same as calc_tx_descs() for a TSO packet with
+ * nr_frags == MAX_SKB_FRAGS.
+ */
+#define ETHTXQ_STOP_THRES \
+ (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
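+
+/*
+ * For example, on a 4KB-page build of this era MAX_SKB_FRAGS is 18
+ * (65536 / PAGE_SIZE + 2), so this works out to
+ * 1 + DIV_ROUND_UP(27, 8) = 5 descriptors.
+ */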
+
+/*
+ * Suspension threshold for non-Ethernet Tx queues. We require enough room
+ * for a full sized WR.
+ */
+#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
+
+/*
+ * Max Tx descriptor space we allow for an Ethernet packet to be inlined
+ * into a WR.
+ */
+#define MAX_IMM_TX_PKT_LEN 128
+
+/*
+ * Max size of a WR sent through a control Tx queue.
+ */
+#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
+
+enum {
+ /* packet alignment in FL buffers */
+ FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES,
+ /* egress status entry size */
+ STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64
+};
+
+struct tx_sw_desc { /* SW state per Tx descriptor */
+ struct sk_buff *skb;
+ struct ulptx_sgl *sgl;
+};
+
+struct rx_sw_desc { /* SW state per Rx descriptor */
+ struct page *page;
+ dma_addr_t dma_addr;
+};
+
+/*
+ * The low bits of rx_sw_desc.dma_addr have special meaning.
+ */
+enum {
+ RX_LARGE_BUF = 1 << 0, /* buffer is larger than PAGE_SIZE */
+ RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
+};
+
+static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
+{
+ return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
+}
+
+static inline bool is_buf_mapped(const struct rx_sw_desc *d)
+{
+ return !(d->dma_addr & RX_UNMAPPED_BUF);
+}
+
+/**
+ * txq_avail - return the number of available slots in a Tx queue
+ * @q: the Tx queue
+ *
+ * Returns the number of descriptors in a Tx queue available to write new
+ * packets.
+ */
+static inline unsigned int txq_avail(const struct sge_txq *q)
+{
+ return q->size - 1 - q->in_use;
+}
+
+/**
+ * fl_cap - return the capacity of a free-buffer list
+ * @fl: the FL
+ *
+ * Returns the capacity of a free-buffer list. The capacity is less than
+ * the size because one descriptor needs to be left unpopulated, otherwise
+ * HW will think the FL is empty.
+ */
+static inline unsigned int fl_cap(const struct sge_fl *fl)
+{
+ return fl->size - 8; /* 1 descriptor = 8 buffers */
+}
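+
+/* E.g., an FL sized for 1024 buffers can hold at most 1016 of them. */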
+
+static inline bool fl_starving(const struct sge_fl *fl)
+{
+ return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
+}
+
+static int map_skb(struct device *dev, const struct sk_buff *skb,
+ dma_addr_t *addr)
+{
+ const skb_frag_t *fp, *end;
+ const struct skb_shared_info *si;
+
+ *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ goto out_err;
+
+ si = skb_shinfo(skb);
+ end = &si->frags[si->nr_frags];
+
+ for (fp = si->frags; fp < end; fp++) {
+ *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ goto unwind;
+ }
+ return 0;
+
+unwind:
+ while (fp-- > si->frags)
+ dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);
+
+ dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
+out_err:
+ return -ENOMEM;
+}
+
+#ifdef CONFIG_NEED_DMA_MAP_STATE
+static void unmap_skb(struct device *dev, const struct sk_buff *skb,
+ const dma_addr_t *addr)
+{
+ const skb_frag_t *fp, *end;
+ const struct skb_shared_info *si;
+
+ dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
+
+ si = skb_shinfo(skb);
+ end = &si->frags[si->nr_frags];
+ for (fp = si->frags; fp < end; fp++)
+ dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE);
+}
+
+/**
+ * deferred_unmap_destructor - unmap a packet when it is freed
+ * @skb: the packet
+ *
+ * This is the packet destructor used for Tx packets that need to remain
+ * mapped until they are freed rather than until their Tx descriptors are
+ * freed.
+ */
+static void deferred_unmap_destructor(struct sk_buff *skb)
+{
+ unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
+}
+#endif
+
+static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
+ const struct ulptx_sgl *sgl, const struct sge_txq *q)
+{
+ const struct ulptx_sge_pair *p;
+ unsigned int nfrags = skb_shinfo(skb)->nr_frags;
+
+ if (likely(skb_headlen(skb)))
+ dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
+ DMA_TO_DEVICE);
+ else {
+ dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
+ DMA_TO_DEVICE);
+ nfrags--;
+ }
+
+ /*
+ * the complexity below is because of the possibility of a wrap-around
+ * in the middle of an SGL
+ */
+ for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
+ if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
+unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
+ ntohl(p->len[0]), DMA_TO_DEVICE);
+ dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
+ ntohl(p->len[1]), DMA_TO_DEVICE);
+ p++;
+ } else if ((u8 *)p == (u8 *)q->stat) {
+ p = (const struct ulptx_sge_pair *)q->desc;
+ goto unmap;
+ } else if ((u8 *)p + 8 == (u8 *)q->stat) {
+ const __be64 *addr = (const __be64 *)q->desc;
+
+ dma_unmap_page(dev, be64_to_cpu(addr[0]),
+ ntohl(p->len[0]), DMA_TO_DEVICE);
+ dma_unmap_page(dev, be64_to_cpu(addr[1]),
+ ntohl(p->len[1]), DMA_TO_DEVICE);
+ p = (const struct ulptx_sge_pair *)&addr[2];
+ } else {
+ const __be64 *addr = (const __be64 *)q->desc;
+
+ dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
+ ntohl(p->len[0]), DMA_TO_DEVICE);
+ dma_unmap_page(dev, be64_to_cpu(addr[0]),
+ ntohl(p->len[1]), DMA_TO_DEVICE);
+ p = (const struct ulptx_sge_pair *)&addr[1];
+ }
+ }
+ if (nfrags) {
+ __be64 addr;
+
+ if ((u8 *)p == (u8 *)q->stat)
+ p = (const struct ulptx_sge_pair *)q->desc;
+ addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
+ *(const __be64 *)q->desc;
+ dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
+ DMA_TO_DEVICE);
+ }
+}
+
+/**
+ * free_tx_desc - reclaims Tx descriptors and their buffers
+ * @adapter: the adapter
+ * @q: the Tx queue to reclaim descriptors from
+ * @n: the number of descriptors to reclaim
+ * @unmap: whether the buffers should be unmapped for DMA
+ *
+ * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
+ * Tx buffers. Called with the Tx queue lock held.
+ */
+static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
+ unsigned int n, bool unmap)
+{
+ struct tx_sw_desc *d;
+ unsigned int cidx = q->cidx;
+ struct device *dev = adap->pdev_dev;
+
+ d = &q->sdesc[cidx];
+ while (n--) {
+ if (d->skb) { /* an SGL is present */
+ if (unmap)
+ unmap_sgl(dev, d->skb, d->sgl, q);
+ kfree_skb(d->skb);
+ d->skb = NULL;
+ }
+ ++d;
+ if (++cidx == q->size) {
+ cidx = 0;
+ d = q->sdesc;
+ }
+ }
+ q->cidx = cidx;
+}
+
+/*
+ * Return the number of reclaimable descriptors in a Tx queue.
+ */
+static inline int reclaimable(const struct sge_txq *q)
+{
+ int hw_cidx = ntohs(q->stat->cidx);
+ hw_cidx -= q->cidx;
+ return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
+}
+
+/**
+ * reclaim_completed_tx - reclaims completed Tx descriptors
+ * @adap: the adapter
+ * @q: the Tx queue to reclaim completed descriptors from
+ * @unmap: whether the buffers should be unmapped for DMA
+ *
+ * Reclaims Tx descriptors that the SGE has indicated it has processed,
+ * and frees the associated buffers if possible. Called with the Tx
+ * queue locked.
+ */
+static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
+ bool unmap)
+{
+ int avail = reclaimable(q);
+
+ if (avail) {
+ /*
+ * Limit the amount of clean up work we do at a time to keep
+ * the Tx lock hold time O(1).
+ */
+ if (avail > MAX_TX_RECLAIM)
+ avail = MAX_TX_RECLAIM;
+
+ free_tx_desc(adap, q, avail, unmap);
+ q->in_use -= avail;
+ }
+}
+
+static inline int get_buf_size(const struct rx_sw_desc *d)
+{
+#if FL_PG_ORDER > 0
+ return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) :
+ PAGE_SIZE;
+#else
+ return PAGE_SIZE;
+#endif
+}
+
+/**
+ * free_rx_bufs - free the Rx buffers on an SGE free list
+ * @adap: the adapter
+ * @q: the SGE free list to free buffers from
+ * @n: how many buffers to free
+ *
+ * Release the next @n buffers on an SGE free-buffer Rx queue. The
+ * buffers must be made inaccessible to HW before calling this function.
+ */
+static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
+{
+ while (n--) {
+ struct rx_sw_desc *d = &q->sdesc[q->cidx];
+
+ if (is_buf_mapped(d))
+ dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
+ get_buf_size(d), PCI_DMA_FROMDEVICE);
+ put_page(d->page);
+ d->page = NULL;
+ if (++q->cidx == q->size)
+ q->cidx = 0;
+ q->avail--;
+ }
+}
+
+/**
+ * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
+ * @adap: the adapter
+ * @q: the SGE free list
+ *
+ * Unmap the current buffer on an SGE free-buffer Rx queue. The
+ * buffer must be made inaccessible to HW before calling this function.
+ *
+ * This is similar to @free_rx_bufs above but does not free the buffer.
+ * Do note that the FL still loses any further access to the buffer.
+ */
+static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
+{
+ struct rx_sw_desc *d = &q->sdesc[q->cidx];
+
+ if (is_buf_mapped(d))
+ dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
+ get_buf_size(d), PCI_DMA_FROMDEVICE);
+ d->page = NULL;
+ if (++q->cidx == q->size)
+ q->cidx = 0;
+ q->avail--;
+}
+
+static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
+{
+ if (q->pend_cred >= 8) {
+ wmb();
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO |
+ QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
+ q->pend_cred &= 7;
+ }
+}
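+
+/*
+ * E.g., with pend_cred == 21 the write above posts PIDX(2), i.e. two
+ * 8-buffer descriptors (16 buffers), and leaves pend_cred at 21 & 7 == 5.
+ */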
+
+static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
+ dma_addr_t mapping)
+{
+ sd->page = pg;
+ sd->dma_addr = mapping; /* includes size low bits */
+}
+
+/**
+ * refill_fl - refill an SGE Rx buffer ring
+ * @adap: the adapter
+ * @q: the ring to refill
+ * @n: the number of new buffers to allocate
+ * @gfp: the gfp flags for the allocations
+ *
+ * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
+ * allocated with the supplied gfp flags. The caller must ensure that
+ * @n does not exceed the queue's capacity. If afterwards the queue is
+ * found critically low mark it as starving in the bitmap of starving FLs.
+ *
+ * Returns the number of buffers allocated.
+ */
+static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
+ gfp_t gfp)
+{
+ struct page *pg;
+ dma_addr_t mapping;
+ unsigned int cred = q->avail;
+ __be64 *d = &q->desc[q->pidx];
+ struct rx_sw_desc *sd = &q->sdesc[q->pidx];
+
+ gfp |= __GFP_NOWARN; /* failures are expected */
+
+#if FL_PG_ORDER > 0
+ /*
+ * Prefer large buffers
+ */
+ while (n) {
+ pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER);
+ if (unlikely(!pg)) {
+ q->large_alloc_failed++;
+ break; /* fall back to single pages */
+ }
+
+ mapping = dma_map_page(adap->pdev_dev, pg, 0,
+ PAGE_SIZE << FL_PG_ORDER,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
+ __free_pages(pg, FL_PG_ORDER);
+ goto out; /* do not try small pages for this error */
+ }
+ mapping |= RX_LARGE_BUF;
+ *d++ = cpu_to_be64(mapping);
+
+ set_rx_sw_desc(sd, pg, mapping);
+ sd++;
+
+ q->avail++;
+ if (++q->pidx == q->size) {
+ q->pidx = 0;
+ sd = q->sdesc;
+ d = q->desc;
+ }
+ n--;
+ }
+#endif
+
+ while (n--) {
+ pg = __netdev_alloc_page(adap->port[0], gfp);
+ if (unlikely(!pg)) {
+ q->alloc_failed++;
+ break;
+ }
+
+ mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
+ netdev_free_page(adap->port[0], pg);
+ goto out;
+ }
+ *d++ = cpu_to_be64(mapping);
+
+ set_rx_sw_desc(sd, pg, mapping);
+ sd++;
+
+ q->avail++;
+ if (++q->pidx == q->size) {
+ q->pidx = 0;
+ sd = q->sdesc;
+ d = q->desc;
+ }
+ }
+
+out: cred = q->avail - cred;
+ q->pend_cred += cred;
+ ring_fl_db(adap, q);
+
+ if (unlikely(fl_starving(q))) {
+ smp_wmb();
+ set_bit(q->cntxt_id - adap->sge.egr_start,
+ adap->sge.starving_fl);
+ }
+
+ return cred;
+}
+
+static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
+{
+ refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
+ GFP_ATOMIC);
+}
+
+/**
+ * alloc_ring - allocate resources for an SGE descriptor ring
+ * @dev: the PCI device's core device
+ * @nelem: the number of descriptors
+ * @elem_size: the size of each descriptor
+ * @sw_size: the size of the SW state associated with each ring element
+ * @phys: the physical address of the allocated ring
+ * @metadata: address of the array holding the SW state for the ring
+ * @stat_size: extra space in HW ring for status information
+ * @node: preferred node for memory allocations
+ *
+ * Allocates resources for an SGE descriptor ring, such as Tx queues,
+ * free buffer lists, or response queues. Each SGE ring requires
+ * space for its HW descriptors plus, optionally, space for the SW state
+ * associated with each HW entry (the metadata). The function returns
+ * three values: the virtual address for the HW ring (the return value
+ * of the function), the bus address of the HW ring, and the address
+ * of the SW ring.
+ */
+static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
+ size_t sw_size, dma_addr_t *phys, void *metadata,
+ size_t stat_size, int node)
+{
+ size_t len = nelem * elem_size + stat_size;
+ void *s = NULL;
+ void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
+
+ if (!p)
+ return NULL;
+ if (sw_size) {
+ s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
+
+ if (!s) {
+ dma_free_coherent(dev, len, p, *phys);
+ return NULL;
+ }
+ }
+ if (metadata)
+ *(void **)metadata = s;
+ memset(p, 0, len);
+ return p;
+}
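+
+/*
+ * For instance, a free-buffer list and its SW state could be allocated along
+ * these lines (a sketch of how the queue-setup code is expected to call it;
+ * "node" is whatever NUMA node the caller prefers):
+ *
+ *	fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
+ *			      sizeof(struct rx_sw_desc), &fl->addr,
+ *			      &fl->sdesc, STAT_LEN, node);
+ */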
+
+/**
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ *
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
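+
+/*
+ * The base cost is 2 flits (cmd_nsge/len0 plus addr0); after that each pair
+ * of entries adds 3 flits and a lone trailing entry adds 2, so e.g.
+ * sgl_len(1) == 2, sgl_len(2) == 4, sgl_len(3) == 5, sgl_len(4) == 7.
+ */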
+
+/**
+ * flits_to_desc - returns the num of Tx descriptors for the given flits
+ * @n: the number of flits
+ *
+ * Returns the number of Tx descriptors needed for the supplied number
+ * of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+ BUG_ON(n > SGE_MAX_WR_LEN / 8);
+ return DIV_ROUND_UP(n, 8);
+}
+
+/**
+ * is_eth_imm - can an Ethernet packet be sent as immediate data?
+ * @skb: the packet
+ *
+ * Returns whether an Ethernet packet is small enough to fit as
+ * immediate data.
+ */
+static inline int is_eth_imm(const struct sk_buff *skb)
+{
+ return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
+}
+
+/**
+ * calc_tx_flits - calculate the number of flits for a packet Tx WR
+ * @skb: the packet
+ *
+ * Returns the number of flits needed for a Tx WR for the given Ethernet
+ * packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
+{
+ unsigned int flits;
+
+ if (is_eth_imm(skb))
+ return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);
+
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
+ if (skb_shinfo(skb)->gso_size)
+ flits += 2;
+ return flits;
+}
+
+/**
+ * calc_tx_descs - calculate the number of Tx descriptors for a packet
+ * @skb: the packet
+ *
+ * Returns the number of Tx descriptors needed for the given Ethernet
+ * packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
+{
+ return flits_to_desc(calc_tx_flits(skb));
+}
+
+/**
+ * write_sgl - populate a scatter/gather list for a packet
+ * @skb: the packet
+ * @q: the Tx queue we are writing into
+ * @sgl: starting location for writing the SGL
+ * @end: points right after the end of the SGL
+ * @start: start offset into skb main-body data to include in the SGL
+ * @addr: the list of bus addresses for the SGL elements
+ *
+ * Generates a gather list for the buffers that make up a packet.
+ * The caller must provide adequate space for the SGL that will be written.
+ * The SGL includes all of the packet's page fragments and the data in its
+ * main body except for the first @start bytes. @sgl must be 16-byte
+ * aligned and within a Tx descriptor with available space. @end points
+ * right after the end of the SGL but does not account for any potential
+ * wrap around, i.e., @end > @sgl.
+ */
+static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+ const dma_addr_t *addr)
+{
+ unsigned int i, len;
+ struct ulptx_sge_pair *to;
+ const struct skb_shared_info *si = skb_shinfo(skb);
+ unsigned int nfrags = si->nr_frags;
+ struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
+
+ len = skb_headlen(skb) - start;
+ if (likely(len)) {
+ sgl->len0 = htonl(len);
+ sgl->addr0 = cpu_to_be64(addr[0] + start);
+ nfrags++;
+ } else {
+ sgl->len0 = htonl(si->frags[0].size);
+ sgl->addr0 = cpu_to_be64(addr[1]);
+ }
+
+ sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
+ if (likely(--nfrags == 0))
+ return;
+ /*
+ * Most of the complexity below deals with the possibility we hit the
+ * end of the queue in the middle of writing the SGL. For this case
+ * only we create the SGL in a temporary buffer and then copy it.
+ */
+ to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
+
+ for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
+ to->len[0] = cpu_to_be32(si->frags[i].size);
+ to->len[1] = cpu_to_be32(si->frags[++i].size);
+ to->addr[0] = cpu_to_be64(addr[i]);
+ to->addr[1] = cpu_to_be64(addr[++i]);
+ }
+ if (nfrags) {
+ to->len[0] = cpu_to_be32(si->frags[i].size);
+ to->len[1] = cpu_to_be32(0);
+ to->addr[0] = cpu_to_be64(addr[i + 1]);
+ }
+ if (unlikely((u8 *)end > (u8 *)q->stat)) {
+ unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
+
+ if (likely(part0))
+ memcpy(sgl->sge, buf, part0);
+ part1 = (u8 *)end - (u8 *)q->stat;
+ memcpy(q->desc, (u8 *)buf + part0, part1);
+ end = (void *)q->desc + part1;
+ }
+ if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
+ *(u64 *)end = 0;
+}
+
+/**
+ * ring_tx_db - ring a Tx queue's doorbell
+ * @adap: the adapter
+ * @q: the Tx queue
+ * @n: number of new descriptors to give to HW
+ *
+ * Ring the doorbell for a Tx queue.
+ */
+static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
+{
+ wmb(); /* write descriptors before telling HW */
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+ QID(q->cntxt_id) | PIDX(n));
+}
+
+/**
+ * inline_tx_skb - inline a packet's data into Tx descriptors
+ * @skb: the packet
+ * @q: the Tx queue where the packet will be inlined
+ * @pos: starting position in the Tx queue where to inline the packet
+ *
+ * Inline a packet's contents directly into Tx descriptors, starting at
+ * the given position within the Tx DMA ring.
+ * Most of the complexity of this operation is dealing with wrap arounds
+ * in the middle of the packet we want to inline.
+ */
+static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
+ void *pos)
+{
+ u64 *p;
+ int left = (void *)q->stat - pos;
+
+ if (likely(skb->len <= left)) {
+ if (likely(!skb->data_len))
+ skb_copy_from_linear_data(skb, pos, skb->len);
+ else
+ skb_copy_bits(skb, 0, pos, skb->len);
+ pos += skb->len;
+ } else {
+ skb_copy_bits(skb, 0, pos, left);
+ skb_copy_bits(skb, left, q->desc, skb->len - left);
+ pos = (void *)q->desc + (skb->len - left);
+ }
+
+ /* 0-pad to multiple of 16 */
+ p = PTR_ALIGN(pos, 8);
+ if ((uintptr_t)p & 8)
+ *p = 0;
+}
+
+/*
+ * Figure out what HW csum a packet wants and return the appropriate control
+ * bits.
+ */
+static u64 hwcsum(const struct sk_buff *skb)
+{
+ int csum_type;
+ const struct iphdr *iph = ip_hdr(skb);
+
+ if (iph->version == 4) {
+ if (iph->protocol == IPPROTO_TCP)
+ csum_type = TX_CSUM_TCPIP;
+ else if (iph->protocol == IPPROTO_UDP)
+ csum_type = TX_CSUM_UDPIP;
+ else {
+nocsum: /*
+ * unknown protocol, disable HW csum
+ * and hope a bad packet is detected
+ */
+ return TXPKT_L4CSUM_DIS;
+ }
+ } else {
+ /*
+ * this doesn't work with extension headers
+ */
+ const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
+
+ if (ip6h->nexthdr == IPPROTO_TCP)
+ csum_type = TX_CSUM_TCPIP6;
+ else if (ip6h->nexthdr == IPPROTO_UDP)
+ csum_type = TX_CSUM_UDPIP6;
+ else
+ goto nocsum;
+ }
+
+ if (likely(csum_type >= TX_CSUM_TCPIP))
+ return TXPKT_CSUM_TYPE(csum_type) |
+ TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
+ TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
+ else {
+ int start = skb_transport_offset(skb);
+
+ return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
+ TXPKT_CSUM_LOC(start + skb->csum_offset);
+ }
+}
+
+static void eth_txq_stop(struct sge_eth_txq *q)
+{
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+}
+
+static inline void txq_advance(struct sge_txq *q, unsigned int n)
+{
+ q->in_use += n;
+ q->pidx += n;
+ if (q->pidx >= q->size)
+ q->pidx -= q->size;
+}
+
+/**
+ * t4_eth_xmit - add a packet to an Ethernet Tx queue
+ * @skb: the packet
+ * @dev: the egress net device
+ *
+ * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
+ */
+netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ u32 wr_mid;
+ u64 cntrl, *end;
+ int qidx, credits;
+ unsigned int flits, ndesc;
+ struct adapter *adap;
+ struct sge_eth_txq *q;
+ const struct port_info *pi;
+ struct fw_eth_tx_pkt_wr *wr;
+ struct cpl_tx_pkt_core *cpl;
+ const struct skb_shared_info *ssi;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+
+ /*
+ * The chip's minimum packet length is 10 octets, but we play it safe and
+ * reject
+ * anything shorter than an Ethernet header.
+ */
+ if (unlikely(skb->len < ETH_HLEN)) {
+out_free: dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb_get_queue_mapping(skb);
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ reclaim_completed_tx(adap, &q->q, true);
+
+ flits = calc_tx_flits(skb);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&q->q) - ndesc;
+
+ if (unlikely(credits < 0)) {
+ eth_txq_stop(q);
+ dev_err(adap->pdev_dev,
+ "%s: Tx ring %u full while queue awake!\n",
+ dev->name, qidx);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (!is_eth_imm(skb) &&
+ unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ q->mapping_err++;
+ goto out_free;
+ }
+
+ wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
+ }
+
+ wr = (void *)&q->q.desc[q->q.pidx];
+ wr->equiq_to_len16 = htonl(wr_mid);
+ wr->r3 = cpu_to_be64(0);
+ end = (u64 *)wr + flits;
+
+ ssi = skb_shinfo(skb);
+ if (ssi->gso_size) {
+ struct cpl_tx_pkt_lso *lso = (void *)wr;
+ bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
+ int l3hdr_len = skb_network_header_len(skb);
+ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+
+ wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ FW_WR_IMMDLEN(sizeof(*lso)));
+ lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE | LSO_LAST_SLICE |
+ LSO_IPV6(v6) |
+ LSO_ETHHDR_LEN(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+ lso->c.ipid_ofst = htons(0);
+ lso->c.mss = htons(ssi->gso_size);
+ lso->c.seqno_offset = htonl(0);
+ lso->c.len = htonl(skb->len);
+ cpl = (void *)(lso + 1);
+ cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN(l3hdr_len) |
+ TXPKT_ETHHDR_LEN(eth_xtra_len);
+ q->tso++;
+ q->tx_cso += ssi->gso_segs;
+ } else {
+ int len;
+
+ len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
+ wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ FW_WR_IMMDLEN(len));
+ cpl = (void *)(wr + 1);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+ q->tx_cso++;
+ } else
+ cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+ }
+
+ if (vlan_tx_tag_present(skb)) {
+ q->vlan_ins++;
+ cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
+ }
+
+ cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
+ TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
+ cpl->pack = htons(0);
+ cpl->len = htons(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ if (is_eth_imm(skb)) {
+ inline_tx_skb(skb, &q->q, cpl + 1);
+ dev_kfree_skb(skb);
+ } else {
+ int last_desc;
+
+ write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
+ addr);
+ skb_orphan(skb);
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ q->q.sdesc[last_desc].skb = skb;
+ q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
+ }
+
+ txq_advance(&q->q, ndesc);
+
+ ring_tx_db(adap, &q->q, ndesc);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+ *
+ * This is a variant of reclaim_completed_tx() that is used for Tx queues
+ * that send only immediate data (presently just the control queues) and
+ * thus do not have any sk_buffs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+ int hw_cidx = ntohs(q->stat->cidx);
+ int reclaim = hw_cidx - q->cidx;
+
+ if (reclaim < 0)
+ reclaim += q->size;
+
+ q->in_use -= reclaim;
+ q->cidx = hw_cidx;
+}
+
+/**
+ * is_imm - check whether a packet can be sent as immediate data
+ * @skb: the packet
+ *
+ * Returns true if a packet can be sent as a WR with immediate data.
+ */
+static inline int is_imm(const struct sk_buff *skb)
+{
+ return skb->len <= MAX_CTRL_WR_LEN;
+}
+
+/**
+ * ctrlq_check_stop - check if a control queue is full and should stop
+ * @q: the queue
+ * @wr: most recent WR written to the queue
+ *
+ * Check if a control queue has become full and should be stopped.
+ * We clean up control queue descriptors very lazily, only when we run out.
+ * If the queue is still full after reclaiming any completed descriptors
+ * we suspend it and have the last WR wake it up.
+ */
+static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
+{
+ reclaim_completed_tx_imm(&q->q);
+ if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
+ wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
+ q->q.stops++;
+ q->full = 1;
+ }
+}
+
+/**
+ * ctrl_xmit - send a packet through an SGE control Tx queue
+ * @q: the control queue
+ * @skb: the packet
+ *
+ * Send a packet through an SGE control Tx queue. Packets sent through
+ * a control queue must fit entirely as immediate data.
+ */
+static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
+{
+ unsigned int ndesc;
+ struct fw_wr_hdr *wr;
+
+ if (unlikely(!is_imm(skb))) {
+ WARN_ON(1);
+ dev_kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+
+ ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
+ spin_lock(&q->sendq.lock);
+
+ if (unlikely(q->full)) {
+ skb->priority = ndesc; /* save for restart */
+ __skb_queue_tail(&q->sendq, skb);
+ spin_unlock(&q->sendq.lock);
+ return NET_XMIT_CN;
+ }
+
+ wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+ inline_tx_skb(skb, &q->q, wr);
+
+ txq_advance(&q->q, ndesc);
+ if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
+ ctrlq_check_stop(q, wr);
+
+ ring_tx_db(q->adap, &q->q, ndesc);
+ spin_unlock(&q->sendq.lock);
+
+ kfree_skb(skb);
+ return NET_XMIT_SUCCESS;
+}
+
+/**
+ * restart_ctrlq - restart a suspended control queue
+ * @data: the control queue to restart
+ *
+ * Resumes transmission on a suspended Tx control queue.
+ */
+static void restart_ctrlq(unsigned long data)
+{
+ struct sk_buff *skb;
+ unsigned int written = 0;
+ struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
+
+ spin_lock(&q->sendq.lock);
+ reclaim_completed_tx_imm(&q->q);
+ BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */
+
+ while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
+ struct fw_wr_hdr *wr;
+ unsigned int ndesc = skb->priority; /* previously saved */
+
+ /*
+ * Write descriptors and free skbs outside the lock to limit
+ * wait times. q->full is still set so new skbs will be queued.
+ */
+ spin_unlock(&q->sendq.lock);
+
+ wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+ inline_tx_skb(skb, &q->q, wr);
+ kfree_skb(skb);
+
+ written += ndesc;
+ txq_advance(&q->q, ndesc);
+ if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
+ unsigned long old = q->q.stops;
+
+ ctrlq_check_stop(q, wr);
+ if (q->q.stops != old) { /* suspended anew */
+ spin_lock(&q->sendq.lock);
+ goto ringdb;
+ }
+ }
+ if (written > 16) {
+ ring_tx_db(q->adap, &q->q, written);
+ written = 0;
+ }
+ spin_lock(&q->sendq.lock);
+ }
+ q->full = 0;
+ringdb: if (written)
+ ring_tx_db(q->adap, &q->q, written);
+ spin_unlock(&q->sendq.lock);
+}
+
+/**
+ * t4_mgmt_tx - send a management message
+ * @adap: the adapter
+ * @skb: the packet containing the management message
+ *
+ * Send a management message through control queue 0.
+ */
+int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
+{
+ int ret;
+
+ local_bh_disable();
+ ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
+ local_bh_enable();
+ return ret;
+}
+
+/**
+ * is_ofld_imm - check whether a packet can be sent as immediate data
+ * @skb: the packet
+ *
+ * Returns true if a packet can be sent as an offload WR with immediate
+ * data. We currently use the same limit as for Ethernet packets.
+ */
+static inline int is_ofld_imm(const struct sk_buff *skb)
+{
+ return skb->len <= MAX_IMM_TX_PKT_LEN;
+}
+
+/**
+ * calc_tx_flits_ofld - calculate # of flits for an offload packet
+ * @skb: the packet
+ *
+ * Returns the number of flits needed for the given offload packet.
+ * These packets are already fully constructed and no additional headers
+ * will be added.
+ */
+static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
+{
+ unsigned int flits, cnt;
+
+ if (is_ofld_imm(skb))
+ return DIV_ROUND_UP(skb->len, 8);
+
+ flits = skb_transport_offset(skb) / 8U; /* headers */
+ cnt = skb_shinfo(skb)->nr_frags;
+ if (skb->tail != skb->transport_header)
+ cnt++;
+ return flits + sgl_len(cnt);
+}
+
+/**
+ * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
+ * @q: the queue to stop
+ *
+ * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
+ * inability to map packets. A periodic timer attempts to restart
+ * queues so marked.
+ */
+static void txq_stop_maperr(struct sge_ofld_txq *q)
+{
+ q->mapping_err++;
+ q->q.stops++;
+ set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
+ q->adap->sge.txq_maperr);
+}
+
+/**
+ * ofldtxq_stop - stop an offload Tx queue that has become full
+ * @q: the queue to stop
+ * @skb: the packet causing the queue to become full
+ *
+ * Stops an offload Tx queue that has become full and modifies the packet
+ * being written to request a wakeup.
+ */
+static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
+{
+ struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
+
+ wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
+ q->q.stops++;
+ q->full = 1;
+}
+
+/**
+ * service_ofldq - service an offload Tx queue
+ * @q: the offload queue
+ *
+ * Services an offload Tx queue by moving packets from its packet queue
+ * to the HW Tx ring. The function starts and ends with the queue locked.
+ */
+static void service_ofldq(struct sge_ofld_txq *q)
+{
+ u64 *pos;
+ int credits;
+ struct sk_buff *skb;
+ unsigned int written = 0;
+ unsigned int flits, ndesc;
+
+ while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
+ /*
+ * We drop the lock but leave skb on sendq, thus retaining
+ * exclusive access to the state of the queue.
+ */
+ spin_unlock(&q->sendq.lock);
+
+ reclaim_completed_tx(q->adap, &q->q, false);
+
+ flits = skb->priority; /* previously saved */
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&q->q) - ndesc;
+ BUG_ON(credits < 0);
+ if (unlikely(credits < TXQ_STOP_THRES))
+ ofldtxq_stop(q, skb);
+
+ pos = (u64 *)&q->q.desc[q->q.pidx];
+ if (is_ofld_imm(skb))
+ inline_tx_skb(skb, &q->q, pos);
+ else if (map_skb(q->adap->pdev_dev, skb,
+ (dma_addr_t *)skb->head)) {
+ txq_stop_maperr(q);
+ spin_lock(&q->sendq.lock);
+ break;
+ } else {
+ int last_desc, hdr_len = skb_transport_offset(skb);
+
+ memcpy(pos, skb->data, hdr_len);
+ write_sgl(skb, &q->q, (void *)pos + hdr_len,
+ pos + flits, hdr_len,
+ (dma_addr_t *)skb->head);
+#ifdef CONFIG_NEED_DMA_MAP_STATE
+ skb->dev = q->adap->port[0];
+ skb->destructor = deferred_unmap_destructor;
+#endif
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ q->q.sdesc[last_desc].skb = skb;
+ }
+
+ txq_advance(&q->q, ndesc);
+ written += ndesc;
+ if (unlikely(written > 32)) {
+ ring_tx_db(q->adap, &q->q, written);
+ written = 0;
+ }
+
+ spin_lock(&q->sendq.lock);
+ __skb_unlink(skb, &q->sendq);
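+		/*
+		 * Immediate-data skbs were copied whole into the descriptor
+		 * ring and can be freed right away; DMA-mapped skbs stay
+		 * around until their descriptors are reclaimed.
+		 */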
+ if (is_ofld_imm(skb))
+ kfree_skb(skb);
+ }
+ if (likely(written))
+ ring_tx_db(q->adap, &q->q, written);
+}
+
+/**
+ * ofld_xmit - send a packet through an offload queue
+ * @q: the Tx offload queue
+ * @skb: the packet
+ *
+ * Send an offload packet through an SGE offload queue.
+ */
+static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
+{
+ skb->priority = calc_tx_flits_ofld(skb); /* save for restart */
+ spin_lock(&q->sendq.lock);
+ __skb_queue_tail(&q->sendq, skb);
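+	/*
+	 * Kick off servicing only when the queue was previously empty; any
+	 * other sender is already inside service_ofldq() (or the restart
+	 * tasklet is pending) and will pick this packet up.
+	 */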
+ if (q->sendq.qlen == 1)
+ service_ofldq(q);
+ spin_unlock(&q->sendq.lock);
+ return NET_XMIT_SUCCESS;
+}
+
+/**
+ * restart_ofldq - restart a suspended offload queue
+ * @data: the offload queue to restart
+ *
+ * Resumes transmission on a suspended Tx offload queue.
+ */
+static void restart_ofldq(unsigned long data)
+{
+ struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
+
+ spin_lock(&q->sendq.lock);
+ q->full = 0; /* the queue actually is completely empty now */
+ service_ofldq(q);
+ spin_unlock(&q->sendq.lock);
+}
+
+/**
+ * skb_txq - return the Tx queue an offload packet should use
+ * @skb: the packet
+ *
+ * Returns the Tx queue an offload packet should use as indicated by bits
+ * 1-15 in the packet's queue_mapping.
+ */
+static inline unsigned int skb_txq(const struct sk_buff *skb)
+{
+ return skb->queue_mapping >> 1;
+}
+
+/**
+ * is_ctrl_pkt - return whether an offload packet is a control packet
+ * @skb: the packet
+ *
+ * Returns whether an offload packet should use an OFLD or a CTRL
+ * Tx queue as indicated by bit 0 in the packet's queue_mapping.
+ */
+static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
+{
+ return skb->queue_mapping & 1;
+}
+
+static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
+{
+ unsigned int idx = skb_txq(skb);
+
+ if (unlikely(is_ctrl_pkt(skb)))
+ return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
+ return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
+}
+
+/**
+ * t4_ofld_send - send an offload packet
+ * @adap: the adapter
+ * @skb: the packet
+ *
+ * Sends an offload packet. We use the packet queue_mapping to select the
+ * appropriate Tx queue as follows: bit 0 indicates whether the packet
+ * should be sent as regular or control, bits 1-15 select the queue.
+ */
+int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
+{
+ int ret;
+
+ local_bh_disable();
+ ret = ofld_send(adap, skb);
+ local_bh_enable();
+ return ret;
+}
+
+/**
+ * cxgb4_ofld_send - send an offload packet
+ * @dev: the net device
+ * @skb: the packet
+ *
+ * Sends an offload packet. This is an exported version of t4_ofld_send(),
+ * intended for ULDs.
+ */
+int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
+{
+ return t4_ofld_send(netdev2adap(dev), skb);
+}
+EXPORT_SYMBOL(cxgb4_ofld_send);
+
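+/*
+ * Illustrative sketch (not part of the driver) of the queue_mapping encoding
+ * consumed above: a ULD that wants offload Tx queue 3 sets
+ *
+ *	skb->queue_mapping = 3 << 1;
+ *
+ * where bits 1-15 select the queue, and OR-ing in bit 0 would route the
+ * packet to control queue 3 instead.
+ */
+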
+static inline void copy_frags(struct skb_shared_info *ssi,
+ const struct pkt_gl *gl, unsigned int offset)
+{
+ unsigned int n;
+
+ /* usually there's just one frag */
+ ssi->frags[0].page = gl->frags[0].page;
+ ssi->frags[0].page_offset = gl->frags[0].page_offset + offset;
+ ssi->frags[0].size = gl->frags[0].size - offset;
+ ssi->nr_frags = gl->nfrags;
+ n = gl->nfrags - 1;
+ if (n)
+ memcpy(&ssi->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));
+
+	/* get a reference to the last page; we don't own it */
+ get_page(gl->frags[n].page);
+}
+
+/**
+ * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
+ * @gl: the gather list
+ * @skb_len: size of sk_buff main body if it carries fragments
+ * @pull_len: amount of data to move to the sk_buff's main body
+ *
+ * Builds an sk_buff from the given packet gather list. Returns the
+ * sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
+ unsigned int skb_len, unsigned int pull_len)
+{
+ struct sk_buff *skb;
+
+ /*
+ * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
+ * size, which is expected since buffers are at least PAGE_SIZEd.
+ * In this case packets up to RX_COPY_THRES have only one fragment.
+ */
+ if (gl->tot_len <= RX_COPY_THRES) {
+ skb = dev_alloc_skb(gl->tot_len);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, gl->tot_len);
+ skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+ } else {
+ skb = dev_alloc_skb(skb_len);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, pull_len);
+ skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+ copy_frags(skb_shinfo(skb), gl, pull_len);
+ skb->len = gl->tot_len;
+ skb->data_len = skb->len - pull_len;
+ skb->truesize += skb->data_len;
+ }
+out:
+	return skb;
+}
+EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
+
+/**
+ * t4_pktgl_free - free a packet gather list
+ * @gl: the gather list
+ *
+ * Releases the pages of a packet gather list. We do not own the last
+ * page on the list and do not free it.
+ */
+static void t4_pktgl_free(const struct pkt_gl *gl)
+{
+ int n;
+ const skb_frag_t *p;
+
+ for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
+ put_page(p->page);
+}
+
+/*
+ * Process an MPS trace packet. Give it an unused protocol number so it won't
+ * be delivered to anyone and send it to the stack for capture.
+ */
+static noinline int handle_trace_pkt(struct adapter *adap,
+ const struct pkt_gl *gl)
+{
+ struct sk_buff *skb;
+ struct cpl_trace_pkt *p;
+
+ skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
+ if (unlikely(!skb)) {
+ t4_pktgl_free(gl);
+ return 0;
+ }
+
+ p = (struct cpl_trace_pkt *)skb->data;
+ __skb_pull(skb, sizeof(*p));
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(0xffff);
+ skb->dev = adap->port[0];
+ netif_receive_skb(skb);
+ return 0;
+}
+
+static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
+ const struct cpl_rx_pkt *pkt)
+{
+ int ret;
+ struct sk_buff *skb;
+
+ skb = napi_get_frags(&rxq->rspq.napi);
+ if (unlikely(!skb)) {
+ t4_pktgl_free(gl);
+ rxq->stats.rx_drops++;
+ return;
+ }
+
+ copy_frags(skb_shinfo(skb), gl, RX_PKT_PAD);
+ skb->len = gl->tot_len - RX_PKT_PAD;
+ skb->data_len = skb->len;
+ skb->truesize += skb->data_len;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(skb, rxq->rspq.idx);
+ if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
+ skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+
+ if (unlikely(pkt->vlan_ex)) {
+ __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
+ rxq->stats.vlan_ex++;
+ }
+ ret = napi_gro_frags(&rxq->rspq.napi);
+ if (ret == GRO_HELD)
+ rxq->stats.lro_pkts++;
+ else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
+ rxq->stats.lro_merged++;
+ rxq->stats.pkts++;
+ rxq->stats.rx_cso++;
+}
+
+/**
+ * t4_ethrx_handler - process an ingress ethernet packet
+ * @q: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the RX_PKT message
+ * @si: the gather list of packet fragments
+ *
+ * Process an ingress ethernet packet and deliver it to the stack.
+ */
+int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *si)
+{
+ bool csum_ok;
+ struct sk_buff *skb;
+ const struct cpl_rx_pkt *pkt;
+ struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+
+ if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
+ return handle_trace_pkt(q->adap, si);
+
+ pkt = (const struct cpl_rx_pkt *)rsp;
+ csum_ok = pkt->csum_calc && !pkt->err_vec;
+ if ((pkt->l2info & htonl(RXF_TCP)) &&
+ (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
+ do_gro(rxq, si, pkt);
+ return 0;
+ }
+
+ skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
+ if (unlikely(!skb)) {
+ t4_pktgl_free(si);
+ rxq->stats.rx_drops++;
+ return 0;
+ }
+
+ __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */
+ skb->protocol = eth_type_trans(skb, q->netdev);
+ skb_record_rx_queue(skb, q->idx);
+ if (skb->dev->features & NETIF_F_RXHASH)
+ skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+
+ rxq->stats.pkts++;
+
+ if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
+ (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
+ if (!pkt->ip_frag) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ rxq->stats.rx_cso++;
+ } else if (pkt->l2info & htonl(RXF_IP)) {
+ __sum16 c = (__force __sum16)pkt->csum;
+ skb->csum = csum_unfold(c);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ rxq->stats.rx_cso++;
+ }
+ } else
+ skb_checksum_none_assert(skb);
+
+ if (unlikely(pkt->vlan_ex)) {
+ __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
+ rxq->stats.vlan_ex++;
+ }
+ netif_receive_skb(skb);
+ return 0;
+}
+
+/**
+ * restore_rx_bufs - put back a packet's Rx buffers
+ * @si: the packet gather list
+ * @q: the SGE free list
+ * @frags: number of FL buffers to restore
+ *
+ * Puts back on an FL the Rx buffers associated with @si. The buffers
+ * have already been unmapped and are left unmapped; we mark them so to
+ * prevent further unmapping attempts.
+ *
+ * This function undoes a series of unmap_rx_buf() calls when we find out
+ * that the current packet can't be processed right away after all and we
+ * need to come back to it later. This is a very rare event and there's
+ * no effort to make this particularly efficient.
+ */
+static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
+ int frags)
+{
+ struct rx_sw_desc *d;
+
+ while (frags--) {
+ if (q->cidx == 0)
+ q->cidx = q->size - 1;
+ else
+ q->cidx--;
+ d = &q->sdesc[q->cidx];
+ d->page = si->frags[frags].page;
+ d->dma_addr |= RX_UNMAPPED_BUF;
+ q->avail++;
+ }
+}
+
+/**
+ * is_new_response - check if a response is newly written
+ * @r: the response descriptor
+ * @q: the response queue
+ *
+ * Returns true if a response descriptor contains a yet unprocessed
+ * response.
+ */
+static inline bool is_new_response(const struct rsp_ctrl *r,
+ const struct sge_rspq *q)
+{
+ return RSPD_GEN(r->type_gen) == q->gen;
+}
+
+/**
+ * rspq_next - advance to the next entry in a response queue
+ * @q: the queue
+ *
+ * Updates the state of a response queue to advance it to the next entry.
+ */
+static inline void rspq_next(struct sge_rspq *q)
+{
+ q->cur_desc = (void *)q->cur_desc + q->iqe_len;
+ if (unlikely(++q->cidx == q->size)) {
+ q->cidx = 0;
+ q->gen ^= 1;
+ q->cur_desc = q->desc;
+ }
+}
+
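+/*
+ * Minimal sketch of the generation-bit protocol (illustrative): a queue is
+ * created with gen == 1 and every wrap of cidx flips gen, so a descriptor is
+ * new only while its generation bit still equals q->gen. With a 4-entry ring
+ * the consumer walks 0,1,2,3 expecting gen 1, wraps, then 0,1,2,3 expecting
+ * gen 0, and so on.
+ */
+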
+/**
+ * process_responses - process responses from an SGE response queue
+ * @q: the ingress queue to process
+ * @budget: how many responses can be processed in this round
+ *
+ * Process responses from an SGE response queue up to the supplied budget.
+ * Responses include received packets as well as control messages from FW
+ * or HW.
+ *
+ * Additionally choose the interrupt holdoff time for the next interrupt
+ * on this queue. If the system is under memory shortage, we use a fairly
+ * long delay to help recovery.
+ */
+static int process_responses(struct sge_rspq *q, int budget)
+{
+ int ret, rsp_type;
+ int budget_left = budget;
+ const struct rsp_ctrl *rc;
+ struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+
+ while (likely(budget_left)) {
+ rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
+ if (!is_new_response(rc, q))
+ break;
+
+ rmb();
+ rsp_type = RSPD_TYPE(rc->type_gen);
+ if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+ skb_frag_t *fp;
+ struct pkt_gl si;
+ const struct rx_sw_desc *rsd;
+ u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
+
+ if (len & RSPD_NEWBUF) {
+ if (likely(q->offset > 0)) {
+ free_rx_bufs(q->adap, &rxq->fl, 1);
+ q->offset = 0;
+ }
+ len = RSPD_LEN(len);
+ }
+ si.tot_len = len;
+
+ /* gather packet fragments */
+ for (frags = 0, fp = si.frags; ; frags++, fp++) {
+ rsd = &rxq->fl.sdesc[rxq->fl.cidx];
+ bufsz = get_buf_size(rsd);
+ fp->page = rsd->page;
+ fp->page_offset = q->offset;
+ fp->size = min(bufsz, len);
+ len -= fp->size;
+ if (!len)
+ break;
+ unmap_rx_buf(q->adap, &rxq->fl);
+ }
+
+ /*
+ * Last buffer remains mapped so explicitly make it
+ * coherent for CPU access.
+ */
+ dma_sync_single_for_cpu(q->adap->pdev_dev,
+ get_buf_addr(rsd),
+ fp->size, DMA_FROM_DEVICE);
+
+ si.va = page_address(si.frags[0].page) +
+ si.frags[0].page_offset;
+ prefetch(si.va);
+
+ si.nfrags = frags + 1;
+ ret = q->handler(q, q->cur_desc, &si);
+ if (likely(ret == 0))
+ q->offset += ALIGN(fp->size, FL_ALIGN);
+ else
+ restore_rx_bufs(&si, &rxq->fl, frags);
+ } else if (likely(rsp_type == RSP_TYPE_CPL)) {
+ ret = q->handler(q, q->cur_desc, NULL);
+ } else {
+ ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
+ }
+
+ if (unlikely(ret)) {
+ /* couldn't process descriptor, back off for recovery */
+ q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
+ break;
+ }
+
+ rspq_next(q);
+ budget_left--;
+ }
+
+ if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
+ __refill_fl(q->adap, &rxq->fl);
+ return budget - budget_left;
+}
+
+/**
+ * napi_rx_handler - the NAPI handler for Rx processing
+ * @napi: the napi instance
+ * @budget: how many packets we can process in this round
+ *
+ * Handler for new data events when using NAPI. This does not need any
+ * locking or protection from interrupts as data interrupts are off at
+ * this point and other adapter interrupts do not interfere (the latter
+ * in not a concern at all with MSI-X as non-data interrupts then have
+ * a separate handler).
+ */
+static int napi_rx_handler(struct napi_struct *napi, int budget)
+{
+ unsigned int params;
+ struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
+ int work_done = process_responses(q, budget);
+
+ if (likely(work_done < budget)) {
+ napi_complete(napi);
+ params = q->next_intr_params;
+ q->next_intr_params = q->intr_params;
+ } else
+ params = QINTR_TIMER_IDX(7);
+
+ t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
+ INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
+ return work_done;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue.
+ */
+irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
+{
+ struct sge_rspq *q = cookie;
+
+ napi_schedule(&q->napi);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Process the indirect interrupt entries in the interrupt queue and kick off
+ * NAPI for each queue that has generated an entry.
+ */
+static unsigned int process_intrq(struct adapter *adap)
+{
+ unsigned int credits;
+ const struct rsp_ctrl *rc;
+ struct sge_rspq *q = &adap->sge.intrq;
+
+ spin_lock(&adap->sge.intrq_lock);
+ for (credits = 0; ; credits++) {
+ rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
+ if (!is_new_response(rc, q))
+ break;
+
+ rmb();
+ if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
+ unsigned int qid = ntohl(rc->pldbuflen_qid);
+
+ qid -= adap->sge.ingr_start;
+ napi_schedule(&adap->sge.ingr_map[qid]->napi);
+ }
+
+ rspq_next(q);
+ }
+
+ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
+ INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
+ spin_unlock(&adap->sge.intrq_lock);
+ return credits;
+}
+
+/*
+ * The MSI interrupt handler handles data events from SGE response queues as
+ * well as error and other async events, as they all use the same MSI vector.
+ */
+static irqreturn_t t4_intr_msi(int irq, void *cookie)
+{
+ struct adapter *adap = cookie;
+
+ t4_slow_intr_handler(adap);
+ process_intrq(adap);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Interrupt handler for legacy INTx interrupts.
+ * Handles data events from SGE response queues as well as error and other
+ * async events, as they all use the same interrupt line.
+ */
+static irqreturn_t t4_intr_intx(int irq, void *cookie)
+{
+ struct adapter *adap = cookie;
+
+ t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
+ if (t4_slow_intr_handler(adap) | process_intrq(adap))
+ return IRQ_HANDLED;
+ return IRQ_NONE; /* probably shared interrupt */
+}
+
+/**
+ * t4_intr_handler - select the top-level interrupt handler
+ * @adap: the adapter
+ *
+ * Selects the top-level interrupt handler based on the type of interrupts
+ * (MSI-X, MSI, or INTx).
+ */
+irq_handler_t t4_intr_handler(struct adapter *adap)
+{
+ if (adap->flags & USING_MSIX)
+ return t4_sge_intr_msix;
+ if (adap->flags & USING_MSI)
+ return t4_intr_msi;
+ return t4_intr_intx;
+}
+
+static void sge_rx_timer_cb(unsigned long data)
+{
+ unsigned long m;
+ unsigned int i, cnt[2];
+ struct adapter *adap = (struct adapter *)data;
+ struct sge *s = &adap->sge;
+
+ for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
+ for (m = s->starving_fl[i]; m; m &= m - 1) {
+ struct sge_eth_rxq *rxq;
+ unsigned int id = __ffs(m) + i * BITS_PER_LONG;
+ struct sge_fl *fl = s->egr_map[id];
+
+ clear_bit(id, s->starving_fl);
+ smp_mb__after_clear_bit();
+
+ if (fl_starving(fl)) {
+ rxq = container_of(fl, struct sge_eth_rxq, fl);
+ if (napi_reschedule(&rxq->rspq.napi))
+ fl->starving++;
+ else
+ set_bit(id, s->starving_fl);
+ }
+ }
+
+ t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
+ cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
+ cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+
+ for (i = 0; i < 2; i++)
+ if (cnt[i] >= s->starve_thres) {
+ if (s->idma_state[i] || cnt[i] == 0xffffffff)
+ continue;
+ s->idma_state[i] = 1;
+ t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
+ m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
+ dev_warn(adap->pdev_dev,
+ "SGE idma%u starvation detected for "
+ "queue %lu\n", i, m & 0xffff);
+ } else if (s->idma_state[i])
+ s->idma_state[i] = 0;
+
+ mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
+}
+
+static void sge_tx_timer_cb(unsigned long data)
+{
+ unsigned long m;
+ unsigned int i, budget;
+ struct adapter *adap = (struct adapter *)data;
+ struct sge *s = &adap->sge;
+
+ for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
+ for (m = s->txq_maperr[i]; m; m &= m - 1) {
+ unsigned long id = __ffs(m) + i * BITS_PER_LONG;
+ struct sge_ofld_txq *txq = s->egr_map[id];
+
+ clear_bit(id, s->txq_maperr);
+ tasklet_schedule(&txq->qresume_tsk);
+ }
+
+ budget = MAX_TIMER_TX_RECLAIM;
+ i = s->ethtxq_rover;
+ do {
+ struct sge_eth_txq *q = &s->ethtxq[i];
+
+ if (q->q.in_use &&
+ time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
+ __netif_tx_trylock(q->txq)) {
+ int avail = reclaimable(&q->q);
+
+ if (avail) {
+ if (avail > budget)
+ avail = budget;
+
+ free_tx_desc(adap, &q->q, avail, true);
+ q->q.in_use -= avail;
+ budget -= avail;
+ }
+ __netif_tx_unlock(q->txq);
+ }
+
+ if (++i >= s->ethqsets)
+ i = 0;
+ } while (budget && i != s->ethtxq_rover);
+ s->ethtxq_rover = i;
+ mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
+}
+
+int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
+ struct net_device *dev, int intr_idx,
+ struct sge_fl *fl, rspq_handler_t hnd)
+{
+ int ret, flsz = 0;
+ struct fw_iq_cmd c;
+ struct port_info *pi = netdev_priv(dev);
+
+	/* Size needs to be a multiple of 16, including the status entry. */
+ iq->size = roundup(iq->size, 16);
+
+ iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
+ &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
+ if (!iq->desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_CMD_EXEC |
+ FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0));
+ c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
+ FW_LEN16(c));
+ c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
+ FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
+ FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
+ FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
+ -intr_idx - 1));
+ c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
+ FW_IQ_CMD_IQGTSMODE |
+ FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
+ FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
+ c.iqsize = htons(iq->size);
+ c.iqaddr = cpu_to_be64(iq->phys_addr);
+
+ if (fl) {
+ fl->size = roundup(fl->size, 8);
+ fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
+ sizeof(struct rx_sw_desc), &fl->addr,
+ &fl->sdesc, STAT_LEN, NUMA_NO_NODE);
+ if (!fl->desc)
+ goto fl_nomem;
+
+ flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc);
+ c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
+ FW_IQ_CMD_FL0FETCHRO(1) |
+ FW_IQ_CMD_FL0DATARO(1) |
+ FW_IQ_CMD_FL0PADEN);
+ c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
+ FW_IQ_CMD_FL0FBMAX(3));
+ c.fl0size = htons(flsz);
+ c.fl0addr = cpu_to_be64(fl->addr);
+ }
+
+ ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ if (ret)
+ goto err;
+
+ netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
+ iq->cur_desc = iq->desc;
+ iq->cidx = 0;
+ iq->gen = 1;
+ iq->next_intr_params = iq->intr_params;
+ iq->cntxt_id = ntohs(c.iqid);
+ iq->abs_id = ntohs(c.physiqid);
+ iq->size--; /* subtract status entry */
+ iq->adap = adap;
+ iq->netdev = dev;
+ iq->handler = hnd;
+
+ /* set offset to -1 to distinguish ingress queues without FL */
+ iq->offset = fl ? 0 : -1;
+
+ adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
+
+ if (fl) {
+ fl->cntxt_id = ntohs(c.fl0id);
+ fl->avail = fl->pend_cred = 0;
+ fl->pidx = fl->cidx = 0;
+ fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
+ adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
+ refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
+ }
+ return 0;
+
+fl_nomem:
+ ret = -ENOMEM;
+err:
+ if (iq->desc) {
+ dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
+ iq->desc, iq->phys_addr);
+ iq->desc = NULL;
+ }
+ if (fl && fl->desc) {
+ kfree(fl->sdesc);
+ fl->sdesc = NULL;
+ dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
+ fl->desc, fl->addr);
+ fl->desc = NULL;
+ }
+ return ret;
+}
+
+static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
+{
+ q->in_use = 0;
+ q->cidx = q->pidx = 0;
+ q->stops = q->restarts = 0;
+ q->stat = (void *)&q->desc[q->size];
+ q->cntxt_id = id;
+ adap->sge.egr_map[id - adap->sge.egr_start] = q;
+}
+
+int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
+ struct net_device *dev, struct netdev_queue *netdevq,
+ unsigned int iqid)
+{
+ int ret, nentries;
+ struct fw_eq_eth_cmd c;
+ struct port_info *pi = netdev_priv(dev);
+
+ /* Add status entries */
+ nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+
+ txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
+ sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
+ &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
+ netdev_queue_numa_node_read(netdevq));
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_CMD_EXEC |
+ FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
+ c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
+ FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
+ c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
+ c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
+ FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
+ FW_EQ_ETH_CMD_FETCHRO(1) |
+ FW_EQ_ETH_CMD_IQID(iqid));
+ c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
+ FW_EQ_ETH_CMD_FBMAX(3) |
+ FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
+ FW_EQ_ETH_CMD_EQSIZE(nentries));
+ c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+ ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ if (ret) {
+ kfree(txq->q.sdesc);
+ txq->q.sdesc = NULL;
+ dma_free_coherent(adap->pdev_dev,
+ nentries * sizeof(struct tx_desc),
+ txq->q.desc, txq->q.phys_addr);
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
+ txq->txq = netdevq;
+ txq->tso = txq->tx_cso = txq->vlan_ins = 0;
+ txq->mapping_err = 0;
+ return 0;
+}
+
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+ struct net_device *dev, unsigned int iqid,
+ unsigned int cmplqid)
+{
+ int ret, nentries;
+ struct fw_eq_ctrl_cmd c;
+ struct port_info *pi = netdev_priv(dev);
+
+ /* Add status entries */
+ nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+
+ txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
+ sizeof(struct tx_desc), 0, &txq->q.phys_addr,
+ NULL, 0, NUMA_NO_NODE);
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_CMD_EXEC |
+ FW_EQ_CTRL_CMD_PFN(adap->fn) |
+ FW_EQ_CTRL_CMD_VFN(0));
+ c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
+ FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
+ c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
+ c.physeqid_pkd = htonl(0);
+ c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
+ FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
+ FW_EQ_CTRL_CMD_FETCHRO |
+ FW_EQ_CTRL_CMD_IQID(iqid));
+ c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
+ FW_EQ_CTRL_CMD_FBMAX(3) |
+ FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
+ FW_EQ_CTRL_CMD_EQSIZE(nentries));
+ c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+ ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ if (ret) {
+ dma_free_coherent(adap->pdev_dev,
+ nentries * sizeof(struct tx_desc),
+ txq->q.desc, txq->q.phys_addr);
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
+ txq->adap = adap;
+ skb_queue_head_init(&txq->sendq);
+ tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
+ txq->full = 0;
+ return 0;
+}
+
+int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
+ struct net_device *dev, unsigned int iqid)
+{
+ int ret, nentries;
+ struct fw_eq_ofld_cmd c;
+ struct port_info *pi = netdev_priv(dev);
+
+ /* Add status entries */
+ nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+
+ txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
+ sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
+ &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
+ NUMA_NO_NODE);
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_CMD_EXEC |
+ FW_EQ_OFLD_CMD_PFN(adap->fn) |
+ FW_EQ_OFLD_CMD_VFN(0));
+ c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
+ FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
+ c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
+ FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
+ FW_EQ_OFLD_CMD_FETCHRO(1) |
+ FW_EQ_OFLD_CMD_IQID(iqid));
+ c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
+ FW_EQ_OFLD_CMD_FBMAX(3) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
+ FW_EQ_OFLD_CMD_EQSIZE(nentries));
+ c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+ ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ if (ret) {
+ kfree(txq->q.sdesc);
+ txq->q.sdesc = NULL;
+ dma_free_coherent(adap->pdev_dev,
+ nentries * sizeof(struct tx_desc),
+ txq->q.desc, txq->q.phys_addr);
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
+ txq->adap = adap;
+ skb_queue_head_init(&txq->sendq);
+ tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
+ txq->full = 0;
+ txq->mapping_err = 0;
+ return 0;
+}
+
+static void free_txq(struct adapter *adap, struct sge_txq *q)
+{
+ dma_free_coherent(adap->pdev_dev,
+ q->size * sizeof(struct tx_desc) + STAT_LEN,
+ q->desc, q->phys_addr);
+ q->cntxt_id = 0;
+ q->sdesc = NULL;
+ q->desc = NULL;
+}
+
+static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
+ struct sge_fl *fl)
+{
+ unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
+
+ adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
+ t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
+ rq->cntxt_id, fl_id, 0xffff);
+ dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
+ rq->desc, rq->phys_addr);
+ netif_napi_del(&rq->napi);
+ rq->netdev = NULL;
+ rq->cntxt_id = rq->abs_id = 0;
+ rq->desc = NULL;
+
+ if (fl) {
+ free_rx_bufs(adap, fl, fl->avail);
+ dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN,
+ fl->desc, fl->addr);
+ kfree(fl->sdesc);
+ fl->sdesc = NULL;
+ fl->cntxt_id = 0;
+ fl->desc = NULL;
+ }
+}
+
+/**
+ * t4_free_sge_resources - free SGE resources
+ * @adap: the adapter
+ *
+ * Frees resources used by the SGE queue sets.
+ */
+void t4_free_sge_resources(struct adapter *adap)
+{
+ int i;
+ struct sge_eth_rxq *eq = adap->sge.ethrxq;
+ struct sge_eth_txq *etq = adap->sge.ethtxq;
+ struct sge_ofld_rxq *oq = adap->sge.ofldrxq;
+
+ /* clean up Ethernet Tx/Rx queues */
+ for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
+ if (eq->rspq.desc)
+ free_rspq_fl(adap, &eq->rspq, &eq->fl);
+ if (etq->q.desc) {
+ t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
+ etq->q.cntxt_id);
+ free_tx_desc(adap, &etq->q, etq->q.in_use, true);
+ kfree(etq->q.sdesc);
+ free_txq(adap, &etq->q);
+ }
+ }
+
+ /* clean up RDMA and iSCSI Rx queues */
+ for (i = 0; i < adap->sge.ofldqsets; i++, oq++) {
+ if (oq->rspq.desc)
+ free_rspq_fl(adap, &oq->rspq, &oq->fl);
+ }
+ for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) {
+ if (oq->rspq.desc)
+ free_rspq_fl(adap, &oq->rspq, &oq->fl);
+ }
+
+ /* clean up offload Tx queues */
+ for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
+ struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
+
+ if (q->q.desc) {
+ tasklet_kill(&q->qresume_tsk);
+ t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
+ q->q.cntxt_id);
+ free_tx_desc(adap, &q->q, q->q.in_use, false);
+ kfree(q->q.sdesc);
+ __skb_queue_purge(&q->sendq);
+ free_txq(adap, &q->q);
+ }
+ }
+
+ /* clean up control Tx queues */
+ for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
+ struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
+
+ if (cq->q.desc) {
+ tasklet_kill(&cq->qresume_tsk);
+ t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
+ cq->q.cntxt_id);
+ __skb_queue_purge(&cq->sendq);
+ free_txq(adap, &cq->q);
+ }
+ }
+
+ if (adap->sge.fw_evtq.desc)
+ free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
+
+ if (adap->sge.intrq.desc)
+ free_rspq_fl(adap, &adap->sge.intrq, NULL);
+
+ /* clear the reverse egress queue map */
+ memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
+}
+
+void t4_sge_start(struct adapter *adap)
+{
+ adap->sge.ethtxq_rover = 0;
+ mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
+ mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
+}
+
+/**
+ * t4_sge_stop - disable SGE operation
+ * @adap: the adapter
+ *
+ * Stop tasklets and timers associated with the DMA engine. Note that
+ * this is effective only if measures have been taken to disable any HW
+ * events that may restart them.
+ */
+void t4_sge_stop(struct adapter *adap)
+{
+ int i;
+ struct sge *s = &adap->sge;
+
+ if (in_interrupt()) /* actions below require waiting */
+ return;
+
+ if (s->rx_timer.function)
+ del_timer_sync(&s->rx_timer);
+ if (s->tx_timer.function)
+ del_timer_sync(&s->tx_timer);
+
+ for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
+ struct sge_ofld_txq *q = &s->ofldtxq[i];
+
+ if (q->q.desc)
+ tasklet_kill(&q->qresume_tsk);
+ }
+ for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
+ struct sge_ctrl_txq *cq = &s->ctrlq[i];
+
+ if (cq->q.desc)
+ tasklet_kill(&cq->qresume_tsk);
+ }
+}
+
+/**
+ * t4_sge_init - initialize SGE
+ * @adap: the adapter
+ *
+ * Performs SGE initialization needed every time after a chip reset.
+ * We do not initialize any of the queues here; instead the driver
+ * top-level must request them individually.
+ */
+void t4_sge_init(struct adapter *adap)
+{
+ unsigned int i, v;
+ struct sge *s = &adap->sge;
+ unsigned int fl_align_log = ilog2(FL_ALIGN);
+
+ t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK |
+ INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE,
+ INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
+ RXPKTCPLMODE |
+ (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
+
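+	/*
+	 * Replicate the host page size code (PAGE_SHIFT - 10) into all eight
+	 * 4-bit fields of SGE_HOST_PAGE_SIZE (one per PF); with 4 KB pages
+	 * (PAGE_SHIFT == 12) each nibble holds 2, i.e. v = 0x22222222.
+	 */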
+ for (i = v = 0; i < 32; i += 4)
+ v |= (PAGE_SHIFT - 10) << i;
+ t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v);
+ t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE);
+#if FL_PG_ORDER > 0
+ t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER);
+#endif
+ t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
+ THRESHOLD_0(s->counter_val[0]) |
+ THRESHOLD_1(s->counter_val[1]) |
+ THRESHOLD_2(s->counter_val[2]) |
+ THRESHOLD_3(s->counter_val[3]));
+ t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
+ TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
+ TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
+ t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
+ TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) |
+ TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3])));
+ t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
+ TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) |
+ TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5])));
+ setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
+ setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
+ s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */
+ s->idma_state[0] = s->idma_state[1] = 0;
+ spin_lock_init(&s->intrq_lock);
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
new file mode 100644
index 000000000000..d1ec111aebd8
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -0,0 +1,2856 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/init.h>
+#include <linux/delay.h>
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4fw_api.h"
+
+/**
+ * t4_wait_op_done_val - wait until an operation is completed
+ * @adapter: the adapter performing the operation
+ * @reg: the register to check for completion
+ * @mask: a single-bit field within @reg that indicates completion
+ * @polarity: the value of the field when the operation is completed
+ * @attempts: number of check iterations
+ * @delay: delay in usecs between iterations
+ * @valp: where to store the value of the register at completion time
+ *
+ * Wait until an operation is completed by checking a bit in a register
+ * up to @attempts times. If @valp is not NULL the value of the register
+ * at the time it indicated completion is stored there. Returns 0 if the
+ * operation completes and -EAGAIN otherwise.
+ */
+static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay, u32 *valp)
+{
+ while (1) {
+ u32 val = t4_read_reg(adapter, reg);
+
+ if (!!(val & mask) == polarity) {
+ if (valp)
+ *valp = val;
+ return 0;
+ }
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ udelay(delay);
+ }
+}
+
+static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay)
+{
+ return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
+ delay, NULL);
+}
+
+/**
+ * t4_set_reg_field - set a register field to a value
+ * @adapter: the adapter to program
+ * @addr: the register address
+ * @mask: specifies the portion of the register to modify
+ * @val: the new value for the register field
+ *
+ * Sets a register field specified by the supplied mask to the
+ * given value.
+ */
+void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
+ u32 val)
+{
+ u32 v = t4_read_reg(adapter, addr) & ~mask;
+
+ t4_write_reg(adapter, addr, v | val);
+ (void) t4_read_reg(adapter, addr); /* flush */
+}
+
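+/*
+ * Usage sketch (illustrative, with made-up field positions): reprogramming a
+ * 2-bit field at bits 5:4 of a register would look like
+ *
+ *	t4_set_reg_field(adap, addr, 0x3 << 4, val << 4);
+ *
+ * which clears the field via the mask and ORs in the new value.
+ */
+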
+/**
+ * t4_read_indirect - read indirectly addressed registers
+ * @adap: the adapter
+ * @addr_reg: register holding the indirect address
+ * @data_reg: register holding the value of the indirect register
+ * @vals: where the read register values are stored
+ * @nregs: how many indirect registers to read
+ * @start_idx: index of first indirect register to read
+ *
+ * Reads registers that are accessed indirectly through an address/data
+ * register pair.
+ */
+static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals,
+ unsigned int nregs, unsigned int start_idx)
+{
+ while (nregs--) {
+ t4_write_reg(adap, addr_reg, start_idx);
+ *vals++ = t4_read_reg(adap, data_reg);
+ start_idx++;
+ }
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order.
+ */
+static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
+ u32 mbox_addr)
+{
+ for ( ; nflit; nflit--, mbox_addr += 8)
+ *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
+}
+
+/*
+ * Handle a FW assertion reported in a mailbox.
+ */
+static void fw_asrt(struct adapter *adap, u32 mbox_addr)
+{
+ struct fw_debug_cmd asrt;
+
+ get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
+ dev_alert(adap->pdev_dev,
+ "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
+ asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
+ ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
+}
+
+static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
+{
+ dev_err(adap->pdev_dev,
+ "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
+ (unsigned long long)t4_read_reg64(adap, data_reg),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 8),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 16),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 24),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 32),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 40),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 48),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 56));
+}
+
+/**
+ * t4_wr_mbox_meat - send a command to FW through the given mailbox
+ * @adap: the adapter
+ * @mbox: index of the mailbox to use
+ * @cmd: the command to write
+ * @size: command length in bytes
+ * @rpl: where to optionally store the reply
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sends the given command to FW through the selected mailbox and waits
+ * for the FW to execute the command. If @rpl is not %NULL it is used to
+ * store the FW's reply to the command. The command and its optional
+ * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
+ * to respond. @sleep_ok determines whether we may sleep while awaiting
+ * the response. If sleeping is allowed we use progressive backoff;
+ * otherwise we spin.
+ *
+ * The return value is 0 on success or a negative errno on failure. A
+ * failure can happen either because we are not able to execute the
+ * command or FW executes it but signals an error. In the latter case
+ * the return value is the error code indicated by FW (negated).
+ */
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+ void *rpl, bool sleep_ok)
+{
+ static const int delay[] = {
+ 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
+ };
+
+ u32 v;
+ u64 res;
+ int i, ms, delay_idx;
+ const __be64 *p = cmd;
+ u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
+ u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
+
+ if ((size & 15) || size > MBOX_LEN)
+ return -EINVAL;
+
+ /*
+ * If the device is off-line, as in EEH, commands will time out.
+ * Fail them early so we don't waste time waiting.
+ */
+ if (adap->pdev->error_state != pci_channel_io_normal)
+ return -EIO;
+
+ v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
+ for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
+ v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
+
+ if (v != MBOX_OWNER_DRV)
+ return v ? -EBUSY : -ETIMEDOUT;
+
+ for (i = 0; i < size; i += 8)
+ t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
+
+ t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
+ t4_read_reg(adap, ctl_reg); /* flush write */
+
+ delay_idx = 0;
+ ms = delay[0];
+
+ for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else
+ mdelay(ms);
+
+ v = t4_read_reg(adap, ctl_reg);
+ if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
+ if (!(v & MBMSGVALID)) {
+ t4_write_reg(adap, ctl_reg, 0);
+ continue;
+ }
+
+ res = t4_read_reg64(adap, data_reg);
+ if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
+ fw_asrt(adap, data_reg);
+ res = FW_CMD_RETVAL(EIO);
+ } else if (rpl)
+ get_mbox_rpl(adap, rpl, size / 8, data_reg);
+
+ if (FW_CMD_RETVAL_GET((int)res))
+ dump_mbox(adap, mbox, data_reg);
+ t4_write_reg(adap, ctl_reg, 0);
+ return -FW_CMD_RETVAL_GET((int)res);
+ }
+ }
+
+ dump_mbox(adap, mbox, data_reg);
+ dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
+ *(const u8 *)cmd, mbox);
+ return -ETIMEDOUT;
+}
+
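+/*
+ * Timing sketch for the poll loop above (illustrative): with @sleep_ok the
+ * intervals follow delay[] (1, 1, 3, 5, ... ms), repeating the final 200 ms
+ * entry until the accumulated wait reaches FW_CMD_MAX_TIMEOUT; without
+ * @sleep_ok the loop busy-waits in fixed 1 ms mdelay() steps instead.
+ */
+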
+/**
+ * t4_mc_read - read from MC through backdoor accesses
+ * @adap: the adapter
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from MC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
+{
+ int i;
+
+ if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
+ return -EBUSY;
+ t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
+ t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
+ t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
+ t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
+ BIST_CMD_GAP(1));
+ i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
+ if (i)
+ return i;
+
+#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
+ if (ecc)
+ *ecc = t4_read_reg64(adap, MC_DATA(16));
+#undef MC_DATA
+ return 0;
+}
+
+/**
+ * t4_edc_read - read from EDC through backdoor accesses
+ * @adap: the adapter
+ * @idx: which EDC to access
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
+{
+ int i;
+
+ idx *= EDC_STRIDE;
+ if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
+ return -EBUSY;
+ t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
+ t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
+ t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
+ t4_write_reg(adap, EDC_BIST_CMD + idx,
+ BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
+ i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
+ if (i)
+ return i;
+
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
+ if (ecc)
+ *ecc = t4_read_reg64(adap, EDC_DATA(16));
+#undef EDC_DATA
+ return 0;
+}
+
+#define EEPROM_STAT_ADDR 0x7bfc
+#define VPD_BASE 0
+#define VPD_LEN 512
+
+/**
+ * t4_seeprom_wp - enable/disable EEPROM write protection
+ * @adapter: the adapter
+ * @enable: whether to enable or disable write protection
+ *
+ * Enables or disables write protection on the serial EEPROM.
+ */
+int t4_seeprom_wp(struct adapter *adapter, bool enable)
+{
+ unsigned int v = enable ? 0xc : 0;
+ int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
+ return ret < 0 ? ret : 0;
+}
+
+/**
+ * get_vpd_params - read VPD parameters from VPD EEPROM
+ * @adapter: adapter to read
+ * @p: where to store the parameters
+ *
+ * Reads card parameters stored in VPD EEPROM.
+ */
+static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+{
+ int i, ret;
+ int ec, sn;
+ u8 vpd[VPD_LEN], csum;
+ unsigned int vpdr_len, kw_offset, id_len;
+
+ ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
+ if (ret < 0)
+ return ret;
+
+ if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
+ dev_err(adapter->pdev_dev, "missing VPD ID string\n");
+ return -EINVAL;
+ }
+
+ id_len = pci_vpd_lrdt_size(vpd);
+ if (id_len > ID_LEN)
+ id_len = ID_LEN;
+
+ i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
+ if (i < 0) {
+ dev_err(adapter->pdev_dev, "missing VPD-R section\n");
+ return -EINVAL;
+ }
+
+ vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
+ kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
+ if (vpdr_len + kw_offset > VPD_LEN) {
+ dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
+ return -EINVAL;
+ }
+
+#define FIND_VPD_KW(var, name) do { \
+ var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
+ if (var < 0) { \
+ dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
+ return -EINVAL; \
+ } \
+ var += PCI_VPD_INFO_FLD_HDR_SIZE; \
+} while (0)
+
+ FIND_VPD_KW(i, "RV");
+ for (csum = 0; i >= 0; i--)
+ csum += vpd[i];
+
+ if (csum) {
+ dev_err(adapter->pdev_dev,
+ "corrupted VPD EEPROM, actual csum %u\n", csum);
+ return -EINVAL;
+ }
+
+ FIND_VPD_KW(ec, "EC");
+ FIND_VPD_KW(sn, "SN");
+#undef FIND_VPD_KW
+
+ memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
+ strim(p->id);
+ memcpy(p->ec, vpd + ec, EC_LEN);
+ strim(p->ec);
+ i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
+ memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
+ strim(p->sn);
+ return 0;
+}
+
+/* serial flash and firmware constants */
+enum {
+ SF_ATTEMPTS = 10, /* max retries for SF operations */
+
+ /* flash command opcodes */
+ SF_PROG_PAGE = 2, /* program page */
+ SF_WR_DISABLE = 4, /* disable writes */
+ SF_RD_STATUS = 5, /* read status register */
+ SF_WR_ENABLE = 6, /* enable writes */
+ SF_RD_DATA_FAST = 0xb, /* read flash */
+ SF_RD_ID = 0x9f, /* read ID */
+ SF_ERASE_SECTOR = 0xd8, /* erase sector */
+
+ FW_MAX_SIZE = 512 * 1024,
+};
+
+/**
+ * sf1_read - read data from the serial flash
+ * @adapter: the adapter
+ * @byte_cnt: number of bytes to read
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @valp: where to store the read data
+ *
+ * Reads up to 4 bytes of data from the serial flash. The location of
+ * the read needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
+ int lock, u32 *valp)
+{
+ int ret;
+
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (t4_read_reg(adapter, SF_OP) & BUSY)
+ return -EBUSY;
+ cont = cont ? SF_CONT : 0;
+ lock = lock ? SF_LOCK : 0;
+ t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
+ ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
+ if (!ret)
+ *valp = t4_read_reg(adapter, SF_DATA);
+ return ret;
+}
+
+/**
+ * sf1_write - write data to the serial flash
+ * @adapter: the adapter
+ * @byte_cnt: number of bytes to write
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @val: value to write
+ *
+ * Writes up to 4 bytes of data to the serial flash. The location of
+ * the write needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
+ int lock, u32 val)
+{
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (t4_read_reg(adapter, SF_OP) & BUSY)
+ return -EBUSY;
+ cont = cont ? SF_CONT : 0;
+ lock = lock ? SF_LOCK : 0;
+ t4_write_reg(adapter, SF_DATA, val);
+ t4_write_reg(adapter, SF_OP, lock |
+ cont | BYTECNT(byte_cnt - 1) | OP_WR);
+ return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
+}
+
+/**
+ * flash_wait_op - wait for a flash operation to complete
+ * @adapter: the adapter
+ * @attempts: max number of polls of the status register
+ * @delay: delay between polls in ms
+ *
+ * Wait for a flash operation to complete by polling the status register.
+ */
+static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
+{
+ int ret;
+ u32 status;
+
+ while (1) {
+ if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
+ (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
+ return ret;
+ if (!(status & 1))
+ return 0;
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ msleep(delay);
+ }
+}
+
+/**
+ * t4_read_flash - read words from serial flash
+ * @adapter: the adapter
+ * @addr: the start address for the read
+ * @nwords: how many 32-bit words to read
+ * @data: where to store the read data
+ * @byte_oriented: whether to store data as bytes or as words
+ *
+ * Read the specified number of 32-bit words from the serial flash.
+ * If @byte_oriented is set the read data is stored as a byte array
+ * (i.e., big-endian), otherwise as 32-bit words in the platform's
+ * natural endianness.
+ */
+static int t4_read_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int nwords, u32 *data, int byte_oriented)
+{
+ int ret;
+
+ if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
+ return -EINVAL;
+
+ addr = swab32(addr) | SF_RD_DATA_FAST;
+
+ if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
+ (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
+ return ret;
+
+ for ( ; nwords; nwords--, data++) {
+ ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
+ if (nwords == 1)
+ t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
+ if (ret)
+ return ret;
+ if (byte_oriented)
+ *data = htonl(*data);
+ }
+ return 0;
+}
+
+/**
+ * t4_write_flash - write up to a page of data to the serial flash
+ * @adapter: the adapter
+ * @addr: the start address to write
+ * @n: length of data to write in bytes
+ * @data: the data to write
+ *
+ * Writes up to a page of data (256 bytes) to the serial flash starting
+ * at the given address. All the data must be written to the same page.
+ */
+static int t4_write_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int n, const u8 *data)
+{
+ int ret;
+ u32 buf[64];
+ unsigned int i, c, left, val, offset = addr & 0xff;
+
+ if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
+ return -EINVAL;
+
+ val = swab32(addr) | SF_PROG_PAGE;
+
+ if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
+ (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
+ goto unlock;
+
+ for (left = n; left; left -= c) {
+ c = min(left, 4U);
+ for (val = 0, i = 0; i < c; ++i)
+ val = (val << 8) + *data++;
+
+ ret = sf1_write(adapter, c, c != left, 1, val);
+ if (ret)
+ goto unlock;
+ }
+ ret = flash_wait_op(adapter, 8, 1);
+ if (ret)
+ goto unlock;
+
+ t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
+
+ /* Read the page to verify the write succeeded */
+ ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+ if (ret)
+ return ret;
+
+ if (memcmp(data - n, (u8 *)buf + offset, n)) {
+ dev_err(adapter->pdev_dev,
+ "failed to correctly write the flash page at %#x\n",
+ addr);
+ return -EIO;
+ }
+ return 0;
+
+unlock:
+ t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
+ return ret;
+}
+
+/**
+ * get_fw_version - read the firmware version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the FW version from flash.
+ */
+static int get_fw_version(struct adapter *adapter, u32 *vers)
+{
+ return t4_read_flash(adapter, adapter->params.sf_fw_start +
+ offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
+}
+
+/**
+ * get_tp_version - read the TP microcode version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the TP microcode version from flash.
+ */
+static int get_tp_version(struct adapter *adapter, u32 *vers)
+{
+ return t4_read_flash(adapter, adapter->params.sf_fw_start +
+ offsetof(struct fw_hdr, tp_microcode_ver),
+ 1, vers, 0);
+}
+
+/**
+ * t4_check_fw_version - check if the FW is compatible with this driver
+ * @adapter: the adapter
+ *
+ * Checks if an adapter's FW is compatible with the driver. Returns 0
+ * if there's an exact match, a negative error if the version could not be
+ * read or there's a major version mismatch, and a positive value if the
+ * expected major version is found but there's a minor version mismatch.
+ */
+int t4_check_fw_version(struct adapter *adapter)
+{
+ u32 api_vers[2];
+ int ret, major, minor, micro;
+
+ ret = get_fw_version(adapter, &adapter->params.fw_vers);
+ if (!ret)
+ ret = get_tp_version(adapter, &adapter->params.tp_vers);
+ if (!ret)
+ ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
+ offsetof(struct fw_hdr, intfver_nic),
+ 2, api_vers, 1);
+ if (ret)
+ return ret;
+
+ major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
+ minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
+ micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
+ memcpy(adapter->params.api_vers, api_vers,
+ sizeof(adapter->params.api_vers));
+
+ if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
+ dev_err(adapter->pdev_dev,
+ "card FW has major version %u, driver wants %u\n",
+ major, FW_VERSION_MAJOR);
+ return -EINVAL;
+ }
+
+ if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
+ return 0; /* perfect match */
+
+ /* Minor/micro version mismatch. Report it but often it's OK. */
+ return 1;
+}
+
+/**
+ * t4_flash_erase_sectors - erase a range of flash sectors
+ * @adapter: the adapter
+ * @start: the first sector to erase
+ * @end: the last sector to erase
+ *
+ * Erases the sectors in the given inclusive range.
+ */
+static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
+{
+ int ret = 0;
+
+ while (start <= end) {
+ if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
+ (ret = sf1_write(adapter, 4, 0, 1,
+ SF_ERASE_SECTOR | (start << 8))) != 0 ||
+ (ret = flash_wait_op(adapter, 14, 500)) != 0) {
+ dev_err(adapter->pdev_dev,
+ "erase of flash sector %d failed, error %d\n",
+ start, ret);
+ break;
+ }
+ start++;
+ }
+ t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
+ return ret;
+}
+
+/**
+ * t4_load_fw - download firmware
+ * @adap: the adapter
+ * @fw_data: the firmware image to write
+ * @size: image size
+ *
+ * Write the supplied firmware image to the card's serial flash.
+ */
+int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
+{
+ u32 csum;
+ int ret, addr;
+ unsigned int i;
+ u8 first_page[SF_PAGE_SIZE];
+ const u32 *p = (const u32 *)fw_data;
+ const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
+ unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+ unsigned int fw_img_start = adap->params.sf_fw_start;
+ unsigned int fw_start_sec = fw_img_start / sf_sec_size;
+
+ if (!size) {
+ dev_err(adap->pdev_dev, "FW image has no data\n");
+ return -EINVAL;
+ }
+ if (size & 511) {
+ dev_err(adap->pdev_dev,
+ "FW image size not multiple of 512 bytes\n");
+ return -EINVAL;
+ }
+ if (ntohs(hdr->len512) * 512 != size) {
+ dev_err(adap->pdev_dev,
+ "FW image size differs from size in FW header\n");
+ return -EINVAL;
+ }
+ if (size > FW_MAX_SIZE) {
+ dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
+ FW_MAX_SIZE);
+ return -EFBIG;
+ }
+
+ for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+ csum += ntohl(p[i]);
+
+ if (csum != 0xffffffff) {
+ dev_err(adap->pdev_dev,
+ "corrupted firmware image, checksum %#x\n", csum);
+ return -EINVAL;
+ }
+
+ i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
+ ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
+ if (ret)
+ goto out;
+
+ /*
+ * We write the correct version at the end so the driver can see a bad
+ * version if the FW write fails. Start by writing a copy of the
+ * first page with a bad version.
+ */
+ memcpy(first_page, fw_data, SF_PAGE_SIZE);
+ ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
+ ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
+ if (ret)
+ goto out;
+
+ addr = fw_img_start;
+ for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+ addr += SF_PAGE_SIZE;
+ fw_data += SF_PAGE_SIZE;
+ ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
+ if (ret)
+ goto out;
+ }
+
+ ret = t4_write_flash(adap,
+ fw_img_start + offsetof(struct fw_hdr, fw_ver),
+ sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
+out:
+ if (ret)
+ dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
+ ret);
+ return ret;
+}
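+
+/*
+ * Illustrative sketch (not part of this patch) of the checksum convention
+ * t4_load_fw() verifies: all big-endian 32-bit words of the image must
+ * sum to 0xffffffff, so an image builder reserves one word (location
+ * assumed here) and stores the value that completes the sum modulo 2^32.
+ */
+static u32 example_fw_csum_fixup(const __be32 *words, unsigned int nwords)
+{
+        u32 sum = 0;
+        unsigned int i;
+
+        for (i = 0; i < nwords; i++)    /* every word except the slot */
+                sum += ntohl(words[i]);
+        return 0xffffffff - sum;        /* makes the full sum 0xffffffff */
+}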
+
+#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
+
+/**
+ * t4_link_start - apply link configuration to MAC/PHY
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: the port id
+ * @lc: the requested link configuration
+ *
+ * Set up a port's MAC and PHY according to a desired link configuration.
+ * - If the PHY can auto-negotiate first decide what to advertise, then
+ * enable/disable auto-negotiation as desired, and reset.
+ * - If the PHY does not auto-negotiate just reset it.
+ * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ * otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+ struct link_config *lc)
+{
+ struct fw_port_cmd c;
+ unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
+
+ lc->link_ok = 0;
+ if (lc->requested_fc & PAUSE_RX)
+ fc |= FW_PORT_CAP_FC_RX;
+ if (lc->requested_fc & PAUSE_TX)
+ fc |= FW_PORT_CAP_FC_TX;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
+ c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+ FW_LEN16(c));
+
+ if (!(lc->supported & FW_PORT_CAP_ANEG)) {
+ c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
+ lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+ } else if (lc->autoneg == AUTONEG_DISABLE) {
+ c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
+ lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+ } else
+ c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
+
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_restart_aneg - restart autonegotiation
+ * @adap: the adapter
+ * @mbox: mbox to use for the FW command
+ * @port: the port id
+ *
+ * Restarts autonegotiation for the selected port.
+ */
+int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
+{
+ struct fw_port_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
+ c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+ FW_LEN16(c));
+ c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+struct intr_info {
+ unsigned int mask; /* bits to check in interrupt status */
+ const char *msg; /* message to print or NULL */
+ short stat_idx; /* stat counter to increment or -1 */
+ unsigned short fatal; /* whether the condition reported is fatal */
+};
+
+/**
+ * t4_handle_intr_status - table driven interrupt handler
+ * @adapter: the adapter that generated the interrupt
+ * @reg: the interrupt status register to process
+ * @acts: table of interrupt actions
+ *
+ * A table driven interrupt handler that applies a set of masks to an
+ * interrupt status word and performs the corresponding actions if the
+ * interrupts described by the mask have occurred. The actions include
+ * optionally emitting a warning or alert message. The table is terminated
+ * by an entry specifying mask 0. Returns the number of fatal interrupt
+ * conditions.
+ */
+static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
+ const struct intr_info *acts)
+{
+ int fatal = 0;
+ unsigned int mask = 0;
+ unsigned int status = t4_read_reg(adapter, reg);
+
+ for ( ; acts->mask; ++acts) {
+ if (!(status & acts->mask))
+ continue;
+ if (acts->fatal) {
+ fatal++;
+ dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
+ status & acts->mask);
+ } else if (acts->msg && printk_ratelimit())
+ dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
+ status & acts->mask);
+ mask |= acts->mask;
+ }
+ status &= mask;
+ if (status) /* clear processed interrupts */
+ t4_write_reg(adapter, reg, status);
+ return fatal;
+}
+
+/*
+ * Interrupt handler for the PCIE module.
+ */
+static void pcie_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info sysbus_intr_info[] = {
+ { RNPP, "RXNP array parity error", -1, 1 },
+ { RPCP, "RXPC array parity error", -1, 1 },
+ { RCIP, "RXCIF array parity error", -1, 1 },
+ { RCCP, "Rx completions control array parity error", -1, 1 },
+ { RFTP, "RXFT array parity error", -1, 1 },
+ { 0 }
+ };
+ static const struct intr_info pcie_port_intr_info[] = {
+ { TPCP, "TXPC array parity error", -1, 1 },
+ { TNPP, "TXNP array parity error", -1, 1 },
+ { TFTP, "TXFT array parity error", -1, 1 },
+ { TCAP, "TXCA array parity error", -1, 1 },
+ { TCIP, "TXCIF array parity error", -1, 1 },
+ { RCAP, "RXCA array parity error", -1, 1 },
+ { OTDD, "outbound request TLP discarded", -1, 1 },
+ { RDPE, "Rx data parity error", -1, 1 },
+ { TDUE, "Tx uncorrectable data error", -1, 1 },
+ { 0 }
+ };
+ static const struct intr_info pcie_intr_info[] = {
+ { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
+ { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
+ { MSIDATAPERR, "MSI data parity error", -1, 1 },
+ { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
+ { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
+ { TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
+ { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
+ { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
+ { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
+ { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
+ { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR, "PCI FID parity error", -1, 1 },
+ { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
+ { MATAGPERR, "PCI MA tag parity error", -1, 1 },
+ { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
+ { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
+ { RXWRPERR, "PCI Rx write parity error", -1, 1 },
+ { RPLPERR, "PCI replay buffer parity error", -1, 1 },
+ { PCIESINT, "PCI core secondary fault", -1, 1 },
+ { PCIEPINT, "PCI core primary fault", -1, 1 },
+ { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
+ { 0 }
+ };
+
+ int fat;
+
+ fat = t4_handle_intr_status(adapter,
+ PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+ sysbus_intr_info) +
+ t4_handle_intr_status(adapter,
+ PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+ pcie_port_intr_info) +
+ t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
+ if (fat)
+ t4_fatal_err(adapter);
+}
+
+/*
+ * TP interrupt handler.
+ */
+static void tp_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info tp_intr_info[] = {
+ { 0x3fffffff, "TP parity error", -1, 1 },
+ { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * SGE interrupt handler.
+ */
+static void sge_intr_handler(struct adapter *adapter)
+{
+ u64 v;
+
+ static const struct intr_info sge_intr_info[] = {
+ { ERR_CPL_EXCEED_IQE_SIZE,
+ "SGE received CPL exceeding IQE size", -1, 1 },
+ { ERR_INVALID_CIDX_INC,
+ "SGE GTS CIDX increment too large", -1, 0 },
+ { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
+ { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
+ { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
+ "SGE IQID > 1023 received CPL for FL", -1, 0 },
+ { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
+ 0 },
+ { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
+ 0 },
+ { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
+ 0 },
+ { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
+ 0 },
+ { ERR_ING_CTXT_PRIO,
+ "SGE too many priority ingress contexts", -1, 0 },
+ { ERR_EGR_CTXT_PRIO,
+ "SGE too many priority egress contexts", -1, 0 },
+ { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
+ { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
+ { 0 }
+ };
+
+ v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
+ ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
+ if (v) {
+ dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
+ (unsigned long long)v);
+ t4_write_reg(adapter, SGE_INT_CAUSE1, v);
+ t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
+ }
+
+ if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
+ v != 0)
+ t4_fatal_err(adapter);
+}
+
+/*
+ * CIM interrupt handler.
+ */
+static void cim_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info cim_intr_info[] = {
+ { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
+ { OBQPARERR, "CIM OBQ parity error", -1, 1 },
+ { IBQPARERR, "CIM IBQ parity error", -1, 1 },
+ { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
+ { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
+ { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
+ { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
+ { 0 }
+ };
+ static const struct intr_info cim_upintr_info[] = {
+ { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
+ { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
+ { ILLWRINT, "CIM illegal write", -1, 1 },
+ { ILLRDINT, "CIM illegal read", -1, 1 },
+ { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
+ { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
+ { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
+ { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
+ { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
+ { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
+ { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
+ { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
+ { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
+ { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
+ { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
+ { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
+ { SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
+ { SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
+ { BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
+ { BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
+ { SGLRDPLINT, "CIM single read from PL space", -1, 1 },
+ { SGLWRPLINT, "CIM single write to PL space", -1, 1 },
+ { BLKRDPLINT, "CIM block read from PL space", -1, 1 },
+ { BLKWRPLINT, "CIM block write to PL space", -1, 1 },
+ { REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
+ { RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
+ { TIMEOUTINT, "CIM PIF timeout", -1, 1 },
+ { TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
+ { 0 }
+ };
+
+ int fat;
+
+ fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
+ cim_intr_info) +
+ t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
+ cim_upintr_info);
+ if (fat)
+ t4_fatal_err(adapter);
+}
+
+/*
+ * ULP RX interrupt handler.
+ */
+static void ulprx_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info ulprx_intr_info[] = {
+ { 0x1800000, "ULPRX context error", -1, 1 },
+ { 0x7fffff, "ULPRX parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * ULP TX interrupt handler.
+ */
+static void ulptx_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info ulptx_intr_info[] = {
+ { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
+ 0 },
+ { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
+ 0 },
+ { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
+ 0 },
+ { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
+ 0 },
+ { 0xfffffff, "ULPTX parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * PM TX interrupt handler.
+ */
+static void pmtx_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info pmtx_intr_info[] = {
+ { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
+ { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
+ { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
+ { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
+ { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
+ { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
+ { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
+ { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
+ { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * PM RX interrupt handler.
+ */
+static void pmrx_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info pmrx_intr_info[] = {
+ { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
+ { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
+ { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
+ { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
+ { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
+ { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * CPL switch interrupt handler.
+ */
+static void cplsw_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info cplsw_intr_info[] = {
+ { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
+ { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
+ { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
+ { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
+ { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
+ { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * LE interrupt handler.
+ */
+static void le_intr_handler(struct adapter *adap)
+{
+ static const struct intr_info le_intr_info[] = {
+ { LIPMISS, "LE LIP miss", -1, 0 },
+ { LIP0, "LE 0 LIP error", -1, 0 },
+ { PARITYERR, "LE parity error", -1, 1 },
+ { UNKNOWNCMD, "LE unknown command", -1, 1 },
+ { REQQPARERR, "LE request queue parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
+ t4_fatal_err(adap);
+}
+
+/*
+ * MPS interrupt handler.
+ */
+static void mps_intr_handler(struct adapter *adapter)
+{
+ static const struct intr_info mps_rx_intr_info[] = {
+ { 0xffffff, "MPS Rx parity error", -1, 1 },
+ { 0 }
+ };
+ static const struct intr_info mps_tx_intr_info[] = {
+ { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
+ { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
+ { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
+ { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
+ { BUBBLE, "MPS Tx underflow", -1, 1 },
+ { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
+ { FRMERR, "MPS Tx framing error", -1, 1 },
+ { 0 }
+ };
+ static const struct intr_info mps_trc_intr_info[] = {
+ { FILTMEM, "MPS TRC filter parity error", -1, 1 },
+ { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
+ { MISCPERR, "MPS TRC misc parity error", -1, 1 },
+ { 0 }
+ };
+ static const struct intr_info mps_stat_sram_intr_info[] = {
+ { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
+ { 0 }
+ };
+ static const struct intr_info mps_stat_tx_intr_info[] = {
+ { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
+ { 0 }
+ };
+ static const struct intr_info mps_stat_rx_intr_info[] = {
+ { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
+ { 0 }
+ };
+ static const struct intr_info mps_cls_intr_info[] = {
+ { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
+ { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
+ { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
+ { 0 }
+ };
+
+ int fat;
+
+ fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
+ mps_rx_intr_info) +
+ t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
+ mps_tx_intr_info) +
+ t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
+ mps_trc_intr_info) +
+ t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
+ mps_stat_sram_intr_info) +
+ t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
+ mps_stat_tx_intr_info) +
+ t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
+ mps_stat_rx_intr_info) +
+ t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
+ mps_cls_intr_info);
+
+ t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
+ RXINT | TXINT | STATINT);
+ t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
+ if (fat)
+ t4_fatal_err(adapter);
+}
+
+#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
+
+/*
+ * EDC/MC interrupt handler.
+ */
+static void mem_intr_handler(struct adapter *adapter, int idx)
+{
+ static const char name[3][5] = { "EDC0", "EDC1", "MC" };
+
+ unsigned int addr, cnt_addr, v;
+
+ if (idx <= MEM_EDC1) {
+ addr = EDC_REG(EDC_INT_CAUSE, idx);
+ cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
+ } else {
+ addr = MC_INT_CAUSE;
+ cnt_addr = MC_ECC_STATUS;
+ }
+
+ v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
+ if (v & PERR_INT_CAUSE)
+ dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
+ name[idx]);
+ if (v & ECC_CE_INT_CAUSE) {
+ u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
+
+ t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
+ if (printk_ratelimit())
+ dev_warn(adapter->pdev_dev,
+ "%u %s correctable ECC data error%s\n",
+ cnt, name[idx], cnt > 1 ? "s" : "");
+ }
+ if (v & ECC_UE_INT_CAUSE)
+ dev_alert(adapter->pdev_dev,
+ "%s uncorrectable ECC data error\n", name[idx]);
+
+ t4_write_reg(adapter, addr, v);
+ if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * MA interrupt handler.
+ */
+static void ma_intr_handler(struct adapter *adap)
+{
+ u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
+
+ if (status & MEM_PERR_INT_CAUSE)
+ dev_alert(adap->pdev_dev,
+ "MA parity error, parity status %#x\n",
+ t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
+ if (status & MEM_WRAP_INT_CAUSE) {
+ v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
+ dev_alert(adap->pdev_dev, "MA address wrap-around error by "
+ "client %u to address %#x\n",
+ MEM_WRAP_CLIENT_NUM_GET(v),
+ MEM_WRAP_ADDRESS_GET(v) << 4);
+ }
+ t4_write_reg(adap, MA_INT_CAUSE, status);
+ t4_fatal_err(adap);
+}
+
+/*
+ * SMB interrupt handler.
+ */
+static void smb_intr_handler(struct adapter *adap)
+{
+ static const struct intr_info smb_intr_info[] = {
+ { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
+ { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
+ { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
+ t4_fatal_err(adap);
+}
+
+/*
+ * NC-SI interrupt handler.
+ */
+static void ncsi_intr_handler(struct adapter *adap)
+{
+ static const struct intr_info ncsi_intr_info[] = {
+ { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
+ { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
+ { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
+ { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
+ t4_fatal_err(adap);
+}
+
+/*
+ * XGMAC interrupt handler.
+ */
+static void xgmac_intr_handler(struct adapter *adap, int port)
+{
+ u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
+
+ v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
+ if (!v)
+ return;
+
+ if (v & TXFIFO_PRTY_ERR)
+ dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
+ port);
+ if (v & RXFIFO_PRTY_ERR)
+ dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
+ port);
+ t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
+ t4_fatal_err(adap);
+}
+
+/*
+ * PL interrupt handler.
+ */
+static void pl_intr_handler(struct adapter *adap)
+{
+ static const struct intr_info pl_intr_info[] = {
+ { FATALPERR, "T4 fatal parity error", -1, 1 },
+ { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
+ t4_fatal_err(adap);
+}
+
+#define PF_INTR_MASK (PFSW)
+#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
+ EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
+ CPL_SWITCH | SGE | ULP_TX)
+
+/**
+ * t4_slow_intr_handler - control path interrupt handler
+ * @adapter: the adapter
+ *
+ * T4 interrupt handler for non-data global interrupt events, e.g., errors.
+ * The designation 'slow' is because it involves register reads, while
+ * data interrupts typically don't involve any MMIOs.
+ */
+int t4_slow_intr_handler(struct adapter *adapter)
+{
+ u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
+
+ if (!(cause & GLBL_INTR_MASK))
+ return 0;
+ if (cause & CIM)
+ cim_intr_handler(adapter);
+ if (cause & MPS)
+ mps_intr_handler(adapter);
+ if (cause & NCSI)
+ ncsi_intr_handler(adapter);
+ if (cause & PL)
+ pl_intr_handler(adapter);
+ if (cause & SMB)
+ smb_intr_handler(adapter);
+ if (cause & XGMAC0)
+ xgmac_intr_handler(adapter, 0);
+ if (cause & XGMAC1)
+ xgmac_intr_handler(adapter, 1);
+ if (cause & XGMAC_KR0)
+ xgmac_intr_handler(adapter, 2);
+ if (cause & XGMAC_KR1)
+ xgmac_intr_handler(adapter, 3);
+ if (cause & PCIE)
+ pcie_intr_handler(adapter);
+ if (cause & MC)
+ mem_intr_handler(adapter, MEM_MC);
+ if (cause & EDC0)
+ mem_intr_handler(adapter, MEM_EDC0);
+ if (cause & EDC1)
+ mem_intr_handler(adapter, MEM_EDC1);
+ if (cause & LE)
+ le_intr_handler(adapter);
+ if (cause & TP)
+ tp_intr_handler(adapter);
+ if (cause & MA)
+ ma_intr_handler(adapter);
+ if (cause & PM_TX)
+ pmtx_intr_handler(adapter);
+ if (cause & PM_RX)
+ pmrx_intr_handler(adapter);
+ if (cause & ULP_RX)
+ ulprx_intr_handler(adapter);
+ if (cause & CPL_SWITCH)
+ cplsw_intr_handler(adapter);
+ if (cause & SGE)
+ sge_intr_handler(adapter);
+ if (cause & ULP_TX)
+ ulptx_intr_handler(adapter);
+
+ /* Clear the interrupts just processed for which we are the master. */
+ t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
+ (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
+ return 1;
+}
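+
+/*
+ * Hedged sketch (not part of this patch) of how an interrupt service
+ * routine could chain to the slow-path handler above; passing the adapter
+ * as the IRQ cookie is an assumption for illustration.
+ */
+static irqreturn_t example_nondata_isr(int irq, void *cookie)
+{
+        struct adapter *adap = cookie;
+
+        return t4_slow_intr_handler(adap) ? IRQ_HANDLED : IRQ_NONE;
+}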
+
+/**
+ * t4_intr_enable - enable interrupts
+ * @adapter: the adapter whose interrupts should be enabled
+ *
+ * Enable PF-specific interrupts for the calling function and the top-level
+ * interrupt concentrator for global interrupts. Interrupts are already
+ * enabled at each module; here we just enable the roots of the interrupt
+ * hierarchies.
+ *
+ * Note: this function should be called only when the driver manages
+ * non-PF-specific interrupts from the various HW modules. Only one PCI
+ * function at a time should be doing this.
+ */
+void t4_intr_enable(struct adapter *adapter)
+{
+ u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
+
+ t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
+ ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
+ ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
+ ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
+ ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
+ ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
+ ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
+ EGRESS_SIZE_ERR);
+ t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
+ t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
+}
+
+/**
+ * t4_intr_disable - disable interrupts
+ * @adapter: the adapter whose interrupts should be disabled
+ *
+ * Disable interrupts. We only disable the top-level interrupt
+ * concentrators. The caller must be a PCI function managing global
+ * interrupts.
+ */
+void t4_intr_disable(struct adapter *adapter)
+{
+ u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
+
+ t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
+ t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
+}
+
+/**
+ * hash_mac_addr - return the hash value of a MAC address
+ * @addr: the 48-bit Ethernet MAC address
+ *
+ * Hashes a MAC address according to the hash function used by HW inexact
+ * (hash) address matching.
+ */
+static int hash_mac_addr(const u8 *addr)
+{
+ u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
+ u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
+ a ^= b;
+ a ^= (a >> 12);
+ a ^= (a >> 6);
+ return a & 0x3f;
+}
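+
+/*
+ * Illustrative helper (not part of this patch): hash_mac_addr() returns a
+ * bucket in 0..63, so a set of addresses folds into the 64-bit vector that
+ * t4_set_addr_hash() below programs into HW, mirroring what
+ * t4_alloc_mac_filt() does for addresses that overflow the exact filters.
+ */
+static u64 example_build_hash_vec(const u8 (*addrs)[ETH_ALEN], int naddrs)
+{
+        u64 vec = 0;
+        int i;
+
+        for (i = 0; i < naddrs; i++)
+                vec |= 1ULL << hash_mac_addr(addrs[i]);
+        return vec;
+}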
+
+/**
+ * t4_config_rss_range - configure a portion of the RSS mapping table
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @viid: virtual interface whose RSS subtable is to be written
+ * @start: start entry in the table to write
+ * @n: how many table entries to write
+ * @rspq: values for the response queue lookup table
+ * @nrspq: number of values in @rspq
+ *
+ * Programs the selected part of the VI's RSS mapping table with the
+ * provided values. If @nrspq < @n the supplied values are used repeatedly
+ * until the full table range is populated.
+ *
+ * The caller must ensure the values in @rspq are in the range allowed for
+ * @viid.
+ */
+int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
+ int start, int n, const u16 *rspq, unsigned int nrspq)
+{
+ int ret;
+ const u16 *rsp = rspq;
+ const u16 *rsp_end = rspq + nrspq;
+ struct fw_rss_ind_tbl_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE |
+ FW_RSS_IND_TBL_CMD_VIID(viid));
+ cmd.retval_len16 = htonl(FW_LEN16(cmd));
+
+ /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
+ while (n > 0) {
+ int nq = min(n, 32);
+ __be32 *qp = &cmd.iq0_to_iq2;
+
+ cmd.niqid = htons(nq);
+ cmd.startidx = htons(start);
+
+ start += nq;
+ n -= nq;
+
+ while (nq > 0) {
+ unsigned int v;
+
+ v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
+ if (++rsp >= rsp_end)
+ rsp = rspq;
+ v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
+ if (++rsp >= rsp_end)
+ rsp = rspq;
+ v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
+ if (++rsp >= rsp_end)
+ rsp = rspq;
+
+ *qp++ = htonl(v);
+ nq -= 3;
+ }
+
+ ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
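+
+/*
+ * Minimal usage sketch (illustrative only): replicate four response queue
+ * ids across a VI's whole RSS slice. The queue ids and slice size are the
+ * caller's; only the API comes from t4_config_rss_range() above, which
+ * cycles through @rspq when nrspq < n.
+ */
+static int example_rss_spread(struct adapter *adap, int mbox,
+                              unsigned int viid, int rss_size,
+                              const u16 rspq[4])
+{
+        return t4_config_rss_range(adap, mbox, viid, 0, rss_size, rspq, 4);
+}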
+
+/**
+ * t4_config_glbl_rss - configure the global RSS mode
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @mode: global RSS mode
+ * @flags: mode-specific flags
+ *
+ * Sets the global RSS mode.
+ */
+int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
+ unsigned int flags)
+{
+ struct fw_rss_glb_config_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ c.retval_len16 = htonl(FW_LEN16(c));
+ if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
+ c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+ } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
+ c.u.basicvirtual.mode_pkd =
+ htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+ c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
+ } else
+ return -EINVAL;
+ return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_tp_get_tcp_stats - read TP's TCP MIB counters
+ * @adap: the adapter
+ * @v4: holds the TCP/IP counter values
+ * @v6: holds the TCP/IPv6 counter values
+ *
+ * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
+ * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
+ */
+void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
+ struct tp_tcp_stats *v6)
+{
+ u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
+
+#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
+#define STAT(x) val[STAT_IDX(x)]
+#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
+
+ if (v4) {
+ t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
+ ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
+ v4->tcpOutRsts = STAT(OUT_RST);
+ v4->tcpInSegs = STAT64(IN_SEG);
+ v4->tcpOutSegs = STAT64(OUT_SEG);
+ v4->tcpRetransSegs = STAT64(RXT_SEG);
+ }
+ if (v6) {
+ t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
+ ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
+ v6->tcpOutRsts = STAT(OUT_RST);
+ v6->tcpInSegs = STAT64(IN_SEG);
+ v6->tcpOutSegs = STAT64(OUT_SEG);
+ v6->tcpRetransSegs = STAT64(RXT_SEG);
+ }
+#undef STAT64
+#undef STAT
+#undef STAT_IDX
+}
+
+/**
+ * t4_read_mtu_tbl - returns the values in the HW path MTU table
+ * @adap: the adapter
+ * @mtus: where to store the MTU values
+ * @mtu_log: where to store the MTU base-2 log (may be %NULL)
+ *
+ * Reads the HW path MTU table.
+ */
+void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
+{
+ u32 v;
+ int i;
+
+ for (i = 0; i < NMTUS; ++i) {
+ t4_write_reg(adap, TP_MTU_TABLE,
+ MTUINDEX(0xff) | MTUVALUE(i));
+ v = t4_read_reg(adap, TP_MTU_TABLE);
+ mtus[i] = MTUVALUE_GET(v);
+ if (mtu_log)
+ mtu_log[i] = MTUWIDTH_GET(v);
+ }
+}
+
+/**
+ * init_cong_ctrl - initialize congestion control parameters
+ * @a: the alpha values for congestion control
+ * @b: the beta values for congestion control
+ *
+ * Initialize the congestion control parameters.
+ */
+static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
+{
+ a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
+ a[9] = 2;
+ a[10] = 3;
+ a[11] = 4;
+ a[12] = 5;
+ a[13] = 6;
+ a[14] = 7;
+ a[15] = 8;
+ a[16] = 9;
+ a[17] = 10;
+ a[18] = 14;
+ a[19] = 17;
+ a[20] = 21;
+ a[21] = 25;
+ a[22] = 30;
+ a[23] = 35;
+ a[24] = 45;
+ a[25] = 60;
+ a[26] = 80;
+ a[27] = 100;
+ a[28] = 200;
+ a[29] = 300;
+ a[30] = 400;
+ a[31] = 500;
+
+ b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
+ b[9] = b[10] = 1;
+ b[11] = b[12] = 2;
+ b[13] = b[14] = b[15] = b[16] = 3;
+ b[17] = b[18] = b[19] = b[20] = b[21] = 4;
+ b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
+ b[28] = b[29] = 6;
+ b[30] = b[31] = 7;
+}
+
+/* The minimum additive increment value for the congestion control table */
+#define CC_MIN_INCR 2U
+
+/**
+ * t4_load_mtus - write the MTU and congestion control HW tables
+ * @adap: the adapter
+ * @mtus: the values for the MTU table
+ * @alpha: the values for the congestion control alpha parameter
+ * @beta: the values for the congestion control beta parameter
+ *
+ * Write the HW MTU table with the supplied MTUs and the high-speed
+ * congestion control table with the supplied alpha, beta, and MTUs.
+ * We write the two tables together because the additive increments
+ * depend on the MTUs.
+ */
+void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
+ const unsigned short *alpha, const unsigned short *beta)
+{
+ static const unsigned int avg_pkts[NCCTRL_WIN] = {
+ 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
+ 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
+ 28672, 40960, 57344, 81920, 114688, 163840, 229376
+ };
+
+ unsigned int i, w;
+
+ for (i = 0; i < NMTUS; ++i) {
+ unsigned int mtu = mtus[i];
+ unsigned int log2 = fls(mtu);
+
+ if (!(mtu & ((1 << log2) >> 2))) /* round */
+ log2--;
+ t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
+ MTUWIDTH(log2) | MTUVALUE(mtu));
+
+ for (w = 0; w < NCCTRL_WIN; ++w) {
+ unsigned int inc;
+
+ inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
+ CC_MIN_INCR);
+
+ t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
+ (w << 16) | (beta[w] << 13) | inc);
+ }
+ }
+}
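+
+/*
+ * Worked example (illustrative) of the additive-increment computation
+ * above: for mtu = 1500, alpha[w] = 2, and avg_pkts[w] = 8 the increment
+ * is ((1500 - 40) * 2) / 8 = 365; very small products are clamped up to
+ * CC_MIN_INCR.
+ */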
+
+/**
+ * get_mps_bg_map - return the buffer groups associated with a port
+ * @adap: the adapter
+ * @idx: the port index
+ *
+ * Returns a bitmap indicating which MPS buffer groups are associated
+ * with the given port. Bit i is set if buffer group i is used by the
+ * port.
+ */
+static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
+{
+ u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
+
+ if (n == 0)
+ return idx == 0 ? 0xf : 0;
+ if (n == 1)
+ return idx < 2 ? (3 << (2 * idx)) : 0;
+ return 1 << idx;
+}
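+
+/*
+ * Concrete values of the map above: when the NUMPORTS field reads 0,
+ * port 0 owns all four buffer groups (0xf); when it reads 1, port 0 gets
+ * groups 0-1 (0x3) and port 1 gets groups 2-3 (0xc); otherwise each port
+ * owns only the buffer group matching its index.
+ */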
+
+/**
+ * t4_get_port_stats - collect port statistics
+ * @adap: the adapter
+ * @idx: the port index
+ * @p: the stats structure to fill
+ *
+ * Collect statistics related to the given port from HW.
+ */
+void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
+{
+ u32 bgmap = get_mps_bg_map(adap, idx);
+
+#define GET_STAT(name) \
+ t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
+#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
+
+ p->tx_octets = GET_STAT(TX_PORT_BYTES);
+ p->tx_frames = GET_STAT(TX_PORT_FRAMES);
+ p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
+ p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
+ p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
+ p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
+ p->tx_frames_64 = GET_STAT(TX_PORT_64B);
+ p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
+ p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
+ p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
+ p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
+ p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
+ p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
+ p->tx_drop = GET_STAT(TX_PORT_DROP);
+ p->tx_pause = GET_STAT(TX_PORT_PAUSE);
+ p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
+ p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
+ p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
+ p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
+ p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
+ p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
+ p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
+ p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
+
+ p->rx_octets = GET_STAT(RX_PORT_BYTES);
+ p->rx_frames = GET_STAT(RX_PORT_FRAMES);
+ p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
+ p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
+ p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
+ p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
+ p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
+ p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
+ p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
+ p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
+ p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
+ p->rx_frames_64 = GET_STAT(RX_PORT_64B);
+ p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
+ p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
+ p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
+ p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
+ p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
+ p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
+ p->rx_pause = GET_STAT(RX_PORT_PAUSE);
+ p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
+ p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
+ p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
+ p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
+ p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
+ p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
+ p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
+ p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
+
+ p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
+ p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
+ p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
+ p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
+ p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
+ p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
+ p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
+ p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
+
+#undef GET_STAT
+#undef GET_STAT_COM
+}
+
+/**
+ * t4_wol_magic_enable - enable/disable magic packet WoL
+ * @adap: the adapter
+ * @port: the physical port index
+ * @addr: MAC address expected in magic packets, %NULL to disable
+ *
+ * Enables/disables magic packet wake-on-LAN for the selected port.
+ */
+void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
+ const u8 *addr)
+{
+ if (addr) {
+ t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
+ (addr[2] << 24) | (addr[3] << 16) |
+ (addr[4] << 8) | addr[5]);
+ t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
+ (addr[0] << 8) | addr[1]);
+ }
+ t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
+ addr ? MAGICEN : 0);
+}
+
+/**
+ * t4_wol_pat_enable - enable/disable pattern-based WoL
+ * @adap: the adapter
+ * @port: the physical port index
+ * @map: bitmap of which HW pattern filters to set
+ * @mask0: byte mask for bytes 0-63 of a packet
+ * @mask1: byte mask for bytes 64-127 of a packet
+ * @crc: Ethernet CRC for selected bytes
+ * @enable: enable/disable switch
+ *
+ * Sets the pattern filters indicated in @map to mask out the bytes
+ * specified in @mask0/@mask1 in received packets and compare the CRC of
+ * the resulting packet against @crc. If @enable is %true pattern-based
+ * WoL is enabled, otherwise disabled.
+ */
+int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
+ u64 mask0, u64 mask1, unsigned int crc, bool enable)
+{
+ int i;
+
+ if (!enable) {
+ t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
+ PATEN, 0);
+ return 0;
+ }
+ if (map > 0xff)
+ return -EINVAL;
+
+#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
+
+ t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
+ t4_write_reg(adap, EPIO_REG(DATA2), mask1);
+ t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
+
+ for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
+ if (!(map & 1))
+ continue;
+
+ /* write byte masks */
+ t4_write_reg(adap, EPIO_REG(DATA0), mask0);
+ t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
+ t4_read_reg(adap, EPIO_REG(OP)); /* flush */
+ if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
+ return -ETIMEDOUT;
+
+ /* write CRC */
+ t4_write_reg(adap, EPIO_REG(DATA0), crc);
+ t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
+ t4_read_reg(adap, EPIO_REG(OP)); /* flush */
+ if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
+ return -ETIMEDOUT;
+ }
+#undef EPIO_REG
+
+ t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
+ return 0;
+}
+
+#define INIT_CMD(var, cmd, rd_wr) do { \
+ (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
+ FW_CMD_REQUEST | FW_CMD_##rd_wr); \
+ (var).retval_len16 = htonl(FW_LEN16(var)); \
+} while (0)
+
+/**
+ * t4_mdio_rd - read a PHY register through MDIO
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @phy_addr: the PHY address
+ * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
+ * @reg: the register to read
+ * @valp: where to store the value
+ *
+ * Issues a FW command through the given mailbox to read a PHY register.
+ */
+int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+ unsigned int mmd, unsigned int reg, u16 *valp)
+{
+ int ret;
+ struct fw_ldst_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
+ FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+ c.cycles_to_len16 = htonl(FW_LEN16(c));
+ c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
+ FW_LDST_CMD_MMD(mmd));
+ c.u.mdio.raddr = htons(reg);
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0)
+ *valp = ntohs(c.u.mdio.rval);
+ return ret;
+}
+
+/**
+ * t4_mdio_wr - write a PHY register through MDIO
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @phy_addr: the PHY address
+ * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
+ * @reg: the register to write
+ * @valp: value to write
+ *
+ * Issues a FW command through the given mailbox to write a PHY register.
+ */
+int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+ unsigned int mmd, unsigned int reg, u16 val)
+{
+ struct fw_ldst_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+ c.cycles_to_len16 = htonl(FW_LEN16(c));
+ c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
+ FW_LDST_CMD_MMD(mmd));
+ c.u.mdio.raddr = htons(reg);
+ c.u.mdio.rval = htons(val);
+
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
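+
+/*
+ * Usage sketch (illustrative only): read a clause-45 PHY's 32-bit ID
+ * through the FW mailbox. MDIO_MMD_PMAPMD, MDIO_DEVID1, and MDIO_DEVID2
+ * are the standard <linux/mdio.h> constants; the PHY address of 0 is an
+ * assumption.
+ */
+static int example_read_phy_id(struct adapter *adap, unsigned int mbox,
+                               u32 *id)
+{
+        u16 hi, lo;
+        int ret;
+
+        ret = t4_mdio_rd(adap, mbox, 0, MDIO_MMD_PMAPMD, MDIO_DEVID1, &hi);
+        if (!ret)
+                ret = t4_mdio_rd(adap, mbox, 0, MDIO_MMD_PMAPMD,
+                                 MDIO_DEVID2, &lo);
+        if (!ret)
+                *id = ((u32)hi << 16) | lo;
+        return ret;
+}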
+
+/**
+ * t4_fw_hello - establish communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @evt_mbox: mailbox to receive async FW events
+ * @master: specifies the caller's willingness to be the device master
+ * @state: returns the current device state
+ *
+ * Issues a command to establish communication with FW.
+ */
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+ enum dev_master master, enum dev_state *state)
+{
+ int ret;
+ struct fw_hello_cmd c;
+
+ INIT_CMD(c, HELLO, WRITE);
+ c.err_to_mbasyncnot = htonl(
+ FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
+ FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
+ FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
+ FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0 && state) {
+ u32 v = ntohl(c.err_to_mbasyncnot);
+ if (v & FW_HELLO_CMD_INIT)
+ *state = DEV_STATE_INIT;
+ else if (v & FW_HELLO_CMD_ERR)
+ *state = DEV_STATE_ERR;
+ else
+ *state = DEV_STATE_UNINIT;
+ }
+ return ret;
+}
+
+/**
+ * t4_fw_bye - end communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to terminate communication with FW.
+ */
+int t4_fw_bye(struct adapter *adap, unsigned int mbox)
+{
+ struct fw_bye_cmd c;
+
+ INIT_CMD(c, BYE, WRITE);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_early_init - ask FW to initialize the device
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to FW to partially initialize the device. This
+ * performs initialization that generally doesn't depend on user input.
+ */
+int t4_early_init(struct adapter *adap, unsigned int mbox)
+{
+ struct fw_initialize_cmd c;
+
+ INIT_CMD(c, INITIALIZE, WRITE);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
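+
+/*
+ * Hedged sketch (not part of this patch) of the usual bring-up order with
+ * the mailbox commands above: say hello, then initialize the device only
+ * if no other function already has. MASTER_MAY is assumed to be the
+ * neutral enum dev_master value alongside MASTER_CANT/MASTER_MUST.
+ */
+static int example_fw_attach(struct adapter *adap, unsigned int mbox)
+{
+        enum dev_state state;
+        int ret;
+
+        ret = t4_fw_hello(adap, mbox, mbox, MASTER_MAY, &state);
+        if (ret)
+                return ret;
+        if (state == DEV_STATE_UNINIT)
+                ret = t4_early_init(adap, mbox);
+        return ret;
+}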
+
+/**
+ * t4_fw_reset - issue a reset to FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @reset: specifies the type of reset to perform
+ *
+ * Issues a reset command of the specified type to FW.
+ */
+int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
+{
+ struct fw_reset_cmd c;
+
+ INIT_CMD(c, RESET, WRITE);
+ c.val = htonl(reset);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_query_params - query FW or device parameters
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF
+ * @vf: the VF
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @val: the parameter values
+ *
+ * Reads the value of FW or device parameters. Up to 7 parameters can be
+ * queried at once.
+ */
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val)
+{
+ int i, ret;
+ struct fw_params_cmd c;
+ __be32 *p = &c.param[0].mnem;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
+ FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
+ FW_PARAMS_CMD_VFN(vf));
+ c.retval_len16 = htonl(FW_LEN16(c));
+ for (i = 0; i < nparams; i++, p += 2)
+ *p = htonl(*params++);
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0)
+ for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
+ *val++ = ntohl(*p);
+ return ret;
+}
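+
+/*
+ * Trivial usage sketch (illustrative only): query a single parameter.
+ * The caller supplies @param already encoded as a FW parameter mnemonic
+ * per the FW API header.
+ */
+static int example_query_one(struct adapter *adap, unsigned int mbox,
+                             unsigned int pf, unsigned int vf,
+                             u32 param, u32 *val)
+{
+        return t4_query_params(adap, mbox, pf, vf, 1, &param, val);
+}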
+
+/**
+ * t4_set_params - sets FW or device parameters
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF
+ * @vf: the VF
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @val: the parameter values
+ *
+ * Sets the value of FW or device parameters. Up to 7 parameters can be
+ * specified at once.
+ */
+int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ const u32 *val)
+{
+ struct fw_params_cmd c;
+ __be32 *p = &c.param[0].mnem;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
+ FW_PARAMS_CMD_VFN(vf));
+ c.retval_len16 = htonl(FW_LEN16(c));
+ while (nparams--) {
+ *p++ = htonl(*params++);
+ *p++ = htonl(*val++);
+ }
+
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_cfg_pfvf - configure PF/VF resource limits
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF being configured
+ * @vf: the VF being configured
+ * @txq: the max number of egress queues
+ * @txq_eth_ctrl: the max number of egress Ethernet or control queues
+ * @rxqi: the max number of interrupt-capable ingress queues
+ * @rxq: the max number of interruptless ingress queues
+ * @tc: the PCI traffic class
+ * @vi: the max number of virtual interfaces
+ * @cmask: the channel access rights mask for the PF/VF
+ * @pmask: the port access rights mask for the PF/VF
+ * @nexact: the maximum number of exact MPS filters
+ * @rcaps: read capabilities
+ * @wxcaps: write/execute capabilities
+ *
+ * Configures resource limits and capabilities for a physical or virtual
+ * function.
+ */
+int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
+ unsigned int rxqi, unsigned int rxq, unsigned int tc,
+ unsigned int vi, unsigned int cmask, unsigned int pmask,
+ unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
+{
+ struct fw_pfvf_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
+ FW_PFVF_CMD_VFN(vf));
+ c.retval_len16 = htonl(FW_LEN16(c));
+ c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
+ FW_PFVF_CMD_NIQ(rxq));
+ c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
+ FW_PFVF_CMD_PMASK(pmask) |
+ FW_PFVF_CMD_NEQ(txq));
+ c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
+ FW_PFVF_CMD_NEXACTF(nexact));
+ c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
+ FW_PFVF_CMD_WX_CAPS(wxcaps) |
+ FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_alloc_vi - allocate a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: physical port associated with the VI
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @nmac: number of MAC addresses needed (1 to 5)
+ * @mac: the MAC addresses of the VI
+ * @rss_size: size of RSS table slice associated with this VI
+ *
+ * Allocates a virtual interface for the given physical port. If @mac is
+ * not %NULL it contains the MAC addresses of the VI as assigned by FW.
+ * @mac should be large enough to hold @nmac Ethernet addresses; they are
+ * stored consecutively, so the space needed is @nmac * 6 bytes.
+ * Returns a negative error number or the non-negative VI id.
+ */
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+ unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+ unsigned int *rss_size)
+{
+ int ret;
+ struct fw_vi_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_CMD_EXEC |
+ FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
+ c.portid_pkd = FW_VI_CMD_PORTID(port);
+ c.nmac = nmac - 1;
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret)
+ return ret;
+
+ if (mac) {
+ memcpy(mac, c.mac, sizeof(c.mac));
+ switch (nmac) {
+ case 5:
+ memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
+ /* fall through */
+ case 4:
+ memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
+ /* fall through */
+ case 3:
+ memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
+ /* fall through */
+ case 2:
+ memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
+ }
+ }
+ if (rss_size)
+ *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
+ return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
+}
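+
+/*
+ * Illustrative sketch (not part of this patch): allocate a VI with one
+ * MAC address and turn on its Rx/Tx paths with t4_enable_vi(), which is
+ * defined further below (a forward declaration is assumed).
+ */
+static int example_bringup_vi(struct adapter *adap, unsigned int mbox,
+                              unsigned int port, unsigned int pf,
+                              unsigned int vf, u8 *mac)
+{
+        unsigned int rss_size;
+        int viid = t4_alloc_vi(adap, mbox, port, pf, vf, 1, mac, &rss_size);
+
+        if (viid < 0)
+                return viid;
+        return t4_enable_vi(adap, mbox, viid, true, true);
+}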
+
+/**
+ * t4_set_rxmode - set Rx properties of a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @mtu: the new MTU or -1
+ * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
+ * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
+ * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
+ * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sets Rx properties of a virtual interface.
+ */
+int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int mtu, int promisc, int all_multi, int bcast, int vlanex,
+ bool sleep_ok)
+{
+ struct fw_vi_rxmode_cmd c;
+
+ /* convert to FW values */
+ if (mtu < 0)
+ mtu = FW_RXMODE_MTU_NO_CHG;
+ if (promisc < 0)
+ promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
+ if (all_multi < 0)
+ all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
+ if (bcast < 0)
+ bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
+ if (vlanex < 0)
+ vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
+ c.retval_len16 = htonl(FW_LEN16(c));
+ c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
+ FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
+ FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
+ FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
+ FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
+ return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
+}
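+
+/*
+ * Usage note (illustrative): the -1 sentinels make single-property
+ * updates cheap, e.g. toggling promiscuous mode while leaving the MTU,
+ * all-multi, broadcast, and VLAN extraction settings untouched:
+ *
+ *      t4_set_rxmode(adap, mbox, viid, -1, on, -1, -1, -1, true);
+ */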
+
+/**
+ * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @free: if true any existing filters for this VI id are first removed
+ * @naddr: the number of MAC addresses to allocate filters for (up to 7)
+ * @addr: the MAC address(es)
+ * @idx: where to store the index of each allocated filter
+ * @hash: pointer to hash address filter bitmap
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Allocates an exact-match filter for each of the supplied addresses and
+ * sets it to the corresponding address. If @idx is not %NULL it should
+ * have at least @naddr entries, each of which will be set to the index of
+ * the filter allocated for the corresponding MAC address. If a filter
+ * could not be allocated for an address its index is set to 0xffff.
+ * If @hash is not %NULL, addresses that fail to allocate an exact filter
+ * are hashed and used to update the hash-filter bitmap pointed at by @hash.
+ *
+ * Returns a negative error number or the number of filters allocated.
+ */
+int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
+ unsigned int viid, bool free, unsigned int naddr,
+ const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
+{
+ int i, ret;
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_exact *p;
+
+ if (naddr > 7)
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
+ FW_VI_MAC_CMD_VIID(viid));
+ c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
+ FW_CMD_LEN16((naddr + 2) / 2));
+
+ for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
+ p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
+ FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+ memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
+ }
+
+ ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
+ if (ret)
+ return ret;
+
+ for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
+ u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
+
+ if (idx)
+ idx[i] = index >= NEXACT_MAC ? 0xffff : index;
+ if (index < NEXACT_MAC)
+ ret++;
+ else if (hash)
+ *hash |= (1ULL << hash_mac_addr(addr[i]));
+ }
+ return ret;
+}
+
+/**
+ * t4_change_mac - modifies the exact-match filter for a MAC address
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @idx: index of existing filter for old value of MAC address, or -1
+ * @addr: the new MAC address value
+ * @persist: whether a new MAC allocation should be persistent
+ * @add_smt: if true also add the address to the HW SMT
+ *
+ * Modifies an exact-match filter and sets it to the new MAC address.
+ * Note that in general it is not possible to modify the value of a given
+ * filter in place, so the generic way to change an address filter is to
+ * free the one used by the old address value and allocate a new filter
+ * for the new value. @idx can be -1 if the address is a new addition.
+ *
+ * Returns a negative error number or the index of the filter with the new
+ * MAC value.
+ */
+int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int idx, const u8 *addr, bool persist, bool add_smt)
+{
+ int ret, mode;
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_exact *p = c.u.exact;
+
+ if (idx < 0) /* new allocation */
+ idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
+ mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
+ c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
+ p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
+ FW_VI_MAC_CMD_SMAC_RESULT(mode) |
+ FW_VI_MAC_CMD_IDX(idx));
+ memcpy(p->macaddr, addr, sizeof(p->macaddr));
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0) {
+ ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
+ if (ret >= NEXACT_MAC)
+ ret = -ENOMEM;
+ }
+ return ret;
+}
+
+/**
+ * t4_set_addr_hash - program the MAC inexact-match hash filter
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @ucast: whether the hash filter should also match unicast addresses
+ * @vec: the value to be written to the hash filter
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Sets the 64-bit inexact-match hash filter for a virtual interface.
+ */
+int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ bool ucast, u64 vec, bool sleep_ok)
+{
+ struct fw_vi_mac_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
+ c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
+ FW_VI_MAC_CMD_HASHUNIEN(ucast) |
+ FW_CMD_LEN16(1));
+ c.u.hash.hashvec = cpu_to_be64(vec);
+ return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
+}
+
+/**
+ * t4_enable_vi - enable/disable a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @rx_en: 1=enable Rx, 0=disable Rx
+ * @tx_en: 1=enable Tx, 0=disable Tx
+ *
+ * Enables/disables a virtual interface.
+ */
+int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ bool rx_en, bool tx_en)
+{
+ struct fw_vi_enable_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
+ c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
+ FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_identify_port - identify a VI's port by blinking its LED
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @nblinks: how many times to blink LED at 2.5 Hz
+ *
+ * Identifies a VI's port by blinking its LED.
+ */
+int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ unsigned int nblinks)
+{
+ struct fw_vi_enable_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
+ c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
+ c.blinkdur = htons(nblinks);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_iq_free - free an ingress queue and its FLs
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queues
+ * @vf: the VF owning the queues
+ * @iqtype: the ingress queue type
+ * @iqid: ingress queue id
+ * @fl0id: FL0 queue id or 0xffff if no attached FL0
+ * @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ * Frees an ingress queue and its associated FLs, if any.
+ */
+int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id)
+{
+ struct fw_iq_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
+ FW_IQ_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
+ c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
+ c.iqid = htons(iqid);
+ c.fl0id = htons(fl0id);
+ c.fl1id = htons(fl1id);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_eth_eq_free - free an Ethernet egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees an Ethernet egress queue.
+ */
+int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_eth_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
+ FW_EQ_ETH_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
+ c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_ctrl_eq_free - free a control egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees a control egress queue.
+ */
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_ctrl_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
+ FW_EQ_CTRL_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
+ c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_ofld_eq_free - free an offload egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees an offload egress queue.
+ */
+int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_ofld_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
+ FW_EQ_OFLD_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
+ c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_handle_fw_rpl - process a FW reply message
+ * @adap: the adapter
+ * @rpl: start of the FW message
+ *
+ * Processes a FW message, such as a link state change message.
+ */
+int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
+{
+ u8 opcode = *(const u8 *)rpl;
+
+ if (opcode == FW_PORT_CMD) { /* link/module state change message */
+ int speed = 0, fc = 0;
+ const struct fw_port_cmd *p = (void *)rpl;
+ int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
+ int port = adap->chan_map[chan];
+ struct port_info *pi = adap2pinfo(adap, port);
+ struct link_config *lc = &pi->link_cfg;
+ u32 stat = ntohl(p->u.info.lstatus_to_modtype);
+ int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
+ u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
+
+ if (stat & FW_PORT_CMD_RXPAUSE)
+ fc |= PAUSE_RX;
+ if (stat & FW_PORT_CMD_TXPAUSE)
+ fc |= PAUSE_TX;
+ if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+ speed = SPEED_100;
+ else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+ speed = SPEED_1000;
+ else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+ speed = SPEED_10000;
+
+ if (link_ok != lc->link_ok || speed != lc->speed ||
+ fc != lc->fc) { /* something changed */
+ lc->link_ok = link_ok;
+ lc->speed = speed;
+ lc->fc = fc;
+ t4_os_link_changed(adap, port, link_ok);
+ }
+ if (mod != pi->mod_type) {
+ pi->mod_type = mod;
+ t4_os_portmod_changed(adap, port);
+ }
+ }
+ return 0;
+}
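+
+/*
+ * Illustrative call site (a sketch, not part of this patch; "rsp" is a
+ * hypothetical pointer into the FW event queue): firmware replies arrive
+ * as CPL_FW6_MSG and are forwarded here when they carry a command reply.
+ *
+ *	const struct cpl_fw6_msg *msg = (const void *)rsp;
+ *	if (msg->type == FW6_TYPE_CMD_RPL)
+ *		t4_handle_fw_rpl(adap, msg->data);
+ */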
+
+static void __devinit get_pci_mode(struct adapter *adapter,
+ struct pci_params *p)
+{
+ u16 val;
+ u32 pcie_cap = pci_pcie_cap(adapter->pdev);
+
+ if (pcie_cap) {
+ pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
+ &val);
+ p->speed = val & PCI_EXP_LNKSTA_CLS;
+ p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
+ }
+}
+
+/**
+ * init_link_config - initialize a link's SW state
+ * @lc: structure holding the link state
+ * @caps: link capabilities
+ *
+ * Initializes the SW state maintained for each link, including the link's
+ * capabilities and default speed/flow-control/autonegotiation settings.
+ */
+static void __devinit init_link_config(struct link_config *lc,
+ unsigned int caps)
+{
+ lc->supported = caps;
+ lc->requested_speed = 0;
+ lc->speed = 0;
+ lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+ if (lc->supported & FW_PORT_CAP_ANEG) {
+ lc->advertising = lc->supported & ADVERT_MASK;
+ lc->autoneg = AUTONEG_ENABLE;
+ lc->requested_fc |= PAUSE_AUTONEG;
+ } else {
+ lc->advertising = 0;
+ lc->autoneg = AUTONEG_DISABLE;
+ }
+}
+
+int t4_wait_dev_ready(struct adapter *adap)
+{
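+ /*
+ * Reads of PL_WHOAMI return all 1s while the device is coming out
+ * of reset, so a value other than 0xffffffff means the device is
+ * ready; otherwise wait briefly and try once more.
+ */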
+ if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
+ return 0;
+ msleep(500);
+ return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
+}
+
+static int __devinit get_flash_params(struct adapter *adap)
+{
+ int ret;
+ u32 info;
+
+ ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
+ if (!ret)
+ ret = sf1_read(adap, 3, 0, 1, &info);
+ t4_write_reg(adap, SF_OP, 0); /* unlock SF */
+ if (ret)
+ return ret;
+
+ if ((info & 0xff) != 0x20) /* not a Numonyx flash */
+ return -EINVAL;
+ info >>= 16; /* log2 of size */
+ if (info >= 0x14 && info < 0x18)
+ adap->params.sf_nsec = 1 << (info - 16);
+ else if (info == 0x18)
+ adap->params.sf_nsec = 64;
+ else
+ return -EINVAL;
+ adap->params.sf_size = 1 << info;
+ adap->params.sf_fw_start =
+ t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
+ return 0;
+}
+
+/**
+ * t4_prep_adapter - prepare SW and HW for operation
+ * @adapter: the adapter
+ *
+ * Initialize adapter SW state for the various HW modules, set initial
+ * values for some adapter tunables, and read the adapter's flash and
+ * VPD parameters for later use.
+ */
+int __devinit t4_prep_adapter(struct adapter *adapter)
+{
+ int ret;
+
+ ret = t4_wait_dev_ready(adapter);
+ if (ret < 0)
+ return ret;
+
+ get_pci_mode(adapter, &adapter->params.pci);
+ adapter->params.rev = t4_read_reg(adapter, PL_REV);
+
+ ret = get_flash_params(adapter);
+ if (ret < 0) {
+ dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
+ return ret;
+ }
+
+ ret = get_vpd_params(adapter, &adapter->params.vpd);
+ if (ret < 0)
+ return ret;
+
+ init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
+
+ /*
+ * Default port for debugging in case we can't reach FW.
+ */
+ adapter->params.nports = 1;
+ adapter->params.portvec = 1;
+ return 0;
+}
+
+int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
+{
+ u8 addr[6];
+ int ret, i, j = 0;
+ struct fw_port_cmd c;
+ struct fw_rss_vi_config_cmd rvc;
+
+ memset(&c, 0, sizeof(c));
+ memset(&rvc, 0, sizeof(rvc));
+
+ for_each_port(adap, i) {
+ unsigned int rss_size;
+ struct port_info *p = adap2pinfo(adap, i);
+
+ while ((adap->params.portvec & (1 << j)) == 0)
+ j++;
+
+ c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
+ FW_CMD_REQUEST | FW_CMD_READ |
+ FW_PORT_CMD_PORTID(j));
+ c.action_to_len16 = htonl(
+ FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_LEN16(c));
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret)
+ return ret;
+
+ ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
+ if (ret < 0)
+ return ret;
+
+ p->viid = ret;
+ p->tx_chan = j;
+ p->lport = j;
+ p->rss_size = rss_size;
+ memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
+ memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
+ adap->port[i]->dev_id = j;
+
+ ret = ntohl(c.u.info.lstatus_to_modtype);
+ p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
+ FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
+ p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
+ p->mod_type = FW_PORT_MOD_TYPE_NA;
+
+ rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+ FW_CMD_REQUEST | FW_CMD_READ |
+ FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
+ rvc.retval_len16 = htonl(FW_LEN16(rvc));
+ ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
+ if (ret)
+ return ret;
+ p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
+
+ init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
+ j++;
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
new file mode 100644
index 000000000000..c26b455f37de
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -0,0 +1,140 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4_HW_H
+#define __T4_HW_H
+
+#include <linux/types.h>
+
+enum {
+ NCHAN = 4, /* # of HW channels */
+ MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
+ EEPROMSIZE = 17408, /* Serial EEPROM physical size */
+ EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
+ EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
+ RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
+ TCB_SIZE = 128, /* TCB size */
+ NMTUS = 16, /* size of MTU table */
+ NCCTRL_WIN = 32, /* # of congestion control windows */
+ NEXACT_MAC = 336, /* # of exact MAC address filters */
+ L2T_SIZE = 4096, /* # of L2T entries */
+ MBOX_LEN = 64, /* mailbox size in bytes */
+ TRACE_LEN = 112, /* length of trace data and mask */
+ FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
+ NWOL_PAT = 8, /* # of WoL patterns */
+ WOL_PAT_LEN = 128, /* length of WoL patterns */
+};
+
+enum {
+ SF_PAGE_SIZE = 256, /* serial flash page size */
+};
+
+enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */
+
+enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */
+
+enum {
+ SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
+ SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
+ SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
+
+ SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
+ SGE_TIMER_UPD_CIDX = 7, /* update cidx only */
+
+ SGE_EQ_IDXSIZE = 64, /* egress queue pidx/cidx unit size */
+
+ SGE_INTRDST_PCI = 0, /* interrupt destination is PCI-E */
+ SGE_INTRDST_IQ = 1, /* destination is an ingress queue */
+
+ SGE_UPDATEDEL_NONE = 0, /* ingress queue pidx update delivery */
+ SGE_UPDATEDEL_INTR = 1, /* interrupt */
+ SGE_UPDATEDEL_STPG = 2, /* status page */
+ SGE_UPDATEDEL_BOTH = 3, /* interrupt and status page */
+
+ SGE_HOSTFCMODE_NONE = 0, /* egress queue cidx updates */
+ SGE_HOSTFCMODE_IQ = 1, /* sent to ingress queue */
+ SGE_HOSTFCMODE_STPG = 2, /* sent to status page */
+ SGE_HOSTFCMODE_BOTH = 3, /* ingress queue and status page */
+
+ SGE_FETCHBURSTMIN_16B = 0,/* egress queue descriptor fetch minimum */
+ SGE_FETCHBURSTMIN_32B = 1,
+ SGE_FETCHBURSTMIN_64B = 2,
+ SGE_FETCHBURSTMIN_128B = 3,
+
+ SGE_FETCHBURSTMAX_64B = 0,/* egress queue descriptor fetch maximum */
+ SGE_FETCHBURSTMAX_128B = 1,
+ SGE_FETCHBURSTMAX_256B = 2,
+ SGE_FETCHBURSTMAX_512B = 3,
+
+ SGE_CIDXFLUSHTHRESH_1 = 0,/* egress queue cidx flush threshold */
+ SGE_CIDXFLUSHTHRESH_2 = 1,
+ SGE_CIDXFLUSHTHRESH_4 = 2,
+ SGE_CIDXFLUSHTHRESH_8 = 3,
+ SGE_CIDXFLUSHTHRESH_16 = 4,
+ SGE_CIDXFLUSHTHRESH_32 = 5,
+ SGE_CIDXFLUSHTHRESH_64 = 6,
+ SGE_CIDXFLUSHTHRESH_128 = 7,
+
+ SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */
+};
+
+struct sge_qstat { /* data written to SGE queue status entries */
+ __be32 qid;
+ __be16 cidx;
+ __be16 pidx;
+};
+
+/*
+ * Structure for last 128 bits of response descriptors
+ */
+struct rsp_ctrl {
+ __be32 hdrbuflen_pidx;
+ __be32 pldbuflen_qid;
+ union {
+ u8 type_gen;
+ __be64 last_flit;
+ };
+};
+
+#define RSPD_NEWBUF 0x80000000U
+#define RSPD_LEN(x) (((x) >> 0) & 0x7fffffffU)
+#define RSPD_QID(x) RSPD_LEN(x)
+
+#define RSPD_GEN(x) ((x) >> 7)
+#define RSPD_TYPE(x) (((x) >> 4) & 3)
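+
+/*
+ * Illustrative decode of a response descriptor's type_gen field (a sketch,
+ * not part of this patch; "rc" and the queue's generation bit "gen" are
+ * hypothetical locals):
+ *
+ *	u8 tg = rc->type_gen;
+ *	if (RSPD_GEN(tg) == gen && RSPD_TYPE(tg) == RSP_TYPE_CPL)
+ *		... process the CPL message ...
+ */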
+
+#define QINTR_CNT_EN 0x1
+#define QINTR_TIMER_IDX(x) ((x) << 1)
+#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7)
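+
+/*
+ * Illustrative encoding of an ingress queue's interrupt parameters (a
+ * sketch, not part of this patch): select holdoff timer 2 and enable the
+ * packet-count threshold, then recover the timer index later:
+ *
+ *	u8 intr_params = QINTR_TIMER_IDX(2) | QINTR_CNT_EN;
+ *	unsigned int timer_idx = QINTR_TIMER_IDX_GET(intr_params);
+ */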
+#endif /* __T4_HW_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
new file mode 100644
index 000000000000..eb71b8250b91
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -0,0 +1,678 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4_MSG_H
+#define __T4_MSG_H
+
+#include <linux/types.h>
+
+enum {
+ CPL_PASS_OPEN_REQ = 0x1,
+ CPL_PASS_ACCEPT_RPL = 0x2,
+ CPL_ACT_OPEN_REQ = 0x3,
+ CPL_SET_TCB_FIELD = 0x5,
+ CPL_GET_TCB = 0x6,
+ CPL_CLOSE_CON_REQ = 0x8,
+ CPL_CLOSE_LISTSRV_REQ = 0x9,
+ CPL_ABORT_REQ = 0xA,
+ CPL_ABORT_RPL = 0xB,
+ CPL_RX_DATA_ACK = 0xD,
+ CPL_TX_PKT = 0xE,
+ CPL_L2T_WRITE_REQ = 0x12,
+ CPL_TID_RELEASE = 0x1A,
+
+ CPL_CLOSE_LISTSRV_RPL = 0x20,
+ CPL_L2T_WRITE_RPL = 0x23,
+ CPL_PASS_OPEN_RPL = 0x24,
+ CPL_ACT_OPEN_RPL = 0x25,
+ CPL_PEER_CLOSE = 0x26,
+ CPL_ABORT_REQ_RSS = 0x2B,
+ CPL_ABORT_RPL_RSS = 0x2D,
+
+ CPL_CLOSE_CON_RPL = 0x32,
+ CPL_ISCSI_HDR = 0x33,
+ CPL_RDMA_CQE = 0x35,
+ CPL_RDMA_CQE_READ_RSP = 0x36,
+ CPL_RDMA_CQE_ERR = 0x37,
+ CPL_RX_DATA = 0x39,
+ CPL_SET_TCB_RPL = 0x3A,
+ CPL_RX_PKT = 0x3B,
+ CPL_RX_DDP_COMPLETE = 0x3F,
+
+ CPL_ACT_ESTABLISH = 0x40,
+ CPL_PASS_ESTABLISH = 0x41,
+ CPL_RX_DATA_DDP = 0x42,
+ CPL_PASS_ACCEPT_REQ = 0x44,
+
+ CPL_RDMA_READ_REQ = 0x60,
+
+ CPL_PASS_OPEN_REQ6 = 0x81,
+ CPL_ACT_OPEN_REQ6 = 0x83,
+
+ CPL_RDMA_TERMINATE = 0xA2,
+ CPL_RDMA_WRITE = 0xA4,
+ CPL_SGE_EGR_UPDATE = 0xA5,
+
+ CPL_TRACE_PKT = 0xB0,
+
+ CPL_FW4_MSG = 0xC0,
+ CPL_FW4_PLD = 0xC1,
+ CPL_FW4_ACK = 0xC3,
+
+ CPL_FW6_MSG = 0xE0,
+ CPL_FW6_PLD = 0xE1,
+ CPL_TX_PKT_LSO = 0xED,
+ CPL_TX_PKT_XT = 0xEE,
+
+ NUM_CPL_CMDS
+};
+
+enum CPL_error {
+ CPL_ERR_NONE = 0,
+ CPL_ERR_TCAM_FULL = 3,
+ CPL_ERR_BAD_LENGTH = 15,
+ CPL_ERR_BAD_ROUTE = 18,
+ CPL_ERR_CONN_RESET = 20,
+ CPL_ERR_CONN_EXIST_SYNRECV = 21,
+ CPL_ERR_CONN_EXIST = 22,
+ CPL_ERR_ARP_MISS = 23,
+ CPL_ERR_BAD_SYN = 24,
+ CPL_ERR_CONN_TIMEDOUT = 30,
+ CPL_ERR_XMIT_TIMEDOUT = 31,
+ CPL_ERR_PERSIST_TIMEDOUT = 32,
+ CPL_ERR_FINWAIT2_TIMEDOUT = 33,
+ CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
+ CPL_ERR_RTX_NEG_ADVICE = 35,
+ CPL_ERR_PERSIST_NEG_ADVICE = 36,
+ CPL_ERR_ABORT_FAILED = 42,
+ CPL_ERR_IWARP_FLM = 50,
+};
+
+enum {
+ ULP_MODE_NONE = 0,
+ ULP_MODE_ISCSI = 2,
+ ULP_MODE_RDMA = 4,
+ ULP_MODE_TCPDDP = 5,
+ ULP_MODE_FCOE = 6,
+};
+
+enum {
+ ULP_CRC_HEADER = 1 << 0,
+ ULP_CRC_DATA = 1 << 1
+};
+
+enum {
+ CPL_ABORT_SEND_RST = 0,
+ CPL_ABORT_NO_RST,
+};
+
+enum { /* TX_PKT_XT checksum types */
+ TX_CSUM_TCP = 0,
+ TX_CSUM_UDP = 1,
+ TX_CSUM_CRC16 = 4,
+ TX_CSUM_CRC32 = 5,
+ TX_CSUM_CRC32C = 6,
+ TX_CSUM_FCOE = 7,
+ TX_CSUM_TCPIP = 8,
+ TX_CSUM_UDPIP = 9,
+ TX_CSUM_TCPIP6 = 10,
+ TX_CSUM_UDPIP6 = 11,
+ TX_CSUM_IP = 12,
+};
+
+union opcode_tid {
+ __be32 opcode_tid;
+ u8 opcode;
+};
+
+#define CPL_OPCODE(x) ((x) << 24)
+#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid))
+#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
+#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF)
+
+/* partitioning of TID fields that also carry a queue id */
+#define GET_TID_TID(x) ((x) & 0x3fff)
+#define GET_TID_QID(x) (((x) >> 14) & 0x3ff)
+#define TID_QID(x) ((x) << 14)
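+
+/*
+ * Illustrative decode (a sketch, not part of this patch; "rpl" is a
+ * hypothetical received CPL with an opcode_tid field): extract the TID and
+ * split off the queue id portion when one is embedded:
+ *
+ *	unsigned int tid = GET_TID(rpl);
+ *	unsigned int qid = GET_TID_QID(tid);
+ *	unsigned int tcb = GET_TID_TID(tid);
+ */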
+
+struct rss_header {
+ u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 channel:2;
+ u8 filter_hit:1;
+ u8 filter_tid:1;
+ u8 hash_type:2;
+ u8 ipv6:1;
+ u8 send2fw:1;
+#else
+ u8 send2fw:1;
+ u8 ipv6:1;
+ u8 hash_type:2;
+ u8 filter_tid:1;
+ u8 filter_hit:1;
+ u8 channel:2;
+#endif
+ __be16 qid;
+ __be32 hash_val;
+};
+
+struct work_request_hdr {
+ __be32 wr_hi;
+ __be32 wr_mid;
+ __be64 wr_lo;
+};
+
+#define WR_HDR struct work_request_hdr wr
+
+struct cpl_pass_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be64 opt0;
+#define TX_CHAN(x) ((x) << 2)
+#define DELACK(x) ((x) << 5)
+#define ULP_MODE(x) ((x) << 8)
+#define RCV_BUFSIZ(x) ((x) << 12)
+#define DSCP(x) ((x) << 22)
+#define SMAC_SEL(x) ((u64)(x) << 28)
+#define L2T_IDX(x) ((u64)(x) << 36)
+#define NAGLE(x) ((u64)(x) << 49)
+#define WND_SCALE(x) ((u64)(x) << 50)
+#define KEEP_ALIVE(x) ((u64)(x) << 54)
+#define MSS_IDX(x) ((u64)(x) << 60)
+ __be64 opt1;
+#define SYN_RSS_ENABLE (1 << 0)
+#define SYN_RSS_QUEUE(x) ((x) << 2)
+#define CONN_POLICY_ASK (1 << 22)
+};
+
+struct cpl_pass_open_req6 {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be64 local_ip_hi;
+ __be64 local_ip_lo;
+ __be64 peer_ip_hi;
+ __be64 peer_ip_lo;
+ __be64 opt0;
+ __be64 opt1;
+};
+
+struct cpl_pass_open_rpl {
+ union opcode_tid ot;
+ u8 rsvd[3];
+ u8 status;
+};
+
+struct cpl_pass_accept_rpl {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 opt2;
+#define RSS_QUEUE(x) ((x) << 0)
+#define RSS_QUEUE_VALID (1 << 10)
+#define RX_COALESCE_VALID(x) ((x) << 11)
+#define RX_COALESCE(x) ((x) << 12)
+#define TX_QUEUE(x) ((x) << 23)
+#define RX_CHANNEL(x) ((x) << 26)
+#define WND_SCALE_EN(x) ((x) << 28)
+#define TSTAMPS_EN(x) ((x) << 29)
+#define SACK_EN(x) ((x) << 30)
+ __be64 opt0;
+};
+
+struct cpl_act_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be64 opt0;
+ __be32 params;
+ __be32 opt2;
+};
+
+struct cpl_act_open_req6 {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be64 local_ip_hi;
+ __be64 local_ip_lo;
+ __be64 peer_ip_hi;
+ __be64 peer_ip_lo;
+ __be64 opt0;
+ __be32 params;
+ __be32 opt2;
+};
+
+struct cpl_act_open_rpl {
+ union opcode_tid ot;
+ __be32 atid_status;
+#define GET_AOPEN_STATUS(x) ((x) & 0xff)
+#define GET_AOPEN_ATID(x) (((x) >> 8) & 0xffffff)
+};
+
+struct cpl_pass_establish {
+ union opcode_tid ot;
+ __be32 rsvd;
+ __be32 tos_stid;
+#define GET_POPEN_TID(x) ((x) & 0xffffff)
+#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff)
+ __be16 mac_idx;
+ __be16 tcp_opt;
+#define GET_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
+#define GET_TCPOPT_SACK(x) (((x) >> 6) & 1)
+#define GET_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
+#define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
+#define GET_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
+ __be32 snd_isn;
+ __be32 rcv_isn;
+};
+
+struct cpl_act_establish {
+ union opcode_tid ot;
+ __be32 rsvd;
+ __be32 tos_atid;
+ __be16 mac_idx;
+ __be16 tcp_opt;
+ __be32 snd_isn;
+ __be32 rcv_isn;
+};
+
+struct cpl_get_tcb {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 reply_ctrl;
+#define QUEUENO(x) ((x) << 0)
+#define REPLY_CHAN(x) ((x) << 14)
+#define NO_REPLY(x) ((x) << 15)
+ __be16 cookie;
+};
+
+struct cpl_set_tcb_field {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 reply_ctrl;
+ __be16 word_cookie;
+#define TCB_WORD(x) ((x) << 0)
+#define TCB_COOKIE(x) ((x) << 5)
+ __be64 mask;
+ __be64 val;
+};
+
+struct cpl_set_tcb_rpl {
+ union opcode_tid ot;
+ __be16 rsvd;
+ u8 cookie;
+ u8 status;
+ __be64 oldval;
+};
+
+struct cpl_close_con_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd;
+};
+
+struct cpl_close_con_rpl {
+ union opcode_tid ot;
+ u8 rsvd[3];
+ u8 status;
+ __be32 snd_nxt;
+ __be32 rcv_nxt;
+};
+
+struct cpl_close_listsvr_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 reply_ctrl;
+#define LISTSVR_IPV6 (1 << 14)
+ __be16 rsvd;
+};
+
+struct cpl_close_listsvr_rpl {
+ union opcode_tid ot;
+ u8 rsvd[3];
+ u8 status;
+};
+
+struct cpl_abort_req_rss {
+ union opcode_tid ot;
+ u8 rsvd[3];
+ u8 status;
+};
+
+struct cpl_abort_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ u8 rsvd1;
+ u8 cmd;
+ u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl_rss {
+ union opcode_tid ot;
+ u8 rsvd[3];
+ u8 status;
+};
+
+struct cpl_abort_rpl {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ u8 rsvd1;
+ u8 cmd;
+ u8 rsvd2[6];
+};
+
+struct cpl_peer_close {
+ union opcode_tid ot;
+ __be32 rcv_nxt;
+};
+
+struct cpl_tid_release {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd;
+};
+
+struct cpl_tx_pkt_core {
+ __be32 ctrl0;
+#define TXPKT_VF(x) ((x) << 0)
+#define TXPKT_PF(x) ((x) << 8)
+#define TXPKT_VF_VLD (1 << 11)
+#define TXPKT_OVLAN_IDX(x) ((x) << 12)
+#define TXPKT_INTF(x) ((x) << 16)
+#define TXPKT_INS_OVLAN (1 << 21)
+#define TXPKT_OPCODE(x) ((x) << 24)
+ __be16 pack;
+ __be16 len;
+ __be64 ctrl1;
+#define TXPKT_CSUM_END(x) ((x) << 12)
+#define TXPKT_CSUM_START(x) ((x) << 20)
+#define TXPKT_IPHDR_LEN(x) ((u64)(x) << 20)
+#define TXPKT_CSUM_LOC(x) ((u64)(x) << 30)
+#define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34)
+#define TXPKT_CSUM_TYPE(x) ((u64)(x) << 40)
+#define TXPKT_VLAN(x) ((u64)(x) << 44)
+#define TXPKT_VLAN_VLD (1ULL << 60)
+#define TXPKT_IPCSUM_DIS (1ULL << 62)
+#define TXPKT_L4CSUM_DIS (1ULL << 63)
+};
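+
+/*
+ * Illustrative composition of the core fields for a TCP/IPv4 packet with
+ * full checksum offload (a sketch, not part of this patch; the port, PF,
+ * and header lengths are hypothetical):
+ *
+ *	cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) | TXPKT_INTF(port) |
+ *			   TXPKT_PF(pf));
+ *	cpl->ctrl1 = cpu_to_be64(TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
+ *				 TXPKT_IPHDR_LEN(ip_hdr_len) |
+ *				 TXPKT_ETHHDR_LEN(eth_hdr_len));
+ */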
+
+struct cpl_tx_pkt {
+ WR_HDR;
+ struct cpl_tx_pkt_core c;
+};
+
+#define cpl_tx_pkt_xt cpl_tx_pkt
+
+struct cpl_tx_pkt_lso_core {
+ __be32 lso_ctrl;
+#define LSO_TCPHDR_LEN(x) ((x) << 0)
+#define LSO_IPHDR_LEN(x) ((x) << 4)
+#define LSO_ETHHDR_LEN(x) ((x) << 16)
+#define LSO_IPV6(x) ((x) << 20)
+#define LSO_LAST_SLICE (1 << 22)
+#define LSO_FIRST_SLICE (1 << 23)
+#define LSO_OPCODE(x) ((x) << 24)
+ __be16 ipid_ofst;
+ __be16 mss;
+ __be32 seqno_offset;
+ __be32 len;
+ /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
+};
+
+struct cpl_tx_pkt_lso {
+ WR_HDR;
+ struct cpl_tx_pkt_lso_core c;
+ /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
+};
+
+struct cpl_iscsi_hdr {
+ union opcode_tid ot;
+ __be16 pdu_len_ddp;
+#define ISCSI_PDU_LEN(x) ((x) & 0x7FFF)
+#define ISCSI_DDP (1 << 15)
+ __be16 len;
+ __be32 seq;
+ __be16 urg;
+ u8 rsvd;
+ u8 status;
+};
+
+struct cpl_rx_data {
+ union opcode_tid ot;
+ __be16 rsvd;
+ __be16 len;
+ __be32 seq;
+ __be16 urg;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 dack_mode:2;
+ u8 psh:1;
+ u8 heartbeat:1;
+ u8 ddp_off:1;
+ u8 :3;
+#else
+ u8 :3;
+ u8 ddp_off:1;
+ u8 heartbeat:1;
+ u8 psh:1;
+ u8 dack_mode:2;
+#endif
+ u8 status;
+};
+
+struct cpl_rx_data_ack {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 credit_dack;
+#define RX_CREDITS(x) ((x) << 0)
+#define RX_FORCE_ACK(x) ((x) << 28)
+};
+
+struct cpl_rx_pkt {
+ struct rss_header rsshdr;
+ u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 iff:4;
+ u8 csum_calc:1;
+ u8 ipmi_pkt:1;
+ u8 vlan_ex:1;
+ u8 ip_frag:1;
+#else
+ u8 ip_frag:1;
+ u8 vlan_ex:1;
+ u8 ipmi_pkt:1;
+ u8 csum_calc:1;
+ u8 iff:4;
+#endif
+ __be16 csum;
+ __be16 vlan;
+ __be16 len;
+ __be32 l2info;
+#define RXF_UDP (1 << 22)
+#define RXF_TCP (1 << 23)
+#define RXF_IP (1 << 24)
+#define RXF_IP6 (1 << 25)
+ __be16 hdr_len;
+ __be16 err_vec;
+};
+
+struct cpl_trace_pkt {
+ u8 opcode;
+ u8 intf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 runt:4;
+ u8 filter_hit:4;
+ u8 :6;
+ u8 err:1;
+ u8 trunc:1;
+#else
+ u8 filter_hit:4;
+ u8 runt:4;
+ u8 trunc:1;
+ u8 err:1;
+ u8 :6;
+#endif
+ __be16 rsvd;
+ __be16 len;
+ __be64 tstamp;
+};
+
+struct cpl_l2t_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 params;
+#define L2T_W_INFO(x) ((x) << 2)
+#define L2T_W_PORT(x) ((x) << 8)
+#define L2T_W_NOREPLY(x) ((x) << 15)
+ __be16 l2t_idx;
+ __be16 vlan;
+ u8 dst_mac[6];
+};
+
+struct cpl_l2t_write_rpl {
+ union opcode_tid ot;
+ u8 status;
+ u8 rsvd[3];
+};
+
+struct cpl_rdma_terminate {
+ union opcode_tid ot;
+ __be16 rsvd;
+ __be16 len;
+};
+
+struct cpl_sge_egr_update {
+ __be32 opcode_qid;
+#define EGR_QID(x) ((x) & 0x1FFFF)
+ __be16 cidx;
+ __be16 pidx;
+};
+
+struct cpl_fw4_pld {
+ u8 opcode;
+ u8 rsvd0[3];
+ u8 type;
+ u8 rsvd1;
+ __be16 len;
+ __be64 data;
+ __be64 rsvd2;
+};
+
+struct cpl_fw6_pld {
+ u8 opcode;
+ u8 rsvd[5];
+ __be16 len;
+ __be64 data[4];
+};
+
+struct cpl_fw4_msg {
+ u8 opcode;
+ u8 type;
+ __be16 rsvd0;
+ __be32 rsvd1;
+ __be64 data[2];
+};
+
+struct cpl_fw4_ack {
+ union opcode_tid ot;
+ u8 credits;
+ u8 rsvd0[2];
+ u8 seq_vld;
+ __be32 snd_nxt;
+ __be32 snd_una;
+ __be64 rsvd1;
+};
+
+struct cpl_fw6_msg {
+ u8 opcode;
+ u8 type;
+ __be16 rsvd0;
+ __be32 rsvd1;
+ __be64 data[4];
+};
+
+/* cpl_fw6_msg.type values */
+enum {
+ FW6_TYPE_CMD_RPL = 0,
+};
+
+enum {
+ ULP_TX_MEM_READ = 2,
+ ULP_TX_MEM_WRITE = 3,
+ ULP_TX_PKT = 4
+};
+
+enum {
+ ULP_TX_SC_NOOP = 0x80,
+ ULP_TX_SC_IMM = 0x81,
+ ULP_TX_SC_DSGL = 0x82,
+ ULP_TX_SC_ISGL = 0x83
+};
+
+struct ulptx_sge_pair {
+ __be32 len[2];
+ __be64 addr[2];
+};
+
+struct ulptx_sgl {
+ __be32 cmd_nsge;
+#define ULPTX_CMD(x) ((x) << 24)
+#define ULPTX_NSGE(x) ((x) << 0)
+ __be32 len0;
+ __be64 addr0;
+ struct ulptx_sge_pair sge[0];
+};
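+
+/*
+ * Illustrative header for a direct scatter/gather list of "nfrags" buffers
+ * (a sketch, not part of this patch): the first length/address pair lives
+ * in the ulptx_sgl itself and the remaining pairs in the trailing sge[]
+ * array:
+ *
+ *	sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
+ */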
+
+struct ulp_mem_io {
+ WR_HDR;
+ __be32 cmd;
+#define ULP_MEMIO_ORDER(x) ((x) << 23)
+ __be32 len16; /* command length */
+ __be32 dlen; /* data length in 32-byte units */
+#define ULP_MEMIO_DATA_LEN(x) ((x) << 0)
+ __be32 lock_addr;
+#define ULP_MEMIO_ADDR(x) ((x) << 0)
+#define ULP_MEMIO_LOCK(x) ((x) << 31)
+};
+
+#endif /* __T4_MSG_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
new file mode 100644
index 000000000000..0adc5bcec7c4
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -0,0 +1,885 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4_REGS_H
+#define __T4_REGS_H
+
+#define MYPF_BASE 0x1b000
+#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
+
+#define PF0_BASE 0x1e000
+#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr))
+
+#define PF_STRIDE 0x400
+#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
+#define PF_REG(idx, reg) (PF_BASE(idx) + (reg))
+
+#define MYPORT_BASE 0x1c000
+#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))
+
+#define PORT0_BASE 0x20000
+#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr))
+
+#define PORT_STRIDE 0x2000
+#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE)
+#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg))
+
+#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR)
+#define EDC_REG(reg, idx) ((reg) + EDC_STRIDE * (idx))
+
+#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define PCIE_MAILBOX_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+
+#define SGE_PF_KDOORBELL 0x0
+#define QID_MASK 0xffff8000U
+#define QID_SHIFT 15
+#define QID(x) ((x) << QID_SHIFT)
+#define DBPRIO 0x00004000U
+#define PIDX_MASK 0x00003fffU
+#define PIDX_SHIFT 0
+#define PIDX(x) ((x) << PIDX_SHIFT)
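+
+/*
+ * Illustrative egress doorbell (a sketch, not part of this patch; "q" is a
+ * hypothetical TX queue whose SGE context id is in cntxt_id): after the
+ * descriptors are visible in memory, tell the SGE that "n" more are ready:
+ *
+ *	wmb();
+ *	t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+ *		     QID(q->cntxt_id) | PIDX(n));
+ */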
+
+#define SGE_PF_GTS 0x4
+#define INGRESSQID_MASK 0xffff0000U
+#define INGRESSQID_SHIFT 16
+#define INGRESSQID(x) ((x) << INGRESSQID_SHIFT)
+#define TIMERREG_MASK 0x0000e000U
+#define TIMERREG_SHIFT 13
+#define TIMERREG(x) ((x) << TIMERREG_SHIFT)
+#define SEINTARM_MASK 0x00001000U
+#define SEINTARM_SHIFT 12
+#define SEINTARM(x) ((x) << SEINTARM_SHIFT)
+#define CIDXINC_MASK 0x00000fffU
+#define CIDXINC_SHIFT 0
+#define CIDXINC(x) ((x) << CIDXINC_SHIFT)
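+
+/*
+ * Illustrative ingress queue update (a sketch, not part of this patch;
+ * "iq" is a hypothetical ingress queue): return "credits" processed
+ * entries and re-arm the interrupt holdoff timer in one GTS write:
+ *
+ *	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
+ *		     CIDXINC(credits) | INGRESSQID(iq->cntxt_id) |
+ *		     SEINTARM(iq->intr_params));
+ */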
+
+#define SGE_CONTROL 0x1008
+#define DCASYSTYPE 0x00080000U
+#define RXPKTCPLMODE 0x00040000U
+#define EGRSTATUSPAGESIZE 0x00020000U
+#define PKTSHIFT_MASK 0x00001c00U
+#define PKTSHIFT_SHIFT 10
+#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
+#define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
+#define INGPCIEBOUNDARY_MASK 0x00000380U
+#define INGPCIEBOUNDARY_SHIFT 7
+#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT)
+#define INGPADBOUNDARY_MASK 0x00000070U
+#define INGPADBOUNDARY_SHIFT 4
+#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT)
+#define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \
+ >> INGPADBOUNDARY_SHIFT)
+#define EGRPCIEBOUNDARY_MASK 0x0000000eU
+#define EGRPCIEBOUNDARY_SHIFT 1
+#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT)
+#define GLOBALENABLE 0x00000001U
+
+#define SGE_HOST_PAGE_SIZE 0x100c
+#define HOSTPAGESIZEPF0_MASK 0x0000000fU
+#define HOSTPAGESIZEPF0_SHIFT 0
+#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT)
+
+#define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010
+#define QUEUESPERPAGEPF0_MASK 0x0000000fU
+#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK)
+
+#define SGE_INT_CAUSE1 0x1024
+#define SGE_INT_CAUSE2 0x1030
+#define SGE_INT_CAUSE3 0x103c
+#define ERR_FLM_DBP 0x80000000U
+#define ERR_FLM_IDMA1 0x40000000U
+#define ERR_FLM_IDMA0 0x20000000U
+#define ERR_FLM_HINT 0x10000000U
+#define ERR_PCIE_ERROR3 0x08000000U
+#define ERR_PCIE_ERROR2 0x04000000U
+#define ERR_PCIE_ERROR1 0x02000000U
+#define ERR_PCIE_ERROR0 0x01000000U
+#define ERR_TIMER_ABOVE_MAX_QID 0x00800000U
+#define ERR_CPL_EXCEED_IQE_SIZE 0x00400000U
+#define ERR_INVALID_CIDX_INC 0x00200000U
+#define ERR_ITP_TIME_PAUSED 0x00100000U
+#define ERR_CPL_OPCODE_0 0x00080000U
+#define ERR_DROPPED_DB 0x00040000U
+#define ERR_DATA_CPL_ON_HIGH_QID1 0x00020000U
+#define ERR_DATA_CPL_ON_HIGH_QID0 0x00010000U
+#define ERR_BAD_DB_PIDX3 0x00008000U
+#define ERR_BAD_DB_PIDX2 0x00004000U
+#define ERR_BAD_DB_PIDX1 0x00002000U
+#define ERR_BAD_DB_PIDX0 0x00001000U
+#define ERR_ING_PCIE_CHAN 0x00000800U
+#define ERR_ING_CTXT_PRIO 0x00000400U
+#define ERR_EGR_CTXT_PRIO 0x00000200U
+#define DBFIFO_HP_INT 0x00000100U
+#define DBFIFO_LP_INT 0x00000080U
+#define REG_ADDRESS_ERR 0x00000040U
+#define INGRESS_SIZE_ERR 0x00000020U
+#define EGRESS_SIZE_ERR 0x00000010U
+#define ERR_INV_CTXT3 0x00000008U
+#define ERR_INV_CTXT2 0x00000004U
+#define ERR_INV_CTXT1 0x00000002U
+#define ERR_INV_CTXT0 0x00000001U
+
+#define SGE_INT_ENABLE3 0x1040
+#define SGE_FL_BUFFER_SIZE0 0x1044
+#define SGE_FL_BUFFER_SIZE1 0x1048
+#define SGE_INGRESS_RX_THRESHOLD 0x10a0
+#define THRESHOLD_0_MASK 0x3f000000U
+#define THRESHOLD_0_SHIFT 24
+#define THRESHOLD_0(x) ((x) << THRESHOLD_0_SHIFT)
+#define THRESHOLD_0_GET(x) (((x) & THRESHOLD_0_MASK) >> THRESHOLD_0_SHIFT)
+#define THRESHOLD_1_MASK 0x003f0000U
+#define THRESHOLD_1_SHIFT 16
+#define THRESHOLD_1(x) ((x) << THRESHOLD_1_SHIFT)
+#define THRESHOLD_1_GET(x) (((x) & THRESHOLD_1_MASK) >> THRESHOLD_1_SHIFT)
+#define THRESHOLD_2_MASK 0x00003f00U
+#define THRESHOLD_2_SHIFT 8
+#define THRESHOLD_2(x) ((x) << THRESHOLD_2_SHIFT)
+#define THRESHOLD_2_GET(x) (((x) & THRESHOLD_2_MASK) >> THRESHOLD_2_SHIFT)
+#define THRESHOLD_3_MASK 0x0000003fU
+#define THRESHOLD_3_SHIFT 0
+#define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT)
+#define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT)
+
+#define SGE_TIMER_VALUE_0_AND_1 0x10b8
+#define TIMERVALUE0_MASK 0xffff0000U
+#define TIMERVALUE0_SHIFT 16
+#define TIMERVALUE0(x) ((x) << TIMERVALUE0_SHIFT)
+#define TIMERVALUE0_GET(x) (((x) & TIMERVALUE0_MASK) >> TIMERVALUE0_SHIFT)
+#define TIMERVALUE1_MASK 0x0000ffffU
+#define TIMERVALUE1_SHIFT 0
+#define TIMERVALUE1(x) ((x) << TIMERVALUE1_SHIFT)
+#define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT)
+
+#define SGE_TIMER_VALUE_2_AND_3 0x10bc
+#define SGE_TIMER_VALUE_4_AND_5 0x10c0
+#define SGE_DEBUG_INDEX 0x10cc
+#define SGE_DEBUG_DATA_HIGH 0x10d0
+#define SGE_DEBUG_DATA_LOW 0x10d4
+#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
+
+#define PCIE_PF_CLI 0x44
+#define PCIE_INT_CAUSE 0x3004
+#define UNXSPLCPLERR 0x20000000U
+#define PCIEPINT 0x10000000U
+#define PCIESINT 0x08000000U
+#define RPLPERR 0x04000000U
+#define RXWRPERR 0x02000000U
+#define RXCPLPERR 0x01000000U
+#define PIOTAGPERR 0x00800000U
+#define MATAGPERR 0x00400000U
+#define INTXCLRPERR 0x00200000U
+#define FIDPERR 0x00100000U
+#define CFGSNPPERR 0x00080000U
+#define HRSPPERR 0x00040000U
+#define HREQPERR 0x00020000U
+#define HCNTPERR 0x00010000U
+#define DRSPPERR 0x00008000U
+#define DREQPERR 0x00004000U
+#define DCNTPERR 0x00002000U
+#define CRSPPERR 0x00001000U
+#define CREQPERR 0x00000800U
+#define CCNTPERR 0x00000400U
+#define TARTAGPERR 0x00000200U
+#define PIOREQPERR 0x00000100U
+#define PIOCPLPERR 0x00000080U
+#define MSIXDIPERR 0x00000040U
+#define MSIXDATAPERR 0x00000020U
+#define MSIXADDRHPERR 0x00000010U
+#define MSIXADDRLPERR 0x00000008U
+#define MSIDATAPERR 0x00000004U
+#define MSIADDRHPERR 0x00000002U
+#define MSIADDRLPERR 0x00000001U
+
+#define PCIE_NONFAT_ERR 0x3010
+#define PCIE_MEM_ACCESS_BASE_WIN 0x3068
+#define PCIEOFST_MASK 0xfffffc00U
+#define BIR_MASK 0x00000300U
+#define BIR_SHIFT 8
+#define BIR(x) ((x) << BIR_SHIFT)
+#define WINDOW_MASK 0x000000ffU
+#define WINDOW_SHIFT 0
+#define WINDOW(x) ((x) << WINDOW_SHIFT)
+#define PCIE_MEM_ACCESS_OFFSET 0x306c
+
+#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
+#define RNPP 0x80000000U
+#define RPCP 0x20000000U
+#define RCIP 0x08000000U
+#define RCCP 0x04000000U
+#define RFTP 0x00800000U
+#define PTRP 0x00100000U
+
+#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4
+#define TPCP 0x40000000U
+#define TNPP 0x20000000U
+#define TFTP 0x10000000U
+#define TCAP 0x08000000U
+#define TCIP 0x04000000U
+#define RCAP 0x02000000U
+#define PLUP 0x00800000U
+#define PLDN 0x00400000U
+#define OTDD 0x00200000U
+#define GTRP 0x00100000U
+#define RDPE 0x00040000U
+#define TDCE 0x00020000U
+#define TDUE 0x00010000U
+
+#define MC_INT_CAUSE 0x7518
+#define ECC_UE_INT_CAUSE 0x00000004U
+#define ECC_CE_INT_CAUSE 0x00000002U
+#define PERR_INT_CAUSE 0x00000001U
+
+#define MC_ECC_STATUS 0x751c
+#define ECC_CECNT_MASK 0xffff0000U
+#define ECC_CECNT_SHIFT 16
+#define ECC_CECNT(x) ((x) << ECC_CECNT_SHIFT)
+#define ECC_CECNT_GET(x) (((x) & ECC_CECNT_MASK) >> ECC_CECNT_SHIFT)
+#define ECC_UECNT_MASK 0x0000ffffU
+#define ECC_UECNT_SHIFT 0
+#define ECC_UECNT(x) ((x) << ECC_UECNT_SHIFT)
+#define ECC_UECNT_GET(x) (((x) & ECC_UECNT_MASK) >> ECC_UECNT_SHIFT)
+
+#define MC_BIST_CMD 0x7600
+#define START_BIST 0x80000000U
+#define BIST_CMD_GAP_MASK 0x0000ff00U
+#define BIST_CMD_GAP_SHIFT 8
+#define BIST_CMD_GAP(x) ((x) << BIST_CMD_GAP_SHIFT)
+#define BIST_OPCODE_MASK 0x00000003U
+#define BIST_OPCODE_SHIFT 0
+#define BIST_OPCODE(x) ((x) << BIST_OPCODE_SHIFT)
+
+#define MC_BIST_CMD_ADDR 0x7604
+#define MC_BIST_CMD_LEN 0x7608
+#define MC_BIST_DATA_PATTERN 0x760c
+#define BIST_DATA_TYPE_MASK 0x0000000fU
+#define BIST_DATA_TYPE_SHIFT 0
+#define BIST_DATA_TYPE(x) ((x) << BIST_DATA_TYPE_SHIFT)
+
+#define MC_BIST_STATUS_RDATA 0x7688
+
+#define MA_EXT_MEMORY_BAR 0x77c8
+#define EXT_MEM_SIZE_MASK 0x00000fffU
+#define EXT_MEM_SIZE_SHIFT 0
+#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT)
+
+#define MA_TARGET_MEM_ENABLE 0x77d8
+#define EXT_MEM_ENABLE 0x00000004U
+#define EDRAM1_ENABLE 0x00000002U
+#define EDRAM0_ENABLE 0x00000001U
+
+#define MA_INT_CAUSE 0x77e0
+#define MEM_PERR_INT_CAUSE 0x00000002U
+#define MEM_WRAP_INT_CAUSE 0x00000001U
+
+#define MA_INT_WRAP_STATUS 0x77e4
+#define MEM_WRAP_ADDRESS_MASK 0xfffffff0U
+#define MEM_WRAP_ADDRESS_SHIFT 4
+#define MEM_WRAP_ADDRESS_GET(x) (((x) & MEM_WRAP_ADDRESS_MASK) >> MEM_WRAP_ADDRESS_SHIFT)
+#define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU
+#define MEM_WRAP_CLIENT_NUM_SHIFT 0
+#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
+
+#define MA_PARITY_ERROR_STATUS 0x77f4
+
+#define EDC_0_BASE_ADDR 0x7900
+
+#define EDC_BIST_CMD 0x7904
+#define EDC_BIST_CMD_ADDR 0x7908
+#define EDC_BIST_CMD_LEN 0x790c
+#define EDC_BIST_DATA_PATTERN 0x7910
+#define EDC_BIST_STATUS_RDATA 0x7928
+#define EDC_INT_CAUSE 0x7978
+#define ECC_UE_PAR 0x00000020U
+#define ECC_CE_PAR 0x00000010U
+#define PERR_PAR_CAUSE 0x00000008U
+
+#define EDC_ECC_STATUS 0x797c
+
+#define EDC_1_BASE_ADDR 0x7980
+
+#define CIM_BOOT_CFG 0x7b00
+#define BOOTADDR_MASK 0xffffff00U
+
+#define CIM_PF_MAILBOX_DATA 0x240
+#define CIM_PF_MAILBOX_CTRL 0x280
+#define MBMSGVALID 0x00000008U
+#define MBINTREQ 0x00000004U
+#define MBOWNER_MASK 0x00000003U
+#define MBOWNER_SHIFT 0
+#define MBOWNER(x) ((x) << MBOWNER_SHIFT)
+#define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT)
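+
+/*
+ * Illustrative ownership check (a sketch, not part of this patch): the low
+ * bits of the mailbox control register identify the current owner; the
+ * driver may claim the mailbox only when nobody owns it:
+ *
+ *	u32 ctl = t4_read_reg(adap, PF_REG(mbox, CIM_PF_MAILBOX_CTRL));
+ *	if (MBOWNER_GET(ctl) == MBOX_OWNER_NONE)
+ *		... write MBOWNER(MBOX_OWNER_DRV) to claim it ...
+ */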
+
+#define CIM_PF_HOST_INT_CAUSE 0x28c
+#define MBMSGRDYINT 0x00080000U
+
+#define CIM_HOST_INT_CAUSE 0x7b2c
+#define TIEQOUTPARERRINT 0x00100000U
+#define TIEQINPARERRINT 0x00080000U
+#define MBHOSTPARERR 0x00040000U
+#define MBUPPARERR 0x00020000U
+#define IBQPARERR 0x0001f800U
+#define IBQTP0PARERR 0x00010000U
+#define IBQTP1PARERR 0x00008000U
+#define IBQULPPARERR 0x00004000U
+#define IBQSGELOPARERR 0x00002000U
+#define IBQSGEHIPARERR 0x00001000U
+#define IBQNCSIPARERR 0x00000800U
+#define OBQPARERR 0x000007e0U
+#define OBQULP0PARERR 0x00000400U
+#define OBQULP1PARERR 0x00000200U
+#define OBQULP2PARERR 0x00000100U
+#define OBQULP3PARERR 0x00000080U
+#define OBQSGEPARERR 0x00000040U
+#define OBQNCSIPARERR 0x00000020U
+#define PREFDROPINT 0x00000002U
+#define UPACCNONZERO 0x00000001U
+
+#define CIM_HOST_UPACC_INT_CAUSE 0x7b34
+#define EEPROMWRINT 0x40000000U
+#define TIMEOUTMAINT 0x20000000U
+#define TIMEOUTINT 0x10000000U
+#define RSPOVRLOOKUPINT 0x08000000U
+#define REQOVRLOOKUPINT 0x04000000U
+#define BLKWRPLINT 0x02000000U
+#define BLKRDPLINT 0x01000000U
+#define SGLWRPLINT 0x00800000U
+#define SGLRDPLINT 0x00400000U
+#define BLKWRCTLINT 0x00200000U
+#define BLKRDCTLINT 0x00100000U
+#define SGLWRCTLINT 0x00080000U
+#define SGLRDCTLINT 0x00040000U
+#define BLKWREEPROMINT 0x00020000U
+#define BLKRDEEPROMINT 0x00010000U
+#define SGLWREEPROMINT 0x00008000U
+#define SGLRDEEPROMINT 0x00004000U
+#define BLKWRFLASHINT 0x00002000U
+#define BLKRDFLASHINT 0x00001000U
+#define SGLWRFLASHINT 0x00000800U
+#define SGLRDFLASHINT 0x00000400U
+#define BLKWRBOOTINT 0x00000200U
+#define BLKRDBOOTINT 0x00000100U
+#define SGLWRBOOTINT 0x00000080U
+#define SGLRDBOOTINT 0x00000040U
+#define ILLWRBEINT 0x00000020U
+#define ILLRDBEINT 0x00000010U
+#define ILLRDINT 0x00000008U
+#define ILLWRINT 0x00000004U
+#define ILLTRANSINT 0x00000002U
+#define RSVDSPACEINT 0x00000001U
+
+#define TP_OUT_CONFIG 0x7d04
+#define VLANEXTENABLE_MASK 0x0000f000U
+#define VLANEXTENABLE_SHIFT 12
+
+#define TP_PARA_REG2 0x7d68
+#define MAXRXDATA_MASK 0xffff0000U
+#define MAXRXDATA_SHIFT 16
+#define MAXRXDATA_GET(x) (((x) & MAXRXDATA_MASK) >> MAXRXDATA_SHIFT)
+
+#define TP_TIMER_RESOLUTION 0x7d90
+#define TIMERRESOLUTION_MASK 0x00ff0000U
+#define TIMERRESOLUTION_SHIFT 16
+#define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT)
+
+#define TP_SHIFT_CNT 0x7dc0
+
+#define TP_CCTRL_TABLE 0x7ddc
+#define TP_MTU_TABLE 0x7de4
+#define MTUINDEX_MASK 0xff000000U
+#define MTUINDEX_SHIFT 24
+#define MTUINDEX(x) ((x) << MTUINDEX_SHIFT)
+#define MTUWIDTH_MASK 0x000f0000U
+#define MTUWIDTH_SHIFT 16
+#define MTUWIDTH(x) ((x) << MTUWIDTH_SHIFT)
+#define MTUWIDTH_GET(x) (((x) & MTUWIDTH_MASK) >> MTUWIDTH_SHIFT)
+#define MTUVALUE_MASK 0x00003fffU
+#define MTUVALUE_SHIFT 0
+#define MTUVALUE(x) ((x) << MTUVALUE_SHIFT)
+#define MTUVALUE_GET(x) (((x) & MTUVALUE_MASK) >> MTUVALUE_SHIFT)
+
+#define TP_RSS_LKP_TABLE 0x7dec
+#define LKPTBLROWVLD 0x80000000U
+#define LKPTBLQUEUE1_MASK 0x000ffc00U
+#define LKPTBLQUEUE1_SHIFT 10
+#define LKPTBLQUEUE1(x) ((x) << LKPTBLQUEUE1_SHIFT)
+#define LKPTBLQUEUE1_GET(x) (((x) & LKPTBLQUEUE1_MASK) >> LKPTBLQUEUE1_SHIFT)
+#define LKPTBLQUEUE0_MASK 0x000003ffU
+#define LKPTBLQUEUE0_SHIFT 0
+#define LKPTBLQUEUE0(x) ((x) << LKPTBLQUEUE0_SHIFT)
+#define LKPTBLQUEUE0_GET(x) (((x) & LKPTBLQUEUE0_MASK) >> LKPTBLQUEUE0_SHIFT)
+
+#define TP_PIO_ADDR 0x7e40
+#define TP_PIO_DATA 0x7e44
+#define TP_MIB_INDEX 0x7e50
+#define TP_MIB_DATA 0x7e54
+#define TP_INT_CAUSE 0x7e74
+#define FLMTXFLSTEMPTY 0x40000000U
+
+#define TP_INGRESS_CONFIG 0x141
+#define VNIC 0x00000800U
+#define CSUM_HAS_PSEUDO_HDR 0x00000400U
+#define RM_OVLAN 0x00000200U
+#define LOOKUPEVERYPKT 0x00000100U
+
+#define TP_MIB_MAC_IN_ERR_0 0x0
+#define TP_MIB_TCP_OUT_RST 0xc
+#define TP_MIB_TCP_IN_SEG_HI 0x10
+#define TP_MIB_TCP_IN_SEG_LO 0x11
+#define TP_MIB_TCP_OUT_SEG_HI 0x12
+#define TP_MIB_TCP_OUT_SEG_LO 0x13
+#define TP_MIB_TCP_RXT_SEG_HI 0x14
+#define TP_MIB_TCP_RXT_SEG_LO 0x15
+#define TP_MIB_TNL_CNG_DROP_0 0x18
+#define TP_MIB_TCP_V6IN_ERR_0 0x28
+#define TP_MIB_TCP_V6OUT_RST 0x2c
+#define TP_MIB_OFD_ARP_DROP 0x36
+#define TP_MIB_TNL_DROP_0 0x44
+#define TP_MIB_OFD_VLN_DROP_0 0x58
+
+#define ULP_TX_INT_CAUSE 0x8dcc
+#define PBL_BOUND_ERR_CH3 0x80000000U
+#define PBL_BOUND_ERR_CH2 0x40000000U
+#define PBL_BOUND_ERR_CH1 0x20000000U
+#define PBL_BOUND_ERR_CH0 0x10000000U
+
+#define PM_RX_INT_CAUSE 0x8fdc
+#define ZERO_E_CMD_ERROR 0x00400000U
+#define PMRX_FRAMING_ERROR 0x003ffff0U
+#define OCSPI_PAR_ERROR 0x00000008U
+#define DB_OPTIONS_PAR_ERROR 0x00000004U
+#define IESPI_PAR_ERROR 0x00000002U
+#define E_PCMD_PAR_ERROR 0x00000001U
+
+#define PM_TX_INT_CAUSE 0x8ffc
+#define PCMD_LEN_OVFL0 0x80000000U
+#define PCMD_LEN_OVFL1 0x40000000U
+#define PCMD_LEN_OVFL2 0x20000000U
+#define ZERO_C_CMD_ERROR 0x10000000U
+#define PMTX_FRAMING_ERROR 0x0ffffff0U
+#define OESPI_PAR_ERROR 0x00000008U
+#define ICSPI_PAR_ERROR 0x00000002U
+#define C_PCMD_PAR_ERROR 0x00000001U
+
+#define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
+#define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
+#define MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408
+#define MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c
+#define MPS_PORT_STAT_TX_PORT_BCAST_L 0x410
+#define MPS_PORT_STAT_TX_PORT_BCAST_H 0x414
+#define MPS_PORT_STAT_TX_PORT_MCAST_L 0x418
+#define MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c
+#define MPS_PORT_STAT_TX_PORT_UCAST_L 0x420
+#define MPS_PORT_STAT_TX_PORT_UCAST_H 0x424
+#define MPS_PORT_STAT_TX_PORT_ERROR_L 0x428
+#define MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c
+#define MPS_PORT_STAT_TX_PORT_64B_L 0x430
+#define MPS_PORT_STAT_TX_PORT_64B_H 0x434
+#define MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438
+#define MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c
+#define MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440
+#define MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444
+#define MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448
+#define MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c
+#define MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450
+#define MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454
+#define MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458
+#define MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c
+#define MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460
+#define MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464
+#define MPS_PORT_STAT_TX_PORT_DROP_L 0x468
+#define MPS_PORT_STAT_TX_PORT_DROP_H 0x46c
+#define MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470
+#define MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474
+#define MPS_PORT_STAT_TX_PORT_PPP0_L 0x478
+#define MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c
+#define MPS_PORT_STAT_TX_PORT_PPP1_L 0x480
+#define MPS_PORT_STAT_TX_PORT_PPP1_H 0x484
+#define MPS_PORT_STAT_TX_PORT_PPP2_L 0x488
+#define MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c
+#define MPS_PORT_STAT_TX_PORT_PPP3_L 0x490
+#define MPS_PORT_STAT_TX_PORT_PPP3_H 0x494
+#define MPS_PORT_STAT_TX_PORT_PPP4_L 0x498
+#define MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c
+#define MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0
+#define MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4
+#define MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8
+#define MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac
+#define MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0
+#define MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4
+#define MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0
+#define MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4
+#define MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8
+#define MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc
+#define MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0
+#define MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4
+#define MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8
+#define MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc
+#define MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0
+#define MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4
+#define MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8
+#define MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec
+#define MPS_PORT_STAT_LB_PORT_64B_L 0x4f0
+#define MPS_PORT_STAT_LB_PORT_64B_H 0x4f4
+#define MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8
+#define MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc
+#define MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500
+#define MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504
+#define MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508
+#define MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c
+#define MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510
+#define MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514
+#define MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518
+#define MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c
+#define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520
+#define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524
+#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528
+#define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540
+#define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544
+#define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548
+#define MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c
+#define MPS_PORT_STAT_RX_PORT_BCAST_L 0x550
+#define MPS_PORT_STAT_RX_PORT_BCAST_H 0x554
+#define MPS_PORT_STAT_RX_PORT_MCAST_L 0x558
+#define MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c
+#define MPS_PORT_STAT_RX_PORT_UCAST_L 0x560
+#define MPS_PORT_STAT_RX_PORT_UCAST_H 0x564
+#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568
+#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c
+#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570
+#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574
+#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578
+#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c
+#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580
+#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584
+#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588
+#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c
+#define MPS_PORT_STAT_RX_PORT_64B_L 0x590
+#define MPS_PORT_STAT_RX_PORT_64B_H 0x594
+#define MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598
+#define MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c
+#define MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0
+#define MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4
+#define MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8
+#define MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac
+#define MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0
+#define MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4
+#define MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8
+#define MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc
+#define MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0
+#define MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4
+#define MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8
+#define MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc
+#define MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0
+#define MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4
+#define MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8
+#define MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc
+#define MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0
+#define MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4
+#define MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8
+#define MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec
+#define MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0
+#define MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4
+#define MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8
+#define MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc
+#define MPS_PORT_STAT_RX_PORT_PPP6_L 0x600
+#define MPS_PORT_STAT_RX_PORT_PPP6_H 0x604
+#define MPS_PORT_STAT_RX_PORT_PPP7_L 0x608
+#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
+#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
+#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
+#define MPS_CMN_CTL 0x9000
+#define NUMPORTS_MASK 0x00000003U
+#define NUMPORTS_SHIFT 0
+#define NUMPORTS_GET(x) (((x) & NUMPORTS_MASK) >> NUMPORTS_SHIFT)
+
+#define MPS_INT_CAUSE 0x9008
+#define STATINT 0x00000020U
+#define TXINT 0x00000010U
+#define RXINT 0x00000008U
+#define TRCINT 0x00000004U
+#define CLSINT 0x00000002U
+#define PLINT 0x00000001U
+
+#define MPS_TX_INT_CAUSE 0x9408
+#define PORTERR 0x00010000U
+#define FRMERR 0x00008000U
+#define SECNTERR 0x00004000U
+#define BUBBLE 0x00002000U
+#define TXDESCFIFO 0x00001e00U
+#define TXDATAFIFO 0x000001e0U
+#define NCSIFIFO 0x00000010U
+#define TPFIFO 0x0000000fU
+
+#define MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614
+#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620
+#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c
+
+#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640
+#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644
+#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648
+#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c
+#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650
+#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654
+#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658
+#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c
+#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660
+#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664
+#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668
+#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c
+#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670
+#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674
+#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678
+#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c
+#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680
+#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684
+#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688
+#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c
+#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690
+#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694
+#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698
+#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c
+#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0
+#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4
+#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8
+#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac
+#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0
+#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4
+#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
+#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
+#define MPS_TRC_CFG 0x9800
+#define TRCFIFOEMPTY 0x00000010U
+#define TRCIGNOREDROPINPUT 0x00000008U
+#define TRCKEEPDUPLICATES 0x00000004U
+#define TRCEN 0x00000002U
+#define TRCMULTIFILTER 0x00000001U
+
+#define MPS_TRC_RSS_CONTROL 0x9808
+#define RSSCONTROL_MASK 0x00ff0000U
+#define RSSCONTROL_SHIFT 16
+#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT)
+#define QUEUENUMBER_MASK 0x0000ffffU
+#define QUEUENUMBER_SHIFT 0
+#define QUEUENUMBER(x) ((x) << QUEUENUMBER_SHIFT)
+
+#define MPS_TRC_FILTER_MATCH_CTL_A 0x9810
+#define TFINVERTMATCH 0x01000000U
+#define TFPKTTOOLARGE 0x00800000U
+#define TFEN 0x00400000U
+#define TFPORT_MASK 0x003c0000U
+#define TFPORT_SHIFT 18
+#define TFPORT(x) ((x) << TFPORT_SHIFT)
+#define TFPORT_GET(x) (((x) & TFPORT_MASK) >> TFPORT_SHIFT)
+#define TFDROP 0x00020000U
+#define TFSOPEOPERR 0x00010000U
+#define TFLENGTH_MASK 0x00001f00U
+#define TFLENGTH_SHIFT 8
+#define TFLENGTH(x) ((x) << TFLENGTH_SHIFT)
+#define TFLENGTH_GET(x) (((x) & TFLENGTH_MASK) >> TFLENGTH_SHIFT)
+#define TFOFFSET_MASK 0x0000001fU
+#define TFOFFSET_SHIFT 0
+#define TFOFFSET(x) ((x) << TFOFFSET_SHIFT)
+#define TFOFFSET_GET(x) (((x) & TFOFFSET_MASK) >> TFOFFSET_SHIFT)
+
+#define MPS_TRC_FILTER_MATCH_CTL_B 0x9820
+#define TFMINPKTSIZE_MASK 0x01ff0000U
+#define TFMINPKTSIZE_SHIFT 16
+#define TFMINPKTSIZE(x) ((x) << TFMINPKTSIZE_SHIFT)
+#define TFMINPKTSIZE_GET(x) (((x) & TFMINPKTSIZE_MASK) >> TFMINPKTSIZE_SHIFT)
+#define TFCAPTUREMAX_MASK 0x00003fffU
+#define TFCAPTUREMAX_SHIFT 0
+#define TFCAPTUREMAX(x) ((x) << TFCAPTUREMAX_SHIFT)
+#define TFCAPTUREMAX_GET(x) (((x) & TFCAPTUREMAX_MASK) >> TFCAPTUREMAX_SHIFT)
+
+#define MPS_TRC_INT_CAUSE 0x985c
+#define MISCPERR 0x00000100U
+#define PKTFIFO 0x000000f0U
+#define FILTMEM 0x0000000fU
+
+#define MPS_TRC_FILTER0_MATCH 0x9c00
+#define MPS_TRC_FILTER0_DONT_CARE 0x9c80
+#define MPS_TRC_FILTER1_MATCH 0x9d00
+#define MPS_CLS_INT_CAUSE 0xd028
+#define PLERRENB 0x00000008U
+#define HASHSRAM 0x00000004U
+#define MATCHTCAM 0x00000002U
+#define MATCHSRAM 0x00000001U
+
+#define MPS_RX_PERR_INT_CAUSE 0x11074
+
+#define CPL_INTR_CAUSE 0x19054
+#define CIM_OP_MAP_PERR 0x00000020U
+#define CIM_OVFL_ERROR 0x00000010U
+#define TP_FRAMING_ERROR 0x00000008U
+#define SGE_FRAMING_ERROR 0x00000004U
+#define CIM_FRAMING_ERROR 0x00000002U
+#define ZERO_SWITCH_ERROR 0x00000001U
+
+#define SMB_INT_CAUSE 0x19090
+#define MSTTXFIFOPARINT 0x00200000U
+#define MSTRXFIFOPARINT 0x00100000U
+#define SLVFIFOPARINT 0x00080000U
+
+#define ULP_RX_INT_CAUSE 0x19158
+#define ULP_RX_ISCSI_TAGMASK 0x19164
+#define ULP_RX_ISCSI_PSZ 0x19168
+#define HPZ3_MASK 0x0f000000U
+#define HPZ3_SHIFT 24
+#define HPZ3(x) ((x) << HPZ3_SHIFT)
+#define HPZ2_MASK 0x000f0000U
+#define HPZ2_SHIFT 16
+#define HPZ2(x) ((x) << HPZ2_SHIFT)
+#define HPZ1_MASK 0x00000f00U
+#define HPZ1_SHIFT 8
+#define HPZ1(x) ((x) << HPZ1_SHIFT)
+#define HPZ0_MASK 0x0000000fU
+#define HPZ0_SHIFT 0
+#define HPZ0(x) ((x) << HPZ0_SHIFT)
+
+#define ULP_RX_TDDP_PSZ 0x19178
+
+#define SF_DATA 0x193f8
+#define SF_OP 0x193fc
+#define BUSY 0x80000000U
+#define SF_LOCK 0x00000010U
+#define SF_CONT 0x00000008U
+#define BYTECNT_MASK 0x00000006U
+#define BYTECNT_SHIFT 1
+#define BYTECNT(x) ((x) << BYTECNT_SHIFT)
+#define OP_WR 0x00000001U
+
+#define PL_PF_INT_CAUSE 0x3c0
+#define PFSW 0x00000008U
+#define PFSGE 0x00000004U
+#define PFCIM 0x00000002U
+#define PFMPS 0x00000001U
+
+#define PL_PF_INT_ENABLE 0x3c4
+#define PL_PF_CTL 0x3c8
+#define SWINT 0x00000001U
+
+#define PL_WHOAMI 0x19400
+#define SOURCEPF_MASK 0x00000700U
+#define SOURCEPF_SHIFT 8
+#define SOURCEPF(x) ((x) << SOURCEPF_SHIFT)
+#define SOURCEPF_GET(x) (((x) & SOURCEPF_MASK) >> SOURCEPF_SHIFT)
+#define ISVF 0x00000080U
+#define VFID_MASK 0x0000007fU
+#define VFID_SHIFT 0
+#define VFID(x) ((x) << VFID_SHIFT)
+#define VFID_GET(x) (((x) & VFID_MASK) >> VFID_SHIFT)
+
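+/*
+ * Usage sketch (hypothetical; assumes a t4_read_reg(adap, reg) accessor):
+ *
+ *	u32 whoami = t4_read_reg(adap, PL_WHOAMI);
+ *	unsigned int pf = SOURCEPF_GET(whoami);
+ *	bool is_vf = (whoami & ISVF) != 0;
+ *	unsigned int vf = VFID_GET(whoami);
+ */
+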
+#define PL_INT_CAUSE 0x1940c
+#define ULP_TX 0x08000000U
+#define SGE 0x04000000U
+#define HMA 0x02000000U
+#define CPL_SWITCH 0x01000000U
+#define ULP_RX 0x00800000U
+#define PM_RX 0x00400000U
+#define PM_TX 0x00200000U
+#define MA 0x00100000U
+#define TP 0x00080000U
+#define LE 0x00040000U
+#define EDC1 0x00020000U
+#define EDC0 0x00010000U
+#define MC 0x00008000U
+#define PCIE 0x00004000U
+#define PMU 0x00002000U
+#define XGMAC_KR1 0x00001000U
+#define XGMAC_KR0 0x00000800U
+#define XGMAC1 0x00000400U
+#define XGMAC0 0x00000200U
+#define SMB 0x00000100U
+#define SF 0x00000080U
+#define PL 0x00000040U
+#define NCSI 0x00000020U
+#define MPS 0x00000010U
+#define MI 0x00000008U
+#define DBG 0x00000004U
+#define I2CM 0x00000002U
+#define CIM 0x00000001U
+
+#define PL_INT_MAP0 0x19414
+#define PL_RST 0x19428
+#define PIORST 0x00000002U
+#define PIORSTMODE 0x00000001U
+
+#define PL_PL_INT_CAUSE 0x19430
+#define FATALPERR 0x00000010U
+#define PERRVFID 0x00000001U
+
+#define PL_REV 0x1943c
+
+#define LE_DB_CONFIG 0x19c04
+#define HASHEN 0x00100000U
+
+#define LE_DB_SERVER_INDEX 0x19c18
+#define LE_DB_ACT_CNT_IPV4 0x19c20
+#define LE_DB_ACT_CNT_IPV6 0x19c24
+
+#define LE_DB_INT_CAUSE 0x19c3c
+#define REQQPARERR 0x00010000U
+#define UNKNOWNCMD 0x00008000U
+#define PARITYERR 0x00000040U
+#define LIPMISS 0x00000020U
+#define LIP0 0x00000010U
+
+#define LE_DB_TID_HASHBASE 0x19df8
+
+#define NCSI_INT_CAUSE 0x1a0d8
+#define CIM_DM_PRTY_ERR 0x00000100U
+#define MPS_DM_PRTY_ERR 0x00000080U
+#define TXFIFO_PRTY_ERR 0x00000002U
+#define RXFIFO_PRTY_ERR 0x00000001U
+
+#define XGMAC_PORT_CFG2 0x1018
+#define PATEN 0x00040000U
+#define MAGICEN 0x00020000U
+
+#define XGMAC_PORT_MAGIC_MACID_LO 0x1024
+#define XGMAC_PORT_MAGIC_MACID_HI 0x1028
+
+#define XGMAC_PORT_EPIO_DATA0 0x10c0
+#define XGMAC_PORT_EPIO_DATA1 0x10c4
+#define XGMAC_PORT_EPIO_DATA2 0x10c8
+#define XGMAC_PORT_EPIO_DATA3 0x10cc
+#define XGMAC_PORT_EPIO_OP 0x10d0
+#define EPIOWR 0x00000100U
+#define ADDRESS_MASK 0x000000ffU
+#define ADDRESS_SHIFT 0
+#define ADDRESS(x) ((x) << ADDRESS_SHIFT)
+
+#define XGMAC_PORT_INT_CAUSE 0x10dc
+#endif /* __T4_REGS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
new file mode 100644
index 000000000000..edcfd7ec7802
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -0,0 +1,1623 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _T4FW_INTERFACE_H_
+#define _T4FW_INTERFACE_H_
+
+#define FW_T4VF_SGE_BASE_ADDR 0x0000
+#define FW_T4VF_MPS_BASE_ADDR 0x0100
+#define FW_T4VF_PL_BASE_ADDR 0x0200
+#define FW_T4VF_MBDATA_BASE_ADDR 0x0240
+#define FW_T4VF_CIM_BASE_ADDR 0x0300
+
+enum fw_wr_opcodes {
+ FW_FILTER_WR = 0x02,
+ FW_ULPTX_WR = 0x04,
+ FW_TP_WR = 0x05,
+ FW_ETH_TX_PKT_WR = 0x08,
+ FW_FLOWC_WR = 0x0a,
+ FW_OFLD_TX_DATA_WR = 0x0b,
+ FW_CMD_WR = 0x10,
+ FW_ETH_TX_PKT_VM_WR = 0x11,
+ FW_RI_RES_WR = 0x0c,
+ FW_RI_INIT_WR = 0x0d,
+ FW_RI_RDMA_WRITE_WR = 0x14,
+ FW_RI_SEND_WR = 0x15,
+ FW_RI_RDMA_READ_WR = 0x16,
+ FW_RI_RECV_WR = 0x17,
+ FW_RI_BIND_MW_WR = 0x18,
+ FW_RI_FR_NSMR_WR = 0x19,
+ FW_RI_INV_LSTAG_WR = 0x1a,
+ FW_LASTC2E_WR = 0x40
+};
+
+struct fw_wr_hdr {
+ __be32 hi;
+ __be32 lo;
+};
+
+#define FW_WR_OP(x) ((x) << 24)
+#define FW_WR_ATOMIC(x) ((x) << 23)
+#define FW_WR_FLUSH(x) ((x) << 22)
+#define FW_WR_COMPL(x) ((x) << 21)
+#define FW_WR_IMMDLEN_MASK 0xff
+#define FW_WR_IMMDLEN(x) ((x) << 0)
+
+#define FW_WR_EQUIQ (1U << 31)
+#define FW_WR_EQUEQ (1U << 30)
+#define FW_WR_FLOWID(x) ((x) << 8)
+#define FW_WR_LEN16(x) ((x) << 0)
+
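+/*
+ * Illustrative composition sketch (hypothetical caller, using only the
+ * macros above): the two flits of a work request header would typically
+ * be built along the lines of
+ *
+ *	wr->hi = cpu_to_be32(FW_WR_OP(FW_TP_WR) | FW_WR_COMPL(1) |
+ *			     FW_WR_IMMDLEN(immd_len));
+ *	wr->lo = cpu_to_be32(FW_WR_FLOWID(flowid) | FW_WR_LEN16(len16));
+ */
+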
+struct fw_ulptx_wr {
+ __be32 op_to_compl;
+ __be32 flowid_len16;
+ u64 cookie;
+};
+
+struct fw_tp_wr {
+ __be32 op_to_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+};
+
+struct fw_eth_tx_pkt_wr {
+ __be32 op_immdlen;
+ __be32 equiq_to_len16;
+ __be64 r3;
+};
+
+enum fw_flowc_mnem {
+ FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */
+ FW_FLOWC_MNEM_CH,
+ FW_FLOWC_MNEM_PORT,
+ FW_FLOWC_MNEM_IQID,
+ FW_FLOWC_MNEM_SNDNXT,
+ FW_FLOWC_MNEM_RCVNXT,
+ FW_FLOWC_MNEM_SNDBUF,
+ FW_FLOWC_MNEM_MSS,
+};
+
+struct fw_flowc_mnemval {
+ u8 mnemonic;
+ u8 r4[3];
+ __be32 val;
+};
+
+struct fw_flowc_wr {
+ __be32 op_to_nparams;
+#define FW_FLOWC_WR_NPARAMS(x) ((x) << 0)
+ __be32 flowid_len16;
+ struct fw_flowc_mnemval mnemval[0];
+};
+
+struct fw_ofld_tx_data_wr {
+ __be32 op_to_immdlen;
+ __be32 flowid_len16;
+ __be32 plen;
+ __be32 tunnel_to_proxy;
+#define FW_OFLD_TX_DATA_WR_TUNNEL(x) ((x) << 19)
+#define FW_OFLD_TX_DATA_WR_SAVE(x) ((x) << 18)
+#define FW_OFLD_TX_DATA_WR_FLUSH(x) ((x) << 17)
+#define FW_OFLD_TX_DATA_WR_URGENT(x) ((x) << 16)
+#define FW_OFLD_TX_DATA_WR_MORE(x) ((x) << 15)
+#define FW_OFLD_TX_DATA_WR_SHOVE(x) ((x) << 14)
+#define FW_OFLD_TX_DATA_WR_ULPMODE(x) ((x) << 10)
+#define FW_OFLD_TX_DATA_WR_ULPSUBMODE(x) ((x) << 6)
+};
+
+struct fw_cmd_wr {
+ __be32 op_dma;
+#define FW_CMD_WR_DMA (1U << 17)
+ __be32 len16_pkd;
+ __be64 cookie_daddr;
+};
+
+struct fw_eth_tx_pkt_vm_wr {
+ __be32 op_immdlen;
+ __be32 equiq_to_len16;
+ __be32 r3[2];
+ u8 ethmacdst[6];
+ u8 ethmacsrc[6];
+ __be16 ethtype;
+ __be16 vlantci;
+};
+
+#define FW_CMD_MAX_TIMEOUT 3000
+
+enum fw_cmd_opcodes {
+ FW_LDST_CMD = 0x01,
+ FW_RESET_CMD = 0x03,
+ FW_HELLO_CMD = 0x04,
+ FW_BYE_CMD = 0x05,
+ FW_INITIALIZE_CMD = 0x06,
+ FW_CAPS_CONFIG_CMD = 0x07,
+ FW_PARAMS_CMD = 0x08,
+ FW_PFVF_CMD = 0x09,
+ FW_IQ_CMD = 0x10,
+ FW_EQ_MNGT_CMD = 0x11,
+ FW_EQ_ETH_CMD = 0x12,
+ FW_EQ_CTRL_CMD = 0x13,
+ FW_EQ_OFLD_CMD = 0x21,
+ FW_VI_CMD = 0x14,
+ FW_VI_MAC_CMD = 0x15,
+ FW_VI_RXMODE_CMD = 0x16,
+ FW_VI_ENABLE_CMD = 0x17,
+ FW_ACL_MAC_CMD = 0x18,
+ FW_ACL_VLAN_CMD = 0x19,
+ FW_VI_STATS_CMD = 0x1a,
+ FW_PORT_CMD = 0x1b,
+ FW_PORT_STATS_CMD = 0x1c,
+ FW_PORT_LB_STATS_CMD = 0x1d,
+ FW_PORT_TRACE_CMD = 0x1e,
+ FW_PORT_TRACE_MMAP_CMD = 0x1f,
+ FW_RSS_IND_TBL_CMD = 0x20,
+ FW_RSS_GLB_CONFIG_CMD = 0x22,
+ FW_RSS_VI_CONFIG_CMD = 0x23,
+ FW_LASTC2E_CMD = 0x40,
+ FW_ERROR_CMD = 0x80,
+ FW_DEBUG_CMD = 0x81,
+};
+
+enum fw_cmd_cap {
+ FW_CMD_CAP_PF = 0x01,
+ FW_CMD_CAP_DMAQ = 0x02,
+ FW_CMD_CAP_PORT = 0x04,
+ FW_CMD_CAP_PORTPROMISC = 0x08,
+ FW_CMD_CAP_PORTSTATS = 0x10,
+ FW_CMD_CAP_VF = 0x80,
+};
+
+/*
+ * Generic command header flit0
+ */
+struct fw_cmd_hdr {
+ __be32 hi;
+ __be32 lo;
+};
+
+#define FW_CMD_OP(x) ((x) << 24)
+#define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff)
+#define FW_CMD_REQUEST (1U << 23)
+#define FW_CMD_READ (1U << 22)
+#define FW_CMD_WRITE (1U << 21)
+#define FW_CMD_EXEC (1U << 20)
+#define FW_CMD_RAMASK(x) ((x) << 20)
+#define FW_CMD_RETVAL(x) ((x) << 8)
+#define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff)
+#define FW_CMD_LEN16(x) ((x) << 0)
+
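+/*
+ * Illustrative composition sketch (hypothetical caller): flit0 of a
+ * firmware command might be assembled as, e.g. for a write-type command,
+ *
+ *	c.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
+ *				    FW_CMD_REQUEST | FW_CMD_WRITE);
+ *	c.retval_len16 = cpu_to_be32(FW_CMD_LEN16(sizeof(c) / 16));
+ *
+ * with the mailbox response's return value recovered afterwards via
+ * FW_CMD_RETVAL_GET(be32_to_cpu(c.retval_len16)).
+ */
+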
+enum fw_ldst_addrspc {
+ FW_LDST_ADDRSPC_FIRMWARE = 0x0001,
+ FW_LDST_ADDRSPC_SGE_EGRC = 0x0008,
+ FW_LDST_ADDRSPC_SGE_INGC = 0x0009,
+ FW_LDST_ADDRSPC_SGE_FLMC = 0x000a,
+ FW_LDST_ADDRSPC_SGE_CONMC = 0x000b,
+ FW_LDST_ADDRSPC_TP_PIO = 0x0010,
+ FW_LDST_ADDRSPC_TP_TM_PIO = 0x0011,
+ FW_LDST_ADDRSPC_TP_MIB = 0x0012,
+ FW_LDST_ADDRSPC_MDIO = 0x0018,
+ FW_LDST_ADDRSPC_MPS = 0x0020,
+ FW_LDST_ADDRSPC_FUNC = 0x0028
+};
+
+enum fw_ldst_mps_fid {
+ FW_LDST_MPS_ATRB,
+ FW_LDST_MPS_RPLC
+};
+
+enum fw_ldst_func_access_ctl {
+ FW_LDST_FUNC_ACC_CTL_VIID,
+ FW_LDST_FUNC_ACC_CTL_FID
+};
+
+enum fw_ldst_func_mod_index {
+ FW_LDST_FUNC_MPS
+};
+
+struct fw_ldst_cmd {
+ __be32 op_to_addrspace;
+#define FW_LDST_CMD_ADDRSPACE(x) ((x) << 0)
+ __be32 cycles_to_len16;
+ union fw_ldst {
+ struct fw_ldst_addrval {
+ __be32 addr;
+ __be32 val;
+ } addrval;
+ struct fw_ldst_idctxt {
+ __be32 physid;
+ __be32 msg_pkd;
+ __be32 ctxt_data7;
+ __be32 ctxt_data6;
+ __be32 ctxt_data5;
+ __be32 ctxt_data4;
+ __be32 ctxt_data3;
+ __be32 ctxt_data2;
+ __be32 ctxt_data1;
+ __be32 ctxt_data0;
+ } idctxt;
+ struct fw_ldst_mdio {
+ __be16 paddr_mmd;
+ __be16 raddr;
+ __be16 vctl;
+ __be16 rval;
+ } mdio;
+ struct fw_ldst_mps {
+ __be16 fid_ctl;
+ __be16 rplcpf_pkd;
+ __be32 rplc127_96;
+ __be32 rplc95_64;
+ __be32 rplc63_32;
+ __be32 rplc31_0;
+ __be32 atrb;
+ __be16 vlan[16];
+ } mps;
+ struct fw_ldst_func {
+ u8 access_ctl;
+ u8 mod_index;
+ __be16 ctl_id;
+ __be32 offset;
+ __be64 data0;
+ __be64 data1;
+ } func;
+ } u;
+};
+
+#define FW_LDST_CMD_MSG(x) ((x) << 31)
+#define FW_LDST_CMD_PADDR(x) ((x) << 8)
+#define FW_LDST_CMD_MMD(x) ((x) << 0)
+#define FW_LDST_CMD_FID(x) ((x) << 15)
+#define FW_LDST_CMD_CTL(x) ((x) << 0)
+#define FW_LDST_CMD_RPLCPF(x) ((x) << 0)
+
+struct fw_reset_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be32 val;
+ __be32 r3;
+};
+
+struct fw_hello_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be32 err_to_mbasyncnot;
+#define FW_HELLO_CMD_ERR (1U << 31)
+#define FW_HELLO_CMD_INIT (1U << 30)
+#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29)
+#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28)
+#define FW_HELLO_CMD_MBMASTER(x) ((x) << 24)
+#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20)
+ __be32 fwrev;
+};
+
+struct fw_bye_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be64 r3;
+};
+
+struct fw_initialize_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be64 r3;
+};
+
+enum fw_caps_config_hm {
+ FW_CAPS_CONFIG_HM_PCIE = 0x00000001,
+ FW_CAPS_CONFIG_HM_PL = 0x00000002,
+ FW_CAPS_CONFIG_HM_SGE = 0x00000004,
+ FW_CAPS_CONFIG_HM_CIM = 0x00000008,
+ FW_CAPS_CONFIG_HM_ULPTX = 0x00000010,
+ FW_CAPS_CONFIG_HM_TP = 0x00000020,
+ FW_CAPS_CONFIG_HM_ULPRX = 0x00000040,
+ FW_CAPS_CONFIG_HM_PMRX = 0x00000080,
+ FW_CAPS_CONFIG_HM_PMTX = 0x00000100,
+ FW_CAPS_CONFIG_HM_MC = 0x00000200,
+ FW_CAPS_CONFIG_HM_LE = 0x00000400,
+ FW_CAPS_CONFIG_HM_MPS = 0x00000800,
+ FW_CAPS_CONFIG_HM_XGMAC = 0x00001000,
+ FW_CAPS_CONFIG_HM_CPLSWITCH = 0x00002000,
+ FW_CAPS_CONFIG_HM_T4DBG = 0x00004000,
+ FW_CAPS_CONFIG_HM_MI = 0x00008000,
+ FW_CAPS_CONFIG_HM_I2CM = 0x00010000,
+ FW_CAPS_CONFIG_HM_NCSI = 0x00020000,
+ FW_CAPS_CONFIG_HM_SMB = 0x00040000,
+ FW_CAPS_CONFIG_HM_MA = 0x00080000,
+ FW_CAPS_CONFIG_HM_EDRAM = 0x00100000,
+ FW_CAPS_CONFIG_HM_PMU = 0x00200000,
+ FW_CAPS_CONFIG_HM_UART = 0x00400000,
+ FW_CAPS_CONFIG_HM_SF = 0x00800000,
+};
+
+enum fw_caps_config_nbm {
+ FW_CAPS_CONFIG_NBM_IPMI = 0x00000001,
+ FW_CAPS_CONFIG_NBM_NCSI = 0x00000002,
+};
+
+enum fw_caps_config_link {
+ FW_CAPS_CONFIG_LINK_PPP = 0x00000001,
+ FW_CAPS_CONFIG_LINK_QFC = 0x00000002,
+ FW_CAPS_CONFIG_LINK_DCBX = 0x00000004,
+};
+
+enum fw_caps_config_switch {
+ FW_CAPS_CONFIG_SWITCH_INGRESS = 0x00000001,
+ FW_CAPS_CONFIG_SWITCH_EGRESS = 0x00000002,
+};
+
+enum fw_caps_config_nic {
+ FW_CAPS_CONFIG_NIC = 0x00000001,
+ FW_CAPS_CONFIG_NIC_VM = 0x00000002,
+};
+
+enum fw_caps_config_ofld {
+ FW_CAPS_CONFIG_OFLD = 0x00000001,
+};
+
+enum fw_caps_config_rdma {
+ FW_CAPS_CONFIG_RDMA_RDDP = 0x00000001,
+ FW_CAPS_CONFIG_RDMA_RDMAC = 0x00000002,
+};
+
+enum fw_caps_config_iscsi {
+ FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU = 0x00000001,
+ FW_CAPS_CONFIG_ISCSI_TARGET_PDU = 0x00000002,
+ FW_CAPS_CONFIG_ISCSI_INITIATOR_CNXOFLD = 0x00000004,
+ FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008,
+};
+
+enum fw_caps_config_fcoe {
+ FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001,
+ FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002,
+};
+
+struct fw_caps_config_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be32 r2;
+ __be32 hwmbitmap;
+ __be16 nbmcaps;
+ __be16 linkcaps;
+ __be16 switchcaps;
+ __be16 r3;
+ __be16 niccaps;
+ __be16 ofldcaps;
+ __be16 rdmacaps;
+ __be16 r4;
+ __be16 iscsicaps;
+ __be16 fcoecaps;
+ __be32 r5;
+ __be64 r6;
+};
+
+/*
+ * params command mnemonics
+ */
+enum fw_params_mnem {
+ FW_PARAMS_MNEM_DEV = 1, /* device params */
+ FW_PARAMS_MNEM_PFVF = 2, /* function params */
+ FW_PARAMS_MNEM_REG = 3, /* limited register access */
+ FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */
+ FW_PARAMS_MNEM_LAST
+};
+
+/*
+ * device parameters
+ */
+enum fw_params_param_dev {
+ FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */
+ FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */
+ FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs
+ * allocated by the device's
+ * Lookup Engine
+ */
+ FW_PARAMS_PARAM_DEV_FLOWC_BUFFIFO_SZ = 0x03,
+ FW_PARAMS_PARAM_DEV_INTVER_NIC = 0x04,
+ FW_PARAMS_PARAM_DEV_INTVER_VNIC = 0x05,
+ FW_PARAMS_PARAM_DEV_INTVER_OFLD = 0x06,
+ FW_PARAMS_PARAM_DEV_INTVER_RI = 0x07,
+ FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08,
+ FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09,
+ FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A,
+ FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
+ FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
+};
+
+/*
+ * physical and virtual function parameters
+ */
+enum fw_params_param_pfvf {
+ FW_PARAMS_PARAM_PFVF_RWXCAPS = 0x00,
+ FW_PARAMS_PARAM_PFVF_ROUTE_START = 0x01,
+ FW_PARAMS_PARAM_PFVF_ROUTE_END = 0x02,
+ FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03,
+ FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04,
+ FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05,
+ FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06,
+ FW_PARAMS_PARAM_PFVF_SERVER_START = 0x07,
+ FW_PARAMS_PARAM_PFVF_SERVER_END = 0x08,
+ FW_PARAMS_PARAM_PFVF_TDDP_START = 0x09,
+ FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A,
+ FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B,
+ FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C,
+ FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D,
+ FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E,
+ FW_PARAMS_PARAM_PFVF_RQ_START = 0x0F,
+ FW_PARAMS_PARAM_PFVF_RQ_END = 0x10,
+ FW_PARAMS_PARAM_PFVF_PBL_START = 0x11,
+ FW_PARAMS_PARAM_PFVF_PBL_END = 0x12,
+ FW_PARAMS_PARAM_PFVF_L2T_START = 0x13,
+ FW_PARAMS_PARAM_PFVF_L2T_END = 0x14,
+ FW_PARAMS_PARAM_PFVF_SQRQ_START = 0x15,
+ FW_PARAMS_PARAM_PFVF_SQRQ_END = 0x16,
+ FW_PARAMS_PARAM_PFVF_CQ_START = 0x17,
+ FW_PARAMS_PARAM_PFVF_CQ_END = 0x18,
+ FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20,
+ FW_PARAMS_PARAM_PFVF_VIID = 0x24,
+ FW_PARAMS_PARAM_PFVF_CPMASK = 0x25,
+ FW_PARAMS_PARAM_PFVF_OCQ_START = 0x26,
+ FW_PARAMS_PARAM_PFVF_OCQ_END = 0x27,
+ FW_PARAMS_PARAM_PFVF_CONM_MAP = 0x28,
+ FW_PARAMS_PARAM_PFVF_IQFLINT_START = 0x29,
+ FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A,
+ FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B,
+ FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C,
+};
+
+/*
+ * dma queue parameters
+ */
+enum fw_params_param_dmaq {
+ FW_PARAMS_PARAM_DMAQ_IQ_DCAEN_DCACPU = 0x00,
+ FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01,
+ FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10,
+ FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11,
+ FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12,
+};
+
+#define FW_PARAMS_MNEM(x) ((x) << 24)
+#define FW_PARAMS_PARAM_X(x) ((x) << 16)
+#define FW_PARAMS_PARAM_Y(x) ((x) << 8)
+#define FW_PARAMS_PARAM_Z(x) ((x) << 0)
+#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0)
+#define FW_PARAMS_PARAM_YZ(x) ((x) << 0)
+
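+/*
+ * Illustrative sketch (hypothetical caller): a parameter identity is the
+ * OR of the mnemonic and the X/Y/Z sub-fields, e.g. to name the device's
+ * port vector parameter:
+ *
+ *	__be32 param = cpu_to_be32(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ *			FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC));
+ */
+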
+struct fw_params_cmd {
+ __be32 op_to_vfn;
+ __be32 retval_len16;
+ struct fw_params_param {
+ __be32 mnem;
+ __be32 val;
+ } param[7];
+};
+
+#define FW_PARAMS_CMD_PFN(x) ((x) << 8)
+#define FW_PARAMS_CMD_VFN(x) ((x) << 0)
+
+struct fw_pfvf_cmd {
+ __be32 op_to_vfn;
+ __be32 retval_len16;
+ __be32 niqflint_niq;
+ __be32 type_to_neq;
+ __be32 tc_to_nexactf;
+ __be32 r_caps_to_nethctrl;
+ __be16 nricq;
+ __be16 nriqp;
+ __be32 r4;
+};
+
+#define FW_PFVF_CMD_PFN(x) ((x) << 8)
+#define FW_PFVF_CMD_VFN(x) ((x) << 0)
+
+#define FW_PFVF_CMD_NIQFLINT(x) ((x) << 20)
+#define FW_PFVF_CMD_NIQFLINT_GET(x) (((x) >> 20) & 0xfff)
+
+#define FW_PFVF_CMD_NIQ(x) ((x) << 0)
+#define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff)
+
+#define FW_PFVF_CMD_TYPE (1U << 31)
+#define FW_PFVF_CMD_TYPE_GET(x) (((x) >> 31) & 0x1)
+
+#define FW_PFVF_CMD_CMASK(x) ((x) << 24)
+#define FW_PFVF_CMD_CMASK_MASK 0xf
+#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & FW_PFVF_CMD_CMASK_MASK)
+
+#define FW_PFVF_CMD_PMASK(x) ((x) << 20)
+#define FW_PFVF_CMD_PMASK_MASK 0xf
+#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & FW_PFVF_CMD_PMASK_MASK)
+
+#define FW_PFVF_CMD_NEQ(x) ((x) << 0)
+#define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff)
+
+#define FW_PFVF_CMD_TC(x) ((x) << 24)
+#define FW_PFVF_CMD_TC_GET(x) (((x) >> 24) & 0xff)
+
+#define FW_PFVF_CMD_NVI(x) ((x) << 16)
+#define FW_PFVF_CMD_NVI_GET(x) (((x) >> 16) & 0xff)
+
+#define FW_PFVF_CMD_NEXACTF(x) ((x) << 0)
+#define FW_PFVF_CMD_NEXACTF_GET(x) (((x) >> 0) & 0xffff)
+
+#define FW_PFVF_CMD_R_CAPS(x) ((x) << 24)
+#define FW_PFVF_CMD_R_CAPS_GET(x) (((x) >> 24) & 0xff)
+
+#define FW_PFVF_CMD_WX_CAPS(x) ((x) << 16)
+#define FW_PFVF_CMD_WX_CAPS_GET(x) (((x) >> 16) & 0xff)
+
+#define FW_PFVF_CMD_NETHCTRL(x) ((x) << 0)
+#define FW_PFVF_CMD_NETHCTRL_GET(x) (((x) >> 0) & 0xffff)
+
+enum fw_iq_type {
+ FW_IQ_TYPE_FL_INT_CAP,
+ FW_IQ_TYPE_NO_FL_INT_CAP
+};
+
+struct fw_iq_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be16 physiqid;
+ __be16 iqid;
+ __be16 fl0id;
+ __be16 fl1id;
+ __be32 type_to_iqandstindex;
+ __be16 iqdroprss_to_iqesize;
+ __be16 iqsize;
+ __be64 iqaddr;
+ __be32 iqns_to_fl0congen;
+ __be16 fl0dcaen_to_fl0cidxfthresh;
+ __be16 fl0size;
+ __be64 fl0addr;
+ __be32 fl1cngchmap_to_fl1congen;
+ __be16 fl1dcaen_to_fl1cidxfthresh;
+ __be16 fl1size;
+ __be64 fl1addr;
+};
+
+#define FW_IQ_CMD_PFN(x) ((x) << 8)
+#define FW_IQ_CMD_VFN(x) ((x) << 0)
+
+#define FW_IQ_CMD_ALLOC (1U << 31)
+#define FW_IQ_CMD_FREE (1U << 30)
+#define FW_IQ_CMD_MODIFY (1U << 29)
+#define FW_IQ_CMD_IQSTART(x) ((x) << 28)
+#define FW_IQ_CMD_IQSTOP(x) ((x) << 27)
+
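+/*
+ * Illustrative composition sketch (hypothetical caller): allocating and
+ * immediately starting an ingress queue would typically combine
+ *
+ *	c.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
+ *				  FW_CMD_WRITE | FW_CMD_EXEC |
+ *				  FW_IQ_CMD_PFN(pf) | FW_IQ_CMD_VFN(vf));
+ *	c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC |
+ *				       FW_IQ_CMD_IQSTART(1) |
+ *				       FW_CMD_LEN16(sizeof(c) / 16));
+ */
+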
+#define FW_IQ_CMD_TYPE(x) ((x) << 29)
+#define FW_IQ_CMD_IQASYNCH(x) ((x) << 28)
+#define FW_IQ_CMD_VIID(x) ((x) << 16)
+#define FW_IQ_CMD_IQANDST(x) ((x) << 15)
+#define FW_IQ_CMD_IQANUS(x) ((x) << 14)
+#define FW_IQ_CMD_IQANUD(x) ((x) << 12)
+#define FW_IQ_CMD_IQANDSTINDEX(x) ((x) << 0)
+
+#define FW_IQ_CMD_IQDROPRSS (1U << 15)
+#define FW_IQ_CMD_IQGTSMODE (1U << 14)
+#define FW_IQ_CMD_IQPCIECH(x) ((x) << 12)
+#define FW_IQ_CMD_IQDCAEN(x) ((x) << 11)
+#define FW_IQ_CMD_IQDCACPU(x) ((x) << 6)
+#define FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << 4)
+#define FW_IQ_CMD_IQO (1U << 3)
+#define FW_IQ_CMD_IQCPRIO(x) ((x) << 2)
+#define FW_IQ_CMD_IQESIZE(x) ((x) << 0)
+
+#define FW_IQ_CMD_IQNS(x) ((x) << 31)
+#define FW_IQ_CMD_IQRO(x) ((x) << 30)
+#define FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << 28)
+#define FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << 27)
+#define FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << 26)
+#define FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << 20)
+#define FW_IQ_CMD_FL0CACHELOCK(x) ((x) << 15)
+#define FW_IQ_CMD_FL0DBP(x) ((x) << 14)
+#define FW_IQ_CMD_FL0DATANS(x) ((x) << 13)
+#define FW_IQ_CMD_FL0DATARO(x) ((x) << 12)
+#define FW_IQ_CMD_FL0CONGCIF(x) ((x) << 11)
+#define FW_IQ_CMD_FL0ONCHIP(x) ((x) << 10)
+#define FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << 9)
+#define FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << 8)
+#define FW_IQ_CMD_FL0FETCHNS(x) ((x) << 7)
+#define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6)
+#define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4)
+#define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3)
+#define FW_IQ_CMD_FL0PADEN (1U << 2)
+#define FW_IQ_CMD_FL0PACKEN (1U << 1)
+#define FW_IQ_CMD_FL0CONGEN (1U << 0)
+
+#define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15)
+#define FW_IQ_CMD_FL0DCACPU(x) ((x) << 10)
+#define FW_IQ_CMD_FL0FBMIN(x) ((x) << 7)
+#define FW_IQ_CMD_FL0FBMAX(x) ((x) << 4)
+#define FW_IQ_CMD_FL0CIDXFTHRESHO (1U << 3)
+#define FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << 0)
+
+#define FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << 20)
+#define FW_IQ_CMD_FL1CACHELOCK(x) ((x) << 15)
+#define FW_IQ_CMD_FL1DBP(x) ((x) << 14)
+#define FW_IQ_CMD_FL1DATANS(x) ((x) << 13)
+#define FW_IQ_CMD_FL1DATARO(x) ((x) << 12)
+#define FW_IQ_CMD_FL1CONGCIF(x) ((x) << 11)
+#define FW_IQ_CMD_FL1ONCHIP(x) ((x) << 10)
+#define FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << 9)
+#define FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << 8)
+#define FW_IQ_CMD_FL1FETCHNS(x) ((x) << 7)
+#define FW_IQ_CMD_FL1FETCHRO(x) ((x) << 6)
+#define FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) << 4)
+#define FW_IQ_CMD_FL1CPRIO(x) ((x) << 3)
+#define FW_IQ_CMD_FL1PADEN (1U << 2)
+#define FW_IQ_CMD_FL1PACKEN (1U << 1)
+#define FW_IQ_CMD_FL1CONGEN (1U << 0)
+
+#define FW_IQ_CMD_FL1DCAEN(x) ((x) << 15)
+#define FW_IQ_CMD_FL1DCACPU(x) ((x) << 10)
+#define FW_IQ_CMD_FL1FBMIN(x) ((x) << 7)
+#define FW_IQ_CMD_FL1FBMAX(x) ((x) << 4)
+#define FW_IQ_CMD_FL1CIDXFTHRESHO (1U << 3)
+#define FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << 0)
+
+struct fw_eq_eth_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be32 eqid_pkd;
+ __be32 physeqid_pkd;
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+ __be32 viid_pkd;
+ __be32 r8_lo;
+ __be64 r9;
+};
+
+#define FW_EQ_ETH_CMD_PFN(x) ((x) << 8)
+#define FW_EQ_ETH_CMD_VFN(x) ((x) << 0)
+#define FW_EQ_ETH_CMD_ALLOC (1U << 31)
+#define FW_EQ_ETH_CMD_FREE (1U << 30)
+#define FW_EQ_ETH_CMD_MODIFY (1U << 29)
+#define FW_EQ_ETH_CMD_EQSTART (1U << 28)
+#define FW_EQ_ETH_CMD_EQSTOP (1U << 27)
+
+#define FW_EQ_ETH_CMD_EQID(x) ((x) << 0)
+#define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
+#define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0)
+#define FW_EQ_ETH_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
+
+#define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26)
+#define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25)
+#define FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << 24)
+#define FW_EQ_ETH_CMD_FETCHNS(x) ((x) << 23)
+#define FW_EQ_ETH_CMD_FETCHRO(x) ((x) << 22)
+#define FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << 20)
+#define FW_EQ_ETH_CMD_CPRIO(x) ((x) << 19)
+#define FW_EQ_ETH_CMD_ONCHIP(x) ((x) << 18)
+#define FW_EQ_ETH_CMD_PCIECHN(x) ((x) << 16)
+#define FW_EQ_ETH_CMD_IQID(x) ((x) << 0)
+
+#define FW_EQ_ETH_CMD_DCAEN(x) ((x) << 31)
+#define FW_EQ_ETH_CMD_DCACPU(x) ((x) << 26)
+#define FW_EQ_ETH_CMD_FBMIN(x) ((x) << 23)
+#define FW_EQ_ETH_CMD_FBMAX(x) ((x) << 20)
+#define FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << 19)
+#define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16)
+#define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0)
+
+#define FW_EQ_ETH_CMD_VIID(x) ((x) << 16)
+
+struct fw_eq_ctrl_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be32 cmpliqid_eqid;
+ __be32 physeqid_pkd;
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+};
+
+#define FW_EQ_CTRL_CMD_PFN(x) ((x) << 8)
+#define FW_EQ_CTRL_CMD_VFN(x) ((x) << 0)
+
+#define FW_EQ_CTRL_CMD_ALLOC (1U << 31)
+#define FW_EQ_CTRL_CMD_FREE (1U << 30)
+#define FW_EQ_CTRL_CMD_MODIFY (1U << 29)
+#define FW_EQ_CTRL_CMD_EQSTART (1U << 28)
+#define FW_EQ_CTRL_CMD_EQSTOP (1U << 27)
+
+#define FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << 20)
+#define FW_EQ_CTRL_CMD_EQID(x) ((x) << 0)
+#define FW_EQ_CTRL_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
+#define FW_EQ_CTRL_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
+
+#define FW_EQ_CTRL_CMD_FETCHSZM (1U << 26)
+#define FW_EQ_CTRL_CMD_STATUSPGNS (1U << 25)
+#define FW_EQ_CTRL_CMD_STATUSPGRO (1U << 24)
+#define FW_EQ_CTRL_CMD_FETCHNS (1U << 23)
+#define FW_EQ_CTRL_CMD_FETCHRO (1U << 22)
+#define FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << 20)
+#define FW_EQ_CTRL_CMD_CPRIO(x) ((x) << 19)
+#define FW_EQ_CTRL_CMD_ONCHIP(x) ((x) << 18)
+#define FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << 16)
+#define FW_EQ_CTRL_CMD_IQID(x) ((x) << 0)
+
+#define FW_EQ_CTRL_CMD_DCAEN(x) ((x) << 31)
+#define FW_EQ_CTRL_CMD_DCACPU(x) ((x) << 26)
+#define FW_EQ_CTRL_CMD_FBMIN(x) ((x) << 23)
+#define FW_EQ_CTRL_CMD_FBMAX(x) ((x) << 20)
+#define FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) ((x) << 19)
+#define FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << 16)
+#define FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << 0)
+
+struct fw_eq_ofld_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be32 eqid_pkd;
+ __be32 physeqid_pkd;
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+};
+
+#define FW_EQ_OFLD_CMD_PFN(x) ((x) << 8)
+#define FW_EQ_OFLD_CMD_VFN(x) ((x) << 0)
+
+#define FW_EQ_OFLD_CMD_ALLOC (1U << 31)
+#define FW_EQ_OFLD_CMD_FREE (1U << 30)
+#define FW_EQ_OFLD_CMD_MODIFY (1U << 29)
+#define FW_EQ_OFLD_CMD_EQSTART (1U << 28)
+#define FW_EQ_OFLD_CMD_EQSTOP (1U << 27)
+
+#define FW_EQ_OFLD_CMD_EQID(x) ((x) << 0)
+#define FW_EQ_OFLD_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
+#define FW_EQ_OFLD_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
+
+#define FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << 26)
+#define FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << 25)
+#define FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << 24)
+#define FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << 23)
+#define FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << 22)
+#define FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << 20)
+#define FW_EQ_OFLD_CMD_CPRIO(x) ((x) << 19)
+#define FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << 18)
+#define FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << 16)
+#define FW_EQ_OFLD_CMD_IQID(x) ((x) << 0)
+
+#define FW_EQ_OFLD_CMD_DCAEN(x) ((x) << 31)
+#define FW_EQ_OFLD_CMD_DCACPU(x) ((x) << 26)
+#define FW_EQ_OFLD_CMD_FBMIN(x) ((x) << 23)
+#define FW_EQ_OFLD_CMD_FBMAX(x) ((x) << 20)
+#define FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) ((x) << 19)
+#define FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << 16)
+#define FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << 0)
+
+/*
+ * Macros for VIID parsing:
+ * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number
+ */
+#define FW_VIID_PFN_GET(x) (((x) >> 8) & 0x7)
+#define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1)
+#define FW_VIID_VIN_GET(x) (((x) >> 0) & 0x7F)
+
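+/*
+ * Parsing sketch: given a VIID returned by the firmware,
+ *
+ *	unsigned int pfn = FW_VIID_PFN_GET(viid);
+ *	unsigned int vivld = FW_VIID_VIVLD_GET(viid);
+ *	unsigned int vin = FW_VIID_VIN_GET(viid);
+ */
+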
+struct fw_vi_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be16 type_viid;
+ u8 mac[6];
+ u8 portid_pkd;
+ u8 nmac;
+ u8 nmac0[6];
+ __be16 rsssize_pkd;
+ u8 nmac1[6];
+ __be16 idsiiq_pkd;
+ u8 nmac2[6];
+ __be16 idseiq_pkd;
+ u8 nmac3[6];
+ __be64 r9;
+ __be64 r10;
+};
+
+#define FW_VI_CMD_PFN(x) ((x) << 8)
+#define FW_VI_CMD_VFN(x) ((x) << 0)
+#define FW_VI_CMD_ALLOC (1U << 31)
+#define FW_VI_CMD_FREE (1U << 30)
+#define FW_VI_CMD_VIID(x) ((x) << 0)
+#define FW_VI_CMD_VIID_GET(x) ((x) & 0xfff)
+#define FW_VI_CMD_PORTID(x) ((x) << 4)
+#define FW_VI_CMD_PORTID_GET(x) (((x) >> 4) & 0xf)
+#define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff)
+
+/* Special VI_MAC command index ids */
+#define FW_VI_MAC_ADD_MAC 0x3FF
+#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE
+#define FW_VI_MAC_MAC_BASED_FREE 0x3FD
+#define FW_CLS_TCAM_NUM_ENTRIES 336
+
+enum fw_vi_mac_smac {
+ FW_VI_MAC_MPS_TCAM_ENTRY,
+ FW_VI_MAC_MPS_TCAM_ONLY,
+ FW_VI_MAC_SMT_ONLY,
+ FW_VI_MAC_SMT_AND_MPSTCAM
+};
+
+enum fw_vi_mac_result {
+ FW_VI_MAC_R_SUCCESS,
+ FW_VI_MAC_R_F_NONEXISTENT_NOMEM,
+ FW_VI_MAC_R_SMAC_FAIL,
+ FW_VI_MAC_R_F_ACL_CHECK
+};
+
+struct fw_vi_mac_cmd {
+ __be32 op_to_viid;
+ __be32 freemacs_to_len16;
+ union fw_vi_mac {
+ struct fw_vi_mac_exact {
+ __be16 valid_to_idx;
+ u8 macaddr[6];
+ } exact[7];
+ struct fw_vi_mac_hash {
+ __be64 hashvec;
+ } hash;
+ } u;
+};
+
+#define FW_VI_MAC_CMD_VIID(x) ((x) << 0)
+#define FW_VI_MAC_CMD_FREEMACS(x) ((x) << 31)
+#define FW_VI_MAC_CMD_HASHVECEN (1U << 23)
+#define FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << 22)
+#define FW_VI_MAC_CMD_VALID (1U << 15)
+#define FW_VI_MAC_CMD_PRIO(x) ((x) << 12)
+#define FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << 10)
+#define FW_VI_MAC_CMD_SMAC_RESULT_GET(x) (((x) >> 10) & 0x3)
+#define FW_VI_MAC_CMD_IDX(x) ((x) << 0)
+#define FW_VI_MAC_CMD_IDX_GET(x) (((x) >> 0) & 0x3ff)
+
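+/*
+ * Illustrative sketch (hypothetical caller): to request allocation of a
+ * new exact-match entry, the special "add" index would typically be
+ * written into an exact-match slot's valid_to_idx:
+ *
+ *	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID |
+ *				      FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+ *	memcpy(p->macaddr, addr, sizeof(p->macaddr));
+ *
+ * with the index actually chosen by the firmware recovered afterwards via
+ * FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx)).
+ */
+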
+#define FW_RXMODE_MTU_NO_CHG 65535
+
+struct fw_vi_rxmode_cmd {
+ __be32 op_to_viid;
+ __be32 retval_len16;
+ __be32 mtu_to_vlanexen;
+ __be32 r4_lo;
+};
+
+#define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0)
+#define FW_VI_RXMODE_CMD_MTU_MASK 0xffff
+#define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16)
+#define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3
+#define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14)
+#define FW_VI_RXMODE_CMD_ALLMULTIEN_MASK 0x3
+#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12)
+#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3
+#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10)
+#define FW_VI_RXMODE_CMD_VLANEXEN_MASK 0x3
+#define FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << 8)
+
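+/*
+ * Illustrative sketch (hypothetical caller; assumes a field's all-ones
+ * _MASK value means "leave unchanged", as FW_RXMODE_MTU_NO_CHG does for
+ * the MTU): enabling promiscuous mode while touching nothing else might
+ * look like
+ *
+ *	c.mtu_to_vlanexen = cpu_to_be32(
+ *		FW_VI_RXMODE_CMD_MTU(FW_RXMODE_MTU_NO_CHG) |
+ *		FW_VI_RXMODE_CMD_PROMISCEN(1) |
+ *		FW_VI_RXMODE_CMD_ALLMULTIEN(FW_VI_RXMODE_CMD_ALLMULTIEN_MASK) |
+ *		FW_VI_RXMODE_CMD_BROADCASTEN(FW_VI_RXMODE_CMD_BROADCASTEN_MASK) |
+ *		FW_VI_RXMODE_CMD_VLANEXEN(FW_VI_RXMODE_CMD_VLANEXEN_MASK));
+ */
+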
+struct fw_vi_enable_cmd {
+ __be32 op_to_viid;
+ __be32 ien_to_len16;
+ __be16 blinkdur;
+ __be16 r3;
+ __be32 r4;
+};
+
+#define FW_VI_ENABLE_CMD_VIID(x) ((x) << 0)
+#define FW_VI_ENABLE_CMD_IEN(x) ((x) << 31)
+#define FW_VI_ENABLE_CMD_EEN(x) ((x) << 30)
+#define FW_VI_ENABLE_CMD_LED (1U << 29)
+
+/* VI VF stats offset definitions */
+#define VI_VF_NUM_STATS 16
+enum fw_vi_stats_vf_index {
+ FW_VI_VF_STAT_TX_BCAST_BYTES_IX,
+ FW_VI_VF_STAT_TX_BCAST_FRAMES_IX,
+ FW_VI_VF_STAT_TX_MCAST_BYTES_IX,
+ FW_VI_VF_STAT_TX_MCAST_FRAMES_IX,
+ FW_VI_VF_STAT_TX_UCAST_BYTES_IX,
+ FW_VI_VF_STAT_TX_UCAST_FRAMES_IX,
+ FW_VI_VF_STAT_TX_DROP_FRAMES_IX,
+ FW_VI_VF_STAT_TX_OFLD_BYTES_IX,
+ FW_VI_VF_STAT_TX_OFLD_FRAMES_IX,
+ FW_VI_VF_STAT_RX_BCAST_BYTES_IX,
+ FW_VI_VF_STAT_RX_BCAST_FRAMES_IX,
+ FW_VI_VF_STAT_RX_MCAST_BYTES_IX,
+ FW_VI_VF_STAT_RX_MCAST_FRAMES_IX,
+ FW_VI_VF_STAT_RX_UCAST_BYTES_IX,
+ FW_VI_VF_STAT_RX_UCAST_FRAMES_IX,
+ FW_VI_VF_STAT_RX_ERR_FRAMES_IX
+};
+
+/* VI PF stats offset definitions */
+#define VI_PF_NUM_STATS 17
+enum fw_vi_stats_pf_index {
+ FW_VI_PF_STAT_TX_BCAST_BYTES_IX,
+ FW_VI_PF_STAT_TX_BCAST_FRAMES_IX,
+ FW_VI_PF_STAT_TX_MCAST_BYTES_IX,
+ FW_VI_PF_STAT_TX_MCAST_FRAMES_IX,
+ FW_VI_PF_STAT_TX_UCAST_BYTES_IX,
+ FW_VI_PF_STAT_TX_UCAST_FRAMES_IX,
+ FW_VI_PF_STAT_TX_OFLD_BYTES_IX,
+ FW_VI_PF_STAT_TX_OFLD_FRAMES_IX,
+ FW_VI_PF_STAT_RX_BYTES_IX,
+ FW_VI_PF_STAT_RX_FRAMES_IX,
+ FW_VI_PF_STAT_RX_BCAST_BYTES_IX,
+ FW_VI_PF_STAT_RX_BCAST_FRAMES_IX,
+ FW_VI_PF_STAT_RX_MCAST_BYTES_IX,
+ FW_VI_PF_STAT_RX_MCAST_FRAMES_IX,
+ FW_VI_PF_STAT_RX_UCAST_BYTES_IX,
+ FW_VI_PF_STAT_RX_UCAST_FRAMES_IX,
+ FW_VI_PF_STAT_RX_ERR_FRAMES_IX
+};
+
+struct fw_vi_stats_cmd {
+ __be32 op_to_viid;
+ __be32 retval_len16;
+ union fw_vi_stats {
+ struct fw_vi_stats_ctl {
+ __be16 nstats_ix;
+ __be16 r6;
+ __be32 r7;
+ __be64 stat0;
+ __be64 stat1;
+ __be64 stat2;
+ __be64 stat3;
+ __be64 stat4;
+ __be64 stat5;
+ } ctl;
+ struct fw_vi_stats_pf {
+ __be64 tx_bcast_bytes;
+ __be64 tx_bcast_frames;
+ __be64 tx_mcast_bytes;
+ __be64 tx_mcast_frames;
+ __be64 tx_ucast_bytes;
+ __be64 tx_ucast_frames;
+ __be64 tx_offload_bytes;
+ __be64 tx_offload_frames;
+ __be64 rx_pf_bytes;
+ __be64 rx_pf_frames;
+ __be64 rx_bcast_bytes;
+ __be64 rx_bcast_frames;
+ __be64 rx_mcast_bytes;
+ __be64 rx_mcast_frames;
+ __be64 rx_ucast_bytes;
+ __be64 rx_ucast_frames;
+ __be64 rx_err_frames;
+ } pf;
+ struct fw_vi_stats_vf {
+ __be64 tx_bcast_bytes;
+ __be64 tx_bcast_frames;
+ __be64 tx_mcast_bytes;
+ __be64 tx_mcast_frames;
+ __be64 tx_ucast_bytes;
+ __be64 tx_ucast_frames;
+ __be64 tx_drop_frames;
+ __be64 tx_offload_bytes;
+ __be64 tx_offload_frames;
+ __be64 rx_bcast_bytes;
+ __be64 rx_bcast_frames;
+ __be64 rx_mcast_bytes;
+ __be64 rx_mcast_frames;
+ __be64 rx_ucast_bytes;
+ __be64 rx_ucast_frames;
+ __be64 rx_err_frames;
+ } vf;
+ } u;
+};
+
+#define FW_VI_STATS_CMD_VIID(x) ((x) << 0)
+#define FW_VI_STATS_CMD_NSTATS(x) ((x) << 12)
+#define FW_VI_STATS_CMD_IX(x) ((x) << 0)
+
+struct fw_acl_mac_cmd {
+ __be32 op_to_vfn;
+ __be32 en_to_len16;
+ u8 nmac;
+ u8 r3[7];
+ __be16 r4;
+ u8 macaddr0[6];
+ __be16 r5;
+ u8 macaddr1[6];
+ __be16 r6;
+ u8 macaddr2[6];
+ __be16 r7;
+ u8 macaddr3[6];
+};
+
+#define FW_ACL_MAC_CMD_PFN(x) ((x) << 8)
+#define FW_ACL_MAC_CMD_VFN(x) ((x) << 0)
+#define FW_ACL_MAC_CMD_EN(x) ((x) << 31)
+
+struct fw_acl_vlan_cmd {
+ __be32 op_to_vfn;
+ __be32 en_to_len16;
+ u8 nvlan;
+ u8 dropnovlan_fm;
+ u8 r3_lo[6];
+ __be16 vlanid[16];
+};
+
+#define FW_ACL_VLAN_CMD_PFN(x) ((x) << 8)
+#define FW_ACL_VLAN_CMD_VFN(x) ((x) << 0)
+#define FW_ACL_VLAN_CMD_EN(x) ((x) << 31)
+#define FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << 7)
+#define FW_ACL_VLAN_CMD_FM(x) ((x) << 6)
+
+enum fw_port_cap {
+ FW_PORT_CAP_SPEED_100M = 0x0001,
+ FW_PORT_CAP_SPEED_1G = 0x0002,
+ FW_PORT_CAP_SPEED_2_5G = 0x0004,
+ FW_PORT_CAP_SPEED_10G = 0x0008,
+ FW_PORT_CAP_SPEED_40G = 0x0010,
+ FW_PORT_CAP_SPEED_100G = 0x0020,
+ FW_PORT_CAP_FC_RX = 0x0040,
+ FW_PORT_CAP_FC_TX = 0x0080,
+ FW_PORT_CAP_ANEG = 0x0100,
+ FW_PORT_CAP_MDI_0 = 0x0200,
+ FW_PORT_CAP_MDI_1 = 0x0400,
+ FW_PORT_CAP_BEAN = 0x0800,
+ FW_PORT_CAP_PMA_LPBK = 0x1000,
+ FW_PORT_CAP_PCS_LPBK = 0x2000,
+ FW_PORT_CAP_PHYXS_LPBK = 0x4000,
+ FW_PORT_CAP_FAR_END_LPBK = 0x8000,
+};
+
+enum fw_port_mdi {
+ FW_PORT_MDI_UNCHANGED,
+ FW_PORT_MDI_AUTO,
+ FW_PORT_MDI_F_STRAIGHT,
+ FW_PORT_MDI_F_CROSSOVER
+};
+
+#define FW_PORT_MDI(x) ((x) << 9)
+
+enum fw_port_action {
+ FW_PORT_ACTION_L1_CFG = 0x0001,
+ FW_PORT_ACTION_L2_CFG = 0x0002,
+ FW_PORT_ACTION_GET_PORT_INFO = 0x0003,
+ FW_PORT_ACTION_L2_PPP_CFG = 0x0004,
+ FW_PORT_ACTION_L2_DCB_CFG = 0x0005,
+ FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010,
+ FW_PORT_ACTION_L1_LOW_PWR_EN = 0x0011,
+ FW_PORT_ACTION_L2_WOL_MODE_EN = 0x0012,
+ FW_PORT_ACTION_LPBK_TO_NORMAL = 0x0020,
+ FW_PORT_ACTION_L1_LPBK = 0x0021,
+ FW_PORT_ACTION_L1_PMA_LPBK = 0x0022,
+ FW_PORT_ACTION_L1_PCS_LPBK = 0x0023,
+ FW_PORT_ACTION_L1_PHYXS_CSIDE_LPBK = 0x0024,
+ FW_PORT_ACTION_L1_PHYXS_ESIDE_LPBK = 0x0025,
+ FW_PORT_ACTION_PHY_RESET = 0x0040,
+ FW_PORT_ACTION_PMA_RESET = 0x0041,
+ FW_PORT_ACTION_PCS_RESET = 0x0042,
+ FW_PORT_ACTION_PHYXS_RESET = 0x0043,
+ FW_PORT_ACTION_DTEXS_REEST = 0x0044,
+ FW_PORT_ACTION_AN_RESET = 0x0045
+};
+
+enum fw_port_l2cfg_ctlbf {
+ FW_PORT_L2_CTLBF_OVLAN0 = 0x01,
+ FW_PORT_L2_CTLBF_OVLAN1 = 0x02,
+ FW_PORT_L2_CTLBF_OVLAN2 = 0x04,
+ FW_PORT_L2_CTLBF_OVLAN3 = 0x08,
+ FW_PORT_L2_CTLBF_IVLAN = 0x10,
+ FW_PORT_L2_CTLBF_TXIPG = 0x20
+};
+
+enum fw_port_dcb_cfg {
+ FW_PORT_DCB_CFG_PG = 0x01,
+ FW_PORT_DCB_CFG_PFC = 0x02,
+ FW_PORT_DCB_CFG_APPL = 0x04
+};
+
+enum fw_port_dcb_cfg_rc {
+ FW_PORT_DCB_CFG_SUCCESS = 0x0,
+ FW_PORT_DCB_CFG_ERROR = 0x1
+};
+
+struct fw_port_cmd {
+ __be32 op_to_portid;
+ __be32 action_to_len16;
+ union fw_port {
+ struct fw_port_l1cfg {
+ __be32 rcap;
+ __be32 r;
+ } l1cfg;
+ struct fw_port_l2cfg {
+ __be16 ctlbf_to_ivlan0;
+ __be16 ivlantype;
+ __be32 txipg_pkd;
+ __be16 ovlan0mask;
+ __be16 ovlan0type;
+ __be16 ovlan1mask;
+ __be16 ovlan1type;
+ __be16 ovlan2mask;
+ __be16 ovlan2type;
+ __be16 ovlan3mask;
+ __be16 ovlan3type;
+ } l2cfg;
+ struct fw_port_info {
+ __be32 lstatus_to_modtype;
+ __be16 pcap;
+ __be16 acap;
+ __be16 mtu;
+ __u8 cbllen;
+ __u8 r9;
+ __be32 r10;
+ __be64 r11;
+ } info;
+ struct fw_port_ppp {
+ __be32 pppen_to_ncsich;
+ __be32 r11;
+ } ppp;
+ struct fw_port_dcb {
+ __be16 cfg;
+ u8 up_map;
+ u8 sf_cfgrc;
+ __be16 prot_ix;
+ u8 pe7_to_pe0;
+ u8 numTCPFCs;
+ __be32 pgid0_to_pgid7;
+ __be32 numTCs_oui;
+ u8 pgpc[8];
+ } dcb;
+ } u;
+};
+
+#define FW_PORT_CMD_READ (1U << 22)
+
+#define FW_PORT_CMD_PORTID(x) ((x) << 0)
+#define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
+
+#define FW_PORT_CMD_ACTION(x) ((x) << 16)
+#define FW_PORT_CMD_ACTION_GET(x) (((x) >> 16) & 0xffff)
+
+#define FW_PORT_CMD_CTLBF(x) ((x) << 10)
+#define FW_PORT_CMD_OVLAN3(x) ((x) << 7)
+#define FW_PORT_CMD_OVLAN2(x) ((x) << 6)
+#define FW_PORT_CMD_OVLAN1(x) ((x) << 5)
+#define FW_PORT_CMD_OVLAN0(x) ((x) << 4)
+#define FW_PORT_CMD_IVLAN0(x) ((x) << 3)
+
+#define FW_PORT_CMD_TXIPG(x) ((x) << 19)
+
+#define FW_PORT_CMD_LSTATUS (1U << 31)
+#define FW_PORT_CMD_LSPEED(x) ((x) << 24)
+#define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f)
+#define FW_PORT_CMD_TXPAUSE (1U << 23)
+#define FW_PORT_CMD_RXPAUSE (1U << 22)
+#define FW_PORT_CMD_MDIOCAP (1U << 21)
+#define FW_PORT_CMD_MDIOADDR_GET(x) (((x) >> 16) & 0x1f)
+#define FW_PORT_CMD_LPTXPAUSE (1U << 15)
+#define FW_PORT_CMD_LPRXPAUSE (1U << 14)
+#define FW_PORT_CMD_PTYPE_MASK 0x1f
+#define FW_PORT_CMD_PTYPE_GET(x) (((x) >> 8) & FW_PORT_CMD_PTYPE_MASK)
+#define FW_PORT_CMD_MODTYPE_MASK 0x1f
+#define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & FW_PORT_CMD_MODTYPE_MASK)
+
+#define FW_PORT_CMD_PPPEN(x) ((x) << 31)
+#define FW_PORT_CMD_TPSRC(x) ((x) << 28)
+#define FW_PORT_CMD_NCSISRC(x) ((x) << 24)
+
+#define FW_PORT_CMD_CH0(x) ((x) << 20)
+#define FW_PORT_CMD_CH1(x) ((x) << 16)
+#define FW_PORT_CMD_CH2(x) ((x) << 12)
+#define FW_PORT_CMD_CH3(x) ((x) << 8)
+#define FW_PORT_CMD_NCSICH(x) ((x) << 4)
+
+enum fw_port_type {
+ FW_PORT_TYPE_FIBER_XFI,
+ FW_PORT_TYPE_FIBER_XAUI,
+ FW_PORT_TYPE_BT_SGMII,
+ FW_PORT_TYPE_BT_XFI,
+ FW_PORT_TYPE_BT_XAUI,
+ FW_PORT_TYPE_KX4,
+ FW_PORT_TYPE_CX4,
+ FW_PORT_TYPE_KX,
+ FW_PORT_TYPE_KR,
+ FW_PORT_TYPE_SFP,
+ FW_PORT_TYPE_BP_AP,
+ FW_PORT_TYPE_BP4_AP,
+
+ FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
+};
+
+enum fw_port_module_type {
+ FW_PORT_MOD_TYPE_NA,
+ FW_PORT_MOD_TYPE_LR,
+ FW_PORT_MOD_TYPE_SR,
+ FW_PORT_MOD_TYPE_ER,
+ FW_PORT_MOD_TYPE_TWINAX_PASSIVE,
+ FW_PORT_MOD_TYPE_TWINAX_ACTIVE,
+ FW_PORT_MOD_TYPE_LRM,
+
+ FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK
+};
+
+/* port stats */
+#define FW_NUM_PORT_STATS 50
+#define FW_NUM_PORT_TX_STATS 23
+#define FW_NUM_PORT_RX_STATS 27
+
+enum fw_port_stats_tx_index {
+ FW_STAT_TX_PORT_BYTES_IX,
+ FW_STAT_TX_PORT_FRAMES_IX,
+ FW_STAT_TX_PORT_BCAST_IX,
+ FW_STAT_TX_PORT_MCAST_IX,
+ FW_STAT_TX_PORT_UCAST_IX,
+ FW_STAT_TX_PORT_ERROR_IX,
+ FW_STAT_TX_PORT_64B_IX,
+ FW_STAT_TX_PORT_65B_127B_IX,
+ FW_STAT_TX_PORT_128B_255B_IX,
+ FW_STAT_TX_PORT_256B_511B_IX,
+ FW_STAT_TX_PORT_512B_1023B_IX,
+ FW_STAT_TX_PORT_1024B_1518B_IX,
+ FW_STAT_TX_PORT_1519B_MAX_IX,
+ FW_STAT_TX_PORT_DROP_IX,
+ FW_STAT_TX_PORT_PAUSE_IX,
+ FW_STAT_TX_PORT_PPP0_IX,
+ FW_STAT_TX_PORT_PPP1_IX,
+ FW_STAT_TX_PORT_PPP2_IX,
+ FW_STAT_TX_PORT_PPP3_IX,
+ FW_STAT_TX_PORT_PPP4_IX,
+ FW_STAT_TX_PORT_PPP5_IX,
+ FW_STAT_TX_PORT_PPP6_IX,
+ FW_STAT_TX_PORT_PPP7_IX
+};
+
+enum fw_port_stat_rx_index {
+ FW_STAT_RX_PORT_BYTES_IX,
+ FW_STAT_RX_PORT_FRAMES_IX,
+ FW_STAT_RX_PORT_BCAST_IX,
+ FW_STAT_RX_PORT_MCAST_IX,
+ FW_STAT_RX_PORT_UCAST_IX,
+ FW_STAT_RX_PORT_MTU_ERROR_IX,
+ FW_STAT_RX_PORT_MTU_CRC_ERROR_IX,
+ FW_STAT_RX_PORT_CRC_ERROR_IX,
+ FW_STAT_RX_PORT_LEN_ERROR_IX,
+ FW_STAT_RX_PORT_SYM_ERROR_IX,
+ FW_STAT_RX_PORT_64B_IX,
+ FW_STAT_RX_PORT_65B_127B_IX,
+ FW_STAT_RX_PORT_128B_255B_IX,
+ FW_STAT_RX_PORT_256B_511B_IX,
+ FW_STAT_RX_PORT_512B_1023B_IX,
+ FW_STAT_RX_PORT_1024B_1518B_IX,
+ FW_STAT_RX_PORT_1519B_MAX_IX,
+ FW_STAT_RX_PORT_PAUSE_IX,
+ FW_STAT_RX_PORT_PPP0_IX,
+ FW_STAT_RX_PORT_PPP1_IX,
+ FW_STAT_RX_PORT_PPP2_IX,
+ FW_STAT_RX_PORT_PPP3_IX,
+ FW_STAT_RX_PORT_PPP4_IX,
+ FW_STAT_RX_PORT_PPP5_IX,
+ FW_STAT_RX_PORT_PPP6_IX,
+ FW_STAT_RX_PORT_PPP7_IX,
+ FW_STAT_RX_PORT_LESS_64B_IX
+};
+
+struct fw_port_stats_cmd {
+ __be32 op_to_portid;
+ __be32 retval_len16;
+ union fw_port_stats {
+ struct fw_port_stats_ctl {
+ u8 nstats_bg_bm;
+ u8 tx_ix;
+ __be16 r6;
+ __be32 r7;
+ __be64 stat0;
+ __be64 stat1;
+ __be64 stat2;
+ __be64 stat3;
+ __be64 stat4;
+ __be64 stat5;
+ } ctl;
+ struct fw_port_stats_all {
+ __be64 tx_bytes;
+ __be64 tx_frames;
+ __be64 tx_bcast;
+ __be64 tx_mcast;
+ __be64 tx_ucast;
+ __be64 tx_error;
+ __be64 tx_64b;
+ __be64 tx_65b_127b;
+ __be64 tx_128b_255b;
+ __be64 tx_256b_511b;
+ __be64 tx_512b_1023b;
+ __be64 tx_1024b_1518b;
+ __be64 tx_1519b_max;
+ __be64 tx_drop;
+ __be64 tx_pause;
+ __be64 tx_ppp0;
+ __be64 tx_ppp1;
+ __be64 tx_ppp2;
+ __be64 tx_ppp3;
+ __be64 tx_ppp4;
+ __be64 tx_ppp5;
+ __be64 tx_ppp6;
+ __be64 tx_ppp7;
+ __be64 rx_bytes;
+ __be64 rx_frames;
+ __be64 rx_bcast;
+ __be64 rx_mcast;
+ __be64 rx_ucast;
+ __be64 rx_mtu_error;
+ __be64 rx_mtu_crc_error;
+ __be64 rx_crc_error;
+ __be64 rx_len_error;
+ __be64 rx_sym_error;
+ __be64 rx_64b;
+ __be64 rx_65b_127b;
+ __be64 rx_128b_255b;
+ __be64 rx_256b_511b;
+ __be64 rx_512b_1023b;
+ __be64 rx_1024b_1518b;
+ __be64 rx_1519b_max;
+ __be64 rx_pause;
+ __be64 rx_ppp0;
+ __be64 rx_ppp1;
+ __be64 rx_ppp2;
+ __be64 rx_ppp3;
+ __be64 rx_ppp4;
+ __be64 rx_ppp5;
+ __be64 rx_ppp6;
+ __be64 rx_ppp7;
+ __be64 rx_less_64b;
+ __be64 rx_bg_drop;
+ __be64 rx_bg_trunc;
+ } all;
+ } u;
+};
+
+#define FW_PORT_STATS_CMD_NSTATS(x) ((x) << 4)
+#define FW_PORT_STATS_CMD_BG_BM(x) ((x) << 0)
+#define FW_PORT_STATS_CMD_TX(x) ((x) << 7)
+#define FW_PORT_STATS_CMD_IX(x) ((x) << 0)
+
+/* port loopback stats */
+#define FW_NUM_LB_STATS 16
+enum fw_port_lb_stats_index {
+ FW_STAT_LB_PORT_BYTES_IX,
+ FW_STAT_LB_PORT_FRAMES_IX,
+ FW_STAT_LB_PORT_BCAST_IX,
+ FW_STAT_LB_PORT_MCAST_IX,
+ FW_STAT_LB_PORT_UCAST_IX,
+ FW_STAT_LB_PORT_ERROR_IX,
+ FW_STAT_LB_PORT_64B_IX,
+ FW_STAT_LB_PORT_65B_127B_IX,
+ FW_STAT_LB_PORT_128B_255B_IX,
+ FW_STAT_LB_PORT_256B_511B_IX,
+ FW_STAT_LB_PORT_512B_1023B_IX,
+ FW_STAT_LB_PORT_1024B_1518B_IX,
+ FW_STAT_LB_PORT_1519B_MAX_IX,
+ FW_STAT_LB_PORT_DROP_FRAMES_IX
+};
+
+struct fw_port_lb_stats_cmd {
+ __be32 op_to_lbport;
+ __be32 retval_len16;
+ union fw_port_lb_stats {
+ struct fw_port_lb_stats_ctl {
+ u8 nstats_bg_bm;
+ u8 ix_pkd;
+ __be16 r6;
+ __be32 r7;
+ __be64 stat0;
+ __be64 stat1;
+ __be64 stat2;
+ __be64 stat3;
+ __be64 stat4;
+ __be64 stat5;
+ } ctl;
+ struct fw_port_lb_stats_all {
+ __be64 tx_bytes;
+ __be64 tx_frames;
+ __be64 tx_bcast;
+ __be64 tx_mcast;
+ __be64 tx_ucast;
+ __be64 tx_error;
+ __be64 tx_64b;
+ __be64 tx_65b_127b;
+ __be64 tx_128b_255b;
+ __be64 tx_256b_511b;
+ __be64 tx_512b_1023b;
+ __be64 tx_1024b_1518b;
+ __be64 tx_1519b_max;
+ __be64 rx_lb_drop;
+ __be64 rx_lb_trunc;
+ } all;
+ } u;
+};
+
+#define FW_PORT_LB_STATS_CMD_LBPORT(x) ((x) << 0)
+#define FW_PORT_LB_STATS_CMD_NSTATS(x) ((x) << 4)
+#define FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << 0)
+#define FW_PORT_LB_STATS_CMD_IX(x) ((x) << 0)
+
+struct fw_rss_ind_tbl_cmd {
+ __be32 op_to_viid;
+#define FW_RSS_IND_TBL_CMD_VIID(x) ((x) << 0)
+ __be32 retval_len16;
+ __be16 niqid;
+ __be16 startidx;
+ __be32 r3;
+ __be32 iq0_to_iq2;
+#define FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << 20)
+#define FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << 10)
+#define FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << 0)
+ __be32 iq3_to_iq5;
+ __be32 iq6_to_iq8;
+ __be32 iq9_to_iq11;
+ __be32 iq12_to_iq14;
+ __be32 iq15_to_iq17;
+ __be32 iq18_to_iq20;
+ __be32 iq21_to_iq23;
+ __be32 iq24_to_iq26;
+ __be32 iq27_to_iq29;
+ __be32 iq30_iq31;
+ __be32 r15_lo;
+};
+
+struct fw_rss_glb_config_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ union fw_rss_glb_config {
+ struct fw_rss_glb_config_manual {
+ __be32 mode_pkd;
+ __be32 r3;
+ __be64 r4;
+ __be64 r5;
+ } manual;
+ struct fw_rss_glb_config_basicvirtual {
+ __be32 mode_pkd;
+ __be32 synmapen_to_hashtoeplitz;
+#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN (1U << 8)
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 (1U << 7)
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 (1U << 6)
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 (1U << 5)
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 (1U << 4)
+#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN (1U << 3)
+#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN (1U << 2)
+#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP (1U << 1)
+#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ (1U << 0)
+ __be64 r8;
+ __be64 r9;
+ } basicvirtual;
+ } u;
+};
+
+#define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28)
+#define FW_RSS_GLB_CONFIG_CMD_MODE_GET(x) (((x) >> 28) & 0xf)
+
+#define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0
+#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1
+
+struct fw_rss_vi_config_cmd {
+ __be32 op_to_viid;
+#define FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << 0)
+ __be32 retval_len16;
+ union fw_rss_vi_config {
+ struct fw_rss_vi_config_manual {
+ __be64 r3;
+ __be64 r4;
+ __be64 r5;
+ } manual;
+ struct fw_rss_vi_config_basicvirtual {
+ __be32 r6;
+ __be32 defaultq_to_udpen;
+#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16)
+#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(x) (((x) >> 16) & 0x3ff)
+#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4)
+#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3)
+#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2)
+#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1)
+#define FW_RSS_VI_CONFIG_CMD_UDPEN (1U << 0)
+ __be64 r9;
+ __be64 r10;
+ } basicvirtual;
+ } u;
+};
+
+enum fw_error_type {
+ FW_ERROR_TYPE_EXCEPTION = 0x0,
+ FW_ERROR_TYPE_HWMODULE = 0x1,
+ FW_ERROR_TYPE_WR = 0x2,
+ FW_ERROR_TYPE_ACL = 0x3,
+};
+
+struct fw_error_cmd {
+ __be32 op_to_type;
+ __be32 len16_pkd;
+ union fw_error {
+ struct fw_error_exception {
+ __be32 info[6];
+ } exception;
+ struct fw_error_hwmodule {
+ __be32 regaddr;
+ __be32 regval;
+ } hwmodule;
+ struct fw_error_wr {
+ __be16 cidx;
+ __be16 pfn_vfn;
+ __be32 eqid;
+ u8 wrhdr[16];
+ } wr;
+ struct fw_error_acl {
+ __be16 cidx;
+ __be16 pfn_vfn;
+ __be32 eqid;
+ __be16 mv_pkd;
+ u8 val[6];
+ __be64 r4;
+ } acl;
+ } u;
+};
+
+struct fw_debug_cmd {
+ __be32 op_type;
+#define FW_DEBUG_CMD_TYPE_GET(x) ((x) & 0xff)
+ __be32 len16_pkd;
+ union fw_debug {
+ struct fw_debug_assert {
+ __be32 fcid;
+ __be32 line;
+ __be32 x;
+ __be32 y;
+ u8 filename_0_7[8];
+ u8 filename_8_15[8];
+ __be64 r3;
+ } assert;
+ struct fw_debug_prt {
+ __be16 dprtstridx;
+ __be16 r3[3];
+ __be32 dprtstrparam0;
+ __be32 dprtstrparam1;
+ __be32 dprtstrparam2;
+ __be32 dprtstrparam3;
+ } prt;
+ } u;
+};
+
+struct fw_hdr {
+ u8 ver;
+ u8 reserved1;
+ __be16 len512; /* bin length in units of 512-bytes */
+ __be32 fw_ver; /* firmware version */
+ __be32 tp_microcode_ver;
+ u8 intfver_nic;
+ u8 intfver_vnic;
+ u8 intfver_ofld;
+ u8 intfver_ri;
+ u8 intfver_iscsipdu;
+ u8 intfver_iscsi;
+ u8 intfver_fcoe;
+ u8 reserved2;
+ __be32 reserved3[27];
+};
+
+#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff)
+#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff)
+#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
+#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff)
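+
+/*
+ * Decoding sketch (hypothetical caller): given a struct fw_hdr *hdr,
+ *
+ *	u32 ver = be32_to_cpu(hdr->fw_ver);
+ *	pr_info("firmware %u.%u.%u.%u\n",
+ *		FW_HDR_FW_VER_MAJOR_GET(ver), FW_HDR_FW_VER_MINOR_GET(ver),
+ *		FW_HDR_FW_VER_MICRO_GET(ver), FW_HDR_FW_VER_BUILD_GET(ver));
+ */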
+#endif /* _T4FW_INTERFACE_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/Makefile b/drivers/net/ethernet/chelsio/cxgb4vf/Makefile
new file mode 100644
index 000000000000..d72ee26cb4c7
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/Makefile
@@ -0,0 +1,7 @@
+#
+# Chelsio T4 SR-IOV Virtual Function Driver
+#
+
+obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf.o
+
+cxgb4vf-objs := cxgb4vf_main.o t4vf_hw.o sge.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
new file mode 100644
index 000000000000..594334d5c711
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -0,0 +1,534 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file should not be included directly. Include t4vf_common.h instead.
+ */
+
+#ifndef __CXGB4VF_ADAPTER_H__
+#define __CXGB4VF_ADAPTER_H__
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+
+#include "../cxgb4/t4_hw.h"
+
+/*
+ * Constants of the implementation.
+ */
+enum {
+ MAX_NPORTS = 1, /* max # of "ports" */
+ MAX_PORT_QSETS = 8, /* max # of Queue Sets / "port" */
+ MAX_ETH_QSETS = MAX_NPORTS*MAX_PORT_QSETS,
+
+ /*
+ * MSI-X interrupt index usage.
+ */
+ MSIX_FW = 0, /* MSI-X index for firmware Q */
+ MSIX_IQFLINT = 1, /* MSI-X index base for Ingress Qs */
+ MSIX_EXTRAS = 1,
+ MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS,
+
+ /*
+ * The maximum number of Ingress and Egress Queues is determined by
+ * the maximum number of "Queue Sets" which we support plus any
+ * ancillary queues. Each "Queue Set" requires one Ingress Queue
+ * for RX Packet Ingress Event notifications and two Egress Queues for
+ * a Free List and an Ethernet TX queue.
+ */
+ INGQ_EXTRAS = 2, /* firmware event queue and */
+ /* forwarded interrupts */
+ MAX_INGQ = MAX_ETH_QSETS+INGQ_EXTRAS,
+ MAX_EGRQ = MAX_ETH_QSETS*2,
+};
+
+/*
+ * Forward structure definition references.
+ */
+struct adapter;
+struct sge_eth_rxq;
+struct sge_rspq;
+
+/*
+ * Per-"port" information. This is really per-Virtual Interface information
+ * but the use of the "port" nomenclature makes it easier to go back and forth
+ * between the PF and VF drivers ...
+ */
+struct port_info {
+ struct adapter *adapter; /* our adapter */
+ u16 viid; /* virtual interface ID */
+ s16 xact_addr_filt; /* index of our MAC address filter */
+ u16 rss_size; /* size of VI's RSS table slice */
+ u8 pidx; /* index into adapter port[] */
+ u8 port_id; /* physical port ID */
+ u8 nqsets; /* # of "Queue Sets" */
+ u8 first_qset; /* index of first "Queue Set" */
+ struct link_config link_cfg; /* physical port configuration */
+};
+
+/*
+ * Scatter Gather Engine resources for the "adapter". Our ingress and egress
+ * queues are organized into "Queue Sets" with one ingress and one egress
+ * queue per Queue Set. These Queue Sets are apportionable between the "ports"
+ * (Virtual Interfaces). One extra ingress queue is used to receive
+ * asynchronous messages from the firmware. Note that the "Queue IDs" that we
+ * use here are really "Relative Queue IDs" which are returned as part of the
+ * firmware command to allocate queues. These queue IDs are relative to the
+ * absolute Queue ID base of the section of the Queue ID space allocated to
+ * the PF/VF.
+ */
+
+/*
+ * SGE free-list queue state.
+ */
+struct rx_sw_desc;
+struct sge_fl {
+ unsigned int avail; /* # of available RX buffers */
+ unsigned int pend_cred; /* new buffers since last FL DB ring */
+ unsigned int cidx; /* consumer index */
+ unsigned int pidx; /* producer index */
+ unsigned long alloc_failed; /* # of buffer allocation failures */
+ unsigned long large_alloc_failed;
+ unsigned long starving; /* # of times FL was found starving */
+
+ /*
+ * Write-once/infrequently fields.
+ * -------------------------------
+ */
+
+ unsigned int cntxt_id; /* SGE relative QID for the free list */
+ unsigned int abs_id; /* SGE absolute QID for the free list */
+ unsigned int size; /* capacity of free list */
+ struct rx_sw_desc *sdesc; /* address of SW RX descriptor ring */
+ __be64 *desc; /* address of HW RX descriptor ring */
+ dma_addr_t addr; /* PCI bus address of hardware ring */
+};
+
+/*
+ * An ingress packet gather list.
+ */
+struct pkt_gl {
+ skb_frag_t frags[MAX_SKB_FRAGS];
+ void *va; /* virtual address of first byte */
+ unsigned int nfrags; /* # of fragments */
+ unsigned int tot_len; /* total length of fragments */
+};
+
+typedef int (*rspq_handler_t)(struct sge_rspq *, const __be64 *,
+ const struct pkt_gl *);
+
+/*
+ * State for an SGE Response Queue.
+ */
+struct sge_rspq {
+ struct napi_struct napi; /* NAPI scheduling control */
+ const __be64 *cur_desc; /* current descriptor in queue */
+ unsigned int cidx; /* consumer index */
+ u8 gen; /* current generation bit */
+ u8 next_intr_params; /* holdoff params for next interrupt */
+ int offset; /* offset into current FL buffer */
+
+ unsigned int unhandled_irqs; /* bogus interrupts */
+
+ /*
+ * Write-once/infrequently fields.
+ * -------------------------------
+ */
+
+ u8 intr_params; /* interrupt holdoff parameters */
+ u8 pktcnt_idx; /* interrupt packet threshold */
+ u8 idx; /* queue index within its group */
+ u16 cntxt_id; /* SGE rel QID for the response Q */
+ u16 abs_id; /* SGE abs QID for the response Q */
+ __be64 *desc; /* address of hardware response ring */
+ dma_addr_t phys_addr; /* PCI bus address of ring */
+ unsigned int iqe_len; /* entry size */
+ unsigned int size; /* capacity of response Q */
+ struct adapter *adapter; /* our adapter */
+ struct net_device *netdev; /* associated net device */
+ rspq_handler_t handler; /* the handler for this response Q */
+};
+
+/*
+ * Ethernet queue statistics
+ */
+struct sge_eth_stats {
+ unsigned long pkts; /* # of ethernet packets */
+ unsigned long lro_pkts; /* # of LRO super packets */
+ unsigned long lro_merged; /* # of wire packets merged by LRO */
+ unsigned long rx_cso; /* # of Rx checksum offloads */
+ unsigned long vlan_ex; /* # of Rx VLAN extractions */
+ unsigned long rx_drops; /* # of packets dropped due to no mem */
+};
+
+/*
+ * State for an Ethernet Receive Queue.
+ */
+struct sge_eth_rxq {
+ struct sge_rspq rspq; /* Response Queue */
+ struct sge_fl fl; /* Free List */
+ struct sge_eth_stats stats; /* receive statistics */
+};
+
+/*
+ * SGE Transmit Queue state. This contains all of the resources associated
+ * with the hardware status of a TX Queue which is a circular ring of hardware
+ * TX Descriptors. For convenience, it also contains a pointer to a parallel
+ * "Software Descriptor" array but we don't know anything about it here other
+ * than its type name.
+ */
+struct tx_desc {
+ /*
+ * Egress Queues are measured in units of SGE_EQ_IDXSIZE by the
+ * hardware: Sizes, Producer and Consumer indices, etc.
+ */
+ __be64 flit[SGE_EQ_IDXSIZE/sizeof(__be64)];
+};
+struct tx_sw_desc;
+struct sge_txq {
+ unsigned int in_use; /* # of in-use TX descriptors */
+ unsigned int size; /* # of descriptors */
+ unsigned int cidx; /* SW consumer index */
+ unsigned int pidx; /* producer index */
+ unsigned long stops; /* # of times queue has been stopped */
+ unsigned long restarts; /* # of queue restarts */
+
+ /*
+ * Write-once/infrequently fields.
+ * -------------------------------
+ */
+
+ unsigned int cntxt_id; /* SGE relative QID for the TX Q */
+ unsigned int abs_id; /* SGE absolute QID for the TX Q */
+ struct tx_desc *desc; /* address of HW TX descriptor ring */
+ struct tx_sw_desc *sdesc; /* address of SW TX descriptor ring */
+ struct sge_qstat *stat; /* queue status entry */
+ dma_addr_t phys_addr; /* PCI bus address of hardware ring */
+};
+
+/*
+ * State for an Ethernet Transmit Queue.
+ */
+struct sge_eth_txq {
+ struct sge_txq q; /* SGE TX Queue */
+ struct netdev_queue *txq; /* associated netdev TX queue */
+ unsigned long tso; /* # of TSO requests */
+ unsigned long tx_cso; /* # of TX checksum offloads */
+ unsigned long vlan_ins; /* # of TX VLAN insertions */
+ unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
+};
+
+/*
+ * The complete set of Scatter/Gather Engine resources.
+ */
+struct sge {
+ /*
+ * Our "Queue Sets" ...
+ */
+ struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
+ struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
+
+ /*
+ * Extra ingress queues for asynchronous firmware events and
+ * forwarded interrupts (when in MSI mode).
+ */
+ struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
+
+ struct sge_rspq intrq ____cacheline_aligned_in_smp;
+ spinlock_t intrq_lock;
+
+ /*
+ * State for managing "starving Free Lists" -- Free Lists which have
+ * fallen below a certain threshold of buffers available to the
+ * hardware and attempts to refill them up to that threshold have
+ * failed. We have a regular "slow tick" timer process which will
+ * make periodic attempts to refill these starving Free Lists ...
+ */
+ DECLARE_BITMAP(starving_fl, MAX_EGRQ);
+ struct timer_list rx_timer;
+
+ /*
+ * State for cleaning up completed TX descriptors.
+ */
+ struct timer_list tx_timer;
+
+ /*
+ * Write-once/infrequently fields.
+ * -------------------------------
+ */
+
+ u16 max_ethqsets; /* # of available Ethernet queue sets */
+ u16 ethqsets; /* # of active Ethernet queue sets */
+ u16 ethtxq_rover; /* Tx queue to clean up next */
+ u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer array */
+ u8 counter_val[SGE_NCOUNTERS]; /* interrupt RX threshold array */
+
+ /*
+ * Reverse maps from Absolute Queue IDs to associated queue pointers.
+ * The absolute Queue IDs are in a compact range which starts at a
+ * [potentially large] Base Queue ID. We perform the reverse map by
+ * first converting the Absolute Queue ID into a Relative Queue ID by
+ * subtracting off the Base Queue ID and then use a Relative Queue ID
+ * indexed table to get the pointer to the corresponding software
+ * queue structure.
+ */
+ unsigned int egr_base;
+ unsigned int ingr_base;
+ void *egr_map[MAX_EGRQ];
+ struct sge_rspq *ingr_map[MAX_INGQ];
+};
+
+/*
+ * Utility macros to convert Absolute Queue IDs into Relative Queue indices
+ * for Egress and Ingress Queues. The EQ_MAP() and IQ_MAP() macros, which
+ * provide pointers to Egress and Ingress Queues, can be used as both
+ * L-values and R-values.
+ */
+#define EQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->egr_base))
+#define IQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->ingr_base))
+
+#define EQ_MAP(s, abs_id) ((s)->egr_map[EQ_IDX(s, abs_id)])
+#define IQ_MAP(s, abs_id) ((s)->ingr_map[IQ_IDX(s, abs_id)])
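+
+/*
+ * Illustrative example (the numbers are hypothetical): if the firmware
+ * assigns our Egress Queues Absolute Queue IDs 1024..1039, then egr_base
+ * is 1024 and EQ_IDX(s, 1027) yields Relative Queue ID 3, so EQ_MAP(s, 1027)
+ * is simply (s)->egr_map[3].
+ */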
+
+/*
+ * Macro to iterate across Queue Sets ("rxq" is a historic misnomer).
+ */
+#define for_each_ethrxq(sge, iter) \
+ for (iter = 0; iter < (sge)->ethqsets; iter++)
+
+/*
+ * Per-"adapter" (Virtual Function) information.
+ */
+struct adapter {
+ /* PCI resources */
+ void __iomem *regs;
+ struct pci_dev *pdev;
+ struct device *pdev_dev;
+
+ /* "adapter" resources */
+ unsigned long registered_device_map;
+ unsigned long open_device_map;
+ unsigned long flags;
+ struct adapter_params params;
+
+ /* queue and interrupt resources */
+ struct {
+ unsigned short vec;
+ char desc[22];
+ } msix_info[MSIX_ENTRIES];
+ struct sge sge;
+
+ /* Linux network device resources */
+ struct net_device *port[MAX_NPORTS];
+ const char *name;
+ unsigned int msg_enable;
+
+ /* debugfs resources */
+ struct dentry *debugfs_root;
+
+ /* various locks */
+ spinlock_t stats_lock;
+};
+
+enum { /* adapter flags */
+ FULL_INIT_DONE = (1UL << 0),
+ USING_MSI = (1UL << 1),
+ USING_MSIX = (1UL << 2),
+ QUEUES_BOUND = (1UL << 3),
+};
+
+/*
+ * The following register read/write routine definitions are required by
+ * the common code.
+ */
+
+/**
+ * t4_read_reg - read a HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ *
+ * Returns the 32-bit value of the given HW register.
+ */
+static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
+{
+ return readl(adapter->regs + reg_addr);
+}
+
+/**
+ * t4_write_reg - write a HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 32-bit value into the given HW register.
+ */
+static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
+{
+ writel(val, adapter->regs + reg_addr);
+}
+
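+/*
+ * Fallback 64-bit MMIO accessors for platforms which don't provide native
+ * readq()/writeq(). Each is synthesized from two 32-bit accesses (low word
+ * at the given address, high word at address + 4), so the two halves are
+ * not read or written atomically.
+ */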
+#ifndef readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+ return readl(addr) + ((u64)readl(addr + 4) << 32);
+}
+
+static inline void writeq(u64 val, volatile void __iomem *addr)
+{
+ writel(val, addr);
+ writel(val >> 32, addr + 4);
+}
+#endif
+
+/**
+ * t4_read_reg64 - read a 64-bit HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ *
+ * Returns the 64-bit value of the given HW register.
+ */
+static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
+{
+ return readq(adapter->regs + reg_addr);
+}
+
+/**
+ * t4_write_reg64 - write a 64-bit HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 64-bit value into the given HW register.
+ */
+static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
+ u64 val)
+{
+ writeq(val, adapter->regs + reg_addr);
+}
+
+/**
+ * port_name - return the string name of a port
+ * @adapter: the adapter
+ * @pidx: the port index
+ *
+ * Return the string name of the selected port.
+ */
+static inline const char *port_name(struct adapter *adapter, int pidx)
+{
+ return adapter->port[pidx]->name;
+}
+
+/**
+ * t4_os_set_hw_addr - store a port's MAC address in SW
+ * @adapter: the adapter
+ * @pidx: the port index
+ * @hw_addr: the Ethernet address
+ *
+ * Store the Ethernet address of the given port in SW. Called by the common
+ * code when it retrieves a port's Ethernet address from EEPROM.
+ */
+static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx,
+ u8 hw_addr[])
+{
+ memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN);
+ memcpy(adapter->port[pidx]->perm_addr, hw_addr, ETH_ALEN);
+}
+
+/**
+ * netdev2pinfo - return the port_info structure associated with a net_device
+ * @dev: the netdev
+ *
+ * Return the struct port_info associated with a net_device.
+ */
+static inline struct port_info *netdev2pinfo(const struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+/**
+ * adap2pinfo - return the port_info of a port
+ * @adapter: the adapter
+ * @pidx: the port index
+ *
+ * Return the port_info structure for the selected port of the adapter.
+ */
+static inline struct port_info *adap2pinfo(struct adapter *adapter, int pidx)
+{
+ return netdev_priv(adapter->port[pidx]);
+}
+
+/**
+ * netdev2adap - return the adapter structure associated with a net_device
+ * @dev: the netdev
+ *
+ * Return the struct adapter associated with a net_device.
+ */
+static inline struct adapter *netdev2adap(const struct net_device *dev)
+{
+ return netdev2pinfo(dev)->adapter;
+}
+
+/*
+ * OS "Callback" function declarations. These are functions that the OS code
+ * is "contracted" to provide for the common code.
+ */
+void t4vf_os_link_changed(struct adapter *, int, int);
+
+/*
+ * SGE function prototype declarations.
+ */
+int t4vf_sge_alloc_rxq(struct adapter *, struct sge_rspq *, bool,
+ struct net_device *, int,
+ struct sge_fl *, rspq_handler_t);
+int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *,
+ struct net_device *, struct netdev_queue *,
+ unsigned int);
+void t4vf_free_sge_resources(struct adapter *);
+
+int t4vf_eth_xmit(struct sk_buff *, struct net_device *);
+int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *,
+ const struct pkt_gl *);
+
+irq_handler_t t4vf_intr_handler(struct adapter *);
+irqreturn_t t4vf_sge_intr_msix(int, void *);
+
+int t4vf_sge_init(struct adapter *);
+void t4vf_sge_start(struct adapter *);
+void t4vf_sge_stop(struct adapter *);
+
+#endif /* __CXGB4VF_ADAPTER_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
new file mode 100644
index 000000000000..da9072bfca8b
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -0,0 +1,2949 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/debugfs.h>
+#include <linux/ethtool.h>
+
+#include "t4vf_common.h"
+#include "t4vf_defs.h"
+
+#include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4_msg.h"
+
+/*
+ * Generic information about the driver.
+ */
+#define DRV_VERSION "1.0.0"
+#define DRV_DESC "Chelsio T4 Virtual Function (VF) Network Driver"
+
+/*
+ * Module Parameters.
+ * ==================
+ */
+
+/*
+ * Default ethtool "message level" for adapters.
+ */
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+ NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+module_param(dflt_msg_enable, int, 0644);
+MODULE_PARM_DESC(dflt_msg_enable,
+ "default adapter ethtool message level bitmap");
+
+/*
+ * The driver uses the best interrupt scheme available on a platform in the
+ * order MSI-X then MSI. This parameter determines which of these schemes the
+ * driver may consider as follows:
+ *
+ * msi = 2: choose from among MSI-X and MSI
+ * msi = 1: only consider MSI interrupts
+ *
+ * Note that unlike the Physical Function driver, this Virtual Function driver
+ * does _not_ support legacy INTx interrupts (this limitation is mandated by
+ * the PCI-E SR-IOV standard).
+ */
+#define MSI_MSIX 2
+#define MSI_MSI 1
+#define MSI_DEFAULT MSI_MSIX
+
+static int msi = MSI_DEFAULT;
+
+module_param(msi, int, 0644);
+MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
+
+/*
+ * Fundamental constants.
+ * ======================
+ */
+
+enum {
+ MAX_TXQ_ENTRIES = 16384,
+ MAX_RSPQ_ENTRIES = 16384,
+ MAX_RX_BUFFERS = 16384,
+
+ MIN_TXQ_ENTRIES = 32,
+ MIN_RSPQ_ENTRIES = 128,
+ MIN_FL_ENTRIES = 16,
+
+ /*
+ * For purposes of manipulating the Free List size we need to
+ * recognize that Free Lists are actually Egress Queues (the host
+ * produces free buffers which the hardware consumes), that Egress Queue
+ * indices are all in units of Egress Context Units (EQ_UNIT bytes), and
+ * that Free List entries are 64-bit PCI DMA addresses. And since the state of
+ * the Producer Index == the Consumer Index implies an EMPTY list, we
+ * always have at least one Egress Unit's worth of Free List entries
+ * unused. See sge.c for more details ...
+ */
+ EQ_UNIT = SGE_EQ_IDXSIZE,
+ FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+ MIN_FL_RESID = FL_PER_EQ_UNIT,
+};
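+
+/*
+ * A worked example (a sketch, assuming the usual 64-byte SGE_EQ_IDXSIZE):
+ * each EQ_UNIT then covers FL_PER_EQ_UNIT = 64/8 = 8 Free List entries, so
+ * MIN_FL_RESID reserves 8 entries so that a full Free List never has
+ * Producer Index == Consumer Index (which would read back as EMPTY).
+ */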
+
+/*
+ * Global driver state.
+ * ====================
+ */
+
+static struct dentry *cxgb4vf_debugfs_root;
+
+/*
+ * OS "Callback" functions.
+ * ========================
+ */
+
+/*
+ * The link status has changed on the indicated "port" (Virtual Interface).
+ */
+void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
+{
+ struct net_device *dev = adapter->port[pidx];
+
+ /*
+ * If the port is disabled or the current recorded "link up"
+ * status matches the new status, just return.
+ */
+ if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
+ return;
+
+ /*
+ * Tell the OS that the link status has changed and print a short
+ * informative message on the console about the event.
+ */
+ if (link_ok) {
+ const char *s;
+ const char *fc;
+ const struct port_info *pi = netdev_priv(dev);
+
+ netif_carrier_on(dev);
+
+ switch (pi->link_cfg.speed) {
+ case SPEED_10000:
+ s = "10Gbps";
+ break;
+
+ case SPEED_1000:
+ s = "1000Mbps";
+ break;
+
+ case SPEED_100:
+ s = "100Mbps";
+ break;
+
+ default:
+ s = "unknown";
+ break;
+ }
+
+ switch (pi->link_cfg.fc) {
+ case PAUSE_RX:
+ fc = "RX";
+ break;
+
+ case PAUSE_TX:
+ fc = "TX";
+ break;
+
+ case PAUSE_RX|PAUSE_TX:
+ fc = "RX/TX";
+ break;
+
+ default:
+ fc = "no";
+ break;
+ }
+
+ printk(KERN_INFO "%s: link up, %s, full-duplex, %s PAUSE\n",
+ dev->name, s, fc);
+ } else {
+ netif_carrier_off(dev);
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ }
+}
+
+/*
+ * Net device operations.
+ * ======================
+ */
+
+
+
+
+/*
+ * Perform the MAC and PHY actions needed to enable a "port" (Virtual
+ * Interface).
+ */
+static int link_start(struct net_device *dev)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+
+ /*
+ * We do not set address filters or promiscuity here; the stack does
+ * that step explicitly. We do, however, enable VLAN acceleration.
+ */
+ ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
+ true);
+ if (ret == 0) {
+ ret = t4vf_change_mac(pi->adapter, pi->viid,
+ pi->xact_addr_filt, dev->dev_addr, true);
+ if (ret >= 0) {
+ pi->xact_addr_filt = ret;
+ ret = 0;
+ }
+ }
+
+ /*
+ * We don't need to actually "start the link" itself since the
+ * firmware will do that for us when the first Virtual Interface
+ * is enabled on a port.
+ */
+ if (ret == 0)
+ ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
+ return ret;
+}
+
+/*
+ * Name the MSI-X interrupts.
+ */
+static void name_msix_vecs(struct adapter *adapter)
+{
+ int namelen = sizeof(adapter->msix_info[0].desc) - 1;
+ int pidx;
+
+ /*
+ * Firmware events.
+ */
+ snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
+ "%s-FWeventq", adapter->name);
+ adapter->msix_info[MSIX_FW].desc[namelen] = 0;
+
+ /*
+ * Ethernet queues.
+ */
+ for_each_port(adapter, pidx) {
+ struct net_device *dev = adapter->port[pidx];
+ const struct port_info *pi = netdev_priv(dev);
+ int qs, msi;
+
+ for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
+ snprintf(adapter->msix_info[msi].desc, namelen,
+ "%s-%d", dev->name, qs);
+ adapter->msix_info[msi].desc[namelen] = 0;
+ }
+ }
+}
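+
+/*
+ * A sketch of the resulting names (illustrative, assuming the adapter is
+ * named after its PCI device): the firmware event vector becomes e.g.
+ * "0000:04:00.4-FWeventq" and the Ethernet Queue Set vectors become
+ * "eth0-0", "eth0-1", etc.
+ */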
+
+/*
+ * Request all of our MSI-X resources.
+ */
+static int request_msix_queue_irqs(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ int rxq, msi, err;
+
+ /*
+ * Firmware events.
+ */
+ err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
+ 0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
+ if (err)
+ return err;
+
+ /*
+ * Ethernet queues.
+ */
+ msi = MSIX_IQFLINT;
+ for_each_ethrxq(s, rxq) {
+ err = request_irq(adapter->msix_info[msi].vec,
+ t4vf_sge_intr_msix, 0,
+ adapter->msix_info[msi].desc,
+ &s->ethrxq[rxq].rspq);
+ if (err)
+ goto err_free_irqs;
+ msi++;
+ }
+ return 0;
+
+err_free_irqs:
+ while (--rxq >= 0)
+ free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
+ free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
+ return err;
+}
+
+/*
+ * Free our MSI-X resources.
+ */
+static void free_msix_queue_irqs(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ int rxq, msi;
+
+ free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
+ msi = MSIX_IQFLINT;
+ for_each_ethrxq(s, rxq)
+ free_irq(adapter->msix_info[msi++].vec,
+ &s->ethrxq[rxq].rspq);
+}
+
+/*
+ * Turn on NAPI and start up interrupts on a response queue.
+ */
+static void qenable(struct sge_rspq *rspq)
+{
+ napi_enable(&rspq->napi);
+
+ /*
+ * 0-increment the Going To Sleep register to start the timer and
+ * enable interrupts.
+ */
+ t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+ CIDXINC(0) |
+ SEINTARM(rspq->intr_params) |
+ INGRESSQID(rspq->cntxt_id));
+}
+
+/*
+ * Enable NAPI scheduling and interrupt generation for all Receive Queues.
+ */
+static void enable_rx(struct adapter *adapter)
+{
+ int rxq;
+ struct sge *s = &adapter->sge;
+
+ for_each_ethrxq(s, rxq)
+ qenable(&s->ethrxq[rxq].rspq);
+ qenable(&s->fw_evtq);
+
+ /*
+ * The interrupt queue doesn't use NAPI so we do the 0-increment of
+ * its Going To Sleep register here to get it started.
+ */
+ if (adapter->flags & USING_MSI)
+ t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+ CIDXINC(0) |
+ SEINTARM(s->intrq.intr_params) |
+ INGRESSQID(s->intrq.cntxt_id));
+
+}
+
+/*
+ * Wait until all NAPI handlers are descheduled.
+ */
+static void quiesce_rx(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ int rxq;
+
+ for_each_ethrxq(s, rxq)
+ napi_disable(&s->ethrxq[rxq].rspq.napi);
+ napi_disable(&s->fw_evtq.napi);
+}
+
+/*
+ * Response queue handler for the firmware event queue.
+ */
+static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
+ const struct pkt_gl *gl)
+{
+ /*
+ * Extract response opcode and get pointer to CPL message body.
+ */
+ struct adapter *adapter = rspq->adapter;
+ u8 opcode = ((const struct rss_header *)rsp)->opcode;
+ void *cpl = (void *)(rsp + 1);
+
+ switch (opcode) {
+ case CPL_FW6_MSG: {
+ /*
+ * We've received an asynchronous message from the firmware.
+ */
+ const struct cpl_fw6_msg *fw_msg = cpl;
+ if (fw_msg->type == FW6_TYPE_CMD_RPL)
+ t4vf_handle_fw_rpl(adapter, fw_msg->data);
+ break;
+ }
+
+ case CPL_SGE_EGR_UPDATE: {
+ /*
+ * We've received an Egress Queue Status Update message. We
+ * get these, if the SGE is configured to send these when the
+ * firmware passes certain points in processing our TX
+ * Ethernet Queue or if we make an explicit request for one.
+ * We use these updates to determine when we may need to
+ * restart a TX Ethernet Queue which was stopped for lack of
+ * free TX Queue Descriptors ...
+ */
+ const struct cpl_sge_egr_update *p = (void *)cpl;
+ unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
+ struct sge *s = &adapter->sge;
+ struct sge_txq *tq;
+ struct sge_eth_txq *txq;
+ unsigned int eq_idx;
+
+ /*
+ * Perform sanity checking on the Queue ID to make sure it
+ * really refers to one of our TX Ethernet Egress Queues which
+ * is active and matches the queue's ID. None of these error
+ * conditions should ever happen, so we may want to make them
+ * fatal and/or conditionalize them under DEBUG.
+ */
+ eq_idx = EQ_IDX(s, qid);
+ if (unlikely(eq_idx >= MAX_EGRQ)) {
+ dev_err(adapter->pdev_dev,
+ "Egress Update QID %d out of range\n", qid);
+ break;
+ }
+ tq = s->egr_map[eq_idx];
+ if (unlikely(tq == NULL)) {
+ dev_err(adapter->pdev_dev,
+ "Egress Update QID %d TXQ=NULL\n", qid);
+ break;
+ }
+ txq = container_of(tq, struct sge_eth_txq, q);
+ if (unlikely(tq->abs_id != qid)) {
+ dev_err(adapter->pdev_dev,
+ "Egress Update QID %d refers to TXQ %d\n",
+ qid, tq->abs_id);
+ break;
+ }
+
+ /*
+ * Restart a stopped TX Queue which has less than half of its
+ * TX ring in use ...
+ */
+ txq->q.restarts++;
+ netif_tx_wake_queue(txq->txq);
+ break;
+ }
+
+ default:
+ dev_err(adapter->pdev_dev,
+ "unexpected CPL %#x on FW event queue\n", opcode);
+ }
+
+ return 0;
+}
+
+/*
+ * Allocate SGE TX/RX queue resources. Determine how many sets of SGE queues
+ * to use and initialize them. We support multiple "Queue Sets" per port if
+ * we have MSI-X; otherwise just one Queue Set per port.
+ */
+static int setup_sge_queues(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ int err, pidx, msix;
+
+ /*
+ * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
+ * state.
+ */
+ bitmap_zero(s->starving_fl, MAX_EGRQ);
+
+ /*
+ * If we're using MSI interrupt mode we need to set up a "forwarded
+ * interrupt" queue which we'll set up with our MSI vector. The rest
+ * of the ingress queues will be set up to forward their interrupts to
+ * this queue ... This must be first since t4vf_sge_alloc_rxq() uses
+ * the intrq's queue ID as the interrupt forwarding queue for the
+ * subsequent calls ...
+ */
+ if (adapter->flags & USING_MSI) {
+ err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
+ adapter->port[0], 0, NULL, NULL);
+ if (err)
+ goto err_free_queues;
+ }
+
+ /*
+ * Allocate our ingress queue for asynchronous firmware messages.
+ */
+ err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
+ MSIX_FW, NULL, fwevtq_handler);
+ if (err)
+ goto err_free_queues;
+
+ /*
+ * Allocate each "port"'s initial Queue Sets. These can be changed
+ * later on ... up to the point where any interface on the adapter is
+ * brought up at which point lots of things get nailed down
+ * permanently ...
+ */
+ msix = MSIX_IQFLINT;
+ for_each_port(adapter, pidx) {
+ struct net_device *dev = adapter->port[pidx];
+ struct port_info *pi = netdev_priv(dev);
+ struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
+ struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
+ int qs;
+
+ for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
+ err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
+ dev, msix++,
+ &rxq->fl, t4vf_ethrx_handler);
+ if (err)
+ goto err_free_queues;
+
+ err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
+ netdev_get_tx_queue(dev, qs),
+ s->fw_evtq.cntxt_id);
+ if (err)
+ goto err_free_queues;
+
+ rxq->rspq.idx = qs;
+ memset(&rxq->stats, 0, sizeof(rxq->stats));
+ }
+ }
+
+ /*
+ * Create the reverse mappings for the queues.
+ */
+ s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
+ s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
+ IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
+ for_each_port(adapter, pidx) {
+ struct net_device *dev = adapter->port[pidx];
+ struct port_info *pi = netdev_priv(dev);
+ struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
+ struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
+ int qs;
+
+ for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
+ IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
+ EQ_MAP(s, txq->q.abs_id) = &txq->q;
+
+ /*
+ * The FW_IQ_CMD doesn't return the Absolute Queue IDs
+ * for Free Lists but since all of the Egress Queues
+ * (including Free Lists) have Relative Queue IDs
+ * which are computed as Absolute - Base Queue ID, we
+ * can synthesize the Absolute Queue IDs for the Free
+ * Lists. This is useful for debugging purposes when
+ * we want to dump Queue Contexts via the PF Driver.
+ */
+ rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
+ EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
+ }
+ }
+ return 0;
+
+err_free_queues:
+ t4vf_free_sge_resources(adapter);
+ return err;
+}
+
+/*
+ * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
+ * queues. We configure the RSS CPU lookup table to distribute to the number
+ * of HW receive queues, and the response queue lookup table to narrow that
+ * down to the response queues actually configured for each "port" (Virtual
+ * Interface). We always configure the RSS mapping for all ports since the
+ * mapping table has plenty of entries.
+ */
+static int setup_rss(struct adapter *adapter)
+{
+ int pidx;
+
+ for_each_port(adapter, pidx) {
+ struct port_info *pi = adap2pinfo(adapter, pidx);
+ struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
+ u16 rss[MAX_PORT_QSETS];
+ int qs, err;
+
+ for (qs = 0; qs < pi->nqsets; qs++)
+ rss[qs] = rxq[qs].rspq.abs_id;
+
+ err = t4vf_config_rss_range(adapter, pi->viid,
+ 0, pi->rss_size, rss, pi->nqsets);
+ if (err)
+ return err;
+
+ /*
+ * Perform Global RSS Mode-specific initialization.
+ */
+ switch (adapter->params.rss.mode) {
+ case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
+ /*
+ * If Tunnel All Lookup isn't specified in the global
+ * RSS Configuration, then we need to specify a
+ * default Ingress Queue for any ingress packets which
+ * aren't hashed. We'll use our first ingress queue
+ * ...
+ */
+ if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
+ union rss_vi_config config;
+ err = t4vf_read_rss_vi_config(adapter,
+ pi->viid,
+ &config);
+ if (err)
+ return err;
+ config.basicvirtual.defaultq =
+ rxq[0].rspq.abs_id;
+ err = t4vf_write_rss_vi_config(adapter,
+ pi->viid,
+ &config);
+ if (err)
+ return err;
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Bring the adapter up. Called whenever we go from no "ports" open to having
+ * one open. This function performs the actions necessary to make an adapter
+ * operational, such as completing the initialization of HW modules, and
+ * enabling interrupts. Must be called with the rtnl lock held. (Note that
+ * this is called "cxgb_up" in the PF Driver.)
+ */
+static int adapter_up(struct adapter *adapter)
+{
+ int err;
+
+ /*
+ * If this is the first time we've been called, perform basic
+ * adapter setup. Once we've done this, many of our adapter
+ * parameters can no longer be changed ...
+ */
+ if ((adapter->flags & FULL_INIT_DONE) == 0) {
+ err = setup_sge_queues(adapter);
+ if (err)
+ return err;
+ err = setup_rss(adapter);
+ if (err) {
+ t4vf_free_sge_resources(adapter);
+ return err;
+ }
+
+ if (adapter->flags & USING_MSIX)
+ name_msix_vecs(adapter);
+ adapter->flags |= FULL_INIT_DONE;
+ }
+
+ /*
+ * Acquire our interrupt resources. We only support MSI-X and MSI.
+ */
+ BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
+ if (adapter->flags & USING_MSIX)
+ err = request_msix_queue_irqs(adapter);
+ else
+ err = request_irq(adapter->pdev->irq,
+ t4vf_intr_handler(adapter), 0,
+ adapter->name, adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
+ err);
+ return err;
+ }
+
+ /*
+ * Enable NAPI ingress processing and return success.
+ */
+ enable_rx(adapter);
+ t4vf_sge_start(adapter);
+ return 0;
+}
+
+/*
+ * Bring the adapter down. Called whenever the last "port" (Virtual
+ * Interface) is closed. (Note that this routine is called "cxgb_down" in the PF
+ * Driver.)
+ */
+static void adapter_down(struct adapter *adapter)
+{
+ /*
+ * Free interrupt resources.
+ */
+ if (adapter->flags & USING_MSIX)
+ free_msix_queue_irqs(adapter);
+ else
+ free_irq(adapter->pdev->irq, adapter);
+
+ /*
+ * Wait for NAPI handlers to finish.
+ */
+ quiesce_rx(adapter);
+}
+
+/*
+ * Start up a net device.
+ */
+static int cxgb4vf_open(struct net_device *dev)
+{
+ int err;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ /*
+ * If this is the first interface that we're opening on the "adapter",
+ * bring the "adapter" up now.
+ */
+ if (adapter->open_device_map == 0) {
+ err = adapter_up(adapter);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Note that this interface is up and start everything up ...
+ */
+ netif_set_real_num_tx_queues(dev, pi->nqsets);
+ err = netif_set_real_num_rx_queues(dev, pi->nqsets);
+ if (err)
+ goto err_unwind;
+ err = link_start(dev);
+ if (err)
+ goto err_unwind;
+
+ netif_tx_start_all_queues(dev);
+ set_bit(pi->port_id, &adapter->open_device_map);
+ return 0;
+
+err_unwind:
+ if (adapter->open_device_map == 0)
+ adapter_down(adapter);
+ return err;
+}
+
+/*
+ * Shut down a net device. This routine is called "cxgb_close" in the PF
+ * Driver ...
+ */
+static int cxgb4vf_stop(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
+ t4vf_enable_vi(adapter, pi->viid, false, false);
+ pi->link_cfg.link_ok = 0;
+
+ clear_bit(pi->port_id, &adapter->open_device_map);
+ if (adapter->open_device_map == 0)
+ adapter_down(adapter);
+ return 0;
+}
+
+/*
+ * Translate our basic statistics into the standard "ifconfig" statistics.
+ */
+static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
+{
+ struct t4vf_port_stats stats;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adapter = pi->adapter;
+ struct net_device_stats *ns = &dev->stats;
+ int err;
+
+ spin_lock(&adapter->stats_lock);
+ err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
+ spin_unlock(&adapter->stats_lock);
+
+ memset(ns, 0, sizeof(*ns));
+ if (err)
+ return ns;
+
+ ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
+ stats.tx_ucast_bytes + stats.tx_offload_bytes);
+ ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
+ stats.tx_ucast_frames + stats.tx_offload_frames);
+ ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
+ stats.rx_ucast_bytes);
+ ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
+ stats.rx_ucast_frames);
+ ns->multicast = stats.rx_mcast_frames;
+ ns->tx_errors = stats.tx_drop_frames;
+ ns->rx_errors = stats.rx_err_frames;
+
+ return ns;
+}
+
+/*
+ * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
+ * at a specified offset within the list, into an array of address pointers and
+ * return the number collected.
+ */
+static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
+ const u8 **addr,
+ unsigned int offset,
+ unsigned int maxaddrs)
+{
+ unsigned int index = 0;
+ unsigned int naddr = 0;
+ const struct netdev_hw_addr *ha;
+
+ for_each_dev_addr(dev, ha)
+ if (index++ >= offset) {
+ addr[naddr++] = ha->addr;
+ if (naddr >= maxaddrs)
+ break;
+ }
+ return naddr;
+}
+
+/*
+ * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
+ * at a specified offset within the list, into an array of address pointers and
+ * return the number collected.
+ */
+static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
+ const u8 **addr,
+ unsigned int offset,
+ unsigned int maxaddrs)
+{
+ unsigned int index = 0;
+ unsigned int naddr = 0;
+ const struct netdev_hw_addr *ha;
+
+ netdev_for_each_mc_addr(ha, dev)
+ if (index++ >= offset) {
+ addr[naddr++] = ha->addr;
+ if (naddr >= maxaddrs)
+ break;
+ }
+ return naddr;
+}
+
+/*
+ * Configure the exact and hash address filters to handle a port's multicast
+ * and secondary unicast MAC addresses.
+ */
+static int set_addr_filters(const struct net_device *dev, bool sleep)
+{
+ u64 mhash = 0;
+ u64 uhash = 0;
+ bool free = true;
+ unsigned int offset, naddr;
+ const u8 *addr[7]; /* up to 7 addresses per t4vf_alloc_mac_filt() call */
+ int ret;
+ const struct port_info *pi = netdev_priv(dev);
+
+ /* first do the secondary unicast addresses */
+ for (offset = 0; ; offset += naddr) {
+ naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
+ ARRAY_SIZE(addr));
+ if (naddr == 0)
+ break;
+
+ ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
+ naddr, addr, NULL, &uhash, sleep);
+ if (ret < 0)
+ return ret;
+
+ free = false;
+ }
+
+ /* next set up the multicast addresses */
+ for (offset = 0; ; offset += naddr) {
+ naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
+ ARRAY_SIZE(addr));
+ if (naddr == 0)
+ break;
+
+ ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
+ naddr, addr, NULL, &mhash, sleep);
+ if (ret < 0)
+ return ret;
+ free = false;
+ }
+
+ return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
+ uhash | mhash, sleep);
+}
+
+/*
+ * Set RX properties of a port, such as promiscuity, address filters, and MTU.
+ * If @mtu is -1 it is left unchanged.
+ */
+static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+
+ ret = set_addr_filters(dev, sleep_ok);
+ if (ret == 0)
+ ret = t4vf_set_rxmode(pi->adapter, pi->viid, -1,
+ (dev->flags & IFF_PROMISC) != 0,
+ (dev->flags & IFF_ALLMULTI) != 0,
+ 1, -1, sleep_ok);
+ return ret;
+}
+
+/*
+ * Set the current receive modes on the device.
+ */
+static void cxgb4vf_set_rxmode(struct net_device *dev)
+{
+ /* unfortunately we can't return errors to the stack */
+ set_rxmode(dev, -1, false);
+}
+
+/*
+ * Find the entry in the interrupt holdoff timer value array which comes
+ * closest to the specified interrupt holdoff value.
+ */
+static int closest_timer(const struct sge *s, int us)
+{
+ int i, timer_idx = 0, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
+ int delta = us - s->timer_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ timer_idx = i;
+ }
+ }
+ return timer_idx;
+}
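+
+/*
+ * Illustrative example (timer values hypothetical): with timer_val[] =
+ * {1, 5, 10, 50, 100, 200}, closest_timer(s, 7) returns index 1 because
+ * |7 - 5| = 2 is the smallest delta; ties go to the lower index since the
+ * comparison is strict.
+ */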
+
+static int closest_thres(const struct sge *s, int thres)
+{
+ int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
+ delta = thres - s->counter_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ pktcnt_idx = i;
+ }
+ }
+ return pktcnt_idx;
+}
+
+/*
+ * Return a queue's interrupt hold-off time in us. 0 means no timer.
+ */
+static unsigned int qtimer_val(const struct adapter *adapter,
+ const struct sge_rspq *rspq)
+{
+ unsigned int timer_idx = QINTR_TIMER_IDX_GET(rspq->intr_params);
+
+ return timer_idx < SGE_NTIMERS
+ ? adapter->sge.timer_val[timer_idx]
+ : 0;
+}
+
+/**
+ * set_rxq_intr_params - set a queue's interrupt holdoff parameters
+ * @adapter: the adapter
+ * @rspq: the RX response queue
+ * @us: the hold-off time in us, or 0 to disable timer
+ * @cnt: the hold-off packet count, or 0 to disable counter
+ *
+ * Sets an RX response queue's interrupt hold-off time and packet count.
+ * At least one of the two needs to be enabled for the queue to generate
+ * interrupts.
+ */
+static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
+ unsigned int us, unsigned int cnt)
+{
+ unsigned int timer_idx;
+
+ /*
+ * If both the interrupt holdoff timer and count are specified as
+ * zero, default to a holdoff count of 1 ...
+ */
+ if ((us | cnt) == 0)
+ cnt = 1;
+
+ /*
+ * If an interrupt holdoff count has been specified, then find the
+ * closest configured holdoff count and use that. If the response
+ * queue has already been created, then update its queue context
+ * parameters ...
+ */
+ if (cnt) {
+ int err;
+ u32 v, pktcnt_idx;
+
+ pktcnt_idx = closest_thres(&adapter->sge, cnt);
+ if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
+ v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X(
+ FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
+ FW_PARAMS_PARAM_YZ(rspq->cntxt_id);
+ err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
+ if (err)
+ return err;
+ }
+ rspq->pktcnt_idx = pktcnt_idx;
+ }
+
+ /*
+ * Compute the closest holdoff timer index from the supplied holdoff
+ * timer value.
+ */
+ timer_idx = (us == 0
+ ? SGE_TIMER_RSTRT_CNTR
+ : closest_timer(&adapter->sge, us));
+
+ /*
+ * Update the response queue's interrupt coalescing parameters and
+ * return success.
+ */
+ rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
+ (cnt > 0 ? QINTR_CNT_EN : 0));
+ return 0;
+}
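+
+/*
+ * Usage sketch (values illustrative): set_rxq_intr_params(adapter, rspq,
+ * 5, 8) selects the configured holdoff timer closest to 5us and the packet
+ * count threshold closest to 8 packets, while a (0, 0) request falls back
+ * to a holdoff count of 1 as described above.
+ */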
+
+/*
+ * Return a version number to identify the type of adapter. The scheme is:
+ * - bits 0..9: chip version
+ * - bits 10..15: chip revision
+ */
+static inline unsigned int mk_adap_vers(const struct adapter *adapter)
+{
+ /*
+ * Chip version 4, revision 0x3f (cxgb4vf), i.e. 4 | (0x3f << 10) = 0xfc04.
+ */
+ return 4 | (0x3f << 10);
+}
+
+/*
+ * Execute the specified ioctl command.
+ */
+static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ /*
+ * The VF Driver doesn't have access to any of the other
+ * common Ethernet device ioctl()'s (like reading/writing
+ * PHY registers, etc.).
+ */
+
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Change the device's MTU.
+ */
+static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+
+ /* accommodate SACK */
+ if (new_mtu < 81)
+ return -EINVAL;
+
+ ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
+ -1, -1, -1, -1, true);
+ if (!ret)
+ dev->mtu = new_mtu;
+ return ret;
+}
+
+static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features)
+{
+ /*
+ * Since there is no support for separate RX/TX VLAN acceleration
+ * enable/disable, make sure the TX flag is always in the same state as RX.
+ */
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ return features;
+}
+
+static int cxgb4vf_set_features(struct net_device *dev, u32 features)
+{
+ struct port_info *pi = netdev_priv(dev);
+ u32 changed = dev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
+ features & NETIF_F_HW_VLAN_TX, 0);
+
+ return 0;
+}
+
+/*
+ * Change the device's MAC address.
+ */
+static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
+{
+ int ret;
+ struct sockaddr *addr = _addr;
+ struct port_info *pi = netdev_priv(dev);
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
+ addr->sa_data, true);
+ if (ret < 0)
+ return ret;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ pi->xact_addr_filt = ret;
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Poll all of our receive queues. This is called outside of normal interrupt
+ * context.
+ */
+static void cxgb4vf_poll_controller(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (adapter->flags & USING_MSIX) {
+ struct sge_eth_rxq *rxq;
+ int nqsets;
+
+ rxq = &adapter->sge.ethrxq[pi->first_qset];
+ for (nqsets = pi->nqsets; nqsets; nqsets--) {
+ t4vf_sge_intr_msix(0, &rxq->rspq);
+ rxq++;
+ }
+ } else
+ t4vf_intr_handler(adapter)(0, adapter);
+}
+#endif
+
+/*
+ * Ethtool operations.
+ * ===================
+ *
+ * Note that we don't support any ethtool operations which change the physical
+ * state of the port to which we're linked.
+ */
+
+/*
+ * Return current port link settings.
+ */
+static int cxgb4vf_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ const struct port_info *pi = netdev_priv(dev);
+
+ cmd->supported = pi->link_cfg.supported;
+ cmd->advertising = pi->link_cfg.advertising;
+ ethtool_cmd_speed_set(cmd,
+ netif_carrier_ok(dev) ? pi->link_cfg.speed : -1);
+ cmd->duplex = DUPLEX_FULL;
+
+ cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+ cmd->phy_address = pi->port_id;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->autoneg = pi->link_cfg.autoneg;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+ return 0;
+}
+
+/*
+ * Return our driver information.
+ */
+static void cxgb4vf_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct adapter *adapter = netdev2adap(dev);
+
+ strcpy(drvinfo->driver, KBUILD_MODNAME);
+ strcpy(drvinfo->version, DRV_VERSION);
+ strcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%u.%u.%u.%u, TP %u.%u.%u.%u",
+ FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev),
+ FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.fwrev),
+ FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.fwrev),
+ FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.fwrev),
+ FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.tprev),
+ FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.tprev),
+ FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.tprev),
+ FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.tprev));
+}
+
+/*
+ * Return current adapter message level.
+ */
+static u32 cxgb4vf_get_msglevel(struct net_device *dev)
+{
+ return netdev2adap(dev)->msg_enable;
+}
+
+/*
+ * Set current adapter message level.
+ */
+static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
+{
+ netdev2adap(dev)->msg_enable = msglevel;
+}
+
+/*
+ * Return the device's current Queue Set ring size parameters along with the
+ * allowed maximum values. Since ethtool doesn't understand the concept of
+ * multi-queue devices, we just return the current values associated with the
+ * first Queue Set.
+ */
+static void cxgb4vf_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *rp)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ const struct sge *s = &pi->adapter->sge;
+
+ rp->rx_max_pending = MAX_RX_BUFFERS;
+ rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
+ rp->rx_jumbo_max_pending = 0;
+ rp->tx_max_pending = MAX_TXQ_ENTRIES;
+
+ rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
+ rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
+ rp->rx_jumbo_pending = 0;
+ rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
+}
+
+/*
+ * Set the Queue Set ring size parameters for the device. Again, since
+ * ethtool doesn't allow for the concept of multiple queues per device, we'll
+ * apply these new values across all of the Queue Sets associated with the
+ * device -- after vetting them of course!
+ */
+static int cxgb4vf_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *rp)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ int qs;
+
+ if (rp->rx_pending > MAX_RX_BUFFERS ||
+ rp->rx_jumbo_pending ||
+ rp->tx_pending > MAX_TXQ_ENTRIES ||
+ rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
+ rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
+ rp->rx_pending < MIN_FL_ENTRIES ||
+ rp->tx_pending < MIN_TXQ_ENTRIES)
+ return -EINVAL;
+
+ if (adapter->flags & FULL_INIT_DONE)
+ return -EBUSY;
+
+ for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
+ s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
+ s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
+ s->ethtxq[qs].q.size = rp->tx_pending;
+ }
+ return 0;
+}
+
+/*
+ * Return the interrupt holdoff timer and count for the first Queue Set on the
+ * device. Our extension ioctl() (the cxgbtool interface) allows the
+ * interrupt holdoff timer to be read on all of the device's Queue Sets.
+ */
+static int cxgb4vf_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coalesce)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ const struct adapter *adapter = pi->adapter;
+ const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;
+
+ coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
+ coalesce->rx_max_coalesced_frames =
+ ((rspq->intr_params & QINTR_CNT_EN)
+ ? adapter->sge.counter_val[rspq->pktcnt_idx]
+ : 0);
+ return 0;
+}
+
+/*
+ * Set the RX interrupt holdoff timer and count for the first Queue Set on the
+ * interface. Our extension ioctl() (the cxgbtool interface) allows us to set
+ * the interrupt holdoff timer on any of the device's Queue Sets.
+ */
+static int cxgb4vf_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coalesce)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ return set_rxq_intr_params(adapter,
+ &adapter->sge.ethrxq[pi->first_qset].rspq,
+ coalesce->rx_coalesce_usecs,
+ coalesce->rx_max_coalesced_frames);
+}
+
+/*
+ * Report current port link pause parameter settings.
+ */
+static void cxgb4vf_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct port_info *pi = netdev_priv(dev);
+
+ pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
+ pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
+ pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
+}
+
+/*
+ * Identify the port by blinking the port's LED.
+ */
+static int cxgb4vf_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ unsigned int val;
+ struct port_info *pi = netdev_priv(dev);
+
+ if (state == ETHTOOL_ID_ACTIVE)
+ val = 0xffff;
+ else if (state == ETHTOOL_ID_INACTIVE)
+ val = 0;
+ else
+ return -EINVAL;
+
+ return t4vf_identify_port(pi->adapter, pi->viid, val);
+}
+
+/*
+ * Port stats maintained per queue of the port.
+ */
+struct queue_port_stats {
+ u64 tso;
+ u64 tx_csum;
+ u64 rx_csum;
+ u64 vlan_ex;
+ u64 vlan_ins;
+ u64 lro_pkts;
+ u64 lro_merged;
+};
+
+/*
+ * Strings for the ETH_SS_STATS statistics set ("ethtool -S"). Note that
+ * these need to match the order of statistics returned by
+ * t4vf_get_port_stats().
+ */
+static const char stats_strings[][ETH_GSTRING_LEN] = {
+ /*
+ * These must match the layout of the t4vf_port_stats structure.
+ */
+ "TxBroadcastBytes ",
+ "TxBroadcastFrames ",
+ "TxMulticastBytes ",
+ "TxMulticastFrames ",
+ "TxUnicastBytes ",
+ "TxUnicastFrames ",
+ "TxDroppedFrames ",
+ "TxOffloadBytes ",
+ "TxOffloadFrames ",
+ "RxBroadcastBytes ",
+ "RxBroadcastFrames ",
+ "RxMulticastBytes ",
+ "RxMulticastFrames ",
+ "RxUnicastBytes ",
+ "RxUnicastFrames ",
+ "RxErrorFrames ",
+
+ /*
+ * These are accumulated per-queue statistics and must match the
+ * order of the fields in the queue_port_stats structure.
+ */
+ "TSO ",
+ "TxCsumOffload ",
+ "RxCsumGood ",
+ "VLANextractions ",
+ "VLANinsertions ",
+ "GROPackets ",
+ "GROMerged ",
+};
+
+/*
+ * Return the number of statistics in the specified statistics set.
+ */
+static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(stats_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+ /*NOTREACHED*/
+}
+
+/*
+ * Return the strings for the specified statistics set.
+ */
+static void cxgb4vf_get_strings(struct net_device *dev,
+ u32 sset,
+ u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ memcpy(data, stats_strings, sizeof(stats_strings));
+ break;
+ }
+}
+
+/*
+ * Small utility routine to accumulate queue statistics across the queues of
+ * a "port".
+ */
+static void collect_sge_port_stats(const struct adapter *adapter,
+ const struct port_info *pi,
+ struct queue_port_stats *stats)
+{
+ const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
+ const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
+ int qs;
+
+ memset(stats, 0, sizeof(*stats));
+ for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
+ stats->tso += txq->tso;
+ stats->tx_csum += txq->tx_cso;
+ stats->rx_csum += rxq->stats.rx_cso;
+ stats->vlan_ex += rxq->stats.vlan_ex;
+ stats->vlan_ins += txq->vlan_ins;
+ stats->lro_pkts += rxq->stats.lro_pkts;
+ stats->lro_merged += rxq->stats.lro_merged;
+ }
+}
+
+/*
+ * Return the ETH_SS_STATS statistics set.
+ */
+static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adapter = pi->adapter;
+ int err = t4vf_get_port_stats(adapter, pi->pidx,
+ (struct t4vf_port_stats *)data);
+ if (err)
+ memset(data, 0, sizeof(struct t4vf_port_stats));
+
+ data += sizeof(struct t4vf_port_stats) / sizeof(u64);
+ collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
+}
+
+/*
+ * Return the size of our register map.
+ */
+static int cxgb4vf_get_regs_len(struct net_device *dev)
+{
+ return T4VF_REGMAP_SIZE;
+}
+
+/*
+ * Dump a block of registers, start to end inclusive, into a buffer.
+ */
+static void reg_block_dump(struct adapter *adapter, void *regbuf,
+ unsigned int start, unsigned int end)
+{
+ u32 *bp = regbuf + start - T4VF_REGMAP_START;
+
+ for ( ; start <= end; start += sizeof(u32)) {
+ /*
+ * Avoid reading the Mailbox Control register since that
+ * can trigger a Mailbox Ownership Arbitration cycle and
+ * interfere with communication with the firmware.
+ */
+ if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
+ *bp++ = 0xffff;
+ else
+ *bp++ = t4_read_reg(adapter, start);
+ }
+}
+
+/*
+ * Copy our entire register map into the provided buffer.
+ */
+static void cxgb4vf_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs,
+ void *regbuf)
+{
+ struct adapter *adapter = netdev2adap(dev);
+
+ regs->version = mk_adap_vers(adapter);
+
+ /*
+ * Fill in register buffer with our register map.
+ */
+ memset(regbuf, 0, T4VF_REGMAP_SIZE);
+
+ reg_block_dump(adapter, regbuf,
+ T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
+ T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
+ reg_block_dump(adapter, regbuf,
+ T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
+ T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
+ reg_block_dump(adapter, regbuf,
+ T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
+ T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_LAST);
+ reg_block_dump(adapter, regbuf,
+ T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
+ T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
+
+ reg_block_dump(adapter, regbuf,
+ T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
+ T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
+}
+
+/*
+ * Report current Wake On LAN settings.
+ */
+static void cxgb4vf_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+/*
+ * TCP Segmentation Offload flags which we support.
+ */
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
+static struct ethtool_ops cxgb4vf_ethtool_ops = {
+ .get_settings = cxgb4vf_get_settings,
+ .get_drvinfo = cxgb4vf_get_drvinfo,
+ .get_msglevel = cxgb4vf_get_msglevel,
+ .set_msglevel = cxgb4vf_set_msglevel,
+ .get_ringparam = cxgb4vf_get_ringparam,
+ .set_ringparam = cxgb4vf_set_ringparam,
+ .get_coalesce = cxgb4vf_get_coalesce,
+ .set_coalesce = cxgb4vf_set_coalesce,
+ .get_pauseparam = cxgb4vf_get_pauseparam,
+ .get_link = ethtool_op_get_link,
+ .get_strings = cxgb4vf_get_strings,
+ .set_phys_id = cxgb4vf_phys_id,
+ .get_sset_count = cxgb4vf_get_sset_count,
+ .get_ethtool_stats = cxgb4vf_get_ethtool_stats,
+ .get_regs_len = cxgb4vf_get_regs_len,
+ .get_regs = cxgb4vf_get_regs,
+ .get_wol = cxgb4vf_get_wol,
+};
+
+/*
+ * /sys/kernel/debug/cxgb4vf support code and data.
+ * ================================================
+ */
+
+/*
+ * Show SGE Queue Set information. We display QPL Queue Sets per line.
+ */
+#define QPL 4
+
+static int sge_qinfo_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adapter = seq->private;
+ int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
+ int qs, r = (uintptr_t)v - 1;
+
+ if (r)
+ seq_putc(seq, '\n');
+
+ #define S3(fmt_spec, s, v) \
+ do { \
+ seq_printf(seq, "%-12s", s); \
+ for (qs = 0; qs < n; ++qs) \
+ seq_printf(seq, " %16" fmt_spec, v); \
+ seq_putc(seq, '\n'); \
+ } while (0)
+ #define S(s, v) S3("s", s, v)
+ #define T(s, v) S3("u", s, txq[qs].v)
+ #define R(s, v) S3("u", s, rxq[qs].v)
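+
+ /*
+ * Each S/S3/T/R invocation below emits one row of output: a label
+ * followed by one column per Queue Set in the current group of (up
+ * to) QPL Queue Sets. Note that "qs" and "n" are free variables
+ * bound at the point of use.
+ */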
+
+ if (r < eth_entries) {
+ const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
+ const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
+ int n = min(QPL, adapter->sge.ethqsets - QPL * r);
+
+ S("QType:", "Ethernet");
+ S("Interface:",
+ (rxq[qs].rspq.netdev
+ ? rxq[qs].rspq.netdev->name
+ : "N/A"));
+ S3("d", "Port:",
+ (rxq[qs].rspq.netdev
+ ? ((struct port_info *)
+ netdev_priv(rxq[qs].rspq.netdev))->port_id
+ : -1));
+ T("TxQ ID:", q.abs_id);
+ T("TxQ size:", q.size);
+ T("TxQ inuse:", q.in_use);
+ T("TxQ PIdx:", q.pidx);
+ T("TxQ CIdx:", q.cidx);
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
+ S3("u", "Intr pktcnt:",
+ adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
+ R("RspQ CIdx:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ R("FL ID:", fl.abs_id);
+ R("FL size:", fl.size - MIN_FL_RESID);
+ R("FL avail:", fl.avail);
+ R("FL PIdx:", fl.pidx);
+ R("FL CIdx:", fl.cidx);
+ return 0;
+ }
+
+ r -= eth_entries;
+ if (r == 0) {
+ const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
+
+ seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
+ seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
+ seq_printf(seq, "%-12s %16u\n", "Intr delay:",
+ qtimer_val(adapter, evtq));
+ seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
+ adapter->sge.counter_val[evtq->pktcnt_idx]);
+ seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
+ seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
+ } else if (r == 1) {
+ const struct sge_rspq *intrq = &adapter->sge.intrq;
+
+ seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
+ seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
+ seq_printf(seq, "%-12s %16u\n", "Intr delay:",
+ qtimer_val(adapter, intrq));
+ seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
+ adapter->sge.counter_val[intrq->pktcnt_idx]);
+ seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
+ seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
+ }
+
+ #undef R
+ #undef T
+ #undef S
+ #undef S3
+
+ return 0;
+}
+
+/*
+ * Return the number of "entries" in our "file". We group the multi-Queue
+ * sections with QPL Queue Sets per "entry". The sections of the output are:
+ *
+ * Ethernet RX/TX Queue Sets
+ * Firmware Event Queue
+ * Forwarded Interrupt Queue (if in MSI mode)
+ */
+static int sge_queue_entries(const struct adapter *adapter)
+{
+ return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
+ ((adapter->flags & USING_MSI) != 0);
+}
+
+static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
+{
+ int entries = sge_queue_entries(seq->private);
+
+ return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static void sge_queue_stop(struct seq_file *seq, void *v)
+{
+}
+
+static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ int entries = sge_queue_entries(seq->private);
+
+ ++*pos;
+ return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static const struct seq_operations sge_qinfo_seq_ops = {
+ .start = sge_queue_start,
+ .next = sge_queue_next,
+ .stop = sge_queue_stop,
+ .show = sge_qinfo_show
+};
+
+static int sge_qinfo_open(struct inode *inode, struct file *file)
+{
+ int res = seq_open(file, &sge_qinfo_seq_ops);
+
+ if (!res) {
+ struct seq_file *seq = file->private_data;
+ seq->private = inode->i_private;
+ }
+ return res;
+}
+
+static const struct file_operations sge_qinfo_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = sge_qinfo_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * Show SGE Queue Set statistics. We display QPL Queue Sets per line.
+ */
+#define QPL 4
+
+static int sge_qstats_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adapter = seq->private;
+ int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
+ int qs, r = (uintptr_t)v - 1;
+
+ if (r)
+ seq_putc(seq, '\n');
+
+ #define S3(fmt, s, v) \
+ do { \
+ seq_printf(seq, "%-16s", s); \
+ for (qs = 0; qs < n; ++qs) \
+ seq_printf(seq, " %8" fmt, v); \
+ seq_putc(seq, '\n'); \
+ } while (0)
+ #define S(s, v) S3("s", s, v)
+
+ #define T3(fmt, s, v) S3(fmt, s, txq[qs].v)
+ #define T(s, v) T3("lu", s, v)
+
+ #define R3(fmt, s, v) S3(fmt, s, rxq[qs].v)
+ #define R(s, v) R3("lu", s, v)
+
+ if (r < eth_entries) {
+ const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
+ const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
+ int n = min(QPL, adapter->sge.ethqsets - QPL * r);
+
+ S("QType:", "Ethernet");
+ S("Interface:",
+ (rxq[qs].rspq.netdev
+ ? rxq[qs].rspq.netdev->name
+ : "N/A"));
+ R3("u", "RspQNullInts:", rspq.unhandled_irqs);
+ R("RxPackets:", stats.pkts);
+ R("RxCSO:", stats.rx_cso);
+ R("VLANxtract:", stats.vlan_ex);
+ R("LROmerged:", stats.lro_merged);
+ R("LROpackets:", stats.lro_pkts);
+ R("RxDrops:", stats.rx_drops);
+ T("TSO:", tso);
+ T("TxCSO:", tx_cso);
+ T("VLANins:", vlan_ins);
+ T("TxQFull:", q.stops);
+ T("TxQRestarts:", q.restarts);
+ T("TxMapErr:", mapping_err);
+ R("FLAllocErr:", fl.alloc_failed);
+ R("FLLrgAlcErr:", fl.large_alloc_failed);
+ R("FLStarving:", fl.starving);
+ return 0;
+ }
+
+ r -= eth_entries;
+ if (r == 0) {
+ const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
+
+ seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
+ seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
+ evtq->unhandled_irqs);
+ seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
+ seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
+ } else if (r == 1) {
+ const struct sge_rspq *intrq = &adapter->sge.intrq;
+
+ seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
+ seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
+ intrq->unhandled_irqs);
+ seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
+ seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
+ }
+
+ #undef R
+ #undef T
+ #undef S
+ #undef R3
+ #undef T3
+ #undef S3
+
+ return 0;
+}
+
+/*
+ * Return the number of "entries" in our "file". We group the multi-Queue
+ * sections with QPL Queue Sets per "entry". The sections of the output are:
+ *
+ * Ethernet RX/TX Queue Sets
+ * Firmware Event Queue
+ * Forwarded Interrupt Queue (if in MSI mode)
+ */
+static int sge_qstats_entries(const struct adapter *adapter)
+{
+ return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
+ ((adapter->flags & USING_MSI) != 0);
+}
+
+static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
+{
+ int entries = sge_qstats_entries(seq->private);
+
+ return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static void sge_qstats_stop(struct seq_file *seq, void *v)
+{
+}
+
+static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ int entries = sge_qstats_entries(seq->private);
+
+ (*pos)++;
+ return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
+}
+
+static const struct seq_operations sge_qstats_seq_ops = {
+ .start = sge_qstats_start,
+ .next = sge_qstats_next,
+ .stop = sge_qstats_stop,
+ .show = sge_qstats_show
+};
+
+static int sge_qstats_open(struct inode *inode, struct file *file)
+{
+ int res = seq_open(file, &sge_qstats_seq_ops);
+
+ if (res == 0) {
+ struct seq_file *seq = file->private_data;
+ seq->private = inode->i_private;
+ }
+ return res;
+}
+
+static const struct file_operations sge_qstats_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = sge_qstats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * Show PCI-E SR-IOV Virtual Function Resource Limits.
+ */
+static int resources_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adapter = seq->private;
+ struct vf_resources *vfres = &adapter->params.vfres;
+
+ #define S(desc, fmt, var) \
+ seq_printf(seq, "%-60s " fmt "\n", \
+ desc " (" #var "):", vfres->var)
+
+ S("Virtual Interfaces", "%d", nvi);
+ S("Egress Queues", "%d", neq);
+ S("Ethernet Control", "%d", nethctrl);
+ S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
+ S("Ingress Queues", "%d", niq);
+ S("Traffic Class", "%d", tc);
+ S("Port Access Rights Mask", "%#x", pmask);
+ S("MAC Address Filters", "%d", nexactf);
+ S("Firmware Command Read Capabilities", "%#x", r_caps);
+ S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
+
+ #undef S
+
+ return 0;
+}
+
+static int resources_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, resources_show, inode->i_private);
+}
+
+static const struct file_operations resources_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = resources_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * Show Virtual Interfaces.
+ */
+static int interfaces_show(struct seq_file *seq, void *v)
+{
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "Interface Port VIID\n");
+ } else {
+ struct adapter *adapter = seq->private;
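+		/*
+		 * v encodes (pos + 1) and pos 0 is the SEQ_START_TOKEN
+		 * header line, so the port index here is v - 2.
+		 */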
+ int pidx = (uintptr_t)v - 2;
+ struct net_device *dev = adapter->port[pidx];
+ struct port_info *pi = netdev_priv(dev);
+
+ seq_printf(seq, "%9s %4d %#5x\n",
+ dev->name, pi->port_id, pi->viid);
+ }
+ return 0;
+}
+
+static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
+{
+ return pos <= adapter->params.nports
+ ? (void *)(uintptr_t)(pos + 1)
+ : NULL;
+}
+
+static void *interfaces_start(struct seq_file *seq, loff_t *pos)
+{
+ return *pos
+ ? interfaces_get_idx(seq->private, *pos)
+ : SEQ_START_TOKEN;
+}
+
+static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return interfaces_get_idx(seq->private, *pos);
+}
+
+static void interfaces_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations interfaces_seq_ops = {
+ .start = interfaces_start,
+ .next = interfaces_next,
+ .stop = interfaces_stop,
+ .show = interfaces_show
+};
+
+static int interfaces_open(struct inode *inode, struct file *file)
+{
+ int res = seq_open(file, &interfaces_seq_ops);
+
+ if (res == 0) {
+ struct seq_file *seq = file->private_data;
+ seq->private = inode->i_private;
+ }
+ return res;
+}
+
+static const struct file_operations interfaces_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = interfaces_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * /sys/kernel/debug/cxgb4vf/ files list.
+ */
+struct cxgb4vf_debugfs_entry {
+ const char *name; /* name of debugfs node */
+ mode_t mode; /* file system mode */
+ const struct file_operations *fops;
+};
+
+static struct cxgb4vf_debugfs_entry debugfs_files[] = {
+ { "sge_qinfo", S_IRUGO, &sge_qinfo_debugfs_fops },
+ { "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
+ { "resources", S_IRUGO, &resources_proc_fops },
+ { "interfaces", S_IRUGO, &interfaces_proc_fops },
+};
+
+/*
+ * Module and device initialization and cleanup code.
+ * ==================================================
+ */
+
+/*
+ * Set up our /sys/kernel/debug/cxgb4vf sub-nodes. We assume that the
+ * directory (debugfs_root) has already been set up.
+ */
+static int __devinit setup_debugfs(struct adapter *adapter)
+{
+ int i;
+
+ BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
+
+ /*
+ * Debugfs support is best effort.
+ */
+ for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+ (void)debugfs_create_file(debugfs_files[i].name,
+ debugfs_files[i].mode,
+ adapter->debugfs_root,
+ (void *)adapter,
+ debugfs_files[i].fops);
+
+ return 0;
+}
+
+/*
+ * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave
+ * it to our caller to tear down the directory (debugfs_root).
+ */
+static void cleanup_debugfs(struct adapter *adapter)
+{
+ BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
+
+ /*
+ * Unlike our sister routine cleanup_proc(), we don't need to remove
+ * individual entries because a call will be made to
+ * debugfs_remove_recursive(). We just need to clean up any ancillary
+ * persistent state.
+ */
+ /* nothing to do */
+}
+
+/*
+ * Perform early "adapter" initialization. This is where we discover what
+ * adapter parameters we're going to be using and initialize basic adapter
+ * hardware support.
+ */
+static int __devinit adap_init0(struct adapter *adapter)
+{
+ struct vf_resources *vfres = &adapter->params.vfres;
+ struct sge_params *sge_params = &adapter->params.sge;
+ struct sge *s = &adapter->sge;
+ unsigned int ethqsets;
+ int err;
+
+ /*
+ * Wait for the device to become ready before proceeding ...
+ */
+ err = t4vf_wait_dev_ready(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "device didn't become ready:"
+ " err=%d\n", err);
+ return err;
+ }
+
+ /*
+ * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
+ * 2.6.31 and later we can't call pci_reset_function() in order to
+ * issue an FLR because of a self-deadlock on the device semaphore.
+ * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
+ * cases where they're needed -- for instance, some versions of KVM
+ * fail to reset "Assigned Devices" when the VM reboots. Therefore we
+ * use the firmware based reset in order to reset any per function
+ * state.
+ */
+ err = t4vf_fw_reset(adapter);
+ if (err < 0) {
+ dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
+ return err;
+ }
+
+ /*
+ * Grab basic operational parameters. These will predominantly have
+ * been set up by the Physical Function Driver or will be hard coded
+ * into the adapter. We just have to live with them ... Note that
+ * we _must_ get our VPD parameters before our SGE parameters because
+ * we need to know the adapter's core clock from the VPD in order to
+ * properly decode the SGE Timer Values.
+ */
+ err = t4vf_get_dev_params(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " device parameters: err=%d\n", err);
+ return err;
+ }
+ err = t4vf_get_vpd_params(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " VPD parameters: err=%d\n", err);
+ return err;
+ }
+ err = t4vf_get_sge_params(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " SGE parameters: err=%d\n", err);
+ return err;
+ }
+ err = t4vf_get_rss_glb_config(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " RSS parameters: err=%d\n", err);
+ return err;
+ }
+ if (adapter->params.rss.mode !=
+ FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
+ dev_err(adapter->pdev_dev, "unable to operate with global RSS"
+ " mode %d\n", adapter->params.rss.mode);
+ return -EINVAL;
+ }
+ err = t4vf_sge_init(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
+ " err=%d\n", err);
+ return err;
+ }
+
+ /*
+ * Retrieve our RX interrupt holdoff timer values and counter
+ * threshold values from the SGE parameters.
+ */
+ s->timer_val[0] = core_ticks_to_us(adapter,
+ TIMERVALUE0_GET(sge_params->sge_timer_value_0_and_1));
+ s->timer_val[1] = core_ticks_to_us(adapter,
+ TIMERVALUE1_GET(sge_params->sge_timer_value_0_and_1));
+ s->timer_val[2] = core_ticks_to_us(adapter,
+ TIMERVALUE0_GET(sge_params->sge_timer_value_2_and_3));
+ s->timer_val[3] = core_ticks_to_us(adapter,
+ TIMERVALUE1_GET(sge_params->sge_timer_value_2_and_3));
+ s->timer_val[4] = core_ticks_to_us(adapter,
+ TIMERVALUE0_GET(sge_params->sge_timer_value_4_and_5));
+ s->timer_val[5] = core_ticks_to_us(adapter,
+ TIMERVALUE1_GET(sge_params->sge_timer_value_4_and_5));
+
+ s->counter_val[0] =
+ THRESHOLD_0_GET(sge_params->sge_ingress_rx_threshold);
+ s->counter_val[1] =
+ THRESHOLD_1_GET(sge_params->sge_ingress_rx_threshold);
+ s->counter_val[2] =
+ THRESHOLD_2_GET(sge_params->sge_ingress_rx_threshold);
+ s->counter_val[3] =
+ THRESHOLD_3_GET(sge_params->sge_ingress_rx_threshold);
+
+ /*
+ * Grab our Virtual Interface resource allocation, extract the
+ * features that we're interested in and do a bit of sanity testing on
+ * what we discover.
+ */
+ err = t4vf_get_vfres(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to get virtual interface"
+ " resources: err=%d\n", err);
+ return err;
+ }
+
+ /*
+ * The number of "ports" which we support is equal to the number of
+ * Virtual Interfaces with which we've been provisioned.
+ */
+ adapter->params.nports = vfres->nvi;
+ if (adapter->params.nports > MAX_NPORTS) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d allowed"
+ " virtual interfaces\n", MAX_NPORTS,
+ adapter->params.nports);
+ adapter->params.nports = MAX_NPORTS;
+ }
+
+ /*
+ * We need to reserve a number of the ingress queues with Free List
+ * and Interrupt capabilities for special interrupt purposes (like
+ * asynchronous firmware messages, or forwarded interrupts if we're
+ * using MSI). The rest of the FL/Intr-capable ingress queues will be
+ * matched up one-for-one with Ethernet/Control egress queues in order
+	 * to form "Queue Sets" which will be apportioned between the "ports".
+ * For each Queue Set, we'll need the ability to allocate two Egress
+ * Contexts -- one for the Ingress Queue Free List and one for the TX
+ * Ethernet Queue.
+ */
+ ethqsets = vfres->niqflint - INGQ_EXTRAS;
+ if (vfres->nethctrl != ethqsets) {
+ dev_warn(adapter->pdev_dev, "unequal number of [available]"
+ " ingress/egress queues (%d/%d); using minimum for"
+ " number of Queue Sets\n", ethqsets, vfres->nethctrl);
+ ethqsets = min(vfres->nethctrl, ethqsets);
+ }
+ if (vfres->neq < ethqsets*2) {
+ dev_warn(adapter->pdev_dev, "Not enough Egress Contexts (%d)"
+ " to support Queue Sets (%d); reducing allowed Queue"
+ " Sets\n", vfres->neq, ethqsets);
+ ethqsets = vfres->neq/2;
+ }
+ if (ethqsets > MAX_ETH_QSETS) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d allowed Queue"
+			 " Sets\n", MAX_ETH_QSETS, ethqsets);
+ ethqsets = MAX_ETH_QSETS;
+ }
+ if (vfres->niq != 0 || vfres->neq > ethqsets*2) {
+ dev_warn(adapter->pdev_dev, "unused resources niq/neq (%d/%d)"
+ " ignored\n", vfres->niq, vfres->neq - ethqsets*2);
+ }
+ adapter->sge.max_ethqsets = ethqsets;
+
+ /*
+ * Check for various parameter sanity issues. Most checks simply
+	 * result in us using fewer resources than our provisioning but we
+ * do need at least one "port" with which to work ...
+ */
+ if (adapter->sge.max_ethqsets < adapter->params.nports) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d available"
+ " virtual interfaces (too few Queue Sets)\n",
+ adapter->sge.max_ethqsets, adapter->params.nports);
+ adapter->params.nports = adapter->sge.max_ethqsets;
+ }
+ if (adapter->params.nports == 0) {
+ dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
+ "usable!\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
+ u8 pkt_cnt_idx, unsigned int size,
+ unsigned int iqe_size)
+{
+ rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
+ (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0));
+ rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
+ ? pkt_cnt_idx
+ : 0);
+ rspq->iqe_len = iqe_size;
+ rspq->size = size;
+}
+
+/*
+ * Perform default configuration of DMA queues depending on the number and
+ * type of ports we found and the number of available CPUs. Most settings can
+ * be modified by the admin via ethtool and cxgbtool prior to the adapter
+ * being brought up for the first time.
+ */
+static void __devinit cfg_queues(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ int q10g, n10g, qidx, pidx, qs;
+ size_t iqe_size;
+
+ /*
+ * We should not be called till we know how many Queue Sets we can
+ * support. In particular, this means that we need to know what kind
+ * of interrupts we'll be using ...
+ */
+ BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
+
+ /*
+ * Count the number of 10GbE Virtual Interfaces that we have.
+ */
+ n10g = 0;
+ for_each_port(adapter, pidx)
+ n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
+
+ /*
+ * We default to 1 queue per non-10G port and up to # of cores queues
+ * per 10G port.
+ */
+ if (n10g == 0)
+ q10g = 0;
+ else {
+ int n1g = (adapter->params.nports - n10g);
+ q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
+ if (q10g > num_online_cpus())
+ q10g = num_online_cpus();
+ }
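+
+	/*
+	 * Worked example (illustrative): with max_ethqsets == 16 and two
+	 * ports of which one is 10G, n1g == 1, so q10g == (16 - 1)/1 == 15,
+	 * which is then capped at num_online_cpus().
+	 */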
+
+ /*
+ * Allocate the "Queue Sets" to the various Virtual Interfaces.
+ * The layout will be established in setup_sge_queues() when the
+	 * adapter is brought up for the first time.
+ */
+ qidx = 0;
+ for_each_port(adapter, pidx) {
+ struct port_info *pi = adap2pinfo(adapter, pidx);
+
+ pi->first_qset = qidx;
+ pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
+ qidx += pi->nqsets;
+ }
+ s->ethqsets = qidx;
+
+ /*
+ * The Ingress Queue Entry Size for our various Response Queues needs
+ * to be big enough to accommodate the largest message we can receive
+ * from the chip/firmware; which is 64 bytes ...
+ */
+ iqe_size = 64;
+
+ /*
+ * Set up default Queue Set parameters ... Start off with the
+ * shortest interrupt holdoff timer.
+ */
+ for (qs = 0; qs < s->max_ethqsets; qs++) {
+ struct sge_eth_rxq *rxq = &s->ethrxq[qs];
+ struct sge_eth_txq *txq = &s->ethtxq[qs];
+
+ init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
+ rxq->fl.size = 72;
+ txq->q.size = 1024;
+ }
+
+ /*
+ * The firmware event queue is used for link state changes and
+ * notifications of TX DMA completions.
+ */
+ init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
+
+ /*
+ * The forwarded interrupt queue is used when we're in MSI interrupt
+ * mode. In this mode all interrupts associated with RX queues will
+ * be forwarded to a single queue which we'll associate with our MSI
+ * interrupt vector. The messages dropped in the forwarded interrupt
+ * queue will indicate which ingress queue needs servicing ... This
+ * queue needs to be large enough to accommodate all of the ingress
+ * queues which are forwarding their interrupt (+1 to prevent the PIDX
+ * from equalling the CIDX if every ingress queue has an outstanding
+ * interrupt). The queue doesn't need to be any larger because no
+ * ingress queue will ever have more than one outstanding interrupt at
+ * any time ...
+ */
+ init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
+ iqe_size);
+}
+
+/*
+ * Reduce the number of Ethernet queues across all ports to at most n.
+ * n provides at least one queue per port.
+ */
+static void __devinit reduce_ethqs(struct adapter *adapter, int n)
+{
+ int i;
+ struct port_info *pi;
+
+ /*
+	 * While we have too many active Ethernet Queue Sets, iterate across the
+ * "ports" and reduce their individual Queue Set allocations.
+ */
+ BUG_ON(n < adapter->params.nports);
+ while (n < adapter->sge.ethqsets)
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ if (pi->nqsets > 1) {
+ pi->nqsets--;
+ adapter->sge.ethqsets--;
+ if (adapter->sge.ethqsets <= n)
+ break;
+ }
+ }
+
+ /*
+ * Reassign the starting Queue Sets for each of the "ports" ...
+ */
+ n = 0;
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ pi->first_qset = n;
+ n += pi->nqsets;
+ }
+}
+
+/*
+ * We need to grab enough MSI-X vectors to cover our interrupt needs. Ideally
+ * we get a separate MSI-X vector for every "Queue Set" plus any extras we
+ * need. Minimally we need one for every Virtual Interface plus those needed
+ * for our "extras". Note that this process may lower the maximum number of
+ * allowed Queue Sets ...
+ */
+static int __devinit enable_msix(struct adapter *adapter)
+{
+ int i, err, want, need;
+ struct msix_entry entries[MSIX_ENTRIES];
+ struct sge *s = &adapter->sge;
+
+ for (i = 0; i < MSIX_ENTRIES; ++i)
+ entries[i].entry = i;
+
+ /*
+ * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
+ * plus those needed for our "extras" (for example, the firmware
+ * message queue). We _need_ at least one "Queue Set" per Virtual
+ * Interface plus those needed for our "extras". So now we get to see
+ * if the song is right ...
+ */
+ want = s->max_ethqsets + MSIX_EXTRAS;
+ need = adapter->params.nports + MSIX_EXTRAS;
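+
+	/*
+	 * Note that with this API, pci_enable_msix() returns 0 on success,
+	 * a positive count of the vectors actually available if our request
+	 * was too large, or a negative errno.  The loop below retries with
+	 * the advertised count until we succeed or fall below "need".
+	 */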
+ while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need)
+ want = err;
+
+ if (err == 0) {
+ int nqsets = want - MSIX_EXTRAS;
+ if (nqsets < s->max_ethqsets) {
+ dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
+ " for %d Queue Sets\n", nqsets);
+ s->max_ethqsets = nqsets;
+ if (nqsets < s->ethqsets)
+ reduce_ethqs(adapter, nqsets);
+ }
+ for (i = 0; i < want; ++i)
+ adapter->msix_info[i].vec = entries[i].vector;
+ } else if (err > 0) {
+ pci_disable_msix(adapter->pdev);
+ dev_info(adapter->pdev_dev, "only %d MSI-X vectors left,"
+ " not using MSI-X\n", err);
+ }
+ return err;
+}
+
+static const struct net_device_ops cxgb4vf_netdev_ops = {
+ .ndo_open = cxgb4vf_open,
+ .ndo_stop = cxgb4vf_stop,
+ .ndo_start_xmit = t4vf_eth_xmit,
+ .ndo_get_stats = cxgb4vf_get_stats,
+ .ndo_set_rx_mode = cxgb4vf_set_rxmode,
+ .ndo_set_mac_address = cxgb4vf_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = cxgb4vf_do_ioctl,
+ .ndo_change_mtu = cxgb4vf_change_mtu,
+ .ndo_fix_features = cxgb4vf_fix_features,
+ .ndo_set_features = cxgb4vf_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cxgb4vf_poll_controller,
+#endif
+};
+
+/*
+ * "Probe" a device: initialize a device and construct all kernel and driver
+ * state needed to manage the device. This routine is called "init_one" in
+ * the PF Driver ...
+ */
+static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int version_printed;
+
+ int pci_using_dac;
+ int err, pidx;
+ unsigned int pmask;
+ struct adapter *adapter;
+ struct port_info *pi;
+ struct net_device *netdev;
+
+ /*
+ * Print our driver banner the first time we're called to initialize a
+ * device.
+ */
+ if (version_printed == 0) {
+ printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
+ version_printed = 1;
+ }
+
+ /*
+ * Initialize generic PCI device state.
+ */
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
+ return err;
+ }
+
+ /*
+ * Reserve PCI resources for the device. If we can't get them some
+ * other driver may have already claimed the device ...
+ */
+ err = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (err) {
+ dev_err(&pdev->dev, "cannot obtain PCI resources\n");
+ goto err_disable_device;
+ }
+
+ /*
+ * Set up our DMA mask: try for 64-bit address masking first and
+ * fall back to 32-bit if we can't get 64 bits ...
+ */
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err == 0) {
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
+ " coherent allocations\n");
+ goto err_release_regions;
+ }
+ pci_using_dac = 1;
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err != 0) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto err_release_regions;
+ }
+ pci_using_dac = 0;
+ }
+
+ /*
+ * Enable bus mastering for the device ...
+ */
+ pci_set_master(pdev);
+
+ /*
+ * Allocate our adapter data structure and attach it to the device.
+ */
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter) {
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+ pci_set_drvdata(pdev, adapter);
+ adapter->pdev = pdev;
+ adapter->pdev_dev = &pdev->dev;
+
+ /*
+ * Initialize SMP data synchronization resources.
+ */
+ spin_lock_init(&adapter->stats_lock);
+
+ /*
+ * Map our I/O registers in BAR0.
+ */
+ adapter->regs = pci_ioremap_bar(pdev, 0);
+ if (!adapter->regs) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ err = -ENOMEM;
+ goto err_free_adapter;
+ }
+
+ /*
+ * Initialize adapter level features.
+ */
+ adapter->name = pci_name(pdev);
+ adapter->msg_enable = dflt_msg_enable;
+ err = adap_init0(adapter);
+ if (err)
+ goto err_unmap_bar;
+
+ /*
+ * Allocate our "adapter ports" and stitch everything together.
+ */
+ pmask = adapter->params.vfres.pmask;
+ for_each_port(adapter, pidx) {
+ int port_id, viid;
+
+ /*
+ * We simplistically allocate our virtual interfaces
+ * sequentially across the port numbers to which we have
+ * access rights. This should be configurable in some manner
+ * ...
+ */
+ if (pmask == 0)
+ break;
+ port_id = ffs(pmask) - 1;
+ pmask &= ~(1 << port_id);
+ viid = t4vf_alloc_vi(adapter, port_id);
+ if (viid < 0) {
+ dev_err(&pdev->dev, "cannot allocate VI for port %d:"
+ " err=%d\n", port_id, viid);
+ err = viid;
+ goto err_free_dev;
+ }
+
+ /*
+ * Allocate our network device and stitch things together.
+ */
+ netdev = alloc_etherdev_mq(sizeof(struct port_info),
+ MAX_PORT_QSETS);
+ if (netdev == NULL) {
+ dev_err(&pdev->dev, "cannot allocate netdev for"
+ " port %d\n", port_id);
+ t4vf_free_vi(adapter, viid);
+ err = -ENOMEM;
+ goto err_free_dev;
+ }
+ adapter->port[pidx] = netdev;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ pi = netdev_priv(netdev);
+ pi->adapter = adapter;
+ pi->pidx = pidx;
+ pi->port_id = port_id;
+ pi->viid = viid;
+
+ /*
+ * Initialize the starting state of our "port" and register
+ * it.
+ */
+ pi->xact_addr_filt = -1;
+ netif_carrier_off(netdev);
+ netdev->irq = pdev->irq;
+
+ netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM;
+ netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_HIGHDMA;
+ netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_TX;
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netdev->netdev_ops = &cxgb4vf_netdev_ops;
+ SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops);
+
+ /*
+ * Initialize the hardware/software state for the port.
+ */
+ err = t4vf_port_init(adapter, pidx);
+ if (err) {
+ dev_err(&pdev->dev, "cannot initialize port %d\n",
+ pidx);
+ goto err_free_dev;
+ }
+ }
+
+ /*
+ * The "card" is now ready to go. If any errors occur during device
+ * registration we do not fail the whole "card" but rather proceed
+ * only with the ports we manage to register successfully. However we
+ * must register at least one net device.
+ */
+ for_each_port(adapter, pidx) {
+ netdev = adapter->port[pidx];
+ if (netdev == NULL)
+ continue;
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_warn(&pdev->dev, "cannot register net device %s,"
+ " skipping\n", netdev->name);
+ continue;
+ }
+
+ set_bit(pidx, &adapter->registered_device_map);
+ }
+ if (adapter->registered_device_map == 0) {
+ dev_err(&pdev->dev, "could not register any net devices\n");
+ goto err_free_dev;
+ }
+
+ /*
+ * Set up our debugfs entries.
+ */
+ if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
+ adapter->debugfs_root =
+ debugfs_create_dir(pci_name(pdev),
+ cxgb4vf_debugfs_root);
+ if (IS_ERR_OR_NULL(adapter->debugfs_root))
+ dev_warn(&pdev->dev, "could not create debugfs"
+ " directory");
+ else
+ setup_debugfs(adapter);
+ }
+
+ /*
+ * See what interrupts we'll be using. If we've been configured to
+ * use MSI-X interrupts, try to enable them but fall back to using
+ * MSI interrupts if we can't enable MSI-X interrupts. If we can't
+ * get MSI interrupts we bail with the error.
+ */
+ if (msi == MSI_MSIX && enable_msix(adapter) == 0)
+ adapter->flags |= USING_MSIX;
+ else {
+ err = pci_enable_msi(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to allocate %s interrupts;"
+ " err=%d\n",
+ msi == MSI_MSIX ? "MSI-X or MSI" : "MSI", err);
+ goto err_free_debugfs;
+ }
+ adapter->flags |= USING_MSI;
+ }
+
+ /*
+ * Now that we know how many "ports" we have and what their types are,
+ * and how many Queue Sets we can support, we can configure our queue
+ * resources.
+ */
+ cfg_queues(adapter);
+
+ /*
+ * Print a short notice on the existence and configuration of the new
+ * VF network device ...
+ */
+ for_each_port(adapter, pidx) {
+ dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
+ adapter->port[pidx]->name,
+ (adapter->flags & USING_MSIX) ? "MSI-X" :
+ (adapter->flags & USING_MSI) ? "MSI" : "");
+ }
+
+ /*
+ * Return success!
+ */
+ return 0;
+
+ /*
+ * Error recovery and exit code. Unwind state that's been created
+ * so far and return the error.
+ */
+
+err_free_debugfs:
+ if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
+ cleanup_debugfs(adapter);
+ debugfs_remove_recursive(adapter->debugfs_root);
+ }
+
+err_free_dev:
+ for_each_port(adapter, pidx) {
+ netdev = adapter->port[pidx];
+ if (netdev == NULL)
+ continue;
+ pi = netdev_priv(netdev);
+ t4vf_free_vi(adapter, pi->viid);
+ if (test_bit(pidx, &adapter->registered_device_map))
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+ }
+
+err_unmap_bar:
+ iounmap(adapter->regs);
+
+err_free_adapter:
+ kfree(adapter);
+ pci_set_drvdata(pdev, NULL);
+
+err_release_regions:
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ pci_clear_master(pdev);
+
+err_disable_device:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+/*
+ * "Remove" a device: tear down all kernel and driver state created in the
+ * "probe" routine and quiesce the device (disable interrupts, etc.). (Note
+ * that this is called "remove_one" in the PF Driver.)
+ */
+static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+
+ /*
+ * Tear down driver state associated with device.
+ */
+ if (adapter) {
+ int pidx;
+
+ /*
+ * Stop all of our activity. Unregister network port,
+ * disable interrupts, etc.
+ */
+ for_each_port(adapter, pidx)
+ if (test_bit(pidx, &adapter->registered_device_map))
+ unregister_netdev(adapter->port[pidx]);
+ t4vf_sge_stop(adapter);
+ if (adapter->flags & USING_MSIX) {
+ pci_disable_msix(adapter->pdev);
+ adapter->flags &= ~USING_MSIX;
+ } else if (adapter->flags & USING_MSI) {
+ pci_disable_msi(adapter->pdev);
+ adapter->flags &= ~USING_MSI;
+ }
+
+ /*
+ * Tear down our debugfs entries.
+ */
+ if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
+ cleanup_debugfs(adapter);
+ debugfs_remove_recursive(adapter->debugfs_root);
+ }
+
+ /*
+ * Free all of the various resources which we've acquired ...
+ */
+ t4vf_free_sge_resources(adapter);
+ for_each_port(adapter, pidx) {
+ struct net_device *netdev = adapter->port[pidx];
+ struct port_info *pi;
+
+ if (netdev == NULL)
+ continue;
+
+ pi = netdev_priv(netdev);
+ t4vf_free_vi(adapter, pi->viid);
+ free_netdev(netdev);
+ }
+ iounmap(adapter->regs);
+ kfree(adapter);
+ pci_set_drvdata(pdev, NULL);
+ }
+
+ /*
+ * Disable the device and release its PCI resources.
+ */
+ pci_disable_device(pdev);
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+}
+
+/*
+ * "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
+ * delivery.
+ */
+static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
+{
+ struct adapter *adapter;
+ int pidx;
+
+ adapter = pci_get_drvdata(pdev);
+ if (!adapter)
+ return;
+
+ /*
+ * Disable all Virtual Interfaces. This will shut down the
+ * delivery of all ingress packets into the chip for these
+ * Virtual Interfaces.
+ */
+ for_each_port(adapter, pidx) {
+ struct net_device *netdev;
+ struct port_info *pi;
+
+ if (!test_bit(pidx, &adapter->registered_device_map))
+ continue;
+
+ netdev = adapter->port[pidx];
+ if (!netdev)
+ continue;
+
+ pi = netdev_priv(netdev);
+ t4vf_enable_vi(adapter, pi->viid, false, false);
+ }
+
+ /*
+	 * Free up all Queues, which will prevent further DMA and
+	 * Interrupts and allow the various internal pathways to drain.
+ */
+ t4vf_free_sge_resources(adapter);
+}
+
+/*
+ * PCI Device registration data structures.
+ */
+#define CH_DEVICE(devid, idx) \
+ { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
+
+static struct pci_device_id cxgb4vf_pci_tbl[] = {
+ CH_DEVICE(0xb000, 0), /* PE10K FPGA */
+ CH_DEVICE(0x4800, 0), /* T440-dbg */
+ CH_DEVICE(0x4801, 0), /* T420-cr */
+ CH_DEVICE(0x4802, 0), /* T422-cr */
+ CH_DEVICE(0x4803, 0), /* T440-cr */
+ CH_DEVICE(0x4804, 0), /* T420-bch */
+ CH_DEVICE(0x4805, 0), /* T440-bch */
+ CH_DEVICE(0x4806, 0), /* T460-ch */
+ CH_DEVICE(0x4807, 0), /* T420-so */
+ CH_DEVICE(0x4808, 0), /* T420-cx */
+ CH_DEVICE(0x4809, 0), /* T420-bt */
+ CH_DEVICE(0x480a, 0), /* T404-bt */
+ { 0, }
+};
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
+
+static struct pci_driver cxgb4vf_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = cxgb4vf_pci_tbl,
+ .probe = cxgb4vf_pci_probe,
+ .remove = __devexit_p(cxgb4vf_pci_remove),
+ .shutdown = __devexit_p(cxgb4vf_pci_shutdown),
+};
+
+/*
+ * Initialize global driver state.
+ */
+static int __init cxgb4vf_module_init(void)
+{
+ int ret;
+
+ /*
+ * Vet our module parameters.
+ */
+ if (msi != MSI_MSIX && msi != MSI_MSI) {
+ printk(KERN_WARNING KBUILD_MODNAME
+ ": bad module parameter msi=%d; must be %d"
+ " (MSI-X or MSI) or %d (MSI)\n",
+ msi, MSI_MSIX, MSI_MSI);
+ return -EINVAL;
+ }
+
+ /* Debugfs support is optional, just warn if this fails */
+ cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
+ printk(KERN_WARNING KBUILD_MODNAME ": could not create"
+ " debugfs entry, continuing\n");
+
+ ret = pci_register_driver(&cxgb4vf_driver);
+ if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
+ debugfs_remove(cxgb4vf_debugfs_root);
+ return ret;
+}
+
+/*
+ * Tear down global driver state.
+ */
+static void __exit cxgb4vf_module_exit(void)
+{
+ pci_unregister_driver(&cxgb4vf_driver);
+ debugfs_remove(cxgb4vf_debugfs_root);
+}
+
+module_init(cxgb4vf_module_init);
+module_exit(cxgb4vf_module_exit);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
new file mode 100644
index 000000000000..cffb328c46c3
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -0,0 +1,2465 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <linux/dma-mapping.h>
+#include <linux/prefetch.h>
+
+#include "t4vf_common.h"
+#include "t4vf_defs.h"
+
+#include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4fw_api.h"
+#include "../cxgb4/t4_msg.h"
+
+/*
+ * Decoded Adapter Parameters.
+ */
+static u32 FL_PG_ORDER; /* large page allocation size */
+static u32 STAT_LEN; /* length of status page at ring end */
+static u32 PKTSHIFT; /* padding between CPL and packet data */
+static u32 FL_ALIGN; /* response queue message alignment */
+
+/*
+ * Constants ...
+ */
+enum {
+ /*
+	 * Egress Queue sizes, producer and consumer indices are all expressed
+	 * in Egress Context Units (EQ_UNIT bytes each). Note that as far as
+	 * the hardware is
+ * concerned, the free list is an Egress Queue (the host produces free
+ * buffers which the hardware consumes) and free list entries are
+ * 64-bit PCI DMA addresses.
+ */
+ EQ_UNIT = SGE_EQ_IDXSIZE,
+ FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+ TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+
+ /*
+ * Max number of TX descriptors we clean up at a time. Should be
+ * modest as freeing skbs isn't cheap and it happens while holding
+	 * locks. As long as we free packets faster than they arrive, we
+ * eventually catch up and keep the amortized cost reasonable.
+ */
+ MAX_TX_RECLAIM = 16,
+
+ /*
+ * Max number of Rx buffers we replenish at a time. Again keep this
+ * modest, allocating buffers isn't cheap either.
+ */
+ MAX_RX_REFILL = 16,
+
+ /*
+ * Period of the Rx queue check timer. This timer is infrequent as it
+ * has something to do only when the system experiences severe memory
+ * shortage.
+ */
+ RX_QCHECK_PERIOD = (HZ / 2),
+
+ /*
+ * Period of the TX queue check timer and the maximum number of TX
+ * descriptors to be reclaimed by the TX timer.
+ */
+ TX_QCHECK_PERIOD = (HZ / 2),
+ MAX_TIMER_TX_RECLAIM = 100,
+
+ /*
+ * An FL with <= FL_STARVE_THRES buffers is starving and a periodic
+ * timer will attempt to refill it.
+ */
+ FL_STARVE_THRES = 4,
+
+ /*
+ * Suspend an Ethernet TX queue with fewer available descriptors than
+ * this. We always want to have room for a maximum sized packet:
+ * inline immediate data + MAX_SKB_FRAGS. This is the same as
+ * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS
+ * (see that function and its helpers for a description of the
+ * calculation).
+ */
+ ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
+ ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
+ ((ETHTXQ_MAX_FRAGS-1) & 1) +
+ 2),
+ ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
+ ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,
+
+ ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),
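+
+	/*
+	 * Worked example (illustrative, assuming MAX_SKB_FRAGS == 17):
+	 * ETHTXQ_MAX_FRAGS == 18 and ETHTXQ_MAX_SGL_LEN == (3*17)/2 +
+	 * (17 & 1) + 2 == 28 flits, matching the sgl_len() calculation
+	 * below.
+	 */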
+
+ /*
+ * Max TX descriptor space we allow for an Ethernet packet to be
+ * inlined into a WR. This is limited by the maximum value which
+ * we can specify for immediate data in the firmware Ethernet TX
+ * Work Request.
+ */
+ MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_MASK,
+
+ /*
+ * Max size of a WR sent through a control TX queue.
+ */
+ MAX_CTRL_WR_LEN = 256,
+
+ /*
+ * Maximum amount of data which we'll ever need to inline into a
+ * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN).
+ */
+ MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
+ ? MAX_IMM_TX_PKT_LEN
+ : MAX_CTRL_WR_LEN),
+
+ /*
+ * For incoming packets less than RX_COPY_THRES, we copy the data into
+ * an skb rather than referencing the data. We allocate enough
+	 * in-line room in skbs to accommodate pulling in RX_PULL_LEN bytes
+ * of the data (header).
+ */
+ RX_COPY_THRES = 256,
+ RX_PULL_LEN = 128,
+
+ /*
+ * Main body length for sk_buffs used for RX Ethernet packets with
+ * fragments. Should be >= RX_PULL_LEN but possibly bigger to give
+ * pskb_may_pull() some room.
+ */
+ RX_SKB_LEN = 512,
+};
+
+/*
+ * Software state per TX descriptor.
+ */
+struct tx_sw_desc {
+ struct sk_buff *skb; /* socket buffer of TX data source */
+ struct ulptx_sgl *sgl; /* scatter/gather list in TX Queue */
+};
+
+/*
+ * Software state per RX Free List descriptor. We keep track of the allocated
+ * FL page, its size, and its PCI DMA address (if the page is mapped). The FL
+ * page size and its PCI DMA mapped state are stored in the low bits of the
+ * PCI DMA address as per below.
+ */
+struct rx_sw_desc {
+ struct page *page; /* Free List page buffer */
+ dma_addr_t dma_addr; /* PCI DMA address (if mapped) */
+ /* and flags (see below) */
+};
+
+/*
+ * The low bits of rx_sw_desc.dma_addr have special meaning. Note that the
+ * SGE also uses the low 4 bits to determine the size of the buffer. It uses
+ * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array.
+ * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4
+ * bits can only contain a 0 or a 1 to indicate which size buffer we're giving
+ * to the SGE. Thus, our software state of "is the buffer mapped for DMA" is
+ * maintained in an inverse sense so the hardware never sees that bit high.
+ */
+enum {
+ RX_LARGE_BUF = 1 << 0, /* buffer is SGE_FL_BUFFER_SIZE[1] */
+ RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
+};
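+
+/*
+ * For example (illustrative): a DMA-mapped "large" Free List page at bus
+ * address 0x34000 is stored as 0x34001 -- RX_LARGE_BUF set and
+ * RX_UNMAPPED_BUF clear, so the hardware only ever sees the buffer size
+ * index in the low bits.
+ */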
+
+/**
+ * get_buf_addr - return DMA buffer address of software descriptor
+ * @sdesc: pointer to the software buffer descriptor
+ *
+ * Return the DMA buffer address of a software descriptor (stripping out
+ * our low-order flag bits).
+ */
+static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc)
+{
+ return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
+}
+
+/**
+ * is_buf_mapped - is buffer mapped for DMA?
+ * @sdesc: pointer to the software buffer descriptor
+ *
+ * Determine whether the buffer associated with a software descriptor is
+ * mapped for DMA or not.
+ */
+static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
+{
+ return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
+}
+
+/**
+ * need_skb_unmap - does the platform need unmapping of sk_buffs?
+ *
+ * Returns true if the platform needs sk_buff unmapping. Since the result
+ * is a compile-time constant, the compiler optimizes away the unmapping
+ * code when it isn't needed.
+ */
+static inline int need_skb_unmap(void)
+{
+#ifdef CONFIG_NEED_DMA_MAP_STATE
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+/**
+ * txq_avail - return the number of available slots in a TX queue
+ * @tq: the TX queue
+ *
+ * Returns the number of available descriptors in a TX queue.
+ */
+static inline unsigned int txq_avail(const struct sge_txq *tq)
+{
+ return tq->size - 1 - tq->in_use;
+}
+
+/**
+ * fl_cap - return the capacity of a Free List
+ * @fl: the Free List
+ *
+ * Returns the capacity of a Free List. The capacity is less than the
+ * size because an Egress Queue Index Unit worth of descriptors needs to
+ * be left unpopulated, otherwise the Producer and Consumer indices PIDX
+ * and CIDX will match and the hardware will think the FL is empty.
+ */
+static inline unsigned int fl_cap(const struct sge_fl *fl)
+{
+ return fl->size - FL_PER_EQ_UNIT;
+}
+
+/**
+ * fl_starving - return whether a Free List is starving.
+ * @fl: the Free List
+ *
+ * Tests specified Free List to see whether the number of buffers
+ * available to the hardware has fallen below our "starvation"
+ * threshold.
+ */
+static inline bool fl_starving(const struct sge_fl *fl)
+{
+ return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
+}
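+
+/*
+ * For example, with FL_STARVE_THRES == 4, a Free List with 6 buffers
+ * available and 3 credits pending is starving: 6 - 3 <= 4.
+ */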
+
+/**
+ * map_skb - map an skb for DMA to the device
+ * @dev: the egress net device
+ * @skb: the packet to map
+ * @addr: a pointer to the base of the DMA mapping array
+ *
+ * Map an skb for DMA to the device and return an array of DMA addresses.
+ */
+static int map_skb(struct device *dev, const struct sk_buff *skb,
+ dma_addr_t *addr)
+{
+ const skb_frag_t *fp, *end;
+ const struct skb_shared_info *si;
+
+ *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ goto out_err;
+
+ si = skb_shinfo(skb);
+ end = &si->frags[si->nr_frags];
+ for (fp = si->frags; fp < end; fp++) {
+ *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ goto unwind;
+ }
+ return 0;
+
+unwind:
+ while (fp-- > si->frags)
+ dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);
+ dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
+
+out_err:
+ return -ENOMEM;
+}
+
+static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
+ const struct ulptx_sgl *sgl, const struct sge_txq *tq)
+{
+ const struct ulptx_sge_pair *p;
+ unsigned int nfrags = skb_shinfo(skb)->nr_frags;
+
+ if (likely(skb_headlen(skb)))
+ dma_unmap_single(dev, be64_to_cpu(sgl->addr0),
+ be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
+ else {
+ dma_unmap_page(dev, be64_to_cpu(sgl->addr0),
+ be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
+ nfrags--;
+ }
+
+ /*
+ * the complexity below is because of the possibility of a wrap-around
+ * in the middle of an SGL
+ */
+ for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
+ if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
+unmap:
+ dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
+ be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
+ dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
+ be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
+ p++;
+ } else if ((u8 *)p == (u8 *)tq->stat) {
+ p = (const struct ulptx_sge_pair *)tq->desc;
+ goto unmap;
+ } else if ((u8 *)p + 8 == (u8 *)tq->stat) {
+ const __be64 *addr = (const __be64 *)tq->desc;
+
+ dma_unmap_page(dev, be64_to_cpu(addr[0]),
+ be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
+ dma_unmap_page(dev, be64_to_cpu(addr[1]),
+ be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
+ p = (const struct ulptx_sge_pair *)&addr[2];
+ } else {
+ const __be64 *addr = (const __be64 *)tq->desc;
+
+ dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
+ be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
+ dma_unmap_page(dev, be64_to_cpu(addr[0]),
+ be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
+ p = (const struct ulptx_sge_pair *)&addr[1];
+ }
+ }
+ if (nfrags) {
+ __be64 addr;
+
+ if ((u8 *)p == (u8 *)tq->stat)
+ p = (const struct ulptx_sge_pair *)tq->desc;
+ addr = ((u8 *)p + 16 <= (u8 *)tq->stat
+ ? p->addr[0]
+ : *(const __be64 *)tq->desc);
+ dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
+ DMA_TO_DEVICE);
+ }
+}
+
+/**
+ * free_tx_desc - reclaims TX descriptors and their buffers
+ * @adapter: the adapter
+ * @tq: the TX queue to reclaim descriptors from
+ * @n: the number of descriptors to reclaim
+ * @unmap: whether the buffers should be unmapped for DMA
+ *
+ * Reclaims TX descriptors from an SGE TX queue and frees the associated
+ * TX buffers. Called with the TX queue lock held.
+ */
+static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
+ unsigned int n, bool unmap)
+{
+ struct tx_sw_desc *sdesc;
+ unsigned int cidx = tq->cidx;
+ struct device *dev = adapter->pdev_dev;
+
+ const int need_unmap = need_skb_unmap() && unmap;
+
+ sdesc = &tq->sdesc[cidx];
+ while (n--) {
+ /*
+ * If we kept a reference to the original TX skb, we need to
+ * unmap it from PCI DMA space (if required) and free it.
+ */
+ if (sdesc->skb) {
+ if (need_unmap)
+ unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
+ kfree_skb(sdesc->skb);
+ sdesc->skb = NULL;
+ }
+
+ sdesc++;
+ if (++cidx == tq->size) {
+ cidx = 0;
+ sdesc = tq->sdesc;
+ }
+ }
+ tq->cidx = cidx;
+}
+
+/*
+ * Return the number of reclaimable descriptors in a TX queue.
+ */
+static inline int reclaimable(const struct sge_txq *tq)
+{
+ int hw_cidx = be16_to_cpu(tq->stat->cidx);
+ int reclaimable = hw_cidx - tq->cidx;
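+	/*
+	 * The hardware CIDX may have wrapped past our software CIDX; e.g.
+	 * hw_cidx == 2, tq->cidx == 1020, tq->size == 1024 gives 2 - 1020 ==
+	 * -1018, corrected below to 6 reclaimable descriptors.
+	 */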
+ if (reclaimable < 0)
+ reclaimable += tq->size;
+ return reclaimable;
+}
+
+/**
+ * reclaim_completed_tx - reclaims completed TX descriptors
+ * @adapter: the adapter
+ * @tq: the TX queue to reclaim completed descriptors from
+ * @unmap: whether the buffers should be unmapped for DMA
+ *
+ * Reclaims TX descriptors that the SGE has indicated it has processed,
+ * and frees the associated buffers if possible. Called with the TX
+ * queue locked.
+ */
+static inline void reclaim_completed_tx(struct adapter *adapter,
+ struct sge_txq *tq,
+ bool unmap)
+{
+ int avail = reclaimable(tq);
+
+ if (avail) {
+ /*
+ * Limit the amount of clean up work we do at a time to keep
+ * the TX lock hold time O(1).
+ */
+ if (avail > MAX_TX_RECLAIM)
+ avail = MAX_TX_RECLAIM;
+
+ free_tx_desc(adapter, tq, avail, unmap);
+ tq->in_use -= avail;
+ }
+}
+
+/**
+ * get_buf_size - return the size of an RX Free List buffer.
+ * @sdesc: pointer to the software buffer descriptor
+ */
+static inline int get_buf_size(const struct rx_sw_desc *sdesc)
+{
+ return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
+ ? (PAGE_SIZE << FL_PG_ORDER)
+ : PAGE_SIZE;
+}
+
+/**
+ * free_rx_bufs - free RX buffers on an SGE Free List
+ * @adapter: the adapter
+ * @fl: the SGE Free List to free buffers from
+ * @n: how many buffers to free
+ *
+ * Release the next @n buffers on an SGE Free List RX queue. The
+ * buffers must be made inaccessible to hardware before calling this
+ * function.
+ */
+static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
+{
+ while (n--) {
+ struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
+
+ if (is_buf_mapped(sdesc))
+ dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
+ get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+ put_page(sdesc->page);
+ sdesc->page = NULL;
+ if (++fl->cidx == fl->size)
+ fl->cidx = 0;
+ fl->avail--;
+ }
+}
+
+/**
+ * unmap_rx_buf - unmap the current RX buffer on an SGE Free List
+ * @adapter: the adapter
+ * @fl: the SGE Free List
+ *
+ * Unmap the current buffer on an SGE Free List RX queue. The
+ * buffer must be made inaccessible to HW before calling this function.
+ *
+ * This is similar to @free_rx_bufs above but does not free the buffer.
+ * Do note that the FL still loses any further access to the buffer.
+ * This is used predominantly to "transfer ownership" of an FL buffer
+ * to another entity (typically an skb's fragment list).
+ */
+static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
+{
+ struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
+
+ if (is_buf_mapped(sdesc))
+ dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
+ get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+ sdesc->page = NULL;
+ if (++fl->cidx == fl->size)
+ fl->cidx = 0;
+ fl->avail--;
+}
+
+/**
+ * ring_fl_db - ring doorbell on free list
+ * @adapter: the adapter
+ * @fl: the Free List whose doorbell should be rung ...
+ *
+ * Tell the Scatter Gather Engine that there are new free list entries
+ * available.
+ */
+static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
+{
+ /*
+ * The SGE keeps track of its Producer and Consumer Indices in terms
+ * of Egress Queue Units so we can only tell it about integral numbers
+ * of multiples of Free List Entries per Egress Queue Units ...
+ */
+ if (fl->pend_cred >= FL_PER_EQ_UNIT) {
+ wmb();
+ t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
+ DBPRIO |
+ QID(fl->cntxt_id) |
+ PIDX(fl->pend_cred / FL_PER_EQ_UNIT));
+ fl->pend_cred %= FL_PER_EQ_UNIT;
+ }
+}
+
+/**
+ * set_rx_sw_desc - initialize software RX buffer descriptor
+ * @sdesc: pointer to the software RX buffer descriptor
+ * @page: pointer to the page data structure backing the RX buffer
+ * @dma_addr: PCI DMA address (possibly with low-bit flags)
+ */
+static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
+ dma_addr_t dma_addr)
+{
+ sdesc->page = page;
+ sdesc->dma_addr = dma_addr;
+}
+
+/*
+ * Support for poisoning RX buffers ...
+ */
+#define POISON_BUF_VAL -1
+
+static inline void poison_buf(struct page *page, size_t sz)
+{
+#if POISON_BUF_VAL >= 0
+ memset(page_address(page), POISON_BUF_VAL, sz);
+#endif
+}
+
+/**
+ * refill_fl - refill an SGE RX buffer ring
+ * @adapter: the adapter
+ * @fl: the Free List ring to refill
+ * @n: the number of new buffers to allocate
+ * @gfp: the gfp flags for the allocations
+ *
+ * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
+ * allocated with the supplied gfp flags. The caller must assure that
+ * @n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN
+ * EGRESS QUEUE UNITS_ indicates an empty Free List! Returns the number
+ * of buffers allocated. If afterwards the queue is found critically low,
+ * mark it as starving in the bitmap of starving FLs.
+ */
+static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
+ int n, gfp_t gfp)
+{
+ struct page *page;
+ dma_addr_t dma_addr;
+ unsigned int cred = fl->avail;
+ __be64 *d = &fl->desc[fl->pidx];
+ struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];
+
+ /*
+ * Sanity: ensure that the result of adding n Free List buffers
+ * won't result in wrapping the SGE's Producer Index around to
+	 * its Consumer Index, thereby indicating an empty Free List ...
+ */
+ BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
+
+ /*
+ * If we support large pages, prefer large buffers and fail over to
+ * small pages if we can't allocate large pages to satisfy the refill.
+ * If we don't support large pages, drop directly into the small page
+ * allocation code.
+ */
+ if (FL_PG_ORDER == 0)
+ goto alloc_small_pages;
+
+ while (n) {
+ page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
+ FL_PG_ORDER);
+ if (unlikely(!page)) {
+ /*
+			 * We've failed in our attempt to allocate a "large
+ * page". Fail over to the "small page" allocation
+ * below.
+ */
+ fl->large_alloc_failed++;
+ break;
+ }
+ poison_buf(page, PAGE_SIZE << FL_PG_ORDER);
+
+ dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
+ PAGE_SIZE << FL_PG_ORDER,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
+ /*
+ * We've run out of DMA mapping space. Free up the
+ * buffer and return with what we've managed to put
+ * into the free list. We don't want to fail over to
+ * the small page allocation below in this case
+ * because DMA mapping resources are typically
+			 * critical resources once they become scarce.
+ */
+ __free_pages(page, FL_PG_ORDER);
+ goto out;
+ }
+ dma_addr |= RX_LARGE_BUF;
+ *d++ = cpu_to_be64(dma_addr);
+
+ set_rx_sw_desc(sdesc, page, dma_addr);
+ sdesc++;
+
+ fl->avail++;
+ if (++fl->pidx == fl->size) {
+ fl->pidx = 0;
+ sdesc = fl->sdesc;
+ d = fl->desc;
+ }
+ n--;
+ }
+
+alloc_small_pages:
+ while (n--) {
+ page = __netdev_alloc_page(adapter->port[0],
+ gfp | __GFP_NOWARN);
+ if (unlikely(!page)) {
+ fl->alloc_failed++;
+ break;
+ }
+ poison_buf(page, PAGE_SIZE);
+
+ dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
+ netdev_free_page(adapter->port[0], page);
+ break;
+ }
+ *d++ = cpu_to_be64(dma_addr);
+
+ set_rx_sw_desc(sdesc, page, dma_addr);
+ sdesc++;
+
+ fl->avail++;
+ if (++fl->pidx == fl->size) {
+ fl->pidx = 0;
+ sdesc = fl->sdesc;
+ d = fl->desc;
+ }
+ }
+
+out:
+ /*
+ * Update our accounting state to incorporate the new Free List
+ * buffers, tell the hardware about them and return the number of
+	 * buffers which we were able to allocate.
+ */
+ cred = fl->avail - cred;
+ fl->pend_cred += cred;
+ ring_fl_db(adapter, fl);
+
+ if (unlikely(fl_starving(fl))) {
+ smp_wmb();
+ set_bit(fl->cntxt_id, adapter->sge.starving_fl);
+ }
+
+ return cred;
+}
+
+/*
+ * Refill a Free List to its capacity or the Maximum Refill Increment,
+ * whichever is smaller ...
+ */
+static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
+{
+ refill_fl(adapter, fl,
+ min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
+ GFP_ATOMIC);
+}
+
+/**
+ * alloc_ring - allocate resources for an SGE descriptor ring
+ * @dev: the PCI device's core device
+ * @nelem: the number of descriptors
+ * @hwsize: the size of each hardware descriptor
+ * @swsize: the size of each software descriptor
+ * @busaddrp: the physical PCI bus address of the allocated ring
+ * @swringp: return address pointer for software ring
+ * @stat_size: extra space in hardware ring for status information
+ *
+ * Allocates resources for an SGE descriptor ring, such as TX queues,
+ * free buffer lists, response queues, etc. Each SGE ring requires
+ * space for its hardware descriptors plus, optionally, space for software
+ * state associated with each hardware entry (the metadata). The function
+ * returns three values: the virtual address for the hardware ring (the
+ * return value of the function), the PCI bus address of the hardware
+ * ring (in *busaddrp), and the address of the software ring (in swringp).
+ * Both the hardware and software rings are returned zeroed out.
+ */
+static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
+ size_t swsize, dma_addr_t *busaddrp, void *swringp,
+ size_t stat_size)
+{
+ /*
+	 * Allocate the hardware ring and its PCI DMA bus address space.
+ */
+ size_t hwlen = nelem * hwsize + stat_size;
+ void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
+
+ if (!hwring)
+ return NULL;
+
+ /*
+ * If the caller wants a software ring, allocate it and return a
+ * pointer to it in *swringp.
+ */
+ BUG_ON((swsize != 0) != (swringp != NULL));
+ if (swsize) {
+ void *swring = kcalloc(nelem, swsize, GFP_KERNEL);
+
+ if (!swring) {
+ dma_free_coherent(dev, hwlen, hwring, *busaddrp);
+ return NULL;
+ }
+ *(void **)swringp = swring;
+ }
+
+ /*
+ * Zero out the hardware ring and return its address as our function
+ * value.
+ */
+ memset(hwring, 0, hwlen);
+ return hwring;
+}
+
+/**
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ *
+ * Calculates the number of flits (8-byte units) needed for a Direct
+ * Scatter/Gather List that can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ /*
+ * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
+ * addresses. The DSGL Work Request starts off with a 32-bit DSGL
+ * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
+ * repeated sequences of { Length[i], Length[i+1], Address[i],
+ * Address[i+1] } (this ensures that all addresses are on 64-bit
+ * boundaries). If N is even, then Length[N+1] should be set to 0 and
+ * Address[N+1] is omitted.
+ *
+ * The following calculation incorporates all of the above. It's
+ * somewhat hard to follow but, briefly: the "+2" accounts for the
+ * first two flits which include the DSGL header, Length0 and
+ * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
+ * flits for every pair of the remaining N) +1 if (n-1) is odd; and
+ * finally the "+((n-1)&1)" adds the one remaining flit needed if
+ * (n-1) is odd ...
+ */
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
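+
+/*
+ * Worked example: sgl_len(4) needs 2 flits for {DSGL header, Length0,
+ * Address0}, 3 flits for one {Len, Len, Addr, Addr} pair and 2 flits for
+ * the final odd address: (3*3)/2 + (3 & 1) + 2 == 7.
+ */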
+
+/**
+ * flits_to_desc - returns the num of TX descriptors for the given flits
+ * @flits: the number of flits
+ *
+ * Returns the number of TX descriptors needed for the supplied number
+ * of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int flits)
+{
+ BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
+ return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
+}
+
+/**
+ * is_eth_imm - can an Ethernet packet be sent as immediate data?
+ * @skb: the packet
+ *
+ * Returns whether an Ethernet packet is small enough to fit completely as
+ * immediate data.
+ */
+static inline int is_eth_imm(const struct sk_buff *skb)
+{
+ /*
+ * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
+ * which does not accommodate immediate data. We could dike out all
+ * of the support code for immediate data but that would tie our hands
+	 * too much if we ever want to enhance the firmware. It would also
+ * create more differences between the PF and VF Drivers.
+ */
+ return false;
+}
+
+/**
+ * calc_tx_flits - calculate the number of flits for a packet TX WR
+ * @skb: the packet
+ *
+ * Returns the number of flits needed for a TX Work Request for the
+ * given Ethernet packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
+{
+ unsigned int flits;
+
+ /*
+ * If the skb is small enough, we can pump it out as a work request
+ * with only immediate data. In that case we just have to have the
+ * TX Packet header plus the skb data in the Work Request.
+ */
+ if (is_eth_imm(skb))
+ return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
+ sizeof(__be64));
+
+ /*
+	 * Otherwise, we're going to have to construct a Scatter/Gather List
+ * of the skb body and fragments. We also include the flits necessary
+ * for the TX Packet Work Request and CPL. We always have a firmware
+ * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+ * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+ * message or, if we're doing a Large Send Offload, an LSO CPL message
+	 * with an embedded TX Packet Write CPL message.
+ */
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+ if (skb_shinfo(skb)->gso_size)
+ flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ else
+ flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ return flits;
+}
+
+/**
+ * write_sgl - populate a Scatter/Gather List for a packet
+ * @skb: the packet
+ * @tq: the TX queue we are writing into
+ * @sgl: starting location for writing the SGL
+ * @end: points right after the end of the SGL
+ * @start: start offset into skb main-body data to include in the SGL
+ * @addr: the list of DMA bus addresses for the SGL elements
+ *
+ * Generates a Scatter/Gather List for the buffers that make up a packet.
+ * The caller must provide adequate space for the SGL that will be written.
+ * The SGL includes all of the packet's page fragments and the data in its
+ * main body except for the first @start bytes. @sgl must be 16-byte
+ * aligned and within a TX descriptor with available space. @end points
+ * right after the end of the SGL but does not account for any potential
+ * wrap around, i.e., @end > @tq->stat.
+ */
+static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
+ struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+ const dma_addr_t *addr)
+{
+ unsigned int i, len;
+ struct ulptx_sge_pair *to;
+ const struct skb_shared_info *si = skb_shinfo(skb);
+ unsigned int nfrags = si->nr_frags;
+ struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
+
+ len = skb_headlen(skb) - start;
+ if (likely(len)) {
+ sgl->len0 = htonl(len);
+ sgl->addr0 = cpu_to_be64(addr[0] + start);
+ nfrags++;
+ } else {
+ sgl->len0 = htonl(si->frags[0].size);
+ sgl->addr0 = cpu_to_be64(addr[1]);
+ }
+
+ sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
+ ULPTX_NSGE(nfrags));
+ if (likely(--nfrags == 0))
+ return;
+ /*
+ * Most of the complexity below deals with the possibility we hit the
+ * end of the queue in the middle of writing the SGL. For this case
+ * only we create the SGL in a temporary buffer and then copy it.
+ */
+ to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
+
+ for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
+ to->len[0] = cpu_to_be32(si->frags[i].size);
+ to->len[1] = cpu_to_be32(si->frags[++i].size);
+ to->addr[0] = cpu_to_be64(addr[i]);
+ to->addr[1] = cpu_to_be64(addr[++i]);
+ }
+ if (nfrags) {
+ to->len[0] = cpu_to_be32(si->frags[i].size);
+ to->len[1] = cpu_to_be32(0);
+ to->addr[0] = cpu_to_be64(addr[i + 1]);
+ }
+ if (unlikely((u8 *)end > (u8 *)tq->stat)) {
+ unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;
+
+ if (likely(part0))
+ memcpy(sgl->sge, buf, part0);
+ part1 = (u8 *)end - (u8 *)tq->stat;
+ memcpy(tq->desc, (u8 *)buf + part0, part1);
+ end = (void *)tq->desc + part1;
+ }
+ if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
+ *(u64 *)end = 0;
+}
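+
+/*
+ * Wrap-around example (illustrative): if only 24 bytes of SGL space
+ * remain before @tq->stat but the SGL body is 40 bytes, the list is
+ * built in the stack buffer, part0 = 24 bytes are copied in place and
+ * part1 = 16 bytes are copied to the start of @tq->desc.
+ */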
+
+/**
+ * ring_tx_db - ring a TX queue's doorbell
+ * @adapter: the adapter
+ * @tq: the TX queue
+ * @n: number of new descriptors to give to HW
+ *
+ * Ring the doorbell for a TX queue.
+ */
+static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
+ int n)
+{
+ /*
+ * Warn if we write doorbells with the wrong priority and write
+ * descriptors before telling HW.
+ */
+ WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO);
+ wmb();
+ t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
+ QID(tq->cntxt_id) | PIDX(n));
+}
+
+/**
+ * inline_tx_skb - inline a packet's data into TX descriptors
+ * @skb: the packet
+ * @tq: the TX queue where the packet will be inlined
+ * @pos: starting position in the TX queue to inline the packet
+ *
+ * Inline a packet's contents directly into TX descriptors, starting at
+ * the given position within the TX DMA ring.
+ * Most of the complexity of this operation is dealing with wrap arounds
+ * in the middle of the packet we want to inline.
+ */
+static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
+ void *pos)
+{
+ u64 *p;
+ int left = (void *)tq->stat - pos;
+
+ if (likely(skb->len <= left)) {
+ if (likely(!skb->data_len))
+ skb_copy_from_linear_data(skb, pos, skb->len);
+ else
+ skb_copy_bits(skb, 0, pos, skb->len);
+ pos += skb->len;
+ } else {
+ skb_copy_bits(skb, 0, pos, left);
+ skb_copy_bits(skb, left, tq->desc, skb->len - left);
+ pos = (void *)tq->desc + (skb->len - left);
+ }
+
+ /* 0-pad to multiple of 16 */
+ p = PTR_ALIGN(pos, 8);
+ if ((uintptr_t)p & 8)
+ *p = 0;
+}
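+
+/*
+ * Example of the wrapped case (illustrative): inlining a 100-byte skb
+ * with only 60 bytes left before @tq->stat copies 60 bytes at @pos and
+ * the remaining 40 bytes at the start of @tq->desc.
+ */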
+
+/*
+ * Figure out what HW csum a packet wants and return the appropriate control
+ * bits.
+ */
+static u64 hwcsum(const struct sk_buff *skb)
+{
+ int csum_type;
+ const struct iphdr *iph = ip_hdr(skb);
+
+ if (iph->version == 4) {
+ if (iph->protocol == IPPROTO_TCP)
+ csum_type = TX_CSUM_TCPIP;
+ else if (iph->protocol == IPPROTO_UDP)
+ csum_type = TX_CSUM_UDPIP;
+ else {
+nocsum:
+ /*
+ * unknown protocol, disable HW csum
+ * and hope a bad packet is detected
+ */
+ return TXPKT_L4CSUM_DIS;
+ }
+ } else {
+ /*
+ * this doesn't work with extension headers
+ */
+ const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
+
+ if (ip6h->nexthdr == IPPROTO_TCP)
+ csum_type = TX_CSUM_TCPIP6;
+ else if (ip6h->nexthdr == IPPROTO_UDP)
+ csum_type = TX_CSUM_UDPIP6;
+ else
+ goto nocsum;
+ }
+
+ if (likely(csum_type >= TX_CSUM_TCPIP))
+ return TXPKT_CSUM_TYPE(csum_type) |
+ TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
+ TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
+ else {
+ int start = skb_transport_offset(skb);
+
+ return TXPKT_CSUM_TYPE(csum_type) |
+ TXPKT_CSUM_START(start) |
+ TXPKT_CSUM_LOC(start + skb->csum_offset);
+ }
+}
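+
+/*
+ * For instance (illustrative): a TCP/IPv4 frame with a 20-byte IP
+ * header behind a plain 14-byte Ethernet header yields
+ * TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) | TXPKT_IPHDR_LEN(20) |
+ * TXPKT_ETHHDR_LEN(0), since skb_network_offset() - ETH_HLEN is 0.
+ */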
+
+/*
+ * Stop an Ethernet TX queue and record that state change.
+ */
+static void txq_stop(struct sge_eth_txq *txq)
+{
+ netif_tx_stop_queue(txq->txq);
+ txq->q.stops++;
+}
+
+/*
+ * Advance our software state for a TX queue by adding n descriptors to its
+ * in-use count.
+ */
+static inline void txq_advance(struct sge_txq *tq, unsigned int n)
+{
+ tq->in_use += n;
+ tq->pidx += n;
+ if (tq->pidx >= tq->size)
+ tq->pidx -= tq->size;
+}
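+
+/*
+ * Example (illustrative): with tq->size = 1024, advancing from
+ * pidx = 1022 by n = 4 wraps the producer index to pidx = 2 while
+ * in_use grows by 4.
+ */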
+
+/**
+ * t4vf_eth_xmit - add a packet to an Ethernet TX queue
+ * @skb: the packet
+ * @dev: the egress net device
+ *
+ * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled.
+ */
+int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ u32 wr_mid;
+ u64 cntrl, *end;
+ int qidx, credits;
+ unsigned int flits, ndesc;
+ struct adapter *adapter;
+ struct sge_eth_txq *txq;
+ const struct port_info *pi;
+ struct fw_eth_tx_pkt_vm_wr *wr;
+ struct cpl_tx_pkt_core *cpl;
+ const struct skb_shared_info *ssi;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
+ sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) +
+ sizeof(wr->vlantci));
+
+ /*
+ * The chip minimum packet length is 10 octets but the firmware
+ * command that we are using requires that we copy the Ethernet header
+ * (including the VLAN tag) into the Work Request header, so we reject
+ * anything smaller than that ...
+ */
+ if (unlikely(skb->len < fw_hdr_copy_len))
+ goto out_free;
+
+ /*
+ * Figure out which TX Queue we're going to use.
+ */
+ pi = netdev_priv(dev);
+ adapter = pi->adapter;
+ qidx = skb_get_queue_mapping(skb);
+ BUG_ON(qidx >= pi->nqsets);
+ txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
+
+ /*
+ * Take this opportunity to reclaim any TX Descriptors whose DMA
+ * transfers have completed.
+ */
+ reclaim_completed_tx(adapter, &txq->q, true);
+
+ /*
+ * Calculate the number of flits and TX Descriptors we're going to
+ * need along with how many TX Descriptors will be left over after
+ * we inject our Work Request.
+ */
+ flits = calc_tx_flits(skb);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&txq->q) - ndesc;
+
+ if (unlikely(credits < 0)) {
+ /*
+ * Not enough room for this packet's Work Request. Stop the
+ * TX Queue and return a "busy" condition. The queue will get
+ * started later on when the firmware informs us that space
+ * has opened up.
+ */
+ txq_stop(txq);
+ dev_err(adapter->pdev_dev,
+ "%s: TX ring %u full while queue awake!\n",
+ dev->name, qidx);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (!is_eth_imm(skb) &&
+ unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
+ /*
+ * We need to map the skb into PCI DMA space (because it can't
+ * be in-lined directly into the Work Request) and the mapping
+ * operation failed. Record the error and drop the packet.
+ */
+ txq->mapping_err++;
+ goto out_free;
+ }
+
+ wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ /*
+ * After we're done injecting the Work Request for this
+ * packet, we'll be below our "stop threshold" so stop the TX
+ * Queue now and schedule a request for an SGE Egress Queue
+ * Update message. The queue will get started later on when
+ * the firmware processes this Work Request and sends us an
+ * Egress Queue Status Update message indicating that space
+ * has opened up.
+ */
+ txq_stop(txq);
+ wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
+ }
+
+ /*
+ * Start filling in our Work Request. Note that we do _not_ handle
+ * the WR Header wrapping around the TX Descriptor Ring. If our
+ * maximum header size ever exceeds one TX Descriptor, we'll need to
+ * do something else here.
+ */
+ BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
+ wr = (void *)&txq->q.desc[txq->q.pidx];
+ wr->equiq_to_len16 = cpu_to_be32(wr_mid);
+ wr->r3[0] = cpu_to_be64(0);
+ wr->r3[1] = cpu_to_be64(0);
+ skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
+ end = (u64 *)wr + flits;
+
+ /*
+ * If this is a Large Send Offload packet we'll put in an LSO CPL
+ * message with an encapsulated TX Packet CPL message. Otherwise we
+ * just use a TX Packet CPL message.
+ */
+ ssi = skb_shinfo(skb);
+ if (ssi->gso_size) {
+ struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
+ bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
+ int l3hdr_len = skb_network_header_len(skb);
+ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+
+ wr->op_immdlen =
+ cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
+ FW_WR_IMMDLEN(sizeof(*lso) +
+ sizeof(*cpl)));
+ /*
+ * Fill in the LSO CPL message.
+ */
+ lso->lso_ctrl =
+ cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE |
+ LSO_LAST_SLICE |
+ LSO_IPV6(v6) |
+ LSO_ETHHDR_LEN(eth_xtra_len/4) |
+ LSO_IPHDR_LEN(l3hdr_len/4) |
+ LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+ lso->ipid_ofst = cpu_to_be16(0);
+ lso->mss = cpu_to_be16(ssi->gso_size);
+ lso->seqno_offset = cpu_to_be32(0);
+ lso->len = cpu_to_be32(skb->len);
+
+ /*
+ * Set up TX Packet CPL pointer, control word and perform
+ * accounting.
+ */
+ cpl = (void *)(lso + 1);
+ cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN(l3hdr_len) |
+ TXPKT_ETHHDR_LEN(eth_xtra_len));
+ txq->tso++;
+ txq->tx_cso += ssi->gso_segs;
+ } else {
+ int len;
+
+ len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
+ wr->op_immdlen =
+ cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
+ FW_WR_IMMDLEN(len));
+
+ /*
+ * Set up TX Packet CPL pointer, control word and perform
+ * accounting.
+ */
+ cpl = (void *)(wr + 1);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+ txq->tx_cso++;
+ } else
+ cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+ }
+
+ /*
+ * If there's a VLAN tag present, add that to the list of things to
+ * do in this Work Request.
+ */
+ if (vlan_tx_tag_present(skb)) {
+ txq->vlan_ins++;
+ cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
+ }
+
+ /*
+ * Fill in the TX Packet CPL message header.
+ */
+ cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) |
+ TXPKT_INTF(pi->port_id) |
+ TXPKT_PF(0));
+ cpl->pack = cpu_to_be16(0);
+ cpl->len = cpu_to_be16(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+#ifdef T4_TRACE
+ T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
+ "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
+ ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
+#endif
+
+ /*
+ * Fill in the body of the TX Packet CPL message with either in-lined
+ * data or a Scatter/Gather List.
+ */
+ if (is_eth_imm(skb)) {
+ /*
+ * In-line the packet's data and free the skb since we don't
+ * need it any longer.
+ */
+ inline_tx_skb(skb, &txq->q, cpl + 1);
+ dev_kfree_skb(skb);
+ } else {
+ /*
+ * Write the skb's Scatter/Gather list into the TX Packet CPL
+ * message and retain a pointer to the skb so we can free it
+ * later when its DMA completes. (We store the skb pointer
+ * in the Software Descriptor corresponding to the last TX
+ * Descriptor used by the Work Request.)
+ *
+ * The retained skb will be freed when the corresponding TX
+ * Descriptors are reclaimed after their DMAs complete.
+ * However, this could take quite a while since, in general,
+ * the hardware is set up to be lazy about sending DMA
+ * completion notifications to us and we mostly perform TX
+ * reclaims in the transmit routine.
+ *
+ * This is good for performance but means that we rely on new
+ * TX packets arriving to run the destructors of completed
+ * packets, which open up space in their sockets' send queues.
+ * Sometimes we do not get such new packets, causing TX to
+ * stall. A single UDP transmitter is a good example of this
+ * situation. We have a clean up timer that periodically
+ * reclaims completed packets but it doesn't run often enough
+ * (nor do we want it to) to prevent lengthy stalls. A
+ * solution to this problem is to run the destructor early,
+ * after the packet is queued but before it's DMA'd. A con is
+ * that we lie to socket memory accounting, but the amount of
+ * extra memory is reasonable (limited by the number of TX
+ * descriptors), the packets do actually get freed quickly by
+ * new packets almost always, and for protocols like TCP that
+ * wait for acks to really free up the data the extra memory
+ * is even less. On the positive side we run the destructors
+ * on the sending CPU rather than on a potentially different
+ * completing CPU, usually a good thing.
+ *
+ * Run the destructor before telling the DMA engine about the
+ * packet to make sure it doesn't complete and get freed
+ * prematurely.
+ */
+ struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
+ struct sge_txq *tq = &txq->q;
+ int last_desc;
+
+ /*
+ * If the Work Request header was an exact multiple of our TX
+ * Descriptor length, then it's possible that the starting SGL
+ * pointer lines up exactly with the end of our TX Descriptor
+ * ring. If that's the case, wrap around to the beginning
+ * here ...
+ */
+ if (unlikely((void *)sgl == (void *)tq->stat)) {
+ sgl = (void *)tq->desc;
+ end = (void *)((void *)tq->desc +
+ ((void *)end - (void *)tq->stat));
+ }
+
+ write_sgl(skb, tq, sgl, end, 0, addr);
+ skb_orphan(skb);
+
+ last_desc = tq->pidx + ndesc - 1;
+ if (last_desc >= tq->size)
+ last_desc -= tq->size;
+ tq->sdesc[last_desc].skb = skb;
+ tq->sdesc[last_desc].sgl = sgl;
+ }
+
+ /*
+ * Advance our internal TX Queue state, tell the hardware about
+ * the new TX descriptors and return success.
+ */
+ txq_advance(&txq->q, ndesc);
+ dev->trans_start = jiffies;
+ ring_tx_db(adapter, &txq->q, ndesc);
+ return NETDEV_TX_OK;
+
+out_free:
+ /*
+ * An error of some sort happened. Free the TX skb and tell the
+ * OS that we've "dealt" with the packet ...
+ */
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
+ * @gl: the gather list
+ * @skb_len: size of sk_buff main body if it carries fragments
+ * @pull_len: amount of data to move to the sk_buff's main body
+ *
+ * Builds an sk_buff from the given packet gather list. Returns the
+ * sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+ unsigned int skb_len, unsigned int pull_len)
+{
+ struct sk_buff *skb;
+ struct skb_shared_info *ssi;
+
+ /*
+ * If the ingress packet is small enough, allocate an skb large enough
+ * for all of the data and copy it inline. Otherwise, allocate an skb
+ * with enough room to pull in the header and reference the rest of
+ * the data via the skb fragment list.
+ *
+ * Below we rely on RX_COPY_THRES being less than the smallest Rx
+ * buffer size, which is expected since buffers are at least
+ * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one
+ * fragment.
+ */
+ if (gl->tot_len <= RX_COPY_THRES) {
+ /* small packets have only one fragment */
+ skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, gl->tot_len);
+ skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+ } else {
+ skb = alloc_skb(skb_len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, pull_len);
+ skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+ ssi = skb_shinfo(skb);
+ ssi->frags[0].page = gl->frags[0].page;
+ ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
+ ssi->frags[0].size = gl->frags[0].size - pull_len;
+ if (gl->nfrags > 1)
+ memcpy(&ssi->frags[1], &gl->frags[1],
+ (gl->nfrags-1) * sizeof(skb_frag_t));
+ ssi->nr_frags = gl->nfrags;
+
+ skb->len = gl->tot_len;
+ skb->data_len = skb->len - pull_len;
+ skb->truesize += skb->data_len;
+
+ /* Get a reference for the last page, we don't own it */
+ get_page(gl->frags[gl->nfrags - 1].page);
+ }
+
+out:
+ return skb;
+}
+
+/**
+ * t4vf_pktgl_free - free a packet gather list
+ * @gl: the gather list
+ *
+ * Releases the pages of a packet gather list. We do not own the last
+ * page on the list and do not free it.
+ */
+void t4vf_pktgl_free(const struct pkt_gl *gl)
+{
+ int frag;
+
+ frag = gl->nfrags - 1;
+ while (frag--)
+ put_page(gl->frags[frag].page);
+}
+
+/**
+ * copy_frags - copy fragments from gather list into skb_shared_info
+ * @si: destination skb shared info structure
+ * @gl: source internal packet gather list
+ * @offset: packet start offset in first page
+ *
+ * Copy an internal packet gather list into a Linux skb_shared_info
+ * structure.
+ */
+static inline void copy_frags(struct skb_shared_info *si,
+ const struct pkt_gl *gl,
+ unsigned int offset)
+{
+ unsigned int n;
+
+ /* usually there's just one frag */
+ si->frags[0].page = gl->frags[0].page;
+ si->frags[0].page_offset = gl->frags[0].page_offset + offset;
+ si->frags[0].size = gl->frags[0].size - offset;
+ si->nr_frags = gl->nfrags;
+
+ n = gl->nfrags - 1;
+ if (n)
+ memcpy(&si->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));
+
+ /* get a reference to the last page, we don't own it */
+ get_page(gl->frags[n].page);
+}
+
+/**
+ * do_gro - perform Generic Receive Offload ingress packet processing
+ * @rxq: ingress RX Ethernet Queue
+ * @gl: gather list for ingress packet
+ * @pkt: CPL header for last packet fragment
+ *
+ * Perform Generic Receive Offload (GRO) ingress packet processing.
+ * We use the standard Linux GRO interfaces for this.
+ */
+static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
+ const struct cpl_rx_pkt *pkt)
+{
+ int ret;
+ struct sk_buff *skb;
+
+ skb = napi_get_frags(&rxq->rspq.napi);
+ if (unlikely(!skb)) {
+ t4vf_pktgl_free(gl);
+ rxq->stats.rx_drops++;
+ return;
+ }
+
+ copy_frags(skb_shinfo(skb), gl, PKTSHIFT);
+ skb->len = gl->tot_len - PKTSHIFT;
+ skb->data_len = skb->len;
+ skb->truesize += skb->data_len;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(skb, rxq->rspq.idx);
+
+ if (pkt->vlan_ex)
+ __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
+ ret = napi_gro_frags(&rxq->rspq.napi);
+
+ if (ret == GRO_HELD)
+ rxq->stats.lro_pkts++;
+ else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
+ rxq->stats.lro_merged++;
+ rxq->stats.pkts++;
+ rxq->stats.rx_cso++;
+}
+
+/**
+ * t4vf_ethrx_handler - process an ingress ethernet packet
+ * @rspq: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the RX_PKT message
+ * @gl: the gather list of packet fragments
+ *
+ * Process an ingress ethernet packet and deliver it to the stack.
+ */
+int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
+ const struct pkt_gl *gl)
+{
+ struct sk_buff *skb;
+ const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
+ bool csum_ok = pkt->csum_calc && !pkt->err_vec;
+ struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+
+ /*
+ * If this is a good TCP packet and we have Generic Receive Offload
+ * enabled, handle the packet in the GRO path.
+ */
+ if ((pkt->l2info & cpu_to_be32(RXF_TCP)) &&
+ (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
+ !pkt->ip_frag) {
+ do_gro(rxq, gl, pkt);
+ return 0;
+ }
+
+ /*
+ * Convert the Packet Gather List into an skb.
+ */
+ skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
+ if (unlikely(!skb)) {
+ t4vf_pktgl_free(gl);
+ rxq->stats.rx_drops++;
+ return 0;
+ }
+ __skb_pull(skb, PKTSHIFT);
+ skb->protocol = eth_type_trans(skb, rspq->netdev);
+ skb_record_rx_queue(skb, rspq->idx);
+ rxq->stats.pkts++;
+
+ if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
+ !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+ if (!pkt->ip_frag)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else {
+ __sum16 c = (__force __sum16)pkt->csum;
+ skb->csum = csum_unfold(c);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+ rxq->stats.rx_cso++;
+ } else
+ skb_checksum_none_assert(skb);
+
+ if (pkt->vlan_ex) {
+ rxq->stats.vlan_ex++;
+ __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
+ }
+
+ netif_receive_skb(skb);
+
+ return 0;
+}
+
+/**
+ * is_new_response - check if a response is newly written
+ * @rc: the response control descriptor
+ * @rspq: the response queue
+ *
+ * Returns true if a response descriptor contains a yet unprocessed
+ * response.
+ */
+static inline bool is_new_response(const struct rsp_ctrl *rc,
+ const struct sge_rspq *rspq)
+{
+ return RSPD_GEN(rc->type_gen) == rspq->gen;
+}
+
+/**
+ * restore_rx_bufs - put back a packet's RX buffers
+ * @gl: the packet gather list
+ * @fl: the SGE Free List
+ * @frags: how many fragments in @gl
+ *
+ * Called when we find out that the current packet, @gl, can't be
+ * processed right away for some reason. This is a very rare event and
+ * there's no effort to make this suspension/resumption process
+ * particularly efficient.
+ *
+ * We implement the suspension by putting all of the RX buffers associated
+ * with the current packet back on the original Free List. The buffers
+ * have already been unmapped and are left unmapped; we mark them as
+ * unmapped in order to prevent further unmapping attempts. (Effectively
+ * this function undoes the series of @unmap_rx_buf calls which were done
+ * to create the current packet's gather list.) This leaves us ready to
+ * restart processing of the packet the next time we start processing the
+ * RX Queue ...
+ */
+static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
+ int frags)
+{
+ struct rx_sw_desc *sdesc;
+
+ while (frags--) {
+ if (fl->cidx == 0)
+ fl->cidx = fl->size - 1;
+ else
+ fl->cidx--;
+ sdesc = &fl->sdesc[fl->cidx];
+ sdesc->page = gl->frags[frags].page;
+ sdesc->dma_addr |= RX_UNMAPPED_BUF;
+ fl->avail++;
+ }
+}
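+
+/*
+ * Example (illustrative): restoring frags = 2 with fl->cidx = 1 puts
+ * gl->frags[1] back at index 0 and gl->frags[0] back at index
+ * fl->size - 1, leaving fl->cidx there and fl->avail larger by 2.
+ */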
+
+/**
+ * rspq_next - advance to the next entry in a response queue
+ * @rspq: the queue
+ *
+ * Updates the state of a response queue to advance it to the next entry.
+ */
+static inline void rspq_next(struct sge_rspq *rspq)
+{
+ rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
+ if (unlikely(++rspq->cidx == rspq->size)) {
+ rspq->cidx = 0;
+ rspq->gen ^= 1;
+ rspq->cur_desc = rspq->desc;
+ }
+}
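+
+/*
+ * Note on the generation bit (illustrative): with rspq->size = 512,
+ * consuming entry 511 wraps cidx to 0 and flips rspq->gen, so
+ * is_new_response() only accepts descriptors the SGE has written with
+ * the new generation value on the next pass through the ring.
+ */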
+
+/**
+ * process_responses - process responses from an SGE response queue
+ * @rspq: the ingress response queue to process
+ * @budget: how many responses can be processed in this round
+ *
+ * Process responses from a Scatter Gather Engine response queue up to
+ * the supplied budget. Responses include received packets as well as
+ * control messages from firmware or hardware.
+ *
+ * Additionally choose the interrupt holdoff time for the next interrupt
+ * on this queue. If the system is under memory shortage, use a fairly
+ * long delay to help recovery.
+ */
+int process_responses(struct sge_rspq *rspq, int budget)
+{
+ struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+ int budget_left = budget;
+
+ while (likely(budget_left)) {
+ int ret, rsp_type;
+ const struct rsp_ctrl *rc;
+
+ rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
+ if (!is_new_response(rc, rspq))
+ break;
+
+ /*
+ * Figure out what kind of response we've received from the
+ * SGE.
+ */
+ rmb();
+ rsp_type = RSPD_TYPE(rc->type_gen);
+ if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+ skb_frag_t *fp;
+ struct pkt_gl gl;
+ const struct rx_sw_desc *sdesc;
+ u32 bufsz, frag;
+ u32 len = be32_to_cpu(rc->pldbuflen_qid);
+
+ /*
+ * If we get a "new buffer" message from the SGE we
+ * need to move on to the next Free List buffer.
+ */
+ if (len & RSPD_NEWBUF) {
+ /*
+ * We get one "new buffer" message when we
+ * first start up a queue so we need to ignore
+ * it when our offset into the buffer is 0.
+ */
+ if (likely(rspq->offset > 0)) {
+ free_rx_bufs(rspq->adapter, &rxq->fl,
+ 1);
+ rspq->offset = 0;
+ }
+ len = RSPD_LEN(len);
+ }
+ gl.tot_len = len;
+
+ /*
+ * Gather packet fragments.
+ */
+ for (frag = 0, fp = gl.frags; /**/; frag++, fp++) {
+ BUG_ON(frag >= MAX_SKB_FRAGS);
+ BUG_ON(rxq->fl.avail == 0);
+ sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
+ bufsz = get_buf_size(sdesc);
+ fp->page = sdesc->page;
+ fp->page_offset = rspq->offset;
+ fp->size = min(bufsz, len);
+ len -= fp->size;
+ if (!len)
+ break;
+ unmap_rx_buf(rspq->adapter, &rxq->fl);
+ }
+ gl.nfrags = frag+1;
+
+ /*
+ * Last buffer remains mapped so explicitly make it
+ * coherent for CPU access and start preloading first
+ * cache line ...
+ */
+ dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
+ get_buf_addr(sdesc),
+ fp->size, DMA_FROM_DEVICE);
+ gl.va = (page_address(gl.frags[0].page) +
+ gl.frags[0].page_offset);
+ prefetch(gl.va);
+
+ /*
+ * Hand the new ingress packet to the handler for
+ * this Response Queue.
+ */
+ ret = rspq->handler(rspq, rspq->cur_desc, &gl);
+ if (likely(ret == 0))
+ rspq->offset += ALIGN(fp->size, FL_ALIGN);
+ else
+ restore_rx_bufs(&gl, &rxq->fl, frag);
+ } else if (likely(rsp_type == RSP_TYPE_CPL)) {
+ ret = rspq->handler(rspq, rspq->cur_desc, NULL);
+ } else {
+ WARN_ON(rsp_type > RSP_TYPE_CPL);
+ ret = 0;
+ }
+
+ if (unlikely(ret)) {
+ /*
+ * Couldn't process descriptor, back off for recovery.
+ * We use the SGE's last timer which has the longest
+ * interrupt coalescing value ...
+ */
+ const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
+ rspq->next_intr_params =
+ QINTR_TIMER_IDX(NOMEM_TIMER_IDX);
+ break;
+ }
+
+ rspq_next(rspq);
+ budget_left--;
+ }
+
+ /*
+ * If this is a Response Queue with an associated Free List and
+ * at least two Egress Queue units available in the Free List
+ * for new buffer pointers, refill the Free List.
+ */
+ if (rspq->offset >= 0 &&
+ rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
+ __refill_fl(rspq->adapter, &rxq->fl);
+ return budget - budget_left;
+}
+
+/**
+ * napi_rx_handler - the NAPI handler for RX processing
+ * @napi: the napi instance
+ * @budget: how many packets we can process in this round
+ *
+ * Handler for new data events when using NAPI. This does not need any
+ * locking or protection from interrupts as data interrupts are off at
+ * this point and other adapter interrupts do not interfere (the latter
+ * is not a concern at all with MSI-X as non-data interrupts then have
+ * a separate handler).
+ */
+static int napi_rx_handler(struct napi_struct *napi, int budget)
+{
+ unsigned int intr_params;
+ struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
+ int work_done = process_responses(rspq, budget);
+
+ if (likely(work_done < budget)) {
+ napi_complete(napi);
+ intr_params = rspq->next_intr_params;
+ rspq->next_intr_params = rspq->intr_params;
+ } else
+ intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX);
+
+ if (unlikely(work_done == 0))
+ rspq->unhandled_irqs++;
+
+ t4_write_reg(rspq->adapter,
+ T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+ CIDXINC(work_done) |
+ INGRESSQID((u32)rspq->cntxt_id) |
+ SEINTARM(intr_params));
+ return work_done;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue for the NAPI case
+ * (i.e., response queue serviced by NAPI polling).
+ */
+irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
+{
+ struct sge_rspq *rspq = cookie;
+
+ napi_schedule(&rspq->napi);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Process the indirect interrupt entries in the interrupt queue and kick off
+ * NAPI for each queue that has generated an entry.
+ */
+static unsigned int process_intrq(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ struct sge_rspq *intrq = &s->intrq;
+ unsigned int work_done;
+
+ spin_lock(&adapter->sge.intrq_lock);
+ for (work_done = 0; ; work_done++) {
+ const struct rsp_ctrl *rc;
+ unsigned int qid, iq_idx;
+ struct sge_rspq *rspq;
+
+ /*
+ * Grab the next response from the interrupt queue and bail
+ * out if it's not a new response.
+ */
+ rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
+ if (!is_new_response(rc, intrq))
+ break;
+
+ /*
+ * If the response isn't a forwarded interrupt message, issue an
+ * error and go on to the next response message. This should
+ * never happen ...
+ */
+ rmb();
+ if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) {
+ dev_err(adapter->pdev_dev,
+ "Unexpected INTRQ response type %d\n",
+ RSPD_TYPE(rc->type_gen));
+ continue;
+ }
+
+ /*
+ * Extract the Queue ID from the interrupt message and perform
+ * sanity checking to make sure it really refers to one of our
+ * Ingress Queues which is active and matches the queue's ID.
+ * None of these error conditions should ever happen so we may
+ * want to either make them fatal and/or conditionalized under
+ * DEBUG.
+ */
+ qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid));
+ iq_idx = IQ_IDX(s, qid);
+ if (unlikely(iq_idx >= MAX_INGQ)) {
+ dev_err(adapter->pdev_dev,
+ "Ingress QID %d out of range\n", qid);
+ continue;
+ }
+ rspq = s->ingr_map[iq_idx];
+ if (unlikely(rspq == NULL)) {
+ dev_err(adapter->pdev_dev,
+ "Ingress QID %d RSPQ=NULL\n", qid);
+ continue;
+ }
+ if (unlikely(rspq->abs_id != qid)) {
+ dev_err(adapter->pdev_dev,
+ "Ingress QID %d refers to RSPQ %d\n",
+ qid, rspq->abs_id);
+ continue;
+ }
+
+ /*
+ * Schedule NAPI processing on the indicated Response Queue
+ * and move on to the next entry in the Forwarded Interrupt
+ * Queue.
+ */
+ napi_schedule(&rspq->napi);
+ rspq_next(intrq);
+ }
+
+ t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
+ CIDXINC(work_done) |
+ INGRESSQID(intrq->cntxt_id) |
+ SEINTARM(intrq->intr_params));
+
+ spin_unlock(&adapter->sge.intrq_lock);
+
+ return work_done;
+}
+
+/*
+ * The MSI interrupt handler handles data events from SGE response queues as
+ * well as error and other async events as they all use the same MSI vector.
+ */
+irqreturn_t t4vf_intr_msi(int irq, void *cookie)
+{
+ struct adapter *adapter = cookie;
+
+ process_intrq(adapter);
+ return IRQ_HANDLED;
+}
+
+/**
+ * t4vf_intr_handler - select the top-level interrupt handler
+ * @adapter: the adapter
+ *
+ * Selects the top-level interrupt handler based on the type of interrupts
+ * (MSI-X or MSI).
+ */
+irq_handler_t t4vf_intr_handler(struct adapter *adapter)
+{
+ BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
+ if (adapter->flags & USING_MSIX)
+ return t4vf_sge_intr_msix;
+ else
+ return t4vf_intr_msi;
+}
+
+/**
+ * sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
+ * @data: the adapter
+ *
+ * Runs periodically from a timer to perform maintenance of SGE RX queues.
+ *
+ * Replenishes RX queues that have run out due to memory shortage.
+ * Normally new RX buffers are added when existing ones are consumed but
+ * when out of memory a queue can become empty. We schedule NAPI to do
+ * the actual refill.
+ */
+static void sge_rx_timer_cb(unsigned long data)
+{
+ struct adapter *adapter = (struct adapter *)data;
+ struct sge *s = &adapter->sge;
+ unsigned int i;
+
+ /*
+ * Scan the "Starving Free Lists" flag array looking for any Free
+ * Lists in need of more free buffers. If we find one and it's not
+ * being actively polled, then bump its "starving" counter and attempt
+ * to refill it. If we're successful in adding enough buffers to push
+ * the Free List over the starving threshold, then we can clear its
+ * "starving" status.
+ */
+ for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
+ unsigned long m;
+
+ for (m = s->starving_fl[i]; m; m &= m - 1) {
+ unsigned int id = __ffs(m) + i * BITS_PER_LONG;
+ struct sge_fl *fl = s->egr_map[id];
+
+ clear_bit(id, s->starving_fl);
+ smp_mb__after_clear_bit();
+
+ /*
+ * Since we are accessing fl without a lock there's a
+ * small probability of a false positive where we
+ * schedule napi but the FL is no longer starving.
+ * No biggie.
+ */
+ if (fl_starving(fl)) {
+ struct sge_eth_rxq *rxq;
+
+ rxq = container_of(fl, struct sge_eth_rxq, fl);
+ if (napi_reschedule(&rxq->rspq.napi))
+ fl->starving++;
+ else
+ set_bit(id, s->starving_fl);
+ }
+ }
+ }
+
+ /*
+ * Reschedule the next scan for starving Free Lists ...
+ */
+ mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
+}
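+
+/*
+ * Aside on the bitmap walk above (illustrative): "m &= m - 1" clears
+ * the lowest set bit each iteration, e.g. m = 0b10100 visits bit 2
+ * (via __ffs()) and then bit 4 before terminating with m = 0.
+ */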
+
+/**
+ * sge_tx_timer_cb - perform periodic maintenance of SGE TX queues
+ * @data: the adapter
+ *
+ * Runs periodically from a timer to perform maintenance of SGE TX queues.
+ *
+ * Reclaims completed TX packets for the Ethernet queues. Normally
+ * packets are cleaned up by new TX packets; this timer cleans up packets
+ * when no new packets are being submitted. This is essential for pktgen,
+ * at least.
+ */
+static void sge_tx_timer_cb(unsigned long data)
+{
+ struct adapter *adapter = (struct adapter *)data;
+ struct sge *s = &adapter->sge;
+ unsigned int i, budget;
+
+ budget = MAX_TIMER_TX_RECLAIM;
+ i = s->ethtxq_rover;
+ do {
+ struct sge_eth_txq *txq = &s->ethtxq[i];
+
+ if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
+ int avail = reclaimable(&txq->q);
+
+ if (avail > budget)
+ avail = budget;
+
+ free_tx_desc(adapter, &txq->q, avail, true);
+ txq->q.in_use -= avail;
+ __netif_tx_unlock(txq->txq);
+
+ budget -= avail;
+ if (!budget)
+ break;
+ }
+
+ i++;
+ if (i >= s->ethqsets)
+ i = 0;
+ } while (i != s->ethtxq_rover);
+ s->ethtxq_rover = i;
+
+ /*
+ * If we found too many reclaimable packets schedule a timer in the
+ * near future to continue where we left off. Otherwise the next timer
+ * will be at its normal interval.
+ */
+ mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
+}
+
+/**
+ * t4vf_sge_alloc_rxq - allocate an SGE RX Queue
+ * @adapter: the adapter
+ * @rspq: pointer to the new rxq's Response Queue to be filled in
+ * @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
+ * @dev: the network device associated with the new rspq
+ * @intr_dest: MSI-X vector index (overridden in MSI mode)
+ * @fl: pointer to the new rxq's Free List to be filled in
+ * @hnd: the handler to invoke for the rspq's response messages
+ */
+int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
+ bool iqasynch, struct net_device *dev,
+ int intr_dest,
+ struct sge_fl *fl, rspq_handler_t hnd)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct fw_iq_cmd cmd, rpl;
+ int ret, iqandst, flsz = 0;
+
+ /*
+ * If we're using MSI interrupts and we're not initializing the
+ * Forwarded Interrupt Queue itself, then set up this queue for
+ * indirect interrupts to the Forwarded Interrupt Queue. Obviously
+ * the Forwarded Interrupt Queue must be set up before any other
+ * ingress queue ...
+ */
+ if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) {
+ iqandst = SGE_INTRDST_IQ;
+ intr_dest = adapter->sge.intrq.abs_id;
+ } else
+ iqandst = SGE_INTRDST_PCI;
+
+ /*
+ * Allocate the hardware ring for the Response Queue. The size needs
+ * to be a multiple of 16 which includes the mandatory status entry
+ * (regardless of whether the Status Page capabilities are enabled or
+ * not).
+ */
+ rspq->size = roundup(rspq->size, 16);
+ rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
+ 0, &rspq->phys_addr, NULL, 0);
+ if (!rspq->desc)
+ return -ENOMEM;
+
+ /*
+ * Fill in the Ingress Queue Command. Note: Ideally this code would
+ * be in t4vf_hw.c but there are so many parameters and dependencies
+ * on our Linux SGE state that we would end up having to pass tons of
+ * parameters. We'll have to think about how this might be migrated
+ * into OS-independent common code ...
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_CMD_EXEC);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC |
+ FW_IQ_CMD_IQSTART(1) |
+ FW_LEN16(cmd));
+ cmd.type_to_iqandstindex =
+ cpu_to_be32(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
+ FW_IQ_CMD_IQASYNCH(iqasynch) |
+ FW_IQ_CMD_VIID(pi->viid) |
+ FW_IQ_CMD_IQANDST(iqandst) |
+ FW_IQ_CMD_IQANUS(1) |
+ FW_IQ_CMD_IQANUD(SGE_UPDATEDEL_INTR) |
+ FW_IQ_CMD_IQANDSTINDEX(intr_dest));
+ cmd.iqdroprss_to_iqesize =
+ cpu_to_be16(FW_IQ_CMD_IQPCIECH(pi->port_id) |
+ FW_IQ_CMD_IQGTSMODE |
+ FW_IQ_CMD_IQINTCNTTHRESH(rspq->pktcnt_idx) |
+ FW_IQ_CMD_IQESIZE(ilog2(rspq->iqe_len) - 4));
+ cmd.iqsize = cpu_to_be16(rspq->size);
+ cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
+
+ if (fl) {
+ /*
+ * Allocate the ring for the hardware free list (with space
+ * for its status page) along with the associated software
+ * descriptor ring. The free list size needs to be a multiple
+ * of the Egress Queue Unit.
+ */
+ fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
+ fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
+ sizeof(__be64), sizeof(struct rx_sw_desc),
+ &fl->addr, &fl->sdesc, STAT_LEN);
+ if (!fl->desc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /*
+ * Calculate the size of the hardware free list ring plus
+ * Status Page (which the SGE will place after the end of the
+ * free list ring) in Egress Queue Units.
+ */
+ flsz = (fl->size / FL_PER_EQ_UNIT +
+ STAT_LEN / EQ_UNIT);
+
+ /*
+ * Fill in all the relevant firmware Ingress Queue Command
+ * fields for the free list.
+ */
+ cmd.iqns_to_fl0congen =
+ cpu_to_be32(
+ FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) |
+ FW_IQ_CMD_FL0PACKEN |
+ FW_IQ_CMD_FL0PADEN);
+ cmd.fl0dcaen_to_fl0cidxfthresh =
+ cpu_to_be16(
+ FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) |
+ FW_IQ_CMD_FL0FBMAX(SGE_FETCHBURSTMAX_512B));
+ cmd.fl0size = cpu_to_be16(flsz);
+ cmd.fl0addr = cpu_to_be64(fl->addr);
+ }
+
+ /*
+ * Issue the firmware Ingress Queue Command and extract the results if
+ * it completes successfully.
+ */
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (ret)
+ goto err;
+
+ netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
+ rspq->cur_desc = rspq->desc;
+ rspq->cidx = 0;
+ rspq->gen = 1;
+ rspq->next_intr_params = rspq->intr_params;
+ rspq->cntxt_id = be16_to_cpu(rpl.iqid);
+ rspq->abs_id = be16_to_cpu(rpl.physiqid);
+ rspq->size--; /* subtract status entry */
+ rspq->adapter = adapter;
+ rspq->netdev = dev;
+ rspq->handler = hnd;
+
+ /* set offset to -1 to distinguish ingress queues without FL */
+ rspq->offset = fl ? 0 : -1;
+
+ if (fl) {
+ fl->cntxt_id = be16_to_cpu(rpl.fl0id);
+ fl->avail = 0;
+ fl->pend_cred = 0;
+ fl->pidx = 0;
+ fl->cidx = 0;
+ fl->alloc_failed = 0;
+ fl->large_alloc_failed = 0;
+ fl->starving = 0;
+ refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
+ }
+
+ return 0;
+
+err:
+ /*
+ * An error occurred. Clean up our partial allocation state and
+ * return the error.
+ */
+ if (rspq->desc) {
+ dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
+ rspq->desc, rspq->phys_addr);
+ rspq->desc = NULL;
+ }
+ if (fl && fl->desc) {
+ kfree(fl->sdesc);
+ fl->sdesc = NULL;
+ dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
+ fl->desc, fl->addr);
+ fl->desc = NULL;
+ }
+ return ret;
+}
+
+/**
+ * t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
+ * @adapter: the adapter
+ * @txq: pointer to the new txq to be filled in
+ * @dev: the network device associated with the new txq
+ * @devq: the network TX queue associated with the new txq
+ * @iqid: the relative ingress queue ID to which events relating to
+ * the new txq should be directed
+ */
+int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
+ struct net_device *dev, struct netdev_queue *devq,
+ unsigned int iqid)
+{
+ int ret, nentries;
+ struct fw_eq_eth_cmd cmd, rpl;
+ struct port_info *pi = netdev_priv(dev);
+
+ /*
+ * Calculate the size of the hardware TX Queue (including the Status
+ * Page on the end of the TX Queue) in units of TX Descriptors.
+ */
+ nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+
+ /*
+ * Allocate the hardware ring for the TX ring (with space for its
+ * status page) along with the associated software descriptor ring.
+ */
+ txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
+ sizeof(struct tx_desc),
+ sizeof(struct tx_sw_desc),
+ &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ /*
+ * Fill in the Egress Queue Command. Note: As with the direct use of
+ * the firmware Ingress Queue Command above in our RXQ allocation
+ * routine, ideally, this code would be in t4vf_hw.c. Again, we'll
+ * have to see if there's some reasonable way to parameterize it
+ * into the common code ...
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_CMD_EXEC);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC |
+ FW_EQ_ETH_CMD_EQSTART |
+ FW_LEN16(cmd));
+ cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_VIID(pi->viid));
+ cmd.fetchszm_to_iqid =
+ cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) |
+ FW_EQ_ETH_CMD_PCIECHN(pi->port_id) |
+ FW_EQ_ETH_CMD_IQID(iqid));
+ cmd.dcaen_to_eqsize =
+ cpu_to_be32(FW_EQ_ETH_CMD_FBMIN(SGE_FETCHBURSTMIN_64B) |
+ FW_EQ_ETH_CMD_FBMAX(SGE_FETCHBURSTMAX_512B) |
+ FW_EQ_ETH_CMD_CIDXFTHRESH(SGE_CIDXFLUSHTHRESH_32) |
+ FW_EQ_ETH_CMD_EQSIZE(nentries));
+ cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+ /*
+ * Issue the firmware Egress Queue Command and extract the results if
+ * it completes successfully.
+ */
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (ret) {
+ /*
+ * The firmware Egress Queue Command failed for some reason.
+ * Free up our partial allocation state and return the error.
+ */
+ kfree(txq->q.sdesc);
+ txq->q.sdesc = NULL;
+ dma_free_coherent(adapter->pdev_dev,
+ nentries * sizeof(struct tx_desc),
+ txq->q.desc, txq->q.phys_addr);
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ txq->q.in_use = 0;
+ txq->q.cidx = 0;
+ txq->q.pidx = 0;
+ txq->q.stat = (void *)&txq->q.desc[txq->q.size];
+ txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_GET(be32_to_cpu(rpl.eqid_pkd));
+ txq->q.abs_id =
+ FW_EQ_ETH_CMD_PHYSEQID_GET(be32_to_cpu(rpl.physeqid_pkd));
+ txq->txq = devq;
+ txq->tso = 0;
+ txq->tx_cso = 0;
+ txq->vlan_ins = 0;
+ txq->q.stops = 0;
+ txq->q.restarts = 0;
+ txq->mapping_err = 0;
+ return 0;
+}
+
+/*
+ * Free the DMA map resources associated with a TX queue.
+ */
+static void free_txq(struct adapter *adapter, struct sge_txq *tq)
+{
+ dma_free_coherent(adapter->pdev_dev,
+ tq->size * sizeof(*tq->desc) + STAT_LEN,
+ tq->desc, tq->phys_addr);
+ tq->cntxt_id = 0;
+ tq->sdesc = NULL;
+ tq->desc = NULL;
+}
+
+/*
+ * Free the resources associated with a response queue (possibly including a
+ * free list).
+ */
+static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
+ struct sge_fl *fl)
+{
+ unsigned int flid = fl ? fl->cntxt_id : 0xffff;
+
+ t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
+ rspq->cntxt_id, flid, 0xffff);
+ dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
+ rspq->desc, rspq->phys_addr);
+ netif_napi_del(&rspq->napi);
+ rspq->netdev = NULL;
+ rspq->cntxt_id = 0;
+ rspq->abs_id = 0;
+ rspq->desc = NULL;
+
+ if (fl) {
+ free_rx_bufs(adapter, fl, fl->avail);
+ dma_free_coherent(adapter->pdev_dev,
+ fl->size * sizeof(*fl->desc) + STAT_LEN,
+ fl->desc, fl->addr);
+ kfree(fl->sdesc);
+ fl->sdesc = NULL;
+ fl->cntxt_id = 0;
+ fl->desc = NULL;
+ }
+}
+
+/**
+ * t4vf_free_sge_resources - free SGE resources
+ * @adapter: the adapter
+ *
+ * Frees resources used by the SGE queue sets.
+ */
+void t4vf_free_sge_resources(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ struct sge_eth_rxq *rxq = s->ethrxq;
+ struct sge_eth_txq *txq = s->ethtxq;
+ struct sge_rspq *evtq = &s->fw_evtq;
+ struct sge_rspq *intrq = &s->intrq;
+ int qs;
+
+ for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
+ if (rxq->rspq.desc)
+ free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
+ if (txq->q.desc) {
+ t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
+ free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
+ kfree(txq->q.sdesc);
+ free_txq(adapter, &txq->q);
+ }
+ }
+ if (evtq->desc)
+ free_rspq_fl(adapter, evtq, NULL);
+ if (intrq->desc)
+ free_rspq_fl(adapter, intrq, NULL);
+}
+
+/**
+ * t4vf_sge_start - enable SGE operation
+ * @adapter: the adapter
+ *
+ * Start tasklets and timers associated with the DMA engine.
+ */
+void t4vf_sge_start(struct adapter *adapter)
+{
+ adapter->sge.ethtxq_rover = 0;
+ mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
+ mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
+}
+
+/**
+ * t4vf_sge_stop - disable SGE operation
+ * @adapter: the adapter
+ *
+ * Stop tasklets and timers associated with the DMA engine. Note that
+ * this is effective only if measures have been taken to disable any HW
+ * events that may restart them.
+ */
+void t4vf_sge_stop(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+
+ if (s->rx_timer.function)
+ del_timer_sync(&s->rx_timer);
+ if (s->tx_timer.function)
+ del_timer_sync(&s->tx_timer);
+}
+
+/**
+ * t4vf_sge_init - initialize SGE
+ * @adapter: the adapter
+ *
+ * Performs SGE initialization needed every time after a chip reset.
+ * We do not initialize any of the queue sets here, instead the driver
+ * top-level must request those individually. We also do not enable DMA
+ * here, that should be done after the queues have been set up.
+ */
+int t4vf_sge_init(struct adapter *adapter)
+{
+ struct sge_params *sge_params = &adapter->params.sge;
+ u32 fl0 = sge_params->sge_fl_buffer_size[0];
+ u32 fl1 = sge_params->sge_fl_buffer_size[1];
+ struct sge *s = &adapter->sge;
+
+ /*
+ * Start by vetting the basic SGE parameters which have been set up by
+ * the Physical Function Driver. Ideally we should be able to deal
+ * with _any_ configuration. Practice is different ...
+ */
+ if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
+ dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
+ fl0, fl1);
+ return -EINVAL;
+ }
+ if ((sge_params->sge_control & RXPKTCPLMODE) == 0) {
+ dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Now translate the adapter parameters into our internal forms.
+ */
+ if (fl1)
+ FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
+ STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE) ? 128 : 64);
+ PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
+ FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
+ SGE_INGPADBOUNDARY_SHIFT);
+
+ /*
+ * Set up tasklet timers.
+ */
+ setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter);
+ setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter);
+
+ /*
+ * Initialize Forwarded Interrupt Queue lock.
+ */
+ spin_lock_init(&s->intrq_lock);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
new file mode 100644
index 000000000000..a65c80aed1f2
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -0,0 +1,274 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4VF_COMMON_H__
+#define __T4VF_COMMON_H__
+
+#include "../cxgb4/t4fw_api.h"
+
+/*
+ * The "len16" field of a Firmware Command Structure ...
+ */
+#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
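+
+/*
+ * For example (illustrative): a 32-byte firmware command structure
+ * gives FW_LEN16(cmd) == FW_CMD_LEN16(2), i.e. two 16-byte units.
+ */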
+
+/*
+ * Per-VF statistics.
+ */
+struct t4vf_port_stats {
+ /*
+ * TX statistics.
+ */
+ u64 tx_bcast_bytes; /* broadcast */
+ u64 tx_bcast_frames;
+ u64 tx_mcast_bytes; /* multicast */
+ u64 tx_mcast_frames;
+ u64 tx_ucast_bytes; /* unicast */
+ u64 tx_ucast_frames;
+ u64 tx_drop_frames; /* TX dropped frames */
+ u64 tx_offload_bytes; /* offload */
+ u64 tx_offload_frames;
+
+ /*
+ * RX statistics.
+ */
+ u64 rx_bcast_bytes; /* broadcast */
+ u64 rx_bcast_frames;
+ u64 rx_mcast_bytes; /* multicast */
+ u64 rx_mcast_frames;
+ u64 rx_ucast_bytes;
+ u64 rx_ucast_frames; /* unicast */
+
+ u64 rx_err_frames; /* RX error frames */
+};
+
+/*
+ * Per-"port" (Virtual Interface) link configuration ...
+ */
+struct link_config {
+ unsigned int supported; /* link capabilities */
+ unsigned int advertising; /* advertised capabilities */
+ unsigned short requested_speed; /* speed user has requested */
+ unsigned short speed; /* actual link speed */
+ unsigned char requested_fc; /* flow control user has requested */
+ unsigned char fc; /* actual link flow control */
+ unsigned char autoneg; /* autonegotiating? */
+ unsigned char link_ok; /* link up? */
+};
+
+enum {
+ PAUSE_RX = 1 << 0,
+ PAUSE_TX = 1 << 1,
+ PAUSE_AUTONEG = 1 << 2
+};
+
+/*
+ * General device parameters ...
+ */
+struct dev_params {
+ u32 fwrev; /* firmware version */
+ u32 tprev; /* TP Microcode Version */
+};
+
+/*
+ * Scatter Gather Engine parameters. These are almost all determined by the
+ * Physical Function Driver. We just need to grab them to see within which
+ * environment we're playing ...
+ */
+struct sge_params {
+ u32 sge_control; /* padding, boundaries, lengths, etc. */
+ u32 sge_host_page_size; /* RDMA page sizes */
+ u32 sge_queues_per_page; /* RDMA queues/page */
+ u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */
+ u32 sge_fl_buffer_size[16]; /* free list buffer sizes */
+ u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */
+ u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */
+ u32 sge_timer_value_2_and_3;
+ u32 sge_timer_value_4_and_5;
+};
+
+/*
+ * Vital Product Data parameters.
+ */
+struct vpd_params {
+ u32 cclk; /* Core Clock (KHz) */
+};
+
+/*
+ * Global Receive Side Scaling (RSS) parameters in host-native format.
+ */
+struct rss_params {
+ unsigned int mode; /* RSS mode */
+ union {
+ struct {
+ unsigned int synmapen:1; /* SYN Map Enable */
+ unsigned int syn4tupenipv6:1; /* enable hashing 4-tuple IPv6 SYNs */
+ unsigned int syn2tupenipv6:1; /* enable hashing 2-tuple IPv6 SYNs */
+ unsigned int syn4tupenipv4:1; /* enable hashing 4-tuple IPv4 SYNs */
+ unsigned int syn2tupenipv4:1; /* enable hashing 2-tuple IPv4 SYNs */
+ unsigned int ofdmapen:1; /* Offload Map Enable */
+ unsigned int tnlmapen:1; /* Tunnel Map Enable */
+ unsigned int tnlalllookup:1; /* Tunnel All Lookup */
+ unsigned int hashtoeplitz:1; /* use Toeplitz hash */
+ } basicvirtual;
+ } u;
+};
+
+/*
+ * Virtual Interface RSS Configuration in host-native format.
+ */
+union rss_vi_config {
+ struct {
+ u16 defaultq; /* Ingress Queue ID for !tnlalllookup */
+ unsigned int ip6fourtupen:1; /* hash 4-tuple IPv6 ingress packets */
+ unsigned int ip6twotupen:1; /* hash 2-tuple IPv6 ingress packets */
+ unsigned int ip4fourtupen:1; /* hash 4-tuple IPv4 ingress packets */
+ unsigned int ip4twotupen:1; /* hash 2-tuple IPv4 ingress packets */
+ int udpen; /* hash 4-tuple UDP ingress packets */
+ } basicvirtual;
+};
+
+/*
+ * Maximum resources provisioned for a PCI VF.
+ */
+struct vf_resources {
+ unsigned int nvi; /* N virtual interfaces */
+ unsigned int neq; /* N egress Qs */
+ unsigned int nethctrl; /* N egress ETH or CTRL Qs */
+ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
+ unsigned int niq; /* N ingress Qs */
+ unsigned int tc; /* PCI-E traffic class */
+ unsigned int pmask; /* port access rights mask */
+ unsigned int nexactf; /* N exact MPS filters */
+ unsigned int r_caps; /* read capabilities */
+ unsigned int wx_caps; /* write/execute capabilities */
+};
+
+/*
+ * Per-"adapter" (Virtual Function) parameters.
+ */
+struct adapter_params {
+ struct dev_params dev; /* general device parameters */
+ struct sge_params sge; /* Scatter Gather Engine */
+ struct vpd_params vpd; /* Vital Product Data */
+ struct rss_params rss; /* Receive Side Scaling */
+ struct vf_resources vfres; /* Virtual Function Resource limits */
+ u8 nports; /* # of Ethernet "ports" */
+};
+
+#include "adapter.h"
+
+#ifndef PCI_VENDOR_ID_CHELSIO
+# define PCI_VENDOR_ID_CHELSIO 0x1425
+#endif
+
+#define for_each_port(adapter, iter) \
+ for (iter = 0; iter < (adapter)->params.nports; iter++)
+
+static inline bool is_10g_port(const struct link_config *lc)
+{
+ return (lc->supported & SUPPORTED_10000baseT_Full) != 0;
+}
+
+static inline unsigned int core_ticks_per_usec(const struct adapter *adapter)
+{
+ return adapter->params.vpd.cclk / 1000;
+}
+
+static inline unsigned int us_to_core_ticks(const struct adapter *adapter,
+ unsigned int us)
+{
+ return (us * adapter->params.vpd.cclk) / 1000;
+}
+
+static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
+ unsigned int ticks)
+{
+ return (ticks * 1000) / adapter->params.vpd.cclk;
+}
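+
+/*
+ * Example (assuming a hypothetical 250 MHz core clock, cclk = 250000
+ * KHz): core_ticks_per_usec() returns 250, us_to_core_ticks(adapter, 4)
+ * returns 1000, and core_ticks_to_us(adapter, 1000) returns 4 again.
+ */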
+
+int t4vf_wr_mbox_core(struct adapter *, const void *, int, void *, bool);
+
+static inline int t4vf_wr_mbox(struct adapter *adapter, const void *cmd,
+ int size, void *rpl)
+{
+ return t4vf_wr_mbox_core(adapter, cmd, size, rpl, true);
+}
+
+static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
+ int size, void *rpl)
+{
+ return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
+}
+
+int __devinit t4vf_wait_dev_ready(struct adapter *);
+int __devinit t4vf_port_init(struct adapter *, int);
+
+int t4vf_fw_reset(struct adapter *);
+int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
+int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
+
+int t4vf_get_sge_params(struct adapter *);
+int t4vf_get_vpd_params(struct adapter *);
+int t4vf_get_dev_params(struct adapter *);
+int t4vf_get_rss_glb_config(struct adapter *);
+int t4vf_get_vfres(struct adapter *);
+
+int t4vf_read_rss_vi_config(struct adapter *, unsigned int,
+ union rss_vi_config *);
+int t4vf_write_rss_vi_config(struct adapter *, unsigned int,
+ union rss_vi_config *);
+int t4vf_config_rss_range(struct adapter *, unsigned int, int, int,
+ const u16 *, int);
+
+int t4vf_alloc_vi(struct adapter *, int);
+int t4vf_free_vi(struct adapter *, int);
+int t4vf_enable_vi(struct adapter *, unsigned int, bool, bool);
+int t4vf_identify_port(struct adapter *, unsigned int, unsigned int);
+
+int t4vf_set_rxmode(struct adapter *, unsigned int, int, int, int, int, int,
+ bool);
+int t4vf_alloc_mac_filt(struct adapter *, unsigned int, bool, unsigned int,
+ const u8 **, u16 *, u64 *, bool);
+int t4vf_change_mac(struct adapter *, unsigned int, int, const u8 *, bool);
+int t4vf_set_addr_hash(struct adapter *, unsigned int, bool, u64, bool);
+int t4vf_get_port_stats(struct adapter *, int, struct t4vf_port_stats *);
+
+int t4vf_iq_free(struct adapter *, unsigned int, unsigned int, unsigned int,
+ unsigned int);
+int t4vf_eth_eq_free(struct adapter *, unsigned int);
+
+int t4vf_handle_fw_rpl(struct adapter *, const __be64 *);
+
+#endif /* __T4VF_COMMON_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
new file mode 100644
index 000000000000..c7b127d93767
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
@@ -0,0 +1,121 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4VF_DEFS_H__
+#define __T4VF_DEFS_H__
+
+#include "../cxgb4/t4_regs.h"
+
+/*
+ * The VF Register Map.
+ *
+ * The Scatter Gather Engine (SGE), Multiport Support module (MPS), PIO Local
+ * bus module (PL) and CPU Interface Module (CIM) components are mapped via
+ * the Slice to Module Map Table (see below) in the Physical Function Register
+ * Map. The Mail Box Data (MBDATA) range is mapped via the PCI-E Mailbox Base
+ * and Offset registers in the PF Register Map. The MBDATA base address is
+ * quite constrained as it determines the Mailbox Data addresses for both PFs
+ * and VFs, and therefore must fit in both the VF and PF Register Maps without
+ * overlapping other registers.
+ */
+#define T4VF_SGE_BASE_ADDR 0x0000
+#define T4VF_MPS_BASE_ADDR 0x0100
+#define T4VF_PL_BASE_ADDR 0x0200
+#define T4VF_MBDATA_BASE_ADDR 0x0240
+#define T4VF_CIM_BASE_ADDR 0x0300
+
+#define T4VF_REGMAP_START 0x0000
+#define T4VF_REGMAP_SIZE 0x0400
+
+/*
+ * There's no hardware limitation which requires that the addresses of the
+ * Mailbox Data in the fixed CIM PF map and the programmable VF map must
+ * match. However, it's a useful convention ...
+ */
+#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA
+#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA!
+#endif
+
+/*
+ * Virtual Function "Slice to Module Map Table" definitions.
+ *
+ * This table allows us to map subsets of the various module register sets
+ * into the T4VF Register Map. Each table entry identifies the index of the
+ * module whose registers are being mapped, the offset within the module's
+ * register set that the mapping should start at, the limit of the mapping,
+ * and the offset within the T4VF Register Map to which the module's registers
+ * are being mapped. All addresses and quantities are in terms of 32-bit
+ * words. The "limit" value is also in terms of 32-bit words and is equal to
+ * the last address mapped in the T4VF Register Map (i.e. it's a "<="
+ * relation rather than a "<").
+ */
+#define T4VF_MOD_MAP(module, index, first, last) \
+ T4VF_MOD_MAP_##module##_INDEX = (index), \
+ T4VF_MOD_MAP_##module##_FIRST = (first), \
+ T4VF_MOD_MAP_##module##_LAST = (last), \
+ T4VF_MOD_MAP_##module##_OFFSET = ((first)/4), \
+ T4VF_MOD_MAP_##module##_BASE = \
+ (T4VF_##module##_BASE_ADDR/4 + (first)/4), \
+ T4VF_MOD_MAP_##module##_LIMIT = \
+ (T4VF_##module##_BASE_ADDR/4 + (last)/4),
+
+#define SGE_VF_KDOORBELL 0x0
+#define SGE_VF_GTS 0x4
+#define MPS_VF_CTL 0x0
+#define MPS_VF_STAT_RX_VF_ERR_FRAMES_H 0xfc
+#define PL_VF_WHOAMI 0x0
+#define CIM_VF_EXT_MAILBOX_CTRL 0x0
+#define CIM_VF_EXT_MAILBOX_STATUS 0x4
+
+enum {
+ T4VF_MOD_MAP(SGE, 2, SGE_VF_KDOORBELL, SGE_VF_GTS)
+ T4VF_MOD_MAP(MPS, 0, MPS_VF_CTL, MPS_VF_STAT_RX_VF_ERR_FRAMES_H)
+ T4VF_MOD_MAP(PL, 3, PL_VF_WHOAMI, PL_VF_WHOAMI)
+ T4VF_MOD_MAP(CIM, 1, CIM_VF_EXT_MAILBOX_CTRL, CIM_VF_EXT_MAILBOX_STATUS)
+};
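+
+/*
+ * As a worked example, the PL entry above expands to
+ * T4VF_MOD_MAP_PL_INDEX = 3, T4VF_MOD_MAP_PL_FIRST = _LAST = _OFFSET = 0,
+ * and T4VF_MOD_MAP_PL_BASE = T4VF_MOD_MAP_PL_LIMIT = 0x0200/4 = 0x80:
+ * the single PL_VF_WHOAMI register lives at 32-bit word 0x80 of the VF
+ * Register Map.
+ */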
+
+/*
+ * There isn't a Slice to Module Map Table entry for the Mailbox Data
+ * registers, but it's convenient to use similar names as above. There are 8
+ * little-endian 64-bit Mailbox Data registers. Note that the "instances"
+ * value below is in terms of 32-bit words which matches the "word" addressing
+ * space we use above for the Slice to Module Map Space.
+ */
+#define NUM_CIM_VF_MAILBOX_DATA_INSTANCES 16
+
+#define T4VF_MBDATA_FIRST 0
+#define T4VF_MBDATA_LAST ((NUM_CIM_VF_MAILBOX_DATA_INSTANCES-1)*4)
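+
+/*
+ * Sanity check on the arithmetic above: 8 x 64-bit registers = 64 bytes
+ * = 16 x 32-bit words, so the last word starts at byte offset
+ * (16 - 1) * 4 = 0x3c.
+ */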
+
+#endif /* __T4VF_DEFS_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
new file mode 100644
index 000000000000..fe3fd3dad6f7
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -0,0 +1,1387 @@
+/*
+ * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
+ * driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+
+#include "t4vf_common.h"
+#include "t4vf_defs.h"
+
+#include "../cxgb4/t4_regs.h"
+#include "../cxgb4/t4fw_api.h"
+
+/*
+ * Wait for the device to become ready, as signified by our "who am I"
+ * register returning a value other than the two "not ready" patterns
+ * (all 1's and 0xeeeeeeee). Return an error if it doesn't become ready ...
+ */
+int __devinit t4vf_wait_dev_ready(struct adapter *adapter)
+{
+ const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
+ const u32 notready1 = 0xffffffff;
+ const u32 notready2 = 0xeeeeeeee;
+ u32 val;
+
+ val = t4_read_reg(adapter, whoami);
+ if (val != notready1 && val != notready2)
+ return 0;
+ msleep(500);
+ val = t4_read_reg(adapter, whoami);
+ if (val != notready1 && val != notready2)
+ return 0;
+ else
+ return -EIO;
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order
+ * (since the firmware data structures are specified in a big-endian layout).
+ */
+static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
+ u32 mbox_data)
+{
+ for ( ; size; size -= 8, mbox_data += 8)
+ *rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
+}
+
+/*
+ * Dump contents of mailbox with a leading tag.
+ */
+static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
+{
+ dev_err(adapter->pdev_dev,
+ "mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag,
+ (unsigned long long)t4_read_reg64(adapter, mbox_data + 0),
+ (unsigned long long)t4_read_reg64(adapter, mbox_data + 8),
+ (unsigned long long)t4_read_reg64(adapter, mbox_data + 16),
+ (unsigned long long)t4_read_reg64(adapter, mbox_data + 24),
+ (unsigned long long)t4_read_reg64(adapter, mbox_data + 32),
+ (unsigned long long)t4_read_reg64(adapter, mbox_data + 40),
+ (unsigned long long)t4_read_reg64(adapter, mbox_data + 48),
+ (unsigned long long)t4_read_reg64(adapter, mbox_data + 56));
+}
+
+/**
+ * t4vf_wr_mbox_core - send a command to FW through the mailbox
+ * @adapter: the adapter
+ * @cmd: the command to write
+ * @size: command length in bytes
+ * @rpl: where to optionally store the reply
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sends the given command to FW through the mailbox and waits for the
+ * FW to execute the command. If @rpl is not %NULL it is used to store
+ * the FW's reply to the command. The command and its optional reply
+ * are of the same length. FW can take up to 500 ms to respond.
+ * @sleep_ok determines whether we may sleep while awaiting the response.
+ * If sleeping is allowed we use progressive backoff; otherwise we spin.
+ *
+ * The return value is 0 on success or a negative errno on failure. A
+ * failure can happen either because we are not able to execute the
+ * command or FW executes it but signals an error. In the latter case
+ * the return value is the error code indicated by FW (negated).
+ */
+int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
+ void *rpl, bool sleep_ok)
+{
+ static const int delay[] = {
+ 1, 1, 3, 5, 10, 10, 20, 50, 100
+ };
+
+ u32 v;
+ int i, ms, delay_idx;
+ const __be64 *p;
+ u32 mbox_data = T4VF_MBDATA_BASE_ADDR;
+ u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
+
+ /*
+ * Commands must be multiples of 16 bytes in length and may not be
+ * larger than the size of the Mailbox Data register array.
+ */
+ if ((size % 16) != 0 ||
+ size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
+ return -EINVAL;
+
+ /*
+ * Loop trying to get ownership of the mailbox. Return an error
+ * if we can't gain ownership.
+ */
+ v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
+ for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
+ v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
+ if (v != MBOX_OWNER_DRV)
+ return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
+
+ /*
+ * Write the command array into the Mailbox Data register array and
+ * transfer ownership of the mailbox to the firmware.
+ *
+ * For the VFs, the Mailbox Data "registers" are actually backed by
+ * T4's "MA" interface rather than PL Registers (as is the case for
+ * the PFs). Because these are in different coherency domains, the
+ * write to the VF's PL-register-backed Mailbox Control can race in
+ * front of the writes to the MA-backed VF Mailbox Data "registers".
+ * So we need to do a read-back on at least one byte of the VF Mailbox
+ * Data registers before doing the write to the VF Mailbox Control
+ * register.
+ */
+ for (i = 0, p = cmd; i < size; i += 8)
+ t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
+ t4_read_reg(adapter, mbox_data); /* flush write */
+
+ t4_write_reg(adapter, mbox_ctl,
+ MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
+ t4_read_reg(adapter, mbox_ctl); /* flush write */
+
+ /*
+ * Spin waiting for firmware to acknowledge processing our command.
+ */
+ delay_idx = 0;
+ ms = delay[0];
+
+ for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
+ if (sleep_ok) {
+ ms = delay[delay_idx];
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ mdelay(ms);
+ }
+
+ /*
+ * If we're the owner, see if this is the reply we wanted.
+ */
+ v = t4_read_reg(adapter, mbox_ctl);
+ if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
+ /*
+ * If the Message Valid bit isn't on, revoke ownership
+ * of the mailbox and continue waiting for our reply.
+ */
+ if ((v & MBMSGVALID) == 0) {
+ t4_write_reg(adapter, mbox_ctl,
+ MBOWNER(MBOX_OWNER_NONE));
+ continue;
+ }
+
+ /*
+ * We now have our reply. Extract the command return
+ * value, copy the reply back to our caller's buffer
+ * (if specified) and revoke ownership of the mailbox.
+ * We return the (negated) firmware command return
+ * code (this depends on FW_SUCCESS == 0).
+ */
+
+ /* return value in low-order little-endian word */
+ v = t4_read_reg(adapter, mbox_data);
+ if (FW_CMD_RETVAL_GET(v))
+ dump_mbox(adapter, "FW Error", mbox_data);
+
+ if (rpl) {
+ /* request bit in high-order BE word */
+ WARN_ON((be32_to_cpu(*(const u32 *)cmd)
+ & FW_CMD_REQUEST) == 0);
+ get_mbox_rpl(adapter, rpl, size, mbox_data);
+ WARN_ON((be32_to_cpu(*(u32 *)rpl)
+ & FW_CMD_REQUEST) != 0);
+ }
+ t4_write_reg(adapter, mbox_ctl,
+ MBOWNER(MBOX_OWNER_NONE));
+ return -FW_CMD_RETVAL_GET(v);
+ }
+ }
+
+ /*
+ * We timed out. Return the error ...
+ */
+ dump_mbox(adapter, "FW Timeout", mbox_data);
+ return -ETIMEDOUT;
+}
+
+/**
+ * hash_mac_addr - return the hash value of a MAC address
+ * @addr: the 48-bit Ethernet MAC address
+ *
+ * Hashes a MAC address according to the hash function used by hardware
+ * inexact (hash) address matching.
+ */
+static int hash_mac_addr(const u8 *addr)
+{
+ u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
+ u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
+ a ^= b;
+ a ^= (a >> 12);
+ a ^= (a >> 6);
+ return a & 0x3f;
+}
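+
+/*
+ * For example, the (hypothetical) address 00:00:00:00:00:01 folds to
+ * a = 0x000000 ^ 0x000001 = 1, which the shift/xor steps leave
+ * unchanged, so the function returns 1. The result is always in 0..63,
+ * i.e. one bit position in the 64-bit inexact-match hash vector used by
+ * t4vf_set_addr_hash().
+ */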
+
+/**
+ * init_link_config - initialize a link's SW state
+ * @lc: structure holding the link state
+ * @caps: link capabilities
+ *
+ * Initializes the SW state maintained for each link, including the link's
+ * capabilities and default speed/flow-control/autonegotiation settings.
+ */
+static void __devinit init_link_config(struct link_config *lc,
+ unsigned int caps)
+{
+ lc->supported = caps;
+ lc->requested_speed = 0;
+ lc->speed = 0;
+ lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+ if (lc->supported & SUPPORTED_Autoneg) {
+ lc->advertising = lc->supported;
+ lc->autoneg = AUTONEG_ENABLE;
+ lc->requested_fc |= PAUSE_AUTONEG;
+ } else {
+ lc->advertising = 0;
+ lc->autoneg = AUTONEG_DISABLE;
+ }
+}
+
+/**
+ * t4vf_port_init - initialize port hardware/software state
+ * @adapter: the adapter
+ * @pidx: the adapter port index
+ */
+int __devinit t4vf_port_init(struct adapter *adapter, int pidx)
+{
+ struct port_info *pi = adap2pinfo(adapter, pidx);
+ struct fw_vi_cmd vi_cmd, vi_rpl;
+ struct fw_port_cmd port_cmd, port_rpl;
+ int v;
+ u32 word;
+
+ /*
+ * Execute a VI Read command to get our Virtual Interface information
+ * like MAC address, etc.
+ */
+ memset(&vi_cmd, 0, sizeof(vi_cmd));
+ vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ);
+ vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
+ vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(pi->viid));
+ v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
+ if (v)
+ return v;
+
+ BUG_ON(pi->port_id != FW_VI_CMD_PORTID_GET(vi_rpl.portid_pkd));
+ pi->rss_size = FW_VI_CMD_RSSSIZE_GET(be16_to_cpu(vi_rpl.rsssize_pkd));
+ t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);
+
+ /*
+ * If we don't have read access to our port information, we're done
+ * now. Otherwise, execute a PORT Read command to get it ...
+ */
+ if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
+ return 0;
+
+ memset(&port_cmd, 0, sizeof(port_cmd));
+ port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ |
+ FW_PORT_CMD_PORTID(pi->port_id));
+ port_cmd.action_to_len16 =
+ cpu_to_be32(FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_LEN16(port_cmd));
+ v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
+ if (v)
+ return v;
+
+ v = 0;
+ word = be16_to_cpu(port_rpl.u.info.pcap);
+ if (word & FW_PORT_CAP_SPEED_100M)
+ v |= SUPPORTED_100baseT_Full;
+ if (word & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseT_Full;
+ if (word & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseT_Full;
+ if (word & FW_PORT_CAP_ANEG)
+ v |= SUPPORTED_Autoneg;
+ init_link_config(&pi->link_cfg, v);
+
+ return 0;
+}
+
+/**
+ * t4vf_fw_reset - issue a reset to FW
+ * @adapter: the adapter
+ *
+ * Issues a reset command to FW. For a Physical Function this would
+ * result in the Firmware resetting all of its state. For a Virtual
+ * Function this just resets the state associated with the VF.
+ */
+int t4vf_fw_reset(struct adapter *adapter)
+{
+ struct fw_reset_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
+ FW_CMD_WRITE);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_query_params - query FW or device parameters
+ * @adapter: the adapter
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @vals: the parameter values
+ *
+ * Reads the values of firmware or device parameters. Up to 7 parameters
+ * can be queried at once.
+ */
+int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, u32 *vals)
+{
+ int i, ret;
+ struct fw_params_cmd cmd, rpl;
+ struct fw_params_param *p;
+ size_t len16;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ);
+ len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+ param[nparams].mnem), 16);
+ cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
+ for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
+ p->mnem = htonl(*params++);
+
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (ret == 0)
+ for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
+ *vals++ = be32_to_cpu(p->val);
+ return ret;
+}
+
+/**
+ * t4vf_set_params - sets FW or device parameters
+ * @adapter: the adapter
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @vals: the parameter values
+ *
+ * Sets the values of firmware or device parameters. Up to 7 parameters
+ * can be specified at once.
+ */
+int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, const u32 *vals)
+{
+ int i;
+ struct fw_params_cmd cmd;
+ struct fw_params_param *p;
+ size_t len16;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE);
+ len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+ param[nparams]), 16);
+ cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
+ for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
+ p->mnem = cpu_to_be32(*params++);
+ p->val = cpu_to_be32(*vals++);
+ }
+
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_get_sge_params - retrieve adapter Scatter Gather Engine parameters
+ * @adapter: the adapter
+ *
+ * Retrieves various core SGE parameters in the form of hardware SGE
+ * register values. The caller is responsible for decoding these as
+ * needed. The SGE parameters are stored in @adapter->params.sge.
+ */
+int t4vf_get_sge_params(struct adapter *adapter)
+{
+ struct sge_params *sge_params = &adapter->params.sge;
+ u32 params[7], vals[7];
+ int v;
+
+ params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_CONTROL));
+ params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_HOST_PAGE_SIZE));
+ params[2] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE0));
+ params[3] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_FL_BUFFER_SIZE1));
+ params[4] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_0_AND_1));
+ params[5] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_2_AND_3));
+ params[6] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_TIMER_VALUE_4_AND_5));
+ v = t4vf_query_params(adapter, 7, params, vals);
+ if (v)
+ return v;
+ sge_params->sge_control = vals[0];
+ sge_params->sge_host_page_size = vals[1];
+ sge_params->sge_fl_buffer_size[0] = vals[2];
+ sge_params->sge_fl_buffer_size[1] = vals[3];
+ sge_params->sge_timer_value_0_and_1 = vals[4];
+ sge_params->sge_timer_value_2_and_3 = vals[5];
+ sge_params->sge_timer_value_4_and_5 = vals[6];
+
+ params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD));
+ v = t4vf_query_params(adapter, 1, params, vals);
+ if (v)
+ return v;
+ sge_params->sge_ingress_rx_threshold = vals[0];
+
+ return 0;
+}
+
+/**
+ * t4vf_get_vpd_params - retrieve device VPD parameters
+ * @adapter: the adapter
+ *
+ * Retrieves various device Vital Product Data parameters. The parameters
+ * are stored in @adapter->params.vpd.
+ */
+int t4vf_get_vpd_params(struct adapter *adapter)
+{
+ struct vpd_params *vpd_params = &adapter->params.vpd;
+ u32 params[7], vals[7];
+ int v;
+
+ params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+ v = t4vf_query_params(adapter, 1, params, vals);
+ if (v)
+ return v;
+ vpd_params->cclk = vals[0];
+
+ return 0;
+}
+
+/**
+ * t4vf_get_dev_params - retrieve device parameters
+ * @adapter: the adapter
+ *
+ * Retrieves various device parameters. The parameters are stored in
+ * @adapter->params.dev.
+ */
+int t4vf_get_dev_params(struct adapter *adapter)
+{
+ struct dev_params *dev_params = &adapter->params.dev;
+ u32 params[7], vals[7];
+ int v;
+
+ params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV));
+ params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV));
+ v = t4vf_query_params(adapter, 2, params, vals);
+ if (v)
+ return v;
+ dev_params->fwrev = vals[0];
+ dev_params->tprev = vals[1];
+
+ return 0;
+}
+
+/**
+ * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
+ * @adapter: the adapter
+ *
+ * Retrieves global RSS mode and parameters with which we have to live
+ * and stores them in the @adapter's RSS parameters.
+ */
+int t4vf_get_rss_glb_config(struct adapter *adapter)
+{
+ struct rss_params *rss = &adapter->params.rss;
+ struct fw_rss_glb_config_cmd cmd, rpl;
+ int v;
+
+ /*
+ * Execute an RSS Global Configuration read command to retrieve
+ * our RSS configuration.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v)
+ return v;
+
+ /*
+ * Translate the big-endian RSS Global Configuration into our
+ * CPU-endian format based on the RSS mode. We also do first-level
+ * filtering at this point to weed out modes which don't support
+ * VF Drivers ...
+ */
+ rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_GET(
+ be32_to_cpu(rpl.u.manual.mode_pkd));
+ switch (rss->mode) {
+ case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
+ u32 word = be32_to_cpu(
+ rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
+
+ rss->u.basicvirtual.synmapen =
+ ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0);
+ rss->u.basicvirtual.syn4tupenipv6 =
+ ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0);
+ rss->u.basicvirtual.syn2tupenipv6 =
+ ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0);
+ rss->u.basicvirtual.syn4tupenipv4 =
+ ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0);
+ rss->u.basicvirtual.syn2tupenipv4 =
+ ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0);
+
+ rss->u.basicvirtual.ofdmapen =
+ ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0);
+
+ rss->u.basicvirtual.tnlmapen =
+ ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0);
+ rss->u.basicvirtual.tnlalllookup =
+ ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0);
+
+ rss->u.basicvirtual.hashtoeplitz =
+ ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0);
+
+ /* we need at least Tunnel Map Enable to be set */
+ if (!rss->u.basicvirtual.tnlmapen)
+ return -EINVAL;
+ break;
+ }
+
+ default:
+ /* all unknown/unsupported RSS modes result in an error */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * t4vf_get_vfres - retrieve VF resource limits
+ * @adapter: the adapter
+ *
+ * Retrieves configured resource limits and capabilities for a virtual
+ * function. The results are stored in @adapter->vfres.
+ */
+int t4vf_get_vfres(struct adapter *adapter)
+{
+ struct vf_resources *vfres = &adapter->params.vfres;
+ struct fw_pfvf_cmd cmd, rpl;
+ int v;
+ u32 word;
+
+ /*
+ * Execute PFVF Read command to get VF resource limits; bail out early
+ * with error on command failure.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PFVF_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v)
+ return v;
+
+ /*
+ * Extract VF resource limits and return success.
+ */
+ word = be32_to_cpu(rpl.niqflint_niq);
+ vfres->niqflint = FW_PFVF_CMD_NIQFLINT_GET(word);
+ vfres->niq = FW_PFVF_CMD_NIQ_GET(word);
+
+ word = be32_to_cpu(rpl.type_to_neq);
+ vfres->neq = FW_PFVF_CMD_NEQ_GET(word);
+ vfres->pmask = FW_PFVF_CMD_PMASK_GET(word);
+
+ word = be32_to_cpu(rpl.tc_to_nexactf);
+ vfres->tc = FW_PFVF_CMD_TC_GET(word);
+ vfres->nvi = FW_PFVF_CMD_NVI_GET(word);
+ vfres->nexactf = FW_PFVF_CMD_NEXACTF_GET(word);
+
+ word = be32_to_cpu(rpl.r_caps_to_nethctrl);
+ vfres->r_caps = FW_PFVF_CMD_R_CAPS_GET(word);
+ vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_GET(word);
+ vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_GET(word);
+
+ return 0;
+}
+
+/**
+ * t4vf_read_rss_vi_config - read a VI's RSS configuration
+ * @adapter: the adapter
+ * @viid: Virtual Interface ID
+ * @config: pointer to host-native VI RSS Configuration buffer
+ *
+ * Reads the Virtual Interface's RSS configuration information and
+ * translates it into CPU-native format.
+ */
+int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
+ union rss_vi_config *config)
+{
+ struct fw_rss_vi_config_cmd cmd, rpl;
+ int v;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ |
+ FW_RSS_VI_CONFIG_CMD_VIID(viid));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v)
+ return v;
+
+ switch (adapter->params.rss.mode) {
+ case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
+ u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen);
+
+ config->basicvirtual.ip6fourtupen =
+ ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) != 0);
+ config->basicvirtual.ip6twotupen =
+ ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) != 0);
+ config->basicvirtual.ip4fourtupen =
+ ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) != 0);
+ config->basicvirtual.ip4twotupen =
+ ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) != 0);
+ config->basicvirtual.udpen =
+ ((word & FW_RSS_VI_CONFIG_CMD_UDPEN) != 0);
+ config->basicvirtual.defaultq =
+ FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(word);
+ break;
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * t4vf_write_rss_vi_config - write a VI's RSS configuration
+ * @adapter: the adapter
+ * @viid: Virtual Interface ID
+ * @config: pointer to host-native VI RSS Configuration buffer
+ *
+ * Write the Virtual Interface's RSS configuration information
+ * (translating it into firmware-native format before writing).
+ */
+int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
+ union rss_vi_config *config)
+{
+ struct fw_rss_vi_config_cmd cmd, rpl;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_RSS_VI_CONFIG_CMD_VIID(viid));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ switch (adapter->params.rss.mode) {
+ case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
+ u32 word = 0;
+
+ if (config->basicvirtual.ip6fourtupen)
+ word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
+ if (config->basicvirtual.ip6twotupen)
+ word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
+ if (config->basicvirtual.ip4fourtupen)
+ word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
+ if (config->basicvirtual.ip4twotupen)
+ word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
+ if (config->basicvirtual.udpen)
+ word |= FW_RSS_VI_CONFIG_CMD_UDPEN;
+ word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ(
+ config->basicvirtual.defaultq);
+ cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
+ break;
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+}
+
+/**
+ * t4vf_config_rss_range - configure a portion of the RSS mapping table
+ * @adapter: the adapter
+ * @viid: Virtual Interface of RSS Table Slice
+ * @start: starting entry in the table to write
+ * @n: how many table entries to write
+ * @rspq: values for the "Response Queue" (Ingress Queue) lookup table
+ * @nrspq: number of values in @rspq
+ *
+ * Programs the selected part of the VI's RSS mapping table with the
+ * provided values. If @nrspq < @n the supplied values are used repeatedly
+ * until the full table range is populated.
+ *
+ * The caller must ensure the values in @rspq are in the range 0..1023.
+ */
+int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
+ int start, int n, const u16 *rspq, int nrspq)
+{
+ const u16 *rsp = rspq;
+ const u16 *rsp_end = rspq+nrspq;
+ struct fw_rss_ind_tbl_cmd cmd;
+
+ /*
+ * Initialize firmware command template to write the RSS table.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_RSS_IND_TBL_CMD_VIID(viid));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+
+ /*
+ * Each firmware RSS command can accommodate up to 32 RSS Ingress
+ * Queue Identifiers. These Ingress Queue IDs are packed three to
+ * a 32-bit word as 10-bit values with the upper remaining 2 bits
+ * reserved.
+ */
+ while (n > 0) {
+ __be32 *qp = &cmd.iq0_to_iq2;
+ int nq = min(n, 32);
+ int ret;
+
+ /*
+ * Set up the firmware RSS command header to send the next
+ * "nq" Ingress Queue IDs to the firmware.
+ */
+ cmd.niqid = cpu_to_be16(nq);
+ cmd.startidx = cpu_to_be16(start);
+
+ /*
+ * "nq" more done for the start of the next loop.
+ */
+ start += nq;
+ n -= nq;
+
+ /*
+ * While there are still Ingress Queue IDs to stuff into the
+ * current firmware RSS command, retrieve them from the
+ * Ingress Queue ID array and insert them into the command.
+ */
+ while (nq > 0) {
+ /*
+ * Grab up to the next 3 Ingress Queue IDs (wrapping
+ * around the Ingress Queue ID array if necessary) and
+ * insert them into the firmware RSS command at the
+ * current 3-tuple position within the command.
+ */
+ u16 qbuf[3];
+ u16 *qbp = qbuf;
+ int nqbuf = min(3, nq);
+
+ nq -= nqbuf;
+ qbuf[0] = qbuf[1] = qbuf[2] = 0;
+ while (nqbuf) {
+ nqbuf--;
+ *qbp++ = *rsp++;
+ if (rsp >= rsp_end)
+ rsp = rspq;
+ }
+ *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
+ FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
+ FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
+ }
+
+ /*
+ * Send this portion of the RSS table update to the firmware;
+ * bail out on any errors.
+ */
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
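+
+/*
+ * To illustrate the wrap-around above: a (hypothetical) call with
+ * start = 0, n = 5, rspq = {10, 11}, nrspq = 2 programs RSS table
+ * entries 0..4 with queue IDs 10, 11, 10, 11, 10 -- the supplied values
+ * are reused round-robin until the requested range is filled.
+ */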
+
+/**
+ * t4vf_alloc_vi - allocate a virtual interface on a port
+ * @adapter: the adapter
+ * @port_id: physical port associated with the VI
+ *
+ * Allocate a new Virtual Interface and bind it to the indicated
+ * physical port. Return the new Virtual Interface Identifier on
+ * success, or a [negative] error number on failure.
+ */
+int t4vf_alloc_vi(struct adapter *adapter, int port_id)
+{
+ struct fw_vi_cmd cmd, rpl;
+ int v;
+
+ /*
+ * Execute a VI command to allocate Virtual Interface and return its
+ * VIID.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_CMD_EXEC);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
+ FW_VI_CMD_ALLOC);
+ cmd.portid_pkd = FW_VI_CMD_PORTID(port_id);
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v)
+ return v;
+
+ return FW_VI_CMD_VIID_GET(be16_to_cpu(rpl.type_viid));
+}
+
+/**
+ * t4vf_free_vi -- free a virtual interface
+ * @adapter: the adapter
+ * @viid: the virtual interface identifier
+ *
+ * Free a previously allocated Virtual Interface. Return an error on
+ * failure.
+ */
+int t4vf_free_vi(struct adapter *adapter, int viid)
+{
+ struct fw_vi_cmd cmd;
+
+ /*
+ * Execute a VI command to free the Virtual Interface.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_EXEC);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
+ FW_VI_CMD_FREE);
+ cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(viid));
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_enable_vi - enable/disable a virtual interface
+ * @adapter: the adapter
+ * @viid: the Virtual Interface ID
+ * @rx_en: 1=enable Rx, 0=disable Rx
+ * @tx_en: 1=enable Tx, 0=disable Tx
+ *
+ * Enables/disables a virtual interface.
+ */
+int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
+ bool rx_en, bool tx_en)
+{
+ struct fw_vi_enable_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_EXEC |
+ FW_VI_ENABLE_CMD_VIID(viid));
+ cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN(rx_en) |
+ FW_VI_ENABLE_CMD_EEN(tx_en) |
+ FW_LEN16(cmd));
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_identify_port - identify a VI's port by blinking its LED
+ * @adapter: the adapter
+ * @viid: the Virtual Interface ID
+ * @nblinks: how many times to blink LED at 2.5 Hz
+ *
+ * Identifies a VI's port by blinking its LED.
+ */
+int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
+ unsigned int nblinks)
+{
+ struct fw_vi_enable_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_EXEC |
+ FW_VI_ENABLE_CMD_VIID(viid));
+ cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED |
+ FW_LEN16(cmd));
+ cmd.blinkdur = cpu_to_be16(nblinks);
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_set_rxmode - set Rx properties of a virtual interface
+ * @adapter: the adapter
+ * @viid: the VI id
+ * @mtu: the new MTU or -1 for no change
+ * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
+ * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
+ * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
+ * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
+ * -1 no change
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Sets Rx properties of a virtual interface.
+ */
+int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid,
+ int mtu, int promisc, int all_multi, int bcast, int vlanex,
+ bool sleep_ok)
+{
+ struct fw_vi_rxmode_cmd cmd;
+
+ /* convert to FW values */
+ if (mtu < 0)
+ mtu = FW_VI_RXMODE_CMD_MTU_MASK;
+ if (promisc < 0)
+ promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
+ if (all_multi < 0)
+ all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
+ if (bcast < 0)
+ bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
+ if (vlanex < 0)
+ vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_RXMODE_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_VI_RXMODE_CMD_VIID(viid));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ cmd.mtu_to_vlanexen =
+ cpu_to_be32(FW_VI_RXMODE_CMD_MTU(mtu) |
+ FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
+ FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
+ FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
+ FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
+ return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
+}
+
+/**
+ * t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses
+ * @adapter: the adapter
+ * @viid: the Virtual Interface Identifier
+ * @free: if true any existing filters for this VI id are first removed
+ * @naddr: the number of MAC addresses to allocate filters for (up to 7)
+ * @addr: the MAC address(es)
+ * @idx: where to store the index of each allocated filter
+ * @hash: pointer to hash address filter bitmap
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Allocates an exact-match filter for each of the supplied addresses and
+ * sets it to the corresponding address. If @idx is not %NULL it should
+ * have at least @naddr entries, each of which will be set to the index of
+ * the filter allocated for the corresponding MAC address. If a filter
+ * could not be allocated for an address, its index is set to 0xffff.
+ * If @hash is not %NULL, addresses that fail to allocate an exact filter
+ * are hashed and update the hash filter bitmap pointed at by @hash.
+ *
+ * Returns a negative error number or the number of filters allocated.
+ */
+int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
+ unsigned int naddr, const u8 **addr, u16 *idx,
+ u64 *hash, bool sleep_ok)
+{
+ int offset, ret = 0;
+ unsigned int nfilters = 0;
+ unsigned int rem = naddr;
+ struct fw_vi_mac_cmd cmd, rpl;
+
+ if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
+ return -EINVAL;
+
+ for (offset = 0; offset < naddr; /**/) {
+ unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
+ ? rem
+ : ARRAY_SIZE(cmd.u.exact));
+ size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+ u.exact[fw_naddr]), 16);
+ struct fw_vi_mac_exact *p;
+ int i;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ (free ? FW_CMD_EXEC : 0) |
+ FW_VI_MAC_CMD_VIID(viid));
+ cmd.freemacs_to_len16 =
+ cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
+ FW_CMD_LEN16(len16));
+
+ for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
+ p->valid_to_idx = cpu_to_be16(
+ FW_VI_MAC_CMD_VALID |
+ FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+ memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
+ }
+
+ ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
+ sleep_ok);
+ if (ret && ret != -ENOMEM)
+ break;
+
+ for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
+ u16 index = FW_VI_MAC_CMD_IDX_GET(
+ be16_to_cpu(p->valid_to_idx));
+
+ if (idx)
+ idx[offset+i] =
+ (index >= FW_CLS_TCAM_NUM_ENTRIES
+ ? 0xffff
+ : index);
+ if (index < FW_CLS_TCAM_NUM_ENTRIES)
+ nfilters++;
+ else if (hash)
+ *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
+ }
+
+ free = false;
+ offset += fw_naddr;
+ rem -= fw_naddr;
+ }
+
+ /*
+ * If there were no errors or we merely ran out of room in our MAC
+ * address arena, return the number of filters actually written.
+ */
+ if (ret == 0 || ret == -ENOMEM)
+ ret = nfilters;
+ return ret;
+}
+
+/**
+ * t4vf_change_mac - modifies the exact-match filter for a MAC address
+ * @adapter: the adapter
+ * @viid: the Virtual Interface ID
+ * @idx: index of existing filter for old value of MAC address, or -1
+ * @addr: the new MAC address value
+ * @persist: if idx < 0, the new MAC allocation should be persistent
+ *
+ * Modifies an exact-match filter and sets it to the new MAC address.
+ * Note that in general it is not possible to modify the value of a given
+ * filter, so the generic way to modify an address filter is to free the
+ * one being used by the old address value and allocate a new filter for
+ * the new address value. @idx can be -1 if the address is a new
+ * addition.
+ *
+ * Returns a negative error number or the index of the filter with the new
+ * MAC value.
+ */
+int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
+ int idx, const u8 *addr, bool persist)
+{
+ int ret;
+ struct fw_vi_mac_cmd cmd, rpl;
+ struct fw_vi_mac_exact *p = &cmd.u.exact[0];
+ size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+ u.exact[1]), 16);
+
+ /*
+ * If this is a new allocation, determine whether it should be
+ * persistent (across a "freemacs" operation) or not.
+ */
+ if (idx < 0)
+ idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_VI_MAC_CMD_VIID(viid));
+ cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
+ p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID |
+ FW_VI_MAC_CMD_IDX(idx));
+ memcpy(p->macaddr, addr, sizeof(p->macaddr));
+
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (ret == 0) {
+ p = &rpl.u.exact[0];
+ ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
+ if (ret >= FW_CLS_TCAM_NUM_ENTRIES)
+ ret = -ENOMEM;
+ }
+ return ret;
+}
+
+/**
+ * t4vf_set_addr_hash - program the MAC inexact-match hash filter
+ * @adapter: the adapter
+ * @viid: the Virtual Interface Identifier
+ * @ucast: whether the hash filter should also match unicast addresses
+ * @vec: the value to be written to the hash filter
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Sets the 64-bit inexact-match hash filter for a virtual interface.
+ */
+int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid,
+ bool ucast, u64 vec, bool sleep_ok)
+{
+ struct fw_vi_mac_cmd cmd;
+ size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+ u.exact[0]), 16);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_VI_ENABLE_CMD_VIID(viid));
+ cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN |
+ FW_VI_MAC_CMD_HASHUNIEN(ucast) |
+ FW_CMD_LEN16(len16));
+ cmd.u.hash.hashvec = cpu_to_be64(vec);
+ return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
+}
+
+/**
+ * t4vf_get_port_stats - collect "port" statistics
+ * @adapter: the adapter
+ * @pidx: the port index
+ * @s: the stats structure to fill
+ *
+ * Collect statistics for the "port"'s Virtual Interface.
+ */
+int t4vf_get_port_stats(struct adapter *adapter, int pidx,
+ struct t4vf_port_stats *s)
+{
+ struct port_info *pi = adap2pinfo(adapter, pidx);
+ struct fw_vi_stats_vf fwstats;
+ unsigned int rem = VI_VF_NUM_STATS;
+ __be64 *fwsp = (__be64 *)&fwstats;
+
+ /*
+ * Grab the Virtual Interface statistics a chunk at a time via mailbox
+ * commands. We could use a Work Request and get all of them at once
+ * but that's an asynchronous interface which is awkward to use.
+ */
+ while (rem) {
+ unsigned int ix = VI_VF_NUM_STATS - rem;
+ unsigned int nstats = min(6U, rem);
+ struct fw_vi_stats_cmd cmd, rpl;
+ size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
+ sizeof(struct fw_vi_stats_ctl));
+ size_t len16 = DIV_ROUND_UP(len, 16);
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_STATS_CMD) |
+ FW_VI_STATS_CMD_VIID(pi->viid) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16(len16));
+ cmd.u.ctl.nstats_ix =
+ cpu_to_be16(FW_VI_STATS_CMD_IX(ix) |
+ FW_VI_STATS_CMD_NSTATS(nstats));
+ ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
+ if (ret)
+ return ret;
+
+ memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);
+
+ rem -= nstats;
+ fwsp += nstats;
+ }
+
+ /*
+ * Translate firmware statistics into host native statistics.
+ */
+ s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes);
+ s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
+ s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes);
+ s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
+ s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes);
+ s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
+ s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames);
+ s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes);
+ s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames);
+
+ s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes);
+ s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
+ s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes);
+ s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
+ s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes);
+ s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);
+
+ s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames);
+
+ return 0;
+}
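+
+/*
+ * Since struct fw_vi_stats_vf is unpacked into the 16 counters above,
+ * VI_VF_NUM_STATS is 16 and the loop fetches the statistics in three
+ * mailbox commands of 6, 6 and 4 counters respectively.
+ */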
+
+/**
+ * t4vf_iq_free - free an ingress queue and its free lists
+ * @adapter: the adapter
+ * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
+ * @iqid: ingress queue ID
+ * @fl0id: FL0 queue ID or 0xffff if no attached FL0
+ * @fl1id: FL1 queue ID or 0xffff if no attached FL1
+ *
+ * Frees an ingress queue and its associated free lists, if any.
+ */
+int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype,
+ unsigned int iqid, unsigned int fl0id, unsigned int fl1id)
+{
+ struct fw_iq_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_EXEC);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE |
+ FW_LEN16(cmd));
+ cmd.type_to_iqandstindex =
+ cpu_to_be32(FW_IQ_CMD_TYPE(iqtype));
+
+ cmd.iqid = cpu_to_be16(iqid);
+ cmd.fl0id = cpu_to_be16(fl0id);
+ cmd.fl1id = cpu_to_be16(fl1id);
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_eth_eq_free - free an Ethernet egress queue
+ * @adapter: the adapter
+ * @eqid: egress queue ID
+ *
+ * Frees an Ethernet egress queue.
+ */
+int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
+{
+ struct fw_eq_eth_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_EXEC);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE |
+ FW_LEN16(cmd));
+ cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID(eqid));
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_handle_fw_rpl - process a firmware reply message
+ * @adapter: the adapter
+ * @rpl: start of the firmware message
+ *
+ * Processes a firmware message, such as link state change messages.
+ */
+int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
+{
+ const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
+ u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi));
+
+ switch (opcode) {
+ case FW_PORT_CMD: {
+ /*
+ * Link/module state change message.
+ */
+ const struct fw_port_cmd *port_cmd =
+ (const struct fw_port_cmd *)rpl;
+ u32 word;
+ int action, port_id, link_ok, speed, fc, pidx;
+
+ /*
+ * Extract various fields from port status change message.
+ */
+ action = FW_PORT_CMD_ACTION_GET(
+ be32_to_cpu(port_cmd->action_to_len16));
+ if (action != FW_PORT_ACTION_GET_PORT_INFO) {
+ dev_err(adapter->pdev_dev,
+ "Unknown firmware PORT reply action %x\n",
+ action);
+ break;
+ }
+
+ port_id = FW_PORT_CMD_PORTID_GET(
+ be32_to_cpu(port_cmd->op_to_portid));
+
+ word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
+ link_ok = (word & FW_PORT_CMD_LSTATUS) != 0;
+ speed = 0;
+ fc = 0;
+ if (word & FW_PORT_CMD_RXPAUSE)
+ fc |= PAUSE_RX;
+ if (word & FW_PORT_CMD_TXPAUSE)
+ fc |= PAUSE_TX;
+ if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+ speed = SPEED_100;
+ else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+ speed = SPEED_1000;
+ else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+ speed = SPEED_10000;
+
+ /*
+ * Scan all of our "ports" (Virtual Interfaces) looking for
+ * those bound to the physical port which has changed. If
+ * our recorded state doesn't match the current state,
+ * signal that change to the OS code.
+ */
+ for_each_port(adapter, pidx) {
+ struct port_info *pi = adap2pinfo(adapter, pidx);
+ struct link_config *lc;
+
+ if (pi->port_id != port_id)
+ continue;
+
+ lc = &pi->link_cfg;
+ if (link_ok != lc->link_ok || speed != lc->speed ||
+ fc != lc->fc) {
+ /* something changed */
+ lc->link_ok = link_ok;
+ lc->speed = speed;
+ lc->fc = fc;
+ t4vf_os_link_changed(adapter, pidx, link_ok);
+ }
+ }
+ break;
+ }
+
+ default:
+ dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n",
+ opcode);
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
new file mode 100644
index 000000000000..e9386ef524aa
--- /dev/null
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -0,0 +1,30 @@
+#
+# Cirrus network device configuration
+#
+
+config NET_VENDOR_CIRRUS
+ bool "Cirrus devices"
+ default y
+ depends on ARM && ARCH_EP93XX
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Cirrus cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_CIRRUS
+
+config EP93XX_ETH
+ tristate "EP93xx Ethernet support"
+ depends on ARM && ARCH_EP93XX
+ select NET_CORE
+ select MII
+ help
+ This is a driver for the Ethernet hardware included in EP93xx CPUs.
+ Say Y if you are building a kernel for EP93xx based devices.
+
+endif # NET_VENDOR_CIRRUS
diff --git a/drivers/net/ethernet/cirrus/Makefile b/drivers/net/ethernet/cirrus/Makefile
new file mode 100644
index 000000000000..9905ea20f9ff
--- /dev/null
+++ b/drivers/net/ethernet/cirrus/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Cirrus network device drivers.
+#
+
+obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
new file mode 100644
index 000000000000..4317af8d2f0a
--- /dev/null
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -0,0 +1,904 @@
+/*
+ * EP93xx ethernet network device driver
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Dedicated to Marija Kulikova.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <mach/hardware.h>
+
+#define DRV_MODULE_NAME "ep93xx-eth"
+#define DRV_MODULE_VERSION "0.1"
+
+#define RX_QUEUE_ENTRIES 64
+#define TX_QUEUE_ENTRIES 8
+
+#define MAX_PKT_SIZE 2044
+#define PKT_BUF_SIZE 2048
+
+#define REG_RXCTL 0x0000
+#define REG_RXCTL_DEFAULT 0x00073800
+#define REG_TXCTL 0x0004
+#define REG_TXCTL_ENABLE 0x00000001
+#define REG_MIICMD 0x0010
+#define REG_MIICMD_READ 0x00008000
+#define REG_MIICMD_WRITE 0x00004000
+#define REG_MIIDATA 0x0014
+#define REG_MIISTS 0x0018
+#define REG_MIISTS_BUSY 0x00000001
+#define REG_SELFCTL 0x0020
+#define REG_SELFCTL_RESET 0x00000001
+#define REG_INTEN 0x0024
+#define REG_INTEN_TX 0x00000008
+#define REG_INTEN_RX 0x00000007
+#define REG_INTSTSP 0x0028
+#define REG_INTSTS_TX 0x00000008
+#define REG_INTSTS_RX 0x00000004
+#define REG_INTSTSC 0x002c
+#define REG_AFP 0x004c
+#define REG_INDAD0 0x0050
+#define REG_INDAD1 0x0051
+#define REG_INDAD2 0x0052
+#define REG_INDAD3 0x0053
+#define REG_INDAD4 0x0054
+#define REG_INDAD5 0x0055
+#define REG_GIINTMSK 0x0064
+#define REG_GIINTMSK_ENABLE 0x00008000
+#define REG_BMCTL 0x0080
+#define REG_BMCTL_ENABLE_TX 0x00000100
+#define REG_BMCTL_ENABLE_RX 0x00000001
+#define REG_BMSTS 0x0084
+#define REG_BMSTS_RX_ACTIVE 0x00000008
+#define REG_RXDQBADD 0x0090
+#define REG_RXDQBLEN 0x0094
+#define REG_RXDCURADD 0x0098
+#define REG_RXDENQ 0x009c
+#define REG_RXSTSQBADD 0x00a0
+#define REG_RXSTSQBLEN 0x00a4
+#define REG_RXSTSQCURADD 0x00a8
+#define REG_RXSTSENQ 0x00ac
+#define REG_TXDQBADD 0x00b0
+#define REG_TXDQBLEN 0x00b4
+#define REG_TXDQCURADD 0x00b8
+#define REG_TXDENQ 0x00bc
+#define REG_TXSTSQBADD 0x00c0
+#define REG_TXSTSQBLEN 0x00c4
+#define REG_TXSTSQCURADD 0x00c8
+#define REG_MAXFRMLEN 0x00e8
+
+struct ep93xx_rdesc {
+ u32 buf_addr;
+ u32 rdesc1;
+};
+
+#define RDESC1_NSOF 0x80000000
+#define RDESC1_BUFFER_INDEX 0x7fff0000
+#define RDESC1_BUFFER_LENGTH 0x0000ffff
+
+struct ep93xx_rstat {
+ u32 rstat0;
+ u32 rstat1;
+};
+
+#define RSTAT0_RFP 0x80000000
+#define RSTAT0_RWE 0x40000000
+#define RSTAT0_EOF 0x20000000
+#define RSTAT0_EOB 0x10000000
+#define RSTAT0_AM 0x00c00000
+#define RSTAT0_RX_ERR 0x00200000
+#define RSTAT0_OE 0x00100000
+#define RSTAT0_FE 0x00080000
+#define RSTAT0_RUNT 0x00040000
+#define RSTAT0_EDATA 0x00020000
+#define RSTAT0_CRCE 0x00010000
+#define RSTAT0_CRCI 0x00008000
+#define RSTAT0_HTI 0x00003f00
+#define RSTAT1_RFP 0x80000000
+#define RSTAT1_BUFFER_INDEX 0x7fff0000
+#define RSTAT1_FRAME_LENGTH 0x0000ffff
+
+struct ep93xx_tdesc {
+ u32 buf_addr;
+ u32 tdesc1;
+};
+
+#define TDESC1_EOF 0x80000000
+#define TDESC1_BUFFER_INDEX 0x7fff0000
+#define TDESC1_BUFFER_ABORT 0x00008000
+#define TDESC1_BUFFER_LENGTH 0x00000fff
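+
+/*
+ * Encoding example (hypothetical values): a 60-byte frame in Tx buffer 3
+ * with end-of-frame set is described by
+ * TDESC1_EOF | (3 << 16) | 60 = 0x8003003c, matching the composition in
+ * ep93xx_xmit() below.
+ */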
+
+struct ep93xx_tstat {
+ u32 tstat0;
+};
+
+#define TSTAT0_TXFP 0x80000000
+#define TSTAT0_TXWE 0x40000000
+#define TSTAT0_FA 0x20000000
+#define TSTAT0_LCRS 0x10000000
+#define TSTAT0_OW 0x04000000
+#define TSTAT0_TXU 0x02000000
+#define TSTAT0_ECOLL 0x01000000
+#define TSTAT0_NCOLL 0x001f0000
+#define TSTAT0_BUFFER_INDEX 0x00007fff
+
+struct ep93xx_descs {
+ struct ep93xx_rdesc rdesc[RX_QUEUE_ENTRIES];
+ struct ep93xx_tdesc tdesc[TX_QUEUE_ENTRIES];
+ struct ep93xx_rstat rstat[RX_QUEUE_ENTRIES];
+ struct ep93xx_tstat tstat[TX_QUEUE_ENTRIES];
+};
+
+struct ep93xx_priv {
+ struct resource *res;
+ void __iomem *base_addr;
+ int irq;
+
+ struct ep93xx_descs *descs;
+ dma_addr_t descs_dma_addr;
+
+ void *rx_buf[RX_QUEUE_ENTRIES];
+ void *tx_buf[TX_QUEUE_ENTRIES];
+
+ spinlock_t rx_lock;
+ unsigned int rx_pointer;
+ unsigned int tx_clean_pointer;
+ unsigned int tx_pointer;
+ spinlock_t tx_pending_lock;
+ unsigned int tx_pending;
+
+ struct net_device *dev;
+ struct napi_struct napi;
+
+ struct mii_if_info mii;
+ u8 mdc_divisor;
+};
+
+#define rdb(ep, off) __raw_readb((ep)->base_addr + (off))
+#define rdw(ep, off) __raw_readw((ep)->base_addr + (off))
+#define rdl(ep, off) __raw_readl((ep)->base_addr + (off))
+#define wrb(ep, off, val) __raw_writeb((val), (ep)->base_addr + (off))
+#define wrw(ep, off, val) __raw_writew((val), (ep)->base_addr + (off))
+#define wrl(ep, off, val) __raw_writel((val), (ep)->base_addr + (off))
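+
+/*
+ * Design note (our reading, not stated in the source): the __raw_ MMIO
+ * accessors used by these helpers perform native-endian accesses without
+ * memory barriers, which suffices for this on-SoC peripheral on EP93xx.
+ */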
+
+static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ int data;
+ int i;
+
+ wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);
+
+ for (i = 0; i < 10; i++) {
+ if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+
+ if (i == 10) {
+ pr_info("mdio read timed out\n");
+ data = 0xffff;
+ } else {
+ data = rdl(ep, REG_MIIDATA);
+ }
+
+ return data;
+}
+
+static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ int i;
+
+ wrl(ep, REG_MIIDATA, data);
+ wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);
+
+ for (i = 0; i < 10; i++) {
+ if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+
+ if (i == 10)
+ pr_info("mdio write timed out\n");
+}
+
+static int ep93xx_rx(struct net_device *dev, int processed, int budget)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+
+ while (processed < budget) {
+ int entry;
+ struct ep93xx_rstat *rstat;
+ u32 rstat0;
+ u32 rstat1;
+ int length;
+ struct sk_buff *skb;
+
+ entry = ep->rx_pointer;
+ rstat = ep->descs->rstat + entry;
+
+ rstat0 = rstat->rstat0;
+ rstat1 = rstat->rstat1;
+ if (!(rstat0 & RSTAT0_RFP) || !(rstat1 & RSTAT1_RFP))
+ break;
+
+ rstat->rstat0 = 0;
+ rstat->rstat1 = 0;
+
+ if (!(rstat0 & RSTAT0_EOF))
+ pr_crit("not end-of-frame %.8x %.8x\n", rstat0, rstat1);
+ if (!(rstat0 & RSTAT0_EOB))
+ pr_crit("not end-of-buffer %.8x %.8x\n", rstat0, rstat1);
+ if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry)
+ pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1);
+
+ if (!(rstat0 & RSTAT0_RWE)) {
+ dev->stats.rx_errors++;
+ if (rstat0 & RSTAT0_OE)
+ dev->stats.rx_fifo_errors++;
+ if (rstat0 & RSTAT0_FE)
+ dev->stats.rx_frame_errors++;
+ if (rstat0 & (RSTAT0_RUNT | RSTAT0_EDATA))
+ dev->stats.rx_length_errors++;
+ if (rstat0 & RSTAT0_CRCE)
+ dev->stats.rx_crc_errors++;
+ goto err;
+ }
+
+ length = rstat1 & RSTAT1_FRAME_LENGTH;
+ if (length > MAX_PKT_SIZE) {
+ pr_notice("invalid length %.8x %.8x\n", rstat0, rstat1);
+ goto err;
+ }
+
+ /* Strip FCS. */
+ if (rstat0 & RSTAT0_CRCI)
+ length -= 4;
+
+ skb = dev_alloc_skb(length + 2);
+ if (likely(skb != NULL)) {
+ struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
+ skb_reserve(skb, 2);
+ dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr,
+ length, DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
+ dma_sync_single_for_device(dev->dev.parent,
+ rxd->buf_addr, length,
+ DMA_FROM_DEVICE);
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ netif_receive_skb(skb);
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += length;
+ } else {
+ dev->stats.rx_dropped++;
+ }
+
+err:
+ ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1);
+ processed++;
+ }
+
+ return processed;
+}
+
+static int ep93xx_have_more_rx(struct ep93xx_priv *ep)
+{
+ struct ep93xx_rstat *rstat = ep->descs->rstat + ep->rx_pointer;
+ return !!((rstat->rstat0 & RSTAT0_RFP) && (rstat->rstat1 & RSTAT1_RFP));
+}
+
+static int ep93xx_poll(struct napi_struct *napi, int budget)
+{
+ struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi);
+ struct net_device *dev = ep->dev;
+ int rx = 0;
+
+poll_some_more:
+ rx = ep93xx_rx(dev, rx, budget);
+ if (rx < budget) {
+ int more = 0;
+
+ spin_lock_irq(&ep->rx_lock);
+ __napi_complete(napi);
+ wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
+ if (ep93xx_have_more_rx(ep)) {
+ wrl(ep, REG_INTEN, REG_INTEN_TX);
+ wrl(ep, REG_INTSTSP, REG_INTSTS_RX);
+ more = 1;
+ }
+ spin_unlock_irq(&ep->rx_lock);
+
+ if (more && napi_reschedule(napi))
+ goto poll_some_more;
+ }
+
+ if (rx) {
+ wrw(ep, REG_RXDENQ, rx);
+ wrw(ep, REG_RXSTSENQ, rx);
+ }
+
+ return rx;
+}
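+
+/*
+ * The completion dance above closes the race between this poll loop
+ * and ep93xx_irq(): after __napi_complete() re-enables the RX
+ * interrupt under ep->rx_lock, the ring is checked once more. If a
+ * frame arrived in that window, RX is masked again, the latched
+ * status is acked via REG_INTSTSP and NAPI is rescheduled, so no
+ * completion is lost between the interrupt handler and the poll loop.
+ */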
+
+static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ struct ep93xx_tdesc *txd;
+ int entry;
+
+ if (unlikely(skb->len > MAX_PKT_SIZE)) {
+ dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ entry = ep->tx_pointer;
+ ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);
+
+ txd = &ep->descs->tdesc[entry];
+
+ txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
+ dma_sync_single_for_cpu(dev->dev.parent, txd->buf_addr, skb->len,
+ DMA_TO_DEVICE);
+ skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
+ dma_sync_single_for_device(dev->dev.parent, txd->buf_addr, skb->len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
+
+ spin_lock_irq(&ep->tx_pending_lock);
+ ep->tx_pending++;
+ if (ep->tx_pending == TX_QUEUE_ENTRIES)
+ netif_stop_queue(dev);
+ spin_unlock_irq(&ep->tx_pending_lock);
+
+ wrl(ep, REG_TXDENQ, 1);
+
+ return NETDEV_TX_OK;
+}
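+
+/*
+ * Transmit is copy-based as well: the skb is checksummed and copied
+ * into the per-entry bounce buffer and freed immediately, so the
+ * completion path below only has to account for tx_pending and
+ * statistics, never for skb ownership.
+ */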
+
+static void ep93xx_tx_complete(struct net_device *dev)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ int wake;
+
+ wake = 0;
+
+ spin_lock(&ep->tx_pending_lock);
+ while (1) {
+ int entry;
+ struct ep93xx_tstat *tstat;
+ u32 tstat0;
+
+ entry = ep->tx_clean_pointer;
+ tstat = ep->descs->tstat + entry;
+
+ tstat0 = tstat->tstat0;
+ if (!(tstat0 & TSTAT0_TXFP))
+ break;
+
+ tstat->tstat0 = 0;
+
+ if (tstat0 & TSTAT0_FA)
+ pr_crit("frame aborted %.8x\n", tstat0);
+ if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry)
+ pr_crit("entry mismatch %.8x\n", tstat0);
+
+ if (tstat0 & TSTAT0_TXWE) {
+ int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += length;
+ } else {
+ dev->stats.tx_errors++;
+ }
+
+ if (tstat0 & TSTAT0_OW)
+ dev->stats.tx_window_errors++;
+ if (tstat0 & TSTAT0_TXU)
+ dev->stats.tx_fifo_errors++;
+ dev->stats.collisions += (tstat0 >> 16) & 0x1f;
+
+ ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1);
+ if (ep->tx_pending == TX_QUEUE_ENTRIES)
+ wake = 1;
+ ep->tx_pending--;
+ }
+ spin_unlock(&ep->tx_pending_lock);
+
+ if (wake)
+ netif_wake_queue(dev);
+}
+
+static irqreturn_t ep93xx_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ u32 status;
+
+ status = rdl(ep, REG_INTSTSC);
+ if (status == 0)
+ return IRQ_NONE;
+
+ if (status & REG_INTSTS_RX) {
+ spin_lock(&ep->rx_lock);
+ if (likely(napi_schedule_prep(&ep->napi))) {
+ wrl(ep, REG_INTEN, REG_INTEN_TX);
+ __napi_schedule(&ep->napi);
+ }
+ spin_unlock(&ep->rx_lock);
+ }
+
+ if (status & REG_INTSTS_TX)
+ ep93xx_tx_complete(dev);
+
+ return IRQ_HANDLED;
+}
+
+static void ep93xx_free_buffers(struct ep93xx_priv *ep)
+{
+ struct device *dev = ep->dev->dev.parent;
+ int i;
+
+ for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+ dma_addr_t d;
+
+ d = ep->descs->rdesc[i].buf_addr;
+ if (d)
+ dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);
+
+		kfree(ep->rx_buf[i]);	/* kfree(NULL) is a no-op */
+ }
+
+ for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
+ dma_addr_t d;
+
+ d = ep->descs->tdesc[i].buf_addr;
+ if (d)
+ dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);
+
+		kfree(ep->tx_buf[i]);	/* kfree(NULL) is a no-op */
+ }
+
+ dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
+ ep->descs_dma_addr);
+}
+
+static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
+{
+ struct device *dev = ep->dev->dev.parent;
+ int i;
+
+ ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
+ &ep->descs_dma_addr, GFP_KERNEL);
+ if (ep->descs == NULL)
+ return 1;
+
+ for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+ void *buf;
+ dma_addr_t d;
+
+ buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+ if (buf == NULL)
+ goto err;
+
+ d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, d)) {
+ kfree(buf);
+ goto err;
+ }
+
+ ep->rx_buf[i] = buf;
+ ep->descs->rdesc[i].buf_addr = d;
+ ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
+ }
+
+ for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
+ void *buf;
+ dma_addr_t d;
+
+ buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+ if (buf == NULL)
+ goto err;
+
+ d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, d)) {
+ kfree(buf);
+ goto err;
+ }
+
+ ep->tx_buf[i] = buf;
+ ep->descs->tdesc[i].buf_addr = d;
+ }
+
+ return 0;
+
+err:
+ ep93xx_free_buffers(ep);
+ return 1;
+}
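+
+/*
+ * Allocation layout: the descriptor and status rings share one
+ * coherent DMA allocation (ep->descs), while the packet buffers
+ * themselves use streaming mappings and are explicitly synced around
+ * each copy in the RX and TX hot paths above.
+ */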
+
+static int ep93xx_start_hw(struct net_device *dev)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ unsigned long addr;
+ int i;
+
+ wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET);
+ for (i = 0; i < 10; i++) {
+ if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0)
+ break;
+ msleep(1);
+ }
+
+ if (i == 10) {
+ pr_crit("hw failed to reset\n");
+ return 1;
+ }
+
+ wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9));
+
+ /* Does the PHY support preamble suppress? */
+ if ((ep93xx_mdio_read(dev, ep->mii.phy_id, MII_BMSR) & 0x0040) != 0)
+ wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9) | (1 << 8));
+
+ /* Receive descriptor ring. */
+ addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rdesc);
+ wrl(ep, REG_RXDQBADD, addr);
+ wrl(ep, REG_RXDCURADD, addr);
+ wrw(ep, REG_RXDQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rdesc));
+
+ /* Receive status ring. */
+ addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rstat);
+ wrl(ep, REG_RXSTSQBADD, addr);
+ wrl(ep, REG_RXSTSQCURADD, addr);
+ wrw(ep, REG_RXSTSQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rstat));
+
+ /* Transmit descriptor ring. */
+ addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tdesc);
+ wrl(ep, REG_TXDQBADD, addr);
+ wrl(ep, REG_TXDQCURADD, addr);
+ wrw(ep, REG_TXDQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tdesc));
+
+ /* Transmit status ring. */
+ addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tstat);
+ wrl(ep, REG_TXSTSQBADD, addr);
+ wrl(ep, REG_TXSTSQCURADD, addr);
+ wrw(ep, REG_TXSTSQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tstat));
+
+ wrl(ep, REG_BMCTL, REG_BMCTL_ENABLE_TX | REG_BMCTL_ENABLE_RX);
+ wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
+ wrl(ep, REG_GIINTMSK, 0);
+
+ for (i = 0; i < 10; i++) {
+ if ((rdl(ep, REG_BMSTS) & REG_BMSTS_RX_ACTIVE) != 0)
+ break;
+ msleep(1);
+ }
+
+ if (i == 10) {
+ pr_crit("hw failed to start\n");
+ return 1;
+ }
+
+ wrl(ep, REG_RXDENQ, RX_QUEUE_ENTRIES);
+ wrl(ep, REG_RXSTSENQ, RX_QUEUE_ENTRIES);
+
+ wrb(ep, REG_INDAD0, dev->dev_addr[0]);
+ wrb(ep, REG_INDAD1, dev->dev_addr[1]);
+ wrb(ep, REG_INDAD2, dev->dev_addr[2]);
+ wrb(ep, REG_INDAD3, dev->dev_addr[3]);
+ wrb(ep, REG_INDAD4, dev->dev_addr[4]);
+ wrb(ep, REG_INDAD5, dev->dev_addr[5]);
+ wrl(ep, REG_AFP, 0);
+
+ wrl(ep, REG_MAXFRMLEN, (MAX_PKT_SIZE << 16) | MAX_PKT_SIZE);
+
+ wrl(ep, REG_RXCTL, REG_RXCTL_DEFAULT);
+ wrl(ep, REG_TXCTL, REG_TXCTL_ENABLE);
+
+ return 0;
+}
+
+static void ep93xx_stop_hw(struct net_device *dev)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ int i;
+
+ wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET);
+ for (i = 0; i < 10; i++) {
+ if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0)
+ break;
+ msleep(1);
+ }
+
+ if (i == 10)
+ pr_crit("hw failed to reset\n");
+}
+
+static int ep93xx_open(struct net_device *dev)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ int err;
+
+ if (ep93xx_alloc_buffers(ep))
+ return -ENOMEM;
+
+ napi_enable(&ep->napi);
+
+ if (ep93xx_start_hw(dev)) {
+ napi_disable(&ep->napi);
+ ep93xx_free_buffers(ep);
+ return -EIO;
+ }
+
+ spin_lock_init(&ep->rx_lock);
+ ep->rx_pointer = 0;
+ ep->tx_clean_pointer = 0;
+ ep->tx_pointer = 0;
+ spin_lock_init(&ep->tx_pending_lock);
+ ep->tx_pending = 0;
+
+ err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev);
+ if (err) {
+ napi_disable(&ep->napi);
+ ep93xx_stop_hw(dev);
+ ep93xx_free_buffers(ep);
+ return err;
+ }
+
+ wrl(ep, REG_GIINTMSK, REG_GIINTMSK_ENABLE);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int ep93xx_close(struct net_device *dev)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+
+ napi_disable(&ep->napi);
+ netif_stop_queue(dev);
+
+ wrl(ep, REG_GIINTMSK, 0);
+ free_irq(ep->irq, dev);
+ ep93xx_stop_hw(dev);
+ ep93xx_free_buffers(ep);
+
+ return 0;
+}
+
+static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ return generic_mii_ioctl(&ep->mii, data, cmd, NULL);
+}
+
+static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+}
+
+static int ep93xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ return mii_ethtool_gset(&ep->mii, cmd);
+}
+
+static int ep93xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ return mii_ethtool_sset(&ep->mii, cmd);
+}
+
+static int ep93xx_nway_reset(struct net_device *dev)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ return mii_nway_restart(&ep->mii);
+}
+
+static u32 ep93xx_get_link(struct net_device *dev)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ return mii_link_ok(&ep->mii);
+}
+
+static const struct ethtool_ops ep93xx_ethtool_ops = {
+ .get_drvinfo = ep93xx_get_drvinfo,
+ .get_settings = ep93xx_get_settings,
+ .set_settings = ep93xx_set_settings,
+ .nway_reset = ep93xx_nway_reset,
+ .get_link = ep93xx_get_link,
+};
+
+static const struct net_device_ops ep93xx_netdev_ops = {
+ .ndo_open = ep93xx_open,
+ .ndo_stop = ep93xx_close,
+ .ndo_start_xmit = ep93xx_xmit,
+ .ndo_do_ioctl = ep93xx_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
+{
+ struct net_device *dev;
+
+ dev = alloc_etherdev(sizeof(struct ep93xx_priv));
+ if (dev == NULL)
+ return NULL;
+
+ memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN);
+
+ dev->ethtool_ops = &ep93xx_ethtool_ops;
+ dev->netdev_ops = &ep93xx_netdev_ops;
+
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+
+ return dev;
+}
+
+static int ep93xx_eth_remove(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct ep93xx_priv *ep;
+
+ dev = platform_get_drvdata(pdev);
+ if (dev == NULL)
+ return 0;
+ platform_set_drvdata(pdev, NULL);
+
+ ep = netdev_priv(dev);
+
+ /* @@@ Force down. */
+ unregister_netdev(dev);
+ ep93xx_free_buffers(ep);
+
+ if (ep->base_addr != NULL)
+ iounmap(ep->base_addr);
+
+ if (ep->res != NULL) {
+ release_resource(ep->res);
+ kfree(ep->res);
+ }
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+static int ep93xx_eth_probe(struct platform_device *pdev)
+{
+ struct ep93xx_eth_data *data;
+ struct net_device *dev;
+ struct ep93xx_priv *ep;
+ struct resource *mem;
+ int irq;
+ int err;
+
+ if (pdev == NULL)
+ return -ENODEV;
+ data = pdev->dev.platform_data;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!mem || irq < 0)
+ return -ENXIO;
+
+ dev = ep93xx_dev_alloc(data);
+ if (dev == NULL) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ ep = netdev_priv(dev);
+ ep->dev = dev;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
+
+ platform_set_drvdata(pdev, dev);
+
+ ep->res = request_mem_region(mem->start, resource_size(mem),
+ dev_name(&pdev->dev));
+ if (ep->res == NULL) {
+ dev_err(&pdev->dev, "Could not reserve memory region\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ ep->base_addr = ioremap(mem->start, resource_size(mem));
+ if (ep->base_addr == NULL) {
+ dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
+ err = -EIO;
+ goto err_out;
+ }
+ ep->irq = irq;
+
+ ep->mii.phy_id = data->phy_id;
+ ep->mii.phy_id_mask = 0x1f;
+ ep->mii.reg_num_mask = 0x1f;
+ ep->mii.dev = dev;
+ ep->mii.mdio_read = ep93xx_mdio_read;
+ ep->mii.mdio_write = ep93xx_mdio_write;
+ ep->mdc_divisor = 40; /* Max HCLK 100 MHz, min MDIO clk 2.5 MHz. */
+
+ if (is_zero_ether_addr(dev->dev_addr))
+ random_ether_addr(dev->dev_addr);
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register netdev\n");
+ goto err_out;
+ }
+
+ printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, %pM\n",
+ dev->name, ep->irq, dev->dev_addr);
+
+ return 0;
+
+err_out:
+ ep93xx_eth_remove(pdev);
+ return err;
+}
+
+static struct platform_driver ep93xx_eth_driver = {
+ .probe = ep93xx_eth_probe,
+ .remove = ep93xx_eth_remove,
+ .driver = {
+ .name = "ep93xx-eth",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ep93xx_eth_init_module(void)
+{
+ printk(KERN_INFO DRV_MODULE_NAME " version " DRV_MODULE_VERSION " loading\n");
+ return platform_driver_register(&ep93xx_eth_driver);
+}
+
+static void __exit ep93xx_eth_cleanup_module(void)
+{
+ platform_driver_unregister(&ep93xx_eth_driver);
+}
+
+module_init(ep93xx_eth_init_module);
+module_exit(ep93xx_eth_cleanup_module);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ep93xx-eth");
diff --git a/drivers/net/ethernet/cisco/Kconfig b/drivers/net/ethernet/cisco/Kconfig
new file mode 100644
index 000000000000..94606f7ee13a
--- /dev/null
+++ b/drivers/net/ethernet/cisco/Kconfig
@@ -0,0 +1,23 @@
+#
+# Cisco device configuration
+#
+
+config NET_VENDOR_CISCO
+ bool "Cisco devices"
+ default y
+ depends on PCI && INET
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Cisco cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_CISCO
+
+source "drivers/net/ethernet/cisco/enic/Kconfig"
+
+endif # NET_VENDOR_CISCO
diff --git a/drivers/net/ethernet/cisco/Makefile b/drivers/net/ethernet/cisco/Makefile
new file mode 100644
index 000000000000..6c7437bc4a92
--- /dev/null
+++ b/drivers/net/ethernet/cisco/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Cisco device drivers.
+#
+
+obj-$(CONFIG_ENIC) += enic/
diff --git a/drivers/net/ethernet/cisco/enic/Kconfig b/drivers/net/ethernet/cisco/enic/Kconfig
new file mode 100644
index 000000000000..9cc706a6cffd
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/Kconfig
@@ -0,0 +1,9 @@
+#
+# Cisco device configuration
+#
+
+config ENIC
+ tristate "Cisco VIC Ethernet NIC Support"
+ depends on PCI && INET
+ ---help---
+ This enables the support for the Cisco VIC Ethernet card.
diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile
new file mode 100644
index 000000000000..9d4974bba247
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_ENIC) := enic.o
+
+enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
+ enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o
+
diff --git a/drivers/net/ethernet/cisco/enic/cq_desc.h b/drivers/net/ethernet/cisco/enic/cq_desc.h
new file mode 100644
index 000000000000..d6dd1b4edf6e
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/cq_desc.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _CQ_DESC_H_
+#define _CQ_DESC_H_
+
+/*
+ * Completion queue descriptor types
+ */
+enum cq_desc_types {
+ CQ_DESC_TYPE_WQ_ENET = 0,
+ CQ_DESC_TYPE_DESC_COPY = 1,
+ CQ_DESC_TYPE_WQ_EXCH = 2,
+ CQ_DESC_TYPE_RQ_ENET = 3,
+ CQ_DESC_TYPE_RQ_FCP = 4,
+};
+
+/* Completion queue descriptor: 16B
+ *
+ * All completion queues have this basic layout. The
+ * type_specfic area (the type-specific payload) is
+ * unique for each completion queue type.
+ */
+struct cq_desc {
+ __le16 completed_index;
+ __le16 q_number;
+ u8 type_specfic[11];
+ u8 type_color;
+};
+
+#define CQ_DESC_TYPE_BITS 4
+#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
+#define CQ_DESC_COLOR_MASK 1
+#define CQ_DESC_COLOR_SHIFT 7
+#define CQ_DESC_Q_NUM_BITS 10
+#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
+#define CQ_DESC_COMP_NDX_BITS 12
+#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
+
+static inline void cq_desc_dec(const struct cq_desc *desc_arg,
+ u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+{
+ const struct cq_desc *desc = desc_arg;
+ const u8 type_color = desc->type_color;
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+
+ /*
+ * Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+
+ rmb();
+
+ *type = type_color & CQ_DESC_TYPE_MASK;
+ *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ CQ_DESC_COMP_NDX_MASK;
+}
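+
+/*
+ * A minimal consumer sketch, not taken from the driver: completions
+ * are detected by comparing a descriptor's color bit against the
+ * color the consumer currently expects, which flips on every wrap of
+ * the ring. cq_desc_ready() and its parameters are hypothetical names
+ * used here for illustration only.
+ */
+static inline int cq_desc_ready(const struct cq_desc *ring,
+	unsigned int to_clean, u8 expected_color)
+{
+	u8 type, color;
+	u16 q_number, completed_index;
+
+	cq_desc_dec(&ring[to_clean], &type, &color,
+		&q_number, &completed_index);
+
+	/* A slot not yet written by hardware still carries the old color. */
+	return color == expected_color;
+}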
+
+#endif /* _CQ_DESC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
new file mode 100644
index 000000000000..c2c0680a1146
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _CQ_ENET_DESC_H_
+#define _CQ_ENET_DESC_H_
+
+#include "cq_desc.h"
+
+/* Ethernet completion queue descriptor: 16B */
+struct cq_enet_wq_desc {
+ __le16 completed_index;
+ __le16 q_number;
+ u8 reserved[11];
+ u8 type_color;
+};
+
+static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
+ u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
+{
+ cq_desc_dec((struct cq_desc *)desc, type,
+ color, q_number, completed_index);
+}
+
+/* Completion queue descriptor: Ethernet receive queue, 16B */
+struct cq_enet_rq_desc {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le32 rss_hash;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 type_color;
+};
+
+#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
+#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
+#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
+#define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15)
+
+#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4
+#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \
+ ((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1)
+#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4
+#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5
+#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6
+
+#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14)
+
+#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14
+#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \
+ ((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14)
+#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15)
+
+#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS 12
+#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK \
+ ((1 << CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS) - 1)
+#define CQ_ENET_RQ_DESC_VLAN_TCI_CFI_MASK (0x1 << 12)
+#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS 3
+#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_MASK \
+ ((1 << CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS) - 1)
+#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_SHIFT 13
+
+#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 8
+#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
+ ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8
+#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \
+ ((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8
+
+#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0)
+#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0)
+#define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1)
+#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1)
+#define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
+#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
+
+static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
+ u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
+ u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
+ u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
+ u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof,
+ u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
+ u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
+ u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
+{
+ u16 completed_index_flags;
+ u16 q_number_rss_type_flags;
+ u16 bytes_written_flags;
+
+ cq_desc_dec((struct cq_desc *)desc, type,
+ color, q_number, completed_index);
+
+ completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+ q_number_rss_type_flags =
+ le16_to_cpu(desc->q_number_rss_type_flags);
+ bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
+ *ingress_port = (completed_index_flags &
+ CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
+ *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
+ 1 : 0;
+ *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
+ 1 : 0;
+ *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
+ 1 : 0;
+
+ *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
+ CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+ *csum_not_calc = (q_number_rss_type_flags &
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
+
+ *rss_hash = le32_to_cpu(desc->rss_hash);
+
+ *bytes_written = bytes_written_flags &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+ *packet_error = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
+ *vlan_stripped = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
+
+	/*
+	 * Tag Control Information (16 bits) = user_priority (3) +
+	 * cfi (1) + vlan (12)
+	 */
+ *vlan_tci = le16_to_cpu(desc->vlan);
+
+ if (*fcoe) {
+ *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
+ CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
+ *fcoe_fc_crc_ok = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
+ *fcoe_enc_error = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
+ *fcoe_eof = (u8)((desc->checksum_fcoe >>
+ CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
+ CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
+ *checksum = 0;
+ } else {
+ *fcoe_sof = 0;
+ *fcoe_fc_crc_ok = 0;
+ *fcoe_enc_error = 0;
+ *fcoe_eof = 0;
+ *checksum = le16_to_cpu(desc->checksum_fcoe);
+ }
+
+ *tcp_udp_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
+ *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
+ *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
+ *ipv4_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
+ *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
+ *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
+ *ipv4_fragment =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
+ *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
+}
+
+#endif /* _CQ_ENET_DESC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
new file mode 100644
index 000000000000..fe0c29acdbe6
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _ENIC_H_
+#define _ENIC_H_
+
+#include "vnic_enet.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_nic.h"
+#include "vnic_rss.h"
+
+#define DRV_NAME "enic"
+#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
+#define DRV_VERSION "2.1.1.28"
+#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
+
+#define ENIC_BARS_MAX 6
+
+#define ENIC_WQ_MAX 1
+#define ENIC_RQ_MAX 1
+#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
+#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
+
+struct enic_msix_entry {
+ int requested;
+ char devname[IFNAMSIZ];
+ irqreturn_t (*isr)(int, void *);
+ void *devid;
+};
+
+/* priv_flags */
+#define ENIC_SRIOV_ENABLED (1 << 0)
+
+/* enic port profile set flags */
+#define ENIC_PORT_REQUEST_APPLIED (1 << 0)
+#define ENIC_SET_REQUEST (1 << 1)
+#define ENIC_SET_NAME (1 << 2)
+#define ENIC_SET_INSTANCE (1 << 3)
+#define ENIC_SET_HOST (1 << 4)
+
+struct enic_port_profile {
+ u32 set;
+ u8 request;
+ char name[PORT_PROFILE_MAX];
+ u8 instance_uuid[PORT_UUID_MAX];
+ u8 host_uuid[PORT_UUID_MAX];
+ u8 vf_mac[ETH_ALEN];
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Per-instance private data structure */
+struct enic {
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct vnic_enet_config config;
+ struct vnic_dev_bar bar[ENIC_BARS_MAX];
+ struct vnic_dev *vdev;
+ struct timer_list notify_timer;
+ struct work_struct reset;
+ struct work_struct change_mtu_work;
+ struct msix_entry msix_entry[ENIC_INTR_MAX];
+ struct enic_msix_entry msix[ENIC_INTR_MAX];
+ u32 msg_enable;
+ spinlock_t devcmd_lock;
+ u8 mac_addr[ETH_ALEN];
+ u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
+ u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
+ unsigned int flags;
+ unsigned int priv_flags;
+ unsigned int mc_count;
+ unsigned int uc_count;
+ u32 port_mtu;
+ u32 rx_coalesce_usecs;
+ u32 tx_coalesce_usecs;
+#ifdef CONFIG_PCI_IOV
+ u32 num_vfs;
+#endif
+ struct enic_port_profile *pp;
+
+ /* work queue cache line section */
+ ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
+ spinlock_t wq_lock[ENIC_WQ_MAX];
+ unsigned int wq_count;
+ u16 loop_enable;
+ u16 loop_tag;
+
+ /* receive queue cache line section */
+ ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
+ unsigned int rq_count;
+ u64 rq_truncated_pkts;
+ u64 rq_bad_fcs;
+ struct napi_struct napi[ENIC_RQ_MAX];
+
+ /* interrupt resource cache line section */
+ ____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX];
+ unsigned int intr_count;
+ u32 __iomem *legacy_pba; /* memory-mapped */
+
+ /* completion queue cache line section */
+ ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
+ unsigned int cq_count;
+};
+
+static inline struct device *enic_get_dev(struct enic *enic)
+{
+ return &(enic->pdev->dev);
+}
+
+void enic_reset_addr_lists(struct enic *enic);
+int enic_sriov_enabled(struct enic *enic);
+int enic_is_valid_vf(struct enic *enic, int vf);
+int enic_is_dynamic(struct enic *enic);
+
+#endif /* _ENIC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
new file mode 100644
index 000000000000..fd6247b3c0ee
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -0,0 +1,286 @@
+/*
+ * Copyright 2011 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+
+#include "vnic_dev.h"
+#include "vnic_vic.h"
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_dev.h"
+
+int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_fw_info(enic->vdev, fw_info);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
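+
+/*
+ * Nearly every helper in this file follows the shape above: take
+ * enic->devcmd_lock, issue a single vnic_dev_*() firmware command,
+ * drop the lock and return the command's status. The lock serializes
+ * access to the one devcmd register window the firmware exposes.
+ */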
+
+int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_stats_dump(enic->vdev, vstats);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_add_station_addr(struct enic *enic)
+{
+ int err;
+
+ if (!is_valid_ether_addr(enic->netdev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_del_station_addr(struct enic *enic)
+{
+ int err;
+
+ if (!is_valid_ether_addr(enic->netdev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
+ int broadcast, int promisc, int allmulti)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_packet_filter(enic->vdev, directed,
+ multicast, broadcast, promisc, allmulti);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_add_addr(struct enic *enic, u8 *addr)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_add_addr(enic->vdev, addr);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_del_addr(struct enic *enic, u8 *addr)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_del_addr(enic->vdev, addr);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_notify_unset(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_notify_unset(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_hang_notify(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_hang_notify(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
+ IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_enable(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_enable_wait(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_disable(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_disable(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_intr_coal_timer_info(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_intr_coal_timer_info(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_vnic_dev_deinit(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_deinit(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_init_prov2(enic->vdev,
+ (u8 *)vp, vic_provinfo_size(vp));
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_deinit_done(struct enic *enic, int *status)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_deinit_done(enic->vdev, status);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+/* rtnl lock is held */
+void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ spin_lock(&enic->devcmd_lock);
+ enic_add_vlan(enic, vid);
+ spin_unlock(&enic->devcmd_lock);
+}
+
+/* rtnl lock is held */
+void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ spin_lock(&enic->devcmd_lock);
+ enic_del_vlan(enic, vid);
+ spin_unlock(&enic->devcmd_lock);
+}
+
+int enic_dev_enable2(struct enic *enic, int active)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_enable2(enic->vdev, active);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_enable2_done(struct enic *enic, int *status)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_enable2_done(enic->vdev, status);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_status_to_errno(int devcmd_status)
+{
+ switch (devcmd_status) {
+ case ERR_SUCCESS:
+ return 0;
+ case ERR_EINVAL:
+ return -EINVAL;
+ case ERR_EFAULT:
+ return -EFAULT;
+ case ERR_EPERM:
+ return -EPERM;
+ case ERR_EBUSY:
+ return -EBUSY;
+ case ERR_ECMDUNKNOWN:
+ case ERR_ENOTSUPPORTED:
+ return -EOPNOTSUPP;
+ case ERR_EBADSTATE:
+ return -EINVAL;
+ case ERR_ENOMEM:
+ return -ENOMEM;
+ case ERR_ETIMEDOUT:
+ return -ETIMEDOUT;
+ case ERR_ELINKDOWN:
+ return -ENETDOWN;
+ case ERR_EINPROGRESS:
+ return -EINPROGRESS;
+ case ERR_EMAXRES:
+ default:
+ return (devcmd_status < 0) ? devcmd_status : -1;
+ }
+}
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
new file mode 100644
index 000000000000..1f83a4747ba0
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2011 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _ENIC_DEV_H_
+#define _ENIC_DEV_H_
+
+#include "vnic_dev.h"
+
+/*
+ * Calls the devcmd function given by argument vnicdevcmdfn.
+ * If the vf argument is valid, the devcmd is proxied through
+ * that VF; otherwise it is issued directly.
+ */
+#define ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnicdevcmdfn, ...) \
+ do { \
+ spin_lock(&enic->devcmd_lock); \
+ if (enic_is_valid_vf(enic, vf)) { \
+ vnic_dev_cmd_proxy_by_index_start(enic->vdev, vf); \
+ err = vnicdevcmdfn(enic->vdev, ##__VA_ARGS__); \
+ vnic_dev_cmd_proxy_end(enic->vdev); \
+ } else { \
+ err = vnicdevcmdfn(enic->vdev, ##__VA_ARGS__); \
+ } \
+ spin_unlock(&enic->devcmd_lock); \
+ } while (0)
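+
+/*
+ * A hypothetical call site, shown only to illustrate the macro's
+ * shape (a VF index, an err lvalue, the enic, then the devcmd
+ * function and its trailing arguments):
+ *
+ *	int err;
+ *
+ *	ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_add_addr, addr);
+ */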
+
+int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info);
+int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats);
+int enic_dev_add_station_addr(struct enic *enic);
+int enic_dev_del_station_addr(struct enic *enic);
+int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
+ int broadcast, int promisc, int allmulti);
+int enic_dev_add_addr(struct enic *enic, u8 *addr);
+int enic_dev_del_addr(struct enic *enic, u8 *addr);
+void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+int enic_dev_notify_unset(struct enic *enic);
+int enic_dev_hang_notify(struct enic *enic);
+int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
+int enic_dev_enable(struct enic *enic);
+int enic_dev_disable(struct enic *enic);
+int enic_dev_intr_coal_timer_info(struct enic *enic);
+int enic_vnic_dev_deinit(struct enic *enic);
+int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp);
+int enic_dev_deinit_done(struct enic *enic, int *status);
+int enic_dev_enable2(struct enic *enic, int arg);
+int enic_dev_enable2_done(struct enic *enic, int *status);
+int enic_dev_status_to_errno(int devcmd_status);
+
+#endif /* _ENIC_DEV_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
new file mode 100644
index 000000000000..1bc908f595de
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -0,0 +1,2591 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/ethtool.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/rtnetlink.h>
+#include <linux/prefetch.h>
+#include <net/ip6_checksum.h>
+
+#include "cq_enet_desc.h"
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_vic.h"
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_dev.h"
+#include "enic_pp.h"
+
+#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
+#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
+#define MAX_TSO (1 << 16)
+#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
+
+#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
+#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */
+
+/* Supported devices */
+static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
+ { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
+ { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
+ { 0, } /* end of table */
+};
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, enic_id_table);
+
+struct enic_stat {
+ char name[ETH_GSTRING_LEN];
+ unsigned int offset;
+};
+
+#define ENIC_TX_STAT(stat) \
+ { .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
+#define ENIC_RX_STAT(stat) \
+ { .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
+
+static const struct enic_stat enic_tx_stats[] = {
+ ENIC_TX_STAT(tx_frames_ok),
+ ENIC_TX_STAT(tx_unicast_frames_ok),
+ ENIC_TX_STAT(tx_multicast_frames_ok),
+ ENIC_TX_STAT(tx_broadcast_frames_ok),
+ ENIC_TX_STAT(tx_bytes_ok),
+ ENIC_TX_STAT(tx_unicast_bytes_ok),
+ ENIC_TX_STAT(tx_multicast_bytes_ok),
+ ENIC_TX_STAT(tx_broadcast_bytes_ok),
+ ENIC_TX_STAT(tx_drops),
+ ENIC_TX_STAT(tx_errors),
+ ENIC_TX_STAT(tx_tso),
+};
+
+static const struct enic_stat enic_rx_stats[] = {
+ ENIC_RX_STAT(rx_frames_ok),
+ ENIC_RX_STAT(rx_frames_total),
+ ENIC_RX_STAT(rx_unicast_frames_ok),
+ ENIC_RX_STAT(rx_multicast_frames_ok),
+ ENIC_RX_STAT(rx_broadcast_frames_ok),
+ ENIC_RX_STAT(rx_bytes_ok),
+ ENIC_RX_STAT(rx_unicast_bytes_ok),
+ ENIC_RX_STAT(rx_multicast_bytes_ok),
+ ENIC_RX_STAT(rx_broadcast_bytes_ok),
+ ENIC_RX_STAT(rx_drop),
+ ENIC_RX_STAT(rx_no_bufs),
+ ENIC_RX_STAT(rx_errors),
+ ENIC_RX_STAT(rx_rss),
+ ENIC_RX_STAT(rx_crc_errors),
+ ENIC_RX_STAT(rx_frames_64),
+ ENIC_RX_STAT(rx_frames_127),
+ ENIC_RX_STAT(rx_frames_255),
+ ENIC_RX_STAT(rx_frames_511),
+ ENIC_RX_STAT(rx_frames_1023),
+ ENIC_RX_STAT(rx_frames_1518),
+ ENIC_RX_STAT(rx_frames_to_max),
+};
+
+static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
+static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
+
+int enic_is_dynamic(struct enic *enic)
+{
+ return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
+}
+
+int enic_sriov_enabled(struct enic *enic)
+{
+ return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
+}
+
+int enic_is_valid_vf(struct enic *enic, int vf)
+{
+#ifdef CONFIG_PCI_IOV
+ return vf >= 0 && vf < enic->num_vfs;
+#else
+ return 0;
+#endif
+}
+
+static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
+{
+ return rq;
+}
+
+static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
+{
+ return enic->rq_count + wq;
+}
+
+static inline unsigned int enic_legacy_io_intr(void)
+{
+ return 0;
+}
+
+static inline unsigned int enic_legacy_err_intr(void)
+{
+ return 1;
+}
+
+static inline unsigned int enic_legacy_notify_intr(void)
+{
+ return 2;
+}
+
+static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq)
+{
+ return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
+}
+
+static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq)
+{
+ return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
+}
+
+static inline unsigned int enic_msix_err_intr(struct enic *enic)
+{
+ return enic->rq_count + enic->wq_count;
+}
+
+static inline unsigned int enic_msix_notify_intr(struct enic *enic)
+{
+ return enic->rq_count + enic->wq_count + 1;
+}
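+
+/*
+ * Taken together, the helpers above imply the MSI-X vector layout:
+ * one vector per RQ, then one per WQ, then the error vector, then the
+ * notify vector. That matches the ENIC_INTR_MAX = ENIC_CQ_MAX + 2
+ * sizing in enic.h, assuming interrupt_offset is assigned in queue
+ * order during resource initialization.
+ */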
+
+static int enic_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ if (netif_carrier_ok(netdev)) {
+ ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ethtool_cmd_speed_set(ecmd, -1);
+ ecmd->duplex = -1;
+ }
+
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static void enic_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct enic *enic = netdev_priv(netdev);
+ struct vnic_devcmd_fw_info *fw_info;
+
+ enic_dev_fw_info(enic, &fw_info);
+
+	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+	strlcpy(drvinfo->fw_version, fw_info->fw_version,
+		sizeof(drvinfo->fw_version));
+	strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
+		sizeof(drvinfo->bus_info));
+}
+
+static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ unsigned int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < enic_n_tx_stats; i++) {
+ memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < enic_n_rx_stats; i++) {
+ memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int enic_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return enic_n_tx_stats + enic_n_rx_stats;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void enic_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct enic *enic = netdev_priv(netdev);
+ struct vnic_stats *vstats;
+ unsigned int i;
+
+ enic_dev_stats_dump(enic, &vstats);
+
+ for (i = 0; i < enic_n_tx_stats; i++)
+ *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
+ for (i = 0; i < enic_n_rx_stats; i++)
+ *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
+}
+
+static u32 enic_get_msglevel(struct net_device *netdev)
+{
+ struct enic *enic = netdev_priv(netdev);
+ return enic->msg_enable;
+}
+
+static void enic_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct enic *enic = netdev_priv(netdev);
+ enic->msg_enable = value;
+}
+
+static int enic_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
+ ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
+
+ return 0;
+}
+
+static int enic_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct enic *enic = netdev_priv(netdev);
+ u32 tx_coalesce_usecs;
+ u32 rx_coalesce_usecs;
+ unsigned int i, intr;
+
+ tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
+ vnic_dev_get_intr_coal_timer_max(enic->vdev));
+ rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
+ vnic_dev_get_intr_coal_timer_max(enic->vdev));
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ if (tx_coalesce_usecs != rx_coalesce_usecs)
+ return -EINVAL;
+
+ intr = enic_legacy_io_intr();
+ vnic_intr_coalescing_timer_set(&enic->intr[intr],
+ tx_coalesce_usecs);
+ break;
+ case VNIC_DEV_INTR_MODE_MSI:
+ if (tx_coalesce_usecs != rx_coalesce_usecs)
+ return -EINVAL;
+
+ vnic_intr_coalescing_timer_set(&enic->intr[0],
+ tx_coalesce_usecs);
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ for (i = 0; i < enic->wq_count; i++) {
+ intr = enic_msix_wq_intr(enic, i);
+ vnic_intr_coalescing_timer_set(&enic->intr[intr],
+ tx_coalesce_usecs);
+ }
+
+ for (i = 0; i < enic->rq_count; i++) {
+ intr = enic_msix_rq_intr(enic, i);
+ vnic_intr_coalescing_timer_set(&enic->intr[intr],
+ rx_coalesce_usecs);
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ enic->tx_coalesce_usecs = tx_coalesce_usecs;
+ enic->rx_coalesce_usecs = rx_coalesce_usecs;
+
+ return 0;
+}
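+
+/*
+ * From userspace this corresponds to e.g. "ethtool -C ethX rx-usecs 30
+ * tx-usecs 30". In INTx and MSI modes a single vector serves both
+ * directions, which is why the -EINVAL checks above require the two
+ * values to match.
+ */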
+
+static const struct ethtool_ops enic_ethtool_ops = {
+ .get_settings = enic_get_settings,
+ .get_drvinfo = enic_get_drvinfo,
+ .get_msglevel = enic_get_msglevel,
+ .set_msglevel = enic_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_strings = enic_get_strings,
+ .get_sset_count = enic_get_sset_count,
+ .get_ethtool_stats = enic_get_ethtool_stats,
+ .get_coalesce = enic_get_coalesce,
+ .set_coalesce = enic_set_coalesce,
+};
+
+static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
+{
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+
+ if (buf->sop)
+ pci_unmap_single(enic->pdev, buf->dma_addr,
+ buf->len, PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(enic->pdev, buf->dma_addr,
+ buf->len, PCI_DMA_TODEVICE);
+
+ if (buf->os_buf)
+ dev_kfree_skb_any(buf->os_buf);
+}
+
+static void enic_wq_free_buf(struct vnic_wq *wq,
+ struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
+{
+ enic_free_wq_buf(wq, buf);
+}
+
+static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+ u8 type, u16 q_number, u16 completed_index, void *opaque)
+{
+ struct enic *enic = vnic_dev_priv(vdev);
+
+ spin_lock(&enic->wq_lock[q_number]);
+
+ vnic_wq_service(&enic->wq[q_number], cq_desc,
+ completed_index, enic_wq_free_buf,
+ opaque);
+
+ if (netif_queue_stopped(enic->netdev) &&
+ vnic_wq_desc_avail(&enic->wq[q_number]) >=
+ (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
+ netif_wake_queue(enic->netdev);
+
+ spin_unlock(&enic->wq_lock[q_number]);
+
+ return 0;
+}
+
+static void enic_log_q_error(struct enic *enic)
+{
+ unsigned int i;
+ u32 error_status;
+
+ for (i = 0; i < enic->wq_count; i++) {
+ error_status = vnic_wq_error_status(&enic->wq[i]);
+ if (error_status)
+ netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
+ i, error_status);
+ }
+
+ for (i = 0; i < enic->rq_count; i++) {
+ error_status = vnic_rq_error_status(&enic->rq[i]);
+ if (error_status)
+ netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
+ i, error_status);
+ }
+}
+
+static void enic_msglvl_check(struct enic *enic)
+{
+ u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);
+
+ if (msg_enable != enic->msg_enable) {
+ netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
+ enic->msg_enable, msg_enable);
+ enic->msg_enable = msg_enable;
+ }
+}
+
+static void enic_mtu_check(struct enic *enic)
+{
+ u32 mtu = vnic_dev_mtu(enic->vdev);
+ struct net_device *netdev = enic->netdev;
+
+ if (mtu && mtu != enic->port_mtu) {
+ enic->port_mtu = mtu;
+ if (enic_is_dynamic(enic)) {
+ mtu = max_t(int, ENIC_MIN_MTU,
+ min_t(int, ENIC_MAX_MTU, mtu));
+ if (mtu != netdev->mtu)
+ schedule_work(&enic->change_mtu_work);
+ } else {
+ if (mtu < netdev->mtu)
+ netdev_warn(netdev,
+ "interface MTU (%d) set higher "
+ "than switch port MTU (%d)\n",
+ netdev->mtu, mtu);
+ }
+ }
+}
+
+static void enic_link_check(struct enic *enic)
+{
+ int link_status = vnic_dev_link_status(enic->vdev);
+ int carrier_ok = netif_carrier_ok(enic->netdev);
+
+ if (link_status && !carrier_ok) {
+ netdev_info(enic->netdev, "Link UP\n");
+ netif_carrier_on(enic->netdev);
+ } else if (!link_status && carrier_ok) {
+ netdev_info(enic->netdev, "Link DOWN\n");
+ netif_carrier_off(enic->netdev);
+ }
+}
+
+static void enic_notify_check(struct enic *enic)
+{
+ enic_msglvl_check(enic);
+ enic_mtu_check(enic);
+ enic_link_check(enic);
+}
+
+#define ENIC_TEST_INTR(pba, i) ((pba) & (1 << (i)))
+
+static irqreturn_t enic_isr_legacy(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct enic *enic = netdev_priv(netdev);
+ unsigned int io_intr = enic_legacy_io_intr();
+ unsigned int err_intr = enic_legacy_err_intr();
+ unsigned int notify_intr = enic_legacy_notify_intr();
+ u32 pba;
+
+ vnic_intr_mask(&enic->intr[io_intr]);
+
+ pba = vnic_intr_legacy_pba(enic->legacy_pba);
+ if (!pba) {
+ vnic_intr_unmask(&enic->intr[io_intr]);
+ return IRQ_NONE; /* not our interrupt */
+ }
+
+ if (ENIC_TEST_INTR(pba, notify_intr)) {
+ vnic_intr_return_all_credits(&enic->intr[notify_intr]);
+ enic_notify_check(enic);
+ }
+
+ if (ENIC_TEST_INTR(pba, err_intr)) {
+ vnic_intr_return_all_credits(&enic->intr[err_intr]);
+ enic_log_q_error(enic);
+ /* schedule recovery from WQ/RQ error */
+ schedule_work(&enic->reset);
+ return IRQ_HANDLED;
+ }
+
+ if (ENIC_TEST_INTR(pba, io_intr)) {
+ if (napi_schedule_prep(&enic->napi[0]))
+ __napi_schedule(&enic->napi[0]);
+ } else {
+ vnic_intr_unmask(&enic->intr[io_intr]);
+ }
+
+ return IRQ_HANDLED;
+}
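+
+/*
+ * Triage order above matters: notify and error causes are handled
+ * (and their credits returned) before I/O work is punted to NAPI, and
+ * the I/O interrupt stays masked until polling completes and unmasks
+ * it again.
+ */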
+
+static irqreturn_t enic_isr_msi(int irq, void *data)
+{
+ struct enic *enic = data;
+
+ /* With MSI, there is no sharing of interrupts, so this is
+ * our interrupt and there is no need to ack it. The device
+ * is not providing per-vector masking, so the OS will not
+ * write to PCI config space to mask/unmask the interrupt.
+ * We're using mask_on_assertion for MSI, so the device
+ * automatically masks the interrupt when the interrupt is
+ * generated. Later, when exiting polling, the interrupt
+ * will be unmasked (see enic_poll).
+ *
+ * Also, the device uses the same PCIe Traffic Class (TC)
+ * for Memory Write data and MSI, so there are no ordering
+ * issues; the MSI will always arrive at the Root Complex
+ * _after_ corresponding Memory Writes (i.e. descriptor
+ * writes).
+ */
+
+ napi_schedule(&enic->napi[0]);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t enic_isr_msix_rq(int irq, void *data)
+{
+ struct napi_struct *napi = data;
+
+ /* schedule NAPI polling for RQ cleanup */
+ napi_schedule(napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t enic_isr_msix_wq(int irq, void *data)
+{
+ struct enic *enic = data;
+ unsigned int cq = enic_cq_wq(enic, 0);
+ unsigned int intr = enic_msix_wq_intr(enic, 0);
+ unsigned int wq_work_to_do = -1; /* no limit */
+ unsigned int wq_work_done;
+
+ wq_work_done = vnic_cq_service(&enic->cq[cq],
+ wq_work_to_do, enic_wq_service, NULL);
+
+ vnic_intr_return_credits(&enic->intr[intr],
+ wq_work_done,
+ 1 /* unmask intr */,
+ 1 /* reset intr timer */);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t enic_isr_msix_err(int irq, void *data)
+{
+ struct enic *enic = data;
+ unsigned int intr = enic_msix_err_intr(enic);
+
+ vnic_intr_return_all_credits(&enic->intr[intr]);
+
+ enic_log_q_error(enic);
+
+ /* schedule recovery from WQ/RQ error */
+ schedule_work(&enic->reset);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t enic_isr_msix_notify(int irq, void *data)
+{
+ struct enic *enic = data;
+ unsigned int intr = enic_msix_notify_intr(enic);
+
+ vnic_intr_return_all_credits(&enic->intr[intr]);
+ enic_notify_check(enic);
+
+ return IRQ_HANDLED;
+}
+
+static inline void enic_queue_wq_skb_cont(struct enic *enic,
+ struct vnic_wq *wq, struct sk_buff *skb,
+ unsigned int len_left, int loopback)
+{
+ skb_frag_t *frag;
+
+ /* Queue additional data fragments */
+ for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
+ len_left -= frag->size;
+ enic_queue_wq_desc_cont(wq, skb,
+ skb_frag_dma_map(&enic->pdev->dev,
+ frag, 0, frag->size,
+ DMA_TO_DEVICE),
+ frag->size,
+ (len_left == 0), /* EOP? */
+ loopback);
+ }
+}
+
+static inline void enic_queue_wq_skb_vlan(struct enic *enic,
+ struct vnic_wq *wq, struct sk_buff *skb,
+ int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+{
+ unsigned int head_len = skb_headlen(skb);
+ unsigned int len_left = skb->len - head_len;
+ int eop = (len_left == 0);
+
+ /* Queue the main skb fragment. The fragments are no larger
+ * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
+ * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
+ * per fragment is queued.
+ */
+ enic_queue_wq_desc(wq, skb,
+ pci_map_single(enic->pdev, skb->data,
+ head_len, PCI_DMA_TODEVICE),
+ head_len,
+ vlan_tag_insert, vlan_tag,
+ eop, loopback);
+
+ if (!eop)
+ enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+}
+
+static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
+ struct vnic_wq *wq, struct sk_buff *skb,
+ int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+{
+ unsigned int head_len = skb_headlen(skb);
+ unsigned int len_left = skb->len - head_len;
+ unsigned int hdr_len = skb_checksum_start_offset(skb);
+ unsigned int csum_offset = hdr_len + skb->csum_offset;
+ int eop = (len_left == 0);
+
+ /* Queue the main skb fragment. The fragments are no larger
+ * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
+ * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
+ * per fragment is queued.
+ */
+ enic_queue_wq_desc_csum_l4(wq, skb,
+ pci_map_single(enic->pdev, skb->data,
+ head_len, PCI_DMA_TODEVICE),
+ head_len,
+ csum_offset,
+ hdr_len,
+ vlan_tag_insert, vlan_tag,
+ eop, loopback);
+
+ if (!eop)
+ enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+}
+
+static inline void enic_queue_wq_skb_tso(struct enic *enic,
+ struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
+ int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+{
+ unsigned int frag_len_left = skb_headlen(skb);
+ unsigned int len_left = skb->len - frag_len_left;
+ unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int eop = (len_left == 0);
+ unsigned int len;
+ dma_addr_t dma_addr;
+ unsigned int offset = 0;
+ skb_frag_t *frag;
+
+ /* Preload TCP csum field with IP pseudo hdr calculated
+ * with IP length set to zero. HW will later add in length
+ * to each TCP segment resulting from the TSO.
+ */
+
+ if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
+ ip_hdr(skb)->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+ } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
+ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+ }
+
+ /* Queue WQ_ENET_MAX_DESC_LEN length descriptors
+ * for the main skb fragment
+ */
+ while (frag_len_left) {
+ len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
+ dma_addr = pci_map_single(enic->pdev, skb->data + offset,
+ len, PCI_DMA_TODEVICE);
+ enic_queue_wq_desc_tso(wq, skb,
+ dma_addr,
+ len,
+ mss, hdr_len,
+ vlan_tag_insert, vlan_tag,
+ eop && (len == frag_len_left), loopback);
+ frag_len_left -= len;
+ offset += len;
+ }
+
+ if (eop)
+ return;
+
+ /* Queue WQ_ENET_MAX_DESC_LEN length descriptors
+ * for additional data fragments
+ */
+ for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
+ len_left -= frag->size;
+ frag_len_left = frag->size;
+ offset = 0;
+
+ while (frag_len_left) {
+ len = min(frag_len_left,
+ (unsigned int)WQ_ENET_MAX_DESC_LEN);
+ dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
+ offset, len,
+ DMA_TO_DEVICE);
+ enic_queue_wq_desc_cont(wq, skb,
+ dma_addr,
+ len,
+ (len_left == 0) &&
+ (len == frag_len_left), /* EOP? */
+ loopback);
+ frag_len_left -= len;
+ offset += len;
+ }
+ }
+}
+
+static inline void enic_queue_wq_skb(struct enic *enic,
+ struct vnic_wq *wq, struct sk_buff *skb)
+{
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int vlan_tag = 0;
+ int vlan_tag_insert = 0;
+ int loopback = 0;
+
+ if (vlan_tx_tag_present(skb)) {
+ /* VLAN tag from trunking driver */
+ vlan_tag_insert = 1;
+ vlan_tag = vlan_tx_tag_get(skb);
+ } else if (enic->loop_enable) {
+ vlan_tag = enic->loop_tag;
+ loopback = 1;
+ }
+
+ if (mss)
+ enic_queue_wq_skb_tso(enic, wq, skb, mss,
+ vlan_tag_insert, vlan_tag, loopback);
+ else if (skb->ip_summed == CHECKSUM_PARTIAL)
+ enic_queue_wq_skb_csum_l4(enic, wq, skb,
+ vlan_tag_insert, vlan_tag, loopback);
+ else
+ enic_queue_wq_skb_vlan(enic, wq, skb,
+ vlan_tag_insert, vlan_tag, loopback);
+}
+
+/* netif_tx_lock held, process context with BHs disabled, or BH */
+static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct enic *enic = netdev_priv(netdev);
+ struct vnic_wq *wq = &enic->wq[0];
+ unsigned long flags;
+
+ if (skb->len == 0) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
+ * which is very likely. In the off chance it's going to take
+ * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
+ */
+
+ if (skb_shinfo(skb)->gso_size == 0 &&
+ skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
+ skb_linearize(skb)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ spin_lock_irqsave(&enic->wq_lock[0], flags);
+
+ if (vnic_wq_desc_avail(wq) <
+ skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
+ netif_stop_queue(netdev);
+ /* This is a hard error, log it */
+ netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
+ spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ enic_queue_wq_skb(enic, wq, skb);
+
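+ /* Stop the queue early if a worst-case send (MAX_SKB_FRAGS
+ * fragments plus descriptor splits) would no longer fit.
+ */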
+ if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
+ netif_stop_queue(netdev);
+
+ spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+
+ return NETDEV_TX_OK;
+}
+
+/* dev_base_lock rwlock held, nominally process context */
+static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *net_stats)
+{
+ struct enic *enic = netdev_priv(netdev);
+ struct vnic_stats *stats;
+
+ enic_dev_stats_dump(enic, &stats);
+
+ net_stats->tx_packets = stats->tx.tx_frames_ok;
+ net_stats->tx_bytes = stats->tx.tx_bytes_ok;
+ net_stats->tx_errors = stats->tx.tx_errors;
+ net_stats->tx_dropped = stats->tx.tx_drops;
+
+ net_stats->rx_packets = stats->rx.rx_frames_ok;
+ net_stats->rx_bytes = stats->rx.rx_bytes_ok;
+ net_stats->rx_errors = stats->rx.rx_errors;
+ net_stats->multicast = stats->rx.rx_multicast_frames_ok;
+ net_stats->rx_over_errors = enic->rq_truncated_pkts;
+ net_stats->rx_crc_errors = enic->rq_bad_fcs;
+ net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;
+
+ return net_stats;
+}
+
+void enic_reset_addr_lists(struct enic *enic)
+{
+ enic->mc_count = 0;
+ enic->uc_count = 0;
+ enic->flags = 0;
+}
+
+static int enic_set_mac_addr(struct net_device *netdev, char *addr)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ if (enic_is_dynamic(enic)) {
+ if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
+ return -EADDRNOTAVAIL;
+ } else {
+ if (!is_valid_ether_addr(addr))
+ return -EADDRNOTAVAIL;
+ }
+
+ memcpy(netdev->dev_addr, addr, netdev->addr_len);
+
+ return 0;
+}
+
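+/* For dynamic vNICs the station address is deleted and re-added
+ * around the change when the interface is running.
+ */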
+static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
+{
+ struct enic *enic = netdev_priv(netdev);
+ struct sockaddr *saddr = p;
+ char *addr = saddr->sa_data;
+ int err;
+
+ if (netif_running(enic->netdev)) {
+ err = enic_dev_del_station_addr(enic);
+ if (err)
+ return err;
+ }
+
+ err = enic_set_mac_addr(netdev, addr);
+ if (err)
+ return err;
+
+ if (netif_running(enic->netdev)) {
+ err = enic_dev_add_station_addr(enic);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static int enic_set_mac_address(struct net_device *netdev, void *p)
+{
+ struct sockaddr *saddr = p;
+ char *addr = saddr->sa_data;
+ struct enic *enic = netdev_priv(netdev);
+ int err;
+
+ err = enic_dev_del_station_addr(enic);
+ if (err)
+ return err;
+
+ err = enic_set_mac_addr(netdev, addr);
+ if (err)
+ return err;
+
+ return enic_dev_add_station_addr(enic);
+}
+
+static void enic_update_multicast_addr_list(struct enic *enic)
+{
+ struct net_device *netdev = enic->netdev;
+ struct netdev_hw_addr *ha;
+ unsigned int mc_count = netdev_mc_count(netdev);
+ u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
+ unsigned int i, j;
+
+ if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
+ netdev_warn(netdev, "Registering only %d out of %d "
+ "multicast addresses\n",
+ ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
+ mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
+ }
+
+ /* Is there an easier way? Trying to minimize the
+ * calls to add/del multicast addrs. We keep the
+ * addrs from the last call in enic->mc_addr and
+ * look for changes to add/del.
+ */
+
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (i == mc_count)
+ break;
+ memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
+ }
+
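+ /* Pass 1: delete addrs registered last time but absent now */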
+ for (i = 0; i < enic->mc_count; i++) {
+ for (j = 0; j < mc_count; j++)
+ if (compare_ether_addr(enic->mc_addr[i],
+ mc_addr[j]) == 0)
+ break;
+ if (j == mc_count)
+ enic_dev_del_addr(enic, enic->mc_addr[i]);
+ }
+
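+ /* Pass 2: add addrs present now but not registered last time */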
+ for (i = 0; i < mc_count; i++) {
+ for (j = 0; j < enic->mc_count; j++)
+ if (compare_ether_addr(mc_addr[i],
+ enic->mc_addr[j]) == 0)
+ break;
+ if (j == enic->mc_count)
+ enic_dev_add_addr(enic, mc_addr[i]);
+ }
+
+ /* Save the list to compare against next time
+ */
+
+ for (i = 0; i < mc_count; i++)
+ memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);
+
+ enic->mc_count = mc_count;
+}
+
+static void enic_update_unicast_addr_list(struct enic *enic)
+{
+ struct net_device *netdev = enic->netdev;
+ struct netdev_hw_addr *ha;
+ unsigned int uc_count = netdev_uc_count(netdev);
+ u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
+ unsigned int i, j;
+
+ if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
+ netdev_warn(netdev, "Registering only %d out of %d "
+ "unicast addresses\n",
+ ENIC_UNICAST_PERFECT_FILTERS, uc_count);
+ uc_count = ENIC_UNICAST_PERFECT_FILTERS;
+ }
+
+ /* Is there an easier way? Trying to minimize the
+ * calls to add/del unicast addrs. We keep the
+ * addrs from the last call in enic->uc_addr and
+ * look for changes to add/del.
+ */
+
+ i = 0;
+ netdev_for_each_uc_addr(ha, netdev) {
+ if (i == uc_count)
+ break;
+ memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
+ }
+
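+ /* Pass 1: delete addrs registered last time but absent now */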
+ for (i = 0; i < enic->uc_count; i++) {
+ for (j = 0; j < uc_count; j++)
+ if (compare_ether_addr(enic->uc_addr[i],
+ uc_addr[j]) == 0)
+ break;
+ if (j == uc_count)
+ enic_dev_del_addr(enic, enic->uc_addr[i]);
+ }
+
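+ /* Pass 2: add addrs present now but not registered last time */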
+ for (i = 0; i < uc_count; i++) {
+ for (j = 0; j < enic->uc_count; j++)
+ if (compare_ether_addr(uc_addr[i],
+ enic->uc_addr[j]) == 0)
+ break;
+ if (j == enic->uc_count)
+ enic_dev_add_addr(enic, uc_addr[i]);
+ }
+
+ /* Save the list to compare against next time
+ */
+
+ for (i = 0; i < uc_count; i++)
+ memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);
+
+ enic->uc_count = uc_count;
+}
+
+/* netif_tx_lock held, BHs disabled */
+static void enic_set_rx_mode(struct net_device *netdev)
+{
+ struct enic *enic = netdev_priv(netdev);
+ int directed = 1;
+ int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
+ int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
+ int promisc = (netdev->flags & IFF_PROMISC) ||
+ netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
+ int allmulti = (netdev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
+ unsigned int flags = netdev->flags |
+ (allmulti ? IFF_ALLMULTI : 0) |
+ (promisc ? IFF_PROMISC : 0);
+
+ if (enic->flags != flags) {
+ enic->flags = flags;
+ enic_dev_packet_filter(enic, directed,
+ multicast, broadcast, promisc, allmulti);
+ }
+
+ if (!promisc) {
+ enic_update_unicast_addr_list(enic);
+ if (!allmulti)
+ enic_update_multicast_addr_list(enic);
+ }
+}
+
+/* netif_tx_lock held, BHs disabled */
+static void enic_tx_timeout(struct net_device *netdev)
+{
+ struct enic *enic = netdev_priv(netdev);
+ schedule_work(&enic->reset);
+}
+
+static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct enic *enic = netdev_priv(netdev);
+ struct enic_port_profile *pp;
+ int err;
+
+ ENIC_PP_BY_INDEX(enic, vf, pp, &err);
+ if (err)
+ return err;
+
+ if (!is_valid_ether_addr(mac))
+ return -EINVAL;
+
+ memcpy(pp->vf_mac, mac, ETH_ALEN);
+
+ return 0;
+}
+
+static int enic_set_vf_port(struct net_device *netdev, int vf,
+ struct nlattr *port[])
+{
+ struct enic *enic = netdev_priv(netdev);
+ struct enic_port_profile prev_pp;
+ struct enic_port_profile *pp;
+ int err = 0, restore_pp = 1;
+
+ ENIC_PP_BY_INDEX(enic, vf, pp, &err);
+ if (err)
+ return err;
+
+ if (!port[IFLA_PORT_REQUEST])
+ return -EOPNOTSUPP;
+
+ memcpy(&prev_pp, pp, sizeof(*enic->pp));
+ memset(pp, 0, sizeof(*enic->pp));
+
+ pp->set |= ENIC_SET_REQUEST;
+ pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);
+
+ if (port[IFLA_PORT_PROFILE]) {
+ pp->set |= ENIC_SET_NAME;
+ memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
+ PORT_PROFILE_MAX);
+ }
+
+ if (port[IFLA_PORT_INSTANCE_UUID]) {
+ pp->set |= ENIC_SET_INSTANCE;
+ memcpy(pp->instance_uuid,
+ nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
+ }
+
+ if (port[IFLA_PORT_HOST_UUID]) {
+ pp->set |= ENIC_SET_HOST;
+ memcpy(pp->host_uuid,
+ nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
+ }
+
+ /* Special case handling: mac came from IFLA_VF_MAC */
+ if (!is_zero_ether_addr(prev_pp.vf_mac))
+ memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);
+
+ if (vf == PORT_SELF_VF && is_zero_ether_addr(netdev->dev_addr))
+ random_ether_addr(netdev->dev_addr);
+
+ err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
+ if (err) {
+ if (restore_pp) {
+ /* Things are still the way they were: Implicit
+ * DISASSOCIATE failed
+ */
+ memcpy(pp, &prev_pp, sizeof(*pp));
+ } else {
+ memset(pp, 0, sizeof(*pp));
+ if (vf == PORT_SELF_VF)
+ memset(netdev->dev_addr, 0, ETH_ALEN);
+ }
+ } else {
+ /* Set flag to indicate that the port assoc/disassoc
+ * request has been sent out to fw
+ */
+ pp->set |= ENIC_PORT_REQUEST_APPLIED;
+
+ /* If DISASSOCIATE, clean up all assigned/saved mac addresses */
+ if (pp->request == PORT_REQUEST_DISASSOCIATE) {
+ memset(pp->mac_addr, 0, ETH_ALEN);
+ if (vf == PORT_SELF_VF)
+ memset(netdev->dev_addr, 0, ETH_ALEN);
+ }
+ }
+
+ memset(pp->vf_mac, 0, ETH_ALEN);
+
+ return err;
+}
+
+static int enic_get_vf_port(struct net_device *netdev, int vf,
+ struct sk_buff *skb)
+{
+ struct enic *enic = netdev_priv(netdev);
+ u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
+ struct enic_port_profile *pp;
+ int err;
+
+ ENIC_PP_BY_INDEX(enic, vf, pp, &err);
+ if (err)
+ return err;
+
+ if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
+ return -ENODATA;
+
+ err = enic_process_get_pp_request(enic, vf, pp->request, &response);
+ if (err)
+ return err;
+
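+ /* The NLA_PUT* macros jump to nla_put_failure when the skb
+ * runs out of room.
+ */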
+ NLA_PUT_U16(skb, IFLA_PORT_REQUEST, pp->request);
+ NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
+ if (pp->set & ENIC_SET_NAME)
+ NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
+ pp->name);
+ if (pp->set & ENIC_SET_INSTANCE)
+ NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
+ pp->instance_uuid);
+ if (pp->set & ENIC_SET_HOST)
+ NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
+ pp->host_uuid);
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
+{
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+
+ if (!buf->os_buf)
+ return;
+
+ pci_unmap_single(enic->pdev, buf->dma_addr,
+ buf->len, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(buf->os_buf);
+}
+
+static int enic_rq_alloc_buf(struct vnic_rq *rq)
+{
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+ struct net_device *netdev = enic->netdev;
+ struct sk_buff *skb;
+ unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
+ unsigned int os_buf_index = 0;
+ dma_addr_t dma_addr;
+
+ skb = netdev_alloc_skb_ip_align(netdev, len);
+ if (!skb)
+ return -ENOMEM;
+
+ dma_addr = pci_map_single(enic->pdev, skb->data,
+ len, PCI_DMA_FROMDEVICE);
+
+ enic_queue_rq_desc(rq, skb, os_buf_index,
+ dma_addr, len);
+
+ return 0;
+}
+
+static void enic_rq_indicate_buf(struct vnic_rq *rq,
+ struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
+ int skipped, void *opaque)
+{
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+ struct net_device *netdev = enic->netdev;
+ struct sk_buff *skb;
+
+ u8 type, color, eop, sop, ingress_port, vlan_stripped;
+ u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
+ u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+ u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
+ u8 packet_error;
+ u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
+ u32 rss_hash;
+
+ if (skipped)
+ return;
+
+ skb = buf->os_buf;
+ prefetch(skb->data - NET_IP_ALIGN);
+ pci_unmap_single(enic->pdev, buf->dma_addr,
+ buf->len, PCI_DMA_FROMDEVICE);
+
+ cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
+ &type, &color, &q_number, &completed_index,
+ &ingress_port, &fcoe, &eop, &sop, &rss_type,
+ &csum_not_calc, &rss_hash, &bytes_written,
+ &packet_error, &vlan_stripped, &vlan_tci, &checksum,
+ &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
+ &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
+ &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
+ &fcs_ok);
+
+ if (packet_error) {
+
+ if (!fcs_ok) {
+ if (bytes_written > 0)
+ enic->rq_bad_fcs++;
+ else if (bytes_written == 0)
+ enic->rq_truncated_pkts++;
+ }
+
+ dev_kfree_skb_any(skb);
+
+ return;
+ }
+
+ if (eop && bytes_written > 0) {
+
+ /* Good receive
+ */
+
+ skb_put(skb, bytes_written);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
+ skb->csum = htons(checksum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+
+ skb->dev = netdev;
+
+ if (vlan_stripped)
+ __vlan_hwaccel_put_tag(skb, vlan_tci);
+
+ if (netdev->features & NETIF_F_GRO)
+ napi_gro_receive(&enic->napi[q_number], skb);
+ else
+ netif_receive_skb(skb);
+ } else {
+
+ /* Buffer overflow
+ */
+
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+ u8 type, u16 q_number, u16 completed_index, void *opaque)
+{
+ struct enic *enic = vnic_dev_priv(vdev);
+
+ vnic_rq_service(&enic->rq[q_number], cq_desc,
+ completed_index, VNIC_RQ_RETURN_DESC,
+ enic_rq_indicate_buf, opaque);
+
+ return 0;
+}
+
+static int enic_poll(struct napi_struct *napi, int budget)
+{
+ struct net_device *netdev = napi->dev;
+ struct enic *enic = netdev_priv(netdev);
+ unsigned int cq_rq = enic_cq_rq(enic, 0);
+ unsigned int cq_wq = enic_cq_wq(enic, 0);
+ unsigned int intr = enic_legacy_io_intr();
+ unsigned int rq_work_to_do = budget;
+ unsigned int wq_work_to_do = -1; /* no limit */
+ unsigned int work_done, rq_work_done, wq_work_done;
+ int err;
+
+ /* Service RQ (first) and WQ
+ */
+
+ rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
+ rq_work_to_do, enic_rq_service, NULL);
+
+ wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
+ wq_work_to_do, enic_wq_service, NULL);
+
+ /* Accumulate intr event credits for this polling
+ * cycle. An intr event is the completion of a
+ * WQ or RQ packet.
+ */
+
+ work_done = rq_work_done + wq_work_done;
+
+ if (work_done > 0)
+ vnic_intr_return_credits(&enic->intr[intr],
+ work_done,
+ 0 /* don't unmask intr */,
+ 0 /* don't reset intr timer */);
+
+ err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+
+ /* Buffer allocation failed. Stay in polling
+ * mode so we can try to fill the ring again.
+ */
+
+ if (err)
+ rq_work_done = rq_work_to_do;
+
+ if (rq_work_done < rq_work_to_do) {
+
+ /* Some work was done, but not enough to stay in
+ * polling mode, so exit polling
+ */
+
+ napi_complete(napi);
+ vnic_intr_unmask(&enic->intr[intr]);
+ }
+
+ return rq_work_done;
+}
+
+static int enic_poll_msix(struct napi_struct *napi, int budget)
+{
+ struct net_device *netdev = napi->dev;
+ struct enic *enic = netdev_priv(netdev);
+ unsigned int rq = (napi - &enic->napi[0]);
+ unsigned int cq = enic_cq_rq(enic, rq);
+ unsigned int intr = enic_msix_rq_intr(enic, rq);
+ unsigned int work_to_do = budget;
+ unsigned int work_done;
+ int err;
+
+ /* Service RQ
+ */
+
+ work_done = vnic_cq_service(&enic->cq[cq],
+ work_to_do, enic_rq_service, NULL);
+
+ /* Return intr event credits for this polling
+ * cycle. An intr event is the completion of an
+ * RQ packet.
+ */
+
+ if (work_done > 0)
+ vnic_intr_return_credits(&enic->intr[intr],
+ work_done,
+ 0 /* don't unmask intr */,
+ 0 /* don't reset intr timer */);
+
+ err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
+
+ /* Buffer allocation failed. Stay in polling mode
+ * so we can try to fill the ring again.
+ */
+
+ if (err)
+ work_done = work_to_do;
+
+ if (work_done < work_to_do) {
+
+ /* Some work was done, but not enough to stay in
+ * polling mode, so exit polling
+ */
+
+ napi_complete(napi);
+ vnic_intr_unmask(&enic->intr[intr]);
+ }
+
+ return work_done;
+}
+
+static void enic_notify_timer(unsigned long data)
+{
+ struct enic *enic = (struct enic *)data;
+
+ enic_notify_check(enic);
+
+ mod_timer(&enic->notify_timer,
+ round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
+}
+
+static void enic_free_intr(struct enic *enic)
+{
+ struct net_device *netdev = enic->netdev;
+ unsigned int i;
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ free_irq(enic->pdev->irq, netdev);
+ break;
+ case VNIC_DEV_INTR_MODE_MSI:
+ free_irq(enic->pdev->irq, enic);
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
+ if (enic->msix[i].requested)
+ free_irq(enic->msix_entry[i].vector,
+ enic->msix[i].devid);
+ break;
+ default:
+ break;
+ }
+}
+
+static int enic_request_intr(struct enic *enic)
+{
+ struct net_device *netdev = enic->netdev;
+ unsigned int i, intr;
+ int err = 0;
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+
+ case VNIC_DEV_INTR_MODE_INTX:
+
+ err = request_irq(enic->pdev->irq, enic_isr_legacy,
+ IRQF_SHARED, netdev->name, netdev);
+ break;
+
+ case VNIC_DEV_INTR_MODE_MSI:
+
+ err = request_irq(enic->pdev->irq, enic_isr_msi,
+ 0, netdev->name, enic);
+ break;
+
+ case VNIC_DEV_INTR_MODE_MSIX:
+
+ for (i = 0; i < enic->rq_count; i++) {
+ intr = enic_msix_rq_intr(enic, i);
+ sprintf(enic->msix[intr].devname,
+ "%.11s-rx-%d", netdev->name, i);
+ enic->msix[intr].isr = enic_isr_msix_rq;
+ enic->msix[intr].devid = &enic->napi[i];
+ }
+
+ for (i = 0; i < enic->wq_count; i++) {
+ intr = enic_msix_wq_intr(enic, i);
+ sprintf(enic->msix[intr].devname,
+ "%.11s-tx-%d", netdev->name, i);
+ enic->msix[intr].isr = enic_isr_msix_wq;
+ enic->msix[intr].devid = enic;
+ }
+
+ intr = enic_msix_err_intr(enic);
+ sprintf(enic->msix[intr].devname,
+ "%.11s-err", netdev->name);
+ enic->msix[intr].isr = enic_isr_msix_err;
+ enic->msix[intr].devid = enic;
+
+ intr = enic_msix_notify_intr(enic);
+ sprintf(enic->msix[intr].devname,
+ "%.11s-notify", netdev->name);
+ enic->msix[intr].isr = enic_isr_msix_notify;
+ enic->msix[intr].devid = enic;
+
+ for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
+ enic->msix[i].requested = 0;
+
+ for (i = 0; i < enic->intr_count; i++) {
+ err = request_irq(enic->msix_entry[i].vector,
+ enic->msix[i].isr, 0,
+ enic->msix[i].devname,
+ enic->msix[i].devid);
+ if (err) {
+ enic_free_intr(enic);
+ break;
+ }
+ enic->msix[i].requested = 1;
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static void enic_synchronize_irqs(struct enic *enic)
+{
+ unsigned int i;
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ case VNIC_DEV_INTR_MODE_MSI:
+ synchronize_irq(enic->pdev->irq);
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ for (i = 0; i < enic->intr_count; i++)
+ synchronize_irq(enic->msix_entry[i].vector);
+ break;
+ default:
+ break;
+ }
+}
+
+static int enic_dev_notify_set(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ err = vnic_dev_notify_set(enic->vdev,
+ enic_legacy_notify_intr());
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ err = vnic_dev_notify_set(enic->vdev,
+ enic_msix_notify_intr(enic));
+ break;
+ default:
+ err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
+ break;
+ }
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+static void enic_notify_timer_start(struct enic *enic)
+{
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_MSI:
+ mod_timer(&enic->notify_timer, jiffies);
+ break;
+ default:
+ /* Using intr for notification for INTx/MSI-X */
+ break;
+ }
+}
+
+/* rtnl lock is held, process context */
+static int enic_open(struct net_device *netdev)
+{
+ struct enic *enic = netdev_priv(netdev);
+ unsigned int i;
+ int err;
+
+ err = enic_request_intr(enic);
+ if (err) {
+ netdev_err(netdev, "Unable to request irq.\n");
+ return err;
+ }
+
+ err = enic_dev_notify_set(enic);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to alloc notify buffer, aborting.\n");
+ goto err_out_free_intr;
+ }
+
+ for (i = 0; i < enic->rq_count; i++) {
+ vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
+ /* Need at least one buffer on ring to get going */
+ if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
+ netdev_err(netdev, "Unable to alloc receive buffers\n");
+ err = -ENOMEM;
+ goto err_out_notify_unset;
+ }
+ }
+
+ for (i = 0; i < enic->wq_count; i++)
+ vnic_wq_enable(&enic->wq[i]);
+ for (i = 0; i < enic->rq_count; i++)
+ vnic_rq_enable(&enic->rq[i]);
+
+ if (!enic_is_dynamic(enic))
+ enic_dev_add_station_addr(enic);
+
+ enic_set_rx_mode(netdev);
+
+ netif_wake_queue(netdev);
+
+ for (i = 0; i < enic->rq_count; i++)
+ napi_enable(&enic->napi[i]);
+
+ enic_dev_enable(enic);
+
+ for (i = 0; i < enic->intr_count; i++)
+ vnic_intr_unmask(&enic->intr[i]);
+
+ enic_notify_timer_start(enic);
+
+ return 0;
+
+err_out_notify_unset:
+ enic_dev_notify_unset(enic);
+err_out_free_intr:
+ enic_free_intr(enic);
+
+ return err;
+}
+
+/* rtnl lock is held, process context */
+static int enic_stop(struct net_device *netdev)
+{
+ struct enic *enic = netdev_priv(netdev);
+ unsigned int i;
+ int err;
+
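+ /* Quiesce in order: mask and flush interrupts first so no new
+ * NAPI work is scheduled, then stop NAPI and disable the queues
+ * before draining and cleaning the rings.
+ */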
+ for (i = 0; i < enic->intr_count; i++) {
+ vnic_intr_mask(&enic->intr[i]);
+ (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
+ }
+
+ enic_synchronize_irqs(enic);
+
+ del_timer_sync(&enic->notify_timer);
+
+ enic_dev_disable(enic);
+
+ for (i = 0; i < enic->rq_count; i++)
+ napi_disable(&enic->napi[i]);
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ if (!enic_is_dynamic(enic))
+ enic_dev_del_station_addr(enic);
+
+ for (i = 0; i < enic->wq_count; i++) {
+ err = vnic_wq_disable(&enic->wq[i]);
+ if (err)
+ return err;
+ }
+ for (i = 0; i < enic->rq_count; i++) {
+ err = vnic_rq_disable(&enic->rq[i]);
+ if (err)
+ return err;
+ }
+
+ enic_dev_notify_unset(enic);
+ enic_free_intr(enic);
+
+ for (i = 0; i < enic->wq_count; i++)
+ vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
+ for (i = 0; i < enic->rq_count; i++)
+ vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+ for (i = 0; i < enic->cq_count; i++)
+ vnic_cq_clean(&enic->cq[i]);
+ for (i = 0; i < enic->intr_count; i++)
+ vnic_intr_clean(&enic->intr[i]);
+
+ return 0;
+}
+
+static int enic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct enic *enic = netdev_priv(netdev);
+ int running = netif_running(netdev);
+
+ if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
+ return -EINVAL;
+
+ if (enic_is_dynamic(enic))
+ return -EOPNOTSUPP;
+
+ if (running)
+ enic_stop(netdev);
+
+ netdev->mtu = new_mtu;
+
+ if (netdev->mtu > enic->port_mtu)
+ netdev_warn(netdev,
+ "interface MTU (%d) set higher than port MTU (%d)\n",
+ netdev->mtu, enic->port_mtu);
+
+ if (running)
+ enic_open(netdev);
+
+ return 0;
+}
+
+static void enic_change_mtu_work(struct work_struct *work)
+{
+ struct enic *enic = container_of(work, struct enic, change_mtu_work);
+ struct net_device *netdev = enic->netdev;
+ int new_mtu = vnic_dev_mtu(enic->vdev);
+ int err;
+ unsigned int i;
+
+ new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
+
+ rtnl_lock();
+
+ /* Stop RQ */
+ del_timer_sync(&enic->notify_timer);
+
+ for (i = 0; i < enic->rq_count; i++)
+ napi_disable(&enic->napi[i]);
+
+ vnic_intr_mask(&enic->intr[0]);
+ enic_synchronize_irqs(enic);
+ err = vnic_rq_disable(&enic->rq[0]);
+ if (err) {
+ netdev_err(netdev, "Unable to disable RQ.\n");
+ rtnl_unlock();
+ return;
+ }
+ vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
+ vnic_cq_clean(&enic->cq[0]);
+ vnic_intr_clean(&enic->intr[0]);
+
+ /* Fill RQ with new_mtu-sized buffers */
+ netdev->mtu = new_mtu;
+ vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+ /* Need at least one buffer on ring to get going */
+ if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
+ netdev_err(netdev, "Unable to alloc receive buffers.\n");
+ rtnl_unlock();
+ return;
+ }
+
+ /* Start RQ */
+ vnic_rq_enable(&enic->rq[0]);
+ napi_enable(&enic->napi[0]);
+ vnic_intr_unmask(&enic->intr[0]);
+ enic_notify_timer_start(enic);
+
+ rtnl_unlock();
+
+ netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void enic_poll_controller(struct net_device *netdev)
+{
+ struct enic *enic = netdev_priv(netdev);
+ struct vnic_dev *vdev = enic->vdev;
+ unsigned int i, intr;
+
+ switch (vnic_dev_get_intr_mode(vdev)) {
+ case VNIC_DEV_INTR_MODE_MSIX:
+ for (i = 0; i < enic->rq_count; i++) {
+ intr = enic_msix_rq_intr(enic, i);
+ enic_isr_msix_rq(enic->msix_entry[intr].vector,
+ &enic->napi[i]);
+ }
+
+ for (i = 0; i < enic->wq_count; i++) {
+ intr = enic_msix_wq_intr(enic, i);
+ enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
+ }
+
+ break;
+ case VNIC_DEV_INTR_MODE_MSI:
+ enic_isr_msi(enic->pdev->irq, enic);
+ break;
+ case VNIC_DEV_INTR_MODE_INTX:
+ enic_isr_legacy(enic->pdev->irq, netdev);
+ break;
+ default:
+ break;
+ }
+}
+#endif
+
+static int enic_dev_wait(struct vnic_dev *vdev,
+ int (*start)(struct vnic_dev *, int),
+ int (*finished)(struct vnic_dev *, int *),
+ int arg)
+{
+ unsigned long time;
+ int done;
+ int err;
+
+ BUG_ON(in_interrupt());
+
+ err = start(vdev, arg);
+ if (err)
+ return err;
+
+ /* Wait for func to complete...2 seconds max
+ */
+
+ time = jiffies + (HZ * 2);
+ do {
+
+ err = finished(vdev, &done);
+ if (err)
+ return err;
+
+ if (done)
+ return 0;
+
+ schedule_timeout_uninterruptible(HZ / 10);
+
+ } while (time_after(time, jiffies));
+
+ return -ETIMEDOUT;
+}
+
+static int enic_dev_open(struct enic *enic)
+{
+ int err;
+
+ err = enic_dev_wait(enic->vdev, vnic_dev_open,
+ vnic_dev_open_done, 0);
+ if (err)
+ dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
+ err);
+
+ return err;
+}
+
+static int enic_dev_hang_reset(struct enic *enic)
+{
+ int err;
+
+ err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
+ vnic_dev_hang_reset_done, 0);
+ if (err)
+ netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
+ err);
+
+ return err;
+}
+
+static int enic_set_rsskey(struct enic *enic)
+{
+ dma_addr_t rss_key_buf_pa;
+ union vnic_rss_key *rss_key_buf_va = NULL;
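+ /* Fixed 40-byte RSS hash key; the byte values below are the
+ * ASCII strings "UCSawesome", "PALOunique", "LINUXrocks" and
+ * "ENICiscool".
+ */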
+ union vnic_rss_key rss_key = {
+ .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
+ .key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
+ .key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
+ .key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
+ };
+ int err;
+
+ rss_key_buf_va = pci_alloc_consistent(enic->pdev,
+ sizeof(union vnic_rss_key), &rss_key_buf_pa);
+ if (!rss_key_buf_va)
+ return -ENOMEM;
+
+ memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
+
+ spin_lock(&enic->devcmd_lock);
+ err = enic_set_rss_key(enic,
+ rss_key_buf_pa,
+ sizeof(union vnic_rss_key));
+ spin_unlock(&enic->devcmd_lock);
+
+ pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
+ rss_key_buf_va, rss_key_buf_pa);
+
+ return err;
+}
+
+static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
+{
+ dma_addr_t rss_cpu_buf_pa;
+ union vnic_rss_cpu *rss_cpu_buf_va = NULL;
+ unsigned int i;
+ int err;
+
+ rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
+ sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
+ if (!rss_cpu_buf_va)
+ return -ENOMEM;
+
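+ /* Spread the 2^rss_hash_bits indirection-table entries
+ * round-robin across the receive queues.
+ */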
+ for (i = 0; i < (1 << rss_hash_bits); i++)
+ rss_cpu_buf_va->cpu[i / 4].b[i % 4] = i % enic->rq_count;
+
+ spin_lock(&enic->devcmd_lock);
+ err = enic_set_rss_cpu(enic,
+ rss_cpu_buf_pa,
+ sizeof(union vnic_rss_cpu));
+ spin_unlock(&enic->devcmd_lock);
+
+ pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
+ rss_cpu_buf_va, rss_cpu_buf_pa);
+
+ return err;
+}
+
+static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
+ u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
+{
+ const u8 tso_ipid_split_en = 0;
+ const u8 ig_vlan_strip_en = 1;
+ int err;
+
+ /* Enable VLAN tag stripping.
+ */
+
+ spin_lock(&enic->devcmd_lock);
+ err = enic_set_nic_cfg(enic,
+ rss_default_cpu, rss_hash_type,
+ rss_hash_bits, rss_base_cpu,
+ rss_enable, tso_ipid_split_en,
+ ig_vlan_strip_en);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+static int enic_set_rss_nic_cfg(struct enic *enic)
+{
+ struct device *dev = enic_get_dev(enic);
+ const u8 rss_default_cpu = 0;
+ const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
+ NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
+ NIC_CFG_RSS_HASH_TYPE_IPV6 |
+ NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
+ const u8 rss_hash_bits = 7;
+ const u8 rss_base_cpu = 0;
+ u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
+
+ if (rss_enable) {
+ if (!enic_set_rsskey(enic)) {
+ if (enic_set_rsscpu(enic, rss_hash_bits)) {
+ rss_enable = 0;
+ dev_warn(dev, "RSS disabled, "
+ "Failed to set RSS cpu indirection table.");
+ }
+ } else {
+ rss_enable = 0;
+ dev_warn(dev, "RSS disabled, Failed to set RSS key.\n");
+ }
+ }
+
+ return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
+ rss_hash_bits, rss_base_cpu, rss_enable);
+}
+
+static void enic_reset(struct work_struct *work)
+{
+ struct enic *enic = container_of(work, struct enic, reset);
+
+ if (!netif_running(enic->netdev))
+ return;
+
+ rtnl_lock();
+
+ enic_dev_hang_notify(enic);
+ enic_stop(enic->netdev);
+ enic_dev_hang_reset(enic);
+ enic_reset_addr_lists(enic);
+ enic_init_vnic_resources(enic);
+ enic_set_rss_nic_cfg(enic);
+ enic_dev_set_ig_vlan_rewrite_mode(enic);
+ enic_open(enic->netdev);
+
+ rtnl_unlock();
+}
+
+static int enic_set_intr_mode(struct enic *enic)
+{
+ unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
+ unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
+ unsigned int i;
+
+ /* Set interrupt mode (INTx, MSI, MSI-X) depending
+ * on system capabilities.
+ *
+ * Try MSI-X first
+ *
+ * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
+ * (the second to last INTR is used for WQ/RQ errors)
+ * (the last INTR is used for notifications)
+ */
+
+ BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
+ for (i = 0; i < n + m + 2; i++)
+ enic->msix_entry[i].entry = i;
+
+ /* Use multiple RQs if RSS is enabled
+ */
+
+ if (ENIC_SETTING(enic, RSS) &&
+ enic->config.intr_mode < 1 &&
+ enic->rq_count >= n &&
+ enic->wq_count >= m &&
+ enic->cq_count >= n + m &&
+ enic->intr_count >= n + m + 2) {
+
+ if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {
+
+ enic->rq_count = n;
+ enic->wq_count = m;
+ enic->cq_count = n + m;
+ enic->intr_count = n + m + 2;
+
+ vnic_dev_set_intr_mode(enic->vdev,
+ VNIC_DEV_INTR_MODE_MSIX);
+
+ return 0;
+ }
+ }
+
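+ /* If multi-RQ MSI-X is not possible, next try MSI-X with a
+ * single RQ (1 RQ, m WQs, 1+m CQs, 1+m+2 INTRs).
+ */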
+ if (enic->config.intr_mode < 1 &&
+ enic->rq_count >= 1 &&
+ enic->wq_count >= m &&
+ enic->cq_count >= 1 + m &&
+ enic->intr_count >= 1 + m + 2) {
+ if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) {
+
+ enic->rq_count = 1;
+ enic->wq_count = m;
+ enic->cq_count = 1 + m;
+ enic->intr_count = 1 + m + 2;
+
+ vnic_dev_set_intr_mode(enic->vdev,
+ VNIC_DEV_INTR_MODE_MSIX);
+
+ return 0;
+ }
+ }
+
+ /* Next try MSI
+ *
+ * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
+ */
+
+ if (enic->config.intr_mode < 2 &&
+ enic->rq_count >= 1 &&
+ enic->wq_count >= 1 &&
+ enic->cq_count >= 2 &&
+ enic->intr_count >= 1 &&
+ !pci_enable_msi(enic->pdev)) {
+
+ enic->rq_count = 1;
+ enic->wq_count = 1;
+ enic->cq_count = 2;
+ enic->intr_count = 1;
+
+ vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
+
+ return 0;
+ }
+
+ /* Next try INTx
+ *
+ * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
+ * (the first INTR is used for WQ/RQ)
+ * (the second INTR is used for WQ/RQ errors)
+ * (the last INTR is used for notifications)
+ */
+
+ if (enic->config.intr_mode < 3 &&
+ enic->rq_count >= 1 &&
+ enic->wq_count >= 1 &&
+ enic->cq_count >= 2 &&
+ enic->intr_count >= 3) {
+
+ enic->rq_count = 1;
+ enic->wq_count = 1;
+ enic->cq_count = 2;
+ enic->intr_count = 3;
+
+ vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
+
+ return 0;
+ }
+
+ vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
+
+ return -EINVAL;
+}
+
+static void enic_clear_intr_mode(struct enic *enic)
+{
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_MSIX:
+ pci_disable_msix(enic->pdev);
+ break;
+ case VNIC_DEV_INTR_MODE_MSI:
+ pci_disable_msi(enic->pdev);
+ break;
+ default:
+ break;
+ }
+
+ vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
+}
+
+static const struct net_device_ops enic_netdev_dynamic_ops = {
+ .ndo_open = enic_open,
+ .ndo_stop = enic_stop,
+ .ndo_start_xmit = enic_hard_start_xmit,
+ .ndo_get_stats64 = enic_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = enic_set_rx_mode,
+ .ndo_set_mac_address = enic_set_mac_address_dynamic,
+ .ndo_change_mtu = enic_change_mtu,
+ .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
+ .ndo_tx_timeout = enic_tx_timeout,
+ .ndo_set_vf_port = enic_set_vf_port,
+ .ndo_get_vf_port = enic_get_vf_port,
+ .ndo_set_vf_mac = enic_set_vf_mac,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = enic_poll_controller,
+#endif
+};
+
+static const struct net_device_ops enic_netdev_ops = {
+ .ndo_open = enic_open,
+ .ndo_stop = enic_stop,
+ .ndo_start_xmit = enic_hard_start_xmit,
+ .ndo_get_stats64 = enic_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = enic_set_mac_address,
+ .ndo_set_rx_mode = enic_set_rx_mode,
+ .ndo_change_mtu = enic_change_mtu,
+ .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
+ .ndo_tx_timeout = enic_tx_timeout,
+ .ndo_set_vf_port = enic_set_vf_port,
+ .ndo_get_vf_port = enic_get_vf_port,
+ .ndo_set_vf_mac = enic_set_vf_mac,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = enic_poll_controller,
+#endif
+};
+
+static void enic_dev_deinit(struct enic *enic)
+{
+ unsigned int i;
+
+ for (i = 0; i < enic->rq_count; i++)
+ netif_napi_del(&enic->napi[i]);
+
+ enic_free_vnic_resources(enic);
+ enic_clear_intr_mode(enic);
+}
+
+static int enic_dev_init(struct enic *enic)
+{
+ struct device *dev = enic_get_dev(enic);
+ struct net_device *netdev = enic->netdev;
+ unsigned int i;
+ int err;
+
+ /* Get interrupt coalesce timer info */
+ err = enic_dev_intr_coal_timer_info(enic);
+ if (err) {
+ dev_warn(dev, "Using default conversion factor for "
+ "interrupt coalesce timer\n");
+ vnic_dev_intr_coal_timer_info_default(enic->vdev);
+ }
+
+ /* Get vNIC configuration
+ */
+
+ err = enic_get_vnic_config(enic);
+ if (err) {
+ dev_err(dev, "Get vNIC configuration failed, aborting\n");
+ return err;
+ }
+
+ /* Get available resource counts
+ */
+
+ enic_get_res_counts(enic);
+
+ /* Set interrupt mode based on resource counts and system
+ * capabilities
+ */
+
+ err = enic_set_intr_mode(enic);
+ if (err) {
+ dev_err(dev, "Failed to set intr mode based on resource "
+ "counts and system capabilities, aborting\n");
+ return err;
+ }
+
+ /* Allocate and configure vNIC resources
+ */
+
+ err = enic_alloc_vnic_resources(enic);
+ if (err) {
+ dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
+ goto err_out_free_vnic_resources;
+ }
+
+ enic_init_vnic_resources(enic);
+
+ err = enic_set_rss_nic_cfg(enic);
+ if (err) {
+ dev_err(dev, "Failed to config nic, aborting\n");
+ goto err_out_free_vnic_resources;
+ }
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ default:
+ netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ for (i = 0; i < enic->rq_count; i++)
+ netif_napi_add(netdev, &enic->napi[i],
+ enic_poll_msix, 64);
+ break;
+ }
+
+ return 0;
+
+err_out_free_vnic_resources:
+ enic_clear_intr_mode(enic);
+ enic_free_vnic_resources(enic);
+
+ return err;
+}
+
+static void enic_iounmap(struct enic *enic)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
+ if (enic->bar[i].vaddr)
+ iounmap(enic->bar[i].vaddr);
+}
+
+static int __devinit enic_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *netdev;
+ struct enic *enic;
+ int using_dac = 0;
+ unsigned int i;
+ int err;
+ int num_pps = 1;
+#ifdef CONFIG_PCI_IOV
+ int pos = 0;
+#endif
+
+ /* Allocate net device structure and initialize. Private
+ * instance data is initialized to zero.
+ */
+
+ netdev = alloc_etherdev(sizeof(struct enic));
+ if (!netdev) {
+ pr_err("Etherdev alloc failed, aborting\n");
+ return -ENOMEM;
+ }
+
+ pci_set_drvdata(pdev, netdev);
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ enic = netdev_priv(netdev);
+ enic->netdev = netdev;
+ enic->pdev = pdev;
+
+ /* Setup PCI resources
+ */
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(dev, "Cannot enable PCI device, aborting\n");
+ goto err_out_free_netdev;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "Cannot request PCI regions, aborting\n");
+ goto err_out_disable_device;
+ }
+
+ pci_set_master(pdev);
+
+ /* Query PCI controller on system for DMA addressing
+ * limitation for the device. Try 40-bit first, and
+ * fall back to 32-bit.
+ */
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(dev, "No usable DMA configuration, aborting\n");
+ goto err_out_release_regions;
+ }
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(dev, "Unable to obtain %u-bit DMA "
+ "for consistent allocations, aborting\n", 32);
+ goto err_out_release_regions;
+ }
+ } else {
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+ if (err) {
+ dev_err(dev, "Unable to obtain %u-bit DMA "
+ "for consistent allocations, aborting\n", 40);
+ goto err_out_release_regions;
+ }
+ using_dac = 1;
+ }
+
+ /* Map vNIC resources from BAR0-5
+ */
+
+ for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
+ if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
+ continue;
+ enic->bar[i].len = pci_resource_len(pdev, i);
+ enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
+ if (!enic->bar[i].vaddr) {
+ dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
+ err = -ENODEV;
+ goto err_out_iounmap;
+ }
+ enic->bar[i].bus_addr = pci_resource_start(pdev, i);
+ }
+
+ /* Register vNIC device
+ */
+
+ enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
+ ARRAY_SIZE(enic->bar));
+ if (!enic->vdev) {
+ dev_err(dev, "vNIC registration failed, aborting\n");
+ err = -ENODEV;
+ goto err_out_iounmap;
+ }
+
+#ifdef CONFIG_PCI_IOV
+ /* Get number of subvnics */
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
+ (u16 *)&enic->num_vfs);
+ if (enic->num_vfs) {
+ err = pci_enable_sriov(pdev, enic->num_vfs);
+ if (err) {
+ dev_err(dev, "SRIOV enable failed, aborting."
+ " pci_enable_sriov() returned %d\n",
+ err);
+ goto err_out_vnic_unregister;
+ }
+ enic->priv_flags |= ENIC_SRIOV_ENABLED;
+ num_pps = enic->num_vfs;
+ }
+ }
+
+#endif
+ /* Allocate structure for port profiles */
+ enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
+ if (!enic->pp) {
+ pr_err("port profile alloc failed, aborting\n");
+ err = -ENOMEM;
+ goto err_out_disable_sriov;
+ }
+
+ /* Issue device open to get device in known state
+ */
+
+ err = enic_dev_open(enic);
+ if (err) {
+ dev_err(dev, "vNIC dev open failed, aborting\n");
+ goto err_out_free_pp;
+ }
+
+ /* Setup devcmd lock
+ */
+
+ spin_lock_init(&enic->devcmd_lock);
+
+ /*
+ * Set ingress vlan rewrite mode before vnic initialization
+ */
+
+ err = enic_dev_set_ig_vlan_rewrite_mode(enic);
+ if (err) {
+ dev_err(dev,
+ "Failed to set ingress vlan rewrite mode, aborting.\n");
+ goto err_out_dev_close;
+ }
+
+ /* Issue device init to initialize the vnic-to-switch link.
+ * We'll start with carrier off and wait for link UP
+ * notification later to turn on carrier. We don't need
+ * to wait here for the vnic-to-switch link initialization
+ * to complete; link UP notification is the indication that
+ * the process is complete.
+ */
+
+ netif_carrier_off(netdev);
+
+ /* Do not call dev_init for a dynamic vnic.
+ * For a dynamic vnic, init_prov_info will be
+ * called later by an upper layer.
+ */
+
+ if (!enic_is_dynamic(enic)) {
+ err = vnic_dev_init(enic->vdev, 0);
+ if (err) {
+ dev_err(dev, "vNIC dev init failed, aborting\n");
+ goto err_out_dev_close;
+ }
+ }
+
+ err = enic_dev_init(enic);
+ if (err) {
+ dev_err(dev, "Device initialization failed, aborting\n");
+ goto err_out_dev_close;
+ }
+
+ /* Setup notification timer, HW reset task, and wq locks
+ */
+
+ init_timer(&enic->notify_timer);
+ enic->notify_timer.function = enic_notify_timer;
+ enic->notify_timer.data = (unsigned long)enic;
+
+ INIT_WORK(&enic->reset, enic_reset);
+ INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
+
+ for (i = 0; i < enic->wq_count; i++)
+ spin_lock_init(&enic->wq_lock[i]);
+
+ /* Register net device
+ */
+
+ enic->port_mtu = enic->config.mtu;
+ (void)enic_change_mtu(netdev, enic->port_mtu);
+
+#ifdef CONFIG_PCI_IOV
+ if (enic_is_dynamic(enic) && pdev->is_virtfn &&
+ is_zero_ether_addr(enic->mac_addr))
+ random_ether_addr(enic->mac_addr);
+#endif
+
+ err = enic_set_mac_addr(netdev, enic->mac_addr);
+ if (err) {
+ dev_err(dev, "Invalid MAC address, aborting\n");
+ goto err_out_dev_deinit;
+ }
+
+ enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
+ enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
+
+ if (enic_is_dynamic(enic))
+ netdev->netdev_ops = &enic_netdev_dynamic_ops;
+ else
+ netdev->netdev_ops = &enic_netdev_ops;
+
+ netdev->watchdog_timeo = 2 * HZ;
+ netdev->ethtool_ops = &enic_ethtool_ops;
+
+ netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ if (ENIC_SETTING(enic, LOOP)) {
+ netdev->features &= ~NETIF_F_HW_VLAN_TX;
+ enic->loop_enable = 1;
+ enic->loop_tag = enic->config.loop_tag;
+ dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
+ }
+ if (ENIC_SETTING(enic, TXCSUM))
+ netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ if (ENIC_SETTING(enic, TSO))
+ netdev->hw_features |= NETIF_F_TSO |
+ NETIF_F_TSO6 | NETIF_F_TSO_ECN;
+ if (ENIC_SETTING(enic, RXCSUM))
+ netdev->hw_features |= NETIF_F_RXCSUM;
+
+ netdev->features |= netdev->hw_features;
+
+ if (using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(dev, "Cannot register net device, aborting\n");
+ goto err_out_dev_deinit;
+ }
+
+ return 0;
+
+err_out_dev_deinit:
+ enic_dev_deinit(enic);
+err_out_dev_close:
+ vnic_dev_close(enic->vdev);
+err_out_free_pp:
+ kfree(enic->pp);
+err_out_disable_sriov:
+#ifdef CONFIG_PCI_IOV
+ if (enic_sriov_enabled(enic)) {
+ pci_disable_sriov(pdev);
+ enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
+ }
+err_out_vnic_unregister:
+ vnic_dev_unregister(enic->vdev);
+#endif
+err_out_iounmap:
+ enic_iounmap(enic);
+err_out_release_regions:
+ pci_release_regions(pdev);
+err_out_disable_device:
+ pci_disable_device(pdev);
+err_out_free_netdev:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+
+ return err;
+}
+
+static void __devexit enic_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ if (netdev) {
+ struct enic *enic = netdev_priv(netdev);
+
+ cancel_work_sync(&enic->reset);
+ cancel_work_sync(&enic->change_mtu_work);
+ unregister_netdev(netdev);
+ enic_dev_deinit(enic);
+ vnic_dev_close(enic->vdev);
+#ifdef CONFIG_PCI_IOV
+ if (enic_sriov_enabled(enic)) {
+ pci_disable_sriov(pdev);
+ enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
+ }
+#endif
+ kfree(enic->pp);
+ vnic_dev_unregister(enic->vdev);
+ enic_iounmap(enic);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+ }
+}
+
+static struct pci_driver enic_driver = {
+ .name = DRV_NAME,
+ .id_table = enic_id_table,
+ .probe = enic_probe,
+ .remove = __devexit_p(enic_remove),
+};
+
+static int __init enic_init_module(void)
+{
+ pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
+
+ return pci_register_driver(&enic_driver);
+}
+
+static void __exit enic_cleanup_module(void)
+{
+ pci_unregister_driver(&enic_driver);
+}
+
+module_init(enic_init_module);
+module_exit(enic_cleanup_module);
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
new file mode 100644
index 000000000000..22bf03a1829e
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright 2011 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/ip.h>
+
+#include "vnic_vic.h"
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_dev.h"
+#include "enic_pp.h"
+
+/*
+ * Checks the validity of the vf index that came in a
+ * port profile request
+ */
+int enic_is_valid_pp_vf(struct enic *enic, int vf, int *err)
+{
+ if (vf != PORT_SELF_VF) {
+#ifdef CONFIG_PCI_IOV
+ if (enic_sriov_enabled(enic)) {
+ if (vf < 0 || vf >= enic->num_vfs) {
+ *err = -EINVAL;
+ goto err_out;
+ }
+ } else {
+ *err = -EOPNOTSUPP;
+ goto err_out;
+ }
+#else
+ *err = -EOPNOTSUPP;
+ goto err_out;
+#endif
+ }
+
+ if (vf == PORT_SELF_VF && !enic_is_dynamic(enic)) {
+ *err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+ *err = 0;
+ return 1;
+
+err_out:
+ return 0;
+}
+
+static int enic_set_port_profile(struct enic *enic, int vf)
+{
+ struct net_device *netdev = enic->netdev;
+ struct enic_port_profile *pp;
+ struct vic_provinfo *vp;
+ const u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
+ const u16 os_type = htons(VIC_GENERIC_PROV_OS_TYPE_LINUX);
+ char uuid_str[38];
+ char client_mac_str[18];
+ u8 *client_mac;
+ int err;
+
+ ENIC_PP_BY_INDEX(enic, vf, pp, &err);
+ if (err)
+ return err;
+
+ if (!(pp->set & ENIC_SET_NAME) || !strlen(pp->name))
+ return -EINVAL;
+
+ vp = vic_provinfo_alloc(GFP_KERNEL, oui,
+ VIC_PROVINFO_GENERIC_TYPE);
+ if (!vp)
+ return -ENOMEM;
+
+ VIC_PROVINFO_ADD_TLV(vp,
+ VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR,
+ strlen(pp->name) + 1, pp->name);
+
+ if (!is_zero_ether_addr(pp->mac_addr)) {
+ client_mac = pp->mac_addr;
+ } else if (vf == PORT_SELF_VF) {
+ client_mac = netdev->dev_addr;
+ } else {
+ netdev_err(netdev, "Cannot find pp mac address "
+ "for VF %d\n", vf);
+ err = -EINVAL;
+ goto add_tlv_failure;
+ }
+
+ VIC_PROVINFO_ADD_TLV(vp,
+ VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR,
+ ETH_ALEN, client_mac);
+
+ snprintf(client_mac_str, sizeof(client_mac_str), "%pM", client_mac);
+ VIC_PROVINFO_ADD_TLV(vp,
+ VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR,
+ sizeof(client_mac_str), client_mac_str);
+
+ if (pp->set & ENIC_SET_INSTANCE) {
+ sprintf(uuid_str, "%pUB", pp->instance_uuid);
+ VIC_PROVINFO_ADD_TLV(vp,
+ VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR,
+ sizeof(uuid_str), uuid_str);
+ }
+
+ if (pp->set & ENIC_SET_HOST) {
+ sprintf(uuid_str, "%pUB", pp->host_uuid);
+ VIC_PROVINFO_ADD_TLV(vp,
+ VIC_GENERIC_PROV_TLV_HOST_UUID_STR,
+ sizeof(uuid_str), uuid_str);
+ }
+
+ VIC_PROVINFO_ADD_TLV(vp,
+ VIC_GENERIC_PROV_TLV_OS_TYPE,
+ sizeof(os_type), &os_type);
+
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_init_prov2, (u8 *)vp,
+ vic_provinfo_size(vp));
+ err = enic_dev_status_to_errno(err);
+
+add_tlv_failure:
+ vic_provinfo_free(vp);
+
+ return err;
+}
+
+static int enic_unset_port_profile(struct enic *enic, int vf)
+{
+ int err;
+
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_deinit);
+ if (err)
+ return enic_dev_status_to_errno(err);
+
+ if (vf == PORT_SELF_VF)
+ enic_reset_addr_lists(enic);
+
+ return 0;
+}
+
+static int enic_are_pp_different(struct enic_port_profile *pp1,
+ struct enic_port_profile *pp2)
+{
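+ /* Nonzero if any of name, instance UUID, host UUID or MAC differ */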
+ return strcmp(pp1->name, pp2->name) | !!memcmp(pp1->instance_uuid,
+ pp2->instance_uuid, PORT_UUID_MAX) |
+ !!memcmp(pp1->host_uuid, pp2->host_uuid, PORT_UUID_MAX) |
+ !!memcmp(pp1->mac_addr, pp2->mac_addr, ETH_ALEN);
+}
+
+static int enic_pp_preassociate(struct enic *enic, int vf,
+ struct enic_port_profile *prev_pp, int *restore_pp);
+static int enic_pp_disassociate(struct enic *enic, int vf,
+ struct enic_port_profile *prev_pp, int *restore_pp);
+static int enic_pp_preassociate_rr(struct enic *enic, int vf,
+ struct enic_port_profile *prev_pp, int *restore_pp);
+static int enic_pp_associate(struct enic *enic, int vf,
+ struct enic_port_profile *prev_pp, int *restore_pp);
+
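+/* Dispatch table indexed by the PORT_REQUEST_* value received via
+ * IFLA_PORT_REQUEST.
+ */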
+static int (*enic_pp_handlers[])(struct enic *enic, int vf,
+ struct enic_port_profile *prev_state,
+ int *restore_pp) = {
+ [PORT_REQUEST_PREASSOCIATE] = enic_pp_preassociate,
+ [PORT_REQUEST_PREASSOCIATE_RR] = enic_pp_preassociate_rr,
+ [PORT_REQUEST_ASSOCIATE] = enic_pp_associate,
+ [PORT_REQUEST_DISASSOCIATE] = enic_pp_disassociate,
+};
+
+static const int enic_pp_handlers_count = ARRAY_SIZE(enic_pp_handlers);
+
+static int enic_pp_preassociate(struct enic *enic, int vf,
+ struct enic_port_profile *prev_pp, int *restore_pp)
+{
+ return -EOPNOTSUPP;
+}
+
+static int enic_pp_disassociate(struct enic *enic, int vf,
+ struct enic_port_profile *prev_pp, int *restore_pp)
+{
+ struct net_device *netdev = enic->netdev;
+ struct enic_port_profile *pp;
+ int err;
+
+ ENIC_PP_BY_INDEX(enic, vf, pp, &err);
+ if (err)
+ return err;
+
+ /* Deregister mac addresses */
+ if (!is_zero_ether_addr(pp->mac_addr))
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_del_addr,
+ pp->mac_addr);
+ else if (!is_zero_ether_addr(netdev->dev_addr))
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_del_addr,
+ netdev->dev_addr);
+
+ return enic_unset_port_profile(enic, vf);
+}
+
+static int enic_pp_preassociate_rr(struct enic *enic, int vf,
+ struct enic_port_profile *prev_pp, int *restore_pp)
+{
+ struct enic_port_profile *pp;
+ int err;
+ int active = 0;
+
+ ENIC_PP_BY_INDEX(enic, vf, pp, &err);
+ if (err)
+ return err;
+
+ if (pp->request != PORT_REQUEST_ASSOCIATE) {
+ /* If the pre-associate is not part of an associate,
+ * we always disassociate first.
+ */
+ err = enic_pp_handlers[PORT_REQUEST_DISASSOCIATE](enic, vf,
+ prev_pp, restore_pp);
+ if (err)
+ return err;
+ }
+
+ *restore_pp = 0;
+
+ err = enic_set_port_profile(enic, vf);
+ if (err)
+ return err;
+
+ /* If the pre-associate is not part of an associate,
+ * enable the device as standby.
+ */
+ if (pp->request != PORT_REQUEST_ASSOCIATE) {
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_enable2,
+ active);
+ err = enic_dev_status_to_errno(err);
+ }
+
+ return err;
+}
+
+static int enic_pp_associate(struct enic *enic, int vf,
+ struct enic_port_profile *prev_pp, int *restore_pp)
+{
+ struct net_device *netdev = enic->netdev;
+ struct enic_port_profile *pp;
+ int err;
+ int active = 1;
+
+ ENIC_PP_BY_INDEX(enic, vf, pp, &err);
+ if (err)
+ return err;
+
+ /* Check if a pre-associate was called before */
+ if (prev_pp->request != PORT_REQUEST_PREASSOCIATE_RR ||
+ enic_are_pp_different(prev_pp, pp)) {
+ err = enic_pp_handlers[PORT_REQUEST_DISASSOCIATE](
+ enic, vf, prev_pp, restore_pp);
+ if (err)
+ return err;
+
+ *restore_pp = 0;
+ }
+
+ err = enic_pp_handlers[PORT_REQUEST_PREASSOCIATE_RR](
+ enic, vf, prev_pp, restore_pp);
+ if (err)
+ return err;
+
+ *restore_pp = 0;
+
+ /* Enable device as active */
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_enable2, active);
+ err = enic_dev_status_to_errno(err);
+ if (err)
+ return err;
+
+ /* Register mac address */
+ if (!is_zero_ether_addr(pp->mac_addr))
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_add_addr,
+ pp->mac_addr);
+ else if (!is_zero_ether_addr(netdev->dev_addr))
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_add_addr,
+ netdev->dev_addr);
+
+ return 0;
+}
+
+int enic_process_set_pp_request(struct enic *enic, int vf,
+ struct enic_port_profile *prev_pp, int *restore_pp)
+{
+ struct enic_port_profile *pp;
+ int err;
+
+ ENIC_PP_BY_INDEX(enic, vf, pp, &err);
+ if (err)
+ return err;
+
+ if (pp->request >= enic_pp_handlers_count ||
+ !enic_pp_handlers[pp->request])
+ return -EOPNOTSUPP;
+
+ return enic_pp_handlers[pp->request](enic, vf, prev_pp, restore_pp);
+}
+
+int enic_process_get_pp_request(struct enic *enic, int vf,
+ int request, u16 *response)
+{
+ int err, status = ERR_SUCCESS;
+
+ switch (request) {
+
+ case PORT_REQUEST_PREASSOCIATE_RR:
+ case PORT_REQUEST_ASSOCIATE:
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
+ vnic_dev_enable2_done, &status);
+ break;
+
+ case PORT_REQUEST_DISASSOCIATE:
+ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
+ vnic_dev_deinit_done, &status);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (err)
+ status = err;
+
+ switch (status) {
+ case ERR_SUCCESS:
+ *response = PORT_PROFILE_RESPONSE_SUCCESS;
+ break;
+ case ERR_EINVAL:
+ *response = PORT_PROFILE_RESPONSE_INVALID;
+ break;
+ case ERR_EBADSTATE:
+ *response = PORT_PROFILE_RESPONSE_BADSTATE;
+ break;
+ case ERR_ENOMEM:
+ *response = PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES;
+ break;
+ case ERR_EINPROGRESS:
+ *response = PORT_PROFILE_RESPONSE_INPROGRESS;
+ break;
+ default:
+ *response = PORT_PROFILE_RESPONSE_ERROR;
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.h b/drivers/net/ethernet/cisco/enic/enic_pp.h
new file mode 100644
index 000000000000..a09ff392c1c6
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2011 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _ENIC_PP_H_
+#define _ENIC_PP_H_
+
+#define ENIC_PP_BY_INDEX(enic, vf, pp, err) \
+ do { \
+ if (enic_is_valid_pp_vf(enic, vf, err)) \
+ pp = (vf == PORT_SELF_VF) ? enic->pp : enic->pp + vf; \
+ else \
+ pp = NULL; \
+ } while (0)
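+
+/* Illustrative use (mirrors the callers in enic_pp.c): the macro
+ * validates vf and leaves the looked-up port profile in pp, or NULL
+ * with *err set on failure:
+ *
+ *     struct enic_port_profile *pp;
+ *     int err;
+ *
+ *     ENIC_PP_BY_INDEX(enic, vf, pp, &err);
+ *     if (err)
+ *             return err;
+ */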
+
+int enic_process_set_pp_request(struct enic *enic, int vf,
+ struct enic_port_profile *prev_pp, int *restore_pp);
+int enic_process_get_pp_request(struct enic *enic, int vf,
+ int request, u16 *response);
+int enic_is_valid_pp_vf(struct enic *enic, int vf, int *err);
+
+#endif /* _ENIC_PP_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c
new file mode 100644
index 000000000000..4a35367de790
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_res.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "cq_enet_desc.h"
+#include "vnic_resource.h"
+#include "vnic_enet.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_nic.h"
+#include "vnic_rss.h"
+#include "enic_res.h"
+#include "enic.h"
+
+int enic_get_vnic_config(struct enic *enic)
+{
+ struct vnic_enet_config *c = &enic->config;
+ int err;
+
+ err = vnic_dev_mac_addr(enic->vdev, enic->mac_addr);
+ if (err) {
+ dev_err(enic_get_dev(enic),
+ "Error getting MAC addr, %d\n", err);
+ return err;
+ }
+
+#define GET_CONFIG(m) \
+ do { \
+ err = vnic_dev_spec(enic->vdev, \
+ offsetof(struct vnic_enet_config, m), \
+ sizeof(c->m), &c->m); \
+ if (err) { \
+ dev_err(enic_get_dev(enic), \
+ "Error getting %s, %d\n", #m, err); \
+ return err; \
+ } \
+ } while (0)
+
+ GET_CONFIG(flags);
+ GET_CONFIG(wq_desc_count);
+ GET_CONFIG(rq_desc_count);
+ GET_CONFIG(mtu);
+ GET_CONFIG(intr_timer_type);
+ GET_CONFIG(intr_mode);
+ GET_CONFIG(intr_timer_usec);
+ GET_CONFIG(loop_tag);
+
+ c->wq_desc_count =
+ min_t(u32, ENIC_MAX_WQ_DESCS,
+ max_t(u32, ENIC_MIN_WQ_DESCS,
+ c->wq_desc_count));
+ c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
+
+ c->rq_desc_count =
+ min_t(u32, ENIC_MAX_RQ_DESCS,
+ max_t(u32, ENIC_MIN_RQ_DESCS,
+ c->rq_desc_count));
+ c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
+
+ if (c->mtu == 0)
+ c->mtu = 1500;
+ c->mtu = min_t(u16, ENIC_MAX_MTU,
+ max_t(u16, ENIC_MIN_MTU,
+ c->mtu));
+
+ c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
+ vnic_dev_get_intr_coal_timer_max(enic->vdev));
+
+ dev_info(enic_get_dev(enic),
+ "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n",
+ enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu);
+
+ dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
+ "tso/lro %s/%s rss %s intr mode %s type %s timer %d usec "
+ "loopback tag 0x%04x\n",
+ ENIC_SETTING(enic, TXCSUM) ? "yes" : "no",
+ ENIC_SETTING(enic, RXCSUM) ? "yes" : "no",
+ ENIC_SETTING(enic, TSO) ? "yes" : "no",
+ ENIC_SETTING(enic, LRO) ? "yes" : "no",
+ ENIC_SETTING(enic, RSS) ? "yes" : "no",
+ c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" :
+ c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" :
+ c->intr_mode == VENET_INTR_MODE_ANY ? "any" :
+ "unknown",
+ c->intr_timer_type == VENET_INTR_TYPE_MIN ? "min" :
+ c->intr_timer_type == VENET_INTR_TYPE_IDLE ? "idle" :
+ "unknown",
+ c->intr_timer_usec,
+ c->loop_tag);
+
+ return 0;
+}
+
+int enic_add_vlan(struct enic *enic, u16 vlanid)
+{
+ u64 a0 = vlanid, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait);
+ if (err)
+ dev_err(enic_get_dev(enic), "Can't add vlan id, %d\n", err);
+
+ return err;
+}
+
+int enic_del_vlan(struct enic *enic, u16 vlanid)
+{
+ u64 a0 = vlanid, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait);
+ if (err)
+ dev_err(enic_get_dev(enic), "Can't delete vlan id, %d\n", err);
+
+ return err;
+}
+
+int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
+ u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
+ u8 ig_vlan_strip_en)
+{
+ u64 a0, a1;
+ u32 nic_cfg;
+ int wait = 1000;
+
+ vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
+ rss_hash_type, rss_hash_bits, rss_base_cpu,
+ rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
+
+ a0 = nic_cfg;
+ a1 = 0;
+
+ return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
+}
+
+int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len)
+{
+ u64 a0 = (u64)key_pa, a1 = len;
+ int wait = 1000;
+
+ return vnic_dev_cmd(enic->vdev, CMD_RSS_KEY, &a0, &a1, wait);
+}
+
+int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len)
+{
+ u64 a0 = (u64)cpu_pa, a1 = len;
+ int wait = 1000;
+
+ return vnic_dev_cmd(enic->vdev, CMD_RSS_CPU, &a0, &a1, wait);
+}
+
+void enic_free_vnic_resources(struct enic *enic)
+{
+ unsigned int i;
+
+ for (i = 0; i < enic->wq_count; i++)
+ vnic_wq_free(&enic->wq[i]);
+ for (i = 0; i < enic->rq_count; i++)
+ vnic_rq_free(&enic->rq[i]);
+ for (i = 0; i < enic->cq_count; i++)
+ vnic_cq_free(&enic->cq[i]);
+ for (i = 0; i < enic->intr_count; i++)
+ vnic_intr_free(&enic->intr[i]);
+}
+
+void enic_get_res_counts(struct enic *enic)
+{
+ enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
+ enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
+ enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
+ enic->intr_count = vnic_dev_get_res_count(enic->vdev,
+ RES_TYPE_INTR_CTRL);
+
+ dev_info(enic_get_dev(enic),
+ "vNIC resources avail: wq %d rq %d cq %d intr %d\n",
+ enic->wq_count, enic->rq_count,
+ enic->cq_count, enic->intr_count);
+}
+
+void enic_init_vnic_resources(struct enic *enic)
+{
+ enum vnic_dev_intr_mode intr_mode;
+ unsigned int mask_on_assertion;
+ unsigned int interrupt_offset;
+ unsigned int error_interrupt_enable;
+ unsigned int error_interrupt_offset;
+ unsigned int cq_index;
+ unsigned int i;
+
+ intr_mode = vnic_dev_get_intr_mode(enic->vdev);
+
+ /* Init RQ/WQ resources.
+ *
+ * RQ[0 - n-1] point to CQ[0 - n-1]
+ * WQ[0 - m-1] point to CQ[n - n+m-1]
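+ * (e.g. with 4 RQs and 4 WQs: RQ[0..3] -> CQ[0..3], WQ[0..3] -> CQ[4..7])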
+ *
+ * Error interrupt is not enabled for MSI.
+ */
+
+ switch (intr_mode) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ case VNIC_DEV_INTR_MODE_MSIX:
+ error_interrupt_enable = 1;
+ error_interrupt_offset = enic->intr_count - 2;
+ break;
+ default:
+ error_interrupt_enable = 0;
+ error_interrupt_offset = 0;
+ break;
+ }
+
+ for (i = 0; i < enic->rq_count; i++) {
+ cq_index = i;
+ vnic_rq_init(&enic->rq[i],
+ cq_index,
+ error_interrupt_enable,
+ error_interrupt_offset);
+ }
+
+ for (i = 0; i < enic->wq_count; i++) {
+ cq_index = enic->rq_count + i;
+ vnic_wq_init(&enic->wq[i],
+ cq_index,
+ error_interrupt_enable,
+ error_interrupt_offset);
+ }
+
+ /* Init CQ resources
+ *
+ * CQ[0 - n+m-1] point to INTR[0] for INTx, MSI
+ * CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X
+ */
+
+ for (i = 0; i < enic->cq_count; i++) {
+
+ switch (intr_mode) {
+ case VNIC_DEV_INTR_MODE_MSIX:
+ interrupt_offset = i;
+ break;
+ default:
+ interrupt_offset = 0;
+ break;
+ }
+
+ vnic_cq_init(&enic->cq[i],
+ 0 /* flow_control_enable */,
+ 1 /* color_enable */,
+ 0 /* cq_head */,
+ 0 /* cq_tail */,
+ 1 /* cq_tail_color */,
+ 1 /* interrupt_enable */,
+ 1 /* cq_entry_enable */,
+ 0 /* cq_message_enable */,
+ interrupt_offset,
+ 0 /* cq_message_addr */);
+ }
+
+ /* Init INTR resources
+ *
+ * mask_on_assertion is not used for INTx due to the level-
+ * triggered nature of INTx
+ */
+
+ switch (intr_mode) {
+ case VNIC_DEV_INTR_MODE_MSI:
+ case VNIC_DEV_INTR_MODE_MSIX:
+ mask_on_assertion = 1;
+ break;
+ default:
+ mask_on_assertion = 0;
+ break;
+ }
+
+ for (i = 0; i < enic->intr_count; i++) {
+ vnic_intr_init(&enic->intr[i],
+ enic->config.intr_timer_usec,
+ enic->config.intr_timer_type,
+ mask_on_assertion);
+ }
+}
+
+int enic_alloc_vnic_resources(struct enic *enic)
+{
+ enum vnic_dev_intr_mode intr_mode;
+ unsigned int i;
+ int err;
+
+ intr_mode = vnic_dev_get_intr_mode(enic->vdev);
+
+ dev_info(enic_get_dev(enic), "vNIC resources used: "
+ "wq %d rq %d cq %d intr %d intr mode %s\n",
+ enic->wq_count, enic->rq_count,
+ enic->cq_count, enic->intr_count,
+ intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
+ intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
+ intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
+ "unknown");
+
+ /* Allocate queue resources
+ */
+
+ for (i = 0; i < enic->wq_count; i++) {
+ err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i,
+ enic->config.wq_desc_count,
+ sizeof(struct wq_enet_desc));
+ if (err)
+ goto err_out_cleanup;
+ }
+
+ for (i = 0; i < enic->rq_count; i++) {
+ err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i,
+ enic->config.rq_desc_count,
+ sizeof(struct rq_enet_desc));
+ if (err)
+ goto err_out_cleanup;
+ }
+
+ for (i = 0; i < enic->cq_count; i++) {
+ if (i < enic->rq_count)
+ err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
+ enic->config.rq_desc_count,
+ sizeof(struct cq_enet_rq_desc));
+ else
+ err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
+ enic->config.wq_desc_count,
+ sizeof(struct cq_enet_wq_desc));
+ if (err)
+ goto err_out_cleanup;
+ }
+
+ for (i = 0; i < enic->intr_count; i++) {
+ err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
+ if (err)
+ goto err_out_cleanup;
+ }
+
+ /* Hook remaining resource
+ */
+
+ enic->legacy_pba = vnic_dev_get_res(enic->vdev,
+ RES_TYPE_INTR_PBA_LEGACY, 0);
+ if (!enic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
+ dev_err(enic_get_dev(enic),
+ "Failed to hook legacy pba resource\n");
+ err = -ENODEV;
+ goto err_out_cleanup;
+ }
+
+ return 0;
+
+err_out_cleanup:
+ enic_free_vnic_resources(enic);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h
new file mode 100644
index 000000000000..25be2734c3fe
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_res.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _ENIC_RES_H_
+#define _ENIC_RES_H_
+
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+
+#define ENIC_MIN_WQ_DESCS 64
+#define ENIC_MAX_WQ_DESCS 4096
+#define ENIC_MIN_RQ_DESCS 64
+#define ENIC_MAX_RQ_DESCS 4096
+
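+/* 68 is the minimum MTU required by IPv4; 9000 covers jumbo frames */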
+#define ENIC_MIN_MTU 68
+#define ENIC_MAX_MTU 9000
+
+#define ENIC_MULTICAST_PERFECT_FILTERS 32
+#define ENIC_UNICAST_PERFECT_FILTERS 32
+
+#define ENIC_NON_TSO_MAX_DESC 16
+
+#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
+
+static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
+ void *os_buf, dma_addr_t dma_addr, unsigned int len,
+ unsigned int mss_or_csum_offset, unsigned int hdr_len,
+ int vlan_tag_insert, unsigned int vlan_tag,
+ int offload_mode, int cq_entry, int sop, int eop, int loopback)
+{
+ struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
+
+ wq_enet_desc_enc(desc,
+ (u64)dma_addr | VNIC_PADDR_TARGET,
+ (u16)len,
+ (u16)mss_or_csum_offset,
+ (u16)hdr_len, (u8)offload_mode,
+ (u8)eop, (u8)cq_entry,
+ 0, /* fcoe_encap */
+ (u8)vlan_tag_insert,
+ (u16)vlan_tag,
+ (u8)loopback);
+
+ vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
+}
+
+static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
+ void *os_buf, dma_addr_t dma_addr, unsigned int len,
+ int eop, int loopback)
+{
+ enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+ 0, 0, 0, 0, 0,
+ eop, 0 /* !SOP */, eop, loopback);
+}
+
+static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
+ dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert,
+ unsigned int vlan_tag, int eop, int loopback)
+{
+ enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+ 0, 0, vlan_tag_insert, vlan_tag,
+ WQ_ENET_OFFLOAD_MODE_CSUM,
+ eop, 1 /* SOP */, eop, loopback);
+}
+
+static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
+ void *os_buf, dma_addr_t dma_addr, unsigned int len,
+ int ip_csum, int tcpudp_csum, int vlan_tag_insert,
+ unsigned int vlan_tag, int eop, int loopback)
+{
+ enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+ (ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0),
+ 0, vlan_tag_insert, vlan_tag,
+ WQ_ENET_OFFLOAD_MODE_CSUM,
+ eop, 1 /* SOP */, eop, loopback);
+}
+
+static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
+ void *os_buf, dma_addr_t dma_addr, unsigned int len,
+ unsigned int csum_offset, unsigned int hdr_len,
+ int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback)
+{
+ enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+ csum_offset, hdr_len, vlan_tag_insert, vlan_tag,
+ WQ_ENET_OFFLOAD_MODE_CSUM_L4,
+ eop, 1 /* SOP */, eop, loopback);
+}
+
+static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
+ void *os_buf, dma_addr_t dma_addr, unsigned int len,
+ unsigned int mss, unsigned int hdr_len, int vlan_tag_insert,
+ unsigned int vlan_tag, int eop, int loopback)
+{
+ enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
+ mss, hdr_len, vlan_tag_insert, vlan_tag,
+ WQ_ENET_OFFLOAD_MODE_TSO,
+ eop, 1 /* SOP */, eop, loopback);
+}
+
+static inline void enic_queue_rq_desc(struct vnic_rq *rq,
+ void *os_buf, unsigned int os_buf_index,
+ dma_addr_t dma_addr, unsigned int len)
+{
+ struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
+ u8 type = os_buf_index ?
+ RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;
+
+ rq_enet_desc_enc(desc,
+ (u64)dma_addr | VNIC_PADDR_TARGET,
+ type, (u16)len);
+
+ vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len);
+}
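+
+/* Illustrative RQ post (a sketch; the caller is assumed to have
+ * allocated the buffer and DMA-mapped it, as the driver does for
+ * receive skbs):
+ *
+ *     dma_addr_t dma = pci_map_single(pdev, skb->data, len,
+ *         PCI_DMA_FROMDEVICE);
+ *     enic_queue_rq_desc(rq, skb, 0, dma, len);
+ *
+ * os_buf_index 0 marks the sole/first buffer (RQ_ENET_TYPE_ONLY_SOP).
+ */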
+
+struct enic;
+
+int enic_get_vnic_config(struct enic *);
+int enic_add_vlan(struct enic *enic, u16 vlanid);
+int enic_del_vlan(struct enic *enic, u16 vlanid);
+int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
+ u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
+ u8 ig_vlan_strip_en);
+int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len);
+int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len);
+void enic_get_res_counts(struct enic *enic);
+void enic_init_vnic_resources(struct enic *enic);
+int enic_alloc_vnic_resources(struct enic *);
+void enic_free_vnic_resources(struct enic *);
+
+#endif /* _ENIC_RES_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/rq_enet_desc.h b/drivers/net/ethernet/cisco/enic/rq_enet_desc.h
new file mode 100644
index 000000000000..e6dd30988d6f
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/rq_enet_desc.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _RQ_ENET_DESC_H_
+#define _RQ_ENET_DESC_H_
+
+/* Ethernet receive queue descriptor: 16B */
+struct rq_enet_desc {
+ __le64 address;
+ __le16 length_type;
+ u8 reserved[6];
+};
+
+enum rq_enet_type_types {
+ RQ_ENET_TYPE_ONLY_SOP = 0,
+ RQ_ENET_TYPE_NOT_SOP = 1,
+ RQ_ENET_TYPE_RESV2 = 2,
+ RQ_ENET_TYPE_RESV3 = 3,
+};
+
+#define RQ_ENET_ADDR_BITS 64
+#define RQ_ENET_LEN_BITS 14
+#define RQ_ENET_LEN_MASK ((1 << RQ_ENET_LEN_BITS) - 1)
+#define RQ_ENET_TYPE_BITS 2
+#define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1)
+
+static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
+ u64 address, u8 type, u16 length)
+{
+ desc->address = cpu_to_le64(address);
+ desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) |
+ ((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
+}
+
+static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
+ u64 *address, u8 *type, u16 *length)
+{
+ *address = le64_to_cpu(desc->address);
+ *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK;
+ *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) &
+ RQ_ENET_TYPE_MASK);
+}
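+
+/* length_type layout (little-endian u16):
+ *
+ *     bits 0..13: length (so at most 16383 bytes per buffer)
+ *     bits 14..15: type (RQ_ENET_TYPE_*)
+ *
+ * rq_enet_desc_dec() recovers exactly what rq_enet_desc_enc() packed
+ * for any length <= RQ_ENET_LEN_MASK.
+ */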
+
+#endif /* _RQ_ENET_DESC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.c b/drivers/net/ethernet/cisco/enic/vnic_cq.c
new file mode 100644
index 000000000000..0daa1c7073cb
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+void vnic_cq_free(struct vnic_cq *cq)
+{
+ vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
+
+ cq->ctrl = NULL;
+}
+
+int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+
+ cq->index = index;
+ cq->vdev = vdev;
+
+ cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
+ if (!cq->ctrl) {
+ pr_err("Failed to hook CQ[%d] resource\n", index);
+ return -EINVAL;
+ }
+
+ err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+ unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+ unsigned int cq_tail_color, unsigned int interrupt_enable,
+ unsigned int cq_entry_enable, unsigned int cq_message_enable,
+ unsigned int interrupt_offset, u64 cq_message_addr)
+{
+ u64 paddr;
+
+ paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
+ writeq(paddr, &cq->ctrl->ring_base);
+ iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
+ iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
+ iowrite32(color_enable, &cq->ctrl->color_enable);
+ iowrite32(cq_head, &cq->ctrl->cq_head);
+ iowrite32(cq_tail, &cq->ctrl->cq_tail);
+ iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
+ iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
+ iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
+ iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
+ iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
+ writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
+
+ cq->interrupt_offset = interrupt_offset;
+}
+
+void vnic_cq_clean(struct vnic_cq *cq)
+{
+ cq->to_clean = 0;
+ cq->last_color = 0;
+
+ iowrite32(0, &cq->ctrl->cq_head);
+ iowrite32(0, &cq->ctrl->cq_tail);
+ iowrite32(1, &cq->ctrl->cq_tail_color);
+
+ vnic_dev_clear_desc_ring(&cq->ring);
+}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h
new file mode 100644
index 000000000000..579315cbe803
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_CQ_H_
+#define _VNIC_CQ_H_
+
+#include "cq_desc.h"
+#include "vnic_dev.h"
+
+/* Completion queue control */
+struct vnic_cq_ctrl {
+ u64 ring_base; /* 0x00 */
+ u32 ring_size; /* 0x08 */
+ u32 pad0;
+ u32 flow_control_enable; /* 0x10 */
+ u32 pad1;
+ u32 color_enable; /* 0x18 */
+ u32 pad2;
+ u32 cq_head; /* 0x20 */
+ u32 pad3;
+ u32 cq_tail; /* 0x28 */
+ u32 pad4;
+ u32 cq_tail_color; /* 0x30 */
+ u32 pad5;
+ u32 interrupt_enable; /* 0x38 */
+ u32 pad6;
+ u32 cq_entry_enable; /* 0x40 */
+ u32 pad7;
+ u32 cq_message_enable; /* 0x48 */
+ u32 pad8;
+ u32 interrupt_offset; /* 0x50 */
+ u32 pad9;
+ u64 cq_message_addr; /* 0x58 */
+ u32 pad10;
+};
+
+struct vnic_cq {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_dev_ring ring;
+ unsigned int to_clean;
+ unsigned int last_color;
+ unsigned int interrupt_offset;
+};
+
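+/* Completion ownership is tracked with a color bit rather than
+ * head/tail pointers: the device is assumed to flip the color it
+ * writes on each pass over the ring, so an entry whose color differs
+ * from cq->last_color is new work. When the loop below wraps to_clean
+ * past the end of the ring, it toggles last_color to match.
+ */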
+static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
+ unsigned int work_to_do,
+ int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+ u8 type, u16 q_number, u16 completed_index, void *opaque),
+ void *opaque)
+{
+ struct cq_desc *cq_desc;
+ unsigned int work_done = 0;
+ u16 q_number, completed_index;
+ u8 type, color;
+
+ cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+ cq->ring.desc_size * cq->to_clean);
+ cq_desc_dec(cq_desc, &type, &color,
+ &q_number, &completed_index);
+
+ while (color != cq->last_color) {
+
+ if ((*q_service)(cq->vdev, cq_desc, type,
+ q_number, completed_index, opaque))
+ break;
+
+ cq->to_clean++;
+ if (cq->to_clean == cq->ring.desc_count) {
+ cq->to_clean = 0;
+ cq->last_color = cq->last_color ? 0 : 1;
+ }
+
+ cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+ cq->ring.desc_size * cq->to_clean);
+ cq_desc_dec(cq_desc, &type, &color,
+ &q_number, &completed_index);
+
+ work_done++;
+ if (work_done >= work_to_do)
+ break;
+ }
+
+ return work_done;
+}
+
+void vnic_cq_free(struct vnic_cq *cq);
+int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+ unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+ unsigned int cq_tail_color, unsigned int interrupt_enable,
+ unsigned int cq_entry_enable, unsigned int message_enable,
+ unsigned int interrupt_offset, u64 message_addr);
+void vnic_cq_clean(struct vnic_cq *cq);
+
+#endif /* _VNIC_CQ_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
new file mode 100644
index 000000000000..31e7f9bc2067
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -0,0 +1,1021 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/if_ether.h>
+
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+#include "vnic_dev.h"
+#include "vnic_stats.h"
+
+enum vnic_proxy_type {
+ PROXY_NONE,
+ PROXY_BY_BDF,
+ PROXY_BY_INDEX,
+};
+
+struct vnic_res {
+ void __iomem *vaddr;
+ dma_addr_t bus_addr;
+ unsigned int count;
+};
+
+struct vnic_intr_coal_timer_info {
+ u32 mul;
+ u32 div;
+ u32 max_usec;
+};
+
+struct vnic_dev {
+ void *priv;
+ struct pci_dev *pdev;
+ struct vnic_res res[RES_TYPE_MAX];
+ enum vnic_dev_intr_mode intr_mode;
+ struct vnic_devcmd __iomem *devcmd;
+ struct vnic_devcmd_notify *notify;
+ struct vnic_devcmd_notify notify_copy;
+ dma_addr_t notify_pa;
+ u32 notify_sz;
+ dma_addr_t linkstatus_pa;
+ struct vnic_stats *stats;
+ dma_addr_t stats_pa;
+ struct vnic_devcmd_fw_info *fw_info;
+ dma_addr_t fw_info_pa;
+ enum vnic_proxy_type proxy;
+ u32 proxy_index;
+ u64 args[VNIC_DEVCMD_NARGS];
+ struct vnic_intr_coal_timer_info intr_coal_timer_info;
+};
+
+#define VNIC_MAX_RES_HDR_SIZE \
+ (sizeof(struct vnic_resource_header) + \
+ sizeof(struct vnic_resource) * RES_TYPE_MAX)
+#define VNIC_RES_STRIDE 128
+
+void *vnic_dev_priv(struct vnic_dev *vdev)
+{
+ return vdev->priv;
+}
+
+static int vnic_dev_discover_res(struct vnic_dev *vdev,
+ struct vnic_dev_bar *bar, unsigned int num_bars)
+{
+ struct vnic_resource_header __iomem *rh;
+ struct mgmt_barmap_hdr __iomem *mrh;
+ struct vnic_resource __iomem *r;
+ u8 type;
+
+ if (num_bars == 0)
+ return -EINVAL;
+
+ if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
+ pr_err("vNIC BAR0 res hdr length error\n");
+ return -EINVAL;
+ }
+
+ rh = bar->vaddr;
+ mrh = bar->vaddr;
+ if (!rh) {
+ pr_err("vNIC BAR0 res hdr not mem-mapped\n");
+ return -EINVAL;
+ }
+
+ /* Check for mgmt vnic in addition to normal vnic */
+ if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
+ (ioread32(&rh->version) != VNIC_RES_VERSION)) {
+ if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
+ (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
+ pr_err("vNIC BAR0 res magic/version error "
+ "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
+ VNIC_RES_MAGIC, VNIC_RES_VERSION,
+ MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
+ ioread32(&rh->magic), ioread32(&rh->version));
+ return -EINVAL;
+ }
+ }
+
+ if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
+ r = (struct vnic_resource __iomem *)(mrh + 1);
+ else
+ r = (struct vnic_resource __iomem *)(rh + 1);
+
+ while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
+
+ u8 bar_num = ioread8(&r->bar);
+ u32 bar_offset = ioread32(&r->bar_offset);
+ u32 count = ioread32(&r->count);
+ u32 len;
+
+ r++;
+
+ if (bar_num >= num_bars)
+ continue;
+
+ if (!bar[bar_num].len || !bar[bar_num].vaddr)
+ continue;
+
+ switch (type) {
+ case RES_TYPE_WQ:
+ case RES_TYPE_RQ:
+ case RES_TYPE_CQ:
+ case RES_TYPE_INTR_CTRL:
+ /* each of the count entries is VNIC_RES_STRIDE bytes */
+ len = count * VNIC_RES_STRIDE;
+ if (len + bar_offset > bar[bar_num].len) {
+ pr_err("vNIC BAR0 resource %d "
+ "out-of-bounds, offset 0x%x + "
+ "size 0x%x > bar len 0x%lx\n",
+ type, bar_offset,
+ len,
+ bar[bar_num].len);
+ return -EINVAL;
+ }
+ break;
+ case RES_TYPE_INTR_PBA_LEGACY:
+ case RES_TYPE_DEVCMD:
+ len = count;
+ break;
+ default:
+ continue;
+ }
+
+ vdev->res[type].count = count;
+ vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
+ bar_offset;
+ vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
+ }
+
+ return 0;
+}
+
+unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
+ enum vnic_res_type type)
+{
+ return vdev->res[type].count;
+}
+
+void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+ unsigned int index)
+{
+ if (!vdev->res[type].vaddr)
+ return NULL;
+
+ switch (type) {
+ case RES_TYPE_WQ:
+ case RES_TYPE_RQ:
+ case RES_TYPE_CQ:
+ case RES_TYPE_INTR_CTRL:
+ return (char __iomem *)vdev->res[type].vaddr +
+ index * VNIC_RES_STRIDE;
+ default:
+ return (char __iomem *)vdev->res[type].vaddr;
+ }
+}
+
+static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ /* The base address of the desc rings must be 512 byte aligned.
+ * Descriptor count is aligned to groups of 32 descriptors. A
+ * count of 0 means the maximum 4096 descriptors. Descriptor
+ * size is aligned to 16 bytes.
+ */
+
+ unsigned int count_align = 32;
+ unsigned int desc_align = 16;
+
+ ring->base_align = 512;
+
+ if (desc_count == 0)
+ desc_count = 4096;
+
+ ring->desc_count = ALIGN(desc_count, count_align);
+
+ ring->desc_size = ALIGN(desc_size, desc_align);
+
+ ring->size = ring->desc_count * ring->desc_size;
+ ring->size_unaligned = ring->size + ring->base_align;
+
+ return ring->size_unaligned;
+}
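+
+/* Worked example: desc_count = 3000, desc_size = 16 gives
+ * desc_count = ALIGN(3000, 32) = 3008, size = 3008 * 16 = 48128 bytes,
+ * and size_unaligned = 48128 + 512, the slack needed to realign the
+ * base to a 512-byte boundary.
+ */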
+
+void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
+{
+ memset(ring->descs, 0, ring->size);
+}
+
+int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ vnic_dev_desc_ring_size(ring, desc_count, desc_size);
+
+ ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
+ ring->size_unaligned,
+ &ring->base_addr_unaligned);
+
+ if (!ring->descs_unaligned) {
+ pr_err("Failed to allocate ring (size=%d), aborting\n",
+ (int)ring->size);
+ return -ENOMEM;
+ }
+
+ ring->base_addr = ALIGN(ring->base_addr_unaligned,
+ ring->base_align);
+ ring->descs = (u8 *)ring->descs_unaligned +
+ (ring->base_addr - ring->base_addr_unaligned);
+
+ vnic_dev_clear_desc_ring(ring);
+
+ ring->desc_avail = ring->desc_count - 1;
+
+ return 0;
+}
+
+void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
+{
+ if (ring->descs) {
+ pci_free_consistent(vdev->pdev,
+ ring->size_unaligned,
+ ring->descs_unaligned,
+ ring->base_addr_unaligned);
+ ring->descs = NULL;
+ }
+}
+
+static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ int wait)
+{
+ struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
+ unsigned int i;
+ int delay;
+ u32 status;
+ int err;
+
+ status = ioread32(&devcmd->status);
+ if (status == 0xFFFFFFFF) {
+ /* PCI-e target device is gone */
+ return -ENODEV;
+ }
+ if (status & STAT_BUSY) {
+ pr_err("Busy devcmd %d\n", _CMD_N(cmd));
+ return -EBUSY;
+ }
+
+ if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
+ for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+ writeq(vdev->args[i], &devcmd->args[i]);
+ wmb();
+ }
+
+ iowrite32(cmd, &devcmd->cmd);
+
+ if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
+ return 0;
+
+ for (delay = 0; delay < wait; delay++) {
+
+ udelay(100);
+
+ status = ioread32(&devcmd->status);
+ if (status == 0xFFFFFFFF) {
+ /* PCI-e target device is gone */
+ return -ENODEV;
+ }
+
+ if (!(status & STAT_BUSY)) {
+
+ if (status & STAT_ERROR) {
+ err = (int)readq(&devcmd->args[0]);
+ if (err != ERR_ECMDUNKNOWN ||
+ cmd != CMD_CAPABILITY)
+ pr_err("Error %d devcmd %d\n",
+ err, _CMD_N(cmd));
+ return err;
+ }
+
+ if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
+ rmb();
+ for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+ vdev->args[i] = readq(&devcmd->args[i]);
+ }
+
+ return 0;
+ }
+ }
+
+ pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
+ return -ETIMEDOUT;
+}
+
+static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
+ enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
+ u64 *a0, u64 *a1, int wait)
+{
+ u32 status;
+ int err;
+
+ memset(vdev->args, 0, sizeof(vdev->args));
+
+ vdev->args[0] = vdev->proxy_index;
+ vdev->args[1] = cmd;
+ vdev->args[2] = *a0;
+ vdev->args[3] = *a1;
+
+ err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
+ if (err)
+ return err;
+
+ status = (u32)vdev->args[0];
+ if (status & STAT_ERROR) {
+ err = (int)vdev->args[1];
+ if (err != ERR_ECMDUNKNOWN ||
+ cmd != CMD_CAPABILITY)
+ pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
+ return err;
+ }
+
+ *a0 = vdev->args[1];
+ *a1 = vdev->args[2];
+
+ return 0;
+}
+
+static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
+ enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
+{
+ int err;
+
+ vdev->args[0] = *a0;
+ vdev->args[1] = *a1;
+
+ err = _vnic_dev_cmd(vdev, cmd, wait);
+
+ *a0 = vdev->args[0];
+ *a1 = vdev->args[1];
+
+ return err;
+}
+
+void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index)
+{
+ vdev->proxy = PROXY_BY_INDEX;
+ vdev->proxy_index = index;
+}
+
+void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
+{
+ vdev->proxy = PROXY_NONE;
+ vdev->proxy_index = 0;
+}
+
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *a0, u64 *a1, int wait)
+{
+ memset(vdev->args, 0, sizeof(vdev->args));
+
+ switch (vdev->proxy) {
+ case PROXY_BY_INDEX:
+ return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
+ a0, a1, wait);
+ case PROXY_BY_BDF:
+ return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
+ a0, a1, wait);
+ case PROXY_NONE:
+ default:
+ return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
+ }
+}
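+
+/* Canonical devcmd call pattern (a sketch, mirroring the wrappers
+ * below):
+ *
+ *     u64 a0 = 0, a1 = 0;
+ *     int wait = 1000;
+ *
+ *     err = vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
+ *
+ * a0/a1 carry arguments in and results out; wait bounds the busy-poll
+ * in _vnic_dev_cmd() at 100us per iteration.
+ */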
+
+static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
+{
+ u64 a0 = (u32)cmd, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
+
+ return !(err || a0);
+}
+
+int vnic_dev_fw_info(struct vnic_dev *vdev,
+ struct vnic_devcmd_fw_info **fw_info)
+{
+ u64 a0, a1 = 0;
+ int wait = 1000;
+ int err = 0;
+
+ if (!vdev->fw_info) {
+ vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_fw_info),
+ &vdev->fw_info_pa);
+ if (!vdev->fw_info)
+ return -ENOMEM;
+
+ memset(vdev->fw_info, 0, sizeof(struct vnic_devcmd_fw_info));
+
+ a0 = vdev->fw_info_pa;
+ a1 = sizeof(struct vnic_devcmd_fw_info);
+
+ /* only get fw_info once and cache it */
+ err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
+ if (err == ERR_ECMDUNKNOWN) {
+ err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
+ &a0, &a1, wait);
+ }
+ }
+
+ *fw_info = vdev->fw_info;
+
+ return err;
+}
+
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
+ void *value)
+{
+ u64 a0, a1;
+ int wait = 1000;
+ int err;
+
+ a0 = offset;
+ a1 = size;
+
+ err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
+
+ switch (size) {
+ case 1: *(u8 *)value = (u8)a0; break;
+ case 2: *(u16 *)value = (u16)a0; break;
+ case 4: *(u32 *)value = (u32)a0; break;
+ case 8: *(u64 *)value = a0; break;
+ default: BUG(); break;
+ }
+
+ return err;
+}
+
+int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
+{
+ u64 a0, a1;
+ int wait = 1000;
+
+ if (!vdev->stats) {
+ vdev->stats = pci_alloc_consistent(vdev->pdev,
+ sizeof(struct vnic_stats), &vdev->stats_pa);
+ if (!vdev->stats)
+ return -ENOMEM;
+ }
+
+ *stats = vdev->stats;
+ a0 = vdev->stats_pa;
+ a1 = sizeof(struct vnic_stats);
+
+ return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
+}
+
+int vnic_dev_close(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
+}
+
+int vnic_dev_enable_wait(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
+ if (err == ERR_ECMDUNKNOWN)
+ return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
+
+ return err;
+}
+
+int vnic_dev_disable(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
+}
+
+int vnic_dev_open(struct vnic_dev *vdev, int arg)
+{
+ u64 a0 = (u32)arg, a1 = 0;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
+}
+
+int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ *done = 0;
+
+ err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
+ if (err)
+ return err;
+
+ *done = (a0 == 0);
+
+ return 0;
+}
+
+static int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
+{
+ u64 a0 = (u32)arg, a1 = 0;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
+}
+
+static int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ *done = 0;
+
+ err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
+ if (err)
+ return err;
+
+ *done = (a0 == 0);
+
+ return 0;
+}
+
+int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg)
+{
+ u64 a0 = (u32)arg, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(vdev, CMD_HANG_RESET, &a0, &a1, wait);
+ if (err == ERR_ECMDUNKNOWN) {
+ err = vnic_dev_soft_reset(vdev, arg);
+ if (err)
+ return err;
+
+ return vnic_dev_init(vdev, 0);
+ }
+
+ return err;
+}
+
+int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ *done = 0;
+
+ err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS, &a0, &a1, wait);
+ if (err) {
+ if (err == ERR_ECMDUNKNOWN)
+ return vnic_dev_soft_reset_done(vdev, done);
+ return err;
+ }
+
+ *done = (a0 == 0);
+
+ return 0;
+}
+
+int vnic_dev_hang_notify(struct vnic_dev *vdev)
+{
+ u64 a0, a1;
+ int wait = 1000;
+ return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
+}
+
+int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
+{
+ u64 a0, a1;
+ int wait = 1000;
+ int err, i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = 0;
+
+ err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
+ if (err)
+ return err;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = ((u8 *)&a0)[i];
+
+ return 0;
+}
+
+int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+ int broadcast, int promisc, int allmulti)
+{
+ u64 a0, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
+ (multicast ? CMD_PFILTER_MULTICAST : 0) |
+ (broadcast ? CMD_PFILTER_BROADCAST : 0) |
+ (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
+ (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
+
+ err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
+ if (err)
+ pr_err("Can't set packet filter\n");
+
+ return err;
+}
+
+int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ ((u8 *)&a0)[i] = addr[i];
+
+ err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+ if (err)
+ pr_err("Can't add addr [%pM], %d\n", addr, err);
+
+ return err;
+}
+
+int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int err;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ ((u8 *)&a0)[i] = addr[i];
+
+ err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
+ if (err)
+ pr_err("Can't del addr [%pM], %d\n", addr, err);
+
+ return err;
+}
+
+int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
+ u8 ig_vlan_rewrite_mode)
+{
+ u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE, &a0, &a1, wait);
+ if (err == ERR_ECMDUNKNOWN)
+ return 0;
+
+ return err;
+}
+
+static int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
+ void *notify_addr, dma_addr_t notify_pa, u16 intr)
+{
+ u64 a0, a1;
+ int wait = 1000;
+ int r;
+
+ memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
+ vdev->notify = notify_addr;
+ vdev->notify_pa = notify_pa;
+
+ a0 = (u64)notify_pa;
+ a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
+ a1 += sizeof(struct vnic_devcmd_notify);
+
+ r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+ vdev->notify_sz = (r == 0) ? (u32)a1 : 0;
+ return r;
+}
+
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+{
+ void *notify_addr;
+ dma_addr_t notify_pa;
+
+ if (vdev->notify || vdev->notify_pa) {
+ pr_err("notify block %p still allocated", vdev->notify);
+ return -EINVAL;
+ }
+
+ notify_addr = pci_alloc_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_notify),
+ &notify_pa);
+ if (!notify_addr)
+ return -ENOMEM;
+
+ return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
+}
+
+static int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
+{
+ u64 a0, a1;
+ int wait = 1000;
+ int err;
+
+ a0 = 0; /* paddr = 0 to unset notify buffer */
+ a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
+ a1 += sizeof(struct vnic_devcmd_notify);
+
+ err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+ vdev->notify = NULL;
+ vdev->notify_pa = 0;
+ vdev->notify_sz = 0;
+
+ return err;
+}
+
+int vnic_dev_notify_unset(struct vnic_dev *vdev)
+{
+ if (vdev->notify) {
+ pci_free_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_notify),
+ vdev->notify,
+ vdev->notify_pa);
+ }
+
+ return vnic_dev_notify_unsetcmd(vdev);
+}
+
+static int vnic_dev_notify_ready(struct vnic_dev *vdev)
+{
+ u32 *words;
+ unsigned int nwords = vdev->notify_sz / 4;
+ unsigned int i;
+ u32 csum;
+
+ if (!vdev->notify || !vdev->notify_sz)
+ return 0;
+
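+ /* words[0] holds a checksum of the remaining words; the device may
+ * be updating the block concurrently, so re-copy until the snapshot
+ * is self-consistent.
+ */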
+ do {
+ csum = 0;
+ memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
+ words = (u32 *)&vdev->notify_copy;
+ for (i = 1; i < nwords; i++)
+ csum += words[i];
+ } while (csum != words[0]);
+
+ return 1;
+}
+
+int vnic_dev_init(struct vnic_dev *vdev, int arg)
+{
+ u64 a0 = (u32)arg, a1 = 0;
+ int wait = 1000;
+ int r = 0;
+
+ if (vnic_dev_capable(vdev, CMD_INIT))
+ r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
+ else {
+ vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
+ if (a0 & CMD_INITF_DEFAULT_MAC) {
+ /* Emulate these for old CMD_INIT_v1 which
+ * didn't pass a0 so no CMD_INITF_*.
+ */
+ vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
+ vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+ }
+ }
+ return r;
+}
+
+int vnic_dev_deinit(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
+}
+
+void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
+{
+ /* Default: hardware intr coal timer is in units of 1.5 usecs */
+ vdev->intr_coal_timer_info.mul = 2;
+ vdev->intr_coal_timer_info.div = 3;
+ vdev->intr_coal_timer_info.max_usec =
+ vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
+}
+
+int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
+{
+ int wait = 1000;
+ int err;
+
+ memset(vdev->args, 0, sizeof(vdev->args));
+
+ err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait);
+
+ /* Use defaults when firmware doesn't support the devcmd at all or
+ * supports it for only specific hardware
+ */
+ if ((err == ERR_ECMDUNKNOWN) ||
+ (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
+ pr_warning("Using default conversion factor for "
+ "interrupt coalesce timer\n");
+ vnic_dev_intr_coal_timer_info_default(vdev);
+ return 0;
+ }
+
+ vdev->intr_coal_timer_info.mul = (u32) vdev->args[0];
+ vdev->intr_coal_timer_info.div = (u32) vdev->args[1];
+ vdev->intr_coal_timer_info.max_usec = (u32) vdev->args[2];
+
+ return err;
+}
+
+int vnic_dev_link_status(struct vnic_dev *vdev)
+{
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.link_state;
+}
+
+u32 vnic_dev_port_speed(struct vnic_dev *vdev)
+{
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.port_speed;
+}
+
+u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
+{
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.msglvl;
+}
+
+u32 vnic_dev_mtu(struct vnic_dev *vdev)
+{
+ if (!vnic_dev_notify_ready(vdev))
+ return 0;
+
+ return vdev->notify_copy.mtu;
+}
+
+void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
+ enum vnic_dev_intr_mode intr_mode)
+{
+ vdev->intr_mode = intr_mode;
+}
+
+enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
+ struct vnic_dev *vdev)
+{
+ return vdev->intr_mode;
+}
+
+u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
+{
+ return (usec * vdev->intr_coal_timer_info.mul) /
+ vdev->intr_coal_timer_info.div;
+}
+
+u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
+{
+ return (hw_cycles * vdev->intr_coal_timer_info.div) /
+ vdev->intr_coal_timer_info.mul;
+}
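+
+/* With the default factors mul = 2, div = 3 (1.5us per hardware cycle):
+ * usec_to_hw(3) = 3 * 2 / 3 = 2 cycles, and hw_to_usec(0xffff)
+ * = 65535 * 3 / 2 = 98302us, the default max_usec above.
+ */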
+
+u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
+{
+ return vdev->intr_coal_timer_info.max_usec;
+}
+
+void vnic_dev_unregister(struct vnic_dev *vdev)
+{
+ if (vdev) {
+ if (vdev->notify)
+ pci_free_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_notify),
+ vdev->notify,
+ vdev->notify_pa);
+ if (vdev->stats)
+ pci_free_consistent(vdev->pdev,
+ sizeof(struct vnic_stats),
+ vdev->stats, vdev->stats_pa);
+ if (vdev->fw_info)
+ pci_free_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_fw_info),
+ vdev->fw_info, vdev->fw_info_pa);
+ kfree(vdev);
+ }
+}
+
+struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
+ void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
+ unsigned int num_bars)
+{
+ if (!vdev) {
+ vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
+ if (!vdev)
+ return NULL;
+ }
+
+ vdev->priv = priv;
+ vdev->pdev = pdev;
+
+ if (vnic_dev_discover_res(vdev, bar, num_bars))
+ goto err_out;
+
+ vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+ if (!vdev->devcmd)
+ goto err_out;
+
+ return vdev;
+
+err_out:
+ vnic_dev_unregister(vdev);
+ return NULL;
+}
+
+int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
+{
+ u64 a0, a1 = len;
+ int wait = 1000;
+ dma_addr_t prov_pa;
+ void *prov_buf;
+ int ret;
+
+ prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
+ if (!prov_buf)
+ return -ENOMEM;
+
+ memcpy(prov_buf, buf, len);
+
+ a0 = prov_pa;
+
+ ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);
+
+ pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);
+
+ return ret;
+}
+
+int vnic_dev_enable2(struct vnic_dev *vdev, int active)
+{
+ u64 a0, a1 = 0;
+ int wait = 1000;
+
+ a0 = (active ? CMD_ENABLE2_ACTIVE : 0);
+
+ return vnic_dev_cmd(vdev, CMD_ENABLE2, &a0, &a1, wait);
+}
+
+static int vnic_dev_cmd_status(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ int *status)
+{
+ u64 a0 = cmd, a1 = 0;
+ int wait = 1000;
+ int ret;
+
+ ret = vnic_dev_cmd(vdev, CMD_STATUS, &a0, &a1, wait);
+ if (!ret)
+ *status = (int)a0;
+
+ return ret;
+}
+
+int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status)
+{
+ return vnic_dev_cmd_status(vdev, CMD_ENABLE2, status);
+}
+
+int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status)
+{
+ return vnic_dev_cmd_status(vdev, CMD_DEINIT, status);
+}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
new file mode 100644
index 000000000000..6a138b625d13
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_DEV_H_
+#define _VNIC_DEV_H_
+
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+
+#ifndef VNIC_PADDR_TARGET
+#define VNIC_PADDR_TARGET 0x0000000000000000ULL
+#endif
+
+#ifndef readq
+static inline u64 readq(void __iomem *reg)
+{
+ return (((u64)readl(reg + 0x4UL) << 32) |
+ (u64)readl(reg));
+}
+
+static inline void writeq(u64 val, void __iomem *reg)
+{
+ writel(val & 0xffffffff, reg);
+ writel(val >> 32, reg + 0x4UL);
+}
+#endif
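+
+/* Note: these fallbacks split each 64-bit MMIO access into two 32-bit
+ * accesses (low word written first), so they are not atomic on
+ * platforms that lack native readq/writeq.
+ */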
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+enum vnic_dev_intr_mode {
+ VNIC_DEV_INTR_MODE_UNKNOWN,
+ VNIC_DEV_INTR_MODE_INTX,
+ VNIC_DEV_INTR_MODE_MSI,
+ VNIC_DEV_INTR_MODE_MSIX,
+};
+
+struct vnic_dev_bar {
+ void __iomem *vaddr;
+ dma_addr_t bus_addr;
+ unsigned long len;
+};
+
+struct vnic_dev_ring {
+ void *descs;
+ size_t size;
+ dma_addr_t base_addr;
+ size_t base_align;
+ void *descs_unaligned;
+ size_t size_unaligned;
+ dma_addr_t base_addr_unaligned;
+ unsigned int desc_size;
+ unsigned int desc_count;
+ unsigned int desc_avail;
+};
+
+struct vnic_dev;
+struct vnic_stats;
+
+void *vnic_dev_priv(struct vnic_dev *vdev);
+unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
+ enum vnic_res_type type);
+void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+ unsigned int index);
+void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
+int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
+ struct vnic_dev_ring *ring);
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ u64 *a0, u64 *a1, int wait);
+void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index);
+void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev);
+int vnic_dev_fw_info(struct vnic_dev *vdev,
+ struct vnic_devcmd_fw_info **fw_info);
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
+ void *value);
+int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
+int vnic_dev_hang_notify(struct vnic_dev *vdev);
+int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+ int broadcast, int promisc, int allmulti);
+int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
+int vnic_dev_notify_unset(struct vnic_dev *vdev);
+int vnic_dev_link_status(struct vnic_dev *vdev);
+u32 vnic_dev_port_speed(struct vnic_dev *vdev);
+u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
+u32 vnic_dev_mtu(struct vnic_dev *vdev);
+int vnic_dev_close(struct vnic_dev *vdev);
+int vnic_dev_enable_wait(struct vnic_dev *vdev);
+int vnic_dev_disable(struct vnic_dev *vdev);
+int vnic_dev_open(struct vnic_dev *vdev, int arg);
+int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
+int vnic_dev_init(struct vnic_dev *vdev, int arg);
+int vnic_dev_deinit(struct vnic_dev *vdev);
+void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev);
+int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev);
+int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg);
+int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done);
+void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
+ enum vnic_dev_intr_mode intr_mode);
+enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
+u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec);
+u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles);
+u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev);
+void vnic_dev_unregister(struct vnic_dev *vdev);
+int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
+ u8 ig_vlan_rewrite_mode);
+struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
+ void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
+ unsigned int num_bars);
+int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len);
+int vnic_dev_enable2(struct vnic_dev *vdev, int active);
+int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
+int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
+
+#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
new file mode 100644
index 000000000000..8025e8808d61
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -0,0 +1,450 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_DEVCMD_H_
+#define _VNIC_DEVCMD_H_
+
+#define _CMD_NBITS 14
+#define _CMD_VTYPEBITS 10
+#define _CMD_FLAGSBITS 6
+#define _CMD_DIRBITS 2
+
+#define _CMD_NMASK ((1 << _CMD_NBITS)-1)
+#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1)
+#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1)
+#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1)
+
+#define _CMD_NSHIFT 0
+#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
+#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
+#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
+
+/*
+ * Direction bits (from host perspective).
+ */
+#define _CMD_DIR_NONE 0U
+#define _CMD_DIR_WRITE 1U
+#define _CMD_DIR_READ 2U
+#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ)
+
+/*
+ * Flag bits.
+ */
+#define _CMD_FLAGS_NONE 0U
+#define _CMD_FLAGS_NOWAIT 1U
+
+/*
+ * vNIC type bits.
+ */
+#define _CMD_VTYPE_NONE 0U
+#define _CMD_VTYPE_ENET 1U
+#define _CMD_VTYPE_FC 2U
+#define _CMD_VTYPE_SCSI 4U
+#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
+
+/*
+ * Used to create cmds.
+ */
+#define _CMDCF(dir, flags, vtype, nr) \
+ (((dir) << _CMD_DIRSHIFT) | \
+ ((flags) << _CMD_FLAGSSHIFT) | \
+ ((vtype) << _CMD_VTYPESHIFT) | \
+ ((nr) << _CMD_NSHIFT))
+#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr)
+#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
+
+/*
+ * Used to decode cmds.
+ */
+#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
+#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
+#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
+#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
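+
+/* Example: CMD_DEV_SPEC below is _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
+ * so _CMD_N(CMD_DEV_SPEC) = 2, _CMD_DIR(CMD_DEV_SPEC) = _CMD_DIR_RW and
+ * _CMD_VTYPE(CMD_DEV_SPEC) = _CMD_VTYPE_ALL.
+ */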
+
+enum vnic_devcmd_cmd {
+ CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
+
+ /*
+ * mcpu fw info in mem:
+ * in:
+ * (u64)a0=paddr to struct vnic_devcmd_fw_info
+ * action:
+ * Fills in struct vnic_devcmd_fw_info (128 bytes)
+ * note:
+ * An old definition of CMD_MCPU_FW_INFO
+ */
+ CMD_MCPU_FW_INFO_OLD = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
+
+ /*
+ * mcpu fw info in mem:
+ * in:
+ * (u64)a0=paddr to struct vnic_devcmd_fw_info
+ * (u16)a1=size of the structure
+ * out:
+ * (u16)a1=0 when in:a1 = 0; otherwise the data size
+ * actually written.
+ * action:
+ * Fills in first 128 bytes of vnic_devcmd_fw_info for in:a1 = 0,
+ * first in:a1 bytes for 0 < in:a1 <= 132,
+ * 132 bytes for other values of in:a1.
+ * note:
+ * CMD_MCPU_FW_INFO and CMD_MCPU_FW_INFO_OLD share the same enum
+ * value (1) for source compatibility.
+ */
+ CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 1),
+
+ /* dev-specific block member:
+ * in: (u16)a0=offset,(u8)a1=size
+ * out: a0=value */
+ CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
+
+ /* stats clear */
+ CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
+
+ /* stats dump in mem: (u64)a0=paddr to stats area,
+ * (u16)a1=sizeof stats area */
+ CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
+
+ /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
+ CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
+
+ /* set Rx packet filter for all: (u32)a0=filters (see CMD_PFILTER_*) */
+ CMD_PACKET_FILTER_ALL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7),
+
+ /* hang detection notification */
+ CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
+
+ /* MAC address in (u48)a0 */
+ CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ,
+ _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
+
+ /* add addr from (u48)a0 */
+ CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE,
+ _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12),
+
+ /* del addr from (u48)a0 */
+ CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE,
+ _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13),
+
+ /* add VLAN id in (u16)a0 */
+ CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14),
+
+ /* del VLAN id in (u16)a0 */
+ CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
+
+ /* nic_cfg in (u32)a0 */
+ CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
+
+ /* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
+ CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
+
+ /* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */
+ CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18),
+
+ /* initiate softreset */
+ CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19),
+
+ /* softreset status:
+ * out: a0=0 reset complete, a0=1 reset in progress */
+ CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20),
+
+ /* set struct vnic_devcmd_notify buffer in mem:
+ * in:
+ * (u64)a0=paddr to notify (set paddr=0 to unset)
+ * (u32)(a1 & 0xffffffff)=sizeof(struct vnic_devcmd_notify)
+ * (u16)(a1 >> 32)=intr num (-1 for no intr)
+ * out:
+ * (u32)a1 = effective size
+ */
+ CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
+
+ /* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct,
+ * (u8)a1=PXENV_UNDI_xxx */
+ CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22),
+
+ /* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
+ CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
+
+ /* open status:
+ * out: a0=0 open complete, a0=1 open in progress */
+ CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),
+
+ /* close vnic */
+ CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
+
+ /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
+ /***** Replaced by CMD_INIT *****/
+ CMD_INIT_v1 = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
+
+ /* variant of CMD_INIT, with provisioning info
+ * (u64)a0=paddr of vnic_devcmd_provinfo
+ * (u32)a1=sizeof provision info */
+ CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),
+
+ /* enable virtual link */
+ CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
+
+ /* enable virtual link, waiting variant. */
+ CMD_ENABLE_WAIT = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
+
+ /* disable virtual link */
+ CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
+
+ /* stats dump sum of all vnic stats on same uplink in mem:
+ * (u64)a0=paddr
+ * (u16)a1=sizeof stats area */
+ CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
+
+ /* init status:
+ * out: a0=0 init complete, a0=1 init in progress
+ * if a0=0, a1=errno */
+ CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
+
+ /* INT13 API: (u64)a0=paddr to vnic_int13_params struct
+ * (u32)a1=INT13_CMD_xxx */
+ CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),
+
+ /* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */
+ CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33),
+
+ /* undo initialize of virtual link */
+ CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
+
+ /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
+ CMD_INIT = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 35),
+
+ /* check fw capability of a cmd:
+ * in: (u32)a0=cmd
+ * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
+ CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
+
+ /* persistent binding info
+ * in: (u64)a0=paddr of arg
+ * (u32)a1=CMD_PERBI_XXX */
+ CMD_PERBI = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37),
+
+ /* Interrupt Assert Register functionality
+ * in: (u16)a0=interrupt number to assert
+ */
+ CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38),
+
+ /* initiate hangreset, like softreset after hang detected */
+ CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39),
+
+ /* hangreset status:
+ * out: a0=0 reset complete, a0=1 reset in progress */
+ CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40),
+
+ /*
+ * Set hw ingress packet vlan rewrite mode:
+ * in: (u32)a0=new vlan rewrite mode
+ * out: (u32)a0=old vlan rewrite mode */
+ CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41),
+
+ /*
+ * in: (u16)a0=bdf of target vnic
+ * (u32)a1=cmd to proxy
+ * a2-a15=args to cmd in a1
+ * out: (u32)a0=status of proxied cmd
+ * a1-a15=out args of proxied cmd */
+ CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
+
+ /*
+ * As for BY_BDF except a0 is index of hvnlink subordinate vnic
+ * or SR-IOV virtual vnic
+ */
+ CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
+
+ /*
+ * For HPP toggle:
+ * adapter-info-get
+ * in: (u64)a0=physical address of buffer passed in from caller.
+ * (u16)a1=size of buffer specified in a0.
+ * out: (u64)a0=physical address of buffer passed in from caller.
+ * (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or
+ * 0 if no VIF-CONFIG-INFO TLV was ever received. */
+ CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
+
+ /* init_prov_info2:
+ * Variant of CMD_INIT_PROV_INFO, where it will not try to enable
+ * the vnic until CMD_ENABLE2 is issued.
+ * (u64)a0=paddr of vnic_devcmd_provinfo
+ * (u32)a1=sizeof provision info */
+ CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47),
+
+ /* enable2:
+ * (u32)a0=0 ==> standby
+ * =CMD_ENABLE2_ACTIVE ==> active
+ */
+ CMD_ENABLE2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 48),
+
+ /*
+ * cmd_status:
+ * Returns the status of the specified command
+ * Input:
+ * a0 = command for which status is being queried.
+ * Possible values are:
+ * CMD_SOFT_RESET
+ * CMD_HANG_RESET
+ * CMD_OPEN
+ * CMD_INIT
+ * CMD_INIT_PROV_INFO
+ * CMD_DEINIT
+ * CMD_INIT_PROV_INFO2
+ * CMD_ENABLE2
+ * Output:
+ * if status == STAT_ERROR
+ * a0 = ERR_ENOTSUPPORTED - status for command in a0 is
+ * not supported
+ * if status == STAT_NONE
+ * a0 = status of the devcmd specified in a0 as follows.
+ * ERR_SUCCESS - command in a0 completed successfully
+ * ERR_EINPROGRESS - command in a0 is still in progress
+ */
+ CMD_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 49),
+
+ /*
+ * Returns interrupt coalescing timer conversion factors.
+ * After calling this devcmd, ENIC driver can convert
+ * interrupt coalescing timer in usec into CPU cycles as follows:
+ *
+ * intr_timer_cycles = intr_timer_usec * multiplier / divisor
+ *
+ * Interrupt coalescing timer in usecs can be obtained from
+ * CPU cycles as follows:
+ *
+ * intr_timer_usec = intr_timer_cycles * divisor / multiplier
+ *
+ * in: none
+ * out: (u32)a0 = multiplier
+ * (u32)a1 = divisor
+ * (u32)a2 = maximum timer value in usec
+ */
+ CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50),
+};
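+
+/*
+ * Illustrative sketch (names are hypothetical): applying the conversion
+ * factors returned by CMD_INTR_COAL_CONVERT, per the formulas above.
+ */
+static inline u32 intr_coal_usec_to_cycles(u32 usec, u32 mul, u32 div)
+{
+	return usec * mul / div;
+}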
+
+/* CMD_ENABLE2 flags */
+#define CMD_ENABLE2_ACTIVE 0x1
+
+/* flags for CMD_OPEN */
+#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
+
+/* flags for CMD_INIT */
+#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
+
+/* flags for CMD_PACKET_FILTER */
+#define CMD_PFILTER_DIRECTED 0x01
+#define CMD_PFILTER_MULTICAST 0x02
+#define CMD_PFILTER_BROADCAST 0x04
+#define CMD_PFILTER_PROMISCUOUS 0x08
+#define CMD_PFILTER_ALL_MULTICAST 0x10
+
+/* rewrite modes for CMD_IG_VLAN_REWRITE_MODE */
+#define IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK 0
+#define IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN 1
+#define IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN 2
+#define IG_VLAN_REWRITE_MODE_PASS_THRU 3
+
+enum vnic_devcmd_status {
+ STAT_NONE = 0,
+ STAT_BUSY = 1 << 0, /* cmd in progress */
+ STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
+};
+
+enum vnic_devcmd_error {
+ ERR_SUCCESS = 0,
+ ERR_EINVAL = 1,
+ ERR_EFAULT = 2,
+ ERR_EPERM = 3,
+ ERR_EBUSY = 4,
+ ERR_ECMDUNKNOWN = 5,
+ ERR_EBADSTATE = 6,
+ ERR_ENOMEM = 7,
+ ERR_ETIMEDOUT = 8,
+ ERR_ELINKDOWN = 9,
+ ERR_EMAXRES = 10,
+ ERR_ENOTSUPPORTED = 11,
+ ERR_EINPROGRESS = 12,
+};
+
+/*
+ * note: hw_version and asic_rev refer to the same thing,
+ * but have different formats. hw_version is
+ * a 32-byte string (e.g. "A2") and asic_rev is
+ * a 16-bit integer (e.g. 0xA2).
+ */
+struct vnic_devcmd_fw_info {
+ char fw_version[32];
+ char fw_build[32];
+ char hw_version[32];
+ char hw_serial_number[32];
+ u16 asic_type;
+ u16 asic_rev;
+};
+
+struct vnic_devcmd_notify {
+ u32 csum; /* checksum over following words */
+
+ u32 link_state; /* link up == 1 */
+ u32 port_speed; /* effective port speed (rate limit) */
+ u32 mtu; /* MTU */
+ u32 msglvl; /* requested driver msg lvl */
+ u32 uif; /* uplink interface */
+ u32 status; /* status bits (see VNIC_STF_*) */
+ u32 error; /* error code (see ERR_*) for first ERR */
+ u32 link_down_cnt; /* running count of link down transitions */
+ u32 perbi_rebuild_cnt; /* running count of perbi rebuilds */
+};
+#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
+#define VNIC_STF_STD_PAUSE 0x0002 /* standard link-level pause on */
+#define VNIC_STF_PFC_PAUSE 0x0004 /* priority flow control pause on */
+/* all supported status flags */
+#define VNIC_STF_ALL (VNIC_STF_FATAL_ERR |\
+ VNIC_STF_STD_PAUSE |\
+ VNIC_STF_PFC_PAUSE |\
+ 0)
+
+struct vnic_devcmd_provinfo {
+ u8 oui[3];
+ u8 type;
+ u8 data[0];
+};
+
+/*
+ * Writing cmd register causes STAT_BUSY to get set in status register.
+ * When cmd completes, STAT_BUSY will be cleared.
+ *
+ * If cmd completed successfully STAT_ERROR will be clear
+ * and args registers contain cmd-specific results.
+ *
+ * If cmd error, STAT_ERROR will be set and args[0] contains error code.
+ *
+ * status register is read-only. While STAT_BUSY is set,
+ * all other register contents are read-only.
+ */
+
+/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
+#define VNIC_DEVCMD_NARGS 15
+struct vnic_devcmd {
+ u32 status; /* RO */
+ u32 cmd; /* RW */
+ u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */
+};
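+
+/*
+ * Illustrative sketch of the register protocol described above; the
+ * driver's actual devcmd issue path lives in vnic_dev.c. Assumes the
+ * includer provides iowrite32/ioread32/readq, udelay() and ETIMEDOUT.
+ */
+static inline int vnic_devcmd_poll_example(struct vnic_devcmd __iomem *dc,
+	u32 cmd, u64 *a0)
+{
+	unsigned int wait;
+
+	iowrite32(cmd, &dc->cmd);		/* sets STAT_BUSY */
+
+	for (wait = 0; wait < 1000; wait++) {
+		u32 status = ioread32(&dc->status);
+
+		if (!(status & STAT_BUSY)) {
+			if (status & STAT_ERROR)
+				return (int)readq(&dc->args[0]); /* ERR_* */
+			*a0 = readq(&dc->args[0]);	/* cmd result */
+			return 0;
+		}
+		udelay(100);
+	}
+
+	return -ETIMEDOUT;	/* STAT_BUSY never cleared */
+}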
+
+#endif /* _VNIC_DEVCMD_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_enet.h b/drivers/net/ethernet/cisco/enic/vnic_enet.h
new file mode 100644
index 000000000000..609542848e02
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/vnic_enet.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_ENIC_H_
+#define _VNIC_ENIC_H_
+
+/* Device-specific region: enet configuration */
+struct vnic_enet_config {
+ u32 flags;
+ u32 wq_desc_count;
+ u32 rq_desc_count;
+ u16 mtu;
+ u16 intr_timer_deprecated;
+ u8 intr_timer_type;
+ u8 intr_mode;
+ char devname[16];
+ u32 intr_timer_usec;
+ u16 loop_tag;
+};
+
+#define VENETF_TSO 0x1 /* TSO enabled */
+#define VENETF_LRO 0x2 /* LRO enabled */
+#define VENETF_RXCSUM 0x4 /* RX csum enabled */
+#define VENETF_TXCSUM 0x8 /* TX csum enabled */
+#define VENETF_RSS 0x10 /* RSS enabled */
+#define VENETF_RSSHASH_IPV4 0x20 /* Hash on IPv4 fields */
+#define VENETF_RSSHASH_TCPIPV4 0x40 /* Hash on TCP + IPv4 fields */
+#define VENETF_RSSHASH_IPV6 0x80 /* Hash on IPv6 fields */
+#define VENETF_RSSHASH_TCPIPV6 0x100 /* Hash on TCP + IPv6 fields */
+#define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */
+#define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */
+#define VENETF_LOOP 0x800 /* Loopback enabled */
+
+#define VENET_INTR_TYPE_MIN 0 /* Timer specs min interrupt spacing */
+#define VENET_INTR_TYPE_IDLE 1 /* Timer specs idle time before irq */
+
+#define VENET_INTR_MODE_ANY 0 /* Try MSI-X, then MSI, then INTx */
+#define VENET_INTR_MODE_MSI 1 /* Try MSI then INTx */
+#define VENET_INTR_MODE_INTX 2 /* Try INTx only */
+
+#endif /* _VNIC_ENIC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.c b/drivers/net/ethernet/cisco/enic/vnic_intr.c
new file mode 100644
index 000000000000..0ca107f7bc8c
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/vnic_intr.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+
+void vnic_intr_free(struct vnic_intr *intr)
+{
+ intr->ctrl = NULL;
+}
+
+int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+ unsigned int index)
+{
+ intr->index = index;
+ intr->vdev = vdev;
+
+ intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
+ if (!intr->ctrl) {
+ pr_err("Failed to hook INTR[%d].ctrl resource\n", index);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void vnic_intr_init(struct vnic_intr *intr, u32 coalescing_timer,
+ unsigned int coalescing_type, unsigned int mask_on_assertion)
+{
+ vnic_intr_coalescing_timer_set(intr, coalescing_timer);
+ iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
+ iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
+ iowrite32(0, &intr->ctrl->int_credits);
+}
+
+void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
+ u32 coalescing_timer)
+{
+ iowrite32(vnic_dev_intr_coal_timer_usec_to_hw(intr->vdev,
+ coalescing_timer), &intr->ctrl->coalescing_timer);
+}
+
+void vnic_intr_clean(struct vnic_intr *intr)
+{
+ iowrite32(0, &intr->ctrl->int_credits);
+}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.h b/drivers/net/ethernet/cisco/enic/vnic_intr.h
new file mode 100644
index 000000000000..2b1636392294
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/vnic_intr.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_INTR_H_
+#define _VNIC_INTR_H_
+
+#include <linux/pci.h>
+
+#include "vnic_dev.h"
+
+#define VNIC_INTR_TIMER_TYPE_ABS 0
+#define VNIC_INTR_TIMER_TYPE_QUIET 1
+
+/* Interrupt control */
+struct vnic_intr_ctrl {
+ u32 coalescing_timer; /* 0x00 */
+ u32 pad0;
+ u32 coalescing_value; /* 0x08 */
+ u32 pad1;
+ u32 coalescing_type; /* 0x10 */
+ u32 pad2;
+ u32 mask_on_assertion; /* 0x18 */
+ u32 pad3;
+ u32 mask; /* 0x20 */
+ u32 pad4;
+ u32 int_credits; /* 0x28 */
+ u32 pad5;
+ u32 int_credit_return; /* 0x30 */
+ u32 pad6;
+};
+
+struct vnic_intr {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */
+};
+
+static inline void vnic_intr_unmask(struct vnic_intr *intr)
+{
+ iowrite32(0, &intr->ctrl->mask);
+}
+
+static inline void vnic_intr_mask(struct vnic_intr *intr)
+{
+ iowrite32(1, &intr->ctrl->mask);
+}
+
+static inline int vnic_intr_masked(struct vnic_intr *intr)
+{
+ return ioread32(&intr->ctrl->mask);
+}
+
+static inline void vnic_intr_return_credits(struct vnic_intr *intr,
+ unsigned int credits, int unmask, int reset_timer)
+{
+#define VNIC_INTR_UNMASK_SHIFT 16
+#define VNIC_INTR_RESET_TIMER_SHIFT 17
+
+ u32 int_credit_return = (credits & 0xffff) |
+ (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
+ (reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
+
+ iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
+}
+
+static inline unsigned int vnic_intr_credits(struct vnic_intr *intr)
+{
+ return ioread32(&intr->ctrl->int_credits);
+}
+
+static inline void vnic_intr_return_all_credits(struct vnic_intr *intr)
+{
+ unsigned int credits = vnic_intr_credits(intr);
+ int unmask = 1;
+ int reset_timer = 1;
+
+ vnic_intr_return_credits(intr, credits, unmask, reset_timer);
+}
+
+static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
+{
+ /* read PBA without clearing */
+ return ioread32(legacy_pba);
+}
+
+void vnic_intr_free(struct vnic_intr *intr);
+int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+ unsigned int index);
+void vnic_intr_init(struct vnic_intr *intr, u32 coalescing_timer,
+ unsigned int coalescing_type, unsigned int mask_on_assertion);
+void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
+ u32 coalescing_timer);
+void vnic_intr_clean(struct vnic_intr *intr);
+
+#endif /* _VNIC_INTR_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_nic.h b/drivers/net/ethernet/cisco/enic/vnic_nic.h
new file mode 100644
index 000000000000..995a50dd4c99
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/vnic_nic.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_NIC_H_
+#define _VNIC_NIC_H_
+
+#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL
+#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0
+#define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8)
+#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL
+#define NIC_CFG_RSS_HASH_TYPE_SHIFT 8
+#define NIC_CFG_RSS_HASH_BITS (7UL << 16)
+#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL
+#define NIC_CFG_RSS_HASH_BITS_SHIFT 16
+#define NIC_CFG_RSS_BASE_CPU (7UL << 19)
+#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL
+#define NIC_CFG_RSS_BASE_CPU_SHIFT 19
+#define NIC_CFG_RSS_ENABLE (1UL << 22)
+#define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL
+#define NIC_CFG_RSS_ENABLE_SHIFT 22
+#define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23)
+#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL
+#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23
+#define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24)
+#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
+#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
+
+#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2)
+#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4)
+#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6)
+
+static inline void vnic_set_nic_cfg(u32 *nic_cfg,
+ u8 rss_default_cpu, u8 rss_hash_type,
+ u8 rss_hash_bits, u8 rss_base_cpu,
+ u8 rss_enable, u8 tso_ipid_split_en,
+ u8 ig_vlan_strip_en)
+{
+ *nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) |
+ ((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD)
+ << NIC_CFG_RSS_HASH_TYPE_SHIFT) |
+ ((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD)
+ << NIC_CFG_RSS_HASH_BITS_SHIFT) |
+ ((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD)
+ << NIC_CFG_RSS_BASE_CPU_SHIFT) |
+ ((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD)
+ << NIC_CFG_RSS_ENABLE_SHIFT) |
+ ((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD)
+ << NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) |
+ ((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD)
+ << NIC_CFG_IG_VLAN_STRIP_EN_SHIFT);
+}
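+
+/*
+ * Usage sketch (values are illustrative): program RSS over IPv4 and
+ * TCP/IPv4 with a 7-bit indirection table and ingress VLAN stripping.
+ */
+static inline u32 vnic_set_nic_cfg_example(void)
+{
+	u32 nic_cfg;
+
+	vnic_set_nic_cfg(&nic_cfg, 0 /* rss_default_cpu */,
+		NIC_CFG_RSS_HASH_TYPE_IPV4 | NIC_CFG_RSS_HASH_TYPE_TCP_IPV4,
+		7 /* rss_hash_bits */, 0 /* rss_base_cpu */,
+		1 /* rss_enable */, 0 /* tso_ipid_split_en */,
+		1 /* ig_vlan_strip_en */);
+
+	return nic_cfg;
+}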
+
+#endif /* _VNIC_NIC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_resource.h b/drivers/net/ethernet/cisco/enic/vnic_resource.h
new file mode 100644
index 000000000000..e0a73f1ca6f4
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/vnic_resource.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_RESOURCE_H_
+#define _VNIC_RESOURCE_H_
+
+#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */
+#define VNIC_RES_VERSION 0x00000000L
+#define MGMTVNIC_MAGIC 0x544d474dL /* 'MGMT' */
+#define MGMTVNIC_VERSION 0x00000000L
+
+/* The MAC address assigned to the CFG vNIC is fixed. */
+#define MGMTVNIC_MAC { 0x02, 0x00, 0x54, 0x4d, 0x47, 0x4d }
+
+/* vNIC resource types */
+enum vnic_res_type {
+ RES_TYPE_EOL, /* End-of-list */
+ RES_TYPE_WQ, /* Work queues */
+ RES_TYPE_RQ, /* Receive queues */
+ RES_TYPE_CQ, /* Completion queues */
+ RES_TYPE_RSVD1,
+ RES_TYPE_NIC_CFG, /* Enet NIC config registers */
+ RES_TYPE_RSVD2,
+ RES_TYPE_RSVD3,
+ RES_TYPE_RSVD4,
+ RES_TYPE_RSVD5,
+ RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */
+ RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */
+ RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */
+ RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */
+ RES_TYPE_RSVD6,
+ RES_TYPE_RSVD7,
+ RES_TYPE_DEVCMD, /* Device command region */
+ RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
+
+ RES_TYPE_MAX, /* Count of resource types */
+};
+
+struct vnic_resource_header {
+ u32 magic;
+ u32 version;
+};
+
+struct mgmt_barmap_hdr {
+ u32 magic; /* magic number */
+ u32 version; /* header format version */
+ u16 lif; /* loopback lif for mgmt frames */
+ u16 pci_slot; /* installed pci slot */
+ char serial[16]; /* card serial number */
+};
+
+struct vnic_resource {
+ u8 type;
+ u8 bar;
+ u8 pad[2];
+ u32 bar_offset;
+ u32 count;
+};
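+
+/*
+ * Illustrative sketch (the driver's real discovery code is in
+ * vnic_dev.c): resource entries follow the header in BAR space and are
+ * walked until RES_TYPE_EOL terminates the list.
+ */
+static inline unsigned int vnic_res_count_example(
+	struct vnic_resource __iomem *r)
+{
+	unsigned int n = 0;
+
+	while (ioread8(&r->type) != RES_TYPE_EOL) {
+		n++;
+		r++;
+	}
+
+	return n;
+}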
+
+#endif /* _VNIC_RESOURCE_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
new file mode 100644
index 000000000000..34105e0951a5
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "vnic_dev.h"
+#include "vnic_rq.h"
+
+static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
+{
+ struct vnic_rq_buf *buf;
+ struct vnic_dev *vdev;
+ unsigned int i, j, count = rq->ring.desc_count;
+ unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
+
+ vdev = rq->vdev;
+
+ for (i = 0; i < blks; i++) {
+ rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
+ if (!rq->bufs[i]) {
+ pr_err("Failed to alloc rq_bufs\n");
+ return -ENOMEM;
+ }
+ }
+
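+	/* Chain the bufs into a ring that mirrors the descriptor ring:
+	 * the last entry of each block links to the first entry of the
+	 * next block, and the final descriptor wraps back to bufs[0].
+	 */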
+ for (i = 0; i < blks; i++) {
+ buf = rq->bufs[i];
+ for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES(count); j++) {
+ buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES(count) + j;
+ buf->desc = (u8 *)rq->ring.descs +
+ rq->ring.desc_size * buf->index;
+ if (buf->index + 1 == count) {
+ buf->next = rq->bufs[0];
+ break;
+ } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES(count)) {
+ buf->next = rq->bufs[i + 1];
+ } else {
+ buf->next = buf + 1;
+ buf++;
+ }
+ }
+ }
+
+ rq->to_use = rq->to_clean = rq->bufs[0];
+
+ return 0;
+}
+
+void vnic_rq_free(struct vnic_rq *rq)
+{
+ struct vnic_dev *vdev;
+ unsigned int i;
+
+ vdev = rq->vdev;
+
+ vnic_dev_free_desc_ring(vdev, &rq->ring);
+
+ for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
+ if (rq->bufs[i]) {
+ kfree(rq->bufs[i]);
+ rq->bufs[i] = NULL;
+ }
+ }
+
+ rq->ctrl = NULL;
+}
+
+int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+
+ rq->index = index;
+ rq->vdev = vdev;
+
+ rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
+ if (!rq->ctrl) {
+ pr_err("Failed to hook RQ[%d] resource\n", index);
+ return -EINVAL;
+ }
+
+ vnic_rq_disable(rq);
+
+ err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
+ if (err)
+ return err;
+
+ err = vnic_rq_alloc_bufs(rq);
+ if (err) {
+ vnic_rq_free(rq);
+ return err;
+ }
+
+ return 0;
+}
+
+static void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ u64 paddr;
+ unsigned int count = rq->ring.desc_count;
+
+ paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
+ writeq(paddr, &rq->ctrl->ring_base);
+ iowrite32(count, &rq->ctrl->ring_size);
+ iowrite32(cq_index, &rq->ctrl->cq_index);
+ iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
+ iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
+ iowrite32(0, &rq->ctrl->dropped_packet_count);
+ iowrite32(0, &rq->ctrl->error_status);
+ iowrite32(fetch_index, &rq->ctrl->fetch_index);
+ iowrite32(posted_index, &rq->ctrl->posted_index);
+
+ rq->to_use = rq->to_clean =
+ &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
+ [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
+}
+
+void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ u32 fetch_index;
+
+ /* Use current fetch_index as the ring starting point */
+ fetch_index = ioread32(&rq->ctrl->fetch_index);
+
+ if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
+ /* Hardware surprise removal: reset fetch_index */
+ fetch_index = 0;
+ }
+
+ vnic_rq_init_start(rq, cq_index,
+ fetch_index, fetch_index,
+ error_interrupt_enable,
+ error_interrupt_offset);
+}
+
+unsigned int vnic_rq_error_status(struct vnic_rq *rq)
+{
+ return ioread32(&rq->ctrl->error_status);
+}
+
+void vnic_rq_enable(struct vnic_rq *rq)
+{
+ iowrite32(1, &rq->ctrl->enable);
+}
+
+int vnic_rq_disable(struct vnic_rq *rq)
+{
+ unsigned int wait;
+
+ iowrite32(0, &rq->ctrl->enable);
+
+ /* Wait for HW to ACK disable request */
+ for (wait = 0; wait < 1000; wait++) {
+ if (!(ioread32(&rq->ctrl->running)))
+ return 0;
+ udelay(10);
+ }
+
+ pr_err("Failed to disable RQ[%d]\n", rq->index);
+
+ return -ETIMEDOUT;
+}
+
+void vnic_rq_clean(struct vnic_rq *rq,
+ void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
+{
+ struct vnic_rq_buf *buf;
+ u32 fetch_index;
+ unsigned int count = rq->ring.desc_count;
+
+ buf = rq->to_clean;
+
+ while (vnic_rq_desc_used(rq) > 0) {
+
+ (*buf_clean)(rq, buf);
+
+ buf = rq->to_clean = buf->next;
+ rq->ring.desc_avail++;
+ }
+
+ /* Use current fetch_index as the ring starting point */
+ fetch_index = ioread32(&rq->ctrl->fetch_index);
+
+ if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
+ /* Hardware surprise removal: reset fetch_index */
+ fetch_index = 0;
+ }
+ rq->to_use = rq->to_clean =
+ &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
+ [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
+ iowrite32(fetch_index, &rq->ctrl->posted_index);
+
+ vnic_dev_clear_desc_ring(&rq->ring);
+}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h
new file mode 100644
index 000000000000..2056586f4d4b
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_RQ_H_
+#define _VNIC_RQ_H_
+
+#include <linux/pci.h>
+
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/* Receive queue control */
+struct vnic_rq_ctrl {
+ u64 ring_base; /* 0x00 */
+ u32 ring_size; /* 0x08 */
+ u32 pad0;
+ u32 posted_index; /* 0x10 */
+ u32 pad1;
+ u32 cq_index; /* 0x18 */
+ u32 pad2;
+ u32 enable; /* 0x20 */
+ u32 pad3;
+ u32 running; /* 0x28 */
+ u32 pad4;
+ u32 fetch_index; /* 0x30 */
+ u32 pad5;
+ u32 error_interrupt_enable; /* 0x38 */
+ u32 pad6;
+ u32 error_interrupt_offset; /* 0x40 */
+ u32 pad7;
+ u32 error_status; /* 0x48 */
+ u32 pad8;
+ u32 dropped_packet_count; /* 0x50 */
+ u32 pad9;
+ u32 dropped_packet_count_rc; /* 0x58 */
+ u32 pad10;
+};
+
+/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
+#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
+#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
+#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
+ ((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
+ VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
+#define VNIC_RQ_BUF_BLK_SZ(entries) \
+ (VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
+#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
+ DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
+#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
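+
+/*
+ * Example (illustrative): 256 descriptors use 64-entry blocks, so
+ * VNIC_RQ_BUF_BLKS_NEEDED(256) == 4; the 4096-descriptor maximum gives
+ * VNIC_RQ_BUF_BLKS_MAX == 64.
+ */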
+
+struct vnic_rq_buf {
+ struct vnic_rq_buf *next;
+ dma_addr_t dma_addr;
+ void *os_buf;
+ unsigned int os_buf_index;
+ unsigned int len;
+ unsigned int index;
+ void *desc;
+};
+
+struct vnic_rq {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_dev_ring ring;
+ struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
+ struct vnic_rq_buf *to_use;
+ struct vnic_rq_buf *to_clean;
+ void *os_buf_head;
+ unsigned int pkts_outstanding;
+};
+
+static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
+{
+ /* how many does SW own? */
+ return rq->ring.desc_avail;
+}
+
+static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
+{
+ /* how many does HW own? */
+ return rq->ring.desc_count - rq->ring.desc_avail - 1;
+}
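+
+/*
+ * Example (illustrative): with desc_count = 256, a freshly initialized
+ * ring has desc_avail = 255 and desc_used = 0; one slot is always held
+ * back so a completely full ring can be told apart from an empty one.
+ */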
+
+static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
+{
+ return rq->to_use->desc;
+}
+
+static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
+{
+ return rq->to_use->index;
+}
+
+static inline void vnic_rq_post(struct vnic_rq *rq,
+ void *os_buf, unsigned int os_buf_index,
+ dma_addr_t dma_addr, unsigned int len)
+{
+ struct vnic_rq_buf *buf = rq->to_use;
+
+ buf->os_buf = os_buf;
+ buf->os_buf_index = os_buf_index;
+ buf->dma_addr = dma_addr;
+ buf->len = len;
+
+ buf = buf->next;
+ rq->to_use = buf;
+ rq->ring.desc_avail--;
+
+	/* Move the posted_index every nth descriptor */
+
+#ifndef VNIC_RQ_RETURN_RATE
+#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */
+#endif
+
+ if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
+ /* Adding write memory barrier prevents compiler and/or CPU
+ * reordering, thus avoiding descriptor posting before
+ * descriptor is initialized. Otherwise, hardware can read
+ * stale descriptor fields.
+ */
+ wmb();
+ iowrite32(buf->index, &rq->ctrl->posted_index);
+ }
+}
+
+static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
+{
+ rq->ring.desc_avail += count;
+}
+
+enum desc_return_options {
+ VNIC_RQ_RETURN_DESC,
+ VNIC_RQ_DEFER_RETURN_DESC,
+};
+
+static inline void vnic_rq_service(struct vnic_rq *rq,
+ struct cq_desc *cq_desc, u16 completed_index,
+ int desc_return, void (*buf_service)(struct vnic_rq *rq,
+ struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
+ int skipped, void *opaque), void *opaque)
+{
+ struct vnic_rq_buf *buf;
+ int skipped;
+
+ buf = rq->to_clean;
+ while (1) {
+
+ skipped = (buf->index != completed_index);
+
+ (*buf_service)(rq, cq_desc, buf, skipped, opaque);
+
+ if (desc_return == VNIC_RQ_RETURN_DESC)
+ rq->ring.desc_avail++;
+
+ rq->to_clean = buf->next;
+
+ if (!skipped)
+ break;
+
+ buf = rq->to_clean;
+ }
+}
+
+static inline int vnic_rq_fill(struct vnic_rq *rq,
+ int (*buf_fill)(struct vnic_rq *rq))
+{
+ int err;
+
+ while (vnic_rq_desc_avail(rq) > 0) {
+
+ err = (*buf_fill)(rq);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void vnic_rq_free(struct vnic_rq *rq);
+int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+unsigned int vnic_rq_error_status(struct vnic_rq *rq);
+void vnic_rq_enable(struct vnic_rq *rq);
+int vnic_rq_disable(struct vnic_rq *rq);
+void vnic_rq_clean(struct vnic_rq *rq,
+ void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
+
+#endif /* _VNIC_RQ_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rss.h b/drivers/net/ethernet/cisco/enic/vnic_rss.h
new file mode 100644
index 000000000000..fa421baf45b8
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/vnic_rss.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VNIC_RSS_H_
+#define _VNIC_RSS_H_
+
+/* RSS key array */
+union vnic_rss_key {
+ struct {
+ u8 b[10];
+ u8 b_pad[6];
+ } key[4];
+ u64 raw[8];
+};
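+
+/* i.e. a 40-byte key stored as four 10-byte segments, each padded to
+ * 16 bytes (64 bytes total, matching raw[8]).
+ */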
+
+/* RSS cpu array */
+union vnic_rss_cpu {
+ struct {
+ u8 b[4];
+ u8 b_pad[4];
+ } cpu[32];
+ u64 raw[32];
+};
+
+#endif /* _VNIC_RSS_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_stats.h b/drivers/net/ethernet/cisco/enic/vnic_stats.h
new file mode 100644
index 000000000000..77750ec93954
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/vnic_stats.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_STATS_H_
+#define _VNIC_STATS_H_
+
+/* Tx statistics */
+struct vnic_tx_stats {
+ u64 tx_frames_ok;
+ u64 tx_unicast_frames_ok;
+ u64 tx_multicast_frames_ok;
+ u64 tx_broadcast_frames_ok;
+ u64 tx_bytes_ok;
+ u64 tx_unicast_bytes_ok;
+ u64 tx_multicast_bytes_ok;
+ u64 tx_broadcast_bytes_ok;
+ u64 tx_drops;
+ u64 tx_errors;
+ u64 tx_tso;
+ u64 rsvd[16];
+};
+
+/* Rx statistics */
+struct vnic_rx_stats {
+ u64 rx_frames_ok;
+ u64 rx_frames_total;
+ u64 rx_unicast_frames_ok;
+ u64 rx_multicast_frames_ok;
+ u64 rx_broadcast_frames_ok;
+ u64 rx_bytes_ok;
+ u64 rx_unicast_bytes_ok;
+ u64 rx_multicast_bytes_ok;
+ u64 rx_broadcast_bytes_ok;
+ u64 rx_drop;
+ u64 rx_no_bufs;
+ u64 rx_errors;
+ u64 rx_rss;
+ u64 rx_crc_errors;
+ u64 rx_frames_64;
+ u64 rx_frames_127;
+ u64 rx_frames_255;
+ u64 rx_frames_511;
+ u64 rx_frames_1023;
+ u64 rx_frames_1518;
+ u64 rx_frames_to_max;
+ u64 rsvd[16];
+};
+
+struct vnic_stats {
+ struct vnic_tx_stats tx;
+ struct vnic_rx_stats rx;
+};
+
+#endif /* _VNIC_STATS_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.c b/drivers/net/ethernet/cisco/enic/vnic_vic.c
new file mode 100644
index 000000000000..24ef8cd40545
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/vnic_vic.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include "vnic_vic.h"
+
+struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, const u8 *oui,
+ const u8 type)
+{
+ struct vic_provinfo *vp;
+
+ if (!oui)
+ return NULL;
+
+ vp = kzalloc(VIC_PROVINFO_MAX_DATA, flags);
+ if (!vp)
+ return NULL;
+
+ memcpy(vp->oui, oui, sizeof(vp->oui));
+ vp->type = type;
+ vp->length = htonl(sizeof(vp->num_tlvs));
+
+ return vp;
+}
+
+void vic_provinfo_free(struct vic_provinfo *vp)
+{
+ kfree(vp);
+}
+
+int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
+ const void *value)
+{
+ struct vic_provinfo_tlv *tlv;
+
+ if (!vp || !value)
+ return -EINVAL;
+
+ if (ntohl(vp->length) + offsetof(struct vic_provinfo_tlv, value) +
+ length > VIC_PROVINFO_MAX_TLV_DATA)
+ return -ENOMEM;
+
+ tlv = (struct vic_provinfo_tlv *)((u8 *)vp->tlv +
+ ntohl(vp->length) - sizeof(vp->num_tlvs));
+
+ tlv->type = htons(type);
+ tlv->length = htons(length);
+ memcpy(tlv->value, value, length);
+
+ vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1);
+ vp->length = htonl(ntohl(vp->length) +
+ offsetof(struct vic_provinfo_tlv, value) + length);
+
+ return 0;
+}
+
+size_t vic_provinfo_size(struct vic_provinfo *vp)
+{
+ return vp ? ntohl(vp->length) + sizeof(*vp) - sizeof(vp->num_tlvs) : 0;
+}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.h b/drivers/net/ethernet/cisco/enic/vnic_vic.h
new file mode 100644
index 000000000000..9ef81f148351
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/vnic_vic.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_VIC_H_
+#define _VNIC_VIC_H_
+
+/* Note: All integer fields in NETWORK byte order */
+
+/* Note: String field lengths include null char */
+
+#define VIC_PROVINFO_CISCO_OUI { 0x00, 0x00, 0x0c }
+#define VIC_PROVINFO_GENERIC_TYPE 0x4
+
+enum vic_generic_prov_tlv_type {
+ VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR = 0,
+ VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR = 1,
+ VIC_GENERIC_PROV_TLV_CLIENT_NAME_STR = 2,
+ VIC_GENERIC_PROV_TLV_CLUSTER_PORT_NAME_STR = 3,
+ VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR = 4,
+ VIC_GENERIC_PROV_TLV_CLUSTER_UUID_STR = 5,
+ VIC_GENERIC_PROV_TLV_CLUSTER_NAME_STR = 7,
+ VIC_GENERIC_PROV_TLV_HOST_UUID_STR = 8,
+ VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR = 9,
+ VIC_GENERIC_PROV_TLV_INCARNATION_NUMBER = 10,
+ VIC_GENERIC_PROV_TLV_OS_TYPE = 11,
+ VIC_GENERIC_PROV_TLV_OS_VENDOR = 12,
+ VIC_GENERIC_PROV_TLV_CLIENT_TYPE = 15,
+};
+
+enum vic_generic_prov_os_type {
+ VIC_GENERIC_PROV_OS_TYPE_UNKNOWN = 0,
+ VIC_GENERIC_PROV_OS_TYPE_ESX = 1,
+ VIC_GENERIC_PROV_OS_TYPE_LINUX = 2,
+ VIC_GENERIC_PROV_OS_TYPE_WINDOWS = 3,
+ VIC_GENERIC_PROV_OS_TYPE_SOLARIS = 4,
+};
+
+struct vic_provinfo {
+ u8 oui[3]; /* OUI of data provider */
+ u8 type; /* provider-specific type */
+ u32 length; /* length of data below */
+ u32 num_tlvs; /* number of tlvs */
+ struct vic_provinfo_tlv {
+ u16 type;
+ u16 length;
+ u8 value[0];
+ } tlv[0];
+} __packed;
+
+#define VIC_PROVINFO_ADD_TLV(vp, tlvtype, tlvlen, data) \
+ do { \
+ err = vic_provinfo_add_tlv(vp, tlvtype, tlvlen, data); \
+ if (err) \
+ goto add_tlv_failure; \
+ } while (0)
+
+#define VIC_PROVINFO_MAX_DATA 1385
+#define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \
+ sizeof(struct vic_provinfo))
+
+struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, const u8 *oui,
+ const u8 type);
+void vic_provinfo_free(struct vic_provinfo *vp);
+int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
+ const void *value);
+size_t vic_provinfo_size(struct vic_provinfo *vp);
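+
+/*
+ * Usage sketch (hypothetical caller): VIC_PROVINFO_ADD_TLV expects an
+ * 'err' variable and an 'add_tlv_failure' label in scope. Assumes the
+ * includer provides GFP_KERNEL.
+ */
+static inline struct vic_provinfo *vic_provinfo_example(void)
+{
+	const u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
+	static const char name[] = "client0";	/* length includes null */
+	struct vic_provinfo *vp;
+	int err;
+
+	vp = vic_provinfo_alloc(GFP_KERNEL, oui, VIC_PROVINFO_GENERIC_TYPE);
+	if (!vp)
+		return NULL;
+
+	VIC_PROVINFO_ADD_TLV(vp, VIC_GENERIC_PROV_TLV_CLIENT_NAME_STR,
+		sizeof(name), name);
+
+	return vp;
+
+add_tlv_failure:
+	vic_provinfo_free(vp);
+	return NULL;
+}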
+
+#endif /* _VNIC_VIC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
new file mode 100644
index 000000000000..df61bd932ea6
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+
+static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
+{
+ struct vnic_wq_buf *buf;
+ struct vnic_dev *vdev;
+ unsigned int i, j, count = wq->ring.desc_count;
+ unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
+
+ vdev = wq->vdev;
+
+ for (i = 0; i < blks; i++) {
+ wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
+ if (!wq->bufs[i]) {
+ pr_err("Failed to alloc wq_bufs\n");
+ return -ENOMEM;
+ }
+ }
+
+ for (i = 0; i < blks; i++) {
+ buf = wq->bufs[i];
+ for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) {
+ buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j;
+ buf->desc = (u8 *)wq->ring.descs +
+ wq->ring.desc_size * buf->index;
+ if (buf->index + 1 == count) {
+ buf->next = wq->bufs[0];
+ break;
+ } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
+ buf->next = wq->bufs[i + 1];
+ } else {
+ buf->next = buf + 1;
+ buf++;
+ }
+ }
+ }
+
+ wq->to_use = wq->to_clean = wq->bufs[0];
+
+ return 0;
+}
+
+void vnic_wq_free(struct vnic_wq *wq)
+{
+ struct vnic_dev *vdev;
+ unsigned int i;
+
+ vdev = wq->vdev;
+
+ vnic_dev_free_desc_ring(vdev, &wq->ring);
+
+ for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
+ if (wq->bufs[i]) {
+ kfree(wq->bufs[i]);
+ wq->bufs[i] = NULL;
+ }
+ }
+
+ wq->ctrl = NULL;
+}
+
+int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+
+ wq->index = index;
+ wq->vdev = vdev;
+
+ wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
+ if (!wq->ctrl) {
+ pr_err("Failed to hook WQ[%d] resource\n", index);
+ return -EINVAL;
+ }
+
+ vnic_wq_disable(wq);
+
+ err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+ if (err)
+ return err;
+
+ err = vnic_wq_alloc_bufs(wq);
+ if (err) {
+ vnic_wq_free(wq);
+ return err;
+ }
+
+ return 0;
+}
+
+static void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ u64 paddr;
+ unsigned int count = wq->ring.desc_count;
+
+ paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
+ writeq(paddr, &wq->ctrl->ring_base);
+ iowrite32(count, &wq->ctrl->ring_size);
+ iowrite32(fetch_index, &wq->ctrl->fetch_index);
+ iowrite32(posted_index, &wq->ctrl->posted_index);
+ iowrite32(cq_index, &wq->ctrl->cq_index);
+ iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
+ iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
+ iowrite32(0, &wq->ctrl->error_status);
+
+ wq->to_use = wq->to_clean =
+ &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
+ [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
+}
+
+void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
+{
+ vnic_wq_init_start(wq, cq_index, 0, 0,
+ error_interrupt_enable,
+ error_interrupt_offset);
+}
+
+unsigned int vnic_wq_error_status(struct vnic_wq *wq)
+{
+ return ioread32(&wq->ctrl->error_status);
+}
+
+void vnic_wq_enable(struct vnic_wq *wq)
+{
+ iowrite32(1, &wq->ctrl->enable);
+}
+
+int vnic_wq_disable(struct vnic_wq *wq)
+{
+ unsigned int wait;
+
+ iowrite32(0, &wq->ctrl->enable);
+
+ /* Wait for HW to ACK disable request */
+ for (wait = 0; wait < 1000; wait++) {
+ if (!(ioread32(&wq->ctrl->running)))
+ return 0;
+ udelay(10);
+ }
+
+ pr_err("Failed to disable WQ[%d]\n", wq->index);
+
+ return -ETIMEDOUT;
+}
+
+void vnic_wq_clean(struct vnic_wq *wq,
+ void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
+{
+ struct vnic_wq_buf *buf;
+
+ buf = wq->to_clean;
+
+ while (vnic_wq_desc_used(wq) > 0) {
+
+ (*buf_clean)(wq, buf);
+
+ buf = wq->to_clean = buf->next;
+ wq->ring.desc_avail++;
+ }
+
+ wq->to_use = wq->to_clean = wq->bufs[0];
+
+ iowrite32(0, &wq->ctrl->fetch_index);
+ iowrite32(0, &wq->ctrl->posted_index);
+ iowrite32(0, &wq->ctrl->error_status);
+
+ vnic_dev_clear_desc_ring(&wq->ring);
+}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
new file mode 100644
index 000000000000..7dd937ac11c2
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_WQ_H_
+#define _VNIC_WQ_H_
+
+#include <linux/pci.h>
+
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/* Work queue control */
+struct vnic_wq_ctrl {
+ u64 ring_base; /* 0x00 */
+ u32 ring_size; /* 0x08 */
+ u32 pad0;
+ u32 posted_index; /* 0x10 */
+ u32 pad1;
+ u32 cq_index; /* 0x18 */
+ u32 pad2;
+ u32 enable; /* 0x20 */
+ u32 pad3;
+ u32 running; /* 0x28 */
+ u32 pad4;
+ u32 fetch_index; /* 0x30 */
+ u32 pad5;
+ u32 dca_value; /* 0x38 */
+ u32 pad6;
+ u32 error_interrupt_enable; /* 0x40 */
+ u32 pad7;
+ u32 error_interrupt_offset; /* 0x48 */
+ u32 pad8;
+ u32 error_status; /* 0x50 */
+ u32 pad9;
+};
+
+struct vnic_wq_buf {
+ struct vnic_wq_buf *next;
+ dma_addr_t dma_addr;
+ void *os_buf;
+ unsigned int len;
+ unsigned int index;
+ int sop;
+ void *desc;
+};
+
+/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
+#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32
+#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64
+#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
+ ((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
+ VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES))
+#define VNIC_WQ_BUF_BLK_SZ(entries) \
+ (VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
+#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
+ DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
+#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
+
+struct vnic_wq {
+ unsigned int index;
+ struct vnic_dev *vdev;
+ struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_dev_ring ring;
+ struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
+ struct vnic_wq_buf *to_use;
+ struct vnic_wq_buf *to_clean;
+ unsigned int pkts_outstanding;
+};
+
+static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
+{
+ /* how many does SW own? */
+ return wq->ring.desc_avail;
+}
+
+static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
+{
+ /* how many does HW own? */
+ return wq->ring.desc_count - wq->ring.desc_avail - 1;
+}
+
+static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
+{
+ return wq->to_use->desc;
+}
+
+static inline void vnic_wq_post(struct vnic_wq *wq,
+ void *os_buf, dma_addr_t dma_addr,
+ unsigned int len, int sop, int eop)
+{
+ struct vnic_wq_buf *buf = wq->to_use;
+
+ buf->sop = sop;
+ buf->os_buf = eop ? os_buf : NULL;
+ buf->dma_addr = dma_addr;
+ buf->len = len;
+
+ buf = buf->next;
+ if (eop) {
+ /* Adding write memory barrier prevents compiler and/or CPU
+ * reordering, thus avoiding descriptor posting before
+ * descriptor is initialized. Otherwise, hardware can read
+ * stale descriptor fields.
+ */
+ wmb();
+ iowrite32(buf->index, &wq->ctrl->posted_index);
+ }
+ wq->to_use = buf;
+
+ wq->ring.desc_avail--;
+}
+
+static inline void vnic_wq_service(struct vnic_wq *wq,
+ struct cq_desc *cq_desc, u16 completed_index,
+ void (*buf_service)(struct vnic_wq *wq,
+ struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
+ void *opaque)
+{
+ struct vnic_wq_buf *buf;
+
+ buf = wq->to_clean;
+ while (1) {
+
+ (*buf_service)(wq, cq_desc, buf, opaque);
+
+ wq->ring.desc_avail++;
+
+ wq->to_clean = buf->next;
+
+ if (buf->index == completed_index)
+ break;
+
+ buf = wq->to_clean;
+ }
+}
+
+void vnic_wq_free(struct vnic_wq *wq);
+int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
+unsigned int vnic_wq_error_status(struct vnic_wq *wq);
+void vnic_wq_enable(struct vnic_wq *wq);
+int vnic_wq_disable(struct vnic_wq *wq);
+void vnic_wq_clean(struct vnic_wq *wq,
+ void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
+
+#endif /* _VNIC_WQ_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/wq_enet_desc.h b/drivers/net/ethernet/cisco/enic/wq_enet_desc.h
new file mode 100644
index 000000000000..c7021e3a631f
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/wq_enet_desc.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _WQ_ENET_DESC_H_
+#define _WQ_ENET_DESC_H_
+
+/* Ethernet work queue descriptor: 16B */
+struct wq_enet_desc {
+ __le64 address;
+ __le16 length;
+ __le16 mss_loopback;
+ __le16 header_length_flags;
+ __le16 vlan_tag;
+};
+
+#define WQ_ENET_ADDR_BITS 64
+#define WQ_ENET_LEN_BITS 14
+#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1)
+#define WQ_ENET_MSS_BITS 14
+#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1)
+#define WQ_ENET_MSS_SHIFT 2
+#define WQ_ENET_LOOPBACK_SHIFT 1
+#define WQ_ENET_HDRLEN_BITS 10
+#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1)
+#define WQ_ENET_FLAGS_OM_BITS 2
+#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
+#define WQ_ENET_FLAGS_EOP_SHIFT 12
+#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13
+#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14
+#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15
+
+#define WQ_ENET_OFFLOAD_MODE_CSUM 0
+#define WQ_ENET_OFFLOAD_MODE_RESERVED 1
+#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2
+#define WQ_ENET_OFFLOAD_MODE_TSO 3
+
+static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
+ u64 address, u16 length, u16 mss, u16 header_length,
+ u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
+ u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
+{
+ desc->address = cpu_to_le64(address);
+ desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
+ desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
+ WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
+ desc->header_length_flags = cpu_to_le16(
+ (header_length & WQ_ENET_HDRLEN_MASK) |
+ (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
+ (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
+ (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
+ (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
+ (vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
+ desc->vlan_tag = cpu_to_le16(vlan_tag);
+}
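+
+/* Illustrative helper (hypothetical; not used anywhere in the driver):
+ * encode a plain single-buffer descriptor with no TSO, no VLAN
+ * insertion and no FCoE encapsulation, requesting a completion entry
+ * on the final fragment.
+ */
+static inline void wq_enet_desc_enc_simple(struct wq_enet_desc *desc,
+ u64 dma_addr, u16 len, u8 eop)
+{
+ wq_enet_desc_enc(desc, dma_addr, len,
+ 0, /* mss: only meaningful in TSO mode */
+ 0, /* header_length: only meaningful in TSO mode */
+ WQ_ENET_OFFLOAD_MODE_CSUM,
+ eop, /* end of packet */
+ eop, /* post a CQ entry for the last fragment */
+ 0, /* fcoe_encap */
+ 0, /* vlan_tag_insert */
+ 0, /* vlan_tag */
+ 0); /* loopback */
+}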
+
+static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
+ u64 *address, u16 *length, u16 *mss, u16 *header_length,
+ u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
+ u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
+{
+ *address = le64_to_cpu(desc->address);
+ *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
+ *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
+ WQ_ENET_MSS_MASK;
+ *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
+ WQ_ENET_LOOPBACK_SHIFT) & 1);
+ *header_length = le16_to_cpu(desc->header_length_flags) &
+ WQ_ENET_HDRLEN_MASK;
+ *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
+ *eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_EOP_SHIFT) & 1);
+ *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
+ *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
+ *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
+ WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
+ *vlan_tag = le16_to_cpu(desc->vlan_tag);
+}
+
+#endif /* _WQ_ENET_DESC_H_ */
diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig
new file mode 100644
index 000000000000..972b62b31837
--- /dev/null
+++ b/drivers/net/ethernet/davicom/Kconfig
@@ -0,0 +1,24 @@
+#
+# Davicom device configuration
+#
+
+config DM9000
+ tristate "DM9000 support"
+ depends on ARM || BLACKFIN || MIPS
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ Support for DM9000 chipset.
+
+ To compile this driver as a module, choose M here. The module
+ will be called dm9000.
+
+config DM9000_FORCE_SIMPLE_PHY_POLL
+ bool "Force simple NSR based PHY polling"
+ depends on DM9000
+ ---help---
+ This configuration forces the DM9000 to use the NSR's LinkStatus
+ bit to determine if the link is up or down instead of the more
+ costly MII PHY reads. Note, this will not work if the chip is
+ operating with an external PHY.
diff --git a/drivers/net/ethernet/davicom/Makefile b/drivers/net/ethernet/davicom/Makefile
new file mode 100644
index 000000000000..74b31f0ebe18
--- /dev/null
+++ b/drivers/net/ethernet/davicom/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Davicom device drivers.
+#
+
+obj-$(CONFIG_DM9000) += dm9000.o
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
new file mode 100644
index 000000000000..438f4580bf66
--- /dev/null
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -0,0 +1,1710 @@
+/*
+ * Davicom DM9000 Fast Ethernet driver for Linux.
+ * Copyright (C) 1997 Sten Wang
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
+ *
+ * Additional updates, Copyright:
+ * Ben Dooks <ben@simtec.co.uk>
+ * Sascha Hauer <s.hauer@pengutronix.de>
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/dm9000.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+
+#include <asm/delay.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+
+#include "dm9000.h"
+
+/* Board/System/Debug information/definition ---------------- */
+
+#define DM9000_PHY 0x40 /* PHY address 0x01 */
+
+#define CARDNAME "dm9000"
+#define DRV_VERSION "1.31"
+
+/*
+ * Transmit timeout, default 5 seconds.
+ */
+static int watchdog = 5000;
+module_param(watchdog, int, 0400);
+MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
+
+/*
+ * Debug messages level
+ */
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "dm9000 debug level (0-4)");
+
+/* DM9000 register address locking.
+ *
+ * The DM9000 uses an address register to control where data written
+ * to the data register goes. This means that the address register
+ * must be preserved over interrupts or similar calls.
+ *
+ * During interrupt and other critical calls, a spinlock is used to
+ * protect the system, but the calls themselves save the address
+ * in the address register in case they are interrupting another
+ * access to the device.
+ *
+ * For general accesses a lock is provided so that calls which are
+ * allowed to sleep are serialised, which means the address register
+ * does not need to be saved. This lock also serialises access to
+ * the EEPROM and PHY access registers, which are shared between
+ * those two devices.
+ */
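+
+/* The resulting save/restore pattern, used throughout the driver, is:
+ *
+ *   reg_save = readb(db->io_addr);   (save the selected register)
+ *   ... indexed ior()/iow() accesses under db->lock ...
+ *   writeb(reg_save, db->io_addr);   (reselect the interrupted register)
+ *
+ * as can be seen in dm9000_interrupt() and dm9000_phy_read() below.
+ */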
+
+/* The driver supports the original DM9000E, and now the two newer
+ * devices, DM9000A and DM9000B.
+ */
+
+enum dm9000_type {
+ TYPE_DM9000E, /* original DM9000 */
+ TYPE_DM9000A,
+ TYPE_DM9000B
+};
+
+/* Structure/enum declaration ------------------------------- */
+typedef struct board_info {
+
+ void __iomem *io_addr; /* Register I/O base address */
+ void __iomem *io_data; /* Data I/O address */
+ u16 irq; /* IRQ */
+
+ u16 tx_pkt_cnt;
+ u16 queue_pkt_len;
+ u16 queue_start_addr;
+ u16 queue_ip_summed;
+ u16 dbug_cnt;
+ u8 io_mode; /* 0:word, 2:byte */
+ u8 phy_addr;
+ u8 imr_all;
+
+ unsigned int flags;
+ unsigned int in_suspend :1;
+ unsigned int wake_supported :1;
+
+ enum dm9000_type type;
+
+ void (*inblk)(void __iomem *port, void *data, int length);
+ void (*outblk)(void __iomem *port, void *data, int length);
+ void (*dumpblk)(void __iomem *port, int length);
+
+ struct device *dev; /* parent device */
+
+ struct resource *addr_res; /* resources found */
+ struct resource *data_res;
+ struct resource *addr_req; /* resources requested */
+ struct resource *data_req;
+ struct resource *irq_res;
+
+ int irq_wake;
+
+ struct mutex addr_lock; /* phy and eeprom access lock */
+
+ struct delayed_work phy_poll;
+ struct net_device *ndev;
+
+ spinlock_t lock;
+
+ struct mii_if_info mii;
+ u32 msg_enable;
+ u32 wake_state;
+
+ int ip_summed;
+} board_info_t;
+
+/* debug code */
+
+#define dm9000_dbg(db, lev, msg...) do { \
+ if ((lev) < debug) { \
+ dev_dbg(db->dev, msg); \
+ } \
+} while (0)
+
+static inline board_info_t *to_dm9000_board(struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+/* DM9000 network board routine ---------------------------- */
+
+static void
+dm9000_reset(board_info_t *db)
+{
+ dev_dbg(db->dev, "resetting device\n");
+
+ /* RESET device */
+ writeb(DM9000_NCR, db->io_addr);
+ udelay(200);
+ writeb(NCR_RST, db->io_data);
+ udelay(200);
+}
+
+/*
+ * Read a byte from I/O port
+ */
+static u8
+ior(board_info_t *db, int reg)
+{
+ writeb(reg, db->io_addr);
+ return readb(db->io_data);
+}
+
+/*
+ * Write a byte to I/O port
+ */
+
+static void
+iow(board_info_t *db, int reg, int value)
+{
+ writeb(reg, db->io_addr);
+ writeb(value, db->io_data);
+}
+
+/* routines for sending block to chip */
+
+static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
+{
+ writesb(reg, data, count);
+}
+
+static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
+{
+ writesw(reg, data, (count+1) >> 1);
+}
+
+static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
+{
+ writesl(reg, data, (count+3) >> 2);
+}
+
+/* input block from chip to memory */
+
+static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
+{
+ readsb(reg, data, count);
+}
+
+static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
+{
+ readsw(reg, data, (count+1) >> 1);
+}
+
+static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
+{
+ readsl(reg, data, (count+3) >> 2);
+}
+
+/* dump block from chip to null */
+
+static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
+{
+ int i;
+ int tmp;
+
+ for (i = 0; i < count; i++)
+ tmp = readb(reg);
+}
+
+static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
+{
+ int i;
+ int tmp;
+
+ count = (count + 1) >> 1;
+
+ for (i = 0; i < count; i++)
+ tmp = readw(reg);
+}
+
+static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
+{
+ int i;
+ int tmp;
+
+ count = (count + 3) >> 2;
+
+ for (i = 0; i < count; i++)
+ tmp = readl(reg);
+}
+
+/* dm9000_set_io
+ *
+ * select the specified set of io routines to use with the
+ * device
+ */
+
+static void dm9000_set_io(struct board_info *db, int byte_width)
+{
+ /* use the size of the data resource to work out what IO
+ * routines we want to use
+ */
+
+ switch (byte_width) {
+ case 1:
+ db->dumpblk = dm9000_dumpblk_8bit;
+ db->outblk = dm9000_outblk_8bit;
+ db->inblk = dm9000_inblk_8bit;
+ break;
+
+ case 3:
+ dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
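+ /* fall through */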
+ case 2:
+ db->dumpblk = dm9000_dumpblk_16bit;
+ db->outblk = dm9000_outblk_16bit;
+ db->inblk = dm9000_inblk_16bit;
+ break;
+
+ case 4:
+ default:
+ db->dumpblk = dm9000_dumpblk_32bit;
+ db->outblk = dm9000_outblk_32bit;
+ db->inblk = dm9000_inblk_32bit;
+ break;
+ }
+}
+
+static void dm9000_schedule_poll(board_info_t *db)
+{
+ if (db->type == TYPE_DM9000E)
+ schedule_delayed_work(&db->phy_poll, HZ * 2);
+}
+
+static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ board_info_t *dm = to_dm9000_board(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
+}
+
+static unsigned int
+dm9000_read_locked(board_info_t *db, int reg)
+{
+ unsigned long flags;
+ unsigned int ret;
+
+ spin_lock_irqsave(&db->lock, flags);
+ ret = ior(db, reg);
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ return ret;
+}
+
+static int dm9000_wait_eeprom(board_info_t *db)
+{
+ unsigned int status;
+ int timeout = 8; /* wait max 8msec */
+
+ /* The DM9000 data sheets say we should be able to
+ * poll the ERRE bit in EPCR to wait for the EEPROM
+ * operation. From testing several chips, this bit
+ * does not seem to work.
+ *
+ * We attempt to use the bit, but fall back to the
+ * timeout (which is why we do not return an error
+ * on expiry) to say that the EEPROM operation has
+ * completed.
+ */
+
+ while (1) {
+ status = dm9000_read_locked(db, DM9000_EPCR);
+
+ if ((status & EPCR_ERRE) == 0)
+ break;
+
+ msleep(1);
+
+ if (timeout-- < 0) {
+ dev_dbg(db->dev, "timeout waiting EEPROM\n");
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Read a data word from the EEPROM
+ */
+static void
+dm9000_read_eeprom(board_info_t *db, int offset, u8 *to)
+{
+ unsigned long flags;
+
+ if (db->flags & DM9000_PLATF_NO_EEPROM) {
+ to[0] = 0xff;
+ to[1] = 0xff;
+ return;
+ }
+
+ mutex_lock(&db->addr_lock);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ iow(db, DM9000_EPAR, offset);
+ iow(db, DM9000_EPCR, EPCR_ERPRR);
+
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ dm9000_wait_eeprom(db);
+
+ /* delay for at least 150us */
+ msleep(1);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ iow(db, DM9000_EPCR, 0x0);
+
+ to[0] = ior(db, DM9000_EPDRL);
+ to[1] = ior(db, DM9000_EPDRH);
+
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ mutex_unlock(&db->addr_lock);
+}
+
+/*
+ * Write a data word to the EEPROM
+ */
+static void
+dm9000_write_eeprom(board_info_t *db, int offset, u8 *data)
+{
+ unsigned long flags;
+
+ if (db->flags & DM9000_PLATF_NO_EEPROM)
+ return;
+
+ mutex_lock(&db->addr_lock);
+
+ spin_lock_irqsave(&db->lock, flags);
+ iow(db, DM9000_EPAR, offset);
+ iow(db, DM9000_EPDRH, data[1]);
+ iow(db, DM9000_EPDRL, data[0]);
+ iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ dm9000_wait_eeprom(db);
+
+ mdelay(1); /* wait at least 150us to clear */
+
+ spin_lock_irqsave(&db->lock, flags);
+ iow(db, DM9000_EPCR, 0);
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ mutex_unlock(&db->addr_lock);
+}
+
+/* ethtool ops */
+
+static void dm9000_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ board_info_t *dm = to_dm9000_board(dev);
+
+ strcpy(info->driver, CARDNAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, to_platform_device(dm->dev)->name);
+}
+
+static u32 dm9000_get_msglevel(struct net_device *dev)
+{
+ board_info_t *dm = to_dm9000_board(dev);
+
+ return dm->msg_enable;
+}
+
+static void dm9000_set_msglevel(struct net_device *dev, u32 value)
+{
+ board_info_t *dm = to_dm9000_board(dev);
+
+ dm->msg_enable = value;
+}
+
+static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ board_info_t *dm = to_dm9000_board(dev);
+
+ mii_ethtool_gset(&dm->mii, cmd);
+ return 0;
+}
+
+static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ board_info_t *dm = to_dm9000_board(dev);
+
+ return mii_ethtool_sset(&dm->mii, cmd);
+}
+
+static int dm9000_nway_reset(struct net_device *dev)
+{
+ board_info_t *dm = to_dm9000_board(dev);
+ return mii_nway_restart(&dm->mii);
+}
+
+static int dm9000_set_features(struct net_device *dev, u32 features)
+{
+ board_info_t *dm = to_dm9000_board(dev);
+ u32 changed = dev->features ^ features;
+ unsigned long flags;
+
+ if (!(changed & NETIF_F_RXCSUM))
+ return 0;
+
+ spin_lock_irqsave(&dm->lock, flags);
+ iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
+ spin_unlock_irqrestore(&dm->lock, flags);
+
+ return 0;
+}
+
+static u32 dm9000_get_link(struct net_device *dev)
+{
+ board_info_t *dm = to_dm9000_board(dev);
+ u32 ret;
+
+ if (dm->flags & DM9000_PLATF_EXT_PHY)
+ ret = mii_link_ok(&dm->mii);
+ else
+ ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;
+
+ return ret;
+}
+
+#define DM_EEPROM_MAGIC (0x444D394B)
+
+static int dm9000_get_eeprom_len(struct net_device *dev)
+{
+ return 128;
+}
+
+static int dm9000_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ board_info_t *dm = to_dm9000_board(dev);
+ int offset = ee->offset;
+ int len = ee->len;
+ int i;
+
+ /* EEPROM access is aligned to two bytes */
+
+ if ((len & 1) != 0 || (offset & 1) != 0)
+ return -EINVAL;
+
+ if (dm->flags & DM9000_PLATF_NO_EEPROM)
+ return -ENOENT;
+
+ ee->magic = DM_EEPROM_MAGIC;
+
+ for (i = 0; i < len; i += 2)
+ dm9000_read_eeprom(dm, (offset + i) / 2, data + i);
+
+ return 0;
+}
+
+static int dm9000_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ board_info_t *dm = to_dm9000_board(dev);
+ int offset = ee->offset;
+ int len = ee->len;
+ int done;
+
+ /* EEPROM access is aligned to two bytes */
+
+ if (dm->flags & DM9000_PLATF_NO_EEPROM)
+ return -ENOENT;
+
+ if (ee->magic != DM_EEPROM_MAGIC)
+ return -EINVAL;
+
+ while (len > 0) {
+ if (len & 1 || offset & 1) {
+ int which = offset & 1;
+ u8 tmp[2];
+
+ dm9000_read_eeprom(dm, offset / 2, tmp);
+ tmp[which] = *data;
+ dm9000_write_eeprom(dm, offset / 2, tmp);
+
+ done = 1;
+ } else {
+ dm9000_write_eeprom(dm, offset / 2, data);
+ done = 2;
+ }
+
+ data += done;
+ offset += done;
+ len -= done;
+ }
+
+ return 0;
+}
+
+static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
+{
+ board_info_t *dm = to_dm9000_board(dev);
+
+ memset(w, 0, sizeof(struct ethtool_wolinfo));
+
+ /* note, we could probably support wake-phy too */
+ w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
+ w->wolopts = dm->wake_state;
+}
+
+static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
+{
+ board_info_t *dm = to_dm9000_board(dev);
+ unsigned long flags;
+ u32 opts = w->wolopts;
+ u32 wcr = 0;
+
+ if (!dm->wake_supported)
+ return -EOPNOTSUPP;
+
+ if (opts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ if (opts & WAKE_MAGIC)
+ wcr |= WCR_MAGICEN;
+
+ mutex_lock(&dm->addr_lock);
+
+ spin_lock_irqsave(&dm->lock, flags);
+ iow(dm, DM9000_WCR, wcr);
+ spin_unlock_irqrestore(&dm->lock, flags);
+
+ mutex_unlock(&dm->addr_lock);
+
+ if (dm->wake_state != opts) {
+ /* change in wol state, update IRQ state */
+
+ if (!dm->wake_state)
+ irq_set_irq_wake(dm->irq_wake, 1);
+ else if (dm->wake_state && !opts)
+ irq_set_irq_wake(dm->irq_wake, 0);
+ }
+
+ dm->wake_state = opts;
+ return 0;
+}
+
+static const struct ethtool_ops dm9000_ethtool_ops = {
+ .get_drvinfo = dm9000_get_drvinfo,
+ .get_settings = dm9000_get_settings,
+ .set_settings = dm9000_set_settings,
+ .get_msglevel = dm9000_get_msglevel,
+ .set_msglevel = dm9000_set_msglevel,
+ .nway_reset = dm9000_nway_reset,
+ .get_link = dm9000_get_link,
+ .get_wol = dm9000_get_wol,
+ .set_wol = dm9000_set_wol,
+ .get_eeprom_len = dm9000_get_eeprom_len,
+ .get_eeprom = dm9000_get_eeprom,
+ .set_eeprom = dm9000_set_eeprom,
+};
+
+static void dm9000_show_carrier(board_info_t *db,
+ unsigned carrier, unsigned nsr)
+{
+ struct net_device *ndev = db->ndev;
+ unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
+
+ if (carrier)
+ dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n",
+ ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
+ (ncr & NCR_FDX) ? "full" : "half");
+ else
+ dev_info(db->dev, "%s: link down\n", ndev->name);
+}
+
+static void
+dm9000_poll_work(struct work_struct *w)
+{
+ struct delayed_work *dw = to_delayed_work(w);
+ board_info_t *db = container_of(dw, board_info_t, phy_poll);
+ struct net_device *ndev = db->ndev;
+
+ if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
+ !(db->flags & DM9000_PLATF_EXT_PHY)) {
+ unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
+ unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
+ unsigned new_carrier;
+
+ new_carrier = (nsr & NSR_LINKST) ? 1 : 0;
+
+ if (old_carrier != new_carrier) {
+ if (netif_msg_link(db))
+ dm9000_show_carrier(db, new_carrier, nsr);
+
+ if (!new_carrier)
+ netif_carrier_off(ndev);
+ else
+ netif_carrier_on(ndev);
+ }
+ } else
+ mii_check_media(&db->mii, netif_msg_link(db), 0);
+
+ if (netif_running(ndev))
+ dm9000_schedule_poll(db);
+}
+
+/* dm9000_release_board
+ *
+ * release a board, and any mapped resources
+ */
+
+static void
+dm9000_release_board(struct platform_device *pdev, struct board_info *db)
+{
+ /* unmap our resources */
+
+ iounmap(db->io_addr);
+ iounmap(db->io_data);
+
+ /* release the resources */
+
+ release_resource(db->data_req);
+ kfree(db->data_req);
+
+ release_resource(db->addr_req);
+ kfree(db->addr_req);
+}
+
+static unsigned char dm9000_type_to_char(enum dm9000_type type)
+{
+ switch (type) {
+ case TYPE_DM9000E: return 'e';
+ case TYPE_DM9000A: return 'a';
+ case TYPE_DM9000B: return 'b';
+ }
+
+ return '?';
+}
+
+/*
+ * Set DM9000 multicast address
+ */
+static void
+dm9000_hash_table_unlocked(struct net_device *dev)
+{
+ board_info_t *db = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ int i, oft;
+ u32 hash_val;
+ u16 hash_table[4];
+ u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
+
+ dm9000_dbg(db, 1, "entering %s\n", __func__);
+
+ for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
+ iow(db, oft, dev->dev_addr[i]);
+
+ /* Clear Hash Table */
+ for (i = 0; i < 4; i++)
+ hash_table[i] = 0x0;
+
+ /* broadcast address */
+ hash_table[3] = 0x8000;
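+ /* (the CRC hash of the broadcast address lands in bin 63,
+ * i.e. hash_table[3] bit 15, so presetting that bit always
+ * accepts broadcast frames) */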
+
+ if (dev->flags & IFF_PROMISC)
+ rcr |= RCR_PRMSC;
+
+ if (dev->flags & IFF_ALLMULTI)
+ rcr |= RCR_ALL;
+
+ /* add each multicast address to the 64-bit hash table */
+ netdev_for_each_mc_addr(ha, dev) {
+ hash_val = ether_crc_le(6, ha->addr) & 0x3f;
+ hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
+ }
+
+ /* Write the hash table to MAC MD table */
+ for (i = 0, oft = DM9000_MAR; i < 4; i++) {
+ iow(db, oft++, hash_table[i]);
+ iow(db, oft++, hash_table[i] >> 8);
+ }
+
+ iow(db, DM9000_RCR, rcr);
+}
+
+static void
+dm9000_hash_table(struct net_device *dev)
+{
+ board_info_t *db = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&db->lock, flags);
+ dm9000_hash_table_unlocked(dev);
+ spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/*
+ * Initialize dm9000 board
+ */
+static void
+dm9000_init_dm9000(struct net_device *dev)
+{
+ board_info_t *db = netdev_priv(dev);
+ unsigned int imr;
+ unsigned int ncr;
+
+ dm9000_dbg(db, 1, "entering %s\n", __func__);
+
+ /* I/O mode */
+ db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
+
+ /* Checksum mode */
+ if (dev->hw_features & NETIF_F_RXCSUM)
+ iow(db, DM9000_RCSR,
+ (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
+
+ iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
+
+ ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
+
+ /* If WoL is needed, always set NCR_WAKEEN; otherwise we end up
+ * dropping the wake events if this is disabled. There is already
+ * a wake mask in DM9000_WCR. */
+ if (db->wake_supported)
+ ncr |= NCR_WAKEEN;
+
+ iow(db, DM9000_NCR, ncr);
+
+ /* Program operating register */
+ iow(db, DM9000_TCR, 0); /* TX Polling clear */
+ iow(db, DM9000_BPTR, 0x3f); /* Less 3Kb, 200us */
+ iow(db, DM9000_FCR, 0xff); /* Flow Control */
+ iow(db, DM9000_SMCR, 0); /* Special Mode */
+ /* clear TX status */
+ iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
+ iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
+
+ /* Set address filter table */
+ dm9000_hash_table_unlocked(dev);
+
+ imr = IMR_PAR | IMR_PTM | IMR_PRM;
+ if (db->type != TYPE_DM9000E)
+ imr |= IMR_LNKCHNG;
+
+ db->imr_all = imr;
+
+ /* Enable TX/RX interrupt mask */
+ iow(db, DM9000_IMR, imr);
+
+ /* Init Driver variable */
+ db->tx_pkt_cnt = 0;
+ db->queue_pkt_len = 0;
+ dev->trans_start = jiffies;
+}
+
+/* Our watchdog timed out. Called by the networking layer */
+static void dm9000_timeout(struct net_device *dev)
+{
+ board_info_t *db = netdev_priv(dev);
+ u8 reg_save;
+ unsigned long flags;
+
+ /* Save previous register address */
+ spin_lock_irqsave(&db->lock, flags);
+ reg_save = readb(db->io_addr);
+
+ netif_stop_queue(dev);
+ dm9000_reset(db);
+ dm9000_init_dm9000(dev);
+ /* We can accept TX packets again */
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+
+ /* Restore previous register address */
+ writeb(reg_save, db->io_addr);
+ spin_unlock_irqrestore(&db->lock, flags);
+}
+
+static void dm9000_send_packet(struct net_device *dev,
+ int ip_summed,
+ u16 pkt_len)
+{
+ board_info_t *dm = to_dm9000_board(dev);
+
+ /* The DM9000 is not smart enough to leave fragmented packets alone. */
+ if (dm->ip_summed != ip_summed) {
+ if (ip_summed == CHECKSUM_NONE)
+ iow(dm, DM9000_TCCR, 0);
+ else
+ iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
+ dm->ip_summed = ip_summed;
+ }
+
+ /* Set TX length to DM9000 */
+ iow(dm, DM9000_TXPLL, pkt_len);
+ iow(dm, DM9000_TXPLH, pkt_len >> 8);
+
+ /* Issue TX polling command */
+ iow(dm, DM9000_TCR, TCR_TXREQ); /* Cleared after TX complete */
+}
+
+/*
+ * Hardware start transmission:
+ * send a packet from the upper layer out onto the medium.
+ */
+static int
+dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned long flags;
+ board_info_t *db = netdev_priv(dev);
+
+ dm9000_dbg(db, 3, "%s:\n", __func__);
+
+ if (db->tx_pkt_cnt > 1)
+ return NETDEV_TX_BUSY;
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ /* Move data to DM9000 TX RAM */
+ writeb(DM9000_MWCMD, db->io_addr);
+
+ (db->outblk)(db->io_data, skb->data, skb->len);
+ dev->stats.tx_bytes += skb->len;
+
+ db->tx_pkt_cnt++;
+ /* TX control: First packet immediately send, second packet queue */
+ if (db->tx_pkt_cnt == 1) {
+ dm9000_send_packet(dev, skb->ip_summed, skb->len);
+ } else {
+ /* Second packet */
+ db->queue_pkt_len = skb->len;
+ db->queue_ip_summed = skb->ip_summed;
+ netif_stop_queue(dev);
+ }
+
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ /* free this SKB */
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * DM9000 interrupt handler:
+ * pass received packets to the upper layer and free transmitted packets
+ */
+
+static void dm9000_tx_done(struct net_device *dev, board_info_t *db)
+{
+ int tx_status = ior(db, DM9000_NSR); /* Got TX status */
+
+ if (tx_status & (NSR_TX2END | NSR_TX1END)) {
+ /* One packet sent complete */
+ db->tx_pkt_cnt--;
+ dev->stats.tx_packets++;
+
+ if (netif_msg_tx_done(db))
+ dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);
+
+ /* Queue packet check & send */
+ if (db->tx_pkt_cnt > 0)
+ dm9000_send_packet(dev, db->queue_ip_summed,
+ db->queue_pkt_len);
+ netif_wake_queue(dev);
+ }
+}
+
+struct dm9000_rxhdr {
+ u8 RxPktReady;
+ u8 RxStatus;
+ __le16 RxLen;
+} __packed;
+
+/*
+ * Receive a packet and pass it to the upper layer
+ */
+static void
+dm9000_rx(struct net_device *dev)
+{
+ board_info_t *db = netdev_priv(dev);
+ struct dm9000_rxhdr rxhdr;
+ struct sk_buff *skb;
+ u8 rxbyte, *rdptr;
+ bool GoodPacket;
+ int RxLen;
+
+ /* Check packet ready or not */
+ do {
+ ior(db, DM9000_MRCMDX); /* Dummy read */
+
+ /* Get most updated data */
+ rxbyte = readb(db->io_data);
+
+ /* Status check: this byte must be 0 or 1 */
+ if (rxbyte & DM9000_PKT_ERR) {
+ dev_warn(db->dev, "status check fail: %d\n", rxbyte);
+ iow(db, DM9000_RCR, 0x00); /* Stop Device */
+ iow(db, DM9000_ISR, IMR_PAR); /* Stop INT request */
+ return;
+ }
+
+ if (!(rxbyte & DM9000_PKT_RDY))
+ return;
+
+ /* A packet is ready; get its status and length */
+ GoodPacket = true;
+ writeb(DM9000_MRCMD, db->io_addr);
+
+ (db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));
+
+ RxLen = le16_to_cpu(rxhdr.RxLen);
+
+ if (netif_msg_rx_status(db))
+ dev_dbg(db->dev, "RX: status %02x, length %04x\n",
+ rxhdr.RxStatus, RxLen);
+
+ /* Packet Status check */
+ if (RxLen < 0x40) {
+ GoodPacket = false;
+ if (netif_msg_rx_err(db))
+ dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
+ }
+
+ if (RxLen > DM9000_PKT_MAX) {
+ dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
+ }
+
+ /* rxhdr.RxStatus is identical to RSR register. */
+ if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
+ RSR_PLE | RSR_RWTO |
+ RSR_LCS | RSR_RF)) {
+ GoodPacket = false;
+ if (rxhdr.RxStatus & RSR_FOE) {
+ if (netif_msg_rx_err(db))
+ dev_dbg(db->dev, "fifo error\n");
+ dev->stats.rx_fifo_errors++;
+ }
+ if (rxhdr.RxStatus & RSR_CE) {
+ if (netif_msg_rx_err(db))
+ dev_dbg(db->dev, "crc error\n");
+ dev->stats.rx_crc_errors++;
+ }
+ if (rxhdr.RxStatus & RSR_RF) {
+ if (netif_msg_rx_err(db))
+ dev_dbg(db->dev, "length error\n");
+ dev->stats.rx_length_errors++;
+ }
+ }
+
+ /* Move data from DM9000 */
+ if (GoodPacket &&
+ ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) {
+ skb_reserve(skb, 2);
+ rdptr = (u8 *) skb_put(skb, RxLen - 4);
+
+ /* Read received packet from RX SRAM */
+
+ (db->inblk)(db->io_data, rdptr, RxLen);
+ dev->stats.rx_bytes += RxLen;
+
+ /* Pass to upper layer */
+ skb->protocol = eth_type_trans(skb, dev);
+ if (dev->features & NETIF_F_RXCSUM) {
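+ /* The ready byte appears to mirror the RCSR bit layout:
+ * bits 2-4 flag IP/TCP/UDP present and bits 5-7 the
+ * matching *_BAD failures, so shifting the present bits
+ * up by 3 and ANDing detects "present and bad"; zero
+ * means every checksum that was checked verified OK.
+ */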
+ if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+ }
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+
+ } else {
+ /* need to dump the packet's data */
+
+ (db->dumpblk)(db->io_data, RxLen);
+ }
+ } while (rxbyte & DM9000_PKT_RDY);
+}
+
+static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ board_info_t *db = netdev_priv(dev);
+ int int_status;
+ unsigned long flags;
+ u8 reg_save;
+
+ dm9000_dbg(db, 3, "entering %s\n", __func__);
+
+ /* A real interrupt coming */
+
+ /* holders of db->lock must always block IRQs */
+ spin_lock_irqsave(&db->lock, flags);
+
+ /* Save previous register address */
+ reg_save = readb(db->io_addr);
+
+ /* Disable all interrupts */
+ iow(db, DM9000_IMR, IMR_PAR);
+
+ /* Got DM9000 interrupt status */
+ int_status = ior(db, DM9000_ISR); /* Got ISR */
+ iow(db, DM9000_ISR, int_status); /* Clear ISR status */
+
+ if (netif_msg_intr(db))
+ dev_dbg(db->dev, "interrupt status %02x\n", int_status);
+
+ /* Received the coming packet */
+ if (int_status & ISR_PRS)
+ dm9000_rx(dev);
+
+ /* Transmit Interrupt check */
+ if (int_status & ISR_PTS)
+ dm9000_tx_done(dev, db);
+
+ if (db->type != TYPE_DM9000E) {
+ if (int_status & ISR_LNKCHNG) {
+ /* fire a link-change request */
+ schedule_delayed_work(&db->phy_poll, 1);
+ }
+ }
+
+ /* Re-enable interrupt mask */
+ iow(db, DM9000_IMR, db->imr_all);
+
+ /* Restore previous register address */
+ writeb(reg_save, db->io_addr);
+
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ board_info_t *db = netdev_priv(dev);
+ unsigned long flags;
+ unsigned nsr, wcr;
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ nsr = ior(db, DM9000_NSR);
+ wcr = ior(db, DM9000_WCR);
+
+ dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);
+
+ if (nsr & NSR_WAKEST) {
+ /* clear the wake status so it is not signalled again */
+ iow(db, DM9000_NSR, NSR_WAKEST);
+
+ if (wcr & WCR_LINKST)
+ dev_info(db->dev, "wake by link status change\n");
+ if (wcr & WCR_SAMPLEST)
+ dev_info(db->dev, "wake by sample packet\n");
+ if (wcr & WCR_MAGICST)
+ dev_info(db->dev, "wake by magic packet\n");
+ if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
+ dev_err(db->dev, "wake signalled with no reason? "
+ "NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
+
+ }
+
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Used by netconsole
+ */
+static void dm9000_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ dm9000_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+/*
+ * Open the interface.
+ * The interface is opened whenever "ifconfig" activates it.
+ */
+static int
+dm9000_open(struct net_device *dev)
+{
+ board_info_t *db = netdev_priv(dev);
+ unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;
+
+ if (netif_msg_ifup(db))
+ dev_dbg(db->dev, "enabling %s\n", dev->name);
+
+ /* If there is no IRQ type specified, default to something that
+ * may work, and tell the user that this is a problem */
+
+ if (irqflags == IRQF_TRIGGER_NONE)
+ dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
+
+ irqflags |= IRQF_SHARED;
+
+ /* Pre-activate the PHY via GPIO0; reg 1Fh is not cleared by reset */
+ iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
+ mdelay(1); /* delay needs by DM9000B */
+
+ /* Initialize DM9000 board */
+ dm9000_reset(db);
+ dm9000_init_dm9000(dev);
+
+ if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
+ return -EAGAIN;
+
+ /* Init driver variable */
+ db->dbug_cnt = 0;
+
+ mii_check_media(&db->mii, netif_msg_link(db), 1);
+ netif_start_queue(dev);
+
+ dm9000_schedule_poll(db);
+
+ return 0;
+}
+
+/*
+ * Sleep, normally using msleep(); if we are suspending, busy-wait
+ * with mdelay() instead.
+ */
+static void dm9000_msleep(board_info_t *db, unsigned int ms)
+{
+ if (db->in_suspend)
+ mdelay(ms);
+ else
+ msleep(ms);
+}
+
+/*
+ * Read a word from phyxcer
+ */
+static int
+dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
+{
+ board_info_t *db = netdev_priv(dev);
+ unsigned long flags;
+ unsigned int reg_save;
+ int ret;
+
+ mutex_lock(&db->addr_lock);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ /* Save previous register address */
+ reg_save = readb(db->io_addr);
+
+ /* Fill the phyxcer register into REG_0C */
+ iow(db, DM9000_EPAR, DM9000_PHY | reg);
+
+ iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */
+
+ writeb(reg_save, db->io_addr);
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ dm9000_msleep(db, 1); /* Wait read complete */
+
+ spin_lock_irqsave(&db->lock, flags);
+ reg_save = readb(db->io_addr);
+
+ iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
+
+ /* The read data keeps on REG_0D & REG_0E */
+ ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
+
+ /* restore the previous address */
+ writeb(reg_save, db->io_addr);
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ mutex_unlock(&db->addr_lock);
+
+ dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
+ return ret;
+}
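+
+/* Example use (illustrative; the MII_* names come from <linux/mii.h>):
+ *
+ *   int bmsr = dm9000_phy_read(dev, 0, MII_BMSR);
+ *   int link_up = !!(bmsr & BMSR_LSTATUS);
+ */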
+
+/*
+ * Write a word to phyxcer
+ */
+static void
+dm9000_phy_write(struct net_device *dev,
+ int phyaddr_unused, int reg, int value)
+{
+ board_info_t *db = netdev_priv(dev);
+ unsigned long flags;
+ unsigned long reg_save;
+
+ dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
+ mutex_lock(&db->addr_lock);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ /* Save previous register address */
+ reg_save = readb(db->io_addr);
+
+ /* Fill the phyxcer register into REG_0C */
+ iow(db, DM9000_EPAR, DM9000_PHY | reg);
+
+ /* Fill the written data into REG_0D & REG_0E */
+ iow(db, DM9000_EPDRL, value);
+ iow(db, DM9000_EPDRH, value >> 8);
+
+ iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */
+
+ writeb(reg_save, db->io_addr);
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ dm9000_msleep(db, 1); /* Wait write complete */
+
+ spin_lock_irqsave(&db->lock, flags);
+ reg_save = readb(db->io_addr);
+
+ iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
+
+ /* restore the previous address */
+ writeb(reg_save, db->io_addr);
+
+ spin_unlock_irqrestore(&db->lock, flags);
+ mutex_unlock(&db->addr_lock);
+}
+
+static void
+dm9000_shutdown(struct net_device *dev)
+{
+ board_info_t *db = netdev_priv(dev);
+
+ /* RESET device */
+ dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
+ iow(db, DM9000_GPR, 0x01); /* Power-Down PHY */
+ iow(db, DM9000_IMR, IMR_PAR); /* Disable all interrupt */
+ iow(db, DM9000_RCR, 0x00); /* Disable RX */
+}
+
+/*
+ * Stop the interface.
+ * The interface is stopped when it is brought down.
+ */
+static int
+dm9000_stop(struct net_device *ndev)
+{
+ board_info_t *db = netdev_priv(ndev);
+
+ if (netif_msg_ifdown(db))
+ dev_dbg(db->dev, "shutting down %s\n", ndev->name);
+
+ cancel_delayed_work_sync(&db->phy_poll);
+
+ netif_stop_queue(ndev);
+ netif_carrier_off(ndev);
+
+ /* free interrupt */
+ free_irq(ndev->irq, ndev);
+
+ dm9000_shutdown(ndev);
+
+ return 0;
+}
+
+static const struct net_device_ops dm9000_netdev_ops = {
+ .ndo_open = dm9000_open,
+ .ndo_stop = dm9000_stop,
+ .ndo_start_xmit = dm9000_start_xmit,
+ .ndo_tx_timeout = dm9000_timeout,
+ .ndo_set_rx_mode = dm9000_hash_table,
+ .ndo_do_ioctl = dm9000_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_features = dm9000_set_features,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = dm9000_poll_controller,
+#endif
+};
+
+/*
+ * Search for a DM9000 board, allocate space and register it
+ */
+static int __devinit
+dm9000_probe(struct platform_device *pdev)
+{
+ struct dm9000_plat_data *pdata = pdev->dev.platform_data;
+ struct board_info *db; /* pointer to the board information structure */
+ struct net_device *ndev;
+ const unsigned char *mac_src;
+ int ret = 0;
+ int iosize;
+ int i;
+ u32 id_val;
+
+ /* Init network device */
+ ndev = alloc_etherdev(sizeof(struct board_info));
+ if (!ndev) {
+ dev_err(&pdev->dev, "could not allocate device.\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ dev_dbg(&pdev->dev, "dm9000_probe()\n");
+
+ /* setup board info structure */
+ db = netdev_priv(ndev);
+
+ db->dev = &pdev->dev;
+ db->ndev = ndev;
+
+ spin_lock_init(&db->lock);
+ mutex_init(&db->addr_lock);
+
+ INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
+
+ db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+ if (db->addr_res == NULL || db->data_res == NULL ||
+ db->irq_res == NULL) {
+ dev_err(db->dev, "insufficient resources\n");
+ ret = -ENOENT;
+ goto out;
+ }
+
+ db->irq_wake = platform_get_irq(pdev, 1);
+ if (db->irq_wake >= 0) {
+ dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
+
+ ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
+ IRQF_SHARED, dev_name(db->dev), ndev);
+ if (ret) {
+ dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
+ } else {
+
+ /* test to see if irq is really wakeup capable */
+ ret = irq_set_irq_wake(db->irq_wake, 1);
+ if (ret) {
+ dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
+ db->irq_wake, ret);
+ ret = 0;
+ } else {
+ irq_set_irq_wake(db->irq_wake, 0);
+ db->wake_supported = 1;
+ }
+ }
+ }
+
+ iosize = resource_size(db->addr_res);
+ db->addr_req = request_mem_region(db->addr_res->start, iosize,
+ pdev->name);
+
+ if (db->addr_req == NULL) {
+ dev_err(db->dev, "cannot claim address reg area\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ db->io_addr = ioremap(db->addr_res->start, iosize);
+
+ if (db->io_addr == NULL) {
+ dev_err(db->dev, "failed to ioremap address reg\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ iosize = resource_size(db->data_res);
+ db->data_req = request_mem_region(db->data_res->start, iosize,
+ pdev->name);
+
+ if (db->data_req == NULL) {
+ dev_err(db->dev, "cannot claim data reg area\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ db->io_data = ioremap(db->data_res->start, iosize);
+
+ if (db->io_data == NULL) {
+ dev_err(db->dev, "failed to ioremap data reg\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* fill in parameters for net-dev structure */
+ ndev->base_addr = (unsigned long)db->io_addr;
+ ndev->irq = db->irq_res->start;
+
+ /* ensure at least we have a default set of IO routines */
+ dm9000_set_io(db, iosize);
+
+ /* check to see if anything is being over-ridden */
+ if (pdata != NULL) {
+ /* check to see if the driver wants to over-ride the
+ * default IO width */
+
+ if (pdata->flags & DM9000_PLATF_8BITONLY)
+ dm9000_set_io(db, 1);
+
+ if (pdata->flags & DM9000_PLATF_16BITONLY)
+ dm9000_set_io(db, 2);
+
+ if (pdata->flags & DM9000_PLATF_32BITONLY)
+ dm9000_set_io(db, 4);
+
+ /* check to see if there are any IO routine
+ * over-rides */
+
+ if (pdata->inblk != NULL)
+ db->inblk = pdata->inblk;
+
+ if (pdata->outblk != NULL)
+ db->outblk = pdata->outblk;
+
+ if (pdata->dumpblk != NULL)
+ db->dumpblk = pdata->dumpblk;
+
+ db->flags = pdata->flags;
+ }
+
+#ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
+ db->flags |= DM9000_PLATF_SIMPLE_PHY;
+#endif
+
+ dm9000_reset(db);
+
+ /* try multiple times, DM9000 sometimes gets the read wrong */
+ for (i = 0; i < 8; i++) {
+ id_val = ior(db, DM9000_VIDL);
+ id_val |= (u32)ior(db, DM9000_VIDH) << 8;
+ id_val |= (u32)ior(db, DM9000_PIDL) << 16;
+ id_val |= (u32)ior(db, DM9000_PIDH) << 24;
+
+ if (id_val == DM9000_ID)
+ break;
+ dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
+ }
+
+ if (id_val != DM9000_ID) {
+ dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* Identify what type of DM9000 we are working on */
+
+ id_val = ior(db, DM9000_CHIPR);
+ dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);
+
+ switch (id_val) {
+ case CHIPR_DM9000A:
+ db->type = TYPE_DM9000A;
+ break;
+ case CHIPR_DM9000B:
+ db->type = TYPE_DM9000B;
+ break;
+ default:
+ dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
+ db->type = TYPE_DM9000E;
+ }
+
+ /* dm9000a/b are capable of hardware checksum offload */
+ if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
+ ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
+ ndev->features |= ndev->hw_features;
+ }
+
+ /* from this point we assume that we have found a DM9000 */
+
+ /* driver system function */
+ ether_setup(ndev);
+
+ ndev->netdev_ops = &dm9000_netdev_ops;
+ ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
+ ndev->ethtool_ops = &dm9000_ethtool_ops;
+
+ db->msg_enable = NETIF_MSG_LINK;
+ db->mii.phy_id_mask = 0x1f;
+ db->mii.reg_num_mask = 0x1f;
+ db->mii.force_media = 0;
+ db->mii.full_duplex = 0;
+ db->mii.dev = ndev;
+ db->mii.mdio_read = dm9000_phy_read;
+ db->mii.mdio_write = dm9000_phy_write;
+
+ mac_src = "eeprom";
+
+ /* try reading the node address from the attached EEPROM */
+ for (i = 0; i < 6; i += 2)
+ dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
+
+ if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
+ mac_src = "platform data";
+ memcpy(ndev->dev_addr, pdata->dev_addr, 6);
+ }
+
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ /* try reading from mac */
+
+ mac_src = "chip";
+ for (i = 0; i < 6; i++)
+ ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
+ }
+
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
+ "set using ifconfig\n", ndev->name);
+
+ random_ether_addr(ndev->dev_addr);
+ mac_src = "random";
+ }
+
+ platform_set_drvdata(pdev, ndev);
+ ret = register_netdev(ndev);
+
+ if (ret == 0)
+ printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
+ ndev->name, dm9000_type_to_char(db->type),
+ db->io_addr, db->io_data, ndev->irq,
+ ndev->dev_addr, mac_src);
+ return 0;
+
+out:
+ dev_err(db->dev, "not found (%d).\n", ret);
+
+ dm9000_release_board(pdev, db);
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int
+dm9000_drv_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ board_info_t *db;
+
+ if (ndev) {
+ db = netdev_priv(ndev);
+ db->in_suspend = 1;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ netif_device_detach(ndev);
+
+ /* only shutdown if not using WoL */
+ if (!db->wake_state)
+ dm9000_shutdown(ndev);
+ }
+ return 0;
+}
+
+static int
+dm9000_drv_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ board_info_t *db = netdev_priv(ndev);
+
+ if (ndev) {
+ if (netif_running(ndev)) {
+ /* if we were not in wake mode, reset the chip so that it
+ * is in a known state even if it was powered off */
+ if (!db->wake_state) {
+ dm9000_reset(db);
+ dm9000_init_dm9000(ndev);
+ }
+
+ netif_device_attach(ndev);
+ }
+
+ db->in_suspend = 0;
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops dm9000_drv_pm_ops = {
+ .suspend = dm9000_drv_suspend,
+ .resume = dm9000_drv_resume,
+};
+
+static int __devexit
+dm9000_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ unregister_netdev(ndev);
+ dm9000_release_board(pdev, netdev_priv(ndev));
+ free_netdev(ndev); /* free device structure */
+
+ dev_dbg(&pdev->dev, "released and freed device\n");
+ return 0;
+}
+
+static struct platform_driver dm9000_driver = {
+ .driver = {
+ .name = "dm9000",
+ .owner = THIS_MODULE,
+ .pm = &dm9000_drv_pm_ops,
+ },
+ .probe = dm9000_probe,
+ .remove = __devexit_p(dm9000_drv_remove),
+};
+
+static int __init
+dm9000_init(void)
+{
+ printk(KERN_INFO "%s Ethernet Driver, V%s\n", CARDNAME, DRV_VERSION);
+
+ return platform_driver_register(&dm9000_driver);
+}
+
+static void __exit
+dm9000_cleanup(void)
+{
+ platform_driver_unregister(&dm9000_driver);
+}
+
+module_init(dm9000_init);
+module_exit(dm9000_cleanup);
+
+MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
+MODULE_DESCRIPTION("Davicom DM9000 network driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:dm9000");
diff --git a/drivers/net/ethernet/davicom/dm9000.h b/drivers/net/ethernet/davicom/dm9000.h
new file mode 100644
index 000000000000..55688bd1a3ef
--- /dev/null
+++ b/drivers/net/ethernet/davicom/dm9000.h
@@ -0,0 +1,171 @@
+/*
+ * dm9000 Ethernet
+ */
+
+#ifndef _DM9000X_H_
+#define _DM9000X_H_
+
+#define DM9000_ID 0x90000A46
+
+/* although the registers are 16 bit, they are 32-bit aligned.
+ */
+
+#define DM9000_NCR 0x00
+#define DM9000_NSR 0x01
+#define DM9000_TCR 0x02
+#define DM9000_TSR1 0x03
+#define DM9000_TSR2 0x04
+#define DM9000_RCR 0x05
+#define DM9000_RSR 0x06
+#define DM9000_ROCR 0x07
+#define DM9000_BPTR 0x08
+#define DM9000_FCTR 0x09
+#define DM9000_FCR 0x0A
+#define DM9000_EPCR 0x0B
+#define DM9000_EPAR 0x0C
+#define DM9000_EPDRL 0x0D
+#define DM9000_EPDRH 0x0E
+#define DM9000_WCR 0x0F
+
+#define DM9000_PAR 0x10
+#define DM9000_MAR 0x16
+
+#define DM9000_GPCR 0x1e
+#define DM9000_GPR 0x1f
+#define DM9000_TRPAL 0x22
+#define DM9000_TRPAH 0x23
+#define DM9000_RWPAL 0x24
+#define DM9000_RWPAH 0x25
+
+#define DM9000_VIDL 0x28
+#define DM9000_VIDH 0x29
+#define DM9000_PIDL 0x2A
+#define DM9000_PIDH 0x2B
+
+#define DM9000_CHIPR 0x2C
+#define DM9000_SMCR 0x2F
+
+#define DM9000_ETXCSR 0x30
+#define DM9000_TCCR 0x31
+#define DM9000_RCSR 0x32
+
+#define CHIPR_DM9000A 0x19
+#define CHIPR_DM9000B 0x1A
+
+#define DM9000_MRCMDX 0xF0
+#define DM9000_MRCMD 0xF2
+#define DM9000_MRRL 0xF4
+#define DM9000_MRRH 0xF5
+#define DM9000_MWCMDX 0xF6
+#define DM9000_MWCMD 0xF8
+#define DM9000_MWRL 0xFA
+#define DM9000_MWRH 0xFB
+#define DM9000_TXPLL 0xFC
+#define DM9000_TXPLH 0xFD
+#define DM9000_ISR 0xFE
+#define DM9000_IMR 0xFF
+
+#define NCR_EXT_PHY (1<<7)
+#define NCR_WAKEEN (1<<6)
+#define NCR_FCOL (1<<4)
+#define NCR_FDX (1<<3)
+#define NCR_LBK (3<<1)
+#define NCR_RST (1<<0)
+
+#define NSR_SPEED (1<<7)
+#define NSR_LINKST (1<<6)
+#define NSR_WAKEST (1<<5)
+#define NSR_TX2END (1<<3)
+#define NSR_TX1END (1<<2)
+#define NSR_RXOV (1<<1)
+
+#define TCR_TJDIS (1<<6)
+#define TCR_EXCECM (1<<5)
+#define TCR_PAD_DIS2 (1<<4)
+#define TCR_CRC_DIS2 (1<<3)
+#define TCR_PAD_DIS1 (1<<2)
+#define TCR_CRC_DIS1 (1<<1)
+#define TCR_TXREQ (1<<0)
+
+#define TSR_TJTO (1<<7)
+#define TSR_LC (1<<6)
+#define TSR_NC (1<<5)
+#define TSR_LCOL (1<<4)
+#define TSR_COL (1<<3)
+#define TSR_EC (1<<2)
+
+#define RCR_WTDIS (1<<6)
+#define RCR_DIS_LONG (1<<5)
+#define RCR_DIS_CRC (1<<4)
+#define RCR_ALL (1<<3)
+#define RCR_RUNT (1<<2)
+#define RCR_PRMSC (1<<1)
+#define RCR_RXEN (1<<0)
+
+#define RSR_RF (1<<7)
+#define RSR_MF (1<<6)
+#define RSR_LCS (1<<5)
+#define RSR_RWTO (1<<4)
+#define RSR_PLE (1<<3)
+#define RSR_AE (1<<2)
+#define RSR_CE (1<<1)
+#define RSR_FOE (1<<0)
+
+#define WCR_LINKEN (1 << 5)
+#define WCR_SAMPLEEN (1 << 4)
+#define WCR_MAGICEN (1 << 3)
+#define WCR_LINKST (1 << 2)
+#define WCR_SAMPLEST (1 << 1)
+#define WCR_MAGICST (1 << 0)
+
+#define FCTR_HWOT(ot) (((ot) & 0xf) << 4)
+#define FCTR_LWOT(ot) ((ot) & 0xf)
+
+#define IMR_PAR (1<<7)
+#define IMR_ROOM (1<<3)
+#define IMR_ROM (1<<2)
+#define IMR_PTM (1<<1)
+#define IMR_PRM (1<<0)
+
+#define ISR_ROOS (1<<3)
+#define ISR_ROS (1<<2)
+#define ISR_PTS (1<<1)
+#define ISR_PRS (1<<0)
+#define ISR_CLR_STATUS (ISR_ROOS | ISR_ROS | ISR_PTS | ISR_PRS)
+
+#define EPCR_REEP (1<<5)
+#define EPCR_WEP (1<<4)
+#define EPCR_EPOS (1<<3)
+#define EPCR_ERPRR (1<<2)
+#define EPCR_ERPRW (1<<1)
+#define EPCR_ERRE (1<<0)
+
+#define GPCR_GEP_CNTL (1<<0)
+
+#define TCCR_IP (1<<0)
+#define TCCR_TCP (1<<1)
+#define TCCR_UDP (1<<2)
+
+#define RCSR_UDP_BAD (1<<7)
+#define RCSR_TCP_BAD (1<<6)
+#define RCSR_IP_BAD (1<<5)
+#define RCSR_UDP (1<<4)
+#define RCSR_TCP (1<<3)
+#define RCSR_IP (1<<2)
+#define RCSR_CSUM (1<<1)
+#define RCSR_DISCARD (1<<0)
+
+#define DM9000_PKT_RDY 0x01 /* Packet ready to receive */
+#define DM9000_PKT_ERR 0x02
+#define DM9000_PKT_MAX 1536 /* Received packet max size */
+
+/* DM9000A / DM9000B definitions */
+
+#define IMR_LNKCHNG (1<<5)
+#define IMR_UNDERRUN (1<<4)
+
+#define ISR_LNKCHNG (1<<5)
+#define ISR_UNDERRUN (1<<4)
+
+#endif /* _DM9000X_H_ */
+
diff --git a/drivers/net/ethernet/dec/Kconfig b/drivers/net/ethernet/dec/Kconfig
new file mode 100644
index 000000000000..37940279ded8
--- /dev/null
+++ b/drivers/net/ethernet/dec/Kconfig
@@ -0,0 +1,37 @@
+#
+# Digital Equipment Inc network device configuration
+#
+
+config NET_VENDOR_DEC
+ bool "Digital Equipment devices"
+ default y
+ depends on PCI || EISA || CARDBUS
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about DEC cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_DEC
+
+config EWRK3
+ tristate "EtherWORKS 3 (DE203, DE204, DE205) support"
+ depends on ISA
+ select CRC32
+ ---help---
+ This driver supports the DE203, DE204 and DE205 network (Ethernet)
+ cards. If this is for you, say Y and read
+ <file:Documentation/networking/ewrk3.txt> in the kernel source as
+ well as the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ewrk3.
+
+source "drivers/net/ethernet/dec/tulip/Kconfig"
+
+endif # NET_VENDOR_DEC
diff --git a/drivers/net/ethernet/dec/Makefile b/drivers/net/ethernet/dec/Makefile
new file mode 100644
index 000000000000..1b01ed8d42c8
--- /dev/null
+++ b/drivers/net/ethernet/dec/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Digital Equipment Inc. network device drivers.
+#
+
+obj-$(CONFIG_EWRK3) += ewrk3.o
+obj-$(CONFIG_NET_TULIP) += tulip/
diff --git a/drivers/net/ethernet/dec/ewrk3.c b/drivers/net/ethernet/dec/ewrk3.c
new file mode 100644
index 000000000000..f9df5e4d0341
--- /dev/null
+++ b/drivers/net/ethernet/dec/ewrk3.c
@@ -0,0 +1,1959 @@
+/* ewrk3.c: A DIGITAL EtherWORKS 3 ethernet driver for Linux.
+
+ Written 1994 by David C. Davies.
+
+ Copyright 1994 Digital Equipment Corporation.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License, incorporated herein by reference.
+
+ This driver is written for the Digital Equipment Corporation series
+ of EtherWORKS ethernet cards:
+
+ DE203 Turbo (BNC)
+ DE204 Turbo (TP)
+ DE205 Turbo (TP BNC)
+
+ The driver has been tested on a relatively busy network using the DE205
+ card and benchmarked with 'ttcp': it transferred 16M of data at 975kB/s
+ (7.8Mb/s) to a DECstation 5000/200.
+
+ The author may be reached at davies@maniac.ultranet.com.
+
+ =========================================================================
+ This driver has been written substantially from scratch, although its
+ inheritance of style and stack interface from 'depca.c' and in turn from
+ Donald Becker's 'lance.c' should be obvious.
+
+ The DE203/4/5 boards all use a new proprietary chip in place of the
+ LANCE chip used in prior cards (DEPCA, DE100, DE200/1/2, DE210, DE422).
+ Use the depca.c driver in the standard distribution for the LANCE based
+ cards from DIGITAL; this driver will not work with them.
+
+ The DE203/4/5 cards have 2 main modes: shared memory and I/O only. I/O
+ only makes all the card accesses through I/O transactions and no high
+ (shared) memory is used. This mode provides a >48% performance penalty
+ and is deprecated in this driver, although allowed to provide initial
+ setup when hardstrapped.
+
+ The shared memory mode comes in 3 flavours: 2kB, 32kB and 64kB. There is
+ no point in using any mode other than the 2kB mode - their performances
+ are virtually identical, although the driver has been tested in the 2kB
+ and 32kB modes. I would suggest you uncomment the line:
+
+ FORCE_2K_MODE;
+
+ to allow the driver to configure the card as a 2kB card at your current
+ base address, thus leaving more room to clutter your system box with
+ other memory hungry boards.
+
+ As many ISA and EISA cards can be supported under this driver as you
+ wish, limited primarily by the available IRQ lines, rather than by the
+ available I/O addresses (24 ISA, 16 EISA). I have checked different
+ configurations of multiple depca cards and ewrk3 cards and have not
+ found a problem yet (provided you have at least depca.c v0.38) ...
+
+ The board IRQ setting must be at an unused IRQ which is auto-probed
+ using Donald Becker's autoprobe routines. All these cards are at
+ {5,10,11,15}.
+
+ No 16MB memory limitation should exist with this driver as DMA is not
+ used and the common memory area is in low memory on the network card (my
+ current system has 20MB and I've not had problems yet).
+
+ The ability to load this driver as a loadable module has been included
+ and used extensively during the driver development (to save those long
+ reboot sequences). To utilise this ability, you have to do 8 things:
+
+ 0) have a copy of the loadable modules code installed on your system.
+ 1) copy ewrk3.c from the /linux/drivers/net directory to your favourite
+ temporary directory.
+ 2) edit the source code near line 1898 to reflect the I/O address and
+ IRQ you're using.
+ 3) compile ewrk3.c, but include -DMODULE in the command line to ensure
+ that the correct bits are compiled (see end of source code).
+ 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
+ kernel with the ewrk3 configuration turned off and reboot.
+ 5) insmod ewrk3.o
+ [Alan Cox: Changed this so you can insmod ewrk3.o irq=x io=y]
+ [Adam Kropelin: now accepts irq=x1,x2 io=y1,y2 for multiple cards]
+ 6) run the net startup bits for your new eth?? interface manually
+ (usually /etc/rc.inet[12] at boot time).
+ 7) enjoy!
+
+ Note that autoprobing is not allowed in loadable modules - the system is
+ already up and running and you're messing with interrupts.
+
+ To unload a module, turn off the associated interface
+ 'ifconfig eth?? down' then 'rmmod ewrk3'.
+
+ Promiscuous mode has been turned off in this driver, but all the
+ multicast address bits have been turned on. This improved the send
+ performance on a busy network by about 13%.
+
+ Ioctl's have now been provided (primarily because I wanted to grab some
+ packet size statistics). They are patterned after 'plipconfig.c' from a
+ suggestion by Alan Cox. Using these ioctls, you can enable promiscuous
+ mode, add/delete multicast addresses, change the hardware address, get
+ packet size distribution statistics and muck around with the control and
+ status register. I'll add others if and when the need arises.
+
+ TO DO:
+ ------
+
+
+ Revision History
+ ----------------
+
+ Version Date Description
+
+ 0.1 26-aug-94 Initial writing. ALPHA code release.
+ 0.11 31-aug-94 Fixed: 2k mode memory base calc.,
+ LeMAC version calc.,
+ IRQ vector assignments during autoprobe.
+ 0.12 31-aug-94 Tested working on LeMAC2 (DE20[345]-AC) card.
+ Fixed up MCA hash table algorithm.
+ 0.20 4-sep-94 Added IOCTL functionality.
+ 0.21 14-sep-94 Added I/O mode.
+ 0.21axp 15-sep-94 Special version for ALPHA AXP Linux V1.0.
+ 0.22 16-sep-94 Added more IOCTLs & tidied up.
+ 0.23 21-sep-94 Added transmit cut through.
+ 0.24 31-oct-94 Added uid checks in some ioctls.
+ 0.30 1-nov-94 BETA code release.
+ 0.31 5-dec-94 Added check/allocate region code.
+ 0.32 16-jan-95 Broadcast packet fix.
+ 0.33 10-Feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
+ 0.40 27-Dec-95 Rationalise MODULE and autoprobe code.
+ Rewrite for portability & updated.
+ ALPHA support from <jestabro@amt.tay1.dec.com>
+ Added verify_area() calls in ewrk3_ioctl() from
+ suggestion by <heiko@colossus.escape.de>.
+ Add new multicasting code.
+ 0.41 20-Jan-96 Fix IRQ set up problem reported by
+ <kenneth@bbs.sas.ntu.ac.sg>.
+ 0.42 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi>
+ 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c
+ 0.44 08-Nov-01 use library crc32 functions <Matt_Domsch@dell.com>
+ 0.45 19-Jul-02 fix unaligned access on alpha <martin@bruli.net>
+ 0.46 10-Oct-02 Multiple NIC support when module <akropel1@rochester.rr.com>
+ 0.47 18-Oct-02 ethtool support <akropel1@rochester.rr.com>
+ 0.48 18-Oct-02 cli/sti removal for 2.5 <vda@port.imtp.ilyichevsk.odessa.ua>
+ ioctl locking, signature search cleanup <akropel1@rochester.rr.com>
+
+ =========================================================================
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+#include <linux/ctype.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
+
+#include "ewrk3.h"
+
+#define DRV_NAME "ewrk3"
+#define DRV_VERSION "0.48"
+
+static char version[] __initdata =
+DRV_NAME ":v" DRV_VERSION " 2002/10/18 davies@maniac.ultranet.com\n";
+
+#ifdef EWRK3_DEBUG
+static int ewrk3_debug = EWRK3_DEBUG;
+#else
+static int ewrk3_debug = 1;
+#endif
+
+#define EWRK3_NDA 0xffe0 /* No Device Address */
+
+#define PROBE_LENGTH 32
+#define ETH_PROM_SIG 0xAA5500FFUL
+
+#ifndef EWRK3_SIGNATURE
+#define EWRK3_SIGNATURE {"DE203","DE204","DE205",""}
+#define EWRK3_STRLEN 8
+#endif
+
+#ifndef EWRK3_RAM_BASE_ADDRESSES
+#define EWRK3_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0x00000}
+#endif
+
+/*
+ ** Sets up the I/O area for the autoprobe.
+ */
+#define EWRK3_IO_BASE 0x100 /* Start address for probe search */
+#define EWRK3_IOP_INC 0x20 /* I/O address increment */
+#define EWRK3_TOTAL_SIZE 0x20 /* required I/O address length */
+
+#ifndef MAX_NUM_EWRK3S
+#define MAX_NUM_EWRK3S 21
+#endif
+
+#ifndef EWRK3_EISA_IO_PORTS
+#define EWRK3_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
+#endif
+
+#ifndef MAX_EISA_SLOTS
+#define MAX_EISA_SLOTS 16
+#define EISA_SLOT_INC 0x1000
+#endif
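+
+/*
+ ** An EISA board in slot n therefore decodes its registers at
+ ** (n * EISA_SLOT_INC) + EWRK3_EISA_IO_PORTS, e.g. slot 3 at 0x3c00.
+ */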
+
+#define QUEUE_PKT_TIMEOUT (1*HZ) /* Jiffies */
+
+/*
+ ** EtherWORKS 3 shared memory window sizes
+ */
+#define IO_ONLY 0x00
+#define SHMEM_2K 0x800
+#define SHMEM_32K 0x8000
+#define SHMEM_64K 0x10000
+
+/*
+ ** EtherWORKS 3 IRQ ENABLE/DISABLE
+ */
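+/*
+ ** Note that these macros (like START_EWRK3/STOP_EWRK3 below) expand
+ ** in line and rely on the caller having 'icr' (or 'csr'), 'lp' and
+ ** 'iobase' locals in scope - see ewrk3_interrupt() for a typical user.
+ */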
+#define ENABLE_IRQs { \
+ icr |= lp->irq_mask;\
+ outb(icr, EWRK3_ICR); /* Enable the IRQs */\
+}
+
+#define DISABLE_IRQs { \
+ icr = inb(EWRK3_ICR);\
+ icr &= ~lp->irq_mask;\
+ outb(icr, EWRK3_ICR); /* Disable the IRQs */\
+}
+
+/*
+ ** EtherWORKS 3 START/STOP
+ */
+#define START_EWRK3 { \
+ csr = inb(EWRK3_CSR);\
+ csr &= ~(CSR_TXD|CSR_RXD);\
+ outb(csr, EWRK3_CSR); /* Enable the TX and/or RX */\
+}
+
+#define STOP_EWRK3 { \
+ csr = (CSR_TXD|CSR_RXD);\
+ outb(csr, EWRK3_CSR); /* Disable the TX and/or RX */\
+}
+
+/*
+ ** The EtherWORKS 3 private structure
+ */
+#define EWRK3_PKT_STAT_SZ 16
+#define EWRK3_PKT_BIN_SZ 128 /* Should be >=100 unless you
+ increase EWRK3_PKT_STAT_SZ */
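+/*
+ ** With the sizes above, bins[i] (i >= 1) counts packets shorter than
+ ** i * EWRK3_PKT_BIN_SZ bytes (the first matching bin wins in
+ ** ewrk3_rx()), while bins[0] counts every received packet.
+ */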
+
+struct ewrk3_stats {
+ u32 bins[EWRK3_PKT_STAT_SZ];
+ u32 unicast;
+ u32 multicast;
+ u32 broadcast;
+ u32 excessive_collisions;
+ u32 tx_underruns;
+ u32 excessive_underruns;
+};
+
+struct ewrk3_private {
+ char adapter_name[80]; /* Name exported to /proc/ioports */
+ u_long shmem_base; /* Shared memory start address */
+ void __iomem *shmem;
+ u_long shmem_length; /* Shared memory window length */
+ struct ewrk3_stats pktStats; /* Private stats counters */
+ u_char irq_mask; /* Adapter IRQ mask bits */
+ u_char mPage; /* Maximum 2kB Page number */
+ u_char lemac; /* Chip rev. level */
+ u_char hard_strapped; /* Don't allow a full open */
+ u_char txc; /* Transmit cut through */
+ void __iomem *mctbl; /* Pointer to the multicast table */
+ u_char led_mask; /* Used to reserve LED access for ethtool */
+ spinlock_t hw_lock;
+};
+
+/*
+ ** Force the EtherWORKS 3 card to be in 2kB MODE
+ */
+#define FORCE_2K_MODE { \
+ shmem_length = SHMEM_2K;\
+ outb(((mem_start - 0x80000) >> 11), EWRK3_MBR);\
+}
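+/*
+ ** The MBR value written by FORCE_2K_MODE is the inverse of the 2kB
+ ** decode in ewrk3_hw_init(): e.g. mem_start = 0xa0000 maps back to
+ ** MBR = (0xa0000 - 0x80000) >> 11 = 0x40.
+ */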
+
+/*
+ ** Public Functions
+ */
+static int ewrk3_open(struct net_device *dev);
+static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t ewrk3_interrupt(int irq, void *dev_id);
+static int ewrk3_close(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static const struct ethtool_ops ethtool_ops_203;
+static const struct ethtool_ops ethtool_ops;
+
+/*
+ ** Private functions
+ */
+static int ewrk3_hw_init(struct net_device *dev, u_long iobase);
+static void ewrk3_init(struct net_device *dev);
+static int ewrk3_rx(struct net_device *dev);
+static int ewrk3_tx(struct net_device *dev);
+static void ewrk3_timeout(struct net_device *dev);
+
+static void EthwrkSignature(char *name, char *eeprom_image);
+static int DevicePresent(u_long iobase);
+static void SetMulticastFilter(struct net_device *dev);
+static int EISA_signature(char *name, s32 eisa_id);
+
+static int Read_EEPROM(u_long iobase, u_char eaddr);
+static int Write_EEPROM(short data, u_long iobase, u_char eaddr);
+static u_char get_hw_addr(struct net_device *dev, u_char * eeprom_image, char chipType);
+
+static int ewrk3_probe1(struct net_device *dev, u_long iobase, int irq);
+static int isa_probe(struct net_device *dev, u_long iobase);
+static int eisa_probe(struct net_device *dev, u_long iobase);
+
+static int irq[MAX_NUM_EWRK3S+1] = {5, 0, 10, 3, 11, 9, 15, 12};
+
+static char name[EWRK3_STRLEN + 1];
+static int num_ewrks3s;
+
+/*
+ ** Miscellaneous defines...
+ */
+#define INIT_EWRK3 {\
+ outb(EEPROM_INIT, EWRK3_IOPR);\
+ mdelay(1);\
+}
+
+#ifndef MODULE
+struct net_device * __init ewrk3_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct ewrk3_private));
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ err = ewrk3_probe1(dev, dev->base_addr, dev->irq);
+ if (err)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+
+}
+#endif
+
+static int __init ewrk3_probe1(struct net_device *dev, u_long iobase, int irq)
+{
+ int err;
+
+ dev->base_addr = iobase;
+ dev->irq = irq;
+
+ /* Probe for the adapter (address PROM pattern check), ISA bus first then EISA */
+ err = isa_probe(dev, iobase);
+ if (err != 0)
+ err = eisa_probe(dev, iobase);
+
+ if (err)
+ return err;
+
+ err = register_netdev(dev);
+ if (err)
+ release_region(dev->base_addr, EWRK3_TOTAL_SIZE);
+
+ return err;
+}
+
+static const struct net_device_ops ewrk3_netdev_ops = {
+ .ndo_open = ewrk3_open,
+ .ndo_start_xmit = ewrk3_queue_pkt,
+ .ndo_stop = ewrk3_close,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_do_ioctl = ewrk3_ioctl,
+ .ndo_tx_timeout = ewrk3_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __init
+ewrk3_hw_init(struct net_device *dev, u_long iobase)
+{
+ struct ewrk3_private *lp;
+ int i, status = 0;
+ u_long mem_start, shmem_length;
+ u_char cr, cmr, icr, nicsr, lemac, hard_strapped = 0;
+ u_char eeprom_image[EEPROM_MAX], chksum, eisa_cr = 0;
+
+ /*
+ ** Stop the EWRK3. Enable the DBR ROM. Disable interrupts and remote boot.
+ ** This also disables the EISA_ENABLE bit in the EISA Control Register.
+ */
+ if (iobase > 0x400)
+ eisa_cr = inb(EISA_CR);
+ INIT_EWRK3;
+
+ nicsr = inb(EWRK3_CSR);
+
+ icr = inb(EWRK3_ICR);
+ icr &= 0x70;
+ outb(icr, EWRK3_ICR); /* Disable all the IRQs */
+
+ if (nicsr != (CSR_TXD | CSR_RXD))
+ return -ENXIO;
+
+ /* Check that the EEPROM is alive and well and not living on Pluto... */
+ for (chksum = 0, i = 0; i < EEPROM_MAX; i += 2) {
+ union {
+ short val;
+ char c[2];
+ } tmp;
+
+ tmp.val = (short) Read_EEPROM(iobase, (i >> 1));
+ eeprom_image[i] = tmp.c[0];
+ eeprom_image[i + 1] = tmp.c[1];
+ chksum += eeprom_image[i] + eeprom_image[i + 1];
+ }
+
+ if (chksum != 0) { /* Bad EEPROM Data! */
+ printk("%s: Device has a bad on-board EEPROM.\n", dev->name);
+ return -ENXIO;
+ }
+
+ EthwrkSignature(name, eeprom_image);
+ if (*name == '\0')
+ return -ENXIO;
+
+ dev->base_addr = iobase;
+
+ if (iobase > 0x400) {
+ outb(eisa_cr, EISA_CR); /* Rewrite the EISA CR */
+ }
+ lemac = eeprom_image[EEPROM_CHIPVER];
+ cmr = inb(EWRK3_CMR);
+
+ if (((lemac == LeMAC) && ((cmr & CMR_NO_EEPROM) != CMR_NO_EEPROM)) ||
+ ((lemac == LeMAC2) && !(cmr & CMR_HS))) {
+ printk("%s: %s at %#4lx", dev->name, name, iobase);
+ hard_strapped = 1;
+ } else if ((iobase & 0x0fff) == EWRK3_EISA_IO_PORTS) {
+ /* EISA slot address */
+ printk("%s: %s at %#4lx (EISA slot %ld)",
+ dev->name, name, iobase, ((iobase >> 12) & 0x0f));
+ } else { /* ISA port address */
+ printk("%s: %s at %#4lx", dev->name, name, iobase);
+ }
+
+ printk(", h/w address ");
+ if (lemac != LeMAC2)
+ DevicePresent(iobase); /* needed after INIT_EWRK3 */
+ status = get_hw_addr(dev, eeprom_image, lemac);
+ printk("%pM\n", dev->dev_addr);
+
+ if (status) {
+ printk(" which has an EEPROM CRC error.\n");
+ return -ENXIO;
+ }
+
+ if (lemac == LeMAC2) { /* Special LeMAC2 CMR things */
+ cmr &= ~(CMR_RA | CMR_WB | CMR_LINK | CMR_POLARITY | CMR_0WS);
+ if (eeprom_image[EEPROM_MISC0] & READ_AHEAD)
+ cmr |= CMR_RA;
+ if (eeprom_image[EEPROM_MISC0] & WRITE_BEHIND)
+ cmr |= CMR_WB;
+ if (eeprom_image[EEPROM_NETMAN0] & NETMAN_POL)
+ cmr |= CMR_POLARITY;
+ if (eeprom_image[EEPROM_NETMAN0] & NETMAN_LINK)
+ cmr |= CMR_LINK;
+ if (eeprom_image[EEPROM_MISC0] & _0WS_ENA)
+ cmr |= CMR_0WS;
+ }
+ if (eeprom_image[EEPROM_SETUP] & SETUP_DRAM)
+ cmr |= CMR_DRAM;
+ outb(cmr, EWRK3_CMR);
+
+ cr = inb(EWRK3_CR); /* Set up the Control Register */
+ cr |= eeprom_image[EEPROM_SETUP] & SETUP_APD;
+ if (cr & SETUP_APD)
+ cr |= eeprom_image[EEPROM_SETUP] & SETUP_PS;
+ cr |= eeprom_image[EEPROM_MISC0] & FAST_BUS;
+ cr |= eeprom_image[EEPROM_MISC0] & ENA_16;
+ outb(cr, EWRK3_CR);
+
+ /*
+ ** Determine the base address and window length for the EWRK3
+ ** RAM from the memory base register.
+ */
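+ /*
+ ** For example: MBR = 0x0b selects a 64kB window at 0xb0000,
+ ** MBR = 0x15 selects 32kB at 0xa8000, and MBR = 0x40 selects
+ ** 2kB at 0xa0000.
+ */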
+ mem_start = inb(EWRK3_MBR);
+ shmem_length = 0;
+ if (mem_start != 0) {
+ if ((mem_start >= 0x0a) && (mem_start <= 0x0f)) {
+ mem_start *= SHMEM_64K;
+ shmem_length = SHMEM_64K;
+ } else if ((mem_start >= 0x14) && (mem_start <= 0x1f)) {
+ mem_start *= SHMEM_32K;
+ shmem_length = SHMEM_32K;
+ } else if ((mem_start >= 0x40) && (mem_start <= 0xff)) {
+ mem_start = mem_start * SHMEM_2K + 0x80000;
+ shmem_length = SHMEM_2K;
+ } else {
+ return -ENXIO;
+ }
+ }
+ /*
+ ** See the top of this source code for comments about
+ ** uncommenting this line.
+ */
+/* FORCE_2K_MODE; */
+
+ if (hard_strapped) {
+ printk(" is hard strapped.\n");
+ } else if (mem_start) {
+ printk(" has a %dk RAM window", (int) (shmem_length >> 10));
+ printk(" at 0x%.5lx", mem_start);
+ } else {
+ printk(" is in I/O only mode");
+ }
+
+ lp = netdev_priv(dev);
+ lp->shmem_base = mem_start;
+ lp->shmem = ioremap(mem_start, shmem_length);
+ if (!lp->shmem)
+ return -ENOMEM;
+ lp->shmem_length = shmem_length;
+ lp->lemac = lemac;
+ lp->hard_strapped = hard_strapped;
+ lp->led_mask = CR_LED;
+ spin_lock_init(&lp->hw_lock);
+
+ lp->mPage = 64;
+ if (cmr & CMR_DRAM)
+ lp->mPage <<= 1; /* 2 DRAMS on module */
+
+ sprintf(lp->adapter_name, "%s (%s)", name, dev->name);
+
+ lp->irq_mask = ICR_TNEM | ICR_TXDM | ICR_RNEM | ICR_RXDM;
+
+ if (!hard_strapped) {
+ /*
+ ** Enable EWRK3 board interrupts for autoprobing
+ */
+ icr |= ICR_IE; /* Enable interrupts */
+ outb(icr, EWRK3_ICR);
+
+ /* The DMA channel may be passed in on this parameter. */
+ dev->dma = 0;
+
+ /* To auto-IRQ we enable the initialization-done and DMA error
+ interrupts. For now we will always get a DMA error. */
+ if (dev->irq < 2) {
+#ifndef MODULE
+ u_char irqnum;
+ unsigned long irq_mask;
+
+
+ irq_mask = probe_irq_on();
+
+ /*
+ ** Trigger a TNE interrupt.
+ */
+ icr |= ICR_TNEM;
+ outb(1, EWRK3_TDQ); /* Write to the TX done queue */
+ outb(icr, EWRK3_ICR); /* Unmask the TXD interrupt */
+
+ irqnum = irq[((icr & IRQ_SEL) >> 4)];
+
+ mdelay(20);
+ dev->irq = probe_irq_off(irq_mask);
+ if ((dev->irq) && (irqnum == dev->irq)) {
+ printk(" and uses IRQ%d.\n", dev->irq);
+ } else {
+ if (!dev->irq) {
+ printk(" and failed to detect IRQ line.\n");
+ } else if ((irqnum == 1) && (lemac == LeMAC2)) {
+ printk(" and an illegal IRQ line detected.\n");
+ } else {
+ printk(", but incorrect IRQ line detected.\n");
+ }
+ iounmap(lp->shmem);
+ return -ENXIO;
+ }
+
+ DISABLE_IRQs; /* Mask all interrupts */
+
+#endif /* MODULE */
+ } else {
+ printk(" and requires IRQ%d.\n", dev->irq);
+ }
+ }
+
+ if (ewrk3_debug > 1) {
+ printk(version);
+ }
+ /* The EWRK3-specific entries in the device structure. */
+ dev->netdev_ops = &ewrk3_netdev_ops;
+ if (lp->adapter_name[4] == '3')
+ SET_ETHTOOL_OPS(dev, &ethtool_ops_203);
+ else
+ SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->watchdog_timeo = QUEUE_PKT_TIMEOUT;
+
+ dev->mem_start = 0;
+
+ return 0;
+}
+
+
+static int ewrk3_open(struct net_device *dev)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int status = 0;
+ u_char icr, csr;
+
+ /*
+ ** Stop the TX and RX...
+ */
+ STOP_EWRK3;
+
+ if (!lp->hard_strapped) {
+ if (request_irq(dev->irq, (void *) ewrk3_interrupt, 0, "ewrk3", dev)) {
+ printk("ewrk3_open(): Requested IRQ%d is busy\n", dev->irq);
+ status = -EAGAIN;
+ } else {
+
+ /*
+ ** Re-initialize the EWRK3...
+ */
+ ewrk3_init(dev);
+
+ if (ewrk3_debug > 1) {
+ printk("%s: ewrk3 open with irq %d\n", dev->name, dev->irq);
+ printk(" physical address: %pM\n", dev->dev_addr);
+ if (lp->shmem_length == 0) {
+ printk(" no shared memory, I/O only mode\n");
+ } else {
+ printk(" start of shared memory: 0x%08lx\n", lp->shmem_base);
+ printk(" window length: 0x%04lx\n", lp->shmem_length);
+ }
+ printk(" # of DRAMS: %d\n", ((inb(EWRK3_CMR) & 0x02) ? 2 : 1));
+ printk(" csr: 0x%02x\n", inb(EWRK3_CSR));
+ printk(" cr: 0x%02x\n", inb(EWRK3_CR));
+ printk(" icr: 0x%02x\n", inb(EWRK3_ICR));
+ printk(" cmr: 0x%02x\n", inb(EWRK3_CMR));
+ printk(" fmqc: 0x%02x\n", inb(EWRK3_FMQC));
+ }
+ netif_start_queue(dev);
+ /*
+ ** Unmask EWRK3 board interrupts
+ */
+ icr = inb(EWRK3_ICR);
+ ENABLE_IRQs;
+
+ }
+ } else {
+ printk(KERN_ERR "%s: ewrk3 available for hard strapped set up only.\n", dev->name);
+ printk(KERN_ERR " Run the 'ewrk3setup' utility or remove the hard straps.\n");
+ return -EINVAL;
+ }
+
+ return status;
+}
+
+/*
+ ** Initialize the EtherWORKS 3 operating conditions
+ */
+static void ewrk3_init(struct net_device *dev)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ u_char csr, page;
+ u_long iobase = dev->base_addr;
+ int i;
+
+ /*
+ ** Enable any multicasts
+ */
+ set_multicast_list(dev);
+
+ /*
+ ** Set hardware MAC address. Address is initialized from the EEPROM
+ ** during startup but may have since been changed by the user.
+ */
+ for (i=0; i<ETH_ALEN; i++)
+ outb(dev->dev_addr[i], EWRK3_PAR0 + i);
+
+ /*
+ ** Clean out any remaining entries in all the queues here
+ */
+ while (inb(EWRK3_TQ));
+ while (inb(EWRK3_TDQ));
+ while (inb(EWRK3_RQ));
+ while (inb(EWRK3_FMQ));
+
+ /*
+ ** Write a clean free memory queue
+ */
+ for (page = 1; page < lp->mPage; page++) { /* Write the free page numbers */
+ outb(page, EWRK3_FMQ); /* to the Free Memory Queue */
+ }
+
+ START_EWRK3; /* Enable the TX and/or RX */
+}
+
+/*
+ * Transmit timeout
+ */
+
+static void ewrk3_timeout(struct net_device *dev)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ u_char icr, csr;
+ u_long iobase = dev->base_addr;
+
+ if (!lp->hard_strapped)
+ {
+ printk(KERN_WARNING"%s: transmit timed/locked out, status %04x, resetting.\n",
+ dev->name, inb(EWRK3_CSR));
+
+ /*
+ ** Mask all board interrupts
+ */
+ DISABLE_IRQs;
+
+ /*
+ ** Stop the TX and RX...
+ */
+ STOP_EWRK3;
+
+ ewrk3_init(dev);
+
+ /*
+ ** Unmask EWRK3 board interrupts
+ */
+ ENABLE_IRQs;
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+ }
+}
+
+/*
+ ** Writes a socket buffer to the free page queue
+ */
+static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ void __iomem *buf = NULL;
+ u_char icr;
+ u_char page;
+
+ spin_lock_irq (&lp->hw_lock);
+ DISABLE_IRQs;
+
+ /* if no resources available, exit, request packet be queued */
+ if (inb (EWRK3_FMQC) == 0) {
+ printk (KERN_WARNING "%s: ewrk3_queue_pkt(): No free resources...\n",
+ dev->name);
+ printk (KERN_WARNING "%s: ewrk3_queue_pkt(): CSR: %02x ICR: %02x FMQC: %02x\n",
+ dev->name, inb (EWRK3_CSR), inb (EWRK3_ICR),
+ inb (EWRK3_FMQC));
+ goto err_out;
+ }
+
+ /*
+ ** Get a free page from the FMQ
+ */
+ if ((page = inb (EWRK3_FMQ)) >= lp->mPage) {
+ printk ("ewrk3_queue_pkt(): Invalid free memory page (%d).\n",
+ (u_char) page);
+ goto err_out;
+ }
+
+
+ /*
+ ** Set up shared memory window and pointer into the window
+ */
+ if (lp->shmem_length == IO_ONLY) {
+ outb (page, EWRK3_IOPR);
+ } else if (lp->shmem_length == SHMEM_2K) {
+ buf = lp->shmem;
+ outb (page, EWRK3_MPR);
+ } else if (lp->shmem_length == SHMEM_32K) {
+ buf = (((short) page << 11) & 0x7800) + lp->shmem;
+ outb ((page >> 4), EWRK3_MPR);
+ } else if (lp->shmem_length == SHMEM_64K) {
+ buf = (((short) page << 11) & 0xf800) + lp->shmem;
+ outb ((page >> 5), EWRK3_MPR);
+ } else {
+ printk (KERN_ERR "%s: Oops - your private data area is hosed!\n",
+ dev->name);
+ BUG ();
+ }
+
+ /*
+ ** Set up the buffer control structures and copy the data from
+ ** the socket buffer to the shared memory.
+ */
+ if (lp->shmem_length == IO_ONLY) {
+ int i;
+ u_char *p = skb->data;
+ outb ((char) (TCR_QMODE | TCR_PAD | TCR_IFC), EWRK3_DATA);
+ outb ((char) (skb->len & 0xff), EWRK3_DATA);
+ outb ((char) ((skb->len >> 8) & 0xff), EWRK3_DATA);
+ outb ((char) 0x04, EWRK3_DATA);
+ for (i = 0; i < skb->len; i++) {
+ outb (*p++, EWRK3_DATA);
+ }
+ outb (page, EWRK3_TQ); /* Start sending pkt */
+ } else {
+ writeb ((char) (TCR_QMODE | TCR_PAD | TCR_IFC), buf); /* ctrl byte */
+ buf += 1;
+ writeb ((char) (skb->len & 0xff), buf); /* length (16 bit xfer) */
+ buf += 1;
+ if (lp->txc) {
+ writeb(((skb->len >> 8) & 0xff) | XCT, buf);
+ buf += 1;
+ writeb (0x04, buf); /* index byte */
+ buf += 1;
+ writeb (0x00, (buf + skb->len)); /* Write the XCT flag */
+ memcpy_toio (buf, skb->data, PRELOAD); /* Write PRELOAD bytes */
+ outb (page, EWRK3_TQ); /* Start sending pkt */
+ memcpy_toio (buf + PRELOAD,
+ skb->data + PRELOAD,
+ skb->len - PRELOAD);
+ writeb (0xff, (buf + skb->len)); /* Write the XCT flag */
+ } else {
+ writeb ((skb->len >> 8) & 0xff, buf);
+ buf += 1;
+ writeb (0x04, buf); /* index byte */
+ buf += 1;
+ memcpy_toio (buf, skb->data, skb->len); /* Write data bytes */
+ outb (page, EWRK3_TQ); /* Start sending pkt */
+ }
+ }
+
+ ENABLE_IRQs;
+ spin_unlock_irq (&lp->hw_lock);
+
+ dev->stats.tx_bytes += skb->len;
+ dev_kfree_skb (skb);
+
+ /* Check for free resources: stop Tx queue if there are none */
+ if (inb (EWRK3_FMQC) == 0)
+ netif_stop_queue (dev);
+
+ return NETDEV_TX_OK;
+
+err_out:
+ ENABLE_IRQs;
+ spin_unlock_irq (&lp->hw_lock);
+ return NETDEV_TX_BUSY;
+}
+
+/*
+ ** The EWRK3 interrupt handler.
+ */
+static irqreturn_t ewrk3_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct ewrk3_private *lp;
+ u_long iobase;
+ u_char icr, cr, csr;
+
+ lp = netdev_priv(dev);
+ iobase = dev->base_addr;
+
+ /* get the interrupt information */
+ csr = inb(EWRK3_CSR);
+
+ /*
+ ** Mask the EWRK3 board interrupts and turn on the LED
+ */
+ spin_lock(&lp->hw_lock);
+ DISABLE_IRQs;
+
+ cr = inb(EWRK3_CR);
+ cr |= lp->led_mask;
+ outb(cr, EWRK3_CR);
+
+ if (csr & CSR_RNE) /* Rx interrupt (packet[s] arrived) */
+ ewrk3_rx(dev);
+
+ if (csr & CSR_TNE) /* Tx interrupt (packet sent) */
+ ewrk3_tx(dev);
+
+ /*
+ ** Now deal with the TX/RX disable flags. These are set when there
+ ** are no more resources. If resources free up then enable these
+ ** interrupts, otherwise mask them - failure to do this will result
+ ** in the system hanging in an interrupt loop.
+ */
+ if (inb(EWRK3_FMQC)) { /* any resources available? */
+ lp->irq_mask |= ICR_TXDM | ICR_RXDM; /* enable the interrupt source */
+ csr &= ~(CSR_TXD | CSR_RXD); /* ensure restart of a stalled TX or RX */
+ outb(csr, EWRK3_CSR);
+ netif_wake_queue(dev);
+ } else {
+ lp->irq_mask &= ~(ICR_TXDM | ICR_RXDM); /* disable the interrupt source */
+ }
+
+ /* Unmask the EWRK3 board interrupts and turn off the LED */
+ cr &= ~(lp->led_mask);
+ outb(cr, EWRK3_CR);
+ ENABLE_IRQs;
+ spin_unlock(&lp->hw_lock);
+ return IRQ_HANDLED;
+}
+
+/* Called with lp->hw_lock held */
+static int ewrk3_rx(struct net_device *dev)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int i, status = 0;
+ u_char page;
+ void __iomem *buf = NULL;
+
+ while (inb(EWRK3_RQC) && !status) { /* Whilst there's incoming data */
+ if ((page = inb(EWRK3_RQ)) < lp->mPage) { /* Get next entry's buffer page */
+ /*
+ ** Set up shared memory window and pointer into the window
+ */
+ if (lp->shmem_length == IO_ONLY) {
+ outb(page, EWRK3_IOPR);
+ } else if (lp->shmem_length == SHMEM_2K) {
+ buf = lp->shmem;
+ outb(page, EWRK3_MPR);
+ } else if (lp->shmem_length == SHMEM_32K) {
+ buf = (((short) page << 11) & 0x7800) + lp->shmem;
+ outb((page >> 4), EWRK3_MPR);
+ } else if (lp->shmem_length == SHMEM_64K) {
+ buf = (((short) page << 11) & 0xf800) + lp->shmem;
+ outb((page >> 5), EWRK3_MPR);
+ } else {
+ status = -1;
+ printk("%s: Oops - your private data area is hosed!\n", dev->name);
+ }
+
+ if (!status) {
+ char rx_status;
+ int pkt_len;
+
+ if (lp->shmem_length == IO_ONLY) {
+ rx_status = inb(EWRK3_DATA);
+ pkt_len = inb(EWRK3_DATA);
+ pkt_len |= ((u_short) inb(EWRK3_DATA) << 8);
+ } else {
+ rx_status = readb(buf);
+ buf += 1;
+ pkt_len = readw(buf);
+ buf += 3;
+ }
+
+ if (!(rx_status & R_ROK)) { /* There was an error. */
+ dev->stats.rx_errors++; /* Update the error stats. */
+ if (rx_status & R_DBE)
+ dev->stats.rx_frame_errors++;
+ if (rx_status & R_CRC)
+ dev->stats.rx_crc_errors++;
+ if (rx_status & R_PLL)
+ dev->stats.rx_fifo_errors++;
+ } else {
+ struct sk_buff *skb;
+
+ if ((skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ unsigned char *p;
+ skb_reserve(skb, 2); /* Align to 16 bytes */
+ p = skb_put(skb, pkt_len);
+
+ if (lp->shmem_length == IO_ONLY) {
+ *p = inb(EWRK3_DATA); /* dummy read */
+ for (i = 0; i < pkt_len; i++) {
+ *p++ = inb(EWRK3_DATA);
+ }
+ } else {
+ memcpy_fromio(p, buf, pkt_len);
+ }
+
+ for (i = 1; i < EWRK3_PKT_STAT_SZ - 1; i++) {
+ if (pkt_len < i * EWRK3_PKT_BIN_SZ) {
+ lp->pktStats.bins[i]++;
+ i = EWRK3_PKT_STAT_SZ;
+ }
+ }
+ p = skb->data; /* Look at the dest addr */
+ if (is_multicast_ether_addr(p)) {
+ if (is_broadcast_ether_addr(p)) {
+ lp->pktStats.broadcast++;
+ } else {
+ lp->pktStats.multicast++;
+ }
+ } else if (compare_ether_addr(p, dev->dev_addr) == 0) {
+ lp->pktStats.unicast++;
+ }
+ lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
+ if (lp->pktStats.bins[0] == 0) { /* Reset counters */
+ memset(&lp->pktStats, 0, sizeof(lp->pktStats));
+ }
+ /*
+ ** Notify the upper protocol layers that there is another
+ ** packet to handle
+ */
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+
+ /*
+ ** Update stats
+ */
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ } else {
+ printk("%s: Insufficient memory; nuking packet.\n", dev->name);
+ dev->stats.rx_dropped++; /* Really, deferred. */
+ break;
+ }
+ }
+ }
+ /*
+ ** Return the received buffer to the free memory queue
+ */
+ outb(page, EWRK3_FMQ);
+ } else {
+ printk("ewrk3_rx(): Illegal page number, page %d\n", page);
+ printk("ewrk3_rx(): CSR: %02x ICR: %02x FMQC: %02x\n", inb(EWRK3_CSR), inb(EWRK3_ICR), inb(EWRK3_FMQC));
+ }
+ }
+ return status;
+}
+
+/*
+** Buffer sent - check for TX buffer errors.
+** Called with lp->hw_lock held
+*/
+static int ewrk3_tx(struct net_device *dev)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ u_char tx_status;
+
+ while ((tx_status = inb(EWRK3_TDQ)) > 0) { /* Whilst there's old buffers */
+ if (tx_status & T_VSTS) { /* The status is valid */
+ if (tx_status & T_TXE) {
+ dev->stats.tx_errors++;
+ if (tx_status & T_NCL)
+ dev->stats.tx_carrier_errors++;
+ if (tx_status & T_LCL)
+ dev->stats.tx_window_errors++;
+ if (tx_status & T_CTU) {
+ if ((tx_status & T_COLL) ^ T_XUR) {
+ lp->pktStats.tx_underruns++;
+ } else {
+ lp->pktStats.excessive_underruns++;
+ }
+ } else if (tx_status & T_COLL) {
+ if ((tx_status & T_COLL) ^ T_XCOLL) {
+ dev->stats.collisions++;
+ } else {
+ lp->pktStats.excessive_collisions++;
+ }
+ }
+ } else {
+ dev->stats.tx_packets++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ewrk3_close(struct net_device *dev)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ u_char icr, csr;
+
+ netif_stop_queue(dev);
+
+ if (ewrk3_debug > 1) {
+ printk("%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inb(EWRK3_CSR));
+ }
+ /*
+ ** We stop the EWRK3 here... mask interrupts and stop TX & RX
+ */
+ DISABLE_IRQs;
+
+ STOP_EWRK3;
+
+ /*
+ ** Clean out the TX and RX queues here (note that one entry
+ ** may get added to either the TXD or RX queues if the TX or RX
+ ** just starts processing a packet before the STOP_EWRK3 command
+ ** is received. This will be flushed in the ewrk3_open() call).
+ */
+ while (inb(EWRK3_TQ));
+ while (inb(EWRK3_TDQ));
+ while (inb(EWRK3_RQ));
+
+ if (!lp->hard_strapped) {
+ free_irq(dev->irq, dev);
+ }
+ return 0;
+}
+
+/*
+ ** Set or clear the multicast filter for this adapter.
+ */
+static void set_multicast_list(struct net_device *dev)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ u_char csr;
+
+ csr = inb(EWRK3_CSR);
+
+ if (lp->shmem_length == IO_ONLY) {
+ lp->mctbl = NULL;
+ } else {
+ lp->mctbl = lp->shmem + PAGE0_HTE;
+ }
+
+ csr &= ~(CSR_PME | CSR_MCE);
+ if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
+ csr |= CSR_PME;
+ outb(csr, EWRK3_CSR);
+ } else {
+ SetMulticastFilter(dev);
+ csr |= CSR_MCE;
+ outb(csr, EWRK3_CSR);
+ }
+}
+
+/*
+ ** Calculate the hash code and update the logical address filter
+ ** from a list of ethernet multicast addresses.
+ ** Little endian crc one liner from Matt Thomas, DEC.
+ **
+ ** Note that when clearing the table, the broadcast bit must remain asserted
+ ** to receive broadcast messages.
+ */
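+/*
+ ** Worked example (hypothetical CRC value, for illustration only): if
+ ** the low 9 bits of ether_crc_le() for an address are 0x0cb (203),
+ ** then byte = 203 >> 3 = 25 and bit = 1 << (203 & 7) = 0x08, so bit 3
+ ** of filter byte 25 gets set.
+ */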
+static void SetMulticastFilter(struct net_device *dev)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ u_long iobase = dev->base_addr;
+ int i;
+ char bit, byte;
+ short __iomem *p = lp->mctbl;
+ u16 hashcode;
+ u32 crc;
+
+ spin_lock_irq(&lp->hw_lock);
+
+ if (lp->shmem_length == IO_ONLY) {
+ outb(0, EWRK3_IOPR);
+ outw(PAGE0_HTE, EWRK3_PIR1);
+ } else {
+ outb(0, EWRK3_MPR);
+ }
+
+ if (dev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) {
+ if (lp->shmem_length == IO_ONLY) {
+ outb(0xff, EWRK3_DATA);
+ } else { /* memset didn't work here */
+ writew(0xffff, p);
+ p++;
+ i++;
+ }
+ }
+ } else {
+ /* Clear table except for broadcast bit */
+ if (lp->shmem_length == IO_ONLY) {
+ for (i = 0; i < (HASH_TABLE_LEN >> 4) - 1; i++) {
+ outb(0x00, EWRK3_DATA);
+ }
+ outb(0x80, EWRK3_DATA);
+ i++; /* insert the broadcast bit */
+ for (; i < (HASH_TABLE_LEN >> 3); i++) {
+ outb(0x00, EWRK3_DATA);
+ }
+ } else {
+ memset_io(lp->mctbl, 0, HASH_TABLE_LEN >> 3);
+ writeb(0x80, lp->mctbl + (HASH_TABLE_LEN >> 4) - 1);
+ }
+
+ /* Update table */
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */
+
+ byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
+ bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
+
+ if (lp->shmem_length == IO_ONLY) {
+ u_char tmp;
+
+ outw(PAGE0_HTE + byte, EWRK3_PIR1);
+ tmp = inb(EWRK3_DATA);
+ tmp |= bit;
+ outw(PAGE0_HTE + byte, EWRK3_PIR1);
+ outb(tmp, EWRK3_DATA);
+ } else {
+ writeb(readb(lp->mctbl + byte) | bit, lp->mctbl + byte);
+ }
+ }
+ }
+
+ spin_unlock_irq(&lp->hw_lock);
+}
+
+/*
+ ** ISA bus I/O device probe
+ */
+static int __init isa_probe(struct net_device *dev, u_long ioaddr)
+{
+ int i = num_ewrks3s, maxSlots;
+ int ret = -ENODEV;
+
+ u_long iobase;
+
+ if (ioaddr >= 0x400)
+ goto out;
+
+ if (ioaddr == 0) { /* Autoprobing */
+ iobase = EWRK3_IO_BASE; /* Get the first slot address */
+ maxSlots = 24;
+ } else { /* Probe a specific location */
+ iobase = ioaddr;
+ maxSlots = i + 1;
+ }
+
+ for (; (i < maxSlots) && (dev != NULL);
+ iobase += EWRK3_IOP_INC, i++)
+ {
+ if (request_region(iobase, EWRK3_TOTAL_SIZE, DRV_NAME)) {
+ if (DevicePresent(iobase) == 0) {
+ int irq = dev->irq;
+ ret = ewrk3_hw_init(dev, iobase);
+ if (!ret)
+ break;
+ dev->irq = irq;
+ }
+ release_region(iobase, EWRK3_TOTAL_SIZE);
+ }
+ }
+ out:
+
+ return ret;
+}
+
+/*
+ ** EISA bus I/O device probe. Probe from slot 1 since slot 0 is usually
+ ** the motherboard.
+ */
+static int __init eisa_probe(struct net_device *dev, u_long ioaddr)
+{
+ int i, maxSlots;
+ u_long iobase;
+ int ret = -ENODEV;
+
+ if (ioaddr < 0x1000)
+ goto out;
+
+ iobase = ioaddr;
+ i = (ioaddr >> 12);
+ maxSlots = i + 1;
+
+ for (i = 1; (i < maxSlots) && (dev != NULL); i++, iobase += EISA_SLOT_INC) {
+ if (EISA_signature(name, EISA_ID) != 0) {
+ if (request_region(iobase, EWRK3_TOTAL_SIZE, DRV_NAME)) {
+ if (DevicePresent(iobase) == 0) {
+ int irq = dev->irq;
+ ret = ewrk3_hw_init(dev, iobase);
+ if (!ret)
+ break;
+ dev->irq = irq;
+ }
+ release_region(iobase, EWRK3_TOTAL_SIZE);
+ }
+ }
+ }
+
+ out:
+ return ret;
+}
+
+
+/*
+ ** Read the EWRK3 EEPROM using this routine
+ */
+static int Read_EEPROM(u_long iobase, u_char eaddr)
+{
+ int i;
+
+ outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */
+ outb(EEPROM_RD, EWRK3_IOPR); /* issue read command */
+ for (i = 0; i < 5000; i++)
+ inb(EWRK3_CSR); /* wait 1msec */
+
+ return inw(EWRK3_EPROM1); /* 16 bits data return */
+}
+
+/*
+ ** Write the EWRK3 EEPROM using this routine
+ */
+static int Write_EEPROM(short data, u_long iobase, u_char eaddr)
+{
+ int i;
+
+ outb(EEPROM_WR_EN, EWRK3_IOPR); /* issue write enable command */
+ for (i = 0; i < 5000; i++)
+ inb(EWRK3_CSR); /* wait 1msec */
+ outw(data, EWRK3_EPROM1); /* write data to register */
+ outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */
+ outb(EEPROM_WR, EWRK3_IOPR); /* issue write command */
+ for (i = 0; i < 75000; i++)
+ inb(EWRK3_CSR); /* wait 15msec */
+ outb(EEPROM_WR_DIS, EWRK3_IOPR); /* issue write disable command */
+ for (i = 0; i < 5000; i++)
+ inb(EWRK3_CSR); /* wait 1msec */
+
+ return 0;
+}
+
+/*
+ ** Look for a particular board name in the on-board EEPROM.
+ */
+static void __init EthwrkSignature(char *name, char *eeprom_image)
+{
+ int i;
+ char *signatures[] = EWRK3_SIGNATURE;
+
+ for (i=0; *signatures[i] != '\0'; i++)
+ if( !strncmp(eeprom_image+EEPROM_PNAME7, signatures[i], strlen(signatures[i])) )
+ break;
+
+ if (*signatures[i] != '\0') {
+ memcpy(name, eeprom_image+EEPROM_PNAME7, EWRK3_STRLEN);
+ name[EWRK3_STRLEN] = '\0';
+ } else
+ name[0] = '\0';
+}
+
+/*
+ ** Look for a special sequence in the Ethernet station address PROM that
+ ** is common across all EWRK3 products.
+ **
+ ** Search the Ethernet address ROM for the signature. Since the ROM address
+ ** counter can start at an arbitrary point, the search must include the entire
+ ** probe sequence length plus the (length_of_the_signature - 1).
+ ** Stop the search IMMEDIATELY after the signature is found so that the
+ ** PROM address counter is correctly positioned at the start of the
+ ** ethernet address for later read out.
+ */
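+/*
+ ** With PROBE_LENGTH = 32 and an 8 byte signature (two copies of
+ ** ETH_PROM_SIG), the search below therefore examines at most 39 bytes
+ ** of the address PROM.
+ */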
+
+static int __init DevicePresent(u_long iobase)
+{
+ union {
+ struct {
+ u32 a;
+ u32 b;
+ } llsig;
+ char Sig[sizeof(u32) << 1];
+ }
+ dev;
+ short sigLength;
+ char data;
+ int i, j, status = 0;
+
+ dev.llsig.a = ETH_PROM_SIG;
+ dev.llsig.b = ETH_PROM_SIG;
+ sigLength = sizeof(u32) << 1;
+
+ for (i = 0, j = 0; j < sigLength && i < PROBE_LENGTH + sigLength - 1; i++) {
+ data = inb(EWRK3_APROM);
+ if (dev.Sig[j] == data) { /* track signature */
+ j++;
+ } else { /* lost signature; begin search again */
+ if (data == dev.Sig[0]) {
+ j = 1;
+ } else {
+ j = 0;
+ }
+ }
+ }
+
+ if (j != sigLength) {
+ status = -ENODEV; /* search failed */
+ }
+ return status;
+}
+
+static u_char __init get_hw_addr(struct net_device *dev, u_char * eeprom_image, char chipType)
+{
+ int i, j, k;
+ u_short chksum;
+ u_char crc, lfsr, sd, status = 0;
+ u_long iobase = dev->base_addr;
+ u16 tmp;
+
+ if (chipType == LeMAC2) {
+ for (crc = 0x6a, j = 0; j < ETH_ALEN; j++) {
+ sd = dev->dev_addr[j] = eeprom_image[EEPROM_PADDR0 + j];
+ outb(dev->dev_addr[j], EWRK3_PAR0 + j);
+ for (k = 0; k < 8; k++, sd >>= 1) {
+ lfsr = ((((crc & 0x02) >> 1) ^ (crc & 0x01)) ^ (sd & 0x01)) << 7;
+ crc = (crc >> 1) + lfsr;
+ }
+ }
+ if (crc != eeprom_image[EEPROM_PA_CRC])
+ status = -1;
+ } else {
+ for (i = 0, k = 0; i < ETH_ALEN;) {
+ k <<= 1;
+ if (k > 0xffff)
+ k -= 0xffff;
+
+ k += (u_char) (tmp = inb(EWRK3_APROM));
+ dev->dev_addr[i] = (u_char) tmp;
+ outb(dev->dev_addr[i], EWRK3_PAR0 + i);
+ i++;
+ k += (u_short) ((tmp = inb(EWRK3_APROM)) << 8);
+ dev->dev_addr[i] = (u_char) tmp;
+ outb(dev->dev_addr[i], EWRK3_PAR0 + i);
+ i++;
+
+ if (k > 0xffff)
+ k -= 0xffff;
+ }
+ if (k == 0xffff)
+ k = 0;
+ chksum = inb(EWRK3_APROM);
+ chksum |= (inb(EWRK3_APROM) << 8);
+ if (k != chksum)
+ status = -1;
+ }
+
+ return status;
+}
+
+/*
+ ** Look for a particular board name in the EISA configuration space
+ */
+static int __init EISA_signature(char *name, s32 eisa_id)
+{
+ u_long i;
+ char *signatures[] = EWRK3_SIGNATURE;
+ char ManCode[EWRK3_STRLEN];
+ union {
+ s32 ID;
+ char Id[4];
+ } Eisa;
+ int status = 0;
+
+ *name = '\0';
+ for (i = 0; i < 4; i++) {
+ Eisa.Id[i] = inb(eisa_id + i);
+ }
+
+ ManCode[0] = (((Eisa.Id[0] >> 2) & 0x1f) + 0x40);
+ ManCode[1] = (((Eisa.Id[1] & 0xe0) >> 5) + ((Eisa.Id[0] & 0x03) << 3) + 0x40);
+ ManCode[2] = (((Eisa.Id[2] >> 4) & 0x0f) + 0x30);
+ ManCode[3] = ((Eisa.Id[2] & 0x0f) + 0x30);
+ ManCode[4] = (((Eisa.Id[3] >> 4) & 0x0f) + 0x30);
+ ManCode[5] = '\0';
+
+ for (i = 0; (*signatures[i] != '\0') && (*name == '\0'); i++) {
+ if (strstr(ManCode, signatures[i]) != NULL) {
+ strcpy(name, ManCode);
+ status = 1;
+ }
+ }
+
+ return status; /* return the device name string */
+}
+
+static void ewrk3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ int fwrev = Read_EEPROM(dev->base_addr, EEPROM_REVLVL);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->fw_version, "%d", fwrev);
+ strcpy(info->bus_info, "N/A");
+ info->eedump_len = EEPROM_MAX;
+}
+
+static int ewrk3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ unsigned long iobase = dev->base_addr;
+ u8 cr = inb(EWRK3_CR);
+
+ switch (lp->adapter_name[4]) {
+ case '3': /* DE203 */
+ ecmd->supported = SUPPORTED_BNC;
+ ecmd->port = PORT_BNC;
+ break;
+
+ case '4': /* DE204 */
+ ecmd->supported = SUPPORTED_TP;
+ ecmd->port = PORT_TP;
+ break;
+
+ case '5': /* DE205 */
+ ecmd->supported = SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_AUI;
+ ecmd->autoneg = !(cr & CR_APD);
+ /*
+ ** Port is only valid if autoneg is disabled
+ ** and even then we don't know if AUI is jumpered.
+ */
+ if (!ecmd->autoneg)
+ ecmd->port = (cr & CR_PSEL) ? PORT_BNC : PORT_TP;
+ break;
+ }
+
+ ecmd->supported |= SUPPORTED_10baseT_Half;
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
+ ecmd->duplex = DUPLEX_HALF;
+ return 0;
+}
+
+static int ewrk3_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ unsigned long iobase = dev->base_addr;
+ unsigned long flags;
+ u8 cr;
+
+ /* DE205 is the only card with anything to set */
+ if (lp->adapter_name[4] != '5')
+ return -EOPNOTSUPP;
+
+ /* Sanity-check parameters */
+ if (ecmd->speed != SPEED_10)
+ return -EINVAL;
+ if (ecmd->port != PORT_TP && ecmd->port != PORT_BNC)
+ return -EINVAL; /* AUI is not software-selectable */
+ if (ecmd->transceiver != XCVR_INTERNAL)
+ return -EINVAL;
+ if (ecmd->duplex != DUPLEX_HALF)
+ return -EINVAL;
+ if (ecmd->phy_address != 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ cr = inb(EWRK3_CR);
+
+ /* If Autoneg is set, change to Auto Port mode */
+ /* Otherwise, disable Auto Port and set port explicitly */
+ if (ecmd->autoneg) {
+ cr &= ~CR_APD;
+ } else {
+ cr |= CR_APD;
+ if (ecmd->port == PORT_TP)
+ cr &= ~CR_PSEL; /* Force TP */
+ else
+ cr |= CR_PSEL; /* Force BNC */
+ }
+
+ /* Commit the changes */
+ outb(cr, EWRK3_CR);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ return 0;
+}
+
+static u32 ewrk3_get_link(struct net_device *dev)
+{
+ unsigned long iobase = dev->base_addr;
+ u8 cmr = inb(EWRK3_CMR);
+ /* DE203 has BNC only and link status does not apply */
+ /* On DE204 this is always valid since TP is the only port. */
+ /* On DE205 this reflects TP status even if BNC or AUI is selected. */
+ return !(cmr & CMR_LINK);
+}
+
+static int ewrk3_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ unsigned long iobase = dev->base_addr;
+ u8 cr;
+
+ spin_lock_irq(&lp->hw_lock);
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ /* Prevent ISR from twiddling the LED */
+ lp->led_mask = 0;
+ spin_unlock_irq(&lp->hw_lock);
+ return 2; /* cycle on/off twice per second */
+
+ case ETHTOOL_ID_ON:
+ cr = inb(EWRK3_CR);
+ outb(cr | CR_LED, EWRK3_CR);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ cr = inb(EWRK3_CR);
+ outb(cr & ~CR_LED, EWRK3_CR);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ lp->led_mask = CR_LED;
+ cr = inb(EWRK3_CR);
+ outb(cr & ~CR_LED, EWRK3_CR);
+ }
+ spin_unlock_irq(&lp->hw_lock);
+
+ return 0;
+}
+
+static const struct ethtool_ops ethtool_ops_203 = {
+ .get_drvinfo = ewrk3_get_drvinfo,
+ .get_settings = ewrk3_get_settings,
+ .set_settings = ewrk3_set_settings,
+ .set_phys_id = ewrk3_set_phys_id,
+};
+
+static const struct ethtool_ops ethtool_ops = {
+ .get_drvinfo = ewrk3_get_drvinfo,
+ .get_settings = ewrk3_get_settings,
+ .set_settings = ewrk3_set_settings,
+ .get_link = ewrk3_get_link,
+ .set_phys_id = ewrk3_set_phys_id,
+};
+
+/*
+ ** Perform IOCTL call functions here. Some are privileged operations and the
+ ** effective uid is checked in those cases.
+ */
+static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct ewrk3_private *lp = netdev_priv(dev);
+ struct ewrk3_ioctl *ioc = (struct ewrk3_ioctl *) &rq->ifr_ifru;
+ u_long iobase = dev->base_addr;
+ int i, j, status = 0;
+ u_char csr;
+ unsigned long flags;
+ union ewrk3_addr {
+ u_char addr[HASH_TABLE_LEN * ETH_ALEN];
+ u_short val[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
+ };
+
+ union ewrk3_addr *tmp;
+
+ /* All we handle are private IOCTLs */
+ if (cmd != EWRK3IOCTL)
+ return -EOPNOTSUPP;
+
+ tmp = kmalloc(sizeof(union ewrk3_addr), GFP_KERNEL);
+ if(tmp==NULL)
+ return -ENOMEM;
+
+ switch (ioc->cmd) {
+ case EWRK3_GET_HWADDR: /* Get the hardware address */
+ for (i = 0; i < ETH_ALEN; i++) {
+ tmp->addr[i] = dev->dev_addr[i];
+ }
+ ioc->len = ETH_ALEN;
+ if (copy_to_user(ioc->data, tmp->addr, ioc->len))
+ status = -EFAULT;
+ break;
+
+ case EWRK3_SET_HWADDR: /* Set the hardware address */
+ if (capable(CAP_NET_ADMIN)) {
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ csr = inb(EWRK3_CSR);
+ csr |= (CSR_TXD | CSR_RXD);
+ outb(csr, EWRK3_CSR); /* Disable the TX and RX */
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+
+ if (copy_from_user(tmp->addr, ioc->data, ETH_ALEN)) {
+ status = -EFAULT;
+ break;
+ }
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ for (i = 0; i < ETH_ALEN; i++) {
+ dev->dev_addr[i] = tmp->addr[i];
+ outb(tmp->addr[i], EWRK3_PAR0 + i);
+ }
+
+ csr = inb(EWRK3_CSR);
+ csr &= ~(CSR_TXD | CSR_RXD); /* Enable the TX and RX */
+ outb(csr, EWRK3_CSR);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_SET_PROM: /* Set Promiscuous Mode */
+ if (capable(CAP_NET_ADMIN)) {
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ csr = inb(EWRK3_CSR);
+ csr |= CSR_PME;
+ csr &= ~CSR_MCE;
+ outb(csr, EWRK3_CSR);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_CLR_PROM: /* Clear Promiscuous Mode */
+ if (capable(CAP_NET_ADMIN)) {
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ csr = inb(EWRK3_CSR);
+ csr &= ~CSR_PME;
+ outb(csr, EWRK3_CSR);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_MCA: /* Get the multicast address table */
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ if (lp->shmem_length == IO_ONLY) {
+ outb(0, EWRK3_IOPR);
+ outw(PAGE0_HTE, EWRK3_PIR1);
+ for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) {
+ tmp->addr[i] = inb(EWRK3_DATA);
+ }
+ } else {
+ outb(0, EWRK3_MPR);
+ memcpy_fromio(tmp->addr, lp->shmem + PAGE0_HTE, (HASH_TABLE_LEN >> 3));
+ }
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+
+ ioc->len = (HASH_TABLE_LEN >> 3);
+ if (copy_to_user(ioc->data, tmp->addr, ioc->len))
+ status = -EFAULT;
+
+ break;
+ case EWRK3_SET_MCA: /* Set a multicast address */
+ if (capable(CAP_NET_ADMIN)) {
+ if (ioc->len > HASH_TABLE_LEN) {
+ status = -EINVAL;
+ break;
+ }
+ if (copy_from_user(tmp->addr, ioc->data, ETH_ALEN * ioc->len)) {
+ status = -EFAULT;
+ break;
+ }
+ set_multicast_list(dev);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_CLR_MCA: /* Clear all multicast addresses */
+ if (capable(CAP_NET_ADMIN)) {
+ set_multicast_list(dev);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_MCA_EN: /* Enable multicast addressing */
+ if (capable(CAP_NET_ADMIN)) {
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ csr = inb(EWRK3_CSR);
+ csr |= CSR_MCE;
+ csr &= ~CSR_PME;
+ outb(csr, EWRK3_CSR);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_STATS: { /* Get the driver statistics */
+ struct ewrk3_stats *tmp_stats =
+ kmalloc(sizeof(lp->pktStats), GFP_KERNEL);
+ if (!tmp_stats) {
+ status = -ENOMEM;
+ break;
+ }
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ memcpy(tmp_stats, &lp->pktStats, sizeof(lp->pktStats));
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+
+ ioc->len = sizeof(lp->pktStats);
+ if (copy_to_user(ioc->data, tmp_stats, sizeof(lp->pktStats)))
+ status = -EFAULT;
+ kfree(tmp_stats);
+ break;
+ }
+ case EWRK3_CLR_STATS: /* Zero out the driver statistics */
+ if (capable(CAP_NET_ADMIN)) {
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ memset(&lp->pktStats, 0, sizeof(lp->pktStats));
+ spin_unlock_irqrestore(&lp->hw_lock,flags);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_CSR: /* Get the CSR Register contents */
+ tmp->addr[0] = inb(EWRK3_CSR);
+ ioc->len = 1;
+ if (copy_to_user(ioc->data, tmp->addr, ioc->len))
+ status = -EFAULT;
+ break;
+ case EWRK3_SET_CSR: /* Set the CSR Register contents */
+ if (capable(CAP_NET_ADMIN)) {
+ if (copy_from_user(tmp->addr, ioc->data, 1)) {
+ status = -EFAULT;
+ break;
+ }
+ outb(tmp->addr[0], EWRK3_CSR);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_EEPROM: /* Get the EEPROM contents */
+ if (capable(CAP_NET_ADMIN)) {
+ for (i = 0; i < (EEPROM_MAX >> 1); i++) {
+ tmp->val[i] = (short) Read_EEPROM(iobase, i);
+ }
+ i = EEPROM_MAX;
+ tmp->addr[i++] = inb(EWRK3_CMR); /* Config/Management Reg. */
+ for (j = 0; j < ETH_ALEN; j++) {
+ tmp->addr[i++] = inb(EWRK3_PAR0 + j);
+ }
+ ioc->len = EEPROM_MAX + 1 + ETH_ALEN;
+ if (copy_to_user(ioc->data, tmp->addr, ioc->len))
+ status = -EFAULT;
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_SET_EEPROM: /* Set the EEPROM contents */
+ if (capable(CAP_NET_ADMIN)) {
+ if (copy_from_user(tmp->addr, ioc->data, EEPROM_MAX)) {
+ status = -EFAULT;
+ break;
+ }
+ for (i = 0; i < (EEPROM_MAX >> 1); i++) {
+ Write_EEPROM(tmp->val[i], iobase, i);
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_CMR: /* Get the CMR Register contents */
+ tmp->addr[0] = inb(EWRK3_CMR);
+ ioc->len = 1;
+ if (copy_to_user(ioc->data, tmp->addr, ioc->len))
+ status = -EFAULT;
+ break;
+ case EWRK3_SET_TX_CUT_THRU: /* Set TX cut through mode */
+ if (capable(CAP_NET_ADMIN)) {
+ lp->txc = 1;
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_CLR_TX_CUT_THRU: /* Clear TX cut through mode */
+ if (capable(CAP_NET_ADMIN)) {
+ lp->txc = 0;
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ default:
+ status = -EOPNOTSUPP;
+ }
+ kfree(tmp);
+ return status;
+}
+
+#ifdef MODULE
+static struct net_device *ewrk3_devs[MAX_NUM_EWRK3S];
+static int ndevs;
+static int io[MAX_NUM_EWRK3S+1] = { 0x300, 0, };
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(io, "EtherWORKS 3 I/O base address(es)");
+MODULE_PARM_DESC(irq, "EtherWORKS 3 IRQ number(s)");
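+
+/* For example (matching the notes in the header comment above), two
+   cards can be brought up with: insmod ewrk3.o io=0x300,0x340 irq=5,10 */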
+
+static __exit void ewrk3_exit_module(void)
+{
+ int i;
+
+ for( i=0; i<ndevs; i++ ) {
+ struct net_device *dev = ewrk3_devs[i];
+ struct ewrk3_private *lp = netdev_priv(dev);
+ ewrk3_devs[i] = NULL;
+ unregister_netdev(dev);
+ release_region(dev->base_addr, EWRK3_TOTAL_SIZE);
+ iounmap(lp->shmem);
+ free_netdev(dev);
+ }
+}
+
+static __init int ewrk3_init_module(void)
+{
+ int i=0;
+
+ while( io[i] && irq[i] ) {
+ struct net_device *dev
+ = alloc_etherdev(sizeof(struct ewrk3_private));
+
+ if (!dev)
+ break;
+
+ if (ewrk3_probe1(dev, io[i], irq[i]) != 0) {
+ free_netdev(dev);
+ break;
+ }
+
+ ewrk3_devs[ndevs++] = dev;
+ i++;
+ }
+
+ return ndevs ? 0 : -EIO;
+}
+
+
+/* Hack for breakage in new module stuff */
+module_exit(ewrk3_exit_module);
+module_init(ewrk3_init_module);
+#endif /* MODULE */
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/dec/ewrk3.h b/drivers/net/ethernet/dec/ewrk3.h
new file mode 100644
index 000000000000..8e0ee906567b
--- /dev/null
+++ b/drivers/net/ethernet/dec/ewrk3.h
@@ -0,0 +1,322 @@
+/*
+ Written 1994 by David C. Davies.
+
+ Copyright 1994 Digital Equipment Corporation.
+
+ This software may be used and distributed according to the terms of the
+ GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as davies@wanton.lkg.dec.com or Digital
+ Equipment Corporation, 550 King Street, Littleton MA 01460.
+
+ =========================================================================
+*/
+
+/*
+** I/O Address Register Map
+*/
+#define EWRK3_CSR iobase+0x00 /* Control and Status Register */
+#define EWRK3_CR iobase+0x01 /* Control Register */
+#define EWRK3_ICR iobase+0x02 /* Interrupt Control Register */
+#define EWRK3_TSR iobase+0x03 /* Transmit Status Register */
+#define EWRK3_RSVD1 iobase+0x04 /* RESERVED */
+#define EWRK3_RSVD2 iobase+0x05 /* RESERVED */
+#define EWRK3_FMQ iobase+0x06 /* Free Memory Queue */
+#define EWRK3_FMQC iobase+0x07 /* Free Memory Queue Counter */
+#define EWRK3_RQ iobase+0x08 /* Receive Queue */
+#define EWRK3_RQC iobase+0x09 /* Receive Queue Counter */
+#define EWRK3_TQ iobase+0x0a /* Transmit Queue */
+#define EWRK3_TQC iobase+0x0b /* Transmit Queue Counter */
+#define EWRK3_TDQ iobase+0x0c /* Transmit Done Queue */
+#define EWRK3_TDQC iobase+0x0d /* Transmit Done Queue Counter */
+#define EWRK3_PIR1 iobase+0x0e /* Page Index Register 1 */
+#define EWRK3_PIR2 iobase+0x0f /* Page Index Register 2 */
+#define EWRK3_DATA iobase+0x10 /* Data Register */
+#define EWRK3_IOPR iobase+0x11 /* I/O Page Register */
+#define EWRK3_IOBR iobase+0x12 /* I/O Base Register */
+#define EWRK3_MPR iobase+0x13 /* Memory Page Register */
+#define EWRK3_MBR iobase+0x14 /* Memory Base Register */
+#define EWRK3_APROM iobase+0x15 /* Address PROM */
+#define EWRK3_EPROM1 iobase+0x16 /* EEPROM Data Register 1 */
+#define EWRK3_EPROM2 iobase+0x17 /* EEPROM Data Register 2 */
+#define EWRK3_PAR0 iobase+0x18 /* Physical Address Register 0 */
+#define EWRK3_PAR1 iobase+0x19 /* Physical Address Register 1 */
+#define EWRK3_PAR2 iobase+0x1a /* Physical Address Register 2 */
+#define EWRK3_PAR3 iobase+0x1b /* Physical Address Register 3 */
+#define EWRK3_PAR4 iobase+0x1c /* Physical Address Register 4 */
+#define EWRK3_PAR5 iobase+0x1d /* Physical Address Register 5 */
+#define EWRK3_CMR iobase+0x1e /* Configuration/Management Register */
+
+/*
+** Control Page Map
+*/
+#define PAGE0_FMQ 0x000 /* Free Memory Queue */
+#define PAGE0_RQ 0x080 /* Receive Queue */
+#define PAGE0_TQ 0x100 /* Transmit Queue */
+#define PAGE0_TDQ 0x180 /* Transmit Done Queue */
+#define PAGE0_HTE 0x200 /* Hash Table Entries */
+#define PAGE0_RSVD 0x240 /* RESERVED */
+#define PAGE0_USRD 0x600 /* User Data */
+
+/*
+** Control and Status Register bit definitions (EWRK3_CSR)
+*/
+#define CSR_RA 0x80 /* Runt Accept */
+#define CSR_PME 0x40 /* Promiscuous Mode Enable */
+#define CSR_MCE 0x20 /* Multicast Enable */
+#define CSR_TNE 0x08 /* TX Done Queue Not Empty */
+#define CSR_RNE 0x04 /* RX Queue Not Empty */
+#define CSR_TXD 0x02 /* TX Disable */
+#define CSR_RXD 0x01 /* RX Disable */
+
+/*
+** Control Register bit definitions (EWRK3_CR)
+*/
+#define CR_APD 0x80 /* Auto Port Disable */
+#define CR_PSEL 0x40 /* Port Select (0->TP port) */
+#define CR_LBCK 0x20 /* LoopBaCK enable */
+#define CR_FDUP 0x10 /* Full DUPlex enable */
+#define CR_FBUS 0x08 /* Fast BUS enable (ISA clk > 8.33MHz) */
+#define CR_EN_16 0x04 /* ENable 16 bit memory accesses */
+#define CR_LED 0x02 /* LED (1-> turn on) */
+
+/*
+** Interrupt Control Register bit definitions (EWRK3_ICR)
+*/
+#define ICR_IE 0x80 /* Interrupt Enable */
+#define ICR_IS 0x60 /* Interrupt Selected */
+#define ICR_TNEM 0x08 /* TNE Mask (0->mask) */
+#define ICR_RNEM 0x04 /* RNE Mask (0->mask) */
+#define ICR_TXDM 0x02 /* TXD Mask (0->mask) */
+#define ICR_RXDM 0x01 /* RXD Mask (0->mask) */
+
+/*
+** Transmit Status Register bit definitions (EWRK3_TSR)
+*/
+#define TSR_NCL 0x80 /* No Carrier Loopback */
+#define TSR_ID 0x40 /* Initially Deferred */
+#define TSR_LCL 0x20 /* Late CoLlision */
+#define TSR_ECL 0x10 /* Excessive CoLlisions */
+#define TSR_RCNTR 0x0f /* Retries CouNTeR */
+
+/*
+** I/O Page Register bit definitions (EWRK3_IOPR)
+*/
+#define EEPROM_INIT 0xc0 /* EEPROM INIT command */
+#define EEPROM_WR_EN 0xc8 /* EEPROM WRITE ENABLE command */
+#define EEPROM_WR 0xd0 /* EEPROM WRITE command */
+#define EEPROM_WR_DIS 0xd8 /* EEPROM WRITE DISABLE command */
+#define EEPROM_RD 0xe0 /* EEPROM READ command */
+
+/*
+** I/O Base Register bit definitions (EWRK3_IOBR)
+*/
+#define EISA_REGS_EN 0x20 /* Enable EISA ID and Control Registers */
+#define EISA_IOB 0x1f /* Compare bits for I/O Base Address */
+
+/*
+** I/O Configuration/Management Register bit definitions (EWRK3_CMR)
+*/
+#define CMR_RA 0x80 /* Read Ahead */
+#define CMR_WB 0x40 /* Write Behind */
+#define CMR_LINK 0x20 /* 0->TP */
+#define CMR_POLARITY 0x10 /* Informational */
+#define CMR_NO_EEPROM 0x0c /* NO_EEPROM<1:0> pin status */
+#define CMR_HS 0x08 /* Hard Strapped pin status (LeMAC2) */
+#define CMR_PNP 0x04 /* Plug 'n Play */
+#define CMR_DRAM 0x02 /* 0-> 1DRAM, 1-> 2 DRAM on board */
+#define CMR_0WS 0x01 /* Zero Wait State */
+
+/*
+** MAC Receive Status Register bit definitions
+*/
+
+#define R_ROK 0x80 /* Receive OK summary */
+#define R_IAM 0x10 /* Individual Address Match */
+#define R_MCM 0x08 /* MultiCast Match */
+#define R_DBE 0x04 /* Dribble Bit Error */
+#define R_CRC 0x02 /* CRC error */
+#define R_PLL 0x01 /* Phase Lock Lost */
+
+/*
+** MAC Transmit Control Register bit definitions
+*/
+
+#define TCR_SQEE 0x40 /* SQE Enable - look for heartbeat */
+#define TCR_SED 0x20 /* Stop when Error Detected */
+#define TCR_QMODE 0x10 /* Q_MODE */
+#define TCR_LAB 0x08 /* Less Aggressive Backoff */
+#define TCR_PAD 0x04 /* PAD Runt Packets */
+#define TCR_IFC 0x02 /* Insert Frame Check */
+#define TCR_ISA 0x01 /* Insert Source Address */
+
+/*
+** MAC Transmit Status Register bit definitions
+*/
+
+#define T_VSTS 0x80 /* Valid STatuS */
+#define T_CTU 0x40 /* Cut Through Used */
+#define T_SQE 0x20 /* Signal Quality Error */
+#define T_NCL 0x10 /* No Carrier Loopback */
+#define T_LCL 0x08 /* Late Collision */
+#define T_ID 0x04 /* Initially Deferred */
+#define T_COLL 0x03 /* COLLision status */
+#define T_XCOLL 0x03 /* Excessive Collisions */
+#define T_MCOLL 0x02 /* Multiple Collisions */
+#define T_OCOLL 0x01 /* One Collision */
+#define T_NOCOLL 0x00 /* No Collisions */
+#define T_XUR 0x03 /* Excessive Underruns */
+#define T_TXE 0x7f /* TX Errors */
+
+/*
+** EISA Configuration Register bit definitions
+*/
+
+#define EISA_ID iobase + 0x0c80 /* EISA ID Registers */
+#define EISA_ID0 iobase + 0x0c80 /* EISA ID Register 0 */
+#define EISA_ID1 iobase + 0x0c81 /* EISA ID Register 1 */
+#define EISA_ID2 iobase + 0x0c82 /* EISA ID Register 2 */
+#define EISA_ID3 iobase + 0x0c83 /* EISA ID Register 3 */
+#define EISA_CR iobase + 0x0c84 /* EISA Control Register */
+
+/*
+** EEPROM BYTES
+*/
+#define EEPROM_MEMB 0x00
+#define EEPROM_IOB 0x01
+#define EEPROM_EISA_ID0 0x02
+#define EEPROM_EISA_ID1 0x03
+#define EEPROM_EISA_ID2 0x04
+#define EEPROM_EISA_ID3 0x05
+#define EEPROM_MISC0 0x06
+#define EEPROM_MISC1 0x07
+#define EEPROM_PNAME7 0x08
+#define EEPROM_PNAME6 0x09
+#define EEPROM_PNAME5 0x0a
+#define EEPROM_PNAME4 0x0b
+#define EEPROM_PNAME3 0x0c
+#define EEPROM_PNAME2 0x0d
+#define EEPROM_PNAME1 0x0e
+#define EEPROM_PNAME0 0x0f
+#define EEPROM_SWFLAGS 0x10
+#define EEPROM_HWCAT 0x11
+#define EEPROM_NETMAN2 0x12
+#define EEPROM_REVLVL 0x13
+#define EEPROM_NETMAN0 0x14
+#define EEPROM_NETMAN1 0x15
+#define EEPROM_CHIPVER 0x16
+#define EEPROM_SETUP 0x17
+#define EEPROM_PADDR0 0x18
+#define EEPROM_PADDR1 0x19
+#define EEPROM_PADDR2 0x1a
+#define EEPROM_PADDR3 0x1b
+#define EEPROM_PADDR4 0x1c
+#define EEPROM_PADDR5 0x1d
+#define EEPROM_PA_CRC 0x1e
+#define EEPROM_CHKSUM 0x1f
+
+/*
+** EEPROM bytes for checksumming
+*/
+#define EEPROM_MAX 32 /* bytes */
+
+/*
+** EEPROM MISCELLANEOUS FLAGS
+*/
+#define RBE_SHADOW 0x0100 /* Remote Boot Enable Shadow */
+#define READ_AHEAD 0x0080 /* Read Ahead feature */
+#define IRQ_SEL2 0x0070 /* IRQ line selection (LeMAC2) */
+#define IRQ_SEL 0x0060 /* IRQ line selection */
+#define FAST_BUS 0x0008 /* ISA Bus speeds > 8.33MHz */
+#define ENA_16 0x0004 /* Enables 16 bit memory transfers */
+#define WRITE_BEHIND 0x0002 /* Write Behind feature */
+#define _0WS_ENA 0x0001 /* Zero Wait State Enable */
+
+/*
+** EEPROM NETWORK MANAGEMENT FLAGS
+*/
+#define NETMAN_POL 0x04 /* Polarity defeat */
+#define NETMAN_LINK 0x02 /* Link defeat */
+#define NETMAN_CCE 0x01 /* Custom Counters Enable */
+
+/*
+** EEPROM SW FLAGS
+*/
+#define SW_SQE 0x10 /* Signal Quality Error */
+#define SW_LAB 0x08 /* Less Aggressive Backoff */
+#define SW_INIT 0x04 /* Initialized */
+#define SW_TIMEOUT 0x02 /* 0:2.5 mins, 1: 30 secs */
+#define SW_REMOTE 0x01 /* Remote Boot Enable -> 1 */
+
+/*
+** EEPROM SETUP FLAGS
+*/
+#define SETUP_APD 0x80 /* AutoPort Disable */
+#define SETUP_PS 0x40 /* Port Select */
+#define SETUP_MP 0x20 /* MultiPort */
+#define SETUP_1TP 0x10 /* 1 port, TP */
+#define SETUP_1COAX 0x00 /* 1 port, Coax */
+#define SETUP_DRAM 0x02 /* Number of DRAMS on board */
+
+/*
+** EEPROM MANAGEMENT FLAGS
+*/
+#define MGMT_CCE 0x01 /* Custom Counters Enable */
+
+/*
+** EEPROM VERSIONS
+*/
+#define LeMAC 0x11
+#define LeMAC2 0x12
+
+/*
+** Miscellaneous
+*/
+
+#define EEPROM_WAIT_TIME 1000 /* Number of microseconds */
+#define EISA_EN 0x0001 /* Enable EISA bus buffers */
+
+#define HASH_TABLE_LEN 512 /* Bits */
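+/* i.e. 64 bytes; the driver addresses it as HASH_TABLE_LEN >> 3 bytes */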
+
+#define XCT 0x80 /* Transmit Cut Through */
+#define PRELOAD 16 /* 4 long words */
+
+#define MASK_INTERRUPTS 1
+#define UNMASK_INTERRUPTS 0
+
+#define EEPROM_OFFSET(a) ((u_short)((u_long)(a)))
+
+/*
+** Include the IOCTL stuff
+*/
+#include <linux/sockios.h>
+
+#define EWRK3IOCTL SIOCDEVPRIVATE
+
+struct ewrk3_ioctl {
+ unsigned short cmd; /* Command to run */
+ unsigned short len; /* Length of the data buffer */
+ unsigned char __user *data; /* Pointer to the data buffer */
+};
+
+/*
+** Recognised commands for the driver
+*/
+#define EWRK3_GET_HWADDR 0x01 /* Get the hardware address */
+#define EWRK3_SET_HWADDR 0x02 /* Set the hardware address */
+#define EWRK3_SET_PROM 0x03 /* Set Promiscuous Mode */
+#define EWRK3_CLR_PROM 0x04 /* Clear Promiscuous Mode */
+#define EWRK3_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
+#define EWRK3_GET_MCA 0x06 /* Get the multicast address table */
+#define EWRK3_SET_MCA 0x07 /* Set a multicast address */
+#define EWRK3_CLR_MCA 0x08 /* Clear all multicast addresses */
+#define EWRK3_MCA_EN 0x09 /* Enable a multicast address group */
+#define EWRK3_GET_STATS 0x0a /* Get the driver statistics */
+#define EWRK3_CLR_STATS 0x0b /* Zero out the driver statistics */
+#define EWRK3_GET_CSR 0x0c /* Get the CSR Register contents */
+#define EWRK3_SET_CSR 0x0d /* Set the CSR Register contents */
+#define EWRK3_GET_EEPROM 0x0e /* Get the EEPROM contents */
+#define EWRK3_SET_EEPROM 0x0f /* Set the EEPROM contents */
+#define EWRK3_GET_CMR 0x10 /* Get the CMR Register contents */
+#define EWRK3_CLR_TX_CUT_THRU 0x11 /* Clear the TX cut through mode */
+#define EWRK3_SET_TX_CUT_THRU 0x12 /* Set the TX cut through mode */
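For context: these private commands ride on the standard SIOCDEVPRIVATE/ifreq
mechanism, with ifr_data pointing at the driver-defined request block. The
sketch below is a hypothetical userspace caller (interface name, inline
command value and error handling are illustrative assumptions, not part of
this patch) showing how EWRK3_GET_HWADDR might be issued.

/* Hypothetical userspace sketch: read the ewrk3 MAC via the private ioctl. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>		/* SIOCDEVPRIVATE == EWRK3IOCTL */

struct ewrk3_ioctl {
	unsigned short cmd;		/* command to run */
	unsigned short len;		/* length of the data buffer */
	unsigned char *data;		/* pointer to the data buffer */
};

int main(void)
{
	unsigned char hwaddr[6];
	struct ewrk3_ioctl req = { 0x01 /* EWRK3_GET_HWADDR */,
				   sizeof(hwaddr), hwaddr };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (char *)&req;

	if (fd >= 0 && ioctl(fd, SIOCDEVPRIVATE, &ifr) == 0)
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       hwaddr[0], hwaddr[1], hwaddr[2],
		       hwaddr[3], hwaddr[4], hwaddr[5]);
	if (fd >= 0)
		close(fd);
	return 0;
}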
diff --git a/drivers/net/ethernet/dec/tulip/21142.c b/drivers/net/ethernet/dec/tulip/21142.c
new file mode 100644
index 000000000000..092c3faa882a
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/21142.c
@@ -0,0 +1,260 @@
+/*
+ drivers/net/tulip/21142.c
+
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
+ for more information on this driver.
+
+ DC21143 manual "21143 PCI/CardBus 10/100Mb/s Ethernet LAN Controller
+ Hardware Reference Manual" is currently available at :
+ http://developer.intel.com/design/network/manuals/278074.htm
+
+ Please submit bugs to http://bugzilla.kernel.org/ .
+*/
+
+#include <linux/delay.h>
+#include "tulip.h"
+
+
+static u16 t21142_csr13[] = { 0x0001, 0x0009, 0x0009, 0x0000, 0x0001, };
+u16 t21142_csr14[] = { 0xFFFF, 0x0705, 0x0705, 0x0000, 0x7F3D, };
+static u16 t21142_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
+
+
+/* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list
+ of available transceivers. */
+void t21142_media_task(struct work_struct *work)
+{
+ struct tulip_private *tp =
+ container_of(work, struct tulip_private, media_work);
+ struct net_device *dev = tp->dev;
+ void __iomem *ioaddr = tp->base_addr;
+ int csr12 = ioread32(ioaddr + CSR12);
+ int next_tick = 60*HZ;
+ int new_csr6 = 0;
+ int csr14 = ioread32(ioaddr + CSR14);
+
+ /* CSR12[LS10,LS100] are not reliable during autonegotiation */
+ if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
+ csr12 |= 6;
+ if (tulip_debug > 2)
+ dev_info(&dev->dev, "21143 negotiation status %08x, %s\n",
+ csr12, medianame[dev->if_port]);
+ if (tulip_media_cap[dev->if_port] & MediaIsMII) {
+ if (tulip_check_duplex(dev) < 0) {
+ netif_carrier_off(dev);
+ next_tick = 3*HZ;
+ } else {
+ netif_carrier_on(dev);
+ next_tick = 60*HZ;
+ }
+ } else if (tp->nwayset) {
+ /* Don't screw up a negotiated session! */
+ if (tulip_debug > 1)
+ dev_info(&dev->dev,
+ "Using NWay-set %s media, csr12 %08x\n",
+ medianame[dev->if_port], csr12);
+ } else if (tp->medialock) {
+ ;
+ } else if (dev->if_port == 3) {
+ if (csr12 & 2) { /* No 100mbps link beat, revert to 10mbps. */
+ if (tulip_debug > 1)
+ dev_info(&dev->dev,
+ "No 21143 100baseTx link beat, %08x, trying NWay\n",
+ csr12);
+ t21142_start_nway(dev);
+ next_tick = 3*HZ;
+ }
+ } else if ((csr12 & 0x7000) != 0x5000) {
+ /* Negotiation failed. Search media types. */
+ if (tulip_debug > 1)
+ dev_info(&dev->dev,
+ "21143 negotiation failed, status %08x\n",
+ csr12);
+ if (!(csr12 & 4)) { /* 10mbps link beat good. */
+ new_csr6 = 0x82420000;
+ dev->if_port = 0;
+ iowrite32(0, ioaddr + CSR13);
+ iowrite32(0x0003FFFF, ioaddr + CSR14);
+ iowrite16(t21142_csr15[dev->if_port], ioaddr + CSR15);
+ iowrite32(t21142_csr13[dev->if_port], ioaddr + CSR13);
+ } else {
+ /* Select 100mbps port to check for link beat. */
+ new_csr6 = 0x83860000;
+ dev->if_port = 3;
+ iowrite32(0, ioaddr + CSR13);
+ iowrite32(0x0003FFFF, ioaddr + CSR14);
+ iowrite16(8, ioaddr + CSR15);
+ iowrite32(1, ioaddr + CSR13);
+ }
+ if (tulip_debug > 1)
+ dev_info(&dev->dev, "Testing new 21143 media %s\n",
+ medianame[dev->if_port]);
+ if (new_csr6 != (tp->csr6 & ~0x00D5)) {
+ tp->csr6 &= 0x00D5;
+ tp->csr6 |= new_csr6;
+ iowrite32(0x0301, ioaddr + CSR12);
+ tulip_restart_rxtx(tp);
+ }
+ next_tick = 3*HZ;
+ }
+
+ /* mod_timer synchronizes us with potential add_timer calls
+ * from interrupts.
+ */
+ mod_timer(&tp->timer, RUN_AT(next_tick));
+}
+
+
+void t21142_start_nway(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int csr14 = ((tp->sym_advertise & 0x0780) << 9) |
+ ((tp->sym_advertise & 0x0020) << 1) | 0xffbf;
+
+ dev->if_port = 0;
+ tp->nway = tp->mediasense = 1;
+ tp->nwayset = tp->lpar = 0;
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "Restarting 21143 autonegotiation, csr14=%08x\n",
+ csr14);
+ iowrite32(0x0001, ioaddr + CSR13);
+ udelay(100);
+ iowrite32(csr14, ioaddr + CSR14);
+ tp->csr6 = 0x82420000 | (tp->sym_advertise & 0x0040 ? FullDuplex : 0);
+ iowrite32(tp->csr6, ioaddr + CSR6);
+ if (tp->mtable && tp->mtable->csr15dir) {
+ iowrite32(tp->mtable->csr15dir, ioaddr + CSR15);
+ iowrite32(tp->mtable->csr15val, ioaddr + CSR15);
+ } else
+ iowrite16(0x0008, ioaddr + CSR15);
+ iowrite32(0x1301, ioaddr + CSR12); /* Trigger NWAY. */
+}
+
+
+
+void t21142_lnk_change(struct net_device *dev, int csr5)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int csr12 = ioread32(ioaddr + CSR12);
+ int csr14 = ioread32(ioaddr + CSR14);
+
+ /* CSR12[LS10,LS100] are not reliable during autonegotiation */
+ if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
+ csr12 |= 6;
+ if (tulip_debug > 1)
+ dev_info(&dev->dev,
+ "21143 link status interrupt %08x, CSR5 %x, %08x\n",
+ csr12, csr5, csr14);
+
+ /* If NWay finished and we have a negotiated partner capability. */
+ if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) {
+ int setup_done = 0;
+ int negotiated = tp->sym_advertise & (csr12 >> 16);
+ tp->lpar = csr12 >> 16;
+ tp->nwayset = 1;
+ /* If partner cannot negotiate, it is 10Mbps Half Duplex */
+ if (!(csr12 & 0x8000)) dev->if_port = 0;
+ else if (negotiated & 0x0100) dev->if_port = 5;
+ else if (negotiated & 0x0080) dev->if_port = 3;
+ else if (negotiated & 0x0040) dev->if_port = 4;
+ else if (negotiated & 0x0020) dev->if_port = 0;
+ else {
+ tp->nwayset = 0;
+ if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180))
+ dev->if_port = 3;
+ }
+ tp->full_duplex = (tulip_media_cap[dev->if_port] & MediaAlwaysFD) ? 1:0;
+
+ if (tulip_debug > 1) {
+ if (tp->nwayset)
+ dev_info(&dev->dev,
+ "Switching to %s based on link negotiation %04x & %04x = %04x\n",
+ medianame[dev->if_port],
+ tp->sym_advertise, tp->lpar,
+ negotiated);
+ else
+ dev_info(&dev->dev,
+ "Autonegotiation failed, using %s, link beat status %04x\n",
+ medianame[dev->if_port], csr12);
+ }
+
+ if (tp->mtable) {
+ int i;
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == dev->if_port) {
+ int startup = ! ((tp->chip_id == DC21143 && (tp->revision == 48 || tp->revision == 65)));
+ tp->cur_index = i;
+ tulip_select_media(dev, startup);
+ setup_done = 1;
+ break;
+ }
+ }
+ if ( ! setup_done) {
+ tp->csr6 = (dev->if_port & 1 ? 0x838E0000 : 0x82420000) | (tp->csr6 & 0x20ff);
+ if (tp->full_duplex)
+ tp->csr6 |= 0x0200;
+ iowrite32(1, ioaddr + CSR13);
+ }
+#if 0 /* Restart shouldn't be needed. */
+ iowrite32(tp->csr6 | RxOn, ioaddr + CSR6);
+ if (tulip_debug > 2)
+ netdev_dbg(dev, " Restarting Tx and Rx, CSR5 is %08x\n",
+ ioread32(ioaddr + CSR5));
+#endif
+ tulip_start_rxtx(tp);
+ if (tulip_debug > 2)
+ netdev_dbg(dev, " Setting CSR6 %08x/%x CSR12 %08x\n",
+ tp->csr6, ioread32(ioaddr + CSR6),
+ ioread32(ioaddr + CSR12));
+ } else if ((tp->nwayset && (csr5 & 0x08000000) &&
+ (dev->if_port == 3 || dev->if_port == 5) &&
+ (csr12 & 2) == 2) ||
+ (tp->nway && (csr5 & (TPLnkFail)))) {
+ /* Link blew? Maybe restart NWay. */
+ del_timer_sync(&tp->timer);
+ t21142_start_nway(dev);
+ tp->timer.expires = RUN_AT(3*HZ);
+ add_timer(&tp->timer);
+ } else if (dev->if_port == 3 || dev->if_port == 5) {
+ if (tulip_debug > 1)
+ dev_info(&dev->dev, "21143 %s link beat %s\n",
+ medianame[dev->if_port],
+ (csr12 & 2) ? "failed" : "good");
+ if ((csr12 & 2) && ! tp->medialock) {
+ del_timer_sync(&tp->timer);
+ t21142_start_nway(dev);
+ tp->timer.expires = RUN_AT(3*HZ);
+ add_timer(&tp->timer);
+ } else if (dev->if_port == 5)
+ iowrite32(csr14 & ~0x080, ioaddr + CSR14);
+ } else if (dev->if_port == 0 || dev->if_port == 4) {
+ if ((csr12 & 4) == 0)
+ dev_info(&dev->dev, "21143 10baseT link beat good\n");
+ } else if (!(csr12 & 4)) { /* 10mbps link beat good. */
+ if (tulip_debug)
+ dev_info(&dev->dev, "21143 10mbps sensed media\n");
+ dev->if_port = 0;
+ } else if (tp->nwayset) {
+ if (tulip_debug)
+ dev_info(&dev->dev, "21143 using NWay-set %s, csr6 %08x\n",
+ medianame[dev->if_port], tp->csr6);
+ } else { /* 100mbps link beat good. */
+ if (tulip_debug)
+ dev_info(&dev->dev, "21143 100baseTx sensed media\n");
+ dev->if_port = 3;
+ tp->csr6 = 0x838E0000 | (tp->csr6 & 0x20ff);
+ iowrite32(0x0003FF7F, ioaddr + CSR14);
+ iowrite32(0x0301, ioaddr + CSR12);
+ tulip_restart_rxtx(tp);
+ }
+}
+
+
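The NWay completion path in t21142_lnk_change() above is essentially a bit
decode: the link partner's ability field arrives in the top half of CSR12,
is ANDed with our own advertisement, and the highest common capability
selects the port. A standalone sketch of that mapping (a hypothetical helper
mirroring the driver's constants, not code from this patch):

/* Decode an NWay result the way t21142_lnk_change() does; returns the
 * if_port value, or -1 if no common medium was negotiated. */
static int t21142_decode_nway(unsigned int sym_advertise, unsigned int csr12)
{
	unsigned int negotiated = sym_advertise & (csr12 >> 16);

	if (!(csr12 & 0x8000))		/* partner cannot negotiate */
		return 0;		/* assume 10baseT half duplex */
	if (negotiated & 0x0100)
		return 5;		/* 100baseTx full duplex */
	if (negotiated & 0x0080)
		return 3;		/* 100baseTx half duplex */
	if (negotiated & 0x0040)
		return 4;		/* 10baseT full duplex */
	if (negotiated & 0x0020)
		return 0;		/* 10baseT half duplex */
	return -1;			/* negotiation found no common medium */
}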
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig
new file mode 100644
index 000000000000..1203be0436e2
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/Kconfig
@@ -0,0 +1,172 @@
+#
+# Tulip family network device configuration
+#
+
+config NET_TULIP
+ bool "DEC - Tulip devices"
+ depends on (PCI || EISA || CARDBUS)
+ ---help---
+ This selects the "Tulip" family of EISA/PCI network cards.
+
+if NET_TULIP
+
+config DE2104X
+ tristate "Early DECchip Tulip (dc2104x) PCI support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This driver is developed for the SMC EtherPower series Ethernet
+ cards and also works with cards based on the DECchip
+ 21040 (Tulip series) chips. Some LinkSys PCI cards are
+ of this type. (If your card is NOT SMC EtherPower 10/100 PCI
+ (smc9332dst), you can also try the driver for "Generic DECchip"
+ cards, below. However, most people with a network card of this type
+ will say Y here.) Do read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module will
+ be called de2104x.
+
+config DE2104X_DSL
+ int "Descriptor Skip Length in 32 bit longwords"
+ depends on DE2104X
+ range 0 31
+ default 0
+ ---help---
+ Setting this value allows ring buffer descriptors to be aligned on
+ their own cache lines. A value of 4 corresponds to the typical 32 byte
+ line (the descriptor is 16 bytes). This is necessary on systems that
+ lack cache coherence, such as the PowerMac 5500. Otherwise 0 is safe.
+ Default is 0, and the range is 0 to 31.
+
+config TULIP
+ tristate "DECchip Tulip (dc2114x) PCI support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This driver is developed for the SMC EtherPower series Ethernet
+ cards and also works with cards based on the DECchip
+ 21140 (Tulip series) chips. Some LinkSys PCI cards are
+ of this type. (If your card is NOT SMC EtherPower 10/100 PCI
+ (smc9332dst), you can also try the driver for "Generic DECchip"
+ cards, above. However, most people with a network card of this type
+ will say Y here.) Do read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module will
+ be called tulip.
+
+config TULIP_MWI
+ bool "New bus configuration (EXPERIMENTAL)"
+ depends on TULIP && EXPERIMENTAL
+ ---help---
+ This configures your Tulip card specifically for the cache line size
+ of the card and system you are using.
+
+ This is experimental code, not yet tested on many boards.
+
+ If unsure, say N.
+
+config TULIP_MMIO
+ bool "Use PCI shared mem for NIC registers"
+ depends on TULIP
+ ---help---
+ Use PCI shared memory for the NIC registers, rather than going through
+ the Tulip's PIO (programmed I/O ports). Faster, but could produce
+ obscure bugs if your mainboard has memory controller timing issues.
+ If in doubt, say N.
+
+config TULIP_NAPI
+ bool "Use RX polling (NAPI)"
+ depends on TULIP
+ ---help---
+ NAPI is a new driver API designed to reduce CPU and interrupt load
+ when the driver is receiving lots of packets from the card. It is
+ still somewhat experimental and thus not yet enabled by default.
+
+ If your estimated Rx load is 10kpps or more, or if the card will be
+ deployed on potentially unfriendly networks (e.g. in a firewall),
+ then say Y here.
+
+ If in doubt, say N.
+
+config TULIP_NAPI_HW_MITIGATION
+ bool "Use Interrupt Mitigation"
+ depends on TULIP_NAPI
+ ---help---
+ Use HW to reduce RX interrupts. Not strictly necessary since NAPI
+ reduces RX interrupts by itself. Interrupt mitigation reduces RX
+ interrupts even at low levels of traffic at the cost of a small
+ latency.
+
+ If in doubt, say Y.
+
+config TULIP_DM910X
+ def_bool y
+ depends on TULIP && SPARC
+
+config DE4X5
+ tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
+ depends on (PCI || EISA)
+ select CRC32
+ ---help---
+ This is support for the DIGITAL series of PCI/EISA Ethernet cards.
+ These include the DE425, DE434, DE435, DE450 and DE500 models. If
+ you have a network card of this type, say Y and read the
+ Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. More specific
+ information is contained in
+ <file:Documentation/networking/de4x5.txt>.
+
+ To compile this driver as a module, choose M here. The module will
+ be called de4x5.
+
+config WINBOND_840
+ tristate "Winbond W89c840 Ethernet support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ This driver is for the Winbond W89c840 chip. It also works with
+ the TX9882 chip on the Compex RL100-ATX board.
+ More specific information and updates are available from
+ <http://www.scyld.com/network/drivers.html>.
+
+config DM9102
+ tristate "Davicom DM910x/DM980x support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This driver is for DM9102(A)/DM9132/DM9801 compatible PCI cards from
+ Davicom (<http://www.davicom.com.tw/>). If you have such a network
+ (Ethernet) card, say Y. Some information is contained in the file
+ <file:Documentation/networking/dmfe.txt>.
+
+ To compile this driver as a module, choose M here. The module will
+ be called dmfe.
+
+config ULI526X
+ tristate "ULi M526x controller support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This driver is for ULi M5261/M5263 10/100M Ethernet Controller
+ (<http://www.nvidia.com/page/uli_drivers.html>).
+
+ To compile this driver as a module, choose M here. The module will
+ be called uli526x.
+
+config PCMCIA_XIRCOM
+ tristate "Xircom CardBus support"
+ depends on CARDBUS
+ ---help---
+ This driver is for the Digital "Tulip" Ethernet CardBus adapters.
+ It should work with most DEC 21*4*-based chips/ethercards, as well
+ as with work-alike chips from Lite-On (PNIC) and Macronix (MXIC) and
+ ASIX.
+
+ To compile this driver as a module, choose M here. The module will
+ be called xircom_cb. If unsure, say N.
+
+endif # NET_TULIP
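Worked example for DE2104X_DSL above (a sketch under the stated Kconfig
assumptions, not code from this patch): each base descriptor is four 32-bit
words, so a skip length of 4 pads it out to exactly one typical cache line.

/* With CONFIG_DE2104X_DSL == 4 (mirroring struct de_desc in de2104x.c): */
struct de_desc_example {
	unsigned int opts1, opts2, addr1, addr2;	/* 4 * 4 = 16 bytes */
	unsigned int skip[4];				/* + 4 * 4 = 16 bytes */
};
/* sizeof(struct de_desc_example) == 32 == one typical cache line, so
 * consecutive ring descriptors never share a line on systems without
 * cache coherence, such as the PowerMac 5500 noted in the help text. */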
diff --git a/drivers/net/ethernet/dec/tulip/Makefile b/drivers/net/ethernet/dec/tulip/Makefile
new file mode 100644
index 000000000000..5e8be38b45bb
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/Makefile
@@ -0,0 +1,19 @@
+#
+# Makefile for the Linux "Tulip" family network device drivers.
+#
+
+ccflags-$(CONFIG_NET_TULIP) := -DDEBUG
+
+obj-$(CONFIG_PCMCIA_XIRCOM) += xircom_cb.o
+obj-$(CONFIG_DM9102) += dmfe.o
+obj-$(CONFIG_WINBOND_840) += winbond-840.o
+obj-$(CONFIG_DE2104X) += de2104x.o
+obj-$(CONFIG_TULIP) += tulip.o
+obj-$(CONFIG_DE4X5) += de4x5.o
+obj-$(CONFIG_ULI526X) += uli526x.o
+
+# Declare multi-part drivers.
+
+tulip-objs := eeprom.o interrupt.o media.o \
+ timer.o tulip_core.o \
+ 21142.o pnic.o pnic2.o
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
new file mode 100644
index 000000000000..1427739d9a51
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -0,0 +1,2215 @@
+/* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
+/*
+ Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
+
+ Copyright 1994, 1995 Digital Equipment Corporation. [de4x5.c]
+ Written/copyright 1994-2001 by Donald Becker. [tulip.c]
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ See the file COPYING in this distribution for more information.
+
+ TODO, in rough priority order:
+ * Support forcing media type with a module parameter,
+ like dl2k.c/sundance.c
+ * Constants (module parms?) for Rx work limit
+ * Complete reset on PciErr
+ * Jumbo frames / dev->change_mtu
+ * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
+ * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
+ * Implement Tx software interrupt mitigation via
+ Tx descriptor bit
+
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME "de2104x"
+#define DRV_VERSION "0.7"
+#define DRV_RELDATE "Mar 17, 2004"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/compiler.h>
+#include <linux/rtnetlink.h>
+#include <linux/crc32.h>
+#include <linux/slab.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+/* These identify the driver base version and may not be removed. */
+static char version[] =
+"PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")";
+
+MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
+MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static int debug = -1;
+module_param (debug, int, 0);
+MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
+
+/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
+#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
+ defined(CONFIG_SPARC) || defined(__ia64__) || \
+ defined(__sh__) || defined(__mips__)
+static int rx_copybreak = 1518;
+#else
+static int rx_copybreak = 100;
+#endif
+module_param (rx_copybreak, int, 0);
+MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
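+/* Frames no longer than rx_copybreak are copied by de_rx() into a freshly
+ * allocated skb so the full-size, already-mapped ring buffer can be reused
+ * in place; longer frames are passed up directly and the ring slot is
+ * refilled with a new buffer. */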
+
+#define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+/* Descriptor skip length in 32 bit longwords. */
+#ifndef CONFIG_DE2104X_DSL
+#define DSL 0
+#else
+#define DSL CONFIG_DE2104X_DSL
+#endif
+
+#define DE_RX_RING_SIZE 64
+#define DE_TX_RING_SIZE 64
+#define DE_RING_BYTES \
+ ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
+ (sizeof(struct de_desc) * DE_TX_RING_SIZE))
+#define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1))
+#define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1))
+#define TX_BUFFS_AVAIL(CP) \
+ (((CP)->tx_tail <= (CP)->tx_head) ? \
+ (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
+ (CP)->tx_tail - (CP)->tx_head - 1)
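+/* Example: with DE_TX_RING_SIZE == 64, tx_head == 10 and tx_tail == 5,
+ * TX_BUFFS_AVAIL == 5 + 63 - 10 == 58. One slot is always kept unused so
+ * a full ring (head one behind tail) is distinguishable from an empty one
+ * (head == tail). */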
+
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+#define RX_OFFSET 2
+
+#define DE_SETUP_SKB ((struct sk_buff *) 1)
+#define DE_DUMMY_SKB ((struct sk_buff *) 2)
+#define DE_SETUP_FRAME_WORDS 96
+#define DE_EEPROM_WORDS 256
+#define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16))
+#define DE_MAX_MEDIA 5
+
+#define DE_MEDIA_TP_AUTO 0
+#define DE_MEDIA_BNC 1
+#define DE_MEDIA_AUI 2
+#define DE_MEDIA_TP 3
+#define DE_MEDIA_TP_FD 4
+#define DE_MEDIA_INVALID DE_MAX_MEDIA
+#define DE_MEDIA_FIRST 0
+#define DE_MEDIA_LAST (DE_MAX_MEDIA - 1)
+#define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC)
+
+#define DE_TIMER_LINK (60 * HZ)
+#define DE_TIMER_NO_LINK (5 * HZ)
+
+#define DE_NUM_REGS 16
+#define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32))
+#define DE_REGS_VER 1
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
+ to support a pre-NWay full-duplex signaling mechanism using short frames.
+ No one knows what it should be, but if left at its default value some
+ 10base2(!) packets trigger a full-duplex-request interrupt. */
+#define FULL_DUPLEX_MAGIC 0x6969
+
+enum {
+ /* NIC registers */
+ BusMode = 0x00,
+ TxPoll = 0x08,
+ RxPoll = 0x10,
+ RxRingAddr = 0x18,
+ TxRingAddr = 0x20,
+ MacStatus = 0x28,
+ MacMode = 0x30,
+ IntrMask = 0x38,
+ RxMissed = 0x40,
+ ROMCmd = 0x48,
+ CSR11 = 0x58,
+ SIAStatus = 0x60,
+ CSR13 = 0x68,
+ CSR14 = 0x70,
+ CSR15 = 0x78,
+ PCIPM = 0x40,
+
+ /* BusMode bits */
+ CmdReset = (1 << 0),
+ CacheAlign16 = 0x00008000,
+ BurstLen4 = 0x00000400,
+ DescSkipLen = (DSL << 2),
+
+ /* Rx/TxPoll bits */
+ NormalTxPoll = (1 << 0),
+ NormalRxPoll = (1 << 0),
+
+ /* Tx/Rx descriptor status bits */
+ DescOwn = (1 << 31),
+ RxError = (1 << 15),
+ RxErrLong = (1 << 7),
+ RxErrCRC = (1 << 1),
+ RxErrFIFO = (1 << 0),
+ RxErrRunt = (1 << 11),
+ RxErrFrame = (1 << 14),
+ RingEnd = (1 << 25),
+ FirstFrag = (1 << 29),
+ LastFrag = (1 << 30),
+ TxError = (1 << 15),
+ TxFIFOUnder = (1 << 1),
+ TxLinkFail = (1 << 2) | (1 << 10) | (1 << 11),
+ TxMaxCol = (1 << 8),
+ TxOWC = (1 << 9),
+ TxJabber = (1 << 14),
+ SetupFrame = (1 << 27),
+ TxSwInt = (1 << 31),
+
+ /* MacStatus bits */
+ IntrOK = (1 << 16),
+ IntrErr = (1 << 15),
+ RxIntr = (1 << 6),
+ RxEmpty = (1 << 7),
+ TxIntr = (1 << 0),
+ TxEmpty = (1 << 2),
+ PciErr = (1 << 13),
+ TxState = (1 << 22) | (1 << 21) | (1 << 20),
+ RxState = (1 << 19) | (1 << 18) | (1 << 17),
+ LinkFail = (1 << 12),
+ LinkPass = (1 << 4),
+ RxStopped = (1 << 8),
+ TxStopped = (1 << 1),
+
+ /* MacMode bits */
+ TxEnable = (1 << 13),
+ RxEnable = (1 << 1),
+ RxTx = TxEnable | RxEnable,
+ FullDuplex = (1 << 9),
+ AcceptAllMulticast = (1 << 7),
+ AcceptAllPhys = (1 << 6),
+ BOCnt = (1 << 5),
+ MacModeClear = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
+ RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
+
+ /* ROMCmd bits */
+ EE_SHIFT_CLK = 0x02, /* EEPROM shift clock. */
+ EE_CS = 0x01, /* EEPROM chip select. */
+ EE_DATA_WRITE = 0x04, /* Data from the Tulip to EEPROM. */
+ EE_WRITE_0 = 0x01,
+ EE_WRITE_1 = 0x05,
+ EE_DATA_READ = 0x08, /* Data from the EEPROM chip. */
+ EE_ENB = (0x4800 | EE_CS),
+
+ /* The EEPROM commands include the always-set leading bit. */
+ EE_READ_CMD = 6,
+
+ /* RxMissed bits */
+ RxMissedOver = (1 << 16),
+ RxMissedMask = 0xffff,
+
+ /* SROM-related bits */
+ SROMC0InfoLeaf = 27,
+ MediaBlockMask = 0x3f,
+ MediaCustomCSRs = (1 << 6),
+
+ /* PCIPM bits */
+ PM_Sleep = (1 << 31),
+ PM_Snooze = (1 << 30),
+ PM_Mask = PM_Sleep | PM_Snooze,
+
+ /* SIAStatus bits */
+ NWayState = (1 << 14) | (1 << 13) | (1 << 12),
+ NWayRestart = (1 << 12),
+ NonselPortActive = (1 << 9),
+ SelPortActive = (1 << 8),
+ LinkFailStatus = (1 << 2),
+ NetCxnErr = (1 << 1),
+};
+
+static const u32 de_intr_mask =
+ IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
+ LinkPass | LinkFail | PciErr;
+
+/*
+ * Set the programmable burst length to 4 longwords for all chips:
+ * DMA errors result without these values. Cache alignment is 16 longwords.
+ */
+static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;
+
+struct de_srom_media_block {
+ u8 opts;
+ u16 csr13;
+ u16 csr14;
+ u16 csr15;
+} __packed;
+
+struct de_srom_info_leaf {
+ u16 default_media;
+ u8 n_blocks;
+ u8 unused;
+} __packed;
+
+struct de_desc {
+ __le32 opts1;
+ __le32 opts2;
+ __le32 addr1;
+ __le32 addr2;
+#if DSL
+ __le32 skip[DSL];
+#endif
+};
+
+struct media_info {
+ u16 type; /* DE_MEDIA_xxx */
+ u16 csr13;
+ u16 csr14;
+ u16 csr15;
+};
+
+struct ring_info {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+};
+
+struct de_private {
+ unsigned tx_head;
+ unsigned tx_tail;
+ unsigned rx_tail;
+
+ void __iomem *regs;
+ struct net_device *dev;
+ spinlock_t lock;
+
+ struct de_desc *rx_ring;
+ struct de_desc *tx_ring;
+ struct ring_info tx_skb[DE_TX_RING_SIZE];
+ struct ring_info rx_skb[DE_RX_RING_SIZE];
+ unsigned rx_buf_sz;
+ dma_addr_t ring_dma;
+
+ u32 msg_enable;
+
+ struct net_device_stats net_stats;
+
+ struct pci_dev *pdev;
+
+ u16 setup_frame[DE_SETUP_FRAME_WORDS];
+
+ u32 media_type;
+ u32 media_supported;
+ u32 media_advertise;
+ struct media_info media[DE_MAX_MEDIA];
+ struct timer_list media_timer;
+
+ u8 *ee_data;
+ unsigned board_idx;
+ unsigned de21040 : 1;
+ unsigned media_lock : 1;
+};
+
+
+static void de_set_rx_mode (struct net_device *dev);
+static void de_tx (struct de_private *de);
+static void de_clean_rings (struct de_private *de);
+static void de_media_interrupt (struct de_private *de, u32 status);
+static void de21040_media_timer (unsigned long data);
+static void de21041_media_timer (unsigned long data);
+static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
+
+
+static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { },
+};
+MODULE_DEVICE_TABLE(pci, de_pci_tbl);
+
+static const char * const media_name[DE_MAX_MEDIA] = {
+ "10baseT auto",
+ "BNC",
+ "AUI",
+ "10baseT-HD",
+ "10baseT-FD"
+};
+
+/* 21040 transceiver register settings:
+ * TP AUTO(unused), BNC(unused), AUI, TP, TP FD*/
+static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
+static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
+static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
+
+/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
+static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
+static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
+/* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
+static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
+static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
+
+
+#define dr32(reg) ioread32(de->regs + (reg))
+#define dw32(reg, val) iowrite32((val), de->regs + (reg))
+
+
+static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
+ u32 status, u32 len)
+{
+ netif_dbg(de, rx_err, de->dev,
+ "rx err, slot %d status 0x%x len %d\n",
+ rx_tail, status, len);
+
+ if ((status & 0x38000300) != 0x0300) {
+ /* Ignore earlier buffers. */
+ if ((status & 0xffff) != 0x7fff) {
+ netif_warn(de, rx_err, de->dev,
+ "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
+ status);
+ de->net_stats.rx_length_errors++;
+ }
+ } else if (status & RxError) {
+ /* There was a fatal error. */
+ de->net_stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x0890) de->net_stats.rx_length_errors++;
+ if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
+ if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
+ }
+}
+
+static void de_rx (struct de_private *de)
+{
+ unsigned rx_tail = de->rx_tail;
+ unsigned rx_work = DE_RX_RING_SIZE;
+ unsigned drop = 0;
+ int rc;
+
+ while (--rx_work) {
+ u32 status, len;
+ dma_addr_t mapping;
+ struct sk_buff *skb, *copy_skb;
+ unsigned copying_skb, buflen;
+
+ skb = de->rx_skb[rx_tail].skb;
+ BUG_ON(!skb);
+ rmb();
+ status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
+ if (status & DescOwn)
+ break;
+
+ len = ((status >> 16) & 0x7ff) - 4;
+ mapping = de->rx_skb[rx_tail].mapping;
+
+ if (unlikely(drop)) {
+ de->net_stats.rx_dropped++;
+ goto rx_next;
+ }
+
+ if (unlikely((status & 0x38008300) != 0x0300)) {
+ de_rx_err_acct(de, rx_tail, status, len);
+ goto rx_next;
+ }
+
+ copying_skb = (len <= rx_copybreak);
+
+ netif_dbg(de, rx_status, de->dev,
+ "rx slot %d status 0x%x len %d copying? %d\n",
+ rx_tail, status, len, copying_skb);
+
+ buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
+ copy_skb = dev_alloc_skb (buflen);
+ if (unlikely(!copy_skb)) {
+ de->net_stats.rx_dropped++;
+ drop = 1;
+ rx_work = 100;
+ goto rx_next;
+ }
+
+ if (!copying_skb) {
+ pci_unmap_single(de->pdev, mapping,
+ buflen, PCI_DMA_FROMDEVICE);
+ skb_put(skb, len);
+
+ mapping =
+ de->rx_skb[rx_tail].mapping =
+ pci_map_single(de->pdev, copy_skb->data,
+ buflen, PCI_DMA_FROMDEVICE);
+ de->rx_skb[rx_tail].skb = copy_skb;
+ } else {
+ pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
+ skb_reserve(copy_skb, RX_OFFSET);
+ skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
+ len);
+ pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
+
+ /* We'll reuse the original ring buffer. */
+ skb = copy_skb;
+ }
+
+ skb->protocol = eth_type_trans (skb, de->dev);
+
+ de->net_stats.rx_packets++;
+ de->net_stats.rx_bytes += skb->len;
+ rc = netif_rx (skb);
+ if (rc == NET_RX_DROP)
+ drop = 1;
+
+rx_next:
+ if (rx_tail == (DE_RX_RING_SIZE - 1))
+ de->rx_ring[rx_tail].opts2 =
+ cpu_to_le32(RingEnd | de->rx_buf_sz);
+ else
+ de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
+ de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
+ wmb();
+ de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
+ rx_tail = NEXT_RX(rx_tail);
+ }
+
+ if (!rx_work)
+ netdev_warn(de->dev, "rx work limit reached\n");
+
+ de->rx_tail = rx_tail;
+}
+
+static irqreturn_t de_interrupt (int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct de_private *de = netdev_priv(dev);
+ u32 status;
+
+ status = dr32(MacStatus);
+ if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
+ return IRQ_NONE;
+
+ netif_dbg(de, intr, dev, "intr, status %08x mode %08x desc %u/%u/%u\n",
+ status, dr32(MacMode),
+ de->rx_tail, de->tx_head, de->tx_tail);
+
+ dw32(MacStatus, status);
+
+ if (status & (RxIntr | RxEmpty)) {
+ de_rx(de);
+ if (status & RxEmpty)
+ dw32(RxPoll, NormalRxPoll);
+ }
+
+ spin_lock(&de->lock);
+
+ if (status & (TxIntr | TxEmpty))
+ de_tx(de);
+
+ if (status & (LinkPass | LinkFail))
+ de_media_interrupt(de, status);
+
+ spin_unlock(&de->lock);
+
+ if (status & PciErr) {
+ u16 pci_status;
+
+ pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
+ pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
+ netdev_err(de->dev,
+ "PCI bus error, status=%08x, PCI status=%04x\n",
+ status, pci_status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void de_tx (struct de_private *de)
+{
+ unsigned tx_head = de->tx_head;
+ unsigned tx_tail = de->tx_tail;
+
+ while (tx_tail != tx_head) {
+ struct sk_buff *skb;
+ u32 status;
+
+ rmb();
+ status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
+ if (status & DescOwn)
+ break;
+
+ skb = de->tx_skb[tx_tail].skb;
+ BUG_ON(!skb);
+ if (unlikely(skb == DE_DUMMY_SKB))
+ goto next;
+
+ if (unlikely(skb == DE_SETUP_SKB)) {
+ pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
+ sizeof(de->setup_frame), PCI_DMA_TODEVICE);
+ goto next;
+ }
+
+ pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
+ skb->len, PCI_DMA_TODEVICE);
+
+ if (status & LastFrag) {
+ if (status & TxError) {
+ netif_dbg(de, tx_err, de->dev,
+ "tx err, status 0x%x\n",
+ status);
+ de->net_stats.tx_errors++;
+ if (status & TxOWC)
+ de->net_stats.tx_window_errors++;
+ if (status & TxMaxCol)
+ de->net_stats.tx_aborted_errors++;
+ if (status & TxLinkFail)
+ de->net_stats.tx_carrier_errors++;
+ if (status & TxFIFOUnder)
+ de->net_stats.tx_fifo_errors++;
+ } else {
+ de->net_stats.tx_packets++;
+ de->net_stats.tx_bytes += skb->len;
+ netif_dbg(de, tx_done, de->dev,
+ "tx done, slot %d\n", tx_tail);
+ }
+ dev_kfree_skb_irq(skb);
+ }
+
+next:
+ de->tx_skb[tx_tail].skb = NULL;
+
+ tx_tail = NEXT_TX(tx_tail);
+ }
+
+ de->tx_tail = tx_tail;
+
+ if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
+ netif_wake_queue(de->dev);
+}
+
+static netdev_tx_t de_start_xmit (struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct de_private *de = netdev_priv(dev);
+ unsigned int entry, tx_free;
+ u32 mapping, len, flags = FirstFrag | LastFrag;
+ struct de_desc *txd;
+
+ spin_lock_irq(&de->lock);
+
+ tx_free = TX_BUFFS_AVAIL(de);
+ if (tx_free == 0) {
+ netif_stop_queue(dev);
+ spin_unlock_irq(&de->lock);
+ return NETDEV_TX_BUSY;
+ }
+ tx_free--;
+
+ entry = de->tx_head;
+
+ txd = &de->tx_ring[entry];
+
+ len = skb->len;
+ mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ if (entry == (DE_TX_RING_SIZE - 1))
+ flags |= RingEnd;
+ if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
+ flags |= TxSwInt;
+ flags |= len;
+ txd->opts2 = cpu_to_le32(flags);
+ txd->addr1 = cpu_to_le32(mapping);
+
+ de->tx_skb[entry].skb = skb;
+ de->tx_skb[entry].mapping = mapping;
+ wmb();
+
+ txd->opts1 = cpu_to_le32(DescOwn);
+ wmb();
+
+ de->tx_head = NEXT_TX(entry);
+ netif_dbg(de, tx_queued, dev, "tx queued, slot %d, skblen %d\n",
+ entry, skb->len);
+
+ if (tx_free == 0)
+ netif_stop_queue(dev);
+
+ spin_unlock_irq(&de->lock);
+
+ /* Trigger an immediate transmit demand. */
+ dw32(TxPoll, NormalTxPoll);
+
+ return NETDEV_TX_OK;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ Note that we only use exclusion around actually queueing the
+ new frame, not around filling de->setup_frame. This is non-deterministic
+ when re-entered but still correct. */
+
+#undef set_bit_le
+#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
+
+static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
+{
+ struct de_private *de = netdev_priv(dev);
+ u16 hash_table[32];
+ struct netdev_hw_addr *ha;
+ int i;
+ u16 *eaddrs;
+
+ memset(hash_table, 0, sizeof(hash_table));
+ set_bit_le(255, hash_table); /* Broadcast entry */
+ /* This should work on big-endian machines as well. */
+ netdev_for_each_mc_addr(ha, dev) {
+ int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
+
+ set_bit_le(index, hash_table);
+ }
+
+ for (i = 0; i < 32; i++) {
+ *setup_frm++ = hash_table[i];
+ *setup_frm++ = hash_table[i];
+ }
+ setup_frm = &de->setup_frame[13*6];
+
+ /* Fill the final entry with our physical address. */
+ eaddrs = (u16 *)dev->dev_addr;
+ *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
+ *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
+ *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+}
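+/* The imperfect filter built above is a 512-bin hash: the low 9 bits of
+ * the little-endian CRC-32 of each multicast address select one bit in
+ * hash_table[]. Each 16-bit word is written twice because only one
+ * shortword of every setup-frame longword is significant, and doubling
+ * it is correct on either endianness. */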
+
+static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
+{
+ struct de_private *de = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ u16 *eaddrs;
+
+ /* We have <= 14 addresses so we can use the wonderful
+ 16 address perfect filtering of the Tulip. */
+ netdev_for_each_mc_addr(ha, dev) {
+ eaddrs = (u16 *) ha->addr;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ }
+ /* Fill the unused entries with the broadcast address. */
+ memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
+ setup_frm = &de->setup_frame[15*6];
+
+ /* Fill the final entry with our physical address. */
+ eaddrs = (u16 *)dev->dev_addr;
+ *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
+ *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
+ *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+}
+
+
+static void __de_set_rx_mode (struct net_device *dev)
+{
+ struct de_private *de = netdev_priv(dev);
+ u32 macmode;
+ unsigned int entry;
+ u32 mapping;
+ struct de_desc *txd;
+ struct de_desc *dummy_txd = NULL;
+
+ macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ macmode |= AcceptAllMulticast | AcceptAllPhys;
+ goto out;
+ }
+
+ if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter well -- accept all multicasts. */
+ macmode |= AcceptAllMulticast;
+ goto out;
+ }
+
+ /* Note that only the low-address shortword of setup_frame is valid!
+ The values are doubled for big-endian architectures. */
+ if (netdev_mc_count(dev) > 14) /* Must use a multicast hash table. */
+ build_setup_frame_hash (de->setup_frame, dev);
+ else
+ build_setup_frame_perfect (de->setup_frame, dev);
+
+ /*
+ * Now add this frame to the Tx list.
+ */
+
+ entry = de->tx_head;
+
+ /* Avoid a chip errata by prefixing a dummy entry. */
+ if (entry != 0) {
+ de->tx_skb[entry].skb = DE_DUMMY_SKB;
+
+ dummy_txd = &de->tx_ring[entry];
+ dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
+ cpu_to_le32(RingEnd) : 0;
+ dummy_txd->addr1 = 0;
+
+ /* Must set DescOwned later to avoid race with chip */
+
+ entry = NEXT_TX(entry);
+ }
+
+ de->tx_skb[entry].skb = DE_SETUP_SKB;
+ de->tx_skb[entry].mapping = mapping =
+ pci_map_single (de->pdev, de->setup_frame,
+ sizeof (de->setup_frame), PCI_DMA_TODEVICE);
+
+ /* Put the setup frame on the Tx list. */
+ txd = &de->tx_ring[entry];
+ if (entry == (DE_TX_RING_SIZE - 1))
+ txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
+ else
+ txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
+ txd->addr1 = cpu_to_le32(mapping);
+ wmb();
+
+ txd->opts1 = cpu_to_le32(DescOwn);
+ wmb();
+
+ if (dummy_txd) {
+ dummy_txd->opts1 = cpu_to_le32(DescOwn);
+ wmb();
+ }
+
+ de->tx_head = NEXT_TX(entry);
+
+ if (TX_BUFFS_AVAIL(de) == 0)
+ netif_stop_queue(dev);
+
+ /* Trigger an immediate transmit demand. */
+ dw32(TxPoll, NormalTxPoll);
+
+out:
+ if (macmode != dr32(MacMode))
+ dw32(MacMode, macmode);
+}
+
+static void de_set_rx_mode (struct net_device *dev)
+{
+ unsigned long flags;
+ struct de_private *de = netdev_priv(dev);
+
+ spin_lock_irqsave (&de->lock, flags);
+ __de_set_rx_mode(dev);
+ spin_unlock_irqrestore (&de->lock, flags);
+}
+
+static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
+{
+ if (unlikely(rx_missed & RxMissedOver))
+ de->net_stats.rx_missed_errors += RxMissedMask;
+ else
+ de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
+}
+
+static void __de_get_stats(struct de_private *de)
+{
+ u32 tmp = dr32(RxMissed); /* self-clearing */
+
+ de_rx_missed(de, tmp);
+}
+
+static struct net_device_stats *de_get_stats(struct net_device *dev)
+{
+ struct de_private *de = netdev_priv(dev);
+
+ /* The chip only needs to report frames it silently dropped. */
+ spin_lock_irq(&de->lock);
+ if (netif_running(dev) && netif_device_present(dev))
+ __de_get_stats(de);
+ spin_unlock_irq(&de->lock);
+
+ return &de->net_stats;
+}
+
+static inline int de_is_running (struct de_private *de)
+{
+ return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
+}
+
+static void de_stop_rxtx (struct de_private *de)
+{
+ u32 macmode;
+ unsigned int i = 1300/100;
+
+ macmode = dr32(MacMode);
+ if (macmode & RxTx) {
+ dw32(MacMode, macmode & ~RxTx);
+ dr32(MacMode);
+ }
+
+ /* wait until in-flight frame completes.
+ * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
+ * Typically expect this loop to end in < 50 us on 100BT.
+ */
+ while (--i) {
+ if (!de_is_running(de))
+ return;
+ udelay(100);
+ }
+
+ netdev_warn(de->dev, "timeout expired, stopping DMA\n");
+}
+
+static inline void de_start_rxtx (struct de_private *de)
+{
+ u32 macmode;
+
+ macmode = dr32(MacMode);
+ if ((macmode & RxTx) != RxTx) {
+ dw32(MacMode, macmode | RxTx);
+ dr32(MacMode);
+ }
+}
+
+static void de_stop_hw (struct de_private *de)
+{
+
+ udelay(5);
+ dw32(IntrMask, 0);
+
+ de_stop_rxtx(de);
+
+ dw32(MacStatus, dr32(MacStatus));
+
+ udelay(10);
+
+ de->rx_tail = 0;
+ de->tx_head = de->tx_tail = 0;
+}
+
+static void de_link_up(struct de_private *de)
+{
+ if (!netif_carrier_ok(de->dev)) {
+ netif_carrier_on(de->dev);
+ netif_info(de, link, de->dev, "link up, media %s\n",
+ media_name[de->media_type]);
+ }
+}
+
+static void de_link_down(struct de_private *de)
+{
+ if (netif_carrier_ok(de->dev)) {
+ netif_carrier_off(de->dev);
+ netif_info(de, link, de->dev, "link down\n");
+ }
+}
+
+static void de_set_media (struct de_private *de)
+{
+ unsigned media = de->media_type;
+ u32 macmode = dr32(MacMode);
+
+ if (de_is_running(de))
+ netdev_warn(de->dev, "chip is running while changing media!\n");
+
+ if (de->de21040)
+ dw32(CSR11, FULL_DUPLEX_MAGIC);
+ dw32(CSR13, 0); /* Reset phy */
+ dw32(CSR14, de->media[media].csr14);
+ dw32(CSR15, de->media[media].csr15);
+ dw32(CSR13, de->media[media].csr13);
+
+ /* must delay 10ms before writing to other registers,
+ * especially CSR6
+ */
+ mdelay(10);
+
+ if (media == DE_MEDIA_TP_FD)
+ macmode |= FullDuplex;
+ else
+ macmode &= ~FullDuplex;
+
+ netif_info(de, link, de->dev, "set link %s\n", media_name[media]);
+ netif_info(de, hw, de->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
+ dr32(MacMode), dr32(SIAStatus),
+ dr32(CSR13), dr32(CSR14), dr32(CSR15));
+ netif_info(de, hw, de->dev, "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
+ macmode, de->media[media].csr13,
+ de->media[media].csr14, de->media[media].csr15);
+ if (macmode != dr32(MacMode))
+ dw32(MacMode, macmode);
+}
+
+static void de_next_media (struct de_private *de, const u32 *media,
+ unsigned int n_media)
+{
+ unsigned int i;
+
+ for (i = 0; i < n_media; i++) {
+ if (de_ok_to_advertise(de, media[i])) {
+ de->media_type = media[i];
+ return;
+ }
+ }
+}
+
+static void de21040_media_timer (unsigned long data)
+{
+ struct de_private *de = (struct de_private *) data;
+ struct net_device *dev = de->dev;
+ u32 status = dr32(SIAStatus);
+ unsigned int carrier;
+ unsigned long flags;
+
+ carrier = (status & NetCxnErr) ? 0 : 1;
+
+ if (carrier) {
+ if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
+ goto no_link_yet;
+
+ de->media_timer.expires = jiffies + DE_TIMER_LINK;
+ add_timer(&de->media_timer);
+ if (!netif_carrier_ok(dev))
+ de_link_up(de);
+ else
+ netif_info(de, timer, dev, "%s link ok, status %x\n",
+ media_name[de->media_type], status);
+ return;
+ }
+
+ de_link_down(de);
+
+ if (de->media_lock)
+ return;
+
+ if (de->media_type == DE_MEDIA_AUI) {
+ static const u32 next_state = DE_MEDIA_TP;
+ de_next_media(de, &next_state, 1);
+ } else {
+ static const u32 next_state = DE_MEDIA_AUI;
+ de_next_media(de, &next_state, 1);
+ }
+
+ spin_lock_irqsave(&de->lock, flags);
+ de_stop_rxtx(de);
+ spin_unlock_irqrestore(&de->lock, flags);
+ de_set_media(de);
+ de_start_rxtx(de);
+
+no_link_yet:
+ de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
+ add_timer(&de->media_timer);
+
+ netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
+ media_name[de->media_type], status);
+}
+
+static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
+{
+ switch (new_media) {
+ case DE_MEDIA_TP_AUTO:
+ if (!(de->media_advertise & ADVERTISED_Autoneg))
+ return 0;
+ if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
+ return 0;
+ break;
+ case DE_MEDIA_BNC:
+ if (!(de->media_advertise & ADVERTISED_BNC))
+ return 0;
+ break;
+ case DE_MEDIA_AUI:
+ if (!(de->media_advertise & ADVERTISED_AUI))
+ return 0;
+ break;
+ case DE_MEDIA_TP:
+ if (!(de->media_advertise & ADVERTISED_10baseT_Half))
+ return 0;
+ break;
+ case DE_MEDIA_TP_FD:
+ if (!(de->media_advertise & ADVERTISED_10baseT_Full))
+ return 0;
+ break;
+ }
+
+ return 1;
+}
+
+static void de21041_media_timer (unsigned long data)
+{
+ struct de_private *de = (struct de_private *) data;
+ struct net_device *dev = de->dev;
+ u32 status = dr32(SIAStatus);
+ unsigned int carrier;
+ unsigned long flags;
+
+ /* clear port active bits */
+ dw32(SIAStatus, NonselPortActive | SelPortActive);
+
+ carrier = (status & NetCxnErr) ? 0 : 1;
+
+ if (carrier) {
+ if ((de->media_type == DE_MEDIA_TP_AUTO ||
+ de->media_type == DE_MEDIA_TP ||
+ de->media_type == DE_MEDIA_TP_FD) &&
+ (status & LinkFailStatus))
+ goto no_link_yet;
+
+ de->media_timer.expires = jiffies + DE_TIMER_LINK;
+ add_timer(&de->media_timer);
+ if (!netif_carrier_ok(dev))
+ de_link_up(de);
+ else
+ netif_info(de, timer, dev,
+ "%s link ok, mode %x status %x\n",
+ media_name[de->media_type],
+ dr32(MacMode), status);
+ return;
+ }
+
+ de_link_down(de);
+
+ /* if media type locked, don't switch media */
+ if (de->media_lock)
+ goto set_media;
+
+ /* if activity detected, use that as hint for new media type */
+ if (status & NonselPortActive) {
+ unsigned int have_media = 1;
+
+ /* if AUI/BNC selected, then activity is on TP port */
+ if (de->media_type == DE_MEDIA_AUI ||
+ de->media_type == DE_MEDIA_BNC) {
+ if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
+ de->media_type = DE_MEDIA_TP_AUTO;
+ else
+ have_media = 0;
+ }
+
+ /* TP selected. If there is only TP and BNC, then it's BNC */
+ else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
+ de_ok_to_advertise(de, DE_MEDIA_BNC))
+ de->media_type = DE_MEDIA_BNC;
+
+ /* TP selected. If there is only TP and AUI, then it's AUI */
+ else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
+ de_ok_to_advertise(de, DE_MEDIA_AUI))
+ de->media_type = DE_MEDIA_AUI;
+
+ /* otherwise, ignore the hint */
+ else
+ have_media = 0;
+
+ if (have_media)
+ goto set_media;
+ }
+
+ /*
+ * Absent or ambiguous activity hint, move to next advertised
+ * media state. If de->media_type is left unchanged, this
+ * simply resets the PHY and reloads the current media settings.
+ */
+ if (de->media_type == DE_MEDIA_AUI) {
+ static const u32 next_states[] = {
+ DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
+ };
+ de_next_media(de, next_states, ARRAY_SIZE(next_states));
+ } else if (de->media_type == DE_MEDIA_BNC) {
+ static const u32 next_states[] = {
+ DE_MEDIA_TP_AUTO, DE_MEDIA_AUI
+ };
+ de_next_media(de, next_states, ARRAY_SIZE(next_states));
+ } else {
+ static const u32 next_states[] = {
+ DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
+ };
+ de_next_media(de, next_states, ARRAY_SIZE(next_states));
+ }
+
+set_media:
+ spin_lock_irqsave(&de->lock, flags);
+ de_stop_rxtx(de);
+ spin_unlock_irqrestore(&de->lock, flags);
+ de_set_media(de);
+ de_start_rxtx(de);
+
+no_link_yet:
+ de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
+ add_timer(&de->media_timer);
+
+ netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
+ media_name[de->media_type], status);
+}
+
+static void de_media_interrupt (struct de_private *de, u32 status)
+{
+ if (status & LinkPass) {
+ /* Ignore if current media is AUI or BNC and we can't use TP */
+ if ((de->media_type == DE_MEDIA_AUI ||
+ de->media_type == DE_MEDIA_BNC) &&
+ (de->media_lock ||
+ !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
+ return;
+ /* If current media is not TP, change it to TP */
+ if ((de->media_type == DE_MEDIA_AUI ||
+ de->media_type == DE_MEDIA_BNC)) {
+ de->media_type = DE_MEDIA_TP_AUTO;
+ de_stop_rxtx(de);
+ de_set_media(de);
+ de_start_rxtx(de);
+ }
+ de_link_up(de);
+ mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
+ return;
+ }
+
+ BUG_ON(!(status & LinkFail));
+ /* Mark the link as down only if current media is TP */
+ if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
+ de->media_type != DE_MEDIA_BNC) {
+ de_link_down(de);
+ mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
+ }
+}
+
+static int de_reset_mac (struct de_private *de)
+{
+ u32 status, tmp;
+
+ /*
+ * Reset MAC. de4x5.c and tulip.c examined for "advice"
+ * in this area.
+ */
+
+ if (dr32(BusMode) == 0xffffffff)
+ return -EBUSY;
+
+ /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
+ dw32 (BusMode, CmdReset);
+ mdelay (1);
+
+ dw32 (BusMode, de_bus_mode);
+ mdelay (1);
+
+ for (tmp = 0; tmp < 5; tmp++) {
+ dr32 (BusMode);
+ mdelay (1);
+ }
+
+ mdelay (1);
+
+ status = dr32(MacStatus);
+ if (status & (RxState | TxState))
+ return -EBUSY;
+ if (status == 0xffffffff)
+ return -ENODEV;
+ return 0;
+}
+
+static void de_adapter_wake (struct de_private *de)
+{
+ u32 pmctl;
+
+ if (de->de21040)
+ return;
+
+ pci_read_config_dword(de->pdev, PCIPM, &pmctl);
+ if (pmctl & PM_Mask) {
+ pmctl &= ~PM_Mask;
+ pci_write_config_dword(de->pdev, PCIPM, pmctl);
+
+ /* de4x5.c delays, so we do too */
+ msleep(10);
+ }
+}
+
+static void de_adapter_sleep (struct de_private *de)
+{
+ u32 pmctl;
+
+ if (de->de21040)
+ return;
+
+ dw32(CSR13, 0); /* Reset phy */
+ pci_read_config_dword(de->pdev, PCIPM, &pmctl);
+ pmctl |= PM_Sleep;
+ pci_write_config_dword(de->pdev, PCIPM, pmctl);
+}
+
+static int de_init_hw (struct de_private *de)
+{
+ struct net_device *dev = de->dev;
+ u32 macmode;
+ int rc;
+
+ de_adapter_wake(de);
+
+ macmode = dr32(MacMode) & ~MacModeClear;
+
+ rc = de_reset_mac(de);
+ if (rc)
+ return rc;
+
+ de_set_media(de); /* reset phy */
+
+ dw32(RxRingAddr, de->ring_dma);
+ dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));
+
+ dw32(MacMode, RxTx | macmode);
+
+ dr32(RxMissed); /* self-clearing */
+
+ dw32(IntrMask, de_intr_mask);
+
+ de_set_rx_mode(dev);
+
+ return 0;
+}
+
+static int de_refill_rx (struct de_private *de)
+{
+ unsigned i;
+
+ for (i = 0; i < DE_RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(de->rx_buf_sz);
+ if (!skb)
+ goto err_out;
+
+ skb->dev = de->dev;
+
+ de->rx_skb[i].mapping = pci_map_single(de->pdev,
+ skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ de->rx_skb[i].skb = skb;
+
+ de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
+ if (i == (DE_RX_RING_SIZE - 1))
+ de->rx_ring[i].opts2 =
+ cpu_to_le32(RingEnd | de->rx_buf_sz);
+ else
+ de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
+ de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
+ de->rx_ring[i].addr2 = 0;
+ }
+
+ return 0;
+
+err_out:
+ de_clean_rings(de);
+ return -ENOMEM;
+}
+
+static int de_init_rings (struct de_private *de)
+{
+ memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
+ de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
+
+ de->rx_tail = 0;
+ de->tx_head = de->tx_tail = 0;
+
+ return de_refill_rx (de);
+}
+
+static int de_alloc_rings (struct de_private *de)
+{
+ de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
+ if (!de->rx_ring)
+ return -ENOMEM;
+ de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
+ return de_init_rings(de);
+}
+
+static void de_clean_rings (struct de_private *de)
+{
+ unsigned i;
+
+ memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
+ de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
+ wmb();
+ memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
+ de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
+ wmb();
+
+ for (i = 0; i < DE_RX_RING_SIZE; i++) {
+ if (de->rx_skb[i].skb) {
+ pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
+ de->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(de->rx_skb[i].skb);
+ }
+ }
+
+ for (i = 0; i < DE_TX_RING_SIZE; i++) {
+ struct sk_buff *skb = de->tx_skb[i].skb;
+ if ((skb) && (skb != DE_DUMMY_SKB)) {
+ if (skb != DE_SETUP_SKB) {
+ de->net_stats.tx_dropped++;
+ pci_unmap_single(de->pdev,
+ de->tx_skb[i].mapping,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ } else {
+ pci_unmap_single(de->pdev,
+ de->tx_skb[i].mapping,
+ sizeof(de->setup_frame),
+ PCI_DMA_TODEVICE);
+ }
+ }
+ }
+
+ memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
+ memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
+}
+
+static void de_free_rings (struct de_private *de)
+{
+ de_clean_rings(de);
+ pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
+ de->rx_ring = NULL;
+ de->tx_ring = NULL;
+}
+
+static int de_open (struct net_device *dev)
+{
+ struct de_private *de = netdev_priv(dev);
+ int rc;
+
+ netif_dbg(de, ifup, dev, "enabling interface\n");
+
+ de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+
+ rc = de_alloc_rings(de);
+ if (rc) {
+ netdev_err(dev, "ring allocation failure, err=%d\n", rc);
+ return rc;
+ }
+
+ dw32(IntrMask, 0);
+
+ rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
+ if (rc) {
+ netdev_err(dev, "IRQ %d request failure, err=%d\n",
+ dev->irq, rc);
+ goto err_out_free;
+ }
+
+ rc = de_init_hw(de);
+ if (rc) {
+ netdev_err(dev, "h/w init failure, err=%d\n", rc);
+ goto err_out_free_irq;
+ }
+
+ netif_start_queue(dev);
+ mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
+
+ return 0;
+
+err_out_free_irq:
+ free_irq(dev->irq, dev);
+err_out_free:
+ de_free_rings(de);
+ return rc;
+}
+
+static int de_close (struct net_device *dev)
+{
+ struct de_private *de = netdev_priv(dev);
+ unsigned long flags;
+
+ netif_dbg(de, ifdown, dev, "disabling interface\n");
+
+ del_timer_sync(&de->media_timer);
+
+ spin_lock_irqsave(&de->lock, flags);
+ de_stop_hw(de);
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ spin_unlock_irqrestore(&de->lock, flags);
+
+ free_irq(dev->irq, dev);
+
+ de_free_rings(de);
+ de_adapter_sleep(de);
+ return 0;
+}
+
+static void de_tx_timeout (struct net_device *dev)
+{
+ struct de_private *de = netdev_priv(dev);
+
+ netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
+ dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
+ de->rx_tail, de->tx_head, de->tx_tail);
+
+ del_timer_sync(&de->media_timer);
+
+ disable_irq(dev->irq);
+ spin_lock_irq(&de->lock);
+
+ de_stop_hw(de);
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ spin_unlock_irq(&de->lock);
+ enable_irq(dev->irq);
+
+ /* Update the error counts. */
+ __de_get_stats(de);
+
+ synchronize_irq(dev->irq);
+ de_clean_rings(de);
+
+ de_init_rings(de);
+
+ de_init_hw(de);
+
+ netif_wake_queue(dev);
+}
+
+static void __de_get_regs(struct de_private *de, u8 *buf)
+{
+ int i;
+ u32 *rbuf = (u32 *)buf;
+
+ /* read all CSRs */
+ for (i = 0; i < DE_NUM_REGS; i++)
+ rbuf[i] = dr32(i * 8);
+
+ /* handle self-clearing RxMissed counter, CSR8 */
+ de_rx_missed(de, rbuf[8]);
+}
+
+static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
+{
+ ecmd->supported = de->media_supported;
+ ecmd->transceiver = XCVR_INTERNAL;
+ ecmd->phy_address = 0;
+ ecmd->advertising = de->media_advertise;
+
+ switch (de->media_type) {
+ case DE_MEDIA_AUI:
+ ecmd->port = PORT_AUI;
+ break;
+ case DE_MEDIA_BNC:
+ ecmd->port = PORT_BNC;
+ break;
+ default:
+ ecmd->port = PORT_TP;
+ break;
+ }
+
+ ethtool_cmd_speed_set(ecmd, 10);
+
+ if (dr32(MacMode) & FullDuplex)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+
+ if (de->media_lock)
+ ecmd->autoneg = AUTONEG_DISABLE;
+ else
+ ecmd->autoneg = AUTONEG_ENABLE;
+
+ /* ignore maxtxpkt, maxrxpkt for now */
+
+ return 0;
+}
+
+static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
+{
+ u32 new_media;
+ unsigned int media_lock;
+
+ if (ethtool_cmd_speed(ecmd) != 10)
+ return -EINVAL;
+ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
+ return -EINVAL;
+ if (de->de21040 && ecmd->port == PORT_BNC)
+ return -EINVAL;
+ if (ecmd->transceiver != XCVR_INTERNAL)
+ return -EINVAL;
+ if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+ if (ecmd->advertising & ~de->media_supported)
+ return -EINVAL;
+ if (ecmd->autoneg == AUTONEG_ENABLE &&
+ (!(ecmd->advertising & ADVERTISED_Autoneg)))
+ return -EINVAL;
+
+ switch (ecmd->port) {
+ case PORT_AUI:
+ new_media = DE_MEDIA_AUI;
+ if (!(ecmd->advertising & ADVERTISED_AUI))
+ return -EINVAL;
+ break;
+ case PORT_BNC:
+ new_media = DE_MEDIA_BNC;
+ if (!(ecmd->advertising & ADVERTISED_BNC))
+ return -EINVAL;
+ break;
+ default:
+ if (ecmd->autoneg == AUTONEG_ENABLE)
+ new_media = DE_MEDIA_TP_AUTO;
+ else if (ecmd->duplex == DUPLEX_FULL)
+ new_media = DE_MEDIA_TP_FD;
+ else
+ new_media = DE_MEDIA_TP;
+ if (!(ecmd->advertising & ADVERTISED_TP))
+ return -EINVAL;
+ if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
+ return -EINVAL;
+ break;
+ }
+
+ media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
+
+ if ((new_media == de->media_type) &&
+ (media_lock == de->media_lock) &&
+ (ecmd->advertising == de->media_advertise))
+ return 0; /* nothing to change */
+
+ de_link_down(de);
+ mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
+ de_stop_rxtx(de);
+
+ de->media_type = new_media;
+ de->media_lock = media_lock;
+ de->media_advertise = ecmd->advertising;
+ de_set_media(de);
+ if (netif_running(de->dev))
+ de_start_rxtx(de);
+
+ return 0;
+}
+
+static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
+{
+ struct de_private *de = netdev_priv(dev);
+
+ strcpy (info->driver, DRV_NAME);
+ strcpy (info->version, DRV_VERSION);
+ strcpy (info->bus_info, pci_name(de->pdev));
+ info->eedump_len = DE_EEPROM_SIZE;
+}
+
+static int de_get_regs_len(struct net_device *dev)
+{
+ return DE_REGS_SIZE;
+}
+
+static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct de_private *de = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&de->lock);
+ rc = __de_get_settings(de, ecmd);
+ spin_unlock_irq(&de->lock);
+
+ return rc;
+}
+
+static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct de_private *de = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&de->lock);
+ rc = __de_set_settings(de, ecmd);
+ spin_unlock_irq(&de->lock);
+
+ return rc;
+}
+
+static u32 de_get_msglevel(struct net_device *dev)
+{
+ struct de_private *de = netdev_priv(dev);
+
+ return de->msg_enable;
+}
+
+static void de_set_msglevel(struct net_device *dev, u32 msglvl)
+{
+ struct de_private *de = netdev_priv(dev);
+
+ de->msg_enable = msglvl;
+}
+
+static int de_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct de_private *de = netdev_priv(dev);
+
+ if (!de->ee_data)
+ return -EOPNOTSUPP;
+ if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
+ (eeprom->len != DE_EEPROM_SIZE))
+ return -EINVAL;
+ memcpy(data, de->ee_data, eeprom->len);
+
+ return 0;
+}
+
+static int de_nway_reset(struct net_device *dev)
+{
+ struct de_private *de = netdev_priv(dev);
+ u32 status;
+
+ if (de->media_type != DE_MEDIA_TP_AUTO)
+ return -EINVAL;
+ if (netif_carrier_ok(de->dev))
+ de_link_down(de);
+
+ status = dr32(SIAStatus);
+ dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
+ netif_info(de, link, dev, "link nway restart, status %x,%x\n",
+ status, dr32(SIAStatus));
+ return 0;
+}
+
+static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *data)
+{
+ struct de_private *de = netdev_priv(dev);
+
+ regs->version = (DE_REGS_VER << 2) | de->de21040;
+
+ spin_lock_irq(&de->lock);
+ __de_get_regs(de, data);
+ spin_unlock_irq(&de->lock);
+}
+
+static const struct ethtool_ops de_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_drvinfo = de_get_drvinfo,
+ .get_regs_len = de_get_regs_len,
+ .get_settings = de_get_settings,
+ .set_settings = de_set_settings,
+ .get_msglevel = de_get_msglevel,
+ .set_msglevel = de_set_msglevel,
+ .get_eeprom = de_get_eeprom,
+ .nway_reset = de_nway_reset,
+ .get_regs = de_get_regs,
+};
+
+static void __devinit de21040_get_mac_address (struct de_private *de)
+{
+ unsigned i;
+
+ dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */
+ udelay(5);
+
+ for (i = 0; i < 6; i++) {
+ int value, boguscnt = 100000;
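+		/* CSR9 reads negative (bit 31 set) while the PROM byte is
+		 * still pending, so spin until it goes valid or we give up.
+		 */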
+ do {
+ value = dr32(ROMCmd);
+ rmb();
+ } while (value < 0 && --boguscnt > 0);
+ de->dev->dev_addr[i] = value;
+ udelay(1);
+ if (boguscnt <= 0)
+ pr_warn("timeout reading 21040 MAC address byte %u\n",
+ i);
+ }
+}
+
+static void __devinit de21040_get_media_info(struct de_private *de)
+{
+ unsigned int i;
+
+ de->media_type = DE_MEDIA_TP;
+ de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
+ SUPPORTED_10baseT_Half | SUPPORTED_AUI;
+ de->media_advertise = de->media_supported;
+
+ for (i = 0; i < DE_MAX_MEDIA; i++) {
+ switch (i) {
+ case DE_MEDIA_AUI:
+ case DE_MEDIA_TP:
+ case DE_MEDIA_TP_FD:
+ de->media[i].type = i;
+ de->media[i].csr13 = t21040_csr13[i];
+ de->media[i].csr14 = t21040_csr14[i];
+ de->media[i].csr15 = t21040_csr15[i];
+ break;
+ default:
+ de->media[i].type = DE_MEDIA_INVALID;
+ break;
+ }
+ }
+}
+
+/* Note: this routine returns extra data bits for size detection. */
+static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
+{
+ int i;
+ unsigned retval = 0;
+ void __iomem *ee_addr = regs + ROMCmd;
+ int read_cmd = location | (EE_READ_CMD << addr_len);
+
+ writel(EE_ENB & ~EE_CS, ee_addr);
+ writel(EE_ENB, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 4 + addr_len; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ writel(EE_ENB | dataval, ee_addr);
+ readl(ee_addr);
+ writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+ readl(ee_addr);
+ retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ }
+ writel(EE_ENB, ee_addr);
+ readl(ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
+ readl(ee_addr);
+ retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ writel(EE_ENB, ee_addr);
+ readl(ee_addr);
+ }
+
+ /* Terminate the EEPROM access. */
+ writel(EE_ENB & ~EE_CS, ee_addr);
+ return retval;
+}
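+
+/*
+ * Worked example (illustrative only, assuming the usual EE_READ_CMD value
+ * of 6, i.e. the always-set start bit plus the microwire "10" READ opcode):
+ * with addr_len == 6 and location == 0x12, read_cmd == 0x12 | (6 << 6)
+ * == 0x192. The command loop clocks out bits 10..0, so two leading zeros
+ * precede the start bit; a 93C46-style part ignores clocked-in zeros until
+ * the first 1, then takes the READ opcode and the six address bits.
+ */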
+
+static void __devinit de21041_get_srom_info (struct de_private *de)
+{
+ unsigned i, sa_offset = 0, ofs;
+ u8 ee_data[DE_EEPROM_SIZE + 6] = {};
+ unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
+ struct de_srom_info_leaf *il;
+ void *bufp;
+
+ /* download entire eeprom */
+ for (i = 0; i < DE_EEPROM_WORDS; i++)
+ ((__le16 *)ee_data)[i] =
+ cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));
+
+ /* DEC now has a specification but early board makers
+ just put the address in the first EEPROM locations. */
+ /* This does memcmp(eedata, eedata+16, 8) */
+
+#ifndef CONFIG_MIPS_COBALT
+
+	for (i = 0; i < 8; i++)
+ if (ee_data[i] != ee_data[16+i])
+ sa_offset = 20;
+
+#endif
+
+ /* store MAC address */
+	for (i = 0; i < 6; i++)
+ de->dev->dev_addr[i] = ee_data[i + sa_offset];
+
+ /* get offset of controller 0 info leaf. ignore 2nd byte. */
+ ofs = ee_data[SROMC0InfoLeaf];
+ if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
+ goto bad_srom;
+
+ /* get pointer to info leaf */
+ il = (struct de_srom_info_leaf *) &ee_data[ofs];
+
+ /* paranoia checks */
+ if (il->n_blocks == 0)
+ goto bad_srom;
+ if ((sizeof(ee_data) - ofs) <
+ (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
+ goto bad_srom;
+
+ /* get default media type */
+ switch (get_unaligned(&il->default_media)) {
+ case 0x0001: de->media_type = DE_MEDIA_BNC; break;
+ case 0x0002: de->media_type = DE_MEDIA_AUI; break;
+ case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
+ default: de->media_type = DE_MEDIA_TP_AUTO; break;
+ }
+
+ if (netif_msg_probe(de))
+ pr_info("de%d: SROM leaf offset %u, default media %s\n",
+ de->board_idx, ofs, media_name[de->media_type]);
+
+ /* init SIA register values to defaults */
+ for (i = 0; i < DE_MAX_MEDIA; i++) {
+ de->media[i].type = DE_MEDIA_INVALID;
+ de->media[i].csr13 = 0xffff;
+ de->media[i].csr14 = 0xffff;
+ de->media[i].csr15 = 0xffff;
+ }
+
+	/* parse media blocks to see what media types are supported,
+ * and if any custom CSR values are provided
+ */
+ bufp = ((void *)il) + sizeof(*il);
+ for (i = 0; i < il->n_blocks; i++) {
+ struct de_srom_media_block *ib = bufp;
+ unsigned idx;
+
+ /* index based on media type in media block */
+		switch (ib->opts & MediaBlockMask) {
+ case 0: /* 10baseT */
+ de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
+ | SUPPORTED_Autoneg;
+ idx = DE_MEDIA_TP;
+ de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
+ break;
+ case 1: /* BNC */
+ de->media_supported |= SUPPORTED_BNC;
+ idx = DE_MEDIA_BNC;
+ break;
+ case 2: /* AUI */
+ de->media_supported |= SUPPORTED_AUI;
+ idx = DE_MEDIA_AUI;
+ break;
+ case 4: /* 10baseT-FD */
+ de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
+ | SUPPORTED_Autoneg;
+ idx = DE_MEDIA_TP_FD;
+ de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
+ break;
+ default:
+ goto bad_srom;
+ }
+
+ de->media[idx].type = idx;
+
+ if (netif_msg_probe(de))
+ pr_info("de%d: media block #%u: %s",
+ de->board_idx, i,
+ media_name[de->media[idx].type]);
+
+ bufp += sizeof (ib->opts);
+
+ if (ib->opts & MediaCustomCSRs) {
+ de->media[idx].csr13 = get_unaligned(&ib->csr13);
+ de->media[idx].csr14 = get_unaligned(&ib->csr14);
+ de->media[idx].csr15 = get_unaligned(&ib->csr15);
+ bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
+ sizeof(ib->csr15);
+
+ if (netif_msg_probe(de))
+ pr_cont(" (%x,%x,%x)\n",
+ de->media[idx].csr13,
+ de->media[idx].csr14,
+ de->media[idx].csr15);
+
+ } else {
+ if (netif_msg_probe(de))
+ pr_cont("\n");
+ }
+
+ if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
+ break;
+ }
+
+ de->media_advertise = de->media_supported;
+
+fill_defaults:
+ /* fill in defaults, for cases where custom CSRs not used */
+ for (i = 0; i < DE_MAX_MEDIA; i++) {
+ if (de->media[i].csr13 == 0xffff)
+ de->media[i].csr13 = t21041_csr13[i];
+ if (de->media[i].csr14 == 0xffff) {
+ /* autonegotiation is broken at least on some chip
+ revisions - rev. 0x21 works, 0x11 does not */
+ if (de->pdev->revision < 0x20)
+ de->media[i].csr14 = t21041_csr14_brk[i];
+ else
+ de->media[i].csr14 = t21041_csr14[i];
+ }
+ if (de->media[i].csr15 == 0xffff)
+ de->media[i].csr15 = t21041_csr15[i];
+ }
+
+ de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);
+
+ return;
+
+bad_srom:
+ /* for error cases, it's ok to assume we support all these */
+ for (i = 0; i < DE_MAX_MEDIA; i++)
+ de->media[i].type = i;
+ de->media_supported =
+ SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP |
+ SUPPORTED_AUI |
+ SUPPORTED_BNC;
+ goto fill_defaults;
+}
+
+static const struct net_device_ops de_netdev_ops = {
+ .ndo_open = de_open,
+ .ndo_stop = de_close,
+ .ndo_set_rx_mode = de_set_rx_mode,
+ .ndo_start_xmit = de_start_xmit,
+ .ndo_get_stats = de_get_stats,
+ .ndo_tx_timeout = de_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __devinit de_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct de_private *de;
+ int rc;
+ void __iomem *regs;
+ unsigned long pciaddr;
+ static int board_idx = -1;
+
+ board_idx++;
+
+#ifndef MODULE
+ if (board_idx == 0)
+ pr_info("%s\n", version);
+#endif
+
+ /* allocate a new ethernet device structure, and fill in defaults */
+ dev = alloc_etherdev(sizeof(struct de_private));
+ if (!dev)
+ return -ENOMEM;
+
+ dev->netdev_ops = &de_netdev_ops;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->ethtool_ops = &de_ethtool_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ de = netdev_priv(dev);
+ de->de21040 = ent->driver_data == 0 ? 1 : 0;
+ de->pdev = pdev;
+ de->dev = dev;
+ de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
+ de->board_idx = board_idx;
+ spin_lock_init (&de->lock);
+ init_timer(&de->media_timer);
+ if (de->de21040)
+ de->media_timer.function = de21040_media_timer;
+ else
+ de->media_timer.function = de21041_media_timer;
+ de->media_timer.data = (unsigned long) de;
+
+ netif_carrier_off(dev);
+
+ /* wake up device, assign resources */
+ rc = pci_enable_device(pdev);
+ if (rc)
+ goto err_out_free;
+
+ /* reserve PCI resources to ensure driver atomicity */
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out_disable;
+
+ /* check for invalid IRQ value */
+ if (pdev->irq < 2) {
+ rc = -EIO;
+ pr_err("invalid irq (%d) for pci dev %s\n",
+ pdev->irq, pci_name(pdev));
+ goto err_out_res;
+ }
+
+ dev->irq = pdev->irq;
+
+ /* obtain and check validity of PCI I/O address */
+ pciaddr = pci_resource_start(pdev, 1);
+ if (!pciaddr) {
+ rc = -EIO;
+ pr_err("no MMIO resource for pci dev %s\n", pci_name(pdev));
+ goto err_out_res;
+ }
+ if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
+ rc = -EIO;
+ pr_err("MMIO resource (%llx) too small on pci dev %s\n",
+ (unsigned long long)pci_resource_len(pdev, 1),
+ pci_name(pdev));
+ goto err_out_res;
+ }
+
+ /* remap CSR registers */
+ regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
+ if (!regs) {
+ rc = -EIO;
+ pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
+ (unsigned long long)pci_resource_len(pdev, 1),
+ pciaddr, pci_name(pdev));
+ goto err_out_res;
+ }
+ dev->base_addr = (unsigned long) regs;
+ de->regs = regs;
+
+ de_adapter_wake(de);
+
+ /* make sure hardware is not running */
+ rc = de_reset_mac(de);
+ if (rc) {
+ pr_err("Cannot reset MAC, pci dev %s\n", pci_name(pdev));
+ goto err_out_iomap;
+ }
+
+ /* get MAC address, initialize default media type and
+ * get list of supported media
+ */
+ if (de->de21040) {
+ de21040_get_mac_address(de);
+ de21040_get_media_info(de);
+ } else {
+ de21041_get_srom_info(de);
+ }
+
+ /* register new network interface with kernel */
+ rc = register_netdev(dev);
+ if (rc)
+ goto err_out_iomap;
+
+ /* print info about board and interface just registered */
+ netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n",
+ de->de21040 ? "21040" : "21041",
+ dev->base_addr,
+ dev->dev_addr,
+ dev->irq);
+
+ pci_set_drvdata(pdev, dev);
+
+ /* enable busmastering */
+ pci_set_master(pdev);
+
+ /* put adapter to sleep */
+ de_adapter_sleep(de);
+
+ return 0;
+
+err_out_iomap:
+ kfree(de->ee_data);
+ iounmap(regs);
+err_out_res:
+ pci_release_regions(pdev);
+err_out_disable:
+ pci_disable_device(pdev);
+err_out_free:
+ free_netdev(dev);
+ return rc;
+}
+
+static void __devexit de_remove_one (struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct de_private *de;
+
+	BUG_ON(!dev);
+	de = netdev_priv(dev);
+ unregister_netdev(dev);
+ kfree(de->ee_data);
+ iounmap(de->regs);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(dev);
+}
+
+#ifdef CONFIG_PM
+
+static int de_suspend (struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct de_private *de = netdev_priv(dev);
+
+ rtnl_lock();
+ if (netif_running (dev)) {
+ del_timer_sync(&de->media_timer);
+
+ disable_irq(dev->irq);
+ spin_lock_irq(&de->lock);
+
+ de_stop_hw(de);
+ netif_stop_queue(dev);
+ netif_device_detach(dev);
+ netif_carrier_off(dev);
+
+ spin_unlock_irq(&de->lock);
+ enable_irq(dev->irq);
+
+ /* Update the error counts. */
+ __de_get_stats(de);
+
+ synchronize_irq(dev->irq);
+ de_clean_rings(de);
+
+ de_adapter_sleep(de);
+ pci_disable_device(pdev);
+ } else {
+ netif_device_detach(dev);
+ }
+ rtnl_unlock();
+ return 0;
+}
+
+static int de_resume (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct de_private *de = netdev_priv(dev);
+ int retval = 0;
+
+ rtnl_lock();
+ if (netif_device_present(dev))
+ goto out;
+ if (!netif_running(dev))
+ goto out_attach;
+ if ((retval = pci_enable_device(pdev))) {
+ netdev_err(dev, "pci_enable_device failed in resume\n");
+ goto out;
+ }
+ pci_set_master(pdev);
+ de_init_rings(de);
+ de_init_hw(de);
+out_attach:
+ netif_device_attach(dev);
+out:
+ rtnl_unlock();
+	return retval;
+}
+
+#endif /* CONFIG_PM */
+
+static struct pci_driver de_driver = {
+ .name = DRV_NAME,
+ .id_table = de_pci_tbl,
+ .probe = de_init_one,
+ .remove = __devexit_p(de_remove_one),
+#ifdef CONFIG_PM
+ .suspend = de_suspend,
+ .resume = de_resume,
+#endif
+};
+
+static int __init de_init (void)
+{
+#ifdef MODULE
+ pr_info("%s\n", version);
+#endif
+ return pci_register_driver(&de_driver);
+}
+
+static void __exit de_exit (void)
+{
+ pci_unregister_driver (&de_driver);
+}
+
+module_init(de_init);
+module_exit(de_exit);
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
new file mode 100644
index 000000000000..871bcaa7068d
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -0,0 +1,5599 @@
+/* de4x5.c: A DIGITAL DC21x4x DECchip and DE425/DE434/DE435/DE450/DE500
+ ethernet driver for Linux.
+
+ Copyright 1994, 1995 Digital Equipment Corporation.
+
+ Testing resources for this driver have been made available
+ in part by NASA Ames Research Center (mjacob@nas.nasa.gov).
+
+ The author may be reached at davies@maniac.ultranet.com.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2 of the License, or (at your
+ option) any later version.
+
+ THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ Originally, this driver was written for the Digital Equipment
+ Corporation series of EtherWORKS ethernet cards:
+
+ DE425 TP/COAX EISA
+ DE434 TP PCI
+ DE435 TP/COAX/AUI PCI
+ DE450 TP/COAX/AUI PCI
+ DE500 10/100 PCI Fasternet
+
+ but it will now attempt to support all cards which conform to the
+ Digital Semiconductor SROM Specification. The driver currently
+ recognises the following chips:
+
+ DC21040 (no SROM)
+ DC21041[A]
+ DC21140[A]
+ DC21142
+ DC21143
+
+ So far the driver is known to work with the following cards:
+
+ KINGSTON
+ Linksys
+ ZNYX342
+ SMC8432
+ SMC9332 (w/new SROM)
+ ZNYX31[45]
+ ZNYX346 10/100 4 port (can act as a 10/100 bridge!)
+
+ The driver has been tested on a relatively busy network using the DE425,
+ DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred
+ 16M of data to a DECstation 5000/200 as follows:
+
+ TCP UDP
+ TX RX TX RX
+ DE425 1030k 997k 1170k 1128k
+ DE434 1063k 995k 1170k 1125k
+ DE435 1063k 995k 1170k 1125k
+ DE500 1063k 998k 1170k 1125k in 10Mb/s mode
+
+ All values are typical (in kBytes/sec) from a sample of 4 for each
+ measurement. Their error is +/-20k on a quiet (private) network and also
+ depends on what load the CPU has.
+
+ =========================================================================
+ This driver has been written substantially from scratch, although its
+ inheritance of style and stack interface from 'ewrk3.c' and in turn from
+ Donald Becker's 'lance.c' should be obvious. With the module autoload of
+ every usable DECchip board, I pinched Donald's 'next_module' field to
+ link my modules together.
+
+ Up to 15 EISA cards can be supported under this driver, limited primarily
+ by the available IRQ lines. I have checked different configurations of
+ multiple depca, EtherWORKS 3 cards and de4x5 cards and have not found a
+ problem yet (provided you have at least depca.c v0.38) ...
+
+ PCI support has been added to allow the driver to work with the DE434,
+ DE435, DE450 and DE500 cards. The I/O accesses are a bit of a kludge due
+ to the differences in the EISA and PCI CSR address offsets from the base
+ address.
+
+ The ability to load this driver as a loadable module has been included
+ and used extensively during the driver development (to save those long
+ reboot sequences). Loadable module support under PCI and EISA has been
+ achieved by letting the driver autoprobe as if it were compiled into the
+ kernel. Do make sure you're not sharing interrupts with anything that
+ cannot accommodate interrupt sharing!
+
+ To utilise this ability, you have to do 8 things:
+
+ 0) have a copy of the loadable modules code installed on your system.
+ 1) copy de4x5.c from the /linux/drivers/net directory to your favourite
+ temporary directory.
+ 2) for fixed autoprobes (not recommended), edit the source code near
+ line 5594 to reflect the I/O address you're using, or assign these when
+ loading by:
+
+ insmod de4x5 io=0xghh where g = bus number
+ hh = device number
+
+ NB: autoprobing for modules is now supported by default. You may just
+ use:
+
+ insmod de4x5
+
+ to load all available boards. For a specific board, still use
+ the 'io=?' above.
+ 3) compile de4x5.c, but include -DMODULE in the command line to ensure
+ that the correct bits are compiled (see end of source code).
+ 4) if you want to add a new card, go to step 5. Otherwise, recompile a
+ kernel with the de4x5 configuration turned off and reboot.
+ 5) insmod de4x5 [io=0xghh]
+ 6) run the net startup bits for your new eth?? interface(s) manually
+ (usually /etc/rc.inet[12] at boot time).
+ 7) enjoy!
+
+ To unload a module, turn off the associated interface(s)
+ 'ifconfig eth?? down' then 'rmmod de4x5'.
+
+ Automedia detection is included so that in principle you can disconnect
+ from, e.g. TP, reconnect to BNC and things will still work (after a
+ pause whilst the driver figures out where its media went). My tests
+ using ping showed that it appears to work....
+
+ By default, the driver will now autodetect any DECchip based card.
+ Should you have a need to restrict the driver to DIGITAL only cards, you
+ can compile with a DEC_ONLY define, or if loading as a module, use the
+ 'dec_only=1' parameter.
+
+ I've changed the timing routines to use the kernel timer and scheduling
+ functions so that the hangs and other assorted problems that occurred
+ while autosensing the media should be gone. A bonus for the DC21040
+ auto media sense algorithm is that it can now use one that is more in
+ line with the rest (the DC21040 chip doesn't have a hardware timer).
+ The downside is the 1 'jiffies' (10ms) resolution.
+
+ IEEE 802.3u MII interface code has been added in anticipation that some
+ products may use it in the future.
+
+ The SMC9332 card has a non-compliant SROM which needs fixing - I have
+ patched this driver to detect it because the SROM format used complies
+ with a previous DEC-STD format.
+
+ I have removed the buffer copies needed for receive on Intels. I cannot
+ remove them for Alphas since the Tulip hardware only does longword
+ aligned DMA transfers and the Alphas get alignment traps with non
+ longword aligned data copies (which makes them really slow). No comment.
+
+ I have added SROM decoding routines to make this driver work with any
+ card that supports the Digital Semiconductor SROM spec. This will help
+ all cards running the dc2114x series chips in particular. Cards using
+ the dc2104x chips should run correctly with the basic driver. I'm in
+ debt to <mjacob@feral.com> for the testing and feedback that helped get
+ this feature working. So far we have tested KINGSTON, SMC8432, SMC9332
+ (with the latest SROM complying with the SROM spec V3: their first was
+ broken), ZNYX342 and LinkSys. ZYNX314 (dual 21041 MAC) and ZNYX 315
+ (quad 21041 MAC) cards also appear to work despite their incorrectly
+ wired IRQs.
+
+ I have added a temporary fix for interrupt problems when some SCSI cards
+ share the same interrupt as the DECchip based cards. The problem occurs
+ because the SCSI card wants to grab the interrupt as a fast interrupt
+ (runs the service routine with interrupts turned off) vs. this card
+ which really needs to run the service routine with interrupts turned on.
+ This driver will now add the interrupt service routine as a fast
+ interrupt if it is bounced from the slow interrupt. THIS IS NOT A
+ RECOMMENDED WAY TO RUN THE DRIVER and has been done for a limited time
+ until people sort out their compatibility issues and the kernel
+ interrupt service code is fixed. YOU SHOULD SEPARATE OUT THE FAST
+ INTERRUPT CARDS FROM THE SLOW INTERRUPT CARDS to ensure that they do not
+ run on the same interrupt. PCMCIA/CardBus is another can of worms...
+
+ Finally, I think I have really fixed the module loading problem with
+ more than one DECchip based card. As a side effect, I don't mess with
+ the device structure any more which means that if more than 1 card in
+ 2.0.x is installed (4 in 2.1.x), the user will have to edit
+ linux/drivers/net/Space.c to make room for them. Hence, module loading
+ is the preferred way to use this driver, since it doesn't have this
+ limitation.
+
+ Where SROM media detection is used and full duplex is specified in the
+ SROM, the feature is ignored unless lp->params.fdx is set at compile
+ time OR during a module load (insmod de4x5 args='eth??:fdx' [see
+ below]). This is because there is no way to automatically detect full
+ duplex links except through autonegotiation. When I include the
+ autonegotiation feature in the SROM autoconf code, this detection will
+ occur automatically for that case.
+
+ Command line arguments are now allowed, similar to passing arguments
+ through LILO. This will allow a per adapter board set up of full duplex
+ and media. The only lexical constraint is that the board name (dev->name)
+ must appear in the list before its parameters. The list of parameters ends
+ either at the end of the parameter list or with another board name. The
+ following parameters are allowed:
+
+ fdx for full duplex
+ autosense to set the media/speed; with the following
+ sub-parameters:
+ TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO
+
+ Case sensitivity is important for the sub-parameters. They *must* be
+ upper case. Examples:
+
+ insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
+
+ For a compiled in driver, at or above line 548, place e.g.
+ #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP"
+
+ Yes, I know full duplex isn't permissible on BNC or AUI; they're just
+ examples. By default, full duplex is turned off and AUTO is the default
+ autosense setting. In reality, I expect only the full duplex option to
+ be used. Note the use of single quotes in the two examples above and the
+ lack of commas to separate items. ALSO, you must get the requested media
+ correct in relation to what the adapter SROM says it has. There's no way
+ to determine this in advance other than by trial and error and common
+ sense, e.g. call a BNC connectored port 'BNC', not '10Mb'.
+
+ Changed the bus probing. EISA used to be done first, followed by PCI.
+ Most people probably don't even know what a de425 is today and the EISA
+ probe has messed up some SCSI cards in the past, so now PCI is always
+ probed first followed by EISA if a) the architecture allows EISA and
+ either b) there have been no PCI cards detected or c) an EISA probe is
+ forced by the user. To force a probe include "force_eisa" in your
+ insmod "args" line; for built-in kernels either change the driver to do
+ this automatically or include #define DE4X5_FORCE_EISA on or before
+ line 1040 in the driver.
+
+ TO DO:
+ ------
+
+ Revision History
+ ----------------
+
+ Version Date Description
+
+ 0.1 17-Nov-94 Initial writing. ALPHA code release.
+ 0.2 13-Jan-95 Added PCI support for DE435's.
+ 0.21 19-Jan-95 Added auto media detection.
+ 0.22 10-Feb-95 Fix interrupt handler call <chris@cosy.sbg.ac.at>.
+ Fix recognition bug reported by <bkm@star.rl.ac.uk>.
+ Add request/release_region code.
+ Add loadable modules support for PCI.
+ Clean up loadable modules support.
+ 0.23 28-Feb-95 Added DC21041 and DC21140 support.
+ Fix missed frame counter value and initialisation.
+ Fixed EISA probe.
+ 0.24 11-Apr-95 Change delay routine to use <linux/udelay>.
+ Change TX_BUFFS_AVAIL macro.
+ Change media autodetection to allow manual setting.
+ Completed DE500 (DC21140) support.
+ 0.241 18-Apr-95 Interim release without DE500 Autosense Algorithm.
+ 0.242 10-May-95 Minor changes.
+ 0.30 12-Jun-95 Timer fix for DC21140.
+ Portability changes.
+ Add ALPHA changes from <jestabro@ant.tay1.dec.com>.
+ Add DE500 semi automatic autosense.
+ Add Link Fail interrupt TP failure detection.
+ Add timer based link change detection.
+ Plugged a memory leak in de4x5_queue_pkt().
+ 0.31 13-Jun-95 Fixed PCI stuff for 1.3.1.
+ 0.32 26-Jun-95 Added verify_area() calls in de4x5_ioctl() from a
+ suggestion by <heiko@colossus.escape.de>.
+ 0.33 8-Aug-95 Add shared interrupt support (not released yet).
+ 0.331 21-Aug-95 Fix de4x5_open() with fast CPUs.
+ Fix de4x5_interrupt().
+ Fix dc21140_autoconf() mess.
+ No shared interrupt support.
+ 0.332 11-Sep-95 Added MII management interface routines.
+ 0.40 5-Mar-96 Fix setup frame timeout <maartenb@hpkuipc.cern.ch>.
+ Add kernel timer code (h/w is too flaky).
+ Add MII based PHY autosense.
+ Add new multicasting code.
+ Add new autosense algorithms for media/mode
+ selection using kernel scheduling/timing.
+ Re-formatted.
+ Made changes suggested by <jeff@router.patch.net>:
+ Change driver to detect all DECchip based cards
+ with DEC_ONLY restriction a special case.
+ Changed driver to autoprobe as a module. No irq
+ checking is done now - assume BIOS is good!
+ Added SMC9332 detection <manabe@Roy.dsl.tutics.ac.jp>
+ 0.41 21-Mar-96 Don't check for get_hw_addr checksum unless DEC card
+ only <niles@axp745gsfc.nasa.gov>
+ Fix for multiple PCI cards reported by <jos@xos.nl>
+ Duh, put the IRQF_SHARED flag into request_interrupt().
+ Fix SMC ethernet address in enet_det[].
+ Print chip name instead of "UNKNOWN" during boot.
+ 0.42 26-Apr-96 Fix MII write TA bit error.
+ Fix bug in dc21040 and dc21041 autosense code.
+ Remove buffer copies on receive for Intels.
+ Change sk_buff handling during media disconnects to
+ eliminate DUP packets.
+ Add dynamic TX thresholding.
+ Change all chips to use perfect multicast filtering.
+ Fix alloc_device() bug <jari@markkus2.fimr.fi>
+ 0.43 21-Jun-96 Fix unconnected media TX retry bug.
+ Add Accton to the list of broken cards.
+ Fix TX under-run bug for non DC21140 chips.
+ Fix boot command probe bug in alloc_device() as
+ reported by <koen.gadeyne@barco.com> and
+ <orava@nether.tky.hut.fi>.
+ Add cache locks to prevent a race condition as
+ reported by <csd@microplex.com> and
+ <baba@beckman.uiuc.edu>.
+ Upgraded alloc_device() code.
+ 0.431 28-Jun-96 Fix potential bug in queue_pkt() from discussion
+ with <csd@microplex.com>
+ 0.44 13-Aug-96 Fix RX overflow bug in 2114[023] chips.
+ Fix EISA probe bugs reported by <os2@kpi.kharkov.ua>
+ and <michael@compurex.com>.
+ 0.441 9-Sep-96 Change dc21041_autoconf() to probe quiet BNC media
+ with a loopback packet.
+ 0.442 9-Sep-96 Include AUI in dc21041 media printout. Bug reported
+ by <bhat@mundook.cs.mu.OZ.AU>
+ 0.45 8-Dec-96 Include endian functions for PPC use, from work
+ by <cort@cs.nmt.edu> and <g.thomas@opengroup.org>.
+ 0.451 28-Dec-96 Added fix to allow autoprobe for modules after
+ suggestion from <mjacob@feral.com>.
+ 0.5 30-Jan-97 Added SROM decoding functions.
+ Updated debug flags.
+ Fix sleep/wakeup calls for PCI cards, bug reported
+ by <cross@gweep.lkg.dec.com>.
+ Added multi-MAC, one SROM feature from discussion
+ with <mjacob@feral.com>.
+ Added full module autoprobe capability.
+ Added attempt to use an SMC9332 with broken SROM.
+ Added fix for ZYNX multi-mac cards that didn't
+ get their IRQs wired correctly.
+ 0.51 13-Feb-97 Added endian fixes for the SROM accesses from
+ <paubert@iram.es>
+ Fix init_connection() to remove extra device reset.
+ Fix MAC/PHY reset ordering in dc21140m_autoconf().
+ Fix initialisation problem with lp->timeout in
+ typeX_infoblock() from <paubert@iram.es>.
+ Fix MII PHY reset problem from work done by
+ <paubert@iram.es>.
+ 0.52 26-Apr-97 Some changes may not credit the right people -
+ a disk crash meant I lost some mail.
+ Change RX interrupt routine to drop rather than
+ defer packets to avoid hang reported by
+ <g.thomas@opengroup.org>.
+ Fix srom_exec() to return for COMPACT and type 1
+ infoblocks.
+ Added DC21142 and DC21143 functions.
+ Added byte counters from <phil@tazenda.demon.co.uk>
+ Added IRQF_DISABLED temporary fix from
+ <mjacob@feral.com>.
+ 0.53 12-Nov-97 Fix the *_probe() to include 'eth??' name during
+ module load: bug reported by
+ <Piete.Brooks@cl.cam.ac.uk>
+ Fix multi-MAC, one SROM, to work with 2114x chips:
+ bug reported by <cmetz@inner.net>.
+ Make above search independent of BIOS device scan
+ direction.
+ Completed DC2114[23] autosense functions.
+ 0.531 21-Dec-97 Fix DE500-XA 100Mb/s bug reported by
+ <robin@intercore.com>
+ Fix type1_infoblock() bug introduced in 0.53, from
+ problem reports by
+ <parmee@postecss.ncrfran.france.ncr.com> and
+ <jo@ice.dillingen.baynet.de>.
+ Added argument list to set up each board from either
+ a module's command line or a compiled in #define.
+ Added generic MII PHY functionality to deal with
+ newer PHY chips.
+ Fix the mess in 2.1.67.
+ 0.532 5-Jan-98 Fix bug in mii_get_phy() reported by
+ <redhat@cococo.net>.
+ Fix bug in pci_probe() for 64 bit systems reported
+ by <belliott@accessone.com>.
+ 0.533 9-Jan-98 Fix more 64 bit bugs reported by <jal@cs.brown.edu>.
+ 0.534 24-Jan-98 Fix last (?) endian bug from <geert@linux-m68k.org>
+ 0.535 21-Feb-98 Fix Ethernet Address PROM reset bug for DC21040.
+ 0.536 21-Mar-98 Change pci_probe() to use the pci_dev structure.
+ **Incompatible with 2.0.x from here.**
+ 0.540 5-Jul-98 Atomicize assertion of dev->interrupt for SMP
+ from <lma@varesearch.com>
+ Add TP, AUI and BNC cases to 21140m_autoconf() for
+ case where a 21140 under SROM control uses, e.g. AUI
+ from problem report by <delchini@lpnp09.in2p3.fr>
+ Add MII parallel detection to 2114x_autoconf() for
+ case where no autonegotiation partner exists from
+ problem report by <mlapsley@ndirect.co.uk>.
+ Add ability to force connection type directly even
+ when using SROM control from problem report by
+ <earl@exis.net>.
+ Updated the PCI interface to conform with the latest
+ version. I hope nothing is broken...
+ Add TX done interrupt modification from suggestion
+ by <Austin.Donnelly@cl.cam.ac.uk>.
+ Fix is_anc_capable() bug reported by
+ <Austin.Donnelly@cl.cam.ac.uk>.
+ Fix type[13]_infoblock() bug: during MII search, PHY
+ lp->rst not run because lp->ibn not initialised -
+ from report & fix by <paubert@iram.es>.
+ Fix probe bug with EISA & PCI cards present from
+ report by <eirik@netcom.com>.
+ 0.541 24-Aug-98 Fix compiler problems associated with i386-string
+ ops from multiple bug reports and temporary fix
+ from <paubert@iram.es>.
+ Fix pci_probe() to correctly emulate the old
+ pcibios_find_class() function.
+ Add an_exception() for old ZYNX346 and fix compile
+ warning on PPC & SPARC, from <ecd@skynet.be>.
+ Fix lastPCI to correctly work with compiled in
+ kernels and modules from bug report by
+ <Zlatko.Calusic@CARNet.hr> et al.
+ 0.542 15-Sep-98 Fix dc2114x_autoconf() to stop multiple messages
+ when media is unconnected.
+ Change dev->interrupt to lp->interrupt to ensure
+ alignment for Alpha's and avoid their unaligned
+ access traps. This flag is merely for log messages:
+ should do something more definitive though...
+ 0.543 30-Dec-98 Add SMP spin locking.
+ 0.544 8-May-99 Fix for buggy SROM in Motorola embedded boards using
+ a 21143 by <mmporter@home.com>.
+ Change PCI/EISA bus probing order.
+ 0.545 28-Nov-99 Further Moto SROM bug fix from
+ <mporter@eng.mcd.mot.com>
+ Remove double checking for DEBUG_RX in de4x5_dbg_rx()
+ from report by <geert@linux-m68k.org>
+ 0.546 22-Feb-01 Fixes Alpha XP1000 oops. The srom_search function
+ was causing a page fault when initializing the
+ variable 'pb', on a non de4x5 PCI device, in this
+ case a PCI bridge (DEC chip 21152). The value of
+ 'pb' is now only initialized if a de4x5 chip is
+ present.
+ <france@handhelds.org>
+ 0.547 08-Nov-01 Use library crc32 functions by <Matt_Domsch@dell.com>
+ 0.548 30-Aug-03 Big 2.6 cleanup. Ported to PCI/EISA probing and
+ generic DMA APIs. Fixed DE425 support on Alpha.
+ <maz@wild-wind.fr.eu.org>
+ =========================================================================
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/eisa.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+#include <linux/ctype.h>
+#include <linux/dma-mapping.h>
+#include <linux/moduleparam.h>
+#include <linux/bitops.h>
+#include <linux/gfp.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+#include <asm/uaccess.h>
+#ifdef CONFIG_PPC_PMAC
+#include <asm/machdep.h>
+#endif /* CONFIG_PPC_PMAC */
+
+#include "de4x5.h"
+
+static const char version[] __devinitconst =
+ KERN_INFO "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n";
+
+#define c_char const char
+
+/*
+** MII Information
+*/
+struct phy_table {
+ int reset; /* Hard reset required? */
+ int id; /* IEEE OUI */
+ int ta; /* One cycle TA time - 802.3u is confusing here */
+ struct { /* Non autonegotiation (parallel) speed det. */
+ int reg;
+ int mask;
+ int value;
+ } spd;
+};
+
+struct mii_phy {
+ int reset; /* Hard reset required? */
+ int id; /* IEEE OUI */
+ int ta; /* One cycle TA time */
+ struct { /* Non autonegotiation (parallel) speed det. */
+ int reg;
+ int mask;
+ int value;
+ } spd;
+ int addr; /* MII address for the PHY */
+ u_char *gep; /* Start of GEP sequence block in SROM */
+ u_char *rst; /* Start of reset sequence in SROM */
+ u_int mc; /* Media Capabilities */
+ u_int ana; /* NWay Advertisement */
+ u_int fdx; /* Full DupleX capabilities for each media */
+ u_int ttm; /* Transmit Threshold Mode for each media */
+ u_int mci; /* 21142 MII Connector Interrupt info */
+};
+
+#define DE4X5_MAX_PHY 8 /* Allow up to 8 attached PHY devices per board */
+
+struct sia_phy {
+ u_char mc; /* Media Code */
+ u_char ext; /* csr13-15 valid when set */
+ int csr13; /* SIA Connectivity Register */
+ int csr14; /* SIA TX/RX Register */
+ int csr15; /* SIA General Register */
+ int gepc; /* SIA GEP Control Information */
+ int gep; /* SIA GEP Data */
+};
+
+/*
+** Define the known universe of PHY devices that can be
+** recognised by this driver.
+*/
+static struct phy_table phy_info[] = {
+ {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}}, /* National TX */
+ {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}}, /* Broadcom T4 */
+ {0, SEEQ_T4 , 1, {0x12, 0x10, 0x10}}, /* SEEQ T4 */
+ {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}}, /* Cypress T4 */
+ {0, 0x7810 , 1, {0x14, 0x0800, 0x0800}} /* Level One LTX970 */
+};
+
+/*
+** These GENERIC values assume that the PHY devices follow 802.3u and
+** allow parallel detection to set the link partner ability register.
+** Detection of 100Base-TX [H/F Duplex] and 100Base-T4 is supported.
+*/
+#define GENERIC_REG 0x05 /* Autoneg. Link Partner Advertisement Reg. */
+#define GENERIC_MASK MII_ANLPA_100M /* All 100Mb/s Technologies */
+#define GENERIC_VALUE MII_ANLPA_100M /* 100B-TX, 100B-TX FDX, 100B-T4 */
+
+/*
+** Define special SROM detection cases
+*/
+static c_char enet_det[][ETH_ALEN] = {
+ {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0xe8, 0x00, 0x00, 0x00}
+};
+
+#define SMC 1
+#define ACCTON 2
+
+/*
+** SROM Repair definitions. If a broken SROM is detected a card may
+** use this information to help figure out what to do. This is a
+** "stab in the dark" and so far for SMC9332's only.
+*/
+static c_char srom_repair_info[][100] = {
+ {0x00,0x1e,0x00,0x00,0x00,0x08, /* SMC9332 */
+ 0x1f,0x01,0x8f,0x01,0x00,0x01,0x00,0x02,
+ 0x01,0x00,0x00,0x78,0xe0,0x01,0x00,0x50,
+ 0x00,0x18,}
+};
+
+
+#ifdef DE4X5_DEBUG
+static int de4x5_debug = DE4X5_DEBUG;
+#else
+/*static int de4x5_debug = (DEBUG_MII | DEBUG_SROM | DEBUG_PCICFG | DEBUG_MEDIA | DEBUG_VERSION);*/
+static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION);
+#endif
+
+/*
+** Allow per adapter set up. For modules this is simply a command line
+** parameter, e.g.:
+** insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
+**
+** For a compiled in driver, place e.g.
+** #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP"
+** here
+*/
+#ifdef DE4X5_PARM
+static char *args = DE4X5_PARM;
+#else
+static char *args;
+#endif
+
+struct parameters {
+ bool fdx;
+ int autosense;
+};
+
+#define DE4X5_AUTOSENSE_MS 250 /* msec autosense tick (DE500) */
+
+#define DE4X5_NDA 0xffe0 /* No Device (I/O) Address */
+
+/*
+** Ethernet PROM defines
+*/
+#define PROBE_LENGTH 32
+#define ETH_PROM_SIG 0xAA5500FFUL
+
+/*
+** Ethernet Info
+*/
+#define PKT_BUF_SZ 1536 /* Buffer size for each Tx/Rx buffer */
+#define IEEE802_3_SZ 1518 /* Packet + CRC */
+#define MAX_PKT_SZ 1514 /* Maximum ethernet packet length */
+#define MAX_DAT_SZ 1500 /* Maximum ethernet data length */
+#define MIN_DAT_SZ 1 /* Minimum ethernet data length */
+#define PKT_HDR_LEN 14 /* Addresses and data length info */
+#define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
+#define QUEUE_PKT_TIMEOUT (3*HZ) /* 3 second timeout */
+
+
+/*
+** EISA bus defines
+*/
+#define DE4X5_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
+#define DE4X5_EISA_TOTAL_SIZE 0x100 /* I/O address extent */
+
+#define EISA_ALLOWED_IRQ_LIST {5, 9, 10, 11}
+
+#define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"}
+#define DE4X5_NAME_LENGTH 8
+
+static c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
+
+/*
+** PCI Bus defines
+*/
+#define PCI_MAX_BUS_NUM 8
+#define DE4X5_PCI_TOTAL_SIZE 0x80 /* I/O address extent */
+#define DE4X5_CLASS_CODE 0x00020000 /* Network controller, Ethernet */
+
+/*
+** Memory Alignment. Each descriptor is 4 longwords long. To force a
+** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
+** DESC_ALIGN. ALIGN aligns the start address of the private memory area
+** and hence the RX descriptor ring's first entry.
+*/
+#define DE4X5_ALIGN4 ((u_long)4 - 1) /* 1 longword align */
+#define DE4X5_ALIGN8 ((u_long)8 - 1) /* 2 longword align */
+#define DE4X5_ALIGN16 ((u_long)16 - 1) /* 4 longword align */
+#define DE4X5_ALIGN32 ((u_long)32 - 1) /* 8 longword align */
+#define DE4X5_ALIGN64 ((u_long)64 - 1) /* 16 longword align */
+#define DE4X5_ALIGN128 ((u_long)128 - 1) /* 32 longword align */
+
+#define DE4X5_ALIGN DE4X5_ALIGN32 /* Keep the DC21040 happy... */
+#define DE4X5_CACHE_ALIGN CAL_16LONG
+#define DESC_SKIP_LEN DSL_0 /* Must agree with DESC_ALIGN */
+/*#define DESC_ALIGN u32 dummy[4]; / * Must agree with DESC_SKIP_LEN */
+#define DESC_ALIGN
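+
+/*
+ * Illustrative note: the two halves of the pair must change together,
+ * e.g. padding each descriptor out to 8 longwords needs both a
+ * 4-longword skip length for DESC_SKIP_LEN and "u32 dummy[4];" for
+ * DESC_ALIGN, as the commented-out line above shows.
+ */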
+
+#ifndef DEC_ONLY /* See README.de4x5 for using this */
+static int dec_only;
+#else
+static int dec_only = 1;
+#endif
+
+/*
+** DE4X5 IRQ ENABLE/DISABLE
+*/
+#define ENABLE_IRQs { \
+ imr |= lp->irq_en;\
+ outl(imr, DE4X5_IMR); /* Enable the IRQs */\
+}
+
+#define DISABLE_IRQs {\
+ imr = inl(DE4X5_IMR);\
+ imr &= ~lp->irq_en;\
+ outl(imr, DE4X5_IMR); /* Disable the IRQs */\
+}
+
+#define UNMASK_IRQs {\
+ imr |= lp->irq_mask;\
+ outl(imr, DE4X5_IMR); /* Unmask the IRQs */\
+}
+
+#define MASK_IRQs {\
+ imr = inl(DE4X5_IMR);\
+ imr &= ~lp->irq_mask;\
+ outl(imr, DE4X5_IMR); /* Mask the IRQs */\
+}
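+
+/*
+ * Note: the four macros above expect "s32 imr" and
+ * "struct de4x5_private *lp" to be in scope at the call site.
+ * DISABLE_IRQs and MASK_IRQs re-read the live mask from DE4X5_IMR
+ * before clearing bits; ENABLE_IRQs and UNMASK_IRQs reuse the
+ * caller's cached imr value.
+ */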
+
+/*
+** DE4X5 START/STOP
+*/
+#define START_DE4X5 {\
+ omr = inl(DE4X5_OMR);\
+ omr |= OMR_ST | OMR_SR;\
+ outl(omr, DE4X5_OMR); /* Enable the TX and/or RX */\
+}
+
+#define STOP_DE4X5 {\
+ omr = inl(DE4X5_OMR);\
+ omr &= ~(OMR_ST|OMR_SR);\
+ outl(omr, DE4X5_OMR); /* Disable the TX and/or RX */ \
+}
+
+/*
+** DE4X5 SIA RESET
+*/
+#define RESET_SIA outl(0, DE4X5_SICR); /* Reset SIA connectivity regs */
+
+/*
+** SROM Structure
+*/
+struct de4x5_srom {
+ char sub_vendor_id[2];
+ char sub_system_id[2];
+ char reserved[12];
+ char id_block_crc;
+ char reserved2;
+ char version;
+ char num_controllers;
+ char ieee_addr[6];
+ char info[100];
+ short chksum;
+};
+#define SUB_VENDOR_ID 0x500a
+
+/*
+** DE4X5 Descriptors. Make sure that all the RX buffers are contiguous
+** and have sizes of both a power of 2 and a multiple of 4.
+** A size of 256 bytes for each buffer could be chosen because over 90% of
+** all packets in our network are <256 bytes long and 64 longword alignment
+** is possible. 1536 showed better 'ttcp' performance. Take your pick. 32 TX
+** descriptors are needed for machines with an ALPHA CPU.
+*/
+#define NUM_RX_DESC 8 /* Number of RX descriptors */
+#define NUM_TX_DESC 32 /* Number of TX descriptors */
+#define RX_BUFF_SZ 1536 /* Power of 2 for kmalloc and */
+ /* Multiple of 4 for DC21040 */
+ /* Allows 512 byte alignment */
+struct de4x5_desc {
+ volatile __le32 status;
+ __le32 des1;
+ __le32 buf;
+ __le32 next;
+ DESC_ALIGN
+};
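+
+/*
+ * Size check (illustrative): with DESC_ALIGN empty, each descriptor is
+ * four __le32s, i.e. 16 bytes, so the ring area allocated at init time
+ * is (NUM_RX_DESC + NUM_TX_DESC) * 16 = (8 + 32) * 16 = 640 bytes,
+ * before any platform-specific RX buffer space is added on top.
+ */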
+
+/*
+** The DE4X5 private structure
+*/
+#define DE4X5_PKT_STAT_SZ 16
+#define DE4X5_PKT_BIN_SZ 128 /* Should be >=100 unless you
+ increase DE4X5_PKT_STAT_SZ */
+
+struct pkt_stats {
+ u_int bins[DE4X5_PKT_STAT_SZ]; /* Private stats counters */
+ u_int unicast;
+ u_int multicast;
+ u_int broadcast;
+ u_int excessive_collisions;
+ u_int tx_underruns;
+ u_int excessive_underruns;
+ u_int rx_runt_frames;
+ u_int rx_collision;
+ u_int rx_dribble;
+ u_int rx_overflow;
+};
+
+struct de4x5_private {
+ char adapter_name[80]; /* Adapter name */
+ u_long interrupt; /* Aligned ISR flag */
+ struct de4x5_desc *rx_ring; /* RX descriptor ring */
+ struct de4x5_desc *tx_ring; /* TX descriptor ring */
+ struct sk_buff *tx_skb[NUM_TX_DESC]; /* TX skb for freeing when sent */
+ struct sk_buff *rx_skb[NUM_RX_DESC]; /* RX skb's */
+ int rx_new, rx_old; /* RX descriptor ring pointers */
+ int tx_new, tx_old; /* TX descriptor ring pointers */
+ char setup_frame[SETUP_FRAME_LEN]; /* Holds MCA and PA info. */
+ char frame[64]; /* Min sized packet for loopback*/
+ spinlock_t lock; /* Adapter specific spinlock */
+ struct net_device_stats stats; /* Public stats */
+ struct pkt_stats pktStats; /* Private stats counters */
+ char rxRingSize;
+ char txRingSize;
+ int bus; /* EISA or PCI */
+ int bus_num; /* PCI Bus number */
+ int device; /* Device number on PCI bus */
+ int state; /* Adapter OPENED or CLOSED */
+ int chipset; /* DC21040, DC21041 or DC21140 */
+ s32 irq_mask; /* Interrupt Mask (Enable) bits */
+ s32 irq_en; /* Summary interrupt bits */
+ int media; /* Media (eg TP), mode (eg 100B)*/
+ int c_media; /* Remember the last media conn */
+ bool fdx; /* media full duplex flag */
+ int linkOK; /* Link is OK */
+ int autosense; /* Allow/disallow autosensing */
+ bool tx_enable; /* Enable descriptor polling */
+ int setup_f; /* Setup frame filtering type */
+ int local_state; /* State within a 'media' state */
+ struct mii_phy phy[DE4X5_MAX_PHY]; /* List of attached PHY devices */
+ struct sia_phy sia; /* SIA PHY Information */
+ int active; /* Index to active PHY device */
+ int mii_cnt; /* Number of attached PHY's */
+ int timeout; /* Scheduling counter */
+ struct timer_list timer; /* Timer info for kernel */
+ int tmp; /* Temporary global per card */
+ struct {
+ u_long lock; /* Lock the cache accesses */
+ s32 csr0; /* Saved Bus Mode Register */
+ s32 csr6; /* Saved Operating Mode Reg. */
+ s32 csr7; /* Saved IRQ Mask Register */
+ s32 gep; /* Saved General Purpose Reg. */
+ s32 gepc; /* Control info for GEP */
+ s32 csr13; /* Saved SIA Connectivity Reg. */
+ s32 csr14; /* Saved SIA TX/RX Register */
+ s32 csr15; /* Saved SIA General Register */
+ int save_cnt; /* Flag if state already saved */
+ struct sk_buff_head queue; /* Save the (re-ordered) skb's */
+ } cache;
+ struct de4x5_srom srom; /* A copy of the SROM */
+ int cfrv; /* Card CFRV copy */
+ int rx_ovf; /* Check for 'RX overflow' tag */
+ bool useSROM; /* For non-DEC card use SROM */
+ bool useMII; /* Infoblock using the MII */
+ int asBitValid; /* Autosense bits in GEP? */
+ int asPolarity; /* 0 => asserted high */
+ int asBit; /* Autosense bit number in GEP */
+ int defMedium; /* SROM default medium */
+ int tcount; /* Last infoblock number */
+ int infoblock_init; /* Initialised this infoblock? */
+ int infoleaf_offset; /* SROM infoleaf for controller */
+ s32 infoblock_csr6; /* csr6 value in SROM infoblock */
+ int infoblock_media; /* infoblock media */
+ int (*infoleaf_fn)(struct net_device *); /* Pointer to infoleaf function */
+ u_char *rst; /* Pointer to Type 5 reset info */
+ u_char ibn; /* Infoblock number */
+ struct parameters params; /* Command line/ #defined params */
+ struct device *gendev; /* Generic device */
+ dma_addr_t dma_rings; /* DMA handle for rings */
+ int dma_size; /* Size of the DMA area */
+ char *rx_bufs; /* rx bufs on alpha, sparc, ... */
+};
+
+/*
+** To get around certain poxy cards that don't provide an SROM
+** for the second and subsequent DECchips, I have to key off the first
+** chip's address. I'll assume there's not a bad SROM iff:
+**
+** o the chipset is the same
+** o the bus number is the same and > 0
+** o the sum of all the returned hw address bytes is 0 or 0x5fa
+**
+** Also have to save the irq for those cards whose hardware designers
+** can't follow the PCI to PCI Bridge Architecture spec.
+*/
+static struct {
+ int chipset;
+ int bus;
+ int irq;
+ u_char addr[ETH_ALEN];
+} last = {0,};
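+
+/*
+ * A minimal sketch of the heuristic above (hypothetical helper, compiled
+ * out; the real checks live in the probe path). The address-byte sum is
+ * 0 for six 0x00 bytes and 0x5fa (6 * 0xff) for six 0xff bytes - the two
+ * patterns a missing SROM typically returns.
+ */
+#if 0
+static int srom_looks_shared(int chipset, int bus, u_char *addr)
+{
+	int i, sum = 0;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		sum += addr[i];
+	return chipset == last.chipset && bus == last.bus && bus > 0 &&
+	       (sum == 0 || sum == 0x5fa);
+}
+#endif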
+
+/*
+** The transmit ring full condition is described by the tx_old and tx_new
+** pointers by:
+** tx_old = tx_new Empty ring
+** tx_old = tx_new+1 Full ring
+** tx_old+txRingSize = tx_new+1 Full ring (wrapped condition)
+*/
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+ lp->tx_old+lp->txRingSize-lp->tx_new-1:\
+ lp->tx_old -lp->tx_new-1)
+
+#define TX_PKT_PENDING (lp->tx_old != lp->tx_new)
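+
+/*
+ * Worked example of TX_BUFFS_AVAIL with txRingSize == 32: an empty ring
+ * (tx_old == tx_new, say both 4) reports 4 + 32 - 4 - 1 = 31 free slots,
+ * one entry being sacrificed to tell empty from full; a full ring
+ * (tx_old == tx_new + 1, say 5 and 4) takes the second branch and
+ * reports 5 - 4 - 1 = 0.
+ */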
+
+/*
+** Public Functions
+*/
+static int de4x5_open(struct net_device *dev);
+static netdev_tx_t de4x5_queue_pkt(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t de4x5_interrupt(int irq, void *dev_id);
+static int de4x5_close(struct net_device *dev);
+static struct net_device_stats *de4x5_get_stats(struct net_device *dev);
+static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len);
+static void set_multicast_list(struct net_device *dev);
+static int de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+
+/*
+** Private functions
+*/
+static int de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev);
+static int de4x5_init(struct net_device *dev);
+static int de4x5_sw_reset(struct net_device *dev);
+static int de4x5_rx(struct net_device *dev);
+static int de4x5_tx(struct net_device *dev);
+static void de4x5_ast(struct net_device *dev);
+static int de4x5_txur(struct net_device *dev);
+static int de4x5_rx_ovfc(struct net_device *dev);
+
+static int autoconf_media(struct net_device *dev);
+static void create_packet(struct net_device *dev, char *frame, int len);
+static void load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb);
+static int dc21040_autoconf(struct net_device *dev);
+static int dc21041_autoconf(struct net_device *dev);
+static int dc21140m_autoconf(struct net_device *dev);
+static int dc2114x_autoconf(struct net_device *dev);
+static int srom_autoconf(struct net_device *dev);
+static int de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, int (*fn)(struct net_device *, int), int (*asfn)(struct net_device *));
+static int dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct net_device *, int));
+static int test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
+static int test_for_100Mb(struct net_device *dev, int msec);
+static int wait_for_link(struct net_device *dev);
+static int test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec);
+static int is_spd_100(struct net_device *dev);
+static int is_100_up(struct net_device *dev);
+static int is_10_up(struct net_device *dev);
+static int is_anc_capable(struct net_device *dev);
+static int ping_media(struct net_device *dev, int msec);
+static struct sk_buff *de4x5_alloc_rx_buff(struct net_device *dev, int index, int len);
+static void de4x5_free_rx_buffs(struct net_device *dev);
+static void de4x5_free_tx_buffs(struct net_device *dev);
+static void de4x5_save_skbs(struct net_device *dev);
+static void de4x5_rst_desc_ring(struct net_device *dev);
+static void de4x5_cache_state(struct net_device *dev, int flag);
+static void de4x5_put_cache(struct net_device *dev, struct sk_buff *skb);
+static void de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb);
+static struct sk_buff *de4x5_get_cache(struct net_device *dev);
+static void de4x5_setup_intr(struct net_device *dev);
+static void de4x5_init_connection(struct net_device *dev);
+static int de4x5_reset_phy(struct net_device *dev);
+static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 sigr);
+static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec);
+static int test_tp(struct net_device *dev, s32 msec);
+static int EISA_signature(char *name, struct device *device);
+static int PCI_signature(char *name, struct de4x5_private *lp);
+static void DevicePresent(struct net_device *dev, u_long iobase);
+static void enet_addr_rst(u_long aprom_addr);
+static int de4x5_bad_srom(struct de4x5_private *lp);
+static short srom_rd(u_long address, u_char offset);
+static void srom_latch(u_int command, u_long address);
+static void srom_command(u_int command, u_long address);
+static void srom_address(u_int command, u_long address, u_char offset);
+static short srom_data(u_int command, u_long address);
+/*static void srom_busy(u_int command, u_long address);*/
+static void sendto_srom(u_int command, u_long addr);
+static int getfrom_srom(u_long addr);
+static int srom_map_media(struct net_device *dev);
+static int srom_infoleaf_info(struct net_device *dev);
+static void srom_init(struct net_device *dev);
+static void srom_exec(struct net_device *dev, u_char *p);
+static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr);
+static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr);
+static int mii_rdata(u_long ioaddr);
+static void mii_wdata(int data, int len, u_long ioaddr);
+static void mii_ta(u_long rw, u_long ioaddr);
+static int mii_swap(int data, int len);
+static void mii_address(u_char addr, u_long ioaddr);
+static void sendto_mii(u32 command, int data, u_long ioaddr);
+static int getfrom_mii(u32 command, u_long ioaddr);
+static int mii_get_oui(u_char phyaddr, u_long ioaddr);
+static int mii_get_phy(struct net_device *dev);
+static void SetMulticastFilter(struct net_device *dev);
+static int get_hw_addr(struct net_device *dev);
+static void srom_repair(struct net_device *dev, int card);
+static int test_bad_enet(struct net_device *dev, int status);
+static int an_exception(struct de4x5_private *lp);
+static char *build_setup_frame(struct net_device *dev, int mode);
+static void disable_ast(struct net_device *dev);
+static long de4x5_switch_mac_port(struct net_device *dev);
+static int gep_rd(struct net_device *dev);
+static void gep_wr(s32 data, struct net_device *dev);
+static void yawn(struct net_device *dev, int state);
+static void de4x5_parse_params(struct net_device *dev);
+static void de4x5_dbg_open(struct net_device *dev);
+static void de4x5_dbg_mii(struct net_device *dev, int k);
+static void de4x5_dbg_media(struct net_device *dev);
+static void de4x5_dbg_srom(struct de4x5_srom *p);
+static void de4x5_dbg_rx(struct sk_buff *skb, int len);
+static int de4x5_strncmp(char *a, char *b, int n);
+static int dc21041_infoleaf(struct net_device *dev);
+static int dc21140_infoleaf(struct net_device *dev);
+static int dc21142_infoleaf(struct net_device *dev);
+static int dc21143_infoleaf(struct net_device *dev);
+static int type0_infoblock(struct net_device *dev, u_char count, u_char *p);
+static int type1_infoblock(struct net_device *dev, u_char count, u_char *p);
+static int type2_infoblock(struct net_device *dev, u_char count, u_char *p);
+static int type3_infoblock(struct net_device *dev, u_char count, u_char *p);
+static int type4_infoblock(struct net_device *dev, u_char count, u_char *p);
+static int type5_infoblock(struct net_device *dev, u_char count, u_char *p);
+static int compact_infoblock(struct net_device *dev, u_char count, u_char *p);
+
+/*
+** Note now that module autoprobing is allowed under EISA and PCI. The
+** IRQ lines will not be auto-detected; instead I'll rely on the BIOSes
+** to "do the right thing".
+*/
+
+static int io = 0x0;	/* EDIT THIS LINE FOR YOUR CONFIGURATION IF NEEDED */
+
+module_param(io, int, 0);
+module_param(de4x5_debug, int, 0);
+module_param(dec_only, int, 0);
+module_param(args, charp, 0);
+
+MODULE_PARM_DESC(io, "de4x5 I/O base address");
+MODULE_PARM_DESC(de4x5_debug, "de4x5 debug mask");
+MODULE_PARM_DESC(dec_only, "de4x5 probe only for Digital boards (0-1)");
+MODULE_PARM_DESC(args, "de4x5 full duplex and media type settings; see de4x5.c for details");
+MODULE_LICENSE("GPL");
+
+/*
+** List the SROM infoleaf functions and chipsets
+*/
+struct InfoLeaf {
+ int chipset;
+ int (*fn)(struct net_device *);
+};
+static struct InfoLeaf infoleaf_array[] = {
+ {DC21041, dc21041_infoleaf},
+ {DC21140, dc21140_infoleaf},
+ {DC21142, dc21142_infoleaf},
+ {DC21143, dc21143_infoleaf}
+};
+#define INFOLEAF_SIZE ARRAY_SIZE(infoleaf_array)
+
+/*
+** List the SROM info block functions
+*/
+static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = {
+ type0_infoblock,
+ type1_infoblock,
+ type2_infoblock,
+ type3_infoblock,
+ type4_infoblock,
+ type5_infoblock,
+ compact_infoblock
+};
+
+#define COMPACT (ARRAY_SIZE(dc_infoblock) - 1)
+
+/*
+** Miscellaneous defines...
+*/
+#define RESET_DE4X5 {\
+ int i;\
+ i=inl(DE4X5_BMR);\
+ mdelay(1);\
+ outl(i | BMR_SWR, DE4X5_BMR);\
+ mdelay(1);\
+ outl(i, DE4X5_BMR);\
+ mdelay(1);\
+ for (i=0;i<5;i++) {inl(DE4X5_BMR); mdelay(1);}\
+ mdelay(1);\
+}
+
+#define PHY_HARD_RESET {\
+ outl(GEP_HRST, DE4X5_GEP); /* Hard RESET the PHY dev. */\
+ mdelay(1); /* Assert for 1ms */\
+ outl(0x00, DE4X5_GEP);\
+ mdelay(2); /* Wait for 2ms */\
+}
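+
+/*
+** NB: RESET_DE4X5 and PHY_HARD_RESET expand register macros (DE4X5_BMR,
+** DE4X5_GEP) that reference a local "iobase", so they may only be used
+** in functions which have u_long iobase in scope, e.g. de4x5_hw_init()
+** below.
+*/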
+
+static const struct net_device_ops de4x5_netdev_ops = {
+ .ndo_open = de4x5_open,
+ .ndo_stop = de4x5_close,
+ .ndo_start_xmit = de4x5_queue_pkt,
+ .ndo_get_stats = de4x5_get_stats,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_do_ioctl = de4x5_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address= eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+
+static int __devinit
+de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
+{
+ char name[DE4X5_NAME_LENGTH + 1];
+ struct de4x5_private *lp = netdev_priv(dev);
+ struct pci_dev *pdev = NULL;
+ int i, status=0;
+
+ dev_set_drvdata(gendev, dev);
+
+ /* Ensure we're not sleeping */
+ if (lp->bus == EISA) {
+ outb(WAKEUP, PCI_CFPM);
+ } else {
+ pdev = to_pci_dev (gendev);
+ pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
+ }
+ mdelay(10);
+
+ RESET_DE4X5;
+
+ if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
+ return -ENXIO; /* Hardware could not reset */
+ }
+
+ /*
+ ** Now find out what kind of DC21040/DC21041/DC21140 board we have.
+ */
+ lp->useSROM = false;
+ if (lp->bus == PCI) {
+ PCI_signature(name, lp);
+ } else {
+ EISA_signature(name, gendev);
+ }
+
+ if (*name == '\0') { /* Not found a board signature */
+ return -ENXIO;
+ }
+
+ dev->base_addr = iobase;
+ printk ("%s: %s at 0x%04lx", dev_name(gendev), name, iobase);
+
+ status = get_hw_addr(dev);
+ printk(", h/w address %pM\n", dev->dev_addr);
+
+ if (status != 0) {
+ printk(" which has an Ethernet PROM CRC error.\n");
+ return -ENXIO;
+ } else {
+ skb_queue_head_init(&lp->cache.queue);
+ lp->cache.gepc = GEP_INIT;
+ lp->asBit = GEP_SLNK;
+ lp->asPolarity = GEP_SLNK;
+ lp->asBitValid = ~0;
+ lp->timeout = -1;
+ lp->gendev = gendev;
+ spin_lock_init(&lp->lock);
+ init_timer(&lp->timer);
+ lp->timer.function = (void (*)(unsigned long))de4x5_ast;
+ lp->timer.data = (unsigned long)dev;
+ de4x5_parse_params(dev);
+
+ /*
+ ** Choose correct autosensing in case someone messed up
+ */
+ lp->autosense = lp->params.autosense;
+ if (lp->chipset != DC21140) {
+ if ((lp->chipset==DC21040) && (lp->params.autosense&TP_NW)) {
+ lp->params.autosense = TP;
+ }
+ if ((lp->chipset==DC21041) && (lp->params.autosense&BNC_AUI)) {
+ lp->params.autosense = BNC;
+ }
+ }
+ lp->fdx = lp->params.fdx;
+ sprintf(lp->adapter_name,"%s (%s)", name, dev_name(gendev));
+
+ lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc);
+#if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY)
+ lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN;
+#endif
+ lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size,
+ &lp->dma_rings, GFP_ATOMIC);
+ if (lp->rx_ring == NULL) {
+ return -ENOMEM;
+ }
+
+ lp->tx_ring = lp->rx_ring + NUM_RX_DESC;
+
+ /*
+ ** Set up the RX descriptor ring (Intels)
+ ** Allocate contiguous receive buffers, long word aligned (Alphas)
+ */
+#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
+ for (i=0; i<NUM_RX_DESC; i++) {
+ lp->rx_ring[i].status = 0;
+ lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
+ lp->rx_ring[i].buf = 0;
+ lp->rx_ring[i].next = 0;
+ lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */
+ }
+
+#else
+ {
+ dma_addr_t dma_rx_bufs;
+
+ dma_rx_bufs = lp->dma_rings + (NUM_RX_DESC + NUM_TX_DESC)
+ * sizeof(struct de4x5_desc);
+ dma_rx_bufs = (dma_rx_bufs + DE4X5_ALIGN) & ~DE4X5_ALIGN;
+ lp->rx_bufs = (char *)(((long)(lp->rx_ring + NUM_RX_DESC
+ + NUM_TX_DESC) + DE4X5_ALIGN) & ~DE4X5_ALIGN);
+ for (i=0; i<NUM_RX_DESC; i++) {
+ lp->rx_ring[i].status = 0;
+ lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
+ lp->rx_ring[i].buf =
+ cpu_to_le32(dma_rx_bufs+i*RX_BUFF_SZ);
+ lp->rx_ring[i].next = 0;
+ lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */
+ }
+
+ }
+#endif
+
+ barrier();
+
+ lp->rxRingSize = NUM_RX_DESC;
+ lp->txRingSize = NUM_TX_DESC;
+
+ /* Write the end of list marker to the descriptor lists */
+ lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
+ lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);
+
+ /* Tell the adapter where the TX/RX rings are located. */
+ outl(lp->dma_rings, DE4X5_RRBA);
+ outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
+ DE4X5_TRBA);
+
+ /* Initialise the IRQ mask and Enable/Disable */
+ lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
+ lp->irq_en = IMR_NIM | IMR_AIM;
+
+ /* Create a loopback packet frame for later media probing */
+ create_packet(dev, lp->frame, sizeof(lp->frame));
+
+ /* Check if the RX overflow bug needs testing for */
+ i = lp->cfrv & 0x000000fe;
+ if ((lp->chipset == DC21140) && (i == 0x20)) {
+ lp->rx_ovf = 1;
+ }
+
+ /* Initialise the SROM pointers if possible */
+ if (lp->useSROM) {
+ lp->state = INITIALISED;
+ if (srom_infoleaf_info(dev)) {
+ dma_free_coherent (gendev, lp->dma_size,
+ lp->rx_ring, lp->dma_rings);
+ return -ENXIO;
+ }
+ srom_init(dev);
+ }
+
+ lp->state = CLOSED;
+
+ /*
+ ** Check for an MII interface
+ */
+ if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
+ mii_get_phy(dev);
+ }
+
+ printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
+ ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
+ }
+
+ if (de4x5_debug & DEBUG_VERSION) {
+ printk(version);
+ }
+
+ /* The DE4X5-specific entries in the device structure. */
+ SET_NETDEV_DEV(dev, gendev);
+ dev->netdev_ops = &de4x5_netdev_ops;
+ dev->mem_start = 0;
+
+ /* Fill in the generic fields of the device structure. */
+ if ((status = register_netdev (dev))) {
+ dma_free_coherent (gendev, lp->dma_size,
+ lp->rx_ring, lp->dma_rings);
+ return status;
+ }
+
+ /* Let the adapter sleep to save power */
+ yawn(dev, SLEEP);
+
+ return status;
+}
+
+
+static int
+de4x5_open(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int i, status = 0;
+ s32 omr;
+
+ /* Allocate the RX buffers */
+ for (i=0; i<lp->rxRingSize; i++) {
+ if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) {
+ de4x5_free_rx_buffs(dev);
+ return -EAGAIN;
+ }
+ }
+
+ /*
+ ** Wake up the adapter
+ */
+ yawn(dev, WAKEUP);
+
+ /*
+ ** Re-initialize the DE4X5...
+ */
+ status = de4x5_init(dev);
+ spin_lock_init(&lp->lock);
+ lp->state = OPEN;
+ de4x5_dbg_open(dev);
+
+ if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
+ lp->adapter_name, dev)) {
+	printk("de4x5_open(): Requested IRQ%d is busy - attempting FAST/SHARE...", dev->irq);
+ if (request_irq(dev->irq, de4x5_interrupt, IRQF_DISABLED | IRQF_SHARED,
+ lp->adapter_name, dev)) {
+	    printk("\n Cannot get IRQ - reconfigure your hardware.\n");
+ disable_ast(dev);
+ de4x5_free_rx_buffs(dev);
+ de4x5_free_tx_buffs(dev);
+ yawn(dev, SLEEP);
+ lp->state = CLOSED;
+ return -EAGAIN;
+ } else {
+ printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n");
+ printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n");
+ }
+ }
+
+ lp->interrupt = UNMASK_INTERRUPTS;
+ dev->trans_start = jiffies; /* prevent tx timeout */
+
+ START_DE4X5;
+
+ de4x5_setup_intr(dev);
+
+ if (de4x5_debug & DEBUG_OPEN) {
+ printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
+ printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
+ printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
+ printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
+ printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
+ printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
+ printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
+ printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
+ }
+
+ return status;
+}
+
+/*
+** Initialize the DE4X5 operating conditions. NB: a chip problem with the
+** DC21140 requires using perfect filtering mode for that chip. Since I can't
+** see why I'd want > 14 multicast addresses, I have changed all chips to use
+** the perfect filtering mode. Keep the DMA burst length at 8: there seem
+** to be data corruption problems if it is larger (UDP errors seen from a
+** ttcp source).
+*/
+static int
+de4x5_init(struct net_device *dev)
+{
+ /* Lock out other processes whilst setting up the hardware */
+ netif_stop_queue(dev);
+
+ de4x5_sw_reset(dev);
+
+ /* Autoconfigure the connected port */
+ autoconf_media(dev);
+
+ return 0;
+}
+
+static int
+de4x5_sw_reset(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int i, j, status = 0;
+ s32 bmr, omr;
+
+ /* Select the MII or SRL port now and RESET the MAC */
+ if (!lp->useSROM) {
+ if (lp->phy[lp->active].id != 0) {
+ lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD;
+ } else {
+ lp->infoblock_csr6 = OMR_SDP | OMR_TTM;
+ }
+ de4x5_switch_mac_port(dev);
+ }
+
+ /*
+ ** Set the programmable burst length to 8 longwords for all the DC21140
+ ** Fasternet chips and 4 longwords for all others: DMA errors result
+ ** without these values. Cache align 16 long.
+ */
+ bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | DE4X5_CACHE_ALIGN;
+ bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? BMR_RML : 0);
+ outl(bmr, DE4X5_BMR);
+
+ omr = inl(DE4X5_OMR) & ~OMR_PR; /* Turn off promiscuous mode */
+ if (lp->chipset == DC21140) {
+ omr |= (OMR_SDP | OMR_SB);
+ }
+ lp->setup_f = PERFECT;
+ outl(lp->dma_rings, DE4X5_RRBA);
+ outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
+ DE4X5_TRBA);
+
+ lp->rx_new = lp->rx_old = 0;
+ lp->tx_new = lp->tx_old = 0;
+
+ for (i = 0; i < lp->rxRingSize; i++) {
+ lp->rx_ring[i].status = cpu_to_le32(R_OWN);
+ }
+
+ for (i = 0; i < lp->txRingSize; i++) {
+ lp->tx_ring[i].status = cpu_to_le32(0);
+ }
+
+ barrier();
+
+ /* Build the setup frame depending on filtering mode */
+ SetMulticastFilter(dev);
+
+ load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1);
+ outl(omr|OMR_ST, DE4X5_OMR);
+
+ /* Poll for setup frame completion (adapter interrupts are disabled now) */
+
+ for (j=0, i=0;(i<500) && (j==0);i++) { /* Up to 500ms delay */
+ mdelay(1);
+ if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1;
+ }
+ outl(omr, DE4X5_OMR); /* Stop everything! */
+
+ if (j == 0) {
+ printk("%s: Setup frame timed out, status %08x\n", dev->name,
+ inl(DE4X5_STS));
+ status = -EIO;
+ }
+
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
+ lp->tx_old = lp->tx_new;
+
+ return status;
+}
+
+/*
+** Writes a socket buffer address to the next available transmit descriptor.
+*/
+static netdev_tx_t
+de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ u_long flags = 0;
+
+ netif_stop_queue(dev);
+ if (!lp->tx_enable) /* Cannot send for now */
+ return NETDEV_TX_LOCKED;
+
+ /*
+ ** Clean out the TX ring asynchronously to interrupts - sometimes the
+ ** interrupts are lost by delayed descriptor status updates relative to
+ ** the irq assertion, especially with a busy PCI bus.
+ */
+ spin_lock_irqsave(&lp->lock, flags);
+ de4x5_tx(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ /* Test if cache is already locked - requeue skb if so */
+ if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
+ return NETDEV_TX_LOCKED;
+
+ /* Transmit descriptor ring full or stale skb */
+ if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
+ if (lp->interrupt) {
+ de4x5_putb_cache(dev, skb); /* Requeue the buffer */
+ } else {
+ de4x5_put_cache(dev, skb);
+ }
+ if (de4x5_debug & DEBUG_TX) {
+	    printk("%s: transmit busy, lost media or stale skb found:\n"
+		   " STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n"
+		   " Stale skb: %s\n",
+		   dev->name, inl(DE4X5_STS), netif_queue_stopped(dev),
+		   inl(DE4X5_IMR), inl(DE4X5_OMR),
+		   ((u_long) lp->tx_skb[lp->tx_new] > 1) ? "YES" : "NO");
+ }
+ } else if (skb->len > 0) {
+ /* If we already have stuff queued locally, use that first */
+ if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) {
+ de4x5_put_cache(dev, skb);
+ skb = de4x5_get_cache(dev);
+ }
+
+ while (skb && !netif_queue_stopped(dev) &&
+ (u_long) lp->tx_skb[lp->tx_new] <= 1) {
+ spin_lock_irqsave(&lp->lock, flags);
+ netif_stop_queue(dev);
+ load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
+ lp->stats.tx_bytes += skb->len;
+ outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
+
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
+
+ if (TX_BUFFS_AVAIL) {
+ netif_start_queue(dev); /* Another pkt may be queued */
+ }
+ skb = de4x5_get_cache(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ if (skb) de4x5_putb_cache(dev, skb);
+ }
+
+ lp->cache.lock = 0;
+
+ return NETDEV_TX_OK;
+}
+
+/*
+** The DE4X5 interrupt handler.
+**
+** I/O Read/Writes through intermediate PCI bridges are never 'posted',
+** so that the asserted interrupt always has some real data to work with -
+** if these I/O accesses are ever changed to memory accesses, ensure the
+** STS write is read immediately to complete the transaction if the adapter
+** is not on bus 0. Lost interrupts can still occur when the PCI bus load
+** is high and descriptor status bits cannot be set before the associated
+** interrupt is asserted and this routine entered.
+*/
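+/*
+** Were these I/O accesses ever converted to memory accesses, the flush
+** would look something like the sketch below (illustrative only - the
+** readl/writel forms are not what the code below uses):
+**
+**     writel(sts, <mapped DE4X5_STS>);     ack the interrupt sources
+**     (void) readl(<mapped DE4X5_STS>);    read back to flush the post
+*/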
+static irqreturn_t
+de4x5_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct de4x5_private *lp;
+ s32 imr, omr, sts, limit;
+ u_long iobase;
+ unsigned int handled = 0;
+
+ lp = netdev_priv(dev);
+ spin_lock(&lp->lock);
+ iobase = dev->base_addr;
+
+ DISABLE_IRQs; /* Ensure non re-entrancy */
+
+ if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+ synchronize_irq(dev->irq);
+
+ for (limit=0; limit<8; limit++) {
+ sts = inl(DE4X5_STS); /* Read IRQ status */
+ outl(sts, DE4X5_STS); /* Reset the board interrupts */
+
+ if (!(sts & lp->irq_mask)) break;/* All done */
+ handled = 1;
+
+ if (sts & (STS_RI | STS_RU)) /* Rx interrupt (packet[s] arrived) */
+ de4x5_rx(dev);
+
+ if (sts & (STS_TI | STS_TU)) /* Tx interrupt (packet sent) */
+ de4x5_tx(dev);
+
+ if (sts & STS_LNF) { /* TP Link has failed */
+ lp->irq_mask &= ~IMR_LFM;
+ }
+
+ if (sts & STS_UNF) { /* Transmit underrun */
+ de4x5_txur(dev);
+ }
+
+ if (sts & STS_SE) { /* Bus Error */
+ STOP_DE4X5;
+ printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
+ dev->name, sts);
+ spin_unlock(&lp->lock);
+ return IRQ_HANDLED;
+ }
+ }
+
+ /* Load the TX ring with any locally stored packets */
+ if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
+ while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) {
+ de4x5_queue_pkt(de4x5_get_cache(dev), dev);
+ }
+ lp->cache.lock = 0;
+ }
+
+ lp->interrupt = UNMASK_INTERRUPTS;
+ ENABLE_IRQs;
+ spin_unlock(&lp->lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+static int
+de4x5_rx(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int entry;
+ s32 status;
+
+ for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
+ entry=lp->rx_new) {
+ status = (s32)le32_to_cpu(lp->rx_ring[entry].status);
+
+ if (lp->rx_ovf) {
+ if (inl(DE4X5_MFC) & MFC_FOCM) {
+ de4x5_rx_ovfc(dev);
+ break;
+ }
+ }
+
+ if (status & RD_FS) { /* Remember the start of frame */
+ lp->rx_old = entry;
+ }
+
+ if (status & RD_LS) { /* Valid frame status */
+ if (lp->tx_enable) lp->linkOK++;
+ if (status & RD_ES) { /* There was an error. */
+ lp->stats.rx_errors++; /* Update the error stats. */
+ if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
+ if (status & RD_CE) lp->stats.rx_crc_errors++;
+ if (status & RD_OF) lp->stats.rx_fifo_errors++;
+ if (status & RD_TL) lp->stats.rx_length_errors++;
+ if (status & RD_RF) lp->pktStats.rx_runt_frames++;
+ if (status & RD_CS) lp->pktStats.rx_collision++;
+ if (status & RD_DB) lp->pktStats.rx_dribble++;
+ if (status & RD_OF) lp->pktStats.rx_overflow++;
+ } else { /* A valid frame received */
+ struct sk_buff *skb;
+ short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
+ >> 16) - 4;
+
+ if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
+ printk("%s: Insufficient memory; nuking packet.\n",
+ dev->name);
+ lp->stats.rx_dropped++;
+ } else {
+ de4x5_dbg_rx(skb, pkt_len);
+
+ /* Push up the protocol stack */
+ skb->protocol=eth_type_trans(skb,dev);
+ de4x5_local_stats(dev, skb->data, pkt_len);
+ netif_rx(skb);
+
+ /* Update stats */
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+ }
+ }
+
+ /* Change buffer ownership for this frame, back to the adapter */
+ for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old + 1)%lp->rxRingSize) {
+ lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
+ barrier();
+ }
+ lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
+ barrier();
+ }
+
+ /*
+ ** Update entry information
+ */
+ lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
+ }
+
+ return 0;
+}
+
+static inline void
+de4x5_free_tx_buff(struct de4x5_private *lp, int entry)
+{
+ dma_unmap_single(lp->gendev, le32_to_cpu(lp->tx_ring[entry].buf),
+ le32_to_cpu(lp->tx_ring[entry].des1) & TD_TBS1,
+ DMA_TO_DEVICE);
+ if ((u_long) lp->tx_skb[entry] > 1)
+ dev_kfree_skb_irq(lp->tx_skb[entry]);
+ lp->tx_skb[entry] = NULL;
+}
+
+/*
+** Buffer sent - check for TX buffer errors.
+*/
+static int
+de4x5_tx(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int entry;
+ s32 status;
+
+ for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
+ status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
+ if (status < 0) { /* Buffer not sent yet */
+ break;
+ } else if (status != 0x7fffffff) { /* Not setup frame */
+ if (status & TD_ES) { /* An error happened */
+ lp->stats.tx_errors++;
+ if (status & TD_NC) lp->stats.tx_carrier_errors++;
+ if (status & TD_LC) lp->stats.tx_window_errors++;
+ if (status & TD_UF) lp->stats.tx_fifo_errors++;
+ if (status & TD_EC) lp->pktStats.excessive_collisions++;
+ if (status & TD_DE) lp->stats.tx_aborted_errors++;
+
+ if (TX_PKT_PENDING) {
+ outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */
+ }
+ } else { /* Packet sent */
+ lp->stats.tx_packets++;
+ if (lp->tx_enable) lp->linkOK++;
+ }
+ /* Update the collision counter */
+ lp->stats.collisions += ((status & TD_EC) ? 16 :
+ ((status & TD_CC) >> 3));
+
+ /* Free the buffer. */
+ if (lp->tx_skb[entry] != NULL)
+ de4x5_free_tx_buff(lp, entry);
+ }
+
+ /* Update all the pointers */
+ lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
+ }
+
+ /* Any resources available? */
+ if (TX_BUFFS_AVAIL && netif_queue_stopped(dev)) {
+ if (lp->interrupt)
+ netif_wake_queue(dev);
+ else
+ netif_start_queue(dev);
+ }
+
+ return 0;
+}
+
+static void
+de4x5_ast(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int next_tick = DE4X5_AUTOSENSE_MS;
+ int dt;
+
+ if (lp->useSROM)
+ next_tick = srom_autoconf(dev);
+ else if (lp->chipset == DC21140)
+ next_tick = dc21140m_autoconf(dev);
+ else if (lp->chipset == DC21041)
+ next_tick = dc21041_autoconf(dev);
+ else if (lp->chipset == DC21040)
+ next_tick = dc21040_autoconf(dev);
+ lp->linkOK = 0;
+
+ dt = (next_tick * HZ) / 1000;
+
+ if (!dt)
+ dt = 1;
+
+ mod_timer(&lp->timer, jiffies + dt);
+}
+
+static int
+de4x5_txur(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int omr;
+
+ omr = inl(DE4X5_OMR);
+ if (!(omr & OMR_SF) || (lp->chipset==DC21041) || (lp->chipset==DC21040)) {
+ omr &= ~(OMR_ST|OMR_SR);
+ outl(omr, DE4X5_OMR);
+ while (inl(DE4X5_STS) & STS_TS);
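+	/* Bump the transmit threshold one step (0x4000 is the LSB of the
+	   OMR TR field); once it is at maximum, switch the chip to store
+	   and forward operation instead */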
+ if ((omr & OMR_TR) < OMR_TR) {
+ omr += 0x4000;
+ } else {
+ omr |= OMR_SF;
+ }
+ outl(omr | OMR_ST | OMR_SR, DE4X5_OMR);
+ }
+
+ return 0;
+}
+
+static int
+de4x5_rx_ovfc(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int omr;
+
+ omr = inl(DE4X5_OMR);
+ outl(omr & ~OMR_SR, DE4X5_OMR);
+ while (inl(DE4X5_STS) & STS_RS);
+
+ for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
+ lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
+ lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
+ }
+
+ outl(omr, DE4X5_OMR);
+
+ return 0;
+}
+
+static int
+de4x5_close(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ s32 imr, omr;
+
+ disable_ast(dev);
+
+ netif_stop_queue(dev);
+
+ if (de4x5_debug & DEBUG_CLOSE) {
+ printk("%s: Shutting down ethercard, status was %8.8x.\n",
+ dev->name, inl(DE4X5_STS));
+ }
+
+ /*
+ ** We stop the DE4X5 here... mask interrupts and stop TX & RX
+ */
+ DISABLE_IRQs;
+ STOP_DE4X5;
+
+ /* Free the associated irq */
+ free_irq(dev->irq, dev);
+ lp->state = CLOSED;
+
+ /* Free any socket buffers */
+ de4x5_free_rx_buffs(dev);
+ de4x5_free_tx_buffs(dev);
+
+ /* Put the adapter to sleep to save power */
+ yawn(dev, SLEEP);
+
+ return 0;
+}
+
+static struct net_device_stats *
+de4x5_get_stats(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
+
+ return &lp->stats;
+}
+
+static void
+de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int i;
+
+ for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
+ if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
+ lp->pktStats.bins[i]++;
+	    break;
+ }
+ }
+ if (is_multicast_ether_addr(buf)) {
+ if (is_broadcast_ether_addr(buf)) {
+ lp->pktStats.broadcast++;
+ } else {
+ lp->pktStats.multicast++;
+ }
+ } else if (compare_ether_addr(buf, dev->dev_addr) == 0) {
+ lp->pktStats.unicast++;
+ }
+
+ lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
+ if (lp->pktStats.bins[0] == 0) { /* Reset counters */
+ memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
+ }
+}
+
+/*
+** Removes the TD_IC flag from previous descriptor to improve TX performance.
+** If the flag is changed on a descriptor that is being read by the hardware,
+** I assume PCI transaction ordering will mean you are either successful or
+** just miss asserting the change to the hardware. Anyway you're messing with
+** a descriptor you don't own, but this shouldn't kill the chip provided
+** the descriptor register is read only to the hardware.
+*/
+static void
+load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int entry = (lp->tx_new ? lp->tx_new-1 : lp->txRingSize-1);
+ dma_addr_t buf_dma = dma_map_single(lp->gendev, buf, flags & TD_TBS1, DMA_TO_DEVICE);
+
+ lp->tx_ring[lp->tx_new].buf = cpu_to_le32(buf_dma);
+ lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER);
+ lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags);
+ lp->tx_skb[lp->tx_new] = skb;
+ lp->tx_ring[entry].des1 &= cpu_to_le32(~TD_IC);
+ barrier();
+
+ lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN);
+ barrier();
+}
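+
+/*
+** NB: T_OWN is written last, behind a barrier(), so the chip can never
+** see a half-built descriptor. The handshake load_packet() relies on,
+** in outline:
+**
+**     desc->buf    = cpu_to_le32(buf_dma);      1: buffer and flags
+**     desc->des1  |= cpu_to_le32(flags);
+**     barrier();                                2: order the writes
+**     desc->status = cpu_to_le32(T_OWN);        3: hand to the chip
+**     outl(POLL_DEMAND, DE4X5_TPD);             4: TX kick (by caller)
+*/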
+
+/*
+** Set or clear the multicast filter for this adaptor.
+*/
+static void
+set_multicast_list(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ /* First, double check that the adapter is open */
+ if (lp->state == OPEN) {
+ if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
+ u32 omr;
+ omr = inl(DE4X5_OMR);
+ omr |= OMR_PR;
+ outl(omr, DE4X5_OMR);
+ } else {
+ SetMulticastFilter(dev);
+ load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
+ SETUP_FRAME_LEN, (struct sk_buff *)1);
+
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
+ outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ }
+ }
+}
+
+/*
+** Calculate the hash code and update the logical address filter
+** from a list of ethernet multicast addresses.
+** Little endian crc one liner from Matt Thomas, DEC.
+*/
+static void
+SetMulticastFilter(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ u_long iobase = dev->base_addr;
+ int i, bit, byte;
+ u16 hashcode;
+ u32 omr, crc;
+ char *pa;
+ unsigned char *addrs;
+
+ omr = inl(DE4X5_OMR);
+ omr &= ~(OMR_PR | OMR_PM);
+ pa = build_setup_frame(dev, ALL); /* Build the basic frame */
+
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
+ omr |= OMR_PM; /* Pass all multicasts */
+ } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
+
+ byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
+ bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */
+
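+		/* Only the low 16 bits of each 32-bit setup frame word
+		   hold filter data: double the offset, then step back
+		   over bytes 2-3 of every longword */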
+ byte <<= 1; /* calc offset into setup frame */
+ if (byte & 0x02) {
+ byte -= 1;
+ }
+ lp->setup_frame[byte] |= bit;
+ }
+ } else { /* Perfect filtering */
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
+ for (i=0; i<ETH_ALEN; i++) {
+ *(pa + (i&1)) = *addrs++;
+ if (i & 0x01) pa += 4;
+ }
+ }
+ }
+ outl(omr, DE4X5_OMR);
+}
+
+#ifdef CONFIG_EISA
+
+static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
+
+static int __init de4x5_eisa_probe (struct device *gendev)
+{
+ struct eisa_device *edev;
+ u_long iobase;
+ u_char irq, regval;
+ u_short vendor;
+ u32 cfid;
+ int status, device;
+ struct net_device *dev;
+ struct de4x5_private *lp;
+
+ edev = to_eisa_device (gendev);
+ iobase = edev->base_addr;
+
+ if (!request_region (iobase, DE4X5_EISA_TOTAL_SIZE, "de4x5"))
+ return -EBUSY;
+
+ if (!request_region (iobase + DE4X5_EISA_IO_PORTS,
+ DE4X5_EISA_TOTAL_SIZE, "de4x5")) {
+ status = -EBUSY;
+ goto release_reg_1;
+ }
+
+ if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
+ status = -ENOMEM;
+ goto release_reg_2;
+ }
+ lp = netdev_priv(dev);
+
+ cfid = (u32) inl(PCI_CFID);
+ lp->cfrv = (u_short) inl(PCI_CFRV);
+ device = (cfid >> 8) & 0x00ffff00;
+ vendor = (u_short) cfid;
+
+ /* Read the EISA Configuration Registers */
+ regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT);
+#ifdef CONFIG_ALPHA
+ /* Looks like the Jensen firmware (rev 2.2) doesn't really
+ * care about the EISA configuration, and thus doesn't
+ * configure the PLX bridge properly. Oh well... Simply mimic
+ * the EISA config file to sort it out. */
+
+ /* EISA REG1: Assert DecChip 21040 HW Reset */
+ outb (ER1_IAM | 1, EISA_REG1);
+ mdelay (1);
+
+ /* EISA REG1: Deassert DecChip 21040 HW Reset */
+ outb (ER1_IAM, EISA_REG1);
+ mdelay (1);
+
+ /* EISA REG3: R/W Burst Transfer Enable */
+ outb (ER3_BWE | ER3_BRE, EISA_REG3);
+
+ /* 32_bit slave/master, Preempt Time=23 bclks, Unlatched Interrupt */
+ outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0);
+#endif
+ irq = de4x5_irq[(regval >> 1) & 0x03];
+
+ if (is_DC2114x) {
+ device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
+ }
+ lp->chipset = device;
+ lp->bus = EISA;
+
+ /* Write the PCI Configuration Registers */
+ outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
+ outl(0x00006000, PCI_CFLT);
+ outl(iobase, PCI_CBIO);
+
+ DevicePresent(dev, EISA_APROM);
+
+ dev->irq = irq;
+
+ if (!(status = de4x5_hw_init (dev, iobase, gendev))) {
+ return 0;
+ }
+
+ free_netdev (dev);
+ release_reg_2:
+ release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
+ release_reg_1:
+ release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
+
+ return status;
+}
+
+static int __devexit de4x5_eisa_remove (struct device *device)
+{
+ struct net_device *dev;
+ u_long iobase;
+
+ dev = dev_get_drvdata(device);
+ iobase = dev->base_addr;
+
+ unregister_netdev (dev);
+ free_netdev (dev);
+ release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
+ release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
+
+ return 0;
+}
+
+static struct eisa_device_id de4x5_eisa_ids[] = {
+ { "DEC4250", 0 }, /* 0 is the board name index... */
+ { "" }
+};
+MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
+
+static struct eisa_driver de4x5_eisa_driver = {
+ .id_table = de4x5_eisa_ids,
+ .driver = {
+ .name = "de4x5",
+ .probe = de4x5_eisa_probe,
+ .remove = __devexit_p (de4x5_eisa_remove),
+ }
+};
+#endif
+
+#ifdef CONFIG_PCI
+
+/*
+** This function searches the current bus (which is >0) for a DECchip with an
+** SROM, so that in multiport cards that have one SROM shared between multiple
+** DECchips, we can find the base SROM irrespective of the BIOS scan direction.
+** For single port cards this is a time waster...
+*/
+static void __devinit
+srom_search(struct net_device *dev, struct pci_dev *pdev)
+{
+ u_char pb;
+ u_short vendor, status;
+ u_int irq = 0, device;
+ u_long iobase = 0; /* Clear upper 32 bits in Alphas */
+ int i, j;
+ struct de4x5_private *lp = netdev_priv(dev);
+ struct list_head *walk;
+
+ list_for_each(walk, &pdev->bus_list) {
+ struct pci_dev *this_dev = pci_dev_b(walk);
+
+ /* Skip the pci_bus list entry */
+ if (list_entry(walk, struct pci_bus, devices) == pdev->bus) continue;
+
+ vendor = this_dev->vendor;
+ device = this_dev->device << 8;
+ if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;
+
+ /* Get the chip configuration revision register */
+ pb = this_dev->bus->number;
+
+ /* Set the device number information */
+ lp->device = PCI_SLOT(this_dev->devfn);
+ lp->bus_num = pb;
+
+ /* Set the chipset information */
+ if (is_DC2114x) {
+ device = ((this_dev->revision & CFRV_RN) < DC2114x_BRK
+ ? DC21142 : DC21143);
+ }
+ lp->chipset = device;
+
+ /* Get the board I/O address (64 bits on sparc64) */
+ iobase = pci_resource_start(this_dev, 0);
+
+ /* Fetch the IRQ to be used */
+ irq = this_dev->irq;
+ if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;
+
+ /* Check if I/O accesses are enabled */
+ pci_read_config_word(this_dev, PCI_COMMAND, &status);
+ if (!(status & PCI_COMMAND_IO)) continue;
+
+ /* Search for a valid SROM attached to this DECchip */
+ DevicePresent(dev, DE4X5_APROM);
+ for (j=0, i=0; i<ETH_ALEN; i++) {
+ j += (u_char) *((u_char *)&lp->srom + SROM_HWADD + i);
+ }
+ if (j != 0 && j != 6 * 0xff) {
+ last.chipset = device;
+ last.bus = pb;
+ last.irq = irq;
+ for (i=0; i<ETH_ALEN; i++) {
+ last.addr[i] = (u_char)*((u_char *)&lp->srom + SROM_HWADD + i);
+ }
+ return;
+ }
+ }
+}
+
+/*
+** PCI bus I/O device probe
+** NB: PCI I/O accesses and Bus Mastering are enabled by the PCI BIOS, not
+** the driver. Some PCI BIOSes, pre V2.1, need the slot + features to be
+** enabled by the user first in the set up utility. Hence we just check for
+** enabled features and silently ignore the card if they're not.
+**
+** STOP PRESS: Some BIOSes __require__ the driver to enable the bus mastering
+** bit. Here, check for I/O accesses and then set BM. If you put the card in
+** a non BM slot, you're on your own (and complain to the PC vendor that your
+** PC doesn't conform to the PCI standard)!
+**
+** This function is only compatible with the *latest* 2.1.x kernels. For 2.0.x
+** kernels use the V0.535[n] drivers.
+*/
+
+static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ u_char pb, pbus = 0, dev_num, dnum = 0, timer;
+ u_short vendor, status;
+ u_int irq = 0, device;
+ u_long iobase = 0; /* Clear upper 32 bits in Alphas */
+ int error;
+ struct net_device *dev;
+ struct de4x5_private *lp;
+
+ dev_num = PCI_SLOT(pdev->devfn);
+ pb = pdev->bus->number;
+
+ if (io) { /* probe a single PCI device */
+ pbus = (u_short)(io >> 8);
+ dnum = (u_short)(io & 0xff);
+ if ((pbus != pb) || (dnum != dev_num))
+ return -ENODEV;
+ }
+
+ vendor = pdev->vendor;
+ device = pdev->device << 8;
+ if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x))
+ return -ENODEV;
+
+ /* Ok, the device seems to be for us. */
+ if ((error = pci_enable_device (pdev)))
+ return error;
+
+ if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
+ error = -ENOMEM;
+ goto disable_dev;
+ }
+
+ lp = netdev_priv(dev);
+ lp->bus = PCI;
+ lp->bus_num = 0;
+
+ /* Search for an SROM on this bus */
+ if (lp->bus_num != pb) {
+ lp->bus_num = pb;
+ srom_search(dev, pdev);
+ }
+
+ /* Get the chip configuration revision register */
+ lp->cfrv = pdev->revision;
+
+ /* Set the device number information */
+ lp->device = dev_num;
+ lp->bus_num = pb;
+
+ /* Set the chipset information */
+ if (is_DC2114x) {
+ device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
+ }
+ lp->chipset = device;
+
+ /* Get the board I/O address (64 bits on sparc64) */
+ iobase = pci_resource_start(pdev, 0);
+
+ /* Fetch the IRQ to be used */
+ irq = pdev->irq;
+ if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) {
+ error = -ENODEV;
+ goto free_dev;
+ }
+
+ /* Check if I/O accesses and Bus Mastering are enabled */
+ pci_read_config_word(pdev, PCI_COMMAND, &status);
+#ifdef __powerpc__
+ if (!(status & PCI_COMMAND_IO)) {
+ status |= PCI_COMMAND_IO;
+ pci_write_config_word(pdev, PCI_COMMAND, status);
+ pci_read_config_word(pdev, PCI_COMMAND, &status);
+ }
+#endif /* __powerpc__ */
+ if (!(status & PCI_COMMAND_IO)) {
+ error = -ENODEV;
+ goto free_dev;
+ }
+
+ if (!(status & PCI_COMMAND_MASTER)) {
+ status |= PCI_COMMAND_MASTER;
+ pci_write_config_word(pdev, PCI_COMMAND, status);
+ pci_read_config_word(pdev, PCI_COMMAND, &status);
+ }
+ if (!(status & PCI_COMMAND_MASTER)) {
+ error = -ENODEV;
+ goto free_dev;
+ }
+
+ /* Check the latency timer for values >= 0x60 */
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &timer);
+ if (timer < 0x60) {
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x60);
+ }
+
+ DevicePresent(dev, DE4X5_APROM);
+
+ if (!request_region (iobase, DE4X5_PCI_TOTAL_SIZE, "de4x5")) {
+ error = -EBUSY;
+ goto free_dev;
+ }
+
+ dev->irq = irq;
+
+ if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) {
+ goto release;
+ }
+
+ return 0;
+
+ release:
+ release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
+ free_dev:
+ free_netdev (dev);
+ disable_dev:
+ pci_disable_device (pdev);
+ return error;
+}
+
+static void __devexit de4x5_pci_remove (struct pci_dev *pdev)
+{
+ struct net_device *dev;
+ u_long iobase;
+
+ dev = dev_get_drvdata(&pdev->dev);
+ iobase = dev->base_addr;
+
+ unregister_netdev (dev);
+ free_netdev (dev);
+ release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
+ pci_disable_device (pdev);
+}
+
+static struct pci_device_id de4x5_pci_tbl[] = {
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+ { },
+};
+
+static struct pci_driver de4x5_pci_driver = {
+ .name = "de4x5",
+ .id_table = de4x5_pci_tbl,
+ .probe = de4x5_pci_probe,
+ .remove = __devexit_p (de4x5_pci_remove),
+};
+
+#endif
+
+/*
+** Auto configure the media here rather than setting the port at compile
+** time. This routine is called by de4x5_init() and when a loss of media is
+** detected (excessive collisions, loss of carrier, no carrier or link fail
+** [TP] or no recent receive activity) to check whether the user has been
+** sneaky and changed the port on us.
+*/
+static int
+autoconf_media(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ disable_ast(dev);
+
+ lp->c_media = AUTO; /* Bogus last media */
+ inl(DE4X5_MFC); /* Zero the lost frames counter */
+ lp->media = INIT;
+ lp->tcount = 0;
+
+ de4x5_ast(dev);
+
+ return lp->media;
+}
+
+/*
+** Autoconfigure the media when using the DC21040. AUI cannot be distinguished
+** from BNC as the port has a jumper to set thick or thin wire. When set for
+** BNC, the BNC port will indicate activity if it's not terminated correctly.
+** The only way to test for that is to place a loopback packet onto the
+** network and watch for errors. Since we're messing with the interrupt mask
+** register, disable the board interrupts and do not allow any more packets to
+** be queued to the hardware. Re-enable everything only when the media is
+** found.
+** I may have to "age out" locally queued packets so that the higher layer
+** timeouts don't effectively duplicate packets on the network.
+*/
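+/*
+** The scan order below (taken from the switch statement) is
+**
+**     INIT -> TP -> BNC_AUI -> EXT_SIA -> NC -> INIT -> ...
+**
+** where a medium, once up, is only demoted through its *_SUSPECT state
+** re-test after the link goes quiet.
+*/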
+static int
+dc21040_autoconf(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+ s32 imr;
+
+ switch (lp->media) {
+ case INIT:
+ DISABLE_IRQs;
+ lp->tx_enable = false;
+ lp->timeout = -1;
+ de4x5_save_skbs(dev);
+ if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
+ lp->media = TP;
+ } else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) {
+ lp->media = BNC_AUI;
+ } else if (lp->autosense == EXT_SIA) {
+ lp->media = EXT_SIA;
+ } else {
+ lp->media = NC;
+ }
+ lp->local_state = 0;
+ next_tick = dc21040_autoconf(dev);
+ break;
+
+ case TP:
+ next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
+ TP_SUSPECT, test_tp);
+ break;
+
+ case TP_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
+ break;
+
+ case BNC:
+ case AUI:
+ case BNC_AUI:
+ next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
+ BNC_AUI_SUSPECT, ping_media);
+ break;
+
+ case BNC_AUI_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
+ break;
+
+ case EXT_SIA:
+ next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
+ NC, EXT_SIA_SUSPECT, ping_media);
+ break;
+
+ case EXT_SIA_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
+ break;
+
+ case NC:
+ /* default to TP for all */
+ reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tx_enable = false;
+ break;
+ }
+
+ return next_tick;
+}
+
+static int
+dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout,
+ int next_state, int suspect_state,
+ int (*fn)(struct net_device *, int))
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int next_tick = DE4X5_AUTOSENSE_MS;
+ int linkBad;
+
+ switch (lp->local_state) {
+ case 0:
+ reset_init_sia(dev, csr13, csr14, csr15);
+ lp->local_state++;
+ next_tick = 500;
+ break;
+
+ case 1:
+ if (!lp->tx_enable) {
+ linkBad = fn(dev, timeout);
+ if (linkBad < 0) {
+ next_tick = linkBad & ~TIMER_CB;
+ } else {
+ if (linkBad && (lp->autosense == AUTO)) {
+ lp->local_state = 0;
+ lp->media = next_state;
+ } else {
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = suspect_state;
+ next_tick = 3000;
+ }
+ break;
+ }
+
+ return next_tick;
+}
+
+static int
+de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state,
+ int (*fn)(struct net_device *, int),
+ int (*asfn)(struct net_device *))
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int next_tick = DE4X5_AUTOSENSE_MS;
+ int linkBad;
+
+ switch (lp->local_state) {
+ case 1:
+ if (lp->linkOK) {
+ lp->media = prev_state;
+ } else {
+ lp->local_state++;
+ next_tick = asfn(dev);
+ }
+ break;
+
+ case 2:
+ linkBad = fn(dev, timeout);
+ if (linkBad < 0) {
+ next_tick = linkBad & ~TIMER_CB;
+ } else if (!linkBad) {
+ lp->local_state--;
+ lp->media = prev_state;
+ } else {
+ lp->media = INIT;
+ lp->tcount++;
+ }
+ }
+
+ return next_tick;
+}
+
+/*
+** Autoconfigure the media when using the DC21041. AUI needs to be tested
+** before BNC, because the BNC port will indicate activity if it's not
+** terminated correctly. The only way to test for that is to place a loopback
+** packet onto the network and watch for errors. Since we're messing with
+** the interrupt mask register, disable the board interrupts and do not allow
+** any more packets to be queued to the hardware. Re-enable everything only
+** when the media is found.
+*/
+static int
+dc21041_autoconf(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ s32 sts, irqs, irq_mask, imr, omr;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ switch (lp->media) {
+ case INIT:
+ DISABLE_IRQs;
+ lp->tx_enable = false;
+ lp->timeout = -1;
+ de4x5_save_skbs(dev); /* Save non transmitted skb's */
+ if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
+ lp->media = TP; /* On chip auto negotiation is broken */
+ } else if (lp->autosense == TP) {
+ lp->media = TP;
+ } else if (lp->autosense == BNC) {
+ lp->media = BNC;
+ } else if (lp->autosense == AUI) {
+ lp->media = AUI;
+ } else {
+ lp->media = NC;
+ }
+ lp->local_state = 0;
+ next_tick = dc21041_autoconf(dev);
+ break;
+
+ case TP_NW:
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR);/* Set up full duplex for the autonegotiate */
+ outl(omr | OMR_FDX, DE4X5_OMR);
+ }
+ irqs = STS_LNF | STS_LNP;
+ irq_mask = IMR_LFM | IMR_LPM;
+ sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (sts & STS_LNP) {
+ lp->media = ANS;
+ } else {
+ lp->media = AUI;
+ }
+ next_tick = dc21041_autoconf(dev);
+ }
+ break;
+
+ case ANS:
+ if (!lp->tx_enable) {
+ irqs = STS_LNP;
+ irq_mask = IMR_LPM;
+ sts = test_ans(dev, irqs, irq_mask, 3000);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
+ lp->media = TP;
+ next_tick = dc21041_autoconf(dev);
+ } else {
+ lp->local_state = 1;
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = ANS_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+
+ case ANS_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
+ break;
+
+ case TP:
+ if (!lp->tx_enable) {
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR); /* Set up half duplex for TP */
+ outl(omr & ~OMR_FDX, DE4X5_OMR);
+ }
+ irqs = STS_LNF | STS_LNP;
+ irq_mask = IMR_LFM | IMR_LPM;
+ sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
+ if (inl(DE4X5_SISR) & SISR_NRA) {
+ lp->media = AUI; /* Non selected port activity */
+ } else {
+ lp->media = BNC;
+ }
+ next_tick = dc21041_autoconf(dev);
+ } else {
+ lp->local_state = 1;
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = TP_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+
+ case TP_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
+ break;
+
+ case AUI:
+ if (!lp->tx_enable) {
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
+ outl(omr & ~OMR_FDX, DE4X5_OMR);
+ }
+ irqs = 0;
+ irq_mask = 0;
+ sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
+ lp->media = BNC;
+ next_tick = dc21041_autoconf(dev);
+ } else {
+ lp->local_state = 1;
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = AUI_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+
+ case AUI_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
+ break;
+
+ case BNC:
+ switch (lp->local_state) {
+ case 0:
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
+ outl(omr & ~OMR_FDX, DE4X5_OMR);
+ }
+ irqs = 0;
+ irq_mask = 0;
+ sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ lp->local_state++; /* Ensure media connected */
+ next_tick = dc21041_autoconf(dev);
+ }
+ break;
+
+ case 1:
+ if (!lp->tx_enable) {
+ if ((sts = ping_media(dev, 3000)) < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (sts) {
+ lp->local_state = 0;
+ lp->media = NC;
+ } else {
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = BNC_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+ }
+ break;
+
+ case BNC_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
+ break;
+
+ case NC:
+ omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */
+ outl(omr | OMR_FDX, DE4X5_OMR);
+ reset_init_sia(dev, 0xef01, 0xffff, 0x0008);/* Initialise the SIA */
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tx_enable = false;
+ break;
+ }
+
+ return next_tick;
+}
+
+/*
+** Some autonegotiation chips are broken in that they do not return the
+** acknowledge bit (anlpa & MII_ANLPA_ACK) in the link partner advertisement
+** register, except at the first power up negotiation.
+*/
+static int
+dc21140m_autoconf(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int ana, anlpa, cap, cr, slnk, sr;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+ u_long imr, omr, iobase = dev->base_addr;
+
+ switch(lp->media) {
+ case INIT:
+ if (lp->timeout < 0) {
+ DISABLE_IRQs;
+ lp->tx_enable = false;
+ lp->linkOK = 0;
+ de4x5_save_skbs(dev); /* Save non transmitted skb's */
+ }
+ if ((next_tick = de4x5_reset_phy(dev)) < 0) {
+ next_tick &= ~TIMER_CB;
+ } else {
+ if (lp->useSROM) {
+ if (srom_map_media(dev) < 0) {
+ lp->tcount++;
+ return next_tick;
+ }
+ srom_exec(dev, lp->phy[lp->active].gep);
+ if (lp->infoblock_media == ANS) {
+ ana = lp->phy[lp->active].ana | MII_ANA_CSMA;
+ mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
+ }
+ } else {
+ lp->tmp = MII_SR_ASSC; /* Fake out the MII speed set */
+ SET_10Mb;
+ if (lp->autosense == _100Mb) {
+ lp->media = _100Mb;
+ } else if (lp->autosense == _10Mb) {
+ lp->media = _10Mb;
+ } else if ((lp->autosense == AUTO) &&
+ ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
+ ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
+ ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
+ mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
+ lp->media = ANS;
+ } else if (lp->autosense == AUTO) {
+ lp->media = SPD_DET;
+ } else if (is_spd_100(dev) && is_100_up(dev)) {
+ lp->media = _100Mb;
+ } else {
+ lp->media = NC;
+ }
+ }
+ lp->local_state = 0;
+ next_tick = dc21140m_autoconf(dev);
+ }
+ break;
+
+ case ANS:
+ switch (lp->local_state) {
+ case 0:
+ if (lp->timeout < 0) {
+ mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
+ }
+ cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
+ if (cr < 0) {
+ next_tick = cr & ~TIMER_CB;
+ } else {
+ if (cr) {
+ lp->local_state = 0;
+ lp->media = SPD_DET;
+ } else {
+ lp->local_state++;
+ }
+ next_tick = dc21140m_autoconf(dev);
+ }
+ break;
+
+ case 1:
+ if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000)) < 0) {
+ next_tick = sr & ~TIMER_CB;
+ } else {
+ lp->media = SPD_DET;
+ lp->local_state = 0;
+ if (sr) { /* Success! */
+ lp->tmp = MII_SR_ASSC;
+ anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
+ ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
+ if (!(anlpa & MII_ANLPA_RF) &&
+ (cap = anlpa & MII_ANLPA_TAF & ana)) {
+ if (cap & MII_ANA_100M) {
+ lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
+ lp->media = _100Mb;
+ } else if (cap & MII_ANA_10M) {
+ lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
+
+ lp->media = _10Mb;
+ }
+ }
+ } /* Auto Negotiation failed to finish */
+ next_tick = dc21140m_autoconf(dev);
+ } /* Auto Negotiation failed to start */
+ break;
+ }
+ break;
+
+ case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
+ if (lp->timeout < 0) {
+ lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
+ (~gep_rd(dev) & GEP_LNP));
+ SET_100Mb_PDET;
+ }
+ if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
+ next_tick = slnk & ~TIMER_CB;
+ } else {
+ if (is_spd_100(dev) && is_100_up(dev)) {
+ lp->media = _100Mb;
+ } else if ((!is_spd_100(dev) && (is_10_up(dev) & lp->tmp))) {
+ lp->media = _10Mb;
+ } else {
+ lp->media = NC;
+ }
+ next_tick = dc21140m_autoconf(dev);
+ }
+ break;
+
+ case _100Mb: /* Set 100Mb/s */
+ next_tick = 3000;
+ if (!lp->tx_enable) {
+ SET_100Mb;
+ de4x5_init_connection(dev);
+ } else {
+ if (!lp->linkOK && (lp->autosense == AUTO)) {
+ if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
+ lp->media = INIT;
+ lp->tcount++;
+ next_tick = DE4X5_AUTOSENSE_MS;
+ }
+ }
+ }
+ break;
+
+ case BNC:
+ case AUI:
+ case _10Mb: /* Set 10Mb/s */
+ next_tick = 3000;
+ if (!lp->tx_enable) {
+ SET_10Mb;
+ de4x5_init_connection(dev);
+ } else {
+ if (!lp->linkOK && (lp->autosense == AUTO)) {
+ if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
+ lp->media = INIT;
+ lp->tcount++;
+ next_tick = DE4X5_AUTOSENSE_MS;
+ }
+ }
+ }
+ break;
+
+ case NC:
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tx_enable = false;
+ break;
+ }
+
+ return next_tick;
+}
+
+/*
+** This routine may be merged into dc21140m_autoconf() sometime as I'm
+** changing how I figure out the media - but trying to keep it backwards
+** compatible with the de500-xa and de500-aa.
+** Whether it's BNC, AUI, SYM or MII is sorted out in the infoblock
+** functions and set during de4x5_mac_port() and/or de4x5_reset_phy().
+** This routine just has to figure out whether 10Mb/s or 100Mb/s is
+** active.
+** When autonegotiation is working, the ANS part searches the SROM for
+** the highest common speed (TP) link that both can run and if that can
+** be full duplex. That infoblock is executed and then the link speed set.
+**
+** Only _10Mb and _100Mb are tested here.
+*/
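+/*
+** In outline (from the switch below): INIT maps any fixed medium that
+** was requested, else selects ANS when the PHY can autonegotiate and
+** SPD_DET otherwise; ANS drops into SPD_DET once negotiation completes
+** (or fails); SPD_DET maps the SROM medium and settles on _100Mb or
+** _10Mb. AUI and BNC have their own probe and *_SUSPECT states.
+*/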
+static int
+dc2114x_autoconf(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ s32 cr, anlpa, ana, cap, irqs, irq_mask, imr, omr, slnk, sr, sts;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ switch (lp->media) {
+ case INIT:
+ if (lp->timeout < 0) {
+ DISABLE_IRQs;
+ lp->tx_enable = false;
+ lp->linkOK = 0;
+ lp->timeout = -1;
+ de4x5_save_skbs(dev); /* Save non transmitted skb's */
+ if (lp->params.autosense & ~AUTO) {
+ srom_map_media(dev); /* Fixed media requested */
+ if (lp->media != lp->params.autosense) {
+ lp->tcount++;
+ lp->media = INIT;
+ return next_tick;
+ }
+ lp->media = INIT;
+ }
+ }
+ if ((next_tick = de4x5_reset_phy(dev)) < 0) {
+ next_tick &= ~TIMER_CB;
+ } else {
+ if (lp->autosense == _100Mb) {
+ lp->media = _100Mb;
+ } else if (lp->autosense == _10Mb) {
+ lp->media = _10Mb;
+ } else if (lp->autosense == TP) {
+ lp->media = TP;
+ } else if (lp->autosense == BNC) {
+ lp->media = BNC;
+ } else if (lp->autosense == AUI) {
+ lp->media = AUI;
+ } else {
+ lp->media = SPD_DET;
+ if ((lp->infoblock_media == ANS) &&
+ ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
+ ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
+ ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
+ mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
+ lp->media = ANS;
+ }
+ }
+ lp->local_state = 0;
+ next_tick = dc2114x_autoconf(dev);
+ }
+ break;
+
+ case ANS:
+ switch (lp->local_state) {
+ case 0:
+ if (lp->timeout < 0) {
+ mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
+ }
+ cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
+ if (cr < 0) {
+ next_tick = cr & ~TIMER_CB;
+ } else {
+ if (cr) {
+ lp->local_state = 0;
+ lp->media = SPD_DET;
+ } else {
+ lp->local_state++;
+ }
+ next_tick = dc2114x_autoconf(dev);
+ }
+ break;
+
+ case 1:
+ sr = test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000);
+ if (sr < 0) {
+ next_tick = sr & ~TIMER_CB;
+ } else {
+ lp->media = SPD_DET;
+ lp->local_state = 0;
+ if (sr) { /* Success! */
+ lp->tmp = MII_SR_ASSC;
+ anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
+ ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
+ if (!(anlpa & MII_ANLPA_RF) &&
+ (cap = anlpa & MII_ANLPA_TAF & ana)) {
+ if (cap & MII_ANA_100M) {
+ lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
+ lp->media = _100Mb;
+ } else if (cap & MII_ANA_10M) {
+ lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
+ lp->media = _10Mb;
+ }
+ }
+ } /* Auto Negotiation failed to finish */
+ next_tick = dc2114x_autoconf(dev);
+ } /* Auto Negotiation failed to start */
+ break;
+ }
+ break;
+
+ case AUI:
+ if (!lp->tx_enable) {
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
+ outl(omr & ~OMR_FDX, DE4X5_OMR);
+ }
+ irqs = 0;
+ irq_mask = 0;
+ sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
+ lp->media = BNC;
+ next_tick = dc2114x_autoconf(dev);
+ } else {
+ lp->local_state = 1;
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = AUI_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+
+ case AUI_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
+ break;
+
+ case BNC:
+ switch (lp->local_state) {
+ case 0:
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
+ outl(omr & ~OMR_FDX, DE4X5_OMR);
+ }
+ irqs = 0;
+ irq_mask = 0;
+ sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ lp->local_state++; /* Ensure media connected */
+ next_tick = dc2114x_autoconf(dev);
+ }
+ break;
+
+ case 1:
+ if (!lp->tx_enable) {
+ if ((sts = ping_media(dev, 3000)) < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (sts) {
+ lp->local_state = 0;
+ lp->tcount++;
+ lp->media = INIT;
+ } else {
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = BNC_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+ }
+ break;
+
+ case BNC_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
+ break;
+
+ case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
+ if (srom_map_media(dev) < 0) {
+ lp->tcount++;
+ lp->media = INIT;
+ return next_tick;
+ }
+ if (lp->media == _100Mb) {
+ if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
+ lp->media = SPD_DET;
+ return slnk & ~TIMER_CB;
+ }
+ } else {
+ if (wait_for_link(dev) < 0) {
+ lp->media = SPD_DET;
+ return PDET_LINK_WAIT;
+ }
+ }
+ if (lp->media == ANS) { /* Do MII parallel detection */
+ if (is_spd_100(dev)) {
+ lp->media = _100Mb;
+ } else {
+ lp->media = _10Mb;
+ }
+ next_tick = dc2114x_autoconf(dev);
+ } else if (((lp->media == _100Mb) && is_100_up(dev)) ||
+ (((lp->media == _10Mb) || (lp->media == TP) ||
+ (lp->media == BNC) || (lp->media == AUI)) &&
+ is_10_up(dev))) {
+ next_tick = dc2114x_autoconf(dev);
+ } else {
+ lp->tcount++;
+ lp->media = INIT;
+ }
+ break;
+
+ case _10Mb:
+ next_tick = 3000;
+ if (!lp->tx_enable) {
+ SET_10Mb;
+ de4x5_init_connection(dev);
+ } else {
+ if (!lp->linkOK && (lp->autosense == AUTO)) {
+ if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
+ lp->media = INIT;
+ lp->tcount++;
+ next_tick = DE4X5_AUTOSENSE_MS;
+ }
+ }
+ }
+ break;
+
+ case _100Mb:
+ next_tick = 3000;
+ if (!lp->tx_enable) {
+ SET_100Mb;
+ de4x5_init_connection(dev);
+ } else {
+ if (!lp->linkOK && (lp->autosense == AUTO)) {
+ if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
+ lp->media = INIT;
+ lp->tcount++;
+ next_tick = DE4X5_AUTOSENSE_MS;
+ }
+ }
+ }
+ break;
+
+ default:
+ lp->tcount++;
+	printk("Huh?: media:%02x\n", lp->media);
+ lp->media = INIT;
+ break;
+ }
+
+ return next_tick;
+}
+
+static int
+srom_autoconf(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+
+ return lp->infoleaf_fn(dev);
+}
+
+/*
+** This mapping keeps the original media codes and FDX flag unchanged.
+** While it isn't strictly necessary, it helps me for the moment...
+** The early return avoids a media state / SROM media space clash.
+*/
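+/*
+** In summary (from the switch below):
+**
+**     SROM media code            lp->media     notes
+**     10BASET / 10BASETF         TP or _10Mb   TF also sets lp->fdx
+**     10BASE2                    BNC
+**     10BASE5                    AUI
+**     100BASET[F] / 100BASET4    _100Mb        TF also sets lp->fdx
+**     100BASEF[F]                _100Mb        FF also sets lp->fdx
+**     ANS                        ANS           fdx from lp->params.fdx
+*/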
+static int
+srom_map_media(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+
+ lp->fdx = false;
+ if (lp->infoblock_media == lp->media)
+ return 0;
+
+ switch(lp->infoblock_media) {
+ case SROM_10BASETF:
+ if (!lp->params.fdx) return -1;
+ lp->fdx = true;
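+	/* fall through */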
+ case SROM_10BASET:
+ if (lp->params.fdx && !lp->fdx) return -1;
+ if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
+ lp->media = _10Mb;
+ } else {
+ lp->media = TP;
+ }
+ break;
+
+ case SROM_10BASE2:
+ lp->media = BNC;
+ break;
+
+ case SROM_10BASE5:
+ lp->media = AUI;
+ break;
+
+ case SROM_100BASETF:
+ if (!lp->params.fdx) return -1;
+ lp->fdx = true;
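+	/* fall through */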
+ case SROM_100BASET:
+ if (lp->params.fdx && !lp->fdx) return -1;
+ lp->media = _100Mb;
+ break;
+
+ case SROM_100BASET4:
+ lp->media = _100Mb;
+ break;
+
+ case SROM_100BASEFF:
+ if (!lp->params.fdx) return -1;
+ lp->fdx = true;
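+	/* fall through */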
+ case SROM_100BASEF:
+ if (lp->params.fdx && !lp->fdx) return -1;
+ lp->media = _100Mb;
+ break;
+
+ case ANS:
+ lp->media = ANS;
+ lp->fdx = lp->params.fdx;
+ break;
+
+ default:
+ printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
+ lp->infoblock_media);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+de4x5_init_connection(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ u_long flags = 0;
+
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media; /* Stop scrolling media messages */
+ }
+
+ spin_lock_irqsave(&lp->lock, flags);
+ de4x5_rst_desc_ring(dev);
+ de4x5_setup_intr(dev);
+ lp->tx_enable = true;
+ spin_unlock_irqrestore(&lp->lock, flags);
+ outl(POLL_DEMAND, DE4X5_TPD);
+
+ netif_wake_queue(dev);
+}
+
+/*
+** General PHY reset function. Some MII devices don't reset correctly
+** since their MII address pins can float at voltages that are dependent
+** on the signal pin use. Do a double reset to ensure the device resets.
+*/
+static int
+de4x5_reset_phy(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int next_tick = 0;
+
+ if ((lp->useSROM) || (lp->phy[lp->active].id)) {
+ if (lp->timeout < 0) {
+ if (lp->useSROM) {
+ if (lp->phy[lp->active].rst) {
+ srom_exec(dev, lp->phy[lp->active].rst);
+ srom_exec(dev, lp->phy[lp->active].rst);
+ } else if (lp->rst) { /* Type 5 infoblock reset */
+ srom_exec(dev, lp->rst);
+ srom_exec(dev, lp->rst);
+ }
+ } else {
+ PHY_HARD_RESET;
+ }
+ if (lp->useMII) {
+ mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
+ }
+ }
+ if (lp->useMII) {
+ next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, false, 500);
+ }
+ } else if (lp->chipset == DC21140) {
+ PHY_HARD_RESET;
+ }
+
+ return next_tick;
+}
+
+static int
+test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ s32 sts, csr12;
+
+ if (lp->timeout < 0) {
+ lp->timeout = msec/100;
+ if (!lp->useSROM) { /* Already done if by SROM, else dc2104[01] */
+ reset_init_sia(dev, csr13, csr14, csr15);
+ }
+
+ /* set up the interrupt mask */
+ outl(irq_mask, DE4X5_IMR);
+
+ /* clear all pending interrupts */
+ sts = inl(DE4X5_STS);
+ outl(sts, DE4X5_STS);
+
+ /* clear csr12 NRA and SRA bits */
+ if ((lp->chipset == DC21041) || lp->useSROM) {
+ csr12 = inl(DE4X5_SISR);
+ outl(csr12, DE4X5_SISR);
+ }
+ }
+
+ sts = inl(DE4X5_STS) & ~TIMER_CB;
+
+ if (!(sts & irqs) && --lp->timeout) {
+ sts = 100 | TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return sts;
+}
+
+static int
+test_tp(struct net_device *dev, s32 msec)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int sisr;
+
+ if (lp->timeout < 0) {
+ lp->timeout = msec/100;
+ }
+
+ sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
+
+ if (sisr && --lp->timeout) {
+ sisr = 100 | TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return sisr;
+}
+
+/*
+** Samples the 100Mb Link State Signal. The sample interval is important
+** because too fast a rate can give erroneous results and confuse the
+** speed sense algorithm.
+*/
+#define SAMPLE_INTERVAL 500 /* ms */
+#define SAMPLE_DELAY 2000 /* ms */
+static int
+test_for_100Mb(struct net_device *dev, int msec)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int gep = 0, ret = ((lp->chipset & ~0x00ff) == DC2114x ? -1 : GEP_SLNK);
+
+ if (lp->timeout < 0) {
+ if ((msec/SAMPLE_INTERVAL) <= 0) return 0;
+ if (msec > SAMPLE_DELAY) {
+ lp->timeout = (msec - SAMPLE_DELAY)/SAMPLE_INTERVAL;
+ gep = SAMPLE_DELAY | TIMER_CB;
+ return gep;
+ } else {
+ lp->timeout = msec/SAMPLE_INTERVAL;
+ }
+ }
+
+ if (lp->phy[lp->active].id || lp->useSROM) {
+ gep = is_100_up(dev) | is_spd_100(dev);
+ } else {
+ gep = (~gep_rd(dev) & (GEP_SLNK | GEP_LNP));
+ }
+ if (!(gep & ret) && --lp->timeout) {
+ gep = SAMPLE_INTERVAL | TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return gep;
+}
+
+static int
+wait_for_link(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+
+ if (lp->timeout < 0) {
+ lp->timeout = 1;
+ }
+
+ if (lp->timeout--) {
+ return TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return 0;
+}
+
+/*
+** Poll an MII register until the masked bits match the required
+** polarity, or the timeout expires.
+*/
+static int
+test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int test;
+ u_long iobase = dev->base_addr;
+
+ if (lp->timeout < 0) {
+ lp->timeout = msec/100;
+ }
+
+ reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
+ test = (reg ^ (pol ? ~0 : 0)) & mask;
+
+ if (test && --lp->timeout) {
+ reg = 100 | TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return reg;
+}
+
+static int
+is_spd_100(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int spd;
+
+ if (lp->useMII) {
+ spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
+ spd = ~(spd ^ lp->phy[lp->active].spd.value);
+ spd &= lp->phy[lp->active].spd.mask;
+ } else if (!lp->useSROM) { /* de500-xa */
+ spd = ((~gep_rd(dev)) & GEP_SLNK);
+ } else {
+ if ((lp->ibn == 2) || !lp->asBitValid)
+ return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
+
+ spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
+ (lp->linkOK & ~lp->asBitValid);
+ }
+
+ return spd;
+}
+
+static int
+is_100_up(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ if (lp->useMII) {
+ /* Double read for sticky bits & temporary drops */
+ mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
+ return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
+ } else if (!lp->useSROM) { /* de500-xa */
+ return (~gep_rd(dev)) & GEP_SLNK;
+ } else {
+ if ((lp->ibn == 2) || !lp->asBitValid)
+ return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
+
+ return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
+ (lp->linkOK & ~lp->asBitValid);
+ }
+}
+
+static int
+is_10_up(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ if (lp->useMII) {
+ /* Double read for sticky bits & temporary drops */
+ mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
+ return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
+ } else if (!lp->useSROM) { /* de500-xa */
+ return (~gep_rd(dev)) & GEP_LNP;
+ } else {
+ if ((lp->ibn == 2) || !lp->asBitValid)
+ return ((lp->chipset & ~0x00ff) == DC2114x) ?
+ (~inl(DE4X5_SISR)&SISR_LS10):
+ 0;
+
+ return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
+ (lp->linkOK & ~lp->asBitValid);
+ }
+}
+
+static int
+is_anc_capable(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
+ return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
+ } else if ((lp->chipset & ~0x00ff) == DC2114x) {
+ return (inl(DE4X5_SISR) & SISR_LPN) >> 12;
+ } else {
+ return 0;
+ }
+}
+
+/*
+** Send a packet onto the media and watch for send errors that indicate the
+** media is bad or unconnected.
+*/
+static int
+ping_media(struct net_device *dev, int msec)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int sisr;
+
+ if (lp->timeout < 0) {
+ lp->timeout = msec/100;
+
+ lp->tmp = lp->tx_new; /* Remember the ring position */
+ load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
+ outl(POLL_DEMAND, DE4X5_TPD);
+ }
+
+ sisr = inl(DE4X5_SISR);
+
+ if ((!(sisr & SISR_NCR)) &&
+ ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
+ (--lp->timeout)) {
+ sisr = 100 | TIMER_CB;
+ } else {
+ if ((!(sisr & SISR_NCR)) &&
+ !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
+ lp->timeout) {
+ sisr = 0;
+ } else {
+ sisr = 1;
+ }
+ lp->timeout = -1;
+ }
+
+ return sisr;
+}
+
+/*
+** This function does two things: on most platforms it allocates another
+** skb to replace the one about to be passed up. On Alpha, PowerPC and
+** SPARC (or when DE4X5_DO_MEMCPY is set) it allocates an skb into which
+** the packet is copied.
+*/
+static struct sk_buff *
+de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ struct sk_buff *p;
+
+#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
+ struct sk_buff *ret;
+ u_long i=0, tmp;
+
+ p = dev_alloc_skb(IEEE802_3_SZ + DE4X5_ALIGN + 2);
+ if (!p) return NULL;
+
+ tmp = virt_to_bus(p->data);
+ i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp; /* bytes to the next alignment boundary */
+ skb_reserve(p, i);
+ lp->rx_ring[index].buf = cpu_to_le32(tmp + i);
+
+ ret = lp->rx_skb[index];
+ lp->rx_skb[index] = p;
+
+ if ((u_long) ret > 1) {
+ skb_put(ret, len);
+ }
+
+ return ret;
+
+#else
+ if (lp->state != OPEN) return (struct sk_buff *)1; /* Fake out the open */
+
+ p = dev_alloc_skb(len + 2);
+ if (!p) return NULL;
+
+ skb_reserve(p, 2); /* Align */
+ if (index < lp->rx_old) { /* Wrapped buffer */
+ short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
+ memcpy(skb_put(p,tlen),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,tlen);
+ memcpy(skb_put(p,len-tlen),lp->rx_bufs,len-tlen);
+ } else { /* Linear buffer */
+ memcpy(skb_put(p,len),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,len);
+ }
+
+ return p;
+#endif
+}
+
+static void
+de4x5_free_rx_buffs(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int i;
+
+ for (i=0; i<lp->rxRingSize; i++) {
+ if ((u_long) lp->rx_skb[i] > 1) {
+ dev_kfree_skb(lp->rx_skb[i]);
+ }
+ lp->rx_ring[i].status = 0;
+ lp->rx_skb[i] = (struct sk_buff *)1; /* Dummy entry */
+ }
+}
+
+static void
+de4x5_free_tx_buffs(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int i;
+
+ for (i=0; i<lp->txRingSize; i++) {
+ if (lp->tx_skb[i])
+ de4x5_free_tx_buff(lp, i);
+ lp->tx_ring[i].status = 0;
+ }
+
+ /* Unload the locally queued packets */
+ __skb_queue_purge(&lp->cache.queue);
+}
+
+/*
+** When a user pulls a connection, the DECchip can end up in a
+** 'running - waiting for end of transmission' state. This means that we
+** have to perform a chip soft reset to ensure that we can synchronize
+** the hardware and software and make any media probes using a loopback
+** packet meaningful.
+*/
+static void
+de4x5_save_skbs(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ s32 omr;
+
+ if (!lp->cache.save_cnt) {
+ STOP_DE4X5;
+ de4x5_tx(dev); /* Flush any sent skb's */
+ de4x5_free_tx_buffs(dev);
+ de4x5_cache_state(dev, DE4X5_SAVE_STATE);
+ de4x5_sw_reset(dev);
+ de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
+ lp->cache.save_cnt++;
+ START_DE4X5;
+ }
+}
+
+static void
+de4x5_rst_desc_ring(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int i;
+ s32 omr;
+
+ if (lp->cache.save_cnt) {
+ STOP_DE4X5;
+ outl(lp->dma_rings, DE4X5_RRBA);
+ outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
+ DE4X5_TRBA);
+
+ lp->rx_new = lp->rx_old = 0;
+ lp->tx_new = lp->tx_old = 0;
+
+ for (i = 0; i < lp->rxRingSize; i++) {
+ lp->rx_ring[i].status = cpu_to_le32(R_OWN);
+ }
+
+ for (i = 0; i < lp->txRingSize; i++) {
+ lp->tx_ring[i].status = cpu_to_le32(0);
+ }
+
+ barrier();
+ lp->cache.save_cnt--;
+ START_DE4X5;
+ }
+}
+
+static void
+de4x5_cache_state(struct net_device *dev, int flag)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ switch(flag) {
+ case DE4X5_SAVE_STATE:
+ lp->cache.csr0 = inl(DE4X5_BMR);
+ lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR));
+ lp->cache.csr7 = inl(DE4X5_IMR);
+ break;
+
+ case DE4X5_RESTORE_STATE:
+ outl(lp->cache.csr0, DE4X5_BMR);
+ outl(lp->cache.csr6, DE4X5_OMR);
+ outl(lp->cache.csr7, DE4X5_IMR);
+ if (lp->chipset == DC21140) {
+ gep_wr(lp->cache.gepc, dev);
+ gep_wr(lp->cache.gep, dev);
+ } else {
+ reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
+ lp->cache.csr15);
+ }
+ break;
+ }
+}
+
+static void
+de4x5_put_cache(struct net_device *dev, struct sk_buff *skb)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+
+ __skb_queue_tail(&lp->cache.queue, skb);
+}
+
+static void
+de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+
+ __skb_queue_head(&lp->cache.queue, skb);
+}
+
+static struct sk_buff *
+de4x5_get_cache(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+
+ return __skb_dequeue(&lp->cache.queue);
+}
+
+/*
+** Check the Auto Negotiation State. Return OK when a link pass interrupt
+** is received and the auto-negotiation status is NWAY OK.
+*/
+static int
+test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ s32 sts, ans;
+
+ if (lp->timeout < 0) {
+ lp->timeout = msec/100;
+ outl(irq_mask, DE4X5_IMR);
+
+ /* clear all pending interrupts */
+ sts = inl(DE4X5_STS);
+ outl(sts, DE4X5_STS);
+ }
+
+ ans = inl(DE4X5_SISR) & SISR_ANS;
+ sts = inl(DE4X5_STS) & ~TIMER_CB;
+
+ if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
+ sts = 100 | TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return sts;
+}
+
+static void
+de4x5_setup_intr(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ s32 imr, sts;
+
+ if (inl(DE4X5_OMR) & OMR_SR) { /* Only unmask if TX/RX is enabled */
+ imr = 0;
+ UNMASK_IRQs;
+ sts = inl(DE4X5_STS); /* Reset any pending (stale) interrupts */
+ outl(sts, DE4X5_STS);
+ ENABLE_IRQs;
+ }
+}
+
+/*
+** Reset and (re)initialise the SIA from csr13/14/15, using the cached
+** SROM values where appropriate.
+*/
+static void
+reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ RESET_SIA;
+ if (lp->useSROM) {
+ if (lp->ibn == 3) {
+ srom_exec(dev, lp->phy[lp->active].rst);
+ srom_exec(dev, lp->phy[lp->active].gep);
+ outl(1, DE4X5_SICR);
+ return;
+ } else {
+ csr15 = lp->cache.csr15;
+ csr14 = lp->cache.csr14;
+ csr13 = lp->cache.csr13;
+ outl(csr15 | lp->cache.gepc, DE4X5_SIGR);
+ outl(csr15 | lp->cache.gep, DE4X5_SIGR);
+ }
+ } else {
+ outl(csr15, DE4X5_SIGR);
+ }
+ outl(csr14, DE4X5_STRR);
+ outl(csr13, DE4X5_SICR);
+
+ mdelay(10);
+}
+
+/*
+** Create a loopback ethernet packet
+*/
+static void
+create_packet(struct net_device *dev, char *frame, int len)
+{
+ int i;
+ char *buf = frame;
+
+ for (i=0; i<ETH_ALEN; i++) { /* Use this source address */
+ *buf++ = dev->dev_addr[i];
+ }
+ for (i=0; i<ETH_ALEN; i++) { /* Use this destination address */
+ *buf++ = dev->dev_addr[i];
+ }
+
+ *buf++ = 0; /* Packet length (2 bytes) */
+ *buf++ = 1;
+}
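+
+/*
+** A sketch of the loopback frame built above: destination and source
+** are both the station's own address, followed by a 2 byte big-endian
+** length field of 0x0001. For an (illustrative) address of
+** 00:80:48:12:34:56 the frame starts:
+**
+**   00 80 48 12 34 56  00 80 48 12 34 56  00 01
+*/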
+
+/*
+** Look for a particular board name in the EISA configuration space
+*/
+static int
+EISA_signature(char *name, struct device *device)
+{
+ int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
+ struct eisa_device *edev;
+
+ *name = '\0';
+ edev = to_eisa_device (device);
+ i = edev->id.driver_data;
+
+ if (i >= 0 && i < siglen) {
+ strcpy (name, de4x5_signatures[i]);
+ status = 1;
+ }
+
+ return status; /* return the device name string */
+}
+
+/*
+** Look for a particular board name in the PCI configuration space
+*/
+static int
+PCI_signature(char *name, struct de4x5_private *lp)
+{
+ int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
+
+ if (lp->chipset == DC21040) {
+ strcpy(name, "DE434/5");
+ return status;
+ } else { /* Search for a DEC name in the SROM */
+ int tmp = *((char *)&lp->srom + 19) * 3;
+ strncpy(name, (char *)&lp->srom + 26 + tmp, 8);
+ }
+ name[8] = '\0';
+ for (i=0; i<siglen; i++) {
+ if (strstr(name,de4x5_signatures[i])!=NULL) break;
+ }
+ if (i == siglen) {
+ if (dec_only) {
+ *name = '\0';
+ } else { /* Use chip name to avoid confusion */
+ strcpy(name, (((lp->chipset == DC21040) ? "DC21040" :
+ ((lp->chipset == DC21041) ? "DC21041" :
+ ((lp->chipset == DC21140) ? "DC21140" :
+ ((lp->chipset == DC21142) ? "DC21142" :
+ ((lp->chipset == DC21143) ? "DC21143" : "UNKNOWN"
+ )))))));
+ }
+ if (lp->chipset != DC21041) {
+ lp->useSROM = true; /* card is not recognisably DEC */
+ }
+ } else if ((lp->chipset & ~0x00ff) == DC2114x) {
+ lp->useSROM = true;
+ }
+
+ return status;
+}
+
+/*
+** Set up the Ethernet PROM counter to the start of the Ethernet address on
+** the DC21040, else read the SROM for the other chips.
+** The SROM may not be present in a multi-MAC card, so first read the
+** MAC address and check for a bad address. If there is a bad one then exit
+** immediately with the prior srom contents intact (the h/w address will
+** be fixed up later).
+*/
+static void
+DevicePresent(struct net_device *dev, u_long aprom_addr)
+{
+ int i, j=0;
+ struct de4x5_private *lp = netdev_priv(dev);
+
+ if (lp->chipset == DC21040) {
+ if (lp->bus == EISA) {
+ enet_addr_rst(aprom_addr); /* Reset Ethernet Address ROM Pointer */
+ } else {
+ outl(0, aprom_addr); /* Reset Ethernet Address ROM Pointer */
+ }
+ } else { /* Read new srom */
+ u_short tmp;
+ __le16 *p = (__le16 *)((char *)&lp->srom + SROM_HWADD);
+ for (i=0; i<(ETH_ALEN>>1); i++) {
+ tmp = srom_rd(aprom_addr, (SROM_HWADD>>1) + i);
+ j += tmp; /* for check for 0:0:0:0:0:0 or ff:ff:ff:ff:ff:ff */
+ *p = cpu_to_le16(tmp);
+ }
+ if (j == 0 || j == 3 * 0xffff) {
+ /* could get 0 only from all-0 and 3 * 0xffff only from all-1 */
+ return;
+ }
+
+ p = (__le16 *)&lp->srom;
+ for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
+ tmp = srom_rd(aprom_addr, i);
+ *p++ = cpu_to_le16(tmp);
+ }
+ de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
+ }
+}
+
+/*
+** Since the write on the Enet PROM register doesn't seem to reset the PROM
+** pointer correctly (at least on my DE425 EISA card), this routine should do
+** it...from depca.c.
+*/
+static void
+enet_addr_rst(u_long aprom_addr)
+{
+ union {
+ struct {
+ u32 a;
+ u32 b;
+ } llsig;
+ char Sig[sizeof(u32) << 1];
+ } dev;
+ short sigLength=0;
+ s8 data;
+ int i, j;
+
+ dev.llsig.a = ETH_PROM_SIG;
+ dev.llsig.b = ETH_PROM_SIG;
+ sigLength = sizeof(u32) << 1;
+
+ for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
+ data = inb(aprom_addr);
+ if (dev.Sig[j] == data) { /* track signature */
+ j++;
+ } else { /* lost signature; begin search again */
+ if (data == dev.Sig[0]) { /* rare case.... */
+ j=1;
+ } else {
+ j=0;
+ }
+ }
+ }
+}
+
+/*
+** For the bad status case with no SROM, add one to the previous
+** address. The carry must propagate backwards in case one or more of
+** the bytes is 0xff. Only the last 3 bytes need be checked since the
+** first three are invariant - they are assigned to an organisation.
+*/
+static int
+get_hw_addr(struct net_device *dev)
+{
+ u_long iobase = dev->base_addr;
+ int broken, i, k, tmp, status = 0;
+ u_short j,chksum;
+ struct de4x5_private *lp = netdev_priv(dev);
+
+ broken = de4x5_bad_srom(lp);
+
+ for (i=0,k=0,j=0;j<3;j++) {
+ k <<= 1;
+ if (k > 0xffff) k-=0xffff;
+
+ if (lp->bus == PCI) {
+ if (lp->chipset == DC21040) {
+ while ((tmp = inl(DE4X5_APROM)) < 0);
+ k += (u_char) tmp;
+ dev->dev_addr[i++] = (u_char) tmp;
+ while ((tmp = inl(DE4X5_APROM)) < 0);
+ k += (u_short) (tmp << 8);
+ dev->dev_addr[i++] = (u_char) tmp;
+ } else if (!broken) {
+ dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
+ dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
+ } else if ((broken == SMC) || (broken == ACCTON)) {
+ dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
+ dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
+ }
+ } else {
+ k += (u_char) (tmp = inb(EISA_APROM));
+ dev->dev_addr[i++] = (u_char) tmp;
+ k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
+ dev->dev_addr[i++] = (u_char) tmp;
+ }
+
+ if (k > 0xffff) k-=0xffff;
+ }
+ if (k == 0xffff) k=0;
+
+ if (lp->bus == PCI) {
+ if (lp->chipset == DC21040) {
+ while ((tmp = inl(DE4X5_APROM)) < 0);
+ chksum = (u_char) tmp;
+ while ((tmp = inl(DE4X5_APROM)) < 0);
+ chksum |= (u_short) (tmp << 8);
+ if ((k != chksum) && (dec_only)) status = -1;
+ }
+ } else {
+ chksum = (u_char) inb(EISA_APROM);
+ chksum |= (u_short) (inb(EISA_APROM) << 8);
+ if ((k != chksum) && (dec_only)) status = -1;
+ }
+
+ /* If possible, try to fix a broken card - SMC only so far */
+ srom_repair(dev, broken);
+
+#ifdef CONFIG_PPC_PMAC
+ /*
+ ** If the address starts with 00 a0, we have to bit-reverse
+ ** each byte of the address.
+ */
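+ /* (e.g. 0xa0 = 10100000b bit-reverses to 00000101b = 0x05) */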
+ if ( machine_is(powermac) &&
+ (dev->dev_addr[0] == 0) &&
+ (dev->dev_addr[1] == 0xa0) )
+ {
+ for (i = 0; i < ETH_ALEN; ++i)
+ {
+ int x = dev->dev_addr[i];
+ x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
+ x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
+ dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
+ }
+ }
+#endif /* CONFIG_PPC_PMAC */
+
+ /* Test for a bad enet address */
+ status = test_bad_enet(dev, status);
+
+ return status;
+}
+
+/*
+** Test for enet addresses in the first 32 bytes. The built-in strncmp
+** didn't seem to work here...?
+*/
+static int
+de4x5_bad_srom(struct de4x5_private *lp)
+{
+ int i, status = 0;
+
+ for (i = 0; i < ARRAY_SIZE(enet_det); i++) {
+ if (!de4x5_strncmp((char *)&lp->srom, (char *)&enet_det[i], 3) &&
+ !de4x5_strncmp((char *)&lp->srom+0x10, (char *)&enet_det[i], 3)) {
+ if (i == 0) {
+ status = SMC;
+ } else if (i == 1) {
+ status = ACCTON;
+ }
+ break;
+ }
+ }
+
+ return status;
+}
+
+static int
+de4x5_strncmp(char *a, char *b, int n)
+{
+ int ret=0;
+
+ for (;n && !ret; n--) {
+ ret = *a++ - *b++;
+ }
+
+ return ret;
+}
+
+static void
+srom_repair(struct net_device *dev, int card)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+
+ switch(card) {
+ case SMC:
+ memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom));
+ memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN);
+ memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100);
+ lp->useSROM = true;
+ break;
+ }
+}
+
+/*
+** Assume that the IRQs do not follow the PCI spec - this seems
+** to be true so far (2 for 2).
+*/
+static int
+test_bad_enet(struct net_device *dev, int status)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int i, tmp;
+
+ for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
+ if ((tmp == 0) || (tmp == 0x5fa)) {
+ if ((lp->chipset == last.chipset) &&
+ (lp->bus_num == last.bus) && (lp->bus_num > 0)) {
+ for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
+ for (i=ETH_ALEN-1; i>2; --i) {
+ dev->dev_addr[i] += 1;
+ if (dev->dev_addr[i] != 0) break;
+ }
+ for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
+ if (!an_exception(lp)) {
+ dev->irq = last.irq;
+ }
+
+ status = 0;
+ }
+ } else if (!status) {
+ last.chipset = lp->chipset;
+ last.bus = lp->bus_num;
+ last.irq = dev->irq;
+ for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
+ }
+
+ return status;
+}
+
+/*
+** List of board exceptions with correctly wired IRQs
+*/
+static int
+an_exception(struct de4x5_private *lp)
+{
+ if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
+ (*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+** SROM Read
+*/
+static short
+srom_rd(u_long addr, u_char offset)
+{
+ sendto_srom(SROM_RD | SROM_SR, addr);
+
+ srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
+ srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
+ srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
+
+ return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
+}
+
+static void
+srom_latch(u_int command, u_long addr)
+{
+ sendto_srom(command, addr);
+ sendto_srom(command | DT_CLK, addr);
+ sendto_srom(command, addr);
+}
+
+static void
+srom_command(u_int command, u_long addr)
+{
+ srom_latch(command, addr);
+ srom_latch(command, addr);
+ srom_latch((command & 0x0000ff00) | DT_CS, addr);
+}
+
+static void
+srom_address(u_int command, u_long addr, u_char offset)
+{
+ int i, a;
+
+ a = offset << 2;
+ for (i=0; i<6; i++, a <<= 1) {
+ srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr);
+ }
+ udelay(1);
+
+ i = (getfrom_srom(addr) >> 3) & 0x01;
+}
+
+static short
+srom_data(u_int command, u_long addr)
+{
+ int i;
+ short word = 0;
+ s32 tmp;
+
+ for (i=0; i<16; i++) {
+ sendto_srom(command | DT_CLK, addr);
+ tmp = getfrom_srom(addr);
+ sendto_srom(command, addr);
+
+ word = (word << 1) | ((tmp >> 3) & 0x01);
+ }
+
+ sendto_srom(command & 0x0000ff00, addr);
+
+ return word;
+}
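+
+/*
+** A sketch of the transaction the four helpers above compose (93C46
+** style serial EEPROM): select the chip, clock out the read opcode,
+** clock out a 6 bit word address MSB first, then clock in 16 data
+** bits. From a caller's view (offset is a word, not byte, index):
+**
+**   short word = srom_rd(aprom_addr, offset);
+*/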
+
+/*
+static void
+srom_busy(u_int command, u_long addr)
+{
+ sendto_srom((command & 0x0000ff00) | DT_CS, addr);
+
+ while (!((getfrom_srom(addr) >> 3) & 0x01)) {
+ mdelay(1);
+ }
+
+ sendto_srom(command & 0x0000ff00, addr);
+}
+*/
+
+static void
+sendto_srom(u_int command, u_long addr)
+{
+ outl(command, addr);
+ udelay(1);
+}
+
+static int
+getfrom_srom(u_long addr)
+{
+ s32 tmp;
+
+ tmp = inl(addr);
+ udelay(1);
+
+ return tmp;
+}
+
+static int
+srom_infoleaf_info(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int i, count;
+ u_char *p;
+
+ /* Find the infoleaf decoder function that matches this chipset */
+ for (i=0; i<INFOLEAF_SIZE; i++) {
+ if (lp->chipset == infoleaf_array[i].chipset) break;
+ }
+ if (i == INFOLEAF_SIZE) {
+ lp->useSROM = false;
+ printk("%s: Cannot find correct chipset for SROM decoding!\n",
+ dev->name);
+ return -ENXIO;
+ }
+
+ lp->infoleaf_fn = infoleaf_array[i].fn;
+
+ /* Find the information offset that this function should use */
+ count = *((u_char *)&lp->srom + 19);
+ p = (u_char *)&lp->srom + 26;
+
+ if (count > 1) {
+ for (i=count; i; --i, p+=3) {
+ if (lp->device == *p) break;
+ }
+ if (i == 0) {
+ lp->useSROM = false;
+ printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
+ dev->name, lp->device);
+ return -ENXIO;
+ }
+ }
+
+ lp->infoleaf_offset = get_unaligned_le16(p + 1);
+
+ return 0;
+}
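+
+/*
+** SROM header layout used above: byte 19 holds the controller count
+** and the per-controller entries start at byte 26, each entry being
+** 3 bytes - a PCI device number followed by a little-endian 16 bit
+** infoleaf offset.
+*/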
+
+/*
+** This routine loads any type 1 or 3 MII info into the mii device
+** struct and executes any type 5 code to reset PHY devices for this
+** controller.
+** The info for the MII devices will be valid since the index used
+** will follow the discovery process from MII address 1-31 then 0.
+*/
+static void
+srom_init(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
+ u_char count;
+
+ p+=2;
+ if (lp->chipset == DC21140) {
+ lp->cache.gepc = (*p++ | GEP_CTRL);
+ gep_wr(lp->cache.gepc, dev);
+ }
+
+ /* Block count */
+ count = *p++;
+
+ /* Jump the infoblocks to find types */
+ for (;count; --count) {
+ if (*p < 128) {
+ p += COMPACT_LEN;
+ } else if (*(p+1) == 5) {
+ type5_infoblock(dev, 1, p);
+ p += ((*p & BLOCK_LEN) + 1);
+ } else if (*(p+1) == 4) {
+ p += ((*p & BLOCK_LEN) + 1);
+ } else if (*(p+1) == 3) {
+ type3_infoblock(dev, 1, p);
+ p += ((*p & BLOCK_LEN) + 1);
+ } else if (*(p+1) == 2) {
+ p += ((*p & BLOCK_LEN) + 1);
+ } else if (*(p+1) == 1) {
+ type1_infoblock(dev, 1, p);
+ p += ((*p & BLOCK_LEN) + 1);
+ } else {
+ p += ((*p & BLOCK_LEN) + 1);
+ }
+ }
+}
+
+/*
+** A generic routine that writes GEP control, data and reset information
+** to the GEP register (21140) or csr15 GEP portion (2114[23]).
+*/
+static void
+srom_exec(struct net_device *dev, u_char *p)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ u_char count = (p ? *p++ : 0);
+ u_short *w = (u_short *)p;
+
+ if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;
+
+ if (lp->chipset != DC21140) RESET_SIA;
+
+ while (count--) {
+ gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
+ *p++ : get_unaligned_le16(w++)), dev);
+ mdelay(2); /* 2ms per action */
+ }
+
+ if (lp->chipset != DC21140) {
+ outl(lp->cache.csr14, DE4X5_STRR);
+ outl(lp->cache.csr13, DE4X5_SICR);
+ }
+}
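+
+/*
+** Sequence format consumed by srom_exec() above: a leading count byte
+** followed by <count> GEP actions - single bytes on the DC21140
+** (except for type 5 blocks), little-endian 16 bit words otherwise -
+** executed at 2ms per action.
+*/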
+
+/*
+** Basically this function is a NOP since it will never be called,
+** unless I implement the DC21041 SROM functions. There's no need
+** since the existing code will be satisfactory for all boards.
+*/
+static int
+dc21041_infoleaf(struct net_device *dev)
+{
+ return DE4X5_AUTOSENSE_MS;
+}
+
+static int
+dc21140_infoleaf(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char count = 0;
+ u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ /* Read the connection type */
+ p+=2;
+
+ /* GEP control */
+ lp->cache.gepc = (*p++ | GEP_CTRL);
+
+ /* Block count */
+ count = *p++;
+
+ /* Recursively figure out the info blocks */
+ if (*p < 128) {
+ next_tick = dc_infoblock[COMPACT](dev, count, p);
+ } else {
+ next_tick = dc_infoblock[*(p+1)](dev, count, p);
+ }
+
+ if (lp->tcount == count) {
+ lp->media = NC;
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tcount = 0;
+ lp->tx_enable = false;
+ }
+
+ return next_tick & ~TIMER_CB;
+}
+
+static int
+dc21142_infoleaf(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char count = 0;
+ u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ /* Read the connection type */
+ p+=2;
+
+ /* Block count */
+ count = *p++;
+
+ /* Recursively figure out the info blocks */
+ if (*p < 128) {
+ next_tick = dc_infoblock[COMPACT](dev, count, p);
+ } else {
+ next_tick = dc_infoblock[*(p+1)](dev, count, p);
+ }
+
+ if (lp->tcount == count) {
+ lp->media = NC;
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tcount = 0;
+ lp->tx_enable = false;
+ }
+
+ return next_tick & ~TIMER_CB;
+}
+
+static int
+dc21143_infoleaf(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char count = 0;
+ u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ /* Read the connection type */
+ p+=2;
+
+ /* Block count */
+ count = *p++;
+
+ /* Recursively figure out the info blocks */
+ if (*p < 128) {
+ next_tick = dc_infoblock[COMPACT](dev, count, p);
+ } else {
+ next_tick = dc_infoblock[*(p+1)](dev, count, p);
+ }
+ if (lp->tcount == count) {
+ lp->media = NC;
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tcount = 0;
+ lp->tx_enable = false;
+ }
+
+ return next_tick & ~TIMER_CB;
+}
+
+/*
+** The compact infoblock is only designed for DC21140[A] chips, so
+** we'll reuse the dc21140m_autoconf function. Non MII media only.
+*/
+static int
+compact_infoblock(struct net_device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char flags, csr6;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+COMPACT_LEN) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN);
+ } else {
+ return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN);
+ }
+ }
+
+ if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = COMPACT;
+ lp->active = 0;
+ gep_wr(lp->cache.gepc, dev);
+ lp->infoblock_media = (*p++) & COMPACT_MC;
+ lp->cache.gep = *p++;
+ csr6 = *p++;
+ flags = *p++;
+
+ lp->asBitValid = (flags & 0x80) ? 0 : -1;
+ lp->defMedium = (flags & 0x40) ? -1 : 0;
+ lp->asBit = 1 << ((csr6 >> 1) & 0x07);
+ lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
+ lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
+ lp->useMII = false;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc21140m_autoconf(dev);
+}
+
+/*
+** This block describes non MII media for the DC21140[A] only.
+*/
+static int
+type0_infoblock(struct net_device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = 0;
+ lp->active = 0;
+ gep_wr(lp->cache.gepc, dev);
+ p+=2;
+ lp->infoblock_media = (*p++) & BLOCK0_MC;
+ lp->cache.gep = *p++;
+ csr6 = *p++;
+ flags = *p++;
+
+ lp->asBitValid = (flags & 0x80) ? 0 : -1;
+ lp->defMedium = (flags & 0x40) ? -1 : 0;
+ lp->asBit = 1 << ((csr6 >> 1) & 0x07);
+ lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
+ lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
+ lp->useMII = false;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc21140m_autoconf(dev);
+}
+
+/* These functions are under construction! */
+
+static int
+type1_infoblock(struct net_device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ p += 2;
+ if (lp->state == INITIALISED) {
+ lp->ibn = 1;
+ lp->active = *p++;
+ lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1);
+ lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1);
+ lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
+ lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
+ lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
+ lp->phy[lp->active].ttm = get_unaligned_le16(p);
+ return 0;
+ } else if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = 1;
+ lp->active = *p;
+ lp->infoblock_csr6 = OMR_MII_100;
+ lp->useMII = true;
+ lp->infoblock_media = ANS;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc21140m_autoconf(dev);
+}
+
+static int
+type2_infoblock(struct net_device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = 2;
+ lp->active = 0;
+ p += 2;
+ lp->infoblock_media = (*p) & MEDIA_CODE;
+
+ if ((*p++) & EXT_FIELD) {
+ lp->cache.csr13 = get_unaligned_le16(p); p += 2;
+ lp->cache.csr14 = get_unaligned_le16(p); p += 2;
+ lp->cache.csr15 = get_unaligned_le16(p); p += 2;
+ } else {
+ lp->cache.csr13 = CSR13;
+ lp->cache.csr14 = CSR14;
+ lp->cache.csr15 = CSR15;
+ }
+ lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
+ lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16);
+ lp->infoblock_csr6 = OMR_SIA;
+ lp->useMII = false;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc2114x_autoconf(dev);
+}
+
+static int
+type3_infoblock(struct net_device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ p += 2;
+ if (lp->state == INITIALISED) {
+ lp->ibn = 3;
+ lp->active = *p++;
+ if (MOTO_SROM_BUG) lp->active = 0;
+ lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
+ lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
+ lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
+ lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
+ lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
+ lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2;
+ lp->phy[lp->active].mci = *p;
+ return 0;
+ } else if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = 3;
+ lp->active = *p;
+ if (MOTO_SROM_BUG) lp->active = 0;
+ lp->infoblock_csr6 = OMR_MII_100;
+ lp->useMII = true;
+ lp->infoblock_media = ANS;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc2114x_autoconf(dev);
+}
+
+static int
+type4_infoblock(struct net_device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = 4;
+ lp->active = 0;
+ p+=2;
+ lp->infoblock_media = (*p++) & MEDIA_CODE;
+ lp->cache.csr13 = CSR13; /* Hard coded defaults */
+ lp->cache.csr14 = CSR14;
+ lp->cache.csr15 = CSR15;
+ lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
+ lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
+ csr6 = *p++;
+ flags = *p++;
+
+ lp->asBitValid = (flags & 0x80) ? 0 : -1;
+ lp->defMedium = (flags & 0x40) ? -1 : 0;
+ lp->asBit = 1 << ((csr6 >> 1) & 0x07);
+ lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
+ lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
+ lp->useMII = false;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc2114x_autoconf(dev);
+}
+
+/*
+** This block type provides information for resetting external devices
+** (chips) through the General Purpose Register.
+*/
+static int
+type5_infoblock(struct net_device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_char len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ /* Must be initializing to run this code */
+ if ((lp->state == INITIALISED) || (lp->media == INIT)) {
+ p+=2;
+ lp->rst = p;
+ srom_exec(dev, lp->rst);
+ }
+
+ return DE4X5_AUTOSENSE_MS;
+}
+
+/*
+** MII Read/Write
+*/
+
+static int
+mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
+{
+ mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */
+ mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */
+ mii_wdata(MII_STRD, 4, ioaddr); /* SFD and Read operation */
+ mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
+ mii_address(phyreg, ioaddr); /* PHY Register to read */
+ mii_ta(MII_STRD, ioaddr); /* Turn around time - 2 MDC */
+
+ return mii_rdata(ioaddr); /* Read data */
+}
+
+static void
+mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
+{
+ mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */
+ mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */
+ mii_wdata(MII_STWR, 4, ioaddr); /* SFD and Write operation */
+ mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
+ mii_address(phyreg, ioaddr); /* PHY Register to write */
+ mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */
+ data = mii_swap(data, 16); /* Swap data bit ordering */
+ mii_wdata(data, 16, ioaddr); /* Write data */
+}
+
+static int
+mii_rdata(u_long ioaddr)
+{
+ int i;
+ s32 tmp = 0;
+
+ for (i=0; i<16; i++) {
+ tmp <<= 1;
+ tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
+ }
+
+ return tmp;
+}
+
+static void
+mii_wdata(int data, int len, u_long ioaddr)
+{
+ int i;
+
+ for (i=0; i<len; i++) {
+ sendto_mii(MII_MWR | MII_WR, data, ioaddr);
+ data >>= 1;
+ }
+}
+
+static void
+mii_address(u_char addr, u_long ioaddr)
+{
+ int i;
+
+ addr = mii_swap(addr, 5);
+ for (i=0; i<5; i++) {
+ sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
+ addr >>= 1;
+ }
+}
+
+static void
+mii_ta(u_long rw, u_long ioaddr)
+{
+ if (rw == MII_STWR) {
+ sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
+ sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
+ } else {
+ getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */
+ }
+}
+
+static int
+mii_swap(int data, int len)
+{
+ int i, tmp = 0;
+
+ for (i=0; i<len; i++) {
+ tmp <<= 1;
+ tmp |= (data & 1);
+ data >>= 1;
+ }
+
+ return tmp;
+}
+
+static void
+sendto_mii(u32 command, int data, u_long ioaddr)
+{
+ u32 j;
+
+ j = (data & 1) << 17;
+ outl(command | j, ioaddr);
+ udelay(1);
+ outl(command | MII_MDC | j, ioaddr);
+ udelay(1);
+}
+
+static int
+getfrom_mii(u32 command, u_long ioaddr)
+{
+ outl(command, ioaddr);
+ udelay(1);
+ outl(command | MII_MDC, ioaddr);
+ udelay(1);
+
+ return (inl(ioaddr) >> 19) & 1;
+}
+
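+/*
+** For reference, the management frames clocked out by mii_rd() and
+** mii_wr() follow the IEEE 802.3 MII serial format (bit counts as
+** sent above):
+**
+**   <34x1 preamble><ST=01><OP: 10=read, 01=write><PHYAD[4:0]>
+**   <REGAD[4:0]><TA><DATA[15:0]>
+**
+** A hedged usage sketch - reading the status register of the PHY at
+** address 8 (DE4X5_MII needs iobase and lp in scope):
+**
+**   int sr = mii_rd(MII_SR, 8, DE4X5_MII);
+*/
+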
+/*
+** Here are 3 ways to calculate the OUI from the ID registers.
+*/
+static int
+mii_get_oui(u_char phyaddr, u_long ioaddr)
+{
+/*
+ union {
+ u_short reg;
+ u_char breg[2];
+ } a;
+ int i, r2, r3, ret=0;*/
+ int r2, r3;
+
+ /* Read r2 and r3 */
+ r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
+ r3 = mii_rd(MII_ID1, phyaddr, ioaddr);
+ /* SEEQ and Cypress way * /
+ / * Shuffle r2 and r3 * /
+ a.reg=0;
+ r3 = ((r3>>10)|(r2<<6))&0x0ff;
+ r2 = ((r2>>2)&0x3fff);
+
+ / * Bit reverse r3 * /
+ for (i=0;i<8;i++) {
+ ret<<=1;
+ ret |= (r3&1);
+ r3>>=1;
+ }
+
+ / * Bit reverse r2 * /
+ for (i=0;i<16;i++) {
+ a.reg<<=1;
+ a.reg |= (r2&1);
+ r2>>=1;
+ }
+
+ / * Swap r2 bytes * /
+ i=a.breg[0];
+ a.breg[0]=a.breg[1];
+ a.breg[1]=i;
+
+ return (a.reg<<8)|ret; */ /* SEEQ and Cypress way */
+/* return (r2<<6)|(u_int)(r3>>10); */ /* NATIONAL and BROADCOM way */
+ return r2; /* (I did it) My way */
+}
+
+/*
+** The SROM spec forces us to search addresses [1-31 0]. Bummer.
+*/
+static int
+mii_get_phy(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ int i, j, k, n, limit=ARRAY_SIZE(phy_info);
+ int id;
+
+ lp->active = 0;
+ lp->useMII = true;
+
+ /* Search the MII address space for possible PHY devices */
+ for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
+ lp->phy[lp->active].addr = i;
+ if (i==0) n++; /* Count cycles */
+ while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */
+ id = mii_get_oui(i, DE4X5_MII);
+ if ((id == 0) || (id == 65535)) continue; /* Valid ID? */
+ for (j=0; j<limit; j++) { /* Search PHY table */
+ if (id != phy_info[j].id) continue; /* ID match? */
+ for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
+ if (k < DE4X5_MAX_PHY) {
+ memcpy((char *)&lp->phy[k],
+ (char *)&phy_info[j], sizeof(struct phy_table));
+ lp->phy[k].addr = i;
+ lp->mii_cnt++;
+ lp->active++;
+ } else {
+ goto purgatory; /* Stop the search */
+ }
+ break;
+ }
+ if ((j == limit) && (i < DE4X5_MAX_MII)) {
+ for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
+ lp->phy[k].addr = i;
+ lp->phy[k].id = id;
+ lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
+ lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */
+ lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
+ lp->mii_cnt++;
+ lp->active++;
+ printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
+ j = de4x5_debug;
+ de4x5_debug |= DEBUG_MII;
+ de4x5_dbg_mii(dev, k);
+ de4x5_debug = j;
+ printk("\n");
+ }
+ }
+ purgatory:
+ lp->active = 0;
+ if (lp->phy[0].id) { /* Reset the PHY devices */
+ for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) { /*For each PHY*/
+ mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
+ while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
+
+ de4x5_dbg_mii(dev, k);
+ }
+ }
+ if (!lp->mii_cnt) lp->useMII = false;
+
+ return lp->mii_cnt;
+}
+
+static char *
+build_setup_frame(struct net_device *dev, int mode)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int i;
+ char *pa = lp->setup_frame;
+
+ /* Initialise the setup frame */
+ if (mode == ALL) {
+ memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
+ }
+
+ if (lp->setup_f == HASH_PERF) {
+ for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
+ *(pa + i) = dev->dev_addr[i]; /* Host address */
+ if (i & 0x01) pa += 2;
+ }
+ *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80;
+ } else {
+ for (i=0; i<ETH_ALEN; i++) { /* Host address */
+ *(pa + (i&1)) = dev->dev_addr[i];
+ if (i & 0x01) pa += 4;
+ }
+ for (i=0; i<ETH_ALEN; i++) { /* Broadcast address */
+ *(pa + (i&1)) = (char) 0xff;
+ if (i & 0x01) pa += 4;
+ }
+ }
+
+ return pa; /* Points to the next entry */
+}
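+
+/*
+** Setup frame layout sketch for the perfect filtering case above: each
+** 6 byte address occupies three 32 bit words, two address bytes in the
+** low 16 bits of each word:
+**
+**   word0: xx xx a1 a0   word1: xx xx a3 a2   word2: xx xx a5 a4
+*/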
+
+static void
+disable_ast(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ del_timer_sync(&lp->timer);
+}
+
+static long
+de4x5_switch_mac_port(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+ s32 omr;
+
+ STOP_DE4X5;
+
+ /* Assert the OMR_PS bit in CSR6 */
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR |
+ OMR_FDX));
+ omr |= lp->infoblock_csr6;
+ if (omr & OMR_PS) omr |= OMR_HBD;
+ outl(omr, DE4X5_OMR);
+
+ /* Soft Reset */
+ RESET_DE4X5;
+
+ /* Restore the GEP - especially for COMPACT and Type 0 Infoblocks */
+ if (lp->chipset == DC21140) {
+ gep_wr(lp->cache.gepc, dev);
+ gep_wr(lp->cache.gep, dev);
+ } else if ((lp->chipset & ~0x0ff) == DC2114x) {
+ reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15);
+ }
+
+ /* Restore CSR6 */
+ outl(omr, DE4X5_OMR);
+
+ /* Reset CSR8 */
+ inl(DE4X5_MFC);
+
+ return omr;
+}
+
+static void
+gep_wr(s32 data, struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ if (lp->chipset == DC21140) {
+ outl(data, DE4X5_GEP);
+ } else if ((lp->chipset & ~0x00ff) == DC2114x) {
+ outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
+ }
+}
+
+static int
+gep_rd(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ if (lp->chipset == DC21140) {
+ return inl(DE4X5_GEP);
+ } else if ((lp->chipset & ~0x00ff) == DC2114x) {
+ return inl(DE4X5_SIGR) & 0x000fffff;
+ }
+
+ return 0;
+}
+
+static void
+yawn(struct net_device *dev, int state)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ if ((lp->chipset == DC21040) || (lp->chipset == DC21140)) return;
+
+ if(lp->bus == EISA) {
+ switch(state) {
+ case WAKEUP:
+ outb(WAKEUP, PCI_CFPM);
+ mdelay(10);
+ break;
+
+ case SNOOZE:
+ outb(SNOOZE, PCI_CFPM);
+ break;
+
+ case SLEEP:
+ outl(0, DE4X5_SICR);
+ outb(SLEEP, PCI_CFPM);
+ break;
+ }
+ } else {
+ struct pci_dev *pdev = to_pci_dev (lp->gendev);
+ switch(state) {
+ case WAKEUP:
+ pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
+ mdelay(10);
+ break;
+
+ case SNOOZE:
+ pci_write_config_byte(pdev, PCI_CFDA_PSM, SNOOZE);
+ break;
+
+ case SLEEP:
+ outl(0, DE4X5_SICR);
+ pci_write_config_byte(pdev, PCI_CFDA_PSM, SLEEP);
+ break;
+ }
+ }
+}
+
+static void
+de4x5_parse_params(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ char *p, *q, t;
+
+ lp->params.fdx = 0;
+ lp->params.autosense = AUTO;
+
+ if (args == NULL) return;
+
+ if ((p = strstr(args, dev->name))) {
+ if (!(q = strstr(p+strlen(dev->name), "eth"))) q = p + strlen(p);
+ t = *q;
+ *q = '\0';
+
+ if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = 1;
+
+ if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
+ if (strstr(p, "TP")) {
+ lp->params.autosense = TP;
+ } else if (strstr(p, "TP_NW")) {
+ lp->params.autosense = TP_NW;
+ } else if (strstr(p, "BNC")) {
+ lp->params.autosense = BNC;
+ } else if (strstr(p, "AUI")) {
+ lp->params.autosense = AUI;
+ } else if (strstr(p, "BNC_AUI")) {
+ lp->params.autosense = BNC;
+ } else if (strstr(p, "10Mb")) {
+ lp->params.autosense = _10Mb;
+ } else if (strstr(p, "100Mb")) {
+ lp->params.autosense = _100Mb;
+ } else if (strstr(p, "AUTO")) {
+ lp->params.autosense = AUTO;
+ }
+ }
+ *q = t;
+ }
+}
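+
+/*
+** A hedged usage sketch for the parser above: options for a device run
+** from its name up to the next "eth" token, and only the substrings
+** tested above are significant, e.g.
+**
+**   insmod de4x5 args='eth0:fdx autosense=BNC eth1:autosense=TP'
+*/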
+
+static void
+de4x5_dbg_open(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ int i;
+
+ if (de4x5_debug & DEBUG_OPEN) {
+ printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
+ printk("\tphysical address: ");
+ for (i=0;i<6;i++) {
+ printk("%2.2x:",(short)dev->dev_addr[i]);
+ }
+ printk("\n");
+ printk("Descriptor head addresses:\n");
+ printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
+ printk("Descriptor addresses:\nRX: ");
+ for (i=0;i<lp->rxRingSize-1;i++){
+ if (i < 3) {
+ printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
+ }
+ }
+ printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
+ printk("TX: ");
+ for (i=0;i<lp->txRingSize-1;i++){
+ if (i < 3) {
+ printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
+ }
+ }
+ printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
+ printk("Descriptor buffers:\nRX: ");
+ for (i=0;i<lp->rxRingSize-1;i++){
+ if (i < 3) {
+ printk("0x%8.8x ",le32_to_cpu(lp->rx_ring[i].buf));
+ }
+ }
+ printk("...0x%8.8x\n",le32_to_cpu(lp->rx_ring[i].buf));
+ printk("TX: ");
+ for (i=0;i<lp->txRingSize-1;i++){
+ if (i < 3) {
+ printk("0x%8.8x ", le32_to_cpu(lp->tx_ring[i].buf));
+ }
+ }
+ printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
+ printk("Ring size:\nRX: %d\nTX: %d\n",
+ (short)lp->rxRingSize,
+ (short)lp->txRingSize);
+ }
+}
+
+static void
+de4x5_dbg_mii(struct net_device *dev, int k)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ u_long iobase = dev->base_addr;
+
+ if (de4x5_debug & DEBUG_MII) {
+ printk("\nMII device address: %d\n", lp->phy[k].addr);
+ printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
+ printk("MII SR: %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII));
+ printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII));
+ printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII));
+ if (lp->phy[k].id != BROADCOM_T4) {
+ printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII));
+ printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII));
+ }
+ printk("MII 16: %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII));
+ if (lp->phy[k].id != BROADCOM_T4) {
+ printk("MII 17: %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII));
+ printk("MII 18: %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII));
+ } else {
+ printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
+ }
+ }
+}
+
+static void
+de4x5_dbg_media(struct net_device *dev)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+
+ if (lp->media != lp->c_media) {
+ if (de4x5_debug & DEBUG_MEDIA) {
+ printk("%s: media is %s%s\n", dev->name,
+ (lp->media == NC ? "unconnected, link down or incompatible connection" :
+ (lp->media == TP ? "TP" :
+ (lp->media == ANS ? "TP/Nway" :
+ (lp->media == BNC ? "BNC" :
+ (lp->media == AUI ? "AUI" :
+ (lp->media == BNC_AUI ? "BNC/AUI" :
+ (lp->media == EXT_SIA ? "EXT SIA" :
+ (lp->media == _100Mb ? "100Mb/s" :
+ (lp->media == _10Mb ? "10Mb/s" :
+ "???"
+ ))))))))), (lp->fdx?" full duplex.":"."));
+ }
+ lp->c_media = lp->media;
+ }
+}
+
+static void
+de4x5_dbg_srom(struct de4x5_srom *p)
+{
+ int i;
+
+ if (de4x5_debug & DEBUG_SROM) {
+ printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id));
+ printk("Sub-system ID: %04x\n", *((u_short *)p->sub_system_id));
+ printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc));
+ printk("SROM version: %02x\n", (u_char)(p->version));
+ printk("# controllers: %02x\n", (u_char)(p->num_controllers));
+
+ printk("Hardware Address: %pM\n", p->ieee_addr);
+ printk("CRC checksum: %04x\n", (u_short)(p->chksum));
+ for (i=0; i<64; i++) {
+ printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
+ }
+ }
+}
+
+static void
+de4x5_dbg_rx(struct sk_buff *skb, int len)
+{
+ int i, j;
+
+ if (de4x5_debug & DEBUG_RX) {
+ printk("R: %pM <- %pM len/SAP:%02x%02x [%d]\n",
+ skb->data, &skb->data[6],
+ (u_char)skb->data[12],
+ (u_char)skb->data[13],
+ len);
+ for (j=0; len>0;j+=16, len-=16) {
+ printk(" %03x: ",j);
+ for (i=0; i<16 && i<len; i++) {
+ printk("%02x ",(u_char)skb->data[i+j]);
+ }
+ printk("\n");
+ }
+ }
+}
+
+/*
+** Perform IOCTL call functions here. Some are privileged operations and
+** the CAP_NET_ADMIN capability is checked in those cases. In the normal
+** course of events this function is only used for my testing.
+*/
+static int
+de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct de4x5_private *lp = netdev_priv(dev);
+ struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru;
+ u_long iobase = dev->base_addr;
+ int i, j, status = 0;
+ s32 omr;
+ union {
+ u8 addr[144];
+ u16 sval[72];
+ u32 lval[36];
+ } tmp;
+ u_long flags = 0;
+
+ switch(ioc->cmd) {
+ case DE4X5_GET_HWADDR: /* Get the hardware address */
+ ioc->len = ETH_ALEN;
+ for (i=0; i<ETH_ALEN; i++) {
+ tmp.addr[i] = dev->dev_addr[i];
+ }
+ if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
+ break;
+
+ case DE4X5_SET_HWADDR: /* Set the hardware address */
+ if (!capable(CAP_NET_ADMIN)) return -EPERM;
+ if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) return -EFAULT;
+ if (netif_queue_stopped(dev))
+ return -EBUSY;
+ netif_stop_queue(dev);
+ for (i=0; i<ETH_ALEN; i++) {
+ dev->dev_addr[i] = tmp.addr[i];
+ }
+ build_setup_frame(dev, PHYS_ADDR_ONLY);
+ /* Set up the descriptor and give ownership to the card */
+ load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
+ SETUP_FRAME_LEN, (struct sk_buff *)1);
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
+ outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
+ netif_wake_queue(dev); /* Unlock the TX ring */
+ break;
+
+ case DE4X5_SAY_BOO: /* Say "Boo!" to the kernel log file */
+ if (!capable(CAP_NET_ADMIN)) return -EPERM;
+ printk("%s: Boo!\n", dev->name);
+ break;
+
+ case DE4X5_MCA_EN: /* Enable pass all multicast addressing */
+ if (!capable(CAP_NET_ADMIN)) return -EPERM;
+ omr = inl(DE4X5_OMR);
+ omr |= OMR_PM;
+ outl(omr, DE4X5_OMR);
+ break;
+
+ case DE4X5_GET_STATS: /* Get the driver statistics */
+ {
+ struct pkt_stats statbuf;
+ ioc->len = sizeof(statbuf);
+ spin_lock_irqsave(&lp->lock, flags);
+ memcpy(&statbuf, &lp->pktStats, ioc->len);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ if (copy_to_user(ioc->data, &statbuf, ioc->len))
+ return -EFAULT;
+ break;
+ }
+ case DE4X5_CLR_STATS: /* Zero out the driver statistics */
+ if (!capable(CAP_NET_ADMIN)) return -EPERM;
+ spin_lock_irqsave(&lp->lock, flags);
+ memset(&lp->pktStats, 0, sizeof(lp->pktStats));
+ spin_unlock_irqrestore(&lp->lock, flags);
+ break;
+
+ case DE4X5_GET_OMR: /* Get the OMR Register contents */
+ tmp.addr[0] = inl(DE4X5_OMR);
+ if (copy_to_user(ioc->data, tmp.addr, 1)) return -EFAULT;
+ break;
+
+ case DE4X5_SET_OMR: /* Set the OMR Register contents */
+ if (!capable(CAP_NET_ADMIN)) return -EPERM;
+ if (copy_from_user(tmp.addr, ioc->data, 1)) return -EFAULT;
+ outl(tmp.addr[0], DE4X5_OMR);
+ break;
+
+ case DE4X5_GET_REG: /* Get the DE4X5 Registers */
+ j = 0;
+ tmp.lval[0] = inl(DE4X5_STS); j+=4;
+ tmp.lval[1] = inl(DE4X5_BMR); j+=4;
+ tmp.lval[2] = inl(DE4X5_IMR); j+=4;
+ tmp.lval[3] = inl(DE4X5_OMR); j+=4;
+ tmp.lval[4] = inl(DE4X5_SISR); j+=4;
+ tmp.lval[5] = inl(DE4X5_SICR); j+=4;
+ tmp.lval[6] = inl(DE4X5_STRR); j+=4;
+ tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
+ ioc->len = j;
+ if (copy_to_user(ioc->data, tmp.lval, ioc->len))
+ return -EFAULT;
+ break;
+
+#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */
+/*
+ case DE4X5_DUMP:
+ j = 0;
+ tmp.addr[j++] = dev->irq;
+ for (i=0; i<ETH_ALEN; i++) {
+ tmp.addr[j++] = dev->dev_addr[i];
+ }
+ tmp.addr[j++] = lp->rxRingSize;
+ tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
+ tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
+
+ for (i=0;i<lp->rxRingSize-1;i++){
+ if (i < 3) {
+ tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
+ }
+ }
+ tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
+ for (i=0;i<lp->txRingSize-1;i++){
+ if (i < 3) {
+ tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
+ }
+ }
+ tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
+
+ for (i=0;i<lp->rxRingSize-1;i++){
+ if (i < 3) {
+ tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
+ }
+ }
+ tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
+ for (i=0;i<lp->txRingSize-1;i++){
+ if (i < 3) {
+ tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
+ }
+ }
+ tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
+
+ for (i=0;i<lp->rxRingSize;i++){
+ tmp.lval[j>>2] = le32_to_cpu(lp->rx_ring[i].status); j+=4;
+ }
+ for (i=0;i<lp->txRingSize;i++){
+ tmp.lval[j>>2] = le32_to_cpu(lp->tx_ring[i].status); j+=4;
+ }
+
+ tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_RRBA); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_TRBA); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
+ tmp.lval[j>>2] = lp->chipset; j+=4;
+ if (lp->chipset == DC21140) {
+ tmp.lval[j>>2] = gep_rd(dev); j+=4;
+ } else {
+ tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
+ }
+ tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4;
+ if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
+ tmp.lval[j>>2] = lp->active; j+=4;
+ tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ tmp.lval[j>>2]=mii_rd(MII_ID1,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ if (lp->phy[lp->active].id != BROADCOM_T4) {
+ tmp.lval[j>>2]=mii_rd(MII_ANA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ tmp.lval[j>>2]=mii_rd(MII_ANLPA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ }
+ tmp.lval[j>>2]=mii_rd(0x10,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ if (lp->phy[lp->active].id != BROADCOM_T4) {
+ tmp.lval[j>>2]=mii_rd(0x11,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ tmp.lval[j>>2]=mii_rd(0x12,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ } else {
+ tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ }
+ }
+
+ tmp.addr[j++] = lp->txRingSize;
+ tmp.addr[j++] = netif_queue_stopped(dev);
+
+ ioc->len = j;
+ if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
+ break;
+
+*/
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return status;
+}
+
+static int __init de4x5_module_init (void)
+{
+ int err = 0;
+
+#ifdef CONFIG_PCI
+ err = pci_register_driver(&de4x5_pci_driver);
+#endif
+#ifdef CONFIG_EISA
+ err |= eisa_driver_register (&de4x5_eisa_driver);
+#endif
+
+ return err;
+}
+
+static void __exit de4x5_module_exit (void)
+{
+#ifdef CONFIG_PCI
+ pci_unregister_driver (&de4x5_pci_driver);
+#endif
+#ifdef CONFIG_EISA
+ eisa_driver_unregister (&de4x5_eisa_driver);
+#endif
+}
+
+module_init (de4x5_module_init);
+module_exit (de4x5_module_exit);
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.h b/drivers/net/ethernet/dec/tulip/de4x5.h
new file mode 100644
index 000000000000..ec756eba397b
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/de4x5.h
@@ -0,0 +1,1017 @@
+/*
+ Copyright 1994 Digital Equipment Corporation.
+
+ This software may be used and distributed according to the terms of the
+ GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as davies@wanton.lkg.dec.com or Digital
+ Equipment Corporation, 550 King Street, Littleton MA 01460.
+
+ =========================================================================
+*/
+
+/*
+** DC21040 CSR<1..15> Register Address Map
+*/
+#define DE4X5_BMR iobase+(0x000 << lp->bus) /* Bus Mode Register */
+#define DE4X5_TPD iobase+(0x008 << lp->bus) /* Transmit Poll Demand Reg */
+#define DE4X5_RPD iobase+(0x010 << lp->bus) /* Receive Poll Demand Reg */
+#define DE4X5_RRBA iobase+(0x018 << lp->bus) /* RX Ring Base Address Reg */
+#define DE4X5_TRBA iobase+(0x020 << lp->bus) /* TX Ring Base Address Reg */
+#define DE4X5_STS iobase+(0x028 << lp->bus) /* Status Register */
+#define DE4X5_OMR iobase+(0x030 << lp->bus) /* Operation Mode Register */
+#define DE4X5_IMR iobase+(0x038 << lp->bus) /* Interrupt Mask Register */
+#define DE4X5_MFC iobase+(0x040 << lp->bus) /* Missed Frame Counter */
+#define DE4X5_APROM iobase+(0x048 << lp->bus) /* Ethernet Address PROM */
+#define DE4X5_BROM iobase+(0x048 << lp->bus) /* Boot ROM Register */
+#define DE4X5_SROM iobase+(0x048 << lp->bus) /* Serial ROM Register */
+#define DE4X5_MII iobase+(0x048 << lp->bus) /* MII Interface Register */
+#define DE4X5_DDR iobase+(0x050 << lp->bus) /* Data Diagnostic Register */
+#define DE4X5_FDR iobase+(0x058 << lp->bus) /* Full Duplex Register */
+#define DE4X5_GPT iobase+(0x058 << lp->bus) /* General Purpose Timer Reg.*/
+#define DE4X5_GEP iobase+(0x060 << lp->bus) /* General Purpose Register */
+#define DE4X5_SISR iobase+(0x060 << lp->bus) /* SIA Status Register */
+#define DE4X5_SICR iobase+(0x068 << lp->bus) /* SIA Connectivity Register */
+#define DE4X5_STRR iobase+(0x070 << lp->bus) /* SIA TX/RX Register */
+#define DE4X5_SIGR iobase+(0x078 << lp->bus) /* SIA General Register */
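Each CSR offset above is scaled by lp->bus, so the same header serves both buses: with the PCI (0) and EISA (1) constants defined near the end of this file, CSRs sit on 8-byte boundaries on the PCI chips and 16-byte boundaries on the EISA DE425. A minimal sketch of the arithmetic, with an illustrative helper name that is not part of the driver:

    /* Illustrative only: how the register macros above resolve. */
    static inline u_long de4x5_csr_addr(u_long iobase, int bus, int csr)
    {
            /* CSR n lives at iobase + (n * 8) << bus */
            return iobase + ((csr * 0x008) << bus);
    }
    /* e.g. de4x5_csr_addr(iobase, PCI, 6) equals DE4X5_OMR on a PCI card */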
+
+/*
+** EISA Register Address Map
+*/
+#define EISA_ID iobase+0x0c80 /* EISA ID Registers */
+#define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */
+#define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */
+#define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */
+#define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */
+#define EISA_CR iobase+0x0c84 /* EISA Control Register */
+#define EISA_REG0 iobase+0x0c88 /* EISA Configuration Register 0 */
+#define EISA_REG1 iobase+0x0c89 /* EISA Configuration Register 1 */
+#define EISA_REG2 iobase+0x0c8a /* EISA Configuration Register 2 */
+#define EISA_REG3 iobase+0x0c8f /* EISA Configuration Register 3 */
+#define EISA_APROM iobase+0x0c90 /* Ethernet Address PROM */
+
+/*
+** PCI/EISA Configuration Registers Address Map
+*/
+#define PCI_CFID iobase+0x0008 /* PCI Configuration ID Register */
+#define PCI_CFCS iobase+0x000c /* PCI Command/Status Register */
+#define PCI_CFRV iobase+0x0018 /* PCI Revision Register */
+#define PCI_CFLT iobase+0x001c /* PCI Latency Timer Register */
+#define PCI_CBIO iobase+0x0028 /* PCI Base I/O Register */
+#define PCI_CBMA iobase+0x002c /* PCI Base Memory Address Register */
+#define PCI_CBER iobase+0x0030 /* PCI Expansion ROM Base Address Reg. */
+#define PCI_CFIT iobase+0x003c /* PCI Configuration Interrupt Register */
+#define PCI_CFDA iobase+0x0040 /* PCI Driver Area Register */
+#define PCI_CFDD iobase+0x0041 /* PCI Driver Dependent Area Register */
+#define PCI_CFPM iobase+0x0043 /* PCI Power Management Area Register */
+
+/*
+** EISA Configuration Register 0 bit definitions
+*/
+#define ER0_BSW 0x80 /* EISA Bus Slave Width, 1: 32 bits */
+#define ER0_BMW 0x40 /* EISA Bus Master Width, 1: 32 bits */
+#define ER0_EPT 0x20 /* EISA PREEMPT Time, 0: 23 BCLKs */
+#define ER0_ISTS 0x10 /* Interrupt Status (X) */
+#define ER0_LI 0x08 /* Latch Interrupts */
+#define ER0_INTL 0x06 /* INTerrupt Level */
+#define ER0_INTT 0x01 /* INTerrupt Type, 0: Level, 1: Edge */
+
+/*
+** EISA Configuration Register 1 bit definitions
+*/
+#define ER1_IAM 0xe0 /* ISA Address Mode */
+#define ER1_IAE 0x10 /* ISA Addressing Enable */
+#define ER1_UPIN 0x0f /* User Pins */
+
+/*
+** EISA Configuration Register 2 bit definitions
+*/
+#define ER2_BRS 0xc0 /* Boot ROM Size */
+#define ER2_BRA 0x3c /* Boot ROM Address <16:13> */
+
+/*
+** EISA Configuration Register 3 bit definitions
+*/
+#define ER3_BWE 0x40 /* Burst Write Enable */
+#define ER3_BRE 0x04 /* Burst Read Enable */
+#define ER3_LSR 0x02 /* Local Software Reset */
+
+/*
+** PCI Configuration ID Register (PCI_CFID). The Device IDs are left
+** shifted 8 bits to allow detection of DC21142 and DC21143 variants with
+** the configuration revision register step number.
+*/
+#define CFID_DID 0xff00 /* Device ID */
+#define CFID_VID 0x00ff /* Vendor ID */
+#define DC21040_DID 0x0200 /* Unique Device ID # */
+#define DC21040_VID 0x1011 /* DC21040 Manufacturer */
+#define DC21041_DID 0x1400 /* Unique Device ID # */
+#define DC21041_VID 0x1011 /* DC21041 Manufacturer */
+#define DC21140_DID 0x0900 /* Unique Device ID # */
+#define DC21140_VID 0x1011 /* DC21140 Manufacturer */
+#define DC2114x_DID 0x1900 /* Unique Device ID # */
+#define DC2114x_VID 0x1011 /* DC2114[23] Manufacturer */
+
+/*
+** Chipset defines
+*/
+#define DC21040 DC21040_DID
+#define DC21041 DC21041_DID
+#define DC21140 DC21140_DID
+#define DC2114x DC2114x_DID
+#define DC21142 (DC2114x_DID | 0x0010)
+#define DC21143 (DC2114x_DID | 0x0030)
+#define DC2114x_BRK 0x0020 /* CFRV break between DC21142 & DC21143 */
+
+#define is_DC21040 ((vendor == DC21040_VID) && (device == DC21040_DID))
+#define is_DC21041 ((vendor == DC21041_VID) && (device == DC21041_DID))
+#define is_DC21140 ((vendor == DC21140_VID) && (device == DC21140_DID))
+#define is_DC2114x ((vendor == DC2114x_VID) && (device == DC2114x_DID))
+#define is_DC21142 ((vendor == DC2114x_VID) && (device == DC21142))
+#define is_DC21143 ((vendor == DC2114x_VID) && (device == DC21143))
+
+/*
+** PCI Configuration Command/Status Register (PCI_CFCS)
+*/
+#define CFCS_DPE 0x80000000 /* Detected Parity Error (S) */
+#define CFCS_SSE 0x40000000 /* Signal System Error (S) */
+#define CFCS_RMA 0x20000000 /* Receive Master Abort (S) */
+#define CFCS_RTA 0x10000000 /* Receive Target Abort (S) */
+#define CFCS_DST 0x06000000 /* DEVSEL Timing (S) */
+#define CFCS_DPR 0x01000000 /* Data Parity Report (S) */
+#define CFCS_FBB 0x00800000 /* Fast Back-To-Back (S) */
+#define CFCS_SEE 0x00000100 /* System Error Enable (C) */
+#define CFCS_PER 0x00000040 /* Parity Error Response (C) */
+#define CFCS_MO 0x00000004 /* Master Operation (C) */
+#define CFCS_MSA 0x00000002 /* Memory Space Access (C) */
+#define CFCS_IOSA 0x00000001 /* I/O Space Access (C) */
+
+/*
+** PCI Configuration Revision Register (PCI_CFRV)
+*/
+#define CFRV_BC 0xff000000 /* Base Class */
+#define CFRV_SC 0x00ff0000 /* Subclass */
+#define CFRV_RN 0x000000f0 /* Revision Number */
+#define CFRV_SN 0x0000000f /* Step Number */
+#define BASE_CLASS 0x02000000 /* Indicates Network Controller */
+#define SUB_CLASS 0x00000000 /* Indicates Ethernet Controller */
+#define STEP_NUMBER 0x00000020 /* Increments for future chips */
+#define REV_NUMBER 0x00000003 /* 0x00, 0x01, 0x02, 0x03: Rev in Step */
+#define CFRV_MASK 0xffff0000 /* Register mask */
+
+/*
+** PCI Configuration Latency Timer Register (PCI_CFLT)
+*/
+#define CFLT_BC 0x0000ff00 /* Latency Timer bits */
+
+/*
+** PCI Configuration Base I/O Address Register (PCI_CBIO)
+*/
+#define CBIO_MASK -128 /* Base I/O Address Mask */
+#define CBIO_IOSI 0x00000001 /* I/O Space Indicator (RO, value is 1) */
+
+/*
+** PCI Configuration Card Information Structure Register (PCI_CCIS)
+*/
+#define CCIS_ROMI 0xf0000000 /* ROM Image */
+#define CCIS_ASO 0x0ffffff8 /* Address Space Offset */
+#define CCIS_ASI 0x00000007 /* Address Space Indicator */
+
+/*
+** PCI Configuration Subsystem ID Register (PCI_SSID)
+*/
+#define SSID_SSID 0xffff0000 /* Subsystem ID */
+#define SSID_SVID 0x0000ffff /* Subsystem Vendor ID */
+
+/*
+** PCI Configuration Expansion ROM Base Address Register (PCI_CBER)
+*/
+#define CBER_MASK 0xfffffc00 /* Expansion ROM Base Address Mask */
+#define CBER_ROME 0x00000001 /* ROM Enable */
+
+/*
+** PCI Configuration Interrupt Register (PCI_CFIT)
+*/
+#define CFIT_MXLT 0xff000000 /* MAX_LAT Value (0.25us periods) */
+#define CFIT_MNGT 0x00ff0000 /* MIN_GNT Value (0.25us periods) */
+#define CFIT_IRQP 0x0000ff00 /* Interrupt Pin */
+#define CFIT_IRQL 0x000000ff /* Interrupt Line */
+
+/*
+** PCI Configuration Power Management Area Register (PCI_CFPM)
+*/
+#define SLEEP 0x80 /* Power Saving Sleep Mode */
+#define SNOOZE 0x40 /* Power Saving Snooze Mode */
+#define WAKEUP 0x00 /* Power Saving Wakeup */
+
+#define PCI_CFDA_DSU 0x41 /* 8 bit Configuration Space Address */
+#define PCI_CFDA_PSM 0x43 /* 8 bit Configuration Space Address */
+
+/*
+** DC21040 Bus Mode Register (DE4X5_BMR)
+*/
+#define BMR_RML 0x00200000 /* [Memory] Read Multiple */
+#define BMR_DBO 0x00100000 /* Descriptor Byte Ordering (Endian) */
+#define BMR_TAP 0x000e0000 /* Transmit Automatic Polling */
+#define BMR_DAS 0x00010000 /* Diagnostic Address Space */
+#define BMR_CAL 0x0000c000 /* Cache Alignment */
+#define BMR_PBL 0x00003f00 /* Programmable Burst Length */
+#define BMR_BLE 0x00000080 /* Big/Little Endian */
+#define BMR_DSL 0x0000007c /* Descriptor Skip Length */
+#define BMR_BAR 0x00000002 /* Bus ARbitration */
+#define BMR_SWR 0x00000001 /* Software Reset */
+
+ /* Timings here are for 10BASE-T/AUI only */
+#define TAP_NOPOLL 0x00000000 /* No automatic polling */
+#define TAP_200US 0x00020000 /* TX automatic polling every 200us */
+#define TAP_800US 0x00040000 /* TX automatic polling every 800us */
+#define TAP_1_6MS 0x00060000 /* TX automatic polling every 1.6ms */
+#define TAP_12_8US 0x00080000 /* TX automatic polling every 12.8us */
+#define TAP_25_6US 0x000a0000 /* TX automatic polling every 25.6us */
+#define TAP_51_2US 0x000c0000 /* TX automatic polling every 51.2us */
+#define TAP_102_4US 0x000e0000 /* TX automatic polling every 102.4us */
+
+#define CAL_NOUSE 0x00000000 /* Not used */
+#define CAL_8LONG 0x00004000 /* 8-longword alignment */
+#define CAL_16LONG 0x00008000 /* 16-longword alignment */
+#define CAL_32LONG 0x0000c000 /* 32-longword alignment */
+
+#define PBL_0 0x00000000 /* DMA burst length = amount in RX FIFO */
+#define PBL_1 0x00000100 /* 1 longword DMA burst length */
+#define PBL_2 0x00000200 /* 2 longwords DMA burst length */
+#define PBL_4 0x00000400 /* 4 longwords DMA burst length */
+#define PBL_8 0x00000800 /* 8 longwords DMA burst length */
+#define PBL_16 0x00001000 /* 16 longwords DMA burst length */
+#define PBL_32 0x00002000 /* 32 longwords DMA burst length */
+
+#define DSL_0 0x00000000 /* 0 longword / descriptor */
+#define DSL_1 0x00000004 /* 1 longword / descriptor */
+#define DSL_2 0x00000008 /* 2 longwords / descriptor */
+#define DSL_4 0x00000010 /* 4 longwords / descriptor */
+#define DSL_8 0x00000020 /* 8 longwords / descriptor */
+#define DSL_16 0x00000040 /* 16 longwords / descriptor */
+#define DSL_32 0x00000080 /* 32 longwords / descriptor */
+
+/*
+** DC21040 Transmit Poll Demand Register (DE4X5_TPD)
+*/
+#define TPD 0x00000001 /* Transmit Poll Demand */
+
+/*
+** DC21040 Receive Poll Demand Register (DE4X5_RPD)
+*/
+#define RPD 0x00000001 /* Receive Poll Demand */
+
+/*
+** DC21040 Receive Ring Base Address Register (DE4X5_RRBA)
+*/
+#define RRBA 0xfffffffc /* RX Descriptor List Start Address */
+
+/*
+** DC21040 Transmit Ring Base Address Register (DE4X5_TRBA)
+*/
+#define TRBA 0xfffffffc /* TX Descriptor List Start Address */
+
+/*
+** Status Register (DE4X5_STS)
+*/
+#define STS_GPI 0x04000000 /* General Purpose Port Interrupt */
+#define STS_BE 0x03800000 /* Bus Error Bits */
+#define STS_TS 0x00700000 /* Transmit Process State */
+#define STS_RS 0x000e0000 /* Receive Process State */
+#define STS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define STS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define STS_ER 0x00004000 /* Early Receive */
+#define STS_FBE 0x00002000 /* Fatal Bus Error */
+#define STS_SE 0x00002000 /* System Error */
+#define STS_LNF 0x00001000 /* Link Fail */
+#define STS_FD 0x00000800 /* Full-Duplex Short Frame Received */
+#define STS_TM 0x00000800 /* Timer Expired (DC21041) */
+#define STS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define STS_AT 0x00000400 /* AUI/TP Pin */
+#define STS_RWT 0x00000200 /* Receive Watchdog Time-Out */
+#define STS_RPS 0x00000100 /* Receive Process Stopped */
+#define STS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define STS_RI 0x00000040 /* Receive Interrupt */
+#define STS_UNF 0x00000020 /* Transmit Underflow */
+#define STS_LNP 0x00000010 /* Link Pass */
+#define STS_ANC 0x00000010 /* Autonegotiation Complete */
+#define STS_TJT 0x00000008 /* Transmit Jabber Time-Out */
+#define STS_TU 0x00000004 /* Transmit Buffer Unavailable */
+#define STS_TPS 0x00000002 /* Transmit Process Stopped */
+#define STS_TI 0x00000001 /* Transmit Interrupt */
+
+#define EB_PAR 0x00000000 /* Parity Error */
+#define EB_MA 0x00800000 /* Master Abort */
+#define EB_TA 0x01000000 /* Target Abort */
+#define EB_RES0 0x01800000 /* Reserved */
+#define EB_RES1 0x02000000 /* Reserved */
+
+#define TS_STOP 0x00000000 /* Stopped */
+#define TS_FTD 0x00100000 /* Fetch Transmit Descriptor */
+#define TS_WEOT 0x00200000 /* Wait for End Of Transmission */
+#define TS_QDAT 0x00300000 /* Queue skb data into TX FIFO */
+#define TS_RES 0x00400000 /* Reserved */
+#define TS_SPKT 0x00500000 /* Setup Packet */
+#define TS_SUSP 0x00600000 /* Suspended */
+#define TS_CLTD 0x00700000 /* Close Transmit Descriptor */
+
+#define RS_STOP 0x00000000 /* Stopped */
+#define RS_FRD 0x00020000 /* Fetch Receive Descriptor */
+#define RS_CEOR 0x00040000 /* Check for End of Receive Packet */
+#define RS_WFRP 0x00060000 /* Wait for Receive Packet */
+#define RS_SUSP 0x00080000 /* Suspended */
+#define RS_CLRD 0x000a0000 /* Close Receive Descriptor */
+#define RS_FLUSH 0x000c0000 /* Flush RX FIFO */
+#define RS_QRFS 0x000e0000 /* Queue RX FIFO into RX Skb */
+
+#define INT_CANCEL 0x0001ffff /* For zeroing all interrupt sources */
+
+/*
+** Operation Mode Register (DE4X5_OMR)
+*/
+#define OMR_SC 0x80000000 /* Special Capture Effect Enable */
+#define OMR_RA 0x40000000 /* Receive All */
+#define OMR_SDP 0x02000000 /* SD Polarity - MUST BE ASSERTED */
+#define OMR_SCR 0x01000000 /* Scrambler Mode */
+#define OMR_PCS 0x00800000 /* PCS Function */
+#define OMR_TTM 0x00400000 /* Transmit Threshold Mode */
+#define OMR_SF 0x00200000 /* Store and Forward */
+#define OMR_HBD 0x00080000 /* HeartBeat Disable */
+#define OMR_PS 0x00040000 /* Port Select */
+#define OMR_CA 0x00020000 /* Capture Effect Enable */
+#define OMR_BP 0x00010000 /* Back Pressure */
+#define OMR_TR 0x0000c000 /* Threshold Control Bits */
+#define OMR_ST 0x00002000 /* Start/Stop Transmission Command */
+#define OMR_FC 0x00001000 /* Force Collision Mode */
+#define OMR_OM 0x00000c00 /* Operating Mode */
+#define OMR_FDX 0x00000200 /* Full Duplex Mode */
+#define OMR_FKD 0x00000100 /* Flaky Oscillator Disable */
+#define OMR_PM 0x00000080 /* Pass All Multicast */
+#define OMR_PR 0x00000040 /* Promiscuous Mode */
+#define OMR_SB 0x00000020 /* Start/Stop Backoff Counter */
+#define OMR_IF 0x00000010 /* Inverse Filtering */
+#define OMR_PB 0x00000008 /* Pass Bad Frames */
+#define OMR_HO 0x00000004 /* Hash Only Filtering Mode */
+#define OMR_SR 0x00000002 /* Start/Stop Receive */
+#define OMR_HP 0x00000001 /* Hash/Perfect Receive Filtering Mode */
+
+#define TR_72 0x00000000 /* Threshold set to 72 (128) bytes */
+#define TR_96 0x00004000 /* Threshold set to 96 (256) bytes */
+#define TR_128 0x00008000 /* Threshold set to 128 (512) bytes */
+#define TR_160 0x0000c000 /* Threshold set to 160 (1024) bytes */
+
+#define OMR_DEF (OMR_SDP)
+#define OMR_SIA (OMR_SDP | OMR_TTM)
+#define OMR_SYM (OMR_SDP | OMR_SCR | OMR_PCS | OMR_HBD | OMR_PS)
+#define OMR_MII_10 (OMR_SDP | OMR_TTM | OMR_PS)
+#define OMR_MII_100 (OMR_SDP | OMR_HBD | OMR_PS)
+
+/*
+** DC21040 Interrupt Mask Register (DE4X5_IMR)
+*/
+#define IMR_GPM 0x04000000 /* General Purpose Port Mask */
+#define IMR_NIM 0x00010000 /* Normal Interrupt Summary Mask */
+#define IMR_AIM 0x00008000 /* Abnormal Interrupt Summary Mask */
+#define IMR_ERM 0x00004000 /* Early Receive Mask */
+#define IMR_FBM 0x00002000 /* Fatal Bus Error Mask */
+#define IMR_SEM 0x00002000 /* System Error Mask */
+#define IMR_LFM 0x00001000 /* Link Fail Mask */
+#define IMR_FDM 0x00000800 /* Full-Duplex (Short Frame) Mask */
+#define IMR_TMM 0x00000800 /* Timer Expired Mask (DC21041) */
+#define IMR_ETM 0x00000400 /* Early Transmit Interrupt Mask */
+#define IMR_ATM 0x00000400 /* AUI/TP Switch Mask */
+#define IMR_RWM 0x00000200 /* Receive Watchdog Time-Out Mask */
+#define IMR_RSM 0x00000100 /* Receive Stopped Mask */
+#define IMR_RUM 0x00000080 /* Receive Buffer Unavailable Mask */
+#define IMR_RIM 0x00000040 /* Receive Interrupt Mask */
+#define IMR_UNM 0x00000020 /* Underflow Interrupt Mask */
+#define IMR_ANM 0x00000010 /* Autonegotiation Complete Mask */
+#define IMR_LPM 0x00000010 /* Link Pass */
+#define IMR_TJM 0x00000008 /* Transmit Time-Out Jabber Mask */
+#define IMR_TUM 0x00000004 /* Transmit Buffer Unavailable Mask */
+#define IMR_TSM 0x00000002 /* Transmission Stopped Mask */
+#define IMR_TIM 0x00000001 /* Transmit Interrupt Mask */
+
+/*
+** Missed Frames and FIFO Overflow Counters (DE4X5_MFC)
+*/
+#define MFC_FOCO 0x10000000 /* FIFO Overflow Counter Overflow Bit */
+#define MFC_FOC 0x0ffe0000 /* FIFO Overflow Counter Bits */
+#define MFC_OVFL 0x00010000 /* Missed Frames Counter Overflow Bit */
+#define MFC_CNTR 0x0000ffff /* Missed Frames Counter Bits */
+#define MFC_FOCM 0x1ffe0000 /* FIFO Overflow Counter Mask */
+
+/*
+** DC21040 Ethernet Address PROM (DE4X5_APROM)
+*/
+#define APROM_DN 0x80000000 /* Data Not Valid */
+#define APROM_DT 0x000000ff /* Address Byte */
+
+/*
+** DC21041 Boot/Ethernet Address ROM (DE4X5_BROM)
+*/
+#define BROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */
+#define BROM_RD 0x00004000 /* Read from Boot ROM */
+#define BROM_WR 0x00002000 /* Write to Boot ROM */
+#define BROM_BR 0x00001000 /* Select Boot ROM when set */
+#define BROM_SR 0x00000800 /* Select Serial ROM when set */
+#define BROM_REG 0x00000400 /* External Register Select */
+#define BROM_DT 0x000000ff /* Data Byte */
+
+/*
+** DC21041 Serial/Ethernet Address ROM (DE4X5_SROM, DE4X5_MII)
+*/
+#define MII_MDI 0x00080000 /* MII Management Data In */
+#define MII_MDO 0x00060000 /* MII Management Mode/Data Out */
+#define MII_MRD 0x00040000 /* MII Management Define Read Mode */
+#define MII_MWR 0x00000000 /* MII Management Define Write Mode */
+#define MII_MDT 0x00020000 /* MII Management Data Out */
+#define MII_MDC 0x00010000 /* MII Management Clock */
+#define MII_RD 0x00004000 /* Read from MII */
+#define MII_WR 0x00002000 /* Write to MII */
+#define MII_SEL 0x00000800 /* Select MII when RESET */
+
+#define SROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */
+#define SROM_RD 0x00004000 /* Read from Boot ROM */
+#define SROM_WR 0x00002000 /* Write to Boot ROM */
+#define SROM_BR 0x00001000 /* Select Boot ROM when set */
+#define SROM_SR 0x00000800 /* Select Serial ROM when set */
+#define SROM_REG 0x00000400 /* External Register Select */
+#define SROM_DT 0x000000ff /* Data Byte */
+
+#define DT_OUT 0x00000008 /* Serial Data Out */
+#define DT_IN 0x00000004 /* Serial Data In */
+#define DT_CLK 0x00000002 /* Serial ROM Clock */
+#define DT_CS 0x00000001 /* Serial ROM Chip Select */
+
+#define MII_PREAMBLE 0xffffffff /* MII Management Preamble */
+#define MII_TEST 0xaaaaaaaa /* MII Test Signal */
+#define MII_STRD 0x06 /* Start of Frame+Op Code: use low nibble */
+#define MII_STWR 0x0a /* Start of Frame+Op Code: use low nibble */
+
+#define MII_CR 0x00 /* MII Management Control Register */
+#define MII_SR 0x01 /* MII Management Status Register */
+#define MII_ID0 0x02 /* PHY Identifier Register 0 */
+#define MII_ID1 0x03 /* PHY Identifier Register 1 */
+#define MII_ANA 0x04 /* Auto Negotiation Advertisement */
+#define MII_ANLPA 0x05 /* Auto Negotiation Link Partner Ability */
+#define MII_ANE 0x06 /* Auto Negotiation Expansion */
+#define MII_ANP 0x07 /* Auto Negotiation Next Page TX */
+
+#define DE4X5_MAX_MII 32 /* Maximum address of MII PHY devices */
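The driver bit-bangs management frames through the MDC/MDO/MDI bits of DE4X5_MII using the standard IEEE 802.3 clause 22 sequence: 32 preamble ones (MII_PREAMBLE), the start-of-frame/opcode nibble (MII_STRD for reads, MII_STWR for writes), a 5-bit PHY address, a 5-bit register address, then 16 data bits. A sketch of packing the 14 command bits that follow the preamble; the helper name is assumed, and the real driver serialises these fields with its own routines:

    /* Illustrative only: SOF+opcode, PHY and register address packed
     * into the 14 bits shifted out MSB first after the preamble. */
    static u32 mii_cmd(u32 sof_op, u32 phyaddr, u32 phyreg)
    {
            return (sof_op << 10) | ((phyaddr & 0x1f) << 5) | (phyreg & 0x1f);
    }
    /* A read of MII_SR on PHY 1 starts as mii_cmd(MII_STRD, 1, MII_SR). */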
+
+/*
+** MII Management Control Register
+*/
+#define MII_CR_RST 0x8000 /* RESET the PHY chip */
+#define MII_CR_LPBK 0x4000 /* Loopback enable */
+#define MII_CR_SPD 0x2000 /* 0: 10Mb/s; 1: 100Mb/s */
+#define MII_CR_10 0x0000 /* Set 10Mb/s */
+#define MII_CR_100 0x2000 /* Set 100Mb/s */
+#define MII_CR_ASSE 0x1000 /* Auto Speed Select Enable */
+#define MII_CR_PD 0x0800 /* Power Down */
+#define MII_CR_ISOL 0x0400 /* Isolate Mode */
+#define MII_CR_RAN 0x0200 /* Restart Auto Negotiation */
+#define MII_CR_FDM 0x0100 /* Full Duplex Mode */
+#define MII_CR_CTE 0x0080 /* Collision Test Enable */
+
+/*
+** MII Management Status Register
+*/
+#define MII_SR_T4C 0x8000 /* 100BASE-T4 capable */
+#define MII_SR_TXFD 0x4000 /* 100BASE-TX Full Duplex capable */
+#define MII_SR_TXHD 0x2000 /* 100BASE-TX Half Duplex capable */
+#define MII_SR_TFD 0x1000 /* 10BASE-T Full Duplex capable */
+#define MII_SR_THD 0x0800 /* 10BASE-T Half Duplex capable */
+#define MII_SR_ASSC 0x0020 /* Auto Speed Selection Complete */
+#define MII_SR_RFD 0x0010 /* Remote Fault Detected */
+#define MII_SR_ANC 0x0008 /* Auto Negotiation capable */
+#define MII_SR_LKS 0x0004 /* Link Status */
+#define MII_SR_JABD 0x0002 /* Jabber Detect */
+#define MII_SR_XC 0x0001 /* Extended Capabilities */
+
+/*
+** MII Management Auto Negotiation Advertisement Register
+*/
+#define MII_ANA_TAF 0x03e0 /* Technology Ability Field */
+#define MII_ANA_T4AM 0x0200 /* T4 Technology Ability Mask */
+#define MII_ANA_TXAM 0x0180 /* TX Technology Ability Mask */
+#define MII_ANA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */
+#define MII_ANA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */
+#define MII_ANA_100M 0x0380 /* 100Mb Technology Ability Mask */
+#define MII_ANA_10M 0x0060 /* 10Mb Technology Ability Mask */
+#define MII_ANA_CSMA 0x0001 /* CSMA-CD Capable */
+
+/*
+** MII Management Auto Negotiation Remote End Register
+*/
+#define MII_ANLPA_NP 0x8000 /* Next Page (Enable) */
+#define MII_ANLPA_ACK 0x4000 /* Remote Acknowledge */
+#define MII_ANLPA_RF 0x2000 /* Remote Fault */
+#define MII_ANLPA_TAF 0x03e0 /* Technology Ability Field */
+#define MII_ANLPA_T4AM 0x0200 /* T4 Technology Ability Mask */
+#define MII_ANLPA_TXAM 0x0180 /* TX Technology Ability Mask */
+#define MII_ANLPA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */
+#define MII_ANLPA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */
+#define MII_ANLPA_100M 0x0380 /* 100Mb Technology Ability Mask */
+#define MII_ANLPA_10M 0x0060 /* 10Mb Technology Ability Mask */
+#define MII_ANLPA_CSMA 0x0001 /* CSMA-CD Capable */
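Once autonegotiation completes, the usual resolution is to AND the local advertisement (MII_ANA) with the partner's ability word (MII_ANLPA) and take the highest common technology. A hedged sketch using the individual TAF bit values implied by the masks above (0x0200 T4, 0x0100 100TX-FD, 0x0080 100TX-HD, 0x0040 10BT-FD, 0x0020 10BT-HD); the priority order follows the standard, not necessarily the driver's exact code path:

    /* Illustrative only: pick the best mode common to both ends. */
    static int resolve_nway(u16 ana, u16 anlpa)
    {
            u16 common = ana & anlpa & MII_ANA_TAF;

            if (common & MII_ANA_T4AM) return 4;    /* 100BASE-T4    */
            if (common & 0x0100)       return 3;    /* 100BASE-TX FD */
            if (common & 0x0080)       return 2;    /* 100BASE-TX HD */
            if (common & 0x0040)       return 1;    /* 10BASE-T FD   */
            return 0;                               /* 10BASE-T HD   */
    }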
+
+/*
+** SROM Media Definitions (ABG SROM Section)
+*/
+#define MEDIA_NWAY 0x0080 /* Nway (Auto Negotiation) on PHY */
+#define MEDIA_MII 0x0040 /* MII Present on the adapter */
+#define MEDIA_FIBRE 0x0008 /* Fibre Media present */
+#define MEDIA_AUI 0x0004 /* AUI Media present */
+#define MEDIA_TP 0x0002 /* TP Media present */
+#define MEDIA_BNC 0x0001 /* BNC Media present */
+
+/*
+** SROM Definitions (Digital Semiconductor Format)
+*/
+#define SROM_SSVID 0x0000 /* Sub-system Vendor ID offset */
+#define SROM_SSID 0x0002 /* Sub-system ID offset */
+#define SROM_CISPL 0x0004 /* CardBus CIS Pointer low offset */
+#define SROM_CISPH 0x0006 /* CardBus CIS Pointer high offset */
+#define SROM_IDCRC 0x0010 /* ID Block CRC offset */
+#define SROM_RSVD2 0x0011 /* ID Reserved 2 offset */
+#define SROM_SFV 0x0012 /* SROM Format Version offset */
+#define SROM_CCNT 0x0013 /* Controller Count offset */
+#define SROM_HWADD 0x0014 /* Hardware Address offset */
+#define SROM_MRSVD 0x007c /* Manufacturer Reserved offset */
+#define SROM_CRC 0x007e /* SROM CRC offset */
+
+/*
+** SROM Media Connection Definitions
+*/
+#define SROM_10BT 0x0000 /* 10BASE-T half duplex */
+#define SROM_10BTN 0x0100 /* 10BASE-T with Nway */
+#define SROM_10BTF 0x0204 /* 10BASE-T full duplex */
+#define SROM_10BTNLP 0x0400 /* 10BASE-T without Link Pass test */
+#define SROM_10B2 0x0001 /* 10BASE-2 (BNC) */
+#define SROM_10B5 0x0002 /* 10BASE-5 (AUI) */
+#define SROM_100BTH 0x0003 /* 100BASE-T half duplex */
+#define SROM_100BTF 0x0205 /* 100BASE-T full duplex */
+#define SROM_100BT4 0x0006 /* 100BASE-T4 */
+#define SROM_100BFX 0x0007 /* 100BASE-FX half duplex (Fiber) */
+#define SROM_M10BT 0x0009 /* MII 10BASE-T half duplex */
+#define SROM_M10BTF 0x020a /* MII 10BASE-T full duplex */
+#define SROM_M100BT 0x000d /* MII 100BASE-T half duplex */
+#define SROM_M100BTF 0x020e /* MII 100BASE-T full duplex */
+#define SROM_M100BT4 0x000f /* MII 100BASE-T4 */
+#define SROM_M100BF 0x0010 /* MII 100BASE-FX half duplex */
+#define SROM_M100BFF 0x0211 /* MII 100BASE-FX full duplex */
+#define SROM_PDA 0x0800 /* Powerup & Dynamic Autosense */
+#define SROM_PAO 0x8800 /* Powerup Autosense Only */
+#define SROM_NSMI 0xffff /* No Selected Media Information */
+
+/*
+** SROM Media Definitions
+*/
+#define SROM_10BASET 0x0000 /* 10BASE-T half duplex */
+#define SROM_10BASE2 0x0001 /* 10BASE-2 (BNC) */
+#define SROM_10BASE5 0x0002 /* 10BASE-5 (AUI) */
+#define SROM_100BASET 0x0003 /* 100BASE-T half duplex */
+#define SROM_10BASETF 0x0004 /* 10BASE-T full duplex */
+#define SROM_100BASETF 0x0005 /* 100BASE-T full duplex */
+#define SROM_100BASET4 0x0006 /* 100BASE-T4 */
+#define SROM_100BASEF 0x0007 /* 100BASE-FX half duplex */
+#define SROM_100BASEFF 0x0008 /* 100BASE-FX full duplex */
+
+#define BLOCK_LEN 0x7f /* Extended blocks length mask */
+#define EXT_FIELD 0x40 /* Extended blocks extension field bit */
+#define MEDIA_CODE 0x3f /* Extended blocks media code mask */
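These masks drive the SROM info-block walker: a block whose first byte has the format-indicator bit set (COMPACT_FI/BLOCK0_FI, defined just below) is an extended block carrying its own length, otherwise it is a fixed-size compact block. A simplified sketch of stepping to the next block; the real parser also interprets the media code as it goes:

    /* Simplified: advance over one SROM media info block. */
    static u8 *next_infoblock(u8 *p)
    {
            if (*p & BLOCK0_FI)                  /* extended format block */
                    return p + (*p & BLOCK_LEN) + 1;
            return p + COMPACT_LEN;              /* compact format block  */
    }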
+
+/*
+** SROM Compact Format Block Masks
+*/
+#define COMPACT_FI 0x80 /* Format Indicator */
+#define COMPACT_LEN 0x04 /* Length */
+#define COMPACT_MC 0x3f /* Media Code */
+
+/*
+** SROM Extended Format Block Type 0 Masks
+*/
+#define BLOCK0_FI 0x80 /* Format Indicator */
+#define BLOCK0_MCS 0x80 /* Media Code byte Sign */
+#define BLOCK0_MC 0x3f /* Media Code */
+
+/*
+** DC21040 Full Duplex Register (DE4X5_FDR)
+*/
+#define FDR_FDACV 0x0000ffff /* Full Duplex Auto Configuration Value */
+
+/*
+** DC21041 General Purpose Timer Register (DE4X5_GPT)
+*/
+#define GPT_CON 0x00010000 /* One shot: 0, Continuous: 1 */
+#define GPT_VAL 0x0000ffff /* Timer Value */
+
+/*
+** DC21140 General Purpose Register (DE4X5_GEP) (hardware dependent bits)
+*/
+/* Valid ONLY for DE500 hardware */
+#define GEP_LNP 0x00000080 /* Link Pass (input) */
+#define GEP_SLNK 0x00000040 /* SYM LINK (input) */
+#define GEP_SDET 0x00000020 /* Signal Detect (input) */
+#define GEP_HRST 0x00000010 /* Hard RESET (to PHY) (output) */
+#define GEP_FDXD 0x00000008 /* Full Duplex Disable (output) */
+#define GEP_PHYL 0x00000004 /* PHY Loopback (output) */
+#define GEP_FLED 0x00000002 /* Force Activity LED on (output) */
+#define GEP_MODE 0x00000001 /* 0: 10Mb/s, 1: 100Mb/s */
+#define GEP_INIT 0x0000011f /* Setup inputs (0) and outputs (1) */
+#define GEP_CTRL 0x00000100 /* GEP control bit */
+
+/*
+** SIA Register Defaults
+*/
+#define CSR13 0x00000001
+#define CSR14 0x0003ff7f /* Autonegotiation disabled */
+#define CSR15 0x00000008
+
+/*
+** SIA Status Register (DE4X5_SISR)
+*/
+#define SISR_LPC 0xffff0000 /* Link Partner's Code Word */
+#define SISR_LPN 0x00008000 /* Link Partner Negotiable */
+#define SISR_ANS 0x00007000 /* Auto Negotiation Arbitration State */
+#define SISR_NSN 0x00000800 /* Non Stable NLPs Detected (DC21041) */
+#define SISR_TRF 0x00000800 /* Transmit Remote Fault */
+#define SISR_NSND 0x00000400 /* Non Stable NLPs Detected (DC21142) */
+#define SISR_ANR_FDS 0x00000400 /* Auto Negotiate Restart/Full Duplex Sel.*/
+#define SISR_TRA 0x00000200 /* 10BASE-T Receive Port Activity */
+#define SISR_NRA 0x00000200 /* Non Selected Port Receive Activity */
+#define SISR_ARA 0x00000100 /* AUI Receive Port Activity */
+#define SISR_SRA 0x00000100 /* Selected Port Receive Activity */
+#define SISR_DAO 0x00000080 /* PLL All One */
+#define SISR_DAZ 0x00000040 /* PLL All Zero */
+#define SISR_DSP 0x00000020 /* PLL Self-Test Pass */
+#define SISR_DSD 0x00000010 /* PLL Self-Test Done */
+#define SISR_APS 0x00000008 /* Auto Polarity State */
+#define SISR_LKF 0x00000004 /* Link Fail Status */
+#define SISR_LS10 0x00000004 /* 10Mb/s Link Fail Status */
+#define SISR_NCR 0x00000002 /* Network Connection Error */
+#define SISR_LS100 0x00000002 /* 100Mb/s Link Fail Status */
+#define SISR_PAUI 0x00000001 /* AUI_TP Indication */
+#define SISR_MRA 0x00000001 /* MII Receive Port Activity */
+
+#define ANS_NDIS 0x00000000 /* Nway disable */
+#define ANS_TDIS 0x00001000 /* Transmit Disable */
+#define ANS_ADET 0x00002000 /* Ability Detect */
+#define ANS_ACK 0x00003000 /* Acknowledge */
+#define ANS_CACK 0x00004000 /* Complete Acknowledge */
+#define ANS_NWOK 0x00005000 /* Nway OK - FLP Link Good */
+#define ANS_LCHK 0x00006000 /* Link Check */
+
+#define SISR_RST 0x00000301 /* CSR12 reset */
+#define SISR_ANR 0x00001301 /* Autonegotiation restart */
+
+/*
+** SIA Connectivity Register (DE4X5_SICR)
+*/
+#define SICR_SDM 0xffff0000 /* SIA Diagnostics Mode */
+#define SICR_OE57 0x00008000 /* Output Enable 5 6 7 */
+#define SICR_OE24 0x00004000 /* Output Enable 2 4 */
+#define SICR_OE13 0x00002000 /* Output Enable 1 3 */
+#define SICR_IE 0x00001000 /* Input Enable */
+#define SICR_EXT 0x00000000 /* SIA MUX Select External SIA Mode */
+#define SICR_D_SIA 0x00000400 /* SIA MUX Select Diagnostics - SIA Sigs */
+#define SICR_DPLL 0x00000800 /* SIA MUX Select Diagnostics - DPLL Sigs */
+#define SICR_APLL 0x00000a00 /* SIA MUX Select Diagnostics - APLL Sigs */
+#define SICR_D_RxM 0x00000c00 /* SIA MUX Select Diagnostics - RxM Sigs */
+#define SICR_M_RxM 0x00000d00 /* SIA MUX Select Diagnostics - RxM Sigs */
+#define SICR_LNKT 0x00000e00 /* SIA MUX Select Diagnostics - Link Test */
+#define SICR_SEL 0x00000f00 /* SIA MUX Select AUI or TP with LEDs */
+#define SICR_ASE 0x00000080 /* APLL Start Enable */
+#define SICR_SIM 0x00000040 /* Serial Interface Input Multiplexer */
+#define SICR_ENI 0x00000020 /* Encoder Input Multiplexer */
+#define SICR_EDP 0x00000010 /* SIA PLL External Input Enable */
+#define SICR_AUI 0x00000008 /* 10Base-T (0) or AUI (1) */
+#define SICR_CAC 0x00000004 /* CSR Auto Configuration */
+#define SICR_PS 0x00000002 /* Pin AUI/TP Selection */
+#define SICR_SRL 0x00000001 /* SIA Reset */
+#define SIA_RESET 0x00000000 /* SIA Reset Value */
+
+/*
+** SIA Transmit and Receive Register (DE4X5_STRR)
+*/
+#define STRR_TAS 0x00008000 /* 10Base-T/AUI Autosensing Enable */
+#define STRR_SPP 0x00004000 /* Set Polarity Plus */
+#define STRR_APE 0x00002000 /* Auto Polarity Enable */
+#define STRR_LTE 0x00001000 /* Link Test Enable */
+#define STRR_SQE 0x00000800 /* Signal Quality Enable */
+#define STRR_CLD 0x00000400 /* Collision Detect Enable */
+#define STRR_CSQ 0x00000200 /* Collision Squelch Enable */
+#define STRR_RSQ 0x00000100 /* Receive Squelch Enable */
+#define STRR_ANE 0x00000080 /* Auto Negotiate Enable */
+#define STRR_HDE 0x00000040 /* Half Duplex Enable */
+#define STRR_CPEN 0x00000030 /* Compensation Enable */
+#define STRR_LSE 0x00000008 /* Link Pulse Send Enable */
+#define STRR_DREN 0x00000004 /* Driver Enable */
+#define STRR_LBK 0x00000002 /* Loopback Enable */
+#define STRR_ECEN 0x00000001 /* Encoder Enable */
+#define STRR_RESET 0xffffffff /* Reset value for STRR */
+
+/*
+** SIA General Register (DE4X5_SIGR)
+*/
+#define SIGR_RMI 0x40000000 /* Receive Match Interrupt */
+#define SIGR_GI1 0x20000000 /* General Port Interrupt 1 */
+#define SIGR_GI0 0x10000000 /* General Port Interrupt 0 */
+#define SIGR_CWE 0x08000000 /* Control Write Enable */
+#define SIGR_RME 0x04000000 /* Receive Match Enable */
+#define SIGR_GEI1 0x02000000 /* GEP Interrupt Enable on Port 1 */
+#define SIGR_GEI0 0x01000000 /* GEP Interrupt Enable on Port 0 */
+#define SIGR_LGS3 0x00800000 /* LED/GEP3 Select */
+#define SIGR_LGS2 0x00400000 /* LED/GEP2 Select */
+#define SIGR_LGS1 0x00200000 /* LED/GEP1 Select */
+#define SIGR_LGS0 0x00100000 /* LED/GEP0 Select */
+#define SIGR_MD 0x000f0000 /* General Purpose Mode and Data */
+#define SIGR_LV2 0x00008000 /* General Purpose LED2 value */
+#define SIGR_LE2 0x00004000 /* General Purpose LED2 enable */
+#define SIGR_FRL 0x00002000 /* Force Receiver Low */
+#define SIGR_DPST 0x00001000 /* PLL Self Test Start */
+#define SIGR_LSD 0x00000800 /* LED Stretch Disable */
+#define SIGR_FLF 0x00000400 /* Force Link Fail */
+#define SIGR_FUSQ 0x00000200 /* Force Unsquelch */
+#define SIGR_TSCK 0x00000100 /* Test Clock */
+#define SIGR_LV1 0x00000080 /* General Purpose LED1 value */
+#define SIGR_LE1 0x00000040 /* General Purpose LED1 enable */
+#define SIGR_RWR 0x00000020 /* Receive Watchdog Release */
+#define SIGR_RWD 0x00000010 /* Receive Watchdog Disable */
+#define SIGR_ABM 0x00000008 /* BNC: 0, AUI:1 */
+#define SIGR_JCK 0x00000004 /* Jabber Clock */
+#define SIGR_HUJ 0x00000002 /* Host Unjab */
+#define SIGR_JBD 0x00000001 /* Jabber Disable */
+#define SIGR_RESET 0xffff0000 /* Reset value for SIGR */
+
+/*
+** Receive Descriptor Bit Summary
+*/
+#define R_OWN 0x80000000 /* Own Bit */
+#define RD_FF 0x40000000 /* Filtering Fail */
+#define RD_FL 0x3fff0000 /* Frame Length */
+#define RD_ES 0x00008000 /* Error Summary */
+#define RD_LE 0x00004000 /* Length Error */
+#define RD_DT 0x00003000 /* Data Type */
+#define RD_RF 0x00000800 /* Runt Frame */
+#define RD_MF 0x00000400 /* Multicast Frame */
+#define RD_FS 0x00000200 /* First Descriptor */
+#define RD_LS 0x00000100 /* Last Descriptor */
+#define RD_TL 0x00000080 /* Frame Too Long */
+#define RD_CS 0x00000040 /* Collision Seen */
+#define RD_FT 0x00000020 /* Frame Type */
+#define RD_RJ 0x00000010 /* Receive Watchdog */
+#define RD_RE 0x00000008 /* Report on MII Error */
+#define RD_DB 0x00000004 /* Dribbling Bit */
+#define RD_CE 0x00000002 /* CRC Error */
+#define RD_OF 0x00000001 /* Overflow */
+
+#define RD_RER 0x02000000 /* Receive End Of Ring */
+#define RD_RCH 0x01000000 /* Second Address Chained */
+#define RD_RBS2 0x003ff800 /* Buffer 2 Size */
+#define RD_RBS1 0x000007ff /* Buffer 1 Size */
+
+/*
+** Transmit Descriptor Bit Summary
+*/
+#define T_OWN 0x80000000 /* Own Bit */
+#define TD_ES 0x00008000 /* Error Summary */
+#define TD_TO 0x00004000 /* Transmit Jabber Time-Out */
+#define TD_LO 0x00000800 /* Loss Of Carrier */
+#define TD_NC 0x00000400 /* No Carrier */
+#define TD_LC 0x00000200 /* Late Collision */
+#define TD_EC 0x00000100 /* Excessive Collisions */
+#define TD_HF 0x00000080 /* Heartbeat Fail */
+#define TD_CC 0x00000078 /* Collision Counter */
+#define TD_LF 0x00000004 /* Link Fail */
+#define TD_UF 0x00000002 /* Underflow Error */
+#define TD_DE 0x00000001 /* Deferred */
+
+#define TD_IC 0x80000000 /* Interrupt On Completion */
+#define TD_LS 0x40000000 /* Last Segment */
+#define TD_FS 0x20000000 /* First Segment */
+#define TD_FT1 0x10000000 /* Filtering Type */
+#define TD_SET 0x08000000 /* Setup Packet */
+#define TD_AC 0x04000000 /* Add CRC Disable */
+#define TD_TER 0x02000000 /* Transmit End Of Ring */
+#define TD_TCH 0x01000000 /* Second Address Chained */
+#define TD_DPD 0x00800000 /* Disabled Padding */
+#define TD_FT0 0x00400000 /* Filtering Type */
+#define TD_TBS2 0x003ff800 /* Buffer 2 Size */
+#define TD_TBS1 0x000007ff /* Buffer 1 Size */
+
+#define PERFECT_F 0x00000000
+#define HASH_F TD_FT0
+#define INVERSE_F TD_FT1
+#define HASH_O_F (TD_FT1 | TD_FT0)
+
+/*
+** Media / mode state machine definitions
+** User selectable:
+*/
+#define TP 0x0040 /* 10Base-T (now equiv to _10Mb) */
+#define TP_NW 0x0002 /* 10Base-T with Nway */
+#define BNC 0x0004 /* Thinwire */
+#define AUI 0x0008 /* Thickwire */
+#define BNC_AUI 0x0010 /* BNC/AUI on DC21040 indistinguishable */
+#define _10Mb 0x0040 /* 10Mb/s Ethernet */
+#define _100Mb 0x0080 /* 100Mb/s Ethernet */
+#define AUTO 0x4000 /* Auto sense the media or speed */
+
+/*
+** Internal states
+*/
+#define NC 0x0000 /* No Connection */
+#define ANS 0x0020 /* Intermediate AutoNegotiation State */
+#define SPD_DET 0x0100 /* Parallel speed detection */
+#define INIT 0x0200 /* Initial state */
+#define EXT_SIA 0x0400 /* External SIA for motherboard chip */
+#define ANS_SUSPECT 0x0802 /* Suspect the ANS (TP) port is down */
+#define TP_SUSPECT 0x0803 /* Suspect the TP port is down */
+#define BNC_AUI_SUSPECT 0x0804 /* Suspect the BNC or AUI port is down */
+#define EXT_SIA_SUSPECT 0x0805 /* Suspect the EXT SIA port is down */
+#define BNC_SUSPECT 0x0806 /* Suspect the BNC port is down */
+#define AUI_SUSPECT 0x0807 /* Suspect the AUI port is down */
+#define MII 0x1000 /* MII on the 21143 */
+
+#define TIMER_CB 0x80000000 /* Timer callback detection */
+
+/*
+** DE4X5 DEBUG Options
+*/
+#define DEBUG_NONE 0x0000 /* No DEBUG messages */
+#define DEBUG_VERSION 0x0001 /* Print version message */
+#define DEBUG_MEDIA 0x0002 /* Print media messages */
+#define DEBUG_TX 0x0004 /* Print TX (queue_pkt) messages */
+#define DEBUG_RX 0x0008 /* Print RX (de4x5_rx) messages */
+#define DEBUG_SROM 0x0010 /* Print SROM messages */
+#define DEBUG_MII 0x0020 /* Print MII messages */
+#define DEBUG_OPEN 0x0040 /* Print de4x5_open() messages */
+#define DEBUG_CLOSE 0x0080 /* Print de4x5_close() messages */
+#define DEBUG_PCICFG 0x0100
+#define DEBUG_ALL 0x01ff
+
+/*
+** Miscellaneous
+*/
+#define PCI 0
+#define EISA 1
+
+#define HASH_TABLE_LEN 512 /* Bits */
+#define HASH_BITS 0x01ff /* 9 LS bits */
+
+#define SETUP_FRAME_LEN 192 /* Bytes */
+#define IMPERF_PA_OFFSET 156 /* Bytes */
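In HASH_PERF mode the chip keeps one perfect address plus the 512-bit multicast hash table above; the table index is the 9 least-significant bits of the little-endian CRC-32 of the multicast address, and the perfect address lands at IMPERF_PA_OFFSET within the 192-byte setup frame. A sketch of the index computation (ether_crc_le() is the kernel helper from <linux/crc32.h>; that de4x5 computes it in exactly this shape is an assumption):

    #include <linux/crc32.h>
    #include <linux/etherdevice.h>

    /* Index into the 512-bit multicast hash table for the setup frame. */
    static u16 de4x5_hash_index(const u8 *mc_addr)
    {
            return ether_crc_le(ETH_ALEN, mc_addr) & HASH_BITS;
    }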
+
+#define POLL_DEMAND 1
+
+#define LOST_MEDIA_THRESHOLD 3
+
+#define MASK_INTERRUPTS 1
+#define UNMASK_INTERRUPTS 0
+
+#define DE4X5_STRLEN 8
+
+#define DE4X5_INIT 0 /* Initialisation time */
+#define DE4X5_RUN 1 /* Run time */
+
+#define DE4X5_SAVE_STATE 0
+#define DE4X5_RESTORE_STATE 1
+
+/*
+** Address Filtering Modes
+*/
+#define PERFECT 0 /* 16 perfect physical addresses */
+#define HASH_PERF 1 /* 1 perfect, 512 multicast addresses */
+#define PERFECT_REJ 2 /* Reject 16 perfect physical addresses */
+#define ALL_HASH 3 /* Hashes all physical & multicast addrs */
+
+#define ALL 0 /* Clear out all the setup frame */
+#define PHYS_ADDR_ONLY 1 /* Update the physical address only */
+
+/*
+** Adapter state
+*/
+#define INITIALISED 0 /* After h/w initialised and mem alloc'd */
+#define CLOSED 1 /* Ready for opening */
+#define OPEN 2 /* Running */
+
+/*
+** Various wait times
+*/
+#define PDET_LINK_WAIT 1200 /* msecs to wait for link detect bits */
+#define ANS_FINISH_WAIT 1000 /* msecs to wait for link detect bits */
+
+/*
+** IEEE OUIs for various PHY vendor/chip combos - Reg 2 values only. Since
+** the vendors seem split 50-50 on how to calculate the OUI register values
+** anyway, just reading Reg2 seems reasonable for now [see de4x5_get_oui()].
+*/
+#define NATIONAL_TX 0x2000
+#define BROADCOM_T4 0x03e0
+#define SEEQ_T4 0x0016
+#define CYPRESS_T4 0x0014
+
+/*
+** Speed Selection stuff
+*/
+#define SET_10Mb {\
+ if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
+ omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\
+ if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\
+ mii_wr(MII_CR_10|(lp->fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
+ }\
+ omr |= ((lp->fdx ? OMR_FDX : 0) | OMR_TTM);\
+ outl(omr, DE4X5_OMR);\
+ if (!lp->useSROM) lp->cache.gep = 0;\
+ } else if (lp->useSROM && !lp->useMII) {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ omr |= (lp->fdx ? OMR_FDX : 0);\
+ outl(omr | (lp->infoblock_csr6 & ~(OMR_SCR | OMR_HBD)), DE4X5_OMR);\
+ } else {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ omr |= (lp->fdx ? OMR_FDX : 0);\
+ outl(omr | OMR_SDP | OMR_TTM, DE4X5_OMR);\
+ lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD);\
+ gep_wr(lp->cache.gep, dev);\
+ }\
+}
+
+#define SET_100Mb {\
+ if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
+ int fdx=0;\
+ if (lp->phy[lp->active].id == NATIONAL_TX) {\
+ mii_wr(mii_rd(0x18, lp->phy[lp->active].addr, DE4X5_MII) & ~0x2000,\
+ 0x18, lp->phy[lp->active].addr, DE4X5_MII);\
+ }\
+ omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\
+ sr = mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);\
+ if (!(sr & MII_ANA_T4AM) && lp->fdx) fdx=1;\
+ if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\
+ mii_wr(MII_CR_100|(fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
+ }\
+ if (fdx) omr |= OMR_FDX;\
+ outl(omr, DE4X5_OMR);\
+ if (!lp->useSROM) lp->cache.gep = 0;\
+ } else if (lp->useSROM && !lp->useMII) {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ omr |= (lp->fdx ? OMR_FDX : 0);\
+ outl(omr | lp->infoblock_csr6, DE4X5_OMR);\
+ } else {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ omr |= (lp->fdx ? OMR_FDX : 0);\
+ outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS | OMR_SCR, DE4X5_OMR);\
+ lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD) | GEP_MODE;\
+ gep_wr(lp->cache.gep, dev);\
+ }\
+}
+
+/* FIX ME so I don't jam 10Mb networks */
+#define SET_100Mb_PDET {\
+ if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
+ mii_wr(MII_CR_100|MII_CR_ASSE, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
+ omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ outl(omr, DE4X5_OMR);\
+ } else if (lp->useSROM && !lp->useMII) {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ outl(omr, DE4X5_OMR);\
+ } else {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS, DE4X5_OMR);\
+ lp->cache.gep = (GEP_FDXD | GEP_MODE);\
+ gep_wr(lp->cache.gep, dev);\
+ }\
+}
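SET_10Mb, SET_100Mb and SET_100Mb_PDET are deliberately non-hygienic statement macros: they read and write dev, lp, omr, sr and (through the DE4X5_* register macros) iobase straight from the expansion site. A call-site sketch, with the surrounding context assumed from de4x5.c:

    {
            struct de4x5_private *lp = netdev_priv(dev);
            u_long iobase = dev->base_addr;
            s32 omr, sr;

            SET_100Mb;      /* expands in place using lp, omr, sr, iobase */
    }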
+
+/*
+** Include the IOCTL stuff
+*/
+#include <linux/sockios.h>
+
+struct de4x5_ioctl {
+ unsigned short cmd; /* Command to run */
+ unsigned short len; /* Length of the data buffer */
+ unsigned char __user *data; /* Pointer to the data buffer */
+};
+
+/*
+** Recognised commands for the driver
+*/
+#define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */
+#define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */
+/* 0x03 and 0x04 were used previously and are now obsolete. Don't use them. */
+#define DE4X5_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
+#define DE4X5_GET_MCA 0x06 /* Get a multicast address */
+#define DE4X5_SET_MCA 0x07 /* Set a multicast address */
+#define DE4X5_CLR_MCA 0x08 /* Clear a multicast address */
+#define DE4X5_MCA_EN 0x09 /* Enable a multicast address group */
+#define DE4X5_GET_STATS 0x0a /* Get the driver statistics */
+#define DE4X5_CLR_STATS 0x0b /* Zero out the driver statistics */
+#define DE4X5_GET_OMR 0x0c /* Get the OMR Register contents */
+#define DE4X5_SET_OMR 0x0d /* Set the OMR Register contents */
+#define DE4X5_GET_REG 0x0e /* Get the DE4X5 Registers */
+
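Userspace reaches these commands through a private ioctl that carries struct de4x5_ioctl. A hedged sketch, assuming the classic SIOCDEVPRIVATE path with ifr_data pointing at the request block (outside the kernel the __user annotation on the data member evaporates):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>

    /* Sketch: read the hardware address from the driver. */
    static int get_hwaddr(int sock, const char *ifname, unsigned char mac[6])
    {
            struct de4x5_ioctl ioc = {
                    .cmd = DE4X5_GET_HWADDR, .len = 6, .data = mac,
            };
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&ioc;
            return ioctl(sock, SIOCDEVPRIVATE, &ifr);
    }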
+#define MOTO_SROM_BUG (lp->active == 8 && (get_unaligned_le32(dev->dev_addr) & 0x00ffffff) == 0x3e0008)
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
new file mode 100644
index 000000000000..17b11ee1745a
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -0,0 +1,2253 @@
+/*
+ A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
+ ethernet driver for Linux.
+ Copyright (C) 1997 Sten Wang
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ DAVICOM Web-Site: www.davicom.com.tw
+
+ Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
+ Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
+
+ (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
+
+ Marcelo Tosatti <marcelo@conectiva.com.br> :
+ Made it compile in 2.3 (device to net_device)
+
+ Alan Cox <alan@lxorguk.ukuu.org.uk> :
+ Cleaned up for kernel merge.
+ Removed the back compatibility support
+ Reformatted, fixing spelling etc as I went
+ Removed IRQ 0-15 assumption
+
+ Jeff Garzik <jgarzik@pobox.com> :
+ Updated to use new PCI driver API.
+ Resource usage cleanups.
+ Report driver version to user.
+
+ Tobias Ringstrom <tori@unhappy.mine.nu> :
+ Cleaned up and added SMP safety. Thanks go to Jeff Garzik,
+ Andrew Morton and Frank Davis for the SMP safety fixes.
+
+ Vojtech Pavlik <vojtech@suse.cz> :
+ Cleaned up pointer arithmetic.
+ Fixed a lot of 64bit issues.
+ Cleaned up printk()s a bit.
+ Fixed some obvious big endian problems.
+
+ Tobias Ringstrom <tori@unhappy.mine.nu> :
+ Use time_after for jiffies calculation. Added ethtool
+ support. Updated PCI resource allocation. Do not
+ forget to unmap PCI mapped skbs.
+
+ Alan Cox <alan@lxorguk.ukuu.org.uk>
+ Added new PCI identifiers provided by Clear Zhang at ALi
+ for their 1563 ethernet device.
+
+ TODO
+
+ Check on 64 bit boxes.
+ Check and fix on big endian boxes.
+
+ Test and make sure PCI latency is now correct for all cases.
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME "dmfe"
+#define DRV_VERSION "1.36.4"
+#define DRV_RELDATE "2002-01-17"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+
+#ifdef CONFIG_TULIP_DM910X
+#include <linux/of.h>
+#endif
+
+
+/* Board/System/Debug information/definition ---------------- */
+#define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */
+#define PCI_DM9102_ID 0x91021282 /* Davicom DM9102 ID */
+#define PCI_DM9100_ID 0x91001282 /* Davicom DM9100 ID */
+#define PCI_DM9009_ID 0x90091282 /* Davicom DM9009 ID */
+
+#define DM9102_IO_SIZE 0x80
+#define DM9102A_IO_SIZE 0x100
+#define TX_MAX_SEND_CNT 0x1 /* Maximum tx packets at a time */
+#define TX_DESC_CNT 0x10 /* Allocated Tx descriptors */
+#define RX_DESC_CNT 0x20 /* Allocated Rx descriptors */
+#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */
+#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */
+#define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT)
+#define TX_BUF_ALLOC 0x600
+#define RX_ALLOC_SIZE 0x620
+#define DM910X_RESET 1
+#define CR0_DEFAULT 0x00E00000 /* TX & RX burst mode */
+#define CR6_DEFAULT 0x00080000 /* HD */
+#define CR7_DEFAULT 0x180c1
+#define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */
+#define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */
+#define MAX_PACKET_SIZE 1514
+#define DMFE_MAX_MULTICAST 14
+#define RX_COPY_SIZE 100
+#define MAX_CHECK_PACKET 0x8000
+#define DM9801_NOISE_FLOOR 8
+#define DM9802_NOISE_FLOOR 5
+
+#define DMFE_WOL_LINKCHANGE 0x20000000
+#define DMFE_WOL_SAMPLEPACKET 0x10000000
+#define DMFE_WOL_MAGICPACKET 0x08000000
+
+
+#define DMFE_10MHF 0
+#define DMFE_100MHF 1
+#define DMFE_10MFD 4
+#define DMFE_100MFD 5
+#define DMFE_AUTO 8
+#define DMFE_1M_HPNA 0x10
+
+#define DMFE_TXTH_72 0x400000 /* TX TH 72 byte */
+#define DMFE_TXTH_96 0x404000 /* TX TH 96 byte */
+#define DMFE_TXTH_128 0x0000 /* TX TH 128 byte */
+#define DMFE_TXTH_256 0x4000 /* TX TH 256 byte */
+#define DMFE_TXTH_512 0x8000 /* TX TH 512 byte */
+#define DMFE_TXTH_1K 0xC000 /* TX TH 1K byte */
+
+#define DMFE_TIMER_WUT (jiffies + HZ * 1) /* timer wakeup time: 1 second */
+#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time: 1.5 s */
+#define DMFE_TX_KICK (HZ/2) /* tx packet kick-out time: 0.5 s */
+
+#define DMFE_DBUG(dbug_now, msg, value) \
+ do { \
+ if (dmfe_debug || (dbug_now)) \
+ pr_err("%s %lx\n", \
+ (msg), (long) (value)); \
+ } while (0)
+
+#define SHOW_MEDIA_TYPE(mode) \
+ pr_info("Change Speed to %sMhz %s duplex\n" , \
+ (mode & 1) ? "100":"10", \
+ (mode & 4) ? "full":"half");
+
+
+/* CR9 definition: SROM/MII */
+#define CR9_SROM_READ 0x4800
+#define CR9_SRCS 0x1
+#define CR9_SRCLK 0x2
+#define CR9_CRDOUT 0x8
+#define SROM_DATA_0 0x0
+#define SROM_DATA_1 0x4
+#define PHY_DATA_1 0x20000
+#define PHY_DATA_0 0x00000
+#define MDCLKH 0x10000
+
+#define PHY_POWER_DOWN 0x800
+
+#define SROM_V41_CODE 0x14
+
+#define SROM_CLK_WRITE(data, ioaddr) \
+ outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
+ udelay(5); \
+ outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
+ udelay(5); \
+ outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
+ udelay(5);
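SROM_CLK_WRITE holds the given data bit on the bus and pulses the serial-ROM clock once, with 5 microsecond setup around each edge. Note it is a bare multi-statement macro (no do { } while (0) wrapper), so it is only safe as a standalone statement, e.g. inside a braced loop body. A sketch of shifting a 6-bit word address out MSB first; the function name and loop are illustrative, not the driver's:

    /* Illustrative only: clock a 6-bit SROM word address out, MSB first. */
    static void srom_send_addr(unsigned long ioaddr, u16 offset)
    {
            int i;

            for (i = 5; i >= 0; i--) {
                    u32 bit = ((offset >> i) & 1) ? SROM_DATA_1 : SROM_DATA_0;
                    SROM_CLK_WRITE(bit, ioaddr);
            }
    }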
+
+#define __CHK_IO_SIZE(pci_id, dev_rev) \
+ (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
+ DM9102A_IO_SIZE: DM9102_IO_SIZE)
+
+#define CHK_IO_SIZE(pci_dev) \
+ (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
+ (pci_dev)->revision))
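Because the DM9132 and later DM9102A revisions decode a larger I/O window, CHK_IO_SIZE derives the expected window size from the composed device/vendor ID and the PCI revision. Two worked expansions:

    /* __CHK_IO_SIZE(PCI_DM9102_ID, 0x31) -> DM9102A_IO_SIZE (0x100), rev >= 0x30
     * __CHK_IO_SIZE(PCI_DM9102_ID, 0x10) -> DM9102_IO_SIZE  (0x80)
     * __CHK_IO_SIZE(PCI_DM9132_ID, 0x00) -> DM9102A_IO_SIZE (0x100), ID match */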
+
+/* Sten Check */
+#define DEVICE net_device
+
+/* Structure/enum declaration ------------------------------- */
+struct tx_desc {
+ __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
+ char *tx_buf_ptr; /* Data for us */
+ struct tx_desc *next_tx_desc;
+} __attribute__(( aligned(32) ));
+
+struct rx_desc {
+ __le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
+ struct sk_buff *rx_skb_ptr; /* Data for us */
+ struct rx_desc *next_rx_desc;
+} __attribute__(( aligned(32) ));
+
+struct dmfe_board_info {
+ u32 chip_id; /* Chip vendor/Device ID */
+ u8 chip_revision; /* Chip revision */
+ struct DEVICE *next_dev; /* next device */
+ struct pci_dev *pdev; /* PCI device */
+ spinlock_t lock;
+
+ long ioaddr; /* I/O base address */
+ u32 cr0_data;
+ u32 cr5_data;
+ u32 cr6_data;
+ u32 cr7_data;
+ u32 cr15_data;
+
+ /* pointer for memory physical address */
+ dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */
+ dma_addr_t buf_pool_dma_start; /* Tx buffer pool, dword-aligned start */
+ dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */
+ dma_addr_t first_tx_desc_dma;
+ dma_addr_t first_rx_desc_dma;
+
+ /* descriptor pointer */
+ unsigned char *buf_pool_ptr; /* Tx buffer pool memory */
+ unsigned char *buf_pool_start; /* Tx buffer pool, dword-aligned start */
+ unsigned char *desc_pool_ptr; /* descriptor pool memory */
+ struct tx_desc *first_tx_desc;
+ struct tx_desc *tx_insert_ptr;
+ struct tx_desc *tx_remove_ptr;
+ struct rx_desc *first_rx_desc;
+ struct rx_desc *rx_insert_ptr;
+ struct rx_desc *rx_ready_ptr; /* pointer to next arrived packet */
+ unsigned long tx_packet_cnt; /* transmitted packet count */
+ unsigned long tx_queue_cnt; /* wait to send packet count */
+ unsigned long rx_avail_cnt; /* available rx descriptor count */
+ unsigned long interval_rx_cnt; /* rx packets counted per timer interval */
+
+ u16 HPNA_command; /* For HPNA register 16 */
+ u16 HPNA_timer; /* For HPNA remote device check */
+ u16 dbug_cnt;
+ u16 NIC_capability; /* NIC media capability */
+ u16 PHY_reg4; /* Saved Phyxcer register 4 value */
+
+ u8 HPNA_present; /* 0:none, 1:DM9801, 2:DM9802 */
+ u8 chip_type; /* Keep DM9102A chip type */
+ u8 media_mode; /* user specify media mode */
+ u8 op_mode; /* actual working media mode */
+ u8 phy_addr;
+ u8 wait_reset; /* Hardware failed, need to reset */
+ u8 dm910x_chk_mode; /* Operating mode check */
+ u8 first_in_callback; /* Flag to record state */
+ u8 wol_mode; /* user WOL settings */
+ struct timer_list timer;
+
+ /* Driver defined statistic counter */
+ unsigned long tx_fifo_underrun;
+ unsigned long tx_loss_carrier;
+ unsigned long tx_no_carrier;
+ unsigned long tx_late_collision;
+ unsigned long tx_excessive_collision;
+ unsigned long tx_jabber_timeout;
+ unsigned long reset_count;
+ unsigned long reset_cr8;
+ unsigned long reset_fatal;
+ unsigned long reset_TXtimeout;
+
+ /* NIC SROM data */
+ unsigned char srom[128];
+};
+
+enum dmfe_offsets {
+ DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
+ DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
+ DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
+ DCR15 = 0x78
+};
+
+enum dmfe_CR6_bits {
+ CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
+ CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
+ CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
+};
+
+/* Global variable declaration ----------------------------- */
+static int __devinitdata printed_version;
+static const char version[] __devinitconst =
+ "Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
+
+static int dmfe_debug;
+static unsigned char dmfe_media_mode = DMFE_AUTO;
+static u32 dmfe_cr6_user_set;
+
+/* For module input parameter */
+static int debug;
+static u32 cr6set;
+static unsigned char mode = 8;
+static u8 chkmode = 1;
+static u8 HPNA_mode; /* Default: Low Power/High Speed */
+static u8 HPNA_rx_cmd; /* Default: Disable Rx remote command */
+static u8 HPNA_tx_cmd; /* Default: Don't issue remote command */
+static u8 HPNA_NoiseFloor; /* Default: HPNA NoiseFloor */
+static u8 SF_mode; /* Special Function: 1:VLAN, 2:RX Flow Control
+ 4: TX pause packet */
+
+
+/* function declaration ------------------------------------- */
+static int dmfe_open(struct DEVICE *);
+static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
+static int dmfe_stop(struct DEVICE *);
+static void dmfe_set_filter_mode(struct DEVICE *);
+static const struct ethtool_ops netdev_ethtool_ops;
+static u16 read_srom_word(long, int);
+static irqreturn_t dmfe_interrupt(int, void *);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void poll_dmfe (struct net_device *dev);
+#endif
+static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
+static void allocate_rx_buffer(struct dmfe_board_info *);
+static void update_cr6(u32, unsigned long);
+static void send_filter_frame(struct DEVICE *);
+static void dm9132_id_table(struct DEVICE *);
+static u16 phy_read(unsigned long, u8, u8, u32);
+static void phy_write(unsigned long, u8, u8, u16, u32);
+static void phy_write_1bit(unsigned long, u32);
+static u16 phy_read_1bit(unsigned long);
+static u8 dmfe_sense_speed(struct dmfe_board_info *);
+static void dmfe_process_mode(struct dmfe_board_info *);
+static void dmfe_timer(unsigned long);
+static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
+static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
+static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
+static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
+static void dmfe_dynamic_reset(struct DEVICE *);
+static void dmfe_free_rxbuffer(struct dmfe_board_info *);
+static void dmfe_init_dm910x(struct DEVICE *);
+static void dmfe_parse_srom(struct dmfe_board_info *);
+static void dmfe_program_DM9801(struct dmfe_board_info *, int);
+static void dmfe_program_DM9802(struct dmfe_board_info *);
+static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
+static void dmfe_set_phyxcer(struct dmfe_board_info *);
+
+/* DM910X network board routine ---------------------------- */
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = dmfe_open,
+ .ndo_stop = dmfe_stop,
+ .ndo_start_xmit = dmfe_start_xmit,
+ .ndo_set_rx_mode = dmfe_set_filter_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = poll_dmfe,
+#endif
+};
+
+/*
+ * Search for a DM910X board, allocate space and register it
+ */
+
+static int __devinit dmfe_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct dmfe_board_info *db; /* board information structure */
+ struct net_device *dev;
+ u32 pci_pmr;
+ int i, err;
+
+ DMFE_DBUG(0, "dmfe_init_one()", 0);
+
+ if (!printed_version++)
+ pr_info("%s\n", version);
+
+ /*
+ * SPARC on-board DM910x chips should be handled by the main
+ * tulip driver, except for early DM9100s.
+ */
+#ifdef CONFIG_TULIP_DM910X
+ if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
+ ent->driver_data == PCI_DM9102_ID) {
+ struct device_node *dp = pci_device_to_OF_node(pdev);
+
+ if (dp && of_get_property(dp, "local-mac-address", NULL)) {
+ pr_info("skipping on-board DM910x (use tulip)\n");
+ return -ENODEV;
+ }
+ }
+#endif
+
+ /* Init network device */
+ dev = alloc_etherdev(sizeof(*db));
+ if (dev == NULL)
+ return -ENOMEM;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ pr_warn("32-bit PCI DMA not available\n");
+ err = -ENODEV;
+ goto err_out_free;
+ }
+
+ /* Enable Master/IO access, Disable memory access */
+ err = pci_enable_device(pdev);
+ if (err)
+ goto err_out_free;
+
+ if (!pci_resource_start(pdev, 0)) {
+ pr_err("I/O base is zero\n");
+ err = -ENODEV;
+ goto err_out_disable;
+ }
+
+ if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
+ pr_err("Allocated I/O size too small\n");
+ err = -ENODEV;
+ goto err_out_disable;
+ }
+
+#if 0 /* pci_{enable_device,set_master} sets minimum latency for us now */
+
+ /* Set Latency Timer 80h */
+ /* FIXME: setting values > 32 breaks some SiS 559x stuff.
+ Need a PCI quirk.. */
+
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
+#endif
+
+ if (pci_request_regions(pdev, DRV_NAME)) {
+ pr_err("Failed to request PCI regions\n");
+ err = -ENODEV;
+ goto err_out_disable;
+ }
+
+ /* Init system & device */
+ db = netdev_priv(dev);
+
+ /* Allocate Tx/Rx descriptor memory */
+ db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
+ DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
+ if (!db->desc_pool_ptr)
+ goto err_out_res;
+
+ db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
+ TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
+ if (!db->buf_pool_ptr)
+ goto err_out_free_desc;
+
+ db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
+ db->first_tx_desc_dma = db->desc_pool_dma_ptr;
+ db->buf_pool_start = db->buf_pool_ptr;
+ db->buf_pool_dma_start = db->buf_pool_dma_ptr;
+
+ db->chip_id = ent->driver_data;
+ db->ioaddr = pci_resource_start(pdev, 0);
+ db->chip_revision = pdev->revision;
+ db->wol_mode = 0;
+
+ db->pdev = pdev;
+
+ dev->base_addr = db->ioaddr;
+ dev->irq = pdev->irq;
+ pci_set_drvdata(pdev, dev);
+ dev->netdev_ops = &netdev_ops;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+ netif_carrier_off(dev);
+ spin_lock_init(&db->lock);
+
+ pci_read_config_dword(pdev, 0x50, &pci_pmr);
+ pci_pmr &= 0x70000;
+ if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) )
+ db->chip_type = 1; /* DM9102A E3 */
+ else
+ db->chip_type = 0;
+
+ /* Read 64 words of SROM data */
+ for (i = 0; i < 64; i++)
+ ((__le16 *) db->srom)[i] =
+ cpu_to_le16(read_srom_word(db->ioaddr, i));
+
+ /* Set Node address */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = db->srom[20 + i];
+
+ err = register_netdev (dev);
+ if (err)
+ goto err_out_free_buf;
+
+ dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
+ ent->driver_data >> 16,
+ pci_name(pdev), dev->dev_addr, dev->irq);
+
+ pci_set_master(pdev);
+
+ return 0;
+
+err_out_free_buf:
+ pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ db->buf_pool_ptr, db->buf_pool_dma_ptr);
+err_out_free_desc:
+ pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+ db->desc_pool_ptr, db->desc_pool_dma_ptr);
+err_out_res:
+ pci_release_regions(pdev);
+err_out_disable:
+ pci_disable_device(pdev);
+err_out_free:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(dev);
+
+ return err;
+}
+
+
+static void __devexit dmfe_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct dmfe_board_info *db = netdev_priv(dev);
+
+ DMFE_DBUG(0, "dmfe_remove_one()", 0);
+
+ if (dev) {
+
+ unregister_netdev(dev);
+
+ pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
+ DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
+ db->desc_pool_dma_ptr);
+ pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ db->buf_pool_ptr, db->buf_pool_dma_ptr);
+ pci_release_regions(pdev);
+ free_netdev(dev); /* free board information */
+
+ pci_set_drvdata(pdev, NULL);
+ }
+
+ DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
+}
+
+
+/*
+ * Open the interface.
+ * The interface is opened whenever "ifconfig" activates it.
+ */
+
+static int dmfe_open(struct DEVICE *dev)
+{
+ int ret;
+ struct dmfe_board_info *db = netdev_priv(dev);
+
+ DMFE_DBUG(0, "dmfe_open", 0);
+
+ ret = request_irq(dev->irq, dmfe_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (ret)
+ return ret;
+
+ /* system variable init */
+ db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
+ db->tx_packet_cnt = 0;
+ db->tx_queue_cnt = 0;
+ db->rx_avail_cnt = 0;
+ db->wait_reset = 0;
+
+ db->first_in_callback = 0;
+ db->NIC_capability = 0xf; /* All capability*/
+ db->PHY_reg4 = 0x1e0;
+
+ /* CR6 operation mode decision */
+ if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
+ (db->chip_revision >= 0x30) ) {
+ db->cr6_data |= DMFE_TXTH_256;
+ db->cr0_data = CR0_DEFAULT;
+ db->dm910x_chk_mode=4; /* Enter the normal mode */
+ } else {
+ db->cr6_data |= CR6_SFT; /* Store & Forward mode */
+ db->cr0_data = 0;
+ db->dm910x_chk_mode = 1; /* Enter the check mode */
+ }
+
+ /* Initialize DM910X board */
+ dmfe_init_dm910x(dev);
+
+ /* Activate system interface */
+ netif_wake_queue(dev);
+
+ /* Set and activate a timer process */
+ init_timer(&db->timer);
+ db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
+ db->timer.data = (unsigned long)dev;
+ db->timer.function = dmfe_timer;
+ add_timer(&db->timer);
+
+ return 0;
+}
+
+
+/* Initialize DM910X board
+ * Reset DM910X board
+ * Initialize TX/Rx descriptor chain structure
+ * Send the set-up frame
+ * Enable Tx/Rx machine
+ */
+
+static void dmfe_init_dm910x(struct DEVICE *dev)
+{
+ struct dmfe_board_info *db = netdev_priv(dev);
+ unsigned long ioaddr = db->ioaddr;
+
+ DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
+
+ /* Reset DM910x MAC controller */
+ outl(DM910X_RESET, ioaddr + DCR0); /* RESET MAC */
+ udelay(100);
+ outl(db->cr0_data, ioaddr + DCR0);
+ udelay(5);
+
+ /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
+ db->phy_addr = 1;
+
+ /* Parse SROM and media mode */
+ dmfe_parse_srom(db);
+ db->media_mode = dmfe_media_mode;
+
+ /* RESET Phyxcer Chip by GPR port bit 7 */
+ outl(0x180, ioaddr + DCR12); /* Let bit 7 output port */
+ if (db->chip_id == PCI_DM9009_ID) {
+ outl(0x80, ioaddr + DCR12); /* Issue RESET signal */
+ mdelay(300); /* Delay 300 ms */
+ }
+ outl(0x0, ioaddr + DCR12); /* Clear RESET signal */
+
+ /* Process Phyxcer Media Mode */
+ if ( !(db->media_mode & 0x10) ) /* Force 1M mode */
+ dmfe_set_phyxcer(db);
+
+ /* Media Mode Process */
+ if ( !(db->media_mode & DMFE_AUTO) )
+ db->op_mode = db->media_mode; /* Force Mode */
+
+ /* Initialize Transmit/Receive descriptors and CR3/4 */
+ dmfe_descriptor_init(db, ioaddr);
+
+ /* Init CR6 to program DM910x operation */
+ update_cr6(db->cr6_data, ioaddr);
+
+ /* Send setup frame */
+ if (db->chip_id == PCI_DM9132_ID)
+ dm9132_id_table(dev); /* DM9132 */
+ else
+ send_filter_frame(dev); /* DM9102/DM9102A */
+
+ /* Init CR7, interrupt active bit */
+ db->cr7_data = CR7_DEFAULT;
+ outl(db->cr7_data, ioaddr + DCR7);
+
+ /* Init CR15, Tx jabber and Rx watchdog timer */
+ outl(db->cr15_data, ioaddr + DCR15);
+
+ /* Enable DM910X Tx/Rx function */
+ db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
+ update_cr6(db->cr6_data, ioaddr);
+}
+
+
+/*
+ * Hardware start transmission.
+ * Send a packet to media from the upper layer.
+ */
+
+static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
+ struct DEVICE *dev)
+{
+ struct dmfe_board_info *db = netdev_priv(dev);
+ struct tx_desc *txptr;
+ unsigned long flags;
+
+ DMFE_DBUG(0, "dmfe_start_xmit", 0);
+
+ /* Too large packet check */
+ if (skb->len > MAX_PACKET_SIZE) {
+ pr_err("big packet = %d\n", (u16)skb->len);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* Resource flag check */
+ netif_stop_queue(dev);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ /* No Tx resource check; this should never happen normally */
+ if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
+ spin_unlock_irqrestore(&db->lock, flags);
+ pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Disable NIC interrupt */
+ outl(0, dev->base_addr + DCR7);
+
+ /* transmit this packet */
+ txptr = db->tx_insert_ptr;
+ skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
+ txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
+
+ /* Point to next transmit free descriptor */
+ db->tx_insert_ptr = txptr->next_tx_desc;
+
+ /* Transmit Packet Process */
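+ /*
+ * Descriptor hand-off: setting bit 31 of TDES0 passes ownership of
+ * the descriptor to the NIC, and a write to DCR1 issues a Tx poll
+ * demand so the chip re-reads its descriptor list. If the in-flight
+ * limit is reached, the packet is only queued here and will be
+ * kicked off later from dmfe_free_tx_pkt().
+ */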
+ if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
+ txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
+ db->tx_packet_cnt++; /* Ready to send */
+ outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
+ dev->trans_start = jiffies; /* saved time stamp */
+ } else {
+ db->tx_queue_cnt++; /* queue TX packet */
+ outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
+ }
+
+ /* Tx resource check */
+ if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
+ netif_wake_queue(dev);
+
+ /* Restore CR7 to enable interrupt */
+ spin_unlock_irqrestore(&db->lock, flags);
+ outl(db->cr7_data, dev->base_addr + DCR7);
+
+ /* free this SKB */
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+
+/*
+ * Stop the interface.
+ * The interface is stopped when it is brought down.
+ */
+
+static int dmfe_stop(struct DEVICE *dev)
+{
+ struct dmfe_board_info *db = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+
+ DMFE_DBUG(0, "dmfe_stop", 0);
+
+ /* disable system */
+ netif_stop_queue(dev);
+
+ /* delete the timer */
+ del_timer_sync(&db->timer);
+
+ /* Reset & stop DM910X board */
+ outl(DM910X_RESET, ioaddr + DCR0);
+ udelay(5);
+ phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
+
+ /* free interrupt */
+ free_irq(dev->irq, dev);
+
+ /* free allocated rx buffer */
+ dmfe_free_rxbuffer(db);
+
+#if 0
+ /* show statistic counter */
+ printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
+ db->tx_fifo_underrun, db->tx_excessive_collision,
+ db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
+ db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
+ db->reset_fatal, db->reset_TXtimeout);
+#endif
+
+ return 0;
+}
+
+
+/*
+ * DM9102 interrupt handler
+ * Pass received packets to the upper layer, free transmitted packets
+ */
+
+static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
+{
+ struct DEVICE *dev = dev_id;
+ struct dmfe_board_info *db = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ DMFE_DBUG(0, "dmfe_interrupt()", 0);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ /* Got DM910X status */
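+ /*
+ * The status bits in DCR5 are write-one-to-clear (as on other
+ * tulip-class chips), so echoing the value just read acknowledges
+ * every pending interrupt source at once.
+ */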
+ db->cr5_data = inl(ioaddr + DCR5);
+ outl(db->cr5_data, ioaddr + DCR5);
+ if ( !(db->cr5_data & 0xc1) ) {
+ spin_unlock_irqrestore(&db->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ /* Disable all interrupt in CR7 to solve the interrupt edge problem */
+ outl(0, ioaddr + DCR7);
+
+ /* Check system status */
+ if (db->cr5_data & 0x2000) {
+ /* A system bus error happened */
+ DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
+ db->reset_fatal++;
+ db->wait_reset = 1; /* Need to RESET */
+ spin_unlock_irqrestore(&db->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ /* Receive the incoming packet */
+ if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
+ dmfe_rx_packet(dev, db);
+
+ /* reallocate rx descriptor buffer */
+ if (db->rx_avail_cnt<RX_DESC_CNT)
+ allocate_rx_buffer(db);
+
+ /* Free the transmitted descriptor */
+ if ( db->cr5_data & 0x01)
+ dmfe_free_tx_pkt(dev, db);
+
+ /* Mode Check */
+ if (db->dm910x_chk_mode & 0x2) {
+ db->dm910x_chk_mode = 0x4;
+ db->cr6_data |= 0x100;
+ update_cr6(db->cr6_data, db->ioaddr);
+ }
+
+ /* Restore CR7 to enable interrupt mask */
+ outl(db->cr7_data, ioaddr + DCR7);
+
+ spin_unlock_irqrestore(&db->lock, flags);
+ return IRQ_HANDLED;
+}
+
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+
+static void poll_dmfe (struct net_device *dev)
+{
+ /* disable_irq here is not very nice, but with the lockless
+ interrupt handler we have no other choice. */
+ disable_irq(dev->irq);
+ dmfe_interrupt (dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+/*
+ * Free TX resource after TX complete
+ */
+
+static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
+{
+ struct tx_desc *txptr;
+ unsigned long ioaddr = dev->base_addr;
+ u32 tdes0;
+
+ txptr = db->tx_remove_ptr;
+ while(db->tx_packet_cnt) {
+ tdes0 = le32_to_cpu(txptr->tdes0);
+ if (tdes0 & 0x80000000)
+ break;
+
+ /* A packet transmission completed */
+ db->tx_packet_cnt--;
+ dev->stats.tx_packets++;
+
+ /* Transmit statistic counter */
+ if ( tdes0 != 0x7fffffff ) {
+ dev->stats.collisions += (tdes0 >> 3) & 0xf;
+ dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
+ if (tdes0 & TDES0_ERR_MASK) {
+ dev->stats.tx_errors++;
+
+ if (tdes0 & 0x0002) { /* UnderRun */
+ db->tx_fifo_underrun++;
+ if ( !(db->cr6_data & CR6_SFT) ) {
+ db->cr6_data = db->cr6_data | CR6_SFT;
+ update_cr6(db->cr6_data, db->ioaddr);
+ }
+ }
+ if (tdes0 & 0x0100)
+ db->tx_excessive_collision++;
+ if (tdes0 & 0x0200)
+ db->tx_late_collision++;
+ if (tdes0 & 0x0400)
+ db->tx_no_carrier++;
+ if (tdes0 & 0x0800)
+ db->tx_loss_carrier++;
+ if (tdes0 & 0x4000)
+ db->tx_jabber_timeout++;
+ }
+ }
+
+ txptr = txptr->next_tx_desc;
+ }/* End of while */
+
+ /* Update TX remove pointer to next */
+ db->tx_remove_ptr = txptr;
+
+ /* Send the Tx packet in queue */
+ if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
+ txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
+ db->tx_packet_cnt++; /* Ready to send */
+ db->tx_queue_cnt--;
+ outl(0x1, ioaddr + DCR1); /* Issue Tx polling */
+ dev->trans_start = jiffies; /* saved time stamp */
+ }
+
+ /* Resource available check */
+ if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
+ netif_wake_queue(dev); /* Active upper layer, send again */
+}
+
+
+/*
+ * Calculate the CRC value of the Rx packet
+ * flag = 1 : return the reverse CRC (for the received packet CRC)
+ * 0 : return the normal CRC (for Hash Table index)
+ */
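+/*
+ * Implementation note: crc32(~0, ...) is the kernel CRC-32 (IEEE 802.3
+ * polynomial) seeded with all ones. Received frames carry the bitwise
+ * complement of that value as their FCS, hence the inversion when flag
+ * is set; the uninverted form feeds the multicast hash in
+ * dm9132_id_table().
+ */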
+
+static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
+{
+ u32 crc = crc32(~0, Data, Len);
+ if (flag) crc = ~crc;
+ return crc;
+}
+
+
+/*
+ * Receive incoming packets and pass them to the upper layer
+ */
+
+static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
+{
+ struct rx_desc *rxptr;
+ struct sk_buff *skb, *newskb;
+ int rxlen;
+ u32 rdes0;
+
+ rxptr = db->rx_ready_ptr;
+
+ while(db->rx_avail_cnt) {
+ rdes0 = le32_to_cpu(rxptr->rdes0);
+ if (rdes0 & 0x80000000) /* packet owner check */
+ break;
+
+ db->rx_avail_cnt--;
+ db->interval_rx_cnt++;
+
+ pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
+ RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+
+ if ( (rdes0 & 0x300) != 0x300) {
+ /* A packet without First/Last flag */
+ /* reuse this SKB */
+ DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
+ dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
+ } else {
+ /* A packet with First/Last flag */
+ rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
+
+ /* error summary bit check */
+ if (rdes0 & 0x8000) {
+ /* This is an error packet */
+ dev->stats.rx_errors++;
+ if (rdes0 & 1)
+ dev->stats.rx_fifo_errors++;
+ if (rdes0 & 2)
+ dev->stats.rx_crc_errors++;
+ if (rdes0 & 0x80)
+ dev->stats.rx_length_errors++;
+ }
+
+ if ( !(rdes0 & 0x8000) ||
+ ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
+ skb = rxptr->rx_skb_ptr;
+
+ /* Check whether the received packet CRC needs verifying */
+ if ( (db->dm910x_chk_mode & 1) &&
+ (cal_CRC(skb->data, rxlen, 1) !=
+ (*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
+ /* Found an erroneous received packet */
+ dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
+ db->dm910x_chk_mode = 3;
+ } else {
+ /* Good packet, send to upper layer */
+ /* Short packets are copied into a new SKB */
+ if ((rxlen < RX_COPY_SIZE) &&
+ ((newskb = dev_alloc_skb(rxlen + 2))
+ != NULL)) {
+
+ skb = newskb;
+ /* size less than COPY_SIZE, allocate a rxlen SKB */
+ skb_reserve(skb, 2); /* 16byte align */
+ skb_copy_from_linear_data(rxptr->rx_skb_ptr,
+ skb_put(skb, rxlen),
+ rxlen);
+ dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
+ } else
+ skb_put(skb, rxlen);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += rxlen;
+ }
+ } else {
+ /* Reuse the SKB buffer when the packet is erroneous */
+ DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
+ dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
+ }
+ }
+
+ rxptr = rxptr->next_rx_desc;
+ }
+
+ db->rx_ready_ptr = rxptr;
+}
+
+/*
+ * Set DM910X multicast address
+ */
+
+static void dmfe_set_filter_mode(struct DEVICE * dev)
+{
+ struct dmfe_board_info *db = netdev_priv(dev);
+ unsigned long flags;
+ int mc_count = netdev_mc_count(dev);
+
+ DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
+ spin_lock_irqsave(&db->lock, flags);
+
+ if (dev->flags & IFF_PROMISC) {
+ DMFE_DBUG(0, "Enable PROM Mode", 0);
+ db->cr6_data |= CR6_PM | CR6_PBF;
+ update_cr6(db->cr6_data, db->ioaddr);
+ spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
+
+ if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
+ DMFE_DBUG(0, "Pass all multicast address", mc_count);
+ db->cr6_data &= ~(CR6_PM | CR6_PBF);
+ db->cr6_data |= CR6_PAM;
+ spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
+
+ DMFE_DBUG(0, "Set multicast address", mc_count);
+ if (db->chip_id == PCI_DM9132_ID)
+ dm9132_id_table(dev); /* DM9132 */
+ else
+ send_filter_frame(dev); /* DM9102/DM9102A */
+ spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/*
+ * Ethtool interface
+ */
+
+static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct dmfe_board_info *np = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ if (np->pdev)
+ strcpy(info->bus_info, pci_name(np->pdev));
+ else
+ sprintf(info->bus_info, "EISA 0x%lx %d",
+ dev->base_addr, dev->irq);
+}
+
+static int dmfe_ethtool_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct dmfe_board_info *db = netdev_priv(dev);
+
+ if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
+ WAKE_ARP | WAKE_MAGICSECURE))
+ return -EOPNOTSUPP;
+
+ db->wol_mode = wolinfo->wolopts;
+ return 0;
+}
+
+static void dmfe_ethtool_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct dmfe_board_info *db = netdev_priv(dev);
+
+ wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
+ wolinfo->wolopts = db->wol_mode;
+}
+
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = dmfe_ethtool_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .set_wol = dmfe_ethtool_set_wol,
+ .get_wol = dmfe_ethtool_get_wol,
+};
+
+/*
+ * A periodic timer routine
+ * Dynamic media sense, allocate Rx buffer...
+ */
+
+static void dmfe_timer(unsigned long data)
+{
+ u32 tmp_cr8;
+ unsigned char tmp_cr12;
+ struct DEVICE *dev = (struct DEVICE *) data;
+ struct dmfe_board_info *db = netdev_priv(dev);
+ unsigned long flags;
+
+ int link_ok, link_ok_phy;
+
+ DMFE_DBUG(0, "dmfe_timer()", 0);
+ spin_lock_irqsave(&db->lock, flags);
+
+ /* Media mode processing when link was OK before entering this routine */
+ if (db->first_in_callback == 0) {
+ db->first_in_callback = 1;
+ if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
+ db->cr6_data &= ~0x40000;
+ update_cr6(db->cr6_data, db->ioaddr);
+ phy_write(db->ioaddr,
+ db->phy_addr, 0, 0x1000, db->chip_id);
+ db->cr6_data |= 0x40000;
+ update_cr6(db->cr6_data, db->ioaddr);
+ db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
+ add_timer(&db->timer);
+ spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
+ }
+
+
+ /* Operating Mode Check */
+ if ( (db->dm910x_chk_mode & 0x1) &&
+ (dev->stats.rx_packets > MAX_CHECK_PACKET) )
+ db->dm910x_chk_mode = 0x4;
+
+ /* Dynamic reset DM910X : system error or transmit time-out */
+ tmp_cr8 = inl(db->ioaddr + DCR8);
+ if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
+ db->reset_cr8++;
+ db->wait_reset = 1;
+ }
+ db->interval_rx_cnt = 0;
+
+ /* TX polling kick monitor */
+ if ( db->tx_packet_cnt &&
+ time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
+ outl(0x1, dev->base_addr + DCR1); /* Tx polling again */
+
+ /* TX Timeout */
+ if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
+ db->reset_TXtimeout++;
+ db->wait_reset = 1;
+ dev_warn(&dev->dev, "Tx timeout - resetting\n");
+ }
+ }
+
+ if (db->wait_reset) {
+ DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
+ db->reset_count++;
+ dmfe_dynamic_reset(dev);
+ db->first_in_callback = 0;
+ db->timer.expires = DMFE_TIMER_WUT;
+ add_timer(&db->timer);
+ spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
+
+ /* Link status check, Dynamic media type change */
+ if (db->chip_id == PCI_DM9132_ID)
+ tmp_cr12 = inb(db->ioaddr + DCR9 + 3); /* DM9132 */
+ else
+ tmp_cr12 = inb(db->ioaddr + DCR12); /* DM9102/DM9102A */
+
+ if ( ((db->chip_id == PCI_DM9102_ID) &&
+ (db->chip_revision == 0x30)) ||
+ ((db->chip_id == PCI_DM9132_ID) &&
+ (db->chip_revision == 0x10)) ) {
+ /* DM9102A Chip */
+ if (tmp_cr12 & 2)
+ link_ok = 0;
+ else
+ link_ok = 1;
+ }
+ else
+ /*0x43 is used instead of 0x3 because bit 6 should represent
+ link status of external PHY */
+ link_ok = (tmp_cr12 & 0x43) ? 1 : 0;
+
+
+ /* If the chip reports that the link has failed, it could be because the
+ external PHY link status pin is not connected correctly to the chip.
+ To be sure, ask the PHY too.
+ */
+
+ /* need a dummy read because of PHY's register latch*/
+ phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
+ link_ok_phy = (phy_read (db->ioaddr,
+ db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;
+
+ if (link_ok_phy != link_ok) {
+ DMFE_DBUG (0, "PHY and chip report different link status", 0);
+ link_ok = link_ok | link_ok_phy;
+ }
+
+ if ( !link_ok && netif_carrier_ok(dev)) {
+ /* Link Failed */
+ DMFE_DBUG(0, "Link Failed", tmp_cr12);
+ netif_carrier_off(dev);
+
+ /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
+ /* Not needed for AUTO or forced 1M HomeRun/LongRun */
+ if ( !(db->media_mode & 0x38) )
+ phy_write(db->ioaddr, db->phy_addr,
+ 0, 0x1000, db->chip_id);
+
+ /* AUTO mode, if INT phyxcer link failed, select EXT device */
+ if (db->media_mode & DMFE_AUTO) {
+ /* 10/100M link failed, use 1M Home-Net */
+ db->cr6_data|=0x00040000; /* bit18=1, MII */
+ db->cr6_data&=~0x00000200; /* bit9=0, HD mode */
+ update_cr6(db->cr6_data, db->ioaddr);
+ }
+ } else if (!netif_carrier_ok(dev)) {
+
+ DMFE_DBUG(0, "Link link OK", tmp_cr12);
+
+ /* Auto Sense Speed */
+ if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
+ netif_carrier_on(dev);
+ SHOW_MEDIA_TYPE(db->op_mode);
+ }
+
+ dmfe_process_mode(db);
+ }
+
+ /* HPNA remote command check */
+ if (db->HPNA_command & 0xf00) {
+ db->HPNA_timer--;
+ if (!db->HPNA_timer)
+ dmfe_HPNA_remote_cmd_chk(db);
+ }
+
+ /* Timer active again */
+ db->timer.expires = DMFE_TIMER_WUT;
+ add_timer(&db->timer);
+ spin_unlock_irqrestore(&db->lock, flags);
+}
+
+
+/*
+ * Dynamic reset the DM910X board
+ * Stop DM910X board
+ * Free Tx/Rx allocated memory
+ * Reset DM910X board
+ * Re-initialize DM910X board
+ */
+
+static void dmfe_dynamic_reset(struct DEVICE *dev)
+{
+ struct dmfe_board_info *db = netdev_priv(dev);
+
+ DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);
+
+ /* Stop MAC controller */
+ db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
+ update_cr6(db->cr6_data, dev->base_addr);
+ outl(0, dev->base_addr + DCR7); /* Disable Interrupt */
+ outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
+
+ /* Disable upper layer interface */
+ netif_stop_queue(dev);
+
+ /* Free Rx Allocate buffer */
+ dmfe_free_rxbuffer(db);
+
+ /* system variable init */
+ db->tx_packet_cnt = 0;
+ db->tx_queue_cnt = 0;
+ db->rx_avail_cnt = 0;
+ netif_carrier_off(dev);
+ db->wait_reset = 0;
+
+ /* Re-initialize DM910X board */
+ dmfe_init_dm910x(dev);
+
+ /* Restart upper layer interface */
+ netif_wake_queue(dev);
+}
+
+
+/*
+ * Free all allocated rx buffers
+ */
+
+static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
+{
+ DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
+
+ /* free allocated rx buffer */
+ while (db->rx_avail_cnt) {
+ dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
+ db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
+ db->rx_avail_cnt--;
+ }
+}
+
+
+/*
+ * Reuse the SK buffer
+ */
+
+static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
+{
+ struct rx_desc *rxptr = db->rx_insert_ptr;
+
+ if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
+ rxptr->rx_skb_ptr = skb;
+ rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
+ skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
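+ /*
+ * Publish the new buffer address before the ownership bit: the
+ * wmb() orders the rdes2 store ahead of the rdes0 store so the
+ * NIC can never see an owned descriptor with a stale DMA address.
+ */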
+ wmb();
+ rxptr->rdes0 = cpu_to_le32(0x80000000);
+ db->rx_avail_cnt++;
+ db->rx_insert_ptr = rxptr->next_rx_desc;
+ } else
+ DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
+}
+
+
+/*
+ * Initialize Transmit/Receive descriptors
+ * using a chain structure, and allocate Tx/Rx buffers
+ */
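+/*
+ * Layout note: the coherent block allocated at probe time holds all
+ * TX_DESC_CNT transmit descriptors followed by the RX_DESC_CNT receive
+ * descriptors. Each tdes3/rdes3 carries the DMA address of the next
+ * descriptor and the last entry points back to the first, so both lists
+ * form closed chains (the chain bit is part of the 0x81000000 and
+ * 0x01000600 tdes1/rdes1 templates below).
+ */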
+
+static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr)
+{
+ struct tx_desc *tmp_tx;
+ struct rx_desc *tmp_rx;
+ unsigned char *tmp_buf;
+ dma_addr_t tmp_tx_dma, tmp_rx_dma;
+ dma_addr_t tmp_buf_dma;
+ int i;
+
+ DMFE_DBUG(0, "dmfe_descriptor_init()", 0);
+
+ /* tx descriptor start pointer */
+ db->tx_insert_ptr = db->first_tx_desc;
+ db->tx_remove_ptr = db->first_tx_desc;
+ outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */
+
+ /* rx descriptor start pointer */
+ db->first_rx_desc = (void *)db->first_tx_desc +
+ sizeof(struct tx_desc) * TX_DESC_CNT;
+
+ db->first_rx_desc_dma = db->first_tx_desc_dma +
+ sizeof(struct tx_desc) * TX_DESC_CNT;
+ db->rx_insert_ptr = db->first_rx_desc;
+ db->rx_ready_ptr = db->first_rx_desc;
+ outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */
+
+ /* Init Transmit chain */
+ tmp_buf = db->buf_pool_start;
+ tmp_buf_dma = db->buf_pool_dma_start;
+ tmp_tx_dma = db->first_tx_desc_dma;
+ for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
+ tmp_tx->tx_buf_ptr = tmp_buf;
+ tmp_tx->tdes0 = cpu_to_le32(0);
+ tmp_tx->tdes1 = cpu_to_le32(0x81000000); /* IC, chain */
+ tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
+ tmp_tx_dma += sizeof(struct tx_desc);
+ tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
+ tmp_tx->next_tx_desc = tmp_tx + 1;
+ tmp_buf = tmp_buf + TX_BUF_ALLOC;
+ tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
+ }
+ (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
+ tmp_tx->next_tx_desc = db->first_tx_desc;
+
+ /* Init Receive descriptor chain */
+ tmp_rx_dma=db->first_rx_desc_dma;
+ for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
+ tmp_rx->rdes0 = cpu_to_le32(0);
+ tmp_rx->rdes1 = cpu_to_le32(0x01000600);
+ tmp_rx_dma += sizeof(struct rx_desc);
+ tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
+ tmp_rx->next_rx_desc = tmp_rx + 1;
+ }
+ (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
+ tmp_rx->next_rx_desc = db->first_rx_desc;
+
+ /* pre-allocate Rx buffer */
+ allocate_rx_buffer(db);
+}
+
+
+/*
+ * Update CR6 value
+ * First stop the DM910X, then write the new value and restart
+ */
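+/*
+ * The Tx/Rx start bits live at CR6 bits 1 and 13 (mask 0x2002, i.e.
+ * CR6_RXSC | CR6_TXSC), which is why both engines are stopped before
+ * the new value is written.
+ */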
+
+static void update_cr6(u32 cr6_data, unsigned long ioaddr)
+{
+ u32 cr6_tmp;
+
+ cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */
+ outl(cr6_tmp, ioaddr + DCR6);
+ udelay(5);
+ outl(cr6_data, ioaddr + DCR6);
+ udelay(5);
+}
+
+
+/*
+ * Send a setup frame for DM9132
+ * This setup frame initializes the DM910X address filter mode
+ */
+
+static void dm9132_id_table(struct DEVICE *dev)
+{
+ struct netdev_hw_addr *ha;
+ u16 * addrptr;
+ unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */
+ u32 hash_val;
+ u16 i, hash_table[4];
+
+ DMFE_DBUG(0, "dm9132_id_table()", 0);
+
+ /* Node address */
+ addrptr = (u16 *) dev->dev_addr;
+ outw(addrptr[0], ioaddr);
+ ioaddr += 4;
+ outw(addrptr[1], ioaddr);
+ ioaddr += 4;
+ outw(addrptr[2], ioaddr);
+ ioaddr += 4;
+
+ /* Clear Hash Table */
+ memset(hash_table, 0, sizeof(hash_table));
+
+ /* broadcast address */
+ hash_table[3] = 0x8000;
+
+ /* the multicast address in Hash Table : 64 bits */
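+ /*
+ * Each multicast address is hashed with CRC-32; the low 6 bits of
+ * the CRC pick one of 64 filter bits spread across four 16-bit
+ * words (hash_val / 16 selects the word, hash_val % 16 the bit).
+ */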
+ netdev_for_each_mc_addr(ha, dev) {
+ hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f;
+ hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
+ }
+
+ /* Write the hash table to MAC MD table */
+ for (i = 0; i < 4; i++, ioaddr += 4)
+ outw(hash_table[i], ioaddr);
+}
+
+
+/*
+ * Send a setup frame for DM9102/DM9102A
+ * This setup frame initializes the DM910X address filter mode
+ */
+
+static void send_filter_frame(struct DEVICE *dev)
+{
+ struct dmfe_board_info *db = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ struct tx_desc *txptr;
+ u16 * addrptr;
+ u32 * suptr;
+ int i;
+
+ DMFE_DBUG(0, "send_filter_frame()", 0);
+
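+ /*
+ * The setup frame is a 192-byte table of sixteen perfect-filter
+ * entries, each three 16-bit words stored in 32-bit slots: our
+ * node address, the broadcast address, then up to 14 multicast
+ * addresses, with unused slots padded with the broadcast address.
+ * The 0xc0 in the tdes1 value set below is that 192-byte length.
+ */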
+ txptr = db->tx_insert_ptr;
+ suptr = (u32 *) txptr->tx_buf_ptr;
+
+ /* Node address */
+ addrptr = (u16 *) dev->dev_addr;
+ *suptr++ = addrptr[0];
+ *suptr++ = addrptr[1];
+ *suptr++ = addrptr[2];
+
+ /* broadcast address */
+ *suptr++ = 0xffff;
+ *suptr++ = 0xffff;
+ *suptr++ = 0xffff;
+
+ /* fit the multicast address */
+ netdev_for_each_mc_addr(ha, dev) {
+ addrptr = (u16 *) ha->addr;
+ *suptr++ = addrptr[0];
+ *suptr++ = addrptr[1];
+ *suptr++ = addrptr[2];
+ }
+
+ for (i = netdev_mc_count(dev); i < 14; i++) {
+ *suptr++ = 0xffff;
+ *suptr++ = 0xffff;
+ *suptr++ = 0xffff;
+ }
+
+ /* prepare the setup frame */
+ db->tx_insert_ptr = txptr->next_tx_desc;
+ txptr->tdes1 = cpu_to_le32(0x890000c0);
+
+ /* Resource Check and Send the setup packet */
+ if (!db->tx_packet_cnt) {
+ /* Resource Empty */
+ db->tx_packet_cnt++;
+ txptr->tdes0 = cpu_to_le32(0x80000000);
+ update_cr6(db->cr6_data | 0x2000, dev->base_addr);
+ outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
+ update_cr6(db->cr6_data, dev->base_addr);
+ dev->trans_start = jiffies;
+ } else
+ db->tx_queue_cnt++; /* Put in TX queue */
+}
+
+
+/*
+ * Allocate rx buffers,
+ * filling as many Rx descriptors as possible
+ */
+
+static void allocate_rx_buffer(struct dmfe_board_info *db)
+{
+ struct rx_desc *rxptr;
+ struct sk_buff *skb;
+
+ rxptr = db->rx_insert_ptr;
+
+ while(db->rx_avail_cnt < RX_DESC_CNT) {
+ if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
+ break;
+ rxptr->rx_skb_ptr = skb; /* FIXME (?) */
+ rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
+ RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+ wmb();
+ rxptr->rdes0 = cpu_to_le32(0x80000000);
+ rxptr = rxptr->next_rx_desc;
+ db->rx_avail_cnt++;
+ }
+
+ db->rx_insert_ptr = rxptr;
+}
+
+
+/*
+ * Read one word data from the serial ROM
+ */
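+/*
+ * The SROM is driven as a 3-wire serial EEPROM through CR9: assert
+ * chip select, clock out the read opcode (110b) and a 6-bit word
+ * offset, then clock in 16 data bits MSB first. The exact EEPROM part
+ * (93C46-style) is an assumption based on this command framing.
+ */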
+
+static u16 read_srom_word(long ioaddr, int offset)
+{
+ int i;
+ u16 srom_data = 0;
+ long cr9_ioaddr = ioaddr + DCR9;
+
+ outl(CR9_SROM_READ, cr9_ioaddr);
+ outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+
+ /* Send the Read Command 110b */
+ SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
+ SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
+ SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
+
+ /* Send the offset */
+ for (i = 5; i >= 0; i--) {
+ srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
+ SROM_CLK_WRITE(srom_data, cr9_ioaddr);
+ }
+
+ outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+
+ for (i = 16; i > 0; i--) {
+ outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
+ udelay(5);
+ srom_data = (srom_data << 1) |
+ ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
+ outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+ udelay(5);
+ }
+
+ outl(CR9_SROM_READ, cr9_ioaddr);
+ return srom_data;
+}
+
+
+/*
+ * Auto sense the media mode
+ */
+
+static u8 dmfe_sense_speed(struct dmfe_board_info * db)
+{
+ u8 ErrFlag = 0;
+ u16 phy_mode;
+
+ /* CR6 bit18=0, select 10/100M */
+ update_cr6( (db->cr6_data & ~0x40000), db->ioaddr);
+
+ phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
+ phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
+
+ if ( (phy_mode & 0x24) == 0x24 ) {
+ if (db->chip_id == PCI_DM9132_ID) /* DM9132 */
+ phy_mode = phy_read(db->ioaddr,
+ db->phy_addr, 7, db->chip_id) & 0xf000;
+ else /* DM9102/DM9102A */
+ phy_mode = phy_read(db->ioaddr,
+ db->phy_addr, 17, db->chip_id) & 0xf000;
+ switch (phy_mode) {
+ case 0x1000: db->op_mode = DMFE_10MHF; break;
+ case 0x2000: db->op_mode = DMFE_10MFD; break;
+ case 0x4000: db->op_mode = DMFE_100MHF; break;
+ case 0x8000: db->op_mode = DMFE_100MFD; break;
+ default: db->op_mode = DMFE_10MHF;
+ ErrFlag = 1;
+ break;
+ }
+ } else {
+ db->op_mode = DMFE_10MHF;
+ DMFE_DBUG(0, "Link Failed :", phy_mode);
+ ErrFlag = 1;
+ }
+
+ return ErrFlag;
+}
+
+
+/*
+ * Set 10/100 phyxcer capability
+ * AUTO mode : phyxcer register4 is NIC capability
+ * Force mode: phyxcer register4 is the force media
+ */
+
+static void dmfe_set_phyxcer(struct dmfe_board_info *db)
+{
+ u16 phy_reg;
+
+ /* Select 10/100M phyxcer */
+ db->cr6_data &= ~0x40000;
+ update_cr6(db->cr6_data, db->ioaddr);
+
+ /* DM9009 Chip: Phyxcer reg18 bit12=0 */
+ if (db->chip_id == PCI_DM9009_ID) {
+ phy_reg = phy_read(db->ioaddr,
+ db->phy_addr, 18, db->chip_id) & ~0x1000;
+
+ phy_write(db->ioaddr,
+ db->phy_addr, 18, phy_reg, db->chip_id);
+ }
+
+ /* Phyxcer capability setting */
+ phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
+
+ if (db->media_mode & DMFE_AUTO) {
+ /* AUTO Mode */
+ phy_reg |= db->PHY_reg4;
+ } else {
+ /* Force Mode */
+ switch(db->media_mode) {
+ case DMFE_10MHF: phy_reg |= 0x20; break;
+ case DMFE_10MFD: phy_reg |= 0x40; break;
+ case DMFE_100MHF: phy_reg |= 0x80; break;
+ case DMFE_100MFD: phy_reg |= 0x100; break;
+ }
+ if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
+ }
+
+ /* Write new capability to Phyxcer Reg4 */
+ if ( !(phy_reg & 0x01e0)) {
+ phy_reg|=db->PHY_reg4;
+ db->media_mode|=DMFE_AUTO;
+ }
+ phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
+
+ /* Restart Auto-Negotiation */
+ if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
+ phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
+ if ( !db->chip_type )
+ phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
+}
+
+
+/*
+ * Process op-mode
+ * AUTO mode : PHY controller in Auto-negotiation Mode
+ * Force mode: PHY controller in force mode with HUB
+ * N-way force capability with SWITCH
+ */
+
+static void dmfe_process_mode(struct dmfe_board_info *db)
+{
+ u16 phy_reg;
+
+ /* Full Duplex Mode Check */
+ if (db->op_mode & 0x4)
+ db->cr6_data |= CR6_FDM; /* Set Full Duplex Bit */
+ else
+ db->cr6_data &= ~CR6_FDM; /* Clear Full Duplex Bit */
+
+ /* Transceiver Selection */
+ if (db->op_mode & 0x10) /* 1M HomePNA */
+ db->cr6_data |= 0x40000;/* External MII select */
+ else
+ db->cr6_data &= ~0x40000;/* Internal 10/100 transceiver */
+
+ update_cr6(db->cr6_data, db->ioaddr);
+
+ /* Needed for 10/100M phyxcer force mode */
+ if ( !(db->media_mode & 0x18)) {
+ /* Force Mode */
+ phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
+ if ( !(phy_reg & 0x1) ) {
+ /* partner without N-Way capability */
+ phy_reg = 0x0;
+ switch(db->op_mode) {
+ case DMFE_10MHF: phy_reg = 0x0; break;
+ case DMFE_10MFD: phy_reg = 0x100; break;
+ case DMFE_100MHF: phy_reg = 0x2000; break;
+ case DMFE_100MFD: phy_reg = 0x2100; break;
+ }
+ phy_write(db->ioaddr,
+ db->phy_addr, 0, phy_reg, db->chip_id);
+ if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
+ mdelay(20);
+ phy_write(db->ioaddr,
+ db->phy_addr, 0, phy_reg, db->chip_id);
+ }
+ }
+}
+
+
+/*
+ * Write a word to Phy register
+ */
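+/*
+ * On chips without a memory-mapped PHY window the MII management frame
+ * is bit-banged through CR9: a preamble of 1s, start bits (01), the
+ * write opcode (01), 5 PHY address bits, 5 register address bits, a
+ * turnaround, then 16 data bits MSB first.
+ */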
+
+static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
+ u16 phy_data, u32 chip_id)
+{
+ u16 i;
+ unsigned long ioaddr;
+
+ if (chip_id == PCI_DM9132_ID) {
+ ioaddr = iobase + 0x80 + offset * 4;
+ outw(phy_data, ioaddr);
+ } else {
+ /* DM9102/DM9102A Chip */
+ ioaddr = iobase + DCR9;
+
+ /* Send 35 synchronization clock pulses (MII preamble) to the Phy controller */
+ for (i = 0; i < 35; i++)
+ phy_write_1bit(ioaddr, PHY_DATA_1);
+
+ /* Send start command(01) to Phy */
+ phy_write_1bit(ioaddr, PHY_DATA_0);
+ phy_write_1bit(ioaddr, PHY_DATA_1);
+
+ /* Send write command(01) to Phy */
+ phy_write_1bit(ioaddr, PHY_DATA_0);
+ phy_write_1bit(ioaddr, PHY_DATA_1);
+
+ /* Send Phy address */
+ for (i = 0x10; i > 0; i = i >> 1)
+ phy_write_1bit(ioaddr,
+ phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
+
+ /* Send register address */
+ for (i = 0x10; i > 0; i = i >> 1)
+ phy_write_1bit(ioaddr,
+ offset & i ? PHY_DATA_1 : PHY_DATA_0);
+
+ /* write transition */
+ phy_write_1bit(ioaddr, PHY_DATA_1);
+ phy_write_1bit(ioaddr, PHY_DATA_0);
+
+ /* Write a word data to PHY controller */
+ for ( i = 0x8000; i > 0; i >>= 1)
+ phy_write_1bit(ioaddr,
+ phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
+ }
+}
+
+
+/*
+ * Read a word data from phy register
+ */
+
+static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
+{
+ int i;
+ u16 phy_data;
+ unsigned long ioaddr;
+
+ if (chip_id == PCI_DM9132_ID) {
+ /* DM9132 Chip */
+ ioaddr = iobase + 0x80 + offset * 4;
+ phy_data = inw(ioaddr);
+ } else {
+ /* DM9102/DM9102A Chip */
+ ioaddr = iobase + DCR9;
+
+ /* Send 35 synchronization clock pulses (MII preamble) to the Phy controller */
+ for (i = 0; i < 35; i++)
+ phy_write_1bit(ioaddr, PHY_DATA_1);
+
+ /* Send start command(01) to Phy */
+ phy_write_1bit(ioaddr, PHY_DATA_0);
+ phy_write_1bit(ioaddr, PHY_DATA_1);
+
+ /* Send read command(10) to Phy */
+ phy_write_1bit(ioaddr, PHY_DATA_1);
+ phy_write_1bit(ioaddr, PHY_DATA_0);
+
+ /* Send Phy address */
+ for (i = 0x10; i > 0; i = i >> 1)
+ phy_write_1bit(ioaddr,
+ phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
+
+ /* Send register address */
+ for (i = 0x10; i > 0; i = i >> 1)
+ phy_write_1bit(ioaddr,
+ offset & i ? PHY_DATA_1 : PHY_DATA_0);
+
+ /* Skip transition state */
+ phy_read_1bit(ioaddr);
+
+ /* read 16bit data */
+ for (phy_data = 0, i = 0; i < 16; i++) {
+ phy_data <<= 1;
+ phy_data |= phy_read_1bit(ioaddr);
+ }
+ }
+
+ return phy_data;
+}
+
+
+/*
+ * Write one bit data to Phy Controller
+ */
+
+static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
+{
+ outl(phy_data, ioaddr); /* MII Clock Low */
+ udelay(1);
+ outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */
+ udelay(1);
+ outl(phy_data, ioaddr); /* MII Clock Low */
+ udelay(1);
+}
+
+
+/*
+ * Read one bit phy data from PHY controller
+ */
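+/*
+ * 0x50000 raises and 0x40000 lowers the management clock in CR9 with
+ * the data line released for reading; the PHY's data-out is sampled
+ * from bit 19 of the register while the clock is high.
+ */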
+
+static u16 phy_read_1bit(unsigned long ioaddr)
+{
+ u16 phy_data;
+
+ outl(0x50000, ioaddr);
+ udelay(1);
+ phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
+ outl(0x40000, ioaddr);
+ udelay(1);
+
+ return phy_data;
+}
+
+
+/*
+ * Parse SROM and media mode
+ */
+
+static void dmfe_parse_srom(struct dmfe_board_info * db)
+{
+ char * srom = db->srom;
+ int dmfe_mode, tmp_reg;
+
+ DMFE_DBUG(0, "dmfe_parse_srom() ", 0);
+
+ /* Init CR15 */
+ db->cr15_data = CR15_DEFAULT;
+
+ /* Check SROM Version */
+ if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
+ /* SROM V4.01 */
+ /* Get NIC supported media modes */
+ db->NIC_capability = le16_to_cpup((__le16 *) (srom + 34));
+ db->PHY_reg4 = 0;
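+ /*
+ * Translate the SROM capability bits into MII register 4
+ * (auto-negotiation advertisement) bits: 10HD -> 0x0020,
+ * 10FD -> 0x0040, 100HD -> 0x0080, 100FD -> 0x0100.
+ */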
+ for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
+ switch( db->NIC_capability & tmp_reg ) {
+ case 0x1: db->PHY_reg4 |= 0x0020; break;
+ case 0x2: db->PHY_reg4 |= 0x0040; break;
+ case 0x4: db->PHY_reg4 |= 0x0080; break;
+ case 0x8: db->PHY_reg4 |= 0x0100; break;
+ }
+ }
+
+ /* Check whether the media mode is forced */
+ dmfe_mode = (le32_to_cpup((__le32 *) (srom + 34)) &
+ le32_to_cpup((__le32 *) (srom + 36)));
+ switch(dmfe_mode) {
+ case 0x4: dmfe_media_mode = DMFE_100MHF; break; /* 100MHF */
+ case 0x2: dmfe_media_mode = DMFE_10MFD; break; /* 10MFD */
+ case 0x8: dmfe_media_mode = DMFE_100MFD; break; /* 100MFD */
+ case 0x100:
+ case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
+ }
+
+ /* Special Function setting */
+ /* VLAN function */
+ if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
+ db->cr15_data |= 0x40;
+
+ /* Flow Control */
+ if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
+ db->cr15_data |= 0x400;
+
+ /* TX pause packet */
+ if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
+ db->cr15_data |= 0x9800;
+ }
+
+ /* Parse HPNA parameter */
+ db->HPNA_command = 1;
+
+ /* Accept remote command or not */
+ if (HPNA_rx_cmd == 0)
+ db->HPNA_command |= 0x8000;
+
+ /* Issue remote command & operation mode */
+ if (HPNA_tx_cmd == 1)
+ switch(HPNA_mode) { /* Issue Remote Command */
+ case 0: db->HPNA_command |= 0x0904; break;
+ case 1: db->HPNA_command |= 0x0a00; break;
+ case 2: db->HPNA_command |= 0x0506; break;
+ case 3: db->HPNA_command |= 0x0602; break;
+ }
+ else
+ switch(HPNA_mode) { /* Don't Issue */
+ case 0: db->HPNA_command |= 0x0004; break;
+ case 1: db->HPNA_command |= 0x0000; break;
+ case 2: db->HPNA_command |= 0x0006; break;
+ case 3: db->HPNA_command |= 0x0002; break;
+ }
+
+ /* Check whether a DM9801 or DM9802 is present */
+ db->HPNA_present = 0;
+ update_cr6(db->cr6_data|0x40000, db->ioaddr);
+ tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
+ if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
+ /* DM9801 or DM9802 present */
+ db->HPNA_timer = 8;
+ if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
+ /* DM9801 HomeRun */
+ db->HPNA_present = 1;
+ dmfe_program_DM9801(db, tmp_reg);
+ } else {
+ /* DM9802 LongRun */
+ db->HPNA_present = 2;
+ dmfe_program_DM9802(db);
+ }
+ }
+
+}
+
+
+/*
+ * Init HomeRun DM9801
+ */
+
+static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
+{
+ uint reg17, reg25;
+
+ if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
+ switch(HPNA_rev) {
+ case 0xb900: /* DM9801 E3 */
+ db->HPNA_command |= 0x1000;
+ reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
+ reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
+ reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
+ break;
+ case 0xb901: /* DM9801 E4 */
+ reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
+ reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
+ reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
+ reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
+ break;
+ case 0xb902: /* DM9801 E5 */
+ case 0xb903: /* DM9801 E6 */
+ default:
+ db->HPNA_command |= 0x1000;
+ reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
+ reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
+ reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
+ reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
+ break;
+ }
+ phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
+ phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
+ phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
+}
+
+
+/*
+ * Init LongRun DM9802
+ */
+
+static void dmfe_program_DM9802(struct dmfe_board_info * db)
+{
+ uint phy_reg;
+
+ if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
+ phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
+ phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
+ phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
+ phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
+}
+
+
+/*
+ * Check remote HPNA power and speed status. If not correct,
+ * issue command again.
+ */
+
+static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
+{
+ uint phy_reg;
+
+ /* Got remote device status */
+ phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
+ switch(phy_reg) {
+ case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
+ case 0x20: phy_reg = 0x0900;break; /* LP/HS */
+ case 0x40: phy_reg = 0x0600;break; /* HP/LS */
+ case 0x60: phy_reg = 0x0500;break; /* HP/HS */
+ }
+
+ /* Check whether the remote device status matches our setting */
+ if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
+ phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
+ db->chip_id);
+ db->HPNA_timer=8;
+ } else
+ db->HPNA_timer=600; /* Match; check again every 10 minutes */
+}
+
+
+
+static DEFINE_PCI_DEVICE_TABLE(dmfe_pci_tbl) = {
+ { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
+ { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
+ { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
+ { 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
+
+
+#ifdef CONFIG_PM
+static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pci_dev);
+ struct dmfe_board_info *db = netdev_priv(dev);
+ u32 tmp;
+
+ /* Disable upper layer interface */
+ netif_device_detach(dev);
+
+ /* Disable Tx/Rx */
+ db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
+ update_cr6(db->cr6_data, dev->base_addr);
+
+ /* Disable Interrupt */
+ outl(0, dev->base_addr + DCR7);
+ outl(inl (dev->base_addr + DCR5), dev->base_addr + DCR5);
+
+ /* Free RX buffers */
+ dmfe_free_rxbuffer(db);
+
+ /* Enable WOL */
+ pci_read_config_dword(pci_dev, 0x40, &tmp);
+ tmp &= ~(DMFE_WOL_LINKCHANGE|DMFE_WOL_MAGICPACKET);
+
+ if (db->wol_mode & WAKE_PHY)
+ tmp |= DMFE_WOL_LINKCHANGE;
+ if (db->wol_mode & WAKE_MAGIC)
+ tmp |= DMFE_WOL_MAGICPACKET;
+
+ pci_write_config_dword(pci_dev, 0x40, tmp);
+
+ pci_enable_wake(pci_dev, PCI_D3hot, 1);
+ pci_enable_wake(pci_dev, PCI_D3cold, 1);
+
+ /* Power down device */
+ pci_save_state(pci_dev);
+ pci_set_power_state(pci_dev, pci_choose_state (pci_dev, state));
+
+ return 0;
+}
+
+static int dmfe_resume(struct pci_dev *pci_dev)
+{
+ struct net_device *dev = pci_get_drvdata(pci_dev);
+ u32 tmp;
+
+ pci_set_power_state(pci_dev, PCI_D0);
+ pci_restore_state(pci_dev);
+
+ /* Re-initialize DM910X board */
+ dmfe_init_dm910x(dev);
+
+ /* Disable WOL */
+ pci_read_config_dword(pci_dev, 0x40, &tmp);
+
+ tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);
+ pci_write_config_dword(pci_dev, 0x40, tmp);
+
+ pci_enable_wake(pci_dev, PCI_D3hot, 0);
+ pci_enable_wake(pci_dev, PCI_D3cold, 0);
+
+ /* Restart upper layer interface */
+ netif_device_attach(dev);
+
+ return 0;
+}
+#else
+#define dmfe_suspend NULL
+#define dmfe_resume NULL
+#endif
+
+static struct pci_driver dmfe_driver = {
+ .name = "dmfe",
+ .id_table = dmfe_pci_tbl,
+ .probe = dmfe_init_one,
+ .remove = __devexit_p(dmfe_remove_one),
+ .suspend = dmfe_suspend,
+ .resume = dmfe_resume
+};
+
+MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
+MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_param(debug, int, 0);
+module_param(mode, byte, 0);
+module_param(cr6set, int, 0);
+module_param(chkmode, byte, 0);
+module_param(HPNA_mode, byte, 0);
+module_param(HPNA_rx_cmd, byte, 0);
+module_param(HPNA_tx_cmd, byte, 0);
+module_param(HPNA_NoiseFloor, byte, 0);
+module_param(SF_mode, byte, 0);
+MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
+MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
+ "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
+
+MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
+ "(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
+
+/* Description:
+ * When the user loads the module with insmod, the system invokes the
+ * module init routine (dmfe_init_module) to initialize and register
+ * the driver.
+ */
+
+static int __init dmfe_init_module(void)
+{
+ int rc;
+
+ pr_info("%s\n", version);
+ printed_version = 1;
+
+ DMFE_DBUG(0, "init_module() ", debug);
+
+ if (debug)
+ dmfe_debug = debug; /* set debug flag */
+ if (cr6set)
+ dmfe_cr6_user_set = cr6set;
+
+ switch(mode) {
+ case DMFE_10MHF:
+ case DMFE_100MHF:
+ case DMFE_10MFD:
+ case DMFE_100MFD:
+ case DMFE_1M_HPNA:
+ dmfe_media_mode = mode;
+ break;
+ default:dmfe_media_mode = DMFE_AUTO;
+ break;
+ }
+
+ if (HPNA_mode > 4)
+ HPNA_mode = 0; /* Default: LP/HS */
+ if (HPNA_rx_cmd > 1)
+ HPNA_rx_cmd = 0; /* Default: Ignored remote cmd */
+ if (HPNA_tx_cmd > 1)
+ HPNA_tx_cmd = 0; /* Default: Don't issue remote cmd */
+ if (HPNA_NoiseFloor > 15)
+ HPNA_NoiseFloor = 0;
+
+ rc = pci_register_driver(&dmfe_driver);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+
+/*
+ * Description:
+ * When the user removes the module with rmmod, the system invokes the
+ * module exit routine to unregister all registered services.
+ */
+
+static void __exit dmfe_cleanup_module(void)
+{
+ DMFE_DBUG(0, "dmfe_clean_module() ", debug);
+ pci_unregister_driver(&dmfe_driver);
+}
+
+module_init(dmfe_init_module);
+module_exit(dmfe_cleanup_module);
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
new file mode 100644
index 000000000000..fa5eee925f25
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -0,0 +1,385 @@
+/*
+ drivers/net/tulip/eeprom.c
+
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
+ for more information on this driver.
+ Please submit bug reports to http://bugzilla.kernel.org/.
+*/
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include "tulip.h"
+#include <linux/init.h>
+#include <asm/unaligned.h>
+
+
+
+/* Serial EEPROM section. */
+/* The main routine to parse the very complicated SROM structure.
+ Search www.digital.com for "21X4 SROM" to get details.
+ This code is very complex, and will require changes to support
+ additional cards, so I'll be verbose about what is going on.
+ */
+
+/* Known cards that have old-style EEPROMs. */
+static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
+ {"Asante", 0, 0, 0x94, {0x1e00, 0x0000, 0x0800, 0x0100, 0x018c,
+ 0x0000, 0x0000, 0xe078, 0x0001, 0x0050, 0x0018 }},
+ {"SMC9332DST", 0, 0, 0xC0, { 0x1e00, 0x0000, 0x0800, 0x041f,
+ 0x0000, 0x009E, /* 10baseT */
+ 0x0004, 0x009E, /* 10baseT-FD */
+ 0x0903, 0x006D, /* 100baseTx */
+ 0x0905, 0x006D, /* 100baseTx-FD */ }},
+ {"Cogent EM100", 0, 0, 0x92, { 0x1e00, 0x0000, 0x0800, 0x063f,
+ 0x0107, 0x8021, /* 100baseFx */
+ 0x0108, 0x8021, /* 100baseFx-FD */
+ 0x0100, 0x009E, /* 10baseT */
+ 0x0104, 0x009E, /* 10baseT-FD */
+ 0x0103, 0x006D, /* 100baseTx */
+ 0x0105, 0x006D, /* 100baseTx-FD */ }},
+ {"Maxtech NX-110", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x0513,
+ 0x1001, 0x009E, /* 10base2, CSR12 0x10*/
+ 0x0000, 0x009E, /* 10baseT */
+ 0x0004, 0x009E, /* 10baseT-FD */
+ 0x0303, 0x006D, /* 100baseTx, CSR12 0x03 */
+ 0x0305, 0x006D, /* 100baseTx-FD CSR12 0x03 */}},
+ {"Accton EN1207", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x051F,
+ 0x1B01, 0x0000, /* 10base2, CSR12 0x1B */
+ 0x0B00, 0x009E, /* 10baseT, CSR12 0x0B */
+ 0x0B04, 0x009E, /* 10baseT-FD,CSR12 0x0B */
+ 0x1B03, 0x006D, /* 100baseTx, CSR12 0x1B */
+ 0x1B05, 0x006D, /* 100baseTx-FD CSR12 0x1B */
+ }},
+ {"NetWinder", 0x00, 0x10, 0x57,
+ /* Default media = MII
+ * MII block, reset sequence (3) = 0x0821 0x0000 0x0001, capabilities 0x01e1
+ */
+ { 0x1e00, 0x0000, 0x000b, 0x8f01, 0x0103, 0x0300, 0x0821, 0x000, 0x0001, 0x0000, 0x01e1 }
+ },
+ {"Cobalt Microserver", 0, 0x10, 0xE0, {0x1e00, /* 0 == controller #, 1e == offset */
+ 0x0000, /* 0 == high offset, 0 == gap */
+ 0x0800, /* Default Autoselect */
+ 0x8001, /* 1 leaf, extended type, bogus len */
+ 0x0003, /* Type 3 (MII), PHY #0 */
+ 0x0400, /* 0 init instr, 4 reset instr */
+ 0x0801, /* Set control mode, GP0 output */
+ 0x0000, /* Drive GP0 Low (RST is active low) */
+ 0x0800, /* control mode, GP0 input (undriven) */
+ 0x0000, /* clear control mode */
+ 0x7800, /* 100TX FDX + HDX, 10bT FDX + HDX */
+ 0x01e0, /* Advertise all above */
+ 0x5000, /* FDX all above */
+ 0x1800, /* Set fast TTM in 100bt modes */
+ 0x0000, /* PHY cannot be unplugged */
+ }},
+ {NULL}};
+
+
+static const char *block_name[] __devinitdata = {
+ "21140 non-MII",
+ "21140 MII PHY",
+ "21142 Serial PHY",
+ "21142 MII PHY",
+ "21143 SYM PHY",
+ "21143 reset method"
+};
+
+
+/**
+ * tulip_build_fake_mediatable - Build a fake mediatable entry.
+ * @tp: Ptr to the tulip private data.
+ *
+ * Some cards like the 3x5 HSC cards (J3514A) do not have a standard
+ * srom and can not be handled under the fixup routine. These cards
+ * still need a valid mediatable entry for correct csr12 setup and
+ * mii handling.
+ *
+ * Since this is currently a parisc-linux specific function, the
+ * #ifdef __hppa__ should completely optimize this function away for
+ * non-parisc hardware.
+ */
+static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp)
+{
+#ifdef CONFIG_GSC
+ if (tp->flags & NEEDS_FAKE_MEDIA_TABLE) {
+ static unsigned char leafdata[] =
+ { 0x01, /* phy number */
+ 0x02, /* gpr setup sequence length */
+ 0x02, 0x00, /* gpr setup sequence */
+ 0x02, /* phy reset sequence length */
+ 0x01, 0x00, /* phy reset sequence */
+ 0x00, 0x78, /* media capabilities */
+ 0x00, 0xe0, /* nway advertisement */
+ 0x00, 0x05, /* fdx bit map */
+ 0x00, 0x06 /* ttm bit map */
+ };
+
+ tp->mtable = kmalloc(sizeof(struct mediatable) +
+ sizeof(struct medialeaf), GFP_KERNEL);
+
+ if (tp->mtable == NULL)
+ return; /* Horrible, impossible failure. */
+
+ tp->mtable->defaultmedia = 0x800;
+ tp->mtable->leafcount = 1;
+ tp->mtable->csr12dir = 0x3f; /* inputs on bit7 for hsc-pci, bit6 for pci-fx */
+ tp->mtable->has_nonmii = 0;
+ tp->mtable->has_reset = 0;
+ tp->mtable->has_mii = 1;
+ tp->mtable->csr15dir = tp->mtable->csr15val = 0;
+ tp->mtable->mleaf[0].type = 1;
+ tp->mtable->mleaf[0].media = 11;
+ tp->mtable->mleaf[0].leafdata = &leafdata[0];
+ tp->flags |= HAS_PHY_IRQ;
+ tp->csr12_shadow = -1;
+ }
+#endif
+}
+
+void __devinit tulip_parse_eeprom(struct net_device *dev)
+{
+ /*
+ dev is not registered at this point, so logging messages can't
+ use dev_<level> or netdev_<level> but dev->name is good via a
+ hack in the caller
+ */
+
+ /* The last media info list parsed, for multiport boards. */
+ static struct mediatable *last_mediatable;
+ static unsigned char *last_ee_data;
+ static int controller_index;
+ struct tulip_private *tp = netdev_priv(dev);
+ unsigned char *ee_data = tp->eeprom;
+ int i;
+
+ tp->mtable = NULL;
+ /* Detect an old-style (SA only) EEPROM layout:
+ memcmp(eedata, eedata+16, 8). */
+ for (i = 0; i < 8; i ++)
+ if (ee_data[i] != ee_data[16+i])
+ break;
+ if (i >= 8) {
+ if (ee_data[0] == 0xff) {
+ if (last_mediatable) {
+ controller_index++;
+ pr_info("%s: Controller %d of multiport board\n",
+ dev->name, controller_index);
+ tp->mtable = last_mediatable;
+ ee_data = last_ee_data;
+ goto subsequent_board;
+ } else
+ pr_info("%s: Missing EEPROM, this interface may not work correctly!\n",
+ dev->name);
+ return;
+ }
+ /* Do a fix-up based on the vendor half of the station address prefix. */
+ for (i = 0; eeprom_fixups[i].name; i++) {
+ if (dev->dev_addr[0] == eeprom_fixups[i].addr0 &&
+ dev->dev_addr[1] == eeprom_fixups[i].addr1 &&
+ dev->dev_addr[2] == eeprom_fixups[i].addr2) {
+ if (dev->dev_addr[2] == 0xE8 && ee_data[0x1a] == 0x55)
+ i++; /* An Accton EN1207, not an outlaw Maxtech. */
+ memcpy(ee_data + 26, eeprom_fixups[i].newtable,
+ sizeof(eeprom_fixups[i].newtable));
+ pr_info("%s: Old format EEPROM on '%s' board. Using substitute media control info\n",
+ dev->name, eeprom_fixups[i].name);
+ break;
+ }
+ }
+ if (eeprom_fixups[i].name == NULL) { /* No fixup found. */
+ pr_info("%s: Old style EEPROM with no media selection information\n",
+ dev->name);
+ return;
+ }
+ }
+
+ controller_index = 0;
+ if (ee_data[19] > 1) { /* Multiport board. */
+ last_ee_data = ee_data;
+ }
+subsequent_board:
+
+ if (ee_data[27] == 0) { /* No valid media table. */
+ tulip_build_fake_mediatable(tp);
+ } else {
+ unsigned char *p = (void *)ee_data + ee_data[27];
+ unsigned char csr12dir = 0;
+ int count, new_advertise = 0;
+ struct mediatable *mtable;
+ u16 media = get_u16(p);
+
+ p += 2;
+ if (tp->flags & CSR12_IN_SROM)
+ csr12dir = *p++;
+ count = *p++;
+
+ /* there is no phy information, don't even try to build mtable */
+ if (count == 0) {
+ if (tulip_debug > 0)
+ pr_warn("%s: no phy info, aborting mtable build\n",
+ dev->name);
+ return;
+ }
+
+ mtable = kmalloc(sizeof(struct mediatable) +
+ count * sizeof(struct medialeaf),
+ GFP_KERNEL);
+ if (mtable == NULL)
+ return; /* Horrible, impossible failure. */
+ last_mediatable = tp->mtable = mtable;
+ mtable->defaultmedia = media;
+ mtable->leafcount = count;
+ mtable->csr12dir = csr12dir;
+ mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0;
+ mtable->csr15dir = mtable->csr15val = 0;
+
+ pr_info("%s: EEPROM default media type %s\n",
+ dev->name,
+ media & 0x0800 ? "Autosense"
+ : medianame[media & MEDIA_MASK]);
+ for (i = 0; i < count; i++) {
+ struct medialeaf *leaf = &mtable->mleaf[i];
+
+ if ((p[0] & 0x80) == 0) { /* 21140 Compact block. */
+ leaf->type = 0;
+ leaf->media = p[0] & 0x3f;
+ leaf->leafdata = p;
+ if ((p[2] & 0x61) == 0x01) /* Bogus, but Znyx boards do it. */
+ mtable->has_mii = 1;
+ p += 4;
+ } else {
+ leaf->type = p[1];
+ if (p[1] == 0x05) {
+ mtable->has_reset = i;
+ leaf->media = p[2] & 0x0f;
+ } else if (tp->chip_id == DM910X && p[1] == 0x80) {
+ /* Hack to ignore Davicom delay period block */
+ mtable->leafcount--;
+ count--;
+ i--;
+ leaf->leafdata = p + 2;
+ p += (p[0] & 0x3f) + 1;
+ continue;
+ } else if (p[1] & 1) {
+ int gpr_len, reset_len;
+
+ mtable->has_mii = 1;
+ leaf->media = 11;
+	gpr_len = p[3] * 2;
+	reset_len = p[4 + gpr_len] * 2;
+ new_advertise |= get_u16(&p[7+gpr_len+reset_len]);
+ } else {
+ mtable->has_nonmii = 1;
+ leaf->media = p[2] & MEDIA_MASK;
+ /* Davicom's media number for 100BaseTX is strange */
+ if (tp->chip_id == DM910X && leaf->media == 1)
+ leaf->media = 3;
+ switch (leaf->media) {
+ case 0: new_advertise |= 0x0020; break;
+ case 4: new_advertise |= 0x0040; break;
+ case 3: new_advertise |= 0x0080; break;
+ case 5: new_advertise |= 0x0100; break;
+ case 6: new_advertise |= 0x0200; break;
+ }
+ if (p[1] == 2 && leaf->media == 0) {
+ if (p[2] & 0x40) {
+ u32 base15 = get_unaligned((u16*)&p[7]);
+ mtable->csr15dir =
+ (get_unaligned((u16*)&p[9])<<16) + base15;
+ mtable->csr15val =
+ (get_unaligned((u16*)&p[11])<<16) + base15;
+ } else {
+ mtable->csr15dir = get_unaligned((u16*)&p[3])<<16;
+ mtable->csr15val = get_unaligned((u16*)&p[5])<<16;
+ }
+ }
+ }
+ leaf->leafdata = p + 2;
+ p += (p[0] & 0x3f) + 1;
+ }
+ if (tulip_debug > 1 && leaf->media == 11) {
+ unsigned char *bp = leaf->leafdata;
+ pr_info("%s: MII interface PHY %d, setup/reset sequences %d/%d long, capabilities %02x %02x\n",
+ dev->name,
+ bp[0], bp[1], bp[2 + bp[1]*2],
+ bp[5 + bp[2 + bp[1]*2]*2],
+ bp[4 + bp[2 + bp[1]*2]*2]);
+ }
+ pr_info("%s: Index #%d - Media %s (#%d) described by a %s (%d) block\n",
+ dev->name,
+ i, medianame[leaf->media & 15], leaf->media,
+ leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>",
+ leaf->type);
+ }
+ if (new_advertise)
+ tp->sym_advertise = new_advertise;
+ }
+}
+/* Reading a serial EEPROM is a "bit" grungy, but we work our way through it :-> */
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x02 /* EEPROM shift clock. */
+#define EE_CS 0x01 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x04 /* Data from the Tulip to EEPROM. */
+#define EE_WRITE_0 0x01
+#define EE_WRITE_1 0x05
+#define EE_DATA_READ 0x08 /* Data from the EEPROM chip. */
+#define EE_ENB (0x4800 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+   Even at 33 MHz, current PCI implementations don't overrun the EEPROM clock.
+   We add a bus turn-around to ensure that this remains true. */
+#define eeprom_delay() ioread32(ee_addr)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_READ_CMD (6)
+
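+/* Worked example (illustrative): for an EEPROM with a 6-bit address,
+ * reading location 7 shifts out read_cmd = 7 | (EE_READ_CMD << 6) = 0x187,
+ * MSB-first over 4 + addr_len + 1 = 11 clock cycles: two leading zeros,
+ * the always-set start bit and read opcode (110), then the address (000111).
+ */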
+/* Note: this routine returns extra data bits for size detection. */
+int __devinit tulip_read_eeprom(struct net_device *dev, int location, int addr_len)
+{
+ int i;
+ unsigned retval = 0;
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ee_addr = tp->base_addr + CSR9;
+ int read_cmd = location | (EE_READ_CMD << addr_len);
+
+	/* If location is past the end of what we can address, don't
+	 * read some other location (i.e. truncate). Just return zero.
+	 */
+ if (location > (1 << addr_len) - 1)
+ return 0;
+
+ iowrite32(EE_ENB & ~EE_CS, ee_addr);
+ iowrite32(EE_ENB, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 4 + addr_len; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ iowrite32(EE_ENB | dataval, ee_addr);
+ eeprom_delay();
+ iowrite32(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((ioread32(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ }
+ iowrite32(EE_ENB, ee_addr);
+ eeprom_delay();
+
+ for (i = 16; i > 0; i--) {
+ iowrite32(EE_ENB | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((ioread32(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ iowrite32(EE_ENB, ee_addr);
+ eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+ iowrite32(EE_ENB & ~EE_CS, ee_addr);
+ return (tp->flags & HAS_SWAPPED_SEEPROM) ? swab16(retval) : retval;
+}
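+
+/* Usage sketch (an assumption about the caller, not part of this file):
+ * probe code would typically fill tp->eeprom by reading 16-bit words,
+ * roughly:
+ *
+ *	for (i = 0; i < eeprom_size / 2; i++)
+ *		((__le16 *)tp->eeprom)[i] =
+ *			cpu_to_le16(tulip_read_eeprom(dev, i, addr_len));
+ *
+ * where eeprom_size and addr_len stand in for values detected at probe
+ * time; the names here are hypothetical.
+ */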
+
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
new file mode 100644
index 000000000000..5350d753e0ff
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -0,0 +1,811 @@
+/*
+ drivers/net/tulip/interrupt.c
+
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
+ for more information on this driver.
+ Please submit bugs to http://bugzilla.kernel.org/ .
+
+*/
+
+#include <linux/pci.h>
+#include "tulip.h"
+#include <linux/etherdevice.h>
+
+int tulip_rx_copybreak;
+unsigned int tulip_max_interrupt_work;
+
+#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
+#define MIT_SIZE 15
+#define MIT_TABLE 15 /* We use 0 or max */
+
+static unsigned int mit_table[MIT_SIZE+1] =
+{
+	/* CSR11 21143 hardware Interrupt Mitigation Control
+	   We use only RX mitigation; other techniques are used for
+	   TX interrupt mitigation.
+
+	   31    Cycle Size (timer control)
+	   30:27 TX timer in 16 * Cycle size
+	   26:24 TX No pkts before Int.
+	   23:20 RX timer in Cycle size
+	   19:17 RX No pkts before Int.
+	   16    Continuous Mode (CM)
+ */
+
+ 0x0, /* IM disabled */
+ 0x80150000, /* RX time = 1, RX pkts = 2, CM = 1 */
+ 0x80150000,
+ 0x80270000,
+ 0x80370000,
+ 0x80490000,
+ 0x80590000,
+ 0x80690000,
+ 0x807B0000,
+ 0x808B0000,
+ 0x809D0000,
+ 0x80AD0000,
+ 0x80BD0000,
+ 0x80CF0000,
+ 0x80DF0000,
+// 0x80FF0000 /* RX time = 16, RX pkts = 7, CM = 1 */
+ 0x80F10000 /* RX time = 16, RX pkts = 0, CM = 1 */
+};
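+
+/* Worked example (illustrative): decoding mit_table[1] = 0x80150000
+ * against the CSR11 layout above: bit 31 = 1 (cycle size), bits 23:20
+ * = 0x1 (RX timer), bits 19:17 = 0b010 (2 packets before interrupt),
+ * bit 16 = 1 (Continuous Mode), i.e. "RX time = 1, RX pkts = 2, CM = 1".
+ */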
+#endif
+
+
+int tulip_refill_rx(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ int entry;
+ int refilled = 0;
+
+ /* Refill the Rx ring buffers. */
+ for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
+ entry = tp->dirty_rx % RX_RING_SIZE;
+ if (tp->rx_buffers[entry].skb == NULL) {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+
+ skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
+ if (skb == NULL)
+ break;
+
+ mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+ tp->rx_buffers[entry].mapping = mapping;
+
+ skb->dev = dev; /* Mark as being used by this device. */
+ tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
+ refilled++;
+ }
+ tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
+ }
+	if (tp->chip_id == LC82C168) {
+		if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
+ /* Rx stopped due to out of buffers,
+ * restart it
+ */
+ iowrite32(0x01, tp->base_addr + CSR2);
+ }
+ }
+ return refilled;
+}
+
+#ifdef CONFIG_TULIP_NAPI
+
+void oom_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = netdev_priv(dev);
+ napi_schedule(&tp->napi);
+}
+
+int tulip_poll(struct napi_struct *napi, int budget)
+{
+ struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
+ struct net_device *dev = tp->dev;
+ int entry = tp->cur_rx % RX_RING_SIZE;
+ int work_done = 0;
+#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
+ int received = 0;
+#endif
+
+#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
+
+/* One buffer is needed for mitigation activation; or this might be a
+   bug in the ring buffer code; check later -- JHS */
+
+	if (budget >= RX_RING_SIZE)
+		budget--;
+#endif
+
+ if (tulip_debug > 4)
+ netdev_dbg(dev, " In tulip_rx(), entry %d %08x\n",
+ entry, tp->rx_ring[entry].status);
+
+ do {
+ if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
+ netdev_dbg(dev, " In tulip_poll(), hardware disappeared\n");
+ break;
+ }
+ /* Acknowledge current RX interrupt sources. */
+ iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
+
+
+ /* If we own the next entry, it is a new packet. Send it up. */
+ while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
+ s32 status = le32_to_cpu(tp->rx_ring[entry].status);
+ short pkt_len;
+
+ if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
+ break;
+
+ if (tulip_debug > 5)
+ netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
+ entry, status);
+
+ if (++work_done >= budget)
+ goto not_done;
+
+ /*
+ * Omit the four octet CRC from the length.
+ * (May not be considered valid until we have
+ * checked status for RxLengthOver2047 bits)
+ */
+ pkt_len = ((status >> 16) & 0x7ff) - 4;
+
+ /*
+ * Maximum pkt_len is 1518 (1514 + vlan header)
+ * Anything higher than this is always invalid
+ * regardless of RxLengthOver2047 bits
+ */
+
+ if ((status & (RxLengthOver2047 |
+ RxDescCRCError |
+ RxDescCollisionSeen |
+ RxDescRunt |
+ RxDescDescErr |
+ RxWholePkt)) != RxWholePkt ||
+ pkt_len > 1518) {
+ if ((status & (RxLengthOver2047 |
+ RxWholePkt)) != RxWholePkt) {
+				/* Ignore earlier buffers. */
+ if ((status & 0xffff) != 0x7fff) {
+ if (tulip_debug > 1)
+ dev_warn(&dev->dev,
+ "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
+ status);
+ dev->stats.rx_length_errors++;
+ }
+ } else {
+ /* There was a fatal error. */
+ if (tulip_debug > 2)
+ netdev_dbg(dev, "Receive error, Rx status %08x\n",
+ status);
+ dev->stats.rx_errors++; /* end of a packet.*/
+ if (pkt_len > 1518 ||
+ (status & RxDescRunt))
+ dev->stats.rx_length_errors++;
+
+ if (status & 0x0004)
+ dev->stats.rx_frame_errors++;
+ if (status & 0x0002)
+ dev->stats.rx_crc_errors++;
+ if (status & 0x0001)
+ dev->stats.rx_fifo_errors++;
+ }
+ } else {
+ struct sk_buff *skb;
+
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < tulip_rx_copybreak &&
+ (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ pci_dma_sync_single_for_cpu(tp->pdev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len, PCI_DMA_FROMDEVICE);
+#if ! defined(__alpha__)
+ skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
+ pkt_len);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len),
+ tp->rx_buffers[entry].skb->data,
+ pkt_len);
+#endif
+ pci_dma_sync_single_for_device(tp->pdev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len, PCI_DMA_FROMDEVICE);
+ } else { /* Pass up the skb already on the Rx ring. */
+ char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
+ pkt_len);
+
+#ifndef final_version
+ if (tp->rx_buffers[entry].mapping !=
+ le32_to_cpu(tp->rx_ring[entry].buffer1)) {
+ dev_err(&dev->dev,
+ "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
+ le32_to_cpu(tp->rx_ring[entry].buffer1),
+ (unsigned long long)tp->rx_buffers[entry].mapping,
+ skb->head, temp);
+ }
+#endif
+
+ pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+
+ tp->rx_buffers[entry].skb = NULL;
+ tp->rx_buffers[entry].mapping = 0;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+
+ netif_receive_skb(skb);
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
+ received++;
+#endif
+
+ entry = (++tp->cur_rx) % RX_RING_SIZE;
+ if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
+ tulip_refill_rx(dev);
+
+ }
+
+         /* New ack strategy... the irq does not ack Rx any longer;
+            hopefully this helps */
+
+         /* Really bad things can happen here... If a new packet arrives
+          * and an irq arrives (tx or just due to an occasionally unset
+          * mask), it will be acked by the irq handler, but the new thread
+          * is not scheduled. It is a major hole in the design.
+          * No idea how to fix this if "playing with fire" fails
+          * tomorrow (night 011029). If it does not fail, we have
+          * finally won: the amount of IO did not increase at all. */
+ } while ((ioread32(tp->base_addr + CSR5) & RxIntr));
+
+#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
+
+/* We use this simplistic scheme for IM. It's proven by
+   real life installations. We could have IM enabled
+   continuously, but this would cause unnecessary latency.
+   Unfortunately we can't use all the NET_RX_* feedback here.
+   That would turn on IM for devices that are not contributing
+   to backlog congestion, adding unnecessary latency.
+
+   We monitor the device RX ring and have:
+
+   HW Interrupt Mitigation either ON or OFF.
+
+   ON:  More than 1 pkt received (per intr.) OR we are dropping
+   OFF: Only 1 pkt received
+
+   Note: we only use the min and max (0, 15) settings from mit_table */
+
+
+        if (tp->flags & HAS_INTR_MITIGATION) {
+                if (received > 1) {
+                        if (!tp->mit_on) {
+                                tp->mit_on = 1;
+                                iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
+                        }
+                } else {
+                        if (tp->mit_on) {
+                                tp->mit_on = 0;
+                                iowrite32(0, tp->base_addr + CSR11);
+                        }
+                }
+        }
+
+#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
+
+ tulip_refill_rx(dev);
+
+ /* If RX ring is not full we are out of memory. */
+ if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
+ goto oom;
+
+ /* Remove us from polling list and enable RX intr. */
+
+ napi_complete(napi);
+ iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
+
+ /* The last op happens after poll completion. Which means the following:
+ * 1. it can race with disabling irqs in irq handler
+         * 2. it can race with disabling/enabling irqs in other poll threads
+ * 3. if an irq raised after beginning loop, it will be immediately
+ * triggered here.
+ *
+ * Summarizing: the logic results in some redundant irqs both
+ * due to races in masking and due to too late acking of already
+ * processed irqs. But it must not result in losing events.
+ */
+
+ return work_done;
+
+ not_done:
+ if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
+ tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
+ tulip_refill_rx(dev);
+
+ if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
+ goto oom;
+
+ return work_done;
+
+ oom: /* Executed with RX ints disabled */
+
+ /* Start timer, stop polling, but do not enable rx interrupts. */
+ mod_timer(&tp->oom_timer, jiffies+1);
+
+  /* Think: timer_pending() would be an explicit signature of a bug.
+   * The timer can be pending now, but it may have fired and completed
+   * before we did napi_complete(). See? We would lose it. */
+
+ /* remove ourselves from the polling list */
+ napi_complete(napi);
+
+ return work_done;
+}
+
+#else /* CONFIG_TULIP_NAPI */
+
+static int tulip_rx(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ int entry = tp->cur_rx % RX_RING_SIZE;
+ int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
+ int received = 0;
+
+ if (tulip_debug > 4)
+ netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
+ entry, tp->rx_ring[entry].status);
+ /* If we own the next entry, it is a new packet. Send it up. */
+ while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
+ s32 status = le32_to_cpu(tp->rx_ring[entry].status);
+ short pkt_len;
+
+ if (tulip_debug > 5)
+ netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
+ entry, status);
+ if (--rx_work_limit < 0)
+ break;
+
+ /*
+ Omit the four octet CRC from the length.
+ (May not be considered valid until we have
+ checked status for RxLengthOver2047 bits)
+ */
+ pkt_len = ((status >> 16) & 0x7ff) - 4;
+ /*
+ Maximum pkt_len is 1518 (1514 + vlan header)
+ Anything higher than this is always invalid
+ regardless of RxLengthOver2047 bits
+ */
+
+ if ((status & (RxLengthOver2047 |
+ RxDescCRCError |
+ RxDescCollisionSeen |
+ RxDescRunt |
+ RxDescDescErr |
+ RxWholePkt)) != RxWholePkt ||
+ pkt_len > 1518) {
+ if ((status & (RxLengthOver2047 |
+ RxWholePkt)) != RxWholePkt) {
+				/* Ignore earlier buffers. */
+ if ((status & 0xffff) != 0x7fff) {
+ if (tulip_debug > 1)
+ netdev_warn(dev,
+ "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
+ status);
+ dev->stats.rx_length_errors++;
+ }
+ } else {
+ /* There was a fatal error. */
+ if (tulip_debug > 2)
+ netdev_dbg(dev, "Receive error, Rx status %08x\n",
+ status);
+ dev->stats.rx_errors++; /* end of a packet.*/
+ if (pkt_len > 1518 ||
+ (status & RxDescRunt))
+ dev->stats.rx_length_errors++;
+ if (status & 0x0004)
+ dev->stats.rx_frame_errors++;
+ if (status & 0x0002)
+ dev->stats.rx_crc_errors++;
+ if (status & 0x0001)
+ dev->stats.rx_fifo_errors++;
+ }
+ } else {
+ struct sk_buff *skb;
+
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < tulip_rx_copybreak &&
+ (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ pci_dma_sync_single_for_cpu(tp->pdev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len, PCI_DMA_FROMDEVICE);
+#if ! defined(__alpha__)
+ skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
+ pkt_len);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len),
+ tp->rx_buffers[entry].skb->data,
+ pkt_len);
+#endif
+ pci_dma_sync_single_for_device(tp->pdev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len, PCI_DMA_FROMDEVICE);
+ } else { /* Pass up the skb already on the Rx ring. */
+ char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
+ pkt_len);
+
+#ifndef final_version
+ if (tp->rx_buffers[entry].mapping !=
+ le32_to_cpu(tp->rx_ring[entry].buffer1)) {
+ dev_err(&dev->dev,
+					"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
+					le32_to_cpu(tp->rx_ring[entry].buffer1),
+					(unsigned long long)tp->rx_buffers[entry].mapping,
+ skb->head, temp);
+ }
+#endif
+
+ pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+
+ tp->rx_buffers[entry].skb = NULL;
+ tp->rx_buffers[entry].mapping = 0;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+
+ netif_rx(skb);
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ received++;
+ entry = (++tp->cur_rx) % RX_RING_SIZE;
+ }
+ return received;
+}
+#endif /* CONFIG_TULIP_NAPI */
+
+static inline unsigned int phy_interrupt (struct net_device *dev)
+{
+#ifdef __hppa__
+ struct tulip_private *tp = netdev_priv(dev);
+ int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;
+
+ if (csr12 != tp->csr12_shadow) {
+ /* ack interrupt */
+ iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
+ tp->csr12_shadow = csr12;
+ /* do link change stuff */
+ spin_lock(&tp->lock);
+ tulip_check_duplex(dev);
+ spin_unlock(&tp->lock);
+ /* clear irq ack bit */
+ iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);
+
+ return 1;
+ }
+#endif
+
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+irqreturn_t tulip_interrupt(int irq, void *dev_instance)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int csr5;
+ int missed;
+ int rx = 0;
+ int tx = 0;
+ int oi = 0;
+ int maxrx = RX_RING_SIZE;
+ int maxtx = TX_RING_SIZE;
+ int maxoi = TX_RING_SIZE;
+#ifdef CONFIG_TULIP_NAPI
+ int rxd = 0;
+#else
+ int entry;
+#endif
+ unsigned int work_count = tulip_max_interrupt_work;
+ unsigned int handled = 0;
+
+ /* Let's see whether the interrupt really is for us */
+ csr5 = ioread32(ioaddr + CSR5);
+
+ if (tp->flags & HAS_PHY_IRQ)
+ handled = phy_interrupt (dev);
+
+ if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
+ return IRQ_RETVAL(handled);
+
+ tp->nir++;
+
+ do {
+
+#ifdef CONFIG_TULIP_NAPI
+
+ if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
+ rxd++;
+ /* Mask RX intrs and add the device to poll list. */
+ iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
+ napi_schedule(&tp->napi);
+
+ if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
+ break;
+ }
+
+               /* Acknowledge the interrupt sources we handle here ASAP;
+                  the poll function does the Rx and RxNoBuf acking */
+
+ iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);
+
+#else
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);
+
+
+ if (csr5 & (RxIntr | RxNoBuf)) {
+ rx += tulip_rx(dev);
+ tulip_refill_rx(dev);
+ }
+
+#endif /* CONFIG_TULIP_NAPI */
+
+ if (tulip_debug > 4)
+ netdev_dbg(dev, "interrupt csr5=%#8.8x new csr5=%#8.8x\n",
+ csr5, ioread32(ioaddr + CSR5));
+
+
+ if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
+ unsigned int dirty_tx;
+
+ spin_lock(&tp->lock);
+
+ for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
+ dirty_tx++) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int status = le32_to_cpu(tp->tx_ring[entry].status);
+
+ if (status < 0)
+ break; /* It still has not been Txed */
+
+ /* Check for Rx filter setup frames. */
+ if (tp->tx_buffers[entry].skb == NULL) {
+ /* test because dummy frames not mapped */
+ if (tp->tx_buffers[entry].mapping)
+ pci_unmap_single(tp->pdev,
+ tp->tx_buffers[entry].mapping,
+ sizeof(tp->setup_frame),
+ PCI_DMA_TODEVICE);
+ continue;
+ }
+
+ if (status & 0x8000) {
+					/* There was a major error; log it. */
+#ifndef final_version
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "Transmit error, Tx status %08x\n",
+ status);
+#endif
+ dev->stats.tx_errors++;
+ if (status & 0x4104)
+ dev->stats.tx_aborted_errors++;
+ if (status & 0x0C00)
+ dev->stats.tx_carrier_errors++;
+ if (status & 0x0200)
+ dev->stats.tx_window_errors++;
+ if (status & 0x0002)
+ dev->stats.tx_fifo_errors++;
+ if ((status & 0x0080) && tp->full_duplex == 0)
+ dev->stats.tx_heartbeat_errors++;
+ } else {
+ dev->stats.tx_bytes +=
+ tp->tx_buffers[entry].skb->len;
+ dev->stats.collisions += (status >> 3) & 15;
+ dev->stats.tx_packets++;
+ }
+
+ pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
+ tp->tx_buffers[entry].skb->len,
+ PCI_DMA_TODEVICE);
+
+ /* Free the original skb. */
+ dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
+ tp->tx_buffers[entry].skb = NULL;
+ tp->tx_buffers[entry].mapping = 0;
+ tx++;
+ }
+
+#ifndef final_version
+ if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
+ dev_err(&dev->dev,
+ "Out-of-sync dirty pointer, %d vs. %d\n",
+ dirty_tx, tp->cur_tx);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
+ netif_wake_queue(dev);
+
+ tp->dirty_tx = dirty_tx;
+ if (csr5 & TxDied) {
+ if (tulip_debug > 2)
+ dev_warn(&dev->dev,
+ "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
+ csr5, ioread32(ioaddr + CSR6),
+ tp->csr6);
+ tulip_restart_rxtx(tp);
+ }
+ spin_unlock(&tp->lock);
+ }
+
+ /* Log errors. */
+ if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
+ if (csr5 == 0xffffffff)
+ break;
+ if (csr5 & TxJabber)
+ dev->stats.tx_errors++;
+ if (csr5 & TxFIFOUnderflow) {
+ if ((tp->csr6 & 0xC000) != 0xC000)
+ tp->csr6 += 0x4000; /* Bump up the Tx threshold */
+ else
+ tp->csr6 |= 0x00200000; /* Store-n-forward. */
+ /* Restart the transmit process. */
+ tulip_restart_rxtx(tp);
+ iowrite32(0, ioaddr + CSR1);
+ }
+ if (csr5 & (RxDied | RxNoBuf)) {
+ if (tp->flags & COMET_MAC_ADDR) {
+ iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
+ iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
+ }
+ }
+ if (csr5 & RxDied) { /* Missed a Rx frame. */
+ dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
+ dev->stats.rx_errors++;
+ tulip_start_rxtx(tp);
+ }
+ /*
+ * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
+ * call is ever done under the spinlock
+ */
+ if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
+ if (tp->link_change)
+ (tp->link_change)(dev, csr5);
+ }
+ if (csr5 & SystemError) {
+ int error = (csr5 >> 23) & 7;
+ /* oops, we hit a PCI error. The code produced corresponds
+ * to the reason:
+ * 0 - parity error
+ * 1 - master abort
+ * 2 - target abort
+ * Note that on parity error, we should do a software reset
+ * of the chip to get it back into a sane state (according
+ * to the 21142/3 docs that is).
+ * -- rmk
+ */
+ dev_err(&dev->dev,
+ "(%lu) System Error occurred (%d)\n",
+ tp->nir, error);
+ }
+ /* Clear all error sources, included undocumented ones! */
+ iowrite32(0x0800f7ba, ioaddr + CSR5);
+ oi++;
+ }
+ if (csr5 & TimerInt) {
+
+ if (tulip_debug > 2)
+ dev_err(&dev->dev,
+ "Re-enabling interrupts, %08x\n",
+ csr5);
+ iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+ tp->ttimer = 0;
+ oi++;
+ }
+ if (tx > maxtx || rx > maxrx || oi > maxoi) {
+ if (tulip_debug > 1)
+ dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
+ csr5, tp->nir, tx, rx, oi);
+
+ /* Acknowledge all interrupt sources. */
+ iowrite32(0x8001ffff, ioaddr + CSR5);
+ if (tp->flags & HAS_INTR_MITIGATION) {
+ /* Josip Loncaric at ICASE did extensive experimentation
+ to develop a good interrupt mitigation setting.*/
+ iowrite32(0x8b240000, ioaddr + CSR11);
+ } else if (tp->chip_id == LC82C168) {
+ /* the LC82C168 doesn't have a hw timer.*/
+ iowrite32(0x00, ioaddr + CSR7);
+ mod_timer(&tp->timer, RUN_AT(HZ/50));
+ } else {
+ /* Mask all interrupting sources, set timer to
+ re-enable. */
+ iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
+ iowrite32(0x0012, ioaddr + CSR11);
+ }
+ break;
+ }
+
+ work_count--;
+ if (work_count == 0)
+ break;
+
+ csr5 = ioread32(ioaddr + CSR5);
+
+#ifdef CONFIG_TULIP_NAPI
+ if (rxd)
+ csr5 &= ~RxPollInt;
+ } while ((csr5 & (TxNoBuf |
+ TxDied |
+ TxIntr |
+ TimerInt |
+ /* Abnormal intr. */
+ RxDied |
+ TxFIFOUnderflow |
+ TxJabber |
+ TPLnkFail |
+ SystemError )) != 0);
+#else
+ } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
+
+ tulip_refill_rx(dev);
+
+ /* check if the card is in suspend mode */
+ entry = tp->dirty_rx % RX_RING_SIZE;
+ if (tp->rx_buffers[entry].skb == NULL) {
+ if (tulip_debug > 1)
+ dev_warn(&dev->dev,
+ "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
+ tp->nir, tp->cur_rx, tp->ttimer, rx);
+ if (tp->chip_id == LC82C168) {
+ iowrite32(0x00, ioaddr + CSR7);
+ mod_timer(&tp->timer, RUN_AT(HZ/50));
+ } else {
+ if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
+ if (tulip_debug > 1)
+ dev_warn(&dev->dev,
+ "in rx suspend mode: (%lu) set timer\n",
+ tp->nir);
+ iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
+ ioaddr + CSR7);
+ iowrite32(TimerInt, ioaddr + CSR5);
+ iowrite32(12, ioaddr + CSR11);
+ tp->ttimer = 1;
+ }
+ }
+ }
+#endif /* CONFIG_TULIP_NAPI */
+
+ if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
+ dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
+ }
+
+ if (tulip_debug > 4)
+ netdev_dbg(dev, "exiting interrupt, csr5=%#04x\n",
+ ioread32(ioaddr + CSR5));
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/net/ethernet/dec/tulip/media.c b/drivers/net/ethernet/dec/tulip/media.c
new file mode 100644
index 000000000000..4bd13922875d
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/media.c
@@ -0,0 +1,556 @@
+/*
+ drivers/net/tulip/media.c
+
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
+ for more information on this driver.
+
+ Please submit bugs to http://bugzilla.kernel.org/ .
+*/
+
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include "tulip.h"
+
+
+/* The maximum data clock rate is 2.5 MHz. The minimum timing is usually
+   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+   "overclocking" issues or future 66 MHz PCI. */
+#define mdio_delay() ioread32(mdio_addr)
+
+/* Read and write the MII registers using software-generated serial
+   MDIO protocol. It is just different enough from the EEPROM protocol
+   to not share code. The maximum data clock rate is 2.5 MHz. */
+#define MDIO_SHIFT_CLK 0x10000
+#define MDIO_DATA_WRITE0 0x00000
+#define MDIO_DATA_WRITE1 0x20000
+#define MDIO_ENB 0x00000 /* Ignore the 0x02000 databook setting. */
+#define MDIO_ENB_IN 0x40000
+#define MDIO_DATA_READ 0x80000
+
+static const unsigned char comet_miireg2offset[32] = {
+ 0xB4, 0xB8, 0xBC, 0xC0, 0xC4, 0xC8, 0xCC, 0, 0,0,0,0, 0,0,0,0,
+ 0,0xD0,0,0, 0,0,0,0, 0,0,0,0, 0, 0xD4, 0xD8, 0xDC, };
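+
+/* Illustrative reading of the table above: the Comet exposes some MII
+ * registers as memory-mapped CSRs, so e.g. MII register 0 (BMCR) is
+ * accessed at offset 0xB4 and register 1 (BMSR) at 0xB8; a zero entry
+ * means no mapping exists and tulip_mdio_read() returns 0xffff for it.
+ */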
+
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol.
+ See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management functions")
+ or DP83840A data sheet for more details.
+ */
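+
+/* Frame sketch (per IEEE 802.3 clause 22, for illustration): a read is
+ * <preamble of 1s><ST=01><OP=10><PHY[4:0]><REG[4:0]><turnaround><DATA[15:0]>.
+ * The (0xf6 << 10) constant in tulip_mdio_read() below packs two trailing
+ * preamble ones plus ST and OP into the 16 command bits it shifts out.
+ */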
+
+int tulip_mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ int i;
+ int read_cmd = (0xf6 << 10) | ((phy_id & 0x1f) << 5) | location;
+ int retval = 0;
+ void __iomem *ioaddr = tp->base_addr;
+ void __iomem *mdio_addr = ioaddr + CSR9;
+ unsigned long flags;
+
+ if (location & ~0x1f)
+ return 0xffff;
+
+ if (tp->chip_id == COMET && phy_id == 30) {
+ if (comet_miireg2offset[location])
+ return ioread32(ioaddr + comet_miireg2offset[location]);
+ return 0xffff;
+ }
+
+ spin_lock_irqsave(&tp->mii_lock, flags);
+ if (tp->chip_id == LC82C168) {
+ iowrite32(0x60020000 + (phy_id<<23) + (location<<18), ioaddr + 0xA0);
+ ioread32(ioaddr + 0xA0);
+ ioread32(ioaddr + 0xA0);
+ for (i = 1000; i >= 0; --i) {
+ barrier();
+ if ( ! ((retval = ioread32(ioaddr + 0xA0)) & 0x80000000))
+ break;
+ }
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ return retval & 0xffff;
+ }
+
+ /* Establish sync by sending at least 32 logic ones. */
+ for (i = 32; i >= 0; i--) {
+ iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
+ mdio_delay();
+ iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
+
+ iowrite32(MDIO_ENB | dataval, mdio_addr);
+ mdio_delay();
+ iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ iowrite32(MDIO_ENB_IN, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+ iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ return (retval>>1) & 0xffff;
+}
+
+void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ int i;
+ int cmd = (0x5002 << 16) | ((phy_id & 0x1f) << 23) | (location<<18) | (val & 0xffff);
+ void __iomem *ioaddr = tp->base_addr;
+ void __iomem *mdio_addr = ioaddr + CSR9;
+ unsigned long flags;
+
+ if (location & ~0x1f)
+ return;
+
+ if (tp->chip_id == COMET && phy_id == 30) {
+ if (comet_miireg2offset[location])
+ iowrite32(val, ioaddr + comet_miireg2offset[location]);
+ return;
+ }
+
+ spin_lock_irqsave(&tp->mii_lock, flags);
+ if (tp->chip_id == LC82C168) {
+ iowrite32(cmd, ioaddr + 0xA0);
+ for (i = 1000; i >= 0; --i) {
+ barrier();
+ if ( ! (ioread32(ioaddr + 0xA0) & 0x80000000))
+ break;
+ }
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ return;
+ }
+
+ /* Establish sync by sending 32 logic ones. */
+ for (i = 32; i >= 0; i--) {
+ iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
+ mdio_delay();
+ iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
+ iowrite32(MDIO_ENB | dataval, mdio_addr);
+ mdio_delay();
+ iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ iowrite32(MDIO_ENB_IN, mdio_addr);
+ mdio_delay();
+ iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+}
+
+
+/* Set up the transceiver control registers for the selected media type. */
+void tulip_select_media(struct net_device *dev, int startup)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ struct mediatable *mtable = tp->mtable;
+ u32 new_csr6;
+ int i;
+
+ if (mtable) {
+ struct medialeaf *mleaf = &mtable->mleaf[tp->cur_index];
+ unsigned char *p = mleaf->leafdata;
+ switch (mleaf->type) {
+ case 0: /* 21140 non-MII xcvr. */
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "Using a 21140 non-MII transceiver with control setting %02x\n",
+ p[1]);
+ dev->if_port = p[0];
+ if (startup)
+ iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12);
+ iowrite32(p[1], ioaddr + CSR12);
+ new_csr6 = 0x02000000 | ((p[2] & 0x71) << 18);
+ break;
+ case 2: case 4: {
+ u16 setup[5];
+ u32 csr13val, csr14val, csr15dir, csr15val;
+ for (i = 0; i < 5; i++)
+ setup[i] = get_u16(&p[i*2 + 1]);
+
+ dev->if_port = p[0] & MEDIA_MASK;
+ if (tulip_media_cap[dev->if_port] & MediaAlwaysFD)
+ tp->full_duplex = 1;
+
+ if (startup && mtable->has_reset) {
+ struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
+ unsigned char *rst = rleaf->leafdata;
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "Resetting the transceiver\n");
+ for (i = 0; i < rst[0]; i++)
+ iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
+ }
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "21143 non-MII %s transceiver control %04x/%04x\n",
+ medianame[dev->if_port],
+ setup[0], setup[1]);
+ if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. */
+ csr13val = setup[0];
+ csr14val = setup[1];
+ csr15dir = (setup[3]<<16) | setup[2];
+ csr15val = (setup[4]<<16) | setup[2];
+ iowrite32(0, ioaddr + CSR13);
+ iowrite32(csr14val, ioaddr + CSR14);
+ iowrite32(csr15dir, ioaddr + CSR15); /* Direction */
+ iowrite32(csr15val, ioaddr + CSR15); /* Data */
+ iowrite32(csr13val, ioaddr + CSR13);
+ } else {
+ csr13val = 1;
+ csr14val = 0;
+ csr15dir = (setup[0]<<16) | 0x0008;
+ csr15val = (setup[1]<<16) | 0x0008;
+ if (dev->if_port <= 4)
+ csr14val = t21142_csr14[dev->if_port];
+ if (startup) {
+ iowrite32(0, ioaddr + CSR13);
+ iowrite32(csr14val, ioaddr + CSR14);
+ }
+ iowrite32(csr15dir, ioaddr + CSR15); /* Direction */
+ iowrite32(csr15val, ioaddr + CSR15); /* Data */
+ if (startup) iowrite32(csr13val, ioaddr + CSR13);
+ }
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "Setting CSR15 to %08x/%08x\n",
+ csr15dir, csr15val);
+ if (mleaf->type == 4)
+ new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18);
+ else
+ new_csr6 = 0x82420000;
+ break;
+ }
+ case 1: case 3: {
+ int phy_num = p[0];
+ int init_length = p[1];
+ u16 *misc_info, tmp_info;
+
+ dev->if_port = 11;
+ new_csr6 = 0x020E0000;
+ if (mleaf->type == 3) { /* 21142 */
+ u16 *init_sequence = (u16*)(p+2);
+ u16 *reset_sequence = &((u16*)(p+3))[init_length];
+ int reset_length = p[2 + init_length*2];
+ misc_info = reset_sequence + reset_length;
+ if (startup) {
+ int timeout = 10; /* max 1 ms */
+ for (i = 0; i < reset_length; i++)
+ iowrite32(get_u16(&reset_sequence[i]) << 16, ioaddr + CSR15);
+
+ /* flush posted writes */
+ ioread32(ioaddr + CSR15);
+
+ /* Sect 3.10.3 in DP83840A.pdf (p39) */
+ udelay(500);
+
+ /* Section 4.2 in DP83840A.pdf (p43) */
+ /* and IEEE 802.3 "22.2.4.1.1 Reset" */
+ while (timeout-- &&
+ (tulip_mdio_read (dev, phy_num, MII_BMCR) & BMCR_RESET))
+ udelay(100);
+ }
+ for (i = 0; i < init_length; i++)
+ iowrite32(get_u16(&init_sequence[i]) << 16, ioaddr + CSR15);
+
+ ioread32(ioaddr + CSR15); /* flush posted writes */
+ } else {
+ u8 *init_sequence = p + 2;
+ u8 *reset_sequence = p + 3 + init_length;
+ int reset_length = p[2 + init_length];
+ misc_info = (u16*)(reset_sequence + reset_length);
+ if (startup) {
+ int timeout = 10; /* max 1 ms */
+ iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12);
+ for (i = 0; i < reset_length; i++)
+ iowrite32(reset_sequence[i], ioaddr + CSR12);
+
+ /* flush posted writes */
+ ioread32(ioaddr + CSR12);
+
+ /* Sect 3.10.3 in DP83840A.pdf (p39) */
+ udelay(500);
+
+ /* Section 4.2 in DP83840A.pdf (p43) */
+ /* and IEEE 802.3 "22.2.4.1.1 Reset" */
+ while (timeout-- &&
+ (tulip_mdio_read (dev, phy_num, MII_BMCR) & BMCR_RESET))
+ udelay(100);
+ }
+ for (i = 0; i < init_length; i++)
+ iowrite32(init_sequence[i], ioaddr + CSR12);
+
+ ioread32(ioaddr + CSR12); /* flush posted writes */
+ }
+
+ tmp_info = get_u16(&misc_info[1]);
+ if (tmp_info)
+ tp->advertising[phy_num] = tmp_info | 1;
+ if (tmp_info && startup < 2) {
+ if (tp->mii_advertise == 0)
+ tp->mii_advertise = tp->advertising[phy_num];
+ if (tulip_debug > 1)
+ netdev_dbg(dev, " Advertising %04x on MII %d\n",
+ tp->mii_advertise,
+ tp->phys[phy_num]);
+ tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise);
+ }
+ break;
+ }
+ case 5: case 6: {
+ u16 setup[5];
+
+ new_csr6 = 0; /* FIXME */
+
+ for (i = 0; i < 5; i++)
+ setup[i] = get_u16(&p[i*2 + 1]);
+
+ if (startup && mtable->has_reset) {
+ struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
+ unsigned char *rst = rleaf->leafdata;
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "Resetting the transceiver\n");
+ for (i = 0; i < rst[0]; i++)
+ iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
+ }
+
+ break;
+ }
+ default:
+ netdev_dbg(dev, " Invalid media table selection %d\n",
+ mleaf->type);
+ new_csr6 = 0x020E0000;
+ }
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "Using media type %s, CSR12 is %02x\n",
+ medianame[dev->if_port],
+ ioread32(ioaddr + CSR12) & 0xff);
+ } else if (tp->chip_id == LC82C168) {
+ if (startup && ! tp->medialock)
+ dev->if_port = tp->mii_cnt ? 11 : 0;
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "PNIC PHY status is %3.3x, media %s\n",
+ ioread32(ioaddr + 0xB8),
+ medianame[dev->if_port]);
+ if (tp->mii_cnt) {
+ new_csr6 = 0x810C0000;
+ iowrite32(0x0001, ioaddr + CSR15);
+ iowrite32(0x0201B07A, ioaddr + 0xB8);
+ } else if (startup) {
+ /* Start with 10mbps to do autonegotiation. */
+ iowrite32(0x32, ioaddr + CSR12);
+ new_csr6 = 0x00420000;
+ iowrite32(0x0001B078, ioaddr + 0xB8);
+ iowrite32(0x0201B078, ioaddr + 0xB8);
+ } else if (dev->if_port == 3 || dev->if_port == 5) {
+ iowrite32(0x33, ioaddr + CSR12);
+ new_csr6 = 0x01860000;
+ /* Trigger autonegotiation. */
+ iowrite32(startup ? 0x0201F868 : 0x0001F868, ioaddr + 0xB8);
+ } else {
+ iowrite32(0x32, ioaddr + CSR12);
+ new_csr6 = 0x00420000;
+ iowrite32(0x1F078, ioaddr + 0xB8);
+ }
+ } else { /* Unknown chip type with no media table. */
+ if (tp->default_port == 0)
+ dev->if_port = tp->mii_cnt ? 11 : 3;
+ if (tulip_media_cap[dev->if_port] & MediaIsMII) {
+ new_csr6 = 0x020E0000;
+ } else if (tulip_media_cap[dev->if_port] & MediaIsFx) {
+ new_csr6 = 0x02860000;
+ } else
+ new_csr6 = 0x03860000;
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "No media description table, assuming %s transceiver, CSR12 %02x\n",
+ medianame[dev->if_port],
+ ioread32(ioaddr + CSR12));
+ }
+
+ tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0);
+
+ mdelay(1);
+}
+
+/*
+ Check the MII negotiated duplex and change the CSR6 setting if
+ required.
+ Return 0 if everything is OK.
+ Return < 0 if the transceiver is missing or has no link beat.
+ */
+int tulip_check_duplex(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ unsigned int bmsr, lpa, negotiated, new_csr6;
+
+ bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
+ lpa = tulip_mdio_read(dev, tp->phys[0], MII_LPA);
+ if (tulip_debug > 1)
+ dev_info(&dev->dev, "MII status %04x, Link partner report %04x\n",
+ bmsr, lpa);
+ if (bmsr == 0xffff)
+ return -2;
+ if ((bmsr & BMSR_LSTATUS) == 0) {
+ int new_bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
+ if ((new_bmsr & BMSR_LSTATUS) == 0) {
+ if (tulip_debug > 1)
+ dev_info(&dev->dev,
+ "No link beat on the MII interface, status %04x\n",
+ new_bmsr);
+ return -1;
+ }
+ }
+ negotiated = lpa & tp->advertising[0];
+ tp->full_duplex = mii_duplex(tp->full_duplex_lock, negotiated);
+
+ new_csr6 = tp->csr6;
+
+ if (negotiated & LPA_100) new_csr6 &= ~TxThreshold;
+ else new_csr6 |= TxThreshold;
+ if (tp->full_duplex) new_csr6 |= FullDuplex;
+ else new_csr6 &= ~FullDuplex;
+
+ if (new_csr6 != tp->csr6) {
+ tp->csr6 = new_csr6;
+ tulip_restart_rxtx(tp);
+
+ if (tulip_debug > 0)
+ dev_info(&dev->dev,
+ "Setting %s-duplex based on MII#%d link partner capability of %04x\n",
+ tp->full_duplex ? "full" : "half",
+ tp->phys[0], lpa);
+ return 1;
+ }
+
+ return 0;
+}
+
+void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ int phyn, phy_idx = 0;
+ int mii_reg0;
+ int mii_advert;
+ unsigned int to_advert, new_bmcr, ane_switch;
+
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs later,
+	   but it takes much time. */
+ for (phyn = 1; phyn <= 32 && phy_idx < sizeof (tp->phys); phyn++) {
+ int phy = phyn & 0x1f;
+ int mii_status = tulip_mdio_read (dev, phy, MII_BMSR);
+ if ((mii_status & 0x8301) == 0x8001 ||
+ ((mii_status & BMSR_100BASE4) == 0 &&
+ (mii_status & 0x7800) != 0)) {
+ /* preserve Becker logic, gain indentation level */
+ } else {
+ continue;
+ }
+
+ mii_reg0 = tulip_mdio_read (dev, phy, MII_BMCR);
+ mii_advert = tulip_mdio_read (dev, phy, MII_ADVERTISE);
+ ane_switch = 0;
+
+		/* if not advertising at all, generate an
+ * advertising value from the capability
+ * bits in BMSR
+ */
+ if ((mii_advert & ADVERTISE_ALL) == 0) {
+ unsigned int tmpadv = tulip_mdio_read (dev, phy, MII_BMSR);
+ mii_advert = ((tmpadv >> 6) & 0x3e0) | 1;
+ }
+
+ if (tp->mii_advertise) {
+ tp->advertising[phy_idx] =
+ to_advert = tp->mii_advertise;
+ } else if (tp->advertising[phy_idx]) {
+ to_advert = tp->advertising[phy_idx];
+ } else {
+ tp->advertising[phy_idx] =
+ tp->mii_advertise =
+ to_advert = mii_advert;
+ }
+
+ tp->phys[phy_idx++] = phy;
+
+ pr_info("tulip%d: MII transceiver #%d config %04x status %04x advertising %04x\n",
+ board_idx, phy, mii_reg0, mii_status, mii_advert);
+
+ /* Fixup for DLink with miswired PHY. */
+ if (mii_advert != to_advert) {
+ pr_debug("tulip%d: Advertising %04x on PHY %d, previously advertising %04x\n",
+ board_idx, to_advert, phy, mii_advert);
+ tulip_mdio_write (dev, phy, 4, to_advert);
+ }
+
+ /* Enable autonegotiation: some boards default to off. */
+ if (tp->default_port == 0) {
+ new_bmcr = mii_reg0 | BMCR_ANENABLE;
+ if (new_bmcr != mii_reg0) {
+ new_bmcr |= BMCR_ANRESTART;
+ ane_switch = 1;
+ }
+ }
+ /* ...or disable nway, if forcing media */
+ else {
+ new_bmcr = mii_reg0 & ~BMCR_ANENABLE;
+ if (new_bmcr != mii_reg0)
+ ane_switch = 1;
+ }
+
+ /* clear out bits we never want at this point */
+ new_bmcr &= ~(BMCR_CTST | BMCR_FULLDPLX | BMCR_ISOLATE |
+ BMCR_PDOWN | BMCR_SPEED100 | BMCR_LOOPBACK |
+ BMCR_RESET);
+
+ if (tp->full_duplex)
+ new_bmcr |= BMCR_FULLDPLX;
+ if (tulip_media_cap[tp->default_port] & MediaIs100)
+ new_bmcr |= BMCR_SPEED100;
+
+ if (new_bmcr != mii_reg0) {
+ /* some phys need the ANE switch to
+ * happen before forced media settings
+ * will "take." However, we write the
+ * same value twice in order not to
+ * confuse the sane phys.
+ */
+ if (ane_switch) {
+ tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr);
+ udelay (10);
+ }
+ tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr);
+ }
+ }
+ tp->mii_cnt = phy_idx;
+ if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) {
+ pr_info("tulip%d: ***WARNING***: No MII transceiver found!\n",
+ board_idx);
+ tp->phys[0] = 1;
+ }
+}
diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c
new file mode 100644
index 000000000000..52d898bdbeb4
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/pnic.c
@@ -0,0 +1,173 @@
+/*
+ drivers/net/tulip/pnic.c
+
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
+ for more information on this driver.
+
+ Please submit bugs to http://bugzilla.kernel.org/ .
+*/
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include "tulip.h"
+
+
+void pnic_do_nway(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ u32 phy_reg = ioread32(ioaddr + 0xB8);
+ u32 new_csr6 = tp->csr6 & ~0x40C40200;
+
+ if (phy_reg & 0x78000000) { /* Ignore baseT4 */
+ if (phy_reg & 0x20000000) dev->if_port = 5;
+ else if (phy_reg & 0x40000000) dev->if_port = 3;
+ else if (phy_reg & 0x10000000) dev->if_port = 4;
+ else if (phy_reg & 0x08000000) dev->if_port = 0;
+ tp->nwayset = 1;
+ new_csr6 = (dev->if_port & 1) ? 0x01860000 : 0x00420000;
+ iowrite32(0x32 | (dev->if_port & 1), ioaddr + CSR12);
+ if (dev->if_port & 1)
+ iowrite32(0x1F868, ioaddr + 0xB8);
+ if (phy_reg & 0x30000000) {
+ tp->full_duplex = 1;
+ new_csr6 |= 0x00000200;
+ }
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "PNIC autonegotiated status %08x, %s\n",
+ phy_reg, medianame[dev->if_port]);
+ if (tp->csr6 != new_csr6) {
+ tp->csr6 = new_csr6;
+ /* Restart Tx */
+ tulip_restart_rxtx(tp);
+ dev->trans_start = jiffies;
+ }
+ }
+}
+
+void pnic_lnk_change(struct net_device *dev, int csr5)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int phy_reg = ioread32(ioaddr + 0xB8);
+
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "PNIC link changed state %08x, CSR5 %08x\n",
+ phy_reg, csr5);
+ if (ioread32(ioaddr + CSR5) & TPLnkFail) {
+ iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkFail) | TPLnkPass, ioaddr + CSR7);
+ /* If we use an external MII, then we mustn't use the
+ * internal negotiation.
+ */
+ if (tulip_media_cap[dev->if_port] & MediaIsMII)
+ return;
+ if (! tp->nwayset || time_after(jiffies, dev_trans_start(dev) + 1*HZ)) {
+ tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff);
+ iowrite32(tp->csr6, ioaddr + CSR6);
+ iowrite32(0x30, ioaddr + CSR12);
+ iowrite32(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. */
+ dev->trans_start = jiffies;
+ }
+ } else if (ioread32(ioaddr + CSR5) & TPLnkPass) {
+ if (tulip_media_cap[dev->if_port] & MediaIsMII) {
+ spin_lock(&tp->lock);
+ tulip_check_duplex(dev);
+ spin_unlock(&tp->lock);
+ } else {
+ pnic_do_nway(dev);
+ }
+ iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkPass) | TPLnkFail, ioaddr + CSR7);
+ }
+}
+
+void pnic_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int next_tick = 60*HZ;
+
+	if (!ioread32(ioaddr + CSR7)) {
+		/* the timer was called due to a work overflow
+		 * in the interrupt handler. Skip the connection
+		 * checks; the NIC is definitely speaking with
+		 * its link partner.
+		 */
+ goto too_good_connection;
+ }
+
+ if (tulip_media_cap[dev->if_port] & MediaIsMII) {
+ spin_lock_irq(&tp->lock);
+ if (tulip_check_duplex(dev) > 0)
+ next_tick = 3*HZ;
+ spin_unlock_irq(&tp->lock);
+ } else {
+ int csr12 = ioread32(ioaddr + CSR12);
+ int new_csr6 = tp->csr6 & ~0x40C40200;
+ int phy_reg = ioread32(ioaddr + 0xB8);
+ int csr5 = ioread32(ioaddr + CSR5);
+
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "PNIC timer PHY status %08x, %s CSR5 %08x\n",
+ phy_reg, medianame[dev->if_port], csr5);
+ if (phy_reg & 0x04000000) { /* Remote link fault */
+ iowrite32(0x0201F078, ioaddr + 0xB8);
+ next_tick = 1*HZ;
+ tp->nwayset = 0;
+ } else if (phy_reg & 0x78000000) { /* Ignore baseT4 */
+ pnic_do_nway(dev);
+ next_tick = 60*HZ;
+ } else if (csr5 & TPLnkFail) { /* 100baseTx link beat */
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "%s link beat failed, CSR12 %04x, CSR5 %08x, PHY %03x\n",
+ medianame[dev->if_port],
+ csr12,
+ ioread32(ioaddr + CSR5),
+ ioread32(ioaddr + 0xB8));
+ next_tick = 3*HZ;
+ if (tp->medialock) {
+ } else if (tp->nwayset && (dev->if_port & 1)) {
+ next_tick = 1*HZ;
+ } else if (dev->if_port == 0) {
+ dev->if_port = 3;
+ iowrite32(0x33, ioaddr + CSR12);
+ new_csr6 = 0x01860000;
+ iowrite32(0x1F868, ioaddr + 0xB8);
+ } else {
+ dev->if_port = 0;
+ iowrite32(0x32, ioaddr + CSR12);
+ new_csr6 = 0x00420000;
+ iowrite32(0x1F078, ioaddr + 0xB8);
+ }
+ if (tp->csr6 != new_csr6) {
+ tp->csr6 = new_csr6;
+ /* Restart Tx */
+ tulip_restart_rxtx(tp);
+ dev->trans_start = jiffies;
+ if (tulip_debug > 1)
+ dev_info(&dev->dev,
+ "Changing PNIC configuration to %s %s-duplex, CSR6 %08x\n",
+ medianame[dev->if_port],
+ tp->full_duplex ? "full" : "half",
+ new_csr6);
+ }
+ }
+ }
+too_good_connection:
+ mod_timer(&tp->timer, RUN_AT(next_tick));
+	if (!ioread32(ioaddr + CSR7)) {
+ if (tulip_debug > 1)
+ dev_info(&dev->dev, "sw timer wakeup\n");
+ disable_irq(dev->irq);
+ tulip_refill_rx(dev);
+ enable_irq(dev->irq);
+ iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+ }
+}
diff --git a/drivers/net/ethernet/dec/tulip/pnic2.c b/drivers/net/ethernet/dec/tulip/pnic2.c
new file mode 100644
index 000000000000..93358ee4d830
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/pnic2.c
@@ -0,0 +1,406 @@
+/*
+ drivers/net/tulip/pnic2.c
+
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+	Modified to help support PNIC_II by Kevin B. Hendricks
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
+ for more information on this driver.
+
+ Please submit bugs to http://bugzilla.kernel.org/ .
+*/
+
+
+/* Understanding the PNIC_II - everything in this file is based
+ * on the PNIC_II.PDF datasheet, which is sorely lacking in detail
+ *
+ * As I understand things, here are the registers and bits that
+ * explain the masks and constants used in this file that are
+ * either different from the 21142/3 or important for basic operation.
+ *
+ *
+ * CSR 6 (mask = 0xfe3bd1fd of bits not to change)
+ * -----
+ * Bit 24 - SCR
+ * Bit 23 - PCS
+ * Bit 22 - TTM (Transmit Threshold Mode)
+ * Bit 18 - Port Select
+ * Bit 13 - Start - 1, Stop - 0 Transmissions
+ * Bit 11:10 - Loop Back Operation Mode
+ * Bit 9 - Full Duplex mode (Advertise 10BaseT-FD if CSR14<7> is set)
+ * Bit 1 - Start - 1, Stop - 0 Receive
+ *
+ *
+ * CSR 14 (mask = 0xfff0ee39 of bits not to change)
+ * ------
+ * Bit 19 - PAUSE-Pause
+ * Bit 18 - Advertise T4
+ * Bit 17 - Advertise 100baseTx-FD
+ * Bit 16 - Advertise 100baseTx-HD
+ * Bit 12 - LTE - Link Test Enable
+ * Bit 7 - ANE - Auto Negotiate Enable
+ * Bit 6 - HDE - Advertise 10baseT-HD
+ * Bit 2 - Reset to Power down - kept as 1 for normal operation
+ * Bit 1 - Loop Back enable for 10baseT MCC
+ *
+ *
+ * CSR 12
+ * ------
+ * Bit 25 - Partner can do T4
+ * Bit 24 - Partner can do 100baseTx-FD
+ * Bit 23 - Partner can do 100baseTx-HD
+ * Bit 22 - Partner can do 10baseT-FD
+ * Bit 21 - Partner can do 10baseT-HD
+ * Bit 15 - LPN is 1 if all the above bits are valid, otherwise 0
+ * Bit 14:12 - autonegotiation state (write 001 to start autonegotiate)
+ * Bit 3 - Autopolarity state
+ * Bit 2 - LS10B - link state of 10baseT 0 - good, 1 - failed
+ * Bit 1 - LS100B - link state of 100baseT 0 - good, 1 - failed
+ *
+ *
+ * Data Port Selection Info
+ *-------------------------
+ *
+ * CSR14<7> CSR6<18> CSR6<22> CSR6<23> CSR6<24> MODE/PORT
+ * 1 0 0 (X) 0 (X) 1 NWAY
+ * 0 0 1 0 (X) 0 10baseT
+ * 0 1 0 1 1 (X) 100baseT
+ *
+ *
+ */
+
+
+
+#include "tulip.h"
+#include <linux/delay.h>
+
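+/* A minimal helper sketch (illustrative only; not used by the driver):
+ * per the Data Port Selection table above, the 100 Mb media (the odd
+ * if_port values 3 and 5) use CSR6 bits matching mask 0x01840000 while
+ * 10baseT uses 0x00400000, the literal masks in pnic2_lnk_change().
+ */
+static inline u32 pnic2_csr6_port_bits(int if_port)
+{
+	return (if_port & 1) ? 0x01840000 : 0x00400000;
+}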
+
+void pnic2_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int next_tick = 60*HZ;
+
+ if (tulip_debug > 3)
+ dev_info(&dev->dev, "PNIC2 negotiation status %08x\n",
+ ioread32(ioaddr + CSR12));
+
+ if (next_tick) {
+ mod_timer(&tp->timer, RUN_AT(next_tick));
+ }
+}
+
+
+void pnic2_start_nway(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int csr14;
+ int csr12;
+
+ /* set up what to advertise during the negotiation */
+
+ /* load in csr14 and mask off bits not to touch
+ * comment at top of file explains mask value
+ */
+ csr14 = (ioread32(ioaddr + CSR14) & 0xfff0ee39);
+
+        /* bit 17 - advertise 100baseTx-FD */
+ if (tp->sym_advertise & 0x0100) csr14 |= 0x00020000;
+
+ /* bit 16 - advertise 100baseTx-HD */
+ if (tp->sym_advertise & 0x0080) csr14 |= 0x00010000;
+
+ /* bit 6 - advertise 10baseT-HD */
+ if (tp->sym_advertise & 0x0020) csr14 |= 0x00000040;
+
+ /* Now set bit 12 Link Test Enable, Bit 7 Autonegotiation Enable
+ * and bit 0 Don't PowerDown 10baseT
+ */
+ csr14 |= 0x00001184;
+
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "Restarting PNIC2 autonegotiation, csr14=%08x\n",
+ csr14);
+
+ /* tell pnic2_lnk_change we are doing an nway negotiation */
+ dev->if_port = 0;
+ tp->nway = tp->mediasense = 1;
+ tp->nwayset = tp->lpar = 0;
+
+ /* now we have to set up csr6 for NWAY state */
+
+ tp->csr6 = ioread32(ioaddr + CSR6);
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "On Entry to Nway, csr6=%08x\n", tp->csr6);
+
+ /* mask off any bits not to touch
+ * comment at top of file explains mask value
+ */
+ tp->csr6 = tp->csr6 & 0xfe3bd1fd;
+
+ /* don't forget that bit 9 is also used for advertising */
+ /* advertise 10baseT-FD for the negotiation (bit 9) */
+ if (tp->sym_advertise & 0x0040) tp->csr6 |= 0x00000200;
+
+ /* set bit 24 for nway negotiation mode ...
+ * see Data Port Selection comment at top of file
+ * and "Stop" - reset both Transmit (bit 13) and Receive (bit 1)
+ */
+ tp->csr6 |= 0x01000000;
+ iowrite32(csr14, ioaddr + CSR14);
+ iowrite32(tp->csr6, ioaddr + CSR6);
+ udelay(100);
+
+ /* all set up so now force the negotiation to begin */
+
+ /* read in current values and mask off all but the
+ * Autonegotiation bits 14:12. Writing a 001 to those bits
+ * should start the autonegotiation
+ */
+ csr12 = (ioread32(ioaddr + CSR12) & 0xffff8fff);
+ csr12 |= 0x1000;
+ iowrite32(csr12, ioaddr + CSR12);
+}
+
+
+
+void pnic2_lnk_change(struct net_device *dev, int csr5)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int csr14;
+
+        /* read the status register to find out what is up */
+ int csr12 = ioread32(ioaddr + CSR12);
+
+ if (tulip_debug > 1)
+ dev_info(&dev->dev,
+ "PNIC2 link status interrupt %08x, CSR5 %x, %08x\n",
+ csr12, csr5, ioread32(ioaddr + CSR14));
+
+        /* If NWay finished and we have a negotiated partner capability,
+         * check bits 14:12 for bit pattern 101 - all is good
+ */
+ if (tp->nway && !tp->nwayset) {
+
+ /* we did an auto negotiation */
+
+ if ((csr12 & 0x7000) == 0x5000) {
+
+ /* negotiation ended successfully */
+
+                /* get the link partner's reply and mask out all but
+ * bits 24-21 which show the partners capabilities
+ * and match those to what we advertised
+ *
+ * then begin to interpret the results of the negotiation.
+ * Always go in this order : (we are ignoring T4 for now)
+ * 100baseTx-FD, 100baseTx-HD, 10baseT-FD, 10baseT-HD
+ */
+
+ int negotiated = ((csr12 >> 16) & 0x01E0) & tp->sym_advertise;
+ tp->lpar = (csr12 >> 16);
+ tp->nwayset = 1;
+
+ if (negotiated & 0x0100) dev->if_port = 5;
+ else if (negotiated & 0x0080) dev->if_port = 3;
+ else if (negotiated & 0x0040) dev->if_port = 4;
+ else if (negotiated & 0x0020) dev->if_port = 0;
+ else {
+ if (tulip_debug > 1)
+ dev_info(&dev->dev,
+ "funny autonegotiate result csr12 %08x advertising %04x\n",
+ csr12, tp->sym_advertise);
+ tp->nwayset = 0;
+ /* so check if 100baseTx link state is okay */
+ if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180))
+ dev->if_port = 3;
+ }
+
+ /* now record the duplex that was negotiated */
+ tp->full_duplex = 0;
+ if ((dev->if_port == 4) || (dev->if_port == 5))
+ tp->full_duplex = 1;
+
+ if (tulip_debug > 1) {
+ if (tp->nwayset)
+ dev_info(&dev->dev,
+ "Switching to %s based on link negotiation %04x & %04x = %04x\n",
+ medianame[dev->if_port],
+ tp->sym_advertise, tp->lpar,
+ negotiated);
+ }
+
+ /* remember to turn off bit 7 - autonegotiate
+ * enable so we can properly end nway mode and
+                 * set duplex (i.e. use csr6<9> again)
+ */
+ csr14 = (ioread32(ioaddr + CSR14) & 0xffffff7f);
+ iowrite32(csr14,ioaddr + CSR14);
+
+
+ /* now set the data port and operating mode
+ * (see the Data Port Selection comments at
+ * the top of the file
+ */
+
+ /* get current csr6 and mask off bits not to touch */
+ /* see comment at top of file */
+
+ tp->csr6 = (ioread32(ioaddr + CSR6) & 0xfe3bd1fd);
+
+ /* so if using if_port 3 or 5 then select the 100baseT
+ * port else select the 10baseT port.
+ * See the Data Port Selection table at the top
+ * of the file which was taken from the PNIC_II.PDF
+ * datasheet
+ */
+ if (dev->if_port & 1) tp->csr6 |= 0x01840000;
+ else tp->csr6 |= 0x00400000;
+
+ /* now set the full duplex bit appropriately */
+ if (tp->full_duplex) tp->csr6 |= 0x00000200;
+
+ iowrite32(1, ioaddr + CSR13);
+
+ if (tulip_debug > 2)
+ netdev_dbg(dev, "Setting CSR6 %08x/%x CSR12 %08x\n",
+ tp->csr6,
+ ioread32(ioaddr + CSR6),
+ ioread32(ioaddr + CSR12));
+
+ /* now the following actually writes out the
+ * new csr6 values
+ */
+ tulip_start_rxtx(tp);
+
+ return;
+
+ } else {
+ dev_info(&dev->dev,
+ "Autonegotiation failed, using %s, link beat status %04x\n",
+ medianame[dev->if_port], csr12);
+
+ /* remember to turn off bit 7 - autonegotiate
+ * enable so we don't forget
+ */
+ csr14 = (ioread32(ioaddr + CSR14) & 0xffffff7f);
+ iowrite32(csr14, ioaddr + CSR14);
+
+ /* what should we do when autonegotiation fails?
+ * should we try again, or fall back to a baseline
+ * case? It isn't clear.
+ *
+ * for now, default to a baseline case
+ */
+
+ dev->if_port = 0;
+ tp->nway = 0;
+ tp->nwayset = 1;
+
+ /* set to 10baseTx-HD - see Data Port Selection
+ * comment given at the top of the file
+ */
+ tp->csr6 = (ioread32(ioaddr + CSR6) & 0xfe3bd1fd);
+ tp->csr6 |= 0x00400000;
+
+ tulip_restart_rxtx(tp);
+
+ return;
+
+ }
+ }
+
+ if ((tp->nwayset && (csr5 & 0x08000000) &&
+ (dev->if_port == 3 || dev->if_port == 5) &&
+ (csr12 & 2) == 2) || (tp->nway && (csr5 & (TPLnkFail)))) {
+
+ /* Link blew? Maybe restart NWay. */
+
+ if (tulip_debug > 2)
+ netdev_dbg(dev, "Ugh! Link blew?\n");
+
+ del_timer_sync(&tp->timer);
+ pnic2_start_nway(dev);
+ tp->timer.expires = RUN_AT(3*HZ);
+ add_timer(&tp->timer);
+
+ return;
+ }
+
+
+ if (dev->if_port == 3 || dev->if_port == 5) {
+
+ /* we are at 100mb and a potential link change occurred */
+
+ if (tulip_debug > 1)
+ dev_info(&dev->dev, "PNIC2 %s link beat %s\n",
+ medianame[dev->if_port],
+ (csr12 & 2) ? "failed" : "good");
+
+ /* check 100 link beat */
+
+ tp->nway = 0;
+ tp->nwayset = 1;
+
+ /* if failed then try doing an nway to get in sync */
+ if ((csr12 & 2) && !tp->medialock) {
+ del_timer_sync(&tp->timer);
+ pnic2_start_nway(dev);
+ tp->timer.expires = RUN_AT(3*HZ);
+ add_timer(&tp->timer);
+ }
+
+ return;
+ }
+
+ if (dev->if_port == 0 || dev->if_port == 4) {
+
+ /* we are at 10mb and a potential link change occurred */
+
+ if (tulip_debug > 1)
+ dev_info(&dev->dev, "PNIC2 %s link beat %s\n",
+ medianame[dev->if_port],
+ (csr12 & 4) ? "failed" : "good");
+
+
+ tp->nway = 0;
+ tp->nwayset = 1;
+
+ /* if failed, try doing an nway to get in sync */
+ if ((csr12 & 4) && !tp->medialock) {
+ del_timer_sync(&tp->timer);
+ pnic2_start_nway(dev);
+ tp->timer.expires = RUN_AT(3*HZ);
+ add_timer(&tp->timer);
+ }
+
+ return;
+ }
+
+
+ if (tulip_debug > 1)
+ dev_info(&dev->dev, "PNIC2 Link Change Default?\n");
+
+ /* if all else fails default to trying 10baseT-HD */
+ dev->if_port = 0;
+
+ /* make sure autonegotiate enable is off */
+ csr14 = (ioread32(ioaddr + CSR14) & 0xffffff7f);
+ iowrite32(csr14, ioaddr + CSR14);
+
+ /* set to 10baseTx-HD - see Data Port Selection
+ * comment given at the top of the file
+ */
+ tp->csr6 = (ioread32(ioaddr + CSR6) & 0xfe3bd1fd);
+ tp->csr6 |= 0x00400000;
+
+ tulip_restart_rxtx(tp);
+}
+
diff --git a/drivers/net/ethernet/dec/tulip/timer.c b/drivers/net/ethernet/dec/tulip/timer.c
new file mode 100644
index 000000000000..2017faf2d0e6
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/timer.c
@@ -0,0 +1,179 @@
+/*
+ drivers/net/tulip/timer.c
+
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
+ for more information on this driver.
+
+ Please submit bugs to http://bugzilla.kernel.org/ .
+*/
+
+
+#include "tulip.h"
+
+
+void tulip_media_task(struct work_struct *work)
+{
+ struct tulip_private *tp =
+ container_of(work, struct tulip_private, media_work);
+ struct net_device *dev = tp->dev;
+ void __iomem *ioaddr = tp->base_addr;
+ u32 csr12 = ioread32(ioaddr + CSR12);
+ int next_tick = 2*HZ;
+ unsigned long flags;
+
+ if (tulip_debug > 2) {
+ netdev_dbg(dev, "Media selection tick, %s, status %08x mode %08x SIA %08x %08x %08x %08x\n",
+ medianame[dev->if_port],
+ ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR6),
+ csr12, ioread32(ioaddr + CSR13),
+ ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
+ }
+ switch (tp->chip_id) {
+ case DC21140:
+ case DC21142:
+ case MX98713:
+ case COMPEX9881:
+ case DM910X:
+ default: {
+ struct medialeaf *mleaf;
+ unsigned char *p;
+ if (tp->mtable == NULL) { /* No EEPROM info, use generic code. */
+ /* Not much that can be done.
+ Assume this is a generic MII or SYM transceiver. */
+ next_tick = 60*HZ;
+ if (tulip_debug > 2)
+ netdev_dbg(dev, "network media monitor CSR6 %08x CSR12 0x%02x\n",
+ ioread32(ioaddr + CSR6),
+ csr12 & 0xff);
+ break;
+ }
+ mleaf = &tp->mtable->mleaf[tp->cur_index];
+ p = mleaf->leafdata;
+ switch (mleaf->type) {
+ case 0: case 4: {
+ /* Type 0 serial or 4 SYM transceiver. Check the link beat bit. */
+ int offset = mleaf->type == 4 ? 5 : 2;
+ s8 bitnum = p[offset];
+ if (p[offset+1] & 0x80) {
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "Transceiver monitor tick CSR12=%#02x, no media sense\n",
+ csr12);
+ if (mleaf->type == 4) {
+ if (mleaf->media == 3 && (csr12 & 0x02))
+ goto select_next_media;
+ }
+ break;
+ }
+ if (tulip_debug > 2)
+ netdev_dbg(dev, "Transceiver monitor tick: CSR12=%#02x bit %d is %d, expecting %d\n",
+ csr12, (bitnum >> 1) & 7,
+ (csr12 & (1 << ((bitnum >> 1) & 7))) != 0,
+ (bitnum >= 0));
+ /* Check that the specified bit has the proper value. */
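+ /* A negative bitnum inverts the sense: the link is up
+ * when the CSR12 bit reads 0, otherwise when it reads 1.
+ */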
+ if ((bitnum < 0) !=
+ ((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) {
+ if (tulip_debug > 2)
+ netdev_dbg(dev, "Link beat detected for %s\n",
+ medianame[mleaf->media & MEDIA_MASK]);
+ if ((p[2] & 0x61) == 0x01) /* Bogus Znyx board. */
+ goto actually_mii;
+ netif_carrier_on(dev);
+ break;
+ }
+ netif_carrier_off(dev);
+ if (tp->medialock)
+ break;
+ select_next_media:
+ if (--tp->cur_index < 0) {
+ /* We start again, but should instead look for default. */
+ tp->cur_index = tp->mtable->leafcount - 1;
+ }
+ dev->if_port = tp->mtable->mleaf[tp->cur_index].media;
+ if (tulip_media_cap[dev->if_port] & MediaIsFD)
+ goto select_next_media; /* Skip FD entries. */
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "No link beat on media %s, trying transceiver type %s\n",
+ medianame[mleaf->media & MEDIA_MASK],
+ medianame[tp->mtable->mleaf[tp->cur_index].media]);
+ tulip_select_media(dev, 0);
+ /* Restart the transmit process. */
+ tulip_restart_rxtx(tp);
+ next_tick = (24*HZ)/10;
+ break;
+ }
+ case 1: case 3: /* 21140, 21142 MII */
+ actually_mii:
+ if (tulip_check_duplex(dev) < 0) {
+ netif_carrier_off(dev);
+ next_tick = 3*HZ;
+ } else {
+ netif_carrier_on(dev);
+ next_tick = 60*HZ;
+ }
+ break;
+ case 2: /* 21142 serial block has no link beat. */
+ default:
+ break;
+ }
+ }
+ break;
+ }
+
+
+ spin_lock_irqsave(&tp->lock, flags);
+ if (tp->timeout_recovery) {
+ tulip_tx_timeout_complete(tp, ioaddr);
+ tp->timeout_recovery = 0;
+ }
+ spin_unlock_irqrestore(&tp->lock, flags);
+
+ /* mod_timer synchronizes us with potential add_timer calls
+ * from interrupts.
+ */
+ mod_timer(&tp->timer, RUN_AT(next_tick));
+}
+
+
+void mxic_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int next_tick = 60*HZ;
+
+ if (tulip_debug > 3) {
+ dev_info(&dev->dev, "MXIC negotiation status %08x\n",
+ ioread32(ioaddr + CSR12));
+ }
+ if (next_tick) {
+ mod_timer(&tp->timer, RUN_AT(next_tick));
+ }
+}
+
+
+void comet_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = netdev_priv(dev);
+ int next_tick = 60*HZ;
+
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "Comet link status %04x partner capability %04x\n",
+ tulip_mdio_read(dev, tp->phys[0], 1),
+ tulip_mdio_read(dev, tp->phys[0], 5));
+ /* mod_timer synchronizes us with potential add_timer calls
+ * from interrupts.
+ */
+ if (tulip_check_duplex(dev) < 0)
+ netif_carrier_off(dev);
+ else
+ netif_carrier_on(dev);
+ mod_timer(&tp->timer, RUN_AT(next_tick));
+}
+
diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h
new file mode 100644
index 000000000000..9db528967da9
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/tulip.h
@@ -0,0 +1,573 @@
+/*
+ drivers/net/tulip/tulip.h
+
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
+ for more information on this driver.
+
+ Please submit bugs to http://bugzilla.kernel.org/ .
+*/
+
+#ifndef __NET_TULIP_H__
+#define __NET_TULIP_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+
+
+
+/* undefine, or define to various debugging levels (>4 == obscene levels) */
+#define TULIP_DEBUG 1
+
+#ifdef CONFIG_TULIP_MMIO
+#define TULIP_BAR 1 /* CBMA */
+#else
+#define TULIP_BAR 0 /* CBIO */
+#endif
+
+
+
+struct tulip_chip_table {
+ char *chip_name;
+ int io_size;
+ int valid_intrs; /* CSR7 interrupt enable settings */
+ int flags;
+ void (*media_timer) (unsigned long);
+ work_func_t media_task;
+};
+
+
+enum tbl_flag {
+ HAS_MII = 0x00001,
+ HAS_MEDIA_TABLE = 0x00002,
+ CSR12_IN_SROM = 0x00004,
+ ALWAYS_CHECK_MII = 0x00008,
+ HAS_ACPI = 0x00010,
+ MC_HASH_ONLY = 0x00020, /* Hash-only multicast filter. */
+ HAS_PNICNWAY = 0x00080,
+ HAS_NWAY = 0x00040, /* Uses internal NWay xcvr. */
+ HAS_INTR_MITIGATION = 0x00100,
+ IS_ASIX = 0x00200,
+ HAS_8023X = 0x00400,
+ COMET_MAC_ADDR = 0x00800,
+ HAS_PCI_MWI = 0x01000,
+ HAS_PHY_IRQ = 0x02000,
+ HAS_SWAPPED_SEEPROM = 0x04000,
+ NEEDS_FAKE_MEDIA_TABLE = 0x08000,
+ COMET_PM = 0x10000,
+};
+
+
+/* chip types. careful! order is VERY IMPORTANT here, as these
+ * are used throughout the driver as indices into arrays */
+/* Note 21142 == 21143. */
+enum chips {
+ DC21040 = 0,
+ DC21041 = 1,
+ DC21140 = 2,
+ DC21142 = 3, DC21143 = 3,
+ LC82C168,
+ MX98713,
+ MX98715,
+ MX98725,
+ AX88140,
+ PNIC2,
+ COMET,
+ COMPEX9881,
+ I21145,
+ DM910X,
+ CONEXANT,
+};
+
+
+enum MediaIs {
+ MediaIsFD = 1,
+ MediaAlwaysFD = 2,
+ MediaIsMII = 4,
+ MediaIsFx = 8,
+ MediaIs100 = 16
+};
+
+
+/* Offsets to the Command and Status Registers, "CSRs". All accesses
+ must be longword instructions and quadword aligned. */
+enum tulip_offsets {
+ CSR0 = 0,
+ CSR1 = 0x08,
+ CSR2 = 0x10,
+ CSR3 = 0x18,
+ CSR4 = 0x20,
+ CSR5 = 0x28,
+ CSR6 = 0x30,
+ CSR7 = 0x38,
+ CSR8 = 0x40,
+ CSR9 = 0x48,
+ CSR10 = 0x50,
+ CSR11 = 0x58,
+ CSR12 = 0x60,
+ CSR13 = 0x68,
+ CSR14 = 0x70,
+ CSR15 = 0x78,
+ CSR18 = 0x88,
+ CSR19 = 0x8c,
+ CSR20 = 0x90,
+ CSR27 = 0xAC,
+ CSR28 = 0xB0,
+};
+
+/* register offset and bits for CFDD PCI config reg */
+enum pci_cfg_driver_reg {
+ CFDD = 0x40,
+ CFDD_Sleep = (1 << 31),
+ CFDD_Snooze = (1 << 30),
+};
+
+#define RxPollInt (RxIntr|RxNoBuf|RxDied|RxJabber)
+
+/* The bits in the CSR5 status registers, mostly interrupt sources. */
+enum status_bits {
+ TimerInt = 0x800,
+ SystemError = 0x2000,
+ TPLnkFail = 0x1000,
+ TPLnkPass = 0x10,
+ NormalIntr = 0x10000,
+ AbnormalIntr = 0x8000,
+ RxJabber = 0x200,
+ RxDied = 0x100,
+ RxNoBuf = 0x80,
+ RxIntr = 0x40,
+ TxFIFOUnderflow = 0x20,
+ RxErrIntr = 0x10,
+ TxJabber = 0x08,
+ TxNoBuf = 0x04,
+ TxDied = 0x02,
+ TxIntr = 0x01,
+};
+
+/* bit mask for CSR5 TX/RX process state */
+#define CSR5_TS 0x00700000
+#define CSR5_RS 0x000e0000
+
+enum tulip_mode_bits {
+ TxThreshold = (1 << 22),
+ FullDuplex = (1 << 9),
+ TxOn = 0x2000,
+ AcceptBroadcast = 0x0100,
+ AcceptAllMulticast = 0x0080,
+ AcceptAllPhys = 0x0040,
+ AcceptRunt = 0x0008,
+ RxOn = 0x0002,
+ RxTx = (TxOn | RxOn),
+};
+
+
+enum tulip_busconfig_bits {
+ MWI = (1 << 24),
+ MRL = (1 << 23),
+ MRM = (1 << 21),
+ CALShift = 14,
+ BurstLenShift = 8,
+};
+
+
+/* The Tulip Rx and Tx buffer descriptors. */
+struct tulip_rx_desc {
+ __le32 status;
+ __le32 length;
+ __le32 buffer1;
+ __le32 buffer2;
+};
+
+
+struct tulip_tx_desc {
+ __le32 status;
+ __le32 length;
+ __le32 buffer1;
+ __le32 buffer2; /* We use only buffer 1. */
+};
+
+
+enum desc_status_bits {
+ DescOwned = 0x80000000,
+ DescWholePkt = 0x60000000,
+ DescEndPkt = 0x40000000,
+ DescStartPkt = 0x20000000,
+ DescEndRing = 0x02000000,
+ DescUseLink = 0x01000000,
+
+ /*
+ * Error summary flag is logical or of 'CRC Error', 'Collision Seen',
+ * 'Frame Too Long', 'Runt' and 'Descriptor Error' flags generated
+ * within tulip chip.
+ */
+ RxDescErrorSummary = 0x8000,
+ RxDescCRCError = 0x0002,
+ RxDescCollisionSeen = 0x0040,
+
+ /*
+ * 'Frame Too Long' flag is set if packet length including CRC exceeds
+ * 1518. However, a full sized VLAN tagged frame is 1522 bytes
+ * including CRC.
+ *
+ * The tulip chip does not block oversized frames, and if this flag is
+ * set on a receive descriptor it does not indicate the frame has been
+ * truncated. The receive descriptor also includes the actual length.
+ * Therefore we can safety ignore this flag and check the length
+ * ourselves.
+ */
+ RxDescFrameTooLong = 0x0080,
+ RxDescRunt = 0x0800,
+ RxDescDescErr = 0x4000,
+ RxWholePkt = 0x00000300,
+ /*
+ * Top three bits of 14 bit frame length (status bits 27-29) should
+ * never be set as that would make frame over 2047 bytes. The Receive
+ * Watchdog flag (bit 4) may indicate the length is over 2048 and the
+ * length field is invalid.
+ */
+ RxLengthOver2047 = 0x38000010
+};
+
+
+enum t21143_csr6_bits {
+ csr6_sc = (1<<31),
+ csr6_ra = (1<<30),
+ csr6_ign_dest_msb = (1<<26),
+ csr6_mbo = (1<<25),
+ csr6_scr = (1<<24), /* scramble mode flag: can't be set */
+ csr6_pcs = (1<<23), /* Enables PCS functions (symbol mode requires csr6_ps be set) default is set */
+ csr6_ttm = (1<<22), /* Transmit Threshold Mode, set for 10baseT, 0 for 100BaseTX */
+ csr6_sf = (1<<21), /* Store and forward. If set ignores TR bits */
+ csr6_hbd = (1<<19), /* Heart beat disable. Disables SQE function in 10baseT */
+ csr6_ps = (1<<18), /* Port Select. 0 (default) = 10baseT, 1 = 100baseTX: can't be set */
+ csr6_ca = (1<<17), /* Collision Offset Enable. If set uses special algorithm in low collision situations */
+ csr6_trh = (1<<15), /* Transmit Threshold high bit */
+ csr6_trl = (1<<14), /* Transmit Threshold low bit */
+
+ /***************************************************************
+ * This table shows transmit threshold values based on media *
+ * and these two registers (from PNIC1 & 2 docs) Note: this is *
+ * all meaningless if sf is set. *
+ ***************************************************************/
+
+ /***********************************
+ * (trh,trl) * 100BaseTX * 10BaseT *
+ ***********************************
+ * (0,0) * 128 * 72 *
+ * (0,1) * 256 * 96 *
+ * (1,0) * 512 * 128 *
+ * (1,1) * 1024 * 160 *
+ ***********************************/
+
+ csr6_fc = (1<<12), /* Forces a collision in next transmission (for testing in loopback mode) */
+ csr6_om_int_loop = (1<<10), /* internal (FIFO) loopback flag */
+ csr6_om_ext_loop = (1<<11), /* external (PMD) loopback flag */
+ /* set both and you get (PHY) loopback */
+ csr6_fd = (1<<9), /* Full duplex mode, disables heartbeat, no loopback */
+ csr6_pm = (1<<7), /* Pass All Multicast */
+ csr6_pr = (1<<6), /* Promiscuous mode */
+ csr6_sb = (1<<5), /* Start(1)/Stop(0) backoff counter */
+ csr6_if = (1<<4), /* Inverse Filtering, rejects only addresses in address table: can't be set */
+ csr6_pb = (1<<3), /* Pass Bad Frames, (1) causes even bad frames to be passed on */
+ csr6_ho = (1<<2), /* Hash-only filtering mode: can't be set */
+ csr6_hp = (1<<0), /* Hash/Perfect Receive Filtering Mode: can't be set */
+
+ csr6_mask_capture = (csr6_sc | csr6_ca),
+ csr6_mask_defstate = (csr6_mask_capture | csr6_mbo),
+ csr6_mask_hdcap = (csr6_mask_defstate | csr6_hbd | csr6_ps),
+ csr6_mask_hdcaptt = (csr6_mask_hdcap | csr6_trh | csr6_trl),
+ csr6_mask_fullcap = (csr6_mask_hdcaptt | csr6_fd),
+ csr6_mask_fullpromisc = (csr6_pr | csr6_pm),
+ csr6_mask_filters = (csr6_hp | csr6_ho | csr6_if),
+ csr6_mask_100bt = (csr6_scr | csr6_pcs | csr6_hbd),
+};
+
+enum tulip_comet_csr13_bits {
+/* The LINKOFFE and LINKONE work in conjunction with LSCE, i.e. they
+ * determine which link status transition wakes up if LSCE is
+ * enabled */
+ comet_csr13_linkoffe = (1 << 17),
+ comet_csr13_linkone = (1 << 16),
+ comet_csr13_wfre = (1 << 10),
+ comet_csr13_mpre = (1 << 9),
+ comet_csr13_lsce = (1 << 8),
+ comet_csr13_wfr = (1 << 2),
+ comet_csr13_mpr = (1 << 1),
+ comet_csr13_lsc = (1 << 0),
+};
+
+enum tulip_comet_csr18_bits {
+ comet_csr18_pmes_sticky = (1 << 24),
+ comet_csr18_pm_mode = (1 << 19),
+ comet_csr18_apm_mode = (1 << 18),
+ comet_csr18_d3a = (1 << 7)
+};
+
+enum tulip_comet_csr20_bits {
+ comet_csr20_pmes = (1 << 15),
+};
+
+/* Keep the ring sizes a power of two for efficiency.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+
+#define TX_RING_SIZE 32
+#define RX_RING_SIZE 128
+#define MEDIA_MASK 31
+
+/* The receiver on the DC21143 rev 65 can fail to close the last
+ * receive descriptor in certain circumstances (see errata) when
+ * using MWI. This can only occur if the receive buffer ends on
+ * a cache line boundary, so the "+ 4" below ensures it doesn't.
+ */
+#define PKT_BUF_SZ (1536 + 4) /* Size of each temporary Rx buffer. */
+
+#define TULIP_MIN_CACHE_LINE 8 /* in units of 32-bit words */
+
+#if defined(__sparc__) || defined(__hppa__)
+/* The UltraSparc PCI controllers will disconnect at every 64-byte
+ * crossing anyway, so it makes no sense to tell Tulip to burst
+ * any more than that.
+ */
+#define TULIP_MAX_CACHE_LINE 16 /* in units of 32-bit words */
+#else
+#define TULIP_MAX_CACHE_LINE 32 /* in units of 32-bit words */
+#endif
+
+
+/* Ring-wrap flag in length field, use for last ring entry.
+ 0x01000000 means chain on buffer2 address,
+ 0x02000000 means use the ring start address in CSR2/3.
+ Note: Some work-alike chips do not function correctly in chained mode.
+ The ASIX chip works only in chained mode.
+ Thus we indicate ring mode, but always write the 'next' field for
+ chained mode as well.
+*/
+#define DESC_RING_WRAP 0x02000000
+
+
+#define EEPROM_SIZE 512 /* 2 << EEPROM_ADDRLEN */
+
+
+#define RUN_AT(x) (jiffies + (x))
+
+#define get_u16(ptr) get_unaligned_le16((ptr))
+
+struct medialeaf {
+ u8 type;
+ u8 media;
+ unsigned char *leafdata;
+};
+
+
+struct mediatable {
+ u16 defaultmedia;
+ u8 leafcount;
+ u8 csr12dir; /* General purpose pin directions. */
+ unsigned has_mii:1;
+ unsigned has_nonmii:1;
+ unsigned has_reset:6;
+ u32 csr15dir;
+ u32 csr15val; /* 21143 NWay setting. */
+ struct medialeaf mleaf[0];
+};
+
+
+struct mediainfo {
+ struct mediainfo *next;
+ int info_type;
+ int index;
+ unsigned char *info;
+};
+
+struct ring_info {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+};
+
+
+struct tulip_private {
+ const char *product_name;
+ struct net_device *next_module;
+ struct tulip_rx_desc *rx_ring;
+ struct tulip_tx_desc *tx_ring;
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+ /* The saved address of a sent-in-place packet/buffer, for dev_kfree_skb(). */
+ struct ring_info tx_buffers[TX_RING_SIZE];
+ /* The addresses of receive-in-place skbuffs. */
+ struct ring_info rx_buffers[RX_RING_SIZE];
+ u16 setup_frame[96]; /* Pseudo-Tx frame to init address table. */
+ int chip_id;
+ int revision;
+ int flags;
+ struct napi_struct napi;
+ struct timer_list timer; /* Media selection timer. */
+ struct timer_list oom_timer; /* Out of memory timer. */
+ u32 mc_filter[2];
+ spinlock_t lock;
+ spinlock_t mii_lock;
+ unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+
+#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
+ int mit_on;
+#endif
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int full_duplex_lock:1;
+ unsigned int fake_addr:1; /* Multiport board faked address. */
+ unsigned int default_port:4; /* Last dev->if_port value. */
+ unsigned int media2:4; /* Secondary monitored media port. */
+ unsigned int medialock:1; /* Don't sense media type. */
+ unsigned int mediasense:1; /* Media sensing in progress. */
+ unsigned int nway:1, nwayset:1; /* 21143 internal NWay. */
+ unsigned int timeout_recovery:1;
+ unsigned int csr0; /* CSR0 setting. */
+ unsigned int csr6; /* Current CSR6 control settings. */
+ unsigned char eeprom[EEPROM_SIZE]; /* Serial EEPROM contents. */
+ void (*link_change) (struct net_device * dev, int csr5);
+ struct ethtool_wolinfo wolinfo; /* WOL settings */
+ u16 sym_advertise, mii_advertise; /* NWay capabilities advertised. */
+ u16 lpar; /* 21143 Link partner ability. */
+ u16 advertising[4];
+ signed char phys[4], mii_cnt; /* MII device addresses. */
+ struct mediatable *mtable;
+ int cur_index; /* Current media index. */
+ int saved_if_port;
+ struct pci_dev *pdev;
+ int ttimer;
+ int susp_rx;
+ unsigned long nir;
+ void __iomem *base_addr;
+ int csr12_shadow;
+ int pad0; /* Used for 8-byte alignment */
+ struct work_struct media_work;
+ struct net_device *dev;
+};
+
+
+struct eeprom_fixup {
+ char *name;
+ unsigned char addr0;
+ unsigned char addr1;
+ unsigned char addr2;
+ u16 newtable[32]; /* Max length below. */
+};
+
+
+/* 21142.c */
+extern u16 t21142_csr14[];
+void t21142_media_task(struct work_struct *work);
+void t21142_start_nway(struct net_device *dev);
+void t21142_lnk_change(struct net_device *dev, int csr5);
+
+
+/* PNIC2.c */
+void pnic2_lnk_change(struct net_device *dev, int csr5);
+void pnic2_timer(unsigned long data);
+void pnic2_start_nway(struct net_device *dev);
+void pnic2_lnk_change(struct net_device *dev, int csr5);
+
+/* eeprom.c */
+void tulip_parse_eeprom(struct net_device *dev);
+int tulip_read_eeprom(struct net_device *dev, int location, int addr_len);
+
+/* interrupt.c */
+extern unsigned int tulip_max_interrupt_work;
+extern int tulip_rx_copybreak;
+irqreturn_t tulip_interrupt(int irq, void *dev_instance);
+int tulip_refill_rx(struct net_device *dev);
+#ifdef CONFIG_TULIP_NAPI
+int tulip_poll(struct napi_struct *napi, int budget);
+#endif
+
+
+/* media.c */
+int tulip_mdio_read(struct net_device *dev, int phy_id, int location);
+void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int value);
+void tulip_select_media(struct net_device *dev, int startup);
+int tulip_check_duplex(struct net_device *dev);
+void tulip_find_mii (struct net_device *dev, int board_idx);
+
+/* pnic.c */
+void pnic_do_nway(struct net_device *dev);
+void pnic_lnk_change(struct net_device *dev, int csr5);
+void pnic_timer(unsigned long data);
+
+/* timer.c */
+void tulip_media_task(struct work_struct *work);
+void mxic_timer(unsigned long data);
+void comet_timer(unsigned long data);
+
+/* tulip_core.c */
+extern int tulip_debug;
+extern const char * const medianame[];
+extern const char tulip_media_cap[];
+extern struct tulip_chip_table tulip_tbl[];
+void oom_timer(unsigned long data);
+extern u8 t21040_csr13[];
+
+static inline void tulip_start_rxtx(struct tulip_private *tp)
+{
+ void __iomem *ioaddr = tp->base_addr;
+ iowrite32(tp->csr6 | RxTx, ioaddr + CSR6);
+ barrier();
+ (void) ioread32(ioaddr + CSR6); /* mmio sync */
+}
+
+static inline void tulip_stop_rxtx(struct tulip_private *tp)
+{
+ void __iomem *ioaddr = tp->base_addr;
+ u32 csr6 = ioread32(ioaddr + CSR6);
+
+ if (csr6 & RxTx) {
+ unsigned i=1300/10;
+ iowrite32(csr6 & ~RxTx, ioaddr + CSR6);
+ barrier();
+ /* wait until in-flight frame completes.
+ * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
+ * Typically expect this loop to end in < 50 us on 100BT.
+ */
+ while (--i && (ioread32(ioaddr + CSR5) & (CSR5_TS|CSR5_RS)))
+ udelay(10);
+
+ if (!i)
+ netdev_dbg(tp->dev, "tulip_stop_rxtx() failed (CSR5 0x%x CSR6 0x%x)\n",
+ ioread32(ioaddr + CSR5),
+ ioread32(ioaddr + CSR6));
+ }
+}
+
+static inline void tulip_restart_rxtx(struct tulip_private *tp)
+{
+ tulip_stop_rxtx(tp);
+ udelay(5);
+ tulip_start_rxtx(tp);
+}
+
+static inline void tulip_tx_timeout_complete(struct tulip_private *tp, void __iomem *ioaddr)
+{
+ /* Stop and restart the chip's Tx processes. */
+ tulip_restart_rxtx(tp);
+ /* Trigger an immediate transmit demand. */
+ iowrite32(0, ioaddr + CSR1);
+
+ tp->dev->stats.tx_errors++;
+}
+
+#endif /* __NET_TULIP_H__ */
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
new file mode 100644
index 000000000000..011f67c7ca47
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -0,0 +1,2011 @@
+/* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux.
+
+ Copyright 2000,2001 The Linux Kernel Team
+ Written/copyright 1994-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
+ for more information on this driver.
+
+ Please submit bugs to http://bugzilla.kernel.org/ .
+*/
+
+#define pr_fmt(fmt) "tulip: " fmt
+
+#define DRV_NAME "tulip"
+#ifdef CONFIG_TULIP_NAPI
+#define DRV_VERSION "1.1.15-NAPI" /* Keep at least for test */
+#else
+#define DRV_VERSION "1.1.15"
+#endif
+#define DRV_RELDATE "Feb 27, 2007"
+
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include "tulip.h"
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <asm/unaligned.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_SPARC
+#include <asm/prom.h>
+#endif
+
+static char version[] __devinitdata =
+ "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
+
+/* A few user-configurable values. */
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static unsigned int max_interrupt_work = 25;
+
+#define MAX_UNITS 8
+/* Used to pass the full-duplex flag, etc. */
+static int full_duplex[MAX_UNITS];
+static int options[MAX_UNITS];
+static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */
+
+/* The possible media types that can be set in options[] are: */
+const char * const medianame[32] = {
+ "10baseT", "10base2", "AUI", "100baseTx",
+ "10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
+ "100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
+ "10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
+ "MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
+ "","","","", "","","","", "","","","Transceiver reset",
+};
+
+/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
+#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
+ defined(CONFIG_SPARC) || defined(__ia64__) || \
+ defined(__sh__) || defined(__mips__)
+static int rx_copybreak = 1518;
+#else
+static int rx_copybreak = 100;
+#endif
+
+/*
+ Set the bus performance register.
+ Typical: Set 16 longword cache alignment, no burst limit.
+ Cache alignment bits 15:14 Burst length 13:8
+ 0000 No alignment 0x00000000 unlimited 0800 8 longwords
+ 4000 8 longwords 0100 1 longword 1000 16 longwords
+ 8000 16 longwords 0200 2 longwords 2000 32 longwords
+ C000 32 longwords 0400 4 longwords
+ Warning: many older 486 systems are broken and require setting 0x00A04800
+ 8 longword cache alignment, 8 longword burst.
+ ToDo: Non-Intel setting could be better.
+*/
+
+#if defined(__alpha__) || defined(__ia64__)
+static int csr0 = 0x01A00000 | 0xE000;
+#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
+static int csr0 = 0x01A00000 | 0x8000;
+#elif defined(CONFIG_SPARC) || defined(__hppa__)
+/* The UltraSparc PCI controllers will disconnect at every 64-byte
+ * crossing anyway, so it makes no sense to tell Tulip to burst
+ * any more than that.
+ */
+static int csr0 = 0x01A00000 | 0x9000;
+#elif defined(__arm__) || defined(__sh__)
+static int csr0 = 0x01A00000 | 0x4800;
+#elif defined(__mips__)
+static int csr0 = 0x00200000 | 0x4000;
+#else
+#warning Processor architecture undefined!
+static int csr0 = 0x00A00000 | 0x4800;
+#endif
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (4*HZ)
+
+
+MODULE_AUTHOR("The Linux Kernel Team");
+MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+module_param(tulip_debug, int, 0);
+module_param(max_interrupt_work, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param(csr0, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+
+#ifdef TULIP_DEBUG
+int tulip_debug = TULIP_DEBUG;
+#else
+int tulip_debug = 1;
+#endif
+
+static void tulip_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = netdev_priv(dev);
+
+ if (netif_running(dev))
+ schedule_work(&tp->media_work);
+}
+
+/*
+ * This table is used during operation for capabilities and media timer.
+ *
+ * It is indexed via the values in 'enum chips'
+ */
+
+struct tulip_chip_table tulip_tbl[] = {
+ { }, /* placeholder for array, slot unused currently */
+ { }, /* placeholder for array, slot unused currently */
+
+ /* DC21140 */
+ { "Digital DS21140 Tulip", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
+ tulip_media_task },
+
+ /* DC21142, DC21143 */
+ { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
+ HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
+ | HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },
+
+ /* LC82C168 */
+ { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
+ HAS_MII | HAS_PNICNWAY, pnic_timer, },
+
+ /* MX98713 */
+ { "Macronix 98713 PMAC", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
+
+ /* MX98715 */
+ { "Macronix 98715 PMAC", 256, 0x0001ebef,
+ HAS_MEDIA_TABLE, mxic_timer, },
+
+ /* MX98725 */
+ { "Macronix 98725 PMAC", 256, 0x0001ebef,
+ HAS_MEDIA_TABLE, mxic_timer, },
+
+ /* AX88140 */
+ { "ASIX AX88140", 128, 0x0001fbff,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
+ | IS_ASIX, tulip_timer, tulip_media_task },
+
+ /* PNIC2 */
+ { "Lite-On PNIC-II", 256, 0x0801fbff,
+ HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },
+
+ /* COMET */
+ { "ADMtek Comet", 256, 0x0001abef,
+ HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },
+
+ /* COMPEX9881 */
+ { "Compex 9881 PMAC", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
+
+ /* I21145 */
+ { "Intel DS21145 Tulip", 128, 0x0801fbff,
+ HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
+ | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
+
+ /* DM910X */
+#ifdef CONFIG_TULIP_DM910X
+ { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
+ tulip_timer, tulip_media_task },
+#else
+ { NULL },
+#endif
+
+ /* RS7112 */
+ { "Conexant LANfinity", 256, 0x0001ebef,
+ HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },
+
+};
+
+
+static DEFINE_PCI_DEVICE_TABLE(tulip_pci_tbl) = {
+ { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
+ { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
+ { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
+ { 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
+ { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
+/* { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
+ { 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
+ { 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
+ { 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
+ { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
+#ifdef CONFIG_TULIP_DM910X
+ { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
+ { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
+#endif
+ { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
+ { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
+ { 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
+ { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
+ { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
+ { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+ { } /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
+
+
+/* Capability map for media types: MediaIs* bits (see enum MediaIs
+ in tulip.h), indexed like medianame[]. */
+const char tulip_media_cap[32] =
+{0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };
+
+static void tulip_tx_timeout(struct net_device *dev);
+static void tulip_init_ring(struct net_device *dev);
+static void tulip_free_ring(struct net_device *dev);
+static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static int tulip_open(struct net_device *dev);
+static int tulip_close(struct net_device *dev);
+static void tulip_up(struct net_device *dev);
+static void tulip_down(struct net_device *dev);
+static struct net_device_stats *tulip_get_stats(struct net_device *dev);
+static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void set_rx_mode(struct net_device *dev);
+static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void poll_tulip(struct net_device *dev);
+#endif
+
+static void tulip_set_power_state (struct tulip_private *tp,
+ int sleep, int snooze)
+{
+ if (tp->flags & HAS_ACPI) {
+ u32 tmp, newtmp;
+ pci_read_config_dword (tp->pdev, CFDD, &tmp);
+ newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
+ if (sleep)
+ newtmp |= CFDD_Sleep;
+ else if (snooze)
+ newtmp |= CFDD_Snooze;
+ if (tmp != newtmp)
+ pci_write_config_dword (tp->pdev, CFDD, newtmp);
+ }
+
+}
+
+
+static void tulip_up(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int next_tick = 3*HZ;
+ u32 reg;
+ int i;
+
+#ifdef CONFIG_TULIP_NAPI
+ napi_enable(&tp->napi);
+#endif
+
+ /* Wake the chip from sleep/snooze mode. */
+ tulip_set_power_state (tp, 0, 0);
+
+ /* Disable all WOL events */
+ pci_enable_wake(tp->pdev, PCI_D3hot, 0);
+ pci_enable_wake(tp->pdev, PCI_D3cold, 0);
+ tulip_set_wolopts(tp->pdev, 0);
+
+ /* On some chip revs we must set the MII/SYM port before the reset!? */
+ if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
+ iowrite32(0x00040000, ioaddr + CSR6);
+
+ /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
+ iowrite32(0x00000001, ioaddr + CSR0);
+ pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg); /* flush write */
+ udelay(100);
+
+ /* Deassert reset.
+ Wait the specified 50 PCI cycles after a reset by initializing
+ Tx and Rx queues and the address filter list. */
+ iowrite32(tp->csr0, ioaddr + CSR0);
+ pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg); /* flush write */
+ udelay(100);
+
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "tulip_up(), irq==%d\n", dev->irq);
+
+ iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
+ iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
+ tp->cur_rx = tp->cur_tx = 0;
+ tp->dirty_rx = tp->dirty_tx = 0;
+
+ if (tp->flags & MC_HASH_ONLY) {
+ u32 addr_low = get_unaligned_le32(dev->dev_addr);
+ u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
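+ /* hash-only chips have no setup frame; program the
+ * station address through chip-specific registers
+ */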
+ if (tp->chip_id == AX88140) {
+ iowrite32(0, ioaddr + CSR13);
+ iowrite32(addr_low, ioaddr + CSR14);
+ iowrite32(1, ioaddr + CSR13);
+ iowrite32(addr_high, ioaddr + CSR14);
+ } else if (tp->flags & COMET_MAC_ADDR) {
+ iowrite32(addr_low, ioaddr + 0xA4);
+ iowrite32(addr_high, ioaddr + 0xA8);
+ iowrite32(0, ioaddr + CSR27);
+ iowrite32(0, ioaddr + CSR28);
+ }
+ } else {
+ /* This is set_rx_mode(), but without starting the transmitter. */
+ u16 *eaddrs = (u16 *)dev->dev_addr;
+ u16 *setup_frm = &tp->setup_frame[15*6];
+ dma_addr_t mapping;
+
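+ /* The 192-byte setup frame holds 16 filter entries of
+ * 12 bytes each; each 16-bit piece of an address is
+ * written twice (once per 32-bit word).
+ */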
+ /* 21140 bug: you must add the broadcast address. */
+ memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
+ /* Fill the final entry of the table with our physical address. */
+ *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
+ *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
+ *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+
+ mapping = pci_map_single(tp->pdev, tp->setup_frame,
+ sizeof(tp->setup_frame),
+ PCI_DMA_TODEVICE);
+ tp->tx_buffers[tp->cur_tx].skb = NULL;
+ tp->tx_buffers[tp->cur_tx].mapping = mapping;
+
+ /* Put the setup frame on the Tx list. */
+ tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
+ tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
+ tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
+
+ tp->cur_tx++;
+ }
+
+ tp->saved_if_port = dev->if_port;
+ if (dev->if_port == 0)
+ dev->if_port = tp->default_port;
+
+ /* Allow selecting a default media. */
+ i = 0;
+ if (tp->mtable == NULL)
+ goto media_picked;
+ if (dev->if_port) {
+ int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
+ (dev->if_port == 12 ? 0 : dev->if_port);
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == looking_for) {
+ dev_info(&dev->dev,
+ "Using user-specified media %s\n",
+ medianame[dev->if_port]);
+ goto media_picked;
+ }
+ }
+ if ((tp->mtable->defaultmedia & 0x0800) == 0) {
+ int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == looking_for) {
+ dev_info(&dev->dev,
+ "Using EEPROM-set media %s\n",
+ medianame[looking_for]);
+ goto media_picked;
+ }
+ }
+ /* Start sensing first non-full-duplex media. */
+ for (i = tp->mtable->leafcount - 1;
+ (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
+ ;
+media_picked:
+
+ tp->csr6 = 0;
+ tp->cur_index = i;
+ tp->nwayset = 0;
+
+ if (dev->if_port) {
+ if (tp->chip_id == DC21143 &&
+ (tulip_media_cap[dev->if_port] & MediaIsMII)) {
+ /* We must reset the media CSRs when we force-select MII mode. */
+ iowrite32(0x0000, ioaddr + CSR13);
+ iowrite32(0x0000, ioaddr + CSR14);
+ iowrite32(0x0008, ioaddr + CSR15);
+ }
+ tulip_select_media(dev, 1);
+ } else if (tp->chip_id == DC21142) {
+ if (tp->mii_cnt) {
+ tulip_select_media(dev, 1);
+ if (tulip_debug > 1)
+ dev_info(&dev->dev,
+ "Using MII transceiver %d, status %04x\n",
+ tp->phys[0],
+ tulip_mdio_read(dev, tp->phys[0], 1));
+ iowrite32(csr6_mask_defstate, ioaddr + CSR6);
+ tp->csr6 = csr6_mask_hdcap;
+ dev->if_port = 11;
+ iowrite32(0x0000, ioaddr + CSR13);
+ iowrite32(0x0000, ioaddr + CSR14);
+ } else
+ t21142_start_nway(dev);
+ } else if (tp->chip_id == PNIC2) {
+ /* for initial startup advertise 10/100 Full and Half */
+ tp->sym_advertise = 0x01E0;
+ /* enable autonegotiate end interrupt */
+ iowrite32(ioread32(ioaddr + CSR5) | 0x00008010, ioaddr + CSR5);
+ iowrite32(ioread32(ioaddr + CSR7) | 0x00008010, ioaddr + CSR7);
+ pnic2_start_nway(dev);
+ } else if (tp->chip_id == LC82C168 && ! tp->medialock) {
+ if (tp->mii_cnt) {
+ dev->if_port = 11;
+ tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
+ iowrite32(0x0001, ioaddr + CSR15);
+ } else if (ioread32(ioaddr + CSR5) & TPLnkPass)
+ pnic_do_nway(dev);
+ else {
+ /* Start with 10mbps to do autonegotiation. */
+ iowrite32(0x32, ioaddr + CSR12);
+ tp->csr6 = 0x00420000;
+ iowrite32(0x0001B078, ioaddr + 0xB8);
+ iowrite32(0x0201B078, ioaddr + 0xB8);
+ next_tick = 1*HZ;
+ }
+ } else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
+ ! tp->medialock) {
+ dev->if_port = 0;
+ tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
+ iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
+ } else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
+ /* Provided by BOLO, Macronix - 12/10/1998. */
+ dev->if_port = 0;
+ tp->csr6 = 0x01a80200;
+ iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
+ iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
+ } else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
+ /* Enable automatic Tx underrun recovery. */
+ iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
+ dev->if_port = tp->mii_cnt ? 11 : 0;
+ tp->csr6 = 0x00040000;
+ } else if (tp->chip_id == AX88140) {
+ tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
+ } else
+ tulip_select_media(dev, 1);
+
+ /* Start the chip's Tx to process setup frame. */
+ tulip_stop_rxtx(tp);
+ barrier();
+ udelay(5);
+ iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
+ iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+ tulip_start_rxtx(tp);
+ iowrite32(0, ioaddr + CSR2); /* Rx poll demand */
+
+ if (tulip_debug > 2) {
+ netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
+ ioread32(ioaddr + CSR0),
+ ioread32(ioaddr + CSR5),
+ ioread32(ioaddr + CSR6));
+ }
+
+ /* Set the timer to check for link beat and perhaps switch
+ to an alternate media type. */
+ tp->timer.expires = RUN_AT(next_tick);
+ add_timer(&tp->timer);
+#ifdef CONFIG_TULIP_NAPI
+ init_timer(&tp->oom_timer);
+ tp->oom_timer.data = (unsigned long)dev;
+ tp->oom_timer.function = oom_timer;
+#endif
+}
+
+static int
+tulip_open(struct net_device *dev)
+{
+ int retval;
+
+ tulip_init_ring (dev);
+
+ retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev);
+ if (retval)
+ goto free_ring;
+
+ tulip_up (dev);
+
+ netif_start_queue (dev);
+
+ return 0;
+
+free_ring:
+ tulip_free_ring (dev);
+ return retval;
+}
+
+
+static void tulip_tx_timeout(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave (&tp->lock, flags);
+
+ if (tulip_media_cap[dev->if_port] & MediaIsMII) {
+ /* Do nothing -- the media monitor should handle this. */
+ if (tulip_debug > 1)
+ dev_warn(&dev->dev,
+ "Transmit timeout using MII device\n");
+ } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
+ tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
+ tp->chip_id == DM910X) {
+ dev_warn(&dev->dev,
+ "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
+ ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
+ ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
+ ioread32(ioaddr + CSR15));
+ tp->timeout_recovery = 1;
+ schedule_work(&tp->media_work);
+ goto out_unlock;
+ } else if (tp->chip_id == PNIC2) {
+ dev_warn(&dev->dev,
+ "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
+ (int)ioread32(ioaddr + CSR5),
+ (int)ioread32(ioaddr + CSR6),
+ (int)ioread32(ioaddr + CSR7),
+ (int)ioread32(ioaddr + CSR12));
+ } else {
+ dev_warn(&dev->dev,
+ "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
+ ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
+ dev->if_port = 0;
+ }
+
+#if defined(way_too_many_messages)
+ if (tulip_debug > 3) {
+ int i;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
+ int j;
+ printk(KERN_DEBUG
+ "%2d: %08x %08x %08x %08x %02x %02x %02x\n",
+ i,
+ (unsigned int)tp->rx_ring[i].status,
+ (unsigned int)tp->rx_ring[i].length,
+ (unsigned int)tp->rx_ring[i].buffer1,
+ (unsigned int)tp->rx_ring[i].buffer2,
+ buf[0], buf[1], buf[2]);
+ for (j = 0; buf[j] != 0xee && j < 1600; j++)
+ if (j < 100)
+ pr_cont(" %02x", buf[j]);
+ pr_cont(" j=%d\n", j);
+ }
+ printk(KERN_DEBUG " Rx ring %p: ", tp->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
+ printk(KERN_DEBUG " Tx ring %p: ", tp->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
+ pr_cont("\n");
+ }
+#endif
+
+ tulip_tx_timeout_complete(tp, ioaddr);
+
+out_unlock:
+ spin_unlock_irqrestore (&tp->lock, flags);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue (dev);
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void tulip_init_ring(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ int i;
+
+ tp->susp_rx = 0;
+ tp->ttimer = 0;
+ tp->nir = 0;
+
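+ /* Rx descriptors are chained through buffer2; the last
+ * entry wraps back to the ring base (see DESC_RING_WRAP).
+ */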
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ tp->rx_ring[i].status = 0x00000000;
+ tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
+ tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
+ tp->rx_buffers[i].skb = NULL;
+ tp->rx_buffers[i].mapping = 0;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
+ tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ dma_addr_t mapping;
+
+ /* Note the receive buffer must be longword aligned.
+ dev_alloc_skb() provides 16 byte alignment. But do *not*
+ use skb_reserve() to align the IP header! */
+ struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
+ tp->rx_buffers[i].skb = skb;
+ if (skb == NULL)
+ break;
+ mapping = pci_map_single(tp->pdev, skb->data,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ tp->rx_buffers[i].mapping = mapping;
+ skb->dev = dev; /* Mark as being used by this device. */
+ tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
+ tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
+ }
+ tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ /* The Tx buffer descriptor is filled in as needed, but we
+ do need to clear the ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ tp->tx_buffers[i].skb = NULL;
+ tp->tx_buffers[i].mapping = 0;
+ tp->tx_ring[i].status = 0x00000000;
+ tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
+ }
+ tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
+}
+
+static netdev_tx_t
+tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ int entry;
+ u32 flag;
+ dma_addr_t mapping;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->lock, flags);
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = tp->cur_tx % TX_RING_SIZE;
+
+ tp->tx_buffers[entry].skb = skb;
+ mapping = pci_map_single(tp->pdev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
+ tp->tx_buffers[entry].mapping = mapping;
+ tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
+
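+ /* request a Tx-done interrupt only at the half-ring
+ * mark, when the ring is nearly full, or on the wrap
+ * entry, to limit interrupt load
+ */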
+ if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
+ flag = 0x60000000; /* No interrupt */
+ } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
+ flag = 0xe0000000; /* Tx-done intr. */
+ } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
+ flag = 0x60000000; /* No Tx-done intr. */
+ } else { /* Leave room for set_rx_mode() to fill entries. */
+ flag = 0xe0000000; /* Tx-done intr. */
+ netif_stop_queue(dev);
+ }
+ if (entry == TX_RING_SIZE-1)
+ flag = 0xe0000000 | DESC_RING_WRAP;
+
+ tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
+ /* if we were using Transmit Automatic Polling, we would need a
+ * wmb() here. */
+ tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+ wmb();
+
+ tp->cur_tx++;
+
+ /* Trigger an immediate transmit demand. */
+ iowrite32(0, tp->base_addr + CSR1);
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static void tulip_clean_tx_ring(struct tulip_private *tp)
+{
+ unsigned int dirty_tx;
+
+ for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
+ dirty_tx++) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int status = le32_to_cpu(tp->tx_ring[entry].status);
+
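+ /* status < 0 means DescOwned (bit 31) is still set:
+ * the chip never transmitted this frame
+ */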
+ if (status < 0) {
+ tp->dev->stats.tx_errors++; /* It wasn't Txed */
+ tp->tx_ring[entry].status = 0;
+ }
+
+ /* Check for Tx filter setup frames. */
+ if (tp->tx_buffers[entry].skb == NULL) {
+ /* test because dummy frames not mapped */
+ if (tp->tx_buffers[entry].mapping)
+ pci_unmap_single(tp->pdev,
+ tp->tx_buffers[entry].mapping,
+ sizeof(tp->setup_frame),
+ PCI_DMA_TODEVICE);
+ continue;
+ }
+
+ pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
+ tp->tx_buffers[entry].skb->len,
+ PCI_DMA_TODEVICE);
+
+ /* Free the original skb. */
+ dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
+ tp->tx_buffers[entry].skb = NULL;
+ tp->tx_buffers[entry].mapping = 0;
+ }
+}
+
+static void tulip_down (struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ unsigned long flags;
+
+ cancel_work_sync(&tp->media_work);
+
+#ifdef CONFIG_TULIP_NAPI
+ napi_disable(&tp->napi);
+#endif
+
+ del_timer_sync (&tp->timer);
+#ifdef CONFIG_TULIP_NAPI
+ del_timer_sync (&tp->oom_timer);
+#endif
+ spin_lock_irqsave (&tp->lock, flags);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ iowrite32 (0x00000000, ioaddr + CSR7);
+
+ /* Stop the Tx and Rx processes. */
+ tulip_stop_rxtx(tp);
+
+ /* prepare receive buffers */
+ tulip_refill_rx(dev);
+
+ /* release any unconsumed transmit buffers */
+ tulip_clean_tx_ring(tp);
+
+ if (ioread32(ioaddr + CSR6) != 0xffffffff)
+ dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
+
+ spin_unlock_irqrestore (&tp->lock, flags);
+
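+ /* re-initialize the media timer so a later tulip_up()
+ * can add_timer() it again
+ */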
+ init_timer(&tp->timer);
+ tp->timer.data = (unsigned long)dev;
+ tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
+
+ dev->if_port = tp->saved_if_port;
+
+ /* Leave the driver in snooze, not sleep, mode. */
+ tulip_set_power_state (tp, 0, 1);
+}
+
+static void tulip_free_ring (struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ int i;
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = tp->rx_buffers[i].skb;
+ dma_addr_t mapping = tp->rx_buffers[i].mapping;
+
+ tp->rx_buffers[i].skb = NULL;
+ tp->rx_buffers[i].mapping = 0;
+
+ tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */
+ tp->rx_ring[i].length = 0;
+ /* An invalid address. */
+ tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
+ if (skb) {
+ pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb (skb);
+ }
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ struct sk_buff *skb = tp->tx_buffers[i].skb;
+
+ if (skb != NULL) {
+ pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb (skb);
+ }
+ tp->tx_buffers[i].skb = NULL;
+ tp->tx_buffers[i].mapping = 0;
+ }
+}
+
+static int tulip_close (struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+
+ netif_stop_queue (dev);
+
+ tulip_down (dev);
+
+ if (tulip_debug > 1)
+ netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
+ ioread32 (ioaddr + CSR5));
+
+ free_irq (dev->irq, dev);
+
+ tulip_free_ring (dev);
+
+ return 0;
+}
+
+static struct net_device_stats *tulip_get_stats(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+
+ if (netif_running(dev)) {
+ unsigned long flags;
+
+ spin_lock_irqsave (&tp->lock, flags);
+
+ dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+ }
+
+ return &dev->stats;
+}
+
+
+static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct tulip_private *np = netdev_priv(dev);
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(np->pdev));
+}
+
+
+static int tulip_ethtool_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+
+ if (wolinfo->wolopts & (~tp->wolinfo.supported))
+ return -EOPNOTSUPP;
+
+ tp->wolinfo.wolopts = wolinfo->wolopts;
+ device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
+ return 0;
+}
+
+static void tulip_ethtool_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+
+ wolinfo->supported = tp->wolinfo.supported;
+ wolinfo->wolopts = tp->wolinfo.wolopts;
+}
+
+
+static const struct ethtool_ops ops = {
+ .get_drvinfo = tulip_get_drvinfo,
+ .set_wol = tulip_ethtool_set_wol,
+ .get_wol = tulip_ethtool_get_wol,
+};
+
+/* Provide ioctl() calls to examine the MII xcvr state. */
+static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ struct mii_ioctl_data *data = if_mii(rq);
+ const unsigned int phy_idx = 0;
+ int phy = tp->phys[phy_idx] & 0x1f;
+ unsigned int regnum = data->reg_num;
+
+ switch (cmd) {
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ if (tp->mii_cnt)
+ data->phy_id = phy;
+ else if (tp->flags & HAS_NWAY)
+ data->phy_id = 32;
+ else if (tp->chip_id == COMET)
+ data->phy_id = 1;
+ else
+ return -ENODEV;
+
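+ /* note: no break above - SIOCGMIIPHY deliberately falls
+ * through to SIOCGMIIREG
+ */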
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
+ int csr12 = ioread32 (ioaddr + CSR12);
+ int csr14 = ioread32 (ioaddr + CSR14);
+ switch (regnum) {
+ case 0:
+ if (((csr14<<5) & 0x1000) ||
+ (dev->if_port == 5 && tp->nwayset))
+ data->val_out = 0x1000;
+ else
+ data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
+ | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
+ break;
+ case 1:
+ data->val_out =
+ 0x1848 +
+ ((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
+ ((csr12&0x06) == 6 ? 0 : 4);
+ data->val_out |= 0x6048;
+ break;
+ case 4:
+ /* Advertised value, bogus 10baseTx-FD value from CSR6. */
+ data->val_out =
+ ((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
+ ((csr14 >> 1) & 0x20) + 1;
+ data->val_out |= ((csr14 >> 9) & 0x03C0);
+ break;
+ case 5: data->val_out = tp->lpar; break;
+ default: data->val_out = 0; break;
+ }
+ } else {
+ data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
+ }
+ return 0;
+
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ if (regnum & ~0x1f)
+ return -EINVAL;
+ if (data->phy_id == phy) {
+ u16 value = data->val_in;
+ switch (regnum) {
+ case 0: /* Check for autonegotiation on or reset. */
+ tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
+ if (tp->full_duplex_lock)
+ tp->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4:
+ tp->advertising[phy_idx] =
+ tp->mii_advertise = data->val_in;
+ break;
+ }
+ }
+ if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
+ u16 value = data->val_in;
+ if (regnum == 0) {
+ if ((value & 0x1200) == 0x1200) {
+ if (tp->chip_id == PNIC2) {
+ pnic2_start_nway (dev);
+ } else {
+ t21142_start_nway (dev);
+ }
+ }
+ } else if (regnum == 4)
+ tp->sym_advertise = value;
+ } else {
+ tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+
+/* Set or clear the multicast filter for this adaptor.
+ Note that we only use exclusion around actually queueing the
+ new frame, not around filling tp->setup_frame. This is non-deterministic
+ when re-entered but still correct. */
+
+#undef set_bit_le
+#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
+
+static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ u16 hash_table[32];
+ struct netdev_hw_addr *ha;
+ int i;
+ u16 *eaddrs;
+
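+ /* 32 x 16-bit words form a 512-bit hash table; each
+ * multicast address sets the bit indexed by the low
+ * 9 bits of its little-endian CRC
+ */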
+ memset(hash_table, 0, sizeof(hash_table));
+ set_bit_le(255, hash_table); /* Broadcast entry */
+ /* This should work on big-endian machines as well. */
+ netdev_for_each_mc_addr(ha, dev) {
+ int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
+
+ set_bit_le(index, hash_table);
+ }
+ for (i = 0; i < 32; i++) {
+ *setup_frm++ = hash_table[i];
+ *setup_frm++ = hash_table[i];
+ }
+ setup_frm = &tp->setup_frame[13*6];
+
+ /* Fill the final entry with our physical address. */
+ eaddrs = (u16 *)dev->dev_addr;
+ *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
+ *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
+ *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+}
+
+static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ u16 *eaddrs;
+
+ /* We have <= 14 addresses so we can use the wonderful
+ 16 address perfect filtering of the Tulip. */
+ netdev_for_each_mc_addr(ha, dev) {
+ eaddrs = (u16 *) ha->addr;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ }
+ /* Fill the unused entries with the broadcast address. */
+ memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
+ setup_frm = &tp->setup_frame[15*6];
+
+ /* Fill the final entry with our physical address. */
+ eaddrs = (u16 *)dev->dev_addr;
+ *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
+ *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
+ *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+}
+
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int csr6;
+
+ csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;
+
+ tp->csr6 &= ~0x00D5;
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
+ csr6 |= AcceptAllMulticast | AcceptAllPhys;
+ } else if ((netdev_mc_count(dev) > 1000) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter well -- accept all multicasts. */
+ tp->csr6 |= AcceptAllMulticast;
+ csr6 |= AcceptAllMulticast;
+ } else if (tp->flags & MC_HASH_ONLY) {
+ /* Some work-alikes have only a 64-entry hash filter table. */
+ /* Should verify correctness on big-endian/__powerpc__ */
+ struct netdev_hw_addr *ha;
+ if (netdev_mc_count(dev) > 64) {
+ /* Arbitrary non-effective limit. */
+ tp->csr6 |= AcceptAllMulticast;
+ csr6 |= AcceptAllMulticast;
+ } else {
+ u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
+ int filterbit;
+ netdev_for_each_mc_addr(ha, dev) {
+ if (tp->flags & COMET_MAC_ADDR)
+ filterbit = ether_crc_le(ETH_ALEN,
+ ha->addr);
+ else
+ filterbit = ether_crc(ETH_ALEN,
+ ha->addr) >> 26;
+ filterbit &= 0x3f;
+ mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
+ if (tulip_debug > 2)
+ dev_info(&dev->dev,
+ "Added filter for %pM %08x bit %d\n",
+ ha->addr,
+ ether_crc(ETH_ALEN, ha->addr),
+ filterbit);
+ }
+ if (mc_filter[0] == tp->mc_filter[0] &&
+ mc_filter[1] == tp->mc_filter[1])
+ ; /* No change. */
+ else if (tp->flags & IS_ASIX) {
+ iowrite32(2, ioaddr + CSR13);
+ iowrite32(mc_filter[0], ioaddr + CSR14);
+ iowrite32(3, ioaddr + CSR13);
+ iowrite32(mc_filter[1], ioaddr + CSR14);
+ } else if (tp->flags & COMET_MAC_ADDR) {
+ iowrite32(mc_filter[0], ioaddr + CSR27);
+ iowrite32(mc_filter[1], ioaddr + CSR28);
+ }
+ tp->mc_filter[0] = mc_filter[0];
+ tp->mc_filter[1] = mc_filter[1];
+ }
+ } else {
+ unsigned long flags;
+ u32 tx_flags = 0x08000000 | 192;
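+		/* Assumed TDES1 encoding: bit 27 marks a setup frame and 192
+		 * is the setup buffer length; 0x08400000 below adds the
+		 * filtering-type bit that selects the hash-table variant. */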
+
+ /* Note that only the low-address shortword of setup_frame is valid!
+ The values are doubled for big-endian architectures. */
+ if (netdev_mc_count(dev) > 14) {
+ /* Must use a multicast hash table. */
+ build_setup_frame_hash(tp->setup_frame, dev);
+ tx_flags = 0x08400000 | 192;
+ } else {
+ build_setup_frame_perfect(tp->setup_frame, dev);
+ }
+
+ spin_lock_irqsave(&tp->lock, flags);
+
+ if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
+ /* Same setup recently queued, we need not add it. */
+ } else {
+ unsigned int entry;
+ int dummy = -1;
+
+ /* Now add this frame to the Tx list. */
+
+ entry = tp->cur_tx++ % TX_RING_SIZE;
+
+ if (entry != 0) {
+ /* Avoid a chip errata by prefixing a dummy entry. */
+ tp->tx_buffers[entry].skb = NULL;
+ tp->tx_buffers[entry].mapping = 0;
+ tp->tx_ring[entry].length =
+ (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
+ tp->tx_ring[entry].buffer1 = 0;
+ /* Must set DescOwned later to avoid race with chip */
+ dummy = entry;
+ entry = tp->cur_tx++ % TX_RING_SIZE;
+ }
+
+ tp->tx_buffers[entry].skb = NULL;
+ tp->tx_buffers[entry].mapping =
+ pci_map_single(tp->pdev, tp->setup_frame,
+ sizeof(tp->setup_frame),
+ PCI_DMA_TODEVICE);
+ /* Put the setup frame on the Tx list. */
+ if (entry == TX_RING_SIZE-1)
+ tx_flags |= DESC_RING_WRAP; /* Wrap ring. */
+ tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
+ tp->tx_ring[entry].buffer1 =
+ cpu_to_le32(tp->tx_buffers[entry].mapping);
+ tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+ if (dummy >= 0)
+ tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
+ if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
+ netif_stop_queue(dev);
+
+ /* Trigger an immediate transmit demand. */
+ iowrite32(0, ioaddr + CSR1);
+ }
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+ }
+
+ iowrite32(csr6, ioaddr + CSR6);
+}
+
+#ifdef CONFIG_TULIP_MWI
+static void __devinit tulip_mwi_config (struct pci_dev *pdev,
+ struct net_device *dev)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+ u8 cache;
+ u16 pci_command;
+ u32 csr0;
+
+ if (tulip_debug > 3)
+ netdev_dbg(dev, "tulip_mwi_config()\n");
+
+ tp->csr0 = csr0 = 0;
+
+ /* if we have any cache line size at all, we can do MRM and MWI */
+ csr0 |= MRM | MWI;
+
+ /* Enable MWI in the standard PCI command bit.
+ * Check for the case where MWI is desired but not available
+ */
+ pci_try_set_mwi(pdev);
+
+ /* read result from hardware (in case bit refused to enable) */
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+ if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
+ csr0 &= ~MWI;
+
+ /* if cache line size hardwired to zero, no MWI */
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
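+	/* PCI_CACHE_LINE_SIZE counts 32-bit words, so the 8/16/32 handled
+	 * below mean 32-, 64- and 128-byte cache lines */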
+ if ((csr0 & MWI) && (cache == 0)) {
+ csr0 &= ~MWI;
+ pci_clear_mwi(pdev);
+ }
+
+ /* assign per-cacheline-size cache alignment and
+ * burst length values
+ */
+ switch (cache) {
+ case 8:
+ csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
+ break;
+ case 16:
+ csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
+ break;
+ case 32:
+ csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
+ break;
+ default:
+ cache = 0;
+ break;
+ }
+
+ /* if we have a good cache line size, we by now have a good
+ * csr0, so save it and exit
+ */
+ if (cache)
+ goto out;
+
+ /* we don't have a good csr0 or cache line size, disable MWI */
+ if (csr0 & MWI) {
+ pci_clear_mwi(pdev);
+ csr0 &= ~MWI;
+ }
+
+ /* sane defaults for burst length and cache alignment
+ * originally from de4x5 driver
+ */
+ csr0 |= (8 << BurstLenShift) | (1 << CALShift);
+
+out:
+ tp->csr0 = csr0;
+ if (tulip_debug > 2)
+ netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
+ cache, csr0);
+}
+#endif
+
+/*
+ * Chips that have the MRM/reserved bit quirk and the burst quirk. That
+ * is the DM910X and the on chip ULi devices
+ */
+
+static int tulip_uli_dm_quirk(struct pci_dev *pdev)
+{
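+	/* 0x1282/0x9102 is the Davicom DM9102(A); per the comment above,
+	 * the on-chip ULi devices show up with the same IDs. */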
+ if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
+ return 1;
+ return 0;
+}
+
+static const struct net_device_ops tulip_netdev_ops = {
+ .ndo_open = tulip_open,
+ .ndo_start_xmit = tulip_start_xmit,
+ .ndo_tx_timeout = tulip_tx_timeout,
+ .ndo_stop = tulip_close,
+ .ndo_get_stats = tulip_get_stats,
+ .ndo_do_ioctl = private_ioctl,
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = poll_tulip,
+#endif
+};
+
+DEFINE_PCI_DEVICE_TABLE(early_486_chipsets) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
+ { },
+};
+
+static int __devinit tulip_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct tulip_private *tp;
+ /* See note below on the multiport cards. */
+ static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
+ static int last_irq;
+ static int multiport_cnt; /* For four-port boards w/one EEPROM */
+ int i, irq;
+ unsigned short sum;
+ unsigned char *ee_data;
+ struct net_device *dev;
+ void __iomem *ioaddr;
+ static int board_idx = -1;
+ int chip_idx = ent->driver_data;
+ const char *chip_name = tulip_tbl[chip_idx].chip_name;
+ unsigned int eeprom_missing = 0;
+ unsigned int force_csr0 = 0;
+
+#ifndef MODULE
+ if (tulip_debug > 0)
+ printk_once(KERN_INFO "%s", version);
+#endif
+
+ board_idx++;
+
+	/*
+	 * LMC (LAN Media) cards wire a tulip chip to a WAN interface
+	 * and need a very different driver (the lmc driver).
+	 */
+
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
+ pr_err("skipping LMC card\n");
+ return -ENODEV;
+ } else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
+ (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
+ pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
+ pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
+ pr_err("skipping SBE T3E3 port\n");
+ return -ENODEV;
+ }
+
+ /*
+ * DM910x chips should be handled by the dmfe driver, except
+ * on-board chips on SPARC systems. Also, early DM9100s need
+ * software CRC which only the dmfe driver supports.
+ */
+
+#ifdef CONFIG_TULIP_DM910X
+ if (chip_idx == DM910X) {
+ struct device_node *dp;
+
+ if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
+ pdev->revision < 0x30) {
+ pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
+ return -ENODEV;
+ }
+
+ dp = pci_device_to_OF_node(pdev);
+ if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
+ pr_info("skipping DM910x expansion card (use dmfe)\n");
+ return -ENODEV;
+ }
+ }
+#endif
+
+ /*
+ * Looks for early PCI chipsets where people report hangs
+ * without the workarounds being on.
+ */
+
+ /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
+ aligned. Aries might need this too. The Saturn errata are not
+ pretty reading but thankfully it's an old 486 chipset.
+
+ 2. The dreaded SiS496 486 chipset. Same workaround as Intel
+ Saturn.
+ */
+
+ if (pci_dev_present(early_486_chipsets)) {
+ csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
+ force_csr0 = 1;
+ }
+
+ /* bugfix: the ASIX must have a burst limit or horrible things happen. */
+ if (chip_idx == AX88140) {
+ if ((csr0 & 0x3f00) == 0)
+ csr0 |= 0x2000;
+ }
+
+ /* PNIC doesn't have MWI/MRL/MRM... */
+ if (chip_idx == LC82C168)
+ csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
+
+ /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
+ if (tulip_uli_dm_quirk(pdev)) {
+ csr0 &= ~0x01f100ff;
+#if defined(CONFIG_SPARC)
+ csr0 = (csr0 & ~0xff00) | 0xe000;
+#endif
+ }
+ /*
+ * And back to business
+ */
+
+ i = pci_enable_device(pdev);
+ if (i) {
+ pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
+ return i;
+ }
+
+ /* The chip will fail to enter a low-power state later unless
+ * first explicitly commanded into D0 */
+ if (pci_set_power_state(pdev, PCI_D0)) {
+ pr_notice("Failed to set power state to D0\n");
+ }
+
+ irq = pdev->irq;
+
+ /* alloc_etherdev ensures aligned and zeroed private structures */
+ dev = alloc_etherdev (sizeof (*tp));
+ if (!dev) {
+ pr_err("ether device alloc failed, aborting\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
+ pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
+ pci_name(pdev),
+ (unsigned long long)pci_resource_len (pdev, 0),
+ (unsigned long long)pci_resource_start (pdev, 0));
+ goto err_out_free_netdev;
+ }
+
+ /* grab all resources from both PIO and MMIO regions, as we
+ * don't want anyone else messing around with our hardware */
+ if (pci_request_regions (pdev, DRV_NAME))
+ goto err_out_free_netdev;
+
+ ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
+
+ if (!ioaddr)
+ goto err_out_free_res;
+
+ /*
+ * initialize private data structure 'tp'
+ * it is zeroed and aligned in alloc_etherdev
+ */
+ tp = netdev_priv(dev);
+ tp->dev = dev;
+
+ tp->rx_ring = pci_alloc_consistent(pdev,
+ sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
+ &tp->rx_ring_dma);
+ if (!tp->rx_ring)
+ goto err_out_mtable;
+ tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
+ tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
+
+ tp->chip_id = chip_idx;
+ tp->flags = tulip_tbl[chip_idx].flags;
+
+ tp->wolinfo.supported = 0;
+ tp->wolinfo.wolopts = 0;
+ /* COMET: Enable power management only for AN983B */
+ if (chip_idx == COMET ) {
+ u32 sig;
+ pci_read_config_dword (pdev, 0x80, &sig);
+ if (sig == 0x09811317) {
+ tp->flags |= COMET_PM;
+ tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
+ pr_info("%s: Enabled WOL support for AN983B\n",
+ __func__);
+ }
+ }
+ tp->pdev = pdev;
+ tp->base_addr = ioaddr;
+ tp->revision = pdev->revision;
+ tp->csr0 = csr0;
+ spin_lock_init(&tp->lock);
+ spin_lock_init(&tp->mii_lock);
+ init_timer(&tp->timer);
+ tp->timer.data = (unsigned long)dev;
+ tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
+
+ INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
+
+ dev->base_addr = (unsigned long)ioaddr;
+
+#ifdef CONFIG_TULIP_MWI
+ if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
+ tulip_mwi_config (pdev, dev);
+#endif
+
+ /* Stop the chip's Tx and Rx processes. */
+ tulip_stop_rxtx(tp);
+
+ pci_set_master(pdev);
+
+#ifdef CONFIG_GSC
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
+ switch (pdev->subsystem_device) {
+ default:
+ break;
+ case 0x1061:
+ case 0x1062:
+ case 0x1063:
+ case 0x1098:
+ case 0x1099:
+ case 0x10EE:
+ tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
+ chip_name = "GSC DS21140 Tulip";
+ }
+ }
+#endif
+
+ /* Clear the missed-packet counter. */
+ ioread32(ioaddr + CSR8);
+
+ /* The station address ROM is read byte serially. The register must
+ be polled, waiting for the value to be read bit serially from the
+ EEPROM.
+ */
+ ee_data = tp->eeprom;
+ memset(ee_data, 0, sizeof(tp->eeprom));
+ sum = 0;
+ if (chip_idx == LC82C168) {
+ for (i = 0; i < 3; i++) {
+ int value, boguscnt = 100000;
+ iowrite32(0x600 | i, ioaddr + 0x98);
+ do {
+ value = ioread32(ioaddr + CSR9);
+ } while (value < 0 && --boguscnt > 0);
+ put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
+ sum += value & 0xffff;
+ }
+ } else if (chip_idx == COMET) {
+ /* No need to read the EEPROM. */
+ put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
+ put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
+ for (i = 0; i < 6; i ++)
+ sum += dev->dev_addr[i];
+ } else {
+ /* A serial EEPROM interface, we read now and sort it out later. */
+ int sa_offset = 0;
+ int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
+ int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);
+
+ if (ee_max_addr > sizeof(tp->eeprom))
+ ee_max_addr = sizeof(tp->eeprom);
+
+ for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
+ u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
+ ee_data[i] = data & 0xff;
+ ee_data[i + 1] = data >> 8;
+ }
+
+ /* DEC now has a specification (see Notes) but early board makers
+ just put the address in the first EEPROM locations. */
+ /* This does memcmp(ee_data, ee_data+16, 8) */
+ for (i = 0; i < 8; i ++)
+ if (ee_data[i] != ee_data[16+i])
+ sa_offset = 20;
+ if (chip_idx == CONEXANT) {
+ /* Check that the tuple type and length is correct. */
+ if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6)
+ sa_offset = 0x19A;
+ } else if (ee_data[0] == 0xff && ee_data[1] == 0xff &&
+ ee_data[2] == 0) {
+ sa_offset = 2; /* Grrr, damn Matrox boards. */
+ multiport_cnt = 4;
+ }
+#ifdef CONFIG_MIPS_COBALT
+ if ((pdev->bus->number == 0) &&
+ ((PCI_SLOT(pdev->devfn) == 7) ||
+ (PCI_SLOT(pdev->devfn) == 12))) {
+ /* Cobalt MAC address in first EEPROM locations. */
+ sa_offset = 0;
+			/* Ensure our media table fixup gets applied */
+ memcpy(ee_data + 16, ee_data, 8);
+ }
+#endif
+#ifdef CONFIG_GSC
+ /* Check to see if we have a broken srom */
+ if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
+ /* pci_vendor_id and subsystem_id are swapped */
+ ee_data[0] = ee_data[2];
+ ee_data[1] = ee_data[3];
+ ee_data[2] = 0x61;
+ ee_data[3] = 0x10;
+
+			/* HSC-PCI boards need to be byte-swapped and shifted
+			 * up 1 word.  This shift needs to happen at the end
+			 * of the MAC first because of the 2 byte overlap.
+			 */
+ for (i = 4; i >= 0; i -= 2) {
+ ee_data[17 + i + 3] = ee_data[17 + i];
+ ee_data[16 + i + 5] = ee_data[16 + i];
+ }
+ }
+#endif
+
+ for (i = 0; i < 6; i ++) {
+ dev->dev_addr[i] = ee_data[i + sa_offset];
+ sum += ee_data[i + sa_offset];
+ }
+ }
+ /* Lite-On boards have the address byte-swapped. */
+ if ((dev->dev_addr[0] == 0xA0 ||
+ dev->dev_addr[0] == 0xC0 ||
+ dev->dev_addr[0] == 0x02) &&
+ dev->dev_addr[1] == 0x00)
+ for (i = 0; i < 6; i+=2) {
+ char tmp = dev->dev_addr[i];
+ dev->dev_addr[i] = dev->dev_addr[i+1];
+ dev->dev_addr[i+1] = tmp;
+ }
+ /* On the Zynx 315 Etherarray and other multiport boards only the
+ first Tulip has an EEPROM.
+ On Sparc systems the mac address is held in the OBP property
+ "local-mac-address".
+ The addresses of the subsequent ports are derived from the first.
+ Many PCI BIOSes also incorrectly report the IRQ line, so we correct
+ that here as well. */
+ if (sum == 0 || sum == 6*0xff) {
+#if defined(CONFIG_SPARC)
+ struct device_node *dp = pci_device_to_OF_node(pdev);
+ const unsigned char *addr;
+ int len;
+#endif
+ eeprom_missing = 1;
+ for (i = 0; i < 5; i++)
+ dev->dev_addr[i] = last_phys_addr[i];
+ dev->dev_addr[i] = last_phys_addr[i] + 1;
+#if defined(CONFIG_SPARC)
+ addr = of_get_property(dp, "local-mac-address", &len);
+ if (addr && len == 6)
+ memcpy(dev->dev_addr, addr, 6);
+#endif
+#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
+ if (last_irq)
+ irq = last_irq;
+#endif
+ }
+
+ for (i = 0; i < 6; i++)
+ last_phys_addr[i] = dev->dev_addr[i];
+ last_irq = irq;
+ dev->irq = irq;
+
+ /* The lower four bits are the media type. */
+ if (board_idx >= 0 && board_idx < MAX_UNITS) {
+ if (options[board_idx] & MEDIA_MASK)
+ tp->default_port = options[board_idx] & MEDIA_MASK;
+ if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
+ tp->full_duplex = 1;
+ if (mtu[board_idx] > 0)
+ dev->mtu = mtu[board_idx];
+ }
+ if (dev->mem_start & MEDIA_MASK)
+ tp->default_port = dev->mem_start & MEDIA_MASK;
+ if (tp->default_port) {
+ pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
+ board_idx, medianame[tp->default_port & MEDIA_MASK]);
+ tp->medialock = 1;
+ if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
+ tp->full_duplex = 1;
+ }
+ if (tp->full_duplex)
+ tp->full_duplex_lock = 1;
+
+ if (tulip_media_cap[tp->default_port] & MediaIsMII) {
+ static const u16 media2advert[] = {
+ 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
+ };
+ tp->mii_advertise = media2advert[tp->default_port - 9];
+ tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
+ }
+
+ if (tp->flags & HAS_MEDIA_TABLE) {
+ sprintf(dev->name, DRV_NAME "%d", board_idx); /* hack */
+ tulip_parse_eeprom(dev);
+ strcpy(dev->name, "eth%d"); /* un-hack */
+ }
+
+ if ((tp->flags & ALWAYS_CHECK_MII) ||
+ (tp->mtable && tp->mtable->has_mii) ||
+ ( ! tp->mtable && (tp->flags & HAS_MII))) {
+ if (tp->mtable && tp->mtable->has_mii) {
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == 11) {
+ tp->cur_index = i;
+ tp->saved_if_port = dev->if_port;
+ tulip_select_media(dev, 2);
+ dev->if_port = tp->saved_if_port;
+ break;
+ }
+ }
+
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs
+ later, but takes much time. */
+ tulip_find_mii (dev, board_idx);
+ }
+
+ /* The Tulip-specific entries in the device structure. */
+ dev->netdev_ops = &tulip_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#ifdef CONFIG_TULIP_NAPI
+ netif_napi_add(dev, &tp->napi, tulip_poll, 16);
+#endif
+ SET_ETHTOOL_OPS(dev, &ops);
+
+ if (register_netdev(dev))
+ goto err_out_free_ring;
+
+ pci_set_drvdata(pdev, dev);
+
+ dev_info(&dev->dev,
+#ifdef CONFIG_TULIP_MMIO
+ "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
+#else
+ "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
+#endif
+ chip_name, pdev->revision,
+ (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
+ eeprom_missing ? " EEPROM not present," : "",
+ dev->dev_addr, irq);
+
+ if (tp->chip_id == PNIC2)
+ tp->link_change = pnic2_lnk_change;
+ else if (tp->flags & HAS_NWAY)
+ tp->link_change = t21142_lnk_change;
+ else if (tp->flags & HAS_PNICNWAY)
+ tp->link_change = pnic_lnk_change;
+
+ /* Reset the xcvr interface and turn on heartbeat. */
+ switch (chip_idx) {
+ case DC21140:
+ case DM910X:
+ default:
+ if (tp->mtable)
+ iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
+ break;
+ case DC21142:
+ if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) {
+ iowrite32(csr6_mask_defstate, ioaddr + CSR6);
+ iowrite32(0x0000, ioaddr + CSR13);
+ iowrite32(0x0000, ioaddr + CSR14);
+ iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
+ } else
+ t21142_start_nway(dev);
+ break;
+ case PNIC2:
+		/* just do a reset for sanity's sake */
+ iowrite32(0x0000, ioaddr + CSR13);
+ iowrite32(0x0000, ioaddr + CSR14);
+ break;
+ case LC82C168:
+ if ( ! tp->mii_cnt) {
+ tp->nway = 1;
+ tp->nwayset = 0;
+ iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
+ iowrite32(0x30, ioaddr + CSR12);
+ iowrite32(0x0001F078, ioaddr + CSR6);
+ iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
+ }
+ break;
+ case MX98713:
+ case COMPEX9881:
+ iowrite32(0x00000000, ioaddr + CSR6);
+ iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
+ iowrite32(0x00000001, ioaddr + CSR13);
+ break;
+ case MX98715:
+ case MX98725:
+ iowrite32(0x01a80000, ioaddr + CSR6);
+ iowrite32(0xFFFFFFFF, ioaddr + CSR14);
+ iowrite32(0x00001000, ioaddr + CSR12);
+ break;
+ case COMET:
+ /* No initialization necessary. */
+ break;
+ }
+
+ /* put the chip in snooze mode until opened */
+ tulip_set_power_state (tp, 0, 1);
+
+ return 0;
+
+err_out_free_ring:
+ pci_free_consistent (pdev,
+ sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
+ tp->rx_ring, tp->rx_ring_dma);
+
+err_out_mtable:
+ kfree (tp->mtable);
+ pci_iounmap(pdev, ioaddr);
+
+err_out_free_res:
+ pci_release_regions (pdev);
+
+err_out_free_netdev:
+ free_netdev (dev);
+ return -ENODEV;
+}
+
+
+/* set the registers according to the given wolopts */
+static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+
+ if (tp->flags & COMET_PM) {
+
+ unsigned int tmp;
+
+ tmp = ioread32(ioaddr + CSR18);
+ tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
+ tmp |= comet_csr18_pm_mode;
+ iowrite32(tmp, ioaddr + CSR18);
+
+ /* Set the Wake-up Control/Status Register to the given WOL options*/
+ tmp = ioread32(ioaddr + CSR13);
+ tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
+ if (wolopts & WAKE_MAGIC)
+ tmp |= comet_csr13_mpre;
+ if (wolopts & WAKE_PHY)
+ tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
+ /* Clear the event flags */
+ tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
+ iowrite32(tmp, ioaddr + CSR13);
+ }
+}
+
+#ifdef CONFIG_PM
+
+
+static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
+{
+ pci_power_t pstate;
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tulip_private *tp = netdev_priv(dev);
+
+ if (!dev)
+ return -EINVAL;
+
+ if (!netif_running(dev))
+ goto save_state;
+
+ tulip_down(dev);
+
+ netif_device_detach(dev);
+ free_irq(dev->irq, dev);
+
+save_state:
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pstate = pci_choose_state(pdev, state);
+ if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) {
+ int rc;
+
+ tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
+ rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
+ if (rc)
+ pr_err("pci_enable_wake failed (%d)\n", rc);
+ }
+ pci_set_power_state(pdev, pstate);
+
+ return 0;
+}
+
+
+static int tulip_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+ int retval;
+ unsigned int tmp;
+
+ if (!dev)
+ return -EINVAL;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ if ((retval = pci_enable_device(pdev))) {
+ pr_err("pci_enable_device failed in resume\n");
+ return retval;
+ }
+
+ if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
+ pr_err("request_irq failed in resume\n");
+ return retval;
+ }
+
+ if (tp->flags & COMET_PM) {
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ /* Clear the PMES flag */
+ tmp = ioread32(ioaddr + CSR20);
+ tmp |= comet_csr20_pmes;
+ iowrite32(tmp, ioaddr + CSR20);
+
+ /* Disable all wake-up events */
+ tulip_set_wolopts(pdev, 0);
+ }
+ netif_device_attach(dev);
+
+ if (netif_running(dev))
+ tulip_up(dev);
+
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+
+static void __devexit tulip_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct tulip_private *tp;
+
+ if (!dev)
+ return;
+
+ tp = netdev_priv(dev);
+ unregister_netdev(dev);
+ pci_free_consistent (pdev,
+ sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
+ tp->rx_ring, tp->rx_ring_dma);
+ kfree (tp->mtable);
+ pci_iounmap(pdev, tp->base_addr);
+ free_netdev (dev);
+ pci_release_regions (pdev);
+ pci_set_drvdata (pdev, NULL);
+
+ /* pci_power_off (pdev, -1); */
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+
+static void poll_tulip (struct net_device *dev)
+{
+ /* disable_irq here is not very nice, but with the lockless
+ interrupt handler we have no other choice. */
+ disable_irq(dev->irq);
+ tulip_interrupt (dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static struct pci_driver tulip_driver = {
+ .name = DRV_NAME,
+ .id_table = tulip_pci_tbl,
+ .probe = tulip_init_one,
+ .remove = __devexit_p(tulip_remove_one),
+#ifdef CONFIG_PM
+ .suspend = tulip_suspend,
+ .resume = tulip_resume,
+#endif /* CONFIG_PM */
+};
+
+
+static int __init tulip_init (void)
+{
+#ifdef MODULE
+ pr_info("%s", version);
+#endif
+
+ /* copy module parms into globals */
+ tulip_rx_copybreak = rx_copybreak;
+ tulip_max_interrupt_work = max_interrupt_work;
+
+ /* probe for and init boards */
+ return pci_register_driver(&tulip_driver);
+}
+
+
+static void __exit tulip_cleanup (void)
+{
+ pci_unregister_driver (&tulip_driver);
+}
+
+
+module_init(tulip_init);
+module_exit(tulip_cleanup);
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
new file mode 100644
index 000000000000..7a44a7a6adc8
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -0,0 +1,1850 @@
+/*
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME "uli526x"
+#define DRV_VERSION "0.9.3"
+#define DRV_RELDATE "2005-7-29"
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
+
+
+/* Board/System/Debug information/definition ---------------- */
+#define PCI_ULI5261_ID 0x526110B9 /* ULi M5261 ID*/
+#define PCI_ULI5263_ID 0x526310B9 /* ULi M5263 ID*/
+
+#define ULI526X_IO_SIZE 0x100
+#define TX_DESC_CNT 0x20 /* Allocated Tx descriptors */
+#define RX_DESC_CNT 0x30 /* Allocated Rx descriptors */
+#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */
+#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */
+#define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT)
+#define TX_BUF_ALLOC 0x600
+#define RX_ALLOC_SIZE 0x620
+#define ULI526X_RESET 1
+#define CR0_DEFAULT 0
+#define CR6_DEFAULT 0x22200000
+#define CR7_DEFAULT 0x180c1
+#define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */
+#define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */
+#define MAX_PACKET_SIZE 1514
+#define ULI5261_MAX_MULTICAST 14
+#define RX_COPY_SIZE 100
+#define MAX_CHECK_PACKET 0x8000
+
+#define ULI526X_10MHF 0
+#define ULI526X_100MHF 1
+#define ULI526X_10MFD 4
+#define ULI526X_100MFD 5
+#define ULI526X_AUTO 8
+
+#define ULI526X_TXTH_72 0x400000 /* TX TH 72 byte */
+#define ULI526X_TXTH_96 0x404000 /* TX TH 96 byte */
+#define ULI526X_TXTH_128 0x0000 /* TX TH 128 byte */
+#define ULI526X_TXTH_256 0x4000 /* TX TH 256 byte */
+#define ULI526X_TXTH_512 0x8000 /* TX TH 512 byte */
+#define ULI526X_TXTH_1K 0xC000 /* TX TH 1K byte */
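+/* Transmit-threshold values OR-ed into CR6; on a Tx FIFO underrun the
+ * driver escalates to store-and-forward (CR6_SFT) in uli526x_free_tx_pkt. */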
+
+#define ULI526X_TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */
+#define ULI526X_TX_TIMEOUT ((16*HZ)/2)	/* tx packet time-out time 8 s */
+#define ULI526X_TX_KICK 	(4*HZ/2)	/* tx packet Kick-out time 2 s */
+
+#define ULI526X_DBUG(dbug_now, msg, value) \
+do { \
+ if (uli526x_debug || (dbug_now)) \
+ pr_err("%s %lx\n", (msg), (long) (value)); \
+} while (0)
+
+#define SHOW_MEDIA_TYPE(mode) \
+	pr_err("Change Speed to %sMbps %s duplex\n", \
+		mode & 1 ? "100" : "10", \
+		mode & 4 ? "full" : "half")
+
+
+/* CR9 definition: SROM/MII */
+#define CR9_SROM_READ 0x4800
+#define CR9_SRCS 0x1
+#define CR9_SRCLK 0x2
+#define CR9_CRDOUT 0x8
+#define SROM_DATA_0 0x0
+#define SROM_DATA_1 0x4
+#define PHY_DATA_1 0x20000
+#define PHY_DATA_0 0x00000
+#define MDCLKH 0x10000
+
+#define PHY_POWER_DOWN 0x800
+
+#define SROM_V41_CODE 0x14
+
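+/* Bit-bang one SROM clock cycle: present the data bit with chip select
+ * asserted, pulse SRCLK high, then drop it again, ~5 us per phase. */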
+#define SROM_CLK_WRITE(data, ioaddr) \
+do { \
+	outl(data | CR9_SROM_READ | CR9_SRCS, ioaddr); \
+	udelay(5); \
+	outl(data | CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, ioaddr); \
+	udelay(5); \
+	outl(data | CR9_SROM_READ | CR9_SRCS, ioaddr); \
+	udelay(5); \
+} while (0)
+
+/* Structure/enum declaration ------------------------------- */
+struct tx_desc {
+ __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
+ char *tx_buf_ptr; /* Data for us */
+ struct tx_desc *next_tx_desc;
+} __attribute__(( aligned(32) ));
+
+struct rx_desc {
+ __le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
+ struct sk_buff *rx_skb_ptr; /* Data for us */
+ struct rx_desc *next_rx_desc;
+} __attribute__(( aligned(32) ));
+
+struct uli526x_board_info {
+ u32 chip_id; /* Chip vendor/Device ID */
+ struct net_device *next_dev; /* next device */
+ struct pci_dev *pdev; /* PCI device */
+ spinlock_t lock;
+
+ long ioaddr; /* I/O base address */
+ u32 cr0_data;
+ u32 cr5_data;
+ u32 cr6_data;
+ u32 cr7_data;
+ u32 cr15_data;
+
+ /* pointer for memory physical address */
+ dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */
+ dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */
+ dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */
+ dma_addr_t first_tx_desc_dma;
+ dma_addr_t first_rx_desc_dma;
+
+ /* descriptor pointer */
+ unsigned char *buf_pool_ptr; /* Tx buffer pool memory */
+ unsigned char *buf_pool_start; /* Tx buffer pool align dword */
+ unsigned char *desc_pool_ptr; /* descriptor pool memory */
+ struct tx_desc *first_tx_desc;
+ struct tx_desc *tx_insert_ptr;
+ struct tx_desc *tx_remove_ptr;
+ struct rx_desc *first_rx_desc;
+ struct rx_desc *rx_insert_ptr;
+	struct rx_desc *rx_ready_ptr;	/* packet arrival pointer */
+	unsigned long tx_packet_cnt;	/* transmitted packet count */
+	unsigned long rx_avail_cnt;	/* available rx descriptor count */
+	unsigned long interval_rx_cnt;	/* rx packets received per timer interval */
+
+ u16 dbug_cnt;
+ u16 NIC_capability; /* NIC media capability */
+ u16 PHY_reg4; /* Saved Phyxcer register 4 value */
+
+	u8 media_mode;			/* user-specified media mode */
+	u8 op_mode;			/* actual working media mode */
+	u8 phy_addr;
+	u8 link_failed;			/* link failure flag */
+	u8 wait_reset;			/* hardware failed, needs reset */
+ struct timer_list timer;
+
+ /* Driver defined statistic counter */
+ unsigned long tx_fifo_underrun;
+ unsigned long tx_loss_carrier;
+ unsigned long tx_no_carrier;
+ unsigned long tx_late_collision;
+ unsigned long tx_excessive_collision;
+ unsigned long tx_jabber_timeout;
+ unsigned long reset_count;
+ unsigned long reset_cr8;
+ unsigned long reset_fatal;
+ unsigned long reset_TXtimeout;
+
+ /* NIC SROM data */
+ unsigned char srom[128];
+ u8 init;
+};
+
+enum uli526x_offsets {
+ DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
+ DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
+ DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
+ DCR15 = 0x78
+};
+
+enum uli526x_CR6_bits {
+ CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
+ CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
+ CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
+};
+
+/* Global variable declaration ----------------------------- */
+static int __devinitdata printed_version;
+static const char version[] __devinitconst =
+ "ULi M5261/M5263 net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
+
+static int uli526x_debug;
+static unsigned char uli526x_media_mode = ULI526X_AUTO;
+static u32 uli526x_cr6_user_set;
+
+/* For module input parameter */
+static int debug;
+static u32 cr6set;
+static int mode = 8;
+
+/* function declaration ------------------------------------- */
+static int uli526x_open(struct net_device *);
+static netdev_tx_t uli526x_start_xmit(struct sk_buff *,
+ struct net_device *);
+static int uli526x_stop(struct net_device *);
+static void uli526x_set_filter_mode(struct net_device *);
+static const struct ethtool_ops netdev_ethtool_ops;
+static u16 read_srom_word(long, int);
+static irqreturn_t uli526x_interrupt(int, void *);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void uli526x_poll(struct net_device *dev);
+#endif
+static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long);
+static void allocate_rx_buffer(struct uli526x_board_info *);
+static void update_cr6(u32, unsigned long);
+static void send_filter_frame(struct net_device *, int);
+static u16 phy_read(unsigned long, u8, u8, u32);
+static u16 phy_readby_cr10(unsigned long, u8, u8);
+static void phy_write(unsigned long, u8, u8, u16, u32);
+static void phy_writeby_cr10(unsigned long, u8, u8, u16);
+static void phy_write_1bit(unsigned long, u32, u32);
+static u16 phy_read_1bit(unsigned long, u32);
+static u8 uli526x_sense_speed(struct uli526x_board_info *);
+static void uli526x_process_mode(struct uli526x_board_info *);
+static void uli526x_timer(unsigned long);
+static void uli526x_rx_packet(struct net_device *, struct uli526x_board_info *);
+static void uli526x_free_tx_pkt(struct net_device *, struct uli526x_board_info *);
+static void uli526x_reuse_skb(struct uli526x_board_info *, struct sk_buff *);
+static void uli526x_dynamic_reset(struct net_device *);
+static void uli526x_free_rxbuffer(struct uli526x_board_info *);
+static void uli526x_init(struct net_device *);
+static void uli526x_set_phyxcer(struct uli526x_board_info *);
+
+/* ULI526X network board routine ---------------------------- */
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = uli526x_open,
+ .ndo_stop = uli526x_stop,
+ .ndo_start_xmit = uli526x_start_xmit,
+ .ndo_set_rx_mode = uli526x_set_filter_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = uli526x_poll,
+#endif
+};
+
+/*
+ * Search ULI526X board, allocate space and register it
+ */
+
+static int __devinit uli526x_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct uli526x_board_info *db; /* board information structure */
+ struct net_device *dev;
+ int i, err;
+
+ ULI526X_DBUG(0, "uli526x_init_one()", 0);
+
+ if (!printed_version++)
+ pr_info("%s\n", version);
+
+ /* Init network device */
+ dev = alloc_etherdev(sizeof(*db));
+ if (dev == NULL)
+ return -ENOMEM;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ pr_warn("32-bit PCI DMA not available\n");
+ err = -ENODEV;
+ goto err_out_free;
+ }
+
+ /* Enable Master/IO access, Disable memory access */
+ err = pci_enable_device(pdev);
+ if (err)
+ goto err_out_free;
+
+ if (!pci_resource_start(pdev, 0)) {
+ pr_err("I/O base is zero\n");
+ err = -ENODEV;
+ goto err_out_disable;
+ }
+
+ if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) {
+ pr_err("Allocated I/O size too small\n");
+ err = -ENODEV;
+ goto err_out_disable;
+ }
+
+ if (pci_request_regions(pdev, DRV_NAME)) {
+ pr_err("Failed to request PCI regions\n");
+ err = -ENODEV;
+ goto err_out_disable;
+ }
+
+ /* Init system & device */
+ db = netdev_priv(dev);
+
+ /* Allocate Tx/Rx descriptor memory */
+	db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
+	if (db->desc_pool_ptr == NULL) {
+		err = -ENOMEM;
+		goto err_out_nomem;
+	}
+	db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
+	if (db->buf_pool_ptr == NULL) {
+		err = -ENOMEM;
+		goto err_out_nomem;
+	}
+
+ db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
+ db->first_tx_desc_dma = db->desc_pool_dma_ptr;
+ db->buf_pool_start = db->buf_pool_ptr;
+ db->buf_pool_dma_start = db->buf_pool_dma_ptr;
+
+ db->chip_id = ent->driver_data;
+ db->ioaddr = pci_resource_start(pdev, 0);
+
+ db->pdev = pdev;
+ db->init = 1;
+
+ dev->base_addr = db->ioaddr;
+ dev->irq = pdev->irq;
+ pci_set_drvdata(pdev, dev);
+
+ /* Register some necessary functions */
+ dev->netdev_ops = &netdev_ops;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+
+ spin_lock_init(&db->lock);
+
+
+ /* read 64 word srom data */
+ for (i = 0; i < 64; i++)
+ ((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
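+	/* (64 words fill the whole 128-byte db->srom buffer) */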
+
+	/* Set Node address */
+	if (((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0) {
+		/* SROM absent: read the MAC address from the ID Table */
+		outl(0x10000, db->ioaddr + DCR0);	/* Diagnosis mode */
+		outl(0x1c0, db->ioaddr + DCR13);	/* Reset diagnostic pointer port */
+		outl(0, db->ioaddr + DCR14);		/* Clear reset port */
+		outl(0x10, db->ioaddr + DCR14);		/* Reset ID Table pointer */
+		outl(0, db->ioaddr + DCR14);		/* Clear reset port */
+		outl(0, db->ioaddr + DCR13);		/* Clear CR13 */
+		outl(0x1b0, db->ioaddr + DCR13);	/* Select ID Table access port */
+		/* Read MAC address from CR14 */
+		for (i = 0; i < 6; i++)
+			dev->dev_addr[i] = inl(db->ioaddr + DCR14);
+		outl(0, db->ioaddr + DCR13);		/* Clear CR13 */
+		outl(0, db->ioaddr + DCR0);		/* Clear CR0 */
+		udelay(10);
+	} else {
+		/* SROM present: the MAC address lives at offset 20 */
+		for (i = 0; i < 6; i++)
+			dev->dev_addr[i] = db->srom[20 + i];
+	}
+ err = register_netdev (dev);
+ if (err)
+ goto err_out_res;
+
+ netdev_info(dev, "ULi M%04lx at pci%s, %pM, irq %d\n",
+ ent->driver_data >> 16, pci_name(pdev),
+ dev->dev_addr, dev->irq);
+
+ pci_set_master(pdev);
+
+ return 0;
+
+err_out_res:
+ pci_release_regions(pdev);
+err_out_nomem:
+	if (db->desc_pool_ptr)
+		pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+				    db->desc_pool_ptr, db->desc_pool_dma_ptr);
+
+	if (db->buf_pool_ptr != NULL)
+		pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+				    db->buf_pool_ptr, db->buf_pool_dma_ptr);
+err_out_disable:
+ pci_disable_device(pdev);
+err_out_free:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(dev);
+
+ return err;
+}
+
+
+static void __devexit uli526x_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct uli526x_board_info *db = netdev_priv(dev);
+
+ ULI526X_DBUG(0, "uli526x_remove_one()", 0);
+
+ pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
+ DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
+ db->desc_pool_dma_ptr);
+ pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ db->buf_pool_ptr, db->buf_pool_dma_ptr);
+ unregister_netdev(dev);
+ pci_release_regions(pdev);
+ free_netdev(dev); /* free board information */
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+ ULI526X_DBUG(0, "uli526x_remove_one() exit", 0);
+}
+
+
+/*
+ * Open the interface.
+ * The interface is opened whenever "ifconfig" activates it.
+ */
+
+static int uli526x_open(struct net_device *dev)
+{
+ int ret;
+ struct uli526x_board_info *db = netdev_priv(dev);
+
+ ULI526X_DBUG(0, "uli526x_open", 0);
+
+ /* system variable init */
+ db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set;
+ db->tx_packet_cnt = 0;
+ db->rx_avail_cnt = 0;
+ db->link_failed = 1;
+ netif_carrier_off(dev);
+ db->wait_reset = 0;
+
+ db->NIC_capability = 0xf; /* All capability*/
+ db->PHY_reg4 = 0x1e0;
+
+ /* CR6 operation mode decision */
+ db->cr6_data |= ULI526X_TXTH_256;
+ db->cr0_data = CR0_DEFAULT;
+
+ /* Initialize ULI526X board */
+ uli526x_init(dev);
+
+ ret = request_irq(dev->irq, uli526x_interrupt, IRQF_SHARED, dev->name, dev);
+ if (ret)
+ return ret;
+
+	/* Activate the system interface */
+ netif_wake_queue(dev);
+
+	/* Set and activate a timer process */
+ init_timer(&db->timer);
+ db->timer.expires = ULI526X_TIMER_WUT + HZ * 2;
+ db->timer.data = (unsigned long)dev;
+ db->timer.function = uli526x_timer;
+ add_timer(&db->timer);
+
+ return 0;
+}
+
+
+/* Initialize ULI526X board
+ * Reset ULI526X board
+ * Initialize TX/Rx descriptor chain structure
+ * Send the set-up frame
+ * Enable Tx/Rx machine
+ */
+
+static void uli526x_init(struct net_device *dev)
+{
+ struct uli526x_board_info *db = netdev_priv(dev);
+ unsigned long ioaddr = db->ioaddr;
+ u8 phy_tmp;
+ u8 timeout;
+ u16 phy_value;
+ u16 phy_reg_reset;
+
+
+ ULI526X_DBUG(0, "uli526x_init()", 0);
+
+ /* Reset M526x MAC controller */
+ outl(ULI526X_RESET, ioaddr + DCR0); /* RESET MAC */
+ udelay(100);
+ outl(db->cr0_data, ioaddr + DCR0);
+ udelay(5);
+
+	/* PHY address: on some boards the M5261/M5263 PHY address is not 1 */
+	db->phy_addr = 1;
+	for (phy_tmp = 0; phy_tmp < 32; phy_tmp++) {
+		/* probe via the MII PHY ID register (reg 3) */
+		phy_value = phy_read(db->ioaddr, phy_tmp, 3, db->chip_id);
+		if (phy_value != 0xffff && phy_value != 0) {
+			db->phy_addr = phy_tmp;
+			break;
+		}
+	}
+	if (phy_tmp == 32)
+		pr_warn("Can not find the phy address!\n");
+	/* Parse the SROM and the media mode */
+ db->media_mode = uli526x_media_mode;
+
+ /* phyxcer capability setting */
+ phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id);
+ phy_reg_reset = (phy_reg_reset | 0x8000);
+ phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id);
+
+ /* See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management
+ * functions") or phy data sheet for details on phy reset
+ */
+ udelay(500);
+ timeout = 10;
+ while (timeout-- &&
+ phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id) & 0x8000)
+ udelay(100);
+
+ /* Process Phyxcer Media Mode */
+ uli526x_set_phyxcer(db);
+
+ /* Media Mode Process */
+ if ( !(db->media_mode & ULI526X_AUTO) )
+ db->op_mode = db->media_mode; /* Force Mode */
+
+	/* Initialize Transmit/Receive descriptor and CR3/4 */
+ uli526x_descriptor_init(db, ioaddr);
+
+ /* Init CR6 to program M526X operation */
+ update_cr6(db->cr6_data, ioaddr);
+
+ /* Send setup frame */
+ send_filter_frame(dev, netdev_mc_count(dev)); /* M5261/M5263 */
+
+ /* Init CR7, interrupt active bit */
+ db->cr7_data = CR7_DEFAULT;
+ outl(db->cr7_data, ioaddr + DCR7);
+
+ /* Init CR15, Tx jabber and Rx watchdog timer */
+ outl(db->cr15_data, ioaddr + DCR15);
+
+ /* Enable ULI526X Tx/Rx function */
+ db->cr6_data |= CR6_RXSC | CR6_TXSC;
+ update_cr6(db->cr6_data, ioaddr);
+}
+
+
+/*
+ * Hardware start transmission.
+ * Send a packet to media from the upper layer.
+ */
+
+static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct uli526x_board_info *db = netdev_priv(dev);
+ struct tx_desc *txptr;
+ unsigned long flags;
+
+ ULI526X_DBUG(0, "uli526x_start_xmit", 0);
+
+ /* Resource flag check */
+ netif_stop_queue(dev);
+
+ /* Too large packet check */
+ if (skb->len > MAX_PACKET_SIZE) {
+ netdev_err(dev, "big packet = %d\n", (u16)skb->len);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ spin_lock_irqsave(&db->lock, flags);
+
+	/* No Tx resource check; this should never happen normally */
+ if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
+ spin_unlock_irqrestore(&db->lock, flags);
+ netdev_err(dev, "No Tx resource %ld\n", db->tx_packet_cnt);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Disable NIC interrupt */
+ outl(0, dev->base_addr + DCR7);
+
+ /* transmit this packet */
+ txptr = db->tx_insert_ptr;
+ skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
+ txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
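+	/* Assumed 21x4x TDES1 layout: 0xe1000000 = interrupt-on-completion,
+	 * last-segment, first-segment and chained-buffer bits, OR-ed with
+	 * the frame length. */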
+
+ /* Point to next transmit free descriptor */
+ db->tx_insert_ptr = txptr->next_tx_desc;
+
+ /* Transmit Packet Process */
+ if ( (db->tx_packet_cnt < TX_DESC_CNT) ) {
+ txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
+ db->tx_packet_cnt++; /* Ready to send */
+ outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
+ dev->trans_start = jiffies; /* saved time stamp */
+ }
+
+ /* Tx resource check */
+ if ( db->tx_packet_cnt < TX_FREE_DESC_CNT )
+ netif_wake_queue(dev);
+
+ /* Restore CR7 to enable interrupt */
+ spin_unlock_irqrestore(&db->lock, flags);
+ outl(db->cr7_data, dev->base_addr + DCR7);
+
+ /* free this SKB */
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+
+/*
+ * Stop the interface.
+ * The interface is stopped when it is brought down.
+ */
+
+static int uli526x_stop(struct net_device *dev)
+{
+ struct uli526x_board_info *db = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+
+ ULI526X_DBUG(0, "uli526x_stop", 0);
+
+ /* disable system */
+ netif_stop_queue(dev);
+
+	/* delete the timer */
+ del_timer_sync(&db->timer);
+
+ /* Reset & stop ULI526X board */
+ outl(ULI526X_RESET, ioaddr + DCR0);
+ udelay(5);
+ phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
+
+ /* free interrupt */
+ free_irq(dev->irq, dev);
+
+ /* free allocated rx buffer */
+ uli526x_free_rxbuffer(db);
+
+ return 0;
+}
+
+
+/*
+ * M5261/M5263 interrupt handler
+ * Pass received packets to the upper layer and free transmitted packets
+ */
+
+static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct uli526x_board_info *db = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&db->lock, flags);
+ outl(0, ioaddr + DCR7);
+
+ /* Got ULI526X status */
+ db->cr5_data = inl(ioaddr + DCR5);
+ outl(db->cr5_data, ioaddr + DCR5);
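+	/* 0x180c1 mirrors CR7_DEFAULT: only status bits we unmask matter */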
+ if ( !(db->cr5_data & 0x180c1) ) {
+ /* Restore CR7 to enable interrupt mask */
+ outl(db->cr7_data, ioaddr + DCR7);
+ spin_unlock_irqrestore(&db->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ /* Check system status */
+ if (db->cr5_data & 0x2000) {
+ /* system bus error happen */
+ ULI526X_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
+ db->reset_fatal++;
+ db->wait_reset = 1; /* Need to RESET */
+ spin_unlock_irqrestore(&db->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ /* Received the coming packet */
+ if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
+ uli526x_rx_packet(dev, db);
+
+ /* reallocate rx descriptor buffer */
+ if (db->rx_avail_cnt<RX_DESC_CNT)
+ allocate_rx_buffer(db);
+
+ /* Free the transmitted descriptor */
+ if ( db->cr5_data & 0x01)
+ uli526x_free_tx_pkt(dev, db);
+
+ /* Restore CR7 to enable interrupt mask */
+ outl(db->cr7_data, ioaddr + DCR7);
+
+ spin_unlock_irqrestore(&db->lock, flags);
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void uli526x_poll(struct net_device *dev)
+{
+ /* ISR grabs the irqsave lock, so this should be safe */
+ uli526x_interrupt(dev->irq, dev);
+}
+#endif
+
+/*
+ * Free TX resource after TX complete
+ */
+
+static void uli526x_free_tx_pkt(struct net_device *dev,
+ struct uli526x_board_info * db)
+{
+ struct tx_desc *txptr;
+ u32 tdes0;
+
+ txptr = db->tx_remove_ptr;
+ while(db->tx_packet_cnt) {
+ tdes0 = le32_to_cpu(txptr->tdes0);
+ if (tdes0 & 0x80000000)
+ break;
+
+		/* A packet transmission completed */
+ db->tx_packet_cnt--;
+ dev->stats.tx_packets++;
+
+ /* Transmit statistic counter */
+ if ( tdes0 != 0x7fffffff ) {
+ dev->stats.collisions += (tdes0 >> 3) & 0xf;
+ dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
+ if (tdes0 & TDES0_ERR_MASK) {
+ dev->stats.tx_errors++;
+ if (tdes0 & 0x0002) { /* UnderRun */
+ db->tx_fifo_underrun++;
+ if ( !(db->cr6_data & CR6_SFT) ) {
+ db->cr6_data = db->cr6_data | CR6_SFT;
+ update_cr6(db->cr6_data, db->ioaddr);
+ }
+ }
+ if (tdes0 & 0x0100)
+ db->tx_excessive_collision++;
+ if (tdes0 & 0x0200)
+ db->tx_late_collision++;
+ if (tdes0 & 0x0400)
+ db->tx_no_carrier++;
+ if (tdes0 & 0x0800)
+ db->tx_loss_carrier++;
+ if (tdes0 & 0x4000)
+ db->tx_jabber_timeout++;
+ }
+ }
+
+ txptr = txptr->next_tx_desc;
+ }/* End of while */
+
+ /* Update TX remove pointer to next */
+ db->tx_remove_ptr = txptr;
+
+ /* Resource available check */
+ if ( db->tx_packet_cnt < TX_WAKE_DESC_CNT )
+ netif_wake_queue(dev); /* Active upper layer, send again */
+}
+
+
+/*
+ * Receive incoming packets and pass them to the upper layer
+ */
+
+static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info * db)
+{
+ struct rx_desc *rxptr;
+ struct sk_buff *skb;
+ int rxlen;
+ u32 rdes0;
+
+ rxptr = db->rx_ready_ptr;
+
+	while (db->rx_avail_cnt) {
+		rdes0 = le32_to_cpu(rxptr->rdes0);
+		if (rdes0 & 0x80000000)	/* packet owner check */
+			break;
+
+ db->rx_avail_cnt--;
+ db->interval_rx_cnt++;
+
+ pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ if ( (rdes0 & 0x300) != 0x300) {
+ /* A packet without First/Last flag */
+ /* reuse this SKB */
+ ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
+ uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
+ } else {
+ /* A packet with First/Last flag */
+ rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
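+			/* the frame-length field includes the 4-byte CRC */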
+
+ /* error summary bit check */
+ if (rdes0 & 0x8000) {
+				/* This is an error packet */
+ dev->stats.rx_errors++;
+ if (rdes0 & 1)
+ dev->stats.rx_fifo_errors++;
+ if (rdes0 & 2)
+ dev->stats.rx_crc_errors++;
+ if (rdes0 & 0x80)
+ dev->stats.rx_length_errors++;
+ }
+
+ if ( !(rdes0 & 0x8000) ||
+ ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
+ struct sk_buff *new_skb = NULL;
+
+ skb = rxptr->rx_skb_ptr;
+
+ /* Good packet, send to upper layer */
+				/* Short packets are copied into a new SKB */
+ if ((rxlen < RX_COPY_SIZE) &&
+ (((new_skb = dev_alloc_skb(rxlen + 2)) != NULL))) {
+ skb = new_skb;
+ /* size less than COPY_SIZE, allocate a rxlen SKB */
+ skb_reserve(skb, 2); /* 16byte align */
+ memcpy(skb_put(skb, rxlen),
+ skb_tail_pointer(rxptr->rx_skb_ptr),
+ rxlen);
+ uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
+ } else
+ skb_put(skb, rxlen);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += rxlen;
+
+ } else {
+ /* Reuse SKB buffer when the packet is error */
+ ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
+ uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
+ }
+ }
+
+ rxptr = rxptr->next_rx_desc;
+ }
+
+ db->rx_ready_ptr = rxptr;
+}
+
+
+/*
+ * Set ULI526X multicast address
+ */
+
+static void uli526x_set_filter_mode(struct net_device * dev)
+{
+ struct uli526x_board_info *db = netdev_priv(dev);
+ unsigned long flags;
+
+ ULI526X_DBUG(0, "uli526x_set_filter_mode()", 0);
+ spin_lock_irqsave(&db->lock, flags);
+
+ if (dev->flags & IFF_PROMISC) {
+ ULI526X_DBUG(0, "Enable PROM Mode", 0);
+ db->cr6_data |= CR6_PM | CR6_PBF;
+ update_cr6(db->cr6_data, db->ioaddr);
+ spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
+
+ if (dev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(dev) > ULI5261_MAX_MULTICAST) {
+ ULI526X_DBUG(0, "Pass all multicast address",
+ netdev_mc_count(dev));
+ db->cr6_data &= ~(CR6_PM | CR6_PBF);
+ db->cr6_data |= CR6_PAM;
+ spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
+
+ ULI526X_DBUG(0, "Set multicast address", netdev_mc_count(dev));
+ send_filter_frame(dev, netdev_mc_count(dev)); /* M5261/M5263 */
+ spin_unlock_irqrestore(&db->lock, flags);
+}
+
+static void
+ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
+{
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_MII);
+
+ ecmd->advertising = (ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_Autoneg |
+ ADVERTISED_MII);
+
+
+ ecmd->port = PORT_MII;
+ ecmd->phy_address = db->phy_addr;
+
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
+ ecmd->duplex = DUPLEX_HALF;
+
+	if (db->op_mode == ULI526X_100MHF || db->op_mode == ULI526X_100MFD)
+		ethtool_cmd_speed_set(ecmd, SPEED_100);
+	if (db->op_mode == ULI526X_10MFD || db->op_mode == ULI526X_100MFD)
+		ecmd->duplex = DUPLEX_FULL;
+	if (db->link_failed) {
+		ethtool_cmd_speed_set(ecmd, -1);
+		ecmd->duplex = -1;
+	}
+
+	if (db->media_mode & ULI526X_AUTO)
+		ecmd->autoneg = AUTONEG_ENABLE;
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct uli526x_board_info *np = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ if (np->pdev)
+ strcpy(info->bus_info, pci_name(np->pdev));
+ else
+ sprintf(info->bus_info, "EISA 0x%lx %d",
+ dev->base_addr, dev->irq);
+}
+
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct uli526x_board_info *np = netdev_priv(dev);
+
+ ULi_ethtool_gset(np, cmd);
+
+ return 0;
+}
+
+static u32 netdev_get_link(struct net_device *dev)
+{
+	struct uli526x_board_info *np = netdev_priv(dev);
+
+	return !np->link_failed;
+}
+
+static void uli526x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ wol->supported = WAKE_PHY | WAKE_MAGIC;
+ wol->wolopts = 0;
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_settings = netdev_get_settings,
+ .get_link = netdev_get_link,
+ .get_wol = uli526x_get_wol,
+};
+
+/*
+ * A periodic timer routine
+ * Dynamic media sense, allocate Rx buffer...
+ */
+
+static void uli526x_timer(unsigned long data)
+{
+ u32 tmp_cr8;
+ unsigned char tmp_cr12=0;
+ struct net_device *dev = (struct net_device *) data;
+ struct uli526x_board_info *db = netdev_priv(dev);
+ unsigned long flags;
+
+ //ULI526X_DBUG(0, "uli526x_timer()", 0);
+ spin_lock_irqsave(&db->lock, flags);
+
+
+ /* Dynamic reset ULI526X : system error or transmit time-out */
+ tmp_cr8 = inl(db->ioaddr + DCR8);
+ if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
+ db->reset_cr8++;
+ db->wait_reset = 1;
+ }
+ db->interval_rx_cnt = 0;
+
+ /* TX polling kick monitor */
+ if ( db->tx_packet_cnt &&
+ time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) {
+ outl(0x1, dev->base_addr + DCR1); // Tx polling again
+
+ // TX Timeout
+ if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) {
+ db->reset_TXtimeout++;
+ db->wait_reset = 1;
+ netdev_err(dev, " Tx timeout - resetting\n");
+ }
+ }
+
+ if (db->wait_reset) {
+ ULI526X_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
+ db->reset_count++;
+ uli526x_dynamic_reset(dev);
+ db->timer.expires = ULI526X_TIMER_WUT;
+ add_timer(&db->timer);
+ spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
+
+ /* Link status check, Dynamic media type change */
+ if((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)!=0)
+ tmp_cr12 = 3;
+
+ if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
+ /* Link Failed */
+ ULI526X_DBUG(0, "Link Failed", tmp_cr12);
+ netif_carrier_off(dev);
+ netdev_info(dev, "NIC Link is Down\n");
+ db->link_failed = 1;
+
+		/* For forced 10/100M Half/Full mode, re-enable
+		   auto-negotiation; AUTO mode does not need this */
+ if ( !(db->media_mode & 0x8) )
+ phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
+
+ /* AUTO mode, if INT phyxcer link failed, select EXT device */
+ if (db->media_mode & ULI526X_AUTO) {
+ db->cr6_data&=~0x00000200; /* bit9=0, HD mode */
+ update_cr6(db->cr6_data, db->ioaddr);
+ }
+ } else
+ if ((tmp_cr12 & 0x3) && db->link_failed) {
+		ULI526X_DBUG(0, "Link OK", tmp_cr12);
+ db->link_failed = 0;
+
+ /* Auto Sense Speed */
+ if ( (db->media_mode & ULI526X_AUTO) &&
+ uli526x_sense_speed(db) )
+ db->link_failed = 1;
+ uli526x_process_mode(db);
+
+ if(db->link_failed==0)
+ {
+ netdev_info(dev, "NIC Link is Up %d Mbps %s duplex\n",
+ (db->op_mode == ULI526X_100MHF ||
+ db->op_mode == ULI526X_100MFD)
+ ? 100 : 10,
+ (db->op_mode == ULI526X_10MFD ||
+ db->op_mode == ULI526X_100MFD)
+ ? "Full" : "Half");
+ netif_carrier_on(dev);
+ }
+ /* SHOW_MEDIA_TYPE(db->op_mode); */
+ }
+ else if(!(tmp_cr12 & 0x3) && db->link_failed)
+ {
+ if(db->init==1)
+ {
+ netdev_info(dev, "NIC Link is Down\n");
+ netif_carrier_off(dev);
+ }
+ }
+ db->init=0;
+
+ /* Timer active again */
+ db->timer.expires = ULI526X_TIMER_WUT;
+ add_timer(&db->timer);
+ spin_unlock_irqrestore(&db->lock, flags);
+}
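
A minimal, self-contained sketch of the self-rearming timer pattern used above, written against the same pre-timer_setup() kernel timer API (media_timer and media_poll are hypothetical names, not from this driver):

    #include <linux/timer.h>
    #include <linux/netdevice.h>

    static struct timer_list media_timer;	/* hypothetical */

    /* The callback recovers the net_device from ->data, does its
     * polling work, then re-queues itself, as uli526x_timer() does. */
    static void media_poll(unsigned long data)
    {
    	struct net_device *dev = (struct net_device *)data;

    	/* ... sense link state, refill Rx buffers ... */

    	media_timer.expires = jiffies + HZ;	/* next tick in 1 second */
    	add_timer(&media_timer);
    }

    /* Arming it, typically from the open() handler: */
    init_timer(&media_timer);
    media_timer.data     = (unsigned long)dev;
    media_timer.function = media_poll;
    media_timer.expires  = jiffies + HZ;
    add_timer(&media_timer);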
+
+
+/*
+ * Stop ULI526X board
+ * Free Tx/Rx allocated memory
+ *	Initialize system variables
+ */
+
+static void uli526x_reset_prepare(struct net_device *dev)
+{
+ struct uli526x_board_info *db = netdev_priv(dev);
+
+	/* Stop MAC controller */
+ db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
+ update_cr6(db->cr6_data, dev->base_addr);
+ outl(0, dev->base_addr + DCR7); /* Disable Interrupt */
+ outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
+
+ /* Disable upper layer interface */
+ netif_stop_queue(dev);
+
+ /* Free Rx Allocate buffer */
+ uli526x_free_rxbuffer(db);
+
+ /* system variable init */
+ db->tx_packet_cnt = 0;
+ db->rx_avail_cnt = 0;
+ db->link_failed = 1;
+ db->init=1;
+ db->wait_reset = 0;
+}
+
+
+/*
+ * Dynamic reset the ULI526X board
+ * Stop ULI526X board
+ * Free Tx/Rx allocated memory
+ * Reset ULI526X board
+ * Re-initialize ULI526X board
+ */
+
+static void uli526x_dynamic_reset(struct net_device *dev)
+{
+ ULI526X_DBUG(0, "uli526x_dynamic_reset()", 0);
+
+ uli526x_reset_prepare(dev);
+
+ /* Re-initialize ULI526X board */
+ uli526x_init(dev);
+
+ /* Restart upper layer interface */
+ netif_wake_queue(dev);
+}
+
+
+#ifdef CONFIG_PM
+
+/*
+ * Suspend the interface.
+ */
+
+static int uli526x_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ pci_power_t power_state;
+ int err;
+
+ ULI526X_DBUG(0, "uli526x_suspend", 0);
+
+ if (!netdev_priv(dev))
+ return 0;
+
+ pci_save_state(pdev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ netif_device_detach(dev);
+ uli526x_reset_prepare(dev);
+
+ power_state = pci_choose_state(pdev, state);
+ pci_enable_wake(pdev, power_state, 0);
+ err = pci_set_power_state(pdev, power_state);
+ if (err) {
+ netif_device_attach(dev);
+ /* Re-initialize ULI526X board */
+ uli526x_init(dev);
+ /* Restart upper layer interface */
+ netif_wake_queue(dev);
+ }
+
+ return err;
+}
+
+/*
+ * Resume the interface.
+ */
+
+static int uli526x_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ int err;
+
+ ULI526X_DBUG(0, "uli526x_resume", 0);
+
+ if (!netdev_priv(dev))
+ return 0;
+
+ pci_restore_state(pdev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ err = pci_set_power_state(pdev, PCI_D0);
+ if (err) {
+ netdev_warn(dev, "Could not put device into D0\n");
+ return err;
+ }
+
+ netif_device_attach(dev);
+ /* Re-initialize ULI526X board */
+ uli526x_init(dev);
+ /* Restart upper layer interface */
+ netif_wake_queue(dev);
+
+ return 0;
+}
+
+#else /* !CONFIG_PM */
+
+#define uli526x_suspend NULL
+#define uli526x_resume NULL
+
+#endif /* !CONFIG_PM */
+
+
+/*
+ *	Free all allocated Rx buffers
+ */
+
+static void uli526x_free_rxbuffer(struct uli526x_board_info * db)
+{
+ ULI526X_DBUG(0, "uli526x_free_rxbuffer()", 0);
+
+ /* free allocated rx buffer */
+ while (db->rx_avail_cnt) {
+ dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
+ db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
+ db->rx_avail_cnt--;
+ }
+}
+
+
+/*
+ * Reuse the SK buffer
+ */
+
+static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * skb)
+{
+ struct rx_desc *rxptr = db->rx_insert_ptr;
+
+ if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
+ rxptr->rx_skb_ptr = skb;
+ rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
+ skb_tail_pointer(skb),
+ RX_ALLOC_SIZE,
+ PCI_DMA_FROMDEVICE));
+ wmb();
+ rxptr->rdes0 = cpu_to_le32(0x80000000);
+ db->rx_avail_cnt++;
+ db->rx_insert_ptr = rxptr->next_rx_desc;
+ } else
+ ULI526X_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
+}
+
+
+/*
+ *	Initialize the transmit/receive descriptors
+ *	using a chain structure, and allocate the Tx/Rx buffers
+ */
+
+static void uli526x_descriptor_init(struct uli526x_board_info *db, unsigned long ioaddr)
+{
+ struct tx_desc *tmp_tx;
+ struct rx_desc *tmp_rx;
+ unsigned char *tmp_buf;
+ dma_addr_t tmp_tx_dma, tmp_rx_dma;
+ dma_addr_t tmp_buf_dma;
+ int i;
+
+ ULI526X_DBUG(0, "uli526x_descriptor_init()", 0);
+
+ /* tx descriptor start pointer */
+ db->tx_insert_ptr = db->first_tx_desc;
+ db->tx_remove_ptr = db->first_tx_desc;
+ outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */
+
+ /* rx descriptor start pointer */
+ db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
+ db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
+ db->rx_insert_ptr = db->first_rx_desc;
+ db->rx_ready_ptr = db->first_rx_desc;
+ outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */
+
+ /* Init Transmit chain */
+ tmp_buf = db->buf_pool_start;
+ tmp_buf_dma = db->buf_pool_dma_start;
+ tmp_tx_dma = db->first_tx_desc_dma;
+ for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
+ tmp_tx->tx_buf_ptr = tmp_buf;
+ tmp_tx->tdes0 = cpu_to_le32(0);
+ tmp_tx->tdes1 = cpu_to_le32(0x81000000); /* IC, chain */
+ tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
+ tmp_tx_dma += sizeof(struct tx_desc);
+ tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
+ tmp_tx->next_tx_desc = tmp_tx + 1;
+ tmp_buf = tmp_buf + TX_BUF_ALLOC;
+ tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
+ }
+ (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
+ tmp_tx->next_tx_desc = db->first_tx_desc;
+
+ /* Init Receive descriptor chain */
+ tmp_rx_dma=db->first_rx_desc_dma;
+ for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
+ tmp_rx->rdes0 = cpu_to_le32(0);
+ tmp_rx->rdes1 = cpu_to_le32(0x01000600);
+ tmp_rx_dma += sizeof(struct rx_desc);
+ tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
+ tmp_rx->next_rx_desc = tmp_rx + 1;
+ }
+ (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
+ tmp_rx->next_rx_desc = db->first_rx_desc;
+
+ /* pre-allocate Rx buffer */
+ allocate_rx_buffer(db);
+}
+
+
+/*
+ * Update CR6 value
+ *	First stop the ULI526X, then write the value, and restart
+ */
+
+static void update_cr6(u32 cr6_data, unsigned long ioaddr)
+{
+
+ outl(cr6_data, ioaddr + DCR6);
+ udelay(5);
+}
+
+
+/*
+ * Send a setup frame for M5261/M5263
+ *	This setup frame initializes the ULI526X address filter
+ */
+
+#ifdef __BIG_ENDIAN
+#define FLT_SHIFT 16
+#else
+#define FLT_SHIFT 0
+#endif
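
A worked example of what FLT_SHIFT accomplishes (the MAC bytes are illustrative, not from the source): the chip consumes only the first two bytes, in memory order, of each 32-bit setup-frame entry, and the shift makes that layout identical on both byte orders.

    /* A MAC address beginning 00:40:... reads back through the u16
     * alias as 0x4000 on a little-endian host and 0x0040 on a
     * big-endian one; after the shift both store the same bytes:
     *
     *   LE: 0x4000 << 0  = 0x00004000  ->  memory: 00 40 00 00
     *   BE: 0x0040 << 16 = 0x00400000  ->  memory: 00 40 00 00
     */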
+
+static void send_filter_frame(struct net_device *dev, int mc_cnt)
+{
+ struct uli526x_board_info *db = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ struct tx_desc *txptr;
+ u16 * addrptr;
+ u32 * suptr;
+ int i;
+
+ ULI526X_DBUG(0, "send_filter_frame()", 0);
+
+ txptr = db->tx_insert_ptr;
+ suptr = (u32 *) txptr->tx_buf_ptr;
+
+ /* Node address */
+ addrptr = (u16 *) dev->dev_addr;
+ *suptr++ = addrptr[0] << FLT_SHIFT;
+ *suptr++ = addrptr[1] << FLT_SHIFT;
+ *suptr++ = addrptr[2] << FLT_SHIFT;
+
+ /* broadcast address */
+ *suptr++ = 0xffff << FLT_SHIFT;
+ *suptr++ = 0xffff << FLT_SHIFT;
+ *suptr++ = 0xffff << FLT_SHIFT;
+
+	/* fill in the multicast addresses */
+ netdev_for_each_mc_addr(ha, dev) {
+ addrptr = (u16 *) ha->addr;
+ *suptr++ = addrptr[0] << FLT_SHIFT;
+ *suptr++ = addrptr[1] << FLT_SHIFT;
+ *suptr++ = addrptr[2] << FLT_SHIFT;
+ }
+
+ for (i = netdev_mc_count(dev); i < 14; i++) {
+ *suptr++ = 0xffff << FLT_SHIFT;
+ *suptr++ = 0xffff << FLT_SHIFT;
+ *suptr++ = 0xffff << FLT_SHIFT;
+ }
+
+ /* prepare the setup frame */
+ db->tx_insert_ptr = txptr->next_tx_desc;
+ txptr->tdes1 = cpu_to_le32(0x890000c0);
+
+ /* Resource Check and Send the setup packet */
+ if (db->tx_packet_cnt < TX_DESC_CNT) {
+		/* A free Tx descriptor is available */
+ db->tx_packet_cnt++;
+ txptr->tdes0 = cpu_to_le32(0x80000000);
+ update_cr6(db->cr6_data | 0x2000, dev->base_addr);
+ outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
+ update_cr6(db->cr6_data, dev->base_addr);
+ dev->trans_start = jiffies;
+ } else
+ netdev_err(dev, "No Tx resource - Send_filter_frame!\n");
+}
+
+
+/*
+ *	Allocate Rx buffers,
+ *	filling the receive ring with as many buffers as possible
+ */
+
+static void allocate_rx_buffer(struct uli526x_board_info *db)
+{
+ struct rx_desc *rxptr;
+ struct sk_buff *skb;
+
+ rxptr = db->rx_insert_ptr;
+
+ while(db->rx_avail_cnt < RX_DESC_CNT) {
+ if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
+ break;
+ rxptr->rx_skb_ptr = skb; /* FIXME (?) */
+ rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
+ skb_tail_pointer(skb),
+ RX_ALLOC_SIZE,
+ PCI_DMA_FROMDEVICE));
+ wmb();
+ rxptr->rdes0 = cpu_to_le32(0x80000000);
+ rxptr = rxptr->next_rx_desc;
+ db->rx_avail_cnt++;
+ }
+
+ db->rx_insert_ptr = rxptr;
+}
+
+
+/*
+ * Read one word data from the serial ROM
+ */
+
+static u16 read_srom_word(long ioaddr, int offset)
+{
+ int i;
+ u16 srom_data = 0;
+ long cr9_ioaddr = ioaddr + DCR9;
+
+ outl(CR9_SROM_READ, cr9_ioaddr);
+ outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+
+ /* Send the Read Command 110b */
+ SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
+ SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
+ SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
+
+ /* Send the offset */
+ for (i = 5; i >= 0; i--) {
+ srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
+ SROM_CLK_WRITE(srom_data, cr9_ioaddr);
+ }
+
+ outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+
+ for (i = 16; i > 0; i--) {
+ outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
+ udelay(5);
+ srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
+ outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+ udelay(5);
+ }
+
+ outl(CR9_SROM_READ, cr9_ioaddr);
+ return srom_data;
+}
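
The bit sequence clocked out above follows the familiar 93C46-style microwire framing; restated compactly under that assumption:

    /* One read transaction, as driven by read_srom_word():
     *
     *   1 1 0              start bit plus READ opcode (the "110b" command)
     *   a5 a4 ... a0       six address bits, MSB first
     *   d15 d14 ... d0     sixteen data bits sampled from CR9_CRDOUT
     *
     * CR9_SRCS (chip select) stays asserted throughout, and each bit
     * is strobed with one CR9_SRCLK pulse.
     */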
+
+
+/*
+ * Auto sense the media mode
+ */
+
+static u8 uli526x_sense_speed(struct uli526x_board_info * db)
+{
+ u8 ErrFlag = 0;
+ u16 phy_mode;
+
+ phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
+ phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
+
+ if ( (phy_mode & 0x24) == 0x24 ) {
+
+ phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7);
+ if(phy_mode&0x8000)
+ phy_mode = 0x8000;
+ else if(phy_mode&0x4000)
+ phy_mode = 0x4000;
+ else if(phy_mode&0x2000)
+ phy_mode = 0x2000;
+ else
+ phy_mode = 0x1000;
+
+ switch (phy_mode) {
+ case 0x1000: db->op_mode = ULI526X_10MHF; break;
+ case 0x2000: db->op_mode = ULI526X_10MFD; break;
+ case 0x4000: db->op_mode = ULI526X_100MHF; break;
+ case 0x8000: db->op_mode = ULI526X_100MFD; break;
+ default: db->op_mode = ULI526X_10MHF; ErrFlag = 1; break;
+ }
+ } else {
+ db->op_mode = ULI526X_10MHF;
+ ULI526X_DBUG(0, "Link Failed :", phy_mode);
+ ErrFlag = 1;
+ }
+
+ return ErrFlag;
+}
+
+
+/*
+ * Set 10/100 phyxcer capability
+ * AUTO mode : phyxcer register4 is NIC capability
+ * Force mode: phyxcer register4 is the force media
+ */
+
+static void uli526x_set_phyxcer(struct uli526x_board_info *db)
+{
+ u16 phy_reg;
+
+ /* Phyxcer capability setting */
+ phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
+
+ if (db->media_mode & ULI526X_AUTO) {
+ /* AUTO Mode */
+ phy_reg |= db->PHY_reg4;
+ } else {
+ /* Force Mode */
+ switch(db->media_mode) {
+ case ULI526X_10MHF: phy_reg |= 0x20; break;
+ case ULI526X_10MFD: phy_reg |= 0x40; break;
+ case ULI526X_100MHF: phy_reg |= 0x80; break;
+ case ULI526X_100MFD: phy_reg |= 0x100; break;
+ }
+
+ }
+
+ /* Write new capability to Phyxcer Reg4 */
+ if ( !(phy_reg & 0x01e0)) {
+ phy_reg|=db->PHY_reg4;
+ db->media_mode|=ULI526X_AUTO;
+ }
+ phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
+
+ /* Restart Auto-Negotiation */
+ phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
+ udelay(50);
+}
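
The register-4 bits written above line up with the standard MII advertisement (ANAR) layout; for reference, the <linux/mii.h> names for the same masks (the driver itself uses the raw values):

    /* Register 4 (ANAR) ability bits:
     *   0x0020  10BASE-T half duplex    (ADVERTISE_10HALF)
     *   0x0040  10BASE-T full duplex    (ADVERTISE_10FULL)
     *   0x0080  100BASE-TX half duplex  (ADVERTISE_100HALF)
     *   0x0100  100BASE-TX full duplex  (ADVERTISE_100FULL)
     *   0x01e0  the whole speed/duplex ability field
     */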
+
+
+/*
+ * Process op-mode
+ *	AUTO mode : PHY controller in Auto-negotiation Mode
+ * Force mode: PHY controller in force mode with HUB
+ * N-way force capability with SWITCH
+ */
+
+static void uli526x_process_mode(struct uli526x_board_info *db)
+{
+ u16 phy_reg;
+
+ /* Full Duplex Mode Check */
+ if (db->op_mode & 0x4)
+ db->cr6_data |= CR6_FDM; /* Set Full Duplex Bit */
+ else
+ db->cr6_data &= ~CR6_FDM; /* Clear Full Duplex Bit */
+
+ update_cr6(db->cr6_data, db->ioaddr);
+
+	/* Needed only for 10/100M phyxcer force mode */
+	if ( !(db->media_mode & 0x8)) {
+		/* Force mode */
+ phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
+ if ( !(phy_reg & 0x1) ) {
+			/* partner without N-Way capability */
+ phy_reg = 0x0;
+ switch(db->op_mode) {
+ case ULI526X_10MHF: phy_reg = 0x0; break;
+ case ULI526X_10MFD: phy_reg = 0x100; break;
+ case ULI526X_100MHF: phy_reg = 0x2000; break;
+ case ULI526X_100MFD: phy_reg = 0x2100; break;
+ }
+ phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
+ }
+ }
+}
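
Likewise, the values force-written to register 0 for a partner without N-Way match the standard BMCR bit definitions:

    /* Register 0 (BMCR) encodings used above:
     *   0x0000                          10 Mbps, half duplex
     *   0x0100  BMCR_FULLDPLX           10 Mbps, full duplex
     *   0x2000  BMCR_SPEED100           100 Mbps, half duplex
     *   0x2100  SPEED100 | FULLDPLX     100 Mbps, full duplex
     */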
+
+
+/*
+ * Write a word to Phy register
+ */
+
+static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
+{
+ u16 i;
+ unsigned long ioaddr;
+
+ if(chip_id == PCI_ULI5263_ID)
+ {
+ phy_writeby_cr10(iobase, phy_addr, offset, phy_data);
+ return;
+ }
+ /* M5261/M5263 Chip */
+ ioaddr = iobase + DCR9;
+
+	/* Send 35 synchronization clocks (at least 32 preamble bits) to the PHY */
+ for (i = 0; i < 35; i++)
+ phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+
+ /* Send start command(01) to Phy */
+ phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+ phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+
+ /* Send write command(01) to Phy */
+ phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+ phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+
+ /* Send Phy address */
+ for (i = 0x10; i > 0; i = i >> 1)
+ phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+
+ /* Send register address */
+ for (i = 0x10; i > 0; i = i >> 1)
+ phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+
+	/* write turnaround bits (10) */
+ phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+ phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+
+ /* Write a word data to PHY controller */
+ for ( i = 0x8000; i > 0; i >>= 1)
+ phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+
+}
+
+
+/*
+ * Read a word data from phy register
+ */
+
+static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
+{
+ int i;
+ u16 phy_data;
+ unsigned long ioaddr;
+
+ if(chip_id == PCI_ULI5263_ID)
+ return phy_readby_cr10(iobase, phy_addr, offset);
+ /* M5261/M5263 Chip */
+ ioaddr = iobase + DCR9;
+
+	/* Send 35 synchronization clocks (at least 32 preamble bits) to the PHY */
+ for (i = 0; i < 35; i++)
+ phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+
+ /* Send start command(01) to Phy */
+ phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+ phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+
+ /* Send read command(10) to Phy */
+ phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+ phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+
+ /* Send Phy address */
+ for (i = 0x10; i > 0; i = i >> 1)
+ phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+
+ /* Send register address */
+ for (i = 0x10; i > 0; i = i >> 1)
+ phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+
+ /* Skip transition state */
+ phy_read_1bit(ioaddr, chip_id);
+
+ /* read 16bit data */
+ for (phy_data = 0, i = 0; i < 16; i++) {
+ phy_data <<= 1;
+ phy_data |= phy_read_1bit(ioaddr, chip_id);
+ }
+
+ return phy_data;
+}
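
Both phy_write() and phy_read() bit-bang a standard IEEE 802.3 clause-22 management frame (the driver clocks out 35 preamble bits where the specification requires at least 32); schematically:

    /* read:   <preamble 1s> 01 10 PPPPP RRRRR Z  DDDDDDDDDDDDDDDD
     * write:  <preamble 1s> 01 01 PPPPP RRRRR 10 DDDDDDDDDDDDDDDD
     *
     * P = PHY address bit, R = register address bit, D = data bit;
     * Z marks the turnaround where the PHY takes over the data line.
     */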
+
+static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
+{
+ unsigned long ioaddr,cr10_value;
+
+ ioaddr = iobase + DCR10;
+ cr10_value = phy_addr;
+ cr10_value = (cr10_value<<5) + offset;
+ cr10_value = (cr10_value<<16) + 0x08000000;
+ outl(cr10_value,ioaddr);
+ udelay(1);
+ while(1)
+ {
+ cr10_value = inl(ioaddr);
+ if(cr10_value&0x10000000)
+ break;
+ }
+ return cr10_value & 0x0ffff;
+}
+
+static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data)
+{
+ unsigned long ioaddr,cr10_value;
+
+ ioaddr = iobase + DCR10;
+ cr10_value = phy_addr;
+ cr10_value = (cr10_value<<5) + offset;
+ cr10_value = (cr10_value<<16) + 0x04000000 + phy_data;
+ outl(cr10_value,ioaddr);
+ udelay(1);
+}
+
+/*
+ * Write one bit data to Phy Controller
+ */
+
+static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id)
+{
+ outl(phy_data , ioaddr); /* MII Clock Low */
+ udelay(1);
+ outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */
+ udelay(1);
+ outl(phy_data , ioaddr); /* MII Clock Low */
+ udelay(1);
+}
+
+
+/*
+ * Read one bit phy data from PHY controller
+ */
+
+static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
+{
+ u16 phy_data;
+
+ outl(0x50000 , ioaddr);
+ udelay(1);
+ phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
+ outl(0x40000 , ioaddr);
+ udelay(1);
+
+ return phy_data;
+}
+
+
+static DEFINE_PCI_DEVICE_TABLE(uli526x_pci_tbl) = {
+ { 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID },
+ { 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, uli526x_pci_tbl);
+
+
+static struct pci_driver uli526x_driver = {
+ .name = "uli526x",
+ .id_table = uli526x_pci_tbl,
+ .probe = uli526x_init_one,
+ .remove = __devexit_p(uli526x_remove_one),
+ .suspend = uli526x_suspend,
+ .resume = uli526x_resume,
+};
+
+MODULE_AUTHOR("Peer Chen, peer.chen@uli.com.tw");
+MODULE_DESCRIPTION("ULi M5261/M5263 fast ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(debug, int, 0644);
+module_param(mode, int, 0);
+module_param(cr6set, int, 0);
+MODULE_PARM_DESC(debug, "ULi M5261/M5263 enable debugging (0-1)");
+MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
+
+/* Description:
+ *	When the user loads the module with insmod, the system invokes
+ *	init_module() to register the driver's services.
+ */
+
+static int __init uli526x_init_module(void)
+{
+
+ pr_info("%s\n", version);
+ printed_version = 1;
+
+ ULI526X_DBUG(0, "init_module() ", debug);
+
+ if (debug)
+ uli526x_debug = debug; /* set debug flag */
+ if (cr6set)
+ uli526x_cr6_user_set = cr6set;
+
+ switch (mode) {
+ case ULI526X_10MHF:
+ case ULI526X_100MHF:
+ case ULI526X_10MFD:
+ case ULI526X_100MFD:
+ uli526x_media_mode = mode;
+ break;
+ default:
+ uli526x_media_mode = ULI526X_AUTO;
+ break;
+ }
+
+ return pci_register_driver(&uli526x_driver);
+}
+
+
+/*
+ * Description:
+ *	When the user removes the module with rmmod, the system invokes
+ *	cleanup_module() to unregister all registered services.
+ */
+
+static void __exit uli526x_cleanup_module(void)
+{
+ ULI526X_DBUG(0, "uli526x_clean_module() ", debug);
+ pci_unregister_driver(&uli526x_driver);
+}
+
+module_init(uli526x_init_module);
+module_exit(uli526x_cleanup_module);
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
new file mode 100644
index 000000000000..4d01219ba22f
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -0,0 +1,1670 @@
+/* winbond-840.c: A Linux PCI network adapter device driver. */
+/*
+ Written 1998-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/drivers.html
+
+ Do not remove the copyright information.
+ Do not change the version information unless an improvement has been made.
+ Merely removing my name, as Compex has done in the past, does not count
+ as an improvement.
+
+ Changelog:
+ * ported to 2.4
+ ???
+ * spin lock update, memory barriers, new style dma mappings
+ limit each tx buffer to < 1024 bytes
+	remove DescIntr from Rx descriptors (that's a Tx flag)
+ remove next pointer from Tx descriptors
+ synchronize tx_q_bytes
+ software reset in tx_timeout
+ Copyright (C) 2000 Manfred Spraul
+ * further cleanups
+ power management.
+ support for big endian descriptors
+ Copyright (C) 2001 Manfred Spraul
+ * ethtool support (jgarzik)
+ * Replace some MII-related magic numbers with constants (jgarzik)
+
+ TODO:
+ * enable pci_power_off
+ * Wake-On-LAN
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME "winbond-840"
+#define DRV_VERSION "1.01-e"
+#define DRV_RELDATE "Sep-11-2006"
+
+
+/* Automatically extracted configuration info:
+probe-func: winbond840_probe
+config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840
+
+c-help-name: Winbond W89c840 PCI Ethernet support
+c-help-symbol: CONFIG_WINBOND_840
+c-help: This driver is for the Winbond W89c840 chip. It also works with
+c-help: the TX9882 chip on the Compex RL100-ATX board.
+c-help: More specific information and updates are available from
+c-help: http://www.scyld.com/network/drivers.html
+*/
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+static int max_interrupt_work = 20;
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ The '840 uses a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak;
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability.
+ The media type is usually passed in 'options[]'.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
+#define TX_QUEUE_LEN_RESTART 5
+
+#define TX_BUFLIMIT (1024-128)
+
+/* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
+ To avoid overflowing we don't queue again until we have room for a
+ full-size packet.
+ */
+#define TX_FIFO_SIZE (2048)
+#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
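
In other words, a worked reading of the constant: TX_BUG_FIFO_LIMIT = 2048 - 1514 - 16 = 518 bytes. Once more than 518 bytes are outstanding, a further maximum-sized frame could overflow the 2048-byte FIFO, so start_tx() below stops the queue at exactly that threshold on HasBrokenTx chips.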
+
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2*HZ)
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/rtnetlink.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "tulip.h"
+
+#undef PKT_BUF_SZ /* tulip.h also defines this */
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version[] __initconst =
+ "v" DRV_VERSION " (2.4 port) "
+ DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
+ " http://www.scyld.com/network/drivers.html\n";
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_param(max_interrupt_work, int, 0);
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param(multicast_filter_limit, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
+MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
+MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
+MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is for the Winbond w89c840 chip.
+
+II. Board-specific settings
+
+None.
+
+III. Driver operation
+
+This chip is very similar to the Digital 21*4* "Tulip" family. The first
+twelve registers and the descriptor format are nearly identical. Read a
+Tulip manual for operational details.
+
+A significant difference is that the multicast filter and station address are
+stored in registers rather than loaded through a pseudo-transmit packet.
+
+Unlike the Tulip, transmit buffers are limited to 1KB. To transmit a
+full-sized packet we must use both data buffers in a descriptor. Thus the
+driver uses ring mode where descriptors are implicitly sequential in memory,
+rather than using the second descriptor address as a chain pointer to
+subsequent descriptors.
+
+IV. Notes
+
+If you are going to almost clone a Tulip, why not go all the way and avoid
+the need for a new driver?
+
+IVb. References
+
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+http://www.winbond.com.tw/
+
+IVc. Errata
+
+A horrible bug exists in the transmit FIFO. Apparently the chip doesn't
+correctly detect a full FIFO, and queuing more than 2048 bytes may result in
+silent data corruption.
+
+Test with 'ping -s 10000' on a fast computer.
+
+*/
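
For concreteness, here is how the two-buffer split described above plays out for a full-sized frame; the arithmetic mirrors start_tx() later in this file (TX_BUFLIMIT is 1024 - 128 = 896):

    /* skb->len = 1514 (maximum untagged Ethernet frame):
     *
     *   buffer1 = dma_addr;          first TX_BUFLIMIT = 896 bytes
     *   buffer2 = dma_addr + 896;    remaining 618 bytes
     *   length  = DescWholePkt | (618 << 11) | 896;
     *
     * Both pieces stay under the chip's 1 KB per-buffer limit, so a
     * single descriptor still carries the whole packet.
     */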
+
+
+
+/*
+ PCI probe table.
+*/
+enum chip_capability_flags {
+ CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(w840_pci_tbl) = {
+ { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
+ { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, w840_pci_tbl);
+
+enum {
+ netdev_res_size = 128, /* size of PCI BAR resource */
+};
+
+struct pci_id_info {
+ const char *name;
+ int drv_flags; /* Driver use, intended as capability flags. */
+};
+
+static const struct pci_id_info pci_id_tbl[] __devinitdata = {
+ { /* Sometime a Level-One switch card. */
+ "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
+ { "Winbond W89c840", CanHaveMII | HasBrokenTx},
+ { "Compex RL100-ATX", CanHaveMII | HasBrokenTx},
+ { } /* terminate list. */
+};
+
+/* This driver was written to use PCI memory space, however some x86 systems
+ work only with I/O space accesses. See CONFIG_TULIP_MMIO in .config
+*/
+
+/* Offsets to the Command and Status Registers, "CSRs".
+ While similar to the Tulip, these registers are longword aligned.
+ Note: It's not useful to define symbolic names for every register bit in
+ the device. The name can only partially document the semantics and make
+ the driver longer and more difficult to read.
+*/
+enum w840_offsets {
+ PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
+ RxRingPtr=0x0C, TxRingPtr=0x10,
+ IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
+ RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
+ CurRxDescAddr=0x30, CurRxBufAddr=0x34, /* Debug use */
+ MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
+ CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
+};
+
+/* Bits in the NetworkConfig register. */
+enum rx_mode_bits {
+ AcceptErr=0x80,
+ RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
+ RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
+};
+
+enum mii_reg_bits {
+ MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
+ MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
+};
+
+/* The Tulip Rx and Tx buffer descriptors. */
+struct w840_rx_desc {
+ s32 status;
+ s32 length;
+ u32 buffer1;
+ u32 buffer2;
+};
+
+struct w840_tx_desc {
+ s32 status;
+ s32 length;
+ u32 buffer1, buffer2;
+};
+
+#define MII_CNT 1 /* winbond only supports one MII */
+struct netdev_private {
+ struct w840_rx_desc *rx_ring;
+ dma_addr_t rx_addr[RX_RING_SIZE];
+ struct w840_tx_desc *tx_ring;
+ dma_addr_t tx_addr[TX_RING_SIZE];
+ dma_addr_t ring_dma_addr;
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ /* Frequently used values: keep some adjacent for cache effect. */
+ spinlock_t lock;
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+ int csr6;
+ struct w840_rx_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_q_bytes;
+ unsigned int tx_full; /* The Tx queue is full. */
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ unsigned char phys[MII_CNT]; /* MII device addresses, but only the first is used */
+ u32 mii;
+ struct mii_if_info mii_if;
+ void __iomem *base_addr;
+};
+
+static int eeprom_read(void __iomem *ioaddr, int location);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int netdev_open(struct net_device *dev);
+static int update_link(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void init_rxtx_rings(struct net_device *dev);
+static void free_rxtx_rings(struct netdev_private *np);
+static void init_registers(struct net_device *dev);
+static void tx_timeout(struct net_device *dev);
+static int alloc_ringdesc(struct net_device *dev);
+static void free_ringdesc(struct netdev_private *np);
+static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t intr_handler(int irq, void *dev_instance);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int netdev_rx(struct net_device *dev);
+static u32 __set_rx_mode(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static const struct ethtool_ops netdev_ethtool_ops;
+static int netdev_close(struct net_device *dev);
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = netdev_open,
+ .ndo_stop = netdev_close,
+ .ndo_start_xmit = start_tx,
+ .ndo_get_stats = get_stats,
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_do_ioctl = netdev_ioctl,
+ .ndo_tx_timeout = tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __devinit w840_probe1 (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ static int find_cnt;
+ int chip_idx = ent->driver_data;
+ int irq;
+ int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
+ void __iomem *ioaddr;
+
+ i = pci_enable_device(pdev);
+ if (i) return i;
+
+ pci_set_master(pdev);
+
+ irq = pdev->irq;
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ pr_warn("Device %s disabled due to DMA limitations\n",
+ pci_name(pdev));
+ return -EIO;
+ }
+ dev = alloc_etherdev(sizeof(*np));
+ if (!dev)
+ return -ENOMEM;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (pci_request_regions(pdev, DRV_NAME))
+ goto err_out_netdev;
+
+ ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
+ if (!ioaddr)
+ goto err_out_free_res;
+
+ for (i = 0; i < 3; i++)
+ ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
+
+ /* Reset the chip to erase previous misconfiguration.
+ No hold time required! */
+ iowrite32(0x00000001, ioaddr + PCIBusCfg);
+
+ dev->base_addr = (unsigned long)ioaddr;
+ dev->irq = irq;
+
+ np = netdev_priv(dev);
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+ spin_lock_init(&np->lock);
+ np->mii_if.dev = dev;
+ np->mii_if.mdio_read = mdio_read;
+ np->mii_if.mdio_write = mdio_write;
+ np->base_addr = ioaddr;
+
+ pci_set_drvdata(pdev, dev);
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ if (option & 0x200)
+ np->mii_if.full_duplex = 1;
+ if (option & 15)
+ dev_info(&dev->dev,
+ "ignoring user supplied media type %d",
+ option & 15);
+ }
+ if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
+ np->mii_if.full_duplex = 1;
+
+ if (np->mii_if.full_duplex)
+ np->mii_if.force_media = 1;
+
+ /* The chip-specific entries in the device structure. */
+ dev->netdev_ops = &netdev_ops;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ i = register_netdev(dev);
+ if (i)
+ goto err_out_cleardev;
+
+ dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
+ pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);
+
+ if (np->drv_flags & CanHaveMII) {
+ int phy, phy_idx = 0;
+ for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
+ int mii_status = mdio_read(dev, phy, MII_BMSR);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
+ np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
+ mdio_read(dev, phy, MII_PHYSID2);
+ dev_info(&dev->dev,
+ "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
+ np->mii, phy, mii_status,
+ np->mii_if.advertising);
+ }
+ }
+ np->mii_cnt = phy_idx;
+ np->mii_if.phy_id = np->phys[0];
+ if (phy_idx == 0) {
+ dev_warn(&dev->dev,
+ "MII PHY not found -- this device may not operate correctly\n");
+ }
+ }
+
+ find_cnt++;
+ return 0;
+
+err_out_cleardev:
+ pci_set_drvdata(pdev, NULL);
+ pci_iounmap(pdev, ioaddr);
+err_out_free_res:
+ pci_release_regions(pdev);
+err_out_netdev:
+ free_netdev (dev);
+ return -ENODEV;
+}
+
+
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are
+ often serial bit streams generated by the host processor.
+ The example below is for the common 93c46 EEPROM, 64 16 bit words. */
+
+/* Delay between EEPROM clock transitions.
+   No extra delay is needed with 33MHz PCI, but future 66MHz access may need
+ a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
+ made udelay() unreliable.
+ The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
+ deprecated.
+*/
+#define eeprom_delay(ee_addr) ioread32(ee_addr)
+
+enum EEPROM_Ctrl_Bits {
+ EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
+ EE_ChipSelect=0x801, EE_DataIn=0x08,
+};
+
+/* The EEPROM commands include the always-set leading bit. */
+enum EEPROM_Cmds {
+ EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
+};
+
+static int eeprom_read(void __iomem *addr, int location)
+{
+ int i;
+ int retval = 0;
+ void __iomem *ee_addr = addr + EECtrl;
+ int read_cmd = location | EE_ReadCmd;
+ iowrite32(EE_ChipSelect, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 10; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
+ iowrite32(dataval, ee_addr);
+ eeprom_delay(ee_addr);
+ iowrite32(dataval | EE_ShiftClk, ee_addr);
+ eeprom_delay(ee_addr);
+ }
+ iowrite32(EE_ChipSelect, ee_addr);
+ eeprom_delay(ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
+ eeprom_delay(ee_addr);
+ retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
+ iowrite32(EE_ChipSelect, ee_addr);
+ eeprom_delay(ee_addr);
+ }
+
+ /* Terminate the EEPROM access. */
+ iowrite32(0, ee_addr);
+ return retval;
+}
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details.
+
+   The maximum data clock rate is 2.5 MHz. The minimum timing is usually
+   met by back-to-back 33MHz PCI cycles. */
+#define mdio_delay(mdio_addr) ioread32(mdio_addr)
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+   This is only set for older transceivers, so the extra
+ code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required = 1;
+
+#define MDIO_WRITE0 (MDIO_EnbOutput)
+#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+static void mdio_sync(void __iomem *mdio_addr)
+{
+ int bits = 32;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ while (--bits >= 0) {
+ iowrite32(MDIO_WRITE1, mdio_addr);
+ mdio_delay(mdio_addr);
+ iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+}
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *mdio_addr = np->base_addr + MIICtrl;
+ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ int i, retval = 0;
+
+ if (mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ iowrite32(dataval, mdio_addr);
+ mdio_delay(mdio_addr);
+ iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 20; i > 0; i--) {
+ iowrite32(MDIO_EnbIn, mdio_addr);
+ mdio_delay(mdio_addr);
+ retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
+ iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *mdio_addr = np->base_addr + MIICtrl;
+ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
+ int i;
+
+ if (location == 4 && phy_id == np->phys[0])
+ np->mii_if.advertising = value;
+
+ if (mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ iowrite32(dataval, mdio_addr);
+ mdio_delay(mdio_addr);
+ iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ iowrite32(MDIO_EnbIn, mdio_addr);
+ mdio_delay(mdio_addr);
+ iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+}
+
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+ int i;
+
+ iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */
+
+ netif_device_detach(dev);
+ i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
+ if (i)
+ goto out_err;
+
+ if (debug > 1)
+ netdev_dbg(dev, "w89c840_open() irq %d\n", dev->irq);
+
+ if((i=alloc_ringdesc(dev)))
+ goto out_err;
+
+ spin_lock_irq(&np->lock);
+ netif_device_attach(dev);
+ init_registers(dev);
+ spin_unlock_irq(&np->lock);
+
+ netif_start_queue(dev);
+ if (debug > 2)
+ netdev_dbg(dev, "Done netdev_open()\n");
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + 1*HZ;
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = netdev_timer; /* timer handler */
+ add_timer(&np->timer);
+ return 0;
+out_err:
+ netif_device_attach(dev);
+ return i;
+}
+
+#define MII_DAVICOM_DM9101 0x0181b800
+
+static int update_link(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int duplex, fasteth, result, mii_reg;
+
+	/* BMSR */
+ mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
+
+ if (mii_reg == 0xffff)
+ return np->csr6;
+ /* reread: the link status bit is sticky */
+ mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
+ if (!(mii_reg & 0x4)) {
+ if (netif_carrier_ok(dev)) {
+ if (debug)
+ dev_info(&dev->dev,
+ "MII #%d reports no link. Disabling watchdog\n",
+ np->phys[0]);
+ netif_carrier_off(dev);
+ }
+ return np->csr6;
+ }
+ if (!netif_carrier_ok(dev)) {
+ if (debug)
+ dev_info(&dev->dev,
+ "MII #%d link is back. Enabling watchdog\n",
+ np->phys[0]);
+ netif_carrier_on(dev);
+ }
+
+ if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
+		/* If the link partner doesn't support autonegotiation,
+		 * the MII detects its abilities via "parallel detection".
+		 * Some MIIs update the LPA register to the result of the
+		 * parallel detection, some don't.
+		 * The Davicom PHY [at least 0181b800] doesn't.
+		 * Instead, bits 9 and 13 of the BMCR are updated to the
+		 * result of the negotiation.
+		 */
+ mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
+ duplex = mii_reg & BMCR_FULLDPLX;
+ fasteth = mii_reg & BMCR_SPEED100;
+ } else {
+ int negotiated;
+ mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
+ negotiated = mii_reg & np->mii_if.advertising;
+
+ duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
+ fasteth = negotiated & 0x380;
+ }
+ duplex |= np->mii_if.force_media;
+	/* clear the fast-ethernet and full-duplex bits */
+ result = np->csr6 & ~0x20000200;
+ if (duplex)
+ result |= 0x200;
+ if (fasteth)
+ result |= 0x20000000;
+ if (result != np->csr6 && debug)
+ dev_info(&dev->dev,
+ "Setting %dMBit-%s-duplex based on MII#%d\n",
+ fasteth ? 100 : 10, duplex ? "full" : "half",
+ np->phys[0]);
+ return result;
+}
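
The magic masks in the LPA branch above decode to the standard link-partner-ability bits:

    /* LPA (register 5) masks:
     *   0x0380 = LPA_100BASE4 | LPA_100FULL | LPA_100HALF   any 100 Mbps mode
     *   0x02C0 = LPA_100BASE4 | LPA_100HALF | LPA_10FULL
     *
     * so "(negotiated & 0x02C0) == LPA_10FULL" holds only when 10FULL
     * was negotiated and no 100 Mbps half-duplex mode competes with it.
     */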
+
+#define RXTX_TIMEOUT 2000
+static inline void update_csr6(struct net_device *dev, int new)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+ int limit = RXTX_TIMEOUT;
+
+ if (!netif_device_present(dev))
+ new = 0;
+ if (new==np->csr6)
+ return;
+ /* stop both Tx and Rx processes */
+ iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
+ /* wait until they have really stopped */
+ for (;;) {
+ int csr5 = ioread32(ioaddr + IntrStatus);
+ int t;
+
+ t = (csr5 >> 17) & 0x07;
+ if (t==0||t==1) {
+ /* rx stopped */
+ t = (csr5 >> 20) & 0x07;
+ if (t==0||t==1)
+ break;
+ }
+
+ limit--;
+ if(!limit) {
+ dev_info(&dev->dev,
+ "couldn't stop rxtx, IntrStatus %xh\n", csr5);
+ break;
+ }
+ udelay(1);
+ }
+ np->csr6 = new;
+ /* and restart them with the new configuration */
+ iowrite32(np->csr6, ioaddr + NetworkConfig);
+ if (new & 0x200)
+ np->mii_if.full_duplex = 1;
+}
+
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+
+ if (debug > 2)
+ netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n",
+ ioread32(ioaddr + IntrStatus),
+ ioread32(ioaddr + NetworkConfig));
+ spin_lock_irq(&np->lock);
+ update_csr6(dev, update_link(dev));
+ spin_unlock_irq(&np->lock);
+ np->timer.expires = jiffies + 10*HZ;
+ add_timer(&np->timer);
+}
+
+static void init_rxtx_rings(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ np->rx_head_desc = &np->rx_ring[0];
+ np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];
+
+	/* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].length = np->rx_buf_sz;
+ np->rx_ring[i].status = 0;
+ np->rx_skbuff[i] = NULL;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ np->rx_ring[i-1].length |= DescEndRing;
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
+ np->rx_buf_sz,PCI_DMA_FROMDEVICE);
+
+ np->rx_ring[i].buffer1 = np->rx_addr[i];
+ np->rx_ring[i].status = DescOwned;
+ }
+
+ np->cur_rx = 0;
+ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ /* Initialize the Tx descriptors */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = NULL;
+ np->tx_ring[i].status = 0;
+ }
+ np->tx_full = 0;
+ np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;
+
+ iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
+ iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
+ np->base_addr + TxRingPtr);
+
+}
+
+static void free_rxtx_rings(struct netdev_private* np)
+{
+ int i;
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].status = 0;
+ if (np->rx_skbuff[i]) {
+ pci_unmap_single(np->pci_dev,
+ np->rx_addr[i],
+ np->rx_skbuff[i]->len,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = NULL;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (np->tx_skbuff[i]) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_addr[i],
+ np->tx_skbuff[i]->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(np->tx_skbuff[i]);
+ }
+ np->tx_skbuff[i] = NULL;
+ }
+}
+
+static void init_registers(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
+
+ /* Initialize other registers. */
+#ifdef __BIG_ENDIAN
+ i = (1<<20); /* Big-endian descriptors */
+#else
+ i = 0;
+#endif
+ i |= (0x04<<2); /* skip length 4 u32 */
+ i |= 0x02; /* give Rx priority */
+
+ /* Configure the PCI bus bursts and FIFO thresholds.
+ 486: Set 8 longword cache alignment, 8 longword burst.
+ 586: Set 16 longword cache alignment, no burst limit.
+ Cache alignment bits 15:14 Burst length 13:8
+ 0000 <not allowed> 0000 align to cache 0800 8 longwords
+ 4000 8 longwords 0100 1 longword 1000 16 longwords
+ 8000 16 longwords 0200 2 longwords 2000 32 longwords
+ C000 32 longwords 0400 4 longwords */
+
+#if defined (__i386__) && !defined(MODULE)
+ /* When not a module we can work around broken '486 PCI boards. */
+ if (boot_cpu_data.x86 <= 4) {
+ i |= 0x4800;
+ dev_info(&dev->dev,
+ "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
+ } else {
+ i |= 0xE000;
+ }
+#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
+ i |= 0xE000;
+#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC)
+ i |= 0x4800;
+#else
+#warning Processor architecture undefined
+ i |= 0x4800;
+#endif
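
The two constants chosen above decode against the bit table as follows:

    /* 0x4800 = 0x4000 | 0x0800  ->  align to 8 longwords,  burst 8 longwords
     * 0xE000 = 0xC000 | 0x2000  ->  align to 32 longwords, burst 32 longwords
     */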
+ iowrite32(i, ioaddr + PCIBusCfg);
+
+ np->csr6 = 0;
+ /* 128 byte Tx threshold;
+ Transmit on; Receive on; */
+ update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));
+
+ /* Clear and Enable interrupts by setting the interrupt mask. */
+ iowrite32(0x1A0F5, ioaddr + IntrStatus);
+ iowrite32(0x1A0F5, ioaddr + IntrEnable);
+
+ iowrite32(0, ioaddr + RxStartDemand);
+}
+
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+
+ dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
+ ioread32(ioaddr + IntrStatus));
+
+ {
+ int i;
+ printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
+ printk(KERN_CONT "\n");
+ printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(KERN_CONT " %08x", np->tx_ring[i].status);
+ printk(KERN_CONT "\n");
+ }
+ printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
+ np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
+ printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));
+
+ disable_irq(dev->irq);
+ spin_lock_irq(&np->lock);
+ /*
+ * Under high load dirty_tx and the internal tx descriptor pointer
+ * come out of sync, thus perform a software reset and reinitialize
+ * everything.
+ */
+
+ iowrite32(1, np->base_addr+PCIBusCfg);
+ udelay(1);
+
+ free_rxtx_rings(np);
+ init_rxtx_rings(dev);
+ init_registers(dev);
+ spin_unlock_irq(&np->lock);
+ enable_irq(dev->irq);
+
+ netif_wake_queue(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ np->stats.tx_errors++;
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static int alloc_ringdesc(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+
+ np->rx_ring = pci_alloc_consistent(np->pci_dev,
+ sizeof(struct w840_rx_desc)*RX_RING_SIZE +
+ sizeof(struct w840_tx_desc)*TX_RING_SIZE,
+ &np->ring_dma_addr);
+ if(!np->rx_ring)
+ return -ENOMEM;
+ init_rxtx_rings(dev);
+ return 0;
+}
+
+static void free_ringdesc(struct netdev_private *np)
+{
+ pci_free_consistent(np->pci_dev,
+ sizeof(struct w840_rx_desc)*RX_RING_SIZE +
+ sizeof(struct w840_tx_desc)*TX_RING_SIZE,
+ np->rx_ring, np->ring_dma_addr);
+
+}
+
+static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ unsigned entry;
+
+ /* Caution: the write order is important here, set the field
+ with the "ownership" bits last. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+
+ np->tx_addr[entry] = pci_map_single(np->pci_dev,
+ skb->data,skb->len, PCI_DMA_TODEVICE);
+ np->tx_skbuff[entry] = skb;
+
+ np->tx_ring[entry].buffer1 = np->tx_addr[entry];
+ if (skb->len < TX_BUFLIMIT) {
+ np->tx_ring[entry].length = DescWholePkt | skb->len;
+ } else {
+ int len = skb->len - TX_BUFLIMIT;
+
+ np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
+ np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
+ }
+ if(entry == TX_RING_SIZE-1)
+ np->tx_ring[entry].length |= DescEndRing;
+
+ /* Now acquire the irq spinlock.
+ * The difficult race is the ordering between
+ * increasing np->cur_tx and setting DescOwned:
+ * - if np->cur_tx is increased first the interrupt
+ * handler could consider the packet as transmitted
+ * since DescOwned is cleared.
+ * - If DescOwned is set first the NIC could report the
+ * packet as sent, but the interrupt handler would ignore it
+ * since the np->cur_tx was not yet increased.
+ */
+ spin_lock_irq(&np->lock);
+ np->cur_tx++;
+
+ wmb(); /* flush length, buffer1, buffer2 */
+ np->tx_ring[entry].status = DescOwned;
+ wmb(); /* flush status and kick the hardware */
+ iowrite32(0, np->base_addr + TxStartDemand);
+ np->tx_q_bytes += skb->len;
+ /* Work around horrible bug in the chip by marking the queue as full
+ when we do not have FIFO room for a maximum sized packet. */
+ if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
+ ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
+ netif_stop_queue(dev);
+ wmb();
+ np->tx_full = 1;
+ }
+ spin_unlock_irq(&np->lock);
+
+ if (debug > 4) {
+ netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
+ np->cur_tx, entry);
+ }
+ return NETDEV_TX_OK;
+}
+
+static void netdev_tx_done(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ int tx_status = np->tx_ring[entry].status;
+
+ if (tx_status < 0)
+ break;
+ if (tx_status & 0x8000) { /* There was an error, log it. */
+#ifndef final_version
+ if (debug > 1)
+ netdev_dbg(dev, "Transmit error, Tx status %08x\n",
+ tx_status);
+#endif
+ np->stats.tx_errors++;
+ if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
+ if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
+ if (tx_status & 0x0200) np->stats.tx_window_errors++;
+ if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
+ if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
+ np->stats.tx_heartbeat_errors++;
+ } else {
+#ifndef final_version
+ if (debug > 3)
+ netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n",
+ entry, tx_status);
+#endif
+ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+ np->stats.collisions += (tx_status >> 3) & 15;
+ np->stats.tx_packets++;
+ }
+ /* Free the original skb. */
+ pci_unmap_single(np->pci_dev,np->tx_addr[entry],
+ np->tx_skbuff[entry]->len,
+ PCI_DMA_TODEVICE);
+ np->tx_q_bytes -= np->tx_skbuff[entry]->len;
+ dev_kfree_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = NULL;
+ }
+ if (np->tx_full &&
+ np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
+ np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
+ /* The ring is no longer full, clear tbusy. */
+ np->tx_full = 0;
+ wmb();
+ netif_wake_queue(dev);
+ }
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t intr_handler(int irq, void *dev_instance)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+ int work_limit = max_interrupt_work;
+ int handled = 0;
+
+ if (!netif_device_present(dev))
+ return IRQ_NONE;
+ do {
+ u32 intr_status = ioread32(ioaddr + IntrStatus);
+
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);
+
+ if (debug > 4)
+ netdev_dbg(dev, "Interrupt, status %04x\n", intr_status);
+
+ if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
+ break;
+
+ handled = 1;
+
+ if (intr_status & (RxIntr | RxNoBuf))
+ netdev_rx(dev);
+ if (intr_status & RxNoBuf)
+ iowrite32(0, ioaddr + RxStartDemand);
+
+ if (intr_status & (TxNoBuf | TxIntr) &&
+ np->cur_tx != np->dirty_tx) {
+ spin_lock(&np->lock);
+ netdev_tx_done(dev);
+ spin_unlock(&np->lock);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError |
+ TimerInt | TxDied))
+ netdev_error(dev, intr_status);
+
+ if (--work_limit < 0) {
+ dev_warn(&dev->dev,
+ "Too much work at interrupt, status=0x%04x\n",
+ intr_status);
+ /* Set the timer to re-enable the other interrupts after
+ 10*82usec ticks. */
+ spin_lock(&np->lock);
+ if (netif_device_present(dev)) {
+ iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
+ iowrite32(10, ioaddr + GPTimer);
+ }
+ spin_unlock(&np->lock);
+ break;
+ }
+ } while (1);
+
+ if (debug > 3)
+ netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n",
+ ioread32(ioaddr + IntrStatus));
+ return IRQ_RETVAL(handled);
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+
+ if (debug > 4) {
+ netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n",
+ entry, np->rx_ring[entry].status);
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (--work_limit >= 0) {
+ struct w840_rx_desc *desc = np->rx_head_desc;
+ s32 status = desc->status;
+
+ if (debug > 4)
+ netdev_dbg(dev, " netdev_rx() status was %08x\n",
+ status);
+ if (status < 0)
+ break;
+ if ((status & 0x38008300) != 0x0300) {
+ if ((status & 0x38000300) != 0x0300) {
+				/* Ignore earlier buffers. */
+ if ((status & 0xffff) != 0x7fff) {
+ dev_warn(&dev->dev,
+ "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
+ np->cur_rx, status);
+ np->stats.rx_length_errors++;
+ }
+ } else if (status & 0x8000) {
+ /* There was a fatal error. */
+ if (debug > 2)
+ netdev_dbg(dev, "Receive error, Rx status %08x\n",
+ status);
+ np->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x0890) np->stats.rx_length_errors++;
+ if (status & 0x004C) np->stats.rx_frame_errors++;
+ if (status & 0x0002) np->stats.rx_crc_errors++;
+ }
+ } else {
+ struct sk_buff *skb;
+ /* Omit the four octet CRC from the length. */
+ int pkt_len = ((status >> 16) & 0x7ff) - 4;
+
+#ifndef final_version
+ if (debug > 4)
+ netdev_dbg(dev, " netdev_rx() normal Rx pkt length %d status %x\n",
+ pkt_len, status);
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak &&
+ (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
+ np->rx_skbuff[entry]->len,
+ PCI_DMA_FROMDEVICE);
+ skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
+ skb_put(skb, pkt_len);
+ pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
+ np->rx_skbuff[entry]->len,
+ PCI_DMA_FROMDEVICE);
+ } else {
+ pci_unmap_single(np->pci_dev,np->rx_addr[entry],
+ np->rx_skbuff[entry]->len,
+ PCI_DMA_FROMDEVICE);
+ skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+ }
+#ifndef final_version /* Remove after testing. */
+ /* You will want this info for the initial debug. */
+ if (debug > 5)
+ netdev_dbg(dev, " Rx data %pM %pM %02x%02x %pI4\n",
+ &skb->data[0], &skb->data[6],
+ skb->data[12], skb->data[13],
+ &skb->data[14]);
+#endif
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ np->stats.rx_packets++;
+ np->stats.rx_bytes += pkt_len;
+ }
+ entry = (++np->cur_rx) % RX_RING_SIZE;
+ np->rx_head_desc = &np->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ np->rx_addr[entry] = pci_map_single(np->pci_dev,
+ skb->data,
+ np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ np->rx_ring[entry].buffer1 = np->rx_addr[entry];
+ }
+ wmb();
+ np->rx_ring[entry].status = DescOwned;
+ }
+
+ return 0;
+}
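+
+/* Illustrative aside, not from the patch: the rx_copybreak trade-off
+ applied in netdev_rx() above, as a hypothetical helper. Small frames
+ are copied into a fresh, minimally sized skb so the large ring buffer
+ stays mapped for the NIC; large frames hand the ring skb upstream and
+ leave the slot for the refill loop. The DMA sync/unmap steps are
+ elided here for brevity. */
+#if 0
+static struct sk_buff *claim_rx_skb(struct netdev_private *np, int entry,
+ int pkt_len)
+{
+ struct sk_buff *skb;
+
+ if (pkt_len < rx_copybreak &&
+ (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
+ skb_put(skb, pkt_len); /* ring buffer stays in place */
+ } else {
+ skb = np->rx_skbuff[entry]; /* hand off the ring skb */
+ skb_put(skb, pkt_len);
+ np->rx_skbuff[entry] = NULL; /* slot refilled later */
+ }
+ return skb;
+}
+#endif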
+
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+
+ if (debug > 2)
+ netdev_dbg(dev, "Abnormal event, %08x\n", intr_status);
+ if (intr_status == 0xffffffff)
+ return;
+ spin_lock(&np->lock);
+ if (intr_status & TxFIFOUnderflow) {
+ int new;
+ /* Bump up the Tx threshold */
+#if 0
+ /* This causes lots of dropped packets,
+ * and under high load even tx_timeouts
+ */
+ new = np->csr6 + 0x4000;
+#else
+ new = (np->csr6 >> 14)&0x7f;
+ if (new < 64)
+ new *= 2;
+ else
+ new = 127; /* load full packet before starting */
+ new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
+#endif
+ netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new);
+ update_csr6(dev, new);
+ }
+ if (intr_status & RxDied) { /* Missed a Rx frame. */
+ np->stats.rx_errors++;
+ }
+ if (intr_status & TimerInt) {
+ /* Re-enable other interrupts. */
+ if (netif_device_present(dev))
+ iowrite32(0x1A0F5, ioaddr + IntrEnable);
+ }
+ np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
+ iowrite32(0, ioaddr + RxStartDemand);
+ spin_unlock(&np->lock);
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+
+ /* The chip only needs to report frames it silently dropped. */
+ spin_lock_irq(&np->lock);
+ if (netif_running(dev) && netif_device_present(dev))
+ np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
+ spin_unlock_irq(&np->lock);
+
+ return &np->stats;
+}
+
+
+static u32 __set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ u32 rx_mode;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
+ | AcceptMyPhys;
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ } else {
+ struct netdev_hw_addr *ha;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ int filbit;
+
+ filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
+ filbit &= 0x3f;
+ mc_filter[filbit >> 5] |= 1 << (filbit & 31);
+ }
+ rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ }
+ iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
+ iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
+ return rx_mode;
+}
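+
+/* Illustrative aside, not from the patch: how one multicast address
+ lands in the 64-bit hash filter built above. ether_crc() is the
+ kernel's Ethernet CRC-32; the six selected bits index two 32-bit
+ filter words. */
+#if 0
+static void hash_one_addr(u32 mc_filter[2], const u8 *addr)
+{
+ int filbit = (ether_crc(ETH_ALEN, addr) >> 26) ^ 0x3F;
+
+ filbit &= 0x3f; /* 0..63 */
+ mc_filter[filbit >> 5] |= 1 << (filbit & 31);
+}
+#endif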
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ u32 rx_mode = __set_rx_mode(dev);
+ spin_lock_irq(&np->lock);
+ update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
+ spin_unlock_irq(&np->lock);
+}
+
+static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ strcpy (info->driver, DRV_NAME);
+ strcpy (info->version, DRV_VERSION);
+ strcpy (info->bus_info, pci_name(np->pci_dev));
+}
+
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&np->lock);
+ rc = mii_ethtool_gset(&np->mii_if, cmd);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&np->lock);
+ rc = mii_ethtool_sset(&np->mii_if, cmd);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+static int netdev_nway_reset(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_nway_restart(&np->mii_if);
+}
+
+static u32 netdev_get_link(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_link_ok(&np->mii_if);
+}
+
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 value)
+{
+ debug = value;
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_settings = netdev_get_settings,
+ .set_settings = netdev_set_settings,
+ .nway_reset = netdev_nway_reset,
+ .get_link = netdev_get_link,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+};
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(rq);
+ struct netdev_private *np = netdev_priv(dev);
+
+ switch(cmd) {
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f;
+ /* Fall Through */
+
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ spin_lock_irq(&np->lock);
+ data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
+ spin_unlock_irq(&np->lock);
+ return 0;
+
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ spin_lock_irq(&np->lock);
+ mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
+ spin_unlock_irq(&np->lock);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int netdev_close(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+
+ netif_stop_queue(dev);
+
+ if (debug > 1) {
+ netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n",
+ ioread32(ioaddr + IntrStatus),
+ ioread32(ioaddr + NetworkConfig));
+ netdev_dbg(dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
+ np->cur_tx, np->dirty_tx,
+ np->cur_rx, np->dirty_rx);
+ }
+
+ /* Stop the chip's Tx and Rx processes. */
+ spin_lock_irq(&np->lock);
+ netif_device_detach(dev);
+ update_csr6(dev, 0);
+ iowrite32(0x0000, ioaddr + IntrEnable);
+ spin_unlock_irq(&np->lock);
+
+ free_irq(dev->irq, dev);
+ wmb();
+ netif_device_attach(dev);
+
+ if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
+ np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
+
+#ifdef __i386__
+ if (debug > 2) {
+ int i;
+
+ printk(KERN_DEBUG" Tx ring at %p:\n", np->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
+ i, np->tx_ring[i].length,
+ np->tx_ring[i].status, np->tx_ring[i].buffer1);
+ printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
+ i, np->rx_ring[i].length,
+ np->rx_ring[i].status, np->rx_ring[i].buffer1);
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ del_timer_sync(&np->timer);
+
+ free_rxtx_rings(np);
+ free_ringdesc(np);
+
+ return 0;
+}
+
+static void __devexit w840_remove1 (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct netdev_private *np = netdev_priv(dev);
+ unregister_netdev(dev);
+ pci_release_regions(pdev);
+ pci_iounmap(pdev, np->base_addr);
+ free_netdev(dev);
+ }
+
+ pci_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM
+
+/*
+ * suspend/resume synchronization:
+ * - open, close, do_ioctl:
+ * rtnl_lock, & netif_device_detach after the rtnl_unlock.
+ * - get_stats:
+ * spin_lock_irq(np->lock), doesn't touch hw if not present
+ * - start_xmit:
+ * synchronize_irq + netif_tx_disable;
+ * - tx_timeout:
+ * netif_device_detach + netif_tx_disable;
+ * - set_multicast_list
+ * netif_device_detach + netif_tx_disable;
+ * - interrupt handler
+ * doesn't touch hw if not present, synchronize_irq waits for
+ * running instances of the interrupt handler.
+ *
+ * Disabling hw requires clearing csr6 & IntrEnable.
+ * update_csr6 & all functions that write IntrEnable check netif_device_present
+ * before setting any bits.
+ *
+ * Detach must occur while holding spin_lock_irq(); interrupts from a
+ * detached device would cause an irq storm.
+ */
+static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
+
+ rtnl_lock();
+ if (netif_running (dev)) {
+ del_timer_sync(&np->timer);
+
+ spin_lock_irq(&np->lock);
+ netif_device_detach(dev);
+ update_csr6(dev, 0);
+ iowrite32(0, ioaddr + IntrEnable);
+ spin_unlock_irq(&np->lock);
+
+ synchronize_irq(dev->irq);
+ netif_tx_disable(dev);
+
+ np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
+
+ /* no more hardware accesses behind this line. */
+
+ BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));
+
+ /* pci_power_off(pdev, -1); */
+
+ free_rxtx_rings(np);
+ } else {
+ netif_device_detach(dev);
+ }
+ rtnl_unlock();
+ return 0;
+}
+
+static int w840_resume (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct netdev_private *np = netdev_priv(dev);
+ int retval = 0;
+
+ rtnl_lock();
+ if (netif_device_present(dev))
+ goto out; /* device not suspended */
+ if (netif_running(dev)) {
+ if ((retval = pci_enable_device(pdev))) {
+ dev_err(&dev->dev,
+ "pci_enable_device failed in resume\n");
+ goto out;
+ }
+ spin_lock_irq(&np->lock);
+ iowrite32(1, np->base_addr+PCIBusCfg);
+ ioread32(np->base_addr+PCIBusCfg);
+ udelay(1);
+ netif_device_attach(dev);
+ init_rxtx_rings(dev);
+ init_registers(dev);
+ spin_unlock_irq(&np->lock);
+
+ netif_wake_queue(dev);
+
+ mod_timer(&np->timer, jiffies + 1*HZ);
+ } else {
+ netif_device_attach(dev);
+ }
+out:
+ rtnl_unlock();
+ return retval;
+}
+#endif
+
+static struct pci_driver w840_driver = {
+ .name = DRV_NAME,
+ .id_table = w840_pci_tbl,
+ .probe = w840_probe1,
+ .remove = __devexit_p(w840_remove1),
+#ifdef CONFIG_PM
+ .suspend = w840_suspend,
+ .resume = w840_resume,
+#endif
+};
+
+static int __init w840_init(void)
+{
+ printk(version);
+ return pci_register_driver(&w840_driver);
+}
+
+static void __exit w840_exit(void)
+{
+ pci_unregister_driver(&w840_driver);
+}
+
+module_init(w840_init);
+module_exit(w840_exit);
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
new file mode 100644
index 000000000000..988b8eb24d37
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -0,0 +1,1154 @@
+/*
+ * xircom_cb: A driver for the (tulip-like) Xircom Cardbus ethernet cards
+ *
+ * This software is (C) by the respective authors, and licensed under the GPL
+ * License.
+ *
+ * Written by Arjan van de Ven for Red Hat, Inc.
+ * Based on work by Jeff Garzik, Doug Ledford and Donald Becker
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ *
+ * $Id: xircom_cb.c,v 1.33 2001/03/19 14:02:07 arjanv Exp $
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#ifdef CONFIG_NET_POLL_CONTROLLER
+#include <asm/irq.h>
+#endif
+
+MODULE_DESCRIPTION("Xircom Cardbus ethernet driver");
+MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>");
+MODULE_LICENSE("GPL");
+
+
+
+/* IO registers on the card, offsets */
+#define CSR0 0x00
+#define CSR1 0x08
+#define CSR2 0x10
+#define CSR3 0x18
+#define CSR4 0x20
+#define CSR5 0x28
+#define CSR6 0x30
+#define CSR7 0x38
+#define CSR8 0x40
+#define CSR9 0x48
+#define CSR10 0x50
+#define CSR11 0x58
+#define CSR12 0x60
+#define CSR13 0x68
+#define CSR14 0x70
+#define CSR15 0x78
+#define CSR16 0x80
+
+/* PCI registers */
+#define PCI_POWERMGMT 0x40
+
+/* Offsets of the buffers within the descriptor pages, in bytes */
+
+#define NUMDESCRIPTORS 4
+
+static int bufferoffsets[NUMDESCRIPTORS] = {128,2048,4096,6144};
+
+
+struct xircom_private {
+ /* Send and receive buffers, kernel-addressable and dma addressable forms */
+
+ __le32 *rx_buffer;
+ __le32 *tx_buffer;
+
+ dma_addr_t rx_dma_handle;
+ dma_addr_t tx_dma_handle;
+
+ struct sk_buff *tx_skb[4];
+
+ unsigned long io_port;
+ int open;
+
+ /* transmit_used is the rotating counter that indicates which transmit
+ descriptor has to be used next */
+ int transmit_used;
+
+ /* Spinlock to serialize register operations.
+ It must be held while manipulating the following registers:
+ CSR0, CSR6, CSR7, CSR9, CSR10, CSR15
+ */
+ spinlock_t lock;
+
+ struct pci_dev *pdev;
+ struct net_device *dev;
+};
+
+
+/* Function prototypes */
+static int xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+static void xircom_remove(struct pci_dev *pdev);
+static irqreturn_t xircom_interrupt(int irq, void *dev_instance);
+static netdev_tx_t xircom_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static int xircom_open(struct net_device *dev);
+static int xircom_close(struct net_device *dev);
+static void xircom_up(struct xircom_private *card);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void xircom_poll_controller(struct net_device *dev);
+#endif
+
+static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset);
+static void investigate_write_descriptor(struct net_device *dev, struct xircom_private *card, int descnr, unsigned int bufferoffset);
+static void read_mac_address(struct xircom_private *card);
+static void transceiver_voodoo(struct xircom_private *card);
+static void initialize_card(struct xircom_private *card);
+static void trigger_transmit(struct xircom_private *card);
+static void trigger_receive(struct xircom_private *card);
+static void setup_descriptors(struct xircom_private *card);
+static void remove_descriptors(struct xircom_private *card);
+static int link_status_changed(struct xircom_private *card);
+static void activate_receiver(struct xircom_private *card);
+static void deactivate_receiver(struct xircom_private *card);
+static void activate_transmitter(struct xircom_private *card);
+static void deactivate_transmitter(struct xircom_private *card);
+static void enable_transmit_interrupt(struct xircom_private *card);
+static void enable_receive_interrupt(struct xircom_private *card);
+static void enable_link_interrupt(struct xircom_private *card);
+static void disable_all_interrupts(struct xircom_private *card);
+static int link_status(struct xircom_private *card);
+
+
+
+static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = {
+ {0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,},
+ {0,},
+};
+MODULE_DEVICE_TABLE(pci, xircom_pci_table);
+
+static struct pci_driver xircom_ops = {
+ .name = "xircom_cb",
+ .id_table = xircom_pci_table,
+ .probe = xircom_probe,
+ .remove = xircom_remove,
+ .suspend = NULL,
+ .resume = NULL
+};
+
+
+#if defined DEBUG && DEBUG > 1
+static void print_binary(unsigned int number)
+{
+ int i,i2;
+ char buffer[64];
+ memset(buffer,0,64);
+ i2=0;
+ for (i=31;i>=0;i--) {
+ if (number & (1<<i))
+ buffer[i2++]='1';
+ else
+ buffer[i2++]='0';
+ if ((i&3)==0)
+ buffer[i2++]=' ';
+ }
+ pr_debug("%s\n",buffer);
+}
+#endif
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = xircom_open,
+ .ndo_stop = xircom_close,
+ .ndo_start_xmit = xircom_start_xmit,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = xircom_poll_controller,
+#endif
+};
+
+/* xircom_probe is the code that gets called on device insertion.
+ it sets up the hardware and registers the device with the network layer.
+
+ TODO: Send 1 or 2 "dummy" packets here as the card seems to discard the
+ first two packets that get sent, and pump hates that.
+
+ */
+static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct net_device *dev = NULL;
+ struct xircom_private *private;
+ unsigned long flags;
+ unsigned short tmp16;
+
+ /* First do the PCI initialisation */
+
+ if (pci_enable_device(pdev))
+ return -ENODEV;
+
+ /* disable all powermanagement */
+ pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000);
+
+ pci_set_master(pdev); /* Why isn't this done by pci_enable_device? */
+
+ /* clear PCI status, if any */
+ pci_read_config_word (pdev,PCI_STATUS, &tmp16);
+ pci_write_config_word (pdev, PCI_STATUS,tmp16);
+
+ if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) {
+ pr_err("%s: failed to allocate io-region\n", __func__);
+ return -ENODEV;
+ }
+
+ /*
+ Before changing the hardware, allocate the memory.
+ This way, we can fail gracefully if not enough memory
+ is available.
+ */
+ dev = alloc_etherdev(sizeof(struct xircom_private));
+ if (!dev) {
+ pr_err("%s: failed to allocate etherdev\n", __func__);
+ goto device_fail;
+ }
+ private = netdev_priv(dev);
+
+ /* Allocate the send/receive buffers */
+ private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle);
+ if (private->rx_buffer == NULL) {
+ pr_err("%s: no memory for rx buffer\n", __func__);
+ goto rx_buf_fail;
+ }
+ private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle);
+ if (private->tx_buffer == NULL) {
+ pr_err("%s: no memory for tx buffer\n", __func__);
+ goto tx_buf_fail;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+
+ private->dev = dev;
+ private->pdev = pdev;
+ private->io_port = pci_resource_start(pdev, 0);
+ spin_lock_init(&private->lock);
+ dev->irq = pdev->irq;
+ dev->base_addr = private->io_port;
+
+ initialize_card(private);
+ read_mac_address(private);
+ setup_descriptors(private);
+
+ dev->netdev_ops = &netdev_ops;
+ pci_set_drvdata(pdev, dev);
+
+ if (register_netdev(dev)) {
+ pr_err("%s: netdevice registration failed\n", __func__);
+ goto reg_fail;
+ }
+
+ netdev_info(dev, "Xircom cardbus revision %i at irq %i\n",
+ pdev->revision, pdev->irq);
+ /* start the transmitter to get a heartbeat */
+ /* TODO: send 2 dummy packets here */
+ transceiver_voodoo(private);
+
+ spin_lock_irqsave(&private->lock,flags);
+ activate_transmitter(private);
+ activate_receiver(private);
+ spin_unlock_irqrestore(&private->lock,flags);
+
+ trigger_receive(private);
+
+ return 0;
+
+reg_fail:
+ kfree(private->tx_buffer);
+tx_buf_fail:
+ kfree(private->rx_buffer);
+rx_buf_fail:
+ free_netdev(dev);
+device_fail:
+ return -ENODEV;
+}
+
+
+/*
+ xircom_remove is called on module-unload or on device-eject.
+ it unregisters the irq, io-region and network device.
+ Interrupts and such are already stopped in the "ifconfig ethX down"
+ code.
+ */
+static void __devexit xircom_remove(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct xircom_private *card = netdev_priv(dev);
+
+ pci_free_consistent(pdev,8192,card->rx_buffer,card->rx_dma_handle);
+ pci_free_consistent(pdev,8192,card->tx_buffer,card->tx_dma_handle);
+
+ release_region(dev->base_addr, 128);
+ unregister_netdev(dev);
+ free_netdev(dev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
+{
+ struct net_device *dev = (struct net_device *) dev_instance;
+ struct xircom_private *card = netdev_priv(dev);
+ unsigned int status;
+ int i;
+
+ spin_lock(&card->lock);
+ status = inl(card->io_port+CSR5);
+
+#if defined DEBUG && DEBUG > 1
+ print_binary(status);
+ pr_debug("tx status 0x%08x 0x%08x\n",
+ card->tx_buffer[0], card->tx_buffer[4]);
+ pr_debug("rx status 0x%08x 0x%08x\n",
+ card->rx_buffer[0], card->rx_buffer[4]);
+#endif
+ /* Handle shared irq and hotplug */
+ if (status == 0 || status == 0xffffffff) {
+ spin_unlock(&card->lock);
+ return IRQ_NONE;
+ }
+
+ if (link_status_changed(card)) {
+ int newlink;
+ netdev_dbg(dev, "Link status has changed\n");
+ newlink = link_status(card);
+ netdev_info(dev, "Link is %d mbit\n", newlink);
+ if (newlink)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ }
+
+ /* Clear all remaining interrupts */
+ status |= 0xffffffff; /* FIXME: make this clear only the
+ real existing bits */
+ outl(status,card->io_port+CSR5);
+
+
+ for (i=0;i<NUMDESCRIPTORS;i++)
+ investigate_write_descriptor(dev,card,i,bufferoffsets[i]);
+ for (i=0;i<NUMDESCRIPTORS;i++)
+ investigate_read_descriptor(dev,card,i,bufferoffsets[i]);
+
+ spin_unlock(&card->lock);
+ return IRQ_HANDLED;
+}
+
+static netdev_tx_t xircom_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct xircom_private *card;
+ unsigned long flags;
+ int nextdescriptor;
+ int desc;
+
+ card = netdev_priv(dev);
+ spin_lock_irqsave(&card->lock,flags);
+
+ /* First see if we can free some descriptors */
+ for (desc=0;desc<NUMDESCRIPTORS;desc++)
+ investigate_write_descriptor(dev,card,desc,bufferoffsets[desc]);
+
+
+ nextdescriptor = (card->transmit_used +1) % (NUMDESCRIPTORS);
+ desc = card->transmit_used;
+
+ /* only send the packet if the descriptor is free */
+ if (card->tx_buffer[4*desc]==0) {
+ /* Copy the packet data; zero the memory first as the card
+ sometimes sends more than you ask it to. */
+
+ memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536);
+ skb_copy_from_linear_data(skb,
+ &(card->tx_buffer[bufferoffsets[desc] / 4]),
+ skb->len);
+ /* FIXME: The specification tells us that the length we send HAS to be a multiple of
+ 4 bytes. */
+
+ card->tx_buffer[4*desc+1] = cpu_to_le32(skb->len);
+ if (desc == NUMDESCRIPTORS - 1) /* bit 25: last descriptor of the ring */
+ card->tx_buffer[4*desc+1] |= cpu_to_le32(1<<25);
+
+ card->tx_buffer[4*desc+1] |= cpu_to_le32(0xF0000000);
+ /* 0xF0... means want interrupts*/
+ card->tx_skb[desc] = skb;
+
+ wmb();
+ /* This gives the descriptor to the card */
+ card->tx_buffer[4*desc] = cpu_to_le32(0x80000000);
+ trigger_transmit(card);
+ if (card->tx_buffer[nextdescriptor*4] & cpu_to_le32(0x80000000)) {
+ /* next descriptor is occupied... */
+ netif_stop_queue(dev);
+ }
+ card->transmit_used = nextdescriptor;
+ spin_unlock_irqrestore(&card->lock,flags);
+ return NETDEV_TX_OK;
+ }
+
+ /* Uh oh... no free descriptor... drop the packet */
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&card->lock,flags);
+ trigger_transmit(card);
+
+ return NETDEV_TX_BUSY;
+}
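+
+/* Illustrative aside, not from the patch: the ownership handoff used in
+ xircom_start_xmit() above. Word 0 of each four-word descriptor is the
+ status word; bit 31 set means the card owns it. The wmb() guarantees
+ the descriptor body is visible before ownership flips. */
+#if 0
+static void give_descriptor_to_card(struct xircom_private *card, int desc)
+{
+ /* words 1..3 (length/flags, buffer address) are already filled in */
+ wmb();
+ card->tx_buffer[4*desc] = cpu_to_le32(0x80000000); /* card owns it */
+ trigger_transmit(card); /* poke CSR1 so the card rescans the ring */
+}
+#endif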
+
+
+
+
+static int xircom_open(struct net_device *dev)
+{
+ struct xircom_private *xp = netdev_priv(dev);
+ int retval;
+
+ netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n",
+ dev->irq);
+ retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
+ if (retval)
+ return retval;
+
+ xircom_up(xp);
+ xp->open = 1;
+
+ return 0;
+}
+
+static int xircom_close(struct net_device *dev)
+{
+ struct xircom_private *card;
+ unsigned long flags;
+
+ card = netdev_priv(dev);
+ netif_stop_queue(dev); /* we don't want new packets */
+
+
+ spin_lock_irqsave(&card->lock,flags);
+
+ disable_all_interrupts(card);
+#if 0
+ /* We can enable this again once we send dummy packets on ifconfig ethX up */
+ deactivate_receiver(card);
+ deactivate_transmitter(card);
+#endif
+ remove_descriptors(card);
+
+ spin_unlock_irqrestore(&card->lock,flags);
+
+ card->open = 0;
+ free_irq(dev->irq,dev);
+
+ return 0;
+
+}
+
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void xircom_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ xircom_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+
+static void initialize_card(struct xircom_private *card)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+
+ /* First: reset the card */
+ val = inl(card->io_port + CSR0);
+ val |= 0x01; /* Software reset */
+ outl(val, card->io_port + CSR0);
+
+ udelay(100); /* give the card some time to reset */
+
+ val = inl(card->io_port + CSR0);
+ val &= ~0x01; /* disable Software reset */
+ outl(val, card->io_port + CSR0);
+
+
+ val = 0; /* Value 0x00 is a safe and conservative value
+ for the PCI configuration settings */
+ outl(val, card->io_port + CSR0);
+
+
+ disable_all_interrupts(card);
+ deactivate_receiver(card);
+ deactivate_transmitter(card);
+
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
+/*
+trigger_transmit causes the card to check for frames to be transmitted.
+This is accomplished by writing to the CSR1 port. The documentation
+claims that the act of writing is sufficient and that the value is
+ignored; I chose zero.
+*/
+static void trigger_transmit(struct xircom_private *card)
+{
+ unsigned int val;
+
+ val = 0;
+ outl(val, card->io_port + CSR1);
+}
+
+/*
+trigger_receive causes the card to check for empty frames in the
+descriptor list in which packets can be received.
+This is accomplished by writing to the CSR2 port. The documentation
+claims that the act of writing is sufficient and that the value is
+ignored; I chose zero.
+*/
+static void trigger_receive(struct xircom_private *card)
+{
+ unsigned int val;
+
+ val = 0;
+ outl(val, card->io_port + CSR2);
+}
+
+/*
+setup_descriptors initializes the send and receive buffers to be valid
+descriptors and programs the addresses into the card.
+*/
+static void setup_descriptors(struct xircom_private *card)
+{
+ u32 address;
+ int i;
+
+ BUG_ON(card->rx_buffer == NULL);
+ BUG_ON(card->tx_buffer == NULL);
+
+ /* Receive descriptors */
+ memset(card->rx_buffer, 0, 128); /* clear the descriptors */
+ for (i=0;i<NUMDESCRIPTORS;i++ ) {
+
+ /* Rx Descr0: It's empty, let the card own it, no errors -> 0x80000000 */
+ card->rx_buffer[i*4 + 0] = cpu_to_le32(0x80000000);
+ /* Rx Descr1: buffer 1 is 1536 bytes, buffer 2 is 0 bytes */
+ card->rx_buffer[i*4 + 1] = cpu_to_le32(1536);
+ if (i == NUMDESCRIPTORS - 1) /* bit 25 is "last descriptor" */
+ card->rx_buffer[i*4 + 1] |= cpu_to_le32(1 << 25);
+
+ /* Rx Descr2: address of the buffer
+ we store the buffer at the 2nd half of the page */
+
+ address = card->rx_dma_handle;
+ card->rx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]);
+ /* Rx Desc3: address of 2nd buffer -> 0 */
+ card->rx_buffer[i*4 + 3] = 0;
+ }
+
+ wmb();
+ /* Write the receive descriptor ring address to the card */
+ address = card->rx_dma_handle;
+ outl(address, card->io_port + CSR3); /* Receive descr list address */
+
+
+ /* transmit descriptors */
+ memset(card->tx_buffer, 0, 128); /* clear the descriptors */
+
+ for (i=0;i<NUMDESCRIPTORS;i++ ) {
+ /* Tx Descr0: Empty, we own it, no errors -> 0x00000000 */
+ card->tx_buffer[i*4 + 0] = 0x00000000;
+ /* Tx Descr1: buffer 1 is 1536 bytes, buffer 2 is 0 bytes */
+ card->tx_buffer[i*4 + 1] = cpu_to_le32(1536);
+ if (i == NUMDESCRIPTORS - 1) /* bit 25 is "last descriptor" */
+ card->tx_buffer[i*4 + 1] |= cpu_to_le32(1 << 25);
+
+ /* Tx Descr2: address of the buffer
+ we store the buffer at the 2nd half of the page */
+ address = card->tx_dma_handle;
+ card->tx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]);
+ /* Tx Desc3: address of 2nd buffer -> 0 */
+ card->tx_buffer[i*4 + 3] = 0;
+ }
+
+ wmb();
+ /* write the transmit descriptor ring to the card */
+ address = card->tx_dma_handle;
+ outl(address, card->io_port + CSR4); /* xmit descr list address */
+}
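+
+/* Illustrative aside, not from the patch: the four-word tulip-style
+ descriptor that setup_descriptors() above builds by hand, written as
+ a struct for reference. */
+#if 0
+struct xircom_desc {
+ __le32 status; /* bit 31: owned by the card; completion status */
+ __le32 control; /* buffer 1/2 sizes; bit 25: last in ring */
+ __le32 buffer1; /* DMA address of the data buffer */
+ __le32 buffer2; /* second buffer, unused here (0) */
+};
+#endif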
+
+/*
+remove_descriptors informs the card the descriptors are no longer
+valid by setting the address in the card to 0x00.
+*/
+static void remove_descriptors(struct xircom_private *card)
+{
+ unsigned int val;
+
+ val = 0;
+ outl(val, card->io_port + CSR3); /* Receive descriptor address */
+ outl(val, card->io_port + CSR4); /* Send descriptor address */
+}
+
+/*
+link_status_changed returns 1 if the card has indicated that
+the link status has changed. The new link status has to be read from CSR12.
+
+This function also clears the status-bit.
+*/
+static int link_status_changed(struct xircom_private *card)
+{
+ unsigned int val;
+
+ val = inl(card->io_port + CSR5); /* Status register */
+
+ if ((val & (1 << 27)) == 0) /* no change */
+ return 0;
+
+ /* clear the event by writing a 1 to the bit in the
+ status register. */
+ val = (1 << 27);
+ outl(val, card->io_port + CSR5);
+
+ return 1;
+}
+
+
+/*
+transmit_active returns 1 if the transmitter on the card is
+in a non-stopped state.
+*/
+static int transmit_active(struct xircom_private *card)
+{
+ unsigned int val;
+
+ val = inl(card->io_port + CSR5); /* Status register */
+
+ if ((val & (7 << 20)) == 0) /* transmitter disabled */
+ return 0;
+
+ return 1;
+}
+
+/*
+receive_active returns 1 if the receiver on the card is
+in a non-stopped state.
+*/
+static int receive_active(struct xircom_private *card)
+{
+ unsigned int val;
+
+ val = inl(card->io_port + CSR5); /* Status register */
+
+ if ((val & (7 << 17)) == 0) /* receiver disabled */
+ return 0;
+
+ return 1;
+}
+
+/*
+activate_receiver enables the receiver on the card.
+Before being allowed to activate the receiver, the receiver
+must be completely de-activated. To achieve this,
+this code actually disables the receiver first; then it waits for the
+receiver to become inactive, then it activates the receiver and then
+it waits for the receiver to be active.
+
+must be called with the lock held and interrupts disabled.
+*/
+static void activate_receiver(struct xircom_private *card)
+{
+ unsigned int val;
+ int counter;
+
+ val = inl(card->io_port + CSR6); /* Operation mode */
+
+ /* If the "active" bit is set and the receiver is already
+ active, no need to do the expensive thing */
+ if ((val&2) && (receive_active(card)))
+ return;
+
+
+ val = val & ~2; /* disable the receiver */
+ outl(val, card->io_port + CSR6);
+
+ counter = 10;
+ while (counter > 0) {
+ if (!receive_active(card))
+ break;
+ /* wait a while */
+ udelay(50);
+ counter--;
+ if (counter <= 0)
+ netdev_err(card->dev, "Receiver failed to deactivate\n");
+ }
+
+ /* enable the receiver */
+ val = inl(card->io_port + CSR6); /* Operation mode */
+ val = val | 2; /* enable the receiver */
+ outl(val, card->io_port + CSR6);
+
+ /* now wait for the card to activate again */
+ counter = 10;
+ while (counter > 0) {
+ if (receive_active(card))
+ break;
+ /* wait a while */
+ udelay(50);
+ counter--;
+ if (counter <= 0)
+ netdev_err(card->dev,
+ "Receiver failed to re-activate\n");
+ }
+}
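+
+/* Illustrative aside, not from the patch: the stop-wait/start-wait
+ polling shared by the four (de)activate helpers above, factored into
+ a hypothetical helper. 'active' is receive_active() or
+ transmit_active(). */
+#if 0
+static void wait_for_state(struct xircom_private *card,
+ int (*active)(struct xircom_private *), int wanted)
+{
+ int counter = 10;
+
+ while (counter-- > 0) {
+ if (active(card) == wanted)
+ return;
+ udelay(50); /* give the engine time to settle */
+ }
+ netdev_err(card->dev, "engine stuck in the wrong state\n");
+}
+#endif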
+
+/*
+deactivate_receiver disables the receiver on the card.
+To achieve this, this code disables the receiver first;
+then it waits for the receiver to become inactive.
+
+must be called with the lock held and interrupts disabled.
+*/
+static void deactivate_receiver(struct xircom_private *card)
+{
+ unsigned int val;
+ int counter;
+
+ val = inl(card->io_port + CSR6); /* Operation mode */
+ val = val & ~2; /* disable the receiver */
+ outl(val, card->io_port + CSR6);
+
+ counter = 10;
+ while (counter > 0) {
+ if (!receive_active(card))
+ break;
+ /* wait a while */
+ udelay(50);
+ counter--;
+ if (counter <= 0)
+ netdev_err(card->dev, "Receiver failed to deactivate\n");
+ }
+}
+
+
+/*
+activate_transmitter enables the transmitter on the card.
+Before being allowed to activate the transmitter, the transmitter
+must be completely de-activated. To achieve this,
+this code actually disables the transmitter first; then it waits for the
+transmitter to become inactive, then it activates the transmitter and then
+it waits for the transmitter to be active again.
+
+must be called with the lock held and interrupts disabled.
+*/
+static void activate_transmitter(struct xircom_private *card)
+{
+ unsigned int val;
+ int counter;
+
+ val = inl(card->io_port + CSR6); /* Operation mode */
+
+ /* If the "active" bit is set and the receiver is already
+ active, no need to do the expensive thing */
+ if ((val&(1<<13)) && (transmit_active(card)))
+ return;
+
+ val = val & ~(1 << 13); /* disable the transmitter */
+ outl(val, card->io_port + CSR6);
+
+ counter = 10;
+ while (counter > 0) {
+ if (!transmit_active(card))
+ break;
+ /* wait a while */
+ udelay(50);
+ counter--;
+ if (counter <= 0)
+ netdev_err(card->dev,
+ "Transmitter failed to deactivate\n");
+ }
+
+ /* enable the transmitter */
+ val = inl(card->io_port + CSR6); /* Operation mode */
+ val = val | (1 << 13); /* enable the transmitter */
+ outl(val, card->io_port + CSR6);
+
+ /* now wait for the card to activate again */
+ counter = 10;
+ while (counter > 0) {
+ if (transmit_active(card))
+ break;
+ /* wait a while */
+ udelay(50);
+ counter--;
+ if (counter <= 0)
+ netdev_err(card->dev,
+ "Transmitter failed to re-activate\n");
+ }
+}
+
+/*
+deactivate_transmitter disables the transmitter on the card.
+To achieve this, this code disables the transmitter first;
+then it waits for the transmitter to become inactive.
+
+must be called with the lock held and interrupts disabled.
+*/
+static void deactivate_transmitter(struct xircom_private *card)
+{
+ unsigned int val;
+ int counter;
+
+ val = inl(card->io_port + CSR6); /* Operation mode */
+ val = val & ~2; /* disable the transmitter */
+ outl(val, card->io_port + CSR6);
+
+ counter = 20;
+ while (counter > 0) {
+ if (!transmit_active(card))
+ break;
+ /* wait a while */
+ udelay(50);
+ counter--;
+ if (counter <= 0)
+ netdev_err(card->dev,
+ "Transmitter failed to deactivate\n");
+ }
+}
+
+
+/*
+enable_transmit_interrupt enables the transmit interrupt
+
+must be called with the lock held and interrupts disabled.
+*/
+static void enable_transmit_interrupt(struct xircom_private *card)
+{
+ unsigned int val;
+
+ val = inl(card->io_port + CSR7); /* Interrupt enable register */
+ val |= 1; /* enable the transmit interrupt */
+ outl(val, card->io_port + CSR7);
+}
+
+
+/*
+enable_receive_interrupt enables the receive interrupt
+
+must be called with the lock held and interrupts disabled.
+*/
+static void enable_receive_interrupt(struct xircom_private *card)
+{
+ unsigned int val;
+
+ val = inl(card->io_port + CSR7); /* Interrupt enable register */
+ val = val | (1 << 6); /* enable the receive interrupt */
+ outl(val, card->io_port + CSR7);
+}
+
+/*
+enable_link_interrupt enables the link status change interrupt
+
+must be called with the lock held and interrupts disabled.
+*/
+static void enable_link_interrupt(struct xircom_private *card)
+{
+ unsigned int val;
+
+ val = inl(card->io_port + CSR7); /* Interrupt enable register */
+ val = val | (1 << 27); /* enable the link status chage interrupt */
+ outl(val, card->io_port + CSR7);
+}
+
+
+
+/*
+disable_all_interrupts disables all interrupts
+
+must be called with the lock held and interrupts disabled.
+*/
+static void disable_all_interrupts(struct xircom_private *card)
+{
+ unsigned int val;
+
+ val = 0; /* disable all interrupts */
+ outl(val, card->io_port + CSR7);
+}
+
+/*
+enable_common_interrupts enables the common summary and error interrupts
+
+must be called with the lock held and interrupts disabled.
+*/
+static void enable_common_interrupts(struct xircom_private *card)
+{
+ unsigned int val;
+
+ val = inl(card->io_port + CSR7); /* Interrupt enable register */
+ val |= (1<<16); /* Normal Interrupt Summary */
+ val |= (1<<15); /* Abnormal Interrupt Summary */
+ val |= (1<<13); /* Fatal bus error */
+ val |= (1<<8); /* Receive Process Stopped */
+ val |= (1<<7); /* Receive Buffer Unavailable */
+ val |= (1<<5); /* Transmit Underflow */
+ val |= (1<<2); /* Transmit Buffer Unavailable */
+ val |= (1<<1); /* Transmit Process Stopped */
+ outl(val, card->io_port + CSR7);
+}
+
+/*
+enable_promisc starts promisc mode
+
+must be called with the lock held and interrupts disabled.
+*/
+static int enable_promisc(struct xircom_private *card)
+{
+ unsigned int val;
+
+ val = inl(card->io_port + CSR6);
+ val = val | (1 << 6);
+ outl(val, card->io_port + CSR6);
+
+ return 1;
+}
+
+
+
+
+/*
+link_status() checks the link status and returns 0 for no link, 10 for a 10mbit link and 100 for a 100mbit link.
+
+Must be called in locked state with interrupts disabled
+*/
+static int link_status(struct xircom_private *card)
+{
+ unsigned int val;
+
+ val = inb(card->io_port + CSR12);
+
+ if (!(val&(1<<2))) /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */
+ return 10;
+ if (!(val&(1<<1))) /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */
+ return 100;
+
+ /* If we get here -> no link at all */
+
+ return 0;
+}
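+
+/* Illustrative aside, not from the patch: the CSR12 decode performed by
+ link_status() above. The bits are active-low: a cleared bit means
+ that link speed is present. */
+#if 0
+static int decode_csr12(u8 val)
+{
+ if (!(val & (1 << 2)))
+ return 10; /* 10mbit link beat present */
+ if (!(val & (1 << 1)))
+ return 100; /* 100mbit link present */
+ return 0; /* no link at all */
+}
+#endif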
+
+
+
+
+
+/*
+ read_mac_address() reads the MAC address from the NIC and stores it in the "dev" structure.
+
+ This function will take the spinlock itself and can, as a result, not be called with the lock held.
+ */
+static void read_mac_address(struct xircom_private *card)
+{
+ unsigned char j, tuple, link, data_id, data_count;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&card->lock, flags);
+
+ outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */
+ for (i = 0x100; i < 0x1f7; i += link + 2) {
+ outl(i, card->io_port + CSR10);
+ tuple = inl(card->io_port + CSR9) & 0xff;
+ outl(i + 1, card->io_port + CSR10);
+ link = inl(card->io_port + CSR9) & 0xff;
+ outl(i + 2, card->io_port + CSR10);
+ data_id = inl(card->io_port + CSR9) & 0xff;
+ outl(i + 3, card->io_port + CSR10);
+ data_count = inl(card->io_port + CSR9) & 0xff;
+ if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) {
+ /*
+ * This is it. We have the data we want.
+ */
+ for (j = 0; j < 6; j++) {
+ outl(i + j + 4, card->io_port + CSR10);
+ card->dev->dev_addr[j] = inl(card->io_port + CSR9) & 0xff;
+ }
+ break;
+ } else if (link == 0) {
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&card->lock, flags);
+ pr_debug(" %pM\n", card->dev->dev_addr);
+}
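+
+/* Illustrative aside, not from the patch: the boot-ROM tuple layout
+ that read_mac_address() above walks, written as a struct. 'link'
+ counts the bytes after itself to the next tuple, hence the loop
+ stepping by link + 2; a link of 0 ends the chain. */
+#if 0
+struct cis_tuple {
+ u8 code; /* 0x22 selects the node-ID tuple */
+ u8 link; /* bytes after this field; next tuple at +link+2 */
+ u8 data_id; /* 0x04: node ID (the MAC address) */
+ u8 data_count; /* 0x06: six address bytes follow */
+ u8 data[]; /* the MAC address itself */
+};
+#endif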
+
+
+/*
+ transceiver_voodoo() enables the external UTP plug thingy.
+ it's called voodoo as I stole this code and cannot cross-reference
+ it with the specification.
+ */
+static void transceiver_voodoo(struct xircom_private *card)
+{
+ unsigned long flags;
+
+ /* disable all powermanagement */
+ pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000);
+
+ setup_descriptors(card);
+
+ spin_lock_irqsave(&card->lock, flags);
+
+ outl(0x0008, card->io_port + CSR15);
+ udelay(25);
+ outl(0xa8050000, card->io_port + CSR15);
+ udelay(25);
+ outl(0xa00f0000, card->io_port + CSR15);
+ udelay(25);
+
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ netif_start_queue(card->dev);
+}
+
+
+static void xircom_up(struct xircom_private *card)
+{
+ unsigned long flags;
+ int i;
+
+ /* disable all powermanagement */
+ pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000);
+
+ setup_descriptors(card);
+
+ spin_lock_irqsave(&card->lock, flags);
+
+
+ enable_link_interrupt(card);
+ enable_transmit_interrupt(card);
+ enable_receive_interrupt(card);
+ enable_common_interrupts(card);
+ enable_promisc(card);
+
+ /* The card can have received packets already, read them away now */
+ for (i=0;i<NUMDESCRIPTORS;i++)
+ investigate_read_descriptor(card->dev,card,i,bufferoffsets[i]);
+
+
+ spin_unlock_irqrestore(&card->lock, flags);
+ trigger_receive(card);
+ trigger_transmit(card);
+ netif_start_queue(card->dev);
+}
+
+/* Bufferoffset is in BYTES */
+static void
+investigate_read_descriptor(struct net_device *dev, struct xircom_private *card,
+ int descnr, unsigned int bufferoffset)
+{
+ int status;
+
+ status = le32_to_cpu(card->rx_buffer[4*descnr]);
+
+ if (status > 0) { /* packet received */
+
+ /* TODO: discard error packets */
+
+ short pkt_len = ((status >> 16) & 0x7ff) - 4;
+ /* minus 4, we don't want the CRC */
+ struct sk_buff *skb;
+
+ if (pkt_len > 1518) {
+ netdev_err(dev, "Packet length %i is bogus\n", pkt_len);
+ pkt_len = 1518;
+ }
+
+ skb = dev_alloc_skb(pkt_len + 2);
+ if (skb == NULL) {
+ dev->stats.rx_dropped++;
+ goto out;
+ }
+ skb_reserve(skb, 2);
+ skb_copy_to_linear_data(skb,
+ &card->rx_buffer[bufferoffset / 4],
+ pkt_len);
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+
+out:
+ /* give the buffer back to the card */
+ card->rx_buffer[4*descnr] = cpu_to_le32(0x80000000);
+ trigger_receive(card);
+ }
+}
+
+
+/* Bufferoffset is in BYTES */
+static void
+investigate_write_descriptor(struct net_device *dev,
+ struct xircom_private *card,
+ int descnr, unsigned int bufferoffset)
+{
+ int status;
+
+ status = le32_to_cpu(card->tx_buffer[4*descnr]);
+#if 0
+ if (status & 0x8000) { /* Major error */
+ pr_err("Major transmit error status %x\n", status);
+ card->tx_buffer[4*descnr] = 0;
+ netif_wake_queue (dev);
+ }
+#endif
+ if (status > 0) { /* bit 31 is 0 when done */
+ if (card->tx_skb[descnr]!=NULL) {
+ dev->stats.tx_bytes += card->tx_skb[descnr]->len;
+ dev_kfree_skb_irq(card->tx_skb[descnr]);
+ }
+ card->tx_skb[descnr] = NULL;
+ /* Bit 8 in the status field is 1 if there was a collision */
+ if (status & (1 << 8))
+ dev->stats.collisions++;
+ card->tx_buffer[4*descnr] = 0; /* descriptor is free again */
+ netif_wake_queue (dev);
+ dev->stats.tx_packets++;
+ }
+}
+
+static int __init xircom_init(void)
+{
+ return pci_register_driver(&xircom_ops);
+}
+
+static void __exit xircom_exit(void)
+{
+ pci_unregister_driver(&xircom_ops);
+}
+
+module_init(xircom_init)
+module_exit(xircom_exit)
+
diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig
new file mode 100644
index 000000000000..b5afe218c31b
--- /dev/null
+++ b/drivers/net/ethernet/dlink/Kconfig
@@ -0,0 +1,86 @@
+#
+# D-Link device configuration
+#
+
+config NET_VENDOR_DLINK
+ bool "D-Link devices"
+ default y
+ depends on PCI || PARPORT
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about D-Link devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_DLINK
+
+config DE600
+ tristate "D-Link DE600 pocket adapter support"
+ depends on PARPORT
+ ---help---
+ This is a network (Ethernet) device which attaches to your parallel
+ port. Read <file:Documentation/networking/DLINK.txt> as well as the
+ Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>, if you want to use
+ this. It is possible to have several devices share a single parallel
+ port and it is safe to compile the corresponding drivers into the
+ kernel.
+
+ To compile this driver as a module, choose M here: the module
+ will be called de600.
+
+config DE620
+ tristate "D-Link DE620 pocket adapter support"
+ depends on PARPORT
+ ---help---
+ This is a network (Ethernet) device which attaches to your parallel
+ port. Read <file:Documentation/networking/DLINK.txt> as well as the
+ Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>, if you want to use
+ this. It is possible to have several devices share a single parallel
+ port and it is safe to compile the corresponding drivers into the
+ kernel.
+
+ To compile this driver as a module, choose M here: the module
+ will be called de620.
+
+config DL2K
+ tristate "DL2000/TC902x-based Gigabit Ethernet support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This driver supports DL2000/TC902x-based Gigabit Ethernet cards,
+ which include:
+ - D-Link DGE-550T Gigabit Ethernet Adapter
+ - D-Link DL2000-based Gigabit Ethernet Adapter
+ - Sundance/Tamarack TC902x Gigabit Ethernet Adapter
+
+ To compile this driver as a module, choose M here: the
+ module will be called dl2k.
+
+config SUNDANCE
+ tristate "Sundance Alta support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ This driver is for the Sundance "Alta" chip.
+ More specific information and updates are available from
+ <http://www.scyld.com/network/sundance.html>.
+
+config SUNDANCE_MMIO
+ bool "Use MMIO instead of PIO"
+ depends on SUNDANCE
+ ---help---
+ Enable memory-mapped I/O for interaction with Sundance NIC registers.
+ Do NOT enable this by default; PIO (used when MMIO is disabled)
+ is known to work around bugs on certain chips.
+
+ If unsure, say N.
+
+endif # NET_VENDOR_DLINK
diff --git a/drivers/net/ethernet/dlink/Makefile b/drivers/net/ethernet/dlink/Makefile
new file mode 100644
index 000000000000..c705eaa4f5b2
--- /dev/null
+++ b/drivers/net/ethernet/dlink/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the D-Link network device drivers.
+#
+
+obj-$(CONFIG_DE600) += de600.o
+obj-$(CONFIG_DE620) += de620.o
+obj-$(CONFIG_DL2K) += dl2k.o
+obj-$(CONFIG_SUNDANCE) += sundance.o
diff --git a/drivers/net/ethernet/dlink/de600.c b/drivers/net/ethernet/dlink/de600.c
new file mode 100644
index 000000000000..23a65398d011
--- /dev/null
+++ b/drivers/net/ethernet/dlink/de600.c
@@ -0,0 +1,530 @@
+static const char version[] = "de600.c: $Revision: 1.41-2.5 $, Bjorn Ekwall (bj0rn@blox.se)\n";
+/*
+ * de600.c
+ *
+ * Linux driver for the D-Link DE-600 Ethernet pocket adapter.
+ *
+ * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall
+ * The Author may be reached as bj0rn@blox.se
+ *
+ * Based on adapter information gathered from DE600.ASM by D-Link Inc.,
+ * as included on disk C in the v.2.11 of PC/TCP from FTP Software.
+ * For DE600.asm:
+ * Portions (C) Copyright 1990 D-Link, Inc.
+ * Copyright, 1988-1992, Russell Nelson, Crynwr Software
+ *
+ * Adapted to the sample network driver core for linux,
+ * written by: Donald Becker <becker@super.org>
+ * (Now at <becker@scyld.com>)
+ *
+ **************************************************************/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ **************************************************************/
+
+/* Add more time here if your adapter won't work OK: */
+#define DE600_SLOW_DOWN udelay(delay_time)
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <asm/system.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+
+#include "de600.h"
+
+static unsigned int check_lost = 1;
+module_param(check_lost, bool, 0);
+MODULE_PARM_DESC(check_lost, "If set then check for unplugged de600");
+
+static unsigned int delay_time = 10;
+module_param(delay_time, int, 0);
+MODULE_PARM_DESC(delay_time, "DE-600 deley on I/O in microseconds");
+
+
+/*
+ * D-Link driver variables:
+ */
+
+static volatile int rx_page;
+
+#define TX_PAGES 2
+static volatile int tx_fifo[TX_PAGES];
+static volatile int tx_fifo_in;
+static volatile int tx_fifo_out;
+static volatile int free_tx_pages = TX_PAGES;
+static int was_down;
+static DEFINE_SPINLOCK(de600_lock);
+
+static inline u8 de600_read_status(struct net_device *dev)
+{
+ u8 status;
+
+ outb_p(STATUS, DATA_PORT);
+ status = inb(STATUS_PORT);
+ outb_p(NULL_COMMAND | HI_NIBBLE, DATA_PORT);
+
+ return status;
+}
+
+static inline u8 de600_read_byte(unsigned char type, struct net_device *dev)
+{
+ /* dev used by macros */
+ u8 lo;
+ outb_p((type), DATA_PORT);
+ lo = ((unsigned char)inb(STATUS_PORT)) >> 4;
+ outb_p((type) | HI_NIBBLE, DATA_PORT);
+ return ((unsigned char)inb(STATUS_PORT) & (unsigned char)0xf0) | lo;
+}
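+
+/* Illustrative aside, not from the patch: composing one byte from the
+ two 4-bit reads de600_read_byte() above performs. The status port
+ presents each nibble in its high four bits; HI_NIBBLE selects which
+ half of the byte the adapter drives next. */
+#if 0
+static u8 compose_nibbles(u8 first_status, u8 second_status)
+{
+ u8 lo = first_status >> 4; /* first read: low nibble */
+ u8 hi = second_status & 0xf0; /* second read: high nibble, in place */
+
+ return hi | lo;
+}
+#endif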
+
+/*
+ * Open/initialize the board. This is called (in the current kernel)
+ * after booting when 'ifconfig <dev->name> $IP_ADDR' is run (in rc.inet1).
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+
+static int de600_open(struct net_device *dev)
+{
+ unsigned long flags;
+ int ret = request_irq(DE600_IRQ, de600_interrupt, 0, dev->name, dev);
+ if (ret) {
+ printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, DE600_IRQ);
+ return ret;
+ }
+ spin_lock_irqsave(&de600_lock, flags);
+ ret = adapter_init(dev);
+ spin_unlock_irqrestore(&de600_lock, flags);
+ return ret;
+}
+
+/*
+ * The inverse routine to de600_open().
+ */
+
+static int de600_close(struct net_device *dev)
+{
+ select_nic();
+ rx_page = 0;
+ de600_put_command(RESET);
+ de600_put_command(STOP_RESET);
+ de600_put_command(0);
+ select_prn();
+ free_irq(DE600_IRQ, dev);
+ return 0;
+}
+
+static inline void trigger_interrupt(struct net_device *dev)
+{
+ de600_put_command(FLIP_IRQ);
+ select_prn();
+ DE600_SLOW_DOWN;
+ select_nic();
+ de600_put_command(0);
+}
+
+/*
+ * Copy a buffer to the adapter transmit page memory.
+ * Start sending.
+ */
+
+static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned long flags;
+ int transmit_from;
+ int len;
+ int tickssofar;
+ u8 *buffer = skb->data;
+ int i;
+
+ if (free_tx_pages <= 0) { /* Do timeouts, to avoid hangs. */
+ tickssofar = jiffies - dev_trans_start(dev);
+ if (tickssofar < HZ/20)
+ return NETDEV_TX_BUSY;
+ /* else */
+ printk(KERN_WARNING "%s: transmit timed out (%d), %s?\n", dev->name, tickssofar, "network cable problem");
+ /* Restart the adapter. */
+ spin_lock_irqsave(&de600_lock, flags);
+ if (adapter_init(dev)) {
+ spin_unlock_irqrestore(&de600_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+ spin_unlock_irqrestore(&de600_lock, flags);
+ }
+
+ /* Start real output */
+ pr_debug("de600_start_xmit:len=%d, page %d/%d\n", skb->len, tx_fifo_in, free_tx_pages);
+
+ if ((len = skb->len) < RUNT)
+ len = RUNT;
+
+ spin_lock_irqsave(&de600_lock, flags);
+ select_nic();
+ tx_fifo[tx_fifo_in] = transmit_from = tx_page_adr(tx_fifo_in) - len;
+ tx_fifo_in = (tx_fifo_in + 1) % TX_PAGES; /* Next free tx page */
+
+ if(check_lost)
+ {
+ /* This costs about 40 instructions per packet... */
+ de600_setup_address(NODE_ADDRESS, RW_ADDR);
+ de600_read_byte(READ_DATA, dev);
+ if (was_down || (de600_read_byte(READ_DATA, dev) != 0xde)) {
+ if (adapter_init(dev)) {
+ spin_unlock_irqrestore(&de600_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+ }
+ }
+
+ de600_setup_address(transmit_from, RW_ADDR);
+ for (i = 0; i < skb->len ; ++i, ++buffer)
+ de600_put_byte(*buffer);
+ for (; i < len; ++i)
+ de600_put_byte(0);
+
+ if (free_tx_pages-- == TX_PAGES) { /* No transmission going on */
+ dev->trans_start = jiffies;
+ netif_start_queue(dev); /* allow more packets into adapter */
+ /* Send page and generate a faked interrupt */
+ de600_setup_address(transmit_from, TX_ADDR);
+ de600_put_command(TX_ENABLE);
+ }
+ else {
+ if (free_tx_pages)
+ netif_start_queue(dev);
+ else
+ netif_stop_queue(dev);
+ select_prn();
+ }
+ spin_unlock_irqrestore(&de600_lock, flags);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
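+
+/* Illustrative aside, not from the patch: the two-page transmit FIFO
+ bookkeeping in de600_start_xmit() above, condensed into a hypothetical
+ helper. tx_page_adr() comes from de600.h; the return value says
+ whether the adapter was idle and TX must be kicked explicitly. */
+#if 0
+static int tx_fifo_queue(int len)
+{
+ tx_fifo[tx_fifo_in] = tx_page_adr(tx_fifo_in) - len;
+ tx_fifo_in = (tx_fifo_in + 1) % TX_PAGES; /* next free tx page */
+ return free_tx_pages-- == TX_PAGES; /* idle: caller starts TX */
+}
+#endif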
+
+/*
+ * The typical workload of the driver:
+ * Handle the network interface interrupts.
+ */
+
+static irqreturn_t de600_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ u8 irq_status;
+ int retrig = 0;
+ int boguscount = 0;
+
+ spin_lock(&de600_lock);
+
+ select_nic();
+ irq_status = de600_read_status(dev);
+
+ do {
+ pr_debug("de600_interrupt (%02X)\n", irq_status);
+
+ if (irq_status & RX_GOOD)
+ de600_rx_intr(dev);
+ else if (!(irq_status & RX_BUSY))
+ de600_put_command(RX_ENABLE);
+
+ /* Any transmission in progress? */
+ if (free_tx_pages < TX_PAGES)
+ retrig = de600_tx_intr(dev, irq_status);
+ else
+ retrig = 0;
+
+ irq_status = de600_read_status(dev);
+ } while ( (irq_status & RX_GOOD) || ((++boguscount < 100) && retrig) );
+ /*
+ * Yeah, it _looks_ like busy waiting, smells like busy waiting
+ * and I know it's not PC, but please, it will only occur once
+ * in a while and then only for a loop or so (< 1ms for sure!)
+ */
+
+ /* Enable adapter interrupts */
+ select_prn();
+ if (retrig)
+ trigger_interrupt(dev);
+ spin_unlock(&de600_lock);
+ return IRQ_HANDLED;
+}
+
+static int de600_tx_intr(struct net_device *dev, int irq_status)
+{
+ /*
+ * Returns 1 if tx still not done
+ */
+
+ /* Check if current transmission is done yet */
+ if (irq_status & TX_BUSY)
+ return 1; /* tx not done, try again */
+
+ /* else */
+ /* If last transmission OK then bump fifo index */
+ if (!(irq_status & TX_FAILED16)) {
+ tx_fifo_out = (tx_fifo_out + 1) % TX_PAGES;
+ ++free_tx_pages;
+ dev->stats.tx_packets++;
+ netif_wake_queue(dev);
+ }
+
+ /* More to send, or resend last packet? */
+ if ((free_tx_pages < TX_PAGES) || (irq_status & TX_FAILED16)) {
+ dev->trans_start = jiffies;
+ de600_setup_address(tx_fifo[tx_fifo_out], TX_ADDR);
+ de600_put_command(TX_ENABLE);
+ return 1;
+ }
+ /* else */
+
+ return 0;
+}
+
+/*
+ * We have a good packet, get it out of the adapter.
+ */
+static void de600_rx_intr(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ int i;
+ int read_from;
+ int size;
+ unsigned char *buffer;
+
+ /* Get size of received packet */
+ size = de600_read_byte(RX_LEN, dev); /* low byte */
+ size += (de600_read_byte(RX_LEN, dev) << 8); /* high byte */
+ size -= 4; /* Ignore trailing 4 CRC-bytes */
+
+ /* Tell adapter where to store next incoming packet, enable receiver */
+ read_from = rx_page_adr();
+ next_rx_page();
+ de600_put_command(RX_ENABLE);
+
+ if ((size < 32) || (size > 1535)) {
+ printk(KERN_WARNING "%s: Bogus packet size %d.\n", dev->name, size);
+ if (size > 10000)
+ adapter_init(dev);
+ return;
+ }
+
+ skb = dev_alloc_skb(size+2);
+ if (skb == NULL) {
+ printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size);
+ return;
+ }
+ /* else */
+
+ skb_reserve(skb,2); /* Align */
+
+ /* 'skb->data' points to the start of sk_buff data area. */
+ buffer = skb_put(skb,size);
+
+ /* copy the packet into the buffer */
+ de600_setup_address(read_from, RW_ADDR);
+ for (i = size; i > 0; --i, ++buffer)
+ *buffer = de600_read_byte(READ_DATA, dev);
+
+ skb->protocol=eth_type_trans(skb,dev);
+
+ netif_rx(skb);
+
+ /* update stats */
+ dev->stats.rx_packets++; /* count all receives */
+ dev->stats.rx_bytes += size; /* count all received bytes */
+
+ /*
+ * If any worth-while packets have been received, netif_rx()
+ * will work on them when we get to the tasklets.
+ */
+}
+
+static const struct net_device_ops de600_netdev_ops = {
+ .ndo_open = de600_open,
+ .ndo_stop = de600_close,
+ .ndo_start_xmit = de600_start_xmit,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+
+static struct net_device * __init de600_probe(void)
+{
+ int i;
+ struct net_device *dev;
+ int err;
+
+ dev = alloc_etherdev(0);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+
+ if (!request_region(DE600_IO, 3, "de600")) {
+ printk(KERN_WARNING "DE600: port 0x%x busy\n", DE600_IO);
+ err = -EBUSY;
+ goto out;
+ }
+
+ printk(KERN_INFO "%s: D-Link DE-600 pocket adapter", dev->name);
+ /* Alpha testers must have the version number to report bugs. */
+ pr_debug("%s", version);
+
+ /* probe for adapter */
+ err = -ENODEV;
+ rx_page = 0;
+ select_nic();
+ (void)de600_read_status(dev);
+ de600_put_command(RESET);
+ de600_put_command(STOP_RESET);
+ if (de600_read_status(dev) & 0xf0) {
+ printk(": not at I/O %#3x.\n", DATA_PORT);
+ goto out1;
+ }
+
+ /*
+ * Maybe we found one,
+ * have to check if it is a D-Link DE-600 adapter...
+ */
+
+ /* Get the adapter ethernet address from the ROM */
+ de600_setup_address(NODE_ADDRESS, RW_ADDR);
+ for (i = 0; i < ETH_ALEN; i++) {
+ dev->dev_addr[i] = de600_read_byte(READ_DATA, dev);
+ dev->broadcast[i] = 0xff;
+ }
+
+ /* Check magic code */
+ if ((dev->dev_addr[1] == 0xde) && (dev->dev_addr[2] == 0x15)) {
+ /* OK, install real address */
+ dev->dev_addr[0] = 0x00;
+ dev->dev_addr[1] = 0x80;
+ dev->dev_addr[2] = 0xc8;
+ dev->dev_addr[3] &= 0x0f;
+ dev->dev_addr[3] |= 0x70;
+ } else {
+ printk(" not identified in the printer port\n");
+ goto out1;
+ }
+
+ printk(", Ethernet Address: %pM\n", dev->dev_addr);
+
+ dev->netdev_ops = &de600_netdev_ops;
+
+ dev->flags &= ~IFF_MULTICAST;
+
+ select_prn();
+
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+
+ return dev;
+
+out1:
+ release_region(DE600_IO, 3);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static int adapter_init(struct net_device *dev)
+{
+ int i;
+
+ select_nic();
+ rx_page = 0; /* used by RESET */
+ de600_put_command(RESET);
+ de600_put_command(STOP_RESET);
+
+ /* Check if it is still there... */
+ /* Get some bytes of the adapter Ethernet address from the ROM */
+ de600_setup_address(NODE_ADDRESS, RW_ADDR);
+ de600_read_byte(READ_DATA, dev);
+ if ((de600_read_byte(READ_DATA, dev) != 0xde) ||
+ (de600_read_byte(READ_DATA, dev) != 0x15)) {
+ /* was: if (de600_read_status(dev) & 0xf0) { */
+ printk(KERN_WARNING "Something has happened to the DE-600! Please check it and do a new ifconfig!\n");
+ /* Goodbye, cruel world... */
+ dev->flags &= ~IFF_UP;
+ de600_close(dev);
+ was_down = 1;
+ netif_stop_queue(dev); /* Transmit busy... */
+ return 1; /* failed */
+ }
+
+ if (was_down) {
+ printk(KERN_INFO "%s: Thanks, I feel much better now!\n", dev->name);
+ was_down = 0;
+ }
+
+ tx_fifo_in = 0;
+ tx_fifo_out = 0;
+ free_tx_pages = TX_PAGES;
+
+ /* set the ether address. */
+ de600_setup_address(NODE_ADDRESS, RW_ADDR);
+ for (i = 0; i < ETH_ALEN; i++)
+ de600_put_byte(dev->dev_addr[i]);
+
+ /* where to start saving incoming packets */
+ rx_page = RX_BP | RX_BASE_PAGE;
+ de600_setup_address(MEM_4K, RW_ADDR);
+ /* Enable receiver */
+ de600_put_command(RX_ENABLE);
+ select_prn();
+
+ netif_start_queue(dev);
+
+ return 0; /* OK */
+}
+
+static struct net_device *de600_dev;
+
+static int __init de600_init(void)
+{
+ de600_dev = de600_probe();
+ if (IS_ERR(de600_dev))
+ return PTR_ERR(de600_dev);
+ return 0;
+}
+
+static void __exit de600_exit(void)
+{
+ unregister_netdev(de600_dev);
+ release_region(DE600_IO, 3);
+ free_netdev(de600_dev);
+}
+
+module_init(de600_init);
+module_exit(de600_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/dlink/de600.h b/drivers/net/ethernet/dlink/de600.h
new file mode 100644
index 000000000000..e80ecbabcf4e
--- /dev/null
+++ b/drivers/net/ethernet/dlink/de600.h
@@ -0,0 +1,168 @@
+/**************************************************
+ * *
+ * Definition of D-Link Ethernet Pocket adapter *
+ * *
+ **************************************************/
+/*
+ * D-Link Ethernet pocket adapter ports
+ */
+/*
+ * OK, so I'm cheating, but there are an awful lot of
+ * reads and writes in order to get anything in and out
+ * of the DE-600 with 4 bits at a time in the parallel port,
+ * so every saved instruction really helps :-)
+ */
+
+#ifndef DE600_IO
+#define DE600_IO 0x378
+#endif
+
+#define DATA_PORT (DE600_IO)
+#define STATUS_PORT (DE600_IO + 1)
+#define COMMAND_PORT (DE600_IO + 2)
+
+#ifndef DE600_IRQ
+#define DE600_IRQ 7
+#endif
+/*
+ * It really should look like this, and autoprobing as well...
+ *
+#define DATA_PORT (dev->base_addr + 0)
+#define STATUS_PORT (dev->base_addr + 1)
+#define COMMAND_PORT (dev->base_addr + 2)
+#define DE600_IRQ dev->irq
+ */
+
+/*
+ * D-Link COMMAND_PORT commands
+ */
+#define SELECT_NIC 0x04 /* select Network Interface Card */
+#define SELECT_PRN 0x1c /* select Printer */
+#define NML_PRN 0xec /* normal Printer situation */
+#define IRQEN 0x10 /* enable IRQ line */
+
+/*
+ * D-Link STATUS_PORT
+ */
+#define RX_BUSY 0x80
+#define RX_GOOD 0x40
+#define TX_FAILED16 0x10
+#define TX_BUSY 0x08
+
+/*
+ * D-Link DATA_PORT commands
+ * command in low 4 bits
+ * data in high 4 bits
+ * select current data nibble with HI_NIBBLE bit
+ */
+#define WRITE_DATA 0x00 /* write memory */
+#define READ_DATA 0x01 /* read memory */
+#define STATUS 0x02 /* read status register */
+#define COMMAND 0x03 /* write command register (see COMMAND below) */
+#define NULL_COMMAND 0x04 /* null command */
+#define RX_LEN 0x05 /* read received packet length */
+#define TX_ADDR 0x06 /* set adapter transmit memory address */
+#define RW_ADDR 0x07 /* set adapter read/write memory address */
+#define HI_NIBBLE 0x08 /* read/write the high nibble of data,
+ or-ed with rest of command */
+
+/*
+ * command register, accessed through DATA_PORT with low bits = COMMAND
+ */
+#define RX_ALL 0x01 /* PROMISCUOUS */
+#define RX_BP 0x02 /* default: BROADCAST & PHYSICAL ADDRESS */
+#define RX_MBP 0x03 /* MULTICAST, BROADCAST & PHYSICAL ADDRESS */
+
+#define TX_ENABLE 0x04 /* bit 2 */
+#define RX_ENABLE 0x08 /* bit 3 */
+
+#define RESET 0x80 /* set bit 7 high */
+#define STOP_RESET 0x00 /* set bit 7 low */
+
+/*
+ * data to command register
+ * (high 4 bits in write to DATA_PORT)
+ */
+#define RX_PAGE2_SELECT 0x10 /* bit 4, only 2 pages to select */
+#define RX_BASE_PAGE 0x20 /* bit 5, always set when specifying RX_ADDR */
+#define FLIP_IRQ 0x40 /* bit 6 */
+
+/*
+ * D-Link adapter internal memory:
+ *
+ * 0-2K 1:st transmit page (send from pointer up to 2K)
+ * 2-4K 2:nd transmit page (send from pointer up to 4K)
+ *
+ * 4-6K 1:st receive page (data from 4K upwards)
+ * 6-8K 2:nd receive page (data from 6K upwards)
+ *
+ * 8K+ Adapter ROM (contains magic code and last 3 bytes of Ethernet address)
+ */
+#define MEM_2K 0x0800 /* 2048 */
+#define MEM_4K 0x1000 /* 4096 */
+#define MEM_6K 0x1800 /* 6144 */
+#define NODE_ADDRESS 0x2000 /* 8192 */
+
+#define RUNT 60 /* Too small Ethernet packet */
+
+/**************************************************
+ * *
+ * End of definition *
+ * *
+ **************************************************/
+
+/*
+ * Index to functions, as function prototypes.
+ */
+/* Routines used internally. (See "convenience macros") */
+static u8 de600_read_status(struct net_device *dev);
+static u8 de600_read_byte(unsigned char type, struct net_device *dev);
+
+/* Put in the device structure. */
+static int de600_open(struct net_device *dev);
+static int de600_close(struct net_device *dev);
+static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev);
+
+/* Dispatch from interrupts. */
+static irqreturn_t de600_interrupt(int irq, void *dev_id);
+static int de600_tx_intr(struct net_device *dev, int irq_status);
+static void de600_rx_intr(struct net_device *dev);
+
+/* Initialization */
+static void trigger_interrupt(struct net_device *dev);
+static int adapter_init(struct net_device *dev);
+
+/*
+ * Convenience macros/functions for D-Link adapter
+ */
+
+#define select_prn() outb_p(SELECT_PRN, COMMAND_PORT); DE600_SLOW_DOWN
+#define select_nic() outb_p(SELECT_NIC, COMMAND_PORT); DE600_SLOW_DOWN
+
+/* Thanks for hints from Mark Burton <markb@ordern.demon.co.uk> */
+#define de600_put_byte(data) ( \
+ outb_p(((data) << 4) | WRITE_DATA , DATA_PORT), \
+ outb_p(((data) & 0xf0) | WRITE_DATA | HI_NIBBLE, DATA_PORT))
+
+/*
+ * The first two outb_p()'s below could perhaps be deleted if there
+ * would be more delay in the last two. Not certain about it yet...
+ */
+#define de600_put_command(cmd) ( \
+ outb_p(( rx_page << 4) | COMMAND , DATA_PORT), \
+ outb_p(( rx_page & 0xf0) | COMMAND | HI_NIBBLE, DATA_PORT), \
+ outb_p(((rx_page | cmd) << 4) | COMMAND , DATA_PORT), \
+ outb_p(((rx_page | cmd) & 0xf0) | COMMAND | HI_NIBBLE, DATA_PORT))
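+
+/*
+ * (Added note: every command write re-sends the current rx_page bits
+ * or-ed with the command, which is why adapter_init() in de600.c
+ * zeroes rx_page before issuing RESET.)
+ */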
+
+#define de600_setup_address(addr,type) ( \
+ outb_p((((addr) << 4) & 0xf0) | type , DATA_PORT), \
+ outb_p(( (addr) & 0xf0) | type | HI_NIBBLE, DATA_PORT), \
+ outb_p((((addr) >> 4) & 0xf0) | type , DATA_PORT), \
+ outb_p((((addr) >> 8) & 0xf0) | type | HI_NIBBLE, DATA_PORT))
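+
+/*
+ * Worked example (added for clarity): the macro above ships a 16-bit
+ * adapter address four bits at a time, least-significant nibble first.
+ * For NODE_ADDRESS (0x2000) with type RW_ADDR, the four writes carry
+ * the nibbles 0x0, 0x0, 0x0 and 0x2, each or-ed with the command code
+ * (and with HI_NIBBLE on every second write).
+ */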
+
+#define rx_page_adr() ((rx_page & RX_PAGE2_SELECT)?(MEM_6K):(MEM_4K))
+
+/* Flip bit, only 2 pages */
+#define next_rx_page() (rx_page ^= RX_PAGE2_SELECT)
+
+#define tx_page_adr(a) (((a) + 1) * MEM_2K)
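+
+/*
+ * Worked example (added for clarity): tx_page_adr(0) == MEM_2K and
+ * tx_page_adr(1) == MEM_4K, i.e. the macro yields the end address of
+ * transmit page 'a'. Per the memory map above, the adapter sends "from
+ * pointer up to" that boundary, so the driver presumably programs
+ * TX_ADDR with (end - packet length).
+ */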
diff --git a/drivers/net/ethernet/dlink/de620.c b/drivers/net/ethernet/dlink/de620.c
new file mode 100644
index 000000000000..3b934ab784d3
--- /dev/null
+++ b/drivers/net/ethernet/dlink/de620.c
@@ -0,0 +1,988 @@
+/*
+ * de620.c $Revision: 1.40 $ BETA
+ *
+ *
+ * Linux driver for the D-Link DE-620 Ethernet pocket adapter.
+ *
+ * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall <bj0rn@blox.se>
+ *
+ * Based on adapter information gathered from DOS packetdriver
+ * sources from D-Link Inc: (Special thanks to Henry Ngai of D-Link.)
+ * Portions (C) Copyright D-Link SYSTEM Inc. 1991, 1992
+ * Copyright, 1988, Russell Nelson, Crynwr Software
+ *
+ * Adapted to the sample network driver core for linux,
+ * written by: Donald Becker <becker@super.org>
+ * (Now at <becker@scyld.com>)
+ *
+ * Valuable assistance from:
+ * J. Joshua Kopper <kopper@rtsg.mot.com>
+ * Olav Kvittem <Olav.Kvittem@uninett.no>
+ * Germano Caronni <caronni@nessie.cs.id.ethz.ch>
+ * Jeremy Fitzhardinge <jeremy@suite.sw.oz.au>
+ *
+ *****************************************************************************/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+static const char version[] =
+ "de620.c: $Revision: 1.40 $, Bjorn Ekwall <bj0rn@blox.se>\n";
+
+/***********************************************************************
+ *
+ * "Tuning" section.
+ *
+ * Compile-time options: (see below for descriptions)
+ * -DDE620_IO=0x378 (lpt1)
+ * -DDE620_IRQ=7 (lpt1)
+ * -DSHUTDOWN_WHEN_LOST
+ * -DCOUNT_LOOPS
+ * -DLOWSPEED
+ * -DREAD_DELAY
+ * -DWRITE_DELAY
+ */
+
+/*
+ * This driver assumes that the printer port is a "normal",
+ * dumb, uni-directional port!
+ * If your port is "fancy" in any way, please try to set it to "normal"
+ * with your BIOS setup. I have no access to machines with bi-directional
+ * ports, so I can't test such a driver :-(
+ * (Yes, I _know_ it is possible to use DE620 with bidirectional ports...)
+ *
+ * There are some clones of DE620 out there, with different names.
+ * If the current driver does not recognize a clone, try to change
+ * the following #define to:
+ *
+ * #define DE620_CLONE 1
+ */
+#define DE620_CLONE 0
+
+/*
+ * If the adapter has problems with high speeds, enable this #define
+ * otherwise full printerport speed will be attempted.
+ *
+ * You can tune the READ_DELAY/WRITE_DELAY below if you enable LOWSPEED
+ *
+#define LOWSPEED
+ */
+
+#ifndef READ_DELAY
+#define READ_DELAY 100 /* adapter internal read delay in 100ns units */
+#endif
+
+#ifndef WRITE_DELAY
+#define WRITE_DELAY 100 /* adapter internal write delay in 100ns units */
+#endif
+
+/*
+ * Enable this #define if you want the adapter to do an "ifconfig down" on
+ * itself when we have detected that something is possibly wrong with it.
+ * The default behaviour is to retry with "adapter_init()" until success.
+ * This should be used for debugging purposes only.
+ *
+#define SHUTDOWN_WHEN_LOST
+ */
+
+#ifdef LOWSPEED
+/*
+ * Enable this #define if you want to see debugging output that shows how long
+ * we have to wait before the DE-620 is ready for the next read/write/command.
+ *
+#define COUNT_LOOPS
+ */
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+/* Constant definitions for the DE-620 registers, commands and bits */
+#include "de620.h"
+
+typedef unsigned char byte;
+
+/*******************************************************
+ * *
+ * Definition of D-Link DE-620 Ethernet Pocket adapter *
+ * See also "de620.h" *
+ * *
+ *******************************************************/
+#ifndef DE620_IO /* Compile-time configurable */
+#define DE620_IO 0x378
+#endif
+
+#ifndef DE620_IRQ /* Compile-time configurable */
+#define DE620_IRQ 7
+#endif
+
+#define DATA_PORT (dev->base_addr)
+#define STATUS_PORT (dev->base_addr + 1)
+#define COMMAND_PORT (dev->base_addr + 2)
+
+#define RUNT 60 /* Too small Ethernet packet */
+#define GIANT 1514 /* largest legal size packet, no fcs */
+
+/*
+ * Force media with insmod:
+ * insmod de620.o bnc=1
+ * or
+ * insmod de620.o utp=1
+ *
+ * Force io and/or irq with insmod:
+ * insmod de620.o io=0x378 irq=7
+ *
+ * Make a clone skip the Ethernet-address range check:
+ * insmod de620.o clone=1
+ */
+static int bnc;
+static int utp;
+static int io = DE620_IO;
+static int irq = DE620_IRQ;
+static int clone = DE620_CLONE;
+
+static spinlock_t de620_lock;
+
+module_param(bnc, int, 0);
+module_param(utp, int, 0);
+module_param(io, int, 0);
+module_param(irq, int, 0);
+module_param(clone, int, 0);
+MODULE_PARM_DESC(bnc, "DE-620 set BNC medium (0-1)");
+MODULE_PARM_DESC(utp, "DE-620 set UTP medium (0-1)");
+MODULE_PARM_DESC(io, "DE-620 I/O base address, required");
+MODULE_PARM_DESC(irq, "DE-620 IRQ number, required");
+MODULE_PARM_DESC(clone, "Check also for non-D-Link DE-620 clones (0-1)");
+
+/***********************************************
+ * *
+ * Index to functions, as function prototypes. *
+ * *
+ ***********************************************/
+
+/*
+ * Routines used internally. (See also "convenience macros.. below")
+ */
+
+/* Put in the device structure. */
+static int de620_open(struct net_device *);
+static int de620_close(struct net_device *);
+static void de620_set_multicast_list(struct net_device *);
+static int de620_start_xmit(struct sk_buff *, struct net_device *);
+
+/* Dispatch from interrupts. */
+static irqreturn_t de620_interrupt(int, void *);
+static int de620_rx_intr(struct net_device *);
+
+/* Initialization */
+static int adapter_init(struct net_device *);
+static int read_eeprom(struct net_device *);
+
+
+/*
+ * D-Link driver variables:
+ */
+#define SCR_DEF (NIBBLEMODE | INTON | SLEEP | AUTOTX)
+#define TCR_DEF RXPB /* not used: | TXSUCINT | T16INT */
+#define DE620_RX_START_PAGE 12 /* 12 pages (=3k) reserved for tx */
+#define DEF_NIC_CMD (IRQEN | ICEN | DS1)
+
+static volatile byte NIC_Cmd;
+static volatile byte next_rx_page;
+static byte first_rx_page;
+static byte last_rx_page;
+static byte EIPRegister;
+
+static struct nic {
+ byte NodeID[6];
+ byte RAM_Size;
+ byte Model;
+ byte Media;
+ byte SCR;
+} nic_data;
+
+/**********************************************************
+ * *
+ * Convenience macros/functions for D-Link DE-620 adapter *
+ * *
+ **********************************************************/
+#define de620_tx_buffs(dd) (inb(STATUS_PORT) & (TXBF0 | TXBF1))
+#define de620_flip_ds(dd) do { NIC_Cmd ^= DS0 | DS1; outb(NIC_Cmd, COMMAND_PORT); } while (0)
+
+/* Check for ready-status, and return a nibble (high 4 bits) for data input */
+#ifdef COUNT_LOOPS
+static int tot_cnt;
+#endif
+static inline byte
+de620_ready(struct net_device *dev)
+{
+ byte value;
+ register short int cnt = 0;
+
+ while ((((value = inb(STATUS_PORT)) & READY) == 0) && (cnt <= 1000))
+ ++cnt;
+
+#ifdef COUNT_LOOPS
+ tot_cnt += cnt;
+#endif
+ return value & 0xf0; /* nibble */
+}
+
+static inline void
+de620_send_command(struct net_device *dev, byte cmd)
+{
+ de620_ready(dev);
+ if (cmd == W_DUMMY)
+ outb(NIC_Cmd, COMMAND_PORT);
+
+ outb(cmd, DATA_PORT);
+
+ outb(NIC_Cmd ^ CS0, COMMAND_PORT);
+ de620_ready(dev);
+ outb(NIC_Cmd, COMMAND_PORT);
+}
+
+static inline void
+de620_put_byte(struct net_device *dev, byte value)
+{
+ /* The de620_ready() makes 7 loops, on the average, on a DX2/66 */
+ de620_ready(dev);
+ outb(value, DATA_PORT);
+ de620_flip_ds(dev);
+}
+
+static inline byte
+de620_read_byte(struct net_device *dev)
+{
+ byte value;
+
+ /* The de620_ready() makes 7 loops, on the average, on a DX2/66 */
+ value = de620_ready(dev); /* High nibble */
+ de620_flip_ds(dev);
+ value |= de620_ready(dev) >> 4; /* Low nibble */
+ return value;
+}
+
+static inline void
+de620_write_block(struct net_device *dev, byte *buffer, int count, int pad)
+{
+#ifndef LOWSPEED
+ byte uflip = NIC_Cmd ^ (DS0 | DS1);
+ byte dflip = NIC_Cmd;
+#else /* LOWSPEED */
+#ifdef COUNT_LOOPS
+ int bytes = count;
+#endif /* COUNT_LOOPS */
+#endif /* LOWSPEED */
+
+#ifdef LOWSPEED
+#ifdef COUNT_LOOPS
+ tot_cnt = 0;
+#endif /* COUNT_LOOPS */
+ /* No further optimization useful, the limit is in the adapter. */
+ for ( ; count > 0; --count, ++buffer) {
+ de620_put_byte(dev,*buffer);
+ }
+ for ( count = pad ; count > 0; --count, ++buffer) {
+ de620_put_byte(dev, 0);
+ }
+ de620_send_command(dev,W_DUMMY);
+#ifdef COUNT_LOOPS
+ /* trial debug output: loops per byte in de620_ready() */
+ printk("WRITE(%d)\n", tot_cnt/((bytes?bytes:1)));
+#endif /* COUNT_LOOPS */
+#else /* not LOWSPEED */
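+ /*
+ * (Added note: the fast path below emits two bytes per uflip/dflip
+ * strobe pair; unlike the LOWSPEED path above, it does not write the
+ * 'pad' bytes explicitly.)
+ */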
+ for ( ; count > 0; count -=2) {
+ outb(*buffer++, DATA_PORT);
+ outb(uflip, COMMAND_PORT);
+ outb(*buffer++, DATA_PORT);
+ outb(dflip, COMMAND_PORT);
+ }
+ de620_send_command(dev,W_DUMMY);
+#endif /* LOWSPEED */
+}
+
+static inline void
+de620_read_block(struct net_device *dev, byte *data, int count)
+{
+#ifndef LOWSPEED
+ byte value;
+ byte uflip = NIC_Cmd ^ (DS0 | DS1);
+ byte dflip = NIC_Cmd;
+#else /* LOWSPEED */
+#ifdef COUNT_LOOPS
+ int bytes = count;
+
+ tot_cnt = 0;
+#endif /* COUNT_LOOPS */
+#endif /* LOWSPEED */
+
+#ifdef LOWSPEED
+ /* No further optimization useful, the limit is in the adapter. */
+ while (count-- > 0) {
+ *data++ = de620_read_byte(dev);
+ de620_flip_ds(dev);
+ }
+#ifdef COUNT_LOOPS
+ /* trial debug output: loops per byte in de620_ready() */
+ printk("READ(%d)\n", tot_cnt/(2*(bytes?bytes:1)));
+#endif /* COUNT_LOOPS */
+#else /* not LOWSPEED */
+ while (count-- > 0) {
+ value = inb(STATUS_PORT) & 0xf0; /* High nibble */
+ outb(uflip, COMMAND_PORT);
+ *data++ = value | inb(STATUS_PORT) >> 4; /* Low nibble */
+ outb(dflip , COMMAND_PORT);
+ }
+#endif /* LOWSPEED */
+}
+
+static inline void
+de620_set_delay(struct net_device *dev)
+{
+ de620_ready(dev);
+ outb(W_DFR, DATA_PORT);
+ outb(NIC_Cmd ^ CS0, COMMAND_PORT);
+
+ de620_ready(dev);
+#ifdef LOWSPEED
+ outb(WRITE_DELAY, DATA_PORT);
+#else
+ outb(0, DATA_PORT);
+#endif
+ de620_flip_ds(dev);
+
+ de620_ready(dev);
+#ifdef LOWSPEED
+ outb(READ_DELAY, DATA_PORT);
+#else
+ outb(0, DATA_PORT);
+#endif
+ de620_flip_ds(dev);
+}
+
+static inline void
+de620_set_register(struct net_device *dev, byte reg, byte value)
+{
+ de620_ready(dev);
+ outb(reg, DATA_PORT);
+ outb(NIC_Cmd ^ CS0, COMMAND_PORT);
+
+ de620_put_byte(dev, value);
+}
+
+static inline byte
+de620_get_register(struct net_device *dev, byte reg)
+{
+ byte value;
+
+ de620_send_command(dev,reg);
+ value = de620_read_byte(dev);
+ de620_send_command(dev,W_DUMMY);
+
+ return value;
+}
+
+/*********************************************************************
+ *
+ * Open/initialize the board.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ *
+ */
+static int de620_open(struct net_device *dev)
+{
+ int ret = request_irq(dev->irq, de620_interrupt, 0, dev->name, dev);
+ if (ret) {
+ printk (KERN_ERR "%s: unable to get IRQ %d\n", dev->name, dev->irq);
+ return ret;
+ }
+
+ if (adapter_init(dev)) {
+ ret = -EIO;
+ goto out_free_irq;
+ }
+
+ netif_start_queue(dev);
+ return 0;
+
+out_free_irq:
+ free_irq(dev->irq, dev);
+ return ret;
+}
+
+/************************************************
+ *
+ * The inverse routine to de620_open().
+ *
+ */
+
+static int de620_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ /* disable recv */
+ de620_set_register(dev, W_TCR, RXOFF);
+ free_irq(dev->irq, dev);
+ return 0;
+}
+
+/*********************************************
+ *
+ * Set or clear the multicast filter for this adaptor.
+ * (no real multicast implemented for the DE-620, but she can be promiscuous...)
+ *
+ */
+
+static void de620_set_multicast_list(struct net_device *dev)
+{
+ if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ { /* Enable promiscuous mode */
+ de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
+ }
+ else
+ { /* Disable promiscuous mode, use normal mode */
+ de620_set_register(dev, W_TCR, TCR_DEF);
+ }
+}
+
+/*******************************************************
+ *
+ * Handle timeouts on transmit
+ */
+
+static void de620_timeout(struct net_device *dev)
+{
+ printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, "network cable problem");
+ /* Restart the adapter. */
+ if (!adapter_init(dev)) /* maybe close it */
+ netif_wake_queue(dev);
+}
+
+/*******************************************************
+ *
+ * Copy a buffer to the adapter transmit page memory.
+ * Start sending.
+ */
+static int de620_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned long flags;
+ int len;
+ byte *buffer = skb->data;
+ byte using_txbuf;
+
+ using_txbuf = de620_tx_buffs(dev); /* Peek at the adapter */
+
+ netif_stop_queue(dev);
+
+ if ((len = skb->len) < RUNT)
+ len = RUNT;
+ if (len & 1) /* send an even number of bytes */
+ ++len;
+
+ /* Start real output */
+
+ spin_lock_irqsave(&de620_lock, flags);
+ pr_debug("de620_start_xmit: len=%d, bufs 0x%02x\n",
+ (int)skb->len, using_txbuf);
+
+ /* select a free tx buffer. if there is one... */
+ switch (using_txbuf) {
+ default: /* both are free: use TXBF0 */
+ case TXBF1: /* use TXBF0 */
+ de620_send_command(dev,W_CR | RW0);
+ using_txbuf |= TXBF0;
+ break;
+
+ case TXBF0: /* use TXBF1 */
+ de620_send_command(dev,W_CR | RW1);
+ using_txbuf |= TXBF1;
+ break;
+
+ case (TXBF0 | TXBF1): /* NONE!!! */
+ printk(KERN_WARNING "%s: No tx-buffer available!\n", dev->name);
+ spin_unlock_irqrestore(&de620_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+ de620_write_block(dev, buffer, skb->len, len-skb->len);
+
+ if (using_txbuf != (TXBF0 | TXBF1))
+ netif_wake_queue(dev);
+
+ dev->stats.tx_packets++;
+ spin_unlock_irqrestore(&de620_lock, flags);
+ dev_kfree_skb (skb);
+ return NETDEV_TX_OK;
+}
+
+/*****************************************************
+ *
+ * Handle the network interface interrupts.
+ *
+ */
+static irqreturn_t
+de620_interrupt(int irq_in, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ byte irq_status;
+ int bogus_count = 0;
+ int again = 0;
+
+ spin_lock(&de620_lock);
+
+ /* Read the status register (_not_ the status port) */
+ irq_status = de620_get_register(dev, R_STS);
+
+ pr_debug("de620_interrupt (%2.2X)\n", irq_status);
+
+ if (irq_status & RXGOOD) {
+ do {
+ again = de620_rx_intr(dev);
+ pr_debug("again=%d\n", again);
+ }
+ while (again && (++bogus_count < 100));
+ }
+
+ if (de620_tx_buffs(dev) != (TXBF0 | TXBF1))
+ netif_wake_queue(dev);
+
+ spin_unlock(&de620_lock);
+ return IRQ_HANDLED;
+}
+
+/**************************************
+ *
+ * Get a packet from the adapter
+ *
+ * Send it "upstairs"
+ *
+ */
+static int de620_rx_intr(struct net_device *dev)
+{
+ struct header_buf {
+ byte status;
+ byte Rx_NextPage;
+ unsigned short Rx_ByteCount;
+ } header_buf;
+ struct sk_buff *skb;
+ int size;
+ byte *buffer;
+ byte pagelink;
+ byte curr_page;
+
+ pr_debug("de620_rx_intr: next_rx_page = %d\n", next_rx_page);
+
+ /* Tell the adapter that we are going to read data, and from where */
+ de620_send_command(dev, W_CR | RRN);
+ de620_set_register(dev, W_RSA1, next_rx_page);
+ de620_set_register(dev, W_RSA0, 0);
+
+ /* Deep breath, and away we goooooo */
+ de620_read_block(dev, (byte *)&header_buf, sizeof(struct header_buf));
+ pr_debug("page status=0x%02x, nextpage=%d, packetsize=%d\n",
+ header_buf.status, header_buf.Rx_NextPage,
+ header_buf.Rx_ByteCount);
+
+ /* Plausible page header? */
+ pagelink = header_buf.Rx_NextPage;
+ if ((pagelink < first_rx_page) || (last_rx_page < pagelink)) {
+ /* Ouch... Forget it! Skip all and start afresh... */
+ printk(KERN_WARNING "%s: Ring overrun? Restoring...\n", dev->name);
+ /* You win some, you lose some. And sometimes plenty... */
+ adapter_init(dev);
+ netif_wake_queue(dev);
+ dev->stats.rx_over_errors++;
+ return 0;
+ }
+
+ /* OK, this looks good so far. Let's see if it's consistent... */
+ /* Let's compute the start of the next packet, based on where we are */
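+ /*
+ * (Added note: in the expression below, (4 - 1 + 0x100) adds the
+ * 4-byte page header to the byte count and rounds up to whole
+ * 256-byte pages, i.e. pages = ceil((count + 4) / 256).)
+ */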
+ pagelink = next_rx_page +
+ ((header_buf.Rx_ByteCount + (4 - 1 + 0x100)) >> 8);
+
+ /* Are we going to wrap around the page counter? */
+ if (pagelink > last_rx_page)
+ pagelink -= (last_rx_page - first_rx_page + 1);
+
+ /* Is the _computed_ next page number equal to what the adapter says? */
+ if (pagelink != header_buf.Rx_NextPage) {
+ /* Naah, we'll skip this packet. Probably bogus data as well */
+ printk(KERN_WARNING "%s: Page link out of sync! Restoring...\n", dev->name);
+ next_rx_page = header_buf.Rx_NextPage; /* at least a try... */
+ de620_send_command(dev, W_DUMMY);
+ de620_set_register(dev, W_NPRF, next_rx_page);
+ dev->stats.rx_over_errors++;
+ return 0;
+ }
+ next_rx_page = pagelink;
+
+ size = header_buf.Rx_ByteCount - 4;
+ if ((size < RUNT) || (GIANT < size)) {
+ printk(KERN_WARNING "%s: Illegal packet size: %d!\n", dev->name, size);
+ }
+ else { /* Good packet? */
+ skb = dev_alloc_skb(size+2);
+ if (skb == NULL) { /* Yeah, but no place to put it... */
+ printk(KERN_WARNING "%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size);
+ dev->stats.rx_dropped++;
+ }
+ else { /* Yep! Go get it! */
+ skb_reserve(skb,2); /* Align */
+ /* skb->data points to the start of sk_buff data area */
+ buffer = skb_put(skb,size);
+ /* copy the packet into the buffer */
+ de620_read_block(dev, buffer, size);
+ pr_debug("Read %d bytes\n", size);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb); /* deliver it "upstairs" */
+ /* count all receives */
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += size;
+ }
+ }
+
+ /* Let's peek ahead to see if we have read the last current packet */
+ /* NOTE! We're _not_ checking the 'EMPTY'-flag! This seems better... */
+ curr_page = de620_get_register(dev, R_CPR);
+ de620_set_register(dev, W_NPRF, next_rx_page);
+ pr_debug("next_rx_page=%d CPR=%d\n", next_rx_page, curr_page);
+
+ return next_rx_page != curr_page; /* That was slightly tricky... */
+}
+
+/*********************************************
+ *
+ * Reset the adapter to a known state
+ *
+ */
+static int adapter_init(struct net_device *dev)
+{
+ int i;
+ static int was_down;
+
+ if ((nic_data.Model == 3) || (nic_data.Model == 0)) { /* CT */
+ EIPRegister = NCTL0;
+ if (nic_data.Media != 1)
+ EIPRegister |= NIS0; /* not BNC */
+ }
+ else if (nic_data.Model == 2) { /* UTP */
+ EIPRegister = NCTL0 | NIS0;
+ }
+
+ if (utp)
+ EIPRegister = NCTL0 | NIS0;
+ if (bnc)
+ EIPRegister = NCTL0;
+
+ de620_send_command(dev, W_CR | RNOP | CLEAR);
+ de620_send_command(dev, W_CR | RNOP);
+
+ de620_set_register(dev, W_SCR, SCR_DEF);
+ /* disable recv to wait init */
+ de620_set_register(dev, W_TCR, RXOFF);
+
+ /* Set the node ID in the adapter */
+ for (i = 0; i < 6; ++i) { /* W_PARn = 0xaa + n */
+ de620_set_register(dev, W_PAR0 + i, dev->dev_addr[i]);
+ }
+
+ de620_set_register(dev, W_EIP, EIPRegister);
+
+ next_rx_page = first_rx_page = DE620_RX_START_PAGE;
+ if (nic_data.RAM_Size)
+ last_rx_page = nic_data.RAM_Size - 1;
+ else /* 64k RAM */
+ last_rx_page = 255;
+
+ de620_set_register(dev, W_SPR, first_rx_page); /* Start Page Register*/
+ de620_set_register(dev, W_EPR, last_rx_page); /* End Page Register */
+ de620_set_register(dev, W_CPR, first_rx_page);/*Current Page Register*/
+ de620_send_command(dev, W_NPR | first_rx_page); /* Next Page Register*/
+ de620_send_command(dev, W_DUMMY);
+ de620_set_delay(dev);
+
+ /* Final sanity check: Anybody out there? */
+ /* Let's hope some bits from the status register make a good check */
+#define CHECK_MASK ( 0 | TXSUC | T16 | 0 | RXCRC | RXSHORT | 0 | 0 )
+#define CHECK_OK ( 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 )
+ /* success: X 0 0 X 0 0 X X */
+ /* ignore: EEDI RXGOOD COLS LNKS*/
+
+ if (((i = de620_get_register(dev, R_STS)) & CHECK_MASK) != CHECK_OK) {
+ printk(KERN_ERR "%s: Something has happened to the DE-620! Please check it"
+#ifdef SHUTDOWN_WHEN_LOST
+ " and do a new ifconfig"
+#endif
+ "! (%02x)\n", dev->name, i);
+#ifdef SHUTDOWN_WHEN_LOST
+ /* Goodbye, cruel world... */
+ dev->flags &= ~IFF_UP;
+ de620_close(dev);
+#endif
+ was_down = 1;
+ return 1; /* failed */
+ }
+ if (was_down) {
+ printk(KERN_WARNING "%s: Thanks, I feel much better now!\n", dev->name);
+ was_down = 0;
+ }
+
+ /* All OK, go ahead... */
+ de620_set_register(dev, W_TCR, TCR_DEF);
+
+ return 0; /* all ok */
+}
+
+static const struct net_device_ops de620_netdev_ops = {
+ .ndo_open = de620_open,
+ .ndo_stop = de620_close,
+ .ndo_start_xmit = de620_start_xmit,
+ .ndo_tx_timeout = de620_timeout,
+ .ndo_set_rx_mode = de620_set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/******************************************************************************
+ *
+ * Only start-up code below
+ *
+ */
+/****************************************
+ *
+ * Check if there is a DE-620 connected
+ */
+struct net_device * __init de620_probe(int unit)
+{
+ byte checkbyte = 0xa5;
+ struct net_device *dev;
+ int err = -ENOMEM;
+ int i;
+
+ dev = alloc_etherdev(0);
+ if (!dev)
+ goto out;
+
+ spin_lock_init(&de620_lock);
+
+ /*
+ * This is where the base_addr and irq get set.
+ * Tunable at compile-time and insmod-time
+ */
+ dev->base_addr = io;
+ dev->irq = irq;
+
+ /* allow overriding parameters on command line */
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ pr_debug("%s", version);
+
+ printk(KERN_INFO "D-Link DE-620 pocket adapter");
+
+ if (!request_region(dev->base_addr, 3, "de620")) {
+ printk(" io 0x%3lX, which is busy.\n", dev->base_addr);
+ err = -EBUSY;
+ goto out1;
+ }
+
+ /* Initially, configure basic nibble mode, so we can read the EEPROM */
+ NIC_Cmd = DEF_NIC_CMD;
+ de620_set_register(dev, W_EIP, EIPRegister);
+
+ /* Anybody out there? */
+ de620_set_register(dev, W_CPR, checkbyte);
+ checkbyte = de620_get_register(dev, R_CPR);
+
+ if ((checkbyte != 0xa5) || (read_eeprom(dev) != 0)) {
+ printk(" not identified in the printer port\n");
+ err = -ENODEV;
+ goto out2;
+ }
+
+ /* else, got it! */
+ dev->dev_addr[0] = nic_data.NodeID[0];
+ for (i = 1; i < ETH_ALEN; i++) {
+ dev->dev_addr[i] = nic_data.NodeID[i];
+ dev->broadcast[i] = 0xff;
+ }
+
+ printk(", Ethernet Address: %pM", dev->dev_addr);
+
+ printk(" (%dk RAM,",
+ (nic_data.RAM_Size) ? (nic_data.RAM_Size >> 2) : 64);
+
+ if (nic_data.Media == 1)
+ printk(" BNC)\n");
+ else
+ printk(" UTP)\n");
+
+ dev->netdev_ops = &de620_netdev_ops;
+ dev->watchdog_timeo = HZ*2;
+
+ /* base_addr and irq are already set, see above! */
+
+ /* dump eeprom */
+ pr_debug("\nEEPROM contents:\n"
+ "RAM_Size = 0x%02X\n"
+ "NodeID = %pM\n"
+ "Model = %d\n"
+ "Media = %d\n"
+ "SCR = 0x%02x\n", nic_data.RAM_Size, nic_data.NodeID,
+ nic_data.Model, nic_data.Media, nic_data.SCR);
+
+ err = register_netdev(dev);
+ if (err)
+ goto out2;
+ return dev;
+
+out2:
+ release_region(dev->base_addr, 3);
+out1:
+ free_netdev(dev);
+out:
+ return ERR_PTR(err);
+}
+
+/**********************************
+ *
+ * Read info from on-board EEPROM
+ *
+ * Note: Bitwise serial I/O to/from the EEPROM via the status _register_!
+ */
+#define sendit(dev,data) de620_set_register(dev, W_EIP, data | EIPRegister);
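+
+/*
+ * (Added note, inferred from the waveform comments below: in the
+ * nibble handed to W_EIP, bit 0 drives the EEPROM clock (SCK), bit 1
+ * the data-in line (DI) and bit 2 the chip select (CS).)
+ */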
+
+static unsigned short __init ReadAWord(struct net_device *dev, int from)
+{
+ unsigned short data;
+ int nbits;
+
+ /* cs [__~~] SET SEND STATE */
+ /* di [____] */
+ /* sck [_~~_] */
+ sendit(dev, 0); sendit(dev, 1); sendit(dev, 5); sendit(dev, 4);
+
+ /* Send the 9-bit address from where we want to read the 16-bit word */
+ for (nbits = 9; nbits > 0; --nbits, from <<= 1) {
+ if (from & 0x0100) { /* bit set? */
+ /* cs [~~~~] SEND 1 */
+ /* di [~~~~] */
+ /* sck [_~~_] */
+ sendit(dev, 6); sendit(dev, 7); sendit(dev, 7); sendit(dev, 6);
+ }
+ else {
+ /* cs [~~~~] SEND 0 */
+ /* di [____] */
+ /* sck [_~~_] */
+ sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4);
+ }
+ }
+
+ /* Shift in the 16-bit word. The bits appear serially in EEDI (=0x80) */
+ for (data = 0, nbits = 16; nbits > 0; --nbits) {
+ /* cs [~~~~] SEND 0 */
+ /* di [____] */
+ /* sck [_~~_] */
+ sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4);
+ data = (data << 1) | ((de620_get_register(dev, R_STS) & EEDI) >> 7);
+ }
+ /* cs [____] RESET SEND STATE */
+ /* di [____] */
+ /* sck [_~~_] */
+ sendit(dev, 0); sendit(dev, 1); sendit(dev, 1); sendit(dev, 0);
+
+ return data;
+}
+
+static int __init read_eeprom(struct net_device *dev)
+{
+ unsigned short wrd;
+
+ /* D-Link Ethernet addresses are in the series 00:80:c8:7X:XX:XX */
+ wrd = ReadAWord(dev, 0x1aa); /* bytes 0 + 1 of NodeID */
+ if (!clone && (wrd != htons(0x0080))) /* Valid D-Link ether sequence? */
+ return -1; /* Nope, not a DE-620 */
+ nic_data.NodeID[0] = wrd & 0xff;
+ nic_data.NodeID[1] = wrd >> 8;
+
+ wrd = ReadAWord(dev, 0x1ab); /* bytes 2 + 3 of NodeID */
+ if (!clone && ((wrd & 0xff) != 0xc8)) /* Valid D-Link ether sequence? */
+ return -1; /* Nope, not a DE-620 */
+ nic_data.NodeID[2] = wrd & 0xff;
+ nic_data.NodeID[3] = wrd >> 8;
+
+ wrd = ReadAWord(dev, 0x1ac); /* bytes 4 + 5 of NodeID */
+ nic_data.NodeID[4] = wrd & 0xff;
+ nic_data.NodeID[5] = wrd >> 8;
+
+ wrd = ReadAWord(dev, 0x1ad); /* RAM size in pages (256 bytes). 0 = 64k */
+ nic_data.RAM_Size = (wrd >> 8);
+
+ wrd = ReadAWord(dev, 0x1ae); /* hardware model (CT = 3) */
+ nic_data.Model = (wrd & 0xff);
+
+ wrd = ReadAWord(dev, 0x1af); /* media (indicates BNC/UTP) */
+ nic_data.Media = (wrd & 0xff);
+
+ wrd = ReadAWord(dev, 0x1a8); /* System Configuration Register */
+ nic_data.SCR = (wrd >> 8);
+
+ return 0; /* no errors */
+}
+
+/******************************************************************************
+ *
+ * Loadable module skeleton
+ *
+ */
+#ifdef MODULE
+static struct net_device *de620_dev;
+
+int __init init_module(void)
+{
+ de620_dev = de620_probe(-1);
+ if (IS_ERR(de620_dev))
+ return PTR_ERR(de620_dev);
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ unregister_netdev(de620_dev);
+ release_region(de620_dev->base_addr, 3);
+ free_netdev(de620_dev);
+}
+#endif /* MODULE */
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/dlink/de620.h b/drivers/net/ethernet/dlink/de620.h
new file mode 100644
index 000000000000..e8d9a88f4cb5
--- /dev/null
+++ b/drivers/net/ethernet/dlink/de620.h
@@ -0,0 +1,117 @@
+/*********************************************************
+ * *
+ * Definition of D-Link DE-620 Ethernet Pocket adapter *
+ * *
+ *********************************************************/
+
+/* DE-620's CMD port Command */
+#define CS0 0x08 /* 1->0 command strobe */
+#define ICEN 0x04 /* 0=enable DL3520 host interface */
+#define DS0 0x02 /* 1->0 data strobe 0 */
+#define DS1 0x01 /* 1->0 data strobe 1 */
+
+#define WDIR 0x20 /* general 0=read 1=write */
+#define RDIR 0x00 /* (not 100% confirmed) */
+#define PS2WDIR 0x00 /* ps/2 mode 1=read, 0=write */
+#define PS2RDIR 0x20
+
+#define IRQEN 0x10 /* 1 = enable printer IRQ line */
+#define SELECTIN 0x08 /* 1 = select printer */
+#define INITP 0x04 /* 0 = initialize printer */
+#define AUTOFEED 0x02 /* 1 = printer auto form feed */
+#define STROBE 0x01 /* 0->1 data strobe */
+
+#define RESET 0x08
+#define NIS0 0x20 /* 0 = BNC, 1 = UTP */
+#define NCTL0 0x10
+
+/* DE-620 DIC Command */
+#define W_DUMMY 0x00 /* DIC reserved command */
+#define W_CR 0x20 /* DIC write command register */
+#define W_NPR 0x40 /* DIC write Next Page Register */
+#define W_TBR 0x60 /* DIC write Tx Byte Count 1 reg */
+#define W_RSA 0x80 /* DIC write Remote Start Addr 1 */
+
+/* DE-620's STAT port bits 7-4 */
+#define EMPTY 0x80 /* 1 = receive buffer empty */
+#define INTLEVEL 0x40 /* 1 = interrupt level is high */
+#define TXBF1 0x20 /* 1 = transmit buffer 1 is in use */
+#define TXBF0 0x10 /* 1 = transmit buffer 0 is in use */
+#define READY 0x08 /* 1 = h/w ready to accept cmd/data */
+
+/* IDC 1 Command */
+#define W_RSA1 0xa0 /* write remote start address 1 */
+#define W_RSA0 0xa1 /* write remote start address 0 */
+#define W_NPRF 0xa2 /* write next page register NPR15-NPR8 */
+#define W_DFR 0xa3 /* write delay factor register */
+#define W_CPR 0xa4 /* write current page register */
+#define W_SPR 0xa5 /* write start page register */
+#define W_EPR 0xa6 /* write end page register */
+#define W_SCR 0xa7 /* write system configuration register */
+#define W_TCR 0xa8 /* write Transceiver Configuration reg */
+#define W_EIP 0xa9 /* write EEPM Interface port */
+#define W_PAR0 0xaa /* write physical address register 0 */
+#define W_PAR1 0xab /* write physical address register 1 */
+#define W_PAR2 0xac /* write physical address register 2 */
+#define W_PAR3 0xad /* write physical address register 3 */
+#define W_PAR4 0xae /* write physical address register 4 */
+#define W_PAR5 0xaf /* write physical address register 5 */
+
+/* IDC 2 Command */
+#define R_STS 0xc0 /* read status register */
+#define R_CPR 0xc1 /* read current page register */
+#define R_BPR 0xc2 /* read boundary page register */
+#define R_TDR 0xc3 /* read time domain reflectometry reg */
+
+/* STATUS Register */
+#define EEDI 0x80 /* EEPM DO pin */
+#define TXSUC 0x40 /* tx success */
+#define T16 0x20 /* tx fail 16 times */
+#define TS1 0x40 /* 0=Tx success, 1=T16 */
+#define TS0 0x20 /* 0=Tx success, 1=T16 */
+#define RXGOOD 0x10 /* rx a good packet */
+#define RXCRC 0x08 /* rx a CRC error packet */
+#define RXSHORT 0x04 /* rx a short packet */
+#define COLS 0x02 /* coaxial collision status */
+#define LNKS 0x01 /* UTP link status */
+
+/* Command Register */
+#define CLEAR 0x10 /* reset part of hardware */
+#define NOPER 0x08 /* No Operation */
+#define RNOP 0x08
+#define RRA 0x06 /* After RR then auto-advance NPR & BPR(=NPR-1) */
+#define RRN 0x04 /* Normal Remote Read mode */
+#define RW1 0x02 /* Remote Write tx buffer 1 ( page 6 - 11 ) */
+#define RW0 0x00 /* Remote Write tx buffer 0 ( page 0 - 5 ) */
+#define TXEN 0x01 /* 0->1 tx enable */
+
+/* System Configuration Register */
+#define TESTON 0x80 /* test host data transfer reliability */
+#define SLEEP 0x40 /* sleep mode */
+#if 0
+#define FASTMODE 0x04 /* fast mode for intel 82360SL fast mode */
+#define BYTEMODE 0x02 /* byte mode */
+#else
+#define FASTMODE 0x20 /* fast mode for intel 82360SL fast mode */
+#define BYTEMODE 0x10 /* byte mode */
+#endif
+#define NIBBLEMODE 0x00 /* nibble mode */
+#define IRQINV 0x08 /* turn off IRQ line inverter */
+#define IRQNML 0x00 /* turn on IRQ line inverter */
+#define INTON 0x04
+#define AUTOFFSET 0x02 /* auto shift address to TPR+12 */
+#define AUTOTX 0x01 /* auto tx when leave RW mode */
+
+/* Transceiver Configuration Register */
+#define JABBER 0x80 /* generate jabber condition */
+#define TXSUCINT 0x40 /* enable tx success interrupt */
+#define T16INT 0x20 /* enable T16 interrupt */
+#define RXERRPKT 0x10 /* accept CRC error or short packet */
+#define EXTERNALB2 0x0C /* external loopback 2 */
+#define EXTERNALB1 0x08 /* external loopback 1 */
+#define INTERNALB 0x04 /* internal loopback */
+#define NMLOPERATE 0x00 /* normal operation */
+#define RXPBM 0x03 /* rx physical, broadcast, multicast */
+#define RXPB 0x02 /* rx physical, broadcast */
+#define RXALL 0x01 /* rx all packet */
+#define RXOFF 0x00 /* rx disable */
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
new file mode 100644
index 000000000000..b2dc2c81a147
--- /dev/null
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -0,0 +1,1821 @@
+/* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */
+/*
+ Copyright (c) 2001, 2002 by D-Link Corporation
+ Written by Edward Peng.<edward_peng@dlink.com.tw>
+ Created 03-May-2001, based on Linux' sundance.c.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+*/
+
+#define DRV_NAME "DL2000/TC902x-based linux driver"
+#define DRV_VERSION "v1.19"
+#define DRV_RELDATE "2007/08/12"
+#include "dl2k.h"
+#include <linux/dma-mapping.h>
+
+static char version[] __devinitdata =
+ KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
+#define MAX_UNITS 8
+static int mtu[MAX_UNITS];
+static int vlan[MAX_UNITS];
+static int jumbo[MAX_UNITS];
+static char *media[MAX_UNITS];
+static int tx_flow=-1;
+static int rx_flow=-1;
+static int copy_thresh;
+static int rx_coalesce=10; /* Rx frame count each interrupt */
+static int rx_timeout=200; /* Rx DMA wait time in 640ns increments */
+static int tx_coalesce=16; /* HW xmit count each TxDMAComplete */
+
+
+MODULE_AUTHOR ("Edward Peng");
+MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
+MODULE_LICENSE("GPL");
+module_param_array(mtu, int, NULL, 0);
+module_param_array(media, charp, NULL, 0);
+module_param_array(vlan, int, NULL, 0);
+module_param_array(jumbo, int, NULL, 0);
+module_param(tx_flow, int, 0);
+module_param(rx_flow, int, 0);
+module_param(copy_thresh, int, 0);
+module_param(rx_coalesce, int, 0); /* Rx frame count each interrupt */
+module_param(rx_timeout, int, 0); /* Rx DMA wait time in 640ns increments */
+module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */
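+
+/*
+ * Illustrative insmod line (an added example; "100mbps_fd" is one of
+ * the media strings accepted in rio_probe1() below, and the array
+ * parameters take one value per card):
+ *
+ *	insmod dl2k.ko media=100mbps_fd vlan=2
+ */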
+
+/* Enable the default interrupts */
+#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
+ UpdateStats | LinkEvent)
+#define EnableInt() \
+writew(DEFAULT_INTR, ioaddr + IntEnable)
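+/* (Added note: EnableInt() expands in place and expects a local
+ * 'ioaddr' variable to be in scope at the expansion site, as in
+ * rio_open() below.) */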
+
+static const int max_intrloop = 50;
+static const int multicast_filter_limit = 0x40;
+
+static int rio_open (struct net_device *dev);
+static void rio_timer (unsigned long data);
+static void rio_tx_timeout (struct net_device *dev);
+static void alloc_list (struct net_device *dev);
+static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t rio_interrupt (int irq, void *dev_instance);
+static void rio_free_tx (struct net_device *dev, int irq);
+static void tx_error (struct net_device *dev, int tx_status);
+static int receive_packet (struct net_device *dev);
+static void rio_error (struct net_device *dev, int int_status);
+static int change_mtu (struct net_device *dev, int new_mtu);
+static void set_multicast (struct net_device *dev);
+static struct net_device_stats *get_stats (struct net_device *dev);
+static int clear_stats (struct net_device *dev);
+static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
+static int rio_close (struct net_device *dev);
+static int find_miiphy (struct net_device *dev);
+static int parse_eeprom (struct net_device *dev);
+static int read_eeprom (long ioaddr, int eep_addr);
+static int mii_wait_link (struct net_device *dev, int wait);
+static int mii_set_media (struct net_device *dev);
+static int mii_get_media (struct net_device *dev);
+static int mii_set_media_pcs (struct net_device *dev);
+static int mii_get_media_pcs (struct net_device *dev);
+static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
+static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
+ u16 data);
+
+static const struct ethtool_ops ethtool_ops;
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = rio_open,
+ .ndo_start_xmit = start_xmit,
+ .ndo_stop = rio_close,
+ .ndo_get_stats = get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_rx_mode = set_multicast,
+ .ndo_do_ioctl = rio_ioctl,
+ .ndo_tx_timeout = rio_tx_timeout,
+ .ndo_change_mtu = change_mtu,
+};
+
+static int __devinit
+rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ static int card_idx;
+ int chip_idx = ent->driver_data;
+ int err, irq;
+ long ioaddr;
+ static int version_printed;
+ void *ring_space;
+ dma_addr_t ring_dma;
+
+ if (!version_printed++)
+ printk ("%s", version);
+
+ err = pci_enable_device (pdev);
+ if (err)
+ return err;
+
+ irq = pdev->irq;
+ err = pci_request_regions (pdev, "dl2k");
+ if (err)
+ goto err_out_disable;
+
+ pci_set_master (pdev);
+ dev = alloc_etherdev (sizeof (*np));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_out_res;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+#ifdef MEM_MAPPING
+ ioaddr = pci_resource_start (pdev, 1);
+ ioaddr = (long) ioremap (ioaddr, RIO_IO_SIZE);
+ if (!ioaddr) {
+ err = -ENOMEM;
+ goto err_out_dev;
+ }
+#else
+ ioaddr = pci_resource_start (pdev, 0);
+#endif
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ np = netdev_priv(dev);
+ np->chip_id = chip_idx;
+ np->pdev = pdev;
+ spin_lock_init (&np->tx_lock);
+ spin_lock_init (&np->rx_lock);
+
+ /* Parse manual configuration */
+ np->an_enable = 1;
+ np->tx_coalesce = 1;
+ if (card_idx < MAX_UNITS) {
+ if (media[card_idx] != NULL) {
+ np->an_enable = 0;
+ if (strcmp (media[card_idx], "auto") == 0 ||
+ strcmp (media[card_idx], "autosense") == 0 ||
+ strcmp (media[card_idx], "0") == 0 ) {
+ np->an_enable = 2;
+ } else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
+ strcmp (media[card_idx], "4") == 0) {
+ np->speed = 100;
+ np->full_duplex = 1;
+ } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
+ strcmp (media[card_idx], "3") == 0) {
+ np->speed = 100;
+ np->full_duplex = 0;
+ } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
+ strcmp (media[card_idx], "2") == 0) {
+ np->speed = 10;
+ np->full_duplex = 1;
+ } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
+ strcmp (media[card_idx], "1") == 0) {
+ np->speed = 10;
+ np->full_duplex = 0;
+ } else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
+ strcmp (media[card_idx], "6") == 0) {
+ np->speed=1000;
+ np->full_duplex=1;
+ } else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
+ strcmp (media[card_idx], "5") == 0) {
+ np->speed = 1000;
+ np->full_duplex = 0;
+ } else {
+ np->an_enable = 1;
+ }
+ }
+ if (jumbo[card_idx] != 0) {
+ np->jumbo = 1;
+ dev->mtu = MAX_JUMBO;
+ } else {
+ np->jumbo = 0;
+ if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
+ dev->mtu = mtu[card_idx];
+ }
+ np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
+ vlan[card_idx] : 0;
+ if (rx_coalesce > 0 && rx_timeout > 0) {
+ np->rx_coalesce = rx_coalesce;
+ np->rx_timeout = rx_timeout;
+ np->coalesce = 1;
+ }
+ np->tx_flow = (tx_flow == 0) ? 0 : 1;
+ np->rx_flow = (rx_flow == 0) ? 0 : 1;
+
+ if (tx_coalesce < 1)
+ tx_coalesce = 1;
+ else if (tx_coalesce > TX_RING_SIZE-1)
+ tx_coalesce = TX_RING_SIZE - 1;
+ }
+ dev->netdev_ops = &netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ SET_ETHTOOL_OPS(dev, &ethtool_ops);
+#if 0
+ dev->features = NETIF_F_IP_CSUM;
+#endif
+ pci_set_drvdata (pdev, dev);
+
+ ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space) {
+ err = -ENOMEM;
+ goto err_out_iounmap;
+ }
+ np->tx_ring = ring_space;
+ np->tx_ring_dma = ring_dma;
+
+ ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space) {
+ err = -ENOMEM;
+ goto err_out_unmap_tx;
+ }
+ np->rx_ring = ring_space;
+ np->rx_ring_dma = ring_dma;
+
+ /* Parse eeprom data */
+ parse_eeprom (dev);
+
+ /* Find PHY address */
+ err = find_miiphy (dev);
+ if (err)
+ goto err_out_unmap_rx;
+
+ /* Fiber device? */
+ np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0;
+ np->link_status = 0;
+ /* Set media and reset PHY */
+ if (np->phy_media) {
+ /* default Auto-Negotiation for fiber devices */
+ if (np->an_enable == 2) {
+ np->an_enable = 1;
+ }
+ mii_set_media_pcs (dev);
+ } else {
+ /* Auto-Negotiation is mandatory for 1000BASE-T,
+ IEEE 802.3ab Annex 28D page 14 */
+ if (np->speed == 1000)
+ np->an_enable = 1;
+ mii_set_media (dev);
+ }
+
+ err = register_netdev (dev);
+ if (err)
+ goto err_out_unmap_rx;
+
+ card_idx++;
+
+ printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
+ dev->name, np->name, dev->dev_addr, irq);
+ if (tx_coalesce > 1)
+ printk(KERN_INFO "tx_coalesce:\t%d packets\n",
+ tx_coalesce);
+ if (np->coalesce)
+ printk(KERN_INFO
+ "rx_coalesce:\t%d packets\n"
+ "rx_timeout: \t%d ns\n",
+ np->rx_coalesce, np->rx_timeout*640);
+ if (np->vlan)
+ printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
+ return 0;
+
+ err_out_unmap_rx:
+ pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+ err_out_unmap_tx:
+ pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+ err_out_iounmap:
+#ifdef MEM_MAPPING
+ iounmap ((void *) ioaddr);
+
+ err_out_dev:
+#endif
+ free_netdev (dev);
+
+ err_out_res:
+ pci_release_regions (pdev);
+
+ err_out_disable:
+ pci_disable_device (pdev);
+ return err;
+}
+
+static int
+find_miiphy (struct net_device *dev)
+{
+ int i, phy_found = 0;
+ struct netdev_private *np;
+ long ioaddr;
+ np = netdev_priv(dev);
+ ioaddr = dev->base_addr;
+ np->phy_addr = 1;
+
+ for (i = 31; i >= 0; i--) {
+ int mii_status = mii_read (dev, i, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phy_addr = i;
+ phy_found++;
+ }
+ }
+ if (!phy_found) {
+ printk (KERN_ERR "%s: No MII PHY found!\n", dev->name);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int
+parse_eeprom (struct net_device *dev)
+{
+ int i, j;
+ long ioaddr = dev->base_addr;
+ u8 sromdata[256];
+ u8 *psib;
+ u32 crc;
+ PSROM_t psrom = (PSROM_t) sromdata;
+ struct netdev_private *np = netdev_priv(dev);
+
+ int cid, next;
+
+#ifdef MEM_MAPPING
+ ioaddr = pci_resource_start (np->pdev, 0);
+#endif
+ /* Read eeprom */
+ for (i = 0; i < 128; i++) {
+ ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom (ioaddr, i));
+ }
+#ifdef MEM_MAPPING
+ ioaddr = dev->base_addr;
+#endif
+ if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */
+ /* Check CRC */
+ crc = ~ether_crc_le (256 - 4, sromdata);
+ if (psrom->crc != cpu_to_le32(crc)) {
+ printk (KERN_ERR "%s: EEPROM data CRC error.\n",
+ dev->name);
+ return -1;
+ }
+ }
+
+ /* Set MAC address */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = psrom->mac_addr[i];
+
+ if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
+ return 0;
+ }
+
+ /* Parse Software Information Block */
+ i = 0x30;
+ psib = (u8 *) sromdata;
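+ /*
+ * (Added note: each SIB cell starts with an id byte followed by the
+ * byte index of the next cell within sromdata, then the payload; the
+ * walk below ends at cell id 1, "End of cell".)
+ */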
+ do {
+ cid = psib[i++];
+ next = psib[i++];
+ if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {
+ printk (KERN_ERR "Cell data error\n");
+ return -1;
+ }
+ switch (cid) {
+ case 0: /* Format version */
+ break;
+ case 1: /* End of cell */
+ return 0;
+ case 2: /* Duplex Polarity */
+ np->duplex_polarity = psib[i];
+ writeb (readb (ioaddr + PhyCtrl) | psib[i],
+ ioaddr + PhyCtrl);
+ break;
+ case 3: /* Wake Polarity */
+ np->wake_polarity = psib[i];
+ break;
+ case 9: /* Adapter description */
+ j = (next - i > 255) ? 255 : next - i;
+ memcpy (np->name, &(psib[i]), j);
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ case 8: /* Reserved */
+ break;
+ default: /* Unknown cell */
+ return -1;
+ }
+ i = next;
+ } while (1);
+
+ return 0;
+}
+
+static int
+rio_open (struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ int i;
+ u16 macctrl;
+
+ i = request_irq (dev->irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
+ if (i)
+ return i;
+
+ /* Reset all logic functions */
+ writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset,
+ ioaddr + ASICCtrl + 2);
+ mdelay(10);
+
+ /* DebugCtrl bits 4, 5 and 9 must be set */
+ writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl);
+
+ /* Jumbo frame */
+ if (np->jumbo != 0)
+ writew (MAX_JUMBO+14, ioaddr + MaxFrameSize);
+
+ alloc_list (dev);
+
+ /* Get station address */
+ for (i = 0; i < 6; i++)
+ writeb (dev->dev_addr[i], ioaddr + StationAddr0 + i);
+
+ set_multicast (dev);
+ if (np->coalesce) {
+ writel (np->rx_coalesce | np->rx_timeout << 16,
+ ioaddr + RxDMAIntCtrl);
+ }
+ /* Set RIO to poll every N*320nsec. */
+ writeb (0x20, ioaddr + RxDMAPollPeriod);
+ writeb (0xff, ioaddr + TxDMAPollPeriod);
+ writeb (0x30, ioaddr + RxDMABurstThresh);
+ writeb (0x30, ioaddr + RxDMAUrgentThresh);
+ writel (0x0007ffff, ioaddr + RmonStatMask);
+ /* clear statistics */
+ clear_stats (dev);
+
+ /* VLAN supported */
+ if (np->vlan) {
+ /* priority field in RxDMAIntCtrl */
+ writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10,
+ ioaddr + RxDMAIntCtrl);
+ /* VLANId */
+ writew (np->vlan, ioaddr + VLANId);
+ /* Length/Type should be 0x8100 */
+ writel (0x8100 << 16 | np->vlan, ioaddr + VLANTag);
+ /* Enable AutoVLANuntagging, but disable AutoVLANtagging.
+ VLAN information is tagged via the TFC's VID and CFI fields. */
+ writel (readl (ioaddr + MACCtrl) | AutoVLANuntagging,
+ ioaddr + MACCtrl);
+ }
+
+ init_timer (&np->timer);
+ np->timer.expires = jiffies + 1*HZ;
+ np->timer.data = (unsigned long) dev;
+ np->timer.function = rio_timer;
+ add_timer (&np->timer);
+
+ /* Start Tx/Rx */
+ writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable,
+ ioaddr + MACCtrl);
+
+ macctrl = 0;
+ macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
+ macctrl |= (np->full_duplex) ? DuplexSelect : 0;
+ macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
+ macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
+ writew(macctrl, ioaddr + MACCtrl);
+
+ netif_start_queue (dev);
+
+ /* Enable default interrupts */
+ EnableInt ();
+ return 0;
+}
+
+static void
+rio_timer (unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = netdev_priv(dev);
+ unsigned int entry;
+ int next_tick = 1*HZ;
+ unsigned long flags;
+
+ spin_lock_irqsave(&np->rx_lock, flags);
+ /* Recover from rx ring exhaustion */
+ if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
+ printk(KERN_INFO "Trying to recover from exhausted rx ring...\n");
+ /* Re-allocate skbuffs to fill the descriptor ring */
+ for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
+ struct sk_buff *skb;
+ entry = np->old_rx % RX_RING_SIZE;
+ /* Dropped packets don't need to re-allocate */
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = netdev_alloc_skb_ip_align(dev,
+ np->rx_buf_sz);
+ if (skb == NULL) {
+ np->rx_ring[entry].fraginfo = 0;
+ printk (KERN_INFO
+ "%s: Still unable to re-allocate Rx skbuff.#%d\n",
+ dev->name, entry);
+ break;
+ }
+ np->rx_skbuff[entry] = skb;
+ np->rx_ring[entry].fraginfo =
+ cpu_to_le64 (pci_map_single
+ (np->pdev, skb->data, np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE));
+ }
+ np->rx_ring[entry].fraginfo |=
+ cpu_to_le64((u64)np->rx_buf_sz << 48);
+ np->rx_ring[entry].status = 0;
+ } /* end for */
+ } /* end if */
+ spin_unlock_irqrestore (&np->rx_lock, flags);
+ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+}
+
+static void
+rio_tx_timeout (struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+
+ printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
+ dev->name, readl (ioaddr + TxStatus));
+ rio_free_tx(dev, 0);
+ dev->if_port = 0;
+ dev->trans_start = jiffies; /* prevent tx timeout */
+}
+
+ /* allocate and initialize Tx and Rx descriptors */
+static void
+alloc_list (struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ np->cur_rx = np->cur_tx = 0;
+ np->old_rx = np->old_tx = 0;
+ np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);
+
+ /* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = NULL;
+ np->tx_ring[i].status = cpu_to_le64 (TFDDone);
+ np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
+ ((i+1)%TX_RING_SIZE) *
+ sizeof (struct netdev_desc));
+ }
+
+ /* Initialize Rx descriptors */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
+ ((i + 1) % RX_RING_SIZE) *
+ sizeof (struct netdev_desc));
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].fraginfo = 0;
+ np->rx_skbuff[i] = NULL;
+ }
+
+ /* Allocate the rx buffers */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ /* Allocate fixed-size skbuffs */
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
+ np->rx_skbuff[i] = skb;
+ if (skb == NULL) {
+ printk (KERN_ERR
+ "%s: alloc_list: allocate Rx buffer error! ",
+ dev->name);
+ break;
+ }
+ /* Rubicon now supports 40 bits of addressing space. */
+ np->rx_ring[i].fraginfo =
+ cpu_to_le64 ( pci_map_single (
+ np->pdev, skb->data, np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE));
+ np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
+ }
+
+ /* Set RFDListPtr */
+ writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
+ writel (0, dev->base_addr + RFDListPtr1);
+}
+
+static netdev_tx_t
+start_xmit (struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ struct netdev_desc *txdesc;
+ unsigned entry;
+ u32 ioaddr;
+ u64 tfc_vlan_tag = 0;
+
+ if (np->link_status == 0) { /* Link Down */
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ ioaddr = dev->base_addr;
+ entry = np->cur_tx % TX_RING_SIZE;
+ np->tx_skbuff[entry] = skb;
+ txdesc = &np->tx_ring[entry];
+
+#if 0
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ txdesc->status |=
+ cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
+ IPChecksumEnable);
+ }
+#endif
+ if (np->vlan) {
+ tfc_vlan_tag = VLANTagInsert |
+ ((u64)np->vlan << 32) |
+ ((u64)skb->priority << 45);
+ }
+ txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
+ skb->len,
+ PCI_DMA_TODEVICE));
+ txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);
+
+ /* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
+ * Workaround: always use 1 descriptor in 10Mbps mode */
+ if (entry % np->tx_coalesce == 0 || np->speed == 10)
+ txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
+ WordAlignDisable |
+ TxDMAIndicate |
+ (1 << FragCountShift));
+ else
+ txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
+ WordAlignDisable |
+ (1 << FragCountShift));
+
+ /* TxDMAPollNow */
+ writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl);
+ /* Schedule ISR */
+ writel(10000, ioaddr + CountDown);
+ np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
+ if (((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
+ >= TX_QUEUE_LEN - 1 || np->speed == 10) &&
+ !netif_queue_stopped(dev)) {
+ netif_stop_queue (dev);
+ }
+
+ /* The first TFDListPtr */
+ if (readl (dev->base_addr + TFDListPtr0) == 0) {
+ writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc),
+ dev->base_addr + TFDListPtr0);
+ writel (0, dev->base_addr + TFDListPtr1);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static irqreturn_t
+rio_interrupt (int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np;
+ unsigned int_status;
+ long ioaddr;
+ int cnt = max_intrloop;
+ int handled = 0;
+
+ ioaddr = dev->base_addr;
+ np = netdev_priv(dev);
+ while (1) {
+ int_status = readw (ioaddr + IntStatus);
+ writew (int_status, ioaddr + IntStatus);
+ int_status &= DEFAULT_INTR;
+ if (int_status == 0 || --cnt < 0)
+ break;
+ handled = 1;
+ /* Processing received packets */
+ if (int_status & RxDMAComplete)
+ receive_packet (dev);
+ /* TxDMAComplete interrupt */
+ if ((int_status & (TxDMAComplete|IntRequested))) {
+ int tx_status;
+ tx_status = readl (ioaddr + TxStatus);
+ if (tx_status & 0x01)
+ tx_error (dev, tx_status);
+ /* Free used tx skbuffs */
+ rio_free_tx (dev, 1);
+ }
+
+ /* Handle uncommon events */
+ if (int_status &
+ (HostError | LinkEvent | UpdateStats))
+ rio_error (dev, int_status);
+ }
+ if (np->cur_tx != np->old_tx)
+ writel (100, ioaddr + CountDown);
+ return IRQ_RETVAL(handled);
+}
+
+static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
+{
+ return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48);
+}
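+
+/* Illustrative counterpart to desc_to_dma() (hypothetical helper, not
+ used by the driver): fraginfo packs the DMA address into bits 0..47
+ and the fragment length into bits 48..63, as the "<< 48" writes in
+ alloc_list() and start_xmit() suggest. */
+#if 0
+static inline unsigned int desc_to_len(struct netdev_desc *desc)
+{
+ return le64_to_cpu(desc->fraginfo) >> 48;
+}
+#endif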
+
+static void
+rio_free_tx (struct net_device *dev, int irq)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int entry = np->old_tx % TX_RING_SIZE;
+ int tx_use = 0;
+ unsigned long flag = 0;
+
+ if (irq)
+ spin_lock(&np->tx_lock);
+ else
+ spin_lock_irqsave(&np->tx_lock, flag);
+
+ /* Free used tx skbuffs */
+ while (entry != np->cur_tx) {
+ struct sk_buff *skb;
+
+ if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
+ break;
+ skb = np->tx_skbuff[entry];
+ pci_unmap_single (np->pdev,
+ desc_to_dma(&np->tx_ring[entry]),
+ skb->len, PCI_DMA_TODEVICE);
+ if (irq)
+ dev_kfree_skb_irq (skb);
+ else
+ dev_kfree_skb (skb);
+
+ np->tx_skbuff[entry] = NULL;
+ entry = (entry + 1) % TX_RING_SIZE;
+ tx_use++;
+ }
+ if (irq)
+ spin_unlock(&np->tx_lock);
+ else
+ spin_unlock_irqrestore(&np->tx_lock, flag);
+ np->old_tx = entry;
+
+ /* If the ring is no longer full, wake the queue. */
+
+ if (netif_queue_stopped(dev) &&
+ ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
+ < TX_QUEUE_LEN - 1 || np->speed == 10)) {
+ netif_wake_queue (dev);
+ }
+}
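+
+/* Ring-occupancy sketch (hypothetical helper; the driver open-codes
+ this expression in start_xmit() and rio_free_tx()): the number of Tx
+ descriptors currently outstanding. */
+#if 0
+static inline unsigned int tx_in_flight(struct netdev_private *np)
+{
+ return (np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE;
+}
+#endif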
+
+static void
+tx_error (struct net_device *dev, int tx_status)
+{
+ struct netdev_private *np;
+ long ioaddr = dev->base_addr;
+ int frame_id;
+ int i;
+
+ np = netdev_priv(dev);
+
+ frame_id = (tx_status >> 16) & 0xffff; /* FrameId is the upper 16 bits */
+ printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
+ dev->name, tx_status, frame_id);
+ np->stats.tx_errors++;
+ /* Transmit Underrun */
+ if (tx_status & 0x10) {
+ np->stats.tx_fifo_errors++;
+ writew (readw (ioaddr + TxStartThresh) + 0x10,
+ ioaddr + TxStartThresh);
+ /* Transmit underrun: need to set TxReset, DMAReset, FIFOReset */
+ writew (TxReset | DMAReset | FIFOReset | NetworkReset,
+ ioaddr + ASICCtrl + 2);
+ /* Wait for the ResetBusy bit to clear */
+ for (i = 50; i > 0; i--) {
+ if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
+ break;
+ mdelay (1);
+ }
+ rio_free_tx (dev, 1);
+ /* Reset TFDListPtr */
+ writel (np->tx_ring_dma +
+ np->old_tx * sizeof (struct netdev_desc),
+ dev->base_addr + TFDListPtr0);
+ writel (0, dev->base_addr + TFDListPtr1);
+
+ /* Let TxStartThresh stay default value */
+ }
+ /* Late Collision */
+ if (tx_status & 0x04) {
+ np->stats.tx_fifo_errors++;
+ /* TxReset and clear FIFO */
+ writew (TxReset | FIFOReset, ioaddr + ASICCtrl + 2);
+ /* Wait for the reset to complete */
+ for (i = 50; i > 0; i--) {
+ if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
+ break;
+ mdelay (1);
+ }
+ /* Let TxStartThresh stay default value */
+ }
+ /* Maximum Collisions */
+#ifdef ETHER_STATS
+ if (tx_status & 0x08)
+ np->stats.collisions16++;
+#else
+ if (tx_status & 0x08)
+ np->stats.collisions++;
+#endif
+ /* Restart the Tx */
+ writel (readl (ioaddr + MACCtrl) | TxEnable, ioaddr + MACCtrl);
+}
+
+static int
+receive_packet (struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int cnt = 30;
+
+ /* If RFDDone, FrameStart and FrameEnd are set, a new packet arrived. */
+ while (1) {
+ struct netdev_desc *desc = &np->rx_ring[entry];
+ int pkt_len;
+ u64 frame_status;
+
+ if (!(desc->status & cpu_to_le64(RFDDone)) ||
+ !(desc->status & cpu_to_le64(FrameStart)) ||
+ !(desc->status & cpu_to_le64(FrameEnd)))
+ break;
+
+ /* Chip omits the CRC. */
+ frame_status = le64_to_cpu(desc->status);
+ pkt_len = frame_status & 0xffff;
+ if (--cnt < 0)
+ break;
+ /* Update rx error statistics, drop packet. */
+ if (frame_status & RFS_Errors) {
+ np->stats.rx_errors++;
+ if (frame_status & (RxRuntFrame | RxLengthError))
+ np->stats.rx_length_errors++;
+ if (frame_status & RxFCSError)
+ np->stats.rx_crc_errors++;
+ if (frame_status & RxAlignmentError && np->speed != 1000)
+ np->stats.rx_frame_errors++;
+ if (frame_status & RxFIFOOverrun)
+ np->stats.rx_fifo_errors++;
+ } else {
+ struct sk_buff *skb;
+
+ /* Small skbuffs for short packets */
+ if (pkt_len > copy_thresh) {
+ pci_unmap_single (np->pdev,
+ desc_to_dma(desc),
+ np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ skb_put (skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+ } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
+ pci_dma_sync_single_for_cpu(np->pdev,
+ desc_to_dma(desc),
+ np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ skb_copy_to_linear_data (skb,
+ np->rx_skbuff[entry]->data,
+ pkt_len);
+ skb_put (skb, pkt_len);
+ pci_dma_sync_single_for_device(np->pdev,
+ desc_to_dma(desc),
+ np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ }
+ /* The small-skb copy path above may fail to allocate;
+ don't dereference a NULL skb. */
+ if (skb) {
+ skb->protocol = eth_type_trans (skb, dev);
+#if 0
+ /* Checksum done by hw, but csum value unavailable. */
+ if (np->pdev->pci_rev_id >= 0x0c &&
+ !(frame_status & (TCPError | UDPError | IPError))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+#endif
+ netif_rx (skb);
+ }
+ }
+ entry = (entry + 1) % RX_RING_SIZE;
+ }
+ spin_lock(&np->rx_lock);
+ np->cur_rx = entry;
+ /* Re-allocate skbuffs to fill the descriptor ring */
+ entry = np->old_rx;
+ while (entry != np->cur_rx) {
+ struct sk_buff *skb;
+ /* Dropped packets don't need to re-allocate */
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
+ if (skb == NULL) {
+ np->rx_ring[entry].fraginfo = 0;
+ printk (KERN_INFO
+ "%s: receive_packet: "
+ "Unable to re-allocate Rx skbuff.#%d\n",
+ dev->name, entry);
+ break;
+ }
+ np->rx_skbuff[entry] = skb;
+ np->rx_ring[entry].fraginfo =
+ cpu_to_le64 (pci_map_single
+ (np->pdev, skb->data, np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE));
+ }
+ np->rx_ring[entry].fraginfo |=
+ cpu_to_le64((u64)np->rx_buf_sz << 48);
+ np->rx_ring[entry].status = 0;
+ entry = (entry + 1) % RX_RING_SIZE;
+ }
+ np->old_rx = entry;
+ spin_unlock(&np->rx_lock);
+ return 0;
+}
+
+static void
+rio_error (struct net_device *dev, int int_status)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ u16 macctrl;
+
+ /* Link change event */
+ if (int_status & LinkEvent) {
+ if (mii_wait_link (dev, 10) == 0) {
+ printk (KERN_INFO "%s: Link up\n", dev->name);
+ if (np->phy_media)
+ mii_get_media_pcs (dev);
+ else
+ mii_get_media (dev);
+ if (np->speed == 1000)
+ np->tx_coalesce = tx_coalesce;
+ else
+ np->tx_coalesce = 1;
+ macctrl = 0;
+ macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
+ macctrl |= (np->full_duplex) ? DuplexSelect : 0;
+ macctrl |= (np->tx_flow) ?
+ TxFlowControlEnable : 0;
+ macctrl |= (np->rx_flow) ?
+ RxFlowControlEnable : 0;
+ writew(macctrl, ioaddr + MACCtrl);
+ np->link_status = 1;
+ netif_carrier_on(dev);
+ } else {
+ printk (KERN_INFO "%s: Link off\n", dev->name);
+ np->link_status = 0;
+ netif_carrier_off(dev);
+ }
+ }
+
+ /* UpdateStats statistics registers */
+ if (int_status & UpdateStats) {
+ get_stats (dev);
+ }
+
+ /* PCI Error: a catastrophic error related to the bus interface
+ occurred; set GlobalReset and HostReset to reset. */
+ if (int_status & HostError) {
+ printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
+ dev->name, int_status);
+ writew (GlobalReset | HostReset, ioaddr + ASICCtrl + 2);
+ mdelay (500);
+ }
+}
+
+static struct net_device_stats *
+get_stats (struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+#ifdef MEM_MAPPING
+ int i;
+#endif
+ unsigned int stat_reg;
+
+ /* All statistics registers need to be read to acknowledge them,
+ otherwise a statistics overflow could cause problems */
+
+ np->stats.rx_packets += readl (ioaddr + FramesRcvOk);
+ np->stats.tx_packets += readl (ioaddr + FramesXmtOk);
+ np->stats.rx_bytes += readl (ioaddr + OctetRcvOk);
+ np->stats.tx_bytes += readl (ioaddr + OctetXmtOk);
+
+ np->stats.multicast = readl (ioaddr + McstFramesRcvdOk);
+ np->stats.collisions += readl (ioaddr + SingleColFrames)
+ + readl (ioaddr + MultiColFrames);
+
+ /* detailed tx errors */
+ stat_reg = readw (ioaddr + FramesAbortXSColls);
+ np->stats.tx_aborted_errors += stat_reg;
+ np->stats.tx_errors += stat_reg;
+
+ stat_reg = readw (ioaddr + CarrierSenseErrors);
+ np->stats.tx_carrier_errors += stat_reg;
+ np->stats.tx_errors += stat_reg;
+
+ /* Clear all the other statistics registers. */
+ readl (ioaddr + McstOctetXmtOk);
+ readw (ioaddr + BcstFramesXmtdOk);
+ readl (ioaddr + McstFramesXmtdOk);
+ readw (ioaddr + BcstFramesRcvdOk);
+ readw (ioaddr + MacControlFramesRcvd);
+ readw (ioaddr + FrameTooLongErrors);
+ readw (ioaddr + InRangeLengthErrors);
+ readw (ioaddr + FramesCheckSeqErrors);
+ readw (ioaddr + FramesLostRxErrors);
+ readl (ioaddr + McstOctetXmtOk);
+ readl (ioaddr + BcstOctetXmtOk);
+ readl (ioaddr + McstFramesXmtdOk);
+ readl (ioaddr + FramesWDeferredXmt);
+ readl (ioaddr + LateCollisions);
+ readw (ioaddr + BcstFramesXmtdOk);
+ readw (ioaddr + MacControlFramesXmtd);
+ readw (ioaddr + FramesWEXDeferal);
+
+#ifdef MEM_MAPPING
+ for (i = 0x100; i <= 0x150; i += 4)
+ readl (ioaddr + i);
+#endif
+ readw (ioaddr + TxJumboFrames);
+ readw (ioaddr + RxJumboFrames);
+ readw (ioaddr + TCPCheckSumErrors);
+ readw (ioaddr + UDPCheckSumErrors);
+ readw (ioaddr + IPCheckSumErrors);
+ return &np->stats;
+}
+
+static int
+clear_stats (struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+#ifdef MEM_MAPPING
+ int i;
+#endif
+
+ /* All statistics registers need to be read to acknowledge them,
+ otherwise a statistics overflow could cause problems */
+ readl (ioaddr + FramesRcvOk);
+ readl (ioaddr + FramesXmtOk);
+ readl (ioaddr + OctetRcvOk);
+ readl (ioaddr + OctetXmtOk);
+
+ readl (ioaddr + McstFramesRcvdOk);
+ readl (ioaddr + SingleColFrames);
+ readl (ioaddr + MultiColFrames);
+ readl (ioaddr + LateCollisions);
+ /* detailed rx errors */
+ readw (ioaddr + FrameTooLongErrors);
+ readw (ioaddr + InRangeLengthErrors);
+ readw (ioaddr + FramesCheckSeqErrors);
+ readw (ioaddr + FramesLostRxErrors);
+
+ /* detailed tx errors */
+ readw (ioaddr + FramesAbortXSColls);
+ readw (ioaddr + CarrierSenseErrors);
+
+ /* Clear all the other statistics registers. */
+ readl (ioaddr + McstOctetXmtOk);
+ readw (ioaddr + BcstFramesXmtdOk);
+ readl (ioaddr + McstFramesXmtdOk);
+ readw (ioaddr + BcstFramesRcvdOk);
+ readw (ioaddr + MacControlFramesRcvd);
+ readl (ioaddr + McstOctetXmtOk);
+ readl (ioaddr + BcstOctetXmtOk);
+ readl (ioaddr + McstFramesXmtdOk);
+ readl (ioaddr + FramesWDeferredXmt);
+ readw (ioaddr + BcstFramesXmtdOk);
+ readw (ioaddr + MacControlFramesXmtd);
+ readw (ioaddr + FramesWEXDeferal);
+#ifdef MEM_MAPPING
+ for (i = 0x100; i <= 0x150; i += 4)
+ readl (ioaddr + i);
+#endif
+ readw (ioaddr + TxJumboFrames);
+ readw (ioaddr + RxJumboFrames);
+ readw (ioaddr + TCPCheckSumErrors);
+ readw (ioaddr + UDPCheckSumErrors);
+ readw (ioaddr + IPCheckSumErrors);
+ return 0;
+}
+
+
+static int
+change_mtu (struct net_device *dev, int new_mtu)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int max = (np->jumbo) ? MAX_JUMBO : 1536;
+
+ if ((new_mtu < 68) || (new_mtu > max)) {
+ return -EINVAL;
+ }
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+static void
+set_multicast (struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ u32 hash_table[2];
+ u16 rx_mode = 0;
+ struct netdev_private *np = netdev_priv(dev);
+
+ hash_table[0] = hash_table[1] = 0;
+ /* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
+ hash_table[1] |= 0x02000000;
+ if (dev->flags & IFF_PROMISC) {
+ /* Receive all frames promiscuously. */
+ rx_mode = ReceiveAllFrames;
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > multicast_filter_limit)) {
+ /* Receive broadcast and multicast frames */
+ rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
+ } else if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+ /* Receive broadcast frames and multicast frames filtered
+ by the hash table */
+ rx_mode =
+ ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit, index = 0;
+ int crc = ether_crc_le(ETH_ALEN, ha->addr);
+ /* The bit-reversed upper 6 bits of the CRC are
+ used as the hash-table index */
+ for (bit = 0; bit < 6; bit++)
+ if (crc & (1 << (31 - bit)))
+ index |= (1 << bit);
+ hash_table[index / 32] |= (1 << (index % 32));
+ }
+ } else {
+ rx_mode = ReceiveBroadcast | ReceiveUnicast;
+ }
+ if (np->vlan) {
+ /* ReceiveVLANMatch field in ReceiveMode */
+ rx_mode |= ReceiveVLANMatch;
+ }
+
+ writel (hash_table[0], ioaddr + HashTable0);
+ writel (hash_table[1], ioaddr + HashTable1);
+ writew (rx_mode, ioaddr + ReceiveMode);
+}
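+
+/* Worked sketch of the hash computation above (hypothetical helper,
+ factored out of set_multicast() for illustration): for the Rx flow
+ control DA 01-80-C2-00-00-01 it yields index 0x39, i.e. bit 25 of
+ hash_table[1] -- the 0x02000000 preset at the top of set_multicast(). */
+#if 0
+static int mcast_hash_index(const u8 *addr)
+{
+ int bit, index = 0;
+ int crc = ether_crc_le(ETH_ALEN, addr);
+
+ /* Bit-reverse the upper 6 CRC bits into a 0..63 table index. */
+ for (bit = 0; bit < 6; bit++)
+ if (crc & (1 << (31 - bit)))
+ index |= (1 << bit);
+ return index; /* word = index / 32, bit = index % 32 */
+}
+#endif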
+
+static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ strcpy(info->driver, "dl2k");
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(np->pdev));
+}
+
+static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ if (np->phy_media) {
+ /* fiber device */
+ cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
+ cmd->advertising= ADVERTISED_Autoneg | ADVERTISED_FIBRE;
+ cmd->port = PORT_FIBRE;
+ cmd->transceiver = XCVR_INTERNAL;
+ } else {
+ /* copper device */
+ cmd->supported = SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg | SUPPORTED_MII;
+ cmd->advertising = ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full|
+ ADVERTISED_Autoneg | ADVERTISED_MII;
+ cmd->port = PORT_MII;
+ cmd->transceiver = XCVR_INTERNAL;
+ }
+ if ( np->link_status ) {
+ ethtool_cmd_speed_set(cmd, np->speed);
+ cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ } else {
+ ethtool_cmd_speed_set(cmd, -1);
+ cmd->duplex = -1;
+ }
+ if ( np->an_enable)
+ cmd->autoneg = AUTONEG_ENABLE;
+ else
+ cmd->autoneg = AUTONEG_DISABLE;
+
+ cmd->phy_address = np->phy_addr;
+ return 0;
+}
+
+static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ netif_carrier_off(dev);
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (np->an_enable)
+ return 0;
+ else {
+ np->an_enable = 1;
+ mii_set_media(dev);
+ return 0;
+ }
+ } else {
+ np->an_enable = 0;
+ if (np->speed == 1000) {
+ ethtool_cmd_speed_set(cmd, SPEED_100);
+ cmd->duplex = DUPLEX_FULL;
+ printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n");
+ }
+ switch (ethtool_cmd_speed(cmd)) {
+ case SPEED_10:
+ np->speed = 10;
+ np->full_duplex = (cmd->duplex == DUPLEX_FULL);
+ break;
+ case SPEED_100:
+ np->speed = 100;
+ np->full_duplex = (cmd->duplex == DUPLEX_FULL);
+ break;
+ case SPEED_1000: /* not supported */
+ default:
+ return -EINVAL;
+ }
+ mii_set_media(dev);
+ }
+ return 0;
+}
+
+static u32 rio_get_link(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return np->link_status;
+}
+
+static const struct ethtool_ops ethtool_ops = {
+ .get_drvinfo = rio_get_drvinfo,
+ .get_settings = rio_get_settings,
+ .set_settings = rio_set_settings,
+ .get_link = rio_get_link,
+};
+
+static int
+rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ int phy_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru;
+
+ struct netdev_desc *desc;
+ int i;
+
+ phy_addr = np->phy_addr;
+ switch (cmd) {
+ case SIOCDEVPRIVATE:
+ break;
+
+ case SIOCDEVPRIVATE + 1:
+ miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
+ break;
+ case SIOCDEVPRIVATE + 2:
+ mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
+ break;
+ case SIOCDEVPRIVATE + 3:
+ break;
+ case SIOCDEVPRIVATE + 4:
+ break;
+ case SIOCDEVPRIVATE + 5:
+ netif_stop_queue (dev);
+ break;
+ case SIOCDEVPRIVATE + 6:
+ netif_wake_queue (dev);
+ break;
+ case SIOCDEVPRIVATE + 7:
+ printk
+ ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
+ netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
+ np->old_rx);
+ break;
+ case SIOCDEVPRIVATE + 8:
+ printk("TX ring:\n");
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ desc = &np->tx_ring[i];
+ printk
+ ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
+ i,
+ (u32) (np->tx_ring_dma + i * sizeof (*desc)),
+ (u32)le64_to_cpu(desc->next_desc),
+ (u32)le64_to_cpu(desc->status),
+ (u32)(le64_to_cpu(desc->fraginfo) >> 32),
+ (u32)le64_to_cpu(desc->fraginfo));
+ printk ("\n");
+ }
+ printk ("\n");
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+#define EEP_READ 0x0200
+#define EEP_BUSY 0x8000
+/* Read the EEPROM word */
+/* We use I/O instructions to read/write the EEPROM to avoid failures on some machines */
+static int
+read_eeprom (long ioaddr, int eep_addr)
+{
+ int i = 1000;
+ outw (EEP_READ | (eep_addr & 0xff), ioaddr + EepromCtrl);
+ while (i-- > 0) {
+ if (!(inw (ioaddr + EepromCtrl) & EEP_BUSY)) {
+ return inw (ioaddr + EepromData);
+ }
+ }
+ return 0;
+}
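+
+/* Usage sketch (hypothetical; mirrors how probe code would pull the
+ station address out of the EEPROM): per the t_SROM layout in dl2k.h,
+ mac_addr starts at byte 0x20, i.e. word offset 0x10, little-endian. */
+#if 0
+static void read_mac_from_eeprom(long ioaddr, u8 *mac)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ u16 w = read_eeprom(ioaddr, 0x10 + i);
+ mac[2 * i] = w & 0xff;
+ mac[2 * i + 1] = w >> 8;
+ }
+}
+#endif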
+
+enum phy_ctrl_bits {
+ MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04,
+ MII_DUPLEX = 0x08,
+};
+
+#define mii_delay() readb(ioaddr)
+static void
+mii_sendbit (struct net_device *dev, u32 data)
+{
+ long ioaddr = dev->base_addr + PhyCtrl;
+ data = (data) ? MII_DATA1 : 0;
+ data |= (readb (ioaddr) & 0xf8) | MII_WRITE;
+ writeb (data, ioaddr);
+ mii_delay ();
+ writeb (data | MII_CLK, ioaddr);
+ mii_delay ();
+}
+
+static int
+mii_getbit (struct net_device *dev)
+{
+ long ioaddr = dev->base_addr + PhyCtrl;
+ u8 data;
+
+ data = (readb (ioaddr) & 0xf8) | MII_READ;
+ writeb (data, ioaddr);
+ mii_delay ();
+ writeb (data | MII_CLK, ioaddr);
+ mii_delay ();
+ return ((readb (ioaddr) >> 1) & 1);
+}
+
+static void
+mii_send_bits (struct net_device *dev, u32 data, int len)
+{
+ int i;
+ for (i = len - 1; i >= 0; i--) {
+ mii_sendbit (dev, data & (1 << i));
+ }
+}
+
+static int
+mii_read (struct net_device *dev, int phy_addr, int reg_num)
+{
+ u32 cmd;
+ int i;
+ u32 retval = 0;
+
+ /* Preamble */
+ mii_send_bits (dev, 0xffffffff, 32);
+ /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
+ /* ST,OP = 0110'b for read operation */
+ cmd = (0x06 << 10 | phy_addr << 5 | reg_num);
+ mii_send_bits (dev, cmd, 14);
+ /* Turnaround */
+ if (mii_getbit (dev))
+ goto err_out;
+ /* Read data */
+ for (i = 0; i < 16; i++) {
+ retval |= mii_getbit (dev);
+ retval <<= 1;
+ }
+ /* End cycle */
+ mii_getbit (dev);
+ return (retval >> 1) & 0xffff;
+
+ err_out:
+ return 0;
+}
+static int
+mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data)
+{
+ u32 cmd;
+
+ /* Preamble */
+ mii_send_bits (dev, 0xffffffff, 32);
+ /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
+ /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
+ cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data;
+ mii_send_bits (dev, cmd, 32);
+ /* End cycle */
+ mii_getbit (dev);
+ return 0;
+}
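+
+/* Usage sketch (hypothetical helper built on the bit-banged MDIO
+ routines above): read the IEEE 802.3 clause 22 PHY identifier from
+ MII registers 2 and 3 (MII_PHYSID1/MII_PHYSID2 in <linux/mii.h>). */
+#if 0
+static u32 mii_read_phy_id(struct net_device *dev, int phy_addr)
+{
+ u32 id = (u32)mii_read(dev, phy_addr, MII_PHYSID1) << 16;
+
+ id |= mii_read(dev, phy_addr, MII_PHYSID2);
+ return id;
+}
+#endif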
+static int
+mii_wait_link (struct net_device *dev, int wait)
+{
+ __u16 bmsr;
+ int phy_addr;
+ struct netdev_private *np;
+
+ np = netdev_priv(dev);
+ phy_addr = np->phy_addr;
+
+ do {
+ bmsr = mii_read (dev, phy_addr, MII_BMSR);
+ if (bmsr & BMSR_LSTATUS)
+ return 0;
+ mdelay (1);
+ } while (--wait > 0);
+ return -1;
+}
+static int
+mii_get_media (struct net_device *dev)
+{
+ __u16 negotiate;
+ __u16 bmsr;
+ __u16 mscr;
+ __u16 mssr;
+ int phy_addr;
+ struct netdev_private *np;
+
+ np = netdev_priv(dev);
+ phy_addr = np->phy_addr;
+
+ bmsr = mii_read (dev, phy_addr, MII_BMSR);
+ if (np->an_enable) {
+ if (!(bmsr & BMSR_ANEGCOMPLETE)) {
+ /* Auto-Negotiation not completed */
+ return -1;
+ }
+ negotiate = mii_read (dev, phy_addr, MII_ADVERTISE) &
+ mii_read (dev, phy_addr, MII_LPA);
+ mscr = mii_read (dev, phy_addr, MII_CTRL1000);
+ mssr = mii_read (dev, phy_addr, MII_STAT1000);
+ if (mscr & ADVERTISE_1000FULL && mssr & LPA_1000FULL) {
+ np->speed = 1000;
+ np->full_duplex = 1;
+ printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
+ } else if (mscr & ADVERTISE_1000HALF && mssr & LPA_1000HALF) {
+ np->speed = 1000;
+ np->full_duplex = 0;
+ printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
+ } else if (negotiate & ADVERTISE_100FULL) {
+ np->speed = 100;
+ np->full_duplex = 1;
+ printk (KERN_INFO "Auto 100 Mbps, Full duplex\n");
+ } else if (negotiate & ADVERTISE_100HALF) {
+ np->speed = 100;
+ np->full_duplex = 0;
+ printk (KERN_INFO "Auto 100 Mbps, Half duplex\n");
+ } else if (negotiate & ADVERTISE_10FULL) {
+ np->speed = 10;
+ np->full_duplex = 1;
+ printk (KERN_INFO "Auto 10 Mbps, Full duplex\n");
+ } else if (negotiate & ADVERTISE_10HALF) {
+ np->speed = 10;
+ np->full_duplex = 0;
+ printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
+ }
+ if (negotiate & ADVERTISE_PAUSE_CAP) {
+ np->tx_flow &= 1;
+ np->rx_flow &= 1;
+ } else if (negotiate & ADVERTISE_PAUSE_ASYM) {
+ np->tx_flow = 0;
+ np->rx_flow &= 1;
+ }
+ /* else tx_flow, rx_flow = user select */
+ } else {
+ __u16 bmcr = mii_read (dev, phy_addr, MII_BMCR);
+ switch (bmcr & (BMCR_SPEED100 | BMCR_SPEED1000)) {
+ case BMCR_SPEED1000:
+ printk (KERN_INFO "Operating at 1000 Mbps, ");
+ break;
+ case BMCR_SPEED100:
+ printk (KERN_INFO "Operating at 100 Mbps, ");
+ break;
+ case 0:
+ printk (KERN_INFO "Operating at 10 Mbps, ");
+ }
+ if (bmcr & BMCR_FULLDPLX) {
+ printk (KERN_CONT "Full duplex\n");
+ } else {
+ printk (KERN_CONT "Half duplex\n");
+ }
+ }
+ if (np->tx_flow)
+ printk(KERN_INFO "Enable Tx Flow Control\n");
+ else
+ printk(KERN_INFO "Disable Tx Flow Control\n");
+ if (np->rx_flow)
+ printk(KERN_INFO "Enable Rx Flow Control\n");
+ else
+ printk(KERN_INFO "Disable Rx Flow Control\n");
+
+ return 0;
+}
+
+static int
+mii_set_media (struct net_device *dev)
+{
+ __u16 pscr;
+ __u16 bmcr;
+ __u16 bmsr;
+ __u16 anar;
+ int phy_addr;
+ struct netdev_private *np;
+ np = netdev_priv(dev);
+ phy_addr = np->phy_addr;
+
+ /* Auto-negotiation, or a user-forced speed? */
+ if (np->an_enable) {
+ /* Advertise capabilities */
+ bmsr = mii_read (dev, phy_addr, MII_BMSR);
+ anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
+ ~(ADVERTISE_100FULL | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_10HALF |
+ ADVERTISE_100BASE4);
+ if (bmsr & BMSR_100FULL)
+ anar |= ADVERTISE_100FULL;
+ if (bmsr & BMSR_100HALF)
+ anar |= ADVERTISE_100HALF;
+ if (bmsr & BMSR_100BASE4)
+ anar |= ADVERTISE_100BASE4;
+ if (bmsr & BMSR_10FULL)
+ anar |= ADVERTISE_10FULL;
+ if (bmsr & BMSR_10HALF)
+ anar |= ADVERTISE_10HALF;
+ anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ mii_write (dev, phy_addr, MII_ADVERTISE, anar);
+
+ /* Enable Auto crossover */
+ pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
+ pscr |= 3 << 5; /* 11'b */
+ mii_write (dev, phy_addr, MII_PHY_SCR, pscr);
+
+ /* Soft reset PHY */
+ mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
+ bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
+ mii_write (dev, phy_addr, MII_BMCR, bmcr);
+ mdelay(1);
+ } else {
+ /* Force speed setting */
+ /* 1) Disable Auto crossover */
+ pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
+ pscr &= ~(3 << 5);
+ mii_write (dev, phy_addr, MII_PHY_SCR, pscr);
+
+ /* 2) PHY Reset */
+ bmcr = mii_read (dev, phy_addr, MII_BMCR);
+ bmcr |= BMCR_RESET;
+ mii_write (dev, phy_addr, MII_BMCR, bmcr);
+
+ /* 3) Power Down */
+ bmcr = 0x1940; /* must be 0x1940 */
+ mii_write (dev, phy_addr, MII_BMCR, bmcr);
+ mdelay (100); /* give the PHY time to power down */
+
+ /* 4) Advertise nothing */
+ mii_write (dev, phy_addr, MII_ADVERTISE, 0);
+
+ /* 5) Set media and Power Up */
+ bmcr = BMCR_PDOWN;
+ if (np->speed == 100) {
+ bmcr |= BMCR_SPEED100;
+ printk (KERN_INFO "Manual 100 Mbps, ");
+ } else if (np->speed == 10) {
+ printk (KERN_INFO "Manual 10 Mbps, ");
+ }
+ if (np->full_duplex) {
+ bmcr |= BMCR_FULLDPLX;
+ printk (KERN_CONT "Full duplex\n");
+ } else {
+ printk (KERN_CONT "Half duplex\n");
+ }
+#if 0
+ /* Set 1000BaseT Master/Slave setting */
+ mscr = mii_read (dev, phy_addr, MII_CTRL1000);
+ mscr |= MII_MSCR_CFG_ENABLE;
+ mscr &= ~MII_MSCR_CFG_VALUE;
+#endif
+ mii_write (dev, phy_addr, MII_BMCR, bmcr);
+ mdelay(10);
+ }
+ return 0;
+}
+
+static int
+mii_get_media_pcs (struct net_device *dev)
+{
+ __u16 negotiate;
+ __u16 bmsr;
+ int phy_addr;
+ struct netdev_private *np;
+
+ np = netdev_priv(dev);
+ phy_addr = np->phy_addr;
+
+ bmsr = mii_read (dev, phy_addr, PCS_BMSR);
+ if (np->an_enable) {
+ if (!(bmsr & BMSR_ANEGCOMPLETE)) {
+ /* Auto-Negotiation not completed */
+ return -1;
+ }
+ negotiate = mii_read (dev, phy_addr, PCS_ANAR) &
+ mii_read (dev, phy_addr, PCS_ANLPAR);
+ np->speed = 1000;
+ if (negotiate & PCS_ANAR_FULL_DUPLEX) {
+ printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
+ np->full_duplex = 1;
+ } else {
+ printk (KERN_INFO "Auto 1000 Mbps, half duplex\n");
+ np->full_duplex = 0;
+ }
+ if (negotiate & PCS_ANAR_PAUSE) {
+ np->tx_flow &= 1;
+ np->rx_flow &= 1;
+ } else if (negotiate & PCS_ANAR_ASYMMETRIC) {
+ np->tx_flow = 0;
+ np->rx_flow &= 1;
+ }
+ /* else tx_flow, rx_flow = user select */
+ } else {
+ __u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR);
+ printk (KERN_INFO "Operating at 1000 Mbps, ");
+ if (bmcr & BMCR_FULLDPLX) {
+ printk (KERN_CONT "Full duplex\n");
+ } else {
+ printk (KERN_CONT "Half duplex\n");
+ }
+ }
+ if (np->tx_flow)
+ printk(KERN_INFO "Enable Tx Flow Control\n");
+ else
+ printk(KERN_INFO "Disable Tx Flow Control\n");
+ if (np->rx_flow)
+ printk(KERN_INFO "Enable Rx Flow Control\n");
+ else
+ printk(KERN_INFO "Disable Rx Flow Control\n");
+
+ return 0;
+}
+
+static int
+mii_set_media_pcs (struct net_device *dev)
+{
+ __u16 bmcr;
+ __u16 esr;
+ __u16 anar;
+ int phy_addr;
+ struct netdev_private *np;
+ np = netdev_priv(dev);
+ phy_addr = np->phy_addr;
+
+ /* Auto-Negotiation? */
+ if (np->an_enable) {
+ /* Advertise capabilities */
+ esr = mii_read (dev, phy_addr, PCS_ESR);
+ anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
+ ~PCS_ANAR_HALF_DUPLEX &
+ ~PCS_ANAR_FULL_DUPLEX;
+ if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD))
+ anar |= PCS_ANAR_HALF_DUPLEX;
+ if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD))
+ anar |= PCS_ANAR_FULL_DUPLEX;
+ anar |= PCS_ANAR_PAUSE | PCS_ANAR_ASYMMETRIC;
+ mii_write (dev, phy_addr, MII_ADVERTISE, anar);
+
+ /* Soft reset PHY */
+ mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
+ bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
+ mii_write (dev, phy_addr, MII_BMCR, bmcr);
+ mdelay(1);
+ } else {
+ /* Force speed setting */
+ /* PHY Reset */
+ bmcr = BMCR_RESET;
+ mii_write (dev, phy_addr, MII_BMCR, bmcr);
+ mdelay(10);
+ if (np->full_duplex) {
+ bmcr = BMCR_FULLDPLX;
+ printk (KERN_INFO "Manual full duplex\n");
+ } else {
+ bmcr = 0;
+ printk (KERN_INFO "Manual half duplex\n");
+ }
+ mii_write (dev, phy_addr, MII_BMCR, bmcr);
+ mdelay(10);
+
+ /* Advertise nothing */
+ mii_write (dev, phy_addr, MII_ADVERTISE, 0);
+ }
+ return 0;
+}
+
+
+static int
+rio_close (struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ struct sk_buff *skb;
+ int i;
+
+ netif_stop_queue (dev);
+
+ /* Disable interrupts */
+ writew (0, ioaddr + IntEnable);
+
+ /* Stop the Tx and Rx logic */
+ writel (TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl);
+
+ free_irq (dev->irq, dev);
+ del_timer_sync (&np->timer);
+
+ /* Free all the skbuffs in the queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ skb = np->rx_skbuff[i];
+ if (skb) {
+ pci_unmap_single(np->pdev,
+ desc_to_dma(&np->rx_ring[i]),
+ skb->len, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb (skb);
+ np->rx_skbuff[i] = NULL;
+ }
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].fraginfo = 0;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ skb = np->tx_skbuff[i];
+ if (skb) {
+ pci_unmap_single(np->pdev,
+ desc_to_dma(&np->tx_ring[i]),
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb (skb);
+ np->tx_skbuff[i] = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static void __devexit
+rio_remove1 (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+
+ if (dev) {
+ struct netdev_private *np = netdev_priv(dev);
+
+ unregister_netdev (dev);
+ pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
+ np->rx_ring_dma);
+ pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
+ np->tx_ring_dma);
+#ifdef MEM_MAPPING
+ iounmap ((char *) (dev->base_addr));
+#endif
+ free_netdev (dev);
+ pci_release_regions (pdev);
+ pci_disable_device (pdev);
+ }
+ pci_set_drvdata (pdev, NULL);
+}
+
+static struct pci_driver rio_driver = {
+ .name = "dl2k",
+ .id_table = rio_pci_tbl,
+ .probe = rio_probe1,
+ .remove = __devexit_p(rio_remove1),
+};
+
+static int __init
+rio_init (void)
+{
+ return pci_register_driver(&rio_driver);
+}
+
+static void __exit
+rio_exit (void)
+{
+ pci_unregister_driver (&rio_driver);
+}
+
+module_init (rio_init);
+module_exit (rio_exit);
+
+/*
+
+Compile command:
+
+gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c
+
+Read Documentation/networking/dl2k.txt for details.
+
+*/
+
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
new file mode 100644
index 000000000000..ba0adcafa55a
--- /dev/null
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -0,0 +1,448 @@
+/* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */
+/*
+ Copyright (c) 2001, 2002 by D-Link Corporation
+ Written by Edward Peng.<edward_peng@dlink.com.tw>
+ Created 03-May-2001, based on Linux' sundance.c.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+*/
+
+#ifndef __DL2K_H__
+#define __DL2K_H__
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/bitops.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/time.h>
+#define TX_RING_SIZE 256
+#define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used.*/
+#define RX_RING_SIZE 256
+#define TX_TOTAL_SIZE (TX_RING_SIZE * sizeof(struct netdev_desc))
+#define RX_TOTAL_SIZE (RX_RING_SIZE * sizeof(struct netdev_desc))
+
+/* This driver was written to use PCI memory space, however x86-oriented
+ hardware often uses I/O space accesses. */
+#ifndef MEM_MAPPING
+#undef readb
+#undef readw
+#undef readl
+#undef writeb
+#undef writew
+#undef writel
+#define readb inb
+#define readw inw
+#define readl inl
+#define writeb outb
+#define writew outw
+#define writel outl
+#endif
+
+/* Offsets to the device registers.
+ Unlike software-only systems, device drivers interact with complex hardware.
+ It's not useful to define symbolic names for every register bit in the
+ device. The name can only partially document the semantics and make
+ the driver longer and more difficult to read.
+ In general, only the important configuration values or bits changed
+ multiple times should be defined symbolically.
+*/
+enum dl2x_offsets {
+ /* I/O register offsets */
+ DMACtrl = 0x00,
+ RxDMAStatus = 0x08,
+ TFDListPtr0 = 0x10,
+ TFDListPtr1 = 0x14,
+ TxDMABurstThresh = 0x18,
+ TxDMAUrgentThresh = 0x19,
+ TxDMAPollPeriod = 0x1a,
+ RFDListPtr0 = 0x1c,
+ RFDListPtr1 = 0x20,
+ RxDMABurstThresh = 0x24,
+ RxDMAUrgentThresh = 0x25,
+ RxDMAPollPeriod = 0x26,
+ RxDMAIntCtrl = 0x28,
+ DebugCtrl = 0x2c,
+ ASICCtrl = 0x30,
+ FifoCtrl = 0x38,
+ RxEarlyThresh = 0x3a,
+ FlowOffThresh = 0x3c,
+ FlowOnThresh = 0x3e,
+ TxStartThresh = 0x44,
+ EepromData = 0x48,
+ EepromCtrl = 0x4a,
+ ExpromAddr = 0x4c,
+ Exprodata = 0x50,
+ WakeEvent = 0x51,
+ CountDown = 0x54,
+ IntStatusAck = 0x5a,
+ IntEnable = 0x5c,
+ IntStatus = 0x5e,
+ TxStatus = 0x60,
+ MACCtrl = 0x6c,
+ VLANTag = 0x70,
+ PhyCtrl = 0x76,
+ StationAddr0 = 0x78,
+ StationAddr1 = 0x7a,
+ StationAddr2 = 0x7c,
+ VLANId = 0x80,
+ MaxFrameSize = 0x86,
+ ReceiveMode = 0x88,
+ HashTable0 = 0x8c,
+ HashTable1 = 0x90,
+ RmonStatMask = 0x98,
+ StatMask = 0x9c,
+ RxJumboFrames = 0xbc,
+ TCPCheckSumErrors = 0xc0,
+ IPCheckSumErrors = 0xc2,
+ UDPCheckSumErrors = 0xc4,
+ TxJumboFrames = 0xf4,
+ /* Ethernet MIB statistic register offsets */
+ OctetRcvOk = 0xa8,
+ McstOctetRcvOk = 0xac,
+ BcstOctetRcvOk = 0xb0,
+ FramesRcvOk = 0xb4,
+ McstFramesRcvdOk = 0xb8,
+ BcstFramesRcvdOk = 0xbe,
+ MacControlFramesRcvd = 0xc6,
+ FrameTooLongErrors = 0xc8,
+ InRangeLengthErrors = 0xca,
+ FramesCheckSeqErrors = 0xcc,
+ FramesLostRxErrors = 0xce,
+ OctetXmtOk = 0xd0,
+ McstOctetXmtOk = 0xd4,
+ BcstOctetXmtOk = 0xd8,
+ FramesXmtOk = 0xdc,
+ McstFramesXmtdOk = 0xe0,
+ FramesWDeferredXmt = 0xe4,
+ LateCollisions = 0xe8,
+ MultiColFrames = 0xec,
+ SingleColFrames = 0xf0,
+ BcstFramesXmtdOk = 0xf6,
+ CarrierSenseErrors = 0xf8,
+ MacControlFramesXmtd = 0xfa,
+ FramesAbortXSColls = 0xfc,
+ FramesWEXDeferal = 0xfe,
+ /* RMON statistic register offsets */
+ EtherStatsCollisions = 0x100,
+ EtherStatsOctetsTransmit = 0x104,
+ EtherStatsPktsTransmit = 0x108,
+ EtherStatsPkts64OctetTransmit = 0x10c,
+ EtherStats65to127OctetsTransmit = 0x110,
+ EtherStatsPkts128to255OctetsTransmit = 0x114,
+ EtherStatsPkts256to511OctetsTransmit = 0x118,
+ EtherStatsPkts512to1023OctetsTransmit = 0x11c,
+ EtherStatsPkts1024to1518OctetsTransmit = 0x120,
+ EtherStatsCRCAlignErrors = 0x124,
+ EtherStatsUndersizePkts = 0x128,
+ EtherStatsFragments = 0x12c,
+ EtherStatsJabbers = 0x130,
+ EtherStatsOctets = 0x134,
+ EtherStatsPkts = 0x138,
+ EtherStats64Octets = 0x13c,
+ EtherStatsPkts65to127Octets = 0x140,
+ EtherStatsPkts128to255Octets = 0x144,
+ EtherStatsPkts256to511Octets = 0x148,
+ EtherStatsPkts512to1023Octets = 0x14c,
+ EtherStatsPkts1024to1518Octets = 0x150,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum IntStatus_bits {
+ InterruptStatus = 0x0001,
+ HostError = 0x0002,
+ MACCtrlFrame = 0x0008,
+ TxComplete = 0x0004,
+ RxComplete = 0x0010,
+ RxEarly = 0x0020,
+ IntRequested = 0x0040,
+ UpdateStats = 0x0080,
+ LinkEvent = 0x0100,
+ TxDMAComplete = 0x0200,
+ RxDMAComplete = 0x0400,
+ RFDListEnd = 0x0800,
+ RxDMAPriority = 0x1000,
+};
+
+/* Bits in the ReceiveMode register. */
+enum ReceiveMode_bits {
+ ReceiveUnicast = 0x0001,
+ ReceiveMulticast = 0x0002,
+ ReceiveBroadcast = 0x0004,
+ ReceiveAllFrames = 0x0008,
+ ReceiveMulticastHash = 0x0010,
+ ReceiveIPMulticast = 0x0020,
+ ReceiveVLANMatch = 0x0100,
+ ReceiveVLANHash = 0x0200,
+};
+/* Bits in MACCtrl. */
+enum MACCtrl_bits {
+ DuplexSelect = 0x20,
+ TxFlowControlEnable = 0x80,
+ RxFlowControlEnable = 0x0100,
+ RcvFCS = 0x200,
+ AutoVLANtagging = 0x1000,
+ AutoVLANuntagging = 0x2000,
+ StatsEnable = 0x00200000,
+ StatsDisable = 0x00400000,
+ StatsEnabled = 0x00800000,
+ TxEnable = 0x01000000,
+ TxDisable = 0x02000000,
+ TxEnabled = 0x04000000,
+ RxEnable = 0x08000000,
+ RxDisable = 0x10000000,
+ RxEnabled = 0x20000000,
+};
+
+enum ASICCtrl_LoWord_bits {
+ PhyMedia = 0x0080,
+};
+
+enum ASICCtrl_HiWord_bits {
+ GlobalReset = 0x0001,
+ RxReset = 0x0002,
+ TxReset = 0x0004,
+ DMAReset = 0x0008,
+ FIFOReset = 0x0010,
+ NetworkReset = 0x0020,
+ HostReset = 0x0040,
+ ResetBusy = 0x0400,
+};
+
+/* Transmit Frame Control bits */
+enum TFC_bits {
+ DwordAlign = 0x00000000,
+ WordAlignDisable = 0x00030000,
+ WordAlign = 0x00020000,
+ TCPChecksumEnable = 0x00040000,
+ UDPChecksumEnable = 0x00080000,
+ IPChecksumEnable = 0x00100000,
+ FCSAppendDisable = 0x00200000,
+ TxIndicate = 0x00400000,
+ TxDMAIndicate = 0x00800000,
+ FragCountShift = 24,
+ VLANTagInsert = 0x0000000010000000,
+ TFDDone = 0x80000000,
+ VIDShift = 32,
+ UsePriorityShift = 48,
+};
+
+/* Receive Frames Status bits */
+enum RFS_bits {
+ RxFIFOOverrun = 0x00010000,
+ RxRuntFrame = 0x00020000,
+ RxAlignmentError = 0x00040000,
+ RxFCSError = 0x00080000,
+ RxOverSizedFrame = 0x00100000,
+ RxLengthError = 0x00200000,
+ VLANDetected = 0x00400000,
+ TCPDetected = 0x00800000,
+ TCPError = 0x01000000,
+ UDPDetected = 0x02000000,
+ UDPError = 0x04000000,
+ IPDetected = 0x08000000,
+ IPError = 0x10000000,
+ FrameStart = 0x20000000,
+ FrameEnd = 0x40000000,
+ RFDDone = 0x80000000,
+ TCIShift = 32,
+ RFS_Errors = 0x003f0000,
+};
+
+#define MII_RESET_TIME_OUT 10000
+/* MII register */
+enum _mii_reg {
+ MII_PHY_SCR = 16,
+};
+
+/* PCS register */
+enum _pcs_reg {
+ PCS_BMCR = 0,
+ PCS_BMSR = 1,
+ PCS_ANAR = 4,
+ PCS_ANLPAR = 5,
+ PCS_ANER = 6,
+ PCS_ANNPT = 7,
+ PCS_ANLPRNP = 8,
+ PCS_ESR = 15,
+};
+
+/* IEEE Extended Status Register */
+enum _mii_esr {
+ MII_ESR_1000BX_FD = 0x8000,
+ MII_ESR_1000BX_HD = 0x4000,
+ MII_ESR_1000BT_FD = 0x2000,
+ MII_ESR_1000BT_HD = 0x1000,
+};
+/* PHY Specific Control Register */
+#if 0
+typedef union t_MII_PHY_SCR {
+ u16 image;
+ struct {
+ u16 disable_jabber:1; // bit 0
+ u16 polarity_reversal:1; // bit 1
+ u16 SEQ_test:1; // bit 2
+ u16 _bit_3:1; // bit 3
+ u16 disable_CLK125:1; // bit 4
+ u16 mdi_crossover_mode:2; // bit 6:5
+ u16 enable_ext_dist:1; // bit 7
+ u16 _bit_8_9:2; // bit 9:8
+ u16 force_link:1; // bit 10
+ u16 assert_CRS:1; // bit 11
+ u16 rcv_fifo_depth:2; // bit 13:12
+ u16 xmit_fifo_depth:2; // bit 15:14
+ } bits;
+} PHY_SCR_t, *PPHY_SCR_t;
+#endif
+
+typedef enum t_MII_ADMIN_STATUS {
+ adm_reset,
+ adm_operational,
+ adm_loopback,
+ adm_power_down,
+ adm_isolate
+} MII_ADMIN_t, *PMII_ADMIN_t;
+
+/* Physical Coding Sublayer Management (PCS) */
+/* PCS control and status register bitmaps are the same as MII */
+/* PCS Extended Status register bitmap is the same as MII */
+/* PCS ANAR */
+enum _pcs_anar {
+ PCS_ANAR_NEXT_PAGE = 0x8000,
+ PCS_ANAR_REMOTE_FAULT = 0x3000,
+ PCS_ANAR_ASYMMETRIC = 0x0100,
+ PCS_ANAR_PAUSE = 0x0080,
+ PCS_ANAR_HALF_DUPLEX = 0x0040,
+ PCS_ANAR_FULL_DUPLEX = 0x0020,
+};
+/* PCS ANLPAR */
+enum _pcs_anlpar {
+ PCS_ANLPAR_NEXT_PAGE = PCS_ANAR_NEXT_PAGE,
+ PCS_ANLPAR_REMOTE_FAULT = PCS_ANAR_REMOTE_FAULT,
+ PCS_ANLPAR_ASYMMETRIC = PCS_ANAR_ASYMMETRIC,
+ PCS_ANLPAR_PAUSE = PCS_ANAR_PAUSE,
+ PCS_ANLPAR_HALF_DUPLEX = PCS_ANAR_HALF_DUPLEX,
+ PCS_ANLPAR_FULL_DUPLEX = PCS_ANAR_FULL_DUPLEX,
+};
+
+typedef struct t_SROM {
+ u16 config_param; /* 0x00 */
+ u16 asic_ctrl; /* 0x02 */
+ u16 sub_vendor_id; /* 0x04 */
+ u16 sub_system_id; /* 0x06 */
+ u16 reserved1[12]; /* 0x08-0x1f */
+ u8 mac_addr[6]; /* 0x20-0x25 */
+ u8 reserved2[10]; /* 0x26-0x2f */
+ u8 sib[204]; /* 0x30-0xfb */
+ u32 crc; /* 0xfc-0xff */
+} SROM_t, *PSROM_t;
+
+/* Ioctl custom data */
+struct ioctl_data {
+ char signature[10];
+ int cmd;
+ int len;
+ char *data;
+};
+
+struct mii_data {
+ __u16 reserved;
+ __u16 reg_num;
+ __u16 in_value;
+ __u16 out_value;
+};
+
+/* The Rx and Tx buffer descriptors. */
+struct netdev_desc {
+ __le64 next_desc;
+ __le64 status;
+ __le64 fraginfo;
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
+ within the structure. */
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct netdev_desc *rx_ring;
+ struct netdev_desc *tx_ring;
+ struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+ dma_addr_t tx_ring_dma;
+ dma_addr_t rx_ring_dma;
+ struct pci_dev *pdev;
+ spinlock_t tx_lock;
+ spinlock_t rx_lock;
+ struct net_device_stats stats;
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ unsigned int speed; /* Operating speed */
+ unsigned int vlan; /* VLAN Id */
+ unsigned int chip_id; /* PCI table chip id */
+ unsigned int rx_coalesce; /* Maximum frames each RxDMAComplete intr */
+ unsigned int rx_timeout; /* Wait time between RxDMAComplete intr */
+ unsigned int tx_coalesce; /* Maximum frames each tx interrupt */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int an_enable:2; /* Auto-negotiation enabled */
+ unsigned int jumbo:1; /* Jumbo frame enable */
+ unsigned int coalesce:1; /* Rx coalescing enable */
+ unsigned int tx_flow:1; /* Tx flow control enable */
+ unsigned int rx_flow:1; /* Rx flow control enable */
+ unsigned int phy_media:1; /* 1: fiber, 0: copper */
+ unsigned int link_status:1; /* Current link status */
+ struct netdev_desc *last_tx; /* Last Tx descriptor used. */
+ unsigned long cur_rx, old_rx; /* Producer/consumer ring indices */
+ unsigned long cur_tx, old_tx;
+ struct timer_list timer;
+ int wake_polarity;
+ char name[256]; /* net device description */
+ u8 duplex_polarity;
+ u16 mcast_filter[4];
+ u16 advertising; /* NWay media advertisement */
+ u16 negotiate; /* Negotiated media */
+ int phy_addr; /* PHY addresses. */
+};
+
+/* The station address location in the EEPROM. */
+/* The struct pci_device_id consists of:
+ vendor, device Vendor and device ID to match (or PCI_ANY_ID)
+ subvendor, subdevice Subsystem vendor and device ID to match (or PCI_ANY_ID)
+ class Device class to match. The class_mask tells which bits
+ class_mask of the class are honored during the comparison.
+ driver_data Data private to the driver.
+*/
+
+static DEFINE_PCI_DEVICE_TABLE(rio_pci_tbl) = {
+ {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, },
+ {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, },
+ { }
+};
+MODULE_DEVICE_TABLE (pci, rio_pci_tbl);
+#define TX_TIMEOUT (4*HZ)
+#define PACKET_SIZE 1536
+#define MAX_JUMBO 8000
+#define RIO_IO_SIZE 340
+#define DEFAULT_RXC 5
+#define DEFAULT_RXT 750
+#define DEFAULT_TXC 1
+#define MAX_TXC 8
+#endif /* __DL2K_H__ */
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
new file mode 100644
index 000000000000..dcd7f7a71ad4
--- /dev/null
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -0,0 +1,1940 @@
+/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
+/*
+ Written 1999-2000 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/sundance.html
+ [link no longer provides useful info -jgarzik]
+ Archives of the mailing list are still available at
+ http://www.beowulf.org/pipermail/netdrivers/
+
+*/
+
+#define DRV_NAME "sundance"
+#define DRV_VERSION "1.2"
+#define DRV_RELDATE "11-Sep-2006"
+
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ Typical is a 64 element hash table based on the Ethernet CRC. */
+static const int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature.
+ This chip can receive into offset buffers, so the Alpha does not
+ need a copy-align. */
+static int rx_copybreak;
+static int flowctrl=1;
+
+/* media[] specifies the media type the NIC operates at.
+ autosense Autosensing active media.
+ 10mbps_hd 10Mbps half duplex.
+ 10mbps_fd 10Mbps full duplex.
+ 100mbps_hd 100Mbps half duplex.
+ 100mbps_fd 100Mbps full duplex.
+ 0 Autosensing active media.
+ 1 10Mbps half duplex.
+ 2 10Mbps full duplex.
+ 3 100Mbps half duplex.
+ 4 100Mbps full duplex.
+*/
+#define MAX_UNITS 8
+static char *media[MAX_UNITS];
+
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority, and more than 128 requires modifying the
+ Tx error recovery.
+ Large receive rings merely waste memory. */
+#define TX_RING_SIZE 32
+#define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
+#define RX_RING_SIZE 64
+#define RX_BUDGET 32
+#define TX_TOTAL_SIZE (TX_RING_SIZE * sizeof(struct netdev_desc))
+#define RX_TOTAL_SIZE (RX_RING_SIZE * sizeof(struct netdev_desc))
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (4*HZ)
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+
+/* These identify the driver base version and may not be removed. */
+static const char version[] __devinitconst =
+ KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
+ " Written by Donald Becker\n";
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param_array(media, charp, NULL, 0);
+module_param(flowctrl, int, 0);
+MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
+MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for the Sundance Technologies "Alta" ST201 chip.
+
+II. Board-specific settings
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+Some chips explicitly use only 2^N sized rings, while others use a
+'next descriptor' pointer that the driver forms into rings.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
+
+A subtle aspect of the operation is that the IP header at offset 14 in an
+ethernet frame isn't longword aligned for further processing.
+Unaligned buffers are permitted by the Sundance hardware, so
+frames are received into the skbuff at an offset of "+2", 16-byte aligning
+the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+IVb. References
+
+The Sundance ST201 datasheet, preliminary version.
+The Kendin KS8723 datasheet, preliminary version.
+The ICplus IP100 datasheet, preliminary version.
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+
+IVc. Errata
+
+*/
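+
+/* Copybreak sketch (illustrative only; the real receive path lives in
+ rx_poll() further below): frames shorter than rx_copybreak are copied
+ into a freshly allocated skb so the full-sized ring buffer can be
+ reused in place; larger frames are passed up the stack directly. */
+#if 0
+ if (pkt_len < rx_copybreak &&
+ (skb = netdev_alloc_skb_ip_align(dev, pkt_len)) != NULL) {
+ /* copy pkt_len bytes out of the ring buffer, keep the ring skb */
+ } else {
+ /* hand the original full-sized ring skb up the protocol stack */
+ }
+#endif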
+
+/* Work-around for Kendin chip bugs. */
+#ifndef CONFIG_SUNDANCE_MMIO
+#define USE_IO_OPS 1
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
+ { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
+ { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
+ { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
+ { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
+ { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+ { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+ { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
+
+enum {
+ netdev_io_size = 128
+};
+
+struct pci_id_info {
+ const char *name;
+};
+static const struct pci_id_info pci_id_tbl[] __devinitdata = {
+ {"D-Link DFE-550TX FAST Ethernet Adapter"},
+ {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
+ {"D-Link DFE-580TX 4 port Server Adapter"},
+ {"D-Link DFE-530TXS FAST Ethernet Adapter"},
+ {"D-Link DL10050-based FAST Ethernet Adapter"},
+ {"Sundance Technology Alta"},
+ {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
+ { } /* terminate list. */
+};
+
+/* This driver was written to use PCI memory space, however x86-oriented
+ hardware often uses I/O space accesses. */
+
+/* Offsets to the device registers.
+ Unlike software-only systems, device drivers interact with complex hardware.
+ It's not useful to define symbolic names for every register bit in the
+ device. The name can only partially document the semantics and make
+ the driver longer and more difficult to read.
+ In general, only the important configuration values or bits changed
+ multiple times should be defined symbolically.
+*/
+enum alta_offsets {
+ DMACtrl = 0x00,
+ TxListPtr = 0x04,
+ TxDMABurstThresh = 0x08,
+ TxDMAUrgentThresh = 0x09,
+ TxDMAPollPeriod = 0x0a,
+ RxDMAStatus = 0x0c,
+ RxListPtr = 0x10,
+ DebugCtrl0 = 0x1a,
+ DebugCtrl1 = 0x1c,
+ RxDMABurstThresh = 0x14,
+ RxDMAUrgentThresh = 0x15,
+ RxDMAPollPeriod = 0x16,
+ LEDCtrl = 0x1a,
+ ASICCtrl = 0x30,
+ EEData = 0x34,
+ EECtrl = 0x36,
+ FlashAddr = 0x40,
+ FlashData = 0x44,
+ TxStatus = 0x46,
+ TxFrameId = 0x47,
+ DownCounter = 0x18,
+ IntrClear = 0x4a,
+ IntrEnable = 0x4c,
+ IntrStatus = 0x4e,
+ MACCtrl0 = 0x50,
+ MACCtrl1 = 0x52,
+ StationAddr = 0x54,
+ MaxFrameSize = 0x5A,
+ RxMode = 0x5c,
+ MIICtrl = 0x5e,
+ MulticastFilter0 = 0x60,
+ MulticastFilter1 = 0x64,
+ RxOctetsLow = 0x68,
+ RxOctetsHigh = 0x6a,
+ TxOctetsLow = 0x6c,
+ TxOctetsHigh = 0x6e,
+ TxFramesOK = 0x70,
+ RxFramesOK = 0x72,
+ StatsCarrierError = 0x74,
+ StatsLateColl = 0x75,
+ StatsMultiColl = 0x76,
+ StatsOneColl = 0x77,
+ StatsTxDefer = 0x78,
+ RxMissed = 0x79,
+ StatsTxXSDefer = 0x7a,
+ StatsTxAbort = 0x7b,
+ StatsBcastTx = 0x7c,
+ StatsBcastRx = 0x7d,
+ StatsMcastTx = 0x7e,
+ StatsMcastRx = 0x7f,
+ /* Aliased and bogus values! */
+ RxStatus = 0x0c,
+};
+
+#define ASIC_HI_WORD(x) ((x) + 2)
+
+enum ASICCtrl_HiWord_bit {
+ GlobalReset = 0x0001,
+ RxReset = 0x0002,
+ TxReset = 0x0004,
+ DMAReset = 0x0008,
+ FIFOReset = 0x0010,
+ NetworkReset = 0x0020,
+ HostReset = 0x0040,
+ ResetBusy = 0x0400,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
+ IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
+ IntrDrvRqst=0x0040,
+ StatsMax=0x0080, LinkChange=0x0100,
+ IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
+};
+
+/* Bits in the RxMode register. */
+enum rx_mode_bits {
+ AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
+ AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
+};
+/* Bits in MACCtrl. */
+enum mac_ctrl0_bits {
+ EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
+ EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
+};
+enum mac_ctrl1_bits {
+ StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
+ TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
+ RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
+};
+
+/* The Rx and Tx buffer descriptors. */
+/* Note that using only 32 bit fields simplifies conversion to big-endian
+ architectures. */
+struct netdev_desc {
+ __le32 next_desc;
+ __le32 status;
+ struct desc_frag { __le32 addr, length; } frag[1];
+};
+
+/* Bits in netdev_desc.status */
+enum desc_status_bits {
+ DescOwn=0x8000,
+ DescEndPacket=0x4000,
+ DescEndRing=0x2000,
+ LastFrag=0x80000000,
+ DescIntrOnTx=0x8000,
+ DescIntrOnDMADone=0x80000000,
+ DisableAlign = 0x00000001,
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
+ within the structure. */
+#define MII_CNT 4
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct netdev_desc *rx_ring;
+ struct netdev_desc *tx_ring;
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ dma_addr_t tx_ring_dma;
+ dma_addr_t rx_ring_dma;
+ struct timer_list timer; /* Media monitoring timer. */
+ /* ethtool extra stats */
+ struct {
+ u64 tx_multiple_collisions;
+ u64 tx_single_collisions;
+ u64 tx_late_collisions;
+ u64 tx_deferred;
+ u64 tx_deferred_excessive;
+ u64 tx_aborted;
+ u64 tx_bcasts;
+ u64 rx_bcasts;
+ u64 tx_mcasts;
+ u64 rx_mcasts;
+ } xstats;
+ /* Frequently used values: keep some adjacent for cache effect. */
+ spinlock_t lock;
+ int msg_enable;
+ int chip_id;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ struct netdev_desc *last_tx; /* Last Tx descriptor used. */
+ unsigned int cur_tx, dirty_tx;
+ /* These values keep track of the transceiver/media in use. */
+ unsigned int flowctrl:1;
+ unsigned int default_port:4; /* Last dev->if_port value. */
+ unsigned int an_enable:1;
+ unsigned int speed;
+ struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tx_tasklet;
+ int budget;
+ int cur_task;
+ /* Multicast and receive mode. */
+ spinlock_t mcastlock; /* SMP lock multicast updates. */
+ u16 mcast_filter[4];
+ /* MII transceiver section. */
+ struct mii_if_info mii_if;
+ int mii_preamble_required;
+ unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
+ struct pci_dev *pci_dev;
+ void __iomem *base;
+ spinlock_t statlock;
+};
+
+/* The station address location in the EEPROM. */
+#define EEPROM_SA_OFFSET 0x10
+#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
+ IntrDrvRqst | IntrTxDone | StatsMax | \
+ LinkChange)
+
+static int change_mtu(struct net_device *dev, int new_mtu);
+static int eeprom_read(void __iomem *ioaddr, int location);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int mdio_wait_link(struct net_device *dev, int wait);
+static int netdev_open(struct net_device *dev);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
+static int reset_tx (struct net_device *dev);
+static irqreturn_t intr_handler(int irq, void *dev_instance);
+static void rx_poll(unsigned long data);
+static void tx_poll(unsigned long data);
+static void refill_rx (struct net_device *dev);
+static void netdev_error(struct net_device *dev, int intr_status);
+static void set_rx_mode(struct net_device *dev);
+static int __set_mac_addr(struct net_device *dev);
+static int sundance_set_mac_addr(struct net_device *dev, void *data);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+static const struct ethtool_ops ethtool_ops;
+
+static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base + ASICCtrl;
+ int countdown;
+
+ /* ST201 documentation states ASICCtrl is a 32bit register */
+ iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
+ /* ST201 documentation states reset can take up to 1 ms */
+ countdown = 10 + 1;
+ while (ioread32 (ioaddr) & (ResetBusy << 16)) {
+ if (--countdown == 0) {
+ printk(KERN_WARNING "%s: reset not completed!\n", dev->name);
+ break;
+ }
+ udelay(100);
+ }
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = netdev_open,
+ .ndo_stop = netdev_close,
+ .ndo_start_xmit = start_tx,
+ .ndo_get_stats = get_stats,
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_do_ioctl = netdev_ioctl,
+ .ndo_tx_timeout = tx_timeout,
+ .ndo_change_mtu = change_mtu,
+ .ndo_set_mac_address = sundance_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __devinit sundance_probe1 (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ static int card_idx;
+ int chip_idx = ent->driver_data;
+ int irq;
+ int i;
+ void __iomem *ioaddr;
+ u16 mii_ctl;
+ void *ring_space;
+ dma_addr_t ring_dma;
+#ifdef USE_IO_OPS
+ int bar = 0;
+#else
+ int bar = 1;
+#endif
+ int phy, phy_end, phy_idx = 0;
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(version);
+#endif
+
+ if (pci_enable_device(pdev))
+ return -EIO;
+ pci_set_master(pdev);
+
+ irq = pdev->irq;
+
+ dev = alloc_etherdev(sizeof(*np));
+ if (!dev)
+ return -ENOMEM;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (pci_request_regions(pdev, DRV_NAME))
+ goto err_out_netdev;
+
+ ioaddr = pci_iomap(pdev, bar, netdev_io_size);
+ if (!ioaddr)
+ goto err_out_res;
+
+ for (i = 0; i < 3; i++)
+ ((__le16 *)dev->dev_addr)[i] =
+ cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ dev->base_addr = (unsigned long)ioaddr;
+ dev->irq = irq;
+
+ np = netdev_priv(dev);
+ np->base = ioaddr;
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->msg_enable = (1 << debug) - 1;
+ spin_lock_init(&np->lock);
+ spin_lock_init(&np->statlock);
+ tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
+ tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
+
+ ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
+ &ring_dma, GFP_KERNEL);
+ if (!ring_space)
+ goto err_out_cleardev;
+ np->tx_ring = (struct netdev_desc *)ring_space;
+ np->tx_ring_dma = ring_dma;
+
+ ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
+ &ring_dma, GFP_KERNEL);
+ if (!ring_space)
+ goto err_out_unmap_tx;
+ np->rx_ring = (struct netdev_desc *)ring_space;
+ np->rx_ring_dma = ring_dma;
+
+ np->mii_if.dev = dev;
+ np->mii_if.mdio_read = mdio_read;
+ np->mii_if.mdio_write = mdio_write;
+ np->mii_if.phy_id_mask = 0x1f;
+ np->mii_if.reg_num_mask = 0x1f;
+
+ /* The chip-specific entries in the device structure. */
+ dev->netdev_ops = &netdev_ops;
+ SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ pci_set_drvdata(pdev, dev);
+
+ i = register_netdev(dev);
+ if (i)
+ goto err_out_unmap_rx;
+
+ printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr,
+ dev->dev_addr, irq);
+
+ np->phys[0] = 1; /* Default setting */
+ np->mii_preamble_required++;
+
+ /*
+ * It seems some PHYs don't deal well with address 0 being accessed
+ * first.
+ */
+ if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
+ phy = 0;
+ phy_end = 31;
+ } else {
+ phy = 1;
+ phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */
+ }
+ for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
+ int phyx = phy & 0x1f;
+ int mii_status = mdio_read(dev, phyx, MII_BMSR);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phyx;
+ np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
+ if ((mii_status & 0x0040) == 0)
+ np->mii_preamble_required++;
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x.\n",
+ dev->name, phyx, mii_status, np->mii_if.advertising);
+ }
+ }
+ np->mii_preamble_required--;
+
+ if (phy_idx == 0) {
+ printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
+ dev->name, ioread32(ioaddr + ASICCtrl));
+ goto err_out_unregister;
+ }
+
+ np->mii_if.phy_id = np->phys[0];
+
+ /* Parse override configuration */
+ np->an_enable = 1;
+ if (card_idx < MAX_UNITS) {
+ if (media[card_idx] != NULL) {
+ np->an_enable = 0;
+ if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
+ strcmp (media[card_idx], "4") == 0) {
+ np->speed = 100;
+ np->mii_if.full_duplex = 1;
+ } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
+ strcmp (media[card_idx], "3") == 0) {
+ np->speed = 100;
+ np->mii_if.full_duplex = 0;
+ } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
+ strcmp (media[card_idx], "2") == 0) {
+ np->speed = 10;
+ np->mii_if.full_duplex = 1;
+ } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
+ strcmp (media[card_idx], "1") == 0) {
+ np->speed = 10;
+ np->mii_if.full_duplex = 0;
+ } else {
+ np->an_enable = 1;
+ }
+ }
+ if (flowctrl == 1)
+ np->flowctrl = 1;
+ }
+
+ /* Fibre PHY? */
+ if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
+ /* Default 100Mbps Full */
+ if (np->an_enable) {
+ np->speed = 100;
+ np->mii_if.full_duplex = 1;
+ np->an_enable = 0;
+ }
+ }
+ /* Reset PHY */
+ mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
+ mdelay (300);
+ /* If flow control enabled, we need to advertise it.*/
+ if (np->flowctrl)
+ mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
+ mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
+ /* Force media type */
+ if (!np->an_enable) {
+ mii_ctl = 0;
+ mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
+ mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
+ mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
+ printk (KERN_INFO "Override speed=%d, %s duplex\n",
+ np->speed, np->mii_if.full_duplex ? "Full" : "Half");
+
+ }
+
+ /* Perhaps move the reset here? */
+ /* Reset the chip to erase previous misconfiguration. */
+ if (netif_msg_hw(np))
+ printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
+ sundance_reset(dev, 0x00ff << 16);
+ if (netif_msg_hw(np))
+ printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
+
+ card_idx++;
+ return 0;
+
+err_out_unregister:
+ unregister_netdev(dev);
+err_out_unmap_rx:
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
+ np->rx_ring, np->rx_ring_dma);
+err_out_unmap_tx:
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
+ np->tx_ring, np->tx_ring_dma);
+err_out_cleardev:
+ pci_set_drvdata(pdev, NULL);
+ pci_iounmap(pdev, ioaddr);
+err_out_res:
+ pci_release_regions(pdev);
+err_out_netdev:
+ free_netdev (dev);
+ return -ENODEV;
+}
+
+static int change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
+ return -EINVAL;
+ if (netif_running(dev))
+ return -EBUSY;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
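+/* A dummy read of EECtrl forces the preceding PCI write to complete and
+ serves as a short delay between EEPROM accesses. */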
+#define eeprom_delay(ee_addr) ioread32(ee_addr)
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
+static int __devinit eeprom_read(void __iomem *ioaddr, int location)
+{
+ int boguscnt = 10000; /* Typical 1900 ticks. */
+ iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
+ do {
+ eeprom_delay(ioaddr + EECtrl);
+ if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
+ return ioread16(ioaddr + EEData);
+ }
+ } while (--boguscnt > 0);
+ return 0;
+}
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details.
+
+ The maximum data clock rate is 2.5 MHz. The minimum timing is usually
+ met by back-to-back 33 MHz PCI cycles. */
+#define mdio_delay() ioread8(mdio_addr)
+
+enum mii_reg_bits {
+ MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
+};
+#define MDIO_EnbIn (0)
+#define MDIO_WRITE0 (MDIO_EnbOutput)
+#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
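+/* The command words used below follow IEEE 802.3 clause 22 framing. A
+ read shifts out 16 bits: two idle ones, start (01), read opcode (10),
+ then the 5-bit PHY and 5-bit register addresses; (0xf6 << 10) packs the
+ fixed leading bits. A write shifts out a full 32-bit frame: start (01),
+ write opcode (01), the two addresses, a 10 turnaround, then the 16 data
+ bits; (0x5002 << 16) packs the fixed bits. */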
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+static void mdio_sync(void __iomem *mdio_addr)
+{
+ int bits = 32;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ while (--bits >= 0) {
+ iowrite8(MDIO_WRITE1, mdio_addr);
+ mdio_delay();
+ iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
+ mdio_delay();
+ }
+}
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *mdio_addr = np->base + MIICtrl;
+ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ int i, retval = 0;
+
+ if (np->mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ iowrite8(dataval, mdio_addr);
+ mdio_delay();
+ iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay();
+ }
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ iowrite8(MDIO_EnbIn, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
+ iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay();
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *mdio_addr = np->base + MIICtrl;
+ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
+ int i;
+
+ if (np->mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ iowrite8(dataval, mdio_addr);
+ mdio_delay();
+ iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay();
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ iowrite8(MDIO_EnbIn, mdio_addr);
+ mdio_delay();
+ iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay();
+ }
+}
+
+static int mdio_wait_link(struct net_device *dev, int wait)
+{
+ int bmsr;
+ int phy_id;
+ struct netdev_private *np;
+
+ np = netdev_priv(dev);
+ phy_id = np->phys[0];
+
+ do {
+ bmsr = mdio_read(dev, phy_id, MII_BMSR);
+ if (bmsr & 0x0004)
+ return 0;
+ mdelay(1);
+ } while (--wait > 0);
+ return -1;
+}
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ unsigned long flags;
+ int i;
+
+ /* Do we need to reset the chip??? */
+
+ i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
+ if (i)
+ return i;
+
+ if (netif_msg_ifup(np))
+ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+ dev->name, dev->irq);
+ init_ring(dev);
+
+ iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
+ /* The Tx list pointer is written as packets are queued. */
+
+ /* Initialize other registers. */
+ __set_mac_addr(dev);
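+ /* Program the maximum frame size: the MTU plus the 14-byte Ethernet
+ header, plus 4 bytes for a VLAN tag when 802.1Q is configured. */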
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+ iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
+#else
+ iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
+#endif
+ if (dev->mtu > 2047)
+ iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
+
+ /* Configure the PCI bus bursts and FIFO thresholds. */
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ spin_lock_init(&np->mcastlock);
+
+ set_rx_mode(dev);
+ iowrite16(0, ioaddr + IntrEnable);
+ iowrite16(0, ioaddr + DownCounter);
+ /* Set the chip to poll every N*320nsec. */
+ iowrite8(100, ioaddr + RxDMAPollPeriod);
+ iowrite8(127, ioaddr + TxDMAPollPeriod);
+ /* Fix DFE-580TX packet drop issue */
+ if (np->pci_dev->revision >= 0x14)
+ iowrite8(0x01, ioaddr + DebugCtrl1);
+ netif_start_queue(dev);
+
+ spin_lock_irqsave(&np->lock, flags);
+ reset_tx(dev);
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
+
+ if (netif_msg_ifup(np))
+ printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
+ "MAC Control %x, %4.4x %4.4x.\n",
+ dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
+ ioread32(ioaddr + MACCtrl0),
+ ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + 3*HZ;
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = netdev_timer; /* timer handler */
+ add_timer(&np->timer);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
+
+ return 0;
+}
+
+static void check_duplex(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
+ int negotiated = mii_lpa & np->mii_if.advertising;
+ int duplex;
+
+ /* Force media */
+ if (!np->an_enable || mii_lpa == 0xffff) {
+ if (np->mii_if.full_duplex)
+ iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
+ ioaddr + MACCtrl0);
+ return;
+ }
+
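+ /* In the LPA word 0x0100 is 100BASE-TX full duplex; a value of exactly
+ 0x0040 in the 0x01C0 bits means 10BASE-T full duplex is the best mode
+ both ends share. */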
+ /* Autonegotiation */
+ duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+ if (np->mii_if.full_duplex != duplex) {
+ np->mii_if.full_duplex = duplex;
+ if (netif_msg_link(np))
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
+ "negotiated capability %4.4x.\n", dev->name,
+ duplex ? "full" : "half", np->phys[0], negotiated);
+ iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
+ }
+}
+
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int next_tick = 10*HZ;
+
+ if (netif_msg_timer(np)) {
+ printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
+ "Tx %x Rx %x.\n",
+ dev->name, ioread16(ioaddr + IntrEnable),
+ ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
+ }
+ check_duplex(dev);
+ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+}
+
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ unsigned long flag;
+
+ netif_stop_queue(dev);
+ tasklet_disable(&np->tx_tasklet);
+ iowrite16(0, ioaddr + IntrEnable);
+ printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
+ "TxFrameId %2.2x,"
+ " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
+ ioread8(ioaddr + TxFrameId));
+
+ {
+ int i;
+ for (i=0; i<TX_RING_SIZE; i++) {
+ printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
+ (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
+ le32_to_cpu(np->tx_ring[i].next_desc),
+ le32_to_cpu(np->tx_ring[i].status),
+ (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
+ le32_to_cpu(np->tx_ring[i].frag[0].addr),
+ le32_to_cpu(np->tx_ring[i].frag[0].length));
+ }
+ printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
+ ioread32(np->base + TxListPtr),
+ netif_queue_stopped(dev));
+ printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
+ np->cur_tx, np->cur_tx % TX_RING_SIZE,
+ np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
+ printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
+ printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
+ }
+ spin_lock_irqsave(&np->lock, flag);
+
+ /* Stop and restart the chip's Tx processes. */
+ reset_tx(dev);
+ spin_unlock_irqrestore(&np->lock, flag);
+
+ dev->if_port = 0;
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ dev->stats.tx_errors++;
+ if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ netif_wake_queue(dev);
+ }
+ iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
+ tasklet_enable(&np->tx_tasklet);
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ np->cur_rx = np->cur_tx = 0;
+ np->dirty_rx = np->dirty_tx = 0;
+ np->cur_task = 0;
+
+ np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
+
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
+ ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].frag[0].length = 0;
+ np->rx_skbuff[i] = NULL;
+ }
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + 2);
+ np->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ np->rx_ring[i].frag[0].addr = cpu_to_le32(
+ dma_map_single(&np->pci_dev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE));
+ if (dma_mapping_error(&np->pci_dev->dev,
+ np->rx_ring[i].frag[0].addr)) {
+ dev_kfree_skb(skb);
+ np->rx_skbuff[i] = NULL;
+ break;
+ }
+ np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
+ }
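+ /* Leave dirty_rx a full ring behind cur_rx; with unsigned wrap-around,
+ refill_rx() will retry any entries the loop above failed to allocate. */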
+ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = NULL;
+ np->tx_ring[i].status = 0;
+ }
+}
+
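+/* Runs as a tasklet: link the descriptors queued by start_tx() into the
+ hardware list via next_desc, request a Tx interrupt on the newest one,
+ and restart DMA by writing TxListPtr if the engine has gone idle. */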
+static void tx_poll (unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = netdev_priv(dev);
+ unsigned head = np->cur_task % TX_RING_SIZE;
+ struct netdev_desc *txdesc =
+ &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
+
+ /* Chain the next pointer */
+ for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
+ int entry = np->cur_task % TX_RING_SIZE;
+ txdesc = &np->tx_ring[entry];
+ if (np->last_tx) {
+ np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
+ entry*sizeof(struct netdev_desc));
+ }
+ np->last_tx = txdesc;
+ }
+ /* Indicate the latest descriptor of tx ring */
+ txdesc->status |= cpu_to_le32(DescIntrOnTx);
+
+ if (ioread32 (np->base + TxListPtr) == 0)
+ iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
+ np->base + TxListPtr);
+}
+
+static netdev_tx_t
+start_tx (struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ struct netdev_desc *txdesc;
+ unsigned entry;
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+ np->tx_skbuff[entry] = skb;
+ txdesc = &np->tx_ring[entry];
+
+ txdesc->next_desc = 0;
+ txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
+ txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
+ skb->data, skb->len, DMA_TO_DEVICE));
+ if (dma_mapping_error(&np->pci_dev->dev,
+ txdesc->frag[0].addr))
+ goto drop_frame;
+ txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
+
+ /* Increment cur_tx before tasklet_schedule() */
+ np->cur_tx++;
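+ /* The barrier below makes the descriptor and cur_tx updates visible
+ before tx_poll() runs, possibly on another CPU. */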
+ mb();
+ /* Schedule a tx_poll() task */
+ tasklet_schedule(&np->tx_tasklet);
+
+ /* On some architectures, cache lines would need an explicit flush here. */
+ if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
+ !netif_queue_stopped(dev)) {
+ /* do nothing */
+ } else {
+ netif_stop_queue (dev);
+ }
+ if (netif_msg_tx_queued(np)) {
+ printk (KERN_DEBUG
+ "%s: Transmit frame #%d queued in slot %d.\n",
+ dev->name, np->cur_tx, entry);
+ }
+ return NETDEV_TX_OK;
+
+drop_frame:
+ dev_kfree_skb(skb);
+ np->tx_skbuff[entry] = NULL;
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+/* Reset hardware tx and free all of tx buffers */
+static int
+reset_tx (struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ struct sk_buff *skb;
+ int i;
+
+ /* Reset tx logic, TxListPtr will be cleaned */
+ iowrite16 (TxDisable, ioaddr + MACCtrl1);
+ sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
+
+ /* free all tx skbuff */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_ring[i].next_desc = 0;
+
+ skb = np->tx_skbuff[i];
+ if (skb) {
+ dma_unmap_single(&np->pci_dev->dev,
+ le32_to_cpu(np->tx_ring[i].frag[0].addr),
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ np->tx_skbuff[i] = NULL;
+ dev->stats.tx_dropped++;
+ }
+ }
+ np->cur_tx = np->dirty_tx = 0;
+ np->cur_task = 0;
+
+ np->last_tx = NULL;
+ iowrite8(127, ioaddr + TxDMAPollPeriod);
+
+ iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
+ return 0;
+}
+
+/* The interrupt handler cleans up after the Tx thread
+ and schedules Rx work. */
+static irqreturn_t intr_handler(int irq, void *dev_instance)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int hw_frame_id;
+ int tx_cnt;
+ int tx_status;
+ int handled = 0;
+ int i;
+
+
+ do {
+ int intr_status = ioread16(ioaddr + IntrStatus);
+ iowrite16(intr_status, ioaddr + IntrStatus);
+
+ if (netif_msg_intr(np))
+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (!(intr_status & DEFAULT_INTR))
+ break;
+
+ handled = 1;
+
+ if (intr_status & (IntrRxDMADone)) {
+ iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
+ ioaddr + IntrEnable);
+ if (np->budget < 0)
+ np->budget = RX_BUDGET;
+ tasklet_schedule(&np->rx_tasklet);
+ }
+ if (intr_status & (IntrTxDone | IntrDrvRqst)) {
+ tx_status = ioread16 (ioaddr + TxStatus);
+ for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
+ if (netif_msg_tx_done(np))
+ printk
+ ("%s: Transmit status is %2.2x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x1e) {
+ if (netif_msg_tx_err(np))
+ printk("%s: Transmit error status %4.4x.\n",
+ dev->name, tx_status);
+ dev->stats.tx_errors++;
+ if (tx_status & 0x10)
+ dev->stats.tx_fifo_errors++;
+ if (tx_status & 0x08)
+ dev->stats.collisions++;
+ if (tx_status & 0x04)
+ dev->stats.tx_fifo_errors++;
+ if (tx_status & 0x02)
+ dev->stats.tx_window_errors++;
+
+ /*
+ ** This reset has been verified on
+ ** DFE-580TX boards! phdm@macqel.be.
+ */
+ if (tx_status & 0x10) { /* TxUnderrun */
+ /* Restart Tx FIFO and transmitter */
+ sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
+ /* No need to reset the Tx pointer here */
+ }
+ /* Restart Tx and make sure the transmitter is actually enabled. */
+ i = 10;
+ do {
+ iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
+ if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
+ break;
+ mdelay(1);
+ } while (--i);
+ }
+ /* Yup, this is a documentation bug. It cost me *hours*. */
+ iowrite16 (0, ioaddr + TxStatus);
+ if (tx_cnt < 0) {
+ iowrite32(5000, ioaddr + DownCounter);
+ break;
+ }
+ tx_status = ioread16 (ioaddr + TxStatus);
+ }
+ hw_frame_id = (tx_status >> 8) & 0xff;
+ } else {
+ hw_frame_id = ioread8(ioaddr + TxFrameId);
+ }
+
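+ /* On revisions >= 0x14 the hardware TxFrameId is compared with each
+ descriptor's software frame id to judge completion, rather than
+ trusting the per-descriptor done bit (0x00010000) alone. */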
+ if (np->pci_dev->revision >= 0x14) {
+ spin_lock(&np->lock);
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ struct sk_buff *skb;
+ int sw_frame_id;
+ sw_frame_id = (le32_to_cpu(
+ np->tx_ring[entry].status) >> 2) & 0xff;
+ if (sw_frame_id == hw_frame_id &&
+ !(le32_to_cpu(np->tx_ring[entry].status)
+ & 0x00010000))
+ break;
+ if (sw_frame_id == (hw_frame_id + 1) %
+ TX_RING_SIZE)
+ break;
+ skb = np->tx_skbuff[entry];
+ /* Free the original skb. */
+ dma_unmap_single(&np->pci_dev->dev,
+ le32_to_cpu(np->tx_ring[entry].frag[0].addr),
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_irq (np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = NULL;
+ np->tx_ring[entry].frag[0].addr = 0;
+ np->tx_ring[entry].frag[0].length = 0;
+ }
+ spin_unlock(&np->lock);
+ } else {
+ spin_lock(&np->lock);
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ struct sk_buff *skb;
+ if (!(le32_to_cpu(np->tx_ring[entry].status)
+ & 0x00010000))
+ break;
+ skb = np->tx_skbuff[entry];
+ /* Free the original skb. */
+ dma_unmap_single(&np->pci_dev->dev,
+ le32_to_cpu(np->tx_ring[entry].frag[0].addr),
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_irq (np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = NULL;
+ np->tx_ring[entry].frag[0].addr = 0;
+ np->tx_ring[entry].frag[0].length = 0;
+ }
+ spin_unlock(&np->lock);
+ }
+
+ if (netif_queue_stopped(dev) &&
+ np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, clear busy flag. */
+ netif_wake_queue (dev);
+ }
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
+ netdev_error(dev, intr_status);
+ } while (0);
+ if (netif_msg_intr(np))
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, ioread16(ioaddr + IntrStatus));
+ return IRQ_RETVAL(handled);
+}
+
+static void rx_poll(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = netdev_priv(dev);
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int boguscnt = np->budget;
+ void __iomem *ioaddr = np->base;
+ int received = 0;
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (1) {
+ struct netdev_desc *desc = &(np->rx_ring[entry]);
+ u32 frame_status = le32_to_cpu(desc->status);
+ int pkt_len;
+
+ if (--boguscnt < 0) {
+ goto not_done;
+ }
+ if (!(frame_status & DescOwn))
+ break;
+ pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
+ if (netif_msg_rx_status(np))
+ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
+ frame_status);
+ if (frame_status & 0x001f4000) {
+ /* There was an error. */
+ if (netif_msg_rx_err(np))
+ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
+ frame_status);
+ dev->stats.rx_errors++;
+ if (frame_status & 0x00100000)
+ dev->stats.rx_length_errors++;
+ if (frame_status & 0x00010000)
+ dev->stats.rx_fifo_errors++;
+ if (frame_status & 0x00060000)
+ dev->stats.rx_frame_errors++;
+ if (frame_status & 0x00080000)
+ dev->stats.rx_crc_errors++;
+ if (frame_status & 0x00100000) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame,"
+ " status %8.8x.\n",
+ dev->name, frame_status);
+ }
+ } else {
+ struct sk_buff *skb;
+#ifndef final_version
+ if (netif_msg_rx_status(np))
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
+ ", bogus_cnt %d.\n",
+ pkt_len, boguscnt);
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak &&
+ (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ dma_sync_single_for_cpu(&np->pci_dev->dev,
+ le32_to_cpu(desc->frag[0].addr),
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
+ dma_sync_single_for_device(&np->pci_dev->dev,
+ le32_to_cpu(desc->frag[0].addr),
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ skb_put(skb, pkt_len);
+ } else {
+ dma_unmap_single(&np->pci_dev->dev,
+ le32_to_cpu(desc->frag[0].addr),
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
+ netif_rx(skb);
+ }
+ entry = (entry + 1) % RX_RING_SIZE;
+ received++;
+ }
+ np->cur_rx = entry;
+ refill_rx (dev);
+ np->budget -= received;
+ iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
+ return;
+
+not_done:
+ np->cur_rx = entry;
+ refill_rx (dev);
+ if (!received)
+ received = 1;
+ np->budget -= received;
+ if (np->budget <= 0)
+ np->budget = RX_BUDGET;
+ tasklet_schedule(&np->rx_tasklet);
+}
+
+static void refill_rx (struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int entry;
+ int cnt = 0;
+
+ /* Refill the Rx ring buffers. */
+ for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
+ np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
+ struct sk_buff *skb;
+ entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(np->rx_buf_sz + 2);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ np->rx_ring[entry].frag[0].addr = cpu_to_le32(
+ dma_map_single(&np->pci_dev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE));
+ if (dma_mapping_error(&np->pci_dev->dev,
+ np->rx_ring[entry].frag[0].addr)) {
+ dev_kfree_skb_irq(skb);
+ np->rx_skbuff[entry] = NULL;
+ break;
+ }
+ }
+ /* Perhaps we need not reset this field. */
+ np->rx_ring[entry].frag[0].length =
+ cpu_to_le32(np->rx_buf_sz | LastFrag);
+ np->rx_ring[entry].status = 0;
+ cnt++;
+ }
+}
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ u16 mii_ctl, mii_advertise, mii_lpa;
+ int speed;
+
+ if (intr_status & LinkChange) {
+ if (mdio_wait_link(dev, 10) == 0) {
+ printk(KERN_INFO "%s: Link up\n", dev->name);
+ if (np->an_enable) {
+ mii_advertise = mdio_read(dev, np->phys[0],
+ MII_ADVERTISE);
+ mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
+ mii_advertise &= mii_lpa;
+ printk(KERN_INFO "%s: Link changed: ",
+ dev->name);
+ if (mii_advertise & ADVERTISE_100FULL) {
+ np->speed = 100;
+ printk("100Mbps, full duplex\n");
+ } else if (mii_advertise & ADVERTISE_100HALF) {
+ np->speed = 100;
+ printk("100Mbps, half duplex\n");
+ } else if (mii_advertise & ADVERTISE_10FULL) {
+ np->speed = 10;
+ printk("10Mbps, full duplex\n");
+ } else if (mii_advertise & ADVERTISE_10HALF) {
+ np->speed = 10;
+ printk("10Mbps, half duplex\n");
+ } else
+ printk("\n");
+
+ } else {
+ mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
+ speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
+ np->speed = speed;
+ printk(KERN_INFO "%s: Link changed: %dMbps ,",
+ dev->name, speed);
+ printk("%s duplex.\n",
+ (mii_ctl & BMCR_FULLDPLX) ?
+ "full" : "half");
+ }
+ check_duplex(dev);
+ if (np->flowctrl && np->mii_if.full_duplex) {
+ iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
+ ioaddr + MulticastFilter1+2);
+ iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
+ ioaddr + MACCtrl0);
+ }
+ netif_carrier_on(dev);
+ } else {
+ printk(KERN_INFO "%s: Link down\n", dev->name);
+ netif_carrier_off(dev);
+ }
+ }
+ if (intr_status & StatsMax) {
+ get_stats(dev);
+ }
+ if (intr_status & IntrPCIErr) {
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* We must do a global reset of DMA to continue. */
+ }
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ unsigned long flags;
+ u8 late_coll, single_coll, mult_coll;
+
+ spin_lock_irqsave(&np->statlock, flags);
+ /* The chip only needs to report the frames it silently dropped. */
+ dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
+ dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
+ dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
+ dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
+
+ mult_coll = ioread8(ioaddr + StatsMultiColl);
+ np->xstats.tx_multiple_collisions += mult_coll;
+ single_coll = ioread8(ioaddr + StatsOneColl);
+ np->xstats.tx_single_collisions += single_coll;
+ late_coll = ioread8(ioaddr + StatsLateColl);
+ np->xstats.tx_late_collisions += late_coll;
+ dev->stats.collisions += mult_coll
+ + single_coll
+ + late_coll;
+
+ np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
+ np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
+ np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
+ np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
+ np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
+ np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
+ np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
+
+ dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
+ dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
+ dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
+ dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
+
+ spin_unlock_irqrestore(&np->statlock, flags);
+
+ return &dev->stats;
+}
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ u16 mc_filter[4]; /* Multicast hash filter */
+ u32 rx_mode;
+ int i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ } else if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+ int bit;
+ int index;
+ int crc;
+ memset (mc_filter, 0, sizeof (mc_filter));
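+ /* Index the 64-bit hash filter with the bit-reversed top six bits of
+ each address's little-endian CRC. */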
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
+ if (crc & 0x80000000) index |= 1 << bit;
+ mc_filter[index/16] |= (1 << (index % 16));
+ }
+ rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
+ } else {
+ iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
+ return;
+ }
+ if (np->mii_if.full_duplex && np->flowctrl)
+ mc_filter[3] |= 0x0200;
+
+ for (i = 0; i < 4; i++)
+ iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
+ iowrite8(rx_mode, ioaddr + RxMode);
+}
+
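+/* Write the MAC address to the chip's StationAddr registers as three
+ little-endian 16-bit words. */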
+static int __set_mac_addr(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ u16 addr16;
+
+ addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
+ iowrite16(addr16, np->base + StationAddr);
+ addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
+ iowrite16(addr16, np->base + StationAddr+2);
+ addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
+ iowrite16(addr16, np->base + StationAddr+4);
+ return 0;
+}
+
+/* Invoked with rtnl_lock held */
+static int sundance_set_mac_addr(struct net_device *dev, void *data)
+{
+ const struct sockaddr *addr = data;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ __set_mac_addr(dev);
+
+ return 0;
+}
+
+static const struct {
+ const char name[ETH_GSTRING_LEN];
+} sundance_stats[] = {
+ { "tx_multiple_collisions" },
+ { "tx_single_collisions" },
+ { "tx_late_collisions" },
+ { "tx_deferred" },
+ { "tx_deferred_excessive" },
+ { "tx_aborted" },
+ { "tx_bcasts" },
+ { "rx_bcasts" },
+ { "tx_mcasts" },
+ { "rx_mcasts" },
+};
+
+static int check_if_running(struct net_device *dev)
+{
+ if (!netif_running(dev))
+ return -EINVAL;
+ return 0;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(np->pci_dev));
+}
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ spin_lock_irq(&np->lock);
+ mii_ethtool_gset(&np->mii_if, ecmd);
+ spin_unlock_irq(&np->lock);
+ return 0;
+}
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int res;
+ spin_lock_irq(&np->lock);
+ res = mii_ethtool_sset(&np->mii_if, ecmd);
+ spin_unlock_irq(&np->lock);
+ return res;
+}
+
+static int nway_reset(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_nway_restart(&np->mii_if);
+}
+
+static u32 get_link(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_link_ok(&np->mii_if);
+}
+
+static u32 get_msglevel(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return np->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ np->msg_enable = val;
+}
+
+static void get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
+{
+ if (stringset == ETH_SS_STATS)
+ memcpy(data, sundance_stats, sizeof(sundance_stats));
+}
+
+static int get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(sundance_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i = 0;
+
+ get_stats(dev);
+ data[i++] = np->xstats.tx_multiple_collisions;
+ data[i++] = np->xstats.tx_single_collisions;
+ data[i++] = np->xstats.tx_late_collisions;
+ data[i++] = np->xstats.tx_deferred;
+ data[i++] = np->xstats.tx_deferred_excessive;
+ data[i++] = np->xstats.tx_aborted;
+ data[i++] = np->xstats.tx_bcasts;
+ data[i++] = np->xstats.rx_bcasts;
+ data[i++] = np->xstats.tx_mcasts;
+ data[i++] = np->xstats.rx_mcasts;
+}
+
+static const struct ethtool_ops ethtool_ops = {
+ .begin = check_if_running,
+ .get_drvinfo = get_drvinfo,
+ .get_settings = get_settings,
+ .set_settings = set_settings,
+ .nway_reset = nway_reset,
+ .get_link = get_link,
+ .get_msglevel = get_msglevel,
+ .set_msglevel = set_msglevel,
+ .get_strings = get_strings,
+ .get_sset_count = get_sset_count,
+ .get_ethtool_stats = get_ethtool_stats,
+};
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int rc;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irq(&np->lock);
+ rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+static int netdev_close(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ struct sk_buff *skb;
+ int i;
+
+ /* Wait and kill tasklet */
+ tasklet_kill(&np->rx_tasklet);
+ tasklet_kill(&np->tx_tasklet);
+ np->cur_tx = 0;
+ np->dirty_tx = 0;
+ np->cur_task = 0;
+ np->last_tx = NULL;
+
+ netif_stop_queue(dev);
+
+ if (netif_msg_ifdown(np)) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
+ "Rx %4.4x Int %2.2x.\n",
+ dev->name, ioread8(ioaddr + TxStatus),
+ ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ iowrite16(0x0000, ioaddr + IntrEnable);
+
+ /* Disable Rx and Tx DMA so resources can be released safely */
+ iowrite32(0x500, ioaddr + DMACtrl);
+
+ /* Stop the chip's Tx and Rx processes. */
+ iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
+
+ for (i = 2000; i > 0; i--) {
+ if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
+ break;
+ mdelay(1);
+ }
+
+ iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
+ ioaddr + ASIC_HI_WORD(ASICCtrl));
+
+ for (i = 2000; i > 0; i--) {
+ if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
+ break;
+ mdelay(1);
+ }
+
+#ifdef __i386__
+ if (netif_msg_hw(np)) {
+ printk(KERN_DEBUG " Tx ring at %8.8x:\n",
+ (int)(np->tx_ring_dma));
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
+ i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
+ np->tx_ring[i].frag[0].length);
+ printk(KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)(np->rx_ring_dma));
+ for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
+ printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
+ i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
+ np->rx_ring[i].frag[0].length);
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ del_timer_sync(&np->timer);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].status = 0;
+ skb = np->rx_skbuff[i];
+ if (skb) {
+ dma_unmap_single(&np->pci_dev->dev,
+ le32_to_cpu(np->rx_ring[i].frag[0].addr),
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ np->rx_skbuff[i] = NULL;
+ }
+ np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_ring[i].next_desc = 0;
+ skb = np->tx_skbuff[i];
+ if (skb) {
+ dma_unmap_single(&np->pci_dev->dev,
+ le32_to_cpu(np->tx_ring[i].frag[0].addr),
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
+ np->tx_skbuff[i] = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static void __devexit sundance_remove1 (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct netdev_private *np = netdev_priv(dev);
+ unregister_netdev(dev);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
+ np->rx_ring, np->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
+ np->tx_ring, np->tx_ring_dma);
+ pci_iounmap(pdev, np->base);
+ pci_release_regions(pdev);
+ free_netdev(dev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+#ifdef CONFIG_PM
+
+static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pci_dev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ netdev_close(dev);
+ netif_device_detach(dev);
+
+ pci_save_state(pci_dev);
+ pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
+
+ return 0;
+}
+
+static int sundance_resume(struct pci_dev *pci_dev)
+{
+ struct net_device *dev = pci_get_drvdata(pci_dev);
+ int err = 0;
+
+ if (!netif_running(dev))
+ return 0;
+
+ pci_set_power_state(pci_dev, PCI_D0);
+ pci_restore_state(pci_dev);
+
+ err = netdev_open(dev);
+ if (err) {
+ printk(KERN_ERR "%s: Can't resume interface!\n",
+ dev->name);
+ goto out;
+ }
+
+ netif_device_attach(dev);
+
+out:
+ return err;
+}
+
+#endif /* CONFIG_PM */
+
+static struct pci_driver sundance_driver = {
+ .name = DRV_NAME,
+ .id_table = sundance_pci_tbl,
+ .probe = sundance_probe1,
+ .remove = __devexit_p(sundance_remove1),
+#ifdef CONFIG_PM
+ .suspend = sundance_suspend,
+ .resume = sundance_resume,
+#endif /* CONFIG_PM */
+};
+
+static int __init sundance_init(void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk(version);
+#endif
+ return pci_register_driver(&sundance_driver);
+}
+
+static void __exit sundance_exit(void)
+{
+ pci_unregister_driver(&sundance_driver);
+}
+
+module_init(sundance_init);
+module_exit(sundance_exit);
+
+
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
new file mode 100644
index 000000000000..c1063d1540c2
--- /dev/null
+++ b/drivers/net/ethernet/dnet.c
@@ -0,0 +1,996 @@
+/*
+ * Dave DNET Ethernet Controller driver
+ *
+ * Copyright (C) 2008 Dave S.r.l. <www.dave.eu>
+ * Copyright (C) 2009 Ilya Yanok, Emcraft Systems Ltd, <yanok@emcraft.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+
+#include "dnet.h"
+
+#undef DEBUG
+
+/* function for reading internal MAC register */
+static u16 dnet_readw_mac(struct dnet *bp, u16 reg)
+{
+ u16 data_read;
+
+ /* issue a read */
+ dnet_writel(bp, reg, MACREG_ADDR);
+
+ /* since a read/write op to the MAC is very slow,
+ * we must wait before reading the data */
+ ndelay(500);
+
+ /* read data read from the MAC register */
+ data_read = dnet_readl(bp, MACREG_DATA);
+
+ /* all done */
+ return data_read;
+}
+
+/* function for writing internal MAC register */
+static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val)
+{
+ /* load data to write */
+ dnet_writel(bp, val, MACREG_DATA);
+
+ /* issue a write */
+ dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR);
+
+ /* since a read/write op to the MAC is very slow,
+ * we must wait before exiting */
+ ndelay(500);
+}
+
+static void __dnet_set_hwaddr(struct dnet *bp)
+{
+ u16 tmp;
+
+ tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr);
+ dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
+ tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2));
+ dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
+ tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4));
+ dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
+}
+
+static void __devinit dnet_get_hwaddr(struct dnet *bp)
+{
+ u16 tmp;
+ u8 addr[6];
+
+ /*
+ * from MAC docs:
+ * "Note that the MAC address is stored in the registers in Hexadecimal
+ * form. For example, to set the MAC Address to: AC-DE-48-00-00-80
+ * would require writing 0xAC (octet 0) to address 0x0B (high byte of
+ * Mac_addr[15:0]), 0xDE (octet 1) to address 0x0A (Low byte of
+ * Mac_addr[15:0]), 0x48 (octet 2) to address 0x0D (high byte of
+ * Mac_addr[31:16]), 0x00 (octet 3) to address 0x0C (Low byte of
+ * Mac_addr[31:16]), 0x00 (octet 4) to address 0x0F (high byte of
+ * Mac_addr[47:32]), and 0x80 (octet 5) to address 0x0E (Low byte of
+ * Mac_addr[47:32]).
+ */
+ tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG);
+ *((__be16 *)addr) = cpu_to_be16(tmp);
+ tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG);
+ *((__be16 *)(addr + 2)) = cpu_to_be16(tmp);
+ tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG);
+ *((__be16 *)(addr + 4)) = cpu_to_be16(tmp);
+
+ if (is_valid_ether_addr(addr))
+ memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+}
+
+static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct dnet *bp = bus->priv;
+ u16 value;
+
+ while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
+ & DNET_INTERNAL_GMII_MNG_CMD_FIN))
+ cpu_relax();
+
+ /* only 5 bits allowed for phy-addr and reg_offset */
+ mii_id &= 0x1f;
+ regnum &= 0x1f;
+
+ /* prepare reg_value for a read */
+ value = (mii_id << 8);
+ value |= regnum;
+
+ /* write control word */
+ dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value);
+
+ /* wait for end of transfer */
+ while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
+ & DNET_INTERNAL_GMII_MNG_CMD_FIN))
+ cpu_relax();
+
+ value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG);
+
+ pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value);
+
+ return value;
+}
+
+static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ struct dnet *bp = bus->priv;
+ u16 tmp;
+
+ pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value);
+
+ while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
+ & DNET_INTERNAL_GMII_MNG_CMD_FIN))
+ cpu_relax();
+
+ /* prepare for a write operation */
+ tmp = (1 << 13);
+
+ /* only 5 bits allowed for phy-addr and reg_offset */
+ mii_id &= 0x1f;
+ regnum &= 0x1f;
+
+ /* only 16 bits on data */
+ value &= 0xffff;
+
+ /* prepare reg_value for a write */
+ tmp |= (mii_id << 8);
+ tmp |= regnum;
+
+ /* write data to write first */
+ dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value);
+
+ /* write control word */
+ dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp);
+
+ while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
+ & DNET_INTERNAL_GMII_MNG_CMD_FIN))
+ cpu_relax();
+
+ return 0;
+}
+
+static int dnet_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+static void dnet_handle_link_change(struct net_device *dev)
+{
+ struct dnet *bp = netdev_priv(dev);
+ struct phy_device *phydev = bp->phy_dev;
+ unsigned long flags;
+ u32 mode_reg, ctl_reg;
+
+ int status_change = 0;
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG);
+ ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);
+
+ if (phydev->link) {
+ if (bp->duplex != phydev->duplex) {
+ if (phydev->duplex)
+ ctl_reg &=
+ ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP);
+ else
+ ctl_reg |=
+ DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP;
+
+ bp->duplex = phydev->duplex;
+ status_change = 1;
+ }
+
+ if (bp->speed != phydev->speed) {
+ status_change = 1;
+ switch (phydev->speed) {
+ case 1000:
+ mode_reg |= DNET_INTERNAL_MODE_GBITEN;
+ break;
+ case 100:
+ case 10:
+ mode_reg &= ~DNET_INTERNAL_MODE_GBITEN;
+ break;
+ default:
+ printk(KERN_WARNING
+ "%s: Ack! Speed (%d) is not "
+ "10/100/1000!\n", dev->name,
+ phydev->speed);
+ break;
+ }
+ bp->speed = phydev->speed;
+ }
+ }
+
+ if (phydev->link != bp->link) {
+ if (phydev->link) {
+ mode_reg |=
+ (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN);
+ } else {
+ mode_reg &=
+ ~(DNET_INTERNAL_MODE_RXEN |
+ DNET_INTERNAL_MODE_TXEN);
+ bp->speed = 0;
+ bp->duplex = -1;
+ }
+ bp->link = phydev->link;
+
+ status_change = 1;
+ }
+
+ if (status_change) {
+ dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg);
+ dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg);
+ }
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ if (status_change) {
+ if (phydev->link)
+ printk(KERN_INFO "%s: link up (%d/%s)\n",
+ dev->name, phydev->speed,
+ DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
+ else
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ }
+}
+
+static int dnet_mii_probe(struct net_device *dev)
+{
+ struct dnet *bp = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
+ int phy_addr;
+
+ /* find the first phy */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+ if (bp->mii_bus->phy_map[phy_addr]) {
+ phydev = bp->mii_bus->phy_map[phy_addr];
+ break;
+ }
+ }
+
+ if (!phydev) {
+ printk(KERN_ERR "%s: no PHY found\n", dev->name);
+ return -ENODEV;
+ }
+
+ /* TODO : add pin_irq */
+
+ /* attach the mac to the phy */
+ if (bp->capabilities & DNET_HAS_RMII) {
+ phydev = phy_connect(dev, dev_name(&phydev->dev),
+ &dnet_handle_link_change, 0,
+ PHY_INTERFACE_MODE_RMII);
+ } else {
+ phydev = phy_connect(dev, dev_name(&phydev->dev),
+ &dnet_handle_link_change, 0,
+ PHY_INTERFACE_MODE_MII);
+ }
+
+ if (IS_ERR(phydev)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(phydev);
+ }
+
+ /* mask with MAC supported features */
+ if (bp->capabilities & DNET_HAS_GIGABIT)
+ phydev->supported &= PHY_GBIT_FEATURES;
+ else
+ phydev->supported &= PHY_BASIC_FEATURES;
+
+ phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;
+
+ phydev->advertising = phydev->supported;
+
+ bp->link = 0;
+ bp->speed = 0;
+ bp->duplex = -1;
+ bp->phy_dev = phydev;
+
+ return 0;
+}
+
+static int dnet_mii_init(struct dnet *bp)
+{
+ int err, i;
+
+ bp->mii_bus = mdiobus_alloc();
+ if (bp->mii_bus == NULL)
+ return -ENOMEM;
+
+ bp->mii_bus->name = "dnet_mii_bus";
+ bp->mii_bus->read = &dnet_mdio_read;
+ bp->mii_bus->write = &dnet_mdio_write;
+ bp->mii_bus->reset = &dnet_mdio_reset;
+
+ snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
+
+ bp->mii_bus->priv = bp;
+
+ bp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!bp->mii_bus->irq) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ bp->mii_bus->irq[i] = PHY_POLL;
+
+ if (mdiobus_register(bp->mii_bus)) {
+ err = -ENXIO;
+ goto err_out_free_mdio_irq;
+ }
+
+ if (dnet_mii_probe(bp->dev) != 0) {
+ err = -ENXIO;
+ goto err_out_unregister_bus;
+ }
+
+ return 0;
+
+err_out_unregister_bus:
+ mdiobus_unregister(bp->mii_bus);
+err_out_free_mdio_irq:
+ kfree(bp->mii_bus->irq);
+err_out:
+ mdiobus_free(bp->mii_bus);
+ return err;
+}
+
+/* For Neptune board: LINK1000 as Link LED and TX as activity LED */
+static int dnet_phy_marvell_fixup(struct phy_device *phydev)
+{
+ return phy_write(phydev, 0x18, 0x4148);
+}
+
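+/* Accumulate the hardware counters into hw_stats. The counter registers
+ are mirrored field-for-field by the structure, so the WARN_ON()s check
+ that the two layouts still agree. */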
+static void dnet_update_stats(struct dnet *bp)
+{
+ u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT;
+ u32 *p = &bp->hw_stats.rx_pkt_ignr;
+ u32 *end = &bp->hw_stats.rx_byte + 1;
+
+ WARN_ON((unsigned long)(end - p - 1) !=
+ (DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4);
+
+ for (; p < end; p++, reg++)
+ *p += readl(reg);
+
+ reg = bp->regs + DNET_TX_UNICAST_CNT;
+ p = &bp->hw_stats.tx_unicast;
+ end = &bp->hw_stats.tx_byte + 1;
+
+ WARN_ON((unsigned long)(end - p - 1) !=
+ (DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4);
+
+ for (; p < end; p++, reg++)
+ *p += readl(reg);
+}
+
+static int dnet_poll(struct napi_struct *napi, int budget)
+{
+ struct dnet *bp = container_of(napi, struct dnet, napi);
+ struct net_device *dev = bp->dev;
+ int npackets = 0;
+ unsigned int pkt_len;
+ struct sk_buff *skb;
+ unsigned int *data_ptr;
+ u32 int_enable;
+ u32 cmd_word;
+ int i;
+
+ while (npackets < budget) {
+ /*
+ * break out of while loop if there are no more
+ * packets waiting
+ */
+ if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) {
+ napi_complete(napi);
+ int_enable = dnet_readl(bp, INTR_ENB);
+ int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
+ dnet_writel(bp, int_enable, INTR_ENB);
+ return 0;
+ }
+
+ cmd_word = dnet_readl(bp, RX_LEN_FIFO);
+ pkt_len = cmd_word & 0xFFFF;
+
+ if (cmd_word & 0xDF180000)
+ printk(KERN_ERR "%s packet receive error %x\n",
+ __func__, cmd_word);
+
+ skb = dev_alloc_skb(pkt_len + 5);
+ if (skb != NULL) {
+ /* Align IP on 16 byte boundaries */
+ skb_reserve(skb, 2);
+ /*
+ * 'skb_put()' points to the start of sk_buff
+ * data area.
+ */
+ data_ptr = (unsigned int *)skb_put(skb, pkt_len);
+ for (i = 0; i < (pkt_len + 3) >> 2; i++)
+ *data_ptr++ = dnet_readl(bp, RX_DATA_FIFO);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ npackets++;
+ } else {
+ /* drop the frame: drain its data to stay in sync */
+ for (i = 0; i < (pkt_len + 3) >> 2; i++)
+ dnet_readl(bp, RX_DATA_FIFO);
+ printk(KERN_NOTICE
+ "%s: No memory to allocate a sk_buff of "
+ "size %u.\n", dev->name, pkt_len);
+ }
+ }
+
+ if (npackets < budget) {
+ /* We processed all packets available. Tell NAPI it can
+ * stop polling, then re-enable rx interrupts. */
+ napi_complete(napi);
+ int_enable = dnet_readl(bp, INTR_ENB);
+ int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
+ dnet_writel(bp, int_enable, INTR_ENB);
+ }
+
+ /* NAPI expects the number of processed packets as return value */
+ return npackets;
+}
+
+static irqreturn_t dnet_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct dnet *bp = netdev_priv(dev);
+ u32 int_src, int_enable, int_current;
+ unsigned long flags;
+ unsigned int handled = 0;
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ /* read and clear the DNET irq (clear on read) */
+ int_src = dnet_readl(bp, INTR_SRC);
+ int_enable = dnet_readl(bp, INTR_ENB);
+ int_current = int_src & int_enable;
+
+ /* restart the queue if we had stopped it for TX fifo almost full */
+ if (int_current & DNET_INTR_SRC_TX_FIFOAE) {
+ int_enable = dnet_readl(bp, INTR_ENB);
+ int_enable &= ~DNET_INTR_ENB_TX_FIFOAE;
+ dnet_writel(bp, int_enable, INTR_ENB);
+ netif_wake_queue(dev);
+ handled = 1;
+ }
+
+ /* RX FIFO error checking */
+ if (int_current &
+ (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) {
+ printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__,
+ dnet_readl(bp, RX_STATUS), int_current);
+ /* we can only flush the RX FIFOs */
+ dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL);
+ ndelay(500);
+ dnet_writel(bp, 0, SYS_CTL);
+ handled = 1;
+ }
+
+ /* TX FIFO error checking */
+ if (int_current &
+ (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) {
+ printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__,
+ dnet_readl(bp, TX_STATUS), int_current);
+ /* we can only flush the TX FIFOs */
+ dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL);
+ ndelay(500);
+ dnet_writel(bp, 0, SYS_CTL);
+ handled = 1;
+ }
+
+ if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) {
+ if (napi_schedule_prep(&bp->napi)) {
+ /*
+ * There's no point taking any more interrupts
+ * until we have processed the buffers
+ */
+ /* Disable Rx interrupts and schedule NAPI poll */
+ int_enable = dnet_readl(bp, INTR_ENB);
+ int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF;
+ dnet_writel(bp, int_enable, INTR_ENB);
+ __napi_schedule(&bp->napi);
+ }
+ handled = 1;
+ }
+
+ if (!handled)
+ pr_debug("%s: irq %x remains\n", __func__, int_current);
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+
+#ifdef DEBUG
+static inline void dnet_print_skb(struct sk_buff *skb)
+{
+ int k;
+ printk(KERN_DEBUG PFX "data:");
+ for (k = 0; k < skb->len; k++)
+ printk(" %02x", (unsigned int)skb->data[k]);
+ printk("\n");
+}
+#else
+#define dnet_print_skb(skb) do {} while (0)
+#endif
+
+static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dnet *bp = netdev_priv(dev);
+ u32 tx_status, irq_enable;
+ unsigned int len, i, tx_cmd, wrsz;
+ unsigned long flags;
+ unsigned int *bufp;
+
+ pr_debug("start_xmit: len %u head %p data %p\n",
+ skb->len, skb->head, skb->data);
+ dnet_print_skb(skb);
+
+ /* frame size (words) */
+ len = (skb->len + 3) >> 2;
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ tx_status = dnet_readl(bp, TX_STATUS);
+
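+ /* the TX data FIFO takes whole 32-bit words: round the buffer
+ * start down to a word boundary and widen the copy (wrsz words)
+ * to cover the unaligned head and tail bytes */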
+ bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL);
+ wrsz = (u32) skb->len + 3;
+ wrsz += ((unsigned long) skb->data) & 0x3;
+ wrsz >>= 2;
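+ /* TX command word: bits 17:16 hold the byte offset of the frame
+ * within the first word, bits 15:0 the frame length in bytes */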
+ tx_cmd = ((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len;
+
+ /* check if there is enough room for the current frame */
+ if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) {
+ for (i = 0; i < wrsz; i++)
+ dnet_writel(bp, *bufp++, TX_DATA_FIFO);
+
+ /*
+ * inform MAC that a packet's written and ready to be
+ * shipped out
+ */
+ dnet_writel(bp, tx_cmd, TX_LEN_FIFO);
+ }
+
+ if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) {
+ netif_stop_queue(dev);
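+ /* INTR_SRC is clear-on-read: apparently this read drops any
+ * stale TX almost-empty status before re-enabling it */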
+ tx_status = dnet_readl(bp, INTR_SRC);
+ irq_enable = dnet_readl(bp, INTR_ENB);
+ irq_enable |= DNET_INTR_ENB_TX_FIFOAE;
+ dnet_writel(bp, irq_enable, INTR_ENB);
+ }
+
+ skb_tx_timestamp(skb);
+
+ /* free the buffer */
+ dev_kfree_skb(skb);
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static void dnet_reset_hw(struct dnet *bp)
+{
+ /* put ts_mac in IDLE state i.e. disable rx/tx */
+ dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN);
+
+ /*
+ * RX FIFO almost full threshold: only cmd FIFO almost full is
+ * implemented for RX side
+ */
+ dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH);
+ /*
+ * TX FIFO almost empty threshold: only data FIFO almost empty
+ * is implemented for TX side
+ */
+ dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH);
+
+ /* flush rx/tx fifos */
+ dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH,
+ SYS_CTL);
+ msleep(1);
+ dnet_writel(bp, 0, SYS_CTL);
+}
+
+static void dnet_init_hw(struct dnet *bp)
+{
+ u32 config;
+
+ dnet_reset_hw(bp);
+ __dnet_set_hwaddr(bp);
+
+ config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);
+
+ if (bp->dev->flags & IFF_PROMISC)
+ /* Copy All Frames */
+ config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC;
+ if (!(bp->dev->flags & IFF_BROADCAST))
+ /* No BroadCast */
+ config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST;
+
+ config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE |
+ DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST |
+ DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL |
+ DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS;
+
+ dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config);
+
+ /* clear irq before enabling them */
+ config = dnet_readl(bp, INTR_SRC);
+
+ /* enable RX/TX interrupt, recv packet ready interrupt */
+ dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY |
+ DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR |
+ DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL |
+ DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM |
+ DNET_INTR_ENB_RX_PKTRDY, INTR_ENB);
+}
+
+static int dnet_open(struct net_device *dev)
+{
+ struct dnet *bp = netdev_priv(dev);
+
+ /* if the phy is not yet registered, retry later */
+ if (!bp->phy_dev)
+ return -EAGAIN;
+
+ if (!is_valid_ether_addr(dev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ napi_enable(&bp->napi);
+ dnet_init_hw(bp);
+
+ phy_start_aneg(bp->phy_dev);
+
+ /* schedule a link state check */
+ phy_start(bp->phy_dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int dnet_close(struct net_device *dev)
+{
+ struct dnet *bp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ napi_disable(&bp->napi);
+
+ if (bp->phy_dev)
+ phy_stop(bp->phy_dev);
+
+ dnet_reset_hw(bp);
+ netif_carrier_off(dev);
+
+ return 0;
+}
+
+static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat)
+{
+ pr_debug("%s\n", __func__);
+ pr_debug("----------------------------- RX statistics "
+ "-------------------------------\n");
+ pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr);
+ pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err);
+ pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm);
+ pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm);
+ pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol);
+ pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err);
+ pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt);
+ pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm);
+ pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm);
+ pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast);
+ pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast);
+ pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag);
+ pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink);
+ pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib);
+ pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd);
+ pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte);
+ pr_debug("----------------------------- TX statistics "
+ "-------------------------------\n");
+ pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast);
+ pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm);
+ pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast);
+ pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast);
+ pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag);
+ pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs);
+ pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo);
+ pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte);
+}
+
+static struct net_device_stats *dnet_get_stats(struct net_device *dev)
+{
+ struct dnet *bp = netdev_priv(dev);
+ struct net_device_stats *nstat = &dev->stats;
+ struct dnet_stats *hwstat = &bp->hw_stats;
+
+ /* read stats from hardware */
+ dnet_update_stats(bp);
+
+ /* Convert HW stats into netdevice stats */
+ nstat->rx_errors = (hwstat->rx_len_chk_err +
+ hwstat->rx_lng_frm + hwstat->rx_shrt_frm +
+ /* ignore IPG violation errors:
+ hwstat->rx_ipg_viol + */
+ hwstat->rx_crc_err +
+ hwstat->rx_pre_shrink +
+ hwstat->rx_drib_nib + hwstat->rx_unsup_opcd);
+ nstat->tx_errors = hwstat->tx_bad_fcs;
+ nstat->rx_length_errors = (hwstat->rx_len_chk_err +
+ hwstat->rx_lng_frm +
+ hwstat->rx_shrt_frm + hwstat->rx_pre_shrink);
+ nstat->rx_crc_errors = hwstat->rx_crc_err;
+ nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib;
+ nstat->rx_packets = hwstat->rx_ok_pkt;
+ nstat->tx_packets = (hwstat->tx_unicast +
+ hwstat->tx_multicast + hwstat->tx_brdcast);
+ nstat->rx_bytes = hwstat->rx_byte;
+ nstat->tx_bytes = hwstat->tx_byte;
+ nstat->multicast = hwstat->rx_multicast;
+ nstat->rx_missed_errors = hwstat->rx_pkt_ignr;
+
+ dnet_print_pretty_hwstats(hwstat);
+
+ return nstat;
+}
+
+static int dnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct dnet *bp = netdev_priv(dev);
+ struct phy_device *phydev = bp->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(phydev, cmd);
+}
+
+static int dnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct dnet *bp = netdev_priv(dev);
+ struct phy_device *phydev = bp->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, cmd);
+}
+
+static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct dnet *bp = netdev_priv(dev);
+ struct phy_device *phydev = bp->phy_dev;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(phydev, rq, cmd);
+}
+
+static void dnet_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, "0");
+}
+
+static const struct ethtool_ops dnet_ethtool_ops = {
+ .get_settings = dnet_get_settings,
+ .set_settings = dnet_set_settings,
+ .get_drvinfo = dnet_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+static const struct net_device_ops dnet_netdev_ops = {
+ .ndo_open = dnet_open,
+ .ndo_stop = dnet_close,
+ .ndo_get_stats = dnet_get_stats,
+ .ndo_start_xmit = dnet_start_xmit,
+ .ndo_do_ioctl = dnet_ioctl,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static int __devinit dnet_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct net_device *dev;
+ struct dnet *bp;
+ struct phy_device *phydev;
+ int err = -ENXIO;
+ unsigned int mem_base, mem_size, irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no mmio resource defined\n");
+ goto err_out;
+ }
+ mem_base = res->start;
+ mem_size = resource_size(res);
+ irq = platform_get_irq(pdev, 0);
+
+ if (!request_mem_region(mem_base, mem_size, DRV_NAME)) {
+ dev_err(&pdev->dev, "no memory region available\n");
+ err = -EBUSY;
+ goto err_out;
+ }
+
+ err = -ENOMEM;
+ dev = alloc_etherdev(sizeof(*bp));
+ if (!dev) {
+ dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
+ goto err_out_release_mem;
+ }
+
+ /* TODO: Actually, we have some interesting features... */
+ dev->features |= 0;
+
+ bp = netdev_priv(dev);
+ bp->dev = dev;
+
+ platform_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ spin_lock_init(&bp->lock);
+
+ bp->regs = ioremap(mem_base, mem_size);
+ if (!bp->regs) {
+ dev_err(&pdev->dev, "failed to map registers, aborting.\n");
+ err = -ENOMEM;
+ goto err_out_free_dev;
+ }
+
+ dev->irq = irq;
+ err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
+ irq, err);
+ goto err_out_iounmap;
+ }
+
+ dev->netdev_ops = &dnet_netdev_ops;
+ netif_napi_add(dev, &bp->napi, dnet_poll, 64);
+ dev->ethtool_ops = &dnet_ethtool_ops;
+
+ dev->base_addr = (unsigned long)bp->regs;
+
+ bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK;
+
+ dnet_get_hwaddr(bp);
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ /* choose a random ethernet address */
+ random_ether_addr(dev->dev_addr);
+ __dnet_set_hwaddr(bp);
+ }
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_free_irq;
+ }
+
+ /* register the PHY board fixup (for Marvell 88E1111) */
+ err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0,
+ dnet_phy_marvell_fixup);
+ /* we can live without it, so just issue a warning */
+ if (err)
+ dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n");
+
+ err = dnet_mii_init(bp);
+ if (err)
+ goto err_out_unregister_netdev;
+
+ dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
+ bp->regs, mem_base, dev->irq, dev->dev_addr);
+ dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
+ (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
+ (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
+ (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
+ (bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
+ phydev = bp->phy_dev;
+ dev_info(&pdev->dev, "attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, irq=%d)\n",
+ phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
+
+ return 0;
+
+err_out_unregister_netdev:
+ unregister_netdev(dev);
+err_out_free_irq:
+ free_irq(dev->irq, dev);
+err_out_iounmap:
+ iounmap(bp->regs);
+err_out_free_dev:
+ free_netdev(dev);
+err_out_release_mem:
+ release_mem_region(mem_base, mem_size);
+err_out:
+ return err;
+}
+
+static int __devexit dnet_remove(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct dnet *bp;
+
+ dev = platform_get_drvdata(pdev);
+
+ if (dev) {
+ bp = netdev_priv(dev);
+ if (bp->phy_dev)
+ phy_disconnect(bp->phy_dev);
+ mdiobus_unregister(bp->mii_bus);
+ kfree(bp->mii_bus->irq);
+ mdiobus_free(bp->mii_bus);
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ iounmap(bp->regs);
+ free_netdev(dev);
+ }
+
+ return 0;
+}
+
+static struct platform_driver dnet_driver = {
+ .probe = dnet_probe,
+ .remove = __devexit_p(dnet_remove),
+ .driver = {
+ .name = "dnet",
+ },
+};
+
+static int __init dnet_init(void)
+{
+ return platform_driver_register(&dnet_driver);
+}
+
+static void __exit dnet_exit(void)
+{
+ platform_driver_unregister(&dnet_driver);
+}
+
+module_init(dnet_init);
+module_exit(dnet_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Dave DNET Ethernet driver");
+MODULE_AUTHOR("Ilya Yanok <yanok@emcraft.com>, "
+ "Matteo Vit <matteo.vit@dave.eu>");
diff --git a/drivers/net/ethernet/dnet.h b/drivers/net/ethernet/dnet.h
new file mode 100644
index 000000000000..37f5b30fa78b
--- /dev/null
+++ b/drivers/net/ethernet/dnet.h
@@ -0,0 +1,225 @@
+/*
+ * Dave DNET Ethernet Controller driver
+ *
+ * Copyright (C) 2008 Dave S.r.l. <www.dave.eu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _DNET_H
+#define _DNET_H
+
+#define DRV_NAME "dnet"
+#define DRV_VERSION "0.9.1"
+#define PFX DRV_NAME ": "
+
+/* Register access macros */
+#define dnet_writel(port, value, reg) \
+ writel((value), (port)->regs + DNET_##reg)
+#define dnet_readl(port, reg) readl((port)->regs + DNET_##reg)
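+/* e.g. dnet_readl(bp, RX_FIFO_WCNT) expands to
+ * readl(bp->regs + DNET_RX_FIFO_WCNT)
+ */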
+
+/* ALL DNET FIFO REGISTERS */
+#define DNET_RX_LEN_FIFO 0x000 /* RX_LEN_FIFO */
+#define DNET_RX_DATA_FIFO 0x004 /* RX_DATA_FIFO */
+#define DNET_TX_LEN_FIFO 0x008 /* TX_LEN_FIFO */
+#define DNET_TX_DATA_FIFO 0x00C /* TX_DATA_FIFO */
+
+/* ALL DNET CONTROL/STATUS REGISTERS OFFSETS */
+#define DNET_VERCAPS 0x100 /* VERCAPS */
+#define DNET_INTR_SRC 0x104 /* INTR_SRC */
+#define DNET_INTR_ENB 0x108 /* INTR_ENB */
+#define DNET_RX_STATUS 0x10C /* RX_STATUS */
+#define DNET_TX_STATUS 0x110 /* TX_STATUS */
+#define DNET_RX_FRAMES_CNT 0x114 /* RX_FRAMES_CNT */
+#define DNET_TX_FRAMES_CNT 0x118 /* TX_FRAMES_CNT */
+#define DNET_RX_FIFO_TH 0x11C /* RX_FIFO_TH */
+#define DNET_TX_FIFO_TH 0x120 /* TX_FIFO_TH */
+#define DNET_SYS_CTL 0x124 /* SYS_CTL */
+#define DNET_PAUSE_TMR 0x128 /* PAUSE_TMR */
+#define DNET_RX_FIFO_WCNT 0x12C /* RX_FIFO_WCNT */
+#define DNET_TX_FIFO_WCNT 0x130 /* TX_FIFO_WCNT */
+
+/* ALL DNET MAC REGISTERS */
+#define DNET_MACREG_DATA 0x200 /* Mac-Reg Data */
+#define DNET_MACREG_ADDR 0x204 /* Mac-Reg Addr */
+
+/* ALL DNET RX STATISTICS COUNTERS */
+#define DNET_RX_PKT_IGNR_CNT 0x300
+#define DNET_RX_LEN_CHK_ERR_CNT 0x304
+#define DNET_RX_LNG_FRM_CNT 0x308
+#define DNET_RX_SHRT_FRM_CNT 0x30C
+#define DNET_RX_IPG_VIOL_CNT 0x310
+#define DNET_RX_CRC_ERR_CNT 0x314
+#define DNET_RX_OK_PKT_CNT 0x318
+#define DNET_RX_CTL_FRM_CNT 0x31C
+#define DNET_RX_PAUSE_FRM_CNT 0x320
+#define DNET_RX_MULTICAST_CNT 0x324
+#define DNET_RX_BROADCAST_CNT 0x328
+#define DNET_RX_VLAN_TAG_CNT 0x32C
+#define DNET_RX_PRE_SHRINK_CNT 0x330
+#define DNET_RX_DRIB_NIB_CNT 0x334
+#define DNET_RX_UNSUP_OPCD_CNT 0x338
+#define DNET_RX_BYTE_CNT 0x33C
+
+/* DNET TX STATISTICS COUNTERS */
+#define DNET_TX_UNICAST_CNT 0x400
+#define DNET_TX_PAUSE_FRM_CNT 0x404
+#define DNET_TX_MULTICAST_CNT 0x408
+#define DNET_TX_BRDCAST_CNT 0x40C
+#define DNET_TX_VLAN_TAG_CNT 0x410
+#define DNET_TX_BAD_FCS_CNT 0x414
+#define DNET_TX_JUMBO_CNT 0x418
+#define DNET_TX_BYTE_CNT 0x41C
+
+/* SOME INTERNAL MAC-CORE REGISTER */
+#define DNET_INTERNAL_MODE_REG 0x0
+#define DNET_INTERNAL_RXTX_CONTROL_REG 0x2
+#define DNET_INTERNAL_MAX_PKT_SIZE_REG 0x4
+#define DNET_INTERNAL_IGP_REG 0x8
+#define DNET_INTERNAL_MAC_ADDR_0_REG 0xa
+#define DNET_INTERNAL_MAC_ADDR_1_REG 0xc
+#define DNET_INTERNAL_MAC_ADDR_2_REG 0xe
+#define DNET_INTERNAL_TX_RX_STS_REG 0x12
+#define DNET_INTERNAL_GMII_MNG_CTL_REG 0x14
+#define DNET_INTERNAL_GMII_MNG_DAT_REG 0x16
+
+#define DNET_INTERNAL_GMII_MNG_CMD_FIN (1 << 14)
+
+#define DNET_INTERNAL_WRITE (1 << 31)
+
+/* MAC-CORE REGISTER FIELDS */
+
+/* MAC-CORE MODE REGISTER FIELDS */
+#define DNET_INTERNAL_MODE_GBITEN (1 << 0)
+#define DNET_INTERNAL_MODE_FCEN (1 << 1)
+#define DNET_INTERNAL_MODE_RXEN (1 << 2)
+#define DNET_INTERNAL_MODE_TXEN (1 << 3)
+
+/* MAC-CORE RXTX CONTROL REGISTER FIELDS */
+#define DNET_INTERNAL_RXTX_CONTROL_RXSHORTFRAME (1 << 8)
+#define DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST (1 << 7)
+#define DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST (1 << 4)
+#define DNET_INTERNAL_RXTX_CONTROL_RXPAUSE (1 << 3)
+#define DNET_INTERNAL_RXTX_CONTROL_DISTXFCS (1 << 2)
+#define DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS (1 << 1)
+#define DNET_INTERNAL_RXTX_CONTROL_ENPROMISC (1 << 0)
+#define DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL (1 << 6)
+#define DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP (1 << 5)
+
+/* SYSTEM CONTROL REGISTER FIELDS */
+#define DNET_SYS_CTL_IGNORENEXTPKT (1 << 0)
+#define DNET_SYS_CTL_SENDPAUSE (1 << 2)
+#define DNET_SYS_CTL_RXFIFOFLUSH (1 << 3)
+#define DNET_SYS_CTL_TXFIFOFLUSH (1 << 4)
+
+/* TX STATUS REGISTER FIELDS */
+#define DNET_TX_STATUS_FIFO_ALMOST_EMPTY (1 << 2)
+#define DNET_TX_STATUS_FIFO_ALMOST_FULL (1 << 1)
+
+/* INTERRUPT SOURCE REGISTER FIELDS */
+#define DNET_INTR_SRC_TX_PKTSENT (1 << 0)
+#define DNET_INTR_SRC_TX_FIFOAF (1 << 1)
+#define DNET_INTR_SRC_TX_FIFOAE (1 << 2)
+#define DNET_INTR_SRC_TX_DISCFRM (1 << 3)
+#define DNET_INTR_SRC_TX_FIFOFULL (1 << 4)
+#define DNET_INTR_SRC_RX_CMDFIFOAF (1 << 8)
+#define DNET_INTR_SRC_RX_CMDFIFOFF (1 << 9)
+#define DNET_INTR_SRC_RX_DATAFIFOFF (1 << 10)
+#define DNET_INTR_SRC_TX_SUMMARY (1 << 16)
+#define DNET_INTR_SRC_RX_SUMMARY (1 << 17)
+#define DNET_INTR_SRC_PHY (1 << 19)
+
+/* INTERRUPT ENABLE REGISTER FIELDS */
+#define DNET_INTR_ENB_TX_PKTSENT (1 << 0)
+#define DNET_INTR_ENB_TX_FIFOAF (1 << 1)
+#define DNET_INTR_ENB_TX_FIFOAE (1 << 2)
+#define DNET_INTR_ENB_TX_DISCFRM (1 << 3)
+#define DNET_INTR_ENB_TX_FIFOFULL (1 << 4)
+#define DNET_INTR_ENB_RX_PKTRDY (1 << 8)
+#define DNET_INTR_ENB_RX_FIFOAF (1 << 9)
+#define DNET_INTR_ENB_RX_FIFOERR (1 << 10)
+#define DNET_INTR_ENB_RX_ERROR (1 << 11)
+#define DNET_INTR_ENB_RX_FIFOFULL (1 << 12)
+#define DNET_INTR_ENB_RX_FIFOAE (1 << 13)
+#define DNET_INTR_ENB_TX_SUMMARY (1 << 16)
+#define DNET_INTR_ENB_RX_SUMMARY (1 << 17)
+#define DNET_INTR_ENB_GLOBAL_ENABLE (1 << 18)
+
+/* default values:
+ * almost empty = less than one full sized ethernet frame (no jumbo)
+ *                left inside the fifo
+ * almost full  = room to write less than one full sized ethernet frame
+ *                (no jumbo) into the fifo
+ */
+#define DNET_CFG_TX_FIFO_FULL_THRES 25
+#define DNET_CFG_RX_FIFO_FULL_THRES 20
+
+/*
+ * Capabilities. Used by the driver to discover which features the
+ * ethernet controller inside the FPGA provides.
+ */
+
+#define DNET_HAS_MDIO (1 << 0)
+#define DNET_HAS_IRQ (1 << 1)
+#define DNET_HAS_GIGABIT (1 << 2)
+#define DNET_HAS_DMA (1 << 3)
+
+#define DNET_HAS_MII (1 << 4) /* or GMII */
+#define DNET_HAS_RMII (1 << 5) /* or RGMII */
+
+#define DNET_CAPS_MASK 0xFFFF
+
+#define DNET_FIFO_SIZE 1024 /* 1K x 32 bit */
+#define DNET_FIFO_TX_DATA_AF_TH (DNET_FIFO_SIZE - 384) /* 384 = 1536 / 4 */
+#define DNET_FIFO_TX_DATA_AE_TH 384
+
+#define DNET_FIFO_RX_CMD_AF_TH (1 << 16) /* just one frame inside the FIFO */
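+/* the frame count lives in the upper 16 bits of RX_FIFO_WCNT (see the
+ * reads in dnet.c), so 1 << 16 encodes a one-frame threshold */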
+
+/*
+ * Hardware-collected statistics.
+ */
+struct dnet_stats {
+ u32 rx_pkt_ignr;
+ u32 rx_len_chk_err;
+ u32 rx_lng_frm;
+ u32 rx_shrt_frm;
+ u32 rx_ipg_viol;
+ u32 rx_crc_err;
+ u32 rx_ok_pkt;
+ u32 rx_ctl_frm;
+ u32 rx_pause_frm;
+ u32 rx_multicast;
+ u32 rx_broadcast;
+ u32 rx_vlan_tag;
+ u32 rx_pre_shrink;
+ u32 rx_drib_nib;
+ u32 rx_unsup_opcd;
+ u32 rx_byte;
+ u32 tx_unicast;
+ u32 tx_pause_frm;
+ u32 tx_multicast;
+ u32 tx_brdcast;
+ u32 tx_vlan_tag;
+ u32 tx_bad_fcs;
+ u32 tx_jumbo;
+ u32 tx_byte;
+};
+
+struct dnet {
+ void __iomem *regs;
+ spinlock_t lock;
+ struct platform_device *pdev;
+ struct net_device *dev;
+ struct dnet_stats hw_stats;
+ unsigned int capabilities; /* read from FPGA */
+ struct napi_struct napi;
+
+ /* PHY stuff */
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ unsigned int link;
+ unsigned int speed;
+ unsigned int duplex;
+};
+
+#endif /* _DNET_H */
diff --git a/drivers/net/ethernet/emulex/Kconfig b/drivers/net/ethernet/emulex/Kconfig
new file mode 100644
index 000000000000..7a28a6433944
--- /dev/null
+++ b/drivers/net/ethernet/emulex/Kconfig
@@ -0,0 +1,23 @@
+#
+# Emulex driver configuration
+#
+
+config NET_VENDOR_EMULEX
+ bool "Emulex devices"
+ default y
+ depends on PCI && INET
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Emulex cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_EMULEX
+
+source "drivers/net/ethernet/emulex/benet/Kconfig"
+
+endif # NET_VENDOR_EMULEX
diff --git a/drivers/net/ethernet/emulex/Makefile b/drivers/net/ethernet/emulex/Makefile
new file mode 100644
index 000000000000..ea8ec574d45a
--- /dev/null
+++ b/drivers/net/ethernet/emulex/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Emulex device drivers.
+#
+
+obj-$(CONFIG_BE2NET) += benet/
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
new file mode 100644
index 000000000000..804db04a2bd0
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -0,0 +1,6 @@
+config BE2NET
+ tristate "ServerEngines' 10Gbps NIC - BladeEngine"
+ depends on PCI && INET
+ ---help---
+ This driver implements the NIC functionality for ServerEngines'
+ 10Gbps network adapter - BladeEngine.
diff --git a/drivers/net/ethernet/emulex/benet/Makefile b/drivers/net/ethernet/emulex/benet/Makefile
new file mode 100644
index 000000000000..a60cd8051135
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile to build the network driver for ServerEngines' BladeEngine.
+#
+
+obj-$(CONFIG_BE2NET) += be2net.o
+
+be2net-y := be_main.o be_cmds.o be_ethtool.o
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
new file mode 100644
index 000000000000..644e8fed8364
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -0,0 +1,532 @@
+/*
+ * Copyright (C) 2005 - 2011 Emulex
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#ifndef BE_H
+#define BE_H
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <net/tcp.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <linux/if_vlan.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/u64_stats_sync.h>
+
+#include "be_hw.h"
+
+#define DRV_VER "4.0.100u"
+#define DRV_NAME "be2net"
+#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
+#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
+#define OC_NAME "Emulex OneConnect 10Gbps NIC"
+#define OC_NAME_BE OC_NAME "(be3)"
+#define OC_NAME_LANCER OC_NAME "(Lancer)"
+#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
+
+#define BE_VENDOR_ID 0x19a2
+#define EMULEX_VENDOR_ID 0x10df
+#define BE_DEVICE_ID1 0x211
+#define BE_DEVICE_ID2 0x221
+#define OC_DEVICE_ID1 0x700 /* Device Id for BE2 cards */
+#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
+#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
+#define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */
+
+static inline char *nic_name(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case OC_DEVICE_ID1:
+ return OC_NAME;
+ case OC_DEVICE_ID2:
+ return OC_NAME_BE;
+ case OC_DEVICE_ID3:
+ case OC_DEVICE_ID4:
+ return OC_NAME_LANCER;
+ case BE_DEVICE_ID2:
+ return BE3_NAME;
+ default:
+ return BE_NAME;
+ }
+}
+
+/* Number of bytes of an RX frame that are copied to skb->data */
+#define BE_HDR_LEN ((u16) 64)
+#define BE_MAX_JUMBO_FRAME_SIZE 9018
+#define BE_MIN_MTU 256
+
+#define BE_NUM_VLANS_SUPPORTED 64
+#define BE_MAX_EQD 96
+#define BE_MAX_TX_FRAG_COUNT 30
+
+#define EVNT_Q_LEN 1024
+#define TX_Q_LEN 2048
+#define TX_CQ_LEN 1024
+#define RX_Q_LEN 1024 /* Does not support any other value */
+#define RX_CQ_LEN 1024
+#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
+#define MCC_CQ_LEN 256
+
+#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */
+#define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
+#define MAX_TX_QS 8
+#define BE_MAX_MSIX_VECTORS (MAX_RX_QS + 1)/* RX + TX */
+#define BE_NAPI_WEIGHT 64
+#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
+#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
+
+#define FW_VER_LEN 32
+
+struct be_dma_mem {
+ void *va;
+ dma_addr_t dma;
+ u32 size;
+};
+
+struct be_queue_info {
+ struct be_dma_mem dma_mem;
+ u16 len;
+ u16 entry_size; /* Size of an element in the queue */
+ u16 id;
+ u16 tail, head;
+ bool created;
+ atomic_t used; /* Number of valid elements in the queue */
+};
+
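+/* Queue lengths are powers of two, so indices wrap with a mask rather
+ * than a division; the BUG_ON below enforces the invariant.
+ */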
+static inline u32 MODULO(u16 val, u16 limit)
+{
+ BUG_ON(limit & (limit - 1));
+ return val & (limit - 1);
+}
+
+static inline void index_adv(u16 *index, u16 val, u16 limit)
+{
+ *index = MODULO((*index + val), limit);
+}
+
+static inline void index_inc(u16 *index, u16 limit)
+{
+ *index = MODULO((*index + 1), limit);
+}
+
+static inline void *queue_head_node(struct be_queue_info *q)
+{
+ return q->dma_mem.va + q->head * q->entry_size;
+}
+
+static inline void *queue_tail_node(struct be_queue_info *q)
+{
+ return q->dma_mem.va + q->tail * q->entry_size;
+}
+
+static inline void *queue_index_node(struct be_queue_info *q, u16 index)
+{
+ return q->dma_mem.va + index * q->entry_size;
+}
+
+static inline void queue_head_inc(struct be_queue_info *q)
+{
+ index_inc(&q->head, q->len);
+}
+
+static inline void queue_tail_inc(struct be_queue_info *q)
+{
+ index_inc(&q->tail, q->len);
+}
+
+struct be_eq_obj {
+ struct be_queue_info q;
+ char desc[32];
+
+ /* Adaptive interrupt coalescing (AIC) info */
+ bool enable_aic;
+ u16 min_eqd; /* in usecs */
+ u16 max_eqd; /* in usecs */
+ u16 cur_eqd; /* in usecs */
+ u8 eq_idx;
+
+ struct napi_struct napi;
+};
+
+struct be_mcc_obj {
+ struct be_queue_info q;
+ struct be_queue_info cq;
+ bool rearm_cq;
+};
+
+struct be_tx_stats {
+ u64 tx_bytes;
+ u64 tx_pkts;
+ u64 tx_reqs;
+ u64 tx_wrbs;
+ u64 tx_compl;
+ ulong tx_jiffies;
+ u32 tx_stops;
+ struct u64_stats_sync sync;
+ struct u64_stats_sync sync_compl;
+};
+
+struct be_tx_obj {
+ struct be_queue_info q;
+ struct be_queue_info cq;
+ /* Remember the skbs that were transmitted */
+ struct sk_buff *sent_skb_list[TX_Q_LEN];
+ struct be_tx_stats stats;
+};
+
+/* Struct to remember the pages posted for rx frags */
+struct be_rx_page_info {
+ struct page *page;
+ DEFINE_DMA_UNMAP_ADDR(bus);
+ u16 page_offset;
+ bool last_page_user;
+};
+
+struct be_rx_stats {
+ u64 rx_bytes;
+ u64 rx_pkts;
+ u64 rx_pkts_prev;
+ ulong rx_jiffies;
+ u32 rx_drops_no_skbs; /* skb allocation errors */
+ u32 rx_drops_no_frags; /* HW has no fetched frags */
+ u32 rx_post_fail; /* page post alloc failures */
+ u32 rx_polls; /* NAPI calls */
+ u32 rx_events;
+ u32 rx_compl;
+ u32 rx_mcast_pkts;
+ u32 rx_compl_err; /* completions with err set */
+ u32 rx_pps; /* pkts per second */
+ struct u64_stats_sync sync;
+};
+
+struct be_rx_compl_info {
+ u32 rss_hash;
+ u16 vlan_tag;
+ u16 pkt_size;
+ u16 rxq_idx;
+ u16 port;
+ u8 vlanf;
+ u8 num_rcvd;
+ u8 err;
+ u8 ipf;
+ u8 tcpf;
+ u8 udpf;
+ u8 ip_csum;
+ u8 l4_csum;
+ u8 ipv6;
+ u8 vtm;
+ u8 pkt_type;
+};
+
+struct be_rx_obj {
+ struct be_adapter *adapter;
+ struct be_queue_info q;
+ struct be_queue_info cq;
+ struct be_rx_compl_info rxcp;
+ struct be_rx_page_info page_info_tbl[RX_Q_LEN];
+ struct be_eq_obj rx_eq;
+ struct be_rx_stats stats;
+ u8 rss_id;
+ bool rx_post_starved; /* Zero rx frags have been posted to BE */
+ u32 cache_line_barrier[16];
+};
+
+struct be_drv_stats {
+ u32 be_on_die_temperature;
+ u32 tx_events;
+ u32 eth_red_drops;
+ u32 rx_drops_no_pbuf;
+ u32 rx_drops_no_txpb;
+ u32 rx_drops_no_erx_descr;
+ u32 rx_drops_no_tpre_descr;
+ u32 rx_drops_too_many_frags;
+ u32 rx_drops_invalid_ring;
+ u32 forwarded_packets;
+ u32 rx_drops_mtu;
+ u32 rx_crc_errors;
+ u32 rx_alignment_symbol_errors;
+ u32 rx_pause_frames;
+ u32 rx_priority_pause_frames;
+ u32 rx_control_frames;
+ u32 rx_in_range_errors;
+ u32 rx_out_range_errors;
+ u32 rx_frame_too_long;
+ u32 rx_address_match_errors;
+ u32 rx_dropped_too_small;
+ u32 rx_dropped_too_short;
+ u32 rx_dropped_header_too_small;
+ u32 rx_dropped_tcp_length;
+ u32 rx_dropped_runt;
+ u32 rx_ip_checksum_errs;
+ u32 rx_tcp_checksum_errs;
+ u32 rx_udp_checksum_errs;
+ u32 tx_pauseframes;
+ u32 tx_priority_pauseframes;
+ u32 tx_controlframes;
+ u32 rxpp_fifo_overflow_drop;
+ u32 rx_input_fifo_overflow_drop;
+ u32 pmem_fifo_overflow_drop;
+ u32 jabber_events;
+};
+
+struct be_vf_cfg {
+ unsigned char vf_mac_addr[ETH_ALEN];
+ u32 vf_if_handle;
+ u32 vf_pmac_id;
+ u16 vf_vlan_tag;
+ u32 vf_tx_rate;
+};
+
+#define BE_INVALID_PMAC_ID 0xffffffff
+
+struct be_adapter {
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+
+ u8 __iomem *csr;
+ u8 __iomem *db; /* Door Bell */
+
+ struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
+ struct be_dma_mem mbox_mem;
+ /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
+ * is stored for freeing purposes */
+ struct be_dma_mem mbox_mem_alloced;
+
+ struct be_mcc_obj mcc_obj;
+ spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
+ spinlock_t mcc_cq_lock;
+
+ struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
+ u32 num_msix_vec;
+ bool isr_registered;
+
+ /* TX Rings */
+ struct be_eq_obj tx_eq;
+ struct be_tx_obj tx_obj[MAX_TX_QS];
+ u8 num_tx_qs;
+
+ u32 cache_line_break[8];
+
+ /* Rx rings */
+ struct be_rx_obj rx_obj[MAX_RX_QS];
+ u32 num_rx_qs;
+ u32 big_page_size; /* Compounded page size shared by rx wrbs */
+
+ u8 eq_next_idx;
+ struct be_drv_stats drv_stats;
+
+ u16 vlans_added;
+ u16 max_vlans; /* Number of vlans supported */
+ u8 vlan_tag[VLAN_N_VID];
+ u8 vlan_prio_bmap; /* Available Priority BitMap */
+ u16 recommended_prio; /* Recommended Priority */
+ struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
+
+ struct be_dma_mem stats_cmd;
+ /* Work queue used to perform periodic tasks like getting statistics */
+ struct delayed_work work;
+ u16 work_counter;
+
+ /* Ethtool knobs and info */
+ char fw_ver[FW_VER_LEN];
+ u32 if_handle; /* Used to configure filtering */
+ u32 pmac_id; /* MAC addr handle used by BE card */
+ u32 beacon_state; /* for set_phys_id */
+
+ bool eeh_err;
+ u32 port_num;
+ bool promiscuous;
+ bool wol;
+ u32 function_mode;
+ u32 function_caps;
+ u32 rx_fc; /* Rx flow control */
+ u32 tx_fc; /* Tx flow control */
+ bool ue_detected;
+ bool stats_cmd_sent;
+ int link_speed;
+ u8 port_type;
+ u8 transceiver;
+ u8 autoneg;
+ u8 generation; /* BladeEngine ASIC generation */
+ u32 flash_status;
+ struct completion flash_compl;
+
+ bool be3_native;
+ bool sriov_enabled;
+ struct be_vf_cfg *vf_cfg;
+ u8 is_virtfn;
+ u32 sli_family;
+ u8 hba_port_num;
+ u16 pvid;
+};
+
+#define be_physfn(adapter) (!adapter->is_virtfn)
+
+/* BladeEngine Generation numbers */
+#define BE_GEN2 2
+#define BE_GEN3 3
+
+#define ON 1
+#define OFF 0
+#define lancer_chip(adapter) ((adapter->pdev->device == OC_DEVICE_ID3) || \
+ (adapter->pdev->device == OC_DEVICE_ID4))
+
+extern const struct ethtool_ops be_ethtool_ops;
+
+#define msix_enabled(adapter) (adapter->num_msix_vec > 0)
+#define tx_stats(txo) (&txo->stats)
+#define rx_stats(rxo) (&rxo->stats)
+
+#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
+
+#define for_all_rx_queues(adapter, rxo, i) \
+ for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \
+ i++, rxo++)
+
+/* Just skip the first default non-rss queue */
+#define for_all_rss_queues(adapter, rxo, i) \
+ for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\
+ i++, rxo++)
+
+#define for_all_tx_queues(adapter, txo, i) \
+ for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs; \
+ i++, txo++)
+
+#define PAGE_SHIFT_4K 12
+#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
+
+/* Returns number of pages spanned by the data starting at the given addr */
+#define PAGES_4K_SPANNED(_address, size) \
+ ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
+ (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
+
+/* Byte offset into the page corresponding to given address */
+#define OFFSET_IN_PAGE(addr) \
+ ((size_t)(addr) & (PAGE_SIZE_4K-1))
+
+/* Returns bit offset within a DWORD of a bitfield */
+#define AMAP_BIT_OFFSET(_struct, field) \
+ (((size_t)&(((_struct *)0)->field))%32)
+
+/* Returns the bit mask of the field that is NOT shifted into location. */
+static inline u32 amap_mask(u32 bitsize)
+{
+ return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
+}
+
+static inline void
+amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
+{
+ u32 *dw = (u32 *) ptr + dw_offset;
+ *dw &= ~(mask << offset);
+ *dw |= (mask & value) << offset;
+}
+
+#define AMAP_SET_BITS(_struct, field, ptr, val) \
+ amap_set(ptr, \
+ offsetof(_struct, field)/32, \
+ amap_mask(sizeof(((_struct *)0)->field)), \
+ AMAP_BIT_OFFSET(_struct, field), \
+ val)
+
+static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
+{
+ u32 *dw = (u32 *) ptr;
+ return mask & (*(dw + dw_offset) >> offset);
+}
+
+#define AMAP_GET_BITS(_struct, field, ptr) \
+ amap_get(ptr, \
+ offsetof(_struct, field)/32, \
+ amap_mask(sizeof(((_struct *)0)->field)), \
+ AMAP_BIT_OFFSET(_struct, field))
+
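+/* Typical use (see the EQ-create command in be_cmds.c later in this patch):
+ * AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1)
+ * sets the 'valid' bitfield inside the DWORD stream backing req->context.
+ */
+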
+#define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len)
+#define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len)
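+/* swap each 32-bit word of a WRB; a no-op on little-endian hosts, where
+ * CPU and hardware byte order already agree
+ */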
+static inline void swap_dws(void *wrb, int len)
+{
+#ifdef __BIG_ENDIAN
+ u32 *dw = wrb;
+ BUG_ON(len % 4);
+ do {
+ *dw = cpu_to_le32(*dw);
+ dw++;
+ len -= 4;
+ } while (len);
+#endif /* __BIG_ENDIAN */
+}
+
+static inline u8 is_tcp_pkt(struct sk_buff *skb)
+{
+ u8 val = 0;
+
+ if (ip_hdr(skb)->version == 4)
+ val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
+ else if (ip_hdr(skb)->version == 6)
+ val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);
+
+ return val;
+}
+
+static inline u8 is_udp_pkt(struct sk_buff *skb)
+{
+ u8 val = 0;
+
+ if (ip_hdr(skb)->version == 4)
+ val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
+ else if (ip_hdr(skb)->version == 6)
+ val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);
+
+ return val;
+}
+
+static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
+{
+ u32 sli_intf;
+
+ pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+ adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
+}
+
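+/* Generate a MAC for a VF: keep the OUI (first three bytes) of the PF
+ * MAC and derive the NIC-specific bytes from a hash of it.
+ */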
+static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
+{
+ u32 addr;
+
+ addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
+
+ mac[5] = (u8)(addr & 0xFF);
+ mac[4] = (u8)((addr >> 8) & 0xFF);
+ mac[3] = (u8)((addr >> 16) & 0xFF);
+ /* Use the OUI from the current MAC address */
+ memcpy(mac, adapter->netdev->dev_addr, 3);
+}
+
+static inline bool be_multi_rxq(const struct be_adapter *adapter)
+{
+ return adapter->num_rx_qs > 1;
+}
+
+extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
+ u16 num_popped);
+extern void be_link_status_update(struct be_adapter *adapter, u32 link_status);
+extern void be_parse_stats(struct be_adapter *adapter);
+extern int be_load_fw(struct be_adapter *adapter, u8 *func);
+#endif /* BE_H */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
new file mode 100644
index 000000000000..6e7b5218c784
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -0,0 +1,2384 @@
+/*
+ * Copyright (C) 2005 - 2011 Emulex
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include "be.h"
+#include "be_cmds.h"
+
+/* Must be a power of 2 or else MODULO will BUG_ON */
+static int be_get_temp_freq = 64;
+
+static inline void *embedded_payload(struct be_mcc_wrb *wrb)
+{
+ return wrb->payload.embedded_payload;
+}
+
+static void be_mcc_notify(struct be_adapter *adapter)
+{
+ struct be_queue_info *mccq = &adapter->mcc_obj.q;
+ u32 val = 0;
+
+ if (adapter->eeh_err) {
+ dev_info(&adapter->pdev->dev,
+ "Error detected in card! Cannot issue commands\n");
+ return;
+ }
+
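+ /* doorbell word: MCC ring id in the low bits, count of newly
+ * posted WRBs in the num_posted field */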
+ val |= mccq->id & DB_MCCQ_RING_ID_MASK;
+ val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
+
+ wmb();
+ iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
+}
+
+/* To check if valid bit is set, check the entire word as we don't know
+ * the endianness of the data (old entry is host endian while a new entry is
+ * little endian) */
+static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
+{
+ if (compl->flags != 0) {
+ compl->flags = le32_to_cpu(compl->flags);
+ BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/* Need to reset the entire word that houses the valid bit */
+static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
+{
+ compl->flags = 0;
+}
+
+static int be_mcc_compl_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ u16 compl_status, extd_status;
+
+ /* Just swap the status to host endian; mcc tag is opaquely copied
+ * from mcc_wrb */
+ be_dws_le_to_cpu(compl, 4);
+
+ compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
+ CQE_STATUS_COMPL_MASK;
+
+ if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) ||
+ (compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) &&
+ (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
+ adapter->flash_status = compl_status;
+ complete(&adapter->flash_compl);
+ }
+
+ if (compl_status == MCC_STATUS_SUCCESS) {
+ if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) ||
+ (compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) &&
+ (compl->tag1 == CMD_SUBSYSTEM_ETH)) {
+ be_parse_stats(adapter);
+ adapter->stats_cmd_sent = false;
+ }
+ if (compl->tag0 ==
+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) {
+ struct be_mcc_wrb *mcc_wrb =
+ queue_index_node(&adapter->mcc_obj.q,
+ compl->tag1);
+ struct be_cmd_resp_get_cntl_addnl_attribs *resp =
+ embedded_payload(mcc_wrb);
+ adapter->drv_stats.be_on_die_temperature =
+ resp->on_die_temperature;
+ }
+ } else {
+ if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
+ be_get_temp_freq = 0;
+
+ if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
+ compl_status == MCC_STATUS_ILLEGAL_REQUEST)
+ goto done;
+
+ if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
+ dev_warn(&adapter->pdev->dev, "This domain (VM) is not "
+ "permitted to execute this cmd (opcode %d)\n",
+ compl->tag0);
+ } else {
+ extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
+ CQE_STATUS_EXTD_MASK;
+ dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed: "
+ "status %d, extd-status %d\n",
+ compl->tag0, compl_status, extd_status);
+ }
+ }
+done:
+ return compl_status;
+}
+
+/* Link state evt is a string of bytes; no need for endian swapping */
+static void be_async_link_state_process(struct be_adapter *adapter,
+ struct be_async_event_link_state *evt)
+{
+ be_link_status_update(adapter, evt->port_link_status);
+}
+
+/* Grp5 CoS Priority evt */
+static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
+ struct be_async_event_grp5_cos_priority *evt)
+{
+ if (evt->valid) {
+ adapter->vlan_prio_bmap = evt->available_priority_bmap;
+ adapter->recommended_prio &= ~VLAN_PRIO_MASK;
+ adapter->recommended_prio =
+ evt->reco_default_priority << VLAN_PRIO_SHIFT;
+ }
+}
+
+/* Grp5 QOS Speed evt */
+static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
+ struct be_async_event_grp5_qos_link_speed *evt)
+{
+ if (evt->physical_port == adapter->port_num) {
+ /* qos_link_speed is in units of 10 Mbps */
+ adapter->link_speed = evt->qos_link_speed * 10;
+ }
+}
+
+/* Grp5 PVID evt */
+static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
+ struct be_async_event_grp5_pvid_state *evt)
+{
+ if (evt->enabled)
+ adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
+ else
+ adapter->pvid = 0;
+}
+
+static void be_async_grp5_evt_process(struct be_adapter *adapter,
+ u32 trailer, struct be_mcc_compl *evt)
+{
+ u8 event_type = 0;
+
+ event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
+ ASYNC_TRAILER_EVENT_TYPE_MASK;
+
+ switch (event_type) {
+ case ASYNC_EVENT_COS_PRIORITY:
+ be_async_grp5_cos_priority_process(adapter,
+ (struct be_async_event_grp5_cos_priority *)evt);
+ break;
+ case ASYNC_EVENT_QOS_SPEED:
+ be_async_grp5_qos_speed_process(adapter,
+ (struct be_async_event_grp5_qos_link_speed *)evt);
+ break;
+ case ASYNC_EVENT_PVID_STATE:
+ be_async_grp5_pvid_state_process(adapter,
+ (struct be_async_event_grp5_pvid_state *)evt);
+ break;
+ default:
+ dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
+ break;
+ }
+}
+
+static inline bool is_link_state_evt(u32 trailer)
+{
+ return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+ ASYNC_TRAILER_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_LINK_STATE;
+}
+
+static inline bool is_grp5_evt(u32 trailer)
+{
+ return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+ ASYNC_TRAILER_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_GRP_5);
+}
+
+static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
+{
+ struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
+ struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
+
+ if (be_mcc_compl_is_new(compl)) {
+ queue_tail_inc(mcc_cq);
+ return compl;
+ }
+ return NULL;
+}
+
+void be_async_mcc_enable(struct be_adapter *adapter)
+{
+ spin_lock_bh(&adapter->mcc_cq_lock);
+
+ be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
+ adapter->mcc_obj.rearm_cq = true;
+
+ spin_unlock_bh(&adapter->mcc_cq_lock);
+}
+
+void be_async_mcc_disable(struct be_adapter *adapter)
+{
+ adapter->mcc_obj.rearm_cq = false;
+}
+
+int be_process_mcc(struct be_adapter *adapter, int *status)
+{
+ struct be_mcc_compl *compl;
+ int num = 0;
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+
+ spin_lock_bh(&adapter->mcc_cq_lock);
+ while ((compl = be_mcc_compl_get(adapter))) {
+ if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
+ /* Interpret flags as an async trailer */
+ if (is_link_state_evt(compl->flags))
+ be_async_link_state_process(adapter,
+ (struct be_async_event_link_state *) compl);
+ else if (is_grp5_evt(compl->flags))
+ be_async_grp5_evt_process(adapter,
+ compl->flags, compl);
+ } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
+ *status = be_mcc_compl_process(adapter, compl);
+ atomic_dec(&mcc_obj->q.used);
+ }
+ be_mcc_compl_use(compl);
+ num++;
+ }
+
+ spin_unlock_bh(&adapter->mcc_cq_lock);
+ return num;
+}
+
+/* Wait till no more pending mcc requests are present */
+static int be_mcc_wait_compl(struct be_adapter *adapter)
+{
+#define mcc_timeout 120000 /* 12s timeout */
+ int i, num, status = 0;
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+
+ if (adapter->eeh_err)
+ return -EIO;
+
+ for (i = 0; i < mcc_timeout; i++) {
+ num = be_process_mcc(adapter, &status);
+ if (num)
+ be_cq_notify(adapter, mcc_obj->cq.id,
+ mcc_obj->rearm_cq, num);
+
+ if (atomic_read(&mcc_obj->q.used) == 0)
+ break;
+ udelay(100);
+ }
+ if (i == mcc_timeout) {
+ dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
+ return -1;
+ }
+ return status;
+}
+
+/* Notify MCC requests and wait for completion */
+static int be_mcc_notify_wait(struct be_adapter *adapter)
+{
+ be_mcc_notify(adapter);
+ return be_mcc_wait_compl(adapter);
+}
+
+static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
+{
+ int msecs = 0;
+ u32 ready;
+
+ if (adapter->eeh_err) {
+ dev_err(&adapter->pdev->dev,
+ "Error detected in card. Cannot issue commands\n");
+ return -EIO;
+ }
+
+ do {
+ ready = ioread32(db);
+ if (ready == 0xffffffff) {
+ dev_err(&adapter->pdev->dev,
+ "pci slot disconnected\n");
+ return -1;
+ }
+
+ ready &= MPU_MAILBOX_DB_RDY_MASK;
+ if (ready)
+ break;
+
+ if (msecs > 4000) {
+ dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
+ if (!lancer_chip(adapter))
+ be_detect_dump_ue(adapter);
+ return -1;
+ }
+
+ msleep(1);
+ msecs++;
+ } while (true);
+
+ return 0;
+}
+
+/*
+ * Insert the mailbox address into the doorbell in two steps, then
+ * poll the mbox doorbell till a command completion (or a timeout) occurs.
+ */
+static int be_mbox_notify_wait(struct be_adapter *adapter)
+{
+ int status;
+ u32 val = 0;
+ void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
+ struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
+ struct be_mcc_mailbox *mbox = mbox_mem->va;
+ struct be_mcc_compl *compl = &mbox->compl;
+
+ /* wait for ready to be set */
+ status = be_mbox_db_ready_wait(adapter, db);
+ if (status != 0)
+ return status;
+
+ val |= MPU_MAILBOX_DB_HI_MASK;
+ /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
+ val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
+ iowrite32(val, db);
+
+ /* wait for ready to be set */
+ status = be_mbox_db_ready_wait(adapter, db);
+ if (status != 0)
+ return status;
+
+ val = 0;
+ /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
+ val |= (u32)(mbox_mem->dma >> 4) << 2;
+ iowrite32(val, db);
+
+ status = be_mbox_db_ready_wait(adapter, db);
+ if (status != 0)
+ return status;
+
+ /* A cq entry has been made now */
+ if (be_mcc_compl_is_new(compl)) {
+ status = be_mcc_compl_process(adapter, &mbox->compl);
+ be_mcc_compl_use(compl);
+ if (status)
+ return status;
+ } else {
+ dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
+{
+ u32 sem;
+
+ if (lancer_chip(adapter))
+ sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
+ else
+ sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
+
+ *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
+ if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
+ return -1;
+ else
+ return 0;
+}
+
+int be_cmd_POST(struct be_adapter *adapter)
+{
+ u16 stage;
+ int status, timeout = 0;
+ struct device *dev = &adapter->pdev->dev;
+
+ do {
+ status = be_POST_stage_get(adapter, &stage);
+ if (status) {
+ dev_err(dev, "POST error; stage=0x%x\n", stage);
+ return -1;
+ } else if (stage != POST_STAGE_ARMFW_RDY) {
+ if (msleep_interruptible(2000)) {
+ dev_err(dev, "Waiting for POST aborted\n");
+ return -EINTR;
+ }
+ timeout += 2;
+ } else {
+ return 0;
+ }
+ } while (timeout < 60);
+
+ dev_err(dev, "POST timeout; stage=0x%x\n", stage);
+ return -1;
+}
+
+static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
+{
+ return &wrb->payload.sgl[0];
+}
+
+/* Don't touch the hdr after it's prepared */
+static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
+ bool embedded, u8 sge_cnt, u32 opcode)
+{
+ if (embedded)
+ wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
+ else
+ wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
+ MCC_WRB_SGE_CNT_SHIFT;
+ wrb->payload_length = payload_len;
+ wrb->tag0 = opcode;
+ be_dws_cpu_to_le(wrb, 8);
+}
+
+/* Don't touch the hdr after it's prepared */
+static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
+ u8 subsystem, u8 opcode, int cmd_len)
+{
+ req_hdr->opcode = opcode;
+ req_hdr->subsystem = subsystem;
+ req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
+ req_hdr->version = 0;
+}
+
+static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
+ struct be_dma_mem *mem)
+{
+ int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
+ u64 dma = (u64)mem->dma;
+
+ for (i = 0; i < buf_pages; i++) {
+ pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
+ pages[i].hi = cpu_to_le32(upper_32_bits(dma));
+ dma += PAGE_SIZE_4K;
+ }
+}
+
+/* Converts interrupt delay in microseconds to multiplier value */
+static u32 eq_delay_to_mult(u32 usec_delay)
+{
+#define MAX_INTR_RATE 651042
+ const u32 round = 10;
+ u32 multiplier;
+
+ if (usec_delay == 0)
+ multiplier = 0;
+ else {
+ u32 interrupt_rate = 1000000 / usec_delay;
+ /* Max delay, corresponding to the lowest interrupt rate */
+ if (interrupt_rate == 0)
+ multiplier = 1023;
+ else {
+ multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
+ multiplier /= interrupt_rate;
+ /* Round the multiplier to the closest value.*/
+ multiplier = (multiplier + round/2) / round;
+ multiplier = min(multiplier, (u32)1023);
+ }
+ }
+ return multiplier;
+}
+
+static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
+{
+ struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
+ struct be_mcc_wrb *wrb
+ = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
+ memset(wrb, 0, sizeof(*wrb));
+ return wrb;
+}
+
+static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
+{
+ struct be_queue_info *mccq = &adapter->mcc_obj.q;
+ struct be_mcc_wrb *wrb;
+
+ if (atomic_read(&mccq->used) >= mccq->len) {
+ dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
+ return NULL;
+ }
+
+ wrb = queue_head_node(mccq);
+ queue_head_inc(mccq);
+ atomic_inc(&mccq->used);
+ memset(wrb, 0, sizeof(*wrb));
+ return wrb;
+}
+
+/* Tell fw we're about to start firing cmds by writing a
+ * special pattern across the wrb hdr; uses mbox
+ */
+int be_cmd_fw_init(struct be_adapter *adapter)
+{
+ u8 *wrb;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = (u8 *)wrb_from_mbox(adapter);
+ *wrb++ = 0xFF;
+ *wrb++ = 0x12;
+ *wrb++ = 0x34;
+ *wrb++ = 0xFF;
+ *wrb++ = 0xFF;
+ *wrb++ = 0x56;
+ *wrb++ = 0x78;
+ *wrb = 0xFF;
+
+ status = be_mbox_notify_wait(adapter);
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Tell fw we're done with firing cmds by writing a
+ * special pattern across the wrb hdr; uses mbox
+ */
+int be_cmd_fw_clean(struct be_adapter *adapter)
+{
+ u8 *wrb;
+ int status;
+
+ if (adapter->eeh_err)
+ return -EIO;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = (u8 *)wrb_from_mbox(adapter);
+ *wrb++ = 0xFF;
+ *wrb++ = 0xAA;
+ *wrb++ = 0xBB;
+ *wrb++ = 0xFF;
+ *wrb++ = 0xFF;
+ *wrb++ = 0xCC;
+ *wrb++ = 0xDD;
+ *wrb = 0xFF;
+
+ status = be_mbox_notify_wait(adapter);
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+int be_cmd_eq_create(struct be_adapter *adapter,
+ struct be_queue_info *eq, int eq_delay)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_eq_create *req;
+ struct be_dma_mem *q_mem = &eq->dma_mem;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_EQ_CREATE, sizeof(*req));
+
+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+
+ AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
+ /* 4byte eqe*/
+ AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
+ AMAP_SET_BITS(struct amap_eq_context, count, req->context,
+ __ilog2_u32(eq->len/256));
+ AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
+ eq_delay_to_mult(eq_delay));
+ be_dws_cpu_to_le(req->context, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
+ eq->id = le16_to_cpu(resp->eq_id);
+ eq->created = true;
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Uses mbox */
+int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
+ u8 type, bool permanent, u32 if_handle)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_mac_query *req;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_NTWK_MAC_QUERY);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));
+
+ req->type = type;
+ if (permanent) {
+ req->permanent = 1;
+ } else {
+ req->if_id = cpu_to_le16((u16) if_handle);
+ req->permanent = 0;
+ }
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
+ memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Uses synchronous MCCQ */
+int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
+ u32 if_id, u32 *pmac_id, u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_pmac_add *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_NTWK_PMAC_ADD);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
+
+ req->hdr.domain = domain;
+ req->if_id = cpu_to_le32(if_id);
+ memcpy(req->mac_address, mac_addr, ETH_ALEN);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
+ *pmac_id = le32_to_cpu(resp->pmac_id);
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses synchronous MCCQ */
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_pmac_del *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_NTWK_PMAC_DEL);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
+
+ req->hdr.domain = dom;
+ req->if_id = cpu_to_le32(if_id);
+ req->pmac_id = cpu_to_le32(pmac_id);
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses Mbox */
+int be_cmd_cq_create(struct be_adapter *adapter,
+ struct be_queue_info *cq, struct be_queue_info *eq,
+ bool sol_evts, bool no_delay, int coalesce_wm)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_cq_create *req;
+ struct be_dma_mem *q_mem = &cq->dma_mem;
+ void *ctxt;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+ ctxt = &req->context;
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_CQ_CREATE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_CQ_CREATE, sizeof(*req));
+
+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+ if (lancer_chip(adapter)) {
+ req->hdr.version = 2;
+ req->page_size = 1; /* 1 for 4K */
+ AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
+ no_delay);
+ AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
+ __ilog2_u32(cq->len/256));
+ AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
+ ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
+ ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
+ } else {
+ AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
+ coalesce_wm);
+ AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
+ ctxt, no_delay);
+ AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
+ __ilog2_u32(cq->len/256));
+ AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_be, solevent,
+ ctxt, sol_evts);
+ AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
+ }
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
+ cq->id = le16_to_cpu(resp->cq_id);
+ cq->created = true;
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+
+ return status;
+}
+
+static u32 be_encoded_q_len(int q_len)
+{
+ u32 len_encoded = fls(q_len); /* log2(len) + 1 */
+ if (len_encoded == 16)
+ len_encoded = 0;
+ return len_encoded;
+}
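+
+/* Example: a 256-entry ring encodes as fls(256) = 9; an encoded length of
+ * 16 (a 32768-entry ring) is represented by the special value 0.
+ */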
+
+int be_cmd_mccq_ext_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_mcc_ext_create *req;
+ struct be_dma_mem *q_mem = &mccq->dma_mem;
+ void *ctxt;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+ ctxt = &req->context;
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_MCC_CREATE_EXT);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
+
+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+ if (lancer_chip(adapter)) {
+ req->hdr.version = 1;
+ req->cq_id = cpu_to_le16(cq->id);
+
+ AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
+ ctxt, cq->id);
+ AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
+ ctxt, 1);
+
+ } else {
+ AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
+ }
+
+ /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
+ req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
+ mccq->id = le16_to_cpu(resp->id);
+ mccq->created = true;
+ }
+ mutex_unlock(&adapter->mbox_lock);
+
+ return status;
+}
+
+int be_cmd_mccq_org_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_mcc_create *req;
+ struct be_dma_mem *q_mem = &mccq->dma_mem;
+ void *ctxt;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+ ctxt = &req->context;
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_MCC_CREATE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MCC_CREATE, sizeof(*req));
+
+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+
+ AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
+ mccq->id = le16_to_cpu(resp->id);
+ mccq->created = true;
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+int be_cmd_mccq_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
+{
+ int status;
+
+ status = be_cmd_mccq_ext_create(adapter, mccq, cq);
+ if (status && !lancer_chip(adapter)) {
+ dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
+ "or newer to avoid conflicting priorities between NIC "
+ "and FCoE traffic");
+ status = be_cmd_mccq_org_create(adapter, mccq, cq);
+ }
+ return status;
+}
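+
+/* Note: the ext variant also subscribes to async events (see the event
+ * bitmap in be_cmd_mccq_ext_create); falling back to the legacy MCC_CREATE
+ * on older firmware presumably forfeits that subscription.
+ */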
+
+int be_cmd_txq_create(struct be_adapter *adapter,
+ struct be_queue_info *txq,
+ struct be_queue_info *cq)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_eth_tx_create *req;
+ struct be_dma_mem *q_mem = &txq->dma_mem;
+ void *ctxt;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+ ctxt = &req->context;
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_ETH_TX_CREATE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
+ sizeof(*req));
+
+ if (lancer_chip(adapter)) {
+ req->hdr.version = 1;
+ AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
+ adapter->if_handle);
+ }
+
+ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+ req->ulp_num = BE_ULP1_NUM;
+ req->type = BE_ETH_TX_RING_TYPE_STANDARD;
+
+ AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
+ be_encoded_q_len(txq->len));
+ AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
+ txq->id = le16_to_cpu(resp->cid);
+ txq->created = true;
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+
+ return status;
+}
+
+/* Uses MCC */
+int be_cmd_rxq_create(struct be_adapter *adapter,
+ struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
+ u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_eth_rx_create *req;
+ struct be_dma_mem *q_mem = &rxq->dma_mem;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_ETH_RX_CREATE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
+ sizeof(*req));
+
+ req->cq_id = cpu_to_le16(cq_id);
+ req->frag_size = fls(frag_size) - 1;
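+ /* frag_size is sent as log2; e.g. 2048-byte fragments encode as 11 */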
+ req->num_pages = 2;
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+ req->interface_id = cpu_to_le32(if_id);
+ req->max_frame_size = cpu_to_le16(max_frame_size);
+ req->rss_queue = cpu_to_le32(rss);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
+ rxq->id = le16_to_cpu(resp->id);
+ rxq->created = true;
+ *rss_id = resp->rss_id;
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Generic destroy routine for all types of queues
+ * Uses Mbox
+ */
+int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
+ int queue_type)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_q_destroy *req;
+ u8 subsys = 0, opcode = 0;
+ int status;
+
+ if (adapter->eeh_err)
+ return -EIO;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ switch (queue_type) {
+ case QTYPE_EQ:
+ subsys = CMD_SUBSYSTEM_COMMON;
+ opcode = OPCODE_COMMON_EQ_DESTROY;
+ break;
+ case QTYPE_CQ:
+ subsys = CMD_SUBSYSTEM_COMMON;
+ opcode = OPCODE_COMMON_CQ_DESTROY;
+ break;
+ case QTYPE_TXQ:
+ subsys = CMD_SUBSYSTEM_ETH;
+ opcode = OPCODE_ETH_TX_DESTROY;
+ break;
+ case QTYPE_RXQ:
+ subsys = CMD_SUBSYSTEM_ETH;
+ opcode = OPCODE_ETH_RX_DESTROY;
+ break;
+ case QTYPE_MCCQ:
+ subsys = CMD_SUBSYSTEM_COMMON;
+ opcode = OPCODE_COMMON_MCC_DESTROY;
+ break;
+ default:
+ BUG();
+ }
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);
+
+ be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
+ req->id = cpu_to_le16(q->id);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status)
+ q->created = false;
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Uses MCC */
+int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_q_destroy *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_RX_DESTROY);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_DESTROY,
+ sizeof(*req));
+ req->id = cpu_to_le16(q->id);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status)
+ q->created = false;
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Create an rx filtering policy configuration on an i/f
+ * Uses mbox
+ */
+int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
+ u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
+ u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_if_create *req;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_NTWK_INTERFACE_CREATE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
+
+ req->hdr.domain = domain;
+ req->capability_flags = cpu_to_le32(cap_flags);
+ req->enable_flags = cpu_to_le32(en_flags);
+ req->pmac_invalid = pmac_invalid;
+ if (!pmac_invalid)
+ memcpy(req->mac_addr, mac, ETH_ALEN);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
+ *if_handle = le32_to_cpu(resp->interface_id);
+ if (!pmac_invalid)
+ *pmac_id = le32_to_cpu(resp->pmac_id);
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Uses mbox */
+int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_if_destroy *req;
+ int status;
+
+ if (adapter->eeh_err)
+ return -EIO;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
+
+ req->hdr.domain = domain;
+ req->interface_id = cpu_to_le32(interface_id);
+
+ status = be_mbox_notify_wait(adapter);
+
+ mutex_unlock(&adapter->mbox_lock);
+
+ return status;
+}
+
+/* Get stats is a non-embedded command: the request is not embedded inside the
+ * WRB but is in a separate DMA memory block
+ * Uses asynchronous MCC
+ */
+int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_hdr *hdr;
+ struct be_sge *sge;
+ int status = 0;
+
+ if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
+ be_cmd_get_die_temperature(adapter);
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ hdr = nonemb_cmd->va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
+ OPCODE_ETH_GET_STATISTICS);
+
+ be_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
+ OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size);
+
+ if (adapter->generation == BE_GEN3)
+ hdr->version = 1;
+
+ wrb->tag1 = CMD_SUBSYSTEM_ETH;
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ be_mcc_notify(adapter);
+ adapter->stats_cmd_sent = true;
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Lancer Stats */
+int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct lancer_cmd_req_pport_stats *req;
+ struct be_sge *sge;
+ int status = 0;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = nonemb_cmd->va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
+ OPCODE_ETH_GET_PPORT_STATS);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+ OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size);
+
+ req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num);
+ req->cmd_params.params.reset_stats = 0;
+
+ wrb->tag1 = CMD_SUBSYSTEM_ETH;
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ be_mcc_notify(adapter);
+ adapter->stats_cmd_sent = true;
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses synchronous mcc */
+int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
+ u16 *link_speed, u32 dom)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_link_status *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
+ if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
+ *link_speed = le16_to_cpu(resp->link_speed);
+ *mac_speed = resp->mac_speed;
+ }
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses async mcc */
+int be_cmd_get_die_temperature(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_cntl_addnl_attribs *req;
+ u16 mccq_index;
+ int status = 0;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ mccq_index = adapter->mcc_obj.q.head;
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
+
+ wrb->tag1 = mccq_index;
+
+ be_mcc_notify(adapter);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses synchronous mcc */
+int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_fat *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_MANAGE_FAT);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MANAGE_FAT, sizeof(*req));
+ req->fat_operation = cpu_to_le32(QUERY_FAT);
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
+ if (log_size && resp->log_size)
+ *log_size = le32_to_cpu(resp->log_size) -
+ sizeof(u32);
+ }
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
+{
+ struct be_dma_mem get_fat_cmd;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_fat *req;
+ struct be_sge *sge;
+ u32 offset = 0, total_size, buf_size,
+ log_offset = sizeof(u32), payload_len;
+ int status;
+
+ if (buf_len == 0)
+ return;
+
+ total_size = buf_len;
+
+ get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
+ get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
+ get_fat_cmd.size,
+ &get_fat_cmd.dma);
+ if (!get_fat_cmd.va) {
+ status = -ENOMEM;
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure while retrieving FAT data\n");
+ return;
+ }
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ while (total_size) {
+ buf_size = min(total_size, (u32)60*1024);
+ total_size -= buf_size;
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = get_fat_cmd.va;
+ sge = nonembedded_sgl(wrb);
+
+ payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
+ be_wrb_hdr_prepare(wrb, payload_len, false, 1,
+ OPCODE_COMMON_MANAGE_FAT);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MANAGE_FAT, payload_len);
+
+ sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma));
+ sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(get_fat_cmd.size);
+
+ req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
+ req->read_log_offset = cpu_to_le32(log_offset);
+ req->read_log_length = cpu_to_le32(buf_size);
+ req->data_buffer_size = cpu_to_le32(buf_size);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
+ memcpy(buf + offset,
+ resp->data_buffer,
+ le32_to_cpu(resp->read_log_length));
+ } else {
+ dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
+ goto err;
+ }
+ offset += buf_size;
+ log_offset += buf_size;
+ }
+err:
+ pci_free_consistent(adapter->pdev, get_fat_cmd.size,
+ get_fat_cmd.va,
+ get_fat_cmd.dma);
+ spin_unlock_bh(&adapter->mcc_lock);
+}
+
+/* Uses synchronous mcc */
+int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
+ char *fw_on_flash)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_fw_version *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_GET_FW_VERSION);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
+ strcpy(fw_ver, resp->firmware_version_string);
+ if (fw_on_flash)
+ strcpy(fw_on_flash, resp->fw_on_flash_version_string);
+ }
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Set the EQ delay interval of an EQ to the specified value
+ * Uses async mcc
+ */
+int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_modify_eq_delay *req;
+ int status = 0;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_MODIFY_EQ_DELAY);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
+
+ req->num_eq = cpu_to_le32(1);
+ req->delay[0].eq_id = cpu_to_le32(eq_id);
+ req->delay[0].phase = 0;
+ req->delay[0].delay_multiplier = cpu_to_le32(eqd);
+
+ be_mcc_notify(adapter);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses synchronous mcc */
+int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
+ u32 num, bool untagged, bool promiscuous)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_vlan_config *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_NTWK_VLAN_CONFIG);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));
+
+ req->interface_id = if_id;
+ req->promiscuous = promiscuous;
+ req->untagged = untagged;
+ req->num_vlan = num;
+ if (!promiscuous) {
+ memcpy(req->normal_vlan, vtag_array,
+ req->num_vlan * sizeof(vtag_array[0]));
+ }
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_dma_mem *mem = &adapter->rx_filter;
+ struct be_cmd_req_rx_filter *req = mem->va;
+ struct be_sge *sge;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ sge = nonembedded_sgl(wrb);
+ sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
+ sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(mem->size);
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
+ OPCODE_COMMON_NTWK_RX_FILTER);
+
+ memset(req, 0, sizeof(*req));
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));
+
+ req->if_id = cpu_to_le32(adapter->if_handle);
+ if (flags & IFF_PROMISC) {
+ req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
+ BE_IF_FLAGS_VLAN_PROMISCUOUS);
+ if (value == ON)
+ req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
+ BE_IF_FLAGS_VLAN_PROMISCUOUS);
+ } else if (flags & IFF_ALLMULTI) {
+ req->if_flags_mask = req->if_flags =
+ cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
+ } else {
+ struct netdev_hw_addr *ha;
+ int i = 0;
+
+ req->if_flags_mask = req->if_flags =
+ cpu_to_le32(BE_IF_FLAGS_MULTICAST);
+ req->mcast_num = cpu_to_le16(netdev_mc_count(adapter->netdev));
+ netdev_for_each_mc_addr(ha, adapter->netdev)
+ memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
+ }
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses synchronous mcc */
+int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_flow_control *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_SET_FLOW_CONTROL);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
+
+ req->tx_flow_control = cpu_to_le16((u16)tx_fc);
+ req->rx_flow_control = cpu_to_le16((u16)rx_fc);
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_flow_control *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_GET_FLOW_CONTROL);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_flow_control *resp =
+ embedded_payload(wrb);
+ *tx_fc = le16_to_cpu(resp->tx_flow_control);
+ *rx_fc = le16_to_cpu(resp->rx_flow_control);
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses mbox */
+int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
+ u32 *mode, u32 *caps)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_query_fw_cfg *req;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
+ *port_num = le32_to_cpu(resp->phys_port);
+ *mode = le32_to_cpu(resp->function_mode);
+ *caps = le32_to_cpu(resp->function_caps);
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Uses mbox */
+int be_cmd_reset_function(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_hdr *req;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_FUNCTION_RESET);
+
+ be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
+
+ status = be_mbox_notify_wait(adapter);
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_rss_config *req;
+ u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
+ 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_ETH_RSS_CONFIG);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+ OPCODE_ETH_RSS_CONFIG, sizeof(*req));
+
+ req->if_id = cpu_to_le32(adapter->if_handle);
+ req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
+ req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
+ memcpy(req->cpu_table, rsstable, table_size);
+ memcpy(req->hash, myhash, sizeof(myhash));
+ be_dws_cpu_to_le(req->hash, sizeof(req->hash));
+
+ status = be_mbox_notify_wait(adapter);
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
+ u8 bcn, u8 sts, u8 state)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_enable_disable_beacon *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_ENABLE_DISABLE_BEACON);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
+
+ req->port_num = port_num;
+ req->beacon_state = state;
+ req->beacon_duration = bcn;
+ req->status_duration = sts;
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_beacon_state *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_GET_BEACON_STATE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
+
+ req->port_num = port_num;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_beacon_state *resp =
+ embedded_payload(wrb);
+ *state = resp->beacon_state;
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset, const char *obj_name,
+ u32 *data_written, u8 *addn_status)
+{
+ struct be_mcc_wrb *wrb;
+ struct lancer_cmd_req_write_object *req;
+ struct lancer_cmd_resp_write_object *resp;
+ void *ctxt = NULL;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+ adapter->flash_status = 0;
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err_unlock;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(struct lancer_cmd_req_write_object),
+ true, 1, OPCODE_COMMON_WRITE_OBJECT);
+ wrb->tag1 = CMD_SUBSYSTEM_COMMON;
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_WRITE_OBJECT,
+ sizeof(struct lancer_cmd_req_write_object));
+
+ ctxt = &req->context;
+ AMAP_SET_BITS(struct amap_lancer_write_obj_context,
+ write_length, ctxt, data_size);
+
+ if (data_size == 0)
+ AMAP_SET_BITS(struct amap_lancer_write_obj_context,
+ eof, ctxt, 1);
+ else
+ AMAP_SET_BITS(struct amap_lancer_write_obj_context,
+ eof, ctxt, 0);
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+ req->write_offset = cpu_to_le32(data_offset);
+ strcpy(req->object_name, obj_name);
+ req->descriptor_count = cpu_to_le32(1);
+ req->buf_len = cpu_to_le32(data_size);
+ req->addr_low = cpu_to_le32((cmd->dma +
+ sizeof(struct lancer_cmd_req_write_object))
+ & 0xFFFFFFFF);
+ req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
+ sizeof(struct lancer_cmd_req_write_object)));
+
+ be_mcc_notify(adapter);
+ spin_unlock_bh(&adapter->mcc_lock);
+
+ if (!wait_for_completion_timeout(&adapter->flash_compl,
+ msecs_to_jiffies(12000)))
+ status = -1;
+ else
+ status = adapter->flash_status;
+
+ resp = embedded_payload(wrb);
+ if (!status) {
+ *data_written = le32_to_cpu(resp->actual_write_len);
+ } else {
+ *addn_status = resp->additional_status;
+ status = resp->status;
+ }
+
+ return status;
+
+err_unlock:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ u32 flash_type, u32 flash_opcode, u32 buf_size)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_write_flashrom *req;
+ struct be_sge *sge;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+ adapter->flash_status = 0;
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err_unlock;
+ }
+ req = cmd->va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
+ OPCODE_COMMON_WRITE_FLASHROM);
+ wrb->tag1 = CMD_SUBSYSTEM_COMMON;
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
+ sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
+ sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(cmd->size);
+
+ req->params.op_type = cpu_to_le32(flash_type);
+ req->params.op_code = cpu_to_le32(flash_opcode);
+ req->params.data_buf_size = cpu_to_le32(buf_size);
+
+ be_mcc_notify(adapter);
+ spin_unlock_bh(&adapter->mcc_lock);
+
+ if (!wait_for_completion_timeout(&adapter->flash_compl,
+ msecs_to_jiffies(40000)))
+ status = -1;
+ else
+ status = adapter->flash_status;
+
+ return status;
+
+err_unlock:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
+ int offset)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_write_flashrom *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
+ OPCODE_COMMON_READ_FLASHROM);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
+
+ req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
+ req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
+ req->params.offset = cpu_to_le32(offset);
+ req->params.data_buf_size = cpu_to_le32(0x4);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status)
+ memcpy(flashed_crc, req->params.data_buf, 4);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
+ struct be_dma_mem *nonemb_cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_acpi_wol_magic_config *req;
+ struct be_sge *sge;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = nonemb_cmd->va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
+ memcpy(req->magic_mac, mac, ETH_ALEN);
+
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+ u8 loopback_type, u8 enable)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_lmode *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
+ sizeof(*req));
+
+ req->src_port = port_num;
+ req->dest_port = port_num;
+ req->loopback_type = loopback_type;
+ req->loopback_state = enable;
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
+ u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_loopback_test *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_LOWLEVEL_LOOPBACK_TEST);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
+ OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
+ req->hdr.timeout = cpu_to_le32(4);
+
+ req->pattern = cpu_to_le64(pattern);
+ req->src_port = cpu_to_le32(port_num);
+ req->dest_port = cpu_to_le32(port_num);
+ req->pkt_size = cpu_to_le32(pkt_size);
+ req->num_pkts = cpu_to_le32(num_pkts);
+ req->loopback_type = cpu_to_le32(loopback_type);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
+ status = le32_to_cpu(resp->status);
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
+ u32 byte_cnt, struct be_dma_mem *cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_ddrdma_test *req;
+ struct be_sge *sge;
+ int status;
+ int i, j = 0;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = cmd->va;
+ sge = nonembedded_sgl(wrb);
+ be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
+ OPCODE_LOWLEVEL_HOST_DDR_DMA);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
+ OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);
+
+ sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
+ sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(cmd->size);
+
+ req->pattern = cpu_to_le64(pattern);
+ req->byte_count = cpu_to_le32(byte_cnt);
+ for (i = 0; i < byte_cnt; i++) {
+ req->snd_buff[i] = (u8)(pattern >> (j*8));
+ j++;
+ if (j > 7)
+ j = 0;
+ }
+
+ status = be_mcc_notify_wait(adapter);
+
+ if (!status) {
+ struct be_cmd_resp_ddrdma_test *resp;
+ resp = cmd->va;
+ if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
+ resp->snd_err) {
+ status = -1;
+ }
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_seeprom_read *req;
+ struct be_sge *sge;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = nonemb_cmd->va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
+ OPCODE_COMMON_SEEPROM_READ);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
+
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_get_phy_info(struct be_adapter *adapter,
+ struct be_phy_info *phy_info)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_phy_info *req;
+ struct be_sge *sge;
+ struct be_dma_mem cmd;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ cmd.size = sizeof(struct be_cmd_req_get_phy_info);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
+ &cmd.dma);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ status = -ENOMEM;
+ goto err;
+ }
+
+ req = cmd.va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
+ OPCODE_COMMON_GET_PHY_DETAILS);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_PHY_DETAILS,
+ sizeof(*req));
+
+ sge->pa_hi = cpu_to_le32(upper_32_bits(cmd.dma));
+ sge->pa_lo = cpu_to_le32(cmd.dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(cmd.size);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_phy_info *resp_phy_info =
+ cmd.va + sizeof(struct be_cmd_req_hdr);
+ phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type);
+ phy_info->interface_type =
+ le16_to_cpu(resp_phy_info->interface_type);
+ }
+ pci_free_consistent(adapter->pdev, cmd.size,
+ cmd.va, cmd.dma);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_qos *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_SET_QOS);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_QOS, sizeof(*req));
+
+ req->hdr.domain = domain;
+ req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
+ req->max_bps_nic = cpu_to_le32(bps);
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_cntl_attribs *req;
+ struct be_cmd_resp_cntl_attribs *resp;
+ struct be_sge *sge;
+ int status;
+ int payload_len = max(sizeof(*req), sizeof(*resp));
+ struct mgmt_controller_attrib *attribs;
+ struct be_dma_mem attribs_cmd;
+
+ memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
+ attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
+ attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
+ &attribs_cmd.dma);
+ if (!attribs_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure\n");
+ return -ENOMEM;
+ }
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock)) {
+ /* free the DMA buffer on the interrupted path to avoid a leak */
+ pci_free_consistent(adapter->pdev, attribs_cmd.size,
+ attribs_cmd.va, attribs_cmd.dma);
+ return -1;
+ }
+
+ wrb = wrb_from_mbox(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = attribs_cmd.va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, payload_len, false, 1,
+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
+ sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
+ sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(attribs_cmd.size);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
+ adapter->hba_port_num = attribs->hba_attribs.phy_port;
+ }
+
+err:
+ mutex_unlock(&adapter->mbox_lock);
+ pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
+ attribs_cmd.dma);
+ return status;
+}
+
+/* Uses mbox */
+int be_cmd_req_native_mode(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_func_cap *req;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));
+
+ req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
+ CAPABILITY_BE3_NATIVE_ERX_API);
+ req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
+ adapter->be3_native = le32_to_cpu(resp->cap_flags) &
+ CAPABILITY_BE3_NATIVE_ERX_API;
+ }
+err:
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
new file mode 100644
index 000000000000..abaa90cbfea2
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -0,0 +1,1503 @@
+/*
+ * Copyright (C) 2005 - 2011 Emulex
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+/*
+ * The driver sends configuration and management command requests to the
+ * firmware in the BE. These requests are communicated to the processor
+ * using Work Request Blocks (WRBs) submitted to the MCC-WRB ring or via one
+ * WRB inside a MAILBOX.
+ * The commands are serviced by the ARM processor in the BladeEngine's MPU.
+ */
+
+struct be_sge {
+ u32 pa_lo;
+ u32 pa_hi;
+ u32 len;
+};
+
+#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0*/
+#define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */
+#define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */
+struct be_mcc_wrb {
+ u32 embedded; /* dword 0 */
+ u32 payload_length; /* dword 1 */
+ u32 tag0; /* dword 2 */
+ u32 tag1; /* dword 3 */
+ u32 rsvd; /* dword 4 */
+ union {
+ u8 embedded_payload[236]; /* used by embedded cmds */
+ struct be_sge sgl[19]; /* used by non-embedded cmds */
+ } payload;
+};
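+
+/* A WRB either embeds its payload (bit 0 of 'embedded' set, data in
+ * embedded_payload[]) or carries up to 19 SGEs in sgl[] pointing at external
+ * DMA memory, with the SGE count in bits 3 - 7 of the same dword.
+ */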
+
+#define CQE_FLAGS_VALID_MASK (1 << 31)
+#define CQE_FLAGS_ASYNC_MASK (1 << 30)
+#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
+#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
+
+/* Completion Status */
+enum {
+ MCC_STATUS_SUCCESS = 0,
+ MCC_STATUS_FAILED = 1,
+ MCC_STATUS_ILLEGAL_REQUEST = 2,
+ MCC_STATUS_ILLEGAL_FIELD = 3,
+ MCC_STATUS_INSUFFICIENT_BUFFER = 4,
+ MCC_STATUS_UNAUTHORIZED_REQUEST = 5,
+ MCC_STATUS_NOT_SUPPORTED = 66
+};
+
+#define CQE_STATUS_COMPL_MASK 0xFFFF
+#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
+#define CQE_STATUS_EXTD_MASK 0xFFFF
+#define CQE_STATUS_EXTD_SHIFT 16 /* bits 16 - 31 */
+
+struct be_mcc_compl {
+ u32 status; /* dword 0 */
+ u32 tag0; /* dword 1 */
+ u32 tag1; /* dword 2 */
+ u32 flags; /* dword 3 */
+};
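+
+/* A sketch of how the status word is typically decoded with the masks above
+ * (assumed usage; the actual parsing lives in be_cmds.c):
+ *	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
+ *				CQE_STATUS_COMPL_MASK;
+ *	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
+ *				CQE_STATUS_EXTD_MASK;
+ */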
+
+/* When the async bit of mcc_compl is set, the last 4 bytes of
+ * mcc_compl are interpreted as follows:
+ */
+#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
+#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
+#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16
+#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xFF
+#define ASYNC_EVENT_CODE_LINK_STATE 0x1
+#define ASYNC_EVENT_CODE_GRP_5 0x5
+#define ASYNC_EVENT_QOS_SPEED 0x1
+#define ASYNC_EVENT_COS_PRIORITY 0x2
+#define ASYNC_EVENT_PVID_STATE 0x3
+struct be_async_event_trailer {
+ u32 code;
+};
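+
+/* Sketch of pulling the event code/type out of the trailer word using the
+ * constants above (assumed usage):
+ *	event_code = (trailer->code >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+ *				ASYNC_TRAILER_EVENT_CODE_MASK;
+ *	event_type = (trailer->code >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
+ *				ASYNC_TRAILER_EVENT_TYPE_MASK;
+ */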
+
+enum {
+ LINK_DOWN = 0x0,
+ LINK_UP = 0x1
+};
+#define LINK_STATUS_MASK 0x1
+
+/* When the event code of an async trailer is link-state, the mcc_compl
+ * must be interpreted as follows
+ */
+struct be_async_event_link_state {
+ u8 physical_port;
+ u8 port_link_status;
+ u8 port_duplex;
+ u8 port_speed;
+ u8 port_fault;
+ u8 rsvd0[7];
+ struct be_async_event_trailer trailer;
+} __packed;
+
+/* When the event code of an async trailer is GRP-5 and event_type is QOS_SPEED
+ * the mcc_compl must be interpreted as follows
+ */
+struct be_async_event_grp5_qos_link_speed {
+ u8 physical_port;
+ u8 rsvd[5];
+ u16 qos_link_speed;
+ u32 event_tag;
+ struct be_async_event_trailer trailer;
+} __packed;
+
+/* When the event code of an async trailer is GRP5 and event type is
+ * CoS-Priority, the mcc_compl must be interpreted as follows
+ */
+struct be_async_event_grp5_cos_priority {
+ u8 physical_port;
+ u8 available_priority_bmap;
+ u8 reco_default_priority;
+ u8 valid;
+ u8 rsvd0;
+ u8 event_tag;
+ struct be_async_event_trailer trailer;
+} __packed;
+
+/* When the event code of an async trailer is GRP5 and event type is
+ * PVID state, the mcc_compl must be interpreted as follows
+ */
+struct be_async_event_grp5_pvid_state {
+ u8 enabled;
+ u8 rsvd0;
+ u16 tag;
+ u32 event_tag;
+ u32 rsvd1;
+ struct be_async_event_trailer trailer;
+} __packed;
+
+struct be_mcc_mailbox {
+ struct be_mcc_wrb wrb;
+ struct be_mcc_compl compl;
+};
+
+#define CMD_SUBSYSTEM_COMMON 0x1
+#define CMD_SUBSYSTEM_ETH 0x3
+#define CMD_SUBSYSTEM_LOWLEVEL 0xb
+
+#define OPCODE_COMMON_NTWK_MAC_QUERY 1
+#define OPCODE_COMMON_NTWK_MAC_SET 2
+#define OPCODE_COMMON_NTWK_MULTICAST_SET 3
+#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4
+#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5
+#define OPCODE_COMMON_READ_FLASHROM 6
+#define OPCODE_COMMON_WRITE_FLASHROM 7
+#define OPCODE_COMMON_CQ_CREATE 12
+#define OPCODE_COMMON_EQ_CREATE 13
+#define OPCODE_COMMON_MCC_CREATE 21
+#define OPCODE_COMMON_SET_QOS 28
+#define OPCODE_COMMON_MCC_CREATE_EXT 90
+#define OPCODE_COMMON_SEEPROM_READ 30
+#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
+#define OPCODE_COMMON_NTWK_RX_FILTER 34
+#define OPCODE_COMMON_GET_FW_VERSION 35
+#define OPCODE_COMMON_SET_FLOW_CONTROL 36
+#define OPCODE_COMMON_GET_FLOW_CONTROL 37
+#define OPCODE_COMMON_SET_FRAME_SIZE 39
+#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
+#define OPCODE_COMMON_FIRMWARE_CONFIG 42
+#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
+#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
+#define OPCODE_COMMON_MCC_DESTROY 53
+#define OPCODE_COMMON_CQ_DESTROY 54
+#define OPCODE_COMMON_EQ_DESTROY 55
+#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
+#define OPCODE_COMMON_NTWK_PMAC_ADD 59
+#define OPCODE_COMMON_NTWK_PMAC_DEL 60
+#define OPCODE_COMMON_FUNCTION_RESET 61
+#define OPCODE_COMMON_MANAGE_FAT 68
+#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
+#define OPCODE_COMMON_GET_BEACON_STATE 70
+#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
+#define OPCODE_COMMON_GET_PHY_DETAILS 102
+#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
+#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
+#define OPCODE_COMMON_WRITE_OBJECT 172
+
+#define OPCODE_ETH_RSS_CONFIG 1
+#define OPCODE_ETH_ACPI_CONFIG 2
+#define OPCODE_ETH_PROMISCUOUS 3
+#define OPCODE_ETH_GET_STATISTICS 4
+#define OPCODE_ETH_TX_CREATE 7
+#define OPCODE_ETH_RX_CREATE 8
+#define OPCODE_ETH_TX_DESTROY 9
+#define OPCODE_ETH_RX_DESTROY 10
+#define OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG 12
+#define OPCODE_ETH_GET_PPORT_STATS 18
+
+#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17
+#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18
+#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19
+
+struct be_cmd_req_hdr {
+ u8 opcode; /* dword 0 */
+ u8 subsystem; /* dword 0 */
+ u8 port_number; /* dword 0 */
+ u8 domain; /* dword 0 */
+ u32 timeout; /* dword 1 */
+ u32 request_length; /* dword 2 */
+ u8 version; /* dword 3 */
+ u8 rsvd[3]; /* dword 3 */
+};
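+
+/* Every request payload begins with this header; be_cmd_hdr_prepare() in
+ * be_cmds.c fills in the subsystem, opcode and request length for embedded
+ * and non-embedded commands alike.
+ */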
+
+#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
+#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */
+struct be_cmd_resp_hdr {
+ u32 info; /* dword 0 */
+ u32 status; /* dword 1 */
+ u32 response_length; /* dword 2 */
+ u32 actual_resp_len; /* dword 3 */
+};
+
+struct phys_addr {
+ u32 lo;
+ u32 hi;
+};
+
+/**************************
+ * BE Command definitions *
+ **************************/
+
+/* Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field */
+struct amap_eq_context {
+ u8 cidx[13]; /* dword 0*/
+ u8 rsvd0[3]; /* dword 0*/
+ u8 epidx[13]; /* dword 0*/
+ u8 valid; /* dword 0*/
+ u8 rsvd1; /* dword 0*/
+ u8 size; /* dword 0*/
+ u8 pidx[13]; /* dword 1*/
+ u8 rsvd2[3]; /* dword 1*/
+ u8 pd[10]; /* dword 1*/
+ u8 count[3]; /* dword 1*/
+ u8 solevent; /* dword 1*/
+ u8 stalled; /* dword 1*/
+ u8 armed; /* dword 1*/
+ u8 rsvd3[4]; /* dword 2*/
+ u8 func[8]; /* dword 2*/
+ u8 rsvd4; /* dword 2*/
+ u8 delaymult[10]; /* dword 2*/
+ u8 rsvd5[2]; /* dword 2*/
+ u8 phase[2]; /* dword 2*/
+ u8 nodelay; /* dword 2*/
+ u8 rsvd6[4]; /* dword 2*/
+ u8 rsvd7[32]; /* dword 3*/
+} __packed;
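+
+/* Example: 'valid' above sits at byte offset 29 of the pseudo struct, so
+ * AMAP_SET_BITS(struct amap_eq_context, valid, ctxt, 1) sets bit 29 of
+ * dword 0 in the real context.
+ */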
+
+struct be_cmd_req_eq_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages; /* sword */
+ u16 rsvd0; /* sword */
+ u8 context[sizeof(struct amap_eq_context) / 8];
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_resp_eq_create {
+ struct be_cmd_resp_hdr resp_hdr;
+ u16 eq_id; /* sword */
+ u16 rsvd0; /* sword */
+} __packed;
+
+/******************** Mac query ***************************/
+enum {
+ MAC_ADDRESS_TYPE_STORAGE = 0x0,
+ MAC_ADDRESS_TYPE_NETWORK = 0x1,
+ MAC_ADDRESS_TYPE_PD = 0x2,
+ MAC_ADDRESS_TYPE_MANAGEMENT = 0x3
+};
+
+struct mac_addr {
+ u16 size_of_struct;
+ u8 addr[ETH_ALEN];
+} __packed;
+
+struct be_cmd_req_mac_query {
+ struct be_cmd_req_hdr hdr;
+ u8 type;
+ u8 permanent;
+ u16 if_id;
+} __packed;
+
+struct be_cmd_resp_mac_query {
+ struct be_cmd_resp_hdr hdr;
+ struct mac_addr mac;
+};
+
+/******************** PMac Add ***************************/
+struct be_cmd_req_pmac_add {
+ struct be_cmd_req_hdr hdr;
+ u32 if_id;
+ u8 mac_address[ETH_ALEN];
+ u8 rsvd0[2];
+} __packed;
+
+struct be_cmd_resp_pmac_add {
+ struct be_cmd_resp_hdr hdr;
+ u32 pmac_id;
+};
+
+/******************** PMac Del ***************************/
+struct be_cmd_req_pmac_del {
+ struct be_cmd_req_hdr hdr;
+ u32 if_id;
+ u32 pmac_id;
+};
+
+/******************** Create CQ ***************************/
+/* Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field */
+struct amap_cq_context_be {
+ u8 cidx[11]; /* dword 0*/
+ u8 rsvd0; /* dword 0*/
+ u8 coalescwm[2]; /* dword 0*/
+ u8 nodelay; /* dword 0*/
+ u8 epidx[11]; /* dword 0*/
+ u8 rsvd1; /* dword 0*/
+ u8 count[2]; /* dword 0*/
+ u8 valid; /* dword 0*/
+ u8 solevent; /* dword 0*/
+ u8 eventable; /* dword 0*/
+ u8 pidx[11]; /* dword 1*/
+ u8 rsvd2; /* dword 1*/
+ u8 pd[10]; /* dword 1*/
+ u8 eqid[8]; /* dword 1*/
+ u8 stalled; /* dword 1*/
+ u8 armed; /* dword 1*/
+ u8 rsvd3[4]; /* dword 2*/
+ u8 func[8]; /* dword 2*/
+ u8 rsvd4[20]; /* dword 2*/
+ u8 rsvd5[32]; /* dword 3*/
+} __packed;
+
+struct amap_cq_context_lancer {
+ u8 rsvd0[12]; /* dword 0*/
+ u8 coalescwm[2]; /* dword 0*/
+ u8 nodelay; /* dword 0*/
+ u8 rsvd1[12]; /* dword 0*/
+ u8 count[2]; /* dword 0*/
+ u8 valid; /* dword 0*/
+ u8 rsvd2; /* dword 0*/
+ u8 eventable; /* dword 0*/
+ u8 eqid[16]; /* dword 1*/
+ u8 rsvd3[15]; /* dword 1*/
+ u8 armed; /* dword 1*/
+ u8 rsvd4[32]; /* dword 2*/
+ u8 rsvd5[32]; /* dword 3*/
+} __packed;
+
+struct be_cmd_req_cq_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u8 page_size;
+ u8 rsvd0;
+ u8 context[sizeof(struct amap_cq_context_be) / 8];
+ struct phys_addr pages[8];
+} __packed;
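+
+/* Both the BE and Lancer context layouts span 4 dwords, so the _be pseudo
+ * struct above sizes the context byte array for either variant.
+ */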
+
+
+struct be_cmd_resp_cq_create {
+ struct be_cmd_resp_hdr hdr;
+ u16 cq_id;
+ u16 rsvd0;
+} __packed;
+
+struct be_cmd_req_get_fat {
+ struct be_cmd_req_hdr hdr;
+ u32 fat_operation;
+ u32 read_log_offset;
+ u32 read_log_length;
+ u32 data_buffer_size;
+ u32 data_buffer[1];
+} __packed;
+
+struct be_cmd_resp_get_fat {
+ struct be_cmd_resp_hdr hdr;
+ u32 log_size;
+ u32 read_log_length;
+ u32 rsvd[2];
+ u32 data_buffer[1];
+} __packed;
+
+
+/******************** Create MCCQ ***************************/
+/* Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field */
+struct amap_mcc_context_be {
+ u8 con_index[14];
+ u8 rsvd0[2];
+ u8 ring_size[4];
+ u8 fetch_wrb;
+ u8 fetch_r2t;
+ u8 cq_id[10];
+ u8 prod_index[14];
+ u8 fid[8];
+ u8 pdid[9];
+ u8 valid;
+ u8 rsvd1[32];
+ u8 rsvd2[32];
+} __packed;
+
+struct amap_mcc_context_lancer {
+ u8 async_cq_id[16];
+ u8 ring_size[4];
+ u8 rsvd0[12];
+ u8 rsvd1[31];
+ u8 valid;
+ u8 async_cq_valid[1];
+ u8 rsvd2[31];
+ u8 rsvd3[32];
+} __packed;
+
+struct be_cmd_req_mcc_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u16 cq_id;
+ u8 context[sizeof(struct amap_mcc_context_be) / 8];
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_req_mcc_ext_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u16 cq_id;
+ u32 async_event_bitmap[1];
+ u8 context[sizeof(struct amap_mcc_context_be) / 8];
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_resp_mcc_create {
+ struct be_cmd_resp_hdr hdr;
+ u16 id;
+ u16 rsvd0;
+} __packed;
+
+/******************** Create TxQ ***************************/
+#define BE_ETH_TX_RING_TYPE_STANDARD 2
+#define BE_ULP1_NUM 1
+
+/* Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field */
+struct amap_tx_context {
+ u8 if_id[16]; /* dword 0 */
+ u8 tx_ring_size[4]; /* dword 0 */
+ u8 rsvd1[26]; /* dword 0 */
+ u8 pci_func_id[8]; /* dword 1 */
+ u8 rsvd2[9]; /* dword 1 */
+ u8 ctx_valid; /* dword 1 */
+ u8 cq_id_send[16]; /* dword 2 */
+ u8 rsvd3[16]; /* dword 2 */
+ u8 rsvd4[32]; /* dword 3 */
+ u8 rsvd5[32]; /* dword 4 */
+ u8 rsvd6[32]; /* dword 5 */
+ u8 rsvd7[32]; /* dword 6 */
+ u8 rsvd8[32]; /* dword 7 */
+ u8 rsvd9[32]; /* dword 8 */
+ u8 rsvd10[32]; /* dword 9 */
+ u8 rsvd11[32]; /* dword 10 */
+ u8 rsvd12[32]; /* dword 11 */
+ u8 rsvd13[32]; /* dword 12 */
+ u8 rsvd14[32]; /* dword 13 */
+ u8 rsvd15[32]; /* dword 14 */
+ u8 rsvd16[32]; /* dword 15 */
+} __packed;
+
+struct be_cmd_req_eth_tx_create {
+ struct be_cmd_req_hdr hdr;
+ u8 num_pages;
+ u8 ulp_num;
+ u8 type;
+ u8 bound_port;
+ u8 context[sizeof(struct amap_tx_context) / 8];
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_resp_eth_tx_create {
+ struct be_cmd_resp_hdr hdr;
+ u16 cid;
+ u16 rsvd0;
+} __packed;
+
+/******************** Create RxQ ***************************/
+struct be_cmd_req_eth_rx_create {
+ struct be_cmd_req_hdr hdr;
+ u16 cq_id;
+ u8 frag_size;
+ u8 num_pages;
+ struct phys_addr pages[2];
+ u32 interface_id;
+ u16 max_frame_size;
+ u16 rsvd0;
+ u32 rss_queue;
+} __packed;
+
+struct be_cmd_resp_eth_rx_create {
+ struct be_cmd_resp_hdr hdr;
+ u16 id;
+ u8 rss_id;
+ u8 rsvd0;
+} __packed;
+
+/******************** Q Destroy ***************************/
+/* Type of Queue to be destroyed */
+enum {
+ QTYPE_EQ = 1,
+ QTYPE_CQ,
+ QTYPE_TXQ,
+ QTYPE_RXQ,
+ QTYPE_MCCQ
+};
+
+struct be_cmd_req_q_destroy {
+ struct be_cmd_req_hdr hdr;
+ u16 id;
+ u16 bypass_flush; /* valid only for rx q destroy */
+} __packed;
+
+/************ I/f Create (it's actually I/f Config Create) **********/
+
+/* Capability flags for the i/f */
+enum be_if_flags {
+ BE_IF_FLAGS_RSS = 0x4,
+ BE_IF_FLAGS_PROMISCUOUS = 0x8,
+ BE_IF_FLAGS_BROADCAST = 0x10,
+ BE_IF_FLAGS_UNTAGGED = 0x20,
+ BE_IF_FLAGS_ULP = 0x40,
+ BE_IF_FLAGS_VLAN_PROMISCUOUS = 0x80,
+ BE_IF_FLAGS_VLAN = 0x100,
+ BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
+ BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
+ BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
+ BE_IF_FLAGS_MULTICAST = 0x1000
+};
+
+/* An RX interface is an object with one or more MAC addresses and
+ * filtering capabilities. */
+struct be_cmd_req_if_create {
+ struct be_cmd_req_hdr hdr;
+ u32 version; /* ignore currently */
+ u32 capability_flags;
+ u32 enable_flags;
+ u8 mac_addr[ETH_ALEN];
+ u8 rsvd0;
+ u8 pmac_invalid; /* if set, don't attach the mac addr to the i/f */
+ u32 vlan_tag; /* not used currently */
+} __packed;
+
+struct be_cmd_resp_if_create {
+ struct be_cmd_resp_hdr hdr;
+ u32 interface_id;
+ u32 pmac_id;
+};
+
+/****** I/f Destroy (it's actually I/f Config Destroy) **********/
+struct be_cmd_req_if_destroy {
+ struct be_cmd_req_hdr hdr;
+ u32 interface_id;
+};
+
+/*************** HW Stats Get **********************************/
+struct be_port_rxf_stats_v0 {
+ u32 rx_bytes_lsd; /* dword 0*/
+ u32 rx_bytes_msd; /* dword 1*/
+ u32 rx_total_frames; /* dword 2*/
+ u32 rx_unicast_frames; /* dword 3*/
+ u32 rx_multicast_frames; /* dword 4*/
+ u32 rx_broadcast_frames; /* dword 5*/
+ u32 rx_crc_errors; /* dword 6*/
+ u32 rx_alignment_symbol_errors; /* dword 7*/
+ u32 rx_pause_frames; /* dword 8*/
+ u32 rx_control_frames; /* dword 9*/
+ u32 rx_in_range_errors; /* dword 10*/
+ u32 rx_out_range_errors; /* dword 11*/
+ u32 rx_frame_too_long; /* dword 12*/
+ u32 rx_address_match_errors; /* dword 13*/
+ u32 rx_vlan_mismatch; /* dword 14*/
+ u32 rx_dropped_too_small; /* dword 15*/
+ u32 rx_dropped_too_short; /* dword 16*/
+ u32 rx_dropped_header_too_small; /* dword 17*/
+ u32 rx_dropped_tcp_length; /* dword 18*/
+ u32 rx_dropped_runt; /* dword 19*/
+ u32 rx_64_byte_packets; /* dword 20*/
+ u32 rx_65_127_byte_packets; /* dword 21*/
+ u32 rx_128_256_byte_packets; /* dword 22*/
+ u32 rx_256_511_byte_packets; /* dword 23*/
+ u32 rx_512_1023_byte_packets; /* dword 24*/
+ u32 rx_1024_1518_byte_packets; /* dword 25*/
+ u32 rx_1519_2047_byte_packets; /* dword 26*/
+ u32 rx_2048_4095_byte_packets; /* dword 27*/
+ u32 rx_4096_8191_byte_packets; /* dword 28*/
+ u32 rx_8192_9216_byte_packets; /* dword 29*/
+ u32 rx_ip_checksum_errs; /* dword 30*/
+ u32 rx_tcp_checksum_errs; /* dword 31*/
+ u32 rx_udp_checksum_errs; /* dword 32*/
+ u32 rx_non_rss_packets; /* dword 33*/
+ u32 rx_ipv4_packets; /* dword 34*/
+ u32 rx_ipv6_packets; /* dword 35*/
+ u32 rx_ipv4_bytes_lsd; /* dword 36*/
+ u32 rx_ipv4_bytes_msd; /* dword 37*/
+ u32 rx_ipv6_bytes_lsd; /* dword 38*/
+ u32 rx_ipv6_bytes_msd; /* dword 39*/
+ u32 rx_chute1_packets; /* dword 40*/
+ u32 rx_chute2_packets; /* dword 41*/
+ u32 rx_chute3_packets; /* dword 42*/
+ u32 rx_management_packets; /* dword 43*/
+ u32 rx_switched_unicast_packets; /* dword 44*/
+ u32 rx_switched_multicast_packets; /* dword 45*/
+ u32 rx_switched_broadcast_packets; /* dword 46*/
+ u32 tx_bytes_lsd; /* dword 47*/
+ u32 tx_bytes_msd; /* dword 48*/
+ u32 tx_unicastframes; /* dword 49*/
+ u32 tx_multicastframes; /* dword 50*/
+ u32 tx_broadcastframes; /* dword 51*/
+ u32 tx_pauseframes; /* dword 52*/
+ u32 tx_controlframes; /* dword 53*/
+ u32 tx_64_byte_packets; /* dword 54*/
+ u32 tx_65_127_byte_packets; /* dword 55*/
+ u32 tx_128_256_byte_packets; /* dword 56*/
+ u32 tx_256_511_byte_packets; /* dword 57*/
+ u32 tx_512_1023_byte_packets; /* dword 58*/
+ u32 tx_1024_1518_byte_packets; /* dword 59*/
+ u32 tx_1519_2047_byte_packets; /* dword 60*/
+ u32 tx_2048_4095_byte_packets; /* dword 61*/
+ u32 tx_4096_8191_byte_packets; /* dword 62*/
+ u32 tx_8192_9216_byte_packets; /* dword 63*/
+ u32 rx_fifo_overflow; /* dword 64*/
+ u32 rx_input_fifo_overflow; /* dword 65*/
+};
+
+struct be_rxf_stats_v0 {
+ struct be_port_rxf_stats_v0 port[2];
+ u32 rx_drops_no_pbuf; /* dword 132*/
+ u32 rx_drops_no_txpb; /* dword 133*/
+ u32 rx_drops_no_erx_descr; /* dword 134*/
+ u32 rx_drops_no_tpre_descr; /* dword 135*/
+ u32 management_rx_port_packets; /* dword 136*/
+ u32 management_rx_port_bytes; /* dword 137*/
+ u32 management_rx_port_pause_frames; /* dword 138*/
+ u32 management_rx_port_errors; /* dword 139*/
+ u32 management_tx_port_packets; /* dword 140*/
+ u32 management_tx_port_bytes; /* dword 141*/
+ u32 management_tx_port_pause; /* dword 142*/
+ u32 management_rx_port_rxfifo_overflow; /* dword 143*/
+ u32 rx_drops_too_many_frags; /* dword 144*/
+ u32 rx_drops_invalid_ring; /* dword 145*/
+ u32 forwarded_packets; /* dword 146*/
+ u32 rx_drops_mtu; /* dword 147*/
+ u32 rsvd0[7];
+ u32 port0_jabber_events;
+ u32 port1_jabber_events;
+ u32 rsvd1[6];
+};
+
+struct be_erx_stats_v0 {
+ u32 rx_drops_no_fragments[44]; /* dwords 0 to 43 */
+ u32 rsvd[4];
+};
+
+struct be_pmem_stats {
+ u32 eth_red_drops;
+ u32 rsvd[5];
+};
+
+struct be_hw_stats_v0 {
+ struct be_rxf_stats_v0 rxf;
+ u32 rsvd[48];
+ struct be_erx_stats_v0 erx;
+ struct be_pmem_stats pmem;
+};
+
+struct be_cmd_req_get_stats_v0 {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd[sizeof(struct be_hw_stats_v0)];
+};
+
+struct be_cmd_resp_get_stats_v0 {
+ struct be_cmd_resp_hdr hdr;
+ struct be_hw_stats_v0 hw_stats;
+};
+
+struct lancer_pport_stats {
+ u32 tx_packets_lo;
+ u32 tx_packets_hi;
+ u32 tx_unicast_packets_lo;
+ u32 tx_unicast_packets_hi;
+ u32 tx_multicast_packets_lo;
+ u32 tx_multicast_packets_hi;
+ u32 tx_broadcast_packets_lo;
+ u32 tx_broadcast_packets_hi;
+ u32 tx_bytes_lo;
+ u32 tx_bytes_hi;
+ u32 tx_unicast_bytes_lo;
+ u32 tx_unicast_bytes_hi;
+ u32 tx_multicast_bytes_lo;
+ u32 tx_multicast_bytes_hi;
+ u32 tx_broadcast_bytes_lo;
+ u32 tx_broadcast_bytes_hi;
+ u32 tx_discards_lo;
+ u32 tx_discards_hi;
+ u32 tx_errors_lo;
+ u32 tx_errors_hi;
+ u32 tx_pause_frames_lo;
+ u32 tx_pause_frames_hi;
+ u32 tx_pause_on_frames_lo;
+ u32 tx_pause_on_frames_hi;
+ u32 tx_pause_off_frames_lo;
+ u32 tx_pause_off_frames_hi;
+ u32 tx_internal_mac_errors_lo;
+ u32 tx_internal_mac_errors_hi;
+ u32 tx_control_frames_lo;
+ u32 tx_control_frames_hi;
+ u32 tx_packets_64_bytes_lo;
+ u32 tx_packets_64_bytes_hi;
+ u32 tx_packets_65_to_127_bytes_lo;
+ u32 tx_packets_65_to_127_bytes_hi;
+ u32 tx_packets_128_to_255_bytes_lo;
+ u32 tx_packets_128_to_255_bytes_hi;
+ u32 tx_packets_256_to_511_bytes_lo;
+ u32 tx_packets_256_to_511_bytes_hi;
+ u32 tx_packets_512_to_1023_bytes_lo;
+ u32 tx_packets_512_to_1023_bytes_hi;
+ u32 tx_packets_1024_to_1518_bytes_lo;
+ u32 tx_packets_1024_to_1518_bytes_hi;
+ u32 tx_packets_1519_to_2047_bytes_lo;
+ u32 tx_packets_1519_to_2047_bytes_hi;
+ u32 tx_packets_2048_to_4095_bytes_lo;
+ u32 tx_packets_2048_to_4095_bytes_hi;
+ u32 tx_packets_4096_to_8191_bytes_lo;
+ u32 tx_packets_4096_to_8191_bytes_hi;
+ u32 tx_packets_8192_to_9216_bytes_lo;
+ u32 tx_packets_8192_to_9216_bytes_hi;
+ u32 tx_lso_packets_lo;
+ u32 tx_lso_packets_hi;
+ u32 rx_packets_lo;
+ u32 rx_packets_hi;
+ u32 rx_unicast_packets_lo;
+ u32 rx_unicast_packets_hi;
+ u32 rx_multicast_packets_lo;
+ u32 rx_multicast_packets_hi;
+ u32 rx_broadcast_packets_lo;
+ u32 rx_broadcast_packets_hi;
+ u32 rx_bytes_lo;
+ u32 rx_bytes_hi;
+ u32 rx_unicast_bytes_lo;
+ u32 rx_unicast_bytes_hi;
+ u32 rx_multicast_bytes_lo;
+ u32 rx_multicast_bytes_hi;
+ u32 rx_broadcast_bytes_lo;
+ u32 rx_broadcast_bytes_hi;
+ u32 rx_unknown_protos;
+ u32 rsvd_69; /* Word 69 is reserved */
+ u32 rx_discards_lo;
+ u32 rx_discards_hi;
+ u32 rx_errors_lo;
+ u32 rx_errors_hi;
+ u32 rx_crc_errors_lo;
+ u32 rx_crc_errors_hi;
+ u32 rx_alignment_errors_lo;
+ u32 rx_alignment_errors_hi;
+ u32 rx_symbol_errors_lo;
+ u32 rx_symbol_errors_hi;
+ u32 rx_pause_frames_lo;
+ u32 rx_pause_frames_hi;
+ u32 rx_pause_on_frames_lo;
+ u32 rx_pause_on_frames_hi;
+ u32 rx_pause_off_frames_lo;
+ u32 rx_pause_off_frames_hi;
+ u32 rx_frames_too_long_lo;
+ u32 rx_frames_too_long_hi;
+ u32 rx_internal_mac_errors_lo;
+ u32 rx_internal_mac_errors_hi;
+ u32 rx_undersize_packets;
+ u32 rx_oversize_packets;
+ u32 rx_fragment_packets;
+ u32 rx_jabbers;
+ u32 rx_control_frames_lo;
+ u32 rx_control_frames_hi;
+ u32 rx_control_frames_unknown_opcode_lo;
+ u32 rx_control_frames_unknown_opcode_hi;
+ u32 rx_in_range_errors;
+ u32 rx_out_of_range_errors;
+ u32 rx_address_match_errors;
+ u32 rx_vlan_mismatch_errors;
+ u32 rx_dropped_too_small;
+ u32 rx_dropped_too_short;
+ u32 rx_dropped_header_too_small;
+ u32 rx_dropped_invalid_tcp_length;
+ u32 rx_dropped_runt;
+ u32 rx_ip_checksum_errors;
+ u32 rx_tcp_checksum_errors;
+ u32 rx_udp_checksum_errors;
+ u32 rx_non_rss_packets;
+ u32 rsvd_111;
+ u32 rx_ipv4_packets_lo;
+ u32 rx_ipv4_packets_hi;
+ u32 rx_ipv6_packets_lo;
+ u32 rx_ipv6_packets_hi;
+ u32 rx_ipv4_bytes_lo;
+ u32 rx_ipv4_bytes_hi;
+ u32 rx_ipv6_bytes_lo;
+ u32 rx_ipv6_bytes_hi;
+ u32 rx_nic_packets_lo;
+ u32 rx_nic_packets_hi;
+ u32 rx_tcp_packets_lo;
+ u32 rx_tcp_packets_hi;
+ u32 rx_iscsi_packets_lo;
+ u32 rx_iscsi_packets_hi;
+ u32 rx_management_packets_lo;
+ u32 rx_management_packets_hi;
+ u32 rx_switched_unicast_packets_lo;
+ u32 rx_switched_unicast_packets_hi;
+ u32 rx_switched_multicast_packets_lo;
+ u32 rx_switched_multicast_packets_hi;
+ u32 rx_switched_broadcast_packets_lo;
+ u32 rx_switched_broadcast_packets_hi;
+ u32 num_forwards_lo;
+ u32 num_forwards_hi;
+ u32 rx_fifo_overflow;
+ u32 rx_input_fifo_overflow;
+ u32 rx_drops_too_many_frags_lo;
+ u32 rx_drops_too_many_frags_hi;
+ u32 rx_drops_invalid_queue;
+ u32 rsvd_141;
+ u32 rx_drops_mtu_lo;
+ u32 rx_drops_mtu_hi;
+ u32 rx_packets_64_bytes_lo;
+ u32 rx_packets_64_bytes_hi;
+ u32 rx_packets_65_to_127_bytes_lo;
+ u32 rx_packets_65_to_127_bytes_hi;
+ u32 rx_packets_128_to_255_bytes_lo;
+ u32 rx_packets_128_to_255_bytes_hi;
+ u32 rx_packets_256_to_511_bytes_lo;
+ u32 rx_packets_256_to_511_bytes_hi;
+ u32 rx_packets_512_to_1023_bytes_lo;
+ u32 rx_packets_512_to_1023_bytes_hi;
+ u32 rx_packets_1024_to_1518_bytes_lo;
+ u32 rx_packets_1024_to_1518_bytes_hi;
+ u32 rx_packets_1519_to_2047_bytes_lo;
+ u32 rx_packets_1519_to_2047_bytes_hi;
+ u32 rx_packets_2048_to_4095_bytes_lo;
+ u32 rx_packets_2048_to_4095_bytes_hi;
+ u32 rx_packets_4096_to_8191_bytes_lo;
+ u32 rx_packets_4096_to_8191_bytes_hi;
+ u32 rx_packets_8192_to_9216_bytes_lo;
+ u32 rx_packets_8192_to_9216_bytes_hi;
+};
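+
+/* Editor's sketch (not from the original commit): each _lo/_hi pair above is
+ * the two halves of one 64-bit counter, so a reader would combine them as:
+ *
+ *	static inline u64 lancer_stat64(u32 lo, u32 hi)
+ *	{
+ *		return ((u64)hi << 32) | lo;
+ *	}
+ *
+ * e.g. rx_bytes = lancer_stat64(s->rx_bytes_lo, s->rx_bytes_hi);
+ * (lancer_stat64 is a hypothetical name, not a driver API)
+ */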
+
+struct pport_stats_params {
+ u16 pport_num;
+ u8 rsvd;
+ u8 reset_stats;
+};
+
+struct lancer_cmd_req_pport_stats {
+ struct be_cmd_req_hdr hdr;
+ union {
+ struct pport_stats_params params;
+ u8 rsvd[sizeof(struct lancer_pport_stats)];
+ } cmd_params;
+};
+
+struct lancer_cmd_resp_pport_stats {
+ struct be_cmd_resp_hdr hdr;
+ struct lancer_pport_stats pport_stats;
+};
+
+static inline struct lancer_pport_stats *
+pport_stats_from_cmd(struct be_adapter *adapter)
+{
+ struct lancer_cmd_resp_pport_stats *cmd = adapter->stats_cmd.va;
+ return &cmd->pport_stats;
+}
+
+struct be_cmd_req_get_cntl_addnl_attribs {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct be_cmd_resp_get_cntl_addnl_attribs {
+ struct be_cmd_resp_hdr hdr;
+ u16 ipl_file_number;
+ u8 ipl_file_version;
+ u8 rsvd0;
+ u8 on_die_temperature; /* in degrees centigrade */
+ u8 rsvd1[3];
+};
+
+struct be_cmd_req_vlan_config {
+ struct be_cmd_req_hdr hdr;
+ u8 interface_id;
+ u8 promiscuous;
+ u8 untagged;
+ u8 num_vlan;
+ u16 normal_vlan[64];
+} __packed;
+
+/******************* RX FILTER ******************************/
+#define BE_MAX_MC 64 /* set mcast promisc if > 64 */
+struct macaddr {
+ u8 byte[ETH_ALEN];
+};
+
+struct be_cmd_req_rx_filter {
+ struct be_cmd_req_hdr hdr;
+ u32 global_flags_mask;
+ u32 global_flags;
+ u32 if_flags_mask;
+ u32 if_flags;
+ u32 if_id;
+ u32 mcast_num;
+ struct macaddr mcast_mac[BE_MAX_MC];
+};
+
+/******************** Link Status Query *******************/
+struct be_cmd_req_link_status {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd;
+};
+
+enum {
+ PHY_LINK_DUPLEX_NONE = 0x0,
+ PHY_LINK_DUPLEX_HALF = 0x1,
+ PHY_LINK_DUPLEX_FULL = 0x2
+};
+
+enum {
+ PHY_LINK_SPEED_ZERO = 0x0, /* => No link */
+ PHY_LINK_SPEED_10MBPS = 0x1,
+ PHY_LINK_SPEED_100MBPS = 0x2,
+ PHY_LINK_SPEED_1GBPS = 0x3,
+ PHY_LINK_SPEED_10GBPS = 0x4
+};
+
+struct be_cmd_resp_link_status {
+ struct be_cmd_resp_hdr hdr;
+ u8 physical_port;
+ u8 mac_duplex;
+ u8 mac_speed;
+ u8 mac_fault;
+ u8 mgmt_mac_duplex;
+ u8 mgmt_mac_speed;
+ u16 link_speed;
+ u32 rsvd0;
+} __packed;
+
+/******************** Port Identification ***************************/
+/* Identifies the type of port attached to NIC */
+struct be_cmd_req_port_type {
+ struct be_cmd_req_hdr hdr;
+ u32 page_num;
+ u32 port;
+};
+
+enum {
+ TR_PAGE_A0 = 0xa0,
+ TR_PAGE_A2 = 0xa2
+};
+
+struct be_cmd_resp_port_type {
+ struct be_cmd_resp_hdr hdr;
+ u32 page_num;
+ u32 port;
+ struct data {
+ u8 identifier;
+ u8 identifier_ext;
+ u8 connector;
+ u8 transceiver[8];
+ u8 rsvd0[3];
+ u8 length_km;
+ u8 length_hm;
+ u8 length_om1;
+ u8 length_om2;
+ u8 length_cu;
+ u8 length_cu_m;
+ u8 vendor_name[16];
+ u8 rsvd;
+ u8 vendor_oui[3];
+ u8 vendor_pn[16];
+ u8 vendor_rev[4];
+ } data;
+};
+
+/******************** Get FW Version *******************/
+struct be_cmd_req_get_fw_version {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd0[FW_VER_LEN];
+ u8 rsvd1[FW_VER_LEN];
+} __packed;
+
+struct be_cmd_resp_get_fw_version {
+ struct be_cmd_resp_hdr hdr;
+ u8 firmware_version_string[FW_VER_LEN];
+ u8 fw_on_flash_version_string[FW_VER_LEN];
+} __packed;
+
+/******************** Set Flow Control *******************/
+struct be_cmd_req_set_flow_control {
+ struct be_cmd_req_hdr hdr;
+ u16 tx_flow_control;
+ u16 rx_flow_control;
+} __packed;
+
+/******************** Get Flow Control *******************/
+struct be_cmd_req_get_flow_control {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd;
+};
+
+struct be_cmd_resp_get_flow_control {
+ struct be_cmd_resp_hdr hdr;
+ u16 tx_flow_control;
+ u16 rx_flow_control;
+} __packed;
+
+/******************** Modify EQ Delay *******************/
+struct be_cmd_req_modify_eq_delay {
+ struct be_cmd_req_hdr hdr;
+ u32 num_eq;
+ struct {
+ u32 eq_id;
+ u32 phase;
+ u32 delay_multiplier;
+ } delay[8];
+} __packed;
+
+struct be_cmd_resp_modify_eq_delay {
+ struct be_cmd_resp_hdr hdr;
+ u32 rsvd0;
+} __packed;
+
+/******************** Get FW Config *******************/
+#define BE_FUNCTION_CAPS_RSS 0x2
+struct be_cmd_req_query_fw_cfg {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd[31];
+};
+
+struct be_cmd_resp_query_fw_cfg {
+ struct be_cmd_resp_hdr hdr;
+ u32 be_config_number;
+ u32 asic_revision;
+ u32 phys_port;
+ u32 function_mode;
+ u32 rsvd[26];
+ u32 function_caps;
+};
+
+/******************** RSS Config *******************/
+/* RSS types */
+#define RSS_ENABLE_NONE 0x0
+#define RSS_ENABLE_IPV4 0x1
+#define RSS_ENABLE_TCP_IPV4 0x2
+#define RSS_ENABLE_IPV6 0x4
+#define RSS_ENABLE_TCP_IPV6 0x8
+
+struct be_cmd_req_rss_config {
+ struct be_cmd_req_hdr hdr;
+ u32 if_id;
+ u16 enable_rss;
+ u16 cpu_table_size_log2;
+ u32 hash[10];
+ u8 cpu_table[128];
+ u8 flush;
+ u8 rsvd0[3];
+};
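+
+/* Editor's sketch (hypothetical usage, not from the original commit): a
+ * caller would OR the RSS_ENABLE_* types together and size the CPU table as
+ * a power of two, e.g.:
+ *
+ *	req->enable_rss = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
+ *			  RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
+ *	req->cpu_table_size_log2 = ilog2(128);
+ *	memcpy(req->cpu_table, rsstable, table_size);
+ */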
+
+/******************** Port Beacon ***************************/
+
+#define BEACON_STATE_ENABLED 0x1
+#define BEACON_STATE_DISABLED 0x0
+
+struct be_cmd_req_enable_disable_beacon {
+ struct be_cmd_req_hdr hdr;
+ u8 port_num;
+ u8 beacon_state;
+ u8 beacon_duration;
+ u8 status_duration;
+} __packed;
+
+struct be_cmd_resp_enable_disable_beacon {
+ struct be_cmd_resp_hdr resp_hdr;
+ u32 rsvd0;
+} __packed;
+
+struct be_cmd_req_get_beacon_state {
+ struct be_cmd_req_hdr hdr;
+ u8 port_num;
+ u8 rsvd0;
+ u16 rsvd1;
+} __packed;
+
+struct be_cmd_resp_get_beacon_state {
+ struct be_cmd_resp_hdr resp_hdr;
+ u8 beacon_state;
+ u8 rsvd0[3];
+} __packed;
+
+/****************** Firmware Flash ******************/
+struct flashrom_params {
+ u32 op_code;
+ u32 op_type;
+ u32 data_buf_size;
+ u32 offset;
+ u8 data_buf[4];
+};
+
+struct be_cmd_write_flashrom {
+ struct be_cmd_req_hdr hdr;
+ struct flashrom_params params;
+};
+
+/**************** Lancer Firmware Flash ************/
+struct amap_lancer_write_obj_context {
+ u8 write_length[24];
+ u8 reserved1[7];
+ u8 eof;
+} __packed;
+
+struct lancer_cmd_req_write_object {
+ struct be_cmd_req_hdr hdr;
+ u8 context[sizeof(struct amap_lancer_write_obj_context) / 8];
+ u32 write_offset;
+ u8 object_name[104];
+ u32 descriptor_count;
+ u32 buf_len;
+ u32 addr_low;
+ u32 addr_high;
+};
+
+struct lancer_cmd_resp_write_object {
+ u8 opcode;
+ u8 subsystem;
+ u8 rsvd1[2];
+ u8 status;
+ u8 additional_status;
+ u8 rsvd2[2];
+ u32 resp_len;
+ u32 actual_resp_len;
+ u32 actual_write_len;
+};
+
+/************************ WOL *******************************/
+struct be_cmd_req_acpi_wol_magic_config {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd0[145];
+ u8 magic_mac[6];
+ u8 rsvd2[2];
+} __packed;
+
+/********************** LoopBack test *********************/
+struct be_cmd_req_loopback_test {
+ struct be_cmd_req_hdr hdr;
+ u32 loopback_type;
+ u32 num_pkts;
+ u64 pattern;
+ u32 src_port;
+ u32 dest_port;
+ u32 pkt_size;
+};
+
+struct be_cmd_resp_loopback_test {
+ struct be_cmd_resp_hdr resp_hdr;
+ u32 status;
+ u32 num_txfer;
+ u32 num_rx;
+ u32 miscomp_off;
+ u32 ticks_compl;
+};
+
+struct be_cmd_req_set_lmode {
+ struct be_cmd_req_hdr hdr;
+ u8 src_port;
+ u8 dest_port;
+ u8 loopback_type;
+ u8 loopback_state;
+};
+
+struct be_cmd_resp_set_lmode {
+ struct be_cmd_resp_hdr resp_hdr;
+ u8 rsvd0[4];
+};
+
+/********************** DDR DMA test *********************/
+struct be_cmd_req_ddrdma_test {
+ struct be_cmd_req_hdr hdr;
+ u64 pattern;
+ u32 byte_count;
+ u32 rsvd0;
+ u8 snd_buff[4096];
+ u8 rsvd1[4096];
+};
+
+struct be_cmd_resp_ddrdma_test {
+ struct be_cmd_resp_hdr hdr;
+ u64 pattern;
+ u32 byte_cnt;
+ u32 snd_err;
+ u8 rsvd0[4096];
+ u8 rcv_buff[4096];
+};
+
+/*********************** SEEPROM Read ***********************/
+
+#define BE_READ_SEEPROM_LEN 1024
+struct be_cmd_req_seeprom_read {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd0[BE_READ_SEEPROM_LEN];
+};
+
+struct be_cmd_resp_seeprom_read {
+ struct be_cmd_req_hdr hdr;
+ u8 seeprom_data[BE_READ_SEEPROM_LEN];
+};
+
+enum {
+ PHY_TYPE_CX4_10GB = 0,
+ PHY_TYPE_XFP_10GB,
+ PHY_TYPE_SFP_1GB,
+ PHY_TYPE_SFP_PLUS_10GB,
+ PHY_TYPE_KR_10GB,
+ PHY_TYPE_KX4_10GB,
+ PHY_TYPE_BASET_10GB,
+ PHY_TYPE_BASET_1GB,
+ PHY_TYPE_DISABLED = 255
+};
+
+struct be_cmd_req_get_phy_info {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd0[24];
+};
+
+struct be_phy_info {
+ u16 phy_type;
+ u16 interface_type;
+ u32 misc_params;
+ u32 future_use[4];
+};
+
+struct be_cmd_resp_get_phy_info {
+ struct be_cmd_req_hdr hdr;
+ struct be_phy_info phy_info;
+};
+
+/*********************** Set QOS ***********************/
+
+#define BE_QOS_BITS_NIC 1
+
+struct be_cmd_req_set_qos {
+ struct be_cmd_req_hdr hdr;
+ u32 valid_bits;
+ u32 max_bps_nic;
+ u32 rsvd[7];
+};
+
+struct be_cmd_resp_set_qos {
+ struct be_cmd_resp_hdr hdr;
+ u32 rsvd;
+};
+
+/*********************** Controller Attributes ***********************/
+struct be_cmd_req_cntl_attribs {
+ struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_resp_cntl_attribs {
+ struct be_cmd_resp_hdr hdr;
+ struct mgmt_controller_attrib attribs;
+};
+
+/*********************** Set driver function ***********************/
+#define CAPABILITY_SW_TIMESTAMPS 2
+#define CAPABILITY_BE3_NATIVE_ERX_API 4
+
+struct be_cmd_req_set_func_cap {
+ struct be_cmd_req_hdr hdr;
+ u32 valid_cap_flags;
+ u32 cap_flags;
+ u8 rsvd[212];
+};
+
+struct be_cmd_resp_set_func_cap {
+ struct be_cmd_resp_hdr hdr;
+ u32 valid_cap_flags;
+ u32 cap_flags;
+ u8 rsvd[212];
+};
+
+/*************** HW Stats Get v1 **********************************/
+#define BE_TXP_SW_SZ 48
+struct be_port_rxf_stats_v1 {
+ u32 rsvd0[12];
+ u32 rx_crc_errors;
+ u32 rx_alignment_symbol_errors;
+ u32 rx_pause_frames;
+ u32 rx_priority_pause_frames;
+ u32 rx_control_frames;
+ u32 rx_in_range_errors;
+ u32 rx_out_range_errors;
+ u32 rx_frame_too_long;
+ u32 rx_address_match_errors;
+ u32 rx_dropped_too_small;
+ u32 rx_dropped_too_short;
+ u32 rx_dropped_header_too_small;
+ u32 rx_dropped_tcp_length;
+ u32 rx_dropped_runt;
+ u32 rsvd1[10];
+ u32 rx_ip_checksum_errs;
+ u32 rx_tcp_checksum_errs;
+ u32 rx_udp_checksum_errs;
+ u32 rsvd2[7];
+ u32 rx_switched_unicast_packets;
+ u32 rx_switched_multicast_packets;
+ u32 rx_switched_broadcast_packets;
+ u32 rsvd3[3];
+ u32 tx_pauseframes;
+ u32 tx_priority_pauseframes;
+ u32 tx_controlframes;
+ u32 rsvd4[10];
+ u32 rxpp_fifo_overflow_drop;
+ u32 rx_input_fifo_overflow_drop;
+ u32 pmem_fifo_overflow_drop;
+ u32 jabber_events;
+ u32 rsvd5[3];
+};
+
+
+struct be_rxf_stats_v1 {
+ struct be_port_rxf_stats_v1 port[4];
+ u32 rsvd0[2];
+ u32 rx_drops_no_pbuf;
+ u32 rx_drops_no_txpb;
+ u32 rx_drops_no_erx_descr;
+ u32 rx_drops_no_tpre_descr;
+ u32 rsvd1[6];
+ u32 rx_drops_too_many_frags;
+ u32 rx_drops_invalid_ring;
+ u32 forwarded_packets;
+ u32 rx_drops_mtu;
+ u32 rsvd2[14];
+};
+
+struct be_erx_stats_v1 {
+ u32 rx_drops_no_fragments[68]; /* dwords 0 to 67 */
+ u32 rsvd[4];
+};
+
+struct be_hw_stats_v1 {
+ struct be_rxf_stats_v1 rxf;
+ u32 rsvd0[BE_TXP_SW_SZ];
+ struct be_erx_stats_v1 erx;
+ struct be_pmem_stats pmem;
+ u32 rsvd1[3];
+};
+
+struct be_cmd_req_get_stats_v1 {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd[sizeof(struct be_hw_stats_v1)];
+};
+
+struct be_cmd_resp_get_stats_v1 {
+ struct be_cmd_resp_hdr hdr;
+ struct be_hw_stats_v1 hw_stats;
+};
+
+static inline void *hw_stats_from_cmd(struct be_adapter *adapter)
+{
+ if (adapter->generation == BE_GEN3) {
+ struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
+
+ return &cmd->hw_stats;
+ } else {
+ struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
+
+ return &cmd->hw_stats;
+ }
+}
+
+static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
+{
+ if (adapter->generation == BE_GEN3) {
+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
+
+ return &hw_stats->erx;
+ } else {
+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
+
+ return &hw_stats->erx;
+ }
+}
+
+extern int be_pci_fnum_get(struct be_adapter *adapter);
+extern int be_cmd_POST(struct be_adapter *adapter);
+extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
+ u8 type, bool permanent, u32 if_handle);
+extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
+ u32 if_id, u32 *pmac_id, u32 domain);
+extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
+ u32 pmac_id, u32 domain);
+extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
+ u32 en_flags, u8 *mac, bool pmac_invalid,
+ u32 *if_handle, u32 *pmac_id, u32 domain);
+extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
+ u32 domain);
+extern int be_cmd_eq_create(struct be_adapter *adapter,
+ struct be_queue_info *eq, int eq_delay);
+extern int be_cmd_cq_create(struct be_adapter *adapter,
+ struct be_queue_info *cq, struct be_queue_info *eq,
+ bool sol_evts, bool no_delay,
+ int num_cqe_dma_coalesce);
+extern int be_cmd_mccq_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq);
+extern int be_cmd_txq_create(struct be_adapter *adapter,
+ struct be_queue_info *txq,
+ struct be_queue_info *cq);
+extern int be_cmd_rxq_create(struct be_adapter *adapter,
+ struct be_queue_info *rxq, u16 cq_id,
+ u16 frag_size, u16 max_frame_size, u32 if_id,
+ u32 rss, u8 *rss_id);
+extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
+ int type);
+extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
+ struct be_queue_info *q);
+extern int be_cmd_link_status_query(struct be_adapter *adapter,
+ u8 *mac_speed, u16 *link_speed, u32 dom);
+extern int be_cmd_reset(struct be_adapter *adapter);
+extern int be_cmd_get_stats(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd);
+extern int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd);
+extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
+ char *fw_on_flash);
+
+extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
+extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
+ u16 *vtag_array, u32 num, bool untagged,
+ bool promiscuous);
+extern int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
+extern int be_cmd_set_flow_control(struct be_adapter *adapter,
+ u32 tx_fc, u32 rx_fc);
+extern int be_cmd_get_flow_control(struct be_adapter *adapter,
+ u32 *tx_fc, u32 *rx_fc);
+extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
+ u32 *port_num, u32 *function_mode, u32 *function_caps);
+extern int be_cmd_reset_function(struct be_adapter *adapter);
+extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
+ u16 table_size);
+extern int be_process_mcc(struct be_adapter *adapter, int *status);
+extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
+ u8 port_num, u8 beacon, u8 status, u8 state);
+extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
+ u8 port_num, u32 *state);
+extern int be_cmd_write_flashrom(struct be_adapter *adapter,
+ struct be_dma_mem *cmd, u32 flash_oper,
+ u32 flash_opcode, u32 buf_size);
+extern int lancer_cmd_write_object(struct be_adapter *adapter,
+ struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset,
+ const char *obj_name,
+ u32 *data_written, u8 *addn_status);
+int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
+ int offset);
+extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
+ struct be_dma_mem *nonemb_cmd);
+extern int be_cmd_fw_init(struct be_adapter *adapter);
+extern int be_cmd_fw_clean(struct be_adapter *adapter);
+extern void be_async_mcc_enable(struct be_adapter *adapter);
+extern void be_async_mcc_disable(struct be_adapter *adapter);
+extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
+ u32 loopback_type, u32 pkt_size,
+ u32 num_pkts, u64 pattern);
+extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
+ u32 byte_cnt, struct be_dma_mem *cmd);
+extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd);
+extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+ u8 loopback_type, u8 enable);
+extern int be_cmd_get_phy_info(struct be_adapter *adapter,
+ struct be_phy_info *phy_info);
+extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
+extern void be_detect_dump_ue(struct be_adapter *adapter);
+extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
+extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
+extern int be_cmd_req_native_mode(struct be_adapter *adapter);
+extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
+extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
+
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
new file mode 100644
index 000000000000..bf8153ea4ed8
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -0,0 +1,724 @@
+/*
+ * Copyright (C) 2005 - 2011 Emulex
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include "be.h"
+#include "be_cmds.h"
+#include <linux/ethtool.h>
+
+struct be_ethtool_stat {
+ char desc[ETH_GSTRING_LEN];
+ int type;
+ int size;
+ int offset;
+};
+
+enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
+#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
+ offsetof(_struct, field)
+#define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
+ FIELDINFO(struct be_tx_stats, field)
+#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
+ FIELDINFO(struct be_rx_stats, field)
+#define DRVSTAT_INFO(field) #field, DRVSTAT,\
+ FIELDINFO(struct be_drv_stats, field)
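+
+/* Editor's note (not in the original commit): each table entry below expands
+ * to a name string plus the field's size and byte offset, e.g.
+ * {DRVSTAT_INFO(rx_crc_errors)} becomes
+ * {"rx_crc_errors", DRVSTAT, FIELD_SIZEOF(struct be_drv_stats,
+ * rx_crc_errors), offsetof(struct be_drv_stats, rx_crc_errors)}, which is
+ * what lets be_get_ethtool_stats() fetch values generically by offset. */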
+
+static const struct be_ethtool_stat et_stats[] = {
+ {DRVSTAT_INFO(tx_events)},
+ {DRVSTAT_INFO(rx_crc_errors)},
+ {DRVSTAT_INFO(rx_alignment_symbol_errors)},
+ {DRVSTAT_INFO(rx_pause_frames)},
+ {DRVSTAT_INFO(rx_control_frames)},
+ {DRVSTAT_INFO(rx_in_range_errors)},
+ {DRVSTAT_INFO(rx_out_range_errors)},
+ {DRVSTAT_INFO(rx_frame_too_long)},
+ {DRVSTAT_INFO(rx_address_match_errors)},
+ {DRVSTAT_INFO(rx_dropped_too_small)},
+ {DRVSTAT_INFO(rx_dropped_too_short)},
+ {DRVSTAT_INFO(rx_dropped_header_too_small)},
+ {DRVSTAT_INFO(rx_dropped_tcp_length)},
+ {DRVSTAT_INFO(rx_dropped_runt)},
+ {DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
+ {DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
+ {DRVSTAT_INFO(rx_ip_checksum_errs)},
+ {DRVSTAT_INFO(rx_tcp_checksum_errs)},
+ {DRVSTAT_INFO(rx_udp_checksum_errs)},
+ {DRVSTAT_INFO(tx_pauseframes)},
+ {DRVSTAT_INFO(tx_controlframes)},
+ {DRVSTAT_INFO(rx_priority_pause_frames)},
+ {DRVSTAT_INFO(pmem_fifo_overflow_drop)},
+ {DRVSTAT_INFO(jabber_events)},
+ {DRVSTAT_INFO(rx_drops_no_pbuf)},
+ {DRVSTAT_INFO(rx_drops_no_txpb)},
+ {DRVSTAT_INFO(rx_drops_no_erx_descr)},
+ {DRVSTAT_INFO(rx_drops_no_tpre_descr)},
+ {DRVSTAT_INFO(rx_drops_too_many_frags)},
+ {DRVSTAT_INFO(rx_drops_invalid_ring)},
+ {DRVSTAT_INFO(forwarded_packets)},
+ {DRVSTAT_INFO(rx_drops_mtu)},
+ {DRVSTAT_INFO(eth_red_drops)},
+ {DRVSTAT_INFO(be_on_die_temperature)}
+};
+#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
+
+/* Stats related to multi RX queues: get_stats routine assumes bytes, pkts
+ * are first and second members respectively.
+ */
+static const struct be_ethtool_stat et_rx_stats[] = {
+ {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
+ {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
+ {DRVSTAT_RX_INFO(rx_polls)},
+ {DRVSTAT_RX_INFO(rx_events)},
+ {DRVSTAT_RX_INFO(rx_compl)},
+ {DRVSTAT_RX_INFO(rx_mcast_pkts)},
+ {DRVSTAT_RX_INFO(rx_post_fail)},
+ {DRVSTAT_RX_INFO(rx_drops_no_skbs)},
+ {DRVSTAT_RX_INFO(rx_drops_no_frags)}
+};
+#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
+
+/* Stats related to multi TX queues: get_stats routine assumes compl is the
+ * first member
+ */
+static const struct be_ethtool_stat et_tx_stats[] = {
+ {DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
+ {DRVSTAT_TX_INFO(tx_bytes)},
+ {DRVSTAT_TX_INFO(tx_pkts)},
+ {DRVSTAT_TX_INFO(tx_reqs)},
+ {DRVSTAT_TX_INFO(tx_wrbs)},
+ {DRVSTAT_TX_INFO(tx_compl)},
+ {DRVSTAT_TX_INFO(tx_stops)}
+};
+#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
+
+static const char et_self_tests[][ETH_GSTRING_LEN] = {
+ "MAC Loopback test",
+ "PHY Loopback test",
+ "External Loopback test",
+ "DDR DMA test",
+ "Link test"
+};
+
+#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
+#define BE_MAC_LOOPBACK 0x0
+#define BE_PHY_LOOPBACK 0x1
+#define BE_ONE_PORT_EXT_LOOPBACK 0x2
+#define BE_NO_LOOPBACK 0xff
+
+static void be_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ char fw_on_flash[FW_VER_LEN];
+
+ memset(fw_on_flash, 0, sizeof(fw_on_flash));
+ be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash);
+
+ strcpy(drvinfo->driver, DRV_NAME);
+ strcpy(drvinfo->version, DRV_VER);
+ strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
+ if (memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN) != 0) {
+ strcat(drvinfo->fw_version, " [");
+ strcat(drvinfo->fw_version, fw_on_flash);
+ strcat(drvinfo->fw_version, "]");
+ }
+
+ strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
+ drvinfo->testinfo_len = 0;
+ drvinfo->regdump_len = 0;
+ drvinfo->eedump_len = 0;
+}
+
+static int
+be_get_reg_len(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u32 log_size = 0;
+
+ if (be_physfn(adapter))
+ be_cmd_get_reg_len(adapter, &log_size);
+
+ return log_size;
+}
+
+static void
+be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (be_physfn(adapter)) {
+ memset(buf, 0, regs->len);
+ be_cmd_get_regs(adapter, regs->len, buf);
+ }
+}
+
+static int
+be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_eq_obj *rx_eq = &adapter->rx_obj[0].rx_eq;
+ struct be_eq_obj *tx_eq = &adapter->tx_eq;
+
+ coalesce->rx_coalesce_usecs = rx_eq->cur_eqd;
+ coalesce->rx_coalesce_usecs_high = rx_eq->max_eqd;
+ coalesce->rx_coalesce_usecs_low = rx_eq->min_eqd;
+
+ coalesce->tx_coalesce_usecs = tx_eq->cur_eqd;
+ coalesce->tx_coalesce_usecs_high = tx_eq->max_eqd;
+ coalesce->tx_coalesce_usecs_low = tx_eq->min_eqd;
+
+ coalesce->use_adaptive_rx_coalesce = rx_eq->enable_aic;
+ coalesce->use_adaptive_tx_coalesce = tx_eq->enable_aic;
+
+ return 0;
+}
+
+/*
+ * This routine is used to set the interrupt coalescing delay
+ */
+static int
+be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_rx_obj *rxo;
+ struct be_eq_obj *rx_eq;
+ struct be_eq_obj *tx_eq = &adapter->tx_eq;
+ u32 rx_max, rx_min, rx_cur;
+ int status = 0, i;
+ u32 tx_cur;
+
+ if (coalesce->use_adaptive_tx_coalesce == 1)
+ return -EINVAL;
+
+ for_all_rx_queues(adapter, rxo, i) {
+ rx_eq = &rxo->rx_eq;
+
+ if (!rx_eq->enable_aic && coalesce->use_adaptive_rx_coalesce)
+ rx_eq->cur_eqd = 0;
+ rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
+
+ rx_max = coalesce->rx_coalesce_usecs_high;
+ rx_min = coalesce->rx_coalesce_usecs_low;
+ rx_cur = coalesce->rx_coalesce_usecs;
+
+ if (rx_eq->enable_aic) {
+ if (rx_max > BE_MAX_EQD)
+ rx_max = BE_MAX_EQD;
+ if (rx_min > rx_max)
+ rx_min = rx_max;
+ rx_eq->max_eqd = rx_max;
+ rx_eq->min_eqd = rx_min;
+ if (rx_eq->cur_eqd > rx_max)
+ rx_eq->cur_eqd = rx_max;
+ if (rx_eq->cur_eqd < rx_min)
+ rx_eq->cur_eqd = rx_min;
+ } else {
+ if (rx_cur > BE_MAX_EQD)
+ rx_cur = BE_MAX_EQD;
+ if (rx_eq->cur_eqd != rx_cur) {
+ status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
+ rx_cur);
+ if (!status)
+ rx_eq->cur_eqd = rx_cur;
+ }
+ }
+ }
+
+ tx_cur = coalesce->tx_coalesce_usecs;
+
+ if (tx_cur > BE_MAX_EQD)
+ tx_cur = BE_MAX_EQD;
+ if (tx_eq->cur_eqd != tx_cur) {
+ status = be_cmd_modify_eqd(adapter, tx_eq->q.id, tx_cur);
+ if (!status)
+ tx_eq->cur_eqd = tx_cur;
+ }
+
+ return 0;
+}
+
+static void
+be_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, uint64_t *data)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_rx_obj *rxo;
+ struct be_tx_obj *txo;
+ void *p;
+ unsigned int i, j, base = 0, start;
+
+ for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
+ p = (u8 *)&adapter->drv_stats + et_stats[i].offset;
+ data[i] = *(u32 *)p;
+ }
+ base += ETHTOOL_STATS_NUM;
+
+ for_all_rx_queues(adapter, rxo, j) {
+ struct be_rx_stats *stats = rx_stats(rxo);
+
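+ /* Editor's note: the fetch_begin/retry pair below is a seqcount-style
+ * read loop, so rx_bytes and rx_pkts are sampled as a consistent pair
+ * even while the datapath concurrently updates them. */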
+ do {
+ start = u64_stats_fetch_begin_bh(&stats->sync);
+ data[base] = stats->rx_bytes;
+ data[base + 1] = stats->rx_pkts;
+ } while (u64_stats_fetch_retry_bh(&stats->sync, start));
+
+ for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
+ p = (u8 *)stats + et_rx_stats[i].offset;
+ data[base + i] = *(u32 *)p;
+ }
+ base += ETHTOOL_RXSTATS_NUM;
+ }
+
+ for_all_tx_queues(adapter, txo, j) {
+ struct be_tx_stats *stats = tx_stats(txo);
+
+ do {
+ start = u64_stats_fetch_begin_bh(&stats->sync_compl);
+ data[base] = stats->tx_compl;
+ } while (u64_stats_fetch_retry_bh(&stats->sync_compl, start));
+
+ do {
+ start = u64_stats_fetch_begin_bh(&stats->sync);
+ for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
+ p = (u8 *)stats + et_tx_stats[i].offset;
+ data[base + i] =
+ (et_tx_stats[i].size == sizeof(u64)) ?
+ *(u64 *)p : *(u32 *)p;
+ }
+ } while (u64_stats_fetch_retry_bh(&stats->sync, start));
+ base += ETHTOOL_TXSTATS_NUM;
+ }
+}
+
+static void
+be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
+ uint8_t *data)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
+ memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->num_rx_qs; i++) {
+ for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
+ sprintf(data, "rxq%d: %s", i,
+ et_rx_stats[j].desc);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ for (i = 0; i < adapter->num_tx_qs; i++) {
+ for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
+ sprintf(data, "txq%d: %s", i,
+ et_tx_stats[j].desc);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ case ETH_SS_TEST:
+ for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
+ memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int be_get_sset_count(struct net_device *netdev, int stringset)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ return ETHTOOL_TESTS_NUM;
+ case ETH_SS_STATS:
+ return ETHTOOL_STATS_NUM +
+ adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
+ adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_phy_info phy_info;
+ u8 mac_speed = 0;
+ u16 link_speed = 0;
+ int status;
+
+ if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
+ status = be_cmd_link_status_query(adapter, &mac_speed,
+ &link_speed, 0);
+
+ /* link_speed is in units of 10 Mbps */
+ if (link_speed) {
+ ethtool_cmd_speed_set(ecmd, link_speed * 10);
+ } else {
+ switch (mac_speed) {
+ case PHY_LINK_SPEED_10MBPS:
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
+ break;
+ case PHY_LINK_SPEED_100MBPS:
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
+ break;
+ case PHY_LINK_SPEED_1GBPS:
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ break;
+ case PHY_LINK_SPEED_10GBPS:
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ break;
+ case PHY_LINK_SPEED_ZERO:
+ ethtool_cmd_speed_set(ecmd, 0);
+ break;
+ }
+ }
+
+ status = be_cmd_get_phy_info(adapter, &phy_info);
+ if (!status) {
+ switch (phy_info.interface_type) {
+ case PHY_TYPE_XFP_10GB:
+ case PHY_TYPE_SFP_1GB:
+ case PHY_TYPE_SFP_PLUS_10GB:
+ ecmd->port = PORT_FIBRE;
+ break;
+ default:
+ ecmd->port = PORT_TP;
+ break;
+ }
+
+ switch (phy_info.interface_type) {
+ case PHY_TYPE_KR_10GB:
+ case PHY_TYPE_KX4_10GB:
+ ecmd->autoneg = AUTONEG_ENABLE;
+ ecmd->transceiver = XCVR_INTERNAL;
+ break;
+ default:
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ }
+ }
+
+ /* Save for future use */
+ adapter->link_speed = ethtool_cmd_speed(ecmd);
+ adapter->port_type = ecmd->port;
+ adapter->transceiver = ecmd->transceiver;
+ adapter->autoneg = ecmd->autoneg;
+ } else {
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+ ecmd->port = adapter->port_type;
+ ecmd->transceiver = adapter->transceiver;
+ ecmd->autoneg = adapter->autoneg;
+ }
+
+ ecmd->duplex = DUPLEX_FULL;
+ ecmd->phy_address = adapter->port_num;
+ switch (ecmd->port) {
+ case PORT_FIBRE:
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ break;
+ case PORT_TP:
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
+ break;
+ case PORT_AUI:
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI);
+ break;
+ }
+
+ if (ecmd->autoneg) {
+ ecmd->supported |= SUPPORTED_1000baseT_Full;
+ ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= (ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full);
+ }
+
+ return 0;
+}
+
+static void
+be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ ring->rx_max_pending = adapter->rx_obj[0].q.len;
+ ring->tx_max_pending = adapter->tx_obj[0].q.len;
+
+ ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
+ ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used);
+}
+
+static void
+be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
+ ecmd->autoneg = 0;
+}
+
+static int
+be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status;
+
+ if (ecmd->autoneg != 0)
+ return -EINVAL;
+ adapter->tx_fc = ecmd->tx_pause;
+ adapter->rx_fc = ecmd->rx_pause;
+
+ status = be_cmd_set_flow_control(adapter,
+ adapter->tx_fc, adapter->rx_fc);
+ if (status)
+ dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
+
+ return status;
+}
+
+static int
+be_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
+ &adapter->beacon_state);
+ return 1; /* cycle on/off once per second */
+
+ case ETHTOOL_ID_ON:
+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
+ BEACON_STATE_ENABLED);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
+ BEACON_STATE_DISABLED);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
+ adapter->beacon_state);
+ }
+
+ return 0;
+}
+
+static bool
+be_is_wol_supported(struct be_adapter *adapter)
+{
+ return be_physfn(adapter);
+}
+
+static void
+be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (be_is_wol_supported(adapter))
+ wol->supported = WAKE_MAGIC;
+
+ if (adapter->wol)
+ wol->wolopts = WAKE_MAGIC;
+ else
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int
+be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ if ((wol->wolopts & WAKE_MAGIC) && be_is_wol_supported(adapter))
+ adapter->wol = true;
+ else
+ adapter->wol = false;
+
+ return 0;
+}
+
+static int
+be_test_ddr_dma(struct be_adapter *adapter)
+{
+ int ret, i;
+ struct be_dma_mem ddrdma_cmd;
+ static const u64 pattern[2] = {
+ 0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL
+ };
+
+ ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
+ ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
+ &ddrdma_cmd.dma, GFP_KERNEL);
+ if (!ddrdma_cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < 2; i++) {
+ ret = be_cmd_ddr_dma_test(adapter, pattern[i],
+ 4096, &ddrdma_cmd);
+ if (ret != 0)
+ goto err;
+ }
+
+err:
+ dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
+ ddrdma_cmd.dma);
+ return ret;
+}
+
+static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
+ u64 *status)
+{
+ be_cmd_set_loopback(adapter, adapter->hba_port_num,
+ loopback_type, 1);
+ *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
+ loopback_type, 1500,
+ 2, 0xabc);
+ be_cmd_set_loopback(adapter, adapter->hba_port_num,
+ BE_NO_LOOPBACK, 1);
+ return *status;
+}
+
+static void
+be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u8 mac_speed = 0;
+ u16 qos_link_speed = 0;
+
+ memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
+
+ if (test->flags & ETH_TEST_FL_OFFLINE) {
+ if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
+ &data[0]) != 0) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ }
+ if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
+ &data[1]) != 0) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ }
+ if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
+ &data[2]) != 0) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ }
+ }
+
+ if (be_test_ddr_dma(adapter) != 0) {
+ data[3] = 1;
+ test->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (be_cmd_link_status_query(adapter, &mac_speed,
+ &qos_link_speed, 0) != 0) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ data[4] = -1;
+ } else if (!mac_speed) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ data[4] = 1;
+ }
+}
+
+static int
+be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ char file_name[ETHTOOL_FLASH_MAX_FILENAME];
+
+ /* efl->data comes from user space and need not be NUL-terminated */
+ strlcpy(file_name, efl->data, ETHTOOL_FLASH_MAX_FILENAME);
+
+ return be_load_fw(adapter, file_name);
+}
+
+static int
+be_get_eeprom_len(struct net_device *netdev)
+{
+ return BE_READ_SEEPROM_LEN;
+}
+
+static int
+be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ uint8_t *data)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_dma_mem eeprom_cmd;
+ struct be_cmd_resp_seeprom_read *resp;
+ int status;
+
+ if (!eeprom->len)
+ return -EINVAL;
+
+ eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device << 16);
+
+ memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
+ eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
+ eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
+ &eeprom_cmd.dma, GFP_KERNEL);
+
+ if (!eeprom_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure. Could not read eeprom\n");
+ return -ENOMEM;
+ }
+
+ status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
+
+ if (!status) {
+ resp = eeprom_cmd.va;
+ memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
+ }
+ dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
+ eeprom_cmd.dma);
+
+ return status;
+}
+
+const struct ethtool_ops be_ethtool_ops = {
+ .get_settings = be_get_settings,
+ .get_drvinfo = be_get_drvinfo,
+ .get_wol = be_get_wol,
+ .set_wol = be_set_wol,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = be_get_eeprom_len,
+ .get_eeprom = be_read_eeprom,
+ .get_coalesce = be_get_coalesce,
+ .set_coalesce = be_set_coalesce,
+ .get_ringparam = be_get_ringparam,
+ .get_pauseparam = be_get_pauseparam,
+ .set_pauseparam = be_set_pauseparam,
+ .get_strings = be_get_stat_strings,
+ .set_phys_id = be_set_phys_id,
+ .get_sset_count = be_get_sset_count,
+ .get_ethtool_stats = be_get_ethtool_stats,
+ .get_regs_len = be_get_reg_len,
+ .get_regs = be_get_regs,
+ .flash_device = be_do_flash,
+ .self_test = be_self_test,
+};
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
new file mode 100644
index 000000000000..fbc8a915519e
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -0,0 +1,510 @@
+/*
+ * Copyright (C) 2005 - 2011 Emulex
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+/********* Mailbox door bell *************/
+/* Used for driver communication with the FW.
+ * The software must write this register twice to post any command. First,
+ * it writes the register with hi=1 and the upper bits of the physical address
+ * for the MAILBOX structure. Software must poll the ready bit until this
+ * is acknowledged. Then, software writes the register with hi=0 and the lower
+ * bits of the address. It must poll the ready bit until the command is
+ * complete. Upon completion, the MAILBOX will contain a valid completion
+ * queue entry.
+ */
+#define MPU_MAILBOX_DB_OFFSET 0x160
+#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
+#define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */
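+
+/* Editor's sketch of the two-phase protocol described above (schematic only;
+ * the exact address-bit placement lives in the driver's mailbox code):
+ *
+ *	iowrite32(upper_addr_bits | MPU_MAILBOX_DB_HI_MASK, db);
+ *	while (!(ioread32(db) & MPU_MAILBOX_DB_RDY_MASK))
+ *		udelay(...);
+ *	iowrite32(lower_addr_bits, db);		hi bit clear this time
+ *	while (!(ioread32(db) & MPU_MAILBOX_DB_RDY_MASK))
+ *		udelay(...);
+ *	the mailbox now holds a valid completion queue entry
+ */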
+
+#define MPU_EP_CONTROL 0
+
+/********** MPU semaphore ******************/
+#define MPU_EP_SEMAPHORE_OFFSET 0xac
+#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400
+#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
+#define EP_SEMAPHORE_POST_ERR_MASK 0x1
+#define EP_SEMAPHORE_POST_ERR_SHIFT 31
+
+/* MPU semaphore POST stage values */
+#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting go-ahead from host */
+#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahead to FW */
+#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
+#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
+
+
+/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
+#define SLIPORT_STATUS_OFFSET 0x404
+#define SLIPORT_CONTROL_OFFSET 0x408
+
+#define SLIPORT_STATUS_ERR_MASK 0x80000000
+#define SLIPORT_STATUS_RN_MASK 0x01000000
+#define SLIPORT_STATUS_RDY_MASK 0x00800000
+
+
+#define SLI_PORT_CONTROL_IP_MASK 0x08000000
+
+/********* Memory BAR register ************/
+#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
+/* Host Interrupt Enable: if set, interrupts are enabled, although "PCI
+ * Interrupt Disable" may still globally block interrupts in addition to the
+ * individual interrupt masks. This gives the device driver a mechanism to
+ * block all interrupts atomically without having to arbitrate for the PCI
+ * Interrupt Disable bit with the OS.
+ */
+#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
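+
+/* Editor's sketch (hypothetical code, not part of this commit): toggling
+ * the bit is a read-modify-write on the control register in the memory BAR:
+ *
+ *	u32 reg = ioread32(csr + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
+ *
+ *	if (enable)
+ *		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ *	else
+ *		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ *	iowrite32(reg, csr + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
+ */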
+
+/********* Power management (WOL) **********/
+#define PCICFG_PM_CONTROL_OFFSET 0x44
+#define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */
+
+/********* Online Control Registers *******/
+#define PCICFG_ONLINE0 0xB0
+#define PCICFG_ONLINE1 0xB4
+
+/********* UE Status and Mask Registers ***/
+#define PCICFG_UE_STATUS_LOW 0xA0
+#define PCICFG_UE_STATUS_HIGH 0xA4
+#define PCICFG_UE_STATUS_LOW_MASK 0xA8
+#define PCICFG_UE_STATUS_HI_MASK 0xAC
+
+/******** SLI_INTF ***********************/
+#define SLI_INTF_REG_OFFSET 0x58
+#define SLI_INTF_VALID_MASK 0xE0000000
+#define SLI_INTF_VALID 0xC0000000
+#define SLI_INTF_HINT2_MASK 0x1F000000
+#define SLI_INTF_HINT2_SHIFT 24
+#define SLI_INTF_HINT1_MASK 0x00FF0000
+#define SLI_INTF_HINT1_SHIFT 16
+#define SLI_INTF_FAMILY_MASK 0x00000F00
+#define SLI_INTF_FAMILY_SHIFT 8
+#define SLI_INTF_IF_TYPE_MASK 0x0000F000
+#define SLI_INTF_IF_TYPE_SHIFT 12
+#define SLI_INTF_REV_MASK 0x000000F0
+#define SLI_INTF_REV_SHIFT 4
+#define SLI_INTF_FT_MASK 0x00000001
+
+
+/* SLI family */
+#define BE_SLI_FAMILY 0x0
+#define LANCER_A0_SLI_FAMILY 0xA
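+
+/* Editor's sketch (not part of this commit): validating and decoding the
+ * SLI_INTF register, read from PCI config space at SLI_INTF_REG_OFFSET:
+ *
+ *	u32 sli_intf;
+ *
+ *	pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+ *	if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID)
+ *		return -EINVAL;	not an SLI-capable function
+ *	family = (sli_intf & SLI_INTF_FAMILY_MASK) >> SLI_INTF_FAMILY_SHIFT;
+ *	if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >> SLI_INTF_IF_TYPE_SHIFT;
+ */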
+
+
+/********* ISR0 Register offset **********/
+#define CEV_ISR0_OFFSET 0xC18
+#define CEV_ISR_SIZE 4
+
+/********* Event Q door bell *************/
+#define DB_EQ_OFFSET DB_CQ_OFFSET
+#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
+#define DB_EQ_RING_ID_EXT_MASK 0x3e00 /* bits 9-13 */
+#define DB_EQ_RING_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 placing at 11-15 */
+
+/* Clear the interrupt for this eq */
+#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
+/* Must be 1 */
+#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
+/* Number of event entries processed */
+#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
+/* Rearm bit */
+#define DB_EQ_REARM_SHIFT (29) /* bit 29 */
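+
+/* Editor's sketch (assembled from the masks/shifts above; the local names
+ * are illustrative): notifying/re-arming an EQ after popping npopped
+ * entries would compose the doorbell value as:
+ *
+ *	u32 val = qid & DB_EQ_RING_ID_MASK;
+ *	val |= (qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT;
+ *	val |= 1 << DB_EQ_EVNT_SHIFT;
+ *	val |= npopped << DB_EQ_NUM_POPPED_SHIFT;
+ *	if (rearm)
+ *		val |= 1 << DB_EQ_REARM_SHIFT;
+ *	iowrite32(val, db + DB_EQ_OFFSET);
+ */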
+
+/********* Compl Q door bell *************/
+#define DB_CQ_OFFSET 0x120
+#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
+#define DB_CQ_RING_ID_EXT_MASK 0x7C00 /* bits 10-14 */
+#define DB_CQ_RING_ID_EXT_MASK_SHIFT (1) /* qid bits 10-14
+ placing at 11-15 */
+
+/* Number of event entries processed */
+#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
+/* Rearm bit */
+#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
+
+/********** TX ULP door bell *************/
+#define DB_TXULP1_OFFSET 0x60
+#define DB_TXULP_RING_ID_MASK 0x7FF /* bits 0 - 10 */
+/* Number of tx entries posted */
+#define DB_TXULP_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
+#define DB_TXULP_NUM_POSTED_MASK 0x3FFF /* bits 16 - 29 */
+
+/********** RQ(erx) door bell ************/
+#define DB_RQ_OFFSET 0x100
+#define DB_RQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
+/* Number of rx frags posted */
+#define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */
+
+/********** MCC door bell ************/
+#define DB_MCCQ_OFFSET 0x140
+#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
+/* Number of entries posted */
+#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
+
+/********** SRIOV VF PCICFG OFFSET ********/
+#define SRIOV_VF_PCICFG_OFFSET (4096)
+
+/********** FAT TABLE ********/
+#define RETRIEVE_FAT 0
+#define QUERY_FAT 1
+
+/* Flashrom related descriptors */
+#define IMAGE_TYPE_FIRMWARE 160
+#define IMAGE_TYPE_BOOTCODE 224
+#define IMAGE_TYPE_OPTIONROM 32
+
+#define NUM_FLASHDIR_ENTRIES 32
+
+#define IMG_TYPE_ISCSI_ACTIVE 0
+#define IMG_TYPE_REDBOOT 1
+#define IMG_TYPE_BIOS 2
+#define IMG_TYPE_PXE_BIOS 3
+#define IMG_TYPE_FCOE_BIOS 8
+#define IMG_TYPE_ISCSI_BACKUP 9
+#define IMG_TYPE_FCOE_FW_ACTIVE 10
+#define IMG_TYPE_FCOE_FW_BACKUP 11
+#define IMG_TYPE_NCSI_FW 13
+#define IMG_TYPE_PHY_FW 99
+#define TN_8022 13
+
+#define ILLEGAL_IOCTL_REQ 2
+#define FLASHROM_OPER_PHY_FLASH 9
+#define FLASHROM_OPER_PHY_SAVE 10
+#define FLASHROM_OPER_FLASH 1
+#define FLASHROM_OPER_SAVE 2
+#define FLASHROM_OPER_REPORT 4
+
+#define FLASH_IMAGE_MAX_SIZE_g2 (1310720) /* Max firmware image size */
+#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 (262144) /* Max OPTION ROM image sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 (262144) /* Max Redboot image sz */
+#define FLASH_IMAGE_MAX_SIZE_g3 (2097152) /* Max firmware image size */
+#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 (524288) /* Max OPTION ROM image sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 (1048576) /* Max Redboot image sz */
+#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 (262144)
+#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 262144
+
+#define FLASH_NCSI_MAGIC (0x16032009)
+#define FLASH_NCSI_DISABLED (0)
+#define FLASH_NCSI_ENABLED (1)
+
+#define FLASH_NCSI_BITFILE_HDR_OFFSET (0x600000)
+
+/* Offsets for components on Flash. */
+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 (1048576)
+#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 (2359296)
+#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 (3670016)
+#define FLASH_FCoE_BACKUP_IMAGE_START_g2 (4980736)
+#define FLASH_iSCSI_BIOS_START_g2 (7340032)
+#define FLASH_PXE_BIOS_START_g2 (7864320)
+#define FLASH_FCoE_BIOS_START_g2 (524288)
+#define FLASH_REDBOOT_START_g2 (0)
+
+#define FLASH_NCSI_START_g3 (15990784)
+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 (2097152)
+#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 (4194304)
+#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 (6291456)
+#define FLASH_FCoE_BACKUP_IMAGE_START_g3 (8388608)
+#define FLASH_iSCSI_BIOS_START_g3 (12582912)
+#define FLASH_PXE_BIOS_START_g3 (13107200)
+#define FLASH_FCoE_BIOS_START_g3 (13631488)
+#define FLASH_REDBOOT_START_g3 (262144)
+#define FLASH_PHY_FW_START_g3 1310720
+
+/************* Rx Packet Type Encoding **************/
+#define BE_UNICAST_PACKET 0
+#define BE_MULTICAST_PACKET 1
+#define BE_BROADCAST_PACKET 2
+#define BE_RSVD_PACKET 3
+
+/*
+ * BE descriptors: host memory data structures whose formats
+ * are hardwired in BE silicon.
+ */
+/* Event Queue Descriptor */
+#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
+#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
+#define EQ_ENTRY_RES_ID_SHIFT 16
+
+struct be_eq_entry {
+ u32 evt;
+};
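+
+/* Editor's sketch (not from the original commit): decoding an event entry
+ * per the masks above, assuming evt has already been converted from the
+ * hardware's little-endian layout:
+ *
+ *	if (eqe->evt & EQ_ENTRY_VALID_MASK) {
+ *		u16 res_id = (eqe->evt >> EQ_ENTRY_RES_ID_SHIFT) &
+ *			     EQ_ENTRY_RES_ID_MASK;
+ *		handle the event for resource res_id
+ *	}
+ */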
+
+/* TX Queue Descriptor */
+#define ETH_WRB_FRAG_LEN_MASK 0xFFFF
+struct be_eth_wrb {
+ u32 frag_pa_hi; /* dword 0 */
+ u32 frag_pa_lo; /* dword 1 */
+ u32 rsvd0; /* dword 2 */
+ u32 frag_len; /* dword 3: bits 0 - 15 */
+} __packed;
+
+/* Pseudo amap definition for eth_hdr_wrb in which each bit of the
+ * actual structure is defined as a byte: used to calculate
+ * offset/shift/mask of each field */
+struct amap_eth_hdr_wrb {
+ u8 rsvd0[32]; /* dword 0 */
+ u8 rsvd1[32]; /* dword 1 */
+ u8 complete; /* dword 2 */
+ u8 event;
+ u8 crc;
+ u8 forward;
+ u8 lso6;
+ u8 mgmt;
+ u8 ipcs;
+ u8 udpcs;
+ u8 tcpcs;
+ u8 lso;
+ u8 vlan;
+ u8 gso[2];
+ u8 num_wrb[5];
+ u8 lso_mss[14];
+ u8 len[16]; /* dword 3 */
+ u8 vlan_tag[16];
+} __packed;
+
+struct be_eth_hdr_wrb {
+ u32 dw[4];
+};
+
+/* TX Compl Queue Descriptor */
+
+/* Pseudo amap definition for eth_tx_compl in which each bit of the
+ * actual structure is defined as a byte: used to calculate
+ * offset/shift/mask of each field */
+struct amap_eth_tx_compl {
+ u8 wrb_index[16]; /* dword 0 */
+ u8 ct[2]; /* dword 0 */
+ u8 port[2]; /* dword 0 */
+ u8 rsvd0[8]; /* dword 0 */
+ u8 status[4]; /* dword 0 */
+ u8 user_bytes[16]; /* dword 1 */
+ u8 nwh_bytes[8]; /* dword 1 */
+ u8 lso; /* dword 1 */
+ u8 cast_enc[2]; /* dword 1 */
+ u8 rsvd1[5]; /* dword 1 */
+ u8 rsvd2[32]; /* dword 2 */
+ u8 pkts[16]; /* dword 3 */
+ u8 ringid[11]; /* dword 3 */
+ u8 hash_val[4]; /* dword 3 */
+ u8 valid; /* dword 3 */
+} __packed;
+
+struct be_eth_tx_compl {
+ u32 dw[4];
+};
+
+/* RX Queue Descriptor */
+struct be_eth_rx_d {
+ u32 fragpa_hi;
+ u32 fragpa_lo;
+};
+
+/* RX Compl Queue Descriptor */
+
+/* Pseudo amap definition for BE2 and BE3 legacy mode eth_rx_compl in which
+ * each bit of the actual structure is defined as a byte: used to calculate
+ * offset/shift/mask of each field */
+struct amap_eth_rx_compl_v0 {
+ u8 vlan_tag[16]; /* dword 0 */
+ u8 pktsize[14]; /* dword 0 */
+ u8 port; /* dword 0 */
+ u8 ip_opt; /* dword 0 */
+ u8 err; /* dword 1 */
+ u8 rsshp; /* dword 1 */
+ u8 ipf; /* dword 1 */
+ u8 tcpf; /* dword 1 */
+ u8 udpf; /* dword 1 */
+ u8 ipcksm; /* dword 1 */
+ u8 l4_cksm; /* dword 1 */
+ u8 ip_version; /* dword 1 */
+ u8 macdst[6]; /* dword 1 */
+ u8 vtp; /* dword 1 */
+ u8 rsvd0; /* dword 1 */
+ u8 fragndx[10]; /* dword 1 */
+ u8 ct[2]; /* dword 1 */
+ u8 sw; /* dword 1 */
+ u8 numfrags[3]; /* dword 1 */
+ u8 rss_flush; /* dword 2 */
+ u8 cast_enc[2]; /* dword 2 */
+ u8 vtm; /* dword 2 */
+ u8 rss_bank; /* dword 2 */
+ u8 rsvd1[23]; /* dword 2 */
+ u8 lro_pkt; /* dword 2 */
+ u8 rsvd2[2]; /* dword 2 */
+ u8 valid; /* dword 2 */
+ u8 rsshash[32]; /* dword 3 */
+} __packed;
+
+/* Pseudo amap definition for BE3 native mode eth_rx_compl in which
+ * each bit of the actual structure is defined as a byte: used to calculate
+ * offset/shift/mask of each field */
+struct amap_eth_rx_compl_v1 {
+ u8 vlan_tag[16]; /* dword 0 */
+ u8 pktsize[14]; /* dword 0 */
+ u8 vtp; /* dword 0 */
+ u8 ip_opt; /* dword 0 */
+ u8 err; /* dword 1 */
+ u8 rsshp; /* dword 1 */
+ u8 ipf; /* dword 1 */
+ u8 tcpf; /* dword 1 */
+ u8 udpf; /* dword 1 */
+ u8 ipcksm; /* dword 1 */
+ u8 l4_cksm; /* dword 1 */
+ u8 ip_version; /* dword 1 */
+ u8 macdst[7]; /* dword 1 */
+ u8 rsvd0; /* dword 1 */
+ u8 fragndx[10]; /* dword 1 */
+ u8 ct[2]; /* dword 1 */
+ u8 sw; /* dword 1 */
+ u8 numfrags[3]; /* dword 1 */
+ u8 rss_flush; /* dword 2 */
+ u8 cast_enc[2]; /* dword 2 */
+ u8 vtm; /* dword 2 */
+ u8 rss_bank; /* dword 2 */
+ u8 port[2]; /* dword 2 */
+ u8 vntagp; /* dword 2 */
+ u8 header_len[8]; /* dword 2 */
+ u8 header_split[2]; /* dword 2 */
+ u8 rsvd1[13]; /* dword 2 */
+ u8 valid; /* dword 2 */
+ u8 rsshash[32]; /* dword 3 */
+} __packed;
+
+struct be_eth_rx_compl {
+ u32 dw[4];
+};
+
+struct mgmt_hba_attribs {
+ u8 flashrom_version_string[32];
+ u8 manufacturer_name[32];
+ u32 supported_modes;
+ u32 rsvd0[3];
+ u8 ncsi_ver_string[12];
+ u32 default_extended_timeout;
+ u8 controller_model_number[32];
+ u8 controller_description[64];
+ u8 controller_serial_number[32];
+ u8 ip_version_string[32];
+ u8 firmware_version_string[32];
+ u8 bios_version_string[32];
+ u8 redboot_version_string[32];
+ u8 driver_version_string[32];
+ u8 fw_on_flash_version_string[32];
+ u32 functionalities_supported;
+ u16 max_cdblength;
+ u8 asic_revision;
+ u8 generational_guid[16];
+ u8 hba_port_count;
+ u16 default_link_down_timeout;
+ u8 iscsi_ver_min_max;
+ u8 multifunction_device;
+ u8 cache_valid;
+ u8 hba_status;
+ u8 max_domains_supported;
+ u8 phy_port;
+ u32 firmware_post_status;
+ u32 hba_mtu[8];
+ u32 rsvd1[4];
+};
+
+struct mgmt_controller_attrib {
+ struct mgmt_hba_attribs hba_attribs;
+ u16 pci_vendor_id;
+ u16 pci_device_id;
+ u16 pci_sub_vendor_id;
+ u16 pci_sub_system_id;
+ u8 pci_bus_number;
+ u8 pci_device_number;
+ u8 pci_function_number;
+ u8 interface_type;
+ u64 unique_identifier;
+ u32 rsvd0[5];
+};
+
+struct controller_id {
+ u32 vendor;
+ u32 device;
+ u32 subvendor;
+ u32 subdevice;
+};
+
+struct flash_comp {
+ unsigned long offset;
+ int optype;
+ int size;
+};
+
+struct image_hdr {
+ u32 imageid;
+ u32 imageoffset;
+ u32 imagelength;
+ u32 image_checksum;
+ u8 image_version[32];
+};
+struct flash_file_hdr_g2 {
+ u8 sign[32];
+ u32 cksum;
+ u32 antidote;
+ struct controller_id cont_id;
+ u32 file_len;
+ u32 chunk_num;
+ u32 total_chunks;
+ u32 num_imgs;
+ u8 build[24];
+};
+
+struct flash_file_hdr_g3 {
+ u8 sign[52];
+ u8 ufi_version[4];
+ u32 file_len;
+ u32 cksum;
+ u32 antidote;
+ u32 num_imgs;
+ u8 build[24];
+ u8 rsvd[32];
+};
+
+struct flash_section_hdr {
+ u32 format_rev;
+ u32 cksum;
+ u32 antidote;
+ u32 build_no;
+ u8 id_string[64];
+ u32 active_entry_mask;
+ u32 valid_entry_mask;
+ u32 org_content_mask;
+ u32 rsvd0;
+ u32 rsvd1;
+ u32 rsvd2;
+ u32 rsvd3;
+ u32 rsvd4;
+};
+
+struct flash_section_entry {
+ u32 type;
+ u32 offset;
+ u32 pad_size;
+ u32 image_size;
+ u32 cksum;
+ u32 entry_point;
+ u32 rsvd0;
+ u32 rsvd1;
+ u8 ver_data[32];
+};
+
+struct flash_section_info {
+ u8 cookie[32];
+ struct flash_section_hdr fsec_hdr;
+ struct flash_section_entry fsec_entry[32];
+};
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
new file mode 100644
index 000000000000..816ce56de7ac
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -0,0 +1,3636 @@
+/*
+ * Copyright (C) 2005 - 2011 Emulex
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include <linux/prefetch.h>
+#include "be.h"
+#include "be_cmds.h"
+#include <asm/div64.h>
+
+MODULE_VERSION(DRV_VER);
+MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
+MODULE_AUTHOR("ServerEngines Corporation");
+MODULE_LICENSE("GPL");
+
+static ushort rx_frag_size = 2048;
+static unsigned int num_vfs;
+module_param(rx_frag_size, ushort, S_IRUGO);
+module_param(num_vfs, uint, S_IRUGO);
+MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
+MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
+
+static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
+ { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
+ { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
+ { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
+ { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
+ { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
+ { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, be_dev_ids);
+/* UE Status Low CSR */
+static const char * const ue_status_low_desc[] = {
+ "CEV",
+ "CTX",
+ "DBUF",
+ "ERX",
+ "Host",
+ "MPU",
+ "NDMA",
+ "PTC ",
+ "RDMA ",
+ "RXF ",
+ "RXIPS ",
+ "RXULP0 ",
+ "RXULP1 ",
+ "RXULP2 ",
+ "TIM ",
+ "TPOST ",
+ "TPRE ",
+ "TXIPS ",
+ "TXULP0 ",
+ "TXULP1 ",
+ "UC ",
+ "WDMA ",
+ "TXULP2 ",
+ "HOST1 ",
+ "P0_OB_LINK ",
+ "P1_OB_LINK ",
+ "HOST_GPIO ",
+ "MBOX ",
+ "AXGMAC0",
+ "AXGMAC1",
+ "JTAG",
+ "MPU_INTPEND"
+};
+/* UE Status High CSR */
+static const char * const ue_status_hi_desc[] = {
+ "LPCMEMHOST",
+ "MGMT_MAC",
+ "PCS0ONLINE",
+ "MPU_IRAM",
+ "PCS1ONLINE",
+ "PCTL0",
+ "PCTL1",
+ "PMEM",
+ "RR",
+ "TXPB",
+ "RXPP",
+ "XAUI",
+ "TXP",
+ "ARM",
+ "IPC",
+ "HOST2",
+ "HOST3",
+ "HOST4",
+ "HOST5",
+ "HOST6",
+ "HOST7",
+ "HOST8",
+ "HOST9",
+ "NETC",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown"
+};
+
+static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
+{
+ struct be_dma_mem *mem = &q->dma_mem;
+ if (mem->va)
+ dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+ mem->dma);
+}
+
+static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
+ u16 len, u16 entry_size)
+{
+ struct be_dma_mem *mem = &q->dma_mem;
+
+ memset(q, 0, sizeof(*q));
+ q->len = len;
+ q->entry_size = entry_size;
+ mem->size = len * entry_size;
+ mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
+ GFP_KERNEL);
+ if (!mem->va)
+ return -1;
+ memset(mem->va, 0, mem->size);
+ return 0;
+}
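+
+/* Typical pairing (illustrative): callers allocate the ring memory here and
+ * then ask the controller to create a queue object over it, e.g.
+ *
+ *	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
+ *			sizeof(struct be_eq_entry)))
+ *		return -1;
+ *	if (be_cmd_eq_create(adapter, eq, cur_eqd))
+ *		goto err;
+ *
+ * as done in be_tx_queues_create() and be_rx_queues_create() below.
+ */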
+
+static void be_intr_set(struct be_adapter *adapter, bool enable)
+{
+ u32 reg, enabled;
+
+ if (adapter->eeh_err)
+ return;
+
+ pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
+ &reg);
+ enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+
+ if (!enabled && enable)
+ reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ else if (enabled && !enable)
+ reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ else
+ return;
+
+ pci_write_config_dword(adapter->pdev,
+ PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
+}
+
+static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
+{
+ u32 val = 0;
+ val |= qid & DB_RQ_RING_ID_MASK;
+ val |= posted << DB_RQ_NUM_POSTED_SHIFT;
+
+ wmb();
+ iowrite32(val, adapter->db + DB_RQ_OFFSET);
+}
+
+static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
+{
+ u32 val = 0;
+ val |= qid & DB_TXULP_RING_ID_MASK;
+ val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
+
+ wmb();
+ iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
+}
+
+static void be_eq_notify(struct be_adapter *adapter, u16 qid,
+ bool arm, bool clear_int, u16 num_popped)
+{
+ u32 val = 0;
+ val |= qid & DB_EQ_RING_ID_MASK;
+ val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
+ DB_EQ_RING_ID_EXT_MASK_SHIFT);
+
+ if (adapter->eeh_err)
+ return;
+
+ if (arm)
+ val |= 1 << DB_EQ_REARM_SHIFT;
+ if (clear_int)
+ val |= 1 << DB_EQ_CLR_SHIFT;
+ val |= 1 << DB_EQ_EVNT_SHIFT;
+ val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
+ iowrite32(val, adapter->db + DB_EQ_OFFSET);
+}
+
+void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
+{
+ u32 val = 0;
+ val |= qid & DB_CQ_RING_ID_MASK;
+ val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
+ DB_CQ_RING_ID_EXT_MASK_SHIFT);
+
+ if (adapter->eeh_err)
+ return;
+
+ if (arm)
+ val |= 1 << DB_CQ_REARM_SHIFT;
+ val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
+ iowrite32(val, adapter->db + DB_CQ_OFFSET);
+}
+
+static int be_mac_addr_set(struct net_device *netdev, void *p)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+ int status = 0;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ /* MAC addr configuration will be done in hardware for VFs
+ * by their corresponding PFs. Just copy to netdev addr here
+ */
+ if (!be_physfn(adapter))
+ goto netdev_addr;
+
+ status = be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id, 0);
+ if (status)
+ return status;
+
+ status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
+ adapter->if_handle, &adapter->pmac_id, 0);
+netdev_addr:
+ if (!status)
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+ return status;
+}
+
+static void populate_be2_stats(struct be_adapter *adapter)
+{
+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
+ struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
+ struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
+ struct be_port_rxf_stats_v0 *port_stats =
+ &rxf_stats->port[adapter->port_num];
+ struct be_drv_stats *drvs = &adapter->drv_stats;
+
+ be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+ drvs->rx_pause_frames = port_stats->rx_pause_frames;
+ drvs->rx_crc_errors = port_stats->rx_crc_errors;
+ drvs->rx_control_frames = port_stats->rx_control_frames;
+ drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
+ drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
+ drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
+ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+ drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
+ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
+ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+ drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
+ drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
+ drvs->rx_dropped_header_too_small =
+ port_stats->rx_dropped_header_too_small;
+ drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
+ drvs->rx_alignment_symbol_errors =
+ port_stats->rx_alignment_symbol_errors;
+
+ drvs->tx_pauseframes = port_stats->tx_pauseframes;
+ drvs->tx_controlframes = port_stats->tx_controlframes;
+
+ if (adapter->port_num)
+ drvs->jabber_events = rxf_stats->port1_jabber_events;
+ else
+ drvs->jabber_events = rxf_stats->port0_jabber_events;
+ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+ drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
+ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+ drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
+ drvs->forwarded_packets = rxf_stats->forwarded_packets;
+ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+ drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
+ drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
+ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
+}
+
+static void populate_be3_stats(struct be_adapter *adapter)
+{
+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
+ struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
+ struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
+ struct be_port_rxf_stats_v1 *port_stats =
+ &rxf_stats->port[adapter->port_num];
+ struct be_drv_stats *drvs = &adapter->drv_stats;
+
+ be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+ drvs->rx_pause_frames = port_stats->rx_pause_frames;
+ drvs->rx_crc_errors = port_stats->rx_crc_errors;
+ drvs->rx_control_frames = port_stats->rx_control_frames;
+ drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
+ drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
+ drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
+ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
+ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+ drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
+ drvs->rx_dropped_header_too_small =
+ port_stats->rx_dropped_header_too_small;
+ drvs->rx_input_fifo_overflow_drop =
+ port_stats->rx_input_fifo_overflow_drop;
+ drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
+ drvs->rx_alignment_symbol_errors =
+ port_stats->rx_alignment_symbol_errors;
+ drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
+ drvs->tx_pauseframes = port_stats->tx_pauseframes;
+ drvs->tx_controlframes = port_stats->tx_controlframes;
+ drvs->jabber_events = port_stats->jabber_events;
+ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+ drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
+ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+ drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
+ drvs->forwarded_packets = rxf_stats->forwarded_packets;
+ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+ drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
+ drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
+ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
+}
+
+static void populate_lancer_stats(struct be_adapter *adapter)
+{
+ struct be_drv_stats *drvs = &adapter->drv_stats;
+ struct lancer_pport_stats *pport_stats =
+ pport_stats_from_cmd(adapter);
+
+ be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
+ drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
+ drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
+ drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
+ drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
+ drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
+ drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
+ drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
+ drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
+ drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
+ drvs->rx_dropped_tcp_length =
+ pport_stats->rx_dropped_invalid_tcp_length;
+ drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
+ drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
+ drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
+ drvs->rx_dropped_header_too_small =
+ pport_stats->rx_dropped_header_too_small;
+ drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
+ drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
+ drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
+ drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
+ drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
+ drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
+ drvs->jabber_events = pport_stats->rx_jabbers;
+ drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
+ drvs->forwarded_packets = pport_stats->num_forwards_lo;
+ drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
+ drvs->rx_drops_too_many_frags =
+ pport_stats->rx_drops_too_many_frags_lo;
+}
+
+static void accumulate_16bit_val(u32 *acc, u16 val)
+{
+#define lo(x) ((x) & 0xFFFF)
+#define hi(x) ((x) & 0xFFFF0000)
+ bool wrapped = val < lo(*acc);
+ u32 newacc = hi(*acc) + val;
+
+ if (wrapped)
+ newacc += 65536;
+ ACCESS_ONCE(*acc) = newacc;
+}
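+
+/* Worked example (illustrative): with *acc = 0x0001FFF0 (last HW sample
+ * 0xFFF0) and a fresh HW reading val = 0x0005, val < lo(*acc) flags a
+ * 16-bit wrap, so newacc = 0x00010000 + 0x0005 + 65536 = 0x00020005;
+ * the accumulated count correctly advances by 0x15.
+ */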
+
+void be_parse_stats(struct be_adapter *adapter)
+{
+ struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
+ struct be_rx_obj *rxo;
+ int i;
+
+ if (adapter->generation == BE_GEN3) {
+ if (lancer_chip(adapter))
+ populate_lancer_stats(adapter);
+ else
+ populate_be3_stats(adapter);
+ } else {
+ populate_be2_stats(adapter);
+ }
+
+ /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
+ for_all_rx_queues(adapter, rxo, i) {
+ /* below erx HW counter can actually wrap around after
+ * 65535. Driver accumulates a 32-bit value
+ */
+ accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
+ (u16)erx->rx_drops_no_fragments[rxo->q.id]);
+ }
+}
+
+static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_drv_stats *drvs = &adapter->drv_stats;
+ struct be_rx_obj *rxo;
+ struct be_tx_obj *txo;
+ u64 pkts, bytes;
+ unsigned int start;
+ int i;
+
+ for_all_rx_queues(adapter, rxo, i) {
+ const struct be_rx_stats *rx_stats = rx_stats(rxo);
+ do {
+ start = u64_stats_fetch_begin_bh(&rx_stats->sync);
+ pkts = rx_stats(rxo)->rx_pkts;
+ bytes = rx_stats(rxo)->rx_bytes;
+ } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
+ stats->rx_packets += pkts;
+ stats->rx_bytes += bytes;
+ stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
+ stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
+ rx_stats(rxo)->rx_drops_no_frags;
+ }
+
+ for_all_tx_queues(adapter, txo, i) {
+ const struct be_tx_stats *tx_stats = tx_stats(txo);
+ do {
+ start = u64_stats_fetch_begin_bh(&tx_stats->sync);
+ pkts = tx_stats(txo)->tx_pkts;
+ bytes = tx_stats(txo)->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
+ stats->tx_packets += pkts;
+ stats->tx_bytes += bytes;
+ }
+
+ /* bad pkts received */
+ stats->rx_errors = drvs->rx_crc_errors +
+ drvs->rx_alignment_symbol_errors +
+ drvs->rx_in_range_errors +
+ drvs->rx_out_range_errors +
+ drvs->rx_frame_too_long +
+ drvs->rx_dropped_too_small +
+ drvs->rx_dropped_too_short +
+ drvs->rx_dropped_header_too_small +
+ drvs->rx_dropped_tcp_length +
+ drvs->rx_dropped_runt;
+
+ /* detailed rx errors */
+ stats->rx_length_errors = drvs->rx_in_range_errors +
+ drvs->rx_out_range_errors +
+ drvs->rx_frame_too_long;
+
+ stats->rx_crc_errors = drvs->rx_crc_errors;
+
+ /* frame alignment errors */
+ stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
+
+ /* receiver fifo overrun */
+ /* drops_no_pbuf is not per i/f, it's per BE card */
+ stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
+ drvs->rx_input_fifo_overflow_drop +
+ drvs->rx_drops_no_pbuf;
+ return stats;
+}
+
+void be_link_status_update(struct be_adapter *adapter, u32 link_status)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ /* when link status changes, link speed must be re-queried from card */
+ adapter->link_speed = -1;
+ if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
+ netif_carrier_on(netdev);
+ dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
+ } else {
+ netif_carrier_off(netdev);
+ dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
+ }
+}
+
+static void be_tx_stats_update(struct be_tx_obj *txo,
+ u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
+{
+ struct be_tx_stats *stats = tx_stats(txo);
+
+ u64_stats_update_begin(&stats->sync);
+ stats->tx_reqs++;
+ stats->tx_wrbs += wrb_cnt;
+ stats->tx_bytes += copied;
+ stats->tx_pkts += (gso_segs ? gso_segs : 1);
+ if (stopped)
+ stats->tx_stops++;
+ u64_stats_update_end(&stats->sync);
+}
+
+/* Determine number of WRB entries needed to xmit data in an skb */
+static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
+ bool *dummy)
+{
+ int cnt = (skb->len > skb->data_len);
+
+ cnt += skb_shinfo(skb)->nr_frags;
+
+ /* to account for hdr wrb */
+ cnt++;
+ if (lancer_chip(adapter) || !(cnt & 1)) {
+ *dummy = false;
+ } else {
+ /* add a dummy to make it an even num */
+ cnt++;
+ *dummy = true;
+ }
+ BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
+ return cnt;
+}
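+
+/* Worked example (illustrative): a linear skb with two page frags needs
+ * 1 (linear data) + 2 (frags) + 1 (hdr wrb) = 4 WRBs; that is even, so no
+ * dummy. With a single frag the count would be 3, so a dummy WRB is added
+ * to keep the total even (not needed on Lancer).
+ */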
+
+static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
+{
+ wrb->frag_pa_hi = upper_32_bits(addr);
+ wrb->frag_pa_lo = addr & 0xFFFFFFFF;
+ wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
+}
+
+static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
+ struct sk_buff *skb, u32 wrb_cnt, u32 len)
+{
+ u8 vlan_prio = 0;
+ u16 vlan_tag = 0;
+
+ memset(hdr, 0, sizeof(*hdr));
+
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
+
+ if (skb_is_gso(skb)) {
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
+ hdr, skb_shinfo(skb)->gso_size);
+ if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
+ if (lancer_chip(adapter) && adapter->sli_family ==
+ LANCER_A0_SLI_FAMILY) {
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
+ if (is_tcp_pkt(skb))
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb,
+ tcpcs, hdr, 1);
+ else if (is_udp_pkt(skb))
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb,
+ udpcs, hdr, 1);
+ }
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (is_tcp_pkt(skb))
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
+ else if (is_udp_pkt(skb))
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
+ }
+
+ if (vlan_tx_tag_present(skb)) {
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
+ vlan_tag = vlan_tx_tag_get(skb);
+ vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ /* If vlan priority provided by OS is NOT in available bmap */
+ if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
+ vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
+ adapter->recommended_prio;
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
+ }
+
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
+}
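+
+/* Illustrative: if an skb carries VLAN tag 0x6005 (prio 3, vid 5) but bit 3
+ * of adapter->vlan_prio_bmap is clear, the priority bits are overwritten
+ * with adapter->recommended_prio before the tag is written to the hdr wrb.
+ */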
+
+static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
+ bool unmap_single)
+{
+ dma_addr_t dma;
+
+ be_dws_le_to_cpu(wrb, sizeof(*wrb));
+
+ dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
+ if (wrb->frag_len) {
+ if (unmap_single)
+ dma_unmap_single(dev, dma, wrb->frag_len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
+ }
+}
+
+static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
+ struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
+{
+ dma_addr_t busaddr;
+ int i, copied = 0;
+ struct device *dev = &adapter->pdev->dev;
+ struct sk_buff *first_skb = skb;
+ struct be_eth_wrb *wrb;
+ struct be_eth_hdr_wrb *hdr;
+ bool map_single = false;
+ u16 map_head;
+
+ hdr = queue_head_node(txq);
+ queue_head_inc(txq);
+ map_head = txq->head;
+
+ if (skb->len > skb->data_len) {
+ int len = skb_headlen(skb);
+ busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, busaddr))
+ goto dma_err;
+ map_single = true;
+ wrb = queue_head_node(txq);
+ wrb_fill(wrb, busaddr, len);
+ be_dws_cpu_to_le(wrb, sizeof(*wrb));
+ queue_head_inc(txq);
+ copied += len;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *frag =
+ &skb_shinfo(skb)->frags[i];
+ busaddr = skb_frag_dma_map(dev, frag, 0,
+ frag->size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, busaddr))
+ goto dma_err;
+ wrb = queue_head_node(txq);
+ wrb_fill(wrb, busaddr, frag->size);
+ be_dws_cpu_to_le(wrb, sizeof(*wrb));
+ queue_head_inc(txq);
+ copied += frag->size;
+ }
+
+ if (dummy_wrb) {
+ wrb = queue_head_node(txq);
+ wrb_fill(wrb, 0, 0);
+ be_dws_cpu_to_le(wrb, sizeof(*wrb));
+ queue_head_inc(txq);
+ }
+
+ wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
+ be_dws_cpu_to_le(hdr, sizeof(*hdr));
+
+ return copied;
+dma_err:
+ txq->head = map_head;
+ while (copied) {
+ wrb = queue_head_node(txq);
+ unmap_tx_frag(dev, wrb, map_single);
+ map_single = false;
+ copied -= wrb->frag_len;
+ queue_head_inc(txq);
+ }
+ return 0;
+}
+
+static netdev_tx_t be_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
+ struct be_queue_info *txq = &txo->q;
+ u32 wrb_cnt = 0, copied = 0;
+ u32 start = txq->head;
+ bool dummy_wrb, stopped = false;
+
+ wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
+
+ copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
+ if (copied) {
+ /* record the sent skb in the sent_skb table */
+ BUG_ON(txo->sent_skb_list[start]);
+ txo->sent_skb_list[start] = skb;
+
+ /* Ensure txq has space for the next skb; Else stop the queue
+ * *BEFORE* ringing the tx doorbell, so that we serialize the
+ * tx compls of the current transmit which'll wake up the queue
+ */
+ atomic_add(wrb_cnt, &txq->used);
+ if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
+ txq->len) {
+ netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
+ stopped = true;
+ }
+
+ be_txq_notify(adapter, txq->id, wrb_cnt);
+
+ be_tx_stats_update(txo, wrb_cnt, copied,
+ skb_shinfo(skb)->gso_segs, stopped);
+ } else {
+ txq->head = start;
+ dev_kfree_skb_any(skb);
+ }
+ return NETDEV_TX_OK;
+}
+
+static int be_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ if (new_mtu < BE_MIN_MTU ||
+ new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN))) {
+ dev_info(&adapter->pdev->dev,
+ "MTU must be between %d and %d bytes\n",
+ BE_MIN_MTU,
+ (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
+ return -EINVAL;
+ }
+ dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+ return 0;
+}
+
+/*
+ * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
+ * If the user configures more, place BE in vlan promiscuous mode.
+ */
+static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
+{
+ u16 vtag[BE_NUM_VLANS_SUPPORTED];
+ u16 ntags = 0, i;
+ int status = 0;
+ u32 if_handle;
+
+ if (vf) {
+ if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
+ vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
+ status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
+ }
+
+ /* No need to further configure vids if in promiscuous mode */
+ if (adapter->promiscuous)
+ return 0;
+
+ if (adapter->vlans_added <= adapter->max_vlans) {
+ /* Construct VLAN Table to give to HW */
+ for (i = 0; i < VLAN_N_VID; i++) {
+ if (adapter->vlan_tag[i]) {
+ vtag[ntags] = cpu_to_le16(i);
+ ntags++;
+ }
+ }
+ status = be_cmd_vlan_config(adapter, adapter->if_handle,
+ vtag, ntags, 1, 0);
+ } else {
+ status = be_cmd_vlan_config(adapter, adapter->if_handle,
+ NULL, 0, 1, 1);
+ }
+
+ return status;
+}
+
+static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ adapter->vlans_added++;
+ if (!be_physfn(adapter))
+ return;
+
+ adapter->vlan_tag[vid] = 1;
+ if (adapter->vlans_added <= (adapter->max_vlans + 1))
+ be_vid_config(adapter, false, 0);
+}
+
+static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ adapter->vlans_added--;
+
+ if (!be_physfn(adapter))
+ return;
+
+ adapter->vlan_tag[vid] = 0;
+ if (adapter->vlans_added <= adapter->max_vlans)
+ be_vid_config(adapter, false, 0);
+}
+
+static void be_set_multicast_list(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (netdev->flags & IFF_PROMISC) {
+ be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
+ adapter->promiscuous = true;
+ goto done;
+ }
+
+ /* BE was previously in promiscuous mode; disable it */
+ if (adapter->promiscuous) {
+ adapter->promiscuous = false;
+ be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
+
+ if (adapter->vlans_added)
+ be_vid_config(adapter, false, 0);
+ }
+
+ /* Enable multicast promisc if num configured exceeds what we support */
+ if (netdev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(netdev) > BE_MAX_MC) {
+ be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
+ goto done;
+ }
+
+ be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
+done:
+ return;
+}
+
+static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status;
+
+ if (!adapter->sriov_enabled)
+ return -EPERM;
+
+ if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
+ return -EINVAL;
+
+ if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
+ status = be_cmd_pmac_del(adapter,
+ adapter->vf_cfg[vf].vf_if_handle,
+ adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+
+ status = be_cmd_pmac_add(adapter, mac,
+ adapter->vf_cfg[vf].vf_if_handle,
+ &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+
+ if (status)
+ dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
+ mac, vf);
+ else
+ memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+
+ return status;
+}
+
+static int be_get_vf_config(struct net_device *netdev, int vf,
+ struct ifla_vf_info *vi)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (!adapter->sriov_enabled)
+ return -EPERM;
+
+ if (vf >= num_vfs)
+ return -EINVAL;
+
+ vi->vf = vf;
+ vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
+ vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
+ vi->qos = 0;
+ memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
+
+ return 0;
+}
+
+static int be_set_vf_vlan(struct net_device *netdev,
+ int vf, u16 vlan, u8 qos)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status = 0;
+
+ if (!adapter->sriov_enabled)
+ return -EPERM;
+
+ if ((vf >= num_vfs) || (vlan > 4095))
+ return -EINVAL;
+
+ if (vlan) {
+ adapter->vf_cfg[vf].vf_vlan_tag = vlan;
+ adapter->vlans_added++;
+ } else {
+ adapter->vf_cfg[vf].vf_vlan_tag = 0;
+ adapter->vlans_added--;
+ }
+
+ status = be_vid_config(adapter, true, vf);
+
+ if (status)
+ dev_info(&adapter->pdev->dev,
+ "VLAN %d config on VF %d failed\n", vlan, vf);
+ return status;
+}
+
+static int be_set_vf_tx_rate(struct net_device *netdev,
+ int vf, int rate)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status = 0;
+
+ if (!adapter->sriov_enabled)
+ return -EPERM;
+
+ if ((vf >= num_vfs) || (rate < 0))
+ return -EINVAL;
+
+ if (rate > 10000)
+ rate = 10000;
+
+ adapter->vf_cfg[vf].vf_tx_rate = rate;
+ status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
+
+ if (status)
+ dev_info(&adapter->pdev->dev,
+ "tx rate %d on VF %d failed\n", rate, vf);
+ return status;
+}
+
+static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
+{
+ struct be_eq_obj *rx_eq = &rxo->rx_eq;
+ struct be_rx_stats *stats = rx_stats(rxo);
+ ulong now = jiffies;
+ ulong delta = now - stats->rx_jiffies;
+ u64 pkts;
+ unsigned int start, eqd;
+
+ if (!rx_eq->enable_aic)
+ return;
+
+ /* Wrapped around */
+ if (time_before(now, stats->rx_jiffies)) {
+ stats->rx_jiffies = now;
+ return;
+ }
+
+ /* Update once a second */
+ if (delta < HZ)
+ return;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&stats->sync);
+ pkts = stats->rx_pkts;
+ } while (u64_stats_fetch_retry_bh(&stats->sync, start));
+
+ stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) /
+ (delta / HZ);
+ stats->rx_pkts_prev = pkts;
+ stats->rx_jiffies = now;
+ eqd = stats->rx_pps / 110000;
+ eqd = eqd << 3;
+ if (eqd > rx_eq->max_eqd)
+ eqd = rx_eq->max_eqd;
+ if (eqd < rx_eq->min_eqd)
+ eqd = rx_eq->min_eqd;
+ if (eqd < 10)
+ eqd = 0;
+ if (eqd != rx_eq->cur_eqd) {
+ be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
+ rx_eq->cur_eqd = eqd;
+ }
+}
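+
+/* Worked example (illustrative): at rx_pps = 440000, eqd = 440000 / 110000
+ * = 4, then 4 << 3 = 32; the result is clamped to [min_eqd, max_eqd] and
+ * values below 10 are rounded down to 0 (no interrupt delay).
+ */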
+
+static void be_rx_stats_update(struct be_rx_obj *rxo,
+ struct be_rx_compl_info *rxcp)
+{
+ struct be_rx_stats *stats = rx_stats(rxo);
+
+ u64_stats_update_begin(&stats->sync);
+ stats->rx_compl++;
+ stats->rx_bytes += rxcp->pkt_size;
+ stats->rx_pkts++;
+ if (rxcp->pkt_type == BE_MULTICAST_PACKET)
+ stats->rx_mcast_pkts++;
+ if (rxcp->err)
+ stats->rx_compl_err++;
+ u64_stats_update_end(&stats->sync);
+}
+
+static inline bool csum_passed(struct be_rx_compl_info *rxcp)
+{
+ /* L4 checksum is not reliable for non-TCP/UDP packets.
+ * Also ignore ipcksm for ipv6 pkts */
+ return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
+ (rxcp->ip_csum || rxcp->ipv6);
+}
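+
+/* E.g. (illustrative): a TCP/IPv4 frame with both rxcp->l4_csum and
+ * rxcp->ip_csum set passes; for IPv6 the IP checksum bit is ignored as
+ * IPv6 has no header checksum; non-TCP/UDP frames always fail this check.
+ */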
+
+static struct be_rx_page_info *
+get_rx_page_info(struct be_adapter *adapter,
+ struct be_rx_obj *rxo,
+ u16 frag_idx)
+{
+ struct be_rx_page_info *rx_page_info;
+ struct be_queue_info *rxq = &rxo->q;
+
+ rx_page_info = &rxo->page_info_tbl[frag_idx];
+ BUG_ON(!rx_page_info->page);
+
+ if (rx_page_info->last_page_user) {
+ dma_unmap_page(&adapter->pdev->dev,
+ dma_unmap_addr(rx_page_info, bus),
+ adapter->big_page_size, DMA_FROM_DEVICE);
+ rx_page_info->last_page_user = false;
+ }
+
+ atomic_dec(&rxq->used);
+ return rx_page_info;
+}
+
+/* Throw away the data in the Rx completion */
+static void be_rx_compl_discard(struct be_adapter *adapter,
+ struct be_rx_obj *rxo,
+ struct be_rx_compl_info *rxcp)
+{
+ struct be_queue_info *rxq = &rxo->q;
+ struct be_rx_page_info *page_info;
+ u16 i, num_rcvd = rxcp->num_rcvd;
+
+ for (i = 0; i < num_rcvd; i++) {
+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
+ put_page(page_info->page);
+ memset(page_info, 0, sizeof(*page_info));
+ index_inc(&rxcp->rxq_idx, rxq->len);
+ }
+}
+
+/*
+ * skb_fill_rx_data forms a complete skb for an ether frame
+ * indicated by rxcp.
+ */
+static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
+ struct sk_buff *skb, struct be_rx_compl_info *rxcp)
+{
+ struct be_queue_info *rxq = &rxo->q;
+ struct be_rx_page_info *page_info;
+ u16 i, j;
+ u16 hdr_len, curr_frag_len, remaining;
+ u8 *start;
+
+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
+ start = page_address(page_info->page) + page_info->page_offset;
+ prefetch(start);
+
+ /* Copy data in the first descriptor of this completion */
+ curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
+
+ /* Copy the header portion into skb_data */
+ hdr_len = min(BE_HDR_LEN, curr_frag_len);
+ memcpy(skb->data, start, hdr_len);
+ skb->len = curr_frag_len;
+ if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
+ /* Complete packet has now been moved to data */
+ put_page(page_info->page);
+ skb->data_len = 0;
+ skb->tail += curr_frag_len;
+ } else {
+ skb_shinfo(skb)->nr_frags = 1;
+ skb_frag_set_page(skb, 0, page_info->page);
+ skb_shinfo(skb)->frags[0].page_offset =
+ page_info->page_offset + hdr_len;
+ skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
+ skb->data_len = curr_frag_len - hdr_len;
+ skb->tail += hdr_len;
+ }
+ page_info->page = NULL;
+
+ if (rxcp->pkt_size <= rx_frag_size) {
+ BUG_ON(rxcp->num_rcvd != 1);
+ return;
+ }
+
+ /* More frags present for this completion */
+ index_inc(&rxcp->rxq_idx, rxq->len);
+ remaining = rxcp->pkt_size - curr_frag_len;
+ for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
+ curr_frag_len = min(remaining, rx_frag_size);
+
+ /* Coalesce all frags from the same physical page in one slot */
+ if (page_info->page_offset == 0) {
+ /* Fresh page */
+ j++;
+ skb_frag_set_page(skb, j, page_info->page);
+ skb_shinfo(skb)->frags[j].page_offset =
+ page_info->page_offset;
+ skb_shinfo(skb)->frags[j].size = 0;
+ skb_shinfo(skb)->nr_frags++;
+ } else {
+ put_page(page_info->page);
+ }
+
+ skb_shinfo(skb)->frags[j].size += curr_frag_len;
+ skb->len += curr_frag_len;
+ skb->data_len += curr_frag_len;
+
+ remaining -= curr_frag_len;
+ index_inc(&rxcp->rxq_idx, rxq->len);
+ page_info->page = NULL;
+ }
+ BUG_ON(j > MAX_SKB_FRAGS);
+}
+
+/* Process the RX completion indicated by rxcp when GRO is disabled */
+static void be_rx_compl_process(struct be_adapter *adapter,
+ struct be_rx_obj *rxo,
+ struct be_rx_compl_info *rxcp)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
+ if (unlikely(!skb)) {
+ rx_stats(rxo)->rx_drops_no_skbs++;
+ be_rx_compl_discard(adapter, rxo, rxcp);
+ return;
+ }
+
+ skb_fill_rx_data(adapter, rxo, skb, rxcp);
+
+ if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+
+ skb->truesize = skb->len + sizeof(struct sk_buff);
+ skb->protocol = eth_type_trans(skb, netdev);
+ if (adapter->netdev->features & NETIF_F_RXHASH)
+ skb->rxhash = rxcp->rss_hash;
+
+ if (rxcp->vlanf)
+ __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
+
+ netif_receive_skb(skb);
+}
+
+/* Process the RX completion indicated by rxcp when GRO is enabled */
+static void be_rx_compl_process_gro(struct be_adapter *adapter,
+ struct be_rx_obj *rxo,
+ struct be_rx_compl_info *rxcp)
+{
+ struct be_rx_page_info *page_info;
+ struct sk_buff *skb = NULL;
+ struct be_queue_info *rxq = &rxo->q;
+ struct be_eq_obj *eq_obj = &rxo->rx_eq;
+ u16 remaining, curr_frag_len;
+ u16 i, j;
+
+ skb = napi_get_frags(&eq_obj->napi);
+ if (!skb) {
+ be_rx_compl_discard(adapter, rxo, rxcp);
+ return;
+ }
+
+ remaining = rxcp->pkt_size;
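+ /* j is u16; j = -1 wraps to 0xFFFF and the first iteration's j++
+ * (fresh page for i == 0) brings it to 0 */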
+ for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
+
+ curr_frag_len = min(remaining, rx_frag_size);
+
+ /* Coalesce all frags from the same physical page in one slot */
+ if (i == 0 || page_info->page_offset == 0) {
+ /* First frag or Fresh page */
+ j++;
+ skb_frag_set_page(skb, j, page_info->page);
+ skb_shinfo(skb)->frags[j].page_offset =
+ page_info->page_offset;
+ skb_shinfo(skb)->frags[j].size = 0;
+ } else {
+ put_page(page_info->page);
+ }
+ skb_shinfo(skb)->frags[j].size += curr_frag_len;
+
+ remaining -= curr_frag_len;
+ index_inc(&rxcp->rxq_idx, rxq->len);
+ memset(page_info, 0, sizeof(*page_info));
+ }
+ BUG_ON(j > MAX_SKB_FRAGS);
+
+ skb_shinfo(skb)->nr_frags = j + 1;
+ skb->len = rxcp->pkt_size;
+ skb->data_len = rxcp->pkt_size;
+ skb->truesize += rxcp->pkt_size;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (adapter->netdev->features & NETIF_F_RXHASH)
+ skb->rxhash = rxcp->rss_hash;
+
+ if (rxcp->vlanf)
+ __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
+
+ napi_gro_frags(&eq_obj->napi);
+}
+
+static void be_parse_rx_compl_v1(struct be_adapter *adapter,
+ struct be_eth_rx_compl *compl,
+ struct be_rx_compl_info *rxcp)
+{
+ rxcp->pkt_size =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
+ rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
+ rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
+ rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
+ rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
+ rxcp->ip_csum =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
+ rxcp->l4_csum =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
+ rxcp->ipv6 =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
+ rxcp->rxq_idx =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
+ rxcp->num_rcvd =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
+ rxcp->pkt_type =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
+ rxcp->rss_hash =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
+ if (rxcp->vlanf) {
+ rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
+ compl);
+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
+ compl);
+ }
+ rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
+}
+
+static void be_parse_rx_compl_v0(struct be_adapter *adapter,
+ struct be_eth_rx_compl *compl,
+ struct be_rx_compl_info *rxcp)
+{
+ rxcp->pkt_size =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
+ rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
+ rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
+ rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
+ rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
+ rxcp->ip_csum =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
+ rxcp->l4_csum =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
+ rxcp->ipv6 =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
+ rxcp->rxq_idx =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
+ rxcp->num_rcvd =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
+ rxcp->pkt_type =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
+ rxcp->rss_hash =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
+ if (rxcp->vlanf) {
+ rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
+ compl);
+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
+ compl);
+ }
+ rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
+}
+
+static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
+{
+ struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
+ struct be_rx_compl_info *rxcp = &rxo->rxcp;
+ struct be_adapter *adapter = rxo->adapter;
+
+ /* For checking the valid bit it is Ok to use either definition as the
+ * valid bit is at the same position in both v0 and v1 Rx compl */
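+ /* Each descriptor bit is declared as one byte in the amap struct, so
+ * offsetof() yields the bit position of 'valid' and dividing by 32
+ * selects the dword that holds it. */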
+ if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
+ return NULL;
+
+ rmb();
+ be_dws_le_to_cpu(compl, sizeof(*compl));
+
+ if (adapter->be3_native)
+ be_parse_rx_compl_v1(adapter, compl, rxcp);
+ else
+ be_parse_rx_compl_v0(adapter, compl, rxcp);
+
+ if (rxcp->vlanf) {
+ /* vlanf could be wrongly set in some cards.
+ * ignore if vtm is not set */
+ if ((adapter->function_mode & 0x400) && !rxcp->vtm)
+ rxcp->vlanf = 0;
+
+ if (!lancer_chip(adapter))
+ rxcp->vlan_tag = swab16(rxcp->vlan_tag);
+
+ if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
+ !adapter->vlan_tag[rxcp->vlan_tag])
+ rxcp->vlanf = 0;
+ }
+
+ /* As the compl has been parsed, reset it; we won't touch it again */
+ compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
+
+ queue_tail_inc(&rxo->cq);
+ return rxcp;
+}
+
+static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
+{
+ u32 order = get_order(size);
+
+ if (order > 0)
+ gfp |= __GFP_COMP;
+ return alloc_pages(gfp, order);
+}
+
+/*
+ * Allocate a page, split it to fragments of size rx_frag_size and post as
+ * receive buffers to BE
+ */
+static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
+{
+ struct be_adapter *adapter = rxo->adapter;
+ struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
+ struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
+ struct be_queue_info *rxq = &rxo->q;
+ struct page *pagep = NULL;
+ struct be_eth_rx_d *rxd;
+ u64 page_dmaaddr = 0, frag_dmaaddr;
+ u32 posted, page_offset = 0;
+
+ page_info = &rxo->page_info_tbl[rxq->head];
+ for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
+ if (!pagep) {
+ pagep = be_alloc_pages(adapter->big_page_size, gfp);
+ if (unlikely(!pagep)) {
+ rx_stats(rxo)->rx_post_fail++;
+ break;
+ }
+ page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
+ 0, adapter->big_page_size,
+ DMA_FROM_DEVICE);
+ page_info->page_offset = 0;
+ } else {
+ get_page(pagep);
+ page_info->page_offset = page_offset + rx_frag_size;
+ }
+ page_offset = page_info->page_offset;
+ page_info->page = pagep;
+ dma_unmap_addr_set(page_info, bus, page_dmaaddr);
+ frag_dmaaddr = page_dmaaddr + page_info->page_offset;
+
+ rxd = queue_head_node(rxq);
+ rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
+ rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
+
+ /* Any space left in the current big page for another frag? */
+ if ((page_offset + rx_frag_size + rx_frag_size) >
+ adapter->big_page_size) {
+ pagep = NULL;
+ page_info->last_page_user = true;
+ }
+
+ prev_page_info = page_info;
+ queue_head_inc(rxq);
+ page_info = &page_info_tbl[rxq->head];
+ }
+ if (pagep)
+ prev_page_info->last_page_user = true;
+
+ if (posted) {
+ atomic_add(posted, &rxq->used);
+ be_rxq_notify(adapter, rxq->id, posted);
+ } else if (atomic_read(&rxq->used) == 0) {
+ /* Let be_worker replenish when memory is available */
+ rxo->rx_post_starved = true;
+ }
+}
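+
+/* Sizing note (illustrative): big_page_size is computed in
+ * be_rx_queues_create() as (1 << get_order(rx_frag_size)) * PAGE_SIZE; with
+ * the default rx_frag_size of 2048 and 4K pages that is one 4096-byte page,
+ * i.e. two 2048-byte frags per page, so every second frag is tagged
+ * last_page_user.
+ */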
+
+static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
+{
+ struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
+
+ if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
+ return NULL;
+
+ rmb();
+ be_dws_le_to_cpu(txcp, sizeof(*txcp));
+
+ txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
+
+ queue_tail_inc(tx_cq);
+ return txcp;
+}
+
+static u16 be_tx_compl_process(struct be_adapter *adapter,
+ struct be_tx_obj *txo, u16 last_index)
+{
+ struct be_queue_info *txq = &txo->q;
+ struct be_eth_wrb *wrb;
+ struct sk_buff **sent_skbs = txo->sent_skb_list;
+ struct sk_buff *sent_skb;
+ u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
+ bool unmap_skb_hdr = true;
+
+ sent_skb = sent_skbs[txq->tail];
+ BUG_ON(!sent_skb);
+ sent_skbs[txq->tail] = NULL;
+
+ /* skip header wrb */
+ queue_tail_inc(txq);
+
+ do {
+ cur_index = txq->tail;
+ wrb = queue_tail_node(txq);
+ unmap_tx_frag(&adapter->pdev->dev, wrb,
+ (unmap_skb_hdr && skb_headlen(sent_skb)));
+ unmap_skb_hdr = false;
+
+ num_wrbs++;
+ queue_tail_inc(txq);
+ } while (cur_index != last_index);
+
+ kfree_skb(sent_skb);
+ return num_wrbs;
+}
+
+static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
+{
+ struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
+
+ if (!eqe->evt)
+ return NULL;
+
+ rmb();
+ eqe->evt = le32_to_cpu(eqe->evt);
+ queue_tail_inc(&eq_obj->q);
+ return eqe;
+}
+
+static int event_handle(struct be_adapter *adapter,
+ struct be_eq_obj *eq_obj,
+ bool rearm)
+{
+ struct be_eq_entry *eqe;
+ u16 num = 0;
+
+ while ((eqe = event_get(eq_obj)) != NULL) {
+ eqe->evt = 0;
+ num++;
+ }
+
+ /* Deal with any spurious interrupts that come
+ * without events
+ */
+ if (!num)
+ rearm = true;
+
+ be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
+ if (num)
+ napi_schedule(&eq_obj->napi);
+
+ return num;
+}
+
+/* Just read and notify events without processing them.
+ * Used at the time of destroying event queues */
+static void be_eq_clean(struct be_adapter *adapter,
+ struct be_eq_obj *eq_obj)
+{
+ struct be_eq_entry *eqe;
+ u16 num = 0;
+
+ while ((eqe = event_get(eq_obj)) != NULL) {
+ eqe->evt = 0;
+ num++;
+ }
+
+ if (num)
+ be_eq_notify(adapter, eq_obj->q.id, false, true, num);
+}
+
+static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
+{
+ struct be_rx_page_info *page_info;
+ struct be_queue_info *rxq = &rxo->q;
+ struct be_queue_info *rx_cq = &rxo->cq;
+ struct be_rx_compl_info *rxcp;
+ u16 tail;
+
+ /* First cleanup pending rx completions */
+ while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
+ be_rx_compl_discard(adapter, rxo, rxcp);
+ be_cq_notify(adapter, rx_cq->id, false, 1);
+ }
+
+ /* Then free posted rx buffers that were not used */
+ tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
+ for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
+ page_info = get_rx_page_info(adapter, rxo, tail);
+ put_page(page_info->page);
+ memset(page_info, 0, sizeof(*page_info));
+ }
+ BUG_ON(atomic_read(&rxq->used));
+ rxq->tail = rxq->head = 0;
+}
+
+static void be_tx_compl_clean(struct be_adapter *adapter,
+ struct be_tx_obj *txo)
+{
+ struct be_queue_info *tx_cq = &txo->cq;
+ struct be_queue_info *txq = &txo->q;
+ struct be_eth_tx_compl *txcp;
+ u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
+ struct sk_buff **sent_skbs = txo->sent_skb_list;
+ struct sk_buff *sent_skb;
+ bool dummy_wrb;
+
+ /* Wait for a max of 200ms for all the tx-completions to arrive. */
+ do {
+ while ((txcp = be_tx_compl_get(tx_cq))) {
+ end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
+ wrb_index, txcp);
+ num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
+ cmpl++;
+ }
+ if (cmpl) {
+ be_cq_notify(adapter, tx_cq->id, false, cmpl);
+ atomic_sub(num_wrbs, &txq->used);
+ cmpl = 0;
+ num_wrbs = 0;
+ }
+
+ if (atomic_read(&txq->used) == 0 || ++timeo > 200)
+ break;
+
+ mdelay(1);
+ } while (true);
+
+ if (atomic_read(&txq->used))
+ dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
+ atomic_read(&txq->used));
+
+ /* free posted tx for which compls will never arrive */
+ while (atomic_read(&txq->used)) {
+ sent_skb = sent_skbs[txq->tail];
+ end_idx = txq->tail;
+ index_adv(&end_idx,
+ wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
+ txq->len);
+ num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
+ atomic_sub(num_wrbs, &txq->used);
+ }
+}
+
+static void be_mcc_queues_destroy(struct be_adapter *adapter)
+{
+ struct be_queue_info *q;
+
+ q = &adapter->mcc_obj.q;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
+ be_queue_free(adapter, q);
+
+ q = &adapter->mcc_obj.cq;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
+ be_queue_free(adapter, q);
+}
+
+/* Must be called only after TX qs are created as MCC shares TX EQ */
+static int be_mcc_queues_create(struct be_adapter *adapter)
+{
+ struct be_queue_info *q, *cq;
+
+ /* Alloc MCC compl queue */
+ cq = &adapter->mcc_obj.cq;
+ if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
+ sizeof(struct be_mcc_compl)))
+ goto err;
+
+ /* Ask BE to create MCC compl queue; share TX's eq */
+ if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
+ goto mcc_cq_free;
+
+ /* Alloc MCC queue */
+ q = &adapter->mcc_obj.q;
+ if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
+ goto mcc_cq_destroy;
+
+ /* Ask BE to create MCC queue */
+ if (be_cmd_mccq_create(adapter, q, cq))
+ goto mcc_q_free;
+
+ return 0;
+
+mcc_q_free:
+ be_queue_free(adapter, q);
+mcc_cq_destroy:
+ be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
+mcc_cq_free:
+ be_queue_free(adapter, cq);
+err:
+ return -1;
+}
+
+static void be_tx_queues_destroy(struct be_adapter *adapter)
+{
+ struct be_queue_info *q;
+ struct be_tx_obj *txo;
+ u8 i;
+
+ for_all_tx_queues(adapter, txo, i) {
+ q = &txo->q;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
+ be_queue_free(adapter, q);
+
+ q = &txo->cq;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
+ be_queue_free(adapter, q);
+ }
+
+ /* Clear any residual events */
+ be_eq_clean(adapter, &adapter->tx_eq);
+
+ q = &adapter->tx_eq.q;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_EQ);
+ be_queue_free(adapter, q);
+}
+
+/* One TX event queue is shared by all TX compl qs */
+static int be_tx_queues_create(struct be_adapter *adapter)
+{
+ struct be_queue_info *eq, *q, *cq;
+ struct be_tx_obj *txo;
+ u8 i;
+
+ adapter->tx_eq.max_eqd = 0;
+ adapter->tx_eq.min_eqd = 0;
+ adapter->tx_eq.cur_eqd = 96;
+ adapter->tx_eq.enable_aic = false;
+
+ eq = &adapter->tx_eq.q;
+ if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
+ sizeof(struct be_eq_entry)))
+ return -1;
+
+ if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
+ goto err;
+ adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
+
+ for_all_tx_queues(adapter, txo, i) {
+ cq = &txo->cq;
+ if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
+ sizeof(struct be_eth_tx_compl)))
+ goto err;
+
+ if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
+ goto err;
+
+ q = &txo->q;
+ if (be_queue_alloc(adapter, q, TX_Q_LEN,
+ sizeof(struct be_eth_wrb)))
+ goto err;
+
+ if (be_cmd_txq_create(adapter, q, cq))
+ goto err;
+ }
+ return 0;
+
+err:
+ be_tx_queues_destroy(adapter);
+ return -1;
+}
+
+static void be_rx_queues_destroy(struct be_adapter *adapter)
+{
+ struct be_queue_info *q;
+ struct be_rx_obj *rxo;
+ int i;
+
+ for_all_rx_queues(adapter, rxo, i) {
+ be_queue_free(adapter, &rxo->q);
+
+ q = &rxo->cq;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
+ be_queue_free(adapter, q);
+
+ q = &rxo->rx_eq.q;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_EQ);
+ be_queue_free(adapter, q);
+ }
+}
+
+static u32 be_num_rxqs_want(struct be_adapter *adapter)
+{
+ if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
+ !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
+ return 1 + MAX_RSS_QS; /* one default non-RSS queue */
+ } else {
+ dev_warn(&adapter->pdev->dev,
+ "No support for multiple RX queues\n");
+ return 1;
+ }
+}
+
+static int be_rx_queues_create(struct be_adapter *adapter)
+{
+ struct be_queue_info *eq, *q, *cq;
+ struct be_rx_obj *rxo;
+ int rc, i;
+
+ adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
+ msix_enabled(adapter) ?
+ adapter->num_msix_vec - 1 : 1);
+ if (adapter->num_rx_qs != MAX_RX_QS)
+ dev_warn(&adapter->pdev->dev,
+ "Can create only %d RX queues", adapter->num_rx_qs);
+
+ adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
+ for_all_rx_queues(adapter, rxo, i) {
+ rxo->adapter = adapter;
+ rxo->rx_eq.max_eqd = BE_MAX_EQD;
+ rxo->rx_eq.enable_aic = true;
+
+ /* EQ */
+ eq = &rxo->rx_eq.q;
+ rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
+ sizeof(struct be_eq_entry));
+ if (rc)
+ goto err;
+
+ rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
+ if (rc)
+ goto err;
+
+ rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
+
+ /* CQ */
+ cq = &rxo->cq;
+ rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
+ sizeof(struct be_eth_rx_compl));
+ if (rc)
+ goto err;
+
+ rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
+ if (rc)
+ goto err;
+
+ /* Rx Q - will be created in be_open() */
+ q = &rxo->q;
+ rc = be_queue_alloc(adapter, q, RX_Q_LEN,
+ sizeof(struct be_eth_rx_d));
+ if (rc)
+ goto err;
+
+ }
+
+ return 0;
+err:
+ be_rx_queues_destroy(adapter);
+ return -1;
+}
+
+static bool event_peek(struct be_eq_obj *eq_obj)
+{
+ struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
+
+ return eqe->evt != 0;
+}
+
+static irqreturn_t be_intx(int irq, void *dev)
+{
+ struct be_adapter *adapter = dev;
+ struct be_rx_obj *rxo;
+ int isr, i, tx = 0, rx = 0;
+
+ if (lancer_chip(adapter)) {
+ if (event_peek(&adapter->tx_eq))
+ tx = event_handle(adapter, &adapter->tx_eq, false);
+ for_all_rx_queues(adapter, rxo, i) {
+ if (event_peek(&rxo->rx_eq))
+ rx |= event_handle(adapter, &rxo->rx_eq, true);
+ }
+
+ if (!(tx || rx))
+ return IRQ_NONE;
+
+ } else {
+ isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
+ (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
+ if (!isr)
+ return IRQ_NONE;
+
+ if ((1 << adapter->tx_eq.eq_idx) & isr)
+ event_handle(adapter, &adapter->tx_eq, false);
+
+ for_all_rx_queues(adapter, rxo, i) {
+ if ((1 << rxo->rx_eq.eq_idx) & isr)
+ event_handle(adapter, &rxo->rx_eq, true);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t be_msix_rx(int irq, void *dev)
+{
+ struct be_rx_obj *rxo = dev;
+ struct be_adapter *adapter = rxo->adapter;
+
+ event_handle(adapter, &rxo->rx_eq, true);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
+{
+ struct be_adapter *adapter = dev;
+
+ event_handle(adapter, &adapter->tx_eq, false);
+
+ return IRQ_HANDLED;
+}
+
+static inline bool do_gro(struct be_rx_compl_info *rxcp)
+{
+ return rxcp->tcpf && !rxcp->err;
+}
+
+static int be_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
+ struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
+ struct be_adapter *adapter = rxo->adapter;
+ struct be_queue_info *rx_cq = &rxo->cq;
+ struct be_rx_compl_info *rxcp;
+ u32 work_done;
+
+ rx_stats(rxo)->rx_polls++;
+ for (work_done = 0; work_done < budget; work_done++) {
+ rxcp = be_rx_compl_get(rxo);
+ if (!rxcp)
+ break;
+
+ /* Is it a flush compl that has no data */
+ if (unlikely(rxcp->num_rcvd == 0))
+ goto loop_continue;
+
+ /* Discard compl with partial DMA Lancer B0 */
+ if (unlikely(!rxcp->pkt_size)) {
+ be_rx_compl_discard(adapter, rxo, rxcp);
+ goto loop_continue;
+ }
+
+ /* On BE drop pkts that arrive due to imperfect filtering in
+ * promiscuous mode on some SKUs
+ */
+ if (unlikely(rxcp->port != adapter->port_num &&
+ !lancer_chip(adapter))) {
+ be_rx_compl_discard(adapter, rxo, rxcp);
+ goto loop_continue;
+ }
+
+ if (do_gro(rxcp))
+ be_rx_compl_process_gro(adapter, rxo, rxcp);
+ else
+ be_rx_compl_process(adapter, rxo, rxcp);
+loop_continue:
+ be_rx_stats_update(rxo, rxcp);
+ }
+
+ /* Refill the queue */
+ if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
+ be_post_rx_frags(rxo, GFP_ATOMIC);
+
+ /* All consumed */
+ if (work_done < budget) {
+ napi_complete(napi);
+ be_cq_notify(adapter, rx_cq->id, true, work_done);
+ } else {
+ /* More to be consumed; continue with interrupts disabled */
+ be_cq_notify(adapter, rx_cq->id, false, work_done);
+ }
+ return work_done;
+}
+
+/* As TX and MCC share the same EQ, check for both TX and MCC completions.
+ * For TX/MCC we don't honour the budget; consume everything.
+ */
+static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
+{
+ struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
+ struct be_adapter *adapter =
+ container_of(tx_eq, struct be_adapter, tx_eq);
+ struct be_tx_obj *txo;
+ struct be_eth_tx_compl *txcp;
+ int tx_compl, mcc_compl, status = 0;
+ u8 i;
+ u16 num_wrbs;
+
+ for_all_tx_queues(adapter, txo, i) {
+ tx_compl = 0;
+ num_wrbs = 0;
+ while ((txcp = be_tx_compl_get(&txo->cq))) {
+ num_wrbs += be_tx_compl_process(adapter, txo,
+ AMAP_GET_BITS(struct amap_eth_tx_compl,
+ wrb_index, txcp));
+ tx_compl++;
+ }
+ if (tx_compl) {
+ be_cq_notify(adapter, txo->cq.id, true, tx_compl);
+
+ atomic_sub(num_wrbs, &txo->q.used);
+
+ /* As Tx wrbs have been freed up, wake up netdev queue
+ * if it was stopped due to lack of tx wrbs. */
+ if (__netif_subqueue_stopped(adapter->netdev, i) &&
+ atomic_read(&txo->q.used) < txo->q.len / 2) {
+ netif_wake_subqueue(adapter->netdev, i);
+ }
+
+ u64_stats_update_begin(&tx_stats(txo)->sync_compl);
+ tx_stats(txo)->tx_compl += tx_compl;
+ u64_stats_update_end(&tx_stats(txo)->sync_compl);
+ }
+ }
+
+ mcc_compl = be_process_mcc(adapter, &status);
+
+ if (mcc_compl) {
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
+ }
+
+ napi_complete(napi);
+
+ be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
+ adapter->drv_stats.tx_events++;
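+ /* Budget is not honoured for TX/MCC; report a single unit of work */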
+ return 1;
+}
+
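+/* Report any unmasked Unrecoverable Error (UE) bits from PCI config space */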
+void be_detect_dump_ue(struct be_adapter *adapter)
+{
+ u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
+ u32 i;
+
+ pci_read_config_dword(adapter->pdev,
+ PCICFG_UE_STATUS_LOW, &ue_status_lo);
+ pci_read_config_dword(adapter->pdev,
+ PCICFG_UE_STATUS_HIGH, &ue_status_hi);
+ pci_read_config_dword(adapter->pdev,
+ PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
+ pci_read_config_dword(adapter->pdev,
+ PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
+
+ ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
+ ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
+
+ if (ue_status_lo || ue_status_hi) {
+ adapter->ue_detected = true;
+ adapter->eeh_err = true;
+ dev_err(&adapter->pdev->dev, "UE Detected!!\n");
+ }
+
+ if (ue_status_lo) {
+ for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
+ if (ue_status_lo & 1)
+ dev_err(&adapter->pdev->dev,
+ "UE: %s bit set\n", ue_status_low_desc[i]);
+ }
+ }
+ if (ue_status_hi) {
+ for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
+ if (ue_status_hi & 1)
+ dev_err(&adapter->pdev->dev,
+ "UE: %s bit set\n", ue_status_hi_desc[i]);
+ }
+ }
+}
+
+static void be_worker(struct work_struct *work)
+{
+ struct be_adapter *adapter =
+ container_of(work, struct be_adapter, work.work);
+ struct be_rx_obj *rxo;
+ int i;
+
+ if (!adapter->ue_detected && !lancer_chip(adapter))
+ be_detect_dump_ue(adapter);
+
+ /* when interrupts are not yet enabled, just reap any pending
+ * mcc completions */
+ if (!netif_running(adapter->netdev)) {
+ int mcc_compl, status = 0;
+
+ mcc_compl = be_process_mcc(adapter, &status);
+
+ if (mcc_compl) {
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
+ }
+
+ goto reschedule;
+ }
+
+ if (!adapter->stats_cmd_sent) {
+ if (lancer_chip(adapter))
+ lancer_cmd_get_pport_stats(adapter,
+ &adapter->stats_cmd);
+ else
+ be_cmd_get_stats(adapter, &adapter->stats_cmd);
+ }
+
+ for_all_rx_queues(adapter, rxo, i) {
+ be_rx_eqd_update(adapter, rxo);
+
+ if (rxo->rx_post_starved) {
+ rxo->rx_post_starved = false;
+ be_post_rx_frags(rxo, GFP_KERNEL);
+ }
+ }
+
+reschedule:
+ adapter->work_counter++;
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+}
+
+static void be_msix_disable(struct be_adapter *adapter)
+{
+ if (msix_enabled(adapter)) {
+ pci_disable_msix(adapter->pdev);
+ adapter->num_msix_vec = 0;
+ }
+}
+
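+/* Request one MSI-X vector per RX queue plus one shared TX/MCC vector.
+ * pci_enable_msix() returns a positive count when fewer vectors are
+ * available than requested; retry once with that reduced count.
+ */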
+static void be_msix_enable(struct be_adapter *adapter)
+{
+#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
+ int i, status, num_vec;
+
+ num_vec = be_num_rxqs_want(adapter) + 1;
+
+ for (i = 0; i < num_vec; i++)
+ adapter->msix_entries[i].entry = i;
+
+ status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
+ if (status == 0) {
+ goto done;
+ } else if (status >= BE_MIN_MSIX_VECTORS) {
+ num_vec = status;
+ if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
+ num_vec) == 0)
+ goto done;
+ }
+ return;
+done:
+ adapter->num_msix_vec = num_vec;
+ return;
+}
+
+static void be_sriov_enable(struct be_adapter *adapter)
+{
+ be_check_sriov_fn_type(adapter);
+#ifdef CONFIG_PCI_IOV
+ if (be_physfn(adapter) && num_vfs) {
+ int status, pos;
+ u16 nvfs;
+
+ pos = pci_find_ext_capability(adapter->pdev,
+ PCI_EXT_CAP_ID_SRIOV);
+ pci_read_config_word(adapter->pdev,
+ pos + PCI_SRIOV_TOTAL_VF, &nvfs);
+
+ if (num_vfs > nvfs) {
+ dev_info(&adapter->pdev->dev,
+ "Device supports only %d VFs, not %d\n",
+ nvfs, num_vfs);
+ num_vfs = nvfs;
+ }
+
+ status = pci_enable_sriov(adapter->pdev, num_vfs);
+ adapter->sriov_enabled = status ? false : true;
+ }
+#endif
+}
+
+static void be_sriov_disable(struct be_adapter *adapter)
+{
+#ifdef CONFIG_PCI_IOV
+ if (adapter->sriov_enabled) {
+ pci_disable_sriov(adapter->pdev);
+ adapter->sriov_enabled = false;
+ }
+#endif
+}
+
+static inline int be_msix_vec_get(struct be_adapter *adapter,
+ struct be_eq_obj *eq_obj)
+{
+ return adapter->msix_entries[eq_obj->eq_idx].vector;
+}
+
+static int be_request_irq(struct be_adapter *adapter,
+ struct be_eq_obj *eq_obj,
+ void *handler, char *desc, void *context)
+{
+ struct net_device *netdev = adapter->netdev;
+ int vec;
+
+ sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
+ vec = be_msix_vec_get(adapter, eq_obj);
+ return request_irq(vec, handler, 0, eq_obj->desc, context);
+}
+
+static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
+ void *context)
+{
+ int vec = be_msix_vec_get(adapter, eq_obj);
+ free_irq(vec, context);
+}
+
+static int be_msix_register(struct be_adapter *adapter)
+{
+ struct be_rx_obj *rxo;
+ int status, i;
+ char qname[10];
+
+ status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
+ adapter);
+ if (status)
+ goto err;
+
+ for_all_rx_queues(adapter, rxo, i) {
+ sprintf(qname, "rxq%d", i);
+ status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
+ qname, rxo);
+ if (status)
+ goto err_msix;
+ }
+
+ return 0;
+
+err_msix:
+ be_free_irq(adapter, &adapter->tx_eq, adapter);
+
+ for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
+ be_free_irq(adapter, &rxo->rx_eq, rxo);
+
+err:
+ dev_warn(&adapter->pdev->dev,
+ "MSIX Request IRQ failed - err %d\n", status);
+ be_msix_disable(adapter);
+ return status;
+}
+
+static int be_irq_register(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int status;
+
+ if (msix_enabled(adapter)) {
+ status = be_msix_register(adapter);
+ if (status == 0)
+ goto done;
+ /* INTx is not supported for VF */
+ if (!be_physfn(adapter))
+ return status;
+ }
+
+ /* INTx */
+ netdev->irq = adapter->pdev->irq;
+ status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
+ adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "INTx request IRQ failed - err %d\n", status);
+ return status;
+ }
+done:
+ adapter->isr_registered = true;
+ return 0;
+}
+
+static void be_irq_unregister(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct be_rx_obj *rxo;
+ int i;
+
+ if (!adapter->isr_registered)
+ return;
+
+ /* INTx */
+ if (!msix_enabled(adapter)) {
+ free_irq(netdev->irq, adapter);
+ goto done;
+ }
+
+ /* MSIx */
+ be_free_irq(adapter, &adapter->tx_eq, adapter);
+
+ for_all_rx_queues(adapter, rxo, i)
+ be_free_irq(adapter, &rxo->rx_eq, rxo);
+
+done:
+ adapter->isr_registered = false;
+}
+
+static void be_rx_queues_clear(struct be_adapter *adapter)
+{
+ struct be_queue_info *q;
+ struct be_rx_obj *rxo;
+ int i;
+
+ for_all_rx_queues(adapter, rxo, i) {
+ q = &rxo->q;
+ if (q->created) {
+ be_cmd_rxq_destroy(adapter, q);
+ /* After the rxq is invalidated, wait for a grace time
+ * of 1ms for all dma to end and the flush compl to
+ * arrive
+ */
+ mdelay(1);
+ be_rx_q_clean(adapter, rxo);
+ }
+
+ /* Clear any residual events */
+ q = &rxo->rx_eq.q;
+ if (q->created)
+ be_eq_clean(adapter, &rxo->rx_eq);
+ }
+}
+
+static int be_close(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_rx_obj *rxo;
+ struct be_tx_obj *txo;
+ struct be_eq_obj *tx_eq = &adapter->tx_eq;
+ int vec, i;
+
+ be_async_mcc_disable(adapter);
+
+ if (!lancer_chip(adapter))
+ be_intr_set(adapter, false);
+
+ for_all_rx_queues(adapter, rxo, i)
+ napi_disable(&rxo->rx_eq.napi);
+
+ napi_disable(&tx_eq->napi);
+
+ if (lancer_chip(adapter)) {
+ be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
+ for_all_rx_queues(adapter, rxo, i)
+ be_cq_notify(adapter, rxo->cq.id, false, 0);
+ for_all_tx_queues(adapter, txo, i)
+ be_cq_notify(adapter, txo->cq.id, false, 0);
+ }
+
+ if (msix_enabled(adapter)) {
+ vec = be_msix_vec_get(adapter, tx_eq);
+ synchronize_irq(vec);
+
+ for_all_rx_queues(adapter, rxo, i) {
+ vec = be_msix_vec_get(adapter, &rxo->rx_eq);
+ synchronize_irq(vec);
+ }
+ } else {
+ synchronize_irq(netdev->irq);
+ }
+ be_irq_unregister(adapter);
+
+ /* Wait for all pending tx completions to arrive so that
+ * all tx skbs are freed.
+ */
+ for_all_tx_queues(adapter, txo, i)
+ be_tx_compl_clean(adapter, txo);
+
+ be_rx_queues_clear(adapter);
+ return 0;
+}
+
+static int be_rx_queues_setup(struct be_adapter *adapter)
+{
+ struct be_rx_obj *rxo;
+ int rc, i;
+ u8 rsstable[MAX_RSS_QS];
+
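+ /* RX queue 0 is the default (non-RSS) queue; RSS is enabled on the rest */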
+ for_all_rx_queues(adapter, rxo, i) {
+ rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
+ rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
+ adapter->if_handle,
+ (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
+ if (rc)
+ return rc;
+ }
+
+ if (be_multi_rxq(adapter)) {
+ for_all_rss_queues(adapter, rxo, i)
+ rsstable[i] = rxo->rss_id;
+
+ rc = be_cmd_rss_config(adapter, rsstable,
+ adapter->num_rx_qs - 1);
+ if (rc)
+ return rc;
+ }
+
+ /* First time posting */
+ for_all_rx_queues(adapter, rxo, i) {
+ be_post_rx_frags(rxo, GFP_KERNEL);
+ napi_enable(&rxo->rx_eq.napi);
+ }
+ return 0;
+}
+
+static int be_open(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_eq_obj *tx_eq = &adapter->tx_eq;
+ struct be_rx_obj *rxo;
+ int status, i;
+
+ status = be_rx_queues_setup(adapter);
+ if (status)
+ goto err;
+
+ napi_enable(&tx_eq->napi);
+
+ be_irq_register(adapter);
+
+ if (!lancer_chip(adapter))
+ be_intr_set(adapter, true);
+
+ /* The evt queues are created in unarmed state; arm them */
+ for_all_rx_queues(adapter, rxo, i) {
+ be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
+ be_cq_notify(adapter, rxo->cq.id, true, 0);
+ }
+ be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
+
+ /* Now that interrupts are on we can process async mcc */
+ be_async_mcc_enable(adapter);
+
+ if (be_physfn(adapter)) {
+ status = be_vid_config(adapter, false, 0);
+ if (status)
+ goto err;
+
+ status = be_cmd_set_flow_control(adapter,
+ adapter->tx_fc, adapter->rx_fc);
+ if (status)
+ goto err;
+ }
+
+ return 0;
+err:
+ be_close(adapter->netdev);
+ return -EIO;
+}
+
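+/* Program (enable) or clear (disable) the magic-packet WoL filter and set
+ * the PCI wake state to match.
+ */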
+static int be_setup_wol(struct be_adapter *adapter, bool enable)
+{
+ struct be_dma_mem cmd;
+ int status = 0;
+ u8 mac[ETH_ALEN];
+
+ memset(mac, 0, ETH_ALEN);
+
+ cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_KERNEL);
+ if (cmd.va == NULL)
+ return -1;
+ memset(cmd.va, 0, cmd.size);
+
+ if (enable) {
+ status = pci_write_config_dword(adapter->pdev,
+ PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Could not enable Wake-on-LAN\n");
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+ cmd.dma);
+ return status;
+ }
+ status = be_cmd_enable_magic_wol(adapter,
+ adapter->netdev->dev_addr, &cmd);
+ pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
+ pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
+ } else {
+ status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
+ pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
+ pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
+ }
+
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+ return status;
+}
+
+/*
+ * Generate a seed MAC address from the PF MAC address using jhash.
+ * MAC addresses for VFs are assigned incrementally, starting from the seed.
+ * These addresses are programmed in the ASIC by the PF and the VF driver
+ * queries for the MAC address during its probe.
+ */
+static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
+{
+ u32 vf = 0;
+ int status = 0;
+ u8 mac[ETH_ALEN];
+
+ be_vf_eth_addr_generate(adapter, mac);
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ status = be_cmd_pmac_add(adapter, mac,
+ adapter->vf_cfg[vf].vf_if_handle,
+ &adapter->vf_cfg[vf].vf_pmac_id,
+ vf + 1);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "MAC address add failed for VF %d\n", vf);
+ else
+ memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+
+ mac[5] += 1;
+ }
+ return status;
+}
+
+static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
+{
+ u32 vf;
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
+ be_cmd_pmac_del(adapter,
+ adapter->vf_cfg[vf].vf_if_handle,
+ adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+ }
+}
+
+static int be_setup(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 cap_flags, en_flags, vf = 0;
+ int status;
+ u8 mac[ETH_ALEN];
+
+ be_cmd_req_native_mode(adapter);
+
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST;
+
+ if (be_physfn(adapter)) {
+ cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
+ BE_IF_FLAGS_PROMISCUOUS |
+ BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
+
+ if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
+ cap_flags |= BE_IF_FLAGS_RSS;
+ en_flags |= BE_IF_FLAGS_RSS;
+ }
+ }
+
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ netdev->dev_addr, false/* pmac_invalid */,
+ &adapter->if_handle, &adapter->pmac_id, 0);
+ if (status != 0)
+ goto do_none;
+
+ if (be_physfn(adapter)) {
+ if (adapter->sriov_enabled) {
+ while (vf < num_vfs) {
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST;
+ status = be_cmd_if_create(adapter, cap_flags,
+ en_flags, mac, true,
+ &adapter->vf_cfg[vf].vf_if_handle,
+ NULL, vf+1);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Interface Create failed for VF %d\n",
+ vf);
+ goto if_destroy;
+ }
+ adapter->vf_cfg[vf].vf_pmac_id =
+ BE_INVALID_PMAC_ID;
+ vf++;
+ }
+ }
+ } else {
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+ if (!status) {
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ }
+ }
+
+ status = be_tx_queues_create(adapter);
+ if (status != 0)
+ goto if_destroy;
+
+ status = be_rx_queues_create(adapter);
+ if (status != 0)
+ goto tx_qs_destroy;
+
+ /* Allow all priorities by default. A GRP5 evt may modify this */
+ adapter->vlan_prio_bmap = 0xff;
+
+ status = be_mcc_queues_create(adapter);
+ if (status != 0)
+ goto rx_qs_destroy;
+
+ adapter->link_speed = -1;
+
+ be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
+
+ pcie_set_readrq(adapter->pdev, 4096);
+ return 0;
+
+rx_qs_destroy:
+ be_rx_queues_destroy(adapter);
+tx_qs_destroy:
+ be_tx_queues_destroy(adapter);
+if_destroy:
+ if (be_physfn(adapter) && adapter->sriov_enabled)
+ for (vf = 0; vf < num_vfs; vf++)
+ if (adapter->vf_cfg[vf].vf_if_handle)
+ be_cmd_if_destroy(adapter,
+ adapter->vf_cfg[vf].vf_if_handle,
+ vf + 1);
+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
+do_none:
+ return status;
+}
+
+static int be_clear(struct be_adapter *adapter)
+{
+ int vf;
+
+ if (be_physfn(adapter) && adapter->sriov_enabled)
+ be_vf_eth_addr_rem(adapter);
+
+ be_mcc_queues_destroy(adapter);
+ be_rx_queues_destroy(adapter);
+ be_tx_queues_destroy(adapter);
+ adapter->eq_next_idx = 0;
+
+ if (be_physfn(adapter) && adapter->sriov_enabled)
+ for (vf = 0; vf < num_vfs; vf++)
+ if (adapter->vf_cfg[vf].vf_if_handle)
+ be_cmd_if_destroy(adapter,
+ adapter->vf_cfg[vf].vf_if_handle,
+ vf + 1);
+
+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
+
+ adapter->be3_native = 0;
+
+ /* tell fw we're done with firing cmds */
+ be_cmd_fw_clean(adapter);
+ return 0;
+}
+
+#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
+static bool be_flash_redboot(struct be_adapter *adapter,
+ const u8 *p, u32 img_start, int image_size,
+ int hdr_size)
+{
+ u32 crc_offset;
+ u8 flashed_crc[4];
+ int status;
+
+ crc_offset = hdr_size + img_start + image_size - 4;
+
+ p += crc_offset;
+
+ status = be_cmd_get_flash_crc(adapter, flashed_crc,
+ (image_size - 4));
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "could not get crc from flash, not flashing redboot\n");
+ return false;
+ }
+
+ /* update redboot only if the crc does not match */
+ return memcmp(flashed_crc, p, 4) != 0;
+}
+
+static bool phy_flashing_required(struct be_adapter *adapter)
+{
+ int status = 0;
+ struct be_phy_info phy_info;
+
+ status = be_cmd_get_phy_info(adapter, &phy_info);
+ if (status)
+ return false;
+ if ((phy_info.phy_type == TN_8022) &&
+ (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
+ return true;
+ }
+ return false;
+}
+
+static int be_flash_data(struct be_adapter *adapter,
+ const struct firmware *fw,
+ struct be_dma_mem *flash_cmd, int num_of_images)
+{
+ int status = 0, i, filehdr_size = 0;
+ u32 total_bytes = 0, flash_op;
+ int num_bytes;
+ const u8 *p = fw->data;
+ struct be_cmd_write_flashrom *req = flash_cmd->va;
+ const struct flash_comp *pflashcomp;
+ int num_comp;
+
+ static const struct flash_comp gen3_flash_types[10] = {
+ { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
+ FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
+ { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
+ { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
+ { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
+ { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
+ FLASH_NCSI_IMAGE_MAX_SIZE_g3},
+ { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
+ FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
+ };
+ static const struct flash_comp gen2_flash_types[8] = {
+ { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g2},
+ { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
+ FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
+ { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
+ { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
+ { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
+ { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g2},
+ { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g2},
+ { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g2}
+ };
+
+ if (adapter->generation == BE_GEN3) {
+ pflashcomp = gen3_flash_types;
+ filehdr_size = sizeof(struct flash_file_hdr_g3);
+ num_comp = ARRAY_SIZE(gen3_flash_types);
+ } else {
+ pflashcomp = gen2_flash_types;
+ filehdr_size = sizeof(struct flash_file_hdr_g2);
+ num_comp = ARRAY_SIZE(gen2_flash_types);
+ }
+ for (i = 0; i < num_comp; i++) {
+ if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
+ memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
+ continue;
+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
+ if (!phy_flashing_required(adapter))
+ continue;
+ }
+ if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
+ (!be_flash_redboot(adapter, fw->data,
+ pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
+ (num_of_images * sizeof(struct image_hdr)))))
+ continue;
+ p = fw->data;
+ p += filehdr_size + pflashcomp[i].offset
+ + (num_of_images * sizeof(struct image_hdr));
+ if (p + pflashcomp[i].size > fw->data + fw->size)
+ return -1;
+ total_bytes = pflashcomp[i].size;
+ while (total_bytes) {
+ if (total_bytes > 32*1024)
+ num_bytes = 32*1024;
+ else
+ num_bytes = total_bytes;
+ total_bytes -= num_bytes;
+ if (!total_bytes) {
+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
+ flash_op = FLASHROM_OPER_PHY_FLASH;
+ else
+ flash_op = FLASHROM_OPER_FLASH;
+ } else {
+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
+ flash_op = FLASHROM_OPER_PHY_SAVE;
+ else
+ flash_op = FLASHROM_OPER_SAVE;
+ }
+ memcpy(req->params.data_buf, p, num_bytes);
+ p += num_bytes;
+ status = be_cmd_write_flashrom(adapter, flash_cmd,
+ pflashcomp[i].optype, flash_op, num_bytes);
+ if (status) {
+ if ((status == ILLEGAL_IOCTL_REQ) &&
+ (pflashcomp[i].optype ==
+ IMG_TYPE_PHY_FW))
+ break;
+ dev_err(&adapter->pdev->dev,
+ "cmd to write to flash rom failed.\n");
+ return -1;
+ }
+ }
+ }
+ return 0;
+}
+
+static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
+{
+ if (fhdr == NULL)
+ return 0;
+ if (fhdr->build[0] == '3')
+ return BE_GEN3;
+ else if (fhdr->build[0] == '2')
+ return BE_GEN2;
+ else
+ return 0;
+}
+
+static int lancer_fw_download(struct be_adapter *adapter,
+ const struct firmware *fw)
+{
+#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
+#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
+ struct be_dma_mem flash_cmd;
+ const u8 *data_ptr = NULL;
+ u8 *dest_image_ptr = NULL;
+ size_t image_size = 0;
+ u32 chunk_size = 0;
+ u32 data_written = 0;
+ u32 offset = 0;
+ int status = 0;
+ u8 add_status = 0;
+
+ if (!IS_ALIGNED(fw->size, sizeof(u32))) {
+ dev_err(&adapter->pdev->dev,
+ "FW image not properly aligned. "
+ "Length must be 4-byte aligned.\n");
+ status = -EINVAL;
+ goto lancer_fw_exit;
+ }
+
+ flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
+ + LANCER_FW_DOWNLOAD_CHUNK;
+ flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
+ &flash_cmd.dma, GFP_KERNEL);
+ if (!flash_cmd.va) {
+ status = -ENOMEM;
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure while flashing\n");
+ goto lancer_fw_exit;
+ }
+
+ dest_image_ptr = flash_cmd.va +
+ sizeof(struct lancer_cmd_req_write_object);
+ image_size = fw->size;
+ data_ptr = fw->data;
+
+ while (image_size) {
+ chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
+
+ /* Copy the image chunk content. */
+ memcpy(dest_image_ptr, data_ptr, chunk_size);
+
+ status = lancer_cmd_write_object(adapter, &flash_cmd,
+ chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
+ &data_written, &add_status);
+
+ if (status)
+ break;
+
+ offset += data_written;
+ data_ptr += data_written;
+ image_size -= data_written;
+ }
+
+ if (!status) {
+ /* Commit the FW written */
+ status = lancer_cmd_write_object(adapter, &flash_cmd,
+ 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
+ &data_written, &add_status);
+ }
+
+ dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
+ flash_cmd.dma);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Firmware load error. "
+ "Status code: 0x%x Additional Status: 0x%x\n",
+ status, add_status);
+ goto lancer_fw_exit;
+ }
+
+ dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
+lancer_fw_exit:
+ return status;
+}
+
+static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
+{
+ struct flash_file_hdr_g2 *fhdr;
+ struct flash_file_hdr_g3 *fhdr3;
+ struct image_hdr *img_hdr_ptr = NULL;
+ struct be_dma_mem flash_cmd;
+ const u8 *p;
+ int status = 0, i = 0, num_imgs = 0;
+
+ p = fw->data;
+ fhdr = (struct flash_file_hdr_g2 *) p;
+
+ flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
+ flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
+ &flash_cmd.dma, GFP_KERNEL);
+ if (!flash_cmd.va) {
+ status = -ENOMEM;
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure while flashing\n");
+ goto be_fw_exit;
+ }
+
+ if ((adapter->generation == BE_GEN3) &&
+ (get_ufigen_type(fhdr) == BE_GEN3)) {
+ fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
+ num_imgs = le32_to_cpu(fhdr3->num_imgs);
+ for (i = 0; i < num_imgs; i++) {
+ img_hdr_ptr = (struct image_hdr *) (fw->data +
+ (sizeof(struct flash_file_hdr_g3) +
+ i * sizeof(struct image_hdr)));
+ if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
+ status = be_flash_data(adapter, fw, &flash_cmd,
+ num_imgs);
+ }
+ } else if ((adapter->generation == BE_GEN2) &&
+ (get_ufigen_type(fhdr) == BE_GEN2)) {
+ status = be_flash_data(adapter, fw, &flash_cmd, 0);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "UFI and Interface are not compatible for flashing\n");
+ status = -1;
+ }
+
+ dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
+ flash_cmd.dma);
+ if (status) {
+ dev_err(&adapter->pdev->dev, "Firmware load error\n");
+ goto be_fw_exit;
+ }
+
+ dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
+
+be_fw_exit:
+ return status;
+}
+
+int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
+{
+ const struct firmware *fw;
+ int status;
+
+ if (!netif_running(adapter->netdev)) {
+ dev_err(&adapter->pdev->dev,
+ "Firmware load not allowed (interface is down)\n");
+ return -1;
+ }
+
+ status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
+ if (status)
+ goto fw_exit;
+
+ dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
+
+ if (lancer_chip(adapter))
+ status = lancer_fw_download(adapter, fw);
+ else
+ status = be_fw_download(adapter, fw);
+
+fw_exit:
+ release_firmware(fw);
+ return status;
+}
+
+static struct net_device_ops be_netdev_ops = {
+ .ndo_open = be_open,
+ .ndo_stop = be_close,
+ .ndo_start_xmit = be_xmit,
+ .ndo_set_rx_mode = be_set_multicast_list,
+ .ndo_set_mac_address = be_mac_addr_set,
+ .ndo_change_mtu = be_change_mtu,
+ .ndo_get_stats64 = be_get_stats64,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = be_vlan_add_vid,
+ .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
+ .ndo_set_vf_mac = be_set_vf_mac,
+ .ndo_set_vf_vlan = be_set_vf_vlan,
+ .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
+ .ndo_get_vf_config = be_get_vf_config
+};
+
+static void be_netdev_init(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_rx_obj *rxo;
+ int i;
+
+ netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_TX;
+ if (be_multi_rxq(adapter))
+ netdev->hw_features |= NETIF_F_RXHASH;
+
+ netdev->features |= netdev->hw_features |
+ NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+
+ netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+ netdev->flags |= IFF_MULTICAST;
+
+ /* Default settings for Rx and Tx flow control */
+ adapter->rx_fc = true;
+ adapter->tx_fc = true;
+
+ netif_set_gso_max_size(netdev, 65535);
+
+ BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
+
+ SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
+
+ for_all_rx_queues(adapter, rxo, i)
+ netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
+ BE_NAPI_WEIGHT);
+
+ netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
+ BE_NAPI_WEIGHT);
+}
+
+static void be_unmap_pci_bars(struct be_adapter *adapter)
+{
+ if (adapter->csr)
+ iounmap(adapter->csr);
+ if (adapter->db)
+ iounmap(adapter->db);
+}
+
+static int be_map_pci_bars(struct be_adapter *adapter)
+{
+ u8 __iomem *addr;
+ int db_reg;
+
+ if (lancer_chip(adapter)) {
+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
+ pci_resource_len(adapter->pdev, 0));
+ if (addr == NULL)
+ return -ENOMEM;
+ adapter->db = addr;
+ return 0;
+ }
+
+ if (be_physfn(adapter)) {
+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
+ pci_resource_len(adapter->pdev, 2));
+ if (addr == NULL)
+ return -ENOMEM;
+ adapter->csr = addr;
+ }
+
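+ /* Doorbell BAR: 4 on BE2, and on BE3 for the PF; BE3 VFs use BAR 0 */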
+ if (adapter->generation == BE_GEN2) {
+ db_reg = 4;
+ } else {
+ if (be_physfn(adapter))
+ db_reg = 4;
+ else
+ db_reg = 0;
+ }
+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
+ pci_resource_len(adapter->pdev, db_reg));
+ if (addr == NULL)
+ goto pci_map_err;
+ adapter->db = addr;
+
+ return 0;
+pci_map_err:
+ be_unmap_pci_bars(adapter);
+ return -ENOMEM;
+}
+
+static void be_ctrl_cleanup(struct be_adapter *adapter)
+{
+ struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
+
+ be_unmap_pci_bars(adapter);
+
+ if (mem->va)
+ dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+ mem->dma);
+
+ mem = &adapter->rx_filter;
+ if (mem->va)
+ dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+ mem->dma);
+}
+
+static int be_ctrl_init(struct be_adapter *adapter)
+{
+ struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
+ struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
+ struct be_dma_mem *rx_filter = &adapter->rx_filter;
+ int status;
+
+ status = be_map_pci_bars(adapter);
+ if (status)
+ goto done;
+
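+ /* Over-allocate by 16 bytes so the mailbox can be 16-byte aligned */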
+ mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
+ mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
+ mbox_mem_alloc->size,
+ &mbox_mem_alloc->dma,
+ GFP_KERNEL);
+ if (!mbox_mem_alloc->va) {
+ status = -ENOMEM;
+ goto unmap_pci_bars;
+ }
+ mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
+ mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
+ mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
+ memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
+
+ rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
+ rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
+ &rx_filter->dma, GFP_KERNEL);
+ if (rx_filter->va == NULL) {
+ status = -ENOMEM;
+ goto free_mbox;
+ }
+ memset(rx_filter->va, 0, rx_filter->size);
+
+ mutex_init(&adapter->mbox_lock);
+ spin_lock_init(&adapter->mcc_lock);
+ spin_lock_init(&adapter->mcc_cq_lock);
+
+ init_completion(&adapter->flash_compl);
+ pci_save_state(adapter->pdev);
+ return 0;
+
+free_mbox:
+ dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
+ mbox_mem_alloc->va, mbox_mem_alloc->dma);
+
+unmap_pci_bars:
+ be_unmap_pci_bars(adapter);
+
+done:
+ return status;
+}
+
+static void be_stats_cleanup(struct be_adapter *adapter)
+{
+ struct be_dma_mem *cmd = &adapter->stats_cmd;
+
+ if (cmd->va)
+ dma_free_coherent(&adapter->pdev->dev, cmd->size,
+ cmd->va, cmd->dma);
+}
+
+static int be_stats_init(struct be_adapter *adapter)
+{
+ struct be_dma_mem *cmd = &adapter->stats_cmd;
+
+ if (adapter->generation == BE_GEN2) {
+ cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
+ } else {
+ if (lancer_chip(adapter))
+ cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
+ else
+ cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
+ }
+ cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
+ GFP_KERNEL);
+ if (cmd->va == NULL)
+ return -1;
+ memset(cmd->va, 0, cmd->size);
+ return 0;
+}
+
+static void __devexit be_remove(struct pci_dev *pdev)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (!adapter)
+ return;
+
+ cancel_delayed_work_sync(&adapter->work);
+
+ unregister_netdev(adapter->netdev);
+
+ be_clear(adapter);
+
+ be_stats_cleanup(adapter);
+
+ be_ctrl_cleanup(adapter);
+
+ kfree(adapter->vf_cfg);
+ be_sriov_disable(adapter);
+
+ be_msix_disable(adapter);
+
+ pci_set_drvdata(pdev, NULL);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ free_netdev(adapter->netdev);
+}
+
+static int be_get_config(struct be_adapter *adapter)
+{
+ int status;
+ u8 mac[ETH_ALEN];
+
+ status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
+ &adapter->function_mode, &adapter->function_caps);
+ if (status)
+ return status;
+
+ memset(mac, 0, ETH_ALEN);
+
+ /* A default permanent address is given to each VF for Lancer */
+ if (be_physfn(adapter) || lancer_chip(adapter)) {
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
+
+ if (status)
+ return status;
+
+ if (!is_valid_ether_addr(mac))
+ return -EADDRNOTAVAIL;
+
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ }
+
+ if (adapter->function_mode & 0x400)
+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
+ else
+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
+
+ status = be_cmd_get_cntl_attributes(adapter);
+ if (status)
+ return status;
+
+ if ((num_vfs && adapter->sriov_enabled) ||
+ (adapter->function_mode & 0x400) ||
+ lancer_chip(adapter) || !be_physfn(adapter)) {
+ adapter->num_tx_qs = 1;
+ netif_set_real_num_tx_queues(adapter->netdev,
+ adapter->num_tx_qs);
+ } else {
+ adapter->num_tx_qs = MAX_TX_QS;
+ }
+
+ return 0;
+}
+
+static int be_dev_family_check(struct be_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ u32 sli_intf = 0, if_type;
+
+ switch (pdev->device) {
+ case BE_DEVICE_ID1:
+ case OC_DEVICE_ID1:
+ adapter->generation = BE_GEN2;
+ break;
+ case BE_DEVICE_ID2:
+ case OC_DEVICE_ID2:
+ adapter->generation = BE_GEN3;
+ break;
+ case OC_DEVICE_ID3:
+ case OC_DEVICE_ID4:
+ pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+ if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
+ SLI_INTF_IF_TYPE_SHIFT;
+
+ if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
+ if_type != 0x02) {
+ dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
+ return -EINVAL;
+ }
+ adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
+ SLI_INTF_FAMILY_SHIFT);
+ adapter->generation = BE_GEN3;
+ break;
+ default:
+ adapter->generation = 0;
+ }
+ return 0;
+}
+
+static int lancer_wait_ready(struct be_adapter *adapter)
+{
+#define SLIPORT_READY_TIMEOUT 500
+ u32 sliport_status;
+ int status = 0, i;
+
+ for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ if (sliport_status & SLIPORT_STATUS_RDY_MASK)
+ break;
+
+ msleep(20);
+ }
+
+ if (i == SLIPORT_READY_TIMEOUT)
+ status = -1;
+
+ return status;
+}
+
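+/* If the SLI port reports an error that needs a reset, initiate recovery
+ * via the SLIPORT_CONTROL register and wait for the port to be ready again.
+ */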
+static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
+{
+ int status;
+ u32 sliport_status, err, reset_needed;
+ status = lancer_wait_ready(adapter);
+ if (!status) {
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ err = sliport_status & SLIPORT_STATUS_ERR_MASK;
+ reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
+ if (err && reset_needed) {
+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
+ adapter->db + SLIPORT_CONTROL_OFFSET);
+
+ /* check adapter has corrected the error */
+ status = lancer_wait_ready(adapter);
+ sliport_status = ioread32(adapter->db +
+ SLIPORT_STATUS_OFFSET);
+ sliport_status &= (SLIPORT_STATUS_ERR_MASK |
+ SLIPORT_STATUS_RN_MASK);
+ if (status || sliport_status)
+ status = -1;
+ } else if (err || reset_needed) {
+ status = -1;
+ }
+ }
+ return status;
+}
+
+static int __devinit be_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pdev_id)
+{
+ int status = 0;
+ struct be_adapter *adapter;
+ struct net_device *netdev;
+
+ status = pci_enable_device(pdev);
+ if (status)
+ goto do_none;
+
+ status = pci_request_regions(pdev, DRV_NAME);
+ if (status)
+ goto disable_dev;
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
+ if (netdev == NULL) {
+ status = -ENOMEM;
+ goto rel_reg;
+ }
+ adapter = netdev_priv(netdev);
+ adapter->pdev = pdev;
+ pci_set_drvdata(pdev, adapter);
+
+ status = be_dev_family_check(adapter);
+ if (status)
+ goto free_netdev;
+
+ adapter->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!status) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ } else {
+ status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (status) {
+ dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
+ goto free_netdev;
+ }
+ }
+
+ be_sriov_enable(adapter);
+ if (adapter->sriov_enabled) {
+ adapter->vf_cfg = kcalloc(num_vfs,
+ sizeof(struct be_vf_cfg), GFP_KERNEL);
+
+ if (!adapter->vf_cfg)
+ goto free_netdev;
+ }
+
+ status = be_ctrl_init(adapter);
+ if (status)
+ goto free_vf_cfg;
+
+ if (lancer_chip(adapter)) {
+ status = lancer_test_and_set_rdy_state(adapter);
+ if (status) {
+ dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
+ goto ctrl_clean;
+ }
+ }
+
+ /* sync up with fw's ready state */
+ if (be_physfn(adapter)) {
+ status = be_cmd_POST(adapter);
+ if (status)
+ goto ctrl_clean;
+ }
+
+ /* tell fw we're ready to fire cmds */
+ status = be_cmd_fw_init(adapter);
+ if (status)
+ goto ctrl_clean;
+
+ status = be_cmd_reset_function(adapter);
+ if (status)
+ goto ctrl_clean;
+
+ status = be_stats_init(adapter);
+ if (status)
+ goto ctrl_clean;
+
+ status = be_get_config(adapter);
+ if (status)
+ goto stats_clean;
+
+ /* The INTR bit may be set in the card when probed by a kdump kernel
+ * after a crash.
+ */
+ if (!lancer_chip(adapter))
+ be_intr_set(adapter, false);
+
+ be_msix_enable(adapter);
+
+ INIT_DELAYED_WORK(&adapter->work, be_worker);
+
+ status = be_setup(adapter);
+ if (status)
+ goto msix_disable;
+
+ be_netdev_init(netdev);
+ status = register_netdev(netdev);
+ if (status != 0)
+ goto unsetup;
+
+ if (be_physfn(adapter) && adapter->sriov_enabled) {
+ u8 mac_speed;
+ u16 vf, lnk_speed;
+
+ if (!lancer_chip(adapter)) {
+ status = be_vf_eth_addr_config(adapter);
+ if (status)
+ goto unreg_netdev;
+ }
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ status = be_cmd_link_status_query(adapter, &mac_speed,
+ &lnk_speed, vf + 1);
+ if (!status)
+ adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
+ else
+ goto unreg_netdev;
+ }
+ }
+
+ dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
+
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
+ return 0;
+
+unreg_netdev:
+ unregister_netdev(netdev);
+unsetup:
+ be_clear(adapter);
+msix_disable:
+ be_msix_disable(adapter);
+stats_clean:
+ be_stats_cleanup(adapter);
+ctrl_clean:
+ be_ctrl_cleanup(adapter);
+free_vf_cfg:
+ kfree(adapter->vf_cfg);
+free_netdev:
+ be_sriov_disable(adapter);
+ free_netdev(netdev);
+ pci_set_drvdata(pdev, NULL);
+rel_reg:
+ pci_release_regions(pdev);
+disable_dev:
+ pci_disable_device(pdev);
+do_none:
+ dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
+ return status;
+}
+
+static int be_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ cancel_delayed_work_sync(&adapter->work);
+ if (adapter->wol)
+ be_setup_wol(adapter, true);
+
+ netif_device_detach(netdev);
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ be_close(netdev);
+ rtnl_unlock();
+ }
+ be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
+ be_clear(adapter);
+
+ be_msix_disable(adapter);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+
+static int be_resume(struct pci_dev *pdev)
+{
+ int status = 0;
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+
+ status = pci_enable_device(pdev);
+ if (status)
+ return status;
+
+ pci_set_power_state(pdev, 0);
+ pci_restore_state(pdev);
+
+ be_msix_enable(adapter);
+ /* tell fw we're ready to fire cmds */
+ status = be_cmd_fw_init(adapter);
+ if (status)
+ return status;
+
+ be_setup(adapter);
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ be_open(netdev);
+ rtnl_unlock();
+ }
+ netif_device_attach(netdev);
+
+ if (adapter->wol)
+ be_setup_wol(adapter, false);
+
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
+ return 0;
+}
+
+/*
+ * An FLR will stop BE from DMAing any data.
+ */
+static void be_shutdown(struct pci_dev *pdev)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (!adapter)
+ return;
+
+ cancel_delayed_work_sync(&adapter->work);
+
+ netif_device_detach(adapter->netdev);
+
+ if (adapter->wol)
+ be_setup_wol(adapter, true);
+
+ be_cmd_reset_function(adapter);
+
+ pci_disable_device(pdev);
+}
+
+static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ dev_err(&adapter->pdev->dev, "EEH error detected\n");
+
+ adapter->eeh_err = true;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ be_close(netdev);
+ rtnl_unlock();
+ }
+ be_clear(adapter);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ int status;
+
+ dev_info(&adapter->pdev->dev, "EEH reset\n");
+ adapter->eeh_err = false;
+
+ status = pci_enable_device(pdev);
+ if (status)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_set_master(pdev);
+ pci_set_power_state(pdev, 0);
+ pci_restore_state(pdev);
+
+ /* Check if card is ok and fw is ready */
+ status = be_cmd_POST(adapter);
+ if (status)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void be_eeh_resume(struct pci_dev *pdev)
+{
+ int status = 0;
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ dev_info(&adapter->pdev->dev, "EEH resume\n");
+
+ pci_save_state(pdev);
+
+ /* tell fw we're ready to fire cmds */
+ status = be_cmd_fw_init(adapter);
+ if (status)
+ goto err;
+
+ status = be_setup(adapter);
+ if (status)
+ goto err;
+
+ if (netif_running(netdev)) {
+ status = be_open(netdev);
+ if (status)
+ goto err;
+ }
+ netif_device_attach(netdev);
+ return;
+err:
+ dev_err(&adapter->pdev->dev, "EEH resume failed\n");
+}
+
+static struct pci_error_handlers be_eeh_handlers = {
+ .error_detected = be_eeh_err_detected,
+ .slot_reset = be_eeh_reset,
+ .resume = be_eeh_resume,
+};
+
+static struct pci_driver be_driver = {
+ .name = DRV_NAME,
+ .id_table = be_dev_ids,
+ .probe = be_probe,
+ .remove = be_remove,
+ .suspend = be_suspend,
+ .resume = be_resume,
+ .shutdown = be_shutdown,
+ .err_handler = &be_eeh_handlers
+};
+
+static int __init be_init_module(void)
+{
+ if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
+ rx_frag_size != 2048) {
+ printk(KERN_WARNING DRV_NAME
+ " : Module param rx_frag_size must be 2048/4096/8192."
+ " Using 2048\n");
+ rx_frag_size = 2048;
+ }
+
+ return pci_register_driver(&be_driver);
+}
+module_init(be_init_module);
+
+static void __exit be_exit_module(void)
+{
+ pci_unregister_driver(&be_driver);
+}
+module_exit(be_exit_module);
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
new file mode 100644
index 000000000000..bdb348a5ccf6
--- /dev/null
+++ b/drivers/net/ethernet/ethoc.c
@@ -0,0 +1,1203 @@
+/*
+ * linux/drivers/net/ethoc.c
+ *
+ * Copyright (C) 2007-2008 Avionic Design Development GmbH
+ * Copyright (C) 2008-2009 Avionic Design GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by Thierry Reding <thierry.reding@avionic-design.de>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <net/ethoc.h>
+
+static int buffer_size = 0x8000; /* 32 KBytes */
+module_param(buffer_size, int, 0);
+MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
+
+/* register offsets */
+#define MODER 0x00
+#define INT_SOURCE 0x04
+#define INT_MASK 0x08
+#define IPGT 0x0c
+#define IPGR1 0x10
+#define IPGR2 0x14
+#define PACKETLEN 0x18
+#define COLLCONF 0x1c
+#define TX_BD_NUM 0x20
+#define CTRLMODER 0x24
+#define MIIMODER 0x28
+#define MIICOMMAND 0x2c
+#define MIIADDRESS 0x30
+#define MIITX_DATA 0x34
+#define MIIRX_DATA 0x38
+#define MIISTATUS 0x3c
+#define MAC_ADDR0 0x40
+#define MAC_ADDR1 0x44
+#define ETH_HASH0 0x48
+#define ETH_HASH1 0x4c
+#define ETH_TXCTRL 0x50
+
+/* mode register */
+#define MODER_RXEN (1 << 0) /* receive enable */
+#define MODER_TXEN (1 << 1) /* transmit enable */
+#define MODER_NOPRE (1 << 2) /* no preamble */
+#define MODER_BRO (1 << 3) /* broadcast address */
+#define MODER_IAM (1 << 4) /* individual address mode */
+#define MODER_PRO (1 << 5) /* promiscuous mode */
+#define MODER_IFG (1 << 6) /* interframe gap for incoming frames */
+#define MODER_LOOP (1 << 7) /* loopback */
+#define MODER_NBO (1 << 8) /* no back-off */
+#define MODER_EDE (1 << 9) /* excess defer enable */
+#define MODER_FULLD (1 << 10) /* full duplex */
+#define MODER_RESET (1 << 11) /* FIXME: reset (undocumented) */
+#define MODER_DCRC (1 << 12) /* delayed CRC enable */
+#define MODER_CRC (1 << 13) /* CRC enable */
+#define MODER_HUGE (1 << 14) /* huge packets enable */
+#define MODER_PAD (1 << 15) /* padding enabled */
+#define MODER_RSM (1 << 16) /* receive small packets */
+
+/* interrupt source and mask registers */
+#define INT_MASK_TXF (1 << 0) /* transmit frame */
+#define INT_MASK_TXE (1 << 1) /* transmit error */
+#define INT_MASK_RXF (1 << 2) /* receive frame */
+#define INT_MASK_RXE (1 << 3) /* receive error */
+#define INT_MASK_BUSY (1 << 4)
+#define INT_MASK_TXC (1 << 5) /* transmit control frame */
+#define INT_MASK_RXC (1 << 6) /* receive control frame */
+
+#define INT_MASK_TX (INT_MASK_TXF | INT_MASK_TXE)
+#define INT_MASK_RX (INT_MASK_RXF | INT_MASK_RXE)
+
+#define INT_MASK_ALL ( \
+ INT_MASK_TXF | INT_MASK_TXE | \
+ INT_MASK_RXF | INT_MASK_RXE | \
+ INT_MASK_TXC | INT_MASK_RXC | \
+ INT_MASK_BUSY \
+ )
+
+/* packet length register */
+#define PACKETLEN_MIN(min) (((min) & 0xffff) << 16)
+#define PACKETLEN_MAX(max) (((max) & 0xffff) << 0)
+#define PACKETLEN_MIN_MAX(min, max) (PACKETLEN_MIN(min) | \
+ PACKETLEN_MAX(max))
+
+/* transmit buffer number register */
+#define TX_BD_NUM_VAL(x) (((x) <= 0x80) ? (x) : 0x80)
+
+/* control module mode register */
+#define CTRLMODER_PASSALL (1 << 0) /* pass all receive frames */
+#define CTRLMODER_RXFLOW (1 << 1) /* receive control flow */
+#define CTRLMODER_TXFLOW (1 << 2) /* transmit control flow */
+
+/* MII mode register */
+#define MIIMODER_CLKDIV(x) ((x) & 0xfe) /* needs to be an even number */
+#define MIIMODER_NOPRE (1 << 8) /* no preamble */
+
+/* MII command register */
+#define MIICOMMAND_SCAN (1 << 0) /* scan status */
+#define MIICOMMAND_READ (1 << 1) /* read status */
+#define MIICOMMAND_WRITE (1 << 2) /* write control data */
+
+/* MII address register */
+#define MIIADDRESS_FIAD(x) (((x) & 0x1f) << 0)
+#define MIIADDRESS_RGAD(x) (((x) & 0x1f) << 8)
+#define MIIADDRESS_ADDR(phy, reg) (MIIADDRESS_FIAD(phy) | \
+ MIIADDRESS_RGAD(reg))
+
+/* MII transmit data register */
+#define MIITX_DATA_VAL(x) ((x) & 0xffff)
+
+/* MII receive data register */
+#define MIIRX_DATA_VAL(x) ((x) & 0xffff)
+
+/* MII status register */
+#define MIISTATUS_LINKFAIL (1 << 0)
+#define MIISTATUS_BUSY (1 << 1)
+#define MIISTATUS_INVALID (1 << 2)
+
+/* TX buffer descriptor */
+#define TX_BD_CS (1 << 0) /* carrier sense lost */
+#define TX_BD_DF (1 << 1) /* defer indication */
+#define TX_BD_LC (1 << 2) /* late collision */
+#define TX_BD_RL (1 << 3) /* retransmission limit */
+#define TX_BD_RETRY_MASK (0x00f0)
+#define TX_BD_RETRY(x) (((x) & 0x00f0) >> 4)
+#define TX_BD_UR (1 << 8) /* transmitter underrun */
+#define TX_BD_CRC (1 << 11) /* TX CRC enable */
+#define TX_BD_PAD (1 << 12) /* pad enable for short packets */
+#define TX_BD_WRAP (1 << 13)
+#define TX_BD_IRQ (1 << 14) /* interrupt request enable */
+#define TX_BD_READY (1 << 15) /* TX buffer ready */
+#define TX_BD_LEN(x) (((x) & 0xffff) << 16)
+#define TX_BD_LEN_MASK (0xffff << 16)
+
+#define TX_BD_STATS (TX_BD_CS | TX_BD_DF | TX_BD_LC | \
+ TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR)
+
+/* RX buffer descriptor */
+#define RX_BD_LC (1 << 0) /* late collision */
+#define RX_BD_CRC (1 << 1) /* RX CRC error */
+#define RX_BD_SF (1 << 2) /* short frame */
+#define RX_BD_TL (1 << 3) /* too long */
+#define RX_BD_DN (1 << 4) /* dribble nibble */
+#define RX_BD_IS (1 << 5) /* invalid symbol */
+#define RX_BD_OR (1 << 6) /* receiver overrun */
+#define RX_BD_MISS (1 << 7)
+#define RX_BD_CF (1 << 8) /* control frame */
+#define RX_BD_WRAP (1 << 13)
+#define RX_BD_IRQ (1 << 14) /* interrupt request enable */
+#define RX_BD_EMPTY (1 << 15)
+#define RX_BD_LEN(x) (((x) & 0xffff) << 16)
+
+#define RX_BD_STATS (RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \
+ RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS)
+
+#define ETHOC_BUFSIZ 1536
+#define ETHOC_ZLEN 64
+#define ETHOC_BD_BASE 0x400
+#define ETHOC_TIMEOUT (HZ / 2)
+#define ETHOC_MII_TIMEOUT (1 + (HZ / 5))
+
+/**
+ * struct ethoc - driver-private device structure
+ * @iobase: pointer to I/O memory region
+ * @membase: pointer to buffer memory region
+ * @dma_alloc: dma allocated buffer size
+ * @io_region_size: I/O memory region size
+ * @num_tx: number of send buffers
+ * @cur_tx: last send buffer written
+ * @dty_tx: last buffer actually sent
+ * @num_rx: number of receive buffers
+ * @cur_rx: current receive buffer
+ * @vma: pointer to array of virtual memory addresses for buffers
+ * @netdev: pointer to network device structure
+ * @napi: NAPI structure
+ * @msg_enable: device state flags
+ * @lock: device lock
+ * @phy: attached PHY
+ * @mdio: MDIO bus for PHY access
+ * @phy_id: address of attached PHY
+ */
+struct ethoc {
+ void __iomem *iobase;
+ void __iomem *membase;
+ int dma_alloc;
+ resource_size_t io_region_size;
+
+ unsigned int num_tx;
+ unsigned int cur_tx;
+ unsigned int dty_tx;
+
+ unsigned int num_rx;
+ unsigned int cur_rx;
+
+ void **vma;
+
+ struct net_device *netdev;
+ struct napi_struct napi;
+ u32 msg_enable;
+
+ spinlock_t lock;
+
+ struct phy_device *phy;
+ struct mii_bus *mdio;
+ s8 phy_id;
+};
+
+/**
+ * struct ethoc_bd - buffer descriptor
+ * @stat: buffer statistics
+ * @addr: physical memory address
+ */
+struct ethoc_bd {
+ u32 stat;
+ u32 addr;
+};
+
+static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
+{
+ return ioread32(dev->iobase + offset);
+}
+
+static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
+{
+ iowrite32(data, dev->iobase + offset);
+}
+
+static inline void ethoc_read_bd(struct ethoc *dev, int index,
+ struct ethoc_bd *bd)
+{
+ loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
+ bd->stat = ethoc_read(dev, offset + 0);
+ bd->addr = ethoc_read(dev, offset + 4);
+}
+
+static inline void ethoc_write_bd(struct ethoc *dev, int index,
+ const struct ethoc_bd *bd)
+{
+ loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
+ ethoc_write(dev, offset + 0, bd->stat);
+ ethoc_write(dev, offset + 4, bd->addr);
+}
+
+static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
+{
+ u32 imask = ethoc_read(dev, INT_MASK);
+ imask |= mask;
+ ethoc_write(dev, INT_MASK, imask);
+}
+
+static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
+{
+ u32 imask = ethoc_read(dev, INT_MASK);
+ imask &= ~mask;
+ ethoc_write(dev, INT_MASK, imask);
+}
+
+static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
+{
+ ethoc_write(dev, INT_SOURCE, mask);
+}
+
+static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
+{
+ u32 mode = ethoc_read(dev, MODER);
+ mode |= MODER_RXEN | MODER_TXEN;
+ ethoc_write(dev, MODER, mode);
+}
+
+static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
+{
+ u32 mode = ethoc_read(dev, MODER);
+ mode &= ~(MODER_RXEN | MODER_TXEN);
+ ethoc_write(dev, MODER, mode);
+}
+
+static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start)
+{
+ struct ethoc_bd bd;
+ int i;
+ void *vma;
+
+ dev->cur_tx = 0;
+ dev->dty_tx = 0;
+ dev->cur_rx = 0;
+
+ ethoc_write(dev, TX_BD_NUM, dev->num_tx);
+
+ /* setup transmission buffers */
+ bd.addr = mem_start;
+ bd.stat = TX_BD_IRQ | TX_BD_CRC;
+ vma = dev->membase;
+
+ for (i = 0; i < dev->num_tx; i++) {
+ if (i == dev->num_tx - 1)
+ bd.stat |= TX_BD_WRAP;
+
+ ethoc_write_bd(dev, i, &bd);
+ bd.addr += ETHOC_BUFSIZ;
+
+ dev->vma[i] = vma;
+ vma += ETHOC_BUFSIZ;
+ }
+
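+ /* setup receive buffers; RX descriptors follow the TX descriptors */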
+ bd.stat = RX_BD_EMPTY | RX_BD_IRQ;
+
+ for (i = 0; i < dev->num_rx; i++) {
+ if (i == dev->num_rx - 1)
+ bd.stat |= RX_BD_WRAP;
+
+ ethoc_write_bd(dev, dev->num_tx + i, &bd);
+ bd.addr += ETHOC_BUFSIZ;
+
+ dev->vma[dev->num_tx + i] = vma;
+ vma += ETHOC_BUFSIZ;
+ }
+
+ return 0;
+}
+
+static int ethoc_reset(struct ethoc *dev)
+{
+ u32 mode;
+
+ /* TODO: reset controller? */
+
+ ethoc_disable_rx_and_tx(dev);
+
+ /* TODO: setup registers */
+
+ /* enable FCS generation and automatic padding */
+ mode = ethoc_read(dev, MODER);
+ mode |= MODER_CRC | MODER_PAD;
+ ethoc_write(dev, MODER, mode);
+
+ /* set full-duplex mode */
+ mode = ethoc_read(dev, MODER);
+ mode |= MODER_FULLD;
+ ethoc_write(dev, MODER, mode);
+ ethoc_write(dev, IPGT, 0x15);
+
+ ethoc_ack_irq(dev, INT_MASK_ALL);
+ ethoc_enable_irq(dev, INT_MASK_ALL);
+ ethoc_enable_rx_and_tx(dev);
+ return 0;
+}
+
+static unsigned int ethoc_update_rx_stats(struct ethoc *dev,
+ struct ethoc_bd *bd)
+{
+ struct net_device *netdev = dev->netdev;
+ unsigned int ret = 0;
+
+ if (bd->stat & RX_BD_TL) {
+ dev_err(&netdev->dev, "RX: frame too long\n");
+ netdev->stats.rx_length_errors++;
+ ret++;
+ }
+
+ if (bd->stat & RX_BD_SF) {
+ dev_err(&netdev->dev, "RX: frame too short\n");
+ netdev->stats.rx_length_errors++;
+ ret++;
+ }
+
+ if (bd->stat & RX_BD_DN) {
+ dev_err(&netdev->dev, "RX: dribble nibble\n");
+ netdev->stats.rx_frame_errors++;
+ }
+
+ if (bd->stat & RX_BD_CRC) {
+ dev_err(&netdev->dev, "RX: wrong CRC\n");
+ netdev->stats.rx_crc_errors++;
+ ret++;
+ }
+
+ if (bd->stat & RX_BD_OR) {
+ dev_err(&netdev->dev, "RX: overrun\n");
+ netdev->stats.rx_over_errors++;
+ ret++;
+ }
+
+ if (bd->stat & RX_BD_MISS)
+ netdev->stats.rx_missed_errors++;
+
+ if (bd->stat & RX_BD_LC) {
+ dev_err(&netdev->dev, "RX: late collision\n");
+ netdev->stats.collisions++;
+ ret++;
+ }
+
+ return ret;
+}
+
+static int ethoc_rx(struct net_device *dev, int limit)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ int count;
+
+ for (count = 0; count < limit; ++count) {
+ unsigned int entry;
+ struct ethoc_bd bd;
+
+ entry = priv->num_tx + priv->cur_rx;
+ ethoc_read_bd(priv, entry, &bd);
+ if (bd.stat & RX_BD_EMPTY) {
+ ethoc_ack_irq(priv, INT_MASK_RX);
+ /* If packet (interrupt) came in between checking
+ * BD_EMPTY and clearing the interrupt source, then we
+ * risk missing the packet as the RX interrupt won't
+ * trigger right away when we reenable it; hence, check
+ * BD_EMPTY here again to make sure there isn't such a
+ * packet waiting for us...
+ */
+ ethoc_read_bd(priv, entry, &bd);
+ if (bd.stat & RX_BD_EMPTY)
+ break;
+ }
+
+ if (ethoc_update_rx_stats(priv, &bd) == 0) {
+ int size = bd.stat >> 16;
+ struct sk_buff *skb;
+
+ size -= 4; /* strip the CRC */
+ skb = netdev_alloc_skb_ip_align(dev, size);
+
+ if (likely(skb)) {
+ void *src = priv->vma[entry];
+ memcpy_fromio(skb_put(skb, size), src, size);
+ skb->protocol = eth_type_trans(skb, dev);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += size;
+ netif_receive_skb(skb);
+ } else {
+ if (net_ratelimit())
+ dev_warn(&dev->dev, "low on memory - "
+ "packet dropped\n");
+
+ dev->stats.rx_dropped++;
+ break;
+ }
+ }
+
+ /* clear the buffer descriptor so it can be reused */
+ bd.stat &= ~RX_BD_STATS;
+ bd.stat |= RX_BD_EMPTY;
+ ethoc_write_bd(priv, entry, &bd);
+ if (++priv->cur_rx == priv->num_rx)
+ priv->cur_rx = 0;
+ }
+
+ return count;
+}
+
+static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
+{
+ struct net_device *netdev = dev->netdev;
+
+ if (bd->stat & TX_BD_LC) {
+ dev_err(&netdev->dev, "TX: late collision\n");
+ netdev->stats.tx_window_errors++;
+ }
+
+ if (bd->stat & TX_BD_RL) {
+ dev_err(&netdev->dev, "TX: retransmit limit\n");
+ netdev->stats.tx_aborted_errors++;
+ }
+
+ if (bd->stat & TX_BD_UR) {
+ dev_err(&netdev->dev, "TX: underrun\n");
+ netdev->stats.tx_fifo_errors++;
+ }
+
+ if (bd->stat & TX_BD_CS) {
+ dev_err(&netdev->dev, "TX: carrier sense lost\n");
+ netdev->stats.tx_carrier_errors++;
+ }
+
+ if (bd->stat & TX_BD_STATS)
+ netdev->stats.tx_errors++;
+
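+ /* bits 7:4 of the BD status hold the retry count for this
+ * frame, bits 31:16 its length
+ */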
+ netdev->stats.collisions += (bd->stat >> 4) & 0xf;
+ netdev->stats.tx_bytes += bd->stat >> 16;
+ netdev->stats.tx_packets++;
+}
+
+static int ethoc_tx(struct net_device *dev, int limit)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ int count;
+ struct ethoc_bd bd;
+
+ for (count = 0; count < limit; ++count) {
+ unsigned int entry;
+
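+ /* num_tx is a power of two (see ethoc_probe), so the mask
+ * implements the ring wrap-around
+ */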
+ entry = priv->dty_tx & (priv->num_tx-1);
+
+ ethoc_read_bd(priv, entry, &bd);
+
+ if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
+ ethoc_ack_irq(priv, INT_MASK_TX);
+ /* If interrupt came in between reading in the BD
+ * and clearing the interrupt source, then we risk
+ * missing the event as the TX interrupt won't trigger
+ * right away when we reenable it; hence, check
+ * TX_BD_READY here again to make sure there isn't such an
+ * event pending...
+ */
+ ethoc_read_bd(priv, entry, &bd);
+ if (bd.stat & TX_BD_READY ||
+ (priv->dty_tx == priv->cur_tx))
+ break;
+ }
+
+ ethoc_update_tx_stats(priv, &bd);
+ priv->dty_tx++;
+ }
+
+ if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
+ netif_wake_queue(dev);
+
+ return count;
+}
+
+static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct ethoc *priv = netdev_priv(dev);
+ u32 pending;
+ u32 mask;
+
+ /* Figure out what triggered the interrupt...
+ * The tricky bit here is that the interrupt source bits get
+ * set in INT_SOURCE for an event regardless of whether that
+ * event is masked or not. Thus, in order to figure out what
+ * triggered the interrupt, we need to remove the sources
+ * for all events that are currently masked. This behaviour
+ * is not particularly well documented but reasonable...
+ */
+ mask = ethoc_read(priv, INT_MASK);
+ pending = ethoc_read(priv, INT_SOURCE);
+ pending &= mask;
+
+ if (unlikely(pending == 0))
+ return IRQ_NONE;
+
+ ethoc_ack_irq(priv, pending);
+
+ /* We always handle the dropped packet interrupt */
+ if (pending & INT_MASK_BUSY) {
+ dev_err(&dev->dev, "packet dropped\n");
+ dev->stats.rx_dropped++;
+ }
+
+ /* Handle receive/transmit event by switching to polling */
+ if (pending & (INT_MASK_TX | INT_MASK_RX)) {
+ ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+ napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
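+/* The hardware splits the MAC address across two registers: MAC_ADDR0
+ * holds the four low bytes, MAC_ADDR1 the two high bytes. For example,
+ * 00:11:22:33:44:55 is stored as MAC_ADDR0 = 0x22334455 and
+ * MAC_ADDR1 = 0x00000011.
+ */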
+static int ethoc_get_mac_address(struct net_device *dev, void *addr)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ u8 *mac = (u8 *)addr;
+ u32 reg;
+
+ reg = ethoc_read(priv, MAC_ADDR0);
+ mac[2] = (reg >> 24) & 0xff;
+ mac[3] = (reg >> 16) & 0xff;
+ mac[4] = (reg >> 8) & 0xff;
+ mac[5] = (reg >> 0) & 0xff;
+
+ reg = ethoc_read(priv, MAC_ADDR1);
+ mac[0] = (reg >> 8) & 0xff;
+ mac[1] = (reg >> 0) & 0xff;
+
+ return 0;
+}
+
+static int ethoc_poll(struct napi_struct *napi, int budget)
+{
+ struct ethoc *priv = container_of(napi, struct ethoc, napi);
+ int rx_work_done = 0;
+ int tx_work_done = 0;
+
+ rx_work_done = ethoc_rx(priv->netdev, budget);
+ tx_work_done = ethoc_tx(priv->netdev, budget);
+
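+ /* per the NAPI contract, interrupts may only be re-enabled
+ * once both directions have done less work than the budget
+ * allows; otherwise the core polls us again
+ */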
+ if (rx_work_done < budget && tx_work_done < budget) {
+ napi_complete(napi);
+ ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+ }
+
+ return rx_work_done;
+}
+
+static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct ethoc *priv = bus->priv;
+ int i;
+
+ ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
+ ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);
+
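+ /* poll for up to roughly a millisecond (5 * 100-200 us) for
+ * the MII management interface to go idle
+ */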
+ for (i = 0; i < 5; i++) {
+ u32 status = ethoc_read(priv, MIISTATUS);
+ if (!(status & MIISTATUS_BUSY)) {
+ u32 data = ethoc_read(priv, MIIRX_DATA);
+ /* reset MII command register */
+ ethoc_write(priv, MIICOMMAND, 0);
+ return data;
+ }
+ usleep_range(100, 200);
+ }
+
+ return -EBUSY;
+}
+
+static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct ethoc *priv = bus->priv;
+ int i;
+
+ ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
+ ethoc_write(priv, MIITX_DATA, val);
+ ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);
+
+ for (i = 0; i < 5; i++) {
+ u32 stat = ethoc_read(priv, MIISTATUS);
+ if (!(stat & MIISTATUS_BUSY)) {
+ /* reset MII command register */
+ ethoc_write(priv, MIICOMMAND, 0);
+ return 0;
+ }
+ usleep_range(100, 200);
+ }
+
+ return -EBUSY;
+}
+
+static int ethoc_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+static void ethoc_mdio_poll(struct net_device *dev)
+{
+}
+
+static int __devinit ethoc_mdio_probe(struct net_device *dev)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ struct phy_device *phy;
+ int err;
+
+ if (priv->phy_id != -1) {
+ phy = priv->mdio->phy_map[priv->phy_id];
+ } else {
+ phy = phy_find_first(priv->mdio);
+ }
+
+ if (!phy) {
+ dev_err(&dev->dev, "no PHY found\n");
+ return -ENXIO;
+ }
+
+ err = phy_connect_direct(dev, phy, ethoc_mdio_poll, 0,
+ PHY_INTERFACE_MODE_GMII);
+ if (err) {
+ dev_err(&dev->dev, "could not attach to PHY\n");
+ return err;
+ }
+
+ priv->phy = phy;
+ return 0;
+}
+
+static int ethoc_open(struct net_device *dev)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ int ret;
+
+ ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED,
+ dev->name, dev);
+ if (ret)
+ return ret;
+
+ ethoc_init_ring(priv, dev->mem_start);
+ ethoc_reset(priv);
+
+ if (netif_queue_stopped(dev)) {
+ dev_dbg(&dev->dev, "resuming queue\n");
+ netif_wake_queue(dev);
+ } else {
+ dev_dbg(&dev->dev, "starting queue\n");
+ netif_start_queue(dev);
+ }
+
+ phy_start(priv->phy);
+ napi_enable(&priv->napi);
+
+ if (netif_msg_ifup(priv)) {
+ dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
+ dev->base_addr, dev->mem_start, dev->mem_end);
+ }
+
+ return 0;
+}
+
+static int ethoc_stop(struct net_device *dev)
+{
+ struct ethoc *priv = netdev_priv(dev);
+
+ napi_disable(&priv->napi);
+
+ if (priv->phy)
+ phy_stop(priv->phy);
+
+ ethoc_disable_rx_and_tx(priv);
+ free_irq(dev->irq, dev);
+
+ if (!netif_queue_stopped(dev))
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ struct mii_ioctl_data *mdio = if_mii(ifr);
+ struct phy_device *phy = NULL;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (cmd != SIOCGMIIPHY) {
+ if (mdio->phy_id >= PHY_MAX_ADDR)
+ return -ERANGE;
+
+ phy = priv->mdio->phy_map[mdio->phy_id];
+ if (!phy)
+ return -ENODEV;
+ } else {
+ phy = priv->phy;
+ }
+
+ return phy_mii_ioctl(phy, ifr, cmd);
+}
+
+static int ethoc_config(struct net_device *dev, struct ifmap *map)
+{
+ return -ENOSYS;
+}
+
+static void ethoc_do_set_mac_address(struct net_device *dev)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ unsigned char *mac = dev->dev_addr;
+
+ ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
+ (mac[4] << 8) | (mac[5] << 0));
+ ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0));
+}
+
+static int ethoc_set_mac_address(struct net_device *dev, void *p)
+{
+ const struct sockaddr *addr = p;
+
+ /* ndo_set_mac_address passes a struct sockaddr, not raw MAC bytes */
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ ethoc_do_set_mac_address(dev);
+ return 0;
+}
+
+static void ethoc_set_multicast_list(struct net_device *dev)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ u32 mode = ethoc_read(priv, MODER);
+ struct netdev_hw_addr *ha;
+ u32 hash[2] = { 0, 0 };
+
+ /* set loopback mode if requested */
+ if (dev->flags & IFF_LOOPBACK)
+ mode |= MODER_LOOP;
+ else
+ mode &= ~MODER_LOOP;
+
+ /* receive broadcast frames if requested */
+ if (dev->flags & IFF_BROADCAST)
+ mode &= ~MODER_BRO;
+ else
+ mode |= MODER_BRO;
+
+ /* enable promiscuous mode if requested */
+ if (dev->flags & IFF_PROMISC)
+ mode |= MODER_PRO;
+ else
+ mode &= ~MODER_PRO;
+
+ ethoc_write(priv, MODER, mode);
+
+ /* receive multicast frames */
+ if (dev->flags & IFF_ALLMULTI) {
+ hash[0] = 0xffffffff;
+ hash[1] = 0xffffffff;
+ } else {
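+ /* 64-bit hash filter: the top six bits of the address CRC
+ * pick one of the 64 bits in ETH_HASH0/ETH_HASH1, e.g. a CRC
+ * with bits 31:26 == 0x2f sets bit 15 of ETH_HASH1
+ */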
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 crc = ether_crc(ETH_ALEN, ha->addr);
+ int bit = (crc >> 26) & 0x3f;
+ hash[bit >> 5] |= 1 << (bit & 0x1f);
+ }
+ }
+
+ ethoc_write(priv, ETH_HASH0, hash[0]);
+ ethoc_write(priv, ETH_HASH1, hash[1]);
+}
+
+static int ethoc_change_mtu(struct net_device *dev, int new_mtu)
+{
+ return -ENOSYS;
+}
+
+static void ethoc_tx_timeout(struct net_device *dev)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ u32 pending = ethoc_read(priv, INT_SOURCE);
+ if (likely(pending))
+ ethoc_interrupt(dev->irq, dev);
+}
+
+static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ struct ethoc_bd bd;
+ unsigned int entry;
+ void *dest;
+
+ if (unlikely(skb->len > ETHOC_BUFSIZ)) {
+ dev->stats.tx_errors++;
+ goto out;
+ }
+
+ entry = priv->cur_tx % priv->num_tx;
+ spin_lock_irq(&priv->lock);
+ priv->cur_tx++;
+
+ ethoc_read_bd(priv, entry, &bd);
+ if (unlikely(skb->len < ETHOC_ZLEN))
+ bd.stat |= TX_BD_PAD;
+ else
+ bd.stat &= ~TX_BD_PAD;
+
+ dest = priv->vma[entry];
+ memcpy_toio(dest, skb->data, skb->len);
+
+ bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
+ bd.stat |= TX_BD_LEN(skb->len);
+ ethoc_write_bd(priv, entry, &bd);
+
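+ /* commit the length and cleared stats first, then set READY
+ * in a second write so the MAC never picks up a descriptor
+ * with a stale length field
+ */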
+ bd.stat |= TX_BD_READY;
+ ethoc_write_bd(priv, entry, &bd);
+
+ if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) {
+ dev_dbg(&dev->dev, "stopping queue\n");
+ netif_stop_queue(dev);
+ }
+
+ spin_unlock_irq(&priv->lock);
+ skb_tx_timestamp(skb);
+out:
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops ethoc_netdev_ops = {
+ .ndo_open = ethoc_open,
+ .ndo_stop = ethoc_stop,
+ .ndo_do_ioctl = ethoc_ioctl,
+ .ndo_set_config = ethoc_config,
+ .ndo_set_mac_address = ethoc_set_mac_address,
+ .ndo_set_rx_mode = ethoc_set_multicast_list,
+ .ndo_change_mtu = ethoc_change_mtu,
+ .ndo_tx_timeout = ethoc_tx_timeout,
+ .ndo_start_xmit = ethoc_start_xmit,
+};
+
+/**
+ * ethoc_probe() - initialize OpenCores ethernet MAC
+ * @pdev: platform device
+ */
+static int __devinit ethoc_probe(struct platform_device *pdev)
+{
+ struct net_device *netdev = NULL;
+ struct resource *res = NULL;
+ struct resource *mmio = NULL;
+ struct resource *mem = NULL;
+ struct ethoc *priv = NULL;
+ unsigned int phy;
+ int num_bd;
+ int ret = 0;
+
+ /* allocate networking device */
+ netdev = alloc_etherdev(sizeof(struct ethoc));
+ if (!netdev) {
+ dev_err(&pdev->dev, "cannot allocate network device\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ platform_set_drvdata(pdev, netdev);
+
+ /* obtain I/O memory space */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "cannot obtain I/O memory space\n");
+ ret = -ENXIO;
+ goto free;
+ }
+
+ mmio = devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), res->name);
+ if (!mmio) {
+ dev_err(&pdev->dev, "cannot request I/O memory space\n");
+ ret = -ENXIO;
+ goto free;
+ }
+
+ netdev->base_addr = mmio->start;
+
+ /* obtain buffer memory space */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ mem = devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), res->name);
+ if (!mem) {
+ dev_err(&pdev->dev, "cannot request memory space\n");
+ ret = -ENXIO;
+ goto free;
+ }
+
+ netdev->mem_start = mem->start;
+ netdev->mem_end = mem->end;
+ }
+
+ /* obtain device IRQ number */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "cannot obtain IRQ\n");
+ ret = -ENXIO;
+ goto free;
+ }
+
+ netdev->irq = res->start;
+
+ /* setup driver-private data */
+ priv = netdev_priv(netdev);
+ priv->netdev = netdev;
+ priv->dma_alloc = 0;
+ priv->io_region_size = resource_size(mmio);
+
+ priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
+ resource_size(mmio));
+ if (!priv->iobase) {
+ dev_err(&pdev->dev, "cannot remap I/O memory space\n");
+ ret = -ENXIO;
+ goto free;
+ }
+
+ if (netdev->mem_end) {
+ priv->membase = devm_ioremap_nocache(&pdev->dev,
+ netdev->mem_start, resource_size(mem));
+ if (!priv->membase) {
+ dev_err(&pdev->dev, "cannot remap memory space\n");
+ ret = -ENXIO;
+ goto free;
+ }
+ } else {
+ /* Allocate buffer memory */
+ priv->membase = dmam_alloc_coherent(&pdev->dev,
+ buffer_size, (void *)&netdev->mem_start,
+ GFP_KERNEL);
+ if (!priv->membase) {
+ dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
+ buffer_size);
+ ret = -ENOMEM;
+ goto free;
+ }
+ netdev->mem_end = netdev->mem_start + buffer_size;
+ priv->dma_alloc = buffer_size;
+ }
+
+ /* calculate the number of TX/RX buffers, maximum 128 supported */
+ num_bd = min_t(unsigned int,
+ 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
+ if (num_bd < 4) {
+ ret = -ENODEV;
+ goto free;
+ }
+ /* num_tx must be a power of two */
+ priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
+ priv->num_rx = num_bd - priv->num_tx;
+
+ dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
+ priv->num_tx, priv->num_rx);
+
+ priv->vma = devm_kzalloc(&pdev->dev, num_bd * sizeof(void *), GFP_KERNEL);
+ if (!priv->vma) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ /* Allow the platform setup code to pass in a MAC address. */
+ if (pdev->dev.platform_data) {
+ struct ethoc_platform_data *pdata = pdev->dev.platform_data;
+ memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
+ priv->phy_id = pdata->phy_id;
+ } else {
+ priv->phy_id = -1;
+
+#ifdef CONFIG_OF
+ {
+ const u8 *mac;
+
+ mac = of_get_property(pdev->dev.of_node,
+ "local-mac-address",
+ NULL);
+ if (mac)
+ memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
+ }
+#endif
+ }
+
+ /* Check that the given MAC address is valid. If it isn't, read the
+ * current MAC from the controller. */
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ ethoc_get_mac_address(netdev, netdev->dev_addr);
+
+ /* Check the MAC again for validity, if it still isn't choose and
+ * program a random one. */
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ random_ether_addr(netdev->dev_addr);
+
+ ethoc_do_set_mac_address(netdev);
+
+ /* register MII bus */
+ priv->mdio = mdiobus_alloc();
+ if (!priv->mdio) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ priv->mdio->name = "ethoc-mdio";
+ snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%d",
+ priv->mdio->name, pdev->id);
+ priv->mdio->read = ethoc_mdio_read;
+ priv->mdio->write = ethoc_mdio_write;
+ priv->mdio->reset = ethoc_mdio_reset;
+ priv->mdio->priv = priv;
+
+ priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!priv->mdio->irq) {
+ ret = -ENOMEM;
+ goto free_mdio;
+ }
+
+ for (phy = 0; phy < PHY_MAX_ADDR; phy++)
+ priv->mdio->irq[phy] = PHY_POLL;
+
+ ret = mdiobus_register(priv->mdio);
+ if (ret) {
+ dev_err(&netdev->dev, "failed to register MDIO bus\n");
+ goto free_mdio;
+ }
+
+ ret = ethoc_mdio_probe(netdev);
+ if (ret) {
+ dev_err(&netdev->dev, "failed to probe MDIO bus\n");
+ goto error;
+ }
+
+ ether_setup(netdev);
+
+ /* setup the net_device structure */
+ netdev->netdev_ops = &ethoc_netdev_ops;
+ netdev->watchdog_timeo = ETHOC_TIMEOUT;
+ netdev->features |= 0;
+
+ /* setup NAPI */
+ netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
+
+ spin_lock_init(&priv->lock);
+
+ ret = register_netdev(netdev);
+ if (ret < 0) {
+ dev_err(&netdev->dev, "failed to register interface\n");
+ goto error2;
+ }
+
+ goto out;
+
+error2:
+ netif_napi_del(&priv->napi);
+error:
+ mdiobus_unregister(priv->mdio);
+free_mdio:
+ kfree(priv->mdio->irq);
+ mdiobus_free(priv->mdio);
+free:
+ free_netdev(netdev);
+out:
+ return ret;
+}
+
+/**
+ * ethoc_remove() - shutdown OpenCores ethernet MAC
+ * @pdev: platform device
+ */
+static int __devexit ethoc_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (netdev) {
+ struct ethoc *priv = netdev_priv(netdev);
+
+ netif_napi_del(&priv->napi);
+ phy_disconnect(priv->phy);
+ priv->phy = NULL;
+
+ if (priv->mdio) {
+ mdiobus_unregister(priv->mdio);
+ kfree(priv->mdio->irq);
+ mdiobus_free(priv->mdio);
+ }
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ethoc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return -ENOSYS;
+}
+
+static int ethoc_resume(struct platform_device *pdev)
+{
+ return -ENOSYS;
+}
+#else
+# define ethoc_suspend NULL
+# define ethoc_resume NULL
+#endif
+
+static const struct of_device_id ethoc_match[] = {
+ { .compatible = "opencores,ethoc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ethoc_match);
+
+static struct platform_driver ethoc_driver = {
+ .probe = ethoc_probe,
+ .remove = __devexit_p(ethoc_remove),
+ .suspend = ethoc_suspend,
+ .resume = ethoc_resume,
+ .driver = {
+ .name = "ethoc",
+ .owner = THIS_MODULE,
+ .of_match_table = ethoc_match,
+ },
+};
+
+static int __init ethoc_init(void)
+{
+ return platform_driver_register(&ethoc_driver);
+}
+
+static void __exit ethoc_exit(void)
+{
+ platform_driver_unregister(&ethoc_driver);
+}
+
+module_init(ethoc_init);
+module_exit(ethoc_exit);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
new file mode 100644
index 000000000000..b8974b9e3b47
--- /dev/null
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -0,0 +1,40 @@
+#
+# Faraday device configuration
+#
+
+config NET_VENDOR_FARADAY
+ bool "Faraday devices"
+ default y
+ depends on ARM
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Faraday cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_FARADAY
+
+config FTMAC100
+ tristate "Faraday FTMAC100 10/100 Ethernet support"
+ depends on ARM
+ select NET_CORE
+ select MII
+ ---help---
+ This driver supports the FTMAC100 10/100 Ethernet controller
+ from Faraday. It is used on Faraday A320, Andes AG101 and some
+ other ARM/NDS32 SoCs.
+
+config FTGMAC100
+ tristate "Faraday FTGMAC100 Gigabit Ethernet support"
+ depends on ARM
+ select PHYLIB
+ ---help---
+ This driver supports the FTGMAC100 Gigabit Ethernet controller
+ from Faraday. It is used on Faraday A369, Andes AG102 and some
+ other ARM/NDS32 SoCs.
+
+endif # NET_VENDOR_FARADAY
diff --git a/drivers/net/ethernet/faraday/Makefile b/drivers/net/ethernet/faraday/Makefile
new file mode 100644
index 000000000000..408b53980d53
--- /dev/null
+++ b/drivers/net/ethernet/faraday/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Faraday device drivers.
+#
+
+obj-$(CONFIG_FTGMAC100) += ftgmac100.o
+obj-$(CONFIG_FTMAC100) += ftmac100.o
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
new file mode 100644
index 000000000000..54709af917e9
--- /dev/null
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -0,0 +1,1365 @@
+/*
+ * Faraday FTGMAC100 Gigabit Ethernet
+ *
+ * (C) Copyright 2009-2011 Faraday Technology
+ * Po-Yu Chuang <ratbert@faraday-tech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <net/ip.h>
+
+#include "ftgmac100.h"
+
+#define DRV_NAME "ftgmac100"
+#define DRV_VERSION "0.7"
+
+#define RX_QUEUE_ENTRIES 256 /* must be power of 2 */
+#define TX_QUEUE_ENTRIES 512 /* must be power of 2 */
+
+#define MAX_PKT_SIZE 1518
+#define RX_BUF_SIZE PAGE_SIZE /* must be smaller than 0x3fff */
+
+/******************************************************************************
+ * private data
+ *****************************************************************************/
+struct ftgmac100_descs {
+ struct ftgmac100_rxdes rxdes[RX_QUEUE_ENTRIES];
+ struct ftgmac100_txdes txdes[TX_QUEUE_ENTRIES];
+};
+
+struct ftgmac100 {
+ struct resource *res;
+ void __iomem *base;
+ int irq;
+
+ struct ftgmac100_descs *descs;
+ dma_addr_t descs_dma_addr;
+
+ unsigned int rx_pointer;
+ unsigned int tx_clean_pointer;
+ unsigned int tx_pointer;
+ unsigned int tx_pending;
+
+ spinlock_t tx_lock;
+
+ struct net_device *netdev;
+ struct device *dev;
+ struct napi_struct napi;
+
+ struct mii_bus *mii_bus;
+ int phy_irq[PHY_MAX_ADDR];
+ struct phy_device *phydev;
+ int old_speed;
+};
+
+static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes, gfp_t gfp);
+
+/******************************************************************************
+ * internal functions (hardware register access)
+ *****************************************************************************/
+#define INT_MASK_ALL_ENABLED (FTGMAC100_INT_RPKT_LOST | \
+ FTGMAC100_INT_XPKT_ETH | \
+ FTGMAC100_INT_XPKT_LOST | \
+ FTGMAC100_INT_AHB_ERR | \
+ FTGMAC100_INT_PHYSTS_CHG | \
+ FTGMAC100_INT_RPKT_BUF | \
+ FTGMAC100_INT_NO_RXBUF)
+
+static void ftgmac100_set_rx_ring_base(struct ftgmac100 *priv, dma_addr_t addr)
+{
+ iowrite32(addr, priv->base + FTGMAC100_OFFSET_RXR_BADR);
+}
+
+static void ftgmac100_set_rx_buffer_size(struct ftgmac100 *priv,
+ unsigned int size)
+{
+ size = FTGMAC100_RBSR_SIZE(size);
+ iowrite32(size, priv->base + FTGMAC100_OFFSET_RBSR);
+}
+
+static void ftgmac100_set_normal_prio_tx_ring_base(struct ftgmac100 *priv,
+ dma_addr_t addr)
+{
+ iowrite32(addr, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);
+}
+
+static void ftgmac100_txdma_normal_prio_start_polling(struct ftgmac100 *priv)
+{
+ iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);
+}
+
+static int ftgmac100_reset_hw(struct ftgmac100 *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ int i;
+
+ /* NOTE: reset clears all registers */
+ iowrite32(FTGMAC100_MACCR_SW_RST, priv->base + FTGMAC100_OFFSET_MACCR);
+ for (i = 0; i < 5; i++) {
+ unsigned int maccr;
+
+ maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
+ if (!(maccr & FTGMAC100_MACCR_SW_RST))
+ return 0;
+
+ udelay(1000);
+ }
+
+ netdev_err(netdev, "software reset failed\n");
+ return -EIO;
+}
+
+static void ftgmac100_set_mac(struct ftgmac100 *priv, const unsigned char *mac)
+{
+ unsigned int maddr = mac[0] << 8 | mac[1];
+ unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
+
+ iowrite32(maddr, priv->base + FTGMAC100_OFFSET_MAC_MADR);
+ iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
+}
+
+static void ftgmac100_init_hw(struct ftgmac100 *priv)
+{
+ /* setup ring buffer base registers */
+ ftgmac100_set_rx_ring_base(priv,
+ priv->descs_dma_addr +
+ offsetof(struct ftgmac100_descs, rxdes));
+ ftgmac100_set_normal_prio_tx_ring_base(priv,
+ priv->descs_dma_addr +
+ offsetof(struct ftgmac100_descs, txdes));
+
+ ftgmac100_set_rx_buffer_size(priv, RX_BUF_SIZE);
+
+ iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1), priv->base + FTGMAC100_OFFSET_APTC);
+
+ ftgmac100_set_mac(priv, priv->netdev->dev_addr);
+}
+
+#define MACCR_ENABLE_ALL (FTGMAC100_MACCR_TXDMA_EN | \
+ FTGMAC100_MACCR_RXDMA_EN | \
+ FTGMAC100_MACCR_TXMAC_EN | \
+ FTGMAC100_MACCR_RXMAC_EN | \
+ FTGMAC100_MACCR_FULLDUP | \
+ FTGMAC100_MACCR_CRC_APD | \
+ FTGMAC100_MACCR_RX_RUNT | \
+ FTGMAC100_MACCR_RX_BROADPKT)
+
+static void ftgmac100_start_hw(struct ftgmac100 *priv, int speed)
+{
+ int maccr = MACCR_ENABLE_ALL;
+
+ switch (speed) {
+ default:
+ case 10:
+ break;
+
+ case 100:
+ maccr |= FTGMAC100_MACCR_FAST_MODE;
+ break;
+
+ case 1000:
+ maccr |= FTGMAC100_MACCR_GIGA_MODE;
+ break;
+ }
+
+ iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
+}
+
+static void ftgmac100_stop_hw(struct ftgmac100 *priv)
+{
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR);
+}
+
+/******************************************************************************
+ * internal functions (receive descriptor)
+ *****************************************************************************/
+static bool ftgmac100_rxdes_first_segment(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_FRS);
+}
+
+static bool ftgmac100_rxdes_last_segment(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_LRS);
+}
+
+static bool ftgmac100_rxdes_packet_ready(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY);
+}
+
+static void ftgmac100_rxdes_set_dma_own(struct ftgmac100_rxdes *rxdes)
+{
+ /* clear all status bits but end-of-ring, returning the descriptor to the DMA engine */
+ rxdes->rxdes0 &= cpu_to_le32(FTGMAC100_RXDES0_EDORR);
+}
+
+static bool ftgmac100_rxdes_rx_error(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RX_ERR);
+}
+
+static bool ftgmac100_rxdes_crc_error(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_CRC_ERR);
+}
+
+static bool ftgmac100_rxdes_frame_too_long(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_FTL);
+}
+
+static bool ftgmac100_rxdes_runt(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RUNT);
+}
+
+static bool ftgmac100_rxdes_odd_nibble(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RX_ODD_NB);
+}
+
+static unsigned int ftgmac100_rxdes_data_length(struct ftgmac100_rxdes *rxdes)
+{
+ return le32_to_cpu(rxdes->rxdes0) & FTGMAC100_RXDES0_VDBC;
+}
+
+static bool ftgmac100_rxdes_multicast(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_MULTICAST);
+}
+
+static void ftgmac100_rxdes_set_end_of_ring(struct ftgmac100_rxdes *rxdes)
+{
+ rxdes->rxdes0 |= cpu_to_le32(FTGMAC100_RXDES0_EDORR);
+}
+
+static void ftgmac100_rxdes_set_dma_addr(struct ftgmac100_rxdes *rxdes,
+ dma_addr_t addr)
+{
+ rxdes->rxdes3 = cpu_to_le32(addr);
+}
+
+static dma_addr_t ftgmac100_rxdes_get_dma_addr(struct ftgmac100_rxdes *rxdes)
+{
+ return le32_to_cpu(rxdes->rxdes3);
+}
+
+static bool ftgmac100_rxdes_is_tcp(struct ftgmac100_rxdes *rxdes)
+{
+ return (rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_PROT_MASK)) ==
+ cpu_to_le32(FTGMAC100_RXDES1_PROT_TCPIP);
+}
+
+static bool ftgmac100_rxdes_is_udp(struct ftgmac100_rxdes *rxdes)
+{
+ return (rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_PROT_MASK)) ==
+ cpu_to_le32(FTGMAC100_RXDES1_PROT_UDPIP);
+}
+
+static bool ftgmac100_rxdes_tcpcs_err(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_TCP_CHKSUM_ERR);
+}
+
+static bool ftgmac100_rxdes_udpcs_err(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_UDP_CHKSUM_ERR);
+}
+
+static bool ftgmac100_rxdes_ipcs_err(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_IP_CHKSUM_ERR);
+}
+
+/*
+ * rxdes2 is not used by hardware. We use it to keep track of the page.
+ * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
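+ * Note that storing the pointer in a 32-bit descriptor field assumes
+ * a 32-bit platform; that holds for the ARM SoCs this driver targets.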
+ */
+static void ftgmac100_rxdes_set_page(struct ftgmac100_rxdes *rxdes, struct page *page)
+{
+ rxdes->rxdes2 = (unsigned int)page;
+}
+
+static struct page *ftgmac100_rxdes_get_page(struct ftgmac100_rxdes *rxdes)
+{
+ return (struct page *)rxdes->rxdes2;
+}
+
+/******************************************************************************
+ * internal functions (receive)
+ *****************************************************************************/
+static int ftgmac100_next_rx_pointer(int pointer)
+{
+ return (pointer + 1) & (RX_QUEUE_ENTRIES - 1);
+}
+
+static void ftgmac100_rx_pointer_advance(struct ftgmac100 *priv)
+{
+ priv->rx_pointer = ftgmac100_next_rx_pointer(priv->rx_pointer);
+}
+
+static struct ftgmac100_rxdes *ftgmac100_current_rxdes(struct ftgmac100 *priv)
+{
+ return &priv->descs->rxdes[priv->rx_pointer];
+}
+
+static struct ftgmac100_rxdes *
+ftgmac100_rx_locate_first_segment(struct ftgmac100 *priv)
+{
+ struct ftgmac100_rxdes *rxdes = ftgmac100_current_rxdes(priv);
+
+ while (ftgmac100_rxdes_packet_ready(rxdes)) {
+ if (ftgmac100_rxdes_first_segment(rxdes))
+ return rxdes;
+
+ ftgmac100_rxdes_set_dma_own(rxdes);
+ ftgmac100_rx_pointer_advance(priv);
+ rxdes = ftgmac100_current_rxdes(priv);
+ }
+
+ return NULL;
+}
+
+static bool ftgmac100_rx_packet_error(struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes)
+{
+ struct net_device *netdev = priv->netdev;
+ bool error = false;
+
+ if (unlikely(ftgmac100_rxdes_rx_error(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx err\n");
+
+ netdev->stats.rx_errors++;
+ error = true;
+ }
+
+ if (unlikely(ftgmac100_rxdes_crc_error(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx crc err\n");
+
+ netdev->stats.rx_crc_errors++;
+ error = true;
+ } else if (unlikely(ftgmac100_rxdes_ipcs_err(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx IP checksum err\n");
+
+ error = true;
+ }
+
+ if (unlikely(ftgmac100_rxdes_frame_too_long(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx frame too long\n");
+
+ netdev->stats.rx_length_errors++;
+ error = true;
+ } else if (unlikely(ftgmac100_rxdes_runt(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx runt\n");
+
+ netdev->stats.rx_length_errors++;
+ error = true;
+ } else if (unlikely(ftgmac100_rxdes_odd_nibble(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx odd nibble\n");
+
+ netdev->stats.rx_length_errors++;
+ error = true;
+ }
+
+ return error;
+}
+
+static void ftgmac100_rx_drop_packet(struct ftgmac100 *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct ftgmac100_rxdes *rxdes = ftgmac100_current_rxdes(priv);
+ bool done = false;
+
+ if (net_ratelimit())
+ netdev_dbg(netdev, "drop packet %p\n", rxdes);
+
+ do {
+ if (ftgmac100_rxdes_last_segment(rxdes))
+ done = true;
+
+ ftgmac100_rxdes_set_dma_own(rxdes);
+ ftgmac100_rx_pointer_advance(priv);
+ rxdes = ftgmac100_current_rxdes(priv);
+ } while (!done && ftgmac100_rxdes_packet_ready(rxdes));
+
+ netdev->stats.rx_dropped++;
+}
+
+static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
+{
+ struct net_device *netdev = priv->netdev;
+ struct ftgmac100_rxdes *rxdes;
+ struct sk_buff *skb;
+ bool done = false;
+
+ rxdes = ftgmac100_rx_locate_first_segment(priv);
+ if (!rxdes)
+ return false;
+
+ if (unlikely(ftgmac100_rx_packet_error(priv, rxdes))) {
+ ftgmac100_rx_drop_packet(priv);
+ return true;
+ }
+
+ /* start processing */
+ skb = netdev_alloc_skb_ip_align(netdev, 128);
+ if (unlikely(!skb)) {
+ if (net_ratelimit())
+ netdev_err(netdev, "rx skb alloc failed\n");
+
+ ftgmac100_rx_drop_packet(priv);
+ return true;
+ }
+
+ if (unlikely(ftgmac100_rxdes_multicast(rxdes)))
+ netdev->stats.multicast++;
+
+ /*
+ * It seems that HW does checksum incorrectly with fragmented packets,
+ * so we are conservative here - if HW checksum error, let software do
+ * the checksum again.
+ */
+ if ((ftgmac100_rxdes_is_tcp(rxdes) && !ftgmac100_rxdes_tcpcs_err(rxdes)) ||
+ (ftgmac100_rxdes_is_udp(rxdes) && !ftgmac100_rxdes_udpcs_err(rxdes)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ do {
+ dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
+ struct page *page = ftgmac100_rxdes_get_page(rxdes);
+ unsigned int size;
+
+ dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+ size = ftgmac100_rxdes_data_length(rxdes);
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0, size);
+
+ skb->len += size;
+ skb->data_len += size;
+ skb->truesize += size;
+
+ if (ftgmac100_rxdes_last_segment(rxdes))
+ done = true;
+
+ ftgmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);
+
+ ftgmac100_rx_pointer_advance(priv);
+ rxdes = ftgmac100_current_rxdes(priv);
+ } while (!done);
+
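+ /* pull the initial bytes (typically the headers) into the
+ * linear part of the skb
+ */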
+ __pskb_pull_tail(skb, min(skb->len, 64U));
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += skb->len;
+
+ /* push packet to protocol stack */
+ napi_gro_receive(&priv->napi, skb);
+
+ (*processed)++;
+ return true;
+}
+
+/******************************************************************************
+ * internal functions (transmit descriptor)
+ *****************************************************************************/
+static void ftgmac100_txdes_reset(struct ftgmac100_txdes *txdes)
+{
+ /* clear all except end of ring bit */
+ txdes->txdes0 &= cpu_to_le32(FTGMAC100_TXDES0_EDOTR);
+ txdes->txdes1 = 0;
+ txdes->txdes2 = 0;
+ txdes->txdes3 = 0;
+}
+
+static bool ftgmac100_txdes_owned_by_dma(struct ftgmac100_txdes *txdes)
+{
+ return txdes->txdes0 & cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN);
+}
+
+static void ftgmac100_txdes_set_dma_own(struct ftgmac100_txdes *txdes)
+{
+ /*
+ * Make sure dma own bit will not be set before any other
+ * descriptor fields.
+ */
+ wmb();
+ txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN);
+}
+
+static void ftgmac100_txdes_set_end_of_ring(struct ftgmac100_txdes *txdes)
+{
+ txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_EDOTR);
+}
+
+static void ftgmac100_txdes_set_first_segment(struct ftgmac100_txdes *txdes)
+{
+ txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_FTS);
+}
+
+static void ftgmac100_txdes_set_last_segment(struct ftgmac100_txdes *txdes)
+{
+ txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_LTS);
+}
+
+static void ftgmac100_txdes_set_buffer_size(struct ftgmac100_txdes *txdes,
+ unsigned int len)
+{
+ txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXBUF_SIZE(len));
+}
+
+static void ftgmac100_txdes_set_txint(struct ftgmac100_txdes *txdes)
+{
+ txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_TXIC);
+}
+
+static void ftgmac100_txdes_set_tcpcs(struct ftgmac100_txdes *txdes)
+{
+ txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_TCP_CHKSUM);
+}
+
+static void ftgmac100_txdes_set_udpcs(struct ftgmac100_txdes *txdes)
+{
+ txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_UDP_CHKSUM);
+}
+
+static void ftgmac100_txdes_set_ipcs(struct ftgmac100_txdes *txdes)
+{
+ txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_IP_CHKSUM);
+}
+
+static void ftgmac100_txdes_set_dma_addr(struct ftgmac100_txdes *txdes,
+ dma_addr_t addr)
+{
+ txdes->txdes3 = cpu_to_le32(addr);
+}
+
+static dma_addr_t ftgmac100_txdes_get_dma_addr(struct ftgmac100_txdes *txdes)
+{
+ return le32_to_cpu(txdes->txdes3);
+}
+
+/*
+ * txdes2 is not used by hardware. We use it to keep track of the socket buffer.
+ * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
+ */
+static void ftgmac100_txdes_set_skb(struct ftgmac100_txdes *txdes,
+ struct sk_buff *skb)
+{
+ txdes->txdes2 = (unsigned int)skb;
+}
+
+static struct sk_buff *ftgmac100_txdes_get_skb(struct ftgmac100_txdes *txdes)
+{
+ return (struct sk_buff *)txdes->txdes2;
+}
+
+/******************************************************************************
+ * internal functions (transmit)
+ *****************************************************************************/
+static int ftgmac100_next_tx_pointer(int pointer)
+{
+ return (pointer + 1) & (TX_QUEUE_ENTRIES - 1);
+}
+
+static void ftgmac100_tx_pointer_advance(struct ftgmac100 *priv)
+{
+ priv->tx_pointer = ftgmac100_next_tx_pointer(priv->tx_pointer);
+}
+
+static void ftgmac100_tx_clean_pointer_advance(struct ftgmac100 *priv)
+{
+ priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv->tx_clean_pointer);
+}
+
+static struct ftgmac100_txdes *ftgmac100_current_txdes(struct ftgmac100 *priv)
+{
+ return &priv->descs->txdes[priv->tx_pointer];
+}
+
+static struct ftgmac100_txdes *
+ftgmac100_current_clean_txdes(struct ftgmac100 *priv)
+{
+ return &priv->descs->txdes[priv->tx_clean_pointer];
+}
+
+static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct ftgmac100_txdes *txdes;
+ struct sk_buff *skb;
+ dma_addr_t map;
+
+ if (priv->tx_pending == 0)
+ return false;
+
+ txdes = ftgmac100_current_clean_txdes(priv);
+
+ if (ftgmac100_txdes_owned_by_dma(txdes))
+ return false;
+
+ skb = ftgmac100_txdes_get_skb(txdes);
+ map = ftgmac100_txdes_get_dma_addr(txdes);
+
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+
+ dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
+
+ dev_kfree_skb(skb);
+
+ ftgmac100_txdes_reset(txdes);
+
+ ftgmac100_tx_clean_pointer_advance(priv);
+
+ spin_lock(&priv->tx_lock);
+ priv->tx_pending--;
+ spin_unlock(&priv->tx_lock);
+ netif_wake_queue(netdev);
+
+ return true;
+}
+
+static void ftgmac100_tx_complete(struct ftgmac100 *priv)
+{
+ while (ftgmac100_tx_complete_packet(priv))
+ ;
+}
+
+static int ftgmac100_xmit(struct ftgmac100 *priv, struct sk_buff *skb,
+ dma_addr_t map)
+{
+ struct net_device *netdev = priv->netdev;
+ struct ftgmac100_txdes *txdes;
+ unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
+
+ txdes = ftgmac100_current_txdes(priv);
+ ftgmac100_tx_pointer_advance(priv);
+
+ /* setup TX descriptor */
+ ftgmac100_txdes_set_skb(txdes, skb);
+ ftgmac100_txdes_set_dma_addr(txdes, map);
+ ftgmac100_txdes_set_buffer_size(txdes, len);
+
+ ftgmac100_txdes_set_first_segment(txdes);
+ ftgmac100_txdes_set_last_segment(txdes);
+ ftgmac100_txdes_set_txint(txdes);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ __be16 protocol = skb->protocol;
+
+ if (protocol == cpu_to_be16(ETH_P_IP)) {
+ u8 ip_proto = ip_hdr(skb)->protocol;
+
+ ftgmac100_txdes_set_ipcs(txdes);
+ if (ip_proto == IPPROTO_TCP)
+ ftgmac100_txdes_set_tcpcs(txdes);
+ else if (ip_proto == IPPROTO_UDP)
+ ftgmac100_txdes_set_udpcs(txdes);
+ }
+ }
+
+ spin_lock(&priv->tx_lock);
+ priv->tx_pending++;
+ if (priv->tx_pending == TX_QUEUE_ENTRIES)
+ netif_stop_queue(netdev);
+
+ /* start transmit */
+ ftgmac100_txdes_set_dma_own(txdes);
+ spin_unlock(&priv->tx_lock);
+
+ ftgmac100_txdma_normal_prio_start_polling(priv);
+
+ return NETDEV_TX_OK;
+}
+
+/******************************************************************************
+ * internal functions (buffer)
+ *****************************************************************************/
+static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes, gfp_t gfp)
+{
+ struct net_device *netdev = priv->netdev;
+ struct page *page;
+ dma_addr_t map;
+
+ page = alloc_page(gfp);
+ if (!page) {
+ if (net_ratelimit())
+ netdev_err(netdev, "failed to allocate rx page\n");
+ return -ENOMEM;
+ }
+
+ map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, map))) {
+ if (net_ratelimit())
+ netdev_err(netdev, "failed to map rx page\n");
+ __free_page(page);
+ return -ENOMEM;
+ }
+
+ ftgmac100_rxdes_set_page(rxdes, page);
+ ftgmac100_rxdes_set_dma_addr(rxdes, map);
+ ftgmac100_rxdes_set_dma_own(rxdes);
+ return 0;
+}
+
+static void ftgmac100_free_buffers(struct ftgmac100 *priv)
+{
+ int i;
+
+ for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+ struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
+ struct page *page = ftgmac100_rxdes_get_page(rxdes);
+ dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
+
+ if (!page)
+ continue;
+
+ dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+ __free_page(page);
+ }
+
+ for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
+ struct ftgmac100_txdes *txdes = &priv->descs->txdes[i];
+ struct sk_buff *skb = ftgmac100_txdes_get_skb(txdes);
+ dma_addr_t map = ftgmac100_txdes_get_dma_addr(txdes);
+
+ if (!skb)
+ continue;
+
+ dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
+ }
+
+ dma_free_coherent(priv->dev, sizeof(struct ftgmac100_descs),
+ priv->descs, priv->descs_dma_addr);
+}
+
+static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
+{
+ int i;
+
+ priv->descs = dma_alloc_coherent(priv->dev,
+ sizeof(struct ftgmac100_descs),
+ &priv->descs_dma_addr, GFP_KERNEL);
+ if (!priv->descs)
+ return -ENOMEM;
+
+ memset(priv->descs, 0, sizeof(struct ftgmac100_descs));
+
+ /* initialize RX ring */
+ ftgmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
+
+ for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+ struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
+
+ if (ftgmac100_alloc_rx_page(priv, rxdes, GFP_KERNEL))
+ goto err;
+ }
+
+ /* initialize TX ring */
+ ftgmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
+ return 0;
+
+err:
+ ftgmac100_free_buffers(priv);
+ return -ENOMEM;
+}
+
+/******************************************************************************
+ * internal functions (mdio)
+ *****************************************************************************/
+static void ftgmac100_adjust_link(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct phy_device *phydev = priv->phydev;
+ int ier;
+
+ if (phydev->speed == priv->old_speed)
+ return;
+
+ priv->old_speed = phydev->speed;
+
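+ /* the MACCR speed bits seem to take effect only across a full
+ * restart, hence the stop/reinit/start sequence below
+ */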
+ ier = ioread32(priv->base + FTGMAC100_OFFSET_IER);
+
+ /* disable all interrupts */
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+
+ netif_stop_queue(netdev);
+ ftgmac100_stop_hw(priv);
+
+ netif_start_queue(netdev);
+ ftgmac100_init_hw(priv);
+ ftgmac100_start_hw(priv, phydev->speed);
+
+ /* re-enable interrupts */
+ iowrite32(ier, priv->base + FTGMAC100_OFFSET_IER);
+}
+
+static int ftgmac100_mii_probe(struct ftgmac100 *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct phy_device *phydev = NULL;
+ int i;
+
+ /* search for a connected PHY device */
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ struct phy_device *tmp = priv->mii_bus->phy_map[i];
+
+ if (tmp) {
+ phydev = tmp;
+ break;
+ }
+ }
+
+ /* now we are supposed to have a proper phydev to attach to */
+ if (!phydev) {
+ netdev_info(netdev, "%s: no PHY found\n", netdev->name);
+ return -ENODEV;
+ }
+
+ phydev = phy_connect(netdev, dev_name(&phydev->dev),
+ &ftgmac100_adjust_link, 0,
+ PHY_INTERFACE_MODE_GMII);
+
+ if (IS_ERR(phydev)) {
+ netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
+ return PTR_ERR(phydev);
+ }
+
+ priv->phydev = phydev;
+ return 0;
+}
+
+/******************************************************************************
+ * struct mii_bus functions
+ *****************************************************************************/
+static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct net_device *netdev = bus->priv;
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ unsigned int phycr;
+ int i;
+
+ phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
+
+ /* preserve MDC cycle threshold */
+ phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;
+
+ phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
+ FTGMAC100_PHYCR_REGAD(regnum) |
+ FTGMAC100_PHYCR_MIIRD;
+
+ iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);
+
+ for (i = 0; i < 10; i++) {
+ phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
+
+ if ((phycr & FTGMAC100_PHYCR_MIIRD) == 0) {
+ int data;
+
+ data = ioread32(priv->base + FTGMAC100_OFFSET_PHYDATA);
+ return FTGMAC100_PHYDATA_MIIRDATA(data);
+ }
+
+ udelay(100);
+ }
+
+ netdev_err(netdev, "mdio read timed out\n");
+ return -EIO;
+}
+
+static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
+ int regnum, u16 value)
+{
+ struct net_device *netdev = bus->priv;
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ unsigned int phycr;
+ int data;
+ int i;
+
+ phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
+
+ /* preserve MDC cycle threshold */
+ phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;
+
+ phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
+ FTGMAC100_PHYCR_REGAD(regnum) |
+ FTGMAC100_PHYCR_MIIWR;
+
+ data = FTGMAC100_PHYDATA_MIIWDATA(value);
+
+ iowrite32(data, priv->base + FTGMAC100_OFFSET_PHYDATA);
+ iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);
+
+ for (i = 0; i < 10; i++) {
+ phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
+
+ if ((phycr & FTGMAC100_PHYCR_MIIWR) == 0)
+ return 0;
+
+ udelay(100);
+ }
+
+ netdev_err(netdev, "mdio write timed out\n");
+ return -EIO;
+}
+
+static int ftgmac100_mdiobus_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+/******************************************************************************
+ * struct ethtool_ops functions
+ *****************************************************************************/
+static void ftgmac100_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
+}
+
+static int ftgmac100_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
+ return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int ftgmac100_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
+ return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static const struct ethtool_ops ftgmac100_ethtool_ops = {
+ .set_settings = ftgmac100_set_settings,
+ .get_settings = ftgmac100_get_settings,
+ .get_drvinfo = ftgmac100_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+/******************************************************************************
+ * interrupt handler
+ *****************************************************************************/
+static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
+{
+ struct net_device *netdev = dev_id;
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
+ if (likely(netif_running(netdev))) {
+ /* Disable interrupts for polling */
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+ napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/******************************************************************************
+ * struct napi_struct functions
+ *****************************************************************************/
+static int ftgmac100_poll(struct napi_struct *napi, int budget)
+{
+ struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi);
+ struct net_device *netdev = priv->netdev;
+ unsigned int status;
+ bool completed = true;
+ int rx = 0;
+
+ status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
+ iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
+
+ if (status & (FTGMAC100_INT_RPKT_BUF | FTGMAC100_INT_NO_RXBUF)) {
+ /*
+ * FTGMAC100_INT_RPKT_BUF:
+ * RX DMA has received packets into RX buffer successfully
+ *
+ * FTGMAC100_INT_NO_RXBUF:
+ * RX buffer unavailable
+ */
+ bool retry;
+
+ do {
+ retry = ftgmac100_rx_packet(priv, &rx);
+ } while (retry && rx < budget);
+
+ if (retry && rx == budget)
+ completed = false;
+ }
+
+ if (status & (FTGMAC100_INT_XPKT_ETH | FTGMAC100_INT_XPKT_LOST)) {
+ /*
+ * FTGMAC100_INT_XPKT_ETH:
+ * packet transmitted to ethernet successfully
+ *
+ * FTGMAC100_INT_XPKT_LOST:
+ * packet transmitted to ethernet lost due to late
+ * collision or excessive collision
+ */
+ ftgmac100_tx_complete(priv);
+ }
+
+ if (status & (FTGMAC100_INT_NO_RXBUF | FTGMAC100_INT_RPKT_LOST |
+ FTGMAC100_INT_AHB_ERR | FTGMAC100_INT_PHYSTS_CHG)) {
+ if (net_ratelimit())
+ netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
+ status & FTGMAC100_INT_NO_RXBUF ? "NO_RXBUF " : "",
+ status & FTGMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
+ status & FTGMAC100_INT_AHB_ERR ? "AHB_ERR " : "",
+ status & FTGMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : "");
+
+ if (status & FTGMAC100_INT_NO_RXBUF) {
+ /* RX buffer unavailable */
+ netdev->stats.rx_over_errors++;
+ }
+
+ if (status & FTGMAC100_INT_RPKT_LOST) {
+ /* received packet lost due to RX FIFO full */
+ netdev->stats.rx_fifo_errors++;
+ }
+ }
+
+ if (completed) {
+ napi_complete(napi);
+
+ /* enable all interrupts */
+ iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTGMAC100_OFFSET_IER);
+ }
+
+ return rx;
+}
+
+/******************************************************************************
+ * struct net_device_ops functions
+ *****************************************************************************/
+static int ftgmac100_open(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ int err;
+
+ err = ftgmac100_alloc_buffers(priv);
+ if (err) {
+ netdev_err(netdev, "failed to allocate buffers\n");
+ goto err_alloc;
+ }
+
+ err = request_irq(priv->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
+ if (err) {
+ netdev_err(netdev, "failed to request irq %d\n", priv->irq);
+ goto err_irq;
+ }
+
+ priv->rx_pointer = 0;
+ priv->tx_clean_pointer = 0;
+ priv->tx_pointer = 0;
+ priv->tx_pending = 0;
+
+ err = ftgmac100_reset_hw(priv);
+ if (err)
+ goto err_hw;
+
+ ftgmac100_init_hw(priv);
+ ftgmac100_start_hw(priv, 10);
+
+ phy_start(priv->phydev);
+
+ napi_enable(&priv->napi);
+ netif_start_queue(netdev);
+
+ /* enable all interrupts */
+ iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTGMAC100_OFFSET_IER);
+ return 0;
+
+err_hw:
+ free_irq(priv->irq, netdev);
+err_irq:
+ ftgmac100_free_buffers(priv);
+err_alloc:
+ return err;
+}
+
+static int ftgmac100_stop(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
+ /* disable all interrupts */
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+
+ netif_stop_queue(netdev);
+ napi_disable(&priv->napi);
+ phy_stop(priv->phydev);
+
+ ftgmac100_stop_hw(priv);
+ free_irq(priv->irq, netdev);
+ ftgmac100_free_buffers(priv);
+
+ return 0;
+}
+
+static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ dma_addr_t map;
+
+ if (unlikely(skb->len > MAX_PKT_SIZE)) {
+ if (net_ratelimit())
+ netdev_dbg(netdev, "tx packet too big\n");
+
+ netdev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, map))) {
+ /* drop packet */
+ if (net_ratelimit())
+ netdev_err(netdev, "map socket buffer failed\n");
+
+ netdev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ return ftgmac100_xmit(priv, skb, map);
+}
+
+/* optional */
+static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
+ return phy_mii_ioctl(priv->phydev, ifr, cmd);
+}
+
+static const struct net_device_ops ftgmac100_netdev_ops = {
+ .ndo_open = ftgmac100_open,
+ .ndo_stop = ftgmac100_stop,
+ .ndo_start_xmit = ftgmac100_hard_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = ftgmac100_do_ioctl,
+};
+
+/******************************************************************************
+ * struct platform_driver functions
+ *****************************************************************************/
+static int ftgmac100_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int irq;
+ struct net_device *netdev;
+ struct ftgmac100 *priv;
+ int err;
+ int i;
+
+ if (!pdev)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENXIO;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ /* setup net_device */
+ netdev = alloc_etherdev(sizeof(*priv));
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ SET_ETHTOOL_OPS(netdev, &ftgmac100_ethtool_ops);
+ netdev->netdev_ops = &ftgmac100_netdev_ops;
+ netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO;
+
+ platform_set_drvdata(pdev, netdev);
+
+ /* setup private data */
+ priv = netdev_priv(netdev);
+ priv->netdev = netdev;
+ priv->dev = &pdev->dev;
+
+ spin_lock_init(&priv->tx_lock);
+
+ /* initialize NAPI */
+ netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);
+
+ /* map io memory */
+ priv->res = request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev));
+ if (!priv->res) {
+ dev_err(&pdev->dev, "Could not reserve memory region\n");
+ err = -ENOMEM;
+ goto err_req_mem;
+ }
+
+ priv->base = ioremap(res->start, resource_size(res));
+ if (!priv->base) {
+ dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ priv->irq = irq;
+
+ /* initialize mdio bus */
+ priv->mii_bus = mdiobus_alloc();
+ if (!priv->mii_bus) {
+ err = -EIO;
+ goto err_alloc_mdiobus;
+ }
+
+ priv->mii_bus->name = "ftgmac100_mdio";
+ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "ftgmac100_mii");
+
+ priv->mii_bus->priv = netdev;
+ priv->mii_bus->read = ftgmac100_mdiobus_read;
+ priv->mii_bus->write = ftgmac100_mdiobus_write;
+ priv->mii_bus->reset = ftgmac100_mdiobus_reset;
+ priv->mii_bus->irq = priv->phy_irq;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ priv->mii_bus->irq[i] = PHY_POLL;
+
+ err = mdiobus_register(priv->mii_bus);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
+ goto err_register_mdiobus;
+ }
+
+ err = ftgmac100_mii_probe(priv);
+ if (err) {
+ dev_err(&pdev->dev, "MII Probe failed!\n");
+ goto err_mii_probe;
+ }
+
+ /* register network device */
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register netdev\n");
+ goto err_register_netdev;
+ }
+
+ netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base);
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ random_ether_addr(netdev->dev_addr);
+ netdev_info(netdev, "generated random MAC address %pM\n",
+ netdev->dev_addr);
+ }
+
+ return 0;
+
+err_register_netdev:
+ phy_disconnect(priv->phydev);
+err_mii_probe:
+ mdiobus_unregister(priv->mii_bus);
+err_register_mdiobus:
+ mdiobus_free(priv->mii_bus);
+err_alloc_mdiobus:
+ iounmap(priv->base);
+err_ioremap:
+ release_resource(priv->res);
+err_req_mem:
+ netif_napi_del(&priv->napi);
+ platform_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+err_alloc_etherdev:
+ return err;
+}
+
+static int __exit ftgmac100_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev;
+ struct ftgmac100 *priv;
+
+ netdev = platform_get_drvdata(pdev);
+ priv = netdev_priv(netdev);
+
+ unregister_netdev(netdev);
+
+ phy_disconnect(priv->phydev);
+ mdiobus_unregister(priv->mii_bus);
+ mdiobus_free(priv->mii_bus);
+
+ iounmap(priv->base);
+ release_resource(priv->res);
+
+ netif_napi_del(&priv->napi);
+ platform_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+ return 0;
+}
+
+static struct platform_driver ftgmac100_driver = {
+ .probe = ftgmac100_probe,
+ .remove = __exit_p(ftgmac100_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+/******************************************************************************
+ * initialization / finalization
+ *****************************************************************************/
+static int __init ftgmac100_init(void)
+{
+ pr_info("Loading version " DRV_VERSION " ...\n");
+ return platform_driver_register(&ftgmac100_driver);
+}
+
+static void __exit ftgmac100_exit(void)
+{
+ platform_driver_unregister(&ftgmac100_driver);
+}
+
+module_init(ftgmac100_init);
+module_exit(ftgmac100_exit);
+
+MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
+MODULE_DESCRIPTION("FTGMAC100 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
new file mode 100644
index 000000000000..13408d448b05
--- /dev/null
+++ b/drivers/net/ethernet/faraday/ftgmac100.h
@@ -0,0 +1,246 @@
+/*
+ * Faraday FTGMAC100 Gigabit Ethernet
+ *
+ * (C) Copyright 2009-2011 Faraday Technology
+ * Po-Yu Chuang <ratbert@faraday-tech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __FTGMAC100_H
+#define __FTGMAC100_H
+
+#define FTGMAC100_OFFSET_ISR 0x00
+#define FTGMAC100_OFFSET_IER 0x04
+#define FTGMAC100_OFFSET_MAC_MADR 0x08
+#define FTGMAC100_OFFSET_MAC_LADR 0x0c
+#define FTGMAC100_OFFSET_MAHT0 0x10
+#define FTGMAC100_OFFSET_MAHT1 0x14
+#define FTGMAC100_OFFSET_NPTXPD 0x18
+#define FTGMAC100_OFFSET_RXPD 0x1c
+#define FTGMAC100_OFFSET_NPTXR_BADR 0x20
+#define FTGMAC100_OFFSET_RXR_BADR 0x24
+#define FTGMAC100_OFFSET_HPTXPD 0x28
+#define FTGMAC100_OFFSET_HPTXR_BADR 0x2c
+#define FTGMAC100_OFFSET_ITC 0x30
+#define FTGMAC100_OFFSET_APTC 0x34
+#define FTGMAC100_OFFSET_DBLAC 0x38
+#define FTGMAC100_OFFSET_DMAFIFOS 0x3c
+#define FTGMAC100_OFFSET_REVR 0x40
+#define FTGMAC100_OFFSET_FEAR 0x44
+#define FTGMAC100_OFFSET_TPAFCR 0x48
+#define FTGMAC100_OFFSET_RBSR 0x4c
+#define FTGMAC100_OFFSET_MACCR 0x50
+#define FTGMAC100_OFFSET_MACSR 0x54
+#define FTGMAC100_OFFSET_TM 0x58
+#define FTGMAC100_OFFSET_PHYCR 0x60
+#define FTGMAC100_OFFSET_PHYDATA 0x64
+#define FTGMAC100_OFFSET_FCR 0x68
+#define FTGMAC100_OFFSET_BPR 0x6c
+#define FTGMAC100_OFFSET_WOLCR 0x70
+#define FTGMAC100_OFFSET_WOLSR 0x74
+#define FTGMAC100_OFFSET_WFCRC 0x78
+#define FTGMAC100_OFFSET_WFBM1 0x80
+#define FTGMAC100_OFFSET_WFBM2 0x84
+#define FTGMAC100_OFFSET_WFBM3 0x88
+#define FTGMAC100_OFFSET_WFBM4 0x8c
+#define FTGMAC100_OFFSET_NPTXR_PTR 0x90
+#define FTGMAC100_OFFSET_HPTXR_PTR 0x94
+#define FTGMAC100_OFFSET_RXR_PTR 0x98
+#define FTGMAC100_OFFSET_TX 0xa0
+#define FTGMAC100_OFFSET_TX_MCOL_SCOL 0xa4
+#define FTGMAC100_OFFSET_TX_ECOL_FAIL 0xa8
+#define FTGMAC100_OFFSET_TX_LCOL_UND 0xac
+#define FTGMAC100_OFFSET_RX 0xb0
+#define FTGMAC100_OFFSET_RX_BC 0xb4
+#define FTGMAC100_OFFSET_RX_MC 0xb8
+#define FTGMAC100_OFFSET_RX_PF_AEP 0xbc
+#define FTGMAC100_OFFSET_RX_RUNT 0xc0
+#define FTGMAC100_OFFSET_RX_CRCER_FTL 0xc4
+#define FTGMAC100_OFFSET_RX_COL_LOST 0xc8
+
+/*
+ * Interrupt status register & interrupt enable register
+ */
+#define FTGMAC100_INT_RPKT_BUF (1 << 0)
+#define FTGMAC100_INT_RPKT_FIFO (1 << 1)
+#define FTGMAC100_INT_NO_RXBUF (1 << 2)
+#define FTGMAC100_INT_RPKT_LOST (1 << 3)
+#define FTGMAC100_INT_XPKT_ETH (1 << 4)
+#define FTGMAC100_INT_XPKT_FIFO (1 << 5)
+#define FTGMAC100_INT_NO_NPTXBUF (1 << 6)
+#define FTGMAC100_INT_XPKT_LOST (1 << 7)
+#define FTGMAC100_INT_AHB_ERR (1 << 8)
+#define FTGMAC100_INT_PHYSTS_CHG (1 << 9)
+#define FTGMAC100_INT_NO_HPTXBUF (1 << 10)
+
+/*
+ * Interrupt timer control register
+ */
+#define FTGMAC100_ITC_RXINT_CNT(x) (((x) & 0xf) << 0)
+#define FTGMAC100_ITC_RXINT_THR(x) (((x) & 0x7) << 4)
+#define FTGMAC100_ITC_RXINT_TIME_SEL (1 << 7)
+#define FTGMAC100_ITC_TXINT_CNT(x) (((x) & 0xf) << 8)
+#define FTGMAC100_ITC_TXINT_THR(x) (((x) & 0x7) << 12)
+#define FTGMAC100_ITC_TXINT_TIME_SEL (1 << 15)
+
+/*
+ * Automatic polling timer control register
+ */
+#define FTGMAC100_APTC_RXPOLL_CNT(x) (((x) & 0xf) << 0)
+#define FTGMAC100_APTC_RXPOLL_TIME_SEL (1 << 4)
+#define FTGMAC100_APTC_TXPOLL_CNT(x) (((x) & 0xf) << 8)
+#define FTGMAC100_APTC_TXPOLL_TIME_SEL (1 << 12)
+
+/*
+ * DMA burst length and arbitration control register
+ */
+#define FTGMAC100_DBLAC_RXFIFO_LTHR(x) (((x) & 0x7) << 0)
+#define FTGMAC100_DBLAC_RXFIFO_HTHR(x) (((x) & 0x7) << 3)
+#define FTGMAC100_DBLAC_RX_THR_EN (1 << 6)
+#define FTGMAC100_DBLAC_RXBURST_SIZE(x) (((x) & 0x3) << 8)
+#define FTGMAC100_DBLAC_TXBURST_SIZE(x) (((x) & 0x3) << 10)
+#define FTGMAC100_DBLAC_RXDES_SIZE(x) (((x) & 0xf) << 12)
+#define FTGMAC100_DBLAC_TXDES_SIZE(x) (((x) & 0xf) << 16)
+#define FTGMAC100_DBLAC_IFG_CNT(x) (((x) & 0x7) << 20)
+#define FTGMAC100_DBLAC_IFG_INC (1 << 23)
+
+/*
+ * DMA FIFO status register
+ */
+#define FTGMAC100_DMAFIFOS_RXDMA1_SM(dmafifos) ((dmafifos) & 0xf)
+#define FTGMAC100_DMAFIFOS_RXDMA2_SM(dmafifos) (((dmafifos) >> 4) & 0xf)
+#define FTGMAC100_DMAFIFOS_RXDMA3_SM(dmafifos) (((dmafifos) >> 8) & 0x7)
+#define FTGMAC100_DMAFIFOS_TXDMA1_SM(dmafifos) (((dmafifos) >> 12) & 0xf)
+#define FTGMAC100_DMAFIFOS_TXDMA2_SM(dmafifos) (((dmafifos) >> 16) & 0x3)
+#define FTGMAC100_DMAFIFOS_TXDMA3_SM(dmafifos) (((dmafifos) >> 18) & 0xf)
+#define FTGMAC100_DMAFIFOS_RXFIFO_EMPTY (1 << 26)
+#define FTGMAC100_DMAFIFOS_TXFIFO_EMPTY (1 << 27)
+#define FTGMAC100_DMAFIFOS_RXDMA_GRANT (1 << 28)
+#define FTGMAC100_DMAFIFOS_TXDMA_GRANT (1 << 29)
+#define FTGMAC100_DMAFIFOS_RXDMA_REQ (1 << 30)
+#define FTGMAC100_DMAFIFOS_TXDMA_REQ (1 << 31)
+
+/*
+ * Receive buffer size register
+ */
+#define FTGMAC100_RBSR_SIZE(x) ((x) & 0x3fff)
+
+/*
+ * MAC control register
+ */
+#define FTGMAC100_MACCR_TXDMA_EN (1 << 0)
+#define FTGMAC100_MACCR_RXDMA_EN (1 << 1)
+#define FTGMAC100_MACCR_TXMAC_EN (1 << 2)
+#define FTGMAC100_MACCR_RXMAC_EN (1 << 3)
+#define FTGMAC100_MACCR_RM_VLAN (1 << 4)
+#define FTGMAC100_MACCR_HPTXR_EN (1 << 5)
+#define FTGMAC100_MACCR_LOOP_EN (1 << 6)
+#define FTGMAC100_MACCR_ENRX_IN_HALFTX (1 << 7)
+#define FTGMAC100_MACCR_FULLDUP (1 << 8)
+#define FTGMAC100_MACCR_GIGA_MODE (1 << 9)
+#define FTGMAC100_MACCR_CRC_APD (1 << 10)
+#define FTGMAC100_MACCR_RX_RUNT (1 << 12)
+#define FTGMAC100_MACCR_JUMBO_LF (1 << 13)
+#define FTGMAC100_MACCR_RX_ALL (1 << 14)
+#define FTGMAC100_MACCR_HT_MULTI_EN (1 << 15)
+#define FTGMAC100_MACCR_RX_MULTIPKT (1 << 16)
+#define FTGMAC100_MACCR_RX_BROADPKT (1 << 17)
+#define FTGMAC100_MACCR_DISCARD_CRCERR (1 << 18)
+#define FTGMAC100_MACCR_FAST_MODE (1 << 19)
+#define FTGMAC100_MACCR_SW_RST (1 << 31)
+
+/*
+ * PHY control register
+ */
+#define FTGMAC100_PHYCR_MDC_CYCTHR_MASK 0x3f
+#define FTGMAC100_PHYCR_MDC_CYCTHR(x) ((x) & 0x3f)
+#define FTGMAC100_PHYCR_PHYAD(x) (((x) & 0x1f) << 16)
+#define FTGMAC100_PHYCR_REGAD(x) (((x) & 0x1f) << 21)
+#define FTGMAC100_PHYCR_MIIRD (1 << 26)
+#define FTGMAC100_PHYCR_MIIWR (1 << 27)
+
+/*
+ * PHY data register
+ */
+#define FTGMAC100_PHYDATA_MIIWDATA(x) ((x) & 0xffff)
+#define FTGMAC100_PHYDATA_MIIRDATA(phydata) (((phydata) >> 16) & 0xffff)
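+
+/*
+ * Note the asymmetry between the two fields above: write data is placed
+ * in the low 16 bits of the PHY data register, while read results come
+ * back in the high 16 bits; e.g. a PHY register reading of 0x796d is
+ * presented as 0x796d0000 and extracted with
+ * FTGMAC100_PHYDATA_MIIRDATA().
+ */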
+
+/*
+ * Transmit descriptor, aligned to 16 bytes
+ */
+struct ftgmac100_txdes {
+ unsigned int txdes0;
+ unsigned int txdes1;
+ unsigned int txdes2; /* not used by HW */
+ unsigned int txdes3; /* TXBUF_BADR */
+} __attribute__ ((aligned(16)));
+
+#define FTGMAC100_TXDES0_TXBUF_SIZE(x) ((x) & 0x3fff)
+#define FTGMAC100_TXDES0_EDOTR (1 << 15)
+#define FTGMAC100_TXDES0_CRC_ERR (1 << 19)
+#define FTGMAC100_TXDES0_LTS (1 << 28)
+#define FTGMAC100_TXDES0_FTS (1 << 29)
+#define FTGMAC100_TXDES0_TXDMA_OWN (1 << 31)
+
+#define FTGMAC100_TXDES1_VLANTAG_CI(x) ((x) & 0xffff)
+#define FTGMAC100_TXDES1_INS_VLANTAG (1 << 16)
+#define FTGMAC100_TXDES1_TCP_CHKSUM (1 << 17)
+#define FTGMAC100_TXDES1_UDP_CHKSUM (1 << 18)
+#define FTGMAC100_TXDES1_IP_CHKSUM (1 << 19)
+#define FTGMAC100_TXDES1_LLC (1 << 22)
+#define FTGMAC100_TXDES1_TX2FIC (1 << 30)
+#define FTGMAC100_TXDES1_TXIC (1 << 31)
+
+/*
+ * Receive descriptor, aligned to 16 bytes
+ */
+struct ftgmac100_rxdes {
+ unsigned int rxdes0;
+ unsigned int rxdes1;
+ unsigned int rxdes2; /* not used by HW */
+ unsigned int rxdes3; /* RXBUF_BADR */
+} __attribute__ ((aligned(16)));
+
+#define FTGMAC100_RXDES0_VDBC 0x3fff
+#define FTGMAC100_RXDES0_EDORR (1 << 15)
+#define FTGMAC100_RXDES0_MULTICAST (1 << 16)
+#define FTGMAC100_RXDES0_BROADCAST (1 << 17)
+#define FTGMAC100_RXDES0_RX_ERR (1 << 18)
+#define FTGMAC100_RXDES0_CRC_ERR (1 << 19)
+#define FTGMAC100_RXDES0_FTL (1 << 20)
+#define FTGMAC100_RXDES0_RUNT (1 << 21)
+#define FTGMAC100_RXDES0_RX_ODD_NB (1 << 22)
+#define FTGMAC100_RXDES0_FIFO_FULL (1 << 23)
+#define FTGMAC100_RXDES0_PAUSE_OPCODE (1 << 24)
+#define FTGMAC100_RXDES0_PAUSE_FRAME (1 << 25)
+#define FTGMAC100_RXDES0_LRS (1 << 28)
+#define FTGMAC100_RXDES0_FRS (1 << 29)
+#define FTGMAC100_RXDES0_RXPKT_RDY (1 << 31)
+
+#define FTGMAC100_RXDES1_VLANTAG_CI 0xffff
+#define FTGMAC100_RXDES1_PROT_MASK (0x3 << 20)
+#define FTGMAC100_RXDES1_PROT_NONIP (0x0 << 20)
+#define FTGMAC100_RXDES1_PROT_IP (0x1 << 20)
+#define FTGMAC100_RXDES1_PROT_TCPIP (0x2 << 20)
+#define FTGMAC100_RXDES1_PROT_UDPIP (0x3 << 20)
+#define FTGMAC100_RXDES1_LLC (1 << 22)
+#define FTGMAC100_RXDES1_DF (1 << 23)
+#define FTGMAC100_RXDES1_VLANTAG_AVAIL (1 << 24)
+#define FTGMAC100_RXDES1_TCP_CHKSUM_ERR (1 << 25)
+#define FTGMAC100_RXDES1_UDP_CHKSUM_ERR (1 << 26)
+#define FTGMAC100_RXDES1_IP_CHKSUM_ERR (1 << 27)
+
+#endif /* __FTGMAC100_H */
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
new file mode 100644
index 000000000000..9bd7746cbfcf
--- /dev/null
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -0,0 +1,1198 @@
+/*
+ * Faraday FTMAC100 10/100 Ethernet
+ *
+ * (C) Copyright 2009-2011 Faraday Technology
+ * Po-Yu Chuang <ratbert@faraday-tech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "ftmac100.h"
+
+#define DRV_NAME "ftmac100"
+#define DRV_VERSION "0.2"
+
+#define RX_QUEUE_ENTRIES 128 /* must be power of 2 */
+#define TX_QUEUE_ENTRIES 16 /* must be power of 2 */
+
+#define MAX_PKT_SIZE 1518
+#define RX_BUF_SIZE	2044	/* must not exceed 0x7ff */
+
+#if MAX_PKT_SIZE > 0x7ff
+#error invalid MAX_PKT_SIZE
+#endif
+
+#if RX_BUF_SIZE > 0x7ff || RX_BUF_SIZE > PAGE_SIZE
+#error invalid RX_BUF_SIZE
+#endif
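+
+/*
+ * A note on the bounds enforced above: the descriptor buffer-size
+ * fields (FTMAC100_RXDES1_RXBUF_SIZE()/FTMAC100_TXDES1_TXBUF_SIZE() in
+ * ftmac100.h) are 11 bits wide, so any size above 0x7ff would be
+ * silently truncated by the "& 0x7ff" mask, e.g.
+ *
+ *	FTMAC100_RXDES1_RXBUF_SIZE(2048) == 0, but
+ *	FTMAC100_RXDES1_RXBUF_SIZE(2044) == 2044
+ *
+ * RX buffers are additionally backed by one page each, hence the
+ * PAGE_SIZE bound.
+ */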
+
+/******************************************************************************
+ * private data
+ *****************************************************************************/
+struct ftmac100_descs {
+ struct ftmac100_rxdes rxdes[RX_QUEUE_ENTRIES];
+ struct ftmac100_txdes txdes[TX_QUEUE_ENTRIES];
+};
+
+struct ftmac100 {
+ struct resource *res;
+ void __iomem *base;
+ int irq;
+
+ struct ftmac100_descs *descs;
+ dma_addr_t descs_dma_addr;
+
+ unsigned int rx_pointer;
+ unsigned int tx_clean_pointer;
+ unsigned int tx_pointer;
+ unsigned int tx_pending;
+
+ spinlock_t tx_lock;
+
+ struct net_device *netdev;
+ struct device *dev;
+ struct napi_struct napi;
+
+ struct mii_if_info mii;
+};
+
+static int ftmac100_alloc_rx_page(struct ftmac100 *priv,
+ struct ftmac100_rxdes *rxdes, gfp_t gfp);
+
+/******************************************************************************
+ * internal functions (hardware register access)
+ *****************************************************************************/
+#define INT_MASK_ALL_ENABLED (FTMAC100_INT_RPKT_FINISH | \
+ FTMAC100_INT_NORXBUF | \
+ FTMAC100_INT_XPKT_OK | \
+ FTMAC100_INT_XPKT_LOST | \
+ FTMAC100_INT_RPKT_LOST | \
+ FTMAC100_INT_AHB_ERR | \
+ FTMAC100_INT_PHYSTS_CHG)
+
+#define INT_MASK_ALL_DISABLED 0
+
+static void ftmac100_enable_all_int(struct ftmac100 *priv)
+{
+ iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTMAC100_OFFSET_IMR);
+}
+
+static void ftmac100_disable_all_int(struct ftmac100 *priv)
+{
+ iowrite32(INT_MASK_ALL_DISABLED, priv->base + FTMAC100_OFFSET_IMR);
+}
+
+static void ftmac100_set_rx_ring_base(struct ftmac100 *priv, dma_addr_t addr)
+{
+ iowrite32(addr, priv->base + FTMAC100_OFFSET_RXR_BADR);
+}
+
+static void ftmac100_set_tx_ring_base(struct ftmac100 *priv, dma_addr_t addr)
+{
+ iowrite32(addr, priv->base + FTMAC100_OFFSET_TXR_BADR);
+}
+
+static void ftmac100_txdma_start_polling(struct ftmac100 *priv)
+{
+ iowrite32(1, priv->base + FTMAC100_OFFSET_TXPD);
+}
+
+static int ftmac100_reset(struct ftmac100 *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ int i;
+
+ /* NOTE: reset clears all registers */
+ iowrite32(FTMAC100_MACCR_SW_RST, priv->base + FTMAC100_OFFSET_MACCR);
+
+ for (i = 0; i < 5; i++) {
+ unsigned int maccr;
+
+ maccr = ioread32(priv->base + FTMAC100_OFFSET_MACCR);
+ if (!(maccr & FTMAC100_MACCR_SW_RST)) {
+ /*
+			 * FTMAC100_MACCR_SW_RST being cleared does not
+			 * guarantee that the hardware reset has completed;
+			 * we still need to wait for a while.
+ */
+ udelay(500);
+ return 0;
+ }
+
+ udelay(1000);
+ }
+
+ netdev_err(netdev, "software reset failed\n");
+ return -EIO;
+}
+
+static void ftmac100_set_mac(struct ftmac100 *priv, const unsigned char *mac)
+{
+ unsigned int maddr = mac[0] << 8 | mac[1];
+ unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
+
+ iowrite32(maddr, priv->base + FTMAC100_OFFSET_MAC_MADR);
+ iowrite32(laddr, priv->base + FTMAC100_OFFSET_MAC_LADR);
+}
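+
+/*
+ * As a concrete example of the register split above: for the MAC
+ * address 00:11:22:33:44:55, ftmac100_set_mac() writes
+ *
+ *	MAC_MADR = 0x00000011	(bytes 0-1)
+ *	MAC_LADR = 0x22334455	(bytes 2-5)
+ */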
+
+#define MACCR_ENABLE_ALL (FTMAC100_MACCR_XMT_EN | \
+ FTMAC100_MACCR_RCV_EN | \
+ FTMAC100_MACCR_XDMA_EN | \
+ FTMAC100_MACCR_RDMA_EN | \
+ FTMAC100_MACCR_CRC_APD | \
+ FTMAC100_MACCR_FULLDUP | \
+ FTMAC100_MACCR_RX_RUNT | \
+ FTMAC100_MACCR_RX_BROADPKT)
+
+static int ftmac100_start_hw(struct ftmac100 *priv)
+{
+ struct net_device *netdev = priv->netdev;
+
+ if (ftmac100_reset(priv))
+ return -EIO;
+
+ /* setup ring buffer base registers */
+ ftmac100_set_rx_ring_base(priv,
+ priv->descs_dma_addr +
+ offsetof(struct ftmac100_descs, rxdes));
+ ftmac100_set_tx_ring_base(priv,
+ priv->descs_dma_addr +
+ offsetof(struct ftmac100_descs, txdes));
+
+ iowrite32(FTMAC100_APTC_RXPOLL_CNT(1), priv->base + FTMAC100_OFFSET_APTC);
+
+ ftmac100_set_mac(priv, netdev->dev_addr);
+
+ iowrite32(MACCR_ENABLE_ALL, priv->base + FTMAC100_OFFSET_MACCR);
+ return 0;
+}
+
+static void ftmac100_stop_hw(struct ftmac100 *priv)
+{
+ iowrite32(0, priv->base + FTMAC100_OFFSET_MACCR);
+}
+
+/******************************************************************************
+ * internal functions (receive descriptor)
+ *****************************************************************************/
+static bool ftmac100_rxdes_first_segment(struct ftmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FRS);
+}
+
+static bool ftmac100_rxdes_last_segment(struct ftmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_LRS);
+}
+
+static bool ftmac100_rxdes_owned_by_dma(struct ftmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN);
+}
+
+static void ftmac100_rxdes_set_dma_own(struct ftmac100_rxdes *rxdes)
+{
+ /* clear status bits */
+ rxdes->rxdes0 = cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN);
+}
+
+static bool ftmac100_rxdes_rx_error(struct ftmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ERR);
+}
+
+static bool ftmac100_rxdes_crc_error(struct ftmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_CRC_ERR);
+}
+
+static bool ftmac100_rxdes_frame_too_long(struct ftmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FTL);
+}
+
+static bool ftmac100_rxdes_runt(struct ftmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RUNT);
+}
+
+static bool ftmac100_rxdes_odd_nibble(struct ftmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ODD_NB);
+}
+
+static unsigned int ftmac100_rxdes_frame_length(struct ftmac100_rxdes *rxdes)
+{
+ return le32_to_cpu(rxdes->rxdes0) & FTMAC100_RXDES0_RFL;
+}
+
+static bool ftmac100_rxdes_multicast(struct ftmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_MULTICAST);
+}
+
+static void ftmac100_rxdes_set_buffer_size(struct ftmac100_rxdes *rxdes,
+ unsigned int size)
+{
+ rxdes->rxdes1 &= cpu_to_le32(FTMAC100_RXDES1_EDORR);
+ rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_RXBUF_SIZE(size));
+}
+
+static void ftmac100_rxdes_set_end_of_ring(struct ftmac100_rxdes *rxdes)
+{
+ rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_EDORR);
+}
+
+static void ftmac100_rxdes_set_dma_addr(struct ftmac100_rxdes *rxdes,
+ dma_addr_t addr)
+{
+ rxdes->rxdes2 = cpu_to_le32(addr);
+}
+
+static dma_addr_t ftmac100_rxdes_get_dma_addr(struct ftmac100_rxdes *rxdes)
+{
+ return le32_to_cpu(rxdes->rxdes2);
+}
+
+/*
+ * rxdes3 is not used by hardware. We use it to keep track of the page.
+ * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
+ */
+static void ftmac100_rxdes_set_page(struct ftmac100_rxdes *rxdes, struct page *page)
+{
+ rxdes->rxdes3 = (unsigned int)page;
+}
+
+static struct page *ftmac100_rxdes_get_page(struct ftmac100_rxdes *rxdes)
+{
+ return (struct page *)rxdes->rxdes3;
+}
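+
+/*
+ * Storing the pointer via a plain cast to unsigned int assumes
+ * sizeof(struct page *) == sizeof(unsigned int), i.e. a 32-bit
+ * platform, which presumably holds for the systems this IP block is
+ * integrated into.
+ */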
+
+/******************************************************************************
+ * internal functions (receive)
+ *****************************************************************************/
+static int ftmac100_next_rx_pointer(int pointer)
+{
+ return (pointer + 1) & (RX_QUEUE_ENTRIES - 1);
+}
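+
+/*
+ * The mask-based increment above is why RX_QUEUE_ENTRIES (and, for the
+ * TX equivalent below, TX_QUEUE_ENTRIES) must be a power of 2: masking
+ * with (size - 1) only wraps correctly when size - 1 is all ones, e.g.
+ *
+ *	(127 + 1) & (128 - 1) == 0	(wraps back to the ring start)
+ */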
+
+static void ftmac100_rx_pointer_advance(struct ftmac100 *priv)
+{
+ priv->rx_pointer = ftmac100_next_rx_pointer(priv->rx_pointer);
+}
+
+static struct ftmac100_rxdes *ftmac100_current_rxdes(struct ftmac100 *priv)
+{
+ return &priv->descs->rxdes[priv->rx_pointer];
+}
+
+static struct ftmac100_rxdes *
+ftmac100_rx_locate_first_segment(struct ftmac100 *priv)
+{
+ struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv);
+
+ while (!ftmac100_rxdes_owned_by_dma(rxdes)) {
+ if (ftmac100_rxdes_first_segment(rxdes))
+ return rxdes;
+
+ ftmac100_rxdes_set_dma_own(rxdes);
+ ftmac100_rx_pointer_advance(priv);
+ rxdes = ftmac100_current_rxdes(priv);
+ }
+
+ return NULL;
+}
+
+static bool ftmac100_rx_packet_error(struct ftmac100 *priv,
+ struct ftmac100_rxdes *rxdes)
+{
+ struct net_device *netdev = priv->netdev;
+ bool error = false;
+
+ if (unlikely(ftmac100_rxdes_rx_error(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx err\n");
+
+ netdev->stats.rx_errors++;
+ error = true;
+ }
+
+ if (unlikely(ftmac100_rxdes_crc_error(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx crc err\n");
+
+ netdev->stats.rx_crc_errors++;
+ error = true;
+ }
+
+ if (unlikely(ftmac100_rxdes_frame_too_long(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx frame too long\n");
+
+ netdev->stats.rx_length_errors++;
+ error = true;
+ } else if (unlikely(ftmac100_rxdes_runt(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx runt\n");
+
+ netdev->stats.rx_length_errors++;
+ error = true;
+ } else if (unlikely(ftmac100_rxdes_odd_nibble(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx odd nibble\n");
+
+ netdev->stats.rx_length_errors++;
+ error = true;
+ }
+
+ return error;
+}
+
+static void ftmac100_rx_drop_packet(struct ftmac100 *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv);
+ bool done = false;
+
+ if (net_ratelimit())
+ netdev_dbg(netdev, "drop packet %p\n", rxdes);
+
+ do {
+ if (ftmac100_rxdes_last_segment(rxdes))
+ done = true;
+
+ ftmac100_rxdes_set_dma_own(rxdes);
+ ftmac100_rx_pointer_advance(priv);
+ rxdes = ftmac100_current_rxdes(priv);
+ } while (!done && !ftmac100_rxdes_owned_by_dma(rxdes));
+
+ netdev->stats.rx_dropped++;
+}
+
+static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
+{
+ struct net_device *netdev = priv->netdev;
+ struct ftmac100_rxdes *rxdes;
+ struct sk_buff *skb;
+ struct page *page;
+ dma_addr_t map;
+ int length;
+
+ rxdes = ftmac100_rx_locate_first_segment(priv);
+ if (!rxdes)
+ return false;
+
+ if (unlikely(ftmac100_rx_packet_error(priv, rxdes))) {
+ ftmac100_rx_drop_packet(priv);
+ return true;
+ }
+
+ /*
+	 * Multi-segment packets cannot occur here because the receive
+	 * buffers we provide are always large enough.
+ */
+ if (unlikely(!ftmac100_rxdes_last_segment(rxdes)))
+ BUG();
+
+ /* start processing */
+ skb = netdev_alloc_skb_ip_align(netdev, 128);
+ if (unlikely(!skb)) {
+ if (net_ratelimit())
+ netdev_err(netdev, "rx skb alloc failed\n");
+
+ ftmac100_rx_drop_packet(priv);
+ return true;
+ }
+
+ if (unlikely(ftmac100_rxdes_multicast(rxdes)))
+ netdev->stats.multicast++;
+
+ map = ftmac100_rxdes_get_dma_addr(rxdes);
+ dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+ length = ftmac100_rxdes_frame_length(rxdes);
+ page = ftmac100_rxdes_get_page(rxdes);
+ skb_fill_page_desc(skb, 0, page, 0, length);
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+ __pskb_pull_tail(skb, min(length, 64));
+
+ ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);
+
+ ftmac100_rx_pointer_advance(priv);
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += skb->len;
+
+ /* push packet to protocol stack */
+ netif_receive_skb(skb);
+
+ (*processed)++;
+ return true;
+}
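+
+/*
+ * Note on the receive path above: the whole DMA page is attached to the
+ * skb as a page fragment and only the first min(length, 64) bytes are
+ * pulled into the linear area, so the protocol headers are directly
+ * addressable by eth_type_trans() and the stack without copying the
+ * full payload.
+ */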
+
+/******************************************************************************
+ * internal functions (transmit descriptor)
+ *****************************************************************************/
+static void ftmac100_txdes_reset(struct ftmac100_txdes *txdes)
+{
+ /* clear all except end of ring bit */
+ txdes->txdes0 = 0;
+ txdes->txdes1 &= cpu_to_le32(FTMAC100_TXDES1_EDOTR);
+ txdes->txdes2 = 0;
+ txdes->txdes3 = 0;
+}
+
+static bool ftmac100_txdes_owned_by_dma(struct ftmac100_txdes *txdes)
+{
+ return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN);
+}
+
+static void ftmac100_txdes_set_dma_own(struct ftmac100_txdes *txdes)
+{
+ /*
+	 * Make sure the DMA own bit is not set before any of the other
+	 * descriptor fields have been written.
+ */
+ wmb();
+ txdes->txdes0 |= cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN);
+}
+
+static bool ftmac100_txdes_excessive_collision(struct ftmac100_txdes *txdes)
+{
+ return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_EXSCOL);
+}
+
+static bool ftmac100_txdes_late_collision(struct ftmac100_txdes *txdes)
+{
+ return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_LATECOL);
+}
+
+static void ftmac100_txdes_set_end_of_ring(struct ftmac100_txdes *txdes)
+{
+ txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_EDOTR);
+}
+
+static void ftmac100_txdes_set_first_segment(struct ftmac100_txdes *txdes)
+{
+ txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_FTS);
+}
+
+static void ftmac100_txdes_set_last_segment(struct ftmac100_txdes *txdes)
+{
+ txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_LTS);
+}
+
+static void ftmac100_txdes_set_txint(struct ftmac100_txdes *txdes)
+{
+ txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXIC);
+}
+
+static void ftmac100_txdes_set_buffer_size(struct ftmac100_txdes *txdes,
+ unsigned int len)
+{
+ txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXBUF_SIZE(len));
+}
+
+static void ftmac100_txdes_set_dma_addr(struct ftmac100_txdes *txdes,
+ dma_addr_t addr)
+{
+ txdes->txdes2 = cpu_to_le32(addr);
+}
+
+static dma_addr_t ftmac100_txdes_get_dma_addr(struct ftmac100_txdes *txdes)
+{
+ return le32_to_cpu(txdes->txdes2);
+}
+
+/*
+ * txdes3 is not used by hardware. We use it to keep track of the socket buffer.
+ * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
+ */
+static void ftmac100_txdes_set_skb(struct ftmac100_txdes *txdes, struct sk_buff *skb)
+{
+ txdes->txdes3 = (unsigned int)skb;
+}
+
+static struct sk_buff *ftmac100_txdes_get_skb(struct ftmac100_txdes *txdes)
+{
+ return (struct sk_buff *)txdes->txdes3;
+}
+
+/******************************************************************************
+ * internal functions (transmit)
+ *****************************************************************************/
+static int ftmac100_next_tx_pointer(int pointer)
+{
+ return (pointer + 1) & (TX_QUEUE_ENTRIES - 1);
+}
+
+static void ftmac100_tx_pointer_advance(struct ftmac100 *priv)
+{
+ priv->tx_pointer = ftmac100_next_tx_pointer(priv->tx_pointer);
+}
+
+static void ftmac100_tx_clean_pointer_advance(struct ftmac100 *priv)
+{
+ priv->tx_clean_pointer = ftmac100_next_tx_pointer(priv->tx_clean_pointer);
+}
+
+static struct ftmac100_txdes *ftmac100_current_txdes(struct ftmac100 *priv)
+{
+ return &priv->descs->txdes[priv->tx_pointer];
+}
+
+static struct ftmac100_txdes *ftmac100_current_clean_txdes(struct ftmac100 *priv)
+{
+ return &priv->descs->txdes[priv->tx_clean_pointer];
+}
+
+static bool ftmac100_tx_complete_packet(struct ftmac100 *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct ftmac100_txdes *txdes;
+ struct sk_buff *skb;
+ dma_addr_t map;
+
+ if (priv->tx_pending == 0)
+ return false;
+
+ txdes = ftmac100_current_clean_txdes(priv);
+
+ if (ftmac100_txdes_owned_by_dma(txdes))
+ return false;
+
+ skb = ftmac100_txdes_get_skb(txdes);
+ map = ftmac100_txdes_get_dma_addr(txdes);
+
+ if (unlikely(ftmac100_txdes_excessive_collision(txdes) ||
+ ftmac100_txdes_late_collision(txdes))) {
+ /*
+ * packet transmitted to ethernet lost due to late collision
+ * or excessive collision
+ */
+ netdev->stats.tx_aborted_errors++;
+ } else {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+ }
+
+ dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
+
+ ftmac100_txdes_reset(txdes);
+
+ ftmac100_tx_clean_pointer_advance(priv);
+
+ spin_lock(&priv->tx_lock);
+ priv->tx_pending--;
+ spin_unlock(&priv->tx_lock);
+ netif_wake_queue(netdev);
+
+ return true;
+}
+
+static void ftmac100_tx_complete(struct ftmac100 *priv)
+{
+ while (ftmac100_tx_complete_packet(priv))
+ ;
+}
+
+static int ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb,
+ dma_addr_t map)
+{
+ struct net_device *netdev = priv->netdev;
+ struct ftmac100_txdes *txdes;
+ unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
+
+ txdes = ftmac100_current_txdes(priv);
+ ftmac100_tx_pointer_advance(priv);
+
+ /* setup TX descriptor */
+ ftmac100_txdes_set_skb(txdes, skb);
+ ftmac100_txdes_set_dma_addr(txdes, map);
+
+ ftmac100_txdes_set_first_segment(txdes);
+ ftmac100_txdes_set_last_segment(txdes);
+ ftmac100_txdes_set_txint(txdes);
+ ftmac100_txdes_set_buffer_size(txdes, len);
+
+ spin_lock(&priv->tx_lock);
+ priv->tx_pending++;
+ if (priv->tx_pending == TX_QUEUE_ENTRIES)
+ netif_stop_queue(netdev);
+
+ /* start transmit */
+ ftmac100_txdes_set_dma_own(txdes);
+ spin_unlock(&priv->tx_lock);
+
+ ftmac100_txdma_start_polling(priv);
+ return NETDEV_TX_OK;
+}
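+
+/*
+ * Flow control between ftmac100_xmit() above and
+ * ftmac100_tx_complete_packet(): the queue is stopped under tx_lock as
+ * soon as tx_pending reaches TX_QUEUE_ENTRIES and woken again once a
+ * descriptor has been reclaimed, so the TX ring cannot be overrun.
+ */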
+
+/******************************************************************************
+ * internal functions (buffer)
+ *****************************************************************************/
+static int ftmac100_alloc_rx_page(struct ftmac100 *priv,
+ struct ftmac100_rxdes *rxdes, gfp_t gfp)
+{
+ struct net_device *netdev = priv->netdev;
+ struct page *page;
+ dma_addr_t map;
+
+ page = alloc_page(gfp);
+ if (!page) {
+ if (net_ratelimit())
+ netdev_err(netdev, "failed to allocate rx page\n");
+ return -ENOMEM;
+ }
+
+ map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, map))) {
+ if (net_ratelimit())
+ netdev_err(netdev, "failed to map rx page\n");
+ __free_page(page);
+ return -ENOMEM;
+ }
+
+ ftmac100_rxdes_set_page(rxdes, page);
+ ftmac100_rxdes_set_dma_addr(rxdes, map);
+ ftmac100_rxdes_set_buffer_size(rxdes, RX_BUF_SIZE);
+ ftmac100_rxdes_set_dma_own(rxdes);
+ return 0;
+}
+
+static void ftmac100_free_buffers(struct ftmac100 *priv)
+{
+ int i;
+
+ for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+ struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i];
+ struct page *page = ftmac100_rxdes_get_page(rxdes);
+ dma_addr_t map = ftmac100_rxdes_get_dma_addr(rxdes);
+
+ if (!page)
+ continue;
+
+ dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+ __free_page(page);
+ }
+
+ for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
+ struct ftmac100_txdes *txdes = &priv->descs->txdes[i];
+ struct sk_buff *skb = ftmac100_txdes_get_skb(txdes);
+ dma_addr_t map = ftmac100_txdes_get_dma_addr(txdes);
+
+ if (!skb)
+ continue;
+
+ dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
+ }
+
+ dma_free_coherent(priv->dev, sizeof(struct ftmac100_descs),
+ priv->descs, priv->descs_dma_addr);
+}
+
+static int ftmac100_alloc_buffers(struct ftmac100 *priv)
+{
+ int i;
+
+ priv->descs = dma_alloc_coherent(priv->dev, sizeof(struct ftmac100_descs),
+ &priv->descs_dma_addr, GFP_KERNEL);
+ if (!priv->descs)
+ return -ENOMEM;
+
+ memset(priv->descs, 0, sizeof(struct ftmac100_descs));
+
+ /* initialize RX ring */
+ ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
+
+ for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+ struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i];
+
+ if (ftmac100_alloc_rx_page(priv, rxdes, GFP_KERNEL))
+ goto err;
+ }
+
+ /* initialize TX ring */
+ ftmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
+ return 0;
+
+err:
+ ftmac100_free_buffers(priv);
+ return -ENOMEM;
+}
+
+/******************************************************************************
+ * struct mii_if_info functions
+ *****************************************************************************/
+static int ftmac100_mdio_read(struct net_device *netdev, int phy_id, int reg)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ unsigned int phycr;
+ int i;
+
+ phycr = FTMAC100_PHYCR_PHYAD(phy_id) |
+ FTMAC100_PHYCR_REGAD(reg) |
+ FTMAC100_PHYCR_MIIRD;
+
+ iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR);
+
+ for (i = 0; i < 10; i++) {
+ phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR);
+
+ if ((phycr & FTMAC100_PHYCR_MIIRD) == 0)
+ return phycr & FTMAC100_PHYCR_MIIRDATA;
+
+ udelay(100);
+ }
+
+ netdev_err(netdev, "mdio read timed out\n");
+ return 0;
+}
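+
+/*
+ * As an example of the PHYCR encoding used above, reading register 1
+ * (the MII BMSR) of the PHY at address 0 composes
+ *
+ *	FTMAC100_PHYCR_PHYAD(0)		bits 16-20 = 0
+ *	FTMAC100_PHYCR_REGAD(1)		bits 21-25 = 1
+ *	FTMAC100_PHYCR_MIIRD		bit 26 set
+ *
+ * and the result is taken from the low 16 bits of PHYCR once the
+ * hardware clears MIIRD.
+ */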
+
+static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg,
+ int data)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ unsigned int phycr;
+ int i;
+
+ phycr = FTMAC100_PHYCR_PHYAD(phy_id) |
+ FTMAC100_PHYCR_REGAD(reg) |
+ FTMAC100_PHYCR_MIIWR;
+
+ data = FTMAC100_PHYWDATA_MIIWDATA(data);
+
+ iowrite32(data, priv->base + FTMAC100_OFFSET_PHYWDATA);
+ iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR);
+
+ for (i = 0; i < 10; i++) {
+ phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR);
+
+ if ((phycr & FTMAC100_PHYCR_MIIWR) == 0)
+ return;
+
+ udelay(100);
+ }
+
+ netdev_err(netdev, "mdio write timed out\n");
+}
+
+/******************************************************************************
+ * struct ethtool_ops functions
+ *****************************************************************************/
+static void ftmac100_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, dev_name(&netdev->dev));
+}
+
+static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ return mii_ethtool_gset(&priv->mii, cmd);
+}
+
+static int ftmac100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ return mii_ethtool_sset(&priv->mii, cmd);
+}
+
+static int ftmac100_nway_reset(struct net_device *netdev)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ return mii_nway_restart(&priv->mii);
+}
+
+static u32 ftmac100_get_link(struct net_device *netdev)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ return mii_link_ok(&priv->mii);
+}
+
+static const struct ethtool_ops ftmac100_ethtool_ops = {
+ .set_settings = ftmac100_set_settings,
+ .get_settings = ftmac100_get_settings,
+ .get_drvinfo = ftmac100_get_drvinfo,
+ .nway_reset = ftmac100_nway_reset,
+ .get_link = ftmac100_get_link,
+};
+
+/******************************************************************************
+ * interrupt handler
+ *****************************************************************************/
+static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
+{
+ struct net_device *netdev = dev_id;
+ struct ftmac100 *priv = netdev_priv(netdev);
+
+ if (likely(netif_running(netdev))) {
+ /* Disable interrupts for polling */
+ ftmac100_disable_all_int(priv);
+ napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
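+
+/*
+ * Interrupt/NAPI handshake: the hard interrupt handler above masks all
+ * MAC interrupts and schedules the poll routine; ftmac100_poll()
+ * re-enables them only after draining the rings within budget. This is
+ * the usual NAPI pattern for bounding interrupt load under traffic.
+ */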
+
+/******************************************************************************
+ * struct napi_struct functions
+ *****************************************************************************/
+static int ftmac100_poll(struct napi_struct *napi, int budget)
+{
+ struct ftmac100 *priv = container_of(napi, struct ftmac100, napi);
+ struct net_device *netdev = priv->netdev;
+ unsigned int status;
+ bool completed = true;
+ int rx = 0;
+
+ status = ioread32(priv->base + FTMAC100_OFFSET_ISR);
+
+ if (status & (FTMAC100_INT_RPKT_FINISH | FTMAC100_INT_NORXBUF)) {
+ /*
+ * FTMAC100_INT_RPKT_FINISH:
+ * RX DMA has received packets into RX buffer successfully
+ *
+ * FTMAC100_INT_NORXBUF:
+ * RX buffer unavailable
+ */
+ bool retry;
+
+ do {
+ retry = ftmac100_rx_packet(priv, &rx);
+ } while (retry && rx < budget);
+
+ if (retry && rx == budget)
+ completed = false;
+ }
+
+ if (status & (FTMAC100_INT_XPKT_OK | FTMAC100_INT_XPKT_LOST)) {
+ /*
+ * FTMAC100_INT_XPKT_OK:
+ * packet transmitted to ethernet successfully
+ *
+ * FTMAC100_INT_XPKT_LOST:
+ * packet transmitted to ethernet lost due to late
+ * collision or excessive collision
+ */
+ ftmac100_tx_complete(priv);
+ }
+
+ if (status & (FTMAC100_INT_NORXBUF | FTMAC100_INT_RPKT_LOST |
+ FTMAC100_INT_AHB_ERR | FTMAC100_INT_PHYSTS_CHG)) {
+ if (net_ratelimit())
+ netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
+ status & FTMAC100_INT_NORXBUF ? "NORXBUF " : "",
+ status & FTMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
+ status & FTMAC100_INT_AHB_ERR ? "AHB_ERR " : "",
+ status & FTMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : "");
+
+ if (status & FTMAC100_INT_NORXBUF) {
+ /* RX buffer unavailable */
+ netdev->stats.rx_over_errors++;
+ }
+
+ if (status & FTMAC100_INT_RPKT_LOST) {
+ /* received packet lost due to RX FIFO full */
+ netdev->stats.rx_fifo_errors++;
+ }
+
+ if (status & FTMAC100_INT_PHYSTS_CHG) {
+ /* PHY link status change */
+ mii_check_link(&priv->mii);
+ }
+ }
+
+ if (completed) {
+ /* stop polling */
+ napi_complete(napi);
+ ftmac100_enable_all_int(priv);
+ }
+
+ return rx;
+}
+
+/******************************************************************************
+ * struct net_device_ops functions
+ *****************************************************************************/
+static int ftmac100_open(struct net_device *netdev)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ int err;
+
+ err = ftmac100_alloc_buffers(priv);
+ if (err) {
+ netdev_err(netdev, "failed to allocate buffers\n");
+ goto err_alloc;
+ }
+
+ err = request_irq(priv->irq, ftmac100_interrupt, 0, netdev->name, netdev);
+ if (err) {
+ netdev_err(netdev, "failed to request irq %d\n", priv->irq);
+ goto err_irq;
+ }
+
+ priv->rx_pointer = 0;
+ priv->tx_clean_pointer = 0;
+ priv->tx_pointer = 0;
+ priv->tx_pending = 0;
+
+ err = ftmac100_start_hw(priv);
+ if (err)
+ goto err_hw;
+
+ napi_enable(&priv->napi);
+ netif_start_queue(netdev);
+
+ ftmac100_enable_all_int(priv);
+
+ return 0;
+
+err_hw:
+ free_irq(priv->irq, netdev);
+err_irq:
+ ftmac100_free_buffers(priv);
+err_alloc:
+ return err;
+}
+
+static int ftmac100_stop(struct net_device *netdev)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+
+ ftmac100_disable_all_int(priv);
+ netif_stop_queue(netdev);
+ napi_disable(&priv->napi);
+ ftmac100_stop_hw(priv);
+ free_irq(priv->irq, netdev);
+ ftmac100_free_buffers(priv);
+
+ return 0;
+}
+
+static int ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ dma_addr_t map;
+
+ if (unlikely(skb->len > MAX_PKT_SIZE)) {
+ if (net_ratelimit())
+ netdev_dbg(netdev, "tx packet too big\n");
+
+ netdev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, map))) {
+ /* drop packet */
+ if (net_ratelimit())
+ netdev_err(netdev, "map socket buffer failed\n");
+
+ netdev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ return ftmac100_xmit(priv, skb, map);
+}
+
+/* optional */
+static int ftmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ return generic_mii_ioctl(&priv->mii, data, cmd, NULL);
+}
+
+static const struct net_device_ops ftmac100_netdev_ops = {
+ .ndo_open = ftmac100_open,
+ .ndo_stop = ftmac100_stop,
+ .ndo_start_xmit = ftmac100_hard_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = ftmac100_do_ioctl,
+};
+
+/******************************************************************************
+ * struct platform_driver functions
+ *****************************************************************************/
+static int ftmac100_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int irq;
+ struct net_device *netdev;
+ struct ftmac100 *priv;
+ int err;
+
+ if (!pdev)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENXIO;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ /* setup net_device */
+ netdev = alloc_etherdev(sizeof(*priv));
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ SET_ETHTOOL_OPS(netdev, &ftmac100_ethtool_ops);
+ netdev->netdev_ops = &ftmac100_netdev_ops;
+
+ platform_set_drvdata(pdev, netdev);
+
+ /* setup private data */
+ priv = netdev_priv(netdev);
+ priv->netdev = netdev;
+ priv->dev = &pdev->dev;
+
+ spin_lock_init(&priv->tx_lock);
+
+ /* initialize NAPI */
+ netif_napi_add(netdev, &priv->napi, ftmac100_poll, 64);
+
+ /* map io memory */
+ priv->res = request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev));
+ if (!priv->res) {
+ dev_err(&pdev->dev, "Could not reserve memory region\n");
+ err = -ENOMEM;
+ goto err_req_mem;
+ }
+
+ priv->base = ioremap(res->start, resource_size(res));
+ if (!priv->base) {
+ dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ priv->irq = irq;
+
+ /* initialize struct mii_if_info */
+ priv->mii.phy_id = 0;
+ priv->mii.phy_id_mask = 0x1f;
+ priv->mii.reg_num_mask = 0x1f;
+ priv->mii.dev = netdev;
+ priv->mii.mdio_read = ftmac100_mdio_read;
+ priv->mii.mdio_write = ftmac100_mdio_write;
+
+ /* register network device */
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register netdev\n");
+ goto err_register_netdev;
+ }
+
+ netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base);
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ random_ether_addr(netdev->dev_addr);
+ netdev_info(netdev, "generated random MAC address %pM\n",
+ netdev->dev_addr);
+ }
+
+ return 0;
+
+err_register_netdev:
+ iounmap(priv->base);
+err_ioremap:
+ release_resource(priv->res);
+err_req_mem:
+ netif_napi_del(&priv->napi);
+ platform_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+err_alloc_etherdev:
+ return err;
+}
+
+static int __exit ftmac100_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev;
+ struct ftmac100 *priv;
+
+ netdev = platform_get_drvdata(pdev);
+ priv = netdev_priv(netdev);
+
+ unregister_netdev(netdev);
+
+ iounmap(priv->base);
+ release_resource(priv->res);
+
+ netif_napi_del(&priv->napi);
+ platform_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+ return 0;
+}
+
+static struct platform_driver ftmac100_driver = {
+ .probe = ftmac100_probe,
+ .remove = __exit_p(ftmac100_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+/******************************************************************************
+ * initialization / finalization
+ *****************************************************************************/
+static int __init ftmac100_init(void)
+{
+ pr_info("Loading version " DRV_VERSION " ...\n");
+ return platform_driver_register(&ftmac100_driver);
+}
+
+static void __exit ftmac100_exit(void)
+{
+ platform_driver_unregister(&ftmac100_driver);
+}
+
+module_init(ftmac100_init);
+module_exit(ftmac100_exit);
+
+MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
+MODULE_DESCRIPTION("FTMAC100 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/faraday/ftmac100.h b/drivers/net/ethernet/faraday/ftmac100.h
new file mode 100644
index 000000000000..46a0c47b1ee1
--- /dev/null
+++ b/drivers/net/ethernet/faraday/ftmac100.h
@@ -0,0 +1,180 @@
+/*
+ * Faraday FTMAC100 10/100 Ethernet
+ *
+ * (C) Copyright 2009-2011 Faraday Technology
+ * Po-Yu Chuang <ratbert@faraday-tech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __FTMAC100_H
+#define __FTMAC100_H
+
+#define FTMAC100_OFFSET_ISR 0x00
+#define FTMAC100_OFFSET_IMR 0x04
+#define FTMAC100_OFFSET_MAC_MADR 0x08
+#define FTMAC100_OFFSET_MAC_LADR 0x0c
+#define FTMAC100_OFFSET_MAHT0 0x10
+#define FTMAC100_OFFSET_MAHT1 0x14
+#define FTMAC100_OFFSET_TXPD 0x18
+#define FTMAC100_OFFSET_RXPD 0x1c
+#define FTMAC100_OFFSET_TXR_BADR 0x20
+#define FTMAC100_OFFSET_RXR_BADR 0x24
+#define FTMAC100_OFFSET_ITC 0x28
+#define FTMAC100_OFFSET_APTC 0x2c
+#define FTMAC100_OFFSET_DBLAC 0x30
+#define FTMAC100_OFFSET_MACCR 0x88
+#define FTMAC100_OFFSET_MACSR 0x8c
+#define FTMAC100_OFFSET_PHYCR 0x90
+#define FTMAC100_OFFSET_PHYWDATA 0x94
+#define FTMAC100_OFFSET_FCR 0x98
+#define FTMAC100_OFFSET_BPR 0x9c
+#define FTMAC100_OFFSET_TS 0xc4
+#define FTMAC100_OFFSET_DMAFIFOS 0xc8
+#define FTMAC100_OFFSET_TM 0xcc
+#define FTMAC100_OFFSET_TX_MCOL_SCOL 0xd4
+#define FTMAC100_OFFSET_RPF_AEP 0xd8
+#define FTMAC100_OFFSET_XM_PG 0xdc
+#define FTMAC100_OFFSET_RUNT_TLCC 0xe0
+#define FTMAC100_OFFSET_CRCER_FTL 0xe4
+#define FTMAC100_OFFSET_RLC_RCC 0xe8
+#define FTMAC100_OFFSET_BROC 0xec
+#define FTMAC100_OFFSET_MULCA 0xf0
+#define FTMAC100_OFFSET_RP 0xf4
+#define FTMAC100_OFFSET_XP 0xf8
+
+/*
+ * Interrupt status register & interrupt mask register
+ */
+#define FTMAC100_INT_RPKT_FINISH (1 << 0)
+#define FTMAC100_INT_NORXBUF (1 << 1)
+#define FTMAC100_INT_XPKT_FINISH (1 << 2)
+#define FTMAC100_INT_NOTXBUF (1 << 3)
+#define FTMAC100_INT_XPKT_OK (1 << 4)
+#define FTMAC100_INT_XPKT_LOST (1 << 5)
+#define FTMAC100_INT_RPKT_SAV (1 << 6)
+#define FTMAC100_INT_RPKT_LOST (1 << 7)
+#define FTMAC100_INT_AHB_ERR (1 << 8)
+#define FTMAC100_INT_PHYSTS_CHG (1 << 9)
+
+/*
+ * Interrupt timer control register
+ */
+#define FTMAC100_ITC_RXINT_CNT(x) (((x) & 0xf) << 0)
+#define FTMAC100_ITC_RXINT_THR(x) (((x) & 0x7) << 4)
+#define FTMAC100_ITC_RXINT_TIME_SEL (1 << 7)
+#define FTMAC100_ITC_TXINT_CNT(x) (((x) & 0xf) << 8)
+#define FTMAC100_ITC_TXINT_THR(x) (((x) & 0x7) << 12)
+#define FTMAC100_ITC_TXINT_TIME_SEL (1 << 15)
+
+/*
+ * Automatic polling timer control register
+ */
+#define FTMAC100_APTC_RXPOLL_CNT(x) (((x) & 0xf) << 0)
+#define FTMAC100_APTC_RXPOLL_TIME_SEL (1 << 4)
+#define FTMAC100_APTC_TXPOLL_CNT(x) (((x) & 0xf) << 8)
+#define FTMAC100_APTC_TXPOLL_TIME_SEL (1 << 12)
+
+/*
+ * DMA burst length and arbitration control register
+ */
+#define FTMAC100_DBLAC_INCR4_EN (1 << 0)
+#define FTMAC100_DBLAC_INCR8_EN (1 << 1)
+#define FTMAC100_DBLAC_INCR16_EN (1 << 2)
+#define FTMAC100_DBLAC_RXFIFO_LTHR(x) (((x) & 0x7) << 3)
+#define FTMAC100_DBLAC_RXFIFO_HTHR(x) (((x) & 0x7) << 6)
+#define FTMAC100_DBLAC_RX_THR_EN (1 << 9)
+
+/*
+ * MAC control register
+ */
+#define FTMAC100_MACCR_XDMA_EN (1 << 0)
+#define FTMAC100_MACCR_RDMA_EN (1 << 1)
+#define FTMAC100_MACCR_SW_RST (1 << 2)
+#define FTMAC100_MACCR_LOOP_EN (1 << 3)
+#define FTMAC100_MACCR_CRC_DIS (1 << 4)
+#define FTMAC100_MACCR_XMT_EN (1 << 5)
+#define FTMAC100_MACCR_ENRX_IN_HALFTX (1 << 6)
+#define FTMAC100_MACCR_RCV_EN (1 << 8)
+#define FTMAC100_MACCR_HT_MULTI_EN (1 << 9)
+#define FTMAC100_MACCR_RX_RUNT (1 << 10)
+#define FTMAC100_MACCR_RX_FTL (1 << 11)
+#define FTMAC100_MACCR_RCV_ALL (1 << 12)
+#define FTMAC100_MACCR_CRC_APD (1 << 14)
+#define FTMAC100_MACCR_FULLDUP (1 << 15)
+#define FTMAC100_MACCR_RX_MULTIPKT (1 << 16)
+#define FTMAC100_MACCR_RX_BROADPKT (1 << 17)
+
+/*
+ * PHY control register
+ */
+#define FTMAC100_PHYCR_MIIRDATA 0xffff
+#define FTMAC100_PHYCR_PHYAD(x) (((x) & 0x1f) << 16)
+#define FTMAC100_PHYCR_REGAD(x) (((x) & 0x1f) << 21)
+#define FTMAC100_PHYCR_MIIRD (1 << 26)
+#define FTMAC100_PHYCR_MIIWR (1 << 27)
+
+/*
+ * PHY write data register
+ */
+#define FTMAC100_PHYWDATA_MIIWDATA(x) ((x) & 0xffff)
+
+/*
+ * Transmit descriptor, aligned to 16 bytes
+ */
+struct ftmac100_txdes {
+ unsigned int txdes0;
+ unsigned int txdes1;
+ unsigned int txdes2; /* TXBUF_BADR */
+ unsigned int txdes3; /* not used by HW */
+} __attribute__ ((aligned(16)));
+
+#define FTMAC100_TXDES0_TXPKT_LATECOL (1 << 0)
+#define FTMAC100_TXDES0_TXPKT_EXSCOL (1 << 1)
+#define FTMAC100_TXDES0_TXDMA_OWN (1 << 31)
+
+#define FTMAC100_TXDES1_TXBUF_SIZE(x) ((x) & 0x7ff)
+#define FTMAC100_TXDES1_LTS (1 << 27)
+#define FTMAC100_TXDES1_FTS (1 << 28)
+#define FTMAC100_TXDES1_TX2FIC (1 << 29)
+#define FTMAC100_TXDES1_TXIC (1 << 30)
+#define FTMAC100_TXDES1_EDOTR (1 << 31)
+
+/*
+ * Receive descriptor, aligned to 16 bytes
+ */
+struct ftmac100_rxdes {
+ unsigned int rxdes0;
+ unsigned int rxdes1;
+ unsigned int rxdes2; /* RXBUF_BADR */
+ unsigned int rxdes3; /* not used by HW */
+} __attribute__ ((aligned(16)));
+
+#define FTMAC100_RXDES0_RFL 0x7ff
+#define FTMAC100_RXDES0_MULTICAST (1 << 16)
+#define FTMAC100_RXDES0_BROADCAST (1 << 17)
+#define FTMAC100_RXDES0_RX_ERR (1 << 18)
+#define FTMAC100_RXDES0_CRC_ERR (1 << 19)
+#define FTMAC100_RXDES0_FTL (1 << 20)
+#define FTMAC100_RXDES0_RUNT (1 << 21)
+#define FTMAC100_RXDES0_RX_ODD_NB (1 << 22)
+#define FTMAC100_RXDES0_LRS (1 << 28)
+#define FTMAC100_RXDES0_FRS (1 << 29)
+#define FTMAC100_RXDES0_RXDMA_OWN (1 << 31)
+
+#define FTMAC100_RXDES1_RXBUF_SIZE(x) ((x) & 0x7ff)
+#define FTMAC100_RXDES1_EDORR (1 << 31)
+
+#endif /* __FTMAC100_H */
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
new file mode 100644
index 000000000000..61d2bddec1fa
--- /dev/null
+++ b/drivers/net/ethernet/fealnx.c
@@ -0,0 +1,1976 @@
+/*
+ Written 1998-2000 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/pci-skeleton.html
+
+ Linux kernel updates:
+
+ Version 2.51, Nov 17, 2001 (jgarzik):
+ - Add ethtool support
+ - Replace some MII-related magic numbers with constants
+
+*/
+
+#define DRV_NAME "fealnx"
+#define DRV_VERSION "2.52"
+#define DRV_RELDATE "Sep-11-2006"
+
+static int debug;		/* 1 -> print debug messages */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
+static int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
+/* Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak;
+
+/* Used to pass the media type, etc. */
+/* Both 'options[]' and 'full_duplex[]' should exist for driver */
+/* interoperability. */
+/* The media type is usually passed in 'options[]'. */
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
+static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
+
+/* Operational parameters that are set at compile time. */
+/* Keep the ring sizes a power of two for compile efficiency. */
+/* The compiler will convert <unsigned>'%'<2^N> into a bit mask. */
+/* Making the Tx ring too large decreases the effectiveness of channel */
+/* bonding and packet priority. */
+/* There are no ill effects from too-large receive rings. */
+// 88-12-9 modify,
+// #define TX_RING_SIZE 16
+// #define RX_RING_SIZE 32
+#define TX_RING_SIZE 6
+#define RX_RING_SIZE 12
+#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct fealnx_desc))
+#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct fealnx_desc))
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2*HZ)
+
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
+
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+
+/* These identify the driver base version and may not be removed. */
+static const char version[] __devinitconst =
+ KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";
+
+
+/* This driver was written to use PCI memory space; however, some x86 systems
+   work only with I/O space accesses. */
+#ifndef __alpha__
+#define USE_IO_OPS
+#endif
+
+/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
+/* This is only in the support-all-kernels source code. */
+
+#define RUN_AT(x) (jiffies + (x))
+
+MODULE_AUTHOR("Myson or whoever");
+MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
+MODULE_LICENSE("GPL");
+module_param(max_interrupt_work, int, 0);
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param(multicast_filter_limit, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
+MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
+MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
+MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");
+
+enum {
+ MIN_REGION_SIZE = 136,
+};
+
+/* A chip capabilities table, matching the entries in pci_tbl[] above. */
+enum chip_capability_flags {
+ HAS_MII_XCVR,
+ HAS_CHIP_XCVR,
+};
+
+/* 89/6/13 add, */
+/* for different PHY */
+enum phy_type_flags {
+ MysonPHY = 1,
+ AhdocPHY = 2,
+ SeeqPHY = 3,
+ MarvellPHY = 4,
+ Myson981 = 5,
+ LevelOnePHY = 6,
+ OtherPHY = 10,
+};
+
+struct chip_info {
+ char *chip_name;
+ int flags;
+};
+
+static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
+ { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
+ { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
+ { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
+};
+
+/* Offsets to the Command and Status Registers. */
+enum fealnx_offsets {
+ PAR0 = 0x0, /* physical address 0-3 */
+ PAR1 = 0x04, /* physical address 4-5 */
+ MAR0 = 0x08, /* multicast address 0-3 */
+ MAR1 = 0x0C, /* multicast address 4-7 */
+ FAR0 = 0x10, /* flow-control address 0-3 */
+ FAR1 = 0x14, /* flow-control address 4-5 */
+ TCRRCR = 0x18, /* receive & transmit configuration */
+ BCR = 0x1C, /* bus command */
+ TXPDR = 0x20, /* transmit polling demand */
+ RXPDR = 0x24, /* receive polling demand */
+ RXCWP = 0x28, /* receive current word pointer */
+ TXLBA = 0x2C, /* transmit list base address */
+ RXLBA = 0x30, /* receive list base address */
+ ISR = 0x34, /* interrupt status */
+ IMR = 0x38, /* interrupt mask */
+ FTH = 0x3C, /* flow control high/low threshold */
+ MANAGEMENT = 0x40, /* bootrom/eeprom and mii management */
+ TALLY = 0x44, /* tally counters for crc and mpa */
+ TSR = 0x48, /* tally counter for transmit status */
+ BMCRSR = 0x4c, /* basic mode control and status */
+ PHYIDENTIFIER = 0x50, /* phy identifier */
+ ANARANLPAR = 0x54, /* auto-negotiation advertisement and link
+ partner ability */
+ ANEROCR = 0x58, /* auto-negotiation expansion and pci conf. */
+ BPREMRPSR = 0x5c, /* bypass & receive error mask and phy status */
+};
+
+/* Bits in the interrupt status/enable registers, mostly interrupt sources. */
+enum intr_status_bits {
+ RFCON = 0x00020000, /* receive flow control xon packet */
+ RFCOFF = 0x00010000, /* receive flow control xoff packet */
+ LSCStatus = 0x00008000, /* link status change */
+ ANCStatus = 0x00004000, /* autonegotiation completed */
+ FBE = 0x00002000, /* fatal bus error */
+ FBEMask = 0x00001800, /* mask bit12-11 */
+ ParityErr = 0x00000000, /* parity error */
+ TargetErr = 0x00001000, /* target abort */
+ MasterErr = 0x00000800, /* master error */
+ TUNF = 0x00000400, /* transmit underflow */
+ ROVF = 0x00000200, /* receive overflow */
+ ETI = 0x00000100, /* transmit early int */
+ ERI = 0x00000080, /* receive early int */
+ CNTOVF = 0x00000040, /* counter overflow */
+ RBU = 0x00000020, /* receive buffer unavailable */
+	TBU = 0x00000010,	/* transmit buffer unavailable */
+ TI = 0x00000008, /* transmit interrupt */
+ RI = 0x00000004, /* receive interrupt */
+ RxErr = 0x00000002, /* receive error */
+};
+
+/* Bits in the NetworkConfig register, W for writing, R for reading */
+/* FIXME: some names are invented by me. Marked with (name?) */
+/* If you have docs and know bit names, please fix 'em */
+enum rx_mode_bits {
+ CR_W_ENH = 0x02000000, /* enhanced mode (name?) */
+ CR_W_FD = 0x00100000, /* full duplex */
+ CR_W_PS10 = 0x00080000, /* 10 mbit */
+ CR_W_TXEN = 0x00040000, /* tx enable (name?) */
+ CR_W_PS1000 = 0x00010000, /* 1000 mbit */
+	/* CR_W_RXBURSTMASK= 0x00000e00, I'm unsure about this */
+ CR_W_RXMODEMASK = 0x000000e0,
+ CR_W_PROM = 0x00000080, /* promiscuous mode */
+ CR_W_AB = 0x00000040, /* accept broadcast */
+	CR_W_AM = 0x00000020,	/* accept multicast */
+ CR_W_ARP = 0x00000008, /* receive runt pkt */
+ CR_W_ALP = 0x00000004, /* receive long pkt */
+ CR_W_SEP = 0x00000002, /* receive error pkt */
+ CR_W_RXEN = 0x00000001, /* rx enable (unicast?) (name?) */
+
+ CR_R_TXSTOP = 0x04000000, /* tx stopped (name?) */
+ CR_R_FD = 0x00100000, /* full duplex detected */
+ CR_R_PS10 = 0x00080000, /* 10 mbit detected */
+ CR_R_RXSTOP = 0x00008000, /* rx stopped (name?) */
+};
+
+/* The Tulip Rx and Tx buffer descriptors. */
+struct fealnx_desc {
+ s32 status;
+ s32 control;
+ u32 buffer;
+ u32 next_desc;
+ struct fealnx_desc *next_desc_logical;
+ struct sk_buff *skbuff;
+ u32 reserved1;
+ u32 reserved2;
+};
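+
+/*
+ * Only the first four 32-bit words above are the hardware-visible part
+ * of the descriptor (the chip follows next_desc for chaining);
+ * next_desc_logical and skbuff are driver-only bookkeeping, and the two
+ * reserved words appear to be simple padding.
+ */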
+
+/* Bits in network_desc.status */
+enum rx_desc_status_bits {
+ RXOWN = 0x80000000, /* own bit */
+ FLNGMASK = 0x0fff0000, /* frame length */
+ FLNGShift = 16,
+ MARSTATUS = 0x00004000, /* multicast address received */
+ BARSTATUS = 0x00002000, /* broadcast address received */
+ PHYSTATUS = 0x00001000, /* physical address received */
+ RXFSD = 0x00000800, /* first descriptor */
+ RXLSD = 0x00000400, /* last descriptor */
+ ErrorSummary = 0x80, /* error summary */
+ RUNT = 0x40, /* runt packet received */
+ LONG = 0x20, /* long packet received */
+ FAE = 0x10, /* frame align error */
+ CRC = 0x08, /* crc error */
+ RXER = 0x04, /* receive error */
+};
+
+enum rx_desc_control_bits {
+ RXIC = 0x00800000, /* interrupt control */
+ RBSShift = 0,
+};
+
+enum tx_desc_status_bits {
+ TXOWN = 0x80000000, /* own bit */
+ JABTO = 0x00004000, /* jabber timeout */
+ CSL = 0x00002000, /* carrier sense lost */
+ LC = 0x00001000, /* late collision */
+ EC = 0x00000800, /* excessive collision */
+ UDF = 0x00000400, /* fifo underflow */
+ DFR = 0x00000200, /* deferred */
+ HF = 0x00000100, /* heartbeat fail */
+ NCRMask = 0x000000ff, /* collision retry count */
+ NCRShift = 0,
+};
+
+enum tx_desc_control_bits {
+ TXIC = 0x80000000, /* interrupt control */
+ ETIControl = 0x40000000, /* early transmit interrupt */
+ TXLD = 0x20000000, /* last descriptor */
+ TXFD = 0x10000000, /* first descriptor */
+ CRCEnable = 0x08000000, /* crc control */
+ PADEnable = 0x04000000, /* padding control */
+ RetryTxLC = 0x02000000, /* retry late collision */
+ PKTSMask = 0x3ff800, /* packet size bit21-11 */
+ PKTSShift = 11,
+ TBSMask = 0x000007ff, /* transmit buffer bit 10-0 */
+ TBSShift = 0,
+};
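+
+/*
+ * Illustrative sketch (not used by the driver): the control word that
+ * start_tx() below builds for a packet carried in a single descriptor.
+ * In that case both the packet size field and the buffer size field
+ * hold the full skb length.
+ */
+static inline s32 fealnx_tx_control_one_buffer(unsigned int len)
+{
+ return TXIC | TXLD | TXFD | CRCEnable | PADEnable |
+ (len << PKTSShift) | (len << TBSShift);
+}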
+
+/* BootROM/EEPROM/MII Management Register */
+#define MASK_MIIR_MII_READ 0x00000000
+#define MASK_MIIR_MII_WRITE 0x00000008
+#define MASK_MIIR_MII_MDO 0x00000004
+#define MASK_MIIR_MII_MDI 0x00000002
+#define MASK_MIIR_MII_MDC 0x00000001
+
+/* ST+OP+PHYAD+REGAD+TA */
+#define OP_READ 0x6000 /* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
+#define OP_WRITE 0x5002 /* ST:01+OP:01+PHYAD+REGAD+TA:10 */
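+
+/*
+ * Illustrative sketch (not used by the driver): the 16-bit management
+ * frame that m80x_send_cmd_to_phy() below shifts out MSB first, built
+ * from one of the opcode templates above plus the PHY and register
+ * addresses.
+ */
+static inline unsigned int fealnx_mii_cmd(int opcode, int phyad, int regad)
+{
+ return opcode | (phyad << 7) | (regad << 2);
+}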
+
+/* ------------------------------------------------------------------------- */
+/* Constants for Myson PHY */
+/* ------------------------------------------------------------------------- */
+#define MysonPHYID 0xd0000302
+/* 89-7-27 add, (begin) */
+#define MysonPHYID0 0x0302
+#define StatusRegister 18
+#define SPEED100 0x0400 // bit10
+#define FULLMODE 0x0800 // bit11
+/* 89-7-27 add, (end) */
+
+/* ------------------------------------------------------------------------- */
+/* Constants for Seeq 80225 PHY */
+/* ------------------------------------------------------------------------- */
+#define SeeqPHYID0 0x0016
+
+#define MIIRegister18 18
+#define SPD_DET_100 0x80
+#define DPLX_DET_FULL 0x40
+
+/* ------------------------------------------------------------------------- */
+/* Constants for Ahdoc 101 PHY */
+/* ------------------------------------------------------------------------- */
+#define AhdocPHYID0 0x0022
+
+#define DiagnosticReg 18
+#define DPLX_FULL 0x0800
+#define Speed_100 0x0400
+
+/* 89/6/13 add, */
+/* -------------------------------------------------------------------------- */
+/* Constants for Marvell and LevelOne PHYs */
+/* -------------------------------------------------------------------------- */
+#define MarvellPHYID0 0x0141
+#define LevelOnePHYID0 0x0013
+
+#define MII1000BaseTControlReg 9
+#define MII1000BaseTStatusReg 10
+#define SpecificReg 17
+
+/* for 1000BaseT Control Register */
+#define PHYAbletoPerform1000FullDuplex 0x0200
+#define PHYAbletoPerform1000HalfDuplex 0x0100
+#define PHY1000AbilityMask 0x300
+
+// for phy specific status register, marvell phy.
+#define SpeedMask 0x0c000
+#define Speed_1000M 0x08000
+#define Speed_100M 0x4000
+#define Speed_10M 0
+#define Full_Duplex 0x2000
+
+// 89/12/29 add, for phy specific status register, levelone phy, (begin)
+#define LXT1000_100M 0x08000
+#define LXT1000_1000M 0x0c000
+#define LXT1000_Full 0x200
+// 89/12/29 add, for phy specific status register, levelone phy, (end)
+
+/* for 3-in-1 case, BMCRSR register */
+#define LinkIsUp2 0x00040000
+
+/* for PHY */
+#define LinkIsUp 0x0004
+
+
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct fealnx_desc *rx_ring;
+ struct fealnx_desc *tx_ring;
+
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+
+ spinlock_t lock;
+
+ /* Media monitoring timer. */
+ struct timer_list timer;
+
+ /* Reset timer */
+ struct timer_list reset_timer;
+ int reset_timer_armed;
+ unsigned long crvalue_sv;
+ unsigned long imrvalue_sv;
+
+ /* Frequently used values: keep some adjacent for cache effect. */
+ int flags;
+ struct pci_dev *pci_dev;
+ unsigned long crvalue;
+ unsigned long bcrvalue;
+ unsigned long imrvalue;
+ struct fealnx_desc *cur_rx;
+ struct fealnx_desc *lack_rxbuf;
+ int really_rx_count;
+ struct fealnx_desc *cur_tx;
+ struct fealnx_desc *cur_tx_copy;
+ int really_tx_count;
+ int free_tx_count;
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+
+ /* These values keep track of the transceiver/media in use. */
+ unsigned int linkok;
+ unsigned int line_speed;
+ unsigned int duplexmode;
+ unsigned int default_port:4; /* Last dev->if_port value. */
+ unsigned int PHYType;
+
+ /* MII transceiver section. */
+ int mii_cnt; /* number of MII PHYs found. */
+ unsigned char phys[2]; /* MII device addresses. */
+ struct mii_if_info mii;
+ void __iomem *mem;
+};
+
+
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int netdev_open(struct net_device *dev);
+static void getlinktype(struct net_device *dev);
+static void getlinkstatus(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void reset_timer(unsigned long data);
+static void fealnx_tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t intr_handler(int irq, void *dev_instance);
+static int netdev_rx(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static void __set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static const struct ethtool_ops netdev_ethtool_ops;
+static int netdev_close(struct net_device *dev);
+static void reset_rx_descriptors(struct net_device *dev);
+static void reset_tx_descriptors(struct net_device *dev);
+
+static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
+{
+ int delay = 0x1000;
+ iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR);
+ while (--delay) {
+ if ( (ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
+ break;
+ }
+}
+
+
+static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
+{
+ int delay = 0x1000;
+ iowrite32(crvalue & ~(CR_W_RXEN+CR_W_TXEN), ioaddr + TCRRCR);
+ while (--delay) {
+ if ( (ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP+CR_R_TXSTOP))
+ == (CR_R_RXSTOP+CR_R_TXSTOP) )
+ break;
+ }
+}
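+
+/*
+ * Callers pair these stop helpers with a config rewrite: stop rx/tx,
+ * update np->crvalue, then write it back, e.g. (the pattern used by
+ * the timer and interrupt paths below):
+ *
+ * stop_nic_rxtx(ioaddr, np->crvalue);
+ * iowrite32(np->crvalue, ioaddr + TCRRCR);
+ */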
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = netdev_open,
+ .ndo_stop = netdev_close,
+ .ndo_start_xmit = start_tx,
+ .ndo_get_stats = get_stats,
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_do_ioctl = mii_ioctl,
+ .ndo_tx_timeout = fealnx_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __devinit fealnx_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct netdev_private *np;
+ int i, option, err, irq;
+ static int card_idx = -1;
+ char boardname[12];
+ void __iomem *ioaddr;
+ unsigned long len;
+ unsigned int chip_id = ent->driver_data;
+ struct net_device *dev;
+ void *ring_space;
+ dma_addr_t ring_dma;
+#ifdef USE_IO_OPS
+ int bar = 0;
+#else
+ int bar = 1;
+#endif
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(version);
+#endif
+
+ card_idx++;
+ sprintf(boardname, "fealnx%d", card_idx);
+
+ option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+ i = pci_enable_device(pdev);
+ if (i) return i;
+ pci_set_master(pdev);
+
+ len = pci_resource_len(pdev, bar);
+ if (len < MIN_REGION_SIZE) {
+ dev_err(&pdev->dev,
+ "region size %ld too small, aborting\n", len);
+ return -ENODEV;
+ }
+
+ i = pci_request_regions(pdev, boardname);
+ if (i)
+ return i;
+
+ irq = pdev->irq;
+
+ ioaddr = pci_iomap(pdev, bar, len);
+ if (!ioaddr) {
+ err = -ENOMEM;
+ goto err_out_res;
+ }
+
+ dev = alloc_etherdev(sizeof(struct netdev_private));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_out_unmap;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* read ethernet id */
+ for (i = 0; i < 6; ++i)
+ dev->dev_addr[i] = ioread8(ioaddr + PAR0 + i);
+
+ /* Reset the chip to erase previous misconfiguration. */
+ iowrite32(0x00000001, ioaddr + BCR);
+
+ dev->base_addr = (unsigned long)ioaddr;
+ dev->irq = irq;
+
+ /* Make certain the descriptor lists are aligned. */
+ np = netdev_priv(dev);
+ np->mem = ioaddr;
+ spin_lock_init(&np->lock);
+ np->pci_dev = pdev;
+ np->flags = skel_netdrv_tbl[chip_id].flags;
+ pci_set_drvdata(pdev, dev);
+ np->mii.dev = dev;
+ np->mii.mdio_read = mdio_read;
+ np->mii.mdio_write = mdio_write;
+ np->mii.phy_id_mask = 0x1f;
+ np->mii.reg_num_mask = 0x1f;
+
+ ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space) {
+ err = -ENOMEM;
+ goto err_out_free_dev;
+ }
+ np->rx_ring = ring_space;
+ np->rx_ring_dma = ring_dma;
+
+ ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space) {
+ err = -ENOMEM;
+ goto err_out_free_rx;
+ }
+ np->tx_ring = ring_space;
+ np->tx_ring_dma = ring_dma;
+
+ /* find the connected MII xcvrs */
+ if (np->flags == HAS_MII_XCVR) {
+ int phy, phy_idx = 0;
+
+ for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys);
+ phy++) {
+ int mii_status = mdio_read(dev, phy, 1);
+
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ dev_info(&pdev->dev,
+ "MII PHY found at address %d, status "
+ "0x%4.4x.\n", phy, mii_status);
+ /* get phy type */
+ {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], 2);
+ if (data == SeeqPHYID0)
+ np->PHYType = SeeqPHY;
+ else if (data == AhdocPHYID0)
+ np->PHYType = AhdocPHY;
+ else if (data == MarvellPHYID0)
+ np->PHYType = MarvellPHY;
+ else if (data == MysonPHYID0)
+ np->PHYType = Myson981;
+ else if (data == LevelOnePHYID0)
+ np->PHYType = LevelOnePHY;
+ else
+ np->PHYType = OtherPHY;
+ }
+ }
+ }
+
+ np->mii_cnt = phy_idx;
+ if (phy_idx == 0)
+ dev_warn(&pdev->dev,
+ "MII PHY not found -- this device may "
+ "not operate correctly.\n");
+ } else {
+ np->phys[0] = 32;
+/* 89/6/23 add, (begin) */
+ /* get phy type */
+ if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
+ np->PHYType = MysonPHY;
+ else
+ np->PHYType = OtherPHY;
+ }
+ np->mii.phy_id = np->phys[0];
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ if (option & 0x200)
+ np->mii.full_duplex = 1;
+ np->default_port = option & 15;
+ }
+
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->mii.full_duplex = full_duplex[card_idx];
+
+ if (np->mii.full_duplex) {
+ dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
+/* 89/6/13 add, (begin) */
+// if (np->PHYType==MarvellPHY)
+ if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], 9);
+ data = (data & 0xfcff) | 0x0200;
+ mdio_write(dev, np->phys[0], 9, data);
+ }
+/* 89/6/13 add, (end) */
+ if (np->flags == HAS_MII_XCVR)
+ mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
+ else
+ iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
+ np->mii.force_media = 1;
+ }
+
+ dev->netdev_ops = &netdev_ops;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_out_free_tx;
+
+ printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
+ dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr,
+ dev->dev_addr, irq);
+
+ return 0;
+
+err_out_free_tx:
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+err_out_free_rx:
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+err_out_free_dev:
+ free_netdev(dev);
+err_out_unmap:
+ pci_iounmap(pdev, ioaddr);
+err_out_res:
+ pci_release_regions(pdev);
+ return err;
+}
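+
+/*
+ * Note on the unwind order above: resources are released in strict
+ * reverse order of acquisition (tx ring, rx ring, netdev, iomap,
+ * PCI regions), so each error label falls through to the next.
+ */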
+
+
+static void __devexit fealnx_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct netdev_private *np = netdev_priv(dev);
+
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
+ np->tx_ring_dma);
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
+ np->rx_ring_dma);
+ unregister_netdev(dev);
+ pci_iounmap(pdev, np->mem);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ } else
+ printk(KERN_ERR "fealnx: remove for unknown device\n");
+}
+
+
+static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad)
+{
+ ulong miir;
+ int i;
+ unsigned int mask, data;
+
+ /* enable MII output */
+ miir = (ulong) ioread32(miiport);
+ miir &= 0xfffffff0;
+
+ miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO;
+
+ /* send a preamble of 32 ones */
+ for (i = 0; i < 32; i++) {
+ /* low MDC; MDO is already high (miir) */
+ miir &= ~MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+
+ /* high MDC */
+ miir |= MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+ }
+
+ /* calculate ST+OP+PHYAD+REGAD+TA */
+ data = opcode | (phyad << 7) | (regad << 2);
+
+ /* send it out */
+ mask = 0x8000;
+ while (mask) {
+ /* low MDC, prepare MDO */
+ miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
+ if (mask & data)
+ miir |= MASK_MIIR_MII_MDO;
+
+ iowrite32(miir, miiport);
+ /* high MDC */
+ miir |= MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+ udelay(30);
+
+ /* next */
+ mask >>= 1;
+ if (mask == 0x2 && opcode == OP_READ)
+ miir &= ~MASK_MIIR_MII_WRITE;
+ }
+ return miir;
+}
+
+
+static int mdio_read(struct net_device *dev, int phyad, int regad)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *miiport = np->mem + MANAGEMENT;
+ ulong miir;
+ unsigned int mask, data;
+
+ miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);
+
+ /* read data */
+ mask = 0x8000;
+ data = 0;
+ while (mask) {
+ /* low MDC */
+ miir &= ~MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+
+ /* read MDI */
+ miir = ioread32(miiport);
+ if (miir & MASK_MIIR_MII_MDI)
+ data |= mask;
+
+ /* high MDC, and wait */
+ miir |= MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+ udelay(30);
+
+ /* next */
+ mask >>= 1;
+ }
+
+ /* low MDC */
+ miir &= ~MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+
+ return data & 0xffff;
+}
+
+
+static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *miiport = np->mem + MANAGEMENT;
+ ulong miir;
+ unsigned int mask;
+
+ miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);
+
+ /* write data */
+ mask = 0x8000;
+ while (mask) {
+ /* low MDC, prepare MDO */
+ miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
+ if (mask & data)
+ miir |= MASK_MIIR_MII_MDO;
+ iowrite32(miir, miiport);
+
+ /* high MDC */
+ miir |= MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+
+ /* next */
+ mask >>= 1;
+ }
+
+ /* low MDC */
+ miir &= ~MASK_MIIR_MII_MDC;
+ iowrite32(miir, miiport);
+}
+
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ int i;
+
+ iowrite32(0x00000001, ioaddr + BCR); /* Reset */
+
+ if (request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev))
+ return -EAGAIN;
+
+ for (i = 0; i < 3; i++)
+ iowrite16(((unsigned short*)dev->dev_addr)[i],
+ ioaddr + PAR0 + i*2);
+
+ init_ring(dev);
+
+ iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
+ iowrite32(np->tx_ring_dma, ioaddr + TXLBA);
+
+ /* Initialize other registers. */
+ /* Configure the PCI bus bursts and FIFO thresholds.
+ 486: Set 8 longword burst.
+ 586: no burst limit.
+ Burst length 5:3
+ 0 0 0 1
+ 0 0 1 4
+ 0 1 0 8
+ 0 1 1 16
+ 1 0 0 32
+ 1 0 1 64
+ 1 1 0 128
+ 1 1 1 256
+ Wait the specified 50 PCI cycles after a reset by initializing
+ Tx and Rx queues and the address filter list.
+ FIXME (Ueimor): optimistic for alpha + posted writes ? */
+
+ np->bcrvalue = 0x10; /* little-endian, burst length 8 */
+#ifdef __BIG_ENDIAN
+ np->bcrvalue |= 0x04; /* big-endian */
+#endif
+
+#if defined(__i386__) && !defined(MODULE)
+ if (boot_cpu_data.x86 <= 4)
+ np->crvalue = 0xa00;
+ else
+#endif
+ np->crvalue = 0xe00; /* rx burst length 128 */
+
+
+// 89/12/29 add,
+// 90/1/16 modify,
+// np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI;
+ np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
+ if (np->pci_dev->device == 0x891) {
+ np->bcrvalue |= 0x200; /* set PROG bit */
+ np->crvalue |= CR_W_ENH; /* set enhanced bit */
+ np->imrvalue |= ETI;
+ }
+ iowrite32(np->bcrvalue, ioaddr + BCR);
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ iowrite32(0, ioaddr + RXPDR);
+// 89/9/1 modify,
+// np->crvalue = 0x00e40001; /* tx store and forward, tx/rx enable */
+ np->crvalue |= 0x00e40001; /* tx store and forward, tx/rx enable */
+ np->mii.full_duplex = np->mii.force_media;
+ getlinkstatus(dev);
+ if (np->linkok)
+ getlinktype(dev);
+ __set_rx_mode(dev);
+
+ netif_start_queue(dev);
+
+ /* Clear and Enable interrupts by setting the interrupt mask. */
+ iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
+ iowrite32(np->imrvalue, ioaddr + IMR);
+
+ if (debug)
+ printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = RUN_AT(3 * HZ);
+ np->timer.data = (unsigned long) dev;
+ np->timer.function = netdev_timer;
+
+ /* timer handler */
+ add_timer(&np->timer);
+
+ init_timer(&np->reset_timer);
+ np->reset_timer.data = (unsigned long) dev;
+ np->reset_timer.function = reset_timer;
+ np->reset_timer_armed = 0;
+
+ return 0;
+}
+
+
+static void getlinkstatus(struct net_device *dev)
+/* function: Routine will read MII Status Register to get link status. */
+/* input : dev... pointer to the adapter block. */
+/* output : none. */
+{
+ struct netdev_private *np = netdev_priv(dev);
+ unsigned int i, DelayTime = 0x1000;
+
+ np->linkok = 0;
+
+ if (np->PHYType == MysonPHY) {
+ for (i = 0; i < DelayTime; ++i) {
+ if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {
+ np->linkok = 1;
+ return;
+ }
+ udelay(100);
+ }
+ } else {
+ for (i = 0; i < DelayTime; ++i) {
+ if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
+ np->linkok = 1;
+ return;
+ }
+ udelay(100);
+ }
+ }
+}
+
+
+static void getlinktype(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ if (np->PHYType == MysonPHY) { /* 3-in-1 case */
+ if (ioread32(np->mem + TCRRCR) & CR_R_FD)
+ np->duplexmode = 2; /* full duplex */
+ else
+ np->duplexmode = 1; /* half duplex */
+ if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
+ np->line_speed = 1; /* 10M */
+ else
+ np->line_speed = 2; /* 100M */
+ } else {
+ if (np->PHYType == SeeqPHY) { /* this PHY is SEEQ 80225 */
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], MIIRegister18);
+ if (data & SPD_DET_100)
+ np->line_speed = 2; /* 100M */
+ else
+ np->line_speed = 1; /* 10M */
+ if (data & DPLX_DET_FULL)
+ np->duplexmode = 2; /* full duplex mode */
+ else
+ np->duplexmode = 1; /* half duplex mode */
+ } else if (np->PHYType == AhdocPHY) {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], DiagnosticReg);
+ if (data & Speed_100)
+ np->line_speed = 2; /* 100M */
+ else
+ np->line_speed = 1; /* 10M */
+ if (data & DPLX_FULL)
+ np->duplexmode = 2; /* full duplex mode */
+ else
+ np->duplexmode = 1; /* half duplex mode */
+ }
+/* 89/6/13 add, (begin) */
+ else if (np->PHYType == MarvellPHY) {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], SpecificReg);
+ if (data & Full_Duplex)
+ np->duplexmode = 2; /* full duplex mode */
+ else
+ np->duplexmode = 1; /* half duplex mode */
+ data &= SpeedMask;
+ if (data == Speed_1000M)
+ np->line_speed = 3; /* 1000M */
+ else if (data == Speed_100M)
+ np->line_speed = 2; /* 100M */
+ else
+ np->line_speed = 1; /* 10M */
+ }
+/* 89/6/13 add, (end) */
+/* 89/7/27 add, (begin) */
+ else if (np->PHYType == Myson981) {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], StatusRegister);
+
+ if (data & SPEED100)
+ np->line_speed = 2;
+ else
+ np->line_speed = 1;
+
+ if (data & FULLMODE)
+ np->duplexmode = 2;
+ else
+ np->duplexmode = 1;
+ }
+/* 89/7/27 add, (end) */
+/* 89/12/29 add */
+ else if (np->PHYType == LevelOnePHY) {
+ unsigned int data;
+
+ data = mdio_read(dev, np->phys[0], SpecificReg);
+ if (data & LXT1000_Full)
+ np->duplexmode = 2; /* full duplex mode */
+ else
+ np->duplexmode = 1; /* half duplex mode */
+ data &= SpeedMask;
+ if (data == LXT1000_1000M)
+ np->line_speed = 3; /* 1000M */
+ else if (data == LXT1000_100M)
+ np->line_speed = 2; /* 100M */
+ else
+ np->line_speed = 1; /* 10M */
+ }
+ np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
+ if (np->line_speed == 1)
+ np->crvalue |= CR_W_PS10;
+ else if (np->line_speed == 3)
+ np->crvalue |= CR_W_PS1000;
+ if (np->duplexmode == 2)
+ np->crvalue |= CR_W_FD;
+ }
+}
+
+
+/* Take lock before calling this */
+static void allocate_rx_buffers(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ /* allocate skb for rx buffers */
+ while (np->really_rx_count != RX_RING_SIZE) {
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(np->rx_buf_sz);
+ if (skb == NULL)
+ break; /* Better luck next round. */
+
+ while (np->lack_rxbuf->skbuff)
+ np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
+
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->lack_rxbuf->skbuff = skb;
+ np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
+ np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ np->lack_rxbuf->status = RXOWN;
+ ++np->really_rx_count;
+ }
+}
+
+
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ int old_crvalue = np->crvalue;
+ unsigned int old_linkok = np->linkok;
+ unsigned long flags;
+
+ if (debug)
+ printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
+ "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR),
+ ioread32(ioaddr + TCRRCR));
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ if (np->flags == HAS_MII_XCVR) {
+ getlinkstatus(dev);
+ if ((old_linkok == 0) && (np->linkok == 1)) { /* we need to detect the media type again */
+ getlinktype(dev);
+ if (np->crvalue != old_crvalue) {
+ stop_nic_rxtx(ioaddr, np->crvalue);
+ iowrite32(np->crvalue, ioaddr + TCRRCR);
+ }
+ }
+ }
+
+ allocate_rx_buffers(dev);
+
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ np->timer.expires = RUN_AT(10 * HZ);
+ add_timer(&np->timer);
+}
+
+
+/* Take lock before calling */
+/* Reset chip and disable rx, tx and interrupts */
+static void reset_and_disable_rxtx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ int delay=51;
+
+ /* Reset the chip's Tx and Rx processes. */
+ stop_nic_rxtx(ioaddr, 0);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ iowrite32(0, ioaddr + IMR);
+
+ /* Reset the chip to erase previous misconfiguration. */
+ iowrite32(0x00000001, ioaddr + BCR);
+
+ /* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
+ We surely wait too long (address+data phase). Who cares? */
+ while (--delay) {
+ ioread32(ioaddr + BCR);
+ rmb();
+ }
+}
+
+
+/* Take lock before calling */
+/* Restore chip after reset */
+static void enable_rxtx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+
+ reset_rx_descriptors(dev);
+
+ iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
+ ioaddr + TXLBA);
+ iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
+ ioaddr + RXLBA);
+
+ iowrite32(np->bcrvalue, ioaddr + BCR);
+
+ iowrite32(0, ioaddr + RXPDR);
+ __set_rx_mode(dev); /* changes np->crvalue, writes it into TCRRCR */
+
+ /* Clear and Enable interrupts by setting the interrupt mask. */
+ iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
+ iowrite32(np->imrvalue, ioaddr + IMR);
+
+ iowrite32(0, ioaddr + TXPDR);
+}
+
+
+static void reset_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct netdev_private *np = netdev_priv(dev);
+ unsigned long flags;
+
+ printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);
+
+ spin_lock_irqsave(&np->lock, flags);
+ np->crvalue = np->crvalue_sv;
+ np->imrvalue = np->imrvalue_sv;
+
+ reset_and_disable_rxtx(dev);
+ /* works for me without this:
+ reset_tx_descriptors(dev); */
+ enable_rxtx(dev);
+ netif_start_queue(dev); /* FIXME: or netif_wake_queue(dev); ? */
+
+ np->reset_timer_armed = 0;
+
+ spin_unlock_irqrestore(&np->lock, flags);
+}
+
+
+static void fealnx_tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ unsigned long flags;
+ int i;
+
+ printk(KERN_WARNING
+ "%s: Transmit timed out, status %8.8x, resetting...\n",
+ dev->name, ioread32(ioaddr + ISR));
+
+ {
+ printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(KERN_CONT " %8.8x",
+ (unsigned int) np->rx_ring[i].status);
+ printk(KERN_CONT "\n");
+ printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(KERN_CONT " %4.4x", np->tx_ring[i].status);
+ printk(KERN_CONT "\n");
+ }
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ reset_and_disable_rxtx(dev);
+ reset_tx_descriptors(dev);
+ enable_rxtx(dev);
+
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ dev->stats.tx_errors++;
+ netif_wake_queue(dev); /* or .._start_.. ?? */
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ /* initialize rx variables */
+ np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+ np->cur_rx = &np->rx_ring[0];
+ np->lack_rxbuf = np->rx_ring;
+ np->really_rx_count = 0;
+
+ /* initial rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
+ np->rx_ring[i].next_desc = np->rx_ring_dma +
+ (i + 1)*sizeof(struct fealnx_desc);
+ np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
+ np->rx_ring[i].skbuff = NULL;
+ }
+
+ /* for the last rx descriptor */
+ np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
+ np->rx_ring[i - 1].next_desc_logical = np->rx_ring;
+
+ /* allocate skb for rx buffers */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+
+ if (skb == NULL) {
+ np->lack_rxbuf = &np->rx_ring[i];
+ break;
+ }
+
+ ++np->really_rx_count;
+ np->rx_ring[i].skbuff = skb;
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
+ np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ np->rx_ring[i].status = RXOWN;
+ np->rx_ring[i].control |= RXIC;
+ }
+
+ /* initialize tx variables */
+ np->cur_tx = &np->tx_ring[0];
+ np->cur_tx_copy = &np->tx_ring[0];
+ np->really_tx_count = 0;
+ np->free_tx_count = TX_RING_SIZE;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_ring[i].status = 0;
+ /* do we need np->tx_ring[i].control = XXX; ?? */
+ np->tx_ring[i].next_desc = np->tx_ring_dma +
+ (i + 1)*sizeof(struct fealnx_desc);
+ np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
+ np->tx_ring[i].skbuff = NULL;
+ }
+
+ /* for the last tx descriptor */
+ np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
+ np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
+}
+
+
+static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ np->cur_tx_copy->skbuff = skb;
+
+#define one_buffer
+#define BPT 1022
+#if defined(one_buffer)
+ np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
+ np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
+ np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
+ np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */
+// 89/12/29 add,
+ if (np->pci_dev->device == 0x891)
+ np->cur_tx_copy->control |= ETIControl | RetryTxLC;
+ np->cur_tx_copy->status = TXOWN;
+ np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
+ --np->free_tx_count;
+#elif defined(two_buffer)
+ if (skb->len > BPT) {
+ struct fealnx_desc *next;
+
+ /* for the first descriptor */
+ np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
+ BPT, PCI_DMA_TODEVICE);
+ np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
+ np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
+ np->cur_tx_copy->control |= (BPT << TBSShift); /* buffer size */
+
+ /* for the last descriptor */
+ next = np->cur_tx_copy->next_desc_logical;
+ next->skbuff = skb;
+ next->control = TXIC | TXLD | CRCEnable | PADEnable;
+ next->control |= (skb->len << PKTSShift); /* pkt size */
+ next->control |= ((skb->len - BPT) << TBSShift); /* buf size */
+// 89/12/29 add,
+ if (np->pci_dev->device == 0x891)
+ np->cur_tx_copy->control |= ETIControl | RetryTxLC;
+ next->buffer = pci_map_single(np->pci_dev, skb->data + BPT,
+ skb->len - BPT, PCI_DMA_TODEVICE);
+
+ next->status = TXOWN;
+ np->cur_tx_copy->status = TXOWN;
+
+ np->cur_tx_copy = next->next_desc_logical;
+ np->free_tx_count -= 2;
+ } else {
+ np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
+ np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
+ np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
+ np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */
+// 89/12/29 add,
+ if (np->pci_dev->device == 0x891)
+ np->cur_tx_copy->control |= ETIControl | RetryTxLC;
+ np->cur_tx_copy->status = TXOWN;
+ np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
+ --np->free_tx_count;
+ }
+#endif
+
+ if (np->free_tx_count < 2)
+ netif_stop_queue(dev);
+ ++np->really_tx_count;
+ iowrite32(0, np->mem + TXPDR);
+
+ spin_unlock_irqrestore(&np->lock, flags);
+ return NETDEV_TX_OK;
+}
+
+
+/* Take lock before calling */
+/* Chip probably hosed tx ring. Clean up. */
+static void reset_tx_descriptors(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ struct fealnx_desc *cur;
+ int i;
+
+ /* initialize tx variables */
+ np->cur_tx = &np->tx_ring[0];
+ np->cur_tx_copy = &np->tx_ring[0];
+ np->really_tx_count = 0;
+ np->free_tx_count = TX_RING_SIZE;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ cur = &np->tx_ring[i];
+ if (cur->skbuff) {
+ pci_unmap_single(np->pci_dev, cur->buffer,
+ cur->skbuff->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(cur->skbuff);
+ cur->skbuff = NULL;
+ }
+ cur->status = 0;
+ cur->control = 0; /* needed? */
+ /* probably not needed. We do it for purely paranoid reasons */
+ cur->next_desc = np->tx_ring_dma +
+ (i + 1)*sizeof(struct fealnx_desc);
+ cur->next_desc_logical = &np->tx_ring[i + 1];
+ }
+ /* for the last tx descriptor */
+ np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
+ np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
+}
+
+
+/* Take lock and stop rx before calling this */
+static void reset_rx_descriptors(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ struct fealnx_desc *cur = np->cur_rx;
+ int i;
+
+ allocate_rx_buffers(dev);
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (cur->skbuff)
+ cur->status = RXOWN;
+ cur = cur->next_desc_logical;
+ }
+
+ iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
+ np->mem + RXLBA);
+}
+
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t intr_handler(int irq, void *dev_instance)
+{
+ struct net_device *dev = (struct net_device *) dev_instance;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ long boguscnt = max_interrupt_work;
+ unsigned int num_tx = 0;
+ int handled = 0;
+
+ spin_lock(&np->lock);
+
+ iowrite32(0, ioaddr + IMR);
+
+ do {
+ u32 intr_status = ioread32(ioaddr + ISR);
+
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ iowrite32(intr_status, ioaddr + ISR);
+
+ if (debug)
+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
+ intr_status);
+
+ if (!(intr_status & np->imrvalue))
+ break;
+
+ handled = 1;
+
+// 90/1/16 delete,
+//
+// if (intr_status & FBE)
+// { /* fatal error */
+// stop_nic_tx(ioaddr, 0);
+// stop_nic_rx(ioaddr, 0);
+// break;
+// };
+
+ if (intr_status & TUNF)
+ iowrite32(0, ioaddr + TXPDR);
+
+ if (intr_status & CNTOVF) {
+ /* missed pkts */
+ dev->stats.rx_missed_errors +=
+ ioread32(ioaddr + TALLY) & 0x7fff;
+
+ /* crc error */
+ dev->stats.rx_crc_errors +=
+ (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
+ }
+
+ if (intr_status & (RI | RBU)) {
+ if (intr_status & RI)
+ netdev_rx(dev);
+ else {
+ stop_nic_rx(ioaddr, np->crvalue);
+ reset_rx_descriptors(dev);
+ iowrite32(np->crvalue, ioaddr + TCRRCR);
+ }
+ }
+
+ while (np->really_tx_count) {
+ long tx_status = np->cur_tx->status;
+ long tx_control = np->cur_tx->control;
+
+ if (!(tx_control & TXLD)) { /* this pkt spans two tx descriptors */
+ struct fealnx_desc *next;
+
+ next = np->cur_tx->next_desc_logical;
+ tx_status = next->status;
+ tx_control = next->control;
+ }
+
+ if (tx_status & TXOWN)
+ break;
+
+ if (!(np->crvalue & CR_W_ENH)) {
+ if (tx_status & (CSL | LC | EC | UDF | HF)) {
+ dev->stats.tx_errors++;
+ if (tx_status & EC)
+ dev->stats.tx_aborted_errors++;
+ if (tx_status & CSL)
+ dev->stats.tx_carrier_errors++;
+ if (tx_status & LC)
+ dev->stats.tx_window_errors++;
+ if (tx_status & UDF)
+ dev->stats.tx_fifo_errors++;
+ if ((tx_status & HF) && np->mii.full_duplex == 0)
+ dev->stats.tx_heartbeat_errors++;
+
+ } else {
+ dev->stats.tx_bytes +=
+ ((tx_control & PKTSMask) >> PKTSShift);
+
+ dev->stats.collisions +=
+ ((tx_status & NCRMask) >> NCRShift);
+ dev->stats.tx_packets++;
+ }
+ } else {
+ dev->stats.tx_bytes +=
+ ((tx_control & PKTSMask) >> PKTSShift);
+ dev->stats.tx_packets++;
+ }
+
+ /* Free the original skb. */
+ pci_unmap_single(np->pci_dev, np->cur_tx->buffer,
+ np->cur_tx->skbuff->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(np->cur_tx->skbuff);
+ np->cur_tx->skbuff = NULL;
+ --np->really_tx_count;
+ if (np->cur_tx->control & TXLD) {
+ np->cur_tx = np->cur_tx->next_desc_logical;
+ ++np->free_tx_count;
+ } else {
+ np->cur_tx = np->cur_tx->next_desc_logical;
+ np->cur_tx = np->cur_tx->next_desc_logical;
+ np->free_tx_count += 2;
+ }
+ num_tx++;
+ } /* end of while loop */
+
+ if (num_tx && np->free_tx_count >= 2)
+ netif_wake_queue(dev);
+
+ /* read transmit status for enhanced mode only */
+ if (np->crvalue & CR_W_ENH) {
+ long data;
+
+ data = ioread32(ioaddr + TSR);
+ dev->stats.tx_errors += (data & 0xff000000) >> 24;
+ dev->stats.tx_aborted_errors +=
+ (data & 0xff000000) >> 24;
+ dev->stats.tx_window_errors +=
+ (data & 0x00ff0000) >> 16;
+ dev->stats.collisions += (data & 0x0000ffff);
+ }
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n", dev->name, intr_status);
+ if (!np->reset_timer_armed) {
+ np->reset_timer_armed = 1;
+ np->reset_timer.expires = RUN_AT(HZ/2);
+ add_timer(&np->reset_timer);
+ stop_nic_rxtx(ioaddr, 0);
+ netif_stop_queue(dev);
+ /* or netif_tx_disable(dev); ?? */
+ /* Prevent other paths from enabling tx,rx,intrs */
+ np->crvalue_sv = np->crvalue;
+ np->imrvalue_sv = np->imrvalue;
+ np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN); /* or simply = 0? */
+ np->imrvalue = 0;
+ }
+
+ break;
+ }
+ } while (1);
+
+ /* read the tally counters */
+ /* missed pkts */
+ dev->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
+
+ /* crc error */
+ dev->stats.rx_crc_errors +=
+ (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
+
+ if (debug)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, ioread32(ioaddr + ISR));
+
+ iowrite32(np->imrvalue, ioaddr + IMR);
+
+ spin_unlock(&np->lock);
+
+ return IRQ_RETVAL(handled);
+}
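+
+/*
+ * Illustrative sketch (not used by the driver): the TALLY register packs
+ * two clear-on-read counters, read together above and in get_stats():
+ * bits 14:0 count missed packets and bits 30:16 count CRC errors.
+ */
+static inline void fealnx_read_tally(struct net_device *dev, u32 tally)
+{
+ dev->stats.rx_missed_errors += tally & 0x7fff;
+ dev->stats.rx_crc_errors += (tally & 0x7fff0000) >> 16;
+}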
+
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
+ s32 rx_status = np->cur_rx->status;
+
+ if (np->really_rx_count == 0)
+ break;
+
+ if (debug)
+ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", rx_status);
+
+ if ((!((rx_status & RXFSD) && (rx_status & RXLSD))) ||
+ (rx_status & ErrorSummary)) {
+ if (rx_status & ErrorSummary) { /* there was a fatal error */
+ if (debug)
+ printk(KERN_DEBUG
+ "%s: Receive error, Rx status %8.8x.\n",
+ dev->name, rx_status);
+
+ dev->stats.rx_errors++; /* end of a packet. */
+ if (rx_status & (LONG | RUNT))
+ dev->stats.rx_length_errors++;
+ if (rx_status & RXER)
+ dev->stats.rx_frame_errors++;
+ if (rx_status & CRC)
+ dev->stats.rx_crc_errors++;
+ } else {
+ int need_to_reset = 0;
+ int desno = 0;
+
+ if (rx_status & RXFSD) { /* this pkt is too long, spanning more than one rx buffer */
+ struct fealnx_desc *cur;
+
+ /* check whether this packet was received completely */
+ cur = np->cur_rx;
+ while (desno <= np->really_rx_count) {
+ ++desno;
+ if ((!(cur->status & RXOWN)) &&
+ (cur->status & RXLSD))
+ break;
+ /* goto next rx descriptor */
+ cur = cur->next_desc_logical;
+ }
+ if (desno > np->really_rx_count)
+ need_to_reset = 1;
+ } else /* RXLSD not found, something is wrong */
+ need_to_reset = 1;
+
+ if (need_to_reset == 0) {
+ int i;
+
+ dev->stats.rx_length_errors++;
+
+ /* free all rx descriptors related to this long pkt */
+ for (i = 0; i < desno; ++i) {
+ if (!np->cur_rx->skbuff) {
+ printk(KERN_DEBUG
+ "%s: I'm scared\n", dev->name);
+ break;
+ }
+ np->cur_rx->status = RXOWN;
+ np->cur_rx = np->cur_rx->next_desc_logical;
+ }
+ continue;
+ } else { /* rx error, need to reset this chip */
+ stop_nic_rx(ioaddr, np->crvalue);
+ reset_rx_descriptors(dev);
+ iowrite32(np->crvalue, ioaddr + TCRRCR);
+ }
+ break; /* exit the while loop */
+ }
+ } else { /* this received pkt is ok */
+
+ struct sk_buff *skb;
+ /* Omit the four octet CRC from the length. */
+ short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;
+
+#ifndef final_version
+ if (debug)
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
+ " status %x.\n", pkt_len, rx_status);
+#endif
+
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak &&
+ (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ pci_dma_sync_single_for_cpu(np->pci_dev,
+ np->cur_rx->buffer,
+ np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ /* Call copy + cksum if available. */
+
+#if ! defined(__alpha__)
+ skb_copy_to_linear_data(skb,
+ np->cur_rx->skbuff->data, pkt_len);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len),
+ np->cur_rx->skbuff->data, pkt_len);
+#endif
+ pci_dma_sync_single_for_device(np->pci_dev,
+ np->cur_rx->buffer,
+ np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ } else {
+ pci_unmap_single(np->pci_dev,
+ np->cur_rx->buffer,
+ np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ skb_put(skb = np->cur_rx->skbuff, pkt_len);
+ np->cur_rx->skbuff = NULL;
+ --np->really_rx_count;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+
+ np->cur_rx = np->cur_rx->next_desc_logical;
+ } /* end of while loop */
+
+ /* allocate skb for rx buffers */
+ allocate_rx_buffers(dev);
+
+ return 0;
+}
+
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+
+ /* The chip only needs to report frames it silently dropped. */
+ if (netif_running(dev)) {
+ dev->stats.rx_missed_errors +=
+ ioread32(ioaddr + TALLY) & 0x7fff;
+ dev->stats.rx_crc_errors +=
+ (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
+ }
+
+ return &dev->stats;
+}
+
+
+/* for dev->set_multicast_list */
+static void set_rx_mode(struct net_device *dev)
+{
+ spinlock_t *lp = &((struct netdev_private *)netdev_priv(dev))->lock;
+ unsigned long flags;
+ spin_lock_irqsave(lp, flags);
+ __set_rx_mode(dev);
+ spin_unlock_irqrestore(lp, flags);
+}
+
+
+/* Take lock before calling */
+static void __set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ u32 rx_mode;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = CR_W_AB | CR_W_AM;
+ } else {
+ struct netdev_hw_addr *ha;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned int bit;
+ bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
+ mc_filter[bit >> 5] |= (1 << bit);
+ }
+ rx_mode = CR_W_AB | CR_W_AM;
+ }
+
+ stop_nic_rxtx(ioaddr, np->crvalue);
+
+ iowrite32(mc_filter[0], ioaddr + MAR0);
+ iowrite32(mc_filter[1], ioaddr + MAR1);
+ np->crvalue &= ~CR_W_RXMODEMASK;
+ np->crvalue |= rx_mode;
+ iowrite32(np->crvalue, ioaddr + TCRRCR);
+}
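+
+/*
+ * Illustrative sketch (not used by the driver): the 64-bit multicast hash
+ * above takes the top six bits of the Ethernet CRC, inverted, to pick one
+ * of 64 filter bits split across MAR0 (bits 0-31) and MAR1 (bits 32-63).
+ * The in-line code relies on the shift count wrapping modulo 32; the
+ * well-defined equivalent masks it explicitly:
+ */
+static inline void fealnx_hash_set(u32 *mc_filter, const u8 *addr)
+{
+ unsigned int bit = (ether_crc(ETH_ALEN, addr) >> 26) ^ 0x3F;
+
+ mc_filter[bit >> 5] |= 1 << (bit & 31);
+}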
+
+static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(np->pci_dev));
+}
+
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&np->lock);
+ rc = mii_ethtool_gset(&np->mii, cmd);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&np->lock);
+ rc = mii_ethtool_sset(&np->mii, cmd);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+static int netdev_nway_reset(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_nway_restart(&np->mii);
+}
+
+static u32 netdev_get_link(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_link_ok(&np->mii);
+}
+
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 value)
+{
+ debug = value;
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_settings = netdev_get_settings,
+ .set_settings = netdev_set_settings,
+ .nway_reset = netdev_nway_reset,
+ .get_link = netdev_get_link,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+};
+
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int rc;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irq(&np->lock);
+ rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+
+static int netdev_close(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->mem;
+ int i;
+
+ netif_stop_queue(dev);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ iowrite32(0x0000, ioaddr + IMR);
+
+ /* Stop the chip's Tx and Rx processes. */
+ stop_nic_rxtx(ioaddr, 0);
+
+ del_timer_sync(&np->timer);
+ del_timer_sync(&np->reset_timer);
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = np->rx_ring[i].skbuff;
+
+ np->rx_ring[i].status = 0;
+ if (skb) {
+ pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer,
+ np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(skb);
+ np->rx_ring[i].skbuff = NULL;
+ }
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ struct sk_buff *skb = np->tx_ring[i].skbuff;
+
+ if (skb) {
+ pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ np->tx_ring[i].skbuff = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(fealnx_pci_tbl) = {
+ {0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
+ {0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
+ {} /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);
+
+
+static struct pci_driver fealnx_driver = {
+ .name = "fealnx",
+ .id_table = fealnx_pci_tbl,
+ .probe = fealnx_init_one,
+ .remove = __devexit_p(fealnx_remove_one),
+};
+
+static int __init fealnx_init(void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk(version);
+#endif
+
+ return pci_register_driver(&fealnx_driver);
+}
+
+static void __exit fealnx_exit(void)
+{
+ pci_unregister_driver(&fealnx_driver);
+}
+
+module_init(fealnx_init);
+module_exit(fealnx_exit);
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
new file mode 100644
index 000000000000..1cf671643d1f
--- /dev/null
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -0,0 +1,88 @@
+#
+# Freescale device configuration
+#
+
+config NET_VENDOR_FREESCALE
+ bool "Freescale devices"
+ default y
+ depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
+ M523x || M527x || M5272 || M528x || M520x || M532x || \
+ ARCH_MXC || ARCH_MXS || \
+ (PPC_MPC52xx && PPC_BESTCOMM)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Freescale devices. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+if NET_VENDOR_FREESCALE
+
+config FEC
+ bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
+ depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
+ ARCH_MXC || ARCH_MXS)
+ select PHYLIB
+ ---help---
+ Say Y here if you want to use the built-in 10/100 Fast ethernet
+ controller on some Motorola ColdFire and Freescale i.MX processors.
+
+config FEC_MPC52xx
+ tristate "FEC MPC52xx driver"
+ depends on PPC_MPC52xx && PPC_BESTCOMM
+ select CRC32
+ select PHYLIB
+ select PPC_BESTCOMM_FEC
+ ---help---
+ This option enables support for the MPC5200's on-chip
+ Fast Ethernet Controller.
+ If compiled as module, it will be called fec_mpc52xx.
+
+config FEC_MPC52xx_MDIO
+ bool "FEC MPC52xx MDIO bus driver"
+ depends on FEC_MPC52xx
+ default y
+ ---help---
+ The MPC5200's FEC can connect to the Ethernet either with
+ an external MII PHY chip or a 10 Mbps 7-wire interface
+ (Motorola industry standard).
+ If your board uses an external PHY connected to the FEC, enable this.
+ If unsure, enable.
+ If compiled as module, it will be called fec_mpc52xx_phy.
+
+source "drivers/net/ethernet/freescale/fs_enet/Kconfig"
+
+config FSL_PQ_MDIO
+ tristate "Freescale PQ MDIO"
+ depends on FSL_SOC
+ select PHYLIB
+ ---help---
+ This driver supports the MDIO bus used by the gianfar and UCC drivers.
+
+config UCC_GETH
+ tristate "Freescale QE Gigabit Ethernet"
+ depends on QUICC_ENGINE
+ select FSL_PQ_MDIO
+ select PHYLIB
+ ---help---
+ This driver supports the Gigabit Ethernet mode of the QUICC Engine,
+ which is available on some Freescale SOCs.
+
+config UGETH_TX_ON_DEMAND
+ bool "Transmit on Demand support"
+ depends on UCC_GETH
+
+config GIANFAR
+ tristate "Gianfar Ethernet"
+ depends on FSL_SOC
+ select FSL_PQ_MDIO
+ select PHYLIB
+ select CRC32
+ ---help---
+ This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
+ and MPC86xx family of chips, and the FEC on the 8540.
+
+endif # NET_VENDOR_FREESCALE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
new file mode 100644
index 000000000000..1752488c9ee5
--- /dev/null
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the Freescale network device drivers.
+#
+
+obj-$(CONFIG_FEC) += fec.o
+obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
+ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
+ obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
+endif
+obj-$(CONFIG_FS_ENET) += fs_enet/
+obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
+obj-$(CONFIG_GIANFAR) += gianfar_driver.o
+obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
+gianfar_driver-objs := gianfar.o \
+ gianfar_ethtool.o \
+ gianfar_sysfs.o
+obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
+ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
new file mode 100644
index 000000000000..1124ce0a1594
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -0,0 +1,1728 @@
+/*
+ * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
+ *
+ * Right now, I am very wasteful with the buffers. I allocate memory
+ * pages and then divide them into 2K frame buffers. This way I know I
+ * have buffers large enough to hold one frame within one buffer descriptor.
+ * Once I get this working, I will use 64 or 128 byte CPM buffers, which
+ * will be much more memory efficient and will easily handle lots of
+ * small packets.
+ *
+ * Much better multiple PHY support by Magnus Damm.
+ * Copyright (c) 2000 Ericsson Radio Systems AB.
+ *
+ * Support for FEC controller of ColdFire processors.
+ * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
+ *
+ * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
+ * Copyright (c) 2004-2006 Macq Electronique SA.
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/fec.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
+
+#include <asm/cacheflush.h>
+
+#ifndef CONFIG_ARM
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#endif
+
+#include "fec.h"
+
+#if defined(CONFIG_ARM)
+#define FEC_ALIGNMENT 0xf
+#else
+#define FEC_ALIGNMENT 0x3
+#endif
+
+#define DRIVER_NAME "fec"
+
+/* Controller is ENET-MAC */
+#define FEC_QUIRK_ENET_MAC (1 << 0)
+/* Controller needs driver to swap frame */
+#define FEC_QUIRK_SWAP_FRAME (1 << 1)
+/* Controller uses gasket */
+#define FEC_QUIRK_USE_GASKET (1 << 2)
+/* Controller has GBIT support */
+#define FEC_QUIRK_HAS_GBIT (1 << 3)
+
+static struct platform_device_id fec_devtype[] = {
+ {
+ /* keep it for coldfire */
+ .name = DRIVER_NAME,
+ .driver_data = 0,
+ }, {
+ .name = "imx25-fec",
+ .driver_data = FEC_QUIRK_USE_GASKET,
+ }, {
+ .name = "imx27-fec",
+ .driver_data = 0,
+ }, {
+ .name = "imx28-fec",
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
+ }, {
+ .name = "imx6q-fec",
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, fec_devtype);
+
+enum imx_fec_type {
+ IMX25_FEC = 1, /* runs on i.mx25/50/53 */
+ IMX27_FEC, /* runs on i.mx27/35/51 */
+ IMX28_FEC,
+ IMX6Q_FEC,
+};
+
+static const struct of_device_id fec_dt_ids[] = {
+ { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
+ { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
+ { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
+ { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fec_dt_ids);
+
+static unsigned char macaddr[ETH_ALEN];
+module_param_array(macaddr, byte, NULL, 0);
+MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+
+#if defined(CONFIG_M5272)
+/*
+ * Some hardware gets its MAC address out of local flash memory.
+ * If this is non-zero then assume it is the address to read the MAC from.
+ */
+#if defined(CONFIG_NETtel)
+#define FEC_FLASHMAC 0xf0006006
+#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
+#define FEC_FLASHMAC 0xf0006000
+#elif defined(CONFIG_CANCam)
+#define FEC_FLASHMAC 0xf0020000
+#elif defined (CONFIG_M5272C3)
+#define FEC_FLASHMAC (0xffe04000 + 4)
+#elif defined(CONFIG_MOD5272)
+#define FEC_FLASHMAC 0xffc0406b
+#else
+#define FEC_FLASHMAC 0
+#endif
+#endif /* CONFIG_M5272 */
+
+/* The number of Tx and Rx buffers. These are allocated from the page
+ * pool. The code may assume these are powers of two, so it is best
+ * to keep them that size.
+ * We don't need to allocate pages for the transmitter. We just use
+ * the skbuffer directly.
+ */
+#define FEC_ENET_RX_PAGES 8
+#define FEC_ENET_RX_FRSIZE 2048
+#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
+#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
+#define FEC_ENET_TX_FRSIZE 2048
+#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
+#define TX_RING_SIZE 16 /* Must be power of two */
+#define TX_RING_MOD_MASK 15 /* for this to work */
+
+#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
+#error "FEC: descriptor ring size constants too large"
+#endif
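+
+/*
+ * For example, with 4 KiB pages the defaults above give
+ * FEC_ENET_RX_FRPPG = 2 frames per page and so RX_RING_SIZE = 16.
+ * The tx ring is walked with a power-of-two mask:
+ *
+ * next = (cur + 1) & TX_RING_MOD_MASK; (wraps 15 -> 0)
+ */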
+
+/* Interrupt events/masks. */
+#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
+#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
+#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */
+#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */
+#define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */
+#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */
+#define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */
+#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
+#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
+#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
+
+#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
+
+/* The FEC stores dest/src/type, data, and checksum for receive packets.
+ */
+#define PKT_MAXBUF_SIZE 1518
+#define PKT_MINBUF_SIZE 64
+#define PKT_MAXBLR_SIZE 1520
+
+/* This device has up to three irqs on some platforms */
+#define FEC_IRQ_NUM 3
+
+/*
+ * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
+ * size bits. Other FEC hardware does not, so we need to take that into
+ * account when setting it.
+ */
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
+#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
+#else
+#define OPT_FRAME_SIZE 0
+#endif
+
+/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
+ * tx_bd_base always point to the base of the buffer descriptors. The
+ * cur_rx and cur_tx point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller. The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions. The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
+struct fec_enet_private {
+ /* Hardware registers of the FEC device */
+ void __iomem *hwp;
+
+ struct net_device *netdev;
+
+ struct clk *clk;
+
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ unsigned char *tx_bounce[TX_RING_SIZE];
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ ushort skb_cur;
+ ushort skb_dirty;
+
+ /* CPM dual port RAM relative addresses */
+ dma_addr_t bd_dma;
+ /* Address of Rx and Tx buffers */
+ struct bufdesc *rx_bd_base;
+ struct bufdesc *tx_bd_base;
+ /* The next free ring entry */
+ struct bufdesc *cur_rx, *cur_tx;
+ /* The ring entries to be free()ed */
+ struct bufdesc *dirty_tx;
+
+ uint tx_full;
+ /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
+ spinlock_t hw_lock;
+
+ struct platform_device *pdev;
+
+ int opened;
+
+ /* Phylib and MDIO interface */
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ int mii_timeout;
+ uint phy_speed;
+ phy_interface_t phy_interface;
+ int link;
+ int full_duplex;
+ struct completion mdio_done;
+ int irq[FEC_IRQ_NUM];
+};
+
+/* FEC MII MMFR bits definition */
+#define FEC_MMFR_ST (1 << 30)
+#define FEC_MMFR_OP_READ (2 << 28)
+#define FEC_MMFR_OP_WRITE (1 << 28)
+#define FEC_MMFR_PA(v) ((v & 0x1f) << 23)
+#define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
+#define FEC_MMFR_TA (2 << 16)
+#define FEC_MMFR_DATA(v) (v & 0xffff)
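+
+/*
+ * Sketch (an assumption -- the MDIO helpers are later in this file, not
+ * in this hunk): the fields above combine into one management frame,
+ * roughly
+ *
+ * writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | FEC_MMFR_PA(mii_id) |
+ * FEC_MMFR_RA(regnum) | FEC_MMFR_TA,
+ * fep->hwp + FEC_MII_DATA);
+ *
+ * with FEC_MII_DATA being the register offset from fec.h.
+ */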
+
+#define FEC_MII_TIMEOUT 1000 /* us */
+
+/* Transmitter timeout */
+#define TX_TIMEOUT (2 * HZ)
+
+static void *swap_buffer(void *bufaddr, int len)
+{
+ int i;
+ unsigned int *buf = bufaddr;
+
+ for (i = 0; i < (len + 3) / 4; i++, buf++)
+ *buf = cpu_to_be32(*buf);
+
+ return bufaddr;
+}
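+
+/*
+ * Note that swap_buffer() rounds len up to a whole number of 32-bit
+ * words, so the trailing bytes of an odd-length frame are byte-swapped
+ * in place along with the rest.
+ */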
+
+static netdev_tx_t
+fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ struct bufdesc *bdp;
+ void *bufaddr;
+ unsigned short status;
+ unsigned long flags;
+
+ if (!fep->link) {
+ /* Link is down or autonegotiation is in progress. */
+ return NETDEV_TX_BUSY;
+ }
+
+ spin_lock_irqsave(&fep->hw_lock, flags);
+ /* Fill in a Tx ring entry */
+ bdp = fep->cur_tx;
+
+ status = bdp->cbd_sc;
+
+ if (status & BD_ENET_TX_READY) {
+ /* Oops. All transmit buffers are full. Bail out.
+ * This should not happen, since the queue should have been stopped.
+ */
+ printk("%s: tx queue full!\n", ndev->name);
+ spin_unlock_irqrestore(&fep->hw_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Clear all of the status flags */
+ status &= ~BD_ENET_TX_STATS;
+
+ /* Set buffer length and buffer pointer */
+ bufaddr = skb->data;
+ bdp->cbd_datlen = skb->len;
+
+ /*
+ * On some FEC implementations data must be aligned on
+ * 4-byte boundaries. Use bounce buffers to copy data
+ * and get it aligned. Ugh.
+ */
+ if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
+ unsigned int index;
+ index = bdp - fep->tx_bd_base;
+ memcpy(fep->tx_bounce[index], skb->data, skb->len);
+ bufaddr = fep->tx_bounce[index];
+ }
+
+ /*
+ * Some designs made an incorrect assumption about the endianness of
+ * the system they run on. As a result, the driver has to swap every
+ * frame going to and coming from the controller.
+ */
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(bufaddr, skb->len);
+
+ /* Save skb pointer */
+ fep->tx_skbuff[fep->skb_cur] = skb;
+
+ ndev->stats.tx_bytes += skb->len;
+ fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
+
+ /* Push the data cache so the controller does not get stale memory
+ * data.
+ */
+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
+ FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
+
+ /* Send it on its way. Tell FEC it's ready, interrupt when done,
+ * it's the last BD of the frame, and to put the CRC on the end.
+ */
+ status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
+ | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ bdp->cbd_sc = status;
+
+ /* Trigger transmission start */
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+
+ /* If this was the last BD in the ring, start at the beginning again. */
+ if (status & BD_ENET_TX_WRAP)
+ bdp = fep->tx_bd_base;
+ else
+ bdp++;
+
+ if (bdp == fep->dirty_tx) {
+ fep->tx_full = 1;
+ netif_stop_queue(ndev);
+ }
+
+ fep->cur_tx = bdp;
+
+ skb_tx_timestamp(skb);
+
+ spin_unlock_irqrestore(&fep->hw_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/* This function is called to start or restart the FEC during a link
+ * change. This only happens when switching between half and full
+ * duplex.
+ */
+static void
+fec_restart(struct net_device *ndev, int duplex)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ int i;
+ u32 temp_mac[2];
+ u32 rcntl = OPT_FRAME_SIZE | 0x04;
+ u32 ecntl = 0x2; /* ETHEREN */
+
+ /* Whack a reset. We should wait for this. */
+ writel(1, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+
+ /*
+ * An enet-mac reset also resets the MAC address registers, so they
+ * need to be reconfigured.
+ */
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+ writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
+ writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+ }
+
+ /* Clear any outstanding interrupt. */
+ writel(0xffc00000, fep->hwp + FEC_IEVENT);
+
+ /* Reset all multicast. */
+ writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+#ifndef CONFIG_M5272
+ writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+ writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+#endif
+
+ /* Set maximum receive buffer size. */
+ writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
+
+ /* Set receive and transmit descriptor base. */
+ writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
+ writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
+ fep->hwp + FEC_X_DES_START);
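+ /* (The Tx ring sits immediately after the Rx ring in the single
+ * coherent allocation made in fec_enet_init().)
+ */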
+
+ fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+ fep->cur_rx = fep->rx_bd_base;
+
+ /* Reset SKB transmit buffers. */
+ fep->skb_cur = fep->skb_dirty = 0;
+ for (i = 0; i <= TX_RING_MOD_MASK; i++) {
+ if (fep->tx_skbuff[i]) {
+ dev_kfree_skb_any(fep->tx_skbuff[i]);
+ fep->tx_skbuff[i] = NULL;
+ }
+ }
+
+ /* Enable MII mode */
+ if (duplex) {
+ /* FD enable */
+ writel(0x04, fep->hwp + FEC_X_CNTRL);
+ } else {
+ /* No Rcv on Xmit */
+ rcntl |= 0x02;
+ writel(0x0, fep->hwp + FEC_X_CNTRL);
+ }
+
+ fep->full_duplex = duplex;
+
+ /* Set MII speed */
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+ /*
+ * The PHY interface and speed need to be configured differently
+ * on enet-mac.
+ */
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ /* Enable flow control and length check */
+ rcntl |= 0x40000000 | 0x00000020;
+
+ /* RGMII, RMII or MII */
+ if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
+ rcntl |= (1 << 6);
+ else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+ rcntl |= (1 << 8);
+ else
+ rcntl &= ~(1 << 8);
+
+ /* 1G, 100M or 10M */
+ if (fep->phy_dev) {
+ if (fep->phy_dev->speed == SPEED_1000)
+ ecntl |= (1 << 5);
+ else if (fep->phy_dev->speed == SPEED_100)
+ rcntl &= ~(1 << 9);
+ else
+ rcntl |= (1 << 9);
+ }
+ } else {
+#ifdef FEC_MIIGSK_ENR
+ if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
+ /* disable the gasket and wait */
+ writel(0, fep->hwp + FEC_MIIGSK_ENR);
+ while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+ udelay(1);
+
+ /*
+ * configure the gasket:
+ * RMII, 50 MHz, no loopback, no echo
+ * MII, 25 MHz, no loopback, no echo
+ */
+ writel((fep->phy_interface == PHY_INTERFACE_MODE_RMII) ?
+ 1 : 0, fep->hwp + FEC_MIIGSK_CFGR);
+
+ /* re-enable the gasket */
+ writel(2, fep->hwp + FEC_MIIGSK_ENR);
+ }
+#endif
+ }
+ writel(rcntl, fep->hwp + FEC_R_CNTRL);
+
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ /* enable ENET endian swap */
+ ecntl |= (1 << 8);
+ /* enable ENET store and forward mode */
+ writel(1 << 8, fep->hwp + FEC_X_WMRK);
+ }
+
+ /* And last, enable the transmit and receive processing */
+ writel(ecntl, fep->hwp + FEC_ECNTRL);
+ writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+
+ /* Enable interrupts we wish to service */
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+}
+
+static void
+fec_stop(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+
+ /* We cannot expect a graceful transmit stop without link. */
+ if (fep->link) {
+ writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
+ udelay(10);
+ if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
+ printk("fec_stop : Graceful transmit stop did not complete !\n");
+ }
+
+ /* Whack a reset. We should wait for this. */
+ writel(1, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+
+ /* We have to keep ENET enabled to have MII interrupt stay working */
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+ writel(2, fep->hwp + FEC_ECNTRL);
+}
+
+
+static void
+fec_timeout(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ ndev->stats.tx_errors++;
+
+ fec_restart(ndev, fep->full_duplex);
+ netif_wake_queue(ndev);
+}
+
+static void
+fec_enet_tx(struct net_device *ndev)
+{
+ struct fec_enet_private *fep;
+ struct bufdesc *bdp;
+ unsigned short status;
+ struct sk_buff *skb;
+
+ fep = netdev_priv(ndev);
+ spin_lock(&fep->hw_lock);
+ bdp = fep->dirty_tx;
+
+ while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
+ if (bdp == fep->cur_tx && fep->tx_full == 0)
+ break;
+
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
+ bdp->cbd_bufaddr = 0;
+
+ skb = fep->tx_skbuff[fep->skb_dirty];
+ /* Check for errors. */
+ if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+ BD_ENET_TX_RL | BD_ENET_TX_UN |
+ BD_ENET_TX_CSL)) {
+ ndev->stats.tx_errors++;
+ if (status & BD_ENET_TX_HB) /* No heartbeat */
+ ndev->stats.tx_heartbeat_errors++;
+ if (status & BD_ENET_TX_LC) /* Late collision */
+ ndev->stats.tx_window_errors++;
+ if (status & BD_ENET_TX_RL) /* Retrans limit */
+ ndev->stats.tx_aborted_errors++;
+ if (status & BD_ENET_TX_UN) /* Underrun */
+ ndev->stats.tx_fifo_errors++;
+ if (status & BD_ENET_TX_CSL) /* Carrier lost */
+ ndev->stats.tx_carrier_errors++;
+ } else {
+ ndev->stats.tx_packets++;
+ }
+
+ if (status & BD_ENET_TX_READY)
+ printk("HEY! Enet xmit interrupt and TX_READY.\n");
+
+ /* Deferred means some collisions occurred during transmit,
+ * but we eventually sent the packet OK.
+ */
+ if (status & BD_ENET_TX_DEF)
+ ndev->stats.collisions++;
+
+ /* Free the sk buffer associated with this last transmit */
+ dev_kfree_skb_any(skb);
+ fep->tx_skbuff[fep->skb_dirty] = NULL;
+ fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
+
+ /* Update pointer to next buffer descriptor to be transmitted */
+ if (status & BD_ENET_TX_WRAP)
+ bdp = fep->tx_bd_base;
+ else
+ bdp++;
+
+ /* Since we have freed up a buffer, the ring is no longer full */
+ if (fep->tx_full) {
+ fep->tx_full = 0;
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+ }
+ }
+ fep->dirty_tx = bdp;
+ spin_unlock(&fep->hw_lock);
+}
+
+
+/* During a receive, the cur_rx points to the current incoming buffer.
+ * When we update through the ring, if the next incoming buffer has
+ * not been given to the system, we just set the empty indicator,
+ * effectively tossing the packet.
+ */
+static void
+fec_enet_rx(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ struct bufdesc *bdp;
+ unsigned short status;
+ struct sk_buff *skb;
+ ushort pkt_len;
+ __u8 *data;
+
+#ifdef CONFIG_M532x
+ flush_cache_all();
+#endif
+
+ spin_lock(&fep->hw_lock);
+
+ /* First, grab all of the stats for the incoming packet.
+ * These get messed up if we get called due to a busy condition.
+ */
+ bdp = fep->cur_rx;
+
+ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
+
+ /* Since we have allocated space to hold a complete frame,
+ * the last indicator should be set.
+ */
+ if ((status & BD_ENET_RX_LAST) == 0)
+ printk("FEC ENET: rcv is not +last\n");
+
+ if (!fep->opened)
+ goto rx_processing_done;
+
+ /* Check for errors. */
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+ BD_ENET_RX_CR | BD_ENET_RX_OV)) {
+ ndev->stats.rx_errors++;
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
+ /* Frame too long or too short. */
+ ndev->stats.rx_length_errors++;
+ }
+ if (status & BD_ENET_RX_NO) /* Frame alignment */
+ ndev->stats.rx_frame_errors++;
+ if (status & BD_ENET_RX_CR) /* CRC Error */
+ ndev->stats.rx_crc_errors++;
+ if (status & BD_ENET_RX_OV) /* FIFO overrun */
+ ndev->stats.rx_fifo_errors++;
+ }
+
+ /* Report late collisions as a frame error.
+ * On this error, the BD is closed, but we don't know what we
+ * have in the buffer. So, just drop this frame on the floor.
+ */
+ if (status & BD_ENET_RX_CL) {
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_frame_errors++;
+ goto rx_processing_done;
+ }
+
+ /* Process the incoming frame. */
+ ndev->stats.rx_packets++;
+ pkt_len = bdp->cbd_datlen;
+ ndev->stats.rx_bytes += pkt_len;
+ data = (__u8*)__va(bdp->cbd_bufaddr);
+
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
+
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(data, pkt_len);
+
+ /* This does 16 byte alignment, exactly what we need.
+ * The packet length includes FCS, but we don't want to
+ * include that when passing upstream as it messes up
+ * bridging applications.
+ */
+ skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);
+
+ if (unlikely(!skb)) {
+ printk("%s: Memory squeeze, dropping packet.\n",
+ ndev->name);
+ ndev->stats.rx_dropped++;
+ } else {
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb_put(skb, pkt_len - 4); /* Make room */
+ skb_copy_to_linear_data(skb, data, pkt_len - 4);
+ skb->protocol = eth_type_trans(skb, ndev);
+ if (!skb_defer_rx_timestamp(skb))
+ netif_rx(skb);
+ }
+
+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
+ FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
+rx_processing_done:
+ /* Clear the status flags for this buffer */
+ status &= ~BD_ENET_RX_STATS;
+
+ /* Mark the buffer empty */
+ status |= BD_ENET_RX_EMPTY;
+ bdp->cbd_sc = status;
+
+ /* Update BD pointer to next entry */
+ if (status & BD_ENET_RX_WRAP)
+ bdp = fep->rx_bd_base;
+ else
+ bdp++;
+ /* Doing this here will keep the FEC running while we process
+ * incoming frames. On a heavily loaded network, we should be
+ * able to keep up at the expense of system resources.
+ */
+ writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+ }
+ fep->cur_rx = bdp;
+
+ spin_unlock(&fep->hw_lock);
+}
+
+static irqreturn_t
+fec_enet_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ uint int_events;
+ irqreturn_t ret = IRQ_NONE;
+
+ do {
+ int_events = readl(fep->hwp + FEC_IEVENT);
+ writel(int_events, fep->hwp + FEC_IEVENT);
+
+ if (int_events & FEC_ENET_RXF) {
+ ret = IRQ_HANDLED;
+ fec_enet_rx(ndev);
+ }
+
+ /* Transmit OK, or non-fatal error. Update the buffer
+ * descriptors. FEC handles all errors, we just discover
+ * them as part of the transmit process.
+ */
+ if (int_events & FEC_ENET_TXF) {
+ ret = IRQ_HANDLED;
+ fec_enet_tx(ndev);
+ }
+
+ if (int_events & FEC_ENET_MII) {
+ ret = IRQ_HANDLED;
+ complete(&fep->mdio_done);
+ }
+ } while (int_events);
+
+ return ret;
+}
+
+
+
+/* ------------------------------------------------------------------------- */
+static inline void fec_get_mac(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
+ unsigned char *iap, tmpaddr[ETH_ALEN];
+
+ /*
+ * Try to get the MAC address in the following order:
+ *
+ * 1) module parameter via kernel command line in the form
+ * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
+ */
+ iap = macaddr;
+
+#ifdef CONFIG_OF
+ /*
+ * 2) from device tree data
+ */
+ if (!is_valid_ether_addr(iap)) {
+ struct device_node *np = fep->pdev->dev.of_node;
+ if (np) {
+ const char *mac = of_get_mac_address(np);
+ if (mac)
+ iap = (unsigned char *) mac;
+ }
+ }
+#endif
+
+ /*
+ * 3) from flash or fuse (via platform data)
+ */
+ if (!is_valid_ether_addr(iap)) {
+#ifdef CONFIG_M5272
+ if (FEC_FLASHMAC)
+ iap = (unsigned char *)FEC_FLASHMAC;
+#else
+ if (pdata)
+ memcpy(iap, pdata->mac, ETH_ALEN);
+#endif
+ }
+
+ /*
+ * 4) FEC mac registers set by bootloader
+ */
+ if (!is_valid_ether_addr(iap)) {
+ *((unsigned long *) &tmpaddr[0]) =
+ be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
+ *((unsigned short *) &tmpaddr[4]) =
+ be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+ iap = &tmpaddr[0];
+ }
+
+ memcpy(ndev->dev_addr, iap, ETH_ALEN);
+
+ /* When the address comes from the macaddr module parameter, make it
+ * unique per controller by adding the device id to the last octet.
+ */
+ if (iap == macaddr)
+ ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Phy section
+ */
+static void fec_enet_adjust_link(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phy_dev = fep->phy_dev;
+ unsigned long flags;
+
+ int status_change = 0;
+
+ spin_lock_irqsave(&fep->hw_lock, flags);
+
+ /* Prevent the PHY state machine from staying halted after an MII error */
+ if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
+ phy_dev->state = PHY_RESUMING;
+ goto spin_unlock;
+ }
+
+ /* Duplex link change */
+ if (phy_dev->link) {
+ if (fep->full_duplex != phy_dev->duplex) {
+ fec_restart(ndev, phy_dev->duplex);
+ status_change = 1;
+ }
+ }
+
+ /* Link on or off change */
+ if (phy_dev->link != fep->link) {
+ fep->link = phy_dev->link;
+ if (phy_dev->link)
+ fec_restart(ndev, phy_dev->duplex);
+ else
+ fec_stop(ndev);
+ status_change = 1;
+ }
+
+spin_unlock:
+ spin_unlock_irqrestore(&fep->hw_lock, flags);
+
+ if (status_change)
+ phy_print_status(phy_dev);
+}
+
+static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct fec_enet_private *fep = bus->priv;
+ unsigned long time_left;
+
+ fep->mii_timeout = 0;
+ init_completion(&fep->mdio_done);
+
+ /* start a read op */
+ writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ time_left = wait_for_completion_timeout(&fep->mdio_done,
+ usecs_to_jiffies(FEC_MII_TIMEOUT));
+ if (time_left == 0) {
+ fep->mii_timeout = 1;
+ printk(KERN_ERR "FEC: MDIO read timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ /* return value */
+ return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
+}
+
+static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ struct fec_enet_private *fep = bus->priv;
+ unsigned long time_left;
+
+ fep->mii_timeout = 0;
+ init_completion(&fep->mdio_done);
+
+ /* start a write op */
+ writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ FEC_MMFR_TA | FEC_MMFR_DATA(value),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ time_left = wait_for_completion_timeout(&fep->mdio_done,
+ usecs_to_jiffies(FEC_MII_TIMEOUT));
+ if (time_left == 0) {
+ fep->mii_timeout = 1;
+ printk(KERN_ERR "FEC: MDIO write timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int fec_enet_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+static int fec_enet_mii_probe(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ struct phy_device *phy_dev = NULL;
+ char mdio_bus_id[MII_BUS_ID_SIZE];
+ char phy_name[MII_BUS_ID_SIZE + 3];
+ int phy_id;
+ int dev_id = fep->pdev->id;
+
+ fep->phy_dev = NULL;
+
+ /* check for attached phy */
+ for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
+ if ((fep->mii_bus->phy_mask & (1 << phy_id)))
+ continue;
+ if (fep->mii_bus->phy_map[phy_id] == NULL)
+ continue;
+ if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
+ continue;
+ if (dev_id--)
+ continue;
+ strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
+ break;
+ }
+
+ if (phy_id >= PHY_MAX_ADDR) {
+ printk(KERN_INFO "%s: no PHY, assuming direct connection "
+ "to switch\n", ndev->name);
+ strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
+ phy_id = 0;
+ }
+
+ snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
+ phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
+ fep->phy_interface);
+ if (IS_ERR(phy_dev)) {
+ printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
+ return PTR_ERR(phy_dev);
+ }
+
+ /* mask with MAC supported features */
+ if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT)
+ phy_dev->supported &= PHY_GBIT_FEATURES;
+ else
+ phy_dev->supported &= PHY_BASIC_FEATURES;
+
+ phy_dev->advertising = phy_dev->supported;
+
+ fep->phy_dev = phy_dev;
+ fep->link = 0;
+ fep->full_duplex = 0;
+
+ printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
+ fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
+ fep->phy_dev->irq);
+
+ return 0;
+}
+
+static int fec_enet_mii_init(struct platform_device *pdev)
+{
+ static struct mii_bus *fec0_mii_bus;
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ int err = -ENXIO, i;
+
+ /*
+ * The dual FEC interfaces are not equivalent on enet-mac.
+ * Here are the differences:
+ *
+ *  - fec0 supports MII & RMII modes, while fec1 only supports RMII
+ *  - fec0 acts as the 1588 time master, while fec1 is the slave
+ *  - external PHYs can only be configured by fec0
+ *
+ * That is to say, fec1 cannot work independently. It only works
+ * when fec0 is working. The reason behind this design is that the
+ * second interface is added primarily for switch mode.
+ *
+ * Because of the last point above, both PHYs are attached to the
+ * fec0 MDIO interface in board designs, and need to be configured
+ * by the fec0 mii_bus.
+ */
+ if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id > 0) {
+ /* fec1 uses fec0 mii_bus */
+ fep->mii_bus = fec0_mii_bus;
+ return 0;
+ }
+
+ fep->mii_timeout = 0;
+
+ /*
+ * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
+ *
+ * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)', while
+ * for ENET-MAC it is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
+ * Reference Manual has an error here, which is corrected in the
+ * i.MX6Q documentation.
+ */
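+ /*
+ * Worked example, assuming a 66 MHz reference clock: DIV_ROUND_UP()
+ * yields 14 (a field value of 13 after the ENET-MAC adjustment),
+ * giving an MDC of 66 MHz / (14 * 2) ~= 2.36 MHz, just under the
+ * 2.5 MHz limit.
+ */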
+ fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000);
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+ fep->phy_speed--;
+ fep->phy_speed <<= 1;
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+ fep->mii_bus = mdiobus_alloc();
+ if (fep->mii_bus == NULL) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ fep->mii_bus->name = "fec_enet_mii_bus";
+ fep->mii_bus->read = fec_enet_mdio_read;
+ fep->mii_bus->write = fec_enet_mdio_write;
+ fep->mii_bus->reset = fec_enet_mdio_reset;
+ snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
+ fep->mii_bus->priv = fep;
+ fep->mii_bus->parent = &pdev->dev;
+
+ fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!fep->mii_bus->irq) {
+ err = -ENOMEM;
+ goto err_out_free_mdiobus;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ fep->mii_bus->irq[i] = PHY_POLL;
+
+ if (mdiobus_register(fep->mii_bus))
+ goto err_out_free_mdio_irq;
+
+ /* save fec0 mii_bus */
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+ fec0_mii_bus = fep->mii_bus;
+
+ return 0;
+
+err_out_free_mdio_irq:
+ kfree(fep->mii_bus->irq);
+err_out_free_mdiobus:
+ mdiobus_free(fep->mii_bus);
+err_out:
+ return err;
+}
+
+static void fec_enet_mii_remove(struct fec_enet_private *fep)
+{
+ if (fep->phy_dev)
+ phy_disconnect(fep->phy_dev);
+ mdiobus_unregister(fep->mii_bus);
+ kfree(fep->mii_bus->irq);
+ mdiobus_free(fep->mii_bus);
+}
+
+static int fec_enet_get_settings(struct net_device *ndev,
+ struct ethtool_cmd *cmd)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phydev = fep->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(phydev, cmd);
+}
+
+static int fec_enet_set_settings(struct net_device *ndev,
+ struct ethtool_cmd *cmd)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phydev = fep->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, cmd);
+}
+
+static void fec_enet_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ strcpy(info->driver, fep->pdev->dev.driver->name);
+ strcpy(info->version, "Revision: 1.0");
+ strcpy(info->bus_info, dev_name(&ndev->dev));
+}
+
+static struct ethtool_ops fec_enet_ethtool_ops = {
+ .get_settings = fec_enet_get_settings,
+ .set_settings = fec_enet_set_settings,
+ .get_drvinfo = fec_enet_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phydev = fep->phy_dev;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(phydev, rq, cmd);
+}
+
+static void fec_enet_free_buffers(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int i;
+ struct sk_buff *skb;
+ struct bufdesc *bdp;
+
+ bdp = fep->rx_bd_base;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ skb = fep->rx_skbuff[i];
+
+ if (bdp->cbd_bufaddr)
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+ if (skb)
+ dev_kfree_skb(skb);
+ bdp++;
+ }
+
+ bdp = fep->tx_bd_base;
+ for (i = 0; i < TX_RING_SIZE; i++)
+ kfree(fep->tx_bounce[i]);
+}
+
+static int fec_enet_alloc_buffers(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int i;
+ struct sk_buff *skb;
+ struct bufdesc *bdp;
+
+ bdp = fep->rx_bd_base;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
+ if (!skb) {
+ fec_enet_free_buffers(ndev);
+ return -ENOMEM;
+ }
+ fep->rx_skbuff[i] = skb;
+
+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
+ FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+ bdp->cbd_sc = BD_ENET_RX_EMPTY;
+ bdp++;
+ }
+
+ /* Set the last buffer to wrap. */
+ bdp--;
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ bdp = fep->tx_bd_base;
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
+
+ bdp->cbd_sc = 0;
+ bdp->cbd_bufaddr = 0;
+ bdp++;
+ }
+
+ /* Set the last buffer to wrap. */
+ bdp--;
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ return 0;
+}
+
+static int
+fec_enet_open(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int ret;
+
+ /* I should reset the ring buffers here, but I don't yet know
+ * a simple way to do that.
+ */
+
+ ret = fec_enet_alloc_buffers(ndev);
+ if (ret)
+ return ret;
+
+ /* Probe and connect to PHY when open the interface */
+ ret = fec_enet_mii_probe(ndev);
+ if (ret) {
+ fec_enet_free_buffers(ndev);
+ return ret;
+ }
+ phy_start(fep->phy_dev);
+ netif_start_queue(ndev);
+ fep->opened = 1;
+ return 0;
+}
+
+static int
+fec_enet_close(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ /* Don't know what to do yet. */
+ fep->opened = 0;
+ netif_stop_queue(ndev);
+ fec_stop(ndev);
+
+ if (fep->phy_dev) {
+ phy_stop(fep->phy_dev);
+ phy_disconnect(fep->phy_dev);
+ }
+
+ fec_enet_free_buffers(ndev);
+
+ return 0;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ * Skeleton taken from sunlance driver.
+ * The CPM Ethernet implementation allows Multicast as well as individual
+ * MAC address filtering. Some of the drivers check to make sure it is
+ * a group multicast address, and discard those that are not. I guess I
+ * will do the same for now, but just remove the test if you want
+ * individual filtering as well (do the upper net layers want or support
+ * this kind of feature?).
+ */
+
+#define HASH_BITS 6 /* #bits in hash */
+#define CRC32_POLY 0xEDB88320
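+
+/* The bit-serial loop below computes the same little-endian CRC32 as
+ * ether_crc_le(); the top HASH_BITS of the result pick one of the 64
+ * group-hash bits, e.g. a CRC of 0xffffffff sets bit 31 of
+ * FEC_GRP_HASH_TABLE_HIGH.
+ */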
+
+static void set_multicast_list(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct netdev_hw_addr *ha;
+ unsigned int i, bit, data, crc, tmp;
+ unsigned char hash;
+
+ if (ndev->flags & IFF_PROMISC) {
+ tmp = readl(fep->hwp + FEC_R_CNTRL);
+ tmp |= 0x8;
+ writel(tmp, fep->hwp + FEC_R_CNTRL);
+ return;
+ }
+
+ tmp = readl(fep->hwp + FEC_R_CNTRL);
+ tmp &= ~0x8;
+ writel(tmp, fep->hwp + FEC_R_CNTRL);
+
+ if (ndev->flags & IFF_ALLMULTI) {
+ /* Catch all multicast addresses, so set the
+ * filter to all 1's
+ */
+ writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+
+ return;
+ }
+
+ /* Clear filter and add the addresses in hash register
+ */
+ writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ /* calculate crc32 value of mac address */
+ crc = 0xffffffff;
+
+ for (i = 0; i < ndev->addr_len; i++) {
+ data = ha->addr[i];
+ for (bit = 0; bit < 8; bit++, data >>= 1) {
+ crc = (crc >> 1) ^
+ (((crc ^ data) & 1) ? CRC32_POLY : 0);
+ }
+ }
+
+ /* Only the upper 6 bits (HASH_BITS) are used,
+ * which point to a specific bit in the hash registers.
+ */
+ hash = (crc >> (32 - HASH_BITS)) & 0x3f;
+
+ if (hash > 31) {
+ tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ tmp |= 1 << (hash - 32);
+ writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ } else {
+ tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+ tmp |= 1 << hash;
+ writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+ }
+ }
+}
+
+/* Set a MAC change in hardware. */
+static int
+fec_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+
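+ /* For example, 00:04:9f:01:30:e0 yields FEC_ADDR_LOW = 0x00049f01
+ * and FEC_ADDR_HIGH = 0x30e00000.
+ */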
+ writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
+ (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
+ fep->hwp + FEC_ADDR_LOW);
+ writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
+ fep->hwp + FEC_ADDR_HIGH);
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * fec_poll_controller - FEC poll controller function
+ * @dev: The FEC network adapter
+ *
+ * Polled functionality used by netconsole and others in non-interrupt mode
+ */
+void fec_poll_controller(struct net_device *dev)
+{
+ int i;
+ struct fec_enet_private *fep = netdev_priv(dev);
+
+ for (i = 0; i < FEC_IRQ_NUM; i++) {
+ if (fep->irq[i] > 0) {
+ disable_irq(fep->irq[i]);
+ fec_enet_interrupt(fep->irq[i], dev);
+ enable_irq(fep->irq[i]);
+ }
+ }
+}
+#endif
+
+static const struct net_device_ops fec_netdev_ops = {
+ .ndo_open = fec_enet_open,
+ .ndo_stop = fec_enet_close,
+ .ndo_start_xmit = fec_enet_start_xmit,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = fec_timeout,
+ .ndo_set_mac_address = fec_set_mac_address,
+ .ndo_do_ioctl = fec_enet_ioctl,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = fec_poll_controller,
+#endif
+};
+
+/*
+ * XXX: We need to clean up on failure exits here.
+ */
+static int fec_enet_init(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct bufdesc *cbd_base;
+ struct bufdesc *bdp;
+ int i;
+
+ /* Allocate memory for buffer descriptors. */
+ cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
+ GFP_KERNEL);
+ if (!cbd_base) {
+ printk("FEC: allocate descriptor memory failed?\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&fep->hw_lock);
+
+ fep->netdev = ndev;
+
+ /* Get the Ethernet address */
+ fec_get_mac(ndev);
+
+ /* Set receive and transmit descriptor base. */
+ fep->rx_bd_base = cbd_base;
+ fep->tx_bd_base = cbd_base + RX_RING_SIZE;
+
+ /* The FEC Ethernet specific entries in the device structure */
+ ndev->watchdog_timeo = TX_TIMEOUT;
+ ndev->netdev_ops = &fec_netdev_ops;
+ ndev->ethtool_ops = &fec_enet_ethtool_ops;
+
+ /* Initialize the receive buffer descriptors. */
+ bdp = fep->rx_bd_base;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+
+ /* Initialize the BD for every fragment in the page. */
+ bdp->cbd_sc = 0;
+ bdp++;
+ }
+
+ /* Set the last buffer to wrap */
+ bdp--;
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ /* ...and the same for transmit */
+ bdp = fep->tx_bd_base;
+ for (i = 0; i < TX_RING_SIZE; i++) {
+
+ /* Initialize the BD for every fragment in the page. */
+ bdp->cbd_sc = 0;
+ bdp->cbd_bufaddr = 0;
+ bdp++;
+ }
+
+ /* Set the last buffer to wrap */
+ bdp--;
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ fec_restart(ndev, 0);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static int __devinit fec_get_phy_mode_dt(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ if (np)
+ return of_get_phy_mode(np);
+
+ return -ENODEV;
+}
+
+static void __devinit fec_reset_phy(struct platform_device *pdev)
+{
+ int err, phy_reset;
+ struct device_node *np = pdev->dev.of_node;
+
+ if (!np)
+ return;
+
+ phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
+ err = gpio_request_one(phy_reset, GPIOF_OUT_INIT_LOW, "phy-reset");
+ if (err) {
+ pr_debug("FEC: failed to get gpio phy-reset: %d\n", err);
+ return;
+ }
+ msleep(1);
+ gpio_set_value(phy_reset, 1);
+}
+#else /* CONFIG_OF */
+static inline int fec_get_phy_mode_dt(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+
+static inline void fec_reset_phy(struct platform_device *pdev)
+{
+ /*
+ * In the case of a platform probe, the reset has already been done
+ * by machine code.
+ */
+}
+#endif /* CONFIG_OF */
+
+static int __devinit
+fec_probe(struct platform_device *pdev)
+{
+ struct fec_enet_private *fep;
+ struct fec_platform_data *pdata;
+ struct net_device *ndev;
+ int i, irq, ret = 0;
+ struct resource *r;
+ const struct of_device_id *of_id;
+
+ of_id = of_match_device(fec_dt_ids, &pdev->dev);
+ if (of_id)
+ pdev->id_entry = of_id->data;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -ENXIO;
+
+ r = request_mem_region(r->start, resource_size(r), pdev->name);
+ if (!r)
+ return -EBUSY;
+
+ /* Init network device */
+ ndev = alloc_etherdev(sizeof(struct fec_enet_private));
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto failed_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ /* setup board info structure */
+ fep = netdev_priv(ndev);
+
+ fep->hwp = ioremap(r->start, resource_size(r));
+ fep->pdev = pdev;
+
+ if (!fep->hwp) {
+ ret = -ENOMEM;
+ goto failed_ioremap;
+ }
+
+ platform_set_drvdata(pdev, ndev);
+
+ ret = fec_get_phy_mode_dt(pdev);
+ if (ret < 0) {
+ pdata = pdev->dev.platform_data;
+ if (pdata)
+ fep->phy_interface = pdata->phy;
+ else
+ fep->phy_interface = PHY_INTERFACE_MODE_MII;
+ } else {
+ fep->phy_interface = ret;
+ }
+
+ fec_reset_phy(pdev);
+
+ for (i = 0; i < FEC_IRQ_NUM; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (i && irq < 0)
+ break;
+ ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
+ if (ret) {
+ while (--i >= 0) {
+ irq = platform_get_irq(pdev, i);
+ free_irq(irq, ndev);
+ }
+ goto failed_irq;
+ }
+ }
+
+ fep->clk = clk_get(&pdev->dev, "fec_clk");
+ if (IS_ERR(fep->clk)) {
+ ret = PTR_ERR(fep->clk);
+ goto failed_clk;
+ }
+ clk_enable(fep->clk);
+
+ ret = fec_enet_init(ndev);
+ if (ret)
+ goto failed_init;
+
+ ret = fec_enet_mii_init(pdev);
+ if (ret)
+ goto failed_mii_init;
+
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(ndev);
+
+ ret = register_netdev(ndev);
+ if (ret)
+ goto failed_register;
+
+ return 0;
+
+failed_register:
+ fec_enet_mii_remove(fep);
+failed_mii_init:
+failed_init:
+ clk_disable(fep->clk);
+ clk_put(fep->clk);
+failed_clk:
+ for (i = 0; i < FEC_IRQ_NUM; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq > 0)
+ free_irq(irq, ndev);
+ }
+failed_irq:
+ iounmap(fep->hwp);
+failed_ioremap:
+ free_netdev(ndev);
+failed_alloc_etherdev:
+ release_mem_region(r->start, resource_size(r));
+
+ return ret;
+}
+
+static int __devexit
+fec_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct resource *r;
+
+ fec_stop(ndev);
+ fec_enet_mii_remove(fep);
+ clk_disable(fep->clk);
+ clk_put(fep->clk);
+ iounmap(fep->hwp);
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ BUG_ON(!r);
+ release_mem_region(r->start, resource_size(r));
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int
+fec_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ fec_stop(ndev);
+ netif_device_detach(ndev);
+ }
+ clk_disable(fep->clk);
+
+ return 0;
+}
+
+static int
+fec_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ clk_enable(fep->clk);
+ if (netif_running(ndev)) {
+ fec_restart(ndev, fep->full_duplex);
+ netif_device_attach(ndev);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops fec_pm_ops = {
+ .suspend = fec_suspend,
+ .resume = fec_resume,
+ .freeze = fec_suspend,
+ .thaw = fec_resume,
+ .poweroff = fec_suspend,
+ .restore = fec_resume,
+};
+#endif
+
+static struct platform_driver fec_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &fec_pm_ops,
+#endif
+ .of_match_table = fec_dt_ids,
+ },
+ .id_table = fec_devtype,
+ .probe = fec_probe,
+ .remove = __devexit_p(fec_drv_remove),
+};
+
+static int __init
+fec_enet_module_init(void)
+{
+ printk(KERN_INFO "FEC Ethernet Driver\n");
+
+ return platform_driver_register(&fec_driver);
+}
+
+static void __exit
+fec_enet_cleanup(void)
+{
+ platform_driver_unregister(&fec_driver);
+}
+
+module_exit(fec_enet_cleanup);
+module_init(fec_enet_module_init);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
new file mode 100644
index 000000000000..8b2c6d797e6d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -0,0 +1,148 @@
+/****************************************************************************/
+
+/*
+ * fec.h -- Fast Ethernet Controller for Motorola ColdFire SoC
+ * processors.
+ *
+ * (C) Copyright 2000-2005, Greg Ungerer (gerg@snapgear.com)
+ * (C) Copyright 2000-2001, Lineo (www.lineo.com)
+ */
+
+/****************************************************************************/
+#ifndef FEC_H
+#define FEC_H
+/****************************************************************************/
+
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+ defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+/*
+ * Just figures, Motorola would have to change the offsets for
+ * registers in the same peripheral device on different models
+ * of the ColdFire!
+ */
+#define FEC_IEVENT 0x004 /* Interrupt event reg */
+#define FEC_IMASK 0x008 /* Interrupt mask reg */
+#define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */
+#define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */
+#define FEC_ECNTRL 0x024 /* Ethernet control reg */
+#define FEC_MII_DATA 0x040 /* MII manage frame reg */
+#define FEC_MII_SPEED 0x044 /* MII speed control reg */
+#define FEC_MIB_CTRLSTAT 0x064 /* MIB control/status reg */
+#define FEC_R_CNTRL 0x084 /* Receive control reg */
+#define FEC_X_CNTRL 0x0c4 /* Transmit Control reg */
+#define FEC_ADDR_LOW 0x0e4 /* Low 32bits MAC address */
+#define FEC_ADDR_HIGH 0x0e8 /* High 16bits MAC address */
+#define FEC_OPD 0x0ec /* Opcode + Pause duration */
+#define FEC_HASH_TABLE_HIGH 0x118 /* High 32bits hash table */
+#define FEC_HASH_TABLE_LOW 0x11c /* Low 32bits hash table */
+#define FEC_GRP_HASH_TABLE_HIGH 0x120 /* High 32bits hash table */
+#define FEC_GRP_HASH_TABLE_LOW 0x124 /* Low 32bits hash table */
+#define FEC_X_WMRK 0x144 /* FIFO transmit water mark */
+#define FEC_R_BOUND 0x14c /* FIFO receive bound reg */
+#define FEC_R_FSTART 0x150 /* FIFO receive start reg */
+#define FEC_R_DES_START 0x180 /* Receive descriptor ring */
+#define FEC_X_DES_START 0x184 /* Transmit descriptor ring */
+#define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */
+#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */
+#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */
+
+#else
+
+#define FEC_ECNTRL 0x000 /* Ethernet control reg */
+#define FEC_IEVENT 0x004 /* Interrupt event reg */
+#define FEC_IMASK 0x008 /* Interrupt mask reg */
+#define FEC_IVEC 0x00c /* Interrupt vec status reg */
+#define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */
+#define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */
+#define FEC_MII_DATA 0x040 /* MII manage frame reg */
+#define FEC_MII_SPEED 0x044 /* MII speed control reg */
+#define FEC_R_BOUND 0x08c /* FIFO receive bound reg */
+#define FEC_R_FSTART 0x090 /* FIFO receive start reg */
+#define FEC_X_WMRK 0x0a4 /* FIFO transmit water mark */
+#define FEC_X_FSTART 0x0ac /* FIFO transmit start reg */
+#define FEC_R_CNTRL 0x104 /* Receive control reg */
+#define FEC_MAX_FRM_LEN 0x108 /* Maximum frame length reg */
+#define FEC_X_CNTRL 0x144 /* Transmit Control reg */
+#define FEC_ADDR_LOW 0x3c0 /* Low 32bits MAC address */
+#define FEC_ADDR_HIGH 0x3c4 /* High 16bits MAC address */
+#define FEC_GRP_HASH_TABLE_HIGH 0x3c8 /* High 32bits hash table */
+#define FEC_GRP_HASH_TABLE_LOW 0x3cc /* Low 32bits hash table */
+#define FEC_R_DES_START 0x3d0 /* Receive descriptor ring */
+#define FEC_X_DES_START 0x3d4 /* Transmit descriptor ring */
+#define FEC_R_BUFF_SIZE 0x3d8 /* Maximum receive buff size */
+#define FEC_FIFO_RAM 0x400 /* FIFO RAM buffer */
+
+#endif /* CONFIG_M523x/M527x/M528x/M520x/M532x/ARCH_MXC/SOC_IMX28 */
+
+
+/*
+ * Define the buffer descriptor structure.
+ */
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+struct bufdesc {
+ unsigned short cbd_datlen; /* Data length */
+ unsigned short cbd_sc; /* Control and status info */
+ unsigned long cbd_bufaddr; /* Buffer address */
+};
+#else
+struct bufdesc {
+ unsigned short cbd_sc; /* Control and status info */
+ unsigned short cbd_datlen; /* Data length */
+ unsigned long cbd_bufaddr; /* Buffer address */
+};
+#endif
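+
+/* The two layouts above differ only in the order of the two halfwords in the
+ * first 32-bit word, matching how the big-endian ColdFire and little-endian
+ * i.MX CPUs see the descriptor layout the hardware expects.
+ */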
+
+/*
+ * The following definitions are courtesy of commproc.h, which was
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
+ */
+#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */
+#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */
+#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */
+#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */
+#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */
+#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */
+#define BD_SC_P ((ushort)0x0100) /* xmt preamble */
+#define BD_SC_BR ((ushort)0x0020) /* Break received */
+#define BD_SC_FR ((ushort)0x0010) /* Framing error */
+#define BD_SC_PR ((ushort)0x0008) /* Parity error */
+#define BD_SC_OV ((ushort)0x0002) /* Overrun */
+#define BD_SC_CD ((ushort)0x0001) /* ?? */
+
+/* Buffer descriptor control/status used by Ethernet receive. */
+#define BD_ENET_RX_EMPTY ((ushort)0x8000)
+#define BD_ENET_RX_WRAP ((ushort)0x2000)
+#define BD_ENET_RX_INTR ((ushort)0x1000)
+#define BD_ENET_RX_LAST ((ushort)0x0800)
+#define BD_ENET_RX_FIRST ((ushort)0x0400)
+#define BD_ENET_RX_MISS ((ushort)0x0100)
+#define BD_ENET_RX_LG ((ushort)0x0020)
+#define BD_ENET_RX_NO ((ushort)0x0010)
+#define BD_ENET_RX_SH ((ushort)0x0008)
+#define BD_ENET_RX_CR ((ushort)0x0004)
+#define BD_ENET_RX_OV ((ushort)0x0002)
+#define BD_ENET_RX_CL ((ushort)0x0001)
+#define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */
+
+/* Buffer descriptor control/status used by Ethernet transmit. */
+#define BD_ENET_TX_READY ((ushort)0x8000)
+#define BD_ENET_TX_PAD ((ushort)0x4000)
+#define BD_ENET_TX_WRAP ((ushort)0x2000)
+#define BD_ENET_TX_INTR ((ushort)0x1000)
+#define BD_ENET_TX_LAST ((ushort)0x0800)
+#define BD_ENET_TX_TC ((ushort)0x0400)
+#define BD_ENET_TX_DEF ((ushort)0x0200)
+#define BD_ENET_TX_HB ((ushort)0x0100)
+#define BD_ENET_TX_LC ((ushort)0x0080)
+#define BD_ENET_TX_RL ((ushort)0x0040)
+#define BD_ENET_TX_RCMASK ((ushort)0x003c)
+#define BD_ENET_TX_UN ((ushort)0x0002)
+#define BD_ENET_TX_CSL ((ushort)0x0001)
+#define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */
+
+
+/****************************************************************************/
+#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
new file mode 100644
index 000000000000..30745b56fe5d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -0,0 +1,1104 @@
+/*
+ * Driver for the MPC5200 Fast Ethernet Controller
+ *
+ * Originally written by Dale Farnsworth <dfarnsworth@mvista.com> and
+ * now maintained by Sylvain Munaut <tnt@246tNt.com>
+ *
+ * Copyright (C) 2007 Domen Puncer, Telargo, Inc.
+ * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003-2004 MontaVista, Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/crc32.h>
+#include <linux/hardirq.h>
+#include <linux/delay.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/mpc52xx.h>
+
+#include <sysdev/bestcomm/bestcomm.h>
+#include <sysdev/bestcomm/fec.h>
+
+#include "fec_mpc52xx.h"
+
+#define DRIVER_NAME "mpc52xx-fec"
+
+/* Private driver data structure */
+struct mpc52xx_fec_priv {
+ struct net_device *ndev;
+ int duplex;
+ int speed;
+ int r_irq;
+ int t_irq;
+ struct mpc52xx_fec __iomem *fec;
+ struct bcom_task *rx_dmatsk;
+ struct bcom_task *tx_dmatsk;
+ spinlock_t lock;
+ int msg_enable;
+
+ /* MDIO link details */
+ unsigned int mdio_speed;
+ struct device_node *phy_node;
+ struct phy_device *phydev;
+ enum phy_state link;
+ int seven_wire_mode;
+};
+
+
+static irqreturn_t mpc52xx_fec_interrupt(int, void *);
+static irqreturn_t mpc52xx_fec_rx_interrupt(int, void *);
+static irqreturn_t mpc52xx_fec_tx_interrupt(int, void *);
+static void mpc52xx_fec_stop(struct net_device *dev);
+static void mpc52xx_fec_start(struct net_device *dev);
+static void mpc52xx_fec_reset(struct net_device *dev);
+
+static u8 mpc52xx_fec_mac_addr[6];
+module_param_array_named(mac, mpc52xx_fec_mac_addr, byte, NULL, 0);
+MODULE_PARM_DESC(mac, "six hex octets, e.g. 0x1,0x2,0xc0,0x01,0xba,0xbe");
+
+#define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+static int debug = -1; /* the above default */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "debugging messages level");
+
+static void mpc52xx_fec_tx_timeout(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ dev_warn(&dev->dev, "transmit timed out\n");
+
+ spin_lock_irqsave(&priv->lock, flags);
+ mpc52xx_fec_reset(dev);
+ dev->stats.tx_errors++;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ netif_wake_queue(dev);
+}
+
+static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ out_be32(&fec->paddr1, *(u32 *)(&mac[0]));
+ out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
+}
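+
+/* paddr1 holds the first four octets of the station address; the last two go
+ * in the upper half of paddr2, with FEC_PADDR2_TYPE in the lower half.
+ */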
+
+static void mpc52xx_fec_get_paddr(struct net_device *dev, u8 *mac)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ *(u32 *)(&mac[0]) = in_be32(&fec->paddr1);
+ *(u16 *)(&mac[4]) = in_be32(&fec->paddr2) >> 16;
+}
+
+static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sock = addr;
+
+ memcpy(dev->dev_addr, sock->sa_data, dev->addr_len);
+
+ mpc52xx_fec_set_paddr(dev, sock->sa_data);
+ return 0;
+}
+
+static void mpc52xx_fec_free_rx_buffers(struct net_device *dev, struct bcom_task *s)
+{
+ while (!bcom_queue_empty(s)) {
+ struct bcom_fec_bd *bd;
+ struct sk_buff *skb;
+
+ skb = bcom_retrieve_buffer(s, NULL, (struct bcom_bd **)&bd);
+ dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
+ DMA_FROM_DEVICE);
+ kfree_skb(skb);
+ }
+}
+
+static void
+mpc52xx_fec_rx_submit(struct net_device *dev, struct sk_buff *rskb)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct bcom_fec_bd *bd;
+
+ bd = (struct bcom_fec_bd *) bcom_prepare_next_buffer(priv->rx_dmatsk);
+ bd->status = FEC_RX_BUFFER_SIZE;
+ bd->skb_pa = dma_map_single(dev->dev.parent, rskb->data,
+ FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
+ bcom_submit_next_buffer(priv->rx_dmatsk, rskb);
+}
+
+static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task *rxtsk)
+{
+ struct sk_buff *skb;
+
+ while (!bcom_queue_full(rxtsk)) {
+ skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE);
+ if (!skb)
+ return -EAGAIN;
+
+ /* zero out the initial receive buffers to aid debugging */
+ memset(skb->data, 0, FEC_RX_BUFFER_SIZE);
+ mpc52xx_fec_rx_submit(dev, skb);
+ }
+ return 0;
+}
+
+/* based on generic_adjust_link from fs_enet-main.c */
+static void mpc52xx_fec_adjust_link(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ int new_state = 0;
+
+ if (phydev->link != PHY_DOWN) {
+ if (phydev->duplex != priv->duplex) {
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ u32 rcntrl;
+ u32 tcntrl;
+
+ new_state = 1;
+ priv->duplex = phydev->duplex;
+
+ rcntrl = in_be32(&fec->r_cntrl);
+ tcntrl = in_be32(&fec->x_cntrl);
+
+ rcntrl &= ~FEC_RCNTRL_DRT;
+ tcntrl &= ~FEC_TCNTRL_FDEN;
+ if (phydev->duplex == DUPLEX_FULL)
+ tcntrl |= FEC_TCNTRL_FDEN; /* FD enable */
+ else
+ rcntrl |= FEC_RCNTRL_DRT; /* disable Rx on Tx (HD) */
+
+ out_be32(&fec->r_cntrl, rcntrl);
+ out_be32(&fec->x_cntrl, tcntrl);
+ }
+
+ if (phydev->speed != priv->speed) {
+ new_state = 1;
+ priv->speed = phydev->speed;
+ }
+
+ if (priv->link == PHY_DOWN) {
+ new_state = 1;
+ priv->link = phydev->link;
+ }
+
+ } else if (priv->link) {
+ new_state = 1;
+ priv->link = PHY_DOWN;
+ priv->speed = 0;
+ priv->duplex = -1;
+ }
+
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+}
+
+static int mpc52xx_fec_open(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ int err = -EBUSY;
+
+ if (priv->phy_node) {
+ priv->phydev = of_phy_connect(priv->ndev, priv->phy_node,
+ mpc52xx_fec_adjust_link, 0, 0);
+ if (!priv->phydev) {
+ dev_err(&dev->dev, "of_phy_connect failed\n");
+ return -ENODEV;
+ }
+ phy_start(priv->phydev);
+ }
+
+ if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED,
+ DRIVER_NAME "_ctrl", dev)) {
+ dev_err(&dev->dev, "ctrl interrupt request failed\n");
+ goto free_phy;
+ }
+ if (request_irq(priv->r_irq, mpc52xx_fec_rx_interrupt, 0,
+ DRIVER_NAME "_rx", dev)) {
+ dev_err(&dev->dev, "rx interrupt request failed\n");
+ goto free_ctrl_irq;
+ }
+ if (request_irq(priv->t_irq, mpc52xx_fec_tx_interrupt, 0,
+ DRIVER_NAME "_tx", dev)) {
+ dev_err(&dev->dev, "tx interrupt request failed\n");
+ goto free_2irqs;
+ }
+
+ bcom_fec_rx_reset(priv->rx_dmatsk);
+ bcom_fec_tx_reset(priv->tx_dmatsk);
+
+ err = mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk);
+ if (err) {
+ dev_err(&dev->dev, "mpc52xx_fec_alloc_rx_buffers failed\n");
+ goto free_irqs;
+ }
+
+ bcom_enable(priv->rx_dmatsk);
+ bcom_enable(priv->tx_dmatsk);
+
+ mpc52xx_fec_start(dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+
+ free_irqs:
+ free_irq(priv->t_irq, dev);
+ free_2irqs:
+ free_irq(priv->r_irq, dev);
+ free_ctrl_irq:
+ free_irq(dev->irq, dev);
+ free_phy:
+ if (priv->phydev) {
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
+
+ return err;
+}
+
+static int mpc52xx_fec_close(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ mpc52xx_fec_stop(dev);
+
+ mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk);
+
+ free_irq(dev->irq, dev);
+ free_irq(priv->r_irq, dev);
+ free_irq(priv->t_irq, dev);
+
+ if (priv->phydev) {
+ /* power down phy */
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
+
+ return 0;
+}
+
+/* This will only be invoked if your driver is _not_ in XOFF state.
+ * What this means is that you need not check it, and that this
+ * invariant will hold if you make sure that the netif_*_queue()
+ * calls are done at the proper times.
+ */
+static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct bcom_fec_bd *bd;
+ unsigned long flags;
+
+ if (bcom_queue_full(priv->tx_dmatsk)) {
+ if (net_ratelimit())
+ dev_err(&dev->dev, "transmit queue overrun\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ bd = (struct bcom_fec_bd *)
+ bcom_prepare_next_buffer(priv->tx_dmatsk);
+
+ bd->status = skb->len | BCOM_FEC_TX_BD_TFD | BCOM_FEC_TX_BD_TC;
+ bd->skb_pa = dma_map_single(dev->dev.parent, skb->data, skb->len,
+ DMA_TO_DEVICE);
+
+ skb_tx_timestamp(skb);
+ bcom_submit_next_buffer(priv->tx_dmatsk, skb);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (bcom_queue_full(priv->tx_dmatsk)) {
+ netif_stop_queue(dev);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void mpc52xx_fec_poll_controller(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+
+ disable_irq(priv->t_irq);
+ mpc52xx_fec_tx_interrupt(priv->t_irq, dev);
+ enable_irq(priv->t_irq);
+ disable_irq(priv->r_irq);
+ mpc52xx_fec_rx_interrupt(priv->r_irq, dev);
+ enable_irq(priv->r_irq);
+}
+#endif
+
+
+/* This handles BestComm transmit task interrupts
+ */
+static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+
+ spin_lock(&priv->lock);
+ while (bcom_buffer_done(priv->tx_dmatsk)) {
+ struct sk_buff *skb;
+ struct bcom_fec_bd *bd;
+ skb = bcom_retrieve_buffer(priv->tx_dmatsk, NULL,
+ (struct bcom_bd **)&bd);
+ dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
+ DMA_TO_DEVICE);
+
+ dev_kfree_skb_irq(skb);
+ }
+ spin_unlock(&priv->lock);
+
+ netif_wake_queue(dev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct sk_buff *rskb; /* received sk_buff */
+ struct sk_buff *skb; /* new sk_buff to enqueue in its place */
+ struct bcom_fec_bd *bd;
+ u32 status, physaddr;
+ int length;
+
+ spin_lock(&priv->lock);
+
+ while (bcom_buffer_done(priv->rx_dmatsk)) {
+
+ rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status,
+ (struct bcom_bd **)&bd);
+ physaddr = bd->skb_pa;
+
+ /* Test for errors in received frame */
+ if (status & BCOM_FEC_RX_BD_ERRORS) {
+ /* Drop packet and reuse the buffer */
+ mpc52xx_fec_rx_submit(dev, rskb);
+ dev->stats.rx_dropped++;
+ continue;
+ }
+
+ /* skbs are allocated on open, so now we allocate a new one,
+ * and remove the old (with the packet) */
+ skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE);
+ if (!skb) {
+ /* Can't get a new one : reuse the same & drop pkt */
+ dev_notice(&dev->dev, "Low memory - dropped packet.\n");
+ mpc52xx_fec_rx_submit(dev, rskb);
+ dev->stats.rx_dropped++;
+ continue;
+ }
+
+ /* Enqueue the new sk_buff back on the hardware */
+ mpc52xx_fec_rx_submit(dev, skb);
+
+ /* Process the received skb - Drop the spin lock while
+ * calling into the network stack */
+ spin_unlock(&priv->lock);
+
+ dma_unmap_single(dev->dev.parent, physaddr, rskb->len,
+ DMA_FROM_DEVICE);
+ length = status & BCOM_FEC_RX_BD_LEN_MASK;
+ skb_put(rskb, length - 4); /* length without CRC32 */
+ rskb->protocol = eth_type_trans(rskb, dev);
+ if (!skb_defer_rx_timestamp(rskb))
+ netif_rx(rskb);
+
+ spin_lock(&priv->lock);
+ }
+
+ spin_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ u32 ievent;
+
+ ievent = in_be32(&fec->ievent);
+
+ ievent &= ~FEC_IEVENT_MII; /* mii is handled separately */
+ if (!ievent)
+ return IRQ_NONE;
+
+ out_be32(&fec->ievent, ievent); /* clear pending events */
+
+ /* on fifo error, soft-reset fec */
+ if (ievent & (FEC_IEVENT_RFIFO_ERROR | FEC_IEVENT_XFIFO_ERROR)) {
+
+ if (net_ratelimit() && (ievent & FEC_IEVENT_RFIFO_ERROR))
+ dev_warn(&dev->dev, "FEC_IEVENT_RFIFO_ERROR\n");
+ if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
+ dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");
+
+ spin_lock(&priv->lock);
+ mpc52xx_fec_reset(dev);
+ spin_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+ }
+
+ if (ievent & ~FEC_IEVENT_TFINT)
+ dev_dbg(&dev->dev, "ievent: %08x\n", ievent);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Get the current statistics.
+ * This may be called with the card open or closed.
+ */
+static struct net_device_stats *mpc52xx_fec_get_stats(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ stats->rx_bytes = in_be32(&fec->rmon_r_octets);
+ stats->rx_packets = in_be32(&fec->rmon_r_packets);
+ stats->rx_errors = in_be32(&fec->rmon_r_crc_align) +
+ in_be32(&fec->rmon_r_undersize) +
+ in_be32(&fec->rmon_r_oversize) +
+ in_be32(&fec->rmon_r_frag) +
+ in_be32(&fec->rmon_r_jab);
+
+ stats->tx_bytes = in_be32(&fec->rmon_t_octets);
+ stats->tx_packets = in_be32(&fec->rmon_t_packets);
+ stats->tx_errors = in_be32(&fec->rmon_t_crc_align) +
+ in_be32(&fec->rmon_t_undersize) +
+ in_be32(&fec->rmon_t_oversize) +
+ in_be32(&fec->rmon_t_frag) +
+ in_be32(&fec->rmon_t_jab);
+
+ stats->multicast = in_be32(&fec->rmon_r_mc_pkt);
+ stats->collisions = in_be32(&fec->rmon_t_col);
+
+ /* detailed rx_errors: */
+ stats->rx_length_errors = in_be32(&fec->rmon_r_undersize)
+ + in_be32(&fec->rmon_r_oversize)
+ + in_be32(&fec->rmon_r_frag)
+ + in_be32(&fec->rmon_r_jab);
+ stats->rx_over_errors = in_be32(&fec->r_macerr);
+ stats->rx_crc_errors = in_be32(&fec->ieee_r_crc);
+ stats->rx_frame_errors = in_be32(&fec->ieee_r_align);
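+ /* The FEC exposes a single RMON drop counter, so the same value
+ * is reported as both fifo and missed errors. */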
+ stats->rx_fifo_errors = in_be32(&fec->rmon_r_drop);
+ stats->rx_missed_errors = in_be32(&fec->rmon_r_drop);
+
+ /* detailed tx_errors: */
+ stats->tx_aborted_errors = 0;
+ stats->tx_carrier_errors = in_be32(&fec->ieee_t_cserr);
+ stats->tx_fifo_errors = in_be32(&fec->rmon_t_drop);
+ stats->tx_heartbeat_errors = in_be32(&fec->ieee_t_sqe);
+ stats->tx_window_errors = in_be32(&fec->ieee_t_lcol);
+
+ return stats;
+}
+
+/*
+ * Read MIB counters in order to reset them,
+ * then zero all the stats fields in memory
+ */
+static void mpc52xx_fec_reset_stats(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
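+ /* Freeze the MIB block, zero the whole hardware counter area
+ * through the I/O mapping, then re-enable counting. */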
+ out_be32(&fec->mib_control, FEC_MIB_DISABLE);
+ memset_io(&fec->rmon_t_drop, 0,
+ offsetof(struct mpc52xx_fec, reserved10) -
+ offsetof(struct mpc52xx_fec, rmon_t_drop));
+ out_be32(&fec->mib_control, 0);
+
+ memset(&dev->stats, 0, sizeof(dev->stats));
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ u32 rx_control;
+
+ rx_control = in_be32(&fec->r_cntrl);
+
+ if (dev->flags & IFF_PROMISC) {
+ rx_control |= FEC_RCNTRL_PROM;
+ out_be32(&fec->r_cntrl, rx_control);
+ } else {
+ rx_control &= ~FEC_RCNTRL_PROM;
+ out_be32(&fec->r_cntrl, rx_control);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ out_be32(&fec->gaddr1, 0xffffffff);
+ out_be32(&fec->gaddr2, 0xffffffff);
+ } else {
+ u32 crc;
+ struct netdev_hw_addr *ha;
+ u32 gaddr1 = 0x00000000;
+ u32 gaddr2 = 0x00000000;
+
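+ /* Hash each address with CRC-32 and use the top 6 bits to
+ * select one of 64 filter bits: bits 32-63 live in gaddr1,
+ * bits 0-31 in gaddr2. */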
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr) >> 26;
+ if (crc >= 32)
+ gaddr1 |= 1 << (crc-32);
+ else
+ gaddr2 |= 1 << crc;
+ }
+ out_be32(&fec->gaddr1, gaddr1);
+ out_be32(&fec->gaddr2, gaddr2);
+ }
+ }
+}
+
+/**
+ * mpc52xx_fec_hw_init
+ * @dev: network device
+ *
+ * Set up various hardware settings, only needed once at startup
+ */
+static void mpc52xx_fec_hw_init(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ int i;
+
+ /* Whack a reset. We should wait for this. */
+ out_be32(&fec->ecntrl, FEC_ECNTRL_RESET);
+ for (i = 0; i < FEC_RESET_DELAY; ++i) {
+ if ((in_be32(&fec->ecntrl) & FEC_ECNTRL_RESET) == 0)
+ break;
+ udelay(1);
+ }
+ if (i == FEC_RESET_DELAY)
+ dev_err(&dev->dev, "FEC Reset timeout!\n");
+
+ /* set pause to 0x20 frames */
+ out_be32(&fec->op_pause, FEC_OP_PAUSE_OPCODE | 0x20);
+
+ /* high service request will be deasserted when there are < 7 bytes in the fifo
+ * low service request will be deasserted when there are < 4*7 bytes in the fifo
+ */
+ out_be32(&fec->rfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7);
+ out_be32(&fec->tfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7);
+
+ /* alarm when <= x bytes in FIFO */
+ out_be32(&fec->rfifo_alarm, 0x0000030c);
+ out_be32(&fec->tfifo_alarm, 0x00000100);
+
+ /* begin transmission when 256 bytes are in FIFO (or EOF or FIFO full) */
+ out_be32(&fec->x_wmrk, FEC_FIFO_WMRK_256B);
+
+ /* enable crc generation */
+ out_be32(&fec->xmit_fsm, FEC_XMIT_FSM_APPEND_CRC | FEC_XMIT_FSM_ENABLE_CRC);
+ out_be32(&fec->iaddr1, 0x00000000); /* No individual filter */
+ out_be32(&fec->iaddr2, 0x00000000); /* No individual filter */
+
+ /* set the MII speed.
+ * this can't be done in the phy driver, since it needs to be set
+ * before the rest of the fec is touched (even on resume) */
+ out_be32(&fec->mii_speed, priv->mdio_speed);
+}
+
+/**
+ * mpc52xx_fec_start
+ * @dev: network device
+ *
+ * This function is called to start or restart the FEC during a link
+ * change. This happens on fifo errors or when switching between half
+ * and full duplex.
+ */
+static void mpc52xx_fec_start(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ u32 rcntrl;
+ u32 tcntrl;
+ u32 tmp;
+
+ /* clear sticky error bits */
+ tmp = FEC_FIFO_STATUS_ERR | FEC_FIFO_STATUS_UF | FEC_FIFO_STATUS_OF;
+ out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status) & tmp);
+ out_be32(&fec->tfifo_status, in_be32(&fec->tfifo_status) & tmp);
+
+ /* FIFOs will reset on mpc52xx_fec_enable */
+ out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_ENABLE_IS_RESET);
+
+ /* Set station address. */
+ mpc52xx_fec_set_paddr(dev, dev->dev_addr);
+
+ mpc52xx_fec_set_multicast_list(dev);
+
+ /* set max frame len, enable flow control, select mii mode */
+ rcntrl = FEC_RX_BUFFER_SIZE << FEC_RCNTRL_MAX_FL_SHIFT; /* max frame length */
+ rcntrl |= FEC_RCNTRL_FCE;
+
+ if (!priv->seven_wire_mode)
+ rcntrl |= FEC_RCNTRL_MII_MODE;
+
+ if (priv->duplex == DUPLEX_FULL)
+ tcntrl = FEC_TCNTRL_FDEN; /* FD enable */
+ else {
+ rcntrl |= FEC_RCNTRL_DRT; /* disable Rx on Tx (HD) */
+ tcntrl = 0;
+ }
+ out_be32(&fec->r_cntrl, rcntrl);
+ out_be32(&fec->x_cntrl, tcntrl);
+
+ /* Clear any outstanding interrupt. */
+ out_be32(&fec->ievent, 0xffffffff);
+
+ /* Enable interrupts we wish to service. */
+ out_be32(&fec->imask, FEC_IMASK_ENABLE);
+
+ /* And last, enable the transmit and receive processing. */
+ out_be32(&fec->ecntrl, FEC_ECNTRL_ETHER_EN);
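+ /* Writing the (self-clearing) descriptor-active bit tells the FEC
+ * that new rx descriptors are available. */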
+ out_be32(&fec->r_des_active, 0x01000000);
+}
+
+/**
+ * mpc52xx_fec_stop
+ * @dev: network device
+ *
+ * stop all activity on fec and empty dma buffers
+ */
+static void mpc52xx_fec_stop(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ unsigned long timeout;
+
+ /* disable all interrupts */
+ out_be32(&fec->imask, 0);
+
+ /* Disable the rx task. */
+ bcom_disable(priv->rx_dmatsk);
+
+ /* Wait for tx queue to drain, but only if we're in process context */
+ if (!in_interrupt()) {
+ timeout = jiffies + msecs_to_jiffies(2000);
+ while (time_before(jiffies, timeout) &&
+ !bcom_queue_empty(priv->tx_dmatsk))
+ msleep(100);
+
+ if (time_after_eq(jiffies, timeout)) {
+ dev_err(&dev->dev, "queues didn't drain\n");
+ dev_err(&dev->dev, "  tx: index: %i, outdex: %i\n",
+ priv->tx_dmatsk->index,
+ priv->tx_dmatsk->outdex);
+ dev_err(&dev->dev, "  rx: index: %i, outdex: %i\n",
+ priv->rx_dmatsk->index,
+ priv->rx_dmatsk->outdex);
+ }
+ }
+
+ bcom_disable(priv->tx_dmatsk);
+
+ /* Stop FEC */
+ out_be32(&fec->ecntrl, in_be32(&fec->ecntrl) & ~FEC_ECNTRL_ETHER_EN);
+}
+
+/* reset fec and bestcomm tasks */
+static void mpc52xx_fec_reset(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ mpc52xx_fec_stop(dev);
+
+ out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status));
+ out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_RESET_FIFO);
+
+ mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk);
+
+ mpc52xx_fec_hw_init(dev);
+
+ bcom_fec_rx_reset(priv->rx_dmatsk);
+ bcom_fec_tx_reset(priv->tx_dmatsk);
+
+ mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk);
+
+ bcom_enable(priv->rx_dmatsk);
+ bcom_enable(priv->tx_dmatsk);
+
+ mpc52xx_fec_start(dev);
+
+ netif_wake_queue(dev);
+}
+
+
+/* ethtool interface */
+
+static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static u32 mpc52xx_fec_get_msglevel(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ return priv->msg_enable;
+}
+
+static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ priv->msg_enable = level;
+}
+
+static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
+ .get_settings = mpc52xx_fec_get_settings,
+ .set_settings = mpc52xx_fec_set_settings,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = mpc52xx_fec_get_msglevel,
+ .set_msglevel = mpc52xx_fec_set_msglevel,
+};
+
+
+static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+
+ if (!priv->phydev)
+ return -ENOTSUPP;
+
+ return phy_mii_ioctl(priv->phydev, rq, cmd);
+}
+
+static const struct net_device_ops mpc52xx_fec_netdev_ops = {
+ .ndo_open = mpc52xx_fec_open,
+ .ndo_stop = mpc52xx_fec_close,
+ .ndo_start_xmit = mpc52xx_fec_start_xmit,
+ .ndo_set_rx_mode = mpc52xx_fec_set_multicast_list,
+ .ndo_set_mac_address = mpc52xx_fec_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = mpc52xx_fec_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_tx_timeout = mpc52xx_fec_tx_timeout,
+ .ndo_get_stats = mpc52xx_fec_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = mpc52xx_fec_poll_controller,
+#endif
+};
+
+/* ======================================================================== */
+/* OF Driver */
+/* ======================================================================== */
+
+static int __devinit mpc52xx_fec_probe(struct platform_device *op)
+{
+ int rv;
+ struct net_device *ndev;
+ struct mpc52xx_fec_priv *priv = NULL;
+ struct resource mem;
+ const u32 *prop;
+ int prop_size;
+
+ phys_addr_t rx_fifo;
+ phys_addr_t tx_fifo;
+
+ /* Get the ethernet ndev & its private zone */
+ ndev = alloc_etherdev(sizeof(struct mpc52xx_fec_priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+
+ /* Reserve FEC control zone */
+ rv = of_address_to_resource(op->dev.of_node, 0, &mem);
+ if (rv) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Error while parsing device node resource\n" );
+ goto err_netdev;
+ }
+ if (resource_size(&mem) < sizeof(struct mpc52xx_fec)) {
+ printk(KERN_ERR DRIVER_NAME
+ " - invalid resource size (%lx < %x), check mpc52xx_devices.c\n",
+ (unsigned long)resource_size(&mem),
+ sizeof(struct mpc52xx_fec));
+ rv = -EINVAL;
+ goto err_netdev;
+ }
+
+ if (!request_mem_region(mem.start, sizeof(struct mpc52xx_fec),
+ DRIVER_NAME)) {
+ rv = -EBUSY;
+ goto err_netdev;
+ }
+
+ /* Init ether ndev with what we have */
+ ndev->netdev_ops = &mpc52xx_fec_netdev_ops;
+ ndev->ethtool_ops = &mpc52xx_fec_ethtool_ops;
+ ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT;
+ ndev->base_addr = mem.start;
+ SET_NETDEV_DEV(ndev, &op->dev);
+
+ spin_lock_init(&priv->lock);
+
+ /* ioremap the zones */
+ priv->fec = ioremap(mem.start, sizeof(struct mpc52xx_fec));
+
+ if (!priv->fec) {
+ rv = -ENOMEM;
+ goto err_mem_region;
+ }
+
+ /* Bestcomm init */
+ rx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, rfifo_data);
+ tx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, tfifo_data);
+
+ priv->rx_dmatsk = bcom_fec_rx_init(FEC_RX_NUM_BD, rx_fifo, FEC_RX_BUFFER_SIZE);
+ priv->tx_dmatsk = bcom_fec_tx_init(FEC_TX_NUM_BD, tx_fifo);
+
+ if (!priv->rx_dmatsk || !priv->tx_dmatsk) {
+ printk(KERN_ERR DRIVER_NAME ": Cannot init SDMA tasks\n");
+ rv = -ENOMEM;
+ goto err_rx_tx_dmatsk;
+ }
+
+ /* Get the IRQs we need, one by one */
+ /* Control */
+ ndev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
+
+ /* RX */
+ priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk);
+
+ /* TX */
+ priv->t_irq = bcom_get_task_irq(priv->tx_dmatsk);
+
+ /* MAC address init */
+ if (!is_zero_ether_addr(mpc52xx_fec_mac_addr))
+ memcpy(ndev->dev_addr, mpc52xx_fec_mac_addr, 6);
+ else
+ mpc52xx_fec_get_paddr(ndev, ndev->dev_addr);
+
+ priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT);
+
+ /*
+ * Link mode configuration
+ */
+
+ /* Start with safe defaults for link connection */
+ priv->speed = 100;
+ priv->duplex = DUPLEX_HALF;
+ priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->dev.of_node) >> 20) / 5) << 1;
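+ /* This approximates ipb_clk / 2.5 MHz: ">> 20" converts Hz to
+ * roughly MHz and "/ 5 << 1" turns that into an even divider,
+ * targeting an MDC clock near the 2.5 MHz MII limit. */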
+
+ /* The current speed preconfigures the speed of the MII link */
+ prop = of_get_property(op->dev.of_node, "current-speed", &prop_size);
+ if (prop && (prop_size >= sizeof(u32) * 2)) {
+ priv->speed = prop[0];
+ priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF;
+ }
+
+ /* If there is a phy handle, then get the PHY node */
+ priv->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
+
+ /* the 7-wire property means don't use MII mode */
+ if (of_find_property(op->dev.of_node, "fsl,7-wire-mode", NULL)) {
+ priv->seven_wire_mode = 1;
+ dev_info(&ndev->dev, "using 7-wire PHY mode\n");
+ }
+
+ /* Hardware init */
+ mpc52xx_fec_hw_init(ndev);
+ mpc52xx_fec_reset_stats(ndev);
+
+ rv = register_netdev(ndev);
+ if (rv < 0)
+ goto err_node;
+
+ /* We're done! */
+ dev_set_drvdata(&op->dev, ndev);
+
+ return 0;
+
+err_node:
+ of_node_put(priv->phy_node);
+ irq_dispose_mapping(ndev->irq);
+err_rx_tx_dmatsk:
+ if (priv->rx_dmatsk)
+ bcom_fec_rx_release(priv->rx_dmatsk);
+ if (priv->tx_dmatsk)
+ bcom_fec_tx_release(priv->tx_dmatsk);
+ iounmap(priv->fec);
+err_mem_region:
+ release_mem_region(mem.start, sizeof(struct mpc52xx_fec));
+err_netdev:
+ free_netdev(ndev);
+
+ return rv;
+}
+
+static int
+mpc52xx_fec_remove(struct platform_device *op)
+{
+ struct net_device *ndev;
+ struct mpc52xx_fec_priv *priv;
+
+ ndev = dev_get_drvdata(&op->dev);
+ priv = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+
+ if (priv->phy_node)
+ of_node_put(priv->phy_node);
+ priv->phy_node = NULL;
+
+ irq_dispose_mapping(ndev->irq);
+
+ bcom_fec_rx_release(priv->rx_dmatsk);
+ bcom_fec_tx_release(priv->tx_dmatsk);
+
+ iounmap(priv->fec);
+
+ release_mem_region(ndev->base_addr, sizeof(struct mpc52xx_fec));
+
+ free_netdev(ndev);
+
+ dev_set_drvdata(&op->dev, NULL);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state)
+{
+ struct net_device *dev = dev_get_drvdata(&op->dev);
+
+ if (netif_running(dev))
+ mpc52xx_fec_close(dev);
+
+ return 0;
+}
+
+static int mpc52xx_fec_of_resume(struct platform_device *op)
+{
+ struct net_device *dev = dev_get_drvdata(&op->dev);
+
+ mpc52xx_fec_hw_init(dev);
+ mpc52xx_fec_reset_stats(dev);
+
+ if (netif_running(dev))
+ mpc52xx_fec_open(dev);
+
+ return 0;
+}
+#endif
+
+static struct of_device_id mpc52xx_fec_match[] = {
+ { .compatible = "fsl,mpc5200b-fec", },
+ { .compatible = "fsl,mpc5200-fec", },
+ { .compatible = "mpc5200-fec", },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, mpc52xx_fec_match);
+
+static struct platform_driver mpc52xx_fec_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_fec_match,
+ },
+ .probe = mpc52xx_fec_probe,
+ .remove = mpc52xx_fec_remove,
+#ifdef CONFIG_PM
+ .suspend = mpc52xx_fec_of_suspend,
+ .resume = mpc52xx_fec_of_resume,
+#endif
+};
+
+
+/* ======================================================================== */
+/* Module */
+/* ======================================================================== */
+
+static int __init
+mpc52xx_fec_init(void)
+{
+#ifdef CONFIG_FEC_MPC52xx_MDIO
+ int ret;
+ ret = platform_driver_register(&mpc52xx_fec_mdio_driver);
+ if (ret) {
+ printk(KERN_ERR DRIVER_NAME ": failed to register mdio driver\n");
+ return ret;
+ }
+#endif
+ return platform_driver_register(&mpc52xx_fec_driver);
+}
+
+static void __exit
+mpc52xx_fec_exit(void)
+{
+ platform_driver_unregister(&mpc52xx_fec_driver);
+#ifdef CONFIG_FEC_MPC52xx_MDIO
+ platform_driver_unregister(&mpc52xx_fec_mdio_driver);
+#endif
+}
+
+
+module_init(mpc52xx_fec_init);
+module_exit(mpc52xx_fec_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dale Farnsworth");
+MODULE_DESCRIPTION("Ethernet driver for the Freescale MPC52xx FEC");
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.h b/drivers/net/ethernet/freescale/fec_mpc52xx.h
new file mode 100644
index 000000000000..41d2dffde55b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.h
@@ -0,0 +1,294 @@
+/*
+ * drivers/net/ethernet/freescale/fec_mpc52xx.h
+ *
+ * Driver for the MPC5200 Fast Ethernet Controller
+ *
+ * Author: Dale Farnsworth <dfarnsworth@mvista.com>
+ *
+ * 2003-2004 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#ifndef __DRIVERS_NET_MPC52XX_FEC_H__
+#define __DRIVERS_NET_MPC52XX_FEC_H__
+
+#include <linux/phy.h>
+
+/* Tunable constant */
+/* FEC_RX_BUFFER_SIZE includes 4 bytes for CRC32 */
+#define FEC_RX_BUFFER_SIZE 1522 /* max receive packet size */
+#define FEC_RX_NUM_BD 256
+#define FEC_TX_NUM_BD 64
+
+#define FEC_RESET_DELAY 50 /* us */
+
+#define FEC_WATCHDOG_TIMEOUT ((400*HZ)/1000)
+
+/* ======================================================================== */
+/* Hardware register sets & bits */
+/* ======================================================================== */
+
+struct mpc52xx_fec {
+ u32 fec_id; /* FEC + 0x000 */
+ u32 ievent; /* FEC + 0x004 */
+ u32 imask; /* FEC + 0x008 */
+
+ u32 reserved0[1]; /* FEC + 0x00C */
+ u32 r_des_active; /* FEC + 0x010 */
+ u32 x_des_active; /* FEC + 0x014 */
+ u32 r_des_active_cl; /* FEC + 0x018 */
+ u32 x_des_active_cl; /* FEC + 0x01C */
+ u32 ivent_set; /* FEC + 0x020 */
+ u32 ecntrl; /* FEC + 0x024 */
+
+ u32 reserved1[6]; /* FEC + 0x028-03C */
+ u32 mii_data; /* FEC + 0x040 */
+ u32 mii_speed; /* FEC + 0x044 */
+ u32 mii_status; /* FEC + 0x048 */
+
+ u32 reserved2[5]; /* FEC + 0x04C-05C */
+ u32 mib_data; /* FEC + 0x060 */
+ u32 mib_control; /* FEC + 0x064 */
+
+ u32 reserved3[6]; /* FEC + 0x068-7C */
+ u32 r_activate; /* FEC + 0x080 */
+ u32 r_cntrl; /* FEC + 0x084 */
+ u32 r_hash; /* FEC + 0x088 */
+ u32 r_data; /* FEC + 0x08C */
+ u32 ar_done; /* FEC + 0x090 */
+ u32 r_test; /* FEC + 0x094 */
+ u32 r_mib; /* FEC + 0x098 */
+ u32 r_da_low; /* FEC + 0x09C */
+ u32 r_da_high; /* FEC + 0x0A0 */
+
+ u32 reserved4[7]; /* FEC + 0x0A4-0BC */
+ u32 x_activate; /* FEC + 0x0C0 */
+ u32 x_cntrl; /* FEC + 0x0C4 */
+ u32 backoff; /* FEC + 0x0C8 */
+ u32 x_data; /* FEC + 0x0CC */
+ u32 x_status; /* FEC + 0x0D0 */
+ u32 x_mib; /* FEC + 0x0D4 */
+ u32 x_test; /* FEC + 0x0D8 */
+ u32 fdxfc_da1; /* FEC + 0x0DC */
+ u32 fdxfc_da2; /* FEC + 0x0E0 */
+ u32 paddr1; /* FEC + 0x0E4 */
+ u32 paddr2; /* FEC + 0x0E8 */
+ u32 op_pause; /* FEC + 0x0EC */
+
+ u32 reserved5[4]; /* FEC + 0x0F0-0FC */
+ u32 instr_reg; /* FEC + 0x100 */
+ u32 context_reg; /* FEC + 0x104 */
+ u32 test_cntrl; /* FEC + 0x108 */
+ u32 acc_reg; /* FEC + 0x10C */
+ u32 ones; /* FEC + 0x110 */
+ u32 zeros; /* FEC + 0x114 */
+ u32 iaddr1; /* FEC + 0x118 */
+ u32 iaddr2; /* FEC + 0x11C */
+ u32 gaddr1; /* FEC + 0x120 */
+ u32 gaddr2; /* FEC + 0x124 */
+ u32 random; /* FEC + 0x128 */
+ u32 rand1; /* FEC + 0x12C */
+ u32 tmp; /* FEC + 0x130 */
+
+ u32 reserved6[3]; /* FEC + 0x134-13C */
+ u32 fifo_id; /* FEC + 0x140 */
+ u32 x_wmrk; /* FEC + 0x144 */
+ u32 fcntrl; /* FEC + 0x148 */
+ u32 r_bound; /* FEC + 0x14C */
+ u32 r_fstart; /* FEC + 0x150 */
+ u32 r_count; /* FEC + 0x154 */
+ u32 r_lag; /* FEC + 0x158 */
+ u32 r_read; /* FEC + 0x15C */
+ u32 r_write; /* FEC + 0x160 */
+ u32 x_count; /* FEC + 0x164 */
+ u32 x_lag; /* FEC + 0x168 */
+ u32 x_retry; /* FEC + 0x16C */
+ u32 x_write; /* FEC + 0x170 */
+ u32 x_read; /* FEC + 0x174 */
+
+ u32 reserved7[2]; /* FEC + 0x178-17C */
+ u32 fm_cntrl; /* FEC + 0x180 */
+ u32 rfifo_data; /* FEC + 0x184 */
+ u32 rfifo_status; /* FEC + 0x188 */
+ u32 rfifo_cntrl; /* FEC + 0x18C */
+ u32 rfifo_lrf_ptr; /* FEC + 0x190 */
+ u32 rfifo_lwf_ptr; /* FEC + 0x194 */
+ u32 rfifo_alarm; /* FEC + 0x198 */
+ u32 rfifo_rdptr; /* FEC + 0x19C */
+ u32 rfifo_wrptr; /* FEC + 0x1A0 */
+ u32 tfifo_data; /* FEC + 0x1A4 */
+ u32 tfifo_status; /* FEC + 0x1A8 */
+ u32 tfifo_cntrl; /* FEC + 0x1AC */
+ u32 tfifo_lrf_ptr; /* FEC + 0x1B0 */
+ u32 tfifo_lwf_ptr; /* FEC + 0x1B4 */
+ u32 tfifo_alarm; /* FEC + 0x1B8 */
+ u32 tfifo_rdptr; /* FEC + 0x1BC */
+ u32 tfifo_wrptr; /* FEC + 0x1C0 */
+
+ u32 reset_cntrl; /* FEC + 0x1C4 */
+ u32 xmit_fsm; /* FEC + 0x1C8 */
+
+ u32 reserved8[3]; /* FEC + 0x1CC-1D4 */
+ u32 rdes_data0; /* FEC + 0x1D8 */
+ u32 rdes_data1; /* FEC + 0x1DC */
+ u32 r_length; /* FEC + 0x1E0 */
+ u32 x_length; /* FEC + 0x1E4 */
+ u32 x_addr; /* FEC + 0x1E8 */
+ u32 cdes_data; /* FEC + 0x1EC */
+ u32 status; /* FEC + 0x1F0 */
+ u32 dma_control; /* FEC + 0x1F4 */
+ u32 des_cmnd; /* FEC + 0x1F8 */
+ u32 data; /* FEC + 0x1FC */
+
+ u32 rmon_t_drop; /* FEC + 0x200 */
+ u32 rmon_t_packets; /* FEC + 0x204 */
+ u32 rmon_t_bc_pkt; /* FEC + 0x208 */
+ u32 rmon_t_mc_pkt; /* FEC + 0x20C */
+ u32 rmon_t_crc_align; /* FEC + 0x210 */
+ u32 rmon_t_undersize; /* FEC + 0x214 */
+ u32 rmon_t_oversize; /* FEC + 0x218 */
+ u32 rmon_t_frag; /* FEC + 0x21C */
+ u32 rmon_t_jab; /* FEC + 0x220 */
+ u32 rmon_t_col; /* FEC + 0x224 */
+ u32 rmon_t_p64; /* FEC + 0x228 */
+ u32 rmon_t_p65to127; /* FEC + 0x22C */
+ u32 rmon_t_p128to255; /* FEC + 0x230 */
+ u32 rmon_t_p256to511; /* FEC + 0x234 */
+ u32 rmon_t_p512to1023; /* FEC + 0x238 */
+ u32 rmon_t_p1024to2047; /* FEC + 0x23C */
+ u32 rmon_t_p_gte2048; /* FEC + 0x240 */
+ u32 rmon_t_octets; /* FEC + 0x244 */
+ u32 ieee_t_drop; /* FEC + 0x248 */
+ u32 ieee_t_frame_ok; /* FEC + 0x24C */
+ u32 ieee_t_1col; /* FEC + 0x250 */
+ u32 ieee_t_mcol; /* FEC + 0x254 */
+ u32 ieee_t_def; /* FEC + 0x258 */
+ u32 ieee_t_lcol; /* FEC + 0x25C */
+ u32 ieee_t_excol; /* FEC + 0x260 */
+ u32 ieee_t_macerr; /* FEC + 0x264 */
+ u32 ieee_t_cserr; /* FEC + 0x268 */
+ u32 ieee_t_sqe; /* FEC + 0x26C */
+ u32 t_fdxfc; /* FEC + 0x270 */
+ u32 ieee_t_octets_ok; /* FEC + 0x274 */
+
+ u32 reserved9[2]; /* FEC + 0x278-27C */
+ u32 rmon_r_drop; /* FEC + 0x280 */
+ u32 rmon_r_packets; /* FEC + 0x284 */
+ u32 rmon_r_bc_pkt; /* FEC + 0x288 */
+ u32 rmon_r_mc_pkt; /* FEC + 0x28C */
+ u32 rmon_r_crc_align; /* FEC + 0x290 */
+ u32 rmon_r_undersize; /* FEC + 0x294 */
+ u32 rmon_r_oversize; /* FEC + 0x298 */
+ u32 rmon_r_frag; /* FEC + 0x29C */
+ u32 rmon_r_jab; /* FEC + 0x2A0 */
+
+ u32 rmon_r_resvd_0; /* FEC + 0x2A4 */
+
+ u32 rmon_r_p64; /* FEC + 0x2A8 */
+ u32 rmon_r_p65to127; /* FEC + 0x2AC */
+ u32 rmon_r_p128to255; /* FEC + 0x2B0 */
+ u32 rmon_r_p256to511; /* FEC + 0x2B4 */
+ u32 rmon_r_p512to1023; /* FEC + 0x2B8 */
+ u32 rmon_r_p1024to2047; /* FEC + 0x2BC */
+ u32 rmon_r_p_gte2048; /* FEC + 0x2C0 */
+ u32 rmon_r_octets; /* FEC + 0x2C4 */
+ u32 ieee_r_drop; /* FEC + 0x2C8 */
+ u32 ieee_r_frame_ok; /* FEC + 0x2CC */
+ u32 ieee_r_crc; /* FEC + 0x2D0 */
+ u32 ieee_r_align; /* FEC + 0x2D4 */
+ u32 r_macerr; /* FEC + 0x2D8 */
+ u32 r_fdxfc; /* FEC + 0x2DC */
+ u32 ieee_r_octets_ok; /* FEC + 0x2E0 */
+
+ u32 reserved10[7]; /* FEC + 0x2E4-2FC */
+
+ u32 reserved11[64]; /* FEC + 0x300-3FF */
+};
+
+#define FEC_MIB_DISABLE 0x80000000
+
+#define FEC_IEVENT_HBERR 0x80000000
+#define FEC_IEVENT_BABR 0x40000000
+#define FEC_IEVENT_BABT 0x20000000
+#define FEC_IEVENT_GRA 0x10000000
+#define FEC_IEVENT_TFINT 0x08000000
+#define FEC_IEVENT_MII 0x00800000
+#define FEC_IEVENT_LATE_COL 0x00200000
+#define FEC_IEVENT_COL_RETRY_LIM 0x00100000
+#define FEC_IEVENT_XFIFO_UN 0x00080000
+#define FEC_IEVENT_XFIFO_ERROR 0x00040000
+#define FEC_IEVENT_RFIFO_ERROR 0x00020000
+
+#define FEC_IMASK_HBERR 0x80000000
+#define FEC_IMASK_BABR 0x40000000
+#define FEC_IMASK_BABT 0x20000000
+#define FEC_IMASK_GRA 0x10000000
+#define FEC_IMASK_MII 0x00800000
+#define FEC_IMASK_LATE_COL 0x00200000
+#define FEC_IMASK_COL_RETRY_LIM 0x00100000
+#define FEC_IMASK_XFIFO_UN 0x00080000
+#define FEC_IMASK_XFIFO_ERROR 0x00040000
+#define FEC_IMASK_RFIFO_ERROR 0x00020000
+
+/* all but MII, which is enabled separately */
+#define FEC_IMASK_ENABLE (FEC_IMASK_HBERR | FEC_IMASK_BABR | \
+ FEC_IMASK_BABT | FEC_IMASK_GRA | FEC_IMASK_LATE_COL | \
+ FEC_IMASK_COL_RETRY_LIM | FEC_IMASK_XFIFO_UN | \
+ FEC_IMASK_XFIFO_ERROR | FEC_IMASK_RFIFO_ERROR)
+
+#define FEC_RCNTRL_MAX_FL_SHIFT 16
+#define FEC_RCNTRL_LOOP 0x01
+#define FEC_RCNTRL_DRT 0x02
+#define FEC_RCNTRL_MII_MODE 0x04
+#define FEC_RCNTRL_PROM 0x08
+#define FEC_RCNTRL_BC_REJ 0x10
+#define FEC_RCNTRL_FCE 0x20
+
+#define FEC_TCNTRL_GTS 0x00000001
+#define FEC_TCNTRL_HBC 0x00000002
+#define FEC_TCNTRL_FDEN 0x00000004
+#define FEC_TCNTRL_TFC_PAUSE 0x00000008
+#define FEC_TCNTRL_RFC_PAUSE 0x00000010
+
+#define FEC_ECNTRL_RESET 0x00000001
+#define FEC_ECNTRL_ETHER_EN 0x00000002
+
+#define FEC_MII_DATA_ST 0x40000000 /* Start frame */
+#define FEC_MII_DATA_OP_RD 0x20000000 /* Perform read */
+#define FEC_MII_DATA_OP_WR 0x10000000 /* Perform write */
+#define FEC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address mask */
+#define FEC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register mask */
+#define FEC_MII_DATA_TA 0x00020000 /* Turnaround */
+#define FEC_MII_DATA_DATAMSK 0x0000ffff /* PHY data mask */
+
+#define FEC_MII_READ_FRAME (FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA)
+#define FEC_MII_WRITE_FRAME (FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR | FEC_MII_DATA_TA)
+
+#define FEC_MII_DATA_RA_SHIFT 0x12 /* MII reg addr bits */
+#define FEC_MII_DATA_PA_SHIFT 0x17 /* MII PHY addr bits */
+
+#define FEC_PADDR2_TYPE 0x8808
+
+#define FEC_OP_PAUSE_OPCODE 0x00010000
+
+#define FEC_FIFO_WMRK_256B 0x3
+
+#define FEC_FIFO_STATUS_ERR 0x00400000
+#define FEC_FIFO_STATUS_UF 0x00200000
+#define FEC_FIFO_STATUS_OF 0x00100000
+
+#define FEC_FIFO_CNTRL_FRAME 0x08000000
+#define FEC_FIFO_CNTRL_LTG_7 0x07000000
+
+#define FEC_RESET_CNTRL_RESET_FIFO 0x02000000
+#define FEC_RESET_CNTRL_ENABLE_IS_RESET 0x01000000
+
+#define FEC_XMIT_FSM_APPEND_CRC 0x02000000
+#define FEC_XMIT_FSM_ENABLE_CRC 0x01000000
+
+
+extern struct platform_driver mpc52xx_fec_mdio_driver;
+
+#endif /* __DRIVERS_NET_MPC52XX_FEC_H__ */
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
new file mode 100644
index 000000000000..360a578c2bb7
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -0,0 +1,160 @@
+/*
+ * Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver
+ *
+ * Copyright (C) 2007 Domen Puncer, Telargo, Inc.
+ * Copyright (C) 2008 Wolfram Sang, Pengutronix
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/of_mdio.h>
+#include <asm/io.h>
+#include <asm/mpc52xx.h>
+#include "fec_mpc52xx.h"
+
+struct mpc52xx_fec_mdio_priv {
+ struct mpc52xx_fec __iomem *regs;
+ int mdio_irqs[PHY_MAX_ADDR];
+};
+
+static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
+ int reg, u32 value)
+{
+ struct mpc52xx_fec_mdio_priv *priv = bus->priv;
+ struct mpc52xx_fec __iomem *fec = priv->regs;
+ int tries = 3;
+
+ value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
+ value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
+
+ out_be32(&fec->ievent, FEC_IEVENT_MII);
+ out_be32(&fec->mii_data, value);
+
+ /* wait for it to finish; this takes about 23 us on a lite5200b */
+ while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
+ msleep(1);
+
+ if (!tries)
+ return -ETIMEDOUT;
+
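+ /* Reads return the data field of the MII frame; writes return 0. */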
+ return value & FEC_MII_DATA_OP_RD ?
+ in_be32(&fec->mii_data) & FEC_MII_DATA_DATAMSK : 0;
+}
+
+static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ return mpc52xx_fec_mdio_transfer(bus, phy_id, reg, FEC_MII_READ_FRAME);
+}
+
+static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg,
+ u16 data)
+{
+ return mpc52xx_fec_mdio_transfer(bus, phy_id, reg,
+ data | FEC_MII_WRITE_FRAME);
+}
+
+static int mpc52xx_fec_mdio_probe(struct platform_device *of)
+{
+ struct device *dev = &of->dev;
+ struct device_node *np = of->dev.of_node;
+ struct mii_bus *bus;
+ struct mpc52xx_fec_mdio_priv *priv;
+ struct resource res;
+ int err;
+
+ bus = mdiobus_alloc();
+ if (bus == NULL)
+ return -ENOMEM;
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (priv == NULL) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ bus->name = "mpc52xx MII bus";
+ bus->read = mpc52xx_fec_mdio_read;
+ bus->write = mpc52xx_fec_mdio_write;
+
+ /* setup irqs */
+ bus->irq = priv->mdio_irqs;
+
+ /* setup registers */
+ err = of_address_to_resource(np, 0, &res);
+ if (err)
+ goto out_free;
+ priv->regs = ioremap(res.start, resource_size(&res));
+ if (priv->regs == NULL) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%llx",
+ (unsigned long long)res.start);
+ bus->priv = priv;
+
+ bus->parent = dev;
+ dev_set_drvdata(dev, bus);
+
+ /* set MII speed */
+ out_be32(&priv->regs->mii_speed,
+ ((mpc5xxx_get_bus_frequency(of->dev.of_node) >> 20) / 5) << 1);
+
+ err = of_mdiobus_register(bus, np);
+ if (err)
+ goto out_unmap;
+
+ return 0;
+
+ out_unmap:
+ iounmap(priv->regs);
+ out_free:
+ kfree(priv);
+ mdiobus_free(bus);
+
+ return err;
+}
+
+static int mpc52xx_fec_mdio_remove(struct platform_device *of)
+{
+ struct device *dev = &of->dev;
+ struct mii_bus *bus = dev_get_drvdata(dev);
+ struct mpc52xx_fec_mdio_priv *priv = bus->priv;
+
+ mdiobus_unregister(bus);
+ dev_set_drvdata(dev, NULL);
+ iounmap(priv->regs);
+ kfree(priv);
+ mdiobus_free(bus);
+
+ return 0;
+}
+
+static struct of_device_id mpc52xx_fec_mdio_match[] = {
+ { .compatible = "fsl,mpc5200b-mdio", },
+ { .compatible = "fsl,mpc5200-mdio", },
+ { .compatible = "mpc5200b-fec-phy", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match);
+
+struct platform_driver mpc52xx_fec_mdio_driver = {
+ .driver = {
+ .name = "mpc5200b-fec-phy",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_fec_mdio_match,
+ },
+ .probe = mpc52xx_fec_mdio_probe,
+ .remove = mpc52xx_fec_mdio_remove,
+};
+
+/* Exported so the FEC driver can register this MDIO driver, which has
+ * to happen before the FEC itself is registered */
+EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/fs_enet/Kconfig b/drivers/net/ethernet/freescale/fs_enet/Kconfig
new file mode 100644
index 000000000000..268414d9f2cb
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/Kconfig
@@ -0,0 +1,35 @@
+config FS_ENET
+ tristate "Freescale Ethernet Driver"
+ depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x)
+ select NET_CORE
+ select MII
+ select PHYLIB
+
+config FS_ENET_MPC5121_FEC
+ def_bool y if (FS_ENET && PPC_MPC512x)
+ select FS_ENET_HAS_FEC
+
+config FS_ENET_HAS_SCC
+ bool "Chip has an SCC usable for ethernet"
+ depends on FS_ENET && (CPM1 || CPM2)
+ default y
+
+config FS_ENET_HAS_FCC
+ bool "Chip has an FCC usable for ethernet"
+ depends on FS_ENET && CPM2
+ default y
+
+config FS_ENET_HAS_FEC
+ bool "Chip has an FEC usable for ethernet"
+ depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC)
+ select FS_ENET_MDIO_FEC
+ default y
+
+config FS_ENET_MDIO_FEC
+ tristate "MDIO driver for FEC"
+ depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC)
+
+config FS_ENET_MDIO_FCC
+ tristate "MDIO driver for FCC"
+ depends on FS_ENET && CPM2
+ select MDIO_BITBANG
diff --git a/drivers/net/ethernet/freescale/fs_enet/Makefile b/drivers/net/ethernet/freescale/fs_enet/Makefile
new file mode 100644
index 000000000000..d4a305ee3455
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the Freescale Ethernet controllers
+#
+
+obj-$(CONFIG_FS_ENET) += fs_enet.o
+
+fs_enet-$(CONFIG_FS_ENET_HAS_SCC) += mac-scc.o
+fs_enet-$(CONFIG_FS_ENET_HAS_FEC) += mac-fec.o
+fs_enet-$(CONFIG_FS_ENET_HAS_FCC) += mac-fcc.o
+
+obj-$(CONFIG_FS_ENET_MDIO_FEC) += mii-fec.o
+obj-$(CONFIG_FS_ENET_MDIO_FCC) += mii-bitbang.o
+
+fs_enet-objs := fs_enet-main.o $(fs_enet-m)
diff --git a/drivers/net/ethernet/freescale/fs_enet/fec.h b/drivers/net/ethernet/freescale/fs_enet/fec.h
new file mode 100644
index 000000000000..e980527e2b99
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/fec.h
@@ -0,0 +1,42 @@
+#ifndef FS_ENET_FEC_H
+#define FS_ENET_FEC_H
+
+/* CRC polynomial used by the FEC for the multicast group filtering */
+#define FEC_CRC_POLY 0x04C11DB7
+
+#define FEC_MAX_MULTICAST_ADDRS 64
+
+/* Interrupt events/masks */
+#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
+#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
+#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
+#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
+#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
+#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
+#define FEC_ENET_RXF 0x02000000U /* Full frame received */
+#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
+#define FEC_ENET_MII 0x00800000U /* MII interrupt */
+#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
+
+#define FEC_ECNTRL_PINMUX 0x00000004
+#define FEC_ECNTRL_ETHER_EN 0x00000002
+#define FEC_ECNTRL_RESET 0x00000001
+
+#define FEC_RCNTRL_BC_REJ 0x00000010
+#define FEC_RCNTRL_PROM 0x00000008
+#define FEC_RCNTRL_MII_MODE 0x00000004
+#define FEC_RCNTRL_DRT 0x00000002
+#define FEC_RCNTRL_LOOP 0x00000001
+
+#define FEC_TCNTRL_FDEN 0x00000004
+#define FEC_TCNTRL_HBC 0x00000002
+#define FEC_TCNTRL_GTS 0x00000001
+
+
+
+/*
+ * Delay to wait for FEC reset command to complete (in us)
+ */
+#define FEC_RESET_DELAY 50
+#endif
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
new file mode 100644
index 000000000000..5bf5471f06ff
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -0,0 +1,1196 @@
+/*
+ * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
+ * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
+
+#include <linux/vmalloc.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "fs_enet.h"
+
+/*************************************************/
+
+MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
+MODULE_DESCRIPTION("Freescale Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
+module_param(fs_enet_debug, int, 0);
+MODULE_PARM_DESC(fs_enet_debug,
+ "Freescale bitmapped debugging message enable value");
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void fs_enet_netpoll(struct net_device *dev);
+#endif
+
+static void fs_set_multicast_list(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ (*fep->ops->set_multicast_list)(dev);
+}
+
+static void skb_align(struct sk_buff *skb, int align)
+{
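+ /* Advance skb->data to the next "align"-byte boundary; align is
+ * assumed to be a power of two. */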
+ int off = ((unsigned long)skb->data) & (align - 1);
+
+ if (off)
+ skb_reserve(skb, align - off);
+}
+
+/* NAPI receive function */
+static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
+{
+ struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
+ struct net_device *dev = fep->ndev;
+ const struct fs_platform_info *fpi = fep->fpi;
+ cbd_t __iomem *bdp;
+ struct sk_buff *skb, *skbn, *skbt;
+ int received = 0;
+ u16 pkt_len, sc;
+ int curidx;
+
+ /*
+ * First, grab all of the stats for the incoming packet.
+ * These get messed up if we get called due to a busy condition.
+ */
+ bdp = fep->cur_rx;
+
+ /* clear RX status bits for napi*/
+ (*fep->ops->napi_clear_rx_event)(dev);
+
+ while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
+ curidx = bdp - fep->rx_bd_base;
+
+ /*
+ * Since we have allocated space to hold a complete frame,
+ * the last indicator should be set.
+ */
+ if ((sc & BD_ENET_RX_LAST) == 0)
+ dev_warn(fep->dev, "rcv is not +last\n");
+
+ /*
+ * Check for errors.
+ */
+ if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
+ BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
+ fep->stats.rx_errors++;
+ /* Frame too long or too short. */
+ if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
+ fep->stats.rx_length_errors++;
+ /* Frame alignment */
+ if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
+ fep->stats.rx_frame_errors++;
+ /* CRC Error */
+ if (sc & BD_ENET_RX_CR)
+ fep->stats.rx_crc_errors++;
+ /* FIFO overrun */
+ if (sc & BD_ENET_RX_OV)
+ fep->stats.rx_fifo_errors++;
+
+ skb = fep->rx_skbuff[curidx];
+
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+
+ skbn = skb;
+
+ } else {
+ skb = fep->rx_skbuff[curidx];
+
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+
+ /*
+ * Process the incoming frame.
+ */
+ fep->stats.rx_packets++;
+ pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
+ fep->stats.rx_bytes += pkt_len + 4;
+
+ if (pkt_len <= fpi->rx_copybreak) {
+ /* +2 to make IP header L1 cache aligned */
+ skbn = dev_alloc_skb(pkt_len + 2);
+ if (skbn != NULL) {
+ skb_reserve(skbn, 2); /* align IP header */
+ skb_copy_from_linear_data(skb,
+ skbn->data, pkt_len);
+ /* swap */
+ skbt = skb;
+ skb = skbn;
+ skbn = skbt;
+ }
+ } else {
+ skbn = dev_alloc_skb(ENET_RX_FRSIZE);
+
+ if (skbn)
+ skb_align(skbn, ENET_RX_ALIGN);
+ }
+
+ if (skbn != NULL) {
+ skb_put(skb, pkt_len); /* Make room */
+ skb->protocol = eth_type_trans(skb, dev);
+ received++;
+ netif_receive_skb(skb);
+ } else {
+ dev_warn(fep->dev,
+ "Memory squeeze, dropping packet.\n");
+ fep->stats.rx_dropped++;
+ skbn = skb;
+ }
+ }
+
+ fep->rx_skbuff[curidx] = skbn;
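+ /* Re-arm the descriptor with the (possibly new) buffer: clear the
+ * old status bits and hand it back to the controller as EMPTY. */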
+ CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE));
+ CBDW_DATLEN(bdp, 0);
+ CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
+
+ /*
+ * Update BD pointer to next entry.
+ */
+ if ((sc & BD_ENET_RX_WRAP) == 0)
+ bdp++;
+ else
+ bdp = fep->rx_bd_base;
+
+ (*fep->ops->rx_bd_done)(dev);
+
+ if (received >= budget)
+ break;
+ }
+
+ fep->cur_rx = bdp;
+
+ if (received < budget) {
+ /* done */
+ napi_complete(napi);
+ (*fep->ops->napi_enable_rx)(dev);
+ }
+ return received;
+}
+
+/* non NAPI receive function */
+static int fs_enet_rx_non_napi(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+ cbd_t __iomem *bdp;
+ struct sk_buff *skb, *skbn, *skbt;
+ int received = 0;
+ u16 pkt_len, sc;
+ int curidx;
+ /*
+ * First, grab all of the stats for the incoming packet.
+ * These get messed up if we get called due to a busy condition.
+ */
+ bdp = fep->cur_rx;
+
+ while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
+
+ curidx = bdp - fep->rx_bd_base;
+
+ /*
+ * Since we have allocated space to hold a complete frame,
+ * the last indicator should be set.
+ */
+ if ((sc & BD_ENET_RX_LAST) == 0)
+ dev_warn(fep->dev, "rcv is not +last\n");
+
+ /*
+ * Check for errors.
+ */
+ if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
+ BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
+ fep->stats.rx_errors++;
+ /* Frame too long or too short. */
+ if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
+ fep->stats.rx_length_errors++;
+ /* Frame alignment */
+ if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
+ fep->stats.rx_frame_errors++;
+ /* CRC Error */
+ if (sc & BD_ENET_RX_CR)
+ fep->stats.rx_crc_errors++;
+ /* FIFO overrun */
+ if (sc & BD_ENET_RX_OV)
+ fep->stats.rx_fifo_errors++;
+
+ skb = fep->rx_skbuff[curidx];
+
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+
+ skbn = skb;
+
+ } else {
+
+ skb = fep->rx_skbuff[curidx];
+
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+
+ /*
+ * Process the incoming frame.
+ */
+ fep->stats.rx_packets++;
+ pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
+ fep->stats.rx_bytes += pkt_len + 4;
+
+ if (pkt_len <= fpi->rx_copybreak) {
+ /* +2 to make IP header L1 cache aligned */
+ skbn = dev_alloc_skb(pkt_len + 2);
+ if (skbn != NULL) {
+ skb_reserve(skbn, 2); /* align IP header */
+ skb_copy_from_linear_data(skb,
+ skbn->data, pkt_len);
+ /* swap */
+ skbt = skb;
+ skb = skbn;
+ skbn = skbt;
+ }
+ } else {
+ skbn = dev_alloc_skb(ENET_RX_FRSIZE);
+
+ if (skbn)
+ skb_align(skbn, ENET_RX_ALIGN);
+ }
+
+ if (skbn != NULL) {
+ skb_put(skb, pkt_len); /* Make room */
+ skb->protocol = eth_type_trans(skb, dev);
+ received++;
+ netif_rx(skb);
+ } else {
+ dev_warn(fep->dev,
+ "Memory squeeze, dropping packet.\n");
+ fep->stats.rx_dropped++;
+ skbn = skb;
+ }
+ }
+
+ fep->rx_skbuff[curidx] = skbn;
+ CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE));
+ CBDW_DATLEN(bdp, 0);
+ CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
+
+ /*
+ * Update BD pointer to next entry.
+ */
+ if ((sc & BD_ENET_RX_WRAP) == 0)
+ bdp++;
+ else
+ bdp = fep->rx_bd_base;
+
+ (*fep->ops->rx_bd_done)(dev);
+ }
+
+ fep->cur_rx = bdp;
+
+ return 0;
+}
+
+static void fs_enet_tx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ cbd_t __iomem *bdp;
+ struct sk_buff *skb;
+ int dirtyidx, do_wake, do_restart;
+ u16 sc;
+
+ spin_lock(&fep->tx_lock);
+ bdp = fep->dirty_tx;
+
+ do_wake = do_restart = 0;
+ while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
+ dirtyidx = bdp - fep->tx_bd_base;
+
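+ /* Once everything outstanding is reclaimed the ring is empty; the
+ * READY test alone cannot tell an empty ring from a full one. */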
+ if (fep->tx_free == fep->tx_ring)
+ break;
+
+ skb = fep->tx_skbuff[dirtyidx];
+
+ /*
+ * Check for errors.
+ */
+ if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+ BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
+
+ if (sc & BD_ENET_TX_HB) /* No heartbeat */
+ fep->stats.tx_heartbeat_errors++;
+ if (sc & BD_ENET_TX_LC) /* Late collision */
+ fep->stats.tx_window_errors++;
+ if (sc & BD_ENET_TX_RL) /* Retrans limit */
+ fep->stats.tx_aborted_errors++;
+ if (sc & BD_ENET_TX_UN) /* Underrun */
+ fep->stats.tx_fifo_errors++;
+ if (sc & BD_ENET_TX_CSL) /* Carrier lost */
+ fep->stats.tx_carrier_errors++;
+
+ if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
+ fep->stats.tx_errors++;
+ do_restart = 1;
+ }
+ } else
+ fep->stats.tx_packets++;
+
+ if (sc & BD_ENET_TX_READY) {
+ dev_warn(fep->dev,
+ "HEY! Enet xmit interrupt and TX_READY.\n");
+ }
+
+ /*
+ * Deferred means some collisions occurred during transmit,
+ * but we eventually sent the packet OK.
+ */
+ if (sc & BD_ENET_TX_DEF)
+ fep->stats.collisions++;
+
+ /* unmap */
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ skb->len, DMA_TO_DEVICE);
+
+ /*
+ * Free the sk buffer associated with this last transmit.
+ */
+ dev_kfree_skb_irq(skb);
+ fep->tx_skbuff[dirtyidx] = NULL;
+
+ /*
+ * Update pointer to next buffer descriptor to be transmitted.
+ */
+ if ((sc & BD_ENET_TX_WRAP) == 0)
+ bdp++;
+ else
+ bdp = fep->tx_bd_base;
+
+ /*
+ * Since we have freed up a buffer, the ring is no longer
+ * full.
+ */
+ if (!fep->tx_free++)
+ do_wake = 1;
+ }
+
+ fep->dirty_tx = bdp;
+
+ if (do_restart)
+ (*fep->ops->tx_restart)(dev);
+
+ spin_unlock(&fep->tx_lock);
+
+ if (do_wake)
+ netif_wake_queue(dev);
+}
+
+/*
+ * The interrupt handler.
+ * This is called from the MPC core interrupt.
+ */
+static irqreturn_t
+fs_enet_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct fs_enet_private *fep;
+ const struct fs_platform_info *fpi;
+ u32 int_events;
+ u32 int_clr_events;
+ int nr, napi_ok;
+ int handled;
+
+ fep = netdev_priv(dev);
+ fpi = fep->fpi;
+
+ nr = 0;
+ while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
+ nr++;
+
+ int_clr_events = int_events;
+ if (fpi->use_napi)
+ int_clr_events &= ~fep->ev_napi_rx;
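+ /* In NAPI mode the rx events are left unacknowledged here; they
+ * are cleared below, after rx interrupts have been masked, so no
+ * event is lost while the poll routine runs. */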
+
+ (*fep->ops->clear_int_events)(dev, int_clr_events);
+
+ if (int_events & fep->ev_err)
+ (*fep->ops->ev_error)(dev, int_events);
+
+ if (int_events & fep->ev_rx) {
+ if (!fpi->use_napi)
+ fs_enet_rx_non_napi(dev);
+ else {
+ napi_ok = napi_schedule_prep(&fep->napi);
+
+ (*fep->ops->napi_disable_rx)(dev);
+ (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);
+
+ /* NOTE: it is possible for FCCs in NAPI mode */
+ /* to submit a spurious interrupt while in poll */
+ if (napi_ok)
+ __napi_schedule(&fep->napi);
+ }
+ }
+
+ if (int_events & fep->ev_tx)
+ fs_enet_tx(dev);
+ }
+
+ handled = nr > 0;
+ return IRQ_RETVAL(handled);
+}
+
+void fs_init_bds(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ cbd_t __iomem *bdp;
+ struct sk_buff *skb;
+ int i;
+
+ fs_cleanup_bds(dev);
+
+ fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+ fep->tx_free = fep->tx_ring;
+ fep->cur_rx = fep->rx_bd_base;
+
+ /*
+ * Initialize the receive buffer descriptors.
+ */
+ for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
+ skb = dev_alloc_skb(ENET_RX_FRSIZE);
+ if (skb == NULL) {
+ dev_warn(fep->dev,
+ "Memory squeeze, unable to allocate skb\n");
+ break;
+ }
+ skb_align(skb, ENET_RX_ALIGN);
+ fep->rx_skbuff[i] = skb;
+ CBDW_BUFADDR(bdp,
+ dma_map_single(fep->dev, skb->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE));
+ CBDW_DATLEN(bdp, 0); /* zero */
+ CBDW_SC(bdp, BD_ENET_RX_EMPTY |
+ ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
+ }
+ /*
+ * if an allocation failed above, fill up the remainder with empty,
+ * skb-less descriptors
+ */
+ for (; i < fep->rx_ring; i++, bdp++) {
+ fep->rx_skbuff[i] = NULL;
+ CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
+ }
+
+ /*
+ * ...and the same for transmit.
+ */
+ for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
+ fep->tx_skbuff[i] = NULL;
+ CBDW_BUFADDR(bdp, 0);
+ CBDW_DATLEN(bdp, 0);
+ CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
+ }
+}
+
+void fs_cleanup_bds(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct sk_buff *skb;
+ cbd_t __iomem *bdp;
+ int i;
+
+ /*
+ * Reset SKB transmit buffers.
+ */
+ for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
+ if ((skb = fep->tx_skbuff[i]) == NULL)
+ continue;
+
+ /* unmap */
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ skb->len, DMA_TO_DEVICE);
+
+ fep->tx_skbuff[i] = NULL;
+ dev_kfree_skb(skb);
+ }
+
+ /*
+ * Reset SKB receive buffers
+ */
+ for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
+ if ((skb = fep->rx_skbuff[i]) == NULL)
+ continue;
+
+ /* unmap */
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+
+ fep->rx_skbuff[i] = NULL;
+
+ dev_kfree_skb(skb);
+ }
+}
+
+/**********************************************************************************/
+
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+/*
+ * MPC5121 FEC requires 4-byte alignment for the TX data buffer!
+ */
+static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ struct sk_buff *new_skb;
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ /* Alloc new skb */
+ new_skb = dev_alloc_skb(skb->len + 4);
+ if (!new_skb) {
+ if (net_ratelimit()) {
+ dev_warn(fep->dev,
+ "Memory squeeze, dropping tx packet.\n");
+ }
+ return NULL;
+ }
+
+ /* Make sure new skb is properly aligned */
+ skb_align(new_skb, 4);
+
+ /* Copy data to new skb ... */
+ skb_copy_from_linear_data(skb, new_skb->data, skb->len);
+ skb_put(new_skb, skb->len);
+
+ /* ... and free an old one */
+ dev_kfree_skb_any(skb);
+
+ return new_skb;
+}
+#endif
+
+static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ cbd_t __iomem *bdp;
+ int curidx;
+ u16 sc;
+ unsigned long flags;
+
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+ if (((unsigned long)skb->data) & 0x3) {
+ skb = tx_skb_align_workaround(dev, skb);
+ if (!skb) {
+ /*
+ * The aligned copy in tx_skb_align_workaround() failed, but
+ * the original skb was not freed, so report busy and let the
+ * stack retry the transmit later.
+ */
+ return NETDEV_TX_BUSY;
+ }
+ }
+#endif
+ spin_lock_irqsave(&fep->tx_lock, flags);
+
+ /*
+ * Fill in a Tx ring entry
+ */
+ bdp = fep->cur_tx;
+
+ if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&fep->tx_lock, flags);
+
+ /*
+ * Oops. All transmit buffers are full. Bail out.
+ * This should not happen, since the tx queue should be stopped.
+ */
+ dev_warn(fep->dev, "tx queue full!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ curidx = bdp - fep->tx_bd_base;
+ /*
+ * Clear all of the status flags.
+ */
+ CBDC_SC(bdp, BD_ENET_TX_STATS);
+
+ /*
+ * Save skb pointer.
+ */
+ fep->tx_skbuff[curidx] = skb;
+
+ fep->stats.tx_bytes += skb->len;
+
+ /*
+ * Push the data cache so the CPM does not get stale memory data.
+ */
+ CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
+ skb->data, skb->len, DMA_TO_DEVICE));
+ CBDW_DATLEN(bdp, skb->len);
+
+ /*
+ * If this was the last BD in the ring, start at the beginning again.
+ */
+ if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
+ fep->cur_tx++;
+ else
+ fep->cur_tx = fep->tx_bd_base;
+
+ if (!--fep->tx_free)
+ netif_stop_queue(dev);
+
+ /* Trigger transmission start */
+ sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
+ BD_ENET_TX_LAST | BD_ENET_TX_TC;
+
+ /* note that while the FEC does not define this bit, it marks it
+ * as available for software use, so setting it here is harmless */
+ if (skb->len <= 60)
+ sc |= BD_ENET_TX_PAD;
+ CBDS_SC(bdp, sc);
+
+ skb_tx_timestamp(skb);
+
+ (*fep->ops->tx_kickstart)(dev);
+
+ spin_unlock_irqrestore(&fep->tx_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static void fs_timeout(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ unsigned long flags;
+ int wake = 0;
+
+ fep->stats.tx_errors++;
+
+ spin_lock_irqsave(&fep->lock, flags);
+
+ if (dev->flags & IFF_UP) {
+ phy_stop(fep->phydev);
+ (*fep->ops->stop)(dev);
+ (*fep->ops->restart)(dev);
+ phy_start(fep->phydev);
+ }
+
+ wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ if (wake)
+ netif_wake_queue(dev);
+}
+
+/*-----------------------------------------------------------------------------
+ * generic link-change handler - should be sufficient for most cases
+ *-----------------------------------------------------------------------------*/
+static void generic_adjust_link(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev = fep->phydev;
+ int new_state = 0;
+
+ if (phydev->link) {
+ /* adjust to duplex mode */
+ if (phydev->duplex != fep->oldduplex) {
+ new_state = 1;
+ fep->oldduplex = phydev->duplex;
+ }
+
+ if (phydev->speed != fep->oldspeed) {
+ new_state = 1;
+ fep->oldspeed = phydev->speed;
+ }
+
+ if (!fep->oldlink) {
+ new_state = 1;
+ fep->oldlink = 1;
+ }
+
+ if (new_state)
+ fep->ops->restart(dev);
+ } else if (fep->oldlink) {
+ new_state = 1;
+ fep->oldlink = 0;
+ fep->oldspeed = 0;
+ fep->oldduplex = -1;
+ }
+
+ if (new_state && netif_msg_link(fep))
+ phy_print_status(phydev);
+}
+
+
+static void fs_adjust_link(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fep->lock, flags);
+
+ if(fep->ops->adjust_link)
+ fep->ops->adjust_link(dev);
+ else
+ generic_adjust_link(dev);
+
+ spin_unlock_irqrestore(&fep->lock, flags);
+}
+
+static int fs_init_phy(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev;
+
+ fep->oldlink = 0;
+ fep->oldspeed = 0;
+ fep->oldduplex = -1;
+
+ phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+ if (!phydev) {
+ phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ }
+ if (!phydev) {
+ dev_err(&dev->dev, "Could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ fep->phydev = phydev;
+
+ return 0;
+}
+
+static int fs_enet_open(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ int r;
+ int err;
+
+ /* Initialize fep->cur_rx etc.; skipping this would cause a crash
+ * in fs_enet_rx_napi */
+ fs_init_bds(fep->ndev);
+
+ if (fep->fpi->use_napi)
+ napi_enable(&fep->napi);
+
+ /* Install our interrupt handler. */
+ r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
+ "fs_enet-mac", dev);
+ if (r != 0) {
+ dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
+ if (fep->fpi->use_napi)
+ napi_disable(&fep->napi);
+ return -EINVAL;
+ }
+
+ err = fs_init_phy(dev);
+ if (err) {
+ free_irq(fep->interrupt, dev);
+ if (fep->fpi->use_napi)
+ napi_disable(&fep->napi);
+ return err;
+ }
+ phy_start(fep->phydev);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int fs_enet_close(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ if (fep->fpi->use_napi)
+ napi_disable(&fep->napi);
+ phy_stop(fep->phydev);
+
+ spin_lock_irqsave(&fep->lock, flags);
+ spin_lock(&fep->tx_lock);
+ (*fep->ops->stop)(dev);
+ spin_unlock(&fep->tx_lock);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ /* release any irqs */
+ phy_disconnect(fep->phydev);
+ fep->phydev = NULL;
+ free_irq(fep->interrupt, dev);
+
+ return 0;
+}
+
+static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ return &fep->stats;
+}
+
+/*************************************************************************/
+
+static void fs_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_MODULE_NAME);
+ strcpy(info->version, DRV_MODULE_VERSION);
+}
+
+static int fs_get_regs_len(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ return (*fep->ops->get_regs_len)(dev);
+}
+
+static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ unsigned long flags;
+ int r, len;
+
+ len = regs->len;
+
+ spin_lock_irqsave(&fep->lock, flags);
+ r = (*fep->ops->get_regs)(dev, p, &len);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ if (r == 0)
+ regs->version = 0;
+}
+
+static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (!fep->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(fep->phydev, cmd);
+}
+
+static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (!fep->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(fep->phydev, cmd);
+}
+
+static int fs_nway_reset(struct net_device *dev)
+{
+ return 0;
+}
+
+static u32 fs_get_msglevel(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ return fep->msg_enable;
+}
+
+static void fs_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fep->msg_enable = value;
+}
+
+static const struct ethtool_ops fs_ethtool_ops = {
+ .get_drvinfo = fs_get_drvinfo,
+ .get_regs_len = fs_get_regs_len,
+ .get_settings = fs_get_settings,
+ .set_settings = fs_set_settings,
+ .nway_reset = fs_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = fs_get_msglevel,
+ .set_msglevel = fs_set_msglevel,
+ .get_regs = fs_get_regs,
+};
+
+static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return phy_mii_ioctl(fep->phydev, rq, cmd);
+}
+
+extern int fs_mii_connect(struct net_device *dev);
+extern void fs_mii_disconnect(struct net_device *dev);
+
+/**************************************************************************************/
+
+#ifdef CONFIG_FS_ENET_HAS_FEC
+#define IS_FEC(match) ((match)->data == &fs_fec_ops)
+#else
+#define IS_FEC(match) 0
+#endif
+
+static const struct net_device_ops fs_enet_netdev_ops = {
+ .ndo_open = fs_enet_open,
+ .ndo_stop = fs_enet_close,
+ .ndo_get_stats = fs_enet_get_stats,
+ .ndo_start_xmit = fs_enet_start_xmit,
+ .ndo_tx_timeout = fs_timeout,
+ .ndo_set_rx_mode = fs_set_multicast_list,
+ .ndo_do_ioctl = fs_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = fs_enet_netpoll,
+#endif
+};
+
+static struct of_device_id fs_enet_match[];
+static int __devinit fs_enet_probe(struct platform_device *ofdev)
+{
+ const struct of_device_id *match;
+ struct net_device *ndev;
+ struct fs_enet_private *fep;
+ struct fs_platform_info *fpi;
+ const u32 *data;
+ const u8 *mac_addr;
+ int privsize, len, ret = -ENODEV;
+
+ match = of_match_device(fs_enet_match, &ofdev->dev);
+ if (!match)
+ return -EINVAL;
+
+ fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
+ if (!fpi)
+ return -ENOMEM;
+
+ if (!IS_FEC(match)) {
+ data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
+ if (!data || len != 4)
+ goto out_free_fpi;
+
+ fpi->cp_command = *data;
+ }
+
+ fpi->rx_ring = 32;
+ fpi->tx_ring = 32;
+ fpi->rx_copybreak = 240;
+ fpi->use_napi = 1;
+ fpi->napi_weight = 17;
+ fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
+ if (!fpi->phy_node && !of_get_property(ofdev->dev.of_node,
+ "fixed-link", NULL))
+ goto out_free_fpi;
+
+ privsize = sizeof(*fep) +
+ sizeof(struct sk_buff **) *
+ (fpi->rx_ring + fpi->tx_ring);
+
+ ndev = alloc_etherdev(privsize);
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+
+ SET_NETDEV_DEV(ndev, &ofdev->dev);
+ dev_set_drvdata(&ofdev->dev, ndev);
+
+ fep = netdev_priv(ndev);
+ fep->dev = &ofdev->dev;
+ fep->ndev = ndev;
+ fep->fpi = fpi;
+ fep->ops = match->data;
+
+ ret = fep->ops->setup_data(ndev);
+ if (ret)
+ goto out_free_dev;
+
+ fep->rx_skbuff = (struct sk_buff **)&fep[1];
+ fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
+
+ spin_lock_init(&fep->lock);
+ spin_lock_init(&fep->tx_lock);
+
+ mac_addr = of_get_mac_address(ofdev->dev.of_node);
+ if (mac_addr)
+ memcpy(ndev->dev_addr, mac_addr, 6);
+
+ ret = fep->ops->allocate_bd(ndev);
+ if (ret)
+ goto out_cleanup_data;
+
+ fep->rx_bd_base = fep->ring_base;
+ fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;
+
+ fep->tx_ring = fpi->tx_ring;
+ fep->rx_ring = fpi->rx_ring;
+
+ ndev->netdev_ops = &fs_enet_netdev_ops;
+ ndev->watchdog_timeo = 2 * HZ;
+ if (fpi->use_napi)
+ netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
+ fpi->napi_weight);
+
+ ndev->ethtool_ops = &fs_ethtool_ops;
+
+ init_timer(&fep->phy_timer_list);
+
+ netif_carrier_off(ndev);
+
+ ret = register_netdev(ndev);
+ if (ret)
+ goto out_free_bd;
+
+ pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);
+
+ return 0;
+
+out_free_bd:
+ fep->ops->free_bd(ndev);
+out_cleanup_data:
+ fep->ops->cleanup_data(ndev);
+out_free_dev:
+ free_netdev(ndev);
+ dev_set_drvdata(&ofdev->dev, NULL);
+out_put:
+ of_node_put(fpi->phy_node);
+out_free_fpi:
+ kfree(fpi);
+ return ret;
+}
+
+static int fs_enet_remove(struct platform_device *ofdev)
+{
+ struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
+ struct fs_enet_private *fep = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+
+ fep->ops->free_bd(ndev);
+ fep->ops->cleanup_data(ndev);
+ dev_set_drvdata(fep->dev, NULL);
+ of_node_put(fep->fpi->phy_node);
+ free_netdev(ndev);
+ return 0;
+}
+
+static struct of_device_id fs_enet_match[] = {
+#ifdef CONFIG_FS_ENET_HAS_SCC
+ {
+ .compatible = "fsl,cpm1-scc-enet",
+ .data = (void *)&fs_scc_ops,
+ },
+ {
+ .compatible = "fsl,cpm2-scc-enet",
+ .data = (void *)&fs_scc_ops,
+ },
+#endif
+#ifdef CONFIG_FS_ENET_HAS_FCC
+ {
+ .compatible = "fsl,cpm2-fcc-enet",
+ .data = (void *)&fs_fcc_ops,
+ },
+#endif
+#ifdef CONFIG_FS_ENET_HAS_FEC
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+ {
+ .compatible = "fsl,mpc5121-fec",
+ .data = (void *)&fs_fec_ops,
+ },
+#else
+ {
+ .compatible = "fsl,pq1-fec-enet",
+ .data = (void *)&fs_fec_ops,
+ },
+#endif
+#endif
+ {}
+};
+MODULE_DEVICE_TABLE(of, fs_enet_match);
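+
+/*
+ * A matching device-tree node looks roughly like the sketch below
+ * (illustrative only; reg/interrupt values are board specific and
+ * omitted here):
+ *
+ *	ethernet@... {
+ *		compatible = "fsl,cpm2-fcc-enet";
+ *		fsl,cpm-command = <...>;
+ *		phy-handle = <&phy0>;
+ *	};
+ *
+ * probe() insists on either a phy-handle or a fixed-link property, plus
+ * a 4-byte fsl,cpm-command for the SCC/FCC (non-FEC) variants.
+ */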
+
+static struct platform_driver fs_enet_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "fs_enet",
+ .of_match_table = fs_enet_match,
+ },
+ .probe = fs_enet_probe,
+ .remove = fs_enet_remove,
+};
+
+static int __init fs_init(void)
+{
+ return platform_driver_register(&fs_enet_driver);
+}
+
+static void __exit fs_cleanup(void)
+{
+ platform_driver_unregister(&fs_enet_driver);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void fs_enet_netpoll(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ fs_enet_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+/**************************************************************************************/
+
+module_init(fs_init);
+module_exit(fs_cleanup);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
new file mode 100644
index 000000000000..1ece4b1a689e
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -0,0 +1,244 @@
+#ifndef FS_ENET_H
+#define FS_ENET_H
+
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/phy.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/fs_enet_pd.h>
+#include <asm/fs_pd.h>
+
+#ifdef CONFIG_CPM1
+#include <asm/cpm1.h>
+#endif
+
+#if defined(CONFIG_FS_ENET_HAS_FEC)
+#include <asm/cpm.h>
+
+#if defined(CONFIG_FS_ENET_MPC5121_FEC)
+/* MPC5121 FEC has different register layout */
+struct fec {
+ u32 fec_reserved0;
+ u32 fec_ievent; /* Interrupt event reg */
+ u32 fec_imask; /* Interrupt mask reg */
+ u32 fec_reserved1;
+ u32 fec_r_des_active; /* Receive descriptor reg */
+ u32 fec_x_des_active; /* Transmit descriptor reg */
+ u32 fec_reserved2[3];
+ u32 fec_ecntrl; /* Ethernet control reg */
+ u32 fec_reserved3[6];
+ u32 fec_mii_data; /* MII manage frame reg */
+ u32 fec_mii_speed; /* MII speed control reg */
+ u32 fec_reserved4[7];
+ u32 fec_mib_ctrlstat; /* MIB control/status reg */
+ u32 fec_reserved5[7];
+ u32 fec_r_cntrl; /* Receive control reg */
+ u32 fec_reserved6[15];
+ u32 fec_x_cntrl; /* Transmit Control reg */
+ u32 fec_reserved7[7];
+ u32 fec_addr_low; /* Low 32bits MAC address */
+ u32 fec_addr_high; /* High 16bits MAC address */
+ u32 fec_opd; /* Opcode + Pause duration */
+ u32 fec_reserved8[10];
+ u32 fec_hash_table_high; /* High 32bits hash table */
+ u32 fec_hash_table_low; /* Low 32bits hash table */
+ u32 fec_grp_hash_table_high; /* High 32bits hash table */
+ u32 fec_grp_hash_table_low; /* Low 32bits hash table */
+ u32 fec_reserved9[7];
+ u32 fec_x_wmrk; /* FIFO transmit water mark */
+ u32 fec_reserved10;
+ u32 fec_r_bound; /* FIFO receive bound reg */
+ u32 fec_r_fstart; /* FIFO receive start reg */
+ u32 fec_reserved11[11];
+ u32 fec_r_des_start; /* Receive descriptor ring */
+ u32 fec_x_des_start; /* Transmit descriptor ring */
+ u32 fec_r_buff_size; /* Maximum receive buff size */
+ u32 fec_reserved12[26];
+ u32 fec_dma_control; /* DMA Endian and other ctrl */
+};
+#endif
+
+struct fec_info {
+ struct fec __iomem *fecp;
+ u32 mii_speed;
+};
+#endif
+
+#ifdef CONFIG_CPM2
+#include <asm/cpm2.h>
+#endif
+
+/* hw driver ops */
+struct fs_ops {
+ int (*setup_data)(struct net_device *dev);
+ int (*allocate_bd)(struct net_device *dev);
+ void (*free_bd)(struct net_device *dev);
+ void (*cleanup_data)(struct net_device *dev);
+ void (*set_multicast_list)(struct net_device *dev);
+ void (*adjust_link)(struct net_device *dev);
+ void (*restart)(struct net_device *dev);
+ void (*stop)(struct net_device *dev);
+ void (*napi_clear_rx_event)(struct net_device *dev);
+ void (*napi_enable_rx)(struct net_device *dev);
+ void (*napi_disable_rx)(struct net_device *dev);
+ void (*rx_bd_done)(struct net_device *dev);
+ void (*tx_kickstart)(struct net_device *dev);
+ u32 (*get_int_events)(struct net_device *dev);
+ void (*clear_int_events)(struct net_device *dev, u32 int_events);
+ void (*ev_error)(struct net_device *dev, u32 int_events);
+ int (*get_regs)(struct net_device *dev, void *p, int *sizep);
+ int (*get_regs_len)(struct net_device *dev);
+ void (*tx_restart)(struct net_device *dev);
+};
+
+struct phy_info {
+ unsigned int id;
+ const char *name;
+ void (*startup) (struct net_device * dev);
+ void (*shutdown) (struct net_device * dev);
+ void (*ack_int) (struct net_device * dev);
+};
+
+/* The FEC stores dest/src/type, data, and checksum for receive packets.
+ */
+#define MAX_MTU 1508 /* Allow full-sized PPPoE packets over VLAN */
+#define MIN_MTU 46 /* this is data size */
+#define CRC_LEN 4
+
+#define PKT_MAXBUF_SIZE (MAX_MTU + ETH_HLEN + CRC_LEN)
+#define PKT_MINBUF_SIZE (MIN_MTU + ETH_HLEN + CRC_LEN)
+
+/* Must be a multiple of 32 (to cover both FEC & FCC) */
+#define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE + 31) & ~31)
+/* This is needed so that invalidate_xxx won't invalidate too much */
+#define ENET_RX_ALIGN 16
+#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE + ENET_RX_ALIGN - 1)
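+
+/*
+ * Worked example: PKT_MAXBUF_SIZE = 1508 + 14 + 4 = 1526 bytes, and
+ * PKT_MAXBLR_SIZE rounds that up to the next multiple of 32,
+ * (1526 + 31) & ~31 = 1536.
+ */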
+
+struct fs_enet_private {
+ struct napi_struct napi;
+ struct device *dev; /* pointer back to the device (must be initialized first) */
+ struct net_device *ndev;
+ spinlock_t lock; /* during all ops except TX pckt processing */
+ spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */
+ struct fs_platform_info *fpi;
+ const struct fs_ops *ops;
+ int rx_ring, tx_ring;
+ dma_addr_t ring_mem_addr;
+ void __iomem *ring_base;
+ struct sk_buff **rx_skbuff;
+ struct sk_buff **tx_skbuff;
+ cbd_t __iomem *rx_bd_base; /* Address of Rx and Tx buffers. */
+ cbd_t __iomem *tx_bd_base;
+ cbd_t __iomem *dirty_tx; /* ring entries to be free()ed. */
+ cbd_t __iomem *cur_rx;
+ cbd_t __iomem *cur_tx;
+ int tx_free;
+ struct net_device_stats stats;
+ struct timer_list phy_timer_list;
+ const struct phy_info *phy;
+ u32 msg_enable;
+ struct mii_if_info mii_if;
+ unsigned int last_mii_status;
+ int interrupt;
+
+ struct phy_device *phydev;
+ int oldduplex, oldspeed, oldlink; /* current settings */
+
+ /* event masks */
+ u32 ev_napi_rx; /* mask of NAPI rx events */
+ u32 ev_rx; /* rx event mask */
+ u32 ev_tx; /* tx event mask */
+ u32 ev_err; /* error event mask */
+
+ u16 bd_rx_empty; /* mask of BD rx empty */
+ u16 bd_rx_err; /* mask of BD rx errors */
+
+ union {
+ struct {
+ int idx; /* FEC1 = 0, FEC2 = 1 */
+ void __iomem *fecp; /* hw registers */
+ u32 hthi, htlo; /* state for multicast */
+ } fec;
+
+ struct {
+ int idx; /* FCC1-3 = 0-2 */
+ void __iomem *fccp; /* hw registers */
+ void __iomem *ep; /* parameter ram */
+ void __iomem *fcccp; /* hw registers cont. */
+ void __iomem *mem; /* FCC DPRAM */
+ u32 gaddrh, gaddrl; /* group address */
+ } fcc;
+
+ struct {
+ int idx; /* SCC1 = 0, SCC2 = 1, ... */
+ void __iomem *sccp; /* hw registers */
+ void __iomem *ep; /* parameter ram */
+ u32 hthi, htlo; /* state for multicast */
+ } scc;
+
+ };
+};
+
+/***************************************************************************/
+
+void fs_init_bds(struct net_device *dev);
+void fs_cleanup_bds(struct net_device *dev);
+
+/***************************************************************************/
+
+#define DRV_MODULE_NAME "fs_enet"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_MODULE_VERSION "1.0"
+#define DRV_MODULE_RELDATE "Aug 8, 2005"
+
+/***************************************************************************/
+
+int fs_enet_platform_init(void);
+void fs_enet_platform_cleanup(void);
+
+/***************************************************************************/
+/* buffer descriptor access macros */
+
+/* access macros */
+#if defined(CONFIG_CPM1)
+/* for a CPM1 the __raw_xxx accessors are sufficient */
+#define __cbd_out32(addr, x) __raw_writel(x, addr)
+#define __cbd_out16(addr, x) __raw_writew(x, addr)
+#define __cbd_in32(addr) __raw_readl(addr)
+#define __cbd_in16(addr) __raw_readw(addr)
+#else
+/* for others play it safe */
+#define __cbd_out32(addr, x) out_be32(addr, x)
+#define __cbd_out16(addr, x) out_be16(addr, x)
+#define __cbd_in32(addr) in_be32(addr)
+#define __cbd_in16(addr) in_be16(addr)
+#endif
+
+/* write */
+#define CBDW_SC(_cbd, _sc) __cbd_out16(&(_cbd)->cbd_sc, (_sc))
+#define CBDW_DATLEN(_cbd, _datlen) __cbd_out16(&(_cbd)->cbd_datlen, (_datlen))
+#define CBDW_BUFADDR(_cbd, _bufaddr) __cbd_out32(&(_cbd)->cbd_bufaddr, (_bufaddr))
+
+/* read */
+#define CBDR_SC(_cbd) __cbd_in16(&(_cbd)->cbd_sc)
+#define CBDR_DATLEN(_cbd) __cbd_in16(&(_cbd)->cbd_datlen)
+#define CBDR_BUFADDR(_cbd) __cbd_in32(&(_cbd)->cbd_bufaddr)
+
+/* set bits */
+#define CBDS_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc))
+
+/* clear bits */
+#define CBDC_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc))
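+
+/*
+ * Usage sketch (illustrative; bdp and dma_addr are assumed locals): a
+ * receive descriptor is handed back to the controller with
+ *
+ *	CBDW_BUFADDR(bdp, dma_addr);
+ *	CBDW_DATLEN(bdp, 0);
+ *	CBDS_SC(bdp, BD_ENET_RX_EMPTY);
+ *
+ * which points the BD at a DMA-mapped buffer, clears the length the
+ * hardware will fill in, and sets the empty bit to pass ownership.
+ */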
+
+/*******************************************************************/
+
+extern const struct fs_ops fs_fec_ops;
+extern const struct fs_ops fs_fcc_ops;
+extern const struct fs_ops fs_scc_ops;
+
+/*******************************************************************/
+
+#endif
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
new file mode 100644
index 000000000000..7583a9572bcc
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -0,0 +1,584 @@
+/*
+ * FCC driver for Motorola MPC82xx (PQ2).
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/of_device.h>
+#include <linux/gfp.h>
+
+#include <asm/immap_cpm2.h>
+#include <asm/mpc8260.h>
+#include <asm/cpm2.h>
+
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "fs_enet.h"
+
+/*************************************************/
+
+/* FCC access macros */
+
+/* write, read, set bits, clear bits */
+#define W32(_p, _m, _v) out_be32(&(_p)->_m, (_v))
+#define R32(_p, _m) in_be32(&(_p)->_m)
+#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
+#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
+
+#define W16(_p, _m, _v) out_be16(&(_p)->_m, (_v))
+#define R16(_p, _m) in_be16(&(_p)->_m)
+#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
+#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
+
+#define W8(_p, _m, _v) out_8(&(_p)->_m, (_v))
+#define R8(_p, _m) in_8(&(_p)->_m)
+#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
+#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
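+
+/*
+ * S32()/C32() and the narrower variants are read-modify-write helpers;
+ * e.g. S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT) in restart()
+ * below enables the receiver and transmitter without disturbing the
+ * remaining GFMR bits.
+ */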
+
+/*************************************************/
+
+#define FCC_MAX_MULTICAST_ADDRS 64
+
+#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
+#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
+#define mk_mii_end 0
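+
+/*
+ * mk_mii_read()/mk_mii_write() encode a clause-22 MII management frame:
+ * bits 31-30 carry the start delimiter (01), bits 29-28 the opcode
+ * (10 = read, 01 = write), bits 27-23 the PHY address (left zero here),
+ * bits 22-18 the register number, bits 17-16 the turnaround, and
+ * bits 15-0 the data word.
+ */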
+
+#define MAX_CR_CMD_LOOPS 10000
+
+static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 op)
+{
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ return cpm_command(fpi->cp_command, op);
+}
+
+static int do_pd_setup(struct fs_enet_private *fep)
+{
+ struct platform_device *ofdev = to_platform_device(fep->dev);
+ struct fs_platform_info *fpi = fep->fpi;
+ int ret = -EINVAL;
+
+ fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
+ if (fep->interrupt == NO_IRQ)
+ goto out;
+
+ fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0);
+ if (!fep->fcc.fccp)
+ goto out;
+
+ fep->fcc.ep = of_iomap(ofdev->dev.of_node, 1);
+ if (!fep->fcc.ep)
+ goto out_fccp;
+
+ fep->fcc.fcccp = of_iomap(ofdev->dev.of_node, 2);
+ if (!fep->fcc.fcccp)
+ goto out_ep;
+
+ fep->fcc.mem = (void __iomem *)cpm2_immr;
+ fpi->dpram_offset = cpm_dpalloc(128, 32);
+ if (IS_ERR_VALUE(fpi->dpram_offset)) {
+ ret = fpi->dpram_offset;
+ goto out_fcccp;
+ }
+
+ return 0;
+
+out_fcccp:
+ iounmap(fep->fcc.fcccp);
+out_ep:
+ iounmap(fep->fcc.ep);
+out_fccp:
+ iounmap(fep->fcc.fccp);
+out:
+ return ret;
+}
+
+#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
+#define FCC_RX_EVENT (FCC_ENET_RXF)
+#define FCC_TX_EVENT (FCC_ENET_TXB)
+#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE)
+
+static int setup_data(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (do_pd_setup(fep) != 0)
+ return -EINVAL;
+
+ fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK;
+ fep->ev_rx = FCC_RX_EVENT;
+ fep->ev_tx = FCC_TX_EVENT;
+ fep->ev_err = FCC_ERR_EVENT_MSK;
+
+ return 0;
+}
+
+static int allocate_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ fep->ring_base = (void __iomem __force *)dma_alloc_coherent(fep->dev,
+ (fpi->tx_ring + fpi->rx_ring) *
+ sizeof(cbd_t), &fep->ring_mem_addr,
+ GFP_KERNEL);
+ if (fep->ring_base == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void free_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ if (fep->ring_base)
+ dma_free_coherent(fep->dev,
+ (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
+ (void __force *)fep->ring_base, fep->ring_mem_addr);
+}
+
+static void cleanup_data(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static void set_promiscuous_mode(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ S32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
+}
+
+static void set_multicast_start(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_enet_t __iomem *ep = fep->fcc.ep;
+
+ W32(ep, fen_gaddrh, 0);
+ W32(ep, fen_gaddrl, 0);
+}
+
+static void set_multicast_one(struct net_device *dev, const u8 *mac)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_enet_t __iomem *ep = fep->fcc.ep;
+ u16 taddrh, taddrm, taddrl;
+
+ taddrh = ((u16)mac[5] << 8) | mac[4];
+ taddrm = ((u16)mac[3] << 8) | mac[2];
+ taddrl = ((u16)mac[1] << 8) | mac[0];
+
+ W16(ep, fen_taddrh, taddrh);
+ W16(ep, fen_taddrm, taddrm);
+ W16(ep, fen_taddrl, taddrl);
+ fcc_cr_cmd(fep, CPM_CR_SET_GADDR);
+}
+
+static void set_multicast_finish(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+ fcc_enet_t __iomem *ep = fep->fcc.ep;
+
+ /* clear promiscuous always */
+ C32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
+
+ /* if all multi or too many multicasts; just enable all */
+ if ((dev->flags & IFF_ALLMULTI) != 0 ||
+ netdev_mc_count(dev) > FCC_MAX_MULTICAST_ADDRS) {
+
+ W32(ep, fen_gaddrh, 0xffffffff);
+ W32(ep, fen_gaddrl, 0xffffffff);
+ }
+
+ /* read back */
+ fep->fcc.gaddrh = R32(ep, fen_gaddrh);
+ fep->fcc.gaddrl = R32(ep, fen_gaddrl);
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+
+ if ((dev->flags & IFF_PROMISC) == 0) {
+ set_multicast_start(dev);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
+ set_multicast_finish(dev);
+ } else
+ set_promiscuous_mode(dev);
+}
+
+static void restart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+ fcc_c_t __iomem *fcccp = fep->fcc.fcccp;
+ fcc_enet_t __iomem *ep = fep->fcc.ep;
+ dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
+ u16 paddrh, paddrm, paddrl;
+ const unsigned char *mac;
+ int i;
+
+ C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
+
+ /* clear everything (slow & steady does it) */
+ for (i = 0; i < sizeof(*ep); i++)
+ out_8((u8 __iomem *)ep + i, 0);
+
+ /* get physical address */
+ rx_bd_base_phys = fep->ring_mem_addr;
+ tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;
+
+ /* point to bds */
+ W32(ep, fen_genfcc.fcc_rbase, rx_bd_base_phys);
+ W32(ep, fen_genfcc.fcc_tbase, tx_bd_base_phys);
+
+ /* Set maximum bytes per receive buffer.
+ * It must be a multiple of 32.
+ */
+ W16(ep, fen_genfcc.fcc_mrblr, PKT_MAXBLR_SIZE);
+
+ W32(ep, fen_genfcc.fcc_rstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
+ W32(ep, fen_genfcc.fcc_tstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
+
+ /* Allocate space in the reserved FCC area of DPRAM for the
+ * internal buffers. No one uses this space (yet), so we
+ * can do this. Later, we will add resource management for
+ * this area.
+ */
+
+ W16(ep, fen_genfcc.fcc_riptr, fpi->dpram_offset);
+ W16(ep, fen_genfcc.fcc_tiptr, fpi->dpram_offset + 32);
+
+ W16(ep, fen_padptr, fpi->dpram_offset + 64);
+
+ /* fill with special symbol... */
+ memset_io(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32);
+
+ W32(ep, fen_genfcc.fcc_rbptr, 0);
+ W32(ep, fen_genfcc.fcc_tbptr, 0);
+ W32(ep, fen_genfcc.fcc_rcrc, 0);
+ W32(ep, fen_genfcc.fcc_tcrc, 0);
+ W16(ep, fen_genfcc.fcc_res1, 0);
+ W32(ep, fen_genfcc.fcc_res2, 0);
+
+ /* no CAM */
+ W32(ep, fen_camptr, 0);
+
+ /* Set CRC preset and mask */
+ W32(ep, fen_cmask, 0xdebb20e3);
+ W32(ep, fen_cpres, 0xffffffff);
+
+ W32(ep, fen_crcec, 0); /* CRC Error counter */
+ W32(ep, fen_alec, 0); /* alignment error counter */
+ W32(ep, fen_disfc, 0); /* discard frame counter */
+ W16(ep, fen_retlim, 15); /* Retry limit threshold */
+ W16(ep, fen_pper, 0); /* Normal persistence */
+
+ /* set group address */
+ W32(ep, fen_gaddrh, fep->fcc.gaddrh);
+ W32(ep, fen_gaddrl, fep->fcc.gaddrl);
+
+ /* Clear hash filter tables */
+ W32(ep, fen_iaddrh, 0);
+ W32(ep, fen_iaddrl, 0);
+
+ /* Clear the Out-of-sequence TxBD */
+ W16(ep, fen_tfcstat, 0);
+ W16(ep, fen_tfclen, 0);
+ W32(ep, fen_tfcptr, 0);
+
+ W16(ep, fen_mflr, PKT_MAXBUF_SIZE); /* maximum frame length register */
+ W16(ep, fen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */
+
+ /* set address */
+ mac = dev->dev_addr;
+ paddrh = ((u16)mac[5] << 8) | mac[4];
+ paddrm = ((u16)mac[3] << 8) | mac[2];
+ paddrl = ((u16)mac[1] << 8) | mac[0];
+
+ W16(ep, fen_paddrh, paddrh);
+ W16(ep, fen_paddrm, paddrm);
+ W16(ep, fen_paddrl, paddrl);
+
+ W16(ep, fen_taddrh, 0);
+ W16(ep, fen_taddrm, 0);
+ W16(ep, fen_taddrl, 0);
+
+ W16(ep, fen_maxd1, 1520); /* maximum DMA1 length */
+ W16(ep, fen_maxd2, 1520); /* maximum DMA2 length */
+
+ /* Clear stat counters, in case we ever enable RMON */
+ W32(ep, fen_octc, 0);
+ W32(ep, fen_colc, 0);
+ W32(ep, fen_broc, 0);
+ W32(ep, fen_mulc, 0);
+ W32(ep, fen_uspc, 0);
+ W32(ep, fen_frgc, 0);
+ W32(ep, fen_ospc, 0);
+ W32(ep, fen_jbrc, 0);
+ W32(ep, fen_p64c, 0);
+ W32(ep, fen_p65c, 0);
+ W32(ep, fen_p128c, 0);
+ W32(ep, fen_p256c, 0);
+ W32(ep, fen_p512c, 0);
+ W32(ep, fen_p1024c, 0);
+
+ W16(ep, fen_rfthr, 0); /* Suggested by manual */
+ W16(ep, fen_rfcnt, 0);
+ W16(ep, fen_cftype, 0);
+
+ fs_init_bds(dev);
+
+ /* adjust to speed (for RMII mode) */
+ if (fpi->use_rmii) {
+ if (fep->phydev->speed == 100)
+ C8(fcccp, fcc_gfemr, 0x20);
+ else
+ S8(fcccp, fcc_gfemr, 0x20);
+ }
+
+ fcc_cr_cmd(fep, CPM_CR_INIT_TRX);
+
+ /* clear events */
+ W16(fccp, fcc_fcce, 0xffff);
+
+ /* Enable interrupts we wish to service */
+ W16(fccp, fcc_fccm, FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB);
+
+ /* Set GFMR to enable Ethernet operating mode */
+ W32(fccp, fcc_gfmr, FCC_GFMR_TCI | FCC_GFMR_MODE_ENET);
+
+ /* set sync/delimiters */
+ W16(fccp, fcc_fdsr, 0xd555);
+
+ W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC);
+
+ if (fpi->use_rmii)
+ S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
+
+ /* adjust to duplex mode */
+ if (fep->phydev->duplex)
+ S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
+ else
+ C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
+
+ /* Restore multicast and promiscuous settings */
+ set_multicast_list(dev);
+
+ S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
+}
+
+static void stop(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ /* stop ethernet */
+ C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
+
+ /* clear events */
+ W16(fccp, fcc_fcce, 0xffff);
+
+ /* clear interrupt mask */
+ W16(fccp, fcc_fccm, 0);
+
+ fs_cleanup_bds(dev);
+}
+
+static void napi_clear_rx_event(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_enable_rx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_disable_rx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
+}
+
+static void rx_bd_done(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static void tx_kickstart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ S16(fccp, fcc_ftodr, 0x8000);
+}
+
+static u32 get_int_events(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ return (u32)R16(fccp, fcc_fcce);
+}
+
+static void clear_int_events(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ W16(fccp, fcc_fcce, int_events & 0xffff);
+}
+
+static void ev_error(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ dev_warn(fep->dev, "FS_ENET ERROR(s) 0x%x\n", int_events);
+}
+
+static int get_regs(struct net_device *dev, void *p, int *sizep)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (*sizep < sizeof(fcc_t) + sizeof(fcc_enet_t) + 1)
+ return -EINVAL;
+
+ memcpy_fromio(p, fep->fcc.fccp, sizeof(fcc_t));
+ p = (char *)p + sizeof(fcc_t);
+
+ memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t));
+ p = (char *)p + sizeof(fcc_enet_t);
+
+ memcpy_fromio(p, fep->fcc.fcccp, 1);
+ return 0;
+}
+
+static int get_regs_len(struct net_device *dev)
+{
+ return sizeof(fcc_t) + sizeof(fcc_enet_t) + 1;
+}
+
+/* Some transmit errors cause the transmitter to shut
+ * down. We now issue a restart transmit.
+ * Also, to work around 8260 device erratum CPM37, we must
+ * disable and then re-enable the transmitter following a
+ * Late Collision, Underrun, or Retry Limit error.
+ * In addition, tbptr may point past BDs still marked
+ * as ready due to internal pipelining, so we need to look back
+ * through the BDs and adjust tbptr to point to the last BD
+ * marked as ready. This may result in some buffers being
+ * retransmitted.
+ */
+static void tx_restart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+ const struct fs_platform_info *fpi = fep->fpi;
+ fcc_enet_t __iomem *ep = fep->fcc.ep;
+ cbd_t __iomem *curr_tbptr;
+ cbd_t __iomem *recheck_bd;
+ cbd_t __iomem *prev_bd;
+ cbd_t __iomem *last_tx_bd;
+
+ last_tx_bd = fep->tx_bd_base + (fpi->tx_ring - 1);
+
+ /* get the current bd held in TBPTR and scan back from this point */
+ recheck_bd = curr_tbptr = (cbd_t __iomem *)
+ ((R32(ep, fen_genfcc.fcc_tbptr) - fep->ring_mem_addr) +
+ fep->ring_base);
+
+ prev_bd = (recheck_bd == fep->tx_bd_base) ? last_tx_bd : recheck_bd - 1;
+
+ /* Move through the bds in reverse, look for the earliest buffer
+ * that is not ready. Adjust TBPTR to the following buffer */
+ while ((CBDR_SC(prev_bd) & BD_ENET_TX_READY) != 0) {
+ /* Go back one buffer */
+ recheck_bd = prev_bd;
+
+ /* update the previous buffer */
+ prev_bd = (prev_bd == fep->tx_bd_base) ? last_tx_bd : prev_bd - 1;
+
+ /* We should never see all bds marked as ready, check anyway */
+ if (recheck_bd == curr_tbptr)
+ break;
+ }
+ /* Now update the TBPTR and dirty flag to the current buffer */
+ W32(ep, fen_genfcc.fcc_tbptr,
+ (uint) (((void *)recheck_bd - fep->ring_base) +
+ fep->ring_mem_addr));
+ fep->dirty_tx = recheck_bd;
+
+ C32(fccp, fcc_gfmr, FCC_GFMR_ENT);
+ udelay(10);
+ S32(fccp, fcc_gfmr, FCC_GFMR_ENT);
+
+ fcc_cr_cmd(fep, CPM_CR_RESTART_TX);
+}
+
+/*************************************************************************/
+
+const struct fs_ops fs_fcc_ops = {
+ .setup_data = setup_data,
+ .cleanup_data = cleanup_data,
+ .set_multicast_list = set_multicast_list,
+ .restart = restart,
+ .stop = stop,
+ .napi_clear_rx_event = napi_clear_rx_event,
+ .napi_enable_rx = napi_enable_rx,
+ .napi_disable_rx = napi_disable_rx,
+ .rx_bd_done = rx_bd_done,
+ .tx_kickstart = tx_kickstart,
+ .get_int_events = get_int_events,
+ .clear_int_events = clear_int_events,
+ .ev_error = ev_error,
+ .get_regs = get_regs,
+ .get_regs_len = get_regs_len,
+ .tx_restart = tx_restart,
+ .allocate_bd = allocate_bd,
+ .free_bd = free_bd,
+};
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
new file mode 100644
index 000000000000..b9fbc83d64a7
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -0,0 +1,498 @@
+/*
+ * Freescale Ethernet controllers
+ *
+ * Copyright (c) 2005 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/gfp.h>
+
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_8xx
+#include <asm/8xx_immap.h>
+#include <asm/pgtable.h>
+#include <asm/mpc8xx.h>
+#include <asm/cpm1.h>
+#endif
+
+#include "fs_enet.h"
+#include "fec.h"
+
+/*************************************************/
+
+#if defined(CONFIG_CPM1)
+/* for a CPM1 __raw_xxx's are sufficient */
+#define __fs_out32(addr, x) __raw_writel(x, addr)
+#define __fs_out16(addr, x) __raw_writew(x, addr)
+#define __fs_in32(addr) __raw_readl(addr)
+#define __fs_in16(addr) __raw_readw(addr)
+#else
+/* for others play it safe */
+#define __fs_out32(addr, x) out_be32(addr, x)
+#define __fs_out16(addr, x) out_be16(addr, x)
+#define __fs_in32(addr) in_be32(addr)
+#define __fs_in16(addr) in_be16(addr)
+#endif
+
+/* write */
+#define FW(_fecp, _reg, _v) __fs_out32(&(_fecp)->fec_ ## _reg, (_v))
+
+/* read */
+#define FR(_fecp, _reg) __fs_in32(&(_fecp)->fec_ ## _reg)
+
+/* set bits */
+#define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v))
+
+/* clear bits */
+#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))
+
+/*
+ * Delay to wait for FEC reset command to complete (in us)
+ */
+#define FEC_RESET_DELAY 50
+
+static int whack_reset(struct fec __iomem *fecp)
+{
+ int i;
+
+ FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET);
+ for (i = 0; i < FEC_RESET_DELAY; i++) {
+ if ((FR(fecp, ecntrl) & FEC_ECNTRL_RESET) == 0)
+ return 0; /* OK */
+ udelay(1);
+ }
+
+ return -1;
+}
+
+static int do_pd_setup(struct fs_enet_private *fep)
+{
+ struct platform_device *ofdev = to_platform_device(fep->dev);
+
+ fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
+ if (fep->interrupt == NO_IRQ)
+ return -EINVAL;
+
+ fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
+ if (!fep->fec.fecp)
+ return -EINVAL;
+
+ return 0;
+}
+
+#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
+#define FEC_RX_EVENT (FEC_ENET_RXF)
+#define FEC_TX_EVENT (FEC_ENET_TXF)
+#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
+ FEC_ENET_BABT | FEC_ENET_EBERR)
+
+static int setup_data(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (do_pd_setup(fep) != 0)
+ return -EINVAL;
+
+ fep->fec.hthi = 0;
+ fep->fec.htlo = 0;
+
+ fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK;
+ fep->ev_rx = FEC_RX_EVENT;
+ fep->ev_tx = FEC_TX_EVENT;
+ fep->ev_err = FEC_ERR_EVENT_MSK;
+
+ return 0;
+}
+
+static int allocate_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ fep->ring_base = (void __force __iomem *)dma_alloc_coherent(fep->dev,
+ (fpi->tx_ring + fpi->rx_ring) *
+ sizeof(cbd_t), &fep->ring_mem_addr,
+ GFP_KERNEL);
+ if (fep->ring_base == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void free_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ if (fep->ring_base)
+ dma_free_coherent(fep->dev,
+ (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
+ (void __force *)fep->ring_base,
+ fep->ring_mem_addr);
+}
+
+static void cleanup_data(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static void set_promiscuous_mode(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
+}
+
+static void set_multicast_start(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ fep->fec.hthi = 0;
+ fep->fec.htlo = 0;
+}
+
+static void set_multicast_one(struct net_device *dev, const u8 *mac)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ int temp, hash_index, i, j;
+ u32 crc, csrVal;
+ u8 byte, msb;
+
+ crc = 0xffffffff;
+ for (i = 0; i < 6; i++) {
+ byte = mac[i];
+ for (j = 0; j < 8; j++) {
+ msb = crc >> 31;
+ crc <<= 1;
+ if (msb ^ (byte & 0x1))
+ crc ^= FEC_CRC_POLY;
+ byte >>= 1;
+ }
+ }
+
+ temp = (crc & 0x3f) >> 1;
+ hash_index = ((temp & 0x01) << 4) |
+ ((temp & 0x02) << 2) |
+ ((temp & 0x04)) |
+ ((temp & 0x08) >> 2) |
+ ((temp & 0x10) >> 4);
+ csrVal = 1 << hash_index;
+ if (crc & 1)
+ fep->fec.hthi |= csrVal;
+ else
+ fep->fec.htlo |= csrVal;
+}
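+
+/*
+ * The loop above is a bitwise CRC-32 of the MAC address; the low six
+ * CRC bits then pick one of the 64 group-hash bits: five of them (bit
+ * reversed) select the bit position, and the remaining bit selects the
+ * high or low 32-bit half of the hash table.
+ */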
+
+static void set_multicast_finish(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ /* if all multi or too many multicasts; just enable all */
+ if ((dev->flags & IFF_ALLMULTI) != 0 ||
+ netdev_mc_count(dev) > FEC_MAX_MULTICAST_ADDRS) {
+ fep->fec.hthi = 0xffffffffU;
+ fep->fec.htlo = 0xffffffffU;
+ }
+
+ FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
+ FW(fecp, grp_hash_table_high, fep->fec.hthi);
+ FW(fecp, grp_hash_table_low, fep->fec.htlo);
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+
+ if ((dev->flags & IFF_PROMISC) == 0) {
+ set_multicast_start(dev);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
+ set_multicast_finish(dev);
+ } else
+ set_promiscuous_mode(dev);
+}
+
+static void restart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+ const struct fs_platform_info *fpi = fep->fpi;
+ dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
+ int r;
+ u32 addrhi, addrlo;
+
+ struct mii_bus *mii = fep->phydev->bus;
+ struct fec_info *fec_inf = mii->priv;
+
+ r = whack_reset(fep->fec.fecp);
+ if (r != 0)
+ dev_err(fep->dev, "FEC Reset FAILED!\n");
+ /*
+ * Set station address.
+ */
+ addrhi = ((u32) dev->dev_addr[0] << 24) |
+ ((u32) dev->dev_addr[1] << 16) |
+ ((u32) dev->dev_addr[2] << 8) |
+ (u32) dev->dev_addr[3];
+ addrlo = ((u32) dev->dev_addr[4] << 24) |
+ ((u32) dev->dev_addr[5] << 16);
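+ /* despite the names, the "low" register takes the first four
+ * octets and the "high" register the last two, so writing addrhi
+ * to addr_low below is correct */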
+ FW(fecp, addr_low, addrhi);
+ FW(fecp, addr_high, addrlo);
+
+ /*
+ * Reset all multicast.
+ */
+ FW(fecp, grp_hash_table_high, fep->fec.hthi);
+ FW(fecp, grp_hash_table_low, fep->fec.htlo);
+
+ /*
+ * Set maximum receive buffer size.
+ */
+ FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+ FW(fecp, r_cntrl, PKT_MAXBUF_SIZE << 16);
+#else
+ FW(fecp, r_hash, PKT_MAXBUF_SIZE);
+#endif
+
+ /* get physical address */
+ rx_bd_base_phys = fep->ring_mem_addr;
+ tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;
+
+ /*
+ * Set receive and transmit descriptor base.
+ */
+ FW(fecp, r_des_start, rx_bd_base_phys);
+ FW(fecp, x_des_start, tx_bd_base_phys);
+
+ fs_init_bds(dev);
+
+ /*
+ * Enable big endian and don't care about SDMA FC.
+ */
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+ FS(fecp, dma_control, 0xC0000000);
+#else
+ FW(fecp, fun_code, 0x78000000);
+#endif
+
+ /*
+ * Set MII speed.
+ */
+ FW(fecp, mii_speed, fec_inf->mii_speed);
+
+ /*
+ * Clear any outstanding interrupt.
+ */
+ FW(fecp, ievent, 0xffc0);
+#ifndef CONFIG_FS_ENET_MPC5121_FEC
+ FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29);
+
+ FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
+#else
+ /*
+ * Only set MII mode - do not touch maximum frame length
+ * configured before.
+ */
+ FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE);
+#endif
+ /*
+ * adjust to duplex mode
+ */
+ if (fep->phydev->duplex) {
+ FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
+ FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
+ } else {
+ FS(fecp, r_cntrl, FEC_RCNTRL_DRT);
+ FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */
+ }
+
+ /*
+ * Enable interrupts we wish to service.
+ */
+ FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB |
+ FEC_ENET_RXF | FEC_ENET_RXB);
+
+ /*
+ * And last, enable the transmit and receive processing.
+ */
+ FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
+ FW(fecp, r_des_active, 0x01000000);
+}
+
+static void stop(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+ struct fec __iomem *fecp = fep->fec.fecp;
+ struct fec_info *feci = fep->phydev->bus->priv;
+ int i;
+
+ if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
+ return; /* already down */
+
+ FW(fecp, x_cntrl, 0x01); /* Graceful transmit stop */
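+ /* wait for the GRA (graceful stop complete) event, ievent bit 0x10000000 */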
+ for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) &&
+ i < FEC_RESET_DELAY; i++)
+ udelay(1);
+
+ if (i == FEC_RESET_DELAY)
+ dev_warn(fep->dev, "FEC timeout on graceful transmit stop\n");
+ /*
+ * Disable the FEC and mask all of its interrupts; MII handling is
+ * re-enabled below if this FEC hosts the MII bus.
+ */
+ FW(fecp, imask, 0);
+ FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN);
+
+ fs_cleanup_bds(dev);
+
+ /* if this FEC hosts the MII bus, keep the MII interface alive */
+ if (fpi->has_phy) {
+ FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
+ FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
+ FW(fecp, ievent, FEC_ENET_MII);
+ FW(fecp, mii_speed, feci->mii_speed);
+ }
+}
+
+static void napi_clear_rx_event(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_enable_rx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_disable_rx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
+}
+
+static void rx_bd_done(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FW(fecp, r_des_active, 0x01000000);
+}
+
+static void tx_kickstart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FW(fecp, x_des_active, 0x01000000);
+}
+
+static u32 get_int_events(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ return FR(fecp, ievent) & FR(fecp, imask);
+}
+
+static void clear_int_events(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FW(fecp, ievent, int_events);
+}
+
+static void ev_error(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ dev_warn(fep->dev, "FEC ERROR(s) 0x%x\n", int_events);
+}
+
+static int get_regs(struct net_device *dev, void *p, int *sizep)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (*sizep < sizeof(struct fec))
+ return -EINVAL;
+
+ memcpy_fromio(p, fep->fec.fecp, sizeof(struct fec));
+
+ return 0;
+}
+
+static int get_regs_len(struct net_device *dev)
+{
+ return sizeof(struct fec);
+}
+
+static void tx_restart(struct net_device *dev)
+{
+ /* nothing */
+}
+
+/*************************************************************************/
+
+const struct fs_ops fs_fec_ops = {
+ .setup_data = setup_data,
+ .cleanup_data = cleanup_data,
+ .set_multicast_list = set_multicast_list,
+ .restart = restart,
+ .stop = stop,
+ .napi_clear_rx_event = napi_clear_rx_event,
+ .napi_enable_rx = napi_enable_rx,
+ .napi_disable_rx = napi_disable_rx,
+ .rx_bd_done = rx_bd_done,
+ .tx_kickstart = tx_kickstart,
+ .get_int_events = get_int_events,
+ .clear_int_events = clear_int_events,
+ .ev_error = ev_error,
+ .get_regs = get_regs,
+ .get_regs_len = get_regs_len,
+ .tx_restart = tx_restart,
+ .allocate_bd = allocate_bd,
+ .free_bd = free_bd,
+};
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
new file mode 100644
index 000000000000..22a02a767069
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -0,0 +1,484 @@
+/*
+ * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_8xx
+#include <asm/8xx_immap.h>
+#include <asm/pgtable.h>
+#include <asm/mpc8xx.h>
+#include <asm/cpm1.h>
+#endif
+
+#include "fs_enet.h"
+
+/*************************************************/
+#if defined(CONFIG_CPM1)
+/* for an 8xx the __raw_xxx accessors are sufficient */
+#define __fs_out32(addr, x) __raw_writel(x, addr)
+#define __fs_out16(addr, x) __raw_writew(x, addr)
+#define __fs_out8(addr, x) __raw_writeb(x, addr)
+#define __fs_in32(addr) __raw_readl(addr)
+#define __fs_in16(addr) __raw_readw(addr)
+#define __fs_in8(addr) __raw_readb(addr)
+#else
+/* for others play it safe */
+#define __fs_out32(addr, x) out_be32(addr, x)
+#define __fs_out16(addr, x) out_be16(addr, x)
+#define __fs_in32(addr) in_be32(addr)
+#define __fs_in16(addr) in_be16(addr)
+#define __fs_out8(addr, x) out_8(addr, x)
+#define __fs_in8(addr) in_8(addr)
+#endif
+
+/* write, read, set bits, clear bits */
+#define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v))
+#define R32(_p, _m) __fs_in32(&(_p)->_m)
+#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
+#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
+
+#define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v))
+#define R16(_p, _m) __fs_in16(&(_p)->_m)
+#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
+#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
+
+#define W8(_p, _m, _v) __fs_out8(&(_p)->_m, (_v))
+#define R8(_p, _m) __fs_in8(&(_p)->_m)
+#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
+#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
+
+#define SCC_MAX_MULTICAST_ADDRS 64
+
+/*
+ * Delay to wait for SCC reset command to complete (in us)
+ */
+#define SCC_RESET_DELAY 50
+
+static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
+{
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ return cpm_command(fpi->cp_command, op);
+}
+
+static int do_pd_setup(struct fs_enet_private *fep)
+{
+ struct platform_device *ofdev = to_platform_device(fep->dev);
+
+ fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
+ if (fep->interrupt == NO_IRQ)
+ return -EINVAL;
+
+ fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
+ if (!fep->scc.sccp)
+ return -EINVAL;
+
+ fep->scc.ep = of_iomap(ofdev->dev.of_node, 1);
+ if (!fep->scc.ep) {
+ iounmap(fep->scc.sccp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB)
+#define SCC_RX_EVENT (SCCE_ENET_RXF)
+#define SCC_TX_EVENT (SCCE_ENET_TXB)
+#define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY)
+
+static int setup_data(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (do_pd_setup(fep) != 0)
+ return -EINVAL;
+
+ fep->scc.hthi = 0;
+ fep->scc.htlo = 0;
+
+ fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
+ fep->ev_rx = SCC_RX_EVENT;
+ fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE;
+ fep->ev_err = SCC_ERR_EVENT_MSK;
+
+ return 0;
+}
+
+static int allocate_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) *
+ sizeof(cbd_t), 8);
+ if (IS_ERR_VALUE(fep->ring_mem_addr))
+ return -ENOMEM;
+
+ fep->ring_base = (void __iomem __force*)
+ cpm_dpram_addr(fep->ring_mem_addr);
+
+ return 0;
+}
+
+static void free_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (fep->ring_base)
+ cpm_dpfree(fep->ring_mem_addr);
+}
+
+static void cleanup_data(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static void set_promiscuous_mode(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ S16(sccp, scc_psmr, SCC_PSMR_PRO);
+}
+
+static void set_multicast_start(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_enet_t __iomem *ep = fep->scc.ep;
+
+ W16(ep, sen_gaddr1, 0);
+ W16(ep, sen_gaddr2, 0);
+ W16(ep, sen_gaddr3, 0);
+ W16(ep, sen_gaddr4, 0);
+}
+
+static void set_multicast_one(struct net_device *dev, const u8 * mac)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_enet_t __iomem *ep = fep->scc.ep;
+ u16 taddrh, taddrm, taddrl;
+
+ taddrh = ((u16) mac[5] << 8) | mac[4];
+ taddrm = ((u16) mac[3] << 8) | mac[2];
+ taddrl = ((u16) mac[1] << 8) | mac[0];
+
+ W16(ep, sen_taddrh, taddrh);
+ W16(ep, sen_taddrm, taddrm);
+ W16(ep, sen_taddrl, taddrl);
+ scc_cr_cmd(fep, CPM_CR_SET_GADDR);
+}
+
+static void set_multicast_finish(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+ scc_enet_t __iomem *ep = fep->scc.ep;
+
+ /* clear promiscuous always */
+ C16(sccp, scc_psmr, SCC_PSMR_PRO);
+
+ /* if all multi or too many multicasts; just enable all */
+ if ((dev->flags & IFF_ALLMULTI) != 0 ||
+ netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) {
+
+ W16(ep, sen_gaddr1, 0xffff);
+ W16(ep, sen_gaddr2, 0xffff);
+ W16(ep, sen_gaddr3, 0xffff);
+ W16(ep, sen_gaddr4, 0xffff);
+ }
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+
+ if ((dev->flags & IFF_PROMISC) == 0) {
+ set_multicast_start(dev);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
+ set_multicast_finish(dev);
+ } else
+ set_promiscuous_mode(dev);
+}
+
+/*
+ * This function is called to start or restart the SCC during a link
+ * change. This only happens when switching between half and full
+ * duplex.
+ */
+static void restart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+ scc_enet_t __iomem *ep = fep->scc.ep;
+ const struct fs_platform_info *fpi = fep->fpi;
+ u16 paddrh, paddrm, paddrl;
+ const unsigned char *mac;
+ int i;
+
+ C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+
+ /* clear everything (slow & steady does it) */
+ for (i = 0; i < sizeof(*ep); i++)
+ __fs_out8((u8 __iomem *)ep + i, 0);
+
+ /* point to bds */
+ W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
+ W16(ep, sen_genscc.scc_tbase,
+ fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);
+
+ /* Initialize function code registers for big-endian.
+ */
+#ifndef CONFIG_NOT_COHERENT_CACHE
+ W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL);
+ W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL);
+#else
+ W8(ep, sen_genscc.scc_rfcr, SCC_EB);
+ W8(ep, sen_genscc.scc_tfcr, SCC_EB);
+#endif
+
+ /* Set maximum bytes per receive buffer.
+ * This appears to be an Ethernet frame size, not the buffer
+ * fragment size. It must be a multiple of four.
+ */
+ W16(ep, sen_genscc.scc_mrblr, 0x5f0);
+
+ /* Set CRC preset and mask.
+ */
+ W32(ep, sen_cpres, 0xffffffff);
+ W32(ep, sen_cmask, 0xdebb20e3);
+
+ W32(ep, sen_crcec, 0); /* CRC Error counter */
+ W32(ep, sen_alec, 0); /* alignment error counter */
+ W32(ep, sen_disfc, 0); /* discard frame counter */
+
+ W16(ep, sen_pads, 0x8888); /* Tx short frame pad character */
+ W16(ep, sen_retlim, 15); /* Retry limit threshold */
+
+ W16(ep, sen_maxflr, 0x5ee); /* maximum frame length register */
+
+ W16(ep, sen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */
+
+ W16(ep, sen_maxd1, 0x000005f0); /* maximum DMA1 length */
+ W16(ep, sen_maxd2, 0x000005f0); /* maximum DMA2 length */
+
+ /* Clear hash tables.
+ */
+ W16(ep, sen_gaddr1, 0);
+ W16(ep, sen_gaddr2, 0);
+ W16(ep, sen_gaddr3, 0);
+ W16(ep, sen_gaddr4, 0);
+ W16(ep, sen_iaddr1, 0);
+ W16(ep, sen_iaddr2, 0);
+ W16(ep, sen_iaddr3, 0);
+ W16(ep, sen_iaddr4, 0);
+
+ /* set address
+ */
+ mac = dev->dev_addr;
+ paddrh = ((u16) mac[5] << 8) | mac[4];
+ paddrm = ((u16) mac[3] << 8) | mac[2];
+ paddrl = ((u16) mac[1] << 8) | mac[0];
+
+ W16(ep, sen_paddrh, paddrh);
+ W16(ep, sen_paddrm, paddrm);
+ W16(ep, sen_paddrl, paddrl);
+
+ W16(ep, sen_pper, 0);
+ W16(ep, sen_taddrl, 0);
+ W16(ep, sen_taddrm, 0);
+ W16(ep, sen_taddrh, 0);
+
+ fs_init_bds(dev);
+
+ scc_cr_cmd(fep, CPM_CR_INIT_TRX);
+
+ W16(sccp, scc_scce, 0xffff);
+
+ /* Enable interrupts we wish to service.
+ */
+ W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);
+
+ /* Set GSMR_H to enable all normal operating modes.
+ * Set GSMR_L to enable Ethernet to MC68160.
+ */
+ W32(sccp, scc_gsmrh, 0);
+ W32(sccp, scc_gsmrl,
+ SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 |
+ SCC_GSMRL_MODE_ENET);
+
+ /* Set sync/delimiters.
+ */
+ W16(sccp, scc_dsr, 0xd555);
+
+ /* Set processing mode. Use Ethernet CRC, catch broadcast, and
+ * start frame search 22 bit times after RENA.
+ */
+ W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
+
+ /* Set full duplex mode if needed */
+ if (fep->phydev->duplex)
+ S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
+
+ S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+}
+
+static void stop(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+ int i;
+
+ for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++)
+ udelay(1);
+
+ if (i == SCC_RESET_DELAY)
+ dev_warn(fep->dev, "SCC timeout on graceful transmit stop\n");
+
+ W16(sccp, scc_sccm, 0);
+ C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+
+ fs_cleanup_bds(dev);
+}
+
+static void napi_clear_rx_event(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_enable_rx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_disable_rx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
+}
+
+static void rx_bd_done(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static void tx_kickstart(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static u32 get_int_events(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ return (u32) R16(sccp, scc_scce);
+}
+
+static void clear_int_events(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ W16(sccp, scc_scce, int_events & 0xffff);
+}
+
+static void ev_error(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ dev_warn(fep->dev, "SCC ERROR(s) 0x%x\n", int_events);
+}
+
+static int get_regs(struct net_device *dev, void *p, int *sizep)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t __iomem *))
+ return -EINVAL;
+
+ memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
+ p = (char *)p + sizeof(scc_t);
+
+ memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *));
+
+ return 0;
+}
+
+static int get_regs_len(struct net_device *dev)
+{
+ return sizeof(scc_t) + sizeof(scc_enet_t __iomem *);
+}
+
+static void tx_restart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ scc_cr_cmd(fep, CPM_CR_RESTART_TX);
+}
+
+/*************************************************************************/
+
+const struct fs_ops fs_scc_ops = {
+ .setup_data = setup_data,
+ .cleanup_data = cleanup_data,
+ .set_multicast_list = set_multicast_list,
+ .restart = restart,
+ .stop = stop,
+ .napi_clear_rx_event = napi_clear_rx_event,
+ .napi_enable_rx = napi_enable_rx,
+ .napi_disable_rx = napi_disable_rx,
+ .rx_bd_done = rx_bd_done,
+ .tx_kickstart = tx_kickstart,
+ .get_int_events = get_int_events,
+ .clear_int_events = clear_int_events,
+ .ev_error = ev_error,
+ .get_regs = get_regs,
+ .get_regs_len = get_regs_len,
+ .tx_restart = tx_restart,
+ .allocate_bd = allocate_bd,
+ .free_bd = free_bd,
+};
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
new file mode 100644
index 000000000000..b09270b5d0a5
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -0,0 +1,246 @@
+/*
+ * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/platform_device.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+
+#include "fs_enet.h"
+
+struct bb_info {
+ struct mdiobb_ctrl ctrl;
+ __be32 __iomem *dir;
+ __be32 __iomem *dat;
+ u32 mdio_msk;
+ u32 mdc_msk;
+};
+
+/* FIXME: If any other users of GPIO crop up, then these will have to
+ * have some sort of global synchronization to avoid races with other
+ * pins on the same port. The ideal solution would probably be to
+ * bind the ports to a GPIO driver, and have this be a client of it.
+ */
+static inline void bb_set(u32 __iomem *p, u32 m)
+{
+ out_be32(p, in_be32(p) | m);
+}
+
+static inline void bb_clr(u32 __iomem *p, u32 m)
+{
+ out_be32(p, in_be32(p) & ~m);
+}
+
+static inline int bb_read(u32 __iomem *p, u32 m)
+{
+ return (in_be32(p) & m) != 0;
+}
+
+static inline void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+ if (dir)
+ bb_set(bitbang->dir, bitbang->mdio_msk);
+ else
+ bb_clr(bitbang->dir, bitbang->mdio_msk);
+
+ /* Read back to flush the write. */
+ in_be32(bitbang->dir);
+}
+
+static inline int mdio_read(struct mdiobb_ctrl *ctrl)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+ return bb_read(bitbang->dat, bitbang->mdio_msk);
+}
+
+static inline void mdio(struct mdiobb_ctrl *ctrl, int what)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+ if (what)
+ bb_set(bitbang->dat, bitbang->mdio_msk);
+ else
+ bb_clr(bitbang->dat, bitbang->mdio_msk);
+
+ /* Read back to flush the write. */
+ in_be32(bitbang->dat);
+}
+
+static inline void mdc(struct mdiobb_ctrl *ctrl, int what)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+ if (what)
+ bb_set(bitbang->dat, bitbang->mdc_msk);
+ else
+ bb_clr(bitbang->dat, bitbang->mdc_msk);
+
+ /* Read back to flush the write. */
+ in_be32(bitbang->dat);
+}
+
+static struct mdiobb_ops bb_ops = {
+ .owner = THIS_MODULE,
+ .set_mdc = mdc,
+ .set_mdio_dir = mdio_dir,
+ .set_mdio_data = mdio,
+ .get_mdio_data = mdio_read,
+};
+
+static int __devinit fs_mii_bitbang_init(struct mii_bus *bus,
+ struct device_node *np)
+{
+ struct resource res;
+ const u32 *data;
+ int mdio_pin, mdc_pin, len;
+ struct bb_info *bitbang = bus->priv;
+
+ int ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ return ret;
+
+ if (resource_size(&res) <= 13)
+ return -ENODEV;
+
+ /* This should really encode the pin number as well, but all
+ * we get is an int, and the odds of multiple bitbang mdio buses
+ * are low enough that it's not worth going too crazy.
+ */
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
+
+ data = of_get_property(np, "fsl,mdio-pin", &len);
+ if (!data || len != 4)
+ return -ENODEV;
+ mdio_pin = *data;
+
+ data = of_get_property(np, "fsl,mdc-pin", &len);
+ if (!data || len != 4)
+ return -ENODEV;
+ mdc_pin = *data;
+
+ bitbang->dir = ioremap(res.start, resource_size(&res));
+ if (!bitbang->dir)
+ return -ENOMEM;
+
+ bitbang->dat = bitbang->dir + 4;
+ bitbang->mdio_msk = 1 << (31 - mdio_pin);
+ bitbang->mdc_msk = 1 << (31 - mdc_pin);
+
+ return 0;
+}
+
+static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
+{
+ struct mii_bus *new_bus;
+ struct bb_info *bitbang;
+ int ret = -ENOMEM;
+
+ bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
+ if (!bitbang)
+ goto out;
+
+ bitbang->ctrl.ops = &bb_ops;
+
+ new_bus = alloc_mdio_bitbang(&bitbang->ctrl);
+ if (!new_bus)
+ goto out_free_priv;
+
+ new_bus->name = "CPM2 Bitbanged MII";
+
+ ret = fs_mii_bitbang_init(new_bus, ofdev->dev.of_node);
+ if (ret)
+ goto out_free_bus;
+
+ new_bus->phy_mask = ~0;
+ new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!new_bus->irq)
+ goto out_unmap_regs;
+
+ new_bus->parent = &ofdev->dev;
+ dev_set_drvdata(&ofdev->dev, new_bus);
+
+ ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
+ if (ret)
+ goto out_free_irqs;
+
+ return 0;
+
+out_free_irqs:
+ dev_set_drvdata(&ofdev->dev, NULL);
+ kfree(new_bus->irq);
+out_unmap_regs:
+ iounmap(bitbang->dir);
+out_free_bus:
+ free_mdio_bitbang(new_bus);
+out_free_priv:
+ kfree(bitbang);
+out:
+ return ret;
+}
+
+static int fs_enet_mdio_remove(struct platform_device *ofdev)
+{
+ struct mii_bus *bus = dev_get_drvdata(&ofdev->dev);
+ struct bb_info *bitbang = bus->priv;
+
+ mdiobus_unregister(bus);
+ dev_set_drvdata(&ofdev->dev, NULL);
+ kfree(bus->irq);
+ free_mdio_bitbang(bus);
+ iounmap(bitbang->dir);
+ kfree(bitbang);
+
+ return 0;
+}
+
+static struct of_device_id fs_enet_mdio_bb_match[] = {
+ {
+ .compatible = "fsl,cpm2-mdio-bitbang",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match);
+
+static struct platform_driver fs_enet_bb_mdio_driver = {
+ .driver = {
+ .name = "fsl-bb-mdio",
+ .owner = THIS_MODULE,
+ .of_match_table = fs_enet_mdio_bb_match,
+ },
+ .probe = fs_enet_mdio_probe,
+ .remove = fs_enet_mdio_remove,
+};
+
+static int fs_enet_mdio_bb_init(void)
+{
+ return platform_driver_register(&fs_enet_bb_mdio_driver);
+}
+
+static void fs_enet_mdio_bb_exit(void)
+{
+ platform_driver_unregister(&fs_enet_bb_mdio_driver);
+}
+
+module_init(fs_enet_mdio_bb_init);
+module_exit(fs_enet_mdio_bb_exit);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
new file mode 100644
index 000000000000..e0e9d6c35d83
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -0,0 +1,251 @@
+/*
+ * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/mpc5xxx.h>
+
+#include "fs_enet.h"
+#include "fec.h"
+
+/* Make MII read/write commands for the FEC. */
+#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
+#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
+#define mk_mii_end 0
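+
+/*
+ * For illustration (value worked out from the macros above, not taken
+ * from a manual): reading register 1 of the PHY at address 2 builds the
+ * command word (2 << 23) | mk_mii_read(1) == 0x61060000 -- the opcode
+ * and turnaround template in the high bits, the PHY address in bits
+ * 27:23, the register address in bits 22:18, and the data field in the
+ * low 16 bits.
+ */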
+
+#define FEC_MII_LOOPS 10000
+
+static int fs_enet_fec_mii_read(struct mii_bus *bus, int phy_id, int location)
+{
+ struct fec_info *fec = bus->priv;
+ struct fec __iomem *fecp = fec->fecp;
+ int i, ret = -1;
+
+ BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0);
+
+ /* Add PHY address to register command. */
+ out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location));
+
+ for (i = 0; i < FEC_MII_LOOPS; i++)
+ if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
+ break;
+
+ if (i < FEC_MII_LOOPS) {
+ out_be32(&fecp->fec_ievent, FEC_ENET_MII);
+ ret = in_be32(&fecp->fec_mii_data) & 0xffff;
+ }
+
+ return ret;
+}
+
+static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val)
+{
+ struct fec_info *fec = bus->priv;
+ struct fec __iomem *fecp = fec->fecp;
+ int i;
+
+ /* this must never happen */
+ BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0);
+
+ /* Add PHY address to register command. */
+ out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val));
+
+ for (i = 0; i < FEC_MII_LOOPS; i++)
+ if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
+ break;
+
+ if (i < FEC_MII_LOOPS)
+ out_be32(&fecp->fec_ievent, FEC_ENET_MII);
+
+ return 0;
+}
+
+static int fs_enet_fec_mii_reset(struct mii_bus *bus)
+{
+ /* nothing here - for now */
+ return 0;
+}
+
+static struct of_device_id fs_enet_mdio_fec_match[];
+static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
+{
+ const struct of_device_id *match;
+ struct resource res;
+ struct mii_bus *new_bus;
+ struct fec_info *fec;
+ int (*get_bus_freq)(struct device_node *);
+ int ret = -ENOMEM, clock, speed;
+
+ match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev);
+ if (!match)
+ return -EINVAL;
+ get_bus_freq = match->data;
+
+ new_bus = mdiobus_alloc();
+ if (!new_bus)
+ goto out;
+
+ fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL);
+ if (!fec)
+ goto out_mii;
+
+ new_bus->priv = fec;
+ new_bus->name = "FEC MII Bus";
+ new_bus->read = &fs_enet_fec_mii_read;
+ new_bus->write = &fs_enet_fec_mii_write;
+ new_bus->reset = &fs_enet_fec_mii_reset;
+
+ ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
+ if (ret)
+ goto out_res;
+
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
+
+ fec->fecp = ioremap(res.start, resource_size(&res));
+ if (!fec->fecp)
+ goto out_fec;
+
+ if (get_bus_freq) {
+ clock = get_bus_freq(ofdev->dev.of_node);
+ if (!clock) {
+ /* Use maximum divider if clock is unknown */
+ dev_warn(&ofdev->dev, "could not determine IPS clock\n");
+ clock = 0x3F * 5000000;
+ }
+ } else
+ clock = ppc_proc_freq;
+
+ /*
+ * Scale for a MII clock <= 2.5 MHz
+ * Note that only 6 bits (25:30) are available for MII speed.
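+ * For example (bus clock chosen for illustration): a 66 MHz clock gives
+ * speed = ceil(66000000 / 5000000) = 14; assuming the controller divides
+ * the bus clock by 2 * speed, as the 5 MHz divisor and the 2.5 MHz limit
+ * above imply, MDC then runs at roughly 66 MHz / 28 ~= 2.36 MHz.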
+ */
+ speed = (clock + 4999999) / 5000000;
+ if (speed > 0x3F) {
+ speed = 0x3F;
+ dev_err(&ofdev->dev,
+ "MII clock (%d Hz) exceeds max (2.5 MHz)\n",
+ clock / speed);
+ }
+
+ fec->mii_speed = speed << 1;
+
+ setbits32(&fec->fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE);
+ setbits32(&fec->fecp->fec_ecntrl, FEC_ECNTRL_PINMUX |
+ FEC_ECNTRL_ETHER_EN);
+ out_be32(&fec->fecp->fec_ievent, FEC_ENET_MII);
+ clrsetbits_be32(&fec->fecp->fec_mii_speed, 0x7E, fec->mii_speed);
+
+ new_bus->phy_mask = ~0;
+ new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!new_bus->irq)
+ goto out_unmap_regs;
+
+ new_bus->parent = &ofdev->dev;
+ dev_set_drvdata(&ofdev->dev, new_bus);
+
+ ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
+ if (ret)
+ goto out_free_irqs;
+
+ return 0;
+
+out_free_irqs:
+ dev_set_drvdata(&ofdev->dev, NULL);
+ kfree(new_bus->irq);
+out_unmap_regs:
+ iounmap(fec->fecp);
+out_res:
+out_fec:
+ kfree(fec);
+out_mii:
+ mdiobus_free(new_bus);
+out:
+ return ret;
+}
+
+static int fs_enet_mdio_remove(struct platform_device *ofdev)
+{
+ struct mii_bus *bus = dev_get_drvdata(&ofdev->dev);
+ struct fec_info *fec = bus->priv;
+
+ mdiobus_unregister(bus);
+ dev_set_drvdata(&ofdev->dev, NULL);
+ kfree(bus->irq);
+ iounmap(fec->fecp);
+ kfree(fec);
+ mdiobus_free(bus);
+
+ return 0;
+}
+
+static struct of_device_id fs_enet_mdio_fec_match[] = {
+ {
+ .compatible = "fsl,pq1-fec-mdio",
+ },
+#if defined(CONFIG_PPC_MPC512x)
+ {
+ .compatible = "fsl,mpc5121-fec-mdio",
+ .data = mpc5xxx_get_bus_frequency,
+ },
+#endif
+ {},
+};
+MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);
+
+static struct platform_driver fs_enet_fec_mdio_driver = {
+ .driver = {
+ .name = "fsl-fec-mdio",
+ .owner = THIS_MODULE,
+ .of_match_table = fs_enet_mdio_fec_match,
+ },
+ .probe = fs_enet_mdio_probe,
+ .remove = fs_enet_mdio_remove,
+};
+
+static int fs_enet_mdio_fec_init(void)
+{
+ return platform_driver_register(&fs_enet_fec_mdio_driver);
+}
+
+static void fs_enet_mdio_fec_exit(void)
+{
+ platform_driver_unregister(&fs_enet_fec_mdio_driver);
+}
+
+module_init(fs_enet_mdio_fec_init);
+module_exit(fs_enet_mdio_fec_exit);
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
new file mode 100644
index 000000000000..52f4e8ad48e7
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -0,0 +1,494 @@
+/*
+ * Freescale PowerQUICC Ethernet Driver -- MIIM bus implementation
+ * Provides Bus interface for MIIM regs
+ *
+ * Author: Andy Fleming <afleming@freescale.com>
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
+ *
+ * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
+ *
+ * Based on gianfar_mii.c and ucc_geth_mii.c (Li Yang, Kim Phillips)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/ucc.h>
+
+#include "gianfar.h"
+#include "fsl_pq_mdio.h"
+
+struct fsl_pq_mdio_priv {
+ void __iomem *map;
+ struct fsl_pq_mdio __iomem *regs;
+};
+
+/*
+ * Write value to the PHY at mii_id, register regnum, on the bus attached
+ * to the local interface (which may be different from the generic mdio
+ * bus, which is tied to a single interface), waiting until the write is
+ * done before returning. This is useful for programming interfaces like
+ * the TBI which, in turn, controls interfaces like on-chip SERDES and is
+ * always tied to the local mdio pins; these may not be the same as the
+ * system mdio bus used to control the external PHYs.
+ */
+int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
+ int regnum, u16 value)
+{
+ /* Set the PHY address and the register address we want to write */
+ out_be32(&regs->miimadd, (mii_id << 8) | regnum);
+
+ /* Write out the value we want */
+ out_be32(&regs->miimcon, value);
+
+ /* Wait for the transaction to finish */
+ while (in_be32(&regs->miimind) & MIIMIND_BUSY)
+ cpu_relax();
+
+ return 0;
+}
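+
+/*
+ * A minimal usage sketch (illustrative only: MII_BMCR and BMCR_RESET
+ * come from linux/mii.h, and tbi_addr is an assumed TBI PHY address):
+ *
+ *	fsl_pq_local_mdio_write(regs, tbi_addr, MII_BMCR, BMCR_RESET);
+ */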
+
+/*
+ * Read the bus for the PHY at addr mii_id, register regnum, and return
+ * the value. Clears miimcom first. All PHY operations are done on the
+ * bus attached to the local interface, which may be different from the
+ * generic mdio bus. This is useful for programming interfaces like the
+ * TBI which, in turn, controls interfaces like on-chip SERDES and is
+ * always tied to the local mdio pins; these may not be the same as the
+ * system mdio bus used to control the external PHYs.
+ */
+int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
+ int mii_id, int regnum)
+{
+ u16 value;
+
+ /* Set the PHY address and the register address we want to read */
+ out_be32(&regs->miimadd, (mii_id << 8) | regnum);
+
+ /* Clear miimcom, and then initiate a read */
+ out_be32(&regs->miimcom, 0);
+ out_be32(&regs->miimcom, MII_READ_COMMAND);
+
+ /* Wait for the transaction to finish */
+ while (in_be32(&regs->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY))
+ cpu_relax();
+
+ /* Grab the value of the register from miimstat */
+ value = in_be32(&regs->miimstat);
+
+ return value;
+}
+
+static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
+{
+ struct fsl_pq_mdio_priv *priv = bus->priv;
+
+ return priv->regs;
+}
+
+/*
+ * Write value to the PHY at mii_id at register regnum,
+ * on the bus, waiting until the write is done before returning.
+ */
+int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
+{
+ struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
+
+ /* Write to the local MII regs */
+ return fsl_pq_local_mdio_write(regs, mii_id, regnum, value);
+}
+
+/*
+ * Read the bus for PHY at addr mii_id, register regnum, and
+ * return the value. Clears miimcom first.
+ */
+int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
+
+ /* Read the local MII regs */
+ return fsl_pq_local_mdio_read(regs, mii_id, regnum);
+}
+
+/* Reset the MIIM registers, and wait for the bus to free */
+static int fsl_pq_mdio_reset(struct mii_bus *bus)
+{
+ struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
+ int timeout = PHY_INIT_TIMEOUT;
+
+ mutex_lock(&bus->mdio_lock);
+
+ /* Reset the management interface */
+ out_be32(&regs->miimcfg, MIIMCFG_RESET);
+
+ /* Setup the MII Mgmt clock speed */
+ out_be32(&regs->miimcfg, MIIMCFG_INIT_VALUE);
+
+ /* Wait until the bus is free */
+ while ((in_be32(&regs->miimind) & MIIMIND_BUSY) && timeout--)
+ cpu_relax();
+
+ mutex_unlock(&bus->mdio_lock);
+
+ if (timeout < 0) {
+ printk(KERN_ERR "%s: The MII Bus is stuck!\n",
+ bus->name);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
+{
+ const u32 *addr;
+ u64 taddr = OF_BAD_ADDR;
+
+ addr = of_get_address(np, 0, NULL, NULL);
+ if (addr)
+ taddr = of_translate_address(np, addr);
+
+ snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name,
+ (unsigned long long)taddr);
+}
+EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
+
+/* Scan the bus in reverse, looking for an empty spot */
+static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
+{
+ int i;
+
+ for (i = PHY_MAX_ADDR; i > 0; i--) {
+ u32 phy_id;
+
+ if (get_phy_id(new_bus, i, &phy_id))
+ return -1;
+
+ if (phy_id == 0xffffffff)
+ break;
+ }
+
+ return i;
+}
+
+
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
+{
+ struct gfar __iomem *enet_regs;
+
+ /*
+ * This is mildly evil, but so is our hardware for doing this.
+ * Also, we have to cast back to struct gfar because of
+ * definition weirdness done in gianfar.h.
+ */
+ if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
+ of_device_is_compatible(np, "fsl,gianfar-tbi") ||
+ of_device_is_compatible(np, "gianfar")) {
+ enet_regs = (struct gfar __iomem *)regs;
+ return &enet_regs->tbipa;
+ } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
+ of_device_is_compatible(np, "fsl,etsec2-tbi")) {
+ return of_iomap(np, 1);
+ } else
+ return NULL;
+}
+#endif
+
+
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
+{
+ struct device_node *np = NULL;
+ int err = 0;
+
+ for_each_compatible_node(np, NULL, "ucc_geth") {
+ struct resource tempres;
+
+ err = of_address_to_resource(np, 0, &tempres);
+ if (err)
+ continue;
+
+ /* if our mdio regs fall within this UCC regs range */
+ if ((start >= tempres.start) && (end <= tempres.end)) {
+ /* Find the id of the UCC */
+ const u32 *id;
+
+ id = of_get_property(np, "cell-index", NULL);
+ if (!id) {
+ id = of_get_property(np, "device-id", NULL);
+ if (!id)
+ continue;
+ }
+
+ *ucc_id = *id;
+
+ return 0;
+ }
+ }
+
+ if (err)
+ return err;
+ else
+ return -EINVAL;
+}
+#endif
+
+
+static int fsl_pq_mdio_probe(struct platform_device *ofdev)
+{
+ struct device_node *np = ofdev->dev.of_node;
+ struct device_node *tbi;
+ struct fsl_pq_mdio_priv *priv;
+ struct fsl_pq_mdio __iomem *regs = NULL;
+ void __iomem *map;
+ u32 __iomem *tbipa;
+ struct mii_bus *new_bus;
+ int tbiaddr = -1;
+ const u32 *addrp;
+ u64 addr = 0, size = 0;
+ int err;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ new_bus = mdiobus_alloc();
+ if (!new_bus) {
+ err = -ENOMEM;
+ goto err_free_priv;
+ }
+
+ new_bus->name = "Freescale PowerQUICC MII Bus";
+ new_bus->read = &fsl_pq_mdio_read;
+ new_bus->write = &fsl_pq_mdio_write;
+ new_bus->reset = &fsl_pq_mdio_reset;
+ new_bus->priv = priv;
+ fsl_pq_mdio_bus_name(new_bus->id, np);
+
+ addrp = of_get_address(np, 0, &size, NULL);
+ if (!addrp) {
+ err = -EINVAL;
+ goto err_free_bus;
+ }
+
+ /* Set the PHY base address */
+ addr = of_translate_address(np, addrp);
+ if (addr == OF_BAD_ADDR) {
+ err = -EINVAL;
+ goto err_free_bus;
+ }
+
+ map = ioremap(addr, size);
+ if (!map) {
+ err = -ENOMEM;
+ goto err_free_bus;
+ }
+ priv->map = map;
+
+ if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
+ of_device_is_compatible(np, "fsl,gianfar-tbi") ||
+ of_device_is_compatible(np, "fsl,ucc-mdio") ||
+ of_device_is_compatible(np, "ucc_geth_phy"))
+ map -= offsetof(struct fsl_pq_mdio, miimcfg);
+ regs = map;
+ priv->regs = regs;
+
+ new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
+
+ if (!new_bus->irq) {
+ err = -ENOMEM;
+ goto err_unmap_regs;
+ }
+
+ new_bus->parent = &ofdev->dev;
+ dev_set_drvdata(&ofdev->dev, new_bus);
+
+ if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
+ of_device_is_compatible(np, "fsl,gianfar-tbi") ||
+ of_device_is_compatible(np, "fsl,etsec2-mdio") ||
+ of_device_is_compatible(np, "fsl,etsec2-tbi") ||
+ of_device_is_compatible(np, "gianfar")) {
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+ tbipa = get_gfar_tbipa(regs, np);
+ if (!tbipa) {
+ err = -EINVAL;
+ goto err_free_irqs;
+ }
+#else
+ err = -ENODEV;
+ goto err_free_irqs;
+#endif
+ } else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
+ of_device_is_compatible(np, "ucc_geth_phy")) {
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+ u32 id;
+ static u32 mii_mng_master;
+
+ tbipa = &regs->utbipar;
+
+ err = get_ucc_id_for_range(addr, addr + size, &id);
+ if (err)
+ goto err_free_irqs;
+
+ if (!mii_mng_master) {
+ mii_mng_master = id;
+ ucc_set_qe_mux_mii_mng(id - 1);
+ }
+#else
+ err = -ENODEV;
+ goto err_free_irqs;
+#endif
+ } else {
+ err = -ENODEV;
+ goto err_free_irqs;
+ }
+
+ for_each_child_of_node(np, tbi) {
+ if (!strncmp(tbi->type, "tbi-phy", 8))
+ break;
+ }
+
+ if (tbi) {
+ const u32 *prop = of_get_property(tbi, "reg", NULL);
+
+ if (prop)
+ tbiaddr = *prop;
+ }
+
+ if (tbiaddr == -1) {
+ out_be32(tbipa, 0);
+
+ tbiaddr = fsl_pq_mdio_find_free(new_bus);
+ }
+
+ /*
+ * We define TBIPA at 0 to be illegal, opting to fail for boards that
+ * have PHYs at 1-31, rather than change tbipa and rescan.
+ */
+ if (tbiaddr == 0) {
+ err = -EBUSY;
+
+ goto err_free_irqs;
+ }
+
+ out_be32(tbipa, tbiaddr);
+
+ err = of_mdiobus_register(new_bus, np);
+ if (err) {
+ printk(KERN_ERR "%s: Cannot register as MDIO bus\n",
+ new_bus->name);
+ goto err_free_irqs;
+ }
+
+ return 0;
+
+err_free_irqs:
+ kfree(new_bus->irq);
+err_unmap_regs:
+ iounmap(priv->map);
+err_free_bus:
+ kfree(new_bus);
+err_free_priv:
+ kfree(priv);
+ return err;
+}
+
+
+static int fsl_pq_mdio_remove(struct platform_device *ofdev)
+{
+ struct device *device = &ofdev->dev;
+ struct mii_bus *bus = dev_get_drvdata(device);
+ struct fsl_pq_mdio_priv *priv = bus->priv;
+
+ mdiobus_unregister(bus);
+
+ dev_set_drvdata(device, NULL);
+
+ iounmap(priv->map);
+ bus->priv = NULL;
+ mdiobus_free(bus);
+ kfree(priv);
+
+ return 0;
+}
+
+static struct of_device_id fsl_pq_mdio_match[] = {
+ {
+ .type = "mdio",
+ .compatible = "ucc_geth_phy",
+ },
+ {
+ .type = "mdio",
+ .compatible = "gianfar",
+ },
+ {
+ .compatible = "fsl,ucc-mdio",
+ },
+ {
+ .compatible = "fsl,gianfar-tbi",
+ },
+ {
+ .compatible = "fsl,gianfar-mdio",
+ },
+ {
+ .compatible = "fsl,etsec2-tbi",
+ },
+ {
+ .compatible = "fsl,etsec2-mdio",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
+
+static struct platform_driver fsl_pq_mdio_driver = {
+ .driver = {
+ .name = "fsl-pq_mdio",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_pq_mdio_match,
+ },
+ .probe = fsl_pq_mdio_probe,
+ .remove = fsl_pq_mdio_remove,
+};
+
+int __init fsl_pq_mdio_init(void)
+{
+ return platform_driver_register(&fsl_pq_mdio_driver);
+}
+module_init(fsl_pq_mdio_init);
+
+void fsl_pq_mdio_exit(void)
+{
+ platform_driver_unregister(&fsl_pq_mdio_driver);
+}
+module_exit(fsl_pq_mdio_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.h b/drivers/net/ethernet/freescale/fsl_pq_mdio.h
new file mode 100644
index 000000000000..bd17a2a0139b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.h
@@ -0,0 +1,52 @@
+/*
+ * Freescale PowerQUICC MDIO Driver -- MII Management Bus Implementation
+ * Driver for the MDIO bus controller on Freescale PowerQUICC processors
+ *
+ * Author: Andy Fleming
+ * Modifier: Sandeep Gopalpet
+ *
+ * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __FSL_PQ_MDIO_H
+#define __FSL_PQ_MDIO_H
+
+#define MIIMIND_BUSY 0x00000001
+#define MIIMIND_NOTVALID 0x00000004
+#define MIIMCFG_INIT_VALUE 0x00000007
+#define MIIMCFG_RESET 0x80000000
+
+#define MII_READ_COMMAND 0x00000001
+
+struct fsl_pq_mdio {
+ u8 res1[16];
+ u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/
+ u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/
+ u8 res2[4];
+ u32 emapm; /* MDIO Event mapping register (for etsec2)*/
+ u8 res3[1280];
+ u32 miimcfg; /* MII management configuration reg */
+ u32 miimcom; /* MII management command reg */
+ u32 miimadd; /* MII management address reg */
+ u32 miimcon; /* MII management control reg */
+ u32 miimstat; /* MII management status reg */
+ u32 miimind; /* MII management indication reg */
+ u8 reserved[28]; /* Space holder */
+ u32 utbipar; /* TBI phy address reg (only on UCC) */
+ u8 res4[2728];
+} __packed;
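+
+/*
+ * Note: the reserved fields above place miimcfg at offset 0x520 from the
+ * start of the block (16 + 4 * 4 + 1280 = 1312 bytes), which is why
+ * fsl_pq_mdio_probe() backs the mapping up by
+ * offsetof(struct fsl_pq_mdio, miimcfg) for nodes whose registers start
+ * at the MII management block itself.
+ */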
+
+int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
+int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
+int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
+ int regnum, u16 value);
+int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, int mii_id, int regnum);
+int __init fsl_pq_mdio_init(void);
+void fsl_pq_mdio_exit(void);
+void fsl_pq_mdio_bus_name(char *name, struct device_node *np);
+#endif /* __FSL_PQ_MDIO_H */
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
new file mode 100644
index 000000000000..83199fd0d62b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -0,0 +1,3296 @@
+/*
+ * drivers/net/gianfar.c
+ *
+ * Gianfar Ethernet Driver
+ * This driver is designed for the non-CPM ethernet controllers
+ * on the 85xx and 83xx family of integrated processors
+ * Based on 8260_io/fcc_enet.c
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
+ *
+ * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
+ * Copyright 2007 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Gianfar: AKA Lambda Draconis, "Dragon"
+ * RA 11 31 24.2
+ * Dec +69 19 52
+ * V 3.84
+ * B-V +1.62
+ *
+ * Theory of operation
+ *
+ * The driver is initialized through of_device. Configuration information
+ * is therefore conveyed through an OF-style device tree.
+ *
+ * The Gianfar Ethernet Controller uses a ring of buffer
+ * descriptors. The beginning is indicated by a register
+ * pointing to the physical address of the start of the ring.
+ * The end is determined by a "wrap" bit being set in the
+ * last descriptor of the ring.
+ *
+ * When a packet is received, the RXF bit in the
+ * IEVENT register is set, triggering an interrupt when the
+ * corresponding bit in the IMASK register is also set (if
+ * interrupt coalescing is active, then the interrupt may not
+ * happen immediately, but will wait until either a set number
+ * of frames or amount of time have passed). In NAPI, the
+ * interrupt handler will signal there is work to be done, and
+ * exit. This method will start at the last known empty
+ * descriptor, and process every subsequent descriptor until there
+ * are none left with data (NAPI will stop after a set number of
+ * packets to give time to other tasks, but will eventually
+ * process all the packets). The data arrives inside a
+ * pre-allocated skb, and so after the skb is passed up to the
+ * stack, a new skb must be allocated, and the address field in
+ * the buffer descriptor must be updated to indicate this new
+ * skb.
+ *
+ * When the kernel requests that a packet be transmitted, the
+ * driver starts where it left off last time, and points the
+ * descriptor at the buffer which was passed in. The driver
+ * then informs the DMA engine that there are packets ready to
+ * be transmitted. Once the controller is finished transmitting
+ * the packet, an interrupt may be triggered (under the same
+ * conditions as for reception, but depending on the TXF bit).
+ * The driver then cleans up the buffer.
+ */
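+
+/*
+ * A minimal sketch of the receive-side ring walk described above
+ * (illustrative only: RXBD_EMPTY and RXBD_WRAP are the flags this
+ * driver uses, and process() stands in for the real frame handling):
+ *
+ *	while (!(bdp->status & RXBD_EMPTY)) {
+ *		process(bdp);			(hand the skb to the stack)
+ *		bdp->status |= RXBD_EMPTY;	(give the BD back to hw)
+ *		if (bdp->status & RXBD_WRAP)	(last BD: wrap to base)
+ *			bdp = rx_bd_base;
+ *		else
+ *			bdp++;
+ *	}
+ */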
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define DEBUG
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/in.h>
+#include <linux/net_tstamp.h>
+
+#include <asm/io.h>
+#include <asm/reg.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+
+#include "gianfar.h"
+#include "fsl_pq_mdio.h"
+
+#define TX_TIMEOUT (1*HZ)
+#undef BRIEF_GFAR_ERRORS
+#undef VERBOSE_GFAR_ERRORS
+
+const char gfar_driver_name[] = "Gianfar Ethernet";
+const char gfar_driver_version[] = "1.3";
+
+static int gfar_enet_open(struct net_device *dev);
+static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void gfar_reset_task(struct work_struct *work);
+static void gfar_timeout(struct net_device *dev);
+static int gfar_close(struct net_device *dev);
+struct sk_buff *gfar_new_skb(struct net_device *dev);
+static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
+ struct sk_buff *skb);
+static int gfar_set_mac_address(struct net_device *dev);
+static int gfar_change_mtu(struct net_device *dev, int new_mtu);
+static irqreturn_t gfar_error(int irq, void *dev_id);
+static irqreturn_t gfar_transmit(int irq, void *dev_id);
+static irqreturn_t gfar_interrupt(int irq, void *dev_id);
+static void adjust_link(struct net_device *dev);
+static void init_registers(struct net_device *dev);
+static int init_phy(struct net_device *dev);
+static int gfar_probe(struct platform_device *ofdev);
+static int gfar_remove(struct platform_device *ofdev);
+static void free_skb_resources(struct gfar_private *priv);
+static void gfar_set_multi(struct net_device *dev);
+static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
+static void gfar_configure_serdes(struct net_device *dev);
+static int gfar_poll(struct napi_struct *napi, int budget);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void gfar_netpoll(struct net_device *dev);
+#endif
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
+static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
+static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+ int amount_pull);
+void gfar_halt(struct net_device *dev);
+static void gfar_halt_nodisable(struct net_device *dev);
+void gfar_start(struct net_device *dev);
+static void gfar_clear_exact_match(struct net_device *dev);
+static void gfar_set_mac_for_addr(struct net_device *dev, int num,
+ const u8 *addr);
+static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("Gianfar Ethernet Driver");
+MODULE_LICENSE("GPL");
+
+static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
+ dma_addr_t buf)
+{
+ u32 lstatus;
+
+ bdp->bufPtr = buf;
+
+ lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
+ if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
+ lstatus |= BD_LFLAG(RXBD_WRAP);
+
+ eieio();
+
+ bdp->lstatus = lstatus;
+}
+
+static int gfar_init_bds(struct net_device *ndev)
+{
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ struct txbd8 *txbdp;
+ struct rxbd8 *rxbdp;
+ int i, j;
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ tx_queue = priv->tx_queue[i];
+ /* Initialize some variables in our dev structure */
+ tx_queue->num_txbdfree = tx_queue->tx_ring_size;
+ tx_queue->dirty_tx = tx_queue->tx_bd_base;
+ tx_queue->cur_tx = tx_queue->tx_bd_base;
+ tx_queue->skb_curtx = 0;
+ tx_queue->skb_dirtytx = 0;
+
+ /* Initialize Transmit Descriptor Ring */
+ txbdp = tx_queue->tx_bd_base;
+ for (j = 0; j < tx_queue->tx_ring_size; j++) {
+ txbdp->lstatus = 0;
+ txbdp->bufPtr = 0;
+ txbdp++;
+ }
+
+ /* Set the last descriptor in the ring to indicate wrap */
+ txbdp--;
+ txbdp->status |= TXBD_WRAP;
+ }
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_queue = priv->rx_queue[i];
+ rx_queue->cur_rx = rx_queue->rx_bd_base;
+ rx_queue->skb_currx = 0;
+ rxbdp = rx_queue->rx_bd_base;
+
+ for (j = 0; j < rx_queue->rx_ring_size; j++) {
+ struct sk_buff *skb = rx_queue->rx_skbuff[j];
+
+ if (skb) {
+ gfar_init_rxbdp(rx_queue, rxbdp,
+ rxbdp->bufPtr);
+ } else {
+ skb = gfar_new_skb(ndev);
+ if (!skb) {
+ netdev_err(ndev, "Can't allocate RX buffers\n");
+ goto err_rxalloc_fail;
+ }
+ rx_queue->rx_skbuff[j] = skb;
+
+ gfar_new_rxbdp(rx_queue, rxbdp, skb);
+ }
+
+ rxbdp++;
+ }
+
+ }
+
+ return 0;
+
+err_rxalloc_fail:
+ free_skb_resources(priv);
+ return -ENOMEM;
+}
+
+static int gfar_alloc_skb_resources(struct net_device *ndev)
+{
+ void *vaddr;
+ dma_addr_t addr;
+ int i, j, k;
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct device *dev = &priv->ofdev->dev;
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+
+ priv->total_tx_ring_size = 0;
+ for (i = 0; i < priv->num_tx_queues; i++)
+ priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
+
+ priv->total_rx_ring_size = 0;
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
+
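+ /*
+ * Layout of the single coherent block allocated below (illustrative
+ * sketch of what the pointer arithmetic in this function sets up):
+ *
+ * [ tx BDs, queue 0 .. N-1 ][ rx BDs, queue 0 .. M-1 ]
+ *
+ * Each queue's *_bd_base / *_bd_dma_base is just an offset into this
+ * one allocation.
+ */
+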
+ /* Allocate memory for the buffer descriptors */
+ vaddr = dma_alloc_coherent(dev,
+ sizeof(struct txbd8) * priv->total_tx_ring_size +
+ sizeof(struct rxbd8) * priv->total_rx_ring_size,
+ &addr, GFP_KERNEL);
+ if (!vaddr) {
+ netif_err(priv, ifup, ndev,
+ "Could not allocate buffer descriptors!\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ tx_queue = priv->tx_queue[i];
+ tx_queue->tx_bd_base = vaddr;
+ tx_queue->tx_bd_dma_base = addr;
+ tx_queue->dev = ndev;
+ /* enet DMA only understands physical addresses */
+ addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+ vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+ }
+
+ /* Start the rx descriptor ring where the tx ring leaves off */
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_queue = priv->rx_queue[i];
+ rx_queue->rx_bd_base = vaddr;
+ rx_queue->rx_bd_dma_base = addr;
+ rx_queue->dev = ndev;
+ addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+ vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+ }
+
+ /* Setup the skbuff rings */
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ tx_queue = priv->tx_queue[i];
+ tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
+ tx_queue->tx_ring_size, GFP_KERNEL);
+ if (!tx_queue->tx_skbuff) {
+ netif_err(priv, ifup, ndev,
+ "Could not allocate tx_skbuff\n");
+ goto cleanup;
+ }
+
+ for (k = 0; k < tx_queue->tx_ring_size; k++)
+ tx_queue->tx_skbuff[k] = NULL;
+ }
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_queue = priv->rx_queue[i];
+ rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
+ rx_queue->rx_ring_size, GFP_KERNEL);
+
+ if (!rx_queue->rx_skbuff) {
+ netif_err(priv, ifup, ndev,
+ "Could not allocate rx_skbuff\n");
+ goto cleanup;
+ }
+
+ for (j = 0; j < rx_queue->rx_ring_size; j++)
+ rx_queue->rx_skbuff[j] = NULL;
+ }
+
+ if (gfar_init_bds(ndev))
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ free_skb_resources(priv);
+ return -ENOMEM;
+}
+
+static void gfar_init_tx_rx_base(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 __iomem *baddr;
+ int i;
+
+ baddr = &regs->tbase0;
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
+ baddr += 2;
+ }
+
+ baddr = &regs->rbase0;
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
+ baddr += 2;
+ }
+}
+
+static void gfar_init_mac(struct net_device *ndev)
+{
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 rctrl = 0;
+ u32 tctrl = 0;
+ u32 attrs = 0;
+
+ /* write the tx/rx base registers */
+ gfar_init_tx_rx_base(priv);
+
+ /* Configure the coalescing support */
+ gfar_configure_coalescing(priv, 0xFF, 0xFF);
+
+ if (priv->rx_filer_enable) {
+ rctrl |= RCTRL_FILREN;
+ /* Program the RIR0 reg with the required distribution */
+ gfar_write(&regs->rir0, DEFAULT_RIR0);
+ }
+
+ if (ndev->features & NETIF_F_RXCSUM)
+ rctrl |= RCTRL_CHECKSUMMING;
+
+ if (priv->extended_hash) {
+ rctrl |= RCTRL_EXTHASH;
+
+ gfar_clear_exact_match(ndev);
+ rctrl |= RCTRL_EMEN;
+ }
+
+ if (priv->padding) {
+ rctrl &= ~RCTRL_PAL_MASK;
+ rctrl |= RCTRL_PADDING(priv->padding);
+ }
+
+ /* Insert receive time stamps into padding alignment bytes */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
+ rctrl &= ~RCTRL_PAL_MASK;
+ rctrl |= RCTRL_PADDING(8);
+ priv->padding = 8;
+ }
+
+ /* Enable HW time stamping if requested from user space */
+ if (priv->hwts_rx_en)
+ rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
+
+ if (ndev->features & NETIF_F_HW_VLAN_RX)
+ rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
+
+ /* Init rctrl based on our settings */
+ gfar_write(&regs->rctrl, rctrl);
+
+ if (ndev->features & NETIF_F_IP_CSUM)
+ tctrl |= TCTRL_INIT_CSUM;
+
+ tctrl |= TCTRL_TXSCHED_PRIO;
+
+ gfar_write(&regs->tctrl, tctrl);
+
+ /* Set the extraction length and index */
+ attrs = ATTRELI_EL(priv->rx_stash_size) |
+ ATTRELI_EI(priv->rx_stash_index);
+
+ gfar_write(&regs->attreli, attrs);
+
+ /* Start with defaults, and add stashing or locking
+ * depending on the appropriate variables */
+ attrs = ATTR_INIT_SETTINGS;
+
+ if (priv->bd_stash_en)
+ attrs |= ATTR_BDSTASH;
+
+ if (priv->rx_stash_size != 0)
+ attrs |= ATTR_BUFSTASH;
+
+ gfar_write(&regs->attr, attrs);
+
+ gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
+ gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
+ gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
+}
+
+static struct net_device_stats *gfar_get_stats(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
+ unsigned long tx_packets = 0, tx_bytes = 0;
+ int i = 0;
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_packets += priv->rx_queue[i]->stats.rx_packets;
+ rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
+ rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
+ }
+
+ dev->stats.rx_packets = rx_packets;
+ dev->stats.rx_bytes = rx_bytes;
+ dev->stats.rx_dropped = rx_dropped;
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
+ tx_packets += priv->tx_queue[i]->stats.tx_packets;
+ }
+
+ dev->stats.tx_bytes = tx_bytes;
+ dev->stats.tx_packets = tx_packets;
+
+ return &dev->stats;
+}
+
+static const struct net_device_ops gfar_netdev_ops = {
+ .ndo_open = gfar_enet_open,
+ .ndo_start_xmit = gfar_start_xmit,
+ .ndo_stop = gfar_close,
+ .ndo_change_mtu = gfar_change_mtu,
+ .ndo_set_features = gfar_set_features,
+ .ndo_set_rx_mode = gfar_set_multi,
+ .ndo_tx_timeout = gfar_timeout,
+ .ndo_do_ioctl = gfar_ioctl,
+ .ndo_get_stats = gfar_get_stats,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = gfar_netpoll,
+#endif
+};
+
+void lock_rx_qs(struct gfar_private *priv)
+{
+ int i = 0x0;
+
+ for (i = 0; i < priv->num_rx_queues; i++)
+ spin_lock(&priv->rx_queue[i]->rxlock);
+}
+
+void lock_tx_qs(struct gfar_private *priv)
+{
+ int i = 0x0;
+
+ for (i = 0; i < priv->num_tx_queues; i++)
+ spin_lock(&priv->tx_queue[i]->txlock);
+}
+
+void unlock_rx_qs(struct gfar_private *priv)
+{
+ int i = 0x0;
+
+ for (i = 0; i < priv->num_rx_queues; i++)
+ spin_unlock(&priv->rx_queue[i]->rxlock);
+}
+
+void unlock_tx_qs(struct gfar_private *priv)
+{
+ int i = 0x0;
+
+ for (i = 0; i < priv->num_tx_queues; i++)
+ spin_unlock(&priv->tx_queue[i]->txlock);
+}
+
+static bool gfar_is_vlan_on(struct gfar_private *priv)
+{
+ return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
+ (priv->ndev->features & NETIF_F_HW_VLAN_TX);
+}
+
+/* Returns 1 if incoming frames use an FCB */
+static inline int gfar_uses_fcb(struct gfar_private *priv)
+{
+ return gfar_is_vlan_on(priv) ||
+ (priv->ndev->features & NETIF_F_RXCSUM) ||
+ (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
+}
+
+static void free_tx_pointers(struct gfar_private *priv)
+{
+ int i = 0;
+
+ for (i = 0; i < priv->num_tx_queues; i++)
+ kfree(priv->tx_queue[i]);
+}
+
+static void free_rx_pointers(struct gfar_private *priv)
+{
+ int i = 0;
+
+ for (i = 0; i < priv->num_rx_queues; i++)
+ kfree(priv->rx_queue[i]);
+}
+
+static void unmap_group_regs(struct gfar_private *priv)
+{
+ int i = 0;
+
+ for (i = 0; i < MAXGROUPS; i++)
+ if (priv->gfargrp[i].regs)
+ iounmap(priv->gfargrp[i].regs);
+}
+
+static void disable_napi(struct gfar_private *priv)
+{
+ int i = 0;
+
+ for (i = 0; i < priv->num_grps; i++)
+ napi_disable(&priv->gfargrp[i].napi);
+}
+
+static void enable_napi(struct gfar_private *priv)
+{
+ int i = 0;
+
+ for (i = 0; i < priv->num_grps; i++)
+ napi_enable(&priv->gfargrp[i].napi);
+}
+
+static int gfar_parse_group(struct device_node *np,
+ struct gfar_private *priv, const char *model)
+{
+ u32 *queue_mask;
+
+ priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
+ if (!priv->gfargrp[priv->num_grps].regs)
+ return -ENOMEM;
+
+ priv->gfargrp[priv->num_grps].interruptTransmit =
+ irq_of_parse_and_map(np, 0);
+
+ /* If we aren't the FEC we have multiple interrupts */
+ if (model && strcasecmp(model, "FEC")) {
+ priv->gfargrp[priv->num_grps].interruptReceive =
+ irq_of_parse_and_map(np, 1);
+ priv->gfargrp[priv->num_grps].interruptError =
+ irq_of_parse_and_map(np, 2);
+ if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
+ priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
+ priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
+ return -EINVAL;
+ }
+
+ priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
+ priv->gfargrp[priv->num_grps].priv = priv;
+ spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
+ if (priv->mode == MQ_MG_MODE) {
+ queue_mask = (u32 *)of_get_property(np,
+ "fsl,rx-bit-map", NULL);
+ priv->gfargrp[priv->num_grps].rx_bit_map =
+ queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ queue_mask = (u32 *)of_get_property(np,
+ "fsl,tx-bit-map", NULL);
+ priv->gfargrp[priv->num_grps].tx_bit_map =
+ queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ } else {
+ priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
+ priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
+ }
+ priv->num_grps++;
+
+ return 0;
+}
+
+static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
+{
+ const char *model;
+ const char *ctype;
+ const void *mac_addr;
+ int err = 0, i;
+ struct net_device *dev = NULL;
+ struct gfar_private *priv = NULL;
+ struct device_node *np = ofdev->dev.of_node;
+ struct device_node *child = NULL;
+ const u32 *stash;
+ const u32 *stash_len;
+ const u32 *stash_idx;
+ unsigned int num_tx_qs, num_rx_qs;
+ u32 *tx_queues, *rx_queues;
+
+ if (!np || !of_device_is_available(np))
+ return -ENODEV;
+
+ /* parse the num of tx and rx queues */
+ tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
+ num_tx_qs = tx_queues ? *tx_queues : 1;
+
+ if (num_tx_qs > MAX_TX_QS) {
+ pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
+ num_tx_qs, MAX_TX_QS);
+ pr_err("Cannot do alloc_etherdev, aborting\n");
+ return -EINVAL;
+ }
+
+ rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
+ num_rx_qs = rx_queues ? *rx_queues : 1;
+
+ if (num_rx_qs > MAX_RX_QS) {
+ pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
+ num_rx_qs, MAX_RX_QS);
+ pr_err("Cannot do alloc_etherdev, aborting\n");
+ return -EINVAL;
+ }
+
+ *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
+ dev = *pdev;
+ if (!dev)
+ return -ENOMEM;
+
+ priv = netdev_priv(dev);
+ priv->node = ofdev->dev.of_node;
+ priv->ndev = dev;
+
+ priv->num_tx_queues = num_tx_qs;
+ netif_set_real_num_rx_queues(dev, num_rx_qs);
+ priv->num_rx_queues = num_rx_qs;
+ priv->num_grps = 0x0;
+
+ /* Init Rx queue filer rule set linked list */
+ INIT_LIST_HEAD(&priv->rx_list.list);
+ priv->rx_list.count = 0;
+ mutex_init(&priv->rx_queue_access);
+
+ model = of_get_property(np, "model", NULL);
+
+ for (i = 0; i < MAXGROUPS; i++)
+ priv->gfargrp[i].regs = NULL;
+
+ /* Parse and initialize group specific information */
+ if (of_device_is_compatible(np, "fsl,etsec2")) {
+ priv->mode = MQ_MG_MODE;
+ for_each_child_of_node(np, child) {
+ err = gfar_parse_group(child, priv, model);
+ if (err)
+ goto err_grp_init;
+ }
+ } else {
+ priv->mode = SQ_SG_MODE;
+ err = gfar_parse_group(np, priv, model);
+ if (err)
+ goto err_grp_init;
+ }
+
+ for (i = 0; i < priv->num_tx_queues; i++)
+ priv->tx_queue[i] = NULL;
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->rx_queue[i] = NULL;
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
+ GFP_KERNEL);
+ if (!priv->tx_queue[i]) {
+ err = -ENOMEM;
+ goto tx_alloc_failed;
+ }
+ priv->tx_queue[i]->tx_skbuff = NULL;
+ priv->tx_queue[i]->qindex = i;
+ priv->tx_queue[i]->dev = dev;
+ spin_lock_init(&(priv->tx_queue[i]->txlock));
+ }
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
+ GFP_KERNEL);
+ if (!priv->rx_queue[i]) {
+ err = -ENOMEM;
+ goto rx_alloc_failed;
+ }
+ priv->rx_queue[i]->rx_skbuff = NULL;
+ priv->rx_queue[i]->qindex = i;
+ priv->rx_queue[i]->dev = dev;
+ spin_lock_init(&(priv->rx_queue[i]->rxlock));
+ }
+
+ stash = of_get_property(np, "bd-stash", NULL);
+
+ if (stash) {
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
+ priv->bd_stash_en = 1;
+ }
+
+ stash_len = of_get_property(np, "rx-stash-len", NULL);
+
+ if (stash_len)
+ priv->rx_stash_size = *stash_len;
+
+ stash_idx = of_get_property(np, "rx-stash-idx", NULL);
+
+ if (stash_idx)
+ priv->rx_stash_index = *stash_idx;
+
+ if (stash_len || stash_idx)
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
+
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr)
+ memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);
+
+ if (model && !strcasecmp(model, "TSEC"))
+ priv->device_flags =
+ FSL_GIANFAR_DEV_HAS_GIGABIT |
+ FSL_GIANFAR_DEV_HAS_COALESCE |
+ FSL_GIANFAR_DEV_HAS_RMON |
+ FSL_GIANFAR_DEV_HAS_MULTI_INTR;
+ if (model && !strcasecmp(model, "eTSEC"))
+ priv->device_flags =
+ FSL_GIANFAR_DEV_HAS_GIGABIT |
+ FSL_GIANFAR_DEV_HAS_COALESCE |
+ FSL_GIANFAR_DEV_HAS_RMON |
+ FSL_GIANFAR_DEV_HAS_MULTI_INTR |
+ FSL_GIANFAR_DEV_HAS_PADDING |
+ FSL_GIANFAR_DEV_HAS_CSUM |
+ FSL_GIANFAR_DEV_HAS_VLAN |
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
+ FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
+ FSL_GIANFAR_DEV_HAS_TIMER;
+
+ ctype = of_get_property(np, "phy-connection-type", NULL);
+
+ /* We only care about rgmii-id. The rest are autodetected */
+ if (ctype && !strcmp(ctype, "rgmii-id"))
+ priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
+ else
+ priv->interface = PHY_INTERFACE_MODE_MII;
+
+ if (of_get_property(np, "fsl,magic-packet", NULL))
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
+
+ priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
+ /* Find the TBI PHY. If it's not there, we don't support SGMII */
+ priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
+
+ return 0;
+
+rx_alloc_failed:
+ free_rx_pointers(priv);
+tx_alloc_failed:
+ free_tx_pointers(priv);
+err_grp_init:
+ unmap_group_regs(priv);
+ free_netdev(dev);
+ return err;
+}
+
+static int gfar_hwtstamp_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config config;
+ struct gfar_private *priv = netdev_priv(netdev);
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->hwts_tx_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+ return -ERANGE;
+ priv->hwts_tx_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ if (priv->hwts_rx_en) {
+ stop_gfar(netdev);
+ priv->hwts_rx_en = 0;
+ startup_gfar(netdev);
+ }
+ break;
+ default:
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+ return -ERANGE;
+ if (!priv->hwts_rx_en) {
+ stop_gfar(netdev);
+ priv->hwts_rx_en = 1;
+ startup_gfar(netdev);
+ }
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+/* Ioctl MII Interface */
+static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (cmd == SIOCSHWTSTAMP)
+ return gfar_hwtstamp_ioctl(dev, rq, cmd);
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(priv->phydev, rq, cmd);
+}
+
+static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
+{
+ unsigned int new_bit_map = 0x0;
+ int mask = 0x1 << (max_qs - 1), i;
+
+ for (i = 0; i < max_qs; i++) {
+ if (bit_map & mask)
+ new_bit_map |= 1 << i;
+ mask >>= 1;
+ }
+
+ return new_bit_map;
+}
+
+static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
+ u32 class)
+{
+ u32 rqfpr = FPR_FILER_MASK;
+ u32 rqfcr = 0x0;
+
+ rqfar--;
+ rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+ rqfar--;
+ rqfcr = RQFCR_CMP_NOMATCH;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+ rqfar--;
+ rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
+ rqfpr = class;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
+ gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+ rqfar--;
+ rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
+ rqfpr = class;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
+ gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+ return rqfar;
+}
+
+static void gfar_init_filer_table(struct gfar_private *priv)
+{
+ int i = 0x0;
+ u32 rqfar = MAX_FILER_IDX;
+ u32 rqfcr = 0x0;
+ u32 rqfpr = FPR_FILER_MASK;
+
+ /* Default rule */
+ rqfcr = RQFCR_CMP_MATCH;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
+ gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
+
+ /* cur_filer_idx indicates the first non-masked rule */
+ priv->cur_filer_idx = rqfar;
+
+ /* Rest are masked rules */
+ rqfcr = RQFCR_CMP_NOMATCH;
+ for (i = 0; i < rqfar; i++) {
+ priv->ftp_rqfcr[i] = rqfcr;
+ priv->ftp_rqfpr[i] = rqfpr;
+ gfar_write_filer(priv, i, rqfcr, rqfpr);
+ }
+}
+
+static void gfar_detect_errata(struct gfar_private *priv)
+{
+ struct device *dev = &priv->ofdev->dev;
+ unsigned int pvr = mfspr(SPRN_PVR);
+ unsigned int svr = mfspr(SPRN_SVR);
+ unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
+ unsigned int rev = svr & 0xffff;
+
+ /* MPC8313 Rev 2.0 and higher; All MPC837x */
+ if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ priv->errata |= GFAR_ERRATA_74;
+
+ /* MPC8313 and MPC837x all rev */
+ if ((pvr == 0x80850010 && mod == 0x80b0) ||
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ priv->errata |= GFAR_ERRATA_76;
+
+ /* MPC8313 and MPC837x all rev */
+ if ((pvr == 0x80850010 && mod == 0x80b0) ||
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ priv->errata |= GFAR_ERRATA_A002;
+
+ /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
+ if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
+ (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
+ priv->errata |= GFAR_ERRATA_12;
+
+ if (priv->errata)
+ dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
+ priv->errata);
+}
+
+/* Set up the ethernet device structure, private data,
+ * and anything else we need before we start */
+static int gfar_probe(struct platform_device *ofdev)
+{
+ u32 tempval;
+ struct net_device *dev = NULL;
+ struct gfar_private *priv = NULL;
+ struct gfar __iomem *regs = NULL;
+ int err = 0, i, grp_idx = 0;
+ int len_devname;
+ u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
+ u32 isrg = 0;
+ u32 __iomem *baddr;
+
+ err = gfar_of_init(ofdev, &dev);
+
+ if (err)
+ return err;
+
+ priv = netdev_priv(dev);
+ priv->ndev = dev;
+ priv->ofdev = ofdev;
+ priv->node = ofdev->dev.of_node;
+ SET_NETDEV_DEV(dev, &ofdev->dev);
+
+ spin_lock_init(&priv->bflock);
+ INIT_WORK(&priv->reset_task, gfar_reset_task);
+
+ dev_set_drvdata(&ofdev->dev, priv);
+ regs = priv->gfargrp[0].regs;
+
+ gfar_detect_errata(priv);
+
+ /* Stop the DMA engine now, in case it was running before
+ * (the firmware could have used it, and left it running). */
+ gfar_halt(dev);
+
+ /* Reset MAC layer */
+ gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
+
+ /* We need to delay at least 3 TX clocks */
+ udelay(2);
+
+ tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+ gfar_write(&regs->maccfg1, tempval);
+
+ /* Initialize MACCFG2. */
+ tempval = MACCFG2_INIT_SETTINGS;
+ if (gfar_has_errata(priv, GFAR_ERRATA_74))
+ tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
+ gfar_write(&regs->maccfg2, tempval);
+
+ /* Initialize ECNTRL */
+ gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
+
+ /* Set the dev->base_addr to the gfar reg region */
+ dev->base_addr = (unsigned long) regs;
+
+ SET_NETDEV_DEV(dev, &ofdev->dev);
+
+ /* Fill in the dev structure */
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->mtu = 1500;
+ dev->netdev_ops = &gfar_netdev_ops;
+ dev->ethtool_ops = &gfar_ethtool_ops;
+
+ /* Register for napi ...We are registering NAPI for each grp */
+ for (i = 0; i < priv->num_grps; i++)
+ netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
+ }
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
+ dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ }
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
+ priv->extended_hash = 1;
+ priv->hash_width = 9;
+
+ priv->hash_regs[0] = &regs->igaddr0;
+ priv->hash_regs[1] = &regs->igaddr1;
+ priv->hash_regs[2] = &regs->igaddr2;
+ priv->hash_regs[3] = &regs->igaddr3;
+ priv->hash_regs[4] = &regs->igaddr4;
+ priv->hash_regs[5] = &regs->igaddr5;
+ priv->hash_regs[6] = &regs->igaddr6;
+ priv->hash_regs[7] = &regs->igaddr7;
+ priv->hash_regs[8] = &regs->gaddr0;
+ priv->hash_regs[9] = &regs->gaddr1;
+ priv->hash_regs[10] = &regs->gaddr2;
+ priv->hash_regs[11] = &regs->gaddr3;
+ priv->hash_regs[12] = &regs->gaddr4;
+ priv->hash_regs[13] = &regs->gaddr5;
+ priv->hash_regs[14] = &regs->gaddr6;
+ priv->hash_regs[15] = &regs->gaddr7;
+
+ } else {
+ priv->extended_hash = 0;
+ priv->hash_width = 8;
+
+ priv->hash_regs[0] = &regs->gaddr0;
+ priv->hash_regs[1] = &regs->gaddr1;
+ priv->hash_regs[2] = &regs->gaddr2;
+ priv->hash_regs[3] = &regs->gaddr3;
+ priv->hash_regs[4] = &regs->gaddr4;
+ priv->hash_regs[5] = &regs->gaddr5;
+ priv->hash_regs[6] = &regs->gaddr6;
+ priv->hash_regs[7] = &regs->gaddr7;
+ }
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
+ priv->padding = DEFAULT_PADDING;
+ else
+ priv->padding = 0;
+
+ if (dev->features & NETIF_F_IP_CSUM ||
+ priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+ dev->hard_header_len += GMAC_FCB_LEN;
+
+ /* Program the isrg regs only if number of grps > 1 */
+ if (priv->num_grps > 1) {
+ baddr = &regs->isrg0;
+ for (i = 0; i < priv->num_grps; i++) {
+ isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
+ isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
+ gfar_write(baddr, isrg);
+ baddr++;
+ isrg = 0x0;
+ }
+ }
+
+ /* Need to reverse the bit maps as bit_map's MSB is q0
+ * but, for_each_set_bit parses from right to left, which
+ * basically reverses the queue numbers */
+ for (i = 0; i < priv->num_grps; i++) {
+ priv->gfargrp[i].tx_bit_map = reverse_bitmap(
+ priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
+ priv->gfargrp[i].rx_bit_map = reverse_bitmap(
+ priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
+ }
+
+ /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
+ * also assign queues to groups */
+ for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
+ priv->gfargrp[grp_idx].num_rx_queues = 0x0;
+ for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
+ priv->num_rx_queues) {
+ priv->gfargrp[grp_idx].num_rx_queues++;
+ priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
+ rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
+ rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
+ }
+ priv->gfargrp[grp_idx].num_tx_queues = 0x0;
+ for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
+ priv->num_tx_queues) {
+ priv->gfargrp[grp_idx].num_tx_queues++;
+ priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
+ tstat = tstat | (TSTAT_CLEAR_THALT >> i);
+ tqueue = tqueue | (TQUEUE_EN0 >> i);
+ }
+ priv->gfargrp[grp_idx].rstat = rstat;
+ priv->gfargrp[grp_idx].tstat = tstat;
+ rstat = tstat = 0;
+ }
+
+ gfar_write(&regs->rqueue, rqueue);
+ gfar_write(&regs->tqueue, tqueue);
+
+ priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
+
+ /* Initializing some of the rx/tx queue level parameters */
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
+ priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
+ priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
+ priv->tx_queue[i]->txic = DEFAULT_TXIC;
+ }
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
+ priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
+ priv->rx_queue[i]->rxic = DEFAULT_RXIC;
+ }
+
+ /* Always enable the rx filer */
+ priv->rx_filer_enable = 1;
+ /* Enable most messages by default */
+ priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
+
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(dev);
+
+ err = register_netdev(dev);
+
+ if (err) {
+ pr_err("%s: Cannot register net device, aborting\n", dev->name);
+ goto register_fail;
+ }
+
+ device_init_wakeup(&dev->dev,
+ priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+
+ /* fill out IRQ number and name fields */
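+ /* For multi-interrupt devices the names become e.g.
+ * "eth0_g0_tx", "eth0_g0_rx" and "eth0_g0_er"; single-interrupt
+ * devices just use the device name */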
+ len_devname = strlen(dev->name);
+ for (i = 0; i < priv->num_grps; i++) {
+ strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
+ len_devname);
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
+ "_g", sizeof("_g"));
+ priv->gfargrp[i].int_name_tx[
+ strlen(priv->gfargrp[i].int_name_tx)] = '0' + i;
+ strncpy(&priv->gfargrp[i].int_name_tx[strlen(
+ priv->gfargrp[i].int_name_tx)],
+ "_tx", sizeof("_tx") + 1);
+
+ strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
+ len_devname);
+ strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
+ "_g", sizeof("_g"));
+ priv->gfargrp[i].int_name_rx[
+ strlen(priv->gfargrp[i].int_name_rx)] = '0' + i;
+ strncpy(&priv->gfargrp[i].int_name_rx[strlen(
+ priv->gfargrp[i].int_name_rx)],
+ "_rx", sizeof("_rx") + 1);
+
+ strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
+ len_devname);
+ strncpy(&priv->gfargrp[i].int_name_er[len_devname],
+ "_g", sizeof("_g"));
+ priv->gfargrp[i].int_name_er[strlen(
+ priv->gfargrp[i].int_name_er)] = '0' + i;
+ strncpy(&priv->gfargrp[i].int_name_er[strlen(
+ priv->gfargrp[i].int_name_er)],
+ "_er", sizeof("_er") + 1);
+ } else
+ priv->gfargrp[i].int_name_tx[len_devname] = '\0';
+ }
+
+ /* Initialize the filer table */
+ gfar_init_filer_table(priv);
+
+ /* Create all the sysfs files */
+ gfar_init_sysfs(dev);
+
+ /* Print out the device info */
+ netdev_info(dev, "mac: %pM\n", dev->dev_addr);
+
+ /* Even more device info helps when determining which kernel
+ * provided which set of benchmarks. */
+ netdev_info(dev, "Running with NAPI enabled\n");
+ for (i = 0; i < priv->num_rx_queues; i++)
+ netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
+ i, priv->rx_queue[i]->rx_ring_size);
+ for (i = 0; i < priv->num_tx_queues; i++)
+ netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
+ i, priv->tx_queue[i]->tx_ring_size);
+
+ return 0;
+
+register_fail:
+ unmap_group_regs(priv);
+ free_tx_pointers(priv);
+ free_rx_pointers(priv);
+ if (priv->phy_node)
+ of_node_put(priv->phy_node);
+ if (priv->tbi_node)
+ of_node_put(priv->tbi_node);
+ free_netdev(dev);
+ return err;
+}
+
+static int gfar_remove(struct platform_device *ofdev)
+{
+ struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
+
+ if (priv->phy_node)
+ of_node_put(priv->phy_node);
+ if (priv->tbi_node)
+ of_node_put(priv->tbi_node);
+
+ dev_set_drvdata(&ofdev->dev, NULL);
+
+ unregister_netdev(priv->ndev);
+ unmap_group_regs(priv);
+ free_netdev(priv->ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int gfar_suspend(struct device *dev)
+{
+ struct gfar_private *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->ndev;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ unsigned long flags;
+ u32 tempval;
+
+ int magic_packet = priv->wol_en &&
+ (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+
+ netif_device_detach(ndev);
+
+ if (netif_running(ndev)) {
+ local_irq_save(flags);
+ lock_tx_qs(priv);
+ lock_rx_qs(priv);
+
+ gfar_halt_nodisable(ndev);
+
+ /* Disable Tx, and Rx if wake-on-LAN is disabled. */
+ tempval = gfar_read(&regs->maccfg1);
+
+ tempval &= ~MACCFG1_TX_EN;
+
+ if (!magic_packet)
+ tempval &= ~MACCFG1_RX_EN;
+
+ gfar_write(&regs->maccfg1, tempval);
+
+ unlock_rx_qs(priv);
+ unlock_tx_qs(priv);
+ local_irq_restore(flags);
+
+ disable_napi(priv);
+
+ if (magic_packet) {
+ /* Enable interrupt on Magic Packet */
+ gfar_write(&regs->imask, IMASK_MAG);
+
+ /* Enable Magic Packet mode */
+ tempval = gfar_read(&regs->maccfg2);
+ tempval |= MACCFG2_MPEN;
+ gfar_write(&regs->maccfg2, tempval);
+ } else {
+ phy_stop(priv->phydev);
+ }
+ }
+
+ return 0;
+}
+
+static int gfar_resume(struct device *dev)
+{
+ struct gfar_private *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->ndev;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ unsigned long flags;
+ u32 tempval;
+ int magic_packet = priv->wol_en &&
+ (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+
+ if (!netif_running(ndev)) {
+ netif_device_attach(ndev);
+ return 0;
+ }
+
+ if (!magic_packet && priv->phydev)
+ phy_start(priv->phydev);
+
+ /* Disable Magic Packet mode, in case something
+ * else woke us up.
+ */
+ local_irq_save(flags);
+ lock_tx_qs(priv);
+ lock_rx_qs(priv);
+
+ tempval = gfar_read(&regs->maccfg2);
+ tempval &= ~MACCFG2_MPEN;
+ gfar_write(&regs->maccfg2, tempval);
+
+ gfar_start(ndev);
+
+ unlock_rx_qs(priv);
+ unlock_tx_qs(priv);
+ local_irq_restore(flags);
+
+ netif_device_attach(ndev);
+
+ enable_napi(priv);
+
+ return 0;
+}
+
+static int gfar_restore(struct device *dev)
+{
+ struct gfar_private *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->ndev;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ gfar_init_bds(ndev);
+ init_registers(ndev);
+ gfar_set_mac_address(ndev);
+ gfar_init_mac(ndev);
+ gfar_start(ndev);
+
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
+ if (priv->phydev)
+ phy_start(priv->phydev);
+
+ netif_device_attach(ndev);
+ enable_napi(priv);
+
+ return 0;
+}
+
+static struct dev_pm_ops gfar_pm_ops = {
+ .suspend = gfar_suspend,
+ .resume = gfar_resume,
+ .freeze = gfar_suspend,
+ .thaw = gfar_resume,
+ .restore = gfar_restore,
+};
+
+#define GFAR_PM_OPS (&gfar_pm_ops)
+
+#else
+
+#define GFAR_PM_OPS NULL
+
+#endif
+
+/* Reads the controller's registers to determine what interface
+ * connects it to the PHY.
+ */
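+/* The checks run in precedence order: SGMII first, then (R)TBI,
+ * then the reduced (RMII/RGMII) modes, and finally GMII or MII
+ * based on the gigabit capability flag. */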
+static phy_interface_t gfar_get_interface(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 ecntrl;
+
+ ecntrl = gfar_read(&regs->ecntrl);
+
+ if (ecntrl & ECNTRL_SGMII_MODE)
+ return PHY_INTERFACE_MODE_SGMII;
+
+ if (ecntrl & ECNTRL_TBI_MODE) {
+ if (ecntrl & ECNTRL_REDUCED_MODE)
+ return PHY_INTERFACE_MODE_RTBI;
+ else
+ return PHY_INTERFACE_MODE_TBI;
+ }
+
+ if (ecntrl & ECNTRL_REDUCED_MODE) {
+ if (ecntrl & ECNTRL_REDUCED_MII_MODE)
+ return PHY_INTERFACE_MODE_RMII;
+ else {
+ phy_interface_t interface = priv->interface;
+
+ /*
+ * This isn't autodetected right now, so it must
+ * be set by the device tree or platform code.
+ */
+ if (interface == PHY_INTERFACE_MODE_RGMII_ID)
+ return PHY_INTERFACE_MODE_RGMII_ID;
+
+ return PHY_INTERFACE_MODE_RGMII;
+ }
+ }
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
+ return PHY_INTERFACE_MODE_GMII;
+
+ return PHY_INTERFACE_MODE_MII;
+}
+
+/* Initializes driver's PHY state, and attaches to the PHY.
+ * Returns 0 on success.
+ */
+static int init_phy(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ uint gigabit_support =
+ priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
+ SUPPORTED_1000baseT_Full : 0;
+ phy_interface_t interface;
+
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
+ interface = gfar_get_interface(dev);
+
+ priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
+ interface);
+ if (!priv->phydev)
+ priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
+ interface);
+ if (!priv->phydev) {
+ dev_err(&dev->dev, "could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ gfar_configure_serdes(dev);
+
+ /* Remove any features not supported by the controller */
+ priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
+ priv->phydev->advertising = priv->phydev->supported;
+
+ return 0;
+}
+
+/*
+ * Initialize TBI PHY interface for communicating with the
+ * SERDES lynx PHY on the chip. We communicate with this PHY
+ * through the MDIO bus on each controller, treating it as a
+ * "normal" PHY at the address found in the TBIPA register. We assume
+ * that the TBIPA register is valid. Either the MDIO bus code will set
+ * it to a value that doesn't conflict with other PHYs on the bus, or the
+ * value doesn't matter, as there are no other PHYs on the bus.
+ */
+static void gfar_configure_serdes(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *tbiphy;
+
+ if (!priv->tbi_node) {
+ dev_warn(&dev->dev, "error: SGMII mode requires that the "
+ "device tree specify a tbi-handle\n");
+ return;
+ }
+
+ tbiphy = of_phy_find_device(priv->tbi_node);
+ if (!tbiphy) {
+ dev_err(&dev->dev, "error: Could not get TBI device\n");
+ return;
+ }
+
+ /*
+ * If the link is already up, we must already be ok, and don't need to
+ * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
+ * everything for us? Resetting it takes the link down and requires
+ * several seconds for it to come back.
+ */
+ if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
+ return;
+
+ /* Single clk mode, MII mode off (for SerDes communication) */
+ phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
+
+ phy_write(tbiphy, MII_ADVERTISE,
+ ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
+ ADVERTISE_1000XPSE_ASYM);
+
+ phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
+ BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
+}
+
+static void init_registers(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = NULL;
+ int i = 0;
+
+ for (i = 0; i < priv->num_grps; i++) {
+ regs = priv->gfargrp[i].regs;
+ /* Clear IEVENT */
+ gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+
+ /* Initialize IMASK */
+ gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+ }
+
+ regs = priv->gfargrp[0].regs;
+ /* Init hash registers to zero */
+ gfar_write(&regs->igaddr0, 0);
+ gfar_write(&regs->igaddr1, 0);
+ gfar_write(&regs->igaddr2, 0);
+ gfar_write(&regs->igaddr3, 0);
+ gfar_write(&regs->igaddr4, 0);
+ gfar_write(&regs->igaddr5, 0);
+ gfar_write(&regs->igaddr6, 0);
+ gfar_write(&regs->igaddr7, 0);
+
+ gfar_write(&regs->gaddr0, 0);
+ gfar_write(&regs->gaddr1, 0);
+ gfar_write(&regs->gaddr2, 0);
+ gfar_write(&regs->gaddr3, 0);
+ gfar_write(&regs->gaddr4, 0);
+ gfar_write(&regs->gaddr5, 0);
+ gfar_write(&regs->gaddr6, 0);
+ gfar_write(&regs->gaddr7, 0);
+
+ /* Zero out the rmon mib registers if the device has them */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+ memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
+
+ /* Mask off the CAM interrupts */
+ gfar_write(&regs->rmon.cam1, 0xffffffff);
+ gfar_write(&regs->rmon.cam2, 0xffffffff);
+ }
+
+ /* Initialize the max receive buffer length */
+ gfar_write(&regs->mrblr, priv->rx_buffer_size);
+
+ /* Initialize the Minimum Frame Length Register */
+ gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
+}
+
+static int __gfar_is_rx_idle(struct gfar_private *priv)
+{
+ u32 res;
+
+ /*
+ * Normally the TSEC should not hang on GRS commands, so we
+ * should actually wait for the IEVENT_GRSC flag.
+ */
+ if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
+ return 0;
+
+ /*
+ * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
+ * the same as bits 23-30, the eTSEC Rx is assumed to be idle
+ * and the Rx can be safely reset.
+ */
+ res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
+ res &= 0x7f807f80;
+ if ((res & 0xffff) == (res >> 16))
+ return 1;
+
+ return 0;
+}
+
+/* Halt the receive and transmit queues */
+static void gfar_halt_nodisable(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = NULL;
+ u32 tempval;
+ int i = 0;
+
+ for (i = 0; i < priv->num_grps; i++) {
+ regs = priv->gfargrp[i].regs;
+ /* Mask all interrupts */
+ gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+
+ /* Clear all interrupts */
+ gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+ }
+
+ regs = priv->gfargrp[0].regs;
+ /* Stop the DMA, and wait for it to stop */
+ tempval = gfar_read(&regs->dmactrl);
+ if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
+ != (DMACTRL_GRS | DMACTRL_GTS)) {
+ int ret;
+
+ tempval |= (DMACTRL_GRS | DMACTRL_GTS);
+ gfar_write(&regs->dmactrl, tempval);
+
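+ /* Wait for both graceful-stop events; on parts with the A002
+ * erratum IEVENT_GRSC may never assert, in which case the
+ * __gfar_is_rx_idle() check below provides the exit condition */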
+ do {
+ ret = spin_event_timeout(((gfar_read(&regs->ievent) &
+ (IEVENT_GRSC | IEVENT_GTSC)) ==
+ (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
+ if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
+ ret = __gfar_is_rx_idle(priv);
+ } while (!ret);
+ }
+}
+
+/* Halt the receive and transmit queues */
+void gfar_halt(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+
+ gfar_halt_nodisable(dev);
+
+ /* Disable Rx and Tx */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
+ gfar_write(&regs->maccfg1, tempval);
+}
+
+static void free_grp_irqs(struct gfar_priv_grp *grp)
+{
+ free_irq(grp->interruptError, grp);
+ free_irq(grp->interruptTransmit, grp);
+ free_irq(grp->interruptReceive, grp);
+}
+
+void stop_gfar(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ unsigned long flags;
+ int i;
+
+ phy_stop(priv->phydev);
+
+ /* Lock it down */
+ local_irq_save(flags);
+ lock_tx_qs(priv);
+ lock_rx_qs(priv);
+
+ gfar_halt(dev);
+
+ unlock_rx_qs(priv);
+ unlock_tx_qs(priv);
+ local_irq_restore(flags);
+
+ /* Free the IRQs */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ for (i = 0; i < priv->num_grps; i++)
+ free_grp_irqs(&priv->gfargrp[i]);
+ } else {
+ for (i = 0; i < priv->num_grps; i++)
+ free_irq(priv->gfargrp[i].interruptTransmit,
+ &priv->gfargrp[i]);
+ }
+
+ free_skb_resources(priv);
+}
+
+static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
+{
+ struct txbd8 *txbdp;
+ struct gfar_private *priv = netdev_priv(tx_queue->dev);
+ int i, j;
+
+ txbdp = tx_queue->tx_bd_base;
+
+ for (i = 0; i < tx_queue->tx_ring_size; i++) {
+ if (!tx_queue->tx_skbuff[i])
+ continue;
+
+ dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
+ txbdp->length, DMA_TO_DEVICE);
+ txbdp->lstatus = 0;
+ for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
+ j++) {
+ txbdp++;
+ dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
+ txbdp->length, DMA_TO_DEVICE);
+ }
+ txbdp++;
+ dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
+ tx_queue->tx_skbuff[i] = NULL;
+ }
+ kfree(tx_queue->tx_skbuff);
+}
+
+static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
+{
+ struct rxbd8 *rxbdp;
+ struct gfar_private *priv = netdev_priv(rx_queue->dev);
+ int i;
+
+ rxbdp = rx_queue->rx_bd_base;
+
+ for (i = 0; i < rx_queue->rx_ring_size; i++) {
+ if (rx_queue->rx_skbuff[i]) {
+ dma_unmap_single(&priv->ofdev->dev,
+ rxbdp->bufPtr, priv->rx_buffer_size,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
+ rx_queue->rx_skbuff[i] = NULL;
+ }
+ rxbdp->lstatus = 0;
+ rxbdp->bufPtr = 0;
+ rxbdp++;
+ }
+ kfree(rx_queue->rx_skbuff);
+}
+
+/* If there are any tx skbs or rx skbs still around, free them.
+ * Then free tx_skbuff and rx_skbuff */
+static void free_skb_resources(struct gfar_private *priv)
+{
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ int i;
+
+ /* Go through all the buffer descriptors and free their data buffers */
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ tx_queue = priv->tx_queue[i];
+ if (tx_queue->tx_skbuff)
+ free_skb_tx_queue(tx_queue);
+ }
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_queue = priv->rx_queue[i];
+ if (rx_queue->rx_skbuff)
+ free_skb_rx_queue(rx_queue);
+ }
+
+ dma_free_coherent(&priv->ofdev->dev,
+ sizeof(struct txbd8) * priv->total_tx_ring_size +
+ sizeof(struct rxbd8) * priv->total_rx_ring_size,
+ priv->tx_queue[0]->tx_bd_base,
+ priv->tx_queue[0]->tx_bd_dma_base);
+ skb_queue_purge(&priv->rx_recycle);
+}
+
+void gfar_start(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+ int i = 0;
+
+ /* Enable Rx and Tx in MACCFG1 */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
+ gfar_write(&regs->maccfg1, tempval);
+
+ /* Initialize DMACTRL to have WWR and WOP */
+ tempval = gfar_read(&regs->dmactrl);
+ tempval |= DMACTRL_INIT_SETTINGS;
+ gfar_write(&regs->dmactrl, tempval);
+
+ /* Make sure we aren't stopped */
+ tempval = gfar_read(&regs->dmactrl);
+ tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
+ gfar_write(&regs->dmactrl, tempval);
+
+ for (i = 0; i < priv->num_grps; i++) {
+ regs = priv->gfargrp[i].regs;
+ /* Clear THLT/RHLT, so that the DMA starts polling now */
+ gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
+ gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
+ /* Unmask the interrupts we look for */
+ gfar_write(&regs->imask, IMASK_DEFAULT);
+ }
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+}
+
+void gfar_configure_coalescing(struct gfar_private *priv,
+ unsigned long tx_mask, unsigned long rx_mask)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 __iomem *baddr;
+ int i = 0;
+
+ /* Backward compatible case -- even if we enable
+ * multiple queues, there's only a single reg to program
+ */
+ gfar_write(&regs->txic, 0);
+ if (likely(priv->tx_queue[0]->txcoalescing))
+ gfar_write(&regs->txic, priv->tx_queue[0]->txic);
+
+ gfar_write(&regs->rxic, 0);
+ if (unlikely(priv->rx_queue[0]->rxcoalescing))
+ gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
+
+ if (priv->mode == MQ_MG_MODE) {
+ baddr = &regs->txic0;
+ for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
+ if (likely(priv->tx_queue[i]->txcoalescing)) {
+ gfar_write(baddr + i, 0);
+ gfar_write(baddr + i, priv->tx_queue[i]->txic);
+ }
+ }
+
+ baddr = &regs->rxic0;
+ for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
+ if (likely(priv->rx_queue[i]->rxcoalescing)) {
+ gfar_write(baddr + i, 0);
+ gfar_write(baddr + i, priv->rx_queue[i]->rxic);
+ }
+ }
+ }
+}
+
+static int register_grp_irqs(struct gfar_priv_grp *grp)
+{
+ struct gfar_private *priv = grp->priv;
+ struct net_device *dev = priv->ndev;
+ int err;
+
+ /* If the device has multiple interrupts, register for
+ * them. Otherwise, only register for the one */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ /* Install our interrupt handlers for Error,
+ * Transmit, and Receive */
+ if ((err = request_irq(grp->interruptError, gfar_error, 0,
+ grp->int_name_er, grp)) < 0) {
+ netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+ grp->interruptError);
+
+ goto err_irq_fail;
+ }
+
+ if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
+ 0, grp->int_name_tx, grp)) < 0) {
+ netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+ grp->interruptTransmit);
+ goto tx_irq_fail;
+ }
+
+ if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
+ grp->int_name_rx, grp)) < 0) {
+ netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+ grp->interruptReceive);
+ goto rx_irq_fail;
+ }
+ } else {
+ if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
+ grp->int_name_tx, grp)) < 0) {
+ netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+ grp->interruptTransmit);
+ goto err_irq_fail;
+ }
+ }
+
+ return 0;
+
+rx_irq_fail:
+ free_irq(grp->interruptTransmit, grp);
+tx_irq_fail:
+ free_irq(grp->interruptError, grp);
+err_irq_fail:
+ return err;
+}
+
+/* Bring the controller up and running */
+int startup_gfar(struct net_device *ndev)
+{
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct gfar __iomem *regs = NULL;
+ int err, i, j;
+
+ for (i = 0; i < priv->num_grps; i++) {
+ regs = priv->gfargrp[i].regs;
+ gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+ }
+
+ regs = priv->gfargrp[0].regs;
+ err = gfar_alloc_skb_resources(ndev);
+ if (err)
+ return err;
+
+ gfar_init_mac(ndev);
+
+ for (i = 0; i < priv->num_grps; i++) {
+ err = register_grp_irqs(&priv->gfargrp[i]);
+ if (err) {
+ for (j = 0; j < i; j++)
+ free_grp_irqs(&priv->gfargrp[j]);
+ goto irq_fail;
+ }
+ }
+
+ /* Start the controller */
+ gfar_start(ndev);
+
+ phy_start(priv->phydev);
+
+ gfar_configure_coalescing(priv, 0xFF, 0xFF);
+
+ return 0;
+
+irq_fail:
+ free_skb_resources(priv);
+ return err;
+}
+
+/* Called when something needs to use the ethernet device */
+/* Returns 0 for success. */
+static int gfar_enet_open(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int err;
+
+ enable_napi(priv);
+
+ skb_queue_head_init(&priv->rx_recycle);
+
+ /* Initialize a bunch of registers */
+ init_registers(dev);
+
+ gfar_set_mac_address(dev);
+
+ err = init_phy(dev);
+
+ if (err) {
+ disable_napi(priv);
+ return err;
+ }
+
+ err = startup_gfar(dev);
+ if (err) {
+ disable_napi(priv);
+ return err;
+ }
+
+ netif_tx_start_all_queues(dev);
+
+ device_set_wakeup_enable(&dev->dev, priv->wol_en);
+
+ return err;
+}
+
+static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
+{
+ struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
+
+ memset(fcb, 0, GMAC_FCB_LEN);
+
+ return fcb;
+}
+
+static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
+{
+ u8 flags = 0;
+
+ /* If we're here, it's an IP packet with a TCP or UDP
+ * payload. We set it to checksum, using a pseudo-header
+ * we provide
+ */
+ flags = TXFCB_DEFAULT;
+
+ /* Tell the controller what the protocol is */
+ /* And provide the already calculated phcs */
+ if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
+ flags |= TXFCB_UDP;
+ fcb->phcs = udp_hdr(skb)->check;
+ } else
+ fcb->phcs = tcp_hdr(skb)->check;
+
+ /* l3os is the distance between the start of the
+ * frame (skb->data) and the start of the IP hdr.
+ * l4os is the distance between the start of the
+ * l3 hdr and the l4 hdr */
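+ /* e.g. an untagged IPv4 frame typically yields l3os = 14
+ * (the Ethernet header; the FCB itself is not counted) and
+ * l4os = 20 (a minimal IP header) */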
+ fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
+ fcb->l4os = skb_network_header_len(skb);
+
+ fcb->flags = flags;
+}
+
+static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
+{
+ fcb->flags |= TXFCB_VLN;
+ fcb->vlctl = vlan_tx_tag_get(skb);
+}
+
+static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
+ struct txbd8 *base, int ring_size)
+{
+ struct txbd8 *new_bd = bdp + stride;
+
+ return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
+}
+
+static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
+ int ring_size)
+{
+ return skip_txbd(bdp, 1, base, ring_size);
+}
+
+/* This is called by the kernel when a frame is ready for transmission. */
+/* It is pointed to by the dev->hard_start_xmit function pointer */
+static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ struct netdev_queue *txq;
+ struct gfar __iomem *regs = NULL;
+ struct txfcb *fcb = NULL;
+ struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
+ u32 lstatus;
+ int i, rq = 0, do_tstamp = 0;
+ u32 bufaddr;
+ unsigned long flags;
+ unsigned int nr_frags, nr_txbds, length;
+
+ /*
+ * TOE=1 frames larger than 2500 bytes may see excess delays
+ * before start of transmission.
+ */
+ if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
+ skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb->len > 2500)) {
+ int ret;
+
+ ret = skb_checksum_help(skb);
+ if (ret)
+ return ret;
+ }
+
+ rq = skb->queue_mapping;
+ tx_queue = priv->tx_queue[rq];
+ txq = netdev_get_tx_queue(dev, rq);
+ base = tx_queue->tx_bd_base;
+ regs = tx_queue->grp->regs;
+
+ /* check if time stamp should be generated */
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ priv->hwts_tx_en))
+ do_tstamp = 1;
+
+ /* make space for additional header when fcb is needed */
+ if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
+ vlan_tx_tag_present(skb) ||
+ unlikely(do_tstamp)) &&
+ (skb_headroom(skb) < GMAC_FCB_LEN)) {
+ struct sk_buff *skb_new;
+
+ skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
+ if (!skb_new) {
+ dev->stats.tx_errors++;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ kfree_skb(skb);
+ skb = skb_new;
+ }
+
+ /* total number of fragments in the SKB */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ /* calculate the required number of TxBDs for this skb */
+ if (unlikely(do_tstamp))
+ nr_txbds = nr_frags + 2;
+ else
+ nr_txbds = nr_frags + 1;
+
+ /* check if there is space to queue this packet */
+ if (nr_txbds > tx_queue->num_txbdfree) {
+ /* no space, stop the queue */
+ netif_tx_stop_queue(txq);
+ dev->stats.tx_fifo_errors++;
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Update transmit stats */
+ tx_queue->stats.tx_bytes += skb->len;
+ tx_queue->stats.tx_packets++;
+
+ txbdp = txbdp_start = tx_queue->cur_tx;
+ lstatus = txbdp->lstatus;
+
+ /* Time stamp insertion requires one additional TxBD */
+ if (unlikely(do_tstamp))
+ txbdp_tstamp = txbdp = next_txbd(txbdp, base,
+ tx_queue->tx_ring_size);
+
+ if (nr_frags == 0) {
+ if (unlikely(do_tstamp))
+ txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
+ TXBD_INTERRUPT);
+ else
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+ } else {
+ /* Place the fragment addresses and lengths into the TxBDs */
+ for (i = 0; i < nr_frags; i++) {
+ /* Point at the next BD, wrapping as needed */
+ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+
+ length = skb_shinfo(skb)->frags[i].size;
+
+ lstatus = txbdp->lstatus | length |
+ BD_LFLAG(TXBD_READY);
+
+ /* Handle the last BD specially */
+ if (i == nr_frags - 1)
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+
+ bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
+ &skb_shinfo(skb)->frags[i],
+ 0,
+ length,
+ DMA_TO_DEVICE);
+
+ /* set the TxBD length and buffer pointer */
+ txbdp->bufPtr = bufaddr;
+ txbdp->lstatus = lstatus;
+ }
+
+ lstatus = txbdp_start->lstatus;
+ }
+
+ /* Set up checksumming */
+ if (CHECKSUM_PARTIAL == skb->ip_summed) {
+ fcb = gfar_add_fcb(skb);
+ /* Erratum 12: if the 8-byte FCB would cross a
+ * 32-byte boundary, checksum in software instead */
+ if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
+ && ((unsigned long)fcb % 0x20) > 0x18)) {
+ __skb_pull(skb, GMAC_FCB_LEN);
+ skb_checksum_help(skb);
+ } else {
+ lstatus |= BD_LFLAG(TXBD_TOE);
+ gfar_tx_checksum(skb, fcb);
+ }
+ }
+
+ if (vlan_tx_tag_present(skb)) {
+ if (unlikely(NULL == fcb)) {
+ fcb = gfar_add_fcb(skb);
+ lstatus |= BD_LFLAG(TXBD_TOE);
+ }
+
+ gfar_tx_vlan(skb, fcb);
+ }
+
+ /* Setup tx hardware time stamping if requested */
+ if (unlikely(do_tstamp)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ if (fcb == NULL)
+ fcb = gfar_add_fcb(skb);
+ fcb->ptp = 1;
+ lstatus |= BD_LFLAG(TXBD_TOE);
+ }
+
+ txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
+ /*
+ * If time stamping is requested, one additional TxBD must be set up. The
+ * first TxBD points to the FCB and must have a data length of
+ * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
+ * the full frame length.
+ */
+ if (unlikely(do_tstamp)) {
+ txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
+ txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
+ (skb_headlen(skb) - GMAC_FCB_LEN);
+ lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
+ } else {
+ lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+ }
+
+ /*
+ * We can work in parallel with gfar_clean_tx_ring(), except
+ * when modifying num_txbdfree. Note that we didn't grab the lock
+ * when we were reading the num_txbdfree and checking for available
+ * space, that's because outside of this function it can only grow,
+ * and once we've got needed space, it cannot suddenly disappear.
+ *
+ * The lock also protects us from gfar_error(), which can modify
+ * regs->tstat and thus retrigger the transfers, which is why we
+ * also must grab the lock before setting ready bit for the first
+ * to be transmitted BD.
+ */
+ spin_lock_irqsave(&tx_queue->txlock, flags);
+
+ /*
+ * The powerpc-specific eieio() is used, as wmb() has too strong
+ * semantics (it requires synchronization between cacheable and
+ * uncacheable mappings, which eieio doesn't provide and which we
+ * don't need), thus requiring a more expensive sync instruction. At
+ * some point, the set of architecture-independent barrier functions
+ * should be expanded to include weaker barriers.
+ */
+ eieio();
+
+ txbdp_start->lstatus = lstatus;
+
+ eieio(); /* force lstatus write before tx_skbuff */
+
+ tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
+
+ /* Update the current skb pointer to the next entry we will use
+ * (wrapping if necessary) */
+ tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
+ TX_RING_MOD_MASK(tx_queue->tx_ring_size);
+
+ tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+
+ /* reduce TxBD free count */
+ tx_queue->num_txbdfree -= (nr_txbds);
+
+ /* If the next BD still needs to be cleaned up, then the BDs
+ * are full. We need to tell the kernel to stop sending us stuff. */
+ if (!tx_queue->num_txbdfree) {
+ netif_tx_stop_queue(txq);
+
+ dev->stats.tx_fifo_errors++;
+ }
+
+ /* Tell the DMA to go go go */
+ gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
+
+ /* Unlock priv */
+ spin_unlock_irqrestore(&tx_queue->txlock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/* Stops the kernel queue, and halts the controller */
+static int gfar_close(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ disable_napi(priv);
+
+ cancel_work_sync(&priv->reset_task);
+ stop_gfar(dev);
+
+ /* Disconnect from the PHY */
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+
+ netif_tx_stop_all_queues(dev);
+
+ return 0;
+}
+
+/* Changes the mac address if the controller is not running. */
+static int gfar_set_mac_address(struct net_device *dev)
+{
+ gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
+
+ return 0;
+}
+
+/* Check if rx parser should be activated */
+void gfar_check_rx_parser_mode(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs;
+ u32 tempval;
+
+ regs = priv->gfargrp[0].regs;
+
+ tempval = gfar_read(&regs->rctrl);
+ /* Keep the parser enabled while any feature still requires
+ * it; otherwise disable it */
+ if (tempval & RCTRL_REQ_PARSER)
+ tempval |= RCTRL_PRSDEP_INIT;
+ else
+ tempval &= ~RCTRL_PRSDEP_INIT;
+ gfar_write(&regs->rctrl, tempval);
+}
+
+/* Enables and disables VLAN insertion/extraction */
+void gfar_vlan_mode(struct net_device *dev, u32 features)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = NULL;
+ unsigned long flags;
+ u32 tempval;
+
+ regs = priv->gfargrp[0].regs;
+ local_irq_save(flags);
+ lock_rx_qs(priv);
+
+ if (features & NETIF_F_HW_VLAN_TX) {
+ /* Enable VLAN tag insertion */
+ tempval = gfar_read(&regs->tctrl);
+ tempval |= TCTRL_VLINS;
+ gfar_write(&regs->tctrl, tempval);
+ } else {
+ /* Disable VLAN tag insertion */
+ tempval = gfar_read(&regs->tctrl);
+ tempval &= ~TCTRL_VLINS;
+ gfar_write(&regs->tctrl, tempval);
+ }
+
+ if (features & NETIF_F_HW_VLAN_RX) {
+ /* Enable VLAN tag extraction */
+ tempval = gfar_read(&regs->rctrl);
+ tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
+ gfar_write(&regs->rctrl, tempval);
+ } else {
+ /* Disable VLAN tag extraction */
+ tempval = gfar_read(&regs->rctrl);
+ tempval &= ~RCTRL_VLEX;
+ gfar_write(&regs->rctrl, tempval);
+
+ gfar_check_rx_parser_mode(priv);
+ }
+
+ gfar_change_mtu(dev, dev->mtu);
+
+ unlock_rx_qs(priv);
+ local_irq_restore(flags);
+}
+
+static int gfar_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int tempsize, tempval;
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ int oldsize = priv->rx_buffer_size;
+ int frame_size = new_mtu + ETH_HLEN;
+
+ if (gfar_is_vlan_on(priv))
+ frame_size += VLAN_HLEN;
+
+ if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
+ netif_err(priv, drv, dev, "Invalid MTU setting\n");
+ return -EINVAL;
+ }
+
+ if (gfar_uses_fcb(priv))
+ frame_size += GMAC_FCB_LEN;
+
+ frame_size += priv->padding;
+
+ tempsize =
+ (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
+ INCREMENTAL_BUFFER_SIZE;
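+ /* e.g. a frame_size of 1518 rounds up to a 1536-byte buffer */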
+
+ /* Only stop and start the controller if it isn't already
+ * stopped, and we changed something */
+ if ((oldsize != tempsize) && (dev->flags & IFF_UP))
+ stop_gfar(dev);
+
+ priv->rx_buffer_size = tempsize;
+
+ dev->mtu = new_mtu;
+
+ gfar_write(&regs->mrblr, priv->rx_buffer_size);
+ gfar_write(&regs->maxfrm, priv->rx_buffer_size);
+
+ /* If the mtu is larger than the max size for standard
+ * ethernet frames (i.e., a jumbo frame), then set maccfg2
+ * to allow huge frames, and to check the length */
+ tempval = gfar_read(&regs->maccfg2);
+
+ if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
+ gfar_has_errata(priv, GFAR_ERRATA_74))
+ tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
+ else
+ tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
+
+ gfar_write(&regs->maccfg2, tempval);
+
+ if ((oldsize != tempsize) && (dev->flags & IFF_UP))
+ startup_gfar(dev);
+
+ return 0;
+}
+
+/* gfar_reset_task gets scheduled when a packet has not been
+ * transmitted after a set amount of time.
+ * For now, assume that clearing out all the structures, and
+ * starting over will fix the problem.
+ */
+static void gfar_reset_task(struct work_struct *work)
+{
+ struct gfar_private *priv = container_of(work, struct gfar_private,
+ reset_task);
+ struct net_device *dev = priv->ndev;
+
+ if (dev->flags & IFF_UP) {
+ netif_tx_stop_all_queues(dev);
+ stop_gfar(dev);
+ startup_gfar(dev);
+ netif_tx_start_all_queues(dev);
+ }
+
+ netif_tx_schedule_all(dev);
+}
+
+static void gfar_timeout(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ dev->stats.tx_errors++;
+ schedule_work(&priv->reset_task);
+}
+
+static void gfar_align_skb(struct sk_buff *skb)
+{
+ /* We need the data buffer to be aligned properly. We will reserve
+ * as many bytes as needed to align the data properly
+ */
+ skb_reserve(skb, RXBUF_ALIGNMENT -
+ (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
+}
+
+/* Interrupt Handler for Transmit complete */
+static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
+{
+ struct net_device *dev = tx_queue->dev;
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ struct txbd8 *bdp, *next = NULL;
+ struct txbd8 *lbdp = NULL;
+ struct txbd8 *base = tx_queue->tx_bd_base;
+ struct sk_buff *skb;
+ int skb_dirtytx;
+ int tx_ring_size = tx_queue->tx_ring_size;
+ int frags = 0, nr_txbds = 0;
+ int i;
+ int howmany = 0;
+ u32 lstatus;
+ size_t buflen;
+
+ rx_queue = priv->rx_queue[tx_queue->qindex];
+ bdp = tx_queue->dirty_tx;
+ skb_dirtytx = tx_queue->skb_dirtytx;
+
+ while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
+ unsigned long flags;
+
+ frags = skb_shinfo(skb)->nr_frags;
+
+ /*
+ * When time stamping, one additional TxBD must be freed.
+ * Also, we need to dma_unmap_single() the TxPAL.
+ */
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ nr_txbds = frags + 2;
+ else
+ nr_txbds = frags + 1;
+
+ lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
+
+ lstatus = lbdp->lstatus;
+
+ /* Only clean completed frames */
+ if ((lstatus & BD_LFLAG(TXBD_READY)) &&
+ (lstatus & BD_LENGTH_MASK))
+ break;
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ next = next_txbd(bdp, base, tx_ring_size);
+ buflen = next->length + GMAC_FCB_LEN;
+ } else
+ buflen = bdp->length;
+
+ dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+ buflen, DMA_TO_DEVICE);
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ struct skb_shared_hwtstamps shhwtstamps;
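+ /* The hardware writes the 64-bit timestamp near the head of
+ * the buffer: 0x10 skips the prepended bytes and the mask
+ * 8-byte aligns the pointer (assumed TxPAL layout) */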
+ u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(*ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+ bdp = next;
+ }
+
+ bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+ bdp = next_txbd(bdp, base, tx_ring_size);
+
+ for (i = 0; i < frags; i++) {
+ dma_unmap_page(&priv->ofdev->dev,
+ bdp->bufPtr,
+ bdp->length,
+ DMA_TO_DEVICE);
+ bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+ bdp = next_txbd(bdp, base, tx_ring_size);
+ }
+
+ /*
+ * If there's room in the queue (limit it to rx_buffer_size)
+ * we add this skb back into the pool, if it's the right size
+ */
+ if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
+ skb_recycle_check(skb, priv->rx_buffer_size +
+ RXBUF_ALIGNMENT)) {
+ gfar_align_skb(skb);
+ skb_queue_head(&priv->rx_recycle, skb);
+ } else
+ dev_kfree_skb_any(skb);
+
+ tx_queue->tx_skbuff[skb_dirtytx] = NULL;
+
+ skb_dirtytx = (skb_dirtytx + 1) &
+ TX_RING_MOD_MASK(tx_ring_size);
+
+ howmany++;
+ spin_lock_irqsave(&tx_queue->txlock, flags);
+ tx_queue->num_txbdfree += nr_txbds;
+ spin_unlock_irqrestore(&tx_queue->txlock, flags);
+ }
+
+ /* If we freed a buffer, we can restart transmission, if necessary */
+ if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
+ netif_wake_subqueue(dev, tx_queue->qindex);
+
+ /* Update dirty indicators */
+ tx_queue->skb_dirtytx = skb_dirtytx;
+ tx_queue->dirty_tx = bdp;
+
+ return howmany;
+}
+
+static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&gfargrp->grplock, flags);
+ if (napi_schedule_prep(&gfargrp->napi)) {
+ gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
+ __napi_schedule(&gfargrp->napi);
+ } else {
+ /*
+ * Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived.
+ */
+ gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
+ }
+ spin_unlock_irqrestore(&gfargrp->grplock, flags);
+}
+
+/* Interrupt Handler for Transmit complete */
+static irqreturn_t gfar_transmit(int irq, void *grp_id)
+{
+ gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
+ return IRQ_HANDLED;
+}
+
+static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
+ struct sk_buff *skb)
+{
+ struct net_device *dev = rx_queue->dev;
+ struct gfar_private *priv = netdev_priv(dev);
+ dma_addr_t buf;
+
+ buf = dma_map_single(&priv->ofdev->dev, skb->data,
+ priv->rx_buffer_size, DMA_FROM_DEVICE);
+ gfar_init_rxbdp(rx_queue, bdp, buf);
+}
+
+static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct sk_buff *skb = NULL;
+
+ skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
+ if (!skb)
+ return NULL;
+
+ gfar_align_skb(skb);
+
+ return skb;
+}
+
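+/* Reuse an skb from the recycle pool when one is available;
+ * otherwise fall back to a fresh allocation */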
+struct sk_buff *gfar_new_skb(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct sk_buff *skb = NULL;
+
+ skb = skb_dequeue(&priv->rx_recycle);
+ if (!skb)
+ skb = gfar_alloc_skb(dev);
+
+ return skb;
+}
+
+static inline void count_errors(unsigned short status, struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct gfar_extra_stats *estats = &priv->extra_stats;
+
+ /* If the packet was truncated, none of the other errors
+ * matter */
+ if (status & RXBD_TRUNCATED) {
+ stats->rx_length_errors++;
+
+ estats->rx_trunc++;
+
+ return;
+ }
+ /* Count the errors, if there were any */
+ if (status & (RXBD_LARGE | RXBD_SHORT)) {
+ stats->rx_length_errors++;
+
+ if (status & RXBD_LARGE)
+ estats->rx_large++;
+ else
+ estats->rx_short++;
+ }
+ if (status & RXBD_NONOCTET) {
+ stats->rx_frame_errors++;
+ estats->rx_nonoctet++;
+ }
+ if (status & RXBD_CRCERR) {
+ estats->rx_crcerr++;
+ stats->rx_crc_errors++;
+ }
+ if (status & RXBD_OVERRUN) {
+ estats->rx_overrun++;
+ stats->rx_crc_errors++;
+ }
+}
+
+irqreturn_t gfar_receive(int irq, void *grp_id)
+{
+ gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
+ return IRQ_HANDLED;
+}
+
+static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
+{
+ /* If valid headers were found, and valid sums
+ * were verified, then we tell the kernel that no
+ * checksumming is necessary. Otherwise, leave it to the stack */
+ if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+}
+
+/* gfar_process_frame() -- handle one incoming packet if skb
+ * isn't NULL. */
+static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+ int amount_pull)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct rxfcb *fcb = NULL;
+
+ int ret;
+
+ /* The FCB, if present, sits at the start of the buffer */
+ fcb = (struct rxfcb *)skb->data;
+
+ /* Remove the FCB from the skb */
+ /* Remove the padded bytes, if there are any */
+ if (amount_pull) {
+ skb_record_rx_queue(skb, fcb->rq);
+ skb_pull(skb, amount_pull);
+ }
+
+ /* Get receive timestamp from the skb */
+ if (priv->hwts_rx_en) {
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ u64 *ns = (u64 *) skb->data;
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(*ns);
+ }
+
+ if (priv->padding)
+ skb_pull(skb, priv->padding);
+
+ if (dev->features & NETIF_F_RXCSUM)
+ gfar_rx_checksum(skb, fcb);
+
+ /* Tell the skb what kind of packet this is */
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /*
+ * We need to check for NETIF_F_HW_VLAN_RX here.
+ * Even if vlan rx accel is disabled, on some chips
+ * RXFCB_VLN is set pseudo-randomly.
+ */
+ if (dev->features & NETIF_F_HW_VLAN_RX &&
+ fcb->flags & RXFCB_VLN)
+ __vlan_hwaccel_put_tag(skb, fcb->vlctl);
+
+ /* Send the packet up the stack */
+ ret = netif_receive_skb(skb);
+
+ if (NET_RX_DROP == ret)
+ priv->extra_stats.kernel_dropped++;
+
+ return 0;
+}
+
+/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
+ * until the budget/quota has been reached. Returns the number
+ * of frames handled
+ */
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
+{
+ struct net_device *dev = rx_queue->dev;
+ struct rxbd8 *bdp, *base;
+ struct sk_buff *skb;
+ int pkt_len;
+ int amount_pull;
+ int howmany = 0;
+ struct gfar_private *priv = netdev_priv(dev);
+
+ /* Get the first full descriptor */
+ bdp = rx_queue->cur_rx;
+ base = rx_queue->rx_bd_base;
+
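+ /* When the controller prepends an FCB to each frame, it must
+ * be pulled off before the skb is passed up the stack */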
+ amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
+
+ while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
+ struct sk_buff *newskb;
+ rmb();
+
+ /* Add another skb for the future */
+ newskb = gfar_new_skb(dev);
+
+ skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
+
+ dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+ priv->rx_buffer_size, DMA_FROM_DEVICE);
+
+ if (unlikely(!(bdp->status & RXBD_ERR) &&
+ bdp->length > priv->rx_buffer_size))
+ bdp->status = RXBD_LARGE;
+
+ /* We drop the frame if we failed to allocate a new buffer */
+ if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
+ bdp->status & RXBD_ERR)) {
+ count_errors(bdp->status, dev);
+
+ if (unlikely(!newskb))
+ newskb = skb;
+ else if (skb)
+ skb_queue_head(&priv->rx_recycle, skb);
+ } else {
+ /* Increment the number of packets */
+ rx_queue->stats.rx_packets++;
+ howmany++;
+
+ if (likely(skb)) {
+ /* Remove the FCS from the packet length */
+ pkt_len = bdp->length - ETH_FCS_LEN;
+ skb_put(skb, pkt_len);
+ rx_queue->stats.rx_bytes += pkt_len;
+ skb_record_rx_queue(skb, rx_queue->qindex);
+ gfar_process_frame(dev, skb, amount_pull);
+
+ } else {
+ netif_warn(priv, rx_err, dev, "Missing skb!\n");
+ rx_queue->stats.rx_dropped++;
+ priv->extra_stats.rx_skbmissing++;
+ }
+
+ }
+
+ rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
+
+ /* Setup the new bdp */
+ gfar_new_rxbdp(rx_queue, bdp, newskb);
+
+ /* Update to the next pointer */
+ bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
+
+ /* update to point at the next skb */
+ rx_queue->skb_currx =
+ (rx_queue->skb_currx + 1) &
+ RX_RING_MOD_MASK(rx_queue->rx_ring_size);
+ }
+
+ /* Update the current rxbd pointer to be the next one */
+ rx_queue->cur_rx = bdp;
+
+ return howmany;
+}
+
+static int gfar_poll(struct napi_struct *napi, int budget)
+{
+ struct gfar_priv_grp *gfargrp = container_of(napi,
+ struct gfar_priv_grp, napi);
+ struct gfar_private *priv = gfargrp->priv;
+ struct gfar __iomem *regs = gfargrp->regs;
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
+ int tx_cleaned = 0, i, left_over_budget = budget;
+ unsigned long serviced_queues = 0;
+ int num_queues = 0;
+
+ num_queues = gfargrp->num_rx_queues;
+ budget_per_queue = budget / num_queues;
+
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived */
+ gfar_write(&regs->ievent, IEVENT_RTX_MASK);
+
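+ /* Split the budget evenly over this group's rx queues; budget
+ * left unused by quiet queues is redistributed to those that
+ * are still busy */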
+ while (num_queues && left_over_budget) {
+ budget_per_queue = left_over_budget / num_queues;
+ left_over_budget = 0;
+
+ for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+ if (test_bit(i, &serviced_queues))
+ continue;
+ rx_queue = priv->rx_queue[i];
+ tx_queue = priv->tx_queue[rx_queue->qindex];
+
+ tx_cleaned += gfar_clean_tx_ring(tx_queue);
+ rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
+ budget_per_queue);
+ rx_cleaned += rx_cleaned_per_queue;
+ if (rx_cleaned_per_queue < budget_per_queue) {
+ left_over_budget = left_over_budget +
+ (budget_per_queue - rx_cleaned_per_queue);
+ set_bit(i, &serviced_queues);
+ num_queues--;
+ }
+ }
+ }
+
+ if (tx_cleaned)
+ return budget;
+
+ if (rx_cleaned < budget) {
+ napi_complete(napi);
+
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&regs->rstat, gfargrp->rstat);
+
+ gfar_write(&regs->imask, IMASK_DEFAULT);
+
+ /* If we are coalescing interrupts, update the timer */
+ /* Otherwise, clear it */
+ gfar_configure_coalescing(priv,
+ gfargrp->rx_bit_map, gfargrp->tx_bit_map);
+ }
+
+ return rx_cleaned;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void gfar_netpoll(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int i = 0;
+
+ /* If the device has multiple interrupts, run tx/rx */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ for (i = 0; i < priv->num_grps; i++) {
+ disable_irq(priv->gfargrp[i].interruptTransmit);
+ disable_irq(priv->gfargrp[i].interruptReceive);
+ disable_irq(priv->gfargrp[i].interruptError);
+ gfar_interrupt(priv->gfargrp[i].interruptTransmit,
+ &priv->gfargrp[i]);
+ enable_irq(priv->gfargrp[i].interruptError);
+ enable_irq(priv->gfargrp[i].interruptReceive);
+ enable_irq(priv->gfargrp[i].interruptTransmit);
+ }
+ } else {
+ for (i = 0; i < priv->num_grps; i++) {
+ disable_irq(priv->gfargrp[i].interruptTransmit);
+ gfar_interrupt(priv->gfargrp[i].interruptTransmit,
+ &priv->gfargrp[i]);
+ enable_irq(priv->gfargrp[i].interruptTransmit);
+ }
+ }
+}
+#endif
+
+/* The interrupt handler for devices with one interrupt */
+static irqreturn_t gfar_interrupt(int irq, void *grp_id)
+{
+ struct gfar_priv_grp *gfargrp = grp_id;
+
+ /* Save ievent for future reference */
+ u32 events = gfar_read(&gfargrp->regs->ievent);
+
+ /* Check for reception */
+ if (events & IEVENT_RX_MASK)
+ gfar_receive(irq, grp_id);
+
+ /* Check for transmit completion */
+ if (events & IEVENT_TX_MASK)
+ gfar_transmit(irq, grp_id);
+
+ /* Check for errors */
+ if (events & IEVENT_ERR_MASK)
+ gfar_error(irq, grp_id);
+
+ return IRQ_HANDLED;
+}
+
+/* Called every time the controller might need to be made
+ * aware of new link state. The PHY code conveys this
+ * information through variables in the phydev structure, and this
+ * function converts those variables into the appropriate
+ * register values, and can bring down the device if needed.
+ */
+static void adjust_link(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ unsigned long flags;
+ struct phy_device *phydev = priv->phydev;
+ int new_state = 0;
+
+ local_irq_save(flags);
+ lock_tx_qs(priv);
+
+ if (phydev->link) {
+ u32 tempval = gfar_read(&regs->maccfg2);
+ u32 ecntrl = gfar_read(&regs->ecntrl);
+
+ /* Now we make sure that we can be in full duplex mode.
+ * If not, we operate in half-duplex mode. */
+ if (phydev->duplex != priv->oldduplex) {
+ new_state = 1;
+ if (!(phydev->duplex))
+ tempval &= ~(MACCFG2_FULL_DUPLEX);
+ else
+ tempval |= MACCFG2_FULL_DUPLEX;
+
+ priv->oldduplex = phydev->duplex;
+ }
+
+ if (phydev->speed != priv->oldspeed) {
+ new_state = 1;
+ switch (phydev->speed) {
+ case 1000:
+ tempval =
+ ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+
+ ecntrl &= ~(ECNTRL_R100);
+ break;
+ case 100:
+ case 10:
+ tempval =
+ ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
+
+ /* Reduced mode distinguishes
+ * between 10 and 100 */
+ if (phydev->speed == SPEED_100)
+ ecntrl |= ECNTRL_R100;
+ else
+ ecntrl &= ~(ECNTRL_R100);
+ break;
+ default:
+ netif_warn(priv, link, dev,
+ "Ack! Speed (%d) is not 10/100/1000!\n",
+ phydev->speed);
+ break;
+ }
+
+ priv->oldspeed = phydev->speed;
+ }
+
+ gfar_write(&regs->maccfg2, tempval);
+ gfar_write(&regs->ecntrl, ecntrl);
+
+ if (!priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 1;
+ }
+ } else if (priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+ }
+
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+ unlock_tx_qs(priv);
+ local_irq_restore(flags);
+}
+
+/* Update the hash table based on the current list of multicast
+ * addresses we subscribe to. Also, change the promiscuity of
+ * the device based on the flags (this function is called
+ * whenever dev->flags is changed) */
+static void gfar_set_multi(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Set RCTRL to PROM */
+ tempval = gfar_read(&regs->rctrl);
+ tempval |= RCTRL_PROM;
+ gfar_write(&regs->rctrl, tempval);
+ } else {
+ /* Set RCTRL to not PROM */
+ tempval = gfar_read(&regs->rctrl);
+ tempval &= ~(RCTRL_PROM);
+ gfar_write(&regs->rctrl, tempval);
+ }
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /* Set the hash to rx all multicast frames */
+ gfar_write(&regs->igaddr0, 0xffffffff);
+ gfar_write(&regs->igaddr1, 0xffffffff);
+ gfar_write(&regs->igaddr2, 0xffffffff);
+ gfar_write(&regs->igaddr3, 0xffffffff);
+ gfar_write(&regs->igaddr4, 0xffffffff);
+ gfar_write(&regs->igaddr5, 0xffffffff);
+ gfar_write(&regs->igaddr6, 0xffffffff);
+ gfar_write(&regs->igaddr7, 0xffffffff);
+ gfar_write(&regs->gaddr0, 0xffffffff);
+ gfar_write(&regs->gaddr1, 0xffffffff);
+ gfar_write(&regs->gaddr2, 0xffffffff);
+ gfar_write(&regs->gaddr3, 0xffffffff);
+ gfar_write(&regs->gaddr4, 0xffffffff);
+ gfar_write(&regs->gaddr5, 0xffffffff);
+ gfar_write(&regs->gaddr6, 0xffffffff);
+ gfar_write(&regs->gaddr7, 0xffffffff);
+ } else {
+ int em_num;
+ int idx;
+
+ /* zero out the hash */
+ gfar_write(&regs->igaddr0, 0x0);
+ gfar_write(&regs->igaddr1, 0x0);
+ gfar_write(&regs->igaddr2, 0x0);
+ gfar_write(&regs->igaddr3, 0x0);
+ gfar_write(&regs->igaddr4, 0x0);
+ gfar_write(&regs->igaddr5, 0x0);
+ gfar_write(&regs->igaddr6, 0x0);
+ gfar_write(&regs->igaddr7, 0x0);
+ gfar_write(&regs->gaddr0, 0x0);
+ gfar_write(&regs->gaddr1, 0x0);
+ gfar_write(&regs->gaddr2, 0x0);
+ gfar_write(&regs->gaddr3, 0x0);
+ gfar_write(&regs->gaddr4, 0x0);
+ gfar_write(&regs->gaddr5, 0x0);
+ gfar_write(&regs->gaddr6, 0x0);
+ gfar_write(&regs->gaddr7, 0x0);
+
+ /* If we have extended hash tables, we need to
+ * clear the exact match registers to prepare for
+ * setting them */
+ if (priv->extended_hash) {
+ em_num = GFAR_EM_NUM + 1;
+ gfar_clear_exact_match(dev);
+ idx = 1;
+ } else {
+ idx = 0;
+ em_num = 0;
+ }
+
+ if (netdev_mc_empty(dev))
+ return;
+
+ /* Parse the list, and set the appropriate bits */
+ netdev_for_each_mc_addr(ha, dev) {
+ if (idx < em_num) {
+ gfar_set_mac_for_addr(dev, idx, ha->addr);
+ idx++;
+ } else
+ gfar_set_hash_for_addr(dev, ha->addr);
+ }
+ }
+}
+
+/* Clears each of the exact match registers to zero, so they
+ * don't interfere with normal reception */
+static void gfar_clear_exact_match(struct net_device *dev)
+{
+ int idx;
+ static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
+
+ for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
+ gfar_set_mac_for_addr(dev, idx, zero_arr);
+}
+
+/* Set the appropriate hash bit for the given addr */
+/* The algorithm works like so:
+ * 1) Take the Destination Address (ie the multicast address), and
+ * do a CRC on it (little endian), and reverse the bits of the
+ * result.
+ * 2) Use the 8 most significant bits as a hash into a 256-entry
+ * table. The table is controlled through 8 32-bit registers:
+ * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
+ * entry 255. This means that the 3 most significant bits in the
+ * hash index indicate which gaddr register to use, and the 5
+ * other bits indicate which bit (assuming an IBM numbering
+ * scheme, which for PowerPC (tm) is usually the case) in the
+ * register holds the entry. */
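+/* e.g. with hash_width == 8, whichreg takes the top 3 CRC bits
+ * (31-29) to pick one of gaddr0-7 and whichbit the next 5 bits
+ * (28-24); the 9-bit extended hash widens the register index to
+ * 4 bits to cover all 16 hash registers */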
+static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
+{
+ u32 tempval;
+ struct gfar_private *priv = netdev_priv(dev);
+ u32 result = ether_crc(MAC_ADDR_LEN, addr);
+ int width = priv->hash_width;
+ u8 whichbit = (result >> (32 - width)) & 0x1f;
+ u8 whichreg = result >> (32 - width + 5);
+ u32 value = (1 << (31 - whichbit));
+
+ tempval = gfar_read(priv->hash_regs[whichreg]);
+ tempval |= value;
+ gfar_write(priv->hash_regs[whichreg], tempval);
+}
+
+/* There are multiple MAC address register pairs on some controllers.
+ * This function sets the num'th pair to a given address
+ */
+static void gfar_set_mac_for_addr(struct net_device *dev, int num,
+ const u8 *addr)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ int idx;
+ char tmpbuf[MAC_ADDR_LEN];
+ u32 tempval;
+ u32 __iomem *macptr = &regs->macstnaddr1;
+
+ macptr += num * 2;
+
+ /* Now copy it into the mac registers backwards: the hardware
+ * expects the address bytes in reversed order */
+ for (idx = 0; idx < MAC_ADDR_LEN; idx++)
+ tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
+
+ gfar_write(macptr, *((u32 *) (tmpbuf)));
+
+ tempval = *((u32 *) (tmpbuf + 4));
+
+ gfar_write(macptr+1, tempval);
+}
+
+/* GFAR error interrupt handler */
+static irqreturn_t gfar_error(int irq, void *grp_id)
+{
+ struct gfar_priv_grp *gfargrp = grp_id;
+ struct gfar __iomem *regs = gfargrp->regs;
+ struct gfar_private *priv = gfargrp->priv;
+ struct net_device *dev = priv->ndev;
+
+ /* Save ievent for future reference */
+ u32 events = gfar_read(&regs->ievent);
+
+ /* Clear IEVENT */
+ gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
+
+ /* Magic Packet is not an error. */
+ if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
+ (events & IEVENT_MAG))
+ events &= ~IEVENT_MAG;
+
+ /* Dump the event and mask bits if error debugging is enabled */
+ if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
+ netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n",
+ events, gfar_read(&regs->imask));
+
+ /* Update the error counters */
+ if (events & IEVENT_TXE) {
+ dev->stats.tx_errors++;
+
+ if (events & IEVENT_LC)
+ dev->stats.tx_window_errors++;
+ if (events & IEVENT_CRL)
+ dev->stats.tx_aborted_errors++;
+ if (events & IEVENT_XFUN) {
+ unsigned long flags;
+
+ netif_dbg(priv, tx_err, dev,
+ "TX FIFO underrun, packet dropped\n");
+ dev->stats.tx_dropped++;
+ priv->extra_stats.tx_underrun++;
+
+ local_irq_save(flags);
+ lock_tx_qs(priv);
+
+ /* Reactivate the Tx Queues */
+ gfar_write(&regs->tstat, gfargrp->tstat);
+
+ unlock_tx_qs(priv);
+ local_irq_restore(flags);
+ }
+ netif_dbg(priv, tx_err, dev, "Transmit Error\n");
+ }
+ if (events & IEVENT_BSY) {
+ dev->stats.rx_errors++;
+ priv->extra_stats.rx_bsy++;
+
+ gfar_receive(irq, grp_id);
+
+ netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
+ gfar_read(&regs->rstat));
+ }
+ if (events & IEVENT_BABR) {
+ dev->stats.rx_errors++;
+ priv->extra_stats.rx_babr++;
+
+ netif_dbg(priv, rx_err, dev, "babbling RX error\n");
+ }
+ if (events & IEVENT_EBERR) {
+ priv->extra_stats.eberr++;
+ netif_dbg(priv, rx_err, dev, "bus error\n");
+ }
+ if (events & IEVENT_RXC)
+ netif_dbg(priv, rx_status, dev, "control frame\n");
+
+ if (events & IEVENT_BABT) {
+ priv->extra_stats.tx_babt++;
+ netif_dbg(priv, tx_err, dev, "babbling TX error\n");
+ }
+ return IRQ_HANDLED;
+}
+
+static struct of_device_id gfar_match[] = {
+ {
+ .type = "network",
+ .compatible = "gianfar",
+ },
+ {
+ .compatible = "fsl,etsec2",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gfar_match);
+
+/* Structure for a device driver */
+static struct platform_driver gfar_driver = {
+ .driver = {
+ .name = "fsl-gianfar",
+ .owner = THIS_MODULE,
+ .pm = GFAR_PM_OPS,
+ .of_match_table = gfar_match,
+ },
+ .probe = gfar_probe,
+ .remove = gfar_remove,
+};
+
+static int __init gfar_init(void)
+{
+ return platform_driver_register(&gfar_driver);
+}
+
+static void __exit gfar_exit(void)
+{
+ platform_driver_unregister(&gfar_driver);
+}
+
+module_init(gfar_init);
+module_exit(gfar_exit);
+
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
new file mode 100644
index 000000000000..9aa43773e8e3
--- /dev/null
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -0,0 +1,1216 @@
+/*
+ * drivers/net/gianfar.h
+ *
+ * Gianfar Ethernet Driver
+ * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
+ * Based on 8260_io/fcc_enet.c
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
+ *
+ * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Still left to do:
+ * -Add support for module parameters
+ * -Add patch for ethtool phys id
+ */
+#ifndef __GIANFAR_H
+#define __GIANFAR_H
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <linux/workqueue.h>
+#include <linux/ethtool.h>
+
+struct ethtool_flow_spec_container {
+ struct ethtool_rx_flow_spec fs;
+ struct list_head list;
+};
+
+struct ethtool_rx_list {
+ struct list_head list;
+ unsigned int count;
+};
+
+/* The maximum number of packets to be handled in one call of gfar_poll */
+#define GFAR_DEV_WEIGHT 64
+
+/* Length for FCB */
+#define GMAC_FCB_LEN 8
+
+/* Default padding amount */
+#define DEFAULT_PADDING 2
+
+/* Number of bytes to align the rx bufs to */
+#define RXBUF_ALIGNMENT 64
+
+/* The number of bytes which compose a unit for the purpose of
+ * allocating data buffers. I.e., for any given MTU, the data buffer
+ * will be the next highest multiple of 512 bytes. */
+#define INCREMENTAL_BUFFER_SIZE 512
+
+
+#define MAC_ADDR_LEN 6
+
+#define PHY_INIT_TIMEOUT 100000
+#define GFAR_PHY_CHANGE_TIME 2
+
+#define DEVICE_NAME "%s: Gianfar Ethernet Controller Version 1.2, "
+#define DRV_NAME "gfar-enet"
+extern const char gfar_driver_name[];
+extern const char gfar_driver_version[];
+
+/* MAXIMUM NUMBER OF QUEUES SUPPORTED */
+#define MAX_TX_QS 0x8
+#define MAX_RX_QS 0x8
+
+/* MAXIMUM NUMBER OF GROUPS SUPPORTED */
+#define MAXGROUPS 0x2
+
+/* These need to be powers of 2 for this driver */
+#define DEFAULT_TX_RING_SIZE 256
+#define DEFAULT_RX_RING_SIZE 256
+
+#define GFAR_RX_MAX_RING_SIZE 256
+#define GFAR_TX_MAX_RING_SIZE 256
+
+#define GFAR_MAX_FIFO_THRESHOLD 511
+#define GFAR_MAX_FIFO_STARVE 511
+#define GFAR_MAX_FIFO_STARVE_OFF 511
+
+#define DEFAULT_RX_BUFFER_SIZE 1536
+#define TX_RING_MOD_MASK(size) (size-1)
+#define RX_RING_MOD_MASK(size) (size-1)
+#define JUMBO_BUFFER_SIZE 9728
+#define JUMBO_FRAME_SIZE 9600
+
+#define DEFAULT_FIFO_TX_THR 0x100
+#define DEFAULT_FIFO_TX_STARVE 0x40
+#define DEFAULT_FIFO_TX_STARVE_OFF 0x80
+#define DEFAULT_BD_STASH 1
+#define DEFAULT_STASH_LENGTH 96
+#define DEFAULT_STASH_INDEX 0
+
+/* The number of Exact Match registers */
+#define GFAR_EM_NUM 15
+
+/* Latency of the interface clock, in nanoseconds.
+ * Interface clock latency, in this case, means the
+ * time described by a value of 1 in the interrupt
+ * coalescing registers' time fields. Since those fields
+ * refer to the time it takes for 64 clocks to pass, the
+ * latencies are as such:
+ * GBIT = 125MHz => 8ns/clock => 8*64 ns / tick
+ * 100 = 25 MHz => 40ns/clock => 40*64 ns / tick
+ * 10 = 2.5 MHz => 400ns/clock => 400*64 ns / tick
+ */
+#define GFAR_GBIT_TIME 512
+#define GFAR_100_TIME 2560
+#define GFAR_10_TIME 25600
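+
+/* For example (derived from the table above): at gigabit speed one
+ * coalescing tick is 512 ns, so a time threshold of DEFAULT_TXTIME
+ * (21 ticks) fires after roughly 21 * 512 ns ~= 10.8 us.
+ */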
+
+#define DEFAULT_TX_COALESCE 1
+#define DEFAULT_TXCOUNT 16
+#define DEFAULT_TXTIME 21
+
+#define DEFAULT_RXTIME 21
+
+#define DEFAULT_RX_COALESCE 0
+#define DEFAULT_RXCOUNT 0
+
+#define GFAR_SUPPORTED (SUPPORTED_10baseT_Half \
+ | SUPPORTED_10baseT_Full \
+ | SUPPORTED_100baseT_Half \
+ | SUPPORTED_100baseT_Full \
+ | SUPPORTED_Autoneg \
+ | SUPPORTED_MII)
+
+/* TBI register addresses */
+#define MII_TBICON 0x11
+
+/* TBICON register bit fields */
+#define TBICON_CLK_SELECT 0x0020
+
+/* MAC register bits */
+#define MACCFG1_SOFT_RESET 0x80000000
+#define MACCFG1_RESET_RX_MC 0x00080000
+#define MACCFG1_RESET_TX_MC 0x00040000
+#define MACCFG1_RESET_RX_FUN 0x00020000
+#define MACCFG1_RESET_TX_FUN 0x00010000
+#define MACCFG1_LOOPBACK 0x00000100
+#define MACCFG1_RX_FLOW 0x00000020
+#define MACCFG1_TX_FLOW 0x00000010
+#define MACCFG1_SYNCD_RX_EN 0x00000008
+#define MACCFG1_RX_EN 0x00000004
+#define MACCFG1_SYNCD_TX_EN 0x00000002
+#define MACCFG1_TX_EN 0x00000001
+
+#define MACCFG2_INIT_SETTINGS 0x00007205
+#define MACCFG2_FULL_DUPLEX 0x00000001
+#define MACCFG2_IF 0x00000300
+#define MACCFG2_MII 0x00000100
+#define MACCFG2_GMII 0x00000200
+#define MACCFG2_HUGEFRAME 0x00000020
+#define MACCFG2_LENGTHCHECK 0x00000010
+#define MACCFG2_MPEN 0x00000008
+
+#define ECNTRL_FIFM 0x00008000
+#define ECNTRL_INIT_SETTINGS 0x00001000
+#define ECNTRL_TBI_MODE 0x00000020
+#define ECNTRL_REDUCED_MODE 0x00000010
+#define ECNTRL_R100 0x00000008
+#define ECNTRL_REDUCED_MII_MODE 0x00000004
+#define ECNTRL_SGMII_MODE 0x00000002
+
+#define MRBLR_INIT_SETTINGS DEFAULT_RX_BUFFER_SIZE
+
+#define MINFLR_INIT_SETTINGS 0x00000040
+
+/* Tqueue control */
+#define TQUEUE_EN0 0x00008000
+#define TQUEUE_EN1 0x00004000
+#define TQUEUE_EN2 0x00002000
+#define TQUEUE_EN3 0x00001000
+#define TQUEUE_EN4 0x00000800
+#define TQUEUE_EN5 0x00000400
+#define TQUEUE_EN6 0x00000200
+#define TQUEUE_EN7 0x00000100
+#define TQUEUE_EN_ALL 0x0000FF00
+
+#define TR03WT_WT0_MASK 0xFF000000
+#define TR03WT_WT1_MASK 0x00FF0000
+#define TR03WT_WT2_MASK 0x0000FF00
+#define TR03WT_WT3_MASK 0x000000FF
+
+#define TR47WT_WT4_MASK 0xFF000000
+#define TR47WT_WT5_MASK 0x00FF0000
+#define TR47WT_WT6_MASK 0x0000FF00
+#define TR47WT_WT7_MASK 0x000000FF
+
+/* Rqueue control */
+#define RQUEUE_EX0 0x00800000
+#define RQUEUE_EX1 0x00400000
+#define RQUEUE_EX2 0x00200000
+#define RQUEUE_EX3 0x00100000
+#define RQUEUE_EX4 0x00080000
+#define RQUEUE_EX5 0x00040000
+#define RQUEUE_EX6 0x00020000
+#define RQUEUE_EX7 0x00010000
+#define RQUEUE_EX_ALL 0x00FF0000
+
+#define RQUEUE_EN0 0x00000080
+#define RQUEUE_EN1 0x00000040
+#define RQUEUE_EN2 0x00000020
+#define RQUEUE_EN3 0x00000010
+#define RQUEUE_EN4 0x00000008
+#define RQUEUE_EN5 0x00000004
+#define RQUEUE_EN6 0x00000002
+#define RQUEUE_EN7 0x00000001
+#define RQUEUE_EN_ALL 0x000000FF
+
+/* Init to do tx snooping for buffers and descriptors */
+#define DMACTRL_INIT_SETTINGS 0x000000c3
+#define DMACTRL_GRS 0x00000010
+#define DMACTRL_GTS 0x00000008
+
+#define TSTAT_CLEAR_THALT_ALL 0xFF000000
+#define TSTAT_CLEAR_THALT 0x80000000
+#define TSTAT_CLEAR_THALT0 0x80000000
+#define TSTAT_CLEAR_THALT1 0x40000000
+#define TSTAT_CLEAR_THALT2 0x20000000
+#define TSTAT_CLEAR_THALT3 0x10000000
+#define TSTAT_CLEAR_THALT4 0x08000000
+#define TSTAT_CLEAR_THALT5 0x04000000
+#define TSTAT_CLEAR_THALT6 0x02000000
+#define TSTAT_CLEAR_THALT7 0x01000000
+
+/* Interrupt coalescing macros */
+#define IC_ICEN 0x80000000
+#define IC_ICFT_MASK 0x1fe00000
+#define IC_ICFT_SHIFT 21
+#define mk_ic_icft(x) \
+ (((unsigned int)x << IC_ICFT_SHIFT)&IC_ICFT_MASK)
+#define IC_ICTT_MASK 0x0000ffff
+#define mk_ic_ictt(x) (x&IC_ICTT_MASK)
+
+#define mk_ic_value(count, time) (IC_ICEN | \
+ mk_ic_icft(count) | \
+ mk_ic_ictt(time))
+#define get_icft_value(ic) (((unsigned long)ic & IC_ICFT_MASK) >> \
+ IC_ICFT_SHIFT)
+#define get_ictt_value(ic) ((unsigned long)ic & IC_ICTT_MASK)
+
+#define DEFAULT_TXIC mk_ic_value(DEFAULT_TXCOUNT, DEFAULT_TXTIME)
+#define DEFAULT_RXIC mk_ic_value(DEFAULT_RXCOUNT, DEFAULT_RXTIME)
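+
+/* Expanding the default (a worked example, not a new setting):
+ * DEFAULT_TXIC = mk_ic_value(16, 21)
+ *             = IC_ICEN | (16 << IC_ICFT_SHIFT) | 21
+ *             = 0x80000000 | 0x02000000 | 0x00000015 = 0x82000015
+ */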
+
+#define skip_bd(bdp, stride, base, ring_size) ({ \
+ typeof(bdp) new_bd = (bdp) + (stride); \
+ (new_bd >= (base) + (ring_size)) ? (new_bd - (ring_size)) : new_bd; })
+
+#define next_bd(bdp, base, ring_size) skip_bd(bdp, 1, base, ring_size)
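+
+/* Example of the wraparound (illustrative, ring_size == 256): for
+ * bdp == base + 255, next_bd() yields base + 256, which reaches the
+ * end of the ring and so wraps back to base. */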
+
+#define RCTRL_TS_ENABLE 0x01000000
+#define RCTRL_PAL_MASK 0x001f0000
+#define RCTRL_VLEX 0x00002000
+#define RCTRL_FILREN 0x00001000
+#define RCTRL_GHTX 0x00000400
+#define RCTRL_IPCSEN 0x00000200
+#define RCTRL_TUCSEN 0x00000100
+#define RCTRL_PRSDEP_MASK 0x000000c0
+#define RCTRL_PRSDEP_INIT 0x000000c0
+#define RCTRL_PRSFM 0x00000020
+#define RCTRL_PROM 0x00000008
+#define RCTRL_EMEN 0x00000002
+#define RCTRL_REQ_PARSER (RCTRL_VLEX | RCTRL_IPCSEN | \
+ RCTRL_TUCSEN | RCTRL_FILREN)
+#define RCTRL_CHECKSUMMING (RCTRL_IPCSEN | RCTRL_TUCSEN | \
+ RCTRL_PRSDEP_INIT)
+#define RCTRL_EXTHASH (RCTRL_GHTX)
+#define RCTRL_VLAN (RCTRL_PRSDEP_INIT)
+#define RCTRL_PADDING(x) ((x << 16) & RCTRL_PAL_MASK)
+
+
+#define RSTAT_CLEAR_RHALT 0x00800000
+
+#define TCTRL_IPCSEN 0x00004000
+#define TCTRL_TUCSEN 0x00002000
+#define TCTRL_VLINS 0x00001000
+#define TCTRL_THDF 0x00000800
+#define TCTRL_RFCPAUSE 0x00000010
+#define TCTRL_TFCPAUSE 0x00000008
+#define TCTRL_TXSCHED_MASK 0x00000006
+#define TCTRL_TXSCHED_INIT 0x00000000
+#define TCTRL_TXSCHED_PRIO 0x00000002
+#define TCTRL_TXSCHED_WRRS 0x00000004
+#define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN)
+
+#define IEVENT_INIT_CLEAR 0xffffffff
+#define IEVENT_BABR 0x80000000
+#define IEVENT_RXC 0x40000000
+#define IEVENT_BSY 0x20000000
+#define IEVENT_EBERR 0x10000000
+#define IEVENT_MSRO 0x04000000
+#define IEVENT_GTSC 0x02000000
+#define IEVENT_BABT 0x01000000
+#define IEVENT_TXC 0x00800000
+#define IEVENT_TXE 0x00400000
+#define IEVENT_TXB 0x00200000
+#define IEVENT_TXF 0x00100000
+#define IEVENT_LC 0x00040000
+#define IEVENT_CRL 0x00020000
+#define IEVENT_XFUN 0x00010000
+#define IEVENT_RXB0 0x00008000
+#define IEVENT_MAG 0x00000800
+#define IEVENT_GRSC 0x00000100
+#define IEVENT_RXF0 0x00000080
+#define IEVENT_FIR 0x00000008
+#define IEVENT_FIQ 0x00000004
+#define IEVENT_DPE 0x00000002
+#define IEVENT_PERR 0x00000001
+#define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0 | IEVENT_BSY)
+#define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF)
+#define IEVENT_RTX_MASK (IEVENT_RX_MASK | IEVENT_TX_MASK)
+#define IEVENT_ERR_MASK \
+(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \
+ IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \
+ | IEVENT_CRL | IEVENT_XFUN | IEVENT_DPE | IEVENT_PERR \
+ | IEVENT_MAG | IEVENT_BABR)
+
+#define IMASK_INIT_CLEAR 0x00000000
+#define IMASK_BABR 0x80000000
+#define IMASK_RXC 0x40000000
+#define IMASK_BSY 0x20000000
+#define IMASK_EBERR 0x10000000
+#define IMASK_MSRO 0x04000000
+#define IMASK_GTSC 0x02000000
+#define IMASK_BABT 0x01000000
+#define IMASK_TXC 0x00800000
+#define IMASK_TXEEN 0x00400000
+#define IMASK_TXBEN 0x00200000
+#define IMASK_TXFEN 0x00100000
+#define IMASK_LC 0x00040000
+#define IMASK_CRL 0x00020000
+#define IMASK_XFUN 0x00010000
+#define IMASK_RXB0 0x00008000
+#define IMASK_MAG 0x00000800
+#define IMASK_GRSC 0x00000100
+#define IMASK_RXFEN0 0x00000080
+#define IMASK_FIR 0x00000008
+#define IMASK_FIQ 0x00000004
+#define IMASK_DPE 0x00000002
+#define IMASK_PERR 0x00000001
+#define IMASK_DEFAULT (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \
+ IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \
+ IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \
+ | IMASK_PERR)
+#define IMASK_RTX_DISABLED ((~(IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY)) \
+ & IMASK_DEFAULT)
+
+/* Fifo management */
+#define FIFO_TX_THR_MASK 0x01ff
+#define FIFO_TX_STARVE_MASK 0x01ff
+#define FIFO_TX_STARVE_OFF_MASK 0x01ff
+
+/* Attribute fields */
+
+/* This enables rx snooping for buffers and descriptors */
+#define ATTR_BDSTASH 0x00000800
+
+#define ATTR_BUFSTASH 0x00004000
+
+#define ATTR_SNOOPING 0x000000c0
+#define ATTR_INIT_SETTINGS ATTR_SNOOPING
+
+#define ATTRELI_INIT_SETTINGS 0x0
+#define ATTRELI_EL_MASK 0x3fff0000
+#define ATTRELI_EL(x) (x << 16)
+#define ATTRELI_EI_MASK 0x00003fff
+#define ATTRELI_EI(x) (x)
+
+#define BD_LFLAG(flags) ((flags) << 16)
+#define BD_LENGTH_MASK 0x0000ffff
+
+#define FPR_FILER_MASK 0xFFFFFFFF
+#define MAX_FILER_IDX 0xFF
+
+/* This default RIR value directly corresponds
+ * to the 3-bit hash value generated */
+#define DEFAULT_RIR0 0x05397700
+
+/* RQFCR register bits */
+#define RQFCR_GPI 0x80000000
+#define RQFCR_HASHTBL_Q 0x00000000
+#define RQFCR_HASHTBL_0 0x00020000
+#define RQFCR_HASHTBL_1 0x00040000
+#define RQFCR_HASHTBL_2 0x00060000
+#define RQFCR_HASHTBL_3 0x00080000
+#define RQFCR_HASH 0x00010000
+#define RQFCR_QUEUE 0x0000FC00
+#define RQFCR_CLE 0x00000200
+#define RQFCR_RJE 0x00000100
+#define RQFCR_AND 0x00000080
+#define RQFCR_CMP_EXACT 0x00000000
+#define RQFCR_CMP_MATCH 0x00000020
+#define RQFCR_CMP_NOEXACT 0x00000040
+#define RQFCR_CMP_NOMATCH 0x00000060
+
+/* RQFCR PID values */
+#define RQFCR_PID_MASK 0x00000000
+#define RQFCR_PID_PARSE 0x00000001
+#define RQFCR_PID_ARB 0x00000002
+#define RQFCR_PID_DAH 0x00000003
+#define RQFCR_PID_DAL 0x00000004
+#define RQFCR_PID_SAH 0x00000005
+#define RQFCR_PID_SAL 0x00000006
+#define RQFCR_PID_ETY 0x00000007
+#define RQFCR_PID_VID 0x00000008
+#define RQFCR_PID_PRI 0x00000009
+#define RQFCR_PID_TOS 0x0000000A
+#define RQFCR_PID_L4P 0x0000000B
+#define RQFCR_PID_DIA 0x0000000C
+#define RQFCR_PID_SIA 0x0000000D
+#define RQFCR_PID_DPT 0x0000000E
+#define RQFCR_PID_SPT 0x0000000F
+
+/* RQFPR when PID is 0x0001 */
+#define RQFPR_HDR_GE_512 0x00200000
+#define RQFPR_LERR 0x00100000
+#define RQFPR_RAR 0x00080000
+#define RQFPR_RARQ 0x00040000
+#define RQFPR_AR 0x00020000
+#define RQFPR_ARQ 0x00010000
+#define RQFPR_EBC 0x00008000
+#define RQFPR_VLN 0x00004000
+#define RQFPR_CFI 0x00002000
+#define RQFPR_JUM 0x00001000
+#define RQFPR_IPF 0x00000800
+#define RQFPR_FIF 0x00000400
+#define RQFPR_IPV4 0x00000200
+#define RQFPR_IPV6 0x00000100
+#define RQFPR_ICC 0x00000080
+#define RQFPR_ICV 0x00000040
+#define RQFPR_TCP 0x00000020
+#define RQFPR_UDP 0x00000010
+#define RQFPR_TUC 0x00000008
+#define RQFPR_TUV 0x00000004
+#define RQFPR_PER 0x00000002
+#define RQFPR_EER 0x00000001
+
+/* TxBD status field bits */
+#define TXBD_READY 0x8000
+#define TXBD_PADCRC 0x4000
+#define TXBD_WRAP 0x2000
+#define TXBD_INTERRUPT 0x1000
+#define TXBD_LAST 0x0800
+#define TXBD_CRC 0x0400
+#define TXBD_DEF 0x0200
+#define TXBD_HUGEFRAME 0x0080
+#define TXBD_LATECOLLISION 0x0080
+#define TXBD_RETRYLIMIT 0x0040
+#define TXBD_RETRYCOUNTMASK 0x003c
+#define TXBD_UNDERRUN 0x0002
+#define TXBD_TOE 0x0002
+
+/* Tx FCB param bits */
+#define TXFCB_VLN 0x80
+#define TXFCB_IP 0x40
+#define TXFCB_IP6 0x20
+#define TXFCB_TUP 0x10
+#define TXFCB_UDP 0x08
+#define TXFCB_CIP 0x04
+#define TXFCB_CTU 0x02
+#define TXFCB_NPH 0x01
+#define TXFCB_DEFAULT (TXFCB_IP|TXFCB_TUP|TXFCB_CTU|TXFCB_NPH)
+
+/* RxBD status field bits */
+#define RXBD_EMPTY 0x8000
+#define RXBD_RO1 0x4000
+#define RXBD_WRAP 0x2000
+#define RXBD_INTERRUPT 0x1000
+#define RXBD_LAST 0x0800
+#define RXBD_FIRST 0x0400
+#define RXBD_MISS 0x0100
+#define RXBD_BROADCAST 0x0080
+#define RXBD_MULTICAST 0x0040
+#define RXBD_LARGE 0x0020
+#define RXBD_NONOCTET 0x0010
+#define RXBD_SHORT 0x0008
+#define RXBD_CRCERR 0x0004
+#define RXBD_OVERRUN 0x0002
+#define RXBD_TRUNCATED 0x0001
+#define RXBD_STATS 0x01ff
+#define RXBD_ERR (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET \
+ | RXBD_CRCERR | RXBD_OVERRUN \
+ | RXBD_TRUNCATED)
+
+/* Rx FCB status field bits */
+#define RXFCB_VLN 0x8000
+#define RXFCB_IP 0x4000
+#define RXFCB_IP6 0x2000
+#define RXFCB_TUP 0x1000
+#define RXFCB_CIP 0x0800
+#define RXFCB_CTU 0x0400
+#define RXFCB_EIP 0x0200
+#define RXFCB_ETU 0x0100
+#define RXFCB_CSUM_MASK 0x0f00
+#define RXFCB_PERR_MASK 0x000c
+#define RXFCB_PERR_BADL3 0x0008
+
+#define GFAR_INT_NAME_MAX (IFNAMSIZ + 4)
+
+struct txbd8
+{
+ union {
+ struct {
+ u16 status; /* Status Fields */
+ u16 length; /* Buffer length */
+ };
+ u32 lstatus;
+ };
+ u32 bufPtr; /* Buffer Pointer */
+};
+
+struct txfcb {
+ u8 flags;
+ u8 ptp; /* Flag to enable tx timestamping */
+ u8 l4os; /* Level 4 Header Offset */
+ u8 l3os; /* Level 3 Header Offset */
+ u16 phcs; /* Pseudo-header Checksum */
+ u16 vlctl; /* VLAN control word */
+};
+
+struct rxbd8
+{
+ union {
+ struct {
+ u16 status; /* Status Fields */
+ u16 length; /* Buffer Length */
+ };
+ u32 lstatus;
+ };
+ u32 bufPtr; /* Buffer Pointer */
+};
+
+struct rxfcb {
+ u16 flags;
+ u8 rq; /* Receive Queue index */
+ u8 pro; /* Layer 4 Protocol */
+ u16 reserved;
+ u16 vlctl; /* VLAN control word */
+};
+
+struct gianfar_skb_cb {
+ int alignamount;
+};
+
+#define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb))
+
+struct rmon_mib
+{
+ u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */
+ u32 tr127; /* 0x.684 - Transmit and Receive 65-127 byte Frame Counter */
+ u32 tr255; /* 0x.688 - Transmit and Receive 128-255 byte Frame Counter */
+ u32 tr511; /* 0x.68c - Transmit and Receive 256-511 byte Frame Counter */
+ u32 tr1k; /* 0x.690 - Transmit and Receive 512-1023 byte Frame Counter */
+ u32 trmax; /* 0x.694 - Transmit and Receive 1024-1518 byte Frame Counter */
+ u32 trmgv; /* 0x.698 - Transmit and Receive 1519-1522 byte Good VLAN Frame */
+ u32 rbyt; /* 0x.69c - Receive Byte Counter */
+ u32 rpkt; /* 0x.6a0 - Receive Packet Counter */
+ u32 rfcs; /* 0x.6a4 - Receive FCS Error Counter */
+ u32 rmca; /* 0x.6a8 - Receive Multicast Packet Counter */
+ u32 rbca; /* 0x.6ac - Receive Broadcast Packet Counter */
+ u32 rxcf; /* 0x.6b0 - Receive Control Frame Packet Counter */
+ u32 rxpf; /* 0x.6b4 - Receive Pause Frame Packet Counter */
+ u32 rxuo; /* 0x.6b8 - Receive Unknown OP Code Counter */
+ u32 raln; /* 0x.6bc - Receive Alignment Error Counter */
+ u32 rflr; /* 0x.6c0 - Receive Frame Length Error Counter */
+ u32 rcde; /* 0x.6c4 - Receive Code Error Counter */
+ u32 rcse; /* 0x.6c8 - Receive Carrier Sense Error Counter */
+ u32 rund; /* 0x.6cc - Receive Undersize Packet Counter */
+ u32 rovr; /* 0x.6d0 - Receive Oversize Packet Counter */
+ u32 rfrg; /* 0x.6d4 - Receive Fragments Counter */
+ u32 rjbr; /* 0x.6d8 - Receive Jabber Counter */
+ u32 rdrp; /* 0x.6dc - Receive Drop Counter */
+ u32 tbyt; /* 0x.6e0 - Transmit Byte Counter Counter */
+ u32 tpkt; /* 0x.6e4 - Transmit Packet Counter */
+ u32 tmca; /* 0x.6e8 - Transmit Multicast Packet Counter */
+ u32 tbca; /* 0x.6ec - Transmit Broadcast Packet Counter */
+ u32 txpf; /* 0x.6f0 - Transmit Pause Control Frame Counter */
+ u32 tdfr; /* 0x.6f4 - Transmit Deferral Packet Counter */
+ u32 tedf; /* 0x.6f8 - Transmit Excessive Deferral Packet Counter */
+ u32 tscl; /* 0x.6fc - Transmit Single Collision Packet Counter */
+ u32 tmcl; /* 0x.700 - Transmit Multiple Collision Packet Counter */
+ u32 tlcl; /* 0x.704 - Transmit Late Collision Packet Counter */
+ u32 txcl; /* 0x.708 - Transmit Excessive Collision Packet Counter */
+ u32 tncl; /* 0x.70c - Transmit Total Collision Counter */
+ u8 res1[4];
+ u32 tdrp; /* 0x.714 - Transmit Drop Frame Counter */
+ u32 tjbr; /* 0x.718 - Transmit Jabber Frame Counter */
+ u32 tfcs; /* 0x.71c - Transmit FCS Error Counter */
+ u32 txcf; /* 0x.720 - Transmit Control Frame Counter */
+ u32 tovr; /* 0x.724 - Transmit Oversize Frame Counter */
+ u32 tund; /* 0x.728 - Transmit Undersize Frame Counter */
+ u32 tfrg; /* 0x.72c - Transmit Fragments Frame Counter */
+ u32 car1; /* 0x.730 - Carry Register One */
+ u32 car2; /* 0x.734 - Carry Register Two */
+ u32 cam1; /* 0x.738 - Carry Mask Register One */
+ u32 cam2; /* 0x.73c - Carry Mask Register Two */
+};
+
+struct gfar_extra_stats {
+ u64 kernel_dropped;
+ u64 rx_large;
+ u64 rx_short;
+ u64 rx_nonoctet;
+ u64 rx_crcerr;
+ u64 rx_overrun;
+ u64 rx_bsy;
+ u64 rx_babr;
+ u64 rx_trunc;
+ u64 eberr;
+ u64 tx_babt;
+ u64 tx_underrun;
+ u64 rx_skbmissing;
+ u64 tx_timeout;
+};
+
+#define GFAR_RMON_LEN ((sizeof(struct rmon_mib) - 16)/sizeof(u32))
+#define GFAR_EXTRA_STATS_LEN (sizeof(struct gfar_extra_stats)/sizeof(u64))
+
+/* Number of stats in the stats structure (ignoring the car and cam regs) */
+#define GFAR_STATS_LEN (GFAR_RMON_LEN + GFAR_EXTRA_STATS_LEN)
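+
+/* Worked numbers, derived from the structures above: rmon_mib covers
+ * 0x.680-0x.73c (192 bytes); dropping the four carry/mask registers
+ * (16 bytes) leaves GFAR_RMON_LEN = 44, and gfar_extra_stats adds
+ * GFAR_EXTRA_STATS_LEN = 14, so GFAR_STATS_LEN = 58. */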
+
+#define GFAR_INFOSTR_LEN 32
+
+struct gfar_stats {
+ u64 extra[GFAR_EXTRA_STATS_LEN];
+ u64 rmon[GFAR_RMON_LEN];
+};
+
+
+struct gfar {
+ u32 tsec_id; /* 0x.000 - Controller ID register */
+ u32 tsec_id2; /* 0x.004 - Controller ID2 register */
+ u8 res1[8];
+ u32 ievent; /* 0x.010 - Interrupt Event Register */
+ u32 imask; /* 0x.014 - Interrupt Mask Register */
+ u32 edis; /* 0x.018 - Error Disabled Register */
+ u32 emapg; /* 0x.01c - Group Error mapping register */
+ u32 ecntrl; /* 0x.020 - Ethernet Control Register */
+ u32 minflr; /* 0x.024 - Minimum Frame Length Register */
+ u32 ptv; /* 0x.028 - Pause Time Value Register */
+ u32 dmactrl; /* 0x.02c - DMA Control Register */
+ u32 tbipa; /* 0x.030 - TBI PHY Address Register */
+ u8 res2[28];
+ u32 fifo_rx_pause; /* 0x.050 - FIFO receive pause start threshold
+ register */
+ u32 fifo_rx_pause_shutoff; /* 0x.054 - FIFO receive pause shutoff
+ register */
+ u32 fifo_rx_alarm; /* 0x.058 - FIFO receive alarm start threshold
+ register */
+ u32 fifo_rx_alarm_shutoff; /* 0x.05c - FIFO receive alarm starve
+ shutoff register */
+ u8 res3[44];
+ u32 fifo_tx_thr; /* 0x.08c - FIFO transmit threshold register */
+ u8 res4[8];
+ u32 fifo_tx_starve; /* 0x.098 - FIFO transmit starve register */
+ u32 fifo_tx_starve_shutoff; /* 0x.09c - FIFO transmit starve shutoff register */
+ u8 res5[96];
+ u32 tctrl; /* 0x.100 - Transmit Control Register */
+ u32 tstat; /* 0x.104 - Transmit Status Register */
+ u32 dfvlan; /* 0x.108 - Default VLAN Control word */
+ u32 tbdlen; /* 0x.10c - Transmit Buffer Descriptor Data Length Register */
+ u32 txic; /* 0x.110 - Transmit Interrupt Coalescing Configuration Register */
+ u32 tqueue; /* 0x.114 - Transmit queue control register */
+ u8 res7[40];
+ u32 tr03wt; /* 0x.140 - TxBD Rings 0-3 round-robin weightings */
+ u32 tr47wt; /* 0x.144 - TxBD Rings 4-7 round-robin weightings */
+ u8 res8[52];
+ u32 tbdbph; /* 0x.17c - Tx data buffer pointer high */
+ u8 res9a[4];
+ u32 tbptr0; /* 0x.184 - TxBD Pointer for ring 0 */
+ u8 res9b[4];
+ u32 tbptr1; /* 0x.18c - TxBD Pointer for ring 1 */
+ u8 res9c[4];
+ u32 tbptr2; /* 0x.194 - TxBD Pointer for ring 2 */
+ u8 res9d[4];
+ u32 tbptr3; /* 0x.19c - TxBD Pointer for ring 3 */
+ u8 res9e[4];
+ u32 tbptr4; /* 0x.1a4 - TxBD Pointer for ring 4 */
+ u8 res9f[4];
+ u32 tbptr5; /* 0x.1ac - TxBD Pointer for ring 5 */
+ u8 res9g[4];
+ u32 tbptr6; /* 0x.1b4 - TxBD Pointer for ring 6 */
+ u8 res9h[4];
+ u32 tbptr7; /* 0x.1bc - TxBD Pointer for ring 7 */
+ u8 res9[64];
+ u32 tbaseh; /* 0x.200 - TxBD base address high */
+ u32 tbase0; /* 0x.204 - TxBD Base Address of ring 0 */
+ u8 res10a[4];
+ u32 tbase1; /* 0x.20c - TxBD Base Address of ring 1 */
+ u8 res10b[4];
+ u32 tbase2; /* 0x.214 - TxBD Base Address of ring 2 */
+ u8 res10c[4];
+ u32 tbase3; /* 0x.21c - TxBD Base Address of ring 3 */
+ u8 res10d[4];
+ u32 tbase4; /* 0x.224 - TxBD Base Address of ring 4 */
+ u8 res10e[4];
+ u32 tbase5; /* 0x.22c - TxBD Base Address of ring 5 */
+ u8 res10f[4];
+ u32 tbase6; /* 0x.234 - TxBD Base Address of ring 6 */
+ u8 res10g[4];
+ u32 tbase7; /* 0x.23c - TxBD Base Address of ring 7 */
+ u8 res10[192];
+ u32 rctrl; /* 0x.300 - Receive Control Register */
+ u32 rstat; /* 0x.304 - Receive Status Register */
+ u8 res12[8];
+ u32 rxic; /* 0x.310 - Receive Interrupt Coalescing Configuration Register */
+ u32 rqueue; /* 0x.314 - Receive queue control register */
+ u32 rir0; /* 0x.318 - Ring mapping register 0 */
+ u32 rir1; /* 0x.31c - Ring mapping register 1 */
+ u32 rir2; /* 0x.320 - Ring mapping register 2 */
+ u32 rir3; /* 0x.324 - Ring mapping register 3 */
+ u8 res13[8];
+ u32 rbifx; /* 0x.330 - Receive bit field extract control register */
+ u32 rqfar; /* 0x.334 - Receive queue filing table address register */
+ u32 rqfcr; /* 0x.338 - Receive queue filing table control register */
+ u32 rqfpr; /* 0x.33c - Receive queue filing table property register */
+ u32 mrblr; /* 0x.340 - Maximum Receive Buffer Length Register */
+ u8 res14[56];
+ u32 rbdbph; /* 0x.37c - Rx data buffer pointer high */
+ u8 res15a[4];
+ u32 rbptr0; /* 0x.384 - RxBD pointer for ring 0 */
+ u8 res15b[4];
+ u32 rbptr1; /* 0x.38c - RxBD pointer for ring 1 */
+ u8 res15c[4];
+ u32 rbptr2; /* 0x.394 - RxBD pointer for ring 2 */
+ u8 res15d[4];
+ u32 rbptr3; /* 0x.39c - RxBD pointer for ring 3 */
+ u8 res15e[4];
+ u32 rbptr4; /* 0x.3a4 - RxBD pointer for ring 4 */
+ u8 res15f[4];
+ u32 rbptr5; /* 0x.3ac - RxBD pointer for ring 5 */
+ u8 res15g[4];
+ u32 rbptr6; /* 0x.3b4 - RxBD pointer for ring 6 */
+ u8 res15h[4];
+ u32 rbptr7; /* 0x.3bc - RxBD pointer for ring 7 */
+ u8 res16[64];
+ u32 rbaseh; /* 0x.400 - RxBD base address high */
+ u32 rbase0; /* 0x.404 - RxBD base address of ring 0 */
+ u8 res17a[4];
+ u32 rbase1; /* 0x.40c - RxBD base address of ring 1 */
+ u8 res17b[4];
+ u32 rbase2; /* 0x.414 - RxBD base address of ring 2 */
+ u8 res17c[4];
+ u32 rbase3; /* 0x.41c - RxBD base address of ring 3 */
+ u8 res17d[4];
+ u32 rbase4; /* 0x.424 - RxBD base address of ring 4 */
+ u8 res17e[4];
+ u32 rbase5; /* 0x.42c - RxBD base address of ring 5 */
+ u8 res17f[4];
+ u32 rbase6; /* 0x.434 - RxBD base address of ring 6 */
+ u8 res17g[4];
+ u32 rbase7; /* 0x.43c - RxBD base address of ring 7 */
+ u8 res17[192];
+ u32 maccfg1; /* 0x.500 - MAC Configuration 1 Register */
+ u32 maccfg2; /* 0x.504 - MAC Configuration 2 Register */
+ u32 ipgifg; /* 0x.508 - Inter Packet Gap/Inter Frame Gap Register */
+ u32 hafdup; /* 0x.50c - Half Duplex Register */
+ u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */
+ u8 res18[12];
+ u8 gfar_mii_regs[24]; /* See gianfar_phy.h */
+ u32 ifctrl; /* 0x.538 - Interface control register */
+ u32 ifstat; /* 0x.53c - Interface Status Register */
+ u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */
+ u32 macstnaddr2; /* 0x.544 - Station Address Part 2 Register */
+ u32 mac01addr1; /* 0x.548 - MAC exact match address 1, part 1 */
+ u32 mac01addr2; /* 0x.54c - MAC exact match address 1, part 2 */
+ u32 mac02addr1; /* 0x.550 - MAC exact match address 2, part 1 */
+ u32 mac02addr2; /* 0x.554 - MAC exact match address 2, part 2 */
+ u32 mac03addr1; /* 0x.558 - MAC exact match address 3, part 1 */
+ u32 mac03addr2; /* 0x.55c - MAC exact match address 3, part 2 */
+ u32 mac04addr1; /* 0x.560 - MAC exact match address 4, part 1 */
+ u32 mac04addr2; /* 0x.564 - MAC exact match address 4, part 2 */
+ u32 mac05addr1; /* 0x.568 - MAC exact match address 5, part 1 */
+ u32 mac05addr2; /* 0x.56c - MAC exact match address 5, part 2 */
+ u32 mac06addr1; /* 0x.570 - MAC exact match address 6, part 1 */
+ u32 mac06addr2; /* 0x.574 - MAC exact match address 6, part 2 */
+ u32 mac07addr1; /* 0x.578 - MAC exact match address 7, part 1 */
+ u32 mac07addr2; /* 0x.57c - MAC exact match address 7, part 2 */
+ u32 mac08addr1; /* 0x.580 - MAC exact match address 8, part 1 */
+ u32 mac08addr2; /* 0x.584 - MAC exact match address 8, part 2 */
+ u32 mac09addr1; /* 0x.588 - MAC exact match address 9, part 1 */
+ u32 mac09addr2; /* 0x.58c - MAC exact match address 9, part 2 */
+ u32 mac10addr1; /* 0x.590 - MAC exact match address 10, part 1*/
+ u32 mac10addr2; /* 0x.594 - MAC exact match address 10, part 2*/
+ u32 mac11addr1; /* 0x.598 - MAC exact match address 11, part 1*/
+ u32 mac11addr2; /* 0x.59c - MAC exact match address 11, part 2*/
+ u32 mac12addr1; /* 0x.5a0 - MAC exact match address 12, part 1*/
+ u32 mac12addr2; /* 0x.5a4 - MAC exact match address 12, part 2*/
+ u32 mac13addr1; /* 0x.5a8 - MAC exact match address 13, part 1*/
+ u32 mac13addr2; /* 0x.5ac - MAC exact match address 13, part 2*/
+ u32 mac14addr1; /* 0x.5b0 - MAC exact match address 14, part 1*/
+ u32 mac14addr2; /* 0x.5b4 - MAC exact match address 14, part 2*/
+ u32 mac15addr1; /* 0x.5b8 - MAC exact match address 15, part 1*/
+ u32 mac15addr2; /* 0x.5bc - MAC exact match address 15, part 2*/
+ u8 res20[192];
+ struct rmon_mib rmon; /* 0x.680-0x.73c */
+ u32 rrej; /* 0x.740 - Receive filer rejected packet counter */
+ u8 res21[188];
+ u32 igaddr0; /* 0x.800 - Individual/Group address register 0*/
+ u32 igaddr1; /* 0x.804 - Individual/Group address register 1*/
+ u32 igaddr2; /* 0x.808 - Individual/Group address register 2*/
+ u32 igaddr3; /* 0x.80c - Individual/Group address register 3*/
+ u32 igaddr4; /* 0x.810 - Individual/Group address register 4*/
+ u32 igaddr5; /* 0x.814 - Individual/Group address register 5*/
+ u32 igaddr6; /* 0x.818 - Individual/Group address register 6*/
+ u32 igaddr7; /* 0x.81c - Individual/Group address register 7*/
+ u8 res22[96];
+ u32 gaddr0; /* 0x.880 - Group address register 0 */
+ u32 gaddr1; /* 0x.884 - Group address register 1 */
+ u32 gaddr2; /* 0x.888 - Group address register 2 */
+ u32 gaddr3; /* 0x.88c - Group address register 3 */
+ u32 gaddr4; /* 0x.890 - Group address register 4 */
+ u32 gaddr5; /* 0x.894 - Group address register 5 */
+ u32 gaddr6; /* 0x.898 - Group address register 6 */
+ u32 gaddr7; /* 0x.89c - Group address register 7 */
+ u8 res23a[352];
+ u32 fifocfg; /* 0x.a00 - FIFO interface config register */
+ u8 res23b[252];
+ u8 res23c[248];
+ u32 attr; /* 0x.bf8 - Attributes Register */
+ u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */
+ u8 res24[688];
+ u32 isrg0; /* 0x.eb0 - Interrupt steering group 0 register */
+ u32 isrg1; /* 0x.eb4 - Interrupt steering group 1 register */
+ u32 isrg2; /* 0x.eb8 - Interrupt steering group 2 register */
+ u32 isrg3; /* 0x.ebc - Interrupt steering group 3 register */
+ u8 res25[16];
+ u32 rxic0; /* 0x.ed0 - Ring 0 Rx interrupt coalescing */
+ u32 rxic1; /* 0x.ed4 - Ring 1 Rx interrupt coalescing */
+ u32 rxic2; /* 0x.ed8 - Ring 2 Rx interrupt coalescing */
+ u32 rxic3; /* 0x.edc - Ring 3 Rx interrupt coalescing */
+ u32 rxic4; /* 0x.ee0 - Ring 4 Rx interrupt coalescing */
+ u32 rxic5; /* 0x.ee4 - Ring 5 Rx interrupt coalescing */
+ u32 rxic6; /* 0x.ee8 - Ring 6 Rx interrupt coalescing */
+ u32 rxic7; /* 0x.eec - Ring 7 Rx interrupt coalescing */
+ u8 res26[32];
+ u32 txic0; /* 0x.f10 - Ring 0 Tx interrupt coalescing */
+ u32 txic1; /* 0x.f14 - Ring 1 Tx interrupt coalescing */
+ u32 txic2; /* 0x.f18 - Ring 2 Tx interrupt coalescing */
+ u32 txic3; /* 0x.f1c - Ring 3 Tx interrupt coalescing */
+ u32 txic4; /* 0x.f20 - Ring 4 Tx interrupt coalescing */
+ u32 txic5; /* 0x.f24 - Ring 5 Tx interrupt coalescing */
+ u32 txic6; /* 0x.f28 - Ring 6 Tx interrupt coalescing */
+ u32 txic7; /* 0x.f2c - Ring 7 Tx interrupt coalescing */
+ u8 res27[208];
+};
+
+/* Flags related to gianfar device features */
+#define FSL_GIANFAR_DEV_HAS_GIGABIT 0x00000001
+#define FSL_GIANFAR_DEV_HAS_COALESCE 0x00000002
+#define FSL_GIANFAR_DEV_HAS_RMON 0x00000004
+#define FSL_GIANFAR_DEV_HAS_MULTI_INTR 0x00000008
+#define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010
+#define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020
+#define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040
+#define FSL_GIANFAR_DEV_HAS_PADDING 0x00000080
+#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
+#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
+#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
+#define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800
+
+#if (MAXGROUPS == 2)
+#define DEFAULT_MAPPING 0xAA
+#else
+#define DEFAULT_MAPPING 0xFF
+#endif
+
+#define ISRG_SHIFT_TX 0x10
+#define ISRG_SHIFT_RX 0x18
+
+/* The same driver can operate in two modes */
+/* SQ_SG_MODE: Single Queue Single Group Mode
+ * (Backward compatible mode)
+ * MQ_MG_MODE: Multi Queue Multi Group mode
+ */
+enum {
+ SQ_SG_MODE = 0,
+ MQ_MG_MODE
+};
+
+/*
+ * Per TX queue stats
+ */
+struct tx_q_stats {
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+};
+
+/**
+ * struct gfar_priv_tx_q - per tx queue structure
+ * @txlock: per queue tx spin lock
+ * @tx_skbuff: skb pointers
+ * @skb_curtx: next to-be-used skb pointer
+ * @skb_dirtytx: the last used skb pointer
+ * @stats: bytes/packets stats
+ * @qindex: index of this queue
+ * @dev: back pointer to the dev structure
+ * @grp: back pointer to the group to which this queue belongs
+ * @tx_bd_base: First tx buffer descriptor
+ * @cur_tx: Next free ring entry
+ * @dirty_tx: First buffer in line to be transmitted
+ * @tx_ring_size: Tx ring size
+ * @num_txbdfree: number of free TxBds
+ * @txcoalescing: enable/disable tx coalescing
+ * @txic: transmit interrupt coalescing value
+ * @txcount: coalescing value if based on tx frame count
+ * @txtime: coalescing value if based on time
+ */
+struct gfar_priv_tx_q {
+ spinlock_t txlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+ struct sk_buff **tx_skbuff;
+ /* Buffer descriptor pointers */
+ dma_addr_t tx_bd_dma_base;
+ struct txbd8 *tx_bd_base;
+ struct txbd8 *cur_tx;
+ struct txbd8 *dirty_tx;
+ struct tx_q_stats stats;
+ struct net_device *dev;
+ struct gfar_priv_grp *grp;
+ u16 skb_curtx;
+ u16 skb_dirtytx;
+ u16 qindex;
+ unsigned int tx_ring_size;
+ unsigned int num_txbdfree;
+ /* Configuration info for the coalescing features */
+ unsigned char txcoalescing;
+ unsigned long txic;
+ unsigned short txcount;
+ unsigned short txtime;
+};
+
+/*
+ * Per RX queue stats
+ */
+struct rx_q_stats {
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long rx_dropped;
+};
+
+/**
+ * struct gfar_priv_rx_q - per rx queue structure
+ * @rxlock: per queue rx spin lock
+ * @rx_skbuff: skb pointers
+ * @skb_currx: currently used skb pointer
+ * @rx_bd_base: First rx buffer descriptor
+ * @cur_rx: Next free rx ring entry
+ * @qindex: index of this queue
+ * @dev: back pointer to the dev structure
+ * @rx_ring_size: Rx ring size
+ * @rxcoalescing: enable/disable rx-coalescing
+ * @rxic: receive interrupt coalescing value
+ */
+
+struct gfar_priv_rx_q {
+ spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+ struct sk_buff **rx_skbuff;
+ dma_addr_t rx_bd_dma_base;
+ struct rxbd8 *rx_bd_base;
+ struct rxbd8 *cur_rx;
+ struct net_device *dev;
+ struct gfar_priv_grp *grp;
+ struct rx_q_stats stats;
+ u16 skb_currx;
+ u16 qindex;
+ unsigned int rx_ring_size;
+ /* RX Coalescing values */
+ unsigned char rxcoalescing;
+ unsigned long rxic;
+};
+
+/**
+ * struct gfar_priv_grp - per group structure
+ * @napi: the napi poll function
+ * @priv: back pointer to the priv structure
+ * @regs: the ioremapped register space for this group
+ * @grp_id: group id for this group
+ * @interruptTransmit: The TX interrupt number for this group
+ * @interruptReceive: The RX interrupt number for this group
+ * @interruptError: The ERROR interrupt number for this group
+ * @int_name_tx: tx interrupt name for this group
+ * @int_name_rx: rx interrupt name for this group
+ * @int_name_er: er interrupt name for this group
+ */
+
+struct gfar_priv_grp {
+ spinlock_t grplock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+ struct napi_struct napi;
+ struct gfar_private *priv;
+ struct gfar __iomem *regs;
+ unsigned int grp_id;
+ unsigned long rx_bit_map;
+ unsigned long tx_bit_map;
+ unsigned long num_tx_queues;
+ unsigned long num_rx_queues;
+ unsigned int rstat;
+ unsigned int tstat;
+ unsigned int imask;
+ unsigned int ievent;
+ unsigned int interruptTransmit;
+ unsigned int interruptReceive;
+ unsigned int interruptError;
+
+ char int_name_tx[GFAR_INT_NAME_MAX];
+ char int_name_rx[GFAR_INT_NAME_MAX];
+ char int_name_er[GFAR_INT_NAME_MAX];
+};
+
+enum gfar_errata {
+ GFAR_ERRATA_74 = 0x01,
+ GFAR_ERRATA_76 = 0x02,
+ GFAR_ERRATA_A002 = 0x04,
+ GFAR_ERRATA_12 = 0x08, /* a.k.a. errata eTSEC49 */
+};
+
+/* Struct stolen almost completely (and shamelessly) from the FCC enet source
+ * (Ok, that's not so true anymore, but there is a family resemblance)
+ * The GFAR buffer descriptors track the ring buffers. The rx_bd_base
+ * and tx_bd_base always point to the base of the buffer descriptor
+ * ring, while cur_rx and cur_tx point to the currently available
+ * buffer. The dirty_tx tracks the current buffer that is being sent by the
+ * controller. The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions. The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
+struct gfar_private {
+
+ /* Indicates how many tx, rx queues are enabled */
+ unsigned int num_tx_queues;
+ unsigned int num_rx_queues;
+ unsigned int num_grps;
+ unsigned int mode;
+
+ /* The total tx and rx ring size for the enabled queues */
+ unsigned int total_tx_ring_size;
+ unsigned int total_rx_ring_size;
+
+ struct device_node *node;
+ struct net_device *ndev;
+ struct platform_device *ofdev;
+ enum gfar_errata errata;
+
+ struct gfar_priv_grp gfargrp[MAXGROUPS];
+ struct gfar_priv_tx_q *tx_queue[MAX_TX_QS];
+ struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
+
+ /* RX per device parameters */
+ unsigned int rx_buffer_size;
+ unsigned int rx_stash_size;
+ unsigned int rx_stash_index;
+
+ u32 cur_filer_idx;
+
+ struct sk_buff_head rx_recycle;
+
+ /* RX queue filer rule set*/
+ struct ethtool_rx_list rx_list;
+ struct mutex rx_queue_access;
+
+ /* Hash registers and their width */
+ u32 __iomem *hash_regs[16];
+ int hash_width;
+
+ /* global parameters */
+ unsigned int fifo_threshold;
+ unsigned int fifo_starve;
+ unsigned int fifo_starve_off;
+
+ /* Bitfield update lock */
+ spinlock_t bflock;
+
+ phy_interface_t interface;
+ struct device_node *phy_node;
+ struct device_node *tbi_node;
+ u32 device_flags;
+ unsigned char
+ extended_hash:1,
+ bd_stash_en:1,
+ rx_filer_enable:1,
+ wol_en:1; /* Wake-on-LAN enabled */
+ unsigned short padding;
+
+ /* PHY stuff */
+ struct phy_device *phydev;
+ struct mii_bus *mii_bus;
+ int oldspeed;
+ int oldduplex;
+ int oldlink;
+
+ uint32_t msg_enable;
+
+ struct work_struct reset_task;
+
+ /* Network Statistics */
+ struct gfar_extra_stats extra_stats;
+
+ /* HW time stamping enabled flag */
+ int hwts_rx_en;
+ int hwts_tx_en;
+
+ /* Filer table */
+ unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
+ unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
+};
+
+
+static inline int gfar_has_errata(struct gfar_private *priv,
+ enum gfar_errata err)
+{
+ return priv->errata & err;
+}
+
+static inline u32 gfar_read(volatile unsigned __iomem *addr)
+{
+ return in_be32(addr);
+}
+
+static inline void gfar_write(volatile unsigned __iomem *addr, u32 val)
+{
+ out_be32(addr, val);
+}
+
+static inline void gfar_write_filer(struct gfar_private *priv,
+ unsigned int far, unsigned int fcr, unsigned int fpr)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+ gfar_write(&regs->rqfar, far);
+ gfar_write(&regs->rqfcr, fcr);
+ gfar_write(&regs->rqfpr, fpr);
+}
+
+static inline void gfar_read_filer(struct gfar_private *priv,
+ unsigned int far, unsigned int *fcr, unsigned int *fpr)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+ gfar_write(&regs->rqfar, far);
+ *fcr = gfar_read(&regs->rqfcr);
+ *fpr = gfar_read(&regs->rqfpr);
+}
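+
+/* Usage sketch (illustrative, not driver code): filer entries are
+ * accessed indirectly, so reading slot 0 is a write of the slot
+ * number to rqfar followed by reads of rqfcr/rqfpr:
+ *
+ *	unsigned int fcr, fpr;
+ *	gfar_read_filer(priv, 0, &fcr, &fpr);
+ */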
+
+extern void lock_rx_qs(struct gfar_private *priv);
+extern void lock_tx_qs(struct gfar_private *priv);
+extern void unlock_rx_qs(struct gfar_private *priv);
+extern void unlock_tx_qs(struct gfar_private *priv);
+extern irqreturn_t gfar_receive(int irq, void *dev_id);
+extern int startup_gfar(struct net_device *dev);
+extern void stop_gfar(struct net_device *dev);
+extern void gfar_halt(struct net_device *dev);
+extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
+ int enable, u32 regnum, u32 read);
+extern void gfar_configure_coalescing(struct gfar_private *priv,
+ unsigned long tx_mask, unsigned long rx_mask);
+extern void gfar_init_sysfs(struct net_device *dev);
+extern int gfar_set_features(struct net_device *dev, u32 features);
+extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
+extern void gfar_vlan_mode(struct net_device *dev, u32 features);
+
+extern const struct ethtool_ops gfar_ethtool_ops;
+
+#define MAX_FILER_CACHE_IDX (2*(MAX_FILER_IDX))
+
+#define RQFCR_PID_PRI_MASK 0xFFFFFFF8
+#define RQFCR_PID_L4P_MASK 0xFFFFFF00
+#define RQFCR_PID_VID_MASK 0xFFFFF000
+#define RQFCR_PID_PORT_MASK 0xFFFF0000
+#define RQFCR_PID_MAC_MASK 0xFF000000
+
+struct gfar_mask_entry {
+ unsigned int mask; /* The mask value which is valid from start to end */
+ unsigned int start;
+ unsigned int end;
+ unsigned int block; /* Same block values indicate dependent entries */
+};
+
+/* Represents a receive filer table entry */
+struct gfar_filer_entry {
+ u32 ctrl;
+ u32 prop;
+};
+
+
+/* The 20 additional entries are a shadow for one extra element */
+struct filer_table {
+ u32 index;
+ struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20];
+};
+
+#endif /* __GIANFAR_H */
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
new file mode 100644
index 000000000000..212736bab6bb
--- /dev/null
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -0,0 +1,1764 @@
+/*
+ * drivers/net/gianfar_ethtool.c
+ *
+ * Gianfar Ethernet Driver
+ * Ethtool support for Gianfar Enet
+ * Based on e1000 ethtool support
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
+ *
+ * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
+ *
+ * This software may be used and distributed according to
+ * the terms of the GNU Public License, Version 2, incorporated herein
+ * by reference.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <asm/types.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/sort.h>
+#include <linux/if_vlan.h>
+
+#include "gianfar.h"
+
+extern void gfar_start(struct net_device *dev);
+extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
+
+#define GFAR_MAX_COAL_USECS 0xffff
+#define GFAR_MAX_COAL_FRAMES 0xff
+static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
+ u64 *buf);
+static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 *buf);
+static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
+static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
+static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
+static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
+static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo);
+
+static char stat_gstrings[][ETH_GSTRING_LEN] = {
+ "rx-dropped-by-kernel",
+ "rx-large-frame-errors",
+ "rx-short-frame-errors",
+ "rx-non-octet-errors",
+ "rx-crc-errors",
+ "rx-overrun-errors",
+ "rx-busy-errors",
+ "rx-babbling-errors",
+ "rx-truncated-frames",
+ "ethernet-bus-error",
+ "tx-babbling-errors",
+ "tx-underrun-errors",
+ "rx-skb-missing-errors",
+ "tx-timeout-errors",
+ "tx-rx-64-frames",
+ "tx-rx-65-127-frames",
+ "tx-rx-128-255-frames",
+ "tx-rx-256-511-frames",
+ "tx-rx-512-1023-frames",
+ "tx-rx-1024-1518-frames",
+ "tx-rx-1519-1522-good-vlan",
+ "rx-bytes",
+ "rx-packets",
+ "rx-fcs-errors",
+ "receive-multicast-packet",
+ "receive-broadcast-packet",
+ "rx-control-frame-packets",
+ "rx-pause-frame-packets",
+ "rx-unknown-op-code",
+ "rx-alignment-error",
+ "rx-frame-length-error",
+ "rx-code-error",
+ "rx-carrier-sense-error",
+ "rx-undersize-packets",
+ "rx-oversize-packets",
+ "rx-fragmented-frames",
+ "rx-jabber-frames",
+ "rx-dropped-frames",
+ "tx-byte-counter",
+ "tx-packets",
+ "tx-multicast-packets",
+ "tx-broadcast-packets",
+ "tx-pause-control-frames",
+ "tx-deferral-packets",
+ "tx-excessive-deferral-packets",
+ "tx-single-collision-packets",
+ "tx-multiple-collision-packets",
+ "tx-late-collision-packets",
+ "tx-excessive-collision-packets",
+ "tx-total-collision",
+ "reserved",
+ "tx-dropped-frames",
+ "tx-jabber-frames",
+ "tx-fcs-errors",
+ "tx-control-frames",
+ "tx-oversize-frames",
+ "tx-undersize-frames",
+ "tx-fragmented-frames",
+};
+
+/* Fill in a buffer with the strings which correspond to the
+ * stats */
+static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
+ memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
+ else
+ memcpy(buf, stat_gstrings,
+ GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
+}
+
+/* Fill in an array of 64-bit statistics from various sources.
+ * This array will be appended to the end of the ethtool_stats
+ * structure, and returned to user space
+ */
+static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 *buf)
+{
+ int i;
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u64 *extra = (u64 *)&priv->extra_stats;
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+ u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
+ struct gfar_stats *stats = (struct gfar_stats *) buf;
+
+ for (i = 0; i < GFAR_RMON_LEN; i++)
+ stats->rmon[i] = (u64) gfar_read(&rmon[i]);
+
+ for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
+ stats->extra[i] = extra[i];
+ } else
+ for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
+ buf[i] = extra[i];
+}
+
+static int gfar_sset_count(struct net_device *dev, int sset)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
+ return GFAR_STATS_LEN;
+ else
+ return GFAR_EXTRA_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Fills in the drvinfo structure with some basic info */
+static void gfar_gdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN);
+ strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN);
+ strncpy(drvinfo->fw_version, "N/A", GFAR_INFOSTR_LEN);
+ strncpy(drvinfo->bus_info, "N/A", GFAR_INFOSTR_LEN);
+ drvinfo->regdump_len = 0;
+ drvinfo->eedump_len = 0;
+}
+
+
+static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+
+ if (NULL == phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, cmd);
+}
+
+
+/* Return the current settings in the ethtool_cmd structure */
+static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ struct gfar_priv_tx_q *tx_queue = NULL;
+
+ if (NULL == phydev)
+ return -ENODEV;
+ tx_queue = priv->tx_queue[0];
+ rx_queue = priv->rx_queue[0];
+
+ /* etsec-1.7 and older versions have only one set of txic
+ * and rxic registers, although they support multiple queues */
+ cmd->maxtxpkt = get_icft_value(tx_queue->txic);
+ cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
+
+ return phy_ethtool_gset(phydev, cmd);
+}
+
+/* Return the length of the register structure */
+static int gfar_reglen(struct net_device *dev)
+{
+ return sizeof (struct gfar);
+}
+
+/* Return a dump of the GFAR register space */
+static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
+{
+ int i;
+ struct gfar_private *priv = netdev_priv(dev);
+ u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
+ u32 *buf = (u32 *) regbuf;
+
+ for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
+ buf[i] = gfar_read(&theregs[i]);
+}
+
+/* Convert microseconds to ethernet clock ticks; the tick length
+ * depends on the speed the controller is running at */
+static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs)
+{
+ unsigned int count;
+
+ /* The timer is different, depending on the interface speed */
+ switch (priv->phydev->speed) {
+ case SPEED_1000:
+ count = GFAR_GBIT_TIME;
+ break;
+ case SPEED_100:
+ count = GFAR_100_TIME;
+ break;
+ case SPEED_10:
+ default:
+ count = GFAR_10_TIME;
+ break;
+ }
+
+ /* Make sure we return a number greater than 0
+ * if usecs > 0 */
+ return (usecs * 1000 + count - 1) / count;
+}
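+
+/* For example (at SPEED_1000, count == GFAR_GBIT_TIME == 512):
+ * 100 us -> (100 * 1000 + 511) / 512 = 196 ticks; the "+ count - 1"
+ * makes the division round up, so any nonzero usecs maps to at
+ * least one tick. */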
+
+/* Convert ethernet clock ticks to microseconds */
+static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks)
+{
+ unsigned int count;
+
+ /* The timer is different, depending on the interface speed */
+ switch (priv->phydev->speed) {
+ case SPEED_1000:
+ count = GFAR_GBIT_TIME;
+ break;
+ case SPEED_100:
+ count = GFAR_100_TIME;
+ break;
+ case SPEED_10:
+ default:
+ count = GFAR_10_TIME;
+ break;
+ }
+
+ /* Convert ticks back to microseconds (note the integer
+ * division rounds down) */
+ return (ticks * count) / 1000;
+}
+
+/* Get the coalescing parameters, and put them in the cvals
+ * structure. */
+static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ unsigned long rxtime;
+ unsigned long rxcount;
+ unsigned long txtime;
+ unsigned long txcount;
+
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
+ return -EOPNOTSUPP;
+
+ if (NULL == priv->phydev)
+ return -ENODEV;
+
+ rx_queue = priv->rx_queue[0];
+ tx_queue = priv->tx_queue[0];
+
+ rxtime = get_ictt_value(rx_queue->rxic);
+ rxcount = get_icft_value(rx_queue->rxic);
+ txtime = get_ictt_value(tx_queue->txic);
+ txcount = get_icft_value(tx_queue->txic);
+ cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
+ cvals->rx_max_coalesced_frames = rxcount;
+
+ cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
+ cvals->tx_max_coalesced_frames = txcount;
+
+ cvals->use_adaptive_rx_coalesce = 0;
+ cvals->use_adaptive_tx_coalesce = 0;
+
+ cvals->pkt_rate_low = 0;
+ cvals->rx_coalesce_usecs_low = 0;
+ cvals->rx_max_coalesced_frames_low = 0;
+ cvals->tx_coalesce_usecs_low = 0;
+ cvals->tx_max_coalesced_frames_low = 0;
+
+ /* When the packet rate is below pkt_rate_high but above
+ * pkt_rate_low (both measured in packets per second) the
+ * normal {rx,tx}_* coalescing parameters are used.
+ */
+
+ /* When the packet rate (measured in packets per second)
+ * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+ * used.
+ */
+ cvals->pkt_rate_high = 0;
+ cvals->rx_coalesce_usecs_high = 0;
+ cvals->rx_max_coalesced_frames_high = 0;
+ cvals->tx_coalesce_usecs_high = 0;
+ cvals->tx_max_coalesced_frames_high = 0;
+
+ /* How often to do adaptive coalescing packet rate sampling,
+ * measured in seconds. Must not be zero.
+ */
+ cvals->rate_sample_interval = 0;
+
+ return 0;
+}
+
+/* Change the coalescing values.
+ * Both cvals->*_usecs and cvals->*_frames have to be > 0
+ * in order for coalescing to be active
+ */
+static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int i = 0;
+
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
+ return -EOPNOTSUPP;
+
+ /* Set up rx coalescing. For now, coalescing is enabled or
+ * disabled for all queues together on eTSEC2; this will be
+ * revisited when the ethtool interface is extended */
+ if ((cvals->rx_coalesce_usecs == 0) ||
+ (cvals->rx_max_coalesced_frames == 0)) {
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->rx_queue[i]->rxcoalescing = 0;
+ } else {
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->rx_queue[i]->rxcoalescing = 1;
+ }
+
+ if (NULL == priv->phydev)
+ return -ENODEV;
+
+ /* Check the bounds of the values */
+ if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
+ pr_info("Coalescing is limited to %d microseconds\n",
+ GFAR_MAX_COAL_USECS);
+ return -EINVAL;
+ }
+
+ if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
+ pr_info("Coalescing is limited to %d frames\n",
+ GFAR_MAX_COAL_FRAMES);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ priv->rx_queue[i]->rxic = mk_ic_value(
+ cvals->rx_max_coalesced_frames,
+ gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
+ }
+
+ /* Set up tx coalescing */
+ if ((cvals->tx_coalesce_usecs == 0) ||
+ (cvals->tx_max_coalesced_frames == 0)) {
+ for (i = 0; i < priv->num_tx_queues; i++)
+ priv->tx_queue[i]->txcoalescing = 0;
+ } else {
+ for (i = 0; i < priv->num_tx_queues; i++)
+ priv->tx_queue[i]->txcoalescing = 1;
+ }
+
+ /* Check the bounds of the values */
+ if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
+ pr_info("Coalescing is limited to %d microseconds\n",
+ GFAR_MAX_COAL_USECS);
+ return -EINVAL;
+ }
+
+ if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
+ pr_info("Coalescing is limited to %d frames\n",
+ GFAR_MAX_COAL_FRAMES);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ priv->tx_queue[i]->txic = mk_ic_value(
+ cvals->tx_max_coalesced_frames,
+ gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
+ }
+
+ gfar_configure_coalescing(priv, 0xFF, 0xFF);
+
+ return 0;
+}
+
+/* Fills in rvals with the current ring parameters. Currently,
+ * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
+ * jumbo are ignored by the driver */
+static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+
+ tx_queue = priv->tx_queue[0];
+ rx_queue = priv->rx_queue[0];
+
+ rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
+ rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
+ rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
+ rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;
+
+ /* Values changeable by the user. The valid values are
+ * in the range 1 to the "*_max_pending" counterpart above.
+ */
+ rvals->rx_pending = rx_queue->rx_ring_size;
+ rvals->rx_mini_pending = rx_queue->rx_ring_size;
+ rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
+ rvals->tx_pending = tx_queue->tx_ring_size;
+}
+
+/* Change the current ring parameters, stopping the controller if
+ * necessary so that the change does not race with active traffic.
+ * We wait for the rings to be cleaned before reallocating them. */
+static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int err = 0, i = 0;
+
+ if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(rvals->rx_pending)) {
+ netdev_err(dev, "Ring sizes must be a power of 2\n");
+ return -EINVAL;
+ }
+
+ if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(rvals->tx_pending)) {
+ netdev_err(dev, "Ring sizes must be a power of 2\n");
+ return -EINVAL;
+ }
+
+
+ if (dev->flags & IFF_UP) {
+ unsigned long flags;
+
+ /* Halt TX and RX, and process the frames which
+ * have already been received */
+ local_irq_save(flags);
+ lock_tx_qs(priv);
+ lock_rx_qs(priv);
+
+ gfar_halt(dev);
+
+ unlock_rx_qs(priv);
+ unlock_tx_qs(priv);
+ local_irq_restore(flags);
+
+ for (i = 0; i < priv->num_rx_queues; i++)
+ gfar_clean_rx_ring(priv->rx_queue[i],
+ priv->rx_queue[i]->rx_ring_size);
+
+ /* Now we take down the rings to rebuild them */
+ stop_gfar(dev);
+ }
+
+ /* Change the size */
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
+ priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
+ priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size;
+ }
+
+ /* Rebuild the rings with the new size */
+ if (dev->flags & IFF_UP) {
+ err = startup_gfar(dev);
+ netif_tx_wake_all_queues(dev);
+ }
+ return err;
+}
+
+int gfar_set_features(struct net_device *dev, u32 features)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ unsigned long flags;
+ int err = 0, i = 0;
+ u32 changed = dev->features ^ features;
+
+ if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX))
+ gfar_vlan_mode(dev, features);
+
+ if (!(changed & NETIF_F_RXCSUM))
+ return 0;
+
+ if (dev->flags & IFF_UP) {
+ /* Halt TX and RX, and process the frames which
+ * have already been received */
+ local_irq_save(flags);
+ lock_tx_qs(priv);
+ lock_rx_qs(priv);
+
+ gfar_halt(dev);
+
+ unlock_tx_qs(priv);
+ unlock_rx_qs(priv);
+ local_irq_restore(flags);
+
+ for (i = 0; i < priv->num_rx_queues; i++)
+ gfar_clean_rx_ring(priv->rx_queue[i],
+ priv->rx_queue[i]->rx_ring_size);
+
+ /* Now we take down the rings to rebuild them */
+ stop_gfar(dev);
+
+ dev->features = features;
+
+ err = startup_gfar(dev);
+ netif_tx_wake_all_queues(dev);
+ }
+ return err;
+}
+
+static uint32_t gfar_get_msglevel(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ return priv->msg_enable;
+}
+
+static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ priv->msg_enable = data;
+}
+
+#ifdef CONFIG_PM
+static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
+ } else {
+ wol->supported = wol->wolopts = 0;
+ }
+}
+
+static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
+ wol->wolopts != 0)
+ return -EINVAL;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
+
+ spin_lock_irqsave(&priv->bflock, flags);
+ priv->wol_en = !!device_may_wakeup(&dev->dev);
+ spin_unlock_irqrestore(&priv->bflock, flags);
+
+ return 0;
+}
+#endif
+
+static void ethflow_to_filer_rules(struct gfar_private *priv, u64 ethflow)
+{
+ u32 fcr = 0x0, fpr = FPR_FILER_MASK;
+
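+ /* Each requested header field adds hash rules below; the filer table is
+ * filled from the bottom up, so cur_filer_idx decrements after every entry */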
+ if (ethflow & RXH_L2DA) {
+ fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
+ RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+
+ fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH |
+ RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_VLAN) {
+ fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_IP_SRC) {
+ fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_IP_DST) {
+ fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_L3_PROTO) {
+ fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_L4_B_0_1) {
+ fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_L4_B_2_3) {
+ fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+}
+
+static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class)
+{
+ unsigned int last_rule_idx = priv->cur_filer_idx;
+ unsigned int cmp_rqfpr;
+ unsigned int *local_rqfpr;
+ unsigned int *local_rqfcr;
+ int i = 0, k = 0;
+ int j = MAX_FILER_IDX, l = 0;
+ int ret = 1;
+
+ local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
+ GFP_KERNEL);
+ local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
+ GFP_KERNEL);
+ if (!local_rqfpr || !local_rqfcr) {
+ pr_err("Out of memory\n");
+ ret = 0;
+ goto err;
+ }
+
+ switch (class) {
+ case TCP_V4_FLOW:
+ cmp_rqfpr = RQFPR_IPV4 | RQFPR_TCP;
+ break;
+ case UDP_V4_FLOW:
+ cmp_rqfpr = RQFPR_IPV4 | RQFPR_UDP;
+ break;
+ case TCP_V6_FLOW:
+ cmp_rqfpr = RQFPR_IPV6 | RQFPR_TCP;
+ break;
+ case UDP_V6_FLOW:
+ cmp_rqfpr = RQFPR_IPV6 | RQFPR_UDP;
+ break;
+ default:
+ pr_err("Unsupported flow class\n");
+ ret = 0;
+ goto err;
+ }
+
+ for (i = 0; i < MAX_FILER_IDX + 1; i++) {
+ local_rqfpr[j] = priv->ftp_rqfpr[i];
+ local_rqfcr[j] = priv->ftp_rqfcr[i];
+ j--;
+ if ((priv->ftp_rqfcr[i] == (RQFCR_PID_PARSE |
+ RQFCR_CLE | RQFCR_AND)) &&
+ (priv->ftp_rqfpr[i] == cmp_rqfpr))
+ break;
+ }
+
+ if (i == MAX_FILER_IDX + 1) {
+ pr_err("No parse rule found, can't create hash rules\n");
+ ret = 0;
+ goto err;
+ }
+
+ /* The matched entry starts a cluster rule. If that cluster was
+ * already programmed, we need to overwrite its rules
+ */
+ for (l = i+1; l < MAX_FILER_IDX; l++) {
+ if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
+ !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
+ priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
+ RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
+ priv->ftp_rqfpr[l] = FPR_FILER_MASK;
+ gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
+ priv->ftp_rqfpr[l]);
+ break;
+ }
+
+ if ((priv->ftp_rqfcr[l] & RQFCR_CLE) ||
+ !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
+ local_rqfpr[j] = priv->ftp_rqfpr[l];
+ local_rqfcr[j] = priv->ftp_rqfcr[l];
+ j--;
+ }
+ }
+
+ priv->cur_filer_idx = l - 1;
+ last_rule_idx = l;
+
+ /* hash rules */
+ ethflow_to_filer_rules(priv, ethflow);
+
+ /* Write back the popped out rules again */
+ for (k = j+1; k < MAX_FILER_IDX; k++) {
+ priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
+ priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
+ gfar_write_filer(priv, priv->cur_filer_idx,
+ local_rqfcr[k], local_rqfpr[k]);
+ if (!priv->cur_filer_idx)
+ break;
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+err:
+ kfree(local_rqfcr);
+ kfree(local_rqfpr);
+ return ret;
+}
+
+static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
+{
+ /* write the filer rules here */
+ if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int gfar_check_filer_hardware(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 i;
+
+ /* Check if we are in FIFO mode */
+ i = gfar_read(&regs->ecntrl);
+ i &= ECNTRL_FIFM;
+ if (i == ECNTRL_FIFM) {
+ netdev_notice(priv->ndev, "Interface in FIFO mode\n");
+ i = gfar_read(&regs->rctrl);
+ i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
+ if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
+ netdev_info(priv->ndev,
+ "Receive Queue Filtering enabled\n");
+ } else {
+ netdev_warn(priv->ndev,
+ "Receive Queue Filtering disabled\n");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ /* Standard mode */
+ i = gfar_read(&regs->rctrl);
+ i &= RCTRL_PRSDEP_MASK;
+ if (i == RCTRL_PRSDEP_MASK) {
+ netdev_info(priv->ndev,
+ "Receive Queue Filtering enabled\n");
+ } else {
+ netdev_warn(priv->ndev,
+ "Receive Queue Filtering disabled\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ /* Point the arbitrary filer field at the first 4 Layer 4 bytes */
+ gfar_write(&regs->rbifx, 0xC0C1C2C3);
+ return 0;
+}
+
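+/* Comparators for sort(): compare two mask-table entries by their leading u32 (the mask value) */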
+static int gfar_comp_asc(const void *a, const void *b)
+{
+ return memcmp(a, b, 4);
+}
+
+static int gfar_comp_desc(const void *a, const void *b)
+{
+ return -memcmp(a, b, 4);
+}
+
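+/* Swap callback for sort(): one struct gfar_mask_entry is four u32s wide */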
+static void gfar_swap(void *a, void *b, int size)
+{
+ u32 *_a = a;
+ u32 *_b = b;
+
+ swap(_a[0], _b[0]);
+ swap(_a[1], _b[1]);
+ swap(_a[2], _b[2]);
+ swap(_a[3], _b[3]);
+}
+
+/* Write a mask to filer cache */
+static void gfar_set_mask(u32 mask, struct filer_table *tab)
+{
+ tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
+ tab->fe[tab->index].prop = mask;
+ tab->index++;
+}
+
+/* Sets parse bits (e.g. IP or TCP) */
+static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
+{
+ gfar_set_mask(mask, tab);
+ tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE
+ | RQFCR_AND;
+ tab->fe[tab->index].prop = value;
+ tab->index++;
+}
+
+static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
+ struct filer_table *tab)
+{
+ gfar_set_mask(mask, tab);
+ tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
+ tab->fe[tab->index].prop = value;
+ tab->index++;
+}
+
+/*
+ * Sets a tuple of value and mask of type flag
+ * Example:
+ * IP-Src = 10.0.0.0/255.0.0.0
+ * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
+ *
+ * Ethtool gives us a value=0 and mask=~0 for a don't-care tuple,
+ * and a mask=0 when only the mask is don't-care.
+ *
+ * The don't-care check and the mask=0 adjustment are done for VLAN
+ * and MAC at an upper level, since that information is missing here.
+ * Such entries can simply be discarded when value=0 and mask=0.
+ *
+ * Furthermore, all masks are one-padded for better hardware efficiency.
+ */
+static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
+ struct filer_table *tab)
+{
+ switch (flag) {
+ /* 3bit */
+ case RQFCR_PID_PRI:
+ if (!(value | mask))
+ return;
+ mask |= RQFCR_PID_PRI_MASK;
+ break;
+ /* 8bit */
+ case RQFCR_PID_L4P:
+ case RQFCR_PID_TOS:
+ if (!~(mask | RQFCR_PID_L4P_MASK))
+ return;
+ if (!mask)
+ mask = ~0;
+ else
+ mask |= RQFCR_PID_L4P_MASK;
+ break;
+ /* 12bit */
+ case RQFCR_PID_VID:
+ if (!(value | mask))
+ return;
+ mask |= RQFCR_PID_VID_MASK;
+ break;
+ /* 16bit */
+ case RQFCR_PID_DPT:
+ case RQFCR_PID_SPT:
+ case RQFCR_PID_ETY:
+ if (!~(mask | RQFCR_PID_PORT_MASK))
+ return;
+ if (!mask)
+ mask = ~0;
+ else
+ mask |= RQFCR_PID_PORT_MASK;
+ break;
+ /* 24bit */
+ case RQFCR_PID_DAH:
+ case RQFCR_PID_DAL:
+ case RQFCR_PID_SAH:
+ case RQFCR_PID_SAL:
+ if (!(value | mask))
+ return;
+ mask |= RQFCR_PID_MAC_MASK;
+ break;
+ /* for all real 32bit masks */
+ default:
+ if (!~mask)
+ return;
+ if (!mask)
+ mask = ~0;
+ break;
+ }
+ gfar_set_general_attribute(value, mask, flag, tab);
+}
+
+/* Translates value and mask for UDP, TCP or SCTP */
+static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
+ struct ethtool_tcpip4_spec *mask, struct filer_table *tab)
+{
+ gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
+ gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
+ gfar_set_attribute(value->pdst, mask->pdst, RQFCR_PID_DPT, tab);
+ gfar_set_attribute(value->psrc, mask->psrc, RQFCR_PID_SPT, tab);
+ gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
+}
+
+/* Translates value and mask for RAW-IP4 */
+static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
+ struct ethtool_usrip4_spec *mask, struct filer_table *tab)
+{
+ gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
+ gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
+ gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
+ gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
+ gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
+ tab);
+}
+
+/* Translates value and mask for ETHER spec */
+static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
+ struct filer_table *tab)
+{
+ u32 upper_temp_mask = 0;
+ u32 lower_temp_mask = 0;
+ /* Source address */
+ if (!is_broadcast_ether_addr(mask->h_source)) {
+ if (is_zero_ether_addr(mask->h_source)) {
+ upper_temp_mask = 0xFFFFFFFF;
+ lower_temp_mask = 0xFFFFFFFF;
+ } else {
+ upper_temp_mask = mask->h_source[0] << 16
+ | mask->h_source[1] << 8
+ | mask->h_source[2];
+ lower_temp_mask = mask->h_source[3] << 16
+ | mask->h_source[4] << 8
+ | mask->h_source[5];
+ }
+ /* Upper 24bit */
+ gfar_set_attribute(
+ value->h_source[0] << 16 | value->h_source[1]
+ << 8 | value->h_source[2],
+ upper_temp_mask, RQFCR_PID_SAH, tab);
+ /* And the same for the lower part */
+ gfar_set_attribute(
+ value->h_source[3] << 16 | value->h_source[4]
+ << 8 | value->h_source[5],
+ lower_temp_mask, RQFCR_PID_SAL, tab);
+ }
+ /* Destination address */
+ if (!is_broadcast_ether_addr(mask->h_dest)) {
+ /* Special for destination is limited broadcast */
+ if ((is_broadcast_ether_addr(value->h_dest)
+ && is_zero_ether_addr(mask->h_dest))) {
+ gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
+ } else {
+ if (is_zero_ether_addr(mask->h_dest)) {
+ upper_temp_mask = 0xFFFFFFFF;
+ lower_temp_mask = 0xFFFFFFFF;
+ } else {
+ upper_temp_mask = mask->h_dest[0] << 16
+ | mask->h_dest[1] << 8
+ | mask->h_dest[2];
+ lower_temp_mask = mask->h_dest[3] << 16
+ | mask->h_dest[4] << 8
+ | mask->h_dest[5];
+ }
+
+ /* Upper 24bit */
+ gfar_set_attribute(
+ value->h_dest[0] << 16
+ | value->h_dest[1] << 8
+ | value->h_dest[2],
+ upper_temp_mask, RQFCR_PID_DAH, tab);
+ /* And the same for the lower part */
+ gfar_set_attribute(
+ value->h_dest[3] << 16
+ | value->h_dest[4] << 8
+ | value->h_dest[5],
+ lower_temp_mask, RQFCR_PID_DAL, tab);
+ }
+ }
+
+ gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
+}
+
+/* Convert a rule to binary filter format of gianfar */
+static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
+ struct filer_table *tab)
+{
+ u32 vlan = 0, vlan_mask = 0;
+ u32 id = 0, id_mask = 0;
+ u32 cfi = 0, cfi_mask = 0;
+ u32 prio = 0, prio_mask = 0;
+
+ u32 old_index = tab->index;
+
+ /* Check if vlan is wanted */
+ if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) {
+ if (!rule->m_ext.vlan_tci)
+ rule->m_ext.vlan_tci = 0xFFFF;
+
+ vlan = RQFPR_VLN;
+ vlan_mask = RQFPR_VLN;
+
+ /* Separate the fields */
+ id = rule->h_ext.vlan_tci & VLAN_VID_MASK;
+ id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK;
+ cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK;
+ cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK;
+ prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+
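+ /* Map a requested CFI match (tag-present bit) onto the parser's RQFPR_CFI flag */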
+ if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
+ vlan |= RQFPR_CFI;
+ vlan_mask |= RQFPR_CFI;
+ } else if (cfi != VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
+ vlan_mask |= RQFPR_CFI;
+ }
+ }
+
+ switch (rule->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
+ RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
+ gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
+ &rule->m_u.tcp_ip4_spec, tab);
+ break;
+ case UDP_V4_FLOW:
+ gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
+ RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
+ gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
+ &rule->m_u.udp_ip4_spec, tab);
+ break;
+ case SCTP_V4_FLOW:
+ gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
+ tab);
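+ /* 132 is IPPROTO_SCTP */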
+ gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
+ gfar_set_basic_ip((struct ethtool_tcpip4_spec *) &rule->h_u,
+ (struct ethtool_tcpip4_spec *) &rule->m_u, tab);
+ break;
+ case IP_USER_FLOW:
+ gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
+ tab);
+ gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
+ (struct ethtool_usrip4_spec *) &rule->m_u, tab);
+ break;
+ case ETHER_FLOW:
+ if (vlan)
+ gfar_set_parse_bits(vlan, vlan_mask, tab);
+ gfar_set_ether((struct ethhdr *) &rule->h_u,
+ (struct ethhdr *) &rule->m_u, tab);
+ break;
+ default:
+ return -1;
+ }
+
+ /* Set the vlan attributes in the end */
+ if (vlan) {
+ gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
+ gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
+ }
+
+ /* If there has been nothing written till now, it must be a default */
+ if (tab->index == old_index) {
+ gfar_set_mask(0xFFFFFFFF, tab);
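+ /* ctrl 0x20 is RQFCR_CMP_MATCH with prop 0: a default entry that accepts everything */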
+ tab->fe[tab->index].ctrl = 0x20;
+ tab->fe[tab->index].prop = 0x0;
+ tab->index++;
+ }
+
+ /* Remove last AND */
+ tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);
+
+ /* Specify which queue to use or to drop */
+ if (rule->ring_cookie == RX_CLS_FLOW_DISC)
+ tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
+ else
+ tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);
+
+ /* Only big enough entries can be clustered */
+ if (tab->index > (old_index + 2)) {
+ tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
+ tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
+ }
+
+ /* In rare cases the cache can be full while there is free space in hw */
+ if (tab->index > MAX_FILER_CACHE_IDX - 1)
+ return -EBUSY;
+
+ return 0;
+}
+
+/* Copy size filer entries */
+static void gfar_copy_filer_entries(struct gfar_filer_entry dst[],
+ struct gfar_filer_entry src[], s32 size)
+{
+ while (size > 0) {
+ size--;
+ dst[size].ctrl = src[size].ctrl;
+ dst[size].prop = src[size].prop;
+ }
+}
+
+/* Delete the contents of the filer-table between start and end
+ * and collapse them */
+static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
+{
+ int length;
+ if (end > MAX_FILER_CACHE_IDX || end < begin)
+ return -EINVAL;
+
+ end++;
+ length = end - begin;
+
+ /* Copy */
+ while (end < tab->index) {
+ tab->fe[begin].ctrl = tab->fe[end].ctrl;
+ tab->fe[begin++].prop = tab->fe[end++].prop;
+ }
+ /* Fill up with don't cares */
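+ /* ctrl 0x60 is RQFCR_CMP_NOMATCH: the driver's "don't care" fall-through pattern */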
+ while (begin < tab->index) {
+ tab->fe[begin].ctrl = 0x60;
+ tab->fe[begin].prop = 0xFFFFFFFF;
+ begin++;
+ }
+
+ tab->index -= length;
+ return 0;
+}
+
+/* Make space on the wanted location */
+static int gfar_expand_filer_entries(u32 begin, u32 length,
+ struct filer_table *tab)
+{
+ if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
+ begin > MAX_FILER_CACHE_IDX)
+ return -EINVAL;
+
+ gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
+ tab->index - begin);
+
+ tab->index += length;
+ return 0;
+}
+
+static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
+{
+ for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) {
+ if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
+ == (RQFCR_AND | RQFCR_CLE))
+ return start;
+ }
+ return -1;
+}
+
+static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
+{
+ for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) {
+ if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
+ == (RQFCR_CLE))
+ return start;
+ }
+ return -1;
+}
+
+/*
+ * Uses the hardware's clustering option to reduce
+ * the number of filer table entries
+ */
+static void gfar_cluster_filer(struct filer_table *tab)
+{
+ s32 i = -1, j, iend, jend;
+
+ while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
+ j = i;
+ while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
+ /*
+ * The cluster entry itself and the previous one
+ * (its mask) must be identical!
+ */
+ if (tab->fe[i].ctrl != tab->fe[j].ctrl)
+ break;
+ if (tab->fe[i].prop != tab->fe[j].prop)
+ break;
+ if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
+ break;
+ if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
+ break;
+ iend = gfar_get_next_cluster_end(i, tab);
+ jend = gfar_get_next_cluster_end(j, tab);
+ if (jend == -1 || iend == -1)
+ break;
+ /*
+ * First we make some free space, where our cluster
+ * element should be. Then we copy it there and finally
+ * delete it from its old location.
+ */
+
+ if (gfar_expand_filer_entries(iend, (jend - j), tab)
+ == -EINVAL)
+ break;
+
+ gfar_copy_filer_entries(&(tab->fe[iend + 1]),
+ &(tab->fe[jend + 1]), jend - j);
+
+ if (gfar_trim_filer_entries(jend - 1,
+ jend + (jend - j), tab) == -EINVAL)
+ return;
+
+ /* Mask out cluster bit */
+ tab->fe[iend].ctrl &= ~(RQFCR_CLE);
+ }
+ }
+}
+
+/* Swaps the masked bits of a1<>a2 and b1<>b2 */
+static void gfar_swap_bits(struct gfar_filer_entry *a1,
+ struct gfar_filer_entry *a2, struct gfar_filer_entry *b1,
+ struct gfar_filer_entry *b2, u32 mask)
+{
+ u32 temp[4];
+ temp[0] = a1->ctrl & mask;
+ temp[1] = a2->ctrl & mask;
+ temp[2] = b1->ctrl & mask;
+ temp[3] = b2->ctrl & mask;
+
+ a1->ctrl &= ~mask;
+ a2->ctrl &= ~mask;
+ b1->ctrl &= ~mask;
+ b2->ctrl &= ~mask;
+
+ a1->ctrl |= temp[1];
+ a2->ctrl |= temp[0];
+ b1->ctrl |= temp[3];
+ b2->ctrl |= temp[2];
+}
+
+/*
+ * Generate a list of mask values with their start and
+ * end of validity, and the block number as an indicator
+ * for parts belonging together (glued by ANDs) in mask_table
+ */
+static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
+ struct filer_table *tab)
+{
+ u32 i, and_index = 0, block_index = 1;
+
+ for (i = 0; i < tab->index; i++) {
+
+ /* LSByte of control = 0 sets a mask */
+ if (!(tab->fe[i].ctrl & 0xF)) {
+ mask_table[and_index].mask = tab->fe[i].prop;
+ mask_table[and_index].start = i;
+ mask_table[and_index].block = block_index;
+ if (and_index >= 1)
+ mask_table[and_index - 1].end = i - 1;
+ and_index++;
+ }
+ /* cluster starts and ends are kept in separate blocks
+ * because they must hold their position */
+ if (tab->fe[i].ctrl & RQFCR_CLE)
+ block_index++;
+ /* A cleared AND bit indicates the end of a dependent block */
+ if (!(tab->fe[i].ctrl & RQFCR_AND))
+ block_index++;
+ }
+
+ mask_table[and_index - 1].end = i - 1;
+
+ return and_index;
+}
+
+/*
+ * Sorts the entries of mask_table by the values of the masks.
+ * Important: The 0xFF80 flags of the first and last entry of a
+ * block must hold their position (which queue, ClusterEnable, ReJEct,
+ * AND)
+ */
+static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
+ struct filer_table *temp_table, u32 and_index)
+{
+ /* Pointer to compare function (_asc or _desc) */
+ int (*gfar_comp)(const void *, const void *);
+
+ u32 i, size = 0, start = 0, prev = 1;
+ u32 old_first, old_last, new_first, new_last;
+
+ gfar_comp = &gfar_comp_desc;
+
+ for (i = 0; i < and_index; i++) {
+
+ if (prev != mask_table[i].block) {
+ old_first = mask_table[start].start + 1;
+ old_last = mask_table[i - 1].end;
+ sort(mask_table + start, size,
+ sizeof(struct gfar_mask_entry),
+ gfar_comp, &gfar_swap);
+
+ /* Toggle the sort order for every block so equal masks
+ * of neighbouring blocks end up adjacent and can later
+ * be dropped as duplicates */
+ if (gfar_comp == gfar_comp_desc)
+ gfar_comp = &gfar_comp_asc;
+ else
+ gfar_comp = &gfar_comp_desc;
+
+ new_first = mask_table[start].start + 1;
+ new_last = mask_table[i - 1].end;
+
+ gfar_swap_bits(&temp_table->fe[new_first],
+ &temp_table->fe[old_first],
+ &temp_table->fe[new_last],
+ &temp_table->fe[old_last],
+ RQFCR_QUEUE | RQFCR_CLE |
+ RQFCR_RJE | RQFCR_AND
+ );
+
+ start = i;
+ size = 0;
+ }
+ size++;
+ prev = mask_table[i].block;
+ }
+}
+
+/*
+ * Reduces the number of masks needed in the filer table to save entries.
+ * This is done by sorting the masks of a dependent block. A dependent block
+ * is identified by chained ANDs or a CLE bit. The sorting order toggles
+ * after every block. Of course, entries in the scope of a mask must change
+ * their location with it.
+ */
+static int gfar_optimize_filer_masks(struct filer_table *tab)
+{
+ struct filer_table *temp_table;
+ struct gfar_mask_entry *mask_table;
+
+ u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
+ s32 ret = 0;
+
+ /* We need a copy of the filer table because
+ * we want to change its order */
+ temp_table = kmalloc(sizeof(*temp_table), GFP_KERNEL);
+ if (temp_table == NULL)
+ return -ENOMEM;
+ memcpy(temp_table, tab, sizeof(*temp_table));
+
+ mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
+ sizeof(struct gfar_mask_entry), GFP_KERNEL);
+
+ if (mask_table == NULL) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ and_index = gfar_generate_mask_table(mask_table, tab);
+
+ gfar_sort_mask_table(mask_table, temp_table, and_index);
+
+ /* Now we can copy the data from our duplicated filer table to
+ * the real one in the order the mask table says */
+ for (i = 0; i < and_index; i++) {
+ size = mask_table[i].end - mask_table[i].start + 1;
+ gfar_copy_filer_entries(&(tab->fe[j]),
+ &(temp_table->fe[mask_table[i].start]), size);
+ j += size;
+ }
+
+ /* And finally we just have to check for duplicated masks and drop the
+ * second ones */
+ for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
+ if (tab->fe[i].ctrl == 0x80) {
+ previous_mask = i++;
+ break;
+ }
+ }
+ for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
+ if (tab->fe[i].ctrl == 0x80) {
+ if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
+ /* Two identical ones found!
+ * So drop the second one! */
+ gfar_trim_filer_entries(i, i, tab);
+ } else
+ /* Not identical! */
+ previous_mask = i;
+ }
+ }
+
+ kfree(mask_table);
+end:
+ kfree(temp_table);
+ return ret;
+}
+
+/* Write the bit-pattern from software's buffer to hardware registers */
+static int gfar_write_filer_table(struct gfar_private *priv,
+ struct filer_table *tab)
+{
+ u32 i = 0;
+ if (tab->index > MAX_FILER_IDX - 1)
+ return -EBUSY;
+
+ /* Don't let an inconsistent filer table be processed */
+ lock_rx_qs(priv);
+
+ /* Fill regular entries */
+ for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
+ gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
+ /* Fill the rest with fall-throughs */
+ for (; i < MAX_FILER_IDX; i++)
+ gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
+ /* Last entry must be default accept
+ * because that's what people expect */
+ gfar_write_filer(priv, i, 0x20, 0x0);
+
+ unlock_rx_qs(priv);
+
+ return 0;
+}
+
+static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
+ struct gfar_private *priv)
+{
+ if (flow->flow_type & FLOW_EXT) {
+ if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
+ netdev_warn(priv->ndev,
+ "User-specific data not supported!\n");
+ if (~flow->m_ext.vlan_etype)
+ netdev_warn(priv->ndev,
+ "VLAN-etype not supported!\n");
+ }
+ if (flow->flow_type == IP_USER_FLOW)
+ if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
+ netdev_warn(priv->ndev,
+ "IP-Version differing from IPv4 not supported!\n");
+
+ return 0;
+}
+
+static int gfar_process_filer_changes(struct gfar_private *priv)
+{
+ struct ethtool_flow_spec_container *j;
+ struct filer_table *tab;
+ s32 i = 0;
+ s32 ret = 0;
+
+ /* kzalloc also zeroes tab->index */
+ tab = kzalloc(sizeof(*tab), GFP_KERNEL);
+ if (tab == NULL)
+ return -ENOMEM;
+
+ /* Now convert the existing filer data from flow_spec into
+ * the filer table's binary format */
+ list_for_each_entry(j, &priv->rx_list.list, list) {
+ ret = gfar_convert_to_filer(&j->fs, tab);
+ if (ret == -EBUSY) {
+ netdev_err(priv->ndev, "Rule not added: No free space!\n");
+ goto end;
+ }
+ if (ret == -1) {
+ netdev_err(priv->ndev, "Rule not added: Unsupported Flow-type!\n");
+ goto end;
+ }
+ }
+
+ i = tab->index;
+
+ /* Optimizations to save entries */
+ gfar_cluster_filer(tab);
+ gfar_optimize_filer_masks(tab);
+
+ pr_debug("\n\tSummary:\n"
+ "\tData on hardware: %d\n"
+ "\tCompression rate: %d%%\n",
+ tab->index, i ? 100 - (100 * tab->index) / i : 0);
+
+ /* Write everything to hardware */
+ ret = gfar_write_filer_table(priv, tab);
+ if (ret == -EBUSY) {
+ netdev_err(priv->ndev, "Rule not added: No free space!\n");
+ goto end;
+ }
+
+end:
+ kfree(tab);
+ return ret;
+}
+
+static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
+{
+ u32 i = 0;
+
+ for (i = 0; i < sizeof(flow->m_u); i++)
+ flow->m_u.hdata[i] ^= 0xFF;
+
+ flow->m_ext.vlan_etype ^= 0xFFFF;
+ flow->m_ext.vlan_tci ^= 0xFFFF;
+ flow->m_ext.data[0] ^= ~0;
+ flow->m_ext.data[1] ^= ~0;
+}
+
+static int gfar_add_cls(struct gfar_private *priv,
+ struct ethtool_rx_flow_spec *flow)
+{
+ struct ethtool_flow_spec_container *temp, *comp;
+ int ret = 0;
+
+ temp = kmalloc(sizeof(*temp), GFP_KERNEL);
+ if (temp == NULL)
+ return -ENOMEM;
+ memcpy(&temp->fs, flow, sizeof(temp->fs));
+
+ gfar_invert_masks(&temp->fs);
+ ret = gfar_check_capability(&temp->fs, priv);
+ if (ret)
+ goto clean_mem;
+ /* Link in the new element at the right location */
+ if (list_empty(&priv->rx_list.list)) {
+ ret = gfar_check_filer_hardware(priv);
+ if (ret != 0)
+ goto clean_mem;
+ list_add(&temp->list, &priv->rx_list.list);
+ goto process;
+ } else {
+ list_for_each_entry(comp, &priv->rx_list.list, list) {
+ if (comp->fs.location > flow->location) {
+ list_add_tail(&temp->list, &comp->list);
+ goto process;
+ }
+ if (comp->fs.location == flow->location) {
+ netdev_err(priv->ndev,
+ "Rule not added: ID %d not free!\n",
+ flow->location);
+ ret = -EBUSY;
+ goto clean_mem;
+ }
+ }
+ list_add_tail(&temp->list, &priv->rx_list.list);
+ }
+
+process:
+ ret = gfar_process_filer_changes(priv);
+ if (ret)
+ goto clean_list;
+ priv->rx_list.count++;
+ return ret;
+
+clean_list:
+ list_del(&temp->list);
+clean_mem:
+ kfree(temp);
+ return ret;
+}
+
+static int gfar_del_cls(struct gfar_private *priv, u32 loc)
+{
+ struct ethtool_flow_spec_container *comp;
+ int ret = -EINVAL;
+
+ if (list_empty(&priv->rx_list.list))
+ return ret;
+
+ list_for_each_entry(comp, &priv->rx_list.list, list) {
+ if (comp->fs.location == loc) {
+ list_del(&comp->list);
+ kfree(comp);
+ priv->rx_list.count--;
+ gfar_process_filer_changes(priv);
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_flow_spec_container *comp;
+ int ret = -EINVAL;
+
+ list_for_each_entry(comp, &priv->rx_list.list, list) {
+ if (comp->fs.location == cmd->fs.location) {
+ memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
+ gfar_invert_masks(&cmd->fs);
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int gfar_get_cls_all(struct gfar_private *priv,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct ethtool_flow_spec_container *comp;
+ u32 i = 0;
+
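+ /* rx_list is kept sorted by location, so the rule IDs come back in ascending order */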
+ list_for_each_entry(comp, &priv->rx_list.list, list) {
+ if (i == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[i] = comp->fs.location;
+ i++;
+ }
+
+ cmd->data = MAX_FILER_IDX;
+ cmd->rule_cnt = i;
+
+ return 0;
+}
+
+static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int ret = 0;
+
+ mutex_lock(&priv->rx_queue_access);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = gfar_set_hash_opts(priv, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ if (cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
+ cmd->fs.ring_cookie >= priv->num_rx_queues) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = gfar_add_cls(priv, &cmd->fs);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = gfar_del_cls(priv, cmd->fs.location);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&priv->rx_queue_access);
+
+ return ret;
+}
+
+static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int ret = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = priv->num_rx_queues;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = priv->rx_list.count;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = gfar_get_cls(priv, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = gfar_get_cls_all(priv, cmd, rule_locs);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+const struct ethtool_ops gfar_ethtool_ops = {
+ .get_settings = gfar_gsettings,
+ .set_settings = gfar_ssettings,
+ .get_drvinfo = gfar_gdrvinfo,
+ .get_regs_len = gfar_reglen,
+ .get_regs = gfar_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = gfar_gcoalesce,
+ .set_coalesce = gfar_scoalesce,
+ .get_ringparam = gfar_gringparam,
+ .set_ringparam = gfar_sringparam,
+ .get_strings = gfar_gstrings,
+ .get_sset_count = gfar_sset_count,
+ .get_ethtool_stats = gfar_fill_stats,
+ .get_msglevel = gfar_get_msglevel,
+ .set_msglevel = gfar_set_msglevel,
+#ifdef CONFIG_PM
+ .get_wol = gfar_get_wol,
+ .set_wol = gfar_set_wol,
+#endif
+ .set_rxnfc = gfar_set_nfc,
+ .get_rxnfc = gfar_get_nfc,
+};
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
new file mode 100644
index 000000000000..f67b8aebc89c
--- /dev/null
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -0,0 +1,583 @@
+/*
+ * PTP 1588 clock using the eTSEC
+ *
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/device.h>
+#include <linux/hrtimer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/timex.h>
+#include <linux/io.h>
+
+#include <linux/ptp_clock_kernel.h>
+
+#include "gianfar.h"
+
+/*
+ * gianfar ptp registers
+ * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010
+ */
+struct gianfar_ptp_registers {
+ u32 tmr_ctrl; /* Timer control register */
+ u32 tmr_tevent; /* Timestamp event register */
+ u32 tmr_temask; /* Timer event mask register */
+ u32 tmr_pevent; /* Timestamp event register */
+ u32 tmr_pemask; /* Timer event mask register */
+ u32 tmr_stat; /* Timestamp status register */
+ u32 tmr_cnt_h; /* Timer counter high register */
+ u32 tmr_cnt_l; /* Timer counter low register */
+ u32 tmr_add; /* Timer drift compensation addend register */
+ u32 tmr_acc; /* Timer accumulator register */
+ u32 tmr_prsc; /* Timer prescale */
+ u8 res1[4];
+ u32 tmroff_h; /* Timer offset high */
+ u32 tmroff_l; /* Timer offset low */
+ u8 res2[8];
+ u32 tmr_alarm1_h; /* Timer alarm 1 high register */
+ u32 tmr_alarm1_l; /* Timer alarm 1 low register */
+ u32 tmr_alarm2_h; /* Timer alarm 2 high register */
+ u32 tmr_alarm2_l; /* Timer alarm 2 low register */
+ u8 res3[48];
+ u32 tmr_fiper1; /* Timer fixed period interval */
+ u32 tmr_fiper2; /* Timer fixed period interval */
+ u32 tmr_fiper3; /* Timer fixed period interval */
+ u8 res4[20];
+ u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */
+ u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */
+ u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */
+ u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */
+};
+
+/* Bit definitions for the TMR_CTRL register */
+#define ALM1P (1<<31) /* Alarm1 output polarity */
+#define ALM2P (1<<30) /* Alarm2 output polarity */
+#define FS (1<<28) /* FIPER start indication */
+#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
+#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
+#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
+#define TCLK_PERIOD_MASK (0x3ff)
+#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
+#define FRD (1<<14) /* FIPER Realignment Disable */
+#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
+#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
+#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
+#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
+#define COPH (1<<7) /* Generated clock output phase. */
+#define CIPH (1<<6) /* External oscillator input clock phase */
+#define TMSR (1<<5) /* Timer soft reset. */
+#define BYP (1<<3) /* Bypass drift compensated clock */
+#define TE (1<<2) /* 1588 timer enable. */
+#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
+#define CKSEL_MASK (0x3)
+
+/* Bit definitions for the TMR_TEVENT register */
+#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
+#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
+#define ALM2 (1<<17) /* Current time = alarm time register 2 */
+#define ALM1 (1<<16) /* Current time = alarm time register 1 */
+#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
+#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
+#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
+
+/* Bit definitions for the TMR_TEMASK register */
+#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
+#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
+#define ALM2EN (1<<17) /* Timer ALM2 event enable */
+#define ALM1EN (1<<16) /* Timer ALM1 event enable */
+#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
+#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
+
+/* Bit definitions for the TMR_PEVENT register */
+#define TXP2 (1<<9) /* PTP transmitted timestamp in TXTS2 */
+#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
+#define RXP (1<<0) /* PTP frame has been received */
+
+/* Bit definitions for the TMR_PEMASK register */
+#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
+#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
+#define RXPEN (1<<0) /* Receive PTP packet event enable */
+
+/* Bit definitions for the TMR_STAT register */
+#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
+#define STAT_VEC_MASK (0x3f)
+
+/* Bit definitions for the TMR_PRSC register */
+#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
+#define PRSC_OCK_MASK (0xffff)
+
+
+#define DRIVER "gianfar_ptp"
+#define DEFAULT_CKSEL 1
+#define N_ALARM 1 /* first alarm is used internally to reset fipers */
+#define N_EXT_TS 2
+#define REG_SIZE sizeof(struct gianfar_ptp_registers)
+
+struct etsects {
+ struct gianfar_ptp_registers __iomem *regs;
+ spinlock_t lock; /* protects regs */
+ struct ptp_clock *clock;
+ struct ptp_clock_info caps;
+ struct resource *rsrc;
+ int irq;
+ u64 alarm_interval; /* for periodic alarm */
+ u64 alarm_value;
+ u32 tclk_period; /* nanoseconds */
+ u32 tmr_prsc;
+ u32 tmr_add;
+ u32 cksel;
+ u32 tmr_fiper1;
+ u32 tmr_fiper2;
+};
+
+/*
+ * Register access functions
+ */
+
+/* Caller must hold etsects->lock. */
+static u64 tmr_cnt_read(struct etsects *etsects)
+{
+ u64 ns;
+ u32 lo, hi;
+
+ lo = gfar_read(&etsects->regs->tmr_cnt_l);
+ hi = gfar_read(&etsects->regs->tmr_cnt_h);
+ ns = ((u64) hi) << 32;
+ ns |= lo;
+ return ns;
+}
+
+/* Caller must hold etsects->lock. */
+static void tmr_cnt_write(struct etsects *etsects, u64 ns)
+{
+ u32 hi = ns >> 32;
+ u32 lo = ns & 0xffffffff;
+
+ gfar_write(&etsects->regs->tmr_cnt_l, lo);
+ gfar_write(&etsects->regs->tmr_cnt_h, hi);
+}
+
+/* Caller must hold etsects->lock. */
+static void set_alarm(struct etsects *etsects)
+{
+ u64 ns;
+ u32 lo, hi;
+
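+ /* Round up to the next full second at least half a second away, then
+ * back off one clock period so the fipers realign on the second boundary */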
+ ns = tmr_cnt_read(etsects) + 1500000000ULL;
+ ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
+ ns -= etsects->tclk_period;
+ hi = ns >> 32;
+ lo = ns & 0xffffffff;
+ gfar_write(&etsects->regs->tmr_alarm1_l, lo);
+ gfar_write(&etsects->regs->tmr_alarm1_h, hi);
+}
+
+/* Caller must hold etsects->lock. */
+static void set_fipers(struct etsects *etsects)
+{
+ set_alarm(etsects);
+ gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
+ gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
+}
+
+/*
+ * Interrupt service routine
+ */
+
+static irqreturn_t isr(int irq, void *priv)
+{
+ struct etsects *etsects = priv;
+ struct ptp_clock_event event;
+ u64 ns;
+ u32 ack = 0, lo, hi, mask, val;
+
+ val = gfar_read(&etsects->regs->tmr_tevent);
+
+ if (val & ETS1) {
+ ack |= ETS1;
+ hi = gfar_read(&etsects->regs->tmr_etts1_h);
+ lo = gfar_read(&etsects->regs->tmr_etts1_l);
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = ((u64) hi) << 32;
+ event.timestamp |= lo;
+ ptp_clock_event(etsects->clock, &event);
+ }
+
+ if (val & ETS2) {
+ ack |= ETS2;
+ hi = gfar_read(&etsects->regs->tmr_etts2_h);
+ lo = gfar_read(&etsects->regs->tmr_etts2_l);
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 1;
+ event.timestamp = ((u64) hi) << 32;
+ event.timestamp |= lo;
+ ptp_clock_event(etsects->clock, &event);
+ }
+
+ if (val & ALM2) {
+ ack |= ALM2;
+ if (etsects->alarm_value) {
+ event.type = PTP_CLOCK_ALARM;
+ event.index = 0;
+ event.timestamp = etsects->alarm_value;
+ ptp_clock_event(etsects->clock, &event);
+ }
+ if (etsects->alarm_interval) {
+ ns = etsects->alarm_value + etsects->alarm_interval;
+ hi = ns >> 32;
+ lo = ns & 0xffffffff;
+ spin_lock(&etsects->lock);
+ gfar_write(&etsects->regs->tmr_alarm2_l, lo);
+ gfar_write(&etsects->regs->tmr_alarm2_h, hi);
+ spin_unlock(&etsects->lock);
+ etsects->alarm_value = ns;
+ } else {
+ gfar_write(&etsects->regs->tmr_tevent, ALM2);
+ spin_lock(&etsects->lock);
+ mask = gfar_read(&etsects->regs->tmr_temask);
+ mask &= ~ALM2EN;
+ gfar_write(&etsects->regs->tmr_temask, mask);
+ spin_unlock(&etsects->lock);
+ etsects->alarm_value = 0;
+ etsects->alarm_interval = 0;
+ }
+ }
+
+ if (val & PP1) {
+ ack |= PP1;
+ event.type = PTP_CLOCK_PPS;
+ ptp_clock_event(etsects->clock, &event);
+ }
+
+ if (ack) {
+ gfar_write(&etsects->regs->tmr_tevent, ack);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
+}
+
+/*
+ * PTP clock operations
+ */
+
+static int ptp_gianfar_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ u64 adj;
+ u32 diff, tmr_add;
+ int neg_adj = 0;
+ struct etsects *etsects = container_of(ptp, struct etsects, caps);
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
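+ /* Scale the nominal addend: diff = tmr_add * |ppb| / 1 billion */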
+ tmr_add = etsects->tmr_add;
+ adj = tmr_add;
+ adj *= ppb;
+ diff = div_u64(adj, 1000000000ULL);
+
+ tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
+
+ gfar_write(&etsects->regs->tmr_add, tmr_add);
+
+ return 0;
+}
+
+static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ s64 now;
+ unsigned long flags;
+ struct etsects *etsects = container_of(ptp, struct etsects, caps);
+
+ spin_lock_irqsave(&etsects->lock, flags);
+
+ now = tmr_cnt_read(etsects);
+ now += delta;
+ tmr_cnt_write(etsects, now);
+
+ spin_unlock_irqrestore(&etsects->lock, flags);
+
+ set_fipers(etsects);
+
+ return 0;
+}
+
+static int ptp_gianfar_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ u64 ns;
+ u32 remainder;
+ unsigned long flags;
+ struct etsects *etsects = container_of(ptp, struct etsects, caps);
+
+ spin_lock_irqsave(&etsects->lock, flags);
+
+ ns = tmr_cnt_read(etsects);
+
+ spin_unlock_irqrestore(&etsects->lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+ ts->tv_nsec = remainder;
+ return 0;
+}
+
+static int ptp_gianfar_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ u64 ns;
+ unsigned long flags;
+ struct etsects *etsects = container_of(ptp, struct etsects, caps);
+
+ ns = ts->tv_sec * 1000000000ULL;
+ ns += ts->tv_nsec;
+
+ spin_lock_irqsave(&etsects->lock, flags);
+
+ tmr_cnt_write(etsects, ns);
+ set_fipers(etsects);
+
+ spin_unlock_irqrestore(&etsects->lock, flags);
+
+ return 0;
+}
+
+static int ptp_gianfar_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct etsects *etsects = container_of(ptp, struct etsects, caps);
+ unsigned long flags;
+ u32 bit, mask;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ switch (rq->extts.index) {
+ case 0:
+ bit = ETS1EN;
+ break;
+ case 1:
+ bit = ETS2EN;
+ break;
+ default:
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&etsects->lock, flags);
+ mask = gfar_read(&etsects->regs->tmr_temask);
+ if (on)
+ mask |= bit;
+ else
+ mask &= ~bit;
+ gfar_write(&etsects->regs->tmr_temask, mask);
+ spin_unlock_irqrestore(&etsects->lock, flags);
+ return 0;
+
+ case PTP_CLK_REQ_PPS:
+ spin_lock_irqsave(&etsects->lock, flags);
+ mask = gfar_read(&etsects->regs->tmr_temask);
+ if (on)
+ mask |= PP1EN;
+ else
+ mask &= ~PP1EN;
+ gfar_write(&etsects->regs->tmr_temask, mask);
+ spin_unlock_irqrestore(&etsects->lock, flags);
+ return 0;
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info ptp_gianfar_caps = {
+ .owner = THIS_MODULE,
+ .name = "gianfar clock",
+ .max_adj = 512000,
+ .n_alarm = N_ALARM,
+ .n_ext_ts = N_EXT_TS,
+ .n_per_out = 0,
+ .pps = 1,
+ .adjfreq = ptp_gianfar_adjfreq,
+ .adjtime = ptp_gianfar_adjtime,
+ .gettime = ptp_gianfar_gettime,
+ .settime = ptp_gianfar_settime,
+ .enable = ptp_gianfar_enable,
+};
+
+/* OF device tree */
+
+static int get_of_u32(struct device_node *node, char *str, u32 *val)
+{
+ int plen;
+ const u32 *prop = of_get_property(node, str, &plen);
+
+ if (!prop || plen != sizeof(*prop))
+ return -1;
+ *val = *prop;
+ return 0;
+}
+
+static int gianfar_ptp_probe(struct platform_device *dev)
+{
+ struct device_node *node = dev->dev.of_node;
+ struct etsects *etsects;
+ struct timespec now;
+ int err = -ENOMEM;
+ u32 tmr_ctrl;
+ unsigned long flags;
+
+ etsects = kzalloc(sizeof(*etsects), GFP_KERNEL);
+ if (!etsects)
+ goto no_memory;
+
+ err = -ENODEV;
+
+ etsects->caps = ptp_gianfar_caps;
+ etsects->cksel = DEFAULT_CKSEL;
+
+ if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) ||
+ get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) ||
+ get_of_u32(node, "fsl,tmr-add", &etsects->tmr_add) ||
+ get_of_u32(node, "fsl,tmr-fiper1", &etsects->tmr_fiper1) ||
+ get_of_u32(node, "fsl,tmr-fiper2", &etsects->tmr_fiper2) ||
+ get_of_u32(node, "fsl,max-adj", &etsects->caps.max_adj)) {
+ pr_err("device tree node missing required elements\n");
+ goto no_node;
+ }
+
+ etsects->irq = platform_get_irq(dev, 0);
+
+ if (etsects->irq == NO_IRQ) {
+ pr_err("irq not in device tree\n");
+ goto no_node;
+ }
+ if (request_irq(etsects->irq, isr, 0, DRIVER, etsects)) {
+ pr_err("request_irq failed\n");
+ goto no_node;
+ }
+
+ etsects->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!etsects->rsrc) {
+ pr_err("no resource\n");
+ goto no_resource;
+ }
+ if (request_resource(&ioport_resource, etsects->rsrc)) {
+ pr_err("resource busy\n");
+ goto no_resource;
+ }
+
+ spin_lock_init(&etsects->lock);
+
+ etsects->regs = ioremap(etsects->rsrc->start,
+ resource_size(etsects->rsrc));
+ if (!etsects->regs) {
+ pr_err("ioremap ptp registers failed\n");
+ goto no_ioremap;
+ }
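+ /* Seed the 1588 timer with the current wall-clock time */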
+ getnstimeofday(&now);
+ ptp_gianfar_settime(&etsects->caps, &now);
+
+ tmr_ctrl =
+ (etsects->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
+ (etsects->cksel & CKSEL_MASK) << CKSEL_SHIFT;
+
+ spin_lock_irqsave(&etsects->lock, flags);
+
+ gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl);
+ gfar_write(&etsects->regs->tmr_add, etsects->tmr_add);
+ gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc);
+ gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
+ gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
+ set_alarm(etsects);
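+ /* FS | RTPE | TE | FRD: start the fipers, record Tx timestamps,
+ * enable the timer, disable fiper realignment */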
+ gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE|FRD);
+
+ spin_unlock_irqrestore(&etsects->lock, flags);
+
+ etsects->clock = ptp_clock_register(&etsects->caps);
+ if (IS_ERR(etsects->clock)) {
+ err = PTR_ERR(etsects->clock);
+ goto no_clock;
+ }
+
+ dev_set_drvdata(&dev->dev, etsects);
+
+ return 0;
+
+no_clock:
+no_ioremap:
+ release_resource(etsects->rsrc);
+no_resource:
+ free_irq(etsects->irq, etsects);
+no_node:
+ kfree(etsects);
+no_memory:
+ return err;
+}
+
+static int gianfar_ptp_remove(struct platform_device *dev)
+{
+ struct etsects *etsects = dev_get_drvdata(&dev->dev);
+
+ gfar_write(&etsects->regs->tmr_temask, 0);
+ gfar_write(&etsects->regs->tmr_ctrl, 0);
+
+ ptp_clock_unregister(etsects->clock);
+ iounmap(etsects->regs);
+ release_resource(etsects->rsrc);
+ free_irq(etsects->irq, etsects);
+ kfree(etsects);
+
+ return 0;
+}
+
+static struct of_device_id match_table[] = {
+ { .compatible = "fsl,etsec-ptp" },
+ {},
+};
+
+static struct platform_driver gianfar_ptp_driver = {
+ .driver = {
+ .name = "gianfar_ptp",
+ .of_match_table = match_table,
+ .owner = THIS_MODULE,
+ },
+ .probe = gianfar_ptp_probe,
+ .remove = gianfar_ptp_remove,
+};
+
+/* module operations */
+
+static int __init ptp_gianfar_init(void)
+{
+ return platform_driver_register(&gianfar_ptp_driver);
+}
+
+module_init(ptp_gianfar_init);
+
+static void __exit ptp_gianfar_exit(void)
+{
+ platform_driver_unregister(&gianfar_ptp_driver);
+}
+
+module_exit(ptp_gianfar_exit);
+
+MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>");
+MODULE_DESCRIPTION("PTP clock using the eTSEC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c
new file mode 100644
index 000000000000..64f4094ac7f1
--- /dev/null
+++ b/drivers/net/ethernet/freescale/gianfar_sysfs.c
@@ -0,0 +1,341 @@
+/*
+ * drivers/net/ethernet/freescale/gianfar_sysfs.c
+ *
+ * Gianfar Ethernet Driver
+ * This driver is designed for the non-CPM ethernet controllers
+ * on the 85xx and 83xx family of integrated processors
+ * Based on 8260_io/fcc_enet.c
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala (galak@kernel.crashing.org)
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
+ *
+ * Copyright 2002-2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Sysfs file creation and management
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/device.h>
+
+#include <asm/uaccess.h>
+#include <linux/module.h>
+
+#include "gianfar.h"
+
+static ssize_t gfar_show_bd_stash(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+
+ return sprintf(buf, "%s\n", priv->bd_stash_en ? "on" : "off");
+}
+
+static ssize_t gfar_set_bd_stash(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ int new_setting = 0;
+ u32 temp;
+ unsigned long flags;
+
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING))
+ return count;
+
+ /* Find out the new setting */
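+ /* count - 1 drops the trailing newline that sysfs writes typically include */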
+ if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
+ new_setting = 1;
+ else if (!strncmp("off", buf, count - 1) ||
+ !strncmp("0", buf, count - 1))
+ new_setting = 0;
+ else
+ return count;
+
+ local_irq_save(flags);
+ lock_rx_qs(priv);
+
+ /* Set the new stashing value */
+ priv->bd_stash_en = new_setting;
+
+ temp = gfar_read(&regs->attr);
+
+ if (new_setting)
+ temp |= ATTR_BDSTASH;
+ else
+ temp &= ~(ATTR_BDSTASH);
+
+ gfar_write(&regs->attr, temp);
+
+ unlock_rx_qs(priv);
+ local_irq_restore(flags);
+
+ return count;
+}
+
+static DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash, gfar_set_bd_stash);
+
+static ssize_t gfar_show_rx_stash_size(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+
+ return sprintf(buf, "%d\n", priv->rx_stash_size);
+}
+
+static ssize_t gfar_set_rx_stash_size(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ unsigned int length = simple_strtoul(buf, NULL, 0);
+ u32 temp;
+ unsigned long flags;
+
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
+ return count;
+
+ local_irq_save(flags);
+ lock_rx_qs(priv);
+
+ if (length > priv->rx_buffer_size)
+ goto out;
+
+ if (length == priv->rx_stash_size)
+ goto out;
+
+ priv->rx_stash_size = length;
+
+ temp = gfar_read(&regs->attreli);
+ temp &= ~ATTRELI_EL_MASK;
+ temp |= ATTRELI_EL(length);
+ gfar_write(&regs->attreli, temp);
+
+ /* Turn stashing on/off as appropriate */
+ temp = gfar_read(&regs->attr);
+
+ if (length)
+ temp |= ATTR_BUFSTASH;
+ else
+ temp &= ~(ATTR_BUFSTASH);
+
+ gfar_write(&regs->attr, temp);
+
+out:
+ unlock_rx_qs(priv);
+ local_irq_restore(flags);
+
+ return count;
+}
+
+static DEVICE_ATTR(rx_stash_size, 0644, gfar_show_rx_stash_size,
+ gfar_set_rx_stash_size);
+
+/* Stashing will only be enabled when rx_stash_size != 0 */
+static ssize_t gfar_show_rx_stash_index(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+
+ return sprintf(buf, "%d\n", priv->rx_stash_index);
+}
+
+static ssize_t gfar_set_rx_stash_index(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ unsigned short index = simple_strtoul(buf, NULL, 0);
+ u32 temp;
+ unsigned long flags;
+
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
+ return count;
+
+ local_irq_save(flags);
+ lock_rx_qs(priv);
+
+ if (index > priv->rx_stash_size)
+ goto out;
+
+ if (index == priv->rx_stash_index)
+ goto out;
+
+ priv->rx_stash_index = index;
+
+ temp = gfar_read(&regs->attreli);
+ temp &= ~ATTRELI_EI_MASK;
+ temp |= ATTRELI_EI(index);
+ gfar_write(&regs->attreli, temp);
+
+out:
+ unlock_rx_qs(priv);
+ local_irq_restore(flags);
+
+ return count;
+}
+
+static DEVICE_ATTR(rx_stash_index, 0644, gfar_show_rx_stash_index,
+ gfar_set_rx_stash_index);
+
+static ssize_t gfar_show_fifo_threshold(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+
+ return sprintf(buf, "%d\n", priv->fifo_threshold);
+}
+
+static ssize_t gfar_set_fifo_threshold(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ unsigned int length = simple_strtoul(buf, NULL, 0);
+ u32 temp;
+ unsigned long flags;
+
+ if (length > GFAR_MAX_FIFO_THRESHOLD)
+ return count;
+
+ local_irq_save(flags);
+ lock_tx_qs(priv);
+
+ priv->fifo_threshold = length;
+
+ temp = gfar_read(&regs->fifo_tx_thr);
+ temp &= ~FIFO_TX_THR_MASK;
+ temp |= length;
+ gfar_write(&regs->fifo_tx_thr, temp);
+
+ unlock_tx_qs(priv);
+ local_irq_restore(flags);
+
+ return count;
+}
+
+static DEVICE_ATTR(fifo_threshold, 0644, gfar_show_fifo_threshold,
+ gfar_set_fifo_threshold);
+
+static ssize_t gfar_show_fifo_starve(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+
+ return sprintf(buf, "%d\n", priv->fifo_starve);
+}
+
+static ssize_t gfar_set_fifo_starve(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ unsigned int num = simple_strtoul(buf, NULL, 0);
+ u32 temp;
+ unsigned long flags;
+
+ if (num > GFAR_MAX_FIFO_STARVE)
+ return count;
+
+ local_irq_save(flags);
+ lock_tx_qs(priv);
+
+ priv->fifo_starve = num;
+
+ temp = gfar_read(&regs->fifo_tx_starve);
+ temp &= ~FIFO_TX_STARVE_MASK;
+ temp |= num;
+ gfar_write(&regs->fifo_tx_starve, temp);
+
+ unlock_tx_qs(priv);
+ local_irq_restore(flags);
+
+ return count;
+}
+
+static DEVICE_ATTR(fifo_starve, 0644, gfar_show_fifo_starve,
+ gfar_set_fifo_starve);
+
+static ssize_t gfar_show_fifo_starve_off(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+
+ return sprintf(buf, "%d\n", priv->fifo_starve_off);
+}
+
+static ssize_t gfar_set_fifo_starve_off(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ unsigned int num = simple_strtoul(buf, NULL, 0);
+ u32 temp;
+ unsigned long flags;
+
+ if (num > GFAR_MAX_FIFO_STARVE_OFF)
+ return count;
+
+ local_irq_save(flags);
+ lock_tx_qs(priv);
+
+ priv->fifo_starve_off = num;
+
+ temp = gfar_read(&regs->fifo_tx_starve_shutoff);
+ temp &= ~FIFO_TX_STARVE_OFF_MASK;
+ temp |= num;
+ gfar_write(&regs->fifo_tx_starve_shutoff, temp);
+
+ unlock_tx_qs(priv);
+ local_irq_restore(flags);
+
+ return count;
+}
+
+static DEVICE_ATTR(fifo_starve_off, 0644, gfar_show_fifo_starve_off,
+ gfar_set_fifo_starve_off);
+
+void gfar_init_sysfs(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int rc;
+
+ /* Initialize the default values */
+ priv->fifo_threshold = DEFAULT_FIFO_TX_THR;
+ priv->fifo_starve = DEFAULT_FIFO_TX_STARVE;
+ priv->fifo_starve_off = DEFAULT_FIFO_TX_STARVE_OFF;
+
+ /* Create our sysfs files */
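+	/* These attributes land in the netdev's sysfs directory, e.g.
+	 * (illustrative): echo 256 > /sys/class/net/eth0/fifo_threshold */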
+ rc = device_create_file(&dev->dev, &dev_attr_bd_stash);
+ rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_size);
+ rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_index);
+ rc |= device_create_file(&dev->dev, &dev_attr_fifo_threshold);
+ rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve);
+ rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve_off);
+ if (rc)
+ dev_err(&dev->dev, "Error creating gianfar sysfs files.\n");
+}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
new file mode 100644
index 000000000000..46d690a92c0b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -0,0 +1,4026 @@
+/*
+ * Copyright (C) 2006-2009 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * QE UCC Gigabit Ethernet Driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/workqueue.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/immap_qe.h>
+#include <asm/qe.h>
+#include <asm/ucc.h>
+#include <asm/ucc_fast.h>
+#include <asm/machdep.h>
+
+#include "ucc_geth.h"
+#include "fsl_pq_mdio.h"
+
+#undef DEBUG
+
+#define ugeth_printk(level, format, arg...) \
+ printk(level format "\n", ## arg)
+
+#define ugeth_dbg(format, arg...) \
+ ugeth_printk(KERN_DEBUG , format , ## arg)
+#define ugeth_err(format, arg...) \
+ ugeth_printk(KERN_ERR , format , ## arg)
+#define ugeth_info(format, arg...) \
+ ugeth_printk(KERN_INFO , format , ## arg)
+#define ugeth_warn(format, arg...) \
+ ugeth_printk(KERN_WARNING , format , ## arg)
+
+#ifdef UGETH_VERBOSE_DEBUG
+#define ugeth_vdbg ugeth_dbg
+#else
+#define ugeth_vdbg(fmt, args...) do { } while (0)
+#endif /* UGETH_VERBOSE_DEBUG */
+#define UGETH_MSG_DEFAULT	((NETIF_MSG_IFUP << 1) - 1)
+
+
+static DEFINE_SPINLOCK(ugeth_lock);
+
+static struct {
+ u32 msg_enable;
+} debug = { -1 };
+
+module_param_named(debug, debug.msg_enable, int, 0);
+MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");
+
+static struct ucc_geth_info ugeth_primary_info = {
+ .uf_info = {
+ .bd_mem_part = MEM_PART_SYSTEM,
+ .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
+ .max_rx_buf_length = 1536,
+ /* adjusted at startup if max-speed 1000 */
+ .urfs = UCC_GETH_URFS_INIT,
+ .urfet = UCC_GETH_URFET_INIT,
+ .urfset = UCC_GETH_URFSET_INIT,
+ .utfs = UCC_GETH_UTFS_INIT,
+ .utfet = UCC_GETH_UTFET_INIT,
+ .utftt = UCC_GETH_UTFTT_INIT,
+ .ufpt = 256,
+ .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
+ .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
+ .tenc = UCC_FAST_TX_ENCODING_NRZ,
+ .renc = UCC_FAST_RX_ENCODING_NRZ,
+ .tcrc = UCC_FAST_16_BIT_CRC,
+ .synl = UCC_FAST_SYNC_LEN_NOT_USED,
+ },
+ .numQueuesTx = 1,
+ .numQueuesRx = 1,
+ .extendedFilteringChainPointer = ((uint32_t) NULL),
+	.typeorlen = 3072,	/* 1536 */
+ .nonBackToBackIfgPart1 = 0x40,
+ .nonBackToBackIfgPart2 = 0x60,
+ .miminumInterFrameGapEnforcement = 0x50,
+ .backToBackInterFrameGap = 0x60,
+ .mblinterval = 128,
+ .nortsrbytetime = 5,
+ .fracsiz = 1,
+ .strictpriorityq = 0xff,
+ .altBebTruncation = 0xa,
+ .excessDefer = 1,
+ .maxRetransmission = 0xf,
+ .collisionWindow = 0x37,
+ .receiveFlowControl = 1,
+ .transmitFlowControl = 1,
+ .maxGroupAddrInHash = 4,
+ .maxIndAddrInHash = 4,
+ .prel = 7,
+ .maxFrameLength = 1518,
+ .minFrameLength = 64,
+ .maxD1Length = 1520,
+ .maxD2Length = 1520,
+ .vlantype = 0x8100,
+ .ecamptr = ((uint32_t) NULL),
+ .eventRegMask = UCCE_OTHER,
+ .pausePeriod = 0xf000,
+ .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
+ .bdRingLenTx = {
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN},
+
+ .bdRingLenRx = {
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN},
+
+ .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
+ .largestexternallookupkeysize =
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
+ .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE |
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX |
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX,
+ .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
+ .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
+ .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
+ .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
+ .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
+ .numThreadsTx = UCC_GETH_NUM_OF_THREADS_1,
+ .numThreadsRx = UCC_GETH_NUM_OF_THREADS_1,
+ .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
+ .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
+};
+
+static struct ucc_geth_info ugeth_info[8];
+
+#ifdef DEBUG
+static void mem_disp(u8 *addr, int size)
+{
+ u8 *i;
+	int size16Align = (size >> 4) << 4;
+	int size4Align = (size >> 2) << 2;
+	int notAlign = 0;
+	if (size % 16)
+		notAlign = 1;
+
+	for (i = addr; (u32) i < (u32) addr + size16Align; i += 16)
+		printk("0x%08x: %08x %08x %08x %08x\r\n",
+		       (u32) i,
+		       *((u32 *) (i)),
+		       *((u32 *) (i + 4)),
+		       *((u32 *) (i + 8)), *((u32 *) (i + 12)));
+	if (notAlign == 1)
+		printk("0x%08x: ", (u32) i);
+	for (; (u32) i < (u32) addr + size4Align; i += 4)
+ printk("%08x ", *((u32 *) (i)));
+ for (; (u32) i < (u32) addr + size; i++)
+ printk("%02x", *((u8 *) (i)));
+ if (notAlign == 1)
+ printk("\r\n");
+}
+#endif /* DEBUG */
+
+static struct list_head *dequeue(struct list_head *lh)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ugeth_lock, flags);
+ if (!list_empty(lh)) {
+ struct list_head *node = lh->next;
+ list_del(node);
+ spin_unlock_irqrestore(&ugeth_lock, flags);
+ return node;
+ } else {
+ spin_unlock_irqrestore(&ugeth_lock, flags);
+ return NULL;
+ }
+}
+
+static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
+ u8 __iomem *bd)
+{
+ struct sk_buff *skb = NULL;
+
+ skb = __skb_dequeue(&ugeth->rx_recycle);
+ if (!skb)
+ skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT);
+ if (skb == NULL)
+ return NULL;
+
+ /* We need the data buffer to be aligned properly. We will reserve
+ * as many bytes as needed to align the data properly
+ */
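+	/* Worked example (illustrative, assuming the usual 64-byte
+	 * UCC_GETH_RX_DATA_BUF_ALIGNMENT): skb->data at ...0x1008 gives
+	 * a reserve of 64 - 8 = 56 bytes, leaving data 64-byte aligned. */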
+ skb_reserve(skb,
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT -
+ (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
+ 1)));
+
+ skb->dev = ugeth->ndev;
+
+ out_be32(&((struct qe_bd __iomem *)bd)->buf,
+ dma_map_single(ugeth->dev,
+ skb->data,
+ ugeth->ug_info->uf_info.max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT,
+ DMA_FROM_DEVICE));
+
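+	/* Re-arm the BD: set Empty and Interrupt, preserving only the
+	 * Wrap bit from the previous status word. */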
+ out_be32((u32 __iomem *)bd,
+ (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W)));
+
+ return skb;
+}
+
+static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
+{
+ u8 __iomem *bd;
+ u32 bd_status;
+ struct sk_buff *skb;
+ int i;
+
+ bd = ugeth->p_rx_bd_ring[rxQ];
+ i = 0;
+
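+	/* Walk the ring until the descriptor carrying the Wrap (R_W)
+	 * bit -- the last BD in the ring -- has been given a buffer. */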
+ do {
+ bd_status = in_be32((u32 __iomem *)bd);
+ skb = get_new_skb(ugeth, bd);
+
+		if (!skb)	/* If we cannot allocate a data buffer,
+				   abort; cleanup will happen elsewhere */
+ return -ENOMEM;
+
+ ugeth->rx_skbuff[rxQ][i] = skb;
+
+ /* advance the BD pointer */
+ bd += sizeof(struct qe_bd);
+ i++;
+ } while (!(bd_status & R_W));
+
+ return 0;
+}
+
+static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
+ u32 *p_start,
+ u8 num_entries,
+ u32 thread_size,
+ u32 thread_alignment,
+ unsigned int risc,
+ int skip_page_for_first_entry)
+{
+ u32 init_enet_offset;
+ u8 i;
+ int snum;
+
+ for (i = 0; i < num_entries; i++) {
+ if ((snum = qe_get_snum()) < 0) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("fill_init_enet_entries: Can not get SNUM.");
+ return snum;
+ }
+ if ((i == 0) && skip_page_for_first_entry)
+ /* First entry of Rx does not have page */
+ init_enet_offset = 0;
+ else {
+ init_enet_offset =
+ qe_muram_alloc(thread_size, thread_alignment);
+ if (IS_ERR_VALUE(init_enet_offset)) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("fill_init_enet_entries: Can not allocate DPRAM memory.");
+ qe_put_snum((u8) snum);
+ return -ENOMEM;
+ }
+ }
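+		/* Each entry packs the SNUM, the MURAM offset of the
+		 * thread parameter page, and the RISC allocation bits. */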
+ *(p_start++) =
+ ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
+ | risc;
+ }
+
+ return 0;
+}
+
+static int return_init_enet_entries(struct ucc_geth_private *ugeth,
+ u32 *p_start,
+ u8 num_entries,
+ unsigned int risc,
+ int skip_page_for_first_entry)
+{
+ u32 init_enet_offset;
+ u8 i;
+ int snum;
+
+ for (i = 0; i < num_entries; i++) {
+ u32 val = *p_start;
+
+		/* Check that this entry was actually valid --
+		 * needed in case an earlier allocation failed */
+ if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
+ snum =
+ (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
+ ENET_INIT_PARAM_SNUM_SHIFT;
+ qe_put_snum((u8) snum);
+ if (!((i == 0) && skip_page_for_first_entry)) {
+ /* First entry of Rx does not have page */
+ init_enet_offset =
+ (val & ENET_INIT_PARAM_PTR_MASK);
+ qe_muram_free(init_enet_offset);
+ }
+ *p_start++ = 0;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef DEBUG
+static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
+ u32 __iomem *p_start,
+ u8 num_entries,
+ u32 thread_size,
+ unsigned int risc,
+ int skip_page_for_first_entry)
+{
+ u32 init_enet_offset;
+ u8 i;
+ int snum;
+
+ for (i = 0; i < num_entries; i++) {
+ u32 val = in_be32(p_start);
+
+		/* Check that this entry was actually valid --
+		 * needed in case an earlier allocation failed */
+ if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
+ snum =
+ (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
+ ENET_INIT_PARAM_SNUM_SHIFT;
+ qe_put_snum((u8) snum);
+ if (!((i == 0) && skip_page_for_first_entry)) {
+ /* First entry of Rx does not have page */
+ init_enet_offset =
+ (in_be32(p_start) &
+ ENET_INIT_PARAM_PTR_MASK);
+ ugeth_info("Init enet entry %d:", i);
+ ugeth_info("Base address: 0x%08x",
+ (u32)
+ qe_muram_addr(init_enet_offset));
+ mem_disp(qe_muram_addr(init_enet_offset),
+ thread_size);
+ }
+ p_start++;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont)
+{
+ kfree(enet_addr_cont);
+}
+
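+/* The filtering registers hold the MAC address 16 bits at a time in
+ * byte-reversed order; e.g. (illustrative) 00:04:9f:01:02:03 yields
+ * reg[0] = 0x0302, reg[1] = 0x019f, reg[2] = 0x0400.
+ */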
+static void set_mac_addr(__be16 __iomem *reg, u8 *mac)
+{
+ out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]);
+ out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]);
+ out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]);
+}
+
+static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
+{
+ struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
+
+ if (!(paddr_num < NUM_OF_PADDRS)) {
+		ugeth_warn("%s: Illegal paddr_num.", __func__);
+ return -EINVAL;
+ }
+
+ p_82xx_addr_filt =
+ (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
+ addressfiltering;
+
+ /* Writing address ff.ff.ff.ff.ff.ff disables address
+ recognition for this register */
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);
+
+ return 0;
+}
+
+static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
+ u8 *p_enet_addr)
+{
+ struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
+ u32 cecr_subblock;
+
+ p_82xx_addr_filt =
+ (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
+ addressfiltering;
+
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+
+	/* Ethernet frames are defined in Little Endian mode; therefore,
+	 * to insert the address into the hash (Big Endian mode), we
+	 * reverse the bytes. */
+
+ set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);
+
+ qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
+ QE_CR_PROTOCOL_ETHERNET, 0);
+}
+
+static inline int compare_addr(u8 **addr1, u8 **addr2)
+{
+ return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
+}
+
+#ifdef DEBUG
+static void get_statistics(struct ucc_geth_private *ugeth,
+ struct ucc_geth_tx_firmware_statistics *
+ tx_firmware_statistics,
+ struct ucc_geth_rx_firmware_statistics *
+ rx_firmware_statistics,
+ struct ucc_geth_hardware_statistics *hardware_statistics)
+{
+ struct ucc_fast __iomem *uf_regs;
+ struct ucc_geth __iomem *ug_regs;
+ struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
+ struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;
+
+ ug_regs = ugeth->ug_regs;
+ uf_regs = (struct ucc_fast __iomem *) ug_regs;
+ p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
+ p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
+
+ /* Tx firmware only if user handed pointer and driver actually
+ gathers Tx firmware statistics */
+ if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
+ tx_firmware_statistics->sicoltx =
+ in_be32(&p_tx_fw_statistics_pram->sicoltx);
+ tx_firmware_statistics->mulcoltx =
+ in_be32(&p_tx_fw_statistics_pram->mulcoltx);
+ tx_firmware_statistics->latecoltxfr =
+ in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
+ tx_firmware_statistics->frabortduecol =
+ in_be32(&p_tx_fw_statistics_pram->frabortduecol);
+ tx_firmware_statistics->frlostinmactxer =
+ in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
+ tx_firmware_statistics->carriersenseertx =
+ in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
+ tx_firmware_statistics->frtxok =
+ in_be32(&p_tx_fw_statistics_pram->frtxok);
+ tx_firmware_statistics->txfrexcessivedefer =
+ in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
+ tx_firmware_statistics->txpkts256 =
+ in_be32(&p_tx_fw_statistics_pram->txpkts256);
+ tx_firmware_statistics->txpkts512 =
+ in_be32(&p_tx_fw_statistics_pram->txpkts512);
+ tx_firmware_statistics->txpkts1024 =
+ in_be32(&p_tx_fw_statistics_pram->txpkts1024);
+ tx_firmware_statistics->txpktsjumbo =
+ in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
+ }
+
+ /* Rx firmware only if user handed pointer and driver actually
+ * gathers Rx firmware statistics */
+ if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
+ int i;
+ rx_firmware_statistics->frrxfcser =
+ in_be32(&p_rx_fw_statistics_pram->frrxfcser);
+ rx_firmware_statistics->fraligner =
+ in_be32(&p_rx_fw_statistics_pram->fraligner);
+ rx_firmware_statistics->inrangelenrxer =
+ in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
+ rx_firmware_statistics->outrangelenrxer =
+ in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
+ rx_firmware_statistics->frtoolong =
+ in_be32(&p_rx_fw_statistics_pram->frtoolong);
+ rx_firmware_statistics->runt =
+ in_be32(&p_rx_fw_statistics_pram->runt);
+ rx_firmware_statistics->verylongevent =
+ in_be32(&p_rx_fw_statistics_pram->verylongevent);
+ rx_firmware_statistics->symbolerror =
+ in_be32(&p_rx_fw_statistics_pram->symbolerror);
+ rx_firmware_statistics->dropbsy =
+ in_be32(&p_rx_fw_statistics_pram->dropbsy);
+ for (i = 0; i < 0x8; i++)
+ rx_firmware_statistics->res0[i] =
+ p_rx_fw_statistics_pram->res0[i];
+ rx_firmware_statistics->mismatchdrop =
+ in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
+ rx_firmware_statistics->underpkts =
+ in_be32(&p_rx_fw_statistics_pram->underpkts);
+ rx_firmware_statistics->pkts256 =
+ in_be32(&p_rx_fw_statistics_pram->pkts256);
+ rx_firmware_statistics->pkts512 =
+ in_be32(&p_rx_fw_statistics_pram->pkts512);
+ rx_firmware_statistics->pkts1024 =
+ in_be32(&p_rx_fw_statistics_pram->pkts1024);
+ rx_firmware_statistics->pktsjumbo =
+ in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
+ rx_firmware_statistics->frlossinmacer =
+ in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
+ rx_firmware_statistics->pausefr =
+ in_be32(&p_rx_fw_statistics_pram->pausefr);
+ for (i = 0; i < 0x4; i++)
+ rx_firmware_statistics->res1[i] =
+ p_rx_fw_statistics_pram->res1[i];
+ rx_firmware_statistics->removevlan =
+ in_be32(&p_rx_fw_statistics_pram->removevlan);
+ rx_firmware_statistics->replacevlan =
+ in_be32(&p_rx_fw_statistics_pram->replacevlan);
+ rx_firmware_statistics->insertvlan =
+ in_be32(&p_rx_fw_statistics_pram->insertvlan);
+ }
+
+ /* Hardware only if user handed pointer and driver actually
+ gathers hardware statistics */
+ if (hardware_statistics &&
+ (in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) {
+ hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
+ hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
+ hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
+ hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
+ hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
+ hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
+ hardware_statistics->txok = in_be32(&ug_regs->txok);
+ hardware_statistics->txcf = in_be16(&ug_regs->txcf);
+ hardware_statistics->tmca = in_be32(&ug_regs->tmca);
+ hardware_statistics->tbca = in_be32(&ug_regs->tbca);
+ hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
+ hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
+ hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
+ hardware_statistics->rmca = in_be32(&ug_regs->rmca);
+ hardware_statistics->rbca = in_be32(&ug_regs->rbca);
+ }
+}
+
+static void dump_bds(struct ucc_geth_private *ugeth)
+{
+ int i;
+ int length;
+
+ for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ if (ugeth->p_tx_bd_ring[i]) {
+ length =
+ (ugeth->ug_info->bdRingLenTx[i] *
+ sizeof(struct qe_bd));
+ ugeth_info("TX BDs[%d]", i);
+ mem_disp(ugeth->p_tx_bd_ring[i], length);
+ }
+ }
+ for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ if (ugeth->p_rx_bd_ring[i]) {
+ length =
+ (ugeth->ug_info->bdRingLenRx[i] *
+ sizeof(struct qe_bd));
+ ugeth_info("RX BDs[%d]", i);
+ mem_disp(ugeth->p_rx_bd_ring[i], length);
+ }
+ }
+}
+
+static void dump_regs(struct ucc_geth_private *ugeth)
+{
+ int i;
+
+ ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num + 1);
+ ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);
+
+ ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->maccfg1,
+ in_be32(&ugeth->ug_regs->maccfg1));
+ ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->maccfg2,
+ in_be32(&ugeth->ug_regs->maccfg2));
+ ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->ipgifg,
+ in_be32(&ugeth->ug_regs->ipgifg));
+ ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->hafdup,
+ in_be32(&ugeth->ug_regs->hafdup));
+ ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->ifctl,
+ in_be32(&ugeth->ug_regs->ifctl));
+ ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->ifstat,
+ in_be32(&ugeth->ug_regs->ifstat));
+ ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->macstnaddr1,
+ in_be32(&ugeth->ug_regs->macstnaddr1));
+ ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->macstnaddr2,
+ in_be32(&ugeth->ug_regs->macstnaddr2));
+ ugeth_info("uempr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->uempr,
+ in_be32(&ugeth->ug_regs->uempr));
+ ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->utbipar,
+ in_be32(&ugeth->ug_regs->utbipar));
+ ugeth_info("uescr : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->ug_regs->uescr,
+ in_be16(&ugeth->ug_regs->uescr));
+ ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->tx64,
+ in_be32(&ugeth->ug_regs->tx64));
+ ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->tx127,
+ in_be32(&ugeth->ug_regs->tx127));
+ ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->tx255,
+ in_be32(&ugeth->ug_regs->tx255));
+ ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rx64,
+ in_be32(&ugeth->ug_regs->rx64));
+ ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rx127,
+ in_be32(&ugeth->ug_regs->rx127));
+ ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rx255,
+ in_be32(&ugeth->ug_regs->rx255));
+ ugeth_info("txok : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->txok,
+ in_be32(&ugeth->ug_regs->txok));
+ ugeth_info("txcf : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->ug_regs->txcf,
+ in_be16(&ugeth->ug_regs->txcf));
+ ugeth_info("tmca : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->tmca,
+ in_be32(&ugeth->ug_regs->tmca));
+ ugeth_info("tbca : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->tbca,
+ in_be32(&ugeth->ug_regs->tbca));
+ ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rxfok,
+ in_be32(&ugeth->ug_regs->rxfok));
+ ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rxbok,
+ in_be32(&ugeth->ug_regs->rxbok));
+ ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rbyt,
+ in_be32(&ugeth->ug_regs->rbyt));
+ ugeth_info("rmca : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rmca,
+ in_be32(&ugeth->ug_regs->rmca));
+ ugeth_info("rbca : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rbca,
+ in_be32(&ugeth->ug_regs->rbca));
+ ugeth_info("scar : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->scar,
+ in_be32(&ugeth->ug_regs->scar));
+ ugeth_info("scam : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->scam,
+ in_be32(&ugeth->ug_regs->scam));
+
+ if (ugeth->p_thread_data_tx) {
+ int numThreadsTxNumerical;
+ switch (ugeth->ug_info->numThreadsTx) {
+ case UCC_GETH_NUM_OF_THREADS_1:
+ numThreadsTxNumerical = 1;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_2:
+ numThreadsTxNumerical = 2;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_4:
+ numThreadsTxNumerical = 4;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_6:
+ numThreadsTxNumerical = 6;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_8:
+ numThreadsTxNumerical = 8;
+ break;
+ default:
+ numThreadsTxNumerical = 0;
+ break;
+ }
+
+ ugeth_info("Thread data TXs:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_thread_data_tx);
+ for (i = 0; i < numThreadsTxNumerical; i++) {
+ ugeth_info("Thread data TX[%d]:", i);
+ ugeth_info("Base address: 0x%08x",
+ (u32) & ugeth->p_thread_data_tx[i]);
+ mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
+ sizeof(struct ucc_geth_thread_data_tx));
+ }
+ }
+ if (ugeth->p_thread_data_rx) {
+ int numThreadsRxNumerical;
+ switch (ugeth->ug_info->numThreadsRx) {
+ case UCC_GETH_NUM_OF_THREADS_1:
+ numThreadsRxNumerical = 1;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_2:
+ numThreadsRxNumerical = 2;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_4:
+ numThreadsRxNumerical = 4;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_6:
+ numThreadsRxNumerical = 6;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_8:
+ numThreadsRxNumerical = 8;
+ break;
+ default:
+ numThreadsRxNumerical = 0;
+ break;
+ }
+
+ ugeth_info("Thread data RX:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_thread_data_rx);
+ for (i = 0; i < numThreadsRxNumerical; i++) {
+ ugeth_info("Thread data RX[%d]:", i);
+ ugeth_info("Base address: 0x%08x",
+ (u32) & ugeth->p_thread_data_rx[i]);
+ mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
+ sizeof(struct ucc_geth_thread_data_rx));
+ }
+ }
+ if (ugeth->p_exf_glbl_param) {
+ ugeth_info("EXF global param:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_exf_glbl_param);
+ mem_disp((u8 *) ugeth->p_exf_glbl_param,
+ sizeof(*ugeth->p_exf_glbl_param));
+ }
+ if (ugeth->p_tx_glbl_pram) {
+ ugeth_info("TX global param:");
+ ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
+ ugeth_info("temoder : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_tx_glbl_pram->temoder,
+ in_be16(&ugeth->p_tx_glbl_pram->temoder));
+ ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->sqptr,
+ in_be32(&ugeth->p_tx_glbl_pram->sqptr));
+ ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer,
+ in_be32(&ugeth->p_tx_glbl_pram->
+ schedulerbasepointer));
+ ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr,
+ in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
+ ugeth_info("tstate : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->tstate,
+ in_be32(&ugeth->p_tx_glbl_pram->tstate));
+ ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[0],
+ ugeth->p_tx_glbl_pram->iphoffset[0]);
+ ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[1],
+ ugeth->p_tx_glbl_pram->iphoffset[1]);
+ ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[2],
+ ugeth->p_tx_glbl_pram->iphoffset[2]);
+ ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[3],
+ ugeth->p_tx_glbl_pram->iphoffset[3]);
+ ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[4],
+ ugeth->p_tx_glbl_pram->iphoffset[4]);
+ ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[5],
+ ugeth->p_tx_glbl_pram->iphoffset[5]);
+ ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[6],
+ ugeth->p_tx_glbl_pram->iphoffset[6]);
+ ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[7],
+ ugeth->p_tx_glbl_pram->iphoffset[7]);
+ ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[0],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
+ ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[1],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
+ ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[2],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
+ ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[3],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
+ ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[4],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
+ ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[5],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
+ ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[6],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
+ ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[7],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
+ ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->tqptr,
+ in_be32(&ugeth->p_tx_glbl_pram->tqptr));
+ }
+ if (ugeth->p_rx_glbl_pram) {
+ ugeth_info("RX global param:");
+ ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
+ ugeth_info("remoder : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->remoder,
+ in_be32(&ugeth->p_rx_glbl_pram->remoder));
+ ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->rqptr,
+ in_be32(&ugeth->p_rx_glbl_pram->rqptr));
+ ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->typeorlen,
+ in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
+ ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_rx_glbl_pram->rxgstpack,
+ ugeth->p_rx_glbl_pram->rxgstpack);
+ ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr,
+ in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
+ ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr,
+ in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
+ ugeth_info("rstate : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_rx_glbl_pram->rstate,
+ ugeth->p_rx_glbl_pram->rstate);
+ ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->mrblr,
+ in_be16(&ugeth->p_rx_glbl_pram->mrblr));
+ ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->rbdqptr,
+ in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
+ ugeth_info("mflr : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->mflr,
+ in_be16(&ugeth->p_rx_glbl_pram->mflr));
+ ugeth_info("minflr : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->minflr,
+ in_be16(&ugeth->p_rx_glbl_pram->minflr));
+ ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->maxd1,
+ in_be16(&ugeth->p_rx_glbl_pram->maxd1));
+ ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->maxd2,
+ in_be16(&ugeth->p_rx_glbl_pram->maxd2));
+ ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->ecamptr,
+ in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
+ ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l2qt,
+ in_be32(&ugeth->p_rx_glbl_pram->l2qt));
+ ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[0],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
+ ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[1],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
+ ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[2],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
+ ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[3],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
+ ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[4],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
+ ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[5],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
+ ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[6],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
+ ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[7],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
+ ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->vlantype,
+ in_be16(&ugeth->p_rx_glbl_pram->vlantype));
+ ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->vlantci,
+ in_be16(&ugeth->p_rx_glbl_pram->vlantci));
+ for (i = 0; i < 64; i++)
+ ugeth_info
+ ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
+ i,
+ (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i],
+ ugeth->p_rx_glbl_pram->addressfiltering[i]);
+ ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam,
+ in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
+ }
+ if (ugeth->p_send_q_mem_reg) {
+ ugeth_info("Send Q memory registers:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_send_q_mem_reg);
+ for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ ugeth_info("SQQD[%d]:", i);
+ ugeth_info("Base address: 0x%08x",
+ (u32) & ugeth->p_send_q_mem_reg->sqqd[i]);
+ mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
+ sizeof(struct ucc_geth_send_queue_qd));
+ }
+ }
+ if (ugeth->p_scheduler) {
+ ugeth_info("Scheduler:");
+ ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
+ mem_disp((u8 *) ugeth->p_scheduler,
+ sizeof(*ugeth->p_scheduler));
+ }
+ if (ugeth->p_tx_fw_statistics_pram) {
+ ugeth_info("TX FW statistics pram:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_tx_fw_statistics_pram);
+ mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
+ sizeof(*ugeth->p_tx_fw_statistics_pram));
+ }
+ if (ugeth->p_rx_fw_statistics_pram) {
+ ugeth_info("RX FW statistics pram:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_rx_fw_statistics_pram);
+ mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
+ sizeof(*ugeth->p_rx_fw_statistics_pram));
+ }
+ if (ugeth->p_rx_irq_coalescing_tbl) {
+ ugeth_info("RX IRQ coalescing tables:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_rx_irq_coalescing_tbl);
+ for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ ugeth_info("RX IRQ coalescing table entry[%d]:", i);
+ ugeth_info("Base address: 0x%08x",
+ (u32) & ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i]);
+ ugeth_info
+ ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].interruptcoalescingmaxvalue,
+ in_be32(&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].
+ interruptcoalescingmaxvalue));
+ ugeth_info
+ ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].interruptcoalescingcounter,
+ in_be32(&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].
+ interruptcoalescingcounter));
+ }
+ }
+ if (ugeth->p_rx_bd_qs_tbl) {
+ ugeth_info("RX BD QS tables:");
+ ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
+ for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ ugeth_info("RX BD QS table[%d]:", i);
+ ugeth_info("Base address: 0x%08x",
+ (u32) & ugeth->p_rx_bd_qs_tbl[i]);
+ ugeth_info
+ ("bdbaseptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
+ ugeth_info
+ ("bdptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
+ ugeth_info
+ ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].
+ externalbdbaseptr));
+ ugeth_info
+ ("externalbdptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
+ ugeth_info("ucode RX Prefetched BDs:");
+ ugeth_info("Base address: 0x%08x",
+ (u32)
+ qe_muram_addr(in_be32
+ (&ugeth->p_rx_bd_qs_tbl[i].
+ bdbaseptr)));
+ mem_disp((u8 *)
+ qe_muram_addr(in_be32
+ (&ugeth->p_rx_bd_qs_tbl[i].
+ bdbaseptr)),
+ sizeof(struct ucc_geth_rx_prefetched_bds));
+ }
+ }
+ if (ugeth->p_init_enet_param_shadow) {
+ int size;
+ ugeth_info("Init enet param shadow:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_init_enet_param_shadow);
+ mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
+ sizeof(*ugeth->p_init_enet_param_shadow));
+
+ size = sizeof(struct ucc_geth_thread_rx_pram);
+ if (ugeth->ug_info->rxExtendedFiltering) {
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
+ if (ugeth->ug_info->largestexternallookupkeysize ==
+ QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
+ if (ugeth->ug_info->largestexternallookupkeysize ==
+ QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
+ }
+
+ dump_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ txthread[0]),
+ ENET_INIT_PARAM_MAX_ENTRIES_TX,
+ sizeof(struct ucc_geth_thread_tx_pram),
+ ugeth->ug_info->riscTx, 0);
+ dump_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ rxthread[0]),
+ ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
+ ugeth->ug_info->riscRx, 1);
+ }
+}
+#endif /* DEBUG */
+
+static void init_default_reg_vals(u32 __iomem *upsmr_register,
+ u32 __iomem *maccfg1_register,
+ u32 __iomem *maccfg2_register)
+{
+ out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
+ out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
+ out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
+}
+
+static int init_half_duplex_params(int alt_beb,
+ int back_pressure_no_backoff,
+ int no_backoff,
+ int excess_defer,
+ u8 alt_beb_truncation,
+ u8 max_retransmissions,
+ u8 collision_window,
+ u32 __iomem *hafdup_register)
+{
+ u32 value = 0;
+
+ if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
+ (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
+ (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
+ return -EINVAL;
+
+ value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);
+
+ if (alt_beb)
+ value |= HALFDUP_ALT_BEB;
+ if (back_pressure_no_backoff)
+ value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
+ if (no_backoff)
+ value |= HALFDUP_NO_BACKOFF;
+ if (excess_defer)
+ value |= HALFDUP_EXCESSIVE_DEFER;
+
+ value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);
+
+ value |= collision_window;
+
+ out_be32(hafdup_register, value);
+ return 0;
+}
+
+static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
+ u8 non_btb_ipg,
+ u8 min_ifg,
+ u8 btb_ipg,
+ u32 __iomem *ipgifg_register)
+{
+ u32 value = 0;
+
+ /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
+ IPG part 2 */
+ if (non_btb_cs_ipg > non_btb_ipg)
+ return -EINVAL;
+
+ if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
+ (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
+ /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
+ (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
+ return -EINVAL;
+
+ value |=
+ ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
+ IPGIFG_NBTB_CS_IPG_MASK);
+ value |=
+ ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
+ IPGIFG_NBTB_IPG_MASK);
+ value |=
+ ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
+ IPGIFG_MIN_IFG_MASK);
+ value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);
+
+ out_be32(ipgifg_register, value);
+ return 0;
+}
+
+int init_flow_control_params(u32 automatic_flow_control_mode,
+ int rx_flow_control_enable,
+ int tx_flow_control_enable,
+ u16 pause_period,
+ u16 extension_field,
+ u32 __iomem *upsmr_register,
+ u32 __iomem *uempr_register,
+ u32 __iomem *maccfg1_register)
+{
+ u32 value = 0;
+
+ /* Set UEMPR register */
+ value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
+ value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
+ out_be32(uempr_register, value);
+
+ /* Set UPSMR register */
+ setbits32(upsmr_register, automatic_flow_control_mode);
+
+ value = in_be32(maccfg1_register);
+ if (rx_flow_control_enable)
+ value |= MACCFG1_FLOW_RX;
+ if (tx_flow_control_enable)
+ value |= MACCFG1_FLOW_TX;
+ out_be32(maccfg1_register, value);
+
+ return 0;
+}
+
+static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
+ int auto_zero_hardware_statistics,
+ u32 __iomem *upsmr_register,
+ u16 __iomem *uescr_register)
+{
+ u16 uescr_value = 0;
+
+ /* Enable hardware statistics gathering if requested */
+ if (enable_hardware_statistics)
+ setbits32(upsmr_register, UCC_GETH_UPSMR_HSE);
+
+ /* Clear hardware statistics counters */
+ uescr_value = in_be16(uescr_register);
+ uescr_value |= UESCR_CLRCNT;
+ /* Automatically zero hardware statistics counters on read,
+ if requested */
+ if (auto_zero_hardware_statistics)
+ uescr_value |= UESCR_AUTOZ;
+ out_be16(uescr_register, uescr_value);
+
+ return 0;
+}
+
+static int init_firmware_statistics_gathering_mode(int
+ enable_tx_firmware_statistics,
+ int enable_rx_firmware_statistics,
+ u32 __iomem *tx_rmon_base_ptr,
+ u32 tx_firmware_statistics_structure_address,
+ u32 __iomem *rx_rmon_base_ptr,
+ u32 rx_firmware_statistics_structure_address,
+ u16 __iomem *temoder_register,
+ u32 __iomem *remoder_register)
+{
+	/* Note: this function does not check whether the pointer
+	 * parameters it receives are NULL */
+
+ if (enable_tx_firmware_statistics) {
+ out_be32(tx_rmon_base_ptr,
+ tx_firmware_statistics_structure_address);
+ setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE);
+ }
+
+ if (enable_rx_firmware_statistics) {
+ out_be32(rx_rmon_base_ptr,
+ rx_firmware_statistics_structure_address);
+ setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE);
+ }
+
+ return 0;
+}
+
+static int init_mac_station_addr_regs(u8 address_byte_0,
+ u8 address_byte_1,
+ u8 address_byte_2,
+ u8 address_byte_3,
+ u8 address_byte_4,
+ u8 address_byte_5,
+ u32 __iomem *macstnaddr1_register,
+ u32 __iomem *macstnaddr2_register)
+{
+ u32 value = 0;
+
+ /* Example: for a station address of 0x12345678ABCD, */
+ /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */
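+	/* For that address, the code below produces */
+	/* MACSTNADDR1 = 0xCDAB7856 and MACSTNADDR2 = 0x34120000. */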
+
+ /* MACSTNADDR1 Register: */
+
+ /* 0 7 8 15 */
+ /* station address byte 5 station address byte 4 */
+ /* 16 23 24 31 */
+ /* station address byte 3 station address byte 2 */
+ value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
+ value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
+ value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
+ value |= (u32) ((address_byte_5 << 24) & 0xFF000000);
+
+ out_be32(macstnaddr1_register, value);
+
+ /* MACSTNADDR2 Register: */
+
+ /* 0 7 8 15 */
+ /* station address byte 1 station address byte 0 */
+ /* 16 23 24 31 */
+ /* reserved reserved */
+ value = 0;
+ value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
+ value |= (u32) ((address_byte_1 << 24) & 0xFF000000);
+
+ out_be32(macstnaddr2_register, value);
+
+ return 0;
+}
+
+static int init_check_frame_length_mode(int length_check,
+ u32 __iomem *maccfg2_register)
+{
+ u32 value = 0;
+
+ value = in_be32(maccfg2_register);
+
+ if (length_check)
+ value |= MACCFG2_LC;
+ else
+ value &= ~MACCFG2_LC;
+
+ out_be32(maccfg2_register, value);
+ return 0;
+}
+
+static int init_preamble_length(u8 preamble_length,
+ u32 __iomem *maccfg2_register)
+{
+ if ((preamble_length < 3) || (preamble_length > 7))
+ return -EINVAL;
+
+ clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK,
+ preamble_length << MACCFG2_PREL_SHIFT);
+
+ return 0;
+}
+
+static int init_rx_parameters(int reject_broadcast,
+ int receive_short_frames,
+ int promiscuous, u32 __iomem *upsmr_register)
+{
+ u32 value = 0;
+
+ value = in_be32(upsmr_register);
+
+ if (reject_broadcast)
+ value |= UCC_GETH_UPSMR_BRO;
+ else
+ value &= ~UCC_GETH_UPSMR_BRO;
+
+ if (receive_short_frames)
+ value |= UCC_GETH_UPSMR_RSH;
+ else
+ value &= ~UCC_GETH_UPSMR_RSH;
+
+ if (promiscuous)
+ value |= UCC_GETH_UPSMR_PRO;
+ else
+ value &= ~UCC_GETH_UPSMR_PRO;
+
+ out_be32(upsmr_register, value);
+
+ return 0;
+}
+
+static int init_max_rx_buff_len(u16 max_rx_buf_len,
+ u16 __iomem *mrblr_register)
+{
+ /* max_rx_buf_len value must be a multiple of 128 */
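+	/* e.g. the driver default of 1536 (12 * 128) passes this check,
+	 * while a value such as 1500 would be rejected. */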
+ if ((max_rx_buf_len == 0) ||
+ (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
+ return -EINVAL;
+
+ out_be16(mrblr_register, max_rx_buf_len);
+ return 0;
+}
+
+static int init_min_frame_len(u16 min_frame_length,
+ u16 __iomem *minflr_register,
+ u16 __iomem *mrblr_register)
+{
+ u16 mrblr_value = 0;
+
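+	/* The minimum frame must fit within one receive buffer with
+	 * headroom; the 4 bytes presumably account for the FCS. */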
+ mrblr_value = in_be16(mrblr_register);
+ if (min_frame_length >= (mrblr_value - 4))
+ return -EINVAL;
+
+ out_be16(minflr_register, min_frame_length);
+ return 0;
+}
+
+static int adjust_enet_interface(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth_info *ug_info;
+ struct ucc_geth __iomem *ug_regs;
+ struct ucc_fast __iomem *uf_regs;
+ int ret_val;
+ u32 upsmr, maccfg2;
+ u16 value;
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ ug_info = ugeth->ug_info;
+ ug_regs = ugeth->ug_regs;
+ uf_regs = ugeth->uccf->uf_regs;
+
+ /* Set MACCFG2 */
+ maccfg2 = in_be32(&ug_regs->maccfg2);
+ maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
+ if ((ugeth->max_speed == SPEED_10) ||
+ (ugeth->max_speed == SPEED_100))
+ maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
+ else if (ugeth->max_speed == SPEED_1000)
+ maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
+ maccfg2 |= ug_info->padAndCrc;
+ out_be32(&ug_regs->maccfg2, maccfg2);
+
+ /* Set UPSMR */
+ upsmr = in_be32(&uf_regs->upsmr);
+ upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
+ UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
+ if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
+ if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII)
+ upsmr |= UCC_GETH_UPSMR_RPM;
+ switch (ugeth->max_speed) {
+ case SPEED_10:
+ upsmr |= UCC_GETH_UPSMR_R10M;
+ /* FALLTHROUGH */
+ case SPEED_100:
+ if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
+ upsmr |= UCC_GETH_UPSMR_RMM;
+ }
+ }
+ if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
+ upsmr |= UCC_GETH_UPSMR_TBIM;
+ }
+	if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)
+ upsmr |= UCC_GETH_UPSMR_SGMM;
+
+ out_be32(&uf_regs->upsmr, upsmr);
+
+ /* Disable autonegotiation in tbi mode, because by default it
+ comes up in autonegotiation mode. */
+ /* Note that this depends on proper setting in utbipar register. */
+ if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ struct phy_device *tbiphy;
+
+		if (!ug_info->tbi_node) {
+			ugeth_warn("TBI mode requires that the device "
+				"tree specify a tbi-handle\n");
+			return -EINVAL;
+		}
+
+		tbiphy = of_phy_find_device(ug_info->tbi_node);
+		if (!tbiphy) {
+			ugeth_warn("Could not get TBI device\n");
+			return -ENODEV;
+		}
+
+		value = phy_read(tbiphy, ENET_TBI_MII_CR);
+ value &= ~0x1000; /* Turn off autonegotiation */
+ phy_write(tbiphy, ENET_TBI_MII_CR, value);
+ }
+
+ init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
+
+ ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
+ if (ret_val != 0) {
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.",
+ __func__);
+ return ret_val;
+ }
+
+ return 0;
+}
+
+static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_fast_private *uccf;
+ u32 cecr_subblock;
+ u32 temp;
+ int i = 10;
+
+ uccf = ugeth->uccf;
+
+ /* Mask GRACEFUL STOP TX interrupt bit and clear it */
+ clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA);
+ out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA); /* clear by writing 1 */
+
+ /* Issue host command */
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+ qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
+ QE_CR_PROTOCOL_ETHERNET, 0);
+
+ /* Wait for command to complete */
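+	/* With i == 10 and a 10 ms sleep per pass, the command gets
+	 * roughly 100 ms to complete before we stop waiting. */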
+ do {
+ msleep(10);
+ temp = in_be32(uccf->p_ucce);
+ } while (!(temp & UCC_GETH_UCCE_GRA) && --i);
+
+ uccf->stopped_tx = 1;
+
+ return 0;
+}
+
+static int ugeth_graceful_stop_rx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_fast_private *uccf;
+ u32 cecr_subblock;
+ u8 temp;
+ int i = 10;
+
+ uccf = ugeth->uccf;
+
+ /* Clear acknowledge bit */
+ temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
+ temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
+ out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp);
+
+	/* Keep issuing the command and checking the acknowledge bit
+	 * until it is asserted, as the spec requires */
+ do {
+ /* Issue host command */
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
+ ucc_num);
+ qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
+ QE_CR_PROTOCOL_ETHERNET, 0);
+ msleep(10);
+ temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
+ } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i);
+
+ uccf->stopped_rx = 1;
+
+ return 0;
+}
+
+static int ugeth_restart_tx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_fast_private *uccf;
+ u32 cecr_subblock;
+
+ uccf = ugeth->uccf;
+
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+ qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0);
+ uccf->stopped_tx = 0;
+
+ return 0;
+}
+
+static int ugeth_restart_rx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_fast_private *uccf;
+ u32 cecr_subblock;
+
+ uccf = ugeth->uccf;
+
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+ qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
+ 0);
+ uccf->stopped_rx = 0;
+
+ return 0;
+}
+
+static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
+{
+ struct ucc_fast_private *uccf;
+ int enabled_tx, enabled_rx;
+
+ uccf = ugeth->uccf;
+
+ /* check if the UCC number is in range. */
+ if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: ucc_num out of range.", __func__);
+ return -EINVAL;
+ }
+
+ enabled_tx = uccf->enabled_tx;
+ enabled_rx = uccf->enabled_rx;
+
+ /* Get Tx and Rx going again, in case this channel was actively
+ disabled. */
+ if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
+ ugeth_restart_tx(ugeth);
+ if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
+ ugeth_restart_rx(ugeth);
+
+ ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */
+
+ return 0;
+}
+
+static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
+{
+ struct ucc_fast_private *uccf;
+
+ uccf = ugeth->uccf;
+
+ /* check if the UCC number is in range. */
+ if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: ucc_num out of range.", __func__);
+ return -EINVAL;
+ }
+
+ /* Stop any transmissions */
+ if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
+ ugeth_graceful_stop_tx(ugeth);
+
+ /* Stop any receptions */
+ if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
+ ugeth_graceful_stop_rx(ugeth);
+
+ ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */
+
+ return 0;
+}
+
+static void ugeth_quiesce(struct ucc_geth_private *ugeth)
+{
+ /* Prevent any further xmits, plus detach the device. */
+ netif_device_detach(ugeth->ndev);
+
+ /* Wait for any current xmits to finish. */
+ netif_tx_disable(ugeth->ndev);
+
+ /* Disable the interrupt to avoid NAPI rescheduling. */
+ disable_irq(ugeth->ug_info->uf_info.irq);
+
+ /* Stop NAPI, and possibly wait for its completion. */
+ napi_disable(&ugeth->napi);
+}
+
+static void ugeth_activate(struct ucc_geth_private *ugeth)
+{
+ napi_enable(&ugeth->napi);
+ enable_irq(ugeth->ug_info->uf_info.irq);
+ netif_device_attach(ugeth->ndev);
+}
+
+/* Called every time the controller might need to be made
+ * aware of new link state. The PHY code conveys this
+ * information through variables in the ugeth structure, and this
+ * function converts those variables into the appropriate
+ * register values, and can bring down the device if needed.
+ */
+
+static void adjust_link(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct ucc_geth __iomem *ug_regs;
+ struct ucc_fast __iomem *uf_regs;
+ struct phy_device *phydev = ugeth->phydev;
+ int new_state = 0;
+
+ ug_regs = ugeth->ug_regs;
+ uf_regs = ugeth->uccf->uf_regs;
+
+ if (phydev->link) {
+ u32 tempval = in_be32(&ug_regs->maccfg2);
+ u32 upsmr = in_be32(&uf_regs->upsmr);
+ /* Now we make sure that we can be in full duplex mode.
+ * If not, we operate in half-duplex mode. */
+ if (phydev->duplex != ugeth->oldduplex) {
+ new_state = 1;
+ if (!(phydev->duplex))
+ tempval &= ~(MACCFG2_FDX);
+ else
+ tempval |= MACCFG2_FDX;
+ ugeth->oldduplex = phydev->duplex;
+ }
+
+ if (phydev->speed != ugeth->oldspeed) {
+ new_state = 1;
+ switch (phydev->speed) {
+ case SPEED_1000:
+ tempval = ((tempval &
+ ~(MACCFG2_INTERFACE_MODE_MASK)) |
+ MACCFG2_INTERFACE_MODE_BYTE);
+ break;
+ case SPEED_100:
+ case SPEED_10:
+ tempval = ((tempval &
+ ~(MACCFG2_INTERFACE_MODE_MASK)) |
+ MACCFG2_INTERFACE_MODE_NIBBLE);
+ /* if reduced mode, re-set UPSMR.R10M */
+ if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
+ if (phydev->speed == SPEED_10)
+ upsmr |= UCC_GETH_UPSMR_R10M;
+ else
+ upsmr &= ~UCC_GETH_UPSMR_R10M;
+ }
+ break;
+ default:
+ if (netif_msg_link(ugeth))
+ ugeth_warn(
+ "%s: Ack! Speed (%d) is not 10/100/1000!",
+ dev->name, phydev->speed);
+ break;
+ }
+ ugeth->oldspeed = phydev->speed;
+ }
+
+ if (!ugeth->oldlink) {
+ new_state = 1;
+ ugeth->oldlink = 1;
+ }
+
+ if (new_state) {
+ /*
+ * To change the MAC configuration we need to disable
+ * the controller. To do so, we have to either grab
+ * ugeth->lock, which is a bad idea since 'graceful
+ * stop' commands might take quite a while, or we can
+ * quiesce driver's activity.
+ */
+ ugeth_quiesce(ugeth);
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
+ out_be32(&ug_regs->maccfg2, tempval);
+ out_be32(&uf_regs->upsmr, upsmr);
+
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ ugeth_activate(ugeth);
+ }
+ } else if (ugeth->oldlink) {
+ new_state = 1;
+ ugeth->oldlink = 0;
+ ugeth->oldspeed = 0;
+ ugeth->oldduplex = -1;
+ }
+
+ if (new_state && netif_msg_link(ugeth))
+ phy_print_status(phydev);
+}
+
+/* Initialize TBI PHY interface for communicating with the
+ * SERDES lynx PHY on the chip. We communicate with this PHY
+ * through the MDIO bus on each controller, treating it as a
+ * "normal" PHY at the address found in the UTBIPA register. We assume
+ * that the UTBIPA register is valid. Either the MDIO bus code will set
+ * it to a value that doesn't conflict with other PHYs on the bus, or the
+ * value doesn't matter, as there are no other PHYs on the bus.
+ */
+static void uec_configure_serdes(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ struct phy_device *tbiphy;
+
+ if (!ug_info->tbi_node) {
+ dev_warn(&dev->dev, "SGMII mode requires that the device "
+ "tree specify a tbi-handle\n");
+ return;
+ }
+
+ tbiphy = of_phy_find_device(ug_info->tbi_node);
+ if (!tbiphy) {
+ dev_err(&dev->dev, "error: Could not get TBI device\n");
+ return;
+ }
+
+ /*
+ * If the link is already up, we must already be ok, and don't need to
+ * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
+ * everything for us? Resetting it takes the link down and requires
+ * several seconds for it to come back.
+ */
+ if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS)
+ return;
+
+	/* Single clk mode, MII mode off (for SerDes communication) */
+ phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
+
+ phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
+
+ phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
+}
+
+/* Configure the PHY for dev.
+ * Returns 0 on success, or a negative errno on failure.
+ */
+static int init_phy(struct net_device *dev)
+{
+ struct ucc_geth_private *priv = netdev_priv(dev);
+ struct ucc_geth_info *ug_info = priv->ug_info;
+ struct phy_device *phydev;
+
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
+ phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
+ priv->phy_interface);
+ if (!phydev)
+ phydev = of_phy_connect_fixed_link(dev, &adjust_link,
+ priv->phy_interface);
+ if (!phydev) {
+ dev_err(&dev->dev, "Could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
+ uec_configure_serdes(dev);
+
+	phydev->supported &= (SUPPORTED_MII |
+			      SUPPORTED_Autoneg |
+			      SUPPORTED_10baseT_Half |
+			      SUPPORTED_10baseT_Full |
+			      SUPPORTED_100baseT_Half |
+			      SUPPORTED_100baseT_Full);
+
+	if (priv->max_speed == SPEED_1000)
+		phydev->supported |= SUPPORTED_1000baseT_Full;
+
+ phydev->advertising = phydev->supported;
+
+ priv->phydev = phydev;
+
+ return 0;
+}
+
+static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
+{
+#ifdef DEBUG
+ ucc_fast_dump_regs(ugeth->uccf);
+ dump_regs(ugeth);
+ dump_bds(ugeth);
+#endif
+}
+
+static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *
+ ugeth,
+ enum enet_addr_type
+ enet_addr_type)
+{
+ struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
+ struct ucc_fast_private *uccf;
+ enum comm_dir comm_dir;
+ struct list_head *p_lh;
+ u16 i, num;
+ u32 __iomem *addr_h;
+ u32 __iomem *addr_l;
+ u8 *p_counter;
+
+ uccf = ugeth->uccf;
+
+ p_82xx_addr_filt =
+ (struct ucc_geth_82xx_address_filtering_pram __iomem *)
+ ugeth->p_rx_glbl_pram->addressfiltering;
+
+ if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
+ addr_h = &(p_82xx_addr_filt->gaddr_h);
+ addr_l = &(p_82xx_addr_filt->gaddr_l);
+ p_lh = &ugeth->group_hash_q;
+ p_counter = &(ugeth->numGroupAddrInHash);
+ } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
+ addr_h = &(p_82xx_addr_filt->iaddr_h);
+ addr_l = &(p_82xx_addr_filt->iaddr_l);
+ p_lh = &ugeth->ind_hash_q;
+ p_counter = &(ugeth->numIndAddrInHash);
+ } else
+ return -EINVAL;
+
+ comm_dir = 0;
+ if (uccf->enabled_tx)
+ comm_dir |= COMM_DIR_TX;
+ if (uccf->enabled_rx)
+ comm_dir |= COMM_DIR_RX;
+ if (comm_dir)
+ ugeth_disable(ugeth, comm_dir);
+
+ /* Clear the hash table. */
+ out_be32(addr_h, 0x00000000);
+ out_be32(addr_l, 0x00000000);
+
+ if (!p_lh)
+ return 0;
+
+ num = *p_counter;
+
+ /* Delete all remaining CQ elements */
+ for (i = 0; i < num; i++)
+ put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));
+
+ *p_counter = 0;
+
+ if (comm_dir)
+ ugeth_enable(ugeth, comm_dir);
+
+ return 0;
+}
+
+static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth,
+ u8 paddr_num)
+{
+ ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
+ return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
+}
+
+static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
+{
+ u16 i, j;
+ u8 __iomem *bd;
+
+ if (!ugeth)
+ return;
+
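+ /* Undo everything ucc_geth_startup() set up: the fast UCC
+ context, MURAM parameter RAM areas, BD rings, pending skbs
+ and the register mapping */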
+ if (ugeth->uccf) {
+ ucc_fast_free(ugeth->uccf);
+ ugeth->uccf = NULL;
+ }
+
+ if (ugeth->p_thread_data_tx) {
+ qe_muram_free(ugeth->thread_dat_tx_offset);
+ ugeth->p_thread_data_tx = NULL;
+ }
+ if (ugeth->p_thread_data_rx) {
+ qe_muram_free(ugeth->thread_dat_rx_offset);
+ ugeth->p_thread_data_rx = NULL;
+ }
+ if (ugeth->p_exf_glbl_param) {
+ qe_muram_free(ugeth->exf_glbl_param_offset);
+ ugeth->p_exf_glbl_param = NULL;
+ }
+ if (ugeth->p_rx_glbl_pram) {
+ qe_muram_free(ugeth->rx_glbl_pram_offset);
+ ugeth->p_rx_glbl_pram = NULL;
+ }
+ if (ugeth->p_tx_glbl_pram) {
+ qe_muram_free(ugeth->tx_glbl_pram_offset);
+ ugeth->p_tx_glbl_pram = NULL;
+ }
+ if (ugeth->p_send_q_mem_reg) {
+ qe_muram_free(ugeth->send_q_mem_reg_offset);
+ ugeth->p_send_q_mem_reg = NULL;
+ }
+ if (ugeth->p_scheduler) {
+ qe_muram_free(ugeth->scheduler_offset);
+ ugeth->p_scheduler = NULL;
+ }
+ if (ugeth->p_tx_fw_statistics_pram) {
+ qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
+ ugeth->p_tx_fw_statistics_pram = NULL;
+ }
+ if (ugeth->p_rx_fw_statistics_pram) {
+ qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
+ ugeth->p_rx_fw_statistics_pram = NULL;
+ }
+ if (ugeth->p_rx_irq_coalescing_tbl) {
+ qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
+ ugeth->p_rx_irq_coalescing_tbl = NULL;
+ }
+ if (ugeth->p_rx_bd_qs_tbl) {
+ qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
+ ugeth->p_rx_bd_qs_tbl = NULL;
+ }
+ if (ugeth->p_init_enet_param_shadow) {
+ return_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ rxthread[0]),
+ ENET_INIT_PARAM_MAX_ENTRIES_RX,
+ ugeth->ug_info->riscRx, 1);
+ return_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ txthread[0]),
+ ENET_INIT_PARAM_MAX_ENTRIES_TX,
+ ugeth->ug_info->riscTx, 0);
+ kfree(ugeth->p_init_enet_param_shadow);
+ ugeth->p_init_enet_param_shadow = NULL;
+ }
+ for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ bd = ugeth->p_tx_bd_ring[i];
+ if (!bd)
+ continue;
+ for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
+ if (ugeth->tx_skbuff[i][j]) {
+ dma_unmap_single(ugeth->dev,
+ in_be32(&((struct qe_bd __iomem *)bd)->buf),
+ (in_be32((u32 __iomem *)bd) &
+ BD_LENGTH_MASK),
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
+ ugeth->tx_skbuff[i][j] = NULL;
+ }
+ /* Walk the ring so each BD's own buffer is unmapped,
+ as the Rx cleanup below does */
+ bd += sizeof(struct qe_bd);
+ }
+
+ kfree(ugeth->tx_skbuff[i]);
+
+ if (ugeth->p_tx_bd_ring[i]) {
+ if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_SYSTEM)
+ kfree((void *)ugeth->tx_bd_ring_offset[i]);
+ else if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_MURAM)
+ qe_muram_free(ugeth->tx_bd_ring_offset[i]);
+ ugeth->p_tx_bd_ring[i] = NULL;
+ }
+ }
+ for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ if (ugeth->p_rx_bd_ring[i]) {
+ /* Return existing data buffers in ring */
+ bd = ugeth->p_rx_bd_ring[i];
+ for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
+ if (ugeth->rx_skbuff[i][j]) {
+ dma_unmap_single(ugeth->dev,
+ in_be32(&((struct qe_bd __iomem *)bd)->buf),
+ ugeth->ug_info->
+ uf_info.max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(
+ ugeth->rx_skbuff[i][j]);
+ ugeth->rx_skbuff[i][j] = NULL;
+ }
+ bd += sizeof(struct qe_bd);
+ }
+
+ kfree(ugeth->rx_skbuff[i]);
+
+ if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_SYSTEM)
+ kfree((void *)ugeth->rx_bd_ring_offset[i]);
+ else if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_MURAM)
+ qe_muram_free(ugeth->rx_bd_ring_offset[i]);
+ ugeth->p_rx_bd_ring[i] = NULL;
+ }
+ }
+ while (!list_empty(&ugeth->group_hash_q))
+ put_enet_addr_container(ENET_ADDR_CONT_ENTRY
+ (dequeue(&ugeth->group_hash_q)));
+ while (!list_empty(&ugeth->ind_hash_q))
+ put_enet_addr_container(ENET_ADDR_CONT_ENTRY
+ (dequeue(&ugeth->ind_hash_q)));
+ if (ugeth->ug_regs) {
+ iounmap(ugeth->ug_regs);
+ ugeth->ug_regs = NULL;
+ }
+
+ skb_queue_purge(&ugeth->rx_recycle);
+}
+
+static void ucc_geth_set_multi(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth;
+ struct netdev_hw_addr *ha;
+ struct ucc_fast __iomem *uf_regs;
+ struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
+
+ ugeth = netdev_priv(dev);
+
+ uf_regs = ugeth->uccf->uf_regs;
+
+ if (dev->flags & IFF_PROMISC) {
+ setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
+ } else {
+ clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
+
+ p_82xx_addr_filt =
+ (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
+ p_rx_glbl_pram->addressfiltering;
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /* Catch all multicast addresses, so set the
+ * filter to all 1's.
+ */
+ out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
+ out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
+ } else {
+ /* Clear filter and add the addresses in the list.
+ */
+ out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
+ out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ /* Ask CPM to run CRC and set bit in
+ * filter mask.
+ */
+ hw_add_addr_in_hash(ugeth, ha->addr);
+ }
+ }
+ }
+}
+
+static void ucc_geth_stop(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
+ struct phy_device *phydev = ugeth->phydev;
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ /*
+ * Tell the kernel the link is down.
+ * Must be done before disabling the controller
+ * or deadlock may happen.
+ */
+ phy_stop(phydev);
+
+ /* Disable the controller */
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
+ /* Mask all interrupts */
+ out_be32(ugeth->uccf->p_uccm, 0x00000000);
+
+ /* Clear all interrupts */
+ out_be32(ugeth->uccf->p_ucce, 0xffffffff);
+
+ /* Disable Rx and Tx */
+ clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
+
+ ucc_geth_memclean(ugeth);
+}
+
+static int ucc_struct_init(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_info *uf_info;
+ int i;
+
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+
+ if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
+ (uf_info->bd_mem_part == MEM_PART_MURAM))) {
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: Bad memory partition value.",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Rx BD lengths */
+ for (i = 0; i < ug_info->numQueuesRx; i++) {
+ if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
+ (ug_info->bdRingLenRx[i] %
+ UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
+ if (netif_msg_probe(ugeth))
+ ugeth_err
+ ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ /* Tx BD lengths */
+ for (i = 0; i < ug_info->numQueuesTx; i++) {
+ if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
+ if (netif_msg_probe(ugeth))
+ ugeth_err
+ ("%s: Tx BD ring length must be no smaller than 2.",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ /* mrblr */
+ if ((uf_info->max_rx_buf_length == 0) ||
+ (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
+ if (netif_msg_probe(ugeth))
+ ugeth_err
+ ("%s: max_rx_buf_length must be non-zero multiple of 128.",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* num Tx queues */
+ if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: number of tx queues too large.", __func__);
+ return -EINVAL;
+ }
+
+ /* num Rx queues */
+ if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: number of rx queues too large.", __func__);
+ return -EINVAL;
+ }
+
+ /* l2qt */
+ for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
+ if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
+ if (netif_msg_probe(ugeth))
+ ugeth_err
+ ("%s: VLAN priority table entry must not be"
+ " larger than number of Rx queues.",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ /* l3qt */
+ for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
+ if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
+ if (netif_msg_probe(ugeth))
+ ugeth_err
+ ("%s: IP priority table entry must not be"
+ " larger than number of Rx queues.",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ if (ug_info->cam && !ug_info->ecamptr) {
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((ug_info->numStationAddresses !=
+ UCC_GETH_NUM_OF_STATION_ADDRESSES_1) &&
+ ug_info->rxExtendedFiltering) {
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: Number of station addresses greater than 1 "
+ "not allowed in extended parsing mode.",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Generate uccm_mask for receive */
+ uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
+ for (i = 0; i < ug_info->numQueuesRx; i++)
+ uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i);
+
+ for (i = 0; i < ug_info->numQueuesTx; i++)
+ uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i);
+ /* Initialize the general fast UCC block. */
+ if (ucc_fast_init(uf_info, &ugeth->uccf)) {
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: Failed to init uccf.", __func__);
+ return -ENOMEM;
+ }
+
+ /* Read the number of RISC engines; if the QE has four of them,
+ * let both Tx and Rx use all four.
+ */
+ if (qe_get_num_of_risc() == 4) {
+ ug_info->riscTx = QE_RISC_ALLOCATION_FOUR_RISCS;
+ ug_info->riscRx = QE_RISC_ALLOCATION_FOUR_RISCS;
+ }
+
+ ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs));
+ if (!ugeth->ug_regs) {
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: Failed to ioremap regs.", __func__);
+ return -ENOMEM;
+ }
+
+ skb_queue_head_init(&ugeth->rx_recycle);
+
+ return 0;
+}
+
+static int ucc_geth_startup(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
+ struct ucc_geth_init_pram __iomem *p_init_enet_pram;
+ struct ucc_fast_private *uccf;
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_info *uf_info;
+ struct ucc_fast __iomem *uf_regs;
+ struct ucc_geth __iomem *ug_regs;
+ int ret_val = -EINVAL;
+ u32 remoder = UCC_GETH_REMODER_INIT;
+ u32 init_enet_pram_offset, cecr_subblock, command;
+ u32 ifstat, i, j, size, l2qt, l3qt, length;
+ u16 temoder = UCC_GETH_TEMODER_INIT;
+ u16 test;
+ u8 function_code = 0;
+ u8 __iomem *bd;
+ u8 __iomem *endOfRing;
+ u8 numThreadsRxNumerical, numThreadsTxNumerical;
+
+ ugeth_vdbg("%s: IN", __func__);
+ uccf = ugeth->uccf;
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+ uf_regs = uccf->uf_regs;
+ ug_regs = ugeth->ug_regs;
+
+ switch (ug_info->numThreadsRx) {
+ case UCC_GETH_NUM_OF_THREADS_1:
+ numThreadsRxNumerical = 1;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_2:
+ numThreadsRxNumerical = 2;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_4:
+ numThreadsRxNumerical = 4;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_6:
+ numThreadsRxNumerical = 6;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_8:
+ numThreadsRxNumerical = 8;
+ break;
+ default:
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Bad number of Rx threads value.",
+ __func__);
+ return -EINVAL;
+ }
+
+ switch (ug_info->numThreadsTx) {
+ case UCC_GETH_NUM_OF_THREADS_1:
+ numThreadsTxNumerical = 1;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_2:
+ numThreadsTxNumerical = 2;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_4:
+ numThreadsTxNumerical = 4;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_6:
+ numThreadsTxNumerical = 6;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_8:
+ numThreadsTxNumerical = 8;
+ break;
+ default:
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Bad number of Tx threads value.",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Calculate rx_extended_features */
+ ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
+ ug_info->ipAddressAlignment ||
+ (ug_info->numStationAddresses !=
+ UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
+
+ ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
+ (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) ||
+ (ug_info->vlanOperationNonTagged !=
+ UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
+
+ init_default_reg_vals(&uf_regs->upsmr,
+ &ug_regs->maccfg1, &ug_regs->maccfg2);
+
+ /* Set UPSMR */
+ /* For more details see the hardware spec. */
+ init_rx_parameters(ug_info->bro,
+ ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
+
+ /* We're going to ignore other registers for now, */
+ /* except as needed to get up and running */
+
+ /* Set MACCFG1 */
+ /* For more details see the hardware spec. */
+ init_flow_control_params(ug_info->aufc,
+ ug_info->receiveFlowControl,
+ ug_info->transmitFlowControl,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &uf_regs->upsmr,
+ &ug_regs->uempr, &ug_regs->maccfg1);
+
+ setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
+
+ /* Set IPGIFG */
+ /* For more details see the hardware spec. */
+ ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
+ ug_info->nonBackToBackIfgPart2,
+ ug_info->
+ miminumInterFrameGapEnforcement,
+ ug_info->backToBackInterFrameGap,
+ &ug_regs->ipgifg);
+ if (ret_val != 0) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: IPGIFG initialization parameter too large.",
+ __func__);
+ return ret_val;
+ }
+
+ /* Set HAFDUP */
+ /* For more details see the hardware spec. */
+ ret_val = init_half_duplex_params(ug_info->altBeb,
+ ug_info->backPressureNoBackoff,
+ ug_info->noBackoff,
+ ug_info->excessDefer,
+ ug_info->altBebTruncation,
+ ug_info->maxRetransmission,
+ ug_info->collisionWindow,
+ &ug_regs->hafdup);
+ if (ret_val != 0) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Half Duplex initialization parameter too large.",
+ __func__);
+ return ret_val;
+ }
+
+ /* Set IFSTAT */
+ /* For more details see the hardware spec. */
+ /* Read only - resets upon read */
+ ifstat = in_be32(&ug_regs->ifstat);
+
+ /* Clear UEMPR */
+ /* For more details see the hardware spec. */
+ out_be32(&ug_regs->uempr, 0);
+
+ /* Set UESCR */
+ /* For more details see the hardware spec. */
+ init_hw_statistics_gathering_mode((ug_info->statisticsMode &
+ UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
+ 0, &uf_regs->upsmr, &ug_regs->uescr);
+
+ /* Allocate Tx bds */
+ for (j = 0; j < ug_info->numQueuesTx; j++) {
+ /* Allocate in multiple of
+ UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
+ according to spec */
+ length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
+ / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
+ * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
+ if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
+ UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
+ length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
+ if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
+ u32 align = 4;
+ if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
+ align = UCC_GETH_TX_BD_RING_ALIGNMENT;
+ ugeth->tx_bd_ring_offset[j] =
+ (u32) kmalloc((u32) (length + align), GFP_KERNEL);
+
+ if (ugeth->tx_bd_ring_offset[j] != 0)
+ ugeth->p_tx_bd_ring[j] =
+ (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
+ align) & ~(align - 1));
+ } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
+ ugeth->tx_bd_ring_offset[j] =
+ qe_muram_alloc(length,
+ UCC_GETH_TX_BD_RING_ALIGNMENT);
+ if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
+ ugeth->p_tx_bd_ring[j] =
+ (u8 __iomem *) qe_muram_addr(ugeth->
+ tx_bd_ring_offset[j]);
+ }
+ if (!ugeth->p_tx_bd_ring[j]) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate memory for Tx bd rings.",
+ __func__);
+ return -ENOMEM;
+ }
+ /* Zero unused end of bd ring, according to spec */
+ memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
+ ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
+ length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
+ }
+
+ /* Allocate Rx bds */
+ for (j = 0; j < ug_info->numQueuesRx; j++) {
+ length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
+ if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
+ u32 align = 4;
+ if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
+ align = UCC_GETH_RX_BD_RING_ALIGNMENT;
+ ugeth->rx_bd_ring_offset[j] =
+ (u32) kmalloc((u32) (length + align), GFP_KERNEL);
+ if (ugeth->rx_bd_ring_offset[j] != 0)
+ ugeth->p_rx_bd_ring[j] =
+ (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
+ align) & ~(align - 1));
+ } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
+ ugeth->rx_bd_ring_offset[j] =
+ qe_muram_alloc(length,
+ UCC_GETH_RX_BD_RING_ALIGNMENT);
+ if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
+ ugeth->p_rx_bd_ring[j] =
+ (u8 __iomem *) qe_muram_addr(ugeth->
+ rx_bd_ring_offset[j]);
+ }
+ if (!ugeth->p_rx_bd_ring[j]) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate memory for Rx bd rings.",
+ __func__);
+ return -ENOMEM;
+ }
+ }
+
+ /* Init Tx bds */
+ for (j = 0; j < ug_info->numQueuesTx; j++) {
+ /* Setup the skbuff rings */
+ ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
+ ugeth->ug_info->bdRingLenTx[j],
+ GFP_KERNEL);
+
+ if (ugeth->tx_skbuff[j] == NULL) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Could not allocate tx_skbuff",
+ __func__);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
+ ugeth->tx_skbuff[j][i] = NULL;
+
+ ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
+ bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
+ for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
+ /* clear bd buffer */
+ out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
+ /* set bd status and length */
+ out_be32((u32 __iomem *)bd, 0);
+ bd += sizeof(struct qe_bd);
+ }
+ bd -= sizeof(struct qe_bd);
+ /* set bd status and length */
+ out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
+ }
+
+ /* Init Rx bds */
+ for (j = 0; j < ug_info->numQueuesRx; j++) {
+ /* Setup the skbuff rings */
+ ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
+ ugeth->ug_info->bdRingLenRx[j],
+ GFP_KERNEL);
+
+ if (ugeth->rx_skbuff[j] == NULL) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Could not allocate rx_skbuff",
+ __func__);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
+ ugeth->rx_skbuff[j][i] = NULL;
+
+ ugeth->skb_currx[j] = 0;
+ bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
+ for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
+ /* set bd status and length */
+ out_be32((u32 __iomem *)bd, R_I);
+ /* clear bd buffer */
+ out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
+ bd += sizeof(struct qe_bd);
+ }
+ bd -= sizeof(struct qe_bd);
+ /* set bd status and length */
+ out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */
+ }
+
+ /*
+ * Global PRAM
+ */
+ /* Tx global PRAM */
+ /* Allocate global tx parameter RAM page */
+ ugeth->tx_glbl_pram_offset =
+ qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
+ UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
+ __func__);
+ return -ENOMEM;
+ }
+ ugeth->p_tx_glbl_pram =
+ (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth->
+ tx_glbl_pram_offset);
+ /* Zero out p_tx_glbl_pram */
+ memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
+
+ /* Fill global PRAM */
+
+ /* TQPTR */
+ /* Size varies with number of Tx threads */
+ ugeth->thread_dat_tx_offset =
+ qe_muram_alloc(numThreadsTxNumerical *
+ sizeof(struct ucc_geth_thread_data_tx) +
+ 32 * (numThreadsTxNumerical == 1),
+ UCC_GETH_THREAD_DATA_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
+ __func__);
+ return -ENOMEM;
+ }
+
+ ugeth->p_thread_data_tx =
+ (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth->
+ thread_dat_tx_offset);
+ out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
+
+ /* vtagtable */
+ for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
+ out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
+ ug_info->vtagtable[i]);
+
+ /* iphoffset */
+ for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
+ out_8(&ugeth->p_tx_glbl_pram->iphoffset[i],
+ ug_info->iphoffset[i]);
+
+ /* SQPTR */
+ /* Size varies with number of Tx queues */
+ ugeth->send_q_mem_reg_offset =
+ qe_muram_alloc(ug_info->numQueuesTx *
+ sizeof(struct ucc_geth_send_queue_qd),
+ UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
+ __func__);
+ return -ENOMEM;
+ }
+
+ ugeth->p_send_q_mem_reg =
+ (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth->
+ send_q_mem_reg_offset);
+ out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
+
+ /* Setup the table */
+ /* Assume BD rings are already established */
+ for (i = 0; i < ug_info->numQueuesTx; i++) {
+ endOfRing =
+ ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
+ 1) * sizeof(struct qe_bd);
+ if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
+ (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
+ last_bd_completed_address,
+ (u32) virt_to_phys(endOfRing));
+ } else if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_MURAM) {
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
+ (u32) immrbar_virt_to_phys(ugeth->
+ p_tx_bd_ring[i]));
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
+ last_bd_completed_address,
+ (u32) immrbar_virt_to_phys(endOfRing));
+ }
+ }
+
+ /* schedulerbasepointer */
+
+ if (ug_info->numQueuesTx > 1) {
+ /* scheduler exists only if more than 1 tx queue */
+ ugeth->scheduler_offset =
+ qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
+ UCC_GETH_SCHEDULER_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->scheduler_offset)) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_scheduler.",
+ __func__);
+ return -ENOMEM;
+ }
+
+ ugeth->p_scheduler =
+ (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth->
+ scheduler_offset);
+ out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
+ ugeth->scheduler_offset);
+ /* Zero out p_scheduler */
+ memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
+
+ /* Set values in scheduler */
+ out_be32(&ugeth->p_scheduler->mblinterval,
+ ug_info->mblinterval);
+ out_be16(&ugeth->p_scheduler->nortsrbytetime,
+ ug_info->nortsrbytetime);
+ out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz);
+ out_8(&ugeth->p_scheduler->strictpriorityq,
+ ug_info->strictpriorityq);
+ out_8(&ugeth->p_scheduler->txasap, ug_info->txasap);
+ out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw);
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ out_8(&ugeth->p_scheduler->weightfactor[i],
+ ug_info->weightfactor[i]);
+
+ /* Set pointers to cpucount registers in scheduler */
+ ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
+ ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
+ ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
+ ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
+ ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
+ ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
+ ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
+ ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
+ }
+
+ /* TxRMON_PTR (statistics) */
+ if (ug_info->
+ statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
+ ugeth->tx_fw_statistics_pram_offset =
+ qe_muram_alloc(sizeof
+ (struct ucc_geth_tx_firmware_statistics_pram),
+ UCC_GETH_TX_STATISTICS_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for"
+ " p_tx_fw_statistics_pram.",
+ __func__);
+ return -ENOMEM;
+ }
+ ugeth->p_tx_fw_statistics_pram =
+ (struct ucc_geth_tx_firmware_statistics_pram __iomem *)
+ qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
+ /* Zero out p_tx_fw_statistics_pram */
+ memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram,
+ 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
+ }
+
+ /* temoder */
+ /* Already has speed set */
+
+ if (ug_info->numQueuesTx > 1)
+ temoder |= TEMODER_SCHEDULER_ENABLE;
+ if (ug_info->ipCheckSumGenerate)
+ temoder |= TEMODER_IP_CHECKSUM_GENERATE;
+ temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
+ out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
+
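+ /* The read-back value is unused; presumably a dummy read to post
+ the preceding temoder write */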
+ test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
+
+ /* Function code register value to be used later */
+ function_code = UCC_BMR_BO_BE | UCC_BMR_GBL;
+ /* Required for QE */
+
+ /* function code register */
+ out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
+
+ /* Rx global PRAM */
+ /* Allocate global rx parameter RAM page */
+ ugeth->rx_glbl_pram_offset =
+ qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
+ UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
+ __func__);
+ return -ENOMEM;
+ }
+ ugeth->p_rx_glbl_pram =
+ (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth->
+ rx_glbl_pram_offset);
+ /* Zero out p_rx_glbl_pram */
+ memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
+
+ /* Fill global PRAM */
+
+ /* RQPTR */
+ /* Size varies with number of Rx threads */
+ ugeth->thread_dat_rx_offset =
+ qe_muram_alloc(numThreadsRxNumerical *
+ sizeof(struct ucc_geth_thread_data_rx),
+ UCC_GETH_THREAD_DATA_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
+ __func__);
+ return -ENOMEM;
+ }
+
+ ugeth->p_thread_data_rx =
+ (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth->
+ thread_dat_rx_offset);
+ out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
+
+ /* typeorlen */
+ out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
+
+ /* rxrmonbaseptr (statistics) */
+ if (ug_info->
+ statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
+ ugeth->rx_fw_statistics_pram_offset =
+ qe_muram_alloc(sizeof
+ (struct ucc_geth_rx_firmware_statistics_pram),
+ UCC_GETH_RX_STATISTICS_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for"
+ " p_rx_fw_statistics_pram.", __func__);
+ return -ENOMEM;
+ }
+ ugeth->p_rx_fw_statistics_pram =
+ (struct ucc_geth_rx_firmware_statistics_pram __iomem *)
+ qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
+ /* Zero out p_rx_fw_statistics_pram */
+ memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0,
+ sizeof(struct ucc_geth_rx_firmware_statistics_pram));
+ }
+
+ /* intCoalescingPtr */
+
+ /* Size varies with number of Rx queues */
+ ugeth->rx_irq_coalescing_tbl_offset =
+ qe_muram_alloc(ug_info->numQueuesRx *
+ sizeof(struct ucc_geth_rx_interrupt_coalescing_entry)
+ + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for"
+ " p_rx_irq_coalescing_tbl.", __func__);
+ return -ENOMEM;
+ }
+
+ ugeth->p_rx_irq_coalescing_tbl =
+ (struct ucc_geth_rx_interrupt_coalescing_table __iomem *)
+ qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
+ out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
+ ugeth->rx_irq_coalescing_tbl_offset);
+
+ /* Fill interrupt coalescing table */
+ for (i = 0; i < ug_info->numQueuesRx; i++) {
+ out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
+ interruptcoalescingmaxvalue,
+ ug_info->interruptcoalescingmaxvalue[i]);
+ out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
+ interruptcoalescingcounter,
+ ug_info->interruptcoalescingmaxvalue[i]);
+ }
+
+ /* MRBLR */
+ init_max_rx_buff_len(uf_info->max_rx_buf_length,
+ &ugeth->p_rx_glbl_pram->mrblr);
+ /* MFLR */
+ out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
+ /* MINFLR */
+ init_min_frame_len(ug_info->minFrameLength,
+ &ugeth->p_rx_glbl_pram->minflr,
+ &ugeth->p_rx_glbl_pram->mrblr);
+ /* MAXD1 */
+ out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
+ /* MAXD2 */
+ out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
+
+ /* l2qt */
+ l2qt = 0;
+ for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
+ l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
+ out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
+
+ /* l3qt */
+ for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
+ l3qt = 0;
+ for (i = 0; i < 8; i++)
+ l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
+ out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt);
+ }
+
+ /* vlantype */
+ out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
+
+ /* vlantci */
+ out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
+
+ /* ecamptr */
+ out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
+
+ /* RBDQPTR */
+ /* Size varies with number of Rx queues */
+ ugeth->rx_bd_qs_tbl_offset =
+ qe_muram_alloc(ug_info->numQueuesRx *
+ (sizeof(struct ucc_geth_rx_bd_queues_entry) +
+ sizeof(struct ucc_geth_rx_prefetched_bds)),
+ UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
+ __func__);
+ return -ENOMEM;
+ }
+
+ ugeth->p_rx_bd_qs_tbl =
+ (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
+ rx_bd_qs_tbl_offset);
+ out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
+ /* Zero out p_rx_bd_qs_tbl */
+ memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl,
+ 0,
+ ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
+ sizeof(struct ucc_geth_rx_prefetched_bds)));
+
+ /* Setup the table */
+ /* Assume BD rings are already established */
+ for (i = 0; i < ug_info->numQueuesRx; i++) {
+ if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
+ out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
+ (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
+ } else if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_MURAM) {
+ out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
+ (u32) immrbar_virt_to_phys(ugeth->
+ p_rx_bd_ring[i]));
+ }
+ /* rest of fields handled by QE */
+ }
+
+ /* remoder */
+ /* Already has speed set */
+
+ if (ugeth->rx_extended_features)
+ remoder |= REMODER_RX_EXTENDED_FEATURES;
+ if (ug_info->rxExtendedFiltering)
+ remoder |= REMODER_RX_EXTENDED_FILTERING;
+ if (ug_info->dynamicMaxFrameLength)
+ remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
+ if (ug_info->dynamicMinFrameLength)
+ remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
+ remoder |=
+ ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
+ remoder |=
+ ug_info->
+ vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
+ remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
+ remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
+ if (ug_info->ipCheckSumCheck)
+ remoder |= REMODER_IP_CHECKSUM_CHECK;
+ if (ug_info->ipAddressAlignment)
+ remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
+ out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
+
+ /* Note that this function must be called ONLY AFTER
+ p_tx_fw_statistics_pram and p_rx_fw_statistics_pram
+ are allocated! */
+ init_firmware_statistics_gathering_mode((ug_info->
+ statisticsMode &
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
+ (ug_info->statisticsMode &
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
+ &ugeth->p_tx_glbl_pram->txrmonbaseptr,
+ ugeth->tx_fw_statistics_pram_offset,
+ &ugeth->p_rx_glbl_pram->rxrmonbaseptr,
+ ugeth->rx_fw_statistics_pram_offset,
+ &ugeth->p_tx_glbl_pram->temoder,
+ &ugeth->p_rx_glbl_pram->remoder);
+
+ /* function code register */
+ out_8(&ugeth->p_rx_glbl_pram->rstate, function_code);
+
+ /* initialize extended filtering */
+ if (ug_info->rxExtendedFiltering) {
+ if (!ug_info->extendedFilteringChainPointer) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Null Extended Filtering Chain Pointer.",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Allocate memory for extended filtering Mode Global
+ Parameters */
+ ugeth->exf_glbl_param_offset =
+ qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram),
+ UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for"
+ " p_exf_glbl_param.", __func__);
+ return -ENOMEM;
+ }
+
+ ugeth->p_exf_glbl_param =
+ (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth->
+ exf_glbl_param_offset);
+ out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
+ ugeth->exf_glbl_param_offset);
+ out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
+ (u32) ug_info->extendedFilteringChainPointer);
+
+ } else { /* initialize 82xx style address filtering */
+
+ /* Init individual address recognition registers to disabled */
+
+ for (j = 0; j < NUM_OF_PADDRS; j++)
+ ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
+
+ p_82xx_addr_filt =
+ (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
+ p_rx_glbl_pram->addressfiltering;
+
+ ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
+ ENET_ADDR_TYPE_GROUP);
+ ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
+ ENET_ADDR_TYPE_INDIVIDUAL);
+ }
+
+ /*
+ * Initialize UCC at QE level
+ */
+
+ command = QE_INIT_TX_RX;
+
+ /* Allocate shadow InitEnet command parameter structure.
+ * This is needed because after the InitEnet command is executed,
+ * the structure in DPRAM is released, because DPRAM is a premium
+ * resource.
+ * This shadow structure keeps a copy of what was done so that the
+ * allocated resources can be released when the channel is freed.
+ */
+ ugeth->p_init_enet_param_shadow =
+ kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL);
+ if (!ugeth->p_init_enet_param_shadow) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate memory for"
+ " p_UccInitEnetParamShadows.", __func__);
+ return -ENOMEM;
+ }
+ /* Zero out *p_init_enet_param_shadow */
+ memset((char *)ugeth->p_init_enet_param_shadow,
+ 0, sizeof(struct ucc_geth_init_pram));
+
+ /* Fill shadow InitEnet command parameter structure */
+
+ ugeth->p_init_enet_param_shadow->resinit1 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT1;
+ ugeth->p_init_enet_param_shadow->resinit2 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT2;
+ ugeth->p_init_enet_param_shadow->resinit3 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT3;
+ ugeth->p_init_enet_param_shadow->resinit4 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT4;
+ ugeth->p_init_enet_param_shadow->resinit5 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT5;
+ ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
+ ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
+ ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
+ ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
+
+ ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
+ ugeth->rx_glbl_pram_offset | ug_info->riscRx;
+ if ((ug_info->largestexternallookupkeysize !=
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) &&
+ (ug_info->largestexternallookupkeysize !=
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) &&
+ (ug_info->largestexternallookupkeysize !=
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Invalid largest External Lookup Key Size.",
+ __func__);
+ return -EINVAL;
+ }
+ ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
+ ug_info->largestexternallookupkeysize;
+ size = sizeof(struct ucc_geth_thread_rx_pram);
+ if (ug_info->rxExtendedFiltering) {
+ size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
+ if (ug_info->largestexternallookupkeysize ==
+ QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
+ if (ug_info->largestexternallookupkeysize ==
+ QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
+ }
+
+ ret_val = fill_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->rxthread[0]),
+ (u8) (numThreadsRxNumerical + 1),
+ /* Rx needs one extra for terminator */
+ size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
+ ug_info->riscRx, 1);
+ if (ret_val != 0) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
+ __func__);
+ return ret_val;
+ }
+
+ ugeth->p_init_enet_param_shadow->txglobal =
+ ugeth->tx_glbl_pram_offset | ug_info->riscTx;
+ ret_val = fill_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->txthread[0]),
+ numThreadsTxNumerical,
+ sizeof(struct ucc_geth_thread_tx_pram),
+ UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
+ ug_info->riscTx, 0);
+ if (ret_val != 0) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
+ __func__);
+ return ret_val;
+ }
+
+ /* Load Rx bds with buffers */
+ for (i = 0; i < ug_info->numQueuesRx; i++) {
+ ret_val = rx_bd_buffer_set(ugeth, (u8) i);
+ if (ret_val != 0) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Can not fill Rx bds with buffers.",
+ __func__);
+ return ret_val;
+ }
+ }
+
+ /* Allocate InitEnet command parameter structure */
+ init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
+ if (IS_ERR_VALUE(init_enet_pram_offset)) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
+ __func__);
+ return -ENOMEM;
+ }
+ p_init_enet_pram =
+ (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset);
+
+ /* Copy shadow InitEnet command parameter structure into PRAM */
+ out_8(&p_init_enet_pram->resinit1,
+ ugeth->p_init_enet_param_shadow->resinit1);
+ out_8(&p_init_enet_pram->resinit2,
+ ugeth->p_init_enet_param_shadow->resinit2);
+ out_8(&p_init_enet_pram->resinit3,
+ ugeth->p_init_enet_param_shadow->resinit3);
+ out_8(&p_init_enet_pram->resinit4,
+ ugeth->p_init_enet_param_shadow->resinit4);
+ out_be16(&p_init_enet_pram->resinit5,
+ ugeth->p_init_enet_param_shadow->resinit5);
+ out_8(&p_init_enet_pram->largestexternallookupkeysize,
+ ugeth->p_init_enet_param_shadow->largestexternallookupkeysize);
+ out_be32(&p_init_enet_pram->rgftgfrxglobal,
+ ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
+ for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
+ out_be32(&p_init_enet_pram->rxthread[i],
+ ugeth->p_init_enet_param_shadow->rxthread[i]);
+ out_be32(&p_init_enet_pram->txglobal,
+ ugeth->p_init_enet_param_shadow->txglobal);
+ for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
+ out_be32(&p_init_enet_pram->txthread[i],
+ ugeth->p_init_enet_param_shadow->txthread[i]);
+
+ /* Issue QE command */
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+ qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
+ init_enet_pram_offset);
+
+ /* Free InitEnet command parameter */
+ qe_muram_free(init_enet_pram_offset);
+
+ return 0;
+}
+
+/* This is called by the kernel when a frame is ready for transmission. */
+/* It is referenced by the ndo_start_xmit field of net_device_ops. */
+static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+#ifdef CONFIG_UGETH_TX_ON_DEMAND
+ struct ucc_fast_private *uccf;
+#endif
+ u8 __iomem *bd; /* BD pointer */
+ u32 bd_status;
+ u8 txQ = 0;
+ unsigned long flags;
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ spin_lock_irqsave(&ugeth->lock, flags);
+
+ dev->stats.tx_bytes += skb->len;
+
+ /* Start from the next BD that should be filled */
+ bd = ugeth->txBd[txQ];
+ bd_status = in_be32((u32 __iomem *)bd);
+ /* Save the skb pointer so we can free it later */
+ ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
+
+ /* Update the current skb pointer (wrapping if this was the last) */
+ ugeth->skb_curtx[txQ] =
+ (ugeth->skb_curtx[txQ] +
+ 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
+
+ /* set up the buffer descriptor */
+ out_be32(&((struct qe_bd __iomem *)bd)->buf,
+ dma_map_single(ugeth->dev, skb->data,
+ skb->len, DMA_TO_DEVICE));
+
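+ /* Keep the Wrap bit, then mark the BD Ready for the QE (T_R),
+ request an interrupt on completion (T_I) and flag it as the
+ Last buffer of the frame (T_L) */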
+ bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
+
+ /* set bd status and length */
+ out_be32((u32 __iomem *)bd, bd_status);
+
+ /* Move to next BD in the ring */
+ if (!(bd_status & T_W))
+ bd += sizeof(struct qe_bd);
+ else
+ bd = ugeth->p_tx_bd_ring[txQ];
+
+ /* If the next BD still needs to be cleaned up, then the bds
+ are full. We need to tell the kernel to stop sending us stuff. */
+ if (bd == ugeth->confBd[txQ]) {
+ if (!netif_queue_stopped(dev))
+ netif_stop_queue(dev);
+ }
+
+ ugeth->txBd[txQ] = bd;
+
+ skb_tx_timestamp(skb);
+
+ if (ugeth->p_scheduler) {
+ ugeth->cpucount[txQ]++;
+ /* Indicate to QE that there are more Tx bds ready for
+ transmission */
+ /* This is done by writing a running counter of the bd
+ count to the scheduler PRAM. */
+ out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
+ }
+
+#ifdef CONFIG_UGETH_TX_ON_DEMAND
+ uccf = ugeth->uccf;
+ out_be16(uccf->p_utodr, UCC_FAST_TOD);
+#endif
+ spin_unlock_irqrestore(&ugeth->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
+{
+ struct sk_buff *skb;
+ u8 __iomem *bd;
+ u16 length, howmany = 0;
+ u32 bd_status;
+ u8 *bdBuffer;
+ struct net_device *dev;
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ dev = ugeth->ndev;
+
+ /* collect received buffers */
+ bd = ugeth->rxBd[rxQ];
+
+ bd_status = in_be32((u32 __iomem *)bd);
+
+ /* Process BDs the hardware has completed (R_E clear) until we
+ hit one it still owns or exhaust the work limit */
+ while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
+ bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf);
+ length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
+ skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
+
+ /* determine whether buffer is first, last, first and last
+ (single buffer frame) or middle (not first and not last) */
+ if (!skb ||
+ (!(bd_status & (R_F | R_L))) ||
+ (bd_status & R_ERRORS_FATAL)) {
+ if (netif_msg_rx_err(ugeth))
+ ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
+ __func__, __LINE__, (u32) skb);
+ if (skb) {
+ skb->data = skb->head + NET_SKB_PAD;
+ skb->len = 0;
+ skb_reset_tail_pointer(skb);
+ __skb_queue_head(&ugeth->rx_recycle, skb);
+ }
+
+ ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
+ dev->stats.rx_dropped++;
+ } else {
+ dev->stats.rx_packets++;
+ howmany++;
+
+ /* Prep the skb for the packet */
+ skb_put(skb, length);
+
+ /* Tell the skb what kind of packet this is */
+ skb->protocol = eth_type_trans(skb, ugeth->ndev);
+
+ dev->stats.rx_bytes += length;
+ /* Send the packet up the stack */
+ netif_receive_skb(skb);
+ }
+
+ skb = get_new_skb(ugeth, bd);
+ if (!skb) {
+ if (netif_msg_rx_err(ugeth))
+ ugeth_warn("%s: No Rx Data Buffer", __func__);
+ dev->stats.rx_dropped++;
+ break;
+ }
+
+ ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
+
+ /* update to point at the next skb */
+ ugeth->skb_currx[rxQ] =
+ (ugeth->skb_currx[rxQ] +
+ 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);
+
+ if (bd_status & R_W)
+ bd = ugeth->p_rx_bd_ring[rxQ];
+ else
+ bd += sizeof(struct qe_bd);
+
+ bd_status = in_be32((u32 __iomem *)bd);
+ }
+
+ ugeth->rxBd[rxQ] = bd;
+ return howmany;
+}
+
+static int ucc_geth_tx(struct net_device *dev, u8 txQ)
+{
+ /* Start from the next BD that should be filled */
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ u8 __iomem *bd; /* BD pointer */
+ u32 bd_status;
+
+ bd = ugeth->confBd[txQ];
+ bd_status = in_be32((u32 __iomem *)bd);
+
+ /* Normal processing. */
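+ /* A cleared Ready bit (T_R) means the QE is done with this BD
+ and ownership has returned to the driver */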
+ while ((bd_status & T_R) == 0) {
+ struct sk_buff *skb;
+
+ /* BD contains already transmitted buffer. */
+ /* Handle the transmitted buffer and release */
+ /* the BD to be used with the current frame */
+
+ skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
+ if (!skb)
+ break;
+
+ dev->stats.tx_packets++;
+
+ if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
+ skb_recycle_check(skb,
+ ugeth->ug_info->uf_info.max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT))
+ __skb_queue_head(&ugeth->rx_recycle, skb);
+ else
+ dev_kfree_skb(skb);
+
+ ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
+ ugeth->skb_dirtytx[txQ] =
+ (ugeth->skb_dirtytx[txQ] +
+ 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
+
+ /* We freed a buffer, so now we can restart transmission */
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+
+ /* Advance the confirmation BD pointer */
+ if (!(bd_status & T_W))
+ bd += sizeof(struct qe_bd);
+ else
+ bd = ugeth->p_tx_bd_ring[txQ];
+ bd_status = in_be32((u32 __iomem *)bd);
+ }
+ ugeth->confBd[txQ] = bd;
+ return 0;
+}
+
+static int ucc_geth_poll(struct napi_struct *napi, int budget)
+{
+ struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi);
+ struct ucc_geth_info *ug_info;
+ int howmany, i;
+
+ ug_info = ugeth->ug_info;
+
+ /* Tx event processing */
+ spin_lock(&ugeth->lock);
+ for (i = 0; i < ug_info->numQueuesTx; i++)
+ ucc_geth_tx(ugeth->ndev, i);
+ spin_unlock(&ugeth->lock);
+
+ howmany = 0;
+ for (i = 0; i < ug_info->numQueuesRx; i++)
+ howmany += ucc_geth_rx(ugeth, i, budget - howmany);
+
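+ /* All pending work fit within the budget: leave polling mode and
+ re-enable the Rx/Tx event interrupts masked in the IRQ handler */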
+ if (howmany < budget) {
+ napi_complete(napi);
+ setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS);
+ }
+
+ return howmany;
+}
+
+static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
+{
+ struct net_device *dev = info;
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct ucc_fast_private *uccf;
+ struct ucc_geth_info *ug_info;
+ register u32 ucce;
+ register u32 uccm;
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ uccf = ugeth->uccf;
+ ug_info = ugeth->ug_info;
+
+ /* read and clear events */
+ ucce = (u32) in_be32(uccf->p_ucce);
+ uccm = (u32) in_be32(uccf->p_uccm);
+ ucce &= uccm;
+ out_be32(uccf->p_ucce, ucce);
+
+ /* check for receive events that require processing */
+ if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) {
+ if (napi_schedule_prep(&ugeth->napi)) {
+ uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS);
+ out_be32(uccf->p_uccm, uccm);
+ __napi_schedule(&ugeth->napi);
+ }
+ }
+
+ /* Errors and other events */
+ if (ucce & UCCE_OTHER) {
+ if (ucce & UCC_GETH_UCCE_BSY)
+ dev->stats.rx_errors++;
+ if (ucce & UCC_GETH_UCCE_TXE)
+ dev->stats.tx_errors++;
+ }
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void ucc_netpoll(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ int irq = ugeth->ug_info->uf_info.irq;
+
+ disable_irq(irq);
+ ucc_geth_irq_handler(irq, dev);
+ enable_irq(irq);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+static int ucc_geth_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ /*
+ * If device is not running, we will set mac addr register
+ * when opening the device.
+ */
+ if (!netif_running(dev))
+ return 0;
+
+ spin_lock_irq(&ugeth->lock);
+ init_mac_station_addr_regs(dev->dev_addr[0],
+ dev->dev_addr[1],
+ dev->dev_addr[2],
+ dev->dev_addr[3],
+ dev->dev_addr[4],
+ dev->dev_addr[5],
+ &ugeth->ug_regs->macstnaddr1,
+ &ugeth->ug_regs->macstnaddr2);
+ spin_unlock_irq(&ugeth->lock);
+
+ return 0;
+}
+
+static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
+{
+ struct net_device *dev = ugeth->ndev;
+ int err;
+
+ err = ucc_struct_init(ugeth);
+ if (err) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Cannot configure internal struct, "
+ "aborting.", dev->name);
+ goto err;
+ }
+
+ err = ucc_geth_startup(ugeth);
+ if (err) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Cannot configure net device, aborting.",
+ dev->name);
+ goto err;
+ }
+
+ err = adjust_enet_interface(ugeth);
+ if (err) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Cannot configure net device, aborting.",
+ dev->name);
+ goto err;
+ }
+
+ /* Set MACSTNADDR1, MACSTNADDR2 */
+ /* For more details see the hardware spec. */
+ init_mac_station_addr_regs(dev->dev_addr[0],
+ dev->dev_addr[1],
+ dev->dev_addr[2],
+ dev->dev_addr[3],
+ dev->dev_addr[4],
+ dev->dev_addr[5],
+ &ugeth->ug_regs->macstnaddr1,
+ &ugeth->ug_regs->macstnaddr2);
+
+ err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ if (err) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
+ goto err;
+ }
+
+ return 0;
+err:
+ ucc_geth_stop(ugeth);
+ return err;
+}
+
+/* Called when something needs to use the ethernet device */
+/* Returns 0 for success. */
+static int ucc_geth_open(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ int err;
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ /* Test station address */
+ if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Multicast address used for station "
+ "address - is this what you wanted?",
+ __func__);
+ return -EINVAL;
+ }
+
+ err = init_phy(dev);
+ if (err) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Cannot initialize PHY, aborting.",
+ dev->name);
+ return err;
+ }
+
+ err = ucc_geth_init_mac(ugeth);
+ if (err) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Cannot initialize MAC, aborting.",
+ dev->name);
+ goto err;
+ }
+
+ err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler,
+ 0, "UCC Geth", dev);
+ if (err) {
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Cannot get IRQ for net device, aborting.",
+ dev->name);
+ goto err;
+ }
+
+ phy_start(ugeth->phydev);
+ napi_enable(&ugeth->napi);
+ netif_start_queue(dev);
+
+ device_set_wakeup_capable(&dev->dev,
+ qe_alive_during_sleep() || ugeth->phydev->irq);
+ device_set_wakeup_enable(&dev->dev, ugeth->wol_en);
+
+ return err;
+
+err:
+ ucc_geth_stop(ugeth);
+ return err;
+}
+
+/* Stops the kernel queue, and halts the controller */
+static int ucc_geth_close(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ napi_disable(&ugeth->napi);
+
+ cancel_work_sync(&ugeth->timeout_work);
+ ucc_geth_stop(ugeth);
+ phy_disconnect(ugeth->phydev);
+ ugeth->phydev = NULL;
+
+ free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
+
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+/* Reopen device. This will reset the MAC and PHY. */
+static void ucc_geth_timeout_work(struct work_struct *work)
+{
+ struct ucc_geth_private *ugeth;
+ struct net_device *dev;
+
+ ugeth = container_of(work, struct ucc_geth_private, timeout_work);
+ dev = ugeth->ndev;
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ dev->stats.tx_errors++;
+
+ ugeth_dump_regs(ugeth);
+
+ if (dev->flags & IFF_UP) {
+ /*
+ * Must reset MAC *and* PHY. This is done by reopening
+ * the device.
+ */
+ netif_tx_stop_all_queues(dev);
+ ucc_geth_stop(ugeth);
+ ucc_geth_init_mac(ugeth);
+ /* Must start PHY here */
+ phy_start(ugeth->phydev);
+ netif_tx_start_all_queues(dev);
+ }
+
+ netif_tx_schedule_all(dev);
+}
+
+/*
+ * ucc_geth_timeout gets called when a packet has not been
+ * transmitted after a set amount of time.
+ */
+static void ucc_geth_timeout(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+
+ schedule_work(&ugeth->timeout_work);
+}
+
+
+#ifdef CONFIG_PM
+
+static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
+{
+ struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+
+ if (!netif_running(ndev))
+ return 0;
+
+ netif_device_detach(ndev);
+ napi_disable(&ugeth->napi);
+
+ /*
+ * Disable the controller, otherwise we'll wakeup on any network
+ * activity.
+ */
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
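+ /* For magic-packet wakeup, unmask the magic-packet-detected event
+ (MPD), enable magic packet mode in the MAC and keep the
+ controller running so the wakeup frame can be received */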
+ if (ugeth->wol_en & WAKE_MAGIC) {
+ setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
+ setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
+ ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX);
+ } else if (!(ugeth->wol_en & WAKE_PHY)) {
+ phy_stop(ugeth->phydev);
+ }
+
+ return 0;
+}
+
+static int ucc_geth_resume(struct platform_device *ofdev)
+{
+ struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ int err;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ if (qe_alive_during_sleep()) {
+ if (ugeth->wol_en & WAKE_MAGIC) {
+ ucc_fast_disable(ugeth->uccf, COMM_DIR_RX_AND_TX);
+ clrbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
+ clrbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
+ }
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ } else {
+ /*
+ * Full reinitialization is required if QE shuts down
+ * during sleep.
+ */
+ ucc_geth_memclean(ugeth);
+
+ err = ucc_geth_init_mac(ugeth);
+ if (err) {
+ ugeth_err("%s: Cannot initialize MAC, aborting.",
+ ndev->name);
+ return err;
+ }
+ }
+
+ ugeth->oldlink = 0;
+ ugeth->oldspeed = 0;
+ ugeth->oldduplex = -1;
+
+ phy_stop(ugeth->phydev);
+ phy_start(ugeth->phydev);
+
+ napi_enable(&ugeth->napi);
+ netif_device_attach(ndev);
+
+ return 0;
+}
+
+#else
+#define ucc_geth_suspend NULL
+#define ucc_geth_resume NULL
+#endif
+
+static phy_interface_t to_phy_interface(const char *phy_connection_type)
+{
+ if (strcasecmp(phy_connection_type, "mii") == 0)
+ return PHY_INTERFACE_MODE_MII;
+ if (strcasecmp(phy_connection_type, "gmii") == 0)
+ return PHY_INTERFACE_MODE_GMII;
+ if (strcasecmp(phy_connection_type, "tbi") == 0)
+ return PHY_INTERFACE_MODE_TBI;
+ if (strcasecmp(phy_connection_type, "rmii") == 0)
+ return PHY_INTERFACE_MODE_RMII;
+ if (strcasecmp(phy_connection_type, "rgmii") == 0)
+ return PHY_INTERFACE_MODE_RGMII;
+ if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
+ return PHY_INTERFACE_MODE_RGMII_ID;
+ if (strcasecmp(phy_connection_type, "rgmii-txid") == 0)
+ return PHY_INTERFACE_MODE_RGMII_TXID;
+ if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0)
+ return PHY_INTERFACE_MODE_RGMII_RXID;
+ if (strcasecmp(phy_connection_type, "rtbi") == 0)
+ return PHY_INTERFACE_MODE_RTBI;
+ if (strcasecmp(phy_connection_type, "sgmii") == 0)
+ return PHY_INTERFACE_MODE_SGMII;
+
+ return PHY_INTERFACE_MODE_MII;
+}
+
+static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!ugeth->phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(ugeth->phydev, rq, cmd);
+}
+
+static const struct net_device_ops ucc_geth_netdev_ops = {
+ .ndo_open = ucc_geth_open,
+ .ndo_stop = ucc_geth_close,
+ .ndo_start_xmit = ucc_geth_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = ucc_geth_set_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_rx_mode = ucc_geth_set_multi,
+ .ndo_tx_timeout = ucc_geth_timeout,
+ .ndo_do_ioctl = ucc_geth_ioctl,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ucc_netpoll,
+#endif
+};
+
+static int ucc_geth_probe(struct platform_device* ofdev)
+{
+ struct device *device = &ofdev->dev;
+ struct device_node *np = ofdev->dev.of_node;
+ struct net_device *dev = NULL;
+ struct ucc_geth_private *ugeth = NULL;
+ struct ucc_geth_info *ug_info;
+ struct resource res;
+ int err, ucc_num, max_speed = 0;
+ const unsigned int *prop;
+ const char *sprop;
+ const void *mac_addr;
+ phy_interface_t phy_interface;
+ static const int enet_to_speed[] = {
+ SPEED_10, SPEED_10, SPEED_10,
+ SPEED_100, SPEED_100, SPEED_100,
+ SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
+ };
+ static const phy_interface_t enet_to_phy_interface[] = {
+ PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
+ PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
+ PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
+ PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
+ PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
+ PHY_INTERFACE_MODE_SGMII,
+ };
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ prop = of_get_property(np, "cell-index", NULL);
+ if (!prop) {
+ prop = of_get_property(np, "device-id", NULL);
+ if (!prop)
+ return -ENODEV;
+ }
+
+ ucc_num = *prop - 1;
+ if ((ucc_num < 0) || (ucc_num > 7))
+ return -ENODEV;
+
+ ug_info = &ugeth_info[ucc_num];
+ if (ug_info == NULL) {
+ if (netif_msg_probe(&debug))
+ ugeth_err("%s: [%d] Missing additional data!",
+ __func__, ucc_num);
+ return -ENODEV;
+ }
+
+ ug_info->uf_info.ucc_num = ucc_num;
+
+ sprop = of_get_property(np, "rx-clock-name", NULL);
+ if (sprop) {
+ ug_info->uf_info.rx_clock = qe_clock_source(sprop);
+ if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) ||
+ (ug_info->uf_info.rx_clock > QE_CLK24)) {
+ printk(KERN_ERR
+ "ucc_geth: invalid rx-clock-name property\n");
+ return -EINVAL;
+ }
+ } else {
+ prop = of_get_property(np, "rx-clock", NULL);
+ if (!prop) {
+ /* If both rx-clock-name and rx-clock are missing,
+ we want to tell people to use rx-clock-name. */
+ printk(KERN_ERR
+ "ucc_geth: missing rx-clock-name property\n");
+ return -EINVAL;
+ }
+ if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
+ printk(KERN_ERR
+ "ucc_geth: invalid rx-clock propperty\n");
+ return -EINVAL;
+ }
+ ug_info->uf_info.rx_clock = *prop;
+ }
+
+ sprop = of_get_property(np, "tx-clock-name", NULL);
+ if (sprop) {
+ ug_info->uf_info.tx_clock = qe_clock_source(sprop);
+ if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) ||
+ (ug_info->uf_info.tx_clock > QE_CLK24)) {
+ printk(KERN_ERR
+ "ucc_geth: invalid tx-clock-name property\n");
+ return -EINVAL;
+ }
+ } else {
+ prop = of_get_property(np, "tx-clock", NULL);
+ if (!prop) {
+ printk(KERN_ERR
+ "ucc_geth: missing tx-clock-name property\n");
+ return -EINVAL;
+ }
+ if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
+ printk(KERN_ERR
+ "ucc_geth: invalid tx-clock property\n");
+ return -EINVAL;
+ }
+ ug_info->uf_info.tx_clock = *prop;
+ }
+
+ err = of_address_to_resource(np, 0, &res);
+ if (err)
+ return -EINVAL;
+
+ ug_info->uf_info.regs = res.start;
+ ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
+
+ ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
+ /* Find the TBI PHY node. If it's not there, we don't support SGMII */
+ ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
+
+ /* get the phy interface type, or default to MII */
+ prop = of_get_property(np, "phy-connection-type", NULL);
+ if (!prop) {
+ /* handle interface property present in old trees */
+ prop = of_get_property(ug_info->phy_node, "interface", NULL);
+ if (prop != NULL) {
+ phy_interface = enet_to_phy_interface[*prop];
+ max_speed = enet_to_speed[*prop];
+ } else
+ phy_interface = PHY_INTERFACE_MODE_MII;
+ } else {
+ phy_interface = to_phy_interface((const char *)prop);
+ }
+
+ /* get speed, or derive from PHY interface */
+ if (max_speed == 0)
+ switch (phy_interface) {
+ case PHY_INTERFACE_MODE_GMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_TBI:
+ case PHY_INTERFACE_MODE_RTBI:
+ case PHY_INTERFACE_MODE_SGMII:
+ max_speed = SPEED_1000;
+ break;
+ default:
+ max_speed = SPEED_100;
+ break;
+ }
+
+ if (max_speed == SPEED_1000) {
+ /* configure muram FIFOs for gigabit operation */
+ ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
+ ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
+ ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
+ ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT;
+ ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT;
+ ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
+ ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4;
+
+ /* If the QE has 46 snums, we need to support four UECs at
+ * 1000Base-T simultaneously, so allocate more threads to Rx.
+ */
+ if (qe_get_num_of_snums() == 46)
+ ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6;
+ else
+ ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
+ }
+
+ if (netif_msg_probe(&debug))
+ printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
+ ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
+ ug_info->uf_info.irq);
+
+ /* Create an ethernet device instance */
+ dev = alloc_etherdev(sizeof(*ugeth));
+
+ if (dev == NULL)
+ return -ENOMEM;
+
+ ugeth = netdev_priv(dev);
+ spin_lock_init(&ugeth->lock);
+
+ /* Create CQs for hash tables */
+ INIT_LIST_HEAD(&ugeth->group_hash_q);
+ INIT_LIST_HEAD(&ugeth->ind_hash_q);
+
+ dev_set_drvdata(device, dev);
+
+ /* Set the dev->base_addr to the UCC register region */
+ dev->base_addr = (unsigned long)(ug_info->uf_info.regs);
+
+ SET_NETDEV_DEV(dev, device);
+
+ /* Fill in the dev structure */
+ uec_set_ethtool_ops(dev);
+ dev->netdev_ops = &ucc_geth_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
+ netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
+ dev->mtu = 1500;
+
+ ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
+ ugeth->phy_interface = phy_interface;
+ ugeth->max_speed = max_speed;
+
+ err = register_netdev(dev);
+ if (err) {
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: Cannot register net device, aborting.",
+ dev->name);
+ free_netdev(dev);
+ return err;
+ }
+
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr)
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+
+ ugeth->ug_info = ug_info;
+ ugeth->dev = device;
+ ugeth->ndev = dev;
+ ugeth->node = np;
+
+ return 0;
+}
+
+static int ucc_geth_remove(struct platform_device *ofdev)
+{
+ struct device *device = &ofdev->dev;
+ struct net_device *dev = dev_get_drvdata(device);
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ free_netdev(dev);
+ ucc_geth_memclean(ugeth);
+ dev_set_drvdata(device, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id ucc_geth_match[] = {
+ {
+ .type = "network",
+ .compatible = "ucc_geth",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, ucc_geth_match);
+
+static struct platform_driver ucc_geth_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = ucc_geth_match,
+ },
+ .probe = ucc_geth_probe,
+ .remove = ucc_geth_remove,
+ .suspend = ucc_geth_suspend,
+ .resume = ucc_geth_resume,
+};
+
+static int __init ucc_geth_init(void)
+{
+ int i, ret;
+
+ if (netif_msg_drv(&debug))
+ printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
+ for (i = 0; i < 8; i++)
+ memcpy(&(ugeth_info[i]), &ugeth_primary_info,
+ sizeof(ugeth_primary_info));
+
+ ret = platform_driver_register(&ucc_geth_driver);
+
+ return ret;
+}
+
+static void __exit ucc_geth_exit(void)
+{
+ platform_driver_unregister(&ucc_geth_driver);
+}
+
+module_init(ucc_geth_init);
+module_exit(ucc_geth_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
new file mode 100644
index 000000000000..d12fcad145e9
--- /dev/null
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -0,0 +1,1240 @@
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2006-2009. All rights reserved.
+ *
+ * Author: Shlomi Gridish <gridish@freescale.com>
+ *
+ * Description:
+ * Internal header file for UCC Gigabit Ethernet unit routines.
+ *
+ * Changelog:
+ * Jun 28, 2006 Li Yang <LeoLi@freescale.com>
+ * - Rearrange code and style fixes
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef __UCC_GETH_H__
+#define __UCC_GETH_H__
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include <asm/immap_qe.h>
+#include <asm/qe.h>
+
+#include <asm/ucc.h>
+#include <asm/ucc_fast.h>
+
+#define DRV_DESC "QE UCC Gigabit Ethernet Controller"
+#define DRV_NAME "ucc_geth"
+#define DRV_VERSION "1.1"
+
+#define NUM_TX_QUEUES 8
+#define NUM_RX_QUEUES 8
+#define NUM_BDS_IN_PREFETCHED_BDS 4
+#define TX_IP_OFFSET_ENTRY_MAX 8
+#define NUM_OF_PADDRS 4
+#define ENET_INIT_PARAM_MAX_ENTRIES_RX 9
+#define ENET_INIT_PARAM_MAX_ENTRIES_TX 8
+
+struct ucc_geth {
+ struct ucc_fast uccf;
+ u8 res0[0x100 - sizeof(struct ucc_fast)];
+
+ u32 maccfg1; /* mac configuration reg. 1 */
+ u32 maccfg2; /* mac configuration reg. 2 */
+ u32 ipgifg; /* interframe gap reg. */
+ u32 hafdup; /* half-duplex reg. */
+ u8 res1[0x10];
+ u8 miimng[0x18]; /* MII management structure moved to _mii.h */
+ u32 ifctl; /* interface control reg */
+ u32 ifstat; /* interface status reg */
+ u32 macstnaddr1; /* mac station address part 1 reg */
+ u32 macstnaddr2; /* mac station address part 2 reg */
+ u8 res2[0x8];
+ u32 uempr; /* UCC Ethernet Mac parameter reg */
+ u32 utbipar; /* UCC tbi address reg */
+ u16 uescr; /* UCC Ethernet statistics control reg */
+ u8 res3[0x180 - 0x15A];
+ u32 tx64; /* Total number of frames (including bad
+ frames) transmitted that were exactly of the
+ minimal length (64 for untagged, 68 for
+ tagged, or with length exactly equal to the
+ parameter MINLength) */
+ u32 tx127; /* Total number of frames (including bad
+ frames) transmitted that were between
+ MINLength (Including FCS length==4) and 127
+ octets */
+ u32 tx255; /* Total number of frames (including bad
+ frames) transmitted that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 rx64; /* Total number of frames received including
+ bad frames that were exactly of the minimal
+ length (64 bytes) */
+ u32 rx127; /* Total number of frames (including bad
+ frames) received that were between MINLength
+ (Including FCS length==4) and 127 octets */
+ u32 rx255; /* Total number of frames (including bad
+ frames) received that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 txok; /* Total number of octets residing in frames
+ that were involved in successful
+ transmission */
+ u16 txcf; /* Total number of PAUSE control frames
+ transmitted by this MAC */
+ u8 res4[0x2];
+ u32 tmca; /* Total number of frames that were transmitted
+ successfully with the group address bit set
+ that are not broadcast frames */
+ u32 tbca; /* Total number of frames transmitted
+ successfully that had destination address
+ field equal to the broadcast address */
+ u32 rxfok; /* Total number of frames received OK */
+ u32 rxbok; /* Total number of octets received OK */
+ u32 rbyt; /* Total number of octets received including
+ octets in bad frames. Must be implemented in
+ HW because it includes octets in frames that
+ never even reach the UCC */
+ u32 rmca; /* Total number of frames that were received
+ successfully with the group address bit set
+ that are not broadcast frames */
+ u32 rbca; /* Total number of frames received successfully
+ that had destination address equal to the
+ broadcast address */
+ u32 scar; /* Statistics carry register */
+ u32 scam; /* Statistics carry mask register */
+ u8 res5[0x200 - 0x1c4];
+} __packed;
+
+/* UCC GETH TEMODER Register */
+#define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics
+ */
+#define TEMODER_SCHEDULER_ENABLE 0x2000 /* enable scheduler */
+#define TEMODER_IP_CHECKSUM_GENERATE 0x0400 /* generate IPv4
+ checksums */
+#define TEMODER_PERFORMANCE_OPTIMIZATION_MODE1 0x0200 /* enable performance
+ optimization
+ enhancement (mode1) */
+#define TEMODER_RMON_STATISTICS 0x0100 /* enable tx statistics
+ */
+#define TEMODER_NUM_OF_QUEUES_SHIFT (15 - 15) /* Number of queues <<
+ shift */
+
+/* UCC GETH REMODER Register */
+#define REMODER_RX_RMON_STATISTICS_ENABLE 0x00001000 /* enable Rx
+ statistics */
+#define REMODER_RX_EXTENDED_FEATURES 0x80000000 /* enable
+ extended
+ features */
+#define REMODER_VLAN_OPERATION_TAGGED_SHIFT (31 - 9) /* vlan operation
+ tagged << shift */
+#define REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT (31 - 10) /* vlan operation non
+ tagged << shift */
+#define REMODER_RX_QOS_MODE_SHIFT (31 - 15) /* rx QoS mode << shift
+ */
+#define REMODER_RMON_STATISTICS 0x00001000 /* enable rx
+ statistics */
+#define REMODER_RX_EXTENDED_FILTERING 0x00000800 /* extended
+ filtering
+ vs.
+ mpc82xx-like
+ filtering */
+#define REMODER_NUM_OF_QUEUES_SHIFT (31 - 23) /* Number of queues <<
+ shift */
+#define REMODER_DYNAMIC_MAX_FRAME_LENGTH 0x00000008 /* enable
+ dynamic max
+ frame length
+ */
+#define REMODER_DYNAMIC_MIN_FRAME_LENGTH 0x00000004 /* enable
+ dynamic min
+ frame length
+ */
+#define REMODER_IP_CHECKSUM_CHECK 0x00000002 /* check IPv4
+ checksums */
+#define REMODER_IP_ADDRESS_ALIGNMENT 0x00000001 /* align ip
+ address to
+ 4-byte
+ boundary */
+
+/* UCC GETH Event Register */
+#define UCCE_TXB (UCC_GETH_UCCE_TXB7 | UCC_GETH_UCCE_TXB6 | \
+ UCC_GETH_UCCE_TXB5 | UCC_GETH_UCCE_TXB4 | \
+ UCC_GETH_UCCE_TXB3 | UCC_GETH_UCCE_TXB2 | \
+ UCC_GETH_UCCE_TXB1 | UCC_GETH_UCCE_TXB0)
+
+#define UCCE_RXB (UCC_GETH_UCCE_RXB7 | UCC_GETH_UCCE_RXB6 | \
+ UCC_GETH_UCCE_RXB5 | UCC_GETH_UCCE_RXB4 | \
+ UCC_GETH_UCCE_RXB3 | UCC_GETH_UCCE_RXB2 | \
+ UCC_GETH_UCCE_RXB1 | UCC_GETH_UCCE_RXB0)
+
+#define UCCE_RXF (UCC_GETH_UCCE_RXF7 | UCC_GETH_UCCE_RXF6 | \
+ UCC_GETH_UCCE_RXF5 | UCC_GETH_UCCE_RXF4 | \
+ UCC_GETH_UCCE_RXF3 | UCC_GETH_UCCE_RXF2 | \
+ UCC_GETH_UCCE_RXF1 | UCC_GETH_UCCE_RXF0)
+
+#define UCCE_OTHER (UCC_GETH_UCCE_SCAR | UCC_GETH_UCCE_GRA | \
+ UCC_GETH_UCCE_CBPR | UCC_GETH_UCCE_BSY | \
+ UCC_GETH_UCCE_RXC | UCC_GETH_UCCE_TXC | UCC_GETH_UCCE_TXE)
+
+#define UCCE_RX_EVENTS (UCCE_RXF | UCC_GETH_UCCE_BSY)
+#define UCCE_TX_EVENTS (UCCE_TXB | UCC_GETH_UCCE_TXE)
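+
+/*
+ * A sketch of how the interrupt handler is expected to use these
+ * groups (the driver's IRQ handler in ucc_geth.c is authoritative):
+ *
+ *	ucce = in_be32(uccf->p_ucce) & in_be32(uccf->p_uccm);
+ *	out_be32(uccf->p_ucce, ucce);
+ *	if (ucce & UCCE_RX_EVENTS)
+ *		napi_schedule(&ugeth->napi);
+ */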
+
+/* TBI defines */
+#define ENET_TBI_MII_CR 0x00 /* Control */
+#define ENET_TBI_MII_SR 0x01 /* Status */
+#define ENET_TBI_MII_ANA 0x04 /* AN advertisement */
+#define ENET_TBI_MII_ANLPBPA 0x05 /* AN link partner base page ability */
+#define ENET_TBI_MII_ANEX 0x06 /* AN expansion */
+#define ENET_TBI_MII_ANNPT 0x07 /* AN next page transmit */
+#define ENET_TBI_MII_ANLPANP 0x08 /* AN link partner ability next page */
+#define ENET_TBI_MII_EXST 0x0F /* Extended status */
+#define ENET_TBI_MII_JD 0x10 /* Jitter diagnostics */
+#define ENET_TBI_MII_TBICON 0x11 /* TBI control */
+
+/* TBI MDIO register bit fields*/
+#define TBISR_LSTATUS 0x0004
+#define TBICON_CLK_SELECT 0x0020
+#define TBIANA_ASYMMETRIC_PAUSE 0x0100
+#define TBIANA_SYMMETRIC_PAUSE 0x0080
+#define TBIANA_HALF_DUPLEX 0x0040
+#define TBIANA_FULL_DUPLEX 0x0020
+#define TBICR_PHY_RESET 0x8000
+#define TBICR_ANEG_ENABLE 0x1000
+#define TBICR_RESTART_ANEG 0x0200
+#define TBICR_FULL_DUPLEX 0x0100
+#define TBICR_SPEED1_SET 0x0040
+
+#define TBIANA_SETTINGS ( \
+ TBIANA_ASYMMETRIC_PAUSE \
+ | TBIANA_SYMMETRIC_PAUSE \
+ | TBIANA_FULL_DUPLEX \
+ )
+#define TBICR_SETTINGS ( \
+ TBICR_PHY_RESET \
+ | TBICR_ANEG_ENABLE \
+ | TBICR_FULL_DUPLEX \
+ | TBICR_SPEED1_SET \
+ )
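+
+/*
+ * A minimal sketch, assuming a tbiphy phy_device, of how these
+ * settings are typically programmed when bringing up an SGMII link:
+ *
+ *	phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
+ *	phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
+ *	phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
+ */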
+
+/* UCC GETH MACCFG1 (MAC Configuration 1 Register) */
+#define MACCFG1_FLOW_RX 0x00000020 /* Flow Control
+ Rx */
+#define MACCFG1_FLOW_TX 0x00000010 /* Flow Control
+ Tx */
+#define MACCFG1_ENABLE_SYNCHED_RX 0x00000008 /* Rx Enable
+ synchronized
+ to Rx stream
+ */
+#define MACCFG1_ENABLE_RX 0x00000004 /* Enable Rx */
+#define MACCFG1_ENABLE_SYNCHED_TX 0x00000002 /* Tx Enable
+ synchronized
+ to Tx stream
+ */
+#define MACCFG1_ENABLE_TX 0x00000001 /* Enable Tx */
+
+/* UCC GETH MACCFG2 (MAC Configuration 2 Register) */
+#define MACCFG2_PREL_SHIFT (31 - 19) /* Preamble
+ Length <<
+ shift */
+#define MACCFG2_PREL_MASK 0x0000f000 /* Preamble
+ Length mask */
+#define MACCFG2_SRP 0x00000080 /* Soft Receive
+ Preamble */
+#define MACCFG2_STP 0x00000040 /* Soft
+ Transmit
+ Preamble */
+#define MACCFG2_RESERVED_1 0x00000020 /* Reserved -
+ must be set
+ to 1 */
+#define MACCFG2_LC 0x00000010 /* Length Check
+ */
+#define MACCFG2_MPE 0x00000008 /* Magic packet
+ detect */
+#define MACCFG2_FDX 0x00000001 /* Full Duplex */
+#define MACCFG2_FDX_MASK 0x00000001 /* Full Duplex
+ mask */
+#define MACCFG2_PAD_CRC 0x00000004
+#define MACCFG2_CRC_EN 0x00000002
+#define MACCFG2_PAD_AND_CRC_MODE_NONE 0x00000000 /* Neither
+ Padding
+ short frames
+ nor CRC */
+#define MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY 0x00000002 /* Append CRC
+ only */
+#define MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC 0x00000004
+#define MACCFG2_INTERFACE_MODE_NIBBLE 0x00000100 /* nibble mode
+ (MII/RMII/RGMII
+ 10/100 Mbps) */
+#define MACCFG2_INTERFACE_MODE_BYTE 0x00000200 /* byte mode
+ (GMII/TBI/RTBI/RGMII
+ 1000 Mbps) */
+#define MACCFG2_INTERFACE_MODE_MASK 0x00000300 /* mask
+ covering all
+ relevant
+ bits */
+
+/* UCC GETH IPGIFG (Inter-frame Gap / Inter-Frame Gap Register) */
+#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT (31 - 7) /* Non
+ back-to-back
+ inter frame
+ gap part 1.
+ << shift */
+#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT (31 - 15) /* Non
+ back-to-back
+ inter frame
+ gap part 2.
+ << shift */
+#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT (31 - 23) /* Minimum IFG
+ Enforcement
+ << shift */
+#define IPGIFG_BACK_TO_BACK_IFG_SHIFT (31 - 31) /* back-to-back
+ inter frame
+ gap << shift
+ */
+#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX 127 /* Non back-to-back
+ inter frame gap part
+ 1. max val */
+#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX 127 /* Non back-to-back
+ inter frame gap part
+ 2. max val */
+#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX 255 /* Minimum IFG
+ Enforcement max val */
+#define IPGIFG_BACK_TO_BACK_IFG_MAX 127 /* back-to-back inter
+ frame gap max val */
+#define IPGIFG_NBTB_CS_IPG_MASK 0x7F000000
+#define IPGIFG_NBTB_IPG_MASK 0x007F0000
+#define IPGIFG_MIN_IFG_MASK 0x0000FF00
+#define IPGIFG_BTB_IPG_MASK 0x0000007F
+
+/* UCC GETH HAFDUP (Half Duplex Register) */
+#define HALFDUP_ALT_BEB_TRUNCATION_SHIFT (31 - 11) /* Alternate
+ Binary
+ Exponential
+ Backoff
+ Truncation
+ << shift */
+#define HALFDUP_ALT_BEB_TRUNCATION_MAX 0xf /* Alternate Binary
+ Exponential Backoff
+ Truncation max val */
+#define HALFDUP_ALT_BEB 0x00080000 /* Alternate
+ Binary
+ Exponential
+ Backoff */
+#define HALFDUP_BACK_PRESSURE_NO_BACKOFF 0x00040000 /* Back
+ pressure no
+ backoff */
+#define HALFDUP_NO_BACKOFF 0x00020000 /* No Backoff */
+#define HALFDUP_EXCESSIVE_DEFER 0x00010000 /* Excessive
+ Defer */
+#define HALFDUP_MAX_RETRANSMISSION_SHIFT (31 - 19) /* Maximum
+ Retransmission
+ << shift */
+#define HALFDUP_MAX_RETRANSMISSION_MAX 0xf /* Maximum
+ Retransmission max
+ val */
+#define HALFDUP_COLLISION_WINDOW_SHIFT (31 - 31) /* Collision
+ Window <<
+ shift */
+#define HALFDUP_COLLISION_WINDOW_MAX 0x3f /* Collision Window max
+ val */
+#define HALFDUP_ALT_BEB_TR_MASK 0x00F00000
+#define HALFDUP_RETRANS_MASK 0x0000F000
+#define HALFDUP_COL_WINDOW_MASK 0x0000003F
+
+/* UCC GETH UCCS (Ethernet Status Register) */
+#define UCCS_BPR 0x02 /* Back pressure (in
+ half duplex mode) */
+#define UCCS_PAU 0x02 /* Pause state (in full
+ duplex mode) */
+#define UCCS_MPD 0x01 /* Magic Packet
+ Detected */
+
+/* UCC GETH IFSTAT (Interface Status Register) */
+#define IFSTAT_EXCESS_DEFER 0x00000200 /* Excessive
+ transmission
+ defer */
+
+/* UCC GETH MACSTNADDR1 (Station Address Part 1 Register) */
+#define MACSTNADDR1_OCTET_6_SHIFT (31 - 7) /* Station
+ address 6th
+ octet <<
+ shift */
+#define MACSTNADDR1_OCTET_5_SHIFT (31 - 15) /* Station
+ address 5th
+ octet <<
+ shift */
+#define MACSTNADDR1_OCTET_4_SHIFT (31 - 23) /* Station
+ address 4th
+ octet <<
+ shift */
+#define MACSTNADDR1_OCTET_3_SHIFT (31 - 31) /* Station
+ address 3rd
+ octet <<
+ shift */
+
+/* UCC GETH MACSTNADDR2 (Station Address Part 2 Register) */
+#define MACSTNADDR2_OCTET_2_SHIFT (31 - 7) /* Station
+ address 2nd
+ octet <<
+ shift */
+#define MACSTNADDR2_OCTET_1_SHIFT (31 - 15) /* Station
+ address 1st
+ octet <<
+ shift */
+
+/* UCC GETH UEMPR (Ethernet Mac Parameter Register) */
+#define UEMPR_PAUSE_TIME_VALUE_SHIFT (31 - 15) /* Pause time
+ value <<
+ shift */
+#define UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT (31 - 31) /* Extended
+ pause time
+ value <<
+ shift */
+
+/* UCC GETH UTBIPAR (Ten Bit Interface Physical Address Register) */
+#define UTBIPAR_PHY_ADDRESS_SHIFT (31 - 31) /* Phy address
+ << shift */
+#define UTBIPAR_PHY_ADDRESS_MASK 0x0000001f /* Phy address
+ mask */
+
+/* UCC GETH UESCR (Ethernet Statistics Control Register) */
+#define UESCR_AUTOZ 0x8000 /* Automatically zero
+ addressed
+ statistical counter
+ values */
+#define UESCR_CLRCNT 0x4000 /* Clear all statistics
+ counters */
+#define UESCR_MAXCOV_SHIFT (15 - 7) /* Max
+ Coalescing
+ Value <<
+ shift */
+#define UESCR_SCOV_SHIFT (15 - 15) /* Status
+ Coalescing
+ Value <<
+ shift */
+
+/* UCC GETH UDSR (Data Synchronization Register) */
+#define UDSR_MAGIC 0x067E
+
+struct ucc_geth_thread_data_tx {
+ u8 res0[104];
+} __packed;
+
+struct ucc_geth_thread_data_rx {
+ u8 res0[40];
+} __packed;
+
+/* Send Queue Queue-Descriptor */
+struct ucc_geth_send_queue_qd {
+ u32 bd_ring_base; /* pointer to BD ring base address */
+ u8 res0[0x8];
+ u32 last_bd_completed_address;/* initialize to last entry in BD ring */
+ u8 res1[0x30];
+} __packed;
+
+struct ucc_geth_send_queue_mem_region {
+ struct ucc_geth_send_queue_qd sqqd[NUM_TX_QUEUES];
+} __packed;
+
+struct ucc_geth_thread_tx_pram {
+ u8 res0[64];
+} __packed;
+
+struct ucc_geth_thread_rx_pram {
+ u8 res0[128];
+} __packed;
+
+#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64
+#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64
+#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16 96
+
+struct ucc_geth_scheduler {
+ u16 cpucount0; /* CPU packet counter */
+ u16 cpucount1; /* CPU packet counter */
+ u16 cecount0; /* QE packet counter */
+ u16 cecount1; /* QE packet counter */
+ u16 cpucount2; /* CPU packet counter */
+ u16 cpucount3; /* CPU packet counter */
+ u16 cecount2; /* QE packet counter */
+ u16 cecount3; /* QE packet counter */
+ u16 cpucount4; /* CPU packet counter */
+ u16 cpucount5; /* CPU packet counter */
+ u16 cecount4; /* QE packet counter */
+ u16 cecount5; /* QE packet counter */
+ u16 cpucount6; /* CPU packet counter */
+ u16 cpucount7; /* CPU packet counter */
+ u16 cecount6; /* QE packet counter */
+ u16 cecount7; /* QE packet counter */
+ u32 weightstatus[NUM_TX_QUEUES]; /* accumulated weight factor */
+ u32 rtsrshadow; /* temporary variable handled by QE */
+ u32 time; /* temporary variable handled by QE */
+ u32 ttl; /* temporary variable handled by QE */
+ u32 mblinterval; /* max burst length interval */
+ u16 nortsrbytetime; /* normalized value of byte time in tsr units */
+ u8 fracsiz; /* radix 2 log value of denom. of
+ NorTSRByteTime */
+ u8 res0[1];
+ u8 strictpriorityq; /* Strict Priority Mask register */
+ u8 txasap; /* Transmit ASAP register */
+ u8 extrabw; /* Extra BandWidth register */
+ u8 oldwfqmask; /* temporary variable handled by QE */
+ u8 weightfactor[NUM_TX_QUEUES];
+ /**< weight factor for queues */
+ u32 minw; /* temporary variable handled by QE */
+ u8 res1[0x70 - 0x64];
+} __packed;
+
+struct ucc_geth_tx_firmware_statistics_pram {
+ u32 sicoltx; /* single collision */
+ u32 mulcoltx; /* multiple collision */
+ u32 latecoltxfr; /* late collision */
+ u32 frabortduecol; /* frames aborted due to transmit collision */
+ u32 frlostinmactxer; /* frames lost due to internal MAC error
+ transmission that are not counted on any
+ other counter */
+ u32 carriersenseertx; /* carrier sense error */
+ u32 frtxok; /* frames transmitted OK */
+ u32 txfrexcessivedefer; /* frames with deferral time greater than
+ specified threshold */
+ u32 txpkts256; /* total packets (including bad) between 256
+ and 511 octets */
+ u32 txpkts512; /* total packets (including bad) between 512
+ and 1023 octets */
+ u32 txpkts1024; /* total packets (including bad) between 1024
+ and 1518 octets */
+ u32 txpktsjumbo; /* total packets (including bad) between 1024
+ and MAXLength octets */
+} __packed;
+
+struct ucc_geth_rx_firmware_statistics_pram {
+ u32 frrxfcser; /* frames with crc error */
+ u32 fraligner; /* frames with alignment error */
+ u32 inrangelenrxer; /* in range length error */
+ u32 outrangelenrxer; /* out of range length error */
+ u32 frtoolong; /* frame too long */
+ u32 runt; /* runt */
+ u32 verylongevent; /* very long event */
+ u32 symbolerror; /* symbol error */
+ u32 dropbsy; /* drop because of BD not ready */
+ u8 res0[0x8];
+ u32 mismatchdrop; /* drop because of MAC filtering (e.g. address
+ or type mismatch) */
+ u32 underpkts; /* total frames less than 64 octets */
+ u32 pkts256; /* total frames (including bad) between 256 and
+ 511 octets */
+ u32 pkts512; /* total frames (including bad) between 512 and
+ 1023 octets */
+ u32 pkts1024; /* total frames (including bad) between 1024
+ and 1518 octets */
+ u32 pktsjumbo; /* total frames (including bad) between 1024
+ and MAXLength octets */
+ u32 frlossinmacer; /* frames lost because of internal MAC error
+ that is not counted in any other counter */
+ u32 pausefr; /* pause frames */
+ u8 res1[0x4];
+ u32 removevlan; /* total frames that had their VLAN tag removed
+ */
+ u32 replacevlan; /* total frames that had their VLAN tag
+ replaced */
+ u32 insertvlan; /* total frames that had their VLAN tag
+ inserted */
+} __packed;
+
+struct ucc_geth_rx_interrupt_coalescing_entry {
+ u32 interruptcoalescingmaxvalue; /* interrupt coalescing max
+ value */
+ u32 interruptcoalescingcounter; /* interrupt coalescing counter,
+ initialize to
+ interruptcoalescingmaxvalue */
+} __packed;
+
+struct ucc_geth_rx_interrupt_coalescing_table {
+ struct ucc_geth_rx_interrupt_coalescing_entry coalescingentry[NUM_RX_QUEUES];
+ /**< interrupt coalescing entry */
+} __packed;
+
+struct ucc_geth_rx_prefetched_bds {
+ struct qe_bd bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */
+} __packed;
+
+struct ucc_geth_rx_bd_queues_entry {
+ u32 bdbaseptr; /* BD base pointer */
+ u32 bdptr; /* BD pointer */
+ u32 externalbdbaseptr; /* external BD base pointer */
+ u32 externalbdptr; /* external BD pointer */
+} __packed;
+
+struct ucc_geth_tx_global_pram {
+ u16 temoder;
+ u8 res0[0x38 - 0x02];
+ u32 sqptr; /* a base pointer to send queue memory region */
+ u32 schedulerbasepointer; /* a base pointer to scheduler memory
+ region */
+ u32 txrmonbaseptr; /* base pointer to Tx RMON statistics counter */
+ u32 tstate; /* tx internal state. High byte contains
+ function code */
+ u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX];
+ u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */
+ u32 tqptr; /* a base pointer to the Tx Queues Memory
+ Region */
+ u8 res2[0x80 - 0x74];
+} __packed;
+
+/* structure representing Extended Filtering Global Parameters in PRAM */
+struct ucc_geth_exf_global_pram {
+ u32 l2pcdptr; /* individual address filter, high */
+ u8 res0[0x10 - 0x04];
+} __packed;
+
+struct ucc_geth_rx_global_pram {
+ u32 remoder; /* ethernet mode reg. */
+ u32 rqptr; /* base pointer to the Rx Queues Memory Region*/
+ u32 res0[0x1];
+ u8 res1[0x20 - 0xC];
+ u16 typeorlen; /* cutoff point below which the type/len
+ field is treated as a length */
+ u8 res2[0x1];
+ u8 rxgstpack; /* acknowledgement on GRACEFUL STOP RX command*/
+ u32 rxrmonbaseptr; /* base pointer to Rx RMON statistics counter */
+ u8 res3[0x30 - 0x28];
+ u32 intcoalescingptr; /* Interrupt coalescing table pointer */
+ u8 res4[0x36 - 0x34];
+ u8 rstate; /* rx internal state. High byte contains
+ function code */
+ u8 res5[0x46 - 0x37];
+ u16 mrblr; /* max receive buffer length reg. */
+ u32 rbdqptr; /* base pointer to RxBD parameter table
+ description */
+ u16 mflr; /* max frame length reg. */
+ u16 minflr; /* min frame length reg. */
+ u16 maxd1; /* max dma1 length reg. */
+ u16 maxd2; /* max dma2 length reg. */
+ u32 ecamptr; /* external CAM address */
+ u32 l2qt; /* VLAN priority mapping table. */
+ u32 l3qt[0x8]; /* IP priority mapping table. */
+ u16 vlantype; /* vlan type */
+ u16 vlantci; /* default vlan tci */
+ u8 addressfiltering[64]; /* address filtering data structure */
+ u32 exfGlobalParam; /* base address for extended filtering global
+ parameters */
+ u8 res6[0x100 - 0xC4]; /* Initialize to zero */
+} __packed;
+
+#define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01
+
+/* structure representing InitEnet command */
+struct ucc_geth_init_pram {
+ u8 resinit1;
+ u8 resinit2;
+ u8 resinit3;
+ u8 resinit4;
+ u16 resinit5;
+ u8 res1[0x1];
+ u8 largestexternallookupkeysize;
+ u32 rgftgfrxglobal;
+ u32 rxthread[ENET_INIT_PARAM_MAX_ENTRIES_RX]; /* rx threads */
+ u8 res2[0x38 - 0x30];
+ u32 txglobal; /* tx global */
+ u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */
+ u8 res3[0x1];
+} __packed;
+
+#define ENET_INIT_PARAM_RGF_SHIFT (32 - 4)
+#define ENET_INIT_PARAM_TGF_SHIFT (32 - 8)
+
+#define ENET_INIT_PARAM_RISC_MASK 0x0000003f
+#define ENET_INIT_PARAM_PTR_MASK 0x00ffffc0
+#define ENET_INIT_PARAM_SNUM_MASK 0xff000000
+#define ENET_INIT_PARAM_SNUM_SHIFT 24
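+
+/*
+ * A sketch of how an InitEnet thread entry is packed from these
+ * masks; snum, risc and pram_offset stand in for the caller's values:
+ *
+ *	entry = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
+ *		(pram_offset & ENET_INIT_PARAM_PTR_MASK) |
+ *		(risc & ENET_INIT_PARAM_RISC_MASK);
+ */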
+
+#define ENET_INIT_PARAM_MAGIC_RES_INIT1 0x06
+#define ENET_INIT_PARAM_MAGIC_RES_INIT2 0x30
+#define ENET_INIT_PARAM_MAGIC_RES_INIT3 0xff
+#define ENET_INIT_PARAM_MAGIC_RES_INIT4 0x00
+#define ENET_INIT_PARAM_MAGIC_RES_INIT5 0x0400
+
+/* structure representing 82xx Address Filtering Enet Address in PRAM */
+struct ucc_geth_82xx_enet_address {
+ u8 res1[0x2];
+ u16 h; /* address (MSB) */
+ u16 m; /* address */
+ u16 l; /* address (LSB) */
+} __packed;
+
+/* structure representing 82xx Address Filtering PRAM */
+struct ucc_geth_82xx_address_filtering_pram {
+ u32 iaddr_h; /* individual address filter, high */
+ u32 iaddr_l; /* individual address filter, low */
+ u32 gaddr_h; /* group address filter, high */
+ u32 gaddr_l; /* group address filter, low */
+ struct ucc_geth_82xx_enet_address __iomem taddr;
+ struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS];
+ u8 res0[0x40 - 0x38];
+} __packed;
+
+/* GETH Tx firmware statistics structure, used when calling
+ UCC_GETH_GetStatistics. */
+struct ucc_geth_tx_firmware_statistics {
+ u32 sicoltx; /* single collision */
+ u32 mulcoltx; /* multiple collision */
+ u32 latecoltxfr; /* late collision */
+ u32 frabortduecol; /* frames aborted due to transmit collision */
+ u32 frlostinmactxer; /* frames lost due to internal MAC error
+ transmission that are not counted on any
+ other counter */
+ u32 carriersenseertx; /* carrier sense error */
+ u32 frtxok; /* frames transmitted OK */
+ u32 txfrexcessivedefer; /* frames with deferral time greater than
+ specified threshold */
+ u32 txpkts256; /* total packets (including bad) between 256
+ and 511 octets */
+ u32 txpkts512; /* total packets (including bad) between 512
+ and 1023 octets */
+ u32 txpkts1024; /* total packets (including bad) between 1024
+ and 1518 octets */
+ u32 txpktsjumbo; /* total packets (including bad) between 1024
+ and MAXLength octets */
+} __packed;
+
+/* GETH Rx firmware statistics structure, used when calling
+ UCC_GETH_GetStatistics. */
+struct ucc_geth_rx_firmware_statistics {
+ u32 frrxfcser; /* frames with crc error */
+ u32 fraligner; /* frames with alignment error */
+ u32 inrangelenrxer; /* in range length error */
+ u32 outrangelenrxer; /* out of range length error */
+ u32 frtoolong; /* frame too long */
+ u32 runt; /* runt */
+ u32 verylongevent; /* very long event */
+ u32 symbolerror; /* symbol error */
+ u32 dropbsy; /* drop because of BD not ready */
+ u8 res0[0x8];
+ u32 mismatchdrop; /* drop because of MAC filtering (e.g. address
+ or type mismatch) */
+ u32 underpkts; /* total frames less than 64 octets */
+ u32 pkts256; /* total frames (including bad) between 256 and
+ 511 octets */
+ u32 pkts512; /* total frames (including bad) between 512 and
+ 1023 octets */
+ u32 pkts1024; /* total frames (including bad) between 1024
+ and 1518 octets */
+ u32 pktsjumbo; /* total frames (including bad) between 1024
+ and MAXLength octets */
+ u32 frlossinmacer; /* frames lost because of internal MAC error
+ that is not counted in any other counter */
+ u32 pausefr; /* pause frames */
+ u8 res1[0x4];
+ u32 removevlan; /* total frames that had their VLAN tag removed
+ */
+ u32 replacevlan; /* total frames that had their VLAN tag
+ replaced */
+ u32 insertvlan; /* total frames that had their VLAN tag
+ inserted */
+} __packed;
+
+/* GETH hardware statistics structure, used when calling
+ UCC_GETH_GetStatistics. */
+struct ucc_geth_hardware_statistics {
+ u32 tx64; /* Total number of frames (including bad
+ frames) transmitted that were exactly of the
+ minimal length (64 for untagged, 68 for
+ tagged, or with length exactly equal to the
+ parameter MINLength) */
+ u32 tx127; /* Total number of frames (including bad
+ frames) transmitted that were between
+ MINLength (Including FCS length==4) and 127
+ octets */
+ u32 tx255; /* Total number of frames (including bad
+ frames) transmitted that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 rx64; /* Total number of frames received including
+ bad frames that were exactly of the minimal
+ length (64 bytes) */
+ u32 rx127; /* Total number of frames (including bad
+ frames) received that were between MINLength
+ (Including FCS length==4) and 127 octets */
+ u32 rx255; /* Total number of frames (including bad
+ frames) received that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 txok; /* Total number of octets residing in frames
+ that were involved in successful
+ transmission */
+ u16 txcf; /* Total number of PAUSE control frames
+ transmitted by this MAC */
+ u32 tmca; /* Total number of frames that were transmitted
+ successfully with the group address bit set
+ that are not broadcast frames */
+ u32 tbca; /* Total number of frames transmitted
+ successfully that had destination address
+ field equal to the broadcast address */
+ u32 rxfok; /* Total number of frames received OK */
+ u32 rxbok; /* Total number of octets received OK */
+ u32 rbyt; /* Total number of octets received including
+ octets in bad frames. Must be implemented in
+ HW because it includes octets in frames that
+ never even reach the UCC */
+ u32 rmca; /* Total number of frames that were received
+ successfully with the group address bit set
+ that are not broadcast frames */
+ u32 rbca; /* Total number of frames received successfully
+ that had destination address equal to the
+ broadcast address */
+} __packed;
+
+/* UCC GETH Tx errors returned via TxConf callback */
+#define TX_ERRORS_DEF 0x0200
+#define TX_ERRORS_EXDEF 0x0100
+#define TX_ERRORS_LC 0x0080
+#define TX_ERRORS_RL 0x0040
+#define TX_ERRORS_RC_MASK 0x003C
+#define TX_ERRORS_RC_SHIFT 2
+#define TX_ERRORS_UN 0x0002
+#define TX_ERRORS_CSL 0x0001
+
+/* UCC GETH Rx errors returned via RxStore callback */
+#define RX_ERRORS_CMR 0x0200
+#define RX_ERRORS_M 0x0100
+#define RX_ERRORS_BC 0x0080
+#define RX_ERRORS_MC 0x0040
+
+/* Transmit BD. These are in addition to values defined in uccf. */
+#define T_VID 0x003c0000 /* insert VLAN id index mask. */
+#define T_DEF (((u32) TX_ERRORS_DEF ) << 16)
+#define T_EXDEF (((u32) TX_ERRORS_EXDEF ) << 16)
+#define T_LC (((u32) TX_ERRORS_LC ) << 16)
+#define T_RL (((u32) TX_ERRORS_RL ) << 16)
+#define T_RC_MASK (((u32) TX_ERRORS_RC_MASK ) << 16)
+#define T_UN (((u32) TX_ERRORS_UN ) << 16)
+#define T_CSL (((u32) TX_ERRORS_CSL ) << 16)
+#define T_ERRORS_REPORT (T_DEF | T_EXDEF | T_LC | T_RL | T_RC_MASK \
+ | T_UN | T_CSL) /* transmit errors to report */
+
+/* Receive BD. These are in addition to values defined in uccf. */
+#define R_LG 0x00200000 /* Frame length violation. */
+#define R_NO 0x00100000 /* Non-octet aligned frame. */
+#define R_SH 0x00080000 /* Short frame. */
+#define R_CR 0x00040000 /* CRC error. */
+#define R_OV 0x00020000 /* Overrun. */
+#define R_IPCH 0x00010000 /* IP checksum check failed. */
+#define R_CMR (((u32) RX_ERRORS_CMR ) << 16)
+#define R_M (((u32) RX_ERRORS_M ) << 16)
+#define R_BC (((u32) RX_ERRORS_BC ) << 16)
+#define R_MC (((u32) RX_ERRORS_MC ) << 16)
+#define R_ERRORS_REPORT (R_CMR | R_M | R_BC | R_MC) /* receive errors to
+ report */
+#define R_ERRORS_FATAL (R_LG | R_NO | R_SH | R_CR | \
+ R_OV | R_IPCH) /* receive errors to discard */
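+
+/*
+ * A sketch of the intended Rx BD handling: any R_ERRORS_FATAL bit
+ * discards the frame, while R_ERRORS_REPORT bits are only counted
+ * (the discard label is a stand-in for the driver's cleanup path):
+ *
+ *	if (bd_status & R_ERRORS_FATAL) {
+ *		dev->stats.rx_errors++;
+ *		goto discard;
+ *	}
+ */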
+
+/* Alignments */
+#define UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT 256
+#define UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT 128
+#define UCC_GETH_THREAD_RX_PRAM_ALIGNMENT 128
+#define UCC_GETH_THREAD_TX_PRAM_ALIGNMENT 64
+#define UCC_GETH_THREAD_DATA_ALIGNMENT 256 /* spec gives values
+ based on num of
+ threads, but always
+ using the maximum is
+ easier */
+#define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32
+#define UCC_GETH_SCHEDULER_ALIGNMENT 8 /* This is a guess */
+#define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */
+#define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */
+#define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 64
+#define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */
+#define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */
+#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 8 /* This
+ is a
+ guess
+ */
+#define UCC_GETH_RX_BD_RING_ALIGNMENT 32
+#define UCC_GETH_TX_BD_RING_ALIGNMENT 32
+#define UCC_GETH_MRBLR_ALIGNMENT 128
+#define UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT 4
+#define UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT 32
+#define UCC_GETH_RX_DATA_BUF_ALIGNMENT 64
+
+#define UCC_GETH_TAD_EF 0x80
+#define UCC_GETH_TAD_V 0x40
+#define UCC_GETH_TAD_REJ 0x20
+#define UCC_GETH_TAD_VTAG_OP_RIGHT_SHIFT 2
+#define UCC_GETH_TAD_VTAG_OP_SHIFT 6
+#define UCC_GETH_TAD_V_NON_VTAG_OP 0x20
+#define UCC_GETH_TAD_RQOS_SHIFT 0
+#define UCC_GETH_TAD_V_PRIORITY_SHIFT 5
+#define UCC_GETH_TAD_CFI 0x10
+
+#define UCC_GETH_VLAN_PRIORITY_MAX 8
+#define UCC_GETH_IP_PRIORITY_MAX 64
+#define UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX 8
+#define UCC_GETH_RX_BD_RING_SIZE_MIN 8
+#define UCC_GETH_TX_BD_RING_SIZE_MIN 2
+#define UCC_GETH_BD_RING_SIZE_MAX 0xffff
+
+#define UCC_GETH_SIZE_OF_BD QE_SIZEOF_BD
+
+/* Driver definitions */
+#define TX_BD_RING_LEN 0x10
+#define RX_BD_RING_LEN 0x10
+
+#define TX_RING_MOD_MASK(size) ((size) - 1)
+#define RX_RING_MOD_MASK(size) ((size) - 1)
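+
+/*
+ * The ring lengths above are powers of two, so advancing a ring index
+ * reduces to a mask, e.g.:
+ *
+ *	skb_curtx = (skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
+ */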
+
+#define ENET_NUM_OCTETS_PER_ADDRESS 6
+#define ENET_GROUP_ADDR 0x01 /* Group address mask
+ for ethernet
+ addresses */
+
+#define TX_TIMEOUT (1*HZ)
+#define SKB_ALLOC_TIMEOUT 100000
+#define PHY_INIT_TIMEOUT 100000
+#define PHY_CHANGE_TIME 2
+
+/* Fast Ethernet (10/100 Mbps) */
+#define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size
+ */
+#define UCC_GETH_URFET_INIT 256 /* 1/2 urfs */
+#define UCC_GETH_URFSET_INIT 384 /* 3/4 urfs */
+#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size
+ */
+#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
+#define UCC_GETH_UTFTT_INIT 256 /* 1/2 utfs
+ due to errata */
+/* Gigabit Ethernet (1000 Mbps) */
+#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual
+ FIFO size */
+#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */
+#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */
+#define UCC_GETH_UTFS_GIGA_INIT 4096/*2048*/ /* Tx virtual
+ FIFO size */
+#define UCC_GETH_UTFET_GIGA_INIT 2048/*1024*/ /* 1/2 utfs */
+#define UCC_GETH_UTFTT_GIGA_INIT 4096/*0x40*/ /* Tx transmit
+ threshold */
+
+#define UCC_GETH_REMODER_INIT 0 /* bits that must be
+ set */
+#define UCC_GETH_TEMODER_INIT 0xC000 /* bits that must be set */
+
+/* Initial value for UPSMR */
+#define UCC_GETH_UPSMR_INIT UCC_GETH_UPSMR_RES1
+
+#define UCC_GETH_MACCFG1_INIT 0
+#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1)
+
+/* Ethernet Address Type. */
+enum enet_addr_type {
+ ENET_ADDR_TYPE_INDIVIDUAL,
+ ENET_ADDR_TYPE_GROUP,
+ ENET_ADDR_TYPE_BROADCAST
+};
+
+/* UCC GETH 82xx Ethernet Address Recognition Location */
+enum ucc_geth_enet_address_recognition_location {
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_STATION_ADDRESS,/* station
+ address */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_FIRST, /* additional
+ station
+ address
+ paddr1 */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR2, /* additional
+ station
+ address
+ paddr2 */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR3, /* additional
+ station
+ address
+ paddr3 */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_LAST, /* additional
+ station
+ address
+ paddr4 */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH, /* group hash */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH /* individual
+ hash */
+};
+
+/* UCC GETH vlan operation tagged */
+enum ucc_geth_vlan_operation_tagged {
+ UCC_GETH_VLAN_OPERATION_TAGGED_NOP = 0x0, /* Tagged - nop */
+ UCC_GETH_VLAN_OPERATION_TAGGED_REPLACE_VID_PORTION_OF_Q_TAG
+ = 0x1, /* Tagged - replace vid portion of q tag */
+ UCC_GETH_VLAN_OPERATION_TAGGED_IF_VID0_REPLACE_VID_WITH_DEFAULT_VALUE
+ = 0x2, /* Tagged - if vid0 replace vid with default value */
+ UCC_GETH_VLAN_OPERATION_TAGGED_EXTRACT_Q_TAG_FROM_FRAME
+ = 0x3 /* Tagged - extract q tag from frame */
+};
+
+/* UCC GETH vlan operation non-tagged */
+enum ucc_geth_vlan_operation_non_tagged {
+ UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP = 0x0, /* Non tagged - nop */
+ UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT = 0x1 /* Non tagged -
+ q tag insert
+ */
+};
+
+/* UCC GETH Rx Quality of Service Mode */
+enum ucc_geth_qos_mode {
+ UCC_GETH_QOS_MODE_DEFAULT = 0x0, /* default queue */
+ UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L2_CRITERIA = 0x1, /* queue
+ determined
+ by L2
+ criteria */
+ UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L3_CRITERIA = 0x2 /* queue
+ determined
+ by L3
+ criteria */
+};
+
+/* UCC GETH Statistics Gathering Mode - These are bit flags, 'or' them together
+ for combined functionality */
+enum ucc_geth_statistics_gathering_mode {
+ UCC_GETH_STATISTICS_GATHERING_MODE_NONE = 0x00000000, /* No
+ statistics
+ gathering */
+ UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE = 0x00000001,/* Enable
+ hardware
+ statistics
+ gathering
+ */
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX = 0x00000004,/*Enable
+ firmware
+ tx
+ statistics
+ gathering
+ */
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX = 0x00000008/* Enable
+ firmware
+ rx
+ statistics
+ gathering
+ */
+};
+
+/* UCC GETH Pad and CRC Mode - Note, Padding without CRC is not possible */
+enum ucc_geth_maccfg2_pad_and_crc_mode {
+ UCC_GETH_PAD_AND_CRC_MODE_NONE
+ = MACCFG2_PAD_AND_CRC_MODE_NONE, /* Neither Padding
+ short frames
+ nor CRC */
+ UCC_GETH_PAD_AND_CRC_MODE_CRC_ONLY
+ = MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY, /* Append
+ CRC only */
+ UCC_GETH_PAD_AND_CRC_MODE_PAD_AND_CRC =
+ MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC
+};
+
+/* UCC GETH upsmr Flow Control Mode */
+enum ucc_geth_flow_control_mode {
+ UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE = 0x00000000, /* No automatic
+ flow control
+ */
+ UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_PAUSE_WHEN_EMERGENCY
+ = 0x00004000 /* Send pause frame when RxFIFO reaches its
+ emergency threshold */
+};
+
+/* UCC GETH number of threads */
+enum ucc_geth_num_of_threads {
+ UCC_GETH_NUM_OF_THREADS_1 = 0x1, /* 1 */
+ UCC_GETH_NUM_OF_THREADS_2 = 0x2, /* 2 */
+ UCC_GETH_NUM_OF_THREADS_4 = 0x0, /* 4 */
+ UCC_GETH_NUM_OF_THREADS_6 = 0x3, /* 6 */
+ UCC_GETH_NUM_OF_THREADS_8 = 0x4 /* 8 */
+};
+
+/* UCC GETH number of station addresses */
+enum ucc_geth_num_of_station_addresses {
+ UCC_GETH_NUM_OF_STATION_ADDRESSES_1, /* 1 */
+ UCC_GETH_NUM_OF_STATION_ADDRESSES_5 /* 5 */
+};
+
+/* UCC GETH 82xx Ethernet Address Container */
+struct enet_addr_container {
+ u8 address[ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */
+ enum ucc_geth_enet_address_recognition_location location; /* location in
+ 82xx address
+ recognition
+ hardware */
+ struct list_head node;
+};
+
+#define ENET_ADDR_CONT_ENTRY(ptr) list_entry(ptr, struct enet_addr_container, node)
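+
+/*
+ * Containers are linked onto the group_hash_q/ind_hash_q lists of
+ * struct ucc_geth_private, e.g. (sketch):
+ *
+ *	list_for_each(pos, &ugeth->group_hash_q)
+ *		cont = ENET_ADDR_CONT_ENTRY(pos);
+ */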
+
+/* UCC GETH Termination Action Descriptor (TAD) structure. */
+struct ucc_geth_tad_params {
+ int rx_non_dynamic_extended_features_mode;
+ int reject_frame;
+ enum ucc_geth_vlan_operation_tagged vtag_op;
+ enum ucc_geth_vlan_operation_non_tagged vnontag_op;
+ enum ucc_geth_qos_mode rqos;
+ u8 vpri;
+ u16 vid;
+};
+
+/* GETH protocol initialization structure */
+struct ucc_geth_info {
+ struct ucc_fast_info uf_info;
+ u8 numQueuesTx;
+ u8 numQueuesRx;
+ int ipCheckSumCheck;
+ int ipCheckSumGenerate;
+ int rxExtendedFiltering;
+ u32 extendedFilteringChainPointer;
+ u16 typeorlen;
+ int dynamicMaxFrameLength;
+ int dynamicMinFrameLength;
+ u8 nonBackToBackIfgPart1;
+ u8 nonBackToBackIfgPart2;
+ u8 miminumInterFrameGapEnforcement;
+ u8 backToBackInterFrameGap;
+ int ipAddressAlignment;
+ int lengthCheckRx;
+ u32 mblinterval;
+ u16 nortsrbytetime;
+ u8 fracsiz;
+ u8 strictpriorityq;
+ u8 txasap;
+ u8 extrabw;
+ int miiPreambleSupress;
+ u8 altBebTruncation;
+ int altBeb;
+ int backPressureNoBackoff;
+ int noBackoff;
+ int excessDefer;
+ u8 maxRetransmission;
+ u8 collisionWindow;
+ int pro;
+ int cap;
+ int rsh;
+ int rlpb;
+ int cam;
+ int bro;
+ int ecm;
+ int receiveFlowControl;
+ int transmitFlowControl;
+ u8 maxGroupAddrInHash;
+ u8 maxIndAddrInHash;
+ u8 prel;
+ u16 maxFrameLength;
+ u16 minFrameLength;
+ u16 maxD1Length;
+ u16 maxD2Length;
+ u16 vlantype;
+ u16 vlantci;
+ u32 ecamptr;
+ u32 eventRegMask;
+ u16 pausePeriod;
+ u16 extensionField;
+ struct device_node *phy_node;
+ struct device_node *tbi_node;
+ u8 weightfactor[NUM_TX_QUEUES];
+ u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES];
+ u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX];
+ u8 l3qt[UCC_GETH_IP_PRIORITY_MAX];
+ u32 vtagtable[UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX];
+ u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX];
+ u16 bdRingLenTx[NUM_TX_QUEUES];
+ u16 bdRingLenRx[NUM_RX_QUEUES];
+ enum ucc_geth_num_of_station_addresses numStationAddresses;
+ enum qe_fltr_largest_external_tbl_lookup_key_size
+ largestexternallookupkeysize;
+ enum ucc_geth_statistics_gathering_mode statisticsMode;
+ enum ucc_geth_vlan_operation_tagged vlanOperationTagged;
+ enum ucc_geth_vlan_operation_non_tagged vlanOperationNonTagged;
+ enum ucc_geth_qos_mode rxQoSMode;
+ enum ucc_geth_flow_control_mode aufc;
+ enum ucc_geth_maccfg2_pad_and_crc_mode padAndCrc;
+ enum ucc_geth_num_of_threads numThreadsTx;
+ enum ucc_geth_num_of_threads numThreadsRx;
+ unsigned int riscTx;
+ unsigned int riscRx;
+};
+
+/* structure representing UCC GETH */
+struct ucc_geth_private {
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_private *uccf;
+ struct device *dev;
+ struct net_device *ndev;
+ struct napi_struct napi;
+ struct work_struct timeout_work;
+ struct ucc_geth __iomem *ug_regs;
+ struct ucc_geth_init_pram *p_init_enet_param_shadow;
+ struct ucc_geth_exf_global_pram __iomem *p_exf_glbl_param;
+ u32 exf_glbl_param_offset;
+ struct ucc_geth_rx_global_pram __iomem *p_rx_glbl_pram;
+ u32 rx_glbl_pram_offset;
+ struct ucc_geth_tx_global_pram __iomem *p_tx_glbl_pram;
+ u32 tx_glbl_pram_offset;
+ struct ucc_geth_send_queue_mem_region __iomem *p_send_q_mem_reg;
+ u32 send_q_mem_reg_offset;
+ struct ucc_geth_thread_data_tx __iomem *p_thread_data_tx;
+ u32 thread_dat_tx_offset;
+ struct ucc_geth_thread_data_rx __iomem *p_thread_data_rx;
+ u32 thread_dat_rx_offset;
+ struct ucc_geth_scheduler __iomem *p_scheduler;
+ u32 scheduler_offset;
+ struct ucc_geth_tx_firmware_statistics_pram __iomem *p_tx_fw_statistics_pram;
+ u32 tx_fw_statistics_pram_offset;
+ struct ucc_geth_rx_firmware_statistics_pram __iomem *p_rx_fw_statistics_pram;
+ u32 rx_fw_statistics_pram_offset;
+ struct ucc_geth_rx_interrupt_coalescing_table __iomem *p_rx_irq_coalescing_tbl;
+ u32 rx_irq_coalescing_tbl_offset;
+ struct ucc_geth_rx_bd_queues_entry __iomem *p_rx_bd_qs_tbl;
+ u32 rx_bd_qs_tbl_offset;
+ u8 __iomem *p_tx_bd_ring[NUM_TX_QUEUES];
+ u32 tx_bd_ring_offset[NUM_TX_QUEUES];
+ u8 __iomem *p_rx_bd_ring[NUM_RX_QUEUES];
+ u32 rx_bd_ring_offset[NUM_RX_QUEUES];
+ u8 __iomem *confBd[NUM_TX_QUEUES];
+ u8 __iomem *txBd[NUM_TX_QUEUES];
+ u8 __iomem *rxBd[NUM_RX_QUEUES];
+ int badFrame[NUM_RX_QUEUES];
+ u16 cpucount[NUM_TX_QUEUES];
+ u16 __iomem *p_cpucount[NUM_TX_QUEUES];
+ int indAddrRegUsed[NUM_OF_PADDRS];
+ u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */
+ u8 numGroupAddrInHash;
+ u8 numIndAddrInHash;
+ u8 numIndAddrInReg;
+ int rx_extended_features;
+ int rx_non_dynamic_extended_features;
+ struct list_head conf_skbs;
+ struct list_head group_hash_q;
+ struct list_head ind_hash_q;
+ u32 saved_uccm;
+ spinlock_t lock;
+ /* pointers to arrays of skbuffs for tx and rx */
+ struct sk_buff **tx_skbuff[NUM_TX_QUEUES];
+ struct sk_buff **rx_skbuff[NUM_RX_QUEUES];
+ /* indices pointing to the next free skb in the skb arrays */
+ u16 skb_curtx[NUM_TX_QUEUES];
+ u16 skb_currx[NUM_RX_QUEUES];
+ /* index of the first skb which hasn't been transmitted yet. */
+ u16 skb_dirtytx[NUM_TX_QUEUES];
+
+ struct sk_buff_head rx_recycle;
+
+ struct ugeth_mii_info *mii_info;
+ struct phy_device *phydev;
+ phy_interface_t phy_interface;
+ int max_speed;
+ uint32_t msg_enable;
+ int oldspeed;
+ int oldduplex;
+ int oldlink;
+ int wol_en;
+
+ struct device_node *node;
+};
+
+void uec_set_ethtool_ops(struct net_device *netdev);
+int init_flow_control_params(u32 automatic_flow_control_mode,
+ int rx_flow_control_enable, int tx_flow_control_enable,
+ u16 pause_period, u16 extension_field,
+ u32 __iomem *upsmr_register, u32 __iomem *uempr_register,
+ u32 __iomem *maccfg1_register);
+
+
+#endif /* __UCC_GETH_H__ */
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
new file mode 100644
index 000000000000..a97257f91a3d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -0,0 +1,423 @@
+/*
+ * Copyright (c) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Description: QE UCC Gigabit Ethernet Ethtool API Set
+ *
+ * Author: Li Yang <leoli@freescale.com>
+ *
+ * Limitation:
+ * Can only get/set settings of the first queue.
+ * Need to re-open the interface manually after changing some parameters.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/types.h>
+
+#include "ucc_geth.h"
+
+static char hw_stat_gstrings[][ETH_GSTRING_LEN] = {
+ "tx-64-frames",
+ "tx-65-127-frames",
+ "tx-128-255-frames",
+ "rx-64-frames",
+ "rx-65-127-frames",
+ "rx-128-255-frames",
+ "tx-bytes-ok",
+ "tx-pause-frames",
+ "tx-multicast-frames",
+ "tx-broadcast-frames",
+ "rx-frames",
+ "rx-bytes-ok",
+ "rx-bytes-all",
+ "rx-multicast-frames",
+ "rx-broadcast-frames",
+ "stats-counter-carry",
+ "stats-counter-mask",
+ "rx-dropped-frames",
+};
+
+static char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
+ "tx-single-collision",
+ "tx-multiple-collision",
+ "tx-late-collsion",
+ "tx-aborted-frames",
+ "tx-lost-frames",
+ "tx-carrier-sense-errors",
+ "tx-frames-ok",
+ "tx-excessive-differ-frames",
+ "tx-256-511-frames",
+ "tx-512-1023-frames",
+ "tx-1024-1518-frames",
+ "tx-jumbo-frames",
+};
+
+static char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
+ "rx-crc-errors",
+ "rx-alignment-errors",
+ "rx-in-range-length-errors",
+ "rx-out-of-range-length-errors",
+ "rx-too-long-frames",
+ "rx-runt",
+ "rx-very-long-event",
+ "rx-symbol-errors",
+ "rx-busy-drop-frames",
+ "reserved",
+ "reserved",
+ "rx-mismatch-drop-frames",
+ "rx-small-than-64",
+ "rx-256-511-frames",
+ "rx-512-1023-frames",
+ "rx-1024-1518-frames",
+ "rx-jumbo-frames",
+ "rx-mac-error-loss",
+ "rx-pause-frames",
+ "reserved",
+ "rx-vlan-removed",
+ "rx-vlan-replaced",
+ "rx-vlan-inserted",
+ "rx-ip-checksum-errors",
+};
+
+#define UEC_HW_STATS_LEN ARRAY_SIZE(hw_stat_gstrings)
+#define UEC_TX_FW_STATS_LEN ARRAY_SIZE(tx_fw_stat_gstrings)
+#define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings)
+
+static int
+uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct phy_device *phydev = ugeth->phydev;
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+
+ if (!phydev)
+ return -ENODEV;
+
+ ecmd->maxtxpkt = 1;
+ ecmd->maxrxpkt = ug_info->interruptcoalescingmaxvalue[0];
+
+ return phy_ethtool_gset(phydev, ecmd);
+}
+
+static int
+uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct phy_device *phydev = ugeth->phydev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, ecmd);
+}
+
+static void
+uec_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+
+ pause->autoneg = ugeth->phydev->autoneg;
+
+ if (ugeth->ug_info->receiveFlowControl)
+ pause->rx_pause = 1;
+ if (ugeth->ug_info->transmitFlowControl)
+ pause->tx_pause = 1;
+}
+
+static int
+uec_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ int ret = 0;
+
+ ugeth->ug_info->receiveFlowControl = pause->rx_pause;
+ ugeth->ug_info->transmitFlowControl = pause->tx_pause;
+
+ if (ugeth->phydev->autoneg) {
+ if (netif_running(netdev)) {
+ /* FIXME: automatically restart */
+ printk(KERN_INFO
+ "Please re-open the interface.\n");
+ }
+ } else {
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+
+ ret = init_flow_control_params(ug_info->aufc,
+ ug_info->receiveFlowControl,
+ ug_info->transmitFlowControl,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &ugeth->uccf->uf_regs->upsmr,
+ &ugeth->ug_regs->uempr,
+ &ugeth->ug_regs->maccfg1);
+ }
+
+ return ret;
+}
+
+static uint32_t
+uec_get_msglevel(struct net_device *netdev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ return ugeth->msg_enable;
+}
+
+static void
+uec_set_msglevel(struct net_device *netdev, uint32_t data)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ ugeth->msg_enable = data;
+}
+
+static int
+uec_get_regs_len(struct net_device *netdev)
+{
+ return sizeof(struct ucc_geth);
+}
+
+static void
+uec_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ int i;
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ u32 __iomem *ug_regs = (u32 __iomem *)ugeth->ug_regs;
+ u32 *buff = p;
+
+ for (i = 0; i < sizeof(struct ucc_geth) / sizeof(u32); i++)
+ buff[i] = in_be32(&ug_regs[i]);
+}
+
+static void
+uec_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ int queue = 0;
+
+ ring->rx_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
+ ring->rx_mini_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
+ ring->rx_jumbo_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
+ ring->tx_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
+
+ ring->rx_pending = ug_info->bdRingLenRx[queue];
+ ring->rx_mini_pending = ug_info->bdRingLenRx[queue];
+ ring->rx_jumbo_pending = ug_info->bdRingLenRx[queue];
+ ring->tx_pending = ug_info->bdRingLenTx[queue];
+}
+
+static int
+uec_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ int queue = 0, ret = 0;
+
+ if (ring->rx_pending < UCC_GETH_RX_BD_RING_SIZE_MIN) {
+ printk("%s: RxBD ring size must be no smaller than %d.\n",
+ netdev->name, UCC_GETH_RX_BD_RING_SIZE_MIN);
+ return -EINVAL;
+ }
+ if (ring->rx_pending % UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT) {
+ printk("%s: RxBD ring size must be multiple of %d.\n",
+ netdev->name, UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT);
+ return -EINVAL;
+ }
+ if (ring->tx_pending < UCC_GETH_TX_BD_RING_SIZE_MIN) {
+ printk("%s: TxBD ring size must be no smaller than %d.\n",
+ netdev->name, UCC_GETH_TX_BD_RING_SIZE_MIN);
+ return -EINVAL;
+ }
+
+ ug_info->bdRingLenRx[queue] = ring->rx_pending;
+ ug_info->bdRingLenTx[queue] = ring->tx_pending;
+
+ if (netif_running(netdev)) {
+ /* FIXME: restart automatically */
+ printk(KERN_INFO
+ "Please re-open the interface.\n");
+ }
+
+ return ret;
+}
+
+static int uec_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ u32 stats_mode = ugeth->ug_info->statisticsMode;
+ int len = 0;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE)
+ len += UEC_HW_STATS_LEN;
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX)
+ len += UEC_TX_FW_STATS_LEN;
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX)
+ len += UEC_RX_FW_STATS_LEN;
+
+ return len;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void uec_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ u32 stats_mode = ugeth->ug_info->statisticsMode;
+
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) {
+ memcpy(buf, hw_stat_gstrings, UEC_HW_STATS_LEN *
+ ETH_GSTRING_LEN);
+ buf += UEC_HW_STATS_LEN * ETH_GSTRING_LEN;
+ }
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
+ memcpy(buf, tx_fw_stat_gstrings, UEC_TX_FW_STATS_LEN *
+ ETH_GSTRING_LEN);
+ buf += UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN;
+ }
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX)
+ memcpy(buf, rx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN *
+ ETH_GSTRING_LEN);
+}
+
+static void uec_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, uint64_t *data)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ u32 stats_mode = ugeth->ug_info->statisticsMode;
+ u32 __iomem *base;
+ int i, j = 0;
+
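+ /* Hardware MIB counters form a contiguous block of big-endian
+ words starting at the tx64 register; the firmware Tx/Rx
+ counters live in parameter RAM instead. */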
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) {
+ if (ugeth->ug_regs)
+ base = (u32 __iomem *)&ugeth->ug_regs->tx64;
+ else
+ base = NULL;
+
+ for (i = 0; i < UEC_HW_STATS_LEN; i++)
+ data[j++] = base ? in_be32(&base[i]) : 0;
+ }
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
+ base = (u32 __iomem *)ugeth->p_tx_fw_statistics_pram;
+ for (i = 0; i < UEC_TX_FW_STATS_LEN; i++)
+ data[j++] = base ? in_be32(&base[i]) : 0;
+ }
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
+ base = (u32 __iomem *)ugeth->p_rx_fw_statistics_pram;
+ for (i = 0; i < UEC_RX_FW_STATS_LEN; i++)
+ data[j++] = base ? in_be32(&base[i]) : 0;
+ }
+}
+
+static int uec_nway_reset(struct net_device *netdev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+
+ return phy_start_aneg(ugeth->phydev);
+}
+
+/* Report driver information */
+static void
+uec_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strncpy(drvinfo->driver, DRV_NAME, 32);
+ strncpy(drvinfo->version, DRV_VERSION, 32);
+ strncpy(drvinfo->fw_version, "N/A", 32);
+ strncpy(drvinfo->bus_info, "QUICC ENGINE", 32);
+ drvinfo->eedump_len = 0;
+ drvinfo->regdump_len = uec_get_regs_len(netdev);
+}
+
+#ifdef CONFIG_PM
+
+static void uec_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct phy_device *phydev = ugeth->phydev;
+
+ if (phydev && phydev->irq)
+ wol->supported |= WAKE_PHY;
+ if (qe_alive_during_sleep())
+ wol->supported |= WAKE_MAGIC;
+
+ wol->wolopts = ugeth->wol_en;
+}
+
+static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct phy_device *phydev = ugeth->phydev;
+
+ if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
+ else if (wol->wolopts & WAKE_PHY && (!phydev || !phydev->irq))
+ return -EINVAL;
+ else if (wol->wolopts & WAKE_MAGIC && !qe_alive_during_sleep())
+ return -EINVAL;
+
+ ugeth->wol_en = wol->wolopts;
+ device_set_wakeup_enable(&netdev->dev, ugeth->wol_en);
+
+ return 0;
+}
+
+#else
+#define uec_get_wol NULL
+#define uec_set_wol NULL
+#endif /* CONFIG_PM */
+
+static const struct ethtool_ops uec_ethtool_ops = {
+ .get_settings = uec_get_settings,
+ .set_settings = uec_set_settings,
+ .get_drvinfo = uec_get_drvinfo,
+ .get_regs_len = uec_get_regs_len,
+ .get_regs = uec_get_regs,
+ .get_msglevel = uec_get_msglevel,
+ .set_msglevel = uec_set_msglevel,
+ .nway_reset = uec_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = uec_get_ringparam,
+ .set_ringparam = uec_set_ringparam,
+ .get_pauseparam = uec_get_pauseparam,
+ .set_pauseparam = uec_set_pauseparam,
+ .get_sset_count = uec_get_sset_count,
+ .get_strings = uec_get_strings,
+ .get_ethtool_stats = uec_get_ethtool_stats,
+ .get_wol = uec_get_wol,
+ .set_wol = uec_set_wol,
+};
+
+void uec_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &uec_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/fujitsu/Kconfig b/drivers/net/ethernet/fujitsu/Kconfig
new file mode 100644
index 000000000000..dffee9d44fd5
--- /dev/null
+++ b/drivers/net/ethernet/fujitsu/Kconfig
@@ -0,0 +1,54 @@
+#
+# Fujitsu Network device configuration
+#
+
+config NET_VENDOR_FUJITSU
+ bool "Fujitsu devices"
+ default y
+ depends on ISA || PCMCIA || ((ISA || MCA_LEGACY) && EXPERIMENTAL)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Fujitsu cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_FUJITSU
+
+config AT1700
+ tristate "AT1700/1720 support (EXPERIMENTAL)"
+ depends on (ISA || MCA_LEGACY) && EXPERIMENTAL
+ select CRC32
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called at1700.
+
+config PCMCIA_FMVJ18X
+ tristate "Fujitsu FMV-J18x PCMCIA support"
+ depends on PCMCIA
+ select CRC32
+ ---help---
+ Say Y here if you intend to attach a Fujitsu FMV-J18x or compatible
+ PCMCIA (PC-card) Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called fmvj18x_cs. If unsure, say N.
+
+config ETH16I
+ tristate "ICL EtherTeam 16i/32 support"
+ depends on ISA
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called eth16i.
+
+endif # NET_VENDOR_FUJITSU
diff --git a/drivers/net/ethernet/fujitsu/Makefile b/drivers/net/ethernet/fujitsu/Makefile
new file mode 100644
index 000000000000..2730ae67d3aa
--- /dev/null
+++ b/drivers/net/ethernet/fujitsu/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Fujitsu network device drivers.
+#
+
+obj-$(CONFIG_AT1700) += at1700.o
+obj-$(CONFIG_ETH16I) += eth16i.o
+obj-$(CONFIG_PCMCIA_FMVJ18X) += fmvj18x_cs.o
diff --git a/drivers/net/ethernet/fujitsu/at1700.c b/drivers/net/ethernet/fujitsu/at1700.c
new file mode 100644
index 000000000000..7c6c908bdf02
--- /dev/null
+++ b/drivers/net/ethernet/fujitsu/at1700.c
@@ -0,0 +1,900 @@
+/* at1700.c: A network device driver for the Allied Telesis AT1700.
+
+ Written 1993-98 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ This is a device driver for the Allied Telesis AT1700, and
+ Fujitsu FMV-181/182/181A/182A/183/184/183A/184A, which are
+ straight-forward Fujitsu MB86965 implementations.
+
+ Modification for Fujitsu FMV-18X cards is done by Yutaka Tamiya
+ (tamy@flab.fujitsu.co.jp).
+
+ Sources:
+ The Fujitsu MB86965 datasheet.
+
+ After the initial version of this driver was written, Gerry Sawkins of
+ ATI provided their EEPROM configuration code header file.
+ Thanks to NIIBE Yutaka <gniibe@mri.co.jp> for bug fixes.
+
+ MCA bus (AT1720) support by Rene Schmit <rene@bss.lu>
+
+ Bugs:
+ The MB86965 has a design flaw that makes all probes unreliable. Not
+ only is it difficult to detect, it also moves around in I/O space in
+ response to inb()s from other device probes!
+*/
+
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mca-legacy.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+static char version[] __initdata =
+ "at1700.c:v1.16 9/11/06 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#define DRV_NAME "at1700"
+
+/* Tunable parameters. */
+
+/* When to switch from the 64-entry multicast filter to Rx-all-multicast. */
+#define MC_FILTERBREAK 64
+
+/* These unusual address orders are used to verify the CONFIG register. */
+
+static int fmv18x_probe_list[] __initdata = {
+ 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x300, 0x340, 0
+};
+
+/*
+ * ISA
+ */
+
+static unsigned at1700_probe_list[] __initdata = {
+ 0x260, 0x280, 0x2a0, 0x240, 0x340, 0x320, 0x380, 0x300, 0
+};
+
+/*
+ * MCA
+ */
+#ifdef CONFIG_MCA_LEGACY
+static int at1700_ioaddr_pattern[] __initdata = {
+ 0x00, 0x04, 0x01, 0x05, 0x02, 0x06, 0x03, 0x07
+};
+
+static int at1700_mca_probe_list[] __initdata = {
+ 0x400, 0x1400, 0x2400, 0x3400, 0x4400, 0x5400, 0x6400, 0x7400, 0
+};
+
+static int at1700_irq_pattern[] __initdata = {
+ 0x00, 0x00, 0x00, 0x30, 0x70, 0xb0, 0x00, 0x00,
+ 0x00, 0xf0, 0x34, 0x74, 0xb4, 0x00, 0x00, 0xf4, 0x00
+};
+#endif
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+typedef unsigned char uchar;
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ spinlock_t lock;
+ unsigned char mc_filter[8];
+ uint jumpered:1; /* Set iff the board has jumper config. */
+ uint tx_started:1; /* Packets are on the Tx queue. */
+ uint tx_queue_ready:1; /* Tx queue is ready to be sent. */
+ uint rx_started:1; /* Packets are Rxing. */
+ uchar tx_queue; /* Number of packet on the Tx queue. */
+ char mca_slot; /* -1 means ISA */
+ ushort tx_queue_len; /* Current length of the Tx queue. */
+};
+
+
+/* Offsets from the base address. */
+#define STATUS 0
+#define TX_STATUS 0
+#define RX_STATUS 1
+#define TX_INTR 2 /* Bit-mapped interrupt enable registers. */
+#define RX_INTR 3
+#define TX_MODE 4
+#define RX_MODE 5
+#define CONFIG_0 6 /* Misc. configuration settings. */
+#define CONFIG_1 7
+/* Run-time register bank 2 definitions. */
+#define DATAPORT 8 /* Word-wide DMA or programmed-I/O dataport. */
+#define TX_START 10
+#define COL16CNTL 11 /* Control Reg for 16 collisions */
+#define MODE13 13
+#define RX_CTRL 14
+/* Configuration registers only on the '865A/B chips. */
+#define EEPROM_Ctrl 16
+#define EEPROM_Data 17
+#define CARDSTATUS 16 /* FMV-18x Card Status */
+#define CARDSTATUS1 17 /* FMV-18x Card Status */
+#define IOCONFIG 18 /* Either read the jumper, or move the I/O. */
+#define IOCONFIG1 19
+#define SAPROM 20 /* The station address PROM, if no EEPROM. */
+#define MODE24 24
+#define RESET 31 /* Write to reset some parts of the chip. */
+#define AT1700_IO_EXTENT 32
+#define PORT_OFFSET(o) (o)
+
+
+#define TX_TIMEOUT (HZ/10)
+
+
+/* Index to functions, as function prototypes. */
+
+static int at1700_probe1(struct net_device *dev, int ioaddr);
+static int read_eeprom(long ioaddr, int location);
+static int net_open(struct net_device *dev);
+static netdev_tx_t net_send_packet(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t net_interrupt(int irq, void *dev_id);
+static void net_rx(struct net_device *dev);
+static int net_close(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static void net_tx_timeout (struct net_device *dev);
+
+
+#ifdef CONFIG_MCA_LEGACY
+struct at1720_mca_adapters_struct {
+ char* name;
+ int id;
+};
+/* rEnE : maybe there are others I don't know of... */
+
+static struct at1720_mca_adapters_struct at1720_mca_adapters[] __initdata = {
+ { "Allied Telesys AT1720AT", 0x6410 },
+ { "Allied Telesys AT1720BT", 0x6413 },
+ { "Allied Telesys AT1720T", 0x6416 },
+ { NULL, 0 },
+};
+#endif
+
+/* Check for a network adaptor of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+ */
+
+static int io = 0x260;
+
+static int irq;
+
+static void cleanup_card(struct net_device *dev)
+{
+#ifdef CONFIG_MCA_LEGACY
+ struct net_local *lp = netdev_priv(dev);
+ if (lp->mca_slot >= 0)
+ mca_mark_as_unused(lp->mca_slot);
+#endif
+ free_irq(dev->irq, NULL);
+ release_region(dev->base_addr, AT1700_IO_EXTENT);
+}
+
+struct net_device * __init at1700_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
+ unsigned *port;
+ int err = 0;
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ io = dev->base_addr;
+ irq = dev->irq;
+ } else {
+ dev->base_addr = io;
+ dev->irq = irq;
+ }
+
+ if (io > 0x1ff) { /* Check a single specified location. */
+ err = at1700_probe1(dev, io);
+ } else if (io != 0) { /* Don't probe at all. */
+ err = -ENXIO;
+ } else {
+ for (port = at1700_probe_list; *port; port++) {
+ if (at1700_probe1(dev, *port) == 0)
+ break;
+ dev->irq = irq;
+ }
+ if (!*port)
+ err = -ENODEV;
+ }
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static const struct net_device_ops at1700_netdev_ops = {
+ .ndo_open = net_open,
+ .ndo_stop = net_close,
+ .ndo_start_xmit = net_send_packet,
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_tx_timeout = net_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/* The Fujitsu datasheet suggests that the NIC be probed for by checking its
+ "signature", the default bit pattern after a reset. This *doesn't* work --
+ there is no way to reset the bus interface without a complete power-cycle!
+
+ It turns out that ATI came to the same conclusion I did: the only thing
+ that can be done is checking a few bits and then diving right into an
+ EEPROM read. */
+
+static int __init at1700_probe1(struct net_device *dev, int ioaddr)
+{
+ static const char fmv_irqmap[4] = {3, 7, 10, 15};
+ static const char fmv_irqmap_pnp[8] = {3, 4, 5, 7, 9, 10, 11, 15};
+ static const char at1700_irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15};
+ unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0;
+ int slot, ret = -ENODEV;
+ struct net_local *lp = netdev_priv(dev);
+
+ if (!request_region(ioaddr, AT1700_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ /* Resetting the chip doesn't reset the ISA interface, so don't bother.
+ That means we have to be careful with the register values we probe
+ for.
+ */
+#ifdef notdef
+ printk("at1700 probe at %#x, eeprom is %4.4x %4.4x %4.4x ctrl %4.4x.\n",
+ ioaddr, read_eeprom(ioaddr, 4), read_eeprom(ioaddr, 5),
+ read_eeprom(ioaddr, 6), inw(ioaddr + EEPROM_Ctrl));
+#endif
+
+#ifdef CONFIG_MCA_LEGACY
+ /* rEnE (rene@bss.lu): got this from the 3c509 driver source, adapted for the AT1720 */
+
+ /* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch, heavily
+ modified by Chris Beauregard (cpbeaure@csclub.uwaterloo.ca)
+ to support standard MCA probing. */
+
+ /* redone for multi-card detection by ZP Gu (zpg@castle.net) */
+ /* now works as a module */
+
+ if (MCA_bus) {
+ int j;
+ int l_i;
+ u_char pos3, pos4;
+
+ for (j = 0; at1720_mca_adapters[j].name != NULL; j ++) {
+ slot = 0;
+ while (slot != MCA_NOTFOUND) {
+
+ slot = mca_find_unused_adapter( at1720_mca_adapters[j].id, slot );
+ if (slot == MCA_NOTFOUND) break;
+
+ /* if we get this far, an adapter has been detected and is
+ enabled */
+
+ pos3 = mca_read_stored_pos( slot, 3 );
+ pos4 = mca_read_stored_pos( slot, 4 );
+
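+ /* Decode the stored POS registers: the low three bits of POS3
+ index the I/O base table, while the IRQ is matched from
+ POS4's high nibble combined with POS3's high nibble. */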
+ for (l_i = 0; l_i < 8; l_i++)
+ if (( pos3 & 0x07) == at1700_ioaddr_pattern[l_i])
+ break;
+ ioaddr = at1700_mca_probe_list[l_i];
+
+ for (irq = 0; irq < 0x10; irq++)
+ if (((((pos4>>4) & 0x0f) | (pos3 & 0xf0)) & 0xff) == at1700_irq_pattern[irq])
+ break;
+
+ /* probing for a card at a particular IO/IRQ */
+ if ((dev->irq && dev->irq != irq) ||
+ (dev->base_addr && dev->base_addr != ioaddr)) {
+ slot++; /* probing next slot */
+ continue;
+ }
+
+ dev->irq = irq;
+
+ /* claim the slot */
+ mca_set_adapter_name( slot, at1720_mca_adapters[j].name );
+ mca_mark_as_used(slot);
+
+ goto found;
+ }
+ }
+ /* if we get here, we didn't find an MCA adapter - try ISA */
+ }
+#endif
+ slot = -1;
+ /* We must check for the EEPROM-config boards first, else accessing
+ IOCONFIG0 will move the board! */
+ if (at1700_probe_list[inb(ioaddr + IOCONFIG1) & 0x07] == ioaddr &&
+ read_eeprom(ioaddr, 4) == 0x0000 &&
+ (read_eeprom(ioaddr, 5) & 0xff00) == 0xF400)
+ is_at1700 = 1;
+ else if (inb(ioaddr + SAPROM ) == 0x00 &&
+ inb(ioaddr + SAPROM + 1) == 0x00 &&
+ inb(ioaddr + SAPROM + 2) == 0x0e)
+ is_fmv18x = 1;
+ else {
+ goto err_out;
+ }
+
+#ifdef CONFIG_MCA_LEGACY
+found:
+#endif
+
+ /* Reset the internal state machines. */
+ outb(0, ioaddr + RESET);
+
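+ /* The IRQ is a table lookup: bit 2 of EEPROM word 12 combined
+ with the top two bits of word 0 indexes at1700_irqmap[] on the
+ AT1700; the FMV-18x reads it from IOCONFIG or uses PnP. */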
+ if (is_at1700) {
+ irq = at1700_irqmap[(read_eeprom(ioaddr, 12)&0x04)
+ | (read_eeprom(ioaddr, 0)>>14)];
+ } else {
+ /* Check PnP mode for FMV-183/184/183A/184A. */
+ /* This PnP routine is very poor. IO and IRQ should be known. */
+ if (inb(ioaddr + CARDSTATUS1) & 0x20) {
+ irq = dev->irq;
+ for (i = 0; i < 8; i++) {
+ if (irq == fmv_irqmap_pnp[i])
+ break;
+ }
+ if (i == 8) {
+ goto err_mca;
+ }
+ } else {
+ if (fmv18x_probe_list[inb(ioaddr + IOCONFIG) & 0x07] != ioaddr)
+ goto err_mca;
+ irq = fmv_irqmap[(inb(ioaddr + IOCONFIG)>>6) & 0x03];
+ }
+ }
+
+ printk("%s: %s found at %#3x, IRQ %d, address ", dev->name,
+ is_at1700 ? "AT1700" : "FMV-18X", ioaddr, irq);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
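+ /* The station address lives in EEPROM words 4-6 on the AT1700
+ (two bytes per word, big-endian) and in the SAPROM on the
+ FMV-18x. */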
+ if (is_at1700) {
+ for(i = 0; i < 3; i++) {
+ unsigned short eeprom_val = read_eeprom(ioaddr, 4+i);
+ ((unsigned short *)dev->dev_addr)[i] = ntohs(eeprom_val);
+ }
+ } else {
+ for(i = 0; i < 6; i++) {
+ unsigned char val = inb(ioaddr + SAPROM + i);
+ dev->dev_addr[i] = val;
+ }
+ }
+ printk("%pM", dev->dev_addr);
+
+ /* The EEPROM word 12 bit 0x0400 means use regular 100 ohm 10baseT signals,
+ rather than 150 ohm shielded twisted pair compensation.
+ 0x0000 == auto-sense the interface
+ 0x0800 == use TP interface
+ 0x1800 == use coax interface
+ */
+ {
+ const char *porttype[] = {"auto-sense", "10baseT", "auto-sense", "10base2"};
+ if (is_at1700) {
+ ushort setup_value = read_eeprom(ioaddr, 12);
+ dev->if_port = setup_value >> 8;
+ } else {
+ ushort setup_value = inb(ioaddr + CARDSTATUS);
+ switch (setup_value & 0x07) {
+ case 0x01: /* 10base5 */
+ case 0x02: /* 10base2 */
+ dev->if_port = 0x18; break;
+ case 0x04: /* 10baseT */
+ dev->if_port = 0x08; break;
+ default: /* auto-sense */
+ dev->if_port = 0x00; break;
+ }
+ }
+ printk(" %s interface.\n", porttype[(dev->if_port>>3) & 3]);
+ }
+
+ /* Set the configuration register 0 to 32K 100ns byte-wide memory, 16 bit
+ bus access, two 4K Tx queues, and disabled Tx and Rx. */
+ outb(0xda, ioaddr + CONFIG_0);
+
+ /* Set the station address in bank zero. */
+ outb(0x00, ioaddr + CONFIG_1);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + PORT_OFFSET(8 + i));
+
+ /* Switch to bank 1 and set the multicast table to accept none. */
+ outb(0x04, ioaddr + CONFIG_1);
+ for (i = 0; i < 8; i++)
+ outb(0x00, ioaddr + PORT_OFFSET(8 + i));
+
+
+ /* Switch to bank 2 */
+ /* Lock our I/O address, and set manual processing mode for 16 collisions. */
+ outb(0x08, ioaddr + CONFIG_1);
+ outb(dev->if_port, ioaddr + MODE13);
+ outb(0x00, ioaddr + COL16CNTL);
+
+ if (net_debug)
+ printk(version);
+
+ dev->netdev_ops = &at1700_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ spin_lock_init(&lp->lock);
+
+ lp->jumpered = is_fmv18x;
+ lp->mca_slot = slot;
+ /* Snarf the interrupt vector now. */
+ ret = request_irq(irq, net_interrupt, 0, DRV_NAME, dev);
+ if (ret) {
+ printk(KERN_ERR "AT1700 at %#3x is unusable due to a "
+ "conflict on IRQ %d.\n",
+ ioaddr, irq);
+ goto err_mca;
+ }
+
+ return 0;
+
+err_mca:
+#ifdef CONFIG_MCA_LEGACY
+ if (slot >= 0)
+ mca_mark_as_unused(slot);
+#endif
+err_out:
+ release_region(ioaddr, AT1700_IO_EXTENT);
+ return ret;
+}
+
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x40 /* EEPROM shift clock, in reg. 16. */
+#define EE_CS 0x20 /* EEPROM chip select, in reg. 16. */
+#define EE_DATA_WRITE 0x80 /* EEPROM chip data in, in reg. 17. */
+#define EE_DATA_READ 0x80 /* EEPROM chip data out, in reg. 17. */
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD (5 << 6)
+#define EE_READ_CMD (6 << 6)
+#define EE_ERASE_CMD (7 << 6)
+
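+/* Bit-bang a Microwire-style serial EEPROM read: shift the read
+   command (start bit, opcode, and 6-bit address) out MSB first while
+   toggling the shift clock, then clock the 16 data bits back in. */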
+static int __init read_eeprom(long ioaddr, int location)
+{
+ int i;
+ unsigned short retval = 0;
+ long ee_addr = ioaddr + EEPROM_Ctrl;
+ long ee_daddr = ioaddr + EEPROM_Data;
+ int read_cmd = location | EE_READ_CMD;
+
+ /* Shift the read command bits out. */
+ for (i = 9; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ outb(EE_CS, ee_addr);
+ outb(dataval, ee_daddr);
+ outb(EE_CS | EE_SHIFT_CLK, ee_addr); /* EEPROM clock tick. */
+ }
+ outb(EE_DATA_WRITE, ee_daddr);
+ for (i = 16; i > 0; i--) {
+ outb(EE_CS, ee_addr);
+ outb(EE_CS | EE_SHIFT_CLK, ee_addr);
+ retval = (retval << 1) | ((inb(ee_daddr) & EE_DATA_READ) ? 1 : 0);
+ }
+
+ /* Terminate the EEPROM access. */
+ outb(EE_CS, ee_addr);
+ outb(EE_SHIFT_CLK, ee_addr);
+ outb(0, ee_addr);
+ return retval;
+}
+
+
+
+static int net_open(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ /* Set the configuration register 0 to 32K 100ns byte-wide memory, 16 bit
+ bus access, and two 4K Tx queues. */
+ outb(0x5a, ioaddr + CONFIG_0);
+
+ /* Powerup, switch to register bank 2, and enable the Rx and Tx. */
+ outb(0xe8, ioaddr + CONFIG_1);
+
+ lp->tx_started = 0;
+ lp->tx_queue_ready = 1;
+ lp->rx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+
+ /* Turn on hardware Tx and Rx interrupts. */
+ outb(0x82, ioaddr + TX_INTR);
+ outb(0x81, ioaddr + RX_INTR);
+
+ /* Enable the IRQ on FMV-18x boards where it is feasible. */
+ if (lp->jumpered) {
+ outb(0x80, ioaddr + IOCONFIG1);
+ }
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+static void net_tx_timeout (struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ printk ("%s: transmit timed out with status %04x, %s?\n", dev->name,
+ inw (ioaddr + STATUS), inb (ioaddr + TX_STATUS) & 0x80
+ ? "IRQ conflict" : "network cable problem");
+ printk ("%s: timeout registers: %04x %04x %04x %04x %04x %04x %04x %04x.\n",
+ dev->name, inw(ioaddr + TX_STATUS), inw(ioaddr + TX_INTR), inw(ioaddr + TX_MODE),
+ inw(ioaddr + CONFIG_0), inw(ioaddr + DATAPORT), inw(ioaddr + TX_START),
+ inw(ioaddr + MODE13 - 1), inw(ioaddr + RX_CTRL));
+ dev->stats.tx_errors++;
+ /* ToDo: We should try to restart the adaptor... */
+ outw(0xffff, ioaddr + MODE24);
+ outw (0xffff, ioaddr + TX_STATUS);
+ outb (0x5a, ioaddr + CONFIG_0);
+ outb (0xe8, ioaddr + CONFIG_1);
+ outw (0x8182, ioaddr + TX_INTR);
+ outb (0x00, ioaddr + TX_START);
+ outb (0x03, ioaddr + COL16CNTL);
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+
+ lp->tx_started = 0;
+ lp->tx_queue_ready = 1;
+ lp->rx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+
+ netif_wake_queue(dev);
+}
+
+
+static netdev_tx_t net_send_packet (struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ short len = skb->len;
+ unsigned char *buf = skb->data;
+ static u8 pad[ETH_ZLEN];
+
+ netif_stop_queue (dev);
+
+ /* We may not start transmitting unless we have finished transferring
+ a packet into the Tx queue. While executing the following
+ code we may catch a Tx interrupt. Thus we clear
+ tx_queue_ready, so that the interrupt routine
+ (net_interrupt) is prevented from starting a transmit. */
+ lp->tx_queue_ready = 0;
+ {
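+ /* Each queued frame goes into the FIFO as a 16-bit byte count
+ followed by the word-padded frame data; the chip buffers
+ packets in its two 4K Tx queues. */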
+ outw (length, ioaddr + DATAPORT);
+ /* Packet data */
+ outsw (ioaddr + DATAPORT, buf, len >> 1);
+ /* Check for dribble byte */
+ if (len & 1) {
+ outw(skb->data[skb->len-1], ioaddr + DATAPORT);
+ len++;
+ }
+ /* Check for packet padding */
+ if (length != skb->len)
+ outsw(ioaddr + DATAPORT, pad, (length - len + 1) >> 1);
+
+ lp->tx_queue++;
+ lp->tx_queue_len += length + 2;
+ }
+ lp->tx_queue_ready = 1;
+
+ if (lp->tx_started == 0) {
+ /* If the Tx is idle, always trigger a transmit. */
+ outb (0x80 | lp->tx_queue, ioaddr + TX_START);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ lp->tx_started = 1;
+ netif_start_queue (dev);
+ } else if (lp->tx_queue_len < 4096 - 1502)
+ /* Yes, there is room for one more packet. */
+ netif_start_queue (dev);
+ dev_kfree_skb (skb);
+
+ return NETDEV_TX_OK;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static irqreturn_t net_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct net_local *lp;
+ int ioaddr, status;
+ int handled = 0;
+
+ if (dev == NULL) {
+ printk ("at1700_interrupt(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ spin_lock (&lp->lock);
+
+ status = inw(ioaddr + TX_STATUS);
+ outw(status, ioaddr + TX_STATUS);
+
+ if (net_debug > 4)
+ printk("%s: Interrupt with status %04x.\n", dev->name, status);
+ if (lp->rx_started == 0 &&
+ (status & 0xff00 || (inb(ioaddr + RX_MODE) & 0x40) == 0)) {
+ /* Got a packet(s).
+ We must not execute net_rx more than once at the same time
+ for the same device. While executing net_rx, we may catch
+ a Tx interrupt. Thus we set rx_started, so that the
+ interrupt routine (net_interrupt) is prevented from diving
+ into net_rx again. */
+ handled = 1;
+ lp->rx_started = 1;
+ outb(0x00, ioaddr + RX_INTR); /* Disable RX intr. */
+ net_rx(dev);
+ outb(0x81, ioaddr + RX_INTR); /* Enable RX intr. */
+ lp->rx_started = 0;
+ }
+ if (status & 0x00ff) {
+ handled = 1;
+ if (status & 0x02) {
+ /* More than 16 collisions occurred */
+ if (net_debug > 4)
+ printk("%s: 16 Collision occur during Txing.\n", dev->name);
+ /* Cancel sending a packet. */
+ outb(0x03, ioaddr + COL16CNTL);
+ dev->stats.collisions++;
+ }
+ if (status & 0x82) {
+ dev->stats.tx_packets++;
+ /* If the Tx queue holds packets and no packet is currently
+ being transferred from the host, start
+ transmitting. */
+ if (lp->tx_queue && lp->tx_queue_ready) {
+ outb(0x80 | lp->tx_queue, ioaddr + TX_START);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ netif_wake_queue (dev);
+ } else {
+ lp->tx_started = 0;
+ netif_wake_queue (dev);
+ }
+ }
+ }
+
+ spin_unlock (&lp->lock);
+ return IRQ_RETVAL(handled);
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void
+net_rx(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ int boguscount = 5;
+
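+ /* Each frame in the receive FIFO is preceded by two words:
+ a status word and the byte count. */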
+ while ((inb(ioaddr + RX_MODE) & 0x40) == 0) {
+ ushort status = inw(ioaddr + DATAPORT);
+ ushort pkt_len = inw(ioaddr + DATAPORT);
+
+ if (net_debug > 4)
+ printk("%s: Rxing packet mode %02x status %04x.\n",
+ dev->name, inb(ioaddr + RX_MODE), status);
+#ifndef final_version
+ if (status == 0) {
+ outb(0x05, ioaddr + RX_CTRL);
+ break;
+ }
+#endif
+
+ if ((status & 0xF0) != 0x20) { /* There was an error. */
+ dev->stats.rx_errors++;
+ if (status & 0x08) dev->stats.rx_length_errors++;
+ if (status & 0x04) dev->stats.rx_frame_errors++;
+ if (status & 0x02) dev->stats.rx_crc_errors++;
+ if (status & 0x01) dev->stats.rx_over_errors++;
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ if (pkt_len > 1550) {
+ printk("%s: The AT1700 claimed a very large packet, size %d.\n",
+ dev->name, pkt_len);
+ /* Prime the FIFO and then flush the packet. */
+ inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT);
+ outb(0x05, ioaddr + RX_CTRL);
+ dev->stats.rx_errors++;
+ break;
+ }
+ skb = dev_alloc_skb(pkt_len+3);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet (len %d).\n",
+ dev->name, pkt_len);
+ /* Prime the FIFO and then flush the packet. */
+ inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT);
+ outb(0x05, ioaddr + RX_CTRL);
+ dev->stats.rx_dropped++;
+ break;
+ }
+ skb_reserve(skb,2);
+
+ insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1);
+ skb->protocol=eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ if (--boguscount <= 0)
+ break;
+ }
+
+ /* If any worthwhile packets have been received, netif_rx()
+ has queued them for the network stack, which will process
+ them after this interrupt handler returns. */
+ {
+ int i;
+ for (i = 0; i < 20; i++) {
+ if ((inb(ioaddr + RX_MODE) & 0x40) == 0x40)
+ break;
+ inw(ioaddr + DATAPORT); /* dummy status read */
+ outb(0x05, ioaddr + RX_CTRL);
+ }
+
+ if (net_debug > 5)
+ printk("%s: Exint Rx packet with mode %02x after %d ticks.\n",
+ dev->name, inb(ioaddr + RX_MODE), i);
+ }
+}
+
+/* The inverse routine to net_open(). */
+static int net_close(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ netif_stop_queue(dev);
+
+ /* Set configuration register 0 to disable Tx and Rx. */
+ outb(0xda, ioaddr + CONFIG_0);
+
+ /* No statistic counters on the chip to update. */
+
+ /* Disable the IRQ on FMV-18x boards where it is feasible. */
+ if (lp->jumpered)
+ outb(0x00, ioaddr + IOCONFIG1);
+
+ /* Power-down the chip. Green, green, green! */
+ outb(0x00, ioaddr + CONFIG_1);
+ return 0;
+}
+
+/*
+ Set the multicast/promiscuous mode for this adaptor.
+*/
+
+static void
+set_rx_mode(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct net_local *lp = netdev_priv(dev);
+ unsigned char mc_filter[8]; /* Multicast hash filter */
+ unsigned long flags;
+
+ if (dev->flags & IFF_PROMISC) {
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
+ } else if (netdev_mc_count(dev) > MC_FILTERBREAK ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ outb(2, ioaddr + RX_MODE); /* Use normal mode. */
+ } else if (netdev_mc_empty(dev)) {
+ memset(mc_filter, 0x00, sizeof(mc_filter));
+ outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
+ } else {
+ struct netdev_hw_addr *ha;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
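+ /* Hash each address with the top six bits of its little-endian
+ CRC-32; the hash value selects one of the 64 filter bits. */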
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned int bit =
+ ether_crc_le(ETH_ALEN, ha->addr) >> 26;
+ mc_filter[bit >> 3] |= (1 << (bit & 7));
+ }
+ outb(0x02, ioaddr + RX_MODE); /* Use normal mode. */
+ }
+
+ spin_lock_irqsave (&lp->lock, flags);
+ if (memcmp(mc_filter, lp->mc_filter, sizeof(mc_filter))) {
+ int i;
+ int saved_bank = inw(ioaddr + CONFIG_0);
+ /* Switch to bank 1 and set the multicast table. */
+ outw((saved_bank & ~0x0C00) | 0x0480, ioaddr + CONFIG_0);
+ for (i = 0; i < 8; i++)
+ outb(mc_filter[i], ioaddr + PORT_OFFSET(8 + i));
+ memcpy(lp->mc_filter, mc_filter, sizeof(mc_filter));
+ outw(saved_bank, ioaddr + CONFIG_0);
+ }
+ spin_unlock_irqrestore (&lp->lock, flags);
+}
+
+#ifdef MODULE
+static struct net_device *dev_at1700;
+
+module_param(io, int, 0);
+module_param(irq, int, 0);
+module_param(net_debug, int, 0);
+MODULE_PARM_DESC(io, "AT1700/FMV18X I/O base address");
+MODULE_PARM_DESC(irq, "AT1700/FMV18X IRQ number");
+MODULE_PARM_DESC(net_debug, "AT1700/FMV18X debug level (0-6)");
+
+static int __init at1700_module_init(void)
+{
+ if (io == 0)
+ printk("at1700: You should not use auto-probing with insmod!\n");
+ dev_at1700 = at1700_probe(-1);
+ if (IS_ERR(dev_at1700))
+ return PTR_ERR(dev_at1700);
+ return 0;
+}
+
+static void __exit at1700_module_exit(void)
+{
+ unregister_netdev(dev_at1700);
+ cleanup_card(dev_at1700);
+ free_netdev(dev_at1700);
+}
+module_init(at1700_module_init);
+module_exit(at1700_module_exit);
+#endif /* MODULE */
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/fujitsu/eth16i.c b/drivers/net/ethernet/fujitsu/eth16i.c
new file mode 100644
index 000000000000..b0e2313af3d1
--- /dev/null
+++ b/drivers/net/ethernet/fujitsu/eth16i.c
@@ -0,0 +1,1484 @@
+/* eth16i.c An ICL EtherTeam 16i and 32 EISA ethernet driver for Linux
+
+ Written 1994-1999 by Mika Kuoppala
+
+ Copyright (C) 1994-1999 by Mika Kuoppala
+ Based on skeleton.c and heavily on at1700.c by Donald Becker
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as miku@iki.fi
+
+ This driver supports the following cards:
+ - ICL EtherTeam 16i
+ - ICL EtherTeam 32 EISA
+ (Uses true 32 bit transfers rather than 16i compatibility mode)
+
+ Example Module usage:
+ insmod eth16i.o io=0x2a0 mediatype=bnc
+
+ mediatype can be one of the following: bnc,tp,dix,auto,eprom
+
+ 'auto' will try to autoprobe the mediatype.
+ 'eprom' will use whatever type is defined in the eprom.
+
+ I have benchmarked the driver with a PII/300MHz as an ftp client
+ and a 486/33MHz as an ftp server. Top speed was 1128.37 kilobytes/sec.
+
+ Sources:
+ - skeleton.c a sample network driver core for linux,
+ written by Donald Becker <becker@scyld.com>
+ - at1700.c a driver for Allied Telesis AT1700, written
+ by Donald Becker.
+ - e16iSRV.asm a Netware 3.X Server Driver for ICL EtherTeam16i
+ written by Markku Viima
+ - The Fujitsu MB86965 databook.
+
+ The author thanks the following persons for their valuable assistance:
+ Markku Viima (ICL)
+ Ari Valve (ICL)
+ Donald Becker
+ Kurt Huwig <kurt@huwig.de>
+
+ Revision history:
+
+ Version Date Description
+
+ 0.01 15.12-94 Initial version (card detection)
+ 0.02 23.01-95 Interrupt is now hooked correctly
+ 0.03 01.02-95 Rewrote initialization part
+ 0.04 07.02-95 Base skeleton done...
+ Made a few changes to signature checking
+ to make it a bit reliable.
+ - fixed bug in tx_buf mapping
+ - fixed bug in initialization (DLC_EN
+ wasn't enabled when initialization
+ was done.)
+ 0.05 08.02-95 If there were more than one packet to send,
+ transmit was jammed due to invalid
+ register write...now fixed
+ 0.06 19.02-95 Rewrote interrupt handling
+ 0.07 13.04-95 Wrote EEPROM read routines
+ Card configuration now set according to
+ data read from EEPROM
+ 0.08 23.06-95 Wrote part that tries to probe used interface
+ port if AUTO is selected
+
+ 0.09 01.09-95 Added module support
+
+ 0.10 04.09-95 Fixed receive packet allocation to work
+ with kernels > 1.3.x
+
+ 0.20 20.09-95 Added support for EtherTeam32 EISA
+
+ 0.21 17.10-95 Removed the unnecessary extern
+ init_etherdev() declaration. Some
+ other cleanups.
+
+ 0.22 22.02-96 Receive buffer was not flushed
+ correctly when faulty packet was
+ received. Now fixed.
+
+ 0.23 26.02-96 Made resetting the adapter
+ more reliable.
+
+ 0.24 27.02-96 Rewrote faulty packet handling in eth16i_rx
+
+ 0.25 22.05-96 kfree() was missing from cleanup_module.
+
+ 0.26 11.06-96 Sometimes card was not found by
+ check_signature(). Now made more reliable.
+
+ 0.27 23.06-96 Oops. 16 consecutive collisions halted
+ adapter. Now will try to retransmit
+ MAX_COL_16 times before finally giving up.
+
+ 0.28 28.10-97 Added dev_id parameter (NULL) for free_irq
+
+ 0.29 29.10-97 Multiple card support for module users
+
+ 0.30 30.10-97 Fixed irq allocation bug.
+ (request_irq moved from probe to open)
+
+ 0.30a 21.08-98 Card detection made more relaxed. Driver
+ had problems with some TCP/IP-PROM boots
+ to find the card. Suggested by
+ Kurt Huwig <kurt@huwig.de>
+
+ 0.31 28.08-98 Media interface port can now be selected
+ with module parameters or kernel
+ boot parameters.
+
+ 0.32 31.08-98 IRQ was never freed if open/close
+ pair wasn't called. Now fixed.
+
+ 0.33 10.09-98 When eth16i_open() was called after
+ eth16i_close() chip never recovered.
+ Now more shallow reset is made on
+ close.
+
+ 0.34 29.06-99 Fixed one bad #ifdef.
+ Changed ioaddr -> io for consistency
+
+ 0.35 01.07-99 transmit,-receive bytes were never
+ updated in stats.
+
+ Bugs:
+ In some cases the media interface autoprobing code doesn't find
+ the correct interface type. In this case you can
+ manually choose the interface type in DOS with E16IC.EXE, the
+ configuration software for EtherTeam16i and EtherTeam32 cards.
+ This is also true for the IRQ setting. You cannot use a module
+ parameter to configure the IRQ of the card (yet).
+
+ To do:
+ - Real multicast support
+ - Rewrite the media interface autoprobing code. It's _horrible_!
+ - Possibly merge all the MB86965 specific code into an external
+ module for use by eth16i.c and Donald's at1700.c
+ - IRQ configuration with a module parameter. I will do
+ this when I get enough info about setting the
+ irq without a configuration utility.
+*/
+
+static char *version =
+ "eth16i.c: v0.35 01-Jul-1999 Mika Kuoppala (miku@iki.fi)\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/dma.h>
+
+
+
+/* Few macros */
+#define BITSET(ioaddr, bnum) ((outb(((inb(ioaddr)) | (bnum)), ioaddr)))
+#define BITCLR(ioaddr, bnum) ((outb(((inb(ioaddr)) & (~(bnum))), ioaddr)))
+
+/* This is the I/O address space for Etherteam 16i adapter. */
+#define ETH16I_IO_EXTENT 32
+
+/* Ticks before deciding that transmit has timed out */
+#define TX_TIMEOUT (400*HZ/1000)
+
+/* Maximum loop count when receiving packets */
+#define MAX_RX_LOOP 20
+
+/* Some interrupt masks */
+#define ETH16I_INTR_ON 0xef8a /* Higher is receive mask */
+#define ETH16I_INTR_OFF 0x0000
+
+/* Buffers header status byte meanings */
+#define PKT_GOOD BIT(5)
+#define PKT_GOOD_RMT BIT(4)
+#define PKT_SHORT BIT(3)
+#define PKT_ALIGN_ERR BIT(2)
+#define PKT_CRC_ERR BIT(1)
+#define PKT_RX_BUF_OVERFLOW BIT(0)
+
+/* Transmit status register (DLCR0) */
+#define TX_STATUS_REG 0
+#define TX_DONE BIT(7)
+#define NET_BUSY BIT(6)
+#define TX_PKT_RCD BIT(5)
+#define CR_LOST BIT(4)
+#define TX_JABBER_ERR BIT(3)
+#define COLLISION BIT(2)
+#define COLLISIONS_16 BIT(1)
+
+/* Receive status register (DLCR1) */
+#define RX_STATUS_REG 1
+#define RX_PKT BIT(7) /* Packet received */
+#define BUS_RD_ERR BIT(6)
+#define SHORT_PKT_ERR BIT(3)
+#define ALIGN_ERR BIT(2)
+#define CRC_ERR BIT(1)
+#define RX_BUF_OVERFLOW BIT(0)
+
+/* Transmit Interrupt Enable Register (DLCR2) */
+#define TX_INTR_REG 2
+#define TX_INTR_DONE BIT(7)
+#define TX_INTR_COL BIT(2)
+#define TX_INTR_16_COL BIT(1)
+
+/* Receive Interrupt Enable Register (DLCR3) */
+#define RX_INTR_REG 3
+#define RX_INTR_RECEIVE BIT(7)
+#define RX_INTR_SHORT_PKT BIT(3)
+#define RX_INTR_CRC_ERR BIT(1)
+#define RX_INTR_BUF_OVERFLOW BIT(0)
+
+/* Transmit Mode Register (DLCR4) */
+#define TRANSMIT_MODE_REG 4
+#define LOOPBACK_CONTROL BIT(1)
+#define CONTROL_OUTPUT BIT(2)
+
+/* Receive Mode Register (DLCR5) */
+#define RECEIVE_MODE_REG 5
+#define RX_BUFFER_EMPTY BIT(6)
+#define ACCEPT_BAD_PACKETS BIT(5)
+#define RECEIVE_SHORT_ADDR BIT(4)
+#define ACCEPT_SHORT_PACKETS BIT(3)
+#define REMOTE_RESET BIT(2)
+
+#define ADDRESS_FILTER_MODE (BIT(1) | BIT(0))
+#define REJECT_ALL 0
+#define ACCEPT_ALL 3
+#define MODE_1 1 /* NODE ID, BC, MC, 2-24th bit */
+#define MODE_2 2 /* NODE ID, BC, MC, Hash Table */
+
+/* Configuration Register 0 (DLCR6) */
+#define CONFIG_REG_0 6
+#define DLC_EN BIT(7)
+#define SRAM_CYCLE_TIME_100NS BIT(6)
+#define SYSTEM_BUS_WIDTH_8 BIT(5) /* 1 = 8bit, 0 = 16bit */
+#define BUFFER_WIDTH_8 BIT(4) /* 1 = 8bit, 0 = 16bit */
+#define TBS1 BIT(3)
+#define TBS0 BIT(2)
+#define SRAM_BS1 BIT(1) /* 00=8kb, 01=16kb */
+#define SRAM_BS0 BIT(0) /* 10=32kb, 11=64kb */
+
+#ifndef ETH16I_TX_BUF_SIZE /* 0 = 2kb, 1 = 4kb */
+#define ETH16I_TX_BUF_SIZE 3 /* 2 = 8kb, 3 = 16kb */
+#endif
+#define TX_BUF_1x2048 0
+#define TX_BUF_2x2048 1
+#define TX_BUF_2x4098 2
+#define TX_BUF_2x8192 3
+
+/* Configuration Register 1 (DLCR7) */
+#define CONFIG_REG_1 7
+#define POWERUP BIT(5)
+
+/* Transmit start register */
+#define TRANSMIT_START_REG 10
+#define TRANSMIT_START_RB 2
+#define TX_START BIT(7) /* Rest of register bit indicate*/
+ /* number of packets in tx buffer*/
+/* Node ID registers (DLCR8-13) */
+#define NODE_ID_0 8
+#define NODE_ID_RB 0
+
+/* Hash Table registers (HT8-15) */
+#define HASH_TABLE_0 8
+#define HASH_TABLE_RB 1
+
+/* Buffer memory ports */
+#define BUFFER_MEM_PORT_LB 8
+#define DATAPORT BUFFER_MEM_PORT_LB
+#define BUFFER_MEM_PORT_HB 9
+
+/* 16 Collision control register (BMPR11) */
+#define COL_16_REG 11
+#define HALT_ON_16 0x00
+#define RETRANS_AND_HALT_ON_16 0x02
+
+/* Maximum number of attempts to send after 16 consecutive collisions */
+#define MAX_COL_16 10
+
+/* DMA Burst and Transceiver Mode Register (BMPR13) */
+#define TRANSCEIVER_MODE_REG 13
+#define TRANSCEIVER_MODE_RB 2
+#define IO_BASE_UNLOCK BIT(7)
+#define LOWER_SQUELCH_TRESH BIT(6)
+#define LINK_TEST_DISABLE BIT(5)
+#define AUI_SELECT BIT(4)
+#define DIS_AUTO_PORT_SEL BIT(3)
+
+/* Filter Self Receive Register (BMPR14) */
+#define FILTER_SELF_RX_REG 14
+#define SKIP_RX_PACKET BIT(2)
+#define FILTER_SELF_RECEIVE BIT(0)
+
+/* EEPROM Control Register (BMPR 16) */
+#define EEPROM_CTRL_REG 16
+
+/* EEPROM Data Register (BMPR 17) */
+#define EEPROM_DATA_REG 17
+
+/* NMC93CSx6 EEPROM Control Bits */
+#define CS_0 0x00
+#define CS_1 0x20
+#define SK_0 0x00
+#define SK_1 0x40
+#define DI_0 0x00
+#define DI_1 0x80
+
+/* NMC93CSx6 EEPROM Instructions */
+#define EEPROM_READ 0x80
+
+/* NMC93CSx6 EEPROM Addresses */
+#define E_NODEID_0 0x02
+#define E_NODEID_1 0x03
+#define E_NODEID_2 0x04
+#define E_PORT_SELECT 0x14
+ #define E_PORT_BNC 0x00
+ #define E_PORT_DIX 0x01
+ #define E_PORT_TP 0x02
+ #define E_PORT_AUTO 0x03
+ #define E_PORT_FROM_EPROM 0x04
+#define E_PRODUCT_CFG 0x30
+
+
+/* Macro to slow down io between EEPROM clock transitions */
+#define eeprom_slow_io() do { int _i = 40; while(--_i > 0) { inb(0x80); }}while(0)
+
+/* Jumperless Configuration Register (BMPR19) */
+#define JUMPERLESS_CONFIG 19
+
+/* ID ROM registers, writing to them also resets some parts of chip */
+#define ID_ROM_0 24
+#define ID_ROM_7 31
+#define RESET ID_ROM_0
+
+/* This is the I/O address list to be probed when seeking the card */
+static unsigned int eth16i_portlist[] __initdata = {
+ 0x260, 0x280, 0x2A0, 0x240, 0x340, 0x320, 0x380, 0x300, 0
+};
+
+static unsigned int eth32i_portlist[] __initdata = {
+ 0x1000, 0x2000, 0x3000, 0x4000, 0x5000, 0x6000, 0x7000, 0x8000,
+ 0x9000, 0xA000, 0xB000, 0xC000, 0xD000, 0xE000, 0xF000, 0
+};
+
+/* This is the Interrupt lookup table for Eth16i card */
+static unsigned int eth16i_irqmap[] __initdata = { 9, 10, 5, 15, 0 };
+#define NUM_OF_ISA_IRQS 4
+
+/* This is the Interrupt lookup table for Eth32i card */
+static unsigned int eth32i_irqmap[] __initdata = { 3, 5, 7, 9, 10, 11, 12, 15, 0 };
+#define EISA_IRQ_REG 0xc89
+#define NUM_OF_EISA_IRQS 8
+
+static unsigned int eth16i_tx_buf_map[] = { 2048, 2048, 4096, 8192 };
+
+/* Use 0 for production, 1 for verification, >2 for debug */
+#ifndef ETH16I_DEBUG
+#define ETH16I_DEBUG 0
+#endif
+static unsigned int eth16i_debug = ETH16I_DEBUG;
+
+/* Information for each board */
+
+struct eth16i_local {
+ unsigned char tx_started;
+ unsigned char tx_buf_busy;
+ unsigned short tx_queue; /* Number of packets in transmit buffer */
+ unsigned short tx_queue_len;
+ unsigned int tx_buf_size;
+ unsigned long open_time;
+ unsigned long tx_buffered_packets;
+ unsigned long tx_buffered_bytes;
+ unsigned long col_16;
+ spinlock_t lock;
+};
+
+/* Function prototypes */
+
+static int eth16i_probe1(struct net_device *dev, int ioaddr);
+static int eth16i_check_signature(int ioaddr);
+static int eth16i_probe_port(int ioaddr);
+static void eth16i_set_port(int ioaddr, int porttype);
+static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l);
+static int eth16i_receive_probe_packet(int ioaddr);
+static int eth16i_get_irq(int ioaddr);
+static int eth16i_read_eeprom(int ioaddr, int offset);
+static int eth16i_read_eeprom_word(int ioaddr);
+static void eth16i_eeprom_cmd(int ioaddr, unsigned char command);
+static int eth16i_open(struct net_device *dev);
+static int eth16i_close(struct net_device *dev);
+static netdev_tx_t eth16i_tx(struct sk_buff *skb, struct net_device *dev);
+static void eth16i_rx(struct net_device *dev);
+static void eth16i_timeout(struct net_device *dev);
+static irqreturn_t eth16i_interrupt(int irq, void *dev_id);
+static void eth16i_reset(struct net_device *dev);
+static void eth16i_timeout(struct net_device *dev);
+static void eth16i_skip_packet(struct net_device *dev);
+static void eth16i_multicast(struct net_device *dev);
+static void eth16i_select_regbank(unsigned char regbank, int ioaddr);
+static void eth16i_initialize(struct net_device *dev, int boot);
+
+#if 0
+static int eth16i_set_irq(struct net_device *dev);
+#endif
+
+#ifdef MODULE
+static ushort eth16i_parse_mediatype(const char* s);
+#endif
+
+static char cardname[] __initdata = "ICL EtherTeam 16i/32";
+
+static int __init do_eth16i_probe(struct net_device *dev)
+{
+ int i;
+ int ioaddr;
+ int base_addr = dev->base_addr;
+
+ if(eth16i_debug > 4)
+ printk(KERN_DEBUG "Probing started for %s\n", cardname);
+
+ if(base_addr > 0x1ff) /* Check only single location */
+ return eth16i_probe1(dev, base_addr);
+ else if(base_addr != 0) /* Don't probe at all */
+ return -ENXIO;
+
+ /* Seek card from the ISA io address space */
+ for(i = 0; (ioaddr = eth16i_portlist[i]) ; i++)
+ if(eth16i_probe1(dev, ioaddr) == 0)
+ return 0;
+
+ /* Seek card from the EISA io address space */
+ for(i = 0; (ioaddr = eth32i_portlist[i]) ; i++)
+ if(eth16i_probe1(dev, ioaddr) == 0)
+ return 0;
+
+ return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init eth16i_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct eth16i_local));
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_eth16i_probe(dev);
+ if (err)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static const struct net_device_ops eth16i_netdev_ops = {
+ .ndo_open = eth16i_open,
+ .ndo_stop = eth16i_close,
+ .ndo_start_xmit = eth16i_tx,
+ .ndo_set_rx_mode = eth16i_multicast,
+ .ndo_tx_timeout = eth16i_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __init eth16i_probe1(struct net_device *dev, int ioaddr)
+{
+ struct eth16i_local *lp = netdev_priv(dev);
+ static unsigned version_printed;
+ int retval;
+
+ /* Let's grab the region */
+ if (!request_region(ioaddr, ETH16I_IO_EXTENT, cardname))
+ return -EBUSY;
+
+ /*
+ The MB86985 chip has one register which holds the io
+ address at which the chip lies. First read this register and compare
+ it to our current io address; if they match, this could
+ be our chip.
+ */
+
+ if(ioaddr < 0x1000) {
+ if(eth16i_portlist[(inb(ioaddr + JUMPERLESS_CONFIG) & 0x07)]
+ != ioaddr) {
+ retval = -ENODEV;
+ goto out;
+ }
+ }
+
+ /* Now we will go a bit deeper and try to find the chip's signature */
+
+ if(eth16i_check_signature(ioaddr) != 0) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ /*
+ Now it seems that we have found an ethernet chip at this particular
+ ioaddr. The MB86985 chip has a feature whereby reading a
+ certain register increases its io base address to the next
+ configurable slot. Now that we have found the chip, the first thing
+ is to make sure that the chip's ioaddr will hold still here.
+ */
+
+ eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
+ outb(0x00, ioaddr + TRANSCEIVER_MODE_REG);
+
+ outb(0x00, ioaddr + RESET); /* Reset some parts of chip */
+ BITSET(ioaddr + CONFIG_REG_0, BIT(7)); /* Disable the data link */
+
+ if(eth16i_debug && version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+
+ dev->base_addr = ioaddr;
+ dev->irq = eth16i_get_irq(ioaddr);
+
+ /* Try to obtain interrupt vector */
+
+ if ((retval = request_irq(dev->irq, (void *)&eth16i_interrupt, 0, cardname, dev))) {
+ printk(KERN_WARNING "%s at %#3x, but is unusable due to conflicting IRQ %d.\n",
+ cardname, ioaddr, dev->irq);
+ goto out;
+ }
+
+ printk(KERN_INFO "%s: %s at %#3x, IRQ %d, ",
+ dev->name, cardname, ioaddr, dev->irq);
+
+
+ /* Now we will have to lock the chip's io address */
+ eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
+ outb(0x38, ioaddr + TRANSCEIVER_MODE_REG);
+
+ eth16i_initialize(dev, 1); /* Initialize rest of the chip's registers */
+
+ /* Now let's save some energy by shutting down the chip ;) */
+ BITCLR(ioaddr + CONFIG_REG_1, POWERUP);
+
+ /* Initialize the device structure */
+ dev->netdev_ops = &eth16i_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ spin_lock_init(&lp->lock);
+
+ retval = register_netdev(dev);
+ if (retval)
+ goto out1;
+ return 0;
+out1:
+ free_irq(dev->irq, dev);
+out:
+ release_region(ioaddr, ETH16I_IO_EXTENT);
+ return retval;
+}
+
+
+static void eth16i_initialize(struct net_device *dev, int boot)
+{
+ int ioaddr = dev->base_addr;
+ int i, node_w = 0;
+ unsigned char node_byte = 0;
+
+ /* Setup station address */
+ eth16i_select_regbank(NODE_ID_RB, ioaddr);
+ for(i = 0 ; i < 3 ; i++) {
+ unsigned short node_val = eth16i_read_eeprom(ioaddr, E_NODEID_0 + i);
+ ((unsigned short *)dev->dev_addr)[i] = ntohs(node_val);
+ }
+
+ for(i = 0; i < 6; i++) {
+ outb( ((unsigned char *)dev->dev_addr)[i], ioaddr + NODE_ID_0 + i);
+ if(boot) {
+ printk("%02x", inb(ioaddr + NODE_ID_0 + i));
+ if(i != 5)
+ printk(":");
+ }
+ }
+
+ /* Now we will set multicast addresses to accept none */
+ eth16i_select_regbank(HASH_TABLE_RB, ioaddr);
+ for(i = 0; i < 8; i++)
+ outb(0x00, ioaddr + HASH_TABLE_0 + i);
+
+ /*
+ Now let's disable the transmitter and receiver, set the buffer ram
+ cycle time, bus width and buffer data path width. Also we shall
+ set transmit buffer size and total buffer size.
+ */
+
+ eth16i_select_regbank(2, ioaddr);
+
+ node_byte = 0;
+ node_w = eth16i_read_eeprom(ioaddr, E_PRODUCT_CFG);
+
+ if( (node_w & 0xFF00) == 0x0800)
+ node_byte |= BUFFER_WIDTH_8;
+
+ node_byte |= SRAM_BS1;
+
+ if( (node_w & 0x00FF) == 64)
+ node_byte |= SRAM_BS0;
+
+ node_byte |= DLC_EN | SRAM_CYCLE_TIME_100NS | (ETH16I_TX_BUF_SIZE << 2);
+
+ outb(node_byte, ioaddr + CONFIG_REG_0);
+
+ /* We shall halt transmitting if 16 collisions are detected */
+ outb(HALT_ON_16, ioaddr + COL_16_REG);
+
+#ifdef MODULE
+ /* if_port already set by init_module() */
+#else
+ dev->if_port = (dev->mem_start < E_PORT_FROM_EPROM) ?
+ dev->mem_start : E_PORT_FROM_EPROM;
+#endif
+
+ /* Set interface port type */
+ if(boot) {
+ static const char * const porttype[] = {
+ "BNC", "DIX", "TP", "AUTO", "FROM_EPROM"
+ };
+
+ switch(dev->if_port)
+ {
+
+ case E_PORT_FROM_EPROM:
+ dev->if_port = eth16i_read_eeprom(ioaddr, E_PORT_SELECT);
+ break;
+
+ case E_PORT_AUTO:
+ dev->if_port = eth16i_probe_port(ioaddr);
+ break;
+
+ case E_PORT_BNC:
+ case E_PORT_TP:
+ case E_PORT_DIX:
+ break;
+ }
+
+ printk(" %s interface.\n", porttype[dev->if_port]);
+
+ eth16i_set_port(ioaddr, dev->if_port);
+ }
+
+ /* Set Receive Mode to normal operation */
+ outb(MODE_2, ioaddr + RECEIVE_MODE_REG);
+}
+
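+/* Autoprobe the media port: for each of BNC, DIX and TP in turn,
+   transmit a minimal self-addressed test frame and check whether it
+   comes back; fall back to BNC if none of the ports respond. */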
+static int eth16i_probe_port(int ioaddr)
+{
+ int i;
+ int retcode;
+ unsigned char dummy_packet[64];
+
+ /* Powerup the chip */
+ outb(0xc0 | POWERUP, ioaddr + CONFIG_REG_1);
+
+ BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
+
+ eth16i_select_regbank(NODE_ID_RB, ioaddr);
+
+ for(i = 0; i < 6; i++) {
+ dummy_packet[i] = inb(ioaddr + NODE_ID_0 + i);
+ dummy_packet[i+6] = inb(ioaddr + NODE_ID_0 + i);
+ }
+
+ dummy_packet[12] = 0x00;
+ dummy_packet[13] = 0x04;
+ memset(dummy_packet + 14, 0, sizeof(dummy_packet) - 14);
+
+ eth16i_select_regbank(2, ioaddr);
+
+ for(i = 0; i < 3; i++) {
+ BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
+ BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
+ eth16i_set_port(ioaddr, i);
+
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "Set port number %d\n", i);
+
+ retcode = eth16i_send_probe_packet(ioaddr, dummy_packet, 64);
+ if(retcode == 0) {
+ retcode = eth16i_receive_probe_packet(ioaddr);
+ if(retcode != -1) {
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "Eth16i interface port found at %d\n", i);
+ return i;
+ }
+ }
+ else {
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "TRANSMIT_DONE timeout when probing interface port\n");
+ }
+ }
+
+ if( eth16i_debug > 1)
+ printk(KERN_DEBUG "Using default port\n");
+
+ return E_PORT_BNC;
+}
+
+static void eth16i_set_port(int ioaddr, int porttype)
+{
+ unsigned short temp = 0;
+
+ eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
+ outb(LOOPBACK_CONTROL, ioaddr + TRANSMIT_MODE_REG);
+
+ temp |= DIS_AUTO_PORT_SEL;
+
+ switch(porttype) {
+
+ case E_PORT_BNC :
+ temp |= AUI_SELECT;
+ break;
+
+ case E_PORT_TP :
+ break;
+
+ case E_PORT_DIX :
+ temp |= AUI_SELECT;
+ BITSET(ioaddr + TRANSMIT_MODE_REG, CONTROL_OUTPUT);
+ break;
+ }
+
+ outb(temp, ioaddr + TRANSCEIVER_MODE_REG);
+
+ if(eth16i_debug > 1) {
+ printk(KERN_DEBUG "TRANSMIT_MODE_REG = %x\n", inb(ioaddr + TRANSMIT_MODE_REG));
+ printk(KERN_DEBUG "TRANSCEIVER_MODE_REG = %x\n",
+ inb(ioaddr+TRANSCEIVER_MODE_REG));
+ }
+}
+
+static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l)
+{
+ unsigned long starttime;
+
+ outb(0xff, ioaddr + TX_STATUS_REG);
+
+ outw(l, ioaddr + DATAPORT);
+ outsw(ioaddr + DATAPORT, (unsigned short *)b, (l + 1) >> 1);
+
+ starttime = jiffies;
+ outb(TX_START | 1, ioaddr + TRANSMIT_START_REG);
+
+ while( (inb(ioaddr + TX_STATUS_REG) & 0x80) == 0) {
+ if( time_after(jiffies, starttime + TX_TIMEOUT)) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int eth16i_receive_probe_packet(int ioaddr)
+{
+ unsigned long starttime;
+
+ starttime = jiffies;
+
+ while((inb(ioaddr + TX_STATUS_REG) & 0x20) == 0) {
+ if( time_after(jiffies, starttime + TX_TIMEOUT)) {
+
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "Timeout occurred waiting transmit packet received\n");
+ starttime = jiffies;
+ while((inb(ioaddr + RX_STATUS_REG) & 0x80) == 0) {
+ if( time_after(jiffies, starttime + TX_TIMEOUT)) {
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "Timeout occurred waiting receive packet\n");
+ return -1;
+ }
+ }
+
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "RECEIVE_PACKET\n");
+ return 0; /* Found receive packet */
+ }
+ }
+
+ if(eth16i_debug > 1) {
+ printk(KERN_DEBUG "TRANSMIT_PACKET_RECEIVED %x\n", inb(ioaddr + TX_STATUS_REG));
+ printk(KERN_DEBUG "RX_STATUS_REG = %x\n", inb(ioaddr + RX_STATUS_REG));
+ }
+
+ return 0; /* Return success */
+}
+
+#if 0
+static int eth16i_set_irq(struct net_device* dev)
+{
+ const int ioaddr = dev->base_addr;
+ const int irq = dev->irq;
+ int i = 0;
+
+ if(ioaddr < 0x1000) {
+ while(eth16i_irqmap[i] && eth16i_irqmap[i] != irq)
+ i++;
+
+ if(i < NUM_OF_ISA_IRQS) {
+ u8 cbyte = inb(ioaddr + JUMPERLESS_CONFIG);
+ cbyte = (cbyte & 0x3F) | (i << 6);
+ outb(cbyte, ioaddr + JUMPERLESS_CONFIG);
+ return 0;
+ }
+ }
+ else {
+ printk(KERN_NOTICE "%s: EISA Interrupt cannot be set. Use EISA Configuration utility.\n", dev->name);
+ }
+
+ return -1;
+
+}
+#endif
+
+static int __init eth16i_get_irq(int ioaddr)
+{
+ unsigned char cbyte;
+
+ if( ioaddr < 0x1000) {
+ cbyte = inb(ioaddr + JUMPERLESS_CONFIG);
+ return eth16i_irqmap[((cbyte & 0xC0) >> 6)];
+ } else { /* Oh... the card is EISA, so the method of getting the IRQ is different */
+ unsigned short index = 0;
+ cbyte = inb(ioaddr + EISA_IRQ_REG);
+ while( (cbyte & 0x01) == 0) {
+ cbyte = cbyte >> 1;
+ index++;
+ }
+ return eth32i_irqmap[index];
+ }
+}
+
+static int __init eth16i_check_signature(int ioaddr)
+{
+ int i;
+ unsigned char creg[4] = { 0 };
+
+ for(i = 0; i < 4 ; i++) {
+
+ creg[i] = inb(ioaddr + TRANSMIT_MODE_REG + i);
+
+ if(eth16i_debug > 1)
+ printk("eth16i: read signature byte %x at %x\n",
+ creg[i],
+ ioaddr + TRANSMIT_MODE_REG + i);
+ }
+
+ creg[0] &= 0x0F; /* Mask collision cnr */
+ creg[2] &= 0x7F; /* Mask DCLEN bit */
+
+#if 0
+ /*
+ This was removed because the card was sometimes left in a state
+ from which it couldn't be found anymore. If there is a need
+ for a stricter check, this still has to be fixed.
+ */
+ if( ! ((creg[0] == 0x06) && (creg[1] == 0x41)) ) {
+ if(creg[1] != 0x42)
+ return -1;
+ }
+#endif
+
+ if( !((creg[2] == 0x36) && (creg[3] == 0xE0)) ) {
+ creg[2] &= 0x40;
+ creg[3] &= 0x03;
+
+ if( !((creg[2] == 0x40) && (creg[3] == 0x00)) )
+ return -1;
+ }
+
+ if(eth16i_read_eeprom(ioaddr, E_NODEID_0) != 0)
+ return -1;
+
+ if((eth16i_read_eeprom(ioaddr, E_NODEID_1) & 0xFF00) != 0x4B00)
+ return -1;
+
+ return 0;
+}
+
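+/* NMC93CSx6 access is bit-banged through BMPR16/17:
+   eth16i_eeprom_cmd() shifts out the start bit and 8-bit instruction
+   on DI, then eth16i_read_eeprom_word() clocks 16 data bits in on
+   rising SK edges. */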
+static int eth16i_read_eeprom(int ioaddr, int offset)
+{
+ int data = 0;
+
+ eth16i_eeprom_cmd(ioaddr, EEPROM_READ | offset);
+ outb(CS_1, ioaddr + EEPROM_CTRL_REG);
+ data = eth16i_read_eeprom_word(ioaddr);
+ outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG);
+
+ return data;
+}
+
+static int eth16i_read_eeprom_word(int ioaddr)
+{
+ int i;
+ int data = 0;
+
+ for(i = 16; i > 0; i--) {
+ outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
+ eeprom_slow_io();
+ outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
+ eeprom_slow_io();
+ data = (data << 1) |
+ ((inb(ioaddr + EEPROM_DATA_REG) & DI_1) ? 1 : 0);
+
+ eeprom_slow_io();
+ }
+
+ return data;
+}
+
+static void eth16i_eeprom_cmd(int ioaddr, unsigned char command)
+{
+ int i;
+
+ outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG);
+ outb(DI_0, ioaddr + EEPROM_DATA_REG);
+ outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
+ outb(DI_1, ioaddr + EEPROM_DATA_REG);
+ outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
+
+ for(i = 7; i >= 0; i--) {
+ short cmd = ( (command & (1 << i)) ? DI_1 : DI_0 );
+ outb(cmd, ioaddr + EEPROM_DATA_REG);
+ outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
+ eeprom_slow_io();
+ outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
+ eeprom_slow_io();
+ }
+}
+
+static int eth16i_open(struct net_device *dev)
+{
+ struct eth16i_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ /* Powerup the chip */
+ outb(0xc0 | POWERUP, ioaddr + CONFIG_REG_1);
+
+ /* Initialize the chip */
+ eth16i_initialize(dev, 0);
+
+ /* Set the transmit buffer size */
+ lp->tx_buf_size = eth16i_tx_buf_map[ETH16I_TX_BUF_SIZE & 0x03];
+
+ if(eth16i_debug > 0)
+ printk(KERN_DEBUG "%s: transmit buffer size %d\n",
+ dev->name, lp->tx_buf_size);
+
+ /* Now enable Transmitter and Receiver sections */
+ BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
+
+ /* Now switch to register bank 2, for run time operation */
+ eth16i_select_regbank(2, ioaddr);
+
+ lp->open_time = jiffies;
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+
+ /* Turn on interrupts */
+ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int eth16i_close(struct net_device *dev)
+{
+ struct eth16i_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ eth16i_reset(dev);
+
+ /* Turn off interrupts */
+ outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
+
+ netif_stop_queue(dev);
+
+ lp->open_time = 0;
+
+ /* Disable transmit and receive */
+ BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
+
+ /* Reset the chip */
+ /* outb(0xff, ioaddr + RESET); */
+ /* outw(0xffff, ioaddr + TX_STATUS_REG); */
+
+ outb(0x00, ioaddr + CONFIG_REG_1);
+
+ return 0;
+}
+
+static void eth16i_timeout(struct net_device *dev)
+{
+ struct eth16i_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ /*
+ If we get here, some higher level has decided that
+ we are broken. There should really be a "kick me"
+ function call instead.
+ */
+
+ outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
+ printk(KERN_WARNING "%s: transmit timed out with status %04x, %s ?\n",
+ dev->name,
+ inw(ioaddr + TX_STATUS_REG), (inb(ioaddr + TX_STATUS_REG) & TX_DONE) ?
+ "IRQ conflict" : "network cable problem");
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+
+ /* Let's dump all registers */
+ if(eth16i_debug > 0) {
+ printk(KERN_DEBUG "%s: timeout: %02x %02x %02x %02x %02x %02x %02x %02x.\n",
+ dev->name, inb(ioaddr + 0),
+ inb(ioaddr + 1), inb(ioaddr + 2),
+ inb(ioaddr + 3), inb(ioaddr + 4),
+ inb(ioaddr + 5),
+ inb(ioaddr + 6), inb(ioaddr + 7));
+
+ printk(KERN_DEBUG "%s: transmit start reg: %02x. collision reg %02x\n",
+ dev->name, inb(ioaddr + TRANSMIT_START_REG),
+ inb(ioaddr + COL_16_REG));
+ printk(KERN_DEBUG "lp->tx_queue = %d\n", lp->tx_queue);
+ printk(KERN_DEBUG "lp->tx_queue_len = %d\n", lp->tx_queue_len);
+ printk(KERN_DEBUG "lp->tx_started = %d\n", lp->tx_started);
+ }
+ dev->stats.tx_errors++;
+ eth16i_reset(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t eth16i_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct eth16i_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int status = 0;
+ ushort length = skb->len;
+ unsigned char *buf;
+ unsigned long flags;
+
+ if (length < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ length = ETH_ZLEN;
+ }
+ buf = skb->data;
+
+ netif_stop_queue(dev);
+
+ /* Turn off TX interrupts */
+ outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
+
+ /* We would be better off doing the disable_irq tricks the 3c509
+ does; that would make this suck a lot less */
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ if( (length + 2) > (lp->tx_buf_size - lp->tx_queue_len)) {
+ if(eth16i_debug > 0)
+ printk(KERN_WARNING "%s: Transmit buffer full.\n", dev->name);
+ }
+ else {
+ outw(length, ioaddr + DATAPORT);
+
+ if( ioaddr < 0x1000 )
+ outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
+ else {
+ unsigned char frag = length % 4;
+ outsl(ioaddr + DATAPORT, buf, length >> 2);
+ if( frag != 0 ) {
+ outsw(ioaddr + DATAPORT, (buf + (length & 0xFFFC)), 1);
+ if( frag == 3 )
+ outsw(ioaddr + DATAPORT,
+ (buf + (length & 0xFFFC) + 2), 1);
+ }
+ }
+ lp->tx_buffered_packets++;
+ lp->tx_buffered_bytes = length;
+ lp->tx_queue++;
+ lp->tx_queue_len += length + 2;
+ }
+ lp->tx_buf_busy = 0;
+
+ if(lp->tx_started == 0) {
+ /* If the transmitter is idle, always trigger a transmit */
+ outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ lp->tx_started = 1;
+ netif_wake_queue(dev);
+ }
+ else if(lp->tx_queue_len < lp->tx_buf_size - (ETH_FRAME_LEN + 2)) {
+ /* There is still more room for one more packet in tx buffer */
+ netif_wake_queue(dev);
+ }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
+ /* Turn TX interrupts back on */
+ /* outb(TX_INTR_DONE | TX_INTR_16_COL, ioaddr + TX_INTR_REG); */
+ status = 0;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static void eth16i_rx(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ int boguscount = MAX_RX_LOOP;
+
+ /* Loop until all packets have been read */
+ while( (inb(ioaddr + RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == 0) {
+
+ /* Read status byte from receive buffer */
+ ushort status = inw(ioaddr + DATAPORT);
+
+ /* Get the size of the packet from receive buffer */
+ ushort pkt_len = inw(ioaddr + DATAPORT);
+
+ if(eth16i_debug > 4)
+ printk(KERN_DEBUG "%s: Receiving packet mode %02x status %04x.\n",
+ dev->name,
+ inb(ioaddr + RECEIVE_MODE_REG), status);
+
+ if( !(status & PKT_GOOD) ) {
+ dev->stats.rx_errors++;
+
+ if( (pkt_len < ETH_ZLEN) || (pkt_len > ETH_FRAME_LEN) ) {
+ dev->stats.rx_length_errors++;
+ eth16i_reset(dev);
+ return;
+ }
+ else {
+ eth16i_skip_packet(dev);
+ dev->stats.rx_dropped++;
+ }
+ }
+ else { /* Ok so now we should have a good packet */
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len + 3);
+ if( skb == NULL ) {
+ printk(KERN_WARNING "%s: Could'n allocate memory for packet (len %d)\n",
+ dev->name, pkt_len);
+ eth16i_skip_packet(dev);
+ dev->stats.rx_dropped++;
+ break;
+ }
+
+ skb_reserve(skb,2);
+
+ /*
+ Now let's get the packet out of the buffer.
+ The size is (pkt_len + 1) >> 1 because we are now reading
+ words, so the byte count has to be rounded up to an even number.
+ */
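+ /* A worked example, for illustration only: a 61-byte packet is
+ read as (61 + 1) >> 1 = 31 words, i.e. 62 bytes, the last byte
+ of which is padding. */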
+
+ if(ioaddr < 0x1000)
+ insw(ioaddr + DATAPORT, skb_put(skb, pkt_len),
+ (pkt_len + 1) >> 1);
+ else {
+ unsigned char *buf = skb_put(skb, pkt_len);
+ unsigned char frag = pkt_len % 4;
+
+ insl(ioaddr + DATAPORT, buf, pkt_len >> 2);
+
+ if(frag != 0) {
+ unsigned short rest[2];
+ rest[0] = inw( ioaddr + DATAPORT );
+ if(frag == 3)
+ rest[1] = inw( ioaddr + DATAPORT );
+
+ memcpy(buf + (pkt_len & 0xfffc), (char *)rest, frag);
+ }
+ }
+
+ skb->protocol=eth_type_trans(skb, dev);
+
+ if( eth16i_debug > 5 ) {
+ int i;
+ printk(KERN_DEBUG "%s: Received packet of length %d.\n",
+ dev->name, pkt_len);
+ for(i = 0; i < 14; i++)
+ printk(KERN_DEBUG " %02x", skb->data[i]);
+ printk(KERN_DEBUG ".\n");
+ }
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+
+ } /* else */
+
+ if(--boguscount <= 0)
+ break;
+
+ } /* while */
+}
+
+static irqreturn_t eth16i_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct eth16i_local *lp;
+ int ioaddr = 0, status;
+ int handled = 0;
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ /* Turn off all interrupts from adapter */
+ outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
+
+ /* eth16i_tx won't be called */
+ spin_lock(&lp->lock);
+
+ status = inw(ioaddr + TX_STATUS_REG); /* Get the status */
+ outw(status, ioaddr + TX_STATUS_REG); /* Clear status bits */
+
+ if (status)
+ handled = 1;
+
+ if(eth16i_debug > 3)
+ printk(KERN_DEBUG "%s: Interrupt with status %04x.\n", dev->name, status);
+
+ if( status & 0x7f00 ) {
+
+ dev->stats.rx_errors++;
+
+ if(status & (BUS_RD_ERR << 8) )
+ printk(KERN_WARNING "%s: Bus read error.\n",dev->name);
+ if(status & (SHORT_PKT_ERR << 8) ) dev->stats.rx_length_errors++;
+ if(status & (ALIGN_ERR << 8) ) dev->stats.rx_frame_errors++;
+ if(status & (CRC_ERR << 8) ) dev->stats.rx_crc_errors++;
+ if(status & (RX_BUF_OVERFLOW << 8) ) dev->stats.rx_over_errors++;
+ }
+ if( status & 0x001a) {
+
+ dev->stats.tx_errors++;
+
+ if(status & CR_LOST) dev->stats.tx_carrier_errors++;
+ if(status & TX_JABBER_ERR) dev->stats.tx_window_errors++;
+
+#if 0
+ if(status & COLLISION) {
+ dev->stats.collisions +=
+ ((inb(ioaddr+TRANSMIT_MODE_REG) & 0xF0) >> 4);
+ }
+#endif
+ if(status & COLLISIONS_16) {
+ if(lp->col_16 < MAX_COL_16) {
+ lp->col_16++;
+ dev->stats.collisions++;
+ /* Resume transmitting, skip failed packet */
+ outb(0x02, ioaddr + COL_16_REG);
+ }
+ else {
+ printk(KERN_WARNING "%s: bailing out due to many consecutive 16-in-a-row collisions. Network cable problem?\n", dev->name);
+ }
+ }
+ }
+
+ if( status & 0x00ff ) { /* Let's check the transmit status reg */
+
+ if(status & TX_DONE) { /* The transmit has been done */
+ dev->stats.tx_packets = lp->tx_buffered_packets;
+ dev->stats.tx_bytes += lp->tx_buffered_bytes;
+ lp->col_16 = 0;
+
+ if(lp->tx_queue) { /* Are there still packets? */
+ /* There were packet(s), so start transmitting and also
+ write how many packets there are to be sent */
+ outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ lp->tx_started = 1;
+ }
+ else {
+ lp->tx_started = 0;
+ }
+ netif_wake_queue(dev);
+ }
+ }
+
+ if( ( status & 0x8000 ) ||
+ ( (inb(ioaddr + RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == 0) ) {
+ eth16i_rx(dev); /* We have packet in receive buffer */
+ }
+
+ /* Turn interrupts back on */
+ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
+
+ if(lp->tx_queue_len < lp->tx_buf_size - (ETH_FRAME_LEN + 2)) {
+ /* There is still more room for one more packet in tx buffer */
+ netif_wake_queue(dev);
+ }
+
+ spin_unlock(&lp->lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+static void eth16i_skip_packet(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ inw(ioaddr + DATAPORT);
+ inw(ioaddr + DATAPORT);
+ inw(ioaddr + DATAPORT);
+
+ outb(SKIP_RX_PACKET, ioaddr + FILTER_SELF_RX_REG);
+ while( inb( ioaddr + FILTER_SELF_RX_REG ) != 0);
+}
+
+static void eth16i_reset(struct net_device *dev)
+{
+ struct eth16i_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "%s: Resetting device.\n", dev->name);
+
+ BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
+ outw(0xffff, ioaddr + TX_STATUS_REG);
+ eth16i_select_regbank(2, ioaddr);
+
+ lp->tx_started = 0;
+ lp->tx_buf_busy = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
+}
+
+static void eth16i_multicast(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ {
+ outb(3, ioaddr + RECEIVE_MODE_REG);
+ } else {
+ outb(2, ioaddr + RECEIVE_MODE_REG);
+ }
+}
+
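+/* Bits 2-3 of CONFIG_REG_1 select the register bank (e.g. bank 2 puts
+ 0x08 into those bits); all other bits are preserved. */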
+static void eth16i_select_regbank(unsigned char banknbr, int ioaddr)
+{
+ unsigned char data;
+
+ data = inb(ioaddr + CONFIG_REG_1);
+ outb( ((data & 0xF3) | ( (banknbr & 0x03) << 2)), ioaddr + CONFIG_REG_1);
+}
+
+#ifdef MODULE
+
+static ushort eth16i_parse_mediatype(const char* s)
+{
+ if(!s)
+ return E_PORT_FROM_EPROM;
+
+ if (!strncmp(s, "bnc", 3))
+ return E_PORT_BNC;
+ else if (!strncmp(s, "tp", 2))
+ return E_PORT_TP;
+ else if (!strncmp(s, "dix", 3))
+ return E_PORT_DIX;
+ else if (!strncmp(s, "auto", 4))
+ return E_PORT_AUTO;
+ else
+ return E_PORT_FROM_EPROM;
+}
+
+#define MAX_ETH16I_CARDS 4 /* Max number of Eth16i cards per module */
+
+static struct net_device *dev_eth16i[MAX_ETH16I_CARDS];
+static int io[MAX_ETH16I_CARDS];
+#if 0
+static int irq[MAX_ETH16I_CARDS];
+#endif
+static char* mediatype[MAX_ETH16I_CARDS];
+static int debug = -1;
+
+MODULE_AUTHOR("Mika Kuoppala <miku@iki.fi>");
+MODULE_DESCRIPTION("ICL EtherTeam 16i/32 driver");
+MODULE_LICENSE("GPL");
+
+
+module_param_array(io, int, NULL, 0);
+MODULE_PARM_DESC(io, "eth16i I/O base address(es)");
+
+#if 0
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(irq, "eth16i interrupt request number");
+#endif
+
+module_param_array(mediatype, charp, NULL, 0);
+MODULE_PARM_DESC(mediatype, "eth16i media type of interface(s) (bnc,tp,dix,auto,eprom)");
+
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "eth16i debug level (0-6)");
+
+int __init init_module(void)
+{
+ int this_dev, found = 0;
+ struct net_device *dev;
+
+ for (this_dev = 0; this_dev < MAX_ETH16I_CARDS; this_dev++) {
+ dev = alloc_etherdev(sizeof(struct eth16i_local));
+ if (!dev)
+ break;
+
+ dev->base_addr = io[this_dev];
+
+ if(debug != -1)
+ eth16i_debug = debug;
+
+ if(eth16i_debug > 1)
+ printk(KERN_NOTICE "eth16i(%d): interface type %s\n", this_dev, mediatype[this_dev] ? mediatype[this_dev] : "none" );
+
+ dev->if_port = eth16i_parse_mediatype(mediatype[this_dev]);
+
+ if(io[this_dev] == 0) {
+ if (this_dev != 0) { /* Only autoprobe 1st one */
+ free_netdev(dev);
+ break;
+ }
+
+ printk(KERN_NOTICE "eth16i.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+
+ if (do_eth16i_probe(dev) == 0) {
+ dev_eth16i[found++] = dev;
+ continue;
+ }
+ printk(KERN_WARNING "eth16i.c No Eth16i card found (i/o = 0x%x).\n",
+ io[this_dev]);
+ free_netdev(dev);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+void __exit cleanup_module(void)
+{
+ int this_dev;
+
+ for(this_dev = 0; this_dev < MAX_ETH16I_CARDS; this_dev++) {
+ struct net_device *dev = dev_eth16i[this_dev];
+
+ if (netdev_priv(dev)) {
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, ETH16I_IO_EXTENT);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
new file mode 100644
index 000000000000..15416752c13e
--- /dev/null
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -0,0 +1,1187 @@
+/*======================================================================
+ fmvj18x_cs.c 2.8 2002/03/23
+
+ A fmvj18x (and its compatibles) PCMCIA client driver
+
+ Contributed by Shingo Fujimoto, shingo@flab.fujitsu.co.jp
+
+ TDK LAK-CD021 and CONTEC C-NET(PC)C support added by
+ Nobuhiro Katayama, kata-n@po.iijnet.or.jp
+
+ The PCMCIA client code is based on code written by David Hinds.
+ Network code is based on the "FMV-18x driver" by Yutaka TAMIYA
+ but is actually largely Donald Becker's AT1700 driver, which
+ carries the following attribution:
+
+ Written 1993-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+======================================================================*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME "fmvj18x_cs"
+#define DRV_VERSION "2.9"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_DESCRIPTION("fmvj18x and compatible PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
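+/* For example, INT_MODULE_PARM(sram_config, 0) expands to
+ "static int sram_config = 0; module_param(sram_config, int, 0)". */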
+
+/* SRAM configuration */
+/* 0: 4KB*2 TX buffers, otherwise: 8KB*2 TX buffers */
+INT_MODULE_PARM(sram_config, 0);
+
+
+/*====================================================================*/
+/*
+ PCMCIA event handlers
+ */
+static int fmvj18x_config(struct pcmcia_device *link);
+static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id);
+static int fmvj18x_setup_mfc(struct pcmcia_device *link);
+static void fmvj18x_release(struct pcmcia_device *link);
+static void fmvj18x_detach(struct pcmcia_device *p_dev);
+
+/*
+ LAN controller(MBH86960A) specific routines
+ */
+static int fjn_config(struct net_device *dev, struct ifmap *map);
+static int fjn_open(struct net_device *dev);
+static int fjn_close(struct net_device *dev);
+static netdev_tx_t fjn_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t fjn_interrupt(int irq, void *dev_id);
+static void fjn_rx(struct net_device *dev);
+static void fjn_reset(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static void fjn_tx_timeout(struct net_device *dev);
+static const struct ethtool_ops netdev_ethtool_ops;
+
+/*
+ card type
+ */
+typedef enum { MBH10302, MBH10304, TDK, CONTEC, LA501, UNGERMANN,
+ XXX10304, NEC, KME
+} cardtype_t;
+
+/*
+ driver specific data structure
+*/
+typedef struct local_info_t {
+ struct pcmcia_device *p_dev;
+ long open_time;
+ uint tx_started:1;
+ uint tx_queue;
+ u_short tx_queue_len;
+ cardtype_t cardtype;
+ u_short sent;
+ u_char __iomem *base;
+} local_info_t;
+
+#define MC_FILTERBREAK 64
+
+/*====================================================================*/
+/*
+ ioport offset from the base address
+ */
+#define TX_STATUS 0 /* transmit status register */
+#define RX_STATUS 1 /* receive status register */
+#define TX_INTR 2 /* transmit interrupt mask register */
+#define RX_INTR 3 /* receive interrupt mask register */
+#define TX_MODE 4 /* transmit mode register */
+#define RX_MODE 5 /* receive mode register */
+#define CONFIG_0 6 /* configuration register 0 */
+#define CONFIG_1 7 /* configuration register 1 */
+
+#define NODE_ID 8 /* node ID register (bank 0) */
+#define MAR_ADR 8 /* multicast address registers (bank 1) */
+
+#define DATAPORT 8 /* buffer mem port registers (bank 2) */
+#define TX_START 10 /* transmit start register */
+#define COL_CTRL 11 /* 16 collision control register */
+#define BMPR12 12 /* reserved */
+#define BMPR13 13 /* reserved */
+#define RX_SKIP 14 /* skip received packet register */
+
+#define LAN_CTRL 16 /* LAN card control register */
+
+#define MAC_ID 0x1a /* hardware address */
+#define UNGERMANN_MAC_ID 0x18 /* UNGERMANN-BASS hardware address */
+
+/*
+ control bits
+ */
+#define ENA_TMT_OK 0x80
+#define ENA_TMT_REC 0x20
+#define ENA_COL 0x04
+#define ENA_16_COL 0x02
+#define ENA_TBUS_ERR 0x01
+
+#define ENA_PKT_RDY 0x80
+#define ENA_BUS_ERR 0x40
+#define ENA_LEN_ERR 0x08
+#define ENA_ALG_ERR 0x04
+#define ENA_CRC_ERR 0x02
+#define ENA_OVR_FLO 0x01
+
+/* flags */
+#define F_TMT_RDY 0x80 /* can accept new packet */
+#define F_NET_BSY 0x40 /* carrier is detected */
+#define F_TMT_OK 0x20 /* send packet successfully */
+#define F_SRT_PKT 0x10 /* short packet error */
+#define F_COL_ERR 0x04 /* collision error */
+#define F_16_COL 0x02 /* 16 collision error */
+#define F_TBUS_ERR 0x01 /* bus read error */
+
+#define F_PKT_RDY 0x80 /* packet(s) in buffer */
+#define F_BUS_ERR 0x40 /* bus read error */
+#define F_LEN_ERR 0x08 /* short packet */
+#define F_ALG_ERR 0x04 /* frame error */
+#define F_CRC_ERR 0x02 /* CRC error */
+#define F_OVR_FLO 0x01 /* overflow error */
+
+#define F_BUF_EMP 0x40 /* receive buffer is empty */
+
+#define F_SKP_PKT 0x05 /* drop packet in buffer */
+
+/* default bitmaps */
+#define D_TX_INTR ( ENA_TMT_OK )
+#define D_RX_INTR ( ENA_PKT_RDY | ENA_LEN_ERR \
+ | ENA_ALG_ERR | ENA_CRC_ERR | ENA_OVR_FLO )
+#define TX_STAT_M ( F_TMT_RDY )
+#define RX_STAT_M ( F_PKT_RDY | F_LEN_ERR \
+ | F_ALG_ERR | F_CRC_ERR | F_OVR_FLO )
+
+/* commands */
+#define D_TX_MODE 0x06 /* no tests, detect carrier */
+#define ID_MATCHED 0x02 /* (RX_MODE) */
+#define RECV_ALL 0x03 /* (RX_MODE) */
+#define CONFIG0_DFL 0x5a /* 16bit bus, 4K x 2 Tx queues */
+#define CONFIG0_DFL_1 0x5e /* 16bit bus, 8K x 2 Tx queues */
+#define CONFIG0_RST 0xda /* Data Link Controller off (CONFIG_0) */
+#define CONFIG0_RST_1 0xde /* Data Link Controller off (CONFIG_0) */
+#define BANK_0 0xa0 /* bank 0 (CONFIG_1) */
+#define BANK_1 0xa4 /* bank 1 (CONFIG_1) */
+#define BANK_2 0xa8 /* bank 2 (CONFIG_1) */
+#define CHIP_OFF 0x80 /* control chip power off (CONFIG_1) */
+#define DO_TX 0x80 /* do transmit packet */
+#define SEND_PKT 0x81 /* send a packet */
+#define AUTO_MODE 0x07 /* Auto skip packet on 16 col detected */
+#define MANU_MODE 0x03 /* Stop and skip packet on 16 col */
+#define TDK_AUTO_MODE 0x47 /* Auto skip packet on 16 col detected */
+#define TDK_MANU_MODE 0x43 /* Stop and skip packet on 16 col */
+#define INTR_OFF 0x0d /* LAN controller ignores interrupts */
+#define INTR_ON 0x1d /* LAN controller will catch interrupts */
+
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+#define BANK_0U 0x20 /* bank 0 (CONFIG_1) */
+#define BANK_1U 0x24 /* bank 1 (CONFIG_1) */
+#define BANK_2U 0x28 /* bank 2 (CONFIG_1) */
+
+static const struct net_device_ops fjn_netdev_ops = {
+ .ndo_open = fjn_open,
+ .ndo_stop = fjn_close,
+ .ndo_start_xmit = fjn_start_xmit,
+ .ndo_tx_timeout = fjn_tx_timeout,
+ .ndo_set_config = fjn_config,
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int fmvj18x_probe(struct pcmcia_device *link)
+{
+ local_info_t *lp;
+ struct net_device *dev;
+
+ dev_dbg(&link->dev, "fmvj18x_attach()\n");
+
+ /* Make up a FMVJ18x specific data structure */
+ dev = alloc_etherdev(sizeof(local_info_t));
+ if (!dev)
+ return -ENOMEM;
+ lp = netdev_priv(dev);
+ link->priv = dev;
+ lp->p_dev = link;
+ lp->base = NULL;
+
+ /* The io structure describes IO port mapping */
+ link->resource[0]->end = 32;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+
+ /* General socket configuration */
+ link->config_flags |= CONF_ENABLE_IRQ;
+
+ dev->netdev_ops = &fjn_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+
+ return fmvj18x_config(link);
+} /* fmvj18x_attach */
+
+/*====================================================================*/
+
+static void fmvj18x_detach(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ dev_dbg(&link->dev, "fmvj18x_detach\n");
+
+ unregister_netdev(dev);
+
+ fmvj18x_release(link);
+
+ free_netdev(dev);
+} /* fmvj18x_detach */
+
+/*====================================================================*/
+
+static int mfc_try_io_port(struct pcmcia_device *link)
+{
+ int i, ret;
+ static const unsigned int serial_base[5] =
+ { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
+
+ for (i = 0; i < 5; i++) {
+ link->resource[1]->start = serial_base[i];
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
+ if (link->resource[1]->start == 0) {
+ link->resource[1]->end = 0;
+ pr_notice("out of resource for serial\n");
+ }
+ ret = pcmcia_request_io(link);
+ if (ret == 0)
+ return ret;
+ }
+ return ret;
+}
+
+static int ungermann_try_io_port(struct pcmcia_device *link)
+{
+ int ret;
+ unsigned int ioaddr;
+ /*
+ The Ungermann-Bass Access/CARD accepts only 0x300, 0x320, 0x340,
+ 0x360, 0x380 and 0x3c0 as its I/O port.
+ */
+ for (ioaddr = 0x300; ioaddr < 0x3e0; ioaddr += 0x20) {
+ link->resource[0]->start = ioaddr;
+ ret = pcmcia_request_io(link);
+ if (ret == 0) {
+ /* calculate ConfigIndex value */
+ link->config_index =
+ ((link->resource[0]->start & 0x0f0) >> 3) | 0x22;
+ return ret;
+ }
+ }
+ return ret; /* RequestIO failed */
+}
+
+static int fmvj18x_ioprobe(struct pcmcia_device *p_dev, void *priv_data)
+{
+ return 0; /* strange, but that's what the code did already before... */
+}
+
+static int fmvj18x_config(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ local_info_t *lp = netdev_priv(dev);
+ int i, ret;
+ unsigned int ioaddr;
+ cardtype_t cardtype;
+ char *card_name = "unknown";
+ u8 *buf;
+ size_t len;
+ u_char buggybuf[32];
+
+ dev_dbg(&link->dev, "fmvj18x_config\n");
+
+ link->io_lines = 5;
+
+ len = pcmcia_get_tuple(link, CISTPL_FUNCE, &buf);
+ kfree(buf);
+
+ if (len) {
+ /* Yes, I have CISTPL_FUNCE. Let's check CISTPL_MANFID */
+ ret = pcmcia_loop_config(link, fmvj18x_ioprobe, NULL);
+ if (ret != 0)
+ goto failed;
+
+ switch (link->manf_id) {
+ case MANFID_TDK:
+ cardtype = TDK;
+ if (link->card_id == PRODID_TDK_GN3410 ||
+ link->card_id == PRODID_TDK_NP9610 ||
+ link->card_id == PRODID_TDK_MN3200) {
+ /* MultiFunction Card */
+ link->config_base = 0x800;
+ link->config_index = 0x47;
+ link->resource[1]->end = 8;
+ }
+ break;
+ case MANFID_NEC:
+ cardtype = NEC; /* MultiFunction Card */
+ link->config_base = 0x800;
+ link->config_index = 0x47;
+ link->resource[1]->end = 8;
+ break;
+ case MANFID_KME:
+ cardtype = KME; /* MultiFunction Card */
+ link->config_base = 0x800;
+ link->config_index = 0x47;
+ link->resource[1]->end = 8;
+ break;
+ case MANFID_CONTEC:
+ cardtype = CONTEC;
+ break;
+ case MANFID_FUJITSU:
+ if (link->config_base == 0x0fe0)
+ cardtype = MBH10302;
+ else if (link->card_id == PRODID_FUJITSU_MBH10302)
+ /* The RATOC REX-5588/9822/4886's PRODID is 0004 (=MBH10302),
+ but these are MBH10304-based cards. */
+ cardtype = MBH10304;
+ else if (link->card_id == PRODID_FUJITSU_MBH10304)
+ cardtype = MBH10304;
+ else
+ cardtype = LA501;
+ break;
+ default:
+ cardtype = MBH10304;
+ }
+ } else {
+ /* old type card */
+ switch (link->manf_id) {
+ case MANFID_FUJITSU:
+ if (link->card_id == PRODID_FUJITSU_MBH10304) {
+ cardtype = XXX10304; /* MBH10304 with buggy CIS */
+ link->config_index = 0x20;
+ } else {
+ cardtype = MBH10302; /* NextCom NC5310, etc. */
+ link->config_index = 1;
+ }
+ break;
+ case MANFID_UNGERMANN:
+ cardtype = UNGERMANN;
+ break;
+ default:
+ cardtype = MBH10302;
+ link->config_index = 1;
+ }
+ }
+
+ if (link->resource[1]->end != 0) {
+ ret = mfc_try_io_port(link);
+ if (ret != 0) goto failed;
+ } else if (cardtype == UNGERMANN) {
+ ret = ungermann_try_io_port(link);
+ if (ret != 0) goto failed;
+ } else {
+ ret = pcmcia_request_io(link);
+ if (ret)
+ goto failed;
+ }
+ ret = pcmcia_request_irq(link, fjn_interrupt);
+ if (ret)
+ goto failed;
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto failed;
+
+ dev->irq = link->irq;
+ dev->base_addr = link->resource[0]->start;
+
+ if (resource_size(link->resource[1]) != 0) {
+ ret = fmvj18x_setup_mfc(link);
+ if (ret != 0) goto failed;
+ }
+
+ ioaddr = dev->base_addr;
+
+ /* Reset controller */
+ if (sram_config == 0)
+ outb(CONFIG0_RST, ioaddr + CONFIG_0);
+ else
+ outb(CONFIG0_RST_1, ioaddr + CONFIG_0);
+
+ /* Power On chip and select bank 0 */
+ if (cardtype == MBH10302)
+ outb(BANK_0, ioaddr + CONFIG_1);
+ else
+ outb(BANK_0U, ioaddr + CONFIG_1);
+
+ /* Set hardware address */
+ switch (cardtype) {
+ case MBH10304:
+ case TDK:
+ case LA501:
+ case CONTEC:
+ case NEC:
+ case KME:
+ if (cardtype == MBH10304) {
+ card_name = "FMV-J182";
+
+ len = pcmcia_get_tuple(link, CISTPL_FUNCE, &buf);
+ if (len < 11) {
+ kfree(buf);
+ goto failed;
+ }
+ /* Read MACID from CIS */
+ for (i = 5; i < 11; i++)
+ dev->dev_addr[i] = buf[i];
+ kfree(buf);
+ } else {
+ if (pcmcia_get_mac_from_cis(link, dev))
+ goto failed;
+ if( cardtype == TDK ) {
+ card_name = "TDK LAK-CD021";
+ } else if( cardtype == LA501 ) {
+ card_name = "LA501";
+ } else if( cardtype == NEC ) {
+ card_name = "PK-UG-J001";
+ } else if( cardtype == KME ) {
+ card_name = "Panasonic";
+ } else {
+ card_name = "C-NET(PC)C";
+ }
+ }
+ break;
+ case UNGERMANN:
+ /* Read MACID from register */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + UNGERMANN_MAC_ID + i);
+ card_name = "Access/CARD";
+ break;
+ case XXX10304:
+ /* Read MACID from Buggy CIS */
+ if (fmvj18x_get_hwinfo(link, buggybuf) == -1) {
+ pr_notice("unable to read hardware net address\n");
+ goto failed;
+ }
+ for (i = 0 ; i < 6; i++) {
+ dev->dev_addr[i] = buggybuf[i];
+ }
+ card_name = "FMV-J182";
+ break;
+ case MBH10302:
+ default:
+ /* Read MACID from register */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + MAC_ID + i);
+ card_name = "FMV-J181";
+ break;
+ }
+
+ lp->cardtype = cardtype;
+ SET_NETDEV_DEV(dev, &link->dev);
+
+ if (register_netdev(dev) != 0) {
+ pr_notice("register_netdev() failed\n");
+ goto failed;
+ }
+
+ /* print current configuration */
+ netdev_info(dev, "%s, sram %s, port %#3lx, irq %d, hw_addr %pM\n",
+ card_name, sram_config == 0 ? "4K TX*2" : "8K TX*2",
+ dev->base_addr, dev->irq, dev->dev_addr);
+
+ return 0;
+
+failed:
+ fmvj18x_release(link);
+ return -ENODEV;
+} /* fmvj18x_config */
+/*====================================================================*/
+
+static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
+{
+ u_char __iomem *base;
+ int i, j;
+
+ /* Allocate a small memory window */
+ link->resource[2]->flags |= WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
+ link->resource[2]->start = 0; link->resource[2]->end = 0;
+ i = pcmcia_request_window(link, link->resource[2], 0);
+ if (i != 0)
+ return -1;
+
+ base = ioremap(link->resource[2]->start, resource_size(link->resource[2]));
+ pcmcia_map_mem_page(link, link->resource[2], 0);
+
+ /*
+ * MBH10304 CISTPL_FUNCE_LAN_NODE_ID format
+ * 22 0d xx xx xx 04 06 yy yy yy yy yy yy ff
+ * 'xx' is garbage.
+ * 'yy' is MAC address.
+ */
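+ /* Background note: the CIS lives in attribute memory, where valid
+ bytes sit at even addresses only -- hence the base + i*2 stride
+ in the scan below. */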
+ for (i = 0; i < 0x200; i++) {
+ if (readb(base+i*2) == 0x22) {
+ if (readb(base+(i-1)*2) == 0xff &&
+ readb(base+(i+5)*2) == 0x04 &&
+ readb(base+(i+6)*2) == 0x06 &&
+ readb(base+(i+13)*2) == 0xff)
+ break;
+ }
+ }
+
+ if (i != 0x200) {
+ for (j = 0 ; j < 6; j++,i++) {
+ node_id[j] = readb(base+(i+7)*2);
+ }
+ }
+
+ iounmap(base);
+ j = pcmcia_release_window(link, link->resource[2]);
+ return (i != 0x200) ? 0 : -1;
+
+} /* fmvj18x_get_hwinfo */
+/*====================================================================*/
+
+static int fmvj18x_setup_mfc(struct pcmcia_device *link)
+{
+ int i;
+ struct net_device *dev = link->priv;
+ unsigned int ioaddr;
+ local_info_t *lp = netdev_priv(dev);
+
+ /* Allocate a small memory window */
+ link->resource[3]->flags = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
+ link->resource[3]->start = link->resource[3]->end = 0;
+ i = pcmcia_request_window(link, link->resource[3], 0);
+ if (i != 0)
+ return -1;
+
+ lp->base = ioremap(link->resource[3]->start,
+ resource_size(link->resource[3]));
+ if (lp->base == NULL) {
+ netdev_notice(dev, "ioremap failed\n");
+ return -1;
+ }
+
+ i = pcmcia_map_mem_page(link, link->resource[3], 0);
+ if (i != 0) {
+ iounmap(lp->base);
+ lp->base = NULL;
+ return -1;
+ }
+
+ ioaddr = dev->base_addr;
+ writeb(0x47, lp->base+0x800); /* Config Option Register of LAN */
+ writeb(0x0, lp->base+0x802); /* Config and Status Register */
+
+ writeb(ioaddr & 0xff, lp->base+0x80a); /* I/O Base(Low) of LAN */
+ writeb((ioaddr >> 8) & 0xff, lp->base+0x80c); /* I/O Base(High) of LAN */
+
+ writeb(0x45, lp->base+0x820); /* Config Option Register of Modem */
+ writeb(0x8, lp->base+0x822); /* Config and Status Register */
+
+ return 0;
+
+}
+/*====================================================================*/
+
+static void fmvj18x_release(struct pcmcia_device *link)
+{
+
+ struct net_device *dev = link->priv;
+ local_info_t *lp = netdev_priv(dev);
+ u_char __iomem *tmp;
+
+ dev_dbg(&link->dev, "fmvj18x_release\n");
+
+ if (lp->base != NULL) {
+ tmp = lp->base;
+ lp->base = NULL; /* set NULL before iounmap */
+ iounmap(tmp);
+ }
+
+ pcmcia_disable_device(link);
+
+}
+
+static int fmvj18x_suspend(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ if (link->open)
+ netif_device_detach(dev);
+
+ return 0;
+}
+
+static int fmvj18x_resume(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ if (link->open) {
+ fjn_reset(dev);
+ netif_device_attach(dev);
+ }
+
+ return 0;
+}
+
+/*====================================================================*/
+
+static const struct pcmcia_device_id fmvj18x_ids[] = {
+ PCMCIA_DEVICE_MANF_CARD(0x0004, 0x0004),
+ PCMCIA_DEVICE_PROD_ID12("EAGLE Technology", "NE200 ETHERNET LAN MBH10302 04", 0x528c88c4, 0x74f91e59),
+ PCMCIA_DEVICE_PROD_ID12("Eiger Labs,Inc", "EPX-10BT PC Card Ethernet 10BT", 0x53af556e, 0x877f9922),
+ PCMCIA_DEVICE_PROD_ID12("Eiger labs,Inc.", "EPX-10BT PC Card Ethernet 10BT", 0xf47e6c66, 0x877f9922),
+ PCMCIA_DEVICE_PROD_ID12("FUJITSU", "LAN Card(FMV-J182)", 0x6ee5a3d8, 0x5baf31db),
+ PCMCIA_DEVICE_PROD_ID12("FUJITSU", "MBH10308", 0x6ee5a3d8, 0x3f04875e),
+ PCMCIA_DEVICE_PROD_ID12("FUJITSU TOWA", "LA501", 0xb8451188, 0x12939ba2),
+ PCMCIA_DEVICE_PROD_ID12("HITACHI", "HT-4840-11", 0xf4f43949, 0x773910f4),
+ PCMCIA_DEVICE_PROD_ID12("NextComK.K.", "NC5310B Ver1.0 ", 0x8cef4d3a, 0x075fc7b6),
+ PCMCIA_DEVICE_PROD_ID12("NextComK.K.", "NC5310 Ver1.0 ", 0x8cef4d3a, 0xbccf43e6),
+ PCMCIA_DEVICE_PROD_ID12("RATOC System Inc.", "10BASE_T CARD R280", 0x85c10e17, 0xd9413666),
+ PCMCIA_DEVICE_PROD_ID12("TDK", "LAC-CD02x", 0x1eae9475, 0x8fa0ee70),
+ PCMCIA_DEVICE_PROD_ID12("TDK", "LAC-CF010", 0x1eae9475, 0x7683bc9a),
+ PCMCIA_DEVICE_PROD_ID1("CONTEC Co.,Ltd.", 0x58d8fee2),
+ PCMCIA_DEVICE_PROD_ID1("PCMCIA LAN MBH10304 ES", 0x2599f454),
+ PCMCIA_DEVICE_PROD_ID1("PCMCIA MBH10302", 0x8f4005da),
+ PCMCIA_DEVICE_PROD_ID1("UBKK,V2.0", 0x90888080),
+ PCMCIA_PFC_DEVICE_PROD_ID12(0, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed),
+ PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
+ PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, fmvj18x_ids);
+
+static struct pcmcia_driver fmvj18x_cs_driver = {
+ .owner = THIS_MODULE,
+ .name = "fmvj18x_cs",
+ .probe = fmvj18x_probe,
+ .remove = fmvj18x_detach,
+ .id_table = fmvj18x_ids,
+ .suspend = fmvj18x_suspend,
+ .resume = fmvj18x_resume,
+};
+
+static int __init init_fmvj18x_cs(void)
+{
+ return pcmcia_register_driver(&fmvj18x_cs_driver);
+}
+
+static void __exit exit_fmvj18x_cs(void)
+{
+ pcmcia_unregister_driver(&fmvj18x_cs_driver);
+}
+
+module_init(init_fmvj18x_cs);
+module_exit(exit_fmvj18x_cs);
+
+/*====================================================================*/
+
+static irqreturn_t fjn_interrupt(int dummy, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ local_info_t *lp = netdev_priv(dev);
+ unsigned int ioaddr;
+ unsigned short tx_stat, rx_stat;
+
+ ioaddr = dev->base_addr;
+
+ /* avoid multiple interrupts */
+ outw(0x0000, ioaddr + TX_INTR);
+
+ /* wait for a while */
+ udelay(1);
+
+ /* get status */
+ tx_stat = inb(ioaddr + TX_STATUS);
+ rx_stat = inb(ioaddr + RX_STATUS);
+
+ /* clear status */
+ outb(tx_stat, ioaddr + TX_STATUS);
+ outb(rx_stat, ioaddr + RX_STATUS);
+
+ pr_debug("%s: interrupt, rx_status %02x.\n", dev->name, rx_stat);
+ pr_debug(" tx_status %02x.\n", tx_stat);
+
+ if (rx_stat || (inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) {
+ /* there is packet(s) in rx buffer */
+ fjn_rx(dev);
+ }
+ if (tx_stat & F_TMT_RDY) {
+ dev->stats.tx_packets += lp->sent ;
+ lp->sent = 0 ;
+ if (lp->tx_queue) {
+ outb(DO_TX | lp->tx_queue, ioaddr + TX_START);
+ lp->sent = lp->tx_queue ;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ } else {
+ lp->tx_started = 0;
+ }
+ netif_wake_queue(dev);
+ }
+ pr_debug("%s: exiting interrupt,\n", dev->name);
+ pr_debug(" tx_status %02x, rx_status %02x.\n", tx_stat, rx_stat);
+
+ outb(D_TX_INTR, ioaddr + TX_INTR);
+ outb(D_RX_INTR, ioaddr + RX_INTR);
+
+ if (lp->base != NULL) {
+ /* Ack interrupt for multifunction card */
+ writeb(0x01, lp->base+0x802);
+ writeb(0x09, lp->base+0x822);
+ }
+
+ return IRQ_HANDLED;
+
+} /* fjn_interrupt */
+
+/*====================================================================*/
+
+static void fjn_tx_timeout(struct net_device *dev)
+{
+ struct local_info_t *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+
+ netdev_notice(dev, "transmit timed out with status %04x, %s?\n",
+ htons(inw(ioaddr + TX_STATUS)),
+ inb(ioaddr + TX_STATUS) & F_TMT_RDY
+ ? "IRQ conflict" : "network cable problem");
+ netdev_notice(dev, "timeout registers: %04x %04x %04x "
+ "%04x %04x %04x %04x %04x.\n",
+ htons(inw(ioaddr + 0)), htons(inw(ioaddr + 2)),
+ htons(inw(ioaddr + 4)), htons(inw(ioaddr + 6)),
+ htons(inw(ioaddr + 8)), htons(inw(ioaddr + 10)),
+ htons(inw(ioaddr + 12)), htons(inw(ioaddr + 14)));
+ dev->stats.tx_errors++;
+ /* ToDo: We should try to restart the adaptor... */
+ local_irq_disable();
+ fjn_reset(dev);
+
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ lp->sent = 0;
+ lp->open_time = jiffies;
+ local_irq_enable();
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t fjn_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct local_info_t *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ short length = skb->len;
+
+ if (length < ETH_ZLEN)
+ {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ length = ETH_ZLEN;
+ }
+
+ netif_stop_queue(dev);
+
+ {
+ unsigned char *buf = skb->data;
+
+ if (length > ETH_FRAME_LEN) {
+ netdev_notice(dev, "Attempting to send a large packet (%d bytes)\n",
+ length);
+ return NETDEV_TX_BUSY;
+ }
+
+ netdev_dbg(dev, "Transmitting a packet of length %lu\n",
+ (unsigned long)skb->len);
+ dev->stats.tx_bytes += skb->len;
+
+ /* Disable both interrupts. */
+ outw(0x0000, ioaddr + TX_INTR);
+
+ /* wait for a while */
+ udelay(1);
+
+ outw(length, ioaddr + DATAPORT);
+ outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
+
+ lp->tx_queue++;
+ lp->tx_queue_len += ((length+3) & ~1);
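+ /* ((length + 3) & ~1) is, presumably, the 2-byte length word plus
+ the payload padded to an even byte count -- matching the
+ (length + 1) >> 1 words written above. */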
+
+ if (lp->tx_started == 0) {
+ /* If the Tx is idle, always trigger a transmit. */
+ outb(DO_TX | lp->tx_queue, ioaddr + TX_START);
+ lp->sent = lp->tx_queue ;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ lp->tx_started = 1;
+ netif_start_queue(dev);
+ } else {
+ if( sram_config == 0 ) {
+ if (lp->tx_queue_len < (4096 - (ETH_FRAME_LEN +2)) )
+ /* Yes, there is room for one more packet. */
+ netif_start_queue(dev);
+ } else {
+ if (lp->tx_queue_len < (8192 - (ETH_FRAME_LEN +2)) &&
+ lp->tx_queue < 127 )
+ /* Yes, there is room for one more packet. */
+ netif_start_queue(dev);
+ }
+ }
+
+ /* Re-enable interrupts */
+ outb(D_TX_INTR, ioaddr + TX_INTR);
+ outb(D_RX_INTR, ioaddr + RX_INTR);
+ }
+ dev_kfree_skb (skb);
+
+ return NETDEV_TX_OK;
+} /* fjn_start_xmit */
+
+/*====================================================================*/
+
+static void fjn_reset(struct net_device *dev)
+{
+ struct local_info_t *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ int i;
+
+ netdev_dbg(dev, "fjn_reset() called\n");
+
+ /* Reset controller */
+ if( sram_config == 0 )
+ outb(CONFIG0_RST, ioaddr + CONFIG_0);
+ else
+ outb(CONFIG0_RST_1, ioaddr + CONFIG_0);
+
+ /* Power On chip and select bank 0 */
+ if (lp->cardtype == MBH10302)
+ outb(BANK_0, ioaddr + CONFIG_1);
+ else
+ outb(BANK_0U, ioaddr + CONFIG_1);
+
+ /* Set Tx modes */
+ outb(D_TX_MODE, ioaddr + TX_MODE);
+ /* set Rx modes */
+ outb(ID_MATCHED, ioaddr + RX_MODE);
+
+ /* Set hardware address */
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + NODE_ID + i);
+
+ /* (re)initialize the multicast table */
+ set_rx_mode(dev);
+
+ /* Switch to bank 2 (runtime mode) */
+ if (lp->cardtype == MBH10302)
+ outb(BANK_2, ioaddr + CONFIG_1);
+ else
+ outb(BANK_2U, ioaddr + CONFIG_1);
+
+ /* set 16col ctrl bits */
+ if( lp->cardtype == TDK || lp->cardtype == CONTEC)
+ outb(TDK_AUTO_MODE, ioaddr + COL_CTRL);
+ else
+ outb(AUTO_MODE, ioaddr + COL_CTRL);
+
+ /* clear Reserved Regs */
+ outb(0x00, ioaddr + BMPR12);
+ outb(0x00, ioaddr + BMPR13);
+
+ /* reset Skip packet reg. */
+ outb(0x01, ioaddr + RX_SKIP);
+
+ /* Enable Tx and Rx */
+ if( sram_config == 0 )
+ outb(CONFIG0_DFL, ioaddr + CONFIG_0);
+ else
+ outb(CONFIG0_DFL_1, ioaddr + CONFIG_0);
+
+ /* Init receive pointer ? */
+ inw(ioaddr + DATAPORT);
+ inw(ioaddr + DATAPORT);
+
+ /* Clear all status */
+ outb(0xff, ioaddr + TX_STATUS);
+ outb(0xff, ioaddr + RX_STATUS);
+
+ if (lp->cardtype == MBH10302)
+ outb(INTR_OFF, ioaddr + LAN_CTRL);
+
+ /* Turn on Rx interrupts */
+ outb(D_TX_INTR, ioaddr + TX_INTR);
+ outb(D_RX_INTR, ioaddr + RX_INTR);
+
+ /* Turn on interrupts from LAN card controller */
+ if (lp->cardtype == MBH10302)
+ outb(INTR_ON, ioaddr + LAN_CTRL);
+} /* fjn_reset */
+
+/*====================================================================*/
+
+static void fjn_rx(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ int boguscount = 10; /* 5 -> 10: by agy 19940922 */
+
+ pr_debug("%s: in rx_packet(), rx_status %02x.\n",
+ dev->name, inb(ioaddr + RX_STATUS));
+
+ while ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) {
+ u_short status = inw(ioaddr + DATAPORT);
+
+ netdev_dbg(dev, "Rxing packet mode %02x status %04x.\n",
+ inb(ioaddr + RX_MODE), status);
+#ifndef final_version
+ if (status == 0) {
+ outb(F_SKP_PKT, ioaddr + RX_SKIP);
+ break;
+ }
+#endif
+ if ((status & 0xF0) != 0x20) { /* There was an error. */
+ dev->stats.rx_errors++;
+ if (status & F_LEN_ERR) dev->stats.rx_length_errors++;
+ if (status & F_ALG_ERR) dev->stats.rx_frame_errors++;
+ if (status & F_CRC_ERR) dev->stats.rx_crc_errors++;
+ if (status & F_OVR_FLO) dev->stats.rx_over_errors++;
+ } else {
+ u_short pkt_len = inw(ioaddr + DATAPORT);
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ if (pkt_len > 1550) {
+ netdev_notice(dev, "The FMV-18x claimed a very large packet, size %d\n",
+ pkt_len);
+ outb(F_SKP_PKT, ioaddr + RX_SKIP);
+ dev->stats.rx_errors++;
+ break;
+ }
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) {
+ netdev_notice(dev, "Memory squeeze, dropping packet (len %d)\n",
+ pkt_len);
+ outb(F_SKP_PKT, ioaddr + RX_SKIP);
+ dev->stats.rx_dropped++;
+ break;
+ }
+
+ skb_reserve(skb, 2);
+ insw(ioaddr + DATAPORT, skb_put(skb, pkt_len),
+ (pkt_len + 1) >> 1);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ {
+ int i;
+ pr_debug("%s: Rxed packet of length %d: ",
+ dev->name, pkt_len);
+ for (i = 0; i < 14; i++)
+ pr_debug(" %02x", skb->data[i]);
+ pr_debug(".\n");
+ }
+
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ if (--boguscount <= 0)
+ break;
+ }
+
+ /* If any worthwhile packets have been received, dev_rint()
+ has done a netif_wake_queue() for us and will work on them
+ when we get to the bottom-half routine. */
+/*
+ if (lp->cardtype != TDK) {
+ int i;
+ for (i = 0; i < 20; i++) {
+ if ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == F_BUF_EMP)
+ break;
+ (void)inw(ioaddr + DATAPORT); /+ dummy status read +/
+ outb(F_SKP_PKT, ioaddr + RX_SKIP);
+ }
+
+ if (i > 0)
+ pr_debug("%s: Exint Rx packet with mode %02x after "
+ "%d ticks.\n", dev->name, inb(ioaddr + RX_MODE), i);
+ }
+*/
+} /* fjn_rx */
+
+/*====================================================================*/
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+};
+
+static int fjn_config(struct net_device *dev, struct ifmap *map){
+ return 0;
+}
+
+static int fjn_open(struct net_device *dev)
+{
+ struct local_info_t *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+
+ pr_debug("fjn_open('%s').\n", dev->name);
+
+ if (!pcmcia_dev_present(link))
+ return -ENODEV;
+
+ link->open++;
+
+ fjn_reset(dev);
+
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ lp->open_time = jiffies;
+ netif_start_queue(dev);
+
+ return 0;
+} /* fjn_open */
+
+/*====================================================================*/
+
+static int fjn_close(struct net_device *dev)
+{
+ struct local_info_t *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+ unsigned int ioaddr = dev->base_addr;
+
+ pr_debug("fjn_close('%s').\n", dev->name);
+
+ lp->open_time = 0;
+ netif_stop_queue(dev);
+
+ /* Set configuration register 0 to disable Tx and Rx. */
+ if( sram_config == 0 )
+ outb(CONFIG0_RST ,ioaddr + CONFIG_0);
+ else
+ outb(CONFIG0_RST_1 ,ioaddr + CONFIG_0);
+
+ /* Update the statistics -- ToDo. */
+
+ /* Power-down the chip. Green, green, green! */
+ outb(CHIP_OFF ,ioaddr + CONFIG_1);
+
+ /* Set the ethernet adaptor to disable its IRQ */
+ if (lp->cardtype == MBH10302)
+ outb(INTR_OFF, ioaddr + LAN_CTRL);
+
+ link->open--;
+
+ return 0;
+} /* fjn_close */
+
+/*====================================================================*/
+
+/*
+ Set the multicast/promiscuous mode for this adaptor.
+*/
+
+static void set_rx_mode(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ u_char mc_filter[8]; /* Multicast hash filter */
+ u_long flags;
+ int i;
+
+ int saved_bank;
+ int saved_config_0 = inb(ioaddr + CONFIG_0);
+
+ local_irq_save(flags);
+
+ /* Disable Tx and Rx */
+ if (sram_config == 0)
+ outb(CONFIG0_RST, ioaddr + CONFIG_0);
+ else
+ outb(CONFIG0_RST_1, ioaddr + CONFIG_0);
+
+ if (dev->flags & IFF_PROMISC) {
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
+ } else if (netdev_mc_count(dev) > MC_FILTERBREAK ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ outb(2, ioaddr + RX_MODE); /* Use normal mode. */
+ } else if (netdev_mc_empty(dev)) {
+ memset(mc_filter, 0x00, sizeof(mc_filter));
+ outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
+ } else {
+ struct netdev_hw_addr *ha;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned int bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
+ mc_filter[bit >> 3] |= (1 << (bit & 7));
+ }
+ outb(2, ioaddr + RX_MODE); /* Use normal mode. */
+ }
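+ /* For illustration: the top 6 bits of the little-endian CRC of
+ each MAC address select one of the 64 filter bits; e.g. a hash
+ of 42 sets bit 2 of mc_filter[5] (42 >> 3 == 5, 42 & 7 == 2). */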
+
+ /* Switch to bank 1 and set the multicast table. */
+ saved_bank = inb(ioaddr + CONFIG_1);
+ outb(0xe4, ioaddr + CONFIG_1);
+
+ for (i = 0; i < 8; i++)
+ outb(mc_filter[i], ioaddr + MAR_ADR + i);
+ outb(saved_bank, ioaddr + CONFIG_1);
+
+ outb(saved_config_0, ioaddr + CONFIG_0);
+
+ local_irq_restore(flags);
+}
diff --git a/drivers/net/ethernet/hp/Kconfig b/drivers/net/ethernet/hp/Kconfig
new file mode 100644
index 000000000000..a0b8ece1e3bc
--- /dev/null
+++ b/drivers/net/ethernet/hp/Kconfig
@@ -0,0 +1,32 @@
+#
+# HP network device configuration
+#
+
+config NET_VENDOR_HP
+ bool "HP devices"
+ default y
+ depends on ISA || EISA || PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about HP cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_HP
+
+config HP100
+ tristate "HP 10/100VG PCLAN (ISA, EISA, PCI) support"
+ depends on (ISA || EISA || PCI)
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called hp100.
+
+endif # NET_VENDOR_HP
diff --git a/drivers/net/ethernet/hp/Makefile b/drivers/net/ethernet/hp/Makefile
new file mode 100644
index 000000000000..20b6918b52bd
--- /dev/null
+++ b/drivers/net/ethernet/hp/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the HP network device drivers.
+#
+
+obj-$(CONFIG_HP100) += hp100.o
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
new file mode 100644
index 000000000000..6a5ee0776b28
--- /dev/null
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -0,0 +1,3068 @@
+/*
+** hp100.c
+** HP CASCADE Architecture Driver for 100VG-AnyLan Network Adapters
+**
+** $Id: hp100.c,v 1.58 2001/09/24 18:03:01 perex Exp perex $
+**
+** Based on the HP100 driver written by Jaroslav Kysela <perex@jcu.cz>
+** Extended for new busmaster capable chipsets by
+** Siegfried "Frieder" Loeffler (dg1sek) <floeff@mathematik.uni-stuttgart.de>
+**
+** Maintained by: Jaroslav Kysela <perex@perex.cz>
+**
+** This driver has only been tested with
+** -- HP J2585B 10/100 Mbit/s PCI Busmaster
+** -- HP J2585A 10/100 Mbit/s PCI
+** -- HP J2970A 10 Mbit/s PCI Combo 10base-T/BNC
+** -- HP J2973A 10 Mbit/s PCI 10base-T
+** -- HP J2573 10/100 ISA
+** -- Compex ReadyLink ENET100-VG4 10/100 Mbit/s PCI / EISA
+** -- Compex FreedomLine 100/VG 10/100 Mbit/s ISA / EISA / PCI
+**
+** but it should also work with the other CASCADE based adapters.
+**
+** TODO:
+** - J2573 seems to hang sometimes when in shared memory mode.
+** - Mode for Priority TX
+** - Check PCI registers, performance might be improved?
+** - To reduce interrupt load in busmaster, one could switch off
+** the interrupts that are used to refill the queues whenever the
+** queues are filled up to more than a certain threshold.
+** - some updates for EISA version of card
+**
+**
+** This code is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This code is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+** 1.57c -> 1.58
+** - used indent to change coding-style
+** - added KTI DP-200 EISA ID
+** - ioremap is also used for low (<1MB) memory (multi-architecture support)
+**
+** 1.57b -> 1.57c - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+** - release resources on failure in init_module
+**
+** 1.57 -> 1.57b - Jean II
+** - fix spinlocks, SMP is now working !
+**
+** 1.56 -> 1.57
+** - updates for new PCI interface for 2.1 kernels
+**
+** 1.55 -> 1.56
+** - removed printk in misc. interrupt and update statistics to allow
+** monitoring of card status
+** - timing changes in xmit routines, relogin to 100VG hub added when
+** driver does reset
+** - included fix for Compex FreedomLine PCI adapter
+**
+** 1.54 -> 1.55
+** - fixed bad initialization in init_module
+** - added Compex FreedomLine adapter
+** - some fixes in card initialization
+**
+** 1.53 -> 1.54
+** - added hardware multicast filter support (doesn't work)
+** - little changes in hp100_sense_lan routine
+** - added support for Coax and AUI (J2970)
+** - fix for multiple cards and hp100_mode parameter (insmod)
+** - fix for shared IRQ
+**
+** 1.52 -> 1.53
+** - fixed bug in multicast support
+**
+*/
+
+#define HP100_DEFAULT_PRIORITY_TX 0
+
+#undef HP100_DEBUG
+#undef HP100_DEBUG_B /* Trace */
+#undef HP100_DEBUG_BM /* Debug busmaster code (PDL stuff) */
+
+#undef HP100_DEBUG_TRAINING /* Debug login-to-hub procedure */
+#undef HP100_DEBUG_TX
+#undef HP100_DEBUG_IRQ
+#undef HP100_DEBUG_RX
+
+#undef HP100_MULTICAST_FILTER /* Need to be debugged... */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/eisa.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+
+#include <asm/io.h>
+
+#include "hp100.h"
+
+/*
+ * defines
+ */
+
+#define HP100_BUS_ISA 0
+#define HP100_BUS_EISA 1
+#define HP100_BUS_PCI 2
+
+#define HP100_REGION_SIZE 0x20 /* for ioports */
+#define HP100_SIG_LEN 8 /* same as EISA_SIG_LEN */
+
+#define HP100_MAX_PACKET_SIZE (1536+4)
+#define HP100_MIN_PACKET_SIZE 60
+
+#ifndef HP100_DEFAULT_RX_RATIO
+/* default - 75% of the onboard memory on the card is used for RX packets */
+#define HP100_DEFAULT_RX_RATIO 75
+#endif
+
+#ifndef HP100_DEFAULT_PRIORITY_TX
+/* default - don't transmit outgoing packets with priority */
+#define HP100_DEFAULT_PRIORITY_TX 0
+#endif
+
+/*
+ * structures
+ */
+
+struct hp100_private {
+ spinlock_t lock;
+ char id[HP100_SIG_LEN];
+ u_short chip;
+ u_short soft_model;
+ u_int memory_size;
+ u_int virt_memory_size;
+ u_short rx_ratio; /* 1 - 99 */
+ u_short priority_tx; /* != 0 - priority tx */
+ u_short mode; /* PIO, Shared Mem or Busmaster */
+ u_char bus;
+ struct pci_dev *pci_dev;
+ short mem_mapped; /* memory mapped access */
+ void __iomem *mem_ptr_virt; /* virtual memory mapped area, maybe NULL */
+ unsigned long mem_ptr_phys; /* physical memory mapped area */
+ short lan_type; /* 10Mb/s, 100Mb/s or -1 (error) */
+ int hub_status; /* was login to hub successful? */
+ u_char mac1_mode;
+ u_char mac2_mode;
+ u_char hash_bytes[8];
+
+ /* Rings for busmaster mode: */
+ hp100_ring_t *rxrhead; /* Head (oldest) index into rxring */
+ hp100_ring_t *rxrtail; /* Tail (newest) index into rxring */
+ hp100_ring_t *txrhead; /* Head (oldest) index into txring */
+ hp100_ring_t *txrtail; /* Tail (newest) index into txring */
+
+ hp100_ring_t rxring[MAX_RX_PDL];
+ hp100_ring_t txring[MAX_TX_PDL];
+
+ u_int *page_vaddr_algn; /* Aligned virtual address of allocated page */
+ u_long whatever_offset; /* Offset to bus/phys/dma address */
+ int rxrcommit; /* # Rx PDLs committed to adapter */
+ int txrcommit; /* # Tx PDLs committed to adapter */
+};
+
+/*
+ * variables
+ */
+#ifdef CONFIG_ISA
+static const char *hp100_isa_tbl[] = {
+ "HWPF150", /* HP J2573 rev A */
+ "HWP1950", /* HP J2573 */
+};
+#endif
+
+#ifdef CONFIG_EISA
+static struct eisa_device_id hp100_eisa_tbl[] = {
+ { "HWPF180" }, /* HP J2577 rev A */
+ { "HWP1920" }, /* HP 27248B */
+ { "HWP1940" }, /* HP J2577 */
+ { "HWP1990" }, /* HP J2577 */
+ { "CPX0301" }, /* ReadyLink ENET100-VG4 */
+ { "CPX0401" }, /* FreedomLine 100/VG */
+ { "" } /* Mandatory final entry ! */
+};
+MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
+#endif
+
+#ifdef CONFIG_PCI
+static DEFINE_PCI_DEVICE_TABLE(hp100_pci_tbl) = {
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2970A, PCI_ANY_ID, PCI_ANY_ID,},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2973A, PCI_ANY_ID, PCI_ANY_ID,},
+ {PCI_VENDOR_ID_COMPEX, PCI_DEVICE_ID_COMPEX_ENET100VG4, PCI_ANY_ID, PCI_ANY_ID,},
+ {PCI_VENDOR_ID_COMPEX2, PCI_DEVICE_ID_COMPEX2_100VG, PCI_ANY_ID, PCI_ANY_ID,},
+/* {PCI_VENDOR_ID_KTI, PCI_DEVICE_ID_KTI_DP200, PCI_ANY_ID, PCI_ANY_ID }, */
+ {} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, hp100_pci_tbl);
+#endif
+
+static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO;
+static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX;
+static int hp100_mode = 1;
+
+module_param(hp100_rx_ratio, int, 0);
+module_param(hp100_priority_tx, int, 0);
+module_param(hp100_mode, int, 0);
+
+/*
+ * prototypes
+ */
+
+static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus,
+ struct pci_dev *pci_dev);
+
+
+static int hp100_open(struct net_device *dev);
+static int hp100_close(struct net_device *dev);
+static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
+ struct net_device *dev);
+static void hp100_rx(struct net_device *dev);
+static struct net_device_stats *hp100_get_stats(struct net_device *dev);
+static void hp100_misc_interrupt(struct net_device *dev);
+static void hp100_update_stats(struct net_device *dev);
+static void hp100_clear_stats(struct hp100_private *lp, int ioaddr);
+static void hp100_set_multicast_list(struct net_device *dev);
+static irqreturn_t hp100_interrupt(int irq, void *dev_id);
+static void hp100_start_interface(struct net_device *dev);
+static void hp100_stop_interface(struct net_device *dev);
+static void hp100_load_eeprom(struct net_device *dev, u_short ioaddr);
+static int hp100_sense_lan(struct net_device *dev);
+static int hp100_login_to_vg_hub(struct net_device *dev,
+ u_short force_relogin);
+static int hp100_down_vg_link(struct net_device *dev);
+static void hp100_cascade_reset(struct net_device *dev, u_short enable);
+static void hp100_BM_shutdown(struct net_device *dev);
+static void hp100_mmuinit(struct net_device *dev);
+static void hp100_init_pdls(struct net_device *dev);
+static int hp100_init_rxpdl(struct net_device *dev,
+ register hp100_ring_t * ringptr,
+ register u_int * pdlptr);
+static int hp100_init_txpdl(struct net_device *dev,
+ register hp100_ring_t * ringptr,
+ register u_int * pdlptr);
+static void hp100_rxfill(struct net_device *dev);
+static void hp100_hwinit(struct net_device *dev);
+static void hp100_clean_txring(struct net_device *dev);
+#ifdef HP100_DEBUG
+static void hp100_RegisterDump(struct net_device *dev);
+#endif
+
+/* Conversion to new PCI API :
+ * Convert an address in a kernel buffer to a bus/phys/dma address.
+ * This works *only* for memory fragments that are part of lp->page_vaddr_algn,
+ * because it was properly DMA allocated via pci_alloc_consistent(),
+ * so we just need to "retrieve" the original mapping to bus/phys/dma
+ * address - Jean II */
+static inline dma_addr_t virt_to_whatever(struct net_device *dev, u32 * ptr)
+{
+ struct hp100_private *lp = netdev_priv(dev);
+ return ((u_long) ptr) + lp->whatever_offset;
+}
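+
+/* Illustrative example: if pci_alloc_consistent() returned virtual address
+ * 0xc0100000 with bus address 0x00100000, whatever_offset (computed in
+ * hp100_probe1) would be 0x00100000 - 0xc0100000, and adding it to any
+ * pointer inside that page recovers the pointer's bus/dma address. */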
+
+static inline u_int pdl_map_data(struct hp100_private *lp, void *data)
+{
+ return pci_map_single(lp->pci_dev, data,
+ MAX_ETHER_SIZE, PCI_DMA_FROMDEVICE);
+}
+
+/* TODO: This function should not really be needed in a good design... */
+static void wait(void)
+{
+ mdelay(1);
+}
+
+/*
+ * probe functions
+ * These functions should - if possible - avoid doing write operations
+ * since this could cause problems when the card is not installed.
+ */
+
+/*
+ * Read board id and convert to string.
+ * Effectively same code as decode_eisa_sig
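+ * Worked example (illustrative): ID bytes 0x22 0xf0 0x19 0x50 plus a
+ * checksum byte decode to "HWP1950" (5 bits per letter, 'A' = 1; the
+ * 16-bit revision is printed as %04X).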
+ */
+static __devinit const char *hp100_read_id(int ioaddr)
+{
+ int i;
+ static char str[HP100_SIG_LEN];
+ unsigned char sig[4], sum;
+ unsigned short rev;
+
+ hp100_page(ID_MAC_ADDR);
+ sum = 0;
+ for (i = 0; i < 4; i++) {
+ sig[i] = hp100_inb(BOARD_ID + i);
+ sum += sig[i];
+ }
+
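+ /* The fifth byte is the checksum byte; all five bytes must sum to 0xff. */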
+ sum += hp100_inb(BOARD_ID + i);
+ if (sum != 0xff)
+ return NULL; /* bad checksum */
+
+ str[0] = ((sig[0] >> 2) & 0x1f) + ('A' - 1);
+ str[1] = (((sig[0] & 3) << 3) | (sig[1] >> 5)) + ('A' - 1);
+ str[2] = (sig[1] & 0x1f) + ('A' - 1);
+ rev = (sig[2] << 8) | sig[3];
+ sprintf(str + 3, "%04X", rev);
+
+ return str;
+}
+
+#ifdef CONFIG_ISA
+static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr)
+{
+ const char *sig;
+ int i;
+
+ if (!request_region(ioaddr, HP100_REGION_SIZE, "hp100"))
+ goto err;
+
+ if (hp100_inw(HW_ID) != HP100_HW_ID_CASCADE) {
+ release_region(ioaddr, HP100_REGION_SIZE);
+ goto err;
+ }
+
+ sig = hp100_read_id(ioaddr);
+ release_region(ioaddr, HP100_REGION_SIZE);
+
+ if (sig == NULL)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(hp100_isa_tbl); i++) {
+ if (!strcmp(hp100_isa_tbl[i], sig))
+ break;
+
+ }
+
+ if (i < ARRAY_SIZE(hp100_isa_tbl))
+ return hp100_probe1(dev, ioaddr, HP100_BUS_ISA, NULL);
+ err:
+ return -ENODEV;
+
+}
+/*
+ * Probe for ISA board.
+ * EISA and PCI are handled by device infrastructure.
+ */
+
+static int __init hp100_isa_probe(struct net_device *dev, int addr)
+{
+ int err = -ENODEV;
+
+ /* Probe for a specific ISA address */
+ if (addr > 0xff && addr < 0x400)
+ err = hp100_isa_probe1(dev, addr);
+
+ else if (addr != 0)
+ err = -ENXIO;
+
+ else {
+ /* Probe all ISA possible port regions */
+ for (addr = 0x100; addr < 0x400; addr += 0x20) {
+ err = hp100_isa_probe1(dev, addr);
+ if (!err)
+ break;
+ }
+ }
+ return err;
+}
+#endif /* CONFIG_ISA */
+
+#if !defined(MODULE) && defined(CONFIG_ISA)
+struct net_device * __init hp100_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4200, TRACE);
+ printk("hp100: %s: probe\n", dev->name);
+#endif
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ err = hp100_isa_probe(dev, dev->base_addr);
+ if (err)
+ goto out;
+
+ return dev;
+ out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif /* !MODULE && CONFIG_ISA */
+
+static const struct net_device_ops hp100_bm_netdev_ops = {
+ .ndo_open = hp100_open,
+ .ndo_stop = hp100_close,
+ .ndo_start_xmit = hp100_start_xmit_bm,
+ .ndo_get_stats = hp100_get_stats,
+ .ndo_set_rx_mode = hp100_set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static const struct net_device_ops hp100_netdev_ops = {
+ .ndo_open = hp100_open,
+ .ndo_stop = hp100_close,
+ .ndo_start_xmit = hp100_start_xmit,
+ .ndo_get_stats = hp100_get_stats,
+ .ndo_set_rx_mode = hp100_set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __devinit hp100_probe1(struct net_device *dev, int ioaddr,
+ u_char bus, struct pci_dev *pci_dev)
+{
+ int i;
+ int err = -ENODEV;
+ const char *eid;
+ u_int chip;
+ u_char uc;
+ u_int memory_size = 0, virt_memory_size = 0;
+ u_short local_mode, lsw;
+ short mem_mapped;
+ unsigned long mem_ptr_phys;
+ void __iomem *mem_ptr_virt;
+ struct hp100_private *lp;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4201, TRACE);
+ printk("hp100: %s: probe1\n", dev->name);
+#endif
+
+ /* memory region for programmed i/o */
+ if (!request_region(ioaddr, HP100_REGION_SIZE, "hp100"))
+ goto out1;
+
+ if (hp100_inw(HW_ID) != HP100_HW_ID_CASCADE)
+ goto out2;
+
+ chip = hp100_inw(PAGING) & HP100_CHIPID_MASK;
+#ifdef HP100_DEBUG
+ if (chip == HP100_CHIPID_SHASTA)
+ printk("hp100: %s: Shasta Chip detected. (This is a pre 802.12 chip)\n", dev->name);
+ else if (chip == HP100_CHIPID_RAINIER)
+ printk("hp100: %s: Rainier Chip detected. (This is a pre 802.12 chip)\n", dev->name);
+ else if (chip == HP100_CHIPID_LASSEN)
+ printk("hp100: %s: Lassen Chip detected.\n", dev->name);
+ else
+ printk("hp100: %s: Warning: Unknown CASCADE chip (id=0x%.4x).\n", dev->name, chip);
+#endif
+
+ dev->base_addr = ioaddr;
+
+ eid = hp100_read_id(ioaddr);
+ if (eid == NULL) { /* bad checksum? */
+ printk(KERN_WARNING "hp100_probe: bad ID checksum at base port 0x%x\n", ioaddr);
+ goto out2;
+ }
+
+ hp100_page(ID_MAC_ADDR);
+ for (i = uc = 0; i < 7; i++)
+ uc += hp100_inb(LAN_ADDR + i);
+ if (uc != 0xff) {
+ printk(KERN_WARNING "hp100_probe: bad lan address checksum at port 0x%x)\n", ioaddr);
+ err = -EIO;
+ goto out2;
+ }
+
+ /* Make sure that all registers are correctly updated... */
+
+ hp100_load_eeprom(dev, ioaddr);
+ wait();
+
+ /*
+ * Determine driver operation mode
+ *
+ * Use the variable "hp100_mode" upon insmod or as kernel parameter to
+ * force driver modes:
+ * hp100_mode=1 -> default, use busmaster mode if configured.
+ * hp100_mode=2 -> enable shared memory mode
+ * hp100_mode=3 -> force use of i/o mapped mode.
+ * hp100_mode=4 -> same as 1, but re-set the enable bit on the card.
+ */
+
+ /*
+ * LSW values:
+ * 0x2278 -> J2585B, PnP shared memory mode
+ * 0x2270 -> J2585B, shared memory mode, 0xdc000
+ * 0xa23c -> J2585B, I/O mapped mode
+ * 0x2240 -> EISA COMPEX, BusMaster (Shasta Chip)
+ * 0x2220 -> EISA HP, I/O (Shasta Chip)
+ * 0x2260 -> EISA HP, BusMaster (Shasta Chip)
+ */
+
+#if 0
+ local_mode = 0x2270;
+ hp100_outw(0xfefe, OPTION_LSW);
+ hp100_outw(local_mode | HP100_SET_LB | HP100_SET_HB, OPTION_LSW);
+#endif
+
+ /* The hp100_mode value may be used by other cards in the future */
+ local_mode = hp100_mode;
+ if (local_mode < 1 || local_mode > 4)
+ local_mode = 1; /* default */
+#ifdef HP100_DEBUG
+ printk("hp100: %s: original LSW = 0x%x\n", dev->name,
+ hp100_inw(OPTION_LSW));
+#endif
+
+ if (local_mode == 3) {
+ hp100_outw(HP100_MEM_EN | HP100_RESET_LB, OPTION_LSW);
+ hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW);
+ hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_RESET_HB, OPTION_LSW);
+ printk("hp100: IO mapped mode forced.\n");
+ } else if (local_mode == 2) {
+ hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW);
+ hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW);
+ hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_RESET_HB, OPTION_LSW);
+ printk("hp100: Shared memory mode requested.\n");
+ } else if (local_mode == 4) {
+ if (chip == HP100_CHIPID_LASSEN) {
+ hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_SET_HB, OPTION_LSW);
+ hp100_outw(HP100_IO_EN | HP100_MEM_EN | HP100_RESET_LB, OPTION_LSW);
+ printk("hp100: Busmaster mode requested.\n");
+ }
+ local_mode = 1;
+ }
+
+ if (local_mode == 1) { /* default behaviour */
+ lsw = hp100_inw(OPTION_LSW);
+
+ if ((lsw & HP100_IO_EN) && (~lsw & HP100_MEM_EN) &&
+ (~lsw & (HP100_BM_WRITE | HP100_BM_READ))) {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: IO_EN bit is set on card.\n", dev->name);
+#endif
+ local_mode = 3;
+ } else if (chip == HP100_CHIPID_LASSEN &&
+ (lsw & (HP100_BM_WRITE | HP100_BM_READ)) == (HP100_BM_WRITE | HP100_BM_READ)) {
+ /* Conversion to new PCI API :
+ * I don't have the doc, but I assume that the card
+ * can map the full 32bit address space.
+ * Also, we can have EISA Busmaster cards (not tested),
+ * so beware !!! - Jean II */
+ if((bus == HP100_BUS_PCI) &&
+ (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)))) {
+ /* Gracefully fallback to shared memory */
+ goto busmasterfail;
+ }
+ printk("hp100: Busmaster mode enabled.\n");
+ hp100_outw(HP100_MEM_EN | HP100_IO_EN | HP100_RESET_LB, OPTION_LSW);
+ } else {
+ busmasterfail:
+#ifdef HP100_DEBUG
+ printk("hp100: %s: Card not configured for BM or BM not supported with this card.\n", dev->name);
+ printk("hp100: %s: Trying shared memory mode.\n", dev->name);
+#endif
+ /* In this case, try shared memory mode */
+ local_mode = 2;
+ hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW);
+ /* hp100_outw(HP100_IO_EN|HP100_RESET_LB, OPTION_LSW); */
+ }
+ }
+#ifdef HP100_DEBUG
+ printk("hp100: %s: new LSW = 0x%x\n", dev->name, hp100_inw(OPTION_LSW));
+#endif
+
+ /* Check for shared memory on the card and remap it if necessary */
+ hp100_page(HW_MAP);
+ mem_mapped = ((hp100_inw(OPTION_LSW) & (HP100_MEM_EN)) != 0);
+ mem_ptr_phys = 0UL;
+ mem_ptr_virt = NULL;
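+ /* SRAM size is encoded in bits 7:5 of the SRAM register: 8kB << n, i.e. 8kB..1MB */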
+ memory_size = (8192 << ((hp100_inb(SRAM) >> 5) & 0x07));
+ virt_memory_size = 0;
+
+ /* For memory mapped or busmaster mode, we want the memory address */
+ if (mem_mapped || (local_mode == 1)) {
+ mem_ptr_phys = (hp100_inw(MEM_MAP_LSW) | (hp100_inw(MEM_MAP_MSW) << 16));
+ mem_ptr_phys &= ~0x1fff; /* 8k alignment */
+
+ if (bus == HP100_BUS_ISA && (mem_ptr_phys & ~0xfffff) != 0) {
+ printk("hp100: Can only use programmed i/o mode.\n");
+ mem_ptr_phys = 0;
+ mem_mapped = 0;
+ local_mode = 3; /* Use programmed i/o */
+ }
+
+ /* We do not need access to shared memory in busmaster mode */
+ /* However in slave mode we need to remap high (>1GB) card memory */
+ if (local_mode != 1) { /* = not busmaster */
+ /* If ioremap fails, retry with smaller sizes (halving, down to a 16 kB minimum) */
+ for (virt_memory_size = memory_size; virt_memory_size > 16383; virt_memory_size >>= 1) {
+ if ((mem_ptr_virt = ioremap((u_long) mem_ptr_phys, virt_memory_size)) == NULL) {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: ioremap for 0x%x bytes high PCI memory at 0x%lx failed\n", dev->name, virt_memory_size, mem_ptr_phys);
+#endif
+ } else {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: remapped 0x%x bytes high PCI memory at 0x%lx to %p.\n", dev->name, virt_memory_size, mem_ptr_phys, mem_ptr_virt);
+#endif
+ break;
+ }
+ }
+
+ if (mem_ptr_virt == NULL) { /* all ioremap tries failed */
+ printk("hp100: Failed to ioremap the PCI card memory. Will have to use i/o mapped mode.\n");
+ local_mode = 3;
+ virt_memory_size = 0;
+ }
+ }
+ }
+
+ if (local_mode == 3) { /* io mapped forced */
+ mem_mapped = 0;
+ mem_ptr_phys = 0;
+ mem_ptr_virt = NULL;
+ printk("hp100: Using (slow) programmed i/o mode.\n");
+ }
+
+ /* Initialise the "private" data structure for this card. */
+ lp = netdev_priv(dev);
+
+ spin_lock_init(&lp->lock);
+ strlcpy(lp->id, eid, HP100_SIG_LEN);
+ lp->chip = chip;
+ lp->mode = local_mode;
+ lp->bus = bus;
+ lp->pci_dev = pci_dev;
+ lp->priority_tx = hp100_priority_tx;
+ lp->rx_ratio = hp100_rx_ratio; /* can be conf'd with insmod */
+ lp->mem_ptr_phys = mem_ptr_phys;
+ lp->mem_ptr_virt = mem_ptr_virt;
+ hp100_page(ID_MAC_ADDR);
+ lp->soft_model = hp100_inb(SOFT_MODEL);
+ lp->mac1_mode = HP100_MAC1MODE3;
+ lp->mac2_mode = HP100_MAC2MODE3;
+ memset(&lp->hash_bytes, 0x00, 8);
+
+ dev->base_addr = ioaddr;
+
+ lp->memory_size = memory_size;
+ lp->virt_memory_size = virt_memory_size;
+
+ if (lp->mode == 1) /* busmaster */
+ dev->netdev_ops = &hp100_bm_netdev_ops;
+ else
+ dev->netdev_ops = &hp100_netdev_ops;
+
+ /* Ask the card for which IRQ line it is configured */
+ if (bus == HP100_BUS_PCI) {
+ dev->irq = pci_dev->irq;
+ } else {
+ hp100_page(HW_MAP);
+ dev->irq = hp100_inb(IRQ_CHANNEL) & HP100_IRQMASK;
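+ /* ISA IRQ 2 is the cascade line and is reported as IRQ 9 on AT machines */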
+ if (dev->irq == 2)
+ dev->irq = 9;
+ }
+
+ if (lp->mode == 1) /* busmaster */
+ dev->dma = 4;
+
+ /* Ask the card for its MAC address and store it for later use. */
+ hp100_page(ID_MAC_ADDR);
+ for (i = uc = 0; i < 6; i++)
+ dev->dev_addr[i] = hp100_inb(LAN_ADDR + i);
+
+ /* Reset statistics (counters) */
+ hp100_clear_stats(lp, ioaddr);
+
+ /* If busmaster mode is wanted, a dma-capable memory area is needed for
+ * the rx and tx PDLs
+ * PCI cards can access the whole PC memory. Therefore GFP_DMA is not
+ * needed for the allocation of the memory area.
+ */
+
+ /* TODO: We do not need this with old cards, where PDLs are stored
+ * in the cards shared memory area. But currently, busmaster has been
+ * implemented/tested only with the lassen chip anyway... */
+ if (lp->mode == 1) { /* busmaster */
+ dma_addr_t page_baddr;
+ /* Get physically continuous memory for TX & RX PDLs */
+ /* Conversion to new PCI API :
+ * Pages are always aligned and zeroed, no need to do it ourselves.
+ * Doc says should be OK for EISA bus as well - Jean II */
+ lp->page_vaddr_algn = pci_alloc_consistent(lp->pci_dev, MAX_RINGSIZE, &page_baddr);
+ if (!lp->page_vaddr_algn) {
+ err = -ENOMEM;
+ goto out_mem_ptr;
+ }
+ lp->whatever_offset = ((u_long) page_baddr) - ((u_long) lp->page_vaddr_algn);
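+ /* virt_to_whatever() above adds this offset to translate pointers
+ * within the page back to bus/phys/dma addresses. */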
+
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: Reserved DMA memory from 0x%x to 0x%x\n", dev->name, (u_int) lp->page_vaddr_algn, (u_int) lp->page_vaddr_algn + MAX_RINGSIZE);
+#endif
+ lp->rxrcommit = lp->txrcommit = 0;
+ lp->rxrhead = lp->rxrtail = &(lp->rxring[0]);
+ lp->txrhead = lp->txrtail = &(lp->txring[0]);
+ }
+
+ /* Initialise the card. */
+ /* (I'm not really sure if it's a good idea to do this during probing, but
+ * this way it is ensured that the lan connection type can be sensed
+ * correctly)
+ */
+ hp100_hwinit(dev);
+
+ /* Try to find out which kind of LAN the card is connected to. */
+ lp->lan_type = hp100_sense_lan(dev);
+
+ /* Print out a message about what we think we have probed. */
+ printk("hp100: at 0x%x, IRQ %d, ", ioaddr, dev->irq);
+ switch (bus) {
+ case HP100_BUS_EISA:
+ printk("EISA");
+ break;
+ case HP100_BUS_PCI:
+ printk("PCI");
+ break;
+ default:
+ printk("ISA");
+ break;
+ }
+ printk(" bus, %dk SRAM (rx/tx %d%%).\n", lp->memory_size >> 10, lp->rx_ratio);
+
+ if (lp->mode == 2) { /* memory mapped */
+ printk("hp100: Memory area at 0x%lx-0x%lx", mem_ptr_phys,
+ (mem_ptr_phys + (mem_ptr_phys > 0x100000 ? (u_long) lp->memory_size : 16 * 1024)) - 1);
+ if (mem_ptr_virt)
+ printk(" (virtual base %p)", mem_ptr_virt);
+ printk(".\n");
+
+ /* Set for info when doing ifconfig */
+ dev->mem_start = mem_ptr_phys;
+ dev->mem_end = mem_ptr_phys + lp->memory_size;
+ }
+
+ printk("hp100: ");
+ if (lp->lan_type != HP100_LAN_ERR)
+ printk("Adapter is attached to ");
+ switch (lp->lan_type) {
+ case HP100_LAN_100:
+ printk("100Mb/s Voice Grade AnyLAN network.\n");
+ break;
+ case HP100_LAN_10:
+ printk("10Mb/s network (10baseT).\n");
+ break;
+ case HP100_LAN_COAX:
+ printk("10Mb/s network (coax).\n");
+ break;
+ default:
+ printk("Warning! Link down.\n");
+ }
+
+ err = register_netdev(dev);
+ if (err)
+ goto out3;
+
+ return 0;
+out3:
+ if (local_mode == 1)
+ pci_free_consistent(lp->pci_dev, MAX_RINGSIZE,
+ lp->page_vaddr_algn,
+ virt_to_whatever(dev, lp->page_vaddr_algn));
+out_mem_ptr:
+ if (mem_ptr_virt)
+ iounmap(mem_ptr_virt);
+out2:
+ release_region(ioaddr, HP100_REGION_SIZE);
+out1:
+ return err;
+}
+
+/* This procedure puts the card into a stable init state */
+static void hp100_hwinit(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4202, TRACE);
+ printk("hp100: %s: hwinit\n", dev->name);
+#endif
+
+ /* Initialise the card. -------------------------------------------- */
+
+ /* Clear all pending Ints and disable Ints */
+ hp100_page(PERFORMANCE);
+ hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */
+ hp100_outw(0xffff, IRQ_STATUS); /* clear all pending ints */
+
+ hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW);
+ hp100_outw(HP100_TRI_INT | HP100_SET_HB, OPTION_LSW);
+
+ if (lp->mode == 1) {
+ hp100_BM_shutdown(dev); /* disables BM, puts cascade in reset */
+ wait();
+ } else {
+ hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW);
+ hp100_cascade_reset(dev, 1);
+ hp100_page(MAC_CTRL);
+ hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1);
+ }
+
+ /* Initiate EEPROM reload */
+ hp100_load_eeprom(dev, 0);
+
+ wait();
+
+ /* Go into reset again. */
+ hp100_cascade_reset(dev, 1);
+
+ /* Set Option Registers to a safe state */
+ hp100_outw(HP100_DEBUG_EN |
+ HP100_RX_HDR |
+ HP100_EE_EN |
+ HP100_BM_WRITE |
+ HP100_BM_READ | HP100_RESET_HB |
+ HP100_FAKE_INT |
+ HP100_INT_EN |
+ HP100_MEM_EN |
+ HP100_IO_EN | HP100_RESET_LB, OPTION_LSW);
+
+ hp100_outw(HP100_TRI_INT |
+ HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW);
+
+ hp100_outb(HP100_PRIORITY_TX |
+ HP100_ADV_NXT_PKT |
+ HP100_TX_CMD | HP100_RESET_LB, OPTION_MSW);
+
+ /* TODO: Configure MMU for Ram Test. */
+ /* TODO: Ram Test. */
+
+ /* Re-check if adapter is still at same i/o location */
+ /* (If the base i/o in eeprom has been changed but the */
+ /* registers had not been changed, a reload of the eeprom */
+ /* would move the adapter to the address stored in eeprom.) */
+
+ /* TODO: Code to implement. */
+
+ /* Until here it was code from HWdiscover procedure. */
+ /* Next comes code from mmuinit procedure of SCO BM driver which is
+ * called from HWconfigure in the SCO driver. */
+
+ /* Initialise MMU, eventually switch on Busmaster Mode, initialise
+ * multicast filter...
+ */
+ hp100_mmuinit(dev);
+
+ /* We don't turn the interrupts on here - this is done by start_interface. */
+ wait(); /* TODO: Do we really need this? */
+
+ /* Enable Hardware (e.g. unreset) */
+ hp100_cascade_reset(dev, 0);
+
+ /* ------- initialisation complete ----------- */
+
+ /* Finally try to log in the Hub if there may be a VG connection. */
+ if ((lp->lan_type == HP100_LAN_100) || (lp->lan_type == HP100_LAN_ERR))
+ hp100_login_to_vg_hub(dev, 0); /* relogin */
+
+}
+
+
+/*
+ * mmuinit - Reinitialise Cascade MMU and MAC settings.
+ * Note: Must already be in reset and leaves card in reset.
+ */
+static void hp100_mmuinit(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+ int i;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4203, TRACE);
+ printk("hp100: %s: mmuinit\n", dev->name);
+#endif
+
+#ifdef HP100_DEBUG
+ if (0 != (hp100_inw(OPTION_LSW) & HP100_HW_RST)) {
+ printk("hp100: %s: Not in reset when entering mmuinit. Fix me.\n", dev->name);
+ return;
+ }
+#endif
+
+ /* Make sure IRQs are masked off and ack'ed. */
+ hp100_page(PERFORMANCE);
+ hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */
+ hp100_outw(0xffff, IRQ_STATUS); /* ack IRQ */
+
+ /*
+ * Enable Hardware
+ * - Clear Debug En, Rx Hdr Pipe, EE En, I/O En, Fake Int and Intr En
+ * - Set Tri-State Int, Bus Master Rd/Wr, and Mem Map Disable
+ * - Clear Priority, Advance Pkt and Xmit Cmd
+ */
+
+ hp100_outw(HP100_DEBUG_EN |
+ HP100_RX_HDR |
+ HP100_EE_EN | HP100_RESET_HB |
+ HP100_IO_EN |
+ HP100_FAKE_INT |
+ HP100_INT_EN | HP100_RESET_LB, OPTION_LSW);
+
+ hp100_outw(HP100_TRI_INT | HP100_SET_HB, OPTION_LSW);
+
+ if (lp->mode == 1) { /* busmaster */
+ hp100_outw(HP100_BM_WRITE |
+ HP100_BM_READ |
+ HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW);
+ } else if (lp->mode == 2) { /* memory mapped */
+ hp100_outw(HP100_BM_WRITE |
+ HP100_BM_READ | HP100_RESET_HB, OPTION_LSW);
+ hp100_outw(HP100_MMAP_DIS | HP100_RESET_HB, OPTION_LSW);
+ hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW);
+ hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW);
+ } else if (lp->mode == 3) { /* i/o mapped mode */
+ hp100_outw(HP100_MMAP_DIS | HP100_SET_HB |
+ HP100_IO_EN | HP100_SET_LB, OPTION_LSW);
+ }
+
+ hp100_page(HW_MAP);
+ hp100_outb(0, EARLYRXCFG);
+ hp100_outw(0, EARLYTXCFG);
+
+ /*
+ * Enable Bus Master mode
+ */
+ if (lp->mode == 1) { /* busmaster */
+ /* Experimental: Set some PCI configuration bits */
+ hp100_page(HW_MAP);
+ hp100_andb(~HP100_PDL_USE3, MODECTRL1); /* BM engine read maximum */
+ hp100_andb(~HP100_TX_DUALQ, MODECTRL1); /* No Queue for Priority TX */
+
+ /* PCI Bus failures should result in a Misc. Interrupt */
+ hp100_orb(HP100_EN_BUS_FAIL, MODECTRL2);
+
+ hp100_outw(HP100_BM_READ | HP100_BM_WRITE | HP100_SET_HB, OPTION_LSW);
+ hp100_page(HW_MAP);
+ /* Use Burst Mode and switch on PAGE_CK */
+ hp100_orb(HP100_BM_BURST_RD | HP100_BM_BURST_WR, BM);
+ if ((lp->chip == HP100_CHIPID_RAINIER) || (lp->chip == HP100_CHIPID_SHASTA))
+ hp100_orb(HP100_BM_PAGE_CK, BM);
+ hp100_orb(HP100_BM_MASTER, BM);
+ } else { /* not busmaster */
+
+ hp100_page(HW_MAP);
+ hp100_andb(~HP100_BM_MASTER, BM);
+ }
+
+ /*
+ * Divide card memory into regions for Rx, Tx and, if non-ETR chip, PDLs
+ */
+ hp100_page(MMU_CFG);
+ if (lp->mode == 1) { /* only needed for Busmaster */
+ int xmit_stop, recv_stop;
+
+ if ((lp->chip == HP100_CHIPID_RAINIER) ||
+ (lp->chip == HP100_CHIPID_SHASTA)) {
+ int pdl_stop;
+
+ /*
+ * Each pdl is 508 bytes long: 63 fragments * 8 bytes (4 for the address,
+ * 4 for the length) plus 4 bytes for the header. We will leave
+ * NUM_RXPDLS * 508 (rounded
+ * to the next higher 1k boundary) bytes for the rx-pdl's
+ * Note: For non-etr chips the transmit stop register must be
+ * programmed on a 1k boundary, i.e. bits 9:0 must be zero.
+ */
+ pdl_stop = lp->memory_size;
+ xmit_stop = (pdl_stop - 508 * (MAX_RX_PDL) - 16) & ~(0x03ff);
+ recv_stop = (xmit_stop * (lp->rx_ratio) / 100) & ~(0x03ff);
+ hp100_outw((pdl_stop >> 4) - 1, PDL_MEM_STOP);
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: PDL_STOP = 0x%x\n", dev->name, pdl_stop);
+#endif
+ } else {
+ /* ETR chip (Lassen) in busmaster mode */
+ xmit_stop = (lp->memory_size) - 1;
+ recv_stop = ((lp->memory_size * lp->rx_ratio) / 100) & ~(0x03ff);
+ }
+
+ hp100_outw(xmit_stop >> 4, TX_MEM_STOP);
+ hp100_outw(recv_stop >> 4, RX_MEM_STOP);
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: TX_STOP = 0x%x\n", dev->name, xmit_stop >> 4);
+ printk("hp100: %s: RX_STOP = 0x%x\n", dev->name, recv_stop >> 4);
+#endif
+ } else {
+ /* Slave modes (memory mapped and programmed io) */
+ hp100_outw((((lp->memory_size * lp->rx_ratio) / 100) >> 4), RX_MEM_STOP);
+ hp100_outw(((lp->memory_size - 1) >> 4), TX_MEM_STOP);
+#ifdef HP100_DEBUG
+ printk("hp100: %s: TX_MEM_STOP: 0x%x\n", dev->name, hp100_inw(TX_MEM_STOP));
+ printk("hp100: %s: RX_MEM_STOP: 0x%x\n", dev->name, hp100_inw(RX_MEM_STOP));
+#endif
+ }
+
+ /* Write MAC address into page 1 */
+ hp100_page(MAC_ADDRESS);
+ for (i = 0; i < 6; i++)
+ hp100_outb(dev->dev_addr[i], MAC_ADDR + i);
+
+ /* Zero the multicast hash registers */
+ for (i = 0; i < 8; i++)
+ hp100_outb(0x0, HASH_BYTE0 + i);
+
+ /* Set up MAC defaults */
+ hp100_page(MAC_CTRL);
+
+ /* Go to LAN Page and zero all filter bits */
+ /* Zero accept error, accept multicast, accept broadcast and accept */
+ /* all directed packet bits */
+ hp100_andb(~(HP100_RX_EN |
+ HP100_TX_EN |
+ HP100_ACC_ERRORED |
+ HP100_ACC_MC |
+ HP100_ACC_BC | HP100_ACC_PHY), MAC_CFG_1);
+
+ hp100_outb(0x00, MAC_CFG_2);
+
+ /* Zero the frame format bit. This works around a training bug in the */
+ /* new hubs. */
+ hp100_outb(0x00, VG_LAN_CFG_2); /* (use 802.3) */
+
+ if (lp->priority_tx)
+ hp100_outb(HP100_PRIORITY_TX | HP100_SET_LB, OPTION_MSW);
+ else
+ hp100_outb(HP100_PRIORITY_TX | HP100_RESET_LB, OPTION_MSW);
+
+ hp100_outb(HP100_ADV_NXT_PKT |
+ HP100_TX_CMD | HP100_RESET_LB, OPTION_MSW);
+
+ /* If busmaster, initialize the PDLs */
+ if (lp->mode == 1)
+ hp100_init_pdls(dev);
+
+ /* Go to performance page and initialize isr and imr registers */
+ hp100_page(PERFORMANCE);
+ hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */
+ hp100_outw(0xffff, IRQ_STATUS); /* ack IRQ */
+}
+
+/*
+ * open/close functions
+ */
+
+static int hp100_open(struct net_device *dev)
+{
+ struct hp100_private *lp = netdev_priv(dev);
+#ifdef HP100_DEBUG_B
+ int ioaddr = dev->base_addr;
+#endif
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4204, TRACE);
+ printk("hp100: %s: open\n", dev->name);
+#endif
+
+ /* New: if bus is PCI or EISA, interrupts might be shared interrupts */
+ if (request_irq(dev->irq, hp100_interrupt,
+ lp->bus == HP100_BUS_PCI || lp->bus ==
+ HP100_BUS_EISA ? IRQF_SHARED : IRQF_DISABLED,
+ "hp100", dev)) {
+ printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_start_queue(dev);
+
+ lp->lan_type = hp100_sense_lan(dev);
+ lp->mac1_mode = HP100_MAC1MODE3;
+ lp->mac2_mode = HP100_MAC2MODE3;
+ memset(&lp->hash_bytes, 0x00, 8);
+
+ hp100_stop_interface(dev);
+
+ hp100_hwinit(dev);
+
+ hp100_start_interface(dev); /* sets mac modes, enables interrupts */
+
+ return 0;
+}
+
+/* The close function is called when the interface is to be brought down */
+static int hp100_close(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4205, TRACE);
+ printk("hp100: %s: close\n", dev->name);
+#endif
+
+ hp100_page(PERFORMANCE);
+ hp100_outw(0xfefe, IRQ_MASK); /* mask off all IRQs */
+
+ hp100_stop_interface(dev);
+
+ if (lp->lan_type == HP100_LAN_100)
+ lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+
+ netif_stop_queue(dev);
+
+ free_irq(dev->irq, dev);
+
+#ifdef HP100_DEBUG
+ printk("hp100: %s: close LSW = 0x%x\n", dev->name,
+ hp100_inw(OPTION_LSW));
+#endif
+
+ return 0;
+}
+
+
+/*
+ * Configure the PDL Rx rings and LAN
+ */
+static void hp100_init_pdls(struct net_device *dev)
+{
+ struct hp100_private *lp = netdev_priv(dev);
+ hp100_ring_t *ringptr;
+ u_int *pageptr; /* Warning : increment by 4 - Jean II */
+ int i;
+
+#ifdef HP100_DEBUG_B
+ int ioaddr = dev->base_addr;
+#endif
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4206, TRACE);
+ printk("hp100: %s: init pdls\n", dev->name);
+#endif
+
+ if (!lp->page_vaddr_algn)
+ printk("hp100: %s: Warning: lp->page_vaddr_algn not initialised!\n", dev->name);
+ else {
+ /* pageptr shall point into the DMA accessible memory region */
+ /* we use this pointer to track the upper limit of allocated */
+ /* memory in the allocated page. */
+ /* note: align the pointers to the pci cache line size */
+ memset(lp->page_vaddr_algn, 0, MAX_RINGSIZE); /* Zero Rx/Tx ring page */
+ pageptr = lp->page_vaddr_algn;
+
+ lp->rxrcommit = 0;
+ ringptr = lp->rxrhead = lp->rxrtail = &(lp->rxring[0]);
+
+ /* Initialise Rx Ring */
+ for (i = MAX_RX_PDL - 1; i >= 0; i--) {
+ lp->rxring[i].next = ringptr;
+ ringptr = &(lp->rxring[i]);
+ pageptr += hp100_init_rxpdl(dev, ringptr, pageptr);
+ }
+
+ /* Initialise Tx Ring */
+ lp->txrcommit = 0;
+ ringptr = lp->txrhead = lp->txrtail = &(lp->txring[0]);
+ for (i = MAX_TX_PDL - 1; i >= 0; i--) {
+ lp->txring[i].next = ringptr;
+ ringptr = &(lp->txring[i]);
+ pageptr += hp100_init_txpdl(dev, ringptr, pageptr);
+ }
+ }
+}
+
+
+/* These functions "format" the entries in the pdl structure */
+/* They return the number of 32-bit words the PDL occupies; the caller */
+/* advances its u_int pointer (pageptr) by this amount. */
+static int hp100_init_rxpdl(struct net_device *dev,
+ register hp100_ring_t * ringptr,
+ register u32 * pdlptr)
+{
+ /* pdlptr is starting address for this pdl */
+
+ if (0 != (((unsigned long) pdlptr) & 0xf))
+ printk("hp100: %s: Init rxpdl: Unaligned pdlptr 0x%lx.\n",
+ dev->name, (unsigned long) pdlptr);
+
+ ringptr->pdl = pdlptr + 1;
+ ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr + 1);
+ ringptr->skb = (void *) NULL;
+
+ /*
+ * Write address and length of first PDL Fragment (which is used for
+ * storing the RX-Header).
+ * We use the 4 bytes _before_ the PDH in the pdl memory area to
+ * store this information. (PDH is at offset 0x04)
+ */
+ /* Note that pdlptr+1 and not pdlptr is the pointer to the PDH */
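+ /* Resulting layout relative to pdlptr (cf. hp100_build_rx_pdl below):
+ * pdlptr[0] : 4-byte rx status header, DMA'd here by the card (Frag#1 data)
+ * pdlptr[1] : PDH (this is what ringptr->pdl points to)
+ * pdlptr[2] : bus address of Frag#1 (i.e. of pdlptr[0])
+ * pdlptr[3] : length of Frag#1 (4 bytes)
+ * pdlptr[4],[5] : Frag#2 address/length, filled in by hp100_build_rx_pdl()
+ */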
+
+ *(pdlptr + 2) = (u_int) virt_to_whatever(dev, pdlptr); /* Address Frag 1 */
+ *(pdlptr + 3) = 4; /* Length Frag 1 */
+
+ return roundup(MAX_RX_FRAG * 2 + 2, 4);
+}
+
+
+static int hp100_init_txpdl(struct net_device *dev,
+ register hp100_ring_t * ringptr,
+ register u32 * pdlptr)
+{
+ if (0 != (((unsigned long) pdlptr) & 0xf))
+ printk("hp100: %s: Init txpdl: Unaligned pdlptr 0x%lx.\n", dev->name, (unsigned long) pdlptr);
+
+ ringptr->pdl = pdlptr; /* +1; */
+ ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr); /* +1 */
+ ringptr->skb = (void *) NULL;
+
+ return roundup(MAX_TX_FRAG * 2 + 2, 4);
+}
+
+/*
+ * hp100_build_rx_pdl allocates an sk_buff of maximum size plus two bytes
+ * for possible odd-word alignment (rounded up to the next dword) and sets
+ * the PDL address for fragment #2.
+ * Returns: 0 if unable to allocate an sk_buff
+ * 1 if successful
+ */
+static int hp100_build_rx_pdl(hp100_ring_t * ringptr,
+ struct net_device *dev)
+{
+#ifdef HP100_DEBUG_B
+ int ioaddr = dev->base_addr;
+#endif
+#ifdef HP100_DEBUG_BM
+ u_int *p;
+#endif
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4207, TRACE);
+ printk("hp100: %s: build rx pdl\n", dev->name);
+#endif
+
+ /* Allocate skb buffer of maximum size */
+ /* Note: This depends on the alloc_skb functions allocating more
+ * space than requested, i.e. aligning to 16 bytes */
+
+ ringptr->skb = dev_alloc_skb(roundup(MAX_ETHER_SIZE + 2, 4));
+
+ if (NULL != ringptr->skb) {
+ /*
+ * Reserve 2 bytes at the head of the buffer to land the IP header
+ * on a long word boundary (According to the Network Driver section
+ * in the Linux KHG, this should help to increase performance.)
+ */
+ skb_reserve(ringptr->skb, 2);
+
+ ringptr->skb->dev = dev;
+ ringptr->skb->data = (u_char *) skb_put(ringptr->skb, MAX_ETHER_SIZE);
+
+ /* ringptr->pdl points to the beginning of the PDL, i.e. the PDH */
+ /* Note: 1st Fragment is used for the 4 byte packet status
+ * (receive header). Its PDL entries are set up by init_rxpdl. So
+ * here we only have to set up the PDL fragment entries for the data
+ * part. Those 4 bytes will be stored in the DMA memory region
+ * directly before the PDL.
+ */
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: build_rx_pdl: PDH@0x%x, skb->data (len %d) at 0x%x\n",
+ dev->name, (u_int) ringptr->pdl,
+ roundup(MAX_ETHER_SIZE + 2, 4),
+ (unsigned int) ringptr->skb->data);
+#endif
+
+ /* Conversion to new PCI API : map skbuf data to PCI bus.
+ * Doc says it's OK for EISA as well - Jean II */
+ ringptr->pdl[0] = 0x00020000; /* Write PDH */
+ ringptr->pdl[3] = pdl_map_data(netdev_priv(dev),
+ ringptr->skb->data);
+ ringptr->pdl[4] = MAX_ETHER_SIZE; /* Length of Data */
+
+#ifdef HP100_DEBUG_BM
+ for (p = (ringptr->pdl); p < (ringptr->pdl + 5); p++)
+ printk("hp100: %s: Adr 0x%.8x = 0x%.8x\n", dev->name, (u_int) p, (u_int) * p);
+#endif
+ return 1;
+ }
+ /* else: */
+ /* alloc_skb failed (no memory) -> still can receive the header
+ * fragment into PDL memory. make PDL safe by clearing msgptr and
+ * making the PDL only 1 fragment (i.e. the 4 byte packet status)
+ */
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: build_rx_pdl: PDH@0x%x, No space for skb.\n", dev->name, (u_int) ringptr->pdl);
+#endif
+
+ ringptr->pdl[0] = 0x00010000; /* PDH: Count=1 Fragment */
+
+ return 0;
+}
+
+/*
+ * hp100_rxfill - attempt to fill the Rx Ring with empty skb's
+ *
+ * Makes assumption that skb's are always contiguous memory areas and
+ * therefore PDLs contain only 2 physical fragments.
+ * - While the number of Rx PDLs with buffers is less than maximum
+ * a. Get a maximum packet size skb
+ * b. Put the physical address of the buffer into the PDL.
+ * c. Output physical address of PDL to adapter.
+ */
+static void hp100_rxfill(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ struct hp100_private *lp = netdev_priv(dev);
+ hp100_ring_t *ringptr;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4208, TRACE);
+ printk("hp100: %s: rxfill\n", dev->name);
+#endif
+
+ hp100_page(PERFORMANCE);
+
+ while (lp->rxrcommit < MAX_RX_PDL) {
+ /*
+ ** Attempt to get a buffer and build a Rx PDL.
+ */
+ ringptr = lp->rxrtail;
+ if (0 == hp100_build_rx_pdl(ringptr, dev)) {
+ return; /* None available, return */
+ }
+
+ /* Hand this PDL over to the card */
+ /* Note: This needs performance page selected! */
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: rxfill: Hand to card: pdl #%d @0x%x phys:0x%x, buffer: 0x%x\n",
+ dev->name, lp->rxrcommit, (u_int) ringptr->pdl,
+ (u_int) ringptr->pdl_paddr, (u_int) ringptr->pdl[3]);
+#endif
+
+ hp100_outl((u32) ringptr->pdl_paddr, RX_PDA);
+
+ lp->rxrcommit += 1;
+ lp->rxrtail = ringptr->next;
+ }
+}
+
+/*
+ * BM_shutdown - shutdown bus mastering and leave chip in reset state
+ */
+
+static void hp100_BM_shutdown(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+ unsigned long time;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4209, TRACE);
+ printk("hp100: %s: bm shutdown\n", dev->name);
+#endif
+
+ hp100_page(PERFORMANCE);
+ hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */
+ hp100_outw(0xffff, IRQ_STATUS); /* Ack all ints */
+
+ /* Ensure Interrupts are off */
+ hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW);
+
+ /* Disable all MAC activity */
+ hp100_page(MAC_CTRL);
+ hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1); /* stop rx/tx */
+
+ /* If cascade MMU is not already in reset */
+ if (0 != (hp100_inw(OPTION_LSW) & HP100_HW_RST)) {
+ /* Wait 1.3ms (10Mb max packet time) to ensure MAC is idle so
+ * MMU pointers will not be reset out from underneath
+ */
+ hp100_page(MAC_CTRL);
+ for (time = 0; time < 5000; time++) {
+ if ((hp100_inb(MAC_CFG_1) & (HP100_TX_IDLE | HP100_RX_IDLE)) == (HP100_TX_IDLE | HP100_RX_IDLE))
+ break;
+ }
+
+ /* Shutdown algorithm depends on the generation of Cascade */
+ if (lp->chip == HP100_CHIPID_LASSEN) { /* ETR shutdown/reset */
+ /* Disable Busmaster mode and wait for bit to go to zero. */
+ hp100_page(HW_MAP);
+ hp100_andb(~HP100_BM_MASTER, BM);
+ /* 100 ms timeout */
+ for (time = 0; time < 32000; time++) {
+ if (0 == (hp100_inb(BM) & HP100_BM_MASTER))
+ break;
+ }
+ } else { /* Shasta or Rainier Shutdown/Reset */
+ /* To ensure all bus master inloading activity has ceased,
+ * wait for no Rx PDAs or no Rx packets on card.
+ */
+ hp100_page(PERFORMANCE);
+ /* 100 ms timeout */
+ for (time = 0; time < 10000; time++) {
+ /* RX_PDL: PDLs not executed. */
+ /* RX_PKT_CNT: RX'd packets on card. */
+ if ((hp100_inb(RX_PDL) == 0) && (hp100_inb(RX_PKT_CNT) == 0))
+ break;
+ }
+
+ if (time >= 10000)
+ printk("hp100: %s: BM shutdown error.\n", dev->name);
+
+ /* To ensure all bus master outloading activity has ceased,
+ * wait until the Tx PDA count goes to zero or no more Tx space
+ * available in the Tx region of the card.
+ */
+ /* 100 ms timeout */
+ for (time = 0; time < 10000; time++) {
+ if ((0 == hp100_inb(TX_PKT_CNT)) &&
+ (0 != (hp100_inb(TX_MEM_FREE) & HP100_AUTO_COMPARE)))
+ break;
+ }
+
+ /* Disable Busmaster mode */
+ hp100_page(HW_MAP);
+ hp100_andb(~HP100_BM_MASTER, BM);
+ } /* end of shutdown procedure for non-etr parts */
+
+ hp100_cascade_reset(dev, 1);
+ }
+ hp100_page(PERFORMANCE);
+ /* hp100_outw( HP100_BM_READ | HP100_BM_WRITE | HP100_RESET_HB, OPTION_LSW ); */
+ /* Busmaster mode should be shut down now. */
+}
+
+static int hp100_check_lan(struct net_device *dev)
+{
+ struct hp100_private *lp = netdev_priv(dev);
+
+ if (lp->lan_type < 0) { /* no LAN type detected yet? */
+ hp100_stop_interface(dev);
+ if ((lp->lan_type = hp100_sense_lan(dev)) < 0) {
+ printk("hp100: %s: no connection found - check wire\n", dev->name);
+ hp100_start_interface(dev); /* 10Mb/s RX packets maybe handled */
+ return -EIO;
+ }
+ if (lp->lan_type == HP100_LAN_100)
+ lp->hub_status = hp100_login_to_vg_hub(dev, 0); /* relogin */
+ hp100_start_interface(dev);
+ }
+ return 0;
+}
+
+/*
+ * transmit functions
+ */
+
+/* tx function for busmaster mode */
+static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ unsigned long flags;
+ int i, ok_flag;
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+ hp100_ring_t *ringptr;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4210, TRACE);
+ printk("hp100: %s: start_xmit_bm\n", dev->name);
+#endif
+ if (skb->len <= 0)
+ goto drop;
+
+ if (lp->chip == HP100_CHIPID_SHASTA && skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
+ /* Get Tx ring tail pointer */
+ if (lp->txrtail->next == lp->txrhead) {
+ /* No memory. */
+#ifdef HP100_DEBUG
+ printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name);
+#endif
+ /* not waited long enough since last tx? */
+ if (time_before(jiffies, dev_trans_start(dev) + HZ))
+ goto drop;
+
+ if (hp100_check_lan(dev))
+ goto drop;
+
+ if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) {
+ /* we have a 100Mb/s adapter but it isn't connected to hub */
+ printk("hp100: %s: login to 100Mb/s hub retry\n", dev->name);
+ hp100_stop_interface(dev);
+ lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+ hp100_start_interface(dev);
+ } else {
+ spin_lock_irqsave(&lp->lock, flags);
+ hp100_ints_off(); /* Useful ? Jean II */
+ i = hp100_sense_lan(dev);
+ hp100_ints_on();
+ spin_unlock_irqrestore(&lp->lock, flags);
+ if (i == HP100_LAN_ERR)
+ printk("hp100: %s: link down detected\n", dev->name);
+ else if (lp->lan_type != i) { /* cable change! */
+ /* it's very hard - all network settings must be changed!!! */
+ printk("hp100: %s: cable change 10Mb/s <-> 100Mb/s detected\n", dev->name);
+ lp->lan_type = i;
+ hp100_stop_interface(dev);
+ if (lp->lan_type == HP100_LAN_100)
+ lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+ hp100_start_interface(dev);
+ } else {
+ printk("hp100: %s: interface reset\n", dev->name);
+ hp100_stop_interface(dev);
+ if (lp->lan_type == HP100_LAN_100)
+ lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+ hp100_start_interface(dev);
+ }
+ }
+
+ goto drop;
+ }
+
+ /*
+ * we have to turn int's off before modifying this, otherwise
+ * a tx_pdl_cleanup could occur at the same time
+ */
+ spin_lock_irqsave(&lp->lock, flags);
+ ringptr = lp->txrtail;
+ lp->txrtail = ringptr->next;
+
+ /* Check whether packet has minimal packet size */
+ ok_flag = skb->len >= HP100_MIN_PACKET_SIZE;
+ i = ok_flag ? skb->len : HP100_MIN_PACKET_SIZE;
+
+ ringptr->skb = skb;
+ ringptr->pdl[0] = ((1 << 16) | i); /* PDH: 1 Fragment & length */
+ if (lp->chip == HP100_CHIPID_SHASTA) {
+ /* TODO: Could someone who has the EISA card please check if this works? */
+ ringptr->pdl[2] = i;
+ } else { /* Lassen */
+ /* In the PDL, don't use the padded size but the real packet size: */
+ ringptr->pdl[2] = skb->len; /* 1st Frag: Length of frag */
+ }
+ /* Conversion to new PCI API : map skbuf data to PCI bus.
+ * Doc says it's OK for EISA as well - Jean II */
+ ringptr->pdl[1] = ((u32) pci_map_single(lp->pci_dev, skb->data, ringptr->pdl[2], PCI_DMA_TODEVICE)); /* 1st Frag: Adr. of data */
+
+ /* Hand this PDL to the card. */
+ hp100_outl(ringptr->pdl_paddr, TX_PDA_L); /* Low Prio. Queue */
+
+ lp->txrcommit++;
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return NETDEV_TX_OK;
+
+drop:
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+
+/* clean_txring checks if packets have been sent by the card by reading
+ * the TX_PDL register from the performance page and comparing it to the
+ * number of committed packets. It then frees the skb's of the packets that
+ * obviously have been sent to the network.
+ *
+ * Needs the PERFORMANCE page selected.
+ */
+static void hp100_clean_txring(struct net_device *dev)
+{
+ struct hp100_private *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int donecount;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4211, TRACE);
+ printk("hp100: %s: clean txring\n", dev->name);
+#endif
+
+ /* How many PDLs have been transmitted? */
+ donecount = (lp->txrcommit) - hp100_inb(TX_PDL);
+
+#ifdef HP100_DEBUG
+ if (donecount > MAX_TX_PDL)
+ printk("hp100: %s: Warning: More PDLs transmitted than committed to card???\n", dev->name);
+#endif
+
+ for (; 0 != donecount; donecount--) {
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: Free skb: data @0x%.8x txrcommit=0x%x TXPDL=0x%x, done=0x%x\n",
+ dev->name, (u_int) lp->txrhead->skb->data,
+ lp->txrcommit, hp100_inb(TX_PDL), donecount);
+#endif
+ /* Conversion to new PCI API : NOP */
+ pci_unmap_single(lp->pci_dev, (dma_addr_t) lp->txrhead->pdl[1], lp->txrhead->pdl[2], PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(lp->txrhead->skb);
+ lp->txrhead->skb = (void *) NULL;
+ lp->txrhead = lp->txrhead->next;
+ lp->txrcommit--;
+ }
+}
+
+/* tx function for slave modes */
+static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ unsigned long flags;
+ int i, ok_flag;
+ int ioaddr = dev->base_addr;
+ u_short val;
+ struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4212, TRACE);
+ printk("hp100: %s: start_xmit\n", dev->name);
+#endif
+ if (skb->len <= 0)
+ goto drop;
+
+ if (hp100_check_lan(dev))
+ goto drop;
+
+ /* If there is not enough free memory on the card... */
+ i = hp100_inl(TX_MEM_FREE) & 0x7fffffff;
+ if (!(((i / 2) - 539) > (skb->len + 16) && (hp100_inb(TX_PKT_CNT) < 255))) {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i);
+#endif
+ /* not waited long enough since last failed tx try? */
+ if (time_before(jiffies, dev_trans_start(dev) + HZ)) {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: trans_start timing problem\n",
+ dev->name);
+#endif
+ goto drop;
+ }
+ if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) {
+ /* we have a 100Mb/s adapter but it isn't connected to hub */
+ printk("hp100: %s: login to 100Mb/s hub retry\n", dev->name);
+ hp100_stop_interface(dev);
+ lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+ hp100_start_interface(dev);
+ } else {
+ spin_lock_irqsave(&lp->lock, flags);
+ hp100_ints_off(); /* Useful ? Jean II */
+ i = hp100_sense_lan(dev);
+ hp100_ints_on();
+ spin_unlock_irqrestore(&lp->lock, flags);
+ if (i == HP100_LAN_ERR)
+ printk("hp100: %s: link down detected\n", dev->name);
+ else if (lp->lan_type != i) { /* cable change! */
+ /* it's very hard - all network settings must be changed!!! */
+ printk("hp100: %s: cable change 10Mb/s <-> 100Mb/s detected\n", dev->name);
+ lp->lan_type = i;
+ hp100_stop_interface(dev);
+ if (lp->lan_type == HP100_LAN_100)
+ lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+ hp100_start_interface(dev);
+ } else {
+ printk("hp100: %s: interface reset\n", dev->name);
+ hp100_stop_interface(dev);
+ if (lp->lan_type == HP100_LAN_100)
+ lp->hub_status = hp100_login_to_vg_hub(dev, 0);
+ hp100_start_interface(dev);
+ mdelay(1);
+ }
+ }
+ goto drop;
+ }
+
+ for (i = 0; i < 6000 && (hp100_inb(OPTION_MSW) & HP100_TX_CMD); i++) {
+#ifdef HP100_DEBUG_TX
+ printk("hp100: %s: start_xmit: busy\n", dev->name);
+#endif
+ }
+
+ spin_lock_irqsave(&lp->lock, flags);
+ hp100_ints_off();
+ val = hp100_inw(IRQ_STATUS);
+ /* Ack / clear the TX_COMPLETE interrupt - this interrupt is set
+ * when the current packet being transmitted on the wire is completed. */
+ hp100_outw(HP100_TX_COMPLETE, IRQ_STATUS);
+#ifdef HP100_DEBUG_TX
+ printk("hp100: %s: start_xmit: irq_status=0x%.4x, irqmask=0x%.4x, len=%d\n",
+ dev->name, val, hp100_inw(IRQ_MASK), (int) skb->len);
+#endif
+
+ ok_flag = skb->len >= HP100_MIN_PACKET_SIZE;
+ i = ok_flag ? skb->len : HP100_MIN_PACKET_SIZE;
+
+ hp100_outw(i, DATA32); /* tell card the total packet length */
+ hp100_outw(i, FRAGMENT_LEN); /* and first/only fragment length */
+
+ if (lp->mode == 2) { /* memory mapped */
+ /* Note: The J2585B needs alignment to 32bits here! */
+ memcpy_toio(lp->mem_ptr_virt, skb->data, (skb->len + 3) & ~3);
+ if (!ok_flag)
+ memset_io(lp->mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb->len);
+ } else { /* programmed i/o */
+ outsl(ioaddr + HP100_REG_DATA32, skb->data,
+ (skb->len + 3) >> 2);
+ if (!ok_flag)
+ for (i = (skb->len + 3) & ~3; i < HP100_MIN_PACKET_SIZE; i += 4)
+ hp100_outl(0, DATA32);
+ }
+
+ hp100_outb(HP100_TX_CMD | HP100_SET_LB, OPTION_MSW); /* send packet */
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ hp100_ints_on();
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ dev_kfree_skb_any(skb);
+
+#ifdef HP100_DEBUG_TX
+ printk("hp100: %s: start_xmit: end\n", dev->name);
+#endif
+
+ return NETDEV_TX_OK;
+
+drop:
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+
+}
+
+
+/*
+ * Receive Function (Non-Busmaster mode)
+ * Called when an "Receive Packet" interrupt occurs, i.e. the receive
+ * packet counter is non-zero.
+ * For non-busmaster, this function does the whole work of transferring
+ * the packet to the host memory and then up to higher layers via skb
+ * and netif_rx.
+ */
+
+static void hp100_rx(struct net_device *dev)
+{
+ int packets, pkt_len;
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+ u_int header;
+ struct sk_buff *skb;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4213, TRACE);
+ printk("hp100: %s: rx\n", dev->name);
+#endif
+
+ /* First get indication of received lan packet */
+ /* RX_PKT_CNT indicates the number of packets which have been fully */
+ /* received onto the card but have not been fully transferred off the card */
+ packets = hp100_inb(RX_PKT_CNT);
+#ifdef HP100_DEBUG_RX
+ if (packets > 1)
+ printk("hp100: %s: rx: waiting packets = %d\n", dev->name, packets);
+#endif
+
+ while (packets-- > 0) {
+ /* If ADV_NXT_PKT is still set, we have to wait until the card has */
+ /* really advanced to the next packet. */
+ for (pkt_len = 0; pkt_len < 6000 && (hp100_inb(OPTION_MSW) & HP100_ADV_NXT_PKT); pkt_len++) {
+#ifdef HP100_DEBUG_RX
+ printk ("hp100: %s: rx: busy, remaining packets = %d\n", dev->name, packets);
+#endif
+ }
+
+ /* First we get the header, which contains information about the */
+ /* actual length of the received packet. */
+ if (lp->mode == 2) { /* memory mapped mode */
+ header = readl(lp->mem_ptr_virt);
+ } else /* programmed i/o */
+ header = hp100_inl(DATA32);
+
+ pkt_len = ((header & HP100_PKT_LEN_MASK) + 3) & ~3;
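+ /* round up to a multiple of 4: the data below is moved in 32-bit words */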
+
+#ifdef HP100_DEBUG_RX
+ printk("hp100: %s: rx: new packet - length=%d, errors=0x%x, dest=0x%x\n",
+ dev->name, header & HP100_PKT_LEN_MASK,
+ (header >> 16) & 0xfff8, (header >> 16) & 7);
+#endif
+
+ /* Now we allocate the skb and transfer the data into it. */
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) { /* Not enough memory->drop packet */
+#ifdef HP100_DEBUG
+ printk("hp100: %s: rx: couldn't allocate a sk_buff of size %d\n",
+ dev->name, pkt_len);
+#endif
+ dev->stats.rx_dropped++;
+ } else { /* skb successfully allocated */
+
+ u_char *ptr;
+
+ skb_reserve(skb,2);
+
+ /* ptr to start of the sk_buff data area */
+ skb_put(skb, pkt_len);
+ ptr = skb->data;
+
+ /* Now transfer the data from the card into that area */
+ if (lp->mode == 2)
+ memcpy_fromio(ptr, lp->mem_ptr_virt,pkt_len);
+ else /* io mapped */
+ insl(ioaddr + HP100_REG_DATA32, ptr, pkt_len >> 2);
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+#ifdef HP100_DEBUG_RX
+ printk("hp100: %s: rx: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ dev->name, ptr[0], ptr[1], ptr[2], ptr[3],
+ ptr[4], ptr[5], ptr[6], ptr[7], ptr[8],
+ ptr[9], ptr[10], ptr[11]);
+#endif
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+
+ /* Indicate the card that we have got the packet */
+ hp100_outb(HP100_ADV_NXT_PKT | HP100_SET_LB, OPTION_MSW);
+
+ switch (header & 0x00070000) {
+ case (HP100_MULTI_ADDR_HASH << 16):
+ case (HP100_MULTI_ADDR_NO_HASH << 16):
+ dev->stats.multicast++;
+ break;
+ }
+ } /* end of while(there are packets) loop */
+#ifdef HP100_DEBUG_RX
+ printk("hp100_rx: %s: end\n", dev->name);
+#endif
+}
+
+/*
+ * Receive Function for Busmaster Mode
+ */
+static void hp100_rx_bm(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+ hp100_ring_t *ptr;
+ u_int header;
+ int pkt_len;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4214, TRACE);
+ printk("hp100: %s: rx_bm\n", dev->name);
+#endif
+
+#ifdef HP100_DEBUG
+ if (0 == lp->rxrcommit) {
+ printk("hp100: %s: rx_bm called although no PDLs were committed to adapter?\n", dev->name);
+ return;
+ } else
+ /* RX_PKT_CNT states how many PDLs are currently formatted and available to
+ * the card's BM engine */
+ if ((hp100_inw(RX_PKT_CNT) & 0x00ff) >= lp->rxrcommit) {
+ printk("hp100: %s: More packets received than committed? RX_PKT_CNT=0x%x, commit=0x%x\n",
+ dev->name, hp100_inw(RX_PKT_CNT) & 0x00ff,
+ lp->rxrcommit);
+ return;
+ }
+#endif
+
+ while ((lp->rxrcommit > hp100_inb(RX_PDL))) {
+ /*
+ * The packet was received into the pdl pointed to by lp->rxrhead (
+ * the oldest pdl in the ring
+ */
+
+ /* First we get the header, which contains information about the */
+ /* actual length of the received packet. */
+
+ ptr = lp->rxrhead;
+
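+ /* The 4-byte rx status header was DMA'd by the card into the word
+ * just before the PDH (Frag#1, as set up in hp100_init_rxpdl). */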
+ header = *(ptr->pdl - 1);
+ pkt_len = (header & HP100_PKT_LEN_MASK);
+
+ /* Conversion to new PCI API : NOP */
+ pci_unmap_single(lp->pci_dev, (dma_addr_t) ptr->pdl[3], MAX_ETHER_SIZE, PCI_DMA_FROMDEVICE);
+
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: rx_bm: header@0x%x=0x%x length=%d, errors=0x%x, dest=0x%x\n",
+ dev->name, (u_int) (ptr->pdl - 1), (u_int) header,
+ pkt_len, (header >> 16) & 0xfff8, (header >> 16) & 7);
+ printk("hp100: %s: RX_PDL_COUNT:0x%x TX_PDL_COUNT:0x%x, RX_PKT_CNT=0x%x PDH=0x%x, Data@0x%x len=0x%x\n",
+ dev->name, hp100_inb(RX_PDL), hp100_inb(TX_PDL),
+ hp100_inb(RX_PKT_CNT), (u_int) * (ptr->pdl),
+ (u_int) * (ptr->pdl + 3), (u_int) * (ptr->pdl + 4));
+#endif
+
+ if ((pkt_len >= MIN_ETHER_SIZE) &&
+ (pkt_len <= MAX_ETHER_SIZE)) {
+ if (ptr->skb == NULL) {
+ printk("hp100: %s: rx_bm: skb null\n", dev->name);
+ /* can happen if we only allocated room for the pdh due to memory shortage. */
+ dev->stats.rx_dropped++;
+ } else {
+ skb_trim(ptr->skb, pkt_len); /* Shorten it */
+ ptr->skb->protocol =
+ eth_type_trans(ptr->skb, dev);
+
+ netif_rx(ptr->skb); /* Up and away... */
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+
+ switch (header & 0x00070000) {
+ case (HP100_MULTI_ADDR_HASH << 16):
+ case (HP100_MULTI_ADDR_NO_HASH << 16):
+ dev->stats.multicast++;
+ break;
+ }
+ } else {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: rx_bm: Received bad packet (length=%d)\n", dev->name, pkt_len);
+#endif
+ if (ptr->skb != NULL)
+ dev_kfree_skb_any(ptr->skb);
+ dev->stats.rx_errors++;
+ }
+
+ lp->rxrhead = lp->rxrhead->next;
+
+ /* Allocate a new rx PDL (so lp->rxrcommit stays the same) */
+ if (0 == hp100_build_rx_pdl(lp->rxrtail, dev)) {
+ /* No space for skb, header can still be received. */
+#ifdef HP100_DEBUG
+ printk("hp100: %s: rx_bm: No space for new PDL.\n", dev->name);
+#endif
+ return;
+ } else { /* successfully allocated new PDL - put it in ringlist at tail. */
+ hp100_outl((u32) lp->rxrtail->pdl_paddr, RX_PDA);
+ lp->rxrtail = lp->rxrtail->next;
+ }
+
+ }
+}
+
+/*
+ * statistics
+ */
+static struct net_device_stats *hp100_get_stats(struct net_device *dev)
+{
+ unsigned long flags;
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4215, TRACE);
+#endif
+
+ spin_lock_irqsave(&lp->lock, flags);
+ hp100_ints_off(); /* Useful ? Jean II */
+ hp100_update_stats(dev);
+ hp100_ints_on();
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return &(dev->stats);
+}
+
+static void hp100_update_stats(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ u_short val;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4216, TRACE);
+ printk("hp100: %s: update-stats\n", dev->name);
+#endif
+
+ /* Note: Statistics counters clear when read. */
+ hp100_page(MAC_CTRL);
+ val = hp100_inw(DROPPED) & 0x0fff;
+ dev->stats.rx_errors += val;
+ dev->stats.rx_over_errors += val;
+ val = hp100_inb(CRC);
+ dev->stats.rx_errors += val;
+ dev->stats.rx_crc_errors += val;
+ val = hp100_inb(ABORT);
+ dev->stats.tx_errors += val;
+ dev->stats.tx_aborted_errors += val;
+ hp100_page(PERFORMANCE);
+}
+
+static void hp100_misc_interrupt(struct net_device *dev)
+{
+#ifdef HP100_DEBUG_B
+ int ioaddr = dev->base_addr;
+
+ hp100_outw(0x4216, TRACE);
+ printk("hp100: %s: misc_interrupt\n", dev->name);
+#endif
+
+ /* Account a misc. interrupt as both an rx and a tx error. */
+ dev->stats.rx_errors++;
+ dev->stats.tx_errors++;
+}
+
+static void hp100_clear_stats(struct hp100_private *lp, int ioaddr)
+{
+ unsigned long flags;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4217, TRACE);
+ printk("hp100: %s: clear_stats\n", dev->name);
+#endif
+
+ spin_lock_irqsave(&lp->lock, flags);
+ hp100_page(MAC_CTRL); /* get all statistics bytes */
+ hp100_inw(DROPPED);
+ hp100_inb(CRC);
+ hp100_inb(ABORT);
+ hp100_page(PERFORMANCE);
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+
+/*
+ * multicast setup
+ */
+
+/*
+ * Set or clear the multicast filter for this adapter.
+ */
+
+static void hp100_set_multicast_list(struct net_device *dev)
+{
+ unsigned long flags;
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4218, TRACE);
+ printk("hp100: %s: set_mc_list\n", dev->name);
+#endif
+
+ spin_lock_irqsave(&lp->lock, flags);
+ hp100_ints_off();
+ hp100_page(MAC_CTRL);
+ hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1); /* stop rx/tx */
+
+ if (dev->flags & IFF_PROMISC) {
+ lp->mac2_mode = HP100_MAC2MODE6; /* promiscuous mode = get all good */
+ lp->mac1_mode = HP100_MAC1MODE6; /* packets on the net */
+ memset(&lp->hash_bytes, 0xff, 8);
+ } else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) {
+ lp->mac2_mode = HP100_MAC2MODE5; /* multicast mode = get packets for */
+ lp->mac1_mode = HP100_MAC1MODE5; /* me, broadcasts and all multicasts */
+#ifdef HP100_MULTICAST_FILTER /* doesn't work!!! */
+ if (dev->flags & IFF_ALLMULTI) {
+ /* set hash filter to receive all multicast packets */
+ memset(&lp->hash_bytes, 0xff, 8);
+ } else {
+ int i, idx;
+ u_char *addrs;
+ struct netdev_hw_addr *ha;
+
+ memset(&lp->hash_bytes, 0x00, 8);
+#ifdef HP100_DEBUG
+ printk("hp100: %s: computing hash filter - mc_count = %i\n",
+ dev->name, netdev_mc_count(dev));
+#endif
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
+#ifdef HP100_DEBUG
+ printk("hp100: %s: multicast = %pM, ",
+ dev->name, addrs);
+#endif
+ for (i = idx = 0; i < 6; i++) {
+ idx ^= *addrs++ & 0x3f;
+#ifdef HP100_DEBUG
+ printk(":%02x:", idx);
+#endif
+ }
+#ifdef HP100_DEBUG
+ printk("idx = %i\n", idx);
+#endif
+ lp->hash_bytes[idx >> 3] |= (1 << (idx & 7));
+ }
+ }
+#else
+ memset(&lp->hash_bytes, 0xff, 8);
+#endif
+ } else {
+ lp->mac2_mode = HP100_MAC2MODE3; /* normal mode = get packets for me */
+ lp->mac1_mode = HP100_MAC1MODE3; /* and broadcasts */
+ memset(&lp->hash_bytes, 0x00, 8);
+ }
+
+ if (((hp100_inb(MAC_CFG_1) & 0x0f) != lp->mac1_mode) ||
+ (hp100_inb(MAC_CFG_2) != lp->mac2_mode)) {
+ int i;
+
+ hp100_outb(lp->mac2_mode, MAC_CFG_2);
+ hp100_andb(HP100_MAC1MODEMASK, MAC_CFG_1); /* clear mac1 mode bits */
+ hp100_orb(lp->mac1_mode, MAC_CFG_1); /* and set the new mode */
+
+ hp100_page(MAC_ADDRESS);
+ for (i = 0; i < 8; i++)
+ hp100_outb(lp->hash_bytes[i], HASH_BYTE0 + i);
+#ifdef HP100_DEBUG
+ printk("hp100: %s: mac1 = 0x%x, mac2 = 0x%x, multicast hash = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name, lp->mac1_mode, lp->mac2_mode,
+ lp->hash_bytes[0], lp->hash_bytes[1],
+ lp->hash_bytes[2], lp->hash_bytes[3],
+ lp->hash_bytes[4], lp->hash_bytes[5],
+ lp->hash_bytes[6], lp->hash_bytes[7]);
+#endif
+
+ if (lp->lan_type == HP100_LAN_100) {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: 100VG MAC settings have changed - relogin.\n", dev->name);
+#endif
+ lp->hub_status = hp100_login_to_vg_hub(dev, 1); /* force a relogin to the hub */
+ }
+ } else {
+ int i;
+ u_char old_hash_bytes[8];
+
+ hp100_page(MAC_ADDRESS);
+ for (i = 0; i < 8; i++)
+ old_hash_bytes[i] = hp100_inb(HASH_BYTE0 + i);
+ if (memcmp(old_hash_bytes, &lp->hash_bytes, 8)) {
+ for (i = 0; i < 8; i++)
+ hp100_outb(lp->hash_bytes[i], HASH_BYTE0 + i);
+#ifdef HP100_DEBUG
+ printk("hp100: %s: multicast hash = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name, lp->hash_bytes[0],
+ lp->hash_bytes[1], lp->hash_bytes[2],
+ lp->hash_bytes[3], lp->hash_bytes[4],
+ lp->hash_bytes[5], lp->hash_bytes[6],
+ lp->hash_bytes[7]);
+#endif
+
+ if (lp->lan_type == HP100_LAN_100) {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: 100VG MAC settings have changed - relogin.\n", dev->name);
+#endif
+ lp->hub_status = hp100_login_to_vg_hub(dev, 1); /* force a relogin to the hub */
+ }
+ }
+ }
+
+ hp100_page(MAC_CTRL);
+ hp100_orb(HP100_RX_EN | HP100_RX_IDLE | /* enable rx */
+ HP100_TX_EN | HP100_TX_IDLE, MAC_CFG_1); /* enable tx */
+
+ hp100_page(PERFORMANCE);
+ hp100_ints_on();
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+/*
+ * hardware interrupt handling
+ */
+
+static irqreturn_t hp100_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct hp100_private *lp = netdev_priv(dev);
+
+ int ioaddr;
+ u_int val;
+
+ if (dev == NULL)
+ return IRQ_NONE;
+ ioaddr = dev->base_addr;
+
+ spin_lock(&lp->lock);
+
+ hp100_ints_off();
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4219, TRACE);
+#endif
+
+ /* hp100_page( PERFORMANCE ); */
+ val = hp100_inw(IRQ_STATUS);
+#ifdef HP100_DEBUG_IRQ
+ printk("hp100: %s: mode=%x,IRQ_STAT=0x%.4x,RXPKTCNT=0x%.2x RXPDL=0x%.2x TXPKTCNT=0x%.2x TXPDL=0x%.2x\n",
+ dev->name, lp->mode, (u_int) val, hp100_inb(RX_PKT_CNT),
+ hp100_inb(RX_PDL), hp100_inb(TX_PKT_CNT), hp100_inb(TX_PDL));
+#endif
+
+ if (val == 0) { /* might be a shared interrupt */
+ spin_unlock(&lp->lock);
+ hp100_ints_on();
+ return IRQ_NONE;
+ }
+ /* We're only interested in those interrupts we really enabled. */
+ /* val &= hp100_inw( IRQ_MASK ); */
+
+ /*
+ * RX_PDL_FILL_COMPL is set whenever a RX_PDL has been executed. A RX_PDL
+ * is considered executed whenever the RX_PDL data structure is no longer
+ * needed.
+ */
+ if (val & HP100_RX_PDL_FILL_COMPL) {
+ if (lp->mode == 1)
+ hp100_rx_bm(dev);
+ else {
+ printk("hp100: %s: rx_pdl_fill_compl interrupt although not busmaster?\n", dev->name);
+ }
+ }
+
+ /*
+ * The RX_PACKET interrupt is set, when the receive packet counter is
+ * non zero. We use this interrupt for receiving in slave mode. In
+ * busmaster mode, we use it to make sure we did not miss any rx_pdl_fill
+ * interrupts. If rx_pdl_fill_compl is not set and rx_packet is set, then
+ * we somehow have missed a rx_pdl_fill_compl interrupt.
+ */
+
+ if (val & HP100_RX_PACKET) { /* Receive Packet Counter is non zero */
+ if (lp->mode != 1) /* non busmaster */
+ hp100_rx(dev);
+ else if (!(val & HP100_RX_PDL_FILL_COMPL)) {
+ /* Shouldn't happen - maybe we missed a RX_PDL_FILL Interrupt? */
+ hp100_rx_bm(dev);
+ }
+ }
+
+ /*
+ * Ack. that we have noticed the interrupt and thereby allow next one.
+ * Note that this is now done after the slave rx function, since first
+ * acknowledging and then setting ADV_NXT_PKT caused an extra interrupt
+ * on the J2573.
+ */
+ hp100_outw(val, IRQ_STATUS);
+
+ /*
+ * RX_ERROR is set when a packet is dropped due to no memory resources on
+ * the card or when a RCV_ERR occurs.
+ * TX_ERROR is set when a TX_ABORT condition occurs in the MAC. This
+ * exists only in the 802.3 MAC and happens when 16 collisions occur
+ * during a single transmit.
+ */
+ if (val & (HP100_TX_ERROR | HP100_RX_ERROR)) {
+#ifdef HP100_DEBUG_IRQ
+ printk("hp100: %s: TX/RX Error IRQ\n", dev->name);
+#endif
+ hp100_update_stats(dev);
+ if (lp->mode == 1) {
+ hp100_rxfill(dev);
+ hp100_clean_txring(dev);
+ }
+ }
+
+ /*
+ * RX_PDA_ZERO is set when the PDA count goes from non-zero to zero.
+ */
+ if ((lp->mode == 1) && (val & (HP100_RX_PDA_ZERO)))
+ hp100_rxfill(dev);
+
+ /*
+ * HP100_TX_COMPLETE interrupt occurs when packet transmitted on wire
+ * is completed
+ */
+ if ((lp->mode == 1) && (val & (HP100_TX_COMPLETE)))
+ hp100_clean_txring(dev);
+
+ /*
+ * MISC_ERROR is set when either the LAN link goes down or a detected
+ * bus error occurs.
+ */
+ if (val & HP100_MISC_ERROR) { /* New for J2585B */
+#ifdef HP100_DEBUG_IRQ
+ printk
+ ("hp100: %s: Misc. Error Interrupt - Check cabling.\n",
+ dev->name);
+#endif
+ if (lp->mode == 1) {
+ hp100_clean_txring(dev);
+ hp100_rxfill(dev);
+ }
+ hp100_misc_interrupt(dev);
+ }
+
+ spin_unlock(&lp->lock);
+ hp100_ints_on();
+ return IRQ_HANDLED;
+}
+
+/*
+ * some misc functions
+ */
+
+static void hp100_start_interface(struct net_device *dev)
+{
+ unsigned long flags;
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4220, TRACE);
+ printk("hp100: %s: hp100_start_interface\n", dev->name);
+#endif
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ /* Ensure the adapter does not want to request an interrupt when */
+ /* enabling the IRQ line to be active on the bus (i.e. not tri-stated) */
+ hp100_page(PERFORMANCE);
+ hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */
+ hp100_outw(0xffff, IRQ_STATUS); /* ack all IRQs */
+ hp100_outw(HP100_FAKE_INT | HP100_INT_EN | HP100_RESET_LB,
+ OPTION_LSW);
+ /* Un Tri-state int. TODO: Check if shared interrupts can be realised? */
+ hp100_outw(HP100_TRI_INT | HP100_RESET_HB, OPTION_LSW);
+
+ if (lp->mode == 1) {
+ /* Make sure BM bit is set... */
+ hp100_page(HW_MAP);
+ hp100_orb(HP100_BM_MASTER, BM);
+ hp100_rxfill(dev);
+ } else if (lp->mode == 2) {
+ /* Enable memory mapping. Note: Don't do this when busmaster. */
+ hp100_outw(HP100_MMAP_DIS | HP100_RESET_HB, OPTION_LSW);
+ }
+
+ hp100_page(PERFORMANCE);
+ hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */
+ hp100_outw(0xffff, IRQ_STATUS); /* ack IRQ */
+
+ /* enable a few interrupts: */
+ if (lp->mode == 1) { /* busmaster mode */
+ hp100_outw(HP100_RX_PDL_FILL_COMPL |
+ HP100_RX_PDA_ZERO | HP100_RX_ERROR |
+ /* HP100_RX_PACKET | */
+ /* HP100_RX_EARLY_INT | */ HP100_SET_HB |
+ /* HP100_TX_PDA_ZERO | */
+ HP100_TX_COMPLETE |
+ /* HP100_MISC_ERROR | */
+ HP100_TX_ERROR | HP100_SET_LB, IRQ_MASK);
+ } else {
+ hp100_outw(HP100_RX_PACKET |
+ HP100_RX_ERROR | HP100_SET_HB |
+ HP100_TX_ERROR | HP100_SET_LB, IRQ_MASK);
+ }
+
+ /* Note : before hp100_set_multicast_list(), because it will play with
+ * spinlock itself... Jean II */
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ /* Enable MAC Tx and RX, set MAC modes, ... */
+ hp100_set_multicast_list(dev);
+}
+
+static void hp100_stop_interface(struct net_device *dev)
+{
+ struct hp100_private *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ u_int val;
+
+#ifdef HP100_DEBUG_B
+ printk("hp100: %s: hp100_stop_interface\n", dev->name);
+ hp100_outw(0x4221, TRACE);
+#endif
+
+ if (lp->mode == 1)
+ hp100_BM_shutdown(dev);
+ else {
+ /* Note: MMAP_DIS will be reenabled by start_interface */
+ hp100_outw(HP100_INT_EN | HP100_RESET_LB |
+ HP100_TRI_INT | HP100_MMAP_DIS | HP100_SET_HB,
+ OPTION_LSW);
+ val = hp100_inw(OPTION_LSW);
+
+ hp100_page(MAC_CTRL);
+ hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1);
+
+ if (!(val & HP100_HW_RST))
+ return; /* card is held in reset: return immediately ... */
+ /* ... else: busy wait until idle */
+ for (val = 0; val < 6000; val++)
+ if ((hp100_inb(MAC_CFG_1) & (HP100_TX_IDLE | HP100_RX_IDLE)) == (HP100_TX_IDLE | HP100_RX_IDLE)) {
+ hp100_page(PERFORMANCE);
+ return;
+ }
+ printk("hp100: %s: hp100_stop_interface - timeout\n", dev->name);
+ hp100_page(PERFORMANCE);
+ }
+}
+
+static void hp100_load_eeprom(struct net_device *dev, u_short probe_ioaddr)
+{
+ int i;
+ int ioaddr = probe_ioaddr > 0 ? probe_ioaddr : dev->base_addr;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4222, TRACE);
+#endif
+
+ hp100_page(EEPROM_CTRL);
+ hp100_andw(~HP100_EEPROM_LOAD, EEPROM_CTRL);
+ hp100_orw(HP100_EEPROM_LOAD, EEPROM_CTRL);
+ for (i = 0; i < 10000; i++)
+ if (!(hp100_inb(OPTION_MSW) & HP100_EE_LOAD))
+ return;
+ printk("hp100: %s: hp100_load_eeprom - timeout\n", dev->name);
+}
+
+/* Sense connection status.
+ * return values: LAN_10 - Connected to 10Mbit/s network
+ * LAN_100 - Connected to 100Mbit/s network
+ * LAN_ERR - not connected or 100Mbit/s Hub down
+ */
+static int hp100_sense_lan(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ u_short val_VG, val_10;
+ struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4223, TRACE);
+#endif
+
+ hp100_page(MAC_CTRL);
+ val_10 = hp100_inb(10_LAN_CFG_1);
+ val_VG = hp100_inb(VG_LAN_CFG_1);
+ hp100_page(PERFORMANCE);
+#ifdef HP100_DEBUG
+ printk("hp100: %s: sense_lan: val_VG = 0x%04x, val_10 = 0x%04x\n",
+ dev->name, val_VG, val_10);
+#endif
+
+ if (val_10 & HP100_LINK_BEAT_ST) /* 10Mb connection is active */
+ return HP100_LAN_10;
+
+ if (val_10 & HP100_AUI_ST) { /* have we BNC or AUI onboard? */
+ /*
+ * This can be overridden by the DOS utility, so if this has no effect,
+ * perhaps you need to download that utility from HP and set the card
+ * back to "auto detect".
+ */
+ val_10 |= HP100_AUI_SEL | HP100_LOW_TH;
+ hp100_page(MAC_CTRL);
+ hp100_outb(val_10, 10_LAN_CFG_1);
+ hp100_page(PERFORMANCE);
+ return HP100_LAN_COAX;
+ }
+
+ /* These cards don't have a 100 Mbit connector */
+ if (!strcmp(lp->id, "HWP1920") ||
+ (lp->pci_dev &&
+ lp->pci_dev->vendor == PCI_VENDOR_ID_HP &&
+ (lp->pci_dev->device == PCI_DEVICE_ID_HP_J2970A ||
+ lp->pci_dev->device == PCI_DEVICE_ID_HP_J2973A)))
+ return HP100_LAN_ERR;
+
+ if (val_VG & HP100_LINK_CABLE_ST) /* Can hear the HUBs tone. */
+ return HP100_LAN_100;
+ return HP100_LAN_ERR;
+}
+
+static int hp100_down_vg_link(struct net_device *dev)
+{
+ struct hp100_private *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ unsigned long time;
+ long savelan, newlan;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4224, TRACE);
+ printk("hp100: %s: down_vg_link\n", dev->name);
+#endif
+
+ hp100_page(MAC_CTRL);
+ time = jiffies + (HZ / 4);
+ do {
+ if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST)
+ break;
+ if (!in_interrupt())
+ schedule_timeout_interruptible(1);
+ } while (time_after(time, jiffies));
+
+ if (time_after_eq(jiffies, time)) /* no signal->no logout */
+ return 0;
+
+ /* Drop the VG Link by clearing the link up cmd and load addr. */
+
+ hp100_andb(~(HP100_LOAD_ADDR | HP100_LINK_CMD), VG_LAN_CFG_1);
+ hp100_orb(HP100_VG_SEL, VG_LAN_CFG_1);
+
+ /* Conditionally stall for >250ms on Link-Up Status (to go down) */
+ time = jiffies + (HZ / 2);
+ do {
+ if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
+ break;
+ if (!in_interrupt())
+ schedule_timeout_interruptible(1);
+ } while (time_after(time, jiffies));
+
+#ifdef HP100_DEBUG
+ if (time_after_eq(jiffies, time))
+ printk("hp100: %s: down_vg_link: Link does not go down?\n", dev->name);
+#endif
+
+ /* To prevent condition where Rev 1 VG MAC and old hubs do not complete */
+ /* logout under traffic (even though all the status bits are cleared), */
+ /* do this workaround to get the Rev 1 MAC in its idle state */
+ if (lp->chip == HP100_CHIPID_LASSEN) {
+ /* Reset VG MAC to ensure it leaves the logoff state even if */
+ /* the Hub is still emitting tones */
+ hp100_andb(~HP100_VG_RESET, VG_LAN_CFG_1);
+ udelay(1500); /* wait for >1ms */
+ hp100_orb(HP100_VG_RESET, VG_LAN_CFG_1); /* Release Reset */
+ udelay(1500);
+ }
+
+ /* New: For lassen, switch to 10 Mbps mac briefly to clear training ACK */
+ /* to get the VG mac to full reset. This is not required with later chips. */
+ /* Note: It will take between 1 and 2 seconds for the VG mac to be */
+ /* selected again! This will be left to the connect hub function to */
+ /* perform if desired. */
+ if (lp->chip == HP100_CHIPID_LASSEN) {
+ /* Have to write to 10 and 100VG control registers simultaneously */
+ savelan = newlan = hp100_inl(10_LAN_CFG_1); /* read 10+100 LAN_CFG regs */
+ newlan &= ~(HP100_VG_SEL << 16);
+ newlan |= (HP100_DOT3_MAC) << 8;
+ hp100_andb(~HP100_AUTO_MODE, MAC_CFG_3); /* Autosel off */
+ hp100_outl(newlan, 10_LAN_CFG_1);
+
+ /* Conditionally stall for 5sec on VG selected. */
+ time = jiffies + (HZ * 5);
+ do {
+ if (!(hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST))
+ break;
+ if (!in_interrupt())
+ schedule_timeout_interruptible(1);
+ } while (time_after(time, jiffies));
+
+ hp100_orb(HP100_AUTO_MODE, MAC_CFG_3); /* Autosel back on */
+ hp100_outl(savelan, 10_LAN_CFG_1);
+ }
+
+ time = jiffies + (3 * HZ); /* Timeout 3s */
+ do {
+ if ((hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST) == 0)
+ break;
+ if (!in_interrupt())
+ schedule_timeout_interruptible(1);
+ } while (time_after(time, jiffies));
+
+ if (time_before_eq(time, jiffies)) {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: down_vg_link: timeout\n", dev->name);
+#endif
+ return -EIO;
+ }
+
+ time = jiffies + (2 * HZ); /* This seems to take a while.... */
+ do {
+ if (!in_interrupt())
+ schedule_timeout_interruptible(1);
+ } while (time_after(time, jiffies));
+
+ return 0;
+}
+
+static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+ u_short val = 0;
+ unsigned long time;
+ int startst;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4225, TRACE);
+ printk("hp100: %s: login_to_vg_hub\n", dev->name);
+#endif
+
+ /* Initiate a login sequence iff VG MAC is enabled and either Load Address
+ * bit is zero or the force relogin flag is set (e.g. due to MAC address or
+ * promiscuous mode change)
+ */
+ hp100_page(MAC_CTRL);
+ startst = hp100_inb(VG_LAN_CFG_1);
+ if ((force_relogin == 1) || (hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST)) {
+#ifdef HP100_DEBUG_TRAINING
+ printk("hp100: %s: Start training\n", dev->name);
+#endif
+
+ /* Ensure VG Reset bit is 1 (i.e., do not reset) */
+ hp100_orb(HP100_VG_RESET, VG_LAN_CFG_1);
+
+ /* If Lassen AND auto-select-mode AND VG tones were sensed on */
+ /* entry then temporarily put them into force 100Mbit mode */
+ if ((lp->chip == HP100_CHIPID_LASSEN) && (startst & HP100_LINK_CABLE_ST))
+ hp100_andb(~HP100_DOT3_MAC, 10_LAN_CFG_2);
+
+ /* Drop the VG link by zeroing Link Up Command and Load Address */
+ hp100_andb(~(HP100_LINK_CMD /* |HP100_LOAD_ADDR */ ), VG_LAN_CFG_1);
+
+#ifdef HP100_DEBUG_TRAINING
+ printk("hp100: %s: Bring down the link\n", dev->name);
+#endif
+
+ /* Wait for link to drop */
+ time = jiffies + (HZ / 10);
+ do {
+ if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
+ break;
+ if (!in_interrupt())
+ schedule_timeout_interruptible(1);
+ } while (time_after(time, jiffies));
+
+ /* Start an addressed training and optionally request promiscuous port */
+ if ((dev->flags) & IFF_PROMISC) {
+ hp100_orb(HP100_PROM_MODE, VG_LAN_CFG_2);
+ if (lp->chip == HP100_CHIPID_LASSEN)
+ hp100_orw(HP100_MACRQ_PROMSC, TRAIN_REQUEST);
+ } else {
+ hp100_andb(~HP100_PROM_MODE, VG_LAN_CFG_2);
+ /* For ETR parts we need to reset the prom. bit in the training
+ * register, otherwise promiscuous mode won't be disabled.
+ */
+ if (lp->chip == HP100_CHIPID_LASSEN) {
+ hp100_andw(~HP100_MACRQ_PROMSC, TRAIN_REQUEST);
+ }
+ }
+
+ /* With ETR parts, frame format request bits can be set. */
+ if (lp->chip == HP100_CHIPID_LASSEN)
+ hp100_orb(HP100_MACRQ_FRAMEFMT_EITHER, TRAIN_REQUEST);
+
+ hp100_orb(HP100_LINK_CMD | HP100_LOAD_ADDR | HP100_VG_RESET, VG_LAN_CFG_1);
+
+ /* Note: Next wait could be omitted for Hood and earlier chips under */
+ /* certain circumstances */
+ /* TODO: check if hood/earlier and skip wait. */
+
+ /* Wait for either short timeout for VG tones or long for login */
+ /* Wait for the card hardware to signal that link cable status is ok... */
+ hp100_page(MAC_CTRL);
+ time = jiffies + (1 * HZ); /* 1 sec timeout for cable st */
+ do {
+ if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST)
+ break;
+ if (!in_interrupt())
+ schedule_timeout_interruptible(1);
+ } while (time_before(jiffies, time));
+
+ if (time_after_eq(jiffies, time)) {
+#ifdef HP100_DEBUG_TRAINING
+ printk("hp100: %s: Link cable status not ok? Training aborted.\n", dev->name);
+#endif
+ } else {
+#ifdef HP100_DEBUG_TRAINING
+ printk
+ ("hp100: %s: HUB tones detected. Trying to train.\n",
+ dev->name);
+#endif
+
+ time = jiffies + (2 * HZ); /* again a timeout */
+ do {
+ val = hp100_inb(VG_LAN_CFG_1);
+ if ((val & (HP100_LINK_UP_ST))) {
+#ifdef HP100_DEBUG_TRAINING
+ printk("hp100: %s: Passed training.\n", dev->name);
+#endif
+ break;
+ }
+ if (!in_interrupt())
+ schedule_timeout_interruptible(1);
+ } while (time_after(time, jiffies));
+ }
+
+ /* If LINK_UP_ST is set, then we are logged into the hub. */
+ if (time_before_eq(jiffies, time) && (val & HP100_LINK_UP_ST)) {
+#ifdef HP100_DEBUG_TRAINING
+ printk("hp100: %s: Successfully logged into the HUB.\n", dev->name);
+ if (lp->chip == HP100_CHIPID_LASSEN) {
+ val = hp100_inw(TRAIN_ALLOW);
+ printk("hp100: %s: Card supports 100VG MAC Version \"%s\" ",
+ dev->name, (hp100_inw(TRAIN_REQUEST) & HP100_CARD_MACVER) ? "802.12" : "Pre");
+ printk("Driver will use MAC Version \"%s\"\n", (val & HP100_HUB_MACVER) ? "802.12" : "Pre");
+ printk("hp100: %s: Frame format is %s.\n", dev->name, (val & HP100_MALLOW_FRAMEFMT) ? "802.5" : "802.3");
+ }
+#endif
+ } else {
+ /* If LINK_UP_ST is not set, login was not successful */
+ printk("hp100: %s: Problem logging into the HUB.\n", dev->name);
+ if (lp->chip == HP100_CHIPID_LASSEN) {
+ /* Check allowed Register to find out why there is a problem. */
+ val = hp100_inw(TRAIN_ALLOW); /* won't work on non-ETR card */
+#ifdef HP100_DEBUG_TRAINING
+ printk("hp100: %s: MAC Configuration requested: 0x%04x, HUB allowed: 0x%04x\n", dev->name, hp100_inw(TRAIN_REQUEST), val);
+#endif
+ if (val & HP100_MALLOW_ACCDENIED)
+ printk("hp100: %s: HUB access denied.\n", dev->name);
+ if (val & HP100_MALLOW_CONFIGURE)
+ printk("hp100: %s: MAC Configuration is incompatible with the Network.\n", dev->name);
+ if (val & HP100_MALLOW_DUPADDR)
+ printk("hp100: %s: Duplicate MAC Address on the Network.\n", dev->name);
+ }
+ }
+
+ /* If we have put the chip into forced 100 Mbit mode earlier, go back */
+ /* to auto-select mode */
+
+ if ((lp->chip == HP100_CHIPID_LASSEN) && (startst & HP100_LINK_CABLE_ST)) {
+ hp100_page(MAC_CTRL);
+ hp100_orb(HP100_DOT3_MAC, 10_LAN_CFG_2);
+ }
+
+ val = hp100_inb(VG_LAN_CFG_1);
+
+ /* Clear the MISC_ERROR Interrupt, which might be generated when doing the relogin */
+ hp100_page(PERFORMANCE);
+ hp100_outw(HP100_MISC_ERROR, IRQ_STATUS);
+
+ if (val & HP100_LINK_UP_ST)
+ return 0; /* login was ok */
+ else {
+ printk("hp100: %s: Training failed.\n", dev->name);
+ hp100_down_vg_link(dev);
+ return -EIO;
+ }
+ }
+ /* No forced relogin and the link is already up -> no training needed. */
+ return -EIO;
+}
+
+static void hp100_cascade_reset(struct net_device *dev, u_short enable)
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = netdev_priv(dev);
+
+#ifdef HP100_DEBUG_B
+ hp100_outw(0x4226, TRACE);
+ printk("hp100: %s: cascade_reset\n", dev->name);
+#endif
+
+ if (enable) {
+ hp100_outw(HP100_HW_RST | HP100_RESET_LB, OPTION_LSW);
+ if (lp->chip == HP100_CHIPID_LASSEN) {
+ /* Lassen requires a PCI transmit fifo reset */
+ hp100_page(HW_MAP);
+ hp100_andb(~HP100_PCI_RESET, PCICTRL2);
+ hp100_orb(HP100_PCI_RESET, PCICTRL2);
+ /* Wait for min. 300 ns */
+ /* we can't use jiffies here, because it may be */
+ /* that we have disabled the timer... */
+ udelay(400);
+ hp100_andb(~HP100_PCI_RESET, PCICTRL2);
+ hp100_page(PERFORMANCE);
+ }
+ } else { /* bring out of reset */
+ hp100_outw(HP100_HW_RST | HP100_SET_LB, OPTION_LSW);
+ udelay(400);
+ hp100_page(PERFORMANCE);
+ }
+}
+
+#ifdef HP100_DEBUG
+void hp100_RegisterDump(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ int Page;
+ int Register;
+
+ /* Dump common registers */
+ printk("hp100: %s: Cascade Register Dump\n", dev->name);
+ printk("hardware id #1: 0x%.2x\n", hp100_inb(HW_ID));
+ printk("hardware id #2/paging: 0x%.2x\n", hp100_inb(PAGING));
+ printk("option #1: 0x%.4x\n", hp100_inw(OPTION_LSW));
+ printk("option #2: 0x%.4x\n", hp100_inw(OPTION_MSW));
+
+ /* Dump paged registers */
+ for (Page = 0; Page < 8; Page++) {
+ /* Dump registers */
+ printk("page: 0x%.2x\n", Page);
+ outw(Page, ioaddr + 0x02);
+ for (Register = 0x8; Register < 0x22; Register += 2) {
+ /* Display Register contents except data port */
+ if (((Register != 0x10) && (Register != 0x12)) || (Page > 0)) {
+ printk("0x%.2x = 0x%.4x\n", Register, inw(ioaddr + Register));
+ }
+ }
+ }
+ hp100_page(PERFORMANCE);
+}
+#endif
+
+
+static void cleanup_dev(struct net_device *d)
+{
+ struct hp100_private *p = netdev_priv(d);
+
+ unregister_netdev(d);
+ release_region(d->base_addr, HP100_REGION_SIZE);
+
+ if (p->mode == 1) /* busmaster */
+ pci_free_consistent(p->pci_dev, MAX_RINGSIZE + 0x0f,
+ p->page_vaddr_algn,
+ virt_to_whatever(d, p->page_vaddr_algn));
+ if (p->mem_ptr_virt)
+ iounmap(p->mem_ptr_virt);
+
+ free_netdev(d);
+}
+
+#ifdef CONFIG_EISA
+static int __init hp100_eisa_probe (struct device *gendev)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
+ struct eisa_device *edev = to_eisa_device(gendev);
+ int err;
+
+ if (!dev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(dev, &edev->dev);
+
+ err = hp100_probe1(dev, edev->base_addr + 0xC38, HP100_BUS_EISA, NULL);
+ if (err)
+ goto out1;
+
+#ifdef HP100_DEBUG
+ printk("hp100: %s: EISA adapter found at 0x%x\n", dev->name,
+ dev->base_addr);
+#endif
+ dev_set_drvdata(gendev, dev);
+ return 0;
+ out1:
+ free_netdev(dev);
+ return err;
+}
+
+static int __devexit hp100_eisa_remove (struct device *gendev)
+{
+ struct net_device *dev = dev_get_drvdata(gendev);
+ cleanup_dev(dev);
+ return 0;
+}
+
+static struct eisa_driver hp100_eisa_driver = {
+ .id_table = hp100_eisa_tbl,
+ .driver = {
+ .name = "hp100",
+ .probe = hp100_eisa_probe,
+ .remove = __devexit_p (hp100_eisa_remove),
+ }
+};
+#endif
+
+#ifdef CONFIG_PCI
+static int __devinit hp100_pci_probe (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ int ioaddr;
+ u_short pci_command;
+ int err;
+
+ if (pci_enable_device(pdev))
+ return -ENODEV;
+
+ dev = alloc_etherdev(sizeof(struct hp100_private));
+ if (!dev) {
+ err = -ENOMEM;
+ goto out0;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+ if (!(pci_command & PCI_COMMAND_IO)) {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: PCI I/O Bit has not been set. Setting...\n", dev->name);
+#endif
+ pci_command |= PCI_COMMAND_IO;
+ pci_write_config_word(pdev, PCI_COMMAND, pci_command);
+ }
+
+ if (!(pci_command & PCI_COMMAND_MASTER)) {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: PCI Master Bit has not been set. Setting...\n", dev->name);
+#endif
+ pci_command |= PCI_COMMAND_MASTER;
+ pci_write_config_word(pdev, PCI_COMMAND, pci_command);
+ }
+
+ ioaddr = pci_resource_start(pdev, 0);
+ err = hp100_probe1(dev, ioaddr, HP100_BUS_PCI, pdev);
+ if (err)
+ goto out1;
+
+#ifdef HP100_DEBUG
+ printk("hp100: %s: PCI adapter found at 0x%x\n", dev->name, ioaddr);
+#endif
+ pci_set_drvdata(pdev, dev);
+ return 0;
+ out1:
+ free_netdev(dev);
+ out0:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void __devexit hp100_pci_remove (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ cleanup_dev(dev);
+ pci_disable_device(pdev);
+}
+
+
+static struct pci_driver hp100_pci_driver = {
+ .name = "hp100",
+ .id_table = hp100_pci_tbl,
+ .probe = hp100_pci_probe,
+ .remove = __devexit_p(hp100_pci_remove),
+};
+#endif
+
+/*
+ * module section
+ */
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, "
+ "Siegfried \"Frieder\" Loeffler (dg1sek) <floeff@mathematik.uni-stuttgart.de>");
+MODULE_DESCRIPTION("HP CASCADE Architecture Driver for 100VG-AnyLan Network Adapters");
+
+/*
+ * Note: to register three eisa or pci devices, use:
+ * option hp100 hp100_port=0,0,0
+ * to register one card at io 0x280 as eth239, use:
+ * option hp100 hp100_port=0x280
+ */
+#if defined(MODULE) && defined(CONFIG_ISA)
+#define HP100_DEVICES 5
+/* Parameters set by insmod */
+static int hp100_port[HP100_DEVICES] = { 0, [1 ... (HP100_DEVICES-1)] = -1 };
+module_param_array(hp100_port, int, NULL, 0);
+
+/* List of devices */
+static struct net_device *hp100_devlist[HP100_DEVICES];
+
+static int __init hp100_isa_init(void)
+{
+ struct net_device *dev;
+ int i, err, cards = 0;
+
+ /* Don't autoprobe ISA bus */
+ if (hp100_port[0] == 0)
+ return -ENODEV;
+
+ /* Loop on all possible base addresses */
+ for (i = 0; i < HP100_DEVICES && hp100_port[i] != -1; ++i) {
+ dev = alloc_etherdev(sizeof(struct hp100_private));
+ if (!dev) {
+ printk(KERN_WARNING "hp100: no memory for network device\n");
+ while (cards > 0)
+ cleanup_dev(hp100_devlist[--cards]);
+
+ return -ENOMEM;
+ }
+
+ err = hp100_isa_probe(dev, hp100_port[i]);
+ if (!err)
+ hp100_devlist[cards++] = dev;
+ else
+ free_netdev(dev);
+ }
+
+ return cards > 0 ? 0 : -ENODEV;
+}
+
+static void hp100_isa_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < HP100_DEVICES; i++) {
+ struct net_device *dev = hp100_devlist[i];
+ if (dev)
+ cleanup_dev(dev);
+ }
+}
+#else
+#define hp100_isa_init() (0)
+#define hp100_isa_cleanup() do { } while(0)
+#endif
+
+static int __init hp100_module_init(void)
+{
+ int err;
+
+ err = hp100_isa_init();
+ if (err && err != -ENODEV)
+ goto out;
+#ifdef CONFIG_EISA
+ err = eisa_driver_register(&hp100_eisa_driver);
+ if (err && err != -ENODEV)
+ goto out2;
+#endif
+#ifdef CONFIG_PCI
+ err = pci_register_driver(&hp100_pci_driver);
+ if (err && err != -ENODEV)
+ goto out3;
+#endif
+ out:
+ return err;
+ out3:
+#ifdef CONFIG_EISA
+ eisa_driver_unregister (&hp100_eisa_driver);
+ out2:
+#endif
+ hp100_isa_cleanup();
+ goto out;
+}
+
+
+static void __exit hp100_module_exit(void)
+{
+ hp100_isa_cleanup();
+#ifdef CONFIG_EISA
+ eisa_driver_unregister (&hp100_eisa_driver);
+#endif
+#ifdef CONFIG_PCI
+ pci_unregister_driver (&hp100_pci_driver);
+#endif
+}
+
+module_init(hp100_module_init)
+module_exit(hp100_module_exit)
diff --git a/drivers/net/ethernet/hp/hp100.h b/drivers/net/ethernet/hp/hp100.h
new file mode 100644
index 000000000000..b60e96fe38b4
--- /dev/null
+++ b/drivers/net/ethernet/hp/hp100.h
@@ -0,0 +1,615 @@
+/*
+ * hp100.h: Hewlett Packard HP10/100VG ANY LAN ethernet driver for Linux.
+ *
+ * $Id: hp100.h,v 1.51 1997/04/08 14:26:42 floeff Exp floeff $
+ *
+ * Authors: Jaroslav Kysela, <perex@pf.jcu.cz>
+ * Siegfried Loeffler <floeff@tunix.mathematik.uni-stuttgart.de>
+ *
+ * This driver is based on the 'hpfepkt' crynwr packet driver.
+ *
+ * This source/code is public free; you can distribute it and/or modify
+ * it under terms of the GNU General Public License (published by the
+ * Free Software Foundation) either version two of this License, or any
+ * later version.
+ */
+
+/****************************************************************************
+ * Hardware Constants
+ ****************************************************************************/
+
+/*
+ * Page Identifiers
+ * (Swap Paging Register, PAGING, bits 3:0, Offset 0x02)
+ */
+
+#define HP100_PAGE_PERFORMANCE 0x0 /* Page 0 */
+#define HP100_PAGE_MAC_ADDRESS 0x1 /* Page 1 */
+#define HP100_PAGE_HW_MAP 0x2 /* Page 2 */
+#define HP100_PAGE_EEPROM_CTRL 0x3 /* Page 3 */
+#define HP100_PAGE_MAC_CTRL 0x4 /* Page 4 */
+#define HP100_PAGE_MMU_CFG 0x5 /* Page 5 */
+#define HP100_PAGE_ID_MAC_ADDR 0x6 /* Page 6 */
+#define HP100_PAGE_MMU_POINTER 0x7 /* Page 7 */
+
+
+/* Registers that are present on all pages */
+
+#define HP100_REG_HW_ID 0x00 /* R: (16) Unique card ID */
+#define HP100_REG_TRACE 0x00 /* W: (16) Used for debug output */
+#define HP100_REG_PAGING 0x02 /* R: (16),15:4 Card ID */
+ /* W: (16),3:0 Switch pages */
+#define HP100_REG_OPTION_LSW 0x04 /* RW: (16) Select card functions */
+#define HP100_REG_OPTION_MSW 0x06 /* RW: (16) Select card functions */
+
+/* Page 0 - Performance */
+
+#define HP100_REG_IRQ_STATUS 0x08 /* RW: (16) Which ints are pending */
+#define HP100_REG_IRQ_MASK 0x0a /* RW: (16) Select ints to allow */
+#define HP100_REG_FRAGMENT_LEN 0x0c /* W: (16)12:0 Current fragment len */
+/* Note: For 32 bit systems, fragment len and offset registers are available */
+/* at offset 0x28 and 0x2c, where they can be written as 32bit values. */
+#define HP100_REG_OFFSET 0x0e /* RW: (16)12:0 Offset to start read */
+#define HP100_REG_DATA32 0x10 /* RW: (32) I/O mode data port */
+#define HP100_REG_DATA16 0x12 /* RW: WORDs must be read from here */
+#define HP100_REG_TX_MEM_FREE 0x14 /* RD: (32) Amount of free Tx mem */
+#define HP100_REG_TX_PDA_L 0x14 /* W: (32) BM: Ptr to PDL, Low Pri */
+#define HP100_REG_TX_PDA_H 0x1c /* W: (32) BM: Ptr to PDL, High Pri */
+#define HP100_REG_RX_PKT_CNT 0x18 /* RD: (8) Rx count of pkts on card */
+#define HP100_REG_TX_PKT_CNT 0x19 /* RD: (8) Tx count of pkts on card */
+#define HP100_REG_RX_PDL 0x1a /* R: (8) BM: # rx pdl not executed */
+#define HP100_REG_TX_PDL 0x1b /* R: (8) BM: # tx pdl not executed */
+#define HP100_REG_RX_PDA 0x18 /* W: (32) BM: Up to 31 addresses */
+ /* which point to a PDL */
+#define HP100_REG_SL_EARLY 0x1c /* (32) Enhanced Slave Early Rx */
+#define HP100_REG_STAT_DROPPED 0x20 /* R (12) Dropped Packet Counter */
+#define HP100_REG_STAT_ERRORED 0x22 /* R (8) Errored Packet Counter */
+#define HP100_REG_STAT_ABORT 0x23 /* R (8) Abort Counter/OW Coll. Flag */
+#define HP100_REG_RX_RING 0x24 /* W (32) Slave: RX Ring Pointers */
+#define HP100_REG_32_FRAGMENT_LEN 0x28 /* W (13) Slave: Fragment Length Reg */
+#define HP100_REG_32_OFFSET 0x2c /* W (16) Slave: Offset Register */
+
+/* Page 1 - MAC Address/Hash Table */
+
+#define HP100_REG_MAC_ADDR 0x08 /* RW: (8) Cards MAC address */
+#define HP100_REG_HASH_BYTE0 0x10 /* RW: (8) Cards multicast filter */
+
+/* Page 2 - Hardware Mapping */
+
+#define HP100_REG_MEM_MAP_LSW 0x08 /* RW: (16) LSW of cards mem addr */
+#define HP100_REG_MEM_MAP_MSW 0x0a /* RW: (16) MSW of cards mem addr */
+#define HP100_REG_IO_MAP 0x0c /* RW: (8) Cards I/O address */
+#define HP100_REG_IRQ_CHANNEL 0x0d /* RW: (8) IRQ and edge/level int */
+#define HP100_REG_SRAM 0x0e /* RW: (8) How much RAM on card */
+#define HP100_REG_BM 0x0f /* RW: (8) Controls BM functions */
+
+/* New on Page 2 for ETR chips: */
+#define HP100_REG_MODECTRL1 0x10 /* RW: (8) Mode Control 1 */
+#define HP100_REG_MODECTRL2 0x11 /* RW: (8) Mode Control 2 */
+#define HP100_REG_PCICTRL1 0x12 /* RW: (8) PCI Cfg 1 */
+#define HP100_REG_PCICTRL2 0x13 /* RW: (8) PCI Cfg 2 */
+#define HP100_REG_PCIBUSMLAT 0x15 /* RW: (8) PCI Bus Master Latency */
+#define HP100_REG_EARLYTXCFG 0x16 /* RW: (16) Early TX Cfg/Cntrl Reg */
+#define HP100_REG_EARLYRXCFG 0x18 /* RW: (8) Early RX Cfg/Cntrl Reg */
+#define HP100_REG_ISAPNPCFG1 0x1a /* RW: (8) ISA PnP Cfg/Cntrl Reg 1 */
+#define HP100_REG_ISAPNPCFG2 0x1b /* RW: (8) ISA PnP Cfg/Cntrl Reg 2 */
+
+/* Page 3 - EEPROM/Boot ROM */
+
+#define HP100_REG_EEPROM_CTRL 0x08 /* RW: (16) Used to load EEPROM */
+#define HP100_REG_BOOTROM_CTRL 0x0a
+
+/* Page 4 - LAN Configuration (MAC_CTRL) */
+
+#define HP100_REG_10_LAN_CFG_1 0x08 /* RW: (8) Set 10M XCVR functions */
+#define HP100_REG_10_LAN_CFG_2 0x09 /* RW: (8) 10M XCVR functions */
+#define HP100_REG_VG_LAN_CFG_1 0x0a /* RW: (8) Set 100M XCVR functions */
+#define HP100_REG_VG_LAN_CFG_2 0x0b /* RW: (8) 100M LAN Training cfgregs */
+#define HP100_REG_MAC_CFG_1 0x0c /* RW: (8) Types of pkts to accept */
+#define HP100_REG_MAC_CFG_2 0x0d /* RW: (8) Misc MAC functions */
+#define HP100_REG_MAC_CFG_3 0x0e /* RW: (8) Misc MAC functions */
+#define HP100_REG_MAC_CFG_4 0x0f /* R: (8) Misc MAC states */
+#define HP100_REG_DROPPED 0x10 /* R: (16),11:0 Pkts can't fit in mem */
+#define HP100_REG_CRC 0x12 /* R: (8) Pkts with CRC */
+#define HP100_REG_ABORT 0x13 /* R: (8) Aborted Tx pkts */
+#define HP100_REG_TRAIN_REQUEST 0x14 /* RW: (16) Endnode MAC register. */
+#define HP100_REG_TRAIN_ALLOW 0x16 /* R: (16) Hub allowed register */
+
+/* Page 5 - MMU */
+
+#define HP100_REG_RX_MEM_STOP 0x0c /* RW: (16) End of Rx ring addr */
+#define HP100_REG_TX_MEM_STOP 0x0e /* RW: (16) End of Tx ring addr */
+#define HP100_REG_PDL_MEM_STOP 0x10 /* Not used by 802.12 devices */
+#define HP100_REG_ECB_MEM_STOP 0x14 /* I've no idea what this is */
+
+/* Page 6 - Card ID/Physical LAN Address */
+
+#define HP100_REG_BOARD_ID 0x08 /* R: (8) EISA/ISA card ID */
+#define HP100_REG_BOARD_IO_CHCK 0x0c /* R: (8) Added to ID to get FFh */
+#define HP100_REG_SOFT_MODEL 0x0d /* R: (8) Config program defined */
+#define HP100_REG_LAN_ADDR 0x10 /* R: (8) MAC addr of card */
+#define HP100_REG_LAN_ADDR_CHCK 0x16 /* R: (8) Added to addr to get FFh */
+
+/* Page 7 - MMU Current Pointers */
+
+#define HP100_REG_PTR_RXSTART 0x08 /* R: (16) Current begin of Rx ring */
+#define HP100_REG_PTR_RXEND 0x0a /* R: (16) Current end of Rx ring */
+#define HP100_REG_PTR_TXSTART 0x0c /* R: (16) Current begin of Tx ring */
+#define HP100_REG_PTR_TXEND 0x0e /* R: (16) Current end of Rx ring */
+#define HP100_REG_PTR_RPDLSTART 0x10
+#define HP100_REG_PTR_RPDLEND 0x12
+#define HP100_REG_PTR_RINGPTRS 0x14
+#define HP100_REG_PTR_MEMDEBUG 0x1a
+/* ------------------------------------------------------------------------ */
+
+
+/*
+ * Hardware ID Register I (Always available, HW_ID, Offset 0x00)
+ */
+#define HP100_HW_ID_CASCADE 0x4850 /* Identifies Cascade Chip */
+
+/*
+ * Hardware ID Register 2 & Paging Register
+ * (Always available, PAGING, Offset 0x02)
+ * Bits 15:4 are for the Chip ID
+ */
+#define HP100_CHIPID_MASK 0xFFF0
+#define HP100_CHIPID_SHASTA 0x5350 /* Not 802.12 compliant */
+ /* EISA BM/SL, MCA16/32 SL, ISA SL */
+#define HP100_CHIPID_RAINIER 0x5360 /* Not 802.12 compliant EISA BM, */
+ /* PCI SL, MCA16/32 SL, ISA SL */
+#define HP100_CHIPID_LASSEN 0x5370 /* 802.12 compliant PCI BM, PCI SL */
+ /* LRF supported */
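+/* Sketch (illustrative): a driver identifies the chip by masking the
+ * paging register, e.g. chip = hp100_inw(PAGING) & HP100_CHIPID_MASK,
+ * then comparing against HP100_CHIPID_SHASTA/RAINIER/LASSEN. */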
+
+/*
+ * Option Registers I and II
+ * (Always available, OPTION_LSW, Offset 0x04-0x05)
+ */
+#define HP100_DEBUG_EN 0x8000 /* 0:Dis., 1:Enable Debug Dump Ptr. */
+#define HP100_RX_HDR 0x4000 /* 0:Dis., 1:Enable putting pkt into */
+ /* system mem. before Rx interrupt */
+#define HP100_MMAP_DIS 0x2000 /* 0:Enable, 1:Disable mem.mapping. */
+ /* MMAP_DIS must be 0 and MEM_EN */
+ /* must be 1 for memory-mapped */
+ /* mode to be enabled */
+#define HP100_EE_EN 0x1000 /* 0:Disable,1:Enable EEPROM writing */
+#define HP100_BM_WRITE 0x0800 /* 0:Slave, 1:Bus Master for Tx data */
+#define HP100_BM_READ 0x0400 /* 0:Slave, 1:Bus Master for Rx data */
+#define HP100_TRI_INT 0x0200 /* 0:Don't, 1:Do tri-state the int */
+#define HP100_MEM_EN 0x0040 /* Config program set this to */
+ /* 0:Disable, 1:Enable mem map. */
+ /* See MMAP_DIS. */
+#define HP100_IO_EN 0x0020 /* 1:Enable I/O transfers */
+#define HP100_BOOT_EN 0x0010 /* 1:Enable boot ROM access */
+#define HP100_FAKE_INT 0x0008 /* 1:int */
+#define HP100_INT_EN 0x0004 /* 1:Enable ints from card */
+#define HP100_HW_RST 0x0002 /* 0:Reset, 1:Out of reset */
+ /* NIC reset on 0 to 1 transition */
+
+/*
+ * Option Register III
+ * (Always available, OPTION_MSW, Offset 0x06)
+ */
+#define HP100_PRIORITY_TX 0x0080 /* 1:Do all Tx pkts as priority */
+#define HP100_EE_LOAD 0x0040 /* 1:EEPROM loading, 0 when done */
+#define HP100_ADV_NXT_PKT 0x0004 /* 1:Advance to next pkt in Rx queue */
+ /* h/w will set to 0 when done */
+#define HP100_TX_CMD 0x0002 /* 1:Tell h/w download done, h/w */
+ /* will set to 0 when done */
+
+/*
+ * Interrupt Status Registers I and II
+ * (Page PERFORMANCE, IRQ_STATUS, Offset 0x08-0x09)
+ * Note: With old chips, these registers will clear when 1 is written to them;
+ * with new chips this depends on the setting of CLR_ISMODE
+ */
+#define HP100_RX_EARLY_INT 0x2000
+#define HP100_RX_PDA_ZERO 0x1000
+#define HP100_RX_PDL_FILL_COMPL 0x0800
+#define HP100_RX_PACKET 0x0400 /* 0:No, 1:Yes pkt has been Rx */
+#define HP100_RX_ERROR 0x0200 /* 0:No, 1:Yes Rx pkt had error */
+#define HP100_TX_PDA_ZERO 0x0020 /* 1 when PDA count goes to zero */
+#define HP100_TX_SPACE_AVAIL 0x0010 /* 0:<8192, 1:>=8192 Tx free bytes */
+#define HP100_TX_COMPLETE 0x0008 /* 0:No, 1:Yes a Tx has completed */
+#define HP100_MISC_ERROR 0x0004 /* 0:No, 1:Lan Link down or bus error */
+#define HP100_TX_ERROR 0x0002 /* 0:No, 1:Yes Tx pkt had error */
+
+/*
+ * Xmit Memory Free Count
+ * (Page PERFORMANCE, TX_MEM_FREE, Offset 0x14) (Read only, 32bit)
+ */
+#define HP100_AUTO_COMPARE 0x80000000 /* Tx Space avail & pkts<255 */
+#define HP100_FREE_SPACE 0x7fffffe0 /* Tx free memory */
+
+/*
+ * IRQ Channel
+ * (Page HW_MAP, IRQ_CHANNEL, Offset 0x0d)
+ */
+#define HP100_ZERO_WAIT_EN 0x80 /* 0:No, 1:Yes asserts NOWS signal */
+#define HP100_IRQ_SCRAMBLE 0x40
+#define HP100_BOND_HP 0x20
+#define HP100_LEVEL_IRQ 0x10 /* 0:Edge, 1:Level type interrupts. */
+ /* (Only valid on EISA cards) */
+#define HP100_IRQMASK 0x0F /* Isolate the IRQ bits */
+
+/*
+ * SRAM Parameters
+ * (Page HW_MAP, SRAM, Offset 0x0e)
+ */
+#define HP100_RAM_SIZE_MASK 0xe0 /* AND to get SRAM size index */
+#define HP100_RAM_SIZE_SHIFT 0x05 /* Shift count (put index in lower bits) */
+
+/*
+ * Bus Master Register
+ * (Page HW_MAP, BM, Offset 0x0f)
+ */
+#define HP100_BM_BURST_RD 0x01 /* EISA only: 1=Use burst trans. fm system */
+ /* memory to chip (tx) */
+#define HP100_BM_BURST_WR 0x02 /* EISA only: 1=Use burst trans. fm system */
+ /* memory to chip (rx) */
+#define HP100_BM_MASTER 0x04 /* 0:Slave, 1:BM mode */
+#define HP100_BM_PAGE_CK 0x08 /* This bit should be set whenever in */
+ /* an EISA system */
+#define HP100_BM_PCI_8CLK 0x40 /* ... cycles 8 clocks apart */
+
+
+/*
+ * Mode Control Register I
+ * (Page HW_MAP, MODECTRL1, Offset0x10)
+ */
+#define HP100_TX_DUALQ 0x10
+ /* If set and BM -> dual tx pda queues */
+#define HP100_ISR_CLRMODE 0x02 /* If set ISR will clear all pending */
+ /* interrupts on read (etr only?) */
+#define HP100_EE_NOLOAD 0x04 /* Status: whether resources will be */
+ /* loaded from the eeprom */
+#define HP100_TX_CNT_FLG 0x08 /* Controls Early TX Reg Cnt Field */
+#define HP100_PDL_USE3 0x10 /* If set BM engine will read only */
+ /* first three data elements of a PDL */
+ /* on the first access. */
+#define HP100_BUSTYPE_MASK 0xe0 /* Three bit bus type info */
+
+/*
+ * Mode Control Register II
+ * (Page HW_MAP, MODECTRL2, Offset0x11)
+ */
+#define HP100_EE_MASK 0x0f /* Tell EEPROM circuit not to load */
+ /* certain resources */
+#define HP100_DIS_CANCEL 0x20 /* For tx dualq mode operation */
+#define HP100_EN_PDL_WB 0x40 /* 1: Status of PDL completion may be */
+ /* written back to system mem */
+#define HP100_EN_BUS_FAIL 0x80 /* Enables bus-fail portion of misc */
+ /* interrupt */
+
+/*
+ * PCI Configuration and Control Register I
+ * (Page HW_MAP, PCICTRL1, Offset 0x12)
+ */
+#define HP100_LO_MEM 0x01 /* 1: Mapped Mem requested below 1MB */
+#define HP100_NO_MEM 0x02 /* 1: Disables Req for sysmem to PCI */
+ /* bios */
+#define HP100_USE_ISA 0x04 /* 1: isa type decodes will occur */
+ /* simultaneously with PCI decodes */
+#define HP100_IRQ_HI_MASK 0xf0 /* pgmed by pci bios */
+#define HP100_PCI_IRQ_HI_MASK 0x78 /* Isolate 4 bits for PCI IRQ */
+
+/*
+ * PCI Configuration and Control Register II
+ * (Page HW_MAP, PCICTRL2, Offset 0x13)
+ */
+#define HP100_RD_LINE_PDL 0x01 /* 1: PCI command Memory Read Line en */
+#define HP100_RD_TX_DATA_MASK 0x06 /* choose PCI memread cmds for TX */
+#define HP100_MWI 0x08 /* 1: en. PCI memory write invalidate */
+#define HP100_ARB_MODE 0x10 /* Select PCI arbiter type */
+#define HP100_STOP_EN 0x20 /* Enables PCI state machine to issue */
+ /* pci stop if cascade not ready */
+#define HP100_IGNORE_PAR 0x40 /* 1: PCI state machine ignores parity */
+#define HP100_PCI_RESET 0x80 /* 0->1: Reset PCI block */
+
+/*
+ * Early TX Configuration and Control Register
+ * (Page HW_MAP, EARLYTXCFG, Offset 0x16)
+ */
+#define HP100_EN_EARLY_TX 0x8000 /* 1=Enable Early TX */
+#define HP100_EN_ADAPTIVE 0x4000 /* 1=Enable adaptive mode */
+#define HP100_EN_TX_UR_IRQ 0x2000 /* reserved, must be 0 */
+#define HP100_EN_LOW_TX 0x1000 /* reserved, must be 0 */
+#define HP100_ET_CNT_MASK 0x0fff /* bits 11..0: ET counters */
+
+/*
+ * Early RX Configuration and Control Register
+ * (Page HW_MAP, EARLYRXCFG, Offset 0x18)
+ */
+#define HP100_EN_EARLY_RX 0x80 /* 1=Enable Early RX */
+#define HP100_EN_LOW_RX 0x40 /* reserved, must be 0 */
+#define HP100_RX_TRIP_MASK 0x1f /* bits 4..0: threshold at which the
+ * early rx circuit will start the
+ * dma of received packet into system
+ * memory for BM */
+
+/*
+ * Serial Devices Control Register
+ * (Page EEPROM_CTRL, EEPROM_CTRL, Offset 0x08)
+ */
+#define HP100_EEPROM_LOAD 0x0001 /* 0->1 loads EEPROM into registers. */
+ /* When it goes back to 0, load is */
+ /* complete. This should take ~600us. */
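+/* Usage sketch: hp100_load_eeprom() in hp100.c produces the 0->1 edge with
+ * hp100_andw(~HP100_EEPROM_LOAD, EEPROM_CTRL) followed by
+ * hp100_orw(HP100_EEPROM_LOAD, EEPROM_CTRL), then polls HP100_EE_LOAD in
+ * OPTION_MSW until it clears. */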
+
+/*
+ * 10MB LAN Control and Configuration Register I
+ * (Page MAC_CTRL, 10_LAN_CFG_1, Offset 0x08)
+ */
+#define HP100_MAC10_SEL 0xc0 /* Get bits to indicate MAC */
+#define HP100_AUI_SEL 0x20 /* Status of AUI selection */
+#define HP100_LOW_TH 0x10 /* 0:No, 1:Yes allow better cabling */
+#define HP100_LINK_BEAT_DIS 0x08 /* 0:Enable, 1:Disable link beat */
+#define HP100_LINK_BEAT_ST 0x04 /* 0:No, 1:Yes link beat being Rx */
+#define HP100_R_ROL_ST 0x02 /* 0:No, 1:Yes Rx twisted pair has */
+ /* been reversed */
+#define HP100_AUI_ST 0x01 /* 0:No, 1:Yes use AUI on TP card */
+
+/*
+ * 10 MB LAN Control and Configuration Register II
+ * (Page MAC_CTRL, 10_LAN_CFG_2, Offset 0x09)
+ */
+#define HP100_SQU_ST 0x01 /* 0:No, 1:Yes collision signal sent */
+ /* after Tx.Only used for AUI. */
+#define HP100_FULLDUP 0x02 /* 1: LXT901 XCVR fullduplx enabled */
+#define HP100_DOT3_MAC 0x04 /* 1: DOT 3 Mac sel. unless Autosel */
+
+/*
+ * MAC Selection, use with MAC10_SEL bits
+ */
+#define HP100_AUTO_SEL_10 0x0 /* Auto select */
+#define HP100_XCVR_LXT901_10 0x1 /* LXT901 10BaseT transceiver */
+#define HP100_XCVR_7213 0x2 /* 7213 transceiver */
+#define HP100_XCVR_82503 0x3 /* 82503 transceiver */
+
+/*
+ * 100MB LAN Training Register
+ * (Page MAC_CTRL, VG_LAN_CFG_2, Offset 0x0b) (old, pre 802.12)
+ */
+#define HP100_FRAME_FORMAT 0x08 /* 0:802.3, 1:802.5 frames */
+#define HP100_BRIDGE 0x04 /* 0:No, 1:Yes tell hub i am a bridge */
+#define HP100_PROM_MODE 0x02 /* 0:No, 1:Yes tell hub card is */
+ /* promiscuous */
+#define HP100_REPEATER 0x01 /* 0:No, 1:Yes tell hub MAC wants to */
+ /* be a cascaded repeater */
+
+/*
+ * 100MB LAN Control and Configuration Register
+ * (Page MAC_CTRL, VG_LAN_CFG_1, Offset 0x0a)
+ */
+#define HP100_VG_SEL 0x80 /* 0:No, 1:Yes use 100 Mbit MAC */
+#define HP100_LINK_UP_ST 0x40 /* 0:No, 1:Yes endnode logged in */
+#define HP100_LINK_CABLE_ST 0x20 /* 0:No, 1:Yes cable can hear tones */
+ /* from hub */
+#define HP100_LOAD_ADDR 0x10 /* 0->1 card addr will be sent */
+ /* 100ms later the link status */
+ /* bits are valid */
+#define HP100_LINK_CMD 0x08 /* 0->1 link will attempt to log in. */
+ /* 100ms later the link status */
+ /* bits are valid */
+#define HP100_TRN_DONE 0x04 /* NEW ETR-Chips only: Will be reset */
+ /* after LinkUp Cmd is given and set */
+ /* when training has completed. */
+#define HP100_LINK_GOOD_ST 0x02 /* 0:No, 1:Yes cable passed training */
+#define HP100_VG_RESET 0x01 /* 0:Yes, 1:No reset the 100VG MAC */
+
+
+/*
+ * MAC Configuration Register I
+ * (Page MAC_CTRL, MAC_CFG_1, Offset 0x0c)
+ */
+#define HP100_RX_IDLE 0x80 /* 0:Yes, 1:No currently receiving pkts */
+#define HP100_TX_IDLE 0x40 /* 0:Yes, 1:No currently Txing pkts */
+#define HP100_RX_EN 0x20 /* 1: allow receiving of pkts */
+#define HP100_TX_EN 0x10 /* 1: allow transmitting of pkts */
+#define HP100_ACC_ERRORED 0x08 /* 0:No, 1:Yes allow Rx of errored pkts */
+#define HP100_ACC_MC 0x04 /* 0:No, 1:Yes allow Rx of multicast pkts */
+#define HP100_ACC_BC 0x02 /* 0:No, 1:Yes allow Rx of broadcast pkts */
+#define HP100_ACC_PHY 0x01 /* 0:No, 1:Yes allow Rx of ALL phys. pkts */
+#define HP100_MAC1MODEMASK 0xf0 /* Hide ACC bits */
+#define HP100_MAC1MODE1 0x00 /* Receive nothing, must also disable RX */
+#define HP100_MAC1MODE2 0x00
+#define HP100_MAC1MODE3 (HP100_MAC1MODE2 | HP100_ACC_BC)
+#define HP100_MAC1MODE4 (HP100_MAC1MODE3 | HP100_ACC_MC)
+#define HP100_MAC1MODE5 HP100_MAC1MODE4 /* set mc hash to all ones also */
+#define HP100_MAC1MODE6 (HP100_MAC1MODE5 | HP100_ACC_PHY) /* Promiscuous */
+/* Note MODE6 will receive all GOOD packets on the LAN. This really needs
+ a mode 7 defined to be LAN Analyzer mode, which will receive errored and
+ runt packets, and keep the CRC bytes. */
+#define HP100_MAC1MODE7 (HP100_MAC1MODE6 | HP100_ACC_ERRORED)
+
+/*
+ * MAC Configuration Register II
+ * (Page MAC_CTRL, MAC_CFG_2, Offset 0x0d)
+ */
+#define HP100_TR_MODE 0x80 /* 0:No, 1:Yes support Token Ring formats */
+#define HP100_TX_SAME 0x40 /* 0:No, 1:Yes Tx same packet continuous */
+#define HP100_LBK_XCVR 0x20 /* 0:No, 1:Yes loopback through MAC & */
+ /* transceiver */
+#define HP100_LBK_MAC 0x10 /* 0:No, 1:Yes loopback through MAC */
+#define HP100_CRC_I 0x08 /* 0:No, 1:Yes inhibit CRC on Tx packets */
+#define HP100_ACCNA 0x04 /* 1: For 802.5: Accept only token ring
+ * group addr that matches NA mask */
+#define HP100_KEEP_CRC 0x02 /* 0:No, 1:Yes keep CRC on Rx packets. */
+ /* The length will reflect this. */
+#define HP100_ACCFA 0x01 /* 1: For 802.5: Accept only functional
+ * addrs that match FA mask (page1) */
+#define HP100_MAC2MODEMASK 0x02
+#define HP100_MAC2MODE1 0x00
+#define HP100_MAC2MODE2 0x00
+#define HP100_MAC2MODE3 0x00
+#define HP100_MAC2MODE4 0x00
+#define HP100_MAC2MODE5 0x00
+#define HP100_MAC2MODE6 0x00
+#define HP100_MAC2MODE7 HP100_KEEP_CRC
+
+/*
+ * MAC Configuration Register III
+ * (Page MAC_CTRL, MAC_CFG_3, Offset 0x0e)
+ */
+#define HP100_PACKET_PACE 0x03 /* Packet Pacing:
+ * 00: No packet pacing
+ * 01: 8 to 16 uS delay
+ * 10: 16 to 32 uS delay
+ * 11: 32 to 64 uS delay
+ */
+#define HP100_LRF_EN 0x04 /* 1: External LAN Rcv Filter and
+ * TCP/IP Checksumming enabled. */
+#define HP100_AUTO_MODE 0x10 /* 1: AutoSelect between 10/100 */
+
+/*
+ * MAC Configuration Register IV
+ * (Page MAC_CTRL, MAC_CFG_4, Offset 0x0f)
+ */
+#define HP100_MAC_SEL_ST 0x01 /* (R): Status of external VGSEL
+ * Signal, 1=100VG, 0=10Mbit sel. */
+#define HP100_LINK_FAIL_ST 0x02 /* (R): Status of Link Fail portion
+ * of the Misc. Interrupt */
+
+/*
+ * 100 MB LAN Training Request/Allowed Registers
+ * (Page MAC_CTRL, TRAIN_REQUEST and TRAIN_ALLOW, Offset 0x14-0x16)(ETR parts only)
+ */
+#define HP100_MACRQ_REPEATER 0x0001 /* 1: MAC tells HUB it wants to be
+ * a cascaded repeater
+ * 0: ... wants to be a DTE */
+#define HP100_MACRQ_PROMSC 0x0006 /* 2 bits: Promiscuous mode
+ * 00: Rcv only unicast packets
+ * specifically addr to this
+ * endnode
+ * 10: Rcv all pckts fwded by
+ * the local repeater */
+#define HP100_MACRQ_FRAMEFMT_EITHER 0x0018 /* 11: either format allowed */
+#define HP100_MACRQ_FRAMEFMT_802_3 0x0000 /* 00: 802.3 is requested */
+#define HP100_MACRQ_FRAMEFMT_802_5 0x0010 /* 10: 802.5 format is requested */
+#define HP100_CARD_MACVER 0xe000 /* R: 3 bit Cards 100VG MAC version */
+#define HP100_MALLOW_REPEATER 0x0001 /* If reset, requested access as an
+ * end node is allowed */
+#define HP100_MALLOW_PROMSC 0x0004 /* 2 bits: Promiscuous mode
+ * 00: Rcv only unicast packets
+ * specifically addr to this
+ * endnode
+ * 10: Rcv all pckts fwded by
+ * the local repeater */
+#define HP100_MALLOW_FRAMEFMT 0x00e0 /* 2 bits: Frame Format
+ * 00: 802.3 format will be used
+ * 10: 802.5 format will be used */
+#define HP100_MALLOW_ACCDENIED 0x0400 /* N bit */
+#define HP100_MALLOW_CONFIGURE 0x0f00 /* C bit */
+#define HP100_MALLOW_DUPADDR 0x1000 /* D bit */
+#define HP100_HUB_MACVER 0xe000 /* R: 3 bit 802.12 MAC/RMAC training */
+ /* protocol of repeater */
+
+/* ****************************************************************************** */
+
+/*
+ * Set/Reset bits
+ */
+#define HP100_SET_HB 0x0100 /* 1:Set, 0:Clear the fields whose mask bit is 1 */
+#define HP100_SET_LB 0x0001 /* HB affects the upper byte, LB the lower byte */
+#define HP100_RESET_HB 0x0000 /* For readability when resetting bits */
+#define HP100_RESET_LB 0x0000 /* For readability when resetting bits */
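+/* Usage sketch: writing HP100_INT_EN | HP100_SET_LB to OPTION_LSW sets the
+ * INT_EN bit, while HP100_INT_EN | HP100_RESET_LB clears it -- this is
+ * exactly what the hp100_ints_on()/hp100_ints_off() macros below do. */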
+
+/*
+ * Misc. Constants
+ */
+#define HP100_LAN_100 100 /* lan_type value for VG */
+#define HP100_LAN_10 10 /* lan_type value for 10BaseT */
+#define HP100_LAN_COAX 9 /* lan_type value for Coax */
+#define HP100_LAN_ERR (-1) /* lan_type value for link down */
+
+/*
+ * Bus Master Data Structures ----------------------------------------------
+ */
+
+#define MAX_RX_PDL 30 /* Card limit = 31 */
+#define MAX_RX_FRAG 2 /* Don't need more... */
+#define MAX_TX_PDL 29
+#define MAX_TX_FRAG 2 /* Limit = 31 */
+
+/* Define total PDL area size in bytes (should be 4096) */
+/* This is the size of kernel (dma) memory that will be allocated. */
+#define MAX_RINGSIZE (((MAX_RX_FRAG*8+4+4)*MAX_RX_PDL+(MAX_TX_FRAG*8+4+4)*MAX_TX_PDL)+16)
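+/* With the constants above this evaluates to (2*8+4+4)*30 + (2*8+4+4)*29 + 16
+ * = 720 + 696 + 16 = 1432 bytes, so the MAX_RINGSIZE + 0x0f allocation fits
+ * comfortably within the single 4096-byte page referred to above. */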
+
+/* Ethernet Packet Sizes */
+#define MIN_ETHER_SIZE 60
+#define MAX_ETHER_SIZE 1514 /* Needed for preallocation of */
+ /* skb buffer when busmastering */
+
+/* Tx or Rx Ring Entry */
+typedef struct hp100_ring {
+ u_int *pdl; /* Address of the PDL's PDH; the dword before
+ * this address is used for the rx header */
+ u_int pdl_paddr; /* Physical address of PDL */
+ struct sk_buff *skb;
+ struct hp100_ring *next;
+} hp100_ring_t;
+
+
+
+/* Mask for Header Descriptor */
+#define HP100_PKT_LEN_MASK 0x1FFF /* AND with RxLength to get length */
+
+
+/* Receive Packet Status. Note, the error bits are only valid if ACC_ERRORED
+ bit in the MAC Configuration Register 1 is set. */
+#define HP100_RX_PRI 0x8000 /* 0:No, 1:Yes packet is priority */
+#define HP100_SDF_ERR 0x4000 /* 0:No, 1:Yes start of frame error */
+#define HP100_SKEW_ERR 0x2000 /* 0:No, 1:Yes skew out of range */
+#define HP100_BAD_SYMBOL_ERR 0x1000 /* 0:No, 1:Yes invalid symbol received */
+#define HP100_RCV_IPM_ERR 0x0800 /* 0:No, 1:Yes pkt had an invalid packet */
+ /* marker */
+#define HP100_SYMBOL_BAL_ERR 0x0400 /* 0:No, 1:Yes symbol balance error */
+#define HP100_VG_ALN_ERR 0x0200 /* 0:No, 1:Yes non-octet received */
+#define HP100_TRUNC_ERR 0x0100 /* 0:No, 1:Yes the packet was truncated */
+#define HP100_RUNT_ERR 0x0040 /* 0:No, 1:Yes pkt length < Min Pkt */
+ /* Length Reg. */
+#define HP100_ALN_ERR 0x0010 /* 0:No, 1:Yes align error. */
+#define HP100_CRC_ERR 0x0008 /* 0:No, 1:Yes CRC occurred. */
+
+/* The last three bits indicate the type of destination address */
+
+#define HP100_MULTI_ADDR_HASH 0x0006 /* 110: Addr multicast, matched hash */
+#define HP100_BROADCAST_ADDR 0x0003 /* x11: Addr broadcast */
+#define HP100_MULTI_ADDR_NO_HASH 0x0002 /* 010: Addr multicast, didn't match hash */
+#define HP100_PHYS_ADDR_MATCH 0x0001 /* x01: Addr was physical and mine */
+#define HP100_PHYS_ADDR_NO_MATCH 0x0000 /* x00: Addr was physical but not mine */
+
+/*
+ * macros
+ */
+
+#define hp100_inb( reg ) \
+ inb( ioaddr + HP100_REG_##reg )
+#define hp100_inw( reg ) \
+ inw( ioaddr + HP100_REG_##reg )
+#define hp100_inl( reg ) \
+ inl( ioaddr + HP100_REG_##reg )
+#define hp100_outb( data, reg ) \
+ outb( data, ioaddr + HP100_REG_##reg )
+#define hp100_outw( data, reg ) \
+ outw( data, ioaddr + HP100_REG_##reg )
+#define hp100_outl( data, reg ) \
+ outl( data, ioaddr + HP100_REG_##reg )
+#define hp100_orb( data, reg ) \
+ outb( inb( ioaddr + HP100_REG_##reg ) | (data), ioaddr + HP100_REG_##reg )
+#define hp100_orw( data, reg ) \
+ outw( inw( ioaddr + HP100_REG_##reg ) | (data), ioaddr + HP100_REG_##reg )
+#define hp100_andb( data, reg ) \
+ outb( inb( ioaddr + HP100_REG_##reg ) & (data), ioaddr + HP100_REG_##reg )
+#define hp100_andw( data, reg ) \
+ outw( inw( ioaddr + HP100_REG_##reg ) & (data), ioaddr + HP100_REG_##reg )
+
+#define hp100_page( page ) \
+ outw( HP100_PAGE_##page, ioaddr + HP100_REG_PAGING )
+#define hp100_ints_off() \
+ outw( HP100_INT_EN | HP100_RESET_LB, ioaddr + HP100_REG_OPTION_LSW )
+#define hp100_ints_on() \
+ outw( HP100_INT_EN | HP100_SET_LB, ioaddr + HP100_REG_OPTION_LSW )
+#define hp100_mem_map_enable() \
+ outw( HP100_MMAP_DIS | HP100_RESET_HB, ioaddr + HP100_REG_OPTION_LSW )
+#define hp100_mem_map_disable() \
+ outw( HP100_MMAP_DIS | HP100_SET_HB, ioaddr + HP100_REG_OPTION_LSW )
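+
+/*
+ * Usage sketch: all of the macros above assume a local variable named
+ * 'ioaddr' holding the card's base I/O address. For example,
+ *
+ * int ioaddr = dev->base_addr;
+ * hp100_page(MAC_CTRL);
+ * val = hp100_inb(VG_LAN_CFG_1);
+ *
+ * expands the last line to inb(ioaddr + HP100_REG_VG_LAN_CFG_1).
+ */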
diff --git a/drivers/net/ethernet/i825xx/3c505.c b/drivers/net/ethernet/i825xx/3c505.c
new file mode 100644
index 000000000000..40e1a175fceb
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/3c505.c
@@ -0,0 +1,1673 @@
+/*
+ * Linux Ethernet device driver for the 3Com Etherlink Plus (3C505)
+ * By Craig Southeren, Juha Laiho and Philip Blundell
+ *
+ * 3c505.c This module implements an interface to the 3Com
+ * Etherlink Plus (3c505) Ethernet card. Linux device
+ * driver interface reverse engineered from the Linux 3C509
+ * device drivers. Some 3C505 information gleaned from
+ * the Crynwr packet driver. Still this driver would not
+ * be here without 3C505 technical reference provided by
+ * 3Com.
+ *
+ * $Id: 3c505.c,v 1.10 1996/04/16 13:06:27 phil Exp $
+ *
+ * Authors: Linux 3c505 device driver by
+ * Craig Southeren, <craigs@ineluki.apana.org.au>
+ * Final debugging by
+ * Andrew Tridgell, <tridge@nimbus.anu.edu.au>
+ * Auto irq/address, tuning, cleanup and v1.1.4+ kernel mods by
+ * Juha Laiho, <jlaiho@ichaos.nullnet.fi>
+ * Linux 3C509 driver by
+ * Donald Becker, <becker@super.org>
+ * (Now at <becker@scyld.com>)
+ * Crynwr packet driver by
+ * Krishnan Gopalan and Gregg Stefancik,
+ * Clemson University Engineering Computer Operations.
+ * Portions of the code have been adapted from the 3c505
+ * driver for NCSA Telnet by Bruce Orchard and later
+ * modified by Warren Van Houten and krus@diku.dk.
+ * 3C505 technical information provided by
+ * Terry Murphy, of 3Com Network Adapter Division
+ * Linux 1.3.0 changes by
+ * Alan Cox <Alan.Cox@linux.org>
+ * More debugging, DMA support, currently maintained by
+ * Philip Blundell <philb@gnu.org>
+ * Multicard/soft configurable dma channel/rev 2 hardware support
+ * by Christopher Collins <ccollins@pcug.org.au>
+ * Ethtool support (jgarzik), 11/17/2001
+ */
+
+#define DRV_NAME "3c505"
+#define DRV_VERSION "1.10a"
+
+
+/* Theory of operation:
+ *
+ * The 3c505 is quite an intelligent board. All communication with it is done
+ * by means of Primary Command Blocks (PCBs); these are transferred using PIO
+ * through the command register. The card has 256k of on-board RAM, which is
+ * used to buffer received packets. It might seem at first that more buffers
+ * are better, but in fact this isn't true. From my tests, it seems that
+ * more than about 10 buffers are unnecessary, and there is a noticeable
+ * performance hit in having more active on the card. So the majority of the
+ * card's memory isn't, in fact, used. Sadly, the card only has one transmit
+ * buffer and, short of loading our own firmware into it (which is what some
+ * drivers resort to) there's nothing we can do about this.
+ *
+ * We keep up to 4 "receive packet" commands active on the board at a time.
+ * When a packet comes in, so long as there is a receive command active, the
+ * board will send us a "packet received" PCB and then add the data for that
+ * packet to the DMA queue. If a DMA transfer is not already in progress, we
+ * set one up to start uploading the data. We have to maintain a list of
+ * backlogged receive packets, because the card may decide to tell us about
+ * a newly-arrived packet at any time, and we may not be able to start a DMA
+ * transfer immediately (ie one may already be going on). We can't NAK the
+ * PCB, because then it would throw the packet away.
+ *
+ * Trying to send a PCB to the card at the wrong moment seems to have bad
+ * effects. If we send it a transmit PCB while a receive DMA is happening,
+ * it will just NAK the PCB and so we will have wasted our time. Worse, it
+ * sometimes seems to interrupt the transfer. The majority of the low-level
+ * code is protected by one huge semaphore -- "busy" -- which is set whenever
+ * it probably isn't safe to do anything to the card. The receive routine
+ * must gain a lock on "busy" before it can start a DMA transfer, and the
+ * transmit routine must gain a lock before it sends the first PCB to the card.
+ * The send_pcb() routine also has an internal semaphore to protect it against
+ * being re-entered (which would be disastrous) -- this is needed because
+ * several things can happen asynchronously (re-priming the receiver and
+ * asking the card for statistics, for example). send_pcb() will also refuse
+ * to talk to the card at all if a DMA upload is happening. The higher-level
+ * networking code will reschedule a later retry if some part of the driver
+ * is blocked. In practice, this doesn't seem to happen very often.
+ */
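+
+/*
+ * Rough sketch of the locking discipline described above (helper names
+ * are hypothetical, not this driver's API):
+ *
+ * if (try_claim_busy(adapter)) -- take the "busy" lock
+ * start_receive_dma(adapter);
+ * else
+ * queue_rx_backlog(adapter, pcb); -- retry when the DMA completes
+ */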
+
+/* This driver may now work with revision 2.x hardware, since all the read
+ * operations on the HCR have been removed (we now keep our own softcopy).
+ * But I don't have an old card to test it on.
+ *
+ * This has had the bad effect that the autoprobe routine is now a bit
+ * less friendly to other devices. However, it was never very good
+ * before, so I doubt it will hurt anybody.
+ */
+
+/* The driver is a mess. I took Craig's and Juha's code, and hacked it firstly
+ * to make it more reliable, and secondly to add DMA mode. Many things could
+ * probably be done better; the concurrency protection is particularly awful.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/gfp.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+
+#include "3c505.h"
+
+/*********************************************************
+ *
+ * define debug messages here as common strings to reduce space
+ *
+ *********************************************************/
+
+#define filename __FILE__
+
+#define timeout_msg "*** timeout at %s:%s (line %d) ***\n"
+#define TIMEOUT_MSG(lineno) \
+ pr_notice(timeout_msg, filename, __func__, (lineno))
+
+#define invalid_pcb_msg "*** invalid pcb length %d at %s:%s (line %d) ***\n"
+#define INVALID_PCB_MSG(len) \
+ pr_notice(invalid_pcb_msg, (len), filename, __func__, __LINE__)
+
+#define search_msg "%s: Looking for 3c505 adapter at address %#x..."
+
+#define stilllooking_msg "still looking..."
+
+#define found_msg "found.\n"
+
+#define notfound_msg "not found (reason = %d)\n"
+
+#define couldnot_msg "%s: 3c505 not found\n"
+
+/*********************************************************
+ *
+ * various other debug stuff
+ *
+ *********************************************************/
+
+#ifdef ELP_DEBUG
+static int elp_debug = ELP_DEBUG;
+#else
+static int elp_debug;
+#endif
+#define debug elp_debug
+
+/*
+ * 0 = no messages (well, some)
+ * 1 = messages when high level commands performed
+ * 2 = messages when low level commands performed
+ * 3 = messages when interrupts received
+ */
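+
+/* The level can also be changed at run time through the ethtool
+ * message-level hooks wired up below (netdev_get_msglevel and
+ * netdev_set_msglevel), e.g. with something like "ethtool -s eth0
+ * msglvl 3", assuming an ethtool build that supports msglvl. */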
+
+/*****************************************************************
+ *
+ * List of I/O-addresses we try to auto-sense
+ * Last element MUST BE 0!
+ *****************************************************************/
+
+static int addr_list[] __initdata = {0x300, 0x280, 0x310, 0};
+
+/* Dma Memory related stuff */
+
+static unsigned long dma_mem_alloc(int size)
+{
+ int order = get_order(size);
+ return __get_dma_pages(GFP_KERNEL, order);
+}
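+
+/* dma_mem_alloc() rounds the request up to a whole power-of-two number
+ * of pages, so the matching release must use the same order, as
+ * elp_close() does:
+ *
+ * free_pages((unsigned long) adapter->dma_buffer,
+ * get_order(DMA_BUFFER_SIZE));
+ */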
+
+
+/*****************************************************************
+ *
+ * Functions for I/O (note the inline !)
+ *
+ *****************************************************************/
+
+static inline unsigned char inb_status(unsigned int base_addr)
+{
+ return inb(base_addr + PORT_STATUS);
+}
+
+static inline int inb_command(unsigned int base_addr)
+{
+ return inb(base_addr + PORT_COMMAND);
+}
+
+static inline void outb_control(unsigned char val, struct net_device *dev)
+{
+ outb(val, dev->base_addr + PORT_CONTROL);
+ ((elp_device *)(netdev_priv(dev)))->hcr_val = val;
+}
+
+#define HCR_VAL(x) (((elp_device *)(netdev_priv(x)))->hcr_val)
+
+static inline void outb_command(unsigned char val, unsigned int base_addr)
+{
+ outb(val, base_addr + PORT_COMMAND);
+}
+
+static inline unsigned int backlog_next(unsigned int n)
+{
+ return (n + 1) % BACKLOG_SIZE;
+}
+
+/*****************************************************************
+ *
+ * useful functions for accessing the adapter
+ *
+ *****************************************************************/
+
+/*
+ * use this routine when accessing the ASF bits as they are
+ * changed asynchronously by the adapter
+ */
+
+/* get adapter PCB status */
+#define GET_ASF(addr) \
+ (get_status(addr)&ASF_PCB_MASK)
+
+static inline int get_status(unsigned int base_addr)
+{
+ unsigned long timeout = jiffies + 10*HZ/100;
+ register int stat1;
+ do {
+ stat1 = inb_status(base_addr);
+ } while (stat1 != inb_status(base_addr) && time_before(jiffies, timeout));
+ if (time_after_eq(jiffies, timeout))
+ TIMEOUT_MSG(__LINE__);
+ return stat1;
+}
+
+static inline void set_hsf(struct net_device *dev, int hsf)
+{
+ elp_device *adapter = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ outb_control((HCR_VAL(dev) & ~HSF_PCB_MASK) | hsf, dev);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
+static bool start_receive(struct net_device *, pcb_struct *);
+
+static inline void adapter_reset(struct net_device *dev)
+{
+ unsigned long timeout;
+ elp_device *adapter = netdev_priv(dev);
+ unsigned char orig_hcr = adapter->hcr_val;
+
+ outb_control(0, dev);
+
+ if (inb_status(dev->base_addr) & ACRF) {
+ do {
+ inb_command(dev->base_addr);
+ timeout = jiffies + 2*HZ/100;
+ while (time_before_eq(jiffies, timeout) && !(inb_status(dev->base_addr) & ACRF));
+ } while (inb_status(dev->base_addr) & ACRF);
+ set_hsf(dev, HSF_PCB_NAK);
+ }
+ outb_control(adapter->hcr_val | ATTN | DIR, dev);
+ mdelay(10);
+ outb_control(adapter->hcr_val & ~ATTN, dev);
+ mdelay(10);
+ outb_control(adapter->hcr_val | FLSH, dev);
+ mdelay(10);
+ outb_control(adapter->hcr_val & ~FLSH, dev);
+ mdelay(10);
+
+ outb_control(orig_hcr, dev);
+ if (!start_receive(dev, &adapter->tx_pcb))
+ pr_err("%s: start receive command failed\n", dev->name);
+}
+
+/* Check to make sure that a DMA transfer hasn't timed out. This should
+ * never happen in theory, but seems to occur occasionally if the card gets
+ * prodded at the wrong time.
+ */
+static inline void check_3c505_dma(struct net_device *dev)
+{
+ elp_device *adapter = netdev_priv(dev);
+ if (adapter->dmaing && time_after(jiffies, adapter->current_dma.start_time + 10)) {
+ unsigned long flags, f;
+ pr_err("%s: DMA %s timed out, %d bytes left\n", dev->name,
+ adapter->current_dma.direction ? "download" : "upload",
+ get_dma_residue(dev->dma));
+ spin_lock_irqsave(&adapter->lock, flags);
+ adapter->dmaing = 0;
+ adapter->busy = 0;
+
+ f=claim_dma_lock();
+ disable_dma(dev->dma);
+ release_dma_lock(f);
+
+ if (adapter->rx_active)
+ adapter->rx_active--;
+ outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ }
+}
+
+/* Primitive functions used by send_pcb() */
+static inline bool send_pcb_slow(unsigned int base_addr, unsigned char byte)
+{
+ unsigned long timeout;
+ outb_command(byte, base_addr);
+ for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) {
+ if (inb_status(base_addr) & HCRE)
+ return false;
+ }
+ pr_warning("3c505: send_pcb_slow timed out\n");
+ return true;
+}
+
+static inline bool send_pcb_fast(unsigned int base_addr, unsigned char byte)
+{
+ unsigned int timeout;
+ outb_command(byte, base_addr);
+ for (timeout = 0; timeout < 40000; timeout++) {
+ if (inb_status(base_addr) & HCRE)
+ return false;
+ }
+ pr_warning("3c505: send_pcb_fast timed out\n");
+ return true;
+}
+
+/* Check to see if the receiver needs restarting, and kick it if so */
+static inline void prime_rx(struct net_device *dev)
+{
+ elp_device *adapter = netdev_priv(dev);
+ while (adapter->rx_active < ELP_RX_PCBS && netif_running(dev)) {
+ if (!start_receive(dev, &adapter->itx_pcb))
+ break;
+ }
+}
+
+/*****************************************************************
+ *
+ * send_pcb
+ * Send a PCB to the adapter.
+ *
+ * output byte to command reg --<--+
+ * wait until HCRE is non zero |
+ * loop until all bytes sent -->--+
+ * set HSF1 and HSF2 to 1
+ * output pcb length
+ * wait until ASF give ACK or NAK
+ * set HSF1 and HSF2 to 0
+ *
+ *****************************************************************/
+
+/* This can be quite slow -- the adapter is allowed to take up to 40ms
+ * to respond to the initial interrupt.
+ *
+ * We run initially with interrupts turned on, but with a semaphore set
+ * so that nobody tries to re-enter this code. Once the first byte has
+ * gone through, we turn interrupts off and then send the others (the
+ * timeout is reduced to 500us).
+ */
+
+static bool send_pcb(struct net_device *dev, pcb_struct * pcb)
+{
+ int i;
+ unsigned long timeout;
+ elp_device *adapter = netdev_priv(dev);
+ unsigned long flags;
+
+ check_3c505_dma(dev);
+
+ if (adapter->dmaing && adapter->current_dma.direction == 0)
+ return false;
+
+ /* Avoid contention */
+ if (test_and_set_bit(1, &adapter->send_pcb_semaphore)) {
+ if (elp_debug >= 3) {
+ pr_debug("%s: send_pcb entered while threaded\n", dev->name);
+ }
+ return false;
+ }
+ /*
+ * load each byte into the command register and
+ * wait for the HCRE bit to indicate the adapter
+ * had read the byte
+ */
+ set_hsf(dev, 0);
+
+ if (send_pcb_slow(dev->base_addr, pcb->command))
+ goto abort;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+
+ if (send_pcb_fast(dev->base_addr, pcb->length))
+ goto sti_abort;
+
+ for (i = 0; i < pcb->length; i++) {
+ if (send_pcb_fast(dev->base_addr, pcb->data.raw[i]))
+ goto sti_abort;
+ }
+
+ outb_control(adapter->hcr_val | 3, dev); /* signal end of PCB */
+ outb_command(2 + pcb->length, dev->base_addr);
+
+ /* now wait for the acknowledgement */
+ spin_unlock_irqrestore(&adapter->lock, flags);
+
+ for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) {
+ switch (GET_ASF(dev->base_addr)) {
+ case ASF_PCB_ACK:
+ adapter->send_pcb_semaphore = 0;
+ return true;
+
+ case ASF_PCB_NAK:
+#ifdef ELP_DEBUG
+ pr_debug("%s: send_pcb got NAK\n", dev->name);
+#endif
+ goto abort;
+ }
+ }
+
+ if (elp_debug >= 1)
+ pr_debug("%s: timeout waiting for PCB acknowledge (status %02x)\n",
+ dev->name, inb_status(dev->base_addr));
+ goto abort;
+
+ sti_abort:
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ abort:
+ adapter->send_pcb_semaphore = 0;
+ return false;
+}
+
+
+/*****************************************************************
+ *
+ * receive_pcb
+ * Read a PCB from the adapter
+ *
+ * wait for ACRF to be non-zero ---<---+
+ * input a byte |
+ * if ASF1 and ASF2 were not both one |
+ * before byte was read, loop --->---+
+ * set HSF1 and HSF2 for ack
+ *
+ *****************************************************************/
+
+static bool receive_pcb(struct net_device *dev, pcb_struct * pcb)
+{
+ int i, j;
+ int total_length;
+ int stat;
+ unsigned long timeout;
+ unsigned long flags;
+
+ elp_device *adapter = netdev_priv(dev);
+
+ set_hsf(dev, 0);
+
+ /* get the command code */
+ timeout = jiffies + 2*HZ/100;
+ while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout));
+ if (time_after_eq(jiffies, timeout)) {
+ TIMEOUT_MSG(__LINE__);
+ return false;
+ }
+ pcb->command = inb_command(dev->base_addr);
+
+ /* read the data length */
+ timeout = jiffies + 3*HZ/100;
+ while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout));
+ if (time_after_eq(jiffies, timeout)) {
+ TIMEOUT_MSG(__LINE__);
+ pr_info("%s: status %02x\n", dev->name, stat);
+ return false;
+ }
+ pcb->length = inb_command(dev->base_addr);
+
+ if (pcb->length > MAX_PCB_DATA) {
+ INVALID_PCB_MSG(pcb->length);
+ adapter_reset(dev);
+ return false;
+ }
+ /* read the data */
+ spin_lock_irqsave(&adapter->lock, flags);
+ for (i = 0; i < MAX_PCB_DATA; i++) {
+ for (j = 0; j < 20000; j++) {
+ stat = get_status(dev->base_addr);
+ if (stat & ACRF)
+ break;
+ }
+ pcb->data.raw[i] = inb_command(dev->base_addr);
+ if ((stat & ASF_PCB_MASK) == ASF_PCB_END || j >= 20000)
+ break;
+ }
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ if (i >= MAX_PCB_DATA) {
+ INVALID_PCB_MSG(i);
+ return false;
+ }
+ if (j >= 20000) {
+ TIMEOUT_MSG(__LINE__);
+ return false;
+ }
+ /* the last "data" byte was really the length! */
+ total_length = pcb->data.raw[i];
+
+ /* safety check total length vs data length */
+ if (total_length != (pcb->length + 2)) {
+ if (elp_debug >= 2)
+ pr_warning("%s: mangled PCB received\n", dev->name);
+ set_hsf(dev, HSF_PCB_NAK);
+ return false;
+ }
+
+ if (pcb->command == CMD_RECEIVE_PACKET_COMPLETE) {
+ if (test_and_set_bit(0, (void *) &adapter->busy)) {
+ if (backlog_next(adapter->rx_backlog.in) == adapter->rx_backlog.out) {
+ set_hsf(dev, HSF_PCB_NAK);
+ pr_warning("%s: PCB rejected, transfer in progress and backlog full\n", dev->name);
+ pcb->command = 0;
+ return true;
+ } else {
+ pcb->command = 0xff;
+ }
+ }
+ }
+ set_hsf(dev, HSF_PCB_ACK);
+ return true;
+}
+
+/******************************************************
+ *
+ * queue a receive command on the adapter so we will get an
+ * interrupt when a packet is received.
+ *
+ ******************************************************/
+
+static bool start_receive(struct net_device *dev, pcb_struct * tx_pcb)
+{
+ bool status;
+ elp_device *adapter = netdev_priv(dev);
+
+ if (elp_debug >= 3)
+ pr_debug("%s: restarting receiver\n", dev->name);
+ tx_pcb->command = CMD_RECEIVE_PACKET;
+ tx_pcb->length = sizeof(struct Rcv_pkt);
+ tx_pcb->data.rcv_pkt.buf_seg
+ = tx_pcb->data.rcv_pkt.buf_ofs = 0; /* Unused */
+ tx_pcb->data.rcv_pkt.buf_len = 1600;
+ tx_pcb->data.rcv_pkt.timeout = 0; /* set timeout to zero */
+ status = send_pcb(dev, tx_pcb);
+ if (status)
+ adapter->rx_active++;
+ return status;
+}
+
+/******************************************************
+ *
+ * extract a packet from the adapter
+ * this routine is only called from within the interrupt
+ * service routine, so no cli/sti calls are needed
+ * note that the length is always assumed to be even
+ *
+ ******************************************************/
+
+static void receive_packet(struct net_device *dev, int len)
+{
+ int rlen;
+ elp_device *adapter = netdev_priv(dev);
+ void *target;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ rlen = (len + 1) & ~1;
+ skb = dev_alloc_skb(rlen + 2);
+
+ if (!skb) {
+ pr_warning("%s: memory squeeze, dropping packet\n", dev->name);
+ target = adapter->dma_buffer;
+ adapter->current_dma.target = NULL;
+ /* FIXME: stats */
+ return;
+ }
+
+ skb_reserve(skb, 2);
+ target = skb_put(skb, rlen);
+ if ((unsigned long)(target + rlen) >= MAX_DMA_ADDRESS) {
+ adapter->current_dma.target = target;
+ target = adapter->dma_buffer;
+ } else {
+ adapter->current_dma.target = NULL;
+ }
+
+ /* if this happens, we die */
+ if (test_and_set_bit(0, (void *) &adapter->dmaing))
+ pr_err("%s: rx blocked, DMA in progress, dir %d\n",
+ dev->name, adapter->current_dma.direction);
+
+ adapter->current_dma.direction = 0;
+ adapter->current_dma.length = rlen;
+ adapter->current_dma.skb = skb;
+ adapter->current_dma.start_time = jiffies;
+
+ outb_control(adapter->hcr_val | DIR | TCEN | DMAE, dev);
+
+ flags=claim_dma_lock();
+ disable_dma(dev->dma);
+ clear_dma_ff(dev->dma);
+ set_dma_mode(dev->dma, 0x04); /* dma read */
+ set_dma_addr(dev->dma, isa_virt_to_bus(target));
+ set_dma_count(dev->dma, rlen);
+ enable_dma(dev->dma);
+ release_dma_lock(flags);
+
+ if (elp_debug >= 3) {
+ pr_debug("%s: rx DMA transfer started\n", dev->name);
+ }
+
+ if (adapter->rx_active)
+ adapter->rx_active--;
+
+ if (!adapter->busy)
+ pr_warning("%s: receive_packet called, busy not set.\n", dev->name);
+}
+
+/******************************************************
+ *
+ * interrupt handler
+ *
+ ******************************************************/
+
+static irqreturn_t elp_interrupt(int irq, void *dev_id)
+{
+ int len;
+ int dlen;
+ int icount = 0;
+ struct net_device *dev = dev_id;
+ elp_device *adapter = netdev_priv(dev);
+ unsigned long timeout;
+
+ spin_lock(&adapter->lock);
+
+ do {
+ /*
+ * has a DMA transfer finished?
+ */
+ if (inb_status(dev->base_addr) & DONE) {
+ if (!adapter->dmaing)
+ pr_warning("%s: phantom DMA completed\n", dev->name);
+
+ if (elp_debug >= 3)
+ pr_debug("%s: %s DMA complete, status %02x\n", dev->name,
+ adapter->current_dma.direction ? "tx" : "rx",
+ inb_status(dev->base_addr));
+
+ outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev);
+ if (adapter->current_dma.direction) {
+ dev_kfree_skb_irq(adapter->current_dma.skb);
+ } else {
+ struct sk_buff *skb = adapter->current_dma.skb;
+ if (skb) {
+ if (adapter->current_dma.target) {
+ /* have already done the skb_put() */
+ memcpy(adapter->current_dma.target, adapter->dma_buffer, adapter->current_dma.length);
+ }
+ skb->protocol = eth_type_trans(skb,dev);
+ dev->stats.rx_bytes += skb->len;
+ netif_rx(skb);
+ }
+ }
+ adapter->dmaing = 0;
+ if (adapter->rx_backlog.in != adapter->rx_backlog.out) {
+ int t = adapter->rx_backlog.length[adapter->rx_backlog.out];
+ adapter->rx_backlog.out = backlog_next(adapter->rx_backlog.out);
+ if (elp_debug >= 2)
+ pr_debug("%s: receiving backlogged packet (%d)\n", dev->name, t);
+ receive_packet(dev, t);
+ } else {
+ adapter->busy = 0;
+ }
+ } else {
+ /* has one timed out? */
+ check_3c505_dma(dev);
+ }
+
+ /*
+ * receive a PCB from the adapter
+ */
+ timeout = jiffies + 3*HZ/100;
+ while ((inb_status(dev->base_addr) & ACRF) != 0 && time_before(jiffies, timeout)) {
+ if (receive_pcb(dev, &adapter->irx_pcb)) {
+ switch (adapter->irx_pcb.command) {
+ case 0:
+ break;
+ /*
+ * received a packet - this must be handled fast
+ */
+ case 0xff:
+ case CMD_RECEIVE_PACKET_COMPLETE:
+ /* if the device isn't open, don't pass packets up the stack */
+ if (!netif_running(dev))
+ break;
+ len = adapter->irx_pcb.data.rcv_resp.pkt_len;
+ dlen = adapter->irx_pcb.data.rcv_resp.buf_len;
+ if (adapter->irx_pcb.data.rcv_resp.timeout != 0) {
+ pr_err("%s: interrupt - packet not received correctly\n", dev->name);
+ } else {
+ if (elp_debug >= 3) {
+ pr_debug("%s: interrupt - packet received of length %i (%i)\n",
+ dev->name, len, dlen);
+ }
+ if (adapter->irx_pcb.command == 0xff) {
+ if (elp_debug >= 2)
+ pr_debug("%s: adding packet to backlog (len = %d)\n",
+ dev->name, dlen);
+ adapter->rx_backlog.length[adapter->rx_backlog.in] = dlen;
+ adapter->rx_backlog.in = backlog_next(adapter->rx_backlog.in);
+ } else {
+ receive_packet(dev, dlen);
+ }
+ if (elp_debug >= 3)
+ pr_debug("%s: packet received\n", dev->name);
+ }
+ break;
+
+ /*
+ * 82586 configured correctly
+ */
+ case CMD_CONFIGURE_82586_RESPONSE:
+ adapter->got[CMD_CONFIGURE_82586] = 1;
+ if (elp_debug >= 3)
+ pr_debug("%s: interrupt - configure response received\n", dev->name);
+ break;
+
+ /*
+ * Adapter memory configuration
+ */
+ case CMD_CONFIGURE_ADAPTER_RESPONSE:
+ adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 1;
+ if (elp_debug >= 3)
+ pr_debug("%s: Adapter memory configuration %s.\n", dev->name,
+ adapter->irx_pcb.data.failed ? "failed" : "succeeded");
+ break;
+
+ /*
+ * Multicast list loading
+ */
+ case CMD_LOAD_MULTICAST_RESPONSE:
+ adapter->got[CMD_LOAD_MULTICAST_LIST] = 1;
+ if (elp_debug >= 3)
+ pr_debug("%s: Multicast address list loading %s.\n", dev->name,
+ adapter->irx_pcb.data.failed ? "failed" : "succeeded");
+ break;
+
+ /*
+ * Station address setting
+ */
+ case CMD_SET_ADDRESS_RESPONSE:
+ adapter->got[CMD_SET_STATION_ADDRESS] = 1;
+ if (elp_debug >= 3)
+ pr_debug("%s: Ethernet address setting %s.\n", dev->name,
+ adapter->irx_pcb.data.failed ? "failed" : "succeeded");
+ break;
+
+
+ /*
+ * received board statistics
+ */
+ case CMD_NETWORK_STATISTICS_RESPONSE:
+ dev->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv;
+ dev->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit;
+ dev->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC;
+ dev->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align;
+ dev->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun;
+ dev->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res;
+ adapter->got[CMD_NETWORK_STATISTICS] = 1;
+ if (elp_debug >= 3)
+ pr_debug("%s: interrupt - statistics response received\n", dev->name);
+ break;
+
+ /*
+ * sent a packet
+ */
+ case CMD_TRANSMIT_PACKET_COMPLETE:
+ if (elp_debug >= 3)
+ pr_debug("%s: interrupt - packet sent\n", dev->name);
+ if (!netif_running(dev))
+ break;
+ switch (adapter->irx_pcb.data.xmit_resp.c_stat) {
+ case 0xffff:
+ dev->stats.tx_aborted_errors++;
+ pr_info("%s: transmit timed out, network cable problem?\n", dev->name);
+ break;
+ case 0xfffe:
+ dev->stats.tx_fifo_errors++;
+ pr_info("%s: transmit timed out, FIFO underrun\n", dev->name);
+ break;
+ }
+ netif_wake_queue(dev);
+ break;
+
+ /*
+ * some unknown PCB
+ */
+ default:
+ pr_debug("%s: unknown PCB received - %2.2x\n",
+ dev->name, adapter->irx_pcb.command);
+ break;
+ }
+ } else {
+ pr_warning("%s: failed to read PCB on interrupt\n", dev->name);
+ adapter_reset(dev);
+ }
+ }
+
+ } while (icount++ < 5 && (inb_status(dev->base_addr) & (ACRF | DONE)));
+
+ prime_rx(dev);
+
+ /*
+ * indicate no longer in interrupt routine
+ */
+ spin_unlock(&adapter->lock);
+ return IRQ_HANDLED;
+}
+
+
+/******************************************************
+ *
+ * open the board
+ *
+ ******************************************************/
+
+static int elp_open(struct net_device *dev)
+{
+ elp_device *adapter = netdev_priv(dev);
+ int retval;
+
+ if (elp_debug >= 3)
+ pr_debug("%s: request to open device\n", dev->name);
+
+ /*
+ * make sure we actually found the device
+ */
+ if (adapter == NULL) {
+ pr_err("%s: Opening a non-existent physical device\n", dev->name);
+ return -EAGAIN;
+ }
+ /*
+ * disable interrupts on the board
+ */
+ outb_control(0, dev);
+
+ /*
+ * clear any pending interrupts
+ */
+ inb_command(dev->base_addr);
+ adapter_reset(dev);
+
+ /*
+ * no receive PCBs active
+ */
+ adapter->rx_active = 0;
+
+ adapter->busy = 0;
+ adapter->send_pcb_semaphore = 0;
+ adapter->rx_backlog.in = 0;
+ adapter->rx_backlog.out = 0;
+
+ spin_lock_init(&adapter->lock);
+
+ /*
+ * install our interrupt service routine
+ */
+ if ((retval = request_irq(dev->irq, elp_interrupt, 0, dev->name, dev))) {
+ pr_err("%s: could not allocate IRQ%d\n", dev->name, dev->irq);
+ return retval;
+ }
+ if ((retval = request_dma(dev->dma, dev->name))) {
+ free_irq(dev->irq, dev);
+ pr_err("%s: could not allocate DMA%d channel\n", dev->name, dev->dma);
+ return retval;
+ }
+ adapter->dma_buffer = (void *) dma_mem_alloc(DMA_BUFFER_SIZE);
+ if (!adapter->dma_buffer) {
+ pr_err("%s: could not allocate DMA buffer\n", dev->name);
+ free_dma(dev->dma);
+ free_irq(dev->irq, dev);
+ return -ENOMEM;
+ }
+ adapter->dmaing = 0;
+
+ /*
+ * enable interrupts on the board
+ */
+ outb_control(CMDE, dev);
+
+ /*
+ * configure adapter memory: we need 10 multicast addresses, default==0
+ */
+ if (elp_debug >= 3)
+ pr_debug("%s: sending 3c505 memory configuration command\n", dev->name);
+ adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY;
+ adapter->tx_pcb.data.memconf.cmd_q = 10;
+ adapter->tx_pcb.data.memconf.rcv_q = 20;
+ adapter->tx_pcb.data.memconf.mcast = 10;
+ adapter->tx_pcb.data.memconf.frame = 20;
+ adapter->tx_pcb.data.memconf.rcv_b = 20;
+ adapter->tx_pcb.data.memconf.progs = 0;
+ adapter->tx_pcb.length = sizeof(struct Memconf);
+ adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb))
+ pr_err("%s: couldn't send memory configuration command\n", dev->name);
+ else {
+ unsigned long timeout = jiffies + TIMEOUT;
+ while (adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] == 0 && time_before(jiffies, timeout));
+ if (time_after_eq(jiffies, timeout))
+ TIMEOUT_MSG(__LINE__);
+ }
+
+
+ /*
+ * configure adapter to receive broadcast messages and wait for response
+ */
+ if (elp_debug >= 3)
+ pr_debug("%s: sending 82586 configure command\n", dev->name);
+ adapter->tx_pcb.command = CMD_CONFIGURE_82586;
+ adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
+ adapter->tx_pcb.length = 2;
+ adapter->got[CMD_CONFIGURE_82586] = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb))
+ pr_err("%s: couldn't send 82586 configure command\n", dev->name);
+ else {
+ unsigned long timeout = jiffies + TIMEOUT;
+ while (adapter->got[CMD_CONFIGURE_82586] == 0 && time_before(jiffies, timeout));
+ if (time_after_eq(jiffies, timeout))
+ TIMEOUT_MSG(__LINE__);
+ }
+
+ /* enable burst-mode DMA */
+ /* outb(0x1, dev->base_addr + PORT_AUXDMA); */
+
+ /*
+ * queue receive commands to provide buffering
+ */
+ prime_rx(dev);
+ if (elp_debug >= 3)
+ pr_debug("%s: %d receive PCBs active\n", dev->name, adapter->rx_active);
+
+ /*
+ * device is now officially open!
+ */
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+
+/******************************************************
+ *
+ * send a packet to the adapter
+ *
+ ******************************************************/
+
+static bool send_packet(struct net_device *dev, struct sk_buff *skb)
+{
+ elp_device *adapter = netdev_priv(dev);
+ unsigned long target;
+ unsigned long flags;
+
+ /*
+ * make sure the length is even and no shorter than 60 bytes
+ */
+ unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1);
+
+ if (test_and_set_bit(0, (void *) &adapter->busy)) {
+ if (elp_debug >= 2)
+ pr_debug("%s: transmit blocked\n", dev->name);
+ return false;
+ }
+
+ dev->stats.tx_bytes += nlen;
+
+ /*
+ * send the adapter a transmit packet command. Ignore segment and offset
+ * and make sure the length is even
+ */
+ adapter->tx_pcb.command = CMD_TRANSMIT_PACKET;
+ adapter->tx_pcb.length = sizeof(struct Xmit_pkt);
+ adapter->tx_pcb.data.xmit_pkt.buf_ofs
+ = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0; /* Unused */
+ adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen;
+
+ if (!send_pcb(dev, &adapter->tx_pcb)) {
+ adapter->busy = 0;
+ return false;
+ }
+ /* if this happens, we die */
+ if (test_and_set_bit(0, (void *) &adapter->dmaing))
+ pr_debug("%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction);
+
+ adapter->current_dma.direction = 1;
+ adapter->current_dma.start_time = jiffies;
+
+ if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) {
+ skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen);
+ memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len);
+ target = isa_virt_to_bus(adapter->dma_buffer);
+ }
+ else {
+ target = isa_virt_to_bus(skb->data);
+ }
+ adapter->current_dma.skb = skb;
+
+ flags=claim_dma_lock();
+ disable_dma(dev->dma);
+ clear_dma_ff(dev->dma);
+ set_dma_mode(dev->dma, 0x48); /* dma memory -> io */
+ set_dma_addr(dev->dma, target);
+ set_dma_count(dev->dma, nlen);
+ outb_control(adapter->hcr_val | DMAE | TCEN, dev);
+ enable_dma(dev->dma);
+ release_dma_lock(flags);
+
+ if (elp_debug >= 3)
+ pr_debug("%s: DMA transfer started\n", dev->name);
+
+ return true;
+}
+
+/*
+ * The upper layer thinks we timed out
+ */
+
+static void elp_timeout(struct net_device *dev)
+{
+ int stat;
+
+ stat = inb_status(dev->base_addr);
+ pr_warning("%s: transmit timed out, lost %s?\n", dev->name,
+ (stat & ACRF) ? "interrupt" : "command");
+ if (elp_debug >= 1)
+ pr_debug("%s: status %#02x\n", dev->name, stat);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ dev->stats.tx_dropped++;
+ netif_wake_queue(dev);
+}
+
+/******************************************************
+ *
+ * start the transmitter
+ * return NETDEV_TX_OK if queued OK, else NETDEV_TX_BUSY
+ *
+ ******************************************************/
+
+static netdev_tx_t elp_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned long flags;
+ elp_device *adapter = netdev_priv(dev);
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ check_3c505_dma(dev);
+
+ if (elp_debug >= 3)
+ pr_debug("%s: request to send packet of length %d\n", dev->name, (int) skb->len);
+
+ netif_stop_queue(dev);
+
+ /*
+ * send the packet at skb->data for skb->len
+ */
+ if (!send_packet(dev, skb)) {
+ if (elp_debug >= 2) {
+ pr_debug("%s: failed to transmit packet\n", dev->name);
+ }
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+ if (elp_debug >= 3)
+ pr_debug("%s: packet of length %d sent\n", dev->name, (int) skb->len);
+
+ prime_rx(dev);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ netif_start_queue(dev);
+ return NETDEV_TX_OK;
+}
+
+/******************************************************
+ *
+ * return statistics on the board
+ *
+ ******************************************************/
+
+static struct net_device_stats *elp_get_stats(struct net_device *dev)
+{
+ elp_device *adapter = netdev_priv(dev);
+
+ if (elp_debug >= 3)
+ pr_debug("%s: request for stats\n", dev->name);
+
+ /* If the device is closed, just return the latest stats we have;
+ * we cannot query the adapter without interrupts enabled. */
+ if (!netif_running(dev))
+ return &dev->stats;
+
+ /* send a get statistics command to the board */
+ adapter->tx_pcb.command = CMD_NETWORK_STATISTICS;
+ adapter->tx_pcb.length = 0;
+ adapter->got[CMD_NETWORK_STATISTICS] = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb))
+ pr_err("%s: couldn't send get statistics command\n", dev->name);
+ else {
+ unsigned long timeout = jiffies + TIMEOUT;
+ while (adapter->got[CMD_NETWORK_STATISTICS] == 0 && time_before(jiffies, timeout));
+ if (time_after_eq(jiffies, timeout)) {
+ TIMEOUT_MSG(__LINE__);
+ return &dev->stats;
+ }
+ }
+
+ /* statistics are now up to date */
+ return &dev->stats;
+}
+
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
+}
+
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 level)
+{
+ debug = level;
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+};
+
+/******************************************************
+ *
+ * close the board
+ *
+ ******************************************************/
+
+static int elp_close(struct net_device *dev)
+{
+ elp_device *adapter = netdev_priv(dev);
+
+ if (elp_debug >= 3)
+ pr_debug("%s: request to close device\n", dev->name);
+
+ netif_stop_queue(dev);
+
+ /* Someone may request the device statistic information even when
+ * the interface is closed. The following will update the statistics
+ * structure in the driver, so we'll be able to give current statistics.
+ */
+ (void) elp_get_stats(dev);
+
+ /*
+ * disable interrupts on the board
+ */
+ outb_control(0, dev);
+
+ /*
+ * release the IRQ
+ */
+ free_irq(dev->irq, dev);
+
+ free_dma(dev->dma);
+ free_pages((unsigned long) adapter->dma_buffer, get_order(DMA_BUFFER_SIZE));
+
+ return 0;
+}
+
+
+/************************************************************
+ *
+ * Set the multicast list and receive mode from dev->flags and the
+ * device's multicast list:
+ * empty mc list: receive station + broadcast only
+ * non-empty mc list: also load up to 10 multicast addresses
+ * IFF_PROMISC/IFF_ALLMULTI: promiscuous reception
+ *
+ ************************************************************/
+
+static void elp_set_mc_list(struct net_device *dev)
+{
+ elp_device *adapter = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ int i;
+ unsigned long flags;
+
+ if (elp_debug >= 3)
+ pr_debug("%s: request to set multicast list\n", dev->name);
+
+ spin_lock_irqsave(&adapter->lock, flags);
+
+ if (!(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
+ /* send a "load multicast list" command to the board, max 10 addrs/cmd */
+ /* if num_addrs==0 the list will be cleared */
+ adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
+ /* the PCB data area only has room for 10 addresses, so clamp
+ * the count rather than overrun the buffer */
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ if (i == 10)
+ break;
+ memcpy(adapter->tx_pcb.data.multicast[i++], ha->addr, 6);
+ }
+ adapter->tx_pcb.length = 6 * i;
+ adapter->got[CMD_LOAD_MULTICAST_LIST] = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb))
+ pr_err("%s: couldn't send set_multicast command\n", dev->name);
+ else {
+ unsigned long timeout = jiffies + TIMEOUT;
+ while (adapter->got[CMD_LOAD_MULTICAST_LIST] == 0 && time_before(jiffies, timeout));
+ if (time_after_eq(jiffies, timeout)) {
+ TIMEOUT_MSG(__LINE__);
+ }
+ }
+ if (!netdev_mc_empty(dev))
+ adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD | RECV_MULTI;
+ else /* num_addrs == 0 */
+ adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
+ } else
+ adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_PROMISC;
+ /*
+ * configure adapter to receive messages (as specified above)
+ * and wait for response
+ */
+ if (elp_debug >= 3)
+ pr_debug("%s: sending 82586 configure command\n", dev->name);
+ adapter->tx_pcb.command = CMD_CONFIGURE_82586;
+ adapter->tx_pcb.length = 2;
+ adapter->got[CMD_CONFIGURE_82586] = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb))
+ {
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ pr_err("%s: couldn't send 82586 configure command\n", dev->name);
+ }
+ else {
+ unsigned long timeout = jiffies + TIMEOUT;
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ while (adapter->got[CMD_CONFIGURE_82586] == 0 && time_before(jiffies, timeout));
+ if (time_after_eq(jiffies, timeout))
+ TIMEOUT_MSG(__LINE__);
+ }
+}
+
+/************************************************************
+ *
+ * A couple of tests to see whether there's a 3C505 present or not
+ * Called only by elp_autodetect
+ ************************************************************/
+
+static int __init elp_sense(struct net_device *dev)
+{
+ int addr = dev->base_addr;
+ const char *name = dev->name;
+ byte orig_HSR;
+
+ if (!request_region(addr, ELP_IO_EXTENT, "3c505"))
+ return -ENODEV;
+
+ orig_HSR = inb_status(addr);
+
+ if (elp_debug > 0)
+ pr_debug(search_msg, name, addr);
+
+ if (orig_HSR == 0xff) {
+ if (elp_debug > 0)
+ pr_cont(notfound_msg, 1);
+ goto out;
+ }
+
+ /* Wait for a while; the adapter may still be booting up */
+ if (elp_debug > 0)
+ pr_cont(stilllooking_msg);
+
+ if (orig_HSR & DIR) {
+ /* If HCR.DIR is up, we pull it down. HSR.DIR should follow. */
+ outb(0, dev->base_addr + PORT_CONTROL);
+ msleep(300);
+ if (inb_status(addr) & DIR) {
+ if (elp_debug > 0)
+ pr_cont(notfound_msg, 2);
+ goto out;
+ }
+ } else {
+ /* If HCR.DIR is down, we pull it up. HSR.DIR should follow. */
+ outb(DIR, dev->base_addr + PORT_CONTROL);
+ msleep(300);
+ if (!(inb_status(addr) & DIR)) {
+ if (elp_debug > 0)
+ pr_cont(notfound_msg, 3);
+ goto out;
+ }
+ }
+ /*
+ * It certainly looks like a 3c505.
+ */
+ if (elp_debug > 0)
+ pr_cont(found_msg);
+
+ return 0;
+out:
+ release_region(addr, ELP_IO_EXTENT);
+ return -ENODEV;
+}
+
+/*************************************************************
+ *
+ * Search through addr_list[] and try to find a 3C505
+ * Called only by elplus_setup
+ *************************************************************/
+
+static int __init elp_autodetect(struct net_device *dev)
+{
+ int idx = 0;
+
+ /* if base address set, then only check that address
+ otherwise, run through the table */
+ if (dev->base_addr != 0) { /* dev->base_addr == 0 ==> plain autodetect */
+ if (elp_sense(dev) == 0)
+ return dev->base_addr;
+ } else
+ while ((dev->base_addr = addr_list[idx++])) {
+ if (elp_sense(dev) == 0)
+ return dev->base_addr;
+ }
+
+ /* could not find an adapter */
+ if (elp_debug > 0)
+ pr_debug(couldnot_msg, dev->name);
+
+ return 0; /* Because of this, the layer above will return -ENODEV */
+}
+
+static const struct net_device_ops elp_netdev_ops = {
+ .ndo_open = elp_open,
+ .ndo_stop = elp_close,
+ .ndo_get_stats = elp_get_stats,
+ .ndo_start_xmit = elp_start_xmit,
+ .ndo_tx_timeout = elp_timeout,
+ .ndo_set_rx_mode = elp_set_mc_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/******************************************************
+ *
+ * probe for an Etherlink Plus board at the specified address
+ *
+ ******************************************************/
+
+/* There are three situations we need to be able to detect here:
+
+ * a) the card is idle
+ * b) the card is still booting up
+ * c) the card is stuck in a strange state (some DOS drivers do this)
+ *
+ * In case (a), all is well. In case (b), we wait 10 seconds to see if the
+ * card finishes booting, and carry on if so. In case (c), we do a hard reset,
+ * loop round, and hope for the best.
+ *
+ * This is all very unpleasant, but hopefully avoids the problems with the old
+ * probe code (which had a 15-second delay if the card was idle, and didn't
+ * work at all if it was in a weird state).
+ */
+
+static int __init elplus_setup(struct net_device *dev)
+{
+ elp_device *adapter = netdev_priv(dev);
+ int i, tries, tries1, okay;
+ unsigned long timeout;
+ unsigned long cookie = 0;
+ int err = -ENODEV;
+
+ /*
+ * setup adapter structure
+ */
+
+ dev->base_addr = elp_autodetect(dev);
+ if (!dev->base_addr)
+ return -ENODEV;
+
+ adapter->send_pcb_semaphore = 0;
+
+ for (tries1 = 0; tries1 < 3; tries1++) {
+ outb_control((adapter->hcr_val | CMDE) & ~DIR, dev);
+ /* First try to write just one byte, to see if the card is
+ * responding at all normally.
+ */
+ timeout = jiffies + 5*HZ/100;
+ okay = 0;
+ while (time_before(jiffies, timeout) && !(inb_status(dev->base_addr) & HCRE));
+ if ((inb_status(dev->base_addr) & HCRE)) {
+ outb_command(0, dev->base_addr); /* send a spurious byte */
+ timeout = jiffies + 5*HZ/100;
+ while (time_before(jiffies, timeout) && !(inb_status(dev->base_addr) & HCRE));
+ if (inb_status(dev->base_addr) & HCRE)
+ okay = 1;
+ }
+ if (!okay) {
+ /* Nope, it's ignoring the command register. This means that
+ * either it's still booting up, or it's died.
+ */
+ pr_err("%s: command register wouldn't drain, ", dev->name);
+ if ((inb_status(dev->base_addr) & 7) == 3) {
+ /* If the adapter status is 3, it *could* still be booting.
+ * Give it the benefit of the doubt for 10 seconds.
+ */
+ pr_cont("assuming 3c505 still starting\n");
+ timeout = jiffies + 10*HZ;
+ while (time_before(jiffies, timeout) && (inb_status(dev->base_addr) & 7));
+ if (inb_status(dev->base_addr) & 7) {
+ pr_err("%s: 3c505 failed to start\n", dev->name);
+ } else {
+ okay = 1; /* It started */
+ }
+ } else {
+ /* Otherwise, it must just be in a strange
+ * state. We probably need to kick it.
+ */
+ pr_cont("3c505 is sulking\n");
+ }
+ }
+ for (tries = 0; tries < 5 && okay; tries++) {
+
+ /*
+ * Try to set the Ethernet address, to make sure that the board
+ * is working.
+ */
+ adapter->tx_pcb.command = CMD_STATION_ADDRESS;
+ adapter->tx_pcb.length = 0;
+ cookie = probe_irq_on();
+ if (!send_pcb(dev, &adapter->tx_pcb)) {
+ pr_err("%s: could not send first PCB\n", dev->name);
+ probe_irq_off(cookie);
+ continue;
+ }
+ if (!receive_pcb(dev, &adapter->rx_pcb)) {
+ pr_err("%s: could not read first PCB\n", dev->name);
+ probe_irq_off(cookie);
+ continue;
+ }
+ if ((adapter->rx_pcb.command != CMD_ADDRESS_RESPONSE) ||
+ (adapter->rx_pcb.length != 6)) {
+ pr_err("%s: first PCB wrong (%d, %d)\n", dev->name,
+ adapter->rx_pcb.command, adapter->rx_pcb.length);
+ probe_irq_off(cookie);
+ continue;
+ }
+ goto okay;
+ }
+ /* It's broken. Do a hard reset to re-initialise the board,
+ * and try again.
+ */
+ pr_info("%s: resetting adapter\n", dev->name);
+ outb_control(adapter->hcr_val | FLSH | ATTN, dev);
+ outb_control(adapter->hcr_val & ~(FLSH | ATTN), dev);
+ }
+ pr_err("%s: failed to initialise 3c505\n", dev->name);
+ goto out;
+
+ okay:
+ if (dev->irq) { /* Is there a preset IRQ? */
+ int rpt = probe_irq_off(cookie);
+ if (dev->irq != rpt) {
+ pr_warning("%s: warning, irq %d configured but %d detected\n", dev->name, dev->irq, rpt);
+ }
+ /* if dev->irq == probe_irq_off(cookie), all is well */
+ } else /* No preset IRQ; just use what we can detect */
+ dev->irq = probe_irq_off(cookie);
+ switch (dev->irq) { /* Legal, sane? */
+ case 0:
+ pr_err("%s: IRQ probe failed: check 3c505 jumpers.\n",
+ dev->name);
+ goto out;
+ case 1:
+ case 6:
+ case 8:
+ case 13:
+ pr_err("%s: Impossible IRQ %d reported by probe_irq_off().\n",
+ dev->name, dev->irq);
+ goto out;
+ }
+ /*
+ * Now we have the IRQ number so we can disable the interrupts from
+ * the board until the board is opened.
+ */
+ outb_control(adapter->hcr_val & ~CMDE, dev);
+
+ /*
+ * copy Ethernet address into structure
+ */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = adapter->rx_pcb.data.eth_addr[i];
+
+ /* find a DMA channel */
+ if (!dev->dma) {
+ if (dev->mem_start) {
+ dev->dma = dev->mem_start & 7;
+ }
+ else {
+ pr_warning("%s: warning, DMA channel not specified, using default\n", dev->name);
+ dev->dma = ELP_DMA;
+ }
+ }
+
+ /*
+ * print remainder of startup message
+ */
+ pr_info("%s: 3c505 at %#lx, irq %d, dma %d, addr %pM, ",
+ dev->name, dev->base_addr, dev->irq, dev->dma, dev->dev_addr);
+ /*
+ * read more information from the adapter
+ */
+
+ adapter->tx_pcb.command = CMD_ADAPTER_INFO;
+ adapter->tx_pcb.length = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb) ||
+ !receive_pcb(dev, &adapter->rx_pcb) ||
+ (adapter->rx_pcb.command != CMD_ADAPTER_INFO_RESPONSE) ||
+ (adapter->rx_pcb.length != 10)) {
+ pr_cont("not responding to second PCB\n");
+ } else {
+ pr_cont("rev %d.%d, %dk\n", adapter->rx_pcb.data.info.major_vers,
+ adapter->rx_pcb.data.info.minor_vers, adapter->rx_pcb.data.info.RAM_sz);
+ }
+
+ /*
+ * reconfigure the adapter memory to better suit our purposes
+ */
+ adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY;
+ adapter->tx_pcb.length = 12;
+ adapter->tx_pcb.data.memconf.cmd_q = 8;
+ adapter->tx_pcb.data.memconf.rcv_q = 8;
+ adapter->tx_pcb.data.memconf.mcast = 10;
+ adapter->tx_pcb.data.memconf.frame = 10;
+ adapter->tx_pcb.data.memconf.rcv_b = 10;
+ adapter->tx_pcb.data.memconf.progs = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb) ||
+ !receive_pcb(dev, &adapter->rx_pcb) ||
+ (adapter->rx_pcb.command != CMD_CONFIGURE_ADAPTER_RESPONSE) ||
+ (adapter->rx_pcb.length != 2)) {
+ pr_err("%s: could not configure adapter memory\n", dev->name);
+ }
+ if (adapter->rx_pcb.data.configure) {
+ pr_err("%s: adapter configuration failed\n", dev->name);
+ }
+
+ dev->netdev_ops = &elp_netdev_ops;
+ dev->watchdog_timeo = 10*HZ;
+ dev->ethtool_ops = &netdev_ethtool_ops; /* local */
+
+ dev->mem_start = dev->mem_end = 0;
+
+ err = register_netdev(dev);
+ if (err)
+ goto out;
+
+ return 0;
+out:
+ release_region(dev->base_addr, ELP_IO_EXTENT);
+ return err;
+}
+
+#ifndef MODULE
+struct net_device * __init elplus_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(elp_device));
+ int err;
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = elplus_setup(dev);
+ if (err) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+ return dev;
+}
+
+#else
+static struct net_device *dev_3c505[ELP_MAX_CARDS];
+static int io[ELP_MAX_CARDS];
+static int irq[ELP_MAX_CARDS];
+static int dma[ELP_MAX_CARDS];
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(dma, int, NULL, 0);
+MODULE_PARM_DESC(io, "EtherLink Plus I/O base address(es)");
+MODULE_PARM_DESC(irq, "EtherLink Plus IRQ number(s) (assigned)");
+MODULE_PARM_DESC(dma, "EtherLink Plus DMA channel(s)");
+
+int __init init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < ELP_MAX_CARDS; this_dev++) {
+ struct net_device *dev = alloc_etherdev(sizeof(elp_device));
+ if (!dev)
+ break;
+
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ if (dma[this_dev]) {
+ dev->dma = dma[this_dev];
+ } else {
+ dev->dma = ELP_DMA;
+ pr_warning("3c505.c: warning, using default DMA channel,\n");
+ }
+ if (io[this_dev] == 0) {
+ if (this_dev) {
+ free_netdev(dev);
+ break;
+ }
+ pr_notice("3c505.c: module autoprobe not recommended, give io=xx.\n");
+ }
+ if (elplus_setup(dev) != 0) {
+ pr_warning("3c505.c: Failed to register card at 0x%x.\n", io[this_dev]);
+ free_netdev(dev);
+ break;
+ }
+ dev_3c505[this_dev] = dev;
+ found++;
+ }
+ if (!found)
+ return -ENODEV;
+ return 0;
+}
+
+void __exit cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < ELP_MAX_CARDS; this_dev++) {
+ struct net_device *dev = dev_3c505[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ release_region(dev->base_addr, ELP_IO_EXTENT);
+ free_netdev(dev);
+ }
+ }
+}
+
+#endif /* MODULE */
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/i825xx/3c505.h b/drivers/net/ethernet/i825xx/3c505.h
new file mode 100644
index 000000000000..04df2a9002b6
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/3c505.h
@@ -0,0 +1,292 @@
+/*****************************************************************
+ *
+ * defines for 3Com Etherlink Plus adapter
+ *
+ *****************************************************************/
+
+#define ELP_DMA 6
+#define ELP_RX_PCBS 4
+#define ELP_MAX_CARDS 4
+
+/*
+ * I/O register offsets
+ */
+#define PORT_COMMAND 0x00 /* read/write, 8-bit */
+#define PORT_STATUS 0x02 /* read only, 8-bit */
+#define PORT_AUXDMA 0x02 /* write only, 8-bit */
+#define PORT_DATA 0x04 /* read/write, 16-bit */
+#define PORT_CONTROL 0x06 /* read/write, 8-bit */
+
+#define ELP_IO_EXTENT 0x10 /* size of used IO registers */
+
+/*
+ * host control registers bits
+ */
+#define ATTN 0x80 /* attention */
+#define FLSH 0x40 /* flush data register */
+#define DMAE 0x20 /* DMA enable */
+#define DIR 0x10 /* direction */
+#define TCEN 0x08 /* terminal count interrupt enable */
+#define CMDE 0x04 /* command register interrupt enable */
+#define HSF2 0x02 /* host status flag 2 */
+#define HSF1 0x01 /* host status flag 1 */
+
+/*
+ * combinations of HSF flags used for PCB transmission
+ */
+#define HSF_PCB_ACK HSF1
+#define HSF_PCB_NAK HSF2
+#define HSF_PCB_END (HSF2|HSF1)
+#define HSF_PCB_MASK (HSF2|HSF1)
+
+/*
+ * host status register bits
+ */
+#define HRDY 0x80 /* data register ready */
+#define HCRE 0x40 /* command register empty */
+#define ACRF 0x20 /* adapter command register full */
+/* #define DIR 0x10 direction - same as in control register */
+#define DONE 0x08 /* DMA done */
+#define ASF3 0x04 /* adapter status flag 3 */
+#define ASF2 0x02 /* adapter status flag 2 */
+#define ASF1 0x01 /* adapter status flag 1 */
+
+/*
+ * combinations of ASF flags used for PCB reception
+ */
+#define ASF_PCB_ACK ASF1
+#define ASF_PCB_NAK ASF2
+#define ASF_PCB_END (ASF2|ASF1)
+#define ASF_PCB_MASK (ASF2|ASF1)
+
+/*
+ * host aux DMA register bits
+ */
+#define DMA_BRST 0x01 /* DMA burst */
+
+/*
+ * maximum amount of data allowed in a PCB
+ */
+#define MAX_PCB_DATA 62
+
+/*****************************************************************
+ *
+ * timeout value, in jiffies (300 jiffies is 3 seconds at HZ=100)
+ * this is a rough bound used by polling loops to stop them from
+ * locking up the whole machine in the case of failure or
+ * error conditions
+ *
+ *****************************************************************/
+
+#define TIMEOUT 300
+
+/*****************************************************************
+ *
+ * PCB commands
+ *
+ *****************************************************************/
+
+enum {
+ /*
+ * host PCB commands
+ */
+ CMD_CONFIGURE_ADAPTER_MEMORY = 0x01,
+ CMD_CONFIGURE_82586 = 0x02,
+ CMD_STATION_ADDRESS = 0x03,
+ CMD_DMA_DOWNLOAD = 0x04,
+ CMD_DMA_UPLOAD = 0x05,
+ CMD_PIO_DOWNLOAD = 0x06,
+ CMD_PIO_UPLOAD = 0x07,
+ CMD_RECEIVE_PACKET = 0x08,
+ CMD_TRANSMIT_PACKET = 0x09,
+ CMD_NETWORK_STATISTICS = 0x0a,
+ CMD_LOAD_MULTICAST_LIST = 0x0b,
+ CMD_CLEAR_PROGRAM = 0x0c,
+ CMD_DOWNLOAD_PROGRAM = 0x0d,
+ CMD_EXECUTE_PROGRAM = 0x0e,
+ CMD_SELF_TEST = 0x0f,
+ CMD_SET_STATION_ADDRESS = 0x10,
+ CMD_ADAPTER_INFO = 0x11,
+ NUM_TRANSMIT_CMDS,
+
+ /*
+ * adapter PCB commands
+ */
+ CMD_CONFIGURE_ADAPTER_RESPONSE = 0x31,
+ CMD_CONFIGURE_82586_RESPONSE = 0x32,
+ CMD_ADDRESS_RESPONSE = 0x33,
+ CMD_DOWNLOAD_DATA_REQUEST = 0x34,
+ CMD_UPLOAD_DATA_REQUEST = 0x35,
+ CMD_RECEIVE_PACKET_COMPLETE = 0x38,
+ CMD_TRANSMIT_PACKET_COMPLETE = 0x39,
+ CMD_NETWORK_STATISTICS_RESPONSE = 0x3a,
+ CMD_LOAD_MULTICAST_RESPONSE = 0x3b,
+ CMD_CLEAR_PROGRAM_RESPONSE = 0x3c,
+ CMD_DOWNLOAD_PROGRAM_RESPONSE = 0x3d,
+ CMD_EXECUTE_RESPONSE = 0x3e,
+ CMD_SELF_TEST_RESPONSE = 0x3f,
+ CMD_SET_ADDRESS_RESPONSE = 0x40,
+ CMD_ADAPTER_INFO_RESPONSE = 0x41
+};
+
+/* Definitions for the PCB data structure */
+
+/* Data units */
+typedef unsigned char byte;
+typedef unsigned short int word;
+typedef unsigned long int dword;
+
+/* Data structures */
+struct Memconf {
+ word cmd_q,
+ rcv_q,
+ mcast,
+ frame,
+ rcv_b,
+ progs;
+};
+
+struct Rcv_pkt {
+ word buf_ofs,
+ buf_seg,
+ buf_len,
+ timeout;
+};
+
+struct Xmit_pkt {
+ word buf_ofs,
+ buf_seg,
+ pkt_len;
+};
+
+struct Rcv_resp {
+ word buf_ofs,
+ buf_seg,
+ buf_len,
+ pkt_len,
+ timeout,
+ status;
+ dword timetag;
+};
+
+struct Xmit_resp {
+ word buf_ofs,
+ buf_seg,
+ c_stat,
+ status;
+};
+
+
+struct Netstat {
+ dword tot_recv,
+ tot_xmit;
+ word err_CRC,
+ err_align,
+ err_res,
+ err_ovrrun;
+};
+
+
+struct Selftest {
+ word error;
+ union {
+ word ROM_cksum;
+ struct {
+ word ofs, seg;
+ } RAM;
+ word i82586;
+ } failure;
+};
+
+struct Info {
+ byte minor_vers,
+ major_vers;
+ word ROM_cksum,
+ RAM_sz,
+ free_ofs,
+ free_seg;
+};
+
+struct Memdump {
+ word size,
+ off,
+ seg;
+};
+
+/*
+Primary Command Block. The most important data structure. All communication
+between the host and the adapter is done with these. (Except for the actual
+Ethernet data, which has different packaging.)
+*/
+typedef struct {
+ byte command;
+ byte length;
+ union {
+ struct Memconf memconf;
+ word configure;
+ struct Rcv_pkt rcv_pkt;
+ struct Xmit_pkt xmit_pkt;
+ byte multicast[10][6];
+ byte eth_addr[6];
+ byte failed;
+ struct Rcv_resp rcv_resp;
+ struct Xmit_resp xmit_resp;
+ struct Netstat netstat;
+ struct Selftest selftest;
+ struct Info info;
+ struct Memdump memdump;
+ byte raw[62];
+ } data;
+} pcb_struct;
+
+/* These defines for 'configure' */
+#define RECV_STATION 0x00
+#define RECV_BROAD 0x01
+#define RECV_MULTI 0x02
+#define RECV_PROMISC 0x04
+#define NO_LOOPBACK 0x00
+#define INT_LOOPBACK 0x08
+#define EXT_LOOPBACK 0x10
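+
+/* For illustration, these bits are combined into the 16-bit configure
+ * word of a PCB before it is handed to send_pcb(); a minimal sketch,
+ * mirroring what elp_open() does in 3c505.c:
+ */
+#if 0
+ pcb_struct pcb;
+
+ pcb.command = CMD_CONFIGURE_82586;
+ pcb.data.configure = NO_LOOPBACK | RECV_BROAD; /* receive mode word */
+ pcb.length = 2; /* bytes of data used */
+ send_pcb(dev, &pcb);
+#endif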
+
+/*****************************************************************
+ *
+ * structure to hold context information for adapter
+ *
+ *****************************************************************/
+
+#define DMA_BUFFER_SIZE 1600
+#define BACKLOG_SIZE 4
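+
+/* The rx_backlog ring below distinguishes "empty" (in == out) from
+ * "full" (backlog_next(in) == out), so it holds at most
+ * BACKLOG_SIZE - 1 queued packet lengths; see receive_pcb() in 3c505.c. */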
+
+typedef struct {
+ volatile short got[NUM_TRANSMIT_CMDS]; /* flags for
+ command completion */
+ pcb_struct tx_pcb; /* PCB for foreground sending */
+ pcb_struct rx_pcb; /* PCB for foreground receiving */
+ pcb_struct itx_pcb; /* PCB for background sending */
+ pcb_struct irx_pcb; /* PCB for background receiving */
+
+ void *dma_buffer;
+
+ struct {
+ unsigned int length[BACKLOG_SIZE];
+ unsigned int in;
+ unsigned int out;
+ } rx_backlog;
+
+ struct {
+ unsigned int direction;
+ unsigned int length;
+ struct sk_buff *skb;
+ void *target;
+ unsigned long start_time;
+ } current_dma;
+
+ /* flags */
+ unsigned long send_pcb_semaphore;
+ unsigned long dmaing;
+ unsigned long busy;
+
+ unsigned int rx_active; /* number of receive PCBs */
+ volatile unsigned char hcr_val; /* what we think the HCR contains */
+ spinlock_t lock; /* Interrupt v tx lock */
+} elp_device;
diff --git a/drivers/net/ethernet/i825xx/3c507.c b/drivers/net/ethernet/i825xx/3c507.c
new file mode 100644
index 000000000000..1e945551c144
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/3c507.c
@@ -0,0 +1,939 @@
+/* 3c507.c: An EtherLink16 device driver for Linux. */
+/*
+ Written 1993,1994 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+
+ Thanks go to jennings@Montrouge.SMR.slb.com (Patrick Jennings)
+ and jrs@world.std.com (Rick Sladkey) for testing and bugfixes.
+ Mark Salazar <leslie@access.digex.net> made the changes for cards with
+ only 16K packet buffers.
+
+ Things remaining to do:
+ Verify that the tx and rx buffers don't have fencepost errors.
+ Move the theory of operation and memory map documentation.
+ The statistics need to be updated correctly.
+*/
+
+#define DRV_NAME "3c507"
+#define DRV_VERSION "1.10a"
+#define DRV_RELDATE "11/17/2001"
+
+static const char version[] =
+ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Donald Becker (becker@scyld.com)\n";
+
+/*
+ Sources:
+ This driver wouldn't have been written without the availability of the
+ Crynwr driver source code. It provided a known-working implementation
+ that filled in the gaping holes of the Intel documentation. Three cheers
+ for Russ Nelson.
+
+ Intel Microcommunications Databook, Vol. 1, 1990. It provides just enough
+ info that the casual reader might think that it documents the i82586 :-<.
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+/* use 0 for production, 1 for verification, 2..7 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+#define debug net_debug
+
+
+/*
+ Details of the i82586.
+
+ You'll really need the databook to understand the details of this part,
+ but the outline is that the i82586 has two separate processing units.
+ Both are started from a list of three configuration tables, of which only
+ the last, the System Control Block (SCB), is used after reset-time. The SCB
+ has the following fields:
+ Status word
+ Command word
+ Tx/Command block addr.
+ Rx block addr.
+ The command word accepts the following controls for the Tx and Rx units:
+ */
+
+#define CUC_START 0x0100
+#define CUC_RESUME 0x0200
+#define CUC_SUSPEND 0x0300
+#define RX_START 0x0010
+#define RX_RESUME 0x0020
+#define RX_SUSPEND 0x0030
+
+/* The Rx unit uses a list of frame descriptors and a list of data buffer
+ descriptors. We use full-sized (1518 byte) data buffers, so there is
+ a one-to-one pairing of frame descriptors to buffer descriptors.
+
+ The Tx ("command") unit executes a list of commands that look like:
+ Status word Written by the 82586 when the command is done.
+ Command word Command in lower 3 bits, post-command action in upper 3
+ Link word The address of the next command.
+ Parameters (as needed).
+
+ Some definitions related to the Command Word are:
+ */
+#define CMD_EOL 0x8000 /* The last command of the list, stop. */
+#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
+#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
+
+enum commands {
+ CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
+ CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7};
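+
+/* A command word combines one of these opcodes with the control bits
+ * above; for instance, a transmit that interrupts on completion and
+ * terminates the command list would be encoded as (illustrative only):
+ *
+ * CmdTx | CMD_INTR | CMD_EOL
+ */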
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ int last_restart;
+ ushort rx_head;
+ ushort rx_tail;
+ ushort tx_head;
+ ushort tx_cmd_link;
+ ushort tx_reap;
+ ushort tx_pkts_in_ring;
+ spinlock_t lock;
+ void __iomem *base;
+};
+
+/*
+ Details of the EtherLink16 Implementation
+ The 3c507 is a generic shared-memory i82586 implementation.
+ The host can map 16K, 32K, 48K, or 64K of the 64K memory into
+ 0x0[CD][08]0000, or all 64K into 0xF[02468]0000.
+ */
+
+/* Offsets from the base I/O address. */
+#define SA_DATA 0 /* Station address data, or 3Com signature. */
+#define MISC_CTRL 6 /* Switch the SA_DATA banks, and bus config bits. */
+#define RESET_IRQ 10 /* Reset the latched IRQ line. */
+#define SIGNAL_CA 11 /* Frob the 82586 Channel Attention line. */
+#define ROM_CONFIG 13
+#define MEM_CONFIG 14
+#define IRQ_CONFIG 15
+#define EL16_IO_EXTENT 16
+
+/* The ID port is used at boot-time to locate the ethercard. */
+#define ID_PORT 0x100
+
+/* Offsets to registers in the mailbox (SCB). */
+#define iSCB_STATUS 0x8
+#define iSCB_CMD 0xA
+#define iSCB_CBL 0xC /* Command BLock offset. */
+#define iSCB_RFA 0xE /* Rx Frame Area offset. */
+
+/* Since the 3c507 maps the shared memory window so that the last byte is
+ at 82586 address FFFF, the first byte is at 82586 address 0, 16K, 32K, or
+ 48K corresponding to window sizes of 64K, 48K, 32K and 16K respectively.
+ We can account for this by setting the 'SCB Base' entry in the ISCP table
+ below for all the 16 bit offset addresses, and also adding the 'SCB Base'
+ value to all 24 bit physical addresses (in the SCP table and the TX and RX
+ Buffer Descriptors).
+ -Mark
+ */
+#define SCB_BASE ((unsigned)64*1024 - (dev->mem_end - dev->mem_start))
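+
+/* Worked example: a 48K window gives dev->mem_end - dev->mem_start ==
+ * 0xC000, so SCB_BASE == 64K - 48K == 0x4000; host offset 0 then maps
+ * to 82586 address 0x4000 and the window's last byte lands at 0xFFFF. */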
+
+/*
+ What follows in 'init_words[]' is the "program" that is downloaded to the
+ 82586 memory. It's mostly tables and command blocks, and starts at the
+ reset address 0xfffff6. This is designed to be similar to the EtherExpress,
+ thus the unusual location of the SCB at 0x0008.
+
+ Even with the additional "don't care" values, doing it this way takes less
+ program space than initializing the individual tables, and I feel it's much
+ cleaner.
+
+ The databook is particularly useless for the first two structures, I had
+ to use the Crynwr driver as an example.
+
+ The memory setup is as follows:
+ */
+
+#define CONFIG_CMD 0x0018
+#define SET_SA_CMD 0x0024
+#define SA_OFFSET 0x002A
+#define IDLELOOP 0x30
+#define TDR_CMD 0x38
+#define TDR_TIME 0x3C
+#define DUMP_CMD 0x40
+#define DIAG_CMD 0x48
+#define SET_MC_CMD 0x4E
+#define DUMP_DATA 0x56 /* A 170-byte buffer shared by the Dump and Set-MC commands. */
+
+#define TX_BUF_START 0x0100
+#define NUM_TX_BUFS 5
+#define TX_BUF_SIZE (1518+14+20+16) /* packet+header+TBD */
+
+#define RX_BUF_START 0x2000
+#define RX_BUF_SIZE (1518+14+18) /* packet+header+RBD */
+#define RX_BUF_END (dev->mem_end - dev->mem_start)
+
+#define TX_TIMEOUT (HZ/20)
+
+/*
+ That's it: only 86 bytes to set up the beast, including every extra
+ command available. The 170 byte buffer at DUMP_DATA is shared between the
+ Dump command (called only by the diagnostic program) and the SetMulticastList
+ command.
+
+ To complete the memory setup you only have to write the station address at
+ SA_OFFSET and create the Tx & Rx buffer lists.
+
+ The Tx command chain and buffer list is setup as follows:
+ A Tx command table, with the data buffer pointing to...
+ A Tx data buffer descriptor. The packet is in a single buffer, rather than
+ chaining together several smaller buffers.
+ A NoOp command, which initially points to itself,
+ And the packet data.
+
+ A transmit is done by filling in the Tx command table and data buffer,
+ re-writing the NoOp command, and finally changing the offset of the last
+ command to point to the current Tx command. When the Tx command is finished,
+ it jumps to the NoOp, where it loops until the next Tx command changes the
+ "link offset" in the NoOp. This way the 82586 never has to go through the
+ slow restart sequence.
+
+ The Rx buffer list is set up in the obvious ring structure. We have enough
+ memory (and low enough interrupt latency) that we can avoid the complicated
+ Rx buffer linked lists by always associating a full-size Rx data buffer with
+ each Rx data frame.
+
+ I currently use five (NUM_TX_BUFS) transmit buffers starting at TX_BUF_START (0x0100), and
+ use the rest of memory, from RX_BUF_START to RX_BUF_END, for Rx buffers.
+
+ */
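+
+/* A minimal sketch of the chaining step described above, as done by
+ hardware_send_packet() below (tx_block+20 is the link word of the
+ freshly written NoOp command):
+
+ writew(tx_block, lp->base + lp->tx_cmd_link);
+ lp->tx_cmd_link = tx_block + 20;
+
+ The 82586 falls out of the old NoOp loop straight into the new Tx
+ command, so no CU restart is ever needed. */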
+
+static unsigned short init_words[] = {
+ /* System Configuration Pointer (SCP). */
+ 0x0000, /* Set bus size to 16 bits. */
+ 0,0, /* pad words. */
+ 0x0000,0x0000, /* ISCP phys addr, set in init_82586_mem(). */
+
+ /* Intermediate System Configuration Pointer (ISCP). */
+ 0x0001, /* Status word that's cleared when init is done. */
+ 0x0008,0,0, /* SCB offset, (skip, skip) */
+
+ /* System Control Block (SCB). */
+ 0,0xf000|RX_START|CUC_START, /* SCB status and cmd. */
+ CONFIG_CMD, /* Command list pointer, points to Configure. */
+ RX_BUF_START, /* Rx block list. */
+ 0,0,0,0, /* Error count: CRC, align, buffer, overrun. */
+
+ /* 0x0018: Configure command. Change to put MAC data with packet. */
+ 0, CmdConfigure, /* Status, command. */
+ SET_SA_CMD, /* Next command is Set Station Addr. */
+ 0x0804, /* "4" bytes of config data, 8 byte FIFO. */
+ 0x2e40, /* Magic values, including MAC data location. */
+ 0, /* Unused pad word. */
+
+ /* 0x0024: Setup station address command. */
+ 0, CmdSASetup,
+ SET_MC_CMD, /* Next command. */
+ 0xaa00,0xb000,0x0bad, /* Station address (to be filled in) */
+
+ /* 0x0030: NOP, looping back to itself. Point to first Tx buffer to Tx. */
+ 0, CmdNOp, IDLELOOP, 0 /* pad */,
+
+ /* 0x0038: An unused Time-Domain Reflectometer command. */
+ 0, CmdTDR, IDLELOOP, 0,
+
+ /* 0x0040: An unused Dump State command. */
+ 0, CmdDump, IDLELOOP, DUMP_DATA,
+
+ /* 0x0048: An unused Diagnose command. */
+ 0, CmdDiagnose, IDLELOOP,
+
+ /* 0x004E: An empty set-multicast-list command. */
+ 0, CmdMulticastList, IDLELOOP, 0,
+};
+
+/* Index to functions, as function prototypes. */
+
+static int el16_probe1(struct net_device *dev, int ioaddr);
+static int el16_open(struct net_device *dev);
+static netdev_tx_t el16_send_packet(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t el16_interrupt(int irq, void *dev_id);
+static void el16_rx(struct net_device *dev);
+static int el16_close(struct net_device *dev);
+static void el16_tx_timeout (struct net_device *dev);
+
+static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad);
+static void init_82586_mem(struct net_device *dev);
+static const struct ethtool_ops netdev_ethtool_ops;
+static void init_rx_bufs(struct net_device *);
+
+static int io = 0x300;
+static int irq;
+static int mem_start;
+
+
+/* Check for a network adaptor of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, (detachable devices only) allocate space for the
+ device and return success.
+ */
+
+struct net_device * __init el16_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
+ static const unsigned ports[] = { 0x300, 0x320, 0x340, 0x280, 0};
+ const unsigned *port;
+ int err = -ENODEV;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ io = dev->base_addr;
+ irq = dev->irq;
+ mem_start = dev->mem_start & 15;
+ }
+
+ if (io > 0x1ff) /* Check a single specified location. */
+ err = el16_probe1(dev, io);
+ else if (io != 0)
+ err = -ENXIO; /* Don't probe at all. */
+ else {
+ for (port = ports; *port; port++) {
+ err = el16_probe1(dev, *port);
+ if (!err)
+ break;
+ }
+ }
+
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ free_irq(dev->irq, dev);
+ iounmap(((struct net_local *)netdev_priv(dev))->base);
+ release_region(dev->base_addr, EL16_IO_EXTENT);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = el16_open,
+ .ndo_stop = el16_close,
+ .ndo_start_xmit = el16_send_packet,
+ .ndo_tx_timeout = el16_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __init el16_probe1(struct net_device *dev, int ioaddr)
+{
+ static unsigned char init_ID_done;
+ int i, irq, irqval, retval;
+ struct net_local *lp;
+
+ if (init_ID_done == 0) {
+ ushort lrs_state = 0xff;
+ /* Send the ID sequence to the ID_PORT to enable the board(s). */
+ outb(0x00, ID_PORT);
+ for(i = 0; i < 255; i++) {
+ outb(lrs_state, ID_PORT);
+ lrs_state <<= 1;
+ if (lrs_state & 0x100)
+ lrs_state ^= 0xe7;
+ }
+ outb(0x00, ID_PORT);
+ init_ID_done = 1;
+ }
+
+ if (!request_region(ioaddr, EL16_IO_EXTENT, DRV_NAME))
+ return -ENODEV;
+
+ if ((inb(ioaddr) != '*') || (inb(ioaddr + 1) != '3') ||
+ (inb(ioaddr + 2) != 'C') || (inb(ioaddr + 3) != 'O')) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ pr_info("%s: 3c507 at %#x,", dev->name, ioaddr);
+
+ /* We should make a few more checks here, like the first three octets of
+ the S.A. for the manufacturer's code. */
+
+ irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
+
+ irqval = request_irq(irq, el16_interrupt, 0, DRV_NAME, dev);
+ if (irqval) {
+ pr_cont("\n");
+ pr_err("3c507: unable to get IRQ %d (irqval=%d).\n", irq, irqval);
+ retval = -EAGAIN;
+ goto out;
+ }
+
+ /* We've committed to using the board, and can start filling in *dev. */
+ dev->base_addr = ioaddr;
+
+ outb(0x01, ioaddr + MISC_CTRL);
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + i);
+ pr_cont(" %pM", dev->dev_addr);
+
+ if (mem_start)
+ net_debug = mem_start & 7;
+
+#ifdef MEM_BASE
+ dev->mem_start = MEM_BASE;
+ dev->mem_end = dev->mem_start + 0x10000;
+#else
+ {
+ int base;
+ int size;
+ char mem_config = inb(ioaddr + MEM_CONFIG);
+ if (mem_config & 0x20) {
+ size = 64*1024;
+ base = 0xf00000 + (mem_config & 0x08 ? 0x080000
+ : ((mem_config & 3) << 17));
+ } else {
+ size = ((mem_config & 3) + 1) << 14;
+ base = 0x0c0000 + ( (mem_config & 0x18) << 12);
+ }
+ dev->mem_start = base;
+ dev->mem_end = base + size;
+ }
+#endif
+
+ dev->if_port = (inb(ioaddr + ROM_CONFIG) & 0x80) ? 1 : 0;
+ dev->irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
+
+ pr_cont(", IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->irq,
+ dev->if_port ? "ex" : "in", dev->mem_start, dev->mem_end-1);
+
+ if (net_debug)
+ pr_debug("%s", version);
+
+ lp = netdev_priv(dev);
+ spin_lock_init(&lp->lock);
+ lp->base = ioremap(dev->mem_start, RX_BUF_END);
+ if (!lp->base) {
+ pr_err("3c507: unable to remap memory\n");
+ retval = -EAGAIN;
+ goto out1;
+ }
+
+ dev->netdev_ops = &netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+ dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */
+ return 0;
+out1:
+ free_irq(dev->irq, dev);
+out:
+ release_region(ioaddr, EL16_IO_EXTENT);
+ return retval;
+}
+
+static int el16_open(struct net_device *dev)
+{
+ /* Initialize the 82586 memory and start it. */
+ init_82586_mem(dev);
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+
+static void el16_tx_timeout (struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ void __iomem *shmem = lp->base;
+
+ if (net_debug > 1)
+ pr_debug("%s: transmit timed out, %s? ", dev->name,
+ readw(shmem + iSCB_STATUS) & 0x8000 ? "IRQ conflict" :
+ "network cable problem");
+ /* Try to restart the adaptor. */
+ if (lp->last_restart == dev->stats.tx_packets) {
+ if (net_debug > 1)
+ pr_cont("Resetting board.\n");
+ /* Completely reset the adaptor. */
+ init_82586_mem (dev);
+ lp->tx_pkts_in_ring = 0;
+ } else {
+ /* Issue the channel attention signal and hope it "gets better". */
+ if (net_debug > 1)
+ pr_cont("Kicking board.\n");
+ writew(0xf000 | CUC_START | RX_START, shmem + iSCB_CMD);
+ outb (0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
+ lp->last_restart = dev->stats.tx_packets;
+ }
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue (dev);
+}
+
+
+static netdev_tx_t el16_send_packet (struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+
+ netif_stop_queue (dev);
+
+ spin_lock_irqsave (&lp->lock, flags);
+
+ dev->stats.tx_bytes += length;
+ /* Disable the 82586's input to the interrupt line. */
+ outb (0x80, ioaddr + MISC_CTRL);
+
+ hardware_send_packet (dev, buf, skb->len, length - skb->len);
+
+ /* Enable the 82586 interrupt input. */
+ outb (0x84, ioaddr + MISC_CTRL);
+
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ dev_kfree_skb (skb);
+
+ /* You might need to clean up and record Tx statistics here. */
+
+ return NETDEV_TX_OK;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static irqreturn_t el16_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct net_local *lp;
+ int ioaddr, status, boguscount = 0;
+ ushort ack_cmd = 0;
+ void __iomem *shmem;
+
+ if (dev == NULL) {
+ pr_err("net_interrupt(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+ shmem = lp->base;
+
+ spin_lock(&lp->lock);
+
+ status = readw(shmem+iSCB_STATUS);
+
+ if (net_debug > 4) {
+ pr_debug("%s: 3c507 interrupt, status %4.4x.\n", dev->name, status);
+ }
+
+ /* Disable the 82586's input to the interrupt line. */
+ outb(0x80, ioaddr + MISC_CTRL);
+
+ /* Reap the Tx packet buffers. */
+ while (lp->tx_pkts_in_ring) {
+ unsigned short tx_status = readw(shmem+lp->tx_reap);
+ if (!(tx_status & 0x8000)) {
+ if (net_debug > 5)
+ pr_debug("Tx command incomplete (%#x).\n", lp->tx_reap);
+ break;
+ }
+ /* Tx unsuccessful or some interesting status bit set. */
+ if (!(tx_status & 0x2000) || (tx_status & 0x0f3f)) {
+ dev->stats.tx_errors++;
+ if (tx_status & 0x0600) dev->stats.tx_carrier_errors++;
+ if (tx_status & 0x0100) dev->stats.tx_fifo_errors++;
+ if (!(tx_status & 0x0040)) dev->stats.tx_heartbeat_errors++;
+ if (tx_status & 0x0020) dev->stats.tx_aborted_errors++;
+ dev->stats.collisions += tx_status & 0xf;
+ }
+ dev->stats.tx_packets++;
+ if (net_debug > 5)
+ pr_debug("Reaped %x, Tx status %04x.\n" , lp->tx_reap, tx_status);
+ lp->tx_reap += TX_BUF_SIZE;
+ if (lp->tx_reap > RX_BUF_START - TX_BUF_SIZE)
+ lp->tx_reap = TX_BUF_START;
+
+ lp->tx_pkts_in_ring--;
+ /* There is always more space in the Tx ring buffer now. */
+ netif_wake_queue(dev);
+
+ if (++boguscount > 10)
+ break;
+ }
+
+ if (status & 0x4000) { /* Packet received. */
+ if (net_debug > 5)
+ pr_debug("Received packet, rx_head %04x.\n", lp->rx_head);
+ el16_rx(dev);
+ }
+
+ /* Acknowledge the interrupt sources. */
+ ack_cmd = status & 0xf000;
+
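+ /* For reference: bits 15-12 of the SCB status word are the interrupt
+ events being acknowledged here (CX/FR/CNA/RNR); bits 10-8 hold the CU
+ state (0x0200 = active) and bits 6-4 the RU state (0x0040 = ready),
+ which is exactly what the two checks below test. */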
+ if ((status & 0x0700) != 0x0200 && netif_running(dev)) {
+ if (net_debug)
+ pr_debug("%s: Command unit stopped, status %04x, restarting.\n",
+ dev->name, status);
+ /* If this ever occurs we should really re-write the idle loop, reset
+ the Tx list, and do a complete restart of the command unit.
+ For now we rely on the Tx timeout if the resume doesn't work. */
+ ack_cmd |= CUC_RESUME;
+ }
+
+ if ((status & 0x0070) != 0x0040 && netif_running(dev)) {
+ /* The Rx unit is not ready, it must be hung. Restart the receiver by
+ initializing the rx buffers, and issuing an Rx start command. */
+ if (net_debug)
+ pr_debug("%s: Rx unit stopped, status %04x, restarting.\n",
+ dev->name, status);
+ init_rx_bufs(dev);
+ writew(RX_BUF_START,shmem+iSCB_RFA);
+ ack_cmd |= RX_START;
+ }
+
+ writew(ack_cmd,shmem+iSCB_CMD);
+ outb(0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
+
+ /* Clear the latched interrupt. */
+ outb(0, ioaddr + RESET_IRQ);
+
+ /* Enable the 82586's interrupt input. */
+ outb(0x84, ioaddr + MISC_CTRL);
+ spin_unlock(&lp->lock);
+ return IRQ_HANDLED;
+}
+
+static int el16_close(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ void __iomem *shmem = lp->base;
+
+ netif_stop_queue(dev);
+
+ /* Flush the Tx and disable Rx. */
+ writew(RX_SUSPEND | CUC_SUSPEND,shmem+iSCB_CMD);
+ outb(0, ioaddr + SIGNAL_CA);
+
+ /* Disable the 82586's input to the interrupt line. */
+ outb(0x80, ioaddr + MISC_CTRL);
+
+ /* We always physically use the IRQ line, so we don't do free_irq(). */
+
+ /* Update the statistics here. */
+
+ return 0;
+}
+
+/* Initialize the Rx-block list. */
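+/* Each Rx slot written below is laid out as follows (offsets within the
+ slot, for reference): 0x00 status, 0x02 command, 0x04 link, 0x06 RBD
+ offset (slot+22), 0x08-0x15 pad for the dest/src/protocol header; then
+ the RBD at 0x16: actual count, next (none), buffer address (slot+0x20,
+ with SCB_BASE added), high address word, and buffer size. The packet
+ data itself starts at slot+0x20. */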
+static void init_rx_bufs(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ void __iomem *write_ptr;
+ unsigned short SCB_base = SCB_BASE;
+
+ int cur_rxbuf = lp->rx_head = RX_BUF_START;
+
+ /* Initialize each Rx frame + data buffer. */
+ do { /* While there is room for one more. */
+
+ write_ptr = lp->base + cur_rxbuf;
+
+ writew(0x0000,write_ptr); /* Status */
+ writew(0x0000,write_ptr+=2); /* Command */
+ writew(cur_rxbuf + RX_BUF_SIZE,write_ptr+=2); /* Link */
+ writew(cur_rxbuf + 22,write_ptr+=2); /* Buffer offset */
+ writew(0x0000,write_ptr+=2); /* Pad for dest addr. */
+ writew(0x0000,write_ptr+=2);
+ writew(0x0000,write_ptr+=2);
+ writew(0x0000,write_ptr+=2); /* Pad for source addr. */
+ writew(0x0000,write_ptr+=2);
+ writew(0x0000,write_ptr+=2);
+ writew(0x0000,write_ptr+=2); /* Pad for protocol. */
+
+ writew(0x0000,write_ptr+=2); /* Buffer: Actual count */
+ writew(-1,write_ptr+=2); /* Buffer: Next (none). */
+ writew(cur_rxbuf + 0x20 + SCB_base,write_ptr+=2);/* Buffer: Address low */
+ writew(0x0000,write_ptr+=2);
+ /* Finally, the number of bytes in the buffer. */
+ writew(0x8000 + RX_BUF_SIZE-0x20,write_ptr+=2);
+
+ lp->rx_tail = cur_rxbuf;
+ cur_rxbuf += RX_BUF_SIZE;
+ } while (cur_rxbuf <= RX_BUF_END - RX_BUF_SIZE);
+
+ /* Terminate the list by setting the EOL bit, and wrap the pointer to make
+ the list a ring. */
+ write_ptr = lp->base + lp->rx_tail + 2;
+ writew(0xC000,write_ptr); /* Command, mark as last. */
+ writew(lp->rx_head,write_ptr+2); /* Link */
+}
+
+static void init_82586_mem(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ short ioaddr = dev->base_addr;
+ void __iomem *shmem = lp->base;
+
+ /* Enable loopback to protect the wire while starting up,
+ and hold the 586 in reset during the memory initialization. */
+ outb(0x20, ioaddr + MISC_CTRL);
+
+ /* Fix the ISCP address and base. */
+ init_words[3] = SCB_BASE;
+ init_words[7] = SCB_BASE;
+
+ /* Write the words at 0xfff6 (address-aliased to 0xfffff6). */
+ memcpy_toio(lp->base + RX_BUF_END - 10, init_words, 10);
+
+ /* Write the words at 0x0000. */
+ memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10);
+
+ /* Fill in the station address. */
+ memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, ETH_ALEN);
+
+ /* The Tx-block list is written as needed. We just set up the values. */
+ lp->tx_cmd_link = IDLELOOP + 4;
+ lp->tx_head = lp->tx_reap = TX_BUF_START;
+
+ init_rx_bufs(dev);
+
+ /* Start the 586 by releasing the reset line, but leave loopback. */
+ outb(0xA0, ioaddr + MISC_CTRL);
+
+ /* This was time consuming to track down: you need to give two channel
+ attention signals to reliably start up the i82586. */
+ outb(0, ioaddr + SIGNAL_CA);
+
+ {
+ int boguscnt = 50;
+ while (readw(shmem+iSCB_STATUS) == 0)
+ if (--boguscnt == 0) {
+ pr_warning("%s: i82586 initialization timed out with status %04x, cmd %04x.\n",
+ dev->name, readw(shmem+iSCB_STATUS), readw(shmem+iSCB_CMD));
+ break;
+ }
+ /* Issue channel-attn -- the 82586 won't start. */
+ outb(0, ioaddr + SIGNAL_CA);
+ }
+
+ /* Disable loopback and enable interrupts. */
+ outb(0x84, ioaddr + MISC_CTRL);
+ if (net_debug > 4)
+ pr_debug("%s: Initialized 82586, status %04x.\n", dev->name,
+ readw(shmem+iSCB_STATUS));
+}
+
+static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad)
+{
+ struct net_local *lp = netdev_priv(dev);
+ short ioaddr = dev->base_addr;
+ ushort tx_block = lp->tx_head;
+ void __iomem *write_ptr = lp->base + tx_block;
+ static char padding[ETH_ZLEN];
+
+ /* Set the write pointer to the Tx block, and put out the header. */
+ writew(0x0000,write_ptr); /* Tx status */
+ writew(CMD_INTR|CmdTx,write_ptr+=2); /* Tx command */
+ writew(tx_block+16,write_ptr+=2); /* Next command is a NoOp. */
+ writew(tx_block+8,write_ptr+=2); /* Data Buffer offset. */
+
+ /* Output the data buffer descriptor. */
+ writew((pad + length) | 0x8000,write_ptr+=2); /* Byte count parameter. */
+ writew(-1,write_ptr+=2); /* No next data buffer. */
+ writew(tx_block+22+SCB_BASE,write_ptr+=2); /* Buffer follows the NoOp command. */
+ writew(0x0000,write_ptr+=2); /* Buffer address high bits (always zero). */
+
+ /* Output the Loop-back NoOp command. */
+ writew(0x0000,write_ptr+=2); /* Tx status */
+ writew(CmdNOp,write_ptr+=2); /* Tx command */
+ writew(tx_block+16,write_ptr+=2); /* Next is myself. */
+
+ /* Output the packet at the write pointer. */
+ memcpy_toio(write_ptr+2, buf, length);
+ if (pad)
+ memcpy_toio(write_ptr+length+2, padding, pad);
+
+ /* Set the old command link pointing to this send packet. */
+ writew(tx_block,lp->base + lp->tx_cmd_link);
+ lp->tx_cmd_link = tx_block + 20;
+
+ /* Set the next free tx region. */
+ lp->tx_head = tx_block + TX_BUF_SIZE;
+ if (lp->tx_head > RX_BUF_START - TX_BUF_SIZE)
+ lp->tx_head = TX_BUF_START;
+
+ if (net_debug > 4) {
+ pr_debug("%s: 3c507 @%x send length = %d, tx_block %3x, next %3x.\n",
+ dev->name, ioaddr, length, tx_block, lp->tx_head);
+ }
+
+ /* Grimly block further packets if there has been insufficient reaping. */
+ if (++lp->tx_pkts_in_ring < NUM_TX_BUFS)
+ netif_wake_queue(dev);
+}
+
+static void el16_rx(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ void __iomem *shmem = lp->base;
+ ushort rx_head = lp->rx_head;
+ ushort rx_tail = lp->rx_tail;
+ ushort boguscount = 10;
+ short frame_status;
+
+ while ((frame_status = readw(shmem+rx_head)) < 0) { /* Command complete */
+ void __iomem *read_frame = lp->base + rx_head;
+ ushort rfd_cmd = readw(read_frame+2);
+ ushort next_rx_frame = readw(read_frame+4);
+ ushort data_buffer_addr = readw(read_frame+6);
+ void __iomem *data_frame = lp->base + data_buffer_addr;
+ ushort pkt_len = readw(data_frame);
+
+ if (rfd_cmd != 0 || data_buffer_addr != rx_head + 22 ||
+ (pkt_len & 0xC000) != 0xC000) {
+ pr_err("%s: Rx frame at %#x corrupted, "
+ "status %04x cmd %04x next %04x "
+ "data-buf @%04x %04x.\n",
+ dev->name, rx_head, frame_status, rfd_cmd,
+ next_rx_frame, data_buffer_addr, pkt_len);
+ } else if ((frame_status & 0x2000) == 0) {
+ /* Frame Rxed, but with error. */
+ dev->stats.rx_errors++;
+ if (frame_status & 0x0800) dev->stats.rx_crc_errors++;
+ if (frame_status & 0x0400) dev->stats.rx_frame_errors++;
+ if (frame_status & 0x0200) dev->stats.rx_fifo_errors++;
+ if (frame_status & 0x0100) dev->stats.rx_over_errors++;
+ if (frame_status & 0x0080) dev->stats.rx_length_errors++;
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ pkt_len &= 0x3fff;
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) {
+ pr_err("%s: Memory squeeze, dropping packet.\n",
+ dev->name);
+ dev->stats.rx_dropped++;
+ break;
+ }
+
+ skb_reserve(skb,2);
+
+ /* 'skb->data' points to the start of sk_buff data area. */
+ memcpy_fromio(skb_put(skb,pkt_len), data_frame + 10, pkt_len);
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+
+ /* Clear the status word and set End-of-List on the rx frame. */
+ writew(0,read_frame);
+ writew(0xC000,read_frame+2);
+ /* Clear the end-of-list on the prev. RFD. */
+ writew(0x0000,lp->base + rx_tail + 2);
+
+ rx_tail = rx_head;
+ rx_head = next_rx_frame;
+ if (--boguscount == 0)
+ break;
+ }
+
+ lp->rx_head = rx_head;
+ lp->rx_tail = rx_tail;
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
+}
+
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 level)
+{
+ debug = level;
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+};
+
+#ifdef MODULE
+static struct net_device *dev_3c507;
+module_param(io, int, 0);
+module_param(irq, int, 0);
+MODULE_PARM_DESC(io, "EtherLink16 I/O base address");
+MODULE_PARM_DESC(irq, "(ignored)");
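+
+/* Typical usage as a module (illustrative; the IRQ is always read back
+ from the board's IRQ_CONFIG register, so the irq= parameter is accepted
+ but ignored, per the description above):
+ insmod 3c507.o io=0x300
+ */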
+
+int __init init_module(void)
+{
+ if (io == 0)
+ pr_notice("3c507: You should not use auto-probing with insmod!\n");
+ dev_3c507 = el16_probe(-1);
+ return IS_ERR(dev_3c507) ? PTR_ERR(dev_3c507) : 0;
+}
+
+void __exit
+cleanup_module(void)
+{
+ struct net_device *dev = dev_3c507;
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ iounmap(((struct net_local *)netdev_priv(dev))->base);
+ release_region(dev->base_addr, EL16_IO_EXTENT);
+ free_netdev(dev);
+}
+#endif /* MODULE */
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/i825xx/3c523.c b/drivers/net/ethernet/i825xx/3c523.c
new file mode 100644
index 000000000000..d70d3df4c985
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/3c523.c
@@ -0,0 +1,1312 @@
+/*
+ net-3-driver for the 3c523 Etherlink/MC card (i82586 Ethernet chip)
+
+
+ This is an extension to the Linux operating system, and is covered by the
+ same GNU General Public License that covers that work.
+
+ Copyright 1995, 1996 by Chris Beauregard (cpbeaure@undergrad.math.uwaterloo.ca)
+
+ This is basically Michael Hipp's ni52 driver, with a new probing
+ algorithm and some minor changes to the 82586 CA and reset routines.
+ Thanks a lot Michael for a really clean i82586 implementation! Unless
+ otherwise documented in ni52.c, any bugs are mine.
+
+ Contrary to the Ethernet-HOWTO, this isn't based on the 3c507 driver in
+ any way. The ni52 is a lot easier to modify.
+
+ sources:
+ ni52.c
+
+ Crynwr packet driver collection was a great reference for my first
+ attempt at this sucker. The 3c507 driver also helped, until I noticed
+ that ni52.c was a lot nicer.
+
+ EtherLink/MC: Micro Channel Ethernet Adapter Technical Reference
+ Manual, courtesy of 3Com CardFacts, documents the 3c523-specific
+ stuff. Information on CardFacts is found in the Ethernet HOWTO.
+ Also see http://www.3com.com/
+
+ Microprocessor Communications Support Chips, T.J. Byers, ISBN
+ 0-444-01224-9, has a section on the i82586. It tells you just enough
+ to know that you really don't want to learn how to program the chip.
+
+ The original device probe code was stolen from ps2esdi.c
+
+ Known Problems:
+ Since most of the code was stolen from ni52.c, you'll run across the
+ same bugs in the 0.62 version of ni52.c, plus maybe a few because of
+ the 3c523 idiosyncrasies. The 3c523 has 16K of RAM though, so there
+ shouldn't be the overrun problem that the 8K ni52 has.
+
+ This driver is for a 16K adapter. It should work fine on the 64K
+ adapters, but it will only use one of the 4 banks of RAM. Modifying
+ this for the 64K version would require a lot of heinous bank
+ switching, which I'm sure I'm not interested in doing. If you try to
+ implement a bank switching version, you'll basically have to remember
+ what bank is enabled and do a switch every time you access a memory
+ location that's not current. You'll also have to remap pointers on
+ the driver side, because it only knows about 16K of the memory.
+ Anyone desperate or masochistic enough to try?
+
+ It seems to be stable now when multiple transmit buffers are used. I
+ can't see any performance difference, but then I'm working on a 386SX.
+
+ Multicast doesn't work. It doesn't even pretend to work. Don't use
+ it. Don't compile your kernel with multicast support. I don't know
+ why.
+
+ Features:
+ This driver is usable as a loadable module. If you try to specify an
+ IRQ or an IO address (via insmod 3c523.o irq=xx io=0xyyy), it will
+ search the MCA slots until it finds a 3c523 with the specified
+ parameters.
+
+ This driver does support multiple ethernet cards when used as a module
+ (up to MAX_3C523_CARDS, the default being 4)
+
+ This has been tested with both BNC and TP versions, internal and
+ external transceivers. Haven't tested with the 64K version (that I
+ know of).
+
+ History:
+ Jan 1st, 1996
+ first public release
+ Feb 4th, 1996
+ update to 1.3.59, incorporated multicast diffs from ni52.c
+ Feb 15th, 1996
+ added shared irq support
+ Apr 1999
+ added support for multiple cards when used as a module
+ added option to disable multicast as it causes problems
+ Ganesh Sittampalam <ganesh.sittampalam@magdalen.oxford.ac.uk>
+ Stuart Adamson <stuart.adamson@compsoc.net>
+ Nov 2001
+ added support for ethtool (jgarzik)
+
+ $Header: /fsys2/home/chrisb/linux-1.3.59-MCA/drivers/net/RCS/3c523.c,v 1.1 1996/02/05 01:53:46 chrisb Exp chrisb $
+ */
+
+#define DRV_NAME "3c523"
+#define DRV_VERSION "17-Nov-2001"
+
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/mca-legacy.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+
+#include "3c523.h"
+
+/*************************************************************************/
+#define DEBUG /* debug on */
+#define SYSBUSVAL 0 /* 1 = 8 Bit, 0 = 16 bit - 3c523 only does 16 bit */
+#undef ELMC_MULTICAST /* Disable multicast support as it is somewhat seriously broken at the moment */
+
+#define make32(ptr16) (p->memtop + (short) (ptr16) )
+#define make24(ptr32) ((char *) (ptr32) - p->base)
+#define make16(ptr32) ((unsigned short) ((unsigned long) (ptr32) - (unsigned long) p->memtop ))
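+
+/* Sketch of what these do, assuming the 16K window used below: p->base
+ is chosen so that the window ends at 24-bit address 0xFFFFFF, making
+ make24() of the window start 0xFFC000; make16()/make32() convert between
+ virtual pointers and 16-bit offsets relative to p->memtop, so the window
+ start becomes offset 0xC000. */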
+
+/*************************************************************************/
+/*
+ Tables to which we can map values in the configuration registers.
+ */
+static int irq_table[] __initdata = {
+ 12, 7, 3, 9
+};
+
+static int csr_table[] __initdata = {
+ 0x300, 0x1300, 0x2300, 0x3300
+};
+
+static int shm_table[] __initdata = {
+ 0x0c0000, 0x0c8000, 0x0d0000, 0x0d8000
+};
+
+/******************* how to calculate the buffers *****************************
+
+
+ * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works
+ * --------------- in a different (more stable?) mode. Only in this mode is it
+ * possible to configure the driver with 'NO_NOPCOMMANDS'
+
+sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8;
+sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT
+sizeof(rfd) = 24; sizeof(rbd) = 12;
+sizeof(tbd) = 8; sizeof(transmit_cmd) = 16;
+sizeof(nop_cmd) = 8;
+
+ * if you don't know the driver, better do not change these values: */
+
+#define RECV_BUFF_SIZE 1524 /* slightly oversized */
+#define XMIT_BUFF_SIZE 1524 /* slightly oversized */
+#define NUM_XMIT_BUFFS 1 /* config for both, 8K and 16K shmem */
+#define NUM_RECV_BUFFS_8 4 /* config for 8K shared mem */
+#define NUM_RECV_BUFFS_16 9 /* config for 16K shared mem */
+
+#if (NUM_XMIT_BUFFS == 1)
+#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */
+#endif
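+
+/* Worked example for the 16K configuration (sizes from the table above):
+ 36 (scp+iscp+scb) + 2*8 (nop cmds) + 16 (xmit cmd) + 9*(24+12)
+ (rfds+rbds) + 9*1524 (recv buffs) + 1524+8 (xmit buff+tbd)
+ = 15640 bytes, which fits in the 16K (16384 byte) window. */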
+
+/**************************************************************************/
+
+#define DELAY(x) { mdelay(32 * x); }
+
+/* a much shorter delay: */
+#define DELAY_16() { udelay(16); }
+
+/* wait for command with timeout: */
+#define WAIT_4_SCB_CMD() { int i; \
+ for(i=0;i<1024;i++) { \
+ if(!p->scb->cmd) break; \
+ DELAY_16(); \
+ if(i == 1023) { \
+ pr_warning("%s:%d: scb_cmd timed out .. resetting i82586\n",\
+ dev->name,__LINE__); \
+ elmc_id_reset586(); } } }
+
+static irqreturn_t elmc_interrupt(int irq, void *dev_id);
+static int elmc_open(struct net_device *dev);
+static int elmc_close(struct net_device *dev);
+static netdev_tx_t elmc_send_packet(struct sk_buff *, struct net_device *);
+static struct net_device_stats *elmc_get_stats(struct net_device *dev);
+static void elmc_timeout(struct net_device *dev);
+#ifdef ELMC_MULTICAST
+static void set_multicast_list(struct net_device *dev);
+#endif
+static const struct ethtool_ops netdev_ethtool_ops;
+
+/* helper-functions */
+static int init586(struct net_device *dev);
+static int check586(struct net_device *dev, unsigned long where, unsigned size);
+static void alloc586(struct net_device *dev);
+static void startrecv586(struct net_device *dev);
+static void *alloc_rfa(struct net_device *dev, void *ptr);
+static void elmc_rcv_int(struct net_device *dev);
+static void elmc_xmt_int(struct net_device *dev);
+static void elmc_rnr_int(struct net_device *dev);
+
+struct priv {
+ unsigned long base;
+ char *memtop;
+ unsigned long mapped_start; /* Start of ioremap */
+ volatile struct rfd_struct *rfd_last, *rfd_top, *rfd_first;
+ volatile struct scp_struct *scp; /* volatile is important */
+ volatile struct iscp_struct *iscp; /* volatile is important */
+ volatile struct scb_struct *scb; /* volatile is important */
+ volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS];
+#if (NUM_XMIT_BUFFS == 1)
+ volatile struct transmit_cmd_struct *xmit_cmds[2];
+ volatile struct nop_cmd_struct *nop_cmds[2];
+#else
+ volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS];
+ volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS];
+#endif
+ volatile int nop_point, num_recv_buffs;
+ volatile char *xmit_cbuffs[NUM_XMIT_BUFFS];
+ volatile int xmit_count, xmit_last;
+ volatile int slot;
+};
+
+#define elmc_attn586() {elmc_do_attn586(dev->base_addr,ELMC_CTRL_INTE);}
+#define elmc_reset586() {elmc_do_reset586(dev->base_addr,ELMC_CTRL_INTE);}
+
+/* with interrupts disabled - this will clear the interrupt bit in the
+ 3c523 control register, and won't put it back. This effectively
+ disables interrupts on the card. */
+#define elmc_id_attn586() {elmc_do_attn586(dev->base_addr,0);}
+#define elmc_id_reset586() {elmc_do_reset586(dev->base_addr,0);}
+
+/*************************************************************************/
+/*
+ Do a Channel Attention on the 3c523. This is extremely board dependent.
+ */
+static void elmc_do_attn586(int ioaddr, int ints)
+{
+ /* the 3c523 requires a minimum of 500 ns. The delays here might be
+ a little too large, and hence they may cut the performance of the
+ card slightly. If someone who knows a little more about Linux
+ timing would care to play with these, I'd appreciate it. */
+
+ /* this bit masking stuff is crap. I'd rather have separate
+ registers with strobe triggers for each of these functions. <sigh>
+ Ya take what ya got. */
+
+ outb(ELMC_CTRL_RST | 0x3 | ELMC_CTRL_CA | ints, ioaddr + ELMC_CTRL);
+ DELAY_16(); /* > 500 ns */
+ outb(ELMC_CTRL_RST | 0x3 | ints, ioaddr + ELMC_CTRL);
+}
+
+/*************************************************************************/
+/*
+ Reset the 82586 on the 3c523. Also very board dependent.
+ */
+static void elmc_do_reset586(int ioaddr, int ints)
+{
+ /* toggle the RST bit low then high */
+ outb(0x3 | ELMC_CTRL_LBK, ioaddr + ELMC_CTRL);
+ DELAY_16(); /* > 500 ns */
+ outb(ELMC_CTRL_RST | ELMC_CTRL_LBK | 0x3, ioaddr + ELMC_CTRL);
+
+ elmc_do_attn586(ioaddr, ints);
+}
+
+/**********************************************
+ * close device
+ */
+
+static int elmc_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ elmc_id_reset586(); /* the hard way to stop the receiver */
+ free_irq(dev->irq, dev);
+ return 0;
+}
+
+/**********************************************
+ * open device
+ */
+
+static int elmc_open(struct net_device *dev)
+{
+ int ret;
+
+ elmc_id_attn586(); /* disable interrupts */
+
+ ret = request_irq(dev->irq, elmc_interrupt, IRQF_SHARED,
+ dev->name, dev);
+ if (ret) {
+ pr_err("%s: couldn't get irq %d\n", dev->name, dev->irq);
+ elmc_id_reset586();
+ return ret;
+ }
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+ netif_start_queue(dev);
+ return 0; /* most done by init */
+}
+
+/**********************************************
+ * Check to see if there's an 82586 out there.
+ */
+
+static int __init check586(struct net_device *dev, unsigned long where, unsigned size)
+{
+ struct priv *p = netdev_priv(dev);
+ char *iscp_addrs[2];
+ int i = 0;
+
+ p->base = (unsigned long) isa_bus_to_virt((unsigned long)where) + size - 0x01000000;
+ p->memtop = isa_bus_to_virt((unsigned long)where) + size;
+ p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS);
+ memset((char *) p->scp, 0, sizeof(struct scp_struct));
+ p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */
+
+ iscp_addrs[0] = isa_bus_to_virt((unsigned long)where);
+ iscp_addrs[1] = (char *) p->scp - sizeof(struct iscp_struct);
+
+ for (i = 0; i < 2; i++) {
+ p->iscp = (struct iscp_struct *) iscp_addrs[i];
+ memset((char *) p->iscp, 0, sizeof(struct iscp_struct));
+
+ p->scp->iscp = make24(p->iscp);
+ p->iscp->busy = 1;
+
+ elmc_id_reset586();
+
+ /* reset586 does an implicit CA */
+
+ /* apparently, you sometimes have to kick the 82586 twice... */
+ elmc_id_attn586();
+ DELAY(1);
+
+ if (p->iscp->busy) { /* i82586 clears 'busy' after successful init */
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/******************************************************************
+ * set iscp at the right place, called by elmc_probe and open586.
+ */
+
+static void alloc586(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+
+ elmc_id_reset586();
+ DELAY(2);
+
+ p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS);
+ p->scb = (struct scb_struct *) isa_bus_to_virt(dev->mem_start);
+ p->iscp = (struct iscp_struct *) ((char *) p->scp - sizeof(struct iscp_struct));
+
+ memset((char *) p->iscp, 0, sizeof(struct iscp_struct));
+ memset((char *) p->scp, 0, sizeof(struct scp_struct));
+
+ p->scp->iscp = make24(p->iscp);
+ p->scp->sysbus = SYSBUSVAL;
+ p->iscp->scb_offset = make16(p->scb);
+
+ p->iscp->busy = 1;
+ elmc_id_reset586();
+ elmc_id_attn586();
+
+ DELAY(2);
+
+ if (p->iscp->busy)
+ pr_err("%s: Init-Problems (alloc).\n", dev->name);
+
+ memset((char *) p->scb, 0, sizeof(struct scb_struct));
+}
+
+/*****************************************************************/
+
+static int elmc_getinfo(char *buf, int slot, void *d)
+{
+ int len = 0;
+ struct net_device *dev = d;
+
+ if (dev == NULL)
+ return len;
+
+ len += sprintf(buf + len, "Revision: 0x%x\n",
+ inb(dev->base_addr + ELMC_REVISION) & 0xf);
+ len += sprintf(buf + len, "IRQ: %d\n", dev->irq);
+ len += sprintf(buf + len, "IO Address: %#lx-%#lx\n", dev->base_addr,
+ dev->base_addr + ELMC_IO_EXTENT);
+ len += sprintf(buf + len, "Memory: %#lx-%#lx\n", dev->mem_start,
+ dev->mem_end - 1);
+ len += sprintf(buf + len, "Transceiver: %s\n", dev->if_port ?
+ "External" : "Internal");
+ len += sprintf(buf + len, "Device: %s\n", dev->name);
+ len += sprintf(buf + len, "Hardware Address: %pM\n",
+ dev->dev_addr);
+
+ return len;
+} /* elmc_getinfo() */
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = elmc_open,
+ .ndo_stop = elmc_close,
+ .ndo_get_stats = elmc_get_stats,
+ .ndo_start_xmit = elmc_send_packet,
+ .ndo_tx_timeout = elmc_timeout,
+#ifdef ELMC_MULTICAST
+ .ndo_set_rx_mode = set_multicast_list,
+#endif
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/*****************************************************************/
+
+static int __init do_elmc_probe(struct net_device *dev)
+{
+ static int slot;
+ int base_addr = dev->base_addr;
+ int irq = dev->irq;
+ u_char status = 0;
+ u_char revision = 0;
+ int i = 0;
+ unsigned int size = 0;
+ int retval;
+ struct priv *pr = netdev_priv(dev);
+
+ if (MCA_bus == 0) {
+ return -ENODEV;
+ }
+ /* search through the slots for the 3c523. */
+ slot = mca_find_adapter(ELMC_MCA_ID, 0);
+ while (slot != -1) {
+ status = mca_read_stored_pos(slot, 2);
+
+ dev->irq=irq_table[(status & ELMC_STATUS_IRQ_SELECT) >> 6];
+ dev->base_addr=csr_table[(status & ELMC_STATUS_CSR_SELECT) >> 1];
+
+ /*
+ If we're trying to match a specified irq or IO address,
+ we'll reject a match unless it's what we're looking for.
+ Also reject it if the card is already in use.
+ */
+
+ if ((irq && irq != dev->irq) ||
+ (base_addr && base_addr != dev->base_addr)) {
+ slot = mca_find_adapter(ELMC_MCA_ID, slot + 1);
+ continue;
+ }
+ if (!request_region(dev->base_addr, ELMC_IO_EXTENT, DRV_NAME)) {
+ slot = mca_find_adapter(ELMC_MCA_ID, slot + 1);
+ continue;
+ }
+
+ /* found what we're looking for... */
+ break;
+ }
+
+ /* we didn't find any 3c523 in the slots we checked for */
+ if (slot == MCA_NOTFOUND)
+ return (base_addr || irq) ? -ENXIO : -ENODEV;
+
+ mca_set_adapter_name(slot, "3Com 3c523 Etherlink/MC");
+ mca_set_adapter_procfn(slot, (MCA_ProcFn) elmc_getinfo, dev);
+
+ /* if we get this far, adapter has been found - carry on */
+ pr_info("%s: 3c523 adapter found in slot %d\n", dev->name, slot + 1);
+
+ /* Now we extract configuration info from the card.
+ The 3c523 provides information in two of the POS registers, but
+ the second one is only needed if we want to tell the card what IRQ
+ to use. I suspect that whoever sets the thing up initially would
+ prefer we don't screw with those things.
+
+ Note that we read the status info when we found the card...
+
+ See 3c523.h for more details.
+ */
+
+ /* revision is stored in the first 4 bits of the revision register */
+ revision = inb(dev->base_addr + ELMC_REVISION) & 0xf;
+
+ /* according to docs, we read the interrupt and write it back to
+ the IRQ select register, since the POST might not configure the IRQ
+ properly. */
+ switch (dev->irq) {
+ case 3:
+ mca_write_pos(slot, 3, 0x04);
+ break;
+ case 7:
+ mca_write_pos(slot, 3, 0x02);
+ break;
+ case 9:
+ mca_write_pos(slot, 3, 0x08);
+ break;
+ case 12:
+ mca_write_pos(slot, 3, 0x01);
+ break;
+ }
+
+ pr->slot = slot;
+
+ pr_info("%s: 3Com 3c523 Rev 0x%x at %#lx\n", dev->name, (int) revision,
+ dev->base_addr);
+
+ /* Determine if we're using the on-board transceiver (i.e. coax) or
+ an external one. The information is pretty much useless, but I
+ guess it's worth brownie points. */
+ dev->if_port = (status & ELMC_STATUS_DISABLE_THIN);
+
+ /* The 3c523 has a 24K chunk of memory. The first 16K is the
+ shared memory, while the last 8K is for the EtherStart BIOS ROM,
+ which we don't care much about here. We'll just tell Linux that
+ we're using 16K. MCA won't permit address space conflicts caused
+ by not mapping the other 8K. */
+ dev->mem_start = shm_table[(status & ELMC_STATUS_MEMORY_SELECT) >> 3];
+
+ /* We're using MCA, so it's a given that the information about memory
+ size is correct. The Crynwr drivers do something like this. */
+
+ elmc_id_reset586(); /* seems like a good idea before checking it... */
+
+ size = 0x4000; /* check for 16K mem */
+ if (!check586(dev, dev->mem_start, size)) {
+ pr_err("%s: memprobe, Can't find memory at 0x%lx!\n", dev->name,
+ dev->mem_start);
+ retval = -ENODEV;
+ goto err_out;
+ }
+ dev->mem_end = dev->mem_start + size; /* set mem_end as shown by 'ifconfig' */
+
+ pr->memtop = isa_bus_to_virt(dev->mem_start) + size;
+ pr->base = (unsigned long) isa_bus_to_virt(dev->mem_start) + size - 0x01000000;
+ alloc586(dev);
+
+ elmc_id_reset586(); /* make sure it doesn't generate spurious ints */
+
+ /* set number of receive-buffs according to memsize */
+ pr->num_recv_buffs = NUM_RECV_BUFFS_16;
+
+ /* dump all the assorted information */
+ pr_info("%s: IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->name,
+ dev->irq, dev->if_port ? "ex" : "in",
+ dev->mem_start, dev->mem_end - 1);
+
+ /* The hardware address for the 3c523 is stored in the first six
+ bytes of the IO address. */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(dev->base_addr + i);
+
+ pr_info("%s: hardware address %pM\n",
+ dev->name, dev->dev_addr);
+
+ dev->netdev_ops = &netdev_ops;
+ dev->watchdog_timeo = HZ;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+
+ /* note that we haven't actually requested the IRQ from the kernel.
+ That gets done in elmc_open(). I'm not sure that's such a good idea,
+ but it works, so I'll go with it. */
+
+#ifndef ELMC_MULTICAST
+ dev->flags&=~IFF_MULTICAST; /* Multicast doesn't work */
+#endif
+
+ retval = register_netdev(dev);
+ if (retval)
+ goto err_out;
+
+ return 0;
+err_out:
+ mca_set_adapter_procfn(slot, NULL, NULL);
+ release_region(dev->base_addr, ELMC_IO_EXTENT);
+ return retval;
+}
+
+#ifdef MODULE
+static void cleanup_card(struct net_device *dev)
+{
+ mca_set_adapter_procfn(((struct priv *)netdev_priv(dev))->slot,
+ NULL, NULL);
+ release_region(dev->base_addr, ELMC_IO_EXTENT);
+}
+#else
+struct net_device * __init elmc_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct priv));
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_elmc_probe(dev);
+ if (err)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+/**********************************************
+ * init the chip (elmc-interrupt should be disabled?!)
+ * needs a correct 'allocated' memory
+ */
+
+static int init586(struct net_device *dev)
+{
+ void *ptr;
+ unsigned long s;
+ int i, result = 0;
+ struct priv *p = netdev_priv(dev);
+ volatile struct configure_cmd_struct *cfg_cmd;
+ volatile struct iasetup_cmd_struct *ias_cmd;
+ volatile struct tdr_cmd_struct *tdr_cmd;
+ volatile struct mcsetup_cmd_struct *mc_cmd;
+ struct netdev_hw_addr *ha;
+ int num_addrs = netdev_mc_count(dev);
+
+ ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct));
+
+ cfg_cmd = (struct configure_cmd_struct *) ptr; /* configure-command */
+ cfg_cmd->cmd_status = 0;
+ cfg_cmd->cmd_cmd = CMD_CONFIGURE | CMD_LAST;
+ cfg_cmd->cmd_link = 0xffff;
+
+ cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */
+ cfg_cmd->fifo = 0x08; /* fifo-limit (8=tx:32/rx:64) */
+ cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */
+ cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */
+ cfg_cmd->priority = 0x00;
+ cfg_cmd->ifs = 0x60;
+ cfg_cmd->time_low = 0x00;
+ cfg_cmd->time_high = 0xf2;
+ cfg_cmd->promisc = 0;
+ if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC))
+ cfg_cmd->promisc = 1;
+ cfg_cmd->carr_coll = 0x00;
+
+ p->scb->cbl_offset = make16(cfg_cmd);
+
+ p->scb->cmd = CUC_START; /* cmd.-unit start */
+ elmc_id_attn586();
+
+ s = jiffies; /* warning: only active with interrupts on !! */
+ while (!(cfg_cmd->cmd_status & STAT_COMPL)) {
+ if (time_after(jiffies, s + 30*HZ/100))
+ break;
+ }
+
+ if ((cfg_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_COMPL | STAT_OK)) {
+ pr_warning("%s (elmc): configure command failed: %x\n", dev->name, cfg_cmd->cmd_status);
+ return 1;
+ }
+ /*
+ * individual address setup
+ */
+ ias_cmd = (struct iasetup_cmd_struct *) ptr;
+
+ ias_cmd->cmd_status = 0;
+ ias_cmd->cmd_cmd = CMD_IASETUP | CMD_LAST;
+ ias_cmd->cmd_link = 0xffff;
+
+ memcpy((char *) &ias_cmd->iaddr, (char *) dev->dev_addr, ETH_ALEN);
+
+ p->scb->cbl_offset = make16(ias_cmd);
+
+ p->scb->cmd = CUC_START; /* cmd.-unit start */
+ elmc_id_attn586();
+
+ s = jiffies;
+ while (!(ias_cmd->cmd_status & STAT_COMPL)) {
+ if (time_after(jiffies, s + 30*HZ/100))
+ break;
+ }
+
+ if ((ias_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_OK | STAT_COMPL)) {
+ pr_warning("%s (elmc): individual address setup command failed: %04x\n",
+ dev->name, ias_cmd->cmd_status);
+ return 1;
+ }
+ /*
+ * TDR, wire check .. e.g. no terminating resistor, etc.
+ */
+ tdr_cmd = (struct tdr_cmd_struct *) ptr;
+
+ tdr_cmd->cmd_status = 0;
+ tdr_cmd->cmd_cmd = CMD_TDR | CMD_LAST;
+ tdr_cmd->cmd_link = 0xffff;
+ tdr_cmd->status = 0;
+
+ p->scb->cbl_offset = make16(tdr_cmd);
+
+ p->scb->cmd = CUC_START; /* cmd.-unit start */
+ elmc_attn586();
+
+ s = jiffies;
+ while (!(tdr_cmd->cmd_status & STAT_COMPL)) {
+ if (time_after(jiffies, s + 30*HZ/100)) {
+ pr_warning("%s: %d Problems while running the TDR.\n", dev->name, __LINE__);
+ result = 1;
+ break;
+ }
+ }
+
+ if (!result) {
+ DELAY(2); /* wait for result */
+ result = tdr_cmd->status;
+
+ p->scb->cmd = p->scb->status & STAT_MASK;
+ elmc_id_attn586(); /* ack the interrupts */
+
+ if (result & TDR_LNK_OK) {
+ /* empty */
+ } else if (result & TDR_XCVR_PRB) {
+ pr_warning("%s: TDR: Transceiver problem!\n", dev->name);
+ } else if (result & TDR_ET_OPN) {
+ pr_warning("%s: TDR: No correct termination %d clocks away.\n", dev->name, result & TDR_TIMEMASK);
+ } else if (result & TDR_ET_SRT) {
+ if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */
+ pr_warning("%s: TDR: Detected a short circuit %d clocks away.\n", dev->name, result & TDR_TIMEMASK);
+ } else {
+ pr_warning("%s: TDR: Unknown status %04x\n", dev->name, result);
+ }
+ }
+ /*
+ * ack interrupts
+ */
+ p->scb->cmd = p->scb->status & STAT_MASK;
+ elmc_id_attn586();
+
+ /*
+ * alloc nop/xmit-cmds
+ */
+#if (NUM_XMIT_BUFFS == 1)
+ for (i = 0; i < 2; i++) {
+ p->nop_cmds[i] = (struct nop_cmd_struct *) ptr;
+ p->nop_cmds[i]->cmd_cmd = CMD_NOP;
+ p->nop_cmds[i]->cmd_status = 0;
+ p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
+ ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
+ }
+ p->xmit_cmds[0] = (struct transmit_cmd_struct *) ptr; /* transmit cmd/buff 0 */
+ ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
+#else
+ for (i = 0; i < NUM_XMIT_BUFFS; i++) {
+ p->nop_cmds[i] = (struct nop_cmd_struct *) ptr;
+ p->nop_cmds[i]->cmd_cmd = CMD_NOP;
+ p->nop_cmds[i]->cmd_status = 0;
+ p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
+ ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
+ p->xmit_cmds[i] = (struct transmit_cmd_struct *) ptr; /*transmit cmd/buff 0 */
+ ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
+ }
+#endif
+
+ ptr = alloc_rfa(dev, (void *) ptr); /* init receive-frame-area */
+
+ /*
+ * Multicast setup
+ */
+
+ if (num_addrs) {
+ /* I don't understand this: do we really need memory after the init? */
+ int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
+ if (len <= 0) {
+ pr_err("%s: Ooooops, no memory for MC-Setup!\n", dev->name);
+ } else {
+ if (len < num_addrs) {
+ num_addrs = len;
+ pr_warning("%s: Sorry, can only apply %d MC-Address(es).\n",
+ dev->name, num_addrs);
+ }
+ mc_cmd = (struct mcsetup_cmd_struct *) ptr;
+ mc_cmd->cmd_status = 0;
+ mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST;
+ mc_cmd->cmd_link = 0xffff;
+ mc_cmd->mc_cnt = num_addrs * 6;
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy((char *) mc_cmd->mc_list[i++],
+ ha->addr, 6);
+ p->scb->cbl_offset = make16(mc_cmd);
+ p->scb->cmd = CUC_START;
+ elmc_id_attn586();
+ s = jiffies;
+ while (!(mc_cmd->cmd_status & STAT_COMPL)) {
+ if (time_after(jiffies, s + 30*HZ/100))
+ break;
+ }
+ if (!(mc_cmd->cmd_status & STAT_COMPL)) {
+ pr_warning("%s: Can't apply multicast-address-list.\n", dev->name);
+ }
+ }
+ }
+ /*
+ * alloc xmit-buffs / init xmit_cmds
+ */
+ for (i = 0; i < NUM_XMIT_BUFFS; i++) {
+ p->xmit_cbuffs[i] = (char *) ptr; /* char-buffs */
+ ptr = (char *) ptr + XMIT_BUFF_SIZE;
+ p->xmit_buffs[i] = (struct tbd_struct *) ptr; /* TBD */
+ ptr = (char *) ptr + sizeof(struct tbd_struct);
+ if ((void *) ptr > (void *) p->iscp) {
+ pr_err("%s: not enough shared-mem for your configuration!\n", dev->name);
+ return 1;
+ }
+ memset((char *) (p->xmit_cmds[i]), 0, sizeof(struct transmit_cmd_struct));
+ memset((char *) (p->xmit_buffs[i]), 0, sizeof(struct tbd_struct));
+ p->xmit_cmds[i]->cmd_status = STAT_COMPL;
+ p->xmit_cmds[i]->cmd_cmd = CMD_XMIT | CMD_INT;
+ p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i]));
+ p->xmit_buffs[i]->next = 0xffff;
+ p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i]));
+ }
+
+ p->xmit_count = 0;
+ p->xmit_last = 0;
+#ifndef NO_NOPCOMMANDS
+ p->nop_point = 0;
+#endif
+
+ /*
+ * 'start transmitter' (nop-loop)
+ */
+#ifndef NO_NOPCOMMANDS
+ p->scb->cbl_offset = make16(p->nop_cmds[0]);
+ p->scb->cmd = CUC_START;
+ elmc_id_attn586();
+ WAIT_4_SCB_CMD();
+#else
+ p->xmit_cmds[0]->cmd_link = 0xffff;
+ p->xmit_cmds[0]->cmd_cmd = CMD_XMIT | CMD_LAST | CMD_INT;
+#endif
+
+ return 0;
+}
+
+/******************************************************
+ * This is a helper routine for elmc_rnr_int() and init586().
+ * It sets up the Receive Frame Area (RFA).
+ */
+
+static void *alloc_rfa(struct net_device *dev, void *ptr)
+{
+ volatile struct rfd_struct *rfd = (struct rfd_struct *) ptr;
+ volatile struct rbd_struct *rbd;
+ int i;
+ struct priv *p = netdev_priv(dev);
+
+ memset((char *) rfd, 0, sizeof(struct rfd_struct) * p->num_recv_buffs);
+ p->rfd_first = rfd;
+
+ for (i = 0; i < p->num_recv_buffs; i++) {
+ rfd[i].next = make16(rfd + (i + 1) % p->num_recv_buffs);
+ }
+ rfd[p->num_recv_buffs - 1].last = RFD_SUSP; /* RU suspend */
+
+ ptr = (void *) (rfd + p->num_recv_buffs);
+
+ rbd = (struct rbd_struct *) ptr;
+ ptr = (void *) (rbd + p->num_recv_buffs);
+
+ /* clr descriptors */
+ memset((char *) rbd, 0, sizeof(struct rbd_struct) * p->num_recv_buffs);
+
+ for (i = 0; i < p->num_recv_buffs; i++) {
+ rbd[i].next = make16((rbd + (i + 1) % p->num_recv_buffs));
+ rbd[i].size = RECV_BUFF_SIZE;
+ rbd[i].buffer = make24(ptr);
+ ptr = (char *) ptr + RECV_BUFF_SIZE;
+ }
+
+ p->rfd_top = p->rfd_first;
+ p->rfd_last = p->rfd_first + p->num_recv_buffs - 1;
+
+ p->scb->rfa_offset = make16(p->rfd_first);
+ p->rfd_first->rbd_offset = make16(rbd);
+
+ return ptr;
+}
+
+
+/**************************************************
+ * Interrupt Handler ...
+ */
+
+static irqreturn_t
+elmc_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ unsigned short stat;
+ struct priv *p;
+
+ if (!netif_running(dev)) {
+ /* The 3c523 has this habit of generating interrupts during the
+ reset. I'm not sure if the ni52 has this same problem, but it's
+ really annoying if we haven't finished initializing it. I was
+ hoping all the elmc_id_* commands would disable this, but I
+ might have missed a few. */
+
+ elmc_id_attn586(); /* ack inter. and disable any more */
+ return IRQ_HANDLED;
+ } else if (!(ELMC_CTRL_INT & inb(dev->base_addr + ELMC_CTRL))) {
+ /* wasn't this device */
+ return IRQ_NONE;
+ }
+ /* reading ELMC_CTRL also clears the INT bit. */
+
+ p = netdev_priv(dev);
+
+ while ((stat = p->scb->status & STAT_MASK))
+ {
+ p->scb->cmd = stat;
+ elmc_attn586(); /* ack inter. */
+
+ if (stat & STAT_CX) {
+ /* command with I-bit set complete */
+ elmc_xmt_int(dev);
+ }
+ if (stat & STAT_FR) {
+ /* received a frame */
+ elmc_rcv_int(dev);
+ }
+#ifndef NO_NOPCOMMANDS
+ if (stat & STAT_CNA) {
+ /* CU went 'not ready' */
+ if (netif_running(dev)) {
+ pr_warning("%s: oops! CU has left active state. stat: %04x/%04x.\n",
+ dev->name, (int) stat, (int) p->scb->status);
+ }
+ }
+#endif
+
+ if (stat & STAT_RNR) {
+ /* RU went 'not ready' */
+
+ if (p->scb->status & RU_SUSPEND) {
+ /* special case: RU_SUSPEND */
+
+ WAIT_4_SCB_CMD();
+ p->scb->cmd = RUC_RESUME;
+ elmc_attn586();
+ } else {
+ pr_warning("%s: Receiver-Unit went 'NOT READY': %04x/%04x.\n",
+ dev->name, (int) stat, (int) p->scb->status);
+ elmc_rnr_int(dev);
+ }
+ }
+ WAIT_4_SCB_CMD(); /* wait for ack. (elmc_xmt_int can be faster than ack!!) */
+ if (p->scb->cmd) { /* timed out? */
+ break;
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/*******************************************************
+ * receive-interrupt
+ */
+
+static void elmc_rcv_int(struct net_device *dev)
+{
+ int status;
+ unsigned short totlen;
+ struct sk_buff *skb;
+ struct rbd_struct *rbd;
+ struct priv *p = netdev_priv(dev);
+
+ for (; (status = p->rfd_top->status) & STAT_COMPL;) {
+ rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);
+
+ if (status & STAT_OK) { /* frame received without error? */
+ if ((totlen = rbd->status) & RBD_LAST) { /* the first and the last buffer? */
+ totlen &= RBD_MASK; /* length of this frame */
+ rbd->status = 0;
+ skb = (struct sk_buff *) dev_alloc_skb(totlen + 2);
+ if (skb != NULL) {
+ skb_reserve(skb, 2); /* 16 byte alignment */
+ skb_put(skb,totlen);
+ skb_copy_to_linear_data(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += totlen;
+ } else {
+ dev->stats.rx_dropped++;
+ }
+ } else {
+ pr_warning("%s: received oversized frame.\n", dev->name);
+ dev->stats.rx_dropped++;
+ }
+ } else { /* frame !(ok), only with 'save-bad-frames' */
+ pr_warning("%s: oops! rfd-error-status: %04x\n", dev->name, status);
+ dev->stats.rx_errors++;
+ }
+ p->rfd_top->status = 0;
+ p->rfd_top->last = RFD_SUSP;
+ p->rfd_last->last = 0; /* delete RU_SUSP */
+ p->rfd_last = p->rfd_top;
+ p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */
+ }
+}
+
+/**********************************************************
+ * handle 'Receiver went not ready'.
+ */
+
+static void elmc_rnr_int(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+
+ dev->stats.rx_errors++;
+
+ WAIT_4_SCB_CMD(); /* wait for the last cmd */
+ p->scb->cmd = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */
+ elmc_attn586();
+ WAIT_4_SCB_CMD(); /* wait for accept cmd. */
+
+ alloc_rfa(dev, (char *) p->rfd_first);
+ startrecv586(dev); /* restart RU */
+
+ pr_warning("%s: Receive-Unit restarted. Status: %04x\n", dev->name, p->scb->status);
+
+}
+
+/**********************************************************
+ * handle xmit - interrupt
+ */
+
+static void elmc_xmt_int(struct net_device *dev)
+{
+ int status;
+ struct priv *p = netdev_priv(dev);
+
+ status = p->xmit_cmds[p->xmit_last]->cmd_status;
+ if (!(status & STAT_COMPL)) {
+ pr_warning("%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name);
+ }
+ if (status & STAT_OK) {
+ dev->stats.tx_packets++;
+ dev->stats.collisions += (status & TCMD_MAXCOLLMASK);
+ } else {
+ dev->stats.tx_errors++;
+ if (status & TCMD_LATECOLL) {
+ pr_warning("%s: late collision detected.\n", dev->name);
+ dev->stats.collisions++;
+ } else if (status & TCMD_NOCARRIER) {
+ dev->stats.tx_carrier_errors++;
+ pr_warning("%s: no carrier detected.\n", dev->name);
+ } else if (status & TCMD_LOSTCTS) {
+ pr_warning("%s: loss of CTS detected.\n", dev->name);
+ } else if (status & TCMD_UNDERRUN) {
+ dev->stats.tx_fifo_errors++;
+ pr_warning("%s: DMA underrun detected.\n", dev->name);
+ } else if (status & TCMD_MAXCOLL) {
+ pr_warning("%s: Max. collisions exceeded.\n", dev->name);
+ dev->stats.collisions += 16;
+ }
+ }
+
+#if (NUM_XMIT_BUFFS != 1)
+ if ((++p->xmit_last) == NUM_XMIT_BUFFS) {
+ p->xmit_last = 0;
+ }
+#endif
+
+ netif_wake_queue(dev);
+}
+
+/***********************************************************
+ * (re)start the receiver
+ */
+
+static void startrecv586(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+
+ p->scb->rfa_offset = make16(p->rfd_first);
+ p->scb->cmd = RUC_START;
+ elmc_attn586(); /* start cmd. */
+ WAIT_4_SCB_CMD(); /* wait for accept cmd. (no timeout!!) */
+}
+
+/******************************************************
+ * timeout
+ */
+
+static void elmc_timeout(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+ /* COMMAND-UNIT active? */
+ if (p->scb->status & CU_ACTIVE) {
+ pr_debug("%s: strange ... timeout with CU active?!?\n", dev->name);
+ pr_debug("%s: X0: %04x N0: %04x N1: %04x %d\n", dev->name,
+ (int)p->xmit_cmds[0]->cmd_status,
+ (int)p->nop_cmds[0]->cmd_status,
+ (int)p->nop_cmds[1]->cmd_status, (int)p->nop_point);
+ p->scb->cmd = CUC_ABORT;
+ elmc_attn586();
+ WAIT_4_SCB_CMD();
+ p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]);
+ p->scb->cmd = CUC_START;
+ elmc_attn586();
+ WAIT_4_SCB_CMD();
+ netif_wake_queue(dev);
+ } else {
+ pr_debug("%s: xmitter timed out, try to restart! stat: %04x\n",
+ dev->name, p->scb->status);
+ pr_debug("%s: command-stats: %04x %04x\n", dev->name,
+ p->xmit_cmds[0]->cmd_status, p->xmit_cmds[1]->cmd_status);
+ elmc_close(dev);
+ elmc_open(dev);
+ }
+}
+
+/******************************************************
+ * send frame
+ */
+
+static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ int len;
+ int i;
+#ifndef NO_NOPCOMMANDS
+ int next_nop;
+#endif
+ struct priv *p = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
+
+ if (len != skb->len)
+ memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN);
+ skb_copy_from_linear_data(skb, (char *) p->xmit_cbuffs[p->xmit_count], skb->len);
+
+#if (NUM_XMIT_BUFFS == 1)
+#ifdef NO_NOPCOMMANDS
+ p->xmit_buffs[0]->size = TBD_LAST | len;
+ for (i = 0; i < 16; i++) {
+ p->scb->cbl_offset = make16(p->xmit_cmds[0]);
+ p->scb->cmd = CUC_START;
+ p->xmit_cmds[0]->cmd_status = 0;
+ elmc_attn586();
+ if (!i) {
+ dev_kfree_skb(skb);
+ }
+ WAIT_4_SCB_CMD();
+ if ((p->scb->status & CU_ACTIVE)) { /* test it, because CU sometimes doesn't start immediately */
+ break;
+ }
+ if (p->xmit_cmds[0]->cmd_status) {
+ break;
+ }
+ if (i == 15) {
+ pr_warning("%s: Can't start transmit-command.\n", dev->name);
+ }
+ }
+#else
+ next_nop = (p->nop_point + 1) & 0x1;
+ p->xmit_buffs[0]->size = TBD_LAST | len;
+
+ p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link
+ = make16((p->nop_cmds[next_nop]));
+ p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
+
+ p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
+ p->nop_point = next_nop;
+ dev_kfree_skb(skb);
+#endif
+#else
+ p->xmit_buffs[p->xmit_count]->size = TBD_LAST | len;
+ if ((next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS) {
+ next_nop = 0;
+ }
+ p->xmit_cmds[p->xmit_count]->cmd_status = 0;
+ p->xmit_cmds[p->xmit_count]->cmd_link = p->nop_cmds[next_nop]->cmd_link
+ = make16((p->nop_cmds[next_nop]));
+ p->nop_cmds[next_nop]->cmd_status = 0;
+ p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
+ p->xmit_count = next_nop;
+ if (p->xmit_count != p->xmit_last)
+ netif_wake_queue(dev);
+ dev_kfree_skb(skb);
+#endif
+ return NETDEV_TX_OK;
+}
+
+/*******************************************
+ * Someone wants the statistics
+ */
+
+static struct net_device_stats *elmc_get_stats(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+ unsigned short crc, aln, rsc, ovrn;
+
+ crc = p->scb->crc_errs; /* get error-statistic from the ni82586 */
+ p->scb->crc_errs -= crc;
+ aln = p->scb->aln_errs;
+ p->scb->aln_errs -= aln;
+ rsc = p->scb->rsc_errs;
+ p->scb->rsc_errs -= rsc;
+ ovrn = p->scb->ovrn_errs;
+ p->scb->ovrn_errs -= ovrn;
+
+ dev->stats.rx_crc_errors += crc;
+ dev->stats.rx_fifo_errors += ovrn;
+ dev->stats.rx_frame_errors += aln;
+ dev->stats.rx_dropped += rsc;
+
+ return &dev->stats;
+}
+
+/********************************************************
+ * Set MC list ..
+ */
+
+#ifdef ELMC_MULTICAST
+static void set_multicast_list(struct net_device *dev)
+{
+ if (!dev->start) {
+ /* without a running interface, promiscuous doesn't work */
+ return;
+ }
+ dev->start = 0;
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+ dev->start = 1;
+}
+#endif
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+};
+
+#ifdef MODULE
+
+/* Increase if needed ;) */
+#define MAX_3C523_CARDS 4
+
+static struct net_device *dev_elmc[MAX_3C523_CARDS];
+static int irq[MAX_3C523_CARDS];
+static int io[MAX_3C523_CARDS];
+module_param_array(irq, int, NULL, 0);
+module_param_array(io, int, NULL, 0);
+MODULE_PARM_DESC(io, "EtherLink/MC I/O base address(es)");
+MODULE_PARM_DESC(irq, "EtherLink/MC IRQ number(s)");
+MODULE_LICENSE("GPL");
+
+int __init init_module(void)
+{
+ int this_dev,found = 0;
+
+ /* Loop until we either can't find any more cards, or we have MAX_3C523_CARDS */
+ for(this_dev=0; this_dev<MAX_3C523_CARDS; this_dev++) {
+ struct net_device *dev = alloc_etherdev(sizeof(struct priv));
+ if (!dev)
+ break;
+ dev->irq=irq[this_dev];
+ dev->base_addr=io[this_dev];
+ if (do_elmc_probe(dev) == 0) {
+ dev_elmc[this_dev] = dev;
+ found++;
+ continue;
+ }
+ free_netdev(dev);
+ if (io[this_dev]==0)
+ break;
+ pr_warning("3c523.c: No 3c523 card found at io=%#x\n",io[this_dev]);
+ }
+
+ if(found==0) {
+ if (io[0]==0)
+ pr_notice("3c523.c: No 3c523 cards found\n");
+ return -ENXIO;
+ } else return 0;
+}
+
+void __exit cleanup_module(void)
+{
+ int this_dev;
+ for (this_dev=0; this_dev<MAX_3C523_CARDS; this_dev++) {
+ struct net_device *dev = dev_elmc[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/i825xx/3c523.h b/drivers/net/ethernet/i825xx/3c523.h
new file mode 100644
index 000000000000..6956441687b9
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/3c523.h
@@ -0,0 +1,355 @@
+#ifndef _3c523_INCLUDE_
+#define _3c523_INCLUDE_
+/*
+ This is basically a hacked version of ni52.h, for the 3c523
+ Etherlink/MC.
+*/
+
+/*
+ * Intel i82586 Ethernet definitions
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same GNU General Public License that covers that work.
+ *
+ * Copyright 1995 by Chris Beauregard (cpbeaure@undergrad.math.uwaterloo.ca)
+ *
+ * See 3c523.c for details.
+ *
+ * $Header: /home/chrisb/linux-1.2.13-3c523/drivers/net/RCS/3c523.h,v 1.6 1996/01/20 05:09:00 chrisb Exp chrisb $
+ */
+
+/*
+ * where to find the System Configuration Pointer (SCP)
+ */
+#define SCP_DEFAULT_ADDRESS 0xfffff4
+
+
+/*
+ * System Configuration Pointer Struct
+ */
+
+struct scp_struct
+{
+ unsigned short zero_dum0; /* has to be zero */
+ unsigned char sysbus; /* 0=16Bit,1=8Bit */
+ unsigned char zero_dum1; /* has to be zero for 586 */
+ unsigned short zero_dum2;
+ unsigned short zero_dum3;
+ char *iscp; /* pointer to the iscp-block */
+};
+
+
+/*
+ * Intermediate System Configuration Pointer (ISCP)
+ */
+struct iscp_struct
+{
+ unsigned char busy; /* 586 clears after successful init */
+ unsigned char zero_dummy; /* has to be zero */
+ unsigned short scb_offset; /* pointer offset to the scb_base */
+ char *scb_base; /* base address of all 16-bit offsets */
+};
+
+/*
+ * System Control Block (SCB)
+ */
+struct scb_struct
+{
+ unsigned short status; /* status word */
+ unsigned short cmd; /* command word */
+ unsigned short cbl_offset; /* pointer offset, command block list */
+ unsigned short rfa_offset; /* pointer offset, receive frame area */
+ unsigned short crc_errs; /* CRC error counter */
+ unsigned short aln_errs; /* alignment error counter */
+ unsigned short rsc_errs; /* resource error counter */
+ unsigned short ovrn_errs; /* overrun error counter */
+};
+
+/*
+ * possible command values for the command word
+ */
+#define RUC_MASK 0x0070 /* mask for RU commands */
+#define RUC_NOP 0x0000 /* NOP-command */
+#define RUC_START 0x0010 /* start RU */
+#define RUC_RESUME 0x0020 /* resume RU after suspend */
+#define RUC_SUSPEND 0x0030 /* suspend RU */
+#define RUC_ABORT 0x0040 /* abort receiver operation immediately */
+
+#define CUC_MASK 0x0700 /* mask for CU command */
+#define CUC_NOP 0x0000 /* NOP-command */
+#define CUC_START 0x0100 /* start execution of the first cmd on the CBL */
+#define CUC_RESUME 0x0200 /* resume after suspend */
+#define CUC_SUSPEND 0x0300 /* Suspend CU */
+#define CUC_ABORT 0x0400 /* abort command operation immediately */
+
+#define ACK_MASK 0xf000 /* mask for ACK command */
+#define ACK_CX 0x8000 /* acknowledges STAT_CX */
+#define ACK_FR 0x4000 /* ack. STAT_FR */
+#define ACK_CNA 0x2000 /* ack. STAT_CNA */
+#define ACK_RNR 0x1000 /* ack. STAT_RNR */
+
+/*
+ * possible status values for the status word
+ */
+#define STAT_MASK 0xf000 /* mask for cause of interrupt */
+#define STAT_CX 0x8000 /* CU finished cmd with its I bit set */
+#define STAT_FR 0x4000 /* RU finished receiving a frame */
+#define STAT_CNA 0x2000 /* CU left active state */
+#define STAT_RNR 0x1000 /* RU left ready state */
+
+#define CU_STATUS 0x700 /* CU status, 0=idle */
+#define CU_SUSPEND 0x100 /* CU is suspended */
+#define CU_ACTIVE 0x200 /* CU is active */
+
+#define RU_STATUS 0x70 /* RU status, 0=idle */
+#define RU_SUSPEND 0x10 /* RU suspended */
+#define RU_NOSPACE 0x20 /* RU no resources */
+#define RU_READY 0x40 /* RU is ready */
+
+/*
+ * Receive Frame Descriptor (RFD)
+ */
+struct rfd_struct
+{
+ unsigned short status; /* status word */
+ unsigned short last; /* bit 15: last frame on list / bit 14: suspend */
+ unsigned short next; /* link offset to next RFD */
+ unsigned short rbd_offset; /* pointer offset to RBD buffer */
+ unsigned char dest[6]; /* ethernet-address, destination */
+ unsigned char source[6]; /* ethernet-address, source */
+ unsigned short length; /* 802.3 frame-length */
+ unsigned short zero_dummy; /* dummy */
+};
+
+#define RFD_LAST 0x8000 /* last: last rfd in the list */
+#define RFD_SUSP 0x4000 /* last: suspend RU after */
+#define RFD_ERRMASK 0x0fe1 /* status: error mask */
+#define RFD_MATCHADD 0x0002 /* status: destination address does not match IA */
+#define RFD_RNR 0x0200 /* status: receiver out of resources */
+
+/*
+ * Receive Buffer Descriptor (RBD)
+ */
+struct rbd_struct
+{
+ unsigned short status; /* status word, number of used bytes in buff */
+ unsigned short next; /* pointer offset to next RBD */
+ char *buffer; /* receive buffer address pointer */
+ unsigned short size; /* size of this buffer */
+ unsigned short zero_dummy; /* dummy */
+};
+
+#define RBD_LAST 0x8000 /* last buffer */
+#define RBD_USED 0x4000 /* this buffer has data */
+#define RBD_MASK 0x3fff /* size-mask for length */
+
+/*
+ * Status values for commands/RFD
+ */
+#define STAT_COMPL 0x8000 /* status: frame/command is complete */
+#define STAT_BUSY 0x4000 /* status: frame/command is busy */
+#define STAT_OK 0x2000 /* status: frame/command is ok */
+
+/*
+ * Action-Commands
+ */
+#define CMD_NOP 0x0000 /* NOP */
+#define CMD_IASETUP 0x0001 /* initial address setup command */
+#define CMD_CONFIGURE 0x0002 /* configure command */
+#define CMD_MCSETUP 0x0003 /* MC setup command */
+#define CMD_XMIT 0x0004 /* transmit command */
+#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */
+#define CMD_DUMP 0x0006 /* dump command */
+#define CMD_DIAGNOSE 0x0007 /* diagnose command */
+
+/*
+ * Action command bits
+ */
+#define CMD_LAST 0x8000 /* indicates last command in the CBL */
+#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */
+#define CMD_INT 0x2000 /* generate interrupt after execution */
+
+/*
+ * NOP - command
+ */
+struct nop_cmd_struct
+{
+ unsigned short cmd_status; /* status of this command */
+ unsigned short cmd_cmd; /* the command itself (+bits) */
+ unsigned short cmd_link; /* offset pointer to next command */
+};
+
+/*
+ * IA Setup command
+ */
+struct iasetup_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned char iaddr[6];
+};
+
+/*
+ * Configure command
+ */
+struct configure_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned char byte_cnt; /* size of the config-cmd */
+ unsigned char fifo; /* fifo/recv monitor */
+ unsigned char sav_bf; /* save bad frames (bit 7 = 1) */
+ unsigned char adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopback(6-7) */
+ unsigned char priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
+ unsigned char ifs; /* inter frame spacing */
+ unsigned char time_low; /* slot time low */
+ unsigned char time_high; /* slot time high(0-2) and max. retries(4-7) */
+ unsigned char promisc; /* promisc mode(0), et al(1-7) */
+ unsigned char carr_coll; /* carrier(0-3)/collision(4-7) stuff */
+ unsigned char fram_len; /* minimum frame length */
+ unsigned char dummy; /* dummy */
+};
+
+/*
+ * Multicast Setup command
+ */
+struct mcsetup_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short mc_cnt; /* number of bytes in the MC-List */
+ unsigned char mc_list[0][6]; /* pointer to 6 bytes entries */
+};
+
+/*
+ * transmit command
+ */
+struct transmit_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short tbd_offset; /* pointer offset to TBD */
+ unsigned char dest[6]; /* destination address of the frame */
+ unsigned short length; /* user defined: 802.3 length / Ether type */
+};
+
+#define TCMD_ERRMASK 0x0fa0
+#define TCMD_MAXCOLLMASK 0x000f
+#define TCMD_MAXCOLL 0x0020
+#define TCMD_HEARTBEAT 0x0040
+#define TCMD_DEFERRED 0x0080
+#define TCMD_UNDERRUN 0x0100
+#define TCMD_LOSTCTS 0x0200
+#define TCMD_NOCARRIER 0x0400
+#define TCMD_LATECOLL 0x0800
+
+struct tdr_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short status;
+};
+
+#define TDR_LNK_OK 0x8000 /* No link problem identified */
+#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */
+#define TDR_ET_OPN 0x2000 /* open, no correct termination */
+#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */
+#define TDR_TIMEMASK 0x07ff /* mask for the time field */
+
+/*
+ * Transmit Buffer Descriptor (TBD)
+ */
+struct tbd_struct
+{
+ unsigned short size; /* size + EOF-Flag(15) */
+ unsigned short next; /* pointer offset to next TBD */
+ char *buffer; /* pointer to buffer */
+};
+
+#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */
+
+/*************************************************************************/
+/*
+Verbatim from the Crynwyr stuff:
+
+ The 3c523 responds with adapter code 0x6042 at slot
+registers xxx0 and xxx1. The setup register is at xxx2 and
+contains the following bits:
+
+0: card enable
+2,1: csr address select
+ 00 = 0300
+ 01 = 1300
+ 10 = 2300
+ 11 = 3300
+4,3: shared memory address select
+ 00 = 0c0000
+ 01 = 0c8000
+ 10 = 0d0000
+ 11 = 0d8000
+5: set to disable on-board thinnet
+7,6: (read-only) shows selected irq
+ 00 = 12
+ 01 = 7
+ 10 = 3
+ 11 = 9
+
+The interrupt-select register is at xxx3 and uses one bit per irq.
+
+0: int 12
+1: int 7
+2: int 3
+3: int 9
+
+ Again, the documentation stresses that the setup register
+should never be written. The interrupt-select register may be
+written with the value corresponding to bits 7,6 in
+the setup register to ensure correct setup.
+*/
+
+/* Offsets from the base I/O address. */
+#define ELMC_SA 0 /* first 6 bytes are IEEE network address */
+#define ELMC_CTRL 6 /* control & status register */
+#define ELMC_REVISION 7 /* revision register, first 4 bits only */
+#define ELMC_IO_EXTENT 8
+
+/* these are the bit selects for the port register 2 */
+#define ELMC_STATUS_ENABLED 0x01
+#define ELMC_STATUS_CSR_SELECT 0x06
+#define ELMC_STATUS_MEMORY_SELECT 0x18
+#define ELMC_STATUS_DISABLE_THIN 0x20
+#define ELMC_STATUS_IRQ_SELECT 0xc0
+
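+/*
+ * Illustrative sketch only; not part of the driver. Decoding the
+ * setup register (see the Crynwyr notes above) with the
+ * ELMC_STATUS_* masks; the table values follow that description.
+ */
+#if 0
+static unsigned short elmc_csr_base(unsigned char setup)
+{
+ static const unsigned short bases[4] = { 0x0300, 0x1300, 0x2300, 0x3300 };
+ return bases[(setup & ELMC_STATUS_CSR_SELECT) >> 1]; /* bits 2,1 */
+}
+
+static int elmc_irq_line(unsigned char setup)
+{
+ static const int irqs[4] = { 12, 7, 3, 9 };
+ return irqs[(setup & ELMC_STATUS_IRQ_SELECT) >> 6]; /* bits 7,6 */
+}
+#endif
+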
+/* this is the card id used in the detection code. You might recognize
+it from @6042.adf */
+#define ELMC_MCA_ID 0x6042
+
+/*
+ The following define the bits for the control & status register
+
+ The bank select registers can be used if more than 16K of memory is
+ on the card. For some stupid reason, bank 3 is the one for the
+ bottom 16K, and the card defaults to bank 0. So we have to set the
+ bank to 3 before the card will even think of operating. To get bank
+ 3, set BS0 and BS1 to high (of course...)
+*/
+#define ELMC_CTRL_BS0 0x01 /* RW bank select */
+#define ELMC_CTRL_BS1 0x02 /* RW bank select */
+#define ELMC_CTRL_INTE 0x04 /* RW interrupt enable, assert high */
+#define ELMC_CTRL_INT 0x08 /* R interrupt active, assert high */
+/*#define ELMC_CTRL_* 0x10*/ /* reserved */
+#define ELMC_CTRL_LBK 0x20 /* RW loopback enable, assert high */
+#define ELMC_CTRL_CA 0x40 /* RW channel attention, assert high */
+#define ELMC_CTRL_RST 0x80 /* RW 82586 reset, assert low */
+
+/* some handy compound bits */
+
+/* normal operation should have bank 3 and RST high, ints enabled */
+#define ELMC_NORMAL (ELMC_CTRL_INTE|ELMC_CTRL_RST|0x3)
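+
+/* Example (sketch, not in the driver): enabling normal operation is
+ * then a single control-register write:
+ *
+ * outb(ELMC_NORMAL, dev->base_addr + ELMC_CTRL);
+ */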
+
+#endif /* _3c523_INCLUDE_ */
diff --git a/drivers/net/ethernet/i825xx/3c527.c b/drivers/net/ethernet/i825xx/3c527.c
new file mode 100644
index 000000000000..474b5e71a53a
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/3c527.c
@@ -0,0 +1,1661 @@
+/* 3c527.c: 3Com Etherlink/MC32 driver for Linux 2.4 and 2.6.
+ *
+ * (c) Copyright 1998 Red Hat Software Inc
+ * Written by Alan Cox.
+ * Further debugging by Carl Drougge.
+ * Initial SMP support by Felipe W Damasio <felipewd@terra.com.br>
+ * Heavily modified by Richard Procter <rnp@paradise.net.nz>
+ *
+ * Based on skeleton.c written 1993-94 by Donald Becker and ne2.c
+ * (for the MCA stuff) written by Wim Dumon.
+ *
+ * Thanks to 3Com for making this possible by providing me with the
+ * documentation.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#define DRV_NAME "3c527"
+#define DRV_VERSION "0.7-SMP"
+#define DRV_RELDATE "2003/09/21"
+
+static const char *version =
+DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter <rnp@paradise.net.nz>\n";
+
+/**
+ * DOC: Traps for the unwary
+ *
+ * The diagram (Figure 1-1) and the POS summary disagree with the
+ * "Interrupt Level" section in the manual.
+ *
+ * The manual contradicts itself when describing the minimum number
+ * of buffers in the 'configure lists' command.
+ * My card accepts a buffer config of 4/4.
+ *
+ * Setting the SAV BP bit does not save bad packets, but
+ * only enables RX on-card stats collection.
+ *
+ * The documentation in places seems to miss things. In actual fact
+ * I've always eventually found everything is documented; it just
+ * requires careful study.
+ *
+ * DOC: Theory Of Operation
+ *
+ * The 3Com 3c527 is a 32-bit MCA bus-mastering adapter with a large
+ * amount of on board intelligence that housekeeps a somewhat dumber
+ * Intel NIC. For performance we want to keep the transmit queue deep
+ * as the card can transmit packets while fetching others from main
+ * memory by bus master DMA. Transmission and reception are driven by
+ * circular buffer queues.
+ *
+ * The mailboxes can be used for controlling how the card traverses
+ * its buffer rings, but are used only for initial setup in this
+ * implementation. The exec mailbox allows a variety of commands to
+ * be executed. Each command must complete before the next is
+ * executed. Primarily we use the exec mailbox for controlling the
+ * multicast lists. We have to do a certain amount of interesting
+ * hoop jumping, as multicast list changes can occur in interrupt
+ * state when the card has an exec command pending. We defer such
+ * events until the command completion interrupt.
+ *
+ * A copy break scheme (taken from 3c59x.c) is employed whereby
+ * received frames exceeding a configurable length are passed
+ * directly to the higher networking layers without incurring a copy,
+ * in what amounts to a time/space trade-off.
+ *
+ * The card also keeps a large amount of statistical information
+ * on-board. In a perfect world, these could be used safely at no
+ * cost. However, lacking information to the contrary, processing
+ * them without races would involve so much extra complexity as to
+ * make it not worthwhile to do so. In the end, a hybrid SW/HW
+ * implementation was made necessary --- see mc32_update_stats().
+ *
+ * DOC: Notes
+ *
+ * It should be possible to use two or more cards, but at this stage
+ * only by loading two copies of the same module.
+ *
+ * The on-board 82586 NIC has trouble receiving multiple
+ * back-to-back frames and so is likely to drop packets from fast
+ * senders.
+**/
+
+#include <linux/module.h>
+
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/mca-legacy.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/ethtool.h>
+#include <linux/completion.h>
+#include <linux/bitops.h>
+#include <linux/semaphore.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include "3c527.h"
+
+MODULE_LICENSE("GPL");
+
+/*
+ * The name of the card, used for messages and in requests for
+ * I/O regions, IRQs and DMA channels.
+ */
+static const char* cardname = DRV_NAME;
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 2
+#endif
+
+static unsigned int mc32_debug = NET_DEBUG;
+
+/* The number of low I/O ports used by the ethercard. */
+#define MC32_IO_EXTENT 8
+
+/* As implemented, values must be a power-of-2 -- 4/8/16/32 */
+#define TX_RING_LEN 32 /* Typically the card supports 37 */
+#define RX_RING_LEN 8 /* " " " */
+
+/* Copy break point, see above for details.
+ * Setting to > 1512 effectively disables this feature. */
+#define RX_COPYBREAK 200 /* Value from 3c59x.c */
+
+/* Issue the 82586 workaround command - this is for "busy lans", but
+ * these days that means basically all lans - it has a performance
+ * (latency) cost, but is best left set. */
+static const int WORKAROUND_82586=1;
+
+/* Pointers to buffers and their on-card records */
+struct mc32_ring_desc
+{
+ volatile struct skb_header *p;
+ struct sk_buff *skb;
+};
+
+/* Information that needs to be kept for each board. */
+struct mc32_local
+{
+ int slot;
+
+ u32 base;
+ volatile struct mc32_mailbox *rx_box;
+ volatile struct mc32_mailbox *tx_box;
+ volatile struct mc32_mailbox *exec_box;
+ volatile struct mc32_stats *stats; /* Start of on-card statistics */
+ u16 tx_chain; /* Transmit list start offset */
+ u16 rx_chain; /* Receive list start offset */
+ u16 tx_len; /* Transmit list count */
+ u16 rx_len; /* Receive list count */
+
+ u16 xceiver_desired_state; /* HALTED or RUNNING */
+ u16 cmd_nonblocking; /* Thread is uninterested in command result */
+ u16 mc_reload_wait; /* A multicast load request is pending */
+ u32 mc_list_valid; /* True when the mclist is set */
+
+ struct mc32_ring_desc tx_ring[TX_RING_LEN]; /* Host Transmit ring */
+ struct mc32_ring_desc rx_ring[RX_RING_LEN]; /* Host Receive ring */
+
+ atomic_t tx_count; /* buffers left */
+ atomic_t tx_ring_head; /* index to tx en-queue end */
+ u16 tx_ring_tail; /* index to tx de-queue end */
+
+ u16 rx_ring_tail; /* index to rx de-queue end */
+
+ struct semaphore cmd_mutex; /* Serialises issuing of execute commands */
+ struct completion execution_cmd; /* Card has completed an execute command */
+ struct completion xceiver_cmd; /* Card has completed a tx or rx command */
+};
+
+/* The station (ethernet) address prefix, used for a sanity check. */
+#define SA_ADDR0 0x02
+#define SA_ADDR1 0x60
+#define SA_ADDR2 0xAC
+
+struct mca_adapters_t {
+ unsigned int id;
+ char *name;
+};
+
+static const struct mca_adapters_t mc32_adapters[] = {
+ { 0x0041, "3COM EtherLink MC/32" },
+ { 0x8EF5, "IBM High Performance Lan Adapter" },
+ { 0x0000, NULL }
+};
+
+
+/* Macros for ring index manipulations */
+static inline u16 next_rx(u16 rx) { return (rx+1)&(RX_RING_LEN-1); };
+static inline u16 prev_rx(u16 rx) { return (rx-1)&(RX_RING_LEN-1); };
+
+static inline u16 next_tx(u16 tx) { return (tx+1)&(TX_RING_LEN-1); };
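+
+/* e.g. with RX_RING_LEN == 8: next_rx(7) == 0 and prev_rx(0) == 7;
+ * the masking relies on the ring lengths being powers of two. */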
+
+
+/* Index to functions, as function prototypes. */
+static int mc32_probe1(struct net_device *dev, int ioaddr);
+static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len);
+static int mc32_open(struct net_device *dev);
+static void mc32_timeout(struct net_device *dev);
+static netdev_tx_t mc32_send_packet(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t mc32_interrupt(int irq, void *dev_id);
+static int mc32_close(struct net_device *dev);
+static struct net_device_stats *mc32_get_stats(struct net_device *dev);
+static void mc32_set_multicast_list(struct net_device *dev);
+static void mc32_reset_multicast_list(struct net_device *dev);
+static const struct ethtool_ops netdev_ethtool_ops;
+
+static void cleanup_card(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ unsigned slot = lp->slot;
+ mca_mark_as_unused(slot);
+ mca_set_adapter_name(slot, NULL);
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, MC32_IO_EXTENT);
+}
+
+/**
+ * mc32_probe - Search for supported boards
+ * @unit: interface number to use
+ *
+ * Because the MCA bus is a real bus and we can scan for cards, we
+ * could do a single scan for all boards here. Right now we use the
+ * passed-in device structure and scan for only one board. This needs
+ * fixing for modules in particular.
+ */
+
+struct net_device *__init mc32_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct mc32_local));
+ static int current_mca_slot = -1;
+ int i;
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0)
+ sprintf(dev->name, "eth%d", unit);
+
+ /* Do not check any supplied i/o locations.
+ POS registers usually don't fail :) */
+
+ /* MCA cards have POS registers.
+ Autodetecting MCA cards is extremely simple.
+ Just search for the card. */
+
+ for(i = 0; (mc32_adapters[i].name != NULL); i++) {
+ current_mca_slot =
+ mca_find_unused_adapter(mc32_adapters[i].id, 0);
+
+ if(current_mca_slot != MCA_NOTFOUND) {
+ if(!mc32_probe1(dev, current_mca_slot))
+ {
+ mca_set_adapter_name(current_mca_slot,
+ mc32_adapters[i].name);
+ mca_mark_as_used(current_mca_slot);
+ err = register_netdev(dev);
+ if (err) {
+ cleanup_card(dev);
+ free_netdev(dev);
+ dev = ERR_PTR(err);
+ }
+ return dev;
+ }
+
+ }
+ }
+ free_netdev(dev);
+ return ERR_PTR(-ENODEV);
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = mc32_open,
+ .ndo_stop = mc32_close,
+ .ndo_start_xmit = mc32_send_packet,
+ .ndo_get_stats = mc32_get_stats,
+ .ndo_set_rx_mode = mc32_set_multicast_list,
+ .ndo_tx_timeout = mc32_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/**
+ * mc32_probe1 - Check a given slot for a board and test the card
+ * @dev: Device structure to fill in
+ * @slot: The MCA bus slot being used by this card
+ *
+ * Decode the slot data and configure the card structures. Having done this we
+ * can reset the card and configure it. The card does a full self test cycle
+ * in firmware so we have to wait for it to return and post us either a
+ * failure case or some addresses we use to find the board internals.
+ */
+
+static int __init mc32_probe1(struct net_device *dev, int slot)
+{
+ static unsigned version_printed;
+ int i, err;
+ u8 POS;
+ u32 base;
+ struct mc32_local *lp = netdev_priv(dev);
+ static const u16 mca_io_bases[] = {
+ 0x7280,0x7290,
+ 0x7680,0x7690,
+ 0x7A80,0x7A90,
+ 0x7E80,0x7E90
+ };
+ static const u32 mca_mem_bases[] = {
+ 0x00C0000,
+ 0x00C4000,
+ 0x00C8000,
+ 0x00CC000,
+ 0x00D0000,
+ 0x00D4000,
+ 0x00D8000,
+ 0x00DC000
+ };
+ static const char * const failures[] = {
+ "Processor instruction",
+ "Processor data bus",
+ "Processor data bus",
+ "Processor data bus",
+ "Adapter bus",
+ "ROM checksum",
+ "Base RAM",
+ "Extended RAM",
+ "82586 internal loopback",
+ "82586 initialisation failure",
+ "Adapter list configuration error"
+ };
+
+ /* Time to play MCA games */
+
+ if (mc32_debug && version_printed++ == 0)
+ pr_debug("%s", version);
+
+ pr_info("%s: %s found in slot %d: ", dev->name, cardname, slot);
+
+ POS = mca_read_stored_pos(slot, 2);
+
+ if(!(POS&1))
+ {
+ pr_cont("disabled.\n");
+ return -ENODEV;
+ }
+
+ /* Fill in the 'dev' fields. */
+ dev->base_addr = mca_io_bases[(POS>>1)&7];
+ dev->mem_start = mca_mem_bases[(POS>>4)&7];
+
+ POS = mca_read_stored_pos(slot, 4);
+ if(!(POS&1))
+ {
+ pr_cont("memory window disabled.\n");
+ return -ENODEV;
+ }
+
+ POS = mca_read_stored_pos(slot, 5);
+
+ i=(POS>>4)&3;
+ if(i==3)
+ {
+ pr_cont("invalid memory window.\n");
+ return -ENODEV;
+ }
+
+ i*=16384;
+ i+=16384;
+
+ dev->mem_end=dev->mem_start + i;
+
+ dev->irq = ((POS>>2)&3)+9;
+
+ if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname))
+ {
+ pr_cont("io 0x%3lX, which is busy.\n", dev->base_addr);
+ return -EBUSY;
+ }
+
+ pr_cont("io 0x%3lX irq %d mem 0x%lX (%dK)\n",
+ dev->base_addr, dev->irq, dev->mem_start, i/1024);
+
+
+ /* We ought to set the cache line size here.. */
+
+
+ /*
+ * Go PROM browsing
+ */
+
+ /* Retrieve and print the ethernet address. */
+ for (i = 0; i < 6; i++)
+ {
+ mca_write_pos(slot, 6, i+12);
+ mca_write_pos(slot, 7, 0);
+
+ dev->dev_addr[i] = mca_read_pos(slot,3);
+ }
+
+ pr_info("%s: Address %pM ", dev->name, dev->dev_addr);
+
+ mca_write_pos(slot, 6, 0);
+ mca_write_pos(slot, 7, 0);
+
+ POS = mca_read_stored_pos(slot, 4);
+
+ if(POS&2)
+ pr_cont(": BNC port selected.\n");
+ else
+ pr_cont(": AUI port selected.\n");
+
+ POS=inb(dev->base_addr+HOST_CTRL);
+ POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET;
+ POS&=~HOST_CTRL_INTE;
+ outb(POS, dev->base_addr+HOST_CTRL);
+ /* Reset adapter */
+ udelay(100);
+ /* Reset off */
+ POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET);
+ outb(POS, dev->base_addr+HOST_CTRL);
+
+ udelay(300);
+
+ /*
+ * Grab the IRQ
+ */
+
+ err = request_irq(dev->irq, mc32_interrupt, IRQF_SHARED, DRV_NAME, dev);
+ if (err) {
+ release_region(dev->base_addr, MC32_IO_EXTENT);
+ pr_err("%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
+ goto err_exit_ports;
+ }
+
+ memset(lp, 0, sizeof(struct mc32_local));
+ lp->slot = slot;
+
+ i=0;
+
+ base = inb(dev->base_addr);
+
+ while(base == 0xFF)
+ {
+ i++;
+ if(i == 1000)
+ {
+ pr_err("%s: failed to boot adapter.\n", dev->name);
+ err = -ENODEV;
+ goto err_exit_irq;
+ }
+ udelay(1000);
+ if(inb(dev->base_addr+2)&(1<<5))
+ base = inb(dev->base_addr);
+ }
+
+ if(base>0)
+ {
+ if(base < 0x0C)
+ pr_err("%s: %s%s.\n", dev->name, failures[base-1],
+ base<0x0A?" test failure":"");
+ else
+ pr_err("%s: unknown failure %d.\n", dev->name, base);
+ err = -ENODEV;
+ goto err_exit_irq;
+ }
+
+ base=0;
+ for(i=0;i<4;i++)
+ {
+ int n=0;
+
+ while(!(inb(dev->base_addr+2)&(1<<5)))
+ {
+ n++;
+ udelay(50);
+ if(n>100)
+ {
+ pr_err("%s: mailbox read fail (%d).\n", dev->name, i);
+ err = -ENODEV;
+ goto err_exit_irq;
+ }
+ }
+
+ base|=(inb(dev->base_addr)<<(8*i));
+ }
+
+ lp->exec_box=isa_bus_to_virt(dev->mem_start+base);
+
+ base=lp->exec_box->data[1]<<16|lp->exec_box->data[0];
+
+ lp->base = dev->mem_start+base;
+
+ lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]);
+ lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]);
+
+ lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]);
+
+ /*
+ * Descriptor chains (card relative)
+ */
+
+ lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */
+ lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */
+ lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
+ lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
+
+ sema_init(&lp->cmd_mutex, 0);
+ init_completion(&lp->execution_cmd);
+ init_completion(&lp->xceiver_cmd);
+
+ pr_info("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
+ dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);
+
+ dev->netdev_ops = &netdev_ops;
+ dev->watchdog_timeo = HZ*5; /* Board does all the work */
+ dev->ethtool_ops = &netdev_ethtool_ops;
+
+ return 0;
+
+err_exit_irq:
+ free_irq(dev->irq, dev);
+err_exit_ports:
+ release_region(dev->base_addr, MC32_IO_EXTENT);
+ return err;
+}
+
+
+/**
+ * mc32_ready_poll - wait until we can feed it a command
+ * @dev: The device to wait for
+ *
+ * Wait until the card becomes ready to accept a command via the
+ * command register. This tells us nothing about the completion
+ * status of any pending commands and takes very little time at all.
+ */
+
+static inline void mc32_ready_poll(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR));
+}
+
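+/*
+ * Sketch (assumption, not in the driver): a bounded variant of the
+ * poll above, should CRR ever fail to assert:
+ *
+ * int n = 10000;
+ * while (!(inb(ioaddr + HOST_STATUS) & HOST_STATUS_CRR) && --n)
+ * udelay(10);
+ */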
+
+/**
+ * mc32_command_nowait - send a command non blocking
+ * @dev: The 3c527 to issue the command to
+ * @cmd: The command word to write to the mailbox
+ * @data: A data block if the command expects one
+ * @len: Length of the data block
+ *
+ * Send a command from interrupt state. If there is a command
+ * currently being executed then we return an error of -1. It
+ * simply isn't viable to wait around as commands may be
+ * slow. This can theoretically be starved on SMP, but it's hard
+ * to see a realistic situation. We do not wait for the command
+ * to complete --- we rely on the interrupt handler to tidy up
+ * after us.
+ */
+
+static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int ret = -1;
+
+ if (down_trylock(&lp->cmd_mutex) == 0)
+ {
+ lp->cmd_nonblocking=1;
+ lp->exec_box->mbox=0;
+ lp->exec_box->mbox=cmd;
+ memcpy((void *)lp->exec_box->data, data, len);
+ barrier(); /* the memcpy forgot the volatile so be sure */
+
+ /* Send the command */
+ mc32_ready_poll(dev);
+ outb(1<<6, ioaddr+HOST_CMD);
+
+ ret = 0;
+
+ /* Interrupt handler will signal mutex on completion */
+ }
+
+ return ret;
+}
+
+
+/**
+ * mc32_command - send a command and sleep until completion
+ * @dev: The 3c527 card to issue the command to
+ * @cmd: The command word to write to the mailbox
+ * @data: A data block if the command expects one
+ * @len: Length of the data block
+ *
+ * Sends exec commands in a user context. This permits us to wait around
+ * for the replies and also to wait for the command buffer to complete
+ * from a previous command before we execute our command. After our
+ * command completes we will attempt any pending multicast reload
+ * we blocked off by hogging the exec buffer.
+ *
+ * You feed the card a command, you wait, it interrupts, you get a
+ * reply. All well and good. The complication arises because you use
+ * commands for filter list changes, which come in at bh level from
+ * things like IPv6 group stuff.
+ */
+
+static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int ret = 0;
+
+ down(&lp->cmd_mutex);
+
+ /*
+ * My Turn
+ */
+
+ lp->cmd_nonblocking=0;
+ lp->exec_box->mbox=0;
+ lp->exec_box->mbox=cmd;
+ memcpy((void *)lp->exec_box->data, data, len);
+ barrier(); /* the memcpy forgot the volatile so be sure */
+
+ mc32_ready_poll(dev);
+ outb(1<<6, ioaddr+HOST_CMD);
+
+ wait_for_completion(&lp->execution_cmd);
+
+ if(lp->exec_box->mbox&(1<<13))
+ ret = -1;
+
+ up(&lp->cmd_mutex);
+
+ /*
+ * A multicast set got blocked - try it now
+ */
+
+ if(lp->mc_reload_wait)
+ {
+ mc32_reset_multicast_list(dev);
+ }
+
+ return ret;
+}
+
+
+/**
+ * mc32_start_transceiver - tell board to restart tx/rx
+ * @dev: The 3c527 card to issue the command to
+ *
+ * This may be called from the interrupt state, where it is used
+ * to restart the rx ring if the card runs out of rx buffers.
+ *
+ * We must first check if it's ok to (re)start the transceiver. See
+ * mc32_close for details.
+ */
+
+static void mc32_start_transceiver(struct net_device *dev) {
+
+ struct mc32_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ /* Ignore RX overflow on device closure */
+ if (lp->xceiver_desired_state==HALTED)
+ return;
+
+ /* Give the card the offset to the post-EOL-bit RX descriptor */
+ mc32_ready_poll(dev);
+ lp->rx_box->mbox=0;
+ lp->rx_box->data[0]=lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next;
+ outb(HOST_CMD_START_RX, ioaddr+HOST_CMD);
+
+ mc32_ready_poll(dev);
+ lp->tx_box->mbox=0;
+ outb(HOST_CMD_RESTRT_TX, ioaddr+HOST_CMD); /* card ignores this on RX restart */
+
+ /* We are not interrupted on start completion */
+}
+
+
+/**
+ * mc32_halt_transceiver - tell board to stop tx/rx
+ * @dev: The 3c527 card to issue the command to
+ *
+ * We issue the commands to halt the card's transceiver. In fact,
+ * after some experimenting we now simply tell the card to
+ * suspend. Issuing aborts occasionally caused odd things to happen.
+ *
+ * We then sleep until the card has notified us that both rx and
+ * tx have been suspended.
+ */
+
+static void mc32_halt_transceiver(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ mc32_ready_poll(dev);
+ lp->rx_box->mbox=0;
+ outb(HOST_CMD_SUSPND_RX, ioaddr+HOST_CMD);
+ wait_for_completion(&lp->xceiver_cmd);
+
+ mc32_ready_poll(dev);
+ lp->tx_box->mbox=0;
+ outb(HOST_CMD_SUSPND_TX, ioaddr+HOST_CMD);
+ wait_for_completion(&lp->xceiver_cmd);
+}
+
+
+/**
+ * mc32_load_rx_ring - load the ring of receive buffers
+ * @dev: 3c527 to build the ring for
+ *
+ * This initialises the on-card and driver data structures to
+ * the point where mc32_start_transceiver() can be called.
+ *
+ * The card sets up the receive ring for us. We are required to use the
+ * ring it provides, although the size of the ring is configurable.
+ *
+ * We allocate an sk_buff for each ring entry in turn and
+ * initialise its house-keeping info. At the same time, we read
+ * each 'next' pointer in our rx_ring array. This reduces slow
+ * shared-memory reads and makes it easy to access predecessor
+ * descriptors.
+ *
+ * We then set the end-of-list bit for the last entry so that the
+ * card will know when it has run out of buffers.
+ */
+
+static int mc32_load_rx_ring(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ int i;
+ u16 rx_base;
+ volatile struct skb_header *p;
+
+ rx_base=lp->rx_chain;
+
+ for(i=0; i<RX_RING_LEN; i++) {
+ lp->rx_ring[i].skb=alloc_skb(1532, GFP_KERNEL);
+ if (lp->rx_ring[i].skb==NULL) {
+ for (;i>=0;i--)
+ kfree_skb(lp->rx_ring[i].skb);
+ return -ENOBUFS;
+ }
+ skb_reserve(lp->rx_ring[i].skb, 18);
+
+ p=isa_bus_to_virt(lp->base+rx_base);
+
+ p->control=0;
+ p->data=isa_virt_to_bus(lp->rx_ring[i].skb->data);
+ p->status=0;
+ p->length=1532;
+
+ lp->rx_ring[i].p=p;
+ rx_base=p->next;
+ }
+
+ lp->rx_ring[i-1].p->control |= CONTROL_EOL;
+
+ lp->rx_ring_tail=0;
+
+ return 0;
+}
+
+
+/**
+ * mc32_flush_rx_ring - free the ring of receive buffers
+ * @lp: Local data of 3c527 to flush the rx ring of
+ *
+ * Free the buffer for each ring slot. This may be called
+ * before mc32_load_rx_ring(), e.g. on error in mc32_open().
+ * Requires rx skb pointers to point to a valid skb, or NULL.
+ */
+
+static void mc32_flush_rx_ring(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ int i;
+
+ for(i=0; i < RX_RING_LEN; i++)
+ {
+ if (lp->rx_ring[i].skb) {
+ dev_kfree_skb(lp->rx_ring[i].skb);
+ lp->rx_ring[i].skb = NULL;
+ }
+ lp->rx_ring[i].p=NULL;
+ }
+}
+
+
+/**
+ * mc32_load_tx_ring - load transmit ring
+ * @dev: The 3c527 card to issue the command to
+ *
+ * This sets up the host transmit data-structures.
+ *
+ * First, we obtain from the card its current position in the tx
+ * ring, so that we will know where to begin transmitting
+ * packets.
+ *
+ * Then, we read the 'next' pointers from the on-card tx ring into
+ * our tx_ring array to reduce slow shared-mem reads. Finally, we
+ * initialise the tx housekeeping variables.
+ *
+ */
+
+static void mc32_load_tx_ring(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ volatile struct skb_header *p;
+ int i;
+ u16 tx_base;
+
+ tx_base=lp->tx_box->data[0];
+
+ for(i=0 ; i<TX_RING_LEN ; i++)
+ {
+ p=isa_bus_to_virt(lp->base+tx_base);
+ lp->tx_ring[i].p=p;
+ lp->tx_ring[i].skb=NULL;
+
+ tx_base=p->next;
+ }
+
+ /* -1 so that tx_ring_head cannot "lap" tx_ring_tail */
+ /* see mc32_tx_ring */
+
+ atomic_set(&lp->tx_count, TX_RING_LEN-1);
+ atomic_set(&lp->tx_ring_head, 0);
+ lp->tx_ring_tail=0;
+}
+
+
+/**
+ * mc32_flush_tx_ring - free transmit ring
+ * @lp: Local data of 3c527 to flush the tx ring of
+ *
+ * If the ring is non-empty, zip over it, freeing any
+ * allocated sk_buffs. The tx ring house-keeping variables are
+ * then reset. Requires tx skb pointers to point to a valid skb,
+ * or NULL.
+ */
+
+static void mc32_flush_tx_ring(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ int i;
+
+ for (i=0; i < TX_RING_LEN; i++)
+ {
+ if (lp->tx_ring[i].skb)
+ {
+ dev_kfree_skb(lp->tx_ring[i].skb);
+ lp->tx_ring[i].skb = NULL;
+ }
+ }
+
+ atomic_set(&lp->tx_count, 0);
+ atomic_set(&lp->tx_ring_head, 0);
+ lp->tx_ring_tail=0;
+}
+
+
+/**
+ * mc32_open - handle 'up' of card
+ * @dev: device to open
+ *
+ * The user is trying to bring the card into ready state. This requires
+ * a brief dialogue with the card. Firstly we enable interrupts and then
+ * 'indications'. Without these enabled the card doesn't bother telling
+ * us what it has done. This had me puzzled for a week.
+ *
+ * We configure the number of card descriptors, then load the network
+ * address and multicast filters. Turn on the workaround mode: this
+ * asks the firmware to work around a bug in the 82586. It has a
+ * performance (latency) hit but is needed on busy [read: most]
+ * lans. We load the ring with buffers, then we kick it
+ * all off.
+ */
+
+static int mc32_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct mc32_local *lp = netdev_priv(dev);
+ u8 one=1;
+ u8 regs;
+ u16 descnumbuffs[2] = {TX_RING_LEN, RX_RING_LEN};
+
+ /*
+ * Interrupts enabled
+ */
+
+ regs=inb(ioaddr+HOST_CTRL);
+ regs|=HOST_CTRL_INTE;
+ outb(regs, ioaddr+HOST_CTRL);
+
+ /*
+ * Allow ourselves to issue commands
+ */
+
+ up(&lp->cmd_mutex);
+
+
+ /*
+ * Send the indications on command
+ */
+
+ mc32_command(dev, 4, &one, 2);
+
+ /*
+ * Poke it to make sure it's really dead.
+ */
+
+ mc32_halt_transceiver(dev);
+ mc32_flush_tx_ring(dev);
+
+ /*
+ * Ask card to set up on-card descriptors to our spec
+ */
+
+ if(mc32_command(dev, 8, descnumbuffs, 4)) {
+ pr_info("%s: %s rejected our buffer configuration!\n",
+ dev->name, cardname);
+ mc32_close(dev);
+ return -ENOBUFS;
+ }
+
+ /* Report new configuration */
+ mc32_command(dev, 6, NULL, 0);
+
+ lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */
+ lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */
+ lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
+ lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
+
+ /* Set Network Address */
+ mc32_command(dev, 1, dev->dev_addr, 6);
+
+ /* Set the filters */
+ mc32_set_multicast_list(dev);
+
+ if (WORKAROUND_82586) {
+ u16 zero_word=0;
+ mc32_command(dev, 0x0D, &zero_word, 2); /* 82586 bug workaround on */
+ }
+
+ mc32_load_tx_ring(dev);
+
+ if(mc32_load_rx_ring(dev))
+ {
+ mc32_close(dev);
+ return -ENOBUFS;
+ }
+
+ lp->xceiver_desired_state = RUNNING;
+
+ /* And finally, set the ball rolling... */
+ mc32_start_transceiver(dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+
+/**
+ * mc32_timeout - handle a timeout from the network layer
+ * @dev: 3c527 that timed out
+ *
+ * Handle a timeout on transmit from the 3c527. This normally means
+ * bad things as the hardware handles cable timeouts and mess for
+ * us.
+ *
+ */
+
+static void mc32_timeout(struct net_device *dev)
+{
+ pr_warning("%s: transmit timed out?\n", dev->name);
+ /* Try to restart the adaptor. */
+ netif_wake_queue(dev);
+}
+
+
+/**
+ * mc32_send_packet - queue a frame for transmit
+ * @skb: buffer to transmit
+ * @dev: 3c527 to send it out of
+ *
+ * Transmit a buffer. This normally means throwing the buffer onto
+ * the transmit queue as the queue is quite large. If the queue is
+ * full then we set tx_busy and return. Once the interrupt handler
+ * gets messages telling it to reclaim transmit queue entries, we will
+ * clear tx_busy and the kernel will start calling this again.
+ *
+ * We do not disable interrupts or acquire any locks; this can
+ * run concurrently with mc32_tx_ring(), and the function itself
+ * is serialised at a higher layer. However, similarly for the
+ * card itself, we must ensure that we update tx_ring_head only
+ * after we've established a valid packet on the tx ring (and
+ * before we let the card "see" it, to prevent it racing with the
+ * irq handler).
+ *
+ */
+
+static netdev_tx_t mc32_send_packet(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ u32 head = atomic_read(&lp->tx_ring_head);
+
+ volatile struct skb_header *p, *np;
+
+ netif_stop_queue(dev);
+
+ if(atomic_read(&lp->tx_count)==0) {
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb_padto(skb, ETH_ZLEN)) {
+ netif_wake_queue(dev);
+ return NETDEV_TX_OK;
+ }
+
+ atomic_dec(&lp->tx_count);
+
+ /* P is the last sending/sent buffer as a pointer */
+ p=lp->tx_ring[head].p;
+
+ head = next_tx(head);
+
+ /* NP is the buffer we will be loading */
+ np=lp->tx_ring[head].p;
+
+ /* We will need this to flush the buffer out */
+ lp->tx_ring[head].skb=skb;
+
+ np->length = unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
+ np->data = isa_virt_to_bus(skb->data);
+ np->status = 0;
+ np->control = CONTROL_EOP | CONTROL_EOL;
+ wmb();
+
+ /*
+ * The new frame has been setup; we can now
+ * let the interrupt handler and card "see" it
+ */
+
+ atomic_set(&lp->tx_ring_head, head);
+ p->control &= ~CONTROL_EOL;
+
+ netif_wake_queue(dev);
+ return NETDEV_TX_OK;
+}
+
+
+/**
+ * mc32_update_stats - pull off the on board statistics
+ * @dev: 3c527 to service
+ *
+ *
+ * Query and reset the on-card stats. There's the small possibility
+ * of a race here, which would result in an underestimation of
+ * actual errors. As such, we'd prefer to keep all our stats
+ * collection in software. As a rule, we do. However it can't be
+ * used for rx errors and collisions as, by default, the card discards
+ * bad rx packets.
+ *
+ * Setting the SAV BP in the rx filter command supposedly
+ * stops this behaviour. However, testing shows that it only seems to
+ * enable the collation of on-card rx statistics --- the driver
+ * never sees an RX descriptor with an error status set.
+ *
+ */
+
+static void mc32_update_stats(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ volatile struct mc32_stats *st = lp->stats;
+
+ u32 rx_errors=0;
+
+ rx_errors+=dev->stats.rx_crc_errors +=st->rx_crc_errors;
+ st->rx_crc_errors=0;
+ rx_errors+=dev->stats.rx_fifo_errors +=st->rx_overrun_errors;
+ st->rx_overrun_errors=0;
+ rx_errors+=dev->stats.rx_frame_errors +=st->rx_alignment_errors;
+ st->rx_alignment_errors=0;
+ rx_errors+=dev->stats.rx_length_errors+=st->rx_tooshort_errors;
+ st->rx_tooshort_errors=0;
+ rx_errors+=dev->stats.rx_missed_errors+=st->rx_outofresource_errors;
+ st->rx_outofresource_errors=0;
+ dev->stats.rx_errors=rx_errors;
+
+ /* Number of packets which saw one collision */
+ dev->stats.collisions+=st->dataC[10];
+ st->dataC[10]=0;
+
+ /* Number of packets which saw 2--15 collisions */
+ dev->stats.collisions+=st->dataC[11];
+ st->dataC[11]=0;
+}
+
+
+/**
+ * mc32_rx_ring - process the receive ring
+ * @dev: 3c527 that needs its receive ring processing
+ *
+ *
+ * We have received one or more indications from the card that a
+ * receive has completed. The buffer ring thus contains dirty
+ * entries. We walk the ring by iterating over the circular rx_ring
+ * array, starting at the next dirty buffer (which happens to be the
+ * one we finished up at last time around).
+ *
+ * For each completed packet, we will either copy it and pass it up
+ * the stack or, if the packet is near MTU sized, we allocate
+ * another buffer and flip the old one up the stack.
+ *
+ * We must succeed in keeping a buffer on the ring. If necessary we
+ * will toss a received packet rather than lose a ring entry. Once
+ * the first uncompleted descriptor is found, we move the
+ * End-Of-List bit to include the buffers just processed.
+ *
+ */
+
+static void mc32_rx_ring(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ volatile struct skb_header *p;
+ u16 rx_ring_tail;
+ u16 rx_old_tail;
+ int x=0;
+
+ rx_old_tail = rx_ring_tail = lp->rx_ring_tail;
+
+ do
+ {
+ p=lp->rx_ring[rx_ring_tail].p;
+
+ if(!(p->status & (1<<7))) { /* Not COMPLETED */
+ break;
+ }
+ if(p->status & (1<<6)) /* COMPLETED_OK */
+ {
+
+ u16 length=p->length;
+ struct sk_buff *skb;
+ struct sk_buff *newskb;
+
+ /* Try to save time by avoiding a copy on big frames */
+
+ if ((length > RX_COPYBREAK) &&
+ ((newskb=dev_alloc_skb(1532)) != NULL))
+ {
+ skb=lp->rx_ring[rx_ring_tail].skb;
+ skb_put(skb, length);
+
+ skb_reserve(newskb,18);
+ lp->rx_ring[rx_ring_tail].skb=newskb;
+ p->data=isa_virt_to_bus(newskb->data);
+ }
+ else
+ {
+ skb=dev_alloc_skb(length+2);
+
+ if(skb==NULL) {
+ dev->stats.rx_dropped++;
+ goto dropped;
+ }
+
+ skb_reserve(skb,2);
+ memcpy(skb_put(skb, length),
+ lp->rx_ring[rx_ring_tail].skb->data, length);
+ }
+
+ skb->protocol=eth_type_trans(skb,dev);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += length;
+ netif_rx(skb);
+ }
+
+ dropped:
+ p->length = 1532;
+ p->status = 0;
+
+ rx_ring_tail=next_rx(rx_ring_tail);
+ }
+ while(x++<48);
+
+ /* If there was actually a frame to be processed, place the EOL bit */
+ /* at the descriptor prior to the one to be filled next */
+
+ if (rx_ring_tail != rx_old_tail)
+ {
+ lp->rx_ring[prev_rx(rx_ring_tail)].p->control |= CONTROL_EOL;
+ lp->rx_ring[prev_rx(rx_old_tail)].p->control &= ~CONTROL_EOL;
+
+ lp->rx_ring_tail=rx_ring_tail;
+ }
+}
+
+
+/**
+ * mc32_tx_ring - process completed transmits
+ * @dev: 3c527 that needs its transmit ring processing
+ *
+ *
+ * This operates in a similar fashion to mc32_rx_ring. We iterate
+ * over the transmit ring. For each descriptor which has been
+ * processed by the card, we free its associated buffer and note
+ * any errors. This continues until the transmit ring is emptied
+ * or we reach a descriptor that hasn't yet been processed by the
+ * card.
+ *
+ */
+
+static void mc32_tx_ring(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ volatile struct skb_header *np;
+
+ /*
+ * We rely on head==tail to mean 'queue empty'.
+ * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent
+ * tx_ring_head wrapping to tail and confusing a 'queue empty'
+ * condition with 'queue full'.
+ */
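+ /* e.g. with TX_RING_LEN == 32, at most 31 frames are ever
+ * outstanding, so head == tail can only mean 'empty'. */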
+
+ while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))
+ {
+ u16 t;
+
+ t=next_tx(lp->tx_ring_tail);
+ np=lp->tx_ring[t].p;
+
+ if(!(np->status & (1<<7)))
+ {
+ /* Not COMPLETED */
+ break;
+ }
+ dev->stats.tx_packets++;
+ if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
+ {
+ dev->stats.tx_errors++;
+
+ switch(np->status&0x0F)
+ {
+ case 1:
+ dev->stats.tx_aborted_errors++;
+ break; /* Max collisions */
+ case 2:
+ dev->stats.tx_fifo_errors++;
+ break;
+ case 3:
+ dev->stats.tx_carrier_errors++;
+ break;
+ case 4:
+ dev->stats.tx_window_errors++;
+ break; /* CTS Lost */
+ case 5:
+ dev->stats.tx_aborted_errors++;
+ break; /* Transmit timeout */
+ }
+ }
+ /* Packets are sent in order - this is
+ basically a FIFO queue of buffers matching
+ the card ring */
+ dev->stats.tx_bytes+=lp->tx_ring[t].skb->len;
+ dev_kfree_skb_irq(lp->tx_ring[t].skb);
+ lp->tx_ring[t].skb=NULL;
+ atomic_inc(&lp->tx_count);
+ netif_wake_queue(dev);
+
+ lp->tx_ring_tail=t;
+ }
+
+}
+
+
+/**
+ * mc32_interrupt - handle an interrupt from a 3c527
+ * @irq: Interrupt number
+ * @dev_id: 3c527 that requires servicing
+ * @regs: Registers (unused)
+ *
+ *
+ * An interrupt is raised whenever the 3c527 writes to the command
+ * register. This register contains the message it wishes to send us
+ * packed into a single byte field. We keep reading status entries
+ * until we have processed all the control items, but simply count
+ * transmit and receive reports. When all reports are in we empty the
+ * transceiver rings as appropriate. This saves the overhead of
+ * multiple command requests.
+ *
+ * Because MCA is level-triggered, we shouldn't miss indications.
+ * Therefore, we needn't ask the card to suspend interrupts within
+ * this handler. The card receives an implicit acknowledgment of the
+ * current interrupt when we read the command register.
+ *
+ */
+
+static irqreturn_t mc32_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct mc32_local *lp;
+ int ioaddr, status, boguscount = 0;
+ int rx_event = 0;
+ int tx_event = 0;
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ /* See what's cooking */
+
+ while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000)
+ {
+ status=inb(ioaddr+HOST_CMD);
+
+ pr_debug("Status TX%d RX%d EX%d OV%d BC%d\n",
+ (status&7), (status>>3)&7, (status>>6)&1,
+ (status>>7)&1, boguscount);
+
+ switch(status&7)
+ {
+ case 0:
+ break;
+ case 6: /* TX fail */
+ case 2: /* TX ok */
+ tx_event = 1;
+ break;
+ case 3: /* Halt */
+ case 4: /* Abort */
+ complete(&lp->xceiver_cmd);
+ break;
+ default:
+ pr_notice("%s: strange tx ack %d\n", dev->name, status&7);
+ }
+ status>>=3;
+ switch(status&7)
+ {
+ case 0:
+ break;
+ case 2: /* RX */
+ rx_event=1;
+ break;
+ case 3: /* Halt */
+ case 4: /* Abort */
+ complete(&lp->xceiver_cmd);
+ break;
+ case 6:
+ /* Out of RX buffers stat */
+ /* Must restart rx */
+ dev->stats.rx_dropped++;
+ mc32_rx_ring(dev);
+ mc32_start_transceiver(dev);
+ break;
+ default:
+ pr_notice("%s: strange rx ack %d\n",
+ dev->name, status&7);
+ }
+ status>>=3;
+ if(status&1)
+ {
+ /*
+ * No thread is waiting: we need to tidy
+ * up ourselves.
+ */
+
+ if (lp->cmd_nonblocking) {
+ up(&lp->cmd_mutex);
+ if (lp->mc_reload_wait)
+ mc32_reset_multicast_list(dev);
+ }
+ else complete(&lp->execution_cmd);
+ }
+ if(status&2)
+ {
+ /*
+ * We get interrupted once per
+ * counter that is about to overflow.
+ */
+
+ mc32_update_stats(dev);
+ }
+ }
+
+
+ /*
+ * Process the transmit and receive rings
+ */
+
+ if(tx_event)
+ mc32_tx_ring(dev);
+
+ if(rx_event)
+ mc32_rx_ring(dev);
+
+ return IRQ_HANDLED;
+}
+
+
+/**
+ * mc32_close - user configuring the 3c527 down
+ * @dev: 3c527 card to shut down
+ *
+ * The 3c527 is a bus mastering device. We must be careful how we
+ * shut it down. It may also be running shared interrupt so we have
+ * to be sure to silence it properly
+ *
+ * We indicate that the card is closing to the rest of the
+ * driver. Otherwise, it is possible that the card may run out
+ * of receive buffers and restart the transceiver while we're
+ * trying to close it.
+ *
+ * We abort any receive and transmits going on and then wait until
+ * any pending exec commands have completed in other code threads.
+ * In theory we can't get here while that is true; in practice I am
+ * paranoid.
+ *
+ * We turn off the interrupt enable for the board to be sure it can't
+ * interfere with other devices.
+ */
+
+static int mc32_close(struct net_device *dev)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ u8 regs;
+ u16 one=1;
+
+ lp->xceiver_desired_state = HALTED;
+ netif_stop_queue(dev);
+
+ /*
+ * Send the indications on command (handy debug check)
+ */
+
+ mc32_command(dev, 4, &one, 2);
+
+ /* Shut down the transceiver */
+
+ mc32_halt_transceiver(dev);
+
+ /* Ensure we issue no more commands beyond this point */
+
+ down(&lp->cmd_mutex);
+
+ /* Ok the card is now stopping */
+
+ regs=inb(ioaddr+HOST_CTRL);
+ regs&=~HOST_CTRL_INTE;
+ outb(regs, ioaddr+HOST_CTRL);
+
+ mc32_flush_rx_ring(dev);
+ mc32_flush_tx_ring(dev);
+
+ mc32_update_stats(dev);
+
+ return 0;
+}
+
+
+/**
+ * mc32_get_stats - hand back stats to network layer
+ * @dev: The 3c527 card to handle
+ *
+ * We've collected all the stats we can in software already. Now
+ * it's time to update those kept on-card and return the lot.
+ *
+ */
+
+static struct net_device_stats *mc32_get_stats(struct net_device *dev)
+{
+ mc32_update_stats(dev);
+ return &dev->stats;
+}
+
+
+/**
+ * do_mc32_set_multicast_list - attempt to update multicasts
+ * @dev: 3c527 device to load the list on
+ * @retry: indicates this is not the first call.
+ *
+ *
+ * Actually set or clear the multicast filter for this adaptor. The
+ * locking issues are handled by this routine. We have to track
+ * state as it may take multiple calls to get the command sequence
+ * completed. We just keep trying to schedule the loads until we
+ * manage to process them all.
+ *
+ * num_addrs == -1 Promiscuous mode, receive all packets
+ *
+ * num_addrs == 0 Normal mode, clear multicast list
+ *
+ * num_addrs > 0 Multicast mode, receive normal and MC packets,
+ * and do best-effort filtering.
+ *
+ *	See mc32_update_stats() regarding setting the SAV BP bit.
+ *
+ */
+
+static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
+{
+ struct mc32_local *lp = netdev_priv(dev);
+ u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */
+
+ if ((dev->flags&IFF_PROMISC) ||
+ (dev->flags&IFF_ALLMULTI) ||
+ netdev_mc_count(dev) > 10)
+ /* Enable promiscuous mode */
+ filt |= 1;
+ else if (!netdev_mc_empty(dev))
+ {
+ unsigned char block[62];
+ unsigned char *bp;
+ struct netdev_hw_addr *ha;
+
+ if(retry==0)
+ lp->mc_list_valid = 0;
+ if(!lp->mc_list_valid)
+ {
+ block[1]=0;
+ block[0]=netdev_mc_count(dev);
+ bp=block+2;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(bp, ha->addr, 6);
+ bp+=6;
+ }
+ if(mc32_command_nowait(dev, 2, block,
+ 2+6*netdev_mc_count(dev))==-1)
+ {
+ lp->mc_reload_wait = 1;
+ return;
+ }
+ lp->mc_list_valid=1;
+ }
+ }
+
+ if(mc32_command_nowait(dev, 0, &filt, 2)==-1)
+ {
+ lp->mc_reload_wait = 1;
+ }
+ else {
+ lp->mc_reload_wait = 0;
+ }
+}
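+
+/*
+ * Command block sketch (mirrors the code above, illustrative only): the
+ * card is sent a count byte, a zero byte, then the packed 6-byte
+ * addresses, as command 2 of length 2 + 6*n. For two addresses:
+ *
+ *	block[0] = 2;
+ *	block[1] = 0;
+ *	memcpy(block + 2, addrs, 12);
+ *	mc32_command_nowait(dev, 2, block, 2 + 6*2);
+ */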
+
+
+/**
+ * mc32_set_multicast_list - queue multicast list update
+ * @dev: The 3c527 to use
+ *
+ * Commence loading the multicast list. This is called when the kernel
+ * changes the lists. It will override any pending list we are trying to
+ * load.
+ */
+
+static void mc32_set_multicast_list(struct net_device *dev)
+{
+ do_mc32_set_multicast_list(dev,0);
+}
+
+
+/**
+ * mc32_reset_multicast_list - reset multicast list
+ * @dev: The 3c527 to use
+ *
+ * Attempt the next step in loading the multicast lists. If this attempt
+ * fails to complete then it will be scheduled and this function called
+ * again later from elsewhere.
+ */
+
+static void mc32_reset_multicast_list(struct net_device *dev)
+{
+ do_mc32_set_multicast_list(dev,1);
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
+}
+
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return mc32_debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 level)
+{
+ mc32_debug = level;
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+};
+
+#ifdef MODULE
+
+static struct net_device *this_device;
+
+/**
+ * init_module - entry point
+ *
+ *	Probe and locate a 3c527 card. This really should probe and locate
+ *	all the 3c527 cards in the machine, not just one of them. Yes, you
+ *	can insmod multiple modules for now, but it's a hack.
+ */
+
+int __init init_module(void)
+{
+ this_device = mc32_probe(-1);
+ if (IS_ERR(this_device))
+ return PTR_ERR(this_device);
+ return 0;
+}
+
+/**
+ * cleanup_module - free resources for an unload
+ *
+ *	Unloading time. We release the MCA bus resources and the interrupt,
+ *	at which point everything is ready to unload. The card must be stopped
+ *	at this point or we would not have been called. When we unload we
+ *	leave the card stopped but not totally shut down. When the card is
+ *	next initialized it must be rebooted, or the rings reloaded, before
+ *	any transmit operations are allowed to start scribbling into memory.
+ */
+
+void __exit cleanup_module(void)
+{
+ unregister_netdev(this_device);
+ cleanup_card(this_device);
+ free_netdev(this_device);
+}
+
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/i825xx/3c527.h b/drivers/net/ethernet/i825xx/3c527.h
new file mode 100644
index 000000000000..d693b8d15cde
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/3c527.h
@@ -0,0 +1,81 @@
+/*
+ * 3COM "EtherLink MC/32" Descriptions
+ */
+
+/*
+ * Registers
+ */
+
+#define HOST_CMD 0
+#define HOST_CMD_START_RX (1<<3)
+#define HOST_CMD_SUSPND_RX (3<<3)
+#define HOST_CMD_RESTRT_RX (5<<3)
+
+#define HOST_CMD_SUSPND_TX 3
+#define HOST_CMD_RESTRT_TX 5
+
+
+#define HOST_STATUS 2
+#define HOST_STATUS_CRR (1<<6)
+#define HOST_STATUS_CWR (1<<5)
+
+
+#define HOST_CTRL 6
+#define HOST_CTRL_ATTN (1<<7)
+#define HOST_CTRL_RESET (1<<6)
+#define HOST_CTRL_INTE (1<<2)
+
+#define HOST_RAMPAGE 8
+
+#define HALTED 0
+#define RUNNING 1
+
+struct mc32_mailbox
+{
+ u16 mbox;
+ u16 data[1];
+} __packed;
+
+struct skb_header
+{
+ u8 status;
+ u8 control;
+ u16 next; /* Do not change! */
+ u16 length;
+ u32 data;
+} __packed;
+
+struct mc32_stats
+{
+ /* RX Errors */
+ u32 rx_crc_errors;
+ u32 rx_alignment_errors;
+ u32 rx_overrun_errors;
+ u32 rx_tooshort_errors;
+ u32 rx_toolong_errors;
+ u32 rx_outofresource_errors;
+
+ u32 rx_discarded; /* via card pattern match filter */
+
+ /* TX Errors */
+ u32 tx_max_collisions;
+ u32 tx_carrier_errors;
+ u32 tx_underrun_errors;
+ u32 tx_cts_errors;
+ u32 tx_timeout_errors;
+
+ /* various cruft */
+ u32 dataA[6];
+ u16 dataB[5];
+ u32 dataC[14];
+} __packed;
+
+#define STATUS_MASK 0x0F
+#define COMPLETED (1<<7)
+#define COMPLETED_OK (1<<6)
+#define BUFFER_BUSY (1<<5)
+
+#define CONTROL_EOP (1<<7) /* End Of Packet */
+#define CONTROL_EOL (1<<6) /* End of List */
+
+#define MCA_MC32_ID 0x0041 /* Our MCA ident */
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
new file mode 100644
index 000000000000..f2408a4d5d9c
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -0,0 +1,1632 @@
+/* 82596.c: A generic 82596 ethernet driver for linux. */
+/*
+ Based on Apricot.c
+ Written 1994 by Mark Evans.
+ This driver is for the Apricot 82596 bus-master interface.
+
+ Modularised 12/94 Mark Evans
+
+
+ Modified to support the 82596 ethernet chips on 680x0 VME boards
+ by Richard Hirst <richard@sleepie.demon.co.uk>.
+ Renamed to be 82596.c
+
+ 980825:  Changed to receive directly into sk_buffs which are
+ allocated at open() time. Eliminates copy on incoming frames
+ (small ones are still copied). Shared data now held in a
+ non-cached page, so we can run on 68060 in copyback mode.
+
+ TBD:
+ * look at deferring rx frames rather than discarding (as per tulip)
+ * handle tx ring full as per tulip
+ * performance test to tune rx_copybreak
+
+ Most of my modifications relate to the braindead big-endian
+ implementation by Intel. When the i596 is operating in
+ 'big-endian' mode, it thinks a 32 bit value of 0x12345678
+ should be stored as 0x56781234. This is a real pain when
+ you have linked lists which are shared by the 680x0 and the
+ i596.
+
+ Driver skeleton
+ Written 1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. This software may only be used and distributed
+ according to the terms of the GNU General Public License as modified by SRC,
+ incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
+
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/gfp.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+static char version[] __initdata =
+ "82596.c $Revision: 1.5 $\n";
+
+#define DRV_NAME "82596"
+
+/* DEBUG flags
+ */
+
+#define DEB_INIT 0x0001
+#define DEB_PROBE 0x0002
+#define DEB_SERIOUS 0x0004
+#define DEB_ERRORS 0x0008
+#define DEB_MULTI 0x0010
+#define DEB_TDR 0x0020
+#define DEB_OPEN 0x0040
+#define DEB_RESET 0x0080
+#define DEB_ADDCMD 0x0100
+#define DEB_STATUS 0x0200
+#define DEB_STARTTX 0x0400
+#define DEB_RXADDR 0x0800
+#define DEB_TXADDR 0x1000
+#define DEB_RXFRAME 0x2000
+#define DEB_INTS 0x4000
+#define DEB_STRUCT 0x8000
+#define DEB_ANY 0xffff
+
+
+#define DEB(x,y) if (i596_debug & (x)) y
+
+
+#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_MVME16x_NET_MODULE)
+#define ENABLE_MVME16x_NET
+#endif
+#if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE)
+#define ENABLE_BVME6000_NET
+#endif
+#if defined(CONFIG_APRICOT) || defined(CONFIG_APRICOT_MODULE)
+#define ENABLE_APRICOT
+#endif
+
+#ifdef ENABLE_MVME16x_NET
+#include <asm/mvme16xhw.h>
+#endif
+#ifdef ENABLE_BVME6000_NET
+#include <asm/bvme6000hw.h>
+#endif
+
+/*
+ * Define various macros for Channel Attention, word swapping etc., dependent
+ * on architecture. MVME and BVME are 680x0 based, otherwise it is Intel.
+ */
+
+#ifdef __mc68000__
+#define WSWAPrfd(x) ((struct i596_rfd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define WSWAPrbd(x) ((struct i596_rbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define WSWAPiscp(x) ((struct i596_iscp *)(((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define WSWAPscb(x) ((struct i596_scb *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define WSWAPcmd(x) ((struct i596_cmd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define WSWAPtbd(x) ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define WSWAPchar(x) ((char *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
+#define ISCP_BUSY 0x00010000
+#define MACH_IS_APRICOT 0
+#else
+#define WSWAPrfd(x) ((struct i596_rfd *)((long)x))
+#define WSWAPrbd(x) ((struct i596_rbd *)((long)x))
+#define WSWAPiscp(x) ((struct i596_iscp *)((long)x))
+#define WSWAPscb(x) ((struct i596_scb *)((long)x))
+#define WSWAPcmd(x) ((struct i596_cmd *)((long)x))
+#define WSWAPtbd(x) ((struct i596_tbd *)((long)x))
+#define WSWAPchar(x) ((char *)((long)x))
+#define ISCP_BUSY 0x0001
+#define MACH_IS_APRICOT 1
+#endif
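+
+/*
+ * Usage sketch: every bus address handed to the chip goes through the
+ * matching WSWAP macro, for example (as done in init_i596_mem()):
+ *
+ *	lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb));
+ *
+ * On 680x0 this exchanges the 16-bit halves as described above; on
+ * Intel the macros reduce to plain casts.
+ */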
+
+/*
+ * The MPU_PORT command allows direct access to the 82596. With PORT access
+ * the following commands are available (p5-18). The 32-bit port command
+ * must be word-swapped with the most significant word written first.
+ * This only applies to VME boards.
+ */
+#define PORT_RESET 0x00 /* reset 82596 */
+#define PORT_SELFTEST 0x01 /* selftest */
+#define PORT_ALTSCP 0x02 /* alternate SCB address */
+#define PORT_ALTDUMP 0x03 /* Alternate DUMP address */
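+
+/*
+ * Illustrative use (mirrors init_i596_mem()): pointing the chip at an
+ * alternate SCP goes through MPU_PORT(), which performs the word swap
+ * described above:
+ *
+ *	MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));
+ */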
+
+static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
+
+MODULE_AUTHOR("Richard Hirst");
+MODULE_DESCRIPTION("i82596 driver");
+MODULE_LICENSE("GPL");
+
+module_param(i596_debug, int, 0);
+MODULE_PARM_DESC(i596_debug, "i82596 debug mask");
+
+
+/* Copy frames shorter than rx_copybreak, otherwise pass on up in
+ * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
+ */
+static int rx_copybreak = 100;
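+
+/*
+ * Policy sketch (implemented in i596_rx() below): frames longer than
+ * rx_copybreak keep the skbuff already on the ring and a fresh one is
+ * allocated to replace it; shorter frames are copied into a small new
+ * skbuff so the full-sized ring buffer can be reused.
+ */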
+
+#define PKT_BUF_SZ 1536
+#define MAX_MC_CNT 64
+
+#define I596_TOTAL_SIZE 17
+
+#define I596_NULL ((void *)0xffffffff)
+
+#define CMD_EOL 0x8000 /* The last command of the list, stop. */
+#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
+#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
+
+#define CMD_FLEX 0x0008 /* Enable flexible memory model */
+
+enum commands {
+ CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
+ CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
+};
+
+#define STAT_C 0x8000 /* Set to 0 after execution */
+#define STAT_B 0x4000 /* Command being executed */
+#define STAT_OK 0x2000 /* Command executed ok */
+#define STAT_A 0x1000 /* Command aborted */
+
+#define CUC_START 0x0100
+#define CUC_RESUME 0x0200
+#define CUC_SUSPEND 0x0300
+#define CUC_ABORT 0x0400
+#define RX_START 0x0010
+#define RX_RESUME 0x0020
+#define RX_SUSPEND 0x0030
+#define RX_ABORT 0x0040
+
+#define TX_TIMEOUT (HZ/20)
+
+
+struct i596_reg {
+ unsigned short porthi;
+ unsigned short portlo;
+ unsigned long ca;
+};
+
+#define EOF 0x8000
+#define SIZE_MASK 0x3fff
+
+struct i596_tbd {
+ unsigned short size;
+ unsigned short pad;
+ struct i596_tbd *next;
+ char *data;
+};
+
+/* The command structure has two 'next' pointers; v_next is the address of
+ * the next command as seen by the CPU, b_next is the address of the next
+ * command as seen by the 82596. The b_next pointer, as used by the 82596
+ * always references the status field of the next command, rather than the
+ * v_next field, because the 82596 is unaware of v_next. It may seem more
+ * logical to put v_next at the end of the structure, but we cannot do that
+ * because the 82596 expects other fields to be there, depending on command
+ * type.
+ */
+
+struct i596_cmd {
+ struct i596_cmd *v_next; /* Address from CPUs viewpoint */
+ unsigned short status;
+ unsigned short command;
+ struct i596_cmd *b_next; /* Address from i596 viewpoint */
+};
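+
+/*
+ * Chaining sketch (as done in i596_add_cmd()): the previous tail's
+ * b_next is pointed at the status field of the new command, since
+ * &cmd->status is what the 82596 treats as the command's address:
+ *
+ *	lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status));
+ */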
+
+struct tx_cmd {
+ struct i596_cmd cmd;
+ struct i596_tbd *tbd;
+ unsigned short size;
+ unsigned short pad;
+ struct sk_buff *skb; /* So we can free it after tx */
+};
+
+struct tdr_cmd {
+ struct i596_cmd cmd;
+ unsigned short status;
+ unsigned short pad;
+};
+
+struct mc_cmd {
+ struct i596_cmd cmd;
+ short mc_cnt;
+ char mc_addrs[MAX_MC_CNT*6];
+};
+
+struct sa_cmd {
+ struct i596_cmd cmd;
+ char eth_addr[8];
+};
+
+struct cf_cmd {
+ struct i596_cmd cmd;
+ char i596_config[16];
+};
+
+struct i596_rfd {
+ unsigned short stat;
+ unsigned short cmd;
+ struct i596_rfd *b_next; /* Address from i596 viewpoint */
+ struct i596_rbd *rbd;
+ unsigned short count;
+ unsigned short size;
+ struct i596_rfd *v_next; /* Address from CPUs viewpoint */
+ struct i596_rfd *v_prev;
+};
+
+struct i596_rbd {
+ unsigned short count;
+ unsigned short zero1;
+ struct i596_rbd *b_next;
+ unsigned char *b_data; /* Address from i596 viewpoint */
+ unsigned short size;
+ unsigned short zero2;
+ struct sk_buff *skb;
+ struct i596_rbd *v_next;
+ struct i596_rbd *b_addr; /* This rbd addr from i596 view */
+ unsigned char *v_data; /* Address from CPUs viewpoint */
+};
+
+#define TX_RING_SIZE 64
+#define RX_RING_SIZE 16
+
+struct i596_scb {
+ unsigned short status;
+ unsigned short command;
+ struct i596_cmd *cmd;
+ struct i596_rfd *rfd;
+ unsigned long crc_err;
+ unsigned long align_err;
+ unsigned long resource_err;
+ unsigned long over_err;
+ unsigned long rcvdt_err;
+ unsigned long short_err;
+ unsigned short t_on;
+ unsigned short t_off;
+};
+
+struct i596_iscp {
+ unsigned long stat;
+ struct i596_scb *scb;
+};
+
+struct i596_scp {
+ unsigned long sysbus;
+ unsigned long pad;
+ struct i596_iscp *iscp;
+};
+
+struct i596_private {
+ volatile struct i596_scp scp;
+ volatile struct i596_iscp iscp;
+ volatile struct i596_scb scb;
+ struct sa_cmd sa_cmd;
+ struct cf_cmd cf_cmd;
+ struct tdr_cmd tdr_cmd;
+ struct mc_cmd mc_cmd;
+ unsigned long stat;
+ int last_restart __attribute__((aligned(4)));
+ struct i596_rfd *rfd_head;
+ struct i596_rbd *rbd_head;
+ struct i596_cmd *cmd_tail;
+ struct i596_cmd *cmd_head;
+ int cmd_backlog;
+ unsigned long last_cmd;
+ struct i596_rfd rfds[RX_RING_SIZE];
+ struct i596_rbd rbds[RX_RING_SIZE];
+ struct tx_cmd tx_cmds[TX_RING_SIZE];
+ struct i596_tbd tbds[TX_RING_SIZE];
+ int next_tx_cmd;
+ spinlock_t lock;
+};
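+
+/*
+ * Note: dev->ml_priv places this entire structure in the single
+ * non-cached page obtained in i82596_probe(), so all of the state the
+ * chip shares with the CPU (scp/iscp/scb, rings, command blocks) lives
+ * in that one page.
+ */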
+
+static char init_setup[] =
+{
+ 0x8E, /* length, prefetch on */
+ 0xC8, /* fifo to 8, monitor off */
+#ifdef CONFIG_VME
+ 0xc0, /* don't save bad frames */
+#else
+ 0x80, /* don't save bad frames */
+#endif
+ 0x2E, /* No source address insertion, 8 byte preamble */
+ 0x00, /* priority and backoff defaults */
+ 0x60, /* interframe spacing */
+ 0x00, /* slot time LSB */
+ 0xf2, /* slot time and retries */
+ 0x00, /* promiscuous mode */
+ 0x00, /* collision detect */
+ 0x40, /* minimum frame length */
+ 0xff,
+ 0x00,
+ 0x7f /* *multi IA */ };
+
+static int i596_open(struct net_device *dev);
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t i596_interrupt(int irq, void *dev_id);
+static int i596_close(struct net_device *dev);
+static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
+static void i596_tx_timeout (struct net_device *dev);
+static void print_eth(unsigned char *buf, char *str);
+static void set_multicast_list(struct net_device *dev);
+
+static int rx_ring_size = RX_RING_SIZE;
+static int ticks_limit = 25;
+static int max_cmd_backlog = TX_RING_SIZE-1;
+
+
+static inline void CA(struct net_device *dev)
+{
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ ((struct i596_reg *) dev->base_addr)->ca = 1;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile u32 i;
+
+ i = *(volatile u32 *) (dev->base_addr);
+ }
+#endif
+#ifdef ENABLE_APRICOT
+ if (MACH_IS_APRICOT) {
+ outw(0, (short) (dev->base_addr) + 4);
+ }
+#endif
+}
+
+
+static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x)
+{
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ struct i596_reg *p = (struct i596_reg *) (dev->base_addr);
+ p->porthi = ((c) | (u32) (x)) & 0xffff;
+ p->portlo = ((c) | (u32) (x)) >> 16;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ u32 v = (u32) (c) | (u32) (x);
+ v = ((u32) (v) << 16) | ((u32) (v) >> 16);
+ *(volatile u32 *) dev->base_addr = v;
+ udelay(1);
+ *(volatile u32 *) dev->base_addr = v;
+ }
+#endif
+}
+
+
+static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
+{
+ while (--delcnt && lp->iscp.stat)
+ udelay(10);
+ if (!delcnt) {
+ printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
+ dev->name, str, lp->scb.status, lp->scb.command);
+ return -1;
+ }
+ else
+ return 0;
+}
+
+
+static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
+{
+ while (--delcnt && lp->scb.command)
+ udelay(10);
+ if (!delcnt) {
+ printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
+ dev->name, str, lp->scb.status, lp->scb.command);
+ return -1;
+ }
+ else
+ return 0;
+}
+
+
+static inline int wait_cfg(struct net_device *dev, struct i596_cmd *cmd, int delcnt, char *str)
+{
+ volatile struct i596_cmd *c = cmd;
+
+ while (--delcnt && c->command)
+ udelay(10);
+ if (!delcnt) {
+ printk(KERN_ERR "%s: %s.\n", dev->name, str);
+ return -1;
+ }
+ else
+ return 0;
+}
+
+
+static void i596_display_data(struct net_device *dev)
+{
+ struct i596_private *lp = dev->ml_priv;
+ struct i596_cmd *cmd;
+ struct i596_rfd *rfd;
+ struct i596_rbd *rbd;
+
+ printk(KERN_ERR "lp and scp at %p, .sysbus = %08lx, .iscp = %p\n",
+ &lp->scp, lp->scp.sysbus, lp->scp.iscp);
+ printk(KERN_ERR "iscp at %p, iscp.stat = %08lx, .scb = %p\n",
+ &lp->iscp, lp->iscp.stat, lp->iscp.scb);
+ printk(KERN_ERR "scb at %p, scb.status = %04x, .command = %04x,"
+ " .cmd = %p, .rfd = %p\n",
+ &lp->scb, lp->scb.status, lp->scb.command,
+ lp->scb.cmd, lp->scb.rfd);
+ printk(KERN_ERR " errors: crc %lx, align %lx, resource %lx,"
+ " over %lx, rcvdt %lx, short %lx\n",
+ lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
+ lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
+ cmd = lp->cmd_head;
+ while (cmd != I596_NULL) {
+ printk(KERN_ERR "cmd at %p, .status = %04x, .command = %04x, .b_next = %p\n",
+ cmd, cmd->status, cmd->command, cmd->b_next);
+ cmd = cmd->v_next;
+ }
+ rfd = lp->rfd_head;
+ printk(KERN_ERR "rfd_head = %p\n", rfd);
+ do {
+ printk(KERN_ERR " %p .stat %04x, .cmd %04x, b_next %p, rbd %p,"
+ " count %04x\n",
+ rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
+ rfd->count);
+ rfd = rfd->v_next;
+ } while (rfd != lp->rfd_head);
+ rbd = lp->rbd_head;
+ printk(KERN_ERR "rbd_head = %p\n", rbd);
+ do {
+ printk(KERN_ERR " %p .count %04x, b_next %p, b_data %p, size %04x\n",
+ rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
+ rbd = rbd->v_next;
+ } while (rbd != lp->rbd_head);
+}
+
+
+#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
+static irqreturn_t i596_error(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
+
+ pcc2[0x28] = 1;
+ pcc2[0x2b] = 0x1d;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
+
+ *ethirq = 1;
+ *ethirq = 3;
+ }
+#endif
+ printk(KERN_ERR "%s: Error interrupt\n", dev->name);
+ i596_display_data(dev);
+ return IRQ_HANDLED;
+}
+#endif
+
+static inline void remove_rx_bufs(struct net_device *dev)
+{
+ struct i596_private *lp = dev->ml_priv;
+ struct i596_rbd *rbd;
+ int i;
+
+ for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
+ if (rbd->skb == NULL)
+ break;
+ dev_kfree_skb(rbd->skb);
+ rbd->skb = NULL;
+ }
+}
+
+static inline int init_rx_bufs(struct net_device *dev)
+{
+ struct i596_private *lp = dev->ml_priv;
+ int i;
+ struct i596_rfd *rfd;
+ struct i596_rbd *rbd;
+
+ /* First build the Receive Buffer Descriptor List */
+
+ for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
+ struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
+
+ if (skb == NULL) {
+ remove_rx_bufs(dev);
+ return -ENOMEM;
+ }
+
+ skb->dev = dev;
+ rbd->v_next = rbd+1;
+ rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
+ rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
+ rbd->skb = skb;
+ rbd->v_data = skb->data;
+ rbd->b_data = WSWAPchar(virt_to_bus(skb->data));
+ rbd->size = PKT_BUF_SZ;
+#ifdef __mc68000__
+ cache_clear(virt_to_phys(skb->data), PKT_BUF_SZ);
+#endif
+ }
+ lp->rbd_head = lp->rbds;
+ rbd = lp->rbds + rx_ring_size - 1;
+ rbd->v_next = lp->rbds;
+ rbd->b_next = WSWAPrbd(virt_to_bus(lp->rbds));
+
+ /* Now build the Receive Frame Descriptor List */
+
+ for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
+ rfd->rbd = I596_NULL;
+ rfd->v_next = rfd+1;
+ rfd->v_prev = rfd-1;
+ rfd->b_next = WSWAPrfd(virt_to_bus(rfd+1));
+ rfd->cmd = CMD_FLEX;
+ }
+ lp->rfd_head = lp->rfds;
+ lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
+ rfd = lp->rfds;
+ rfd->rbd = lp->rbd_head;
+ rfd->v_prev = lp->rfds + rx_ring_size - 1;
+ rfd = lp->rfds + rx_ring_size - 1;
+ rfd->v_next = lp->rfds;
+ rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds));
+ rfd->cmd = CMD_EOL|CMD_FLEX;
+
+ return 0;
+}
+
+
+static void rebuild_rx_bufs(struct net_device *dev)
+{
+ struct i596_private *lp = dev->ml_priv;
+ int i;
+
+ /* Ensure rx frame/buffer descriptors are tidy */
+
+ for (i = 0; i < rx_ring_size; i++) {
+ lp->rfds[i].rbd = I596_NULL;
+ lp->rfds[i].cmd = CMD_FLEX;
+ }
+ lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
+ lp->rfd_head = lp->rfds;
+ lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
+ lp->rbd_head = lp->rbds;
+ lp->rfds[0].rbd = WSWAPrbd(virt_to_bus(lp->rbds));
+}
+
+
+static int init_i596_mem(struct net_device *dev)
+{
+ struct i596_private *lp = dev->ml_priv;
+#if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET) || defined(ENABLE_APRICOT)
+ short ioaddr = dev->base_addr;
+#endif
+ unsigned long flags;
+
+ MPU_PORT(dev, PORT_RESET, NULL);
+
+ udelay(100); /* Wait 100us - seems to help */
+
+#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
+
+ /* Disable all ints for now */
+ pcc2[0x28] = 1;
+ pcc2[0x2a] = 0x48;
+ /* Following disables snooping. Snooping is not required
+ * as we make appropriate use of non-cached pages for
+ * shared data, and cache_push/cache_clear.
+ */
+ pcc2[0x2b] = 0x08;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
+
+ *ethirq = 1;
+ }
+#endif
+
+ /* change the scp address */
+
+ MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));
+
+#elif defined(ENABLE_APRICOT)
+
+ {
+ u32 scp = virt_to_bus(&lp->scp);
+
+ /* change the scp address */
+ outw(0, ioaddr);
+ outw(0, ioaddr);
+ outb(4, ioaddr + 0xf);
+ outw(scp | 2, ioaddr);
+ outw(scp >> 16, ioaddr);
+ }
+#endif
+
+ lp->last_cmd = jiffies;
+
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x)
+ lp->scp.sysbus = 0x00000054;
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000)
+ lp->scp.sysbus = 0x0000004c;
+#endif
+#ifdef ENABLE_APRICOT
+ if (MACH_IS_APRICOT)
+ lp->scp.sysbus = 0x00440000;
+#endif
+
+ lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp));
+ lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb));
+ lp->iscp.stat = ISCP_BUSY;
+ lp->cmd_backlog = 0;
+
+ lp->cmd_head = lp->scb.cmd = I596_NULL;
+
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ lp->scb.t_on = 7 * 25;
+ lp->scb.t_off = 1 * 25;
+ }
+#endif
+
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
+
+#if defined(ENABLE_APRICOT)
+ (void) inb(ioaddr + 0x10);
+ outb(4, ioaddr + 0xf);
+#endif
+ CA(dev);
+
+ if (wait_istat(dev,lp,1000,"initialization timed out"))
+ goto failed;
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: i82596 initialization successful\n", dev->name));
+
+ /* Ensure rx frame/buffer descriptors are tidy */
+ rebuild_rx_bufs(dev);
+ lp->scb.command = 0;
+
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
+
+ /* Enable ints, etc. now */
+ pcc2[0x2a] = 0x55; /* Edge sensitive */
+ pcc2[0x2b] = 0x15;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
+
+ *ethirq = 3;
+ }
+#endif
+
+
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdConfigure\n", dev->name));
+ memcpy(lp->cf_cmd.i596_config, init_setup, 14);
+ lp->cf_cmd.cmd.command = CmdConfigure;
+ i596_add_cmd(dev, &lp->cf_cmd.cmd);
+
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
+ memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
+ lp->sa_cmd.cmd.command = CmdSASetup;
+ i596_add_cmd(dev, &lp->sa_cmd.cmd);
+
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
+ lp->tdr_cmd.cmd.command = CmdTDR;
+ i596_add_cmd(dev, &lp->tdr_cmd.cmd);
+
+ spin_lock_irqsave (&lp->lock, flags);
+
+ if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
+ spin_unlock_irqrestore (&lp->lock, flags);
+ goto failed;
+ }
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
+ lp->scb.command = RX_START;
+ CA(dev);
+
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ if (wait_cmd(dev,lp,1000,"RX_START not processed"))
+ goto failed;
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: Receive unit started OK\n", dev->name));
+ return 0;
+
+failed:
+ printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name);
+ MPU_PORT(dev, PORT_RESET, NULL);
+ return -1;
+}
+
+static inline int i596_rx(struct net_device *dev)
+{
+ struct i596_private *lp = dev->ml_priv;
+ struct i596_rfd *rfd;
+ struct i596_rbd *rbd;
+ int frames = 0;
+
+ DEB(DEB_RXFRAME,printk(KERN_DEBUG "i596_rx(), rfd_head %p, rbd_head %p\n",
+ lp->rfd_head, lp->rbd_head));
+
+ rfd = lp->rfd_head; /* Ref next frame to check */
+
+ while ((rfd->stat) & STAT_C) { /* Loop while complete frames */
+ if (rfd->rbd == I596_NULL)
+ rbd = I596_NULL;
+ else if (rfd->rbd == lp->rbd_head->b_addr)
+ rbd = lp->rbd_head;
+ else {
+ printk(KERN_CRIT "%s: rbd chain broken!\n", dev->name);
+ /* XXX Now what? */
+ rbd = I596_NULL;
+ }
+ DEB(DEB_RXFRAME, printk(KERN_DEBUG " rfd %p, rfd.rbd %p, rfd.stat %04x\n",
+ rfd, rfd->rbd, rfd->stat));
+
+ if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) {
+ /* a good frame */
+ int pkt_len = rbd->count & 0x3fff;
+ struct sk_buff *skb = rbd->skb;
+ int rx_in_place = 0;
+
+ DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
+ frames++;
+
+ /* Check if the packet is long enough to just accept
+ * without copying to a properly sized skbuff.
+ */
+
+ if (pkt_len > rx_copybreak) {
+ struct sk_buff *newskb;
+
+ /* Get fresh skbuff to replace filled one. */
+ newskb = dev_alloc_skb(PKT_BUF_SZ);
+ if (newskb == NULL) {
+ skb = NULL; /* drop pkt */
+ goto memory_squeeze;
+ }
+ /* Pass up the skb already on the Rx ring. */
+ skb_put(skb, pkt_len);
+ rx_in_place = 1;
+ rbd->skb = newskb;
+ newskb->dev = dev;
+ rbd->v_data = newskb->data;
+ rbd->b_data = WSWAPchar(virt_to_bus(newskb->data));
+#ifdef __mc68000__
+ cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ);
+#endif
+ }
+ else
+ skb = dev_alloc_skb(pkt_len + 2);
+memory_squeeze:
+ if (skb == NULL) {
+ /* XXX tulip.c can defer packets here!! */
+ printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
+ dev->stats.rx_dropped++;
+ }
+ else {
+ if (!rx_in_place) {
+ /* 16 byte align the data fields */
+ skb_reserve(skb, 2);
+ memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
+ }
+ skb->protocol=eth_type_trans(skb,dev);
+ skb->len = pkt_len;
+#ifdef __mc68000__
+ cache_clear(virt_to_phys(rbd->skb->data),
+ pkt_len);
+#endif
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes+=pkt_len;
+ }
+ }
+ else {
+ DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n",
+ dev->name, rfd->stat));
+ dev->stats.rx_errors++;
+ if ((rfd->stat) & 0x0001)
+ dev->stats.collisions++;
+ if ((rfd->stat) & 0x0080)
+ dev->stats.rx_length_errors++;
+ if ((rfd->stat) & 0x0100)
+ dev->stats.rx_over_errors++;
+ if ((rfd->stat) & 0x0200)
+ dev->stats.rx_fifo_errors++;
+ if ((rfd->stat) & 0x0400)
+ dev->stats.rx_frame_errors++;
+ if ((rfd->stat) & 0x0800)
+ dev->stats.rx_crc_errors++;
+ if ((rfd->stat) & 0x1000)
+ dev->stats.rx_length_errors++;
+ }
+
+ /* Clear the buffer descriptor count and EOF + F flags */
+
+ if (rbd != I596_NULL && (rbd->count & 0x4000)) {
+ rbd->count = 0;
+ lp->rbd_head = rbd->v_next;
+ }
+
+ /* Tidy the frame descriptor, marking it as end of list */
+
+ rfd->rbd = I596_NULL;
+ rfd->stat = 0;
+ rfd->cmd = CMD_EOL|CMD_FLEX;
+ rfd->count = 0;
+
+ /* Remove end-of-list from old end descriptor */
+
+ rfd->v_prev->cmd = CMD_FLEX;
+
+ /* Update record of next frame descriptor to process */
+
+ lp->scb.rfd = rfd->b_next;
+ lp->rfd_head = rfd->v_next;
+ rfd = lp->rfd_head;
+ }
+
+ DEB(DEB_RXFRAME,printk(KERN_DEBUG "frames %d\n", frames));
+
+ return 0;
+}
+
+
+static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
+{
+ struct i596_cmd *ptr;
+
+ while (lp->cmd_head != I596_NULL) {
+ ptr = lp->cmd_head;
+ lp->cmd_head = ptr->v_next;
+ lp->cmd_backlog--;
+
+ switch ((ptr->command) & 0x7) {
+ case CmdTx:
+ {
+ struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
+ struct sk_buff *skb = tx_cmd->skb;
+
+ dev_kfree_skb(skb);
+
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
+
+ ptr->v_next = ptr->b_next = I596_NULL;
+ tx_cmd->cmd.command = 0; /* Mark as free */
+ break;
+ }
+ default:
+ ptr->v_next = ptr->b_next = I596_NULL;
+ }
+ }
+
+ wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
+ lp->scb.cmd = I596_NULL;
+}
+
+static void i596_reset(struct net_device *dev, struct i596_private *lp,
+ int ioaddr)
+{
+ unsigned long flags;
+
+ DEB(DEB_RESET,printk(KERN_DEBUG "i596_reset\n"));
+
+ spin_lock_irqsave (&lp->lock, flags);
+
+ wait_cmd(dev,lp,100,"i596_reset timed out");
+
+ netif_stop_queue(dev);
+
+ lp->scb.command = CUC_ABORT | RX_ABORT;
+ CA(dev);
+
+ /* wait for shutdown */
+ wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ i596_cleanup_cmd(dev,lp);
+ i596_rx(dev);
+
+ netif_start_queue(dev);
+ init_i596_mem(dev);
+}
+
+static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
+{
+ struct i596_private *lp = dev->ml_priv;
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ DEB(DEB_ADDCMD,printk(KERN_DEBUG "i596_add_cmd\n"));
+
+ cmd->status = 0;
+ cmd->command |= (CMD_EOL | CMD_INTR);
+ cmd->v_next = cmd->b_next = I596_NULL;
+
+ spin_lock_irqsave (&lp->lock, flags);
+
+ if (lp->cmd_head != I596_NULL) {
+ lp->cmd_tail->v_next = cmd;
+ lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status));
+ } else {
+ lp->cmd_head = cmd;
+ wait_cmd(dev,lp,100,"i596_add_cmd timed out");
+ lp->scb.cmd = WSWAPcmd(virt_to_bus(&cmd->status));
+ lp->scb.command = CUC_START;
+ CA(dev);
+ }
+ lp->cmd_tail = cmd;
+ lp->cmd_backlog++;
+
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ if (lp->cmd_backlog > max_cmd_backlog) {
+ unsigned long tickssofar = jiffies - lp->last_cmd;
+
+ if (tickssofar < ticks_limit)
+ return;
+
+ printk(KERN_NOTICE "%s: command unit timed out, status resetting.\n", dev->name);
+
+ i596_reset(dev, lp, ioaddr);
+ }
+}
+
+static int i596_open(struct net_device *dev)
+{
+ int res = 0;
+
+ DEB(DEB_OPEN,printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq));
+
+ if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
+ printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ if (request_irq(0x56, i596_error, 0, "i82596_error", dev)) {
+ res = -EAGAIN;
+ goto err_irq_dev;
+ }
+ }
+#endif
+ res = init_rx_bufs(dev);
+ if (res)
+ goto err_irq_56;
+
+ netif_start_queue(dev);
+
+ if (init_i596_mem(dev)) {
+ res = -EAGAIN;
+ goto err_queue;
+ }
+
+ return 0;
+
+err_queue:
+ netif_stop_queue(dev);
+ remove_rx_bufs(dev);
+err_irq_56:
+#ifdef ENABLE_MVME16x_NET
+ free_irq(0x56, dev);
+err_irq_dev:
+#endif
+ free_irq(dev->irq, dev);
+
+ return res;
+}
+
+static void i596_tx_timeout (struct net_device *dev)
+{
+ struct i596_private *lp = dev->ml_priv;
+ int ioaddr = dev->base_addr;
+
+ /* Transmitter timeout, serious problems. */
+ DEB(DEB_ERRORS,printk(KERN_ERR "%s: transmit timed out, status resetting.\n",
+ dev->name));
+
+ dev->stats.tx_errors++;
+
+ /* Try to restart the adaptor */
+ if (lp->last_restart == dev->stats.tx_packets) {
+ DEB(DEB_ERRORS,printk(KERN_ERR "Resetting board.\n"));
+ /* Shutdown and restart */
+ i596_reset (dev, lp, ioaddr);
+ } else {
+ /* Issue a channel attention signal */
+ DEB(DEB_ERRORS,printk(KERN_ERR "Kicking board.\n"));
+ lp->scb.command = CUC_START | RX_START;
+ CA (dev);
+ lp->last_restart = dev->stats.tx_packets;
+ }
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue (dev);
+}
+
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct i596_private *lp = dev->ml_priv;
+ struct tx_cmd *tx_cmd;
+ struct i596_tbd *tbd;
+ short length = skb->len;
+
+ DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%p) called\n",
+ dev->name, skb->len, skb->data));
+
+ if (skb->len < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ length = ETH_ZLEN;
+ }
+ netif_stop_queue(dev);
+
+ tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
+ tbd = lp->tbds + lp->next_tx_cmd;
+
+ if (tx_cmd->cmd.command) {
+ printk(KERN_NOTICE "%s: xmit ring full, dropping packet.\n",
+ dev->name);
+ dev->stats.tx_dropped++;
+
+ dev_kfree_skb(skb);
+ } else {
+ if (++lp->next_tx_cmd == TX_RING_SIZE)
+ lp->next_tx_cmd = 0;
+ tx_cmd->tbd = WSWAPtbd(virt_to_bus(tbd));
+ tbd->next = I596_NULL;
+
+ tx_cmd->cmd.command = CMD_FLEX | CmdTx;
+ tx_cmd->skb = skb;
+
+ tx_cmd->pad = 0;
+ tx_cmd->size = 0;
+ tbd->pad = 0;
+ tbd->size = EOF | length;
+
+ tbd->data = WSWAPchar(virt_to_bus(skb->data));
+
+#ifdef __mc68000__
+ cache_push(virt_to_phys(skb->data), length);
+#endif
+ DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
+ i596_add_cmd(dev, &tx_cmd->cmd);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += length;
+ }
+
+ netif_start_queue(dev);
+
+ return NETDEV_TX_OK;
+}
+
+static void print_eth(unsigned char *add, char *str)
+{
+ printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
+ add, add + 6, add, add[12], add[13], str);
+}
+
+static int io = 0x300;
+static int irq = 10;
+
+static const struct net_device_ops i596_netdev_ops = {
+ .ndo_open = i596_open,
+ .ndo_stop = i596_close,
+ .ndo_start_xmit = i596_start_xmit,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_tx_timeout = i596_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+struct net_device * __init i82596_probe(int unit)
+{
+ struct net_device *dev;
+ int i;
+ struct i596_private *lp;
+ char eth_addr[8];
+ static int probed;
+ int err;
+
+ if (probed)
+ return ERR_PTR(-ENODEV);
+ probed++;
+
+ dev = alloc_etherdev(0);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ } else {
+ dev->base_addr = io;
+ dev->irq = irq;
+ }
+
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) {
+ printk(KERN_NOTICE "Ethernet probe disabled - chip not present\n");
+ err = -ENODEV;
+ goto out;
+ }
+ memcpy(eth_addr, (void *) 0xfffc1f2c, 6); /* YUCK! Get addr from NOVRAM */
+ dev->base_addr = MVME_I596_BASE;
+ dev->irq = (unsigned) MVME16x_IRQ_I596;
+ goto found;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile unsigned char *rtc = (unsigned char *) BVME_RTC_BASE;
+ unsigned char msr = rtc[3];
+ int i;
+
+ rtc[3] |= 0x80;
+ for (i = 0; i < 6; i++)
+ eth_addr[i] = rtc[i * 4 + 7]; /* Stored in RTC RAM at offset 1 */
+ rtc[3] = msr;
+ dev->base_addr = BVME_I596_BASE;
+ dev->irq = (unsigned) BVME_IRQ_I596;
+ goto found;
+ }
+#endif
+#ifdef ENABLE_APRICOT
+ {
+ int checksum = 0;
+ int ioaddr = 0x300;
+
+		/* this is easy: the ethernet interface can only be at 0x300 */
+ /* first check nothing is already registered here */
+
+ if (!request_region(ioaddr, I596_TOTAL_SIZE, DRV_NAME)) {
+ printk(KERN_ERR "82596: IO address 0x%04x in use\n", ioaddr);
+ err = -EBUSY;
+ goto out;
+ }
+
+ dev->base_addr = ioaddr;
+
+ for (i = 0; i < 8; i++) {
+ eth_addr[i] = inb(ioaddr + 8 + i);
+ checksum += eth_addr[i];
+ }
+
+		/* checksum is a multiple of 0x100; got this wrong the first time:
+		   some machines have 0x100, some 0x200. The DOS driver doesn't
+		   even bother with the checksum.
+		   Some other boards trip the checksum, but then appear as
+		   ether address 0. Trap these - AC */
+
+ if ((checksum % 0x100) ||
+ (memcmp(eth_addr, "\x00\x00\x49", 3) != 0)) {
+ err = -ENODEV;
+ goto out1;
+ }
+
+ dev->irq = 10;
+ goto found;
+ }
+#endif
+ err = -ENODEV;
+ goto out;
+
+found:
+ dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0);
+ if (!dev->mem_start) {
+ err = -ENOMEM;
+ goto out1;
+ }
+
+ DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));
+
+ for (i = 0; i < 6; i++)
+ DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));
+
+ DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));
+
+ DEB(DEB_PROBE,printk(KERN_INFO "%s", version));
+
+ /* The 82596-specific entries in the device structure. */
+ dev->netdev_ops = &i596_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ dev->ml_priv = (void *)(dev->mem_start);
+
+ lp = dev->ml_priv;
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: lp at 0x%08lx (%zd bytes), "
+ "lp->scb at 0x%08lx\n",
+ dev->name, (unsigned long)lp,
+ sizeof(struct i596_private), (unsigned long)&lp->scb));
+ memset((void *) lp, 0, sizeof(struct i596_private));
+
+#ifdef __mc68000__
+ cache_push(virt_to_phys((void *)(dev->mem_start)), 4096);
+ cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096);
+ kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER);
+#endif
+ lp->scb.command = 0;
+ lp->scb.cmd = I596_NULL;
+ lp->scb.rfd = I596_NULL;
+ spin_lock_init(&lp->lock);
+
+ err = register_netdev(dev);
+ if (err)
+ goto out2;
+ return dev;
+out2:
+#ifdef __mc68000__
+ /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
+ * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
+ */
+ kernel_set_cachemode((void *)(dev->mem_start), 4096,
+ IOMAP_FULL_CACHING);
+#endif
+ free_page ((u32)(dev->mem_start));
+out1:
+#ifdef ENABLE_APRICOT
+ release_region(dev->base_addr, I596_TOTAL_SIZE);
+#endif
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static irqreturn_t i596_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct i596_private *lp;
+ short ioaddr;
+ unsigned short status, ack_cmd = 0;
+ int handled = 0;
+
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ if (*(char *) BVME_LOCAL_IRQ_STAT & BVME_ETHERR) {
+ i596_error(irq, dev_id);
+ return IRQ_HANDLED;
+ }
+ }
+#endif
+ if (dev == NULL) {
+ printk(KERN_ERR "i596_interrupt(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = dev->ml_priv;
+
+ spin_lock (&lp->lock);
+
+ wait_cmd(dev,lp,100,"i596 interrupt, timeout");
+ status = lp->scb.status;
+
+ DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
+ dev->name, irq, status));
+
+ ack_cmd = status & 0xf000;
+
+ if ((status & 0x8000) || (status & 0x2000)) {
+ struct i596_cmd *ptr;
+
+ handled = 1;
+ if ((status & 0x8000))
+ DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt completed command.\n", dev->name));
+ if ((status & 0x2000))
+ DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
+
+ while ((lp->cmd_head != I596_NULL) && (lp->cmd_head->status & STAT_C)) {
+ ptr = lp->cmd_head;
+
+ DEB(DEB_STATUS,printk(KERN_DEBUG "cmd_head->status = %04x, ->command = %04x\n",
+ lp->cmd_head->status, lp->cmd_head->command));
+ lp->cmd_head = ptr->v_next;
+ lp->cmd_backlog--;
+
+ switch ((ptr->command) & 0x7) {
+ case CmdTx:
+ {
+ struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
+ struct sk_buff *skb = tx_cmd->skb;
+
+ if ((ptr->status) & STAT_OK) {
+ DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
+ } else {
+ dev->stats.tx_errors++;
+ if ((ptr->status) & 0x0020)
+ dev->stats.collisions++;
+ if (!((ptr->status) & 0x0040))
+ dev->stats.tx_heartbeat_errors++;
+ if ((ptr->status) & 0x0400)
+ dev->stats.tx_carrier_errors++;
+ if ((ptr->status) & 0x0800)
+ dev->stats.collisions++;
+ if ((ptr->status) & 0x1000)
+ dev->stats.tx_aborted_errors++;
+ }
+
+ dev_kfree_skb_irq(skb);
+
+ tx_cmd->cmd.command = 0; /* Mark free */
+ break;
+ }
+ case CmdTDR:
+ {
+ unsigned short status = ((struct tdr_cmd *)ptr)->status;
+
+ if (status & 0x8000) {
+ DEB(DEB_TDR,printk(KERN_INFO "%s: link ok.\n", dev->name));
+ } else {
+ if (status & 0x4000)
+ printk(KERN_ERR "%s: Transceiver problem.\n", dev->name);
+ if (status & 0x2000)
+ printk(KERN_ERR "%s: Termination problem.\n", dev->name);
+ if (status & 0x1000)
+ printk(KERN_ERR "%s: Short circuit.\n", dev->name);
+
+ DEB(DEB_TDR,printk(KERN_INFO "%s: Time %d.\n", dev->name, status & 0x07ff));
+ }
+ break;
+ }
+ case CmdConfigure:
+ case CmdMulticastList:
+ /* Zap command so set_multicast_list() knows it is free */
+ ptr->command = 0;
+ break;
+ }
+ ptr->v_next = ptr->b_next = I596_NULL;
+ lp->last_cmd = jiffies;
+ }
+
+ ptr = lp->cmd_head;
+ while ((ptr != I596_NULL) && (ptr != lp->cmd_tail)) {
+ ptr->command &= 0x1fff;
+ ptr = ptr->v_next;
+ }
+
+ if ((lp->cmd_head != I596_NULL))
+ ack_cmd |= CUC_START;
+ lp->scb.cmd = WSWAPcmd(virt_to_bus(&lp->cmd_head->status));
+ }
+ if ((status & 0x1000) || (status & 0x4000)) {
+ if ((status & 0x4000))
+ DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt received a frame.\n", dev->name));
+ i596_rx(dev);
+ /* Only RX_START if stopped - RGH 07-07-96 */
+ if (status & 0x1000) {
+ if (netif_running(dev)) {
+ DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
+ ack_cmd |= RX_START;
+ dev->stats.rx_errors++;
+ dev->stats.rx_fifo_errors++;
+ rebuild_rx_bufs(dev);
+ }
+ }
+ }
+ wait_cmd(dev,lp,100,"i596 interrupt, timeout");
+ lp->scb.command = ack_cmd;
+
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ /* Ack the interrupt */
+
+ volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
+
+ pcc2[0x2a] |= 0x08;
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
+
+ *ethirq = 1;
+ *ethirq = 3;
+ }
+#endif
+#ifdef ENABLE_APRICOT
+ (void) inb(ioaddr + 0x10);
+ outb(4, ioaddr + 0xf);
+#endif
+ CA(dev);
+
+ DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
+
+ spin_unlock (&lp->lock);
+ return IRQ_RETVAL(handled);
+}
+
+static int i596_close(struct net_device *dev)
+{
+ struct i596_private *lp = dev->ml_priv;
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+
+ DEB(DEB_INIT,printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
+ dev->name, lp->scb.status));
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ wait_cmd(dev,lp,100,"close1 timed out");
+ lp->scb.command = CUC_ABORT | RX_ABORT;
+ CA(dev);
+
+ wait_cmd(dev,lp,100,"close2 timed out");
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+ DEB(DEB_STRUCT,i596_display_data(dev));
+ i596_cleanup_cmd(dev,lp);
+
+#ifdef ENABLE_MVME16x_NET
+ if (MACH_IS_MVME16x) {
+ volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
+
+ /* Disable all ints */
+ pcc2[0x28] = 1;
+ pcc2[0x2a] = 0x40;
+ pcc2[0x2b] = 0x40; /* Set snooping bits now! */
+ }
+#endif
+#ifdef ENABLE_BVME6000_NET
+ if (MACH_IS_BVME6000) {
+ volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
+
+ *ethirq = 1;
+ }
+#endif
+
+#ifdef ENABLE_MVME16x_NET
+ free_irq(0x56, dev);
+#endif
+ free_irq(dev->irq, dev);
+ remove_rx_bufs(dev);
+
+ return 0;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct i596_private *lp = dev->ml_priv;
+ int config = 0, cnt;
+
+ DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
+ dev->name, netdev_mc_count(dev),
+ dev->flags & IFF_PROMISC ? "ON" : "OFF",
+ dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
+
+ if (wait_cfg(dev, &lp->cf_cmd.cmd, 1000, "config change request timed out"))
+ return;
+
+ if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
+ lp->cf_cmd.i596_config[8] |= 0x01;
+ config = 1;
+ }
+ if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
+ lp->cf_cmd.i596_config[8] &= ~0x01;
+ config = 1;
+ }
+ if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
+ lp->cf_cmd.i596_config[11] &= ~0x20;
+ config = 1;
+ }
+ if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
+ lp->cf_cmd.i596_config[11] |= 0x20;
+ config = 1;
+ }
+ if (config) {
+ lp->cf_cmd.cmd.command = CmdConfigure;
+ i596_add_cmd(dev, &lp->cf_cmd.cmd);
+ }
+
+ cnt = netdev_mc_count(dev);
+ if (cnt > MAX_MC_CNT)
+ {
+ cnt = MAX_MC_CNT;
+		printk(KERN_ERR "%s: Only %d multicast addresses supported\n",
+ dev->name, cnt);
+ }
+
+ if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+ unsigned char *cp;
+ struct mc_cmd *cmd;
+
+ if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out"))
+ return;
+ cmd = &lp->mc_cmd;
+ cmd->cmd.command = CmdMulticastList;
+ cmd->mc_cnt = cnt * ETH_ALEN;
+ cp = cmd->mc_addrs;
+ netdev_for_each_mc_addr(ha, dev) {
+ if (!cnt--)
+ break;
+ memcpy(cp, ha->addr, ETH_ALEN);
+ if (i596_debug > 1)
+ DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n",
+ dev->name, cp));
+ cp += ETH_ALEN;
+ }
+ i596_add_cmd(dev, &cmd->cmd);
+ }
+}
+
+#ifdef MODULE
+static struct net_device *dev_82596;
+
+#ifdef ENABLE_APRICOT
+module_param(irq, int, 0);
+MODULE_PARM_DESC(irq, "Apricot IRQ number");
+#endif
+
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "i82596 debug mask");
+
+int __init init_module(void)
+{
+ if (debug >= 0)
+ i596_debug = debug;
+ dev_82596 = i82596_probe(-1);
+ if (IS_ERR(dev_82596))
+ return PTR_ERR(dev_82596);
+ return 0;
+}
+
+void __exit cleanup_module(void)
+{
+ unregister_netdev(dev_82596);
+#ifdef __mc68000__
+ /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
+ * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
+ */
+
+ kernel_set_cachemode((void *)(dev_82596->mem_start), 4096,
+ IOMAP_FULL_CACHING);
+#endif
+ free_page ((u32)(dev_82596->mem_start));
+#ifdef ENABLE_APRICOT
+ /* If we don't do this, we can't re-insmod it later. */
+ release_region(dev_82596->base_addr, I596_TOTAL_SIZE);
+#endif
+ free_netdev(dev_82596);
+}
+
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/i825xx/Kconfig b/drivers/net/ethernet/i825xx/Kconfig
new file mode 100644
index 000000000000..2be46986cbe2
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/Kconfig
@@ -0,0 +1,183 @@
+#
+# Intel 82586/82593/82596 network device configuration
+#
+
+config NET_VENDOR_I825XX
+ bool "Intel (82586/82593/82596) devices"
+ default y
+ depends on NET_VENDOR_INTEL && (ISA || ISA_DMA_API || ARM || \
+ ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \
+ GSC || BVME6000 || MVME16x || EXPERIMENTAL)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question does not directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about these devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_I825XX
+
+config ELPLUS
+ tristate "3c505 \"EtherLink Plus\" support"
+ depends on ISA && ISA_DMA_API
+ ---help---
+ Information about this network (Ethernet) card can be found in
+ <file:Documentation/networking/3c505.txt>. If you have a card of
+ this type, say Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called 3c505.
+
+config EL16
+ tristate "3c507 \"EtherLink 16\" support (EXPERIMENTAL)"
+ depends on ISA && EXPERIMENTAL
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called 3c507.
+
+config ELMC
+ tristate "3c523 \"EtherLink/MC\" support"
+ depends on MCA_LEGACY
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called 3c523.
+
+config ELMC_II
+ tristate "3c527 \"EtherLink/MC 32\" support (EXPERIMENTAL)"
+ depends on MCA && MCA_LEGACY
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called 3c527.
+
+config ARM_ETHER1
+ tristate "Acorn Ether1 support"
+ depends on ARM && ARCH_ACORN
+ ---help---
+ If you have an Acorn system with one of these (AKA25) network cards,
+ you should say Y to this option if you wish to use it with Linux.
+
+config APRICOT
+ tristate "Apricot Xen-II on board Ethernet"
+ depends on ISA
+ ---help---
+ If you have a network (Ethernet) controller of this type, say Y and
+ read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called apricot.
+
+config BVME6000_NET
+ tristate "BVME6000 Ethernet support"
+	depends on BVME6000
+ ---help---
+ This is the driver for the Ethernet interface on BVME4000 and
+ BVME6000 VME boards. Say Y here to include the driver for this chip
+ in your kernel.
+ To compile this driver as a module, choose M here.
+
+config EEXPRESS
+ tristate "EtherExpress 16 support"
+ depends on ISA
+ ---help---
+ If you have an EtherExpress16 network (Ethernet) card, say Y and
+ read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Note that the Intel
+ EtherExpress16 card used to be regarded as a very poor choice
+ because the driver was very unreliable. We now have a new driver
+ that should do better.
+
+ To compile this driver as a module, choose M here. The module
+ will be called eexpress.
+
+config EEXPRESS_PRO
+ tristate "EtherExpressPro support/EtherExpress 10 (i82595) support"
+ depends on ISA
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y. This
+ driver supports Intel i82595{FX,TX} based boards. Note however
+ that the EtherExpress PRO/100 Ethernet card has its own separate
+ driver. Please read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called eepro.
+
+config LASI_82596
+ tristate "Lasi ethernet"
+ depends on GSC
+ ---help---
+ Say Y here to support the builtin Intel 82596 ethernet controller
+ found in Hewlett-Packard PA-RISC machines with 10Mbit ethernet.
+
+config LP486E
+ tristate "LP486E on board Ethernet"
+ depends on ISA
+ ---help---
+ Say Y here to support the 82596-based on-board Ethernet controller
+ for the Panther motherboard, which is one of the two shipped in the
+ Intel Professional Workstation.
+
+config MVME16x_NET
+ tristate "MVME16x Ethernet support"
+ depends on MVME16x
+ ---help---
+ This is the driver for the Ethernet interface on the Motorola
+ MVME162, 166, 167, 172 and 177 boards. Say Y here to include the
+ driver for this chip in your kernel.
+ To compile this driver as a module, choose M here.
+
+config NI52
+ tristate "NI5210 support"
+ depends on ISA
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ni52.
+
+config SNI_82596
+ tristate "SNI RM ethernet"
+ depends on SNI_RM
+ ---help---
+ Say Y here to support the on-board Intel 82596 ethernet controller
+ built into SNI RM machines.
+
+config SUN3_82586
+ bool "Sun3 on-board Intel 82586 support"
+ depends on SUN3
+ ---help---
+ This driver enables support for the on-board Intel 82586 based
+ Ethernet adapter found on Sun 3/1xx and 3/2xx motherboards. Note
+ that this driver does not support 82586-based adapters on additional
+ VME boards.
+
+config ZNET
+ tristate "Zenith Z-Note support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && ISA_DMA_API
+ ---help---
+ The Zenith Z-Note notebook computer has a built-in network
+ (Ethernet) card, and this is the Linux driver for it. Note that the
+ IBM Thinkpad 300 is compatible with the Z-Note and is also supported
+ by this driver. Read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+endif # NET_VENDOR_I825XX
diff --git a/drivers/net/ethernet/i825xx/Makefile b/drivers/net/ethernet/i825xx/Makefile
new file mode 100644
index 000000000000..f68a3694968a
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the Intel 82586/82593/82596 chipset device drivers.
+#
+
+obj-$(CONFIG_ARM_ETHER1) += ether1.o
+obj-$(CONFIG_EEXPRESS) += eexpress.o
+obj-$(CONFIG_EEXPRESS_PRO) += eepro.o
+obj-$(CONFIG_ELPLUS) += 3c505.o
+obj-$(CONFIG_EL16) += 3c507.o
+obj-$(CONFIG_ELMC) += 3c523.o
+obj-$(CONFIG_ELMC_II) += 3c527.o
+obj-$(CONFIG_LP486E) += lp486e.o
+obj-$(CONFIG_NI52) += ni52.o
+obj-$(CONFIG_SUN3_82586) += sun3_82586.o
+obj-$(CONFIG_ZNET) += znet.o
+obj-$(CONFIG_APRICOT) += 82596.o
+obj-$(CONFIG_LASI_82596) += lasi_82596.o
+obj-$(CONFIG_SNI_82596) += sni_82596.o
+obj-$(CONFIG_MVME16x_NET) += 82596.o
+obj-$(CONFIG_BVME6000_NET) += 82596.o
diff --git a/drivers/net/ethernet/i825xx/eepro.c b/drivers/net/ethernet/i825xx/eepro.c
new file mode 100644
index 000000000000..067c46069a11
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/eepro.c
@@ -0,0 +1,1822 @@
+/* eepro.c: Intel EtherExpress Pro/10 device driver for Linux. */
+/*
+ Written 1994, 1995,1996 by Bao C. Ha.
+
+ Copyright (C) 1994, 1995,1996 by Bao C. Ha.
+
+ This software may be used and distributed
+ according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+
+ The author may be reached at bao.ha@srs.gov
+ or 418 Hastings Place, Martinez, GA 30907.
+
+ Things remaining to do:
+ Better record keeping of errors.
+ Eliminate transmit interrupt to reduce overhead.
+ Implement "concurrent processing". I won't be doing it!
+
+ Bugs:
+
+	If you have a problem of not detecting the 82595 during a
+	reboot (warm reset), disabling the FLASH memory should fix it.
+	This is a hardware compatibility problem.
+
+ Versions:
+ 0.13b basic ethtool support (aris, 09/13/2004)
+ 0.13a in memory shortage, drop packets also in board
+ (Michael Westermann <mw@microdata-pos.de>, 07/30/2002)
+ 0.13 irq sharing, rewrote probe function, fixed a nasty bug in
+ hardware_send_packet and a major cleanup (aris, 11/08/2001)
+ 0.12d fixing a problem with single card detected as eight eth devices
+ fixing a problem with sudden drop in card performance
+ (chris (asdn@go2.pl), 10/29/2001)
+ 0.12c fixing some problems with old cards (aris, 01/08/2001)
+ 0.12b misc fixes (aris, 06/26/2000)
+ 0.12a port of version 0.12a of 2.2.x kernels to 2.3.x
+ (aris (aris@conectiva.com.br), 05/19/2000)
+ 0.11e some tweaks about multiple cards support (PdP, jul/aug 1999)
+ 0.11d added __initdata, __init stuff; call spin_lock_init
+ in eepro_probe1. Replaced "eepro" by dev->name. Augmented
+ the code protected by spin_lock in interrupt routine
+ (PdP, 12/12/1998)
+ 0.11c minor cleanup (PdP, RMC, 09/12/1998)
+ 0.11b Pascal Dupuis (dupuis@lei.ucl.ac.be): works as a module
+ under 2.1.xx. Debug messages are flagged as KERN_DEBUG to
+ avoid console flooding. Added locking at critical parts. Now
+			the damn thing is SMP safe.
+ 0.11a Attempt to get 2.1.xx support up (RMC)
+ 0.11 Brian Candler added support for multiple cards. Tested as
+ a module, no idea if it works when compiled into kernel.
+
+ 0.10e Rick Bressler notified me that ifconfig up;ifconfig down fails
+ because the irq is lost somewhere. Fixed that by moving
+ request_irq and free_irq to eepro_open and eepro_close respectively.
+ 0.10d Ugh! Now Wakeup works. Was seriously broken in my first attempt.
+ I'll need to find a way to specify an ioport other than
+		the default one in the PnP case. PnP definitely sucks.
+ And, yes, this is not the only reason.
+ 0.10c PnP Wakeup Test for 595FX. uncomment #define PnPWakeup;
+ to use.
+ 0.10b Should work now with (some) Pro/10+. At least for
+ me (and my two cards) it does. _No_ guarantee for
+ function with non-Pro/10+ cards! (don't have any)
+ (RMC, 9/11/96)
+
+ 0.10 Added support for the Etherexpress Pro/10+. The
+ IRQ map was changed significantly from the old
+ pro/10. The new interrupt map was provided by
+ Rainer M. Canavan (Canavan@Zeus.cs.bonn.edu).
+ (BCH, 9/3/96)
+
+ 0.09 Fixed a race condition in the transmit algorithm,
+ which causes crashes under heavy load with fast
+ pentium computers. The performance should also
+ improve a bit. The size of RX buffer, and hence
+ TX buffer, can also be changed via lilo or insmod.
+ (BCH, 7/31/96)
+
+ 0.08 Implement 32-bit I/O for the 82595TX and 82595FX
+ based lan cards. Disable full-duplex mode if TPE
+ is not used. (BCH, 4/8/96)
+
+ 0.07a Fix a stat report which counts every packet as a
+ heart-beat failure. (BCH, 6/3/95)
+
+ 0.07 Modified to support all other 82595-based lan cards.
+ The IRQ vector of the EtherExpress Pro will be set
+ according to the value saved in the EEPROM. For other
+ cards, I will do autoirq_request() to grab the next
+ available interrupt vector. (BCH, 3/17/95)
+
+ 0.06a,b Interim released. Minor changes in the comments and
+ print out format. (BCH, 3/9/95 and 3/14/95)
+
+ 0.06 First stable release that I am comfortable with. (BCH,
+ 3/2/95)
+
+ 0.05 Complete testing of multicast. (BCH, 2/23/95)
+
+ 0.04 Adding multicast support. (BCH, 2/14/95)
+
+ 0.03 First widely alpha release for public testing.
+ (BCH, 2/14/95)
+
+*/
+
+static const char version[] =
+ "eepro.c: v0.13b 09/13/2004 aris@cathedrallabs.org\n";
+
+#include <linux/module.h>
+
+/*
+ Sources:
+
+ This driver wouldn't have been written without the availability
+	of the Crynwr Lan595 driver source code. It helped me to
+	familiarize myself with the 82595 chipset while waiting for the Intel
+ documentation. I also learned how to detect the 82595 using
+ the packet driver's technique.
+
+ This driver is written by cutting and pasting the skeleton.c driver
+ provided by Donald Becker. I also borrowed the EEPROM routine from
+ Donald Becker's 82586 driver.
+
+ Datasheet for the Intel 82595 (including the TX and FX version). It
+ provides just enough info that the casual reader might think that it
+ documents the i82595.
+
+ The User Manual for the 82595. It provides a lot of the missing
+ information.
+
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/ethtool.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#define DRV_NAME "eepro"
+#define DRV_VERSION	"0.13b"
+
+#define compat_dev_kfree_skb(skb, mode) dev_kfree_skb(skb)
+/* I had reports of looong delays with SLOW_DOWN defined as udelay(2) */
+#define SLOW_DOWN inb(0x80)
+/* udelay(2) */
+#define compat_init_data __initdata
+enum iftype { AUI=0, BNC=1, TPE=2 };
+
+/* First, a few definitions that the brave might change. */
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int eepro_portlist[] compat_init_data =
+ { 0x300, 0x210, 0x240, 0x280, 0x2C0, 0x200, 0x320, 0x340, 0x360, 0};
+/* note: 0x300 is default, the 595FX supports ALL IO Ports
+ from 0x000 to 0x3F0, some of which are reserved in PCs */
+
+/* To try the (not-really) PnP Wakeup, uncomment the following: */
+/*
+#define PnPWakeup
+*/
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 0
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+/* The number of low I/O ports used by the ethercard. */
+#define EEPRO_IO_EXTENT 16
+
+/* Different 82595 chips */
+#define LAN595 0
+#define LAN595TX 1
+#define LAN595FX 2
+#define LAN595FX_10ISA 3
+
+/* Information that needs to be kept for each board. */
+struct eepro_local {
+ unsigned rx_start;
+ unsigned tx_start; /* start of the transmit chain */
+ int tx_last; /* pointer to last packet in the transmit chain */
+ unsigned tx_end; /* end of the transmit chain (plus 1) */
+ int eepro; /* 1 for the EtherExpress Pro/10,
+ 2 for the EtherExpress Pro/10+,
+ 3 for the EtherExpress 10 (blue cards),
+ 0 for other 82595-based lan cards. */
+ int version; /* a flag to indicate if this is a TX or FX
+ version of the 82595 chip. */
+ int stepping;
+
+ spinlock_t lock; /* Serializing lock */
+
+ unsigned rcv_ram; /* pre-calculated space for rx */
+ unsigned xmt_ram; /* pre-calculated space for tx */
+ unsigned char xmt_bar;
+ unsigned char xmt_lower_limit_reg;
+ unsigned char xmt_upper_limit_reg;
+ short xmt_lower_limit;
+ short xmt_upper_limit;
+ short rcv_lower_limit;
+ short rcv_upper_limit;
+ unsigned char eeprom_reg;
+ unsigned short word[8];
+};
+
+/* The station (ethernet) address prefix, used for IDing the board. */
+#define SA_ADDR0 0x00 /* Etherexpress Pro/10 */
+#define SA_ADDR1 0xaa
+#define SA_ADDR2 0x00
+
+#define GetBit(x,y) (((x) & (1<<(y))) >> (y))
+
+/* EEPROM Word 0: */
+#define ee_PnP 0 /* Plug 'n Play enable bit */
+#define ee_Word1 1 /* Word 1? */
+#define ee_BusWidth 2 /* 8/16 bit */
+#define ee_FlashAddr 3 /* Flash Address */
+#define ee_FlashMask 0x7 /* Mask */
+#define ee_AutoIO 6 /* */
+#define ee_reserved0 7 /* =0! */
+#define ee_Flash 8 /* Flash there? */
+#define ee_AutoNeg 9 /* Auto Negotiation enabled? */
+#define ee_IO0 10 /* IO Address LSB */
+#define ee_IO0Mask 0x /*...*/
+#define ee_IO1 15 /* IO MSB */
+
+/* EEPROM Word 1: */
+#define ee_IntSel 0 /* Interrupt */
+#define ee_IntMask 0x7
+#define ee_LI 3 /* Link Integrity 0= enabled */
+#define ee_PC 4 /* Polarity Correction 0= enabled */
+#define ee_TPE_AUI 5 /* PortSelection 1=TPE */
+#define ee_Jabber 6 /* Jabber prevention 0= enabled */
+#define ee_AutoPort 7 /* Auto Port Selection 1= Disabled */
+#define ee_SMOUT 8 /* SMout Pin Control 0= Input */
+#define ee_PROM 9 /* Flash EPROM / PROM 0=Flash */
+#define ee_reserved1 10 /* .. 12 =0! */
+#define ee_AltReady 13 /* Alternate Ready, 0=normal */
+#define ee_reserved2 14 /* =0! */
+#define ee_Duplex 15
+
+/* Word2,3,4: */
+#define ee_IA5	0 /*bit start for individual Addr Byte 5 */
+#define ee_IA4	8 /*bit start for individual Addr Byte 4 */
+#define ee_IA3	0 /*bit start for individual Addr Byte 3 */
+#define ee_IA2	8 /*bit start for individual Addr Byte 2 */
+#define ee_IA1	0 /*bit start for individual Addr Byte 1 */
+#define ee_IA0	8 /*bit start for individual Addr Byte 0 */
+
+/* Word 5: */
+#define ee_BNC_TPE 0 /* 0=TPE */
+#define ee_BootType 1 /* 00=None, 01=IPX, 10=ODI, 11=NDIS */
+#define ee_BootTypeMask 0x3
+#define ee_NumConn 3 /* Number of Connections 0= One or Two */
+#define ee_FlashSock 4 /* Presence of Flash Socket 0= Present */
+#define ee_PortTPE 5
+#define ee_PortBNC 6
+#define ee_PortAUI 7
+#define ee_PowerMgt 10 /* 0= disabled */
+#define ee_CP 13 /* Concurrent Processing */
+#define ee_CPMask 0x7
+
+/* Word 6: */
+#define ee_Stepping 0 /* Stepping info */
+#define ee_StepMask 0x0F
+#define ee_BoardID	4 /* Manufacturer Board ID, reserved */
+#define ee_BoardMask 0x0FFF
+
+/* Word 7: */
+#define ee_INT_TO_IRQ 0 /* int to IRQ Mapping = 0x1EB8 for Pro/10+ */
+#define ee_FX_INT2IRQ 0x1EB8 /* the _only_ mapping allowed for FX chips */
+
+/*..*/
+#define ee_SIZE 0x40 /* total EEprom Size */
+#define ee_Checksum 0xBABA /* initial and final value for adding checksum */
+
+
+/* Card identification via EEprom: */
+#define ee_addr_vendor 0x10 /* Word offset for EISA Vendor ID */
+#define ee_addr_id 0x11 /* Word offset for Card ID */
+#define ee_addr_SN 0x12 /* Serial Number */
+#define ee_addr_CRC_8	0x14 /* CRC over last three Bytes */
+
+
+#define ee_vendor_intel0 0x25 /* Vendor ID Intel */
+#define ee_vendor_intel1 0xD4
+#define ee_id_eepro10p0 0x10 /* ID for eepro/10+ */
+#define ee_id_eepro10p1 0x31
+
+#define TX_TIMEOUT ((4*HZ)/10)
+
+/* Index to functions, as function prototypes. */
+
+static int eepro_probe1(struct net_device *dev, int autoprobe);
+static int eepro_open(struct net_device *dev);
+static netdev_tx_t eepro_send_packet(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t eepro_interrupt(int irq, void *dev_id);
+static void eepro_rx(struct net_device *dev);
+static void eepro_transmit_interrupt(struct net_device *dev);
+static int eepro_close(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static void eepro_tx_timeout (struct net_device *dev);
+
+static int read_eeprom(int ioaddr, int location, struct net_device *dev);
+static int hardware_send_packet(struct net_device *dev, void *buf, short length);
+static int eepro_grab_irq(struct net_device *dev);
+
+/*
+ Details of the i82595.
+
+You will need either the datasheet or the user manual to understand what
+is going on here. The 82595 is very different from the 82586, 82593.
+
+The receive algorithm in eepro_rx() is just an implementation of the
+RCV ring structure that the Intel 82595 imposes at the hardware level.
+The receive buffer is set at 24K, and the transmit buffer is 8K. I
+am assuming that the total buffer memory is 32K, which is true for the
+Intel EtherExpress Pro/10. If it is less than that on a generic card,
+the driver will be broken.
+
+The transmit algorithm in the hardware_send_packet() is similar to the
+one in the eepro_rx(). The transmit buffer is a ring linked list.
+I just queue the next available packet to the end of the list. In my
+system, the 82595 is so fast that the list seems to always contain a
+single packet. In other systems with faster computers and more congested
+network traffic, the ring linked list should improve performance by
+allowing up to 8K worth of packets to be queued.
+
+The sizes of the receive and transmit buffers can now be changed via lilo
+or insmod. Lilo uses the appended line "ether=io,irq,debug,rx-buffer,eth0"
+where rx-buffer is in KB unit. Modules use the parameter mem, which is
+also in KB unit, for example "insmod io=io-address irq=0 mem=rx-buffer."
+The receive buffer has to be more than 3K or less than 29K. Otherwise,
+it is reset to the default of 24K, and, hence, 8K for the transmit
+buffer (transmit-buffer = 32K - receive-buffer).
+
+*/
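+/* A pseudo-C sketch of the buffer-split rule described above, as applied
+ * by eepro_probe1() and eepro_recalc() below (RAM_SIZE and
+ * RCV_DEFAULT_RAM are defined next):
+ *
+ *	rcv_ram = (rx < 3072 || rx > 29696) ? RCV_DEFAULT_RAM : rx;
+ *	xmt_ram = RAM_SIZE - rcv_ram;
+ *
+ * so the default 24K of receive space leaves 8K of transmit space.
+ */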
+#define RAM_SIZE 0x8000
+
+#define RCV_HEADER 8
+#define RCV_DEFAULT_RAM 0x6000
+
+#define XMT_HEADER 8
+#define XMT_DEFAULT_RAM (RAM_SIZE - RCV_DEFAULT_RAM)
+
+#define XMT_START_PRO RCV_DEFAULT_RAM
+#define XMT_START_10 0x0000
+#define RCV_START_PRO 0x0000
+#define RCV_START_10 XMT_DEFAULT_RAM
+
+#define RCV_DONE 0x0008
+#define RX_OK 0x2000
+#define RX_ERROR 0x0d81
+
+#define TX_DONE_BIT 0x0080
+#define TX_OK 0x2000
+#define CHAIN_BIT 0x8000
+#define XMT_STATUS 0x02
+#define XMT_CHAIN 0x04
+#define XMT_COUNT 0x06
+
+#define BANK0_SELECT 0x00
+#define BANK1_SELECT 0x40
+#define BANK2_SELECT 0x80
+
+/* Bank 0 registers */
+#define COMMAND_REG 0x00 /* Register 0 */
+#define MC_SETUP 0x03
+#define XMT_CMD 0x04
+#define DIAGNOSE_CMD 0x07
+#define RCV_ENABLE_CMD 0x08
+#define RCV_DISABLE_CMD 0x0a
+#define STOP_RCV_CMD 0x0b
+#define RESET_CMD 0x0e
+#define POWER_DOWN_CMD 0x18
+#define RESUME_XMT_CMD 0x1c
+#define SEL_RESET_CMD 0x1e
+#define STATUS_REG 0x01 /* Register 1 */
+#define RX_INT 0x02
+#define TX_INT 0x04
+#define EXEC_STATUS 0x30
+#define ID_REG 0x02 /* Register 2 */
+#define R_ROBIN_BITS 0xc0 /* round robin counter */
+#define ID_REG_MASK 0x2c
+#define ID_REG_SIG 0x24
+#define AUTO_ENABLE 0x10
+#define INT_MASK_REG 0x03 /* Register 3 */
+#define RX_STOP_MASK 0x01
+#define RX_MASK 0x02
+#define TX_MASK 0x04
+#define EXEC_MASK 0x08
+#define ALL_MASK 0x0f
+#define IO_32_BIT 0x10
+#define RCV_BAR 0x04 /* The following are word (16-bit) registers */
+#define RCV_STOP 0x06
+
+#define XMT_BAR_PRO 0x0a
+#define XMT_BAR_10 0x0b
+
+#define HOST_ADDRESS_REG 0x0c
+#define IO_PORT 0x0e
+#define IO_PORT_32_BIT 0x0c
+
+/* Bank 1 registers */
+#define REG1 0x01
+#define WORD_WIDTH 0x02
+#define INT_ENABLE 0x80
+#define INT_NO_REG 0x02
+#define RCV_LOWER_LIMIT_REG 0x08
+#define RCV_UPPER_LIMIT_REG 0x09
+
+#define XMT_LOWER_LIMIT_REG_PRO 0x0a
+#define XMT_UPPER_LIMIT_REG_PRO 0x0b
+#define XMT_LOWER_LIMIT_REG_10 0x0b
+#define XMT_UPPER_LIMIT_REG_10 0x0a
+
+/* Bank 2 registers */
+#define XMT_Chain_Int 0x20 /* Interrupt at the end of the transmit chain */
+#define XMT_Chain_ErrStop 0x40 /* Interrupt at the end of the chain even if there are errors */
+#define RCV_Discard_BadFrame 0x80 /* Throw bad frames away, and continue to receive others */
+#define REG2 0x02
+#define PRMSC_Mode 0x01
+#define Multi_IA 0x20
+#define REG3 0x03
+#define TPE_BIT 0x04
+#define BNC_BIT 0x20
+#define REG13 0x0d
+#define FDX 0x00
+#define A_N_ENABLE 0x02
+
+#define I_ADD_REG0 0x04
+#define I_ADD_REG1 0x05
+#define I_ADD_REG2 0x06
+#define I_ADD_REG3 0x07
+#define I_ADD_REG4 0x08
+#define I_ADD_REG5 0x09
+
+#define EEPROM_REG_PRO 0x0a
+#define EEPROM_REG_10 0x0b
+
+#define EESK 0x01
+#define EECS 0x02
+#define EEDI 0x04
+#define EEDO 0x08
+
+/* do a full reset */
+#define eepro_reset(ioaddr) outb(RESET_CMD, ioaddr)
+
+/* do a nice reset */
+#define eepro_sel_reset(ioaddr) { \
+ outb(SEL_RESET_CMD, ioaddr); \
+ SLOW_DOWN; \
+ SLOW_DOWN; \
+ }
+
+/* disable all interrupts */
+#define eepro_dis_int(ioaddr) outb(ALL_MASK, ioaddr + INT_MASK_REG)
+
+/* clear all interrupts */
+#define eepro_clear_int(ioaddr) outb(ALL_MASK, ioaddr + STATUS_REG)
+
+/* enable tx/rx */
+#define eepro_en_int(ioaddr) outb(ALL_MASK & ~(RX_MASK | TX_MASK), \
+ ioaddr + INT_MASK_REG)
+
+/* enable exec event interrupt */
+#define eepro_en_intexec(ioaddr) outb(ALL_MASK & ~(EXEC_MASK), ioaddr + INT_MASK_REG)
+
+/* enable rx */
+#define eepro_en_rx(ioaddr) outb(RCV_ENABLE_CMD, ioaddr)
+
+/* disable rx */
+#define eepro_dis_rx(ioaddr) outb(RCV_DISABLE_CMD, ioaddr)
+
+/* switch bank */
+#define eepro_sw2bank0(ioaddr) outb(BANK0_SELECT, ioaddr)
+#define eepro_sw2bank1(ioaddr) outb(BANK1_SELECT, ioaddr)
+#define eepro_sw2bank2(ioaddr) outb(BANK2_SELECT, ioaddr)
+
+/* enable interrupt line */
+#define eepro_en_intline(ioaddr) outb(inb(ioaddr + REG1) | INT_ENABLE,\
+ ioaddr + REG1)
+
+/* disable interrupt line */
+#define eepro_dis_intline(ioaddr) outb(inb(ioaddr + REG1) & 0x7f, \
+ ioaddr + REG1);
+
+/* set diagnose flag */
+#define eepro_diag(ioaddr) outb(DIAGNOSE_CMD, ioaddr)
+
+/* ack for rx int */
+#define eepro_ack_rx(ioaddr) outb (RX_INT, ioaddr + STATUS_REG)
+
+/* ack for tx int */
+#define eepro_ack_tx(ioaddr) outb (TX_INT, ioaddr + STATUS_REG)
+
+/* a complete sel reset */
+#define eepro_complete_selreset(ioaddr) { \
+ dev->stats.tx_errors++;\
+ eepro_sel_reset(ioaddr);\
+ lp->tx_end = \
+ lp->xmt_lower_limit;\
+ lp->tx_start = lp->tx_end;\
+ lp->tx_last = 0;\
+ dev->trans_start = jiffies;\
+ netif_wake_queue(dev);\
+ eepro_en_rx(ioaddr);\
+ }
+
+/* Check for a network adaptor of this type, and return '0' if one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+ */
+static int __init do_eepro_probe(struct net_device *dev)
+{
+ int i;
+ int base_addr = dev->base_addr;
+ int irq = dev->irq;
+
+#ifdef PnPWakeup
+ /* XXXX for multiple cards should this only be run once? */
+
+ /* Wakeup: */
+ #define WakeupPort 0x279
+ #define WakeupSeq {0x6A, 0xB5, 0xDA, 0xED, 0xF6, 0xFB, 0x7D, 0xBE,\
+ 0xDF, 0x6F, 0x37, 0x1B, 0x0D, 0x86, 0xC3, 0x61,\
+ 0xB0, 0x58, 0x2C, 0x16, 0x8B, 0x45, 0xA2, 0xD1,\
+ 0xE8, 0x74, 0x3A, 0x9D, 0xCE, 0xE7, 0x73, 0x43}
+
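+	/* The 32 bytes above appear to be the standard ISA Plug-and-Play
+	 * initiation key (the LFSR stream beginning 0x6A, 0xB5, ...),
+	 * written to the PnP address port 0x279 to wake PnP-capable
+	 * 595FX cards before probing. */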
+ {
+ unsigned short int WS[32]=WakeupSeq;
+
+ if (request_region(WakeupPort, 2, "eepro wakeup")) {
+ if (net_debug>5)
+ printk(KERN_DEBUG "Waking UP\n");
+
+ outb_p(0,WakeupPort);
+ outb_p(0,WakeupPort);
+ for (i=0; i<32; i++) {
+ outb_p(WS[i],WakeupPort);
+ if (net_debug>5) printk(KERN_DEBUG ": %#x ",WS[i]);
+ }
+
+ release_region(WakeupPort, 2);
+ } else
+ printk(KERN_WARNING "PnP wakeup region busy!\n");
+ }
+#endif
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return eepro_probe1(dev, 0);
+
+ else if (base_addr != 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ for (i = 0; eepro_portlist[i]; i++) {
+ dev->base_addr = eepro_portlist[i];
+ dev->irq = irq;
+ if (eepro_probe1(dev, 1) == 0)
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init eepro_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct eepro_local));
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_eepro_probe(dev);
+ if (err)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static void __init printEEPROMInfo(struct net_device *dev)
+{
+ struct eepro_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ unsigned short Word;
+ int i,j;
+
+ j = ee_Checksum;
+ for (i = 0; i < 8; i++)
+ j += lp->word[i];
+ for ( ; i < ee_SIZE; i++)
+ j += read_eeprom(ioaddr, i, dev);
+
+ printk(KERN_DEBUG "Checksum: %#x\n",j&0xffff);
+
+ Word = lp->word[0];
+ printk(KERN_DEBUG "Word0:\n");
+ printk(KERN_DEBUG " Plug 'n Pray: %d\n",GetBit(Word,ee_PnP));
+ printk(KERN_DEBUG " Buswidth: %d\n",(GetBit(Word,ee_BusWidth)+1)*8 );
+ printk(KERN_DEBUG " AutoNegotiation: %d\n",GetBit(Word,ee_AutoNeg));
+ printk(KERN_DEBUG " IO Address: %#x\n", (Word>>ee_IO0)<<4);
+
+ if (net_debug>4) {
+ Word = lp->word[1];
+ printk(KERN_DEBUG "Word1:\n");
+ printk(KERN_DEBUG " INT: %d\n", Word & ee_IntMask);
+ printk(KERN_DEBUG " LI: %d\n", GetBit(Word,ee_LI));
+ printk(KERN_DEBUG " PC: %d\n", GetBit(Word,ee_PC));
+ printk(KERN_DEBUG " TPE/AUI: %d\n", GetBit(Word,ee_TPE_AUI));
+ printk(KERN_DEBUG " Jabber: %d\n", GetBit(Word,ee_Jabber));
+ printk(KERN_DEBUG " AutoPort: %d\n", !GetBit(Word,ee_AutoPort));
+ printk(KERN_DEBUG " Duplex: %d\n", GetBit(Word,ee_Duplex));
+ }
+
+ Word = lp->word[5];
+ printk(KERN_DEBUG "Word5:\n");
+ printk(KERN_DEBUG " BNC: %d\n",GetBit(Word,ee_BNC_TPE));
+ printk(KERN_DEBUG " NumConnectors: %d\n",GetBit(Word,ee_NumConn));
+ printk(KERN_DEBUG " Has ");
+ if (GetBit(Word,ee_PortTPE)) printk(KERN_DEBUG "TPE ");
+ if (GetBit(Word,ee_PortBNC)) printk(KERN_DEBUG "BNC ");
+ if (GetBit(Word,ee_PortAUI)) printk(KERN_DEBUG "AUI ");
+ printk(KERN_DEBUG "port(s)\n");
+
+ Word = lp->word[6];
+ printk(KERN_DEBUG "Word6:\n");
+ printk(KERN_DEBUG " Stepping: %d\n",Word & ee_StepMask);
+ printk(KERN_DEBUG " BoardID: %d\n",Word>>ee_BoardID);
+
+ Word = lp->word[7];
+ printk(KERN_DEBUG "Word7:\n");
+ printk(KERN_DEBUG " INT to IRQ:\n");
+
+ for (i=0, j=0; i<15; i++)
+ if (GetBit(Word,i)) printk(KERN_DEBUG " INT%d -> IRQ %d;",j++,i);
+
+ printk(KERN_DEBUG "\n");
+}
+
+/* function to recalculate the limits of buffer based on rcv_ram */
+static void eepro_recalc (struct net_device *dev)
+{
+ struct eepro_local * lp;
+
+ lp = netdev_priv(dev);
+ lp->xmt_ram = RAM_SIZE - lp->rcv_ram;
+
+ if (lp->eepro == LAN595FX_10ISA) {
+ lp->xmt_lower_limit = XMT_START_10;
+ lp->xmt_upper_limit = (lp->xmt_ram - 2);
+ lp->rcv_lower_limit = lp->xmt_ram;
+ lp->rcv_upper_limit = (RAM_SIZE - 2);
+ }
+ else {
+ lp->rcv_lower_limit = RCV_START_PRO;
+ lp->rcv_upper_limit = (lp->rcv_ram - 2);
+ lp->xmt_lower_limit = lp->rcv_ram;
+ lp->xmt_upper_limit = (RAM_SIZE - 2);
+ }
+}
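+/* With the default rcv_ram of 0x6000 (24K), and hence xmt_ram of 0x2000,
+ * this yields:
+ *	Pro/10(+): RCV 0x0000-0x5ffe, XMT 0x6000-0x7ffe
+ *	10 ISA:    XMT 0x0000-0x1ffe, RCV 0x2000-0x7ffe
+ */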
+
+/* prints boot-time info */
+static void __init eepro_print_info (struct net_device *dev)
+{
+ struct eepro_local * lp = netdev_priv(dev);
+ int i;
+ const char * ifmap[] = {"AUI", "10Base2", "10BaseT"};
+
+ i = inb(dev->base_addr + ID_REG);
+ printk(KERN_DEBUG " id: %#x ",i);
+ printk(" io: %#x ", (unsigned)dev->base_addr);
+
+ switch (lp->eepro) {
+ case LAN595FX_10ISA:
+ printk("%s: Intel EtherExpress 10 ISA\n at %#x,",
+ dev->name, (unsigned)dev->base_addr);
+ break;
+ case LAN595FX:
+ printk("%s: Intel EtherExpress Pro/10+ ISA\n at %#x,",
+ dev->name, (unsigned)dev->base_addr);
+ break;
+ case LAN595TX:
+ printk("%s: Intel EtherExpress Pro/10 ISA at %#x,",
+ dev->name, (unsigned)dev->base_addr);
+ break;
+ case LAN595:
+ printk("%s: Intel 82595-based lan card at %#x,",
+ dev->name, (unsigned)dev->base_addr);
+ break;
+ }
+
+ printk(" %pM", dev->dev_addr);
+
+ if (net_debug > 3)
+ printk(KERN_DEBUG ", %dK RCV buffer",
+ (int)(lp->rcv_ram)/1024);
+
+ if (dev->irq > 2)
+ printk(", IRQ %d, %s.\n", dev->irq, ifmap[dev->if_port]);
+ else
+ printk(", %s.\n", ifmap[dev->if_port]);
+
+ if (net_debug > 3) {
+ i = lp->word[5];
+ if (i & 0x2000) /* bit 13 of EEPROM word 5 */
+ printk(KERN_DEBUG "%s: Concurrent Processing is "
+ "enabled but not used!\n", dev->name);
+ }
+
+ /* Check the station address for the manufacturer's code */
+ if (net_debug>3)
+ printEEPROMInfo(dev);
+}
+
+static const struct ethtool_ops eepro_ethtool_ops;
+
+static const struct net_device_ops eepro_netdev_ops = {
+ .ndo_open = eepro_open,
+ .ndo_stop = eepro_close,
+ .ndo_start_xmit = eepro_send_packet,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_tx_timeout = eepro_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/* This is the real probe routine. Linux has a history of friendly device
+ probes on the ISA bus. A good device probe avoids doing writes, and
+ verifies that the correct device exists and functions. */
+
+static int __init eepro_probe1(struct net_device *dev, int autoprobe)
+{
+ unsigned short station_addr[3], id, counter;
+ int i;
+ struct eepro_local *lp;
+ int ioaddr = dev->base_addr;
+ int err;
+
+ /* Grab the region so we can find another board if autoIRQ fails. */
+ if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) {
+ if (!autoprobe)
+ printk(KERN_WARNING "EEPRO: io-port 0x%04x in use\n",
+ ioaddr);
+ return -EBUSY;
+ }
+
+ /* Now, we are going to check for the signature of the
+ ID_REG (register 2 of bank 0) */
+
+ id = inb(ioaddr + ID_REG);
+
+ if ((id & ID_REG_MASK) != ID_REG_SIG)
+ goto exit;
+
+ /* We seem to have the 82595 signature, let's
+ play with its counter (last 2 bits of
+ register 2 of bank 0) to be sure. */
+
+ counter = id & R_ROBIN_BITS;
+
+ if ((inb(ioaddr + ID_REG) & R_ROBIN_BITS) != (counter + 0x40))
+ goto exit;
+
+ lp = netdev_priv(dev);
+ memset(lp, 0, sizeof(struct eepro_local));
+ lp->xmt_bar = XMT_BAR_PRO;
+ lp->xmt_lower_limit_reg = XMT_LOWER_LIMIT_REG_PRO;
+ lp->xmt_upper_limit_reg = XMT_UPPER_LIMIT_REG_PRO;
+ lp->eeprom_reg = EEPROM_REG_PRO;
+ spin_lock_init(&lp->lock);
+
+ /* Now, get the ethernet hardware address from
+ the EEPROM */
+ station_addr[0] = read_eeprom(ioaddr, 2, dev);
+
+ /* FIXME - find another way to know that we've found
+ * an Etherexpress 10
+ */
+ if (station_addr[0] == 0x0000 || station_addr[0] == 0xffff) {
+ lp->eepro = LAN595FX_10ISA;
+ lp->eeprom_reg = EEPROM_REG_10;
+ lp->xmt_lower_limit_reg = XMT_LOWER_LIMIT_REG_10;
+ lp->xmt_upper_limit_reg = XMT_UPPER_LIMIT_REG_10;
+ lp->xmt_bar = XMT_BAR_10;
+ station_addr[0] = read_eeprom(ioaddr, 2, dev);
+ }
+
+ /* get all words at once. will be used here and for ethtool */
+ for (i = 0; i < 8; i++) {
+ lp->word[i] = read_eeprom(ioaddr, i, dev);
+ }
+ station_addr[1] = lp->word[3];
+ station_addr[2] = lp->word[4];
+
+ if (!lp->eepro) {
+ if (lp->word[7] == ee_FX_INT2IRQ)
+ lp->eepro = 2;
+ else if (station_addr[2] == SA_ADDR1)
+ lp->eepro = 1;
+ }
+
+ /* Fill in the 'dev' fields. */
+ for (i=0; i < 6; i++)
+ dev->dev_addr[i] = ((unsigned char *) station_addr)[5-i];
+
+	/* RX buffer must be more than 3K and less than 29K (in bytes) */
+	if (dev->mem_end < 3072 || dev->mem_end > 29696)
+		lp->rcv_ram = RCV_DEFAULT_RAM;
+	else
+		lp->rcv_ram = dev->mem_end;
+
+ /* calculate {xmt,rcv}_{lower,upper}_limit */
+ eepro_recalc(dev);
+
+ if (GetBit(lp->word[5], ee_BNC_TPE))
+ dev->if_port = BNC;
+ else
+ dev->if_port = TPE;
+
+ if (dev->irq < 2 && lp->eepro != 0) {
+ /* Mask off INT number */
+ int count = lp->word[1] & 7;
+ unsigned irqMask = lp->word[7];
+
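+		/* word[7] has one bit set per usable IRQ line; as
+		 * printEEPROMInfo() shows, its n-th set bit maps INT n to
+		 * the IRQ at that bit position. Clear the lowest set bit
+		 * `count' times, then the position ffs() finds is the IRQ
+		 * for our INT number. */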
+ while (count--)
+ irqMask &= irqMask - 1;
+
+ count = ffs(irqMask);
+
+ if (count)
+ dev->irq = count - 1;
+
+ if (dev->irq < 2) {
+ printk(KERN_ERR " Duh! illegal interrupt vector stored in EEPROM.\n");
+ goto exit;
+ } else if (dev->irq == 2) {
+ dev->irq = 9;
+ }
+ }
+
+ dev->netdev_ops = &eepro_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->ethtool_ops = &eepro_ethtool_ops;
+
+ /* print boot time info */
+ eepro_print_info(dev);
+
+ /* reset 82595 */
+ eepro_reset(ioaddr);
+
+ err = register_netdev(dev);
+ if (err)
+ goto err;
+ return 0;
+exit:
+ err = -ENODEV;
+err:
+ release_region(dev->base_addr, EEPRO_IO_EXTENT);
+ return err;
+}
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine should set everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+	there is a non-reboot way to recover if something goes wrong.
+ */
+
+static const char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1};
+static const char irqrmap2[] = {-1,-1,4,0,1,2,-1,3,-1,4,5,6,7,-1,-1,-1};
+static int eepro_grab_irq(struct net_device *dev)
+{
+ static const int irqlist[] = { 3, 4, 5, 7, 9, 10, 11, 12, 0 };
+ const int *irqp = irqlist;
+ int temp_reg, ioaddr = dev->base_addr;
+
+ eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */
+
+ /* Enable the interrupt line. */
+ eepro_en_intline(ioaddr);
+
+ /* be CAREFUL, BANK 0 now */
+ eepro_sw2bank0(ioaddr);
+
+ /* clear all interrupts */
+ eepro_clear_int(ioaddr);
+
+	/* Let the EXEC event interrupt */
+ eepro_en_intexec(ioaddr);
+
+ do {
+ eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */
+
+ temp_reg = inb(ioaddr + INT_NO_REG);
+ outb((temp_reg & 0xf8) | irqrmap[*irqp], ioaddr + INT_NO_REG);
+
+ eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */
+
+		if (request_irq (*irqp, NULL, IRQF_SHARED, "bogus", dev) != -EBUSY) {
+ unsigned long irq_mask;
+ /* Twinkle the interrupt, and check if it's seen */
+ irq_mask = probe_irq_on();
+
+ eepro_diag(ioaddr); /* RESET the 82595 */
+ mdelay(20);
+
+ if (*irqp == probe_irq_off(irq_mask)) /* It's a good IRQ line */
+ break;
+
+ /* clear all interrupts */
+ eepro_clear_int(ioaddr);
+ }
+ } while (*++irqp);
+
+ eepro_sw2bank1(ioaddr); /* Switch back to Bank 1 */
+
+ /* Disable the physical interrupt line. */
+ eepro_dis_intline(ioaddr);
+
+ eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */
+
+ /* Mask all the interrupts. */
+ eepro_dis_int(ioaddr);
+
+ /* clear all interrupts */
+ eepro_clear_int(ioaddr);
+
+ return dev->irq;
+}
+
+static int eepro_open(struct net_device *dev)
+{
+ unsigned short temp_reg, old8, old9;
+ int irqMask;
+ int i, ioaddr = dev->base_addr;
+ struct eepro_local *lp = netdev_priv(dev);
+
+ if (net_debug > 3)
+ printk(KERN_DEBUG "%s: entering eepro_open routine.\n", dev->name);
+
+ irqMask = lp->word[7];
+
+ if (lp->eepro == LAN595FX_10ISA) {
+ if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 3;\n");
+ }
+ else if (irqMask == ee_FX_INT2IRQ) /* INT to IRQ Mask */
+ {
+ lp->eepro = 2; /* Yes, an Intel EtherExpress Pro/10+ */
+ if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 2;\n");
+ }
+
+ else if ((dev->dev_addr[0] == SA_ADDR0 &&
+ dev->dev_addr[1] == SA_ADDR1 &&
+ dev->dev_addr[2] == SA_ADDR2))
+ {
+ lp->eepro = 1;
+ if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 1;\n");
+ } /* Yes, an Intel EtherExpress Pro/10 */
+
+	else lp->eepro = 0; /* No, it is a generic 82595 lan card */
+
+ /* Get the interrupt vector for the 82595 */
+ if (dev->irq < 2 && eepro_grab_irq(dev) == 0) {
+ printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+
+ if (request_irq(dev->irq , eepro_interrupt, 0, dev->name, dev)) {
+ printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+
+ /* Initialize the 82595. */
+
+ eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
+ temp_reg = inb(ioaddr + lp->eeprom_reg);
+
+ lp->stepping = temp_reg >> 5; /* Get the stepping number of the 595 */
+
+ if (net_debug > 3)
+ printk(KERN_DEBUG "The stepping of the 82595 is %d\n", lp->stepping);
+
+ if (temp_reg & 0x10) /* Check the TurnOff Enable bit */
+ outb(temp_reg & 0xef, ioaddr + lp->eeprom_reg);
+ for (i=0; i < 6; i++)
+ outb(dev->dev_addr[i] , ioaddr + I_ADD_REG0 + i);
+
+ temp_reg = inb(ioaddr + REG1); /* Setup Transmit Chaining */
+ outb(temp_reg | XMT_Chain_Int | XMT_Chain_ErrStop /* and discard bad RCV frames */
+ | RCV_Discard_BadFrame, ioaddr + REG1);
+
+ temp_reg = inb(ioaddr + REG2); /* Match broadcast */
+ outb(temp_reg | 0x14, ioaddr + REG2);
+
+ temp_reg = inb(ioaddr + REG3);
+ outb(temp_reg & 0x3f, ioaddr + REG3); /* clear test mode */
+
+ /* Set the receiving mode */
+ eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */
+
+ /* Set the interrupt vector */
+ temp_reg = inb(ioaddr + INT_NO_REG);
+ if (lp->eepro == LAN595FX || lp->eepro == LAN595FX_10ISA)
+ outb((temp_reg & 0xf8) | irqrmap2[dev->irq], ioaddr + INT_NO_REG);
+ else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG);
+
+
+ temp_reg = inb(ioaddr + INT_NO_REG);
+ if (lp->eepro == LAN595FX || lp->eepro == LAN595FX_10ISA)
+ outb((temp_reg & 0xf0) | irqrmap2[dev->irq] | 0x08,ioaddr+INT_NO_REG);
+ else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG);
+
+ if (net_debug > 3)
+ printk(KERN_DEBUG "eepro_open: content of INT Reg is %x\n", temp_reg);
+
+
+ /* Initialize the RCV and XMT upper and lower limits */
+ outb(lp->rcv_lower_limit >> 8, ioaddr + RCV_LOWER_LIMIT_REG);
+ outb(lp->rcv_upper_limit >> 8, ioaddr + RCV_UPPER_LIMIT_REG);
+ outb(lp->xmt_lower_limit >> 8, ioaddr + lp->xmt_lower_limit_reg);
+ outb(lp->xmt_upper_limit >> 8, ioaddr + lp->xmt_upper_limit_reg);
+
+ /* Enable the interrupt line. */
+ eepro_en_intline(ioaddr);
+
+ /* Switch back to Bank 0 */
+ eepro_sw2bank0(ioaddr);
+
+	/* Let RX and TX events interrupt */
+ eepro_en_int(ioaddr);
+
+ /* clear all interrupts */
+ eepro_clear_int(ioaddr);
+
+ /* Initialize RCV */
+ outw(lp->rcv_lower_limit, ioaddr + RCV_BAR);
+ lp->rx_start = lp->rcv_lower_limit;
+ outw(lp->rcv_upper_limit | 0xfe, ioaddr + RCV_STOP);
+
+ /* Initialize XMT */
+ outw(lp->xmt_lower_limit, ioaddr + lp->xmt_bar);
+ lp->tx_start = lp->tx_end = lp->xmt_lower_limit;
+ lp->tx_last = 0;
+
+ /* Check for the i82595TX and i82595FX */
+ old8 = inb(ioaddr + 8);
+ outb(~old8, ioaddr + 8);
+
+ if ((temp_reg = inb(ioaddr + 8)) == old8) {
+ if (net_debug > 3)
+ printk(KERN_DEBUG "i82595 detected!\n");
+ lp->version = LAN595;
+ }
+ else {
+ lp->version = LAN595TX;
+ outb(old8, ioaddr + 8);
+ old9 = inb(ioaddr + 9);
+
+ if (irqMask==ee_FX_INT2IRQ) {
+ if (net_debug > 3) {
+ printk(KERN_DEBUG "IrqMask: %#x\n",irqMask);
+ printk(KERN_DEBUG "i82595FX detected!\n");
+ }
+ lp->version = LAN595FX;
+ outb(old9, ioaddr + 9);
+ if (dev->if_port != TPE) { /* Hopefully, this will fix the
+ problem of using Pentiums and
+ pro/10 w/ BNC. */
+ eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
+ temp_reg = inb(ioaddr + REG13);
+ /* disable the full duplex mode since it is not
+ applicable with the 10Base2 cable. */
+				outb(temp_reg & ~(FDX | A_N_ENABLE), ioaddr + REG13);
+ eepro_sw2bank0(ioaddr); /* be CAREFUL, BANK 0 now */
+ }
+ }
+ else if (net_debug > 3) {
+ printk(KERN_DEBUG "temp_reg: %#x ~old9: %#x\n",temp_reg,((~old9)&0xff));
+ printk(KERN_DEBUG "i82595TX detected!\n");
+ }
+ }
+
+ eepro_sel_reset(ioaddr);
+
+ netif_start_queue(dev);
+
+ if (net_debug > 3)
+ printk(KERN_DEBUG "%s: exiting eepro_open routine.\n", dev->name);
+
+ /* enabling rx */
+ eepro_en_rx(ioaddr);
+
+ return 0;
+}
+
+static void eepro_tx_timeout (struct net_device *dev)
+{
+ struct eepro_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ /* if (net_debug > 1) */
+ printk (KERN_ERR "%s: transmit timed out, %s?\n", dev->name,
+ "network cable problem");
+ /* This is not a duplicate. One message for the console,
+ one for the log file */
+ printk (KERN_DEBUG "%s: transmit timed out, %s?\n", dev->name,
+ "network cable problem");
+ eepro_complete_selreset(ioaddr);
+}
+
+
+static netdev_tx_t eepro_send_packet(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct eepro_local *lp = netdev_priv(dev);
+ unsigned long flags;
+ int ioaddr = dev->base_addr;
+ short length = skb->len;
+
+ if (net_debug > 5)
+ printk(KERN_DEBUG "%s: entering eepro_send_packet routine.\n", dev->name);
+
+ if (length < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ length = ETH_ZLEN;
+ }
+ netif_stop_queue (dev);
+
+ eepro_dis_int(ioaddr);
+ spin_lock_irqsave(&lp->lock, flags);
+
+ {
+ unsigned char *buf = skb->data;
+
+ if (hardware_send_packet(dev, buf, length))
+ /* we won't wake queue here because we're out of space */
+ dev->stats.tx_dropped++;
+ else {
+ dev->stats.tx_bytes+=skb->len;
+ netif_wake_queue(dev);
+ }
+
+ }
+
+ dev_kfree_skb (skb);
+
+ /* You might need to clean up and record Tx statistics here. */
+ /* dev->stats.tx_aborted_errors++; */
+
+ if (net_debug > 5)
+ printk(KERN_DEBUG "%s: exiting eepro_send_packet routine.\n", dev->name);
+
+ eepro_en_int(ioaddr);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+
+static irqreturn_t
+eepro_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct eepro_local *lp;
+ int ioaddr, status, boguscount = 20;
+ int handled = 0;
+
+ lp = netdev_priv(dev);
+
+ spin_lock(&lp->lock);
+
+ if (net_debug > 5)
+ printk(KERN_DEBUG "%s: entering eepro_interrupt routine.\n", dev->name);
+
+ ioaddr = dev->base_addr;
+
+ while (((status = inb(ioaddr + STATUS_REG)) & (RX_INT|TX_INT)) && (boguscount--))
+ {
+ handled = 1;
+ if (status & RX_INT) {
+ if (net_debug > 4)
+ printk(KERN_DEBUG "%s: packet received interrupt.\n", dev->name);
+
+ eepro_dis_int(ioaddr);
+
+ /* Get the received packets */
+ eepro_ack_rx(ioaddr);
+ eepro_rx(dev);
+
+ eepro_en_int(ioaddr);
+ }
+ if (status & TX_INT) {
+ if (net_debug > 4)
+ printk(KERN_DEBUG "%s: packet transmit interrupt.\n", dev->name);
+
+
+ eepro_dis_int(ioaddr);
+
+ /* Process the status of transmitted packets */
+ eepro_ack_tx(ioaddr);
+ eepro_transmit_interrupt(dev);
+
+ eepro_en_int(ioaddr);
+ }
+ }
+
+ if (net_debug > 5)
+ printk(KERN_DEBUG "%s: exiting eepro_interrupt routine.\n", dev->name);
+
+ spin_unlock(&lp->lock);
+ return IRQ_RETVAL(handled);
+}
+
+static int eepro_close(struct net_device *dev)
+{
+ struct eepro_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ short temp_reg;
+
+ netif_stop_queue(dev);
+
+ eepro_sw2bank1(ioaddr); /* Switch back to Bank 1 */
+
+ /* Disable the physical interrupt line. */
+ temp_reg = inb(ioaddr + REG1);
+ outb(temp_reg & 0x7f, ioaddr + REG1);
+
+ eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */
+
+ /* Flush the Tx and disable Rx. */
+ outb(STOP_RCV_CMD, ioaddr);
+ lp->tx_start = lp->tx_end = lp->xmt_lower_limit;
+ lp->tx_last = 0;
+
+ /* Mask all the interrupts. */
+ eepro_dis_int(ioaddr);
+
+ /* clear all interrupts */
+ eepro_clear_int(ioaddr);
+
+ /* Reset the 82595 */
+ eepro_reset(ioaddr);
+
+ /* release the interrupt */
+ free_irq(dev->irq, dev);
+
+ /* Update the statistics here. What statistics? */
+
+ return 0;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ */
+static void
+set_multicast_list(struct net_device *dev)
+{
+ struct eepro_local *lp = netdev_priv(dev);
+ short ioaddr = dev->base_addr;
+ unsigned short mode;
+ struct netdev_hw_addr *ha;
+ int mc_count = netdev_mc_count(dev);
+
+ if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63)
+ {
+ eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
+ mode = inb(ioaddr + REG2);
+ outb(mode | PRMSC_Mode, ioaddr + REG2);
+ mode = inb(ioaddr + REG3);
+ outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
+ eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
+ }
+
+ else if (mc_count == 0)
+ {
+ eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
+ mode = inb(ioaddr + REG2);
+ outb(mode & 0xd6, ioaddr + REG2); /* Turn off Multi-IA and PRMSC_Mode bits */
+ mode = inb(ioaddr + REG3);
+ outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
+ eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
+ }
+
+ else
+ {
+ unsigned short status, *eaddrs;
+ int i, boguscount = 0;
+
+ /* Disable RX and TX interrupts. Necessary to avoid
+ corruption of the HOST_ADDRESS_REG by interrupt
+ service routines. */
+ eepro_dis_int(ioaddr);
+
+ eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
+ mode = inb(ioaddr + REG2);
+ outb(mode | Multi_IA, ioaddr + REG2);
+ mode = inb(ioaddr + REG3);
+ outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
+ eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
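+
+		/* Build an MC_SETUP command block at tx_end: the command
+		 * word, two zeroed words, a byte count of 6*(mc_count+1),
+		 * then the 6-byte multicast addresses followed by our own
+		 * station address. */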
+ outw(lp->tx_end, ioaddr + HOST_ADDRESS_REG);
+ outw(MC_SETUP, ioaddr + IO_PORT);
+ outw(0, ioaddr + IO_PORT);
+ outw(0, ioaddr + IO_PORT);
+ outw(6 * (mc_count + 1), ioaddr + IO_PORT);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ eaddrs = (unsigned short *) ha->addr;
+ outw(*eaddrs++, ioaddr + IO_PORT);
+ outw(*eaddrs++, ioaddr + IO_PORT);
+ outw(*eaddrs++, ioaddr + IO_PORT);
+ }
+
+ eaddrs = (unsigned short *) dev->dev_addr;
+ outw(eaddrs[0], ioaddr + IO_PORT);
+ outw(eaddrs[1], ioaddr + IO_PORT);
+ outw(eaddrs[2], ioaddr + IO_PORT);
+ outw(lp->tx_end, ioaddr + lp->xmt_bar);
+ outb(MC_SETUP, ioaddr);
+
+ /* Update the transmit queue */
+ i = lp->tx_end + XMT_HEADER + 6 * (mc_count + 1);
+
+ if (lp->tx_start != lp->tx_end)
+ {
+ /* update the next address and the chain bit in the
+ last packet */
+ outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
+ outw(i, ioaddr + IO_PORT);
+ outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
+ status = inw(ioaddr + IO_PORT);
+ outw(status | CHAIN_BIT, ioaddr + IO_PORT);
+ lp->tx_end = i ;
+ }
+ else {
+ lp->tx_start = lp->tx_end = i ;
+ }
+
+ /* Acknowledge that the MC setup is done */
+ do { /* We should be doing this in the eepro_interrupt()! */
+ SLOW_DOWN;
+ SLOW_DOWN;
+ if (inb(ioaddr + STATUS_REG) & 0x08)
+ {
+ i = inb(ioaddr);
+ outb(0x08, ioaddr + STATUS_REG);
+
+ if (i & 0x20) { /* command ABORTed */
+ printk(KERN_NOTICE "%s: multicast setup failed.\n",
+ dev->name);
+ break;
+ } else if ((i & 0x0f) == 0x03) { /* MC-Done */
+ printk(KERN_DEBUG "%s: set Rx mode to %d address%s.\n",
+ dev->name, mc_count,
+ mc_count > 1 ? "es":"");
+ break;
+ }
+ }
+ } while (++boguscount < 100);
+
+ /* Re-enable RX and TX interrupts */
+ eepro_en_int(ioaddr);
+ }
+ if (lp->eepro == LAN595FX_10ISA) {
+ eepro_complete_selreset(ioaddr);
+ }
+ else
+ eepro_en_rx(ioaddr);
+}
+
+/* The horrible routine to read a word from the serial EEPROM. */
+/* IMPORTANT - the 82595 will be set to Bank 0 after the eeprom is read */
+
+/* The delay between EEPROM clock transitions. */
+#define eeprom_delay() { udelay(40); }
+#define EE_READ_CMD (6 << 6)
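+/* EE_READ_CMD puts 110 in the top three of the nine command bits: a start
+ * bit plus the READ opcode (10), followed by the 6-bit word address.
+ * read_eeprom() clocks these nine bits out MSB first, then clocks the
+ * 16 data bits back in. */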
+
+static int
+read_eeprom(int ioaddr, int location, struct net_device *dev)
+{
+ int i;
+ unsigned short retval = 0;
+ struct eepro_local *lp = netdev_priv(dev);
+ short ee_addr = ioaddr + lp->eeprom_reg;
+ int read_cmd = location | EE_READ_CMD;
+ short ctrl_val = EECS ;
+
+ /* XXXX - black magic */
+ eepro_sw2bank1(ioaddr);
+ outb(0x00, ioaddr + STATUS_REG);
+ /* XXXX - black magic */
+
+ eepro_sw2bank2(ioaddr);
+ outb(ctrl_val, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 8; i >= 0; i--) {
+ short outval = (read_cmd & (1 << i)) ? ctrl_val | EEDI
+ : ctrl_val;
+ outb(outval, ee_addr);
+ outb(outval | EESK, ee_addr); /* EEPROM clock tick. */
+ eeprom_delay();
+ outb(outval, ee_addr); /* Finish EEPROM a clock tick. */
+ eeprom_delay();
+ }
+ outb(ctrl_val, ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ outb(ctrl_val | EESK, ee_addr); eeprom_delay();
+ retval = (retval << 1) | ((inb(ee_addr) & EEDO) ? 1 : 0);
+ outb(ctrl_val, ee_addr); eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+ ctrl_val &= ~EECS;
+ outb(ctrl_val | EESK, ee_addr);
+ eeprom_delay();
+ outb(ctrl_val, ee_addr);
+ eeprom_delay();
+ eepro_sw2bank0(ioaddr);
+ return retval;
+}
+
+static int
+hardware_send_packet(struct net_device *dev, void *buf, short length)
+{
+ struct eepro_local *lp = netdev_priv(dev);
+ short ioaddr = dev->base_addr;
+ unsigned status, tx_available, last, end;
+
+ if (net_debug > 5)
+ printk(KERN_DEBUG "%s: entering hardware_send_packet routine.\n", dev->name);
+
+ /* determine how much of the transmit buffer space is available */
+ if (lp->tx_end > lp->tx_start)
+ tx_available = lp->xmt_ram - (lp->tx_end - lp->tx_start);
+ else if (lp->tx_end < lp->tx_start)
+ tx_available = lp->tx_start - lp->tx_end;
+ else tx_available = lp->xmt_ram;
+
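+	/* ((length + 3) >> 1) << 1 is the frame length rounded up to an even
+	 * byte count plus a two-byte pad; the 2*XMT_HEADER slack presumably
+	 * keeps the ring from ever filling completely. */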
+ if (((((length + 3) >> 1) << 1) + 2*XMT_HEADER) >= tx_available) {
+ /* No space available ??? */
+ return 1;
+ }
+
+ last = lp->tx_end;
+ end = last + (((length + 3) >> 1) << 1) + XMT_HEADER;
+
+ if (end >= lp->xmt_upper_limit + 2) { /* the transmit buffer is wrapped around */
+ if ((lp->xmt_upper_limit + 2 - last) <= XMT_HEADER) {
+ /* Arrrr!!!, must keep the xmt header together,
+ several days were lost to chase this one down. */
+ last = lp->xmt_lower_limit;
+ end = last + (((length + 3) >> 1) << 1) + XMT_HEADER;
+ }
+ else end = lp->xmt_lower_limit + (end -
+ lp->xmt_upper_limit + 2);
+ }
+
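+	/* Lay the transmit command block down at `last': the XMT command
+	 * word, a zeroed status word, the chain pointer (`end') and the
+	 * byte count, then the frame data through the dataport. */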
+ outw(last, ioaddr + HOST_ADDRESS_REG);
+ outw(XMT_CMD, ioaddr + IO_PORT);
+ outw(0, ioaddr + IO_PORT);
+ outw(end, ioaddr + IO_PORT);
+ outw(length, ioaddr + IO_PORT);
+
+ if (lp->version == LAN595)
+ outsw(ioaddr + IO_PORT, buf, (length + 3) >> 1);
+ else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */
+ unsigned short temp = inb(ioaddr + INT_MASK_REG);
+ outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG);
+ outsl(ioaddr + IO_PORT_32_BIT, buf, (length + 3) >> 2);
+ outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG);
+ }
+
+ /* A dummy read to flush the DRAM write pipeline */
+ status = inw(ioaddr + IO_PORT);
+
+ if (lp->tx_start == lp->tx_end) {
+ outw(last, ioaddr + lp->xmt_bar);
+ outb(XMT_CMD, ioaddr);
+ lp->tx_start = last; /* I don't like to change tx_start here */
+ }
+ else {
+ /* update the next address and the chain bit in the
+ last packet */
+
+ if (lp->tx_end != last) {
+ outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
+ outw(last, ioaddr + IO_PORT);
+ }
+
+ outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
+ status = inw(ioaddr + IO_PORT);
+ outw(status | CHAIN_BIT, ioaddr + IO_PORT);
+
+ /* Continue the transmit command */
+ outb(RESUME_XMT_CMD, ioaddr);
+ }
+
+ lp->tx_last = last;
+ lp->tx_end = end;
+
+ if (net_debug > 5)
+ printk(KERN_DEBUG "%s: exiting hardware_send_packet routine.\n", dev->name);
+
+ return 0;
+}
+
+static void
+eepro_rx(struct net_device *dev)
+{
+ struct eepro_local *lp = netdev_priv(dev);
+ short ioaddr = dev->base_addr;
+ short boguscount = 20;
+ short rcv_car = lp->rx_start;
+ unsigned rcv_event, rcv_status, rcv_next_frame, rcv_size;
+
+ if (net_debug > 5)
+ printk(KERN_DEBUG "%s: entering eepro_rx routine.\n", dev->name);
+
+ /* Set the read pointer to the start of the RCV */
+ outw(rcv_car, ioaddr + HOST_ADDRESS_REG);
+
+ rcv_event = inw(ioaddr + IO_PORT);
+
+ while (rcv_event == RCV_DONE) {
+
+ rcv_status = inw(ioaddr + IO_PORT);
+ rcv_next_frame = inw(ioaddr + IO_PORT);
+ rcv_size = inw(ioaddr + IO_PORT);
+
+ if ((rcv_status & (RX_OK | RX_ERROR)) == RX_OK) {
+
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+			rcv_size &= 0x3fff; /* mask status bits out of the size first */
+			dev->stats.rx_bytes += rcv_size;
+ skb = dev_alloc_skb(rcv_size+5);
+ if (skb == NULL) {
+ printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
+ dev->stats.rx_dropped++;
+ rcv_car = lp->rx_start + RCV_HEADER + rcv_size;
+ lp->rx_start = rcv_next_frame;
+ outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG);
+
+ break;
+ }
+ skb_reserve(skb,2);
+
+ if (lp->version == LAN595)
+ insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 3) >> 1);
+ else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */
+ unsigned short temp = inb(ioaddr + INT_MASK_REG);
+ outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG);
+ insl(ioaddr+IO_PORT_32_BIT, skb_put(skb,rcv_size),
+ (rcv_size + 3) >> 2);
+ outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG);
+ }
+
+ skb->protocol = eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ }
+
+		else { /* Not sure we will ever reach here,
+ I set the 595 to discard bad received frames */
+ dev->stats.rx_errors++;
+
+ if (rcv_status & 0x0100)
+ dev->stats.rx_over_errors++;
+
+ else if (rcv_status & 0x0400)
+ dev->stats.rx_frame_errors++;
+
+ else if (rcv_status & 0x0800)
+ dev->stats.rx_crc_errors++;
+
+ printk(KERN_DEBUG "%s: event = %#x, status = %#x, next = %#x, size = %#x\n",
+ dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size);
+ }
+
+ if (rcv_status & 0x1000)
+ dev->stats.rx_length_errors++;
+
+ rcv_car = lp->rx_start + RCV_HEADER + rcv_size;
+ lp->rx_start = rcv_next_frame;
+
+ if (--boguscount == 0)
+ break;
+
+ outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG);
+ rcv_event = inw(ioaddr + IO_PORT);
+
+ }
+ if (rcv_car == 0)
+ rcv_car = lp->rcv_upper_limit | 0xff;
+
+ outw(rcv_car - 1, ioaddr + RCV_STOP);
+
+ if (net_debug > 5)
+ printk(KERN_DEBUG "%s: exiting eepro_rx routine.\n", dev->name);
+}
+
+static void
+eepro_transmit_interrupt(struct net_device *dev)
+{
+ struct eepro_local *lp = netdev_priv(dev);
+ short ioaddr = dev->base_addr;
+ short boguscount = 25;
+ short xmt_status;
+
+ while ((lp->tx_start != lp->tx_end) && boguscount--) {
+
+ outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG);
+ xmt_status = inw(ioaddr+IO_PORT);
+
+ if (!(xmt_status & TX_DONE_BIT))
+ break;
+
+ xmt_status = inw(ioaddr+IO_PORT);
+ lp->tx_start = inw(ioaddr+IO_PORT);
+
+ netif_wake_queue (dev);
+
+ if (xmt_status & TX_OK)
+ dev->stats.tx_packets++;
+ else {
+ dev->stats.tx_errors++;
+ if (xmt_status & 0x0400) {
+ dev->stats.tx_carrier_errors++;
+ printk(KERN_DEBUG "%s: carrier error\n",
+ dev->name);
+ printk(KERN_DEBUG "%s: XMT status = %#x\n",
+ dev->name, xmt_status);
+ }
+ else {
+ printk(KERN_DEBUG "%s: XMT status = %#x\n",
+ dev->name, xmt_status);
+ }
+ }
+ if (xmt_status & 0x000f) {
+ dev->stats.collisions += (xmt_status & 0x000f);
+ }
+
+ if ((xmt_status & 0x0040) == 0x0) {
+ dev->stats.tx_heartbeat_errors++;
+ }
+ }
+}
+
+static int eepro_ethtool_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct eepro_local *lp = netdev_priv(dev);
+
+ cmd->supported = SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_Autoneg;
+ cmd->advertising = ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_Autoneg;
+
+ if (GetBit(lp->word[5], ee_PortTPE)) {
+ cmd->supported |= SUPPORTED_TP;
+ cmd->advertising |= ADVERTISED_TP;
+ }
+ if (GetBit(lp->word[5], ee_PortBNC)) {
+ cmd->supported |= SUPPORTED_BNC;
+ cmd->advertising |= ADVERTISED_BNC;
+ }
+ if (GetBit(lp->word[5], ee_PortAUI)) {
+ cmd->supported |= SUPPORTED_AUI;
+ cmd->advertising |= ADVERTISED_AUI;
+ }
+
+ ethtool_cmd_speed_set(cmd, SPEED_10);
+
+	if (dev->if_port == TPE && GetBit(lp->word[1], ee_Duplex)) {
+ cmd->duplex = DUPLEX_FULL;
+ }
+ else {
+ cmd->duplex = DUPLEX_HALF;
+ }
+
+ cmd->port = dev->if_port;
+ cmd->phy_address = dev->base_addr;
+ cmd->transceiver = XCVR_INTERNAL;
+
+	if (GetBit(lp->word[0], ee_AutoNeg)) {
+ cmd->autoneg = 1;
+ }
+
+ return 0;
+}
+
+static void eepro_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strcpy(drvinfo->driver, DRV_NAME);
+ strcpy(drvinfo->version, DRV_VERSION);
+ sprintf(drvinfo->bus_info, "ISA 0x%lx", dev->base_addr);
+}
+
+static const struct ethtool_ops eepro_ethtool_ops = {
+ .get_settings = eepro_ethtool_get_settings,
+ .get_drvinfo = eepro_ethtool_get_drvinfo,
+};
+
+#ifdef MODULE
+
+#define MAX_EEPRO 8
+static struct net_device *dev_eepro[MAX_EEPRO];
+
+static int io[MAX_EEPRO] = {
+ [0 ... MAX_EEPRO-1] = -1
+};
+static int irq[MAX_EEPRO];
+static int mem[MAX_EEPRO] = { /* Size of the rx buffer in KB */
+ [0 ... MAX_EEPRO-1] = RCV_DEFAULT_RAM/1024
+};
+static int autodetect;
+
+static int n_eepro;
+/* For linux 2.1.xx */
+
+MODULE_AUTHOR("Pascal Dupuis and others");
+MODULE_DESCRIPTION("Intel i82595 ISA EtherExpressPro10/10+ driver");
+MODULE_LICENSE("GPL");
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(mem, int, NULL, 0);
+module_param(autodetect, int, 0);
+MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base address(es)");
+MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
+MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(s) in kB (3-29)");
+MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)");
+
+int __init init_module(void)
+{
+ struct net_device *dev;
+ int i;
+ if (io[0] == -1 && autodetect == 0) {
+ printk(KERN_WARNING "eepro_init_module: Probe is very dangerous in ISA boards!\n");
+ printk(KERN_WARNING "eepro_init_module: Please add \"autodetect=1\" to force probe\n");
+ return -ENODEV;
+ }
+ else if (autodetect) {
+ /* if autodetect is set then we must force detection */
+ for (i = 0; i < MAX_EEPRO; i++) {
+ io[i] = 0;
+ }
+
+ printk(KERN_INFO "eepro_init_module: Auto-detecting boards (May God protect us...)\n");
+ }
+
+ for (i = 0; i < MAX_EEPRO && io[i] != -1; i++) {
+ dev = alloc_etherdev(sizeof(struct eepro_local));
+ if (!dev)
+ break;
+
+		dev->mem_end = mem[i] * 1024; /* parameter is in KB, mem_end in bytes */
+ dev->base_addr = io[i];
+ dev->irq = irq[i];
+
+ if (do_eepro_probe(dev) == 0) {
+ dev_eepro[n_eepro++] = dev;
+ continue;
+ }
+ free_netdev(dev);
+ break;
+ }
+
+ if (n_eepro)
+ printk(KERN_INFO "%s", version);
+
+ return n_eepro ? 0 : -ENODEV;
+}
+
+void __exit
+cleanup_module(void)
+{
+ int i;
+
+ for (i=0; i<n_eepro; i++) {
+ struct net_device *dev = dev_eepro[i];
+ unregister_netdev(dev);
+ release_region(dev->base_addr, EEPRO_IO_EXTENT);
+ free_netdev(dev);
+ }
+}
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/i825xx/eexpress.c b/drivers/net/ethernet/i825xx/eexpress.c
new file mode 100644
index 000000000000..3a9580f3d4dd
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/eexpress.c
@@ -0,0 +1,1720 @@
+/* Intel EtherExpress 16 device driver for Linux
+ *
+ * Written by John Sullivan, 1995
+ * based on original code by Donald Becker, with changes by
+ * Alan Cox and Pauline Middelink.
+ *
+ * Support for 8-bit mode by Zoltan Szilagyi <zoltans@cs.arizona.edu>
+ *
+ * Many modifications, and currently maintained, by
+ * Philip Blundell <philb@gnu.org>
+ * Added the Compaq LTE Alan Cox <alan@lxorguk.ukuu.org.uk>
+ * Added MCA support Adam Fritzler
+ *
+ * Note - this driver is still experimental - it has problems on faster
+ * machines. Someone needs to sit down and go through it line by line with
+ * a databook...
+ */
+
+/* The EtherExpress 16 is a fairly simple card, based on a shared-memory
+ * design using the i82586 Ethernet coprocessor. It bears no relationship,
+ * as far as I know, to the similarly-named "EtherExpress Pro" range.
+ *
+ * Historically, Linux support for these cards has been very bad. However,
+ * things seem to be getting better slowly.
+ */
+
+/* If your card is confused about what sort of interface it has (e.g. it
+ * persistently reports "10baseT" when none is fitted), running 'SOFTSET /BART'
+ * or 'SOFTSET /LISA' from DOS seems to help.
+ */
+
+/* Here's the scoop on memory mapping.
+ *
+ * There are three ways to access EtherExpress card memory: either using the
+ * shared-memory mapping, or using PIO through the dataport, or using PIO
+ * through the "shadow memory" ports.
+ *
+ * The shadow memory system works by having the card map some of its memory
+ * as follows:
+ *
+ * (the low five bits of the SMPTR are ignored)
+ *
+ * base+0x4000..400f memory at SMPTR+0..15
+ * base+0x8000..800f memory at SMPTR+16..31
+ * base+0xc000..c007 dubious stuff (memory at SMPTR+16..23 apparently)
+ * base+0xc008..c00f memory at 0x0008..0x000f
+ *
+ * This last set (the one at c008) is particularly handy because the SCB
+ * lives at 0x0008. So that set of ports gives us easy random access to data
+ * in the SCB without having to mess around setting up pointers and the like.
+ * We always use this method to access the SCB (via the scb_xx() functions).
+ *
+ * Dataport access works by aiming the appropriate (read or write) pointer
+ * at the first address you're interested in, and then reading or writing from
+ * the dataport. The pointers auto-increment after each transfer. We use
+ * this for data transfer.
+ *
+ * We don't use the shared-memory system because it allegedly doesn't work on
+ * all cards, and because it's a bit more prone to go wrong (it's one more
+ * thing to configure...).
+ */
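+
+/* So, for example, the SCB status word at 82586 address 0x0008 is always
+ * readable at base+0xc008 - see scb_status() and friends below - while
+ * bulk frame data moves through the auto-incrementing dataport pointers. */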
+
+/* Known bugs:
+ *
+ * - The card seems to want to give us two interrupts every time something
+ * happens, where just one would be better.
+ */
+
+/*
+ *
+ * Note by Zoltan Szilagyi 10-12-96:
+ *
+ * I've succeeded in eliminating the "CU wedged" messages, and hence the
+ * lockups, which were only occurring with cards running in 8-bit mode ("force
+ * 8-bit operation" in Intel's SoftSet utility). This version of the driver
+ * sets the 82586 and the ASIC to 8-bit mode at startup; it also stops the
+ * CU before submitting a packet for transmission, and then restarts it as soon
+ * as the process of handling the packet is complete. This is definitely an
+ * unnecessary slowdown if the card is running in 16-bit mode; therefore one
+ * should detect 16-bit vs 8-bit mode from the EEPROM settings and act
+ * accordingly. In 8-bit mode with this bugfix I'm getting about 150 K/s for
+ * ftp's, which is significantly better than I get in DOS, so the overhead of
+ * stopping and restarting the CU with each transmit is not prohibitive in
+ * practice.
+ *
+ * Update by David Woodhouse 11/5/99:
+ *
+ * I've seen "CU wedged" messages in 16-bit mode, on the Alpha architecture.
+ * I assume that this is because 16-bit accesses are actually handled as two
+ * 8-bit accesses.
+ */
+
+#ifdef __alpha__
+#define LOCKUP16 1
+#endif
+#ifndef LOCKUP16
+#define LOCKUP16 0
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/mca-legacy.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#ifndef NET_DEBUG
+#define NET_DEBUG 4
+#endif
+
+#include "eexpress.h"
+
+#define EEXP_IO_EXTENT 16
+
+/*
+ * Private data declarations
+ */
+
+struct net_local
+{
+ unsigned long last_tx; /* jiffies when last transmit started */
+ unsigned long init_time; /* jiffies when eexp_hw_init586 called */
+ unsigned short rx_first; /* first rx buf, same as RX_BUF_START */
+ unsigned short rx_last; /* last rx buf */
+ unsigned short rx_ptr; /* first rx buf to look at */
+ unsigned short tx_head; /* next free tx buf */
+ unsigned short tx_reap; /* first in-use tx buf */
+ unsigned short tx_tail; /* previous tx buf to tx_head */
+ unsigned short tx_link; /* last known-executing tx buf */
+ unsigned short last_tx_restart; /* set to tx_link when we
+ restart the CU */
+ unsigned char started;
+ unsigned short rx_buf_start;
+ unsigned short rx_buf_end;
+ unsigned short num_tx_bufs;
+ unsigned short num_rx_bufs;
+ unsigned char width; /* 0 for 16bit, 1 for 8bit */
+ unsigned char was_promisc;
+ unsigned char old_mc_count;
+ spinlock_t lock;
+};
+
+/* This is the code and data that is downloaded to the EtherExpress card's
+ * memory at boot time.
+ */
+
+static unsigned short start_code[] = {
+/* 0x0000 */
+ 0x0001, /* ISCP: busy - cleared after reset */
+ 0x0008,0x0000,0x0000, /* offset,address (lo,hi) of SCB */
+
+ 0x0000,0x0000, /* SCB: status, commands */
+ 0x0000,0x0000, /* links to first command block,
+ first receive descriptor */
+ 0x0000,0x0000, /* CRC error, alignment error counts */
+ 0x0000,0x0000, /* out of resources, overrun error counts */
+
+ 0x0000,0x0000, /* pad */
+ 0x0000,0x0000,
+
+/* 0x20 -- start of 82586 CU program */
+#define CONF_LINK 0x20
+ 0x0000,Cmd_Config,
+ 0x0032, /* link to next command */
+ 0x080c, /* 12 bytes follow : fifo threshold=8 */
+ 0x2e40, /* don't rx bad frames
+ * SRDY/ARDY => ext. sync. : preamble len=8
+ * take addresses from data buffers
+ * 6 bytes/address
+ */
+ 0x6000, /* default backoff method & priority
+ * interframe spacing = 0x60 */
+ 0xf200, /* slot time=0x200
+ * max collision retry = 0xf */
+#define CONF_PROMISC 0x2e
+ 0x0000, /* no HDLC : normal CRC : enable broadcast
+ * disable promiscuous/multicast modes */
+ 0x003c, /* minimum frame length = 60 octets */
+
+ 0x0000,Cmd_SetAddr,
+ 0x003e, /* link to next command */
+#define CONF_HWADDR 0x38
+ 0x0000,0x0000,0x0000, /* hardware address placed here */
+
+ 0x0000,Cmd_MCast,
+ 0x0076, /* link to next command */
+#define CONF_NR_MULTICAST 0x44
+ 0x0000, /* number of bytes in multicast address(es) */
+#define CONF_MULTICAST 0x46
+ 0x0000, 0x0000, 0x0000, /* some addresses */
+ 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000,
+
+#define CONF_DIAG_RESULT 0x76
+ 0x0000, Cmd_Diag,
+ 0x007c, /* link to next command */
+
+ 0x0000,Cmd_TDR|Cmd_INT,
+ 0x0084,
+#define CONF_TDR_RESULT 0x82
+ 0x0000,
+
+ 0x0000,Cmd_END|Cmd_Nop, /* end of configure sequence */
+ 0x0084 /* dummy link */
+};
+
+/* maps irq number to EtherExpress magic value */
+static char irqrmap[] = { 0,0,1,2,3,4,0,0,0,1,5,6,0,0,0,0 };
+
+#ifdef CONFIG_MCA_LEGACY
+/* mapping of the first four bits of the second POS register */
+static unsigned short mca_iomap[] = {
+ 0x270, 0x260, 0x250, 0x240, 0x230, 0x220, 0x210, 0x200,
+ 0x370, 0x360, 0x350, 0x340, 0x330, 0x320, 0x310, 0x300
+};
+/* bits 5-7 of the second POS register */
+static char mca_irqmap[] = { 12, 9, 3, 4, 5, 10, 11, 15 };
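+/* Worked example of the decode in do_express_probe() below: pos1 ==
+ * 0x23 gives ioaddr = mca_iomap[0x23 & 0xf] = mca_iomap[3] = 0x240,
+ * and dev->irq = mca_irqmap[(0x23 >> 4) & 0x7] = mca_irqmap[2] = 3.
+ */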
+#endif
+
+/*
+ * Prototypes for Linux interface
+ */
+
+static int eexp_open(struct net_device *dev);
+static int eexp_close(struct net_device *dev);
+static void eexp_timeout(struct net_device *dev);
+static netdev_tx_t eexp_xmit(struct sk_buff *buf,
+ struct net_device *dev);
+
+static irqreturn_t eexp_irq(int irq, void *dev_addr);
+static void eexp_set_multicast(struct net_device *dev);
+
+/*
+ * Prototypes for hardware access functions
+ */
+
+static void eexp_hw_rx_pio(struct net_device *dev);
+static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf,
+ unsigned short len);
+static int eexp_hw_probe(struct net_device *dev,unsigned short ioaddr);
+static unsigned short eexp_hw_readeeprom(unsigned short ioaddr,
+ unsigned char location);
+
+static unsigned short eexp_hw_lasttxstat(struct net_device *dev);
+static void eexp_hw_txrestart(struct net_device *dev);
+
+static void eexp_hw_txinit (struct net_device *dev);
+static void eexp_hw_rxinit (struct net_device *dev);
+
+static void eexp_hw_init586 (struct net_device *dev);
+static void eexp_setup_filter (struct net_device *dev);
+
+static char *eexp_ifmap[]={"AUI", "BNC", "RJ45"};
+enum eexp_iftype {AUI=0, BNC=1, TPE=2};
+
+#define STARTED_RU 2
+#define STARTED_CU 1
+
+/*
+ * Primitive hardware access functions.
+ */
+
+static inline unsigned short scb_status(struct net_device *dev)
+{
+ return inw(dev->base_addr + 0xc008);
+}
+
+static inline unsigned short scb_rdcmd(struct net_device *dev)
+{
+ return inw(dev->base_addr + 0xc00a);
+}
+
+static inline void scb_command(struct net_device *dev, unsigned short cmd)
+{
+ outw(cmd, dev->base_addr + 0xc00a);
+}
+
+static inline void scb_wrcbl(struct net_device *dev, unsigned short val)
+{
+ outw(val, dev->base_addr + 0xc00c);
+}
+
+static inline void scb_wrrfa(struct net_device *dev, unsigned short val)
+{
+ outw(val, dev->base_addr + 0xc00e);
+}
+
+static inline void set_loopback(struct net_device *dev)
+{
+ outb(inb(dev->base_addr + Config) | 2, dev->base_addr + Config);
+}
+
+static inline void clear_loopback(struct net_device *dev)
+{
+ outb(inb(dev->base_addr + Config) & ~2, dev->base_addr + Config);
+}
+
+static inline unsigned short int SHADOW(short int addr)
+{
+ addr &= 0x1f;
+ if (addr > 0xf) addr += 0x3ff0;
+ return addr + 0x4000;
+}
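+
+/* SHADOW() maps the low five bits of a shared-memory offset to the I/O
+ * offset at which that byte appears once the 32-byte window selected by
+ * SM_PTR is paged in: window bytes 0x00-0x0f show up from ioaddr+0x4000,
+ * bytes 0x10-0x1f from ioaddr+0x8000. For example,
+ * SHADOW(CONF_DIAG_RESULT) == SHADOW(0x76) == 0x8006, so after writing
+ * (0x76 & ~31) to SM_PTR the diagnostic result is read from
+ * ioaddr+0x8006 (as eexp_start_irq() does).
+ */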
+
+/*
+ * Linux interface
+ */
+
+/*
+ * checks for presence of EtherExpress card
+ */
+
+static int __init do_express_probe(struct net_device *dev)
+{
+ unsigned short *port;
+ static unsigned short ports[] = { 0x240,0x300,0x310,0x270,0x320,0x340,0 };
+ unsigned short ioaddr = dev->base_addr;
+ int dev_irq = dev->irq;
+ int err;
+
+ dev->if_port = 0xff; /* not set */
+
+#ifdef CONFIG_MCA_LEGACY
+ if (MCA_bus) {
+ int slot = 0;
+
+ /*
+ * Only find one card at a time. Subsequent calls
+ * will find others, however, proper multicard MCA
+ * probing and setup can't be done with the
+ * old-style Space.c init routines. -- ASF
+ */
+ while (slot != MCA_NOTFOUND) {
+ int pos0, pos1;
+
+ slot = mca_find_unused_adapter(0x628B, slot);
+ if (slot == MCA_NOTFOUND)
+ break;
+
+ pos0 = mca_read_stored_pos(slot, 2);
+ pos1 = mca_read_stored_pos(slot, 3);
+ ioaddr = mca_iomap[pos1&0xf];
+
+ dev->irq = mca_irqmap[(pos1>>4)&0x7];
+
+ /*
+ * XXX: Transceiver selection is done
+ * differently on the MCA version.
+ * How to get it to select something
+ * other than external/AUI is currently
+ * unknown. This code is just for looks. -- ASF
+ */
+ if ((pos0 & 0x7) == 0x1)
+ dev->if_port = AUI;
+ else if ((pos0 & 0x7) == 0x5) {
+ if (pos1 & 0x80)
+ dev->if_port = BNC;
+ else
+ dev->if_port = TPE;
+ }
+
+ mca_set_adapter_name(slot, "Intel EtherExpress 16 MCA");
+ mca_set_adapter_procfn(slot, NULL, dev);
+ mca_mark_as_used(slot);
+
+ break;
+ }
+ }
+#endif
+ if (ioaddr&0xfe00) {
+ if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress"))
+ return -EBUSY;
+ err = eexp_hw_probe(dev,ioaddr);
+ release_region(ioaddr, EEXP_IO_EXTENT);
+ return err;
+ } else if (ioaddr)
+ return -ENXIO;
+
+ for (port=&ports[0] ; *port ; port++ )
+ {
+ unsigned short sum = 0;
+ int i;
+ if (!request_region(*port, EEXP_IO_EXTENT, "EtherExpress"))
+ continue;
+ for ( i=0 ; i<4 ; i++ )
+ {
+ unsigned short t;
+ t = inb(*port + ID_PORT);
+ sum |= (t>>4) << ((t & 0x03)<<2);
+ }
+ if (sum==0xbaba && !eexp_hw_probe(dev,*port)) {
+ release_region(*port, EEXP_IO_EXTENT);
+ return 0;
+ }
+ release_region(*port, EEXP_IO_EXTENT);
+ dev->irq = dev_irq;
+ }
+ return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init express_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_express_probe(dev);
+ if (!err)
+ return dev;
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+/*
+ * open and initialize the adapter, ready for use
+ */
+
+static int eexp_open(struct net_device *dev)
+{
+ int ret;
+ unsigned short ioaddr = dev->base_addr;
+ struct net_local *lp = netdev_priv(dev);
+
+#if NET_DEBUG > 6
+ printk(KERN_DEBUG "%s: eexp_open()\n", dev->name);
+#endif
+
+ if (!dev->irq || !irqrmap[dev->irq])
+ return -ENXIO;
+
+ ret = request_irq(dev->irq, eexp_irq, 0, dev->name, dev);
+ if (ret)
+ return ret;
+
+ if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress")) {
+ printk(KERN_WARNING "EtherExpress io port %x, is busy.\n"
+ , ioaddr);
+ goto err_out1;
+ }
+ if (!request_region(ioaddr+0x4000, EEXP_IO_EXTENT, "EtherExpress shadow")) {
+ printk(KERN_WARNING "EtherExpress io port %x, is busy.\n"
+ , ioaddr+0x4000);
+ goto err_out2;
+ }
+ if (!request_region(ioaddr+0x8000, EEXP_IO_EXTENT, "EtherExpress shadow")) {
+ printk(KERN_WARNING "EtherExpress io port %x, is busy.\n"
+ , ioaddr+0x8000);
+ goto err_out3;
+ }
+ if (!request_region(ioaddr+0xc000, EEXP_IO_EXTENT, "EtherExpress shadow")) {
+ printk(KERN_WARNING "EtherExpress io port %x, is busy.\n"
+ , ioaddr+0xc000);
+ goto err_out4;
+ }
+
+ if (lp->width) {
+ printk("%s: forcing ASIC to 8-bit mode\n", dev->name);
+ outb(inb(dev->base_addr+Config)&~4, dev->base_addr+Config);
+ }
+
+ eexp_hw_init586(dev);
+ netif_start_queue(dev);
+#if NET_DEBUG > 6
+ printk(KERN_DEBUG "%s: leaving eexp_open()\n", dev->name);
+#endif
+ return 0;
+
+ err_out4:
+ release_region(ioaddr+0x8000, EEXP_IO_EXTENT);
+ err_out3:
+ release_region(ioaddr+0x4000, EEXP_IO_EXTENT);
+ err_out2:
+ release_region(ioaddr, EEXP_IO_EXTENT);
+ err_out1:
+ free_irq(dev->irq, dev);
+ return -EBUSY;
+}
+
+/*
+ * close and disable the interface, leaving the 586 in reset.
+ */
+
+static int eexp_close(struct net_device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+ struct net_local *lp = netdev_priv(dev);
+
+ int irq = dev->irq;
+
+ netif_stop_queue(dev);
+
+ outb(SIRQ_dis|irqrmap[irq],ioaddr+SET_IRQ);
+ lp->started = 0;
+ scb_command(dev, SCB_CUsuspend|SCB_RUsuspend);
+ outb(0,ioaddr+SIGNAL_CA);
+ free_irq(irq,dev);
+ outb(i586_RST,ioaddr+EEPROM_Ctrl);
+ release_region(ioaddr, EEXP_IO_EXTENT);
+ release_region(ioaddr+0x4000, 16);
+ release_region(ioaddr+0x8000, 16);
+ release_region(ioaddr+0xc000, 16);
+
+ return 0;
+}
+
+/*
+ * This gets called when a higher level thinks we are broken. Check that
+ * nothing has become jammed in the CU.
+ */
+
+static void unstick_cu(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned short ioaddr = dev->base_addr;
+
+ if (lp->started)
+ {
+ if (time_after(jiffies, dev_trans_start(dev) + HZ/2))
+ {
+ if (lp->tx_link==lp->last_tx_restart)
+ {
+ unsigned short boguscount=200,rsst;
+ printk(KERN_WARNING "%s: Retransmit timed out, status %04x, resetting...\n",
+ dev->name, scb_status(dev));
+ eexp_hw_txinit(dev);
+ lp->last_tx_restart = 0;
+ scb_wrcbl(dev, lp->tx_link);
+ scb_command(dev, SCB_CUstart);
+ outb(0,ioaddr+SIGNAL_CA);
+ while (!SCB_complete(rsst=scb_status(dev)))
+ {
+ if (!--boguscount)
+ {
+ boguscount=200;
+ printk(KERN_WARNING "%s: Reset timed out status %04x, retrying...\n",
+ dev->name,rsst);
+ scb_wrcbl(dev, lp->tx_link);
+ scb_command(dev, SCB_CUstart);
+ outb(0,ioaddr+SIGNAL_CA);
+ }
+ }
+ netif_wake_queue(dev);
+ }
+ else
+ {
+ unsigned short status = scb_status(dev);
+ if (SCB_CUdead(status))
+ {
+ unsigned short txstatus = eexp_hw_lasttxstat(dev);
+ printk(KERN_WARNING "%s: Transmit timed out, CU not active status %04x %04x, restarting...\n",
+ dev->name, status, txstatus);
+ eexp_hw_txrestart(dev);
+ }
+ else
+ {
+ unsigned short txstatus = eexp_hw_lasttxstat(dev);
+ if (netif_queue_stopped(dev) && !txstatus)
+ {
+ printk(KERN_WARNING "%s: CU wedged, status %04x %04x, resetting...\n",
+ dev->name,status,txstatus);
+ eexp_hw_init586(dev);
+ netif_wake_queue(dev);
+ }
+ else
+ {
+ printk(KERN_WARNING "%s: transmit timed out\n", dev->name);
+ }
+ }
+ }
+ }
+ }
+ else
+ {
+ if (time_after(jiffies, lp->init_time + 10))
+ {
+ unsigned short status = scb_status(dev);
+ printk(KERN_WARNING "%s: i82586 startup timed out, status %04x, resetting...\n",
+ dev->name, status);
+ eexp_hw_init586(dev);
+ netif_wake_queue(dev);
+ }
+ }
+}
+
+static void eexp_timeout(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+#ifdef CONFIG_SMP
+ unsigned long flags;
+#endif
+ int status;
+
+ disable_irq(dev->irq);
+
+ /*
+ * Best would be to use synchronize_irq(); spin_lock() here
+ * let's make it work first.
+ */
+
+#ifdef CONFIG_SMP
+ spin_lock_irqsave(&lp->lock, flags);
+#endif
+
+ status = scb_status(dev);
+ unstick_cu(dev);
+ printk(KERN_INFO "%s: transmit timed out, %s?\n", dev->name,
+ (SCB_complete(status)?"lost interrupt":
+ "board on fire"));
+ dev->stats.tx_errors++;
+ lp->last_tx = jiffies;
+ if (!SCB_complete(status)) {
+ scb_command(dev, SCB_CUabort);
+ outb(0,dev->base_addr+SIGNAL_CA);
+ }
+ netif_wake_queue(dev);
+#ifdef CONFIG_SMP
+ spin_unlock_irqrestore(&lp->lock, flags);
+#endif
+}
+
+/*
+ * Called to transmit a packet, or to allow us to right ourselves
+ * if the kernel thinks we've died.
+ */
+static netdev_tx_t eexp_xmit(struct sk_buff *buf, struct net_device *dev)
+{
+ short length = buf->len;
+#ifdef CONFIG_SMP
+ struct net_local *lp = netdev_priv(dev);
+ unsigned long flags;
+#endif
+
+#if NET_DEBUG > 6
+ printk(KERN_DEBUG "%s: eexp_xmit()\n", dev->name);
+#endif
+
+ if (buf->len < ETH_ZLEN) {
+ if (skb_padto(buf, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ length = ETH_ZLEN;
+ }
+
+ disable_irq(dev->irq);
+
+ /*
+ * Best would be to use synchronize_irq(); spin_lock() here
+ * let's make it work first.
+ */
+
+#ifdef CONFIG_SMP
+ spin_lock_irqsave(&lp->lock, flags);
+#endif
+
+ {
+ unsigned short *data = (unsigned short *)buf->data;
+
+ dev->stats.tx_bytes += length;
+
+ eexp_hw_tx_pio(dev,data,length);
+ }
+ dev_kfree_skb(buf);
+#ifdef CONFIG_SMP
+ spin_unlock_irqrestore(&lp->lock, flags);
+#endif
+ enable_irq(dev->irq);
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Handle an EtherExpress interrupt
+ * If we've finished initializing, start the RU and CU up.
+ * If we've already started, reap tx buffers, handle any received packets,
+ * check to make sure we've not become wedged.
+ */
+
+static unsigned short eexp_start_irq(struct net_device *dev,
+ unsigned short status)
+{
+ unsigned short ack_cmd = SCB_ack(status);
+ struct net_local *lp = netdev_priv(dev);
+ unsigned short ioaddr = dev->base_addr;
+ if ((dev->flags & IFF_UP) && !(lp->started & STARTED_CU)) {
+ short diag_status, tdr_status;
+ while (SCB_CUstat(status)==2)
+ status = scb_status(dev);
+#if NET_DEBUG > 4
+ printk("%s: CU went non-active (status %04x)\n",
+ dev->name, status);
+#endif
+
+ outw(CONF_DIAG_RESULT & ~31, ioaddr + SM_PTR);
+ diag_status = inw(ioaddr + SHADOW(CONF_DIAG_RESULT));
+ if (diag_status & 1<<11) {
+ printk(KERN_WARNING "%s: 82586 failed self-test\n",
+ dev->name);
+ } else if (!(diag_status & 1<<13)) {
+ printk(KERN_WARNING "%s: 82586 self-test failed to complete\n", dev->name);
+ }
+
+ outw(CONF_TDR_RESULT & ~31, ioaddr + SM_PTR);
+ tdr_status = inw(ioaddr + SHADOW(CONF_TDR_RESULT));
+ if (tdr_status & (TDR_SHORT|TDR_OPEN)) {
+ printk(KERN_WARNING "%s: TDR reports cable %s at %d tick%s\n", dev->name, (tdr_status & TDR_SHORT)?"short":"broken", tdr_status & TDR_TIME, ((tdr_status & TDR_TIME) != 1) ? "s" : "");
+ }
+ else if (tdr_status & TDR_XCVRPROBLEM) {
+ printk(KERN_WARNING "%s: TDR reports transceiver problem\n", dev->name);
+ }
+ else if (tdr_status & TDR_LINKOK) {
+#if NET_DEBUG > 4
+ printk(KERN_DEBUG "%s: TDR reports link OK\n", dev->name);
+#endif
+ } else {
+ printk("%s: TDR is ga-ga (status %04x)\n", dev->name,
+ tdr_status);
+ }
+
+ lp->started |= STARTED_CU;
+ scb_wrcbl(dev, lp->tx_link);
+ /* if the RU isn't running, start it now */
+ if (!(lp->started & STARTED_RU)) {
+ ack_cmd |= SCB_RUstart;
+ scb_wrrfa(dev, lp->rx_buf_start);
+ lp->rx_ptr = lp->rx_buf_start;
+ lp->started |= STARTED_RU;
+ }
+ ack_cmd |= SCB_CUstart | 0x2000;
+ }
+
+ if ((dev->flags & IFF_UP) && !(lp->started & STARTED_RU) && SCB_RUstat(status)==4)
+ lp->started|=STARTED_RU;
+
+ return ack_cmd;
+}
+
+static void eexp_cmd_clear(struct net_device *dev)
+{
+ unsigned long int oldtime = jiffies;
+ while (scb_rdcmd(dev) && (time_before(jiffies, oldtime + 10)));
+ if (scb_rdcmd(dev)) {
+ printk("%s: command didn't clear\n", dev->name);
+ }
+}
+
+static irqreturn_t eexp_irq(int dummy, void *dev_info)
+{
+ struct net_device *dev = dev_info;
+ struct net_local *lp;
+ unsigned short ioaddr,status,ack_cmd;
+ unsigned short old_read_ptr, old_write_ptr;
+
+ lp = netdev_priv(dev);
+ ioaddr = dev->base_addr;
+
+ spin_lock(&lp->lock);
+
+ old_read_ptr = inw(ioaddr+READ_PTR);
+ old_write_ptr = inw(ioaddr+WRITE_PTR);
+
+ outb(SIRQ_dis|irqrmap[dev->irq], ioaddr+SET_IRQ);
+
+ status = scb_status(dev);
+
+#if NET_DEBUG > 4
+ printk(KERN_DEBUG "%s: interrupt (status %x)\n", dev->name, status);
+#endif
+
+ if (lp->started == (STARTED_CU | STARTED_RU)) {
+
+ do {
+ eexp_cmd_clear(dev);
+
+ ack_cmd = SCB_ack(status);
+ scb_command(dev, ack_cmd);
+ outb(0,ioaddr+SIGNAL_CA);
+
+ eexp_cmd_clear(dev);
+
+ if (SCB_complete(status)) {
+ if (!eexp_hw_lasttxstat(dev)) {
+ printk("%s: tx interrupt but no status\n", dev->name);
+ }
+ }
+
+ if (SCB_rxdframe(status))
+ eexp_hw_rx_pio(dev);
+
+ status = scb_status(dev);
+ } while (status & 0xc000);
+
+ if (SCB_RUdead(status))
+ {
+ printk(KERN_WARNING "%s: RU stopped: status %04x\n",
+ dev->name,status);
+#if 0
+ printk(KERN_WARNING "%s: cur_rfd=%04x, cur_rbd=%04x\n", dev->name, lp->cur_rfd, lp->cur_rbd);
+ outw(lp->cur_rfd, ioaddr+READ_PTR);
+ printk(KERN_WARNING "%s: [%04x]\n", dev->name, inw(ioaddr+DATAPORT));
+ outw(lp->cur_rfd+6, ioaddr+READ_PTR);
+ printk(KERN_WARNING "%s: rbd is %04x\n", dev->name, rbd= inw(ioaddr+DATAPORT));
+ outw(rbd, ioaddr+READ_PTR);
+ printk(KERN_WARNING "%s: [%04x %04x] ", dev->name, inw(ioaddr+DATAPORT), inw(ioaddr+DATAPORT));
+ outw(rbd+8, ioaddr+READ_PTR);
+ printk("[%04x]\n", inw(ioaddr+DATAPORT));
+#endif
+ dev->stats.rx_errors++;
+ eexp_hw_rxinit(dev);
+ scb_wrrfa(dev, lp->rx_buf_start);
+ scb_command(dev, SCB_RUstart);
+ outb(0,ioaddr+SIGNAL_CA);
+ }
+ } else {
+ if (status & 0x8000)
+ ack_cmd = eexp_start_irq(dev, status);
+ else
+ ack_cmd = SCB_ack(status);
+ scb_command(dev, ack_cmd);
+ outb(0,ioaddr+SIGNAL_CA);
+ }
+
+ eexp_cmd_clear(dev);
+
+ outb(SIRQ_en|irqrmap[dev->irq], ioaddr+SET_IRQ);
+
+#if NET_DEBUG > 6
+ printk("%s: leaving eexp_irq()\n", dev->name);
+#endif
+ outw(old_read_ptr, ioaddr+READ_PTR);
+ outw(old_write_ptr, ioaddr+WRITE_PTR);
+
+ spin_unlock(&lp->lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Hardware access functions
+ */
+
+/*
+ * Set the cable type to use.
+ */
+
+static void eexp_hw_set_interface(struct net_device *dev)
+{
+ unsigned char oldval = inb(dev->base_addr + 0x300e);
+ oldval &= ~0x82;
+ switch (dev->if_port) {
+ case TPE:
+ oldval |= 0x2;
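+ /* deliberate fall-through: TPE also sets the 0x80 bit below */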
+ case BNC:
+ oldval |= 0x80;
+ break;
+ }
+ outb(oldval, dev->base_addr+0x300e);
+ mdelay(20);
+}
+
+/*
+ * Check all the receive buffers, and hand any received packets
+ * to the upper levels. Basic sanity check on each frame
+ * descriptor, though we don't bother trying to fix broken ones.
+ */
+
+static void eexp_hw_rx_pio(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned short rx_block = lp->rx_ptr;
+ unsigned short boguscount = lp->num_rx_bufs;
+ unsigned short ioaddr = dev->base_addr;
+ unsigned short status;
+
+#if NET_DEBUG > 6
+ printk(KERN_DEBUG "%s: eexp_hw_rx()\n", dev->name);
+#endif
+
+ do {
+ unsigned short rfd_cmd, rx_next, pbuf, pkt_len;
+
+ outw(rx_block, ioaddr + READ_PTR);
+ status = inw(ioaddr + DATAPORT);
+
+ if (FD_Done(status))
+ {
+ rfd_cmd = inw(ioaddr + DATAPORT);
+ rx_next = inw(ioaddr + DATAPORT);
+ pbuf = inw(ioaddr + DATAPORT);
+
+ outw(pbuf, ioaddr + READ_PTR);
+ pkt_len = inw(ioaddr + DATAPORT);
+
+ if (rfd_cmd!=0x0000)
+ {
+ printk(KERN_WARNING "%s: rfd_cmd not zero:0x%04x\n",
+ dev->name, rfd_cmd);
+ continue;
+ }
+ else if (pbuf!=rx_block+0x16)
+ {
+ printk(KERN_WARNING "%s: rfd and rbd out of sync 0x%04x 0x%04x\n",
+ dev->name, rx_block+0x16, pbuf);
+ continue;
+ }
+ else if ((pkt_len & 0xc000)!=0xc000)
+ {
+ printk(KERN_WARNING "%s: EOF or F not set on received buffer (%04x)\n",
+ dev->name, pkt_len & 0xc000);
+ continue;
+ }
+ else if (!FD_OK(status))
+ {
+ dev->stats.rx_errors++;
+ if (FD_CRC(status))
+ dev->stats.rx_crc_errors++;
+ if (FD_Align(status))
+ dev->stats.rx_frame_errors++;
+ if (FD_Resrc(status))
+ dev->stats.rx_fifo_errors++;
+ if (FD_DMA(status))
+ dev->stats.rx_over_errors++;
+ if (FD_Short(status))
+ dev->stats.rx_length_errors++;
+ }
+ else
+ {
+ struct sk_buff *skb;
+ pkt_len &= 0x3fff;
+ skb = dev_alloc_skb(pkt_len+16);
+ if (skb == NULL)
+ {
+ printk(KERN_WARNING "%s: Memory squeeze, dropping packet\n",dev->name);
+ dev->stats.rx_dropped++;
+ break;
+ }
+ skb_reserve(skb, 2);
+ outw(pbuf+10, ioaddr+READ_PTR);
+ insw(ioaddr+DATAPORT, skb_put(skb,pkt_len),(pkt_len+1)>>1);
+ skb->protocol = eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ outw(rx_block, ioaddr+WRITE_PTR);
+ outw(0, ioaddr+DATAPORT);
+ outw(0, ioaddr+DATAPORT);
+ rx_block = rx_next;
+ }
+ } while (FD_Done(status) && boguscount--);
+ lp->rx_ptr = rx_block;
+}
+
+/*
+ * Hand a packet to the card for transmission
+ * If we get here, we MUST have already checked
+ * to make sure there is room in the transmit
+ * buffer region.
+ */
+
+static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf,
+ unsigned short len)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned short ioaddr = dev->base_addr;
+
+ if (LOCKUP16 || lp->width) {
+ /* Stop the CU so that there is no chance that it
+ jumps off to a bogus address while we are writing the
+ pointer to the next transmit packet in 8-bit mode --
+ this eliminates the "CU wedged" errors in 8-bit mode.
+ (Zoltan Szilagyi 10-12-96) */
+ scb_command(dev, SCB_CUsuspend);
+ outw(0xFFFF, ioaddr+SIGNAL_CA);
+ }
+
+ outw(lp->tx_head, ioaddr + WRITE_PTR);
+
+ outw(0x0000, ioaddr + DATAPORT);
+ outw(Cmd_INT|Cmd_Xmit, ioaddr + DATAPORT);
+ outw(lp->tx_head+0x08, ioaddr + DATAPORT);
+ outw(lp->tx_head+0x0e, ioaddr + DATAPORT);
+
+ outw(0x0000, ioaddr + DATAPORT);
+ outw(0x0000, ioaddr + DATAPORT);
+ outw(lp->tx_head+0x08, ioaddr + DATAPORT);
+
+ outw(0x8000|len, ioaddr + DATAPORT);
+ outw(-1, ioaddr + DATAPORT);
+ outw(lp->tx_head+0x16, ioaddr + DATAPORT);
+ outw(0, ioaddr + DATAPORT);
+
+ outsw(ioaddr + DATAPORT, buf, (len+1)>>1);
+
+ outw(lp->tx_tail+0xc, ioaddr + WRITE_PTR);
+ outw(lp->tx_head, ioaddr + DATAPORT);
+
+ dev->trans_start = jiffies;
+ lp->tx_tail = lp->tx_head;
+ if (lp->tx_head==TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE))
+ lp->tx_head = TX_BUF_START;
+ else
+ lp->tx_head += TX_BUF_SIZE;
+ if (lp->tx_head != lp->tx_reap)
+ netif_wake_queue(dev);
+
+ if (LOCKUP16 || lp->width) {
+ /* Restart the CU so that the packet can actually
+ be transmitted. (Zoltan Szilagyi 10-12-96) */
+ scb_command(dev, SCB_CUresume);
+ outw(0xFFFF, ioaddr+SIGNAL_CA);
+ }
+
+ dev->stats.tx_packets++;
+ lp->last_tx = jiffies;
+}
+
+static const struct net_device_ops eexp_netdev_ops = {
+ .ndo_open = eexp_open,
+ .ndo_stop = eexp_close,
+ .ndo_start_xmit = eexp_xmit,
+ .ndo_set_rx_mode = eexp_set_multicast,
+ .ndo_tx_timeout = eexp_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/*
+ * Sanity check the suspected EtherExpress card
+ * Read hardware address, reset card, size memory and initialize buffer
+ * memory pointers. These are held in netdev_priv(), in case someone has more
+ * than one card in a machine.
+ */
+
+static int __init eexp_hw_probe(struct net_device *dev, unsigned short ioaddr)
+{
+ unsigned short hw_addr[3];
+ unsigned char buswidth;
+ unsigned int memory_size;
+ int i;
+ unsigned short xsum = 0;
+ struct net_local *lp = netdev_priv(dev);
+
+ printk("%s: EtherExpress 16 at %#x ",dev->name,ioaddr);
+
+ outb(ASIC_RST, ioaddr+EEPROM_Ctrl);
+ outb(0, ioaddr+EEPROM_Ctrl);
+ udelay(500);
+ outb(i586_RST, ioaddr+EEPROM_Ctrl);
+
+ hw_addr[0] = eexp_hw_readeeprom(ioaddr,2);
+ hw_addr[1] = eexp_hw_readeeprom(ioaddr,3);
+ hw_addr[2] = eexp_hw_readeeprom(ioaddr,4);
+
+ /* Standard Address or Compaq LTE Address */
+ if (!((hw_addr[2]==0x00aa && ((hw_addr[1] & 0xff00)==0x0000)) ||
+ (hw_addr[2]==0x0080 && ((hw_addr[1] & 0xff00)==0x5F00))))
+ {
+ printk(" rejected: invalid address %04x%04x%04x\n",
+ hw_addr[2],hw_addr[1],hw_addr[0]);
+ return -ENODEV;
+ }
+
+ /* Calculate the EEPROM checksum. Carry on anyway if it's bad,
+ * though.
+ */
+ for (i = 0; i < 64; i++)
+ xsum += eexp_hw_readeeprom(ioaddr, i);
+ if (xsum != 0xbaba)
+ printk(" (bad EEPROM xsum 0x%02x)", xsum);
+
+ dev->base_addr = ioaddr;
+ for ( i=0 ; i<6 ; i++ )
+ dev->dev_addr[i] = ((unsigned char *)hw_addr)[5-i];
+
+ {
+ static const char irqmap[] = { 0, 9, 3, 4, 5, 10, 11, 0 };
+ unsigned short setupval = eexp_hw_readeeprom(ioaddr,0);
+
+ /* Use the IRQ from EEPROM if none was given */
+ if (!dev->irq)
+ dev->irq = irqmap[setupval>>13];
+
+ if (dev->if_port == 0xff) {
+ dev->if_port = !(setupval & 0x1000) ? AUI :
+ eexp_hw_readeeprom(ioaddr,5) & 0x1 ? TPE : BNC;
+ }
+
+ buswidth = !((setupval & 0x400) >> 10);
+ }
+
+ memset(lp, 0, sizeof(struct net_local));
+ spin_lock_init(&lp->lock);
+
+ printk("(IRQ %d, %s connector, %d-bit bus", dev->irq,
+ eexp_ifmap[dev->if_port], buswidth?8:16);
+
+ if (!request_region(dev->base_addr + 0x300e, 1, "EtherExpress"))
+ return -EBUSY;
+
+ eexp_hw_set_interface(dev);
+
+ release_region(dev->base_addr + 0x300e, 1);
+
+ /* Find out how much RAM we have on the card */
+ outw(0, dev->base_addr + WRITE_PTR);
+ for (i = 0; i < 32768; i++)
+ outw(0, dev->base_addr + DATAPORT);
+
+ for (memory_size = 0; memory_size < 64; memory_size++)
+ {
+ outw(memory_size<<10, dev->base_addr + READ_PTR);
+ if (inw(dev->base_addr+DATAPORT))
+ break;
+ outw(memory_size<<10, dev->base_addr + WRITE_PTR);
+ outw(memory_size | 0x5000, dev->base_addr+DATAPORT);
+ outw(memory_size<<10, dev->base_addr + READ_PTR);
+ if (inw(dev->base_addr+DATAPORT) != (memory_size | 0x5000))
+ break;
+ }
+
+ /* Sort out the number of buffers. We may have 16, 32, 48 or 64k
+ * of RAM to play with.
+ */
+ lp->num_tx_bufs = 4;
+ lp->rx_buf_end = 0x3ff6;
+ switch (memory_size)
+ {
+ case 64:
+ lp->rx_buf_end += 0x4000;
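+ /* fall through */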
+ case 48:
+ lp->num_tx_bufs += 4;
+ lp->rx_buf_end += 0x4000;
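+ /* fall through */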
+ case 32:
+ lp->rx_buf_end += 0x4000;
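+ /* fall through */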
+ case 16:
+ printk(", %dk RAM)\n", memory_size);
+ break;
+ default:
+ printk(") bad memory size (%dk).\n", memory_size);
+ return -ENODEV;
+ }
+
+ lp->rx_buf_start = TX_BUF_START + (lp->num_tx_bufs*TX_BUF_SIZE);
+ lp->width = buswidth;
+
+ dev->netdev_ops = &eexp_netdev_ops;
+ dev->watchdog_timeo = 2*HZ;
+
+ return register_netdev(dev);
+}
+
+/*
+ * Read a word from the EtherExpress on-board serial EEPROM.
+ * The EEPROM contains 64 words of 16 bits.
+ */
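+/* The command word 0x180|location clocks out, MSB first, a start bit
+ * (1), the read opcode (10) and a six-bit word address -- the usual
+ * 93C46-style READ sequence -- after which the second loop clocks in
+ * the 16 data bits, again MSB first.
+ */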
+static unsigned short __init eexp_hw_readeeprom(unsigned short ioaddr,
+ unsigned char location)
+{
+ unsigned short cmd = 0x180|(location&0x7f);
+ unsigned short rval = 0,wval = EC_CS|i586_RST;
+ int i;
+
+ outb(EC_CS|i586_RST,ioaddr+EEPROM_Ctrl);
+ for (i=0x100 ; i ; i>>=1 )
+ {
+ if (cmd&i)
+ wval |= EC_Wr;
+ else
+ wval &= ~EC_Wr;
+
+ outb(wval,ioaddr+EEPROM_Ctrl);
+ outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ outb(wval,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ }
+ wval &= ~EC_Wr;
+ outb(wval,ioaddr+EEPROM_Ctrl);
+ for (i=0x8000 ; i ; i>>=1 )
+ {
+ outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ if (inb(ioaddr+EEPROM_Ctrl)&EC_Rd)
+ rval |= i;
+ outb(wval,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ }
+ wval &= ~EC_CS;
+ outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ outb(wval,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ return rval;
+}
+
+/*
+ * Reap tx buffers and return last transmit status.
+ * if ==0 then either:
+ * a) we're not transmitting anything, so why are we here?
+ * b) we've died.
+ * otherwise, Stat_Busy(return) means we've still got some packets
+ * to transmit, Stat_Done(return) means our buffers should be empty
+ * again
+ */
+
+static unsigned short eexp_hw_lasttxstat(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned short tx_block = lp->tx_reap;
+ unsigned short status;
+
+ if (!netif_queue_stopped(dev) && lp->tx_head==lp->tx_reap)
+ return 0x0000;
+
+ do
+ {
+ outw(tx_block & ~31, dev->base_addr + SM_PTR);
+ status = inw(dev->base_addr + SHADOW(tx_block));
+ if (!Stat_Done(status))
+ {
+ lp->tx_link = tx_block;
+ return status;
+ }
+ else
+ {
+ lp->last_tx_restart = 0;
+ dev->stats.collisions += Stat_NoColl(status);
+ if (!Stat_OK(status))
+ {
+ char *whatsup = NULL;
+ dev->stats.tx_errors++;
+ if (Stat_Abort(status))
+ dev->stats.tx_aborted_errors++;
+ if (Stat_TNoCar(status)) {
+ whatsup = "aborted, no carrier";
+ dev->stats.tx_carrier_errors++;
+ }
+ if (Stat_TNoCTS(status)) {
+ whatsup = "aborted, lost CTS";
+ dev->stats.tx_carrier_errors++;
+ }
+ if (Stat_TNoDMA(status)) {
+ whatsup = "FIFO underran";
+ dev->stats.tx_fifo_errors++;
+ }
+ if (Stat_TXColl(status)) {
+ whatsup = "aborted, too many collisions";
+ dev->stats.tx_aborted_errors++;
+ }
+ if (whatsup)
+ printk(KERN_INFO "%s: transmit %s\n",
+ dev->name, whatsup);
+ }
+ else
+ dev->stats.tx_packets++;
+ }
+ if (tx_block == TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE))
+ lp->tx_reap = tx_block = TX_BUF_START;
+ else
+ lp->tx_reap = tx_block += TX_BUF_SIZE;
+ netif_wake_queue(dev);
+ }
+ while (lp->tx_reap != lp->tx_head);
+
+ lp->tx_link = lp->tx_tail + 0x08;
+
+ return status;
+}
+
+/*
+ * This should never happen. It is called when some higher routine detects
+ * that the CU has stopped, to try to restart it from the last packet we knew
+ * we were working on, or from the idle loop if we had finished for the
+ * time being.
+ */
+
+static void eexp_hw_txrestart(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned short ioaddr = dev->base_addr;
+
+ lp->last_tx_restart = lp->tx_link;
+ scb_wrcbl(dev, lp->tx_link);
+ scb_command(dev, SCB_CUstart);
+ outb(0,ioaddr+SIGNAL_CA);
+
+ {
+ unsigned short boguscount=50,failcount=5;
+ while (!scb_status(dev))
+ {
+ if (!--boguscount)
+ {
+ if (--failcount)
+ {
+ printk(KERN_WARNING "%s: CU start timed out, status %04x, cmd %04x\n", dev->name, scb_status(dev), scb_rdcmd(dev));
+ scb_wrcbl(dev, lp->tx_link);
+ scb_command(dev, SCB_CUstart);
+ outb(0,ioaddr+SIGNAL_CA);
+ boguscount = 100;
+ }
+ else
+ {
+ printk(KERN_WARNING "%s: Failed to restart CU, resetting board...\n",dev->name);
+ eexp_hw_init586(dev);
+ netif_wake_queue(dev);
+ return;
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Writes down the list of transmit buffers into card memory. Each
+ * entry consists of an 82586 transmit command, followed by a jump
+ * pointing to itself. When we want to transmit a packet, we write
+ * the data into the appropriate transmit buffer and then modify the
+ * preceding jump to point at the new transmit command. This means that
+ * the 586 command unit is continuously active.
+ */
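+
+/* Layout of one transmit buffer entry, as written by eexp_hw_txinit()
+ * and eexp_hw_tx_pio() (offsets relative to the entry start):
+ *
+ * +0x00 transmit command block: status, Cmd_INT|Cmd_Xmit,
+ * link (-> +0x08), TBD offset (-> +0x0e)
+ * +0x08 self-linked Cmd_Nop block: status, command,
+ * link (-> +0x08, i.e. itself)
+ * +0x0e transmit buffer descriptor: EOF|byte count, next TBD (none),
+ * buffer address (-> +0x16), address high word
+ * +0x16 packet data
+ *
+ * The jump modified when a packet is queued is the Cmd_Nop link word
+ * at +0x0c of the previous entry (lp->tx_tail+0xc in eexp_hw_tx_pio()).
+ */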
+
+static void eexp_hw_txinit(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned short tx_block = TX_BUF_START;
+ unsigned short curtbuf;
+ unsigned short ioaddr = dev->base_addr;
+
+ for ( curtbuf=0 ; curtbuf<lp->num_tx_bufs ; curtbuf++ )
+ {
+ outw(tx_block, ioaddr + WRITE_PTR);
+
+ outw(0x0000, ioaddr + DATAPORT);
+ outw(Cmd_INT|Cmd_Xmit, ioaddr + DATAPORT);
+ outw(tx_block+0x08, ioaddr + DATAPORT);
+ outw(tx_block+0x0e, ioaddr + DATAPORT);
+
+ outw(0x0000, ioaddr + DATAPORT);
+ outw(0x0000, ioaddr + DATAPORT);
+ outw(tx_block+0x08, ioaddr + DATAPORT);
+
+ outw(0x8000, ioaddr + DATAPORT);
+ outw(-1, ioaddr + DATAPORT);
+ outw(tx_block+0x16, ioaddr + DATAPORT);
+ outw(0x0000, ioaddr + DATAPORT);
+
+ tx_block += TX_BUF_SIZE;
+ }
+ lp->tx_head = TX_BUF_START;
+ lp->tx_reap = TX_BUF_START;
+ lp->tx_tail = tx_block - TX_BUF_SIZE;
+ lp->tx_link = lp->tx_tail + 0x08;
+ lp->rx_buf_start = tx_block;
+
+}
+
+/*
+ * Write the circular list of receive buffer descriptors to card memory.
+ * The end of the list isn't marked, which means that the 82586 receive
+ * unit will loop until buffers become available (this avoids it giving us
+ * "out of resources" messages).
+ */
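+
+/* Layout of one receive buffer, as written below (offsets relative to
+ * the buffer start): the frame descriptor occupies +0x00..+0x15
+ * (status, command, link to the next RFD, RBD offset, then space for
+ * the frame addresses and length), the receive buffer descriptor
+ * +0x16..+0x1f (actual count, link to the next RBD, buffer address,
+ * size), and packet data starts at +0x20 -- which is why
+ * eexp_hw_rx_pio() reads the data from pbuf+10.
+ */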
+
+static void eexp_hw_rxinit(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned short rx_block = lp->rx_buf_start;
+ unsigned short ioaddr = dev->base_addr;
+
+ lp->num_rx_bufs = 0;
+ lp->rx_first = lp->rx_ptr = rx_block;
+ do
+ {
+ lp->num_rx_bufs++;
+
+ outw(rx_block, ioaddr + WRITE_PTR);
+
+ outw(0, ioaddr + DATAPORT); outw(0, ioaddr+DATAPORT);
+ outw(rx_block + RX_BUF_SIZE, ioaddr+DATAPORT);
+ outw(0xffff, ioaddr+DATAPORT);
+
+ outw(0x0000, ioaddr+DATAPORT);
+ outw(0xdead, ioaddr+DATAPORT);
+ outw(0xdead, ioaddr+DATAPORT);
+ outw(0xdead, ioaddr+DATAPORT);
+ outw(0xdead, ioaddr+DATAPORT);
+ outw(0xdead, ioaddr+DATAPORT);
+ outw(0xdead, ioaddr+DATAPORT);
+
+ outw(0x0000, ioaddr+DATAPORT);
+ outw(rx_block + RX_BUF_SIZE + 0x16, ioaddr+DATAPORT);
+ outw(rx_block + 0x20, ioaddr+DATAPORT);
+ outw(0, ioaddr+DATAPORT);
+ outw(RX_BUF_SIZE-0x20, ioaddr+DATAPORT);
+
+ lp->rx_last = rx_block;
+ rx_block += RX_BUF_SIZE;
+ } while (rx_block <= lp->rx_buf_end-RX_BUF_SIZE);
+
+
+ /* Make first Rx frame descriptor point to first Rx buffer
+ descriptor */
+ outw(lp->rx_first + 6, ioaddr+WRITE_PTR);
+ outw(lp->rx_first + 0x16, ioaddr+DATAPORT);
+
+ /* Close Rx frame descriptor ring */
+ outw(lp->rx_last + 4, ioaddr+WRITE_PTR);
+ outw(lp->rx_first, ioaddr+DATAPORT);
+
+ /* Close Rx buffer descriptor ring */
+ outw(lp->rx_last + 0x16 + 2, ioaddr+WRITE_PTR);
+ outw(lp->rx_first + 0x16, ioaddr+DATAPORT);
+
+}
+
+/*
+ * Un-reset the 586, and start the configuration sequence. We don't wait for
+ * this to finish, but allow the interrupt handler to start the CU and RU for
+ * us. We can't start the receive/transmission system up before we know that
+ * the hardware is configured correctly.
+ */
+
+static void eexp_hw_init586(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned short ioaddr = dev->base_addr;
+ int i;
+
+#if NET_DEBUG > 6
+ printk("%s: eexp_hw_init586()\n", dev->name);
+#endif
+
+ lp->started = 0;
+
+ set_loopback(dev);
+
+ outb(SIRQ_dis|irqrmap[dev->irq],ioaddr+SET_IRQ);
+
+ /* Download the startup code */
+ outw(lp->rx_buf_end & ~31, ioaddr + SM_PTR);
+ outw(lp->width?0x0001:0x0000, ioaddr + 0x8006);
+ outw(0x0000, ioaddr + 0x8008);
+ outw(0x0000, ioaddr + 0x800a);
+ outw(0x0000, ioaddr + 0x800c);
+ outw(0x0000, ioaddr + 0x800e);
+
+ for (i = 0; i < ARRAY_SIZE(start_code) * 2; i+=32) {
+ int j;
+ outw(i, ioaddr + SM_PTR);
+ for (j = 0; j < 16 && (i+j)/2 < ARRAY_SIZE(start_code); j+=2)
+ outw(start_code[(i+j)/2],
+ ioaddr+0x4000+j);
+ for (j = 0; j < 16 && (i+j+16)/2 < ARRAY_SIZE(start_code); j+=2)
+ outw(start_code[(i+j+16)/2],
+ ioaddr+0x8000+j);
+ }
+
+ /* Do we want promiscuous mode or multicast? */
+ outw(CONF_PROMISC & ~31, ioaddr+SM_PTR);
+ i = inw(ioaddr+SHADOW(CONF_PROMISC));
+ outw((dev->flags & IFF_PROMISC)?(i|1):(i & ~1),
+ ioaddr+SHADOW(CONF_PROMISC));
+ lp->was_promisc = !!(dev->flags & IFF_PROMISC); /* IFF_PROMISC (0x100) would truncate to 0 in a char */
+#if 0
+ eexp_setup_filter(dev);
+#endif
+
+ /* Write our hardware address */
+ outw(CONF_HWADDR & ~31, ioaddr+SM_PTR);
+ outw(((unsigned short *)dev->dev_addr)[0], ioaddr+SHADOW(CONF_HWADDR));
+ outw(((unsigned short *)dev->dev_addr)[1],
+ ioaddr+SHADOW(CONF_HWADDR+2));
+ outw(((unsigned short *)dev->dev_addr)[2],
+ ioaddr+SHADOW(CONF_HWADDR+4));
+
+ eexp_hw_txinit(dev);
+ eexp_hw_rxinit(dev);
+
+ outb(0,ioaddr+EEPROM_Ctrl);
+ mdelay(5);
+
+ scb_command(dev, 0xf000);
+ outb(0,ioaddr+SIGNAL_CA);
+
+ outw(0, ioaddr+SM_PTR);
+
+ {
+ unsigned short rboguscount=50,rfailcount=5;
+ while (inw(ioaddr+0x4000))
+ {
+ if (!--rboguscount)
+ {
+ printk(KERN_WARNING "%s: i82586 reset timed out, kicking...\n",
+ dev->name);
+ scb_command(dev, 0);
+ outb(0,ioaddr+SIGNAL_CA);
+ rboguscount = 100;
+ if (!--rfailcount)
+ {
+ printk(KERN_WARNING "%s: i82586 not responding, giving up.\n",
+ dev->name);
+ return;
+ }
+ }
+ }
+ }
+
+ scb_wrcbl(dev, CONF_LINK);
+ scb_command(dev, 0xf000|SCB_CUstart);
+ outb(0,ioaddr+SIGNAL_CA);
+
+ {
+ unsigned short iboguscount=50,ifailcount=5;
+ while (!scb_status(dev))
+ {
+ if (!--iboguscount)
+ {
+ if (--ifailcount)
+ {
+ printk(KERN_WARNING "%s: i82586 initialization timed out, status %04x, cmd %04x\n",
+ dev->name, scb_status(dev), scb_rdcmd(dev));
+ scb_wrcbl(dev, CONF_LINK);
+ scb_command(dev, 0xf000|SCB_CUstart);
+ outb(0,ioaddr+SIGNAL_CA);
+ iboguscount = 100;
+ }
+ else
+ {
+ printk(KERN_WARNING "%s: Failed to initialize i82586, giving up.\n",dev->name);
+ return;
+ }
+ }
+ }
+ }
+
+ clear_loopback(dev);
+ outb(SIRQ_en|irqrmap[dev->irq],ioaddr+SET_IRQ);
+
+ lp->init_time = jiffies;
+#if NET_DEBUG > 6
+ printk("%s: leaving eexp_hw_init586()\n", dev->name);
+#endif
+}
+
+static void eexp_setup_filter(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ unsigned short ioaddr = dev->base_addr;
+ int count = netdev_mc_count(dev);
+ int i;
+ if (count > 8) {
+ printk(KERN_INFO "%s: too many multicast addresses (%d)\n",
+ dev->name, count);
+ count = 8;
+ }
+
+ outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR);
+ outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST));
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned short *data = (unsigned short *) ha->addr;
+
+ if (i == count)
+ break;
+ outw((CONF_MULTICAST+(6*i)) & ~31, ioaddr+SM_PTR);
+ outw(data[0], ioaddr+SHADOW(CONF_MULTICAST+(6*i)));
+ outw((CONF_MULTICAST+(6*i)+2) & ~31, ioaddr+SM_PTR);
+ outw(data[1], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+2));
+ outw((CONF_MULTICAST+(6*i)+4) & ~31, ioaddr+SM_PTR);
+ outw(data[2], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+4));
+ i++;
+ }
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+static void
+eexp_set_multicast(struct net_device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+ struct net_local *lp = netdev_priv(dev);
+ int kick = 0, i;
+ if (!!(dev->flags & IFF_PROMISC) != lp->was_promisc) {
+ outw(CONF_PROMISC & ~31, ioaddr+SM_PTR);
+ i = inw(ioaddr+SHADOW(CONF_PROMISC));
+ outw((dev->flags & IFF_PROMISC)?(i|1):(i & ~1),
+ ioaddr+SHADOW(CONF_PROMISC));
+ lp->was_promisc = !!(dev->flags & IFF_PROMISC);
+ kick = 1;
+ }
+ if (!(dev->flags & IFF_PROMISC)) {
+ eexp_setup_filter(dev);
+ if (lp->old_mc_count != netdev_mc_count(dev)) {
+ kick = 1;
+ lp->old_mc_count = netdev_mc_count(dev);
+ }
+ }
+ if (kick) {
+ unsigned long oj;
+ scb_command(dev, SCB_CUsuspend);
+ outb(0, ioaddr+SIGNAL_CA);
+ outb(0, ioaddr+SIGNAL_CA);
+#if 0
+ printk("%s: waiting for CU to go suspended\n", dev->name);
+#endif
+ oj = jiffies;
+ while ((SCB_CUstat(scb_status(dev)) == 2) &&
+ (time_before(jiffies, oj + 2000)));
+ if (SCB_CUstat(scb_status(dev)) == 2)
+ printk("%s: warning, CU didn't stop\n", dev->name);
+ lp->started &= ~(STARTED_CU);
+ scb_wrcbl(dev, CONF_LINK);
+ scb_command(dev, SCB_CUstart);
+ outb(0, ioaddr+SIGNAL_CA);
+ }
+}
+
+
+/*
+ * MODULE stuff
+ */
+
+#ifdef MODULE
+
+#define EEXP_MAX_CARDS 4 /* max number of cards to support */
+
+static struct net_device *dev_eexp[EEXP_MAX_CARDS];
+static int irq[EEXP_MAX_CARDS];
+static int io[EEXP_MAX_CARDS];
+
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(io, "EtherExpress 16 I/O base address(es)");
+MODULE_PARM_DESC(irq, "EtherExpress 16 IRQ number(s)");
+MODULE_LICENSE("GPL");
+
+
+/* Ideally the user would give us io=, irq= for every card. If any parameters
+ * are specified, we verify and then use them. If no parameters are given, we
+ * autoprobe for one card only.
+ */
+int __init init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < EEXP_MAX_CARDS; this_dev++) {
+ dev = alloc_etherdev(sizeof(struct net_local));
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ if (io[this_dev] == 0) {
+ if (this_dev)
+ break;
+ printk(KERN_NOTICE "eexpress.c: Module autoprobe not recommended, give io=xx.\n");
+ }
+ if (do_express_probe(dev) == 0) {
+ dev_eexp[this_dev] = dev;
+ found++;
+ continue;
+ }
+ printk(KERN_WARNING "eexpress.c: Failed to register card at 0x%x.\n", io[this_dev]);
+ free_netdev(dev);
+ break;
+ }
+ if (found)
+ return 0;
+ return -ENXIO;
+}
+
+void __exit cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < EEXP_MAX_CARDS; this_dev++) {
+ struct net_device *dev = dev_eexp[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif
+
+/*
+ * Local Variables:
+ * c-file-style: "linux"
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/net/ethernet/i825xx/eexpress.h b/drivers/net/ethernet/i825xx/eexpress.h
new file mode 100644
index 000000000000..dc9c6ea289e9
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/eexpress.h
@@ -0,0 +1,179 @@
+/*
+ * eexpress.h: Intel EtherExpress16 defines
+ */
+
+/*
+ * EtherExpress card register addresses
+ * as offsets from the base IO region (dev->base_addr)
+ */
+
+#define DATAPORT 0x0000
+#define WRITE_PTR 0x0002
+#define READ_PTR 0x0004
+#define SIGNAL_CA 0x0006
+#define SET_IRQ 0x0007
+#define SM_PTR 0x0008
+#define MEM_Dec 0x000a
+#define MEM_Ctrl 0x000b
+#define MEM_Page_Ctrl 0x000c
+#define Config 0x000d
+#define EEPROM_Ctrl 0x000e
+#define ID_PORT 0x000f
+#define MEM_ECtrl 0x000f
+
+/*
+ * card register defines
+ */
+
+/* SET_IRQ */
+#define SIRQ_en 0x08
+#define SIRQ_dis 0x00
+
+/* EEPROM_Ctrl */
+#define EC_Clk 0x01
+#define EC_CS 0x02
+#define EC_Wr 0x04
+#define EC_Rd 0x08
+#define ASIC_RST 0x40
+#define i586_RST 0x80
+
+#define eeprom_delay() { udelay(40); }
+
+/*
+ * i82586 Memory Configuration
+ */
+
+/* (System Configuration Pointer) System start up block, read after 586_RST */
+#define SCP_START 0xfff6
+
+/* Intermediate System Configuration Pointer */
+#define ISCP_START 0x0000
+
+/* System Command Block */
+#define SCB_START 0x0008
+
+/* Start of buffer region. Everything before this is used for control
+ * structures and the CU configuration program. The memory layout is
+ * determined in eexp_hw_probe(), once we know how much memory is
+ * available on the card.
+ */
+
+#define TX_BUF_START 0x0100
+
+#define TX_BUF_SIZE ((24+ETH_FRAME_LEN+31)&~0x1f)
+#define RX_BUF_SIZE ((32+ETH_FRAME_LEN+31)&~0x1f)
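+
+/* With ETH_FRAME_LEN == 1514, both of the above work out to 1568
+ * (0x620) bytes; the "& ~0x1f" rounds each buffer up to a 32-byte
+ * boundary, matching the 32-byte SM_PTR window used by SHADOW().
+ */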
+
+/*
+ * SCB defines
+ */
+
+/* these functions take the SCB status word and test the relevant status bit */
+#define SCB_complete(s) (((s) & 0x8000) != 0)
+#define SCB_rxdframe(s) (((s) & 0x4000) != 0)
+#define SCB_CUdead(s) (((s) & 0x2000) != 0)
+#define SCB_RUdead(s) (((s) & 0x1000) != 0)
+#define SCB_ack(s) ((s) & 0xf000)
+
+/* Command unit status: 0=idle, 1=suspended, 2=active */
+#define SCB_CUstat(s) (((s)&0x0300)>>8)
+
+/* Receive unit status: 0=idle, 1=suspended, 2=out of resources, 4=ready */
+#define SCB_RUstat(s) (((s)&0x0070)>>4)
+
+/* SCB commands */
+#define SCB_CUnop 0x0000
+#define SCB_CUstart 0x0100
+#define SCB_CUresume 0x0200
+#define SCB_CUsuspend 0x0300
+#define SCB_CUabort 0x0400
+#define SCB_resetchip 0x0080
+
+#define SCB_RUnop 0x0000
+#define SCB_RUstart 0x0010
+#define SCB_RUresume 0x0020
+#define SCB_RUsuspend 0x0030
+#define SCB_RUabort 0x0040
+
+/*
+ * Command block defines
+ */
+
+#define Stat_Done(s) (((s) & 0x8000) != 0)
+#define Stat_Busy(s) (((s) & 0x4000) != 0)
+#define Stat_OK(s) (((s) & 0x2000) != 0)
+#define Stat_Abort(s) (((s) & 0x1000) != 0)
+#define Stat_STFail(s) (((s) & 0x0800) != 0)
+#define Stat_TNoCar(s) (((s) & 0x0400) != 0)
+#define Stat_TNoCTS(s) (((s) & 0x0200) != 0)
+#define Stat_TNoDMA(s) (((s) & 0x0100) != 0)
+#define Stat_TDefer(s) (((s) & 0x0080) != 0)
+#define Stat_TColl(s) (((s) & 0x0040) != 0)
+#define Stat_TXColl(s) (((s) & 0x0020) != 0)
+#define Stat_NoColl(s) ((s) & 0x000f)
+
+/* Cmd_END will end AFTER the command if this is the first
+ * command block after an SCB_CUstart, but BEFORE the command
+ * for all subsequent commands. The best strategy is to place
+ * Cmd_INT on the last real command in the sequence, followed by a
+ * dummy Cmd_Nop carrying Cmd_END.
+ */
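+
+/* eexpress.c follows exactly that pattern: each transmit command block
+ * is written with Cmd_INT|Cmd_Xmit, and the configuration chain in
+ * start_code[] ends with a Cmd_END|Cmd_Nop dummy.
+ */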
+
+#define Cmd_END 0x8000
+#define Cmd_SUS 0x4000
+#define Cmd_INT 0x2000
+
+#define Cmd_Nop 0x0000
+#define Cmd_SetAddr 0x0001
+#define Cmd_Config 0x0002
+#define Cmd_MCast 0x0003
+#define Cmd_Xmit 0x0004
+#define Cmd_TDR 0x0005
+#define Cmd_Dump 0x0006
+#define Cmd_Diag 0x0007
+
+
+/*
+ * Frame Descriptor (Receive block) defines
+ */
+
+#define FD_Done(s) (((s) & 0x8000) != 0)
+#define FD_Busy(s) (((s) & 0x4000) != 0)
+#define FD_OK(s) (((s) & 0x2000) != 0)
+
+#define FD_CRC(s) (((s) & 0x0800) != 0)
+#define FD_Align(s) (((s) & 0x0400) != 0)
+#define FD_Resrc(s) (((s) & 0x0200) != 0)
+#define FD_DMA(s) (((s) & 0x0100) != 0)
+#define FD_Short(s) (((s) & 0x0080) != 0)
+#define FD_NoEOF(s) (((s) & 0x0040) != 0)
+
+struct rfd_header {
+ volatile unsigned long flags;
+ volatile unsigned short link;
+ volatile unsigned short rbd_offset;
+ volatile unsigned short dstaddr1;
+ volatile unsigned short dstaddr2;
+ volatile unsigned short dstaddr3;
+ volatile unsigned short srcaddr1;
+ volatile unsigned short srcaddr2;
+ volatile unsigned short srcaddr3;
+ volatile unsigned short length;
+
+ /* This is actually a Receive Buffer Descriptor. The way we
+ * arrange memory means that an RBD always follows the RFD that
+ * points to it, so they might as well be in the same structure.
+ */
+ volatile unsigned short actual_count;
+ volatile unsigned short next_rbd;
+ volatile unsigned short buf_addr1;
+ volatile unsigned short buf_addr2;
+ volatile unsigned short size;
+};
+
+/* Returned data from the Time Domain Reflectometer */
+
+#define TDR_LINKOK (1<<15)
+#define TDR_XCVRPROBLEM (1<<14)
+#define TDR_OPEN (1<<13)
+#define TDR_SHORT (1<<12)
+#define TDR_TIME 0x7ff
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
new file mode 100644
index 000000000000..42e90a97c7a5
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -0,0 +1,1094 @@
+/*
+ * linux/drivers/acorn/net/ether1.c
+ *
+ * Copyright (C) 1996-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Acorn ether1 driver (82586 chip) for Acorn machines
+ *
+ * We basically keep two queues in the card's memory - one for transmit
+ * and one for receive. Each has a head and a tail. The head is where
+ * we/the chip adds packets to be transmitted/received, and the tail
+ * is where the transmitter has got to/where the receiver will stop.
+ * Both of these queues are circular, and since the chip is running
+ * all the time, we have to be careful when we modify the pointers etc.
+ * so that the buffer memory contents are valid all the time.
+ *
+ * Change log:
+ * 1.00 RMK Released
+ * 1.01 RMK 19/03/1996 Transfers the last odd byte onto/off of the card now.
+ * 1.02 RMK 25/05/1997 Added code to restart RU if it goes not ready
+ * 1.03 RMK 14/09/1997 Cleaned up the handling of a reset during the TX interrupt.
+ * Should prevent lockup.
+ * 1.04 RMK 17/09/1997 Added more info when initialisation of chip goes wrong.
+ * TDR now only reports failure when chip reports non-zero
+ * TDR time-distance.
+ * 1.05 RMK 31/12/1997 Removed calls to dev_tint for 2.1
+ * 1.06 RMK 10/02/2000 Updated for 2.3.43
+ * 1.07 RMK 13/05/2000 Updated for 2.3.99-pre8
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/ecard.h>
+
+#define __ETHER1_C
+#include "ether1.h"
+
+static unsigned int net_debug = NET_DEBUG;
+
+#define BUFFER_SIZE 0x10000
+#define TX_AREA_START 0x00100
+#define TX_AREA_END 0x05000
+#define RX_AREA_START 0x05000
+#define RX_AREA_END 0x0fc00
+
+static int ether1_open(struct net_device *dev);
+static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t ether1_interrupt(int irq, void *dev_id);
+static int ether1_close(struct net_device *dev);
+static void ether1_setmulticastlist(struct net_device *dev);
+static void ether1_timeout(struct net_device *dev);
+
+/* ------------------------------------------------------------------------- */
+
+static char version[] __devinitdata = "ether1 ethernet driver (c) 2000 Russell King v1.07\n";
+
+#define BUS_16 16
+#define BUS_8 8
+
+/* ------------------------------------------------------------------------- */
+
+#define DISABLEIRQS 1
+#define NORMALIRQS 0
+
+#define ether1_readw(dev, addr, type, offset, svflgs) ether1_inw_p (dev, addr + (int)(&((type *)0)->offset), svflgs)
+#define ether1_writew(dev, val, addr, type, offset, svflgs) ether1_outw_p (dev, val, addr + (int)(&((type *)0)->offset), svflgs)
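+
+/* The "(int)(&((type *)0)->offset)" expression in these macros is a
+ * hand-rolled offsetof(type, offset): it turns the scb_t/cfg_t/...
+ * structure layouts into byte offsets within the card's buffer RAM.
+ */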
+
+static inline unsigned short
+ether1_inw_p (struct net_device *dev, int addr, int svflgs)
+{
+ unsigned long flags;
+ unsigned short ret;
+
+ if (svflgs)
+ local_irq_save (flags);
+
+ writeb(addr >> 12, REG_PAGE);
+ ret = readw(ETHER1_RAM + ((addr & 4095) << 1));
+ if (svflgs)
+ local_irq_restore (flags);
+ return ret;
+}
+
+static inline void
+ether1_outw_p (struct net_device *dev, unsigned short val, int addr, int svflgs)
+{
+ unsigned long flags;
+
+ if (svflgs)
+ local_irq_save (flags);
+
+ writeb(addr >> 12, REG_PAGE);
+ writew(val, ETHER1_RAM + ((addr & 4095) << 1));
+ if (svflgs)
+ local_irq_restore (flags);
+}
+
+/*
+ * Some inline assembler to allow fast transfers onto/off of the card.
+ * Since this driver depends on features specific to the ARM
+ * architecture, and you can't configure this driver without
+ * specifying ARM mode, this is not a problem.
+ *
+ * These routines are essentially optimised memcpys between kernel
+ * memory and the card's onboard RAM.
+ */
+static void
+ether1_writebuffer (struct net_device *dev, void *data, unsigned int start, unsigned int length)
+{
+ unsigned int page, thislen, offset;
+ void __iomem *addr;
+
+ offset = start & 4095;
+ page = start >> 12;
+ addr = ETHER1_RAM + (offset << 1);
+
+ if (offset + length > 4096)
+ thislen = 4096 - offset;
+ else
+ thislen = length;
+
+ do {
+ int used;
+
+ writeb(page, REG_PAGE);
+ length -= thislen;
+
+ __asm__ __volatile__(
+ "subs %3, %3, #2\n\
+ bmi 2f\n\
+1: ldr %0, [%1], #2\n\
+ mov %0, %0, lsl #16\n\
+ orr %0, %0, %0, lsr #16\n\
+ str %0, [%2], #4\n\
+ subs %3, %3, #2\n\
+ bmi 2f\n\
+ ldr %0, [%1], #2\n\
+ mov %0, %0, lsl #16\n\
+ orr %0, %0, %0, lsr #16\n\
+ str %0, [%2], #4\n\
+ subs %3, %3, #2\n\
+ bmi 2f\n\
+ ldr %0, [%1], #2\n\
+ mov %0, %0, lsl #16\n\
+ orr %0, %0, %0, lsr #16\n\
+ str %0, [%2], #4\n\
+ subs %3, %3, #2\n\
+ bmi 2f\n\
+ ldr %0, [%1], #2\n\
+ mov %0, %0, lsl #16\n\
+ orr %0, %0, %0, lsr #16\n\
+ str %0, [%2], #4\n\
+ subs %3, %3, #2\n\
+ bpl 1b\n\
+2: adds %3, %3, #1\n\
+ ldreqb %0, [%1]\n\
+ streqb %0, [%2]"
+ : "=&r" (used), "=&r" (data)
+ : "r" (addr), "r" (thislen), "1" (data));
+
+ addr = ETHER1_RAM;
+
+ thislen = length;
+ if (thislen > 4096)
+ thislen = 4096;
+ page++;
+ } while (thislen);
+}
+
+static void
+ether1_readbuffer (struct net_device *dev, void *data, unsigned int start, unsigned int length)
+{
+ unsigned int page, thislen, offset;
+ void __iomem *addr;
+
+ offset = start & 4095;
+ page = start >> 12;
+ addr = ETHER1_RAM + (offset << 1);
+
+ if (offset + length > 4096)
+ thislen = 4096 - offset;
+ else
+ thislen = length;
+
+ do {
+ int used;
+
+ writeb(page, REG_PAGE);
+ length -= thislen;
+
+ __asm__ __volatile__(
+ "subs %3, %3, #2\n\
+ bmi 2f\n\
+1: ldr %0, [%2], #4\n\
+ strb %0, [%1], #1\n\
+ mov %0, %0, lsr #8\n\
+ strb %0, [%1], #1\n\
+ subs %3, %3, #2\n\
+ bmi 2f\n\
+ ldr %0, [%2], #4\n\
+ strb %0, [%1], #1\n\
+ mov %0, %0, lsr #8\n\
+ strb %0, [%1], #1\n\
+ subs %3, %3, #2\n\
+ bmi 2f\n\
+ ldr %0, [%2], #4\n\
+ strb %0, [%1], #1\n\
+ mov %0, %0, lsr #8\n\
+ strb %0, [%1], #1\n\
+ subs %3, %3, #2\n\
+ bmi 2f\n\
+ ldr %0, [%2], #4\n\
+ strb %0, [%1], #1\n\
+ mov %0, %0, lsr #8\n\
+ strb %0, [%1], #1\n\
+ subs %3, %3, #2\n\
+ bpl 1b\n\
+2: adds %3, %3, #1\n\
+ ldreqb %0, [%2]\n\
+ streqb %0, [%1]"
+ : "=&r" (used), "=&r" (data)
+ : "r" (addr), "r" (thislen), "1" (data));
+
+ addr = ETHER1_RAM;
+
+ thislen = length;
+ if (thislen > 4096)
+ thislen = 4096;
+ page++;
+ } while (thislen);
+}
+
+static int __devinit
+ether1_ramtest(struct net_device *dev, unsigned char byte)
+{
+ unsigned char *buffer = kmalloc (BUFFER_SIZE, GFP_KERNEL);
+ int i, ret = BUFFER_SIZE;
+ int max_errors = 15;
+ int bad = -1;
+ int bad_start = 0;
+
+ if (!buffer)
+ return 1;
+
+ memset (buffer, byte, BUFFER_SIZE);
+ ether1_writebuffer (dev, buffer, 0, BUFFER_SIZE);
+ memset (buffer, byte ^ 0xff, BUFFER_SIZE);
+ ether1_readbuffer (dev, buffer, 0, BUFFER_SIZE);
+
+ for (i = 0; i < BUFFER_SIZE; i++) {
+ if (buffer[i] != byte) {
+ if (max_errors >= 0 && bad != buffer[i]) {
+ if (bad != -1)
+ printk ("\n");
+ printk (KERN_CRIT "%s: RAM failed with (%02X instead of %02X) at 0x%04X",
+ dev->name, buffer[i], byte, i);
+ ret = -ENODEV;
+ max_errors --;
+ bad = buffer[i];
+ bad_start = i;
+ }
+ } else {
+ if (bad != -1) {
+ if (bad_start == i - 1)
+ printk ("\n");
+ else
+ printk (" - 0x%04X\n", i - 1);
+ bad = -1;
+ }
+ }
+ }
+
+ if (bad != -1)
+ printk (" - 0x%04X\n", BUFFER_SIZE);
+ kfree (buffer);
+
+ return ret;
+}
+
+static int
+ether1_reset (struct net_device *dev)
+{
+ writeb(CTRL_RST|CTRL_ACK, REG_CONTROL);
+ return BUS_16;
+}
+
+static int __devinit
+ether1_init_2(struct net_device *dev)
+{
+ int i;
+ dev->mem_start = 0;
+
+ i = ether1_ramtest (dev, 0x5a);
+
+ if (i > 0)
+ i = ether1_ramtest (dev, 0x1e);
+
+ if (i <= 0)
+ return -ENODEV;
+
+ dev->mem_end = i;
+ return 0;
+}
+
+/*
+ * These are the structures that are loaded into the ether RAM card to
+ * initialise the 82586
+ */
+
+/* at 0x0100 */
+#define NOP_ADDR (TX_AREA_START)
+#define NOP_SIZE (0x06)
+static nop_t init_nop = {
+ 0,
+ CMD_NOP,
+ NOP_ADDR
+};
+
+/* at 0x003a */
+#define TDR_ADDR (0x003a)
+#define TDR_SIZE (0x08)
+static tdr_t init_tdr = {
+ 0,
+ CMD_TDR | CMD_INTR,
+ NOP_ADDR,
+ 0
+};
+
+/* at 0x002e */
+#define MC_ADDR (0x002e)
+#define MC_SIZE (0x0c)
+static mc_t init_mc = {
+ 0,
+ CMD_SETMULTICAST,
+ TDR_ADDR,
+ 0,
+ { { 0, } }
+};
+
+/* at 0x0022 */
+#define SA_ADDR (0x0022)
+#define SA_SIZE (0x0c)
+static sa_t init_sa = {
+ 0,
+ CMD_SETADDRESS,
+ MC_ADDR,
+ { 0, }
+};
+
+/* at 0x0010 */
+#define CFG_ADDR (0x0010)
+#define CFG_SIZE (0x12)
+static cfg_t init_cfg = {
+ 0,
+ CMD_CONFIG,
+ SA_ADDR,
+ 8,
+ 8,
+ CFG8_SRDY,
+ CFG9_PREAMB8 | CFG9_ADDRLENBUF | CFG9_ADDRLEN(6),
+ 0,
+ 0x60,
+ 0,
+ CFG13_RETRY(15) | CFG13_SLOTH(2),
+ 0,
+};
+
+/* at 0x0000 */
+#define SCB_ADDR (0x0000)
+#define SCB_SIZE (0x10)
+static scb_t init_scb = {
+ 0,
+ SCB_CMDACKRNR | SCB_CMDACKCNA | SCB_CMDACKFR | SCB_CMDACKCX,
+ CFG_ADDR,
+ RX_AREA_START,
+ 0,
+ 0,
+ 0,
+ 0
+};
+
+/* at 0xffee */
+#define ISCP_ADDR (0xffee)
+#define ISCP_SIZE (0x08)
+static iscp_t init_iscp = {
+ 1,
+ SCB_ADDR,
+ 0x0000,
+ 0x0000
+};
+
+/* at 0xfff6 */
+#define SCP_ADDR (0xfff6)
+#define SCP_SIZE (0x0a)
+static scp_t init_scp = {
+ SCP_SY_16BBUS,
+ { 0, 0 },
+ ISCP_ADDR,
+ 0
+};
+
+#define RFD_SIZE (0x16)
+static rfd_t init_rfd = {
+ 0,
+ 0,
+ 0,
+ 0,
+ { 0, },
+ { 0, },
+ 0
+};
+
+#define RBD_SIZE (0x0a)
+static rbd_t init_rbd = {
+ 0,
+ 0,
+ 0,
+ 0,
+ ETH_FRAME_LEN + 8
+};
+
+#define TX_SIZE (0x08)
+#define TBD_SIZE (0x08)
+
+static int
+ether1_init_for_open (struct net_device *dev)
+{
+ int i, status, addr, next, next2;
+ int failures = 0;
+ unsigned long timeout;
+
+ writeb(CTRL_RST|CTRL_ACK, REG_CONTROL);
+
+ for (i = 0; i < 6; i++)
+ init_sa.sa_addr[i] = dev->dev_addr[i];
+
+ /* load data structures into ether1 RAM */
+ ether1_writebuffer (dev, &init_scp, SCP_ADDR, SCP_SIZE);
+ ether1_writebuffer (dev, &init_iscp, ISCP_ADDR, ISCP_SIZE);
+ ether1_writebuffer (dev, &init_scb, SCB_ADDR, SCB_SIZE);
+ ether1_writebuffer (dev, &init_cfg, CFG_ADDR, CFG_SIZE);
+ ether1_writebuffer (dev, &init_sa, SA_ADDR, SA_SIZE);
+ ether1_writebuffer (dev, &init_mc, MC_ADDR, MC_SIZE);
+ ether1_writebuffer (dev, &init_tdr, TDR_ADDR, TDR_SIZE);
+ ether1_writebuffer (dev, &init_nop, NOP_ADDR, NOP_SIZE);
+
+ if (ether1_readw(dev, CFG_ADDR, cfg_t, cfg_command, NORMALIRQS) != CMD_CONFIG) {
+ printk (KERN_ERR "%s: detected either RAM fault or compiler bug\n",
+ dev->name);
+ return 1;
+ }
+
+ /*
+ * setup circularly linked list of { rfd, rbd, buffer }, with
+ * all rfds circularly linked, rbds circularly linked.
+ * First rfd is linked to scp, first rbd is linked to first
+ * rfd. Last rbd has a suspend command.
+ */
+ addr = RX_AREA_START;
+ do {
+ next = addr + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10;
+ next2 = next + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10;
+
+ if (next2 >= RX_AREA_END) {
+ next = RX_AREA_START;
+ init_rfd.rfd_command = RFD_CMDEL | RFD_CMDSUSPEND;
+ priv(dev)->rx_tail = addr;
+ } else
+ init_rfd.rfd_command = 0;
+ if (addr == RX_AREA_START)
+ init_rfd.rfd_rbdoffset = addr + RFD_SIZE;
+ else
+ init_rfd.rfd_rbdoffset = 0;
+ init_rfd.rfd_link = next;
+ init_rbd.rbd_link = next + RFD_SIZE;
+ init_rbd.rbd_bufl = addr + RFD_SIZE + RBD_SIZE;
+
+ ether1_writebuffer (dev, &init_rfd, addr, RFD_SIZE);
+ ether1_writebuffer (dev, &init_rbd, addr + RFD_SIZE, RBD_SIZE);
+ addr = next;
+ } while (next2 < RX_AREA_END);
+
+ priv(dev)->tx_link = NOP_ADDR;
+ priv(dev)->tx_head = NOP_ADDR + NOP_SIZE;
+ priv(dev)->tx_tail = TDR_ADDR;
+ priv(dev)->rx_head = RX_AREA_START;
+
+ /* release reset & give 586 a prod */
+ priv(dev)->resetting = 1;
+ priv(dev)->initialising = 1;
+ writeb(CTRL_RST, REG_CONTROL);
+ writeb(0, REG_CONTROL);
+ writeb(CTRL_CA, REG_CONTROL);
+
+ /* 586 should now unset iscp.busy */
+ timeout = jiffies + HZ/2;
+ while (ether1_readw(dev, ISCP_ADDR, iscp_t, iscp_busy, DISABLEIRQS) == 1) {
+ if (time_after(jiffies, timeout)) {
+ printk (KERN_WARNING "%s: can't initialise 82586: iscp is busy\n", dev->name);
+ return 1;
+ }
+ }
+
+ /* check status of commands that we issued */
+ timeout += HZ/10;
+ while (((status = ether1_readw(dev, CFG_ADDR, cfg_t, cfg_status, DISABLEIRQS))
+ & STAT_COMPLETE) == 0) {
+ if (time_after(jiffies, timeout))
+ break;
+ }
+
+ if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
+ printk (KERN_WARNING "%s: can't initialise 82586: config status %04X\n", dev->name, status);
+ printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
+ failures += 1;
+ }
+
+ timeout += HZ/10;
+ while (((status = ether1_readw(dev, SA_ADDR, sa_t, sa_status, DISABLEIRQS))
+ & STAT_COMPLETE) == 0) {
+ if (time_after(jiffies, timeout))
+ break;
+ }
+
+ if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
+ printk (KERN_WARNING "%s: can't initialise 82586: set address status %04X\n", dev->name, status);
+ printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
+ failures += 1;
+ }
+
+ timeout += HZ/10;
+ while (((status = ether1_readw(dev, MC_ADDR, mc_t, mc_status, DISABLEIRQS))
+ & STAT_COMPLETE) == 0) {
+ if (time_after(jiffies, timeout))
+ break;
+ }
+
+ if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
+ printk (KERN_WARNING "%s: can't initialise 82586: set multicast status %04X\n", dev->name, status);
+ printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
+ failures += 1;
+ }
+
+ timeout += HZ;
+ while (((status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_status, DISABLEIRQS))
+ & STAT_COMPLETE) == 0) {
+ if (time_after(jiffies, timeout))
+ break;
+ }
+
+ if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) {
+ printk (KERN_WARNING "%s: can't tdr (ignored)\n", dev->name);
+ printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name,
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS),
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS));
+ } else {
+ status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_result, DISABLEIRQS);
+ if (status & TDR_XCVRPROB)
+ printk (KERN_WARNING "%s: i/f failed tdr: transceiver problem\n", dev->name);
+ else if ((status & (TDR_SHORT|TDR_OPEN)) && (status & TDR_TIME)) {
+#ifdef FANCY
+ printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d.%d us away\n", dev->name,
+ status & TDR_SHORT ? "short" : "open", (status & TDR_TIME) / 10,
+ (status & TDR_TIME) % 10);
+#else
+ printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d clks away\n", dev->name,
+ status & TDR_SHORT ? "short" : "open", (status & TDR_TIME));
+#endif
+ }
+ }
+
+ if (failures)
+ ether1_reset (dev);
+ return failures ? 1 : 0;
+}
+
+/* ------------------------------------------------------------------------- */
+
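+/*
+ * Carve 'size' bytes (rounded up to an even byte count) out of the
+ * transmit area, treated as a ring between TX_AREA_START and
+ * TX_AREA_END. Returns the start offset, or -1 if the allocation
+ * would overrun the unretired tail.
+ */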
+static int
+ether1_txalloc (struct net_device *dev, int size)
+{
+ int start, tail;
+
+ size = (size + 1) & ~1;
+ tail = priv(dev)->tx_tail;
+
+ if (priv(dev)->tx_head + size > TX_AREA_END) {
+ if (tail > priv(dev)->tx_head)
+ return -1;
+ start = TX_AREA_START;
+ if (start + size > tail)
+ return -1;
+ priv(dev)->tx_head = start + size;
+ } else {
+ if (priv(dev)->tx_head < tail && (priv(dev)->tx_head + size) > tail)
+ return -1;
+ start = priv(dev)->tx_head;
+ priv(dev)->tx_head += size;
+ }
+
+ return start;
+}
+
+static int
+ether1_open (struct net_device *dev)
+{
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
+ dev->name);
+ return -EINVAL;
+ }
+
+ if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev))
+ return -EAGAIN;
+
+ if (ether1_init_for_open (dev)) {
+ free_irq (dev->irq, dev);
+ return -EAGAIN;
+ }
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static void
+ether1_timeout(struct net_device *dev)
+{
+ printk(KERN_WARNING "%s: transmit timeout, network cable problem?\n",
+ dev->name);
+ printk(KERN_WARNING "%s: resetting device\n", dev->name);
+
+ ether1_reset (dev);
+
+ if (ether1_init_for_open (dev))
+ printk (KERN_ERR "%s: unable to restart interface\n", dev->name);
+
+ dev->stats.tx_errors++;
+ netif_wake_queue(dev);
+}
+
+static int
+ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
+{
+ int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr;
+ unsigned long flags;
+ tx_t tx;
+ tbd_t tbd;
+ nop_t nop;
+
+ if (priv(dev)->restart) {
+ printk(KERN_WARNING "%s: resetting device\n", dev->name);
+
+ ether1_reset(dev);
+
+ if (ether1_init_for_open(dev))
+ printk(KERN_ERR "%s: unable to restart interface\n", dev->name);
+ else
+ priv(dev)->restart = 0;
+ }
+
+ if (skb->len < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ goto out;
+ }
+
+ /*
+ * insert the packet followed by a self-linked nop: the command unit
+ * sits on the nop until its link is redirected at the next packet
+ */
+ txaddr = ether1_txalloc (dev, TX_SIZE);
+ tbdaddr = ether1_txalloc (dev, TBD_SIZE);
+ dataddr = ether1_txalloc (dev, skb->len);
+ nopaddr = ether1_txalloc (dev, NOP_SIZE);
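+ /*
+ * the four allocations above are not checked for failure: the queue
+ * is stopped at the end of this function once there is no longer
+ * room for another full-sized frame
+ */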
+
+ tx.tx_status = 0;
+ tx.tx_command = CMD_TX | CMD_INTR;
+ tx.tx_link = nopaddr;
+ tx.tx_tbdoffset = tbdaddr;
+ tbd.tbd_opts = TBD_EOL | skb->len;
+ tbd.tbd_link = I82586_NULL;
+ tbd.tbd_bufl = dataddr;
+ tbd.tbd_bufh = 0;
+ nop.nop_status = 0;
+ nop.nop_command = CMD_NOP;
+ nop.nop_link = nopaddr;
+
+ local_irq_save(flags);
+ ether1_writebuffer (dev, &tx, txaddr, TX_SIZE);
+ ether1_writebuffer (dev, &tbd, tbdaddr, TBD_SIZE);
+ ether1_writebuffer (dev, skb->data, dataddr, skb->len);
+ ether1_writebuffer (dev, &nop, nopaddr, NOP_SIZE);
+ tmp = priv(dev)->tx_link;
+ priv(dev)->tx_link = nopaddr;
+
+ /* splice the new block into the chain: redirect the previous nop's link at it */
+ ether1_writew(dev, txaddr, tmp, nop_t, nop_link, NORMALIRQS);
+
+ local_irq_restore(flags);
+
+ /* handle transmit */
+
+ /* check to see if we have room for a full sized ether frame */
+ tmp = priv(dev)->tx_head;
+ tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN);
+ priv(dev)->tx_head = tmp;
+ dev_kfree_skb (skb);
+
+ if (tst == -1)
+ netif_stop_queue(dev);
+
+ out:
+ return NETDEV_TX_OK;
+}
+
+static void
+ether1_xmit_done (struct net_device *dev)
+{
+ nop_t nop;
+ int caddr, tst;
+
+ caddr = priv(dev)->tx_tail;
+
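+ /*
+ * walk the command chain from the tail, skipping nops, and retire
+ * each completed tx command in turn
+ */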
+again:
+ ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);
+
+ switch (nop.nop_command & CMD_MASK) {
+ case CMD_TDR:
+ /* special case */
+ if (ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS)
+ != (unsigned short)I82586_NULL) {
+ ether1_writew(dev, SCB_CMDCUCSTART | SCB_CMDRXSTART, SCB_ADDR, scb_t,
+ scb_command, NORMALIRQS);
+ writeb(CTRL_CA, REG_CONTROL);
+ }
+ priv(dev)->tx_tail = NOP_ADDR;
+ return;
+
+ case CMD_NOP:
+ if (nop.nop_link == caddr) {
+ if (priv(dev)->initialising == 0)
+ printk (KERN_WARNING "%s: strange command complete with no tx command!\n", dev->name);
+ else
+ priv(dev)->initialising = 0;
+ return;
+ }
+ if (caddr == nop.nop_link)
+ return;
+ caddr = nop.nop_link;
+ goto again;
+
+ case CMD_TX:
+ if (nop.nop_status & STAT_COMPLETE)
+ break;
+ printk (KERN_ERR "%s: strange command complete without completed command\n", dev->name);
+ priv(dev)->restart = 1;
+ return;
+
+ default:
+ printk (KERN_WARNING "%s: strange command %d complete! (offset %04X)", dev->name,
+ nop.nop_command & CMD_MASK, caddr);
+ priv(dev)->restart = 1;
+ return;
+ }
+
+ while (nop.nop_status & STAT_COMPLETE) {
+ if (nop.nop_status & STAT_OK) {
+ dev->stats.tx_packets++;
+ dev->stats.collisions += (nop.nop_status & STAT_COLLISIONS);
+ } else {
+ dev->stats.tx_errors++;
+
+ if (nop.nop_status & STAT_COLLAFTERTX)
+ dev->stats.collisions++;
+ if (nop.nop_status & STAT_NOCARRIER)
+ dev->stats.tx_carrier_errors++;
+ if (nop.nop_status & STAT_TXLOSTCTS)
+ printk (KERN_WARNING "%s: cts lost\n", dev->name);
+ if (nop.nop_status & STAT_TXSLOWDMA)
+ dev->stats.tx_fifo_errors++;
+ if (nop.nop_status & STAT_COLLEXCESSIVE)
+ dev->stats.collisions += 16;
+ }
+
+ if (nop.nop_link == caddr) {
+ printk (KERN_ERR "%s: tx buffer chaining error: tx command points to itself\n", dev->name);
+ break;
+ }
+
+ caddr = nop.nop_link;
+ ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);
+ if ((nop.nop_command & CMD_MASK) != CMD_NOP) {
+ printk (KERN_ERR "%s: tx buffer chaining error: no nop after tx command\n", dev->name);
+ break;
+ }
+
+ if (caddr == nop.nop_link)
+ break;
+
+ caddr = nop.nop_link;
+ ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);
+ if ((nop.nop_command & CMD_MASK) != CMD_TX) {
+ printk (KERN_ERR "%s: tx buffer chaining error: no tx command after nop\n", dev->name);
+ break;
+ }
+ }
+ priv(dev)->tx_tail = caddr;
+
+ caddr = priv(dev)->tx_head;
+ tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN);
+ priv(dev)->tx_head = caddr;
+ if (tst != -1)
+ netif_wake_queue(dev);
+}
+
+static void
+ether1_recv_done (struct net_device *dev)
+{
+ int status;
+ int nexttail, rbdaddr;
+ rbd_t rbd;
+
+ do {
+ status = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_status, NORMALIRQS);
+ if ((status & RFD_COMPLETE) == 0)
+ break;
+
+ rbdaddr = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_rbdoffset, NORMALIRQS);
+ ether1_readbuffer (dev, &rbd, rbdaddr, RBD_SIZE);
+
+ if ((rbd.rbd_status & (RBD_EOF | RBD_ACNTVALID)) == (RBD_EOF | RBD_ACNTVALID)) {
+ int length = rbd.rbd_status & RBD_ACNT;
+ struct sk_buff *skb;
+
+ length = (length + 1) & ~1;
+ skb = dev_alloc_skb (length + 2);
+
+ if (skb) {
+ skb_reserve (skb, 2);
+
+ ether1_readbuffer (dev, skb_put (skb, length), rbd.rbd_bufl, length);
+
+ skb->protocol = eth_type_trans (skb, dev);
+ netif_rx (skb);
+ dev->stats.rx_packets++;
+ } else
+ dev->stats.rx_dropped++;
+ } else {
+ printk(KERN_WARNING "%s: %s\n", dev->name,
+ (rbd.rbd_status & RBD_EOF) ? "oversized packet" : "acnt not valid");
+ dev->stats.rx_dropped++;
+ }
+
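+ /*
+ * advance the suspend mark: the rfd after the old tail becomes the
+ * new tail and the old tail is cleared for reuse
+ */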
+ nexttail = ether1_readw(dev, priv(dev)->rx_tail, rfd_t, rfd_link, NORMALIRQS);
+ /* nexttail should be rx_head */
+ if (nexttail != priv(dev)->rx_head)
+ printk(KERN_ERR "%s: receiver buffer chaining error (%04X != %04X)\n",
+ dev->name, nexttail, priv(dev)->rx_head);
+ ether1_writew(dev, RFD_CMDEL | RFD_CMDSUSPEND, nexttail, rfd_t, rfd_command, NORMALIRQS);
+ ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_command, NORMALIRQS);
+ ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_status, NORMALIRQS);
+ ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_rbdoffset, NORMALIRQS);
+
+ priv(dev)->rx_tail = nexttail;
+ priv(dev)->rx_head = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_link, NORMALIRQS);
+ } while (1);
+}
+
+static irqreturn_t
+ether1_interrupt (int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ int status;
+
+ status = ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS);
+
+ if (status) {
+ ether1_writew(dev, status & (SCB_STRNR | SCB_STCNA | SCB_STFR | SCB_STCX),
+ SCB_ADDR, scb_t, scb_command, NORMALIRQS);
+ writeb(CTRL_CA | CTRL_ACK, REG_CONTROL);
+ if (status & SCB_STCX) {
+ ether1_xmit_done (dev);
+ }
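+ /*
+ * CNA: the command unit has gone idle. priv->resetting counts the
+ * CNA interrupts seen while a reset is in progress; outside a
+ * reset an idle CU is unexpected.
+ */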
+ if (status & SCB_STCNA) {
+ if (priv(dev)->resetting == 0)
+ printk (KERN_WARNING "%s: CU went not ready ???\n", dev->name);
+ else
+ priv(dev)->resetting += 1;
+ if (ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS)
+ != (unsigned short)I82586_NULL) {
+ ether1_writew(dev, SCB_CMDCUCSTART, SCB_ADDR, scb_t, scb_command, NORMALIRQS);
+ writeb(CTRL_CA, REG_CONTROL);
+ }
+ if (priv(dev)->resetting == 2)
+ priv(dev)->resetting = 0;
+ }
+ if (status & SCB_STFR) {
+ ether1_recv_done (dev);
+ }
+ if (status & SCB_STRNR) {
+ if (ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS) & SCB_STRXSUSP) {
+ printk (KERN_WARNING "%s: RU went not ready: RU suspended\n", dev->name);
+ ether1_writew(dev, SCB_CMDRXRESUME, SCB_ADDR, scb_t, scb_command, NORMALIRQS);
+ writeb(CTRL_CA, REG_CONTROL);
+ dev->stats.rx_dropped++; /* we suspended due to lack of buffer space */
+ } else
+ printk(KERN_WARNING "%s: RU went not ready: %04X\n", dev->name,
+ ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS));
+ printk (KERN_WARNING "RU ptr = %04X\n", ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset,
+ NORMALIRQS));
+ }
+ } else
+ writeb(CTRL_ACK, REG_CONTROL);
+
+ return IRQ_HANDLED;
+}
+
+static int
+ether1_close (struct net_device *dev)
+{
+ ether1_reset (dev);
+
+ free_irq(dev->irq, dev);
+
+ return 0;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ * num_addrs == -1 Promiscuous mode, receive all packets.
+ * num_addrs == 0 Normal mode, clear multicast list.
+ * num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ * best-effort filtering.
+ */
+static void
+ether1_setmulticastlist (struct net_device *dev)
+{
+}
+
+/* ------------------------------------------------------------------------- */
+
+static void __devinit ether1_banner(void)
+{
+ static unsigned int version_printed = 0;
+
+ if (net_debug && version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+}
+
+static const struct net_device_ops ether1_netdev_ops = {
+ .ndo_open = ether1_open,
+ .ndo_stop = ether1_close,
+ .ndo_start_xmit = ether1_sendpacket,
+ .ndo_set_rx_mode = ether1_setmulticastlist,
+ .ndo_tx_timeout = ether1_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int __devinit
+ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
+{
+ struct net_device *dev;
+ int i, ret = 0;
+
+ ether1_banner();
+
+ ret = ecard_request_resources(ec);
+ if (ret)
+ goto out;
+
+ dev = alloc_etherdev(sizeof(struct ether1_priv));
+ if (!dev) {
+ ret = -ENOMEM;
+ goto release;
+ }
+
+ SET_NETDEV_DEV(dev, &ec->dev);
+
+ dev->irq = ec->irq;
+ priv(dev)->base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
+ if (!priv(dev)->base) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ if ((priv(dev)->bus_type = ether1_reset(dev)) == 0) {
+ ret = -ENODEV;
+ goto free;
+ }
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = readb(IDPROM_ADDRESS + (i << 2));
+
+ if (ether1_init_2(dev)) {
+ ret = -ENODEV;
+ goto free;
+ }
+
+ dev->netdev_ops = &ether1_netdev_ops;
+ dev->watchdog_timeo = 5 * HZ / 100;
+
+ ret = register_netdev(dev);
+ if (ret)
+ goto free;
+
+ printk(KERN_INFO "%s: ether1 in slot %d, %pM\n",
+ dev->name, ec->slot_no, dev->dev_addr);
+
+ ecard_set_drvdata(ec, dev);
+ return 0;
+
+ free:
+ free_netdev(dev);
+ release:
+ ecard_release_resources(ec);
+ out:
+ return ret;
+}
+
+static void __devexit ether1_remove(struct expansion_card *ec)
+{
+ struct net_device *dev = ecard_get_drvdata(ec);
+
+ ecard_set_drvdata(ec, NULL);
+
+ unregister_netdev(dev);
+ free_netdev(dev);
+ ecard_release_resources(ec);
+}
+
+static const struct ecard_id ether1_ids[] = {
+ { MANU_ACORN, PROD_ACORN_ETHER1 },
+ { 0xffff, 0xffff }
+};
+
+static struct ecard_driver ether1_driver = {
+ .probe = ether1_probe,
+ .remove = __devexit_p(ether1_remove),
+ .id_table = ether1_ids,
+ .drv = {
+ .name = "ether1",
+ },
+};
+
+static int __init ether1_init(void)
+{
+ return ecard_register_driver(&ether1_driver);
+}
+
+static void __exit ether1_exit(void)
+{
+ ecard_remove_driver(&ether1_driver);
+}
+
+module_init(ether1_init);
+module_exit(ether1_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/i825xx/ether1.h b/drivers/net/ethernet/i825xx/ether1.h
new file mode 100644
index 000000000000..3a5830ab3dc7
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/ether1.h
@@ -0,0 +1,280 @@
+/*
+ * linux/drivers/acorn/net/ether1.h
+ *
+ * Copyright (C) 1996 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Network driver for Acorn Ether1 cards.
+ */
+
+#ifndef _LINUX_ether1_H
+#define _LINUX_ether1_H
+
+#ifdef __ETHER1_C
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 0
+#endif
+
+#define priv(dev) ((struct ether1_priv *)netdev_priv(dev))
+
+/* Page register */
+#define REG_PAGE (priv(dev)->base + 0x0000)
+
+/* Control register */
+#define REG_CONTROL (priv(dev)->base + 0x0004)
+#define CTRL_RST 0x01
+#define CTRL_LOOPBACK 0x02
+#define CTRL_CA 0x04
+#define CTRL_ACK 0x08
+
+#define ETHER1_RAM (priv(dev)->base + 0x2000)
+
+/* HW address */
+#define IDPROM_ADDRESS (priv(dev)->base + 0x0024)
+
+struct ether1_priv {
+ void __iomem *base;
+ unsigned int tx_link;
+ unsigned int tx_head;
+ volatile unsigned int tx_tail;
+ volatile unsigned int rx_head;
+ volatile unsigned int rx_tail;
+ unsigned char bus_type;
+ unsigned char resetting;
+ unsigned char initialising : 1;
+ unsigned char restart : 1;
+};
+
+#define I82586_NULL (-1)
+
+typedef struct { /* tdr */
+ unsigned short tdr_status;
+ unsigned short tdr_command;
+ unsigned short tdr_link;
+ unsigned short tdr_result;
+#define TDR_TIME (0x7ff)
+#define TDR_SHORT (1 << 12)
+#define TDR_OPEN (1 << 13)
+#define TDR_XCVRPROB (1 << 14)
+#define TDR_LNKOK (1 << 15)
+} tdr_t;
+
+typedef struct { /* transmit */
+ unsigned short tx_status;
+ unsigned short tx_command;
+ unsigned short tx_link;
+ unsigned short tx_tbdoffset;
+} tx_t;
+
+typedef struct { /* tbd */
+ unsigned short tbd_opts;
+#define TBD_CNT (0x3fff)
+#define TBD_EOL (1 << 15)
+ unsigned short tbd_link;
+ unsigned short tbd_bufl;
+ unsigned short tbd_bufh;
+} tbd_t;
+
+typedef struct { /* rfd */
+ unsigned short rfd_status;
+#define RFD_NOEOF (1 << 6)
+#define RFD_FRAMESHORT (1 << 7)
+#define RFD_DMAOVRN (1 << 8)
+#define RFD_NORESOURCES (1 << 9)
+#define RFD_ALIGNERROR (1 << 10)
+#define RFD_CRCERROR (1 << 11)
+#define RFD_OK (1 << 13)
+#define RFD_FDCONSUMED (1 << 14)
+#define RFD_COMPLETE (1 << 15)
+ unsigned short rfd_command;
+#define RFD_CMDSUSPEND (1 << 14)
+#define RFD_CMDEL (1 << 15)
+ unsigned short rfd_link;
+ unsigned short rfd_rbdoffset;
+ unsigned char rfd_dest[6];
+ unsigned char rfd_src[6];
+ unsigned short rfd_len;
+} rfd_t;
+
+typedef struct { /* rbd */
+ unsigned short rbd_status;
+#define RBD_ACNT (0x3fff)
+#define RBD_ACNTVALID (1 << 14)
+#define RBD_EOF (1 << 15)
+ unsigned short rbd_link;
+ unsigned short rbd_bufl;
+ unsigned short rbd_bufh;
+ unsigned short rbd_len;
+} rbd_t;
+
+typedef struct { /* nop */
+ unsigned short nop_status;
+ unsigned short nop_command;
+ unsigned short nop_link;
+} nop_t;
+
+typedef struct { /* set multicast */
+ unsigned short mc_status;
+ unsigned short mc_command;
+ unsigned short mc_link;
+ unsigned short mc_cnt;
+ unsigned char mc_addrs[1][6];
+} mc_t;
+
+typedef struct { /* set address */
+ unsigned short sa_status;
+ unsigned short sa_command;
+ unsigned short sa_link;
+ unsigned char sa_addr[6];
+} sa_t;
+
+typedef struct { /* config command */
+ unsigned short cfg_status;
+ unsigned short cfg_command;
+ unsigned short cfg_link;
+ unsigned char cfg_bytecnt; /* size of following data: 4 - 12 */
+ unsigned char cfg_fifolim; /* FIFO threshold */
+ unsigned char cfg_byte8;
+#define CFG8_SRDY (1 << 6)
+#define CFG8_SAVEBADF (1 << 7)
+ unsigned char cfg_byte9;
+#define CFG9_ADDRLEN(x) (x)
+#define CFG9_ADDRLENBUF (1 << 3)
+#define CFG9_PREAMB2 (0 << 4)
+#define CFG9_PREAMB4 (1 << 4)
+#define CFG9_PREAMB8 (2 << 4)
+#define CFG9_PREAMB16 (3 << 4)
+#define CFG9_ILOOPBACK (1 << 6)
+#define CFG9_ELOOPBACK (1 << 7)
+ unsigned char cfg_byte10;
+#define CFG10_LINPRI(x) (x)
+#define CFG10_ACR(x) (x << 4)
+#define CFG10_BOFMET (1 << 7)
+ unsigned char cfg_ifs;
+ unsigned char cfg_slotl;
+ unsigned char cfg_byte13;
+#define CFG13_SLOTH(x) (x)
+#define CFG13_RETRY(x) (x << 4)
+ unsigned char cfg_byte14;
+#define CFG14_PROMISC (1 << 0)
+#define CFG14_DISBRD (1 << 1)
+#define CFG14_MANCH (1 << 2)
+#define CFG14_TNCRS (1 << 3)
+#define CFG14_NOCRC (1 << 4)
+#define CFG14_CRC16 (1 << 5)
+#define CFG14_BTSTF (1 << 6)
+#define CFG14_FLGPAD (1 << 7)
+ unsigned char cfg_byte15;
+#define CFG15_CSTF(x) (x)
+#define CFG15_ICSS (1 << 3)
+#define CFG15_CDTF(x) (x << 4)
+#define CFG15_ICDS (1 << 7)
+ unsigned short cfg_minfrmlen;
+} cfg_t;
+
+typedef struct { /* scb */
+ unsigned short scb_status; /* status of 82586 */
+#define SCB_STRXMASK (7 << 4) /* Receive unit status */
+#define SCB_STRXIDLE (0 << 4) /* Idle */
+#define SCB_STRXSUSP (1 << 4) /* Suspended */
+#define SCB_STRXNRES (2 << 4) /* No resources */
+#define SCB_STRXRDY (4 << 4) /* Ready */
+#define SCB_STCUMASK (7 << 8) /* Command unit status */
+#define SCB_STCUIDLE (0 << 8) /* Idle */
+#define SCB_STCUSUSP (1 << 8) /* Suspended */
+#define SCB_STCUACTV (2 << 8) /* Active */
+#define SCB_STRNR (1 << 12) /* Receive unit not ready */
+#define SCB_STCNA (1 << 13) /* Command unit not ready */
+#define SCB_STFR (1 << 14) /* Frame received */
+#define SCB_STCX (1 << 15) /* Command completed */
+ unsigned short scb_command; /* Next command */
+#define SCB_CMDRXSTART (1 << 4) /* Start (at rfa_offset) */
+#define SCB_CMDRXRESUME (2 << 4) /* Resume reception */
+#define SCB_CMDRXSUSPEND (3 << 4) /* Suspend reception */
+#define SCB_CMDRXABORT (4 << 4) /* Abort reception */
+#define SCB_CMDCUCSTART (1 << 8) /* Start (at cbl_offset) */
+#define SCB_CMDCUCRESUME (2 << 8) /* Resume execution */
+#define SCB_CMDCUCSUSPEND (3 << 8) /* Suspend execution */
+#define SCB_CMDCUCABORT (4 << 8) /* Abort execution */
+#define SCB_CMDACKRNR (1 << 12) /* Ack RU not ready */
+#define SCB_CMDACKCNA (1 << 13) /* Ack CU not ready */
+#define SCB_CMDACKFR (1 << 14) /* Ack Frame received */
+#define SCB_CMDACKCX (1 << 15) /* Ack Command complete */
+ unsigned short scb_cbl_offset; /* Offset of first command unit */
+ unsigned short scb_rfa_offset; /* Offset of first receive frame area */
+ unsigned short scb_crc_errors; /* Properly aligned frame with CRC error*/
+ unsigned short scb_aln_errors; /* Misaligned frames */
+ unsigned short scb_rsc_errors; /* Frames lost due to no space */
+ unsigned short scb_ovn_errors; /* Frames lost due to slow bus */
+} scb_t;
+
+typedef struct { /* iscp */
+ unsigned short iscp_busy; /* set by CPU before CA */
+ unsigned short iscp_offset; /* offset of SCB */
+ unsigned short iscp_basel; /* base of SCB */
+ unsigned short iscp_baseh;
+} iscp_t;
+
+ /* this address must be 0xfff6 */
+typedef struct { /* scp */
+ unsigned short scp_sysbus; /* bus size */
+#define SCP_SY_16BBUS 0x00
+#define SCP_SY_8BBUS 0x01
+ unsigned short scp_junk[2]; /* junk */
+ unsigned short scp_iscpl; /* lower 16 bits of iscp */
+ unsigned short scp_iscph; /* upper 16 bits of iscp */
+} scp_t;
+
+/* commands */
+#define CMD_NOP 0
+#define CMD_SETADDRESS 1
+#define CMD_CONFIG 2
+#define CMD_SETMULTICAST 3
+#define CMD_TX 4
+#define CMD_TDR 5
+#define CMD_DUMP 6
+#define CMD_DIAGNOSE 7
+
+#define CMD_MASK 7
+
+#define CMD_INTR (1 << 13)
+#define CMD_SUSP (1 << 14)
+#define CMD_EOL (1 << 15)
+
+#define STAT_COLLISIONS (15)
+#define STAT_COLLEXCESSIVE (1 << 5)
+#define STAT_COLLAFTERTX (1 << 6)
+#define STAT_TXDEFERRED (1 << 7)
+#define STAT_TXSLOWDMA (1 << 8)
+#define STAT_TXLOSTCTS (1 << 9)
+#define STAT_NOCARRIER (1 << 10)
+#define STAT_FAIL (1 << 11)
+#define STAT_ABORTED (1 << 12)
+#define STAT_OK (1 << 13)
+#define STAT_BUSY (1 << 14)
+#define STAT_COMPLETE (1 << 15)
+#endif
+#endif
+
+/*
+ * Ether1 card definitions:
+ *
+ * FAST accesses:
+ * +0 Page register
+ * 16 pages
+ * +4 Control
+ * '1' = reset
+ * '2' = loopback
+ * '4' = CA
+ * '8' = int ack
+ *
+ * RAM at address + 0x2000
+ * Pod. Prod id = 3
+ * Words after ID block [base + 8 words]
+ * +0 pcb issue (0x0c and 0xf3 invalid)
+ * +1 - +6 eth hw address
+ */
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
new file mode 100644
index 000000000000..6eba352c52e0
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
@@ -0,0 +1,238 @@
+/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
+ munged into HPPA boxen.
+
+ This driver is based upon 82596.c, original credits are below...
+ but there were too many hoops which HP wants jumped through to
+ keep this code in there in a sane manner.
+
+ 3 primary sources of the mess --
+ 1) hppa needs *lots* of cacheline flushing to keep this kind of
+ MMIO running.
+
+ 2) The 82596 needs to see all of its pointers as their physical
+ address. Thus virt_to_bus/bus_to_virt are *everywhere*.
+
+ 3) The implementation HP is using seems to be significantly pickier
+ about when and how the command and RX units are started. Some
+ command ordering was changed.
+
+ Examination of the mach driver leads one to believe that there
+ might be a saner way to pull this off... anyone who feels like a
+ full rewrite can be my guest.
+
+ Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)
+
+ 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de)
+ 03/02/2000 changes for better/correct(?) cache-flushing (deller)
+*/
+
+/* 82596.c: A generic 82596 ethernet driver for linux. */
+/*
+ Based on Apricot.c
+ Written 1994 by Mark Evans.
+ This driver is for the Apricot 82596 bus-master interface
+
+ Modularised 12/94 Mark Evans
+
+
+ Modified to support the 82596 ethernet chips on 680x0 VME boards.
+ by Richard Hirst <richard@sleepie.demon.co.uk>
+ Renamed to be 82596.c
+
+ 980825: Changed to receive directly into sk_buffs which are
+ allocated at open() time. Eliminates copy on incoming frames
+ (small ones are still copied). Shared data now held in a
+ non-cached page, so we can run on 68060 in copyback mode.
+
+ TBD:
+ * look at deferring rx frames rather than discarding (as per tulip)
+ * handle tx ring full as per tulip
+ * performance test to tune rx_copybreak
+
+ Most of my modifications relate to the braindead big-endian
+ implementation by Intel. When the i596 is operating in
+ 'big-endian' mode, it thinks a 32 bit value of 0x12345678
+ should be stored as 0x56781234. This is a real pain when
+ you have linked lists which are shared by the 680x0 and the
+ i596.
+
+ Driver skeleton
+ Written 1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. This software may only be used and distributed
+ according to the terms of the GNU General Public License as modified by SRC,
+ incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
+
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/pdc.h>
+#include <asm/parisc-device.h>
+
+#define LASI_82596_DRIVER_VERSION "LASI 82596 driver - Revision: 1.30"
+
+#define PA_I82596_RESET 0 /* Offsets relative to LASI-LAN-Addr.*/
+#define PA_CPU_PORT_L_ACCESS 4
+#define PA_CHANNEL_ATTENTION 8
+
+#define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */
+
+#define DMA_ALLOC dma_alloc_noncoherent
+#define DMA_FREE dma_free_noncoherent
+#define DMA_WBACK(ndev, addr, len) \
+ do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
+
+#define DMA_INV(ndev, addr, len) \
+ do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_FROM_DEVICE); } while (0)
+
+#define DMA_WBACK_INV(ndev, addr, len) \
+ do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
+
+#define SYSBUS 0x0000006c
+
+/*
+ * big endian CPU, 82596 "big" endian mode: 32-bit values are stored
+ * with their 16-bit halves swapped, so SWAP32 exchanges the halves
+ * and SWAP16 is a no-op.
+ */
+#define SWAP32(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
+#define SWAP16(x) (x)
+
+#include "lib82596.c"
+
+MODULE_AUTHOR("Richard Hirst");
+MODULE_DESCRIPTION("i82596 driver");
+MODULE_LICENSE("GPL");
+module_param(i596_debug, int, 0);
+MODULE_PARM_DESC(i596_debug, "lasi_82596 debug mask");
+
+static inline void ca(struct net_device *dev)
+{
+ gsc_writel(0, dev->base_addr + PA_CHANNEL_ATTENTION);
+}
+
+
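+/*
+ * Write a 32-bit PORT command as two 16-bit halves; boards flagged
+ * OPT_SWAP_PORT want the most significant half first (see the note
+ * on MPU_PORT in lib82596.c).
+ */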
+static void mpu_port(struct net_device *dev, int c, dma_addr_t x)
+{
+ struct i596_private *lp = netdev_priv(dev);
+
+ u32 v = (u32) (c) | (u32) (x);
+ u16 a, b;
+
+ if (lp->options & OPT_SWAP_PORT) {
+ a = v >> 16;
+ b = v & 0xffff;
+ } else {
+ a = v & 0xffff;
+ b = v >> 16;
+ }
+
+ gsc_writel(a, dev->base_addr + PA_CPU_PORT_L_ACCESS);
+ udelay(1);
+ gsc_writel(b, dev->base_addr + PA_CPU_PORT_L_ACCESS);
+}
+
+#define LAN_PROM_ADDR 0xF0810000
+
+static int __devinit
+lan_init_chip(struct parisc_device *dev)
+{
+ struct net_device *netdevice;
+ struct i596_private *lp;
+ int retval;
+ int i;
+
+ if (!dev->irq) {
+ printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
+ __FILE__, (unsigned long)dev->hpa.start);
+ return -ENODEV;
+ }
+
+ printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n",
+ (unsigned long)dev->hpa.start, dev->irq);
+
+ netdevice = alloc_etherdev(sizeof(struct i596_private));
+ if (!netdevice)
+ return -ENOMEM;
+ SET_NETDEV_DEV(netdevice, &dev->dev);
+ parisc_set_drvdata (dev, netdevice);
+
+ netdevice->base_addr = dev->hpa.start;
+ netdevice->irq = dev->irq;
+
+ if (pdc_lan_station_id(netdevice->dev_addr, netdevice->base_addr)) {
+ for (i = 0; i < 6; i++) {
+ netdevice->dev_addr[i] = gsc_readb(LAN_PROM_ADDR + i);
+ }
+ printk(KERN_INFO
+ "%s: MAC of HP700 LAN read from EEPROM\n", __FILE__);
+ }
+
+ lp = netdev_priv(netdevice);
+ lp->options = dev->id.sversion == 0x72 ? OPT_SWAP_PORT : 0;
+
+ retval = i82596_probe(netdevice);
+ if (retval) {
+ free_netdev(netdevice);
+ return -ENODEV;
+ }
+ return retval;
+}
+
+static int __devexit lan_remove_chip (struct parisc_device *pdev)
+{
+ struct net_device *dev = parisc_get_drvdata(pdev);
+ struct i596_private *lp = netdev_priv(dev);
+
+ unregister_netdev (dev);
+ DMA_FREE(&pdev->dev, sizeof(struct i596_private),
+ (void *)lp->dma, lp->dma_addr);
+ free_netdev (dev);
+ return 0;
+}
+
+static struct parisc_device_id lan_tbl[] = {
+ { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008a },
+ { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00072 },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(parisc, lan_tbl);
+
+static struct parisc_driver lan_driver = {
+ .name = "lasi_82596",
+ .id_table = lan_tbl,
+ .probe = lan_init_chip,
+ .remove = __devexit_p(lan_remove_chip),
+};
+
+static int __devinit lasi_82596_init(void)
+{
+ printk(KERN_INFO LASI_82596_DRIVER_VERSION "\n");
+ return register_parisc_driver(&lan_driver);
+}
+
+module_init(lasi_82596_init);
+
+static void __exit lasi_82596_exit(void)
+{
+ unregister_parisc_driver(&lan_driver);
+}
+
+module_exit(lasi_82596_exit);
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
new file mode 100644
index 000000000000..3efbd8dbb63d
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -0,0 +1,1412 @@
+/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
+ munged into HPPA boxen.
+
+ This driver is based upon 82596.c, original credits are below...
+ but there were too many hoops which HP wants jumped through to
+ keep this code in there in a sane manner.
+
+ 3 primary sources of the mess --
+ 1) hppa needs *lots* of cacheline flushing to keep this kind of
+ MMIO running.
+
+ 2) The 82596 needs to see all of its pointers as their physical
+ address. Thus virt_to_bus/bus_to_virt are *everywhere*.
+
+ 3) The implementation HP is using seems to be significantly pickier
+ about when and how the command and RX units are started. Some
+ command ordering was changed.
+
+ Examination of the mach driver leads one to believe that there
+ might be a saner way to pull this off... anyone who feels like a
+ full rewrite can be my guest.
+
+ Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)
+
+ 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de)
+ 03/02/2000 changes for better/correct(?) cache-flushing (deller)
+*/
+
+/* 82596.c: A generic 82596 ethernet driver for linux. */
+/*
+ Based on Apricot.c
+ Written 1994 by Mark Evans.
+ This driver is for the Apricot 82596 bus-master interface
+
+ Modularised 12/94 Mark Evans
+
+
+ Modified to support the 82596 ethernet chips on 680x0 VME boards.
+ by Richard Hirst <richard@sleepie.demon.co.uk>
+ Renamed to be 82596.c
+
+ 980825: Changed to receive directly into sk_buffs which are
+ allocated at open() time. Eliminates copy on incoming frames
+ (small ones are still copied). Shared data now held in a
+ non-cached page, so we can run on 68060 in copyback mode.
+
+ TBD:
+ * look at deferring rx frames rather than discarding (as per tulip)
+ * handle tx ring full as per tulip
+ * performance test to tune rx_copybreak
+
+ Most of my modifications relate to the braindead big-endian
+ implementation by Intel. When the i596 is operating in
+ 'big-endian' mode, it thinks a 32 bit value of 0x12345678
+ should be stored as 0x56781234. This is a real pain when
+ you have linked lists which are shared by the 680x0 and the
+ i596.
+
+ Driver skeleton
+ Written 1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. This software may only be used and distributed
+ according to the terms of the GNU General Public License as modified by SRC,
+ incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
+
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/gfp.h>
+
+/* DEBUG flags
+ */
+
+#define DEB_INIT 0x0001
+#define DEB_PROBE 0x0002
+#define DEB_SERIOUS 0x0004
+#define DEB_ERRORS 0x0008
+#define DEB_MULTI 0x0010
+#define DEB_TDR 0x0020
+#define DEB_OPEN 0x0040
+#define DEB_RESET 0x0080
+#define DEB_ADDCMD 0x0100
+#define DEB_STATUS 0x0200
+#define DEB_STARTTX 0x0400
+#define DEB_RXADDR 0x0800
+#define DEB_TXADDR 0x1000
+#define DEB_RXFRAME 0x2000
+#define DEB_INTS 0x4000
+#define DEB_STRUCT 0x8000
+#define DEB_ANY 0xffff
+
+
+#define DEB(x, y) if (i596_debug & (x)) { y; }
+
+
+/*
+ * The MPU_PORT command allows direct access to the 82596. With PORT access
+ * the following commands are available (p5-18). The 32-bit port command
+ * must be word-swapped with the most significant word written first.
+ * This only applies to VME boards.
+ */
+#define PORT_RESET 0x00 /* reset 82596 */
+#define PORT_SELFTEST 0x01 /* selftest */
+#define PORT_ALTSCP 0x02 /* alternate SCB address */
+#define PORT_ALTDUMP 0x03 /* Alternate DUMP address */
+
+static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
+
+/* Copy frames shorter than rx_copybreak, otherwise pass on up in
+ * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
+ */
+static int rx_copybreak = 100;
+
+#define PKT_BUF_SZ 1536
+#define MAX_MC_CNT 64
+
+#define ISCP_BUSY 0x0001
+
+#define I596_NULL ((u32)0xffffffff)
+
+#define CMD_EOL 0x8000 /* The last command of the list, stop. */
+#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
+#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
+
+#define CMD_FLEX 0x0008 /* Enable flexible memory model */
+
+enum commands {
+ CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
+ CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
+};
+
+#define STAT_C 0x8000 /* Set to 0 after execution */
+#define STAT_B 0x4000 /* Command being executed */
+#define STAT_OK 0x2000 /* Command executed ok */
+#define STAT_A 0x1000 /* Command aborted */
+
+#define CUC_START 0x0100
+#define CUC_RESUME 0x0200
+#define CUC_SUSPEND 0x0300
+#define CUC_ABORT 0x0400
+#define RX_START 0x0010
+#define RX_RESUME 0x0020
+#define RX_SUSPEND 0x0030
+#define RX_ABORT 0x0040
+
+#define TX_TIMEOUT (HZ/20)
+
+
+struct i596_reg {
+ unsigned short porthi;
+ unsigned short portlo;
+ u32 ca;
+};
+
+#define EOF 0x8000
+#define SIZE_MASK 0x3fff
+
+struct i596_tbd {
+ unsigned short size;
+ unsigned short pad;
+ u32 next;
+ u32 data;
+ u32 cache_pad[5]; /* Total 32 bytes... */
+};
+
+/* The command structure has two 'next' pointers; v_next is the address of
+ * the next command as seen by the CPU, b_next is the address of the next
+ * command as seen by the 82596. The b_next pointer, as used by the 82596
+ * always references the status field of the next command, rather than the
+ * v_next field, because the 82596 is unaware of v_next. It may seem more
+ * logical to put v_next at the end of the structure, but we cannot do that
+ * because the 82596 expects other fields to be there, depending on command
+ * type.
+ */
+
+struct i596_cmd {
+ struct i596_cmd *v_next; /* Address from CPUs viewpoint */
+ unsigned short status;
+ unsigned short command;
+ u32 b_next; /* Address from i596 viewpoint */
+};
+
+struct tx_cmd {
+ struct i596_cmd cmd;
+ u32 tbd;
+ unsigned short size;
+ unsigned short pad;
+ struct sk_buff *skb; /* So we can free it after tx */
+ dma_addr_t dma_addr;
+#ifdef __LP64__
+ u32 cache_pad[6]; /* Total 64 bytes... */
+#else
+ u32 cache_pad[1]; /* Total 32 bytes... */
+#endif
+};
+
+struct tdr_cmd {
+ struct i596_cmd cmd;
+ unsigned short status;
+ unsigned short pad;
+};
+
+struct mc_cmd {
+ struct i596_cmd cmd;
+ short mc_cnt;
+ char mc_addrs[MAX_MC_CNT*6];
+};
+
+struct sa_cmd {
+ struct i596_cmd cmd;
+ char eth_addr[8];
+};
+
+struct cf_cmd {
+ struct i596_cmd cmd;
+ char i596_config[16];
+};
+
+struct i596_rfd {
+ unsigned short stat;
+ unsigned short cmd;
+ u32 b_next; /* Address from i596 viewpoint */
+ u32 rbd;
+ unsigned short count;
+ unsigned short size;
+ struct i596_rfd *v_next; /* Address from CPUs viewpoint */
+ struct i596_rfd *v_prev;
+#ifndef __LP64__
+ u32 cache_pad[2]; /* Total 32 bytes... */
+#endif
+};
+
+struct i596_rbd {
+ /* hardware data */
+ unsigned short count;
+ unsigned short zero1;
+ u32 b_next;
+ u32 b_data; /* Address from i596 viewpoint */
+ unsigned short size;
+ unsigned short zero2;
+ /* driver data */
+ struct sk_buff *skb;
+ struct i596_rbd *v_next;
+ u32 b_addr; /* This rbd addr from i596 view */
+ unsigned char *v_data; /* Address from CPUs viewpoint */
+ /* Total 32 bytes... */
+#ifdef __LP64__
+ u32 cache_pad[4];
+#endif
+};
+
+/* These values are chosen so that struct i596_dma fits in one page... */
+
+#define TX_RING_SIZE 32
+#define RX_RING_SIZE 16
+
+struct i596_scb {
+ unsigned short status;
+ unsigned short command;
+ u32 cmd;
+ u32 rfd;
+ u32 crc_err;
+ u32 align_err;
+ u32 resource_err;
+ u32 over_err;
+ u32 rcvdt_err;
+ u32 short_err;
+ unsigned short t_on;
+ unsigned short t_off;
+};
+
+struct i596_iscp {
+ u32 stat;
+ u32 scb;
+};
+
+struct i596_scp {
+ u32 sysbus;
+ u32 pad;
+ u32 iscp;
+};
+
+struct i596_dma {
+ struct i596_scp scp __attribute__((aligned(32)));
+ volatile struct i596_iscp iscp __attribute__((aligned(32)));
+ volatile struct i596_scb scb __attribute__((aligned(32)));
+ struct sa_cmd sa_cmd __attribute__((aligned(32)));
+ struct cf_cmd cf_cmd __attribute__((aligned(32)));
+ struct tdr_cmd tdr_cmd __attribute__((aligned(32)));
+ struct mc_cmd mc_cmd __attribute__((aligned(32)));
+ struct i596_rfd rfds[RX_RING_SIZE] __attribute__((aligned(32)));
+ struct i596_rbd rbds[RX_RING_SIZE] __attribute__((aligned(32)));
+ struct tx_cmd tx_cmds[TX_RING_SIZE] __attribute__((aligned(32)));
+ struct i596_tbd tbds[TX_RING_SIZE] __attribute__((aligned(32)));
+};
+
+struct i596_private {
+ struct i596_dma *dma;
+ u32 stat;
+ int last_restart;
+ struct i596_rfd *rfd_head;
+ struct i596_rbd *rbd_head;
+ struct i596_cmd *cmd_tail;
+ struct i596_cmd *cmd_head;
+ int cmd_backlog;
+ u32 last_cmd;
+ int next_tx_cmd;
+ int options;
+ spinlock_t lock; /* serialize access to chip */
+ dma_addr_t dma_addr;
+ void __iomem *mpu_port;
+ void __iomem *ca;
+};
+
+static const char init_setup[] =
+{
+ 0x8E, /* length, prefetch on */
+ 0xC8, /* fifo to 8, monitor off */
+ 0x80, /* don't save bad frames */
+ 0x2E, /* No source address insertion, 8 byte preamble */
+ 0x00, /* priority and backoff defaults */
+ 0x60, /* interframe spacing */
+ 0x00, /* slot time LSB */
+ 0xf2, /* slot time and retries */
+ 0x00, /* promiscuous mode */
+ 0x00, /* collision detect */
+ 0x40, /* minimum frame length */
+ 0xff,
+ 0x00,
+ 0x7f /* multi IA */ };
+
+static int i596_open(struct net_device *dev);
+static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t i596_interrupt(int irq, void *dev_id);
+static int i596_close(struct net_device *dev);
+static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
+static void i596_tx_timeout (struct net_device *dev);
+static void print_eth(unsigned char *buf, char *str);
+static void set_multicast_list(struct net_device *dev);
+static inline void ca(struct net_device *dev);
+static void mpu_port(struct net_device *dev, int c, dma_addr_t x);
+
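+/*
+ * Watchdog knobs: i596_add_cmd() resets the chip when more than
+ * max_cmd_backlog commands have been outstanding for longer than
+ * ticks_limit jiffies.
+ */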
+static int rx_ring_size = RX_RING_SIZE;
+static int ticks_limit = 100;
+static int max_cmd_backlog = TX_RING_SIZE-1;
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void i596_poll_controller(struct net_device *dev);
+#endif
+
+
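+/* poll iscp.stat, invalidating the cacheline on each pass, until the
+ * chip clears it or delcnt 10us ticks have expired
+ */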
+static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
+{
+ DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
+ while (--delcnt && dma->iscp.stat) {
+ udelay(10);
+ DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
+ }
+ if (!delcnt) {
+ printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
+ dev->name, str, SWAP16(dma->iscp.stat));
+ return -1;
+ } else
+ return 0;
+}
+
+
+static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
+{
+ DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
+ while (--delcnt && dma->scb.command) {
+ udelay(10);
+ DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
+ }
+ if (!delcnt) {
+ printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
+ dev->name, str,
+ SWAP16(dma->scb.status),
+ SWAP16(dma->scb.command));
+ return -1;
+ } else
+ return 0;
+}
+
+
+static void i596_display_data(struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ struct i596_dma *dma = lp->dma;
+ struct i596_cmd *cmd;
+ struct i596_rfd *rfd;
+ struct i596_rbd *rbd;
+
+ printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
+ &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
+ printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
+ &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
+ printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
+ " .cmd = %08x, .rfd = %08x\n",
+ &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
+ SWAP16(dma->scb.cmd), SWAP32(dma->scb.rfd));
+ printk(KERN_DEBUG " errors: crc %x, align %x, resource %x,"
+ " over %x, rcvdt %x, short %x\n",
+ SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
+ SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
+ SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
+ cmd = lp->cmd_head;
+ while (cmd != NULL) {
+ printk(KERN_DEBUG
+ "cmd at %p, .status = %04x, .command = %04x,"
+ " .b_next = %08x\n",
+ cmd, SWAP16(cmd->status), SWAP16(cmd->command),
+ SWAP32(cmd->b_next));
+ cmd = cmd->v_next;
+ }
+ rfd = lp->rfd_head;
+ printk(KERN_DEBUG "rfd_head = %p\n", rfd);
+ do {
+ printk(KERN_DEBUG
+ " %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
+ " count %04x\n",
+ rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
+ SWAP32(rfd->b_next), SWAP32(rfd->rbd),
+ SWAP16(rfd->count));
+ rfd = rfd->v_next;
+ } while (rfd != lp->rfd_head);
+ rbd = lp->rbd_head;
+ printk(KERN_DEBUG "rbd_head = %p\n", rbd);
+ do {
+ printk(KERN_DEBUG
+ " %p .count %04x, b_next %08x, b_data %08x,"
+ " size %04x\n",
+ rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
+ SWAP32(rbd->b_data), SWAP16(rbd->size));
+ rbd = rbd->v_next;
+ } while (rbd != lp->rbd_head);
+ DMA_INV(dev, dma, sizeof(struct i596_dma));
+}
+
+
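+/*
+ * translate a CPU pointer within the coherent i596_dma block into the
+ * bus address the 82596 must be given
+ */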
+#define virt_to_dma(lp, v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)((lp)->dma)))
+
+static inline int init_rx_bufs(struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ struct i596_dma *dma = lp->dma;
+ int i;
+ struct i596_rfd *rfd;
+ struct i596_rbd *rbd;
+
+ /* First build the Receive Buffer Descriptor List */
+
+ for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
+ if (skb == NULL)
+ return -1;
+ dma_addr = dma_map_single(dev->dev.parent, skb->data,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ rbd->v_next = rbd+1;
+ rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
+ rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
+ rbd->skb = skb;
+ rbd->v_data = skb->data;
+ rbd->b_data = SWAP32(dma_addr);
+ rbd->size = SWAP16(PKT_BUF_SZ);
+ }
+ lp->rbd_head = dma->rbds;
+ rbd = dma->rbds + rx_ring_size - 1;
+ rbd->v_next = dma->rbds;
+ rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));
+
+ /* Now build the Receive Frame Descriptor List */
+
+ for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
+ rfd->rbd = I596_NULL;
+ rfd->v_next = rfd+1;
+ rfd->v_prev = rfd-1;
+ rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
+ rfd->cmd = SWAP16(CMD_FLEX);
+ }
+ lp->rfd_head = dma->rfds;
+ dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
+ rfd = dma->rfds;
+ rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
+ rfd->v_prev = dma->rfds + rx_ring_size - 1;
+ rfd = dma->rfds + rx_ring_size - 1;
+ rfd->v_next = dma->rfds;
+ rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
+ rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
+
+ DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
+ return 0;
+}
+
+static inline void remove_rx_bufs(struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ struct i596_rbd *rbd;
+ int i;
+
+ for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
+ if (rbd->skb == NULL)
+ break;
+ dma_unmap_single(dev->dev.parent,
+ (dma_addr_t)SWAP32(rbd->b_data),
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ dev_kfree_skb(rbd->skb);
+ }
+}
+
+
+static void rebuild_rx_bufs(struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ struct i596_dma *dma = lp->dma;
+ int i;
+
+ /* Ensure rx frame/buffer descriptors are tidy */
+
+ for (i = 0; i < rx_ring_size; i++) {
+ dma->rfds[i].rbd = I596_NULL;
+ dma->rfds[i].cmd = SWAP16(CMD_FLEX);
+ }
+ dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
+ lp->rfd_head = dma->rfds;
+ dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
+ lp->rbd_head = dma->rbds;
+ dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));
+
+ DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
+}
+
+
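+/*
+ * 82596 bring-up: reset via the MPU port, hand the chip our scp with
+ * PORT_ALTSCP, issue a channel attention and wait for it to clear
+ * iscp.stat, then queue the configure/set-address/TDR commands and
+ * start the receive unit.
+ */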
+static int init_i596_mem(struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ struct i596_dma *dma = lp->dma;
+ unsigned long flags;
+
+ mpu_port(dev, PORT_RESET, 0);
+ udelay(100); /* Wait 100us - seems to help */
+
+ /* change the scp address */
+
+ lp->last_cmd = jiffies;
+
+ dma->scp.sysbus = SYSBUS;
+ dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
+ dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
+ dma->iscp.stat = SWAP32(ISCP_BUSY);
+ lp->cmd_backlog = 0;
+
+ lp->cmd_head = NULL;
+ dma->scb.cmd = I596_NULL;
+
+ DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
+
+ DMA_WBACK(dev, &(dma->scp), sizeof(struct i596_scp));
+ DMA_WBACK(dev, &(dma->iscp), sizeof(struct i596_iscp));
+ DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
+
+ mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
+ ca(dev);
+ if (wait_istat(dev, dma, 1000, "initialization timed out"))
+ goto failed;
+ DEB(DEB_INIT, printk(KERN_DEBUG
+ "%s: i82596 initialization successful\n",
+ dev->name));
+
+ if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
+ printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
+ goto failed;
+ }
+
+ /* Ensure rx frame/buffer descriptors are tidy */
+ rebuild_rx_bufs(dev);
+
+ dma->scb.command = 0;
+ DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
+
+ DEB(DEB_INIT, printk(KERN_DEBUG
+ "%s: queuing CmdConfigure\n", dev->name));
+ memcpy(dma->cf_cmd.i596_config, init_setup, 14);
+ dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
+ DMA_WBACK(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
+ i596_add_cmd(dev, &dma->cf_cmd.cmd);
+
+ DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
+ memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, 6);
+ dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
+ DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
+ i596_add_cmd(dev, &dma->sa_cmd.cmd);
+
+ DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
+ dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
+ DMA_WBACK(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
+ i596_add_cmd(dev, &dma->tdr_cmd.cmd);
+
+ spin_lock_irqsave (&lp->lock, flags);
+
+ if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
+ spin_unlock_irqrestore (&lp->lock, flags);
+ goto failed_free_irq;
+ }
+ DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
+ dma->scb.command = SWAP16(RX_START);
+ dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
+ DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
+
+ ca(dev);
+
+ spin_unlock_irqrestore (&lp->lock, flags);
+ if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
+ goto failed_free_irq;
+ DEB(DEB_INIT, printk(KERN_DEBUG
+ "%s: Receive unit started OK\n", dev->name));
+ return 0;
+
+failed_free_irq:
+ free_irq(dev->irq, dev);
+failed:
+ printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
+ mpu_port(dev, PORT_RESET, 0);
+ return -1;
+}
+
+
+static inline int i596_rx(struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ struct i596_rfd *rfd;
+ struct i596_rbd *rbd;
+ int frames = 0;
+
+ DEB(DEB_RXFRAME, printk(KERN_DEBUG
+ "i596_rx(), rfd_head %p, rbd_head %p\n",
+ lp->rfd_head, lp->rbd_head));
+
+
+ rfd = lp->rfd_head; /* Ref next frame to check */
+
+ DMA_INV(dev, rfd, sizeof(struct i596_rfd));
+ while (rfd->stat & SWAP16(STAT_C)) { /* Loop while complete frames */
+ if (rfd->rbd == I596_NULL)
+ rbd = NULL;
+ else if (rfd->rbd == lp->rbd_head->b_addr) {
+ rbd = lp->rbd_head;
+ DMA_INV(dev, rbd, sizeof(struct i596_rbd));
+ } else {
+ printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
+ /* XXX Now what? */
+ rbd = NULL;
+ }
+ DEB(DEB_RXFRAME, printk(KERN_DEBUG
+ " rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
+ rfd, rfd->rbd, rfd->stat));
+
+ if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
+ /* a good frame */
+ int pkt_len = SWAP16(rbd->count) & 0x3fff;
+ struct sk_buff *skb = rbd->skb;
+ int rx_in_place = 0;
+
+ DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
+ frames++;
+
+ /* Check if the packet is long enough to just accept
+ * without copying to a properly sized skbuff.
+ */
+
+ if (pkt_len > rx_copybreak) {
+ struct sk_buff *newskb;
+ dma_addr_t dma_addr;
+
+ dma_unmap_single(dev->dev.parent,
+ (dma_addr_t)SWAP32(rbd->b_data),
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ /* Get fresh skbuff to replace filled one. */
+ newskb = netdev_alloc_skb_ip_align(dev,
+ PKT_BUF_SZ);
+ if (newskb == NULL) {
+ skb = NULL; /* drop pkt */
+ goto memory_squeeze;
+ }
+
+ /* Pass up the skb already on the Rx ring. */
+ skb_put(skb, pkt_len);
+ rx_in_place = 1;
+ rbd->skb = newskb;
+ dma_addr = dma_map_single(dev->dev.parent,
+ newskb->data,
+ PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ rbd->v_data = newskb->data;
+ rbd->b_data = SWAP32(dma_addr);
+ DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
+ } else
+ skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+memory_squeeze:
+ if (skb == NULL) {
+ /* XXX tulip.c can defer packets here!! */
+ printk(KERN_ERR
+ "%s: i596_rx Memory squeeze, dropping packet.\n",
+ dev->name);
+ dev->stats.rx_dropped++;
+ } else {
+ if (!rx_in_place) {
+ /* 16 byte align the data fields */
+ dma_sync_single_for_cpu(dev->dev.parent,
+ (dma_addr_t)SWAP32(rbd->b_data),
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
+ dma_sync_single_for_device(dev->dev.parent,
+ (dma_addr_t)SWAP32(rbd->b_data),
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ }
+ skb->len = pkt_len;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ } else {
+ DEB(DEB_ERRORS, printk(KERN_DEBUG
+ "%s: Error, rfd.stat = 0x%04x\n",
+ dev->name, rfd->stat));
+ dev->stats.rx_errors++;
+ if (rfd->stat & SWAP16(0x0100))
+ dev->stats.collisions++;
+ if (rfd->stat & SWAP16(0x8000))
+ dev->stats.rx_length_errors++;
+ if (rfd->stat & SWAP16(0x0001))
+ dev->stats.rx_over_errors++;
+ if (rfd->stat & SWAP16(0x0002))
+ dev->stats.rx_fifo_errors++;
+ if (rfd->stat & SWAP16(0x0004))
+ dev->stats.rx_frame_errors++;
+ if (rfd->stat & SWAP16(0x0008))
+ dev->stats.rx_crc_errors++;
+ if (rfd->stat & SWAP16(0x0010))
+ dev->stats.rx_length_errors++;
+ }
+
+ /* Clear the buffer descriptor count and EOF + F flags */
+
+ if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
+ rbd->count = 0;
+ lp->rbd_head = rbd->v_next;
+ DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
+ }
+
+ /* Tidy the frame descriptor, marking it as end of list */
+
+ rfd->rbd = I596_NULL;
+ rfd->stat = 0;
+ rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
+ rfd->count = 0;
+
+ /* Update record of next frame descriptor to process */
+
+ lp->dma->scb.rfd = rfd->b_next;
+ lp->rfd_head = rfd->v_next;
+ DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd));
+
+ /* Remove end-of-list from old end descriptor */
+
+ rfd->v_prev->cmd = SWAP16(CMD_FLEX);
+ DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd));
+ rfd = lp->rfd_head;
+ DMA_INV(dev, rfd, sizeof(struct i596_rfd));
+ }
+
+ DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));
+
+ return 0;
+}
+
+
+static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
+{
+ struct i596_cmd *ptr;
+
+ while (lp->cmd_head != NULL) {
+ ptr = lp->cmd_head;
+ lp->cmd_head = ptr->v_next;
+ lp->cmd_backlog--;
+
+ switch (SWAP16(ptr->command) & 0x7) {
+ case CmdTx:
+ {
+ struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
+ struct sk_buff *skb = tx_cmd->skb;
+ dma_unmap_single(dev->dev.parent,
+ tx_cmd->dma_addr,
+ skb->len, DMA_TO_DEVICE);
+
+ dev_kfree_skb(skb);
+
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
+
+ ptr->v_next = NULL;
+ ptr->b_next = I596_NULL;
+ tx_cmd->cmd.command = 0; /* Mark as free */
+ break;
+ }
+ default:
+ ptr->v_next = NULL;
+ ptr->b_next = I596_NULL;
+ }
+ DMA_WBACK_INV(dev, ptr, sizeof(struct i596_cmd));
+ }
+
+ wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
+ lp->dma->scb.cmd = I596_NULL;
+ DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
+}
+
+
+static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
+{
+ unsigned long flags;
+
+ DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));
+
+ spin_lock_irqsave (&lp->lock, flags);
+
+ wait_cmd(dev, lp->dma, 100, "i596_reset timed out");
+
+ netif_stop_queue(dev);
+
+ /* FIXME: this command might cause an lpmc */
+ lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
+ DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
+ ca(dev);
+
+ /* wait for shutdown */
+ wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ i596_cleanup_cmd(dev, lp);
+ i596_rx(dev);
+
+ netif_start_queue(dev);
+ init_i596_mem(dev);
+}
+
+
+static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ struct i596_dma *dma = lp->dma;
+ unsigned long flags;
+
+ DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
+ lp->cmd_head));
+
+ cmd->status = 0;
+ cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
+ cmd->v_next = NULL;
+ cmd->b_next = I596_NULL;
+ DMA_WBACK(dev, cmd, sizeof(struct i596_cmd));
+
+ spin_lock_irqsave (&lp->lock, flags);
+
+ if (lp->cmd_head != NULL) {
+ lp->cmd_tail->v_next = cmd;
+ lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
+ DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd));
+ } else {
+ lp->cmd_head = cmd;
+ wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
+ dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
+ dma->scb.command = SWAP16(CUC_START);
+ DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
+ ca(dev);
+ }
+ lp->cmd_tail = cmd;
+ lp->cmd_backlog++;
+
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ if (lp->cmd_backlog > max_cmd_backlog) {
+ unsigned long tickssofar = jiffies - lp->last_cmd;
+
+ if (tickssofar < ticks_limit)
+ return;
+
+ printk(KERN_ERR
+ "%s: command unit timed out, status resetting.\n",
+ dev->name);
+ i596_reset(dev, lp);
+ }
+}
+
+static int i596_open(struct net_device *dev)
+{
+ DEB(DEB_OPEN, printk(KERN_DEBUG
+ "%s: i596_open() irq %d.\n", dev->name, dev->irq));
+
+ if (init_rx_bufs(dev)) {
+ printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
+ return -EAGAIN;
+ }
+ if (init_i596_mem(dev)) {
+ printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
+ goto out_remove_rx_bufs;
+ }
+ netif_start_queue(dev);
+
+ return 0;
+
+out_remove_rx_bufs:
+ remove_rx_bufs(dev);
+ return -EAGAIN;
+}
+
+static void i596_tx_timeout (struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+
+ /* Transmitter timeout, serious problems. */
+ DEB(DEB_ERRORS, printk(KERN_DEBUG
+ "%s: transmit timed out, status resetting.\n",
+ dev->name));
+
+ dev->stats.tx_errors++;
+
+ /* Try to restart the adaptor */
+ if (lp->last_restart == dev->stats.tx_packets) {
+ DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
+ /* Shutdown and restart */
+ i596_reset (dev, lp);
+ } else {
+ /* Issue a channel attention signal */
+ DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
+ lp->dma->scb.command = SWAP16(CUC_START | RX_START);
+ DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb));
+ ca (dev);
+ lp->last_restart = dev->stats.tx_packets;
+ }
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue (dev);
+}
+
+
+static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ struct tx_cmd *tx_cmd;
+ struct i596_tbd *tbd;
+ short length = skb->len;
+
+ DEB(DEB_STARTTX, printk(KERN_DEBUG
+ "%s: i596_start_xmit(%x,%p) called\n",
+ dev->name, skb->len, skb->data));
+
+ if (length < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ length = ETH_ZLEN;
+ }
+
+ netif_stop_queue(dev);
+
+ tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
+ tbd = lp->dma->tbds + lp->next_tx_cmd;
+
+ if (tx_cmd->cmd.command) {
+ DEB(DEB_ERRORS, printk(KERN_DEBUG
+ "%s: xmit ring full, dropping packet.\n",
+ dev->name));
+ dev->stats.tx_dropped++;
+
+ dev_kfree_skb(skb);
+ } else {
+ if (++lp->next_tx_cmd == TX_RING_SIZE)
+ lp->next_tx_cmd = 0;
+ tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
+ tbd->next = I596_NULL;
+
+ tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
+ tx_cmd->skb = skb;
+
+ tx_cmd->pad = 0;
+ tx_cmd->size = 0;
+ tbd->pad = 0;
+ tbd->size = SWAP16(EOF | length);
+
+ tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ tbd->data = SWAP32(tx_cmd->dma_addr);
+
+ DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
+ DMA_WBACK_INV(dev, tx_cmd, sizeof(struct tx_cmd));
+ DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd));
+ i596_add_cmd(dev, &tx_cmd->cmd);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += length;
+ }
+
+ netif_start_queue(dev);
+
+ return NETDEV_TX_OK;
+}
+
+static void print_eth(unsigned char *add, char *str)
+{
+ printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
+ add, add + 6, add, add[12], add[13], str);
+}
+
+static const struct net_device_ops i596_netdev_ops = {
+ .ndo_open = i596_open,
+ .ndo_stop = i596_close,
+ .ndo_start_xmit = i596_start_xmit,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_tx_timeout = i596_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = i596_poll_controller,
+#endif
+};
+
+static int __devinit i82596_probe(struct net_device *dev)
+{
+ int i;
+ struct i596_private *lp = netdev_priv(dev);
+ struct i596_dma *dma;
+
+ /* This is to ensure things have been cache-line aligned. */
+ BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
+ BUILD_BUG_ON(sizeof(struct i596_rbd) & 31);
+ BUILD_BUG_ON(sizeof(struct tx_cmd) & 31);
+ BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
+#ifndef __LP64__
+ BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
+#endif
+
+ if (!dev->base_addr || !dev->irq)
+ return -ENODEV;
+
+ dma = (struct i596_dma *) DMA_ALLOC(dev->dev.parent,
+ sizeof(struct i596_dma), &lp->dma_addr, GFP_KERNEL);
+ if (!dma) {
+ printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
+ return -ENOMEM;
+ }
+
+ dev->netdev_ops = &i596_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ memset(dma, 0, sizeof(struct i596_dma));
+ lp->dma = dma;
+
+ dma->scb.command = 0;
+ dma->scb.cmd = I596_NULL;
+ dma->scb.rfd = I596_NULL;
+ spin_lock_init(&lp->lock);
+
+ DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
+
+ i = register_netdev(dev);
+ if (i) {
+ DMA_FREE(dev->dev.parent, sizeof(struct i596_dma),
+ (void *)dma, lp->dma_addr);
+ return i;
+ }
+
+ DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
+ dev->name, dev->base_addr, dev->dev_addr,
+ dev->irq));
+ DEB(DEB_INIT, printk(KERN_INFO
+ "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
+ dev->name, dma, (int)sizeof(struct i596_dma),
+ &dma->scb));
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void i596_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ i596_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static irqreturn_t i596_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct i596_private *lp;
+ struct i596_dma *dma;
+ unsigned short status, ack_cmd = 0;
+
+ lp = netdev_priv(dev);
+ dma = lp->dma;
+
+ spin_lock (&lp->lock);
+
+ wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
+ status = SWAP16(dma->scb.status);
+
+ DEB(DEB_INTS, printk(KERN_DEBUG
+ "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
+ dev->name, dev->irq, status));
+
+ ack_cmd = status & 0xf000;
+
+ if (!ack_cmd) {
+ DEB(DEB_ERRORS, printk(KERN_DEBUG
+ "%s: interrupt with no events\n",
+ dev->name));
+ spin_unlock (&lp->lock);
+ return IRQ_NONE;
+ }
+
+ if ((status & 0x8000) || (status & 0x2000)) {
+ struct i596_cmd *ptr;
+
+ if ((status & 0x8000))
+ DEB(DEB_INTS,
+ printk(KERN_DEBUG
+ "%s: i596 interrupt completed command.\n",
+ dev->name));
+ if ((status & 0x2000))
+ DEB(DEB_INTS,
+ printk(KERN_DEBUG
+ "%s: i596 interrupt command unit inactive %x.\n",
+ dev->name, status & 0x0700));
+
+ while (lp->cmd_head != NULL) {
+ DMA_INV(dev, lp->cmd_head, sizeof(struct i596_cmd));
+ if (!(lp->cmd_head->status & SWAP16(STAT_C)))
+ break;
+
+ ptr = lp->cmd_head;
+
+ DEB(DEB_STATUS,
+ printk(KERN_DEBUG
+ "cmd_head->status = %04x, ->command = %04x\n",
+ SWAP16(lp->cmd_head->status),
+ SWAP16(lp->cmd_head->command)));
+ lp->cmd_head = ptr->v_next;
+ lp->cmd_backlog--;
+
+ switch (SWAP16(ptr->command) & 0x7) {
+ case CmdTx:
+ {
+ struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
+ struct sk_buff *skb = tx_cmd->skb;
+
+ if (ptr->status & SWAP16(STAT_OK)) {
+ DEB(DEB_TXADDR,
+ print_eth(skb->data, "tx-done"));
+ } else {
+ dev->stats.tx_errors++;
+ if (ptr->status & SWAP16(0x0020))
+ dev->stats.collisions++;
+ if (!(ptr->status & SWAP16(0x0040)))
+ dev->stats.tx_heartbeat_errors++;
+ if (ptr->status & SWAP16(0x0400))
+ dev->stats.tx_carrier_errors++;
+ if (ptr->status & SWAP16(0x0800))
+ dev->stats.collisions++;
+ if (ptr->status & SWAP16(0x1000))
+ dev->stats.tx_aborted_errors++;
+ }
+ dma_unmap_single(dev->dev.parent,
+ tx_cmd->dma_addr,
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_irq(skb);
+
+ tx_cmd->cmd.command = 0; /* Mark free */
+ break;
+ }
+ case CmdTDR:
+ {
+ unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);
+
+ if (status & 0x8000) {
+ DEB(DEB_ANY,
+ printk(KERN_DEBUG "%s: link ok.\n",
+ dev->name));
+ } else {
+ if (status & 0x4000)
+ printk(KERN_ERR
+ "%s: Transceiver problem.\n",
+ dev->name);
+ if (status & 0x2000)
+ printk(KERN_ERR
+ "%s: Termination problem.\n",
+ dev->name);
+ if (status & 0x1000)
+ printk(KERN_ERR
+ "%s: Short circuit.\n",
+ dev->name);
+
+ DEB(DEB_TDR,
+ printk(KERN_DEBUG "%s: Time %d.\n",
+ dev->name, status & 0x07ff));
+ }
+ break;
+ }
+ case CmdConfigure:
+ /*
+ * Zap the command so set_multicast_list() knows
+ * it is free
+ */
+ ptr->command = 0;
+ break;
+ }
+ ptr->v_next = NULL;
+ ptr->b_next = I596_NULL;
+ DMA_WBACK(dev, ptr, sizeof(struct i596_cmd));
+ lp->last_cmd = jiffies;
+ }
+
+ /* This arranges that only the last of any outstanding
+ * commands has the interrupt bit set; we should probably really
+ * only add to the cmd queue when the CU is stopped.
+ */
+ ptr = lp->cmd_head;
+ while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
+ struct i596_cmd *prev = ptr;
+
+ ptr->command &= SWAP16(0x1fff);
+ ptr = ptr->v_next;
+ DMA_WBACK_INV(dev, prev, sizeof(struct i596_cmd));
+ }
+
+ if (lp->cmd_head != NULL)
+ ack_cmd |= CUC_START;
+ dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
+ DMA_WBACK_INV(dev, &dma->scb, sizeof(struct i596_scb));
+ }
+ if ((status & 0x1000) || (status & 0x4000)) {
+ if ((status & 0x4000))
+ DEB(DEB_INTS,
+ printk(KERN_DEBUG
+ "%s: i596 interrupt received a frame.\n",
+ dev->name));
+ i596_rx(dev);
+ /* Only RX_START if stopped - RGH 07-07-96 */
+ if (status & 0x1000) {
+ if (netif_running(dev)) {
+ DEB(DEB_ERRORS,
+ printk(KERN_DEBUG
+ "%s: i596 interrupt receive unit inactive, status 0x%x\n",
+ dev->name, status));
+ ack_cmd |= RX_START;
+ dev->stats.rx_errors++;
+ dev->stats.rx_fifo_errors++;
+ rebuild_rx_bufs(dev);
+ }
+ }
+ }
+ wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
+ dma->scb.command = SWAP16(ack_cmd);
+ DMA_WBACK(dev, &dma->scb, sizeof(struct i596_scb));
+
+ /* DANGER: I suspect that some kind of interrupt
+ acknowledgement aside from acking the 82596 might be needed
+ here... but it's running acceptably without it */
+
+ ca(dev);
+
+ wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
+ DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
+
+ spin_unlock (&lp->lock);
+ return IRQ_HANDLED;
+}
+
+static int i596_close(struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+
+ DEB(DEB_INIT,
+ printk(KERN_DEBUG
+ "%s: Shutting down ethercard, status was %4.4x.\n",
+ dev->name, SWAP16(lp->dma->scb.status)));
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ wait_cmd(dev, lp->dma, 100, "close1 timed out");
+ lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
+ DMA_WBACK(dev, &lp->dma->scb, sizeof(struct i596_scb));
+
+ ca(dev);
+
+ wait_cmd(dev, lp->dma, 100, "close2 timed out");
+ spin_unlock_irqrestore(&lp->lock, flags);
+ DEB(DEB_STRUCT, i596_display_data(dev));
+ i596_cleanup_cmd(dev, lp);
+
+ free_irq(dev->irq, dev);
+ remove_rx_bufs(dev);
+
+ return 0;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+ struct i596_dma *dma = lp->dma;
+ int config = 0, cnt;
+
+ DEB(DEB_MULTI,
+ printk(KERN_DEBUG
+ "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
+ dev->name, netdev_mc_count(dev),
+ dev->flags & IFF_PROMISC ? "ON" : "OFF",
+ dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
+
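+ /* In the i82596 configure block, byte 8 bit 0 selects promiscuous
+ * mode, while byte 11 bit 5 apparently acts as a multicast-all
+ * *disable* bit - hence the inverted IFF_ALLMULTI tests below. */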
+ if ((dev->flags & IFF_PROMISC) &&
+ !(dma->cf_cmd.i596_config[8] & 0x01)) {
+ dma->cf_cmd.i596_config[8] |= 0x01;
+ config = 1;
+ }
+ if (!(dev->flags & IFF_PROMISC) &&
+ (dma->cf_cmd.i596_config[8] & 0x01)) {
+ dma->cf_cmd.i596_config[8] &= ~0x01;
+ config = 1;
+ }
+ if ((dev->flags & IFF_ALLMULTI) &&
+ (dma->cf_cmd.i596_config[11] & 0x20)) {
+ dma->cf_cmd.i596_config[11] &= ~0x20;
+ config = 1;
+ }
+ if (!(dev->flags & IFF_ALLMULTI) &&
+ !(dma->cf_cmd.i596_config[11] & 0x20)) {
+ dma->cf_cmd.i596_config[11] |= 0x20;
+ config = 1;
+ }
+ if (config) {
+ if (dma->cf_cmd.cmd.command)
+ printk(KERN_INFO
+ "%s: config change request already queued\n",
+ dev->name);
+ else {
+ dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
+ DMA_WBACK_INV(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
+ i596_add_cmd(dev, &dma->cf_cmd.cmd);
+ }
+ }
+
+ cnt = netdev_mc_count(dev);
+ if (cnt > MAX_MC_CNT) {
+ cnt = MAX_MC_CNT;
+ printk(KERN_NOTICE "%s: Only %d multicast addresses supported\n",
+ dev->name, cnt);
+ }
+
+ if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+ unsigned char *cp;
+ struct mc_cmd *cmd;
+
+ cmd = &dma->mc_cmd;
+ cmd->cmd.command = SWAP16(CmdMulticastList);
+ cmd->mc_cnt = SWAP16(cnt * 6); /* clamped count, see above */
+ cp = cmd->mc_addrs;
+ netdev_for_each_mc_addr(ha, dev) {
+ if (!cnt--)
+ break;
+ memcpy(cp, ha->addr, 6);
+ if (i596_debug > 1)
+ DEB(DEB_MULTI,
+ printk(KERN_DEBUG
+ "%s: Adding address %pM\n",
+ dev->name, cp));
+ cp += 6;
+ }
+ DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
+ i596_add_cmd(dev, &cmd->cmd);
+ }
+}
diff --git a/drivers/net/ethernet/i825xx/lp486e.c b/drivers/net/ethernet/i825xx/lp486e.c
new file mode 100644
index 000000000000..414044b3cb11
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/lp486e.c
@@ -0,0 +1,1339 @@
+/* Intel Professional Workstation/panther ethernet driver */
+/* lp486e.c: A panther 82596 ethernet driver for linux. */
+/*
+ History and copyrights:
+
+ Driver skeleton
+ Written 1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. This software may only be used and
+ distributed according to the terms of the GNU General Public License
+ as modified by SRC, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Apricot
+ Written 1994 by Mark Evans.
+ This driver is for the Apricot 82596 bus-master interface
+
+ Modularised 12/94 Mark Evans
+
+ Professional Workstation
+ Derived from apricot.c by Ard van Breemen
+ <ard@murphy.nl>|<ard@cstmel.hobby.nl>|<ard@cstmel.nl.eu.org>
+
+ Credits:
+ Thanks to Murphy Software BV for letting me write this in their time.
+ Well, actually, I get paid doing this...
+ (Also: see http://www.murphy.nl for murphy, and my homepage ~ard for
+ more information on the Professional Workstation)
+
+ Present version
+ aeb@cwi.nl
+*/
+/*
+ There are currently two motherboards that I know of in the
+ professional workstation. The only one that I know is the
+ intel panther motherboard. -- ard
+*/
+/*
+The pws is equipped with an intel 82596. This is a very intelligent controller
+which runs its own micro-code. Communication with the host processor is done
+through linked lists of commands and buffers in the host processor's memory.
+A complete description of the 82596 is available from intel. Search for
+a file called "29021806.pdf". It is a complete description of the chip itself.
+To use it for the pws some additions are needed regarding generation of
+the PORT and CA signal, and the interrupt glue needed for a pc.
+I/O map:
+PORT SIZE ACTION MEANING
+0xCB0 2 WRITE Lower 16 bits for PORT command
+0xCB2 2 WRITE Upper 16 bits for PORT command, and issue of PORT command
+0xCB4 1 WRITE Generation of CA signal
+0xCB8 1 WRITE Clear interrupt glue
+All other communication is through memory!
+*/
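+/*
+A sketch of the PORT protocol described above (the PORT() and CA()
+helpers defined later in this file do exactly this; 'a' is a 16-byte
+aligned physical address and 'cmd' one of the PORT_* command codes):
+
+ outw((a & 0xffff) | cmd, 0xCB0); lower 16 bits plus command code
+ outw((a >> 16) & 0xffff, 0xCB2); upper 16 bits, issues the command
+ outb(0, 0xCB4); generate CA when needed
+*/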
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#define DRV_NAME "lp486e"
+
+/* debug print flags */
+#define LOG_SRCDST 0x80000000
+#define LOG_STATINT 0x40000000
+#define LOG_STARTINT 0x20000000
+
+#define i596_debug debug
+
+static int i596_debug = 0;
+
+static const char * const medianame[] = {
+ "10baseT", "AUI",
+ "10baseT-FD", "AUI-FD",
+};
+
+#define LP486E_TOTAL_SIZE 16
+
+#define I596_NULL (0xffffffff)
+
+#define CMD_EOL 0x8000 /* The last command of the list, stop. */
+#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
+#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
+
+#define CMD_FLEX 0x0008 /* Enable flexible memory model */
+
+enum commands {
+ CmdNOP = 0,
+ CmdIASetup = 1,
+ CmdConfigure = 2,
+ CmdMulticastList = 3,
+ CmdTx = 4,
+ CmdTDR = 5,
+ CmdDump = 6,
+ CmdDiagnose = 7
+};
+
+#if 0
+static const char *CUcmdnames[8] = { "NOP", "IASetup", "Configure", "MulticastList",
+ "Tx", "TDR", "Dump", "Diagnose" };
+#endif
+
+/* Status word bits */
+#define STAT_CX 0x8000 /* The CU finished executing a command
+ with the Interrupt bit set */
+#define STAT_FR 0x4000 /* The RU finished receiving a frame */
+#define STAT_CNA 0x2000 /* The CU left the active state */
+#define STAT_RNR 0x1000 /* The RU left the active state */
+#define STAT_ACK (STAT_CX | STAT_FR | STAT_CNA | STAT_RNR)
+#define STAT_CUS 0x0700 /* Status of CU: 0: idle, 1: suspended,
+ 2: active, 3-7: unused */
+#define STAT_RUS 0x00f0 /* Status of RU: 0: idle, 1: suspended,
+ 2: no resources, 4: ready,
+ 10: no resources due to no more RBDs,
+ 12: no more RBDs, other: unused */
+#define STAT_T 0x0008 /* Bus throttle timers loaded */
+#define STAT_ZERO 0x0807 /* Always zero */
+
+#if 0
+static char *CUstates[8] = {
+ "idle", "suspended", "active", 0, 0, 0, 0, 0
+};
+static char *RUstates[16] = {
+ "idle", "suspended", "no resources", 0, "ready", 0, 0, 0,
+ 0, 0, "no RBDs", 0, "out of RBDs", 0, 0, 0
+};
+
+static void
+i596_out_status(int status) {
+ int bad = 0;
+ char *s;
+
+ printk("status %4.4x:", status);
+ if (status == 0xffff)
+ printk(" strange..\n");
+ else {
+ if (status & STAT_CX)
+ printk(" CU done");
+ if (status & STAT_CNA)
+ printk(" CU stopped");
+ if (status & STAT_FR)
+ printk(" got a frame");
+ if (status & STAT_RNR)
+ printk(" RU stopped");
+ if (status & STAT_T)
+ printk(" throttled");
+ if (status & STAT_ZERO)
+ bad = 1;
+ s = CUstates[(status & STAT_CUS) >> 8];
+ if (!s)
+ bad = 1;
+ else
+ printk(" CU(%s)", s);
+ s = RUstates[(status & STAT_RUS) >> 4];
+ if (!s)
+ bad = 1;
+ else
+ printk(" RU(%s)", s);
+ if (bad)
+ printk(" bad status");
+ printk("\n");
+ }
+}
+#endif
+
+/* Command word bits */
+#define ACK_CX 0x8000
+#define ACK_FR 0x4000
+#define ACK_CNA 0x2000
+#define ACK_RNR 0x1000
+
+#define CUC_START 0x0100
+#define CUC_RESUME 0x0200
+#define CUC_SUSPEND 0x0300
+#define CUC_ABORT 0x0400
+
+#define RX_START 0x0010
+#define RX_RESUME 0x0020
+#define RX_SUSPEND 0x0030
+#define RX_ABORT 0x0040
+
+typedef u32 phys_addr;
+
+static inline phys_addr
+va_to_pa(void *x) {
+ return x ? virt_to_bus(x) : I596_NULL;
+}
+
+static inline void *
+pa_to_va(phys_addr x) {
+ return (x == I596_NULL) ? NULL : bus_to_virt(x);
+}
+
+/* status bits for cmd */
+#define CMD_STAT_C 0x8000 /* CU command complete */
+#define CMD_STAT_B 0x4000 /* CU command in progress */
+#define CMD_STAT_OK 0x2000 /* CU command completed without errors */
+#define CMD_STAT_A 0x1000 /* CU command abnormally terminated */
+
+struct i596_cmd { /* 8 bytes */
+ unsigned short status;
+ unsigned short command;
+ phys_addr pa_next; /* va_to_pa(struct i596_cmd *next) */
+};
+
+#define EOF 0x8000
+#define SIZE_MASK 0x3fff
+
+struct i596_tbd {
+ unsigned short size;
+ unsigned short pad;
+ phys_addr pa_next; /* va_to_pa(struct i596_tbd *next) */
+ phys_addr pa_data; /* va_to_pa(char *data) */
+ struct sk_buff *skb;
+};
+
+struct tx_cmd {
+ struct i596_cmd cmd;
+ phys_addr pa_tbd; /* va_to_pa(struct i596_tbd *tbd) */
+ unsigned short size;
+ unsigned short pad;
+};
+
+/* status bits for rfd */
+#define RFD_STAT_C 0x8000 /* Frame reception complete */
+#define RFD_STAT_B 0x4000 /* Frame reception in progress */
+#define RFD_STAT_OK 0x2000 /* Frame received without errors */
+#define RFD_STATUS 0x1fff
+#define RFD_LENGTH_ERR 0x1000
+#define RFD_CRC_ERR 0x0800
+#define RFD_ALIGN_ERR 0x0400
+#define RFD_NOBUFS_ERR 0x0200
+#define RFD_DMA_ERR 0x0100 /* DMA overrun failure to acquire system bus */
+#define RFD_SHORT_FRAME_ERR 0x0080
+#define RFD_NOEOP_ERR 0x0040
+#define RFD_TRUNC_ERR 0x0020
+#define RFD_MULTICAST 0x0002 /* 0: destination had our address
+ 1: destination was broadcast/multicast */
+#define RFD_COLLISION 0x0001
+
+/* receive frame descriptor */
+struct i596_rfd {
+ unsigned short stat;
+ unsigned short cmd;
+ phys_addr pa_next; /* va_to_pa(struct i596_rfd *next) */
+ phys_addr pa_rbd; /* va_to_pa(struct i596_rbd *rbd) */
+ unsigned short count;
+ unsigned short size;
+ char data[1532];
+};
+
+#define RBD_EL 0x8000
+#define RBD_P 0x4000
+#define RBD_SIZEMASK 0x3fff
+#define RBD_EOF 0x8000
+#define RBD_F 0x4000
+
+/* receive buffer descriptor */
+struct i596_rbd {
+ unsigned short size;
+ unsigned short pad;
+ phys_addr pa_next; /* va_to_pa(struct i596_tbd *next) */
+ phys_addr pa_data; /* va_to_pa(char *data) */
+ phys_addr pa_prev; /* va_to_pa(struct i596_tbd *prev) */
+
+ /* Driver private part */
+ struct sk_buff *skb;
+};
+
+#define RX_RING_SIZE 64
+#define RX_SKBSIZE (ETH_FRAME_LEN+10)
+#define RX_RBD_SIZE 32
+
+/* System Control Block - 40 bytes */
+struct i596_scb {
+ u16 status; /* 0 */
+ u16 command; /* 2 */
+ phys_addr pa_cmd; /* 4 - va_to_pa(struct i596_cmd *cmd) */
+ phys_addr pa_rfd; /* 8 - va_to_pa(struct i596_rfd *rfd) */
+ u32 crc_err; /* 12 */
+ u32 align_err; /* 16 */
+ u32 resource_err; /* 20 */
+ u32 over_err; /* 24 */
+ u32 rcvdt_err; /* 28 */
+ u32 short_err; /* 32 */
+ u16 t_on; /* 36 */
+ u16 t_off; /* 38 */
+};
+
+/* Intermediate System Configuration Pointer - 8 bytes */
+struct i596_iscp {
+ u32 busy; /* 0 */
+ phys_addr pa_scb; /* 4 - va_to_pa(struct i596_scb *scb) */
+};
+
+/* System Configuration Pointer - 12 bytes */
+struct i596_scp {
+ u32 sysbus; /* 0 */
+ u32 pad; /* 4 */
+ phys_addr pa_iscp; /* 8 - va_to_pa(struct i596_iscp *iscp) */
+};
+
+/* Selftest and dump results - needs 16-byte alignment */
+/*
+ * The size of the dump area is 304 bytes. When the dump is executed
+ * by the Port command an extra word will be appended to the dump area.
+ * The extra word is a copy of the Dump status word (containing the
+ * C, B, OK bits). [I find 0xa006, with a0 for C+OK and 6 for dump]
+ */
+struct i596_dump {
+ u16 dump[153]; /* 304 (= 130h) bytes, i.e. 152 words, plus the appended status word */
+};
+
+struct i596_private { /* aligned to a 16-byte boundary */
+ struct i596_scp scp; /* 0 - needs 16-byte alignment */
+ struct i596_iscp iscp; /* 12 */
+ struct i596_scb scb; /* 20 */
+ u32 dummy; /* 60 */
+ struct i596_dump dump; /* 64 - needs 16-byte alignment */
+
+ struct i596_cmd set_add;
+ char eth_addr[8]; /* directly follows set_add */
+
+ struct i596_cmd set_conf;
+ char i596_config[16]; /* directly follows set_conf */
+
+ struct i596_cmd tdr;
+ unsigned long tdr_stat; /* directly follows tdr */
+
+ int last_restart;
+ struct i596_rbd *rbd_list;
+ struct i596_rbd *rbd_tail;
+ struct i596_rfd *rx_tail;
+ struct i596_cmd *cmd_tail;
+ struct i596_cmd *cmd_head;
+ int cmd_backlog;
+ unsigned long last_cmd;
+ spinlock_t cmd_lock;
+};
+
+static char init_setup[14] = {
+ 0x8E, /* length 14 bytes, prefetch on */
+ 0xC8, /* default: fifo to 8, monitor off */
+ 0x40, /* default: don't save bad frames (apricot.c had 0x80) */
+ 0x2E, /* (default is 0x26)
+ No source address insertion, 8 byte preamble */
+ 0x00, /* default priority and backoff */
+ 0x60, /* default interframe spacing */
+ 0x00, /* default slot time LSB */
+ 0xf2, /* default slot time and nr of retries */
+ 0x00, /* default various bits
+ (0: promiscuous mode, 1: broadcast disable,
+ 2: encoding mode, 3: transmit on no CRS,
+ 4: no CRC insertion, 5: CRC type,
+ 6: bit stuffing, 7: padding) */
+ 0x00, /* default carrier sense and collision detect */
+ 0x40, /* default minimum frame length */
+ 0xff, /* (default is 0xff, and that is what apricot.c has;
+ elp486.c has 0xfb: Enable crc append in memory.) */
+ 0x00, /* default: not full duplex */
+ 0x7f /* (default is 0x3f) multi IA */
+};
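+/*
+ * (Byte 0 above decodes as 0x8E = 0x80 (prefetch on) | 0x0E, i.e. a
+ * configuration block length of 14 bytes, matching the array size.)
+ */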
+
+static int i596_open(struct net_device *dev);
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t i596_interrupt(int irq, void *dev_id);
+static int i596_close(struct net_device *dev);
+static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
+static void print_eth(char *);
+static void set_multicast_list(struct net_device *dev);
+static void i596_tx_timeout(struct net_device *dev);
+
+static int
+i596_timeout(struct net_device *dev, char *msg, int ct) {
+ struct i596_private *lp;
+ int boguscnt = ct;
+
+ lp = netdev_priv(dev);
+ while (lp->scb.command) {
+ if (--boguscnt == 0) {
+ printk("%s: %s timed out - stat %4.4x, cmd %4.4x\n",
+ dev->name, msg,
+ lp->scb.status, lp->scb.command);
+ return 1;
+ }
+ udelay(5);
+ barrier();
+ }
+ return 0;
+}
+
+static inline int
+init_rx_bufs(struct net_device *dev, int num) {
+ struct i596_private *lp;
+ struct i596_rfd *rfd;
+ int i;
+ // struct i596_rbd *rbd;
+
+ lp = netdev_priv(dev);
+ lp->scb.pa_rfd = I596_NULL;
+
+ for (i = 0; i < num; i++) {
+ rfd = kmalloc(sizeof(struct i596_rfd), GFP_KERNEL);
+ if (rfd == NULL)
+ break;
+
+ rfd->stat = 0;
+ rfd->pa_rbd = I596_NULL;
+ rfd->count = 0;
+ rfd->size = 1532;
+ if (i == 0) {
+ rfd->cmd = CMD_EOL;
+ lp->rx_tail = rfd;
+ } else {
+ rfd->cmd = 0;
+ }
+ rfd->pa_next = lp->scb.pa_rfd;
+ lp->scb.pa_rfd = va_to_pa(rfd);
+ lp->rx_tail->pa_next = lp->scb.pa_rfd;
+ }
+
+#if 0
+ for (i = 0; i<RX_RBD_SIZE; i++) {
+ rbd = kmalloc(sizeof(struct i596_rbd), GFP_KERNEL);
+ if (rbd) {
+ rbd->pad = 0;
+ rbd->count = 0;
+ rbd->skb = dev_alloc_skb(RX_SKBSIZE);
+ if (!rbd->skb) {
+ printk("dev_alloc_skb failed");
+ }
+ rbd->next = rfd->rbd;
+ if (i) {
+ rfd->rbd->prev = rbd;
+ rbd->size = RX_SKBSIZE;
+ } else {
+ rbd->size = (RX_SKBSIZE | RBD_EL);
+ lp->rbd_tail = rbd;
+ }
+
+ rfd->rbd = rbd;
+ } else {
+ printk("Could not kmalloc rbd\n");
+ }
+ }
+ lp->rbd_tail->next = rfd->rbd;
+#endif
+ return i;
+}
+
+static inline void
+remove_rx_bufs(struct net_device *dev) {
+ struct i596_private *lp;
+ struct i596_rfd *rfd;
+
+ lp = netdev_priv(dev);
+ lp->rx_tail->pa_next = I596_NULL;
+
+ do {
+ rfd = pa_to_va(lp->scb.pa_rfd);
+ lp->scb.pa_rfd = rfd->pa_next;
+ kfree(rfd);
+ } while (rfd != lp->rx_tail);
+
+ lp->rx_tail = NULL;
+
+#if 0
+ for (lp->rbd_list) {
+ }
+#endif
+}
+
+#define PORT_RESET 0x00 /* reset 82596 */
+#define PORT_SELFTEST 0x01 /* selftest */
+#define PORT_ALTSCP 0x02 /* alternate SCB address */
+#define PORT_DUMP 0x03 /* dump */
+
+#define IOADDR 0xcb0 /* real constant */
+#define IRQ 10 /* default IRQ - can be changed by ECU */
+
+/* The 82596 requires two 16-bit write cycles for a port command */
+static inline void
+PORT(phys_addr a, unsigned int cmd) {
+ if (a & 0xf)
+ printk("lp486e.c: PORT: address not aligned\n");
+ outw(((a & 0xffff) | cmd), IOADDR);
+ outw(((a>>16) & 0xffff), IOADDR+2);
+}
+
+static inline void
+CA(void) {
+ outb(0, IOADDR+4);
+ udelay(8);
+}
+
+static inline void
+CLEAR_INT(void) {
+ outb(0, IOADDR+8);
+}
+
+#if 0
+/* selftest or dump */
+static void
+i596_port_do(struct net_device *dev, int portcmd, char *cmdname) {
+ struct i596_private *lp = netdev_priv(dev);
+ u16 *outp;
+ int i, m;
+
+ memset((void *)&(lp->dump), 0, sizeof(struct i596_dump));
+ outp = &(lp->dump.dump[0]);
+
+ PORT(va_to_pa(outp), portcmd);
+ mdelay(30); /* random, unmotivated */
+
+ printk("lp486e i82596 %s result:\n", cmdname);
+ for (m = ARRAY_SIZE(lp->dump.dump); m && lp->dump.dump[m-1] == 0; m--)
+ ;
+ for (i = 0; i < m; i++) {
+ printk(" %04x", lp->dump.dump[i]);
+ if (i%8 == 7)
+ printk("\n");
+ }
+ printk("\n");
+}
+#endif
+
+static int
+i596_scp_setup(struct net_device *dev) {
+ struct i596_private *lp = netdev_priv(dev);
+ int boguscnt;
+
+ /* Setup SCP, ISCP, SCB */
+ /*
+ * sysbus bits:
+ * only a single byte is significant - here 0x44
+ * 0x80: big endian mode (details depend on stepping)
+ * 0x40: 1
+ * 0x20: interrupt pin is active low
+ * 0x10: lock function disabled
+ * 0x08: external triggering of bus throttle timers
+ * 0x06: 00: 82586 compat mode, 01: segmented mode, 10: linear mode
+ * 0x01: unused
+ */
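+ /* (0x44 therefore decodes as 0x40, the always-one bit, plus 0x04,
+ * i.e. the 0x06 mode field set to binary 10: linear mode.) */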
+ lp->scp.sysbus = 0x00440000; /* linear mode */
+ lp->scp.pad = 0; /* must be zero */
+ lp->scp.pa_iscp = va_to_pa(&(lp->iscp));
+
+ /*
+ * The CPU sets the ISCP to 1 before it gives the first CA()
+ */
+ lp->iscp.busy = 0x0001;
+ lp->iscp.pa_scb = va_to_pa(&(lp->scb));
+
+ lp->scb.command = 0;
+ lp->scb.status = 0;
+ lp->scb.pa_cmd = I596_NULL;
+ /* lp->scb.pa_rfd has been initialised already */
+
+ lp->last_cmd = jiffies;
+ lp->cmd_backlog = 0;
+ lp->cmd_head = NULL;
+
+ /*
+ * Reset the 82596.
+ * We need to wait 10 systemclock cycles, and
+ * 5 serial clock cycles.
+ */
+ PORT(0, PORT_RESET); /* address part ignored */
+ udelay(100);
+
+ /*
+ * Before the CA signal is asserted, the default SCP address
+ * (0x00fffff4) can be changed to a 16-byte aligned value
+ */
+ PORT(va_to_pa(&lp->scp), PORT_ALTSCP); /* change the scp address */
+
+ /*
+ * The initialization procedure begins when a
+ * Channel Attention signal is asserted after a reset.
+ */
+
+ CA();
+
+ /*
+ * The ISCP busy is cleared by the 82596 after the SCB address is read.
+ */
+ boguscnt = 100;
+ while (lp->iscp.busy) {
+ if (--boguscnt == 0) {
+ /* No i82596 present? */
+ printk("%s: i82596 initialization timed out\n",
+ dev->name);
+ return 1;
+ }
+ udelay(5);
+ barrier();
+ }
+ /* I find here boguscnt==100, so no delay was required. */
+
+ return 0;
+}
+
+static int
+init_i596(struct net_device *dev) {
+ struct i596_private *lp;
+
+ if (i596_scp_setup(dev))
+ return 1;
+
+ lp = netdev_priv(dev);
+ lp->scb.command = 0;
+
+ memcpy ((void *)lp->i596_config, init_setup, 14);
+ lp->set_conf.command = CmdConfigure;
+ i596_add_cmd(dev, (void *)&lp->set_conf);
+
+ memcpy ((void *)lp->eth_addr, dev->dev_addr, 6);
+ lp->set_add.command = CmdIASetup;
+ i596_add_cmd(dev, (struct i596_cmd *)&lp->set_add);
+
+ lp->tdr.command = CmdTDR;
+ i596_add_cmd(dev, (struct i596_cmd *)&lp->tdr);
+
+ if (lp->scb.command && i596_timeout(dev, "i82596 init", 200))
+ return 1;
+
+ lp->scb.command = RX_START;
+ CA();
+
+ barrier();
+
+ if (lp->scb.command && i596_timeout(dev, "Receive Unit start", 100))
+ return 1;
+
+ return 0;
+}
+
+/* Receive a single frame */
+static inline int
+i596_rx_one(struct net_device *dev, struct i596_private *lp,
+ struct i596_rfd *rfd, int *frames) {
+
+ if (rfd->stat & RFD_STAT_OK) {
+ /* a good frame */
+ int pkt_len = (rfd->count & 0x3fff);
+ struct sk_buff *skb = dev_alloc_skb(pkt_len);
+
+ (*frames)++;
+
+ if (rfd->cmd & CMD_EOL)
+ printk("Received on EOL\n");
+
+ if (skb == NULL) {
+ printk ("%s: i596_rx Memory squeeze, "
+ "dropping packet.\n", dev->name);
+ dev->stats.rx_dropped++;
+ return 1;
+ }
+
+ memcpy(skb_put(skb,pkt_len), rfd->data, pkt_len);
+
+ skb->protocol = eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ } else {
+#if 0
+ printk("Frame reception error status %04x\n",
+ rfd->stat);
+#endif
+ dev->stats.rx_errors++;
+ if (rfd->stat & RFD_COLLISION)
+ dev->stats.collisions++;
+ if (rfd->stat & RFD_SHORT_FRAME_ERR)
+ dev->stats.rx_length_errors++;
+ if (rfd->stat & RFD_DMA_ERR)
+ dev->stats.rx_over_errors++;
+ if (rfd->stat & RFD_NOBUFS_ERR)
+ dev->stats.rx_fifo_errors++;
+ if (rfd->stat & RFD_ALIGN_ERR)
+ dev->stats.rx_frame_errors++;
+ if (rfd->stat & RFD_CRC_ERR)
+ dev->stats.rx_crc_errors++;
+ if (rfd->stat & RFD_LENGTH_ERR)
+ dev->stats.rx_length_errors++;
+ }
+ rfd->stat = rfd->count = 0;
+ return 0;
+}
+
+static int
+i596_rx(struct net_device *dev) {
+ struct i596_private *lp = netdev_priv(dev);
+ struct i596_rfd *rfd;
+ int frames = 0;
+
+ while (1) {
+ rfd = pa_to_va(lp->scb.pa_rfd);
+ if (!rfd) {
+ printk(KERN_ERR "i596_rx: NULL rfd?\n");
+ return 0;
+ }
+#if 1
+ if (rfd->stat && !(rfd->stat & (RFD_STAT_C | RFD_STAT_B)))
+ printk("SF:%p-%04x\n", rfd, rfd->stat);
+#endif
+ if (!(rfd->stat & RFD_STAT_C))
+ break; /* next one not ready */
+ if (i596_rx_one(dev, lp, rfd, &frames))
+ break; /* out of memory */
+ rfd->cmd = CMD_EOL;
+ lp->rx_tail->cmd = 0;
+ lp->rx_tail = rfd;
+ lp->scb.pa_rfd = rfd->pa_next;
+ barrier();
+ }
+
+ return frames;
+}
+
+static void
+i596_cleanup_cmd(struct net_device *dev) {
+ struct i596_private *lp;
+ struct i596_cmd *cmd;
+
+ lp = netdev_priv(dev);
+ while (lp->cmd_head) {
+ cmd = (struct i596_cmd *)lp->cmd_head;
+
+ lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
+ lp->cmd_backlog--;
+
+ switch ((cmd->command) & 0x7) {
+ case CmdTx: {
+ struct tx_cmd *tx_cmd = (struct tx_cmd *) cmd;
+ struct i596_tbd * tx_cmd_tbd;
+ tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd);
+
+ dev_kfree_skb_any(tx_cmd_tbd->skb);
+
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
+
+ cmd->pa_next = I596_NULL;
+ kfree((unsigned char *)tx_cmd);
+ netif_wake_queue(dev);
+ break;
+ }
+ case CmdMulticastList: {
+ // unsigned short count = *((unsigned short *) (ptr + 1));
+
+ cmd->pa_next = I596_NULL;
+ kfree((unsigned char *)cmd);
+ break;
+ }
+ default: {
+ cmd->pa_next = I596_NULL;
+ break;
+ }
+ }
+ barrier();
+ }
+
+ if (lp->scb.command && i596_timeout(dev, "i596_cleanup_cmd", 100))
+ ;
+
+ lp->scb.pa_cmd = va_to_pa(lp->cmd_head);
+}
+
+static void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr) {
+
+ if (lp->scb.command && i596_timeout(dev, "i596_reset", 100))
+ ;
+
+ netif_stop_queue(dev);
+
+ lp->scb.command = CUC_ABORT | RX_ABORT;
+ CA();
+ barrier();
+
+ /* wait for shutdown */
+ if (lp->scb.command && i596_timeout(dev, "i596_reset(2)", 400))
+ ;
+
+ i596_cleanup_cmd(dev);
+ i596_rx(dev);
+
+ netif_start_queue(dev);
+ /*dev_kfree_skb(skb, FREE_WRITE);*/
+ init_i596(dev);
+}
+
+static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) {
+ struct i596_private *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ cmd->status = 0;
+ cmd->command |= (CMD_EOL | CMD_INTR);
+ cmd->pa_next = I596_NULL;
+
+ spin_lock_irqsave(&lp->cmd_lock, flags);
+
+ if (lp->cmd_head) {
+ lp->cmd_tail->pa_next = va_to_pa(cmd);
+ } else {
+ lp->cmd_head = cmd;
+ if (lp->scb.command && i596_timeout(dev, "i596_add_cmd", 100))
+ ;
+ lp->scb.pa_cmd = va_to_pa(cmd);
+ lp->scb.command = CUC_START;
+ CA();
+ }
+ lp->cmd_tail = cmd;
+ lp->cmd_backlog++;
+
+ lp->cmd_head = pa_to_va(lp->scb.pa_cmd);
+ spin_unlock_irqrestore(&lp->cmd_lock, flags);
+
+ if (lp->cmd_backlog > 16) {
+ int tickssofar = jiffies - lp->last_cmd;
+ if (tickssofar < HZ/4)
+ return;
+
+ printk(KERN_WARNING "%s: command unit timed out, status resetting.\n", dev->name);
+ i596_reset(dev, lp, ioaddr);
+ }
+}
+
+static int i596_open(struct net_device *dev)
+{
+ int i;
+
+ i = request_irq(dev->irq, i596_interrupt, IRQF_SHARED, dev->name, dev);
+ if (i) {
+ printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
+ return i;
+ }
+
+ if ((i = init_rx_bufs(dev, RX_RING_SIZE)) < RX_RING_SIZE)
+ printk(KERN_ERR "%s: only able to allocate %d receive buffers\n", dev->name, i);
+
+ if (i < 4) {
+ free_irq(dev->irq, dev);
+ return -EAGAIN;
+ }
+ netif_start_queue(dev);
+ init_i596(dev);
+ return 0; /* Always succeed */
+}
+
+static netdev_tx_t i596_start_xmit (struct sk_buff *skb, struct net_device *dev) {
+ struct tx_cmd *tx_cmd;
+ short length;
+
+ length = skb->len;
+
+ if (length < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ length = ETH_ZLEN;
+ }
+
+ tx_cmd = kmalloc((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC);
+ if (tx_cmd == NULL) {
+ printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name);
+ dev->stats.tx_dropped++;
+ dev_kfree_skb (skb);
+ } else {
+ struct i596_tbd *tx_cmd_tbd;
+ tx_cmd_tbd = (struct i596_tbd *) (tx_cmd + 1);
+ tx_cmd->pa_tbd = va_to_pa (tx_cmd_tbd);
+ tx_cmd_tbd->pa_next = I596_NULL;
+
+ tx_cmd->cmd.command = (CMD_FLEX | CmdTx);
+
+ tx_cmd->pad = 0;
+ tx_cmd->size = 0;
+ tx_cmd_tbd->pad = 0;
+ tx_cmd_tbd->size = (EOF | length);
+
+ tx_cmd_tbd->pa_data = va_to_pa (skb->data);
+ tx_cmd_tbd->skb = skb;
+
+ if (i596_debug & LOG_SRCDST)
+ print_eth (skb->data);
+
+ i596_add_cmd (dev, (struct i596_cmd *) tx_cmd);
+
+ dev->stats.tx_packets++;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static void
+i596_tx_timeout (struct net_device *dev) {
+ struct i596_private *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ /* Transmitter timeout, serious problems. */
+ printk(KERN_WARNING "%s: transmit timed out, status resetting.\n", dev->name);
+ dev->stats.tx_errors++;
+
+ /* Try to restart the adaptor */
+ if (lp->last_restart == dev->stats.tx_packets) {
+ printk ("Resetting board.\n");
+
+ /* Shutdown and restart */
+ i596_reset (dev, lp, ioaddr);
+ } else {
+ /* Issue a channel attention signal */
+ printk ("Kicking board.\n");
+ lp->scb.command = (CUC_START | RX_START);
+ CA();
+ lp->last_restart = dev->stats.tx_packets;
+ }
+ netif_wake_queue(dev);
+}
+
+static void print_eth(char *add)
+{
+ int i;
+
+ printk ("Dest ");
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", (unsigned char) add[i]);
+ printk ("\n");
+
+ printk ("Source");
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", (unsigned char) add[i+6]);
+ printk ("\n");
+
+ printk ("type %2.2X%2.2X\n",
+ (unsigned char) add[12], (unsigned char) add[13]);
+}
+
+static const struct net_device_ops i596_netdev_ops = {
+ .ndo_open = i596_open,
+ .ndo_stop = i596_close,
+ .ndo_start_xmit = i596_start_xmit,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_tx_timeout = i596_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __init lp486e_probe(struct net_device *dev) {
+ struct i596_private *lp;
+ unsigned char eth_addr[6] = { 0, 0xaa, 0, 0, 0, 0 };
+ unsigned char *bios;
+ int i, j;
+ int ret = -ENOMEM;
+ static int probed;
+
+ if (probed)
+ return -ENODEV;
+ probed++;
+
+ if (!request_region(IOADDR, LP486E_TOTAL_SIZE, DRV_NAME)) {
+ printk(KERN_ERR "lp486e: IO address 0x%x in use\n", IOADDR);
+ return -EBUSY;
+ }
+
+ lp = netdev_priv(dev);
+ spin_lock_init(&lp->cmd_lock);
+
+ /*
+ * Do we really have this thing?
+ */
+ if (i596_scp_setup(dev)) {
+ ret = -ENODEV;
+ goto err_out_kfree;
+ }
+
+ dev->base_addr = IOADDR;
+ dev->irq = IRQ;
+
+ /*
+ * How do we find the ethernet address? I don't know.
+ * One possibility is to look at the EISA configuration area
+ * [0xe8000-0xe9fff]. This contains the ethernet address
+ * but not at a fixed address - things depend on setup options.
+ *
+ * If we find no address, or the wrong address, use
+ * ifconfig eth0 hw ether a1:a2:a3:a4:a5:a6
+ * with the value found in the BIOS setup.
+ */
+ bios = bus_to_virt(0xe8000);
+ for (j = 0; j < 0x2000; j++) {
+ if (bios[j] == 0 && bios[j+1] == 0xaa && bios[j+2] == 0) {
+ printk("%s: maybe address at BIOS 0x%x:",
+ dev->name, 0xe8000+j);
+ for (i = 0; i < 6; i++) {
+ eth_addr[i] = bios[i+j];
+ printk(" %2.2X", eth_addr[i]);
+ }
+ printk("\n");
+ }
+ }
+
+ printk("%s: lp486e 82596 at %#3lx, IRQ %d,",
+ dev->name, dev->base_addr, dev->irq);
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]);
+ printk("\n");
+
+ /* The LP486E-specific entries in the device structure. */
+ dev->netdev_ops = &i596_netdev_ops;
+ dev->watchdog_timeo = 5*HZ;
+
+#if 0
+ /* selftest reports 0x320925ae - don't know what that means */
+ i596_port_do(dev, PORT_SELFTEST, "selftest");
+ i596_port_do(dev, PORT_DUMP, "dump");
+#endif
+ return 0;
+
+err_out_kfree:
+ release_region(IOADDR, LP486E_TOTAL_SIZE);
+ return ret;
+}
+
+static inline void
+i596_handle_CU_completion(struct net_device *dev,
+ struct i596_private *lp,
+ unsigned short status,
+ unsigned short *ack_cmdp) {
+ struct i596_cmd *cmd;
+ int frames_out = 0;
+ int commands_done = 0;
+ int cmd_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->cmd_lock, flags);
+ cmd = lp->cmd_head;
+
+ while (lp->cmd_head && (lp->cmd_head->status & CMD_STAT_C)) {
+ cmd = lp->cmd_head;
+
+ lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
+ lp->cmd_backlog--;
+
+ commands_done++;
+ cmd_val = cmd->command & 0x7;
+#if 0
+ printk("finished CU %s command (%d)\n",
+ CUcmdnames[cmd_val], cmd_val);
+#endif
+ switch (cmd_val) {
+ case CmdTx:
+ {
+ struct tx_cmd *tx_cmd;
+ struct i596_tbd *tx_cmd_tbd;
+
+ tx_cmd = (struct tx_cmd *) cmd;
+ tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd);
+
+ frames_out++;
+ if (cmd->status & CMD_STAT_OK) {
+ if (i596_debug)
+ print_eth(pa_to_va(tx_cmd_tbd->pa_data));
+ } else {
+ dev->stats.tx_errors++;
+ if (i596_debug)
+ printk("transmission failure:%04x\n",
+ cmd->status);
+ if (cmd->status & 0x0020)
+ dev->stats.collisions++;
+ if (!(cmd->status & 0x0040))
+ dev->stats.tx_heartbeat_errors++;
+ if (cmd->status & 0x0400)
+ dev->stats.tx_carrier_errors++;
+ if (cmd->status & 0x0800)
+ dev->stats.collisions++;
+ if (cmd->status & 0x1000)
+ dev->stats.tx_aborted_errors++;
+ }
+ dev_kfree_skb_irq(tx_cmd_tbd->skb);
+
+ cmd->pa_next = I596_NULL;
+ kfree((unsigned char *)tx_cmd);
+ netif_wake_queue(dev);
+ break;
+ }
+
+ case CmdMulticastList:
+ cmd->pa_next = I596_NULL;
+ kfree((unsigned char *)cmd);
+ break;
+
+ case CmdTDR:
+ {
+ unsigned long status = *((unsigned long *) (cmd + 1));
+ if (status & 0x8000) {
+ if (i596_debug)
+ printk("%s: link ok.\n", dev->name);
+ } else {
+ if (status & 0x4000)
+ printk("%s: Transceiver problem.\n",
+ dev->name);
+ if (status & 0x2000)
+ printk("%s: Termination problem.\n",
+ dev->name);
+ if (status & 0x1000)
+ printk("%s: Short circuit.\n",
+ dev->name);
+ printk("%s: Time %ld.\n",
+ dev->name, status & 0x07ff);
+ }
+ }
+ /* fall through */
+ default:
+ cmd->pa_next = I596_NULL;
+ lp->last_cmd = jiffies;
+
+ }
+ barrier();
+ }
+
+ cmd = lp->cmd_head;
+ while (cmd && (cmd != lp->cmd_tail)) {
+ cmd->command &= 0x1fff;
+ cmd = pa_to_va(cmd->pa_next);
+ barrier();
+ }
+
+ if (lp->cmd_head)
+ *ack_cmdp |= CUC_START;
+ lp->scb.pa_cmd = va_to_pa(lp->cmd_head);
+ spin_unlock_irqrestore(&lp->cmd_lock, flags);
+}
+
+static irqreturn_t
+i596_interrupt(int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct i596_private *lp = netdev_priv(dev);
+ unsigned short status, ack_cmd = 0;
+ int frames_in = 0;
+
+ /*
+ * The 82596 examines the command, performs the required action,
+ * and then clears the SCB command word.
+ */
+ if (lp->scb.command && i596_timeout(dev, "interrupt", 40))
+ ;
+
+ /*
+ * The status word indicates the status of the 82596.
+ * It is modified only by the 82596.
+ *
+ * [So, we must not clear it. I find often status 0xffff,
+ * which is not one of the values allowed by the docs.]
+ */
+ status = lp->scb.status;
+#if 0
+ if (i596_debug) {
+ printk("%s: i596 interrupt, ", dev->name);
+ i596_out_status(status);
+ }
+#endif
+ /* Impossible, but it happens - perhaps when we get
+ a receive interrupt but scb.pa_rfd is I596_NULL. */
+ if (status == 0xffff) {
+ printk("%s: i596_interrupt: got status 0xffff\n", dev->name);
+ goto out;
+ }
+
+ ack_cmd = (status & STAT_ACK);
+
+ if (status & (STAT_CX | STAT_CNA))
+ i596_handle_CU_completion(dev, lp, status, &ack_cmd);
+
+ if (status & (STAT_FR | STAT_RNR)) {
+ /* Restart the receive unit when it got inactive somehow */
+ if ((status & STAT_RNR) && netif_running(dev))
+ ack_cmd |= RX_START;
+
+ if (status & STAT_FR) {
+ frames_in = i596_rx(dev);
+ if (!frames_in)
+ printk("receive frame reported, but no frames\n");
+ }
+ }
+
+ /* acknowledge the interrupt */
+ /*
+ if ((lp->scb.pa_cmd != I596_NULL) && netif_running(dev))
+ ack_cmd |= CUC_START;
+ */
+
+ if (lp->scb.command && i596_timeout(dev, "i596 interrupt", 100))
+ ;
+
+ lp->scb.command = ack_cmd;
+
+ CLEAR_INT();
+ CA();
+
+ out:
+ return IRQ_HANDLED;
+}
+
+static int i596_close(struct net_device *dev) {
+ struct i596_private *lp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ if (i596_debug)
+ printk("%s: Shutting down ethercard, status was %4.4x.\n",
+ dev->name, lp->scb.status);
+
+ lp->scb.command = (CUC_ABORT | RX_ABORT);
+ CA();
+
+ i596_cleanup_cmd(dev);
+
+ if (lp->scb.command && i596_timeout(dev, "i596_close", 200))
+ ;
+
+ free_irq(dev->irq, dev);
+ remove_rx_bufs(dev);
+
+ return 0;
+}
+
+/*
+* Set or clear the multicast filter for this adaptor.
+*/
+
+static void set_multicast_list(struct net_device *dev) {
+ struct i596_private *lp = netdev_priv(dev);
+ struct i596_cmd *cmd;
+
+ if (i596_debug > 1)
+ printk ("%s: set multicast list %d\n",
+ dev->name, netdev_mc_count(dev));
+
+ if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+ char *cp;
+ cmd = kmalloc(sizeof(struct i596_cmd) + 2 +
+ netdev_mc_count(dev) * 6, GFP_ATOMIC);
+ if (cmd == NULL) {
+ printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name);
+ return;
+ }
+ cmd->command = CmdMulticastList;
+ *((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6;
+ cp = ((char *)(cmd + 1))+2;
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(cp, ha->addr, 6);
+ cp += 6;
+ }
+ if (i596_debug & LOG_SRCDST)
+ print_eth (((char *)(cmd + 1)) + 2);
+ i596_add_cmd(dev, cmd);
+ } else {
+ if (lp->set_conf.pa_next != I596_NULL) {
+ return;
+ }
+ if (netdev_mc_empty(dev) &&
+ !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
+ lp->i596_config[8] &= ~0x01;
+ } else {
+ lp->i596_config[8] |= 0x01;
+ }
+
+ i596_add_cmd(dev, (struct i596_cmd *) &lp->set_conf);
+ }
+}
+
+MODULE_AUTHOR("Ard van Breemen <ard@cstmel.nl.eu.org>");
+MODULE_DESCRIPTION("Intel Panther onboard i82596 driver");
+MODULE_LICENSE("GPL");
+
+static struct net_device *dev_lp486e;
+static int full_duplex;
+static int options;
+static int io = IOADDR;
+static int irq = IRQ;
+
+module_param(debug, int, 0);
+//module_param(max_interrupt_work, int, 0);
+//module_param(reverse_probe, int, 0);
+//module_param(rx_copybreak, int, 0);
+module_param(options, int, 0);
+module_param(full_duplex, int, 0);
+
+static int __init lp486e_init_module(void) {
+ int err;
+ struct net_device *dev = alloc_etherdev(sizeof(struct i596_private));
+ if (!dev)
+ return -ENOMEM;
+
+ dev->irq = irq;
+ dev->base_addr = io;
+ err = lp486e_probe(dev);
+ if (err) {
+ free_netdev(dev);
+ return err;
+ }
+ err = register_netdev(dev);
+ if (err) {
+ release_region(dev->base_addr, LP486E_TOTAL_SIZE);
+ free_netdev(dev);
+ return err;
+ }
+ dev_lp486e = dev;
+ full_duplex = 0;
+ options = 0;
+ return 0;
+}
+
+static void __exit lp486e_cleanup_module(void) {
+ unregister_netdev(dev_lp486e);
+ release_region(dev_lp486e->base_addr, LP486E_TOTAL_SIZE);
+ free_netdev(dev_lp486e);
+}
+
+module_init(lp486e_init_module);
+module_exit(lp486e_cleanup_module);
diff --git a/drivers/net/ethernet/i825xx/ni52.c b/drivers/net/ethernet/i825xx/ni52.c
new file mode 100644
index 000000000000..c0893715ef47
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/ni52.c
@@ -0,0 +1,1346 @@
+/*
+ * net-3-driver for the NI5210 card (i82586 Ethernet chip)
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same GNU General Public License that covers that work.
+ *
+ * Alphacode 0.82 (96/09/29) for Linux 2.0.0 (or later)
+ * Copyrights (c) 1994,1995,1996 by M.Hipp (hippm@informatik.uni-tuebingen.de)
+ * [feel free to mail ....]
+ *
+ * when using as module: (no autoprobing!)
+ * run with e.g:
+ * insmod ni52.o io=0x360 irq=9 memstart=0xd0000 memend=0xd4000
+ *
+ * PLEASE REPORT YOUR PERFORMANCE EXPERIENCES TO ME!
+ *
+ * If you find a bug, please report to me:
+ * the kernel panic output and any kmsg from the ni52 driver,
+ * the ni5210-driver version and the linux-kernel version,
+ * how much shared memory (memsize) is on the netcard,
+ * bootprom: yes/no, base_addr, mem_start
+ * maybe the ni5210-card revision and the i82586 version
+ *
+ * autoprobe for: base_addr: 0x300,0x280,0x360,0x320,0x340
+ * mem_start: 0xd0000,0xd2000,0xc8000,0xca000,0xd4000,0xd6000,
+ * 0xd8000,0xcc000,0xce000,0xda000,0xdc000
+ *
+ * sources:
+ * skeleton.c from Donald Becker
+ *
+ * I have also done a look in the following sources: (mail me if you need them)
+ * crynwr-packet-driver by Russ Nelson
+ * Garret A. Wollman's (fourth) i82586-driver for BSD
+ * (before getting an i82596 (yes 596 not 586) manual, the existing drivers
+ * helped me a lot to understand this tricky chip.)
+ *
+ * Known Problems:
+ * The internal sysbus seems to be slow. So we often lose packets because of
+ * overruns while receiving from a fast remote host.
+ * This can slow down TCP connections. Maybe the newer ni5210 cards are
+ * better. My experience is that if a machine sends at more than about
+ * 500-600K/s, the fifo/sysbus overflows.
+ *
+ * IMPORTANT NOTE:
+ * On fast networks, it's a (very) good idea to have 16K shared memory. With
+ * 8K, we can store only 4 receive frames, so it can (easily) happen that a
+ * remote machine 'overruns' our system.
+ *
+ * Known i82586/card problems (I'm sure, there are many more!):
+ * Running in NOP mode, the i82586 sometimes seems to forget to report
+ * every xmit-interrupt until we restart the CU.
+ * Another MAJOR bug is that the RU sometimes seems to ignore the EL-Bit
+ * in the RBD-Struct which indicates an end of the RBD queue.
+ * Instead, the RU fetches another (randomly selected and
+ * usually used) RBD and begins to fill it. (Maybe this happens only if
+ * the last buffer from the previous RFD fits exactly into the queue and
+ * the next RFD can't fetch an initial RBD. Does anyone know more?)
+ *
+ * results from ftp performance tests with Linux 1.2.5
+ * send and receive about 350-400 KByte/s (peak up to 460 kbytes/s)
+ * sending in NOP-mode: peak performance up to 530K/s (but it is better
+ * not to run in this mode)
+ */
+
+/*
+ * 29.Sept.96: virt_to_bus changes for new memory scheme
+ * 19.Feb.96: more Mcast changes, module support (MH)
+ *
+ * 18.Nov.95: Mcast changes (AC).
+ *
+ * 23.April.95: fixed(?) receiving problems by configuring a RFD more
+ * than the number of RBD's. Can maybe cause other problems.
+ * 18.April.95: Added MODULE support (MH)
+ * 17.April.95: MC related changes in init586() and set_multicast_list().
+ * removed use of 'jiffies' in init586() (MH)
+ *
+ * 19.Sep.94: Added Multicast support (not tested yet) (MH)
+ *
+ * 18.Sep.94: Workaround for 'EL-Bug'. Removed flexible RBD-handling.
+ * Now, every RFD has exactly one RBD. (MH)
+ *
+ * 14.Sep.94: added promiscuous mode, a few cleanups (MH)
+ *
+ * 19.Aug.94: changed request_irq() parameter (MH)
+ *
+ * 20.July.94: removed cleanup bugs, removed a 16K-mem-probe-bug (MH)
+ *
+ * 19.July.94: lotsa cleanups .. (MH)
+ *
+ * 17.July.94: some patches ... verified to run with 1.1.29 (MH)
+ *
+ * 4.July.94: patches for Linux 1.1.24 (MH)
+ *
+ * 26.March.94: patches for Linux 1.0 and iomem-auto-probe (MH)
+ *
+ * 30.Sep.93: Added nop-chain .. driver now runs with only one Xmit-Buff,
+ * too (MH)
+ *
+ * < 30.Sep.93: first versions
+ */
+
+static int debuglevel; /* debug-printk 0: off 1: a few 2: more */
+static int automatic_resume; /* experimental .. better should be zero */
+static int rfdadd; /* rfdadd=1 may be better for 8K MEM cards */
+static int fifo = 0x8; /* don't change */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "ni52.h"
+
+#define DRV_NAME "ni52"
+
+#define DEBUG /* debug on */
+#define SYSBUSVAL 1 /* 8 Bit */
+
+#define ni_attn586() { outb(0, dev->base_addr + NI52_ATTENTION); }
+#define ni_reset586() { outb(0, dev->base_addr + NI52_RESET); }
+#define ni_disint() { outb(0, dev->base_addr + NI52_INTDIS); }
+#define ni_enaint() { outb(0, dev->base_addr + NI52_INTENA); }
+
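+/*
+ * These macros translate between host iomem pointers and the addresses
+ * the i82586 sees: make24() yields the 24-bit bus address (p->base is
+ * set 16MB below the top of the mapped window, so shared memory ends at
+ * the top of the chip's 24-bit space), while make16()/make32() convert
+ * to and from 16-bit offsets relative to p->memtop.
+ */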
+#define make32(ptr16) ((void __iomem *)(p->memtop + (short) (ptr16)))
+#define make24(ptr32) ((char __iomem *)(ptr32)) - p->base
+#define make16(ptr32) ((unsigned short) ((char __iomem *)(ptr32)\
+ - p->memtop))
+
+/******************* how to calculate the buffers *****************************
+
+ * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works
+ * --------------- in a different (more stable?) mode. Only in this mode it's
+ * possible to configure the driver with 'NO_NOPCOMMANDS'
+
+sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8;
+sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT
+sizeof(rfd) = 24; sizeof(rbd) = 12;
+sizeof(tbd) = 8; sizeof(transmit_cmd) = 16;
+sizeof(nop_cmd) = 8;
+
+ * if you don't know the driver, better do not change these values: */
+
+#define RECV_BUFF_SIZE 1524 /* slightly oversized */
+#define XMIT_BUFF_SIZE 1524 /* slightly oversized */
+#define NUM_XMIT_BUFFS 1 /* config for both, 8K and 16K shmem */
+#define NUM_RECV_BUFFS_8 4 /* config for 8K shared mem */
+#define NUM_RECV_BUFFS_16 9 /* config for 16K shared mem */
+#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */
+
+/**************************************************************************/
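+/* A worked check of the defaults above (assuming both nop commands are
+ * allocated even with NO_NOPCOMMANDS): for 8K shmem with NUM_XMIT_BUFFS=1,
+ * 36 + (1524+8+16) + 2*8 + 4*(1524+24+12) = 7840 <= 8192 bytes,
+ * and for 16K shmem, 36 + 1548 + 16 + 9*1560 = 15640 <= 16384 bytes. */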
+
+
+#define NI52_TOTAL_SIZE 16
+#define NI52_ADDR0 0x02
+#define NI52_ADDR1 0x07
+#define NI52_ADDR2 0x01
+
+static int ni52_probe1(struct net_device *dev, int ioaddr);
+static irqreturn_t ni52_interrupt(int irq, void *dev_id);
+static int ni52_open(struct net_device *dev);
+static int ni52_close(struct net_device *dev);
+static netdev_tx_t ni52_send_packet(struct sk_buff *, struct net_device *);
+static struct net_device_stats *ni52_get_stats(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static void ni52_timeout(struct net_device *dev);
+
+/* helper-functions */
+static int init586(struct net_device *dev);
+static int check586(struct net_device *dev, unsigned size);
+static void alloc586(struct net_device *dev);
+static void startrecv586(struct net_device *dev);
+static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr);
+static void ni52_rcv_int(struct net_device *dev);
+static void ni52_xmt_int(struct net_device *dev);
+static void ni52_rnr_int(struct net_device *dev);
+
+struct priv {
+ char __iomem *base;
+ char __iomem *mapped;
+ char __iomem *memtop;
+ spinlock_t spinlock;
+ int reset;
+ struct rfd_struct __iomem *rfd_last, *rfd_top, *rfd_first;
+ struct scp_struct __iomem *scp;
+ struct iscp_struct __iomem *iscp;
+ struct scb_struct __iomem *scb;
+ struct tbd_struct __iomem *xmit_buffs[NUM_XMIT_BUFFS];
+#if (NUM_XMIT_BUFFS == 1)
+ struct transmit_cmd_struct __iomem *xmit_cmds[2];
+ struct nop_cmd_struct __iomem *nop_cmds[2];
+#else
+ struct transmit_cmd_struct __iomem *xmit_cmds[NUM_XMIT_BUFFS];
+ struct nop_cmd_struct __iomem *nop_cmds[NUM_XMIT_BUFFS];
+#endif
+ int nop_point, num_recv_buffs;
+ char __iomem *xmit_cbuffs[NUM_XMIT_BUFFS];
+ int xmit_count, xmit_last;
+};
+
+/* wait for command with timeout: */
+static void wait_for_scb_cmd(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+ int i;
+ for (i = 0; i < 16384; i++) {
+ if (readb(&p->scb->cmd_cuc) == 0)
+ break;
+ udelay(4);
+ if (i == 16383) {
+ printk(KERN_ERR "%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n",
+ dev->name, readb(&p->scb->cmd_cuc), readb(&p->scb->cus));
+ if (!p->reset) {
+ p->reset = 1;
+ ni_reset586();
+ }
+ }
+ }
+}
+
+static void wait_for_scb_cmd_ruc(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+ int i;
+ for (i = 0; i < 16384; i++) {
+ if (readb(&p->scb->cmd_ruc) == 0)
+ break;
+ udelay(4);
+ if (i == 16383) {
+ printk(KERN_ERR "%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",
+ dev->name, readb(&p->scb->cmd_ruc),
+ readb(&p->scb->rus));
+ if (!p->reset) {
+ p->reset = 1;
+ ni_reset586();
+ }
+ }
+ }
+}
+
+static void wait_for_stat_compl(void __iomem *p)
+{
+ struct nop_cmd_struct __iomem *addr = p;
+ int i;
+ for (i = 0; i < 32767; i++) {
+ if (readw(&((addr)->cmd_status)) & STAT_COMPL)
+ break;
+ udelay(32);
+ }
+}
+
+/**********************************************
+ * close device
+ */
+static int ni52_close(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+ ni_reset586(); /* the hard way to stop the receiver */
+ netif_stop_queue(dev);
+ return 0;
+}
+
+/**********************************************
+ * open device
+ */
+static int ni52_open(struct net_device *dev)
+{
+ int ret;
+
+ ni_disint();
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+ ni_enaint();
+
+ ret = request_irq(dev->irq, ni52_interrupt, 0, dev->name, dev);
+ if (ret) {
+ ni_reset586();
+ return ret;
+ }
+ netif_start_queue(dev);
+ return 0; /* most done by init */
+}
+
+static int check_iscp(struct net_device *dev, void __iomem *addr)
+{
+ struct iscp_struct __iomem *iscp = addr;
+ struct priv *p = netdev_priv(dev);
+ memset_io(iscp, 0, sizeof(struct iscp_struct));
+
+ writel(make24(iscp), &p->scp->iscp);
+ writeb(1, &iscp->busy);
+
+ ni_reset586();
+ ni_attn586();
+ mdelay(32); /* wait a while... */
+ /* i82586 clears 'busy' after successful init */
+ if (readb(&iscp->busy))
+ return 0;
+ return 1;
+}
+
+/**********************************************
+ * Check to see if there's an 82586 out there.
+ */
+static int check586(struct net_device *dev, unsigned size)
+{
+ struct priv *p = netdev_priv(dev);
+ int i;
+
+ p->mapped = ioremap(dev->mem_start, size);
+ if (!p->mapped)
+ return 0;
+
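+	/*
+	 * Bias 'base' so that the end of the shared-memory window coincides
+	 * with the top of the chip's 24-bit address space; addresses handed
+	 * to the chip are computed relative to this base, which places the
+	 * SCP (base + SCP_DEFAULT_ADDRESS) in the last sizeof(struct
+	 * scp_struct) bytes of the window.
+	 */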
+ p->base = p->mapped + size - 0x01000000;
+ p->memtop = p->mapped + size;
+ p->scp = (struct scp_struct __iomem *)(p->base + SCP_DEFAULT_ADDRESS);
+ p->scb = (struct scb_struct __iomem *) p->mapped;
+ p->iscp = (struct iscp_struct __iomem *)p->scp - 1;
+ memset_io(p->scp, 0, sizeof(struct scp_struct));
+ for (i = 0; i < sizeof(struct scp_struct); i++)
+ /* memory was writeable? */
+ if (readb((char __iomem *)p->scp + i))
+ goto Enodev;
+ writeb(SYSBUSVAL, &p->scp->sysbus); /* 1 = 8Bit-Bus, 0 = 16 Bit */
+ if (readb(&p->scp->sysbus) != SYSBUSVAL)
+ goto Enodev;
+
+ if (!check_iscp(dev, p->mapped))
+ goto Enodev;
+ if (!check_iscp(dev, p->iscp))
+ goto Enodev;
+ return 1;
+Enodev:
+ iounmap(p->mapped);
+ return 0;
+}
+
+/******************************************************************
+ * set iscp at the right place, called by ni52_probe1 and open586.
+ */
+static void alloc586(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+
+ ni_reset586();
+ mdelay(32);
+
+ memset_io(p->iscp, 0, sizeof(struct iscp_struct));
+ memset_io(p->scp , 0, sizeof(struct scp_struct));
+
+ writel(make24(p->iscp), &p->scp->iscp);
+ writeb(SYSBUSVAL, &p->scp->sysbus);
+ writew(make16(p->scb), &p->iscp->scb_offset);
+
+ writeb(1, &p->iscp->busy);
+ ni_reset586();
+ ni_attn586();
+
+ mdelay(32);
+
+ if (readb(&p->iscp->busy))
+ printk(KERN_ERR "%s: Init-Problems (alloc).\n", dev->name);
+
+ p->reset = 0;
+
+ memset_io(p->scb, 0, sizeof(struct scb_struct));
+}
+
+/* set io, irq, memstart, memend here, or pass them when loading via insmod */
+static int irq = 9;
+static int io = 0x300;
+static long memstart; /* e.g 0xd0000 */
+static long memend; /* e.g 0xd4000 */
+
+/**********************************************
+ * probe the ni5210-card
+ */
+struct net_device * __init ni52_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct priv));
+ static const int ports[] = {0x300, 0x280, 0x360, 0x320, 0x340, 0};
+ const int *port;
+ struct priv *p;
+ int err = 0;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ p = netdev_priv(dev);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ io = dev->base_addr;
+ irq = dev->irq;
+ memstart = dev->mem_start;
+ memend = dev->mem_end;
+ }
+
+ if (io > 0x1ff) { /* Check a single specified location. */
+ err = ni52_probe1(dev, io);
+ } else if (io > 0) { /* Don't probe at all. */
+ err = -ENXIO;
+ } else {
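+		/*
+		 * ni52_probe1() returns 0 on success, so the scan stops at
+		 * the first port that probes clean or at the 0 terminator.
+		 */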
+ for (port = ports; *port && ni52_probe1(dev, *port) ; port++)
+ ;
+ if (*port)
+ goto got_it;
+#ifdef FULL_IO_PROBE
+ for (io = 0x200; io < 0x400 && ni52_probe1(dev, io); io += 8)
+ ;
+ if (io < 0x400)
+ goto got_it;
+#endif
+ err = -ENODEV;
+ }
+ if (err)
+ goto out;
+got_it:
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ iounmap(p->mapped);
+ release_region(dev->base_addr, NI52_TOTAL_SIZE);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static const struct net_device_ops ni52_netdev_ops = {
+ .ndo_open = ni52_open,
+ .ndo_stop = ni52_close,
+ .ndo_get_stats = ni52_get_stats,
+ .ndo_tx_timeout = ni52_timeout,
+ .ndo_start_xmit = ni52_send_packet,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __init ni52_probe1(struct net_device *dev, int ioaddr)
+{
+ int i, size, retval;
+ struct priv *priv = netdev_priv(dev);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ dev->mem_start = memstart;
+ dev->mem_end = memend;
+
+ spin_lock_init(&priv->spinlock);
+
+ if (!request_region(ioaddr, NI52_TOTAL_SIZE, DRV_NAME))
+ return -EBUSY;
+
+	if (inb(ioaddr+NI52_MAGIC1) != NI52_MAGICVAL1 ||
+	    inb(ioaddr+NI52_MAGIC2) != NI52_MAGICVAL2) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = inb(dev->base_addr+i);
+
+ if (dev->dev_addr[0] != NI52_ADDR0 || dev->dev_addr[1] != NI52_ADDR1 ||
+ dev->dev_addr[2] != NI52_ADDR2) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ printk(KERN_INFO "%s: NI5210 found at %#3lx, ",
+ dev->name, dev->base_addr);
+
+ /*
+ * check (or search) IO-Memory, 8K and 16K
+ */
+#ifdef MODULE
+ size = dev->mem_end - dev->mem_start;
+ if (size != 0x2000 && size != 0x4000) {
+ printk("\n");
+ printk(KERN_ERR "%s: Invalid memory size %d. Allowed is 0x2000 or 0x4000 bytes.\n", dev->name, size);
+ retval = -ENODEV;
+ goto out;
+ }
+ if (!check586(dev, size)) {
+ printk(KERN_ERR "?memcheck, Can't find memory at 0x%lx with size %d!\n", dev->mem_start, size);
+ retval = -ENODEV;
+ goto out;
+ }
+#else
+ if (dev->mem_start != 0) {
+ /* no auto-mem-probe */
+ size = 0x4000; /* check for 16K mem */
+ if (!check586(dev, size)) {
+ size = 0x2000; /* check for 8K mem */
+ if (!check586(dev, size)) {
+ printk(KERN_ERR "?memprobe, Can't find memory at 0x%lx!\n", dev->mem_start);
+ retval = -ENODEV;
+ goto out;
+ }
+ }
+ } else {
+ static const unsigned long memaddrs[] = {
+ 0xc8000, 0xca000, 0xcc000, 0xce000, 0xd0000, 0xd2000,
+ 0xd4000, 0xd6000, 0xd8000, 0xda000, 0xdc000, 0
+ };
+ for (i = 0;; i++) {
+ if (!memaddrs[i]) {
+ printk(KERN_ERR "?memprobe, Can't find io-memory!\n");
+ retval = -ENODEV;
+ goto out;
+ }
+ dev->mem_start = memaddrs[i];
+ size = 0x2000; /* check for 8K mem */
+ if (check586(dev, size))
+ /* 8K-check */
+ break;
+ size = 0x4000; /* check for 16K mem */
+ if (check586(dev, size))
+ /* 16K-check */
+ break;
+ }
+ }
+	/* set mem_end as shown by 'ifconfig' */
+ dev->mem_end = dev->mem_start + size;
+#endif
+
+ alloc586(dev);
+
+ /* set number of receive-buffs according to memsize */
+ if (size == 0x2000)
+ priv->num_recv_buffs = NUM_RECV_BUFFS_8;
+ else
+ priv->num_recv_buffs = NUM_RECV_BUFFS_16;
+
+ printk(KERN_DEBUG "Memaddr: 0x%lx, Memsize: %d, ",
+ dev->mem_start, size);
+
+ if (dev->irq < 2) {
+ unsigned long irq_mask;
+
+ irq_mask = probe_irq_on();
+ ni_reset586();
+ ni_attn586();
+
+ mdelay(20);
+ dev->irq = probe_irq_off(irq_mask);
+ if (!dev->irq) {
+ printk("?autoirq, Failed to detect IRQ line!\n");
+ retval = -EAGAIN;
+ iounmap(priv->mapped);
+ goto out;
+ }
+ printk("IRQ %d (autodetected).\n", dev->irq);
+ } else {
+ if (dev->irq == 2)
+ dev->irq = 9;
+ printk("IRQ %d (assigned and not checked!).\n", dev->irq);
+ }
+
+ dev->netdev_ops = &ni52_netdev_ops;
+ dev->watchdog_timeo = HZ/20;
+
+ return 0;
+out:
+ release_region(ioaddr, NI52_TOTAL_SIZE);
+ return retval;
+}
+
+/**********************************************
+ * init the chip (ni52-interrupt should be disabled?!)
+ * needs a correct 'allocated' memory
+ */
+
+static int init586(struct net_device *dev)
+{
+ void __iomem *ptr;
+ int i, result = 0;
+ struct priv *p = netdev_priv(dev);
+ struct configure_cmd_struct __iomem *cfg_cmd;
+ struct iasetup_cmd_struct __iomem *ias_cmd;
+ struct tdr_cmd_struct __iomem *tdr_cmd;
+ struct mcsetup_cmd_struct __iomem *mc_cmd;
+ struct netdev_hw_addr *ha;
+ int num_addrs = netdev_mc_count(dev);
+
+ ptr = p->scb + 1;
+
+ cfg_cmd = ptr; /* configure-command */
+ writew(0, &cfg_cmd->cmd_status);
+ writew(CMD_CONFIGURE | CMD_LAST, &cfg_cmd->cmd_cmd);
+ writew(0xFFFF, &cfg_cmd->cmd_link);
+
+ /* number of cfg bytes */
+ writeb(0x0a, &cfg_cmd->byte_cnt);
+ /* fifo-limit (8=tx:32/rx:64) */
+ writeb(fifo, &cfg_cmd->fifo);
+ /* hold or discard bad recv frames (bit 7) */
+ writeb(0x40, &cfg_cmd->sav_bf);
+ /* addr_len |!src_insert |pre-len |loopback */
+ writeb(0x2e, &cfg_cmd->adr_len);
+ writeb(0x00, &cfg_cmd->priority);
+ writeb(0x60, &cfg_cmd->ifs);
+ writeb(0x00, &cfg_cmd->time_low);
+ writeb(0xf2, &cfg_cmd->time_high);
+ writeb(0x00, &cfg_cmd->promisc);
+ if (dev->flags & IFF_ALLMULTI) {
+ int len = ((char __iomem *)p->iscp - (char __iomem *)ptr - 8) / 6;
+ if (num_addrs > len) {
+ printk(KERN_ERR "%s: switching to promisc. mode\n",
+ dev->name);
+ writeb(0x01, &cfg_cmd->promisc);
+ }
+ }
+ if (dev->flags & IFF_PROMISC)
+ writeb(0x01, &cfg_cmd->promisc);
+ writeb(0x00, &cfg_cmd->carr_coll);
+ writew(make16(cfg_cmd), &p->scb->cbl_offset);
+ writeb(0, &p->scb->cmd_ruc);
+
+ writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */
+ ni_attn586();
+
+ wait_for_stat_compl(cfg_cmd);
+
+ if ((readw(&cfg_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) !=
+ (STAT_COMPL|STAT_OK)) {
+ printk(KERN_ERR "%s: configure command failed: %x\n",
+ dev->name, readw(&cfg_cmd->cmd_status));
+ return 1;
+ }
+
+ /*
+ * individual address setup
+ */
+
+ ias_cmd = ptr;
+
+ writew(0, &ias_cmd->cmd_status);
+ writew(CMD_IASETUP | CMD_LAST, &ias_cmd->cmd_cmd);
+ writew(0xffff, &ias_cmd->cmd_link);
+
+ memcpy_toio(&ias_cmd->iaddr, (char *)dev->dev_addr, ETH_ALEN);
+
+ writew(make16(ias_cmd), &p->scb->cbl_offset);
+
+ writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */
+ ni_attn586();
+
+ wait_for_stat_compl(ias_cmd);
+
+ if ((readw(&ias_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) !=
+ (STAT_OK|STAT_COMPL)) {
+ printk(KERN_ERR "%s (ni52): individual address setup command failed: %04x\n", dev->name, readw(&ias_cmd->cmd_status));
+ return 1;
+ }
+
+ /*
+	 * TDR, wire check .. e.g. no terminating resistor, etc.
+ */
+
+ tdr_cmd = ptr;
+
+ writew(0, &tdr_cmd->cmd_status);
+ writew(CMD_TDR | CMD_LAST, &tdr_cmd->cmd_cmd);
+ writew(0xffff, &tdr_cmd->cmd_link);
+ writew(0, &tdr_cmd->status);
+
+ writew(make16(tdr_cmd), &p->scb->cbl_offset);
+ writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */
+ ni_attn586();
+
+ wait_for_stat_compl(tdr_cmd);
+
+ if (!(readw(&tdr_cmd->cmd_status) & STAT_COMPL))
+ printk(KERN_ERR "%s: Problems while running the TDR.\n",
+ dev->name);
+ else {
+ udelay(16);
+ result = readw(&tdr_cmd->status);
+ writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc);
+ ni_attn586(); /* ack the interrupts */
+
+ if (result & TDR_LNK_OK)
+ ;
+ else if (result & TDR_XCVR_PRB)
+ printk(KERN_ERR "%s: TDR: Transceiver problem. Check the cable(s)!\n",
+ dev->name);
+ else if (result & TDR_ET_OPN)
+ printk(KERN_ERR "%s: TDR: No correct termination %d clocks away.\n",
+ dev->name, result & TDR_TIMEMASK);
+ else if (result & TDR_ET_SRT) {
+ /* time == 0 -> strange :-) */
+ if (result & TDR_TIMEMASK)
+ printk(KERN_ERR "%s: TDR: Detected a short circuit %d clocks away.\n",
+ dev->name, result & TDR_TIMEMASK);
+ } else
+ printk(KERN_ERR "%s: TDR: Unknown status %04x\n",
+ dev->name, result);
+ }
+
+ /*
+ * Multicast setup
+ */
+ if (num_addrs && !(dev->flags & IFF_PROMISC)) {
+ mc_cmd = ptr;
+ writew(0, &mc_cmd->cmd_status);
+ writew(CMD_MCSETUP | CMD_LAST, &mc_cmd->cmd_cmd);
+ writew(0xffff, &mc_cmd->cmd_link);
+ writew(num_addrs * 6, &mc_cmd->mc_cnt);
+
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy_toio(mc_cmd->mc_list[i++], ha->addr, 6);
+
+ writew(make16(mc_cmd), &p->scb->cbl_offset);
+ writeb(CUC_START, &p->scb->cmd_cuc);
+ ni_attn586();
+
+ wait_for_stat_compl(mc_cmd);
+
+ if ((readw(&mc_cmd->cmd_status) & (STAT_COMPL|STAT_OK))
+ != (STAT_COMPL|STAT_OK))
+ printk(KERN_ERR "%s: Can't apply multicast-address-list.\n", dev->name);
+ }
+
+ /*
+ * alloc nop/xmit-cmds
+ */
+#if (NUM_XMIT_BUFFS == 1)
+ for (i = 0; i < 2; i++) {
+ p->nop_cmds[i] = ptr;
+ writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd);
+ writew(0, &p->nop_cmds[i]->cmd_status);
+ writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link);
+ ptr = ptr + sizeof(struct nop_cmd_struct);
+ }
+#else
+ for (i = 0; i < NUM_XMIT_BUFFS; i++) {
+ p->nop_cmds[i] = ptr;
+ writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd);
+ writew(0, &p->nop_cmds[i]->cmd_status);
+ writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link);
+ ptr = ptr + sizeof(struct nop_cmd_struct);
+ }
+#endif
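+	/*
+	 * Each NOP command initially links back to itself: once started on a
+	 * NOP, the CU idles in place, and in the NOP-driven transmit paths
+	 * ni52_send_packet() splices a transmit command into this loop
+	 * rather than restarting the command unit for every frame.
+	 */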
+
+ ptr = alloc_rfa(dev, ptr); /* init receive-frame-area */
+
+ /*
+ * alloc xmit-buffs / init xmit_cmds
+ */
+ for (i = 0; i < NUM_XMIT_BUFFS; i++) {
+ /* Transmit cmd/buff 0 */
+ p->xmit_cmds[i] = ptr;
+ ptr = ptr + sizeof(struct transmit_cmd_struct);
+ p->xmit_cbuffs[i] = ptr; /* char-buffs */
+ ptr = ptr + XMIT_BUFF_SIZE;
+ p->xmit_buffs[i] = ptr; /* TBD */
+ ptr = ptr + sizeof(struct tbd_struct);
+ if ((void __iomem *)ptr > (void __iomem *)p->iscp) {
+ printk(KERN_ERR "%s: not enough shared-mem for your configuration!\n",
+ dev->name);
+ return 1;
+ }
+ memset_io(p->xmit_cmds[i], 0,
+ sizeof(struct transmit_cmd_struct));
+ memset_io(p->xmit_buffs[i], 0,
+ sizeof(struct tbd_struct));
+ writew(make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]),
+ &p->xmit_cmds[i]->cmd_link);
+ writew(STAT_COMPL, &p->xmit_cmds[i]->cmd_status);
+ writew(CMD_XMIT|CMD_INT, &p->xmit_cmds[i]->cmd_cmd);
+ writew(make16(p->xmit_buffs[i]), &p->xmit_cmds[i]->tbd_offset);
+ writew(0xffff, &p->xmit_buffs[i]->next);
+ writel(make24(p->xmit_cbuffs[i]), &p->xmit_buffs[i]->buffer);
+ }
+
+ p->xmit_count = 0;
+ p->xmit_last = 0;
+#ifndef NO_NOPCOMMANDS
+ p->nop_point = 0;
+#endif
+
+ /*
+ * 'start transmitter'
+ */
+#ifndef NO_NOPCOMMANDS
+ writew(make16(p->nop_cmds[0]), &p->scb->cbl_offset);
+ writeb(CUC_START, &p->scb->cmd_cuc);
+ ni_attn586();
+ wait_for_scb_cmd(dev);
+#else
+ writew(make16(p->xmit_cmds[0]), &p->xmit_cmds[0]->cmd_link);
+ writew(CMD_XMIT | CMD_SUSPEND | CMD_INT, &p->xmit_cmds[0]->cmd_cmd);
+#endif
+
+ /*
+ * ack. interrupts
+ */
+ writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc);
+ ni_attn586();
+ udelay(16);
+
+ ni_enaint();
+
+ return 0;
+}
+
+/******************************************************
+ * This is a helper routine for ni52_rnr_int() and init586().
+ * It sets up the Receive Frame Area (RFA).
+ */
+
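+/*
+ * Memory layout produced, starting at 'ptr': the RFDs
+ * (num_recv_buffs + rfdadd of them, circularly linked via 'next'),
+ * then one RBD per receive buffer (also circularly linked), then the
+ * data buffers themselves. Returns the first free address past the
+ * area.
+ */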
+static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr)
+{
+ struct rfd_struct __iomem *rfd = ptr;
+ struct rbd_struct __iomem *rbd;
+ int i;
+ struct priv *p = netdev_priv(dev);
+
+ memset_io(rfd, 0,
+ sizeof(struct rfd_struct) * (p->num_recv_buffs + rfdadd));
+ p->rfd_first = rfd;
+
+ for (i = 0; i < (p->num_recv_buffs + rfdadd); i++) {
+ writew(make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd)),
+ &rfd[i].next);
+ writew(0xffff, &rfd[i].rbd_offset);
+ }
+ /* RU suspend */
+ writeb(RFD_SUSP, &rfd[p->num_recv_buffs-1+rfdadd].last);
+
+ ptr = rfd + (p->num_recv_buffs + rfdadd);
+
+ rbd = ptr;
+ ptr = rbd + p->num_recv_buffs;
+
+ /* clr descriptors */
+ memset_io(rbd, 0, sizeof(struct rbd_struct) * (p->num_recv_buffs));
+
+ for (i = 0; i < p->num_recv_buffs; i++) {
+ writew(make16(rbd + (i+1) % p->num_recv_buffs), &rbd[i].next);
+ writew(RECV_BUFF_SIZE, &rbd[i].size);
+ writel(make24(ptr), &rbd[i].buffer);
+ ptr = ptr + RECV_BUFF_SIZE;
+ }
+ p->rfd_top = p->rfd_first;
+ p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd);
+
+ writew(make16(p->rfd_first), &p->scb->rfa_offset);
+ writew(make16(rbd), &p->rfd_first->rbd_offset);
+
+ return ptr;
+}
+
+
+/**************************************************
+ * Interrupt Handler ...
+ */
+
+static irqreturn_t ni52_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ unsigned int stat;
+ int cnt = 0;
+ struct priv *p;
+
+ p = netdev_priv(dev);
+
+ if (debuglevel > 1)
+ printk("I");
+
+ spin_lock(&p->spinlock);
+
+ wait_for_scb_cmd(dev); /* wait for last command */
+
+ while ((stat = readb(&p->scb->cus) & STAT_MASK)) {
+ writeb(stat, &p->scb->cmd_cuc);
+ ni_attn586();
+
+ if (stat & STAT_FR) /* received a frame */
+ ni52_rcv_int(dev);
+
+ if (stat & STAT_RNR) { /* RU went 'not ready' */
+ printk("(R)");
+ if (readb(&p->scb->rus) & RU_SUSPEND) {
+ /* special case: RU_SUSPEND */
+ wait_for_scb_cmd(dev);
+ writeb(RUC_RESUME, &p->scb->cmd_ruc);
+ ni_attn586();
+ wait_for_scb_cmd_ruc(dev);
+ } else {
+ printk(KERN_ERR "%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",
+ dev->name, stat, readb(&p->scb->rus));
+ ni52_rnr_int(dev);
+ }
+ }
+
+ /* Command with I-bit set complete */
+ if (stat & STAT_CX)
+ ni52_xmt_int(dev);
+
+#ifndef NO_NOPCOMMANDS
+ if (stat & STAT_CNA) { /* CU went 'not ready' */
+ if (netif_running(dev))
+ printk(KERN_ERR "%s: oops! CU has left active state. stat: %04x/%02x.\n",
+ dev->name, stat, readb(&p->scb->cus));
+ }
+#endif
+
+ if (debuglevel > 1)
+ printk("%d", cnt++);
+
+ /* Wait for ack. (ni52_xmt_int can be faster than ack!!) */
+ wait_for_scb_cmd(dev);
+ if (readb(&p->scb->cmd_cuc)) { /* timed out? */
+ printk(KERN_ERR "%s: Acknowledge timed out.\n",
+ dev->name);
+ ni_disint();
+ break;
+ }
+ }
+ spin_unlock(&p->spinlock);
+
+ if (debuglevel > 1)
+ printk("i");
+ return IRQ_HANDLED;
+}
+
+/*******************************************************
+ * receive-interrupt
+ */
+
+static void ni52_rcv_int(struct net_device *dev)
+{
+ int status, cnt = 0;
+ unsigned short totlen;
+ struct sk_buff *skb;
+ struct rbd_struct __iomem *rbd;
+ struct priv *p = netdev_priv(dev);
+
+ if (debuglevel > 0)
+ printk("R");
+
+ for (; (status = readb(&p->rfd_top->stat_high)) & RFD_COMPL;) {
+ rbd = make32(readw(&p->rfd_top->rbd_offset));
+ if (status & RFD_OK) { /* frame received without error? */
+ totlen = readw(&rbd->status);
+ if (totlen & RBD_LAST) {
+ /* the first and the last buffer? */
+ totlen &= RBD_MASK; /* length of this frame */
+ writew(0x00, &rbd->status);
+				skb = dev_alloc_skb(totlen+2);
+ if (skb != NULL) {
+ skb_reserve(skb, 2);
+ skb_put(skb, totlen);
+ memcpy_fromio(skb->data, p->base + readl(&rbd->buffer), totlen);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += totlen;
+ } else
+ dev->stats.rx_dropped++;
+ } else {
+ int rstat;
+ /* free all RBD's until RBD_LAST is set */
+ totlen = 0;
+ while (!((rstat = readw(&rbd->status)) & RBD_LAST)) {
+ totlen += rstat & RBD_MASK;
+ if (!rstat) {
+ printk(KERN_ERR "%s: Whoops .. no end mark in RBD list\n", dev->name);
+ break;
+ }
+ writew(0, &rbd->status);
+ rbd = make32(readw(&rbd->next));
+ }
+ totlen += rstat & RBD_MASK;
+ writew(0, &rbd->status);
+ printk(KERN_ERR "%s: received oversized frame! length: %d\n",
+ dev->name, totlen);
+ dev->stats.rx_dropped++;
+ }
+ } else {/* frame !(ok), only with 'save-bad-frames' */
+ printk(KERN_ERR "%s: oops! rfd-error-status: %04x\n",
+ dev->name, status);
+ dev->stats.rx_errors++;
+ }
+ writeb(0, &p->rfd_top->stat_high);
+ writeb(RFD_SUSP, &p->rfd_top->last); /* maybe exchange by RFD_LAST */
+ writew(0xffff, &p->rfd_top->rbd_offset);
+ writeb(0, &p->rfd_last->last); /* delete RFD_SUSP */
+ p->rfd_last = p->rfd_top;
+ p->rfd_top = make32(readw(&p->rfd_top->next)); /* step to next RFD */
+ writew(make16(p->rfd_top), &p->scb->rfa_offset);
+
+ if (debuglevel > 0)
+ printk("%d", cnt++);
+ }
+
+ if (automatic_resume) {
+ wait_for_scb_cmd(dev);
+ writeb(RUC_RESUME, &p->scb->cmd_ruc);
+ ni_attn586();
+ wait_for_scb_cmd_ruc(dev);
+ }
+
+#ifdef WAIT_4_BUSY
+ {
+ int i;
+ for (i = 0; i < 1024; i++) {
+			if (readb(&p->rfd_top->stat_high))
+ break;
+ udelay(16);
+ if (i == 1023)
+ printk(KERN_ERR "%s: RU hasn't fetched next RFD (not busy/complete)\n", dev->name);
+ }
+ }
+#endif
+ if (debuglevel > 0)
+ printk("r");
+}
+
+/**********************************************************
+ * handle 'Receiver went not ready'.
+ */
+
+static void ni52_rnr_int(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+
+ dev->stats.rx_errors++;
+
+ wait_for_scb_cmd(dev); /* wait for the last cmd, WAIT_4_FULLSTAT?? */
+ writeb(RUC_ABORT, &p->scb->cmd_ruc); /* usually the RU is in the 'no resource'-state .. abort it now. */
+ ni_attn586();
+ wait_for_scb_cmd_ruc(dev); /* wait for accept cmd. */
+
+ alloc_rfa(dev, p->rfd_first);
+ /* maybe add a check here, before restarting the RU */
+ startrecv586(dev); /* restart RU */
+
+ printk(KERN_ERR "%s: Receive-Unit restarted. Status: %04x\n",
+ dev->name, readb(&p->scb->rus));
+
+}
+
+/**********************************************************
+ * handle xmit - interrupt
+ */
+
+static void ni52_xmt_int(struct net_device *dev)
+{
+ int status;
+ struct priv *p = netdev_priv(dev);
+
+ if (debuglevel > 0)
+ printk("X");
+
+ status = readw(&p->xmit_cmds[p->xmit_last]->cmd_status);
+ if (!(status & STAT_COMPL))
+ printk(KERN_ERR "%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name);
+
+ if (status & STAT_OK) {
+ dev->stats.tx_packets++;
+ dev->stats.collisions += (status & TCMD_MAXCOLLMASK);
+ } else {
+ dev->stats.tx_errors++;
+ if (status & TCMD_LATECOLL) {
+ printk(KERN_ERR "%s: late collision detected.\n",
+ dev->name);
+ dev->stats.collisions++;
+ } else if (status & TCMD_NOCARRIER) {
+ dev->stats.tx_carrier_errors++;
+ printk(KERN_ERR "%s: no carrier detected.\n",
+ dev->name);
+ } else if (status & TCMD_LOSTCTS)
+ printk(KERN_ERR "%s: loss of CTS detected.\n",
+ dev->name);
+ else if (status & TCMD_UNDERRUN) {
+ dev->stats.tx_fifo_errors++;
+ printk(KERN_ERR "%s: DMA underrun detected.\n",
+ dev->name);
+ } else if (status & TCMD_MAXCOLL) {
+ printk(KERN_ERR "%s: Max. collisions exceeded.\n",
+ dev->name);
+ dev->stats.collisions += 16;
+ }
+ }
+#if (NUM_XMIT_BUFFS > 1)
+ if ((++p->xmit_last) == NUM_XMIT_BUFFS)
+ p->xmit_last = 0;
+#endif
+ netif_wake_queue(dev);
+}
+
+/***********************************************************
+ * (re)start the receiver
+ */
+
+static void startrecv586(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+
+ wait_for_scb_cmd(dev);
+ wait_for_scb_cmd_ruc(dev);
+ writew(make16(p->rfd_first), &p->scb->rfa_offset);
+ writeb(RUC_START, &p->scb->cmd_ruc);
+ ni_attn586(); /* start cmd. */
+ wait_for_scb_cmd_ruc(dev);
+	/* wait for the start cmd. to be accepted */
+}
+
+static void ni52_timeout(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+#ifndef NO_NOPCOMMANDS
+ if (readb(&p->scb->cus) & CU_ACTIVE) { /* COMMAND-UNIT active? */
+ netif_wake_queue(dev);
+#ifdef DEBUG
+ printk(KERN_ERR "%s: strange ... timeout with CU active?!?\n",
+ dev->name);
+ printk(KERN_ERR "%s: X0: %04x N0: %04x N1: %04x %d\n",
+			dev->name, readw(&p->xmit_cmds[0]->cmd_status),
+ readw(&p->nop_cmds[0]->cmd_status),
+ readw(&p->nop_cmds[1]->cmd_status),
+ p->nop_point);
+#endif
+ writeb(CUC_ABORT, &p->scb->cmd_cuc);
+ ni_attn586();
+ wait_for_scb_cmd(dev);
+ writew(make16(p->nop_cmds[p->nop_point]), &p->scb->cbl_offset);
+ writeb(CUC_START, &p->scb->cmd_cuc);
+ ni_attn586();
+ wait_for_scb_cmd(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+		return;
+ }
+#endif
+ {
+#ifdef DEBUG
+ printk(KERN_ERR "%s: xmitter timed out, try to restart! stat: %02x\n",
+ dev->name, readb(&p->scb->cus));
+ printk(KERN_ERR "%s: command-stats: %04x %04x\n",
+ dev->name,
+ readw(&p->xmit_cmds[0]->cmd_status),
+ readw(&p->xmit_cmds[1]->cmd_status));
+ printk(KERN_ERR "%s: check, whether you set the right interrupt number!\n",
+ dev->name);
+#endif
+ ni52_close(dev);
+ ni52_open(dev);
+ }
+ dev->trans_start = jiffies; /* prevent tx timeout */
+}
+
+/******************************************************
+ * send frame
+ */
+
+static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ int len, i;
+#ifndef NO_NOPCOMMANDS
+ int next_nop;
+#endif
+ struct priv *p = netdev_priv(dev);
+
+ if (skb->len > XMIT_BUFF_SIZE) {
+ printk(KERN_ERR "%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n", dev->name, XMIT_BUFF_SIZE, skb->len);
+ return NETDEV_TX_OK;
+ }
+
+ netif_stop_queue(dev);
+
+ memcpy_toio(p->xmit_cbuffs[p->xmit_count], skb->data, skb->len);
+ len = skb->len;
+ if (len < ETH_ZLEN) {
+ len = ETH_ZLEN;
+ memset_io(p->xmit_cbuffs[p->xmit_count]+skb->len, 0,
+ len - skb->len);
+ }
+
+#if (NUM_XMIT_BUFFS == 1)
+# ifdef NO_NOPCOMMANDS
+
+#ifdef DEBUG
+ if (readb(&p->scb->cus) & CU_ACTIVE) {
+ printk(KERN_ERR "%s: Hmmm .. CU is still running and we wanna send a new packet.\n", dev->name);
+ printk(KERN_ERR "%s: stat: %04x %04x\n",
+ dev->name, readb(&p->scb->cus),
+ readw(&p->xmit_cmds[0]->cmd_status));
+ }
+#endif
+ writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
+ for (i = 0; i < 16; i++) {
+ writew(0, &p->xmit_cmds[0]->cmd_status);
+ wait_for_scb_cmd(dev);
+ if ((readb(&p->scb->cus) & CU_STATUS) == CU_SUSPEND)
+ writeb(CUC_RESUME, &p->scb->cmd_cuc);
+ else {
+ writew(make16(p->xmit_cmds[0]), &p->scb->cbl_offset);
+ writeb(CUC_START, &p->scb->cmd_cuc);
+ }
+ ni_attn586();
+ if (!i)
+ dev_kfree_skb(skb);
+ wait_for_scb_cmd(dev);
+ /* test it, because CU sometimes doesn't start immediately */
+ if (readb(&p->scb->cus) & CU_ACTIVE)
+ break;
+ if (readw(&p->xmit_cmds[0]->cmd_status))
+ break;
+ if (i == 15)
+ printk(KERN_WARNING "%s: Can't start transmit-command.\n", dev->name);
+ }
+# else
+ next_nop = (p->nop_point + 1) & 0x1;
+ writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
+ writew(make16(p->nop_cmds[next_nop]), &p->xmit_cmds[0]->cmd_link);
+ writew(make16(p->nop_cmds[next_nop]),
+ &p->nop_cmds[next_nop]->cmd_link);
+ writew(0, &p->xmit_cmds[0]->cmd_status);
+ writew(0, &p->nop_cmds[next_nop]->cmd_status);
+
+ writew(make16(p->xmit_cmds[0]), &p->nop_cmds[p->nop_point]->cmd_link);
+ p->nop_point = next_nop;
+ dev_kfree_skb(skb);
+# endif
+#else
+ writew(TBD_LAST | len, &p->xmit_buffs[p->xmit_count]->size);
+	next_nop = p->xmit_count + 1;
+ if (next_nop == NUM_XMIT_BUFFS)
+ next_nop = 0;
+ writew(0, &p->xmit_cmds[p->xmit_count]->cmd_status);
+ /* linkpointer of xmit-command already points to next nop cmd */
+ writew(make16(p->nop_cmds[next_nop]),
+ &p->nop_cmds[next_nop]->cmd_link);
+ writew(0, &p->nop_cmds[next_nop]->cmd_status);
+ writew(make16(p->xmit_cmds[p->xmit_count]),
+ &p->nop_cmds[p->xmit_count]->cmd_link);
+ p->xmit_count = next_nop;
+ {
+ unsigned long flags;
+		spin_lock_irqsave(&p->spinlock, flags);
+		if (p->xmit_count != p->xmit_last)
+			netif_wake_queue(dev);
+		spin_unlock_irqrestore(&p->spinlock, flags);
+ }
+ dev_kfree_skb(skb);
+#endif
+ return NETDEV_TX_OK;
+}
+
+/*******************************************
+ * Someone wanna have the statistics
+ */
+
+static struct net_device_stats *ni52_get_stats(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+ unsigned short crc, aln, rsc, ovrn;
+
+ /* Get error-statistics from the ni82586 */
+ crc = readw(&p->scb->crc_errs);
+ writew(0, &p->scb->crc_errs);
+ aln = readw(&p->scb->aln_errs);
+ writew(0, &p->scb->aln_errs);
+ rsc = readw(&p->scb->rsc_errs);
+ writew(0, &p->scb->rsc_errs);
+ ovrn = readw(&p->scb->ovrn_errs);
+ writew(0, &p->scb->ovrn_errs);
+
+ dev->stats.rx_crc_errors += crc;
+ dev->stats.rx_fifo_errors += ovrn;
+ dev->stats.rx_frame_errors += aln;
+ dev->stats.rx_dropped += rsc;
+
+ return &dev->stats;
+}
+
+/********************************************************
+ * Set MC list ..
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ ni_disint();
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+ ni_enaint();
+ netif_wake_queue(dev);
+}
+
+#ifdef MODULE
+static struct net_device *dev_ni52;
+
+module_param(io, int, 0);
+module_param(irq, int, 0);
+module_param(memstart, long, 0);
+module_param(memend, long, 0);
+MODULE_PARM_DESC(io, "NI5210 I/O base address, required");
+MODULE_PARM_DESC(irq, "NI5210 IRQ number, required");
+MODULE_PARM_DESC(memstart, "NI5210 memory base address, required");
+MODULE_PARM_DESC(memend, "NI5210 memory end address, required");
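+
+/*
+ * Typical load, using the example values noted above (adjust to match
+ * the card's jumper settings):
+ *	insmod ni52 io=0x300 irq=9 memstart=0xd0000 memend=0xd4000
+ */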
+
+int __init init_module(void)
+{
+ if (io <= 0x0 || !memend || !memstart || irq < 2) {
+ printk(KERN_ERR "ni52: Autoprobing not allowed for modules.\n");
+ printk(KERN_ERR "ni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n");
+ return -ENODEV;
+ }
+ dev_ni52 = ni52_probe(-1);
+ if (IS_ERR(dev_ni52))
+ return PTR_ERR(dev_ni52);
+ return 0;
+}
+
+void __exit cleanup_module(void)
+{
+ struct priv *p = netdev_priv(dev_ni52);
+ unregister_netdev(dev_ni52);
+ iounmap(p->mapped);
+ release_region(dev_ni52->base_addr, NI52_TOTAL_SIZE);
+ free_netdev(dev_ni52);
+}
+#endif /* MODULE */
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/i825xx/ni52.h b/drivers/net/ethernet/i825xx/ni52.h
new file mode 100644
index 000000000000..0a03b2883327
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/ni52.h
@@ -0,0 +1,310 @@
+/*
+ * Intel i82586 Ethernet definitions
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same GNU General Public License that covers that work.
+ *
+ * copyrights (c) 1994 by Michael Hipp (hippm@informatik.uni-tuebingen.de)
+ *
+ * I have done a look in the following sources:
+ * crynwr-packet-driver by Russ Nelson
+ * Garret A. Wollman's i82586-driver for BSD
+ */
+
+
+#define NI52_RESET 0 /* writing to this address, resets the i82586 */
+#define NI52_ATTENTION 1 /* channel attention, kick the 586 */
+#define NI52_TENA 3 /* 2-5 possibly wrong, Xmit enable */
+#define NI52_TDIS 2 /* Xmit disable */
+#define NI52_INTENA 5 /* Interrupt enable */
+#define NI52_INTDIS 4 /* Interrupt disable */
+#define NI52_MAGIC1 6 /* dunno exact function */
+#define NI52_MAGIC2 7 /* dunno exact function */
+
+#define NI52_MAGICVAL1 0x00 /* magic-values for ni5210 card */
+#define NI52_MAGICVAL2 0x55
+
+/*
+ * where to find the System Configuration Pointer (SCP)
+ */
+#define SCP_DEFAULT_ADDRESS 0xfffff4
+
+
+/*
+ * System Configuration Pointer Struct
+ */
+
+struct scp_struct
+{
+ u16 zero_dum0; /* has to be zero */
+ u8 sysbus; /* 0=16Bit,1=8Bit */
+ u8 zero_dum1; /* has to be zero for 586 */
+ u16 zero_dum2;
+ u16 zero_dum3;
+ u32 iscp; /* pointer to the iscp-block */
+};
+
+
+/*
+ * Intermediate System Configuration Pointer (ISCP)
+ */
+struct iscp_struct
+{
+ u8 busy; /* 586 clears after successful init */
+ u8 zero_dummy; /* has to be zero */
+ u16 scb_offset; /* pointeroffset to the scb_base */
+ u32 scb_base; /* base-address of all 16-bit offsets */
+};
+
+/*
+ * System Control Block (SCB)
+ */
+struct scb_struct
+{
+ u8 rus;
+ u8 cus;
+ u8 cmd_ruc; /* command word: RU part */
+ u8 cmd_cuc; /* command word: CU part & ACK */
+ u16 cbl_offset; /* pointeroffset, command block list */
+ u16 rfa_offset; /* pointeroffset, receive frame area */
+ u16 crc_errs; /* CRC-Error counter */
+ u16 aln_errs; /* alignmenterror counter */
+ u16 rsc_errs; /* Resourceerror counter */
+ u16 ovrn_errs; /* OVerrunerror counter */
+};
+
+/*
+ * possible command values for the command word
+ */
+#define RUC_MASK 0x0070 /* mask for RU commands */
+#define RUC_NOP 0x0000 /* NOP-command */
+#define RUC_START 0x0010 /* start RU */
+#define RUC_RESUME 0x0020 /* resume RU after suspend */
+#define RUC_SUSPEND 0x0030 /* suspend RU */
+#define RUC_ABORT 0x0040 /* abort receiver operation immediately */
+
+#define CUC_MASK 0x07 /* mask for CU command */
+#define CUC_NOP 0x00 /* NOP-command */
+#define CUC_START 0x01 /* start execution of 1. cmd on the CBL */
+#define CUC_RESUME 0x02 /* resume after suspend */
+#define CUC_SUSPEND 0x03 /* Suspend CU */
+#define CUC_ABORT 0x04 /* abort command operation immediately */
+
+#define ACK_MASK 0xf0 /* mask for ACK command */
+#define ACK_CX 0x80 /* acknowledges STAT_CX */
+#define ACK_FR 0x40 /* ack. STAT_FR */
+#define ACK_CNA 0x20 /* ack. STAT_CNA */
+#define ACK_RNR 0x10 /* ack. STAT_RNR */
+
+/*
+ * possible status values for the status word
+ */
+#define STAT_MASK 0xf0 /* mask for cause of interrupt */
+#define STAT_CX 0x80 /* CU finished cmd with its I bit set */
+#define STAT_FR 0x40 /* RU finished receiving a frame */
+#define STAT_CNA 0x20 /* CU left active state */
+#define STAT_RNR 0x10 /* RU left ready state */
+
+#define CU_STATUS 0x7 /* CU status, 0=idle */
+#define CU_SUSPEND 0x1 /* CU is suspended */
+#define CU_ACTIVE 0x2 /* CU is active */
+
+#define RU_STATUS 0x70 /* RU status, 0=idle */
+#define RU_SUSPEND 0x10 /* RU suspended */
+#define RU_NOSPACE 0x20 /* RU no resources */
+#define RU_READY 0x40 /* RU is ready */
+
+/*
+ * Receive Frame Descriptor (RFD)
+ */
+struct rfd_struct
+{
+ u8 stat_low; /* status word */
+ u8 stat_high; /* status word */
+ u8 rfd_sf; /* 82596 mode only */
+ u8 last; /* Bit15,Last Frame on List / Bit14,suspend */
+ u16 next; /* linkoffset to next RFD */
+ u16 rbd_offset; /* pointeroffset to RBD-buffer */
+ u8 dest[6]; /* ethernet-address, destination */
+ u8 source[6]; /* ethernet-address, source */
+ u16 length; /* 802.3 frame-length */
+ u16 zero_dummy; /* dummy */
+};
+
+#define RFD_LAST 0x80 /* last: last rfd in the list */
+#define RFD_SUSP 0x40 /* last: suspend RU after */
+#define RFD_COMPL 0x80
+#define RFD_OK 0x20
+#define RFD_BUSY 0x40
+#define RFD_ERR_LEN 0x10 /* Length error (if enabled length-checking */
+#define RFD_ERR_CRC 0x08 /* CRC error */
+#define RFD_ERR_ALGN 0x04 /* Alignment error */
+#define RFD_ERR_RNR 0x02 /* status: receiver out of resources */
+#define RFD_ERR_OVR 0x01 /* DMA Overrun! */
+
+#define RFD_ERR_FTS 0x0080 /* Frame to short */
+#define RFD_ERR_NEOP 0x0040 /* No EOP flag (for bitstuffing only) */
+#define RFD_ERR_TRUN 0x0020 /* (82596 only/SF mode) indicates truncated frame */
+#define RFD_MATCHADD 0x0002 /* status: Destinationaddress !matches IA (only 82596) */
+#define RFD_COLLDET 0x0001 /* Detected collision during reception */
+
+/*
+ * Receive Buffer Descriptor (RBD)
+ */
+struct rbd_struct
+{
+ u16 status; /* status word,number of used bytes in buff */
+ u16 next; /* pointeroffset to next RBD */
+ u32 buffer; /* receive buffer address pointer */
+ u16 size; /* size of this buffer */
+ u16 zero_dummy; /* dummy */
+};
+
+#define RBD_LAST 0x8000 /* last buffer */
+#define RBD_USED 0x4000 /* this buffer has data */
+#define RBD_MASK 0x3fff /* size-mask for length */
+
+/*
+ * Statusvalues for Commands/RFD
+ */
+#define STAT_COMPL 0x8000 /* status: frame/command is complete */
+#define STAT_BUSY 0x4000 /* status: frame/command is busy */
+#define STAT_OK 0x2000 /* status: frame/command is ok */
+
+/*
+ * Action-Commands
+ */
+#define CMD_NOP 0x0000 /* NOP */
+#define CMD_IASETUP 0x0001 /* initial address setup command */
+#define CMD_CONFIGURE 0x0002 /* configure command */
+#define CMD_MCSETUP 0x0003 /* MC setup command */
+#define CMD_XMIT 0x0004 /* transmit command */
+#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */
+#define CMD_DUMP 0x0006 /* dump command */
+#define CMD_DIAGNOSE 0x0007 /* diagnose command */
+
+/*
+ * Action command bits
+ */
+#define CMD_LAST 0x8000 /* indicates last command in the CBL */
+#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */
+#define CMD_INT 0x2000 /* generate interrupt after execution */
+
+/*
+ * NOP - command
+ */
+struct nop_cmd_struct
+{
+ u16 cmd_status; /* status of this command */
+ u16 cmd_cmd; /* the command itself (+bits) */
+ u16 cmd_link; /* offsetpointer to next command */
+};
+
+/*
+ * IA Setup command
+ */
+struct iasetup_cmd_struct
+{
+ u16 cmd_status;
+ u16 cmd_cmd;
+ u16 cmd_link;
+ u8 iaddr[6];
+};
+
+/*
+ * Configure command
+ */
+struct configure_cmd_struct
+{
+ u16 cmd_status;
+ u16 cmd_cmd;
+ u16 cmd_link;
+ u8 byte_cnt; /* size of the config-cmd */
+ u8 fifo; /* fifo/recv monitor */
+ u8 sav_bf; /* save bad frames (bit7=1)*/
+ u8 adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/
+ u8 priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
+ u8 ifs; /* inter frame spacing */
+ u8 time_low; /* slot time low */
+ u8 time_high; /* slot time high(0-2) and max. retries(4-7) */
+ u8 promisc; /* promisc-mode(0) , et al (1-7) */
+ u8 carr_coll; /* carrier(0-3)/collision(4-7) stuff */
+ u8 fram_len; /* minimal frame len */
+ u8 dummy; /* dummy */
+};
+
+/*
+ * Multicast Setup command
+ */
+struct mcsetup_cmd_struct
+{
+ u16 cmd_status;
+ u16 cmd_cmd;
+ u16 cmd_link;
+ u16 mc_cnt; /* number of bytes in the MC-List */
+ u8 mc_list[0][6]; /* pointer to 6 bytes entries */
+};
+
+/*
+ * DUMP command
+ */
+struct dump_cmd_struct
+{
+ u16 cmd_status;
+ u16 cmd_cmd;
+ u16 cmd_link;
+ u16 dump_offset; /* pointeroffset to DUMP space */
+};
+
+/*
+ * transmit command
+ */
+struct transmit_cmd_struct
+{
+ u16 cmd_status;
+ u16 cmd_cmd;
+ u16 cmd_link;
+ u16 tbd_offset; /* pointeroffset to TBD */
+ u8 dest[6]; /* destination address of the frame */
+ u16 length; /* user defined: 802.3 length / Ether type */
+};
+
+#define TCMD_ERRMASK 0x0fa0
+#define TCMD_MAXCOLLMASK 0x000f
+#define TCMD_MAXCOLL 0x0020
+#define TCMD_HEARTBEAT 0x0040
+#define TCMD_DEFERRED 0x0080
+#define TCMD_UNDERRUN 0x0100
+#define TCMD_LOSTCTS 0x0200
+#define TCMD_NOCARRIER 0x0400
+#define TCMD_LATECOLL 0x0800
+
+struct tdr_cmd_struct
+{
+ u16 cmd_status;
+ u16 cmd_cmd;
+ u16 cmd_link;
+ u16 status;
+};
+
+#define TDR_LNK_OK 0x8000 /* No link problem identified */
+#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */
+#define TDR_ET_OPN 0x2000 /* open, no correct termination */
+#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */
+#define TDR_TIMEMASK 0x07ff /* mask for the time field */
+
+/*
+ * Transmit Buffer Descriptor (TBD)
+ */
+struct tbd_struct
+{
+ u16 size; /* size + EOF-Flag(15) */
+ u16 next; /* pointeroffset to next TBD */
+ u32 buffer; /* pointer to buffer */
+};
+
+#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */
+
+
+
+
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
new file mode 100644
index 000000000000..6b2a88817473
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -0,0 +1,186 @@
+/*
+ * sni_82596.c -- driver for intel 82596 ethernet controller, as
+ * used in older SNI RM machines
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+
+#define SNI_82596_DRIVER_VERSION "SNI RM 82596 driver - Revision: 0.01"
+
+static const char sni_82596_string[] = "snirm_82596";
+
+#define DMA_ALLOC dma_alloc_coherent
+#define DMA_FREE dma_free_coherent
+#define DMA_WBACK(priv, addr, len) do { } while (0)
+#define DMA_INV(priv, addr, len) do { } while (0)
+#define DMA_WBACK_INV(priv, addr, len) do { } while (0)
+
+#define SYSBUS 0x00004400
+
+/* big endian CPU, 82596 little endian */
+#define SWAP32(x) cpu_to_le32((u32)(x))
+#define SWAP16(x) cpu_to_le16((u16)(x))
+
+#define OPT_MPU_16BIT 0x01
+
+#include "lib82596.c"
+
+MODULE_AUTHOR("Thomas Bogendoerfer");
+MODULE_DESCRIPTION("i82596 driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:snirm_82596");
+module_param(i596_debug, int, 0);
+MODULE_PARM_DESC(i596_debug, "82596 debug mask");
+
+static inline void ca(struct net_device *dev)
+{
+ struct i596_private *lp = netdev_priv(dev);
+
+ writel(0, lp->ca);
+}
+
+
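+/*
+ * Write a command/address word to the 82596's MPU port. In 16-bit mode
+ * the 32-bit value is split into two halfword writes, low half first;
+ * in 32-bit mode the same word is written twice, presumably a quirk of
+ * the SNI port logic.
+ */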
+static void mpu_port(struct net_device *dev, int c, dma_addr_t x)
+{
+ struct i596_private *lp = netdev_priv(dev);
+
+ u32 v = (u32) (c) | (u32) (x);
+
+ if (lp->options & OPT_MPU_16BIT) {
+ writew(v & 0xffff, lp->mpu_port);
+ wmb(); /* order writes to MPU port */
+ udelay(1);
+ writew(v >> 16, lp->mpu_port);
+ } else {
+ writel(v, lp->mpu_port);
+ wmb(); /* order writes to MPU port */
+ udelay(1);
+ writel(v, lp->mpu_port);
+ }
+}
+
+
+static int __devinit sni_82596_probe(struct platform_device *dev)
+{
+ struct net_device *netdevice;
+ struct i596_private *lp;
+ struct resource *res, *ca, *idprom, *options;
+ int retval = -ENOMEM;
+ void __iomem *mpu_addr;
+ void __iomem *ca_addr;
+ u8 __iomem *eth_addr;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ ca = platform_get_resource(dev, IORESOURCE_MEM, 1);
+ options = platform_get_resource(dev, 0, 0);
+ idprom = platform_get_resource(dev, IORESOURCE_MEM, 2);
+ if (!res || !ca || !options || !idprom)
+ return -ENODEV;
+ mpu_addr = ioremap_nocache(res->start, 4);
+ if (!mpu_addr)
+ return -ENOMEM;
+ ca_addr = ioremap_nocache(ca->start, 4);
+ if (!ca_addr)
+ goto probe_failed_free_mpu;
+
+ printk(KERN_INFO "Found i82596 at 0x%x\n", res->start);
+
+ netdevice = alloc_etherdev(sizeof(struct i596_private));
+ if (!netdevice)
+ goto probe_failed_free_ca;
+
+ SET_NETDEV_DEV(netdevice, &dev->dev);
+ platform_set_drvdata (dev, netdevice);
+
+ netdevice->base_addr = res->start;
+ netdevice->irq = platform_get_irq(dev, 0);
+
+ eth_addr = ioremap_nocache(idprom->start, 0x10);
+ if (!eth_addr)
+ goto probe_failed;
+
+ /* someone seems to like messed up stuff */
+ netdevice->dev_addr[0] = readb(eth_addr + 0x0b);
+ netdevice->dev_addr[1] = readb(eth_addr + 0x0a);
+ netdevice->dev_addr[2] = readb(eth_addr + 0x09);
+ netdevice->dev_addr[3] = readb(eth_addr + 0x08);
+ netdevice->dev_addr[4] = readb(eth_addr + 0x07);
+ netdevice->dev_addr[5] = readb(eth_addr + 0x06);
+ iounmap(eth_addr);
+
+ if (!netdevice->irq) {
+ printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
+ __FILE__, netdevice->base_addr);
+ goto probe_failed;
+ }
+
+ lp = netdev_priv(netdevice);
+ lp->options = options->flags & IORESOURCE_BITS;
+ lp->ca = ca_addr;
+ lp->mpu_port = mpu_addr;
+
+ retval = i82596_probe(netdevice);
+ if (retval == 0)
+ return 0;
+
+probe_failed:
+ free_netdev(netdevice);
+probe_failed_free_ca:
+ iounmap(ca_addr);
+probe_failed_free_mpu:
+ iounmap(mpu_addr);
+ return retval;
+}
+
+static int __devexit sni_82596_driver_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct i596_private *lp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ DMA_FREE(dev->dev.parent, sizeof(struct i596_private),
+ lp->dma, lp->dma_addr);
+ iounmap(lp->ca);
+ iounmap(lp->mpu_port);
+ free_netdev (dev);
+ return 0;
+}
+
+static struct platform_driver sni_82596_driver = {
+ .probe = sni_82596_probe,
+ .remove = __devexit_p(sni_82596_driver_remove),
+ .driver = {
+ .name = sni_82596_string,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init sni_82596_init(void)
+{
+ printk(KERN_INFO SNI_82596_DRIVER_VERSION "\n");
+ return platform_driver_register(&sni_82596_driver);
+}
+
+
+static void __exit sni_82596_exit(void)
+{
+ platform_driver_unregister(&sni_82596_driver);
+}
+
+module_init(sni_82596_init);
+module_exit(sni_82596_exit);
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
new file mode 100644
index 000000000000..6ef5e11d1c84
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -0,0 +1,1213 @@
+/*
+ * Sun3 i82586 Ethernet driver
+ *
+ * Cloned from ni52.c for the Sun3 by Sam Creasey (sammy@sammy.net)
+ *
+ * Original copyright follows:
+ * --------------------------
+ *
+ * net-3-driver for the NI5210 card (i82586 Ethernet chip)
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same Gnu Public License that covers that work.
+ *
+ * Alphacode 0.82 (96/09/29) for Linux 2.0.0 (or later)
+ * Copyrights (c) 1994,1995,1996 by M.Hipp (hippm@informatik.uni-tuebingen.de)
+ * --------------------------
+ *
+ * Consult ni52.c for further notes from the original driver.
+ *
+ * This incarnation currently supports the OBIO version of the i82586 chip
+ * used in certain sun3 models. It should be fairly doable to expand this
+ * to support VME if I should ever acquire such a board.
+ *
+ */
+
+static int debuglevel = 0; /* debug-printk 0: off 1: a few 2: more */
+static int automatic_resume = 0; /* experimental .. better should be zero */
+static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */
+static int fifo=0x8; /* don't change */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/idprom.h>
+#include <asm/machines.h>
+#include <asm/sun3mmu.h>
+#include <asm/dvma.h>
+#include <asm/byteorder.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "sun3_82586.h"
+
+#define DRV_NAME "sun3_82586"
+
+#define DEBUG /* debug on */
+#define SYSBUSVAL 0 /* 16 Bit */
+#define SUN3_82586_TOTAL_SIZE PAGE_SIZE
+
+#define sun3_attn586() {*(volatile unsigned char *)(dev->base_addr) |= IEOB_ATTEN; *(volatile unsigned char *)(dev->base_addr) &= ~IEOB_ATTEN;}
+#define sun3_reset586() {*(volatile unsigned char *)(dev->base_addr) = 0; udelay(100); *(volatile unsigned char *)(dev->base_addr) = IEOB_NORSET;}
+#define sun3_disint() {*(volatile unsigned char *)(dev->base_addr) &= ~IEOB_IENAB;}
+#define sun3_enaint() {*(volatile unsigned char *)(dev->base_addr) |= IEOB_IENAB;}
+#define sun3_active() {*(volatile unsigned char *)(dev->base_addr) |= (IEOB_IENAB|IEOB_ONAIR|IEOB_NORSET);}
+
+#define make32(ptr16) (p->memtop + (swab16((unsigned short) (ptr16))) )
+#define make24(ptr32) (char *)swab32(( ((unsigned long) (ptr32)) - p->base))
+#define make16(ptr32) (swab16((unsigned short) ((unsigned long)(ptr32) - (unsigned long) p->memtop )))
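+
+/*
+ * The 68k CPU is big-endian while the i82586 is little-endian, hence
+ * the swabs above. Worked example for make16(): a structure 0x40 bytes
+ * past memtop gives swab16(0x0040) = 0x4000; stored big-endian that is
+ * the byte sequence 0x40 0x00, which the chip reads back as offset
+ * 0x0040.
+ */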
+
+/******************* how to calculate the buffers *****************************
+
+ * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works
+ * --------------- in a different (more stable?) mode. Only in this mode it's
+ * possible to configure the driver with 'NO_NOPCOMMANDS'
+
+sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8;
+sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT
+sizeof(rfd) = 24; sizeof(rbd) = 12;
+sizeof(tbd) = 8; sizeof(transmit_cmd) = 16;
+sizeof(nop_cmd) = 8;
+
+ * if you don't know the driver, better do not change these values: */
+
+#define RECV_BUFF_SIZE 1536 /* slightly oversized */
+#define XMIT_BUFF_SIZE 1536 /* slightly oversized */
+#define NUM_XMIT_BUFFS 1 /* config for 32K shmem */
+#define NUM_RECV_BUFFS_8 4 /* config for 32K shared mem */
+#define NUM_RECV_BUFFS_16 9 /* config for 32K shared mem */
+#define NUM_RECV_BUFFS_32 16 /* config for 32K shared mem */
+#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */
+
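+/*
+ * Rough budget for the 32K DVMA window with the settings above (one
+ * transmit buffer, NUM_RECV_BUFFS_32 = 16 receive buffers, rfdadd = 0),
+ * using the structure sizes from the comment block:
+ *
+ *	init (scp+iscp+scb)              36
+ *	2 nop commands        2 *    8 =     16
+ *	16 RFDs              16 *   24 =    384
+ *	16 RBDs              16 *   12 =    192
+ *	16 recv buffers      16 * 1536 =  24576
+ *	xmit cmd+buff+tbd    16+1536+8 =   1560
+ *	total                             26764 bytes, well inside 32K
+ */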
+/**************************************************************************/
+
+/* different DELAYs */
+#define DELAY(x) mdelay(32 * x);
+#define DELAY_16(); { udelay(16); }
+#define DELAY_18(); { udelay(4); }
+
+/* wait for command with timeout: */
+#define WAIT_4_SCB_CMD() \
+{ int i; \
+ for(i=0;i<16384;i++) { \
+ if(!p->scb->cmd_cuc) break; \
+ DELAY_18(); \
+ if(i == 16383) { \
+ printk("%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_cuc,p->scb->cus); \
+ if(!p->reseted) { p->reseted = 1; sun3_reset586(); } } } }
+
+#define WAIT_4_SCB_CMD_RUC() { int i; \
+ for(i=0;i<16384;i++) { \
+ if(!p->scb->cmd_ruc) break; \
+ DELAY_18(); \
+ if(i == 16383) { \
+ printk("%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_ruc,p->scb->rus); \
+ if(!p->reseted) { p->reseted = 1; sun3_reset586(); } } } }
+
+#define WAIT_4_STAT_COMPL(addr) { int i; \
+ for(i=0;i<32767;i++) { \
+ if(swab16((addr)->cmd_status) & STAT_COMPL) break; \
+ DELAY_16(); DELAY_16(); } }
+
+static int sun3_82586_probe1(struct net_device *dev,int ioaddr);
+static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id);
+static int sun3_82586_open(struct net_device *dev);
+static int sun3_82586_close(struct net_device *dev);
+static int sun3_82586_send_packet(struct sk_buff *,struct net_device *);
+static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static void sun3_82586_timeout(struct net_device *dev);
+#if 0
+static void sun3_82586_dump(struct net_device *,void *);
+#endif
+
+/* helper-functions */
+static int init586(struct net_device *dev);
+static int check586(struct net_device *dev,char *where,unsigned size);
+static void alloc586(struct net_device *dev);
+static void startrecv586(struct net_device *dev);
+static void *alloc_rfa(struct net_device *dev,void *ptr);
+static void sun3_82586_rcv_int(struct net_device *dev);
+static void sun3_82586_xmt_int(struct net_device *dev);
+static void sun3_82586_rnr_int(struct net_device *dev);
+
+struct priv
+{
+ unsigned long base;
+ char *memtop;
+ long int lock;
+ int reseted;
+ volatile struct rfd_struct *rfd_last,*rfd_top,*rfd_first;
+ volatile struct scp_struct *scp; /* volatile is important */
+ volatile struct iscp_struct *iscp; /* volatile is important */
+ volatile struct scb_struct *scb; /* volatile is important */
+ volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS];
+ volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS];
+#if (NUM_XMIT_BUFFS == 1)
+ volatile struct nop_cmd_struct *nop_cmds[2];
+#else
+ volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS];
+#endif
+ volatile int nop_point,num_recv_buffs;
+ volatile char *xmit_cbuffs[NUM_XMIT_BUFFS];
+ volatile int xmit_count,xmit_last;
+};
+
+/**********************************************
+ * close device
+ */
+static int sun3_82586_close(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+
+ sun3_reset586(); /* the hard way to stop the receiver */
+
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+/**********************************************
+ * open device
+ */
+static int sun3_82586_open(struct net_device *dev)
+{
+ int ret;
+
+ sun3_disint();
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+ sun3_enaint();
+
+ ret = request_irq(dev->irq, sun3_82586_interrupt,0,dev->name,dev);
+ if (ret)
+ {
+ sun3_reset586();
+ return ret;
+ }
+
+ netif_start_queue(dev);
+
+ return 0; /* most done by init */
+}
+
+/**********************************************
+ * Check to see if there's an 82586 out there.
+ */
+static int check586(struct net_device *dev,char *where,unsigned size)
+{
+ struct priv pb;
+ struct priv *p = &pb;
+ char *iscp_addr;
+ int i;
+
+ p->base = (unsigned long) dvma_btov(0);
+ p->memtop = (char *)dvma_btov((unsigned long)where);
+ p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS);
+ memset((char *)p->scp,0, sizeof(struct scp_struct));
+ for(i=0;i<sizeof(struct scp_struct);i++) /* memory was writeable? */
+ if(((char *)p->scp)[i])
+ return 0;
+ p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */
+ if(p->scp->sysbus != SYSBUSVAL)
+ return 0;
+
+ iscp_addr = (char *)dvma_btov((unsigned long)where);
+
+ p->iscp = (struct iscp_struct *) iscp_addr;
+ memset((char *)p->iscp,0, sizeof(struct iscp_struct));
+
+ p->scp->iscp = make24(p->iscp);
+ p->iscp->busy = 1;
+
+ sun3_reset586();
+ sun3_attn586();
+ DELAY(1); /* wait a while... */
+
+ if(p->iscp->busy) /* i82586 clears 'busy' after successful init */
+ return 0;
+
+ return 1;
+}
+
+/******************************************************************
+ * set iscp at the right place, called by sun3_82586_probe1 and open586.
+ */
+static void alloc586(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+
+ sun3_reset586();
+ DELAY(1);
+
+ p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS);
+ p->iscp = (struct iscp_struct *) dvma_btov(dev->mem_start);
+ p->scb = (struct scb_struct *) ((char *)p->iscp + sizeof(struct iscp_struct));
+
+ memset((char *) p->iscp,0,sizeof(struct iscp_struct));
+ memset((char *) p->scp ,0,sizeof(struct scp_struct));
+
+ p->scp->iscp = make24(p->iscp);
+ p->scp->sysbus = SYSBUSVAL;
+ p->iscp->scb_offset = make16(p->scb);
+ p->iscp->scb_base = make24(dvma_btov(dev->mem_start));
+
+ p->iscp->busy = 1;
+ sun3_reset586();
+ sun3_attn586();
+
+ DELAY(1);
+
+ if(p->iscp->busy)
+ printk("%s: Init-Problems (alloc).\n",dev->name);
+
+ p->reseted = 0;
+
+ memset((char *)p->scb,0,sizeof(struct scb_struct));
+}
+
+struct net_device * __init sun3_82586_probe(int unit)
+{
+ struct net_device *dev;
+ unsigned long ioaddr;
+ static int found = 0;
+ int err = -ENOMEM;
+
+ /* check that this machine has an onboard 82586 */
+ switch(idprom->id_machtype) {
+ case SM_SUN3|SM_3_160:
+ case SM_SUN3|SM_3_260:
+ /* these machines have 82586 */
+ break;
+
+ default:
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (found)
+ return ERR_PTR(-ENODEV);
+
+ ioaddr = (unsigned long)ioremap(IE_OBIO, SUN3_82586_TOTAL_SIZE);
+ if (!ioaddr)
+ return ERR_PTR(-ENOMEM);
+ found = 1;
+
+ dev = alloc_etherdev(sizeof(struct priv));
+ if (!dev)
+ goto out;
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ dev->irq = IE_IRQ;
+ dev->base_addr = ioaddr;
+ err = sun3_82586_probe1(dev, ioaddr);
+ if (err)
+ goto out1;
+ err = register_netdev(dev);
+ if (err)
+ goto out2;
+ return dev;
+
+out2:
+ release_region(ioaddr, SUN3_82586_TOTAL_SIZE);
+out1:
+ free_netdev(dev);
+out:
+ iounmap((void __iomem *)ioaddr);
+ return ERR_PTR(err);
+}
+
+static const struct net_device_ops sun3_82586_netdev_ops = {
+ .ndo_open = sun3_82586_open,
+ .ndo_stop = sun3_82586_close,
+ .ndo_start_xmit = sun3_82586_send_packet,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_tx_timeout = sun3_82586_timeout,
+ .ndo_get_stats = sun3_82586_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr)
+{
+ int i, size, retval;
+
+ if (!request_region(ioaddr, SUN3_82586_TOTAL_SIZE, DRV_NAME))
+ return -EBUSY;
+
+ /* copy in the ethernet address from the prom */
+ for(i = 0; i < 6 ; i++)
+ dev->dev_addr[i] = idprom->id_ethaddr[i];
+
+ printk("%s: SUN3 Intel 82586 found at %lx, ",dev->name,dev->base_addr);
+
+ /*
+ * check (or search) IO-Memory, 32K
+ */
+ size = 0x8000;
+
+ dev->mem_start = (unsigned long)dvma_malloc_align(0x8000, 0x1000);
+ dev->mem_end = dev->mem_start + size;
+
+ if(size != 0x2000 && size != 0x4000 && size != 0x8000) {
+ printk("\n%s: Illegal memory size %d. Allowed is 0x2000 or 0x4000 or 0x8000 bytes.\n",dev->name,size);
+ retval = -ENODEV;
+ goto out;
+ }
+ if(!check586(dev,(char *) dev->mem_start,size)) {
+ printk("?memcheck, Can't find memory at 0x%lx with size %d!\n",dev->mem_start,size);
+ retval = -ENODEV;
+ goto out;
+ }
+
+ ((struct priv *)netdev_priv(dev))->memtop =
+ (char *)dvma_btov(dev->mem_start);
+ ((struct priv *)netdev_priv(dev))->base = (unsigned long) dvma_btov(0);
+ alloc586(dev);
+
+ /* set number of receive-buffs according to memsize */
+ if(size == 0x2000)
+ ((struct priv *)netdev_priv(dev))->num_recv_buffs =
+ NUM_RECV_BUFFS_8;
+ else if(size == 0x4000)
+ ((struct priv *)netdev_priv(dev))->num_recv_buffs =
+ NUM_RECV_BUFFS_16;
+ else
+ ((struct priv *)netdev_priv(dev))->num_recv_buffs =
+ NUM_RECV_BUFFS_32;
+
+ printk("Memaddr: 0x%lx, Memsize: %d, IRQ %d\n",dev->mem_start,size, dev->irq);
+
+ dev->netdev_ops = &sun3_82586_netdev_ops;
+ dev->watchdog_timeo = HZ/20;
+
+ dev->if_port = 0;
+ return 0;
+out:
+ release_region(ioaddr, SUN3_82586_TOTAL_SIZE);
+ return retval;
+}
+
+
+static int init586(struct net_device *dev)
+{
+ void *ptr;
+ int i,result=0;
+ struct priv *p = netdev_priv(dev);
+ volatile struct configure_cmd_struct *cfg_cmd;
+ volatile struct iasetup_cmd_struct *ias_cmd;
+ volatile struct tdr_cmd_struct *tdr_cmd;
+ volatile struct mcsetup_cmd_struct *mc_cmd;
+ struct netdev_hw_addr *ha;
+ int num_addrs=netdev_mc_count(dev);
+
+ ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
+
+ cfg_cmd = (struct configure_cmd_struct *)ptr; /* configure-command */
+ cfg_cmd->cmd_status = 0;
+ cfg_cmd->cmd_cmd = swab16(CMD_CONFIGURE | CMD_LAST);
+ cfg_cmd->cmd_link = 0xffff;
+
+ cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */
+ cfg_cmd->fifo = fifo; /* fifo-limit (8=tx:32/rx:64) */
+ cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */
+ cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */
+ cfg_cmd->priority = 0x00;
+ cfg_cmd->ifs = 0x60;
+ cfg_cmd->time_low = 0x00;
+ cfg_cmd->time_high = 0xf2;
+ cfg_cmd->promisc = 0;
+ if(dev->flags & IFF_ALLMULTI) {
+ int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
+ if(num_addrs > len) {
+ printk("%s: switching to promisc. mode\n",dev->name);
+ cfg_cmd->promisc = 1;
+ }
+ }
+ if(dev->flags&IFF_PROMISC)
+ cfg_cmd->promisc = 1;
+ cfg_cmd->carr_coll = 0x00;
+
+ p->scb->cbl_offset = make16(cfg_cmd);
+ p->scb->cmd_ruc = 0;
+
+ p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
+ sun3_attn586();
+
+ WAIT_4_STAT_COMPL(cfg_cmd);
+
+ if((swab16(cfg_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != (STAT_COMPL|STAT_OK))
+ {
+ printk("%s: configure command failed: %x\n",dev->name,swab16(cfg_cmd->cmd_status));
+ return 1;
+ }
+
+ /*
+ * individual address setup
+ */
+
+ ias_cmd = (struct iasetup_cmd_struct *)ptr;
+
+ ias_cmd->cmd_status = 0;
+ ias_cmd->cmd_cmd = swab16(CMD_IASETUP | CMD_LAST);
+ ias_cmd->cmd_link = 0xffff;
+
+ memcpy((char *)&ias_cmd->iaddr,(char *) dev->dev_addr,ETH_ALEN);
+
+ p->scb->cbl_offset = make16(ias_cmd);
+
+ p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
+ sun3_attn586();
+
+ WAIT_4_STAT_COMPL(ias_cmd);
+
+ if((swab16(ias_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != (STAT_OK|STAT_COMPL)) {
+ printk("%s (82586): individual address setup command failed: %04x\n",dev->name,swab16(ias_cmd->cmd_status));
+ return 1;
+ }
+
+ /*
+ * TDR (wire check): e.g. detects a missing termination resistor etc.
+ */
+
+ tdr_cmd = (struct tdr_cmd_struct *)ptr;
+
+ tdr_cmd->cmd_status = 0;
+ tdr_cmd->cmd_cmd = swab16(CMD_TDR | CMD_LAST);
+ tdr_cmd->cmd_link = 0xffff;
+ tdr_cmd->status = 0;
+
+ p->scb->cbl_offset = make16(tdr_cmd);
+ p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
+ sun3_attn586();
+
+ WAIT_4_STAT_COMPL(tdr_cmd);
+
+ if(!(swab16(tdr_cmd->cmd_status) & STAT_COMPL))
+ {
+ printk("%s: Problems while running the TDR.\n",dev->name);
+ }
+ else
+ {
+ DELAY_16(); /* wait for result */
+ result = swab16(tdr_cmd->status);
+
+ p->scb->cmd_cuc = p->scb->cus & STAT_MASK;
+ sun3_attn586(); /* ack the interrupts */
+
+ if(result & TDR_LNK_OK)
+ ;
+ else if(result & TDR_XCVR_PRB)
+ printk("%s: TDR: Transceiver problem. Check the cable(s)!\n",dev->name);
+ else if(result & TDR_ET_OPN)
+ printk("%s: TDR: No correct termination %d clocks away.\n",dev->name,result & TDR_TIMEMASK);
+ else if(result & TDR_ET_SRT)
+ {
+ if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */
+ printk("%s: TDR: Detected a short circuit %d clocks away.\n",dev->name,result & TDR_TIMEMASK);
+ }
+ else
+ printk("%s: TDR: Unknown status %04x\n",dev->name,result);
+ }
+
+ /*
+ * Multicast setup
+ */
+ if(num_addrs && !(dev->flags & IFF_PROMISC) )
+ {
+ mc_cmd = (struct mcsetup_cmd_struct *) ptr;
+ mc_cmd->cmd_status = 0;
+ mc_cmd->cmd_cmd = swab16(CMD_MCSETUP | CMD_LAST);
+ mc_cmd->cmd_link = 0xffff;
+ mc_cmd->mc_cnt = swab16(num_addrs * 6);
+
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy((char *) mc_cmd->mc_list[i++],
+ ha->addr, ETH_ALEN);
+
+ p->scb->cbl_offset = make16(mc_cmd);
+ p->scb->cmd_cuc = CUC_START;
+ sun3_attn586();
+
+ WAIT_4_STAT_COMPL(mc_cmd);
+
+ if( (swab16(mc_cmd->cmd_status) & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) )
+ printk("%s: Can't apply multicast-address-list.\n",dev->name);
+ }
+
+ /*
+ * alloc nop/xmit-cmds
+ */
+#if (NUM_XMIT_BUFFS == 1)
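+ /* Even with a single transmit buffer, two NOP commands are set up
+ * so the CU can alternate between them while idle (see nop_point). */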
+ for(i=0;i<2;i++)
+ {
+ p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
+ p->nop_cmds[i]->cmd_cmd = swab16(CMD_NOP);
+ p->nop_cmds[i]->cmd_status = 0;
+ p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
+ ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
+ }
+#else
+ for(i=0;i<NUM_XMIT_BUFFS;i++)
+ {
+ p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
+ p->nop_cmds[i]->cmd_cmd = swab16(CMD_NOP);
+ p->nop_cmds[i]->cmd_status = 0;
+ p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
+ ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
+ }
+#endif
+
+ ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */
+
+ /*
+ * alloc xmit-buffs / init xmit_cmds
+ */
+ for(i=0;i<NUM_XMIT_BUFFS;i++)
+ {
+ p->xmit_cmds[i] = (struct transmit_cmd_struct *)ptr; /*transmit cmd/buff 0*/
+ ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
+ p->xmit_cbuffs[i] = (char *)ptr; /* char-buffs */
+ ptr = (char *) ptr + XMIT_BUFF_SIZE;
+ p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */
+ ptr = (char *) ptr + sizeof(struct tbd_struct);
+ if((void *)ptr > (void *)dev->mem_end)
+ {
+ printk("%s: not enough shared-mem for your configuration!\n",dev->name);
+ return 1;
+ }
+ memset((char *)(p->xmit_cmds[i]) ,0, sizeof(struct transmit_cmd_struct));
+ memset((char *)(p->xmit_buffs[i]),0, sizeof(struct tbd_struct));
+ p->xmit_cmds[i]->cmd_link = make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]);
+ p->xmit_cmds[i]->cmd_status = swab16(STAT_COMPL);
+ p->xmit_cmds[i]->cmd_cmd = swab16(CMD_XMIT | CMD_INT);
+ p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i]));
+ p->xmit_buffs[i]->next = 0xffff;
+ p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i]));
+ }
+
+ p->xmit_count = 0;
+ p->xmit_last = 0;
+#ifndef NO_NOPCOMMANDS
+ p->nop_point = 0;
+#endif
+
+ /*
+ * 'start transmitter'
+ */
+#ifndef NO_NOPCOMMANDS
+ p->scb->cbl_offset = make16(p->nop_cmds[0]);
+ p->scb->cmd_cuc = CUC_START;
+ sun3_attn586();
+ WAIT_4_SCB_CMD();
+#else
+ p->xmit_cmds[0]->cmd_link = make16(p->xmit_cmds[0]);
+ p->xmit_cmds[0]->cmd_cmd = swab16(CMD_XMIT | CMD_SUSPEND | CMD_INT);
+#endif
+
+ /*
+ * ack. interrupts
+ */
+ p->scb->cmd_cuc = p->scb->cus & STAT_MASK;
+ sun3_attn586();
+ DELAY_16();
+
+ sun3_enaint();
+ sun3_active();
+
+ return 0;
+}
+
+/******************************************************
+ * This is a helper routine for sun3_82586_rnr_int() and init586().
+ * It sets up the Receive Frame Area (RFA).
+ */
+
+static void *alloc_rfa(struct net_device *dev,void *ptr)
+{
+ volatile struct rfd_struct *rfd = (struct rfd_struct *)ptr;
+ volatile struct rbd_struct *rbd;
+ int i;
+ struct priv *p = netdev_priv(dev);
+
+ memset((char *) rfd,0,sizeof(struct rfd_struct)*(p->num_recv_buffs+rfdadd));
+ p->rfd_first = rfd;
+
+ for(i = 0; i < (p->num_recv_buffs+rfdadd); i++) {
+ rfd[i].next = make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd) );
+ rfd[i].rbd_offset = 0xffff;
+ }
+ rfd[p->num_recv_buffs-1+rfdadd].last = RFD_SUSP; /* RU suspend */
+
+ ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd) );
+
+ rbd = (struct rbd_struct *) ptr;
+ ptr = (void *) (rbd + p->num_recv_buffs);
+
+ /* clr descriptors */
+ memset((char *) rbd,0,sizeof(struct rbd_struct)*(p->num_recv_buffs));
+
+ for(i=0;i<p->num_recv_buffs;i++)
+ {
+ rbd[i].next = make16((rbd + (i+1) % p->num_recv_buffs));
+ rbd[i].size = swab16(RECV_BUFF_SIZE);
+ rbd[i].buffer = make24(ptr);
+ ptr = (char *) ptr + RECV_BUFF_SIZE;
+ }
+
+ p->rfd_top = p->rfd_first;
+ p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd);
+
+ p->scb->rfa_offset = make16(p->rfd_first);
+ p->rfd_first->rbd_offset = make16(rbd);
+
+ return ptr;
+}
+
+
+/**************************************************
+ * Interrupt Handler ...
+ */
+
+static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ unsigned short stat;
+ int cnt=0;
+ struct priv *p;
+
+ if (!dev) {
+ printk ("sun3_82586-interrupt: irq %d for unknown device.\n",irq);
+ return IRQ_NONE;
+ }
+ p = netdev_priv(dev);
+
+ if(debuglevel > 1)
+ printk("I");
+
+ WAIT_4_SCB_CMD(); /* wait for last command */
+
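+ /* Loop until every interrupt cause is acknowledged: writing the
+ * status bits back into the command word acks exactly those causes. */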
+ while((stat=p->scb->cus & STAT_MASK))
+ {
+ p->scb->cmd_cuc = stat;
+ sun3_attn586();
+
+ if(stat & STAT_FR) /* received a frame */
+ sun3_82586_rcv_int(dev);
+
+ if(stat & STAT_RNR) /* RU went 'not ready' */
+ {
+ printk("(R)");
+ if(p->scb->rus & RU_SUSPEND) /* special case: RU_SUSPEND */
+ {
+ WAIT_4_SCB_CMD();
+ p->scb->cmd_ruc = RUC_RESUME;
+ sun3_attn586();
+ WAIT_4_SCB_CMD_RUC();
+ }
+ else
+ {
+ printk("%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->rus);
+ sun3_82586_rnr_int(dev);
+ }
+ }
+
+ if(stat & STAT_CX) /* command with I-bit set complete */
+ sun3_82586_xmt_int(dev);
+
+#ifndef NO_NOPCOMMANDS
+ if(stat & STAT_CNA) /* CU went 'not ready' */
+ {
+ if(netif_running(dev))
+ printk("%s: oops! CU has left active state. stat: %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->cus);
+ }
+#endif
+
+ if(debuglevel > 1)
+ printk("%d",cnt++);
+
+ WAIT_4_SCB_CMD(); /* wait for ack. (sun3_82586_xmt_int can be faster than ack!!) */
+ if(p->scb->cmd_cuc) /* timed out? */
+ {
+ printk("%s: Acknowledge timed out.\n",dev->name);
+ sun3_disint();
+ break;
+ }
+ }
+
+ if(debuglevel > 1)
+ printk("i");
+ return IRQ_HANDLED;
+}
+
+/*******************************************************
+ * receive-interrupt
+ */
+
+static void sun3_82586_rcv_int(struct net_device *dev)
+{
+ int status,cnt=0;
+ unsigned short totlen;
+ struct sk_buff *skb;
+ struct rbd_struct *rbd;
+ struct priv *p = netdev_priv(dev);
+
+ if(debuglevel > 0)
+ printk("R");
+
+ for(;(status = p->rfd_top->stat_high) & RFD_COMPL;)
+ {
+ rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);
+
+ if(status & RFD_OK) /* frame received without error? */
+ {
+ if( (totlen = swab16(rbd->status)) & RBD_LAST) /* the first and the last buffer? */
+ {
+ totlen &= RBD_MASK; /* length of this frame */
+ rbd->status = 0;
+ skb = dev_alloc_skb(totlen+2);
+ if(skb != NULL)
+ {
+ skb_reserve(skb,2);
+ skb_put(skb,totlen);
+ skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ }
+ else
+ dev->stats.rx_dropped++;
+ }
+ else
+ {
+ int rstat;
+ /* free all RBD's until RBD_LAST is set */
+ totlen = 0;
+ while(!((rstat=swab16(rbd->status)) & RBD_LAST))
+ {
+ totlen += rstat & RBD_MASK;
+ if(!rstat)
+ {
+ printk("%s: Whoops .. no end mark in RBD list\n",dev->name);
+ break;
+ }
+ rbd->status = 0;
+ rbd = (struct rbd_struct *) make32(rbd->next);
+ }
+ totlen += rstat & RBD_MASK;
+ rbd->status = 0;
+ printk("%s: received oversized frame! length: %d\n",dev->name,totlen);
+ dev->stats.rx_dropped++;
+ }
+ }
+ else /* frame !(ok), only with 'save-bad-frames' */
+ {
+ printk("%s: oops! rfd-error-status: %04x\n",dev->name,status);
+ dev->stats.rx_errors++;
+ }
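+ /* Recycle this RFD: clear its status, make it the new suspend
+ * point at the end of the list, and advance rfd_top to the next. */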
+ p->rfd_top->stat_high = 0;
+ p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */
+ p->rfd_top->rbd_offset = 0xffff;
+ p->rfd_last->last = 0; /* delete RFD_SUSP */
+ p->rfd_last = p->rfd_top;
+ p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */
+ p->scb->rfa_offset = make16(p->rfd_top);
+
+ if(debuglevel > 0)
+ printk("%d",cnt++);
+ }
+
+ if(automatic_resume)
+ {
+ WAIT_4_SCB_CMD();
+ p->scb->cmd_ruc = RUC_RESUME;
+ sun3_attn586();
+ WAIT_4_SCB_CMD_RUC();
+ }
+
+#ifdef WAIT_4_BUSY
+ {
+ int i;
+ for(i=0;i<1024;i++)
+ {
+ if(p->rfd_top->status)
+ break;
+ DELAY_16();
+ if(i == 1023)
+ printk("%s: RU hasn't fetched next RFD (not busy/complete)\n",dev->name);
+ }
+ }
+#endif
+
+#if 0
+ if(!at_least_one)
+ {
+ int i;
+ volatile struct rfd_struct *rfds=p->rfd_top;
+ volatile struct rbd_struct *rbds;
+ printk("%s: received a FC intr. without having a frame: %04x %d\n",dev->name,status,old_at_least);
+ for(i=0;i< (p->num_recv_buffs+4);i++)
+ {
+ rbds = (struct rbd_struct *) make32(rfds->rbd_offset);
+ printk("%04x:%04x ",rfds->status,rbds->status);
+ rfds = (struct rfd_struct *) make32(rfds->next);
+ }
+ printk("\nerrs: %04x %04x stat: %04x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->status);
+ printk("\nerrs: %04x %04x rus: %02x, cus: %02x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->rus,(int)p->scb->cus);
+ }
+ old_at_least = at_least_one;
+#endif
+
+ if(debuglevel > 0)
+ printk("r");
+}
+
+/**********************************************************
+ * handle 'Receiver went not ready'.
+ */
+
+static void sun3_82586_rnr_int(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+
+ dev->stats.rx_errors++;
+
+ WAIT_4_SCB_CMD(); /* wait for the last cmd, WAIT_4_FULLSTAT?? */
+ p->scb->cmd_ruc = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */
+ sun3_attn586();
+ WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. */
+
+ alloc_rfa(dev,(char *)p->rfd_first);
+/* maybe add a check here, before restarting the RU */
+ startrecv586(dev); /* restart RU */
+
+ printk("%s: Receive-Unit restarted. Status: %04x\n",dev->name,p->scb->rus);
+
+}
+
+/**********************************************************
+ * handle xmit - interrupt
+ */
+
+static void sun3_82586_xmt_int(struct net_device *dev)
+{
+ int status;
+ struct priv *p = netdev_priv(dev);
+
+ if(debuglevel > 0)
+ printk("X");
+
+ status = swab16(p->xmit_cmds[p->xmit_last]->cmd_status);
+ if(!(status & STAT_COMPL))
+ printk("%s: strange .. xmit-int without a 'COMPLETE'\n",dev->name);
+
+ if(status & STAT_OK)
+ {
+ dev->stats.tx_packets++;
+ dev->stats.collisions += (status & TCMD_MAXCOLLMASK);
+ }
+ else
+ {
+ dev->stats.tx_errors++;
+ if(status & TCMD_LATECOLL) {
+ printk("%s: late collision detected.\n",dev->name);
+ dev->stats.collisions++;
+ }
+ else if(status & TCMD_NOCARRIER) {
+ dev->stats.tx_carrier_errors++;
+ printk("%s: no carrier detected.\n",dev->name);
+ }
+ else if(status & TCMD_LOSTCTS)
+ printk("%s: loss of CTS detected.\n",dev->name);
+ else if(status & TCMD_UNDERRUN) {
+ dev->stats.tx_fifo_errors++;
+ printk("%s: DMA underrun detected.\n",dev->name);
+ }
+ else if(status & TCMD_MAXCOLL) {
+ printk("%s: Max. collisions exceeded.\n",dev->name);
+ dev->stats.collisions += 16;
+ }
+ }
+
+#if (NUM_XMIT_BUFFS > 1)
+ if( (++p->xmit_last) == NUM_XMIT_BUFFS)
+ p->xmit_last = 0;
+#endif
+ netif_wake_queue(dev);
+}
+
+/***********************************************************
+ * (re)start the receiver
+ */
+
+static void startrecv586(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+
+ WAIT_4_SCB_CMD();
+ WAIT_4_SCB_CMD_RUC();
+ p->scb->rfa_offset = make16(p->rfd_first);
+ p->scb->cmd_ruc = RUC_START;
+ sun3_attn586(); /* start cmd. */
+ WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. (no timeout!!) */
+}
+
+static void sun3_82586_timeout(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+#ifndef NO_NOPCOMMANDS
+ if(p->scb->cus & CU_ACTIVE) /* COMMAND-UNIT active? */
+ {
+ netif_wake_queue(dev);
+#ifdef DEBUG
+ printk("%s: strange ... timeout with CU active?!?\n",dev->name);
+ printk("%s: X0: %04x N0: %04x N1: %04x %d\n",dev->name,(int)swab16(p->xmit_cmds[0]->cmd_status),(int)swab16(p->nop_cmds[0]->cmd_status),(int)swab16(p->nop_cmds[1]->cmd_status),(int)p->nop_point);
+#endif
+ p->scb->cmd_cuc = CUC_ABORT;
+ sun3_attn586();
+ WAIT_4_SCB_CMD();
+ p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]);
+ p->scb->cmd_cuc = CUC_START;
+ sun3_attn586();
+ WAIT_4_SCB_CMD();
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ return;
+ }
+#endif
+ {
+#ifdef DEBUG
+ printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus);
+ printk("%s: command-stats: %04x %04x\n",dev->name,swab16(p->xmit_cmds[0]->cmd_status),swab16(p->xmit_cmds[1]->cmd_status));
+ printk("%s: check, whether you set the right interrupt number!\n",dev->name);
+#endif
+ sun3_82586_close(dev);
+ sun3_82586_open(dev);
+ }
+ dev->trans_start = jiffies; /* prevent tx timeout */
+}
+
+/******************************************************
+ * send frame
+ */
+
+static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ int len,i;
+#ifndef NO_NOPCOMMANDS
+ int next_nop;
+#endif
+ struct priv *p = netdev_priv(dev);
+
+ if(skb->len > XMIT_BUFF_SIZE)
+ {
+ printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len);
+ return NETDEV_TX_OK;
+ }
+
+ netif_stop_queue(dev);
+
+#if(NUM_XMIT_BUFFS > 1)
+ if(test_and_set_bit(0,(void *) &p->lock)) {
+ printk("%s: Queue was locked\n",dev->name);
+ return NETDEV_TX_BUSY;
+ }
+ else
+#endif
+ {
+ len = skb->len;
+ if (len < ETH_ZLEN) {
+ memset((void *)p->xmit_cbuffs[p->xmit_count], 0,
+ ETH_ZLEN);
+ len = ETH_ZLEN;
+ }
+ skb_copy_from_linear_data(skb, (void *)p->xmit_cbuffs[p->xmit_count], skb->len);
+
+#if (NUM_XMIT_BUFFS == 1)
+# ifdef NO_NOPCOMMANDS
+
+#ifdef DEBUG
+ if(p->scb->cus & CU_ACTIVE)
+ {
+ printk("%s: Hmmm .. CU is still running and we wanna send a new packet.\n",dev->name);
+ printk("%s: stat: %04x %04x\n",dev->name,p->scb->cus,swab16(p->xmit_cmds[0]->cmd_status));
+ }
+#endif
+
+ p->xmit_buffs[0]->size = swab16(TBD_LAST | len);
+ for(i=0;i<16;i++)
+ {
+ p->xmit_cmds[0]->cmd_status = 0;
+ WAIT_4_SCB_CMD();
+ if( (p->scb->cus & CU_STATUS) == CU_SUSPEND)
+ p->scb->cmd_cuc = CUC_RESUME;
+ else
+ {
+ p->scb->cbl_offset = make16(p->xmit_cmds[0]);
+ p->scb->cmd_cuc = CUC_START;
+ }
+
+ sun3_attn586();
+ if(!i)
+ dev_kfree_skb(skb);
+ WAIT_4_SCB_CMD();
+ if( (p->scb->cus & CU_ACTIVE)) /* test it, because CU sometimes doesn't start immediately */
+ break;
+ if(p->xmit_cmds[0]->cmd_status)
+ break;
+ if(i==15)
+ printk("%s: Can't start transmit-command.\n",dev->name);
+ }
+# else
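+ /* With NOP commands enabled, the CU idles on a pair of self-linked
+ * NOPs; the transmit command is spliced into that chain, avoiding a
+ * full CU restart for every packet. */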
+ next_nop = (p->nop_point + 1) & 0x1;
+ p->xmit_buffs[0]->size = swab16(TBD_LAST | len);
+
+ p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link
+ = make16((p->nop_cmds[next_nop]));
+ p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
+
+ p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
+ p->nop_point = next_nop;
+ dev_kfree_skb(skb);
+# endif
+#else
+ p->xmit_buffs[p->xmit_count]->size = swab16(TBD_LAST | len);
+ if( (next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS )
+ next_nop = 0;
+
+ p->xmit_cmds[p->xmit_count]->cmd_status = 0;
+ /* link pointer of the xmit command already points to the next nop cmd */
+ p->nop_cmds[next_nop]->cmd_link = make16((p->nop_cmds[next_nop]));
+ p->nop_cmds[next_nop]->cmd_status = 0;
+
+ p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
+ p->xmit_count = next_nop;
+
+ {
+ unsigned long flags;
+ local_irq_save(flags);
+ if(p->xmit_count != p->xmit_last)
+ netif_wake_queue(dev);
+ p->lock = 0;
+ local_irq_restore(flags);
+ }
+ dev_kfree_skb(skb);
+#endif
+ }
+ return NETDEV_TX_OK;
+}
+
+/*******************************************
+ * Someone wants the statistics
+ */
+
+static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev)
+{
+ struct priv *p = netdev_priv(dev);
+ unsigned short crc,aln,rsc,ovrn;
+
+ crc = swab16(p->scb->crc_errs); /* get error statistics from the i82586 */
+ p->scb->crc_errs = 0;
+ aln = swab16(p->scb->aln_errs);
+ p->scb->aln_errs = 0;
+ rsc = swab16(p->scb->rsc_errs);
+ p->scb->rsc_errs = 0;
+ ovrn = swab16(p->scb->ovrn_errs);
+ p->scb->ovrn_errs = 0;
+
+ dev->stats.rx_crc_errors += crc;
+ dev->stats.rx_fifo_errors += ovrn;
+ dev->stats.rx_frame_errors += aln;
+ dev->stats.rx_dropped += rsc;
+
+ return &dev->stats;
+}
+
+/********************************************************
+ * Set MC list ..
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ sun3_disint();
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+ sun3_enaint();
+ netif_wake_queue(dev);
+}
+
+#ifdef MODULE
+#error This code is not currently supported as a module
+static struct net_device *dev_sun3_82586;
+
+int init_module(void)
+{
+ dev_sun3_82586 = sun3_82586_probe(-1);
+ if (IS_ERR(dev_sun3_82586))
+ return PTR_ERR(dev_sun3_82586);
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ unsigned long ioaddr = dev_sun3_82586->base_addr;
+ unregister_netdev(dev_sun3_82586);
+ release_region(ioaddr, SUN3_82586_TOTAL_SIZE);
+ iounmap((void *)ioaddr);
+ free_netdev(dev_sun3_82586);
+}
+#endif /* MODULE */
+
+#if 0
+/*
+ * DUMP .. we expect a stopped command unit and enough space
+ */
+void sun3_82586_dump(struct net_device *dev,void *ptr)
+{
+ struct priv *p = netdev_priv(dev);
+ struct dump_cmd_struct *dump_cmd = (struct dump_cmd_struct *) ptr;
+ int i;
+
+ p->scb->cmd_cuc = CUC_ABORT;
+ sun3_attn586();
+ WAIT_4_SCB_CMD();
+ WAIT_4_SCB_CMD_RUC();
+
+ dump_cmd->cmd_status = 0;
+ dump_cmd->cmd_cmd = CMD_DUMP | CMD_LAST;
+ dump_cmd->dump_offset = make16((dump_cmd + 1));
+ dump_cmd->cmd_link = 0xffff;
+
+ p->scb->cbl_offset = make16(dump_cmd);
+ p->scb->cmd_cuc = CUC_START;
+ sun3_attn586();
+ WAIT_4_STAT_COMPL(dump_cmd);
+
+ if( (dump_cmd->cmd_status & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) )
+ printk("%s: Can't get dump information.\n",dev->name);
+
+ for(i=0;i<170;i++) {
+ printk("%02x ",(int) ((unsigned char *) (dump_cmd + 1))[i]);
+ if(i % 24 == 23)
+ printk("\n");
+ }
+ printk("\n");
+}
+#endif
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.h b/drivers/net/ethernet/i825xx/sun3_82586.h
new file mode 100644
index 000000000000..93346f00486b
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/sun3_82586.h
@@ -0,0 +1,318 @@
+/*
+ * Intel i82586 Ethernet definitions
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same GNU General Public License that covers that work.
+ *
+ * copyrights (c) 1994 by Michael Hipp (hippm@informatik.uni-tuebingen.de)
+ *
+ * I have consulted the following sources:
+ * crynwr-packet-driver by Russ Nelson
+ * Garret A. Wollman's i82586-driver for BSD
+ */
+
+/*
+ * Cloned from ni52.h, copyright as above.
+ *
+ * Modified for Sun3 OBIO i82586 by Sam Creasey (sammy@sammy.net)
+ */
+
+
+/* defines for the obio chip (not vme) */
+#define IEOB_NORSET 0x80 /* don't reset the board */
+#define IEOB_ONAIR 0x40 /* put us on the air */
+#define IEOB_ATTEN 0x20 /* attention! */
+#define IEOB_IENAB 0x10 /* interrupt enable */
+#define IEOB_XXXXX 0x08 /* free bit */
+#define IEOB_XCVRL2 0x04 /* level 2 transceiver? */
+#define IEOB_BUSERR 0x02 /* bus error */
+#define IEOB_INT 0x01 /* interrupt */
+
+/* where the obio one lives */
+#define IE_OBIO 0xc0000
+#define IE_IRQ 3
+
+/*
+ * where to find the System Configuration Pointer (SCP)
+ */
+#define SCP_DEFAULT_ADDRESS 0xfffff4
+
+
+/*
+ * System Configuration Pointer Struct
+ */
+
+struct scp_struct
+{
+ unsigned short zero_dum0; /* has to be zero */
+ unsigned char sysbus; /* 0=16Bit,1=8Bit */
+ unsigned char zero_dum1; /* has to be zero for 586 */
+ unsigned short zero_dum2;
+ unsigned short zero_dum3;
+ char *iscp; /* pointer to the iscp-block */
+};
+
+
+/*
+ * Intermediate System Configuration Pointer (ISCP)
+ */
+struct iscp_struct
+{
+ unsigned char busy; /* 586 clears after successful init */
+ unsigned char zero_dummy; /* has to be zero */
+ unsigned short scb_offset; /* pointer offset to the scb_base */
+ char *scb_base; /* base-address of all 16-bit offsets */
+};
+
+/*
+ * System Control Block (SCB)
+ */
+struct scb_struct
+{
+ unsigned char rus;
+ unsigned char cus;
+ unsigned char cmd_ruc; /* command word: RU part */
+ unsigned char cmd_cuc; /* command word: CU part & ACK */
+ unsigned short cbl_offset; /* pointer offset, command block list */
+ unsigned short rfa_offset; /* pointer offset, receive frame area */
+ unsigned short crc_errs; /* CRC error counter */
+ unsigned short aln_errs; /* alignment error counter */
+ unsigned short rsc_errs; /* resource error counter */
+ unsigned short ovrn_errs; /* overrun error counter */
+};
+
+/*
+ * possible command values for the command word
+ */
+#define RUC_MASK 0x0070 /* mask for RU commands */
+#define RUC_NOP 0x0000 /* NOP-command */
+#define RUC_START 0x0010 /* start RU */
+#define RUC_RESUME 0x0020 /* resume RU after suspend */
+#define RUC_SUSPEND 0x0030 /* suspend RU */
+#define RUC_ABORT 0x0040 /* abort receiver operation immediately */
+
+#define CUC_MASK 0x07 /* mask for CU command */
+#define CUC_NOP 0x00 /* NOP-command */
+#define CUC_START 0x01 /* start execution of 1. cmd on the CBL */
+#define CUC_RESUME 0x02 /* resume after suspend */
+#define CUC_SUSPEND 0x03 /* Suspend CU */
+#define CUC_ABORT 0x04 /* abort command operation immediately */
+
+#define ACK_MASK 0xf0 /* mask for ACK command */
+#define ACK_CX 0x80 /* acknowledges STAT_CX */
+#define ACK_FR 0x40 /* ack. STAT_FR */
+#define ACK_CNA 0x20 /* ack. STAT_CNA */
+#define ACK_RNR 0x10 /* ack. STAT_RNR */
+
+/*
+ * possible status values for the status word
+ */
+#define STAT_MASK 0xf0 /* mask for cause of interrupt */
+#define STAT_CX 0x80 /* CU finished cmd with its I bit set */
+#define STAT_FR 0x40 /* RU finished receiving a frame */
+#define STAT_CNA 0x20 /* CU left active state */
+#define STAT_RNR 0x10 /* RU left ready state */
+
+#define CU_STATUS 0x7 /* CU status, 0=idle */
+#define CU_SUSPEND 0x1 /* CU is suspended */
+#define CU_ACTIVE 0x2 /* CU is active */
+
+#define RU_STATUS 0x70 /* RU status, 0=idle */
+#define RU_SUSPEND 0x10 /* RU suspended */
+#define RU_NOSPACE 0x20 /* RU no resources */
+#define RU_READY 0x40 /* RU is ready */
+
+/*
+ * Receive Frame Descriptor (RFD)
+ */
+struct rfd_struct
+{
+ unsigned char stat_low; /* status word */
+ unsigned char stat_high; /* status word */
+ unsigned char rfd_sf; /* 82596 mode only */
+ unsigned char last; /* bit 15: last frame on list / bit 14: suspend */
+ unsigned short next; /* link offset to next RFD */
+ unsigned short rbd_offset; /* pointer offset to RBD buffer */
+ unsigned char dest[6]; /* ethernet-address, destination */
+ unsigned char source[6]; /* ethernet-address, source */
+ unsigned short length; /* 802.3 frame-length */
+ unsigned short zero_dummy; /* dummy */
+};
+
+#define RFD_LAST 0x80 /* last: last rfd in the list */
+#define RFD_SUSP 0x40 /* last: suspend RU after */
+#define RFD_COMPL 0x80
+#define RFD_OK 0x20
+#define RFD_BUSY 0x40
+#define RFD_ERR_LEN 0x10 /* Length error (if length checking is enabled) */
+#define RFD_ERR_CRC 0x08 /* CRC error */
+#define RFD_ERR_ALGN 0x04 /* Alignment error */
+#define RFD_ERR_RNR 0x02 /* status: receiver out of resources */
+#define RFD_ERR_OVR 0x01 /* DMA Overrun! */
+
+#define RFD_ERR_FTS 0x0080 /* Frame too short */
+#define RFD_ERR_NEOP 0x0040 /* No EOP flag (for bitstuffing only) */
+#define RFD_ERR_TRUN 0x0020 /* (82596 only/SF mode) indicates truncated frame */
+#define RFD_MATCHADD 0x0002 /* status: destination address doesn't match IA (only 82596) */
+#define RFD_COLLDET 0x0001 /* Detected collision during reception */
+
+/*
+ * Receive Buffer Descriptor (RBD)
+ */
+struct rbd_struct
+{
+ unsigned short status; /* status word, number of used bytes in buffer */
+ unsigned short next; /* pointer offset to next RBD */
+ char *buffer; /* receive buffer address pointer */
+ unsigned short size; /* size of this buffer */
+ unsigned short zero_dummy; /* dummy */
+};
+
+#define RBD_LAST 0x8000 /* last buffer */
+#define RBD_USED 0x4000 /* this buffer has data */
+#define RBD_MASK 0x3fff /* size-mask for length */
+
+/*
+ * Status values for commands/RFD
+ */
+#define STAT_COMPL 0x8000 /* status: frame/command is complete */
+#define STAT_BUSY 0x4000 /* status: frame/command is busy */
+#define STAT_OK 0x2000 /* status: frame/command is ok */
+
+/*
+ * Action-Commands
+ */
+#define CMD_NOP 0x0000 /* NOP */
+#define CMD_IASETUP 0x0001 /* initial address setup command */
+#define CMD_CONFIGURE 0x0002 /* configure command */
+#define CMD_MCSETUP 0x0003 /* MC setup command */
+#define CMD_XMIT 0x0004 /* transmit command */
+#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */
+#define CMD_DUMP 0x0006 /* dump command */
+#define CMD_DIAGNOSE 0x0007 /* diagnose command */
+
+/*
+ * Action command bits
+ */
+#define CMD_LAST 0x8000 /* indicates last command in the CBL */
+#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */
+#define CMD_INT 0x2000 /* generate interrupt after execution */
+
+/*
+ * NOP - command
+ */
+struct nop_cmd_struct
+{
+ unsigned short cmd_status; /* status of this command */
+ unsigned short cmd_cmd; /* the command itself (+bits) */
+ unsigned short cmd_link; /* offset pointer to next command */
+};
+
+/*
+ * IA Setup command
+ */
+struct iasetup_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned char iaddr[6];
+};
+
+/*
+ * Configure command
+ */
+struct configure_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned char byte_cnt; /* size of the config-cmd */
+ unsigned char fifo; /* fifo/recv monitor */
+ unsigned char sav_bf; /* save bad frames (bit7=1)*/
+ unsigned char adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopback(6-7)*/
+ unsigned char priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
+ unsigned char ifs; /* inter frame spacing */
+ unsigned char time_low; /* slot time low */
+ unsigned char time_high; /* slot time high(0-2) and max. retries(4-7) */
+ unsigned char promisc; /* promisc-mode(0) , et al (1-7) */
+ unsigned char carr_coll; /* carrier(0-3)/collision(4-7) stuff */
+ unsigned char fram_len; /* minimal frame len */
+ unsigned char dummy; /* dummy */
+};
+
+/*
+ * Multicast Setup command
+ */
+struct mcsetup_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short mc_cnt; /* number of bytes in the MC-List */
+ unsigned char mc_list[0][6]; /* array of 6-byte address entries */
+};
+
+/*
+ * DUMP command
+ */
+struct dump_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short dump_offset; /* pointer offset to DUMP space */
+};
+
+/*
+ * transmit command
+ */
+struct transmit_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short tbd_offset; /* pointer offset to TBD */
+ unsigned char dest[6]; /* destination address of the frame */
+ unsigned short length; /* user defined: 802.3 length / Ether type */
+};
+
+#define TCMD_ERRMASK 0x0fa0
+#define TCMD_MAXCOLLMASK 0x000f
+#define TCMD_MAXCOLL 0x0020
+#define TCMD_HEARTBEAT 0x0040
+#define TCMD_DEFERRED 0x0080
+#define TCMD_UNDERRUN 0x0100
+#define TCMD_LOSTCTS 0x0200
+#define TCMD_NOCARRIER 0x0400
+#define TCMD_LATECOLL 0x0800
+
+struct tdr_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short status;
+};
+
+#define TDR_LNK_OK 0x8000 /* No link problem identified */
+#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */
+#define TDR_ET_OPN 0x2000 /* open, no correct termination */
+#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */
+#define TDR_TIMEMASK 0x07ff /* mask for the time field */
+
+/*
+ * Transmit Buffer Descriptor (TBD)
+ */
+struct tbd_struct
+{
+ unsigned short size; /* size + EOF-Flag(15) */
+ unsigned short next; /* pointer offset to next TBD */
+ char *buffer; /* pointer to buffer */
+};
+
+#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */
+
+
+
+
diff --git a/drivers/net/ethernet/i825xx/znet.c b/drivers/net/ethernet/i825xx/znet.c
new file mode 100644
index 000000000000..962b4c421f3f
--- /dev/null
+++ b/drivers/net/ethernet/i825xx/znet.c
@@ -0,0 +1,924 @@
+/* znet.c: A Zenith Z-Note ethernet driver for Linux. */
+
+/*
+ Written by Donald Becker.
+
+ The author may be reached as becker@scyld.com.
+ This driver is based on the Linux skeleton driver. The copyright of the
+ skeleton driver is held by the United States Government, as represented
+ by DIRNSA, and it is released under the GPL.
+
+ Thanks to Mike Hollick for alpha testing and suggestions.
+
+ References:
+ The Crynwr packet driver.
+
+ "82593 CSMA/CD Core LAN Controller" Intel datasheet, 1992
+ Intel Microcommunications Databook, Vol. 1, 1990.
+ As usual with Intel, the documentation is incomplete and inaccurate.
+ I had to read the Crynwr packet driver to figure out how to actually
+ use the i82593, and guess at what register bits matched the loosely
+ related i82586.
+
+ Theory of Operation
+
+ The i82593 used in the Zenith Z-Note series operates using two(!) slave
+ DMA channels, one interrupt, and one 8-bit I/O port.
+
+ While there are several ways to configure the '593 DMA system, I chose
+ the one that seemed commensurate with the highest system performance in
+ the face of moderate interrupt latency: Both DMA channels are configured
+ as recirculating ring buffers, with one channel (#0) dedicated to Rx and
+ the other channel (#1) to Tx and configuration. (Note that this is
+ different from the Crynwr driver, where the Tx DMA channel is initialized
+ before each operation. That approach simplifies operation and Tx error
+ recovery, but requires additional I/O in normal operation and precludes
+ transmit buffer chaining.)
+
+ Both rings are set to 8192 bytes using {TX,RX}_BUF_SIZE. This provides
+ a reasonable ring size for Rx, while simplifying DMA buffer allocation --
+ DMA buffers must not cross a 128K boundary. (In truth the size selection
+ was influenced by my lack of '593 documentation. I thus was constrained
+ to use the Crynwr '593 initialization table, which sets the Rx ring size
+ to 8K.)
+
+ Despite my usual low opinion about Intel-designed parts, I must admit
+ that the bulk data handling of the i82593 is a good design for
+ an integrated system, like a laptop, where using two slave DMA channels
+ doesn't pose a problem. I still take issue with using only a single I/O
+ port. In the same controlled environment there are essentially no
+ limitations on I/O space, and using multiple locations would eliminate
+ the need for multiple operations when looking at status registers,
+ setting the Rx ring boundary, or switching to promiscuous mode.
+
+ I also question Zenith's selection of the '593: one of the advertised
+ advantages of earlier Intel parts was that if you figured out the magic
+ initialization incantation you could use the same part on many different
+ network types. Zenith's use of the "FriendlyNet" (sic) connector rather
+ than an on-board transceiver leads me to believe that they were planning
+ to take advantage of this. But, uhmmm, the '593 omits all but ethernet
+ functionality from the serial subsystem.
+ */
+
+/* 10/2002
+
+ o Resurrected for Linux 2.5+ by Marc Zyngier <maz@wild-wind.fr.eu.org> :
+
+ - Removed strange DMA snooping in znet_send_packet, which led to
+ TX buffer corruption on my laptop.
+ - Use init_etherdev stuff.
+ - Use kmalloc-ed DMA buffers.
+ - Use as few global variables as possible.
+ - Use proper resources management.
+ - Use wireless/i82593.h as much as possible (structure, constants)
+ - Compiles as module or build-in.
+ - Now survives unplugging/replugging cable.
+
+ Some code was taken from wavelan_cs.
+
+ Tested on a vintage Zenith Z-Note 433Lnp+. Probably broken on
+ anything else. Testers (and detailed bug reports) are welcome :-).
+
+ o TODO :
+
+ - Properly handle multicast
+ - Understand why some traffic patterns add a 1s latency...
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/i82593.h>
+
+static char version[] __initdata = "znet.c:v1.02 9/23/94 becker@scyld.com\n";
+
+#ifndef ZNET_DEBUG
+#define ZNET_DEBUG 1
+#endif
+static unsigned int znet_debug = ZNET_DEBUG;
+module_param (znet_debug, int, 0);
+MODULE_PARM_DESC (znet_debug, "ZNet debug level");
+MODULE_LICENSE("GPL");
+
+/* The DMA modes we need aren't in <dma.h>. */
+#define DMA_RX_MODE 0x14 /* Auto init, I/O to mem, ++, demand. */
+#define DMA_TX_MODE 0x18 /* Auto init, Mem to I/O, ++, demand. */
+#define dma_page_eq(ptr1, ptr2) ((long)(ptr1)>>17 == (long)(ptr2)>>17)
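+/* dma_page_eq: two buffers sit on the same 128K ISA-DMA page iff their
+ addresses agree above bit 16; e.g. 0x1ffff and 0x20000 would fail. */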
+#define RX_BUF_SIZE 8192
+#define TX_BUF_SIZE 8192
+#define DMA_BUF_SIZE (RX_BUF_SIZE + 16) /* 8k + 16 bytes for trailers */
+
+#define TX_TIMEOUT (HZ/10)
+
+struct znet_private {
+ int rx_dma, tx_dma;
+ spinlock_t lock;
+ short sia_base, sia_size, io_size;
+ struct i82593_conf_block i593_init;
+ /* The starting, current, and end pointers for the packet buffers. */
+ ushort *rx_start, *rx_cur, *rx_end;
+ ushort *tx_start, *tx_cur, *tx_end;
+ ushort tx_buf_len; /* Tx buffer length, in words. */
+};
+
+/* Only one can be built-in;-> */
+static struct net_device *znet_dev;
+
+struct netidblk {
+ char magic[8]; /* The magic number (string) "NETIDBLK" */
+ unsigned char netid[8]; /* The physical station address */
+ char nettype, globalopt;
+ char vendor[8]; /* The machine vendor and product name. */
+ char product[8];
+ char irq1, irq2; /* Interrupts, only one is currently used. */
+ char dma1, dma2;
+ short dma_mem_misc[8]; /* DMA buffer locations (unused in Linux). */
+ short iobase1, iosize1;
+ short iobase2, iosize2; /* Second iobase unused. */
+ char driver_options; /* Misc. bits */
+ char pad;
+};
+
+static int znet_open(struct net_device *dev);
+static netdev_tx_t znet_send_packet(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t znet_interrupt(int irq, void *dev_id);
+static void znet_rx(struct net_device *dev);
+static int znet_close(struct net_device *dev);
+static void hardware_init(struct net_device *dev);
+static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset);
+static void znet_tx_timeout (struct net_device *dev);
+
+/* Request needed resources */
+static int znet_request_resources (struct net_device *dev)
+{
+ struct znet_private *znet = netdev_priv(dev);
+
+ if (request_irq (dev->irq, znet_interrupt, 0, "ZNet", dev))
+ goto failed;
+ if (request_dma (znet->rx_dma, "ZNet rx"))
+ goto free_irq;
+ if (request_dma (znet->tx_dma, "ZNet tx"))
+ goto free_rx_dma;
+ if (!request_region (znet->sia_base, znet->sia_size, "ZNet SIA"))
+ goto free_tx_dma;
+ if (!request_region (dev->base_addr, znet->io_size, "ZNet I/O"))
+ goto free_sia;
+
+ return 0; /* Happy ! */
+
+ free_sia:
+ release_region (znet->sia_base, znet->sia_size);
+ free_tx_dma:
+ free_dma (znet->tx_dma);
+ free_rx_dma:
+ free_dma (znet->rx_dma);
+ free_irq:
+ free_irq (dev->irq, dev);
+ failed:
+ return -1;
+}
+
+static void znet_release_resources (struct net_device *dev)
+{
+ struct znet_private *znet = netdev_priv(dev);
+
+ release_region (znet->sia_base, znet->sia_size);
+ release_region (dev->base_addr, znet->io_size);
+ free_dma (znet->tx_dma);
+ free_dma (znet->rx_dma);
+ free_irq (dev->irq, dev);
+}
+
+/* Keep the magical SIA stuff in a single function... */
+static void znet_transceiver_power (struct net_device *dev, int on)
+{
+ struct znet_private *znet = netdev_priv(dev);
+ unsigned char v;
+
+ /* Turn on/off the 82501 SIA, using zenith-specific magic. */
+ /* Select LAN control register */
+ outb(0x10, znet->sia_base);
+
+ if (on)
+ v = inb(znet->sia_base + 1) | 0x84;
+ else
+ v = inb(znet->sia_base + 1) & ~0x84;
+
+ outb(v, znet->sia_base+1); /* Turn on/off LAN power (bit 2). */
+}
+
+/* Init the i82593, with current promisc/mcast configuration.
+ Also used from hardware_init. */
+static void znet_set_multicast_list (struct net_device *dev)
+{
+ struct znet_private *znet = netdev_priv(dev);
+ short ioaddr = dev->base_addr;
+ struct i82593_conf_block *cfblk = &znet->i593_init;
+
+ memset(cfblk, 0x00, sizeof(struct i82593_conf_block));
+
+ /* The configuration block. What an undocumented nightmare.
+ The first set of values are those suggested (without explanation)
+ for ethernet in the Intel 82586 databook. The rest appear to be
+ completely undocumented, except for cryptic notes in the Crynwr
+ packet driver. This driver uses the Crynwr values verbatim. */
+
+ /* maz : Rewritten to take advantage of the wavelan includes.
+ At least we have names, not just blind values */
+
+ /* Byte 0 */
+ cfblk->fifo_limit = 10; /* = 16 B rx and 80 B tx fifo thresholds */
+ cfblk->forgnesi = 0; /* 0=82C501, 1=AMD7992B compatibility */
+ cfblk->fifo_32 = 1;
+ cfblk->d6mod = 0; /* Run in i82593 advanced mode */
+ cfblk->throttle_enb = 1;
+
+ /* Byte 1 */
+ cfblk->throttle = 8; /* Continuous w/interrupts, 128-clock DMA. */
+ cfblk->cntrxint = 0; /* enable continuous mode receive interrupts */
+ cfblk->contin = 1; /* enable continuous mode */
+
+ /* Byte 2 */
+ cfblk->addr_len = ETH_ALEN;
+ cfblk->acloc = 1; /* Disable source addr insertion by i82593 */
+ cfblk->preamb_len = 2; /* 8 bytes preamble */
+ cfblk->loopback = 0; /* Loopback off */
+
+ /* Byte 3 */
+ cfblk->lin_prio = 0; /* Default priorities & backoff methods. */
+ cfblk->tbofstop = 0;
+ cfblk->exp_prio = 0;
+ cfblk->bof_met = 0;
+
+ /* Byte 4 */
+ cfblk->ifrm_spc = 6; /* 96 bit times interframe spacing */
+
+ /* Byte 5 */
+ cfblk->slottim_low = 0; /* 512 bit times slot time (low) */
+
+ /* Byte 6 */
+ cfblk->slottim_hi = 2; /* 512 bit times slot time (high) */
+ cfblk->max_retr = 15; /* 15 collisions retries */
+
+ /* Byte 7 */
+ cfblk->prmisc = ((dev->flags & IFF_PROMISC) ? 1 : 0); /* Promiscuous mode */
+ cfblk->bc_dis = 0; /* Enable broadcast reception */
+ cfblk->crs_1 = 0; /* Don't transmit without carrier sense */
+ cfblk->nocrc_ins = 0; /* i82593 generates CRC */
+ cfblk->crc_1632 = 0; /* 32-bit Autodin-II CRC */
+ cfblk->crs_cdt = 0; /* CD not to be interpreted as CS */
+
+ /* Byte 8 */
+ cfblk->cs_filter = 0; /* CS is recognized immediately */
+ cfblk->crs_src = 0; /* External carrier sense */
+ cfblk->cd_filter = 0; /* CD is recognized immediately */
+
+ /* Byte 9 */
+ cfblk->min_fr_len = ETH_ZLEN >> 2; /* Minimum frame length */
+
+ /* Byte A */
+ cfblk->lng_typ = 1; /* Type/length checks OFF */
+ cfblk->lng_fld = 1; /* Disable 802.3 length field check */
+ cfblk->rxcrc_xf = 1; /* Don't transfer CRC to memory */
+ cfblk->artx = 1; /* Disable automatic retransmission */
+ cfblk->sarec = 1; /* Disable source addr trig of CD */
+ cfblk->tx_jabber = 0; /* Disable jabber jam sequence */
+ cfblk->hash_1 = 1; /* Use bits 0-5 in mc address hash */
+ cfblk->lbpkpol = 0; /* Loopback pin active high */
+
+ /* Byte B */
+ cfblk->fdx = 0; /* Disable full duplex operation */
+
+ /* Byte C */
+ cfblk->dummy_6 = 0x3f; /* all ones, Default multicast addresses & backoff. */
+ cfblk->mult_ia = 0; /* No multiple individual addresses */
+ cfblk->dis_bof = 0; /* Disable the backoff algorithm ?! */
+
+ /* Byte D */
+ cfblk->dummy_1 = 1; /* set to 1 */
+ cfblk->tx_ifs_retrig = 3; /* Hmm... Disabled */
+ cfblk->mc_all = (!netdev_mc_empty(dev) ||
+ (dev->flags & IFF_ALLMULTI)); /* multicast all mode */
+ cfblk->rcv_mon = 0; /* Monitor mode disabled */
+ cfblk->frag_acpt = 0; /* Do not accept fragments */
+ cfblk->tstrttrs = 0; /* No start transmission threshold */
+
+ /* Byte E */
+ cfblk->fretx = 1; /* FIFO automatic retransmission */
+ cfblk->runt_eop = 0; /* drop "runt" packets */
+ cfblk->hw_sw_pin = 0; /* ?? */
+ cfblk->big_endn = 0; /* Big Endian ? no... */
+ cfblk->syncrqs = 1; /* Synchronous DRQ deassertion... */
+ cfblk->sttlen = 1; /* 6 byte status registers */
+ cfblk->rx_eop = 0; /* Signal EOP on packet reception */
+ cfblk->tx_eop = 0; /* Signal EOP on packet transmission */
+
+ /* Byte F */
+ cfblk->rbuf_size = RX_BUF_SIZE >> 12; /* Set receive buffer size */
+ cfblk->rcvstop = 1; /* Enable Receive Stop Register */
+
+ if (znet_debug > 2) {
+ int i;
+ unsigned char *c;
+
+ for (i = 0, c = (unsigned char *) cfblk; i < sizeof (*cfblk); i++)
+ printk ("%02X ", c[i]);
+ printk ("\n");
+ }
+
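+ /* The configuration block travels through the Tx DMA ring: a byte
+ count word, then the block itself, then the CONFIGURE command on
+ the control port below. */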
+ *znet->tx_cur++ = sizeof(struct i82593_conf_block);
+ memcpy(znet->tx_cur, cfblk, sizeof(struct i82593_conf_block));
+ znet->tx_cur += sizeof(struct i82593_conf_block)/2;
+ outb(OP0_CONFIGURE | CR0_CHNL, ioaddr);
+
+ /* XXX FIXME maz : Add multicast addresses here, so having a
+ * multicast address configured isn't equal to IFF_ALLMULTI */
+}
+
+static const struct net_device_ops znet_netdev_ops = {
+ .ndo_open = znet_open,
+ .ndo_stop = znet_close,
+ .ndo_start_xmit = znet_send_packet,
+ .ndo_set_rx_mode = znet_set_multicast_list,
+ .ndo_tx_timeout = znet_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/* The Z-Note probe is pretty easy. The NETIDBLK exists in the safe-to-probe
+ BIOS area. We just scan for the signature, and pull the vital parameters
+ out of the structure. */
+
+static int __init znet_probe (void)
+{
+ int i;
+ struct netidblk *netinfo;
+ struct znet_private *znet;
+ struct net_device *dev;
+ char *p;
+ int err = -ENOMEM;
+
+ /* This code scans the region 0xf0000 to 0xfffff for a "NETIDBLK". */
+ for(p = (char *)phys_to_virt(0xf0000); p < (char *)phys_to_virt(0x100000); p++)
+ if (*p == 'N' && strncmp(p, "NETIDBLK", 8) == 0)
+ break;
+
+ if (p >= (char *)phys_to_virt(0x100000)) {
+ if (znet_debug > 1)
+ printk(KERN_INFO "No Z-Note ethernet adaptor found.\n");
+ return -ENODEV;
+ }
+
+ dev = alloc_etherdev(sizeof(struct znet_private));
+ if (!dev)
+ return -ENOMEM;
+
+ znet = netdev_priv(dev);
+
+ netinfo = (struct netidblk *)p;
+ dev->base_addr = netinfo->iobase1;
+ dev->irq = netinfo->irq1;
+
+ /* The station address is in the "netidblk" at 0x0f0000. */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = netinfo->netid[i];
+
+ printk(KERN_INFO "%s: ZNET at %#3lx, %pM"
+ ", using IRQ %d DMA %d and %d.\n",
+ dev->name, dev->base_addr, dev->dev_addr,
+ dev->irq, netinfo->dma1, netinfo->dma2);
+
+ if (znet_debug > 1) {
+ printk(KERN_INFO "%s: vendor '%16.16s' IRQ1 %d IRQ2 %d DMA1 %d DMA2 %d.\n",
+ dev->name, netinfo->vendor,
+ netinfo->irq1, netinfo->irq2,
+ netinfo->dma1, netinfo->dma2);
+ printk(KERN_INFO "%s: iobase1 %#x size %d iobase2 %#x size %d net type %2.2x.\n",
+ dev->name, netinfo->iobase1, netinfo->iosize1,
+ netinfo->iobase2, netinfo->iosize2, netinfo->nettype);
+ }
+
+ if (znet_debug > 0)
+ printk(KERN_INFO "%s", version);
+
+ znet->rx_dma = netinfo->dma1;
+ znet->tx_dma = netinfo->dma2;
+ spin_lock_init(&znet->lock);
+ znet->sia_base = 0xe6; /* Magic address for the 82501 SIA */
+ znet->sia_size = 2;
+ /* maz: Despite the '593 being advertised above as using a
+ * single 8-bit I/O port, this driver makes many 16-bit
+ * accesses. So set io_size accordingly */
+ znet->io_size = 2;
+
+ if (!(znet->rx_start = kmalloc (DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA)))
+ goto free_dev;
+ if (!(znet->tx_start = kmalloc (DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA)))
+ goto free_rx;
+
+ if (!dma_page_eq (znet->rx_start, znet->rx_start + (RX_BUF_SIZE/2-1)) ||
+ !dma_page_eq (znet->tx_start, znet->tx_start + (TX_BUF_SIZE/2-1))) {
+ printk (KERN_WARNING "tx/rx crossing DMA frontiers, giving up\n");
+ goto free_tx;
+ }
+
+ znet->rx_end = znet->rx_start + RX_BUF_SIZE/2;
+ znet->tx_buf_len = TX_BUF_SIZE/2;
+ znet->tx_end = znet->tx_start + znet->tx_buf_len;
+
+ /* The ZNET-specific entries in the device structure. */
+ dev->netdev_ops = &znet_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ err = register_netdev(dev);
+ if (err)
+ goto free_tx;
+ znet_dev = dev;
+ return 0;
+
+ free_tx:
+ kfree(znet->tx_start);
+ free_rx:
+ kfree(znet->rx_start);
+ free_dev:
+ free_netdev(dev);
+ return err;
+}
+
+
+static int znet_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (znet_debug > 2)
+ printk(KERN_DEBUG "%s: znet_open() called.\n", dev->name);
+
+ /* These should never fail. You can't add devices to a sealed box! */
+ if (znet_request_resources (dev)) {
+ printk(KERN_WARNING "%s: Not opened -- resource busy?!?\n", dev->name);
+ return -EBUSY;
+ }
+
+ znet_transceiver_power (dev, 1);
+
+ /* According to the Crynwr driver we should wait 50 msec. for the
+ LAN clock to stabilize. My experiments indicate that the '593 can
+ be initialized immediately. The delay is probably needed for the
+ DC-to-DC converter to come up to full voltage, and for the oscillator
+ to be spot-on at 20 MHz before transmitting.
+ Until this proves to be a problem we rely on the higher layers for the
+ delay and save allocating a timer entry. */
+
+ /* maz : Well, without the delay I get the following message
+ * every time on a 486@33. This machine is much too
+ * fast... :-) So maybe the Crynwr driver wasn't wrong after
+ * all, even if the message is completely harmless on my
+ * setup. */
+ mdelay (50);
+
+ /* This follows the packet driver's lead, and checks for success. */
+ if (inb(ioaddr) != 0x10 && inb(ioaddr) != 0x00)
+ printk(KERN_WARNING "%s: Problem turning on the transceiver power.\n",
+ dev->name);
+
+ hardware_init(dev);
+ netif_start_queue (dev);
+
+ return 0;
+}
+
+
+static void znet_tx_timeout (struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ ushort event, tx_status, rx_offset, state;
+
+ outb (CR0_STATUS_0, ioaddr);
+ event = inb (ioaddr);
+ outb (CR0_STATUS_1, ioaddr);
+ tx_status = inw (ioaddr);
+ outb (CR0_STATUS_2, ioaddr);
+ rx_offset = inw (ioaddr);
+ outb (CR0_STATUS_3, ioaddr);
+ state = inb (ioaddr);
+ printk (KERN_WARNING "%s: transmit timed out, status %02x %04x %04x %02x,"
+ " resetting.\n", dev->name, event, tx_status, rx_offset, state);
+ if (tx_status == TX_LOST_CRS)
+ printk (KERN_WARNING "%s: Tx carrier error, check transceiver cable.\n",
+ dev->name);
+ outb (OP0_RESET, ioaddr);
+ hardware_init (dev);
+ netif_wake_queue (dev);
+}
+
+static netdev_tx_t znet_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct znet_private *znet = netdev_priv(dev);
+ unsigned long flags;
+ short length = skb->len;
+
+ if (znet_debug > 4)
+ printk(KERN_DEBUG "%s: ZNet_send_packet.\n", dev->name);
+
+ if (length < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ length = ETH_ZLEN;
+ }
+
+ netif_stop_queue (dev);
+
+ /* Check that the part hasn't reset itself, probably from suspend. */
+ outb(CR0_STATUS_0, ioaddr);
+ if (inw(ioaddr) == 0x0010 &&
+ inw(ioaddr) == 0x0000 &&
+ inw(ioaddr) == 0x0010) {
+ if (znet_debug > 1)
+ printk (KERN_WARNING "%s : waking up\n", dev->name);
+ hardware_init(dev);
+ znet_transceiver_power (dev, 1);
+ }
+
+ if (1) {
+ unsigned char *buf = (void *)skb->data;
+ ushort *tx_link = znet->tx_cur - 1;
+ ushort rnd_len = (length + 1)>>1;
+
+ dev->stats.tx_bytes+=length;
+
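+ /* Copy the frame into the Tx ring as a word count followed by the
+ data, splitting the copy in two where it would run past the end
+ of the ring buffer. */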
+ if (znet->tx_cur >= znet->tx_end)
+ znet->tx_cur = znet->tx_start;
+ *znet->tx_cur++ = length;
+ if (znet->tx_cur + rnd_len + 1 > znet->tx_end) {
+ int semi_cnt = (znet->tx_end - znet->tx_cur)<<1; /* Cvrt to byte cnt. */
+ memcpy(znet->tx_cur, buf, semi_cnt);
+ rnd_len -= semi_cnt>>1;
+ memcpy(znet->tx_start, buf + semi_cnt, length - semi_cnt);
+ znet->tx_cur = znet->tx_start + rnd_len;
+ } else {
+ memcpy(znet->tx_cur, buf, skb->len);
+ znet->tx_cur += rnd_len;
+ }
+ *znet->tx_cur++ = 0;
+
+ spin_lock_irqsave(&znet->lock, flags);
+ {
+ *tx_link = OP0_TRANSMIT | CR0_CHNL;
+ /* Is this always safe to do? */
+ outb(OP0_TRANSMIT | CR0_CHNL, ioaddr);
+ }
+ spin_unlock_irqrestore (&znet->lock, flags);
+
+ netif_start_queue (dev);
+
+ if (znet_debug > 4)
+ printk(KERN_DEBUG "%s: Transmitter queued, length %d.\n", dev->name, length);
+ }
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+/* The ZNET interrupt handler. */
+static irqreturn_t znet_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct znet_private *znet = netdev_priv(dev);
+ int ioaddr;
+ int boguscnt = 20;
+ int handled = 0;
+
+ spin_lock (&znet->lock);
+
+ ioaddr = dev->base_addr;
+
+ outb(CR0_STATUS_0, ioaddr);
+ do {
+ ushort status = inb(ioaddr);
+ if (znet_debug > 5) {
+ ushort result, rx_ptr, running;
+ outb(CR0_STATUS_1, ioaddr);
+ result = inw(ioaddr);
+ outb(CR0_STATUS_2, ioaddr);
+ rx_ptr = inw(ioaddr);
+ outb(CR0_STATUS_3, ioaddr);
+ running = inb(ioaddr);
+ printk(KERN_DEBUG "%s: interrupt, status %02x, %04x %04x %02x serial %d.\n",
+ dev->name, status, result, rx_ptr, running, boguscnt);
+ }
+ if ((status & SR0_INTERRUPT) == 0)
+ break;
+
+ handled = 1;
+
+ if ((status & SR0_EVENT_MASK) == SR0_TRANSMIT_DONE ||
+ (status & SR0_EVENT_MASK) == SR0_RETRANSMIT_DONE ||
+ (status & SR0_EVENT_MASK) == SR0_TRANSMIT_NO_CRC_DONE) {
+ int tx_status;
+ outb(CR0_STATUS_1, ioaddr);
+ tx_status = inw(ioaddr);
+ /* It's undocumented, but tx_status seems to match the i82586. */
+ if (tx_status & TX_OK) {
+ dev->stats.tx_packets++;
+ dev->stats.collisions += tx_status & TX_NCOL_MASK;
+ } else {
+ if (tx_status & (TX_LOST_CTS | TX_LOST_CRS))
+ dev->stats.tx_carrier_errors++;
+ if (tx_status & TX_UND_RUN)
+ dev->stats.tx_fifo_errors++;
+ if (!(tx_status & TX_HRT_BEAT))
+ dev->stats.tx_heartbeat_errors++;
+ if (tx_status & TX_MAX_COL)
+ dev->stats.tx_aborted_errors++;
+ /* ...and the catch-all. */
+ if ((tx_status | (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL)) != (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL))
+ dev->stats.tx_errors++;
+
+ /* Transceiver may be stuck if cable
+ * was removed while emitting a
+ * packet. Flip it off, then on to
+ * reset it. This is very empirical,
+ * but it seems to work. */
+
+ znet_transceiver_power (dev, 0);
+ znet_transceiver_power (dev, 1);
+ }
+ netif_wake_queue (dev);
+ }
+
+ if ((status & SR0_RECEPTION) ||
+ (status & SR0_EVENT_MASK) == SR0_STOP_REG_HIT) {
+ znet_rx(dev);
+ }
+ /* Clear the interrupts we've handled. */
+ outb(CR0_INT_ACK, ioaddr);
+ } while (boguscnt--);
+
+ spin_unlock (&znet->lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+static void znet_rx(struct net_device *dev)
+{
+ struct znet_private *znet = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int boguscount = 1;
+ short next_frame_end_offset = 0; /* Offset of next frame start. */
+ short *cur_frame_end;
+ short cur_frame_end_offset;
+
+ outb(CR0_STATUS_2, ioaddr);
+ cur_frame_end_offset = inw(ioaddr);
+
+ if (cur_frame_end_offset == znet->rx_cur - znet->rx_start) {
+ printk(KERN_WARNING "%s: Interrupted, but nothing to receive, offset %03x.\n",
+ dev->name, cur_frame_end_offset);
+ return;
+ }
+
+ /* Use same method as the Crynwr driver: construct a forward list in
+ the same area of the backwards links we now have. This allows us to
+ pass packets to the upper layers in the order they were received --
+ important for fast-path sequential operations. */
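+ /* Each frame in the ring is followed by a four-word trailer (low and
+ high status bytes, then low and high count bytes, one significant
+ byte per word); the loop below rewrites each trailer in place as
+ { status, next-offset, count } so the frames can be walked forward. */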
+ while (znet->rx_start + cur_frame_end_offset != znet->rx_cur &&
+ ++boguscount < 5) {
+ unsigned short hi_cnt, lo_cnt, hi_status, lo_status;
+ int count, status;
+
+ if (cur_frame_end_offset < 4) {
+ /* Oh no, we have a special case: the frame trailer wraps around
+ the end of the ring buffer. We've saved space at the end of
+ the ring buffer for just this problem. */
+ memcpy(znet->rx_end, znet->rx_start, 8);
+ cur_frame_end_offset += (RX_BUF_SIZE/2);
+ }
+ cur_frame_end = znet->rx_start + cur_frame_end_offset - 4;
+
+ lo_status = *cur_frame_end++;
+ hi_status = *cur_frame_end++;
+ status = ((hi_status & 0xff) << 8) + (lo_status & 0xff);
+ lo_cnt = *cur_frame_end++;
+ hi_cnt = *cur_frame_end++;
+ count = ((hi_cnt & 0xff) << 8) + (lo_cnt & 0xff);
+
+ if (znet_debug > 5)
+ printk(KERN_DEBUG "Constructing trailer at location %03x, %04x %04x %04x %04x"
+ " count %#x status %04x.\n",
+ cur_frame_end_offset<<1, lo_status, hi_status, lo_cnt, hi_cnt,
+ count, status);
+ cur_frame_end[-4] = status;
+ cur_frame_end[-3] = next_frame_end_offset;
+ cur_frame_end[-2] = count;
+ next_frame_end_offset = cur_frame_end_offset;
+ cur_frame_end_offset -= ((count + 1)>>1) + 3;
+ if (cur_frame_end_offset < 0)
+ cur_frame_end_offset += RX_BUF_SIZE/2;
+ }
+
+ /* Now step forward through the list. */
+ do {
+ ushort *this_rfp_ptr = znet->rx_start + next_frame_end_offset;
+ int status = this_rfp_ptr[-4];
+ int pkt_len = this_rfp_ptr[-2];
+
+ if (znet_debug > 5)
+ printk(KERN_DEBUG "Looking at trailer ending at %04x status %04x length %03x"
+ " next %04x.\n", next_frame_end_offset<<1, status, pkt_len,
+ this_rfp_ptr[-3]<<1);
+ /* Once again we must assume that the i82586 docs apply. */
+ if ( ! (status & RX_RCV_OK)) { /* There was an error. */
+ dev->stats.rx_errors++;
+ if (status & RX_CRC_ERR) dev->stats.rx_crc_errors++;
+ if (status & RX_ALG_ERR) dev->stats.rx_frame_errors++;
+#if 0
+ if (status & 0x0200) dev->stats.rx_over_errors++; /* Wrong. */
+ if (status & 0x0100) dev->stats.rx_fifo_errors++;
+#else
+ /* maz : Wild guess... */
+ if (status & RX_OVRRUN) dev->stats.rx_over_errors++;
+#endif
+ if (status & RX_SRT_FRM) dev->stats.rx_length_errors++;
+ } else if (pkt_len > 1536) {
+ dev->stats.rx_length_errors++;
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len);
+ if (skb == NULL) {
+ if (znet_debug)
+ printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
+ dev->stats.rx_dropped++;
+ break;
+ }
+
+ if (&znet->rx_cur[(pkt_len+1)>>1] > znet->rx_end) {
+ int semi_cnt = (znet->rx_end - znet->rx_cur)<<1;
+ memcpy(skb_put(skb,semi_cnt), znet->rx_cur, semi_cnt);
+ memcpy(skb_put(skb,pkt_len-semi_cnt), znet->rx_start,
+ pkt_len - semi_cnt);
+ } else {
+ memcpy(skb_put(skb,pkt_len), znet->rx_cur, pkt_len);
+ if (znet_debug > 6) {
+ unsigned int *packet = (unsigned int *) skb->data;
+ printk(KERN_DEBUG "Packet data is %08x %08x %08x %08x.\n", packet[0],
+ packet[1], packet[2], packet[3]);
+ }
+ }
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ znet->rx_cur = this_rfp_ptr;
+ if (znet->rx_cur >= znet->rx_end)
+ znet->rx_cur -= RX_BUF_SIZE/2;
+ update_stop_hit(ioaddr, (znet->rx_cur - znet->rx_start)<<1);
+ next_frame_end_offset = this_rfp_ptr[-3];
+ if (next_frame_end_offset == 0) /* Read all the frames? */
+ break; /* Done for now */
+ this_rfp_ptr = znet->rx_start + next_frame_end_offset;
+ } while (--boguscount);
+
+ /* If any worth-while packets have been received, dev_rint()
+ has done a mark_bh(INET_BH) for us and will work on them
+ when we get to the bottom-half routine. */
+}
+
+/* The inverse routine to znet_open(). */
+static int znet_close(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ netif_stop_queue (dev);
+
+ outb(OP0_RESET, ioaddr); /* CMD0_RESET */
+
+ if (znet_debug > 1)
+ printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
+ /* Turn off transceiver power. */
+ znet_transceiver_power (dev, 0);
+
+ znet_release_resources (dev);
+
+ return 0;
+}
+
+static void show_dma(struct net_device *dev)
+{
+ short ioaddr = dev->base_addr;
+ unsigned char stat = inb (ioaddr);
+ struct znet_private *znet = netdev_priv(dev);
+ unsigned long flags;
+	short dma_port = ((znet->tx_dma&3)<<2) + IO_DMA2_BASE;
+	unsigned addr;
+	short residue;
+
+	/* Read the cascaded DMA address register and the residue count
+	   under the DMA lock, so the controller's byte flip-flop cannot
+	   be disturbed between the two inb()s. */
+	flags = claim_dma_lock();
+	addr = inb(dma_port);
+	addr |= inb(dma_port) << 8;
+	residue = get_dma_residue(znet->tx_dma);
+	release_dma_lock(flags);
+
+	if (znet_debug > 1)
+		printk(KERN_DEBUG "Stat:%02x Addr: %04x cnt:%3x\n",
+		       stat, addr<<1, residue);
+}
+
+/* Initialize the hardware. We have to do this when the board is open()ed
+ or when we come out of suspend mode. */
+static void hardware_init(struct net_device *dev)
+{
+ unsigned long flags;
+ short ioaddr = dev->base_addr;
+ struct znet_private *znet = netdev_priv(dev);
+
+ znet->rx_cur = znet->rx_start;
+ znet->tx_cur = znet->tx_start;
+
+ /* Reset the chip, and start it up. */
+ outb(OP0_RESET, ioaddr);
+
+ flags=claim_dma_lock();
+ disable_dma(znet->rx_dma); /* reset by an interrupting task. */
+ clear_dma_ff(znet->rx_dma);
+ set_dma_mode(znet->rx_dma, DMA_RX_MODE);
+ set_dma_addr(znet->rx_dma, (unsigned int) znet->rx_start);
+ set_dma_count(znet->rx_dma, RX_BUF_SIZE);
+ enable_dma(znet->rx_dma);
+ /* Now set up the Tx channel. */
+ disable_dma(znet->tx_dma);
+ clear_dma_ff(znet->tx_dma);
+ set_dma_mode(znet->tx_dma, DMA_TX_MODE);
+ set_dma_addr(znet->tx_dma, (unsigned int) znet->tx_start);
+ set_dma_count(znet->tx_dma, znet->tx_buf_len<<1);
+ enable_dma(znet->tx_dma);
+ release_dma_lock(flags);
+
+ if (znet_debug > 1)
+ printk(KERN_DEBUG "%s: Initializing the i82593, rx buf %p tx buf %p\n",
+ dev->name, znet->rx_start,znet->tx_start);
+	/* Do an empty configure command, just like the Crynwr driver.  This
+	   resets the chip to its default values. */
+ *znet->tx_cur++ = 0;
+ *znet->tx_cur++ = 0;
+ show_dma(dev);
+ outb(OP0_CONFIGURE | CR0_CHNL, ioaddr);
+
+ znet_set_multicast_list (dev);
+
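+	/* Individual Address setup block: a one-word byte count (6)
+	   followed by the station MAC address; tx_cur advances by
+	   three words (6 bytes). */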
+ *znet->tx_cur++ = 6;
+ memcpy(znet->tx_cur, dev->dev_addr, 6);
+ znet->tx_cur += 3;
+ show_dma(dev);
+ outb(OP0_IA_SETUP | CR0_CHNL, ioaddr);
+ show_dma(dev);
+
+ update_stop_hit(ioaddr, 8192);
+ if (znet_debug > 1) printk(KERN_DEBUG "enabling Rx.\n");
+ outb(OP0_RCV_ENABLE, ioaddr);
+ netif_start_queue (dev);
+}
+
+static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset)
+{
+ outb(OP0_SWIT_TO_PORT_1 | CR0_CHNL, ioaddr);
+ if (znet_debug > 5)
+ printk(KERN_DEBUG "Updating stop hit with value %02x.\n",
+ (rx_stop_offset >> 6) | CR1_STOP_REG_UPDATE);
+ outb((rx_stop_offset >> 6) | CR1_STOP_REG_UPDATE, ioaddr);
+ outb(OP1_SWIT_TO_PORT_0, ioaddr);
+}
+
+static __exit void znet_cleanup (void)
+{
+ if (znet_dev) {
+ struct znet_private *znet = netdev_priv(znet_dev);
+
+ unregister_netdev (znet_dev);
+ kfree (znet->rx_start);
+ kfree (znet->tx_start);
+ free_netdev (znet_dev);
+ }
+}
+
+module_init (znet_probe);
+module_exit (znet_cleanup);
diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig
new file mode 100644
index 000000000000..9e16f3fa97b2
--- /dev/null
+++ b/drivers/net/ethernet/ibm/Kconfig
@@ -0,0 +1,48 @@
+#
+# IBM device configuration.
+#
+
+config NET_VENDOR_IBM
+ bool "IBM devices"
+ default y
+	depends on MCA || PPC_PSERIES || PPC_ISERIES || PPC_DCR || \
+ (IBMEBUS && INET && SPARSEMEM)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about IBM devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_IBM
+
+config IBMVETH
+ tristate "IBM LAN Virtual Ethernet support"
+ depends on PPC_PSERIES
+ ---help---
+ This driver supports virtual ethernet adapters on newer IBM iSeries
+ and pSeries systems.
+
+ To compile this driver as a module, choose M here. The module will
+ be called ibmveth.
+
+config ISERIES_VETH
+ tristate "iSeries Virtual Ethernet driver support"
+ depends on PPC_ISERIES
+
+source "drivers/net/ethernet/ibm/emac/Kconfig"
+
+config EHEA
+ tristate "eHEA Ethernet support"
+ depends on IBMEBUS && INET && SPARSEMEM
+ select INET_LRO
+ ---help---
+ This driver supports the IBM pSeries eHEA ethernet adapter.
+
+ To compile the driver as a module, choose M here. The module
+ will be called ehea.
+
+endif # NET_VENDOR_IBM
diff --git a/drivers/net/ethernet/ibm/Makefile b/drivers/net/ethernet/ibm/Makefile
new file mode 100644
index 000000000000..5a7d4e9ac803
--- /dev/null
+++ b/drivers/net/ethernet/ibm/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the IBM network device drivers.
+#
+
+obj-$(CONFIG_IBMVETH) += ibmveth.o
+obj-$(CONFIG_ISERIES_VETH) += iseries_veth.o
+obj-$(CONFIG_IBM_EMAC) += emac/
+obj-$(CONFIG_EHEA) += ehea/
diff --git a/drivers/net/ethernet/ibm/ehea/Makefile b/drivers/net/ethernet/ibm/ehea/Makefile
new file mode 100644
index 000000000000..775d9969b5c2
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ehea/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the eHEA ethernet device driver for IBM eServer System p
+#
+ehea-y = ehea_main.o ehea_phyp.o ehea_qmr.o ehea_ethtool.o
+obj-$(CONFIG_EHEA) += ehea.o
+
diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
new file mode 100644
index 000000000000..0b8e6a97a980
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -0,0 +1,505 @@
+/*
+ * linux/drivers/net/ehea/ehea.h
+ *
+ * eHEA ethernet device driver for IBM eServer System p
+ *
+ * (C) Copyright IBM Corp. 2006
+ *
+ * Authors:
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __EHEA_H__
+#define __EHEA_H__
+
+#include <linux/module.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <linux/if_vlan.h>
+#include <linux/inet_lro.h>
+
+#include <asm/ibmebus.h>
+#include <asm/abs_addr.h>
+#include <asm/io.h>
+
+#define DRV_NAME "ehea"
+#define DRV_VERSION "EHEA_0107"
+
+/* eHEA capability flags */
+#define DLPAR_PORT_ADD_REM 1
+#define DLPAR_MEM_ADD 2
+#define DLPAR_MEM_REM 4
+#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD | DLPAR_MEM_REM)
+
+#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
+ | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+#define EHEA_MAX_ENTRIES_RQ1 32767
+#define EHEA_MAX_ENTRIES_RQ2 16383
+#define EHEA_MAX_ENTRIES_RQ3 16383
+#define EHEA_MAX_ENTRIES_SQ 32767
+#define EHEA_MIN_ENTRIES_QP 127
+
+#define EHEA_SMALL_QUEUES
+#define EHEA_NUM_TX_QP 1
+#define EHEA_LRO_MAX_AGGR 64
+
+#ifdef EHEA_SMALL_QUEUES
+#define EHEA_MAX_CQE_COUNT 1023
+#define EHEA_DEF_ENTRIES_SQ 1023
+#define EHEA_DEF_ENTRIES_RQ1 4095
+#define EHEA_DEF_ENTRIES_RQ2 1023
+#define EHEA_DEF_ENTRIES_RQ3 1023
+#else
+#define EHEA_MAX_CQE_COUNT 4080
+#define EHEA_DEF_ENTRIES_SQ 4080
+#define EHEA_DEF_ENTRIES_RQ1 8160
+#define EHEA_DEF_ENTRIES_RQ2 2040
+#define EHEA_DEF_ENTRIES_RQ3 2040
+#endif
+
+#define EHEA_MAX_ENTRIES_EQ 20
+
+#define EHEA_SG_SQ 2
+#define EHEA_SG_RQ1 1
+#define EHEA_SG_RQ2 0
+#define EHEA_SG_RQ3 0
+
+#define EHEA_MAX_PACKET_SIZE 9022 /* for jumbo frames */
+#define EHEA_RQ2_PKT_SIZE 1522
+#define EHEA_L_PKT_SIZE 256 /* low latency */
+
+#define MAX_LRO_DESCRIPTORS 8
+
+/* Send completion signaling */
+
+/* Protection Domain Identifier */
+#define EHEA_PD_ID 0xaabcdeff
+
+#define EHEA_RQ2_THRESHOLD 1
+#define EHEA_RQ3_THRESHOLD 9 /* use RQ3 threshold of 1522 bytes */
+
+#define EHEA_SPEED_10G 10000
+#define EHEA_SPEED_1G 1000
+#define EHEA_SPEED_100M 100
+#define EHEA_SPEED_10M 10
+#define EHEA_SPEED_AUTONEG 0
+
+/* Broadcast/Multicast registration types */
+#define EHEA_BCMC_SCOPE_ALL 0x08
+#define EHEA_BCMC_SCOPE_SINGLE 0x00
+#define EHEA_BCMC_MULTICAST 0x04
+#define EHEA_BCMC_BROADCAST 0x00
+#define EHEA_BCMC_UNTAGGED 0x02
+#define EHEA_BCMC_TAGGED 0x00
+#define EHEA_BCMC_VLANID_ALL 0x01
+#define EHEA_BCMC_VLANID_SINGLE 0x00
+
+#define EHEA_CACHE_LINE 128
+
+/* Memory Regions */
+#define EHEA_MR_ACC_CTRL 0x00800000
+
+#define EHEA_BUSMAP_START 0x8000000000000000ULL
+#define EHEA_INVAL_ADDR 0xFFFFFFFFFFFFFFFFULL
+#define EHEA_DIR_INDEX_SHIFT 13 /* 8k Entries in 64k block */
+#define EHEA_TOP_INDEX_SHIFT (EHEA_DIR_INDEX_SHIFT * 2)
+#define EHEA_MAP_ENTRIES (1 << EHEA_DIR_INDEX_SHIFT)
+#define EHEA_MAP_SIZE (0x10000) /* currently fixed map size */
+#define EHEA_INDEX_MASK (EHEA_MAP_ENTRIES - 1)
+
+
+#define EHEA_WATCH_DOG_TIMEOUT (10 * HZ)
+
+/* utility functions */
+
+void ehea_dump(void *adr, int len, char *msg);
+
+#define EHEA_BMASK(pos, length) (((pos) << 16) + (length))
+
+#define EHEA_BMASK_IBM(from, to) (((63 - (to)) << 16) + ((to) - (from) + 1))
+
+#define EHEA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
+
+#define EHEA_BMASK_MASK(mask) \
+ (0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff))
+
+#define EHEA_BMASK_SET(mask, value) \
+ ((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))
+
+#define EHEA_BMASK_GET(mask, value) \
+ (EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))
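+
+/*
+ * Usage sketch (hypothetical field, not used by the driver): IBM bit
+ * numbering counts bit 0 as the most significant of 64, so
+ * EHEA_BMASK_IBM(48, 63) describes the low 16 bits of a register.
+ */
+#if 0
+#define EX_LOW16	EHEA_BMASK_IBM(48, 63)	/* bits 48..63 == low u16 */
+
+static inline u64 ex_pack(u16 v)
+{
+	return EHEA_BMASK_SET(EX_LOW16, v);	/* place v in the field */
+}
+
+static inline u16 ex_unpack(u64 reg)
+{
+	return EHEA_BMASK_GET(EX_LOW16, reg);	/* mask and shift it out */
+}
+#endif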
+
+/*
+ * Generic ehea page
+ */
+struct ehea_page {
+ u8 entries[PAGE_SIZE];
+};
+
+/*
+ * Generic queue in linux kernel virtual memory
+ */
+struct hw_queue {
+ u64 current_q_offset; /* current queue entry */
+ struct ehea_page **queue_pages; /* array of pages belonging to queue */
+ u32 qe_size; /* queue entry size */
+ u32 queue_length; /* queue length allocated in bytes */
+ u32 pagesize;
+ u32 toggle_state; /* toggle flag - per page */
+ u32 reserved; /* 64 bit alignment */
+};
+
+/*
+ * For pSeries this is a 64bit memory address where
+ * I/O memory is mapped into CPU address space
+ */
+struct h_epa {
+ void __iomem *addr;
+};
+
+struct h_epa_user {
+ u64 addr;
+};
+
+struct h_epas {
+ struct h_epa kernel; /* kernel space accessible resource,
+ set to 0 if unused */
+ struct h_epa_user user; /* user space accessible resource
+ set to 0 if unused */
+};
+
+/*
+ * Memory map data structures
+ */
+struct ehea_dir_bmap
+{
+ u64 ent[EHEA_MAP_ENTRIES];
+};
+struct ehea_top_bmap
+{
+ struct ehea_dir_bmap *dir[EHEA_MAP_ENTRIES];
+};
+struct ehea_bmap
+{
+ struct ehea_top_bmap *top[EHEA_MAP_ENTRIES];
+};
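+
+/*
+ * Lookup sketch (illustrative only, not a function from this driver):
+ * the shift constants above suggest that an index decomposes into three
+ * EHEA_DIR_INDEX_SHIFT-bit fields, one per level of the map.
+ */
+#if 0
+static u64 ehea_bmap_lookup_sketch(struct ehea_bmap *bmap, unsigned long idx)
+{
+	unsigned long top = (idx >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
+	unsigned long dir = (idx >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
+
+	if (!bmap->top[top] || !bmap->top[top]->dir[dir])
+		return EHEA_INVAL_ADDR;
+	return bmap->top[top]->dir[dir]->ent[idx & EHEA_INDEX_MASK];
+}
+#endif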
+
+struct ehea_qp;
+struct ehea_cq;
+struct ehea_eq;
+struct ehea_port;
+struct ehea_av;
+
+/*
+ * Queue attributes passed to ehea_create_qp()
+ */
+struct ehea_qp_init_attr {
+ /* input parameter */
+ u32 qp_token; /* queue token */
+ u8 low_lat_rq1;
+ u8 signalingtype; /* cqe generation flag */
+ u8 rq_count; /* num of receive queues */
+ u8 eqe_gen; /* eqe generation flag */
+ u16 max_nr_send_wqes; /* max number of send wqes */
+ u16 max_nr_rwqes_rq1; /* max number of receive wqes */
+ u16 max_nr_rwqes_rq2;
+ u16 max_nr_rwqes_rq3;
+ u8 wqe_size_enc_sq;
+ u8 wqe_size_enc_rq1;
+ u8 wqe_size_enc_rq2;
+ u8 wqe_size_enc_rq3;
+ u8 swqe_imm_data_len; /* immediate data length for swqes */
+ u16 port_nr;
+ u16 rq2_threshold;
+ u16 rq3_threshold;
+ u64 send_cq_handle;
+ u64 recv_cq_handle;
+ u64 aff_eq_handle;
+
+ /* output parameter */
+ u32 qp_nr;
+ u16 act_nr_send_wqes;
+ u16 act_nr_rwqes_rq1;
+ u16 act_nr_rwqes_rq2;
+ u16 act_nr_rwqes_rq3;
+ u8 act_wqe_size_enc_sq;
+ u8 act_wqe_size_enc_rq1;
+ u8 act_wqe_size_enc_rq2;
+ u8 act_wqe_size_enc_rq3;
+ u32 nr_sq_pages;
+ u32 nr_rq1_pages;
+ u32 nr_rq2_pages;
+ u32 nr_rq3_pages;
+ u32 liobn_sq;
+ u32 liobn_rq1;
+ u32 liobn_rq2;
+ u32 liobn_rq3;
+};
+
+/*
+ * Event Queue attributes, passed as parameter
+ */
+struct ehea_eq_attr {
+ u32 type;
+ u32 max_nr_of_eqes;
+ u8 eqe_gen; /* generate eqe flag */
+ u64 eq_handle;
+ u32 act_nr_of_eqes;
+ u32 nr_pages;
+ u32 ist1; /* Interrupt service token */
+ u32 ist2;
+ u32 ist3;
+ u32 ist4;
+};
+
+
+/*
+ * Event Queue
+ */
+struct ehea_eq {
+ struct ehea_adapter *adapter;
+ struct hw_queue hw_queue;
+ u64 fw_handle;
+ struct h_epas epas;
+ spinlock_t spinlock;
+ struct ehea_eq_attr attr;
+};
+
+/*
+ * HEA Queues
+ */
+struct ehea_qp {
+ struct ehea_adapter *adapter;
+ u64 fw_handle; /* QP handle for firmware calls */
+ struct hw_queue hw_squeue;
+ struct hw_queue hw_rqueue1;
+ struct hw_queue hw_rqueue2;
+ struct hw_queue hw_rqueue3;
+ struct h_epas epas;
+ struct ehea_qp_init_attr init_attr;
+};
+
+/*
+ * Completion Queue attributes
+ */
+struct ehea_cq_attr {
+ /* input parameter */
+ u32 max_nr_of_cqes;
+ u32 cq_token;
+ u64 eq_handle;
+
+ /* output parameter */
+ u32 act_nr_of_cqes;
+ u32 nr_pages;
+};
+
+/*
+ * Completion Queue
+ */
+struct ehea_cq {
+ struct ehea_adapter *adapter;
+ u64 fw_handle;
+ struct hw_queue hw_queue;
+ struct h_epas epas;
+ struct ehea_cq_attr attr;
+};
+
+/*
+ * Memory Region
+ */
+struct ehea_mr {
+ struct ehea_adapter *adapter;
+ u64 handle;
+ u64 vaddr;
+ u32 lkey;
+};
+
+/*
+ * Port statistics
+ */
+struct port_stats {
+ int poll_receive_errors;
+ int queue_stopped;
+ int err_tcp_cksum;
+ int err_ip_cksum;
+ int err_frame_crc;
+};
+
+#define EHEA_IRQ_NAME_SIZE 20
+
+/*
+ * Queue SKB Array
+ */
+struct ehea_q_skb_arr {
+ struct sk_buff **arr; /* skb array for queue */
+ int len; /* array length */
+ int index; /* array index */
+ int os_skbs; /* rq2/rq3 only: outstanding skbs */
+};
+
+/*
+ * Port resources
+ */
+struct ehea_port_res {
+ struct napi_struct napi;
+ struct port_stats p_stats;
+ struct ehea_mr send_mr; /* send memory region */
+ struct ehea_mr recv_mr; /* receive memory region */
+ spinlock_t xmit_lock;
+ struct ehea_port *port;
+ char int_recv_name[EHEA_IRQ_NAME_SIZE];
+ char int_send_name[EHEA_IRQ_NAME_SIZE];
+ struct ehea_qp *qp;
+ struct ehea_cq *send_cq;
+ struct ehea_cq *recv_cq;
+ struct ehea_eq *eq;
+ struct ehea_q_skb_arr rq1_skba;
+ struct ehea_q_skb_arr rq2_skba;
+ struct ehea_q_skb_arr rq3_skba;
+ struct ehea_q_skb_arr sq_skba;
+ int sq_skba_size;
+ spinlock_t netif_queue;
+ int queue_stopped;
+ int swqe_refill_th;
+ atomic_t swqe_avail;
+ int swqe_ll_count;
+ u32 swqe_id_counter;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u32 poll_counter;
+ struct net_lro_mgr lro_mgr;
+ struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
+ int sq_restart_flag;
+};
+
+
+#define EHEA_MAX_PORTS 16
+
+#define EHEA_NUM_PORTRES_FW_HANDLES 6 /* QP handle, SendCQ handle,
+ RecvCQ handle, EQ handle,
+ SendMR handle, RecvMR handle */
+#define EHEA_NUM_PORT_FW_HANDLES 1 /* EQ handle */
+#define EHEA_NUM_ADAPTER_FW_HANDLES 2 /* MR handle, NEQ handle */
+
+struct ehea_adapter {
+ u64 handle;
+ struct platform_device *ofdev;
+ struct ehea_port *port[EHEA_MAX_PORTS];
+ struct ehea_eq *neq; /* notification event queue */
+ struct tasklet_struct neq_tasklet;
+ struct ehea_mr mr;
+ u32 pd; /* protection domain */
+ u64 max_mc_mac; /* max number of multicast mac addresses */
+ int active_ports;
+ struct list_head list;
+};
+
+
+struct ehea_mc_list {
+ struct list_head list;
+ u64 macaddr;
+};
+
+/* kdump support */
+struct ehea_fw_handle_entry {
+ u64 adh; /* Adapter Handle */
+ u64 fwh; /* Firmware Handle */
+};
+
+struct ehea_fw_handle_array {
+ struct ehea_fw_handle_entry *arr;
+ int num_entries;
+ struct mutex lock;
+};
+
+struct ehea_bcmc_reg_entry {
+ u64 adh; /* Adapter Handle */
+ u32 port_id; /* Logical Port Id */
+ u8 reg_type; /* Registration Type */
+ u64 macaddr;
+};
+
+struct ehea_bcmc_reg_array {
+ struct ehea_bcmc_reg_entry *arr;
+ int num_entries;
+ spinlock_t lock;
+};
+
+#define EHEA_PORT_UP 1
+#define EHEA_PORT_DOWN 0
+#define EHEA_PHY_LINK_UP 1
+#define EHEA_PHY_LINK_DOWN 0
+#define EHEA_MAX_PORT_RES 16
+struct ehea_port {
+ struct ehea_adapter *adapter; /* adapter that owns this port */
+ struct net_device *netdev;
+ struct net_device_stats stats;
+ struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
+ struct platform_device ofdev; /* Open Firmware Device */
+ struct ehea_mc_list *mc_list; /* Multicast MAC addresses */
+ struct ehea_eq *qp_eq;
+ struct work_struct reset_task;
+ struct delayed_work stats_work;
+ struct mutex port_lock;
+ char int_aff_name[EHEA_IRQ_NAME_SIZE];
+ int allmulti; /* Indicates IFF_ALLMULTI state */
+ int promisc; /* Indicates IFF_PROMISC state */
+ int num_tx_qps;
+ int num_add_tx_qps;
+ int num_mcs;
+ int resets;
+ unsigned long flags;
+ u64 mac_addr;
+ u32 logical_port_id;
+ u32 port_speed;
+ u32 msg_enable;
+ u32 sig_comp_iv;
+ u32 state;
+ u32 lro_max_aggr;
+ u8 phy_link;
+ u8 full_duplex;
+ u8 autoneg;
+ u8 num_def_qps;
+ wait_queue_head_t swqe_avail_wq;
+ wait_queue_head_t restart_wq;
+};
+
+struct port_res_cfg {
+ int max_entries_rcq;
+ int max_entries_scq;
+ int max_entries_sq;
+ int max_entries_rq1;
+ int max_entries_rq2;
+ int max_entries_rq3;
+};
+
+enum ehea_flag_bits {
+ __EHEA_STOP_XFER,
+ __EHEA_DISABLE_PORT_RESET
+};
+
+void ehea_set_ethtool_ops(struct net_device *netdev);
+int ehea_sense_port_attr(struct ehea_port *port);
+int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);
+
+#endif /* __EHEA_H__ */
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
new file mode 100644
index 000000000000..7f642aef5e82
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -0,0 +1,295 @@
+/*
+ * linux/drivers/net/ehea/ehea_ethtool.c
+ *
+ * eHEA ethernet device driver for IBM eServer System p
+ *
+ * (C) Copyright IBM Corp. 2006
+ *
+ * Authors:
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "ehea.h"
+#include "ehea_phyp.h"
+
+static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ u32 speed;
+ int ret;
+
+ ret = ehea_sense_port_attr(port);
+
+ if (ret)
+ return ret;
+
+ if (netif_carrier_ok(dev)) {
+ switch (port->port_speed) {
+ case EHEA_SPEED_10M:
+ speed = SPEED_10;
+ break;
+ case EHEA_SPEED_100M:
+ speed = SPEED_100;
+ break;
+ case EHEA_SPEED_1G:
+ speed = SPEED_1000;
+ break;
+ case EHEA_SPEED_10G:
+ speed = SPEED_10000;
+ break;
+ default:
+ speed = -1;
+ break; /* BUG */
+ }
+ cmd->duplex = port->full_duplex == 1 ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ } else {
+ speed = ~0;
+ cmd->duplex = -1;
+ }
+ ethtool_cmd_speed_set(cmd, speed);
+
+ if (cmd->speed == SPEED_10000) {
+ cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
+ cmd->port = PORT_FIBRE;
+ } else {
+ cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full
+ | SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full
+ | SUPPORTED_10baseT_Half | SUPPORTED_Autoneg
+ | SUPPORTED_TP);
+ cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg
+ | ADVERTISED_TP);
+ cmd->port = PORT_TP;
+ }
+
+ cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static int ehea_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ int ret = 0;
+ u32 sp;
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ sp = EHEA_SPEED_AUTONEG;
+ goto doit;
+ }
+
+ switch (cmd->speed) {
+ case SPEED_10:
+ if (cmd->duplex == DUPLEX_FULL)
+ sp = H_SPEED_10M_F;
+ else
+ sp = H_SPEED_10M_H;
+ break;
+
+ case SPEED_100:
+ if (cmd->duplex == DUPLEX_FULL)
+ sp = H_SPEED_100M_F;
+ else
+ sp = H_SPEED_100M_H;
+ break;
+
+ case SPEED_1000:
+ if (cmd->duplex == DUPLEX_FULL)
+ sp = H_SPEED_1G_F;
+ else
+ ret = -EINVAL;
+ break;
+
+ case SPEED_10000:
+ if (cmd->duplex == DUPLEX_FULL)
+ sp = H_SPEED_10G_F;
+ else
+ ret = -EINVAL;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ goto out;
+doit:
+ ret = ehea_set_portspeed(port, sp);
+
+ if (!ret)
+ netdev_info(dev,
+ "Port speed successfully set: %dMbps %s Duplex\n",
+ port->port_speed,
+ port->full_duplex == 1 ? "Full" : "Half");
+out:
+ return ret;
+}
+
+static int ehea_nway_reset(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ int ret;
+
+ ret = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG);
+
+ if (!ret)
+ netdev_info(port->netdev,
+ "Port speed successfully set: %dMbps %s Duplex\n",
+ port->port_speed,
+ port->full_duplex == 1 ? "Full" : "Half");
+ return ret;
+}
+
+static void ehea_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+}
+
+static u32 ehea_get_msglevel(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ return port->msg_enable;
+}
+
+static void ehea_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ port->msg_enable = value;
+}
+
+static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
+ {"sig_comp_iv"},
+ {"swqe_refill_th"},
+ {"port resets"},
+ {"Receive errors"},
+ {"TCP cksum errors"},
+ {"IP cksum errors"},
+ {"Frame cksum errors"},
+ {"num SQ stopped"},
+ {"SQ stopped"},
+ {"PR0 free_swqes"},
+ {"PR1 free_swqes"},
+ {"PR2 free_swqes"},
+ {"PR3 free_swqes"},
+ {"PR4 free_swqes"},
+ {"PR5 free_swqes"},
+ {"PR6 free_swqes"},
+ {"PR7 free_swqes"},
+ {"LRO aggregated"},
+ {"LRO flushed"},
+ {"LRO no_desc"},
+};
+
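+/* The order and count of these keys must match the values written by
+ * ehea_get_ethtool_stats() below. */
+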
+static void ehea_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ if (stringset == ETH_SS_STATS) {
+ memcpy(data, &ehea_ethtool_stats_keys,
+ sizeof(ehea_ethtool_stats_keys));
+ }
+}
+
+static int ehea_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(ehea_ethtool_stats_keys);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void ehea_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int i, k, tmp;
+ struct ehea_port *port = netdev_priv(dev);
+
+ for (i = 0; i < ehea_get_sset_count(dev, ETH_SS_STATS); i++)
+ data[i] = 0;
+ i = 0;
+
+ data[i++] = port->sig_comp_iv;
+ data[i++] = port->port_res[0].swqe_refill_th;
+ data[i++] = port->resets;
+
+ for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+ tmp += port->port_res[k].p_stats.poll_receive_errors;
+ data[i++] = tmp;
+
+ for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+ tmp += port->port_res[k].p_stats.err_tcp_cksum;
+ data[i++] = tmp;
+
+ for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+ tmp += port->port_res[k].p_stats.err_ip_cksum;
+ data[i++] = tmp;
+
+ for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+ tmp += port->port_res[k].p_stats.err_frame_crc;
+ data[i++] = tmp;
+
+ for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+ tmp += port->port_res[k].p_stats.queue_stopped;
+ data[i++] = tmp;
+
+ for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+ tmp |= port->port_res[k].queue_stopped;
+ data[i++] = tmp;
+
+ for (k = 0; k < 8; k++)
+ data[i++] = atomic_read(&port->port_res[k].swqe_avail);
+
+	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+		tmp += port->port_res[k].lro_mgr.stats.aggregated;
+	data[i++] = tmp;
+
+	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+		tmp += port->port_res[k].lro_mgr.stats.flushed;
+	data[i++] = tmp;
+
+	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+		tmp += port->port_res[k].lro_mgr.stats.no_desc;
+	data[i++] = tmp;
+
+}
+
+const struct ethtool_ops ehea_ethtool_ops = {
+ .get_settings = ehea_get_settings,
+ .get_drvinfo = ehea_get_drvinfo,
+ .get_msglevel = ehea_get_msglevel,
+ .set_msglevel = ehea_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_strings = ehea_get_strings,
+ .get_sset_count = ehea_get_sset_count,
+ .get_ethtool_stats = ehea_get_ethtool_stats,
+ .set_settings = ehea_set_settings,
+ .nway_reset = ehea_nway_reset, /* Restart autonegotiation */
+};
+
+void ehea_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &ehea_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_hw.h b/drivers/net/ethernet/ibm/ehea/ehea_hw.h
new file mode 100644
index 000000000000..567981b4b2cc
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ehea/ehea_hw.h
@@ -0,0 +1,292 @@
+/*
+ * linux/drivers/net/ehea/ehea_hw.h
+ *
+ * eHEA ethernet device driver for IBM eServer System p
+ *
+ * (C) Copyright IBM Corp. 2006
+ *
+ * Authors:
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __EHEA_HW_H__
+#define __EHEA_HW_H__
+
+#define QPX_SQA_VALUE EHEA_BMASK_IBM(48, 63)
+#define QPX_RQ1A_VALUE EHEA_BMASK_IBM(48, 63)
+#define QPX_RQ2A_VALUE EHEA_BMASK_IBM(48, 63)
+#define QPX_RQ3A_VALUE EHEA_BMASK_IBM(48, 63)
+
+#define QPTEMM_OFFSET(x) offsetof(struct ehea_qptemm, x)
+
+struct ehea_qptemm {
+ u64 qpx_hcr;
+ u64 qpx_c;
+ u64 qpx_herr;
+ u64 qpx_aer;
+ u64 qpx_sqa;
+ u64 qpx_sqc;
+ u64 qpx_rq1a;
+ u64 qpx_rq1c;
+ u64 qpx_st;
+ u64 qpx_aerr;
+ u64 qpx_tenure;
+ u64 qpx_reserved1[(0x098 - 0x058) / 8];
+ u64 qpx_portp;
+ u64 qpx_reserved2[(0x100 - 0x0A0) / 8];
+ u64 qpx_t;
+ u64 qpx_sqhp;
+ u64 qpx_sqptp;
+ u64 qpx_reserved3[(0x140 - 0x118) / 8];
+ u64 qpx_sqwsize;
+ u64 qpx_reserved4[(0x170 - 0x148) / 8];
+ u64 qpx_sqsize;
+ u64 qpx_reserved5[(0x1B0 - 0x178) / 8];
+ u64 qpx_sigt;
+ u64 qpx_wqecnt;
+ u64 qpx_rq1hp;
+ u64 qpx_rq1ptp;
+ u64 qpx_rq1size;
+ u64 qpx_reserved6[(0x220 - 0x1D8) / 8];
+ u64 qpx_rq1wsize;
+ u64 qpx_reserved7[(0x240 - 0x228) / 8];
+ u64 qpx_pd;
+ u64 qpx_scqn;
+ u64 qpx_rcqn;
+ u64 qpx_aeqn;
+ u64 reserved49;
+ u64 qpx_ram;
+ u64 qpx_reserved8[(0x300 - 0x270) / 8];
+ u64 qpx_rq2a;
+ u64 qpx_rq2c;
+ u64 qpx_rq2hp;
+ u64 qpx_rq2ptp;
+ u64 qpx_rq2size;
+ u64 qpx_rq2wsize;
+ u64 qpx_rq2th;
+ u64 qpx_rq3a;
+ u64 qpx_rq3c;
+ u64 qpx_rq3hp;
+ u64 qpx_rq3ptp;
+ u64 qpx_rq3size;
+ u64 qpx_rq3wsize;
+ u64 qpx_rq3th;
+ u64 qpx_lpn;
+ u64 qpx_reserved9[(0x400 - 0x378) / 8];
+ u64 reserved_ext[(0x500 - 0x400) / 8];
+ u64 reserved2[(0x1000 - 0x500) / 8];
+};
+
+#define MRx_HCR_LPARID_VALID EHEA_BMASK_IBM(0, 0)
+
+#define MRMWMM_OFFSET(x) offsetof(struct ehea_mrmwmm, x)
+
+struct ehea_mrmwmm {
+ u64 mrx_hcr;
+ u64 mrx_c;
+ u64 mrx_herr;
+ u64 mrx_aer;
+ u64 mrx_pp;
+ u64 reserved1;
+ u64 reserved2;
+ u64 reserved3;
+ u64 reserved4[(0x200 - 0x40) / 8];
+ u64 mrx_ctl[64];
+};
+
+#define QPEDMM_OFFSET(x) offsetof(struct ehea_qpedmm, x)
+
+struct ehea_qpedmm {
+
+ u64 reserved0[(0x400) / 8];
+ u64 qpedx_phh;
+ u64 qpedx_ppsgp;
+ u64 qpedx_ppsgu;
+ u64 qpedx_ppdgp;
+ u64 qpedx_ppdgu;
+ u64 qpedx_aph;
+ u64 qpedx_apsgp;
+ u64 qpedx_apsgu;
+ u64 qpedx_apdgp;
+ u64 qpedx_apdgu;
+ u64 qpedx_apav;
+ u64 qpedx_apsav;
+ u64 qpedx_hcr;
+ u64 reserved1[4];
+ u64 qpedx_rrl0;
+ u64 qpedx_rrrkey0;
+ u64 qpedx_rrva0;
+ u64 reserved2;
+ u64 qpedx_rrl1;
+ u64 qpedx_rrrkey1;
+ u64 qpedx_rrva1;
+ u64 reserved3;
+ u64 qpedx_rrl2;
+ u64 qpedx_rrrkey2;
+ u64 qpedx_rrva2;
+ u64 reserved4;
+ u64 qpedx_rrl3;
+ u64 qpedx_rrrkey3;
+ u64 qpedx_rrva3;
+};
+
+#define CQX_FECADDER EHEA_BMASK_IBM(32, 63)
+#define CQX_FEC_CQE_CNT EHEA_BMASK_IBM(32, 63)
+#define CQX_N1_GENERATE_COMP_EVENT EHEA_BMASK_IBM(0, 0)
+#define CQX_EP_EVENT_PENDING EHEA_BMASK_IBM(0, 0)
+
+#define CQTEMM_OFFSET(x) offsetof(struct ehea_cqtemm, x)
+
+struct ehea_cqtemm {
+ u64 cqx_hcr;
+ u64 cqx_c;
+ u64 cqx_herr;
+ u64 cqx_aer;
+ u64 cqx_ptp;
+ u64 cqx_tp;
+ u64 cqx_fec;
+ u64 cqx_feca;
+ u64 cqx_ep;
+ u64 cqx_eq;
+ u64 reserved1;
+ u64 cqx_n0;
+ u64 cqx_n1;
+ u64 reserved2[(0x1000 - 0x60) / 8];
+};
+
+#define EQTEMM_OFFSET(x) offsetof(struct ehea_eqtemm, x)
+
+struct ehea_eqtemm {
+ u64 eqx_hcr;
+ u64 eqx_c;
+ u64 eqx_herr;
+ u64 eqx_aer;
+ u64 eqx_ptp;
+ u64 eqx_tp;
+ u64 eqx_ssba;
+ u64 eqx_psba;
+ u64 eqx_cec;
+ u64 eqx_meql;
+ u64 eqx_xisbi;
+ u64 eqx_xisc;
+ u64 eqx_it;
+};
+
+/*
+ * These access functions will be changed when the discussion about
+ * the new access methods for POWER has settled.
+ */
+
+static inline u64 epa_load(struct h_epa epa, u32 offset)
+{
+ return __raw_readq((void __iomem *)(epa.addr + offset));
+}
+
+static inline void epa_store(struct h_epa epa, u32 offset, u64 value)
+{
+ __raw_writeq(value, (void __iomem *)(epa.addr + offset));
+ epa_load(epa, offset); /* synchronize explicitly to eHEA */
+}
+
+static inline void epa_store_acc(struct h_epa epa, u32 offset, u64 value)
+{
+ __raw_writeq(value, (void __iomem *)(epa.addr + offset));
+}
+
+#define epa_store_eq(epa, offset, value)\
+ epa_store(epa, EQTEMM_OFFSET(offset), value)
+#define epa_load_eq(epa, offset)\
+ epa_load(epa, EQTEMM_OFFSET(offset))
+
+#define epa_store_cq(epa, offset, value)\
+ epa_store(epa, CQTEMM_OFFSET(offset), value)
+#define epa_load_cq(epa, offset)\
+ epa_load(epa, CQTEMM_OFFSET(offset))
+
+#define epa_store_qp(epa, offset, value)\
+ epa_store(epa, QPTEMM_OFFSET(offset), value)
+#define epa_load_qp(epa, offset)\
+ epa_load(epa, QPTEMM_OFFSET(offset))
+
+#define epa_store_qped(epa, offset, value)\
+ epa_store(epa, QPEDMM_OFFSET(offset), value)
+#define epa_load_qped(epa, offset)\
+ epa_load(epa, QPEDMM_OFFSET(offset))
+
+#define epa_store_mrmw(epa, offset, value)\
+ epa_store(epa, MRMWMM_OFFSET(offset), value)
+#define epa_load_mrmw(epa, offset)\
+ epa_load(epa, MRMWMM_OFFSET(offset))
+
+#define epa_store_base(epa, offset, value)\
+ epa_store(epa, HCAGR_OFFSET(offset), value)
+#define epa_load_base(epa, offset)\
+ epa_load(epa, HCAGR_OFFSET(offset))
+
+static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes)
+{
+ struct h_epa epa = qp->epas.kernel;
+ epa_store_acc(epa, QPTEMM_OFFSET(qpx_sqa),
+ EHEA_BMASK_SET(QPX_SQA_VALUE, nr_wqes));
+}
+
+static inline void ehea_update_rq3a(struct ehea_qp *qp, u16 nr_wqes)
+{
+ struct h_epa epa = qp->epas.kernel;
+ epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq3a),
+		      EHEA_BMASK_SET(QPX_RQ3A_VALUE, nr_wqes));
+}
+
+static inline void ehea_update_rq2a(struct ehea_qp *qp, u16 nr_wqes)
+{
+ struct h_epa epa = qp->epas.kernel;
+ epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq2a),
+ EHEA_BMASK_SET(QPX_RQ2A_VALUE, nr_wqes));
+}
+
+static inline void ehea_update_rq1a(struct ehea_qp *qp, u16 nr_wqes)
+{
+ struct h_epa epa = qp->epas.kernel;
+ epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq1a),
+		      EHEA_BMASK_SET(QPX_RQ1A_VALUE, nr_wqes));
+}
+
+static inline void ehea_update_feca(struct ehea_cq *cq, u32 nr_cqes)
+{
+ struct h_epa epa = cq->epas.kernel;
+ epa_store_acc(epa, CQTEMM_OFFSET(cqx_feca),
+ EHEA_BMASK_SET(CQX_FECADDER, nr_cqes));
+}
+
+static inline void ehea_reset_cq_n1(struct ehea_cq *cq)
+{
+ struct h_epa epa = cq->epas.kernel;
+ epa_store_cq(epa, cqx_n1,
+ EHEA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, 1));
+}
+
+static inline void ehea_reset_cq_ep(struct ehea_cq *my_cq)
+{
+ struct h_epa epa = my_cq->epas.kernel;
+ epa_store_acc(epa, CQTEMM_OFFSET(cqx_ep),
+ EHEA_BMASK_SET(CQX_EP_EVENT_PENDING, 0));
+}
+
+#endif /* __EHEA_HW_H__ */
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
new file mode 100644
index 000000000000..c821cb653999
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -0,0 +1,3781 @@
+/*
+ * linux/drivers/net/ehea/ehea_main.c
+ *
+ * eHEA ethernet device driver for IBM eServer System p
+ *
+ * (C) Copyright IBM Corp. 2006
+ *
+ * Authors:
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/if_ether.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/memory.h>
+#include <asm/kexec.h>
+#include <linux/mutex.h>
+#include <linux/prefetch.h>
+
+#include <net/ip.h>
+
+#include "ehea.h"
+#include "ehea_qmr.h"
+#include "ehea_phyp.h"
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
+MODULE_DESCRIPTION("IBM eServer HEA Driver");
+MODULE_VERSION(DRV_VERSION);
+
+
+static int msg_level = -1;
+static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
+static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
+static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
+static int sq_entries = EHEA_DEF_ENTRIES_SQ;
+static int use_mcs;
+static int use_lro;
+static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
+static int num_tx_qps = EHEA_NUM_TX_QP;
+static int prop_carrier_state;
+
+module_param(msg_level, int, 0);
+module_param(rq1_entries, int, 0);
+module_param(rq2_entries, int, 0);
+module_param(rq3_entries, int, 0);
+module_param(sq_entries, int, 0);
+module_param(prop_carrier_state, int, 0);
+module_param(use_mcs, int, 0);
+module_param(use_lro, int, 0);
+module_param(lro_max_aggr, int, 0);
+module_param(num_tx_qps, int, 0);
+
+MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
+MODULE_PARM_DESC(msg_level, "msg_level");
+MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
+		 "port to stack. 1:yes, 0:no. Default = 0");
+MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
+		 "[2^x - 1], x = [6..14]. Default = "
+		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
+MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
+		 "[2^x - 1], x = [6..14]. Default = "
+		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
+MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
+		 "[2^x - 1], x = [6..14]. Default = "
+		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
+MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
+		 "[2^x - 1], x = [6..14]. Default = "
+		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
+MODULE_PARM_DESC(use_mcs, "0:NAPI, 1:Multiple receive queues. Default = 0");
+
+MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated. Default = "
+		 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
+MODULE_PARM_DESC(use_lro, "Large Receive Offload, 1: enable, 0: disable. "
+		 "Default = 0");
+
+static int port_name_cnt;
+static LIST_HEAD(adapter_list);
+static unsigned long ehea_driver_flags;
+static DEFINE_MUTEX(dlpar_mem_lock);
+struct ehea_fw_handle_array ehea_fw_handles;
+struct ehea_bcmc_reg_array ehea_bcmc_regs;
+
+
+static int __devinit ehea_probe_adapter(struct platform_device *dev,
+ const struct of_device_id *id);
+
+static int __devexit ehea_remove(struct platform_device *dev);
+
+static struct of_device_id ehea_device_table[] = {
+ {
+ .name = "lhea",
+ .compatible = "IBM,lhea",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ehea_device_table);
+
+static struct of_platform_driver ehea_driver = {
+ .driver = {
+ .name = "ehea",
+ .owner = THIS_MODULE,
+ .of_match_table = ehea_device_table,
+ },
+ .probe = ehea_probe_adapter,
+ .remove = ehea_remove,
+};
+
+void ehea_dump(void *adr, int len, char *msg)
+{
+ int x;
+ unsigned char *deb = adr;
+ for (x = 0; x < len; x += 16) {
+ pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
+ msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
+ deb += 16;
+ }
+}
+
+void ehea_schedule_port_reset(struct ehea_port *port)
+{
+ if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
+ schedule_work(&port->reset_task);
+}
+
+static void ehea_update_firmware_handles(void)
+{
+ struct ehea_fw_handle_entry *arr = NULL;
+ struct ehea_adapter *adapter;
+ int num_adapters = 0;
+ int num_ports = 0;
+ int num_portres = 0;
+ int i = 0;
+ int num_fw_handles, k, l;
+
+ /* Determine number of handles */
+ mutex_lock(&ehea_fw_handles.lock);
+
+ list_for_each_entry(adapter, &adapter_list, list) {
+ num_adapters++;
+
+ for (k = 0; k < EHEA_MAX_PORTS; k++) {
+ struct ehea_port *port = adapter->port[k];
+
+ if (!port || (port->state != EHEA_PORT_UP))
+ continue;
+
+ num_ports++;
+ num_portres += port->num_def_qps + port->num_add_tx_qps;
+ }
+ }
+
+ num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
+ num_ports * EHEA_NUM_PORT_FW_HANDLES +
+ num_portres * EHEA_NUM_PORTRES_FW_HANDLES;
+
+ if (num_fw_handles) {
+ arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
+ if (!arr)
+ goto out; /* Keep the existing array */
+ } else
+ goto out_update;
+
+ list_for_each_entry(adapter, &adapter_list, list) {
+ if (num_adapters == 0)
+ break;
+
+ for (k = 0; k < EHEA_MAX_PORTS; k++) {
+ struct ehea_port *port = adapter->port[k];
+
+ if (!port || (port->state != EHEA_PORT_UP) ||
+ (num_ports == 0))
+ continue;
+
+ for (l = 0;
+ l < port->num_def_qps + port->num_add_tx_qps;
+ l++) {
+ struct ehea_port_res *pr = &port->port_res[l];
+
+ arr[i].adh = adapter->handle;
+ arr[i++].fwh = pr->qp->fw_handle;
+ arr[i].adh = adapter->handle;
+ arr[i++].fwh = pr->send_cq->fw_handle;
+ arr[i].adh = adapter->handle;
+ arr[i++].fwh = pr->recv_cq->fw_handle;
+ arr[i].adh = adapter->handle;
+ arr[i++].fwh = pr->eq->fw_handle;
+ arr[i].adh = adapter->handle;
+ arr[i++].fwh = pr->send_mr.handle;
+ arr[i].adh = adapter->handle;
+ arr[i++].fwh = pr->recv_mr.handle;
+ }
+ arr[i].adh = adapter->handle;
+ arr[i++].fwh = port->qp_eq->fw_handle;
+ num_ports--;
+ }
+
+ arr[i].adh = adapter->handle;
+ arr[i++].fwh = adapter->neq->fw_handle;
+
+ if (adapter->mr.handle) {
+ arr[i].adh = adapter->handle;
+ arr[i++].fwh = adapter->mr.handle;
+ }
+ num_adapters--;
+ }
+
+out_update:
+ kfree(ehea_fw_handles.arr);
+ ehea_fw_handles.arr = arr;
+ ehea_fw_handles.num_entries = i;
+out:
+ mutex_unlock(&ehea_fw_handles.lock);
+}
+
+static void ehea_update_bcmc_registrations(void)
+{
+ unsigned long flags;
+ struct ehea_bcmc_reg_entry *arr = NULL;
+ struct ehea_adapter *adapter;
+ struct ehea_mc_list *mc_entry;
+ int num_registrations = 0;
+ int i = 0;
+ int k;
+
+ spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);
+
+ /* Determine number of registrations */
+ list_for_each_entry(adapter, &adapter_list, list)
+ for (k = 0; k < EHEA_MAX_PORTS; k++) {
+ struct ehea_port *port = adapter->port[k];
+
+ if (!port || (port->state != EHEA_PORT_UP))
+ continue;
+
+ num_registrations += 2; /* Broadcast registrations */
+
+ list_for_each_entry(mc_entry, &port->mc_list->list,list)
+ num_registrations += 2;
+ }
+
+ if (num_registrations) {
+ arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
+ if (!arr)
+ goto out; /* Keep the existing array */
+ } else
+ goto out_update;
+
+ list_for_each_entry(adapter, &adapter_list, list) {
+ for (k = 0; k < EHEA_MAX_PORTS; k++) {
+ struct ehea_port *port = adapter->port[k];
+
+ if (!port || (port->state != EHEA_PORT_UP))
+ continue;
+
+ if (num_registrations == 0)
+ goto out_update;
+
+ arr[i].adh = adapter->handle;
+ arr[i].port_id = port->logical_port_id;
+ arr[i].reg_type = EHEA_BCMC_BROADCAST |
+ EHEA_BCMC_UNTAGGED;
+ arr[i++].macaddr = port->mac_addr;
+
+ arr[i].adh = adapter->handle;
+ arr[i].port_id = port->logical_port_id;
+ arr[i].reg_type = EHEA_BCMC_BROADCAST |
+ EHEA_BCMC_VLANID_ALL;
+ arr[i++].macaddr = port->mac_addr;
+ num_registrations -= 2;
+
+ list_for_each_entry(mc_entry,
+ &port->mc_list->list, list) {
+ if (num_registrations == 0)
+ goto out_update;
+
+ arr[i].adh = adapter->handle;
+ arr[i].port_id = port->logical_port_id;
+ arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
+ EHEA_BCMC_MULTICAST |
+ EHEA_BCMC_UNTAGGED;
+ arr[i++].macaddr = mc_entry->macaddr;
+
+ arr[i].adh = adapter->handle;
+ arr[i].port_id = port->logical_port_id;
+ arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
+ EHEA_BCMC_MULTICAST |
+ EHEA_BCMC_VLANID_ALL;
+ arr[i++].macaddr = mc_entry->macaddr;
+ num_registrations -= 2;
+ }
+ }
+ }
+
+out_update:
+ kfree(ehea_bcmc_regs.arr);
+ ehea_bcmc_regs.arr = arr;
+ ehea_bcmc_regs.num_entries = i;
+out:
+ spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
+}
+
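+/*
+ * Sum the per-queue software counters into the cached net_device_stats;
+ * hardware-maintained values (multicast, rx_errors) are refreshed
+ * asynchronously by ehea_update_stats() below.
+ */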
+static struct net_device_stats *ehea_get_stats(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct net_device_stats *stats = &port->stats;
+ u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
+ int i;
+
+ for (i = 0; i < port->num_def_qps; i++) {
+ rx_packets += port->port_res[i].rx_packets;
+ rx_bytes += port->port_res[i].rx_bytes;
+ }
+
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ tx_packets += port->port_res[i].tx_packets;
+ tx_bytes += port->port_res[i].tx_bytes;
+ }
+
+ stats->tx_packets = tx_packets;
+ stats->rx_bytes = rx_bytes;
+ stats->tx_bytes = tx_bytes;
+ stats->rx_packets = rx_packets;
+
+ return &port->stats;
+}
+
+static void ehea_update_stats(struct work_struct *work)
+{
+ struct ehea_port *port =
+ container_of(work, struct ehea_port, stats_work.work);
+ struct net_device *dev = port->netdev;
+ struct net_device_stats *stats = &port->stats;
+ struct hcp_ehea_port_cb2 *cb2;
+ u64 hret;
+
+ cb2 = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!cb2) {
+ netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
+ goto resched;
+ }
+
+ hret = ehea_h_query_ehea_port(port->adapter->handle,
+ port->logical_port_id,
+ H_PORT_CB2, H_PORT_CB2_ALL, cb2);
+ if (hret != H_SUCCESS) {
+ netdev_err(dev, "query_ehea_port failed\n");
+ goto out_herr;
+ }
+
+ if (netif_msg_hw(port))
+ ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
+
+ stats->multicast = cb2->rxmcp;
+ stats->rx_errors = cb2->rxuerr;
+
+out_herr:
+ free_page((unsigned long)cb2);
+resched:
+ schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+}
+
+static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
+{
+ struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
+ struct net_device *dev = pr->port->netdev;
+ int max_index_mask = pr->rq1_skba.len - 1;
+ int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
+ int adder = 0;
+ int i;
+
+ pr->rq1_skba.os_skbs = 0;
+
+ if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
+ if (nr_of_wqes > 0)
+ pr->rq1_skba.index = index;
+ pr->rq1_skba.os_skbs = fill_wqes;
+ return;
+ }
+
+ for (i = 0; i < fill_wqes; i++) {
+ if (!skb_arr_rq1[index]) {
+ skb_arr_rq1[index] = netdev_alloc_skb(dev,
+ EHEA_L_PKT_SIZE);
+ if (!skb_arr_rq1[index]) {
+ netdev_info(dev, "Unable to allocate enough skb in the array\n");
+ pr->rq1_skba.os_skbs = fill_wqes - i;
+ break;
+ }
+ }
+ index--;
+ index &= max_index_mask;
+ adder++;
+ }
+
+ if (adder == 0)
+ return;
+
+ /* Ring doorbell */
+ ehea_update_rq1a(pr->qp, adder);
+}
+
+static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
+{
+ struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
+ struct net_device *dev = pr->port->netdev;
+ int i;
+
+ if (nr_rq1a > pr->rq1_skba.len) {
+ netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
+ return;
+ }
+
+ for (i = 0; i < nr_rq1a; i++) {
+ skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
+ if (!skb_arr_rq1[i]) {
+ netdev_info(dev, "Not enough memory to allocate skb array\n");
+ break;
+ }
+ }
+ /* Ring doorbell */
+ ehea_update_rq1a(pr->qp, i - 1);
+}
+
+static int ehea_refill_rq_def(struct ehea_port_res *pr,
+ struct ehea_q_skb_arr *q_skba, int rq_nr,
+ int num_wqes, int wqe_type, int packet_size)
+{
+ struct net_device *dev = pr->port->netdev;
+ struct ehea_qp *qp = pr->qp;
+ struct sk_buff **skb_arr = q_skba->arr;
+ struct ehea_rwqe *rwqe;
+ int i, index, max_index_mask, fill_wqes;
+ int adder = 0;
+ int ret = 0;
+
+ fill_wqes = q_skba->os_skbs + num_wqes;
+ q_skba->os_skbs = 0;
+
+ if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
+ q_skba->os_skbs = fill_wqes;
+ return ret;
+ }
+
+ index = q_skba->index;
+ max_index_mask = q_skba->len - 1;
+ for (i = 0; i < fill_wqes; i++) {
+ u64 tmp_addr;
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(dev, packet_size);
+ if (!skb) {
+ q_skba->os_skbs = fill_wqes - i;
+ if (q_skba->os_skbs == q_skba->len - 2) {
+ netdev_info(pr->port->netdev,
+ "rq%i ran dry - no mem for skb\n",
+ rq_nr);
+ ret = -ENOMEM;
+ }
+ break;
+ }
+
+ skb_arr[index] = skb;
+ tmp_addr = ehea_map_vaddr(skb->data);
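+		/* ehea_map_vaddr() returns -1 when the buffer cannot be
+		 * translated to an eHEA bus address; the remaining WQEs
+		 * are kept in os_skbs and the refill is retried later. */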
+ if (tmp_addr == -1) {
+ dev_kfree_skb(skb);
+ q_skba->os_skbs = fill_wqes - i;
+ ret = 0;
+ break;
+ }
+
+ rwqe = ehea_get_next_rwqe(qp, rq_nr);
+ rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
+ | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
+ rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
+ rwqe->sg_list[0].vaddr = tmp_addr;
+ rwqe->sg_list[0].len = packet_size;
+ rwqe->data_segments = 1;
+
+ index++;
+ index &= max_index_mask;
+ adder++;
+ }
+
+ q_skba->index = index;
+ if (adder == 0)
+ goto out;
+
+ /* Ring doorbell */
+ iosync();
+ if (rq_nr == 2)
+ ehea_update_rq2a(pr->qp, adder);
+ else
+ ehea_update_rq3a(pr->qp, adder);
+out:
+ return ret;
+}
+
+
+static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
+{
+ return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
+ nr_of_wqes, EHEA_RWQE2_TYPE,
+ EHEA_RQ2_PKT_SIZE);
+}
+
+
+static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
+{
+ return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
+ nr_of_wqes, EHEA_RWQE3_TYPE,
+ EHEA_MAX_PACKET_SIZE);
+}
+
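+/*
+ * Classify a receive CQE: *rq_num reports which RQ the completion belongs
+ * to.  A TCP checksum error together with a zero header length marks a
+ * non-TCP frame and is not treated as a receive error.
+ */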
+static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
+{
+ *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
+ if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
+ return 0;
+ if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
+ (cqe->header_length == 0))
+ return 0;
+ return -EINVAL;
+}
+
+static inline void ehea_fill_skb(struct net_device *dev,
+ struct sk_buff *skb, struct ehea_cqe *cqe)
+{
+	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */
+
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* The packet was not an IPV4 packet so a complemented checksum was
+ calculated. The value is found in the Internet Checksum field. */
+ if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = csum_unfold(~cqe->inet_checksum_value);
+ } else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
+ int arr_len,
+ struct ehea_cqe *cqe)
+{
+ int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
+ struct sk_buff *skb;
+ void *pref;
+ int x;
+
+ x = skb_index + 1;
+ x &= (arr_len - 1);
+
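+	/* Prefetch the skb expected to complete next (one slot ahead)
+	 * and the first cache lines of its packet data. */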
+ pref = skb_array[x];
+ if (pref) {
+ prefetchw(pref);
+ prefetchw(pref + EHEA_CACHE_LINE);
+
+ pref = (skb_array[x]->data);
+ prefetch(pref);
+ prefetch(pref + EHEA_CACHE_LINE);
+ prefetch(pref + EHEA_CACHE_LINE * 2);
+ prefetch(pref + EHEA_CACHE_LINE * 3);
+ }
+
+ skb = skb_array[skb_index];
+ skb_array[skb_index] = NULL;
+ return skb;
+}
+
+static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
+ int arr_len, int wqe_index)
+{
+ struct sk_buff *skb;
+ void *pref;
+ int x;
+
+ x = wqe_index + 1;
+ x &= (arr_len - 1);
+
+ pref = skb_array[x];
+ if (pref) {
+ prefetchw(pref);
+ prefetchw(pref + EHEA_CACHE_LINE);
+
+ pref = (skb_array[x]->data);
+ prefetchw(pref);
+ prefetchw(pref + EHEA_CACHE_LINE);
+ }
+
+ skb = skb_array[wqe_index];
+ skb_array[wqe_index] = NULL;
+ return skb;
+}
+
+static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
+ struct ehea_cqe *cqe, int *processed_rq2,
+ int *processed_rq3)
+{
+ struct sk_buff *skb;
+
+ if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
+ pr->p_stats.err_tcp_cksum++;
+ if (cqe->status & EHEA_CQE_STAT_ERR_IP)
+ pr->p_stats.err_ip_cksum++;
+ if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
+ pr->p_stats.err_frame_crc++;
+
+ if (rq == 2) {
+ *processed_rq2 += 1;
+ skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
+ dev_kfree_skb(skb);
+ } else if (rq == 3) {
+ *processed_rq3 += 1;
+ skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
+ dev_kfree_skb(skb);
+ }
+
+ if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
+ if (netif_msg_rx_err(pr->port)) {
+ pr_err("Critical receive error for QP %d. Resetting port.\n",
+ pr->qp->init_attr.qp_nr);
+ ehea_dump(cqe, sizeof(*cqe), "CQE");
+ }
+ ehea_schedule_port_reset(pr->port);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
+ void **tcph, u64 *hdr_flags, void *priv)
+{
+ struct ehea_cqe *cqe = priv;
+ unsigned int ip_len;
+ struct iphdr *iph;
+
+ /* non tcp/udp packets */
+ if (!cqe->header_length)
+ return -1;
+
+ /* non tcp packet */
+ skb_reset_network_header(skb);
+ iph = ip_hdr(skb);
+ if (iph->protocol != IPPROTO_TCP)
+ return -1;
+
+ ip_len = ip_hdrlen(skb);
+ skb_set_transport_header(skb, ip_len);
+ *tcph = tcp_hdr(skb);
+
+ /* check if ip header and tcp header are complete */
+ if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
+ return -1;
+
+ *hdr_flags = LRO_IPV4 | LRO_TCP;
+ *iphdr = iph;
+
+ return 0;
+}
+
+static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
+ struct sk_buff *skb)
+{
+ if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
+ __vlan_hwaccel_put_tag(skb, cqe->vlan_tag);
+
+ if (skb->dev->features & NETIF_F_LRO)
+ lro_receive_skb(&pr->lro_mgr, skb, cqe);
+ else
+ netif_receive_skb(skb);
+}
+
+static int ehea_proc_rwqes(struct net_device *dev,
+ struct ehea_port_res *pr,
+ int budget)
+{
+ struct ehea_port *port = pr->port;
+ struct ehea_qp *qp = pr->qp;
+ struct ehea_cqe *cqe;
+ struct sk_buff *skb;
+ struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
+ struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
+ struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
+ int skb_arr_rq1_len = pr->rq1_skba.len;
+ int skb_arr_rq2_len = pr->rq2_skba.len;
+ int skb_arr_rq3_len = pr->rq3_skba.len;
+ int processed, processed_rq1, processed_rq2, processed_rq3;
+ u64 processed_bytes = 0;
+ int wqe_index, last_wqe_index, rq, port_reset;
+
+ processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
+ last_wqe_index = 0;
+
+ cqe = ehea_poll_rq1(qp, &wqe_index);
+ while ((processed < budget) && cqe) {
+ ehea_inc_rq1(qp);
+ processed_rq1++;
+ processed++;
+ if (netif_msg_rx_status(port))
+ ehea_dump(cqe, sizeof(*cqe), "CQE");
+
+ last_wqe_index = wqe_index;
+ rmb();
+ if (!ehea_check_cqe(cqe, &rq)) {
+ if (rq == 1) {
+ /* LL RQ1 */
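+				/* Low-latency path: the frame arrives
+				 * inline in the CQE (from offset 64) and
+				 * is copied into a fresh skb instead of
+				 * using a pre-posted receive buffer. */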
+ skb = get_skb_by_index_ll(skb_arr_rq1,
+ skb_arr_rq1_len,
+ wqe_index);
+ if (unlikely(!skb)) {
+ netif_info(port, rx_err, dev,
+ "LL rq1: skb=NULL\n");
+
+ skb = netdev_alloc_skb(dev,
+ EHEA_L_PKT_SIZE);
+ if (!skb) {
+ netdev_err(dev, "Not enough memory to allocate skb\n");
+ break;
+ }
+ }
+ skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
+ cqe->num_bytes_transfered - 4);
+ ehea_fill_skb(dev, skb, cqe);
+ } else if (rq == 2) {
+ /* RQ2 */
+ skb = get_skb_by_index(skb_arr_rq2,
+ skb_arr_rq2_len, cqe);
+ if (unlikely(!skb)) {
+ netif_err(port, rx_err, dev,
+ "rq2: skb=NULL\n");
+ break;
+ }
+ ehea_fill_skb(dev, skb, cqe);
+ processed_rq2++;
+ } else {
+ /* RQ3 */
+ skb = get_skb_by_index(skb_arr_rq3,
+ skb_arr_rq3_len, cqe);
+ if (unlikely(!skb)) {
+ netif_err(port, rx_err, dev,
+ "rq3: skb=NULL\n");
+ break;
+ }
+ ehea_fill_skb(dev, skb, cqe);
+ processed_rq3++;
+ }
+
+ processed_bytes += skb->len;
+ ehea_proc_skb(pr, cqe, skb);
+ } else {
+ pr->p_stats.poll_receive_errors++;
+ port_reset = ehea_treat_poll_error(pr, rq, cqe,
+ &processed_rq2,
+ &processed_rq3);
+ if (port_reset)
+ break;
+ }
+ cqe = ehea_poll_rq1(qp, &wqe_index);
+ }
+ if (dev->features & NETIF_F_LRO)
+ lro_flush_all(&pr->lro_mgr);
+
+ pr->rx_packets += processed;
+ pr->rx_bytes += processed_bytes;
+
+ ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
+ ehea_refill_rq2(pr, processed_rq2);
+ ehea_refill_rq3(pr, processed_rq3);
+
+ return processed;
+}
+
+#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull
+
+static void reset_sq_restart_flag(struct ehea_port *port)
+{
+ int i;
+
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ struct ehea_port_res *pr = &port->port_res[i];
+ pr->sq_restart_flag = 0;
+ }
+ wake_up(&port->restart_wq);
+}
+
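+/*
+ * Post a purge-flagged marker SWQE (wr_id SWQE_RESTART_CHECK) on every
+ * send queue and wait up to 100ms for the completion handshake.  A
+ * timeout means the hardware and software queues have diverged, so the
+ * port is scheduled for a reset.
+ */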
+static void check_sqs(struct ehea_port *port)
+{
+ struct ehea_swqe *swqe;
+ int swqe_index;
+ int i, k;
+
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ struct ehea_port_res *pr = &port->port_res[i];
+ int ret;
+ k = 0;
+ swqe = ehea_get_swqe(pr->qp, &swqe_index);
+ memset(swqe, 0, SWQE_HEADER_SIZE);
+ atomic_dec(&pr->swqe_avail);
+
+ swqe->tx_control |= EHEA_SWQE_PURGE;
+ swqe->wr_id = SWQE_RESTART_CHECK;
+ swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
+ swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
+ swqe->immediate_data_length = 80;
+
+ ehea_post_swqe(pr->qp, swqe);
+
+ ret = wait_event_timeout(port->restart_wq,
+ pr->sq_restart_flag == 0,
+ msecs_to_jiffies(100));
+
+ if (!ret) {
+ pr_err("HW/SW queues out of sync\n");
+ ehea_schedule_port_reset(pr->port);
+ return;
+ }
+ }
+}
+
+
+static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
+{
+ struct sk_buff *skb;
+ struct ehea_cq *send_cq = pr->send_cq;
+ struct ehea_cqe *cqe;
+ int quota = my_quota;
+ int cqe_counter = 0;
+ int swqe_av = 0;
+ int index;
+ unsigned long flags;
+
+ cqe = ehea_poll_cq(send_cq);
+ while (cqe && (quota > 0)) {
+ ehea_inc_cq(send_cq);
+
+ cqe_counter++;
+ rmb();
+
+ if (cqe->wr_id == SWQE_RESTART_CHECK) {
+ pr->sq_restart_flag = 1;
+ swqe_av++;
+ break;
+ }
+
+ if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
+ pr_err("Bad send completion status=0x%04X\n",
+ cqe->status);
+
+ if (netif_msg_tx_err(pr->port))
+ ehea_dump(cqe, sizeof(*cqe), "Send CQE");
+
+ if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
+ pr_err("Resetting port\n");
+ ehea_schedule_port_reset(pr->port);
+ break;
+ }
+ }
+
+ if (netif_msg_tx_done(pr->port))
+ ehea_dump(cqe, sizeof(*cqe), "CQE");
+
+ if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
+ == EHEA_SWQE2_TYPE)) {
+
+ index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
+ skb = pr->sq_skba.arr[index];
+ dev_kfree_skb(skb);
+ pr->sq_skba.arr[index] = NULL;
+ }
+
+ swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
+ quota--;
+
+ cqe = ehea_poll_cq(send_cq);
+ }
+
+ ehea_update_feca(send_cq, cqe_counter);
+ atomic_add(swqe_av, &pr->swqe_avail);
+
+ spin_lock_irqsave(&pr->netif_queue, flags);
+
+ if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
+ >= pr->swqe_refill_th)) {
+ netif_wake_queue(pr->port->netdev);
+ pr->queue_stopped = 0;
+ }
+ spin_unlock_irqrestore(&pr->netif_queue, flags);
+ wake_up(&pr->port->swqe_avail_wq);
+
+ return cqe;
+}
+
+#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
+#define EHEA_POLL_MAX_CQES 65535
+
+static int ehea_poll(struct napi_struct *napi, int budget)
+{
+ struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
+ napi);
+ struct net_device *dev = pr->port->netdev;
+ struct ehea_cqe *cqe;
+ struct ehea_cqe *cqe_skb = NULL;
+ int force_irq, wqe_index;
+ int rx = 0;
+
+ force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
+ cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
+
+ if (!force_irq)
+ rx += ehea_proc_rwqes(dev, pr, budget - rx);
+
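+	/* Under budget (or forced): complete NAPI and re-arm CQ event
+	 * generation, then re-check both queues; if new completions raced
+	 * in, reschedule NAPI and keep polling instead of returning. */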
+ while ((rx != budget) || force_irq) {
+ pr->poll_counter = 0;
+ force_irq = 0;
+ napi_complete(napi);
+ ehea_reset_cq_ep(pr->recv_cq);
+ ehea_reset_cq_ep(pr->send_cq);
+ ehea_reset_cq_n1(pr->recv_cq);
+ ehea_reset_cq_n1(pr->send_cq);
+ rmb();
+ cqe = ehea_poll_rq1(pr->qp, &wqe_index);
+ cqe_skb = ehea_poll_cq(pr->send_cq);
+
+ if (!cqe && !cqe_skb)
+ return rx;
+
+ if (!napi_reschedule(napi))
+ return rx;
+
+ cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
+ rx += ehea_proc_rwqes(dev, pr, budget - rx);
+ }
+
+ pr->poll_counter++;
+ return rx;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ehea_netpoll(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < port->num_def_qps; i++)
+ napi_schedule(&port->port_res[i].napi);
+}
+#endif
+
+static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
+{
+ struct ehea_port_res *pr = param;
+
+ napi_schedule(&pr->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
+{
+ struct ehea_port *port = param;
+ struct ehea_eqe *eqe;
+ struct ehea_qp *qp;
+ u32 qp_token;
+ u64 resource_type, aer, aerr;
+ int reset_port = 0;
+
+ eqe = ehea_poll_eq(port->qp_eq);
+
+ while (eqe) {
+ qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
+ pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
+ eqe->entry, qp_token);
+
+ qp = port->port_res[qp_token].qp;
+
+ resource_type = ehea_error_data(port->adapter, qp->fw_handle,
+ &aer, &aerr);
+
+ if (resource_type == EHEA_AER_RESTYPE_QP) {
+ if ((aer & EHEA_AER_RESET_MASK) ||
+ (aerr & EHEA_AERR_RESET_MASK))
+ reset_port = 1;
+ } else
+ reset_port = 1; /* Reset in case of CQ or EQ error */
+
+ eqe = ehea_poll_eq(port->qp_eq);
+ }
+
+ if (reset_port) {
+ pr_err("Resetting port\n");
+ ehea_schedule_port_reset(port);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
+ int logical_port)
+{
+ int i;
+
+ for (i = 0; i < EHEA_MAX_PORTS; i++)
+ if (adapter->port[i])
+ if (adapter->port[i]->logical_port_id == logical_port)
+ return adapter->port[i];
+ return NULL;
+}
+
+int ehea_sense_port_attr(struct ehea_port *port)
+{
+ int ret;
+ u64 hret;
+ struct hcp_ehea_port_cb0 *cb0;
+
+ /* may be called via ehea_neq_tasklet() */
+ cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (!cb0) {
+ pr_err("no mem for cb0\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ hret = ehea_h_query_ehea_port(port->adapter->handle,
+ port->logical_port_id, H_PORT_CB0,
+ EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
+ cb0);
+ if (hret != H_SUCCESS) {
+ ret = -EIO;
+ goto out_free;
+ }
+
+ /* MAC address */
+ port->mac_addr = cb0->port_mac_addr << 16;
+
+ if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
+ ret = -EADDRNOTAVAIL;
+ goto out_free;
+ }
+
+ /* Port speed */
+ switch (cb0->port_speed) {
+ case H_SPEED_10M_H:
+ port->port_speed = EHEA_SPEED_10M;
+ port->full_duplex = 0;
+ break;
+ case H_SPEED_10M_F:
+ port->port_speed = EHEA_SPEED_10M;
+ port->full_duplex = 1;
+ break;
+ case H_SPEED_100M_H:
+ port->port_speed = EHEA_SPEED_100M;
+ port->full_duplex = 0;
+ break;
+ case H_SPEED_100M_F:
+ port->port_speed = EHEA_SPEED_100M;
+ port->full_duplex = 1;
+ break;
+ case H_SPEED_1G_F:
+ port->port_speed = EHEA_SPEED_1G;
+ port->full_duplex = 1;
+ break;
+ case H_SPEED_10G_F:
+ port->port_speed = EHEA_SPEED_10G;
+ port->full_duplex = 1;
+ break;
+ default:
+ port->port_speed = 0;
+ port->full_duplex = 0;
+ break;
+ }
+
+ port->autoneg = 1;
+ port->num_mcs = cb0->num_default_qps;
+
+ /* Number of default QPs */
+ if (use_mcs)
+ port->num_def_qps = cb0->num_default_qps;
+ else
+ port->num_def_qps = 1;
+
+ if (!port->num_def_qps) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ port->num_tx_qps = num_tx_qps;
+
+ if (port->num_def_qps >= port->num_tx_qps)
+ port->num_add_tx_qps = 0;
+ else
+ port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;
+
+ ret = 0;
+out_free:
+ if (ret || netif_msg_probe(port))
+ ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
+ free_page((unsigned long)cb0);
+out:
+ return ret;
+}
+
+int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
+{
+ struct hcp_ehea_port_cb4 *cb4;
+ u64 hret;
+ int ret = 0;
+
+ cb4 = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!cb4) {
+ pr_err("no mem for cb4\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cb4->port_speed = port_speed;
+
+ netif_carrier_off(port->netdev);
+
+ hret = ehea_h_modify_ehea_port(port->adapter->handle,
+ port->logical_port_id,
+ H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
+ if (hret == H_SUCCESS) {
+ port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;
+
+ hret = ehea_h_query_ehea_port(port->adapter->handle,
+ port->logical_port_id,
+ H_PORT_CB4, H_PORT_CB4_SPEED,
+ cb4);
+ if (hret == H_SUCCESS) {
+ switch (cb4->port_speed) {
+ case H_SPEED_10M_H:
+ port->port_speed = EHEA_SPEED_10M;
+ port->full_duplex = 0;
+ break;
+ case H_SPEED_10M_F:
+ port->port_speed = EHEA_SPEED_10M;
+ port->full_duplex = 1;
+ break;
+ case H_SPEED_100M_H:
+ port->port_speed = EHEA_SPEED_100M;
+ port->full_duplex = 0;
+ break;
+ case H_SPEED_100M_F:
+ port->port_speed = EHEA_SPEED_100M;
+ port->full_duplex = 1;
+ break;
+ case H_SPEED_1G_F:
+ port->port_speed = EHEA_SPEED_1G;
+ port->full_duplex = 1;
+ break;
+ case H_SPEED_10G_F:
+ port->port_speed = EHEA_SPEED_10G;
+ port->full_duplex = 1;
+ break;
+ default:
+ port->port_speed = 0;
+ port->full_duplex = 0;
+ break;
+ }
+ } else {
+ pr_err("Failed sensing port speed\n");
+ ret = -EIO;
+ }
+ } else {
+ if (hret == H_AUTHORITY) {
+ pr_info("Hypervisor denied setting port speed\n");
+ ret = -EPERM;
+ } else {
+ ret = -EIO;
+ pr_err("Failed setting port speed\n");
+ }
+ }
+ if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
+ netif_carrier_on(port->netdev);
+
+ free_page((unsigned long)cb4);
+out:
+ return ret;
+}
+
+static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
+{
+ int ret;
+ u8 ec;
+ u8 portnum;
+ struct ehea_port *port;
+ struct net_device *dev;
+
+ ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
+ portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
+ port = ehea_get_port(adapter, portnum);
+ if (!port) {
+ pr_err("unknown portnum %x\n", portnum);
+ return;
+ }
+ dev = port->netdev;
+
+ switch (ec) {
+ case EHEA_EC_PORTSTATE_CHG: /* port state change */
+
+ if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
+ if (!netif_carrier_ok(dev)) {
+ ret = ehea_sense_port_attr(port);
+ if (ret) {
+ netdev_err(dev, "failed resensing port attributes\n");
+ break;
+ }
+
+ netif_info(port, link, dev,
+ "Logical port up: %dMbps %s Duplex\n",
+ port->port_speed,
+ port->full_duplex == 1 ?
+ "Full" : "Half");
+
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
+ }
+ } else
+ if (netif_carrier_ok(dev)) {
+ netif_info(port, link, dev,
+ "Logical port down\n");
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+ }
+
+ if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
+ port->phy_link = EHEA_PHY_LINK_UP;
+ netif_info(port, link, dev,
+ "Physical port up\n");
+ if (prop_carrier_state)
+ netif_carrier_on(dev);
+ } else {
+ port->phy_link = EHEA_PHY_LINK_DOWN;
+ netif_info(port, link, dev,
+ "Physical port down\n");
+ if (prop_carrier_state)
+ netif_carrier_off(dev);
+ }
+
+ if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
+ netdev_info(dev,
+ "External switch port is primary port\n");
+ else
+ netdev_info(dev,
+ "External switch port is backup port\n");
+
+ break;
+ case EHEA_EC_ADAPTER_MALFUNC:
+ netdev_err(dev, "Adapter malfunction\n");
+ break;
+ case EHEA_EC_PORT_MALFUNC:
+ netdev_info(dev, "Port malfunction\n");
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+ break;
+ default:
+ netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
+ break;
+ }
+}
+
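+/*
+ * Tasklet for the adapter-wide notification event queue (NEQ):
+ * drains all pending EQEs, then re-arms the event sources so that
+ * further port state and malfunction events raise the NEQ interrupt.
+ */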
+static void ehea_neq_tasklet(unsigned long data)
+{
+ struct ehea_adapter *adapter = (struct ehea_adapter *)data;
+ struct ehea_eqe *eqe;
+ u64 event_mask;
+
+ eqe = ehea_poll_eq(adapter->neq);
+ pr_debug("eqe=%p\n", eqe);
+
+ while (eqe) {
+ pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
+ ehea_parse_eqe(adapter, eqe->entry);
+ eqe = ehea_poll_eq(adapter->neq);
+ pr_debug("next eqe=%p\n", eqe);
+ }
+
+ event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
+ | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
+ | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);
+
+ ehea_h_reset_events(adapter->handle,
+ adapter->neq->fw_handle, event_mask);
+}
+
+static irqreturn_t ehea_interrupt_neq(int irq, void *param)
+{
+ struct ehea_adapter *adapter = param;
+ tasklet_hi_schedule(&adapter->neq_tasklet);
+ return IRQ_HANDLED;
+}
+
+static int ehea_fill_port_res(struct ehea_port_res *pr)
+{
+ int ret;
+ struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
+
+ ehea_init_fill_rq1(pr, pr->rq1_skba.len);
+
+ ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
+
+ ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);
+
+ return ret;
+}
+
+static int ehea_reg_interrupts(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct ehea_port_res *pr;
+ int i, ret;
+
+ snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
+ dev->name);
+
+ ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
+ ehea_qp_aff_irq_handler,
+ IRQF_DISABLED, port->int_aff_name, port);
+ if (ret) {
+ netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
+ port->qp_eq->attr.ist1);
+ goto out_free_qpeq;
+ }
+
+ netif_info(port, ifup, dev,
+ "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
+ port->qp_eq->attr.ist1);
+
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ pr = &port->port_res[i];
+ snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
+ "%s-queue%d", dev->name, i);
+ ret = ibmebus_request_irq(pr->eq->attr.ist1,
+ ehea_recv_irq_handler,
+ IRQF_DISABLED, pr->int_send_name,
+ pr);
+ if (ret) {
+ netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
+ i, pr->eq->attr.ist1);
+ goto out_free_req;
+ }
+ netif_info(port, ifup, dev,
+ "irq_handle 0x%X for function ehea_queue_int %d registered\n",
+ pr->eq->attr.ist1, i);
+ }
+out:
+ return ret;
+
+out_free_req:
+ while (--i >= 0) {
+ u32 ist = port->port_res[i].eq->attr.ist1;
+ ibmebus_free_irq(ist, &port->port_res[i]);
+ }
+
+out_free_qpeq:
+ ibmebus_free_irq(port->qp_eq->attr.ist1, port);
+ goto out;
+}
+
+static void ehea_free_interrupts(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct ehea_port_res *pr;
+ int i;
+
+ /* send */
+
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ pr = &port->port_res[i];
+ ibmebus_free_irq(pr->eq->attr.ist1, pr);
+ netif_info(port, intr, dev,
+ "free send irq for res %d with handle 0x%X\n",
+ i, pr->eq->attr.ist1);
+ }
+
+ /* associated events */
+ ibmebus_free_irq(port->qp_eq->attr.ist1, port);
+ netif_info(port, intr, dev,
+ "associated event interrupt for handle 0x%X freed\n",
+ port->qp_eq->attr.ist1);
+}
+
+static int ehea_configure_port(struct ehea_port *port)
+{
+ int ret, i;
+ u64 hret, mask;
+ struct hcp_ehea_port_cb0 *cb0;
+
+ ret = -ENOMEM;
+ cb0 = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!cb0)
+ goto out;
+
+ cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
+ | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
+ | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
+ | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
+ | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
+ PXLY_RC_VLAN_FILTER)
+ | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);
+
+ for (i = 0; i < port->num_mcs; i++)
+ if (use_mcs)
+ cb0->default_qpn_arr[i] =
+ port->port_res[i].qp->init_attr.qp_nr;
+ else
+ cb0->default_qpn_arr[i] =
+ port->port_res[0].qp->init_attr.qp_nr;
+
+ if (netif_msg_ifup(port))
+ ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");
+
+ mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
+ | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);
+
+ hret = ehea_h_modify_ehea_port(port->adapter->handle,
+ port->logical_port_id,
+ H_PORT_CB0, mask, cb0);
+ ret = -EIO;
+ if (hret != H_SUCCESS)
+ goto out_free;
+
+ ret = 0;
+
+out_free:
+ free_page((unsigned long)cb0);
+out:
+ return ret;
+}
+
+int ehea_gen_smrs(struct ehea_port_res *pr)
+{
+ int ret;
+ struct ehea_adapter *adapter = pr->port->adapter;
+
+ ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
+ if (ret)
+ goto out;
+
+ ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
+ if (ret)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ ehea_rem_mr(&pr->send_mr);
+out:
+ pr_err("Generating SMRS failed\n");
+ return -EIO;
+}
+
+int ehea_rem_smrs(struct ehea_port_res *pr)
+{
+ if ((ehea_rem_mr(&pr->send_mr)) ||
+ (ehea_rem_mr(&pr->recv_mr)))
+ return -EIO;
+ else
+ return 0;
+}
+
+static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
+{
+ int arr_size = sizeof(void *) * max_q_entries;
+
+ q_skba->arr = vzalloc(arr_size);
+ if (!q_skba->arr)
+ return -ENOMEM;
+
+ q_skba->len = max_q_entries;
+ q_skba->index = 0;
+ q_skba->os_skbs = 0;
+
+ return 0;
+}
+
+static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
+ struct port_res_cfg *pr_cfg, int queue_token)
+{
+ struct ehea_adapter *adapter = port->adapter;
+ enum ehea_eq_type eq_type = EHEA_EQ;
+ struct ehea_qp_init_attr *init_attr = NULL;
+ int ret = -EIO;
+ u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
+
+ tx_bytes = pr->tx_bytes;
+ tx_packets = pr->tx_packets;
+ rx_bytes = pr->rx_bytes;
+ rx_packets = pr->rx_packets;
+
+ memset(pr, 0, sizeof(struct ehea_port_res));
+
+ pr->tx_bytes = tx_bytes;
+ pr->tx_packets = tx_packets;
+ pr->rx_bytes = rx_bytes;
+ pr->rx_packets = rx_packets;
+
+ pr->port = port;
+ spin_lock_init(&pr->xmit_lock);
+ spin_lock_init(&pr->netif_queue);
+
+ pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
+ if (!pr->eq) {
+ pr_err("create_eq failed (eq)\n");
+ goto out_free;
+ }
+
+ pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
+ pr->eq->fw_handle,
+ port->logical_port_id);
+ if (!pr->recv_cq) {
+ pr_err("create_cq failed (cq_recv)\n");
+ goto out_free;
+ }
+
+ pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
+ pr->eq->fw_handle,
+ port->logical_port_id);
+ if (!pr->send_cq) {
+ pr_err("create_cq failed (cq_send)\n");
+ goto out_free;
+ }
+
+ if (netif_msg_ifup(port))
+ pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
+ pr->send_cq->attr.act_nr_of_cqes,
+ pr->recv_cq->attr.act_nr_of_cqes);
+
+ init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
+ if (!init_attr) {
+ ret = -ENOMEM;
+ pr_err("no mem for ehea_qp_init_attr\n");
+ goto out_free;
+ }
+
+ init_attr->low_lat_rq1 = 1;
+ init_attr->signalingtype = 1; /* generate CQE if specified in WQE */
+ init_attr->rq_count = 3;
+ init_attr->qp_token = queue_token;
+ init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
+ init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
+ init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
+ init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
+ init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
+ init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
+ init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
+ init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
+ init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
+ init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
+ init_attr->port_nr = port->logical_port_id;
+ init_attr->send_cq_handle = pr->send_cq->fw_handle;
+ init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
+ init_attr->aff_eq_handle = port->qp_eq->fw_handle;
+
+ pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
+ if (!pr->qp) {
+ pr_err("create_qp failed\n");
+ ret = -EIO;
+ goto out_free;
+ }
+
+ if (netif_msg_ifup(port))
+ pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
+ init_attr->qp_nr,
+ init_attr->act_nr_send_wqes,
+ init_attr->act_nr_rwqes_rq1,
+ init_attr->act_nr_rwqes_rq2,
+ init_attr->act_nr_rwqes_rq3);
+
+ pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
+
+ ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
+ ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
+ ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
+ ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
+ if (ret)
+ goto out_free;
+
+ pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
+ if (ehea_gen_smrs(pr) != 0) {
+ ret = -EIO;
+ goto out_free;
+ }
+
+ atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);
+
+ kfree(init_attr);
+
+ netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
+
+ pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
+ pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
+ pr->lro_mgr.lro_arr = pr->lro_desc;
+ pr->lro_mgr.get_skb_header = get_skb_hdr;
+ pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
+ pr->lro_mgr.dev = port->netdev;
+ pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
+ pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+
+ ret = 0;
+ goto out;
+
+out_free:
+ kfree(init_attr);
+ vfree(pr->sq_skba.arr);
+ vfree(pr->rq1_skba.arr);
+ vfree(pr->rq2_skba.arr);
+ vfree(pr->rq3_skba.arr);
+ ehea_destroy_qp(pr->qp);
+ ehea_destroy_cq(pr->send_cq);
+ ehea_destroy_cq(pr->recv_cq);
+ ehea_destroy_eq(pr->eq);
+out:
+ return ret;
+}
+
+static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
+{
+ int ret, i;
+
+ if (pr->qp)
+ netif_napi_del(&pr->napi);
+
+ ret = ehea_destroy_qp(pr->qp);
+
+ if (!ret) {
+ ehea_destroy_cq(pr->send_cq);
+ ehea_destroy_cq(pr->recv_cq);
+ ehea_destroy_eq(pr->eq);
+
+ for (i = 0; i < pr->rq1_skba.len; i++)
+ if (pr->rq1_skba.arr[i])
+ dev_kfree_skb(pr->rq1_skba.arr[i]);
+
+ for (i = 0; i < pr->rq2_skba.len; i++)
+ if (pr->rq2_skba.arr[i])
+ dev_kfree_skb(pr->rq2_skba.arr[i]);
+
+ for (i = 0; i < pr->rq3_skba.len; i++)
+ if (pr->rq3_skba.arr[i])
+ dev_kfree_skb(pr->rq3_skba.arr[i]);
+
+ for (i = 0; i < pr->sq_skba.len; i++)
+ if (pr->sq_skba.arr[i])
+ dev_kfree_skb(pr->sq_skba.arr[i]);
+
+ vfree(pr->rq1_skba.arr);
+ vfree(pr->rq2_skba.arr);
+ vfree(pr->rq3_skba.arr);
+ vfree(pr->sq_skba.arr);
+ ret = ehea_rem_smrs(pr);
+ }
+ return ret;
+}
+
+/*
+ * The write_* functions store header offsets in the swqe which the
+ * hardware uses to calculate the IP/TCP/UDP checksums
+ */
+
+static inline void write_ip_start_end(struct ehea_swqe *swqe,
+ const struct sk_buff *skb)
+{
+ swqe->ip_start = skb_network_offset(skb);
+ swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
+}
+
+static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
+ const struct sk_buff *skb)
+{
+ swqe->tcp_offset =
+ (u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));
+
+ swqe->tcp_end = (u16)skb->len - 1;
+}
+
+static inline void write_udp_offset_end(struct ehea_swqe *swqe,
+ const struct sk_buff *skb)
+{
+ swqe->tcp_offset =
+ (u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));
+
+ swqe->tcp_end = (u16)skb->len - 1;
+}
+
+static void write_swqe2_TSO(struct sk_buff *skb,
+ struct ehea_swqe *swqe, u32 lkey)
+{
+ struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
+ u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
+ int skb_data_size = skb_headlen(skb);
+ int headersize;
+
+ /* Packet is TCP with TSO enabled */
+ swqe->tx_control |= EHEA_SWQE_TSO;
+ swqe->mss = skb_shinfo(skb)->gso_size;
+ /* copy only eth/ip/tcp headers to immediate data and
+ * the rest of skb->data to sg1entry
+ */
+ headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+
+ if (skb_data_size >= headersize) {
+ /* copy immediate data */
+ skb_copy_from_linear_data(skb, imm_data, headersize);
+ swqe->immediate_data_length = headersize;
+
+ if (skb_data_size > headersize) {
+ /* set sg1entry data */
+ sg1entry->l_key = lkey;
+ sg1entry->len = skb_data_size - headersize;
+ sg1entry->vaddr =
+ ehea_map_vaddr(skb->data + headersize);
+ swqe->descriptors++;
+ }
+ } else
+ pr_err("cannot handle fragmented headers\n");
+}
+
+static void write_swqe2_nonTSO(struct sk_buff *skb,
+ struct ehea_swqe *swqe, u32 lkey)
+{
+ int skb_data_size = skb_headlen(skb);
+ u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
+ struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
+
+ /* Packet is any non-TSO type
+ *
+ * Copy as much of skb->data as possible into the immediate data
+ * area; the rest goes into sg1entry
+ */
+ if (skb_data_size >= SWQE2_MAX_IMM) {
+ /* copy immediate data */
+ skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);
+
+ swqe->immediate_data_length = SWQE2_MAX_IMM;
+
+ if (skb_data_size > SWQE2_MAX_IMM) {
+ /* copy sg1entry data */
+ sg1entry->l_key = lkey;
+ sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
+ sg1entry->vaddr =
+ ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
+ swqe->descriptors++;
+ }
+ } else {
+ skb_copy_from_linear_data(skb, imm_data, skb_data_size);
+ swqe->immediate_data_length = skb_data_size;
+ }
+}
+
+static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
+ struct ehea_swqe *swqe, u32 lkey)
+{
+ struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
+ skb_frag_t *frag;
+ int nfrags, sg1entry_contains_frag_data, i;
+
+ nfrags = skb_shinfo(skb)->nr_frags;
+ sg1entry = &swqe->u.immdata_desc.sg_entry;
+ sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
+ swqe->descriptors = 0;
+ sg1entry_contains_frag_data = 0;
+
+ if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
+ write_swqe2_TSO(skb, swqe, lkey);
+ else
+ write_swqe2_nonTSO(skb, swqe, lkey);
+
+ /* write descriptors */
+ if (nfrags > 0) {
+ if (swqe->descriptors == 0) {
+ /* sg1entry not yet used */
+ frag = &skb_shinfo(skb)->frags[0];
+
+ /* copy sg1entry data */
+ sg1entry->l_key = lkey;
+ sg1entry->len = frag->size;
+ sg1entry->vaddr =
+ ehea_map_vaddr(page_address(frag->page)
+ + frag->page_offset);
+ swqe->descriptors++;
+ sg1entry_contains_frag_data = 1;
+ }
+
+ for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
+
+ frag = &skb_shinfo(skb)->frags[i];
+ sgentry = &sg_list[i - sg1entry_contains_frag_data];
+
+ sgentry->l_key = lkey;
+ sgentry->len = frag->size;
+ sgentry->vaddr =
+ ehea_map_vaddr(page_address(frag->page)
+ + frag->page_offset);
+ swqe->descriptors++;
+ }
+ }
+}
+
+static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
+{
+ int ret = 0;
+ u64 hret;
+ u8 reg_type;
+
+ /* De/Register untagged packets */
+ reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
+ hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
+ port->logical_port_id,
+ reg_type, port->mac_addr, 0, hcallid);
+ if (hret != H_SUCCESS) {
+ pr_err("%sregistering bc address failed (tagged)\n",
+ hcallid == H_REG_BCMC ? "" : "de");
+ ret = -EIO;
+ goto out_herr;
+ }
+
+ /* De/Register VLAN packets */
+ reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
+ hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
+ port->logical_port_id,
+ reg_type, port->mac_addr, 0, hcallid);
+ if (hret != H_SUCCESS) {
+ pr_err("%sregistering bc address failed (vlan)\n",
+ hcallid == H_REG_BCMC ? "" : "de");
+ ret = -EIO;
+ }
+out_herr:
+ return ret;
+}
+
+static int ehea_set_mac_addr(struct net_device *dev, void *sa)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct sockaddr *mac_addr = sa;
+ struct hcp_ehea_port_cb0 *cb0;
+ int ret;
+ u64 hret;
+
+ if (!is_valid_ether_addr(mac_addr->sa_data)) {
+ ret = -EADDRNOTAVAIL;
+ goto out;
+ }
+
+ cb0 = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!cb0) {
+ pr_err("no mem for cb0\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);
+
+ cb0->port_mac_addr = cb0->port_mac_addr >> 16;
+
+ hret = ehea_h_modify_ehea_port(port->adapter->handle,
+ port->logical_port_id, H_PORT_CB0,
+ EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
+ if (hret != H_SUCCESS) {
+ ret = -EIO;
+ goto out_free;
+ }
+
+ memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
+
+ /* Deregister old MAC in pHYP */
+ if (port->state == EHEA_PORT_UP) {
+ ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
+ if (ret)
+ goto out_upregs;
+ }
+
+ port->mac_addr = cb0->port_mac_addr << 16;
+
+ /* Register new MAC in pHYP */
+ if (port->state == EHEA_PORT_UP) {
+ ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
+ if (ret)
+ goto out_upregs;
+ }
+
+ ret = 0;
+
+out_upregs:
+ ehea_update_bcmc_registrations();
+out_free:
+ free_page((unsigned long)cb0);
+out:
+ return ret;
+}
+
+static void ehea_promiscuous_error(u64 hret, int enable)
+{
+ if (hret == H_AUTHORITY)
+ pr_info("Hypervisor denied %sabling promiscuous mode\n",
+ enable == 1 ? "en" : "dis");
+ else
+ pr_err("failed %sabling promiscuous mode\n",
+ enable == 1 ? "en" : "dis");
+}
+
+static void ehea_promiscuous(struct net_device *dev, int enable)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct hcp_ehea_port_cb7 *cb7;
+ u64 hret;
+
+ if (enable == port->promisc)
+ return;
+
+ cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (!cb7) {
+ pr_err("no mem for cb7\n");
+ goto out;
+ }
+
+ /* Modify Pxs_DUCQPN in CB7 */
+ cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;
+
+ hret = ehea_h_modify_ehea_port(port->adapter->handle,
+ port->logical_port_id,
+ H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
+ if (hret) {
+ ehea_promiscuous_error(hret, enable);
+ goto out;
+ }
+
+ port->promisc = enable;
+out:
+ free_page((unsigned long)cb7);
+}
+
+static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
+ u32 hcallid)
+{
+ u64 hret;
+ u8 reg_type;
+
+ reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
+ | EHEA_BCMC_UNTAGGED;
+
+ hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
+ port->logical_port_id,
+ reg_type, mc_mac_addr, 0, hcallid);
+ if (hret)
+ goto out;
+
+ reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
+ | EHEA_BCMC_VLANID_ALL;
+
+ hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
+ port->logical_port_id,
+ reg_type, mc_mac_addr, 0, hcallid);
+out:
+ return hret;
+}
+
+static int ehea_drop_multicast_list(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct ehea_mc_list *mc_entry = port->mc_list;
+ struct list_head *pos;
+ struct list_head *temp;
+ int ret = 0;
+ u64 hret;
+
+ list_for_each_safe(pos, temp, &(port->mc_list->list)) {
+ mc_entry = list_entry(pos, struct ehea_mc_list, list);
+
+ hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
+ H_DEREG_BCMC);
+ if (hret) {
+ pr_err("failed deregistering mcast MAC\n");
+ ret = -EIO;
+ }
+
+ list_del(pos);
+ kfree(mc_entry);
+ }
+ return ret;
+}
+
+static void ehea_allmulti(struct net_device *dev, int enable)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ u64 hret;
+
+ if (!port->allmulti) {
+ if (enable) {
+ /* Enable ALLMULTI */
+ ehea_drop_multicast_list(dev);
+ hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
+ if (!hret)
+ port->allmulti = 1;
+ else
+ netdev_err(dev,
+ "failed enabling IFF_ALLMULTI\n");
+ }
+ } else
+ if (!enable) {
+ /* Disable ALLMULTI */
+ hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
+ if (!hret)
+ port->allmulti = 0;
+ else
+ netdev_err(dev,
+ "failed disabling IFF_ALLMULTI\n");
+ }
+}
+
+static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
+{
+ struct ehea_mc_list *ehea_mcl_entry;
+ u64 hret;
+
+ ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
+ if (!ehea_mcl_entry) {
+ pr_err("no mem for mcl_entry\n");
+ return;
+ }
+
+ INIT_LIST_HEAD(&ehea_mcl_entry->list);
+
+ memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);
+
+ hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
+ H_REG_BCMC);
+ if (!hret)
+ list_add(&ehea_mcl_entry->list, &port->mc_list->list);
+ else {
+ pr_err("failed registering mcast MAC\n");
+ kfree(ehea_mcl_entry);
+ }
+}
+
+static void ehea_set_multicast_list(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ int ret;
+
+ if (port->promisc) {
+ ehea_promiscuous(dev, 1);
+ return;
+ }
+ ehea_promiscuous(dev, 0);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ ehea_allmulti(dev, 1);
+ goto out;
+ }
+ ehea_allmulti(dev, 0);
+
+ if (!netdev_mc_empty(dev)) {
+ ret = ehea_drop_multicast_list(dev);
+ if (ret) {
+ /* Dropping the current multicast list failed.
+ * Enabling ALL_MULTI is the best we can do.
+ */
+ ehea_allmulti(dev, 1);
+ }
+
+ if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
+ pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
+ port->adapter->max_mc_mac);
+ goto out;
+ }
+
+ netdev_for_each_mc_addr(ha, dev)
+ ehea_add_multicast_entry(port, ha->addr);
+
+ }
+out:
+ ehea_update_bcmc_registrations();
+}
+
+static int ehea_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
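+/*
+ * Build a type-2 SWQE: set up checksum offload and place the frame
+ * as immediate data plus scatter/gather descriptors (used for skbs
+ * larger than SWQE3_MAX_IMM).
+ */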
+static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
+ struct ehea_swqe *swqe, u32 lkey)
+{
+ if (skb->protocol == htons(ETH_P_IP)) {
+ const struct iphdr *iph = ip_hdr(skb);
+
+ /* IPv4 */
+ swqe->tx_control |= EHEA_SWQE_CRC
+ | EHEA_SWQE_IP_CHECKSUM
+ | EHEA_SWQE_TCP_CHECKSUM
+ | EHEA_SWQE_IMM_DATA_PRESENT
+ | EHEA_SWQE_DESCRIPTORS_PRESENT;
+
+ write_ip_start_end(swqe, skb);
+
+ if (iph->protocol == IPPROTO_UDP) {
+ if ((iph->frag_off & IP_MF) ||
+ (iph->frag_off & IP_OFFSET))
+ /* IP fragment, so don't change cs */
+ swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
+ else
+ write_udp_offset_end(swqe, skb);
+ } else if (iph->protocol == IPPROTO_TCP) {
+ write_tcp_offset_end(swqe, skb);
+ }
+
+ /* icmp (big data) and ip segmentation packets (all other ip
+ * packets) do not require any special handling */
+
+ } else {
+ /* Other Ethernet Protocol */
+ swqe->tx_control |= EHEA_SWQE_CRC
+ | EHEA_SWQE_IMM_DATA_PRESENT
+ | EHEA_SWQE_DESCRIPTORS_PRESENT;
+ }
+
+ write_swqe2_data(skb, dev, swqe, lkey);
+}
+
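+/*
+ * Build a type-3 SWQE: the whole frame, linear data and fragments,
+ * is copied into the immediate data area, so the skb can be freed
+ * right away.
+ */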
+static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
+ struct ehea_swqe *swqe)
+{
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
+ skb_frag_t *frag;
+ int i;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ const struct iphdr *iph = ip_hdr(skb);
+
+ /* IPv4 */
+ write_ip_start_end(swqe, skb);
+
+ if (iph->protocol == IPPROTO_TCP) {
+ swqe->tx_control |= EHEA_SWQE_CRC
+ | EHEA_SWQE_IP_CHECKSUM
+ | EHEA_SWQE_TCP_CHECKSUM
+ | EHEA_SWQE_IMM_DATA_PRESENT;
+
+ write_tcp_offset_end(swqe, skb);
+
+ } else if (iph->protocol == IPPROTO_UDP) {
+ if ((iph->frag_off & IP_MF) ||
+ (iph->frag_off & IP_OFFSET))
+ /* IP fragment, so don't change cs */
+ swqe->tx_control |= EHEA_SWQE_CRC
+ | EHEA_SWQE_IMM_DATA_PRESENT;
+ else {
+ swqe->tx_control |= EHEA_SWQE_CRC
+ | EHEA_SWQE_IP_CHECKSUM
+ | EHEA_SWQE_TCP_CHECKSUM
+ | EHEA_SWQE_IMM_DATA_PRESENT;
+
+ write_udp_offset_end(swqe, skb);
+ }
+ } else {
+ /* icmp (big data) and
+ * ip segmentation packets (all other ip packets) */
+ swqe->tx_control |= EHEA_SWQE_CRC
+ | EHEA_SWQE_IP_CHECKSUM
+ | EHEA_SWQE_IMM_DATA_PRESENT;
+ }
+ } else {
+ /* Other Ethernet Protocol */
+ swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
+ }
+ /* copy (immediate) data */
+ if (nfrags == 0) {
+ /* data is in a single piece */
+ skb_copy_from_linear_data(skb, imm_data, skb->len);
+ } else {
+ /* first copy data from the skb->data buffer ... */
+ skb_copy_from_linear_data(skb, imm_data,
+ skb_headlen(skb));
+ imm_data += skb_headlen(skb);
+
+ /* ... then copy data from the fragments */
+ for (i = 0; i < nfrags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ memcpy(imm_data,
+ page_address(frag->page) + frag->page_offset,
+ frag->size);
+ imm_data += frag->size;
+ }
+ }
+ swqe->immediate_data_length = skb->len;
+ dev_kfree_skb(skb);
+}
+
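+/*
+ * Select a TX queue: hash the TCP ports and the destination address
+ * into [0, num_qps); all non-TCP traffic uses queue 0.
+ */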
+static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
+{
+ struct tcphdr *tcp;
+ u32 tmp;
+
+ if ((skb->protocol == htons(ETH_P_IP)) &&
+ (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
+ tcp = (struct tcphdr *)(skb_network_header(skb) +
+ (ip_hdr(skb)->ihl * 4));
+ tmp = (tcp->source + (tcp->dest << 16)) % 31;
+ tmp += ip_hdr(skb)->daddr % 31;
+ return tmp % num_qps;
+ } else
+ return 0;
+}
+
+static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct ehea_swqe *swqe;
+ unsigned long flags;
+ u32 lkey;
+ int swqe_index;
+ struct ehea_port_res *pr;
+
+ pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];
+
+ if (!spin_trylock(&pr->xmit_lock))
+ return NETDEV_TX_BUSY;
+
+ if (pr->queue_stopped) {
+ spin_unlock(&pr->xmit_lock);
+ return NETDEV_TX_BUSY;
+ }
+
+ swqe = ehea_get_swqe(pr->qp, &swqe_index);
+ memset(swqe, 0, SWQE_HEADER_SIZE);
+ atomic_dec(&pr->swqe_avail);
+
+ if (vlan_tx_tag_present(skb)) {
+ swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
+ swqe->vlan_tag = vlan_tx_tag_get(skb);
+ }
+
+ pr->tx_packets++;
+ pr->tx_bytes += skb->len;
+
+ if (skb->len <= SWQE3_MAX_IMM) {
+ u32 sig_iv = port->sig_comp_iv;
+ u32 swqe_num = pr->swqe_id_counter;
+ ehea_xmit3(skb, dev, swqe);
+ swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
+ | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
+ if (pr->swqe_ll_count >= (sig_iv - 1)) {
+ swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
+ sig_iv);
+ swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
+ pr->swqe_ll_count = 0;
+ } else
+ pr->swqe_ll_count += 1;
+ } else {
+ swqe->wr_id =
+ EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
+ | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
+ | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
+ | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
+ pr->sq_skba.arr[pr->sq_skba.index] = skb;
+
+ pr->sq_skba.index++;
+ pr->sq_skba.index &= (pr->sq_skba.len - 1);
+
+ lkey = pr->send_mr.lkey;
+ ehea_xmit2(skb, dev, swqe, lkey);
+ swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
+ }
+ pr->swqe_id_counter += 1;
+
+ netif_info(port, tx_queued, dev,
+ "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
+ if (netif_msg_tx_queued(port))
+ ehea_dump(swqe, 512, "swqe");
+
+ if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
+ netif_stop_queue(dev);
+ swqe->tx_control |= EHEA_SWQE_PURGE;
+ }
+
+ ehea_post_swqe(pr->qp, swqe);
+
+ if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
+ spin_lock_irqsave(&pr->netif_queue, flags);
+ if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
+ pr->p_stats.queue_stopped++;
+ netif_stop_queue(dev);
+ pr->queue_stopped = 1;
+ }
+ spin_unlock_irqrestore(&pr->netif_queue, flags);
+ }
+ dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+ spin_unlock(&pr->xmit_lock);
+
+ return NETDEV_TX_OK;
+}
+
+static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct ehea_adapter *adapter = port->adapter;
+ struct hcp_ehea_port_cb1 *cb1;
+ int index;
+ u64 hret;
+
+ cb1 = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!cb1) {
+ pr_err("no mem for cb1\n");
+ goto out;
+ }
+
+ hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
+ H_PORT_CB1, H_PORT_CB1_ALL, cb1);
+ if (hret != H_SUCCESS) {
+ pr_err("query_ehea_port failed\n");
+ goto out;
+ }
+
+ index = (vid / 64);
+ cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));
+
+ hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
+ H_PORT_CB1, H_PORT_CB1_ALL, cb1);
+ if (hret != H_SUCCESS)
+ pr_err("modify_ehea_port failed\n");
+out:
+ free_page((unsigned long)cb1);
+}
+
+static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct ehea_adapter *adapter = port->adapter;
+ struct hcp_ehea_port_cb1 *cb1;
+ int index;
+ u64 hret;
+
+ cb1 = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!cb1) {
+ pr_err("no mem for cb1\n");
+ goto out;
+ }
+
+ hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
+ H_PORT_CB1, H_PORT_CB1_ALL, cb1);
+ if (hret != H_SUCCESS) {
+ pr_err("query_ehea_port failed\n");
+ goto out;
+ }
+
+ index = (vid / 64);
+ cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));
+
+ hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
+ H_PORT_CB1, H_PORT_CB1_ALL, cb1);
+ if (hret != H_SUCCESS)
+ pr_err("modify_ehea_port failed\n");
+out:
+ free_page((unsigned long)cb1);
+}
+
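+/*
+ * Walk a QP through its state machine with query/modify pairs:
+ * INITIALIZED -> ENABLED -> ready to send (RDY2SND).
+ */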
+int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
+{
+ int ret = -EIO;
+ u64 hret;
+ u16 dummy16 = 0;
+ u64 dummy64 = 0;
+ struct hcp_modify_qp_cb0 *cb0;
+
+ cb0 = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!cb0) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
+ EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
+ if (hret != H_SUCCESS) {
+ pr_err("query_ehea_qp failed (1)\n");
+ goto out;
+ }
+
+ cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
+ hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
+ EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
+ &dummy64, &dummy64, &dummy16, &dummy16);
+ if (hret != H_SUCCESS) {
+ pr_err("modify_ehea_qp failed (1)\n");
+ goto out;
+ }
+
+ hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
+ EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
+ if (hret != H_SUCCESS) {
+ pr_err("query_ehea_qp failed (2)\n");
+ goto out;
+ }
+
+ cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
+ hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
+ EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
+ &dummy64, &dummy64, &dummy16, &dummy16);
+ if (hret != H_SUCCESS) {
+ pr_err("modify_ehea_qp failed (2)\n");
+ goto out;
+ }
+
+ hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
+ EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
+ if (hret != H_SUCCESS) {
+ pr_err("query_ehea_qp failed (3)\n");
+ goto out;
+ }
+
+ cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
+ hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
+ EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
+ &dummy64, &dummy64, &dummy16, &dummy16);
+ if (hret != H_SUCCESS) {
+ pr_err("modify_ehea_qp failed (3)\n");
+ goto out;
+ }
+
+ hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
+ EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
+ if (hret != H_SUCCESS) {
+ pr_err("query_ehea_qp failed (4)\n");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ free_page((unsigned long)cb0);
+ return ret;
+}
+
+static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
+ int add_tx_qps)
+{
+ int ret, i;
+ struct port_res_cfg pr_cfg, pr_cfg_small_rx;
+ enum ehea_eq_type eq_type = EHEA_EQ;
+
+ port->qp_eq = ehea_create_eq(port->adapter, eq_type,
+ EHEA_MAX_ENTRIES_EQ, 1);
+ if (!port->qp_eq) {
+ ret = -EINVAL;
+ pr_err("ehea_create_eq failed (qp_eq)\n");
+ goto out_kill_eq;
+ }
+
+ pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
+ pr_cfg.max_entries_scq = sq_entries * 2;
+ pr_cfg.max_entries_sq = sq_entries;
+ pr_cfg.max_entries_rq1 = rq1_entries;
+ pr_cfg.max_entries_rq2 = rq2_entries;
+ pr_cfg.max_entries_rq3 = rq3_entries;
+
+ pr_cfg_small_rx.max_entries_rcq = 1;
+ pr_cfg_small_rx.max_entries_scq = sq_entries;
+ pr_cfg_small_rx.max_entries_sq = sq_entries;
+ pr_cfg_small_rx.max_entries_rq1 = 1;
+ pr_cfg_small_rx.max_entries_rq2 = 1;
+ pr_cfg_small_rx.max_entries_rq3 = 1;
+
+ for (i = 0; i < def_qps; i++) {
+ ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
+ if (ret)
+ goto out_clean_pr;
+ }
+ for (i = def_qps; i < def_qps + add_tx_qps; i++) {
+ ret = ehea_init_port_res(port, &port->port_res[i],
+ &pr_cfg_small_rx, i);
+ if (ret)
+ goto out_clean_pr;
+ }
+
+ return 0;
+
+out_clean_pr:
+ while (--i >= 0)
+ ehea_clean_portres(port, &port->port_res[i]);
+
+out_kill_eq:
+ ehea_destroy_eq(port->qp_eq);
+ return ret;
+}
+
+static int ehea_clean_all_portres(struct ehea_port *port)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ ret |= ehea_clean_portres(port, &port->port_res[i]);
+
+ ret |= ehea_destroy_eq(port->qp_eq);
+
+ return ret;
+}
+
+static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
+{
+ if (adapter->active_ports)
+ return;
+
+ ehea_rem_mr(&adapter->mr);
+}
+
+static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
+{
+ if (adapter->active_ports)
+ return 0;
+
+ return ehea_reg_kernel_mr(adapter, &adapter->mr);
+}
+
+static int ehea_up(struct net_device *dev)
+{
+ int ret, i;
+ struct ehea_port *port = netdev_priv(dev);
+
+ if (port->state == EHEA_PORT_UP)
+ return 0;
+
+ ret = ehea_port_res_setup(port, port->num_def_qps,
+ port->num_add_tx_qps);
+ if (ret) {
+ netdev_err(dev, "port_res_failed\n");
+ goto out;
+ }
+
+ /* Set default QP for this port */
+ ret = ehea_configure_port(port);
+ if (ret) {
+ netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
+ goto out_clean_pr;
+ }
+
+ ret = ehea_reg_interrupts(dev);
+ if (ret) {
+ netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
+ goto out_clean_pr;
+ }
+
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
+ if (ret) {
+ netdev_err(dev, "activate_qp failed\n");
+ goto out_free_irqs;
+ }
+ }
+
+ for (i = 0; i < port->num_def_qps; i++) {
+ ret = ehea_fill_port_res(&port->port_res[i]);
+ if (ret) {
+ netdev_err(dev, "out_free_irqs\n");
+ goto out_free_irqs;
+ }
+ }
+
+ ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
+ if (ret) {
+ ret = -EIO;
+ goto out_free_irqs;
+ }
+
+ port->state = EHEA_PORT_UP;
+
+ ret = 0;
+ goto out;
+
+out_free_irqs:
+ ehea_free_interrupts(dev);
+
+out_clean_pr:
+ ehea_clean_all_portres(port);
+out:
+ if (ret)
+ netdev_info(dev, "Failed starting. ret=%i\n", ret);
+
+ ehea_update_bcmc_registrations();
+ ehea_update_firmware_handles();
+
+ return ret;
+}
+
+static void port_napi_disable(struct ehea_port *port)
+{
+ int i;
+
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ napi_disable(&port->port_res[i].napi);
+}
+
+static void port_napi_enable(struct ehea_port *port)
+{
+ int i;
+
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ napi_enable(&port->port_res[i].napi);
+}
+
+static int ehea_open(struct net_device *dev)
+{
+ int ret;
+ struct ehea_port *port = netdev_priv(dev);
+
+ mutex_lock(&port->port_lock);
+
+ netif_info(port, ifup, dev, "enabling port\n");
+
+ ret = ehea_up(dev);
+ if (!ret) {
+ port_napi_enable(port);
+ netif_start_queue(dev);
+ }
+
+ mutex_unlock(&port->port_lock);
+ schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+
+ return ret;
+}
+
+static int ehea_down(struct net_device *dev)
+{
+ int ret;
+ struct ehea_port *port = netdev_priv(dev);
+
+ if (port->state == EHEA_PORT_DOWN)
+ return 0;
+
+ ehea_drop_multicast_list(dev);
+ ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
+
+ ehea_free_interrupts(dev);
+
+ port->state = EHEA_PORT_DOWN;
+
+ ehea_update_bcmc_registrations();
+
+ ret = ehea_clean_all_portres(port);
+ if (ret)
+ netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);
+
+ ehea_update_firmware_handles();
+
+ return ret;
+}
+
+static int ehea_stop(struct net_device *dev)
+{
+ int ret;
+ struct ehea_port *port = netdev_priv(dev);
+
+ netif_info(port, ifdown, dev, "disabling port\n");
+
+ set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
+ cancel_work_sync(&port->reset_task);
+ cancel_delayed_work_sync(&port->stats_work);
+ mutex_lock(&port->port_lock);
+ netif_stop_queue(dev);
+ port_napi_disable(port);
+ ret = ehea_down(dev);
+ mutex_unlock(&port->port_lock);
+ clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
+ return ret;
+}
+
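+/*
+ * Mark every WQE in the send queue with EHEA_SWQE_PURGE so that the
+ * hardware discards them instead of transmitting.
+ */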
+static void ehea_purge_sq(struct ehea_qp *orig_qp)
+{
+ struct ehea_qp qp = *orig_qp;
+ struct ehea_qp_init_attr *init_attr = &qp.init_attr;
+ struct ehea_swqe *swqe;
+ int wqe_index;
+ int i;
+
+ for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
+ swqe = ehea_get_swqe(&qp, &wqe_index);
+ swqe->tx_control |= EHEA_SWQE_PURGE;
+ }
+}
+
+static void ehea_flush_sq(struct ehea_port *port)
+{
+ int i;
+
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ struct ehea_port_res *pr = &port->port_res[i];
+ int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
+ int ret;
+
+ ret = wait_event_timeout(port->swqe_avail_wq,
+ atomic_read(&pr->swqe_avail) >= swqe_max,
+ msecs_to_jiffies(100));
+
+ if (!ret) {
+ pr_err("WARNING: sq not flushed completely\n");
+ break;
+ }
+ }
+}
+
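+/*
+ * Quiesce all QPs of a port: purge the send queues, disable each QP
+ * via query/modify and deregister the shared memory regions. Used
+ * before the kernel memory region is re-registered.
+ */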
+int ehea_stop_qps(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct ehea_adapter *adapter = port->adapter;
+ struct hcp_modify_qp_cb0 *cb0;
+ int ret = -EIO;
+ int dret;
+ int i;
+ u64 hret;
+ u64 dummy64 = 0;
+ u16 dummy16 = 0;
+
+ cb0 = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!cb0) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
+ struct ehea_port_res *pr = &port->port_res[i];
+ struct ehea_qp *qp = pr->qp;
+
+ /* Purge send queue */
+ ehea_purge_sq(qp);
+
+ /* Disable queue pair */
+ hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
+ EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
+ cb0);
+ if (hret != H_SUCCESS) {
+ pr_err("query_ehea_qp failed (1)\n");
+ goto out;
+ }
+
+ cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
+ cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;
+
+ hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
+ EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
+ 1), cb0, &dummy64,
+ &dummy64, &dummy16, &dummy16);
+ if (hret != H_SUCCESS) {
+ pr_err("modify_ehea_qp failed (1)\n");
+ goto out;
+ }
+
+ hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
+ EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
+ cb0);
+ if (hret != H_SUCCESS) {
+ pr_err("query_ehea_qp failed (2)\n");
+ goto out;
+ }
+
+ /* deregister shared memory regions */
+ dret = ehea_rem_smrs(pr);
+ if (dret) {
+ pr_err("unreg shared memory region failed\n");
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ free_page((unsigned long)cb0);
+
+ return ret;
+}
+
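+/*
+ * After the memory regions have been re-registered, patch the new
+ * lkey and the remapped buffer addresses into the pending RQ2/RQ3
+ * receive WQEs.
+ */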
+void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
+{
+ struct ehea_qp qp = *orig_qp;
+ struct ehea_qp_init_attr *init_attr = &qp.init_attr;
+ struct ehea_rwqe *rwqe;
+ struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
+ struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
+ struct sk_buff *skb;
+ u32 lkey = pr->recv_mr.lkey;
+ int i;
+ int index;
+
+ for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
+ rwqe = ehea_get_next_rwqe(&qp, 2);
+ rwqe->sg_list[0].l_key = lkey;
+ index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
+ skb = skba_rq2[index];
+ if (skb)
+ rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
+ }
+
+ for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
+ rwqe = ehea_get_next_rwqe(&qp, 3);
+ rwqe->sg_list[0].l_key = lkey;
+ index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
+ skb = skba_rq3[index];
+ if (skb)
+ rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
+ }
+}
+
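+/*
+ * Counterpart of ehea_stop_qps(): regenerate the SMRs, fix up the
+ * receive WQEs, re-enable each QP and refill all receive queues.
+ */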
+int ehea_restart_qps(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct ehea_adapter *adapter = port->adapter;
+ int ret = 0;
+ int i;
+
+ struct hcp_modify_qp_cb0 *cb0;
+ u64 hret;
+ u64 dummy64 = 0;
+ u16 dummy16 = 0;
+
+ cb0 = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!cb0) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
+ struct ehea_port_res *pr = &port->port_res[i];
+ struct ehea_qp *qp = pr->qp;
+
+ ret = ehea_gen_smrs(pr);
+ if (ret) {
+ netdev_err(dev, "creation of shared memory regions failed\n");
+ goto out;
+ }
+
+ ehea_update_rqs(qp, pr);
+
+ /* Enable queue pair */
+ hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
+ EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
+ cb0);
+ if (hret != H_SUCCESS) {
+ netdev_err(dev, "query_ehea_qp failed (1)\n");
+ goto out;
+ }
+
+ cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
+ cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
+
+ hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
+ EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
+ 1), cb0, &dummy64,
+ &dummy64, &dummy16, &dummy16);
+ if (hret != H_SUCCESS) {
+ netdev_err(dev, "modify_ehea_qp failed (1)\n");
+ goto out;
+ }
+
+ hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
+ EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
+ cb0);
+ if (hret != H_SUCCESS) {
+ netdev_err(dev, "query_ehea_qp failed (2)\n");
+ goto out;
+ }
+
+ /* refill entire queue */
+ ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
+ ehea_refill_rq2(pr, 0);
+ ehea_refill_rq3(pr, 0);
+ }
+out:
+ free_page((unsigned long)cb0);
+
+ return ret;
+}
+
+static void ehea_reset_port(struct work_struct *work)
+{
+ int ret;
+ struct ehea_port *port =
+ container_of(work, struct ehea_port, reset_task);
+ struct net_device *dev = port->netdev;
+
+ mutex_lock(&dlpar_mem_lock);
+ port->resets++;
+ mutex_lock(&port->port_lock);
+ netif_stop_queue(dev);
+
+ port_napi_disable(port);
+
+ ehea_down(dev);
+
+ ret = ehea_up(dev);
+ if (ret)
+ goto out;
+
+ ehea_set_multicast_list(dev);
+
+ netif_info(port, timer, dev, "reset successful\n");
+
+ port_napi_enable(port);
+
+ netif_wake_queue(dev);
+out:
+ mutex_unlock(&port->port_lock);
+ mutex_unlock(&dlpar_mem_lock);
+}
+
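+/*
+ * Called when LPAR memory has been added or removed: quiesce all
+ * ports, re-register the kernel memory region and restart the
+ * queues with the new mappings.
+ */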
+static void ehea_rereg_mrs(void)
+{
+ int ret, i;
+ struct ehea_adapter *adapter;
+
+ pr_info("LPAR memory changed - re-initializing driver\n");
+
+ list_for_each_entry(adapter, &adapter_list, list)
+ if (adapter->active_ports) {
+ /* Shutdown all ports */
+ for (i = 0; i < EHEA_MAX_PORTS; i++) {
+ struct ehea_port *port = adapter->port[i];
+ struct net_device *dev;
+
+ if (!port)
+ continue;
+
+ dev = port->netdev;
+
+ if (dev->flags & IFF_UP) {
+ mutex_lock(&port->port_lock);
+ netif_stop_queue(dev);
+ ehea_flush_sq(port);
+ ret = ehea_stop_qps(dev);
+ if (ret) {
+ mutex_unlock(&port->port_lock);
+ goto out;
+ }
+ port_napi_disable(port);
+ mutex_unlock(&port->port_lock);
+ }
+ reset_sq_restart_flag(port);
+ }
+
+ /* Unregister old memory region */
+ ret = ehea_rem_mr(&adapter->mr);
+ if (ret) {
+ pr_err("unregister MR failed - driver inoperable!\n");
+ goto out;
+ }
+ }
+
+ clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+
+ list_for_each_entry(adapter, &adapter_list, list)
+ if (adapter->active_ports) {
+ /* Register new memory region */
+ ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
+ if (ret) {
+ pr_err("register MR failed - driver inoperable!\n");
+ goto out;
+ }
+
+ /* Restart all ports */
+ for (i = 0; i < EHEA_MAX_PORTS; i++) {
+ struct ehea_port *port = adapter->port[i];
+
+ if (port) {
+ struct net_device *dev = port->netdev;
+
+ if (dev->flags & IFF_UP) {
+ mutex_lock(&port->port_lock);
+ ret = ehea_restart_qps(dev);
+ if (!ret) {
+ check_sqs(port);
+ port_napi_enable(port);
+ netif_wake_queue(dev);
+ } else {
+ netdev_err(dev, "Unable to restart QPS\n");
+ }
+ mutex_unlock(&port->port_lock);
+ }
+ }
+ }
+ }
+ pr_info("re-initializing driver complete\n");
+out:
+ return;
+}
+
+static void ehea_tx_watchdog(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+
+ if (netif_carrier_ok(dev) &&
+ !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
+ ehea_schedule_port_reset(port);
+}
+
+int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
+{
+ struct hcp_query_ehea *cb;
+ u64 hret;
+ int ret;
+
+ cb = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!cb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ hret = ehea_h_query_ehea(adapter->handle, cb);
+
+ if (hret != H_SUCCESS) {
+ ret = -EIO;
+ goto out_herr;
+ }
+
+ adapter->max_mc_mac = cb->max_mc_mac - 1;
+ ret = 0;
+
+out_herr:
+ free_page((unsigned long)cb);
+out:
+ return ret;
+}
+
+int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
+{
+ struct hcp_ehea_port_cb4 *cb4;
+ u64 hret;
+ int ret = 0;
+
+ *jumbo = 0;
+
+ /* (Try to) enable jumbo frames */
+ cb4 = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!cb4) {
+ pr_err("no mem for cb4\n");
+ ret = -ENOMEM;
+ goto out;
+ } else {
+ hret = ehea_h_query_ehea_port(port->adapter->handle,
+ port->logical_port_id,
+ H_PORT_CB4,
+ H_PORT_CB4_JUMBO, cb4);
+ if (hret == H_SUCCESS) {
+ if (cb4->jumbo_frame)
+ *jumbo = 1;
+ else {
+ cb4->jumbo_frame = 1;
+ hret = ehea_h_modify_ehea_port(port->adapter->handle,
+ port->logical_port_id,
+ H_PORT_CB4,
+ H_PORT_CB4_JUMBO, cb4);
+ if (hret == H_SUCCESS)
+ *jumbo = 1;
+ }
+ } else
+ ret = -EINVAL;
+
+ free_page((unsigned long)cb4);
+ }
+out:
+ return ret;
+}
+
+static ssize_t ehea_show_port_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
+ return sprintf(buf, "%d", port->logical_port_id);
+}
+
+static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
+ NULL);
+
+static void __devinit logical_port_release(struct device *dev)
+{
+ struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
+ of_node_put(port->ofdev.dev.of_node);
+}
+
+static struct device *ehea_register_port(struct ehea_port *port,
+ struct device_node *dn)
+{
+ int ret;
+
+ port->ofdev.dev.of_node = of_node_get(dn);
+ port->ofdev.dev.parent = &port->adapter->ofdev->dev;
+ port->ofdev.dev.bus = &ibmebus_bus_type;
+
+ dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
+ port->ofdev.dev.release = logical_port_release;
+
+ ret = of_device_register(&port->ofdev);
+ if (ret) {
+ pr_err("failed to register device. ret=%d\n", ret);
+ goto out;
+ }
+
+ ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
+ if (ret) {
+ pr_err("failed to register attributes, ret=%d\n", ret);
+ goto out_unreg_of_dev;
+ }
+
+ return &port->ofdev.dev;
+
+out_unreg_of_dev:
+ of_device_unregister(&port->ofdev);
+out:
+ return NULL;
+}
+
+static void ehea_unregister_port(struct ehea_port *port)
+{
+ device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
+ of_device_unregister(&port->ofdev);
+}
+
+static const struct net_device_ops ehea_netdev_ops = {
+ .ndo_open = ehea_open,
+ .ndo_stop = ehea_stop,
+ .ndo_start_xmit = ehea_start_xmit,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ehea_netpoll,
+#endif
+ .ndo_get_stats = ehea_get_stats,
+ .ndo_set_mac_address = ehea_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = ehea_set_multicast_list,
+ .ndo_change_mtu = ehea_change_mtu,
+ .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid,
+ .ndo_tx_timeout = ehea_tx_watchdog,
+};
+
+struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
+ u32 logical_port_id,
+ struct device_node *dn)
+{
+ int ret;
+ struct net_device *dev;
+ struct ehea_port *port;
+ struct device *port_dev;
+ int jumbo;
+
+ /* allocate memory for the port structures */
+ dev = alloc_etherdev(sizeof(struct ehea_port));
+
+ if (!dev) {
+ pr_err("no mem for net_device\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ port = netdev_priv(dev);
+
+ mutex_init(&port->port_lock);
+ port->state = EHEA_PORT_DOWN;
+ port->sig_comp_iv = sq_entries / 10;
+
+ port->adapter = adapter;
+ port->netdev = dev;
+ port->logical_port_id = logical_port_id;
+
+ port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
+
+ port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
+ if (!port->mc_list) {
+ ret = -ENOMEM;
+ goto out_free_ethdev;
+ }
+
+ INIT_LIST_HEAD(&port->mc_list->list);
+
+ ret = ehea_sense_port_attr(port);
+ if (ret)
+ goto out_free_mc_list;
+
+ port_dev = ehea_register_port(port, dn);
+ if (!port_dev) {
+ ret = -EIO;
+ goto out_free_mc_list;
+ }
+
+ SET_NETDEV_DEV(dev, port_dev);
+
+ /* initialize net_device structure */
+ memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
+
+ dev->netdev_ops = &ehea_netdev_ops;
+ ehea_set_ethtool_ops(dev);
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
+ | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
+ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
+ | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
+ | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
+ | NETIF_F_LLTX | NETIF_F_RXCSUM;
+ dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
+
+ if (use_lro)
+ dev->features |= NETIF_F_LRO;
+
+ INIT_WORK(&port->reset_task, ehea_reset_port);
+ INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);
+
+ init_waitqueue_head(&port->swqe_avail_wq);
+ init_waitqueue_head(&port->restart_wq);
+
+ memset(&port->stats, 0, sizeof(struct net_device_stats));
+ ret = register_netdev(dev);
+ if (ret) {
+ pr_err("register_netdev failed. ret=%d\n", ret);
+ goto out_unreg_port;
+ }
+
+ port->lro_max_aggr = lro_max_aggr;
+
+ ret = ehea_get_jumboframe_status(port, &jumbo);
+ if (ret)
+ netdev_err(dev, "failed determining jumbo frame status\n");
+
+ netdev_info(dev, "Jumbo frames are %sabled\n",
+ jumbo == 1 ? "en" : "dis");
+
+ adapter->active_ports++;
+
+ return port;
+
+out_unreg_port:
+ ehea_unregister_port(port);
+
+out_free_mc_list:
+ kfree(port->mc_list);
+
+out_free_ethdev:
+ free_netdev(dev);
+
+out_err:
+ pr_err("setting up logical port with id=%d failed, ret=%d\n",
+ logical_port_id, ret);
+ return NULL;
+}
+
+static void ehea_shutdown_single_port(struct ehea_port *port)
+{
+ struct ehea_adapter *adapter = port->adapter;
+
+ cancel_work_sync(&port->reset_task);
+ cancel_delayed_work_sync(&port->stats_work);
+ unregister_netdev(port->netdev);
+ ehea_unregister_port(port);
+ kfree(port->mc_list);
+ free_netdev(port->netdev);
+ adapter->active_ports--;
+}
+
+static int ehea_setup_ports(struct ehea_adapter *adapter)
+{
+ struct device_node *lhea_dn;
+ struct device_node *eth_dn = NULL;
+
+ const u32 *dn_log_port_id;
+ int i = 0;
+
+ lhea_dn = adapter->ofdev->dev.of_node;
+ while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
+
+ dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
+ NULL);
+ if (!dn_log_port_id) {
+ pr_err("bad device node: eth_dn name=%s\n",
+ eth_dn->full_name);
+ continue;
+ }
+
+ if (ehea_add_adapter_mr(adapter)) {
+ pr_err("creating MR failed\n");
+ of_node_put(eth_dn);
+ return -EIO;
+ }
+
+ adapter->port[i] = ehea_setup_single_port(adapter,
+ *dn_log_port_id,
+ eth_dn);
+ if (adapter->port[i])
+ netdev_info(adapter->port[i]->netdev,
+ "logical port id #%d\n", *dn_log_port_id);
+ else
+ ehea_remove_adapter_mr(adapter);
+
+ i++;
+ }
+ return 0;
+}
+
+static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
+ u32 logical_port_id)
+{
+ struct device_node *lhea_dn;
+ struct device_node *eth_dn = NULL;
+ const u32 *dn_log_port_id;
+
+ lhea_dn = adapter->ofdev->dev.of_node;
+ while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
+ dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
+ NULL);
+		if (dn_log_port_id && *dn_log_port_id == logical_port_id)
+			return eth_dn;
+ }
+
+ return NULL;
+}
+
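+/*
+ * sysfs store handler for dynamically probing an additional logical
+ * port. The written value is the logical port id, so e.g. (path is
+ * illustrative) "echo 1 > /sys/bus/ibmebus/devices/<lhea>/probe_port"
+ * would bring up logical port 1.
+ */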
+static ssize_t ehea_probe_port(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ehea_adapter *adapter = dev_get_drvdata(dev);
+ struct ehea_port *port;
+ struct device_node *eth_dn = NULL;
+ int i;
+
+ u32 logical_port_id;
+
+	sscanf(buf, "%u", &logical_port_id);
+
+ port = ehea_get_port(adapter, logical_port_id);
+
+ if (port) {
+ netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
+ logical_port_id);
+ return -EINVAL;
+ }
+
+ eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
+
+ if (!eth_dn) {
+ pr_info("no logical port with id %d found\n", logical_port_id);
+ return -EINVAL;
+ }
+
+ if (ehea_add_adapter_mr(adapter)) {
+ pr_err("creating MR failed\n");
+ return -EIO;
+ }
+
+ port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
+
+ of_node_put(eth_dn);
+
+ if (port) {
+ for (i = 0; i < EHEA_MAX_PORTS; i++)
+ if (!adapter->port[i]) {
+ adapter->port[i] = port;
+ break;
+ }
+
+ netdev_info(port->netdev, "added: (logical port id=%d)\n",
+ logical_port_id);
+ } else {
+ ehea_remove_adapter_mr(adapter);
+ return -EIO;
+ }
+
+ return (ssize_t) count;
+}
+
+static ssize_t ehea_remove_port(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ehea_adapter *adapter = dev_get_drvdata(dev);
+ struct ehea_port *port;
+ int i;
+ u32 logical_port_id;
+
+	sscanf(buf, "%u", &logical_port_id);
+
+ port = ehea_get_port(adapter, logical_port_id);
+
+ if (port) {
+ netdev_info(port->netdev, "removed: (logical port id=%d)\n",
+ logical_port_id);
+
+ ehea_shutdown_single_port(port);
+
+ for (i = 0; i < EHEA_MAX_PORTS; i++)
+ if (adapter->port[i] == port) {
+ adapter->port[i] = NULL;
+ break;
+ }
+ } else {
+ pr_err("removing port with logical port id=%d failed. port not configured.\n",
+ logical_port_id);
+ return -EINVAL;
+ }
+
+ ehea_remove_adapter_mr(adapter);
+
+ return (ssize_t) count;
+}
+
+static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
+static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
+
+int ehea_create_device_sysfs(struct platform_device *dev)
+{
+ int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
+ if (ret)
+ goto out;
+
+ ret = device_create_file(&dev->dev, &dev_attr_remove_port);
+out:
+ return ret;
+}
+
+void ehea_remove_device_sysfs(struct platform_device *dev)
+{
+ device_remove_file(&dev->dev, &dev_attr_probe_port);
+ device_remove_file(&dev->dev, &dev_attr_remove_port);
+}
+
+static int __devinit ehea_probe_adapter(struct platform_device *dev,
+ const struct of_device_id *id)
+{
+ struct ehea_adapter *adapter;
+ const u64 *adapter_handle;
+ int ret;
+
+ if (!dev || !dev->dev.of_node) {
+ pr_err("Invalid ibmebus device probed\n");
+ return -EINVAL;
+ }
+
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter) {
+ ret = -ENOMEM;
+ dev_err(&dev->dev, "no mem for ehea_adapter\n");
+ goto out;
+ }
+
+ list_add(&adapter->list, &adapter_list);
+
+ adapter->ofdev = dev;
+
+ adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
+ NULL);
+ if (adapter_handle)
+ adapter->handle = *adapter_handle;
+
+ if (!adapter->handle) {
+		dev_err(&dev->dev, "failed getting handle for adapter '%s'\n",
+			dev->dev.of_node->full_name);
+ ret = -ENODEV;
+ goto out_free_ad;
+ }
+
+ adapter->pd = EHEA_PD_ID;
+
+ dev_set_drvdata(&dev->dev, adapter);
+
+	/* initialize adapter and ports: get adapter properties first */
+ ret = ehea_sense_adapter_attr(adapter);
+ if (ret) {
+ dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
+ goto out_free_ad;
+ }
+
+ adapter->neq = ehea_create_eq(adapter,
+ EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
+ if (!adapter->neq) {
+ ret = -EIO;
+ dev_err(&dev->dev, "NEQ creation failed\n");
+ goto out_free_ad;
+ }
+
+ tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
+ (unsigned long)adapter);
+
+ ret = ibmebus_request_irq(adapter->neq->attr.ist1,
+ ehea_interrupt_neq, IRQF_DISABLED,
+ "ehea_neq", adapter);
+ if (ret) {
+ dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
+ goto out_kill_eq;
+ }
+
+ ret = ehea_create_device_sysfs(dev);
+ if (ret)
+ goto out_free_irq;
+
+ ret = ehea_setup_ports(adapter);
+ if (ret) {
+ dev_err(&dev->dev, "setup_ports failed\n");
+ goto out_rem_dev_sysfs;
+ }
+
+ ret = 0;
+ goto out;
+
+out_rem_dev_sysfs:
+ ehea_remove_device_sysfs(dev);
+
+out_free_irq:
+ ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
+
+out_kill_eq:
+ ehea_destroy_eq(adapter->neq);
+
+out_free_ad:
+ list_del(&adapter->list);
+ kfree(adapter);
+
+out:
+ ehea_update_firmware_handles();
+
+ return ret;
+}
+
+static int __devexit ehea_remove(struct platform_device *dev)
+{
+ struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
+ int i;
+
+ for (i = 0; i < EHEA_MAX_PORTS; i++)
+ if (adapter->port[i]) {
+ ehea_shutdown_single_port(adapter->port[i]);
+ adapter->port[i] = NULL;
+ }
+
+ ehea_remove_device_sysfs(dev);
+
+ ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
+ tasklet_kill(&adapter->neq_tasklet);
+
+ ehea_destroy_eq(adapter->neq);
+ ehea_remove_adapter_mr(adapter);
+ list_del(&adapter->list);
+ kfree(adapter);
+
+ ehea_update_firmware_handles();
+
+ return 0;
+}
+
+void ehea_crash_handler(void)
+{
+ int i;
+
+ if (ehea_fw_handles.arr)
+ for (i = 0; i < ehea_fw_handles.num_entries; i++)
+ ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
+ ehea_fw_handles.arr[i].fwh,
+ FORCE_FREE);
+
+ if (ehea_bcmc_regs.arr)
+ for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
+ ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
+ ehea_bcmc_regs.arr[i].port_id,
+ ehea_bcmc_regs.arr[i].reg_type,
+ ehea_bcmc_regs.arr[i].macaddr,
+ 0, H_DEREG_BCMC);
+}
+
+static int ehea_mem_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ int ret = NOTIFY_BAD;
+ struct memory_notify *arg = data;
+
+ mutex_lock(&dlpar_mem_lock);
+
+ switch (action) {
+ case MEM_CANCEL_OFFLINE:
+		pr_info("memory offlining canceled\n");
+		/* Re-add canceled memory block; fall through to online */
+ case MEM_ONLINE:
+		pr_info("memory is going online\n");
+ set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+ if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
+ goto out_unlock;
+ ehea_rereg_mrs();
+ break;
+ case MEM_GOING_OFFLINE:
+		pr_info("memory is going offline\n");
+ set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+ if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
+ goto out_unlock;
+ ehea_rereg_mrs();
+ break;
+ default:
+ break;
+ }
+
+ ehea_update_firmware_handles();
+ ret = NOTIFY_OK;
+
+out_unlock:
+ mutex_unlock(&dlpar_mem_lock);
+ return ret;
+}
+
+static struct notifier_block ehea_mem_nb = {
+ .notifier_call = ehea_mem_notifier,
+};
+
+static int ehea_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *unused)
+{
+ if (action == SYS_RESTART) {
+ pr_info("Reboot: freeing all eHEA resources\n");
+ ibmebus_unregister_driver(&ehea_driver);
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ehea_reboot_nb = {
+ .notifier_call = ehea_reboot_notifier,
+};
+
+static int check_module_parm(void)
+{
+ int ret = 0;
+
+ if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
+ (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
+ pr_info("Bad parameter: rq1_entries\n");
+ ret = -EINVAL;
+ }
+ if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
+ (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
+ pr_info("Bad parameter: rq2_entries\n");
+ ret = -EINVAL;
+ }
+ if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
+ (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
+ pr_info("Bad parameter: rq3_entries\n");
+ ret = -EINVAL;
+ }
+ if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
+ (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
+ pr_info("Bad parameter: sq_entries\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static ssize_t ehea_show_capabilities(struct device_driver *drv,
+ char *buf)
+{
+	return sprintf(buf, "%d\n", EHEA_CAPABILITIES);
+}
+
+static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
+ ehea_show_capabilities, NULL);
+
+int __init ehea_module_init(void)
+{
+ int ret;
+
+ pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);
+
+ memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
+ memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
+
+ mutex_init(&ehea_fw_handles.lock);
+ spin_lock_init(&ehea_bcmc_regs.lock);
+
+ ret = check_module_parm();
+ if (ret)
+ goto out;
+
+ ret = ehea_create_busmap();
+ if (ret)
+ goto out;
+
+ ret = register_reboot_notifier(&ehea_reboot_nb);
+ if (ret)
+ pr_info("failed registering reboot notifier\n");
+
+ ret = register_memory_notifier(&ehea_mem_nb);
+ if (ret)
+ pr_info("failed registering memory remove notifier\n");
+
+ ret = crash_shutdown_register(ehea_crash_handler);
+ if (ret)
+ pr_info("failed registering crash handler\n");
+
+ ret = ibmebus_register_driver(&ehea_driver);
+ if (ret) {
+ pr_err("failed registering eHEA device driver on ebus\n");
+ goto out2;
+ }
+
+ ret = driver_create_file(&ehea_driver.driver,
+ &driver_attr_capabilities);
+ if (ret) {
+ pr_err("failed to register capabilities attribute, ret=%d\n",
+ ret);
+ goto out3;
+ }
+
+ return ret;
+
+out3:
+ ibmebus_unregister_driver(&ehea_driver);
+out2:
+ unregister_memory_notifier(&ehea_mem_nb);
+ unregister_reboot_notifier(&ehea_reboot_nb);
+ crash_shutdown_unregister(ehea_crash_handler);
+out:
+ return ret;
+}
+
+static void __exit ehea_module_exit(void)
+{
+ int ret;
+
+ driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
+ ibmebus_unregister_driver(&ehea_driver);
+ unregister_reboot_notifier(&ehea_reboot_nb);
+ ret = crash_shutdown_unregister(ehea_crash_handler);
+ if (ret)
+ pr_info("failed unregistering crash handler\n");
+ unregister_memory_notifier(&ehea_mem_nb);
+ kfree(ehea_fw_handles.arr);
+ kfree(ehea_bcmc_regs.arr);
+ ehea_destroy_busmap();
+}
+
+module_init(ehea_module_init);
+module_exit(ehea_module_exit);
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.c b/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
new file mode 100644
index 000000000000..0506967b9044
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
@@ -0,0 +1,626 @@
+/*
+ * linux/drivers/net/ehea/ehea_phyp.c
+ *
+ * eHEA ethernet device driver for IBM eServer System p
+ *
+ * (C) Copyright IBM Corp. 2006
+ *
+ * Authors:
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "ehea_phyp.h"
+
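+/*
+ * Encodes a queue size for H_ALLOC_RESOURCE: for a power-of-two
+ * queue_entries the result is log2(queue_entries), e.g. 16 -> 4 and
+ * 32 -> 5; other sizes map to the smallest k with
+ * 2^(k+1) - 1 >= queue_entries.
+ */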
+static inline u16 get_order_of_qentries(u16 queue_entries)
+{
+	u8 ld = 1;	/* "logarithmus dualis", i.e. binary logarithm */
+ while (((1U << ld) - 1) < queue_entries)
+ ld++;
+ return ld - 1;
+}
+
+/* Defines for H_CALL H_ALLOC_RESOURCE */
+#define H_ALL_RES_TYPE_QP 1
+#define H_ALL_RES_TYPE_CQ 2
+#define H_ALL_RES_TYPE_EQ 3
+#define H_ALL_RES_TYPE_MR 5
+#define H_ALL_RES_TYPE_MW 6
+
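+/*
+ * Wrapper around plpar_hcall_norets() that retries up to five times
+ * while the hypervisor reports long-busy, sleeping for the hinted
+ * interval between attempts, and gives up with H_BUSY afterwards.
+ */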
+static long ehea_plpar_hcall_norets(unsigned long opcode,
+ unsigned long arg1,
+ unsigned long arg2,
+ unsigned long arg3,
+ unsigned long arg4,
+ unsigned long arg5,
+ unsigned long arg6,
+ unsigned long arg7)
+{
+ long ret;
+ int i, sleep_msecs;
+
+ for (i = 0; i < 5; i++) {
+ ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
+ arg5, arg6, arg7);
+
+ if (H_IS_LONG_BUSY(ret)) {
+ sleep_msecs = get_longbusy_msecs(ret);
+ msleep_interruptible(sleep_msecs);
+ continue;
+ }
+
+ if (ret < H_SUCCESS)
+ pr_err("opcode=%lx ret=%lx"
+ " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
+ " arg5=%lx arg6=%lx arg7=%lx\n",
+ opcode, ret,
+ arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+
+ return ret;
+ }
+
+ return H_BUSY;
+}
+
+static long ehea_plpar_hcall9(unsigned long opcode,
+ unsigned long *outs, /* array of 9 outputs */
+ unsigned long arg1,
+ unsigned long arg2,
+ unsigned long arg3,
+ unsigned long arg4,
+ unsigned long arg5,
+ unsigned long arg6,
+ unsigned long arg7,
+ unsigned long arg8,
+ unsigned long arg9)
+{
+ long ret;
+ int i, sleep_msecs;
+ u8 cb_cat;
+
+ for (i = 0; i < 5; i++) {
+ ret = plpar_hcall9(opcode, outs,
+ arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9);
+
+ if (H_IS_LONG_BUSY(ret)) {
+ sleep_msecs = get_longbusy_msecs(ret);
+ msleep_interruptible(sleep_msecs);
+ continue;
+ }
+
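+		/*
+		 * H_AUTHORITY for H_MODIFY_HEA_PORT on the jumbo, speed
+		 * and default-UC-QPN settings is expected when the LPAR
+		 * lacks the authority to change them; the filter below
+		 * keeps those cases out of the log.
+		 */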
+ cb_cat = EHEA_BMASK_GET(H_MEHEAPORT_CAT, arg2);
+
+ if ((ret < H_SUCCESS) && !(((ret == H_AUTHORITY)
+ && (opcode == H_MODIFY_HEA_PORT))
+ && (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO)
+ || (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7)
+ && (arg3 == H_PORT_CB7_DUCQPN)))))
+ pr_err("opcode=%lx ret=%lx"
+ " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
+ " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
+ " arg9=%lx"
+ " out1=%lx out2=%lx out3=%lx out4=%lx"
+ " out5=%lx out6=%lx out7=%lx out8=%lx"
+ " out9=%lx\n",
+ opcode, ret,
+ arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9,
+ outs[0], outs[1], outs[2], outs[3], outs[4],
+ outs[5], outs[6], outs[7], outs[8]);
+ return ret;
+ }
+
+ return H_BUSY;
+}
+
+u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
+ const u64 qp_handle, const u64 sel_mask, void *cb_addr)
+{
+ return ehea_plpar_hcall_norets(H_QUERY_HEA_QP,
+ adapter_handle, /* R4 */
+ qp_category, /* R5 */
+ qp_handle, /* R6 */
+ sel_mask, /* R7 */
+ virt_to_abs(cb_addr), /* R8 */
+				       0, 0);		/* R9-R10 */
+}
+
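+/*
+ * The EHEA_BMASK_IBM(from, to) fields below use IBM bit numbering:
+ * bit 0 is the most significant bit of the 64-bit register and bit 63
+ * the least significant, so e.g. EHEA_BMASK_SET(H_ALL_RES_QP_RES_TYP,
+ * H_ALL_RES_TYPE_QP) places the resource type in the low byte of R5.
+ */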
+/* input param R5 */
+#define H_ALL_RES_QP_EQPO EHEA_BMASK_IBM(9, 11)
+#define H_ALL_RES_QP_QPP EHEA_BMASK_IBM(12, 12)
+#define H_ALL_RES_QP_RQR EHEA_BMASK_IBM(13, 15)
+#define H_ALL_RES_QP_EQEG EHEA_BMASK_IBM(16, 16)
+#define H_ALL_RES_QP_LL_QP EHEA_BMASK_IBM(17, 17)
+#define H_ALL_RES_QP_DMA128 EHEA_BMASK_IBM(19, 19)
+#define H_ALL_RES_QP_HSM EHEA_BMASK_IBM(20, 21)
+#define H_ALL_RES_QP_SIGT EHEA_BMASK_IBM(22, 23)
+#define H_ALL_RES_QP_TENURE EHEA_BMASK_IBM(48, 55)
+#define H_ALL_RES_QP_RES_TYP EHEA_BMASK_IBM(56, 63)
+
+/* input param R9 */
+#define H_ALL_RES_QP_TOKEN EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_PD EHEA_BMASK_IBM(32, 63)
+
+/* input param R10 */
+#define H_ALL_RES_QP_MAX_SWQE EHEA_BMASK_IBM(4, 7)
+#define H_ALL_RES_QP_MAX_R1WQE EHEA_BMASK_IBM(12, 15)
+#define H_ALL_RES_QP_MAX_R2WQE EHEA_BMASK_IBM(20, 23)
+#define H_ALL_RES_QP_MAX_R3WQE EHEA_BMASK_IBM(28, 31)
+/* Max Send Scatter Gather Elements */
+#define H_ALL_RES_QP_MAX_SSGE           EHEA_BMASK_IBM(37, 39)
+/* Max Receive SG Elements RQ1-RQ3 */
+#define H_ALL_RES_QP_MAX_R1SGE          EHEA_BMASK_IBM(45, 47)
+#define H_ALL_RES_QP_MAX_R2SGE          EHEA_BMASK_IBM(53, 55)
+#define H_ALL_RES_QP_MAX_R3SGE          EHEA_BMASK_IBM(61, 63)
+
+/* input param R11 */
+/* max swqe immediate data length */
+#define H_ALL_RES_QP_SWQE_IDL           EHEA_BMASK_IBM(0, 7)
+#define H_ALL_RES_QP_PORT_NUM           EHEA_BMASK_IBM(48, 63)
+
+/* input param R12 */
+#define H_ALL_RES_QP_TH_RQ2             EHEA_BMASK_IBM(0, 15) /* Threshold RQ2 */
+#define H_ALL_RES_QP_TH_RQ3             EHEA_BMASK_IBM(16, 31) /* Threshold RQ3 */
+
+/* output param R6 */
+#define H_ALL_RES_QP_ACT_SWQE EHEA_BMASK_IBM(0, 15)
+#define H_ALL_RES_QP_ACT_R1WQE EHEA_BMASK_IBM(16, 31)
+#define H_ALL_RES_QP_ACT_R2WQE EHEA_BMASK_IBM(32, 47)
+#define H_ALL_RES_QP_ACT_R3WQE EHEA_BMASK_IBM(48, 63)
+
+/* output param, R7 */
+#define H_ALL_RES_QP_ACT_SSGE EHEA_BMASK_IBM(0, 7)
+#define H_ALL_RES_QP_ACT_R1SGE EHEA_BMASK_IBM(8, 15)
+#define H_ALL_RES_QP_ACT_R2SGE EHEA_BMASK_IBM(16, 23)
+#define H_ALL_RES_QP_ACT_R3SGE EHEA_BMASK_IBM(24, 31)
+#define H_ALL_RES_QP_ACT_SWQE_IDL EHEA_BMASK_IBM(32, 39)
+
+/* output param R8,R9 */
+#define H_ALL_RES_QP_SIZE_SQ EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_SIZE_RQ1 EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_SIZE_RQ2 EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_SIZE_RQ3 EHEA_BMASK_IBM(32, 63)
+
+/* output param R11,R12 */
+#define H_ALL_RES_QP_LIOBN_SQ EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_LIOBN_RQ1 EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_LIOBN_RQ2 EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_LIOBN_RQ3 EHEA_BMASK_IBM(32, 63)
+
+u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
+ struct ehea_qp_init_attr *init_attr, const u32 pd,
+ u64 *qp_handle, struct h_epas *h_epas)
+{
+ u64 hret;
+ unsigned long outs[PLPAR_HCALL9_BUFSIZE];
+
+ u64 allocate_controls =
+ EHEA_BMASK_SET(H_ALL_RES_QP_EQPO, init_attr->low_lat_rq1 ? 1 : 0)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_QPP, 0)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_RQR, 6) /* rq1 & rq2 & rq3 */
+ | EHEA_BMASK_SET(H_ALL_RES_QP_EQEG, 0) /* EQE gen. disabled */
+ | EHEA_BMASK_SET(H_ALL_RES_QP_LL_QP, init_attr->low_lat_rq1)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_DMA128, 0)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_HSM, 0)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_SIGT, init_attr->signalingtype)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_RES_TYP, H_ALL_RES_TYPE_QP);
+
+ u64 r9_reg = EHEA_BMASK_SET(H_ALL_RES_QP_PD, pd)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_TOKEN, init_attr->qp_token);
+
+ u64 max_r10_reg =
+ EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SWQE,
+ get_order_of_qentries(init_attr->max_nr_send_wqes))
+ | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1WQE,
+ get_order_of_qentries(init_attr->max_nr_rwqes_rq1))
+ | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2WQE,
+ get_order_of_qentries(init_attr->max_nr_rwqes_rq2))
+ | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3WQE,
+ get_order_of_qentries(init_attr->max_nr_rwqes_rq3))
+ | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SSGE, init_attr->wqe_size_enc_sq)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1SGE,
+ init_attr->wqe_size_enc_rq1)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2SGE,
+ init_attr->wqe_size_enc_rq2)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3SGE,
+ init_attr->wqe_size_enc_rq3);
+
+ u64 r11_in =
+ EHEA_BMASK_SET(H_ALL_RES_QP_SWQE_IDL, init_attr->swqe_imm_data_len)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_PORT_NUM, init_attr->port_nr);
+ u64 threshold =
+ EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ2, init_attr->rq2_threshold)
+ | EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ3, init_attr->rq3_threshold);
+
+ hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
+ outs,
+ adapter_handle, /* R4 */
+ allocate_controls, /* R5 */
+ init_attr->send_cq_handle, /* R6 */
+ init_attr->recv_cq_handle, /* R7 */
+ init_attr->aff_eq_handle, /* R8 */
+ r9_reg, /* R9 */
+ max_r10_reg, /* R10 */
+ r11_in, /* R11 */
+ threshold); /* R12 */
+
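+	/* outs[0..8] hold the hypervisor's returned registers R4..R12 */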
+ *qp_handle = outs[0];
+ init_attr->qp_nr = (u32)outs[1];
+
+ init_attr->act_nr_send_wqes =
+ (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, outs[2]);
+ init_attr->act_nr_rwqes_rq1 =
+ (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, outs[2]);
+ init_attr->act_nr_rwqes_rq2 =
+ (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, outs[2]);
+ init_attr->act_nr_rwqes_rq3 =
+ (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, outs[2]);
+
+ init_attr->act_wqe_size_enc_sq = init_attr->wqe_size_enc_sq;
+ init_attr->act_wqe_size_enc_rq1 = init_attr->wqe_size_enc_rq1;
+ init_attr->act_wqe_size_enc_rq2 = init_attr->wqe_size_enc_rq2;
+ init_attr->act_wqe_size_enc_rq3 = init_attr->wqe_size_enc_rq3;
+
+ init_attr->nr_sq_pages =
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, outs[4]);
+ init_attr->nr_rq1_pages =
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, outs[4]);
+ init_attr->nr_rq2_pages =
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, outs[5]);
+ init_attr->nr_rq3_pages =
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, outs[5]);
+
+ init_attr->liobn_sq =
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, outs[7]);
+ init_attr->liobn_rq1 =
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, outs[7]);
+ init_attr->liobn_rq2 =
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, outs[8]);
+ init_attr->liobn_rq3 =
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, outs[8]);
+
+ if (!hret)
+ hcp_epas_ctor(h_epas, outs[6], outs[6]);
+
+ return hret;
+}
+
+u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
+ struct ehea_cq_attr *cq_attr,
+ u64 *cq_handle, struct h_epas *epas)
+{
+ u64 hret;
+ unsigned long outs[PLPAR_HCALL9_BUFSIZE];
+
+ hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
+ outs,
+ adapter_handle, /* R4 */
+ H_ALL_RES_TYPE_CQ, /* R5 */
+ cq_attr->eq_handle, /* R6 */
+ cq_attr->cq_token, /* R7 */
+ cq_attr->max_nr_of_cqes, /* R8 */
+ 0, 0, 0, 0); /* R9-R12 */
+
+ *cq_handle = outs[0];
+ cq_attr->act_nr_of_cqes = outs[3];
+ cq_attr->nr_pages = outs[4];
+
+ if (!hret)
+ hcp_epas_ctor(epas, outs[5], outs[6]);
+
+ return hret;
+}
+
+/* input param R5 */
+#define H_ALL_RES_EQ_NEQ EHEA_BMASK_IBM(0, 0)
+#define H_ALL_RES_EQ_NON_NEQ_ISN EHEA_BMASK_IBM(6, 7)
+#define H_ALL_RES_EQ_INH_EQE_GEN EHEA_BMASK_IBM(16, 16)
+#define H_ALL_RES_EQ_RES_TYPE EHEA_BMASK_IBM(56, 63)
+/* input param R6 */
+#define H_ALL_RES_EQ_MAX_EQE EHEA_BMASK_IBM(32, 63)
+
+/* output param R6 */
+#define H_ALL_RES_EQ_LIOBN EHEA_BMASK_IBM(32, 63)
+
+/* output param R7 */
+#define H_ALL_RES_EQ_ACT_EQE EHEA_BMASK_IBM(32, 63)
+
+/* output param R8 */
+#define H_ALL_RES_EQ_ACT_PS EHEA_BMASK_IBM(32, 63)
+
+/* output param R9 */
+#define H_ALL_RES_EQ_ACT_EQ_IST_C EHEA_BMASK_IBM(30, 31)
+#define H_ALL_RES_EQ_ACT_EQ_IST_1 EHEA_BMASK_IBM(40, 63)
+
+/* output param R10 */
+#define H_ALL_RES_EQ_ACT_EQ_IST_2 EHEA_BMASK_IBM(40, 63)
+
+/* output param R11 */
+#define H_ALL_RES_EQ_ACT_EQ_IST_3 EHEA_BMASK_IBM(40, 63)
+
+/* output param R12 */
+#define H_ALL_RES_EQ_ACT_EQ_IST_4 EHEA_BMASK_IBM(40, 63)
+
+u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
+ struct ehea_eq_attr *eq_attr, u64 *eq_handle)
+{
+ u64 hret, allocate_controls;
+ unsigned long outs[PLPAR_HCALL9_BUFSIZE];
+
+ /* resource type */
+ allocate_controls =
+ EHEA_BMASK_SET(H_ALL_RES_EQ_RES_TYPE, H_ALL_RES_TYPE_EQ)
+ | EHEA_BMASK_SET(H_ALL_RES_EQ_NEQ, eq_attr->type ? 1 : 0)
+ | EHEA_BMASK_SET(H_ALL_RES_EQ_INH_EQE_GEN, !eq_attr->eqe_gen)
+ | EHEA_BMASK_SET(H_ALL_RES_EQ_NON_NEQ_ISN, 1);
+
+ hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
+ outs,
+ adapter_handle, /* R4 */
+ allocate_controls, /* R5 */
+ eq_attr->max_nr_of_eqes, /* R6 */
+				 0, 0, 0, 0, 0, 0);	  /* R7-R12 */
+
+ *eq_handle = outs[0];
+ eq_attr->act_nr_of_eqes = outs[3];
+ eq_attr->nr_pages = outs[4];
+ eq_attr->ist1 = outs[5];
+ eq_attr->ist2 = outs[6];
+ eq_attr->ist3 = outs[7];
+ eq_attr->ist4 = outs[8];
+
+ return hret;
+}
+
+u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat,
+ const u64 qp_handle, const u64 sel_mask,
+ void *cb_addr, u64 *inv_attr_id, u64 *proc_mask,
+ u16 *out_swr, u16 *out_rwr)
+{
+ u64 hret;
+ unsigned long outs[PLPAR_HCALL9_BUFSIZE];
+
+ hret = ehea_plpar_hcall9(H_MODIFY_HEA_QP,
+ outs,
+ adapter_handle, /* R4 */
+ (u64) cat, /* R5 */
+ qp_handle, /* R6 */
+ sel_mask, /* R7 */
+ virt_to_abs(cb_addr), /* R8 */
+ 0, 0, 0, 0); /* R9-R12 */
+
+ *inv_attr_id = outs[0];
+ *out_swr = outs[3];
+ *out_rwr = outs[4];
+ *proc_mask = outs[5];
+
+ return hret;
+}
+
+u64 ehea_h_register_rpage(const u64 adapter_handle, const u8 pagesize,
+ const u8 queue_type, const u64 resource_handle,
+ const u64 log_pageaddr, u64 count)
+{
+ u64 reg_control;
+
+ reg_control = EHEA_BMASK_SET(H_REG_RPAGE_PAGE_SIZE, pagesize)
+ | EHEA_BMASK_SET(H_REG_RPAGE_QT, queue_type);
+
+ return ehea_plpar_hcall_norets(H_REGISTER_HEA_RPAGES,
+ adapter_handle, /* R4 */
+ reg_control, /* R5 */
+ resource_handle, /* R6 */
+ log_pageaddr, /* R7 */
+ count, /* R8 */
+ 0, 0); /* R9-R10 */
+}
+
+u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
+ const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
+ struct ehea_mr *mr)
+{
+ u64 hret;
+ unsigned long outs[PLPAR_HCALL9_BUFSIZE];
+
+ hret = ehea_plpar_hcall9(H_REGISTER_SMR,
+ outs,
+ adapter_handle , /* R4 */
+ orig_mr_handle, /* R5 */
+ vaddr_in, /* R6 */
+ (((u64)access_ctrl) << 32ULL), /* R7 */
+ pd, /* R8 */
+ 0, 0, 0, 0); /* R9-R12 */
+
+ mr->handle = outs[0];
+ mr->lkey = (u32)outs[2];
+
+ return hret;
+}
+
+u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle)
+{
+ unsigned long outs[PLPAR_HCALL9_BUFSIZE];
+
+ return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA,
+ outs,
+ adapter_handle, /* R4 */
+ H_DISABLE_GET_EHEA_WQE_P, /* R5 */
+ qp_handle, /* R6 */
+ 0, 0, 0, 0, 0, 0); /* R7-R12 */
+}
+
+u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
+ u64 force_bit)
+{
+ return ehea_plpar_hcall_norets(H_FREE_RESOURCE,
+ adapter_handle, /* R4 */
+ res_handle, /* R5 */
+				       force_bit,	/* R6 */
+				       0, 0, 0, 0);	/* R7-R10 */
+}
+
+u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
+ const u64 length, const u32 access_ctrl,
+ const u32 pd, u64 *mr_handle, u32 *lkey)
+{
+ u64 hret;
+ unsigned long outs[PLPAR_HCALL9_BUFSIZE];
+
+ hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
+ outs,
+ adapter_handle, /* R4 */
+				 H_ALL_RES_TYPE_MR,	     /* R5 */
+ vaddr, /* R6 */
+ length, /* R7 */
+ (((u64) access_ctrl) << 32ULL), /* R8 */
+ pd, /* R9 */
+ 0, 0, 0); /* R10-R12 */
+
+ *mr_handle = outs[0];
+ *lkey = (u32)outs[2];
+ return hret;
+}
+
+u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
+ const u8 pagesize, const u8 queue_type,
+ const u64 log_pageaddr, const u64 count)
+{
+ if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) {
+ pr_err("not on pageboundary\n");
+ return H_PARAMETER;
+ }
+
+ return ehea_h_register_rpage(adapter_handle, pagesize,
+ queue_type, mr_handle,
+ log_pageaddr, count);
+}
+
+u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr)
+{
+ u64 hret, cb_logaddr;
+
+ cb_logaddr = virt_to_abs(cb_addr);
+
+ hret = ehea_plpar_hcall_norets(H_QUERY_HEA,
+ adapter_handle, /* R4 */
+ cb_logaddr, /* R5 */
+ 0, 0, 0, 0, 0); /* R6-R10 */
+#ifdef DEBUG
+ ehea_dump(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
+#endif
+ return hret;
+}
+
+u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
+ const u8 cb_cat, const u64 select_mask,
+ void *cb_addr)
+{
+ u64 port_info;
+ u64 cb_logaddr = virt_to_abs(cb_addr);
+ u64 arr_index = 0;
+
+ port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
+ | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
+
+ return ehea_plpar_hcall_norets(H_QUERY_HEA_PORT,
+ adapter_handle, /* R4 */
+ port_info, /* R5 */
+ select_mask, /* R6 */
+ arr_index, /* R7 */
+ cb_logaddr, /* R8 */
+ 0, 0); /* R9-R10 */
+}
+
+u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
+ const u8 cb_cat, const u64 select_mask,
+ void *cb_addr)
+{
+ unsigned long outs[PLPAR_HCALL9_BUFSIZE];
+ u64 port_info;
+ u64 arr_index = 0;
+ u64 cb_logaddr = virt_to_abs(cb_addr);
+
+ port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
+ | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
+#ifdef DEBUG
+ ehea_dump(cb_addr, sizeof(struct hcp_ehea_port_cb0), "Before HCALL");
+#endif
+ return ehea_plpar_hcall9(H_MODIFY_HEA_PORT,
+ outs,
+ adapter_handle, /* R4 */
+ port_info, /* R5 */
+ select_mask, /* R6 */
+ arr_index, /* R7 */
+ cb_logaddr, /* R8 */
+ 0, 0, 0, 0); /* R9-R12 */
+}
+
+u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
+ const u8 reg_type, const u64 mc_mac_addr,
+ const u16 vlan_id, const u32 hcall_id)
+{
+ u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id;
+ u64 mac_addr = mc_mac_addr >> 16;
+
+ r5_port_num = EHEA_BMASK_SET(H_REGBCMC_PN, port_num);
+ r6_reg_type = EHEA_BMASK_SET(H_REGBCMC_REGTYPE, reg_type);
+ r7_mc_mac_addr = EHEA_BMASK_SET(H_REGBCMC_MACADDR, mac_addr);
+ r8_vlan_id = EHEA_BMASK_SET(H_REGBCMC_VLANID, vlan_id);
+
+ return ehea_plpar_hcall_norets(hcall_id,
+ adapter_handle, /* R4 */
+ r5_port_num, /* R5 */
+ r6_reg_type, /* R6 */
+ r7_mc_mac_addr, /* R7 */
+ r8_vlan_id, /* R8 */
+				       0, 0);		    /* R9-R10 */
+}
+
+u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
+ const u64 event_mask)
+{
+ return ehea_plpar_hcall_norets(H_RESET_EVENTS,
+ adapter_handle, /* R4 */
+ neq_handle, /* R5 */
+ event_mask, /* R6 */
+				       0, 0, 0, 0);	    /* R7-R10 */
+}
+
+u64 ehea_h_error_data(const u64 adapter_handle, const u64 resource_handle,
+ void *rblock)
+{
+ return ehea_plpar_hcall_norets(H_ERROR_DATA,
+ adapter_handle, /* R4 */
+				       resource_handle,	    /* R5 */
+ virt_to_abs(rblock), /* R6 */
+				       0, 0, 0, 0);	    /* R7-R10 */
+}
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
new file mode 100644
index 000000000000..2f8174c248bc
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
@@ -0,0 +1,467 @@
+/*
+ * linux/drivers/net/ehea/ehea_phyp.h
+ *
+ * eHEA ethernet device driver for IBM eServer System p
+ *
+ * (C) Copyright IBM Corp. 2006
+ *
+ * Authors:
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __EHEA_PHYP_H__
+#define __EHEA_PHYP_H__
+
+#include <linux/delay.h>
+#include <asm/hvcall.h>
+#include "ehea.h"
+#include "ehea_hw.h"
+
+/* Some abbreviations used here:
+ *
+ * hcp_* - structures, variables and functions related to Hypervisor Calls
+ */
+
+static inline u32 get_longbusy_msecs(int long_busy_ret_code)
+{
+ switch (long_busy_ret_code) {
+ case H_LONG_BUSY_ORDER_1_MSEC:
+ return 1;
+ case H_LONG_BUSY_ORDER_10_MSEC:
+ return 10;
+ case H_LONG_BUSY_ORDER_100_MSEC:
+ return 100;
+ case H_LONG_BUSY_ORDER_1_SEC:
+ return 1000;
+ case H_LONG_BUSY_ORDER_10_SEC:
+ return 10000;
+ case H_LONG_BUSY_ORDER_100_SEC:
+ return 100000;
+ default:
+ return 1;
+ }
+}
+
+/* Number of pages which can be registered at once by H_REGISTER_HEA_RPAGES */
+#define EHEA_MAX_RPAGE 512
+
+/* Notification Event Queue (NEQ) Entry bit masks */
+#define NEQE_EVENT_CODE EHEA_BMASK_IBM(2, 7)
+#define NEQE_PORTNUM EHEA_BMASK_IBM(32, 47)
+#define NEQE_PORT_UP EHEA_BMASK_IBM(16, 16)
+#define NEQE_EXTSWITCH_PORT_UP EHEA_BMASK_IBM(17, 17)
+#define NEQE_EXTSWITCH_PRIMARY EHEA_BMASK_IBM(18, 18)
+#define NEQE_PLID EHEA_BMASK_IBM(16, 47)
+
+/* Notification Event Codes */
+#define EHEA_EC_PORTSTATE_CHG 0x30
+#define EHEA_EC_ADAPTER_MALFUNC 0x32
+#define EHEA_EC_PORT_MALFUNC 0x33
+
+/* Notification Event Log Register (NELR) bit masks */
+#define NELR_PORT_MALFUNC EHEA_BMASK_IBM(61, 61)
+#define NELR_ADAPTER_MALFUNC EHEA_BMASK_IBM(62, 62)
+#define NELR_PORTSTATE_CHG EHEA_BMASK_IBM(63, 63)
+
+static inline void hcp_epas_ctor(struct h_epas *epas, u64 paddr_kernel,
+ u64 paddr_user)
+{
+ /* To support 64k pages we must round to 64k page boundary */
+ epas->kernel.addr = ioremap((paddr_kernel & PAGE_MASK), PAGE_SIZE) +
+ (paddr_kernel & ~PAGE_MASK);
+ epas->user.addr = paddr_user;
+}
+
+static inline void hcp_epas_dtor(struct h_epas *epas)
+{
+ if (epas->kernel.addr)
+ iounmap((void __iomem *)((u64)epas->kernel.addr & PAGE_MASK));
+
+ epas->user.addr = 0;
+ epas->kernel.addr = 0;
+}
+
+struct hcp_modify_qp_cb0 {
+ u64 qp_ctl_reg; /* 00 */
+ u32 max_swqe; /* 02 */
+ u32 max_rwqe; /* 03 */
+ u32 port_nb; /* 04 */
+ u32 reserved0; /* 05 */
+ u64 qp_aer; /* 06 */
+ u64 qp_tenure; /* 08 */
+};
+
+/* Hcall Query/Modify Queue Pair Control Block 0 Selection Mask Bits */
+#define H_QPCB0_ALL EHEA_BMASK_IBM(0, 5)
+#define H_QPCB0_QP_CTL_REG EHEA_BMASK_IBM(0, 0)
+#define H_QPCB0_MAX_SWQE EHEA_BMASK_IBM(1, 1)
+#define H_QPCB0_MAX_RWQE EHEA_BMASK_IBM(2, 2)
+#define H_QPCB0_PORT_NB EHEA_BMASK_IBM(3, 3)
+#define H_QPCB0_QP_AER EHEA_BMASK_IBM(4, 4)
+#define H_QPCB0_QP_TENURE EHEA_BMASK_IBM(5, 5)
+
+/* Queue Pair Control Register Status Bits */
+#define H_QP_CR_ENABLED 0x8000000000000000ULL /* QP enabled */
+ /* QP States: */
+#define H_QP_CR_STATE_RESET 0x0000010000000000ULL /* Reset */
+#define H_QP_CR_STATE_INITIALIZED 0x0000020000000000ULL /* Initialized */
+#define H_QP_CR_STATE_RDY2RCV 0x0000030000000000ULL /* Ready to recv */
+#define H_QP_CR_STATE_RDY2SND 0x0000050000000000ULL /* Ready to send */
+#define H_QP_CR_STATE_ERROR 0x0000800000000000ULL /* Error */
+#define H_QP_CR_RES_STATE 0x0000007F00000000ULL /* Resultant state */
+
+struct hcp_modify_qp_cb1 {
+ u32 qpn; /* 00 */
+ u32 qp_asyn_ev_eq_nb; /* 01 */
+ u64 sq_cq_handle; /* 02 */
+ u64 rq_cq_handle; /* 04 */
+ /* sgel = scatter gather element */
+ u32 sgel_nb_sq; /* 06 */
+ u32 sgel_nb_rq1; /* 07 */
+ u32 sgel_nb_rq2; /* 08 */
+ u32 sgel_nb_rq3; /* 09 */
+};
+
+/* Hcall Query/Modify Queue Pair Control Block 1 Selection Mask Bits */
+#define H_QPCB1_ALL EHEA_BMASK_IBM(0, 7)
+#define H_QPCB1_QPN EHEA_BMASK_IBM(0, 0)
+#define H_QPCB1_ASYN_EV_EQ_NB EHEA_BMASK_IBM(1, 1)
+#define H_QPCB1_SQ_CQ_HANDLE EHEA_BMASK_IBM(2, 2)
+#define H_QPCB1_RQ_CQ_HANDLE EHEA_BMASK_IBM(3, 3)
+#define H_QPCB1_SGEL_NB_SQ EHEA_BMASK_IBM(4, 4)
+#define H_QPCB1_SGEL_NB_RQ1 EHEA_BMASK_IBM(5, 5)
+#define H_QPCB1_SGEL_NB_RQ2 EHEA_BMASK_IBM(6, 6)
+#define H_QPCB1_SGEL_NB_RQ3 EHEA_BMASK_IBM(7, 7)
+
+struct hcp_query_ehea {
+ u32 cur_num_qps; /* 00 */
+ u32 cur_num_cqs; /* 01 */
+ u32 cur_num_eqs; /* 02 */
+ u32 cur_num_mrs; /* 03 */
+ u32 auth_level; /* 04 */
+ u32 max_num_qps; /* 05 */
+ u32 max_num_cqs; /* 06 */
+ u32 max_num_eqs; /* 07 */
+ u32 max_num_mrs; /* 08 */
+ u32 reserved0; /* 09 */
+ u32 int_clock_freq; /* 10 */
+ u32 max_num_pds; /* 11 */
+ u32 max_num_addr_handles; /* 12 */
+ u32 max_num_cqes; /* 13 */
+ u32 max_num_wqes; /* 14 */
+ u32 max_num_sgel_rq1wqe; /* 15 */
+ u32 max_num_sgel_rq2wqe; /* 16 */
+ u32 max_num_sgel_rq3wqe; /* 17 */
+ u32 mr_page_size; /* 18 */
+ u32 reserved1; /* 19 */
+ u64 max_mr_size; /* 20 */
+ u64 reserved2; /* 22 */
+ u32 num_ports; /* 24 */
+ u32 reserved3; /* 25 */
+ u32 reserved4; /* 26 */
+ u32 reserved5; /* 27 */
+ u64 max_mc_mac; /* 28 */
+ u64 ehea_cap; /* 30 */
+ u32 max_isn_per_eq; /* 32 */
+ u32 max_num_neq; /* 33 */
+ u64 max_num_vlan_ids; /* 34 */
+ u32 max_num_port_group; /* 36 */
+ u32 max_num_phys_port; /* 37 */
+};
+
+/* Hcall Query/Modify Port Control Block defines */
+#define H_PORT_CB0 0
+#define H_PORT_CB1 1
+#define H_PORT_CB2 2
+#define H_PORT_CB3 3
+#define H_PORT_CB4 4
+#define H_PORT_CB5 5
+#define H_PORT_CB6 6
+#define H_PORT_CB7 7
+
+struct hcp_ehea_port_cb0 {
+ u64 port_mac_addr;
+ u64 port_rc;
+ u64 reserved0;
+ u32 port_op_state;
+ u32 port_speed;
+ u32 ext_swport_op_state;
+ u32 neg_tpf_prpf;
+ u32 num_default_qps;
+ u32 reserved1;
+ u64 default_qpn_arr[16];
+};
+
+/* Hcall Query/Modify Port Control Block 0 Selection Mask Bits */
+#define H_PORT_CB0_ALL EHEA_BMASK_IBM(0, 7) /* Set all bits */
+#define H_PORT_CB0_MAC EHEA_BMASK_IBM(0, 0) /* MAC address */
+#define H_PORT_CB0_PRC EHEA_BMASK_IBM(1, 1) /* Port Recv Control */
+#define H_PORT_CB0_DEFQPNARRAY EHEA_BMASK_IBM(7, 7) /* Default QPN Array */
+
+/* Hcall Query Port: Returned port speed values */
+#define H_SPEED_10M_H 1 /* 10 Mbps, Half Duplex */
+#define H_SPEED_10M_F 2 /* 10 Mbps, Full Duplex */
+#define H_SPEED_100M_H 3 /* 100 Mbps, Half Duplex */
+#define H_SPEED_100M_F 4 /* 100 Mbps, Full Duplex */
+#define H_SPEED_1G_F 6 /* 1 Gbps, Full Duplex */
+#define H_SPEED_10G_F 8 /* 10 Gbps, Full Duplex */
+
+/* Port Receive Control Status Bits */
+#define PXLY_RC_VALID EHEA_BMASK_IBM(49, 49)
+#define PXLY_RC_VLAN_XTRACT EHEA_BMASK_IBM(50, 50)
+#define PXLY_RC_TCP_6_TUPLE EHEA_BMASK_IBM(51, 51)
+#define PXLY_RC_UDP_6_TUPLE EHEA_BMASK_IBM(52, 52)
+#define PXLY_RC_TCP_3_TUPLE EHEA_BMASK_IBM(53, 53)
+#define PXLY_RC_TCP_2_TUPLE EHEA_BMASK_IBM(54, 54)
+#define PXLY_RC_LLC_SNAP EHEA_BMASK_IBM(55, 55)
+#define PXLY_RC_JUMBO_FRAME EHEA_BMASK_IBM(56, 56)
+#define PXLY_RC_FRAG_IP_PKT EHEA_BMASK_IBM(57, 57)
+#define PXLY_RC_TCP_UDP_CHKSUM EHEA_BMASK_IBM(58, 58)
+#define PXLY_RC_IP_CHKSUM EHEA_BMASK_IBM(59, 59)
+#define PXLY_RC_MAC_FILTER EHEA_BMASK_IBM(60, 60)
+#define PXLY_RC_UNTAG_FILTER EHEA_BMASK_IBM(61, 61)
+#define PXLY_RC_VLAN_TAG_FILTER EHEA_BMASK_IBM(62, 63)
+
+#define PXLY_RC_VLAN_FILTER 2
+#define PXLY_RC_VLAN_PERM 0
+
+#define H_PORT_CB1_ALL 0x8000000000000000ULL
+
+struct hcp_ehea_port_cb1 {
+ u64 vlan_filter[64];
+};
+
+#define H_PORT_CB2_ALL 0xFFE0000000000000ULL
+
+struct hcp_ehea_port_cb2 {
+ u64 rxo;
+ u64 rxucp;
+ u64 rxufd;
+ u64 rxuerr;
+ u64 rxftl;
+ u64 rxmcp;
+ u64 rxbcp;
+ u64 txo;
+ u64 txucp;
+ u64 txmcp;
+ u64 txbcp;
+};
+
+struct hcp_ehea_port_cb3 {
+ u64 vlan_bc_filter[64];
+ u64 vlan_mc_filter[64];
+ u64 vlan_un_filter[64];
+ u64 port_mac_hash_array[64];
+};
+
+#define H_PORT_CB4_ALL 0xF000000000000000ULL
+#define H_PORT_CB4_JUMBO 0x1000000000000000ULL
+#define H_PORT_CB4_SPEED 0x8000000000000000ULL
+
+struct hcp_ehea_port_cb4 {
+ u32 port_speed;
+ u32 pause_frame;
+ u32 ens_port_op_state;
+ u32 jumbo_frame;
+ u32 ens_port_wrap;
+};
+
+/* Hcall Query/Modify Port Control Block 5 Selection Mask Bits */
+#define H_PORT_CB5_RCU 0x0001000000000000ULL
+#define PXS_RCU EHEA_BMASK_IBM(61, 63)
+
+struct hcp_ehea_port_cb5 {
+ u64 prc; /* 00 */
+ u64 uaa; /* 01 */
+ u64 macvc; /* 02 */
+ u64 xpcsc; /* 03 */
+ u64 xpcsp; /* 04 */
+ u64 pcsid; /* 05 */
+ u64 xpcsst; /* 06 */
+ u64 pthlb; /* 07 */
+ u64 pthrb; /* 08 */
+ u64 pqu; /* 09 */
+ u64 pqd; /* 10 */
+ u64 prt; /* 11 */
+ u64 wsth; /* 12 */
+ u64 rcb; /* 13 */
+ u64 rcm; /* 14 */
+ u64 rcu; /* 15 */
+ u64 macc; /* 16 */
+ u64 pc; /* 17 */
+ u64 pst; /* 18 */
+ u64 ducqpn; /* 19 */
+ u64 mcqpn; /* 20 */
+ u64 mma; /* 21 */
+ u64 pmc0h; /* 22 */
+ u64 pmc0l; /* 23 */
+ u64 lbc; /* 24 */
+};
+
+#define H_PORT_CB6_ALL 0xFFFFFE7FFFFF8000ULL
+
+struct hcp_ehea_port_cb6 {
+ u64 rxo; /* 00 */
+ u64 rx64; /* 01 */
+ u64 rx65; /* 02 */
+ u64 rx128; /* 03 */
+ u64 rx256; /* 04 */
+ u64 rx512; /* 05 */
+ u64 rx1024; /* 06 */
+ u64 rxbfcs; /* 07 */
+ u64 rxime; /* 08 */
+ u64 rxrle; /* 09 */
+ u64 rxorle; /* 10 */
+ u64 rxftl; /* 11 */
+ u64 rxjab; /* 12 */
+ u64 rxse; /* 13 */
+ u64 rxce; /* 14 */
+ u64 rxrf; /* 15 */
+ u64 rxfrag; /* 16 */
+ u64 rxuoc; /* 17 */
+ u64 rxcpf; /* 18 */
+ u64 rxsb; /* 19 */
+ u64 rxfd; /* 20 */
+ u64 rxoerr; /* 21 */
+ u64 rxaln; /* 22 */
+ u64 ducqpn; /* 23 */
+ u64 reserved0; /* 24 */
+ u64 rxmcp; /* 25 */
+ u64 rxbcp; /* 26 */
+ u64 txmcp; /* 27 */
+ u64 txbcp; /* 28 */
+ u64 txo; /* 29 */
+ u64 tx64; /* 30 */
+ u64 tx65; /* 31 */
+ u64 tx128; /* 32 */
+ u64 tx256; /* 33 */
+ u64 tx512; /* 34 */
+ u64 tx1024; /* 35 */
+ u64 txbfcs; /* 36 */
+ u64 txcpf; /* 37 */
+ u64 txlf; /* 38 */
+ u64 txrf; /* 39 */
+ u64 txime; /* 40 */
+ u64 txsc; /* 41 */
+ u64 txmc; /* 42 */
+ u64 txsqe; /* 43 */
+ u64 txdef; /* 44 */
+ u64 txlcol; /* 45 */
+ u64 txexcol; /* 46 */
+ u64 txcse; /* 47 */
+ u64 txbor; /* 48 */
+};
+
+#define H_PORT_CB7_DUCQPN 0x8000000000000000ULL
+
+struct hcp_ehea_port_cb7 {
+ u64 def_uc_qpn;
+};
+
+u64 ehea_h_query_ehea_qp(const u64 adapter_handle,
+ const u8 qp_category,
+ const u64 qp_handle, const u64 sel_mask,
+ void *cb_addr);
+
+u64 ehea_h_modify_ehea_qp(const u64 adapter_handle,
+ const u8 cat,
+ const u64 qp_handle,
+ const u64 sel_mask,
+ void *cb_addr,
+ u64 *inv_attr_id,
+ u64 *proc_mask, u16 *out_swr, u16 *out_rwr);
+
+u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
+ struct ehea_eq_attr *eq_attr, u64 *eq_handle);
+
+u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
+ struct ehea_cq_attr *cq_attr,
+ u64 *cq_handle, struct h_epas *epas);
+
+u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
+ struct ehea_qp_init_attr *init_attr,
+ const u32 pd,
+ u64 *qp_handle, struct h_epas *h_epas);
+
+#define H_REG_RPAGE_PAGE_SIZE EHEA_BMASK_IBM(48, 55)
+#define H_REG_RPAGE_QT EHEA_BMASK_IBM(62, 63)
+
+u64 ehea_h_register_rpage(const u64 adapter_handle,
+ const u8 pagesize,
+ const u8 queue_type,
+ const u64 resource_handle,
+ const u64 log_pageaddr, u64 count);
+
+#define H_DISABLE_GET_EHEA_WQE_P 1
+#define H_DISABLE_GET_SQ_WQE_P 2
+#define H_DISABLE_GET_RQC 3
+
+u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle);
+
+#define FORCE_FREE 1
+#define NORMAL_FREE 0
+
+u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
+ u64 force_bit);
+
+u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
+ const u64 length, const u32 access_ctrl,
+ const u32 pd, u64 *mr_handle, u32 *lkey);
+
+u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
+ const u8 pagesize, const u8 queue_type,
+ const u64 log_pageaddr, const u64 count);
+
+u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
+ const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
+ struct ehea_mr *mr);
+
+u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr);
+
+/* input param R5 */
+#define H_MEHEAPORT_CAT EHEA_BMASK_IBM(40, 47)
+#define H_MEHEAPORT_PN EHEA_BMASK_IBM(48, 63)
+
+u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
+ const u8 cb_cat, const u64 select_mask,
+ void *cb_addr);
+
+u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
+ const u8 cb_cat, const u64 select_mask,
+ void *cb_addr);
+
+#define H_REGBCMC_PN EHEA_BMASK_IBM(48, 63)
+#define H_REGBCMC_REGTYPE EHEA_BMASK_IBM(61, 63)
+#define H_REGBCMC_MACADDR EHEA_BMASK_IBM(16, 63)
+#define H_REGBCMC_VLANID EHEA_BMASK_IBM(52, 63)
+
+u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
+ const u8 reg_type, const u64 mc_mac_addr,
+ const u16 vlan_id, const u32 hcall_id);
+
+u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
+ const u64 event_mask);
+
+u64 ehea_h_error_data(const u64 adapter_handle, const u64 resource_handle,
+ void *rblock);
+
+#endif /* __EHEA_PHYP_H__ */
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
new file mode 100644
index 000000000000..95b9f4fa811e
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -0,0 +1,1031 @@
+/*
+ * linux/drivers/net/ehea/ehea_qmr.c
+ *
+ * eHEA ethernet device driver for IBM eServer System p
+ *
+ * (C) Copyright IBM Corp. 2006
+ *
+ * Authors:
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include "ehea.h"
+#include "ehea_phyp.h"
+#include "ehea_qmr.h"
+
+struct ehea_bmap *ehea_bmap = NULL;
+
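+/*
+ * Returns the next queue page and advances the iterator by one
+ * hardware page; NULL once the end of the queue is reached or the
+ * page is not aligned to EHEA_PAGESIZE.
+ */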
+static void *hw_qpageit_get_inc(struct hw_queue *queue)
+{
+ void *retvalue = hw_qeit_get(queue);
+
+ queue->current_q_offset += queue->pagesize;
+ if (queue->current_q_offset > queue->queue_length) {
+ queue->current_q_offset -= queue->pagesize;
+ retvalue = NULL;
+ } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
+ pr_err("not on pageboundary\n");
+ retvalue = NULL;
+ }
+ return retvalue;
+}
+
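+/*
+ * Builds the backing store for a hardware queue: nr_of_pages queue
+ * pages of pagesize bytes are carved out of zeroed kernel pages, so
+ * e.g. on a 64 KiB-page kernel each get_zeroed_page() supplies
+ * sixteen 4 KiB EHEA queue pages.
+ */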
+static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
+ const u32 pagesize, const u32 qe_size)
+{
+ int pages_per_kpage = PAGE_SIZE / pagesize;
+ int i, k;
+
+ if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
+ pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
+ (int)PAGE_SIZE, (int)pagesize);
+ return -EINVAL;
+ }
+
+ queue->queue_length = nr_of_pages * pagesize;
+ queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
+ if (!queue->queue_pages) {
+ pr_err("no mem for queue_pages\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * allocate pages for queue:
+ * outer loop allocates whole kernel pages (page aligned) and
+ * inner loop divides a kernel page into smaller hea queue pages
+ */
+ i = 0;
+ while (i < nr_of_pages) {
+ u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
+ if (!kpage)
+ goto out_nomem;
+ for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
+ (queue->queue_pages)[i] = (struct ehea_page *)kpage;
+ kpage += pagesize;
+ i++;
+ }
+ }
+
+ queue->current_q_offset = 0;
+ queue->qe_size = qe_size;
+ queue->pagesize = pagesize;
+ queue->toggle_state = 1;
+
+ return 0;
+out_nomem:
+ for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
+ if (!(queue->queue_pages)[i])
+ break;
+ free_page((unsigned long)(queue->queue_pages)[i]);
+ }
+ return -ENOMEM;
+}
+
+static void hw_queue_dtor(struct hw_queue *queue)
+{
+ int pages_per_kpage = PAGE_SIZE / queue->pagesize;
+ int i, nr_pages;
+
+ if (!queue || !queue->queue_pages)
+ return;
+
+ nr_pages = queue->queue_length / queue->pagesize;
+
+ for (i = 0; i < nr_pages; i += pages_per_kpage)
+ free_page((unsigned long)(queue->queue_pages)[i]);
+
+ kfree(queue->queue_pages);
+}
+
+struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
+ int nr_of_cqe, u64 eq_handle, u32 cq_token)
+{
+ struct ehea_cq *cq;
+	u64 hret, rpage;
+	u32 counter;
+ int ret;
+ void *vpage;
+
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq) {
+ pr_err("no mem for cq\n");
+ goto out_nomem;
+ }
+
+ cq->attr.max_nr_of_cqes = nr_of_cqe;
+ cq->attr.cq_token = cq_token;
+ cq->attr.eq_handle = eq_handle;
+
+ cq->adapter = adapter;
+
+ hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
+ &cq->fw_handle, &cq->epas);
+ if (hret != H_SUCCESS) {
+ pr_err("alloc_resource_cq failed\n");
+ goto out_freemem;
+ }
+
+ ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
+ EHEA_PAGESIZE, sizeof(struct ehea_cqe));
+ if (ret)
+ goto out_freeres;
+
+ for (counter = 0; counter < cq->attr.nr_pages; counter++) {
+ vpage = hw_qpageit_get_inc(&cq->hw_queue);
+ if (!vpage) {
+ pr_err("hw_qpageit_get_inc failed\n");
+ goto out_kill_hwq;
+ }
+
+ rpage = virt_to_abs(vpage);
+ hret = ehea_h_register_rpage(adapter->handle,
+ 0, EHEA_CQ_REGISTER_ORIG,
+ cq->fw_handle, rpage, 1);
+ if (hret < H_SUCCESS) {
+ pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
+ cq, hret, counter, cq->attr.nr_pages);
+ goto out_kill_hwq;
+ }
+
+ if (counter == (cq->attr.nr_pages - 1)) {
+ vpage = hw_qpageit_get_inc(&cq->hw_queue);
+
+ if ((hret != H_SUCCESS) || (vpage)) {
+ pr_err("registration of pages not complete hret=%llx\n",
+ hret);
+ goto out_kill_hwq;
+ }
+ } else {
+ if (hret != H_PAGE_REGISTERED) {
+ pr_err("CQ: registration of page failed hret=%llx\n",
+ hret);
+ goto out_kill_hwq;
+ }
+ }
+ }
+
+ hw_qeit_reset(&cq->hw_queue);
+ ehea_reset_cq_ep(cq);
+ ehea_reset_cq_n1(cq);
+
+ return cq;
+
+out_kill_hwq:
+ hw_queue_dtor(&cq->hw_queue);
+
+out_freeres:
+ ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);
+
+out_freemem:
+ kfree(cq);
+
+out_nomem:
+ return NULL;
+}
+
+u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
+{
+ u64 hret;
+ u64 adapter_handle = cq->adapter->handle;
+
+	/* deregister all previously registered pages */
+ hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
+ if (hret != H_SUCCESS)
+ return hret;
+
+ hw_queue_dtor(&cq->hw_queue);
+ kfree(cq);
+
+ return hret;
+}
+
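+/*
+ * A resource that is still in use fails NORMAL_FREE with H_R_STATE;
+ * in that case the error data is fetched for diagnostics and the
+ * free is retried with the force bit set.
+ */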
+int ehea_destroy_cq(struct ehea_cq *cq)
+{
+ u64 hret, aer, aerr;
+ if (!cq)
+ return 0;
+
+ hcp_epas_dtor(&cq->epas);
+ hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
+ if (hret == H_R_STATE) {
+ ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
+ hret = ehea_destroy_cq_res(cq, FORCE_FREE);
+ }
+
+ if (hret != H_SUCCESS) {
+ pr_err("destroy CQ failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
+ const enum ehea_eq_type type,
+ const u32 max_nr_of_eqes, const u8 eqe_gen)
+{
+ int ret, i;
+ u64 hret, rpage;
+ void *vpage;
+ struct ehea_eq *eq;
+
+ eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+ if (!eq) {
+ pr_err("no mem for eq\n");
+ return NULL;
+ }
+
+ eq->adapter = adapter;
+ eq->attr.type = type;
+ eq->attr.max_nr_of_eqes = max_nr_of_eqes;
+ eq->attr.eqe_gen = eqe_gen;
+ spin_lock_init(&eq->spinlock);
+
+ hret = ehea_h_alloc_resource_eq(adapter->handle,
+ &eq->attr, &eq->fw_handle);
+ if (hret != H_SUCCESS) {
+ pr_err("alloc_resource_eq failed\n");
+ goto out_freemem;
+ }
+
+ ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
+ EHEA_PAGESIZE, sizeof(struct ehea_eqe));
+ if (ret) {
+ pr_err("can't allocate eq pages\n");
+ goto out_freeres;
+ }
+
+ for (i = 0; i < eq->attr.nr_pages; i++) {
+ vpage = hw_qpageit_get_inc(&eq->hw_queue);
+ if (!vpage) {
+ pr_err("hw_qpageit_get_inc failed\n");
+ hret = H_RESOURCE;
+ goto out_kill_hwq;
+ }
+
+ rpage = virt_to_abs(vpage);
+
+ hret = ehea_h_register_rpage(adapter->handle, 0,
+ EHEA_EQ_REGISTER_ORIG,
+ eq->fw_handle, rpage, 1);
+
+ if (i == (eq->attr.nr_pages - 1)) {
+ /* last page */
+ vpage = hw_qpageit_get_inc(&eq->hw_queue);
+ if ((hret != H_SUCCESS) || (vpage))
+ goto out_kill_hwq;
+
+ } else {
+ if (hret != H_PAGE_REGISTERED)
+ goto out_kill_hwq;
+
+ }
+ }
+
+ hw_qeit_reset(&eq->hw_queue);
+ return eq;
+
+out_kill_hwq:
+ hw_queue_dtor(&eq->hw_queue);
+
+out_freeres:
+ ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);
+
+out_freemem:
+ kfree(eq);
+ return NULL;
+}
+
+struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
+{
+ struct ehea_eqe *eqe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&eq->spinlock, flags);
+ eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue);
+ spin_unlock_irqrestore(&eq->spinlock, flags);
+
+ return eqe;
+}
+
+u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
+{
+ u64 hret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&eq->spinlock, flags);
+
+ hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force);
+ spin_unlock_irqrestore(&eq->spinlock, flags);
+
+ if (hret != H_SUCCESS)
+ return hret;
+
+ hw_queue_dtor(&eq->hw_queue);
+ kfree(eq);
+
+ return hret;
+}
+
+int ehea_destroy_eq(struct ehea_eq *eq)
+{
+ u64 hret, aer, aerr;
+ if (!eq)
+ return 0;
+
+ hcp_epas_dtor(&eq->epas);
+
+ hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
+ if (hret == H_R_STATE) {
+ ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
+ hret = ehea_destroy_eq_res(eq, FORCE_FREE);
+ }
+
+ if (hret != H_SUCCESS) {
+ pr_err("destroy EQ failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Allocates memory for a queue and registers its pages with the
+ * hypervisor (phyp).
+ */
+int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
+ int nr_pages, int wqe_size, int act_nr_sges,
+ struct ehea_adapter *adapter, int h_call_q_selector)
+{
+ u64 hret, rpage;
+ int ret, cnt;
+ void *vpage;
+
+ ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
+ if (ret)
+ return ret;
+
+ for (cnt = 0; cnt < nr_pages; cnt++) {
+ vpage = hw_qpageit_get_inc(hw_queue);
+ if (!vpage) {
+ pr_err("hw_qpageit_get_inc failed\n");
+ goto out_kill_hwq;
+ }
+ rpage = virt_to_abs(vpage);
+ hret = ehea_h_register_rpage(adapter->handle,
+ 0, h_call_q_selector,
+ qp->fw_handle, rpage, 1);
+ if (hret < H_SUCCESS) {
+ pr_err("register_rpage_qp failed\n");
+ goto out_kill_hwq;
+ }
+ }
+ hw_qeit_reset(hw_queue);
+ return 0;
+
+out_kill_hwq:
+ hw_queue_dtor(hw_queue);
+ return -EIO;
+}
+
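+/* Decode the encoded WQE size: 128 << enc, e.g. enc 0 -> 128 bytes */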
+static inline u32 map_wqe_size(u8 wqe_enc_size)
+{
+ return 128 << wqe_enc_size;
+}
+
+struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
+ u32 pd, struct ehea_qp_init_attr *init_attr)
+{
+ int ret;
+ u64 hret;
+ struct ehea_qp *qp;
+ u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
+ u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ pr_err("no mem for qp\n");
+ return NULL;
+ }
+
+ qp->adapter = adapter;
+
+ hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
+ &qp->fw_handle, &qp->epas);
+ if (hret != H_SUCCESS) {
+ pr_err("ehea_h_alloc_resource_qp failed\n");
+ goto out_freemem;
+ }
+
+ wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
+ wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
+ wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
+ wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);
+
+ ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
+ wqe_size_in_bytes_sq,
+ init_attr->act_wqe_size_enc_sq, adapter,
+ 0);
+ if (ret) {
+ pr_err("can't register for sq ret=%x\n", ret);
+ goto out_freeres;
+ }
+
+ ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
+ init_attr->nr_rq1_pages,
+ wqe_size_in_bytes_rq1,
+ init_attr->act_wqe_size_enc_rq1,
+ adapter, 1);
+ if (ret) {
+ pr_err("can't register for rq1 ret=%x\n", ret);
+ goto out_kill_hwsq;
+ }
+
+ if (init_attr->rq_count > 1) {
+ ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
+ init_attr->nr_rq2_pages,
+ wqe_size_in_bytes_rq2,
+ init_attr->act_wqe_size_enc_rq2,
+ adapter, 2);
+ if (ret) {
+ pr_err("can't register for rq2 ret=%x\n", ret);
+ goto out_kill_hwr1q;
+ }
+ }
+
+ if (init_attr->rq_count > 2) {
+ ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
+ init_attr->nr_rq3_pages,
+ wqe_size_in_bytes_rq3,
+ init_attr->act_wqe_size_enc_rq3,
+ adapter, 3);
+ if (ret) {
+ pr_err("can't register for rq3 ret=%x\n", ret);
+ goto out_kill_hwr2q;
+ }
+ }
+
+ qp->init_attr = *init_attr;
+
+ return qp;
+
+out_kill_hwr2q:
+ hw_queue_dtor(&qp->hw_rqueue2);
+
+out_kill_hwr1q:
+ hw_queue_dtor(&qp->hw_rqueue1);
+
+out_kill_hwsq:
+ hw_queue_dtor(&qp->hw_squeue);
+
+out_freeres:
+ ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
+ ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE);
+
+out_freemem:
+ kfree(qp);
+ return NULL;
+}
+
+u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
+{
+ u64 hret;
+ struct ehea_qp_init_attr *qp_attr = &qp->init_attr;
+
+ ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
+ hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
+ if (hret != H_SUCCESS)
+ return hret;
+
+ hw_queue_dtor(&qp->hw_squeue);
+ hw_queue_dtor(&qp->hw_rqueue1);
+
+ if (qp_attr->rq_count > 1)
+ hw_queue_dtor(&qp->hw_rqueue2);
+ if (qp_attr->rq_count > 2)
+ hw_queue_dtor(&qp->hw_rqueue3);
+ kfree(qp);
+
+ return hret;
+}
+
+int ehea_destroy_qp(struct ehea_qp *qp)
+{
+ u64 hret, aer, aerr;
+ if (!qp)
+ return 0;
+
+ hcp_epas_dtor(&qp->epas);
+
+ hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
+ if (hret == H_R_STATE) {
+ ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
+ hret = ehea_destroy_qp_res(qp, FORCE_FREE);
+ }
+
+ if (hret != H_SUCCESS) {
+ pr_err("destroy QP failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
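+/*
+ * The busmap is a three-level radix tree over memory sections: a
+ * section index is split with ehea_calc_index() into top, dir and
+ * idx components of EHEA_INDEX_MASK width each.
+ */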
+static inline int ehea_calc_index(unsigned long i, unsigned long s)
+{
+ return (i >> s) & EHEA_INDEX_MASK;
+}
+
+static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
+ int dir)
+{
+ if (!ehea_top_bmap->dir[dir]) {
+ ehea_top_bmap->dir[dir] =
+ kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
+ if (!ehea_top_bmap->dir[dir])
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
+{
+ if (!ehea_bmap->top[top]) {
+ ehea_bmap->top[top] =
+ kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
+ if (!ehea_bmap->top[top])
+ return -ENOMEM;
+ }
+ return ehea_init_top_bmap(ehea_bmap->top[top], dir);
+}
+
+static DEFINE_MUTEX(ehea_busmap_mutex);
+static unsigned long ehea_mr_len;
+
+#define EHEA_BUSMAP_ADD_SECT 1
+#define EHEA_BUSMAP_REM_SECT 0
+
+static void ehea_rebuild_busmap(void)
+{
+ u64 vaddr = EHEA_BUSMAP_START;
+ int top, dir, idx;
+
+ for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
+ struct ehea_top_bmap *ehea_top;
+ int valid_dir_entries = 0;
+
+ if (!ehea_bmap->top[top])
+ continue;
+ ehea_top = ehea_bmap->top[top];
+ for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
+ struct ehea_dir_bmap *ehea_dir;
+ int valid_entries = 0;
+
+ if (!ehea_top->dir[dir])
+ continue;
+ valid_dir_entries++;
+ ehea_dir = ehea_top->dir[dir];
+ for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
+ if (!ehea_dir->ent[idx])
+ continue;
+ valid_entries++;
+ ehea_dir->ent[idx] = vaddr;
+ vaddr += EHEA_SECTSIZE;
+ }
+ if (!valid_entries) {
+ ehea_top->dir[dir] = NULL;
+ kfree(ehea_dir);
+ }
+ }
+ if (!valid_dir_entries) {
+ ehea_bmap->top[top] = NULL;
+ kfree(ehea_top);
+ }
+ }
+}
+
+static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
+{
+ unsigned long i, start_section, end_section;
+
+ if (!nr_pages)
+ return 0;
+
+ if (!ehea_bmap) {
+ ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
+ if (!ehea_bmap)
+ return -ENOMEM;
+ }
+
+ start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
+ end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
+ /* Mark entries as valid or invalid only; address is assigned later */
+ for (i = start_section; i < end_section; i++) {
+ u64 flag;
+ int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
+ int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
+ int idx = i & EHEA_INDEX_MASK;
+
+ if (add) {
+ int ret = ehea_init_bmap(ehea_bmap, top, dir);
+ if (ret)
+ return ret;
+ flag = 1; /* valid */
+ ehea_mr_len += EHEA_SECTSIZE;
+ } else {
+ if (!ehea_bmap->top[top])
+ continue;
+ if (!ehea_bmap->top[top]->dir[dir])
+ continue;
+ flag = 0; /* invalid */
+ ehea_mr_len -= EHEA_SECTSIZE;
+ }
+
+ ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
+ }
+ ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
+ return 0;
+}
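+
+/*
+ * Illustrative note (a sketch, not driver code): the busmap is a
+ * three-level radix tree over 16 MB (EHEA_SECTSIZE) sections. A section
+ * index i decomposes exactly as in the loop above (the shift constants
+ * are assumed to come from ehea.h):
+ *
+ *     top = (i >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
+ *     dir = (i >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
+ *     idx = i & EHEA_INDEX_MASK;
+ *     busaddr = ehea_bmap->top[top]->dir[dir]->ent[idx];
+ *
+ * Levels are allocated lazily by ehea_init_bmap(); contiguous bus
+ * addresses are assigned afterwards by ehea_rebuild_busmap().
+ */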
+
+int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
+{
+ int ret;
+
+ mutex_lock(&ehea_busmap_mutex);
+ ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
+ mutex_unlock(&ehea_busmap_mutex);
+ return ret;
+}
+
+int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
+{
+ int ret;
+
+ mutex_lock(&ehea_busmap_mutex);
+ ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
+ mutex_unlock(&ehea_busmap_mutex);
+ return ret;
+}
+
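+/*
+ * A pfn qualifies as a hugepage start only if it is aligned to the
+ * 16 GB (1 << EHEA_HUGEPAGESHIFT) hugepage size and heads a compound
+ * page of exactly that order; everything else is registered in plain
+ * EHEA_SECTSIZE steps by ehea_create_busmap_callback() below.
+ */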
+static int ehea_is_hugepage(unsigned long pfn)
+{
+ int page_order;
+
+ if (pfn & EHEA_HUGEPAGE_PFN_MASK)
+ return 0;
+
+ page_order = compound_order(pfn_to_page(pfn));
+ if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
+ return 0;
+
+ return 1;
+}
+
+static int ehea_create_busmap_callback(unsigned long initial_pfn,
+ unsigned long total_nr_pages, void *arg)
+{
+ int ret;
+ unsigned long pfn, start_pfn, end_pfn, nr_pages;
+
+ if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
+ return ehea_update_busmap(initial_pfn, total_nr_pages,
+ EHEA_BUSMAP_ADD_SECT);
+
+ /* Given chunk is >= 16GB -> check for hugepages */
+ start_pfn = initial_pfn;
+ end_pfn = initial_pfn + total_nr_pages;
+ pfn = start_pfn;
+
+ while (pfn < end_pfn) {
+ if (ehea_is_hugepage(pfn)) {
+ /* Add mem found in front of the hugepage */
+ nr_pages = pfn - start_pfn;
+ ret = ehea_update_busmap(start_pfn, nr_pages,
+ EHEA_BUSMAP_ADD_SECT);
+ if (ret)
+ return ret;
+
+ /* Skip the hugepage */
+ pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
+ start_pfn = pfn;
+ } else
+ pfn += (EHEA_SECTSIZE / PAGE_SIZE);
+ }
+
+ /* Add mem found behind the hugepage(s) */
+ nr_pages = pfn - start_pfn;
+ return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
+}
+
+int ehea_create_busmap(void)
+{
+ int ret;
+
+ mutex_lock(&ehea_busmap_mutex);
+ ehea_mr_len = 0;
+ ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
+ ehea_create_busmap_callback);
+ mutex_unlock(&ehea_busmap_mutex);
+ return ret;
+}
+
+void ehea_destroy_busmap(void)
+{
+ int top, dir;
+ mutex_lock(&ehea_busmap_mutex);
+ if (!ehea_bmap)
+ goto out_destroy;
+
+ for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
+ if (!ehea_bmap->top[top])
+ continue;
+
+ for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
+ if (!ehea_bmap->top[top]->dir[dir])
+ continue;
+
+ kfree(ehea_bmap->top[top]->dir[dir]);
+ }
+
+ kfree(ehea_bmap->top[top]);
+ }
+
+ kfree(ehea_bmap);
+ ehea_bmap = NULL;
+out_destroy:
+ mutex_unlock(&ehea_busmap_mutex);
+}
+
+u64 ehea_map_vaddr(void *caddr)
+{
+ int top, dir, idx;
+ unsigned long index, offset;
+
+ if (!ehea_bmap)
+ return EHEA_INVAL_ADDR;
+
+ index = virt_to_abs(caddr) >> SECTION_SIZE_BITS;
+ top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
+ if (!ehea_bmap->top[top])
+ return EHEA_INVAL_ADDR;
+
+ dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
+ if (!ehea_bmap->top[top]->dir[dir])
+ return EHEA_INVAL_ADDR;
+
+ idx = index & EHEA_INDEX_MASK;
+ if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
+ return EHEA_INVAL_ADDR;
+
+ offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
+ return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
+}
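+
+/*
+ * Typical use (a sketch under the assumption that callers fill
+ * scatter/gather entries this way): translate a kernel virtual buffer
+ * address into the bus address the HEA expects, e.g.
+ *
+ *     sg->vaddr = ehea_map_vaddr(skb->data);
+ *     if (sg->vaddr == EHEA_INVAL_ADDR)
+ *             ... address is outside the registered regions, drop ...
+ */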
+
+static inline void *ehea_calc_sectbase(int top, int dir, int idx)
+{
+ unsigned long ret = idx;
+ ret |= dir << EHEA_DIR_INDEX_SHIFT;
+ ret |= top << EHEA_TOP_INDEX_SHIFT;
+ return abs_to_virt(ret << SECTION_SIZE_BITS);
+}
+
+static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
+ struct ehea_adapter *adapter,
+ struct ehea_mr *mr)
+{
+ void *pg;
+ u64 j, m, hret;
+ unsigned long k = 0;
+ u64 pt_abs = virt_to_abs(pt);
+
+ void *sectbase = ehea_calc_sectbase(top, dir, idx);
+
+ for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {
+
+ for (m = 0; m < EHEA_MAX_RPAGE; m++) {
+ pg = sectbase + ((k++) * EHEA_PAGESIZE);
+ pt[m] = virt_to_abs(pg);
+ }
+ hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
+ 0, pt_abs, EHEA_MAX_RPAGE);
+
+ if ((hret != H_SUCCESS) &&
+ (hret != H_PAGE_REGISTERED)) {
+ ehea_h_free_resource(adapter->handle, mr->handle,
+ FORCE_FREE);
+ pr_err("register_rpage_mr failed\n");
+ return hret;
+ }
+ }
+ return hret;
+}
+
+static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
+ struct ehea_adapter *adapter,
+ struct ehea_mr *mr)
+{
+ u64 hret = H_SUCCESS;
+ int idx;
+
+ for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
+ if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
+ continue;
+
+ hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
+ if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+ return hret;
+ }
+ return hret;
+}
+
+static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
+ struct ehea_adapter *adapter,
+ struct ehea_mr *mr)
+{
+ u64 hret = H_SUCCESS;
+ int dir;
+
+ for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
+ if (!ehea_bmap->top[top]->dir[dir])
+ continue;
+
+ hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
+ if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+ return hret;
+ }
+ return hret;
+}
+
+int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
+{
+ int ret;
+ u64 *pt;
+ u64 hret;
+ u32 acc_ctrl = EHEA_MR_ACC_CTRL;
+
+ unsigned long top;
+
+ pt = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!pt) {
+ pr_err("no mem\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
+ ehea_mr_len, acc_ctrl, adapter->pd,
+ &mr->handle, &mr->lkey);
+
+ if (hret != H_SUCCESS) {
+ pr_err("alloc_resource_mr failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!ehea_bmap) {
+ ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
+ pr_err("no busmap available\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
+ if (!ehea_bmap->top[top])
+ continue;
+
+ hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
+ if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
+ break;
+ }
+
+ if (hret != H_SUCCESS) {
+ ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
+ pr_err("registering mr failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ mr->vaddr = EHEA_BUSMAP_START;
+ mr->adapter = adapter;
+ ret = 0;
+out:
+ free_page((unsigned long)pt);
+ return ret;
+}
+
+int ehea_rem_mr(struct ehea_mr *mr)
+{
+ u64 hret;
+
+ if (!mr || !mr->adapter)
+ return -EINVAL;
+
+ hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
+ FORCE_FREE);
+ if (hret != H_SUCCESS) {
+ pr_err("destroy MR failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
+ struct ehea_mr *shared_mr)
+{
+ u64 hret;
+
+ hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
+ old_mr->vaddr, EHEA_MR_ACC_CTRL,
+ adapter->pd, shared_mr);
+ if (hret != H_SUCCESS)
+ return -EIO;
+
+ shared_mr->adapter = adapter;
+
+ return 0;
+}
+
+void print_error_data(u64 *data)
+{
+ int length;
+ u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
+ u64 resource = data[1];
+
+ length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);
+
+ if (length > EHEA_PAGESIZE)
+ length = EHEA_PAGESIZE;
+
+ if (type == EHEA_AER_RESTYPE_QP)
+ pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
+ resource, data[6], data[12], data[22]);
+ else if (type == EHEA_AER_RESTYPE_CQ)
+ pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
+ resource, data[6]);
+ else if (type == EHEA_AER_RESTYPE_EQ)
+ pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
+ resource, data[6]);
+
+ ehea_dump(data, length, "error data");
+}
+
+u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
+ u64 *aer, u64 *aerr)
+{
+ unsigned long ret;
+ u64 *rblock;
+ u64 type = 0;
+
+ rblock = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!rblock) {
+ pr_err("Cannot allocate rblock memory\n");
+ goto out;
+ }
+
+ ret = ehea_h_error_data(adapter->handle, res_handle, rblock);
+
+ if (ret == H_SUCCESS) {
+ type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
+ *aer = rblock[6];
+ *aerr = rblock[12];
+ print_error_data(rblock);
+ } else if (ret == H_R_STATE) {
+ pr_err("No error data available: %llX\n", res_handle);
+ } else
+ pr_err("Error data could not be fetched: %llX\n", res_handle);
+
+ free_page((unsigned long)rblock);
+out:
+ return type;
+}
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.h b/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
new file mode 100644
index 000000000000..fddff8ec8cfd
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
@@ -0,0 +1,404 @@
+/*
+ * linux/drivers/net/ehea/ehea_qmr.h
+ *
+ * eHEA ethernet device driver for IBM eServer System p
+ *
+ * (C) Copyright IBM Corp. 2006
+ *
+ * Authors:
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __EHEA_QMR_H__
+#define __EHEA_QMR_H__
+
+#include <linux/prefetch.h>
+#include "ehea.h"
+#include "ehea_hw.h"
+
+/*
+ * page size of ehea hardware queues
+ */
+
+#define EHEA_PAGESHIFT 12
+#define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT)
+#define EHEA_SECTSIZE (1UL << 24)
+#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
+#define EHEA_HUGEPAGESHIFT 34
+#define EHEA_HUGEPAGE_SIZE (1UL << EHEA_HUGEPAGESHIFT)
+#define EHEA_HUGEPAGE_PFN_MASK ((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
+
+#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
+#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
+#endif
+
+/* Some abbreviations used here:
+ *
+ * WQE - Work Queue Entry
+ * SWQE - Send Work Queue Entry
+ * RWQE - Receive Work Queue Entry
+ * CQE - Completion Queue Entry
+ * EQE - Event Queue Entry
+ * MR - Memory Region
+ */
+
+/* Use of WR_ID field for EHEA */
+#define EHEA_WR_ID_COUNT EHEA_BMASK_IBM(0, 19)
+#define EHEA_WR_ID_TYPE EHEA_BMASK_IBM(20, 23)
+#define EHEA_SWQE2_TYPE 0x1
+#define EHEA_SWQE3_TYPE 0x2
+#define EHEA_RWQE2_TYPE 0x3
+#define EHEA_RWQE3_TYPE 0x4
+#define EHEA_WR_ID_INDEX EHEA_BMASK_IBM(24, 47)
+#define EHEA_WR_ID_REFILL EHEA_BMASK_IBM(48, 63)
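+
+/* Intended composition of wr_id (a sketch; EHEA_BMASK_SET/EHEA_BMASK_GET
+ * are assumed to be provided by ehea_hw.h):
+ *
+ *     wqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
+ *                | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, count)
+ *                | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
+ *
+ * so a completion handler can recover the slot with
+ * EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id).
+ */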
+
+struct ehea_vsgentry {
+ u64 vaddr;
+ u32 l_key;
+ u32 len;
+};
+
+/* maximum number of sg entries allowed in a WQE */
+#define EHEA_MAX_WQE_SG_ENTRIES 252
+#define SWQE2_MAX_IMM (0xD0 - 0x30)
+#define SWQE3_MAX_IMM 224
+
+/* tx control flags for swqe */
+#define EHEA_SWQE_CRC 0x8000
+#define EHEA_SWQE_IP_CHECKSUM 0x4000
+#define EHEA_SWQE_TCP_CHECKSUM 0x2000
+#define EHEA_SWQE_TSO 0x1000
+#define EHEA_SWQE_SIGNALLED_COMPLETION 0x0800
+#define EHEA_SWQE_VLAN_INSERT 0x0400
+#define EHEA_SWQE_IMM_DATA_PRESENT 0x0200
+#define EHEA_SWQE_DESCRIPTORS_PRESENT 0x0100
+#define EHEA_SWQE_WRAP_CTL_REC 0x0080
+#define EHEA_SWQE_WRAP_CTL_FORCE 0x0040
+#define EHEA_SWQE_BIND 0x0020
+#define EHEA_SWQE_PURGE 0x0010
+
+/* sizeof(struct ehea_swqe) less the union */
+#define SWQE_HEADER_SIZE 32
+
+struct ehea_swqe {
+ u64 wr_id;
+ u16 tx_control;
+ u16 vlan_tag;
+ u8 reserved1;
+ u8 ip_start;
+ u8 ip_end;
+ u8 immediate_data_length;
+ u8 tcp_offset;
+ u8 reserved2;
+ u16 tcp_end;
+ u8 wrap_tag;
+ u8 descriptors; /* number of valid descriptors in WQE */
+ u16 reserved3;
+ u16 reserved4;
+ u16 mss;
+ u32 reserved5;
+ union {
+ /* Send WQE Format 1 */
+ struct {
+ struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
+ } no_immediate_data;
+
+ /* Send WQE Format 2 */
+ struct {
+ struct ehea_vsgentry sg_entry;
+ /* 0x30 */
+ u8 immediate_data[SWQE2_MAX_IMM];
+ /* 0xd0 */
+ struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
+ } immdata_desc __packed;
+
+ /* Send WQE Format 3 */
+ struct {
+ u8 immediate_data[SWQE3_MAX_IMM];
+ } immdata_nodesc;
+ } u;
+};
+
+struct ehea_rwqe {
+ u64 wr_id; /* work request ID */
+ u8 reserved1[5];
+ u8 data_segments;
+ u16 reserved2;
+ u64 reserved3;
+ u64 reserved4;
+ struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
+};
+
+#define EHEA_CQE_VLAN_TAG_XTRACT 0x0400
+
+#define EHEA_CQE_TYPE_RQ 0x60
+#define EHEA_CQE_STAT_ERR_MASK 0x700F
+#define EHEA_CQE_STAT_FAT_ERR_MASK 0xF
+#define EHEA_CQE_BLIND_CKSUM 0x8000
+#define EHEA_CQE_STAT_ERR_TCP 0x4000
+#define EHEA_CQE_STAT_ERR_IP 0x2000
+#define EHEA_CQE_STAT_ERR_CRC 0x1000
+
+/* Defines which bad send CQE statuses lead to a port reset */
+#define EHEA_CQE_STAT_RESET_MASK 0x0002
+
+struct ehea_cqe {
+ u64 wr_id; /* work request ID from WQE */
+ u8 type;
+ u8 valid;
+ u16 status;
+ u16 reserved1;
+ u16 num_bytes_transfered;
+ u16 vlan_tag;
+ u16 inet_checksum_value;
+ u8 reserved2;
+ u8 header_length;
+ u16 reserved3;
+ u16 page_offset;
+ u16 wqe_count;
+ u32 qp_token;
+ u32 timestamp;
+ u32 reserved4;
+ u64 reserved5[3];
+};
+
+#define EHEA_EQE_VALID EHEA_BMASK_IBM(0, 0)
+#define EHEA_EQE_IS_CQE EHEA_BMASK_IBM(1, 1)
+#define EHEA_EQE_IDENTIFIER EHEA_BMASK_IBM(2, 7)
+#define EHEA_EQE_QP_CQ_NUMBER EHEA_BMASK_IBM(8, 31)
+#define EHEA_EQE_QP_TOKEN EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_CQ_TOKEN EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_KEY EHEA_BMASK_IBM(32, 63)
+#define EHEA_EQE_PORT_NUMBER EHEA_BMASK_IBM(56, 63)
+#define EHEA_EQE_EQ_NUMBER EHEA_BMASK_IBM(48, 63)
+#define EHEA_EQE_SM_ID EHEA_BMASK_IBM(48, 63)
+#define EHEA_EQE_SM_MECH_NUMBER EHEA_BMASK_IBM(48, 55)
+#define EHEA_EQE_SM_PORT_NUMBER EHEA_BMASK_IBM(56, 63)
+
+#define EHEA_AER_RESTYPE_QP 0x8
+#define EHEA_AER_RESTYPE_CQ 0x4
+#define EHEA_AER_RESTYPE_EQ 0x3
+
+/* Defines which affiliated errors lead to a port reset */
+#define EHEA_AER_RESET_MASK 0xFFFFFFFFFEFFFFFFULL
+#define EHEA_AERR_RESET_MASK 0xFFFFFFFFFFFFFFFFULL
+
+struct ehea_eqe {
+ u64 entry;
+};
+
+#define ERROR_DATA_LENGTH EHEA_BMASK_IBM(52, 63)
+#define ERROR_DATA_TYPE EHEA_BMASK_IBM(0, 7)
+
+static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
+{
+ struct ehea_page *current_page;
+
+ if (q_offset >= queue->queue_length)
+ q_offset -= queue->queue_length;
+ current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
+ return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
+}
+
+static inline void *hw_qeit_get(struct hw_queue *queue)
+{
+ return hw_qeit_calc(queue, queue->current_q_offset);
+}
+
+static inline void hw_qeit_inc(struct hw_queue *queue)
+{
+ queue->current_q_offset += queue->qe_size;
+ if (queue->current_q_offset >= queue->queue_length) {
+ queue->current_q_offset = 0;
+ /* toggle the valid flag */
+ queue->toggle_state = (~queue->toggle_state) & 1;
+ }
+}
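+
+/*
+ * Note on the valid-bit scheme: the ring has no shared consumer index.
+ * Each entry carries a valid bit in the top bit of its first byte, and
+ * the producer inverts the sense of "valid" on every wrap; toggle_state
+ * mirrors that inversion. An entry is therefore consumable exactly when
+ *
+ *     (entry->valid >> 7) == (queue->toggle_state & 1)
+ *
+ * as checked by hw_qeit_get_inc_valid() and hw_qeit_get_valid() below.
+ */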
+
+static inline void *hw_qeit_get_inc(struct hw_queue *queue)
+{
+ void *retvalue = hw_qeit_get(queue);
+ hw_qeit_inc(queue);
+ return retvalue;
+}
+
+static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
+{
+ struct ehea_cqe *retvalue = hw_qeit_get(queue);
+ u8 valid = retvalue->valid;
+ void *pref;
+
+ if ((valid >> 7) == (queue->toggle_state & 1)) {
+ /* this is a good one */
+ hw_qeit_inc(queue);
+ pref = hw_qeit_calc(queue, queue->current_q_offset);
+ prefetch(pref);
+ prefetch(pref + 128);
+ } else
+ retvalue = NULL;
+ return retvalue;
+}
+
+static inline void *hw_qeit_get_valid(struct hw_queue *queue)
+{
+ struct ehea_cqe *retvalue = hw_qeit_get(queue);
+ void *pref;
+ u8 valid;
+
+ pref = hw_qeit_calc(queue, queue->current_q_offset);
+ prefetch(pref);
+ prefetch(pref + 128);
+ prefetch(pref + 256);
+ valid = retvalue->valid;
+ if ((valid >> 7) != (queue->toggle_state & 1))
+ retvalue = NULL;
+ return retvalue;
+}
+
+static inline void *hw_qeit_reset(struct hw_queue *queue)
+{
+ queue->current_q_offset = 0;
+ return hw_qeit_get(queue);
+}
+
+static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
+{
+ u64 last_entry_in_q = queue->queue_length - queue->qe_size;
+ void *retvalue;
+
+ retvalue = hw_qeit_get(queue);
+ queue->current_q_offset += queue->qe_size;
+ if (queue->current_q_offset > last_entry_in_q) {
+ queue->current_q_offset = 0;
+ queue->toggle_state = (~queue->toggle_state) & 1;
+ }
+ return retvalue;
+}
+
+static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
+{
+ void *retvalue = hw_qeit_get(queue);
+ u32 qe = *(u8 *)retvalue;
+ if ((qe >> 7) == (queue->toggle_state & 1))
+ hw_qeit_eq_get_inc(queue);
+ else
+ retvalue = NULL;
+ return retvalue;
+}
+
+static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
+ int rq_nr)
+{
+ struct hw_queue *queue;
+
+ if (rq_nr == 1)
+ queue = &qp->hw_rqueue1;
+ else if (rq_nr == 2)
+ queue = &qp->hw_rqueue2;
+ else
+ queue = &qp->hw_rqueue3;
+
+ return hw_qeit_get_inc(queue);
+}
+
+static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
+ int *wqe_index)
+{
+ struct hw_queue *queue = &my_qp->hw_squeue;
+ struct ehea_swqe *wqe_p;
+
+ *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
+ wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue);
+
+ return wqe_p;
+}
+
+static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
+{
+ iosync();
+ ehea_update_sqa(my_qp, 1);
+}
+
+static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
+{
+ struct hw_queue *queue = &qp->hw_rqueue1;
+
+ *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
+ return hw_qeit_get_valid(queue);
+}
+
+static inline void ehea_inc_cq(struct ehea_cq *cq)
+{
+ hw_qeit_inc(&cq->hw_queue);
+}
+
+static inline void ehea_inc_rq1(struct ehea_qp *qp)
+{
+ hw_qeit_inc(&qp->hw_rqueue1);
+}
+
+static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
+{
+ return hw_qeit_get_valid(&my_cq->hw_queue);
+}
+
+#define EHEA_CQ_REGISTER_ORIG 0
+#define EHEA_EQ_REGISTER_ORIG 0
+
+enum ehea_eq_type {
+ EHEA_EQ = 0, /* event queue */
+ EHEA_NEQ /* notification event queue */
+};
+
+struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
+ enum ehea_eq_type type,
+ const u32 length, const u8 eqe_gen);
+
+int ehea_destroy_eq(struct ehea_eq *eq);
+
+struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);
+
+struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
+ u64 eq_handle, u32 cq_token);
+
+int ehea_destroy_cq(struct ehea_cq *cq);
+
+struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
+ struct ehea_qp_init_attr *init_attr);
+
+int ehea_destroy_qp(struct ehea_qp *qp);
+
+int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr);
+
+int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
+ struct ehea_mr *shared_mr);
+
+int ehea_rem_mr(struct ehea_mr *mr);
+
+u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
+ u64 *aer, u64 *aerr);
+
+int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
+int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
+int ehea_create_busmap(void);
+void ehea_destroy_busmap(void);
+u64 ehea_map_vaddr(void *caddr);
+
+#endif /* __EHEA_QMR_H__ */
diff --git a/drivers/net/ethernet/ibm/emac/Kconfig b/drivers/net/ethernet/ibm/emac/Kconfig
new file mode 100644
index 000000000000..3f44a30e0615
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/Kconfig
@@ -0,0 +1,76 @@
+config IBM_EMAC
+ tristate "IBM EMAC Ethernet support"
+ depends on PPC_DCR
+ select CRC32
+ help
+ This driver supports the IBM EMAC family of Ethernet controllers
+ typically found on 4xx embedded PowerPC chips, but also on the
+ Axon southbridge for Cell.
+
+config IBM_EMAC_RXB
+ int "Number of receive buffers"
+ depends on IBM_EMAC
+ default "128"
+
+config IBM_EMAC_TXB
+ int "Number of transmit buffers"
+ depends on IBM_EMAC
+ default "64"
+
+config IBM_EMAC_POLL_WEIGHT
+ int "MAL NAPI polling weight"
+ depends on IBM_EMAC
+ default "32"
+
+config IBM_EMAC_RX_COPY_THRESHOLD
+ int "RX skb copy threshold (bytes)"
+ depends on IBM_EMAC
+ default "256"
+
+config IBM_EMAC_RX_SKB_HEADROOM
+ int "Additional RX skb headroom (bytes)"
+ depends on IBM_EMAC
+ default "0"
+ help
+ Additional receive skb headroom. Note that the driver
+ will always reserve at least 2 bytes to keep the IP header
+ aligned, so there is usually no need to add any extra
+ headroom.
+
+ If unsure, set to 0.
+
+config IBM_EMAC_DEBUG
+ bool "Debugging"
+ depends on IBM_EMAC
+ default n
+
+# The options below have to be select'ed by the respective
+# processor types or platforms
+
+config IBM_EMAC_ZMII
+ bool
+ default n
+
+config IBM_EMAC_RGMII
+ bool
+ default n
+
+config IBM_EMAC_TAH
+ bool
+ default n
+
+config IBM_EMAC_EMAC4
+ bool
+ default n
+
+config IBM_EMAC_NO_FLOW_CTRL
+ bool
+ default n
+
+config IBM_EMAC_MAL_CLR_ICINTSTAT
+ bool
+ default n
+
+config IBM_EMAC_MAL_COMMON_ERR
+ bool
+ default n
diff --git a/drivers/net/ethernet/ibm/emac/Makefile b/drivers/net/ethernet/ibm/emac/Makefile
new file mode 100644
index 000000000000..eba21835d90d
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the PowerPC 4xx on-chip ethernet driver
+#
+
+obj-$(CONFIG_IBM_EMAC) += ibm_emac.o
+
+ibm_emac-y := mal.o core.o phy.o
+ibm_emac-$(CONFIG_IBM_EMAC_ZMII) += zmii.o
+ibm_emac-$(CONFIG_IBM_EMAC_RGMII) += rgmii.o
+ibm_emac-$(CONFIG_IBM_EMAC_TAH) += tah.o
+ibm_emac-$(CONFIG_IBM_EMAC_DEBUG) += debug.o
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
new file mode 100644
index 000000000000..a573df1fafb6
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -0,0 +1,3074 @@
+/*
+ * drivers/net/ibm_newemac/core.c
+ *
+ * Driver for PowerPC 4xx on-chip ethernet controller.
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * Based on the arch/ppc version of the driver:
+ *
+ * Copyright (c) 2004, 2005 Zultys Technologies.
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *
+ * Based on original work by
+ * Matt Porter <mporter@kernel.crashing.org>
+ * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * Armin Kuster <akuster@mvista.com>
+ * Johnnie Peters <jpeters@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/bitops.h>
+#include <linux/workqueue.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/slab.h>
+
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+
+#include "core.h"
+
+/*
+ * Lack of dma_unmap_???? calls is intentional.
+ *
+ * API-correct usage requires additional support state information to be
+ * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
+ * EMAC design (e.g. TX buffer passed from network stack can be split into
+ * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
+ * maintaining such information will add additional overhead.
+ * Current DMA API implementation for 4xx processors only ensures cache coherency
+ * and dma_unmap_???? routines are empty and are likely to stay this way.
+ * I decided to omit dma_unmap_??? calls because I don't want to add additional
+ * complexity just for the sake of following some abstract API, when it doesn't
+ * add any real benefit to the driver. I understand that this decision may be
+ * controversial, but I really tried to make the code API-correct and efficient
+ * at the same time and didn't come up with code I liked :(. --ebs
+ */
+
+#define DRV_NAME "emac"
+#define DRV_VERSION "3.54"
+#define DRV_DESC "PPC 4xx OCP EMAC driver"
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_AUTHOR
+ ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
+MODULE_LICENSE("GPL");
+
+/*
+ * PPC64 doesn't (yet) have a cacheable_memcpy
+ */
+#ifdef CONFIG_PPC64
+#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
+#endif
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
+
+/* If the packet size is less than this number, we allocate a small skb and copy
+ * the packet contents into it instead of just sending the original big skb up
+ */
+#define EMAC_RX_COPY_THRESH CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
+
+/* Since multiple EMACs share MDIO lines in various ways, we need
+ * to avoid re-using the same PHY ID in cases where the arch didn't
+ * set up precise phy_map entries
+ *
+ * XXX This is something that needs to be reworked as we can have multiple
+ * EMAC "sets" (multiple ASICs containing several EMACs) though we can
+ * probably require in that case to have explicit PHY IDs in the device-tree
+ */
+static u32 busy_phy_map;
+static DEFINE_MUTEX(emac_phy_map_lock);
+
+/* This is the wait queue used to wait on any event related to probing,
+ * that is, discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
+ */
+static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
+
+/* Having stable interface names is a doomed idea. However, it would be nice
+ * if we didn't have completely random interface names at boot too :-) It's
+ * just a matter of making everybody's life easier. Since we are doing
+ * threaded probing, it's a bit harder though. The base idea here is that
+ * we make up a list of all emacs in the device-tree before we register the
+ * driver. Every emac will then wait for the previous one in the list to
+ * initialize before itself. We should also keep that list ordered by
+ * cell_index.
+ * That list is only 4 entries long, meaning that additional EMACs don't
+ * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
+ */
+
+#define EMAC_BOOT_LIST_SIZE 4
+static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
+
+/* How long should I wait for dependent devices? */
+#define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
+
+/* I don't want to litter the system log with timeout errors
+ * when we have a brain-damaged PHY.
+ */
+static inline void emac_report_timeout_error(struct emac_instance *dev,
+ const char *error)
+{
+ if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
+ EMAC_FTR_460EX_PHY_CLK_FIX |
+ EMAC_FTR_440EP_PHY_CLK_FIX))
+ DBG(dev, "%s" NL, error);
+ else if (net_ratelimit())
+ printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
+ error);
+}
+
+/* EMAC PHY clock workaround:
+ * 440EP/440GR has a more sane SDR0_MFR register implementation than 440GX,
+ * which allows controlling each EMAC clock
+ */
+static inline void emac_rx_clk_tx(struct emac_instance *dev)
+{
+#ifdef CONFIG_PPC_DCR_NATIVE
+ if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
+ dcri_clrset(SDR0, SDR0_MFR,
+ 0, SDR0_MFR_ECS >> dev->cell_index);
+#endif
+}
+
+static inline void emac_rx_clk_default(struct emac_instance *dev)
+{
+#ifdef CONFIG_PPC_DCR_NATIVE
+ if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
+ dcri_clrset(SDR0, SDR0_MFR,
+ SDR0_MFR_ECS >> dev->cell_index, 0);
+#endif
+}
+
+/* PHY polling intervals */
+#define PHY_POLL_LINK_ON HZ
+#define PHY_POLL_LINK_OFF (HZ / 5)
+
+/* Graceful stop timeouts in us.
+ * We should allow up to 1 frame time (full-duplex, ignoring collisions)
+ */
+#define STOP_TIMEOUT_10 1230
+#define STOP_TIMEOUT_100 124
+#define STOP_TIMEOUT_1000 13
+#define STOP_TIMEOUT_1000_JUMBO 73
+
+static unsigned char default_mcast_addr[] = {
+ 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
+};
+
+/* Please keep this in sync with struct ibm_emac_stats/ibm_emac_error_stats */
+static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
+ "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
+ "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
+ "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
+ "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
+ "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
+ "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
+ "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
+ "rx_bad_packet", "rx_runt_packet", "rx_short_event",
+ "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
+ "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
+ "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
+ "tx_bd_excessive_collisions", "tx_bd_late_collision",
+ "tx_bd_multple_collisions", "tx_bd_single_collision",
+ "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
+ "tx_errors"
+};
+
+static irqreturn_t emac_irq(int irq, void *dev_instance);
+static void emac_clean_tx_ring(struct emac_instance *dev);
+static void __emac_set_multicast_list(struct emac_instance *dev);
+
+static inline int emac_phy_supports_gige(int phy_mode)
+{
+ return phy_mode == PHY_MODE_GMII ||
+ phy_mode == PHY_MODE_RGMII ||
+ phy_mode == PHY_MODE_SGMII ||
+ phy_mode == PHY_MODE_TBI ||
+ phy_mode == PHY_MODE_RTBI;
+}
+
+static inline int emac_phy_gpcs(int phy_mode)
+{
+ return phy_mode == PHY_MODE_SGMII ||
+ phy_mode == PHY_MODE_TBI ||
+ phy_mode == PHY_MODE_RTBI;
+}
+
+static inline void emac_tx_enable(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ u32 r;
+
+ DBG(dev, "tx_enable" NL);
+
+ r = in_be32(&p->mr0);
+ if (!(r & EMAC_MR0_TXE))
+ out_be32(&p->mr0, r | EMAC_MR0_TXE);
+}
+
+static void emac_tx_disable(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ u32 r;
+
+ DBG(dev, "tx_disable" NL);
+
+ r = in_be32(&p->mr0);
+ if (r & EMAC_MR0_TXE) {
+ int n = dev->stop_timeout;
+ out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
+ while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
+ udelay(1);
+ --n;
+ }
+ if (unlikely(!n))
+ emac_report_timeout_error(dev, "TX disable timeout");
+ }
+}
+
+static void emac_rx_enable(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ u32 r;
+
+ if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
+ goto out;
+
+ DBG(dev, "rx_enable" NL);
+
+ r = in_be32(&p->mr0);
+ if (!(r & EMAC_MR0_RXE)) {
+ if (unlikely(!(r & EMAC_MR0_RXI))) {
+ /* Wait if previous async disable is still in progress */
+ int n = dev->stop_timeout;
+ while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
+ udelay(1);
+ --n;
+ }
+ if (unlikely(!n))
+ emac_report_timeout_error(dev,
+ "RX disable timeout");
+ }
+ out_be32(&p->mr0, r | EMAC_MR0_RXE);
+ }
+ out:
+ ;
+}
+
+static void emac_rx_disable(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ u32 r;
+
+ DBG(dev, "rx_disable" NL);
+
+ r = in_be32(&p->mr0);
+ if (r & EMAC_MR0_RXE) {
+ int n = dev->stop_timeout;
+ out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
+ while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
+ udelay(1);
+ --n;
+ }
+ if (unlikely(!n))
+ emac_report_timeout_error(dev, "RX disable timeout");
+ }
+}
+
+static inline void emac_netif_stop(struct emac_instance *dev)
+{
+ netif_tx_lock_bh(dev->ndev);
+ netif_addr_lock(dev->ndev);
+ dev->no_mcast = 1;
+ netif_addr_unlock(dev->ndev);
+ netif_tx_unlock_bh(dev->ndev);
+ dev->ndev->trans_start = jiffies; /* prevent tx timeout */
+ mal_poll_disable(dev->mal, &dev->commac);
+ netif_tx_disable(dev->ndev);
+}
+
+static inline void emac_netif_start(struct emac_instance *dev)
+{
+ netif_tx_lock_bh(dev->ndev);
+ netif_addr_lock(dev->ndev);
+ dev->no_mcast = 0;
+ if (dev->mcast_pending && netif_running(dev->ndev))
+ __emac_set_multicast_list(dev);
+ netif_addr_unlock(dev->ndev);
+ netif_tx_unlock_bh(dev->ndev);
+
+ netif_wake_queue(dev->ndev);
+
+ /* NOTE: unconditional netif_wake_queue is only appropriate
+ * so long as all callers are assured to have free tx slots
+ * (taken from tg3... though the case where that is wrong is
+ * not terribly harmful)
+ */
+ mal_poll_enable(dev->mal, &dev->commac);
+}
+
+static inline void emac_rx_disable_async(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ u32 r;
+
+ DBG(dev, "rx_disable_async" NL);
+
+ r = in_be32(&p->mr0);
+ if (r & EMAC_MR0_RXE)
+ out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
+}
+
+static int emac_reset(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ int n = 20;
+
+ DBG(dev, "reset" NL);
+
+ if (!dev->reset_failed) {
+ /* A 40x erratum suggests stopping the RX channel before reset;
+ * we stop TX as well
+ */
+ emac_rx_disable(dev);
+ emac_tx_disable(dev);
+ }
+
+#ifdef CONFIG_PPC_DCR_NATIVE
+ /* Enable internal clock source */
+ if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
+ dcri_clrset(SDR0, SDR0_ETH_CFG,
+ 0, SDR0_ETH_CFG_ECS << dev->cell_index);
+#endif
+
+ out_be32(&p->mr0, EMAC_MR0_SRST);
+ while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
+ --n;
+
+#ifdef CONFIG_PPC_DCR_NATIVE
+ /* Enable external clock source */
+ if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
+ dcri_clrset(SDR0, SDR0_ETH_CFG,
+ SDR0_ETH_CFG_ECS << dev->cell_index, 0);
+#endif
+
+ if (n) {
+ dev->reset_failed = 0;
+ return 0;
+ } else {
+ emac_report_timeout_error(dev, "reset timeout");
+ dev->reset_failed = 1;
+ return -ETIMEDOUT;
+ }
+}
+
+static void emac_hash_mc(struct emac_instance *dev)
+{
+ const int regs = EMAC_XAHT_REGS(dev);
+ u32 *gaht_base = emac_gaht_base(dev);
+ u32 gaht_temp[regs];
+ struct netdev_hw_addr *ha;
+ int i;
+
+ DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
+
+ memset(gaht_temp, 0, sizeof (gaht_temp));
+
+ netdev_for_each_mc_addr(ha, dev->ndev) {
+ int slot, reg, mask;
+ DBG2(dev, "mc %pM" NL, ha->addr);
+
+ slot = EMAC_XAHT_CRC_TO_SLOT(dev,
+ ether_crc(ETH_ALEN, ha->addr));
+ reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
+ mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
+
+ gaht_temp[reg] |= mask;
+ }
+
+ for (i = 0; i < regs; i++)
+ out_be32(gaht_base + i, gaht_temp[i]);
+}
+
+static inline u32 emac_iff2rmr(struct net_device *ndev)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ u32 r;
+
+ r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
+
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+ r |= EMAC4_RMR_BASE;
+ else
+ r |= EMAC_RMR_BASE;
+
+ if (ndev->flags & IFF_PROMISC)
+ r |= EMAC_RMR_PME;
+ else if (ndev->flags & IFF_ALLMULTI ||
+ (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
+ r |= EMAC_RMR_PMME;
+ else if (!netdev_mc_empty(ndev))
+ r |= EMAC_RMR_MAE;
+
+ return r;
+}
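+
+/*
+ * The resulting policy, in short: IFF_PROMISC enables promiscuous mode
+ * (PME); IFF_ALLMULTI, or a multicast list too large for the hash
+ * table, passes all multicast (PMME); a non-empty list that still fits
+ * uses hash-based multicast filtering (MAE).
+ */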
+
+static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
+{
+ u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
+
+ DBG2(dev, "__emac_calc_base_mr1" NL);
+
+ switch (tx_size) {
+ case 2048:
+ ret |= EMAC_MR1_TFS_2K;
+ break;
+ default:
+ printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
+ dev->ndev->name, tx_size);
+ }
+
+ switch (rx_size) {
+ case 16384:
+ ret |= EMAC_MR1_RFS_16K;
+ break;
+ case 4096:
+ ret |= EMAC_MR1_RFS_4K;
+ break;
+ default:
+ printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
+ dev->ndev->name, rx_size);
+ }
+
+ return ret;
+}
+
+static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
+{
+ u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
+ EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
+
+ DBG2(dev, "__emac4_calc_base_mr1" NL);
+
+ switch (tx_size) {
+ case 16384:
+ ret |= EMAC4_MR1_TFS_16K;
+ break;
+ case 4096:
+ ret |= EMAC4_MR1_TFS_4K;
+ break;
+ case 2048:
+ ret |= EMAC4_MR1_TFS_2K;
+ break;
+ default:
+ printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
+ dev->ndev->name, tx_size);
+ }
+
+ switch (rx_size) {
+ case 16384:
+ ret |= EMAC4_MR1_RFS_16K;
+ break;
+ case 4096:
+ ret |= EMAC4_MR1_RFS_4K;
+ break;
+ case 2048:
+ ret |= EMAC4_MR1_RFS_2K;
+ break;
+ default:
+ printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
+ dev->ndev->name, rx_size);
+ }
+
+ return ret;
+}
+
+static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
+{
+ return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
+ __emac4_calc_base_mr1(dev, tx_size, rx_size) :
+ __emac_calc_base_mr1(dev, tx_size, rx_size);
+}
+
+static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
+{
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+ return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
+ else
+ return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
+}
+
+static inline u32 emac_calc_rwmr(struct emac_instance *dev,
+ unsigned int low, unsigned int high)
+{
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+ return (low << 22) | ((high & 0x3ff) << 6);
+ else
+ return (low << 23) | ((high & 0x1ff) << 7);
+}
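+
+/*
+ * Worked example (hypothetical sizes): emac_configure() below requests
+ * low = rx_size / 8 and high = rx_size / 4, both in FIFO entries. With
+ * a 4096-byte RX FIFO and 16-byte FIFO entries that gives low = 32 and
+ * high = 64, so on EMAC4 the register value is (32 << 22) | (64 << 6).
+ */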
+
+static int emac_configure(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ struct net_device *ndev = dev->ndev;
+ int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
+ u32 r, mr1 = 0;
+
+ DBG(dev, "configure" NL);
+
+ if (!link) {
+ out_be32(&p->mr1, in_be32(&p->mr1)
+ | EMAC_MR1_FDE | EMAC_MR1_ILE);
+ udelay(100);
+ } else if (emac_reset(dev) < 0)
+ return -ETIMEDOUT;
+
+ if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
+ tah_reset(dev->tah_dev);
+
+ DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
+ link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
+
+ /* Default fifo sizes */
+ tx_size = dev->tx_fifo_size;
+ rx_size = dev->rx_fifo_size;
+
+ /* No link, force loopback */
+ if (!link)
+ mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;
+
+ /* Check for full duplex */
+ else if (dev->phy.duplex == DUPLEX_FULL)
+ mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
+
+ /* Adjust fifo sizes, mr1 and timeouts based on link speed */
+ dev->stop_timeout = STOP_TIMEOUT_10;
+ switch (dev->phy.speed) {
+ case SPEED_1000:
+ if (emac_phy_gpcs(dev->phy.mode)) {
+ mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
+ (dev->phy.gpcs_address != 0xffffffff) ?
+ dev->phy.gpcs_address : dev->phy.address);
+
+ /* Put some arbitrary OUI, Manuf & Rev IDs so we can
+ * identify this GPCS PHY later.
+ */
+ out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
+ } else
+ mr1 |= EMAC_MR1_MF_1000;
+
+ /* Extended fifo sizes */
+ tx_size = dev->tx_fifo_size_gige;
+ rx_size = dev->rx_fifo_size_gige;
+
+ if (dev->ndev->mtu > ETH_DATA_LEN) {
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+ mr1 |= EMAC4_MR1_JPSM;
+ else
+ mr1 |= EMAC_MR1_JPSM;
+ dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
+ } else
+ dev->stop_timeout = STOP_TIMEOUT_1000;
+ break;
+ case SPEED_100:
+ mr1 |= EMAC_MR1_MF_100;
+ dev->stop_timeout = STOP_TIMEOUT_100;
+ break;
+ default: /* make gcc happy */
+ break;
+ }
+
+ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
+ rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
+ dev->phy.speed);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
+ zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
+
+ /* An erratum on 40x forces us to NOT use integrated flow control;
+ * let's hope it works on 44x ;)
+ */
+ if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
+ dev->phy.duplex == DUPLEX_FULL) {
+ if (dev->phy.pause)
+ mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
+ else if (dev->phy.asym_pause)
+ mr1 |= EMAC_MR1_APP;
+ }
+
+ /* Add base settings & fifo sizes & program MR1 */
+ mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
+ out_be32(&p->mr1, mr1);
+
+ /* Set individual MAC address */
+ out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
+ out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
+ (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
+ ndev->dev_addr[5]);
+
+ /* VLAN Tag Protocol ID */
+ out_be32(&p->vtpid, 0x8100);
+
+ /* Receive mode register */
+ r = emac_iff2rmr(ndev);
+ if (r & EMAC_RMR_MAE)
+ emac_hash_mc(dev);
+ out_be32(&p->rmr, r);
+
+ /* FIFOs thresholds */
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+ r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
+ tx_size / 2 / dev->fifo_entry_size);
+ else
+ r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
+ tx_size / 2 / dev->fifo_entry_size);
+ out_be32(&p->tmr1, r);
+ out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
+
+ /* A PAUSE frame is sent when the RX FIFO reaches its high-water mark;
+ there should still be enough space in the FIFO to allow our link
+ partner time to process this frame and also time to send a PAUSE
+ frame itself.
+
+ Here is the worst case scenario for the RX FIFO "headroom"
+ (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
+
+ 1) One maximum-length frame on TX 1522 bytes
+ 2) One PAUSE frame time 64 bytes
+ 3) PAUSE frame decode time allowance 64 bytes
+ 4) One maximum-length frame on RX 1522 bytes
+ 5) Round-trip propagation delay of the link (100Mb) 15 bytes
+ ----------
+ 3187 bytes
+
+ I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
+ low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
+ */
+ r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
+ rx_size / 4 / dev->fifo_entry_size);
+ out_be32(&p->rwmr, r);
+
+ /* Set PAUSE timer to the maximum */
+ out_be32(&p->ptr, 0xffff);
+
+ /* IRQ sources */
+ r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
+ EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
+ EMAC_ISR_IRE | EMAC_ISR_TE;
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+ r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
+ EMAC4_ISR_RXOE | */;
+ out_be32(&p->iser, r);
+
+ /* We need to take GPCS PHY out of isolate mode after EMAC reset */
+ if (emac_phy_gpcs(dev->phy.mode)) {
+ if (dev->phy.gpcs_address != 0xffffffff)
+ emac_mii_reset_gpcs(&dev->phy);
+ else
+ emac_mii_reset_phy(&dev->phy);
+ }
+
+ return 0;
+}
+
+static void emac_reinitialize(struct emac_instance *dev)
+{
+ DBG(dev, "reinitialize" NL);
+
+ emac_netif_stop(dev);
+ if (!emac_configure(dev)) {
+ emac_tx_enable(dev);
+ emac_rx_enable(dev);
+ }
+ emac_netif_start(dev);
+}
+
+static void emac_full_tx_reset(struct emac_instance *dev)
+{
+ DBG(dev, "full_tx_reset" NL);
+
+ emac_tx_disable(dev);
+ mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
+ emac_clean_tx_ring(dev);
+ dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
+
+ emac_configure(dev);
+
+ mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
+ emac_tx_enable(dev);
+ emac_rx_enable(dev);
+}
+
+static void emac_reset_work(struct work_struct *work)
+{
+ struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
+
+ DBG(dev, "reset_work" NL);
+
+ mutex_lock(&dev->link_lock);
+ if (dev->opened) {
+ emac_netif_stop(dev);
+ emac_full_tx_reset(dev);
+ emac_netif_start(dev);
+ }
+ mutex_unlock(&dev->link_lock);
+}
+
+static void emac_tx_timeout(struct net_device *ndev)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+
+ DBG(dev, "tx_timeout" NL);
+
+ schedule_work(&dev->reset_work);
+}
+
+static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
+{
+ int done = !!(stacr & EMAC_STACR_OC);
+
+ if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
+ done = !done;
+
+ return done;
+};
+
+static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ u32 r = 0;
+ int n, err = -ETIMEDOUT;
+
+ mutex_lock(&dev->mdio_lock);
+
+ DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
+
+ /* Enable proper MDIO port */
+ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
+ zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
+ rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
+
+ /* Wait for management interface to become idle */
+ n = 20;
+ while (!emac_phy_done(dev, in_be32(&p->stacr))) {
+ udelay(1);
+ if (!--n) {
+ DBG2(dev, " -> timeout wait idle\n");
+ goto bail;
+ }
+ }
+
+ /* Issue read command */
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+ r = EMAC4_STACR_BASE(dev->opb_bus_freq);
+ else
+ r = EMAC_STACR_BASE(dev->opb_bus_freq);
+ if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
+ r |= EMAC_STACR_OC;
+ if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
+ r |= EMACX_STACR_STAC_READ;
+ else
+ r |= EMAC_STACR_STAC_READ;
+ r |= (reg & EMAC_STACR_PRA_MASK)
+ | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
+ out_be32(&p->stacr, r);
+
+ /* Wait for read to complete */
+ n = 200;
+ while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
+ udelay(1);
+ if (!--n) {
+ DBG2(dev, " -> timeout wait complete\n");
+ goto bail;
+ }
+ }
+
+ if (unlikely(r & EMAC_STACR_PHYE)) {
+ DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
+ err = -EREMOTEIO;
+ goto bail;
+ }
+
+ r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
+
+ DBG2(dev, "mdio_read -> %04x" NL, r);
+ err = 0;
+ bail:
+ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
+ rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
+ zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
+ mutex_unlock(&dev->mdio_lock);
+
+ return err == 0 ? r : err;
+}
+
+static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
+ u16 val)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ u32 r = 0;
+ int n, err = -ETIMEDOUT;
+
+ mutex_lock(&dev->mdio_lock);
+
+ DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
+
+ /* Enable proper MDIO port */
+ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
+ zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
+ rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
+
+ /* Wait for management interface to be idle */
+ n = 20;
+ while (!emac_phy_done(dev, in_be32(&p->stacr))) {
+ udelay(1);
+ if (!--n) {
+ DBG2(dev, " -> timeout wait idle\n");
+ goto bail;
+ }
+ }
+
+ /* Issue write command */
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+ r = EMAC4_STACR_BASE(dev->opb_bus_freq);
+ else
+ r = EMAC_STACR_BASE(dev->opb_bus_freq);
+ if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
+ r |= EMAC_STACR_OC;
+ if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
+ r |= EMACX_STACR_STAC_WRITE;
+ else
+ r |= EMAC_STACR_STAC_WRITE;
+ r |= (reg & EMAC_STACR_PRA_MASK) |
+ ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
+ (val << EMAC_STACR_PHYD_SHIFT);
+ out_be32(&p->stacr, r);
+
+ /* Wait for write to complete */
+ n = 200;
+ while (!emac_phy_done(dev, in_be32(&p->stacr))) {
+ udelay(1);
+ if (!--n) {
+ DBG2(dev, " -> timeout wait complete\n");
+ goto bail;
+ }
+ }
+ err = 0;
+ bail:
+ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
+ rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
+ zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
+ mutex_unlock(&dev->mdio_lock);
+}
+
+static int emac_mdio_read(struct net_device *ndev, int id, int reg)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ int res;
+
+ res = __emac_mdio_read((dev->mdio_instance &&
+ dev->phy.gpcs_address != id) ?
+ dev->mdio_instance : dev,
+ (u8) id, (u8) reg);
+ return res;
+}
+
+static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+
+ __emac_mdio_write((dev->mdio_instance &&
+ dev->phy.gpcs_address != id) ?
+ dev->mdio_instance : dev,
+ (u8) id, (u8) reg, (u16) val);
+}
+
+/* Tx lock BH */
+static void __emac_set_multicast_list(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ u32 rmr = emac_iff2rmr(dev->ndev);
+
+ DBG(dev, "__multicast %08x" NL, rmr);
+
+ /* I decided to relax register access rules here to avoid
+ * a full EMAC reset.
+ *
+ * There is a real problem with EMAC4 core if we use MWSW_001 bit
+ * in MR1 register and do a full EMAC reset.
+ * One TX BD status update is delayed and, after EMAC reset, it
+ * never happens, resulting in a hung TX (it'll be recovered by the TX
+ * timeout handler eventually, but this is just gross).
+ * So we either have to do full TX reset or try to cheat here :)
+ *
+ * The only required change is to RX mode register, so I *think* all
+ * we need is just to stop RX channel. This seems to work on all
+ * tested SoCs. --ebs
+ *
+ * If we need the full reset, we might just trigger the workqueue
+ * and do it async... a bit nasty but should work --BenH
+ */
+ dev->mcast_pending = 0;
+ emac_rx_disable(dev);
+ if (rmr & EMAC_RMR_MAE)
+ emac_hash_mc(dev);
+ out_be32(&p->rmr, rmr);
+ emac_rx_enable(dev);
+}
+
+/* Tx lock BH */
+static void emac_set_multicast_list(struct net_device *ndev)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+
+ DBG(dev, "multicast" NL);
+
+ BUG_ON(!netif_running(dev->ndev));
+
+ if (dev->no_mcast) {
+ dev->mcast_pending = 1;
+ return;
+ }
+ __emac_set_multicast_list(dev);
+}
+
+static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
+{
+ int rx_sync_size = emac_rx_sync_size(new_mtu);
+ int rx_skb_size = emac_rx_skb_size(new_mtu);
+ int i, ret = 0;
+
+ mutex_lock(&dev->link_lock);
+ emac_netif_stop(dev);
+ emac_rx_disable(dev);
+ mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
+
+ if (dev->rx_sg_skb) {
+ ++dev->estats.rx_dropped_resize;
+ dev_kfree_skb(dev->rx_sg_skb);
+ dev->rx_sg_skb = NULL;
+ }
+
+ /* Make a first pass over RX ring and mark BDs ready, dropping
+ * non-processed packets on the way. We need this as a separate pass
+ * to simplify error recovery in the case of allocation failure later.
+ */
+ for (i = 0; i < NUM_RX_BUFF; ++i) {
+ if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
+ ++dev->estats.rx_dropped_resize;
+
+ dev->rx_desc[i].data_len = 0;
+ dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
+ (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
+ }
+
+ /* Reallocate RX ring only if bigger skb buffers are required */
+ if (rx_skb_size <= dev->rx_skb_size)
+ goto skip;
+
+ /* Second pass, allocate new skbs */
+ for (i = 0; i < NUM_RX_BUFF; ++i) {
+ struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto oom;
+ }
+
+ BUG_ON(!dev->rx_skb[i]);
+ dev_kfree_skb(dev->rx_skb[i]);
+
+ skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
+ dev->rx_desc[i].data_ptr =
+ dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
+ DMA_FROM_DEVICE) + 2;
+ dev->rx_skb[i] = skb;
+ }
+ skip:
+ /* Check if we need to change "Jumbo" bit in MR1 */
+ if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
+ /* This is to prevent starting RX channel in emac_rx_enable() */
+ set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
+
+ dev->ndev->mtu = new_mtu;
+ emac_full_tx_reset(dev);
+ }
+
+ mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
+ oom:
+ /* Restart RX */
+ clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
+ dev->rx_slot = 0;
+ mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
+ emac_rx_enable(dev);
+ emac_netif_start(dev);
+ mutex_unlock(&dev->link_lock);
+
+ return ret;
+}
+
+/* Process ctx, rtnl_lock semaphore */
+static int emac_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ int ret = 0;
+
+ if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
+ return -EINVAL;
+
+ DBG(dev, "change_mtu(%d)" NL, new_mtu);
+
+ if (netif_running(ndev)) {
+ /* Check if we really need to reinitialize RX ring */
+ if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
+ ret = emac_resize_rx_ring(dev, new_mtu);
+ }
+
+ if (!ret) {
+ ndev->mtu = new_mtu;
+ dev->rx_skb_size = emac_rx_skb_size(new_mtu);
+ dev->rx_sync_size = emac_rx_sync_size(new_mtu);
+ }
+
+ return ret;
+}
+
+static void emac_clean_tx_ring(struct emac_instance *dev)
+{
+ int i;
+
+ for (i = 0; i < NUM_TX_BUFF; ++i) {
+ if (dev->tx_skb[i]) {
+ dev_kfree_skb(dev->tx_skb[i]);
+ dev->tx_skb[i] = NULL;
+ if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
+ ++dev->estats.tx_dropped;
+ }
+ dev->tx_desc[i].ctrl = 0;
+ dev->tx_desc[i].data_ptr = 0;
+ }
+}
+
+static void emac_clean_rx_ring(struct emac_instance *dev)
+{
+ int i;
+
+ for (i = 0; i < NUM_RX_BUFF; ++i)
+ if (dev->rx_skb[i]) {
+ dev->rx_desc[i].ctrl = 0;
+ dev_kfree_skb(dev->rx_skb[i]);
+ dev->rx_skb[i] = NULL;
+ dev->rx_desc[i].data_ptr = 0;
+ }
+
+ if (dev->rx_sg_skb) {
+ dev_kfree_skb(dev->rx_sg_skb);
+ dev->rx_sg_skb = NULL;
+ }
+}
+
+static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
+ gfp_t flags)
+{
+ struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ dev->rx_skb[slot] = skb;
+ dev->rx_desc[slot].data_len = 0;
+
+ skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
+ dev->rx_desc[slot].data_ptr =
+ dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
+ DMA_FROM_DEVICE) + 2;
+ wmb();
+ dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
+ (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
+
+ return 0;
+}
+
+static void emac_print_link_status(struct emac_instance *dev)
+{
+ if (netif_carrier_ok(dev->ndev))
+ printk(KERN_INFO "%s: link is up, %d %s%s\n",
+ dev->ndev->name, dev->phy.speed,
+ dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
+ dev->phy.pause ? ", pause enabled" :
+ dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
+ else
+ printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
+}
+
+/* Process ctx, rtnl_lock semaphore */
+static int emac_open(struct net_device *ndev)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ int err, i;
+
+ DBG(dev, "open" NL);
+
+ /* Setup error IRQ handler */
+ err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
+ if (err) {
+ printk(KERN_ERR "%s: failed to request IRQ %d\n",
+ ndev->name, dev->emac_irq);
+ return err;
+ }
+
+ /* Allocate RX ring */
+ for (i = 0; i < NUM_RX_BUFF; ++i)
+ if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
+ printk(KERN_ERR "%s: failed to allocate RX ring\n",
+ ndev->name);
+ goto oom;
+ }
+
+ dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
+ clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
+ dev->rx_sg_skb = NULL;
+
+ mutex_lock(&dev->link_lock);
+ dev->opened = 1;
+
+ /* Start PHY polling now.
+ */
+ if (dev->phy.address >= 0) {
+ int link_poll_interval;
+ if (dev->phy.def->ops->poll_link(&dev->phy)) {
+ dev->phy.def->ops->read_link(&dev->phy);
+ emac_rx_clk_default(dev);
+ netif_carrier_on(dev->ndev);
+ link_poll_interval = PHY_POLL_LINK_ON;
+ } else {
+ emac_rx_clk_tx(dev);
+ netif_carrier_off(dev->ndev);
+ link_poll_interval = PHY_POLL_LINK_OFF;
+ }
+ dev->link_polling = 1;
+ wmb();
+ schedule_delayed_work(&dev->link_work, link_poll_interval);
+ emac_print_link_status(dev);
+ } else
+ netif_carrier_on(dev->ndev);
+
+ /* Required for Pause packet support in EMAC */
+ dev_mc_add_global(ndev, default_mcast_addr);
+
+ emac_configure(dev);
+ mal_poll_add(dev->mal, &dev->commac);
+ mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
+ mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
+ mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
+ emac_tx_enable(dev);
+ emac_rx_enable(dev);
+ emac_netif_start(dev);
+
+ mutex_unlock(&dev->link_lock);
+
+ return 0;
+ oom:
+ emac_clean_rx_ring(dev);
+ free_irq(dev->emac_irq, dev);
+
+ return -ENOMEM;
+}
+
+/* BHs disabled */
+#if 0
+static int emac_link_differs(struct emac_instance *dev)
+{
+ u32 r = in_be32(&dev->emacp->mr1);
+
+ int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
+ int speed, pause, asym_pause;
+
+ if (r & EMAC_MR1_MF_1000)
+ speed = SPEED_1000;
+ else if (r & EMAC_MR1_MF_100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
+ switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
+ case (EMAC_MR1_EIFC | EMAC_MR1_APP):
+ pause = 1;
+ asym_pause = 0;
+ break;
+ case EMAC_MR1_APP:
+ pause = 0;
+ asym_pause = 1;
+ break;
+ default:
+ pause = asym_pause = 0;
+ }
+ return speed != dev->phy.speed || duplex != dev->phy.duplex ||
+ pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
+}
+#endif
+
+static void emac_link_timer(struct work_struct *work)
+{
+ struct emac_instance *dev =
+ container_of(to_delayed_work(work),
+ struct emac_instance, link_work);
+ int link_poll_interval;
+
+ mutex_lock(&dev->link_lock);
+ DBG2(dev, "link timer" NL);
+
+ if (!dev->opened)
+ goto bail;
+
+ if (dev->phy.def->ops->poll_link(&dev->phy)) {
+ if (!netif_carrier_ok(dev->ndev)) {
+ emac_rx_clk_default(dev);
+ /* Get new link parameters */
+ dev->phy.def->ops->read_link(&dev->phy);
+
+ netif_carrier_on(dev->ndev);
+ emac_netif_stop(dev);
+ emac_full_tx_reset(dev);
+ emac_netif_start(dev);
+ emac_print_link_status(dev);
+ }
+ link_poll_interval = PHY_POLL_LINK_ON;
+ } else {
+ if (netif_carrier_ok(dev->ndev)) {
+ emac_rx_clk_tx(dev);
+ netif_carrier_off(dev->ndev);
+ netif_tx_disable(dev->ndev);
+ emac_reinitialize(dev);
+ emac_print_link_status(dev);
+ }
+ link_poll_interval = PHY_POLL_LINK_OFF;
+ }
+ schedule_delayed_work(&dev->link_work, link_poll_interval);
+ bail:
+ mutex_unlock(&dev->link_lock);
+}
+
+static void emac_force_link_update(struct emac_instance *dev)
+{
+ netif_carrier_off(dev->ndev);
+ smp_rmb();
+ if (dev->link_polling) {
+ cancel_delayed_work_sync(&dev->link_work);
+ if (dev->link_polling)
+ schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
+ }
+}
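+
+/* Ordering in emac_force_link_update(), as far as I can tell: carrier is
+ * forced off first so the next timer pass sees a state change and re-reads
+ * the PHY; the smp_rmb() pairs with the wmb() in emac_open() that orders
+ * link_polling against the first schedule_delayed_work(); and link_polling
+ * is re-checked after the cancel in case emac_close() cleared it meanwhile.
+ */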
+
+/* Process context, rtnl_lock held */
+static int emac_close(struct net_device *ndev)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+
+ DBG(dev, "close" NL);
+
+ if (dev->phy.address >= 0) {
+ dev->link_polling = 0;
+ cancel_delayed_work_sync(&dev->link_work);
+ }
+ mutex_lock(&dev->link_lock);
+ emac_netif_stop(dev);
+ dev->opened = 0;
+ mutex_unlock(&dev->link_lock);
+
+ emac_rx_disable(dev);
+ emac_tx_disable(dev);
+ mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
+ mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
+ mal_poll_del(dev->mal, &dev->commac);
+
+ emac_clean_tx_ring(dev);
+ emac_clean_rx_ring(dev);
+
+ free_irq(dev->emac_irq, dev);
+
+ netif_carrier_off(ndev);
+
+ return 0;
+}
+
+static inline u16 emac_tx_csum(struct emac_instance *dev,
+ struct sk_buff *skb)
+{
+ if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
+ (skb->ip_summed == CHECKSUM_PARTIAL)) {
+ ++dev->stats.tx_packets_csum;
+ return EMAC_TX_CTRL_TAH_CSUM;
+ }
+ return 0;
+}
+
+static inline int emac_xmit_finish(struct emac_instance *dev, int len)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ struct net_device *ndev = dev->ndev;
+
+ /* Send the packet out. If this branch ever shows up as a significant
+ * perf cost, the TMR0 value could be cached in "dev" instead.
+ */
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+ out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
+ else
+ out_be32(&p->tmr0, EMAC_TMR0_XMIT);
+
+ if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
+ netif_stop_queue(ndev);
+ DBG2(dev, "stopped TX queue" NL);
+ }
+
+ ndev->trans_start = jiffies;
+ ++dev->stats.tx_packets;
+ dev->stats.tx_bytes += len;
+
+ return NETDEV_TX_OK;
+}
+
+/* Tx lock BH */
+static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ unsigned int len = skb->len;
+ int slot;
+
+ u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
+ MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
+
+ slot = dev->tx_slot++;
+ if (dev->tx_slot == NUM_TX_BUFF) {
+ dev->tx_slot = 0;
+ ctrl |= MAL_TX_CTRL_WRAP;
+ }
+
+ DBG2(dev, "xmit(%u) %d" NL, len, slot);
+
+ dev->tx_skb[slot] = skb;
+ dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
+ skb->data, len,
+ DMA_TO_DEVICE);
+ dev->tx_desc[slot].data_len = (u16) len;
+ wmb();
+ dev->tx_desc[slot].ctrl = ctrl;
+
+ return emac_xmit_finish(dev, len);
+}
+
+static inline int emac_xmit_split(struct emac_instance *dev, int slot,
+ u32 pd, int len, int last, u16 base_ctrl)
+{
+ while (1) {
+ u16 ctrl = base_ctrl;
+ int chunk = min(len, MAL_MAX_TX_SIZE);
+ len -= chunk;
+
+ slot = (slot + 1) % NUM_TX_BUFF;
+
+ if (last && !len)
+ ctrl |= MAL_TX_CTRL_LAST;
+ if (slot == NUM_TX_BUFF - 1)
+ ctrl |= MAL_TX_CTRL_WRAP;
+
+ dev->tx_skb[slot] = NULL;
+ dev->tx_desc[slot].data_ptr = pd;
+ dev->tx_desc[slot].data_len = (u16) chunk;
+ dev->tx_desc[slot].ctrl = ctrl;
+ ++dev->tx_cnt;
+
+ if (!len)
+ break;
+
+ pd += chunk;
+ }
+ return slot;
+}
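+
+/* For illustration (round numbers assumed; the real MAL_MAX_TX_SIZE lives
+ * in mal.h): with MAL_MAX_TX_SIZE == 4096, a 9000-byte linear skb entering
+ * emac_start_xmit_sg() needs three descriptors: the caller's slot takes the
+ * first 4096 bytes, and emac_xmit_split() adds a 4096- and an 808-byte
+ * chunk, setting MAL_TX_CTRL_LAST on the final one.
+ */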
+
+/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
+static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int len = skb->len, chunk;
+ int slot, i;
+ u16 ctrl;
+ u32 pd;
+
+ /* This is common "fast" path */
+ if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
+ return emac_start_xmit(skb, ndev);
+
+ len -= skb->data_len;
+
+ /* Note, this is only an *estimate*; we can still run out of empty
+ * slots because of the additional fragmentation into
+ * MAL_MAX_TX_SIZE-sized chunks
+ */
+ if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
+ goto stop_queue;
+
+ ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
+ emac_tx_csum(dev, skb);
+ slot = dev->tx_slot;
+
+ /* skb data */
+ dev->tx_skb[slot] = NULL;
+ chunk = min(len, MAL_MAX_TX_SIZE);
+ dev->tx_desc[slot].data_ptr = pd =
+ dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
+ dev->tx_desc[slot].data_len = (u16) chunk;
+ len -= chunk;
+ if (unlikely(len))
+ slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
+ ctrl);
+ /* skb fragments */
+ for (i = 0; i < nr_frags; ++i) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ len = frag->size;
+
+ if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
+ goto undo_frame;
+
+ pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
+ DMA_TO_DEVICE);
+
+ slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
+ ctrl);
+ }
+
+ DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
+
+ /* Attach skb to the last slot so we don't release it too early */
+ dev->tx_skb[slot] = skb;
+
+ /* Send the packet out */
+ if (dev->tx_slot == NUM_TX_BUFF - 1)
+ ctrl |= MAL_TX_CTRL_WRAP;
+ wmb();
+ dev->tx_desc[dev->tx_slot].ctrl = ctrl;
+ dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
+
+ return emac_xmit_finish(dev, skb->len);
+
+ undo_frame:
+ /* Well, too bad. Our previous estimate was overly optimistic.
+ * Undo everything.
+ */
+ while (slot != dev->tx_slot) {
+ dev->tx_desc[slot].ctrl = 0;
+ --dev->tx_cnt;
+ if (--slot < 0)
+ slot = NUM_TX_BUFF - 1;
+ }
+ ++dev->estats.tx_undo;
+
+ stop_queue:
+ netif_stop_queue(ndev);
+ DBG2(dev, "stopped TX queue" NL);
+ return NETDEV_TX_BUSY;
+}
+
+/* Tx lock BHs */
+static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
+{
+ struct emac_error_stats *st = &dev->estats;
+
+ DBG(dev, "BD TX error %04x" NL, ctrl);
+
+ ++st->tx_bd_errors;
+ if (ctrl & EMAC_TX_ST_BFCS)
+ ++st->tx_bd_bad_fcs;
+ if (ctrl & EMAC_TX_ST_LCS)
+ ++st->tx_bd_carrier_loss;
+ if (ctrl & EMAC_TX_ST_ED)
+ ++st->tx_bd_excessive_deferral;
+ if (ctrl & EMAC_TX_ST_EC)
+ ++st->tx_bd_excessive_collisions;
+ if (ctrl & EMAC_TX_ST_LC)
+ ++st->tx_bd_late_collision;
+ if (ctrl & EMAC_TX_ST_MC)
+ ++st->tx_bd_multple_collisions;
+ if (ctrl & EMAC_TX_ST_SC)
+ ++st->tx_bd_single_collision;
+ if (ctrl & EMAC_TX_ST_UR)
+ ++st->tx_bd_underrun;
+ if (ctrl & EMAC_TX_ST_SQE)
+ ++st->tx_bd_sqe;
+}
+
+static void emac_poll_tx(void *param)
+{
+ struct emac_instance *dev = param;
+ u32 bad_mask;
+
+ DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
+
+ if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
+ bad_mask = EMAC_IS_BAD_TX_TAH;
+ else
+ bad_mask = EMAC_IS_BAD_TX;
+
+ netif_tx_lock_bh(dev->ndev);
+ if (dev->tx_cnt) {
+ u16 ctrl;
+ int slot = dev->ack_slot, n = 0;
+ again:
+ ctrl = dev->tx_desc[slot].ctrl;
+ if (!(ctrl & MAL_TX_CTRL_READY)) {
+ struct sk_buff *skb = dev->tx_skb[slot];
+ ++n;
+
+ if (skb) {
+ dev_kfree_skb(skb);
+ dev->tx_skb[slot] = NULL;
+ }
+ slot = (slot + 1) % NUM_TX_BUFF;
+
+ if (unlikely(ctrl & bad_mask))
+ emac_parse_tx_error(dev, ctrl);
+
+ if (--dev->tx_cnt)
+ goto again;
+ }
+ if (n) {
+ dev->ack_slot = slot;
+ if (netif_queue_stopped(dev->ndev) &&
+ dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
+ netif_wake_queue(dev->ndev);
+
+ DBG2(dev, "tx %d pkts" NL, n);
+ }
+ }
+ netif_tx_unlock_bh(dev->ndev);
+}
+
+static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
+ int len)
+{
+ struct sk_buff *skb = dev->rx_skb[slot];
+
+ DBG2(dev, "recycle %d %d" NL, slot, len);
+
+ if (len)
+ dma_map_single(&dev->ofdev->dev, skb->data - 2,
+ EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
+
+ dev->rx_desc[slot].data_len = 0;
+ wmb();
+ dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
+ (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
+}
+
+static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
+{
+ struct emac_error_stats *st = &dev->estats;
+
+ DBG(dev, "BD RX error %04x" NL, ctrl);
+
+ ++st->rx_bd_errors;
+ if (ctrl & EMAC_RX_ST_OE)
+ ++st->rx_bd_overrun;
+ if (ctrl & EMAC_RX_ST_BP)
+ ++st->rx_bd_bad_packet;
+ if (ctrl & EMAC_RX_ST_RP)
+ ++st->rx_bd_runt_packet;
+ if (ctrl & EMAC_RX_ST_SE)
+ ++st->rx_bd_short_event;
+ if (ctrl & EMAC_RX_ST_AE)
+ ++st->rx_bd_alignment_error;
+ if (ctrl & EMAC_RX_ST_BFCS)
+ ++st->rx_bd_bad_fcs;
+ if (ctrl & EMAC_RX_ST_PTL)
+ ++st->rx_bd_packet_too_long;
+ if (ctrl & EMAC_RX_ST_ORE)
+ ++st->rx_bd_out_of_range;
+ if (ctrl & EMAC_RX_ST_IRE)
+ ++st->rx_bd_in_range;
+}
+
+static inline void emac_rx_csum(struct emac_instance *dev,
+ struct sk_buff *skb, u16 ctrl)
+{
+#ifdef CONFIG_IBM_EMAC_TAH
+ if (!ctrl && dev->tah_dev) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ ++dev->stats.rx_packets_csum;
+ }
+#endif
+}
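+
+/* By the time emac_rx_csum() runs, the caller has already masked ctrl with
+ * EMAC_BAD_RX_MASK and dropped everything except clean frames and
+ * EMAC_RX_TAH_BAD_CSUM, so ctrl == 0 here means the TAH verified the
+ * checksum; anything else leaves ip_summed as CHECKSUM_NONE and the stack
+ * verifies it in software.
+ */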
+
+static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
+{
+ if (likely(dev->rx_sg_skb != NULL)) {
+ int len = dev->rx_desc[slot].data_len;
+ int tot_len = dev->rx_sg_skb->len + len;
+
+ if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
+ ++dev->estats.rx_dropped_mtu;
+ dev_kfree_skb(dev->rx_sg_skb);
+ dev->rx_sg_skb = NULL;
+ } else {
+ cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
+ dev->rx_skb[slot]->data, len);
+ skb_put(dev->rx_sg_skb, len);
+ emac_recycle_rx_skb(dev, slot, len);
+ return 0;
+ }
+ }
+ emac_recycle_rx_skb(dev, slot, 0);
+ return -1;
+}
+
+/* NAPI poll context */
+static int emac_poll_rx(void *param, int budget)
+{
+ struct emac_instance *dev = param;
+ int slot = dev->rx_slot, received = 0;
+
+ DBG2(dev, "poll_rx(%d)" NL, budget);
+
+ again:
+ while (budget > 0) {
+ int len;
+ struct sk_buff *skb;
+ u16 ctrl = dev->rx_desc[slot].ctrl;
+
+ if (ctrl & MAL_RX_CTRL_EMPTY)
+ break;
+
+ skb = dev->rx_skb[slot];
+ mb();
+ len = dev->rx_desc[slot].data_len;
+
+ if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
+ goto sg;
+
+ ctrl &= EMAC_BAD_RX_MASK;
+ if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
+ emac_parse_rx_error(dev, ctrl);
+ ++dev->estats.rx_dropped_error;
+ emac_recycle_rx_skb(dev, slot, 0);
+ len = 0;
+ goto next;
+ }
+
+ if (len < ETH_HLEN) {
+ ++dev->estats.rx_dropped_stack;
+ emac_recycle_rx_skb(dev, slot, len);
+ goto next;
+ }
+
+ if (len && len < EMAC_RX_COPY_THRESH) {
+ struct sk_buff *copy_skb =
+ alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
+ if (unlikely(!copy_skb))
+ goto oom;
+
+ skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
+ cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
+ len + 2);
+ emac_recycle_rx_skb(dev, slot, len);
+ skb = copy_skb;
+ } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
+ goto oom;
+
+ skb_put(skb, len);
+ push_packet:
+ skb->protocol = eth_type_trans(skb, dev->ndev);
+ emac_rx_csum(dev, skb, ctrl);
+
+ if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
+ ++dev->estats.rx_dropped_stack;
+ next:
+ ++dev->stats.rx_packets;
+ skip:
+ dev->stats.rx_bytes += len;
+ slot = (slot + 1) % NUM_RX_BUFF;
+ --budget;
+ ++received;
+ continue;
+ sg:
+ if (ctrl & MAL_RX_CTRL_FIRST) {
+ BUG_ON(dev->rx_sg_skb);
+ if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
+ DBG(dev, "rx OOM %d" NL, slot);
+ ++dev->estats.rx_dropped_oom;
+ emac_recycle_rx_skb(dev, slot, 0);
+ } else {
+ dev->rx_sg_skb = skb;
+ skb_put(skb, len);
+ }
+ } else if (!emac_rx_sg_append(dev, slot) &&
+ (ctrl & MAL_RX_CTRL_LAST)) {
+
+ skb = dev->rx_sg_skb;
+ dev->rx_sg_skb = NULL;
+
+ ctrl &= EMAC_BAD_RX_MASK;
+ if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
+ emac_parse_rx_error(dev, ctrl);
+ ++dev->estats.rx_dropped_error;
+ dev_kfree_skb(skb);
+ len = 0;
+ } else
+ goto push_packet;
+ }
+ goto skip;
+ oom:
+ DBG(dev, "rx OOM %d" NL, slot);
+ /* Drop the packet and recycle skb */
+ ++dev->estats.rx_dropped_oom;
+ emac_recycle_rx_skb(dev, slot, 0);
+ goto next;
+ }
+
+ if (received) {
+ DBG2(dev, "rx %d BDs" NL, received);
+ dev->rx_slot = slot;
+ }
+
+ if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
+ mb();
+ if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
+ DBG2(dev, "rx restart" NL);
+ received = 0;
+ goto again;
+ }
+
+ if (dev->rx_sg_skb) {
+ DBG2(dev, "dropping partial rx packet" NL);
+ ++dev->estats.rx_dropped_error;
+ dev_kfree_skb(dev->rx_sg_skb);
+ dev->rx_sg_skb = NULL;
+ }
+
+ clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
+ mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
+ emac_rx_enable(dev);
+ dev->rx_slot = 0;
+ }
+ return received;
+}
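+
+/* The restart block above handles the case where emac_rxde() async-stopped
+ * the channel after a descriptor error: if budget remains and a descriptor
+ * is ready again, processing loops back; otherwise any half-assembled SG
+ * packet is dropped and the channel is re-enabled. rx_slot is rewound to 0
+ * because, as I read it, the MAL channel pointer restarts from the first
+ * descriptor when the channel is re-enabled.
+ */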
+
+/* NAPI poll context */
+static int emac_peek_rx(void *param)
+{
+ struct emac_instance *dev = param;
+
+ return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
+}
+
+/* NAPI poll context */
+static int emac_peek_rx_sg(void *param)
+{
+ struct emac_instance *dev = param;
+
+ int slot = dev->rx_slot;
+ while (1) {
+ u16 ctrl = dev->rx_desc[slot].ctrl;
+ if (ctrl & MAL_RX_CTRL_EMPTY)
+ return 0;
+ else if (ctrl & MAL_RX_CTRL_LAST)
+ return 1;
+
+ slot = (slot + 1) % NUM_RX_BUFF;
+
+ /* I'm just being paranoid here :) */
+ if (unlikely(slot == dev->rx_slot))
+ return 0;
+ }
+}
+
+/* Hard IRQ */
+static void emac_rxde(void *param)
+{
+ struct emac_instance *dev = param;
+
+ ++dev->estats.rx_stopped;
+ emac_rx_disable_async(dev);
+}
+
+/* Hard IRQ */
+static irqreturn_t emac_irq(int irq, void *dev_instance)
+{
+ struct emac_instance *dev = dev_instance;
+ struct emac_regs __iomem *p = dev->emacp;
+ struct emac_error_stats *st = &dev->estats;
+ u32 isr;
+
+ spin_lock(&dev->lock);
+
+ isr = in_be32(&p->isr);
+ out_be32(&p->isr, isr);
+
+ DBG(dev, "isr = %08x" NL, isr);
+
+ if (isr & EMAC4_ISR_TXPE)
+ ++st->tx_parity;
+ if (isr & EMAC4_ISR_RXPE)
+ ++st->rx_parity;
+ if (isr & EMAC4_ISR_TXUE)
+ ++st->tx_underrun;
+ if (isr & EMAC4_ISR_RXOE)
+ ++st->rx_fifo_overrun;
+ if (isr & EMAC_ISR_OVR)
+ ++st->rx_overrun;
+ if (isr & EMAC_ISR_BP)
+ ++st->rx_bad_packet;
+ if (isr & EMAC_ISR_RP)
+ ++st->rx_runt_packet;
+ if (isr & EMAC_ISR_SE)
+ ++st->rx_short_event;
+ if (isr & EMAC_ISR_ALE)
+ ++st->rx_alignment_error;
+ if (isr & EMAC_ISR_BFCS)
+ ++st->rx_bad_fcs;
+ if (isr & EMAC_ISR_PTLE)
+ ++st->rx_packet_too_long;
+ if (isr & EMAC_ISR_ORE)
+ ++st->rx_out_of_range;
+ if (isr & EMAC_ISR_IRE)
+ ++st->rx_in_range;
+ if (isr & EMAC_ISR_SQE)
+ ++st->tx_sqe;
+ if (isr & EMAC_ISR_TE)
+ ++st->tx_errors;
+
+ spin_unlock(&dev->lock);
+
+ return IRQ_HANDLED;
+}
+
+static struct net_device_stats *emac_stats(struct net_device *ndev)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ struct emac_stats *st = &dev->stats;
+ struct emac_error_stats *est = &dev->estats;
+ struct net_device_stats *nst = &dev->nstats;
+ unsigned long flags;
+
+ DBG2(dev, "stats" NL);
+
+ /* Compute "legacy" statistics */
+ spin_lock_irqsave(&dev->lock, flags);
+ nst->rx_packets = (unsigned long)st->rx_packets;
+ nst->rx_bytes = (unsigned long)st->rx_bytes;
+ nst->tx_packets = (unsigned long)st->tx_packets;
+ nst->tx_bytes = (unsigned long)st->tx_bytes;
+ nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
+ est->rx_dropped_error +
+ est->rx_dropped_resize +
+ est->rx_dropped_mtu);
+ nst->tx_dropped = (unsigned long)est->tx_dropped;
+
+ nst->rx_errors = (unsigned long)est->rx_bd_errors;
+ nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
+ est->rx_fifo_overrun +
+ est->rx_overrun);
+ nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
+ est->rx_alignment_error);
+ nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
+ est->rx_bad_fcs);
+ nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
+ est->rx_bd_short_event +
+ est->rx_bd_packet_too_long +
+ est->rx_bd_out_of_range +
+ est->rx_bd_in_range +
+ est->rx_runt_packet +
+ est->rx_short_event +
+ est->rx_packet_too_long +
+ est->rx_out_of_range +
+ est->rx_in_range);
+
+ nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
+ nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
+ est->tx_underrun);
+ nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
+ nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
+ est->tx_bd_excessive_collisions +
+ est->tx_bd_late_collision +
+ est->tx_bd_multple_collisions);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return nst;
+}
+
+static struct mal_commac_ops emac_commac_ops = {
+ .poll_tx = &emac_poll_tx,
+ .poll_rx = &emac_poll_rx,
+ .peek_rx = &emac_peek_rx,
+ .rxde = &emac_rxde,
+};
+
+static struct mal_commac_ops emac_commac_sg_ops = {
+ .poll_tx = &emac_poll_tx,
+ .poll_rx = &emac_poll_rx,
+ .peek_rx = &emac_peek_rx_sg,
+ .rxde = &emac_rxde,
+};
+
+/* Ethtool support */
+static int emac_ethtool_get_settings(struct net_device *ndev,
+ struct ethtool_cmd *cmd)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+
+ cmd->supported = dev->phy.features;
+ cmd->port = PORT_MII;
+ cmd->phy_address = dev->phy.address;
+ cmd->transceiver =
+ dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
+
+ mutex_lock(&dev->link_lock);
+ cmd->advertising = dev->phy.advertising;
+ cmd->autoneg = dev->phy.autoneg;
+ cmd->speed = dev->phy.speed;
+ cmd->duplex = dev->phy.duplex;
+ mutex_unlock(&dev->link_lock);
+
+ return 0;
+}
+
+static int emac_ethtool_set_settings(struct net_device *ndev,
+ struct ethtool_cmd *cmd)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ u32 f = dev->phy.features;
+
+ DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
+ cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
+
+ /* Basic sanity checks */
+ if (dev->phy.address < 0)
+ return -EOPNOTSUPP;
+ if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+ if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
+ return -EINVAL;
+ if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_DISABLE) {
+ switch (cmd->speed) {
+ case SPEED_10:
+ if (cmd->duplex == DUPLEX_HALF &&
+ !(f & SUPPORTED_10baseT_Half))
+ return -EINVAL;
+ if (cmd->duplex == DUPLEX_FULL &&
+ !(f & SUPPORTED_10baseT_Full))
+ return -EINVAL;
+ break;
+ case SPEED_100:
+ if (cmd->duplex == DUPLEX_HALF &&
+ !(f & SUPPORTED_100baseT_Half))
+ return -EINVAL;
+ if (cmd->duplex == DUPLEX_FULL &&
+ !(f & SUPPORTED_100baseT_Full))
+ return -EINVAL;
+ break;
+ case SPEED_1000:
+ if (cmd->duplex == DUPLEX_HALF &&
+ !(f & SUPPORTED_1000baseT_Half))
+ return -EINVAL;
+ if (cmd->duplex == DUPLEX_FULL &&
+ !(f & SUPPORTED_1000baseT_Full))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->link_lock);
+ dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
+ cmd->duplex);
+ mutex_unlock(&dev->link_lock);
+
+ } else {
+ if (!(f & SUPPORTED_Autoneg))
+ return -EINVAL;
+
+ mutex_lock(&dev->link_lock);
+ dev->phy.def->ops->setup_aneg(&dev->phy,
+ (cmd->advertising & f) |
+ (dev->phy.advertising &
+ (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause)));
+ mutex_unlock(&dev->link_lock);
+ }
+ emac_force_link_update(dev);
+
+ return 0;
+}
+
+static void emac_ethtool_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *rp)
+{
+ rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
+ rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
+}
+
+static void emac_ethtool_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pp)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+
+ mutex_lock(&dev->link_lock);
+ if ((dev->phy.features & SUPPORTED_Autoneg) &&
+ (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
+ pp->autoneg = 1;
+
+ if (dev->phy.duplex == DUPLEX_FULL) {
+ if (dev->phy.pause)
+ pp->rx_pause = pp->tx_pause = 1;
+ else if (dev->phy.asym_pause)
+ pp->tx_pause = 1;
+ }
+ mutex_unlock(&dev->link_lock);
+}
+
+static int emac_get_regs_len(struct emac_instance *dev)
+{
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4))
+ return sizeof(struct emac_ethtool_regs_subhdr) +
+ EMAC4_ETHTOOL_REGS_SIZE(dev);
+ else
+ return sizeof(struct emac_ethtool_regs_subhdr) +
+ EMAC_ETHTOOL_REGS_SIZE(dev);
+}
+
+static int emac_ethtool_get_regs_len(struct net_device *ndev)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ int size;
+
+ size = sizeof(struct emac_ethtool_regs_hdr) +
+ emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
+ size += zmii_get_regs_len(dev->zmii_dev);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
+ size += rgmii_get_regs_len(dev->rgmii_dev);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
+ size += tah_get_regs_len(dev->tah_dev);
+
+ return size;
+}
+
+static void *emac_dump_regs(struct emac_instance *dev, void *buf)
+{
+ struct emac_ethtool_regs_subhdr *hdr = buf;
+
+ hdr->index = dev->cell_index;
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
+ hdr->version = EMAC4_ETHTOOL_REGS_VER;
+ memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
+ return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
+ } else {
+ hdr->version = EMAC_ETHTOOL_REGS_VER;
+ memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
+ return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
+ }
+}
+
+static void emac_ethtool_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *buf)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ struct emac_ethtool_regs_hdr *hdr = buf;
+
+ hdr->components = 0;
+ buf = hdr + 1;
+
+ buf = mal_dump_regs(dev->mal, buf);
+ buf = emac_dump_regs(dev, buf);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
+ hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
+ buf = zmii_dump_regs(dev->zmii_dev, buf);
+ }
+ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
+ hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
+ buf = rgmii_dump_regs(dev->rgmii_dev, buf);
+ }
+ if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
+ hdr->components |= EMAC_ETHTOOL_REGS_TAH;
+ buf = tah_dump_regs(dev->tah_dev, buf);
+ }
+}
+
+static int emac_ethtool_nway_reset(struct net_device *ndev)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ int res = 0;
+
+ DBG(dev, "nway_reset" NL);
+
+ if (dev->phy.address < 0)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&dev->link_lock);
+ if (!dev->phy.autoneg) {
+ res = -EINVAL;
+ goto out;
+ }
+
+ dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
+ out:
+ mutex_unlock(&dev->link_lock);
+ emac_force_link_update(dev);
+ return res;
+}
+
+static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return EMAC_ETHTOOL_STATS_COUNT;
+ else
+ return -EINVAL;
+}
+
+static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
+ u8 *buf)
+{
+ if (stringset == ETH_SS_STATS)
+ memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
+}
+
+static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *estats,
+ u64 *tmp_stats)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+
+ memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
+ tmp_stats += sizeof(dev->stats) / sizeof(u64);
+ memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
+}
+
+static void emac_ethtool_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+
+ strcpy(info->driver, "ibm_emac");
+ strcpy(info->version, DRV_VERSION);
+ info->fw_version[0] = '\0';
+ sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
+ dev->cell_index, dev->ofdev->dev.of_node->full_name);
+ info->regdump_len = emac_ethtool_get_regs_len(ndev);
+}
+
+static const struct ethtool_ops emac_ethtool_ops = {
+ .get_settings = emac_ethtool_get_settings,
+ .set_settings = emac_ethtool_set_settings,
+ .get_drvinfo = emac_ethtool_get_drvinfo,
+
+ .get_regs_len = emac_ethtool_get_regs_len,
+ .get_regs = emac_ethtool_get_regs,
+
+ .nway_reset = emac_ethtool_nway_reset,
+
+ .get_ringparam = emac_ethtool_get_ringparam,
+ .get_pauseparam = emac_ethtool_get_pauseparam,
+
+ .get_strings = emac_ethtool_get_strings,
+ .get_sset_count = emac_ethtool_get_sset_count,
+ .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
+
+ .get_link = ethtool_op_get_link,
+};
+
+static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ struct mii_ioctl_data *data = if_mii(rq);
+
+ DBG(dev, "ioctl %08x" NL, cmd);
+
+ if (dev->phy.address < 0)
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = dev->phy.address;
+ /* Fall through */
+ case SIOCGMIIREG:
+ data->val_out = emac_mdio_read(ndev, dev->phy.address,
+ data->reg_num);
+ return 0;
+
+ case SIOCSMIIREG:
+ emac_mdio_write(ndev, dev->phy.address, data->reg_num,
+ data->val_in);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+struct emac_depentry {
+ u32 phandle;
+ struct device_node *node;
+ struct platform_device *ofdev;
+ void *drvdata;
+};
+
+#define EMAC_DEP_MAL_IDX 0
+#define EMAC_DEP_ZMII_IDX 1
+#define EMAC_DEP_RGMII_IDX 2
+#define EMAC_DEP_TAH_IDX 3
+#define EMAC_DEP_MDIO_IDX 4
+#define EMAC_DEP_PREV_IDX 5
+#define EMAC_DEP_COUNT 6
+
+static int __devinit emac_check_deps(struct emac_instance *dev,
+ struct emac_depentry *deps)
+{
+ int i, there = 0;
+ struct device_node *np;
+
+ for (i = 0; i < EMAC_DEP_COUNT; i++) {
+ /* no dependency on that item, all good */
+ if (deps[i].phandle == 0) {
+ there++;
+ continue;
+ }
+ /* special case for blist as the dependency might go away */
+ if (i == EMAC_DEP_PREV_IDX) {
+ np = *(dev->blist - 1);
+ if (np == NULL) {
+ deps[i].phandle = 0;
+ there++;
+ continue;
+ }
+ if (deps[i].node == NULL)
+ deps[i].node = of_node_get(np);
+ }
+ if (deps[i].node == NULL)
+ deps[i].node = of_find_node_by_phandle(deps[i].phandle);
+ if (deps[i].node == NULL)
+ continue;
+ if (deps[i].ofdev == NULL)
+ deps[i].ofdev = of_find_device_by_node(deps[i].node);
+ if (deps[i].ofdev == NULL)
+ continue;
+ if (deps[i].drvdata == NULL)
+ deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
+ if (deps[i].drvdata != NULL)
+ there++;
+ }
+ return there == EMAC_DEP_COUNT;
+}
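+
+/* For illustration: an EMAC node with mal-device, zmii-device and
+ * rgmii-device phandles but no TAH, no shared MDIO and no bootlist
+ * predecessor leaves three slots with phandle 0 (counted as "there"
+ * immediately); the function then returns true once drvdata shows up for
+ * the three real dependencies, i.e. once their drivers have probed.
+ */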
+
+static void emac_put_deps(struct emac_instance *dev)
+{
+ if (dev->mal_dev)
+ of_dev_put(dev->mal_dev);
+ if (dev->zmii_dev)
+ of_dev_put(dev->zmii_dev);
+ if (dev->rgmii_dev)
+ of_dev_put(dev->rgmii_dev);
+ if (dev->mdio_dev)
+ of_dev_put(dev->mdio_dev);
+ if (dev->tah_dev)
+ of_dev_put(dev->tah_dev);
+}
+
+static int __devinit emac_of_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ /* We only care about devices whose driver has bound */
+ if (action == BUS_NOTIFY_BOUND_DRIVER)
+ wake_up_all(&emac_probe_wait);
+ return 0;
+}
+
+static struct notifier_block emac_of_bus_notifier __devinitdata = {
+ .notifier_call = emac_of_bus_notify
+};
+
+static int __devinit emac_wait_deps(struct emac_instance *dev)
+{
+ struct emac_depentry deps[EMAC_DEP_COUNT];
+ int i, err;
+
+ memset(&deps, 0, sizeof(deps));
+
+ deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
+ deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
+ deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
+ if (dev->tah_ph)
+ deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
+ if (dev->mdio_ph)
+ deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
+ if (dev->blist && dev->blist > emac_boot_list)
+ deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
+ bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
+ wait_event_timeout(emac_probe_wait,
+ emac_check_deps(dev, deps),
+ EMAC_PROBE_DEP_TIMEOUT);
+ bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
+ err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
+ for (i = 0; i < EMAC_DEP_COUNT; i++) {
+ if (deps[i].node)
+ of_node_put(deps[i].node);
+ if (err && deps[i].ofdev)
+ of_dev_put(deps[i].ofdev);
+ }
+ if (err == 0) {
+ dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
+ dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
+ dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
+ dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
+ dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
+ }
+ if (deps[EMAC_DEP_PREV_IDX].ofdev)
+ of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
+ return err;
+}
+
+static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
+ u32 *val, int fatal)
+{
+ int len;
+ const u32 *prop = of_get_property(np, name, &len);
+ if (prop == NULL || len < sizeof(u32)) {
+ if (fatal)
+ printk(KERN_ERR "%s: missing %s property\n",
+ np->full_name, name);
+ return -ENODEV;
+ }
+ *val = *prop;
+ return 0;
+}
+
+static int __devinit emac_init_phy(struct emac_instance *dev)
+{
+ struct device_node *np = dev->ofdev->dev.of_node;
+ struct net_device *ndev = dev->ndev;
+ u32 phy_map, adv;
+ int i;
+
+ dev->phy.dev = ndev;
+ dev->phy.mode = dev->phy_mode;
+
+ /* PHY-less configuration.
+ * XXX I probably should move these settings to the dev tree
+ */
+ if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
+ emac_reset(dev);
+
+ dev->phy.address = -1;
+ dev->phy.features = SUPPORTED_MII;
+ if (emac_phy_supports_gige(dev->phy_mode))
+ dev->phy.features |= SUPPORTED_1000baseT_Full;
+ else
+ dev->phy.features |= SUPPORTED_100baseT_Full;
+ dev->phy.pause = 1;
+
+ return 0;
+ }
+
+ mutex_lock(&emac_phy_map_lock);
+ phy_map = dev->phy_map | busy_phy_map;
+
+ DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
+
+ dev->phy.mdio_read = emac_mdio_read;
+ dev->phy.mdio_write = emac_mdio_write;
+
+ /* Enable internal clock source */
+#ifdef CONFIG_PPC_DCR_NATIVE
+ if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
+ dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
+#endif
+ /* PHY clock workaround */
+ emac_rx_clk_tx(dev);
+
+ /* Configure EMAC with defaults so we can at least use MDIO
+ * This is needed mostly for 440GX
+ */
+ if (emac_phy_gpcs(dev->phy.mode)) {
+ /* XXX
+ * Make GPCS PHY address equal to EMAC index.
+ * We probably should take into account busy_phy_map
+ * and/or phy_map here.
+ *
+ * Note that the busy_phy_map is currently global
+ * while it should probably be per-ASIC...
+ */
+ dev->phy.gpcs_address = dev->gpcs_address;
+ if (dev->phy.gpcs_address == 0xffffffff)
+ dev->phy.address = dev->cell_index;
+ }
+
+ emac_configure(dev);
+
+ if (dev->phy_address != 0xffffffff)
+ phy_map = ~(1 << dev->phy_address);
+
+ for (i = 0; i < 0x20; phy_map >>= 1, ++i)
+ if (!(phy_map & 1)) {
+ int r;
+ busy_phy_map |= 1 << i;
+
+ /* Quick check if there is a PHY at the address */
+ r = emac_mdio_read(dev->ndev, i, MII_BMCR);
+ if (r == 0xffff || r < 0)
+ continue;
+ if (!emac_mii_phy_probe(&dev->phy, i))
+ break;
+ }
+
+ /* Enable external clock source */
+#ifdef CONFIG_PPC_DCR_NATIVE
+ if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
+ dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
+#endif
+ mutex_unlock(&emac_phy_map_lock);
+ if (i == 0x20) {
+ printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
+ return -ENXIO;
+ }
+
+ /* Init PHY */
+ if (dev->phy.def->ops->init)
+ dev->phy.def->ops->init(&dev->phy);
+
+ /* Disable any PHY features not supported by the platform */
+ dev->phy.def->features &= ~dev->phy_feat_exc;
+
+ /* Setup initial link parameters */
+ if (dev->phy.features & SUPPORTED_Autoneg) {
+ adv = dev->phy.features;
+ if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
+ adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ /* Restart autonegotiation */
+ dev->phy.def->ops->setup_aneg(&dev->phy, adv);
+ } else {
+ u32 f = dev->phy.def->features;
+ int speed = SPEED_10, fd = DUPLEX_HALF;
+
+ /* Select highest supported speed/duplex */
+ if (f & SUPPORTED_1000baseT_Full) {
+ speed = SPEED_1000;
+ fd = DUPLEX_FULL;
+ } else if (f & SUPPORTED_1000baseT_Half)
+ speed = SPEED_1000;
+ else if (f & SUPPORTED_100baseT_Full) {
+ speed = SPEED_100;
+ fd = DUPLEX_FULL;
+ } else if (f & SUPPORTED_100baseT_Half)
+ speed = SPEED_100;
+ else if (f & SUPPORTED_10baseT_Full)
+ fd = DUPLEX_FULL;
+
+ /* Force link parameters */
+ dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
+ }
+ return 0;
+}
+
+static int __devinit emac_init_config(struct emac_instance *dev)
+{
+ struct device_node *np = dev->ofdev->dev.of_node;
+ const void *p;
+
+ /* Read config from device-tree */
+ if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
+ return -ENXIO;
+ if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
+ return -ENXIO;
+ if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
+ return -ENXIO;
+ if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
+ return -ENXIO;
+ if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
+ dev->max_mtu = 1500;
+ if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
+ dev->rx_fifo_size = 2048;
+ if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
+ dev->tx_fifo_size = 2048;
+ if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
+ dev->rx_fifo_size_gige = dev->rx_fifo_size;
+ if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
+ dev->tx_fifo_size_gige = dev->tx_fifo_size;
+ if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
+ dev->phy_address = 0xffffffff;
+ if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
+ dev->phy_map = 0xffffffff;
+ if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
+ dev->gpcs_address = 0xffffffff;
+ if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
+ return -ENXIO;
+ if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
+ dev->tah_ph = 0;
+ if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
+ dev->tah_port = 0;
+ if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
+ dev->mdio_ph = 0;
+ if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
+ dev->zmii_ph = 0;
+ if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
+ dev->zmii_port = 0xffffffff;
+ if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
+ dev->rgmii_ph = 0;
+ if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
+ dev->rgmii_port = 0xffffffff;
+ if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
+ dev->fifo_entry_size = 16;
+ if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
+ dev->mal_burst_size = 256;
+
+ /* PHY mode needs some decoding */
+ dev->phy_mode = of_get_phy_mode(np);
+ if (dev->phy_mode < 0)
+ dev->phy_mode = PHY_MODE_NA;
+
+ /* Check EMAC version */
+ if (of_device_is_compatible(np, "ibm,emac4sync")) {
+ dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
+ if (of_device_is_compatible(np, "ibm,emac-460ex") ||
+ of_device_is_compatible(np, "ibm,emac-460gt"))
+ dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
+ if (of_device_is_compatible(np, "ibm,emac-405ex") ||
+ of_device_is_compatible(np, "ibm,emac-405exr"))
+ dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
+ } else if (of_device_is_compatible(np, "ibm,emac4")) {
+ dev->features |= EMAC_FTR_EMAC4;
+ if (of_device_is_compatible(np, "ibm,emac-440gx"))
+ dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
+ } else {
+ if (of_device_is_compatible(np, "ibm,emac-440ep") ||
+ of_device_is_compatible(np, "ibm,emac-440gr"))
+ dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
+ if (of_device_is_compatible(np, "ibm,emac-405ez")) {
+#ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
+ dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
+#else
+ printk(KERN_ERR "%s: Flow control not disabled!\n",
+ np->full_name);
+ return -ENXIO;
+#endif
+ }
+
+ }
+
+ /* Fixup some feature bits based on the device tree */
+ if (of_get_property(np, "has-inverted-stacr-oc", NULL))
+ dev->features |= EMAC_FTR_STACR_OC_INVERT;
+ if (of_get_property(np, "has-new-stacr-staopc", NULL))
+ dev->features |= EMAC_FTR_HAS_NEW_STACR;
+
+ /* CAB lacks the appropriate properties */
+ if (of_device_is_compatible(np, "ibm,emac-axon"))
+ dev->features |= EMAC_FTR_HAS_NEW_STACR |
+ EMAC_FTR_STACR_OC_INVERT;
+
+ /* Enable TAH/ZMII/RGMII features as found */
+ if (dev->tah_ph != 0) {
+#ifdef CONFIG_IBM_EMAC_TAH
+ dev->features |= EMAC_FTR_HAS_TAH;
+#else
+ printk(KERN_ERR "%s: TAH support not enabled !\n",
+ np->full_name);
+ return -ENXIO;
+#endif
+ }
+
+ if (dev->zmii_ph != 0) {
+#ifdef CONFIG_IBM_EMAC_ZMII
+ dev->features |= EMAC_FTR_HAS_ZMII;
+#else
+ printk(KERN_ERR "%s: ZMII support not enabled !\n",
+ np->full_name);
+ return -ENXIO;
+#endif
+ }
+
+ if (dev->rgmii_ph != 0) {
+#ifdef CONFIG_IBM_EMAC_RGMII
+ dev->features |= EMAC_FTR_HAS_RGMII;
+#else
+ printk(KERN_ERR "%s: RGMII support not enabled !\n",
+ np->full_name);
+ return -ENXIO;
+#endif
+ }
+
+ /* Read MAC-address */
+ p = of_get_property(np, "local-mac-address", NULL);
+ if (p == NULL) {
+ printk(KERN_ERR "%s: Can't find local-mac-address property\n",
+ np->full_name);
+ return -ENXIO;
+ }
+ memcpy(dev->ndev->dev_addr, p, 6);
+
+ /* IAHT and GAHT filter parameterization */
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
+ dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
+ dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
+ } else {
+ dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
+ dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
+ }
+
+ DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
+ DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
+ DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
+ DBG(dev, "max_mtu : %d\n", dev->max_mtu);
+ DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
+
+ return 0;
+}
+
+static const struct net_device_ops emac_netdev_ops = {
+ .ndo_open = emac_open,
+ .ndo_stop = emac_close,
+ .ndo_get_stats = emac_stats,
+ .ndo_set_rx_mode = emac_set_multicast_list,
+ .ndo_do_ioctl = emac_ioctl,
+ .ndo_tx_timeout = emac_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_start_xmit = emac_start_xmit,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static const struct net_device_ops emac_gige_netdev_ops = {
+ .ndo_open = emac_open,
+ .ndo_stop = emac_close,
+ .ndo_get_stats = emac_stats,
+ .ndo_set_rx_mode = emac_set_multicast_list,
+ .ndo_do_ioctl = emac_ioctl,
+ .ndo_tx_timeout = emac_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_start_xmit = emac_start_xmit_sg,
+ .ndo_change_mtu = emac_change_mtu,
+};
+
+static int __devinit emac_probe(struct platform_device *ofdev)
+{
+ struct net_device *ndev;
+ struct emac_instance *dev;
+ struct device_node *np = ofdev->dev.of_node;
+ struct device_node **blist = NULL;
+ int err, i;
+
+ /* Skip unused/unwired EMACs. We leave the check for an unused
+ * property here for now, but new flat device trees should set a
+ * status property to "disabled" instead.
+ */
+ if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
+ return -ENODEV;
+
+ /* Find ourselves in the bootlist if we are there */
+ for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
+ if (emac_boot_list[i] == np)
+ blist = &emac_boot_list[i];
+
+ /* Allocate our net_device structure */
+ err = -ENOMEM;
+ ndev = alloc_etherdev(sizeof(struct emac_instance));
+ if (!ndev) {
+ printk(KERN_ERR "%s: could not allocate ethernet device!\n",
+ np->full_name);
+ goto err_gone;
+ }
+ dev = netdev_priv(ndev);
+ dev->ndev = ndev;
+ dev->ofdev = ofdev;
+ dev->blist = blist;
+ SET_NETDEV_DEV(ndev, &ofdev->dev);
+
+ /* Initialize some embedded data structures */
+ mutex_init(&dev->mdio_lock);
+ mutex_init(&dev->link_lock);
+ spin_lock_init(&dev->lock);
+ INIT_WORK(&dev->reset_work, emac_reset_work);
+
+ /* Init various config data based on device-tree */
+ err = emac_init_config(dev);
+ if (err != 0)
+ goto err_free;
+
+ /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
+ dev->emac_irq = irq_of_parse_and_map(np, 0);
+ dev->wol_irq = irq_of_parse_and_map(np, 1);
+ if (dev->emac_irq == NO_IRQ) {
+ printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
+ goto err_free;
+ }
+ ndev->irq = dev->emac_irq;
+
+ /* Map EMAC regs */
+ if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
+ printk(KERN_ERR "%s: Can't get registers address\n",
+ np->full_name);
+ goto err_irq_unmap;
+ }
+ /* TODO: request_mem_region */
+ dev->emacp = ioremap(dev->rsrc_regs.start,
+ resource_size(&dev->rsrc_regs));
+ if (dev->emacp == NULL) {
+ printk(KERN_ERR "%s: Can't map device registers!\n",
+ np->full_name);
+ err = -ENOMEM;
+ goto err_irq_unmap;
+ }
+
+ /* Wait for dependent devices */
+ err = emac_wait_deps(dev);
+ if (err) {
+ printk(KERN_ERR
+ "%s: Timeout waiting for dependent devices\n",
+ np->full_name);
+ /* display more info about what's missing ? */
+ goto err_reg_unmap;
+ }
+ dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
+ if (dev->mdio_dev != NULL)
+ dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
+
+ /* Register with MAL */
+ dev->commac.ops = &emac_commac_ops;
+ dev->commac.dev = dev;
+ dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
+ dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
+ err = mal_register_commac(dev->mal, &dev->commac);
+ if (err) {
+ printk(KERN_ERR "%s: failed to register with mal %s!\n",
+ np->full_name, dev->mal_dev->dev.of_node->full_name);
+ goto err_rel_deps;
+ }
+ dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
+ dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
+
+ /* Get pointers to BD rings */
+ dev->tx_desc =
+ dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
+ dev->rx_desc =
+ dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
+
+ DBG(dev, "tx_desc %p" NL, dev->tx_desc);
+ DBG(dev, "rx_desc %p" NL, dev->rx_desc);
+
+ /* Clean rings */
+ memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
+ memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
+ memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
+ memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
+
+ /* Attach to ZMII, if needed */
+ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
+ (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
+ goto err_unreg_commac;
+
+ /* Attach to RGMII, if needed */
+ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
+ (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
+ goto err_detach_zmii;
+
+ /* Attach to TAH, if needed */
+ if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
+ (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
+ goto err_detach_rgmii;
+
+ /* Set some link defaults before we can find out real parameters */
+ dev->phy.speed = SPEED_100;
+ dev->phy.duplex = DUPLEX_FULL;
+ dev->phy.autoneg = AUTONEG_DISABLE;
+ dev->phy.pause = dev->phy.asym_pause = 0;
+ dev->stop_timeout = STOP_TIMEOUT_100;
+ INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
+
+ /* Find PHY if any */
+ err = emac_init_phy(dev);
+ if (err != 0)
+ goto err_detach_tah;
+
+ if (dev->tah_dev) {
+ ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
+ ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
+ }
+ ndev->watchdog_timeo = 5 * HZ;
+ if (emac_phy_supports_gige(dev->phy_mode)) {
+ ndev->netdev_ops = &emac_gige_netdev_ops;
+ dev->commac.ops = &emac_commac_sg_ops;
+ } else
+ ndev->netdev_ops = &emac_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
+
+ netif_carrier_off(ndev);
+
+ err = register_netdev(ndev);
+ if (err) {
+ printk(KERN_ERR "%s: failed to register net device (%d)!\n",
+ np->full_name, err);
+ goto err_detach_tah;
+ }
+
+ /* Set our drvdata last as we don't want them visible until we are
+ * fully initialized
+ */
+ wmb();
+ dev_set_drvdata(&ofdev->dev, dev);
+
+ /* There's a new kid in town! Let's tell everybody */
+ wake_up_all(&emac_probe_wait);
+
+ printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
+ ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
+
+ if (dev->phy_mode == PHY_MODE_SGMII)
+ printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
+
+ if (dev->phy.address >= 0)
+ printk("%s: found %s PHY (0x%02x)\n", ndev->name,
+ dev->phy.def->name, dev->phy.address);
+
+ emac_dbg_register(dev);
+
+ /* Life is good */
+ return 0;
+
+ /* I have a bad feeling about this ... */
+
+ err_detach_tah:
+ if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
+ tah_detach(dev->tah_dev, dev->tah_port);
+ err_detach_rgmii:
+ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
+ rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
+ err_detach_zmii:
+ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
+ zmii_detach(dev->zmii_dev, dev->zmii_port);
+ err_unreg_commac:
+ mal_unregister_commac(dev->mal, &dev->commac);
+ err_rel_deps:
+ emac_put_deps(dev);
+ err_reg_unmap:
+ iounmap(dev->emacp);
+ err_irq_unmap:
+ if (dev->wol_irq != NO_IRQ)
+ irq_dispose_mapping(dev->wol_irq);
+ if (dev->emac_irq != NO_IRQ)
+ irq_dispose_mapping(dev->emac_irq);
+ err_free:
+ free_netdev(ndev);
+ err_gone:
+ /* if we were on the bootlist, remove us as we won't show up and
+ * wake up all waiters to notify them in case they were waiting
+ * on us
+ */
+ if (blist) {
+ *blist = NULL;
+ wake_up_all(&emac_probe_wait);
+ }
+ return err;
+}
+
+static int __devexit emac_remove(struct platform_device *ofdev)
+{
+ struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
+
+ DBG(dev, "remove" NL);
+
+ dev_set_drvdata(&ofdev->dev, NULL);
+
+ unregister_netdev(dev->ndev);
+
+ cancel_work_sync(&dev->reset_work);
+
+ if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
+ tah_detach(dev->tah_dev, dev->tah_port);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
+ rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
+ if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
+ zmii_detach(dev->zmii_dev, dev->zmii_port);
+
+ mal_unregister_commac(dev->mal, &dev->commac);
+ emac_put_deps(dev);
+
+ emac_dbg_unregister(dev);
+ iounmap(dev->emacp);
+
+ if (dev->wol_irq != NO_IRQ)
+ irq_dispose_mapping(dev->wol_irq);
+ if (dev->emac_irq != NO_IRQ)
+ irq_dispose_mapping(dev->emac_irq);
+
+ free_netdev(dev->ndev);
+
+ return 0;
+}
+
+/* XXX Features in here should be replaced by properties... */
+static struct of_device_id emac_match[] =
+{
+ {
+ .type = "network",
+ .compatible = "ibm,emac",
+ },
+ {
+ .type = "network",
+ .compatible = "ibm,emac4",
+ },
+ {
+ .type = "network",
+ .compatible = "ibm,emac4sync",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, emac_match);
+
+static struct platform_driver emac_driver = {
+ .driver = {
+ .name = "emac",
+ .owner = THIS_MODULE,
+ .of_match_table = emac_match,
+ },
+ .probe = emac_probe,
+ .remove = emac_remove,
+};
+
+static void __init emac_make_bootlist(void)
+{
+ struct device_node *np = NULL;
+ int j, max, i = 0, k;
+ int cell_indices[EMAC_BOOT_LIST_SIZE];
+
+ /* Collect EMACs */
+ while ((np = of_find_all_nodes(np)) != NULL) {
+ const u32 *idx;
+
+ if (of_match_node(emac_match, np) == NULL)
+ continue;
+ if (of_get_property(np, "unused", NULL))
+ continue;
+ idx = of_get_property(np, "cell-index", NULL);
+ if (idx == NULL)
+ continue;
+ cell_indices[i] = *idx;
+ emac_boot_list[i++] = of_node_get(np);
+ if (i >= EMAC_BOOT_LIST_SIZE) {
+ of_node_put(np);
+ break;
+ }
+ }
+ max = i;
+
+ /* Bubble sort them (doh, what a creative algorithm :-) */
+ for (i = 0; max > 1 && (i < (max - 1)); i++)
+ for (j = i; j < max; j++) {
+ if (cell_indices[i] > cell_indices[j]) {
+ np = emac_boot_list[i];
+ emac_boot_list[i] = emac_boot_list[j];
+ emac_boot_list[j] = np;
+ k = cell_indices[i];
+ cell_indices[i] = cell_indices[j];
+ cell_indices[j] = k;
+ }
+ }
+}
+
+static int __init emac_init(void)
+{
+ int rc;
+
+ printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
+
+ /* Init debug stuff */
+ emac_init_debug();
+
+ /* Build EMAC boot list */
+ emac_make_bootlist();
+
+ /* Init submodules */
+ rc = mal_init();
+ if (rc)
+ goto err;
+ rc = zmii_init();
+ if (rc)
+ goto err_mal;
+ rc = rgmii_init();
+ if (rc)
+ goto err_zmii;
+ rc = tah_init();
+ if (rc)
+ goto err_rgmii;
+ rc = platform_driver_register(&emac_driver);
+ if (rc)
+ goto err_tah;
+
+ return 0;
+
+ err_tah:
+ tah_exit();
+ err_rgmii:
+ rgmii_exit();
+ err_zmii:
+ zmii_exit();
+ err_mal:
+ mal_exit();
+ err:
+ return rc;
+}
+
+static void __exit emac_exit(void)
+{
+ int i;
+
+ platform_driver_unregister(&emac_driver);
+
+ tah_exit();
+ rgmii_exit();
+ zmii_exit();
+ mal_exit();
+ emac_fini_debug();
+
+ /* Destroy EMAC boot list */
+ for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
+ if (emac_boot_list[i])
+ of_node_put(emac_boot_list[i]);
+}
+
+module_init(emac_init);
+module_exit(emac_exit);
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
new file mode 100644
index 000000000000..fa3ec57935fa
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -0,0 +1,462 @@
+/*
+ * drivers/net/ibm_newemac/core.h
+ *
+ * Driver for PowerPC 4xx on-chip ethernet controller.
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * Based on the arch/ppc version of the driver:
+ *
+ * Copyright (c) 2004, 2005 Zultys Technologies.
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *
+ * Based on original work by
+ * Armin Kuster <akuster@mvista.com>
+ * Johnnie Peters <jpeters@mvista.com>
+ * Copyright 2000, 2001 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __IBM_NEWEMAC_CORE_H
+#define __IBM_NEWEMAC_CORE_H
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+
+#include <asm/io.h>
+#include <asm/dcr.h>
+
+#include "emac.h"
+#include "phy.h"
+#include "zmii.h"
+#include "rgmii.h"
+#include "mal.h"
+#include "tah.h"
+#include "debug.h"
+
+#define NUM_TX_BUFF CONFIG_IBM_EMAC_TXB
+#define NUM_RX_BUFF CONFIG_IBM_EMAC_RXB
+
+/* Simple sanity check */
+#if NUM_TX_BUFF > 256 || NUM_RX_BUFF > 256
+#error Invalid number of buffer descriptors (greater than 256)
+#endif
+
+#define EMAC_MIN_MTU 46
+
+/* Maximum L2 header length (VLAN tagged, no FCS) */
+#define EMAC_MTU_OVERHEAD (6 * 2 + 2 + 4)
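+/* = 2 * 6-byte MAC addresses + 2-byte ethertype + 4-byte VLAN tag = 18 bytes */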
+
+/* RX BD size for the given MTU */
+static inline int emac_rx_size(int mtu)
+{
+ if (mtu > ETH_DATA_LEN)
+ return MAL_MAX_RX_SIZE;
+ else
+ return mal_rx_size(ETH_DATA_LEN + EMAC_MTU_OVERHEAD);
+}
+
+#define EMAC_DMA_ALIGN(x) ALIGN((x), dma_get_cache_alignment())
+
+#define EMAC_RX_SKB_HEADROOM \
+ EMAC_DMA_ALIGN(CONFIG_IBM_EMAC_RX_SKB_HEADROOM)
+
+/* Size of RX skb for the given MTU */
+static inline int emac_rx_skb_size(int mtu)
+{
+ int size = max(mtu + EMAC_MTU_OVERHEAD, emac_rx_size(mtu));
+ return EMAC_DMA_ALIGN(size + 2) + EMAC_RX_SKB_HEADROOM;
+}
+
+/* RX DMA sync size */
+static inline int emac_rx_sync_size(int mtu)
+{
+ return EMAC_DMA_ALIGN(emac_rx_size(mtu) + 2);
+}
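+
+/* Sizing example (illustrative assumptions: 1500-byte MTU, 32-byte cache
+ * lines, zero configured headroom, and mal_rx_size() returning 1520): the
+ * BD size covers 1500 + 18 bytes of L2 overhead, the skb is
+ * ALIGN(1520 + 2, 32) = 1536 bytes plus headroom, and the sync size is the
+ * same 1536 bytes, so partial-cacheline DMA syncs are avoided.
+ */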
+
+/* Driver statistics are split into two parts to make them more cache
+ * friendly:
+ * - normal statistics (packet count, etc)
+ * - error statistics
+ *
+ * When statistics are requested by ethtool, these parts are concatenated;
+ * the normal one goes first.
+ *
+ * Please keep these structures in sync with emac_stats_keys.
+ */
+
+/* Normal TX/RX Statistics */
+struct emac_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 rx_packets_csum;
+ u64 tx_packets_csum;
+};
+
+/* Error statistics */
+struct emac_error_stats {
+ u64 tx_undo;
+
+ /* Software RX Errors */
+ u64 rx_dropped_stack;
+ u64 rx_dropped_oom;
+ u64 rx_dropped_error;
+ u64 rx_dropped_resize;
+ u64 rx_dropped_mtu;
+ u64 rx_stopped;
+ /* BD reported RX errors */
+ u64 rx_bd_errors;
+ u64 rx_bd_overrun;
+ u64 rx_bd_bad_packet;
+ u64 rx_bd_runt_packet;
+ u64 rx_bd_short_event;
+ u64 rx_bd_alignment_error;
+ u64 rx_bd_bad_fcs;
+ u64 rx_bd_packet_too_long;
+ u64 rx_bd_out_of_range;
+ u64 rx_bd_in_range;
+ /* EMAC IRQ reported RX errors */
+ u64 rx_parity;
+ u64 rx_fifo_overrun;
+ u64 rx_overrun;
+ u64 rx_bad_packet;
+ u64 rx_runt_packet;
+ u64 rx_short_event;
+ u64 rx_alignment_error;
+ u64 rx_bad_fcs;
+ u64 rx_packet_too_long;
+ u64 rx_out_of_range;
+ u64 rx_in_range;
+
+ /* Software TX Errors */
+ u64 tx_dropped;
+ /* BD reported TX errors */
+ u64 tx_bd_errors;
+ u64 tx_bd_bad_fcs;
+ u64 tx_bd_carrier_loss;
+ u64 tx_bd_excessive_deferral;
+ u64 tx_bd_excessive_collisions;
+ u64 tx_bd_late_collision;
+ u64 tx_bd_multple_collisions;
+ u64 tx_bd_single_collision;
+ u64 tx_bd_underrun;
+ u64 tx_bd_sqe;
+ /* EMAC IRQ reported TX errors */
+ u64 tx_parity;
+ u64 tx_underrun;
+ u64 tx_sqe;
+ u64 tx_errors;
+};
+
+#define EMAC_ETHTOOL_STATS_COUNT ((sizeof(struct emac_stats) + \
+ sizeof(struct emac_error_stats)) \
+ / sizeof(u64))
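+
+/* With the structures above this works out to 6 + 43 = 49 entries; every
+ * member is u64, so the sizeof arithmetic has no padding to worry about.
+ */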
+
+struct emac_instance {
+ struct net_device *ndev;
+ struct resource rsrc_regs;
+ struct emac_regs __iomem *emacp;
+ struct platform_device *ofdev;
+ struct device_node **blist; /* bootlist entry */
+
+ /* MAL linkage */
+ u32 mal_ph;
+ struct platform_device *mal_dev;
+ u32 mal_rx_chan;
+ u32 mal_tx_chan;
+ struct mal_instance *mal;
+ struct mal_commac commac;
+
+ /* PHY infos */
+ u32 phy_mode;
+ u32 phy_map;
+ u32 phy_address;
+ u32 phy_feat_exc;
+ struct mii_phy phy;
+ struct mutex link_lock;
+ struct delayed_work link_work;
+ int link_polling;
+
+ /* GPCS PHY infos */
+ u32 gpcs_address;
+
+ /* Shared MDIO if any */
+ u32 mdio_ph;
+ struct platform_device *mdio_dev;
+ struct emac_instance *mdio_instance;
+ struct mutex mdio_lock;
+
+ /* ZMII infos if any */
+ u32 zmii_ph;
+ u32 zmii_port;
+ struct platform_device *zmii_dev;
+
+ /* RGMII infos if any */
+ u32 rgmii_ph;
+ u32 rgmii_port;
+ struct platform_device *rgmii_dev;
+
+ /* TAH infos if any */
+ u32 tah_ph;
+ u32 tah_port;
+ struct platform_device *tah_dev;
+
+ /* IRQs */
+ int wol_irq;
+ int emac_irq;
+
+ /* OPB bus frequency in MHz */
+ u32 opb_bus_freq;
+
+ /* Cell index within an ASIC (for clock management) */
+ u32 cell_index;
+
+ /* Max supported MTU */
+ u32 max_mtu;
+
+ /* Feature bits (from probe table) */
+ unsigned int features;
+
+ /* Tx and Rx fifo sizes & other infos in bytes */
+ u32 tx_fifo_size;
+ u32 tx_fifo_size_gige;
+ u32 rx_fifo_size;
+ u32 rx_fifo_size_gige;
+ u32 fifo_entry_size;
+ u32 mal_burst_size; /* move to MAL ? */
+
+ /* IAHT and GAHT filter parameterization */
+ u32 xaht_slots_shift;
+ u32 xaht_width_shift;
+
+ /* Descriptor management
+ */
+ struct mal_descriptor *tx_desc;
+ int tx_cnt;
+ int tx_slot;
+ int ack_slot;
+
+ struct mal_descriptor *rx_desc;
+ int rx_slot;
+ struct sk_buff *rx_sg_skb; /* at most one partially assembled RX packet */
+ int rx_skb_size;
+ int rx_sync_size;
+
+ struct sk_buff *tx_skb[NUM_TX_BUFF];
+ struct sk_buff *rx_skb[NUM_RX_BUFF];
+
+ /* Stats
+ */
+ struct emac_error_stats estats;
+ struct net_device_stats nstats;
+ struct emac_stats stats;
+
+ /* Misc
+ */
+ int reset_failed;
+ int stop_timeout; /* in us */
+ int no_mcast;
+ int mcast_pending;
+ int opened;
+ struct work_struct reset_work;
+ spinlock_t lock;
+};
+
+/*
+ * Features of various EMAC implementations
+ */
+
+/*
+ * No flow control on 40x according to the original driver
+ */
+#define EMAC_FTR_NO_FLOW_CONTROL_40x 0x00000001
+/*
+ * Cell is an EMAC4
+ */
+#define EMAC_FTR_EMAC4 0x00000002
+/*
+ * For the 440SPe, AMCC inexplicably changed the polarity of
+ * the "operation complete" bit in the MII control register.
+ */
+#define EMAC_FTR_STACR_OC_INVERT 0x00000004
+/*
+ * Set if we have a TAH.
+ */
+#define EMAC_FTR_HAS_TAH 0x00000008
+/*
+ * Set if we have a ZMII.
+ */
+#define EMAC_FTR_HAS_ZMII 0x00000010
+/*
+ * Set if we have an RGMII.
+ */
+#define EMAC_FTR_HAS_RGMII 0x00000020
+/*
+ * Set if we have new type STACR with STAOPC
+ */
+#define EMAC_FTR_HAS_NEW_STACR 0x00000040
+/*
+ * Set if we need phy clock workaround for 440gx
+ */
+#define EMAC_FTR_440GX_PHY_CLK_FIX 0x00000080
+/*
+ * Set if we need phy clock workaround for 440ep or 440gr
+ */
+#define EMAC_FTR_440EP_PHY_CLK_FIX 0x00000100
+/*
+ * The 405EX and 460EX contain the EMAC4SYNC core
+ */
+#define EMAC_FTR_EMAC4SYNC 0x00000200
+/*
+ * Set if we need phy clock workaround for 460ex or 460gt
+ */
+#define EMAC_FTR_460EX_PHY_CLK_FIX 0x00000400
+
+
+/* Right now, we don't handle the always/possible masks in the most
+ * optimal way, as we don't have a way to say something like
+ * "always EMAC4". Patches welcome.
+ */
+enum {
+ EMAC_FTRS_ALWAYS = 0,
+
+ EMAC_FTRS_POSSIBLE =
+#ifdef CONFIG_IBM_EMAC_EMAC4
+ EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC |
+ EMAC_FTR_HAS_NEW_STACR |
+ EMAC_FTR_STACR_OC_INVERT | EMAC_FTR_440GX_PHY_CLK_FIX |
+#endif
+#ifdef CONFIG_IBM_EMAC_TAH
+ EMAC_FTR_HAS_TAH |
+#endif
+#ifdef CONFIG_IBM_EMAC_ZMII
+ EMAC_FTR_HAS_ZMII |
+#endif
+#ifdef CONFIG_IBM_EMAC_RGMII
+ EMAC_FTR_HAS_RGMII |
+#endif
+#ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
+ EMAC_FTR_NO_FLOW_CONTROL_40x |
+#endif
+ EMAC_FTR_460EX_PHY_CLK_FIX |
+ EMAC_FTR_440EP_PHY_CLK_FIX,
+};
+
+static inline int emac_has_feature(struct emac_instance *dev,
+ unsigned long feature)
+{
+ return (EMAC_FTRS_ALWAYS & feature) ||
+ (EMAC_FTRS_POSSIBLE & dev->features & feature);
+}
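[Editor's note: because EMAC_FTRS_POSSIBLE is a compile-time constant, testing a feature that was configured out folds to zero and the guarded branch is discarded by the compiler. An illustrative call-site sketch follows; the rgmii_set_speed() call and the speed variable are assumed for the example.]

	/* Sketch: with CONFIG_IBM_EMAC_RGMII unset, EMAC_FTR_HAS_RGMII is
	 * not in EMAC_FTRS_POSSIBLE, so this whole branch compiles away. */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port, speed);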
+
+/*
+ * Various instances of the EMAC core have varying 1) number of
+ * address match slots, 2) width of the registers for handling address
+ * match slots, 3) number of registers for handling address match
+ * slots and 4) base offset for those registers.
+ *
+ * These macros and inlines handle these differences based on
+ * parameters supplied by the device structure which are, in turn,
+ * initialized based on the "compatible" entry in the device tree.
+ */
+
+#define EMAC4_XAHT_SLOTS_SHIFT 6
+#define EMAC4_XAHT_WIDTH_SHIFT 4
+
+#define EMAC4SYNC_XAHT_SLOTS_SHIFT 8
+#define EMAC4SYNC_XAHT_WIDTH_SHIFT 5
+
+#define EMAC_XAHT_SLOTS(dev) (1 << (dev)->xaht_slots_shift)
+#define EMAC_XAHT_WIDTH(dev) (1 << (dev)->xaht_width_shift)
+#define EMAC_XAHT_REGS(dev) (1 << ((dev)->xaht_slots_shift - \
+ (dev)->xaht_width_shift))
+
+#define EMAC_XAHT_CRC_TO_SLOT(dev, crc) \
+ ((EMAC_XAHT_SLOTS(dev) - 1) - \
+ ((crc) >> ((sizeof (u32) * BITS_PER_BYTE) - \
+ (dev)->xaht_slots_shift)))
+
+#define EMAC_XAHT_SLOT_TO_REG(dev, slot) \
+ ((slot) >> (dev)->xaht_width_shift)
+
+#define EMAC_XAHT_SLOT_TO_MASK(dev, slot) \
+ ((u32)(1 << (EMAC_XAHT_WIDTH(dev) - 1)) >> \
+ ((slot) & (u32)(EMAC_XAHT_WIDTH(dev) - 1)))
+
+static inline u32 *emac_xaht_base(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ int offset;
+
+ /* The first IAHT entry always is the base of the block of
+ * IAHT and GAHT registers.
+ */
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC))
+ offset = offsetof(struct emac_regs, u1.emac4sync.iaht1);
+ else
+ offset = offsetof(struct emac_regs, u0.emac4.iaht1);
+
+ return (u32 *)((ptrdiff_t)p + offset);
+}
+
+static inline u32 *emac_gaht_base(struct emac_instance *dev)
+{
+ /* GAHT registers always come after an identical number of
+ * IAHT registers.
+ */
+ return emac_xaht_base(dev) + EMAC_XAHT_REGS(dev);
+}
+
+static inline u32 *emac_iaht_base(struct emac_instance *dev)
+{
+ /* IAHT registers always come before an identical number of
+ * GAHT registers.
+ */
+ return emac_xaht_base(dev);
+}
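[Editor's note: a worked example, derived purely from the macros above using the EMAC4 parameters (64 slots, 16-bit register width, hence 4 IAHT/GAHT registers), to make the parameterization concrete.]

	/* For a multicast hash crc = 0xD0000000 on EMAC4:
	 *   crc >> (32 - 6)            = 52
	 *   slot = 63 - 52             = 11
	 *   reg  = 11 >> 4             = 0      (first IAHT/GAHT register)
	 *   mask = 0x8000 >> (11 & 15) = 0x0010
	 * i.e. OR 0x0010 into GAHT1 to accept that hash. */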
+
+/* Ethtool get_regs complex data.
+ * We want to get not just EMAC registers, but also MAL, ZMII, RGMII, TAH
+ * when available.
+ *
+ * The returned blob consists of the emac_ethtool_regs_hdr,
+ * MAL registers, EMAC registers and optional ZMII, RGMII and TAH registers.
+ * Each register component is preceded by an emac_ethtool_regs_subhdr.
+ * The order of the optional headers follows their relative bit positions
+ * in emac_ethtool_regs_hdr.components.
+ */
+#define EMAC_ETHTOOL_REGS_ZMII 0x00000001
+#define EMAC_ETHTOOL_REGS_RGMII 0x00000002
+#define EMAC_ETHTOOL_REGS_TAH 0x00000004
+
+struct emac_ethtool_regs_hdr {
+ u32 components;
+};
+
+struct emac_ethtool_regs_subhdr {
+ u32 version;
+ u32 index;
+};
+
+#define EMAC_ETHTOOL_REGS_VER 0
+#define EMAC_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \
+ (dev)->rsrc_regs.start + 1)
+#define EMAC4_ETHTOOL_REGS_VER 1
+#define EMAC4_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \
+ (dev)->rsrc_regs.start + 1)
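[Editor's note: a sketch of how a consumer (say, a userspace decoder of the ethtool dump) would walk the blob laid out as described above. decode_component() is a placeholder, assumed to read one subhdr plus its register dump and return the next position; only the header fields and flag names come from this file.]

	/* Sketch: decode order for the get_regs blob described above. */
	struct emac_ethtool_regs_hdr *hdr = buf;
	void *p = hdr + 1;

	p = decode_component(p);                 /* MAL  (always present) */
	p = decode_component(p);                 /* EMAC (always present) */
	if (hdr->components & EMAC_ETHTOOL_REGS_ZMII)
		p = decode_component(p);         /* ZMII */
	if (hdr->components & EMAC_ETHTOOL_REGS_RGMII)
		p = decode_component(p);         /* RGMII */
	if (hdr->components & EMAC_ETHTOOL_REGS_TAH)
		p = decode_component(p);         /* TAH */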
+
+#endif /* __IBM_NEWEMAC_CORE_H */
diff --git a/drivers/net/ethernet/ibm/emac/debug.c b/drivers/net/ethernet/ibm/emac/debug.c
new file mode 100644
index 000000000000..8c6c1e2a8750
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/debug.c
@@ -0,0 +1,270 @@
+/*
+ * drivers/net/ibm_newemac/debug.c
+ *
+ * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * Based on the arch/ppc version of the driver:
+ *
+ * Copyright (c) 2004, 2005 Zultys Technologies
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/sysrq.h>
+#include <asm/io.h>
+
+#include "core.h"
+
+static DEFINE_SPINLOCK(emac_dbg_lock);
+
+static void emac_desc_dump(struct emac_instance *p)
+{
+ int i;
+ printk("** EMAC %s TX BDs **\n"
+ " tx_cnt = %d tx_slot = %d ack_slot = %d\n",
+ p->ofdev->dev.of_node->full_name,
+ p->tx_cnt, p->tx_slot, p->ack_slot);
+ for (i = 0; i < NUM_TX_BUFF / 2; ++i)
+ printk
+ ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n",
+ i, p->tx_desc[i].data_ptr, p->tx_skb[i] ? 'V' : ' ',
+ p->tx_desc[i].ctrl, p->tx_desc[i].data_len,
+ NUM_TX_BUFF / 2 + i,
+ p->tx_desc[NUM_TX_BUFF / 2 + i].data_ptr,
+ p->tx_skb[NUM_TX_BUFF / 2 + i] ? 'V' : ' ',
+ p->tx_desc[NUM_TX_BUFF / 2 + i].ctrl,
+ p->tx_desc[NUM_TX_BUFF / 2 + i].data_len);
+
+ printk("** EMAC %s RX BDs **\n"
+ " rx_slot = %d flags = 0x%lx rx_skb_size = %d rx_sync_size = %d\n"
+ " rx_sg_skb = 0x%p\n",
+ p->ofdev->dev.of_node->full_name,
+ p->rx_slot, p->commac.flags, p->rx_skb_size,
+ p->rx_sync_size, p->rx_sg_skb);
+ for (i = 0; i < NUM_RX_BUFF / 2; ++i)
+ printk
+ ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n",
+ i, p->rx_desc[i].data_ptr, p->rx_skb[i] ? 'V' : ' ',
+ p->rx_desc[i].ctrl, p->rx_desc[i].data_len,
+ NUM_RX_BUFF / 2 + i,
+ p->rx_desc[NUM_RX_BUFF / 2 + i].data_ptr,
+ p->rx_skb[NUM_RX_BUFF / 2 + i] ? 'V' : ' ',
+ p->rx_desc[NUM_RX_BUFF / 2 + i].ctrl,
+ p->rx_desc[NUM_RX_BUFF / 2 + i].data_len);
+}
+
+static void emac_mac_dump(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ const int xaht_regs = EMAC_XAHT_REGS(dev);
+ u32 *gaht_base = emac_gaht_base(dev);
+ u32 *iaht_base = emac_iaht_base(dev);
+ int emac4sync = emac_has_feature(dev, EMAC_FTR_EMAC4SYNC);
+ int n;
+
+ printk("** EMAC %s registers **\n"
+ "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n"
+ "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n"
+ "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n",
+ dev->ofdev->dev.of_node->full_name,
+ in_be32(&p->mr0), in_be32(&p->mr1),
+ in_be32(&p->tmr0), in_be32(&p->tmr1),
+ in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser),
+ in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid),
+ in_be32(&p->vtci)
+ );
+
+ if (emac4sync)
+ printk("MAR = %04x%08x MMAR = %04x%08x\n",
+ in_be32(&p->u0.emac4sync.mahr),
+ in_be32(&p->u0.emac4sync.malr),
+ in_be32(&p->u0.emac4sync.mmahr),
+ in_be32(&p->u0.emac4sync.mmalr)
+ );
+
+ for (n = 0; n < xaht_regs; n++)
+ printk("IAHT%02d = 0x%08x\n", n + 1, in_be32(iaht_base + n));
+
+ for (n = 0; n < xaht_regs; n++)
+ printk("GAHT%02d = 0x%08x\n", n + 1, in_be32(gaht_base + n));
+
+ printk("LSA = %04x%08x IPGVR = 0x%04x\n"
+ "STACR = 0x%08x TRTR = 0x%08x RWMR = 0x%08x\n"
+ "OCTX = 0x%08x OCRX = 0x%08x\n",
+ in_be32(&p->lsah), in_be32(&p->lsal), in_be32(&p->ipgvr),
+ in_be32(&p->stacr), in_be32(&p->trtr), in_be32(&p->rwmr),
+ in_be32(&p->octx), in_be32(&p->ocrx)
+ );
+
+ if (!emac4sync) {
+ printk("IPCR = 0x%08x\n",
+ in_be32(&p->u1.emac4.ipcr)
+ );
+ } else {
+ printk("REVID = 0x%08x TPC = 0x%08x\n",
+ in_be32(&p->u1.emac4sync.revid),
+ in_be32(&p->u1.emac4sync.tpc)
+ );
+ }
+
+ emac_desc_dump(dev);
+}
+
+static void emac_mal_dump(struct mal_instance *mal)
+{
+ int i;
+
+ printk("** MAL %s Registers **\n"
+ "CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n"
+ "TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n"
+ "RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n",
+ mal->ofdev->dev.of_node->full_name,
+ get_mal_dcrn(mal, MAL_CFG), get_mal_dcrn(mal, MAL_ESR),
+ get_mal_dcrn(mal, MAL_IER),
+ get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR),
+ get_mal_dcrn(mal, MAL_TXEOBISR), get_mal_dcrn(mal, MAL_TXDEIR),
+ get_mal_dcrn(mal, MAL_RXCASR), get_mal_dcrn(mal, MAL_RXCARR),
+ get_mal_dcrn(mal, MAL_RXEOBISR), get_mal_dcrn(mal, MAL_RXDEIR)
+ );
+
+ printk("TX|");
+ for (i = 0; i < mal->num_tx_chans; ++i) {
+ if (i && !(i % 4))
+ printk("\n ");
+ printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_TXCTPR(i)));
+ }
+ printk("\nRX|");
+ for (i = 0; i < mal->num_rx_chans; ++i) {
+ if (i && !(i % 4))
+ printk("\n ");
+ printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_RXCTPR(i)));
+ }
+ printk("\n ");
+ for (i = 0; i < mal->num_rx_chans; ++i) {
+ u32 r = get_mal_dcrn(mal, MAL_RCBS(i));
+ if (i && !(i % 3))
+ printk("\n ");
+ printk("RCBS%d = 0x%08x (%d) ", i, r, r * 16);
+ }
+ printk("\n");
+}
+
+static struct emac_instance *__emacs[4];
+static struct mal_instance *__mals[1];
+
+void emac_dbg_register(struct emac_instance *dev)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&emac_dbg_lock, flags);
+ for (i = 0; i < ARRAY_SIZE(__emacs); i++)
+ if (__emacs[i] == NULL) {
+ __emacs[i] = dev;
+ break;
+ }
+ spin_unlock_irqrestore(&emac_dbg_lock, flags);
+}
+
+void emac_dbg_unregister(struct emac_instance *dev)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&emac_dbg_lock, flags);
+ for (i = 0; i < ARRAY_SIZE(__emacs); i++)
+ if (__emacs[i] == dev) {
+ __emacs[i] = NULL;
+ break;
+ }
+ spin_unlock_irqrestore(&emac_dbg_lock, flags);
+}
+
+void mal_dbg_register(struct mal_instance *mal)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&emac_dbg_lock, flags);
+ for (i = 0; i < ARRAY_SIZE(__mals); i++)
+ if (__mals[i] == NULL) {
+ __mals[i] = mal;
+ break;
+ }
+ spin_unlock_irqrestore(&emac_dbg_lock, flags);
+}
+
+void mal_dbg_unregister(struct mal_instance *mal)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&emac_dbg_lock, flags);
+ for (i = 0; i < ARRAY_SIZE(__mals); i++)
+ if (__mals[i] == mal) {
+ __mals[i] = NULL;
+ break;
+ }
+ spin_unlock_irqrestore(&emac_dbg_lock, flags);
+}
+
+void emac_dbg_dump_all(void)
+{
+ unsigned int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&emac_dbg_lock, flags);
+
+ for (i = 0; i < ARRAY_SIZE(__mals); ++i)
+ if (__mals[i])
+ emac_mal_dump(__mals[i]);
+
+ for (i = 0; i < ARRAY_SIZE(__emacs); ++i)
+ if (__emacs[i])
+ emac_mac_dump(__emacs[i]);
+
+ spin_unlock_irqrestore(&emac_dbg_lock, flags);
+}
+
+#if defined(CONFIG_MAGIC_SYSRQ)
+static void emac_sysrq_handler(int key)
+{
+ emac_dbg_dump_all();
+}
+
+static struct sysrq_key_op emac_sysrq_op = {
+ .handler = emac_sysrq_handler,
+ .help_msg = "emaC",
+ .action_msg = "Show EMAC(s) status",
+};
+
+int __init emac_init_debug(void)
+{
+ return register_sysrq_key('c', &emac_sysrq_op);
+}
+
+void __exit emac_fini_debug(void)
+{
+ unregister_sysrq_key('c', &emac_sysrq_op);
+}
+
+#else
+int __init emac_init_debug(void)
+{
+ return 0;
+}
+void __exit emac_fini_debug(void)
+{
+}
+#endif /* CONFIG_MAGIC_SYSRQ */
diff --git a/drivers/net/ethernet/ibm/emac/debug.h b/drivers/net/ethernet/ibm/emac/debug.h
new file mode 100644
index 000000000000..90477fe69d0c
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/debug.h
@@ -0,0 +1,83 @@
+/*
+ * drivers/net/ibm_newemac/debug.h
+ *
+ * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * Based on the arch/ppc version of the driver:
+ *
+ * Copyright (c) 2004, 2005 Zultys Technologies
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __IBM_NEWEMAC_DEBUG_H
+#define __IBM_NEWEMAC_DEBUG_H
+
+#include <linux/init.h>
+
+#include "core.h"
+
+#if defined(CONFIG_IBM_EMAC_DEBUG)
+
+struct emac_instance;
+struct mal_instance;
+
+extern void emac_dbg_register(struct emac_instance *dev);
+extern void emac_dbg_unregister(struct emac_instance *dev);
+extern void mal_dbg_register(struct mal_instance *mal);
+extern void mal_dbg_unregister(struct mal_instance *mal);
+extern int emac_init_debug(void) __init;
+extern void emac_fini_debug(void) __exit;
+extern void emac_dbg_dump_all(void);
+
+# define DBG_LEVEL 1
+
+#else
+
+# define emac_dbg_register(x) do { } while(0)
+# define emac_dbg_unregister(x) do { } while(0)
+# define mal_dbg_register(x) do { } while(0)
+# define mal_dbg_unregister(x) do { } while(0)
+# define emac_init_debug() do { } while(0)
+# define emac_fini_debug() do { } while(0)
+# define emac_dbg_dump_all() do { } while(0)
+
+# define DBG_LEVEL 0
+
+#endif
+
+#define EMAC_DBG(d, name, fmt, arg...) \
+ printk(KERN_DEBUG #name "%s: " fmt, d->ofdev->dev.of_node->full_name, ## arg)
+
+#if DBG_LEVEL > 0
+# define DBG(d,f,x...) EMAC_DBG(d, emac, f, ##x)
+# define MAL_DBG(d,f,x...) EMAC_DBG(d, mal, f, ##x)
+# define ZMII_DBG(d,f,x...) EMAC_DBG(d, zmii, f, ##x)
+# define RGMII_DBG(d,f,x...) EMAC_DBG(d, rgmii, f, ##x)
+# define NL "\n"
+#else
+# define DBG(d,f,x...)			((void)0)
+# define MAL_DBG(d,f,x...) ((void)0)
+# define ZMII_DBG(d,f,x...) ((void)0)
+# define RGMII_DBG(d,f,x...) ((void)0)
+#endif
+#if DBG_LEVEL > 1
+# define DBG2(d,f,x...) DBG(d,f, ##x)
+# define MAL_DBG2(d,f,x...) MAL_DBG(d,f, ##x)
+# define ZMII_DBG2(d,f,x...) ZMII_DBG(d,f, ##x)
+# define RGMII_DBG2(d,f,x...) RGMII_DBG(d,f, ##x)
+#else
+# define DBG2(d,f,x...)		((void)0)
+# define MAL_DBG2(d,f,x...) ((void)0)
+# define ZMII_DBG2(d,f,x...) ((void)0)
+# define RGMII_DBG2(d,f,x...) ((void)0)
+#endif
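[Editor's note: an illustrative call site for the macros above, pairing the device pointer, the format string and the NL terminator; when DBG_LEVEL is 0 the whole statement expands to a no-op.]

	/* Sketch of a typical call; 'dev' is an emac_instance pointer. */
	DBG(dev, "reset, mode %d" NL, dev->phy.mode);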
+
+#endif /* __IBM_NEWEMAC_DEBUG_H */
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
new file mode 100644
index 000000000000..1568278d759a
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/emac.h
@@ -0,0 +1,312 @@
+/*
+ * drivers/net/ibm_newemac/emac.h
+ *
+ * Register definitions for PowerPC 4xx on-chip ethernet controller
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * Based on the arch/ppc version of the driver:
+ *
+ * Copyright (c) 2004, 2005 Zultys Technologies.
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *
+ * Based on original work by
+ * Matt Porter <mporter@kernel.crashing.org>
+ * Armin Kuster <akuster@mvista.com>
+ * Copyright 2002-2004 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __IBM_NEWEMAC_H
+#define __IBM_NEWEMAC_H
+
+#include <linux/types.h>
+#include <linux/phy.h>
+
+/* EMAC registers Write Access rules */
+struct emac_regs {
+ /* Common registers across all EMAC implementations. */
+ u32 mr0; /* Special */
+ u32 mr1; /* Reset */
+ u32 tmr0; /* Special */
+ u32 tmr1; /* Special */
+ u32 rmr; /* Reset */
+ u32 isr; /* Always */
+ u32 iser; /* Reset */
+ u32 iahr; /* Reset, R, T */
+ u32 ialr; /* Reset, R, T */
+ u32 vtpid; /* Reset, R, T */
+ u32 vtci; /* Reset, R, T */
+ u32 ptr; /* Reset, T */
+ union {
+ /* Registers unique to EMAC4 implementations */
+ struct {
+ u32 iaht1; /* Reset, R */
+ u32 iaht2; /* Reset, R */
+ u32 iaht3; /* Reset, R */
+ u32 iaht4; /* Reset, R */
+ u32 gaht1; /* Reset, R */
+ u32 gaht2; /* Reset, R */
+ u32 gaht3; /* Reset, R */
+ u32 gaht4; /* Reset, R */
+ } emac4;
+ /* Registers unique to EMAC4SYNC implementations */
+ struct {
+ u32 mahr; /* Reset, R, T */
+ u32 malr; /* Reset, R, T */
+ u32 mmahr; /* Reset, R, T */
+ u32 mmalr; /* Reset, R, T */
+ u32 rsvd0[4];
+ } emac4sync;
+ } u0;
+ /* Common registers across all EMAC implementations. */
+ u32 lsah;
+ u32 lsal;
+ u32 ipgvr; /* Reset, T */
+ u32 stacr; /* Special */
+ u32 trtr; /* Special */
+ u32 rwmr; /* Reset */
+ u32 octx;
+ u32 ocrx;
+ union {
+ /* Registers unique to EMAC4 implementations */
+ struct {
+ u32 ipcr;
+ } emac4;
+ /* Registers unique to EMAC4SYNC implementations */
+ struct {
+ u32 rsvd1;
+ u32 revid;
+ u32 rsvd2[2];
+ u32 iaht1; /* Reset, R */
+ u32 iaht2; /* Reset, R */
+ u32 iaht3; /* Reset, R */
+ u32 iaht4; /* Reset, R */
+ u32 iaht5; /* Reset, R */
+ u32 iaht6; /* Reset, R */
+ u32 iaht7; /* Reset, R */
+ u32 iaht8; /* Reset, R */
+ u32 gaht1; /* Reset, R */
+ u32 gaht2; /* Reset, R */
+ u32 gaht3; /* Reset, R */
+ u32 gaht4; /* Reset, R */
+ u32 gaht5; /* Reset, R */
+ u32 gaht6; /* Reset, R */
+ u32 gaht7; /* Reset, R */
+ u32 gaht8; /* Reset, R */
+ u32 tpc; /* Reset, T */
+ } emac4sync;
+ } u1;
+};
+
+/*
+ * PHY mode settings (EMAC <-> ZMII/RGMII bridge <-> PHY)
+ */
+#define PHY_MODE_NA PHY_INTERFACE_MODE_NA
+#define PHY_MODE_MII PHY_INTERFACE_MODE_MII
+#define PHY_MODE_RMII PHY_INTERFACE_MODE_RMII
+#define PHY_MODE_SMII PHY_INTERFACE_MODE_SMII
+#define PHY_MODE_RGMII PHY_INTERFACE_MODE_RGMII
+#define PHY_MODE_TBI PHY_INTERFACE_MODE_TBI
+#define PHY_MODE_GMII PHY_INTERFACE_MODE_GMII
+#define PHY_MODE_RTBI PHY_INTERFACE_MODE_RTBI
+#define PHY_MODE_SGMII PHY_INTERFACE_MODE_SGMII
+
+/* EMACx_MR0 */
+#define EMAC_MR0_RXI 0x80000000
+#define EMAC_MR0_TXI 0x40000000
+#define EMAC_MR0_SRST 0x20000000
+#define EMAC_MR0_TXE 0x10000000
+#define EMAC_MR0_RXE 0x08000000
+#define EMAC_MR0_WKE 0x04000000
+
+/* EMACx_MR1 */
+#define EMAC_MR1_FDE 0x80000000
+#define EMAC_MR1_ILE 0x40000000
+#define EMAC_MR1_VLE 0x20000000
+#define EMAC_MR1_EIFC 0x10000000
+#define EMAC_MR1_APP 0x08000000
+#define EMAC_MR1_IST 0x01000000
+
+#define EMAC_MR1_MF_MASK 0x00c00000
+#define EMAC_MR1_MF_10 0x00000000
+#define EMAC_MR1_MF_100 0x00400000
+#define EMAC_MR1_MF_1000 0x00800000
+#define EMAC_MR1_MF_1000GPCS 0x00c00000
+#define EMAC_MR1_MF_IPPA(id) (((id) & 0x1f) << 6)
+
+#define EMAC_MR1_RFS_4K 0x00300000
+#define EMAC_MR1_RFS_16K 0x00000000
+#define EMAC_MR1_TFS_2K 0x00080000
+#define EMAC_MR1_TR0_MULT 0x00008000
+#define EMAC_MR1_JPSM 0x00000000
+#define EMAC_MR1_MWSW_001 0x00000000
+#define EMAC_MR1_BASE(opb) (EMAC_MR1_TFS_2K | EMAC_MR1_TR0_MULT)
+
+
+#define EMAC4_MR1_RFS_2K 0x00100000
+#define EMAC4_MR1_RFS_4K 0x00180000
+#define EMAC4_MR1_RFS_16K 0x00280000
+#define EMAC4_MR1_TFS_2K 0x00020000
+#define EMAC4_MR1_TFS_4K 0x00030000
+#define EMAC4_MR1_TFS_16K 0x00050000
+#define EMAC4_MR1_TR 0x00008000
+#define EMAC4_MR1_MWSW_001 0x00001000
+#define EMAC4_MR1_JPSM 0x00000800
+#define EMAC4_MR1_OBCI_MASK 0x00000038
+#define EMAC4_MR1_OBCI_50 0x00000000
+#define EMAC4_MR1_OBCI_66 0x00000008
+#define EMAC4_MR1_OBCI_83 0x00000010
+#define EMAC4_MR1_OBCI_100 0x00000018
+#define EMAC4_MR1_OBCI_100P 0x00000020
+#define EMAC4_MR1_OBCI(freq) ((freq) <= 50 ? EMAC4_MR1_OBCI_50 : \
+ (freq) <= 66 ? EMAC4_MR1_OBCI_66 : \
+ (freq) <= 83 ? EMAC4_MR1_OBCI_83 : \
+ (freq) <= 100 ? EMAC4_MR1_OBCI_100 : \
+ EMAC4_MR1_OBCI_100P)
+
+/* EMACx_TMR0 */
+#define EMAC_TMR0_GNP 0x80000000
+#define EMAC_TMR0_DEFAULT 0x00000000
+#define EMAC4_TMR0_TFAE_2_32 0x00000001
+#define EMAC4_TMR0_TFAE_4_64 0x00000002
+#define EMAC4_TMR0_TFAE_8_128 0x00000003
+#define EMAC4_TMR0_TFAE_16_256 0x00000004
+#define EMAC4_TMR0_TFAE_32_512 0x00000005
+#define EMAC4_TMR0_TFAE_64_1024 0x00000006
+#define EMAC4_TMR0_TFAE_128_2048 0x00000007
+#define EMAC4_TMR0_DEFAULT EMAC4_TMR0_TFAE_2_32
+#define EMAC_TMR0_XMIT (EMAC_TMR0_GNP | EMAC_TMR0_DEFAULT)
+#define EMAC4_TMR0_XMIT (EMAC_TMR0_GNP | EMAC4_TMR0_DEFAULT)
+
+/* EMACx_TMR1 */
+
+#define EMAC_TMR1(l,h) (((l) << 27) | (((h) & 0xff) << 16))
+#define EMAC4_TMR1(l,h) (((l) << 27) | (((h) & 0x3ff) << 14))
+
+/* EMACx_RMR */
+#define EMAC_RMR_SP 0x80000000
+#define EMAC_RMR_SFCS 0x40000000
+#define EMAC_RMR_RRP 0x20000000
+#define EMAC_RMR_RFP 0x10000000
+#define EMAC_RMR_ROP 0x08000000
+#define EMAC_RMR_RPIR 0x04000000
+#define EMAC_RMR_PPP 0x02000000
+#define EMAC_RMR_PME 0x01000000
+#define EMAC_RMR_PMME 0x00800000
+#define EMAC_RMR_IAE 0x00400000
+#define EMAC_RMR_MIAE 0x00200000
+#define EMAC_RMR_BAE 0x00100000
+#define EMAC_RMR_MAE 0x00080000
+#define EMAC_RMR_BASE 0x00000000
+#define EMAC4_RMR_RFAF_2_32 0x00000001
+#define EMAC4_RMR_RFAF_4_64 0x00000002
+#define EMAC4_RMR_RFAF_8_128 0x00000003
+#define EMAC4_RMR_RFAF_16_256 0x00000004
+#define EMAC4_RMR_RFAF_32_512 0x00000005
+#define EMAC4_RMR_RFAF_64_1024 0x00000006
+#define EMAC4_RMR_RFAF_128_2048 0x00000007
+#define EMAC4_RMR_BASE EMAC4_RMR_RFAF_128_2048
+
+/* EMACx_ISR & EMACx_ISER */
+#define EMAC4_ISR_TXPE 0x20000000
+#define EMAC4_ISR_RXPE 0x10000000
+#define EMAC4_ISR_TXUE 0x08000000
+#define EMAC4_ISR_RXOE 0x04000000
+#define EMAC_ISR_OVR 0x02000000
+#define EMAC_ISR_PP 0x01000000
+#define EMAC_ISR_BP 0x00800000
+#define EMAC_ISR_RP 0x00400000
+#define EMAC_ISR_SE 0x00200000
+#define EMAC_ISR_ALE 0x00100000
+#define EMAC_ISR_BFCS 0x00080000
+#define EMAC_ISR_PTLE 0x00040000
+#define EMAC_ISR_ORE 0x00020000
+#define EMAC_ISR_IRE 0x00010000
+#define EMAC_ISR_SQE 0x00000080
+#define EMAC_ISR_TE 0x00000040
+#define EMAC_ISR_MOS 0x00000002
+#define EMAC_ISR_MOF 0x00000001
+
+/* EMACx_STACR */
+#define EMAC_STACR_PHYD_MASK 0xffff
+#define EMAC_STACR_PHYD_SHIFT 16
+#define EMAC_STACR_OC 0x00008000
+#define EMAC_STACR_PHYE 0x00004000
+#define EMAC_STACR_STAC_MASK 0x00003000
+#define EMAC_STACR_STAC_READ 0x00001000
+#define EMAC_STACR_STAC_WRITE 0x00002000
+#define EMAC_STACR_OPBC_MASK 0x00000C00
+#define EMAC_STACR_OPBC_50 0x00000000
+#define EMAC_STACR_OPBC_66 0x00000400
+#define EMAC_STACR_OPBC_83 0x00000800
+#define EMAC_STACR_OPBC_100 0x00000C00
+#define EMAC_STACR_OPBC(freq) ((freq) <= 50 ? EMAC_STACR_OPBC_50 : \
+ (freq) <= 66 ? EMAC_STACR_OPBC_66 : \
+ (freq) <= 83 ? EMAC_STACR_OPBC_83 : EMAC_STACR_OPBC_100)
+#define EMAC_STACR_BASE(opb) EMAC_STACR_OPBC(opb)
+#define EMAC4_STACR_BASE(opb) 0x00000000
+#define EMAC_STACR_PCDA_MASK 0x1f
+#define EMAC_STACR_PCDA_SHIFT 5
+#define EMAC_STACR_PRA_MASK 0x1f
+#define EMACX_STACR_STAC_MASK 0x00003800
+#define EMACX_STACR_STAC_READ 0x00001000
+#define EMACX_STACR_STAC_WRITE 0x00000800
+#define EMACX_STACR_STAC_IND_ADDR 0x00002000
+#define EMACX_STACR_STAC_IND_READ 0x00003800
+#define EMACX_STACR_STAC_IND_READINC 0x00003000
+#define EMACX_STACR_STAC_IND_WRITE 0x00002800
+
+
+/* EMACx_TRTR */
+#define EMAC_TRTR_SHIFT_EMAC4 24
+#define EMAC_TRTR_SHIFT 27
+
+/* EMAC specific TX descriptor control fields (write access) */
+#define EMAC_TX_CTRL_GFCS 0x0200
+#define EMAC_TX_CTRL_GP 0x0100
+#define EMAC_TX_CTRL_ISA 0x0080
+#define EMAC_TX_CTRL_RSA 0x0040
+#define EMAC_TX_CTRL_IVT 0x0020
+#define EMAC_TX_CTRL_RVT 0x0010
+#define EMAC_TX_CTRL_TAH_CSUM 0x000e
+
+/* EMAC specific TX descriptor status fields (read access) */
+#define EMAC_TX_ST_BFCS 0x0200
+#define EMAC_TX_ST_LCS 0x0080
+#define EMAC_TX_ST_ED 0x0040
+#define EMAC_TX_ST_EC 0x0020
+#define EMAC_TX_ST_LC 0x0010
+#define EMAC_TX_ST_MC 0x0008
+#define EMAC_TX_ST_SC 0x0004
+#define EMAC_TX_ST_UR 0x0002
+#define EMAC_TX_ST_SQE 0x0001
+#define EMAC_IS_BAD_TX (EMAC_TX_ST_LCS | EMAC_TX_ST_ED | \
+ EMAC_TX_ST_EC | EMAC_TX_ST_LC | \
+ EMAC_TX_ST_MC | EMAC_TX_ST_UR)
+#define EMAC_IS_BAD_TX_TAH (EMAC_TX_ST_LCS | EMAC_TX_ST_ED | \
+ EMAC_TX_ST_EC | EMAC_TX_ST_LC)
+
+/* EMAC specific RX descriptor status fields (read access) */
+#define EMAC_RX_ST_OE 0x0200
+#define EMAC_RX_ST_PP 0x0100
+#define EMAC_RX_ST_BP 0x0080
+#define EMAC_RX_ST_RP 0x0040
+#define EMAC_RX_ST_SE 0x0020
+#define EMAC_RX_ST_AE 0x0010
+#define EMAC_RX_ST_BFCS 0x0008
+#define EMAC_RX_ST_PTL 0x0004
+#define EMAC_RX_ST_ORE 0x0002
+#define EMAC_RX_ST_IRE 0x0001
+#define EMAC_RX_TAH_BAD_CSUM 0x0003
+#define EMAC_BAD_RX_MASK (EMAC_RX_ST_OE | EMAC_RX_ST_BP | \
+ EMAC_RX_ST_RP | EMAC_RX_ST_SE | \
+ EMAC_RX_ST_AE | EMAC_RX_ST_BFCS | \
+ EMAC_RX_ST_PTL | EMAC_RX_ST_ORE | \
+ EMAC_RX_ST_IRE )
+#endif /* __IBM_NEWEMAC_H */
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
new file mode 100644
index 000000000000..f3c50b97ec61
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -0,0 +1,809 @@
+/*
+ * drivers/net/ibm_newemac/mal.c
+ *
+ * Memory Access Layer (MAL) support
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * Based on the arch/ppc version of the driver:
+ *
+ * Copyright (c) 2004, 2005 Zultys Technologies.
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *
+ * Based on original work by
+ * Benjamin Herrenschmidt <benh@kernel.crashing.org>,
+ * David Gibson <hermes@gibson.dropbear.id.au>,
+ *
+ * Armin Kuster <akuster@mvista.com>
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "core.h"
+#include <asm/dcr-regs.h>
+
+static int mal_count;
+
+int __devinit mal_register_commac(struct mal_instance *mal,
+ struct mal_commac *commac)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mal->lock, flags);
+
+ MAL_DBG(mal, "reg(%08x, %08x)" NL,
+ commac->tx_chan_mask, commac->rx_chan_mask);
+
+ /* Don't let multiple commacs claim the same channel(s) */
+ if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
+ (mal->rx_chan_mask & commac->rx_chan_mask)) {
+ spin_unlock_irqrestore(&mal->lock, flags);
+ printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
+ mal->index);
+ return -EBUSY;
+ }
+
+ if (list_empty(&mal->list))
+ napi_enable(&mal->napi);
+ mal->tx_chan_mask |= commac->tx_chan_mask;
+ mal->rx_chan_mask |= commac->rx_chan_mask;
+ list_add(&commac->list, &mal->list);
+
+ spin_unlock_irqrestore(&mal->lock, flags);
+
+ return 0;
+}
+
+void mal_unregister_commac(struct mal_instance *mal,
+ struct mal_commac *commac)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mal->lock, flags);
+
+ MAL_DBG(mal, "unreg(%08x, %08x)" NL,
+ commac->tx_chan_mask, commac->rx_chan_mask);
+
+ mal->tx_chan_mask &= ~commac->tx_chan_mask;
+ mal->rx_chan_mask &= ~commac->rx_chan_mask;
+ list_del_init(&commac->list);
+ if (list_empty(&mal->list))
+ napi_disable(&mal->napi);
+
+ spin_unlock_irqrestore(&mal->lock, flags);
+}
+
+int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
+{
+ BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
+ size > MAL_MAX_RX_SIZE);
+
+ MAL_DBG(mal, "set_rbcs(%d, %lu)" NL, channel, size);
+
+ if (size & 0xf) {
+ printk(KERN_WARNING
+ "mal%d: incorrect RX size %lu for the channel %d\n",
+ mal->index, size, channel);
+ return -EINVAL;
+ }
+
+ set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
+ return 0;
+}
+
+int mal_tx_bd_offset(struct mal_instance *mal, int channel)
+{
+ BUG_ON(channel < 0 || channel >= mal->num_tx_chans);
+
+ return channel * NUM_TX_BUFF;
+}
+
+int mal_rx_bd_offset(struct mal_instance *mal, int channel)
+{
+ BUG_ON(channel < 0 || channel >= mal->num_rx_chans);
+ return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
+}
+
+void mal_enable_tx_channel(struct mal_instance *mal, int channel)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mal->lock, flags);
+
+ MAL_DBG(mal, "enable_tx(%d)" NL, channel);
+
+ set_mal_dcrn(mal, MAL_TXCASR,
+ get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));
+
+ spin_unlock_irqrestore(&mal->lock, flags);
+}
+
+void mal_disable_tx_channel(struct mal_instance *mal, int channel)
+{
+ set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));
+
+ MAL_DBG(mal, "disable_tx(%d)" NL, channel);
+}
+
+void mal_enable_rx_channel(struct mal_instance *mal, int channel)
+{
+ unsigned long flags;
+
+	/*
+	 * On some 4xx PPCs (e.g. 460EX/GT), the RX channel number is a
+	 * multiple of 8, but the bitmask written to MAL_RXCASR expects
+	 * that number divided by 8
+	 */
+ if (!(channel % 8))
+ channel >>= 3;
+
+ spin_lock_irqsave(&mal->lock, flags);
+
+ MAL_DBG(mal, "enable_rx(%d)" NL, channel);
+
+ set_mal_dcrn(mal, MAL_RXCASR,
+ get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));
+
+ spin_unlock_irqrestore(&mal->lock, flags);
+}
+
+void mal_disable_rx_channel(struct mal_instance *mal, int channel)
+{
+	/*
+	 * On some 4xx PPCs (e.g. 460EX/GT), the RX channel number is a
+	 * multiple of 8, but the bitmask written to MAL_RXCARR expects
+	 * that number divided by 8
+	 */
+ if (!(channel % 8))
+ channel >>= 3;
+
+ set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));
+
+ MAL_DBG(mal, "disable_rx(%d)" NL, channel);
+}
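[Editor's note: to make the shift above concrete, worked numbers derived from the code for the 460EX/GT case.]

	/* RX channel numbers arrive as multiples of 8:
	 *   channel  8 ->  8 >> 3 == 1 -> MAL_CHAN_MASK(1) == 0x40000000
	 *   channel 16 -> 16 >> 3 == 2 -> MAL_CHAN_MASK(2) == 0x20000000
	 * channels that are not multiples of 8 keep their raw index. */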
+
+void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mal->lock, flags);
+
+ MAL_DBG(mal, "poll_add(%p)" NL, commac);
+
+ /* starts disabled */
+ set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);
+
+ list_add_tail(&commac->poll_list, &mal->poll_list);
+
+ spin_unlock_irqrestore(&mal->lock, flags);
+}
+
+void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mal->lock, flags);
+
+ MAL_DBG(mal, "poll_del(%p)" NL, commac);
+
+ list_del(&commac->poll_list);
+
+ spin_unlock_irqrestore(&mal->lock, flags);
+}
+
+/* synchronized by mal_poll() */
+static inline void mal_enable_eob_irq(struct mal_instance *mal)
+{
+ MAL_DBG2(mal, "enable_irq" NL);
+
+ // XXX might want to cache MAL_CFG as the DCR read can be slooooow
+ set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
+}
+
+/* synchronized by NAPI state */
+static inline void mal_disable_eob_irq(struct mal_instance *mal)
+{
+ // XXX might want to cache MAL_CFG as the DCR read can be slooooow
+ set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);
+
+ MAL_DBG2(mal, "disable_irq" NL);
+}
+
+static irqreturn_t mal_serr(int irq, void *dev_instance)
+{
+ struct mal_instance *mal = dev_instance;
+
+ u32 esr = get_mal_dcrn(mal, MAL_ESR);
+
+ /* Clear the error status register */
+ set_mal_dcrn(mal, MAL_ESR, esr);
+
+ MAL_DBG(mal, "SERR %08x" NL, esr);
+
+ if (esr & MAL_ESR_EVB) {
+ if (esr & MAL_ESR_DE) {
+			/* We ignore descriptor errors; a TXDE or RXDE
+			 * interrupt will be generated anyway.
+			 */
+ return IRQ_HANDLED;
+ }
+
+ if (esr & MAL_ESR_PEIN) {
+			/* PLB error; it's probably buggy hardware or an
+			 * incorrect physical address in a BD (i.e. a bug)
+			 */
+ if (net_ratelimit())
+ printk(KERN_ERR
+ "mal%d: system error, "
+ "PLB (ESR = 0x%08x)\n",
+ mal->index, esr);
+ return IRQ_HANDLED;
+ }
+
+		/* OPB error; it's probably buggy hardware or an incorrect
+		 * EBC setup
+		 */
+ if (net_ratelimit())
+ printk(KERN_ERR
+ "mal%d: system error, OPB (ESR = 0x%08x)\n",
+ mal->index, esr);
+ }
+ return IRQ_HANDLED;
+}
+
+static inline void mal_schedule_poll(struct mal_instance *mal)
+{
+ if (likely(napi_schedule_prep(&mal->napi))) {
+ MAL_DBG2(mal, "schedule_poll" NL);
+ mal_disable_eob_irq(mal);
+ __napi_schedule(&mal->napi);
+ } else
+ MAL_DBG2(mal, "already in poll" NL);
+}
+
+static irqreturn_t mal_txeob(int irq, void *dev_instance)
+{
+ struct mal_instance *mal = dev_instance;
+
+ u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);
+
+ MAL_DBG2(mal, "txeob %08x" NL, r);
+
+ mal_schedule_poll(mal);
+ set_mal_dcrn(mal, MAL_TXEOBISR, r);
+
+#ifdef CONFIG_PPC_DCR_NATIVE
+ if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
+ mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
+ (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
+#endif
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mal_rxeob(int irq, void *dev_instance)
+{
+ struct mal_instance *mal = dev_instance;
+
+ u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);
+
+ MAL_DBG2(mal, "rxeob %08x" NL, r);
+
+ mal_schedule_poll(mal);
+ set_mal_dcrn(mal, MAL_RXEOBISR, r);
+
+#ifdef CONFIG_PPC_DCR_NATIVE
+ if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
+ mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
+ (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
+#endif
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mal_txde(int irq, void *dev_instance)
+{
+ struct mal_instance *mal = dev_instance;
+
+ u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
+ set_mal_dcrn(mal, MAL_TXDEIR, deir);
+
+ MAL_DBG(mal, "txde %08x" NL, deir);
+
+ if (net_ratelimit())
+ printk(KERN_ERR
+ "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
+ mal->index, deir);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mal_rxde(int irq, void *dev_instance)
+{
+ struct mal_instance *mal = dev_instance;
+ struct list_head *l;
+
+ u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);
+
+ MAL_DBG(mal, "rxde %08x" NL, deir);
+
+ list_for_each(l, &mal->list) {
+ struct mal_commac *mc = list_entry(l, struct mal_commac, list);
+ if (deir & mc->rx_chan_mask) {
+ set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
+ mc->ops->rxde(mc->dev);
+ }
+ }
+
+ mal_schedule_poll(mal);
+ set_mal_dcrn(mal, MAL_RXDEIR, deir);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mal_int(int irq, void *dev_instance)
+{
+ struct mal_instance *mal = dev_instance;
+ u32 esr = get_mal_dcrn(mal, MAL_ESR);
+
+ if (esr & MAL_ESR_EVB) {
+ /* descriptor error */
+ if (esr & MAL_ESR_DE) {
+ if (esr & MAL_ESR_CIDT)
+ return mal_rxde(irq, dev_instance);
+ else
+ return mal_txde(irq, dev_instance);
+ } else { /* SERR */
+ return mal_serr(irq, dev_instance);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
+{
+	/* Spinlock-type semantics: only one caller disables polling at a time */
+ while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
+ msleep(1);
+
+ /* Synchronize with the MAL NAPI poller */
+ napi_synchronize(&mal->napi);
+}
+
+void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
+{
+ smp_wmb();
+ clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);
+
+ /* Feels better to trigger a poll here to catch up with events that
+ * may have happened on this channel while disabled. It will most
+ * probably be delayed until the next interrupt but that's mostly a
+ * non-issue in the context where this is called.
+ */
+ napi_schedule(&mal->napi);
+}
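[Editor's note: a typical, purely illustrative use of the pair above brackets descriptor-ring manipulation so the shared NAPI poller never observes a half-rewritten ring.]

	/* Sketch: quiesce MAL polling around RX ring surgery.  The body
	 * in between is a placeholder for whatever the client rewrites. */
	mal_poll_disable(mal, &dev->commac);
	/* ... reinitialize RX descriptors, swap buffers, etc. ... */
	mal_poll_enable(mal, &dev->commac);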
+
+static int mal_poll(struct napi_struct *napi, int budget)
+{
+ struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
+ struct list_head *l;
+ int received = 0;
+ unsigned long flags;
+
+ MAL_DBG2(mal, "poll(%d)" NL, budget);
+ again:
+ /* Process TX skbs */
+ list_for_each(l, &mal->poll_list) {
+ struct mal_commac *mc =
+ list_entry(l, struct mal_commac, poll_list);
+ mc->ops->poll_tx(mc->dev);
+ }
+
+ /* Process RX skbs.
+ *
+	 * We _might_ need something smarter here to enforce polling
+ * fairness.
+ */
+ list_for_each(l, &mal->poll_list) {
+ struct mal_commac *mc =
+ list_entry(l, struct mal_commac, poll_list);
+ int n;
+ if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
+ continue;
+ n = mc->ops->poll_rx(mc->dev, budget);
+ if (n) {
+ received += n;
+ budget -= n;
+ if (budget <= 0)
+ goto more_work; // XXX What if this is the last one ?
+ }
+ }
+
+ /* We need to disable IRQs to protect from RXDE IRQ here */
+ spin_lock_irqsave(&mal->lock, flags);
+ __napi_complete(napi);
+ mal_enable_eob_irq(mal);
+ spin_unlock_irqrestore(&mal->lock, flags);
+
+ /* Check for "rotting" packet(s) */
+ list_for_each(l, &mal->poll_list) {
+ struct mal_commac *mc =
+ list_entry(l, struct mal_commac, poll_list);
+ if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
+ continue;
+ if (unlikely(mc->ops->peek_rx(mc->dev) ||
+ test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
+ MAL_DBG2(mal, "rotting packet" NL);
+ if (napi_reschedule(napi))
+ mal_disable_eob_irq(mal);
+ else
+ MAL_DBG2(mal, "already in poll list" NL);
+
+ if (budget > 0)
+ goto again;
+ else
+ goto more_work;
+ }
+ mc->ops->poll_tx(mc->dev);
+ }
+
+ more_work:
+ MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
+ return received;
+}
+
+static void mal_reset(struct mal_instance *mal)
+{
+ int n = 10;
+
+ MAL_DBG(mal, "reset" NL);
+
+ set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);
+
+ /* Wait for reset to complete (1 system clock) */
+ while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
+ --n;
+
+ if (unlikely(!n))
+ printk(KERN_ERR "mal%d: reset timeout\n", mal->index);
+}
+
+int mal_get_regs_len(struct mal_instance *mal)
+{
+ return sizeof(struct emac_ethtool_regs_subhdr) +
+ sizeof(struct mal_regs);
+}
+
+void *mal_dump_regs(struct mal_instance *mal, void *buf)
+{
+ struct emac_ethtool_regs_subhdr *hdr = buf;
+ struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
+ int i;
+
+ hdr->version = mal->version;
+ hdr->index = mal->index;
+
+ regs->tx_count = mal->num_tx_chans;
+ regs->rx_count = mal->num_rx_chans;
+
+ regs->cfg = get_mal_dcrn(mal, MAL_CFG);
+ regs->esr = get_mal_dcrn(mal, MAL_ESR);
+ regs->ier = get_mal_dcrn(mal, MAL_IER);
+ regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
+ regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
+ regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
+ regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
+ regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
+ regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
+ regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
+ regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);
+
+ for (i = 0; i < regs->tx_count; ++i)
+ regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));
+
+ for (i = 0; i < regs->rx_count; ++i) {
+ regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
+ regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
+ }
+ return regs + 1;
+}
+
+static int __devinit mal_probe(struct platform_device *ofdev)
+{
+ struct mal_instance *mal;
+ int err = 0, i, bd_size;
+ int index = mal_count++;
+ unsigned int dcr_base;
+ const u32 *prop;
+ u32 cfg;
+ unsigned long irqflags;
+ irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;
+
+ mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
+ if (!mal) {
+ printk(KERN_ERR
+ "mal%d: out of memory allocating MAL structure!\n",
+ index);
+ return -ENOMEM;
+ }
+ mal->index = index;
+ mal->ofdev = ofdev;
+ mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;
+
+ MAL_DBG(mal, "probe" NL);
+
+ prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
+ if (prop == NULL) {
+ printk(KERN_ERR
+ "mal%d: can't find MAL num-tx-chans property!\n",
+ index);
+ err = -ENODEV;
+ goto fail;
+ }
+ mal->num_tx_chans = prop[0];
+
+ prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
+ if (prop == NULL) {
+ printk(KERN_ERR
+ "mal%d: can't find MAL num-rx-chans property!\n",
+ index);
+ err = -ENODEV;
+ goto fail;
+ }
+ mal->num_rx_chans = prop[0];
+
+ dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
+ if (dcr_base == 0) {
+ printk(KERN_ERR
+ "mal%d: can't find DCR resource!\n", index);
+ err = -ENODEV;
+ goto fail;
+ }
+ mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
+ if (!DCR_MAP_OK(mal->dcr_host)) {
+ printk(KERN_ERR
+ "mal%d: failed to map DCRs !\n", index);
+ err = -ENODEV;
+ goto fail;
+ }
+
+ if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
+#if defined(CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT) && \
+ defined(CONFIG_IBM_EMAC_MAL_COMMON_ERR)
+ mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
+ MAL_FTR_COMMON_ERR_INT);
+#else
+ printk(KERN_ERR "%s: Support for 405EZ not enabled!\n",
+ ofdev->dev.of_node->full_name);
+ err = -ENODEV;
+ goto fail;
+#endif
+ }
+
+ mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
+ mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);
+
+ if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
+ mal->txde_irq = mal->rxde_irq = mal->serr_irq;
+ } else {
+ mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
+ mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
+ }
+
+ if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ ||
+ mal->serr_irq == NO_IRQ || mal->txde_irq == NO_IRQ ||
+ mal->rxde_irq == NO_IRQ) {
+ printk(KERN_ERR
+ "mal%d: failed to map interrupts !\n", index);
+ err = -ENODEV;
+ goto fail_unmap;
+ }
+
+ INIT_LIST_HEAD(&mal->poll_list);
+ INIT_LIST_HEAD(&mal->list);
+ spin_lock_init(&mal->lock);
+
+ init_dummy_netdev(&mal->dummy_dev);
+
+ netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll,
+ CONFIG_IBM_EMAC_POLL_WEIGHT);
+
+ /* Load power-on reset defaults */
+ mal_reset(mal);
+
+ /* Set the MAL configuration register */
+ cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT;
+ cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA;
+
+	/* Current Axon is not happy with a non-zero priority: it can
+	 * deadlock. Fix it up here.
+	 */
+ if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
+ cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);
+
+ /* Apply configuration */
+ set_mal_dcrn(mal, MAL_CFG, cfg);
+
+ /* Allocate space for BD rings */
+ BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32);
+ BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32);
+
+ bd_size = sizeof(struct mal_descriptor) *
+ (NUM_TX_BUFF * mal->num_tx_chans +
+ NUM_RX_BUFF * mal->num_rx_chans);
+ mal->bd_virt =
+ dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
+ GFP_KERNEL);
+ if (mal->bd_virt == NULL) {
+ printk(KERN_ERR
+ "mal%d: out of memory allocating RX/TX descriptors!\n",
+ index);
+ err = -ENOMEM;
+ goto fail_unmap;
+ }
+ memset(mal->bd_virt, 0, bd_size);
+
+ for (i = 0; i < mal->num_tx_chans; ++i)
+ set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
+ sizeof(struct mal_descriptor) *
+ mal_tx_bd_offset(mal, i));
+
+ for (i = 0; i < mal->num_rx_chans; ++i)
+ set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
+ sizeof(struct mal_descriptor) *
+ mal_rx_bd_offset(mal, i));
+
+ if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
+ irqflags = IRQF_SHARED;
+ hdlr_serr = hdlr_txde = hdlr_rxde = mal_int;
+ } else {
+ irqflags = 0;
+ hdlr_serr = mal_serr;
+ hdlr_txde = mal_txde;
+ hdlr_rxde = mal_rxde;
+ }
+
+ err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
+ if (err)
+ goto fail2;
+ err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
+ if (err)
+ goto fail3;
+ err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
+ if (err)
+ goto fail4;
+ err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
+ if (err)
+ goto fail5;
+ err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
+ if (err)
+ goto fail6;
+
+ /* Enable all MAL SERR interrupt sources */
+ if (mal->version == 2)
+ set_mal_dcrn(mal, MAL_IER, MAL2_IER_EVENTS);
+ else
+ set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS);
+
+ /* Enable EOB interrupt */
+ mal_enable_eob_irq(mal);
+
+ printk(KERN_INFO
+ "MAL v%d %s, %d TX channels, %d RX channels\n",
+ mal->version, ofdev->dev.of_node->full_name,
+ mal->num_tx_chans, mal->num_rx_chans);
+
+ /* Advertise this instance to the rest of the world */
+ wmb();
+ dev_set_drvdata(&ofdev->dev, mal);
+
+ mal_dbg_register(mal);
+
+ return 0;
+
+ fail6:
+ free_irq(mal->rxde_irq, mal);
+ fail5:
+ free_irq(mal->txeob_irq, mal);
+ fail4:
+ free_irq(mal->txde_irq, mal);
+ fail3:
+ free_irq(mal->serr_irq, mal);
+ fail2:
+ dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
+ fail_unmap:
+ dcr_unmap(mal->dcr_host, 0x100);
+ fail:
+ kfree(mal);
+
+ return err;
+}
+
+static int __devexit mal_remove(struct platform_device *ofdev)
+{
+ struct mal_instance *mal = dev_get_drvdata(&ofdev->dev);
+
+ MAL_DBG(mal, "remove" NL);
+
+ /* Synchronize with scheduled polling */
+ napi_disable(&mal->napi);
+
+ if (!list_empty(&mal->list)) {
+ /* This is *very* bad */
+ printk(KERN_EMERG
+ "mal%d: commac list is not empty on remove!\n",
+ mal->index);
+ WARN_ON(1);
+ }
+
+ dev_set_drvdata(&ofdev->dev, NULL);
+
+ free_irq(mal->serr_irq, mal);
+ free_irq(mal->txde_irq, mal);
+ free_irq(mal->txeob_irq, mal);
+ free_irq(mal->rxde_irq, mal);
+ free_irq(mal->rxeob_irq, mal);
+
+ mal_reset(mal);
+
+ mal_dbg_unregister(mal);
+
+ dma_free_coherent(&ofdev->dev,
+ sizeof(struct mal_descriptor) *
+ (NUM_TX_BUFF * mal->num_tx_chans +
+ NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
+ mal->bd_dma);
+ kfree(mal);
+
+ return 0;
+}
+
+static struct of_device_id mal_platform_match[] =
+{
+ {
+ .compatible = "ibm,mcmal",
+ },
+ {
+ .compatible = "ibm,mcmal2",
+ },
+ /* Backward compat */
+ {
+ .type = "mcmal-dma",
+ .compatible = "ibm,mcmal",
+ },
+ {
+ .type = "mcmal-dma",
+ .compatible = "ibm,mcmal2",
+ },
+ {},
+};
+
+static struct platform_driver mal_of_driver = {
+ .driver = {
+ .name = "mcmal",
+ .owner = THIS_MODULE,
+ .of_match_table = mal_platform_match,
+ },
+ .probe = mal_probe,
+ .remove = mal_remove,
+};
+
+int __init mal_init(void)
+{
+ return platform_driver_register(&mal_of_driver);
+}
+
+void mal_exit(void)
+{
+ platform_driver_unregister(&mal_of_driver);
+}
diff --git a/drivers/net/ethernet/ibm/emac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h
new file mode 100644
index 000000000000..d06f985bda32
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/mal.h
@@ -0,0 +1,316 @@
+/*
+ * drivers/net/ibm_newemac/mal.h
+ *
+ * Memory Access Layer (MAL) support
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * Based on the arch/ppc version of the driver:
+ *
+ * Copyright (c) 2004, 2005 Zultys Technologies.
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *
+ * Based on original work by
+ * Armin Kuster <akuster@mvista.com>
+ *      Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __IBM_NEWEMAC_MAL_H
+#define __IBM_NEWEMAC_MAL_H
+
+/*
+ * There are some variations on the MAL, we express them in this driver as
+ * MAL Version 1 and 2 though that doesn't match any IBM terminology.
+ *
+ * We call MAL 1 the version in 405GP, 405GPR, 405EP, 440EP, 440GR and
+ * NP405H.
+ *
+ * We call MAL 2 the version in 440GP, 440GX, 440SP, 440SPE and Axon
+ *
+ * The driver expects a "version" property in the emac node containing
+ * a number 1 or 2. New device-trees for EMAC capable platforms are thus
+ * required to include that when porting to arch/powerpc.
+ */
+
+/* MALx DCR registers */
+#define MAL_CFG 0x00
+#define MAL_CFG_SR 0x80000000
+#define MAL_CFG_PLBB 0x00004000
+#define MAL_CFG_OPBBL 0x00000080
+#define MAL_CFG_EOPIE 0x00000004
+#define MAL_CFG_LEA 0x00000002
+#define MAL_CFG_SD 0x00000001
+
+/* MAL V1 CFG bits */
+#define MAL1_CFG_PLBP_MASK 0x00c00000
+#define MAL1_CFG_PLBP_10 0x00800000
+#define MAL1_CFG_GA 0x00200000
+#define MAL1_CFG_OA 0x00100000
+#define MAL1_CFG_PLBLE 0x00080000
+#define MAL1_CFG_PLBT_MASK 0x00078000
+#define MAL1_CFG_DEFAULT (MAL1_CFG_PLBP_10 | MAL1_CFG_PLBT_MASK)
+
+/* MAL V2 CFG bits */
+#define MAL2_CFG_RPP_MASK 0x00c00000
+#define MAL2_CFG_RPP_10 0x00800000
+#define MAL2_CFG_RMBS_MASK 0x00300000
+#define MAL2_CFG_WPP_MASK 0x000c0000
+#define MAL2_CFG_WPP_10 0x00080000
+#define MAL2_CFG_WMBS_MASK 0x00030000
+#define MAL2_CFG_PLBLE 0x00008000
+#define MAL2_CFG_DEFAULT (MAL2_CFG_RMBS_MASK | MAL2_CFG_WMBS_MASK | \
+ MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10)
+
+#define MAL_ESR 0x01
+#define MAL_ESR_EVB 0x80000000
+#define MAL_ESR_CIDT 0x40000000
+#define MAL_ESR_CID_MASK 0x3e000000
+#define MAL_ESR_CID_SHIFT 25
+#define MAL_ESR_DE 0x00100000
+#define MAL_ESR_OTE 0x00040000
+#define MAL_ESR_OSE 0x00020000
+#define MAL_ESR_PEIN 0x00010000
+#define MAL_ESR_DEI 0x00000010
+#define MAL_ESR_OTEI 0x00000004
+#define MAL_ESR_OSEI 0x00000002
+#define MAL_ESR_PBEI 0x00000001
+
+/* MAL V1 ESR bits */
+#define MAL1_ESR_ONE 0x00080000
+#define MAL1_ESR_ONEI 0x00000008
+
+/* MAL V2 ESR bits */
+#define MAL2_ESR_PTE 0x00800000
+#define MAL2_ESR_PRE 0x00400000
+#define MAL2_ESR_PWE 0x00200000
+#define MAL2_ESR_PTEI 0x00000080
+#define MAL2_ESR_PREI 0x00000040
+#define MAL2_ESR_PWEI 0x00000020
+
+
+#define MAL_IER 0x02
+#define MAL_IER_DE 0x00000010
+#define MAL_IER_OTE 0x00000004
+#define MAL_IER_OE 0x00000002
+#define MAL_IER_PE 0x00000001
+/* MAL V1 IER bits */
+#define MAL1_IER_NWE 0x00000008
+#define MAL1_IER_SOC_EVENTS MAL1_IER_NWE
+#define MAL1_IER_EVENTS (MAL1_IER_SOC_EVENTS | MAL_IER_DE | \
+ MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
+
+/* MAL V2 IER bits */
+#define MAL2_IER_PT 0x00000080
+#define MAL2_IER_PRE 0x00000040
+#define MAL2_IER_PWE 0x00000020
+#define MAL2_IER_SOC_EVENTS (MAL2_IER_PT | MAL2_IER_PRE | MAL2_IER_PWE)
+#define MAL2_IER_EVENTS (MAL2_IER_SOC_EVENTS | MAL_IER_DE | \
+ MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
+
+
+#define MAL_TXCASR 0x04
+#define MAL_TXCARR 0x05
+#define MAL_TXEOBISR 0x06
+#define MAL_TXDEIR 0x07
+#define MAL_RXCASR 0x10
+#define MAL_RXCARR 0x11
+#define MAL_RXEOBISR 0x12
+#define MAL_RXDEIR 0x13
+#define MAL_TXCTPR(n) ((n) + 0x20)
+#define MAL_RXCTPR(n) ((n) + 0x40)
+#define MAL_RCBS(n) ((n) + 0x60)
+
+/* In reality MAL can handle TX buffers up to 4095 bytes long,
+ * but this isn't a good round number :) --ebs
+ */
+#define MAL_MAX_TX_SIZE 4080
+#define MAL_MAX_RX_SIZE 4080
+
+static inline int mal_rx_size(int len)
+{
+ len = (len + 0xf) & ~0xf;
+ return len > MAL_MAX_RX_SIZE ? MAL_MAX_RX_SIZE : len;
+}
+
+static inline int mal_tx_chunks(int len)
+{
+ return (len + MAL_MAX_TX_SIZE - 1) / MAL_MAX_TX_SIZE;
+}
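[Editor's note: worked numbers for the two helpers above, plain arithmetic from the code.]

	/* mal_tx_chunks(9000) == (9000 + 4079) / 4080 == 3 descriptors
	 * mal_rx_size(1522)  == (1522 + 15) & ~15   == 1536 bytes      */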
+
+#define MAL_CHAN_MASK(n) (0x80000000 >> (n))
+
+/* MAL Buffer Descriptor structure */
+struct mal_descriptor {
+ u16 ctrl; /* MAL / Commac status control bits */
+ u16 data_len; /* Max length is 4K-1 (12 bits) */
+ u32 data_ptr; /* pointer to actual data buffer */
+};
+
+/* the following defines are for the MadMAL status and control registers. */
+/* MADMAL transmit and receive status/control bits */
+#define MAL_RX_CTRL_EMPTY 0x8000
+#define MAL_RX_CTRL_WRAP 0x4000
+#define MAL_RX_CTRL_CM 0x2000
+#define MAL_RX_CTRL_LAST 0x1000
+#define MAL_RX_CTRL_FIRST 0x0800
+#define MAL_RX_CTRL_INTR 0x0400
+#define MAL_RX_CTRL_SINGLE (MAL_RX_CTRL_LAST | MAL_RX_CTRL_FIRST)
+#define MAL_IS_SINGLE_RX(ctrl) (((ctrl) & MAL_RX_CTRL_SINGLE) == MAL_RX_CTRL_SINGLE)
+
+#define MAL_TX_CTRL_READY 0x8000
+#define MAL_TX_CTRL_WRAP 0x4000
+#define MAL_TX_CTRL_CM 0x2000
+#define MAL_TX_CTRL_LAST 0x1000
+#define MAL_TX_CTRL_INTR 0x0400
+
+struct mal_commac_ops {
+ void (*poll_tx) (void *dev);
+ int (*poll_rx) (void *dev, int budget);
+ int (*peek_rx) (void *dev);
+ void (*rxde) (void *dev);
+};
+
+struct mal_commac {
+ struct mal_commac_ops *ops;
+ void *dev;
+ struct list_head poll_list;
+ long flags;
+#define MAL_COMMAC_RX_STOPPED 0
+#define MAL_COMMAC_POLL_DISABLED 1
+ u32 tx_chan_mask;
+ u32 rx_chan_mask;
+ struct list_head list;
+};
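[Editor's note: to make the contract concrete, a minimal hypothetical client would fill in the ops table and claim its channels before registering. Everything prefixed my_ below is a placeholder; only the mal_* names and MAL_CHAN_MASK() come from this header.]

	/* Sketch of a hypothetical MAL client ("commac"). */
	static struct mal_commac_ops my_ops = {
		.poll_tx = my_poll_tx,
		.poll_rx = my_poll_rx,
		.peek_rx = my_peek_rx,
		.rxde    = my_rxde,
	};

	static int my_attach(struct mal_instance *mal,
			     struct mal_commac *commac, void *dev)
	{
		commac->ops = &my_ops;
		commac->dev = dev;
		commac->tx_chan_mask = MAL_CHAN_MASK(0); /* claim TX channel 0 */
		commac->rx_chan_mask = MAL_CHAN_MASK(0); /* claim RX channel 0 */
		return mal_register_commac(mal, commac);
	}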
+
+struct mal_instance {
+ int version;
+ dcr_host_t dcr_host;
+
+ int num_tx_chans; /* Number of TX channels */
+ int num_rx_chans; /* Number of RX channels */
+ int txeob_irq; /* TX End Of Buffer IRQ */
+ int rxeob_irq; /* RX End Of Buffer IRQ */
+ int txde_irq; /* TX Descriptor Error IRQ */
+ int rxde_irq; /* RX Descriptor Error IRQ */
+ int serr_irq; /* MAL System Error IRQ */
+
+ struct list_head poll_list;
+ struct napi_struct napi;
+
+ struct list_head list;
+ u32 tx_chan_mask;
+ u32 rx_chan_mask;
+
+ dma_addr_t bd_dma;
+ struct mal_descriptor *bd_virt;
+
+ struct platform_device *ofdev;
+ int index;
+ spinlock_t lock;
+
+ struct net_device dummy_dev;
+
+ unsigned int features;
+};
+
+static inline u32 get_mal_dcrn(struct mal_instance *mal, int reg)
+{
+ return dcr_read(mal->dcr_host, reg);
+}
+
+static inline void set_mal_dcrn(struct mal_instance *mal, int reg, u32 val)
+{
+ dcr_write(mal->dcr_host, reg, val);
+}
+
+/* Features of various MAL implementations */
+
+/* Set if you have interrupt coalescing and you have to clear the SDR
+ * register for TXEOB and RXEOB interrupts to work
+ */
+#define MAL_FTR_CLEAR_ICINTSTAT 0x00000001
+
+/* Set if your MAL has SERR, TXDE, and RXDE OR'd into a single UIC
+ * interrupt
+ */
+#define MAL_FTR_COMMON_ERR_INT 0x00000002
+
+enum {
+ MAL_FTRS_ALWAYS = 0,
+
+ MAL_FTRS_POSSIBLE =
+#ifdef CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT
+ MAL_FTR_CLEAR_ICINTSTAT |
+#endif
+#ifdef CONFIG_IBM_EMAC_MAL_COMMON_ERR
+ MAL_FTR_COMMON_ERR_INT |
+#endif
+ 0,
+};
+
+static inline int mal_has_feature(struct mal_instance *dev,
+ unsigned long feature)
+{
+ return (MAL_FTRS_ALWAYS & feature) ||
+ (MAL_FTRS_POSSIBLE & dev->features & feature);
+}
+
+/* Register MAL devices */
+int mal_init(void);
+void mal_exit(void);
+
+int mal_register_commac(struct mal_instance *mal,
+ struct mal_commac *commac);
+void mal_unregister_commac(struct mal_instance *mal,
+ struct mal_commac *commac);
+int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size);
+
+/* Returns BD ring offset for a particular channel
+ * (in 'struct mal_descriptor' elements)
+ */
+int mal_tx_bd_offset(struct mal_instance *mal, int channel);
+int mal_rx_bd_offset(struct mal_instance *mal, int channel);
+
+void mal_enable_tx_channel(struct mal_instance *mal, int channel);
+void mal_disable_tx_channel(struct mal_instance *mal, int channel);
+void mal_enable_rx_channel(struct mal_instance *mal, int channel);
+void mal_disable_rx_channel(struct mal_instance *mal, int channel);
+
+void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac);
+void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac);
+
+/* Add/remove EMAC to/from MAL polling list */
+void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac);
+void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac);
+
+/* Ethtool MAL registers */
+struct mal_regs {
+ u32 tx_count;
+ u32 rx_count;
+
+ u32 cfg;
+ u32 esr;
+ u32 ier;
+ u32 tx_casr;
+ u32 tx_carr;
+ u32 tx_eobisr;
+ u32 tx_deir;
+ u32 rx_casr;
+ u32 rx_carr;
+ u32 rx_eobisr;
+ u32 rx_deir;
+ u32 tx_ctpr[32];
+ u32 rx_ctpr[32];
+ u32 rcbs[32];
+};
+
+int mal_get_regs_len(struct mal_instance *mal);
+void *mal_dump_regs(struct mal_instance *mal, void *buf);
+
+#endif /* __IBM_NEWEMAC_MAL_H */
diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c
new file mode 100644
index 000000000000..ab4e5969fe65
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/phy.c
@@ -0,0 +1,541 @@
+/*
+ * drivers/net/ibm_newemac/phy.c
+ *
+ * Driver for PowerPC 4xx on-chip ethernet controller, PHY support.
+ * Borrowed from sungem_phy.c, though I only kept the generic MII
+ * driver for now.
+ *
+ * This file should be shared with other drivers or eventually
+ * merged as the "low level" part of miilib
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * Based on the arch/ppc version of the driver:
+ *
+ * (c) 2003, Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ * (c) 2004-2005, Eugene Surovegin <ebs@ebshome.net>
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+
+#include "emac.h"
+#include "phy.h"
+
+#define phy_read _phy_read
+#define phy_write _phy_write
+
+static inline int _phy_read(struct mii_phy *phy, int reg)
+{
+ return phy->mdio_read(phy->dev, phy->address, reg);
+}
+
+static inline void _phy_write(struct mii_phy *phy, int reg, int val)
+{
+ phy->mdio_write(phy->dev, phy->address, reg, val);
+}
+
+static inline int gpcs_phy_read(struct mii_phy *phy, int reg)
+{
+ return phy->mdio_read(phy->dev, phy->gpcs_address, reg);
+}
+
+static inline void gpcs_phy_write(struct mii_phy *phy, int reg, int val)
+{
+ phy->mdio_write(phy->dev, phy->gpcs_address, reg, val);
+}
+
+int emac_mii_reset_phy(struct mii_phy *phy)
+{
+ int val;
+ int limit = 10000;
+
+ val = phy_read(phy, MII_BMCR);
+ val &= ~(BMCR_ISOLATE | BMCR_ANENABLE);
+ val |= BMCR_RESET;
+ phy_write(phy, MII_BMCR, val);
+
+ udelay(300);
+
+ while (--limit) {
+ val = phy_read(phy, MII_BMCR);
+ if (val >= 0 && (val & BMCR_RESET) == 0)
+ break;
+ udelay(10);
+ }
+ if ((val & BMCR_ISOLATE) && limit > 0)
+ phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
+
+ return limit <= 0;
+}
+
+int emac_mii_reset_gpcs(struct mii_phy *phy)
+{
+ int val;
+ int limit = 10000;
+
+ val = gpcs_phy_read(phy, MII_BMCR);
+ val &= ~(BMCR_ISOLATE | BMCR_ANENABLE);
+ val |= BMCR_RESET;
+ gpcs_phy_write(phy, MII_BMCR, val);
+
+ udelay(300);
+
+ while (--limit) {
+ val = gpcs_phy_read(phy, MII_BMCR);
+ if (val >= 0 && (val & BMCR_RESET) == 0)
+ break;
+ udelay(10);
+ }
+ if ((val & BMCR_ISOLATE) && limit > 0)
+ gpcs_phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
+
+ if (limit > 0 && phy->mode == PHY_MODE_SGMII) {
+ /* Configure GPCS interface to recommended setting for SGMII */
+ gpcs_phy_write(phy, 0x04, 0x8120); /* AsymPause, FDX */
+ gpcs_phy_write(phy, 0x07, 0x2801); /* msg_pg, toggle */
+ gpcs_phy_write(phy, 0x00, 0x0140); /* 1Gbps, FDX */
+ }
+
+ return limit <= 0;
+}
+
+static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
+{
+ int ctl, adv;
+
+ phy->autoneg = AUTONEG_ENABLE;
+ phy->speed = SPEED_10;
+ phy->duplex = DUPLEX_HALF;
+ phy->pause = phy->asym_pause = 0;
+ phy->advertising = advertise;
+
+ ctl = phy_read(phy, MII_BMCR);
+ if (ctl < 0)
+ return ctl;
+ ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
+
+ /* First clear the PHY */
+ phy_write(phy, MII_BMCR, ctl);
+
+ /* Setup standard advertise */
+ adv = phy_read(phy, MII_ADVERTISE);
+ if (adv < 0)
+ return adv;
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
+ ADVERTISE_PAUSE_ASYM);
+ if (advertise & ADVERTISED_10baseT_Half)
+ adv |= ADVERTISE_10HALF;
+ if (advertise & ADVERTISED_10baseT_Full)
+ adv |= ADVERTISE_10FULL;
+ if (advertise & ADVERTISED_100baseT_Half)
+ adv |= ADVERTISE_100HALF;
+ if (advertise & ADVERTISED_100baseT_Full)
+ adv |= ADVERTISE_100FULL;
+ if (advertise & ADVERTISED_Pause)
+ adv |= ADVERTISE_PAUSE_CAP;
+ if (advertise & ADVERTISED_Asym_Pause)
+ adv |= ADVERTISE_PAUSE_ASYM;
+ phy_write(phy, MII_ADVERTISE, adv);
+
+ if (phy->features &
+ (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) {
+ adv = phy_read(phy, MII_CTRL1000);
+ if (adv < 0)
+ return adv;
+ adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+ if (advertise & ADVERTISED_1000baseT_Full)
+ adv |= ADVERTISE_1000FULL;
+ if (advertise & ADVERTISED_1000baseT_Half)
+ adv |= ADVERTISE_1000HALF;
+ phy_write(phy, MII_CTRL1000, adv);
+ }
+
+ /* Start/Restart aneg */
+ ctl = phy_read(phy, MII_BMCR);
+ ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ phy_write(phy, MII_BMCR, ctl);
+
+ return 0;
+}
+
+static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
+{
+ int ctl;
+
+ phy->autoneg = AUTONEG_DISABLE;
+ phy->speed = speed;
+ phy->duplex = fd;
+ phy->pause = phy->asym_pause = 0;
+
+ ctl = phy_read(phy, MII_BMCR);
+ if (ctl < 0)
+ return ctl;
+ ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
+
+ /* First clear the PHY */
+ phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
+
+ /* Select speed & duplex */
+ switch (speed) {
+ case SPEED_10:
+ break;
+ case SPEED_100:
+ ctl |= BMCR_SPEED100;
+ break;
+ case SPEED_1000:
+ ctl |= BMCR_SPEED1000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (fd == DUPLEX_FULL)
+ ctl |= BMCR_FULLDPLX;
+ phy_write(phy, MII_BMCR, ctl);
+
+ return 0;
+}
+
+static int genmii_poll_link(struct mii_phy *phy)
+{
+ int status;
+
+ /* Clear latched value with dummy read */
+ phy_read(phy, MII_BMSR);
+ status = phy_read(phy, MII_BMSR);
+ if (status < 0 || (status & BMSR_LSTATUS) == 0)
+ return 0;
+ if (phy->autoneg == AUTONEG_ENABLE && !(status & BMSR_ANEGCOMPLETE))
+ return 0;
+ return 1;
+}
+
+static int genmii_read_link(struct mii_phy *phy)
+{
+ if (phy->autoneg == AUTONEG_ENABLE) {
+		int glpa = 0;
+		int advert = phy_read(phy, MII_ADVERTISE);
+		int lpa = phy_read(phy, MII_LPA);
+
+		if (lpa < 0 || advert < 0)
+			return lpa < 0 ? lpa : advert;
+
+		/* Only consider modes both link partners advertised */
+		lpa &= advert;
+
+ if (phy->features &
+ (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) {
+ int adv = phy_read(phy, MII_CTRL1000);
+ glpa = phy_read(phy, MII_STAT1000);
+
+			if (glpa < 0 || adv < 0)
+				return glpa < 0 ? glpa : adv;
+
+ glpa &= adv << 2;
+ }
+
+ phy->speed = SPEED_10;
+ phy->duplex = DUPLEX_HALF;
+ phy->pause = phy->asym_pause = 0;
+
+ if (glpa & (LPA_1000FULL | LPA_1000HALF)) {
+ phy->speed = SPEED_1000;
+ if (glpa & LPA_1000FULL)
+ phy->duplex = DUPLEX_FULL;
+ } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
+ phy->speed = SPEED_100;
+ if (lpa & LPA_100FULL)
+ phy->duplex = DUPLEX_FULL;
+ } else if (lpa & LPA_10FULL)
+ phy->duplex = DUPLEX_FULL;
+
+ if (phy->duplex == DUPLEX_FULL) {
+ phy->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
+ phy->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
+ }
+ } else {
+ int bmcr = phy_read(phy, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+ if (bmcr & BMCR_FULLDPLX)
+ phy->duplex = DUPLEX_FULL;
+ else
+ phy->duplex = DUPLEX_HALF;
+ if (bmcr & BMCR_SPEED1000)
+ phy->speed = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ phy->speed = SPEED_100;
+ else
+ phy->speed = SPEED_10;
+
+ phy->pause = phy->asym_pause = 0;
+ }
+ return 0;
+}
+
+/* Generic implementation for most 10/100/1000 PHYs */
+static struct mii_phy_ops generic_phy_ops = {
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = genmii_read_link
+};
+
+static struct mii_phy_def genmii_phy_def = {
+ .phy_id = 0x00000000,
+ .phy_id_mask = 0x00000000,
+ .name = "Generic MII",
+ .ops = &generic_phy_ops
+};
+
+/* CIS8201 */
+#define MII_CIS8201_10BTCSR 0x16
+#define TENBTCSR_ECHO_DISABLE 0x2000
+#define MII_CIS8201_EPCR 0x17
+#define EPCR_MODE_MASK 0x3000
+#define EPCR_GMII_MODE 0x0000
+#define EPCR_RGMII_MODE 0x1000
+#define EPCR_TBI_MODE 0x2000
+#define EPCR_RTBI_MODE 0x3000
+#define MII_CIS8201_ACSR 0x1c
+#define ACSR_PIN_PRIO_SELECT 0x0004
+
+static int cis8201_init(struct mii_phy *phy)
+{
+ int epcr;
+
+ epcr = phy_read(phy, MII_CIS8201_EPCR);
+ if (epcr < 0)
+ return epcr;
+
+ epcr &= ~EPCR_MODE_MASK;
+
+ switch (phy->mode) {
+ case PHY_MODE_TBI:
+ epcr |= EPCR_TBI_MODE;
+ break;
+ case PHY_MODE_RTBI:
+ epcr |= EPCR_RTBI_MODE;
+ break;
+ case PHY_MODE_GMII:
+ epcr |= EPCR_GMII_MODE;
+ break;
+ case PHY_MODE_RGMII:
+ default:
+ epcr |= EPCR_RGMII_MODE;
+ }
+
+ phy_write(phy, MII_CIS8201_EPCR, epcr);
+
+ /* MII regs override strap pins */
+ phy_write(phy, MII_CIS8201_ACSR,
+ phy_read(phy, MII_CIS8201_ACSR) | ACSR_PIN_PRIO_SELECT);
+
+ /* Disable TX_EN -> CRS echo mode, otherwise 10/HDX doesn't work */
+ phy_write(phy, MII_CIS8201_10BTCSR,
+ phy_read(phy, MII_CIS8201_10BTCSR) | TENBTCSR_ECHO_DISABLE);
+
+ return 0;
+}
+
+static struct mii_phy_ops cis8201_phy_ops = {
+ .init = cis8201_init,
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = genmii_read_link
+};
+
+static struct mii_phy_def cis8201_phy_def = {
+ .phy_id = 0x000fc410,
+ .phy_id_mask = 0x000ffff0,
+ .name = "CIS8201 Gigabit Ethernet",
+ .ops = &cis8201_phy_ops
+};
+
+static struct mii_phy_def bcm5248_phy_def = {
+ .phy_id = 0x0143bc00,
+ .phy_id_mask = 0x0ffffff0,
+ .name = "BCM5248 10/100 SMII Ethernet",
+ .ops = &generic_phy_ops
+};
+
+static int m88e1111_init(struct mii_phy *phy)
+{
+ pr_debug("%s: Marvell 88E1111 Ethernet\n", __func__);
+	phy_write(phy, 0x14, 0x0ce3);	/* vendor-specific setup */
+	phy_write(phy, 0x18, 0x4101);	/* vendor-specific setup */
+	phy_write(phy, 0x09, 0x0e00);	/* MII_CTRL1000 */
+	phy_write(phy, 0x04, 0x01e1);	/* MII_ADVERTISE: 10/100, CSMA */
+	phy_write(phy, 0x00, 0x9140);	/* BMCR: reset, 1000/FDX, aneg on */
+	phy_write(phy, 0x00, 0x1140);	/* BMCR: as above, reset cleared */
+
+ return 0;
+}
+
+static int m88e1112_init(struct mii_phy *phy)
+{
+ /*
+ * Marvell 88E1112 PHY needs to have the SGMII MAC
+	 * interface (page 2) properly configured to
+ * communicate with the 460EX/GT GPCS interface.
+ */
+
+ u16 reg_short;
+
+ pr_debug("%s: Marvell 88E1112 Ethernet\n", __func__);
+
+ /* Set access to Page 2 */
+ phy_write(phy, 0x16, 0x0002);
+
+ phy_write(phy, 0x00, 0x0040); /* 1Gbps */
+ reg_short = (u16)(phy_read(phy, 0x1a));
+ reg_short |= 0x8000; /* bypass Auto-Negotiation */
+ phy_write(phy, 0x1a, reg_short);
+ emac_mii_reset_phy(phy); /* reset MAC interface */
+
+ /* Reset access to Page 0 */
+ phy_write(phy, 0x16, 0x0000);
+
+ return 0;
+}
+
+static int et1011c_init(struct mii_phy *phy)
+{
+ u16 reg_short;
+
+ reg_short = (u16)(phy_read(phy, 0x16));
+ reg_short &= ~(0x7);
+	reg_short |= 0x6; /* RGMII Trace Delay */
+ phy_write(phy, 0x16, reg_short);
+
+ reg_short = (u16)(phy_read(phy, 0x17));
+ reg_short &= ~(0x40);
+ phy_write(phy, 0x17, reg_short);
+
+ phy_write(phy, 0x1c, 0x74f0);
+ return 0;
+}
+
+static struct mii_phy_ops et1011c_phy_ops = {
+ .init = et1011c_init,
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = genmii_read_link
+};
+
+static struct mii_phy_def et1011c_phy_def = {
+ .phy_id = 0x0282f000,
+ .phy_id_mask = 0x0fffff00,
+ .name = "ET1011C Gigabit Ethernet",
+ .ops = &et1011c_phy_ops
+};
+
+static struct mii_phy_ops m88e1111_phy_ops = {
+ .init = m88e1111_init,
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = genmii_read_link
+};
+
+static struct mii_phy_def m88e1111_phy_def = {
+ .phy_id = 0x01410CC0,
+ .phy_id_mask = 0x0ffffff0,
+ .name = "Marvell 88E1111 Ethernet",
+ .ops = &m88e1111_phy_ops,
+};
+
+static struct mii_phy_ops m88e1112_phy_ops = {
+ .init = m88e1112_init,
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = genmii_read_link
+};
+
+static struct mii_phy_def m88e1112_phy_def = {
+ .phy_id = 0x01410C90,
+ .phy_id_mask = 0x0ffffff0,
+ .name = "Marvell 88E1112 Ethernet",
+ .ops = &m88e1112_phy_ops,
+};
+
+static struct mii_phy_def *mii_phy_table[] = {
+ &et1011c_phy_def,
+ &cis8201_phy_def,
+ &bcm5248_phy_def,
+ &m88e1111_phy_def,
+ &m88e1112_phy_def,
+ &genmii_phy_def,
+ NULL
+};
+
+int emac_mii_phy_probe(struct mii_phy *phy, int address)
+{
+ struct mii_phy_def *def;
+ int i;
+ u32 id;
+
+ phy->autoneg = AUTONEG_DISABLE;
+ phy->advertising = 0;
+ phy->address = address;
+ phy->speed = SPEED_10;
+ phy->duplex = DUPLEX_HALF;
+ phy->pause = phy->asym_pause = 0;
+
+ /* Take PHY out of isolate mode and reset it. */
+ if (emac_mii_reset_phy(phy))
+ return -ENODEV;
+
+ /* Read ID and find matching entry */
+ id = (phy_read(phy, MII_PHYSID1) << 16) | phy_read(phy, MII_PHYSID2);
+ for (i = 0; (def = mii_phy_table[i]) != NULL; i++)
+ if ((id & def->phy_id_mask) == def->phy_id)
+ break;
+ /* Should never be NULL (we have a generic entry), but... */
+ if (!def)
+ return -ENODEV;
+
+ phy->def = def;
+
+ /* Determine PHY features if needed */
+ phy->features = def->features;
+ if (!phy->features) {
+ u16 bmsr = phy_read(phy, MII_BMSR);
+ if (bmsr & BMSR_ANEGCAPABLE)
+ phy->features |= SUPPORTED_Autoneg;
+ if (bmsr & BMSR_10HALF)
+ phy->features |= SUPPORTED_10baseT_Half;
+ if (bmsr & BMSR_10FULL)
+ phy->features |= SUPPORTED_10baseT_Full;
+ if (bmsr & BMSR_100HALF)
+ phy->features |= SUPPORTED_100baseT_Half;
+ if (bmsr & BMSR_100FULL)
+ phy->features |= SUPPORTED_100baseT_Full;
+ if (bmsr & BMSR_ESTATEN) {
+ u16 esr = phy_read(phy, MII_ESTATUS);
+ if (esr & ESTATUS_1000_TFULL)
+ phy->features |= SUPPORTED_1000baseT_Full;
+ if (esr & ESTATUS_1000_THALF)
+ phy->features |= SUPPORTED_1000baseT_Half;
+ }
+ phy->features |= SUPPORTED_MII;
+ }
+
+ /* Setup default advertising */
+ phy->advertising = phy->features;
+
+ return 0;
+}
+
+MODULE_LICENSE("GPL");
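The probe path above fills in everything past the MDIO accessors: it resets
the PHY, matches the PHYSID1/PHYSID2 pair against mii_phy_table, and derives
the feature mask from BMSR/ESTATUS when the table entry leaves it at zero.
A hedged sketch of the calling convention (the two MDIO stubs and
attach_phy() are illustrative, not taken from the driver):

	static int my_mdio_read(struct net_device *dev, int addr, int reg)
	{
		return 0xffff;	/* stand-in for the MAC's MDIO bus access */
	}

	static void my_mdio_write(struct net_device *dev, int addr, int reg,
				  int val)
	{
		/* stand-in for the MAC's MDIO bus access */
	}

	static int attach_phy(struct net_device *ndev, struct mii_phy *phy,
			      int addr)
	{
		phy->dev = ndev;
		phy->mdio_read = my_mdio_read;
		phy->mdio_write = my_mdio_write;

		if (emac_mii_phy_probe(phy, addr))
			return -ENODEV;

		/* def, features and advertising are now populated */
		return phy->def->ops->setup_aneg(phy, phy->advertising);
	}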
diff --git a/drivers/net/ethernet/ibm/emac/phy.h b/drivers/net/ethernet/ibm/emac/phy.h
new file mode 100644
index 000000000000..5d2bf4cbe50b
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/phy.h
@@ -0,0 +1,87 @@
+/*
+ * drivers/net/ibm_newemac/phy.h
+ *
+ * Driver for PowerPC 4xx on-chip ethernet controller, PHY support
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * Based on the arch/ppc version of the driver:
+ *
+ * Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * February 2003
+ *
+ * Minor additions by Eugene Surovegin <ebs@ebshome.net>, 2004
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file basically duplicates sungem_phy.{c,h} with different PHYs
+ * supported. I'm looking into merging that in a single mii layer more
+ * flexible than mii.c
+ */
+
+#ifndef __IBM_NEWEMAC_PHY_H
+#define __IBM_NEWEMAC_PHY_H
+
+struct mii_phy;
+
+/* Operations supported by any kind of PHY */
+struct mii_phy_ops {
+	int (*init)(struct mii_phy *phy);
+	int (*suspend)(struct mii_phy *phy, int wol_options);
+	int (*setup_aneg)(struct mii_phy *phy, u32 advertise);
+	int (*setup_forced)(struct mii_phy *phy, int speed, int fd);
+	int (*poll_link)(struct mii_phy *phy);
+	int (*read_link)(struct mii_phy *phy);
+};
+
+/* Structure used to statically define an mii/gii based PHY */
+struct mii_phy_def {
+ u32 phy_id; /* Concatenated ID1 << 16 | ID2 */
+ u32 phy_id_mask; /* Significant bits */
+ u32 features; /* Ethtool SUPPORTED_* defines or
+ 0 for autodetect */
+ int magic_aneg; /* Autoneg does all speed test for us */
+ const char *name;
+ const struct mii_phy_ops *ops;
+};
+
+/* An instance of a PHY, partially borrowed from mii_if_info */
+struct mii_phy {
+ struct mii_phy_def *def;
+ u32 advertising; /* Ethtool ADVERTISED_* defines */
+ u32 features; /* Copied from mii_phy_def.features
+				   or determined automatically */
+ int address; /* PHY address */
+ int mode; /* PHY mode */
+ int gpcs_address; /* GPCS PHY address */
+
+ /* 1: autoneg enabled, 0: disabled */
+ int autoneg;
+
+ /* forced speed & duplex (no autoneg)
+ * partner speed & duplex & pause (autoneg)
+ */
+ int speed;
+ int duplex;
+ int pause;
+ int asym_pause;
+
+ /* Provided by host chip */
+ struct net_device *dev;
+	int (*mdio_read)(struct net_device *dev, int addr, int reg);
+	void (*mdio_write)(struct net_device *dev, int addr, int reg,
+			   int val);
+};
+
+/* Pass in a struct mii_phy with dev, mdio_read and mdio_write
+ * filled, the remaining fields will be filled on return
+ */
+int emac_mii_phy_probe(struct mii_phy *phy, int address);
+int emac_mii_reset_phy(struct mii_phy *phy);
+int emac_mii_reset_gpcs(struct mii_phy *phy);
+
+#endif /* __IBM_NEWEMAC_PHY_H */
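The phy_id/phy_id_mask pair in mii_phy_def drives the table match in
emac_mii_phy_probe(): the 32-bit ID is PHYSID1 in the high half and PHYSID2
in the low half, and the mask typically strips the revision nibble. A worked
example using the Marvell 88E1111 entry from phy.c (the sample register
values are made up):

	u32 id = (0x0141 << 16) | 0x0cc2;	/* PHYSID1:PHYSID2 = 0x01410cc2 */
	u32 hit = (id & 0x0ffffff0) == 0x01410cc0;	/* true: revision masked off */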
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
new file mode 100644
index 000000000000..4fa53f3def64
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -0,0 +1,338 @@
+/*
+ * drivers/net/ibm_newemac/rgmii.c
+ *
+ * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support.
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * Based on the arch/ppc version of the driver:
+ *
+ * Copyright (c) 2004, 2005 Zultys Technologies.
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *
+ * Based on original work by
+ * Matt Porter <mporter@kernel.crashing.org>
+ * Copyright 2004 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/ethtool.h>
+#include <asm/io.h>
+
+#include "emac.h"
+#include "debug.h"
+
+/* XXX FIXME: Axon seems to support only a subset of the RGMII modes,
+ * so we need to take that into account and possibly change some of
+ * the bit settings below that don't seem to quite match the Axon spec.
+ */
+
+/* RGMIIx_FER */
+#define RGMII_FER_MASK(idx) (0x7 << ((idx) * 4))
+#define RGMII_FER_RTBI(idx) (0x4 << ((idx) * 4))
+#define RGMII_FER_RGMII(idx) (0x5 << ((idx) * 4))
+#define RGMII_FER_TBI(idx) (0x6 << ((idx) * 4))
+#define RGMII_FER_GMII(idx) (0x7 << ((idx) * 4))
+#define RGMII_FER_MII(idx) RGMII_FER_GMII(idx)
+
+/* RGMIIx_SSR */
+#define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8))
+#define RGMII_SSR_100(idx) (0x2 << ((idx) * 8))
+#define RGMII_SSR_1000(idx) (0x4 << ((idx) * 8))
+
+/* RGMII bridge supports only MII, GMII/TBI and RGMII/RTBI PHYs */
+static inline int rgmii_valid_mode(int phy_mode)
+{
+ return phy_mode == PHY_MODE_GMII ||
+ phy_mode == PHY_MODE_MII ||
+ phy_mode == PHY_MODE_RGMII ||
+ phy_mode == PHY_MODE_TBI ||
+ phy_mode == PHY_MODE_RTBI;
+}
+
+static inline const char *rgmii_mode_name(int mode)
+{
+ switch (mode) {
+ case PHY_MODE_RGMII:
+ return "RGMII";
+ case PHY_MODE_TBI:
+ return "TBI";
+ case PHY_MODE_GMII:
+ return "GMII";
+ case PHY_MODE_MII:
+ return "MII";
+ case PHY_MODE_RTBI:
+ return "RTBI";
+ default:
+ BUG();
+ }
+}
+
+static inline u32 rgmii_mode_mask(int mode, int input)
+{
+ switch (mode) {
+ case PHY_MODE_RGMII:
+ return RGMII_FER_RGMII(input);
+ case PHY_MODE_TBI:
+ return RGMII_FER_TBI(input);
+ case PHY_MODE_GMII:
+ return RGMII_FER_GMII(input);
+ case PHY_MODE_MII:
+ return RGMII_FER_MII(input);
+ case PHY_MODE_RTBI:
+ return RGMII_FER_RTBI(input);
+ default:
+ BUG();
+ }
+}
+
+int __devinit rgmii_attach(struct platform_device *ofdev, int input, int mode)
+{
+ struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+ struct rgmii_regs __iomem *p = dev->base;
+
+ RGMII_DBG(dev, "attach(%d)" NL, input);
+
+ /* Check if we need to attach to a RGMII */
+ if (input < 0 || !rgmii_valid_mode(mode)) {
+		printk(KERN_ERR "%s: unsupported settings!\n",
+ ofdev->dev.of_node->full_name);
+ return -ENODEV;
+ }
+
+ mutex_lock(&dev->lock);
+
+ /* Enable this input */
+ out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input));
+
+ printk(KERN_NOTICE "%s: input %d in %s mode\n",
+ ofdev->dev.of_node->full_name, input, rgmii_mode_name(mode));
+
+ ++dev->users;
+
+ mutex_unlock(&dev->lock);
+
+ return 0;
+}
+
+void rgmii_set_speed(struct platform_device *ofdev, int input, int speed)
+{
+ struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+ struct rgmii_regs __iomem *p = dev->base;
+ u32 ssr;
+
+ mutex_lock(&dev->lock);
+
+ ssr = in_be32(&p->ssr) & ~RGMII_SSR_MASK(input);
+
+ RGMII_DBG(dev, "speed(%d, %d)" NL, input, speed);
+
+ if (speed == SPEED_1000)
+ ssr |= RGMII_SSR_1000(input);
+ else if (speed == SPEED_100)
+ ssr |= RGMII_SSR_100(input);
+
+ out_be32(&p->ssr, ssr);
+
+ mutex_unlock(&dev->lock);
+}
+
+void rgmii_get_mdio(struct platform_device *ofdev, int input)
+{
+ struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+ struct rgmii_regs __iomem *p = dev->base;
+ u32 fer;
+
+ RGMII_DBG2(dev, "get_mdio(%d)" NL, input);
+
+ if (!(dev->flags & EMAC_RGMII_FLAG_HAS_MDIO))
+ return;
+
+ mutex_lock(&dev->lock);
+
+ fer = in_be32(&p->fer);
+ fer |= 0x00080000u >> input;
+ out_be32(&p->fer, fer);
+ (void)in_be32(&p->fer);
+
+ DBG2(dev, " fer = 0x%08x\n", fer);
+}
+
+void rgmii_put_mdio(struct platform_device *ofdev, int input)
+{
+ struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+ struct rgmii_regs __iomem *p = dev->base;
+ u32 fer;
+
+ RGMII_DBG2(dev, "put_mdio(%d)" NL, input);
+
+ if (!(dev->flags & EMAC_RGMII_FLAG_HAS_MDIO))
+ return;
+
+ fer = in_be32(&p->fer);
+ fer &= ~(0x00080000u >> input);
+ out_be32(&p->fer, fer);
+ (void)in_be32(&p->fer);
+
+ DBG2(dev, " fer = 0x%08x\n", fer);
+
+ mutex_unlock(&dev->lock);
+}
+
+void rgmii_detach(struct platform_device *ofdev, int input)
+{
+ struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+ struct rgmii_regs __iomem *p;
+
+ BUG_ON(!dev || dev->users == 0);
+ p = dev->base;
+
+ mutex_lock(&dev->lock);
+
+ RGMII_DBG(dev, "detach(%d)" NL, input);
+
+ /* Disable this input */
+ out_be32(&p->fer, in_be32(&p->fer) & ~RGMII_FER_MASK(input));
+
+ --dev->users;
+
+ mutex_unlock(&dev->lock);
+}
+
+int rgmii_get_regs_len(struct platform_device *ofdev)
+{
+ return sizeof(struct emac_ethtool_regs_subhdr) +
+ sizeof(struct rgmii_regs);
+}
+
+void *rgmii_dump_regs(struct platform_device *ofdev, void *buf)
+{
+ struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+ struct emac_ethtool_regs_subhdr *hdr = buf;
+ struct rgmii_regs *regs = (struct rgmii_regs *)(hdr + 1);
+
+ hdr->version = 0;
+	hdr->index = 0; /* for now; if there are chips with more than
+			 * one RGMII, we'll add a cell_index like we
+			 * do for EMAC
+			 */
+ memcpy_fromio(regs, dev->base, sizeof(struct rgmii_regs));
+ return regs + 1;
+}
+
+static int __devinit rgmii_probe(struct platform_device *ofdev)
+{
+ struct device_node *np = ofdev->dev.of_node;
+ struct rgmii_instance *dev;
+ struct resource regs;
+ int rc;
+
+ rc = -ENOMEM;
+ dev = kzalloc(sizeof(struct rgmii_instance), GFP_KERNEL);
+ if (dev == NULL) {
+ printk(KERN_ERR "%s: could not allocate RGMII device!\n",
+ np->full_name);
+ goto err_gone;
+ }
+
+ mutex_init(&dev->lock);
+ dev->ofdev = ofdev;
+
+ rc = -ENXIO;
+ if (of_address_to_resource(np, 0, &regs)) {
+ printk(KERN_ERR "%s: Can't get registers address\n",
+ np->full_name);
+ goto err_free;
+ }
+
+ rc = -ENOMEM;
+ dev->base = (struct rgmii_regs __iomem *)ioremap(regs.start,
+ sizeof(struct rgmii_regs));
+ if (dev->base == NULL) {
+ printk(KERN_ERR "%s: Can't map device registers!\n",
+ np->full_name);
+ goto err_free;
+ }
+
+ /* Check for RGMII flags */
+ if (of_get_property(ofdev->dev.of_node, "has-mdio", NULL))
+ dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO;
+
+ /* CAB lacks the right properties, fix this up */
+ if (of_device_is_compatible(ofdev->dev.of_node, "ibm,rgmii-axon"))
+ dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO;
+
+ DBG2(dev, " Boot FER = 0x%08x, SSR = 0x%08x\n",
+ in_be32(&dev->base->fer), in_be32(&dev->base->ssr));
+
+ /* Disable all inputs by default */
+ out_be32(&dev->base->fer, 0);
+
+ printk(KERN_INFO
+ "RGMII %s initialized with%s MDIO support\n",
+ ofdev->dev.of_node->full_name,
+ (dev->flags & EMAC_RGMII_FLAG_HAS_MDIO) ? "" : "out");
+
+ wmb();
+ dev_set_drvdata(&ofdev->dev, dev);
+
+ return 0;
+
+ err_free:
+ kfree(dev);
+ err_gone:
+ return rc;
+}
+
+static int __devexit rgmii_remove(struct platform_device *ofdev)
+{
+ struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+
+ dev_set_drvdata(&ofdev->dev, NULL);
+
+ WARN_ON(dev->users != 0);
+
+ iounmap(dev->base);
+ kfree(dev);
+
+ return 0;
+}
+
+static struct of_device_id rgmii_match[] = {
+ {
+ .compatible = "ibm,rgmii",
+ },
+ {
+ .type = "emac-rgmii",
+ },
+ {},
+};
+
+static struct platform_driver rgmii_driver = {
+ .driver = {
+ .name = "emac-rgmii",
+ .owner = THIS_MODULE,
+ .of_match_table = rgmii_match,
+ },
+ .probe = rgmii_probe,
+ .remove = rgmii_remove,
+};
+
+int __init rgmii_init(void)
+{
+ return platform_driver_register(&rgmii_driver);
+}
+
+void rgmii_exit(void)
+{
+ platform_driver_unregister(&rgmii_driver);
+}
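The FER accessors above rely on each RGMII input owning a 4-bit field in the
function enable register, which is why attach and detach can OR in and mask
out one input without disturbing the others. A worked restatement of the
encoding (values recomputed from the macros at the top of this file, purely
as illustration):

	u32 fer = 0;
	fer |= RGMII_FER_RGMII(1);	/* 0x5 << 4 == 0x00000050 */
	fer |= RGMII_FER_TBI(0);	/* 0x6 << 0 == 0x00000006 */
	fer &= ~RGMII_FER_MASK(1);	/* clears input 1, leaves input 0 */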
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.h b/drivers/net/ethernet/ibm/emac/rgmii.h
new file mode 100644
index 000000000000..9296b6c5f920
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/rgmii.h
@@ -0,0 +1,82 @@
+/*
+ * drivers/net/ibm_newemac/rgmii.h
+ *
+ * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support.
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * Based on the arch/ppc version of the driver:
+ *
+ * Based on ocp_zmii.h/ibm_emac_zmii.h
+ * Armin Kuster akuster@mvista.com
+ *
+ * Copyright 2004 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * Copyright (c) 2004, 2005 Zultys Technologies.
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __IBM_NEWEMAC_RGMII_H
+#define __IBM_NEWEMAC_RGMII_H
+
+/* RGMII bridge type */
+#define RGMII_STANDARD 0
+#define RGMII_AXON 1
+
+/* RGMII bridge */
+struct rgmii_regs {
+ u32 fer; /* Function enable register */
+ u32 ssr; /* Speed select register */
+};
+
+/* RGMII device */
+struct rgmii_instance {
+ struct rgmii_regs __iomem *base;
+
+ /* RGMII bridge flags */
+ int flags;
+#define EMAC_RGMII_FLAG_HAS_MDIO 0x00000001
+
+ /* Only one EMAC whacks us at a time */
+ struct mutex lock;
+
+ /* number of EMACs using this RGMII bridge */
+ int users;
+
+ /* OF device instance */
+ struct platform_device *ofdev;
+};
+
+#ifdef CONFIG_IBM_EMAC_RGMII
+
+extern int rgmii_init(void);
+extern void rgmii_exit(void);
+extern int rgmii_attach(struct platform_device *ofdev, int input, int mode);
+extern void rgmii_detach(struct platform_device *ofdev, int input);
+extern void rgmii_get_mdio(struct platform_device *ofdev, int input);
+extern void rgmii_put_mdio(struct platform_device *ofdev, int input);
+extern void rgmii_set_speed(struct platform_device *ofdev, int input, int speed);
+extern int rgmii_get_regs_len(struct platform_device *ofdev);
+extern void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
+
+#else
+
+# define rgmii_init() 0
+# define rgmii_exit() do { } while(0)
+# define rgmii_attach(x,y,z) (-ENXIO)
+# define rgmii_detach(x,y) do { } while(0)
+# define rgmii_get_mdio(o,i) do { } while (0)
+# define rgmii_put_mdio(o,i) do { } while (0)
+# define rgmii_set_speed(x,y,z) do { } while(0)
+# define rgmii_get_regs_len(x) 0
+# define rgmii_dump_regs(x,buf) (buf)
+#endif /* !CONFIG_IBM_EMAC_RGMII */
+
+#endif /* __IBM_NEWEMAC_RGMII_H */
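The #else stubs above are what let the EMAC core call these hooks
unconditionally: with CONFIG_IBM_EMAC_RGMII off, rgmii_attach() degenerates
to the constant -ENXIO and the do-while(0) stubs vanish at compile time. A
sketch of the calling pattern, assuming a core-side helper (the function and
its skip-on-ENXIO policy are illustrative only):

	static int emac_try_rgmii(struct platform_device *ofdev, int input,
				  int mode)
	{
		int err = rgmii_attach(ofdev, input, mode);

		if (err == -ENXIO)
			return 0;	/* bridge compiled out; carry on without it */
		return err;
	}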
diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c
new file mode 100644
index 000000000000..5f51bf7c9dc5
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/tah.c
@@ -0,0 +1,185 @@
+/*
+ * drivers/net/ibm_newemac/tah.c
+ *
+ * Driver for PowerPC 4xx on-chip ethernet controller, TAH support.
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * Based on the arch/ppc version of the driver:
+ *
+ * Copyright 2004 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * Copyright (c) 2005 Eugene Surovegin <ebs@ebshome.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <asm/io.h>
+
+#include "emac.h"
+#include "core.h"
+
+int __devinit tah_attach(struct platform_device *ofdev, int channel)
+{
+ struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
+
+ mutex_lock(&dev->lock);
+ /* Reset has been done at probe() time... nothing else to do for now */
+ ++dev->users;
+ mutex_unlock(&dev->lock);
+
+ return 0;
+}
+
+void tah_detach(struct platform_device *ofdev, int channel)
+{
+ struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
+
+ mutex_lock(&dev->lock);
+ --dev->users;
+ mutex_unlock(&dev->lock);
+}
+
+void tah_reset(struct platform_device *ofdev)
+{
+ struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
+ struct tah_regs __iomem *p = dev->base;
+ int n;
+
+ /* Reset TAH */
+ out_be32(&p->mr, TAH_MR_SR);
+ n = 100;
+ while ((in_be32(&p->mr) & TAH_MR_SR) && n)
+ --n;
+
+ if (unlikely(!n))
+ printk(KERN_ERR "%s: reset timeout\n",
+ ofdev->dev.of_node->full_name);
+
+ /* 10KB TAH TX FIFO accommodates the max MTU of 9000 */
+ out_be32(&p->mr,
+ TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP |
+ TAH_MR_DIG);
+}
+
+int tah_get_regs_len(struct platform_device *ofdev)
+{
+ return sizeof(struct emac_ethtool_regs_subhdr) +
+ sizeof(struct tah_regs);
+}
+
+void *tah_dump_regs(struct platform_device *ofdev, void *buf)
+{
+ struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
+ struct emac_ethtool_regs_subhdr *hdr = buf;
+ struct tah_regs *regs = (struct tah_regs *)(hdr + 1);
+
+ hdr->version = 0;
+	hdr->index = 0; /* for now; if there are chips with more than
+			 * one TAH, we'll add a cell_index like we do
+			 * for EMAC
+			 */
+ memcpy_fromio(regs, dev->base, sizeof(struct tah_regs));
+ return regs + 1;
+}
+
+static int __devinit tah_probe(struct platform_device *ofdev)
+{
+ struct device_node *np = ofdev->dev.of_node;
+ struct tah_instance *dev;
+ struct resource regs;
+ int rc;
+
+ rc = -ENOMEM;
+ dev = kzalloc(sizeof(struct tah_instance), GFP_KERNEL);
+ if (dev == NULL) {
+ printk(KERN_ERR "%s: could not allocate TAH device!\n",
+ np->full_name);
+ goto err_gone;
+ }
+
+ mutex_init(&dev->lock);
+ dev->ofdev = ofdev;
+
+ rc = -ENXIO;
+ if (of_address_to_resource(np, 0, &regs)) {
+ printk(KERN_ERR "%s: Can't get registers address\n",
+ np->full_name);
+ goto err_free;
+ }
+
+ rc = -ENOMEM;
+ dev->base = (struct tah_regs __iomem *)ioremap(regs.start,
+ sizeof(struct tah_regs));
+ if (dev->base == NULL) {
+ printk(KERN_ERR "%s: Can't map device registers!\n",
+ np->full_name);
+ goto err_free;
+ }
+
+ dev_set_drvdata(&ofdev->dev, dev);
+
+ /* Initialize TAH and enable IPv4 checksum verification, no TSO yet */
+ tah_reset(ofdev);
+
+ printk(KERN_INFO
+ "TAH %s initialized\n", ofdev->dev.of_node->full_name);
+ wmb();
+
+ return 0;
+
+ err_free:
+ kfree(dev);
+ err_gone:
+ return rc;
+}
+
+static int __devexit tah_remove(struct platform_device *ofdev)
+{
+ struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
+
+ dev_set_drvdata(&ofdev->dev, NULL);
+
+ WARN_ON(dev->users != 0);
+
+ iounmap(dev->base);
+ kfree(dev);
+
+ return 0;
+}
+
+static struct of_device_id tah_match[] = {
+ {
+ .compatible = "ibm,tah",
+ },
+ /* For backward compat with old DT */
+ {
+ .type = "tah",
+ },
+ {},
+};
+
+static struct platform_driver tah_driver = {
+ .driver = {
+ .name = "emac-tah",
+ .owner = THIS_MODULE,
+ .of_match_table = tah_match,
+ },
+ .probe = tah_probe,
+ .remove = tah_remove,
+};
+
+int __init tah_init(void)
+{
+ return platform_driver_register(&tah_driver);
+}
+
+void tah_exit(void)
+{
+ platform_driver_unregister(&tah_driver);
+}
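tah_reset() above shows the driver's bounded busy-wait idiom: write the
self-clearing soft-reset bit TAH_MR_SR, spin a fixed number of reads, and
log a timeout if the bit never drops. The same pattern restated as a generic
helper (a sketch, not part of the driver):

	static int poll_bit_clear(u32 __iomem *reg, u32 bit, int tries)
	{
		while ((in_be32(reg) & bit) && --tries)
			;	/* busy-wait, bounded by 'tries' */
		return tries ? 0 : -ETIMEDOUT;
	}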
diff --git a/drivers/net/ethernet/ibm/emac/tah.h b/drivers/net/ethernet/ibm/emac/tah.h
new file mode 100644
index 000000000000..3437ab4964c7
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/tah.h
@@ -0,0 +1,95 @@
+/*
+ * drivers/net/ibm_newemac/tah.h
+ *
+ * Driver for PowerPC 4xx on-chip ethernet controller, TAH support.
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * Based on the arch/ppc version of the driver:
+ *
+ * Copyright 2004 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * Copyright (c) 2005 Eugene Surovegin <ebs@ebshome.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __IBM_NEWEMAC_TAH_H
+#define __IBM_NEWEMAC_TAH_H
+
+/* TAH */
+struct tah_regs {
+ u32 revid;
+ u32 pad[3];
+ u32 mr;
+ u32 ssr0;
+ u32 ssr1;
+ u32 ssr2;
+ u32 ssr3;
+ u32 ssr4;
+ u32 ssr5;
+ u32 tsr;
+};
+
+/* TAH device */
+struct tah_instance {
+ struct tah_regs __iomem *base;
+
+ /* Only one EMAC whacks us at a time */
+ struct mutex lock;
+
+ /* number of EMACs using this TAH */
+ int users;
+
+ /* OF device instance */
+ struct platform_device *ofdev;
+};
+
+/* TAH engine */
+#define TAH_MR_CVR 0x80000000
+#define TAH_MR_SR 0x40000000
+#define TAH_MR_ST_256 0x01000000
+#define TAH_MR_ST_512 0x02000000
+#define TAH_MR_ST_768 0x03000000
+#define TAH_MR_ST_1024 0x04000000
+#define TAH_MR_ST_1280 0x05000000
+#define TAH_MR_ST_1536 0x06000000
+#define TAH_MR_TFS_16KB 0x00000000
+#define TAH_MR_TFS_2KB 0x00200000
+#define TAH_MR_TFS_4KB 0x00400000
+#define TAH_MR_TFS_6KB 0x00600000
+#define TAH_MR_TFS_8KB 0x00800000
+#define TAH_MR_TFS_10KB 0x00a00000
+#define TAH_MR_DTFP 0x00100000
+#define TAH_MR_DIG 0x00080000
+
+#ifdef CONFIG_IBM_EMAC_TAH
+
+extern int tah_init(void);
+extern void tah_exit(void);
+extern int tah_attach(struct platform_device *ofdev, int channel);
+extern void tah_detach(struct platform_device *ofdev, int channel);
+extern void tah_reset(struct platform_device *ofdev);
+extern int tah_get_regs_len(struct platform_device *ofdev);
+extern void *tah_dump_regs(struct platform_device *ofdev, void *buf);
+
+#else
+
+# define tah_init() 0
+# define tah_exit() do { } while(0)
+# define tah_attach(x,y) (-ENXIO)
+# define tah_detach(x,y) do { } while(0)
+# define tah_reset(x) do { } while(0)
+# define tah_get_regs_len(x) 0
+# define tah_dump_regs(x,buf) (buf)
+
+#endif /* !CONFIG_IBM_EMAC_TAH */
+
+#endif /* __IBM_NEWEMAC_TAH_H */
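The TAH_MR_* values above are disjoint bit fields of a single mode register,
so the configuration written by tah_reset() is a plain OR. A worked
computation (field meanings beyond CVR, which tah.c ties to IPv4 checksum
verification, are inferred from the names):

	u32 mr = TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB |
		 TAH_MR_DTFP | TAH_MR_DIG;	/* == 0x83b80000 */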
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
new file mode 100644
index 000000000000..97449e786d61
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -0,0 +1,332 @@
+/*
+ * drivers/net/ibm_newemac/zmii.c
+ *
+ * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support.
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * Based on the arch/ppc version of the driver:
+ *
+ * Copyright (c) 2004, 2005 Zultys Technologies.
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *
+ * Based on original work by
+ * Armin Kuster <akuster@mvista.com>
+ * Copyright 2001 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/ethtool.h>
+#include <asm/io.h>
+
+#include "emac.h"
+#include "core.h"
+
+/* ZMIIx_FER */
+#define ZMII_FER_MDI(idx) (0x80000000 >> ((idx) * 4))
+#define ZMII_FER_MDI_ALL (ZMII_FER_MDI(0) | ZMII_FER_MDI(1) | \
+ ZMII_FER_MDI(2) | ZMII_FER_MDI(3))
+
+#define ZMII_FER_SMII(idx) (0x40000000 >> ((idx) * 4))
+#define ZMII_FER_RMII(idx) (0x20000000 >> ((idx) * 4))
+#define ZMII_FER_MII(idx) (0x10000000 >> ((idx) * 4))
+
+/* ZMIIx_SSR */
+#define ZMII_SSR_SCI(idx) (0x40000000 >> ((idx) * 4))
+#define ZMII_SSR_FSS(idx) (0x20000000 >> ((idx) * 4))
+#define ZMII_SSR_SP(idx) (0x10000000 >> ((idx) * 4))
+
+/* ZMII only supports MII, RMII and SMII;
+ * we also support autodetection for backward compatibility
+ */
+static inline int zmii_valid_mode(int mode)
+{
+ return mode == PHY_MODE_MII ||
+ mode == PHY_MODE_RMII ||
+ mode == PHY_MODE_SMII ||
+ mode == PHY_MODE_NA;
+}
+
+static inline const char *zmii_mode_name(int mode)
+{
+ switch (mode) {
+ case PHY_MODE_MII:
+ return "MII";
+ case PHY_MODE_RMII:
+ return "RMII";
+ case PHY_MODE_SMII:
+ return "SMII";
+ default:
+ BUG();
+ }
+}
+
+static inline u32 zmii_mode_mask(int mode, int input)
+{
+ switch (mode) {
+ case PHY_MODE_MII:
+ return ZMII_FER_MII(input);
+ case PHY_MODE_RMII:
+ return ZMII_FER_RMII(input);
+ case PHY_MODE_SMII:
+ return ZMII_FER_SMII(input);
+ default:
+ return 0;
+ }
+}
+
+int __devinit zmii_attach(struct platform_device *ofdev, int input, int *mode)
+{
+ struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+ struct zmii_regs __iomem *p = dev->base;
+
+ ZMII_DBG(dev, "init(%d, %d)" NL, input, *mode);
+
+ if (!zmii_valid_mode(*mode)) {
+ /* Probably an EMAC connected to RGMII,
+ * but it still may need ZMII for MDIO so
+ * we don't fail here.
+ */
+ dev->users++;
+ return 0;
+ }
+
+ mutex_lock(&dev->lock);
+
+ /* Autodetect ZMII mode if not specified.
+ * This is only for backward compatibility with the old driver.
+ * Please, always specify PHY mode in your board port to avoid
+ * any surprises.
+ */
+ if (dev->mode == PHY_MODE_NA) {
+ if (*mode == PHY_MODE_NA) {
+ u32 r = dev->fer_save;
+
+ ZMII_DBG(dev, "autodetecting mode, FER = 0x%08x" NL, r);
+
+ if (r & (ZMII_FER_MII(0) | ZMII_FER_MII(1)))
+ dev->mode = PHY_MODE_MII;
+ else if (r & (ZMII_FER_RMII(0) | ZMII_FER_RMII(1)))
+ dev->mode = PHY_MODE_RMII;
+ else
+ dev->mode = PHY_MODE_SMII;
+ } else
+ dev->mode = *mode;
+
+ printk(KERN_NOTICE "%s: bridge in %s mode\n",
+ ofdev->dev.of_node->full_name,
+ zmii_mode_name(dev->mode));
+ } else {
+ /* All inputs must use the same mode */
+ if (*mode != PHY_MODE_NA && *mode != dev->mode) {
+ printk(KERN_ERR
+ "%s: invalid mode %d specified for input %d\n",
+ ofdev->dev.of_node->full_name, *mode, input);
+ mutex_unlock(&dev->lock);
+ return -EINVAL;
+ }
+ }
+
+ /* Report back correct PHY mode,
+ * it may be used during PHY initialization.
+ */
+ *mode = dev->mode;
+
+ /* Enable this input */
+ out_be32(&p->fer, in_be32(&p->fer) | zmii_mode_mask(dev->mode, input));
+ ++dev->users;
+
+ mutex_unlock(&dev->lock);
+
+ return 0;
+}
+
+void zmii_get_mdio(struct platform_device *ofdev, int input)
+{
+ struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+ u32 fer;
+
+ ZMII_DBG2(dev, "get_mdio(%d)" NL, input);
+
+ mutex_lock(&dev->lock);
+
+ fer = in_be32(&dev->base->fer) & ~ZMII_FER_MDI_ALL;
+ out_be32(&dev->base->fer, fer | ZMII_FER_MDI(input));
+}
+
+void zmii_put_mdio(struct platform_device *ofdev, int input)
+{
+ struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+
+ ZMII_DBG2(dev, "put_mdio(%d)" NL, input);
+ mutex_unlock(&dev->lock);
+}
+
+void zmii_set_speed(struct platform_device *ofdev, int input, int speed)
+{
+ struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+ u32 ssr;
+
+ mutex_lock(&dev->lock);
+
+ ssr = in_be32(&dev->base->ssr);
+
+ ZMII_DBG(dev, "speed(%d, %d)" NL, input, speed);
+
+ if (speed == SPEED_100)
+ ssr |= ZMII_SSR_SP(input);
+ else
+ ssr &= ~ZMII_SSR_SP(input);
+
+ out_be32(&dev->base->ssr, ssr);
+
+ mutex_unlock(&dev->lock);
+}
+
+void zmii_detach(struct platform_device *ofdev, int input)
+{
+ struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+
+ BUG_ON(!dev || dev->users == 0);
+
+ mutex_lock(&dev->lock);
+
+ ZMII_DBG(dev, "detach(%d)" NL, input);
+
+ /* Disable this input */
+ out_be32(&dev->base->fer,
+ in_be32(&dev->base->fer) & ~zmii_mode_mask(dev->mode, input));
+
+ --dev->users;
+
+ mutex_unlock(&dev->lock);
+}
+
+int zmii_get_regs_len(struct platform_device *ofdev)
+{
+ return sizeof(struct emac_ethtool_regs_subhdr) +
+ sizeof(struct zmii_regs);
+}
+
+void *zmii_dump_regs(struct platform_device *ofdev, void *buf)
+{
+ struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+ struct emac_ethtool_regs_subhdr *hdr = buf;
+ struct zmii_regs *regs = (struct zmii_regs *)(hdr + 1);
+
+ hdr->version = 0;
+	hdr->index = 0; /* for now; if there are chips with more than
+			 * one ZMII, we'll add a cell_index like we do
+			 * for EMAC
+			 */
+ memcpy_fromio(regs, dev->base, sizeof(struct zmii_regs));
+ return regs + 1;
+}
+
+static int __devinit zmii_probe(struct platform_device *ofdev)
+{
+ struct device_node *np = ofdev->dev.of_node;
+ struct zmii_instance *dev;
+ struct resource regs;
+ int rc;
+
+ rc = -ENOMEM;
+ dev = kzalloc(sizeof(struct zmii_instance), GFP_KERNEL);
+ if (dev == NULL) {
+ printk(KERN_ERR "%s: could not allocate ZMII device!\n",
+ np->full_name);
+ goto err_gone;
+ }
+
+ mutex_init(&dev->lock);
+ dev->ofdev = ofdev;
+ dev->mode = PHY_MODE_NA;
+
+ rc = -ENXIO;
+ if (of_address_to_resource(np, 0, &regs)) {
+ printk(KERN_ERR "%s: Can't get registers address\n",
+ np->full_name);
+ goto err_free;
+ }
+
+ rc = -ENOMEM;
+ dev->base = (struct zmii_regs __iomem *)ioremap(regs.start,
+ sizeof(struct zmii_regs));
+ if (dev->base == NULL) {
+ printk(KERN_ERR "%s: Can't map device registers!\n",
+ np->full_name);
+ goto err_free;
+ }
+
+ /* We may need FER value for autodetection later */
+ dev->fer_save = in_be32(&dev->base->fer);
+
+ /* Disable all inputs by default */
+ out_be32(&dev->base->fer, 0);
+
+ printk(KERN_INFO
+ "ZMII %s initialized\n", ofdev->dev.of_node->full_name);
+ wmb();
+ dev_set_drvdata(&ofdev->dev, dev);
+
+ return 0;
+
+ err_free:
+ kfree(dev);
+ err_gone:
+ return rc;
+}
+
+static int __devexit zmii_remove(struct platform_device *ofdev)
+{
+ struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
+
+ dev_set_drvdata(&ofdev->dev, NULL);
+
+ WARN_ON(dev->users != 0);
+
+ iounmap(dev->base);
+ kfree(dev);
+
+ return 0;
+}
+
+static struct of_device_id zmii_match[] = {
+ {
+ .compatible = "ibm,zmii",
+ },
+ /* For backward compat with old DT */
+ {
+ .type = "emac-zmii",
+ },
+ {},
+};
+
+static struct platform_driver zmii_driver = {
+ .driver = {
+ .name = "emac-zmii",
+ .owner = THIS_MODULE,
+ .of_match_table = zmii_match,
+ },
+ .probe = zmii_probe,
+ .remove = zmii_remove,
+};
+
+int __init zmii_init(void)
+{
+ return platform_driver_register(&zmii_driver);
+}
+
+void zmii_exit(void)
+{
+ platform_driver_unregister(&zmii_driver);
+}
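Note the locking shape of zmii_get_mdio()/zmii_put_mdio() above: the mutex
is taken in get and only released in put, so the FER routing of the shared
MDI pins stays stable for the whole MDIO transaction. A sketch of a caller
honoring that bracket (mdio_xfer() is hypothetical):

	static int emac_mdio_read_via_zmii(struct platform_device *zmii_dev,
					   int input, int addr, int reg)
	{
		int val;

		zmii_get_mdio(zmii_dev, input);	/* locks + routes MDI to input */
		val = mdio_xfer(addr, reg);	/* hypothetical bus access */
		zmii_put_mdio(zmii_dev, input);	/* unlocks */
		return val;
	}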
diff --git a/drivers/net/ethernet/ibm/emac/zmii.h b/drivers/net/ethernet/ibm/emac/zmii.h
new file mode 100644
index 000000000000..ceaed823a83c
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/zmii.h
@@ -0,0 +1,78 @@
+/*
+ * drivers/net/ibm_newemac/zmii.h
+ *
+ * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support.
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * Based on the arch/ppc version of the driver:
+ *
+ * Copyright (c) 2004, 2005 Zultys Technologies.
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *
+ * Based on original work by
+ * Armin Kuster <akuster@mvista.com>
+ * Copyright 2001 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __IBM_NEWEMAC_ZMII_H
+#define __IBM_NEWEMAC_ZMII_H
+
+/* ZMII bridge registers */
+struct zmii_regs {
+ u32 fer; /* Function enable reg */
+ u32 ssr; /* Speed select reg */
+ u32 smiirs; /* SMII status reg */
+};
+
+/* ZMII device */
+struct zmii_instance {
+ struct zmii_regs __iomem *base;
+
+ /* Only one EMAC whacks us at a time */
+ struct mutex lock;
+
+ /* subset of PHY_MODE_XXXX */
+ int mode;
+
+ /* number of EMACs using this ZMII bridge */
+ int users;
+
+ /* FER value left by firmware */
+ u32 fer_save;
+
+ /* OF device instance */
+ struct platform_device *ofdev;
+};
+
+#ifdef CONFIG_IBM_EMAC_ZMII
+
+extern int zmii_init(void);
+extern void zmii_exit(void);
+extern int zmii_attach(struct platform_device *ofdev, int input, int *mode);
+extern void zmii_detach(struct platform_device *ofdev, int input);
+extern void zmii_get_mdio(struct platform_device *ofdev, int input);
+extern void zmii_put_mdio(struct platform_device *ofdev, int input);
+extern void zmii_set_speed(struct platform_device *ofdev, int input, int speed);
+extern int zmii_get_regs_len(struct platform_device *ocpdev);
+extern void *zmii_dump_regs(struct platform_device *ofdev, void *buf);
+
+#else
+# define zmii_init() 0
+# define zmii_exit() do { } while(0)
+# define zmii_attach(x,y,z) (-ENXIO)
+# define zmii_detach(x,y) do { } while(0)
+# define zmii_get_mdio(x,y) do { } while(0)
+# define zmii_put_mdio(x,y) do { } while(0)
+# define zmii_set_speed(x,y,z) do { } while(0)
+# define zmii_get_regs_len(x) 0
+# define zmii_dump_regs(x,buf) (buf)
+#endif /* !CONFIG_IBM_EMAC_ZMII */
+
+#endif /* __IBM_NEWEMAC_ZMII_H */
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
new file mode 100644
index 000000000000..4da972eaabb4
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -0,0 +1,1638 @@
+/*
+ * IBM Power Virtual Ethernet Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2003, 2010
+ *
+ * Authors: Dave Larson <larson1@us.ibm.com>
+ * Santiago Leon <santil@linux.vnet.ibm.com>
+ * Brian King <brking@linux.vnet.ibm.com>
+ * Robert Jennings <rcj@linux.vnet.ibm.com>
+ * Anton Blanchard <anton@au.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/pm.h>
+#include <linux/ethtool.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <asm/hvcall.h>
+#include <linux/atomic.h>
+#include <asm/vio.h>
+#include <asm/iommu.h>
+#include <asm/firmware.h>
+
+#include "ibmveth.h"
+
+static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
+static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
+
+static struct kobj_type ktype_veth_pool;
+
+static const char ibmveth_driver_name[] = "ibmveth";
+static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
+#define ibmveth_driver_version "1.04"
+
+MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ibmveth_driver_version);
+
+static unsigned int tx_copybreak __read_mostly = 128;
+module_param(tx_copybreak, uint, 0644);
+MODULE_PARM_DESC(tx_copybreak,
+ "Maximum size of packet that is copied to a new buffer on transmit");
+
+static unsigned int rx_copybreak __read_mostly = 128;
+module_param(rx_copybreak, uint, 0644);
+MODULE_PARM_DESC(rx_copybreak,
+ "Maximum size of packet that is copied to a new buffer on receive");
+
+static unsigned int rx_flush __read_mostly = 0;
+module_param(rx_flush, uint, 0644);
+MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
+
+struct ibmveth_stat {
+ char name[ETH_GSTRING_LEN];
+ int offset;
+};
+
+#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
+#define IBMVETH_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))
+
+struct ibmveth_stat ibmveth_stats[] = {
+ { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
+ { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
+ { "replenish_add_buff_failure",
+ IBMVETH_STAT_OFF(replenish_add_buff_failure) },
+ { "replenish_add_buff_success",
+ IBMVETH_STAT_OFF(replenish_add_buff_success) },
+ { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
+ { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
+ { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
+ { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
+ { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
+ { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
+};
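Each entry above records a byte offset into struct ibmveth_adapter, so
dumping the statistics is a single loop over the table. A sketch of the
access pattern (the driver's real ethtool hook lives later in this file,
past the end of this hunk):

	/* 'data' is the u64 output array ethtool hands the driver */
	int i;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);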
+
+/* simple methods of getting data from the current rxq entry */
+static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
+{
+ return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
+}
+
+static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
+{
+ return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
+ IBMVETH_RXQ_TOGGLE_SHIFT;
+}
+
+static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
+{
+ return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
+}
+
+static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
+{
+ return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
+}
+
+static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
+{
+ return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
+}
+
+static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
+{
+ return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
+}
+
+static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
+{
+ return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
+}
+
+/* setup the initial settings for a buffer pool */
+static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
+ u32 pool_index, u32 pool_size,
+ u32 buff_size, u32 pool_active)
+{
+ pool->size = pool_size;
+ pool->index = pool_index;
+ pool->buff_size = buff_size;
+ pool->threshold = pool_size * 7 / 8;
+ pool->active = pool_active;
+}
+
+/* allocate and set up a buffer pool - called during open */
+static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
+{
+ int i;
+
+ pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
+
+ if (!pool->free_map)
+ return -1;
+
+ pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
+ if (!pool->dma_addr) {
+ kfree(pool->free_map);
+ pool->free_map = NULL;
+ return -1;
+ }
+
+ pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
+
+ if (!pool->skbuff) {
+ kfree(pool->dma_addr);
+ pool->dma_addr = NULL;
+
+ kfree(pool->free_map);
+ pool->free_map = NULL;
+ return -1;
+ }
+
+ memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
+
+ for (i = 0; i < pool->size; ++i)
+ pool->free_map[i] = i;
+
+ atomic_set(&pool->available, 0);
+ pool->producer_index = 0;
+ pool->consumer_index = 0;
+
+ return 0;
+}
+
+static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
+{
+ unsigned long offset;
+
+ for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
+ asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
+}
+
+/* replenish the buffers for a pool. note that we don't need to
+ * skb_reserve these since they are used for incoming...
+ */
+static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
+ struct ibmveth_buff_pool *pool)
+{
+ u32 i;
+ u32 count = pool->size - atomic_read(&pool->available);
+ u32 buffers_added = 0;
+ struct sk_buff *skb;
+ unsigned int free_index, index;
+ u64 correlator;
+ unsigned long lpar_rc;
+ dma_addr_t dma_addr;
+
+ mb();
+
+ for (i = 0; i < count; ++i) {
+ union ibmveth_buf_desc desc;
+
+ skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
+
+ if (!skb) {
+ netdev_dbg(adapter->netdev,
+ "replenish: unable to allocate skb\n");
+ adapter->replenish_no_mem++;
+ break;
+ }
+
+ free_index = pool->consumer_index;
+ pool->consumer_index++;
+ if (pool->consumer_index >= pool->size)
+ pool->consumer_index = 0;
+ index = pool->free_map[free_index];
+
+ BUG_ON(index == IBM_VETH_INVALID_MAP);
+ BUG_ON(pool->skbuff[index] != NULL);
+
+ dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+ pool->buff_size, DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
+ goto failure;
+
+ pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
+ pool->dma_addr[index] = dma_addr;
+ pool->skbuff[index] = skb;
+
+ correlator = ((u64)pool->index << 32) | index;
+ *(u64 *)skb->data = correlator;
+
+ desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
+ desc.fields.address = dma_addr;
+
+ if (rx_flush) {
+ unsigned int len = min(pool->buff_size,
+ adapter->netdev->mtu +
+ IBMVETH_BUFF_OH);
+ ibmveth_flush_buffer(skb->data, len);
+ }
+ lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
+ desc.desc);
+
+ if (lpar_rc != H_SUCCESS) {
+ goto failure;
+ } else {
+ buffers_added++;
+ adapter->replenish_add_buff_success++;
+ }
+ }
+
+ mb();
+ atomic_add(buffers_added, &(pool->available));
+ return;
+
+failure:
+ pool->free_map[free_index] = index;
+ pool->skbuff[index] = NULL;
+ if (pool->consumer_index == 0)
+ pool->consumer_index = pool->size - 1;
+ else
+ pool->consumer_index--;
+ if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
+ dma_unmap_single(&adapter->vdev->dev,
+ pool->dma_addr[index], pool->buff_size,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ adapter->replenish_add_buff_failure++;
+
+ mb();
+ atomic_add(buffers_added, &(pool->available));
+}
+
+/* replenish routine */
+static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
+{
+ int i;
+
+ adapter->replenish_task_cycles++;
+
+ for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
+ struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
+
+ if (pool->active &&
+ (atomic_read(&pool->available) < pool->threshold))
+ ibmveth_replenish_buffer_pool(adapter, pool);
+ }
+
+ adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
+ 4096 - 8);
+}
+
+/* empty and free a buffer pool - also used to do cleanup in error paths */
+static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
+ struct ibmveth_buff_pool *pool)
+{
+ int i;
+
+ kfree(pool->free_map);
+ pool->free_map = NULL;
+
+ if (pool->skbuff && pool->dma_addr) {
+ for (i = 0; i < pool->size; ++i) {
+ struct sk_buff *skb = pool->skbuff[i];
+ if (skb) {
+ dma_unmap_single(&adapter->vdev->dev,
+ pool->dma_addr[i],
+ pool->buff_size,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ pool->skbuff[i] = NULL;
+ }
+ }
+ }
+
+ if (pool->dma_addr) {
+ kfree(pool->dma_addr);
+ pool->dma_addr = NULL;
+ }
+
+ if (pool->skbuff) {
+ kfree(pool->skbuff);
+ pool->skbuff = NULL;
+ }
+}
+
+/* remove a buffer from a pool */
+static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
+ u64 correlator)
+{
+ unsigned int pool = correlator >> 32;
+ unsigned int index = correlator & 0xffffffffUL;
+ unsigned int free_index;
+ struct sk_buff *skb;
+
+ BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
+ BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+
+ skb = adapter->rx_buff_pool[pool].skbuff[index];
+
+ BUG_ON(skb == NULL);
+
+ adapter->rx_buff_pool[pool].skbuff[index] = NULL;
+
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->rx_buff_pool[pool].dma_addr[index],
+ adapter->rx_buff_pool[pool].buff_size,
+ DMA_FROM_DEVICE);
+
+ free_index = adapter->rx_buff_pool[pool].producer_index;
+ adapter->rx_buff_pool[pool].producer_index++;
+ if (adapter->rx_buff_pool[pool].producer_index >=
+ adapter->rx_buff_pool[pool].size)
+ adapter->rx_buff_pool[pool].producer_index = 0;
+ adapter->rx_buff_pool[pool].free_map[free_index] = index;
+
+ mb();
+
+ atomic_dec(&(adapter->rx_buff_pool[pool].available));
+}
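All of the rx-queue helpers here decode the same 64-bit correlator that
ibmveth_replenish_buffer_pool() stamps into the first bytes of each buffer:
pool number in the high 32 bits, buffer index in the low 32 bits. A
standalone round-trip of the encoding (example values, not driver code):

	u32 pool_index = 2, buf_index = 77;
	u64 correlator = ((u64)pool_index << 32) | buf_index;
	unsigned int pool  = correlator >> 32;		  /* == 2  */
	unsigned int index = correlator & 0xffffffffUL;	  /* == 77 */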
+
+/* get the current buffer on the rx queue */
+static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
+{
+ u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
+ unsigned int pool = correlator >> 32;
+ unsigned int index = correlator & 0xffffffffUL;
+
+ BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
+ BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+
+ return adapter->rx_buff_pool[pool].skbuff[index];
+}
+
+/* recycle the current buffer on the rx queue */
+static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
+{
+ u32 q_index = adapter->rx_queue.index;
+ u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
+ unsigned int pool = correlator >> 32;
+ unsigned int index = correlator & 0xffffffffUL;
+ union ibmveth_buf_desc desc;
+ unsigned long lpar_rc;
+ int ret = 1;
+
+ BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
+ BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+
+ if (!adapter->rx_buff_pool[pool].active) {
+ ibmveth_rxq_harvest_buffer(adapter);
+ ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
+ goto out;
+ }
+
+ desc.fields.flags_len = IBMVETH_BUF_VALID |
+ adapter->rx_buff_pool[pool].buff_size;
+ desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
+
+ lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
+
+ if (lpar_rc != H_SUCCESS) {
+ netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
+ "during recycle rc=%ld", lpar_rc);
+		ibmveth_remove_buffer_from_pool(adapter,
+			adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
+ ret = 0;
+ }
+
+ if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
+ adapter->rx_queue.index = 0;
+ adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
+ }
+
+out:
+ return ret;
+}
+
+static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
+{
+	ibmveth_remove_buffer_from_pool(adapter,
+		adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
+
+ if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
+ adapter->rx_queue.index = 0;
+ adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
+ }
+}
+
+static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
+{
+ int i;
+ struct device *dev = &adapter->vdev->dev;
+
+ if (adapter->buffer_list_addr != NULL) {
+ if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
+ dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
+ adapter->buffer_list_dma = DMA_ERROR_CODE;
+ }
+ free_page((unsigned long)adapter->buffer_list_addr);
+ adapter->buffer_list_addr = NULL;
+ }
+
+ if (adapter->filter_list_addr != NULL) {
+ if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
+ dma_unmap_single(dev, adapter->filter_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
+ adapter->filter_list_dma = DMA_ERROR_CODE;
+ }
+ free_page((unsigned long)adapter->filter_list_addr);
+ adapter->filter_list_addr = NULL;
+ }
+
+ if (adapter->rx_queue.queue_addr != NULL) {
+ if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
+ dma_unmap_single(dev,
+ adapter->rx_queue.queue_dma,
+ adapter->rx_queue.queue_len,
+ DMA_BIDIRECTIONAL);
+ adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
+ }
+ kfree(adapter->rx_queue.queue_addr);
+ adapter->rx_queue.queue_addr = NULL;
+ }
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ if (adapter->rx_buff_pool[i].active)
+ ibmveth_free_buffer_pool(adapter,
+ &adapter->rx_buff_pool[i]);
+
+ if (adapter->bounce_buffer != NULL) {
+ if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->bounce_buffer_dma,
+ adapter->netdev->mtu + IBMVETH_BUFF_OH,
+ DMA_BIDIRECTIONAL);
+ adapter->bounce_buffer_dma = DMA_ERROR_CODE;
+ }
+ kfree(adapter->bounce_buffer);
+ adapter->bounce_buffer = NULL;
+ }
+}
+
+static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
+ union ibmveth_buf_desc rxq_desc, u64 mac_address)
+{
+ int rc, try_again = 1;
+
+ /*
+ * After a kexec the adapter will still be open, so our attempt to
+ * open it will fail. So if we get a failure we free the adapter and
+ * try again, but only once.
+ */
+retry:
+ rc = h_register_logical_lan(adapter->vdev->unit_address,
+ adapter->buffer_list_dma, rxq_desc.desc,
+ adapter->filter_list_dma, mac_address);
+
+ if (rc != H_SUCCESS && try_again) {
+ do {
+ rc = h_free_logical_lan(adapter->vdev->unit_address);
+ } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
+
+ try_again = 0;
+ goto retry;
+ }
+
+ return rc;
+}
+
+static int ibmveth_open(struct net_device *netdev)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ u64 mac_address = 0;
+ int rxq_entries = 1;
+ unsigned long lpar_rc;
+ int rc;
+ union ibmveth_buf_desc rxq_desc;
+ int i;
+ struct device *dev;
+
+ netdev_dbg(netdev, "open starting\n");
+
+ napi_enable(&adapter->napi);
+
+	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ rxq_entries += adapter->rx_buff_pool[i].size;
+
+	adapter->buffer_list_addr = (void *)get_zeroed_page(GFP_KERNEL);
+	adapter->filter_list_addr = (void *)get_zeroed_page(GFP_KERNEL);
+
+ if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
+ netdev_err(netdev, "unable to allocate filter or buffer list "
+ "pages\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
+ rxq_entries;
+ adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
+ GFP_KERNEL);
+
+ if (!adapter->rx_queue.queue_addr) {
+ netdev_err(netdev, "unable to allocate rx queue pages\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ dev = &adapter->vdev->dev;
+
+ adapter->buffer_list_dma = dma_map_single(dev,
+ adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
+ adapter->filter_list_dma = dma_map_single(dev,
+ adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
+ adapter->rx_queue.queue_dma = dma_map_single(dev,
+ adapter->rx_queue.queue_addr,
+ adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
+
+ if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
+ (dma_mapping_error(dev, adapter->filter_list_dma)) ||
+ (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
+ netdev_err(netdev, "unable to map filter or buffer list "
+ "pages\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ adapter->rx_queue.index = 0;
+ adapter->rx_queue.num_slots = rxq_entries;
+ adapter->rx_queue.toggle = 1;
+
+ memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
+ mac_address = mac_address >> 16;
+
+ rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
+ adapter->rx_queue.queue_len;
+ rxq_desc.fields.address = adapter->rx_queue.queue_dma;
+
+ netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
+ netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
+ netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
+
+ h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
+
+ lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
+
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
+ lpar_rc);
+ netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
+ "desc:0x%llx MAC:0x%llx\n",
+ adapter->buffer_list_dma,
+ adapter->filter_list_dma,
+ rxq_desc.desc,
+ mac_address);
+ rc = -ENONET;
+ goto err_out;
+ }
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+ if (!adapter->rx_buff_pool[i].active)
+ continue;
+ if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
+ netdev_err(netdev, "unable to alloc pool\n");
+ adapter->rx_buff_pool[i].active = 0;
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ }
+
+ netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
+ rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
+ netdev);
+ if (rc != 0) {
+ netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
+ netdev->irq, rc);
+ do {
+ lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
+ } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
+
+ goto err_out;
+ }
+
+ adapter->bounce_buffer =
+ kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
+ if (!adapter->bounce_buffer) {
+ netdev_err(netdev, "unable to allocate bounce buffer\n");
+ rc = -ENOMEM;
+ goto err_out_free_irq;
+ }
+ adapter->bounce_buffer_dma =
+ dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
+ netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
+ netdev_err(netdev, "unable to map bounce buffer\n");
+ rc = -ENOMEM;
+ goto err_out_free_irq;
+ }
+
+ netdev_dbg(netdev, "initial replenish cycle\n");
+ ibmveth_interrupt(netdev->irq, netdev);
+
+ netif_start_queue(netdev);
+
+ netdev_dbg(netdev, "open complete\n");
+
+ return 0;
+
+err_out_free_irq:
+ free_irq(netdev->irq, netdev);
+err_out:
+ ibmveth_cleanup(adapter);
+ napi_disable(&adapter->napi);
+ return rc;
+}
+
+static int ibmveth_close(struct net_device *netdev)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ long lpar_rc;
+
+ netdev_dbg(netdev, "close starting\n");
+
+ napi_disable(&adapter->napi);
+
+ if (!adapter->pool_config)
+ netif_stop_queue(netdev);
+
+ h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
+
+ do {
+ lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
+ } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
+
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_free_logical_lan failed with %lx, "
+ "continuing with close\n", lpar_rc);
+ }
+
+ free_irq(netdev->irq, netdev);
+
+ adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
+ 4096 - 8);
+
+ ibmveth_cleanup(adapter);
+
+ netdev_dbg(netdev, "close complete\n");
+
+ return 0;
+}
+
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+ SUPPORTED_FIBRE);
+ cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
+ ADVERTISED_FIBRE);
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
+ cmd->duplex = DUPLEX_FULL;
+ cmd->port = PORT_FIBRE;
+ cmd->phy_address = 0;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->autoneg = AUTONEG_ENABLE;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 1;
+ return 0;
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
+ strncpy(info->version, ibmveth_driver_version,
+ sizeof(info->version) - 1);
+}
+
+static u32 ibmveth_fix_features(struct net_device *dev, u32 features)
+{
+ /*
+ * Since the ibmveth firmware interface does not have the
+ * concept of separate tx/rx checksum offload enable, if rx
+ * checksum is disabled we also have to disable tx checksum
+ * offload. Once we disable rx checksum offload, we are no
+ * longer allowed to send tx buffers that are not properly
+ * checksummed.
+ */
+
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_ALL_CSUM;
+
+ return features;
+}
+
+static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+ unsigned long set_attr, clr_attr, ret_attr;
+ unsigned long set_attr6, clr_attr6;
+ long ret, ret4, ret6;
+ int rc1 = 0, rc2 = 0;
+ int restart = 0;
+
+ if (netif_running(dev)) {
+ restart = 1;
+ adapter->pool_config = 1;
+ ibmveth_close(dev);
+ adapter->pool_config = 0;
+ }
+
+ set_attr = 0;
+ clr_attr = 0;
+ set_attr6 = 0;
+ clr_attr6 = 0;
+
+ if (data) {
+ set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
+ set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
+ } else {
+ clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
+ clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
+ }
+
+ ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
+
+ if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
+ !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
+ (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
+ ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+ set_attr, &ret_attr);
+
+ if (ret4 != H_SUCCESS) {
+ netdev_err(dev, "unable to change IPv4 checksum "
+ "offload settings. %d rc=%ld\n",
+ data, ret4);
+
+ h_illan_attributes(adapter->vdev->unit_address,
+ set_attr, clr_attr, &ret_attr);
+
+ if (data == 1)
+ dev->features &= ~NETIF_F_IP_CSUM;
+
+ } else {
+ adapter->fw_ipv4_csum_support = data;
+ }
+
+ ret6 = h_illan_attributes(adapter->vdev->unit_address,
+ clr_attr6, set_attr6, &ret_attr);
+
+ if (ret6 != H_SUCCESS) {
+ netdev_err(dev, "unable to change IPv6 checksum "
+ "offload settings. %d rc=%ld\n",
+ data, ret6);
+
+ h_illan_attributes(adapter->vdev->unit_address,
+ set_attr6, clr_attr6, &ret_attr);
+
+ if (data == 1)
+ dev->features &= ~NETIF_F_IPV6_CSUM;
+
+		} else {
+			adapter->fw_ipv6_csum_support = data;
+		}
+
+ if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
+ adapter->rx_csum = data;
+ else
+ rc1 = -EIO;
+ } else {
+ rc1 = -EIO;
+ netdev_err(dev, "unable to change checksum offload settings."
+ " %d rc=%ld ret_attr=%lx\n", data, ret,
+ ret_attr);
+ }
+
+ if (restart)
+ rc2 = ibmveth_open(dev);
+
+ return rc1 ? rc1 : rc2;
+}
+
+static int ibmveth_set_features(struct net_device *dev, u32 features)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+ int rx_csum = !!(features & NETIF_F_RXCSUM);
+ int rc;
+
+ if (rx_csum == adapter->rx_csum)
+ return 0;
+
+ rc = ibmveth_set_csum_offload(dev, rx_csum);
+ if (rc && !adapter->rx_csum)
+ dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+
+ return rc;
+}
+
+static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
+ memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
+}
+
+static int ibmveth_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(ibmveth_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void ibmveth_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int i;
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+
+ for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
+ data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_settings = netdev_get_settings,
+ .get_link = ethtool_op_get_link,
+ .get_strings = ibmveth_get_strings,
+ .get_sset_count = ibmveth_get_sset_count,
+ .get_ethtool_stats = ibmveth_get_ethtool_stats,
+};
+
+static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ return -EOPNOTSUPP;
+}
+
+#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
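+/*
+ * Example: with the 4K page size assumed by the mask above,
+ * page_offset(0x12345) evaluates to 0x345, the offset of the address
+ * within its page.
+ */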
+
+static int ibmveth_send(struct ibmveth_adapter *adapter,
+ union ibmveth_buf_desc *descs)
+{
+ unsigned long correlator;
+ unsigned int retry_count;
+ unsigned long ret;
+
+ /*
+ * The retry count sets a maximum for the number of broadcast and
+ * multicast destinations within the system.
+ */
+ retry_count = 1024;
+ correlator = 0;
+ do {
+ ret = h_send_logical_lan(adapter->vdev->unit_address,
+ descs[0].desc, descs[1].desc,
+ descs[2].desc, descs[3].desc,
+ descs[4].desc, descs[5].desc,
+ correlator, &correlator);
+ } while ((ret == H_BUSY) && (retry_count--));
+
+ if (ret != H_SUCCESS && ret != H_DROPPED) {
+ netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
+ "with rc=%ld\n", ret);
+ return 1;
+ }
+
+ return 0;
+}
+
+static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ unsigned int desc_flags;
+ union ibmveth_buf_desc descs[6];
+ int last, i;
+ int force_bounce = 0;
+ dma_addr_t dma_addr;
+
+ /*
+ * veth handles a maximum of 6 segments including the header, so
+ * we have to linearize the skb if there are more than this.
+ */
+ if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+
+ /* veth can't checksum offload UDP */
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ ((skb->protocol == htons(ETH_P_IP) &&
+ ip_hdr(skb)->protocol != IPPROTO_TCP) ||
+ (skb->protocol == htons(ETH_P_IPV6) &&
+ ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
+ skb_checksum_help(skb)) {
+
+ netdev_err(netdev, "tx: failed to checksum packet\n");
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+
+ desc_flags = IBMVETH_BUF_VALID;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ unsigned char *buf = skb_transport_header(skb) +
+ skb->csum_offset;
+
+ desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
+
+ /* Need to zero out the checksum */
+ buf[0] = 0;
+ buf[1] = 0;
+ }
+
+retry_bounce:
+ memset(descs, 0, sizeof(descs));
+
+ /*
+ * If a linear packet is below the rx threshold then
+ * copy it into the static bounce buffer. This avoids the
+ * cost of a TCE insert and remove.
+ */
+ if (force_bounce || (!skb_is_nonlinear(skb) &&
+ (skb->len < tx_copybreak))) {
+ skb_copy_from_linear_data(skb, adapter->bounce_buffer,
+ skb->len);
+
+ descs[0].fields.flags_len = desc_flags | skb->len;
+ descs[0].fields.address = adapter->bounce_buffer_dma;
+
+ if (ibmveth_send(adapter, descs)) {
+ adapter->tx_send_failed++;
+ netdev->stats.tx_dropped++;
+ } else {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+ }
+
+ goto out;
+ }
+
+ /* Map the header */
+ dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
+ goto map_failed;
+
+ descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
+ descs[0].fields.address = dma_addr;
+
+ /* Map the frags */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
+ frag->size, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
+ goto map_failed_frags;
+
+ descs[i+1].fields.flags_len = desc_flags | frag->size;
+ descs[i+1].fields.address = dma_addr;
+ }
+
+ if (ibmveth_send(adapter, descs)) {
+ adapter->tx_send_failed++;
+ netdev->stats.tx_dropped++;
+ } else {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+ }
+
+ dma_unmap_single(&adapter->vdev->dev,
+ descs[0].fields.address,
+ descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+ DMA_TO_DEVICE);
+
+ for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
+ dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
+ descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+ DMA_TO_DEVICE);
+
+out:
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+
+map_failed_frags:
+	last = i+1;
+	/* descs[0] was mapped with dma_map_single(); unmap it with the
+	 * matching call, and unmap the frags with dma_unmap_page(). */
+	for (i = 1; i < last; i++)
+		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
+			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+			       DMA_TO_DEVICE);
+	dma_unmap_single(&adapter->vdev->dev, descs[0].fields.address,
+			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+			 DMA_TO_DEVICE);
+
+map_failed:
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ netdev_err(netdev, "tx: unable to map xmit buffer\n");
+ adapter->tx_map_failed++;
+ skb_linearize(skb);
+ force_bounce = 1;
+ goto retry_bounce;
+}
+
+static int ibmveth_poll(struct napi_struct *napi, int budget)
+{
+ struct ibmveth_adapter *adapter =
+ container_of(napi, struct ibmveth_adapter, napi);
+ struct net_device *netdev = adapter->netdev;
+ int frames_processed = 0;
+ unsigned long lpar_rc;
+
+restart_poll:
+ do {
+ if (!ibmveth_rxq_pending_buffer(adapter))
+ break;
+
+ smp_rmb();
+ if (!ibmveth_rxq_buffer_valid(adapter)) {
+ wmb(); /* suggested by larson1 */
+ adapter->rx_invalid_buffer++;
+ netdev_dbg(netdev, "recycling invalid buffer\n");
+ ibmveth_rxq_recycle_buffer(adapter);
+ } else {
+ struct sk_buff *skb, *new_skb;
+ int length = ibmveth_rxq_frame_length(adapter);
+ int offset = ibmveth_rxq_frame_offset(adapter);
+ int csum_good = ibmveth_rxq_csum_good(adapter);
+
+ skb = ibmveth_rxq_get_buffer(adapter);
+
+ new_skb = NULL;
+ if (length < rx_copybreak)
+ new_skb = netdev_alloc_skb(netdev, length);
+
+ if (new_skb) {
+ skb_copy_to_linear_data(new_skb,
+ skb->data + offset,
+ length);
+ if (rx_flush)
+ ibmveth_flush_buffer(skb->data,
+ length + offset);
+ if (!ibmveth_rxq_recycle_buffer(adapter))
+ kfree_skb(skb);
+ skb = new_skb;
+ } else {
+ ibmveth_rxq_harvest_buffer(adapter);
+ skb_reserve(skb, offset);
+ }
+
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (csum_good)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ netif_receive_skb(skb); /* send it up */
+
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += length;
+ frames_processed++;
+ }
+ } while (frames_processed < budget);
+
+ ibmveth_replenish_task(adapter);
+
+ if (frames_processed < budget) {
+ /* We think we are done - reenable interrupts,
+ * then check once more to make sure we are done.
+ */
+ lpar_rc = h_vio_signal(adapter->vdev->unit_address,
+ VIO_IRQ_ENABLE);
+
+ BUG_ON(lpar_rc != H_SUCCESS);
+
+ napi_complete(napi);
+
+ if (ibmveth_rxq_pending_buffer(adapter) &&
+ napi_reschedule(napi)) {
+ lpar_rc = h_vio_signal(adapter->vdev->unit_address,
+ VIO_IRQ_DISABLE);
+ goto restart_poll;
+ }
+ }
+
+ return frames_processed;
+}
+
+static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
+{
+ struct net_device *netdev = dev_instance;
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ unsigned long lpar_rc;
+
+ if (napi_schedule_prep(&adapter->napi)) {
+ lpar_rc = h_vio_signal(adapter->vdev->unit_address,
+ VIO_IRQ_DISABLE);
+ BUG_ON(lpar_rc != H_SUCCESS);
+ __napi_schedule(&adapter->napi);
+ }
+ return IRQ_HANDLED;
+}
+
+static void ibmveth_set_multicast_list(struct net_device *netdev)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ unsigned long lpar_rc;
+
+ if ((netdev->flags & IFF_PROMISC) ||
+ (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
+ lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
+ IbmVethMcastEnableRecv |
+ IbmVethMcastDisableFiltering,
+ 0);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
+ "entering promisc mode\n", lpar_rc);
+ }
+ } else {
+ struct netdev_hw_addr *ha;
+ /* clear the filter table & disable filtering */
+ lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
+ IbmVethMcastEnableRecv |
+ IbmVethMcastDisableFiltering |
+ IbmVethMcastClearFilterTable,
+ 0);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
+ "attempting to clear filter table\n",
+ lpar_rc);
+ }
+ /* add the addresses to the filter table */
+ netdev_for_each_mc_addr(ha, netdev) {
+ /* add the multicast address to the filter table */
+ unsigned long mcast_addr = 0;
+ memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
+ lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
+ IbmVethMcastAddFilter,
+ mcast_addr);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_multicast_ctrl rc=%ld "
+ "when adding an entry to the filter "
+ "table\n", lpar_rc);
+ }
+ }
+
+ /* re-enable filtering */
+ lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
+ IbmVethMcastEnableFiltering,
+ 0);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
+ "enabling filtering\n", lpar_rc);
+ }
+ }
+}
+
+static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+ struct vio_dev *viodev = adapter->vdev;
+ int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
+ int i, rc;
+ int need_restart = 0;
+
+ if (new_mtu < IBMVETH_MIN_MTU)
+ return -EINVAL;
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
+ break;
+
+ if (i == IBMVETH_NUM_BUFF_POOLS)
+ return -EINVAL;
+
+	/*
+	 * Deactivate all the buffer pools so that the next loop can activate
+	 * only the buffer pools necessary to hold the new MTU.
+	 */
+ if (netif_running(adapter->netdev)) {
+ need_restart = 1;
+ adapter->pool_config = 1;
+ ibmveth_close(adapter->netdev);
+ adapter->pool_config = 0;
+ }
+
+ /* Look for an active buffer pool that can hold the new MTU */
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+ adapter->rx_buff_pool[i].active = 1;
+
+ if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
+ dev->mtu = new_mtu;
+ vio_cmo_set_dev_desired(viodev,
+ ibmveth_get_desired_dma
+ (viodev));
+ if (need_restart) {
+ return ibmveth_open(adapter->netdev);
+ }
+ return 0;
+ }
+ }
+
+ if (need_restart && (rc = ibmveth_open(adapter->netdev)))
+ return rc;
+
+ return -EINVAL;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ibmveth_poll_controller(struct net_device *dev)
+{
+ ibmveth_replenish_task(netdev_priv(dev));
+ ibmveth_interrupt(dev->irq, dev);
+}
+#endif
+
+/**
+ * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
+ *
+ * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
+ *
+ * Return value:
+ * Number of bytes of IO data the driver will need to perform well.
+ */
+static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
+{
+ struct net_device *netdev = dev_get_drvdata(&vdev->dev);
+ struct ibmveth_adapter *adapter;
+ unsigned long ret;
+ int i;
+ int rxqentries = 1;
+
+	/* netdev inits at probe time along with the structures we need below */
+ if (netdev == NULL)
+ return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
+
+ adapter = netdev_priv(netdev);
+
+ ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
+ ret += IOMMU_PAGE_ALIGN(netdev->mtu);
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+ /* add the size of the active receive buffers */
+ if (adapter->rx_buff_pool[i].active)
+			ret += adapter->rx_buff_pool[i].size *
+			       IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].buff_size);
+ rxqentries += adapter->rx_buff_pool[i].size;
+ }
+ /* add the size of the receive queue entries */
+ ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
+
+ return ret;
+}
+
+static const struct net_device_ops ibmveth_netdev_ops = {
+ .ndo_open = ibmveth_open,
+ .ndo_stop = ibmveth_close,
+ .ndo_start_xmit = ibmveth_start_xmit,
+ .ndo_set_rx_mode = ibmveth_set_multicast_list,
+ .ndo_do_ioctl = ibmveth_ioctl,
+ .ndo_change_mtu = ibmveth_change_mtu,
+ .ndo_fix_features = ibmveth_fix_features,
+ .ndo_set_features = ibmveth_set_features,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ibmveth_poll_controller,
+#endif
+};
+
+static int __devinit ibmveth_probe(struct vio_dev *dev,
+ const struct vio_device_id *id)
+{
+ int rc, i;
+ struct net_device *netdev;
+ struct ibmveth_adapter *adapter;
+ unsigned char *mac_addr_p;
+ unsigned int *mcastFilterSize_p;
+
+ dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
+ dev->unit_address);
+
+ mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
+ NULL);
+ if (!mac_addr_p) {
+ dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
+ return -EINVAL;
+ }
+
+ mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
+ VETH_MCAST_FILTER_SIZE, NULL);
+ if (!mcastFilterSize_p) {
+ dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
+ "attribute\n");
+ return -EINVAL;
+ }
+
+ netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
+
+ if (!netdev)
+ return -ENOMEM;
+
+ adapter = netdev_priv(netdev);
+ dev_set_drvdata(&dev->dev, netdev);
+
+ adapter->vdev = dev;
+ adapter->netdev = netdev;
+ adapter->mcastFilterSize = *mcastFilterSize_p;
+ adapter->pool_config = 0;
+
+ netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
+
+ /*
+ * Some older boxes running PHYP non-natively have an OF that returns
+	 * an 8-byte local-mac-address field (and the first 2 bytes have to be
+	 * ignored) while newer boxes' OF returns a 6-byte field. Note that
+ * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
+	 * The RPA doc specifies that the low-order two bits of the first byte
+	 * must be 10b (binary), so we test for that pattern to tell the
+	 * 8-byte and 6-byte layouts apart.
+ */
+ if ((*mac_addr_p & 0x3) != 0x02)
+ mac_addr_p += 2;
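+	/*
+	 * Example (illustrative values): an 8-byte property of
+	 * 00 00 02 12 34 56 78 9a starts with 00, whose low-order bits are
+	 * not 10b, so the pointer is advanced past the two pad bytes and the
+	 * 6-byte MAC 02:12:34:56:78:9a is copied out below.
+	 */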
+
+ adapter->mac_addr = 0;
+ memcpy(&adapter->mac_addr, mac_addr_p, 6);
+
+ netdev->irq = dev->irq;
+ netdev->netdev_ops = &ibmveth_netdev_ops;
+ netdev->ethtool_ops = &netdev_ethtool_ops;
+ SET_NETDEV_DEV(netdev, &dev->dev);
+ netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->features |= netdev->hw_features;
+
+ memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+ struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
+ int error;
+
+ ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+ pool_count[i], pool_size[i],
+ pool_active[i]);
+ error = kobject_init_and_add(kobj, &ktype_veth_pool,
+ &dev->dev.kobj, "pool%d", i);
+ if (!error)
+ kobject_uevent(kobj, KOBJ_ADD);
+ }
+
+ netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
+
+ adapter->buffer_list_dma = DMA_ERROR_CODE;
+ adapter->filter_list_dma = DMA_ERROR_CODE;
+ adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
+
+ netdev_dbg(netdev, "registering netdev...\n");
+
+ ibmveth_set_features(netdev, netdev->features);
+
+ rc = register_netdev(netdev);
+
+ if (rc) {
+ netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
+ free_netdev(netdev);
+ return rc;
+ }
+
+ netdev_dbg(netdev, "registered\n");
+
+ return 0;
+}
+
+static int __devexit ibmveth_remove(struct vio_dev *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(&dev->dev);
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ kobject_put(&adapter->rx_buff_pool[i].kobj);
+
+ unregister_netdev(netdev);
+
+ free_netdev(netdev);
+ dev_set_drvdata(&dev->dev, NULL);
+
+ return 0;
+}
+
+static struct attribute veth_active_attr;
+static struct attribute veth_num_attr;
+static struct attribute veth_size_attr;
+
+static ssize_t veth_pool_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct ibmveth_buff_pool *pool = container_of(kobj,
+ struct ibmveth_buff_pool,
+ kobj);
+
+ if (attr == &veth_active_attr)
+ return sprintf(buf, "%d\n", pool->active);
+ else if (attr == &veth_num_attr)
+ return sprintf(buf, "%d\n", pool->size);
+ else if (attr == &veth_size_attr)
+ return sprintf(buf, "%d\n", pool->buff_size);
+ return 0;
+}
+
+static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ibmveth_buff_pool *pool = container_of(kobj,
+ struct ibmveth_buff_pool,
+ kobj);
+ struct net_device *netdev = dev_get_drvdata(
+ container_of(kobj->parent, struct device, kobj));
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ long value = simple_strtol(buf, NULL, 10);
+ long rc;
+
+ if (attr == &veth_active_attr) {
+ if (value && !pool->active) {
+ if (netif_running(netdev)) {
+ if (ibmveth_alloc_buffer_pool(pool)) {
+ netdev_err(netdev,
+ "unable to alloc pool\n");
+ return -ENOMEM;
+ }
+ pool->active = 1;
+ adapter->pool_config = 1;
+ ibmveth_close(netdev);
+ adapter->pool_config = 0;
+ if ((rc = ibmveth_open(netdev)))
+ return rc;
+ } else {
+ pool->active = 1;
+ }
+ } else if (!value && pool->active) {
+ int mtu = netdev->mtu + IBMVETH_BUFF_OH;
+ int i;
+			/*
+			 * Make sure there is a buffer pool with buffers that
+			 * can hold a packet of the size of the MTU.
+			 */
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+ if (pool == &adapter->rx_buff_pool[i])
+ continue;
+ if (!adapter->rx_buff_pool[i].active)
+ continue;
+ if (mtu <= adapter->rx_buff_pool[i].buff_size)
+ break;
+ }
+
+ if (i == IBMVETH_NUM_BUFF_POOLS) {
+ netdev_err(netdev, "no active pool >= MTU\n");
+ return -EPERM;
+ }
+
+ if (netif_running(netdev)) {
+ adapter->pool_config = 1;
+ ibmveth_close(netdev);
+ pool->active = 0;
+ adapter->pool_config = 0;
+ if ((rc = ibmveth_open(netdev)))
+ return rc;
+ }
+ pool->active = 0;
+ }
+ } else if (attr == &veth_num_attr) {
+ if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
+ return -EINVAL;
+ } else {
+ if (netif_running(netdev)) {
+ adapter->pool_config = 1;
+ ibmveth_close(netdev);
+ adapter->pool_config = 0;
+ pool->size = value;
+ if ((rc = ibmveth_open(netdev)))
+ return rc;
+ } else {
+ pool->size = value;
+ }
+ }
+ } else if (attr == &veth_size_attr) {
+ if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
+ return -EINVAL;
+ } else {
+ if (netif_running(netdev)) {
+ adapter->pool_config = 1;
+ ibmveth_close(netdev);
+ adapter->pool_config = 0;
+ pool->buff_size = value;
+ if ((rc = ibmveth_open(netdev)))
+ return rc;
+ } else {
+ pool->buff_size = value;
+ }
+ }
+ }
+
+ /* kick the interrupt handler to allocate/deallocate pools */
+ ibmveth_interrupt(netdev->irq, netdev);
+ return count;
+}
+
+#define ATTR(_name, _mode) \
+ struct attribute veth_##_name##_attr = { \
+ .name = __stringify(_name), .mode = _mode, \
+ };
+
+static ATTR(active, 0644);
+static ATTR(num, 0644);
+static ATTR(size, 0644);
+
+static struct attribute *veth_pool_attrs[] = {
+ &veth_active_attr,
+ &veth_num_attr,
+ &veth_size_attr,
+ NULL,
+};
+
+static const struct sysfs_ops veth_pool_ops = {
+ .show = veth_pool_show,
+ .store = veth_pool_store,
+};
+
+static struct kobj_type ktype_veth_pool = {
+ .release = NULL,
+ .sysfs_ops = &veth_pool_ops,
+ .default_attrs = veth_pool_attrs,
+};
+
+static int ibmveth_resume(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ ibmveth_interrupt(netdev->irq, netdev);
+ return 0;
+}
+
+static struct vio_device_id ibmveth_device_table[] __devinitdata = {
+ { "network", "IBM,l-lan"},
+ { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
+
+static struct dev_pm_ops ibmveth_pm_ops = {
+ .resume = ibmveth_resume
+};
+
+static struct vio_driver ibmveth_driver = {
+ .id_table = ibmveth_device_table,
+ .probe = ibmveth_probe,
+ .remove = ibmveth_remove,
+ .get_desired_dma = ibmveth_get_desired_dma,
+ .driver = {
+ .name = ibmveth_driver_name,
+ .owner = THIS_MODULE,
+ .pm = &ibmveth_pm_ops,
+ }
+};
+
+static int __init ibmveth_module_init(void)
+{
+ printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
+ ibmveth_driver_string, ibmveth_driver_version);
+
+ return vio_register_driver(&ibmveth_driver);
+}
+
+static void __exit ibmveth_module_exit(void)
+{
+ vio_unregister_driver(&ibmveth_driver);
+}
+
+module_init(ibmveth_module_init);
+module_exit(ibmveth_module_exit);
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
new file mode 100644
index 000000000000..43a794fab9ff
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -0,0 +1,195 @@
+/*
+ * IBM Power Virtual Ethernet Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2003, 2010
+ *
+ * Authors: Dave Larson <larson1@us.ibm.com>
+ * Santiago Leon <santil@linux.vnet.ibm.com>
+ * Brian King <brking@linux.vnet.ibm.com>
+ * Robert Jennings <rcj@linux.vnet.ibm.com>
+ * Anton Blanchard <anton@au.ibm.com>
+ */
+
+#ifndef _IBMVETH_H
+#define _IBMVETH_H
+
+/* constants for H_MULTICAST_CTRL */
+#define IbmVethMcastReceptionModifyBit 0x80000UL
+#define IbmVethMcastReceptionEnableBit 0x20000UL
+#define IbmVethMcastFilterModifyBit 0x40000UL
+#define IbmVethMcastFilterEnableBit 0x10000UL
+
+#define IbmVethMcastEnableRecv (IbmVethMcastReceptionModifyBit | IbmVethMcastReceptionEnableBit)
+#define IbmVethMcastDisableRecv (IbmVethMcastReceptionModifyBit)
+#define IbmVethMcastEnableFiltering (IbmVethMcastFilterModifyBit | IbmVethMcastFilterEnableBit)
+#define IbmVethMcastDisableFiltering (IbmVethMcastFilterModifyBit)
+#define IbmVethMcastAddFilter 0x1UL
+#define IbmVethMcastRemoveFilter 0x2UL
+#define IbmVethMcastClearFilterTable 0x3UL
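+
+/*
+ * Example: IbmVethMcastEnableRecv above expands to 0xA0000UL, the
+ * reception modify bit ORed with the reception enable bit; as the bit
+ * names suggest, the modify bit marks which setting a given call changes.
+ */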
+
+#define IBMVETH_ILLAN_PADDED_PKT_CSUM 0x0000000000002000UL
+#define IBMVETH_ILLAN_TRUNK_PRI_MASK 0x0000000000000F00UL
+#define IBMVETH_ILLAN_IPV6_TCP_CSUM 0x0000000000000004UL
+#define IBMVETH_ILLAN_IPV4_TCP_CSUM 0x0000000000000002UL
+#define IBMVETH_ILLAN_ACTIVE_TRUNK 0x0000000000000001UL
+
+/* hcall macros */
+#define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
+ plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, rxq, fltlst, mac)
+
+#define h_free_logical_lan(ua) \
+ plpar_hcall_norets(H_FREE_LOGICAL_LAN, ua)
+
+#define h_add_logical_lan_buffer(ua, buf) \
+ plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
+
+static inline long h_send_logical_lan(unsigned long unit_address,
+ unsigned long desc1, unsigned long desc2, unsigned long desc3,
+ unsigned long desc4, unsigned long desc5, unsigned long desc6,
+	unsigned long correlator_in, unsigned long *correlator_out)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+ rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, desc1,
+			desc2, desc3, desc4, desc5, desc6, correlator_in);
+
+	*correlator_out = retbuf[0];
+
+ return rc;
+}
+
+static inline long h_illan_attributes(unsigned long unit_address,
+ unsigned long reset_mask, unsigned long set_mask,
+ unsigned long *ret_attributes)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall(H_ILLAN_ATTRIBUTES, retbuf, unit_address,
+ reset_mask, set_mask);
+
+ *ret_attributes = retbuf[0];
+
+ return rc;
+}
+
+#define h_multicast_ctrl(ua, cmd, mac) \
+ plpar_hcall_norets(H_MULTICAST_CTRL, ua, cmd, mac)
+
+#define h_change_logical_lan_mac(ua, mac) \
+ plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
+
+#define IBMVETH_NUM_BUFF_POOLS 5
+#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */
+#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
+#define IBMVETH_MIN_MTU 68
+#define IBMVETH_MAX_POOL_COUNT 4096
+#define IBMVETH_BUFF_LIST_SIZE 4096
+#define IBMVETH_FILT_LIST_SIZE 4096
+#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
+
+static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
+static int pool_count[] = { 256, 512, 256, 256, 256 };
+static int pool_active[] = { 1, 1, 0, 0, 0 };
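+
+/*
+ * Example: with the defaults above, pool 0 offers 256 buffers of 512
+ * bytes and pool 1 offers 512 buffers of 2KB; the three larger pools
+ * start out inactive and can be enabled through the per-pool sysfs
+ * entries.
+ */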
+
+#define IBM_VETH_INVALID_MAP ((u16)0xffff)
+
+struct ibmveth_buff_pool {
+ u32 size;
+ u32 index;
+ u32 buff_size;
+ u32 threshold;
+ atomic_t available;
+ u32 consumer_index;
+ u32 producer_index;
+ u16 *free_map;
+ dma_addr_t *dma_addr;
+ struct sk_buff **skbuff;
+ int active;
+ struct kobject kobj;
+};
+
+struct ibmveth_rx_q {
+ u64 index;
+ u64 num_slots;
+ u64 toggle;
+ dma_addr_t queue_dma;
+ u32 queue_len;
+ struct ibmveth_rx_q_entry *queue_addr;
+};
+
+struct ibmveth_adapter {
+ struct vio_dev *vdev;
+ struct net_device *netdev;
+ struct napi_struct napi;
+ struct net_device_stats stats;
+ unsigned int mcastFilterSize;
+ unsigned long mac_addr;
+	void *buffer_list_addr;
+	void *filter_list_addr;
+ dma_addr_t buffer_list_dma;
+ dma_addr_t filter_list_dma;
+ struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
+ struct ibmveth_rx_q rx_queue;
+ int pool_config;
+ int rx_csum;
+ void *bounce_buffer;
+ dma_addr_t bounce_buffer_dma;
+
+ u64 fw_ipv6_csum_support;
+ u64 fw_ipv4_csum_support;
+ /* adapter specific stats */
+ u64 replenish_task_cycles;
+ u64 replenish_no_mem;
+ u64 replenish_add_buff_failure;
+ u64 replenish_add_buff_success;
+ u64 rx_invalid_buffer;
+ u64 rx_no_buffer;
+ u64 tx_map_failed;
+ u64 tx_send_failed;
+};
+
+struct ibmveth_buf_desc_fields {
+ u32 flags_len;
+#define IBMVETH_BUF_VALID 0x80000000
+#define IBMVETH_BUF_TOGGLE 0x40000000
+#define IBMVETH_BUF_NO_CSUM 0x02000000
+#define IBMVETH_BUF_CSUM_GOOD 0x01000000
+#define IBMVETH_BUF_LEN_MASK 0x00FFFFFF
+ u32 address;
+};
+
+union ibmveth_buf_desc {
+ u64 desc;
+ struct ibmveth_buf_desc_fields fields;
+};
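+
+/*
+ * Note (illustrative): on the big-endian POWER machines this driver
+ * targets, filling in the fields member composes the single u64 hcall
+ * argument that is read back through .desc, e.g.:
+ *
+ *	union ibmveth_buf_desc d;
+ *	d.fields.flags_len = IBMVETH_BUF_VALID | len;
+ *	d.fields.address = dma_addr;
+ *	lpar_rc = h_add_logical_lan_buffer(unit_address, d.desc);
+ */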
+
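+/*
+ * Illustrative note: flags_off packs the rx status flags into its high
+ * bits and the frame's byte offset within the receive buffer into the
+ * low 16 bits (IBMVETH_RXQ_OFF_MASK); correlator identifies the pool
+ * and buffer the frame arrived in.
+ */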
+struct ibmveth_rx_q_entry {
+ u32 flags_off;
+#define IBMVETH_RXQ_TOGGLE 0x80000000
+#define IBMVETH_RXQ_TOGGLE_SHIFT 31
+#define IBMVETH_RXQ_VALID 0x40000000
+#define IBMVETH_RXQ_NO_CSUM 0x02000000
+#define IBMVETH_RXQ_CSUM_GOOD 0x01000000
+#define IBMVETH_RXQ_OFF_MASK 0x0000FFFF
+
+ u32 length;
+ u64 correlator;
+};
+
+#endif /* _IBMVETH_H */
diff --git a/drivers/net/ethernet/ibm/iseries_veth.c b/drivers/net/ethernet/ibm/iseries_veth.c
new file mode 100644
index 000000000000..4326681df382
--- /dev/null
+++ b/drivers/net/ethernet/ibm/iseries_veth.c
@@ -0,0 +1,1710 @@
+/* File veth.c created by Kyle A. Lucke on Mon Aug 7 2000. */
+/*
+ * IBM eServer iSeries Virtual Ethernet Device Driver
+ * Copyright (C) 2001 Kyle A. Lucke (klucke@us.ibm.com), IBM Corp.
+ * Substantially cleaned up by:
+ * Copyright (C) 2003 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
+ * Copyright (C) 2004-2005 Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ *
+ *
+ * This module implements the virtual ethernet device for iSeries LPAR
+ * Linux. It uses hypervisor message passing to implement an
+ * ethernet-like network device communicating between partitions on
+ * the iSeries.
+ *
+ * The iSeries LPAR hypervisor currently allows for up to 16 different
+ * virtual ethernets. These are all dynamically configurable on
+ * OS/400 partitions, but dynamic configuration is not supported under
+ * Linux yet. An ethXX network device will be created for each
+ * virtual ethernet this partition is connected to.
+ *
+ * - This driver is responsible for routing packets to and from other
+ * partitions. The MAC addresses used by the virtual ethernets
+ * contain meaning and must not be modified.
+ *
+ * - Having 2 virtual ethernets to the same remote partition DOES NOT
+ * double the available bandwidth. The 2 devices will share the
+ * available hypervisor bandwidth.
+ *
+ * - If you send a packet to your own mac address, it will just be
+ * dropped, you won't get it on the receive side.
+ *
+ * - Multicast is implemented by sending the frame to every
+ * other partition. It is the responsibility of the receiving
+ * partition to filter the addresses desired.
+ *
+ * Tunable parameters:
+ *
+ * VETH_NUMBUFFERS: This compile time option defaults to 120. It
+ * controls how much memory Linux will allocate per remote partition
+ * it is communicating with. It can be thought of as the maximum
+ * number of packets outstanding to a remote partition at a time.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/slab.h>
+
+#include <asm/abs_addr.h>
+#include <asm/iseries/mf.h>
+#include <asm/uaccess.h>
+#include <asm/firmware.h>
+#include <asm/iseries/hv_lp_config.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_lp_event.h>
+#include <asm/iommu.h>
+#include <asm/vio.h>
+
+#undef DEBUG
+
+MODULE_AUTHOR("Kyle Lucke <klucke@us.ibm.com>");
+MODULE_DESCRIPTION("iSeries Virtual ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define VETH_EVENT_CAP (0)
+#define VETH_EVENT_FRAMES (1)
+#define VETH_EVENT_MONITOR (2)
+#define VETH_EVENT_FRAMES_ACK (3)
+
+#define VETH_MAX_ACKS_PER_MSG (20)
+#define VETH_MAX_FRAMES_PER_MSG (6)
+
+struct veth_frames_data {
+ u32 addr[VETH_MAX_FRAMES_PER_MSG];
+ u16 len[VETH_MAX_FRAMES_PER_MSG];
+ u32 eofmask;
+};
+#define VETH_EOF_SHIFT (32-VETH_MAX_FRAMES_PER_MSG)
+
+struct veth_frames_ack_data {
+ u16 token[VETH_MAX_ACKS_PER_MSG];
+};
+
+struct veth_cap_data {
+ u8 caps_version;
+ u8 rsvd1;
+ u16 num_buffers;
+ u16 ack_threshold;
+ u16 rsvd2;
+ u32 ack_timeout;
+ u32 rsvd3;
+ u64 rsvd4[3];
+};
+
+struct veth_lpevent {
+ struct HvLpEvent base_event;
+ union {
+ struct veth_cap_data caps_data;
+ struct veth_frames_data frames_data;
+ struct veth_frames_ack_data frames_ack_data;
+ } u;
+
+};
+
+#define DRV_NAME "iseries_veth"
+#define DRV_VERSION "2.0"
+
+#define VETH_NUMBUFFERS (120)
+#define VETH_ACKTIMEOUT (1000000) /* microseconds */
+#define VETH_MAX_MCAST (12)
+
+#define VETH_MAX_MTU (9000)
+
+#if VETH_NUMBUFFERS < 10
+#define ACK_THRESHOLD (1)
+#elif VETH_NUMBUFFERS < 20
+#define ACK_THRESHOLD (4)
+#elif VETH_NUMBUFFERS < 40
+#define ACK_THRESHOLD (10)
+#else
+#define ACK_THRESHOLD (20)
+#endif
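+
+/*
+ * Example: with the default VETH_NUMBUFFERS of 120, the ladder above
+ * selects an ACK_THRESHOLD of 20.
+ */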
+
+#define VETH_STATE_SHUTDOWN (0x0001)
+#define VETH_STATE_OPEN (0x0002)
+#define VETH_STATE_RESET (0x0004)
+#define VETH_STATE_SENTMON (0x0008)
+#define VETH_STATE_SENTCAPS (0x0010)
+#define VETH_STATE_GOTCAPACK (0x0020)
+#define VETH_STATE_GOTCAPS (0x0040)
+#define VETH_STATE_SENTCAPACK (0x0080)
+#define VETH_STATE_READY (0x0100)
+
+struct veth_msg {
+ struct veth_msg *next;
+ struct veth_frames_data data;
+ int token;
+ int in_use;
+ struct sk_buff *skb;
+ struct device *dev;
+};
+
+struct veth_lpar_connection {
+ HvLpIndex remote_lp;
+ struct delayed_work statemachine_wq;
+ struct veth_msg *msgs;
+ int num_events;
+ struct veth_cap_data local_caps;
+
+ struct kobject kobject;
+ struct timer_list ack_timer;
+
+ struct timer_list reset_timer;
+ unsigned int reset_timeout;
+ unsigned long last_contact;
+ int outstanding_tx;
+
+ spinlock_t lock;
+ unsigned long state;
+ HvLpInstanceId src_inst;
+ HvLpInstanceId dst_inst;
+ struct veth_lpevent cap_event, cap_ack_event;
+ u16 pending_acks[VETH_MAX_ACKS_PER_MSG];
+ u32 num_pending_acks;
+
+ int num_ack_events;
+ struct veth_cap_data remote_caps;
+ u32 ack_timeout;
+
+ struct veth_msg *msg_stack_head;
+};
+
+struct veth_port {
+ struct device *dev;
+ u64 mac_addr;
+ HvLpIndexMap lpar_map;
+
+ /* queue_lock protects the stopped_map and dev's queue. */
+ spinlock_t queue_lock;
+ HvLpIndexMap stopped_map;
+
+ /* mcast_gate protects promiscuous, num_mcast & mcast_addr. */
+ rwlock_t mcast_gate;
+ int promiscuous;
+ int num_mcast;
+ u64 mcast_addr[VETH_MAX_MCAST];
+
+ struct kobject kobject;
+};
+
+static HvLpIndex this_lp;
+static struct veth_lpar_connection *veth_cnx[HVMAXARCHITECTEDLPS]; /* = 0 */
+static struct net_device *veth_dev[HVMAXARCHITECTEDVIRTUALLANS]; /* = 0 */
+
+static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void veth_recycle_msg(struct veth_lpar_connection *, struct veth_msg *);
+static void veth_wake_queues(struct veth_lpar_connection *cnx);
+static void veth_stop_queues(struct veth_lpar_connection *cnx);
+static void veth_receive(struct veth_lpar_connection *, struct veth_lpevent *);
+static void veth_release_connection(struct kobject *kobject);
+static void veth_timed_ack(unsigned long ptr);
+static void veth_timed_reset(unsigned long ptr);
+
+/*
+ * Utility functions
+ */
+
+#define veth_info(fmt, args...) \
+ printk(KERN_INFO DRV_NAME ": " fmt, ## args)
+
+#define veth_error(fmt, args...) \
+ printk(KERN_ERR DRV_NAME ": Error: " fmt, ## args)
+
+#ifdef DEBUG
+#define veth_debug(fmt, args...) \
+ printk(KERN_DEBUG DRV_NAME ": " fmt, ## args)
+#else
+#define veth_debug(fmt, args...) do {} while (0)
+#endif
+
+/* You must hold the connection's lock when you call this function. */
+static inline void veth_stack_push(struct veth_lpar_connection *cnx,
+ struct veth_msg *msg)
+{
+ msg->next = cnx->msg_stack_head;
+ cnx->msg_stack_head = msg;
+}
+
+/* You must hold the connection's lock when you call this function. */
+static inline struct veth_msg *veth_stack_pop(struct veth_lpar_connection *cnx)
+{
+ struct veth_msg *msg;
+
+ msg = cnx->msg_stack_head;
+ if (msg)
+ cnx->msg_stack_head = cnx->msg_stack_head->next;
+
+ return msg;
+}
+
+/* You must hold the connection's lock when you call this function. */
+static inline int veth_stack_is_empty(struct veth_lpar_connection *cnx)
+{
+ return cnx->msg_stack_head == NULL;
+}
+
+static inline HvLpEvent_Rc
+veth_signalevent(struct veth_lpar_connection *cnx, u16 subtype,
+ HvLpEvent_AckInd ackind, HvLpEvent_AckType acktype,
+ u64 token,
+ u64 data1, u64 data2, u64 data3, u64 data4, u64 data5)
+{
+ return HvCallEvent_signalLpEventFast(cnx->remote_lp,
+ HvLpEvent_Type_VirtualLan,
+ subtype, ackind, acktype,
+ cnx->src_inst,
+ cnx->dst_inst,
+ token, data1, data2, data3,
+ data4, data5);
+}
+
+static inline HvLpEvent_Rc veth_signaldata(struct veth_lpar_connection *cnx,
+ u16 subtype, u64 token, void *data)
+{
+ u64 *p = (u64 *) data;
+
+ return veth_signalevent(cnx, subtype, HvLpEvent_AckInd_NoAck,
+ HvLpEvent_AckType_ImmediateAck,
+ token, p[0], p[1], p[2], p[3], p[4]);
+}
+
+struct veth_allocation {
+ struct completion c;
+ int num;
+};
+
+static void veth_complete_allocation(void *parm, int number)
+{
+ struct veth_allocation *vc = (struct veth_allocation *)parm;
+
+ vc->num = number;
+ complete(&vc->c);
+}
+
+static int veth_allocate_events(HvLpIndex rlp, int number)
+{
+ struct veth_allocation vc =
+ { COMPLETION_INITIALIZER_ONSTACK(vc.c), 0 };
+
+ mf_allocate_lp_events(rlp, HvLpEvent_Type_VirtualLan,
+ sizeof(struct veth_lpevent), number,
+ &veth_complete_allocation, &vc);
+ wait_for_completion(&vc.c);
+
+ return vc.num;
+}
+
+/*
+ * sysfs support
+ */
+
+struct veth_cnx_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct veth_lpar_connection *, char *buf);
+ ssize_t (*store)(struct veth_lpar_connection *, const char *buf);
+};
+
+static ssize_t veth_cnx_attribute_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct veth_cnx_attribute *cnx_attr;
+ struct veth_lpar_connection *cnx;
+
+ cnx_attr = container_of(attr, struct veth_cnx_attribute, attr);
+ cnx = container_of(kobj, struct veth_lpar_connection, kobject);
+
+ if (!cnx_attr->show)
+ return -EIO;
+
+ return cnx_attr->show(cnx, buf);
+}
+
+#define CUSTOM_CNX_ATTR(_name, _format, _expression) \
+static ssize_t _name##_show(struct veth_lpar_connection *cnx, char *buf)\
+{ \
+ return sprintf(buf, _format, _expression); \
+} \
+struct veth_cnx_attribute veth_cnx_attr_##_name = __ATTR_RO(_name)
+
+#define SIMPLE_CNX_ATTR(_name) \
+ CUSTOM_CNX_ATTR(_name, "%lu\n", (unsigned long)cnx->_name)
+
+SIMPLE_CNX_ATTR(outstanding_tx);
+SIMPLE_CNX_ATTR(remote_lp);
+SIMPLE_CNX_ATTR(num_events);
+SIMPLE_CNX_ATTR(src_inst);
+SIMPLE_CNX_ATTR(dst_inst);
+SIMPLE_CNX_ATTR(num_pending_acks);
+SIMPLE_CNX_ATTR(num_ack_events);
+CUSTOM_CNX_ATTR(ack_timeout, "%d\n", jiffies_to_msecs(cnx->ack_timeout));
+CUSTOM_CNX_ATTR(reset_timeout, "%d\n", jiffies_to_msecs(cnx->reset_timeout));
+CUSTOM_CNX_ATTR(state, "0x%.4lX\n", cnx->state);
+CUSTOM_CNX_ATTR(last_contact, "%d\n", cnx->last_contact ?
+ jiffies_to_msecs(jiffies - cnx->last_contact) : 0);
+
+#define GET_CNX_ATTR(_name) (&veth_cnx_attr_##_name.attr)
+
+static struct attribute *veth_cnx_default_attrs[] = {
+ GET_CNX_ATTR(outstanding_tx),
+ GET_CNX_ATTR(remote_lp),
+ GET_CNX_ATTR(num_events),
+ GET_CNX_ATTR(reset_timeout),
+ GET_CNX_ATTR(last_contact),
+ GET_CNX_ATTR(state),
+ GET_CNX_ATTR(src_inst),
+ GET_CNX_ATTR(dst_inst),
+ GET_CNX_ATTR(num_pending_acks),
+ GET_CNX_ATTR(num_ack_events),
+ GET_CNX_ATTR(ack_timeout),
+ NULL
+};
+
+static const struct sysfs_ops veth_cnx_sysfs_ops = {
+ .show = veth_cnx_attribute_show
+};
+
+static struct kobj_type veth_lpar_connection_ktype = {
+ .release = veth_release_connection,
+ .sysfs_ops = &veth_cnx_sysfs_ops,
+ .default_attrs = veth_cnx_default_attrs
+};
+
+struct veth_port_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct veth_port *, char *buf);
+ ssize_t (*store)(struct veth_port *, const char *buf);
+};
+
+static ssize_t veth_port_attribute_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct veth_port_attribute *port_attr;
+ struct veth_port *port;
+
+ port_attr = container_of(attr, struct veth_port_attribute, attr);
+ port = container_of(kobj, struct veth_port, kobject);
+
+ if (!port_attr->show)
+ return -EIO;
+
+ return port_attr->show(port, buf);
+}
+
+#define CUSTOM_PORT_ATTR(_name, _format, _expression) \
+static ssize_t _name##_show(struct veth_port *port, char *buf) \
+{ \
+ return sprintf(buf, _format, _expression); \
+} \
+struct veth_port_attribute veth_port_attr_##_name = __ATTR_RO(_name)
+
+#define SIMPLE_PORT_ATTR(_name) \
+ CUSTOM_PORT_ATTR(_name, "%lu\n", (unsigned long)port->_name)
+
+SIMPLE_PORT_ATTR(promiscuous);
+SIMPLE_PORT_ATTR(num_mcast);
+CUSTOM_PORT_ATTR(lpar_map, "0x%X\n", port->lpar_map);
+CUSTOM_PORT_ATTR(stopped_map, "0x%X\n", port->stopped_map);
+CUSTOM_PORT_ATTR(mac_addr, "0x%llX\n", port->mac_addr);
+
+#define GET_PORT_ATTR(_name) (&veth_port_attr_##_name.attr)
+static struct attribute *veth_port_default_attrs[] = {
+ GET_PORT_ATTR(mac_addr),
+ GET_PORT_ATTR(lpar_map),
+ GET_PORT_ATTR(stopped_map),
+ GET_PORT_ATTR(promiscuous),
+ GET_PORT_ATTR(num_mcast),
+ NULL
+};
+
+static const struct sysfs_ops veth_port_sysfs_ops = {
+ .show = veth_port_attribute_show
+};
+
+static struct kobj_type veth_port_ktype = {
+ .sysfs_ops = &veth_port_sysfs_ops,
+ .default_attrs = veth_port_default_attrs
+};
+
+/*
+ * LPAR connection code
+ */
+
+static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx)
+{
+ schedule_delayed_work(&cnx->statemachine_wq, 0);
+}
+
+static void veth_take_cap(struct veth_lpar_connection *cnx,
+ struct veth_lpevent *event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cnx->lock, flags);
+ /* Receiving caps may mean the other end has just come up, so
+ * we need to reload the instance ID of the far end */
+ cnx->dst_inst =
+ HvCallEvent_getTargetLpInstanceId(cnx->remote_lp,
+ HvLpEvent_Type_VirtualLan);
+
+ if (cnx->state & VETH_STATE_GOTCAPS) {
+		veth_error("Received a second capabilities message from LPAR %d.\n",
+ cnx->remote_lp);
+ event->base_event.xRc = HvLpEvent_Rc_BufferNotAvailable;
+ HvCallEvent_ackLpEvent((struct HvLpEvent *) event);
+ } else {
+ memcpy(&cnx->cap_event, event, sizeof(cnx->cap_event));
+ cnx->state |= VETH_STATE_GOTCAPS;
+ veth_kick_statemachine(cnx);
+ }
+ spin_unlock_irqrestore(&cnx->lock, flags);
+}
+
+static void veth_take_cap_ack(struct veth_lpar_connection *cnx,
+ struct veth_lpevent *event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cnx->lock, flags);
+ if (cnx->state & VETH_STATE_GOTCAPACK) {
+ veth_error("Received a second capabilities ack from LPAR %d.\n",
+ cnx->remote_lp);
+ } else {
+ memcpy(&cnx->cap_ack_event, event,
+ sizeof(cnx->cap_ack_event));
+ cnx->state |= VETH_STATE_GOTCAPACK;
+ veth_kick_statemachine(cnx);
+ }
+ spin_unlock_irqrestore(&cnx->lock, flags);
+}
+
+static void veth_take_monitor_ack(struct veth_lpar_connection *cnx,
+ struct veth_lpevent *event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cnx->lock, flags);
+ veth_debug("cnx %d: lost connection.\n", cnx->remote_lp);
+
+ /* Avoid kicking the statemachine once we're shutdown.
+ * It's unnecessary and it could break veth_stop_connection(). */
+
+ if (! (cnx->state & VETH_STATE_SHUTDOWN)) {
+ cnx->state |= VETH_STATE_RESET;
+ veth_kick_statemachine(cnx);
+ }
+ spin_unlock_irqrestore(&cnx->lock, flags);
+}
+
+static void veth_handle_ack(struct veth_lpevent *event)
+{
+ HvLpIndex rlp = event->base_event.xTargetLp;
+ struct veth_lpar_connection *cnx = veth_cnx[rlp];
+
+ BUG_ON(! cnx);
+
+ switch (event->base_event.xSubtype) {
+ case VETH_EVENT_CAP:
+ veth_take_cap_ack(cnx, event);
+ break;
+ case VETH_EVENT_MONITOR:
+ veth_take_monitor_ack(cnx, event);
+ break;
+ default:
+ veth_error("Unknown ack type %d from LPAR %d.\n",
+ event->base_event.xSubtype, rlp);
+ }
+}
+
+static void veth_handle_int(struct veth_lpevent *event)
+{
+ HvLpIndex rlp = event->base_event.xSourceLp;
+ struct veth_lpar_connection *cnx = veth_cnx[rlp];
+ unsigned long flags;
+ int i, acked = 0;
+
+ BUG_ON(! cnx);
+
+ switch (event->base_event.xSubtype) {
+ case VETH_EVENT_CAP:
+ veth_take_cap(cnx, event);
+ break;
+ case VETH_EVENT_MONITOR:
+ /* do nothing... this'll hang out here til we're dead,
+ * and the hypervisor will return it for us. */
+ break;
+ case VETH_EVENT_FRAMES_ACK:
+ spin_lock_irqsave(&cnx->lock, flags);
+
+ for (i = 0; i < VETH_MAX_ACKS_PER_MSG; ++i) {
+ u16 msgnum = event->u.frames_ack_data.token[i];
+
+ if (msgnum < VETH_NUMBUFFERS) {
+ veth_recycle_msg(cnx, cnx->msgs + msgnum);
+ cnx->outstanding_tx--;
+ acked++;
+ }
+ }
+
+ if (acked > 0) {
+ cnx->last_contact = jiffies;
+ veth_wake_queues(cnx);
+ }
+
+ spin_unlock_irqrestore(&cnx->lock, flags);
+ break;
+ case VETH_EVENT_FRAMES:
+ veth_receive(cnx, event);
+ break;
+ default:
+ veth_error("Unknown interrupt type %d from LPAR %d.\n",
+ event->base_event.xSubtype, rlp);
+ }
+}
+
+static void veth_handle_event(struct HvLpEvent *event)
+{
+ struct veth_lpevent *veth_event = (struct veth_lpevent *)event;
+
+ if (hvlpevent_is_ack(event))
+ veth_handle_ack(veth_event);
+ else
+ veth_handle_int(veth_event);
+}
+
+static int veth_process_caps(struct veth_lpar_connection *cnx)
+{
+ struct veth_cap_data *remote_caps = &cnx->remote_caps;
+ int num_acks_needed;
+
+ /* Convert timer to jiffies */
+ cnx->ack_timeout = remote_caps->ack_timeout * HZ / 1000000;
+
+ if ( (remote_caps->num_buffers == 0) ||
+ (remote_caps->ack_threshold > VETH_MAX_ACKS_PER_MSG) ||
+ (remote_caps->ack_threshold == 0) ||
+ (cnx->ack_timeout == 0) ) {
+ veth_error("Received incompatible capabilities from LPAR %d.\n",
+ cnx->remote_lp);
+ return HvLpEvent_Rc_InvalidSubtypeData;
+ }
+
+ num_acks_needed = (remote_caps->num_buffers
+ / remote_caps->ack_threshold) + 1;
+
+ /* FIXME: locking on num_ack_events? */
+ if (cnx->num_ack_events < num_acks_needed) {
+ int num;
+
+ num = veth_allocate_events(cnx->remote_lp,
+ num_acks_needed-cnx->num_ack_events);
+ if (num > 0)
+ cnx->num_ack_events += num;
+
+ if (cnx->num_ack_events < num_acks_needed) {
+ veth_error("Couldn't allocate enough ack events "
+ "for LPAR %d.\n", cnx->remote_lp);
+
+ return HvLpEvent_Rc_BufferNotAvailable;
+ }
+ }
+
+ return HvLpEvent_Rc_Good;
+}
+
+/* FIXME: The gotos here are a bit dubious */
+static void veth_statemachine(struct work_struct *work)
+{
+ struct veth_lpar_connection *cnx =
+ container_of(work, struct veth_lpar_connection,
+ statemachine_wq.work);
+ int rlp = cnx->remote_lp;
+ int rc;
+
+ spin_lock_irq(&cnx->lock);
+
+ restart:
+ if (cnx->state & VETH_STATE_RESET) {
+ if (cnx->state & VETH_STATE_OPEN)
+ HvCallEvent_closeLpEventPath(cnx->remote_lp,
+ HvLpEvent_Type_VirtualLan);
+
+ /*
+ * Reset ack data. This prevents the ack_timer actually
+ * doing anything, even if it runs one more time when
+ * we drop the lock below.
+ */
+ memset(&cnx->pending_acks, 0xff, sizeof (cnx->pending_acks));
+ cnx->num_pending_acks = 0;
+
+ cnx->state &= ~(VETH_STATE_RESET | VETH_STATE_SENTMON
+ | VETH_STATE_OPEN | VETH_STATE_SENTCAPS
+ | VETH_STATE_GOTCAPACK | VETH_STATE_GOTCAPS
+ | VETH_STATE_SENTCAPACK | VETH_STATE_READY);
+
+ /* Clean up any leftover messages */
+ if (cnx->msgs) {
+ int i;
+ for (i = 0; i < VETH_NUMBUFFERS; ++i)
+ veth_recycle_msg(cnx, cnx->msgs + i);
+ }
+
+ cnx->outstanding_tx = 0;
+ veth_wake_queues(cnx);
+
+ /* Drop the lock so we can do stuff that might sleep or
+ * take other locks. */
+ spin_unlock_irq(&cnx->lock);
+
+ del_timer_sync(&cnx->ack_timer);
+ del_timer_sync(&cnx->reset_timer);
+
+ spin_lock_irq(&cnx->lock);
+
+ if (cnx->state & VETH_STATE_RESET)
+ goto restart;
+
+ /* Hack, wait for the other end to reset itself. */
+ if (! (cnx->state & VETH_STATE_SHUTDOWN)) {
+ schedule_delayed_work(&cnx->statemachine_wq, 5 * HZ);
+ goto out;
+ }
+ }
+
+ if (cnx->state & VETH_STATE_SHUTDOWN)
+ /* It's all over, do nothing */
+ goto out;
+
+ if ( !(cnx->state & VETH_STATE_OPEN) ) {
+ if (! cnx->msgs || (cnx->num_events < (2 + VETH_NUMBUFFERS)) )
+ goto cant_cope;
+
+ HvCallEvent_openLpEventPath(rlp, HvLpEvent_Type_VirtualLan);
+ cnx->src_inst =
+ HvCallEvent_getSourceLpInstanceId(rlp,
+ HvLpEvent_Type_VirtualLan);
+ cnx->dst_inst =
+ HvCallEvent_getTargetLpInstanceId(rlp,
+ HvLpEvent_Type_VirtualLan);
+ cnx->state |= VETH_STATE_OPEN;
+ }
+
+ if ( (cnx->state & VETH_STATE_OPEN) &&
+ !(cnx->state & VETH_STATE_SENTMON) ) {
+ rc = veth_signalevent(cnx, VETH_EVENT_MONITOR,
+ HvLpEvent_AckInd_DoAck,
+ HvLpEvent_AckType_DeferredAck,
+ 0, 0, 0, 0, 0, 0);
+
+ if (rc == HvLpEvent_Rc_Good) {
+ cnx->state |= VETH_STATE_SENTMON;
+ } else {
+ if ( (rc != HvLpEvent_Rc_PartitionDead) &&
+ (rc != HvLpEvent_Rc_PathClosed) )
+ veth_error("Error sending monitor to LPAR %d, "
+ "rc = %d\n", rlp, rc);
+
+ /* Oh well, hope we get a cap from the other
+ * end and do better when that kicks us */
+ goto out;
+ }
+ }
+
+ if ( (cnx->state & VETH_STATE_OPEN) &&
+ !(cnx->state & VETH_STATE_SENTCAPS)) {
+ u64 *rawcap = (u64 *)&cnx->local_caps;
+
+ rc = veth_signalevent(cnx, VETH_EVENT_CAP,
+ HvLpEvent_AckInd_DoAck,
+ HvLpEvent_AckType_ImmediateAck,
+ 0, rawcap[0], rawcap[1], rawcap[2],
+ rawcap[3], rawcap[4]);
+
+ if (rc == HvLpEvent_Rc_Good) {
+ cnx->state |= VETH_STATE_SENTCAPS;
+ } else {
+ if ( (rc != HvLpEvent_Rc_PartitionDead) &&
+ (rc != HvLpEvent_Rc_PathClosed) )
+ veth_error("Error sending caps to LPAR %d, "
+ "rc = %d\n", rlp, rc);
+
+ /* Oh well, hope we get a cap from the other
+ * end and do better when that kicks us */
+ goto out;
+ }
+ }
+
+ if ((cnx->state & VETH_STATE_GOTCAPS) &&
+ !(cnx->state & VETH_STATE_SENTCAPACK)) {
+ struct veth_cap_data *remote_caps = &cnx->remote_caps;
+
+ memcpy(remote_caps, &cnx->cap_event.u.caps_data,
+ sizeof(*remote_caps));
+
+ spin_unlock_irq(&cnx->lock);
+ rc = veth_process_caps(cnx);
+ spin_lock_irq(&cnx->lock);
+
+ /* We dropped the lock, so recheck for anything which
+ * might mess us up */
+ if (cnx->state & (VETH_STATE_RESET|VETH_STATE_SHUTDOWN))
+ goto restart;
+
+ cnx->cap_event.base_event.xRc = rc;
+ HvCallEvent_ackLpEvent((struct HvLpEvent *)&cnx->cap_event);
+ if (rc == HvLpEvent_Rc_Good)
+ cnx->state |= VETH_STATE_SENTCAPACK;
+ else
+ goto cant_cope;
+ }
+
+ if ((cnx->state & VETH_STATE_GOTCAPACK) &&
+ (cnx->state & VETH_STATE_GOTCAPS) &&
+ !(cnx->state & VETH_STATE_READY)) {
+ if (cnx->cap_ack_event.base_event.xRc == HvLpEvent_Rc_Good) {
+ /* Start the ACK timer */
+ cnx->ack_timer.expires = jiffies + cnx->ack_timeout;
+ add_timer(&cnx->ack_timer);
+ cnx->state |= VETH_STATE_READY;
+ } else {
+ veth_error("Caps rejected by LPAR %d, rc = %d\n",
+ rlp, cnx->cap_ack_event.base_event.xRc);
+ goto cant_cope;
+ }
+ }
+
+ out:
+ spin_unlock_irq(&cnx->lock);
+ return;
+
+ cant_cope:
+ /* FIXME: we get here if something happens we really can't
+ * cope with. The link will never work once we get here, and
+ * all we can do is not lock the rest of the system up */
+ veth_error("Unrecoverable error on connection to LPAR %d, shutting down"
+ " (state = 0x%04lx)\n", rlp, cnx->state);
+ cnx->state |= VETH_STATE_SHUTDOWN;
+ spin_unlock_irq(&cnx->lock);
+}
+
+static int veth_init_connection(u8 rlp)
+{
+ struct veth_lpar_connection *cnx;
+ struct veth_msg *msgs;
+ int i;
+
+ if ( (rlp == this_lp) ||
+ ! HvLpConfig_doLpsCommunicateOnVirtualLan(this_lp, rlp) )
+ return 0;
+
+ cnx = kzalloc(sizeof(*cnx), GFP_KERNEL);
+ if (! cnx)
+ return -ENOMEM;
+
+ cnx->remote_lp = rlp;
+ spin_lock_init(&cnx->lock);
+ INIT_DELAYED_WORK(&cnx->statemachine_wq, veth_statemachine);
+
+ init_timer(&cnx->ack_timer);
+ cnx->ack_timer.function = veth_timed_ack;
+ cnx->ack_timer.data = (unsigned long) cnx;
+
+ init_timer(&cnx->reset_timer);
+ cnx->reset_timer.function = veth_timed_reset;
+ cnx->reset_timer.data = (unsigned long) cnx;
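+	/* VETH_ACKTIMEOUT is in microseconds (see the jiffies
+	 * conversion in veth_process_caps()); allow five ack periods
+	 * before declaring the link dead. */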
+ cnx->reset_timeout = 5 * HZ * (VETH_ACKTIMEOUT / 1000000);
+
+ memset(&cnx->pending_acks, 0xff, sizeof (cnx->pending_acks));
+
+ veth_cnx[rlp] = cnx;
+
+ /* This gets us 1 reference, which is held on behalf of the driver
+ * infrastructure. It's released at module unload. */
+ kobject_init(&cnx->kobject, &veth_lpar_connection_ktype);
+
+ msgs = kcalloc(VETH_NUMBUFFERS, sizeof(struct veth_msg), GFP_KERNEL);
+ if (! msgs) {
+ veth_error("Can't allocate buffers for LPAR %d.\n", rlp);
+ return -ENOMEM;
+ }
+
+ cnx->msgs = msgs;
+
+ for (i = 0; i < VETH_NUMBUFFERS; i++) {
+ msgs[i].token = i;
+ veth_stack_push(cnx, msgs + i);
+ }
+
+ cnx->num_events = veth_allocate_events(rlp, 2 + VETH_NUMBUFFERS);
+
+ if (cnx->num_events < (2 + VETH_NUMBUFFERS)) {
+ veth_error("Can't allocate enough events for LPAR %d.\n", rlp);
+ return -ENOMEM;
+ }
+
+ cnx->local_caps.num_buffers = VETH_NUMBUFFERS;
+ cnx->local_caps.ack_threshold = ACK_THRESHOLD;
+ cnx->local_caps.ack_timeout = VETH_ACKTIMEOUT;
+
+ return 0;
+}
+
+static void veth_stop_connection(struct veth_lpar_connection *cnx)
+{
+ if (!cnx)
+ return;
+
+ spin_lock_irq(&cnx->lock);
+ cnx->state |= VETH_STATE_RESET | VETH_STATE_SHUTDOWN;
+ veth_kick_statemachine(cnx);
+ spin_unlock_irq(&cnx->lock);
+
+ /* ensure the statemachine runs now and waits for its completion */
+ flush_delayed_work_sync(&cnx->statemachine_wq);
+}
+
+static void veth_destroy_connection(struct veth_lpar_connection *cnx)
+{
+ if (!cnx)
+ return;
+
+ if (cnx->num_events > 0)
+ mf_deallocate_lp_events(cnx->remote_lp,
+ HvLpEvent_Type_VirtualLan,
+ cnx->num_events,
+ NULL, NULL);
+ if (cnx->num_ack_events > 0)
+ mf_deallocate_lp_events(cnx->remote_lp,
+ HvLpEvent_Type_VirtualLan,
+ cnx->num_ack_events,
+ NULL, NULL);
+
+ kfree(cnx->msgs);
+ veth_cnx[cnx->remote_lp] = NULL;
+ kfree(cnx);
+}
+
+static void veth_release_connection(struct kobject *kobj)
+{
+ struct veth_lpar_connection *cnx;
+ cnx = container_of(kobj, struct veth_lpar_connection, kobject);
+ veth_stop_connection(cnx);
+ veth_destroy_connection(cnx);
+}
+
+/*
+ * net_device code
+ */
+
+static int veth_open(struct net_device *dev)
+{
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int veth_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static int veth_change_mtu(struct net_device *dev, int new_mtu)
+{
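+	/* 68 bytes is the minimum MTU an IPv4 host must accept. */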
+ if ((new_mtu < 68) || (new_mtu > VETH_MAX_MTU))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static void veth_set_multicast_list(struct net_device *dev)
+{
+ struct veth_port *port = netdev_priv(dev);
+ unsigned long flags;
+
+ write_lock_irqsave(&port->mcast_gate, flags);
+
+ if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > VETH_MAX_MCAST)) {
+ port->promiscuous = 1;
+ } else {
+ struct netdev_hw_addr *ha;
+
+ port->promiscuous = 0;
+
+ /* Update table */
+ port->num_mcast = 0;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ u8 *addr = ha->addr;
+ u64 xaddr = 0;
+
+ memcpy(&xaddr, addr, ETH_ALEN);
+ port->mcast_addr[port->num_mcast] = xaddr;
+ port->num_mcast++;
+ }
+ }
+
+ write_unlock_irqrestore(&port->mcast_gate, flags);
+}
+
+static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, DRV_NAME, sizeof(info->driver) - 1);
+ info->driver[sizeof(info->driver) - 1] = '\0';
+ strncpy(info->version, DRV_VERSION, sizeof(info->version) - 1);
+ info->version[sizeof(info->version) - 1] = '\0';
+}
+
+static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ ecmd->supported = (SUPPORTED_1000baseT_Full
+ | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
+	ecmd->advertising = (ADVERTISED_1000baseT_Full
+			| ADVERTISED_Autoneg | ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_INTERNAL;
+ ecmd->phy_address = 0;
+ ecmd->speed = SPEED_1000;
+ ecmd->duplex = DUPLEX_FULL;
+ ecmd->autoneg = AUTONEG_ENABLE;
+ ecmd->maxtxpkt = 120;
+ ecmd->maxrxpkt = 120;
+ return 0;
+}
+
+static const struct ethtool_ops ops = {
+ .get_drvinfo = veth_get_drvinfo,
+ .get_settings = veth_get_settings,
+ .get_link = ethtool_op_get_link,
+};
+
+static const struct net_device_ops veth_netdev_ops = {
+ .ndo_open = veth_open,
+ .ndo_stop = veth_close,
+ .ndo_start_xmit = veth_start_xmit,
+ .ndo_change_mtu = veth_change_mtu,
+ .ndo_set_rx_mode = veth_set_multicast_list,
+ .ndo_set_mac_address = NULL,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static struct net_device *veth_probe_one(int vlan,
+ struct vio_dev *vio_dev)
+{
+ struct net_device *dev;
+ struct veth_port *port;
+ struct device *vdev = &vio_dev->dev;
+ int i, rc;
+ const unsigned char *mac_addr;
+
+ mac_addr = vio_get_attribute(vio_dev, "local-mac-address", NULL);
+ if (mac_addr == NULL)
+ mac_addr = vio_get_attribute(vio_dev, "mac-address", NULL);
+ if (mac_addr == NULL) {
+ veth_error("Unable to fetch MAC address from device tree.\n");
+ return NULL;
+ }
+
+ dev = alloc_etherdev(sizeof (struct veth_port));
+ if (! dev) {
+ veth_error("Unable to allocate net_device structure!\n");
+ return NULL;
+ }
+
+ port = netdev_priv(dev);
+
+ spin_lock_init(&port->queue_lock);
+ rwlock_init(&port->mcast_gate);
+ port->stopped_map = 0;
+
+ for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
+ HvLpVirtualLanIndexMap map;
+
+ if (i == this_lp)
+ continue;
+ map = HvLpConfig_getVirtualLanIndexMapForLp(i);
+ if (map & (0x8000 >> vlan))
+ port->lpar_map |= (1 << i);
+ }
+ port->dev = vdev;
+
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+
+ dev->mtu = VETH_MAX_MTU;
+
+ memcpy(&port->mac_addr, mac_addr, ETH_ALEN);
+
+ dev->netdev_ops = &veth_netdev_ops;
+ SET_ETHTOOL_OPS(dev, &ops);
+
+ SET_NETDEV_DEV(dev, vdev);
+
+ rc = register_netdev(dev);
+ if (rc != 0) {
+ veth_error("Failed registering net device for vlan%d.\n", vlan);
+ free_netdev(dev);
+ return NULL;
+ }
+
+ kobject_init(&port->kobject, &veth_port_ktype);
+ if (0 != kobject_add(&port->kobject, &dev->dev.kobj, "veth_port"))
+ veth_error("Failed adding port for %s to sysfs.\n", dev->name);
+
+ veth_info("%s attached to iSeries vlan %d (LPAR map = 0x%.4X)\n",
+ dev->name, vlan, port->lpar_map);
+
+ return dev;
+}
+
+/*
+ * Tx path
+ */
+
+static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,
+ struct net_device *dev)
+{
+ struct veth_lpar_connection *cnx = veth_cnx[rlp];
+ struct veth_port *port = netdev_priv(dev);
+ HvLpEvent_Rc rc;
+ struct veth_msg *msg = NULL;
+ unsigned long flags;
+
+ if (! cnx)
+ return 0;
+
+ spin_lock_irqsave(&cnx->lock, flags);
+
+ if (! (cnx->state & VETH_STATE_READY))
+ goto no_error;
+
+ if ((skb->len - ETH_HLEN) > VETH_MAX_MTU)
+ goto drop;
+
+ msg = veth_stack_pop(cnx);
+ if (! msg)
+ goto drop;
+
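+	/* Take our own reference on the skb: the caller frees its copy
+	 * as soon as veth_start_xmit() returns, but the data must stay
+	 * alive until the remote LPAR acks the frame. */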
+ msg->in_use = 1;
+ msg->skb = skb_get(skb);
+
+ msg->data.addr[0] = dma_map_single(port->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(port->dev, msg->data.addr[0]))
+ goto recycle_and_drop;
+
+ msg->dev = port->dev;
+ msg->data.len[0] = skb->len;
+ msg->data.eofmask = 1 << VETH_EOF_SHIFT;
+
+ rc = veth_signaldata(cnx, VETH_EVENT_FRAMES, msg->token, &msg->data);
+
+ if (rc != HvLpEvent_Rc_Good)
+ goto recycle_and_drop;
+
+ /* If the timer's not already running, start it now. */
+ if (0 == cnx->outstanding_tx)
+ mod_timer(&cnx->reset_timer, jiffies + cnx->reset_timeout);
+
+ cnx->last_contact = jiffies;
+ cnx->outstanding_tx++;
+
+ if (veth_stack_is_empty(cnx))
+ veth_stop_queues(cnx);
+
+ no_error:
+ spin_unlock_irqrestore(&cnx->lock, flags);
+ return 0;
+
+ recycle_and_drop:
+ veth_recycle_msg(cnx, msg);
+ drop:
+ spin_unlock_irqrestore(&cnx->lock, flags);
+ return 1;
+}
+
+static void veth_transmit_to_many(struct sk_buff *skb,
+ HvLpIndexMap lpmask,
+ struct net_device *dev)
+{
+ int i, success, error;
+
+ success = error = 0;
+
+ for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
+ if ((lpmask & (1 << i)) == 0)
+ continue;
+
+ if (veth_transmit_to_one(skb, i, dev))
+ error = 1;
+ else
+ success = 1;
+ }
+
+ if (error)
+ dev->stats.tx_errors++;
+
+ if (success) {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ }
+}
+
+static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned char *frame = skb->data;
+ struct veth_port *port = netdev_priv(dev);
+ HvLpIndexMap lpmask;
+
+ if (is_unicast_ether_addr(frame)) {
+ /* unicast packet */
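+		/* The last byte of the destination MAC encodes the
+		 * target LPAR index on the iSeries virtual LAN. */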
+ HvLpIndex rlp = frame[5];
+
+ if ( ! ((1 << rlp) & port->lpar_map) ) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ lpmask = 1 << rlp;
+ } else {
+ lpmask = port->lpar_map;
+ }
+
+ veth_transmit_to_many(skb, lpmask, dev);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/* You must hold the connection's lock when you call this function. */
+static void veth_recycle_msg(struct veth_lpar_connection *cnx,
+ struct veth_msg *msg)
+{
+ u32 dma_address, dma_length;
+
+ if (msg->in_use) {
+ msg->in_use = 0;
+ dma_address = msg->data.addr[0];
+ dma_length = msg->data.len[0];
+
+ if (!dma_mapping_error(msg->dev, dma_address))
+ dma_unmap_single(msg->dev, dma_address, dma_length,
+ DMA_TO_DEVICE);
+
+ if (msg->skb) {
+ dev_kfree_skb_any(msg->skb);
+ msg->skb = NULL;
+ }
+
+ memset(&msg->data, 0, sizeof(msg->data));
+ veth_stack_push(cnx, msg);
+ } else if (cnx->state & VETH_STATE_OPEN) {
+ veth_error("Non-pending frame (# %d) acked by LPAR %d.\n",
+ cnx->remote_lp, msg->token);
+ }
+}
+
+static void veth_wake_queues(struct veth_lpar_connection *cnx)
+{
+ int i;
+
+ for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
+ struct net_device *dev = veth_dev[i];
+ struct veth_port *port;
+ unsigned long flags;
+
+ if (! dev)
+ continue;
+
+ port = netdev_priv(dev);
+
+ if (! (port->lpar_map & (1<<cnx->remote_lp)))
+ continue;
+
+ spin_lock_irqsave(&port->queue_lock, flags);
+
+ port->stopped_map &= ~(1 << cnx->remote_lp);
+
+ if (0 == port->stopped_map && netif_queue_stopped(dev)) {
+ veth_debug("cnx %d: woke queue for %s.\n",
+ cnx->remote_lp, dev->name);
+ netif_wake_queue(dev);
+ }
+ spin_unlock_irqrestore(&port->queue_lock, flags);
+ }
+}
+
+static void veth_stop_queues(struct veth_lpar_connection *cnx)
+{
+ int i;
+
+ for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
+ struct net_device *dev = veth_dev[i];
+ struct veth_port *port;
+
+ if (! dev)
+ continue;
+
+ port = netdev_priv(dev);
+
+ /* If this cnx is not on the vlan for this port, continue */
+ if (! (port->lpar_map & (1 << cnx->remote_lp)))
+ continue;
+
+ spin_lock(&port->queue_lock);
+
+ netif_stop_queue(dev);
+ port->stopped_map |= (1 << cnx->remote_lp);
+
+ veth_debug("cnx %d: stopped queue for %s, map = 0x%x.\n",
+ cnx->remote_lp, dev->name, port->stopped_map);
+
+ spin_unlock(&port->queue_lock);
+ }
+}
+
+static void veth_timed_reset(unsigned long ptr)
+{
+ struct veth_lpar_connection *cnx = (struct veth_lpar_connection *)ptr;
+ unsigned long trigger_time, flags;
+
+ /* FIXME is it possible this fires after veth_stop_connection()?
+ * That would reschedule the statemachine for 5 seconds and probably
+ * execute it after the module's been unloaded. Hmm. */
+
+ spin_lock_irqsave(&cnx->lock, flags);
+
+ if (cnx->outstanding_tx > 0) {
+ trigger_time = cnx->last_contact + cnx->reset_timeout;
+
+ if (trigger_time < jiffies) {
+ cnx->state |= VETH_STATE_RESET;
+ veth_kick_statemachine(cnx);
+ veth_error("%d packets not acked by LPAR %d within %d "
+ "seconds, resetting.\n",
+ cnx->outstanding_tx, cnx->remote_lp,
+ cnx->reset_timeout / HZ);
+ } else {
+ /* Reschedule the timer */
+ trigger_time = jiffies + cnx->reset_timeout;
+ mod_timer(&cnx->reset_timer, trigger_time);
+ }
+ }
+
+ spin_unlock_irqrestore(&cnx->lock, flags);
+}
+
+/*
+ * Rx path
+ */
+
+static inline int veth_frame_wanted(struct veth_port *port, u64 mac_addr)
+{
+ int wanted = 0;
+ int i;
+ unsigned long flags;
+
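+	/* The destination MAC is passed in the top 48 bits of a u64;
+	 * the all-ones pattern is the Ethernet broadcast address. */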
+ if ( (mac_addr == port->mac_addr) || (mac_addr == 0xffffffffffff0000) )
+ return 1;
+
+ read_lock_irqsave(&port->mcast_gate, flags);
+
+ if (port->promiscuous) {
+ wanted = 1;
+ goto out;
+ }
+
+ for (i = 0; i < port->num_mcast; ++i) {
+ if (port->mcast_addr[i] == mac_addr) {
+ wanted = 1;
+ break;
+ }
+ }
+
+ out:
+ read_unlock_irqrestore(&port->mcast_gate, flags);
+
+ return wanted;
+}
+
+struct dma_chunk {
+ u64 addr;
+ u64 size;
+};
+
+#define VETH_MAX_PAGES_PER_FRAME ( (VETH_MAX_MTU+PAGE_SIZE-2)/PAGE_SIZE + 1 )
+
+static inline void veth_build_dma_list(struct dma_chunk *list,
+ unsigned char *p, unsigned long length)
+{
+ unsigned long done;
+ int i = 1;
+
+ /* FIXME: skbs are contiguous in real addresses. Do we
+ * really need to break it into PAGE_SIZE chunks, or can we do
+ * it just at the granularity of iSeries real->absolute
+ * mapping? Indeed, given the way the allocator works, can we
+ * count on them being absolutely contiguous? */
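+	/* The first chunk runs from p to the end of its page; each
+	 * following chunk is a whole page or the frame remainder. */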
+ list[0].addr = iseries_hv_addr(p);
+ list[0].size = min(length,
+ PAGE_SIZE - ((unsigned long)p & ~PAGE_MASK));
+
+ done = list[0].size;
+ while (done < length) {
+ list[i].addr = iseries_hv_addr(p + done);
+ list[i].size = min(length-done, PAGE_SIZE);
+ done += list[i].size;
+ i++;
+ }
+}
+
+static void veth_flush_acks(struct veth_lpar_connection *cnx)
+{
+ HvLpEvent_Rc rc;
+
+ rc = veth_signaldata(cnx, VETH_EVENT_FRAMES_ACK,
+ 0, &cnx->pending_acks);
+
+ if (rc != HvLpEvent_Rc_Good)
+ veth_error("Failed acking frames from LPAR %d, rc = %d\n",
+ cnx->remote_lp, (int)rc);
+
+ cnx->num_pending_acks = 0;
+ memset(&cnx->pending_acks, 0xff, sizeof(cnx->pending_acks));
+}
+
+static void veth_receive(struct veth_lpar_connection *cnx,
+ struct veth_lpevent *event)
+{
+ struct veth_frames_data *senddata = &event->u.frames_data;
+ int startchunk = 0;
+ int nchunks;
+ unsigned long flags;
+ HvLpDma_Rc rc;
+
+ do {
+ u16 length = 0;
+ struct sk_buff *skb;
+ struct dma_chunk local_list[VETH_MAX_PAGES_PER_FRAME];
+ struct dma_chunk remote_list[VETH_MAX_FRAMES_PER_MSG];
+ u64 dest;
+ HvLpVirtualLanIndex vlan;
+ struct net_device *dev;
+ struct veth_port *port;
+
+ /* FIXME: do we need this? */
+ memset(local_list, 0, sizeof(local_list));
+		memset(remote_list, 0, sizeof(remote_list));
+
+ /* a 0 address marks the end of the valid entries */
+ if (senddata->addr[startchunk] == 0)
+ break;
+
+ /* make sure that we have at least 1 EOF entry in the
+ * remaining entries */
+ if (! (senddata->eofmask >> (startchunk + VETH_EOF_SHIFT))) {
+ veth_error("Missing EOF fragment in event "
+ "eofmask = 0x%x startchunk = %d\n",
+ (unsigned)senddata->eofmask,
+ startchunk);
+ break;
+ }
+
+ /* build list of chunks in this frame */
+ nchunks = 0;
+ do {
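+			/* The event carries only the high 32 bits of
+			 * each chunk's real address; shift them back
+			 * into place. */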
+ remote_list[nchunks].addr =
+ (u64) senddata->addr[startchunk+nchunks] << 32;
+ remote_list[nchunks].size =
+ senddata->len[startchunk+nchunks];
+ length += remote_list[nchunks].size;
+ } while (! (senddata->eofmask &
+ (1 << (VETH_EOF_SHIFT + startchunk + nchunks++))));
+
+ /* length == total length of all chunks */
+ /* nchunks == # of chunks in this frame */
+
+ if ((length - ETH_HLEN) > VETH_MAX_MTU) {
+ veth_error("Received oversize frame from LPAR %d "
+ "(length = %d)\n",
+ cnx->remote_lp, length);
+ continue;
+ }
+
+ skb = alloc_skb(length, GFP_ATOMIC);
+ if (!skb)
+ continue;
+
+ veth_build_dma_list(local_list, skb->data, length);
+
+ rc = HvCallEvent_dmaBufList(HvLpEvent_Type_VirtualLan,
+ event->base_event.xSourceLp,
+ HvLpDma_Direction_RemoteToLocal,
+ cnx->src_inst,
+ cnx->dst_inst,
+ HvLpDma_AddressType_RealAddress,
+ HvLpDma_AddressType_TceIndex,
+ iseries_hv_addr(&local_list),
+ iseries_hv_addr(&remote_list),
+ length);
+ if (rc != HvLpDma_Rc_Good) {
+ dev_kfree_skb_irq(skb);
+ continue;
+ }
+
+		vlan = skb->data[9];
+		if (vlan >= HVMAXARCHITECTEDVIRTUALLANS) {
+			/* Bounds-check the vlan before using it to
+			 * index veth_dev[]. */
+			dev_kfree_skb_irq(skb);
+			continue;
+		}
+		dev = veth_dev[vlan];
+ if (! dev) {
+ /*
+ * Some earlier versions of the driver sent
+ * broadcasts down all connections, even to lpars
+ * that weren't on the relevant vlan. So ignore
+ * packets belonging to a vlan we're not on.
+ * We can also be here if we receive packets while
+ * the driver is going down, because then dev is NULL.
+ */
+ dev_kfree_skb_irq(skb);
+ continue;
+ }
+
+ port = netdev_priv(dev);
+ dest = *((u64 *) skb->data) & 0xFFFFFFFFFFFF0000;
+
+ if (! veth_frame_wanted(port, dest)) {
+ dev_kfree_skb_irq(skb);
+ continue;
+ }
+
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, dev);
+ skb_checksum_none_assert(skb);
+ netif_rx(skb); /* send it up */
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += length;
+ } while (startchunk += nchunks, startchunk < VETH_MAX_FRAMES_PER_MSG);
+
+ /* Ack it */
+ spin_lock_irqsave(&cnx->lock, flags);
+ BUG_ON(cnx->num_pending_acks > VETH_MAX_ACKS_PER_MSG);
+
+ cnx->pending_acks[cnx->num_pending_acks++] =
+ event->base_event.xCorrelationToken;
+
+ if ( (cnx->num_pending_acks >= cnx->remote_caps.ack_threshold) ||
+ (cnx->num_pending_acks >= VETH_MAX_ACKS_PER_MSG) )
+ veth_flush_acks(cnx);
+
+ spin_unlock_irqrestore(&cnx->lock, flags);
+}
+
+static void veth_timed_ack(unsigned long ptr)
+{
+ struct veth_lpar_connection *cnx = (struct veth_lpar_connection *) ptr;
+ unsigned long flags;
+
+ /* Ack all the events */
+ spin_lock_irqsave(&cnx->lock, flags);
+ if (cnx->num_pending_acks > 0)
+ veth_flush_acks(cnx);
+
+ /* Reschedule the timer */
+ cnx->ack_timer.expires = jiffies + cnx->ack_timeout;
+ add_timer(&cnx->ack_timer);
+ spin_unlock_irqrestore(&cnx->lock, flags);
+}
+
+static int veth_remove(struct vio_dev *vdev)
+{
+ struct veth_lpar_connection *cnx;
+ struct net_device *dev;
+ struct veth_port *port;
+ int i;
+
+ dev = veth_dev[vdev->unit_address];
+
+ if (! dev)
+ return 0;
+
+ port = netdev_priv(dev);
+
+ for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
+ cnx = veth_cnx[i];
+
+ if (cnx && (port->lpar_map & (1 << i))) {
+ /* Drop our reference to connections on our VLAN */
+ kobject_put(&cnx->kobject);
+ }
+ }
+
+ veth_dev[vdev->unit_address] = NULL;
+ kobject_del(&port->kobject);
+ kobject_put(&port->kobject);
+ unregister_netdev(dev);
+ free_netdev(dev);
+
+ return 0;
+}
+
+static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+{
+ int i = vdev->unit_address;
+ struct net_device *dev;
+ struct veth_port *port;
+
+ dev = veth_probe_one(i, vdev);
+ if (dev == NULL) {
+ veth_remove(vdev);
+ return 1;
+ }
+ veth_dev[i] = dev;
+
+ port = netdev_priv(dev);
+
+ /* Start the state machine on each connection on this vlan. If we're
+ * the first dev to do so this will commence link negotiation */
+ for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
+ struct veth_lpar_connection *cnx;
+
+ if (! (port->lpar_map & (1 << i)))
+ continue;
+
+ cnx = veth_cnx[i];
+ if (!cnx)
+ continue;
+
+ kobject_get(&cnx->kobject);
+ veth_kick_statemachine(cnx);
+ }
+
+ return 0;
+}
+
+/**
+ * veth_device_table: Used by vio.c to match devices that we
+ * support.
+ */
+static struct vio_device_id veth_device_table[] __devinitdata = {
+ { "network", "IBM,iSeries-l-lan" },
+ { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, veth_device_table);
+
+static struct vio_driver veth_driver = {
+ .id_table = veth_device_table,
+ .probe = veth_probe,
+ .remove = veth_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ }
+};
+
+/*
+ * Module initialization/cleanup
+ */
+
+static void __exit veth_module_cleanup(void)
+{
+ int i;
+ struct veth_lpar_connection *cnx;
+
+ /* Disconnect our "irq" to stop events coming from the Hypervisor. */
+ HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan);
+
+ for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
+ cnx = veth_cnx[i];
+
+ if (!cnx)
+ continue;
+
+ /* Cancel work queued from Hypervisor callbacks */
+ cancel_delayed_work_sync(&cnx->statemachine_wq);
+ /* Remove the connection from sysfs */
+ kobject_del(&cnx->kobject);
+ /* Drop the driver's reference to the connection */
+ kobject_put(&cnx->kobject);
+ }
+
+ /* Unregister the driver, which will close all the netdevs and stop
+ * the connections when they're no longer referenced. */
+ vio_unregister_driver(&veth_driver);
+}
+module_exit(veth_module_cleanup);
+
+static int __init veth_module_init(void)
+{
+ int i;
+ int rc;
+
+ if (!firmware_has_feature(FW_FEATURE_ISERIES))
+ return -ENODEV;
+
+ this_lp = HvLpConfig_getLpIndex_outline();
+
+ for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
+ rc = veth_init_connection(i);
+ if (rc != 0)
+ goto error;
+ }
+
+ HvLpEvent_registerHandler(HvLpEvent_Type_VirtualLan,
+ &veth_handle_event);
+
+ rc = vio_register_driver(&veth_driver);
+ if (rc != 0)
+ goto error;
+
+ for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
+ struct kobject *kobj;
+
+ if (!veth_cnx[i])
+ continue;
+
+ kobj = &veth_cnx[i]->kobject;
+		/* If the add fails, complain but otherwise continue */
+ if (0 != driver_add_kobj(&veth_driver.driver, kobj,
+ "cnx%.2d", veth_cnx[i]->remote_lp))
+ veth_error("cnx %d: Failed adding to sysfs.\n", i);
+ }
+
+ return 0;
+
+error:
+ for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
+ veth_destroy_connection(veth_cnx[i]);
+ }
+
+ return rc;
+}
+module_init(veth_module_init);
diff --git a/drivers/net/ethernet/icplus/Kconfig b/drivers/net/ethernet/icplus/Kconfig
new file mode 100644
index 000000000000..3aff81d7989f
--- /dev/null
+++ b/drivers/net/ethernet/icplus/Kconfig
@@ -0,0 +1,14 @@
+#
+# IC Plus device configuration
+#
+
+config IP1000
+ tristate "IP1000 Gigabit Ethernet support"
+ depends on PCI && EXPERIMENTAL
+ select NET_CORE
+ select MII
+ ---help---
+ This driver supports IP1000 gigabit Ethernet cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ipg. This is recommended.
diff --git a/drivers/net/ethernet/icplus/Makefile b/drivers/net/ethernet/icplus/Makefile
new file mode 100644
index 000000000000..5bc87c1f36aa
--- /dev/null
+++ b/drivers/net/ethernet/icplus/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the IC Plus device drivers
+#
+
+obj-$(CONFIG_IP1000) += ipg.o
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
new file mode 100644
index 000000000000..8fd80a00b898
--- /dev/null
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -0,0 +1,2324 @@
+/*
+ * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter
+ *
+ * Copyright (C) 2003, 2007 IC Plus Corp
+ *
+ * Original Author:
+ *
+ * Craig Rich
+ * Sundance Technology, Inc.
+ * www.sundanceti.com
+ * craig_rich@sundanceti.com
+ *
+ * Current Maintainer:
+ *
+ * Sorbica Shieh.
+ * http://www.icplus.com.tw
+ * sorbica@icplus.com.tw
+ *
+ * Jesse Huang
+ * http://www.icplus.com.tw
+ * jesse@icplus.com.tw
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/gfp.h>
+#include <linux/mii.h>
+#include <linux/mutex.h>
+
+#include <asm/div64.h>
+
+#define IPG_RX_RING_BYTES (sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH)
+#define IPG_TX_RING_BYTES (sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH)
+#define IPG_RESET_MASK \
+ (IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \
+ IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \
+ IPG_AC_AUTO_INIT)
+
+#define ipg_w32(val32, reg) iowrite32((val32), ioaddr + (reg))
+#define ipg_w16(val16, reg) iowrite16((val16), ioaddr + (reg))
+#define ipg_w8(val8, reg) iowrite8((val8), ioaddr + (reg))
+
+#define ipg_r32(reg) ioread32(ioaddr + (reg))
+#define ipg_r16(reg) ioread16(ioaddr + (reg))
+#define ipg_r8(reg) ioread8(ioaddr + (reg))
+
+enum {
+ netdev_io_size = 128
+};
+
+#include "ipg.h"
+#define DRV_NAME "ipg"
+
+MODULE_AUTHOR("IC Plus Corp. 2003");
+MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * Defaults
+ */
+#define IPG_MAX_RXFRAME_SIZE 0x0600
+#define IPG_RXFRAG_SIZE 0x0600
+#define IPG_RXSUPPORT_SIZE 0x0600
+#define IPG_IS_JUMBO false
+
+/*
+ * Variable record -- index by leading revision/length
+ * Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN
+ */
+static const unsigned short DefaultPhyParam[] = {
+ /* 11/12/03 IP1000A v1-3 rev=0x40 */
+ /*--------------------------------------------------------------------------
+ (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2,
+ 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6,
+ 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700,
+ --------------------------------------------------------------------------*/
+ /* 12/17/03 IP1000A v1-4 rev=0x40 */
+	(0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e,
+	31, 0x0000, 30, 0x005e, 9, 0x0700,
+	/* 01/09/04 IP1000A v1-5 rev=0x41 */
+	(0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e,
+	31, 0x0000, 30, 0x005e, 9, 0x0700,
+	0x0000
+};
+
+static const char * const ipg_brand_name[] = {
+ "IC PLUS IP1000 1000/100/10 based NIC",
+ "Sundance Technology ST2021 based NIC",
+ "Tamarack Microelectronics TC9020/9021 based NIC",
+ "D-Link NIC IP1000A"
+};
+
+static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
+ { PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
+ { PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
+ { PCI_VDEVICE(DLINK, 0x9021), 2 },
+ { PCI_VDEVICE(DLINK, 0x4020), 3 },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, ipg_pci_tbl);
+
+static inline void __iomem *ipg_ioaddr(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ return sp->ioaddr;
+}
+
+#ifdef IPG_DEBUG
+static void ipg_dump_rfdlist(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->ioaddr;
+ unsigned int i;
+ u32 offset;
+
+ IPG_DEBUG_MSG("_dump_rfdlist\n");
+
+ netdev_info(dev, "rx_current = %02x\n", sp->rx_current);
+ netdev_info(dev, "rx_dirty = %02x\n", sp->rx_dirty);
+ netdev_info(dev, "RFDList start address = %016lx\n",
+ (unsigned long)sp->rxd_map);
+ netdev_info(dev, "RFDListPtr register = %08x%08x\n",
+ ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0));
+
+ for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
+ offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd;
+ netdev_info(dev, "%02x %04x RFDNextPtr = %016lx\n",
+ i, offset, (unsigned long)sp->rxd[i].next_desc);
+ offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd;
+ netdev_info(dev, "%02x %04x RFS = %016lx\n",
+ i, offset, (unsigned long)sp->rxd[i].rfs);
+ offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd;
+ netdev_info(dev, "%02x %04x frag_info = %016lx\n",
+ i, offset, (unsigned long)sp->rxd[i].frag_info);
+ }
+}
+
+static void ipg_dump_tfdlist(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->ioaddr;
+ unsigned int i;
+ u32 offset;
+
+ IPG_DEBUG_MSG("_dump_tfdlist\n");
+
+ netdev_info(dev, "tx_current = %02x\n", sp->tx_current);
+ netdev_info(dev, "tx_dirty = %02x\n", sp->tx_dirty);
+ netdev_info(dev, "TFDList start address = %016lx\n",
+ (unsigned long) sp->txd_map);
+ netdev_info(dev, "TFDListPtr register = %08x%08x\n",
+ ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0));
+
+ for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
+ offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd;
+ netdev_info(dev, "%02x %04x TFDNextPtr = %016lx\n",
+ i, offset, (unsigned long)sp->txd[i].next_desc);
+
+ offset = (u32) &sp->txd[i].tfc - (u32) sp->txd;
+ netdev_info(dev, "%02x %04x TFC = %016lx\n",
+ i, offset, (unsigned long) sp->txd[i].tfc);
+ offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd;
+ netdev_info(dev, "%02x %04x frag_info = %016lx\n",
+ i, offset, (unsigned long) sp->txd[i].frag_info);
+ }
+}
+#endif
+
+static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data)
+{
+ ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL);
+ ndelay(IPG_PC_PHYCTRLWAIT_NS);
+}
+
+static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data)
+{
+ ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data);
+ ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data);
+}
+
+static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity)
+{
+ phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR;
+
+ ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity);
+}
+
+static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity)
+{
+ ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR |
+ phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL);
+}
+
+static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity)
+{
+ u16 bit_data;
+
+ ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity);
+
+ bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1;
+
+ ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity);
+
+ return bit_data;
+}
+
+/*
+ * Read a register from the Physical Layer device located
+ * on the IPG NIC, using the IPG PHYCTRL register.
+ */
+static int mdio_read(struct net_device *dev, int phy_id, int phy_reg)
+{
+ void __iomem *ioaddr = ipg_ioaddr(dev);
+ /*
+	 * The GMII management frame structure for a read is as follows:
+ *
+ * |Preamble|st|op|phyad|regad|ta| data |idle|
+ * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z |
+ *
+ * <32 1s> = 32 consecutive logic 1 values
+ * A = bit of Physical Layer device address (MSB first)
+ * R = bit of register address (MSB first)
+ * z = High impedance state
+ * D = bit of read data (MSB first)
+ *
+ * Transmission order is 'Preamble' field first, bits transmitted
+ * left to right (first to last).
+ */
+ struct {
+ u32 field;
+ unsigned int len;
+ } p[] = {
+ { GMII_PREAMBLE, 32 }, /* Preamble */
+ { GMII_ST, 2 }, /* ST */
+ { GMII_READ, 2 }, /* OP */
+ { phy_id, 5 }, /* PHYAD */
+ { phy_reg, 5 }, /* REGAD */
+ { 0x0000, 2 }, /* TA */
+ { 0x0000, 16 }, /* DATA */
+ { 0x0000, 1 } /* IDLE */
+ };
+ unsigned int i, j;
+ u8 polarity, data;
+
+ polarity = ipg_r8(PHY_CTRL);
+ polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);
+
+ /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
+ for (j = 0; j < 5; j++) {
+ for (i = 0; i < p[j].len; i++) {
+ /* For each variable length field, the MSB must be
+ * transmitted first. Rotate through the field bits,
+			 * starting with the MSB, and move each bit into
+			 * the 1st (2^1) bit position (this is the bit position
+ * corresponding to the MgmtData bit of the PhyCtrl
+ * register for the IPG).
+ *
+ * Example: ST = 01;
+ *
+ * First write a '0' to bit 1 of the PhyCtrl
+ * register, then write a '1' to bit 1 of the
+ * PhyCtrl register.
+ *
+ * To do this, right shift the MSB of ST by the value:
+ * [field length - 1 - #ST bits already written]
+ * then left shift this result by 1.
+ */
+ data = (p[j].field >> (p[j].len - 1 - i)) << 1;
+ data &= IPG_PC_MGMTDATA;
+ data |= polarity | IPG_PC_MGMTDIR;
+
+ ipg_drive_phy_ctl_low_high(ioaddr, data);
+ }
+ }
+
+ send_three_state(ioaddr, polarity);
+
+ read_phy_bit(ioaddr, polarity);
+
+ /*
+ * For a read cycle, the bits for the next two fields (TA and
+ * DATA) are driven by the PHY (the IPG reads these bits).
+ */
+ for (i = 0; i < p[6].len; i++) {
+ p[6].field |=
+ (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i));
+ }
+
+ send_three_state(ioaddr, polarity);
+ send_three_state(ioaddr, polarity);
+ send_three_state(ioaddr, polarity);
+ send_end(ioaddr, polarity);
+
+ /* Return the value of the DATA field. */
+ return p[6].field;
+}
+
+/*
+ * Write to a register from the Physical Layer device located
+ * on the IPG NIC, using the IPG PHYCTRL register.
+ */
+static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
+{
+ void __iomem *ioaddr = ipg_ioaddr(dev);
+ /*
+	 * The GMII management frame structure for a write is as follows:
+ *
+ * |Preamble|st|op|phyad|regad|ta| data |idle|
+ * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z |
+ *
+ * <32 1s> = 32 consecutive logic 1 values
+ * A = bit of Physical Layer device address (MSB first)
+ * R = bit of register address (MSB first)
+ * z = High impedance state
+ * D = bit of write data (MSB first)
+ *
+ * Transmission order is 'Preamble' field first, bits transmitted
+ * left to right (first to last).
+ */
+ struct {
+ u32 field;
+ unsigned int len;
+ } p[] = {
+ { GMII_PREAMBLE, 32 }, /* Preamble */
+ { GMII_ST, 2 }, /* ST */
+ { GMII_WRITE, 2 }, /* OP */
+ { phy_id, 5 }, /* PHYAD */
+ { phy_reg, 5 }, /* REGAD */
+ { 0x0002, 2 }, /* TA */
+ { val & 0xffff, 16 }, /* DATA */
+ { 0x0000, 1 } /* IDLE */
+ };
+ unsigned int i, j;
+ u8 polarity, data;
+
+ polarity = ipg_r8(PHY_CTRL);
+ polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);
+
+ /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
+ for (j = 0; j < 7; j++) {
+ for (i = 0; i < p[j].len; i++) {
+ /* For each variable length field, the MSB must be
+ * transmitted first. Rotate through the field bits,
+			 * starting with the MSB, and move each bit into
+			 * the 1st (2^1) bit position (this is the bit position
+ * corresponding to the MgmtData bit of the PhyCtrl
+ * register for the IPG).
+ *
+ * Example: ST = 01;
+ *
+ * First write a '0' to bit 1 of the PhyCtrl
+ * register, then write a '1' to bit 1 of the
+ * PhyCtrl register.
+ *
+ * To do this, right shift the MSB of ST by the value:
+ * [field length - 1 - #ST bits already written]
+ * then left shift this result by 1.
+ */
+ data = (p[j].field >> (p[j].len - 1 - i)) << 1;
+ data &= IPG_PC_MGMTDATA;
+ data |= polarity | IPG_PC_MGMTDIR;
+
+ ipg_drive_phy_ctl_low_high(ioaddr, data);
+ }
+ }
+
+ /* The last cycle is a tri-state, so read from the PHY. */
+ for (j = 7; j < 8; j++) {
+ for (i = 0; i < p[j].len; i++) {
+ ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
+
+ p[j].field |= ((ipg_r8(PHY_CTRL) &
+ IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i);
+
+ ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
+ }
+ }
+}
+
+static void ipg_set_led_mode(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->ioaddr;
+ u32 mode;
+
+ mode = ipg_r32(ASIC_CTRL);
+ mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);
+
+ if ((sp->led_mode & 0x03) > 1)
+ mode |= IPG_AC_LED_MODE_BIT_1; /* Write Asic Control Bit 29 */
+
+ if ((sp->led_mode & 0x01) == 1)
+ mode |= IPG_AC_LED_MODE; /* Write Asic Control Bit 14 */
+
+ if ((sp->led_mode & 0x08) == 8)
+ mode |= IPG_AC_LED_SPEED; /* Write Asic Control Bit 27 */
+
+ ipg_w32(mode, ASIC_CTRL);
+}
+
+static void ipg_set_phy_set(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->ioaddr;
+ int physet;
+
+ physet = ipg_r8(PHY_SET);
+ physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET);
+ physet |= ((sp->led_mode & 0x70) >> 4);
+ ipg_w8(physet, PHY_SET);
+}
+
+static int ipg_reset(struct net_device *dev, u32 resetflags)
+{
+ /* Assert functional resets via the IPG AsicCtrl
+ * register as specified by the 'resetflags' input
+ * parameter.
+ */
+ void __iomem *ioaddr = ipg_ioaddr(dev);
+ unsigned int timeout_count = 0;
+
+ IPG_DEBUG_MSG("_reset\n");
+
+ ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL);
+
+ /* Delay added to account for problem with 10Mbps reset. */
+ mdelay(IPG_AC_RESETWAIT);
+
+ while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) {
+ mdelay(IPG_AC_RESETWAIT);
+ if (++timeout_count > IPG_AC_RESET_TIMEOUT)
+ return -ETIME;
+ }
+ /* Set LED Mode in Asic Control */
+ ipg_set_led_mode(dev);
+
+ /* Set PHYSet Register Value */
+ ipg_set_phy_set(dev);
+ return 0;
+}
+
+/* Find the GMII PHY address. */
+static int ipg_find_phyaddr(struct net_device *dev)
+{
+ unsigned int phyaddr, i;
+
+ for (i = 0; i < 32; i++) {
+ u32 status;
+
+ /* Search for the correct PHY address among 32 possible. */
+ phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32;
+
+ /* 10/22/03 Grace change verify from GMII_PHY_STATUS to
+ GMII_PHY_ID1
+ */
+
+ status = mdio_read(dev, phyaddr, MII_BMSR);
+
+ if ((status != 0xFFFF) && (status != 0))
+ return phyaddr;
+ }
+
+ return 0x1f;
+}
+
+/*
+ * Configure IPG based on result of IEEE 802.3 PHY
+ * auto-negotiation.
+ */
+static int ipg_config_autoneg(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->ioaddr;
+ unsigned int txflowcontrol;
+ unsigned int rxflowcontrol;
+ unsigned int fullduplex;
+ u32 mac_ctrl_val;
+ u32 asicctrl;
+ u8 phyctrl;
+ const char *speed;
+ const char *duplex;
+ const char *tx_desc;
+ const char *rx_desc;
+
+ IPG_DEBUG_MSG("_config_autoneg\n");
+
+ asicctrl = ipg_r32(ASIC_CTRL);
+ phyctrl = ipg_r8(PHY_CTRL);
+ mac_ctrl_val = ipg_r32(MAC_CTRL);
+
+ /* Set flags for use in resolving auto-negotiation, assuming
+ * non-1000Mbps, half duplex, no flow control.
+ */
+ fullduplex = 0;
+ txflowcontrol = 0;
+ rxflowcontrol = 0;
+
+ /* To accommodate a problem in 10Mbps operation,
+ * set a global flag if PHY running in 10Mbps mode.
+ */
+ sp->tenmbpsmode = 0;
+
+ /* Determine actual speed of operation. */
+ switch (phyctrl & IPG_PC_LINK_SPEED) {
+ case IPG_PC_LINK_SPEED_10MBPS:
+ speed = "10Mbps";
+ sp->tenmbpsmode = 1;
+ break;
+ case IPG_PC_LINK_SPEED_100MBPS:
+ speed = "100Mbps";
+ break;
+ case IPG_PC_LINK_SPEED_1000MBPS:
+ speed = "1000Mbps";
+ break;
+ default:
+ speed = "undefined!";
+ return 0;
+ }
+
+ netdev_info(dev, "Link speed = %s\n", speed);
+ if (sp->tenmbpsmode == 1)
+ netdev_info(dev, "10Mbps operational mode enabled\n");
+
+ if (phyctrl & IPG_PC_DUPLEX_STATUS) {
+ fullduplex = 1;
+ txflowcontrol = 1;
+ rxflowcontrol = 1;
+ }
+
+ /* Configure full duplex, and flow control. */
+ if (fullduplex == 1) {
+
+ /* Configure IPG for full duplex operation. */
+
+ duplex = "full";
+
+ mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD;
+
+ if (txflowcontrol == 1) {
+ tx_desc = "";
+ mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE;
+ } else {
+ tx_desc = "no ";
+ mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE;
+ }
+
+ if (rxflowcontrol == 1) {
+ rx_desc = "";
+ mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE;
+ } else {
+ rx_desc = "no ";
+ mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE;
+ }
+ } else {
+ duplex = "half";
+ tx_desc = "no ";
+ rx_desc = "no ";
+ mac_ctrl_val &= (~IPG_MC_DUPLEX_SELECT_FD &
+ ~IPG_MC_TX_FLOW_CONTROL_ENABLE &
+ ~IPG_MC_RX_FLOW_CONTROL_ENABLE);
+ }
+
+ netdev_info(dev, "setting %s duplex, %sTX, %sRX flow control\n",
+ duplex, tx_desc, rx_desc);
+ ipg_w32(mac_ctrl_val, MAC_CTRL);
+
+ return 0;
+}
+
+/* Determine and configure multicast operation and set
+ * receive mode for IPG.
+ */
+static void ipg_nic_set_multicast_list(struct net_device *dev)
+{
+ void __iomem *ioaddr = ipg_ioaddr(dev);
+ struct netdev_hw_addr *ha;
+ unsigned int hashindex;
+ u32 hashtable[2];
+ u8 receivemode;
+
+ IPG_DEBUG_MSG("_nic_set_multicast_list\n");
+
+ receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* NIC to be configured in promiscuous mode. */
+ receivemode = IPG_RM_RECEIVEALLFRAMES;
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+ ((dev->flags & IFF_MULTICAST) &&
+ (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) {
+ /* NIC to be configured to receive all multicast
+ * frames. */
+ receivemode |= IPG_RM_RECEIVEMULTICAST;
+ } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
+ /* NIC to be configured to receive selected
+ * multicast addresses. */
+ receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
+ }
+
+ /* Calculate the bits to set for the 64 bit, IPG HASHTABLE.
+ * The IPG applies a cyclic-redundancy-check (the same CRC
+ * used to calculate the frame data FCS) to the destination
+	 * address of all incoming multicast frames whose destination
+ * address has the multicast bit set. The least significant
+ * 6 bits of the CRC result are used as an addressing index
+ * into the hash table. If the value of the bit addressed by
+ * this index is a 1, the frame is passed to the host system.
+ */
+
+ /* Clear hashtable. */
+ hashtable[0] = 0x00000000;
+ hashtable[1] = 0x00000000;
+
+ /* Cycle through all multicast addresses to filter. */
+ netdev_for_each_mc_addr(ha, dev) {
+ /* Calculate CRC result for each multicast address. */
+ hashindex = crc32_le(0xffffffff, ha->addr,
+ ETH_ALEN);
+
+ /* Use only the least significant 6 bits. */
+ hashindex = hashindex & 0x3F;
+
+ /* Within "hashtable", set bit number "hashindex"
+ * to a logic 1.
+ */
+ set_bit(hashindex, (void *)hashtable);
+ }
+
+	/* Write the value of the hashtable to the four 16-bit
+	 * HASHTABLE IPG registers (as two 32-bit writes).
+	 */
+ ipg_w32(hashtable[0], HASHTABLE_0);
+ ipg_w32(hashtable[1], HASHTABLE_1);
+
+ ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE);
+
+ IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE));
+}
+
+static int ipg_io_config(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = ipg_ioaddr(dev);
+ u32 origmacctrl;
+ u32 restoremacctrl;
+
+ IPG_DEBUG_MSG("_io_config\n");
+
+ origmacctrl = ipg_r32(MAC_CTRL);
+
+ restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE;
+
+ /* Based on compilation option, determine if FCS is to be
+ * stripped on receive frames by IPG.
+ */
+ if (!IPG_STRIP_FCS_ON_RX)
+ restoremacctrl |= IPG_MC_RCV_FCS;
+
+ /* Determine if transmitter and/or receiver are
+ * enabled so we may restore MACCTRL correctly.
+ */
+ if (origmacctrl & IPG_MC_TX_ENABLED)
+ restoremacctrl |= IPG_MC_TX_ENABLE;
+
+ if (origmacctrl & IPG_MC_RX_ENABLED)
+ restoremacctrl |= IPG_MC_RX_ENABLE;
+
+ /* Transmitter and receiver must be disabled before setting
+ * IFSSelect.
+ */
+ ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) &
+ IPG_MC_RSVD_MASK, MAC_CTRL);
+
+ /* Now that transmitter and receiver are disabled, write
+ * to IFSSelect.
+ */
+ ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL);
+
+ /* Set RECEIVEMODE register. */
+ ipg_nic_set_multicast_list(dev);
+
+ ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE);
+
+ ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD);
+ ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH);
+ ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH);
+ ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD);
+ ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH);
+ ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH);
+ ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE |
+ IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED |
+ IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT |
+ IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE);
+ ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH);
+ ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH);
+
+ /* IPG multi-frag frame bug workaround.
+	 * Per silicon revision B3 errata.
+ */
+ ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL);
+
+ /* IPG TX poll now bug workaround.
+	 * Per silicon revision B3 errata.
+ */
+ ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL);
+
+ /* IPG RX poll now bug workaround.
+	 * Per silicon revision B3 errata.
+ */
+ ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL);
+
+ /* Now restore MACCTRL to original setting. */
+ ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL);
+
+ /* Disable unused RMON statistics. */
+ ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK);
+
+ /* Disable unused MIB statistics. */
+ ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD |
+ IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES |
+ IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES |
+ IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK |
+ IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS |
+ IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK);
+
+ return 0;
+}
+
+/*
+ * Create a receive buffer within system memory and update
+ * NIC private structure appropriately.
+ */
+static int ipg_get_rxbuff(struct net_device *dev, int entry)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ struct ipg_rx *rxfd = sp->rxd + entry;
+ struct sk_buff *skb;
+ u64 rxfragsize;
+
+ IPG_DEBUG_MSG("_get_rxbuff\n");
+
+ skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
+ if (!skb) {
+ sp->rx_buff[entry] = NULL;
+ return -ENOMEM;
+ }
+
+ /* Associate the receive buffer with the IPG NIC. */
+ skb->dev = dev;
+
+ /* Save the address of the sk_buff structure. */
+ sp->rx_buff[entry] = skb;
+
+ rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
+ sp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+
+ /* Set the RFD fragment length. */
+ rxfragsize = sp->rxfrag_size;
+ rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN);
+
+ return 0;
+}
+
+static int init_rfdlist(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->ioaddr;
+ unsigned int i;
+
+ IPG_DEBUG_MSG("_init_rfdlist\n");
+
+ for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
+ struct ipg_rx *rxfd = sp->rxd + i;
+
+ if (sp->rx_buff[i]) {
+ pci_unmap_single(sp->pdev,
+ le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
+ sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_irq(sp->rx_buff[i]);
+ sp->rx_buff[i] = NULL;
+ }
+
+ /* Clear out the RFS field. */
+ rxfd->rfs = 0x0000000000000000;
+
+ if (ipg_get_rxbuff(dev, i) < 0) {
+ /*
+ * A receive buffer was not ready, break the
+ * RFD list here.
+ */
+ IPG_DEBUG_MSG("Cannot allocate Rx buffer\n");
+
+ /* Just in case we cannot allocate a single RFD.
+ * Should not occur.
+ */
+ if (i == 0) {
+ netdev_err(dev, "No memory available for RFD list\n");
+ return -ENOMEM;
+ }
+ }
+
+ rxfd->next_desc = cpu_to_le64(sp->rxd_map +
+ sizeof(struct ipg_rx)*(i + 1));
+ }
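+	/* Link the last RFD back to the first, closing the ring. */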
+ sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map);
+
+ sp->rx_current = 0;
+ sp->rx_dirty = 0;
+
+ /* Write the location of the RFDList to the IPG. */
+ ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0);
+ ipg_w32(0x00000000, RFD_LIST_PTR_1);
+
+ return 0;
+}
+
+static void init_tfdlist(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->ioaddr;
+ unsigned int i;
+
+ IPG_DEBUG_MSG("_init_tfdlist\n");
+
+ for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
+ struct ipg_tx *txfd = sp->txd + i;
+
+ txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
+
+ if (sp->tx_buff[i]) {
+ dev_kfree_skb_irq(sp->tx_buff[i]);
+ sp->tx_buff[i] = NULL;
+ }
+
+ txfd->next_desc = cpu_to_le64(sp->txd_map +
+ sizeof(struct ipg_tx)*(i + 1));
+ }
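+	/* Link the last TFD back to the first, closing the ring. */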
+ sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map);
+
+ sp->tx_current = 0;
+ sp->tx_dirty = 0;
+
+ /* Write the location of the TFDList to the IPG. */
+ IPG_DDEBUG_MSG("Starting TFDListPtr = %08x\n",
+ (u32) sp->txd_map);
+ ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0);
+ ipg_w32(0x00000000, TFD_LIST_PTR_1);
+
+ sp->reset_current_tfd = 1;
+}
+
+/*
+ * Free all transmit buffers which have already been transferred
+ * via DMA to the IPG.
+ */
+static void ipg_nic_txfree(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ unsigned int released, pending, dirty;
+
+ IPG_DEBUG_MSG("_nic_txfree\n");
+
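+	/* tx_current and tx_dirty are free-running counters; their
+	 * difference is the number of descriptors still in flight. */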
+ pending = sp->tx_current - sp->tx_dirty;
+ dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH;
+
+ for (released = 0; released < pending; released++) {
+ struct sk_buff *skb = sp->tx_buff[dirty];
+ struct ipg_tx *txfd = sp->txd + dirty;
+
+ IPG_DEBUG_MSG("TFC = %016lx\n", (unsigned long) txfd->tfc);
+
+ /* Look at each TFD's TFC field beginning
+ * at the last freed TFD up to the current TFD.
+ * If the TFDDone bit is set, free the associated
+ * buffer.
+ */
+ if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE)))
+ break;
+
+ /* Free the transmit buffer. */
+ if (skb) {
+ pci_unmap_single(sp->pdev,
+ le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
+ skb->len, PCI_DMA_TODEVICE);
+
+ dev_kfree_skb_irq(skb);
+
+ sp->tx_buff[dirty] = NULL;
+ }
+ dirty = (dirty + 1) % IPG_TFDLIST_LENGTH;
+ }
+
+ sp->tx_dirty += released;
+
+ if (netif_queue_stopped(dev) &&
+ (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) {
+ netif_wake_queue(dev);
+ }
+}
+
+static void ipg_tx_timeout(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->ioaddr;
+
+ ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK |
+ IPG_AC_FIFO);
+
+ spin_lock_irq(&sp->lock);
+
+ /* Re-configure after DMA reset. */
+ if (ipg_io_config(dev) < 0)
+ netdev_info(dev, "Error during re-configuration\n");
+
+ init_tfdlist(dev);
+
+ spin_unlock_irq(&sp->lock);
+
+ ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK,
+ MAC_CTRL);
+}
+
+/*
+ * For TxComplete interrupts, free all transmit
+ * buffers which have already been transferred via DMA
+ * to the IPG.
+ */
+static void ipg_nic_txcleanup(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->ioaddr;
+ unsigned int i;
+
+ IPG_DEBUG_MSG("_nic_txcleanup\n");
+
+ for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
+ /* Reading the TXSTATUS register clears the
+ * TX_COMPLETE interrupt.
+ */
+ u32 txstatusdword = ipg_r32(TX_STATUS);
+
+ IPG_DEBUG_MSG("TxStatus = %08x\n", txstatusdword);
+
+ /* Check for Transmit errors. Error bits only valid if
+ * TX_COMPLETE bit in the TXSTATUS register is a 1.
+ */
+ if (!(txstatusdword & IPG_TS_TX_COMPLETE))
+ break;
+
+ /* If in 10Mbps mode, indicate transmit is ready. */
+ if (sp->tenmbpsmode) {
+ netif_wake_queue(dev);
+ }
+
+ /* Transmit error, increment stat counters. */
+ if (txstatusdword & IPG_TS_TX_ERROR) {
+ IPG_DEBUG_MSG("Transmit error\n");
+ sp->stats.tx_errors++;
+ }
+
+ /* Late collision, re-enable transmitter. */
+ if (txstatusdword & IPG_TS_LATE_COLLISION) {
+ IPG_DEBUG_MSG("Late collision on transmit\n");
+ ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
+ IPG_MC_RSVD_MASK, MAC_CTRL);
+ }
+
+ /* Maximum collisions, re-enable transmitter. */
+ if (txstatusdword & IPG_TS_TX_MAX_COLL) {
+ IPG_DEBUG_MSG("Maximum collisions on transmit\n");
+ ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
+ IPG_MC_RSVD_MASK, MAC_CTRL);
+ }
+
+ /* Transmit underrun, reset and re-enable
+ * transmitter.
+ */
+ if (txstatusdword & IPG_TS_TX_UNDERRUN) {
+ IPG_DEBUG_MSG("Transmitter underrun\n");
+ sp->stats.tx_fifo_errors++;
+ ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA |
+ IPG_AC_NETWORK | IPG_AC_FIFO);
+
+ /* Re-configure after DMA reset. */
+ if (ipg_io_config(dev) < 0) {
+ netdev_info(dev, "Error during re-configuration\n");
+ }
+ init_tfdlist(dev);
+
+ ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
+ IPG_MC_RSVD_MASK, MAC_CTRL);
+ }
+ }
+
+ ipg_nic_txfree(dev);
+}
+
+/* Provides statistical information about the IPG NIC. */
+static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->ioaddr;
+ u16 temp1;
+ u16 temp2;
+
+ IPG_DEBUG_MSG("_nic_get_stats\n");
+
+ /* Check to see if the NIC has been initialized via nic_open,
+ * before trying to read statistic registers.
+ */
+ if (!test_bit(__LINK_STATE_START, &dev->state))
+ return &sp->stats;
+
+ sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
+ sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK);
+ sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK);
+ sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK);
+ temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS);
+ sp->stats.rx_errors += temp1;
+ sp->stats.rx_missed_errors += temp1;
+ temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) +
+ ipg_r32(IPG_LATECOLLISIONS);
+ temp2 = ipg_r16(IPG_CARRIERSENSEERRORS);
+ sp->stats.collisions += temp1;
+ sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS);
+ sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) +
+ ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2;
+ sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK);
+
+ /* detailed tx_errors */
+ sp->stats.tx_carrier_errors += temp2;
+
+ /* detailed rx_errors */
+ sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) +
+ ipg_r16(IPG_FRAMETOOLONGERRRORS);
+ sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS);
+
+ /* Unutilized IPG statistic registers. */
+ ipg_r32(IPG_MCSTFRAMESRCVDOK);
+
+ return &sp->stats;
+}
+
+/* Restore used receive buffers. */
+static int ipg_nic_rxrestore(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ const unsigned int curr = sp->rx_current;
+ unsigned int dirty = sp->rx_dirty;
+
+ IPG_DEBUG_MSG("_nic_rxrestore\n");
+
+ for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) {
+ unsigned int entry = dirty % IPG_RFDLIST_LENGTH;
+
+ /* rx_copybreak may poke hole here and there. */
+ if (sp->rx_buff[entry])
+ continue;
+
+ /* Generate a new receive buffer to replace the
+ * current buffer (which will be released by the
+ * Linux system).
+ */
+ if (ipg_get_rxbuff(dev, entry) < 0) {
+ IPG_DEBUG_MSG("Cannot allocate new Rx buffer\n");
+
+ break;
+ }
+
+ /* Reset the RFS field. */
+ sp->rxd[entry].rfs = 0x0000000000000000;
+ }
+ sp->rx_dirty = dirty;
+
+ return 0;
+}
+
+/* Jumbo frame reassembly state, tracked via jumbo->found_start and
+ * jumbo->current_size (historically named jumboindex and jumbosize).
+ * Initial state: jumboindex = -1 and jumbosize = 0.
+ * 1. jumboindex = -1 and jumbosize = 0: the previous jumbo frame has
+ *    completed.
+ * 2. jumboindex != -1 and jumbosize != 0: a jumbo frame is being
+ *    received and is not oversized.
+ * 3. jumboindex = -1 and jumbosize != 0: the jumbo frame is oversized;
+ *    the previously received data has been dropped and the remainder of
+ *    the current frame must be dropped as well.
+ */
+enum {
+ NORMAL_PACKET,
+ ERROR_PACKET
+};
+
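+/* The start/end indicators are encoded in separate decimal digits so
+ * that ipg_nic_rx_check_frame_type() can simply add them: 0 = neither,
+ * 1 = start only, 10 = end only, 11 = both start and end.
+ */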
+enum {
+ FRAME_NO_START_NO_END = 0,
+ FRAME_WITH_START = 1,
+ FRAME_WITH_END = 10,
+ FRAME_WITH_START_WITH_END = 11
+};
+
+static void ipg_nic_rx_free_skb(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
+
+ if (sp->rx_buff[entry]) {
+ struct ipg_rx *rxfd = sp->rxd + entry;
+
+ pci_unmap_single(sp->pdev,
+ le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
+ sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_irq(sp->rx_buff[entry]);
+ sp->rx_buff[entry] = NULL;
+ }
+}
+
+static int ipg_nic_rx_check_frame_type(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH);
+ int type = FRAME_NO_START_NO_END;
+
+ if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART)
+ type += FRAME_WITH_START;
+ if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND)
+ type += FRAME_WITH_END;
+ return type;
+}
+
+static int ipg_nic_rx_check_error(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
+ struct ipg_rx *rxfd = sp->rxd + entry;
+
+ if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
+ (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
+ IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
+ IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) {
+ IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
+ (unsigned long) rxfd->rfs);
+
+ /* Increment general receive error statistic. */
+ sp->stats.rx_errors++;
+
+ /* Increment detailed receive error statistics. */
+ if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
+ IPG_DEBUG_MSG("RX FIFO overrun occurred\n");
+
+ sp->stats.rx_fifo_errors++;
+ }
+
+ if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
+ IPG_DEBUG_MSG("RX runt occurred\n");
+ sp->stats.rx_length_errors++;
+ }
+
+ /* Do nothing for IPG_RFS_RXOVERSIZEDFRAME,
+		 * error count handled by an IPG statistic register.
+ */
+
+ if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
+ IPG_DEBUG_MSG("RX alignment error occurred\n");
+ sp->stats.rx_frame_errors++;
+ }
+
+ /* Do nothing for IPG_RFS_RXFCSERROR, error count
+		 * handled by an IPG statistic register.
+ */
+
+ /* Free the memory associated with the RX
+ * buffer since it is erroneous and we will
+ * not pass it to higher layer processes.
+ */
+ if (sp->rx_buff[entry]) {
+ pci_unmap_single(sp->pdev,
+ le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
+ sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+
+ dev_kfree_skb_irq(sp->rx_buff[entry]);
+ sp->rx_buff[entry] = NULL;
+ }
+ return ERROR_PACKET;
+ }
+ return NORMAL_PACKET;
+}
+
+static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
+ struct ipg_nic_private *sp,
+ struct ipg_rx *rxfd, unsigned entry)
+{
+ struct ipg_jumbo *jumbo = &sp->jumbo;
+ struct sk_buff *skb;
+ int framelen;
+
+ if (jumbo->found_start) {
+ dev_kfree_skb_irq(jumbo->skb);
+ jumbo->found_start = 0;
+ jumbo->current_size = 0;
+ jumbo->skb = NULL;
+ }
+
+	/* ERROR_PACKET (1): error found; NORMAL_PACKET (0): no error. */
+ if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
+ return;
+
+ skb = sp->rx_buff[entry];
+ if (!skb)
+ return;
+
+ /* accept this frame and send to upper layer */
+ framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
+ if (framelen > sp->rxfrag_size)
+ framelen = sp->rxfrag_size;
+
+ skb_put(skb, framelen);
+ skb->protocol = eth_type_trans(skb, dev);
+ skb_checksum_none_assert(skb);
+ netif_rx(skb);
+ sp->rx_buff[entry] = NULL;
+}
+
+static void ipg_nic_rx_with_start(struct net_device *dev,
+ struct ipg_nic_private *sp,
+ struct ipg_rx *rxfd, unsigned entry)
+{
+ struct ipg_jumbo *jumbo = &sp->jumbo;
+ struct pci_dev *pdev = sp->pdev;
+ struct sk_buff *skb;
+
+	/* ERROR_PACKET (1): error found; NORMAL_PACKET (0): no error. */
+ if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
+ return;
+
+ /* accept this frame and send to upper layer */
+ skb = sp->rx_buff[entry];
+ if (!skb)
+ return;
+
+ if (jumbo->found_start)
+ dev_kfree_skb_irq(jumbo->skb);
+
+ pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
+ sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+
+ skb_put(skb, sp->rxfrag_size);
+
+ jumbo->found_start = 1;
+ jumbo->current_size = sp->rxfrag_size;
+ jumbo->skb = skb;
+
+ sp->rx_buff[entry] = NULL;
+}
+
+static void ipg_nic_rx_with_end(struct net_device *dev,
+ struct ipg_nic_private *sp,
+ struct ipg_rx *rxfd, unsigned entry)
+{
+ struct ipg_jumbo *jumbo = &sp->jumbo;
+
+	/* ERROR_PACKET (1): error found; NORMAL_PACKET (0): no error. */
+ if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
+ struct sk_buff *skb = sp->rx_buff[entry];
+
+ if (!skb)
+ return;
+
+ if (jumbo->found_start) {
+ int framelen, endframelen;
+
+ framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
+
+ endframelen = framelen - jumbo->current_size;
+ if (framelen > sp->rxsupport_size)
+ dev_kfree_skb_irq(jumbo->skb);
+ else {
+ memcpy(skb_put(jumbo->skb, endframelen),
+ skb->data, endframelen);
+
+ jumbo->skb->protocol =
+ eth_type_trans(jumbo->skb, dev);
+
+ skb_checksum_none_assert(jumbo->skb);
+ netif_rx(jumbo->skb);
+ }
+ }
+
+ jumbo->found_start = 0;
+ jumbo->current_size = 0;
+ jumbo->skb = NULL;
+
+ ipg_nic_rx_free_skb(dev);
+ } else {
+ dev_kfree_skb_irq(jumbo->skb);
+ jumbo->found_start = 0;
+ jumbo->current_size = 0;
+ jumbo->skb = NULL;
+ }
+}
+
+static void ipg_nic_rx_no_start_no_end(struct net_device *dev,
+ struct ipg_nic_private *sp,
+ struct ipg_rx *rxfd, unsigned entry)
+{
+ struct ipg_jumbo *jumbo = &sp->jumbo;
+
+	/* ERROR_PACKET (1): error found; NORMAL_PACKET (0): no error. */
+ if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
+ struct sk_buff *skb = sp->rx_buff[entry];
+
+ if (skb) {
+ if (jumbo->found_start) {
+ jumbo->current_size += sp->rxfrag_size;
+ if (jumbo->current_size <= sp->rxsupport_size) {
+ memcpy(skb_put(jumbo->skb,
+ sp->rxfrag_size),
+ skb->data, sp->rxfrag_size);
+ }
+ }
+ ipg_nic_rx_free_skb(dev);
+ }
+ } else {
+ dev_kfree_skb_irq(jumbo->skb);
+ jumbo->found_start = 0;
+ jumbo->current_size = 0;
+ jumbo->skb = NULL;
+ }
+}
+
+static int ipg_nic_rx_jumbo(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ unsigned int curr = sp->rx_current;
+ void __iomem *ioaddr = sp->ioaddr;
+ unsigned int i;
+
+ IPG_DEBUG_MSG("_nic_rx\n");
+
+ for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
+ unsigned int entry = curr % IPG_RFDLIST_LENGTH;
+ struct ipg_rx *rxfd = sp->rxd + entry;
+
+ if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
+ break;
+
+ switch (ipg_nic_rx_check_frame_type(dev)) {
+ case FRAME_WITH_START_WITH_END:
+ ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry);
+ break;
+ case FRAME_WITH_START:
+ ipg_nic_rx_with_start(dev, sp, rxfd, entry);
+ break;
+ case FRAME_WITH_END:
+ ipg_nic_rx_with_end(dev, sp, rxfd, entry);
+ break;
+ case FRAME_NO_START_NO_END:
+ ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry);
+ break;
+ }
+ }
+
+ sp->rx_current = curr;
+
+ if (i == IPG_MAXRFDPROCESS_COUNT) {
+ /* There are more RFDs to process, however the
+ * allocated amount of RFD processing time has
+ * expired. Assert Interrupt Requested to make
+ * sure we come back to process the remaining RFDs.
+ */
+ ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
+ }
+
+ ipg_nic_rxrestore(dev);
+
+ return 0;
+}
+
+static int ipg_nic_rx(struct net_device *dev)
+{
+ /* Transfer received Ethernet frames to higher network layers. */
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ unsigned int curr = sp->rx_current;
+ void __iomem *ioaddr = sp->ioaddr;
+ struct ipg_rx *rxfd;
+ unsigned int i;
+
+ IPG_DEBUG_MSG("_nic_rx\n");
+
+#define __RFS_MASK \
+ cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND)
+
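+	/* Only handle frames fully contained in a single RFD, i.e. with
+	 * RFDDone, FrameStart and FrameEnd all set; frames spanning
+	 * several RFDs are discarded by the loop further below.
+	 */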
+ for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
+ unsigned int entry = curr % IPG_RFDLIST_LENGTH;
+ struct sk_buff *skb = sp->rx_buff[entry];
+ unsigned int framelen;
+
+ rxfd = sp->rxd + entry;
+
+ if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb)
+ break;
+
+ /* Get received frame length. */
+ framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
+
+ /* Check for jumbo frame arrival with too small
+ * RXFRAG_SIZE.
+ */
+ if (framelen > sp->rxfrag_size) {
+ IPG_DEBUG_MSG
+ ("RFS FrameLen > allocated fragment size\n");
+
+ framelen = sp->rxfrag_size;
+ }
+
+ if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
+ (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
+ IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
+ IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) {
+
+ IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
+ (unsigned long int) rxfd->rfs);
+
+ /* Increment general receive error statistic. */
+ sp->stats.rx_errors++;
+
+ /* Increment detailed receive error statistics. */
+ if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
+ IPG_DEBUG_MSG("RX FIFO overrun occurred\n");
+ sp->stats.rx_fifo_errors++;
+ }
+
+ if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
+ IPG_DEBUG_MSG("RX runt occurred\n");
+ sp->stats.rx_length_errors++;
+ }
+
+			/* Do nothing for IPG_RFS_RXOVERSIZEDFRAME, error
+			 * count handled by an IPG statistic register.
+			 */
+
+ if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
+ IPG_DEBUG_MSG("RX alignment error occurred\n");
+ sp->stats.rx_frame_errors++;
+ }
+
+			/* Do nothing for IPG_RFS_RXFCSERROR, error count
+			 * handled by an IPG statistic register.
+			 */
+
+ /* Free the memory associated with the RX
+ * buffer since it is erroneous and we will
+ * not pass it to higher layer processes.
+ */
+ if (skb) {
+ __le64 info = rxfd->frag_info;
+
+ pci_unmap_single(sp->pdev,
+ le64_to_cpu(info) & ~IPG_RFI_FRAGLEN,
+ sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+
+ dev_kfree_skb_irq(skb);
+ }
+ } else {
+
+ /* Adjust the new buffer length to accommodate the size
+ * of the received frame.
+ */
+ skb_put(skb, framelen);
+
+ /* Set the buffer's protocol field to Ethernet. */
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* The IPG encountered an error with (or
+ * there were no) IP/TCP/UDP checksums.
+ * This may or may not indicate an invalid
+ * IP/TCP/UDP frame was received. Let the
+ * upper layer decide.
+ */
+ skb_checksum_none_assert(skb);
+
+ /* Hand off frame for higher layer processing.
+ * The function netif_rx() releases the sk_buff
+ * when processing completes.
+ */
+ netif_rx(skb);
+ }
+
+ /* Assure RX buffer is not reused by IPG. */
+ sp->rx_buff[entry] = NULL;
+ }
+
+ /*
+ * If there are more RFDs to process and the allocated amount of RFD
+ * processing time has expired, assert Interrupt Requested to make
+ * sure we come back to process the remaining RFDs.
+ */
+ if (i == IPG_MAXRFDPROCESS_COUNT)
+ ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
+
+#ifdef IPG_DEBUG
+ /* Check if the RFD list contained no receive frame data. */
+ if (!i)
+ sp->EmptyRFDListCount++;
+#endif
+ while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) &&
+ !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) &&
+ (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) {
+ unsigned int entry = curr++ % IPG_RFDLIST_LENGTH;
+
+ rxfd = sp->rxd + entry;
+
+ IPG_DEBUG_MSG("Frame requires multiple RFDs\n");
+
+ /* An unexpected event, additional code needed to handle
+ * properly. So for the time being, just disregard the
+ * frame.
+ */
+
+ /* Free the memory associated with the RX
+ * buffer since it is erroneous and we will
+ * not pass it to higher layer processes.
+ */
+ if (sp->rx_buff[entry]) {
+ pci_unmap_single(sp->pdev,
+ le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
+ sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_irq(sp->rx_buff[entry]);
+ }
+
+ /* Assure RX buffer is not reused by IPG. */
+ sp->rx_buff[entry] = NULL;
+ }
+
+ sp->rx_current = curr;
+
+ /* Check to see if there are a minimum number of used
+ * RFDs before restoring any (should improve performance.)
+ */
+ if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE)
+ ipg_nic_rxrestore(dev);
+
+ return 0;
+}
+
+static void ipg_reset_after_host_error(struct work_struct *work)
+{
+ struct ipg_nic_private *sp =
+ container_of(work, struct ipg_nic_private, task.work);
+ struct net_device *dev = sp->dev;
+
+ /*
+ * Acknowledge HostError interrupt by resetting
+ * IPG DMA and HOST.
+ */
+ ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
+
+ init_rfdlist(dev);
+ init_tfdlist(dev);
+
+ if (ipg_io_config(dev) < 0) {
+ netdev_info(dev, "Cannot recover from PCI error\n");
+ schedule_delayed_work(&sp->task, HZ);
+ }
+}
+
+static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst)
+{
+ struct net_device *dev = dev_inst;
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->ioaddr;
+ unsigned int handled = 0;
+ u16 status;
+
+ IPG_DEBUG_MSG("_interrupt_handler\n");
+
+ if (sp->is_jumbo)
+ ipg_nic_rxrestore(dev);
+
+ spin_lock(&sp->lock);
+
+ /* Get interrupt source information, and acknowledge
+ * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly,
+ * IntRequested, MacControlFrame, LinkEvent) interrupts
+ * if issued. Also, all IPG interrupts are disabled by
+ * reading IntStatusAck.
+ */
+ status = ipg_r16(INT_STATUS_ACK);
+
+ IPG_DEBUG_MSG("IntStatusAck = %04x\n", status);
+
+	/* Not our interrupt: shared IRQ from another device, or a removal event. */
+ if (!(status & IPG_IS_RSVD_MASK))
+ goto out_enable;
+
+ handled = 1;
+
+ if (unlikely(!netif_running(dev)))
+ goto out_unlock;
+
+ /* If RFDListEnd interrupt, restore all used RFDs. */
+ if (status & IPG_IS_RFD_LIST_END) {
+ IPG_DEBUG_MSG("RFDListEnd Interrupt\n");
+
+ /* The RFD list end indicates an RFD was encountered
+ * with a 0 NextPtr, or with an RFDDone bit set to 1
+		 * (indicating the RFD is not ready for use by the
+		 * IPG). Try to restore all RFDs.
+ */
+ ipg_nic_rxrestore(dev);
+
+#ifdef IPG_DEBUG
+ /* Increment the RFDlistendCount counter. */
+ sp->RFDlistendCount++;
+#endif
+ }
+
+ /* If RFDListEnd, RxDMAPriority, RxDMAComplete, or
+ * IntRequested interrupt, process received frames. */
+ if ((status & IPG_IS_RX_DMA_PRIORITY) ||
+ (status & IPG_IS_RFD_LIST_END) ||
+ (status & IPG_IS_RX_DMA_COMPLETE) ||
+ (status & IPG_IS_INT_REQUESTED)) {
+#ifdef IPG_DEBUG
+ /* Increment the RFD list checked counter if interrupted
+ * only to check the RFD list. */
+ if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END |
+ IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) &
+ (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE |
+ IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE |
+ IPG_IS_UPDATE_STATS)))
+ sp->RFDListCheckedCount++;
+#endif
+
+ if (sp->is_jumbo)
+ ipg_nic_rx_jumbo(dev);
+ else
+ ipg_nic_rx(dev);
+ }
+
+ /* If TxDMAComplete interrupt, free used TFDs. */
+ if (status & IPG_IS_TX_DMA_COMPLETE)
+ ipg_nic_txfree(dev);
+
+ /* TxComplete interrupts indicate one of numerous actions.
+ * Determine what action to take based on TXSTATUS register.
+ */
+ if (status & IPG_IS_TX_COMPLETE)
+ ipg_nic_txcleanup(dev);
+
+ /* If UpdateStats interrupt, update Linux Ethernet statistics */
+ if (status & IPG_IS_UPDATE_STATS)
+ ipg_nic_get_stats(dev);
+
+ /* If HostError interrupt, reset IPG. */
+ if (status & IPG_IS_HOST_ERROR) {
+ IPG_DDEBUG_MSG("HostError Interrupt\n");
+
+ schedule_delayed_work(&sp->task, 0);
+ }
+
+ /* If LinkEvent interrupt, resolve autonegotiation. */
+ if (status & IPG_IS_LINK_EVENT) {
+ if (ipg_config_autoneg(dev) < 0)
+ netdev_info(dev, "Auto-negotiation error\n");
+ }
+
+ /* If MACCtrlFrame interrupt, do nothing. */
+ if (status & IPG_IS_MAC_CTRL_FRAME)
+ IPG_DEBUG_MSG("MACCtrlFrame interrupt\n");
+
+ /* If RxComplete interrupt, do nothing. */
+ if (status & IPG_IS_RX_COMPLETE)
+ IPG_DEBUG_MSG("RxComplete interrupt\n");
+
+ /* If RxEarly interrupt, do nothing. */
+ if (status & IPG_IS_RX_EARLY)
+ IPG_DEBUG_MSG("RxEarly interrupt\n");
+
+out_enable:
+ /* Re-enable IPG interrupts. */
+ ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE |
+ IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE |
+ IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE);
+out_unlock:
+ spin_unlock(&sp->lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+static void ipg_rx_clear(struct ipg_nic_private *sp)
+{
+ unsigned int i;
+
+ for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
+ if (sp->rx_buff[i]) {
+ struct ipg_rx *rxfd = sp->rxd + i;
+
+ dev_kfree_skb_irq(sp->rx_buff[i]);
+ sp->rx_buff[i] = NULL;
+ pci_unmap_single(sp->pdev,
+ le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
+ sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ }
+ }
+}
+
+static void ipg_tx_clear(struct ipg_nic_private *sp)
+{
+ unsigned int i;
+
+ for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
+ if (sp->tx_buff[i]) {
+ struct ipg_tx *txfd = sp->txd + i;
+
+ pci_unmap_single(sp->pdev,
+ le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
+ sp->tx_buff[i]->len, PCI_DMA_TODEVICE);
+
+ dev_kfree_skb_irq(sp->tx_buff[i]);
+
+ sp->tx_buff[i] = NULL;
+ }
+ }
+}
+
+static int ipg_nic_open(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->ioaddr;
+ struct pci_dev *pdev = sp->pdev;
+ int rc;
+
+ IPG_DEBUG_MSG("_nic_open\n");
+
+ sp->rx_buf_sz = sp->rxsupport_size;
+
+ /* Check for interrupt line conflicts, and request interrupt
+ * line for IPG.
+ *
+ * IMPORTANT: Disable IPG interrupts prior to registering
+ * IRQ.
+ */
+ ipg_w16(0x0000, INT_ENABLE);
+
+ /* Register the interrupt line to be used by the IPG within
+ * the Linux system.
+ */
+ rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED,
+ dev->name, dev);
+ if (rc < 0) {
+ netdev_info(dev, "Error when requesting interrupt\n");
+ goto out;
+ }
+
+ dev->irq = pdev->irq;
+
+ rc = -ENOMEM;
+
+ sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES,
+ &sp->rxd_map, GFP_KERNEL);
+ if (!sp->rxd)
+ goto err_free_irq_0;
+
+ sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES,
+ &sp->txd_map, GFP_KERNEL);
+ if (!sp->txd)
+ goto err_free_rx_1;
+
+ rc = init_rfdlist(dev);
+ if (rc < 0) {
+ netdev_info(dev, "Error during configuration\n");
+ goto err_free_tx_2;
+ }
+
+ init_tfdlist(dev);
+
+ rc = ipg_io_config(dev);
+ if (rc < 0) {
+ netdev_info(dev, "Error during configuration\n");
+ goto err_release_tfdlist_3;
+ }
+
+ /* Resolve autonegotiation. */
+ if (ipg_config_autoneg(dev) < 0)
+ netdev_info(dev, "Auto-negotiation error\n");
+
+ /* initialize JUMBO Frame control variable */
+ sp->jumbo.found_start = 0;
+ sp->jumbo.current_size = 0;
+ sp->jumbo.skb = NULL;
+
+ /* Enable transmit and receive operation of the IPG. */
+ ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) &
+ IPG_MC_RSVD_MASK, MAC_CTRL);
+
+ netif_start_queue(dev);
+out:
+ return rc;
+
+err_release_tfdlist_3:
+ ipg_tx_clear(sp);
+ ipg_rx_clear(sp);
+err_free_tx_2:
+ dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
+err_free_rx_1:
+ dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
+err_free_irq_0:
+ free_irq(pdev->irq, dev);
+ goto out;
+}
+
+static int ipg_nic_stop(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->ioaddr;
+ struct pci_dev *pdev = sp->pdev;
+
+ IPG_DEBUG_MSG("_nic_stop\n");
+
+ netif_stop_queue(dev);
+
+ IPG_DUMPTFDLIST(dev);
+
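+	/* Repeatedly ack pending interrupts and reset the chip until no
+	 * interrupt sources remain enabled, so no new IRQ can arrive
+	 * while the rings are being torn down.
+	 */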
+ do {
+ (void) ipg_r16(INT_STATUS_ACK);
+
+ ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
+
+ synchronize_irq(pdev->irq);
+ } while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK);
+
+ ipg_rx_clear(sp);
+
+ ipg_tx_clear(sp);
+
+	dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
+	dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
+
+ free_irq(pdev->irq, dev);
+
+ return 0;
+}
+
+static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->ioaddr;
+ unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH;
+ unsigned long flags;
+ struct ipg_tx *txfd;
+
+ IPG_DDEBUG_MSG("_nic_hard_start_xmit\n");
+
+ /* If in 10Mbps mode, stop the transmit queue so
+ * no more transmit frames are accepted.
+ */
+ if (sp->tenmbpsmode)
+ netif_stop_queue(dev);
+
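+	/* reset_current_tfd is set once the TFD list has been reinitialized
+	 * (e.g. after a transmitter reset), so restart at descriptor 0.
+	 */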
+ if (sp->reset_current_tfd) {
+ sp->reset_current_tfd = 0;
+ entry = 0;
+ }
+
+ txfd = sp->txd + entry;
+
+ sp->tx_buff[entry] = skb;
+
+ /* Clear all TFC fields, except TFDDONE. */
+ txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
+
+ /* Specify the TFC field within the TFD. */
+ txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED |
+ (IPG_TFC_FRAMEID & sp->tx_current) |
+ (IPG_TFC_FRAGCOUNT & (1 << 24)));
+ /*
+ * 16--17 (WordAlign) <- 3 (disable),
+ * 0--15 (FrameId) <- sp->tx_current,
+ * 24--27 (FragCount) <- 1
+ */
+
+	/* Request TxComplete interrupts at an interval defined
+	 * by the constant IPG_FRAMESBETWEENTXDMACOMPLETES.
+	 * Request a TxComplete interrupt for every frame
+	 * if in 10Mbps mode, to accommodate a problem with 10Mbps
+	 * processing.
+	 */
+ if (sp->tenmbpsmode)
+ txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE);
+ txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE);
+ /* Based on compilation option, determine if FCS is to be
+ * appended to transmit frame by IPG.
+ */
+ if (!(IPG_APPEND_FCS_ON_TX))
+ txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE);
+
+ /* Based on compilation option, determine if IP, TCP and/or
+ * UDP checksums are to be added to transmit frame by IPG.
+ */
+ if (IPG_ADD_IPCHECKSUM_ON_TX)
+ txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE);
+
+ if (IPG_ADD_TCPCHECKSUM_ON_TX)
+ txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE);
+
+ if (IPG_ADD_UDPCHECKSUM_ON_TX)
+ txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE);
+
+ /* Based on compilation option, determine if VLAN tag info is to be
+ * inserted into transmit frame by IPG.
+ */
+ if (IPG_INSERT_MANUAL_VLAN_TAG) {
+ txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT |
+ ((u64) IPG_MANUAL_VLAN_VID << 32) |
+ ((u64) IPG_MANUAL_VLAN_CFI << 44) |
+ ((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45));
+ }
+
+	/* The fragment start location within system memory is defined
+	 * by the sk_buff structure's data field. The corresponding DMA
+	 * bus address is obtained via pci_map_single().
+	 */
+ txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
+ skb->len, PCI_DMA_TODEVICE));
+
+ /* The length of the fragment within system memory is defined by
+ * the sk_buff structure's len field.
+ */
+ txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN &
+ ((u64) (skb->len & 0xffff) << 48));
+
+ /* Clear the TFDDone bit last to indicate the TFD is ready
+ * for transfer to the IPG.
+ */
+ txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE);
+
+ spin_lock_irqsave(&sp->lock, flags);
+
+ sp->tx_current++;
+
+ mmiowb();
+
+ ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL);
+
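+	/* The TFD ring is full once every descriptor is in flight. */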
+ if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH))
+ netif_stop_queue(dev);
+
+ spin_unlock_irqrestore(&sp->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static void ipg_set_phy_default_param(unsigned char rev,
+ struct net_device *dev, int phy_address)
+{
+ unsigned short length;
+ unsigned char revision;
+ const unsigned short *phy_param;
+ unsigned short address, value;
+
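+	/* Each DefaultPhyParam[] entry starts with a header word: the high
+	 * byte is the chip revision it applies to, the low byte the entry
+	 * length in bytes. (Register address, value) word pairs follow.
+	 */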
+ phy_param = &DefaultPhyParam[0];
+ length = *phy_param & 0x00FF;
+ revision = (unsigned char)((*phy_param) >> 8);
+ phy_param++;
+ while (length != 0) {
+ if (rev == revision) {
+ while (length > 1) {
+ address = *phy_param;
+ value = *(phy_param + 1);
+ phy_param += 2;
+ mdio_write(dev, phy_address, address, value);
+ length -= 4;
+ }
+ break;
+ } else {
+ phy_param += length / 2;
+ length = *phy_param & 0x00FF;
+ revision = (unsigned char)((*phy_param) >> 8);
+ phy_param++;
+ }
+ }
+}
+
+static int read_eeprom(struct net_device *dev, int eep_addr)
+{
+ void __iomem *ioaddr = ipg_ioaddr(dev);
+ unsigned int i;
+ int ret = 0;
+ u16 value;
+
+ value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff);
+ ipg_w16(value, EEPROM_CTRL);
+
+ for (i = 0; i < 1000; i++) {
+ u16 data;
+
+ mdelay(10);
+ data = ipg_r16(EEPROM_CTRL);
+ if (!(data & IPG_EC_EEPROM_BUSY)) {
+ ret = ipg_r16(EEPROM_DATA);
+ break;
+ }
+ }
+ return ret;
+}
+
+static void ipg_init_mii(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ struct mii_if_info *mii_if = &sp->mii_if;
+ int phyaddr;
+
+ mii_if->dev = dev;
+ mii_if->mdio_read = mdio_read;
+ mii_if->mdio_write = mdio_write;
+ mii_if->phy_id_mask = 0x1f;
+ mii_if->reg_num_mask = 0x1f;
+
+ mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev);
+
+ if (phyaddr != 0x1f) {
+ u16 mii_phyctrl, mii_1000cr;
+
+ mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000);
+ mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
+ GMII_PHY_1000BASETCONTROL_PreferMaster;
+ mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr);
+
+ mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);
+
+ /* Set default phyparam */
+ ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr);
+
+ /* Reset PHY */
+ mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
+ mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl);
+
+ }
+}
+
+static int ipg_hw_init(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->ioaddr;
+ unsigned int i;
+ int rc;
+
+	/* Read LED Mode Configuration from EEPROM. */
+ sp->led_mode = read_eeprom(dev, 6);
+
+ /* Reset all functions within the IPG. Do not assert
+ * RST_OUT as not compatible with some PHYs.
+ */
+ rc = ipg_reset(dev, IPG_RESET_MASK);
+ if (rc < 0)
+ goto out;
+
+ ipg_init_mii(dev);
+
+ /* Read MAC Address from EEPROM */
+ for (i = 0; i < 3; i++)
+		sp->station_addr[i] = read_eeprom(dev, IPG_EEPROM_STATIONADDRESS0 + i);
+
+ for (i = 0; i < 3; i++)
+ ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i);
+
+ /* Set station address in ethernet_device structure. */
+ dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff;
+ dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8;
+ dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff;
+ dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8;
+ dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff;
+ dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8;
+out:
+ return rc;
+}
+
+static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ int rc;
+
+ mutex_lock(&sp->mii_mutex);
+ rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL);
+ mutex_unlock(&sp->mii_mutex);
+
+ return rc;
+}
+
+static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ int err;
+
+ /* Function to accommodate changes to Maximum Transfer Unit
+ * (or MTU) of IPG NIC. Cannot use default function since
+ * the default will not allow for MTU > 1500 bytes.
+ */
+
+ IPG_DEBUG_MSG("_nic_change_mtu\n");
+
+	/*
+	 * Check that the new MTU value is between 68 (the minimum MTU
+	 * required by IPv4) and 10240 bytes, the largest MTU supported
+	 * by this driver.
+	 */
+ if (new_mtu < 68 || new_mtu > 10240)
+ return -EINVAL;
+
+ err = ipg_nic_stop(dev);
+ if (err)
+ return err;
+
+ dev->mtu = new_mtu;
+
+ sp->max_rxframe_size = new_mtu;
+
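+	/* Cap each RX fragment at 4088 bytes; frames larger than one
+	 * fragment are reassembled from multiple RFDs by the jumbo
+	 * receive path.
+	 */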
+ sp->rxfrag_size = new_mtu;
+ if (sp->rxfrag_size > 4088)
+ sp->rxfrag_size = 4088;
+
+ sp->rxsupport_size = sp->max_rxframe_size;
+
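+	/* Frames above 0x600 (1536) bytes exceed a single standard RFD
+	 * buffer unit and take the jumbo receive path.
+	 */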
+ if (new_mtu > 0x0600)
+ sp->is_jumbo = true;
+ else
+ sp->is_jumbo = false;
+
+ return ipg_nic_open(dev);
+}
+
+static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ int rc;
+
+ mutex_lock(&sp->mii_mutex);
+ rc = mii_ethtool_gset(&sp->mii_if, cmd);
+ mutex_unlock(&sp->mii_mutex);
+
+ return rc;
+}
+
+static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ int rc;
+
+ mutex_lock(&sp->mii_mutex);
+ rc = mii_ethtool_sset(&sp->mii_if, cmd);
+ mutex_unlock(&sp->mii_mutex);
+
+ return rc;
+}
+
+static int ipg_nway_reset(struct net_device *dev)
+{
+ struct ipg_nic_private *sp = netdev_priv(dev);
+ int rc;
+
+ mutex_lock(&sp->mii_mutex);
+ rc = mii_nway_restart(&sp->mii_if);
+ mutex_unlock(&sp->mii_mutex);
+
+ return rc;
+}
+
+static const struct ethtool_ops ipg_ethtool_ops = {
+ .get_settings = ipg_get_settings,
+ .set_settings = ipg_set_settings,
+ .nway_reset = ipg_nway_reset,
+};
+
+static void __devexit ipg_remove(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct ipg_nic_private *sp = netdev_priv(dev);
+
+ IPG_DEBUG_MSG("_remove\n");
+
+ /* Un-register Ethernet device. */
+ unregister_netdev(dev);
+
+ pci_iounmap(pdev, sp->ioaddr);
+
+ pci_release_regions(pdev);
+
+ free_netdev(dev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static const struct net_device_ops ipg_netdev_ops = {
+ .ndo_open = ipg_nic_open,
+ .ndo_stop = ipg_nic_stop,
+ .ndo_start_xmit = ipg_nic_hard_start_xmit,
+ .ndo_get_stats = ipg_nic_get_stats,
+ .ndo_set_rx_mode = ipg_nic_set_multicast_list,
+ .ndo_do_ioctl = ipg_ioctl,
+ .ndo_tx_timeout = ipg_tx_timeout,
+ .ndo_change_mtu = ipg_nic_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __devinit ipg_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ unsigned int i = id->driver_data;
+ struct ipg_nic_private *sp;
+ struct net_device *dev;
+ void __iomem *ioaddr;
+ int rc;
+
+ rc = pci_enable_device(pdev);
+ if (rc < 0)
+ goto out;
+
+ pr_info("%s: %s\n", pci_name(pdev), ipg_brand_name[i]);
+
+ pci_set_master(pdev);
+
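+	/* The descriptor FragAddr field holds a 40-bit bus address (see
+	 * IPG_TFI_FRAGADDR); fall back to 32-bit DMA if 40-bit fails.
+	 */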
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+ if (rc < 0) {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc < 0) {
+ pr_err("%s: DMA config failed\n", pci_name(pdev));
+ goto err_disable_0;
+ }
+ }
+
+ /*
+ * Initialize net device.
+ */
+ dev = alloc_etherdev(sizeof(struct ipg_nic_private));
+ if (!dev) {
+ pr_err("%s: alloc_etherdev failed\n", pci_name(pdev));
+ rc = -ENOMEM;
+ goto err_disable_0;
+ }
+
+ sp = netdev_priv(dev);
+ spin_lock_init(&sp->lock);
+ mutex_init(&sp->mii_mutex);
+
+ sp->is_jumbo = IPG_IS_JUMBO;
+ sp->rxfrag_size = IPG_RXFRAG_SIZE;
+ sp->rxsupport_size = IPG_RXSUPPORT_SIZE;
+ sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE;
+
+ /* Declare IPG NIC functions for Ethernet device methods.
+ */
+ dev->netdev_ops = &ipg_netdev_ops;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops);
+
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_free_dev_1;
+
+ ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
+ if (!ioaddr) {
+ pr_err("%s: cannot map MMIO\n", pci_name(pdev));
+ rc = -EIO;
+ goto err_release_regions_2;
+ }
+
+ /* Save the pointer to the PCI device information. */
+ sp->ioaddr = ioaddr;
+ sp->pdev = pdev;
+ sp->dev = dev;
+
+ INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error);
+
+ pci_set_drvdata(pdev, dev);
+
+ rc = ipg_hw_init(dev);
+ if (rc < 0)
+ goto err_unmap_3;
+
+ rc = register_netdev(dev);
+ if (rc < 0)
+ goto err_unmap_3;
+
+ netdev_info(dev, "Ethernet device registered\n");
+out:
+ return rc;
+
+err_unmap_3:
+ pci_iounmap(pdev, ioaddr);
+err_release_regions_2:
+ pci_release_regions(pdev);
+err_free_dev_1:
+ free_netdev(dev);
+err_disable_0:
+ pci_disable_device(pdev);
+ goto out;
+}
+
+static struct pci_driver ipg_pci_driver = {
+ .name = IPG_DRIVER_NAME,
+ .id_table = ipg_pci_tbl,
+ .probe = ipg_probe,
+ .remove = __devexit_p(ipg_remove),
+};
+
+static int __init ipg_init_module(void)
+{
+ return pci_register_driver(&ipg_pci_driver);
+}
+
+static void __exit ipg_exit_module(void)
+{
+ pci_unregister_driver(&ipg_pci_driver);
+}
+
+module_init(ipg_init_module);
+module_exit(ipg_exit_module);
diff --git a/drivers/net/ethernet/icplus/ipg.h b/drivers/net/ethernet/icplus/ipg.h
new file mode 100644
index 000000000000..6ce027355fcf
--- /dev/null
+++ b/drivers/net/ethernet/icplus/ipg.h
@@ -0,0 +1,749 @@
+/*
+ * Include file for Gigabit Ethernet device driver for Network
+ * Interface Cards (NICs) utilizing the Tamarack Microelectronics
+ * Inc. IPG Gigabit or Triple Speed Ethernet Media Access
+ * Controller.
+ */
+#ifndef __LINUX_IPG_H
+#define __LINUX_IPG_H
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <linux/errno.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <asm/bitops.h>
+
+/*
+ * Constants
+ */
+
+/* GMII based PHY IDs */
+#define NS 0x2000
+#define MARVELL 0x0141
+#define ICPLUS_PHY 0x243
+
+/* NIC Physical Layer Device MII register fields. */
+#define MII_PHY_SELECTOR_IEEE8023 0x0001
+#define MII_PHY_TECHABILITYFIELD 0x1FE0
+
+/* GMII PHY 1000BASE-T control needs to be set to prefer master. */
+#define GMII_PHY_1000BASETCONTROL_PreferMaster 0x0400
+
+/* NIC Physical Layer Device GMII constants. */
+#define GMII_PREAMBLE 0xFFFFFFFF
+#define GMII_ST 0x1
+#define GMII_READ 0x2
+#define GMII_WRITE 0x1
+#define GMII_TA_READ_MASK 0x1
+#define GMII_TA_WRITE 0x2
+
+/* I/O register offsets. */
+enum ipg_regs {
+ DMA_CTRL = 0x00,
+ RX_DMA_STATUS = 0x08, /* Unused + reserved */
+ TFD_LIST_PTR_0 = 0x10,
+ TFD_LIST_PTR_1 = 0x14,
+ TX_DMA_BURST_THRESH = 0x18,
+ TX_DMA_URGENT_THRESH = 0x19,
+ TX_DMA_POLL_PERIOD = 0x1a,
+ RFD_LIST_PTR_0 = 0x1c,
+ RFD_LIST_PTR_1 = 0x20,
+ RX_DMA_BURST_THRESH = 0x24,
+ RX_DMA_URGENT_THRESH = 0x25,
+ RX_DMA_POLL_PERIOD = 0x26,
+ DEBUG_CTRL = 0x2c,
+ ASIC_CTRL = 0x30,
+ FIFO_CTRL = 0x38, /* Unused */
+ FLOW_OFF_THRESH = 0x3c,
+ FLOW_ON_THRESH = 0x3e,
+ EEPROM_DATA = 0x48,
+ EEPROM_CTRL = 0x4a,
+ EXPROM_ADDR = 0x4c, /* Unused */
+ EXPROM_DATA = 0x50, /* Unused */
+ WAKE_EVENT = 0x51, /* Unused */
+ COUNTDOWN = 0x54, /* Unused */
+ INT_STATUS_ACK = 0x5a,
+ INT_ENABLE = 0x5c,
+ INT_STATUS = 0x5e, /* Unused */
+ TX_STATUS = 0x60,
+ MAC_CTRL = 0x6c,
+ VLAN_TAG = 0x70, /* Unused */
+ PHY_SET = 0x75,
+ PHY_CTRL = 0x76,
+ STATION_ADDRESS_0 = 0x78,
+ STATION_ADDRESS_1 = 0x7a,
+ STATION_ADDRESS_2 = 0x7c,
+ MAX_FRAME_SIZE = 0x86,
+ RECEIVE_MODE = 0x88,
+ HASHTABLE_0 = 0x8c,
+ HASHTABLE_1 = 0x90,
+ RMON_STATISTICS_MASK = 0x98,
+ STATISTICS_MASK = 0x9c,
+ RX_JUMBO_FRAMES = 0xbc, /* Unused */
+ TCP_CHECKSUM_ERRORS = 0xc0, /* Unused */
+ IP_CHECKSUM_ERRORS = 0xc2, /* Unused */
+ UDP_CHECKSUM_ERRORS = 0xc4, /* Unused */
+ TX_JUMBO_FRAMES = 0xf4 /* Unused */
+};
+
+/* Ethernet MIB statistic register offsets. */
+#define IPG_OCTETRCVOK 0xA8
+#define IPG_MCSTOCTETRCVDOK 0xAC
+#define IPG_BCSTOCTETRCVOK 0xB0
+#define IPG_FRAMESRCVDOK 0xB4
+#define IPG_MCSTFRAMESRCVDOK 0xB8
+#define IPG_BCSTFRAMESRCVDOK 0xBE
+#define IPG_MACCONTROLFRAMESRCVD 0xC6
+#define IPG_FRAMETOOLONGERRRORS 0xC8
+#define IPG_INRANGELENGTHERRORS 0xCA
+#define IPG_FRAMECHECKSEQERRORS 0xCC
+#define IPG_FRAMESLOSTRXERRORS 0xCE
+#define IPG_OCTETXMTOK 0xD0
+#define IPG_MCSTOCTETXMTOK 0xD4
+#define IPG_BCSTOCTETXMTOK 0xD8
+#define IPG_FRAMESXMTDOK 0xDC
+#define IPG_MCSTFRAMESXMTDOK 0xE0
+#define IPG_FRAMESWDEFERREDXMT 0xE4
+#define IPG_LATECOLLISIONS 0xE8
+#define IPG_MULTICOLFRAMES 0xEC
+#define IPG_SINGLECOLFRAMES 0xF0
+#define IPG_BCSTFRAMESXMTDOK 0xF6
+#define IPG_CARRIERSENSEERRORS 0xF8
+#define IPG_MACCONTROLFRAMESXMTDOK 0xFA
+#define IPG_FRAMESABORTXSCOLLS 0xFC
+#define IPG_FRAMESWEXDEFERRAL 0xFE
+
+/* RMON statistic register offsets. */
+#define IPG_ETHERSTATSCOLLISIONS 0x100
+#define IPG_ETHERSTATSOCTETSTRANSMIT 0x104
+#define IPG_ETHERSTATSPKTSTRANSMIT 0x108
+#define IPG_ETHERSTATSPKTS64OCTESTSTRANSMIT 0x10C
+#define IPG_ETHERSTATSPKTS65TO127OCTESTSTRANSMIT 0x110
+#define IPG_ETHERSTATSPKTS128TO255OCTESTSTRANSMIT 0x114
+#define IPG_ETHERSTATSPKTS256TO511OCTESTSTRANSMIT 0x118
+#define IPG_ETHERSTATSPKTS512TO1023OCTESTSTRANSMIT 0x11C
+#define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120
+#define IPG_ETHERSTATSCRCALIGNERRORS 0x124
+#define IPG_ETHERSTATSUNDERSIZEPKTS 0x128
+#define IPG_ETHERSTATSFRAGMENTS 0x12C
+#define IPG_ETHERSTATSJABBERS 0x130
+#define IPG_ETHERSTATSOCTETS 0x134
+#define IPG_ETHERSTATSPKTS 0x138
+#define IPG_ETHERSTATSPKTS64OCTESTS 0x13C
+#define IPG_ETHERSTATSPKTS65TO127OCTESTS 0x140
+#define IPG_ETHERSTATSPKTS128TO255OCTESTS 0x144
+#define IPG_ETHERSTATSPKTS256TO511OCTESTS 0x148
+#define IPG_ETHERSTATSPKTS512TO1023OCTESTS 0x14C
+#define IPG_ETHERSTATSPKTS1024TO1518OCTESTS 0x150
+
+/* RMON statistic register equivalents. */
+#define IPG_ETHERSTATSMULTICASTPKTSTRANSMIT 0xE0
+#define IPG_ETHERSTATSBROADCASTPKTSTRANSMIT 0xF6
+#define IPG_ETHERSTATSMULTICASTPKTS 0xB8
+#define IPG_ETHERSTATSBROADCASTPKTS 0xBE
+#define IPG_ETHERSTATSOVERSIZEPKTS 0xC8
+#define IPG_ETHERSTATSDROPEVENTS 0xCE
+
+/* Serial EEPROM offsets */
+#define IPG_EEPROM_CONFIGPARAM 0x00
+#define IPG_EEPROM_ASICCTRL 0x01
+#define IPG_EEPROM_SUBSYSTEMVENDORID 0x02
+#define IPG_EEPROM_SUBSYSTEMID 0x03
+#define IPG_EEPROM_STATIONADDRESS0 0x10
+#define IPG_EEPROM_STATIONADDRESS1 0x11
+#define IPG_EEPROM_STATIONADDRESS2 0x12
+
+/* Register & data structure bit masks */
+
+/* PCI register masks. */
+
+/* IOBaseAddress */
+#define IPG_PIB_RSVD_MASK 0xFFFFFE01
+#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00
+#define IPG_PIB_IOBASEADDRIND 0x00000001
+
+/* MemBaseAddress */
+#define IPG_PMB_RSVD_MASK 0xFFFFFE07
+#define IPG_PMB_MEMBASEADDRIND 0x00000001
+#define IPG_PMB_MEMMAPTYPE 0x00000006
+#define IPG_PMB_MEMMAPTYPE0 0x00000002
+#define IPG_PMB_MEMMAPTYPE1 0x00000004
+#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00
+
+/* ConfigStatus */
+#define IPG_CS_RSVD_MASK 0xFFB0
+#define IPG_CS_CAPABILITIES 0x0010
+#define IPG_CS_66MHZCAPABLE 0x0020
+#define IPG_CS_FASTBACK2BACK 0x0080
+#define IPG_CS_DATAPARITYREPORTED 0x0100
+#define IPG_CS_DEVSELTIMING 0x0600
+#define IPG_CS_SIGNALEDTARGETABORT 0x0800
+#define IPG_CS_RECEIVEDTARGETABORT 0x1000
+#define IPG_CS_RECEIVEDMASTERABORT 0x2000
+#define IPG_CS_SIGNALEDSYSTEMERROR 0x4000
+#define IPG_CS_DETECTEDPARITYERROR 0x8000
+
+/* TFD data structure masks. */
+
+/* TFDList, TFC */
+#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFF
+#define IPG_TFC_FRAMEID 0x000000000000FFFF
+#define IPG_TFC_WORDALIGN 0x0000000000030000
+#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000
+#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000
+#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000
+#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000
+#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000
+#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000
+#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000
+#define IPG_TFC_TXINDICATE 0x0000000000400000
+#define IPG_TFC_TXDMAINDICATE 0x0000000000800000
+#define IPG_TFC_FRAGCOUNT 0x000000000F000000
+#define IPG_TFC_VLANTAGINSERT 0x0000000010000000
+#define IPG_TFC_TFDDONE 0x0000000080000000
+#define IPG_TFC_VID 0x00000FFF00000000
+#define IPG_TFC_CFI 0x0000100000000000
+#define IPG_TFC_USERPRIORITY 0x0000E00000000000
+
+/* TFDList, FragInfo */
+#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFF
+#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFF
+#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL
+
+/* RFD data structure masks. */
+
+/* RFDList, RFS */
+#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFF
+#define IPG_RFS_RXFRAMELEN 0x000000000000FFFF
+#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000
+#define IPG_RFS_RXRUNTFRAME 0x0000000000020000
+#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000
+#define IPG_RFS_RXFCSERROR 0x0000000000080000
+#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000
+#define IPG_RFS_RXLENGTHERROR 0x0000000000200000
+#define IPG_RFS_VLANDETECTED 0x0000000000400000
+#define IPG_RFS_TCPDETECTED 0x0000000000800000
+#define IPG_RFS_TCPERROR 0x0000000001000000
+#define IPG_RFS_UDPDETECTED 0x0000000002000000
+#define IPG_RFS_UDPERROR 0x0000000004000000
+#define IPG_RFS_IPDETECTED 0x0000000008000000
+#define IPG_RFS_IPERROR 0x0000000010000000
+#define IPG_RFS_FRAMESTART 0x0000000020000000
+#define IPG_RFS_FRAMEEND 0x0000000040000000
+#define IPG_RFS_RFDDONE 0x0000000080000000
+#define IPG_RFS_TCI 0x0000FFFF00000000
+
+/* RFDList, FragInfo */
+#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFF
+#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFF
+#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL
+
+/* I/O Register masks. */
+
+/* RMON Statistics Mask */
+#define IPG_RZ_ALL 0x0FFFFFFF
+
+/* Statistics Mask */
+#define IPG_SM_ALL 0x0FFFFFFF
+#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001
+#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002
+#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004
+#define IPG_SM_RXJUMBOFRAMES 0x00000008
+#define IPG_SM_TCPCHECKSUMERRORS 0x00000010
+#define IPG_SM_IPCHECKSUMERRORS 0x00000020
+#define IPG_SM_UDPCHECKSUMERRORS 0x00000040
+#define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080
+#define IPG_SM_FRAMESTOOLONGERRORS 0x00000100
+#define IPG_SM_INRANGELENGTHERRORS 0x00000200
+#define IPG_SM_FRAMECHECKSEQERRORS 0x00000400
+#define IPG_SM_FRAMESLOSTRXERRORS 0x00000800
+#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000
+#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000
+#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000
+#define IPG_SM_FRAMESWDEFERREDXMT 0x00008000
+#define IPG_SM_LATECOLLISIONS 0x00010000
+#define IPG_SM_MULTICOLFRAMES 0x00020000
+#define IPG_SM_SINGLECOLFRAMES 0x00040000
+#define IPG_SM_TXJUMBOFRAMES 0x00080000
+#define IPG_SM_CARRIERSENSEERRORS 0x00100000
+#define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000
+#define IPG_SM_FRAMESABORTXSCOLLS 0x00400000
+#define IPG_SM_FRAMESWEXDEFERAL 0x00800000
+
+/* Countdown */
+#define IPG_CD_RSVD_MASK 0x0700FFFF
+#define IPG_CD_COUNT 0x0000FFFF
+#define IPG_CD_COUNTDOWNSPEED 0x01000000
+#define IPG_CD_COUNTDOWNMODE 0x02000000
+#define IPG_CD_COUNTINTENABLED 0x04000000
+
+/* TxDMABurstThresh */
+#define IPG_TB_RSVD_MASK 0xFF
+
+/* TxDMAUrgentThresh */
+#define IPG_TU_RSVD_MASK 0xFF
+
+/* TxDMAPollPeriod */
+#define IPG_TP_RSVD_MASK 0xFF
+
+/* RxDMAUrgentThresh */
+#define IPG_RU_RSVD_MASK 0xFF
+
+/* RxDMAPollPeriod */
+#define IPG_RP_RSVD_MASK 0xFF
+
+/* ReceiveMode */
+#define IPG_RM_RSVD_MASK 0x3F
+#define IPG_RM_RECEIVEUNICAST 0x01
+#define IPG_RM_RECEIVEMULTICAST 0x02
+#define IPG_RM_RECEIVEBROADCAST 0x04
+#define IPG_RM_RECEIVEALLFRAMES 0x08
+#define IPG_RM_RECEIVEMULTICASTHASH 0x10
+#define IPG_RM_RECEIVEIPMULTICAST 0x20
+
+/* PhySet */
+#define IPG_PS_MEM_LENB9B 0x01
+#define IPG_PS_MEM_LEN9 0x02
+#define IPG_PS_NON_COMPDET 0x04
+
+/* PhyCtrl */
+#define IPG_PC_RSVD_MASK 0xFF
+#define IPG_PC_MGMTCLK_LO 0x00
+#define IPG_PC_MGMTCLK_HI 0x01
+#define IPG_PC_MGMTCLK 0x01
+#define IPG_PC_MGMTDATA 0x02
+#define IPG_PC_MGMTDIR 0x04
+#define IPG_PC_DUPLEX_POLARITY 0x08
+#define IPG_PC_DUPLEX_STATUS 0x10
+#define IPG_PC_LINK_POLARITY 0x20
+#define IPG_PC_LINK_SPEED 0xC0
+#define IPG_PC_LINK_SPEED_10MBPS 0x40
+#define IPG_PC_LINK_SPEED_100MBPS 0x80
+#define IPG_PC_LINK_SPEED_1000MBPS 0xC0
+
+/* DMACtrl */
+#define IPG_DC_RSVD_MASK 0xC07D9818
+#define IPG_DC_RX_DMA_COMPLETE 0x00000008
+#define IPG_DC_RX_DMA_POLL_NOW 0x00000010
+#define IPG_DC_TX_DMA_COMPLETE 0x00000800
+#define IPG_DC_TX_DMA_POLL_NOW 0x00001000
+#define IPG_DC_TX_DMA_IN_PROG 0x00008000
+#define IPG_DC_RX_EARLY_DISABLE 0x00010000
+#define IPG_DC_MWI_DISABLE 0x00040000
+#define IPG_DC_TX_WRITE_BACK_DISABLE 0x00080000
+#define IPG_DC_TX_BURST_LIMIT 0x00700000
+#define IPG_DC_TARGET_ABORT 0x40000000
+#define IPG_DC_MASTER_ABORT 0x80000000
+
+/* ASICCtrl */
+#define IPG_AC_RSVD_MASK 0x07FFEFF2
+#define IPG_AC_EXP_ROM_SIZE 0x00000002
+#define IPG_AC_PHY_SPEED10 0x00000010
+#define IPG_AC_PHY_SPEED100 0x00000020
+#define IPG_AC_PHY_SPEED1000 0x00000040
+#define IPG_AC_PHY_MEDIA 0x00000080
+#define IPG_AC_FORCED_CFG 0x00000700
+#define IPG_AC_D3RESETDISABLE 0x00000800
+#define IPG_AC_SPEED_UP_MODE 0x00002000
+#define IPG_AC_LED_MODE 0x00004000
+#define IPG_AC_RST_OUT_POLARITY 0x00008000
+#define IPG_AC_GLOBAL_RESET 0x00010000
+#define IPG_AC_RX_RESET 0x00020000
+#define IPG_AC_TX_RESET 0x00040000
+#define IPG_AC_DMA 0x00080000
+#define IPG_AC_FIFO 0x00100000
+#define IPG_AC_NETWORK 0x00200000
+#define IPG_AC_HOST 0x00400000
+#define IPG_AC_AUTO_INIT 0x00800000
+#define IPG_AC_RST_OUT 0x01000000
+#define IPG_AC_INT_REQUEST 0x02000000
+#define IPG_AC_RESET_BUSY 0x04000000
+#define IPG_AC_LED_SPEED 0x08000000
+#define IPG_AC_LED_MODE_BIT_1 0x20000000
+
+/* EepromCtrl */
+#define IPG_EC_RSVD_MASK 0x83FF
+#define IPG_EC_EEPROM_ADDR 0x00FF
+#define IPG_EC_EEPROM_OPCODE 0x0300
+#define IPG_EC_EEPROM_SUBCOMMAD 0x0000
+#define IPG_EC_EEPROM_WRITEOPCODE 0x0100
+#define IPG_EC_EEPROM_READOPCODE 0x0200
+#define IPG_EC_EEPROM_ERASEOPCODE 0x0300
+#define IPG_EC_EEPROM_BUSY 0x8000
+
+/* FIFOCtrl */
+#define IPG_FC_RSVD_MASK 0xC001
+#define IPG_FC_RAM_TEST_MODE 0x0001
+#define IPG_FC_TRANSMITTING 0x4000
+#define IPG_FC_RECEIVING 0x8000
+
+/* TxStatus */
+#define IPG_TS_RSVD_MASK 0xFFFF00DD
+#define IPG_TS_TX_ERROR 0x00000001
+#define IPG_TS_LATE_COLLISION 0x00000004
+#define IPG_TS_TX_MAX_COLL 0x00000008
+#define IPG_TS_TX_UNDERRUN 0x00000010
+#define IPG_TS_TX_IND_REQD 0x00000040
+#define IPG_TS_TX_COMPLETE 0x00000080
+#define IPG_TS_TX_FRAMEID 0xFFFF0000
+
+/* WakeEvent */
+#define IPG_WE_WAKE_PKT_ENABLE 0x01
+#define IPG_WE_MAGIC_PKT_ENABLE 0x02
+#define IPG_WE_LINK_EVT_ENABLE 0x04
+#define IPG_WE_WAKE_POLARITY 0x08
+#define IPG_WE_WAKE_PKT_EVT 0x10
+#define IPG_WE_MAGIC_PKT_EVT 0x20
+#define IPG_WE_LINK_EVT 0x40
+#define IPG_WE_WOL_ENABLE 0x80
+
+/* IntEnable */
+#define IPG_IE_RSVD_MASK 0x1FFE
+#define IPG_IE_HOST_ERROR 0x0002
+#define IPG_IE_TX_COMPLETE 0x0004
+#define IPG_IE_MAC_CTRL_FRAME 0x0008
+#define IPG_IE_RX_COMPLETE 0x0010
+#define IPG_IE_RX_EARLY 0x0020
+#define IPG_IE_INT_REQUESTED 0x0040
+#define IPG_IE_UPDATE_STATS 0x0080
+#define IPG_IE_LINK_EVENT 0x0100
+#define IPG_IE_TX_DMA_COMPLETE 0x0200
+#define IPG_IE_RX_DMA_COMPLETE 0x0400
+#define IPG_IE_RFD_LIST_END 0x0800
+#define IPG_IE_RX_DMA_PRIORITY 0x1000
+
+/* IntStatus */
+#define IPG_IS_RSVD_MASK 0x1FFF
+#define IPG_IS_INTERRUPT_STATUS 0x0001
+#define IPG_IS_HOST_ERROR 0x0002
+#define IPG_IS_TX_COMPLETE 0x0004
+#define IPG_IS_MAC_CTRL_FRAME 0x0008
+#define IPG_IS_RX_COMPLETE 0x0010
+#define IPG_IS_RX_EARLY 0x0020
+#define IPG_IS_INT_REQUESTED 0x0040
+#define IPG_IS_UPDATE_STATS 0x0080
+#define IPG_IS_LINK_EVENT 0x0100
+#define IPG_IS_TX_DMA_COMPLETE 0x0200
+#define IPG_IS_RX_DMA_COMPLETE 0x0400
+#define IPG_IS_RFD_LIST_END 0x0800
+#define IPG_IS_RX_DMA_PRIORITY 0x1000
+
+/* MACCtrl */
+#define IPG_MC_RSVD_MASK 0x7FE33FA3
+#define IPG_MC_IFS_SELECT 0x00000003
+#define IPG_MC_IFS_4352BIT 0x00000003
+#define IPG_MC_IFS_1792BIT 0x00000002
+#define IPG_MC_IFS_1024BIT 0x00000001
+#define IPG_MC_IFS_96BIT 0x00000000
+#define IPG_MC_DUPLEX_SELECT 0x00000020
+#define IPG_MC_DUPLEX_SELECT_FD 0x00000020
+#define IPG_MC_DUPLEX_SELECT_HD 0x00000000
+#define IPG_MC_TX_FLOW_CONTROL_ENABLE 0x00000080
+#define IPG_MC_RX_FLOW_CONTROL_ENABLE 0x00000100
+#define IPG_MC_RCV_FCS 0x00000200
+#define IPG_MC_FIFO_LOOPBACK 0x00000400
+#define IPG_MC_MAC_LOOPBACK 0x00000800
+#define IPG_MC_AUTO_VLAN_TAGGING 0x00001000
+#define IPG_MC_AUTO_VLAN_UNTAGGING 0x00002000
+#define IPG_MC_COLLISION_DETECT 0x00010000
+#define IPG_MC_CARRIER_SENSE 0x00020000
+#define IPG_MC_STATISTICS_ENABLE 0x00200000
+#define IPG_MC_STATISTICS_DISABLE 0x00400000
+#define IPG_MC_STATISTICS_ENABLED 0x00800000
+#define IPG_MC_TX_ENABLE 0x01000000
+#define IPG_MC_TX_DISABLE 0x02000000
+#define IPG_MC_TX_ENABLED 0x04000000
+#define IPG_MC_RX_ENABLE 0x08000000
+#define IPG_MC_RX_DISABLE 0x10000000
+#define IPG_MC_RX_ENABLED 0x20000000
+#define IPG_MC_PAUSED 0x40000000
+
+/*
+ * Tune
+ */
+
+/* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS append on TX. */
+#define IPG_APPEND_FCS_ON_TX 1
+
+/* Assign IPG_STRIP_FCS_ON_RX > 0 for auto FCS strip on RX. */
+#define IPG_STRIP_FCS_ON_RX 1
+
+/* Assign IPG_DROP_ON_RX_ETH_ERRORS > 0 to drop RX frames with
+ * Ethernet errors.
+ */
+#define IPG_DROP_ON_RX_ETH_ERRORS 1
+
+/* Assign IPG_INSERT_MANUAL_VLAN_TAG > 0 to insert VLAN tags manually
+ * (via TFC).
+ */
+#define IPG_INSERT_MANUAL_VLAN_TAG 0
+
+/* Assign IPG_ADD_IPCHECKSUM_ON_TX > 0 for auto IP checksum on TX. */
+#define IPG_ADD_IPCHECKSUM_ON_TX 0
+
+/* Assign IPG_ADD_TCPCHECKSUM_ON_TX > 0 for auto TCP checksum on TX.
+ * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER.
+ */
+#define IPG_ADD_TCPCHECKSUM_ON_TX 0
+
+/* Assign IPG_ADD_UDPCHECKSUM_ON_TX > 0 for auto UDP checksum on TX.
+ * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER.
+ */
+#define IPG_ADD_UDPCHECKSUM_ON_TX 0
+
+/* If inserting VLAN tags manually, assign the IPG_MANUAL_VLAN_xx
+ * constants as desired.
+ */
+#define IPG_MANUAL_VLAN_VID 0xABC
+#define IPG_MANUAL_VLAN_CFI 0x1
+#define IPG_MANUAL_VLAN_USERPRIORITY 0x5
+
+#define IPG_IO_REG_RANGE 0xFF
+#define IPG_MEM_REG_RANGE 0x154
+#define IPG_DRIVER_NAME "Sundance Technology IPG Triple-Speed Ethernet"
+#define IPG_NIC_PHY_ADDRESS 0x01
+#define IPG_DMALIST_ALIGN_PAD 0x07
+#define IPG_MULTICAST_HASHTABLE_SIZE 0x40
+
+/* Number of milliseconds to wait after issuing a software reset.
+ * IPG_AC_RESETWAIT must be at least 0x05 for proper 10Mbps operation.
+ */
+#define IPG_AC_RESETWAIT 0x05
+
+/* Number of IPG_AC_RESETWAIT time periods before declaring timeout. */
+#define IPG_AC_RESET_TIMEOUT 0x0A
+
+/* Minimum number of nanoseconds used to toggle MDC clock during
+ * MII/GMII register access.
+ */
+#define IPG_PC_PHYCTRLWAIT_NS 200
+
+#define IPG_TFDLIST_LENGTH 0x100
+
+/* Number of frames between TxDMAComplete interrupts.
+ * 0 < IPG_FRAMESBETWEENTXDMACOMPLETES <= IPG_TFDLIST_LENGTH
+ */
+#define IPG_FRAMESBETWEENTXDMACOMPLETES 0x1
+
+#define IPG_RFDLIST_LENGTH 0x100
+
+/* Maximum number of RFDs to process per interrupt.
+ * 1 < IPG_MAXRFDPROCESS_COUNT < IPG_RFDLIST_LENGTH
+ */
+#define IPG_MAXRFDPROCESS_COUNT 0x80
+
+/* Minimum margin between last freed RFD, and current RFD.
+ * 1 < IPG_MINUSEDRFDSTOFREE < IPG_RFDLIST_LENGTH
+ */
+#define IPG_MINUSEDRFDSTOFREE 0x80
+
+/* Maximum jumbo frame size, in units of 0x600 bytes (the rx buffer
+ * size that one RFD can carry).
+ */
+#define MAX_JUMBOSIZE 0x8 /* max is 12K */
+
+/* Key register values loaded at driver start up. */
+
+/* TXDMAPollPeriod is specified in 320ns increments.
+ *
+ * Value Time
+ * ---------------------
+ * 0x00-0x01 320ns
+ * 0x03 ~1us
+ * 0x1F ~10us
+ * 0xFF ~82us
+ */
+#define IPG_TXDMAPOLLPERIOD_VALUE 0x26
+
+/* TxDMAUrgentThresh specifies the minimum amount of
+ * data in the transmit FIFO before asserting an
+ * urgent transmit DMA request.
+ *
+ * Value Min TxFIFO occupied space before urgent TX request
+ * ---------------------------------------------------------------
+ * 0x00-0x04 128 bytes (1024 bits)
+ * 0x27 1248 bytes (~10000 bits)
+ * 0x30 1536 bytes (12288 bits)
+ * 0xFF		8192 bytes (65536 bits)
+ */
+#define IPG_TXDMAURGENTTHRESH_VALUE 0x04
+
+/* TxDMABurstThresh specifies the minimum amount of
+ * free space in the transmit FIFO before asserting a
+ * transmit DMA request.
+ *
+ * Value Min TxFIFO free space before TX request
+ * ----------------------------------------------------
+ * 0x00-0x08 256 bytes
+ * 0x30 1536 bytes
+ * 0xFF 8192 bytes
+ */
+#define IPG_TXDMABURSTTHRESH_VALUE 0x30
+
+/* RXDMAPollPeriod is specified in 320ns increments.
+ *
+ * Value Time
+ * ---------------------
+ * 0x00-0x01 320ns
+ * 0x03 ~1us
+ * 0x1F ~10us
+ * 0xFF ~82us
+ */
+#define IPG_RXDMAPOLLPERIOD_VALUE 0x01
+
+/* RxDMAUrgentThresh specifies the minimum amount of
+ * free space within the receive FIFO before asserting
+ * an urgent receive DMA request.
+ *
+ * Value Min RxFIFO free space before urgent RX request
+ * ---------------------------------------------------------------
+ * 0x00-0x04 128 bytes (1024 bits)
+ * 0x27 1248 bytes (~10000 bits)
+ * 0x30 1536 bytes (12288 bits)
+ * 0xFF		8192 bytes (65536 bits)
+ */
+#define IPG_RXDMAURGENTTHRESH_VALUE 0x30
+
+/* RxDMABurstThresh specifies the minimum amount of
+ * occupied space within the receive FIFO before asserting
+ * a receive DMA request.
+ *
+ * Value	Min RxFIFO occupied space before RX request
+ * ----------------------------------------------------
+ * 0x00-0x08 256 bytes
+ * 0x30 1536 bytes
+ * 0xFF 8192 bytes
+ */
+#define IPG_RXDMABURSTTHRESH_VALUE 0x30
+
+/* FlowOnThresh specifies the maximum amount of occupied
+ * space in the receive FIFO before a PAUSE frame with
+ * maximum pause time is transmitted.
+ *
+ * Value Max RxFIFO occupied space before PAUSE
+ * ---------------------------------------------------
+ * 0x0000 0 bytes
+ * 0x0740 29,696 bytes
+ * 0x07FF 32,752 bytes
+ */
+#define IPG_FLOWONTHRESH_VALUE 0x0740
+
+/* FlowOffThresh specifies the minimum amount of occupied
+ * space in the receive FIFO before a PAUSE frame with
+ * zero pause time is transmitted.
+ *
+ * Value	Min RxFIFO occupied space before PAUSE
+ * ---------------------------------------------------
+ * 0x0000 0 bytes
+ * 0x00BF 3056 bytes
+ * 0x07FF 32,752 bytes
+ */
+#define IPG_FLOWOFFTHRESH_VALUE 0x00BF
+
+/*
+ * Miscellaneous macros.
+ */
+
+/* Macros for printing debug statements. */
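+/* The "if (0)" construct used below compiles the printk away entirely
+ * while still letting the compiler type-check the format string and
+ * its arguments.
+ */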
+#ifdef IPG_DEBUG
+# define IPG_DEBUG_MSG(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "IPG: " fmt, ##args); \
+} while (0)
+# define IPG_DDEBUG_MSG(fmt, args...) \
+ printk(KERN_DEBUG "IPG: " fmt, ##args)
+# define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args)
+# define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args)
+#else
+# define IPG_DEBUG_MSG(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "IPG: " fmt, ##args); \
+} while (0)
+# define IPG_DDEBUG_MSG(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "IPG: " fmt, ##args); \
+} while (0)
+# define IPG_DUMPRFDLIST(args)
+# define IPG_DUMPTFDLIST(args)
+#endif
+
+/*
+ * End miscellaneous macros.
+ */
+
+/* Transmit Frame Descriptor. The IPG supports 15 fragments,
+ * however Linux requires only a single fragment. Note, each
+ * TFD field is 64 bits wide.
+ */
+struct ipg_tx {
+ __le64 next_desc;
+ __le64 tfc;
+ __le64 frag_info;
+};
+
+/* Receive Frame Descriptor. Note, each RFD field is 64 bits wide.
+ */
+struct ipg_rx {
+ __le64 next_desc;
+ __le64 rfs;
+ __le64 frag_info;
+};
+
+struct ipg_jumbo {
+ int found_start;
+ int current_size;
+ struct sk_buff *skb;
+};
+
+/* Structure of IPG NIC specific data. */
+struct ipg_nic_private {
+ void __iomem *ioaddr;
+ struct ipg_tx *txd;
+ struct ipg_rx *rxd;
+ dma_addr_t txd_map;
+ dma_addr_t rxd_map;
+ struct sk_buff *tx_buff[IPG_TFDLIST_LENGTH];
+ struct sk_buff *rx_buff[IPG_RFDLIST_LENGTH];
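+	/* Free-running producer/consumer indices; reduce modulo the
+	 * list length to obtain a ring position.
+	 */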
+ unsigned int tx_current;
+ unsigned int tx_dirty;
+ unsigned int rx_current;
+ unsigned int rx_dirty;
+ bool is_jumbo;
+ struct ipg_jumbo jumbo;
+ unsigned long rxfrag_size;
+ unsigned long rxsupport_size;
+ unsigned long max_rxframe_size;
+ unsigned int rx_buf_sz;
+ struct pci_dev *pdev;
+ struct net_device *dev;
+ struct net_device_stats stats;
+ spinlock_t lock;
+ int tenmbpsmode;
+
+ u16 led_mode;
+ u16 station_addr[3]; /* Station Address in EEPROM Reg 0x10..0x12 */
+
+ struct mutex mii_mutex;
+ struct mii_if_info mii_if;
+ int reset_current_tfd;
+#ifdef IPG_DEBUG
+ int RFDlistendCount;
+ int RFDListCheckedCount;
+ int EmptyRFDListCount;
+#endif
+ struct delayed_work task;
+};
+
+#endif /* __LINUX_IPG_H */
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
new file mode 100644
index 000000000000..61029dc7fa6f
--- /dev/null
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -0,0 +1,222 @@
+#
+# Intel network device configuration
+#
+
+config NET_VENDOR_INTEL
+ bool "Intel devices"
+ default y
+ depends on PCI || PCI_MSI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Intel cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_INTEL
+
+config E100
+ tristate "Intel(R) PRO/100+ support"
+ depends on PCI
+ select NET_CORE
+ select MII
+ ---help---
+ This driver supports Intel(R) PRO/100 family of adapters.
+ To verify that your adapter is supported, find the board ID number
+ on the adapter. Look for a label that has a barcode and a number
+ in the format 123456-001 (six digits hyphen three digits).
+
+ Use the above information and the Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+ to identify the adapter.
+
+ For the latest Intel PRO/100 network driver for Linux, see:
+
+ <http://www.intel.com/p/en_US/support/highlights/network/pro100plus>
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/e100.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called e100.
+
+config E1000
+ tristate "Intel(R) PRO/1000 Gigabit Ethernet support"
+ depends on PCI
+ ---help---
+ This driver supports Intel(R) PRO/1000 gigabit ethernet family of
+ adapters. For more information on how to identify your adapter, go
+ to the Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/e1000.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called e1000.
+
+config E1000E
+ tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
+ depends on PCI && (!SPARC32 || BROKEN)
+ select CRC32
+ ---help---
+ This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
+ ethernet family of adapters. For PCI or PCI-X e1000 adapters,
+ use the regular e1000 driver. For more information on how to
+ identify your adapter, go to the Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ To compile this driver as a module, choose M here. The module
+ will be called e1000e.
+
+config IGB
+ tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
+ depends on PCI
+ ---help---
+ This driver supports Intel(R) 82575/82576 gigabit ethernet family of
+ adapters. For more information on how to identify your adapter, go
+ to the Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/e1000.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called igb.
+
+config IGB_DCA
+ bool "Direct Cache Access (DCA) Support"
+ default y
+ depends on IGB && DCA && !(IGB=y && DCA=m)
+ ---help---
+ Say Y here if you want to use Direct Cache Access (DCA) in the
+ driver. DCA is a method for warming the CPU cache before data
+ is used, with the intent of lessening the impact of cache misses.
+
+config IGBVF
+ tristate "Intel(R) 82576 Virtual Function Ethernet support"
+ depends on PCI
+ ---help---
+ This driver supports Intel(R) 82576 virtual functions. For more
+ information on how to identify your adapter, go to the Adapter &
+ Driver ID Guide at:
+
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/e1000.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called igbvf.
+
+config IXGB
+ tristate "Intel(R) PRO/10GbE support"
+ depends on PCI
+ ---help---
+ This driver supports Intel(R) PRO/10GbE family of adapters for
+ PCI-X type cards. For PCI-E type cards, use the "ixgbe" driver
+ instead. For more information on how to identify your adapter, go
+ to the Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/ixgb.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ixgb.
+
+config IXGBE
+ tristate "Intel(R) 10GbE PCI Express adapters support"
+ depends on PCI && INET
+ select MDIO
+ ---help---
+ This driver supports Intel(R) 10GbE PCI Express family of
+ adapters. For more information on how to identify your adapter, go
+ to the Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ To compile this driver as a module, choose M here. The module
+ will be called ixgbe.
+
+config IXGBE_DCA
+ bool "Direct Cache Access (DCA) Support"
+ default y
+ depends on IXGBE && DCA && !(IXGBE=y && DCA=m)
+ ---help---
+ Say Y here if you want to use Direct Cache Access (DCA) in the
+ driver. DCA is a method for warming the CPU cache before data
+ is used, with the intent of lessening the impact of cache misses.
+
+config IXGBE_DCB
+ bool "Data Center Bridging (DCB) Support"
+ default n
+ depends on IXGBE && DCB
+ ---help---
+ Say Y here if you want to use Data Center Bridging (DCB) in the
+ driver.
+
+ If unsure, say N.
+
+config IXGBEVF
+ tristate "Intel(R) 82599 Virtual Function Ethernet support"
+ depends on PCI_MSI
+ ---help---
+ This driver supports Intel(R) 82599 virtual functions. For more
+ information on how to identify your adapter, go to the Adapter &
+ Driver ID Guide at:
+
+ <http://support.intel.com/support/network/sb/CS-008441.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/ixgbevf.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ixgbevf. MSI-X interrupt support is required
+ for this driver to work correctly.
+
+endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
new file mode 100644
index 000000000000..c8210e688669
--- /dev/null
+++ b/drivers/net/ethernet/intel/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the Intel network device drivers.
+#
+
+obj-$(CONFIG_E100) += e100.o
+obj-$(CONFIG_E1000) += e1000/
+obj-$(CONFIG_E1000E) += e1000e/
+obj-$(CONFIG_IGB) += igb/
+obj-$(CONFIG_IGBVF) += igbvf/
+obj-$(CONFIG_IXGBE) += ixgbe/
+obj-$(CONFIG_IXGBEVF) += ixgbevf/
+obj-$(CONFIG_IXGB) += ixgb/
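+
+# Illustrative only: a .config fragment that would build e100 as a
+# module, using the symbols defined in the Kconfig above:
+#
+# CONFIG_NET_VENDOR_INTEL=y
+# CONFIG_E100=m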
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
new file mode 100644
index 000000000000..fe87d3eea5ed
--- /dev/null
+++ b/drivers/net/ethernet/intel/e100.c
@@ -0,0 +1,3109 @@
+/*******************************************************************************
+
+ Intel PRO/100 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/*
+ * e100.c: Intel(R) PRO/100 ethernet driver
+ *
+ * (Re)written 2003 by scott.feldman@intel.com. Based loosely on
+ * original e100 driver, but better described as a munging of
+ * e100, e1000, eepro100, tg3, 8139cp, and other drivers.
+ *
+ * References:
+ * Intel 8255x 10/100 Mbps Ethernet Controller Family,
+ * Open Source Software Developers Manual,
+ * http://sourceforge.net/projects/e1000
+ *
+ *
+ * Theory of Operation
+ *
+ * I. General
+ *
+ * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
+ * controller family, which includes the 82557, 82558, 82559, 82550,
+ * 82551, and 82562 devices. 82558 and greater controllers
+ * integrate the Intel 82555 PHY. The controllers are used in
+ * server and client network interface cards, as well as in
+ * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
+ * configurations. 8255x supports a 32-bit linear addressing
+ * mode and operates at a 33 MHz PCI clock rate.
+ *
+ * II. Driver Operation
+ *
+ * Memory-mapped mode is used exclusively to access the device's
+ * shared-memory structure, the Control/Status Registers (CSR). All
+ * setup, configuration, and control of the device, including queuing
+ * of Tx, Rx, and configuration commands is through the CSR.
+ * cmd_lock serializes accesses to the CSR command register. cb_lock
+ * protects the shared Command Block List (CBL).
+ *
+ * 8255x is highly MII-compliant and all accesses to the PHY go
+ * through the Management Data Interface (MDI). Consequently, the
+ * driver leverages the mii.c library shared with other MII-compliant
+ * devices.
+ *
+ * Big- and Little-Endian byte order as well as 32- and 64-bit
+ * archs are supported. Weak-ordered memory and non-cache-coherent
+ * archs are supported.
+ *
+ * III. Transmit
+ *
+ * A Tx skb is mapped and hangs off of a TCB. TCBs are linked
+ * together in a fixed-size ring (CBL) thus forming the flexible mode
+ * memory structure. A TCB marked with the suspend-bit indicates
+ * the end of the ring. The last TCB processed suspends the
+ * controller, and the controller can be restarted by issue a CU
+ * resume command to continue from the suspend point, or a CU start
+ * command to start at a given position in the ring.
+ *
+ * Non-Tx commands (config, multicast setup, etc) are linked
+ * into the CBL ring along with Tx commands. The common structure
+ * used for both Tx and non-Tx commands is the Command Block (CB).
+ *
+ * cb_to_use is the next CB to use for queuing a command; cb_to_clean
+ * is the next CB to check for completion; cb_to_send is the first
+ * CB to start on in case of a previous failure to resume. CB clean
+ * up happens in interrupt context in response to a CU interrupt.
+ * cbs_avail keeps track of number of free CB resources available.
+ *
+ * Hardware padding of short packets to minimum packet size is
+ * enabled. 82557 pads with 7Eh, while the later controllers pad
+ * with 00h.
+ *
+ * IV. Receive
+ *
+ * The Receive Frame Area (RFA) comprises a ring of Receive Frame
+ * Descriptors (RFD) + data buffer, thus forming the simplified mode
+ * memory structure. Rx skbs are allocated to contain both the RFD
+ * and the data buffer, but the RFD is pulled off before the skb is
+ * indicated. The data buffer is aligned such that encapsulated
+ * protocol headers are u32-aligned. Since the RFD is part of the
+ * mapped shared memory, and completion status is contained within
+ * the RFD, the RFD must be dma_sync'ed to maintain a consistent
+ * view from software and hardware.
+ *
+ * In order to keep updates to the RFD link field from colliding with
+ * hardware writes to mark packets complete, we use the feature that
+ * hardware will not write to a size 0 descriptor and mark the previous
+ * packet as end-of-list (EL). After updating the link, we remove EL
+ * and only then restore the size such that hardware may use the
+ * previous-to-end RFD.
+ *
+ * Under typical operation, the receive unit (RU) is started once,
+ * and the controller happily fills RFDs as frames arrive. If
+ * replacement RFDs cannot be allocated, or the RU goes non-active,
+ * the RU must be restarted. Frame arrival generates an interrupt,
+ * and Rx indication and re-allocation happen in the same context,
+ * therefore no locking is required. A software-generated interrupt
+ * is generated from the watchdog to recover from a failed allocation
+ * scenario where all Rx resources have been indicated and none re-
+ * placed.
+ *
+ * V. Miscellaneous
+ *
+ * VLAN offloading of tagging, stripping and filtering is not
+ * supported, but driver will accommodate the extra 4-byte VLAN tag
+ * for processing by upper layers. Tx/Rx Checksum offloading is not
+ * supported. Tx Scatter/Gather is not supported. Jumbo Frames are
+ * not supported (hardware limitation).
+ *
+ * MagicPacket(tm) WoL support is enabled/disabled via ethtool.
+ *
+ * Thanks to JC (jchapman@katalix.com) for helping with
+ * testing/troubleshooting the development driver.
+ *
+ * TODO:
+ * o several entry points race with dev->close
+ * o check for tx-no-resources/stop Q races with tx clean/wake Q
+ *
+ * FIXES:
+ * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
+ * - Stratus87247: protect MDI control register manipulations
+ * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
+ * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
+ */
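+
+/*
+ * Illustrative sketch only, not driver code: the RFD link-update
+ * ordering described in section IV above, with hypothetical
+ * prev_rfd/new_rfd/new_dma names. The new descriptor is staged
+ * with size 0 and EL set, so hardware will not touch it while the
+ * previous link is patched:
+ *
+ * new_rfd->size = 0; // hw ignores size-0 RFDs
+ * new_rfd->command = cpu_to_le16(cb_el); // new end-of-list
+ * prev_rfd->link = cpu_to_le32(new_dma); // safe: no hw race
+ * prev_rfd->command &= cpu_to_le16(~cb_el); // drop old EL first,
+ * prev_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN); // then size
+ */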
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/hardirq.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/string.h>
+#include <linux/firmware.h>
+#include <linux/rtnetlink.h>
+#include <asm/unaligned.h>
+
+
+#define DRV_NAME "e100"
+#define DRV_EXT "-NAPI"
+#define DRV_VERSION "3.5.24-k2"DRV_EXT
+#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
+#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
+
+#define E100_WATCHDOG_PERIOD (2 * HZ)
+#define E100_NAPI_WEIGHT 16
+
+#define FIRMWARE_D101M "e100/d101m_ucode.bin"
+#define FIRMWARE_D101S "e100/d101s_ucode.bin"
+#define FIRMWARE_D102E "e100/d102e_ucode.bin"
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR(DRV_COPYRIGHT);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_FIRMWARE(FIRMWARE_D101M);
+MODULE_FIRMWARE(FIRMWARE_D101S);
+MODULE_FIRMWARE(FIRMWARE_D102E);
+
+static int debug = 3;
+static int eeprom_bad_csum_allow = 0;
+static int use_io = 0;
+module_param(debug, int, 0);
+module_param(eeprom_bad_csum_allow, int, 0);
+module_param(use_io, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
+MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
+
+#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
+ PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
+ PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
+static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
+ INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
+ INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
+ INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
+ INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
+ INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
+ INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
+ INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
+ INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
+ INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
+ INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
+ INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
+ INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
+ INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
+ INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
+ INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
+ INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
+ INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
+ INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
+ INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
+ INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
+ INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
+ INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
+ INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
+ INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
+ INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
+ INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
+ INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
+ INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
+ INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
+ INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
+ INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
+ INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
+ INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
+ INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
+ INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
+ INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
+ INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
+ INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
+ INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
+ INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
+ INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
+ INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, e100_id_table);
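+
+/* For illustration: the (0x1229, 0) entry above expands to
+ * { 0x8086, 0x1229, PCI_ANY_ID, PCI_ANY_ID,
+ * PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, 0 },
+ * i.e. it matches any 8086:1229 function whose PCI class reads
+ * Ethernet, and stashes the ICH revision (here 0) in driver_data.
+ */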
+
+enum mac {
+ mac_82557_D100_A = 0,
+ mac_82557_D100_B = 1,
+ mac_82557_D100_C = 2,
+ mac_82558_D101_A4 = 4,
+ mac_82558_D101_B0 = 5,
+ mac_82559_D101M = 8,
+ mac_82559_D101S = 9,
+ mac_82550_D102 = 12,
+ mac_82550_D102_C = 13,
+ mac_82551_E = 14,
+ mac_82551_F = 15,
+ mac_82551_10 = 16,
+ mac_unknown = 0xFF,
+};
+
+enum phy {
+ phy_100a = 0x000003E0,
+ phy_100c = 0x035002A8,
+ phy_82555_tx = 0x015002A8,
+ phy_nsc_tx = 0x5C002000,
+ phy_82562_et = 0x033002A8,
+ phy_82562_em = 0x032002A8,
+ phy_82562_ek = 0x031002A8,
+ phy_82562_eh = 0x017002A8,
+ phy_82552_v = 0xd061004d,
+ phy_unknown = 0xFFFFFFFF,
+};
+
+/* CSR (Control/Status Registers) */
+struct csr {
+ struct {
+ u8 status;
+ u8 stat_ack;
+ u8 cmd_lo;
+ u8 cmd_hi;
+ u32 gen_ptr;
+ } scb;
+ u32 port;
+ u16 flash_ctrl;
+ u8 eeprom_ctrl_lo;
+ u8 eeprom_ctrl_hi;
+ u32 mdi_ctrl;
+ u32 rx_dma_count;
+};
+
+enum scb_status {
+ rus_no_res = 0x08,
+ rus_ready = 0x10,
+ rus_mask = 0x3C,
+};
+
+enum ru_state {
+ RU_SUSPENDED = 0,
+ RU_RUNNING = 1,
+ RU_UNINITIALIZED = -1,
+};
+
+enum scb_stat_ack {
+ stat_ack_not_ours = 0x00,
+ stat_ack_sw_gen = 0x04,
+ stat_ack_rnr = 0x10,
+ stat_ack_cu_idle = 0x20,
+ stat_ack_frame_rx = 0x40,
+ stat_ack_cu_cmd_done = 0x80,
+ stat_ack_not_present = 0xFF,
+ stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
+ stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
+};
+
+enum scb_cmd_hi {
+ irq_mask_none = 0x00,
+ irq_mask_all = 0x01,
+ irq_sw_gen = 0x02,
+};
+
+enum scb_cmd_lo {
+ cuc_nop = 0x00,
+ ruc_start = 0x01,
+ ruc_load_base = 0x06,
+ cuc_start = 0x10,
+ cuc_resume = 0x20,
+ cuc_dump_addr = 0x40,
+ cuc_dump_stats = 0x50,
+ cuc_load_base = 0x60,
+ cuc_dump_reset = 0x70,
+};
+
+enum cuc_dump {
+ cuc_dump_complete = 0x0000A005,
+ cuc_dump_reset_complete = 0x0000A007,
+};
+
+enum port {
+ software_reset = 0x0000,
+ selftest = 0x0001,
+ selective_reset = 0x0002,
+};
+
+enum eeprom_ctrl_lo {
+ eesk = 0x01,
+ eecs = 0x02,
+ eedi = 0x04,
+ eedo = 0x08,
+};
+
+enum mdi_ctrl {
+ mdi_write = 0x04000000,
+ mdi_read = 0x08000000,
+ mdi_ready = 0x10000000,
+};
+
+enum eeprom_op {
+ op_write = 0x05,
+ op_read = 0x06,
+ op_ewds = 0x10,
+ op_ewen = 0x13,
+};
+
+enum eeprom_offsets {
+ eeprom_cnfg_mdix = 0x03,
+ eeprom_phy_iface = 0x06,
+ eeprom_id = 0x0A,
+ eeprom_config_asf = 0x0D,
+ eeprom_smbus_addr = 0x90,
+};
+
+enum eeprom_cnfg_mdix {
+ eeprom_mdix_enabled = 0x0080,
+};
+
+enum eeprom_phy_iface {
+ NoSuchPhy = 0,
+ I82553AB,
+ I82553C,
+ I82503,
+ DP83840,
+ S80C240,
+ S80C24,
+ I82555,
+ DP83840A = 10,
+};
+
+enum eeprom_id {
+ eeprom_id_wol = 0x0020,
+};
+
+enum eeprom_config_asf {
+ eeprom_asf = 0x8000,
+ eeprom_gcl = 0x4000,
+};
+
+enum cb_status {
+ cb_complete = 0x8000,
+ cb_ok = 0x2000,
+};
+
+enum cb_command {
+ cb_nop = 0x0000,
+ cb_iaaddr = 0x0001,
+ cb_config = 0x0002,
+ cb_multi = 0x0003,
+ cb_tx = 0x0004,
+ cb_ucode = 0x0005,
+ cb_dump = 0x0006,
+ cb_tx_sf = 0x0008,
+ cb_cid = 0x1f00,
+ cb_i = 0x2000,
+ cb_s = 0x4000,
+ cb_el = 0x8000,
+};
+
+struct rfd {
+ __le16 status;
+ __le16 command;
+ __le32 link;
+ __le32 rbd;
+ __le16 actual_size;
+ __le16 size;
+};
+
+struct rx {
+ struct rx *next, *prev;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+};
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+#define X(a,b) b,a
+#else
+#define X(a,b) a,b
+#endif
+struct config {
+/*0*/ u8 X(byte_count:6, pad0:2);
+/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
+/*2*/ u8 adaptive_ifs;
+/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
+ term_write_cache_line:1), pad3:4);
+/*4*/ u8 X(rx_dma_max_count:7, pad4:1);
+/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
+/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
+ tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
+ rx_discard_overruns:1), rx_save_bad_frames:1);
+/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
+ pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
+ tx_dynamic_tbd:1);
+/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
+/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
+ link_status_wake:1), arp_wake:1), mcmatch_wake:1);
+/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
+ loopback:2);
+/*11*/ u8 X(linear_priority:3, pad11:5);
+/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
+/*13*/ u8 ip_addr_lo;
+/*14*/ u8 ip_addr_hi;
+/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
+ wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
+ pad15_2:1), crs_or_cdt:1);
+/*16*/ u8 fc_delay_lo;
+/*17*/ u8 fc_delay_hi;
+/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
+ rx_long_ok:1), fc_priority_threshold:3), pad18:1);
+/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
+ fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
+ full_duplex_force:1), full_duplex_pin:1);
+/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
+/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
+/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
+ u8 pad_d102[9];
+};
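+
+/* Expansion example (for illustration): on a little-endian build,
+ * byte 8 above reads
+ * u8 mii_mode:1, pad8:6, csma_disabled:1;
+ * while on a big-endian build the nested X() reverses it to
+ * u8 csma_disabled:1, pad8:6, mii_mode:1;
+ * keeping each named bit at the position the hardware expects,
+ * regardless of the compiler's bitfield allocation order.
+ */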
+
+#define E100_MAX_MULTICAST_ADDRS 64
+struct multi {
+ __le16 count;
+ u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
+};
+
+/* Important: keep total struct u32-aligned */
+#define UCODE_SIZE 134
+struct cb {
+ __le16 status;
+ __le16 command;
+ __le32 link;
+ union {
+ u8 iaaddr[ETH_ALEN];
+ __le32 ucode[UCODE_SIZE];
+ struct config config;
+ struct multi multi;
+ struct {
+ u32 tbd_array;
+ u16 tcb_byte_count;
+ u8 threshold;
+ u8 tbd_count;
+ struct {
+ __le32 buf_addr;
+ __le16 size;
+ u16 eol;
+ } tbd;
+ } tcb;
+ __le32 dump_buffer_addr;
+ } u;
+ struct cb *next, *prev;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+};
+
+enum loopback {
+ lb_none = 0, lb_mac = 1, lb_phy = 3,
+};
+
+struct stats {
+ __le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
+ tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
+ tx_multiple_collisions, tx_total_collisions;
+ __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
+ rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
+ rx_short_frame_errors;
+ __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
+ __le16 xmt_tco_frames, rcv_tco_frames;
+ __le32 complete;
+};
+
+struct mem {
+ struct {
+ u32 signature;
+ u32 result;
+ } selftest;
+ struct stats stats;
+ u8 dump_buf[596];
+};
+
+struct param_range {
+ u32 min;
+ u32 max;
+ u32 count;
+};
+
+struct params {
+ struct param_range rfds;
+ struct param_range cbs;
+};
+
+struct nic {
+ /* Begin: frequently used values: keep adjacent for cache effect */
+ u32 msg_enable ____cacheline_aligned;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);
+
+ struct rx *rxs ____cacheline_aligned;
+ struct rx *rx_to_use;
+ struct rx *rx_to_clean;
+ struct rfd blank_rfd;
+ enum ru_state ru_running;
+
+ spinlock_t cb_lock ____cacheline_aligned;
+ spinlock_t cmd_lock;
+ struct csr __iomem *csr;
+ enum scb_cmd_lo cuc_cmd;
+ unsigned int cbs_avail;
+ struct napi_struct napi;
+ struct cb *cbs;
+ struct cb *cb_to_use;
+ struct cb *cb_to_send;
+ struct cb *cb_to_clean;
+ __le16 tx_command;
+ /* End: frequently used values: keep adjacent for cache effect */
+
+ enum {
+ ich = (1 << 0),
+ promiscuous = (1 << 1),
+ multicast_all = (1 << 2),
+ wol_magic = (1 << 3),
+ ich_10h_workaround = (1 << 4),
+ } flags ____cacheline_aligned;
+
+ enum mac mac;
+ enum phy phy;
+ struct params params;
+ struct timer_list watchdog;
+ struct mii_if_info mii;
+ struct work_struct tx_timeout_task;
+ enum loopback loopback;
+
+ struct mem *mem;
+ dma_addr_t dma_addr;
+
+ struct pci_pool *cbs_pool;
+ dma_addr_t cbs_dma_addr;
+ u8 adaptive_ifs;
+ u8 tx_threshold;
+ u32 tx_frames;
+ u32 tx_collisions;
+ u32 tx_deferred;
+ u32 tx_single_collisions;
+ u32 tx_multiple_collisions;
+ u32 tx_fc_pause;
+ u32 tx_tco_frames;
+
+ u32 rx_fc_pause;
+ u32 rx_fc_unsupported;
+ u32 rx_tco_frames;
+ u32 rx_over_length_errors;
+
+ u16 eeprom_wc;
+ __le16 eeprom[256];
+ spinlock_t mdio_lock;
+ const struct firmware *fw;
+};
+
+static inline void e100_write_flush(struct nic *nic)
+{
+ /* Flush previous PCI writes through intermediate bridges
+ * by doing a benign read */
+ (void)ioread8(&nic->csr->scb.status);
+}
+
+static void e100_enable_irq(struct nic *nic)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&nic->cmd_lock, flags);
+ iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
+ e100_write_flush(nic);
+ spin_unlock_irqrestore(&nic->cmd_lock, flags);
+}
+
+static void e100_disable_irq(struct nic *nic)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&nic->cmd_lock, flags);
+ iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
+ e100_write_flush(nic);
+ spin_unlock_irqrestore(&nic->cmd_lock, flags);
+}
+
+static void e100_hw_reset(struct nic *nic)
+{
+ /* Put CU and RU into idle with a selective reset to get
+ * device off of PCI bus */
+ iowrite32(selective_reset, &nic->csr->port);
+ e100_write_flush(nic); udelay(20);
+
+ /* Now fully reset device */
+ iowrite32(software_reset, &nic->csr->port);
+ e100_write_flush(nic); udelay(20);
+
+ /* Mask off our interrupt line - it's unmasked after reset */
+ e100_disable_irq(nic);
+}
+
+static int e100_self_test(struct nic *nic)
+{
+ u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);
+
+ /* Passing the self-test is a pretty good indication
+ * that the device can DMA to/from host memory */
+
+ nic->mem->selftest.signature = 0;
+ nic->mem->selftest.result = 0xFFFFFFFF;
+
+ iowrite32(selftest | dma_addr, &nic->csr->port);
+ e100_write_flush(nic);
+ /* Wait 10 msec for self-test to complete */
+ msleep(10);
+
+ /* Interrupts are enabled after self-test */
+ e100_disable_irq(nic);
+
+ /* Check results of self-test */
+ if (nic->mem->selftest.result != 0) {
+ netif_err(nic, hw, nic->netdev,
+ "Self-test failed: result=0x%08X\n",
+ nic->mem->selftest.result);
+ return -ETIMEDOUT;
+ }
+ if (nic->mem->selftest.signature == 0) {
+ netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
+{
+ u32 cmd_addr_data[3];
+ u8 ctrl;
+ int i, j;
+
+ /* Three cmds: write/erase enable, write data, write/erase disable */
+ cmd_addr_data[0] = op_ewen << (addr_len - 2);
+ cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
+ le16_to_cpu(data);
+ cmd_addr_data[2] = op_ewds << (addr_len - 2);
+
+ /* Bit-bang cmds to write word to eeprom */
+ for (j = 0; j < 3; j++) {
+
+ /* Chip select */
+ iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
+ e100_write_flush(nic); udelay(4);
+
+ for (i = 31; i >= 0; i--) {
+ ctrl = (cmd_addr_data[j] & (1 << i)) ?
+ eecs | eedi : eecs;
+ iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
+ e100_write_flush(nic); udelay(4);
+
+ iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
+ e100_write_flush(nic); udelay(4);
+ }
+ /* Wait 10 msec for cmd to complete */
+ msleep(10);
+
+ /* Chip deselect */
+ iowrite8(0, &nic->csr->eeprom_ctrl_lo);
+ e100_write_flush(nic); udelay(4);
+ }
+}
+
+/* General technique stolen from the eepro100 driver - very clever */
+static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
+{
+ u32 cmd_addr_data;
+ u16 data = 0;
+ u8 ctrl;
+ int i;
+
+ cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
+
+ /* Chip select */
+ iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
+ e100_write_flush(nic); udelay(4);
+
+ /* Bit-bang to read word from eeprom */
+ for (i = 31; i >= 0; i--) {
+ ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
+ iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
+ e100_write_flush(nic); udelay(4);
+
+ iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
+ e100_write_flush(nic); udelay(4);
+
+ /* Eeprom drives a dummy zero to EEDO after receiving
+ * complete address. Use this to adjust addr_len. */
+ ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
+ if (!(ctrl & eedo) && i > 16) {
+ *addr_len -= (i - 16);
+ i = 17;
+ }
+
+ data = (data << 1) | (ctrl & eedo ? 1 : 0);
+ }
+
+ /* Chip deselect */
+ iowrite8(0, &nic->csr->eeprom_ctrl_lo);
+ e100_write_flush(nic); udelay(4);
+
+ return cpu_to_le16(data);
+}
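+
+/* Worked example (illustrative): on a 64-word part (addr_len == 6),
+ * reading addr 0x0A gives cmd_addr_data = ((0x06 << 6) | 0x0A) << 16
+ * == 0x018A0000. Bits 31..0 are clocked out MSB-first; the EEPROM
+ * ignores the leading zeros until the op_read start bit arrives, and
+ * the final 16 clocks shift the data word in through EEDO.
+ */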
+
+/* Load entire EEPROM image into driver cache and validate checksum */
+static int e100_eeprom_load(struct nic *nic)
+{
+ u16 addr, addr_len = 8, checksum = 0;
+
+ /* Try reading with an 8-bit addr len to discover actual addr len */
+ e100_eeprom_read(nic, &addr_len, 0);
+ nic->eeprom_wc = 1 << addr_len;
+
+ for (addr = 0; addr < nic->eeprom_wc; addr++) {
+ nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
+ if (addr < nic->eeprom_wc - 1)
+ checksum += le16_to_cpu(nic->eeprom[addr]);
+ }
+
+ /* The checksum, stored in the last word, is calculated such that
+ * the sum of words should be 0xBABA */
+ if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
+ netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
+ if (!eeprom_bad_csum_allow)
+ return -EAGAIN;
+ }
+
+ return 0;
+}
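+
+/* A minimal sketch (illustrative, never compiled) of what "sums to
+ * 0xBABA" means: adding up every word of a valid image, including
+ * the stored checksum word itself, wraps to exactly 0xBABA.
+ */
+#if 0
+ u16 sum = 0, a;
+
+ for (a = 0; a < nic->eeprom_wc; a++)
+ sum += le16_to_cpu(nic->eeprom[a]);
+ /* sum == 0xBABA for an intact EEPROM */
+#endif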
+
+/* Save (portion of) driver EEPROM cache to device and update checksum */
+static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
+{
+ u16 addr, addr_len = 8, checksum = 0;
+
+ /* Try reading with an 8-bit addr len to discover actual addr len */
+ e100_eeprom_read(nic, &addr_len, 0);
+ nic->eeprom_wc = 1 << addr_len;
+
+ if (start + count >= nic->eeprom_wc)
+ return -EINVAL;
+
+ for (addr = start; addr < start + count; addr++)
+ e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
+
+ /* The checksum, stored in the last word, is calculated such that
+ * the sum of words should be 0xBABA */
+ for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
+ checksum += le16_to_cpu(nic->eeprom[addr]);
+ nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
+ e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
+ nic->eeprom[nic->eeprom_wc - 1]);
+
+ return 0;
+}
+
+#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
+#define E100_WAIT_SCB_FAST 20 /* delay like the old code */
+static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
+{
+ unsigned long flags;
+ unsigned int i;
+ int err = 0;
+
+ spin_lock_irqsave(&nic->cmd_lock, flags);
+
+ /* Previous command is accepted when SCB clears */
+ for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
+ if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
+ break;
+ cpu_relax();
+ if (unlikely(i > E100_WAIT_SCB_FAST))
+ udelay(5);
+ }
+ if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
+ err = -EAGAIN;
+ goto err_unlock;
+ }
+
+ if (unlikely(cmd != cuc_resume))
+ iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
+ iowrite8(cmd, &nic->csr->scb.cmd_lo);
+
+err_unlock:
+ spin_unlock_irqrestore(&nic->cmd_lock, flags);
+
+ return err;
+}
+
+static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
+ void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
+{
+ struct cb *cb;
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&nic->cb_lock, flags);
+
+ if (unlikely(!nic->cbs_avail)) {
+ err = -ENOMEM;
+ goto err_unlock;
+ }
+
+ cb = nic->cb_to_use;
+ nic->cb_to_use = cb->next;
+ nic->cbs_avail--;
+ cb->skb = skb;
+
+ if (unlikely(!nic->cbs_avail))
+ err = -ENOSPC;
+
+ cb_prepare(nic, cb, skb);
+
+ /* Order is important otherwise we'll be in a race with h/w:
+ * set S-bit in current first, then clear S-bit in previous. */
+ cb->command |= cpu_to_le16(cb_s);
+ wmb();
+ cb->prev->command &= cpu_to_le16(~cb_s);
+
+ while (nic->cb_to_send != nic->cb_to_use) {
+ if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
+ nic->cb_to_send->dma_addr))) {
+ /* Ok, here's where things get sticky. It's
+ * possible that we can't schedule the command
+ * because the controller is too busy, so
+ * let's just queue the command and try again
+ * when another command is scheduled. */
+ if (err == -ENOSPC) {
+ /* request a reset */
+ schedule_work(&nic->tx_timeout_task);
+ }
+ break;
+ } else {
+ nic->cuc_cmd = cuc_resume;
+ nic->cb_to_send = nic->cb_to_send->next;
+ }
+ }
+
+err_unlock:
+ spin_unlock_irqrestore(&nic->cb_lock, flags);
+
+ return err;
+}
+
+static int mdio_read(struct net_device *netdev, int addr, int reg)
+{
+ struct nic *nic = netdev_priv(netdev);
+ return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
+}
+
+static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
+{
+ struct nic *nic = netdev_priv(netdev);
+
+ nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
+}
+
+/* the standard mdio_ctrl() function for usual MII-compliant hardware */
+static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
+{
+ u32 data_out = 0;
+ unsigned int i;
+ unsigned long flags;
+
+ /*
+ * Stratus87247: we shouldn't be writing the MDI control
+ * register until the Ready bit shows True. Also, since
+ * manipulation of the MDI control registers is a multi-step
+ * procedure it should be done under lock.
+ */
+ spin_lock_irqsave(&nic->mdio_lock, flags);
+ for (i = 100; i; --i) {
+ if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
+ break;
+ udelay(20);
+ }
+ if (unlikely(!i)) {
+ netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
+ spin_unlock_irqrestore(&nic->mdio_lock, flags);
+ return 0; /* No way to indicate timeout error */
+ }
+ iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
+
+ for (i = 0; i < 100; i++) {
+ udelay(20);
+ if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
+ break;
+ }
+ spin_unlock_irqrestore(&nic->mdio_lock, flags);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
+ dir == mdi_read ? "READ" : "WRITE",
+ addr, reg, data, data_out);
+ return (u16)data_out;
+}
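+
+/* Example (illustrative): a BMSR read (reg 1) on PHY address 1
+ * writes mdi_read | (1 << 21) | (1 << 16) == 0x08210000 to
+ * mdi_ctrl, then polls until mdi_ready is set; the low 16 bits of
+ * the register then hold the data clocked in from the PHY.
+ */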
+
+/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
+static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
+ u32 addr,
+ u32 dir,
+ u32 reg,
+ u16 data)
+{
+ if ((reg == MII_BMCR) && (dir == mdi_write)) {
+ if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
+ u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
+ MII_ADVERTISE);
+
+ /*
+ * Workaround Si issue where sometimes the part will not
+ * autoneg to 100Mbps even when advertised.
+ */
+ if (advert & ADVERTISE_100FULL)
+ data |= BMCR_SPEED100 | BMCR_FULLDPLX;
+ else if (advert & ADVERTISE_100HALF)
+ data |= BMCR_SPEED100;
+ }
+ }
+ return mdio_ctrl_hw(nic, addr, dir, reg, data);
+}
+
+/* Fully software-emulated mdio_ctrl() function for cards without
+ * MII-compliant PHYs.
+ * For now, this is mainly geared towards 80c24 support; in case of further
+ * requirements for other types (i82503, ...?) either extend this mechanism
+ * or split it, whichever is cleaner.
+ */
+static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
+ u32 addr,
+ u32 dir,
+ u32 reg,
+ u16 data)
+{
+ /* might need to allocate a netdev_priv'ed register array eventually
+ * to be able to record state changes, but for now
+ * some fully hardcoded register handling ought to be ok I guess. */
+
+ if (dir == mdi_read) {
+ switch (reg) {
+ case MII_BMCR:
+ /* Auto-negotiation, right? */
+ return BMCR_ANENABLE |
+ BMCR_FULLDPLX;
+ case MII_BMSR:
+ return BMSR_LSTATUS /* for mii_link_ok() */ |
+ BMSR_ANEGCAPABLE |
+ BMSR_10FULL;
+ case MII_ADVERTISE:
+ /* 80c24 is a "combo card" PHY, right? */
+ return ADVERTISE_10HALF |
+ ADVERTISE_10FULL;
+ default:
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
+ dir == mdi_read ? "READ" : "WRITE",
+ addr, reg, data);
+ return 0xFFFF;
+ }
+ } else {
+ switch (reg) {
+ default:
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
+ dir == mdi_read ? "READ" : "WRITE",
+ addr, reg, data);
+ return 0xFFFF;
+ }
+ }
+}
+static inline int e100_phy_supports_mii(struct nic *nic)
+{
+ /* for now, just check it by comparing whether we
+ are using MII software emulation.
+ */
+ return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
+}
+
+static void e100_get_defaults(struct nic *nic)
+{
+ struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
+ struct param_range cbs = { .min = 64, .max = 256, .count = 128 };
+
+ /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
+ nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
+ if (nic->mac == mac_unknown)
+ nic->mac = mac_82557_D100_A;
+
+ nic->params.rfds = rfds;
+ nic->params.cbs = cbs;
+
+ /* Quadwords to DMA into FIFO before starting frame transmit */
+ nic->tx_threshold = 0xE0;
+
+ /* no interrupt for every tx completion, delay = 256us if not 557 */
+ nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
+ ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
+
+ /* Template for a freshly allocated RFD */
+ nic->blank_rfd.command = 0;
+ nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
+ nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
+
+ /* MII setup */
+ nic->mii.phy_id_mask = 0x1F;
+ nic->mii.reg_num_mask = 0x1F;
+ nic->mii.dev = nic->netdev;
+ nic->mii.mdio_read = mdio_read;
+ nic->mii.mdio_write = mdio_write;
+}
+
+static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+{
+ struct config *config = &cb->u.config;
+ u8 *c = (u8 *)config;
+
+ cb->command = cpu_to_le16(cb_config);
+
+ memset(config, 0, sizeof(struct config));
+
+ config->byte_count = 0x16; /* bytes in this struct */
+ config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */
+ config->direct_rx_dma = 0x1; /* reserved */
+ config->standard_tcb = 0x1; /* 1=standard, 0=extended */
+ config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */
+ config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */
+ config->tx_underrun_retry = 0x3; /* # of underrun retries */
+ if (e100_phy_supports_mii(nic))
+ config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */
+ config->pad10 = 0x6;
+ config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */
+ config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */
+ config->ifs = 0x6; /* x16 = inter frame spacing */
+ config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */
+ config->pad15_1 = 0x1;
+ config->pad15_2 = 0x1;
+ config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */
+ config->fc_delay_hi = 0x40; /* time delay for fc frame */
+ config->tx_padding = 0x1; /* 1=pad short frames */
+ config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */
+ config->pad18 = 0x1;
+ config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */
+ config->pad20_1 = 0x1F;
+ config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */
+ config->pad21_1 = 0x5;
+
+ config->adaptive_ifs = nic->adaptive_ifs;
+ config->loopback = nic->loopback;
+
+ if (nic->mii.force_media && nic->mii.full_duplex)
+ config->full_duplex_force = 0x1; /* 1=force, 0=auto */
+
+ if (nic->flags & promiscuous || nic->loopback) {
+ config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */
+ config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
+ config->promiscuous_mode = 0x1; /* 1=on, 0=off */
+ }
+
+ if (nic->flags & multicast_all)
+ config->multicast_all = 0x1; /* 1=accept, 0=no */
+
+ /* disable WoL when up */
+ if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
+ config->magic_packet_disable = 0x1; /* 1=off, 0=on */
+
+ if (nic->mac >= mac_82558_D101_A4) {
+ config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */
+ config->mwi_enable = 0x1; /* 1=enable, 0=disable */
+ config->standard_tcb = 0x0; /* 1=standard, 0=extended */
+ config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */
+ if (nic->mac >= mac_82559_D101M) {
+ config->tno_intr = 0x1; /* TCO stats enable */
+ /* Enable TCO in extended config */
+ if (nic->mac >= mac_82551_10) {
+ config->byte_count = 0x20; /* extended bytes */
+ config->rx_d102_mode = 0x1; /* GMRC for TCO */
+ }
+ } else {
+ config->standard_stat_counter = 0x0;
+ }
+ }
+
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
+}
+
+/*************************************************************************
+* CPUSaver parameters
+*
+* All CPUSaver parameters are 16-bit literals that are part of a
+* "move immediate value" instruction. By changing the value of
+* the literal in the instruction before the code is loaded, the
+* driver can change the algorithm.
+*
+* INTDELAY - This loads the dead-man timer with its initial value.
+* When this timer expires the interrupt is asserted, and the
+* timer is reset each time a new packet is received. (see
+* BUNDLEMAX below to set the limit on number of chained packets)
+* The current default is 0x600 or 1536. Experiments show that
+* the value should probably stay within the 0x200 - 0x1000 range.
+*
+* BUNDLEMAX -
+* This sets the maximum number of frames that will be bundled. In
+* some situations, such as the TCP windowing algorithm, it may be
+* better to limit the growth of the bundle size than let it go as
+* high as it can, because that could cause too much added latency.
+* The default is six, because this is the number of packets in the
+* default TCP window size. A value of 1 would make CPUSaver indicate
+* an interrupt for every frame received. If you do not want to put
+* a limit on the bundle size, set this value to xFFFF.
+*
+* BUNDLESMALL -
+* This contains a bit-mask describing the minimum size frame that
+* will be bundled. The default masks the lower 7 bits, which means
+* that any frame less than 128 bytes in length will not be bundled,
+* but will instead immediately generate an interrupt. This does
+* not affect the current bundle in any way. Any frame that is 128
+* bytes or larger will be bundled normally. This feature is meant
+* to provide immediate indication of ACK frames in a TCP environment.
+* Customers were seeing poor performance when a machine with CPUSaver
+* enabled was sending but not receiving. The delay introduced when
+* the ACKs were received was enough to reduce total throughput, because
+* the sender would sit idle until the ACK was finally seen.
+*
+* The current default is 0xFF80, which masks out the lower 7 bits.
+* This means that any frame which is x7F (127) bytes or smaller
+* will cause an immediate interrupt. Because this value must be a
+* bit mask, there are only a few valid values that can be used. To
+* turn this feature off, the driver can write the value xFFFF to the
+* lower word of this instruction (in the same way that the other
+* parameters are used). Likewise, a value of 0xF800 (2047) would
+* cause an interrupt to be generated for every frame, because all
+* standard Ethernet frames are <= 2047 bytes in length.
+*************************************************************************/
+
+/* if you wish to disable the ucode functionality, while maintaining the
+ * workarounds it provides, set the following defines to:
+ * BUNDLESMALL 0
+ * BUNDLEMAX 1
+ * INTDELAY 1
+ */
+#define BUNDLESMALL 1
+#define BUNDLEMAX (u16)6
+#define INTDELAY (u16)1536 /* 0x600 */
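+
+/* For illustration, assuming the ucode tests frame_size against the
+ * BUNDLESMALL mask: with the default 0xFF80 mask, a 64-byte ACK
+ * gives 64 & 0xFF80 == 0 and interrupts immediately, while a
+ * 1500-byte frame gives 1500 & 0xFF80 != 0 and is bundled.
+ */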
+
+/* Initialize firmware */
+static const struct firmware *e100_request_firmware(struct nic *nic)
+{
+ const char *fw_name;
+ const struct firmware *fw = nic->fw;
+ u8 timer, bundle, min_size;
+ int err = 0;
+
+ /* do not load u-code for ICH devices */
+ if (nic->flags & ich)
+ return NULL;
+
+ /* Search for ucode match against h/w revision */
+ if (nic->mac == mac_82559_D101M)
+ fw_name = FIRMWARE_D101M;
+ else if (nic->mac == mac_82559_D101S)
+ fw_name = FIRMWARE_D101S;
+ else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
+ fw_name = FIRMWARE_D102E;
+ else /* No ucode on other devices */
+ return NULL;
+
+ /* If the firmware has not previously been loaded, request a pointer
+ * to it. If it was previously loaded, we are reinitializing the
+ * adapter, possibly in a resume from hibernate, in which case
+ * request_firmware() cannot be used.
+ */
+ if (!fw)
+ err = request_firmware(&fw, fw_name, &nic->pdev->dev);
+
+ if (err) {
+ netif_err(nic, probe, nic->netdev,
+ "Failed to load firmware \"%s\": %d\n",
+ fw_name, err);
+ return ERR_PTR(err);
+ }
+
+ /* Firmware should be precisely UCODE_SIZE (words) plus three bytes
+ indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
+ if (fw->size != UCODE_SIZE * 4 + 3) {
+ netif_err(nic, probe, nic->netdev,
+ "Firmware \"%s\" has wrong size %zu\n",
+ fw_name, fw->size);
+ release_firmware(fw);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Read timer, bundle and min_size from end of firmware blob */
+ timer = fw->data[UCODE_SIZE * 4];
+ bundle = fw->data[UCODE_SIZE * 4 + 1];
+ min_size = fw->data[UCODE_SIZE * 4 + 2];
+
+ if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
+ min_size >= UCODE_SIZE) {
+ netif_err(nic, probe, nic->netdev,
+ "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
+ fw_name, timer, bundle, min_size);
+ release_firmware(fw);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* OK, firmware is validated and ready to use. Save a pointer
+ * to it in the nic */
+ nic->fw = fw;
+ return fw;
+}
+
+static void e100_setup_ucode(struct nic *nic, struct cb *cb,
+ struct sk_buff *skb)
+{
+ const struct firmware *fw = (void *)skb;
+ u8 timer, bundle, min_size;
+
+ /* It's not a real skb; we just abused the fact that e100_exec_cb
+ will pass it through to here... */
+ cb->skb = NULL;
+
+ /* firmware is stored as little endian already */
+ memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);
+
+ /* Read timer, bundle and min_size from end of firmware blob */
+ timer = fw->data[UCODE_SIZE * 4];
+ bundle = fw->data[UCODE_SIZE * 4 + 1];
+ min_size = fw->data[UCODE_SIZE * 4 + 2];
+
+ /* Insert user-tunable settings in cb->u.ucode */
+ cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
+ cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
+ cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
+ cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
+ cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
+ cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
+
+ cb->command = cpu_to_le16(cb_ucode | cb_el);
+}
+
+static inline int e100_load_ucode_wait(struct nic *nic)
+{
+ const struct firmware *fw;
+ int err = 0, counter = 50;
+ struct cb *cb = nic->cb_to_clean;
+
+ fw = e100_request_firmware(nic);
+ /* If it's NULL, then no ucode is required */
+ if (!fw || IS_ERR(fw))
+ return PTR_ERR(fw);
+
+ if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
+ netif_err(nic, probe, nic->netdev,
+ "ucode cmd failed with error %d\n", err);
+
+ /* must restart cuc */
+ nic->cuc_cmd = cuc_start;
+
+ /* wait for completion */
+ e100_write_flush(nic);
+ udelay(10);
+
+ /* wait for possibly (ouch) 500ms */
+ while (!(cb->status & cpu_to_le16(cb_complete))) {
+ msleep(10);
+ if (!--counter)
+ break;
+ }
+
+ /* ack any interrupts, something could have been set */
+ iowrite8(~0, &nic->csr->scb.stat_ack);
+
+ /* if the command failed, or is not OK, notify and return */
+ if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
+ netif_err(nic, probe, nic->netdev, "ucode load failed\n");
+ err = -EPERM;
+ }
+
+ return err;
+}
+
+static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
+ struct sk_buff *skb)
+{
+ cb->command = cpu_to_le16(cb_iaaddr);
+ memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
+}
+
+static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+{
+ cb->command = cpu_to_le16(cb_dump);
+ cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
+ offsetof(struct mem, dump_buf));
+}
+
+static int e100_phy_check_without_mii(struct nic *nic)
+{
+ u8 phy_type;
+ int without_mii;
+
+ phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
+
+ switch (phy_type) {
+ case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
+ case I82503: /* Non-MII PHY; UNTESTED! */
+ case S80C24: /* Non-MII PHY; tested and working */
+ /* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
+ * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
+ * doesn't have a programming interface of any sort. The
+ * media is sensed automatically based on how the link partner
+ * is configured. This is, in essence, manual configuration.
+ */
+ netif_info(nic, probe, nic->netdev,
+ "found MII-less i82503 or 80c24 or other PHY\n");
+
+ nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
+ nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
+
+ /* these might be needed for certain MII-less cards...
+ * nic->flags |= ich;
+ * nic->flags |= ich_10h_workaround; */
+
+ without_mii = 1;
+ break;
+ default:
+ without_mii = 0;
+ break;
+ }
+ return without_mii;
+}
+
+#define NCONFIG_AUTO_SWITCH 0x0080
+#define MII_NSC_CONG MII_RESV1
+#define NSC_CONG_ENABLE 0x0100
+#define NSC_CONG_TXREADY 0x0400
+#define ADVERTISE_FC_SUPPORTED 0x0400
+static int e100_phy_init(struct nic *nic)
+{
+ struct net_device *netdev = nic->netdev;
+ u32 addr;
+ u16 bmcr, stat, id_lo, id_hi, cong;
+
+ /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
+ for (addr = 0; addr < 32; addr++) {
+ nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
+ bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
+ stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
+ stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
+ if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
+ break;
+ }
+ if (addr == 32) {
+ /* uhoh, no PHY detected: check whether we seem to be some
+ * weird, rare variant which is *known* to not have any MII.
+ * But do this AFTER MII checking only, since this does
+ * lookup of EEPROM values which may easily be unreliable. */
+ if (e100_phy_check_without_mii(nic))
+ return 0; /* simply return and hope for the best */
+ else {
+ /* for unknown cases log a fatal error */
+ netif_err(nic, hw, nic->netdev,
+ "Failed to locate any known PHY, aborting\n");
+ return -EAGAIN;
+ }
+ } else
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "phy_addr = %d\n", nic->mii.phy_id);
+
+ /* Get phy ID */
+ id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
+ id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
+ nic->phy = (u32)id_hi << 16 | (u32)id_lo;
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "phy ID = 0x%08X\n", nic->phy);
+
+ /* Select the phy and isolate the rest */
+ for (addr = 0; addr < 32; addr++) {
+ if (addr != nic->mii.phy_id) {
+ mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
+ } else if (nic->phy != phy_82552_v) {
+ bmcr = mdio_read(netdev, addr, MII_BMCR);
+ mdio_write(netdev, addr, MII_BMCR,
+ bmcr & ~BMCR_ISOLATE);
+ }
+ }
+ /*
+ * Workaround for 82552:
+ * Clear the ISOLATE bit on selected phy_id last (mirrored on all
+ * other phy_id's) using bmcr value from addr discovery loop above.
+ */
+ if (nic->phy == phy_82552_v)
+ mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
+ bmcr & ~BMCR_ISOLATE);
+
+ /* Handle National tx phys */
+#define NCS_PHY_MODEL_MASK 0xFFF0FFFF
+ if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
+ /* Disable congestion control */
+ cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
+ cong |= NSC_CONG_TXREADY;
+ cong &= ~NSC_CONG_ENABLE;
+ mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
+ }
+
+ if (nic->phy == phy_82552_v) {
+ u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
+
+ /* assign special tweaked mdio_ctrl() function */
+ nic->mdio_ctrl = mdio_ctrl_phy_82552_v;
+
+ /* Workaround Si not advertising flow-control during autoneg */
+ advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);
+
+ /* Reset for the above changes to take effect */
+ bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
+ bmcr |= BMCR_RESET;
+ mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
+ } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
+ (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
+ !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
+ /* enable/disable MDI/MDI-X auto-switching. */
+ mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
+ nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
+ }
+
+ return 0;
+}
+
+static int e100_hw_init(struct nic *nic)
+{
+ int err = 0;
+
+ e100_hw_reset(nic);
+
+ netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
+ if (!in_interrupt() && (err = e100_self_test(nic)))
+ return err;
+
+ if ((err = e100_phy_init(nic)))
+ return err;
+ if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
+ return err;
+ if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
+ return err;
+ if ((err = e100_load_ucode_wait(nic)))
+ return err;
+ if ((err = e100_exec_cb(nic, NULL, e100_configure)))
+ return err;
+ if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
+ return err;
+ if ((err = e100_exec_cmd(nic, cuc_dump_addr,
+ nic->dma_addr + offsetof(struct mem, stats))))
+ return err;
+ if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
+ return err;
+
+ e100_disable_irq(nic);
+
+ return 0;
+}
+
+static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+{
+ struct net_device *netdev = nic->netdev;
+ struct netdev_hw_addr *ha;
+ u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
+
+ cb->command = cpu_to_le16(cb_multi);
+ cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (i == count)
+ break;
+ memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
+ ETH_ALEN);
+ }
+}
+
+static void e100_set_multicast_list(struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "mc_count=%d, flags=0x%04X\n",
+ netdev_mc_count(netdev), netdev->flags);
+
+ if (netdev->flags & IFF_PROMISC)
+ nic->flags |= promiscuous;
+ else
+ nic->flags &= ~promiscuous;
+
+ if (netdev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
+ nic->flags |= multicast_all;
+ else
+ nic->flags &= ~multicast_all;
+
+ e100_exec_cb(nic, NULL, e100_configure);
+ e100_exec_cb(nic, NULL, e100_multi);
+}
+
+static void e100_update_stats(struct nic *nic)
+{
+ struct net_device *dev = nic->netdev;
+ struct net_device_stats *ns = &dev->stats;
+ struct stats *s = &nic->mem->stats;
+ __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
+ (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
+ &s->complete;
+
+ /* Device's stats reporting may take several microseconds to
+ * complete, so we're always waiting for results of the
+ * previous command. */
+
+ if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
+ *complete = 0;
+ nic->tx_frames = le32_to_cpu(s->tx_good_frames);
+ nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
+ ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
+ ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
+ ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
+ ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
+ ns->collisions += nic->tx_collisions;
+ ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
+ le32_to_cpu(s->tx_lost_crs);
+ ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
+ nic->rx_over_length_errors;
+ ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
+ ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
+ ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
+ ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
+ ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
+ ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
+ le32_to_cpu(s->rx_alignment_errors) +
+ le32_to_cpu(s->rx_short_frame_errors) +
+ le32_to_cpu(s->rx_cdt_errors);
+ nic->tx_deferred += le32_to_cpu(s->tx_deferred);
+ nic->tx_single_collisions +=
+ le32_to_cpu(s->tx_single_collisions);
+ nic->tx_multiple_collisions +=
+ le32_to_cpu(s->tx_multiple_collisions);
+ if (nic->mac >= mac_82558_D101_A4) {
+ nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
+ nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
+ nic->rx_fc_unsupported +=
+ le32_to_cpu(s->fc_rcv_unsupported);
+ if (nic->mac >= mac_82559_D101M) {
+ nic->tx_tco_frames +=
+ le16_to_cpu(s->xmt_tco_frames);
+ nic->rx_tco_frames +=
+ le16_to_cpu(s->rcv_tco_frames);
+ }
+ }
+ }
+
+
+ if (e100_exec_cmd(nic, cuc_dump_reset, 0))
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "exec cuc_dump_reset failed\n");
+}
+
+static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
+{
+ /* Adjust inter-frame-spacing (IFS) between two transmits if
+ * we're getting collisions on a half-duplex connection. */
+
+ if (duplex == DUPLEX_HALF) {
+ u32 prev = nic->adaptive_ifs;
+ u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
+
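+ /* Widen the IFS when collisions exceed one per 32 frames;
+ * tighten it again once the frame rate drops below min_frames */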
+ if ((nic->tx_frames / 32 < nic->tx_collisions) &&
+ (nic->tx_frames > min_frames)) {
+ if (nic->adaptive_ifs < 60)
+ nic->adaptive_ifs += 5;
+ } else if (nic->tx_frames < min_frames) {
+ if (nic->adaptive_ifs >= 5)
+ nic->adaptive_ifs -= 5;
+ }
+ if (nic->adaptive_ifs != prev)
+ e100_exec_cb(nic, NULL, e100_configure);
+ }
+}
+
+static void e100_watchdog(unsigned long data)
+{
+ struct nic *nic = (struct nic *)data;
+ struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
+ u32 speed;
+
+ netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
+ "right now = %ld\n", jiffies);
+
+ /* mii library handles link maintenance tasks */
+
+ mii_ethtool_gset(&nic->mii, &cmd);
+ speed = ethtool_cmd_speed(&cmd);
+
+ if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
+ netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
+ speed == SPEED_100 ? 100 : 10,
+ cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
+ } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
+ netdev_info(nic->netdev, "NIC Link is Down\n");
+ }
+
+ mii_check_link(&nic->mii);
+
+ /* Software generated interrupt to recover from (rare) Rx
+ * allocation failure.
+ * Unfortunately we have to use a spinlock so that we don't re-enable
+ * interrupts accidentally: the hardware shares one register between
+ * the interrupt mask bit and the SW interrupt generation bit */
+ spin_lock_irq(&nic->cmd_lock);
+ iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen, &nic->csr->scb.cmd_hi);
+ e100_write_flush(nic);
+ spin_unlock_irq(&nic->cmd_lock);
+
+ e100_update_stats(nic);
+ e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);
+
+ if (nic->mac <= mac_82557_D100_C)
+ /* Issue a multicast command to workaround a 557 lock up */
+ e100_set_multicast_list(nic->netdev);
+
+ if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
+ /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
+ nic->flags |= ich_10h_workaround;
+ else
+ nic->flags &= ~ich_10h_workaround;
+
+ mod_timer(&nic->watchdog,
+ round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
+}
+
+static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
+ struct sk_buff *skb)
+{
+ cb->command = nic->tx_command;
+ /* interrupt every 16 packets regardless of delay; the test below
+ * holds exactly when cbs_avail is a multiple of 16 */
+ if ((nic->cbs_avail & ~15) == nic->cbs_avail)
+ cb->command |= cpu_to_le16(cb_i);
+ cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
+ cb->u.tcb.tcb_byte_count = 0;
+ cb->u.tcb.threshold = nic->tx_threshold;
+ cb->u.tcb.tbd_count = 1;
+ cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
+ skb->data, skb->len, PCI_DMA_TODEVICE));
+ /* check for mapping failure? */
+ cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
+}
+
+static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+ int err;
+
+ if (nic->flags & ich_10h_workaround) {
+ /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
+ Issue a NOP command followed by a 1us delay before
+ issuing the Tx command. */
+ if (e100_exec_cmd(nic, cuc_nop, 0))
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "exec cuc_nop failed\n");
+ udelay(1);
+ }
+
+ err = e100_exec_cb(nic, skb, e100_xmit_prepare);
+
+ switch (err) {
+ case -ENOSPC:
+ /* We queued the skb, but now we're out of space. */
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "No space for CB\n");
+ netif_stop_queue(netdev);
+ break;
+ case -ENOMEM:
+ /* This is a hard error - log it. */
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "Out of Tx resources, returning skb\n");
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static int e100_tx_clean(struct nic *nic)
+{
+ struct net_device *dev = nic->netdev;
+ struct cb *cb;
+ int tx_cleaned = 0;
+
+ spin_lock(&nic->cb_lock);
+
+ /* Clean CBs marked complete */
+ for (cb = nic->cb_to_clean;
+ cb->status & cpu_to_le16(cb_complete);
+ cb = nic->cb_to_clean = cb->next) {
+ rmb(); /* read skb after status */
+ netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
+ "cb[%d]->status = 0x%04X\n",
+ (int)(((void *)cb - (void *)nic->cbs) / sizeof(struct cb)),
+ cb->status);
+
+ if (likely(cb->skb != NULL)) {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += cb->skb->len;
+
+ pci_unmap_single(nic->pdev,
+ le32_to_cpu(cb->u.tcb.tbd.buf_addr),
+ le16_to_cpu(cb->u.tcb.tbd.size),
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(cb->skb);
+ cb->skb = NULL;
+ tx_cleaned = 1;
+ }
+ cb->status = 0;
+ nic->cbs_avail++;
+ }
+
+ spin_unlock(&nic->cb_lock);
+
+ /* Recover from running out of Tx resources in xmit_frame */
+ if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
+ netif_wake_queue(nic->netdev);
+
+ return tx_cleaned;
+}
+
+static void e100_clean_cbs(struct nic *nic)
+{
+ if (nic->cbs) {
+ while (nic->cbs_avail != nic->params.cbs.count) {
+ struct cb *cb = nic->cb_to_clean;
+ if (cb->skb) {
+ pci_unmap_single(nic->pdev,
+ le32_to_cpu(cb->u.tcb.tbd.buf_addr),
+ le16_to_cpu(cb->u.tcb.tbd.size),
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(cb->skb);
+ }
+ nic->cb_to_clean = nic->cb_to_clean->next;
+ nic->cbs_avail++;
+ }
+ pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
+ nic->cbs = NULL;
+ nic->cbs_avail = 0;
+ }
+ nic->cuc_cmd = cuc_start;
+ nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
+ nic->cbs;
+}
+
+static int e100_alloc_cbs(struct nic *nic)
+{
+ struct cb *cb;
+ unsigned int i, count = nic->params.cbs.count;
+
+ nic->cuc_cmd = cuc_start;
+ nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
+ nic->cbs_avail = 0;
+
+ nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
+ &nic->cbs_dma_addr);
+ if (!nic->cbs)
+ return -ENOMEM;
+ memset(nic->cbs, 0, count * sizeof(struct cb));
+
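+ /* Chain the CBs into a ring: next/prev pointers for the driver,
+ * little-endian DMA link addresses for the hardware */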
+ for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
+ cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
+ cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
+
+ cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
+ cb->link = cpu_to_le32(nic->cbs_dma_addr +
+ ((i+1) % count) * sizeof(struct cb));
+ }
+
+ nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
+ nic->cbs_avail = count;
+
+ return 0;
+}
+
+static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
+{
+ if (!nic->rxs)
+ return;
+ if (RU_SUSPENDED != nic->ru_running)
+ return;
+
+ /* handle init-time starts */
+ if (!rx)
+ rx = nic->rxs;
+
+ /* (Re)start RU if suspended or idle and RFA is non-NULL */
+ if (rx->skb) {
+ e100_exec_cmd(nic, ruc_start, rx->dma_addr);
+ nic->ru_running = RU_RUNNING;
+ }
+}
+
+#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
+static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
+{
+ if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
+ return -ENOMEM;
+
+ /* Init, and map the RFD: each Rx skb begins with a blank RFD that
+ * the hardware completes on receive */
+ skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
+ rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
+ RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
+
+ if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
+ dev_kfree_skb_any(rx->skb);
+ rx->skb = NULL;
+ rx->dma_addr = 0;
+ return -ENOMEM;
+ }
+
+ /* Link the RFD to the end of the RFA by linking the previous RFD to
+ * this one. We are safe to touch the previous RFD because it is
+ * protected by the el bit being set on the before-last buffer */
+ if (rx->prev->skb) {
+ struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
+ put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
+ pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
+ sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
+ }
+
+ return 0;
+}
+
+static int e100_rx_indicate(struct nic *nic, struct rx *rx,
+ unsigned int *work_done, unsigned int work_to_do)
+{
+ struct net_device *dev = nic->netdev;
+ struct sk_buff *skb = rx->skb;
+ struct rfd *rfd = (struct rfd *)skb->data;
+ u16 rfd_status, actual_size;
+
+ if (unlikely(work_done && *work_done >= work_to_do))
+ return -EAGAIN;
+
+ /* Need to sync before taking a peek at cb_complete bit */
+ pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
+ sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
+ rfd_status = le16_to_cpu(rfd->status);
+
+ netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
+ "status=0x%04X\n", rfd_status);
+ rmb(); /* read size after status bit */
+
+ /* If data isn't ready, nothing to indicate */
+ if (unlikely(!(rfd_status & cb_complete))) {
+ /* If the next buffer has the el bit, but we think the receiver
+ * is still running, check to see if it really stopped while
+ * we had interrupts off.
+ * This allows for a fast restart without re-enabling
+ * interrupts */
+ if ((le16_to_cpu(rfd->command) & cb_el) &&
+ (RU_RUNNING == nic->ru_running)) {
+ if (ioread8(&nic->csr->scb.status) & rus_no_res)
+ nic->ru_running = RU_SUSPENDED;
+ }
+ pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
+ sizeof(struct rfd),
+ PCI_DMA_FROMDEVICE);
+ return -ENODATA;
+ }
+
+ /* Get actual data size; the low 14 bits of actual_size hold the
+ * byte count, the upper bits are status flags */
+ actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
+ if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
+ actual_size = RFD_BUF_LEN - sizeof(struct rfd);
+
+ /* Get data */
+ pci_unmap_single(nic->pdev, rx->dma_addr,
+ RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
+
+ /* If this buffer has the el bit, but we think the receiver
+ * is still running, check to see if it really stopped while
+ * we had interrupts off.
+ * This allows for a fast restart without re-enabling interrupts.
+ * This can happen when the RU sees the size change but also sees
+ * the el bit set. */
+ if ((le16_to_cpu(rfd->command) & cb_el) &&
+ (RU_RUNNING == nic->ru_running)) {
+
+ if (ioread8(&nic->csr->scb.status) & rus_no_res)
+ nic->ru_running = RU_SUSPENDED;
+ }
+
+ /* Pull off the RFD and put the actual data (minus eth hdr) */
+ skb_reserve(skb, sizeof(struct rfd));
+ skb_put(skb, actual_size);
+ skb->protocol = eth_type_trans(skb, nic->netdev);
+
+ if (unlikely(!(rfd_status & cb_ok))) {
+ /* Don't indicate if hardware indicates errors */
+ dev_kfree_skb_any(skb);
+ } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
+ /* Don't indicate oversized frames */
+ nic->rx_over_length_errors++;
+ dev_kfree_skb_any(skb);
+ } else {
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += actual_size;
+ netif_receive_skb(skb);
+ if (work_done)
+ (*work_done)++;
+ }
+
+ rx->skb = NULL;
+
+ return 0;
+}
+
+static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
+ unsigned int work_to_do)
+{
+ struct rx *rx;
+ int restart_required = 0, err = 0;
+ struct rx *old_before_last_rx, *new_before_last_rx;
+ struct rfd *old_before_last_rfd, *new_before_last_rfd;
+
+ /* Indicate newly arrived packets */
+ for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
+ err = e100_rx_indicate(nic, rx, work_done, work_to_do);
+ /* Hit quota or no more to clean */
+ if (-EAGAIN == err || -ENODATA == err)
+ break;
+ }
+
+ /* On EAGAIN we hit the quota, so there is more work to do; restart
+ * once cleanup is complete.
+ * Otherwise, if we are already in the RNR state, pay attention: this
+ * ensures the state machine never restarts the receiver with a
+ * partially cleaned list, avoiding a race between the hardware and
+ * rx_to_clean when in NAPI mode */
+ if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
+ restart_required = 1;
+
+ old_before_last_rx = nic->rx_to_use->prev->prev;
+ old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;
+
+ /* Alloc new skbs to refill list */
+ for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
+ if (unlikely(e100_rx_alloc_skb(nic, rx)))
+ break; /* Better luck next time (see watchdog) */
+ }
+
+ new_before_last_rx = nic->rx_to_use->prev->prev;
+ if (new_before_last_rx != old_before_last_rx) {
+ /* Set the el-bit on the buffer that is before the last buffer.
+ * This lets us update the next pointer on the last buffer
+ * without worrying about hardware touching it.
+ * We set the size to 0 to prevent hardware from touching this
+ * buffer.
+ * When the hardware hits the before-last buffer with the el-bit
+ * set and a size of 0, it raises an RNR interrupt and the RU goes
+ * into the No Resources state. It will neither complete nor write
+ * to this buffer. */
+ new_before_last_rfd =
+ (struct rfd *)new_before_last_rx->skb->data;
+ new_before_last_rfd->size = 0;
+ new_before_last_rfd->command |= cpu_to_le16(cb_el);
+ pci_dma_sync_single_for_device(nic->pdev,
+ new_before_last_rx->dma_addr, sizeof(struct rfd),
+ PCI_DMA_BIDIRECTIONAL);
+
+ /* Now that we have a new stopping point, we can clear the old
+ * stopping point. We must sync twice to get the proper
+ * ordering on the hardware side of things. */
+ old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
+ pci_dma_sync_single_for_device(nic->pdev,
+ old_before_last_rx->dma_addr, sizeof(struct rfd),
+ PCI_DMA_BIDIRECTIONAL);
+ old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
+ pci_dma_sync_single_for_device(nic->pdev,
+ old_before_last_rx->dma_addr, sizeof(struct rfd),
+ PCI_DMA_BIDIRECTIONAL);
+ }
+
+ if (restart_required) {
+ /* ack the RNR interrupt */
+ iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
+ e100_start_receiver(nic, nic->rx_to_clean);
+ if (work_done)
+ (*work_done)++;
+ }
+}
+
+static void e100_rx_clean_list(struct nic *nic)
+{
+ struct rx *rx;
+ unsigned int i, count = nic->params.rfds.count;
+
+ nic->ru_running = RU_UNINITIALIZED;
+
+ if (nic->rxs) {
+ for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
+ if (rx->skb) {
+ pci_unmap_single(nic->pdev, rx->dma_addr,
+ RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
+ dev_kfree_skb(rx->skb);
+ }
+ }
+ kfree(nic->rxs);
+ nic->rxs = NULL;
+ }
+
+ nic->rx_to_use = nic->rx_to_clean = NULL;
+}
+
+static int e100_rx_alloc_list(struct nic *nic)
+{
+ struct rx *rx;
+ unsigned int i, count = nic->params.rfds.count;
+ struct rfd *before_last;
+
+ nic->rx_to_use = nic->rx_to_clean = NULL;
+ nic->ru_running = RU_UNINITIALIZED;
+
+ if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
+ return -ENOMEM;
+
+ for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
+ rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
+ rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
+ if (e100_rx_alloc_skb(nic, rx)) {
+ e100_rx_clean_list(nic);
+ return -ENOMEM;
+ }
+ }
+ /* Set the el-bit on the buffer that is before the last buffer.
+ * This lets us update the next pointer on the last buffer without
+ * worrying about hardware touching it.
+ * We set the size to 0 to prevent hardware from touching this buffer.
+ * When the hardware hits the before-last buffer with the el-bit set
+ * and a size of 0, it raises an RNR interrupt and the RU goes into
+ * the No Resources state. It will neither complete nor write to this
+ * buffer. */
+ rx = nic->rxs->prev->prev;
+ before_last = (struct rfd *)rx->skb->data;
+ before_last->command |= cpu_to_le16(cb_el);
+ before_last->size = 0;
+ pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
+ sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
+
+ nic->rx_to_use = nic->rx_to_clean = nic->rxs;
+ nic->ru_running = RU_SUSPENDED;
+
+ return 0;
+}
+
+static irqreturn_t e100_intr(int irq, void *dev_id)
+{
+ struct net_device *netdev = dev_id;
+ struct nic *nic = netdev_priv(netdev);
+ u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
+
+ netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
+ "stat_ack = 0x%02X\n", stat_ack);
+
+ if (stat_ack == stat_ack_not_ours || /* Not our interrupt */
+ stat_ack == stat_ack_not_present) /* Hardware is ejected */
+ return IRQ_NONE;
+
+ /* Ack interrupt(s) */
+ iowrite8(stat_ack, &nic->csr->scb.stat_ack);
+
+ /* We hit Receive No Resource (RNR); restart RU after cleaning */
+ if (stat_ack & stat_ack_rnr)
+ nic->ru_running = RU_SUSPENDED;
+
+ if (likely(napi_schedule_prep(&nic->napi))) {
+ e100_disable_irq(nic);
+ __napi_schedule(&nic->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int e100_poll(struct napi_struct *napi, int budget)
+{
+ struct nic *nic = container_of(napi, struct nic, napi);
+ unsigned int work_done = 0;
+
+ e100_rx_clean(nic, &work_done, budget);
+ e100_tx_clean(nic);
+
+ /* If budget not fully consumed, exit the polling mode */
+ if (work_done < budget) {
+ napi_complete(napi);
+ e100_enable_irq(nic);
+ }
+
+ return work_done;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void e100_netpoll(struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+
+ e100_disable_irq(nic);
+ e100_intr(nic->pdev->irq, netdev);
+ e100_tx_clean(nic);
+ e100_enable_irq(nic);
+}
+#endif
+
+static int e100_set_mac_address(struct net_device *netdev, void *p)
+{
+ struct nic *nic = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ e100_exec_cb(nic, NULL, e100_setup_iaaddr);
+
+ return 0;
+}
+
+static int e100_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
+ return -EINVAL;
+ netdev->mtu = new_mtu;
+ return 0;
+}
+
+static int e100_asf(struct nic *nic)
+{
+ /* ASF can be enabled from eeprom */
+ return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
+ (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
+ !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
+ ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
+}
+
+static int e100_up(struct nic *nic)
+{
+ int err;
+
+ if ((err = e100_rx_alloc_list(nic)))
+ return err;
+ if ((err = e100_alloc_cbs(nic)))
+ goto err_rx_clean_list;
+ if ((err = e100_hw_init(nic)))
+ goto err_clean_cbs;
+ e100_set_multicast_list(nic->netdev);
+ e100_start_receiver(nic, NULL);
+ mod_timer(&nic->watchdog, jiffies);
+ if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
+ nic->netdev->name, nic->netdev)))
+ goto err_no_irq;
+ netif_wake_queue(nic->netdev);
+ napi_enable(&nic->napi);
+ /* enable interrupts _after_ enabling poll, preventing a race with
+ * the disable-interrupts-then-schedule sequence in the IRQ handler */
+ e100_enable_irq(nic);
+ return 0;
+
+err_no_irq:
+ del_timer_sync(&nic->watchdog);
+err_clean_cbs:
+ e100_clean_cbs(nic);
+err_rx_clean_list:
+ e100_rx_clean_list(nic);
+ return err;
+}
+
+static void e100_down(struct nic *nic)
+{
+ /* wait here for poll to complete */
+ napi_disable(&nic->napi);
+ netif_stop_queue(nic->netdev);
+ e100_hw_reset(nic);
+ free_irq(nic->pdev->irq, nic->netdev);
+ del_timer_sync(&nic->watchdog);
+ netif_carrier_off(nic->netdev);
+ e100_clean_cbs(nic);
+ e100_rx_clean_list(nic);
+}
+
+static void e100_tx_timeout(struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+
+ /* Reset outside of interrupt context, to avoid request_irq
+ * in interrupt context */
+ schedule_work(&nic->tx_timeout_task);
+}
+
+static void e100_tx_timeout_task(struct work_struct *work)
+{
+ struct nic *nic = container_of(work, struct nic, tx_timeout_task);
+ struct net_device *netdev = nic->netdev;
+
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ e100_down(netdev_priv(netdev));
+ e100_up(netdev_priv(netdev));
+ }
+ rtnl_unlock();
+}
+
+static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
+{
+ int err;
+ struct sk_buff *skb;
+
+ /* Use driver resources to perform internal MAC or PHY
+ * loopback test. A single packet is prepared and transmitted
+ * in loopback mode, and the test passes if the received
+ * packet compares byte-for-byte to the transmitted packet. */
+
+ if ((err = e100_rx_alloc_list(nic)))
+ return err;
+ if ((err = e100_alloc_cbs(nic)))
+ goto err_clean_rx;
+
+ /* ICH PHY loopback is broken so do MAC loopback instead */
+ if (nic->flags & ich && loopback_mode == lb_phy)
+ loopback_mode = lb_mac;
+
+ nic->loopback = loopback_mode;
+ if ((err = e100_hw_init(nic)))
+ goto err_loopback_none;
+
+ if (loopback_mode == lb_phy)
+ mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
+ BMCR_LOOPBACK);
+
+ e100_start_receiver(nic, NULL);
+
+ if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
+ err = -ENOMEM;
+ goto err_loopback_none;
+ }
+ skb_put(skb, ETH_DATA_LEN);
+ memset(skb->data, 0xFF, ETH_DATA_LEN);
+ e100_xmit_frame(skb, nic->netdev);
+
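+ /* Give the hardware a moment to loop the frame back into the
+ * first Rx buffer */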
+ msleep(10);
+
+ pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
+ RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
+
+ if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
+ skb->data, ETH_DATA_LEN))
+ err = -EAGAIN;
+
+err_loopback_none:
+ mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
+ nic->loopback = lb_none;
+ e100_clean_cbs(nic);
+ e100_hw_reset(nic);
+err_clean_rx:
+ e100_rx_clean_list(nic);
+ return err;
+}
+
+#define MII_LED_CONTROL 0x1B
+#define E100_82552_LED_OVERRIDE 0x19
+#define E100_82552_LED_ON 0x000F /* LED_TX and LED_RX both on */
+#define E100_82552_LED_OFF 0x000A /* LED_TX and LED_RX both off */
+
+static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ struct nic *nic = netdev_priv(netdev);
+ return mii_ethtool_gset(&nic->mii, cmd);
+}
+
+static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ struct nic *nic = netdev_priv(netdev);
+ int err;
+
+ mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
+ err = mii_ethtool_sset(&nic->mii, cmd);
+ e100_exec_cb(nic, NULL, e100_configure);
+
+ return err;
+}
+
+static void e100_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct nic *nic = netdev_priv(netdev);
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->fw_version, "N/A");
+ strcpy(info->bus_info, pci_name(nic->pdev));
+}
+
+#define E100_PHY_REGS 0x1C
+static int e100_get_regs_len(struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+ return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
+}
+
+static void e100_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct nic *nic = netdev_priv(netdev);
+ u32 *buff = p;
+ int i;
+
+ regs->version = (1 << 24) | nic->pdev->revision;
+ buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
+ ioread8(&nic->csr->scb.cmd_lo) << 16 |
+ ioread16(&nic->csr->scb.status);
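+ /* Read the PHY registers highest-first; register E100_PHY_REGS
+ * lands in buff[1], register 0 in buff[1 + E100_PHY_REGS] */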
+ for (i = E100_PHY_REGS; i >= 0; i--)
+ buff[1 + E100_PHY_REGS - i] =
+ mdio_read(netdev, nic->mii.phy_id, i);
+ memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
+ e100_exec_cb(nic, NULL, e100_dump);
+ msleep(10);
+ memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
+ sizeof(nic->mem->dump_buf));
+}
+
+static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct nic *nic = netdev_priv(netdev);
+ wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
+ wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
+}
+
+static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct nic *nic = netdev_priv(netdev);
+
+ if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
+ !device_can_wakeup(&nic->pdev->dev))
+ return -EOPNOTSUPP;
+
+ if (wol->wolopts)
+ nic->flags |= wol_magic;
+ else
+ nic->flags &= ~wol_magic;
+
+ device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
+
+ e100_exec_cb(nic, NULL, e100_configure);
+
+ return 0;
+}
+
+static u32 e100_get_msglevel(struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+ return nic->msg_enable;
+}
+
+static void e100_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct nic *nic = netdev_priv(netdev);
+ nic->msg_enable = value;
+}
+
+static int e100_nway_reset(struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+ return mii_nway_restart(&nic->mii);
+}
+
+static u32 e100_get_link(struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+ return mii_link_ok(&nic->mii);
+}
+
+static int e100_get_eeprom_len(struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+ return nic->eeprom_wc << 1;
+}
+
+#define E100_EEPROM_MAGIC 0x1234
+static int e100_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct nic *nic = netdev_priv(netdev);
+
+ eeprom->magic = E100_EEPROM_MAGIC;
+ memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
+
+ return 0;
+}
+
+static int e100_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct nic *nic = netdev_priv(netdev);
+
+ if (eeprom->magic != E100_EEPROM_MAGIC)
+ return -EINVAL;
+
+ memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
+
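+ /* The EEPROM is addressed in 16-bit words; widen the byte range
+ * to whole words before saving */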
+ return e100_eeprom_save(nic, eeprom->offset >> 1,
+ (eeprom->len >> 1) + 1);
+}
+
+static void e100_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct nic *nic = netdev_priv(netdev);
+ struct param_range *rfds = &nic->params.rfds;
+ struct param_range *cbs = &nic->params.cbs;
+
+ ring->rx_max_pending = rfds->max;
+ ring->tx_max_pending = cbs->max;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rfds->count;
+ ring->tx_pending = cbs->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int e100_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct nic *nic = netdev_priv(netdev);
+ struct param_range *rfds = &nic->params.rfds;
+ struct param_range *cbs = &nic->params.cbs;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ if (netif_running(netdev))
+ e100_down(nic);
+ rfds->count = max(ring->rx_pending, rfds->min);
+ rfds->count = min(rfds->count, rfds->max);
+ cbs->count = max(ring->tx_pending, cbs->min);
+ cbs->count = min(cbs->count, cbs->max);
+ netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx: %d\n",
+ rfds->count, cbs->count);
+ if (netif_running(netdev))
+ e100_up(nic);
+
+ return 0;
+}
+
+static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Link test (on/offline)",
+ "Eeprom test (on/offline)",
+ "Self test (offline)",
+ "Mac loopback (offline)",
+ "Phy loopback (offline)",
+};
+#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test)
+
+static void e100_diag_test(struct net_device *netdev,
+ struct ethtool_test *test, u64 *data)
+{
+ struct ethtool_cmd cmd;
+ struct nic *nic = netdev_priv(netdev);
+ int i, err;
+
+ memset(data, 0, E100_TEST_LEN * sizeof(u64));
+ data[0] = !mii_link_ok(&nic->mii);
+ data[1] = e100_eeprom_load(nic);
+ if (test->flags & ETH_TEST_FL_OFFLINE) {
+
+ /* save speed, duplex & autoneg settings */
+ err = mii_ethtool_gset(&nic->mii, &cmd);
+
+ if (netif_running(netdev))
+ e100_down(nic);
+ data[2] = e100_self_test(nic);
+ data[3] = e100_loopback_test(nic, lb_mac);
+ data[4] = e100_loopback_test(nic, lb_phy);
+
+ /* restore speed, duplex & autoneg settings */
+ err = mii_ethtool_sset(&nic->mii, &cmd);
+
+ if (netif_running(netdev))
+ e100_up(nic);
+ }
+ for (i = 0; i < E100_TEST_LEN; i++)
+ test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
+
+ msleep_interruptible(4 * 1000);
+}
+
+static int e100_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct nic *nic = netdev_priv(netdev);
+ enum led_state {
+ led_on = 0x01,
+ led_off = 0x04,
+ led_on_559 = 0x05,
+ led_on_557 = 0x07,
+ };
+ u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
+ MII_LED_CONTROL;
+ u16 leds = 0;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 2;
+
+ case ETHTOOL_ID_ON:
+ leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
+ (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
+ break;
+
+ case ETHTOOL_ID_OFF:
+ leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ break;
+ }
+
+ mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
+ return 0;
+}
+
+static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
+ "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
+ "rx_length_errors", "rx_over_errors", "rx_crc_errors",
+ "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
+ "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
+ "tx_heartbeat_errors", "tx_window_errors",
+ /* device-specific stats */
+ "tx_deferred", "tx_single_collisions", "tx_multi_collisions",
+ "tx_flow_control_pause", "rx_flow_control_pause",
+ "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
+};
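+/* The first E100_NET_STATS_LEN strings map 1:1 onto the leading
+ * unsigned long fields of struct net_device_stats */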
+#define E100_NET_STATS_LEN 21
+#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats)
+
+static int e100_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return E100_TEST_LEN;
+ case ETH_SS_STATS:
+ return E100_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void e100_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct nic *nic = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < E100_NET_STATS_LEN; i++)
+ data[i] = ((unsigned long *)&netdev->stats)[i];
+
+ data[i++] = nic->tx_deferred;
+ data[i++] = nic->tx_single_collisions;
+ data[i++] = nic->tx_multiple_collisions;
+ data[i++] = nic->tx_fc_pause;
+ data[i++] = nic->rx_fc_pause;
+ data[i++] = nic->rx_fc_unsupported;
+ data[i++] = nic->tx_tco_frames;
+ data[i++] = nic->rx_tco_frames;
+}
+
+static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
+ break;
+ case ETH_SS_STATS:
+ memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
+ break;
+ }
+}
+
+static const struct ethtool_ops e100_ethtool_ops = {
+ .get_settings = e100_get_settings,
+ .set_settings = e100_set_settings,
+ .get_drvinfo = e100_get_drvinfo,
+ .get_regs_len = e100_get_regs_len,
+ .get_regs = e100_get_regs,
+ .get_wol = e100_get_wol,
+ .set_wol = e100_set_wol,
+ .get_msglevel = e100_get_msglevel,
+ .set_msglevel = e100_set_msglevel,
+ .nway_reset = e100_nway_reset,
+ .get_link = e100_get_link,
+ .get_eeprom_len = e100_get_eeprom_len,
+ .get_eeprom = e100_get_eeprom,
+ .set_eeprom = e100_set_eeprom,
+ .get_ringparam = e100_get_ringparam,
+ .set_ringparam = e100_set_ringparam,
+ .self_test = e100_diag_test,
+ .get_strings = e100_get_strings,
+ .set_phys_id = e100_set_phys_id,
+ .get_ethtool_stats = e100_get_ethtool_stats,
+ .get_sset_count = e100_get_sset_count,
+};
+
+static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct nic *nic = netdev_priv(netdev);
+
+ return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
+}
+
+static int e100_alloc(struct nic *nic)
+{
+ nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
+ &nic->dma_addr);
+ return nic->mem ? 0 : -ENOMEM;
+}
+
+static void e100_free(struct nic *nic)
+{
+ if (nic->mem) {
+ pci_free_consistent(nic->pdev, sizeof(struct mem),
+ nic->mem, nic->dma_addr);
+ nic->mem = NULL;
+ }
+}
+
+static int e100_open(struct net_device *netdev)
+{
+ struct nic *nic = netdev_priv(netdev);
+ int err = 0;
+
+ netif_carrier_off(netdev);
+ if ((err = e100_up(nic)))
+ netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
+ return err;
+}
+
+static int e100_close(struct net_device *netdev)
+{
+ e100_down(netdev_priv(netdev));
+ return 0;
+}
+
+static const struct net_device_ops e100_netdev_ops = {
+ .ndo_open = e100_open,
+ .ndo_stop = e100_close,
+ .ndo_start_xmit = e100_xmit_frame,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = e100_set_multicast_list,
+ .ndo_set_mac_address = e100_set_mac_address,
+ .ndo_change_mtu = e100_change_mtu,
+ .ndo_do_ioctl = e100_do_ioctl,
+ .ndo_tx_timeout = e100_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = e100_netpoll,
+#endif
+};
+
+static int __devinit e100_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct nic *nic;
+ int err;
+
+ if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
+ if (((1 << debug) - 1) & NETIF_MSG_PROBE)
+ pr_err("Etherdev alloc failed, aborting\n");
+ return -ENOMEM;
+ }
+
+ netdev->netdev_ops = &e100_netdev_ops;
+ SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
+ netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
+ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+ nic = netdev_priv(netdev);
+ netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
+ nic->netdev = netdev;
+ nic->pdev = pdev;
+ nic->msg_enable = (1 << debug) - 1;
+ nic->mdio_ctrl = mdio_ctrl_hw;
+ pci_set_drvdata(pdev, netdev);
+
+ if ((err = pci_enable_device(pdev))) {
+ netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
+ goto err_out_free_dev;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
+ err = -ENODEV;
+ goto err_out_disable_pdev;
+ }
+
+ if ((err = pci_request_regions(pdev, DRV_NAME))) {
+ netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
+ goto err_out_disable_pdev;
+ }
+
+ if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
+ goto err_out_free_res;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ if (use_io)
+ netif_info(nic, probe, nic->netdev, "using i/o access mode\n");
+
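+ /* BAR 0 maps the CSRs in memory space; BAR 1 exposes the same
+ * registers in I/O space */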
+ nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
+ if (!nic->csr) {
+ netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
+ err = -ENOMEM;
+ goto err_out_free_res;
+ }
+
+ if (ent->driver_data)
+ nic->flags |= ich;
+ else
+ nic->flags &= ~ich;
+
+ e100_get_defaults(nic);
+
+ /* locks must be initialized before calling hw_reset */
+ spin_lock_init(&nic->cb_lock);
+ spin_lock_init(&nic->cmd_lock);
+ spin_lock_init(&nic->mdio_lock);
+
+ /* Reset the device before pci_set_master() in case device is in some
+ * funky state and has an interrupt pending - hint: we don't have the
+ * interrupt handler registered yet. */
+ e100_hw_reset(nic);
+
+ pci_set_master(pdev);
+
+ init_timer(&nic->watchdog);
+ nic->watchdog.function = e100_watchdog;
+ nic->watchdog.data = (unsigned long)nic;
+
+ INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
+
+ if ((err = e100_alloc(nic))) {
+ netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
+ goto err_out_iounmap;
+ }
+
+ if ((err = e100_eeprom_load(nic)))
+ goto err_out_free;
+
+ e100_phy_init(nic);
+
+ memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
+ memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
+ if (!is_valid_ether_addr(netdev->perm_addr)) {
+ if (!eeprom_bad_csum_allow) {
+ netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
+ err = -EAGAIN;
+ goto err_out_free;
+ } else {
+ netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
+ }
+ }
+
+ /* Wol magic packet can be enabled from eeprom */
+ if ((nic->mac >= mac_82558_D101_A4) &&
+ (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
+ nic->flags |= wol_magic;
+ device_set_wakeup_enable(&pdev->dev, true);
+ }
+
+ /* ack any pending wake events, disable PME */
+ pci_pme_active(pdev, false);
+
+ strcpy(netdev->name, "eth%d");
+ if ((err = register_netdev(netdev))) {
+ netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
+ goto err_out_free;
+ }
+ nic->cbs_pool = pci_pool_create(netdev->name,
+ nic->pdev,
+ nic->params.cbs.max * sizeof(struct cb),
+ sizeof(u32),
+ 0);
+ netif_info(nic, probe, nic->netdev,
+ "addr 0x%llx, irq %d, MAC addr %pM\n",
+ (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
+ pdev->irq, netdev->dev_addr);
+
+ return 0;
+
+err_out_free:
+ e100_free(nic);
+err_out_iounmap:
+ pci_iounmap(pdev, nic->csr);
+err_out_free_res:
+ pci_release_regions(pdev);
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+err_out_free_dev:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+ return err;
+}
+
+static void __devexit e100_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ if (netdev) {
+ struct nic *nic = netdev_priv(netdev);
+ unregister_netdev(netdev);
+ e100_free(nic);
+ pci_iounmap(pdev, nic->csr);
+ pci_pool_destroy(nic->cbs_pool);
+ free_netdev(netdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */
+#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */
+#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */
+static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct nic *nic = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ e100_down(nic);
+ netif_device_detach(netdev);
+
+ pci_save_state(pdev);
+
+ if ((nic->flags & wol_magic) | e100_asf(nic)) {
+ /* enable reverse auto-negotiation */
+ if (nic->phy == phy_82552_v) {
+ u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
+ E100_82552_SMARTSPEED);
+
+ mdio_write(netdev, nic->mii.phy_id,
+ E100_82552_SMARTSPEED, smartspeed |
+ E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
+ }
+ *enable_wake = true;
+ } else {
+ *enable_wake = false;
+ }
+
+ pci_disable_device(pdev);
+}
+
+static int __e100_power_off(struct pci_dev *pdev, bool wake)
+{
+ if (wake)
+ return pci_prepare_to_sleep(pdev);
+
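+ /* Not a wake source: disarm PME and go straight to D3hot */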
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ bool wake;
+ __e100_shutdown(pdev, &wake);
+ return __e100_power_off(pdev, wake);
+}
+
+static int e100_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct nic *nic = netdev_priv(netdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ /* ack any pending wake events, disable PME */
+ pci_enable_wake(pdev, 0, 0);
+
+ /* disable reverse auto-negotiation */
+ if (nic->phy == phy_82552_v) {
+ u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
+ E100_82552_SMARTSPEED);
+
+ mdio_write(netdev, nic->mii.phy_id,
+ E100_82552_SMARTSPEED,
+ smartspeed & ~(E100_82552_REV_ANEG));
+ }
+
+ netif_device_attach(netdev);
+ if (netif_running(netdev))
+ e100_up(nic);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static void e100_shutdown(struct pci_dev *pdev)
+{
+ bool wake;
+ __e100_shutdown(pdev, &wake);
+ if (system_state == SYSTEM_POWER_OFF)
+ __e100_power_off(pdev, wake);
+}
+
+/* ------------------ PCI Error Recovery infrastructure -------------- */
+/**
+ * e100_io_error_detected - called when PCI error is detected.
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ */
+static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct nic *nic = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ e100_down(nic);
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * e100_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch.
+ */
+static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct nic *nic = netdev_priv(netdev);
+
+ if (pci_enable_device(pdev)) {
+ pr_err("Cannot re-enable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+
+ /* Only one device per card can do a reset */
+ if (0 != PCI_FUNC(pdev->devfn))
+ return PCI_ERS_RESULT_RECOVERED;
+ e100_hw_reset(nic);
+ e100_phy_init(nic);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * e100_io_resume - resume normal operations
+ * @pdev: Pointer to PCI device
+ *
+ * Resume normal operations after an error recovery
+ * sequence has been completed.
+ */
+static void e100_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct nic *nic = netdev_priv(netdev);
+
+ /* ack any pending wake events, disable PME */
+ pci_enable_wake(pdev, 0, 0);
+
+ netif_device_attach(netdev);
+ if (netif_running(netdev)) {
+ e100_open(netdev);
+ mod_timer(&nic->watchdog, jiffies);
+ }
+}
+
+static struct pci_error_handlers e100_err_handler = {
+ .error_detected = e100_io_error_detected,
+ .slot_reset = e100_io_slot_reset,
+ .resume = e100_io_resume,
+};
+
+static struct pci_driver e100_driver = {
+ .name = DRV_NAME,
+ .id_table = e100_id_table,
+ .probe = e100_probe,
+ .remove = __devexit_p(e100_remove),
+#ifdef CONFIG_PM
+ /* Power Management hooks */
+ .suspend = e100_suspend,
+ .resume = e100_resume,
+#endif
+ .shutdown = e100_shutdown,
+ .err_handler = &e100_err_handler,
+};
+
+static int __init e100_init_module(void)
+{
+ if (((1 << debug) - 1) & NETIF_MSG_DRV) {
+ pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
+ pr_info("%s\n", DRV_COPYRIGHT);
+ }
+ return pci_register_driver(&e100_driver);
+}
+
+static void __exit e100_cleanup_module(void)
+{
+ pci_unregister_driver(&e100_driver);
+}
+
+module_init(e100_init_module);
+module_exit(e100_cleanup_module);
diff --git a/drivers/net/ethernet/intel/e1000/Makefile b/drivers/net/ethernet/intel/e1000/Makefile
new file mode 100644
index 000000000000..4a6ab1522451
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000/Makefile
@@ -0,0 +1,35 @@
+################################################################################
+#
+# Intel PRO/1000 Linux driver
+# Copyright(c) 1999 - 2006 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Linux NICS <linux.nics@intel.com>
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) PRO/1000 ethernet driver
+#
+
+obj-$(CONFIG_E1000) += e1000.o
+
+e1000-objs := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
new file mode 100644
index 000000000000..1e1596990b5c
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -0,0 +1,363 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _E1000_H_
+#define _E1000_H_
+
+#include <linux/stddef.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/capability.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <net/pkt_sched.h>
+#include <linux/list.h>
+#include <linux/reboot.h>
+#include <net/checksum.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+#define BAR_0 0
+#define BAR_1 1
+#define BAR_5 5
+
+#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+struct e1000_adapter;
+
+#include "e1000_hw.h"
+
+#define E1000_MAX_INTR 10
+
+/* TX/RX descriptor defines */
+#define E1000_DEFAULT_TXD 256
+#define E1000_MAX_TXD 256
+#define E1000_MIN_TXD 48
+#define E1000_MAX_82544_TXD 4096
+
+#define E1000_DEFAULT_RXD 256
+#define E1000_MAX_RXD 256
+#define E1000_MIN_RXD 48
+#define E1000_MAX_82544_RXD 4096
+
+#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
+#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
+
+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
+/* Supported Rx Buffer Sizes */
+#define E1000_RXBUFFER_128 128 /* Used for packet split */
+#define E1000_RXBUFFER_256 256 /* Used for packet split */
+#define E1000_RXBUFFER_512 512
+#define E1000_RXBUFFER_1024 1024
+#define E1000_RXBUFFER_2048 2048
+#define E1000_RXBUFFER_4096 4096
+#define E1000_RXBUFFER_8192 8192
+#define E1000_RXBUFFER_16384 16384
+
+/* SmartSpeed delimiters */
+#define E1000_SMARTSPEED_DOWNSHIFT 3
+#define E1000_SMARTSPEED_MAX 15
+
+/* Packet Buffer allocations */
+#define E1000_PBA_BYTES_SHIFT 0xA
+#define E1000_TX_HEAD_ADDR_SHIFT 7
+#define E1000_PBA_TX_MASK 0xFFFF0000
+
+/* Flow Control Watermarks */
+#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
+#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
+
+#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */
+
+/* How many Tx Descriptors do we need to call netif_wake_queue? */
+#define E1000_TX_QUEUE_WAKE 16
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define AUTO_ALL_MODES 0
+#define E1000_EEPROM_82544_APM 0x0004
+#define E1000_EEPROM_APME 0x0400
+
+#ifndef E1000_MASTER_SLAVE
+/* Switch to override PHY master/slave setting */
+#define E1000_MASTER_SLAVE e1000_ms_hw_default
+#endif
+
+#define E1000_MNG_VLAN_NONE (-1)
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct e1000_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ unsigned long time_stamp;
+ u16 length;
+ u16 next_to_watch;
+ unsigned int segs;
+ unsigned int bytecount;
+ u16 mapped_as_page;
+};
+
+struct e1000_tx_ring {
+ /* pointer to the descriptor ring memory */
+ void *desc;
+ /* physical address of the descriptor ring */
+ dma_addr_t dma;
+ /* length of descriptor ring in bytes */
+ unsigned int size;
+ /* number of descriptors in the ring */
+ unsigned int count;
+ /* next descriptor to associate a buffer with */
+ unsigned int next_to_use;
+ /* next descriptor to check for DD status bit */
+ unsigned int next_to_clean;
+ /* array of buffer information structs */
+ struct e1000_buffer *buffer_info;
+
+ u16 tdh;
+ u16 tdt;
+ bool last_tx_tso;
+};
+
+struct e1000_rx_ring {
+ /* pointer to the descriptor ring memory */
+ void *desc;
+ /* physical address of the descriptor ring */
+ dma_addr_t dma;
+ /* length of descriptor ring in bytes */
+ unsigned int size;
+ /* number of descriptors in the ring */
+ unsigned int count;
+ /* next descriptor to associate a buffer with */
+ unsigned int next_to_use;
+ /* next descriptor to check for DD status bit */
+ unsigned int next_to_clean;
+ /* array of buffer information structs */
+ struct e1000_buffer *buffer_info;
+ struct sk_buff *rx_skb_top;
+
+ /* cpu for rx queue */
+ int cpu;
+
+ u16 rdh;
+ u16 rdt;
+};
+
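+/* Free descriptors in ring R; one slot is always kept unused so that
+ * next_to_clean == next_to_use means empty rather than full */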
+#define E1000_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) \
+ ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
+
+#define E1000_RX_DESC_EXT(R, i) \
+ (&(((union e1000_rx_desc_extended *)((R).desc))[i]))
+#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
+#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
+#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
+#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc)
+
+/* board specific private data structure */
+
+struct e1000_adapter {
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u16 mng_vlan_id;
+ u32 bd_number;
+ u32 rx_buffer_len;
+ u32 wol;
+ u32 smartspeed;
+ u32 en_mng_pt;
+ u16 link_speed;
+ u16 link_duplex;
+ spinlock_t stats_lock;
+ unsigned int total_tx_bytes;
+ unsigned int total_tx_packets;
+ unsigned int total_rx_bytes;
+ unsigned int total_rx_packets;
+ /* Interrupt Throttle Rate */
+ u32 itr;
+ u32 itr_setting;
+ u16 tx_itr;
+ u16 rx_itr;
+
+ u8 fc_autoneg;
+
+ /* TX */
+ struct e1000_tx_ring *tx_ring; /* One per active queue */
+ unsigned int restart_queue;
+ u32 txd_cmd;
+ u32 tx_int_delay;
+ u32 tx_abs_int_delay;
+ u32 gotcl;
+ u64 gotcl_old;
+ u64 tpt_old;
+ u64 colc_old;
+ u32 tx_timeout_count;
+ u32 tx_fifo_head;
+ u32 tx_head_addr;
+ u32 tx_fifo_size;
+ u8 tx_timeout_factor;
+ atomic_t tx_fifo_stall;
+ bool pcix_82544;
+ bool detect_tx_hung;
+
+ /* RX */
+ bool (*clean_rx)(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do);
+ void (*alloc_rx_buf)(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count);
+ struct e1000_rx_ring *rx_ring; /* One per active queue */
+ struct napi_struct napi;
+
+ int num_tx_queues;
+ int num_rx_queues;
+
+ u64 hw_csum_err;
+ u64 hw_csum_good;
+ u32 alloc_rx_buff_failed;
+ u32 rx_int_delay;
+ u32 rx_abs_int_delay;
+ bool rx_csum;
+ u32 gorcl;
+ u64 gorcl_old;
+
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
+ /* structs defined in e1000_hw.h */
+ struct e1000_hw hw;
+ struct e1000_hw_stats stats;
+ struct e1000_phy_info phy_info;
+ struct e1000_phy_stats phy_stats;
+
+ u32 test_icr;
+ struct e1000_tx_ring test_tx_ring;
+ struct e1000_rx_ring test_rx_ring;
+
+ int msg_enable;
+
+ /* to not mess up cache alignment, always add to the bottom */
+ bool tso_force;
+ bool smart_power_down; /* phy smart power down */
+ bool quad_port_a;
+ unsigned long flags;
+ u32 eeprom_wol;
+
+ /* for ioport free */
+ int bars;
+ int need_ioport;
+
+ bool discarding;
+
+ struct work_struct reset_task;
+ struct delayed_work watchdog_task;
+ struct delayed_work fifo_stall_task;
+ struct delayed_work phy_info_task;
+
+ struct mutex mutex;
+};
+
+enum e1000_state_t {
+ __E1000_TESTING,
+ __E1000_RESETTING,
+ __E1000_DOWN
+};
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
+#define e_dbg(format, arg...) \
+ netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
+#define e_err(msglvl, format, arg...) \
+ netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_info(msglvl, format, arg...) \
+ netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_warn(msglvl, format, arg...) \
+ netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_notice(msglvl, format, arg...) \
+ netif_notice(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_dev_info(format, arg...) \
+ dev_info(&adapter->pdev->dev, format, ## arg)
+#define e_dev_warn(format, arg...) \
+ dev_warn(&adapter->pdev->dev, format, ## arg)
+#define e_dev_err(format, arg...) \
+ dev_err(&adapter->pdev->dev, format, ## arg)
+
+extern char e1000_driver_name[];
+extern const char e1000_driver_version[];
+
+extern int e1000_up(struct e1000_adapter *adapter);
+extern void e1000_down(struct e1000_adapter *adapter);
+extern void e1000_reinit_locked(struct e1000_adapter *adapter);
+extern void e1000_reset(struct e1000_adapter *adapter);
+extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
+extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_update_stats(struct e1000_adapter *adapter);
+extern bool e1000_has_link(struct e1000_adapter *adapter);
+extern void e1000_power_up_phy(struct e1000_adapter *);
+extern void e1000_set_ethtool_ops(struct net_device *netdev);
+extern void e1000_check_options(struct e1000_adapter *adapter);
+extern char *e1000_get_hw_dev_name(struct e1000_hw *hw);
+
+#endif /* _E1000_H_ */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
new file mode 100644
index 000000000000..5548d464261a
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -0,0 +1,1863 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for e1000 */
+
+#include "e1000.h"
+#include <asm/uaccess.h>
+
+enum {NETDEV_STATS, E1000_STATS};
+
+struct e1000_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int type;
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define E1000_STAT(m) E1000_STATS, \
+ sizeof(((struct e1000_adapter *)0)->m), \
+ offsetof(struct e1000_adapter, m)
+#define E1000_NETDEV_STAT(m) NETDEV_STATS, \
+ sizeof(((struct net_device *)0)->m), \
+ offsetof(struct net_device, m)
+
+static const struct e1000_stats e1000_gstrings_stats[] = {
+ { "rx_packets", E1000_STAT(stats.gprc) },
+ { "tx_packets", E1000_STAT(stats.gptc) },
+ { "rx_bytes", E1000_STAT(stats.gorcl) },
+ { "tx_bytes", E1000_STAT(stats.gotcl) },
+ { "rx_broadcast", E1000_STAT(stats.bprc) },
+ { "tx_broadcast", E1000_STAT(stats.bptc) },
+ { "rx_multicast", E1000_STAT(stats.mprc) },
+ { "tx_multicast", E1000_STAT(stats.mptc) },
+ { "rx_errors", E1000_STAT(stats.rxerrc) },
+ { "tx_errors", E1000_STAT(stats.txerrc) },
+ { "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) },
+ { "multicast", E1000_STAT(stats.mprc) },
+ { "collisions", E1000_STAT(stats.colc) },
+ { "rx_length_errors", E1000_STAT(stats.rlerrc) },
+ { "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) },
+ { "rx_crc_errors", E1000_STAT(stats.crcerrs) },
+ { "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) },
+ { "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
+ { "rx_missed_errors", E1000_STAT(stats.mpc) },
+ { "tx_aborted_errors", E1000_STAT(stats.ecol) },
+ { "tx_carrier_errors", E1000_STAT(stats.tncrs) },
+ { "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) },
+ { "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) },
+ { "tx_window_errors", E1000_STAT(stats.latecol) },
+ { "tx_abort_late_coll", E1000_STAT(stats.latecol) },
+ { "tx_deferred_ok", E1000_STAT(stats.dc) },
+ { "tx_single_coll_ok", E1000_STAT(stats.scc) },
+ { "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
+ { "tx_timeout_count", E1000_STAT(tx_timeout_count) },
+ { "tx_restart_queue", E1000_STAT(restart_queue) },
+ { "rx_long_length_errors", E1000_STAT(stats.roc) },
+ { "rx_short_length_errors", E1000_STAT(stats.ruc) },
+ { "rx_align_errors", E1000_STAT(stats.algnerrc) },
+ { "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
+ { "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
+ { "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
+ { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
+ { "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
+ { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
+ { "rx_long_byte_count", E1000_STAT(stats.gorcl) },
+ { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
+ { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
+ { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
+ { "tx_smbus", E1000_STAT(stats.mgptc) },
+ { "rx_smbus", E1000_STAT(stats.mgprc) },
+ { "dropped_smbus", E1000_STAT(stats.mgpdc) },
+};
+
+#define E1000_QUEUE_STATS_LEN 0
+#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
+static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)", "Eeprom test (offline)",
+ "Interrupt test (offline)", "Loopback test (offline)",
+ "Link test (on/offline)"
+};
+#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
+
+static int e1000_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (hw->media_type == e1000_media_type_copper) {
+
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full|
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
+ ecmd->advertising = ADVERTISED_TP;
+
+ if (hw->autoneg == 1) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ /* the e1000 autoneg seems to match ethtool nicely */
+ ecmd->advertising |= hw->autoneg_advertised;
+ }
+
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = hw->phy_addr;
+
+ if (hw->mac_type == e1000_82543)
+ ecmd->transceiver = XCVR_EXTERNAL;
+ else
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ } else {
+ ecmd->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
+
+ ecmd->advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg);
+
+ ecmd->port = PORT_FIBRE;
+
+ if (hw->mac_type >= e1000_82545)
+ ecmd->transceiver = XCVR_INTERNAL;
+ else
+ ecmd->transceiver = XCVR_EXTERNAL;
+ }
+
+ if (er32(STATUS) & E1000_STATUS_LU) {
+
+ e1000_get_speed_and_duplex(hw, &adapter->link_speed,
+ &adapter->link_duplex);
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+
+ /* unfortunately FULL_DUPLEX != DUPLEX_FULL
+ * and HALF_DUPLEX != DUPLEX_HALF */
+
+ if (adapter->link_duplex == FULL_DUPLEX)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ } else {
+ ethtool_cmd_speed_set(ecmd, -1);
+ ecmd->duplex = -1;
+ }
+
+ ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
+ hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ return 0;
+}
+
+static int e1000_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ hw->autoneg = 1;
+ if (hw->media_type == e1000_media_type_fiber)
+ hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg;
+ else
+ hw->autoneg_advertised = ecmd->advertising |
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
+ ecmd->advertising = hw->autoneg_advertised;
+ } else {
+ u32 speed = ethtool_cmd_speed(ecmd);
+ if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+ return -EINVAL;
+ }
+ }
+
+ /* reset the link */
+
+ if (netif_running(adapter->netdev)) {
+ e1000_down(adapter);
+ e1000_up(adapter);
+ } else
+ e1000_reset(adapter);
+
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+ return 0;
+}
+
+static u32 e1000_get_link(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ /*
+ * If the link is not reported up to netdev, interrupts are disabled,
+ * and so the physical link state may have changed since we last
+ * looked. Set get_link_status to make sure that the true link
+ * state is interrogated, rather than pulling a cached and possibly
+ * stale link state from the driver.
+ */
+ if (!netif_carrier_ok(netdev))
+ adapter->hw.get_link_status = 1;
+
+ return e1000_has_link(adapter);
+}
+
+static void e1000_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ pause->autoneg =
+ (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ if (hw->fc == E1000_FC_RX_PAUSE)
+ pause->rx_pause = 1;
+ else if (hw->fc == E1000_FC_TX_PAUSE)
+ pause->tx_pause = 1;
+ else if (hw->fc == E1000_FC_FULL) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+static int e1000_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int retval = 0;
+
+ adapter->fc_autoneg = pause->autoneg;
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+
+ if (pause->rx_pause && pause->tx_pause)
+ hw->fc = E1000_FC_FULL;
+ else if (pause->rx_pause && !pause->tx_pause)
+ hw->fc = E1000_FC_RX_PAUSE;
+ else if (!pause->rx_pause && pause->tx_pause)
+ hw->fc = E1000_FC_TX_PAUSE;
+ else if (!pause->rx_pause && !pause->tx_pause)
+ hw->fc = E1000_FC_NONE;
+
+ hw->original_fc = hw->fc;
+
+ if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+ if (netif_running(adapter->netdev)) {
+ e1000_down(adapter);
+ e1000_up(adapter);
+ } else
+ e1000_reset(adapter);
+ } else
+ retval = ((hw->media_type == e1000_media_type_fiber) ?
+ e1000_setup_link(hw) : e1000_force_mac_fc(hw));
+
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+ return retval;
+}
+
+static u32 e1000_get_msglevel(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void e1000_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+static int e1000_get_regs_len(struct net_device *netdev)
+{
+#define E1000_REGS_LEN 32
+ return E1000_REGS_LEN * sizeof(u32);
+}
+
+static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u16 phy_data;
+
+ memset(p, 0, E1000_REGS_LEN * sizeof(u32));
+
+ regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
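+	/* Encode a dump version for userspace: format 1 in the top byte,
+	 * then the PCI revision ID and the device ID.
+	 */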
+
+ regs_buff[0] = er32(CTRL);
+ regs_buff[1] = er32(STATUS);
+
+ regs_buff[2] = er32(RCTL);
+ regs_buff[3] = er32(RDLEN);
+ regs_buff[4] = er32(RDH);
+ regs_buff[5] = er32(RDT);
+ regs_buff[6] = er32(RDTR);
+
+ regs_buff[7] = er32(TCTL);
+ regs_buff[8] = er32(TDLEN);
+ regs_buff[9] = er32(TDH);
+ regs_buff[10] = er32(TDT);
+ regs_buff[11] = er32(TIDV);
+
+ regs_buff[12] = hw->phy_type; /* PHY type (IGP=1, M88=0) */
+ if (hw->phy_type == e1000_phy_igp) {
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+ IGP01E1000_PHY_AGC_A);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[13] = (u32)phy_data; /* cable length */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+ IGP01E1000_PHY_AGC_B);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[14] = (u32)phy_data; /* cable length */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+ IGP01E1000_PHY_AGC_C);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[15] = (u32)phy_data; /* cable length */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+ IGP01E1000_PHY_AGC_D);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[16] = (u32)phy_data; /* cable length */
+ regs_buff[17] = 0; /* extended 10bt distance (not needed) */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[18] = (u32)phy_data; /* cable polarity */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+ IGP01E1000_PHY_PCS_INIT_REG);
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG &
+ IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+ regs_buff[19] = (u32)phy_data; /* cable polarity */
+ regs_buff[20] = 0; /* polarity correction enabled (always) */
+ regs_buff[22] = 0; /* phy receive errors (unavailable) */
+ regs_buff[23] = regs_buff[18]; /* mdix mode */
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
+ } else {
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ regs_buff[13] = (u32)phy_data; /* cable length */
+ regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
+ regs_buff[18] = regs_buff[13]; /* cable polarity */
+ regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[20] = regs_buff[17]; /* polarity correction */
+ /* phy receive errors */
+ regs_buff[22] = adapter->phy_stats.receive_errors;
+ regs_buff[23] = regs_buff[13]; /* mdix mode */
+ }
+ regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */
+ e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+ regs_buff[24] = (u32)phy_data; /* phy local receiver status */
+ regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
+ if (hw->mac_type >= e1000_82540 &&
+ hw->media_type == e1000_media_type_copper) {
+ regs_buff[26] = er32(MANC);
+ }
+}
+
+static int e1000_get_eeprom_len(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ return hw->eeprom.word_size * 2;
+}
+
+static int e1000_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ int first_word, last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
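+	/* The EEPROM is addressed in 16-bit words, so convert the byte span
+	 * to an inclusive word range: e.g. offset 3, len 4 gives first_word
+	 * = 3 >> 1 = 1 and last_word = (3 + 4 - 1) >> 1 = 3.
+	 */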
+
+ eeprom_buff = kmalloc(sizeof(u16) *
+ (last_word - first_word + 1), GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+	if (hw->eeprom.type == e1000_eeprom_spi) {
+		ret_val = e1000_read_eeprom(hw, first_word,
+					    last_word - first_word + 1,
+					    eeprom_buff);
+	} else {
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ ret_val = e1000_read_eeprom(hw, first_word + i, 1,
+ &eeprom_buff[i]);
+ if (ret_val)
+ break;
+ }
+ }
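+	/* SPI parts are read in a single multi-word transaction; other EEPROM
+	 * types are read one word per call, stopping at the first failure.
+	 */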
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
+ eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static int e1000_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ void *ptr;
+ int max_len, first_word, last_word, ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ return -EFAULT;
+
+ max_len = hw->eeprom.word_size * 2;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = (void *)eeprom_buff;
+
+ if (eeprom->offset & 1) {
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ ret_val = e1000_read_eeprom(hw, first_word, 1,
+ &eeprom_buff[0]);
+ ptr++;
+ }
+ if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
+ /* need read/modify/write of last changed EEPROM word */
+ /* only the first byte of the word is being modified */
+ ret_val = e1000_read_eeprom(hw, last_word, 1,
+ &eeprom_buff[last_word - first_word]);
+ }
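+	/* After the reads above, eeprom_buff holds any partial first/last
+	 * words; the caller's bytes are overlaid below so the untouched
+	 * bytes of those words are written back unchanged.
+	 */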
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+
+ ret_val = e1000_write_eeprom(hw, first_word,
+ last_word - first_word + 1, eeprom_buff);
+
+ /* Update the checksum over the first part of the EEPROM if needed */
+ if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG))
+ e1000_update_eeprom_checksum(hw);
+
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
+static void e1000_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ char firmware_version[32];
+
+ strncpy(drvinfo->driver, e1000_driver_name, 32);
+ strncpy(drvinfo->version, e1000_driver_version, 32);
+
+ sprintf(firmware_version, "N/A");
+ strncpy(drvinfo->fw_version, firmware_version, 32);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ drvinfo->regdump_len = e1000_get_regs_len(netdev);
+ drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
+}
+
+static void e1000_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ e1000_mac_type mac_type = hw->mac_type;
+ struct e1000_tx_ring *txdr = adapter->tx_ring;
+ struct e1000_rx_ring *rxdr = adapter->rx_ring;
+
+ ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD :
+ E1000_MAX_82544_RXD;
+ ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD :
+ E1000_MAX_82544_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rxdr->count;
+ ring->tx_pending = txdr->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int e1000_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ e1000_mac_type mac_type = hw->mac_type;
+ struct e1000_tx_ring *txdr, *tx_old;
+ struct e1000_rx_ring *rxdr, *rx_old;
+ int i, err;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+
+ if (netif_running(adapter->netdev))
+ e1000_down(adapter);
+
+ tx_old = adapter->tx_ring;
+ rx_old = adapter->rx_ring;
+
+ err = -ENOMEM;
+	txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring),
+		       GFP_KERNEL);
+ if (!txdr)
+ goto err_alloc_tx;
+
+	rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring),
+		       GFP_KERNEL);
+ if (!rxdr)
+ goto err_alloc_rx;
+
+ adapter->tx_ring = txdr;
+ adapter->rx_ring = rxdr;
+
+	rxdr->count = max(ring->rx_pending, (u32)E1000_MIN_RXD);
+	rxdr->count = min(rxdr->count, (u32)(mac_type < e1000_82544 ?
+ E1000_MAX_RXD : E1000_MAX_82544_RXD));
+ rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
+
+	txdr->count = max(ring->tx_pending, (u32)E1000_MIN_TXD);
+	txdr->count = min(txdr->count, (u32)(mac_type < e1000_82544 ?
+ E1000_MAX_TXD : E1000_MAX_82544_TXD));
+ txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
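+	/* e.g. on an 82544 or newer MAC, a request for 300 Tx descriptors is
+	 * clamped to the [E1000_MIN_TXD, E1000_MAX_82544_TXD] range and then
+	 * rounded up to the next REQ_TX_DESCRIPTOR_MULTIPLE boundary.
+	 */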
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ txdr[i].count = txdr->count;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ rxdr[i].count = rxdr->count;
+
+ if (netif_running(adapter->netdev)) {
+ /* Try to get new resources before deleting old */
+ err = e1000_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+ err = e1000_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* save the new, restore the old in order to free it,
+ * then restore the new back again */
+
+ adapter->rx_ring = rx_old;
+ adapter->tx_ring = tx_old;
+ e1000_free_all_rx_resources(adapter);
+ e1000_free_all_tx_resources(adapter);
+ kfree(tx_old);
+ kfree(rx_old);
+ adapter->rx_ring = rxdr;
+ adapter->tx_ring = txdr;
+ err = e1000_up(adapter);
+ if (err)
+ goto err_setup;
+ }
+
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+ return 0;
+err_setup_tx:
+ e1000_free_all_rx_resources(adapter);
+err_setup_rx:
+ adapter->rx_ring = rx_old;
+ adapter->tx_ring = tx_old;
+ kfree(rxdr);
+err_alloc_rx:
+ kfree(txdr);
+err_alloc_tx:
+ e1000_up(adapter);
+err_setup:
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+ return err;
+}
+
+static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg,
+ u32 mask, u32 write)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ static const u32 test[] =
+ {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ u8 __iomem *address = hw->hw_addr + reg;
+ u32 read;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(test); i++) {
+ writel(write & test[i], address);
+ read = readl(address);
+ if (read != (write & test[i] & mask)) {
+ e_err(drv, "pattern test reg %04X failed: "
+ "got 0x%08X expected 0x%08X\n",
+ reg, read, (write & test[i] & mask));
+ *data = reg;
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg,
+ u32 mask, u32 write)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u8 __iomem *address = hw->hw_addr + reg;
+ u32 read;
+
+ writel(write & mask, address);
+ read = readl(address);
+ if ((read & mask) != (write & mask)) {
+ e_err(drv, "set/check reg %04X test failed: "
+ "got 0x%08X expected 0x%08X\n",
+ reg, (read & mask), (write & mask));
+ *data = reg;
+ return true;
+ }
+ return false;
+}
+
+#define REG_PATTERN_TEST(reg, mask, write) \
+ do { \
+ if (reg_pattern_test(adapter, data, \
+ (hw->mac_type >= e1000_82543) \
+ ? E1000_##reg : E1000_82542_##reg, \
+ mask, write)) \
+ return 1; \
+ } while (0)
+
+#define REG_SET_AND_CHECK(reg, mask, write) \
+ do { \
+ if (reg_set_and_check(adapter, data, \
+ (hw->mac_type >= e1000_82543) \
+ ? E1000_##reg : E1000_82542_##reg, \
+ mask, write)) \
+ return 1; \
+ } while (0)
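+/* Both helpers choose the register offset by MAC generation: 82543 and newer
+ * parts use the E1000_<reg> layout, while 82542 keeps the same registers at
+ * its legacy E1000_82542_<reg> offsets.
+ */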
+
+static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
+{
+ u32 value, before, after;
+ u32 i, toggle;
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* The status register is Read Only, so a write should fail.
+ * Some bits that get toggled are ignored.
+ */
+
+ /* there are several bits on newer hardware that are r/w */
+ toggle = 0xFFFFF833;
+
+ before = er32(STATUS);
+ value = (er32(STATUS) & toggle);
+ ew32(STATUS, toggle);
+ after = er32(STATUS) & toggle;
+ if (value != after) {
+ e_err(drv, "failed STATUS register test got: "
+ "0x%08X expected: 0x%08X\n", after, value);
+ *data = 1;
+ return 1;
+ }
+ /* restore previous status */
+ ew32(STATUS, before);
+
+ REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
+
+ REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF);
+ REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8);
+ REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
+ REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF);
+
+ REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
+
+ before = 0x06DFB3FE;
+ REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB);
+ REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
+
+ if (hw->mac_type >= e1000_82543) {
+
+ REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
+ REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+ REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+ REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
+ value = E1000_RAR_ENTRIES;
+ for (i = 0; i < value; i++) {
+ REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
+ 0xFFFFFFFF);
+ }
+
+ } else {
+
+ REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
+ REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
+ REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);
+
+ }
+
+ value = E1000_MC_TBL_SIZE;
+ for (i = 0; i < value; i++)
+ REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
+
+ *data = 0;
+ return 0;
+}
+
+static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 temp;
+ u16 checksum = 0;
+ u16 i;
+
+ *data = 0;
+ /* Read and add up the contents of the EEPROM */
+ for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+ if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) {
+ *data = 1;
+ break;
+ }
+ checksum += temp;
+ }
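+	/* A valid image stores a checksum word so that words 0x00 through
+	 * EEPROM_CHECKSUM_REG sum, modulo 2^16, to the constant EEPROM_SUM
+	 * (0xBABA on these parts).
+	 */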
+
+ /* If Checksum is not Correct return error else test passed */
+ if ((checksum != (u16)EEPROM_SUM) && !(*data))
+ *data = 2;
+
+ return *data;
+}
+
+static irqreturn_t e1000_test_intr(int irq, void *data)
+{
+ struct net_device *netdev = (struct net_device *)data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ adapter->test_icr |= er32(ICR);
+
+ return IRQ_HANDLED;
+}
+
+static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 mask, i = 0;
+ bool shared_int = true;
+ u32 irq = adapter->pdev->irq;
+ struct e1000_hw *hw = &adapter->hw;
+
+ *data = 0;
+
+ /* NOTE: we don't test MSI interrupts here, yet */
+ /* Hook up test interrupt handler just for this test */
+ if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
+ netdev))
+ shared_int = false;
+ else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
+ netdev->name, netdev)) {
+ *data = 1;
+ return -1;
+ }
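+	/* The first request_irq() omits IRQF_SHARED, so it only succeeds when
+	 * no other handler owns the line; on failure we fall back to a shared
+	 * registration and skip the checks that assume exclusive ownership.
+	 */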
+ e_info(hw, "testing %s interrupt\n", (shared_int ?
+ "shared" : "unshared"));
+
+ /* Disable all the interrupts */
+ ew32(IMC, 0xFFFFFFFF);
+ E1000_WRITE_FLUSH();
+ msleep(10);
+
+ /* Test each interrupt */
+ for (; i < 10; i++) {
+
+ /* Interrupt to test */
+ mask = 1 << i;
+
+ if (!shared_int) {
+ /* Disable the interrupt to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ ew32(IMC, mask);
+ ew32(ICS, mask);
+ E1000_WRITE_FLUSH();
+ msleep(10);
+
+ if (adapter->test_icr & mask) {
+ *data = 3;
+ break;
+ }
+ }
+
+ /* Enable the interrupt to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was not posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ ew32(IMS, mask);
+ ew32(ICS, mask);
+ E1000_WRITE_FLUSH();
+ msleep(10);
+
+ if (!(adapter->test_icr & mask)) {
+ *data = 4;
+ break;
+ }
+
+ if (!shared_int) {
+ /* Disable the other interrupts to be reported in
+ * the cause register and then force the other
+ * interrupts and see if any get posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ ew32(IMC, ~mask & 0x00007FFF);
+ ew32(ICS, ~mask & 0x00007FFF);
+ E1000_WRITE_FLUSH();
+ msleep(10);
+
+ if (adapter->test_icr) {
+ *data = 5;
+ break;
+ }
+ }
+ }
+
+ /* Disable all the interrupts */
+ ew32(IMC, 0xFFFFFFFF);
+ E1000_WRITE_FLUSH();
+ msleep(10);
+
+ /* Unhook test interrupt handler */
+ free_irq(irq, netdev);
+
+ return *data;
+}
+
+static void e1000_free_desc_rings(struct e1000_adapter *adapter)
+{
+ struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+ struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ int i;
+
+ if (txdr->desc && txdr->buffer_info) {
+ for (i = 0; i < txdr->count; i++) {
+ if (txdr->buffer_info[i].dma)
+ dma_unmap_single(&pdev->dev,
+ txdr->buffer_info[i].dma,
+ txdr->buffer_info[i].length,
+ DMA_TO_DEVICE);
+ if (txdr->buffer_info[i].skb)
+ dev_kfree_skb(txdr->buffer_info[i].skb);
+ }
+ }
+
+ if (rxdr->desc && rxdr->buffer_info) {
+ for (i = 0; i < rxdr->count; i++) {
+ if (rxdr->buffer_info[i].dma)
+ dma_unmap_single(&pdev->dev,
+ rxdr->buffer_info[i].dma,
+ rxdr->buffer_info[i].length,
+ DMA_FROM_DEVICE);
+ if (rxdr->buffer_info[i].skb)
+ dev_kfree_skb(rxdr->buffer_info[i].skb);
+ }
+ }
+
+ if (txdr->desc) {
+ dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
+ txdr->dma);
+ txdr->desc = NULL;
+ }
+ if (rxdr->desc) {
+ dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
+ rxdr->dma);
+ rxdr->desc = NULL;
+ }
+
+ kfree(txdr->buffer_info);
+ txdr->buffer_info = NULL;
+ kfree(rxdr->buffer_info);
+ rxdr->buffer_info = NULL;
+}
+
+static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+ struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ u32 rctl;
+ int i, ret_val;
+
+ /* Setup Tx descriptor ring and Tx buffers */
+
+ if (!txdr->count)
+ txdr->count = E1000_DEFAULT_TXD;
+
+ txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_buffer),
+ GFP_KERNEL);
+ if (!txdr->buffer_info) {
+ ret_val = 1;
+ goto err_nomem;
+ }
+
+ txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
+ txdr->size = ALIGN(txdr->size, 4096);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
+ if (!txdr->desc) {
+ ret_val = 2;
+ goto err_nomem;
+ }
+ memset(txdr->desc, 0, txdr->size);
+ txdr->next_to_use = txdr->next_to_clean = 0;
+
+ ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF));
+ ew32(TDBAH, ((u64)txdr->dma >> 32));
+ ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc));
+ ew32(TDH, 0);
+ ew32(TDT, 0);
+ ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN |
+ E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
+ E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);
+
+ for (i = 0; i < txdr->count; i++) {
+ struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
+ struct sk_buff *skb;
+ unsigned int size = 1024;
+
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb) {
+ ret_val = 3;
+ goto err_nomem;
+ }
+ skb_put(skb, size);
+ txdr->buffer_info[i].skb = skb;
+ txdr->buffer_info[i].length = skb->len;
+ txdr->buffer_info[i].dma =
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
+ tx_desc->lower.data = cpu_to_le32(skb->len);
+ tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
+ E1000_TXD_CMD_IFCS |
+ E1000_TXD_CMD_RPS);
+ tx_desc->upper.data = 0;
+ }
+
+ /* Setup Rx descriptor ring and Rx buffers */
+
+ if (!rxdr->count)
+ rxdr->count = E1000_DEFAULT_RXD;
+
+ rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer),
+ GFP_KERNEL);
+ if (!rxdr->buffer_info) {
+ ret_val = 4;
+ goto err_nomem;
+ }
+
+ rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
+ if (!rxdr->desc) {
+ ret_val = 5;
+ goto err_nomem;
+ }
+ memset(rxdr->desc, 0, rxdr->size);
+ rxdr->next_to_use = rxdr->next_to_clean = 0;
+
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF));
+ ew32(RDBAH, ((u64)rxdr->dma >> 32));
+ ew32(RDLEN, rxdr->size);
+ ew32(RDH, 0);
+ ew32(RDT, 0);
+ rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
+ ew32(RCTL, rctl);
+
+ for (i = 0; i < rxdr->count; i++) {
+ struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
+ struct sk_buff *skb;
+
+ skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
+ if (!skb) {
+ ret_val = 6;
+ goto err_nomem;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ rxdr->buffer_info[i].skb = skb;
+ rxdr->buffer_info[i].length = E1000_RXBUFFER_2048;
+ rxdr->buffer_info[i].dma =
+ dma_map_single(&pdev->dev, skb->data,
+ E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
+ rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
+ memset(skb->data, 0x00, skb->len);
+ }
+
+ return 0;
+
+err_nomem:
+ e1000_free_desc_rings(adapter);
+ return ret_val;
+}
+
+static void e1000_phy_disable_receiver(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Write out to PHY registers 29 and 30 to disable the Receiver. */
+ e1000_write_phy_reg(hw, 29, 0x001F);
+ e1000_write_phy_reg(hw, 30, 0x8FFC);
+ e1000_write_phy_reg(hw, 29, 0x001A);
+ e1000_write_phy_reg(hw, 30, 0x8FF0);
+}
+
+static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 phy_reg;
+
+ /* Because we reset the PHY above, we need to re-force TX_CLK in the
+ * Extended PHY Specific Control Register to 25MHz clock. This
+ * value defaults back to a 2.5MHz clock when the PHY is reset.
+ */
+ e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
+ phy_reg |= M88E1000_EPSCR_TX_CLK_25;
+ e1000_write_phy_reg(hw,
+ M88E1000_EXT_PHY_SPEC_CTRL, phy_reg);
+
+ /* In addition, because of the s/w reset above, we need to enable
+ * CRS on TX. This must be set for both full and half duplex
+ * operation.
+ */
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
+ phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+ e1000_write_phy_reg(hw,
+ M88E1000_PHY_SPEC_CTRL, phy_reg);
+}
+
+static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_reg;
+ u16 phy_reg;
+
+ /* Setup the Device Control Register for PHY loopback test. */
+
+ ctrl_reg = er32(CTRL);
+ ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */
+ E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+
+ ew32(CTRL, ctrl_reg);
+
+ /* Read the PHY Specific Control Register (0x10) */
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
+
+ /* Clear Auto-Crossover bits in PHY Specific Control Register
+ * (bits 6:5).
+ */
+ phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE;
+ e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg);
+
+ /* Perform software reset on the PHY */
+ e1000_phy_reset(hw);
+
+ /* Have to setup TX_CLK and TX_CRS after software reset */
+ e1000_phy_reset_clk_and_crs(adapter);
+
+ e1000_write_phy_reg(hw, PHY_CTRL, 0x8100);
+
+ /* Wait for reset to complete. */
+ udelay(500);
+
+ /* Have to setup TX_CLK and TX_CRS after software reset */
+ e1000_phy_reset_clk_and_crs(adapter);
+
+ /* Write out to PHY registers 29 and 30 to disable the Receiver. */
+ e1000_phy_disable_receiver(adapter);
+
+ /* Set the loopback bit in the PHY control register. */
+ e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
+ phy_reg |= MII_CR_LOOPBACK;
+ e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
+
+ /* Setup TX_CLK and TX_CRS one more time. */
+ e1000_phy_reset_clk_and_crs(adapter);
+
+ /* Check Phy Configuration */
+ e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
+ if (phy_reg != 0x4100)
+ return 9;
+
+ e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
+ if (phy_reg != 0x0070)
+ return 10;
+
+ e1000_read_phy_reg(hw, 29, &phy_reg);
+ if (phy_reg != 0x001A)
+ return 11;
+
+ return 0;
+}
+
+static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_reg = 0;
+ u32 stat_reg = 0;
+
+ hw->autoneg = false;
+
+ if (hw->phy_type == e1000_phy_m88) {
+ /* Auto-MDI/MDIX Off */
+ e1000_write_phy_reg(hw,
+ M88E1000_PHY_SPEC_CTRL, 0x0808);
+ /* reset to update Auto-MDI/MDIX */
+ e1000_write_phy_reg(hw, PHY_CTRL, 0x9140);
+ /* autoneg off */
+ e1000_write_phy_reg(hw, PHY_CTRL, 0x8140);
+ }
+
+ ctrl_reg = er32(CTRL);
+
+ /* force 1000, set loopback */
+ e1000_write_phy_reg(hw, PHY_CTRL, 0x4140);
+
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg = er32(CTRL);
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+
+ if (hw->media_type == e1000_media_type_copper &&
+ hw->phy_type == e1000_phy_m88)
+ ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
+ else {
+		/* Set the ILOS bit on the fiber Nic if half
+		 * duplex link is detected. */
+ stat_reg = er32(STATUS);
+ if ((stat_reg & E1000_STATUS_FD) == 0)
+ ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
+ }
+
+ ew32(CTRL, ctrl_reg);
+
+	/* Disable the receiver on the PHY so that the PHY does not begin to
+	 * autoneg when a cable is reconnected to the NIC.
+	 */
+ if (hw->phy_type == e1000_phy_m88)
+ e1000_phy_disable_receiver(adapter);
+
+ udelay(500);
+
+ return 0;
+}
+
+static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 phy_reg = 0;
+ u16 count = 0;
+
+ switch (hw->mac_type) {
+ case e1000_82543:
+ if (hw->media_type == e1000_media_type_copper) {
+ /* Attempt to setup Loopback mode on Non-integrated PHY.
+ * Some PHY registers get corrupted at random, so
+ * attempt this 10 times.
+ */
+ while (e1000_nonintegrated_phy_loopback(adapter) &&
+ count++ < 10);
+ if (count < 11)
+ return 0;
+ }
+ break;
+
+ case e1000_82544:
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+		return e1000_integrated_phy_loopback(adapter);
+ default:
+ /* Default PHY loopback work is to read the MII
+ * control register and assert bit 14 (loopback mode).
+ */
+ e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
+ phy_reg |= MII_CR_LOOPBACK;
+ e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
+		return 0;
+ }
+
+ return 8;
+}
+
+static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ if (hw->media_type == e1000_media_type_fiber ||
+ hw->media_type == e1000_media_type_internal_serdes) {
+ switch (hw->mac_type) {
+ case e1000_82545:
+ case e1000_82546:
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+			return e1000_set_phy_loopback(adapter);
+ default:
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_LBM_TCVR;
+ ew32(RCTL, rctl);
+ return 0;
+ }
+ } else if (hw->media_type == e1000_media_type_copper)
+ return e1000_set_phy_loopback(adapter);
+
+ return 7;
+}
+
+static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+ u16 phy_reg;
+
+ rctl = er32(RCTL);
+ rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+ ew32(RCTL, rctl);
+
+ switch (hw->mac_type) {
+ case e1000_82545:
+ case e1000_82546:
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ default:
+ hw->autoneg = true;
+ e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
+ if (phy_reg & MII_CR_LOOPBACK) {
+ phy_reg &= ~MII_CR_LOOPBACK;
+ e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
+ e1000_phy_reset(hw);
+ }
+ break;
+ }
+}
+
+static void e1000_create_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
+ memset(skb->data, 0xFF, frame_size);
+ frame_size &= ~1;
+ memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+ memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+ memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
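+/* The frame built above is 0xFF in its first half and 0xAA in the second,
+ * with 0xBE/0xAF marker bytes 10 and 12 bytes past the midpoint; the checker
+ * below only needs one leading 0xFF byte and the two markers to match.
+ */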
+
+static int e1000_check_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
+ frame_size &= ~1;
+ if (*(skb->data + 3) == 0xFF) {
+ if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+ (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+ return 0;
+ }
+ }
+ return 13;
+}
+
+static int e1000_run_loopback_test(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+ struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+	int i, j, k, l, lc, good_cnt, ret_val = 0;
+ unsigned long time;
+
+ ew32(RDT, rxdr->count - 1);
+
+	/* Calculate the loop count based on the largest descriptor ring.
+	 * The idea is to wrap the largest ring a number of times using 64
+	 * send/receive pairs during each loop.
+	 */
+
+ if (rxdr->count <= txdr->count)
+ lc = ((txdr->count / 64) * 2) + 1;
+ else
+ lc = ((rxdr->count / 64) * 2) + 1;
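+	/* e.g. with the default 256-entry test rings this gives
+	 * lc = (256 / 64) * 2 + 1 = 9 passes of 64 frames each.
+	 */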
+
+ k = l = 0;
+ for (j = 0; j <= lc; j++) { /* loop count loop */
+ for (i = 0; i < 64; i++) { /* send the packets */
+ e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
+ 1024);
+ dma_sync_single_for_device(&pdev->dev,
+ txdr->buffer_info[k].dma,
+ txdr->buffer_info[k].length,
+ DMA_TO_DEVICE);
+			if (unlikely(++k == txdr->count))
+				k = 0;
+ }
+ ew32(TDT, k);
+ E1000_WRITE_FLUSH();
+ msleep(200);
+ time = jiffies; /* set the start time for the receive */
+ good_cnt = 0;
+ do { /* receive the sent packets */
+ dma_sync_single_for_cpu(&pdev->dev,
+ rxdr->buffer_info[l].dma,
+ rxdr->buffer_info[l].length,
+ DMA_FROM_DEVICE);
+
+ ret_val = e1000_check_lbtest_frame(
+ rxdr->buffer_info[l].skb,
+ 1024);
+ if (!ret_val)
+ good_cnt++;
+			if (unlikely(++l == rxdr->count))
+				l = 0;
+			/* time + 20 msecs (200 msecs on 2.4) is more than
+			 * enough time to complete the receives; if it's
+			 * exceeded, break out and flag an error
+			 */
+ } while (good_cnt < 64 && jiffies < (time + 20));
+ if (good_cnt != 64) {
+ ret_val = 13; /* ret_val is the same as mis-compare */
+ break;
+ }
+ if (jiffies >= (time + 2)) {
+ ret_val = 14; /* error code for time out error */
+ break;
+ }
+ } /* end loop count loop */
+ return ret_val;
+}
+
+static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
+{
+ *data = e1000_setup_desc_rings(adapter);
+ if (*data)
+ goto out;
+ *data = e1000_setup_loopback_test(adapter);
+ if (*data)
+ goto err_loopback;
+ *data = e1000_run_loopback_test(adapter);
+ e1000_loopback_cleanup(adapter);
+
+err_loopback:
+ e1000_free_desc_rings(adapter);
+out:
+ return *data;
+}
+
+static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ *data = 0;
+ if (hw->media_type == e1000_media_type_internal_serdes) {
+ int i = 0;
+ hw->serdes_has_link = false;
+
+ /* On some blade server designs, link establishment
+ * could take as long as 2-3 minutes */
+ do {
+ e1000_check_for_link(hw);
+ if (hw->serdes_has_link)
+ return *data;
+ msleep(20);
+ } while (i++ < 3750);
+
+ *data = 1;
+ } else {
+ e1000_check_for_link(hw);
+ if (hw->autoneg) /* if auto_neg is set wait for it */
+ msleep(4000);
+
+ if (!(er32(STATUS) & E1000_STATUS_LU)) {
+ *data = 1;
+ }
+ }
+ return *data;
+}
+
+static int e1000_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return E1000_TEST_LEN;
+ case ETH_SS_STATS:
+ return E1000_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void e1000_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ bool if_running = netif_running(netdev);
+
+ set_bit(__E1000_TESTING, &adapter->flags);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ /* save speed, duplex, autoneg settings */
+ u16 autoneg_advertised = hw->autoneg_advertised;
+ u8 forced_speed_duplex = hw->forced_speed_duplex;
+ u8 autoneg = hw->autoneg;
+
+ e_info(hw, "offline testing starting\n");
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result */
+ if (e1000_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ e1000_reset(adapter);
+
+ if (e1000_reg_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000_reset(adapter);
+ if (e1000_eeprom_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000_reset(adapter);
+ if (e1000_intr_test(adapter, &data[2]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000_reset(adapter);
+ /* make sure the phy is powered up */
+ e1000_power_up_phy(adapter);
+ if (e1000_loopback_test(adapter, &data[3]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* restore speed, duplex, autoneg settings */
+ hw->autoneg_advertised = autoneg_advertised;
+ hw->forced_speed_duplex = forced_speed_duplex;
+ hw->autoneg = autoneg;
+
+ e1000_reset(adapter);
+ clear_bit(__E1000_TESTING, &adapter->flags);
+ if (if_running)
+ dev_open(netdev);
+ } else {
+ e_info(hw, "online testing starting\n");
+ /* Online tests */
+ if (e1000_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* Online tests aren't run; pass by default */
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0;
+ data[3] = 0;
+
+ clear_bit(__E1000_TESTING, &adapter->flags);
+ }
+ msleep_interruptible(4 * 1000);
+}
+
+static int e1000_wol_exclusion(struct e1000_adapter *adapter,
+ struct ethtool_wolinfo *wol)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int retval = 1; /* fail by default */
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82542:
+ case E1000_DEV_ID_82543GC_FIBER:
+ case E1000_DEV_ID_82543GC_COPPER:
+ case E1000_DEV_ID_82544EI_FIBER:
+ case E1000_DEV_ID_82546EB_QUAD_COPPER:
+ case E1000_DEV_ID_82545EM_FIBER:
+ case E1000_DEV_ID_82545EM_COPPER:
+ case E1000_DEV_ID_82546GB_QUAD_COPPER:
+ case E1000_DEV_ID_82546GB_PCIE:
+ /* these don't support WoL at all */
+ wol->supported = 0;
+ break;
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ /* Wake events not supported on port B */
+ if (er32(STATUS) & E1000_STATUS_FUNC_1) {
+ wol->supported = 0;
+ break;
+ }
+ /* return success for non excluded adapter ports */
+ retval = 0;
+ break;
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ /* quad port adapters only support WoL on port A */
+ if (!adapter->quad_port_a) {
+ wol->supported = 0;
+ break;
+ }
+ /* return success for non excluded adapter ports */
+ retval = 0;
+ break;
+ default:
+		/* dual port cards only support WoL on port A from now on,
+		 * unless it was enabled in the eeprom for port B,
+		 * so exclude FUNC_1 ports from having WoL enabled */
+ if (er32(STATUS) & E1000_STATUS_FUNC_1 &&
+ !adapter->eeprom_wol) {
+ wol->supported = 0;
+ break;
+ }
+
+ retval = 0;
+ }
+
+ return retval;
+}
+
+static void e1000_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ wol->supported = WAKE_UCAST | WAKE_MCAST |
+ WAKE_BCAST | WAKE_MAGIC;
+ wol->wolopts = 0;
+
+ /* this function will set ->supported = 0 and return 1 if wol is not
+ * supported by this hardware */
+ if (e1000_wol_exclusion(adapter, wol) ||
+ !device_can_wakeup(&adapter->pdev->dev))
+ return;
+
+ /* apply any specific unsupported masks here */
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+		/* KSP3 does not support UCAST wake-ups */
+ wol->supported &= ~WAKE_UCAST;
+
+ if (adapter->wol & E1000_WUFC_EX)
+ e_err(drv, "Interface does not support directed "
+ "(unicast) frame wake-up packets\n");
+ break;
+ default:
+ break;
+ }
+
+ if (adapter->wol & E1000_WUFC_EX)
+ wol->wolopts |= WAKE_UCAST;
+ if (adapter->wol & E1000_WUFC_MC)
+ wol->wolopts |= WAKE_MCAST;
+ if (adapter->wol & E1000_WUFC_BC)
+ wol->wolopts |= WAKE_BCAST;
+ if (adapter->wol & E1000_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+ return -EOPNOTSUPP;
+
+ if (e1000_wol_exclusion(adapter, wol) ||
+ !device_can_wakeup(&adapter->pdev->dev))
+ return wol->wolopts ? -EOPNOTSUPP : 0;
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ if (wol->wolopts & WAKE_UCAST) {
+ e_err(drv, "Interface does not support directed "
+ "(unicast) frame wake-up packets\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* these settings will always override what we currently have */
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wol |= E1000_WUFC_EX;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wol |= E1000_WUFC_MC;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wol |= E1000_WUFC_BC;
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= E1000_WUFC_MAG;
+
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
+static int e1000_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ e1000_setup_led(hw);
+ return 2;
+
+ case ETHTOOL_ID_ON:
+ e1000_led_on(hw);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ e1000_led_off(hw);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ e1000_cleanup_led(hw);
+ }
+
+ return 0;
+}
+
+static int e1000_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->hw.mac_type < e1000_82545)
+ return -EOPNOTSUPP;
+
+ if (adapter->itr_setting <= 4)
+ ec->rx_coalesce_usecs = adapter->itr_setting;
+ else
+ ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
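+	/* Settings 0-4 are mode selectors rather than literal microsecond
+	 * values and are reported as-is; anything larger is stored as
+	 * interrupts/sec and converted back to an interval here.
+	 */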
+
+ return 0;
+}
+
+static int e1000_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (hw->mac_type < e1000_82545)
+ return -EOPNOTSUPP;
+
+ if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
+ ((ec->rx_coalesce_usecs > 4) &&
+ (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
+ (ec->rx_coalesce_usecs == 2))
+ return -EINVAL;
+
+ if (ec->rx_coalesce_usecs == 4) {
+ adapter->itr = adapter->itr_setting = 4;
+ } else if (ec->rx_coalesce_usecs <= 3) {
+ adapter->itr = 20000;
+ adapter->itr_setting = ec->rx_coalesce_usecs;
+ } else {
+ adapter->itr = (1000000 / ec->rx_coalesce_usecs);
+ adapter->itr_setting = adapter->itr & ~3;
+ }
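+	/* The ITR register counts in 256 ns increments, so the write below
+	 * converts interrupts/sec to an interval: e.g. itr = 20000 ints/s
+	 * gives 1000000000 / (20000 * 256) ~= 195 units (~50 us).
+	 */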
+
+ if (adapter->itr_setting != 0)
+ ew32(ITR, 1000000000 / (adapter->itr * 256));
+ else
+ ew32(ITR, 0);
+
+ return 0;
+}
+
+static int e1000_nway_reset(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ if (netif_running(netdev))
+ e1000_reinit_locked(adapter);
+ return 0;
+}
+
+static void e1000_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ int i;
+ char *p = NULL;
+
+ e1000_update_stats(adapter);
+ for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+ switch (e1000_gstrings_stats[i].type) {
+ case NETDEV_STATS:
+ p = (char *) netdev +
+ e1000_gstrings_stats[i].stat_offset;
+ break;
+ case E1000_STATS:
+ p = (char *) adapter +
+ e1000_gstrings_stats[i].stat_offset;
+ break;
+ }
+
+ data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+/* BUG_ON(i != E1000_STATS_LEN); */
+}
+
+static void e1000_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *e1000_gstrings_test,
+ sizeof(e1000_gstrings_test));
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, e1000_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
+ break;
+ }
+}
+
+static const struct ethtool_ops e1000_ethtool_ops = {
+ .get_settings = e1000_get_settings,
+ .set_settings = e1000_set_settings,
+ .get_drvinfo = e1000_get_drvinfo,
+ .get_regs_len = e1000_get_regs_len,
+ .get_regs = e1000_get_regs,
+ .get_wol = e1000_get_wol,
+ .set_wol = e1000_set_wol,
+ .get_msglevel = e1000_get_msglevel,
+ .set_msglevel = e1000_set_msglevel,
+ .nway_reset = e1000_nway_reset,
+ .get_link = e1000_get_link,
+ .get_eeprom_len = e1000_get_eeprom_len,
+ .get_eeprom = e1000_get_eeprom,
+ .set_eeprom = e1000_set_eeprom,
+ .get_ringparam = e1000_get_ringparam,
+ .set_ringparam = e1000_set_ringparam,
+ .get_pauseparam = e1000_get_pauseparam,
+ .set_pauseparam = e1000_set_pauseparam,
+ .self_test = e1000_diag_test,
+ .get_strings = e1000_get_strings,
+ .set_phys_id = e1000_set_phys_id,
+ .get_ethtool_stats = e1000_get_ethtool_stats,
+ .get_sset_count = e1000_get_sset_count,
+ .get_coalesce = e1000_get_coalesce,
+ .set_coalesce = e1000_set_coalesce,
+};
+
+void e1000_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
new file mode 100644
index 000000000000..36ee76bf4cba
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -0,0 +1,5830 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+ */
+
+/* e1000_hw.c
+ * Shared functions for accessing and configuring the MAC
+ */
+
+#include "e1000.h"
+
+static s32 e1000_check_downshift(struct e1000_hw *hw);
+static s32 e1000_check_polarity(struct e1000_hw *hw,
+ e1000_rev_polarity *polarity);
+static void e1000_clear_hw_cntrs(struct e1000_hw *hw);
+static void e1000_clear_vfta(struct e1000_hw *hw);
+static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw,
+ bool link_up);
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw);
+static s32 e1000_detect_gig_phy(struct e1000_hw *hw);
+static s32 e1000_get_auto_rd_done(struct e1000_hw *hw);
+static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
+ u16 *max_length);
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
+static s32 e1000_id_led_init(struct e1000_hw *hw);
+static void e1000_init_rx_addrs(struct e1000_hw *hw);
+static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info);
+static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info);
+static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+static s32 e1000_wait_autoneg(struct e1000_hw *hw);
+static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value);
+static s32 e1000_set_phy_type(struct e1000_hw *hw);
+static void e1000_phy_init_script(struct e1000_hw *hw);
+static s32 e1000_setup_copper_link(struct e1000_hw *hw);
+static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw);
+static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw);
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
+static s32 e1000_config_mac_to_phy(struct e1000_hw *hw);
+static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
+static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count);
+static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw);
+static s32 e1000_phy_reset_dsp(struct e1000_hw *hw);
+static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw);
+static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd);
+static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd);
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count);
+static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+ u16 phy_data);
+static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+ u16 *phy_data);
+static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count);
+static s32 e1000_acquire_eeprom(struct e1000_hw *hw);
+static void e1000_release_eeprom(struct e1000_hw *hw);
+static void e1000_standby_eeprom(struct e1000_hw *hw);
+static s32 e1000_set_vco_speed(struct e1000_hw *hw);
+static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw);
+static s32 e1000_set_phy_mode(struct e1000_hw *hw);
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+
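+/* The table below maps an IGP01 AGC register reading (used directly as the
+ * index) to an approximate cable length in meters.
+ */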
+/* IGP cable length table */
+static const
+u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = {
+	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+	5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25,
+	25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
+	40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60,
+	60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90,
+	90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100,
+	100, 100, 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110,
+	110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 120, 120,
+	120, 120, 120, 120, 120, 120, 120, 120
+};
+
+static DEFINE_SPINLOCK(e1000_eeprom_lock);
+
+/**
+ * e1000_set_phy_type - Set the phy type member in the hw struct.
+ * @hw: Struct containing variables accessed by shared code
+ */
+static s32 e1000_set_phy_type(struct e1000_hw *hw)
+{
+ e_dbg("e1000_set_phy_type");
+
+ if (hw->mac_type == e1000_undefined)
+ return -E1000_ERR_PHY_TYPE;
+
+ switch (hw->phy_id) {
+ case M88E1000_E_PHY_ID:
+ case M88E1000_I_PHY_ID:
+ case M88E1011_I_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ case M88E1118_E_PHY_ID:
+ hw->phy_type = e1000_phy_m88;
+ break;
+ case IGP01E1000_I_PHY_ID:
+ if (hw->mac_type == e1000_82541 ||
+ hw->mac_type == e1000_82541_rev_2 ||
+ hw->mac_type == e1000_82547 ||
+ hw->mac_type == e1000_82547_rev_2)
+ hw->phy_type = e1000_phy_igp;
+ break;
+ case RTL8211B_PHY_ID:
+ hw->phy_type = e1000_phy_8211;
+ break;
+ case RTL8201N_PHY_ID:
+ hw->phy_type = e1000_phy_8201;
+ break;
+ default:
+ /* Should never have loaded on this device */
+ hw->phy_type = e1000_phy_undefined;
+ return -E1000_ERR_PHY_TYPE;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_init_script - IGP phy init script - initializes the GbE PHY
+ * @hw: Struct containing variables accessed by shared code
+ */
+static void e1000_phy_init_script(struct e1000_hw *hw)
+{
+ u32 ret_val;
+ u16 phy_saved_data;
+
+ e_dbg("e1000_phy_init_script");
+
+ if (hw->phy_init_script) {
+ msleep(20);
+
+ /* Save off the current value of register 0x2F5B to be restored at
+ * the end of this routine. */
+ ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+		/* Disable the PHY transmitter */
+ e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+ msleep(20);
+
+ e1000_write_phy_reg(hw, 0x0000, 0x0140);
+ msleep(5);
+
+ switch (hw->mac_type) {
+ case e1000_82541:
+ case e1000_82547:
+ e1000_write_phy_reg(hw, 0x1F95, 0x0001);
+ e1000_write_phy_reg(hw, 0x1F71, 0xBD21);
+ e1000_write_phy_reg(hw, 0x1F79, 0x0018);
+ e1000_write_phy_reg(hw, 0x1F30, 0x1600);
+ e1000_write_phy_reg(hw, 0x1F31, 0x0014);
+ e1000_write_phy_reg(hw, 0x1F32, 0x161C);
+ e1000_write_phy_reg(hw, 0x1F94, 0x0003);
+ e1000_write_phy_reg(hw, 0x1F96, 0x003F);
+ e1000_write_phy_reg(hw, 0x2010, 0x0008);
+ break;
+
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ e1000_write_phy_reg(hw, 0x1F73, 0x0099);
+ break;
+ default:
+ break;
+ }
+
+ e1000_write_phy_reg(hw, 0x0000, 0x3300);
+ msleep(20);
+
+ /* Now enable the transmitter */
+ e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+ if (hw->mac_type == e1000_82547) {
+ u16 fused, fine, coarse;
+
+ /* Move to analog registers page */
+ e1000_read_phy_reg(hw,
+ IGP01E1000_ANALOG_SPARE_FUSE_STATUS,
+ &fused);
+
+ if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
+ e1000_read_phy_reg(hw,
+ IGP01E1000_ANALOG_FUSE_STATUS,
+ &fused);
+
+ fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
+ coarse =
+ fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
+
+ if (coarse >
+ IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
+ coarse -=
+ IGP01E1000_ANALOG_FUSE_COARSE_10;
+ fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
+ } else if (coarse ==
+ IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
+ fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
+
+ fused =
+ (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
+ (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
+ (coarse &
+ IGP01E1000_ANALOG_FUSE_COARSE_MASK);
+
+ e1000_write_phy_reg(hw,
+ IGP01E1000_ANALOG_FUSE_CONTROL,
+ fused);
+ e1000_write_phy_reg(hw,
+ IGP01E1000_ANALOG_FUSE_BYPASS,
+ IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
+ }
+ }
+ }
+}
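+
+/* Restating the analog fuse trim above: when the spare-fuse bit shows that
+ * SW control is not already enabled, the coarse value is stepped down by
+ * IGP01E1000_ANALOG_FUSE_COARSE_10 (with a one-step fine adjustment) if it
+ * is above the threshold, or only the fine value is reduced by
+ * IGP01E1000_ANALOG_FUSE_FINE_10 if it sits exactly at the threshold; the
+ * recombined trim is then written back with SW fuse control enabled.
+ */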
+
+/**
+ * e1000_set_mac_type - Set the mac type member in the hw struct.
+ * @hw: Struct containing variables accessed by shared code
+ */
+s32 e1000_set_mac_type(struct e1000_hw *hw)
+{
+ e_dbg("e1000_set_mac_type");
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82542:
+ switch (hw->revision_id) {
+ case E1000_82542_2_0_REV_ID:
+ hw->mac_type = e1000_82542_rev2_0;
+ break;
+ case E1000_82542_2_1_REV_ID:
+ hw->mac_type = e1000_82542_rev2_1;
+ break;
+ default:
+ /* Invalid 82542 revision ID */
+ return -E1000_ERR_MAC_TYPE;
+ }
+ break;
+ case E1000_DEV_ID_82543GC_FIBER:
+ case E1000_DEV_ID_82543GC_COPPER:
+ hw->mac_type = e1000_82543;
+ break;
+ case E1000_DEV_ID_82544EI_COPPER:
+ case E1000_DEV_ID_82544EI_FIBER:
+ case E1000_DEV_ID_82544GC_COPPER:
+ case E1000_DEV_ID_82544GC_LOM:
+ hw->mac_type = e1000_82544;
+ break;
+ case E1000_DEV_ID_82540EM:
+ case E1000_DEV_ID_82540EM_LOM:
+ case E1000_DEV_ID_82540EP:
+ case E1000_DEV_ID_82540EP_LOM:
+ case E1000_DEV_ID_82540EP_LP:
+ hw->mac_type = e1000_82540;
+ break;
+ case E1000_DEV_ID_82545EM_COPPER:
+ case E1000_DEV_ID_82545EM_FIBER:
+ hw->mac_type = e1000_82545;
+ break;
+ case E1000_DEV_ID_82545GM_COPPER:
+ case E1000_DEV_ID_82545GM_FIBER:
+ case E1000_DEV_ID_82545GM_SERDES:
+ hw->mac_type = e1000_82545_rev_3;
+ break;
+ case E1000_DEV_ID_82546EB_COPPER:
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546EB_QUAD_COPPER:
+ hw->mac_type = e1000_82546;
+ break;
+ case E1000_DEV_ID_82546GB_COPPER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ case E1000_DEV_ID_82546GB_SERDES:
+ case E1000_DEV_ID_82546GB_PCIE:
+ case E1000_DEV_ID_82546GB_QUAD_COPPER:
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ hw->mac_type = e1000_82546_rev_3;
+ break;
+ case E1000_DEV_ID_82541EI:
+ case E1000_DEV_ID_82541EI_MOBILE:
+ case E1000_DEV_ID_82541ER_LOM:
+ hw->mac_type = e1000_82541;
+ break;
+ case E1000_DEV_ID_82541ER:
+ case E1000_DEV_ID_82541GI:
+ case E1000_DEV_ID_82541GI_LF:
+ case E1000_DEV_ID_82541GI_MOBILE:
+ hw->mac_type = e1000_82541_rev_2;
+ break;
+ case E1000_DEV_ID_82547EI:
+ case E1000_DEV_ID_82547EI_MOBILE:
+ hw->mac_type = e1000_82547;
+ break;
+ case E1000_DEV_ID_82547GI:
+ hw->mac_type = e1000_82547_rev_2;
+ break;
+ case E1000_DEV_ID_INTEL_CE4100_GBE:
+ hw->mac_type = e1000_ce4100;
+ break;
+ default:
+ /* Should never have loaded on this device */
+ return -E1000_ERR_MAC_TYPE;
+ }
+
+ switch (hw->mac_type) {
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ hw->asf_firmware_present = true;
+ break;
+ default:
+ break;
+ }
+
+ /* The 82543 chip does not count tx_carrier_errors properly in
+ * FD mode
+ */
+ if (hw->mac_type == e1000_82543)
+ hw->bad_tx_carr_stats_fd = true;
+
+ if (hw->mac_type > e1000_82544)
+ hw->has_smbus = true;
+
+ return E1000_SUCCESS;
+}
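+
+/* Illustrative call order (an assumption about the calling driver, not
+ * something this file enforces): the MAC type is resolved first, since the
+ * reset and init paths below branch on hw->mac_type throughout:
+ *
+ * if (e1000_set_mac_type(hw) != E1000_SUCCESS)
+ * return -EIO;
+ * e1000_reset_hw(hw);
+ * e1000_init_hw(hw); (e1000_init_hw() calls e1000_set_media_type())
+ */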
+
+/**
+ * e1000_set_media_type - Set media type and TBI compatibility.
+ * @hw: Struct containing variables accessed by shared code
+ */
+void e1000_set_media_type(struct e1000_hw *hw)
+{
+ u32 status;
+
+ e_dbg("e1000_set_media_type");
+
+ if (hw->mac_type != e1000_82543) {
+ /* tbi_compatibility is only valid on 82543 */
+ hw->tbi_compatibility_en = false;
+ }
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82545GM_SERDES:
+ case E1000_DEV_ID_82546GB_SERDES:
+ hw->media_type = e1000_media_type_internal_serdes;
+ break;
+ default:
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ hw->media_type = e1000_media_type_fiber;
+ break;
+ case e1000_ce4100:
+ hw->media_type = e1000_media_type_copper;
+ break;
+ default:
+ status = er32(STATUS);
+ if (status & E1000_STATUS_TBIMODE) {
+ hw->media_type = e1000_media_type_fiber;
+ /* tbi_compatibility not valid on fiber */
+ hw->tbi_compatibility_en = false;
+ } else {
+ hw->media_type = e1000_media_type_copper;
+ }
+ break;
+ }
+ }
+}
+
+/**
+ * e1000_reset_hw - Reset the hardware completely
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ */
+s32 e1000_reset_hw(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u32 ctrl_ext;
+ u32 icr;
+ u32 manc;
+ u32 led_ctrl;
+ s32 ret_val;
+
+ e_dbg("e1000_reset_hw");
+
+ /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ e_dbg("Disabling MWI on 82542 rev 2.0\n");
+ e1000_pci_clear_mwi(hw);
+ }
+
+ /* Clear interrupt mask to stop board from generating interrupts */
+ e_dbg("Masking off all interrupts\n");
+ ew32(IMC, 0xffffffff);
+
+ /* Disable the Transmit and Receive units. Then delay to allow
+ * any pending transactions to complete before we hit the MAC with
+ * the global reset.
+ */
+ ew32(RCTL, 0);
+ ew32(TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH();
+
+ /* The tbi_compatibility_on flag must be cleared when RCTL is cleared. */
+ hw->tbi_compatibility_on = false;
+
+ /* Delay to allow any outstanding PCI transactions to complete before
+ * resetting the device
+ */
+ msleep(10);
+
+ ctrl = er32(CTRL);
+
+ /* Must reset the PHY before resetting the MAC */
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST));
+ E1000_WRITE_FLUSH();
+ msleep(5);
+ }
+
+ /* Issue a global reset to the MAC. This will reset the chip's
+ * transmit, receive, DMA, and link units. It will not affect
+ * the current PCI configuration. The global reset bit is self-
+ * clearing, and should clear within a microsecond.
+ */
+ e_dbg("Issuing a global reset to MAC\n");
+
+ switch (hw->mac_type) {
+ case e1000_82544:
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82546:
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ /* These controllers can't ack the 64-bit write when issuing the
+ * reset, so use IO-mapping as a workaround to issue the reset */
+ E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
+ break;
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ /* Reset is performed on a shadow of the control register */
+ ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
+ break;
+ case e1000_ce4100:
+ default:
+ ew32(CTRL, (ctrl | E1000_CTRL_RST));
+ break;
+ }
+
+ /* After MAC reset, force reload of EEPROM to restore power-on settings to
+ * device. Later controllers reload the EEPROM automatically, so just wait
+ * for reload to complete.
+ */
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ /* Wait for reset to complete */
+ udelay(10);
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ /* Wait for EEPROM reload */
+ msleep(2);
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ /* Wait for EEPROM reload */
+ msleep(20);
+ break;
+ default:
+ /* Auto read done will delay 5ms or poll based on mac type */
+ ret_val = e1000_get_auto_rd_done(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ }
+
+ /* Disable HW ARPs on ASF enabled adapters */
+ if (hw->mac_type >= e1000_82540) {
+ manc = er32(MANC);
+ manc &= ~(E1000_MANC_ARP_EN);
+ ew32(MANC, manc);
+ }
+
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ e1000_phy_init_script(hw);
+
+ /* Configure activity LED after PHY reset */
+ led_ctrl = er32(LEDCTL);
+ led_ctrl &= IGP_ACTIVITY_LED_MASK;
+ led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ ew32(LEDCTL, led_ctrl);
+ }
+
+ /* Clear interrupt mask to stop board from generating interrupts */
+ e_dbg("Masking off all interrupts\n");
+ ew32(IMC, 0xffffffff);
+
+ /* Clear any pending interrupt events. */
+ icr = er32(ICR);
+
+ /* If MWI was previously enabled, reenable it. */
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+ e1000_pci_set_mwi(hw);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_hw - Performs basic configuration of the adapter.
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Assumes that the controller has previously been reset and is in a
+ * post-reset uninitialized state. Initializes the receive address registers,
+ * multicast table, and VLAN filter table. Calls routines to setup link
+ * configuration and flow control settings. Clears all on-chip counters. Leaves
+ * the transmit and receive units disabled and uninitialized.
+ */
+s32 e1000_init_hw(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u32 i;
+ s32 ret_val;
+ u32 mta_size;
+ u32 ctrl_ext;
+
+ e_dbg("e1000_init_hw");
+
+ /* Initialize Identification LED */
+ ret_val = e1000_id_led_init(hw);
+ if (ret_val) {
+ e_dbg("Error Initializing Identification LED\n");
+ return ret_val;
+ }
+
+ /* Set the media type and TBI compatibility */
+ e1000_set_media_type(hw);
+
+ /* Disabling VLAN filtering. */
+ e_dbg("Initializing the IEEE VLAN\n");
+ if (hw->mac_type < e1000_82545_rev_3)
+ ew32(VET, 0);
+ e1000_clear_vfta(hw);
+
+ /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ e_dbg("Disabling MWI on 82542 rev 2.0\n");
+ e1000_pci_clear_mwi(hw);
+ ew32(RCTL, E1000_RCTL_RST);
+ E1000_WRITE_FLUSH();
+ msleep(5);
+ }
+
+ /* Setup the receive address. This involves initializing all of the Receive
+ * Address Registers (RARs 0 - 15).
+ */
+ e1000_init_rx_addrs(hw);
+
+ /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ ew32(RCTL, 0);
+ E1000_WRITE_FLUSH();
+ msleep(1);
+ if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+ e1000_pci_set_mwi(hw);
+ }
+
+ /* Zero out the Multicast HASH table */
+ e_dbg("Zeroing the MTA\n");
+ mta_size = E1000_MC_TBL_SIZE;
+ for (i = 0; i < mta_size; i++) {
+ E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+ /* use write flush to prevent Memory Write Block (MWB) from
+ * occurring when accessing our register space */
+ E1000_WRITE_FLUSH();
+ }
+
+ /* Set the PCI priority bit correctly in the CTRL register. This
+ * determines if the adapter gives priority to receives, or if it
+ * gives equal priority to transmits and receives. Valid only on
+ * 82542 and 82543 silicon.
+ */
+ if (hw->dma_fairness && hw->mac_type <= e1000_82543) {
+ ctrl = er32(CTRL);
+ ew32(CTRL, ctrl | E1000_CTRL_PRIOR);
+ }
+
+ switch (hw->mac_type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ break;
+ default:
+ /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
+ if (hw->bus_type == e1000_bus_type_pcix
+ && e1000_pcix_get_mmrbc(hw) > 2048)
+ e1000_pcix_set_mmrbc(hw, 2048);
+ break;
+ }
+
+ /* Call a subroutine to configure the link and setup flow control. */
+ ret_val = e1000_setup_link(hw);
+
+ /* Set the transmit descriptor write-back policy */
+ if (hw->mac_type > e1000_82544) {
+ ctrl = er32(TXDCTL);
+ ctrl =
+ (ctrl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB;
+ ew32(TXDCTL, ctrl);
+ }
+
+ /* Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs(hw);
+
+ if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
+ hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
+ ctrl_ext = er32(CTRL_EXT);
+ /* Relaxed ordering must be disabled to avoid a parity
+ * error crash in a PCI slot. */
+ ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+ ew32(CTRL_EXT, ctrl_ext);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_adjust_serdes_amplitude - Adjust SERDES output amplitude based on EEPROM setting.
+ * @hw: Struct containing variables accessed by shared code.
+ */
+static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
+{
+ u16 eeprom_data;
+ s32 ret_val;
+
+ e_dbg("e1000_adjust_serdes_amplitude");
+
+ if (hw->media_type != e1000_media_type_internal_serdes)
+ return E1000_SUCCESS;
+
+ switch (hw->mac_type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ break;
+ default:
+ return E1000_SUCCESS;
+ }
+
+ ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1,
+ &eeprom_data);
+ if (ret_val)
+ return ret_val;
+
+ if (eeprom_data != EEPROM_RESERVED_WORD) {
+ /* Adjust SERDES output amplitude only. */
+ eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_setup_link - Configures flow control and link settings.
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Determines which flow control settings to use. Calls the appropriate media-
+ * specific link configuration function. Configures the flow control settings.
+ * Assuming the adapter has a valid link partner, a valid link should be
+ * established. Assumes the hardware has previously been reset and the
+ * transmitter and receiver are not enabled.
+ */
+s32 e1000_setup_link(struct e1000_hw *hw)
+{
+ u32 ctrl_ext;
+ s32 ret_val;
+ u16 eeprom_data;
+
+ e_dbg("e1000_setup_link");
+
+ /* Read and store word 0x0F of the EEPROM. This word contains bits
+ * that determine the hardware's default PAUSE (flow control) mode,
+ * a bit that determines whether the HW defaults to enabling or
+ * disabling auto-negotiation, and the direction of the
+ * SW defined pins. If there is no SW over-ride of the flow
+ * control setting, then the variable hw->fc will
+ * be initialized based on a value in the EEPROM.
+ */
+ if (hw->fc == E1000_FC_DEFAULT) {
+ ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+ 1, &eeprom_data);
+ if (ret_val) {
+ e_dbg("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
+ hw->fc = E1000_FC_NONE;
+ else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) ==
+ EEPROM_WORD0F_ASM_DIR)
+ hw->fc = E1000_FC_TX_PAUSE;
+ else
+ hw->fc = E1000_FC_FULL;
+ }
+
+ /* We want to save off the original Flow Control configuration just
+ * in case we get disconnected and then reconnected into a different
+ * hub or switch with different Flow Control capabilities.
+ */
+ if (hw->mac_type == e1000_82542_rev2_0)
+ hw->fc &= (~E1000_FC_TX_PAUSE);
+
+ if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
+ hw->fc &= (~E1000_FC_RX_PAUSE);
+
+ hw->original_fc = hw->fc;
+
+ e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc);
+
+ /* Take the 4 bits from EEPROM word 0x0F that determine the initial
+ * polarity value for the SW controlled pins, and setup the
+ * Extended Device Control reg with that info.
+ * This is needed because one of the SW controlled pins is used for
+ * signal detection. So this should be done before e1000_setup_pcs_link()
+ * or e1000_phy_setup() is called.
+ */
+ if (hw->mac_type == e1000_82543) {
+ ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+ 1, &eeprom_data);
+ if (ret_val) {
+ e_dbg("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
+ SWDPIO__EXT_SHIFT);
+ ew32(CTRL_EXT, ctrl_ext);
+ }
+
+ /* Call the necessary subroutine to configure the link. */
+ ret_val = (hw->media_type == e1000_media_type_copper) ?
+ e1000_setup_copper_link(hw) : e1000_setup_fiber_serdes_link(hw);
+
+ /* Initialize the flow control address, type, and PAUSE timer
+ * registers to their default values. This is done even if flow
+ * control is disabled, because it does not hurt anything to
+ * initialize these registers.
+ */
+ e_dbg("Initializing the Flow Control address, type and timer regs\n");
+
+ ew32(FCT, FLOW_CONTROL_TYPE);
+ ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+ ew32(FCTTV, hw->fc_pause_time);
+
+ /* Set the flow control receive threshold registers. Normally,
+ * these registers will be set to a default threshold that may be
+ * adjusted later by the driver's runtime code. However, if the
+ * ability to transmit pause frames is not enabled, then these
+ * registers will be set to 0.
+ */
+ if (!(hw->fc & E1000_FC_TX_PAUSE)) {
+ ew32(FCRTL, 0);
+ ew32(FCRTH, 0);
+ } else {
+ /* We need to set up the Receive Threshold high and low water marks
+ * as well as (optionally) enabling the transmission of XON frames.
+ */
+ if (hw->fc_send_xon) {
+ ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
+ ew32(FCRTH, hw->fc_high_water);
+ } else {
+ ew32(FCRTL, hw->fc_low_water);
+ ew32(FCRTH, hw->fc_high_water);
+ }
+ }
+ return ret_val;
+}
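+
+/* Summary of the EEPROM word 0x0F PAUSE decoding in e1000_setup_link()
+ * above (a restatement, assuming EEPROM_WORD0F_PAUSE_MASK covers both the
+ * PAUSE and ASM_DIR bits, which matches how the decode is written):
+ *
+ * PAUSE | ASM_DIR | resulting hw->fc
+ * 0 | 0 | E1000_FC_NONE
+ * 0 | 1 | E1000_FC_TX_PAUSE
+ * 1 | x | E1000_FC_FULL
+ */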
+
+/**
+ * e1000_setup_fiber_serdes_link - prepare fiber or serdes link
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Manipulates Physical Coding Sublayer functions in order to configure
+ * link. Assumes the hardware has been previously reset and the transmitter
+ * and receiver are not enabled.
+ */
+static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u32 status;
+ u32 txcw = 0;
+ u32 i;
+ u32 signal = 0;
+ s32 ret_val;
+
+ e_dbg("e1000_setup_fiber_serdes_link");
+
+ /* On adapters with a MAC newer than 82544, SWDP 1 will be
+ * set when the optics detect a signal. On older adapters, it will be
+ * cleared when there is a signal. This applies to fiber media only.
+ * If we're on serdes media, adjust the output amplitude to value
+ * set in the EEPROM.
+ */
+ ctrl = er32(CTRL);
+ if (hw->media_type == e1000_media_type_fiber)
+ signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+
+ ret_val = e1000_adjust_serdes_amplitude(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Take the link out of reset */
+ ctrl &= ~(E1000_CTRL_LRST);
+
+ /* Adjust VCO speed to improve BER performance */
+ ret_val = e1000_set_vco_speed(hw);
+ if (ret_val)
+ return ret_val;
+
+ e1000_config_collision_dist(hw);
+
+ /* Check for a software override of the flow control settings, and setup
+ * the device accordingly. If auto-negotiation is enabled, then software
+ * will have to set the "PAUSE" bits to the correct value in the Tranmsit
+ * Config Word Register (TXCW) and re-start auto-negotiation. However, if
+ * auto-negotiation is disabled, then software will have to manually
+ * configure the two flow control enable bits in the CTRL register.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames, but
+ * not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we do
+ * not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ */
+ switch (hw->fc) {
+ case E1000_FC_NONE:
+ /* Flow control is completely disabled by a software over-ride. */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+ break;
+ case E1000_FC_RX_PAUSE:
+ /* RX Flow control is enabled and TX Flow control is disabled by a
+ * software over-ride. Since there really isn't a way to advertise
+ * that we are capable of RX Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric RX PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ case E1000_FC_TX_PAUSE:
+ /* TX Flow control is enabled, and RX Flow control is disabled, by a
+ * software over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+ break;
+ case E1000_FC_FULL:
+ /* Flow control (both RX and TX) is enabled by a software over-ride. */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ default:
+ e_dbg("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Since auto-negotiation is enabled, take the link out of reset (the link
+ * will be in reset, because we previously reset the chip). This will
+ * restart auto-negotiation. If auto-negotiation is successful then the
+ * link-up status bit will be set and the flow control enable bits (RFCE
+ * and TFCE) will be set according to their negotiated value.
+ */
+ e_dbg("Auto-negotiation enabled\n");
+
+ ew32(TXCW, txcw);
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
+
+ hw->txcw = txcw;
+ msleep(1);
+
+ /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
+ * indication in the Device Status Register. Time-out if a link isn't
+ * seen in 500 milliseconds (Auto-negotiation should complete in
+ * less than 500 milliseconds even if the other end is doing it in SW).
+ * For internal serdes, we just assume a signal is present, then poll.
+ */
+ if (hw->media_type == e1000_media_type_internal_serdes ||
+ (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
+ e_dbg("Looking for Link\n");
+ for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
+ msleep(10);
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU)
+ break;
+ }
+ if (i == (LINK_UP_TIMEOUT / 10)) {
+ e_dbg("Never got a valid link from auto-neg!!!\n");
+ hw->autoneg_failed = 1;
+ /* AutoNeg failed to achieve a link, so we'll call
+ * e1000_check_for_link. This routine will force the link up if
+ * we detect a signal. This will allow us to communicate with
+ * non-autonegotiating link partners.
+ */
+ ret_val = e1000_check_for_link(hw);
+ if (ret_val) {
+ e_dbg("Error while checking for link\n");
+ return ret_val;
+ }
+ hw->autoneg_failed = 0;
+ } else {
+ hw->autoneg_failed = 0;
+ e_dbg("Valid Link Found\n");
+ }
+ } else {
+ e_dbg("No Signal Detected\n");
+ }
+ return E1000_SUCCESS;
+}
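+
+/* Summary of the TXCW pause advertisement chosen above (assuming
+ * E1000_TXCW_PAUSE_MASK is the PAUSE and ASM_DIR bits together):
+ *
+ * hw->fc | advertised in TXCW
+ * E1000_FC_NONE | neither bit
+ * E1000_FC_RX_PAUSE | PAUSE and ASM_DIR (Rx-only cannot be advertised)
+ * E1000_FC_TX_PAUSE | ASM_DIR only
+ * E1000_FC_FULL | PAUSE and ASM_DIR
+ */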
+
+/**
+ * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series.
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Commits changes to PHY configuration by calling e1000_phy_reset().
+ */
+static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ /* SW reset the PHY so all changes take effect */
+ ret_val = e1000_phy_reset(hw);
+ if (ret_val) {
+ e_dbg("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+static s32 gbe_dhg_phy_setup(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u32 ctrl_aux;
+
+ switch (hw->phy_type) {
+ case e1000_phy_8211:
+ ret_val = e1000_copper_link_rtl_setup(hw);
+ if (ret_val) {
+ e_dbg("e1000_copper_link_rtl_setup failed!\n");
+ return ret_val;
+ }
+ break;
+ case e1000_phy_8201:
+ /* Set RMII mode */
+ ctrl_aux = er32(CTL_AUX);
+ ctrl_aux |= E1000_CTL_AUX_RMII;
+ ew32(CTL_AUX, ctrl_aux);
+ E1000_WRITE_FLUSH();
+
+ /* Disable the J/K bits required for receive */
+ ctrl_aux = er32(CTL_AUX);
+ ctrl_aux |= 0x4;
+ ctrl_aux &= ~0x2;
+ ew32(CTL_AUX, ctrl_aux);
+ E1000_WRITE_FLUSH();
+ ret_val = e1000_copper_link_rtl_setup(hw);
+
+ if (ret_val) {
+ e_dbg("e1000_copper_link_rtl_setup failed!\n");
+ return ret_val;
+ }
+ break;
+ default:
+ e_dbg("Unsupported PHY type\n");
+ return -E1000_ERR_PHY_TYPE;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_copper_link_preconfig - early configuration for copper
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Make sure we have a valid PHY and change PHY mode before link setup.
+ */
+static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_copper_link_preconfig");
+
+ ctrl = er32(CTRL);
+ /* With 82543, we need to force speed and duplex on the MAC equal to what
+ * the PHY speed and duplex configuration is. In addition, we need to
+ * perform a hardware reset on the PHY to take it out of reset.
+ */
+ if (hw->mac_type > e1000_82543) {
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ew32(CTRL, ctrl);
+ } else {
+ ctrl |=
+ (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
+ ew32(CTRL, ctrl);
+ ret_val = e1000_phy_hw_reset(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Make sure we have a valid PHY */
+ ret_val = e1000_detect_gig_phy(hw);
+ if (ret_val) {
+ e_dbg("Error, did not detect valid phy.\n");
+ return ret_val;
+ }
+ e_dbg("Phy ID = %x\n", hw->phy_id);
+
+ /* Set PHY to class A mode (if necessary) */
+ ret_val = e1000_set_phy_mode(hw);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->mac_type == e1000_82545_rev_3) ||
+ (hw->mac_type == e1000_82546_rev_3)) {
+ ret_val =
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ phy_data |= 0x00000008;
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ }
+
+ if (hw->mac_type <= e1000_82543 ||
+ hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
+ hw->mac_type == e1000_82541_rev_2 ||
+ hw->mac_type == e1000_82547_rev_2)
+ hw->phy_reset_disable = false;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_copper_link_igp_setup - Copper link setup for e1000_phy_igp series.
+ * @hw: Struct containing variables accessed by shared code
+ */
+static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
+{
+ u32 led_ctrl;
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_copper_link_igp_setup");
+
+ if (hw->phy_reset_disable)
+ return E1000_SUCCESS;
+
+ ret_val = e1000_phy_reset(hw);
+ if (ret_val) {
+ e_dbg("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ /* Wait 15ms for MAC to configure PHY from eeprom settings */
+ msleep(15);
+ /* Configure activity LED after PHY reset */
+ led_ctrl = er32(LEDCTL);
+ led_ctrl &= IGP_ACTIVITY_LED_MASK;
+ led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ ew32(LEDCTL, led_ctrl);
+
+ /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */
+ if (hw->phy_type == e1000_phy_igp) {
+ /* disable lplu d3 during driver init */
+ ret_val = e1000_set_d3_lplu_state(hw, false);
+ if (ret_val) {
+ e_dbg("Error Disabling LPLU D3\n");
+ return ret_val;
+ }
+ }
+
+ /* Configure mdi-mdix settings */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ hw->dsp_config_state = e1000_dsp_config_disabled;
+ /* Force MDI for earlier revs of the IGP PHY */
+ phy_data &=
+ ~(IGP01E1000_PSCR_AUTO_MDIX |
+ IGP01E1000_PSCR_FORCE_MDI_MDIX);
+ hw->mdix = 1;
+
+ } else {
+ hw->dsp_config_state = e1000_dsp_config_enabled;
+ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+ switch (hw->mdix) {
+ case 1:
+ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 2:
+ phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 0:
+ default:
+ phy_data |= IGP01E1000_PSCR_AUTO_MDIX;
+ break;
+ }
+ }
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* set auto-master slave resolution settings */
+ if (hw->autoneg) {
+ e1000_ms_type phy_ms_setting = hw->master_slave;
+
+ if (hw->ffe_config_state == e1000_ffe_config_active)
+ hw->ffe_config_state = e1000_ffe_config_enabled;
+
+ if (hw->dsp_config_state == e1000_dsp_config_activated)
+ hw->dsp_config_state = e1000_dsp_config_enabled;
+
+ /* when autonegotiation advertisement is only 1000Mbps then we
+ * should disable SmartSpeed and enable Auto MasterSlave
+ * resolution as hardware default. */
+ if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
+ /* Disable SmartSpeed */
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ /* Set auto Master/Slave resolution process */
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data &= ~CR_1000T_MS_ENABLE;
+ ret_val =
+ e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* load defaults for future use */
+ hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ?
+ ((phy_data & CR_1000T_MS_VALUE) ?
+ e1000_ms_force_master :
+ e1000_ms_force_slave) : e1000_ms_auto;
+
+ switch (phy_ms_setting) {
+ case e1000_ms_force_master:
+ phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_force_slave:
+ phy_data |= CR_1000T_MS_ENABLE;
+ phy_data &= ~(CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_auto:
+ phy_data &= ~CR_1000T_MS_ENABLE;
+ /* fall through */
+ default:
+ break;
+ }
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
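+
+/* Decoding of CR_1000T_MS_ENABLE/CR_1000T_MS_VALUE used for
+ * hw->original_master_slave above (a restatement of the ternary):
+ *
+ * MS_ENABLE | MS_VALUE | original_master_slave
+ * 0 | x | e1000_ms_auto
+ * 1 | 0 | e1000_ms_force_slave
+ * 1 | 1 | e1000_ms_force_master
+ */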
+
+/**
+ * e1000_copper_link_mgp_setup - Copper link setup for e1000_phy_m88 series.
+ * @hw: Struct containing variables accessed by shared code
+ */
+static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_copper_link_mgp_setup");
+
+ if (hw->phy_reset_disable)
+ return E1000_SUCCESS;
+
+ /* Enable CRS on TX. This must be set for half-duplex operation. */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+ /* Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+ switch (hw->mdix) {
+ case 1:
+ phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+ break;
+ case 2:
+ phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+ break;
+ case 3:
+ phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+ break;
+ case 0:
+ default:
+ phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+ break;
+ }
+
+ /* Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+ if (hw->disable_polarity_correction == 1)
+ phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->phy_revision < M88E1011_I_REV_4) {
+ /* Force TX_CLK in the Extended PHY Specific Control Register
+ * to 25MHz clock.
+ */
+ ret_val =
+ e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+ if ((hw->phy_revision == E1000_REVISION_2) &&
+ (hw->phy_id == M88E1111_I_PHY_ID)) {
+ /* Vidalia Phy, set the downshift counter to 5x */
+ phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK);
+ phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+ ret_val = e1000_write_phy_reg(hw,
+ M88E1000_EXT_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Configure Master and Slave downshift values */
+ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+ phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+ ret_val = e1000_write_phy_reg(hw,
+ M88E1000_EXT_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ /* SW Reset the PHY so all changes take effect */
+ ret_val = e1000_phy_reset(hw);
+ if (ret_val) {
+ e_dbg("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_copper_link_autoneg - setup auto-neg
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Setup auto-negotiation and flow control advertisements,
+ * and then perform auto-negotiation.
+ */
+static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_copper_link_autoneg");
+
+ /* Perform some bounds checking on the hw->autoneg_advertised
+ * parameter. If this variable is zero, then set it to the default.
+ */
+ hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+ /* If autoneg_advertised is zero, we assume it was not defaulted
+ * by the calling code so we set to advertise full capability.
+ */
+ if (hw->autoneg_advertised == 0)
+ hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+ /* IFE/RTL8201N PHY only supports 10/100 */
+ if (hw->phy_type == e1000_phy_8201)
+ hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
+
+ e_dbg("Reconfiguring auto-neg advertisement params\n");
+ ret_val = e1000_phy_setup_autoneg(hw);
+ if (ret_val) {
+ e_dbg("Error Setting up Auto-Negotiation\n");
+ return ret_val;
+ }
+ e_dbg("Restarting Auto-Neg\n");
+
+ /* Restart auto-negotiation by setting the Auto Neg Enable bit and
+ * the Auto Neg Restart bit in the PHY control register.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Does the user want to wait for Auto-Neg to complete here, or
+ * check at a later time (for example, callback routine).
+ */
+ if (hw->wait_autoneg_complete) {
+ ret_val = e1000_wait_autoneg(hw);
+ if (ret_val) {
+ e_dbg("Error while waiting for autoneg to complete\n");
+ return ret_val;
+ }
+ }
+
+ hw->get_link_status = true;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_copper_link_postconfig - post link setup
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Config the MAC and the PHY after link is up.
+ * 1) Set up the MAC to the current PHY speed/duplex if we are on 82543.
+ * If we are on newer silicon, we only need to configure collision
+ * distance in the Transmit Control Register.
+ * 2) Set up flow control on the MAC to that established with
+ * the link partner.
+ * 3) Config DSP to improve Gigabit link quality for some PHY revisions.
+ */
+static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ e_dbg("e1000_copper_link_postconfig");
+
+ if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) {
+ e1000_config_collision_dist(hw);
+ } else {
+ ret_val = e1000_config_mac_to_phy(hw);
+ if (ret_val) {
+ e_dbg("Error configuring MAC to PHY settings\n");
+ return ret_val;
+ }
+ }
+ ret_val = e1000_config_fc_after_link_up(hw);
+ if (ret_val) {
+ e_dbg("Error Configuring Flow Control\n");
+ return ret_val;
+ }
+
+ /* Config DSP to improve Giga link quality */
+ if (hw->phy_type == e1000_phy_igp) {
+ ret_val = e1000_config_dsp_after_link_change(hw, true);
+ if (ret_val) {
+ e_dbg("Error Configuring DSP after link up\n");
+ return ret_val;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_setup_copper_link - phy/speed/duplex setting
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Detects which PHY is present and sets up the speed and duplex
+ */
+static s32 e1000_setup_copper_link(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 i;
+ u16 phy_data;
+
+ e_dbg("e1000_setup_copper_link");
+
+ /* Check if it is a valid PHY and set PHY mode if necessary. */
+ ret_val = e1000_copper_link_preconfig(hw);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->phy_type == e1000_phy_igp) {
+ ret_val = e1000_copper_link_igp_setup(hw);
+ if (ret_val)
+ return ret_val;
+ } else if (hw->phy_type == e1000_phy_m88) {
+ ret_val = e1000_copper_link_mgp_setup(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ ret_val = gbe_dhg_phy_setup(hw);
+ if (ret_val) {
+ e_dbg("gbe_dhg_phy_setup failed!\n");
+ return ret_val;
+ }
+ }
+
+ if (hw->autoneg) {
+ /* Setup autoneg and flow control advertisement
+ * and perform autonegotiation */
+ ret_val = e1000_copper_link_autoneg(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* PHY will be set to 10H, 10F, 100H, or 100F
+ * depending on the value of forced_speed_duplex. */
+ e_dbg("Forcing speed and duplex\n");
+ ret_val = e1000_phy_force_speed_duplex(hw);
+ if (ret_val) {
+ e_dbg("Error Forcing Speed and Duplex\n");
+ return ret_val;
+ }
+ }
+
+ /* Check link status. Wait up to 100 microseconds for link to become
+ * valid.
+ */
+ for (i = 0; i < 10; i++) {
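+ /* Read the status register twice: the link status bit is
+ * latched-low, so the first read clears any stale value (the
+ * same double-read pattern used elsewhere in this file). */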
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy_data & MII_SR_LINK_STATUS) {
+ /* Config the MAC and PHY after link is up */
+ ret_val = e1000_copper_link_postconfig(hw);
+ if (ret_val)
+ return ret_val;
+
+ e_dbg("Valid link established!!!\n");
+ return E1000_SUCCESS;
+ }
+ udelay(10);
+ }
+
+ e_dbg("Unable to establish link!!!\n");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_setup_autoneg - phy settings
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Configures PHY autoneg and flow control advertisement settings
+ */
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 mii_autoneg_adv_reg;
+ u16 mii_1000t_ctrl_reg;
+
+ e_dbg("e1000_phy_setup_autoneg");
+
+ /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ if (hw->phy_type == e1000_phy_8201)
+ mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
+
+ /* Need to parse both autoneg_advertised and fc and set up
+ * the appropriate PHY registers. First we will parse for
+ * autoneg_advertised software override. Since we can advertise
+ * a plethora of combinations, we need to check each bit
+ * individually.
+ */
+
+ /* First we clear all the 10/100 mb speed bits in the Auto-Neg
+ * Advertisement Register (Address 4) and the 1000 mb speed bits in
+ * the 1000Base-T Control Register (Address 9).
+ */
+ mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
+ mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
+
+ e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised);
+
+ /* Do we want to advertise 10 Mb Half Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
+ e_dbg("Advertise 10mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+ }
+
+ /* Do we want to advertise 10 Mb Full Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
+ e_dbg("Advertise 10mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Half Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
+ e_dbg("Advertise 100mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Full Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
+ e_dbg("Advertise 100mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+ }
+
+ /* We do not allow the PHY to advertise 1000 Mb Half Duplex */
+ if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
+ e_dbg("Advertise 1000mb Half duplex requested, request denied!\n");
+ }
+
+ /* Do we want to advertise 1000 Mb Full Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
+ e_dbg("Advertise 1000mb Full duplex\n");
+ mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+ }
+
+ /* Check for a software override of the flow control settings, and
+ * setup the PHY advertisement registers accordingly. If
+ * auto-negotiation is enabled, then software will have to set the
+ * "PAUSE" bits to the correct value in the Auto-Negotiation
+ * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ * other: No software override. The flow control configuration
+ * in the EEPROM is used.
+ */
+ switch (hw->fc) {
+ case E1000_FC_NONE: /* 0 */
+ /* Flow control (RX & TX) is completely disabled by a
+ * software over-ride.
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case E1000_FC_RX_PAUSE: /* 1 */
+ /* RX Flow control is enabled, and TX Flow control is
+ * disabled, by a software over-ride.
+ */
+ /* Since there really isn't a way to advertise that we are
+ * capable of RX Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric RX PAUSE. Later
+ * (in e1000_config_fc_after_link_up) we will disable the
+ * hw's ability to send PAUSE frames.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case E1000_FC_TX_PAUSE: /* 2 */
+ /* TX Flow control is enabled, and RX Flow control is
+ * disabled, by a software over-ride.
+ */
+ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+ mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+ break;
+ case E1000_FC_FULL: /* 3 */
+ /* Flow control (both RX and TX) is enabled by a software
+ * over-ride.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ default:
+ e_dbg("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+ if (hw->phy_type == e1000_phy_8201) {
+ mii_1000t_ctrl_reg = 0;
+ } else {
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL,
+ mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_force_speed_duplex - force link settings
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Force PHY speed and duplex settings to hw->forced_speed_duplex
+ */
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 mii_ctrl_reg;
+ u16 mii_status_reg;
+ u16 phy_data;
+ u16 i;
+
+ e_dbg("e1000_phy_force_speed_duplex");
+
+ /* Turn off Flow control if we are forcing speed and duplex. */
+ hw->fc = E1000_FC_NONE;
+
+ e_dbg("hw->fc = %d\n", hw->fc);
+
+ /* Read the Device Control Register. */
+ ctrl = er32(CTRL);
+
+ /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~(DEVICE_SPEED_MASK);
+
+ /* Clear the Auto Speed Detect Enable bit. */
+ ctrl &= ~E1000_CTRL_ASDE;
+
+ /* Read the MII Control Register. */
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+
+ /* We need to disable autoneg in order to force link and duplex. */
+
+ mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN;
+
+ /* Are we forcing Full or Half Duplex? */
+ if (hw->forced_speed_duplex == e1000_100_full ||
+ hw->forced_speed_duplex == e1000_10_full) {
+ /* We want to force full duplex so we SET the full duplex bits in the
+ * Device and MII Control Registers.
+ */
+ ctrl |= E1000_CTRL_FD;
+ mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
+ e_dbg("Full Duplex\n");
+ } else {
+ /* We want to force half duplex so we CLEAR the full duplex bits in
+ * the Device and MII Control Registers.
+ */
+ ctrl &= ~E1000_CTRL_FD;
+ mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
+ e_dbg("Half Duplex\n");
+ }
+
+ /* Are we forcing 100Mbps? */
+ if (hw->forced_speed_duplex == e1000_100_full ||
+ hw->forced_speed_duplex == e1000_100_half) {
+ /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */
+ ctrl |= E1000_CTRL_SPD_100;
+ mii_ctrl_reg |= MII_CR_SPEED_100;
+ mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+ e_dbg("Forcing 100mb ");
+ } else {
+ /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
+ ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+ mii_ctrl_reg |= MII_CR_SPEED_10;
+ mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+ e_dbg("Forcing 10mb ");
+ }
+
+ e1000_config_collision_dist(hw);
+
+ /* Write the configured values back to the Device Control Reg. */
+ ew32(CTRL, ctrl);
+
+ if (hw->phy_type == e1000_phy_m88) {
+ ret_val =
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+ * forced whenever speed or duplex are forced.
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e_dbg("M88E1000 PSCR: %x\n", phy_data);
+
+ /* Need to reset the PHY or these changes will be ignored */
+ mii_ctrl_reg |= MII_CR_RESET;
+
+ /* Disable MDI-X support for 10/100 */
+ } else {
+ /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
+ * forced whenever speed or duplex are forced.
+ */
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Write back the modified PHY MII control register. */
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+
+ udelay(1);
+
+ /* The wait_autoneg_complete flag may be a little misleading here.
+ * Since we are forcing speed and duplex, Auto-Neg is not enabled.
+ * But we do want to delay for a period while forcing only so we
+ * don't generate false No Link messages. So we will wait here
+ * only if the user has set wait_autoneg_complete to 1, which is
+ * the default.
+ */
+ if (hw->wait_autoneg_complete) {
+ /* We will wait for autoneg to complete. */
+ e_dbg("Waiting for forced speed/duplex link.\n");
+ mii_status_reg = 0;
+
+ /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Auto-Neg Complete bit
+ * to be set.
+ */
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (mii_status_reg & MII_SR_LINK_STATUS)
+ break;
+ msleep(100);
+ }
+ if ((i == 0) && (hw->phy_type == e1000_phy_m88)) {
+ /* We didn't get link. Reset the DSP and wait again for link. */
+ ret_val = e1000_phy_reset_dsp(hw);
+ if (ret_val) {
+ e_dbg("Error Resetting PHY DSP\n");
+ return ret_val;
+ }
+ }
+ /* This loop will early-out if the link condition has been met. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ if (mii_status_reg & MII_SR_LINK_STATUS)
+ break;
+ msleep(100);
+ /* Read the MII Status Register and wait for Auto-Neg Complete bit
+ * to be set.
+ */
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ if (hw->phy_type == e1000_phy_m88) {
+ /* Because we reset the PHY above, we need to re-force TX_CLK in the
+ * Extended PHY Specific Control Register to 25MHz clock. This value
+ * defaults back to a 2.5MHz clock when the PHY is reset.
+ */
+ ret_val =
+ e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* In addition, because of the s/w reset above, we need to enable CRS on
+ * TX. This must be set for both full and half duplex operation.
+ */
+ ret_val =
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543)
+ && (!hw->autoneg)
+ && (hw->forced_speed_duplex == e1000_10_full
+ || hw->forced_speed_duplex == e1000_10_half)) {
+ ret_val = e1000_polarity_reversal_workaround(hw);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_config_collision_dist - set collision distance register
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Sets the collision distance in the Transmit Control register. Link
+ * should have been established previously.
+ */
+void e1000_config_collision_dist(struct e1000_hw *hw)
+{
+ u32 tctl, coll_dist;
+
+ e_dbg("e1000_config_collision_dist");
+
+ if (hw->mac_type < e1000_82543)
+ coll_dist = E1000_COLLISION_DISTANCE_82542;
+ else
+ coll_dist = E1000_COLLISION_DISTANCE;
+
+ tctl = er32(TCTL);
+
+ tctl &= ~E1000_TCTL_COLD;
+ tctl |= coll_dist << E1000_COLD_SHIFT;
+
+ ew32(TCTL, tctl);
+ E1000_WRITE_FLUSH();
+}
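+
+/* Illustrative expansion of the read-modify-write above, assuming the usual
+ * e1000_hw.h values E1000_COLD_SHIFT == 12 and E1000_COLLISION_DISTANCE == 63
+ * (assumptions for illustration; the real constants live in e1000_hw.h):
+ *
+ * tctl = (tctl & ~E1000_TCTL_COLD) | (63 << 12);
+ *
+ * i.e. the collision distance occupies the TCTL COLD bit-field starting at
+ * bit 12.
+ */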
+
+/**
+ * e1000_config_mac_to_phy - sync phy and mac settings
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Sets MAC speed and duplex settings to reflect those of the PHY, reading
+ * the required speed/duplex information directly from the PHY.
+ */
+static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_config_mac_to_phy");
+
+ /* 82544 or newer MAC, Auto Speed Detection takes care of
+ * MAC speed/duplex configuration. */
+ if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100))
+ return E1000_SUCCESS;
+
+ /* Read the Device Control Register and set the bits to Force Speed
+ * and Duplex.
+ */
+ ctrl = er32(CTRL);
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
+
+ switch (hw->phy_type) {
+ case e1000_phy_8201:
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy_data & RTL_PHY_CTRL_FD)
+ ctrl |= E1000_CTRL_FD;
+ else
+ ctrl &= ~E1000_CTRL_FD;
+
+ if (phy_data & RTL_PHY_CTRL_SPD_100)
+ ctrl |= E1000_CTRL_SPD_100;
+ else
+ ctrl |= E1000_CTRL_SPD_10;
+
+ e1000_config_collision_dist(hw);
+ break;
+ default:
+ /* Set up duplex in the Device Control and Transmit Control
+ * registers depending on negotiated values.
+ */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy_data & M88E1000_PSSR_DPLX)
+ ctrl |= E1000_CTRL_FD;
+ else
+ ctrl &= ~E1000_CTRL_FD;
+
+ e1000_config_collision_dist(hw);
+
+ /* Set up speed in the Device Control register depending on
+ * negotiated values.
+ */
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+ ctrl |= E1000_CTRL_SPD_1000;
+ else if ((phy_data & M88E1000_PSSR_SPEED) ==
+ M88E1000_PSSR_100MBS)
+ ctrl |= E1000_CTRL_SPD_100;
+ }
+
+ /* Write the configured values back to the Device Control Reg. */
+ ew32(CTRL, ctrl);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_force_mac_fc - force flow control settings
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Forces the MAC's flow control settings.
+ * Sets the TFCE and RFCE bits in the device control register to reflect
+ * the adapter settings. TFCE and RFCE need to be explicitly set by
+ * software when a Copper PHY is used because autonegotiation is managed
+ * by the PHY rather than the MAC. Software must also configure these
+ * bits when link is forced on a fiber connection.
+ */
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ e_dbg("e1000_force_mac_fc");
+
+ /* Get the current configuration of the Device Control Register */
+ ctrl = er32(CTRL);
+
+ /* Because we didn't get link via the internal auto-negotiation
+ * mechanism (we either forced link or we got link via PHY
+ * auto-neg), we have to manually enable/disable transmit and
+ * receive flow control.
+ *
+ * The "Case" statement below enables/disable flow control
+ * according to the "hw->fc" parameter.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause
+ * frames but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not receive pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ * other: No other values should be possible at this point.
+ */
+
+ switch (hw->fc) {
+ case E1000_FC_NONE:
+ ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+ break;
+ case E1000_FC_RX_PAUSE:
+ ctrl &= (~E1000_CTRL_TFCE);
+ ctrl |= E1000_CTRL_RFCE;
+ break;
+ case E1000_FC_TX_PAUSE:
+ ctrl &= (~E1000_CTRL_RFCE);
+ ctrl |= E1000_CTRL_TFCE;
+ break;
+ case E1000_FC_FULL:
+ ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+ break;
+ default:
+ e_dbg("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Disable TX Flow Control for 82542 (rev 2.0) */
+ if (hw->mac_type == e1000_82542_rev2_0)
+ ctrl &= (~E1000_CTRL_TFCE);
+
+ ew32(CTRL, ctrl);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_config_fc_after_link_up - configure flow control after autoneg
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Configures flow control settings after link is established
+ * Should be called immediately after a valid link has been established.
+ * Forces MAC flow control settings if link was forced. When in MII/GMII mode
+ * and autonegotiation is enabled, the MAC flow control settings will be set
+ * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
+ * and RFCE bits will be automatically set to the negotiated flow control mode.
+ */
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 mii_status_reg;
+ u16 mii_nway_adv_reg;
+ u16 mii_nway_lp_ability_reg;
+ u16 speed;
+ u16 duplex;
+
+ e_dbg("e1000_config_fc_after_link_up");
+
+ /* Check for the case where we have fiber media and auto-neg failed
+ * so we had to force link. In this case, we need to force the
+ * configuration of the MAC to match the "fc" parameter.
+ */
+ if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed))
+ || ((hw->media_type == e1000_media_type_internal_serdes)
+ && (hw->autoneg_failed))
+ || ((hw->media_type == e1000_media_type_copper)
+ && (!hw->autoneg))) {
+ ret_val = e1000_force_mac_fc(hw);
+ if (ret_val) {
+ e_dbg("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
+ /* Check for the case where we have copper media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
+ /* Read the MII Status Register and check to see if AutoNeg
+ * has completed. We read this twice because this reg has
+ * some "sticky" (latched) bits.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement Register
+ * (Address 4) and the Auto-Negotiation Base Page Ability
+ * Register (Address 5) to determine how flow control was
+ * negotiated.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
+ &mii_nway_adv_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
+ &mii_nway_lp_ability_reg);
+ if (ret_val)
+ return ret_val;
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (Address 4) and two bits in the Auto Negotiation Base
+ * Page Ability Register (Address 5) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | E1000_FC_NONE
+ * 0 | 1 | 0 | DC | E1000_FC_NONE
+ * 0 | 1 | 1 | 0 | E1000_FC_NONE
+ * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
+ * 1 | 0 | 0 | DC | E1000_FC_NONE
+ * 1 | DC | 1 | DC | E1000_FC_FULL
+ * 1 | 1 | 0 | 0 | E1000_FC_NONE
+ * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
+ *
+ */
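+ /* Condensed, the table above reduces to three tests; every other
+ * combination resolves to E1000_FC_NONE (subject to the
+ * hw->original_fc adjustments made in the code below):
+ *
+ * PAUSE && lp PAUSE -> E1000_FC_FULL
+ * !PAUSE && ASM_DIR && lp PAUSE && lp ASM_DIR -> E1000_FC_TX_PAUSE
+ * PAUSE && ASM_DIR && !lp PAUSE && lp ASM_DIR -> E1000_FC_RX_PAUSE
+ */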
+ /* Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | E1000_FC_FULL
+ *
+ */
+ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+ /* Now we need to check if the user selected RX ONLY
+ * for pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->original_fc == E1000_FC_FULL) {
+ hw->fc = E1000_FC_FULL;
+ e_dbg("Flow Control = FULL.\n");
+ } else {
+ hw->fc = E1000_FC_RX_PAUSE;
+ e_dbg
+ ("Flow Control = RX PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
+ *
+ */
+ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
+ {
+ hw->fc = E1000_FC_TX_PAUSE;
+ e_dbg
+ ("Flow Control = TX PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
+ *
+ */
+ else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
+ {
+ hw->fc = E1000_FC_RX_PAUSE;
+ e_dbg
+ ("Flow Control = RX PAUSE frames only.\n");
+ }
+ /* Per the IEEE spec, at this point flow control should be
+ * disabled. However, we want to consider that we could
+ * be connected to a legacy switch that doesn't advertise
+ * desired flow control, but can be forced on the link
+ * partner. So if we advertised no flow control, that is
+ * what we will resolve to. If we advertised some kind of
+ * receive capability (Rx Pause Only or Full Flow Control)
+ * and the link partner advertised none, we will configure
+ * ourselves to enable Rx Flow Control only. We can do
+ * this safely for two reasons: If the link partner really
+ * didn't want flow control enabled, and we enable Rx, no
+ * harm done since we won't be receiving any PAUSE frames
+ * anyway. If the intent on the link partner was to have
+ * flow control enabled, then by us enabling RX only, we
+ * can at least receive pause frames and process them.
+ * This is a good idea because in most cases, since we are
+ * predominantly a server NIC, we will more often be asked
+ * to delay transmission of packets than to ask our link
+ * partner to pause transmission of frames.
+ */
+ else if ((hw->original_fc == E1000_FC_NONE ||
+ hw->original_fc == E1000_FC_TX_PAUSE) ||
+ hw->fc_strict_ieee) {
+ hw->fc = E1000_FC_NONE;
+ e_dbg("Flow Control = NONE.\n");
+ } else {
+ hw->fc = E1000_FC_RX_PAUSE;
+ e_dbg
+ ("Flow Control = RX PAUSE frames only.\n");
+ }
+
+ /* Now we need to do one last check... If we auto-
+ * negotiated to HALF DUPLEX, flow control should not be
+ * enabled per IEEE 802.3 spec.
+ */
+ ret_val =
+ e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ e_dbg
+ ("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+
+ if (duplex == HALF_DUPLEX)
+ hw->fc = E1000_FC_NONE;
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ ret_val = e1000_force_mac_fc(hw);
+ if (ret_val) {
+ e_dbg
+ ("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ } else {
+ e_dbg
+ ("Copper PHY and Auto Neg has not completed.\n");
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_for_serdes_link_generic - Check for link (Serdes)
+ * @hw: pointer to the HW structure
+ *
+ * Checks for link up on the hardware. If link is not up and we have
+ * a signal, then we need to force link up.
+ */
+static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
+{
+ u32 rxcw;
+ u32 ctrl;
+ u32 status;
+ s32 ret_val = E1000_SUCCESS;
+
+ e_dbg("e1000_check_for_serdes_link_generic");
+
+ ctrl = er32(CTRL);
+ status = er32(STATUS);
+ rxcw = er32(RXCW);
+
+ /*
+ * If we don't have link (auto-negotiation failed or link partner
+ * cannot auto-negotiate), and our link partner is not trying to
+ * auto-negotiate with us (we are receiving idles or data),
+ * we need to force link up. We also need to give auto-negotiation
+ * time to complete.
+ */
+ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+ if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
+ if (hw->autoneg_failed == 0) {
+ hw->autoneg_failed = 1;
+ goto out;
+ }
+ e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = er32(CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ ew32(CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000_config_fc_after_link_up(hw);
+ if (ret_val) {
+ e_dbg("Error configuring flow control\n");
+ goto out;
+ }
+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ /*
+ * If we are forcing link and we are receiving /C/ ordered
+ * sets, re-enable auto-negotiation in the TXCW register
+ * and disable forced link in the Device Control register
+ * in an attempt to auto-negotiate with our link partner.
+ */
+ e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
+ ew32(TXCW, hw->txcw);
+ ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ hw->serdes_has_link = true;
+ } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
+ /*
+ * If we force link for non-auto-negotiation switch, check
+ * link status based on MAC synchronization for internal
+ * serdes media type.
+ */
+ /* SYNCH bit and IV bit are sticky. */
+ udelay(10);
+ rxcw = er32(RXCW);
+ if (rxcw & E1000_RXCW_SYNCH) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ hw->serdes_has_link = true;
+ e_dbg("SERDES: Link up - forced.\n");
+ }
+ } else {
+ hw->serdes_has_link = false;
+ e_dbg("SERDES: Link down - force failed.\n");
+ }
+ }
+
+ if (E1000_TXCW_ANE & er32(TXCW)) {
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU) {
+ /* SYNCH bit and IV bit are sticky, so reread rxcw. */
+ udelay(10);
+ rxcw = er32(RXCW);
+ if (rxcw & E1000_RXCW_SYNCH) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ hw->serdes_has_link = true;
+ e_dbg("SERDES: Link up - autoneg "
+ "completed successfully.\n");
+ } else {
+ hw->serdes_has_link = false;
+ e_dbg("SERDES: Link down - invalid"
+ "codewords detected in autoneg.\n");
+ }
+ } else {
+ hw->serdes_has_link = false;
+ e_dbg("SERDES: Link down - no sync.\n");
+ }
+ } else {
+ hw->serdes_has_link = false;
+ e_dbg("SERDES: Link down - autoneg failed\n");
+ }
+ }
+
+ out:
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_link - check if the link status has changed
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Checks to see if the link status of the hardware has changed.
+ * Called by any function that needs to check the link status of the adapter.
+ */
+s32 e1000_check_for_link(struct e1000_hw *hw)
+{
+ u32 rxcw = 0;
+ u32 ctrl;
+ u32 status;
+ u32 rctl;
+ u32 icr;
+ u32 signal = 0;
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_check_for_link");
+
+ ctrl = er32(CTRL);
+ status = er32(STATUS);
+
+ /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be
+ * set when the optics detect a signal. On older adapters, it will be
+ * cleared when there is a signal. This applies to fiber media only.
+ */
+ if ((hw->media_type == e1000_media_type_fiber) ||
+ (hw->media_type == e1000_media_type_internal_serdes)) {
+ rxcw = er32(RXCW);
+
+ if (hw->media_type == e1000_media_type_fiber) {
+ signal = (hw->mac_type > e1000_82544) ?
+ E1000_CTRL_SWDPIN1 : 0;
+ if (status & E1000_STATUS_LU)
+ hw->get_link_status = false;
+ }
+ }
+
+ /* If we have a copper PHY then we only want to go out to the PHY
+ * registers to see if Auto-Neg has completed and/or if our link
+ * status has changed. The get_link_status flag will be set if we
+ * receive a Link Status Change interrupt or we have Rx Sequence
+ * Errors.
+ */
+ if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
+ /* First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ * Read the register twice since the link bit is sticky.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy_data & MII_SR_LINK_STATUS) {
+ hw->get_link_status = false;
+ /* Check if there was DownShift, must be checked immediately after
+ * link-up */
+ e1000_check_downshift(hw);
+
+ /* If we are on 82544 or 82543 silicon and speed/duplex
+ * are forced to 10H or 10F, then we will implement the polarity
+ * reversal workaround. We disable interrupts first, and upon
+ * returning, place the devices interrupt state to its previous
+ * value except for the link status change interrupt which will
+ * happen due to the execution of this workaround.
+ */
+
+ if ((hw->mac_type == e1000_82544
+ || hw->mac_type == e1000_82543) && (!hw->autoneg)
+ && (hw->forced_speed_duplex == e1000_10_full
+ || hw->forced_speed_duplex == e1000_10_half)) {
+ ew32(IMC, 0xffffffff);
+ ret_val =
+ e1000_polarity_reversal_workaround(hw);
+ icr = er32(ICR);
+ ew32(ICS, (icr & ~E1000_ICS_LSC));
+ ew32(IMS, IMS_ENABLE_MASK);
+ }
+
+ } else {
+ /* No link detected */
+ e1000_config_dsp_after_link_change(hw, false);
+ return 0;
+ }
+
+ /* If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!hw->autoneg)
+ return -E1000_ERR_CONFIG;
+
+ /* optimize the dsp settings for the igp phy */
+ e1000_config_dsp_after_link_change(hw, true);
+
+ /* We have an M88E1000 PHY and Auto-Neg is enabled. If we
+ * have Si on board that is 82544 or newer, Auto
+ * Speed Detection takes care of MAC speed/duplex
+ * configuration. So we only need to configure Collision
+ * Distance in the MAC. Otherwise, we need to force
+ * speed/duplex on the MAC to the current PHY speed/duplex
+ * settings.
+ */
+ if ((hw->mac_type >= e1000_82544) &&
+ (hw->mac_type != e1000_ce4100))
+ e1000_config_collision_dist(hw);
+ else {
+ ret_val = e1000_config_mac_to_phy(hw);
+ if (ret_val) {
+ e_dbg
+ ("Error configuring MAC to PHY settings\n");
+ return ret_val;
+ }
+ }
+
+ /* Configure Flow Control now that Auto-Neg has completed. First, we
+ * need to restore the desired flow control settings because we may
+ * have had to re-autoneg with a different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up(hw);
+ if (ret_val) {
+ e_dbg("Error configuring flow control\n");
+ return ret_val;
+ }
+
+ /* At this point we know that we are on copper and we have
+ * auto-negotiated link. These are conditions for checking the link
+ * partner capability register. We use the link speed to determine if
+ * TBI compatibility needs to be turned on or off. If the link is not
+ * at gigabit speed, then TBI compatibility is not needed. If we are
+ * at gigabit speed, we turn on TBI compatibility.
+ */
+ if (hw->tbi_compatibility_en) {
+ u16 speed, duplex;
+ ret_val =
+ e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ e_dbg
+ ("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+ if (speed != SPEED_1000) {
+ /* If link speed is not set to gigabit speed, we do not need
+ * to enable TBI compatibility.
+ */
+ if (hw->tbi_compatibility_on) {
+ /* If we previously were in the mode, turn it off. */
+ rctl = er32(RCTL);
+ rctl &= ~E1000_RCTL_SBP;
+ ew32(RCTL, rctl);
+ hw->tbi_compatibility_on = false;
+ }
+ } else {
+ /* If TBI compatibility was previously off, turn it on. For
+ * compatibility with a TBI link partner, we will store bad
+ * packets. Some frames have an additional byte on the end and
+ * will look like CRC errors to the hardware.
+ */
+ if (!hw->tbi_compatibility_on) {
+ hw->tbi_compatibility_on = true;
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_SBP;
+ ew32(RCTL, rctl);
+ }
+ }
+ }
+ }
+
+ if ((hw->media_type == e1000_media_type_fiber) ||
+ (hw->media_type == e1000_media_type_internal_serdes))
+ e1000_check_for_serdes_link_generic(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_speed_and_duplex - detect the current speed and duplex
+ * @hw: Struct containing variables accessed by shared code
+ * @speed: Speed of the connection
+ * @duplex: Duplex setting of the connection
+ *
+ * Detects the current speed and duplex settings of the hardware.
+ */
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+ u32 status;
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_get_speed_and_duplex");
+
+ if (hw->mac_type >= e1000_82543) {
+ status = er32(STATUS);
+ if (status & E1000_STATUS_SPEED_1000) {
+ *speed = SPEED_1000;
+ e_dbg("1000 Mbs, ");
+ } else if (status & E1000_STATUS_SPEED_100) {
+ *speed = SPEED_100;
+ e_dbg("100 Mbs, ");
+ } else {
+ *speed = SPEED_10;
+ e_dbg("10 Mbs, ");
+ }
+
+ if (status & E1000_STATUS_FD) {
+ *duplex = FULL_DUPLEX;
+ e_dbg("Full Duplex\n");
+ } else {
+ *duplex = HALF_DUPLEX;
+ e_dbg(" Half Duplex\n");
+ }
+ } else {
+ e_dbg("1000 Mbs, Full Duplex\n");
+ *speed = SPEED_1000;
+ *duplex = FULL_DUPLEX;
+ }
+
+ /* IGP01 PHY may advertise full duplex operation after speed downgrade even
+ * if it is operating at half duplex. Here we set the duplex settings to
+ * match the duplex in the link partner's capabilities.
+ */
+ if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
+ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (!(phy_data & NWAY_ER_LP_NWAY_CAPS))
+ *duplex = HALF_DUPLEX;
+ else {
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
+ if (ret_val)
+ return ret_val;
+ if ((*speed == SPEED_100
+ && !(phy_data & NWAY_LPAR_100TX_FD_CAPS))
+ || (*speed == SPEED_10
+ && !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
+ *duplex = HALF_DUPLEX;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_wait_autoneg - wait for autonegotiation to complete
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Blocks until autoneg completes or times out (~4.5 seconds)
+ */
+static s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 i;
+ u16 phy_data;
+
+ e_dbg("e1000_wait_autoneg");
+ e_dbg("Waiting for Auto-Neg to complete.\n");
+
+ /* We will wait for autoneg to complete or 4.5 seconds to expire. */
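+ /* PHY_AUTO_NEG_TIME counts 100 ms polls, so the loop below waits
+ * up to 45 * 100 ms = ~4.5 s.
+ */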
+ for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Auto-Neg
+ * Complete bit to be set.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+ if (phy_data & MII_SR_AUTONEG_COMPLETE) {
+ return E1000_SUCCESS;
+ }
+ msleep(100);
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_raise_mdi_clk - Raises the Management Data Clock
+ * @hw: Struct containing variables accessed by shared code
+ * @ctrl: Device control register's current value
+ */
+static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
+{
+ /* Raise the clock input to the Management Data Clock (by setting the MDC
+ * bit), and then delay 10 microseconds.
+ */
+ ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
+ E1000_WRITE_FLUSH();
+ udelay(10);
+}
+
+/**
+ * e1000_lower_mdi_clk - Lowers the Management Data Clock
+ * @hw: Struct containing variables accessed by shared code
+ * @ctrl: Device control register's current value
+ */
+static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
+{
+ /* Lower the clock input to the Management Data Clock (by clearing the MDC
+ * bit), and then delay 10 microseconds.
+ */
+ ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
+ E1000_WRITE_FLUSH();
+ udelay(10);
+}
+
+/**
+ * e1000_shift_out_mdi_bits - Shifts data bits out to the PHY
+ * @hw: Struct containing variables accessed by shared code
+ * @data: Data to send out to the PHY
+ * @count: Number of bits to shift out
+ *
+ * Bits are shifted out in MSB to LSB order.
+ */
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
+{
+ u32 ctrl;
+ u32 mask;
+
+ /* We need to shift "count" number of bits out to the PHY. So, the value
+ * in the "data" parameter will be shifted out to the PHY one bit at a
+ * time. In order to do this, "data" must be broken down into bits.
+ */
+ mask = 0x01;
+ mask <<= (count - 1);
+
+ ctrl = er32(CTRL);
+
+ /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
+ ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
+
+ while (mask) {
+ /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
+ * then raising and lowering the Management Data Clock. A "0" is
+ * shifted out to the PHY by setting the MDIO bit to "0" and then
+ * raising and lowering the clock.
+ */
+ if (data & mask)
+ ctrl |= E1000_CTRL_MDIO;
+ else
+ ctrl &= ~E1000_CTRL_MDIO;
+
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
+
+ udelay(10);
+
+ e1000_raise_mdi_clk(hw, &ctrl);
+ e1000_lower_mdi_clk(hw, &ctrl);
+
+ mask = mask >> 1;
+ }
+}
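+/* A short worked example of the routine above: with data = 0xB (binary
+ * 1011) and count = 4, mask starts at binary 1000, so MDIO is driven
+ * 1, 0, 1, 1: most significant bit first, one raise/lower clock cycle
+ * per bit.
+ */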
+
+/**
+ * e1000_shift_in_mdi_bits - Shifts data bits in from the PHY
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Bits are shifted in from the PHY in MSB to LSB order.
+ */
+static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u16 data = 0;
+ u8 i;
+
+ /* In order to read a register from the PHY, we need to shift in a total
+ * of 18 bits from the PHY. The first two bit (turnaround) times are used
+ * to avoid contention on the MDIO pin when a read operation is performed.
+ * These two bits are ignored by us and thrown away. Bits are "shifted in"
+ * by raising the input to the Management Data Clock (setting the MDC bit),
+ * and then reading the value of the MDIO bit.
+ */
+ ctrl = er32(CTRL);
+
+ /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
+ ctrl &= ~E1000_CTRL_MDIO_DIR;
+ ctrl &= ~E1000_CTRL_MDIO;
+
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
+
+ /* Raise and Lower the clock before reading in the data. This accounts for
+ * the turnaround bits. The first clock occurred when we clocked out the
+ * last bit of the Register Address.
+ */
+ e1000_raise_mdi_clk(hw, &ctrl);
+ e1000_lower_mdi_clk(hw, &ctrl);
+
+ for (data = 0, i = 0; i < 16; i++) {
+ data = data << 1;
+ e1000_raise_mdi_clk(hw, &ctrl);
+ ctrl = er32(CTRL);
+ /* Check to see if we shifted in a "1". */
+ if (ctrl & E1000_CTRL_MDIO)
+ data |= 1;
+ e1000_lower_mdi_clk(hw, &ctrl);
+ }
+
+ e1000_raise_mdi_clk(hw, &ctrl);
+ e1000_lower_mdi_clk(hw, &ctrl);
+
+ return data;
+}
+
+/**
+ * e1000_read_phy_reg - read a phy register
+ * @hw: Struct containing variables accessed by shared code
+ * @reg_addr: address of the PHY register to read
+ * @phy_data: pointer for the value read from the PHY register
+ *
+ * Reads the value from a PHY register. If the register is on a specific
+ * non-zero page, sets the page first.
+ */
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
+{
+ s32 ret_val;
+
+ e_dbg("e1000_read_phy_reg");
+
+ if ((hw->phy_type == e1000_phy_igp) &&
+ (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+ ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (u16) reg_addr);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+ phy_data);
+
+ return ret_val;
+}
+
+static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+ u16 *phy_data)
+{
+ u32 i;
+ u32 mdic = 0;
+ const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
+
+ e_dbg("e1000_read_phy_reg_ex");
+
+ if (reg_addr > MAX_PHY_REG_ADDRESS) {
+ e_dbg("PHY Address %d is out of range\n", reg_addr);
+ return -E1000_ERR_PARAM;
+ }
+
+ if (hw->mac_type > e1000_82543) {
+ /* Set up Op-code, Phy Address, and register address in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ if (hw->mac_type == e1000_ce4100) {
+ mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (INTEL_CE_GBE_MDIC_OP_READ) |
+ (INTEL_CE_GBE_MDIC_GO));
+
+ writel(mdic, E1000_MDIO_CMD);
+
+ /* Poll the ready bit to see if the MDI read
+ * completed
+ */
+ for (i = 0; i < 64; i++) {
+ udelay(50);
+ mdic = readl(E1000_MDIO_CMD);
+ if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+ break;
+ }
+
+ if (mdic & INTEL_CE_GBE_MDIC_GO) {
+ e_dbg("MDI Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+
+ mdic = readl(E1000_MDIO_STS);
+ if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) {
+ e_dbg("MDI Read Error\n");
+ return -E1000_ERR_PHY;
+ }
+ *phy_data = (u16) mdic;
+ } else {
+ mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
+
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read
+ * completed
+ */
+ for (i = 0; i < 64; i++) {
+ udelay(50);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ e_dbg("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+ *phy_data = (u16) mdic;
+ }
+ } else {
+ /* We must first send a preamble through the MDIO pin to signal the
+ * beginning of an MII instruction. This is done by sending 32
+ * consecutive "1" bits.
+ */
+ e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+ /* Now combine the next few fields that are required for a read
+ * operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine five different times. The format of
+ * a MII read instruction consists of a shift out of 14 bits and is
+ * defined as follows:
+ * <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
+ * followed by a shift in of 18 bits. The first two bits shifted in
+ * are TurnAround bits used to avoid contention on the MDIO pin when a
+ * READ operation is performed. These two bits are thrown away
+ * followed by a shift in of 16 bits which contains the desired data.
+ */
+ mdic = ((reg_addr) | (phy_addr << 5) |
+ (PHY_OP_READ << 10) | (PHY_SOF << 12));
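+ /* A worked example, assuming PHY_SOF is 0x01 and PHY_OP_READ is
+ * 0x02: reading register 1 at phy_addr 1 gives mdic = 0x1821,
+ * which shifts out MSB-first over 14 bits as
+ * 01 10 00001 00001 = <SOF><Op Code><Phy Addr><Reg Addr>.
+ */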
+
+ e1000_shift_out_mdi_bits(hw, mdic, 14);
+
+ /* Now that we've shifted out the read command to the MII, we need to
+ * "shift in" the 16-bit value (18 total bits) of the requested PHY
+ * register address.
+ */
+ *phy_data = e1000_shift_in_mdi_bits(hw);
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg - write a phy register
+ *
+ * @hw: Struct containing variables accessed by shared code
+ * @reg_addr: address of the PHY register to write
+ * @data: data to write to the PHY
+
+ * Writes a value to a PHY register
+ */
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
+{
+ s32 ret_val;
+
+ e_dbg("e1000_write_phy_reg");
+
+ if ((hw->phy_type == e1000_phy_igp) &&
+ (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+ ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (u16) reg_addr);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+ phy_data);
+
+ return ret_val;
+}
+
+static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+ u16 phy_data)
+{
+ u32 i;
+ u32 mdic = 0;
+ const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
+
+ e_dbg("e1000_write_phy_reg_ex");
+
+ if (reg_addr > MAX_PHY_REG_ADDRESS) {
+ e_dbg("PHY Address %d is out of range\n", reg_addr);
+ return -E1000_ERR_PARAM;
+ }
+
+ if (hw->mac_type > e1000_82543) {
+ /* Set up Op-code, Phy Address, register address, and data
+ * intended for the PHY register in the MDI Control register.
+ * The MAC will take care of interfacing with the PHY to send
+ * the desired data.
+ */
+ if (hw->mac_type == e1000_ce4100) {
+ mdic = (((u32) phy_data) |
+ (reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (INTEL_CE_GBE_MDIC_OP_WRITE) |
+ (INTEL_CE_GBE_MDIC_GO));
+
+ writel(mdic, E1000_MDIO_CMD);
+
+ /* Poll the ready bit to see if the MDI write
+ * completed
+ */
+ for (i = 0; i < 640; i++) {
+ udelay(5);
+ mdic = readl(E1000_MDIO_CMD);
+ if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+ break;
+ }
+ if (mdic & INTEL_CE_GBE_MDIC_GO) {
+ e_dbg("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ } else {
+ mdic = (((u32) phy_data) |
+ (reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
+
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI write
+ * completed
+ */
+ for (i = 0; i < 641; i++) {
+ udelay(5);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ }
+ } else {
+ /* We'll need to use the SW defined pins to shift the write command
+ * out to the PHY. We first send a preamble to the PHY to signal the
+ * beginning of the MII instruction. This is done by sending 32
+ * consecutive "1" bits.
+ */
+ e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+ /* Now combine the remaining required fields that will indicate a
+ * write operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine for each field in the command. The
+ * format of a MII write instruction is as follows:
+ * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+ */
+ mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
+ (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
+ mdic <<= 16;
+ mdic |= (u32) phy_data;
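+ /* A worked example, assuming PHY_SOF is 0x01, PHY_OP_WRITE is 0x01
+ * and PHY_TURNAROUND is 0x02: writing 0x1340 to register 0 at
+ * phy_addr 1 gives mdic = 0x50821340, shifted out MSB-first as
+ * 01 01 00001 00000 10 followed by the 16 data bits.
+ */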
+
+ e1000_shift_out_mdi_bits(hw, mdic, 32);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_hw_reset - reset the phy, hardware style
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Returns the PHY to the power-on reset state
+ */
+s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+ u32 ctrl, ctrl_ext;
+ u32 led_ctrl;
+
+ e_dbg("e1000_phy_hw_reset");
+
+ e_dbg("Resetting Phy...\n");
+
+ if (hw->mac_type > e1000_82543) {
+ /* Read the device control register and assert the E1000_CTRL_PHY_RST
+ * bit. Then, take it out of reset.
+ * For e1000 hardware, we delay for 10ms between the assert
+ * and deassert.
+ */
+ ctrl = er32(CTRL);
+ ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
+ E1000_WRITE_FLUSH();
+
+ msleep(10);
+
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
+
+ } else {
+ /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
+ * bit to put the PHY into reset. Then, take it out of reset.
+ */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ msleep(10);
+ ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ }
+ udelay(150);
+
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ /* Configure activity LED after PHY reset */
+ led_ctrl = er32(LEDCTL);
+ led_ctrl &= IGP_ACTIVITY_LED_MASK;
+ led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ ew32(LEDCTL, led_ctrl);
+ }
+
+ /* Wait for FW to finish PHY configuration. */
+ return e1000_get_phy_cfg_done(hw);
+}
+
+/**
+ * e1000_phy_reset - reset the phy to commit settings
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Resets the PHY
+ * Sets bit 15 of the MII Control register
+ */
+s32 e1000_phy_reset(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_phy_reset");
+
+ switch (hw->phy_type) {
+ case e1000_phy_igp:
+ ret_val = e1000_phy_hw_reset(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ default:
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= MII_CR_RESET;
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ udelay(1);
+ break;
+ }
+
+ if (hw->phy_type == e1000_phy_igp)
+ e1000_phy_init_script(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_detect_gig_phy - check the phy type
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Probes the expected PHY address for known PHY IDs
+ */
+static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
+{
+ s32 phy_init_status, ret_val;
+ u16 phy_id_high, phy_id_low;
+ bool match = false;
+
+ e_dbg("e1000_detect_gig_phy");
+
+ if (hw->phy_id != 0)
+ return E1000_SUCCESS;
+
+ /* Read the PHY ID Registers to identify which PHY is onboard. */
+ ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
+ if (ret_val)
+ return ret_val;
+
+ hw->phy_id = (u32) (phy_id_high << 16);
+ udelay(20);
+ ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
+ if (ret_val)
+ return ret_val;
+
+ hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK);
+ hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK;
+
+ switch (hw->mac_type) {
+ case e1000_82543:
+ if (hw->phy_id == M88E1000_E_PHY_ID)
+ match = true;
+ break;
+ case e1000_82544:
+ if (hw->phy_id == M88E1000_I_PHY_ID)
+ match = true;
+ break;
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ if (hw->phy_id == M88E1011_I_PHY_ID)
+ match = true;
+ break;
+ case e1000_ce4100:
+ if ((hw->phy_id == RTL8211B_PHY_ID) ||
+ (hw->phy_id == RTL8201N_PHY_ID) ||
+ (hw->phy_id == M88E1118_E_PHY_ID))
+ match = true;
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ if (hw->phy_id == IGP01E1000_I_PHY_ID)
+ match = true;
+ break;
+ default:
+ e_dbg("Invalid MAC type %d\n", hw->mac_type);
+ return -E1000_ERR_CONFIG;
+ }
+ phy_init_status = e1000_set_phy_type(hw);
+
+ if ((match) && (phy_init_status == E1000_SUCCESS)) {
+ e_dbg("PHY ID 0x%X detected\n", hw->phy_id);
+ return E1000_SUCCESS;
+ }
+ e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id);
+ return -E1000_ERR_PHY;
+}
+
+/**
+ * e1000_phy_reset_dsp - reset DSP
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Resets the PHY's DSP
+ */
+static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ e_dbg("e1000_phy_reset_dsp");
+
+ do {
+ ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
+ if (ret_val)
+ break;
+ ret_val = e1000_write_phy_reg(hw, 30, 0x00c1);
+ if (ret_val)
+ break;
+ ret_val = e1000_write_phy_reg(hw, 30, 0x0000);
+ if (ret_val)
+ break;
+ ret_val = E1000_SUCCESS;
+ } while (0);
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_igp_get_info - get igp specific registers
+ * @hw: Struct containing variables accessed by shared code
+ * @phy_info: PHY information structure
+ *
+ * Get PHY information from various PHY registers for igp PHY only.
+ */
+static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info)
+{
+ s32 ret_val;
+ u16 phy_data, min_length, max_length, average;
+ e1000_rev_polarity polarity;
+
+ e_dbg("e1000_phy_igp_get_info");
+
+ /* The downshift status is checked only once, after link is established,
+ * and it is stored in the hw->speed_downgraded parameter. */
+ phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
+
+ /* IGP01E1000 does not need to support it. */
+ phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+ /* IGP01E1000 always corrects polarity reversal */
+ phy_info->polarity_correction = e1000_polarity_reversal_enabled;
+
+ /* Check polarity status */
+ ret_val = e1000_check_polarity(hw, &polarity);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->cable_polarity = polarity;
+
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->mdix_mode =
+ (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >>
+ IGP01E1000_PSSR_MDIX_SHIFT);
+
+ if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+ /* Local/Remote Receiver Information are only valid at 1000 Mbps */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+ SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+ phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+ SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+ /* Get cable length */
+ ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+ if (ret_val)
+ return ret_val;
+
+ /* Translate to old method */
+ average = (max_length + min_length) / 2;
+
+ if (average <= e1000_igp_cable_length_50)
+ phy_info->cable_length = e1000_cable_length_50;
+ else if (average <= e1000_igp_cable_length_80)
+ phy_info->cable_length = e1000_cable_length_50_80;
+ else if (average <= e1000_igp_cable_length_110)
+ phy_info->cable_length = e1000_cable_length_80_110;
+ else if (average <= e1000_igp_cable_length_140)
+ phy_info->cable_length = e1000_cable_length_110_140;
+ else
+ phy_info->cable_length = e1000_cable_length_140;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_m88_get_info - get m88 specific registers
+ * @hw: Struct containing variables accessed by shared code
+ * @phy_info: PHY information structure
+ *
+ * Get PHY information from various PHY registers for m88 PHY only.
+ */
+static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
+ struct e1000_phy_info *phy_info)
+{
+ s32 ret_val;
+ u16 phy_data;
+ e1000_rev_polarity polarity;
+
+ e_dbg("e1000_phy_m88_get_info");
+
+ /* The downshift status is checked only once, after link is established,
+ * and it is stored in the hw->speed_downgraded parameter. */
+ phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->extended_10bt_distance =
+ ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >>
+ M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ?
+ e1000_10bt_ext_dist_enable_lower :
+ e1000_10bt_ext_dist_enable_normal;
+
+ phy_info->polarity_correction =
+ ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >>
+ M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ?
+ e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
+
+ /* Check polarity status */
+ ret_val = e1000_check_polarity(hw, &polarity);
+ if (ret_val)
+ return ret_val;
+ phy_info->cable_polarity = polarity;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->mdix_mode =
+ (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >>
+ M88E1000_PSSR_MDIX_SHIFT);
+
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+ /* Cable Length Estimation and Local/Remote Receiver Information
+ * are only valid at 1000 Mbps.
+ */
+ phy_info->cable_length =
+ (e1000_cable_length) ((phy_data &
+ M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+ SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+ phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+ SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_get_info - request phy info
+ * @hw: Struct containing variables accessed by shared code
+ * @phy_info: PHY information structure
+ *
+ * Get PHY information from various PHY registers
+ */
+s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_phy_get_info");
+
+ phy_info->cable_length = e1000_cable_length_undefined;
+ phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
+ phy_info->cable_polarity = e1000_rev_polarity_undefined;
+ phy_info->downshift = e1000_downshift_undefined;
+ phy_info->polarity_correction = e1000_polarity_reversal_undefined;
+ phy_info->mdix_mode = e1000_auto_x_mode_undefined;
+ phy_info->local_rx = e1000_1000t_rx_status_undefined;
+ phy_info->remote_rx = e1000_1000t_rx_status_undefined;
+
+ if (hw->media_type != e1000_media_type_copper) {
+ e_dbg("PHY info is only valid for copper media\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
+ e_dbg("PHY info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ if (hw->phy_type == e1000_phy_igp)
+ return e1000_phy_igp_get_info(hw, phy_info);
+ else if ((hw->phy_type == e1000_phy_8211) ||
+ (hw->phy_type == e1000_phy_8201))
+ return E1000_SUCCESS;
+ else
+ return e1000_phy_m88_get_info(hw, phy_info);
+}
+
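+/**
+ * e1000_validate_mdi_setting - validate the MDI/MDI-X setting
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Checks that the MDI setting is compatible with autonegotiation: the
+ * automatic MDI/MDI-X settings are only valid when autoneg is enabled.
+ * When speed and duplex are forced, falls back to straight MDI and
+ * returns a configuration error.
+ */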
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
+{
+ e_dbg("e1000_validate_mdi_settings");
+
+ if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
+ e_dbg("Invalid MDI setting detected\n");
+ hw->mdix = 1;
+ return -E1000_ERR_CONFIG;
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_eeprom_params - initialize sw eeprom vars
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Sets up eeprom variables in the hw struct. Must be called after mac_type
+ * is configured.
+ */
+s32 e1000_init_eeprom_params(struct e1000_hw *hw)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd = er32(EECD);
+ s32 ret_val = E1000_SUCCESS;
+ u16 eeprom_size;
+
+ e_dbg("e1000_init_eeprom_params");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->word_size = 64;
+ eeprom->opcode_bits = 3;
+ eeprom->address_bits = 6;
+ eeprom->delay_usec = 50;
+ break;
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->opcode_bits = 3;
+ eeprom->delay_usec = 50;
+ if (eecd & E1000_EECD_SIZE) {
+ eeprom->word_size = 256;
+ eeprom->address_bits = 8;
+ } else {
+ eeprom->word_size = 64;
+ eeprom->address_bits = 6;
+ }
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ if (eecd & E1000_EECD_TYPE) {
+ eeprom->type = e1000_eeprom_spi;
+ eeprom->opcode_bits = 8;
+ eeprom->delay_usec = 1;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->page_size = 32;
+ eeprom->address_bits = 16;
+ } else {
+ eeprom->page_size = 8;
+ eeprom->address_bits = 8;
+ }
+ } else {
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->opcode_bits = 3;
+ eeprom->delay_usec = 50;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->word_size = 256;
+ eeprom->address_bits = 8;
+ } else {
+ eeprom->word_size = 64;
+ eeprom->address_bits = 6;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (eeprom->type == e1000_eeprom_spi) {
+ /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
+ * 32KB (incremented by powers of 2).
+ */
+ /* Set to default value for initial eeprom read. */
+ eeprom->word_size = 64;
+ ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
+ if (ret_val)
+ return ret_val;
+ eeprom_size =
+ (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
+ /* 256B eeprom size was not supported in earlier hardware, so we
+ * bump eeprom_size up one to ensure that "1" (which maps to 256B)
+ * is never the result used in the shifting logic below. */
+ if (eeprom_size)
+ eeprom_size++;
+
+ eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT);
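+ /* For example, assuming EEPROM_WORD_SIZE_SHIFT is 6: a size field
+ * of 4 is bumped to 5 above and yields 1 << 11 = 2048 words (4 KB),
+ * while a size field of 0 keeps the 64-word (128 B) default.
+ */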
+ }
+ return ret_val;
+}
+
+/**
+ * e1000_raise_ee_clk - Raises the EEPROM's clock input.
+ * @hw: Struct containing variables accessed by shared code
+ * @eecd: EECD's current value
+ */
+static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ /* Raise the clock input to the EEPROM (by setting the SK bit), and then
+ * wait <delay> microseconds.
+ */
+ *eecd = *eecd | E1000_EECD_SK;
+ ew32(EECD, *eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+}
+
+/**
+ * e1000_lower_ee_clk - Lowers the EEPROM's clock input.
+ * @hw: Struct containing variables accessed by shared code
+ * @eecd: EECD's current value
+ */
+static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
+ * wait 50 microseconds.
+ */
+ *eecd = *eecd & ~E1000_EECD_SK;
+ ew32(EECD, *eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+}
+
+/**
+ * e1000_shift_out_ee_bits - Shift data bits out to the EEPROM.
+ * @hw: Struct containing variables accessed by shared code
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ */
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd;
+ u32 mask;
+
+ /* We need to shift "count" bits out to the EEPROM. So, value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ */
+ mask = 0x01 << (count - 1);
+ eecd = er32(EECD);
+ if (eeprom->type == e1000_eeprom_microwire) {
+ eecd &= ~E1000_EECD_DO;
+ } else if (eeprom->type == e1000_eeprom_spi) {
+ eecd |= E1000_EECD_DO;
+ }
+ do {
+ /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
+ * and then raising and then lowering the clock (the SK bit controls
+ * the clock input to the EEPROM). A "0" is shifted out to the EEPROM
+ * by setting "DI" to "0" and then raising and then lowering the clock.
+ */
+ eecd &= ~E1000_EECD_DI;
+
+ if (data & mask)
+ eecd |= E1000_EECD_DI;
+
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+
+ udelay(eeprom->delay_usec);
+
+ e1000_raise_ee_clk(hw, &eecd);
+ e1000_lower_ee_clk(hw, &eecd);
+
+ mask = mask >> 1;
+
+ } while (mask);
+
+ /* We leave the "DI" bit set to "0" when we leave this routine. */
+ eecd &= ~E1000_EECD_DI;
+ ew32(EECD, eecd);
+}
+
+/**
+ * e1000_shift_in_ee_bits - Shift data bits in from the EEPROM
+ * @hw: Struct containing variables accessed by shared code
+ * @count: number of bits to shift in
+ */
+static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count)
+{
+ u32 eecd;
+ u32 i;
+ u16 data;
+
+ /* In order to read a register from the EEPROM, we need to shift 'count'
+ * bits in from the EEPROM. Bits are "shifted in" by raising the clock
+ * input to the EEPROM (setting the SK bit), and then reading the value of
+ * the "DO" bit. During this "shifting in" process the "DI" bit should
+ * always be clear.
+ */
+
+ eecd = er32(EECD);
+
+ eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+ data = 0;
+
+ for (i = 0; i < count; i++) {
+ data = data << 1;
+ e1000_raise_ee_clk(hw, &eecd);
+
+ eecd = er32(EECD);
+
+ eecd &= ~(E1000_EECD_DI);
+ if (eecd & E1000_EECD_DO)
+ data |= 1;
+
+ e1000_lower_ee_clk(hw, &eecd);
+ }
+
+ return data;
+}
+
+/**
+ * e1000_acquire_eeprom - Prepares EEPROM for access
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
+ * function should be called before issuing a command to the EEPROM.
+ */
+static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd, i = 0;
+
+ e_dbg("e1000_acquire_eeprom");
+
+ eecd = er32(EECD);
+
+ /* Request EEPROM Access */
+ if (hw->mac_type > e1000_82544) {
+ eecd |= E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ eecd = er32(EECD);
+ while ((!(eecd & E1000_EECD_GNT)) &&
+ (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
+ i++;
+ udelay(5);
+ eecd = er32(EECD);
+ }
+ if (!(eecd & E1000_EECD_GNT)) {
+ eecd &= ~E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ e_dbg("Could not acquire EEPROM grant\n");
+ return -E1000_ERR_EEPROM;
+ }
+ }
+
+ /* Setup EEPROM for Read/Write */
+
+ if (eeprom->type == e1000_eeprom_microwire) {
+ /* Clear SK and DI */
+ eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
+ ew32(EECD, eecd);
+
+ /* Set CS */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ } else if (eeprom->type == e1000_eeprom_spi) {
+ /* Clear SK and CS */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(1);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_standby_eeprom - Returns EEPROM to a "standby" state
+ * @hw: Struct containing variables accessed by shared code
+ */
+static void e1000_standby_eeprom(struct e1000_hw *hw)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd;
+
+ eecd = er32(EECD);
+
+ if (eeprom->type == e1000_eeprom_microwire) {
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+
+ /* Clock high */
+ eecd |= E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+
+ /* Select EEPROM */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+
+ /* Clock low */
+ eecd &= ~E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+ } else if (eeprom->type == e1000_eeprom_spi) {
+ /* Toggle CS to flush commands */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+ eecd &= ~E1000_EECD_CS;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+ }
+}
+
+/**
+ * e1000_release_eeprom - drop chip select
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Terminates a command by inverting the EEPROM's chip select pin
+ */
+static void e1000_release_eeprom(struct e1000_hw *hw)
+{
+ u32 eecd;
+
+ e_dbg("e1000_release_eeprom");
+
+ eecd = er32(EECD);
+
+ if (hw->eeprom.type == e1000_eeprom_spi) {
+ eecd |= E1000_EECD_CS; /* Pull CS high */
+ eecd &= ~E1000_EECD_SK; /* Lower SCK */
+
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+
+ udelay(hw->eeprom.delay_usec);
+ } else if (hw->eeprom.type == e1000_eeprom_microwire) {
+ /* cleanup eeprom */
+
+ /* CS on Microwire is active-high */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+
+ ew32(EECD, eecd);
+
+ /* Rising edge of clock */
+ eecd |= E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+
+ /* Falling edge of clock */
+ eecd &= ~E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+ }
+
+ /* Stop requesting EEPROM access */
+ if (hw->mac_type > e1000_82544) {
+ eecd &= ~E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ }
+}
+
+/**
+ * e1000_spi_eeprom_ready - Waits for the SPI EEPROM to be ready for a command.
+ * @hw: Struct containing variables accessed by shared code
+ */
+static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
+{
+ u16 retry_count = 0;
+ u8 spi_stat_reg;
+
+ e_dbg("e1000_spi_eeprom_ready");
+
+ /* Read "Status Register" repeatedly until the LSB is cleared. The
+ * EEPROM will signal that the command has been completed by clearing
+ * bit 0 of the internal status register. If it's not cleared within
+ * 5 milliseconds, then error out.
+ */
+ retry_count = 0;
+ do {
+ e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
+ hw->eeprom.opcode_bits);
+ spi_stat_reg = (u8) e1000_shift_in_ee_bits(hw, 8);
+ if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
+ break;
+
+ udelay(5);
+ retry_count += 5;
+
+ e1000_standby_eeprom(hw);
+ } while (retry_count < EEPROM_MAX_RETRY_SPI);
+
+ /* ATMEL SPI write time could vary from 0-20 ms on 3.3V devices (and
+ * only 0-5 ms on 5V devices)
+ */
+ if (retry_count >= EEPROM_MAX_RETRY_SPI) {
+ e_dbg("SPI EEPROM Status error\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_eeprom - Reads a 16 bit word from the EEPROM.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: words read from the EEPROM
+ */
+s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ s32 ret;
+ spin_lock(&e1000_eeprom_lock);
+ ret = e1000_do_read_eeprom(hw, offset, words, data);
+ spin_unlock(&e1000_eeprom_lock);
+ return ret;
+}
+
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 i = 0;
+
+ e_dbg("e1000_read_eeprom");
+
+ if (hw->mac_type == e1000_ce4100) {
+ GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words,
+ data);
+ return E1000_SUCCESS;
+ }
+
+ /* If eeprom is not yet detected, do so now */
+ if (eeprom->word_size == 0)
+ e1000_init_eeprom_params(hw);
+
+ /* A check for invalid values: offset too large, too many words, and not
+ * enough words.
+ */
+ if ((offset >= eeprom->word_size)
+ || (words > eeprom->word_size - offset) || (words == 0)) {
+ e_dbg("\"words\" parameter out of bounds. Words = %d,"
+ "size = %d\n", offset, eeprom->word_size);
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* EEPROMs that don't use EERD to read require us to bit-bang the SPI
+ * directly. In this case, we need to acquire the EEPROM so that
+ * FW or other port software does not interrupt.
+ */
+ /* Prepare the EEPROM for bit-bang reading */
+ if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+ return -E1000_ERR_EEPROM;
+
+ /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have
+ * acquired the EEPROM at this point, so any returns should release it */
+ if (eeprom->type == e1000_eeprom_spi) {
+ u16 word_in;
+ u8 read_opcode = EEPROM_READ_OPCODE_SPI;
+
+ if (e1000_spi_eeprom_ready(hw)) {
+ e1000_release_eeprom(hw);
+ return -E1000_ERR_EEPROM;
+ }
+
+ e1000_standby_eeprom(hw);
+
+ /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ if ((eeprom->address_bits == 8) && (offset >= 128))
+ read_opcode |= EEPROM_A8_OPCODE_SPI;
+
+ /* Send the READ command (opcode + addr) */
+ e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
+ e1000_shift_out_ee_bits(hw, (u16) (offset * 2),
+ eeprom->address_bits);
+
+ /* Read the data. The address of the eeprom internally increments with
+ * each byte (spi) being read, saving on the overhead of eeprom setup
+ * and tear-down. The address counter will roll over if reading beyond
+ * the size of the eeprom, thus allowing the entire memory to be read
+ * starting from any offset. */
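+ /* Each pair of bytes is clocked in high-bits-first, so the byte at
+ * the lower EEPROM address lands in the top half of word_in; the
+ * swap below moves it back to the low half. For example, bytes AA
+ * then BB read back as word_in = 0xAABB and are stored as 0xBBAA.
+ */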
+ for (i = 0; i < words; i++) {
+ word_in = e1000_shift_in_ee_bits(hw, 16);
+ data[i] = (word_in >> 8) | (word_in << 8);
+ }
+ } else if (eeprom->type == e1000_eeprom_microwire) {
+ for (i = 0; i < words; i++) {
+ /* Send the READ command (opcode + addr) */
+ e1000_shift_out_ee_bits(hw,
+ EEPROM_READ_OPCODE_MICROWIRE,
+ eeprom->opcode_bits);
+ e1000_shift_out_ee_bits(hw, (u16) (offset + i),
+ eeprom->address_bits);
+
+ /* Read the data. For microwire, each word requires the overhead
+ * of eeprom setup and tear-down. */
+ data[i] = e1000_shift_in_ee_bits(hw, 16);
+ e1000_standby_eeprom(hw);
+ }
+ }
+
+ /* End this read operation */
+ e1000_release_eeprom(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_validate_eeprom_checksum - Verifies that the EEPROM has a valid checksum
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Reads the first 64 16 bit words of the EEPROM and sums the values read.
+ * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
+ * valid.
+ */
+s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
+{
+ u16 checksum = 0;
+ u16 i, eeprom_data;
+
+ e_dbg("e1000_validate_eeprom_checksum");
+
+ for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+ if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+ e_dbg("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ checksum += eeprom_data;
+ }
+
+#ifdef CONFIG_PARISC
+ /* This is a signature and not a checksum on HP c8000 */
+ if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6))
+ return E1000_SUCCESS;
+
+#endif
+ if (checksum == (u16) EEPROM_SUM)
+ return E1000_SUCCESS;
+ else {
+ e_dbg("EEPROM Checksum Invalid\n");
+ return -E1000_ERR_EEPROM;
+ }
+}
+
+/**
+ * e1000_update_eeprom_checksum - Calculates/writes the EEPROM checksum
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
+ * Writes the difference to word offset 63 of the EEPROM.
+ */
+s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
+{
+ u16 checksum = 0;
+ u16 i, eeprom_data;
+
+ e_dbg("e1000_update_eeprom_checksum");
+
+ for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
+ if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+ e_dbg("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ checksum += eeprom_data;
+ }
+ checksum = (u16) EEPROM_SUM - checksum;
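+ /* For example, if words 0x00-0x3E sum to 0x1234, the checksum word
+ * written below is 0xBABA - 0x1234 = 0xA886, which brings the full
+ * 64-word sum back to 0xBABA (EEPROM_SUM).
+ */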
+ if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
+ e_dbg("EEPROM Write Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_eeprom - write words to the different EEPROM types.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word to be written to the EEPROM
+ *
+ * If e1000_update_eeprom_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ */
+s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ s32 ret;
+ spin_lock(&e1000_eeprom_lock);
+ ret = e1000_do_write_eeprom(hw, offset, words, data);
+ spin_unlock(&e1000_eeprom_lock);
+ return ret;
+}
+
+static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ s32 status = 0;
+
+ e_dbg("e1000_write_eeprom");
+
+ if (hw->mac_type == e1000_ce4100) {
+ GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words,
+ data);
+ return E1000_SUCCESS;
+ }
+
+ /* If eeprom is not yet detected, do so now */
+ if (eeprom->word_size == 0)
+ e1000_init_eeprom_params(hw);
+
+ /* A check for invalid values: offset too large, too many words, and not
+ * enough words.
+ */
+ if ((offset >= eeprom->word_size)
+ || (words > eeprom->word_size - offset) || (words == 0)) {
+ e_dbg("\"words\" parameter out of bounds\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* Prepare the EEPROM for writing */
+ if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+ return -E1000_ERR_EEPROM;
+
+ if (eeprom->type == e1000_eeprom_microwire) {
+ status = e1000_write_eeprom_microwire(hw, offset, words, data);
+ } else {
+ status = e1000_write_eeprom_spi(hw, offset, words, data);
+ msleep(10);
+ }
+
+ /* Done with writing */
+ e1000_release_eeprom(hw);
+
+ return status;
+}
+
+/**
+ * e1000_write_eeprom_spi - Writes 16 bit words to a given offset in an SPI EEPROM.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: pointer to array of 16 bit words to be written to the EEPROM
+ */
+static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u16 widx = 0;
+
+ e_dbg("e1000_write_eeprom_spi");
+
+ while (widx < words) {
+ u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
+
+ if (e1000_spi_eeprom_ready(hw))
+ return -E1000_ERR_EEPROM;
+
+ e1000_standby_eeprom(hw);
+
+ /* Send the WRITE ENABLE command (8 bit opcode ) */
+ e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI,
+ eeprom->opcode_bits);
+
+ e1000_standby_eeprom(hw);
+
+ /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ if ((eeprom->address_bits == 8) && (offset >= 128))
+ write_opcode |= EEPROM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
+ e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits);
+
+ e1000_shift_out_ee_bits(hw, (u16) ((offset + widx) * 2),
+ eeprom->address_bits);
+
+ /* Send the data */
+
+ /* Loop to allow for up to a whole page write (32 bytes) of eeprom */
+ while (widx < words) {
+ u16 word_out = data[widx];
+ word_out = (word_out >> 8) | (word_out << 8);
+ e1000_shift_out_ee_bits(hw, word_out, 16);
+ widx++;
+
+ /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
+ * operation, while the smaller eeproms are capable of an 8-byte
+ * PAGE WRITE operation. Break the inner loop to pass new address
+ */
+ if ((((offset + widx) * 2) % eeprom->page_size) == 0) {
+ e1000_standby_eeprom(hw);
+ break;
+ }
+ }
+ }
+
+ return E1000_SUCCESS;
+}
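+
+/* Worked example (illustrative): with an 8-byte page and offset 3, the
+ * first burst writes the word at byte address 6, hits the page boundary
+ * at byte 8, breaks out of the inner loop, and the outer loop issues a
+ * fresh WREN + WRITE for the next page.
+ */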
+
+/**
+ * e1000_write_eeprom_microwire - Writes 16 bit words to a given offset in a Microwire EEPROM.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: pointer to array of 16 bit words to be written to the EEPROM
+ */
+static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd;
+ u16 words_written = 0;
+ u16 i = 0;
+
+ e_dbg("e1000_write_eeprom_microwire");
+
+ /* Send the write enable command to the EEPROM (3-bit opcode plus
+ * 6/8-bit dummy address beginning with 11). It's less work to include
+ * the 11 of the dummy address as part of the opcode than it is to shift
+ * it over the correct number of bits for the address. This puts the
+ * EEPROM into write/erase mode.
+ */
+ e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
+ (u16) (eeprom->opcode_bits + 2));
+
+ e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2));
+
+ /* Prepare the EEPROM */
+ e1000_standby_eeprom(hw);
+
+ while (words_written < words) {
+ /* Send the Write command (3-bit opcode + addr) */
+ e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
+ eeprom->opcode_bits);
+
+ e1000_shift_out_ee_bits(hw, (u16) (offset + words_written),
+ eeprom->address_bits);
+
+ /* Send the data */
+ e1000_shift_out_ee_bits(hw, data[words_written], 16);
+
+ /* Toggle the CS line. This in effect tells the EEPROM to execute
+ * the previous command.
+ */
+ e1000_standby_eeprom(hw);
+
+ /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will
+ * signal that the command has been completed by raising the DO signal.
+ * If DO does not go high in 10 milliseconds, then error out.
+ */
+ for (i = 0; i < 200; i++) {
+ eecd = er32(EECD);
+ if (eecd & E1000_EECD_DO)
+ break;
+ udelay(50);
+ }
+ if (i == 200) {
+ e_dbg("EEPROM Write did not complete\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* Recover from write */
+ e1000_standby_eeprom(hw);
+
+ words_written++;
+ }
+
+ /* Send the write disable command to the EEPROM (3-bit opcode plus
+ * 6/8-bit dummy address beginning with 10). It's less work to include
+ * the 10 of the dummy address as part of the opcode than it is to shift
+ * it over the correct number of bits for the address. This takes the
+ * EEPROM out of write/erase mode.
+ */
+ e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
+ (u16) (eeprom->opcode_bits + 2));
+
+ e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2));
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_mac_addr - read the adapter's MAC address from the EEPROM
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the
+ * second function of dual function devices
+ */
+s32 e1000_read_mac_addr(struct e1000_hw *hw)
+{
+ u16 offset;
+ u16 eeprom_data, i;
+
+ e_dbg("e1000_read_mac_addr");
+
+ for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
+ offset = i >> 1;
+ if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
+ e_dbg("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF);
+ hw->perm_mac_addr[i + 1] = (u8) (eeprom_data >> 8);
+ }
+
+ switch (hw->mac_type) {
+ default:
+ break;
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ if (er32(STATUS) & E1000_STATUS_FUNC_1)
+ hw->perm_mac_addr[5] ^= 0x01;
+ break;
+ }
+
+ for (i = 0; i < NODE_ADDRESS_SIZE; i++)
+ hw->mac_addr[i] = hw->perm_mac_addr[i];
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_rx_addrs - Initializes receive address filters.
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Assumes the receiver is in reset when
+ * the routine is called.
+ */
+static void e1000_init_rx_addrs(struct e1000_hw *hw)
+{
+ u32 i;
+ u32 rar_num;
+
+ e_dbg("e1000_init_rx_addrs");
+
+ /* Setup the receive address. */
+ e_dbg("Programming MAC Address into RAR[0]\n");
+
+ e1000_rar_set(hw, hw->mac_addr, 0);
+
+ rar_num = E1000_RAR_ENTRIES;
+
+ /* Zero out the remaining 14 receive addresses. */
+ e_dbg("Clearing RAR[1-14]\n");
+ for (i = 1; i < rar_num; i++) {
+ E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+ E1000_WRITE_FLUSH();
+ }
+}
+
+/**
+ * e1000_hash_mc_addr - Hashes an address to determine its location in the multicast table
+ * @hw: Struct containing variables accessed by shared code
+ * @mc_addr: the multicast address to hash
+ */
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+ u32 hash_value = 0;
+
+ /* The portion of the address that is used for the hash table is
+ * determined by the mc_filter_type setting.
+ */
+ switch (hw->mc_filter_type) {
+ /* [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB MSB
+ */
+ case 0:
+ /* [47:36] i.e. 0x563 for above example address */
+ hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
+ break;
+ case 1:
+ /* [46:35] i.e. 0xAC6 for above example address */
+ hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
+ break;
+ case 2:
+ /* [45:34] i.e. 0x5D8 for above example address */
+ hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
+ break;
+ case 3:
+ /* [43:32] i.e. 0x634 for above example address */
+ hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
+ break;
+ }
+
+ hash_value &= 0xFFF;
+ return hash_value;
+}
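+
+/* Illustrative use (hypothetical snippet): the 12-bit hash selects one of
+ * the 4096 bits in the multicast table, typically via e1000_mta_set():
+ *
+ *   u32 hash = e1000_hash_mc_addr(hw, mc_addr);
+ *   e1000_mta_set(hw, hash);
+ */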
+
+/**
+ * e1000_rar_set - Puts an ethernet address into a receive address register.
+ * @hw: Struct containing variables accessed by shared code
+ * @addr: Address to put into receive address register
+ * @index: Receive address register to write
+ */
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+ /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
+ * unit hang.
+ *
+ * Description:
+ * If there are any Rx frames queued up or otherwise present in the HW
+ * before RSS is enabled, and then we enable RSS, the HW Rx unit will
+ * hang. To work around this issue, we have to disable receives and
+ * flush out all Rx frames before we enable RSS. To do so, we redirect
+ * all Rx traffic to manageability and then reset the HW.
+ * This flushes away Rx frames, and (since the redirection to
+ * manageability persists across resets) keeps new ones from coming in
+ * while we work. Then, we clear the Address Valid (AV) bit for all MAC
+ * addresses and undo the re-direction to manageability.
+ * Now, frames are coming in again, but the MAC won't accept them, so
+ * far so good. We now proceed to initialize RSS (if necessary) and
+ * configure the Rx unit. Last, we re-enable the AV bits and continue
+ * on our merry way.
+ */
+ switch (hw->mac_type) {
+ default:
+ /* Indicate to hardware the Address is Valid. */
+ rar_high |= E1000_RAH_AV;
+ break;
+ }
+
+ E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
+ E1000_WRITE_FLUSH();
+}
+
+/**
+ * e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: Offset in VLAN filter table to write
+ * @value: Value to write into VLAN filter table
+ */
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ u32 temp;
+
+ if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
+ temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
+ E1000_WRITE_FLUSH();
+ } else {
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ E1000_WRITE_FLUSH();
+ }
+}
+
+/**
+ * e1000_clear_vfta - Clears the VLAN filter table
+ * @hw: Struct containing variables accessed by shared code
+ */
+static void e1000_clear_vfta(struct e1000_hw *hw)
+{
+ u32 offset;
+ u32 vfta_value = 0;
+ u32 vfta_offset = 0;
+ u32 vfta_bit_in_reg = 0;
+
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ /* If the offset we want to clear is the same offset of the
+ * manageability VLAN ID, then clear all bits except that of
+ * the manageability unit.
+ */
+ vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
+ E1000_WRITE_FLUSH();
+ }
+}
+
+static s32 e1000_id_led_init(struct e1000_hw *hw)
+{
+ u32 ledctl;
+ const u32 ledctl_mask = 0x000000FF;
+ const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+ const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+ u16 eeprom_data, i, temp;
+ const u16 led_mask = 0x0F;
+
+ e_dbg("e1000_id_led_init");
+
+ if (hw->mac_type < e1000_82540) {
+ /* Nothing to do */
+ return E1000_SUCCESS;
+ }
+
+ ledctl = er32(LEDCTL);
+ hw->ledctl_default = ledctl;
+ hw->ledctl_mode1 = hw->ledctl_default;
+ hw->ledctl_mode2 = hw->ledctl_default;
+
+ if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
+ e_dbg("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ if ((eeprom_data == ID_LED_RESERVED_0000) ||
+ (eeprom_data == ID_LED_RESERVED_FFFF)) {
+ eeprom_data = ID_LED_DEFAULT;
+ }
+
+ for (i = 0; i < 4; i++) {
+ temp = (eeprom_data >> (i << 2)) & led_mask;
+ switch (temp) {
+ case ID_LED_ON1_DEF2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_ON1_OFF2:
+ hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode1 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_OFF1_DEF2:
+ case ID_LED_OFF1_ON2:
+ case ID_LED_OFF1_OFF2:
+ hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode1 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ switch (temp) {
+ case ID_LED_DEF1_ON2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_OFF1_ON2:
+ hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode2 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_DEF1_OFF2:
+ case ID_LED_ON1_OFF2:
+ case ID_LED_OFF1_OFF2:
+ hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode2 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_setup_led
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Prepares the SW controllable LED for use and saves the current state of the LED.
+ */
+s32 e1000_setup_led(struct e1000_hw *hw)
+{
+ u32 ledctl;
+ s32 ret_val = E1000_SUCCESS;
+
+ e_dbg("e1000_setup_led");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ /* No setup necessary */
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ /* Turn off PHY Smart Power Down (if enabled) */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ &hw->phy_spd_default);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ (u16) (hw->phy_spd_default &
+ ~IGP01E1000_GMII_SPD));
+ if (ret_val)
+ return ret_val;
+ /* Fall Through */
+ default:
+ if (hw->media_type == e1000_media_type_fiber) {
+ ledctl = er32(LEDCTL);
+ /* Save current LEDCTL settings */
+ hw->ledctl_default = ledctl;
+ /* Turn off LED0 */
+ ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
+ E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_LED0_MODE_MASK);
+ ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+ E1000_LEDCTL_LED0_MODE_SHIFT);
+ ew32(LEDCTL, ledctl);
+ } else if (hw->media_type == e1000_media_type_copper)
+ ew32(LEDCTL, hw->ledctl_mode1);
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
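+
+/* Typical (illustrative) LED blink sequence from a caller: e1000_setup_led()
+ * once, then alternate e1000_led_on()/e1000_led_off(), and finally
+ * e1000_cleanup_led() to restore the saved LEDCTL state.
+ */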
+
+/**
+ * e1000_cleanup_led - Restores the saved state of the SW controllable LED.
+ * @hw: Struct containing variables accessed by shared code
+ */
+s32 e1000_cleanup_led(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ e_dbg("e1000_cleanup_led");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ /* No cleanup necessary */
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ /* Turn on PHY Smart Power Down (if previously enabled) */
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ hw->phy_spd_default);
+ if (ret_val)
+ return ret_val;
+ /* Fall Through */
+ default:
+ /* Restore LEDCTL settings */
+ ew32(LEDCTL, hw->ledctl_default);
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_on - Turns on the software controllable LED
+ * @hw: Struct containing variables accessed by shared code
+ */
+s32 e1000_led_on(struct e1000_hw *hw)
+{
+ u32 ctrl = er32(CTRL);
+
+ e_dbg("e1000_led_on");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ /* Set SW Definable Pin 0 to turn on the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ break;
+ case e1000_82544:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Set SW Definable Pin 0 to turn on the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else {
+ /* Clear SW Definable Pin 0 to turn on the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ }
+ break;
+ default:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Clear SW Definable Pin 0 to turn on the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else if (hw->media_type == e1000_media_type_copper) {
+ ew32(LEDCTL, hw->ledctl_mode2);
+ return E1000_SUCCESS;
+ }
+ break;
+ }
+
+ ew32(CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_off - Turns off the software controllable LED
+ * @hw: Struct containing variables accessed by shared code
+ */
+s32 e1000_led_off(struct e1000_hw *hw)
+{
+ u32 ctrl = er32(CTRL);
+
+ e_dbg("e1000_led_off");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ /* Clear SW Definable Pin 0 to turn off the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ break;
+ case e1000_82544:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Clear SW Definable Pin 0 to turn off the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else {
+ /* Set SW Definable Pin 0 to turn off the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ }
+ break;
+ default:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Set SW Definable Pin 0 to turn off the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else if (hw->media_type == e1000_media_type_copper) {
+ ew32(LEDCTL, hw->ledctl_mode1);
+ return E1000_SUCCESS;
+ }
+ break;
+ }
+
+ ew32(CTRL, ctrl);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_clear_hw_cntrs - Clears all hardware statistics counters.
+ * @hw: Struct containing variables accessed by shared code
+ */
+static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
+{
+ volatile u32 temp;
+
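+ /* These statistics registers are clear-on-read; reading each one into
+ * the discarded volatile is enough to zero it.
+ */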
+ temp = er32(CRCERRS);
+ temp = er32(SYMERRS);
+ temp = er32(MPC);
+ temp = er32(SCC);
+ temp = er32(ECOL);
+ temp = er32(MCC);
+ temp = er32(LATECOL);
+ temp = er32(COLC);
+ temp = er32(DC);
+ temp = er32(SEC);
+ temp = er32(RLEC);
+ temp = er32(XONRXC);
+ temp = er32(XONTXC);
+ temp = er32(XOFFRXC);
+ temp = er32(XOFFTXC);
+ temp = er32(FCRUC);
+
+ temp = er32(PRC64);
+ temp = er32(PRC127);
+ temp = er32(PRC255);
+ temp = er32(PRC511);
+ temp = er32(PRC1023);
+ temp = er32(PRC1522);
+
+ temp = er32(GPRC);
+ temp = er32(BPRC);
+ temp = er32(MPRC);
+ temp = er32(GPTC);
+ temp = er32(GORCL);
+ temp = er32(GORCH);
+ temp = er32(GOTCL);
+ temp = er32(GOTCH);
+ temp = er32(RNBC);
+ temp = er32(RUC);
+ temp = er32(RFC);
+ temp = er32(ROC);
+ temp = er32(RJC);
+ temp = er32(TORL);
+ temp = er32(TORH);
+ temp = er32(TOTL);
+ temp = er32(TOTH);
+ temp = er32(TPR);
+ temp = er32(TPT);
+
+ temp = er32(PTC64);
+ temp = er32(PTC127);
+ temp = er32(PTC255);
+ temp = er32(PTC511);
+ temp = er32(PTC1023);
+ temp = er32(PTC1522);
+
+ temp = er32(MPTC);
+ temp = er32(BPTC);
+
+ if (hw->mac_type < e1000_82543)
+ return;
+
+ temp = er32(ALGNERRC);
+ temp = er32(RXERRC);
+ temp = er32(TNCRS);
+ temp = er32(CEXTERR);
+ temp = er32(TSCTC);
+ temp = er32(TSCTFC);
+
+ if (hw->mac_type <= e1000_82544)
+ return;
+
+ temp = er32(MGTPRC);
+ temp = er32(MGTPDC);
+ temp = er32(MGTPTC);
+}
+
+/**
+ * e1000_reset_adaptive - Resets Adaptive IFS to its default state.
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Call this after e1000_init_hw. You may override the IFS defaults by
+ * setting hw->ifs_params_forced to true. However, you must initialize
+ * hw->current_ifs_val, hw->ifs_min_val, hw->ifs_max_val, hw->ifs_step_size,
+ * and hw->ifs_ratio before calling this function.
+ */
+void e1000_reset_adaptive(struct e1000_hw *hw)
+{
+ e_dbg("e1000_reset_adaptive");
+
+ if (hw->adaptive_ifs) {
+ if (!hw->ifs_params_forced) {
+ hw->current_ifs_val = 0;
+ hw->ifs_min_val = IFS_MIN;
+ hw->ifs_max_val = IFS_MAX;
+ hw->ifs_step_size = IFS_STEP;
+ hw->ifs_ratio = IFS_RATIO;
+ }
+ hw->in_ifs_mode = false;
+ ew32(AIT, 0);
+ } else {
+ e_dbg("Not in Adaptive IFS mode!\n");
+ }
+}
+
+/**
+ * e1000_update_adaptive - update adaptive IFS
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Called during the callback/watchdog routine to update the IFS value based
+ * on the ratio of transmits to collisions (hw->tx_packet_delta vs.
+ * hw->collision_delta, both tracked in the hw struct).
+ */
+void e1000_update_adaptive(struct e1000_hw *hw)
+{
+ e_dbg("e1000_update_adaptive");
+
+ if (hw->adaptive_ifs) {
+ if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
+ if (hw->tx_packet_delta > MIN_NUM_XMITS) {
+ hw->in_ifs_mode = true;
+ if (hw->current_ifs_val < hw->ifs_max_val) {
+ if (hw->current_ifs_val == 0)
+ hw->current_ifs_val =
+ hw->ifs_min_val;
+ else
+ hw->current_ifs_val +=
+ hw->ifs_step_size;
+ ew32(AIT, hw->current_ifs_val);
+ }
+ }
+ } else {
+ if (hw->in_ifs_mode
+ && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
+ hw->current_ifs_val = 0;
+ hw->in_ifs_mode = false;
+ ew32(AIT, 0);
+ }
+ }
+ } else {
+ e_dbg("Not in Adaptive IFS mode!\n");
+ }
+}
+
+/**
+ * e1000_tbi_adjust_stats
+ * @hw: Struct containing variables accessed by shared code
+ * @stats: Struct containing the hardware statistics counters to adjust
+ * @frame_len: The length of the frame in question
+ * @mac_addr: The Ethernet destination address of the frame in question
+ *
+ * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
+ */
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
+ u32 frame_len, u8 *mac_addr)
+{
+ u64 carry_bit;
+
+ /* First adjust the frame length. */
+ frame_len--;
+ /* We need to adjust the statistics counters, since the hardware
+ * counters overcount this packet as a CRC error and undercount
+ * the packet as a good packet
+ */
+ /* This packet should not be counted as a CRC error. */
+ stats->crcerrs--;
+ /* This packet does count as a Good Packet Received. */
+ stats->gprc++;
+
+ /* Adjust the Good Octets received counters */
+ carry_bit = 0x80000000 & stats->gorcl;
+ stats->gorcl += frame_len;
+ /* If the high bit of Gorcl (the low 32 bits of the Good Octets
+ * Received Count) was one before the addition,
+ * AND it is zero after, then we lost the carry out,
+ * need to add one to Gorch (Good Octets Received Count High).
+ * This could be simplified if all environments supported
+ * 64-bit integers.
+ */
+ if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
+ stats->gorch++;
+ /* Is this a broadcast or multicast? Check broadcast first,
+ * since the test for a multicast frame will test positive on
+ * a broadcast frame.
+ */
+ if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff))
+ /* Broadcast packet */
+ stats->bprc++;
+ else if (*mac_addr & 0x01)
+ /* Multicast packet */
+ stats->mprc++;
+
+ if (frame_len == hw->max_frame_size) {
+ /* In this case, the hardware has overcounted the number of
+ * oversize frames.
+ */
+ if (stats->roc > 0)
+ stats->roc--;
+ }
+
+ /* Adjust the bin counters when the extra byte put the frame in the
+ * wrong bin. Remember that the frame_len was adjusted above.
+ */
+ if (frame_len == 64) {
+ stats->prc64++;
+ stats->prc127--;
+ } else if (frame_len == 127) {
+ stats->prc127++;
+ stats->prc255--;
+ } else if (frame_len == 255) {
+ stats->prc255++;
+ stats->prc511--;
+ } else if (frame_len == 511) {
+ stats->prc511++;
+ stats->prc1023--;
+ } else if (frame_len == 1023) {
+ stats->prc1023++;
+ stats->prc1522--;
+ } else if (frame_len == 1522) {
+ stats->prc1522++;
+ }
+}
+
+/**
+ * e1000_get_bus_info
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Gets the current PCI bus type, speed, and width of the hardware
+ */
+void e1000_get_bus_info(struct e1000_hw *hw)
+{
+ u32 status;
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ hw->bus_type = e1000_bus_type_pci;
+ hw->bus_speed = e1000_bus_speed_unknown;
+ hw->bus_width = e1000_bus_width_unknown;
+ break;
+ default:
+ status = er32(STATUS);
+ hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
+ e1000_bus_type_pcix : e1000_bus_type_pci;
+
+ if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
+ hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ?
+ e1000_bus_speed_66 : e1000_bus_speed_120;
+ } else if (hw->bus_type == e1000_bus_type_pci) {
+ hw->bus_speed = (status & E1000_STATUS_PCI66) ?
+ e1000_bus_speed_66 : e1000_bus_speed_33;
+ } else {
+ switch (status & E1000_STATUS_PCIX_SPEED) {
+ case E1000_STATUS_PCIX_SPEED_66:
+ hw->bus_speed = e1000_bus_speed_66;
+ break;
+ case E1000_STATUS_PCIX_SPEED_100:
+ hw->bus_speed = e1000_bus_speed_100;
+ break;
+ case E1000_STATUS_PCIX_SPEED_133:
+ hw->bus_speed = e1000_bus_speed_133;
+ break;
+ default:
+ hw->bus_speed = e1000_bus_speed_reserved;
+ break;
+ }
+ }
+ hw->bus_width = (status & E1000_STATUS_BUS64) ?
+ e1000_bus_width_64 : e1000_bus_width_32;
+ break;
+ }
+}
+
+/**
+ * e1000_write_reg_io
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset to write to
+ * @value: value to write
+ *
+ * Writes a value to one of the device's registers using port I/O (as opposed
+ * to memory mapped I/O). Only 82544 and newer devices support port I/O.
+ */
+static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ unsigned long io_addr = hw->io_base;
+ unsigned long io_data = hw->io_base + 4;
+
+ e1000_io_write(hw, io_addr, offset);
+ e1000_io_write(hw, io_data, value);
+}
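+
+/* Callers normally go through the E1000_WRITE_REG_IO() wrapper, e.g.
+ * (illustrative) E1000_WRITE_REG_IO(hw, CTRL, ctrl), which expands to
+ * e1000_write_reg_io(hw, E1000_CTRL, ctrl).
+ */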
+
+/**
+ * e1000_get_cable_length - Estimates the cable length.
+ * @hw: Struct containing variables accessed by shared code
+ * @min_length: The estimated minimum length
+ * @max_length: The estimated maximum length
+ *
+ * returns: - E1000_ERR_XXX
+ * E1000_SUCCESS
+ *
+ * This function always returns a ranged length (minimum & maximum).
+ * For M88 PHYs it maps the single value read from the register onto a
+ * minimum/maximum range; for IGP PHYs it calculates the range from the
+ * AGC registers.
+ */
+static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
+ u16 *max_length)
+{
+ s32 ret_val;
+ u16 agc_value = 0;
+ u16 i, phy_data;
+ u16 cable_length;
+
+ e_dbg("e1000_get_cable_length");
+
+ *min_length = *max_length = 0;
+
+ /* Use old method for Phy older than IGP */
+ if (hw->phy_type == e1000_phy_m88) {
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+
+ /* Convert the enum value to ranged values */
+ switch (cable_length) {
+ case e1000_cable_length_50:
+ *min_length = 0;
+ *max_length = e1000_igp_cable_length_50;
+ break;
+ case e1000_cable_length_50_80:
+ *min_length = e1000_igp_cable_length_50;
+ *max_length = e1000_igp_cable_length_80;
+ break;
+ case e1000_cable_length_80_110:
+ *min_length = e1000_igp_cable_length_80;
+ *max_length = e1000_igp_cable_length_110;
+ break;
+ case e1000_cable_length_110_140:
+ *min_length = e1000_igp_cable_length_110;
+ *max_length = e1000_igp_cable_length_140;
+ break;
+ case e1000_cable_length_140:
+ *min_length = e1000_igp_cable_length_140;
+ *max_length = e1000_igp_cable_length_170;
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ }
+ } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
+ u16 cur_agc_value;
+ u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
+ static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {
+ IGP01E1000_PHY_AGC_A,
+ IGP01E1000_PHY_AGC_B,
+ IGP01E1000_PHY_AGC_C,
+ IGP01E1000_PHY_AGC_D
+ };
+ /* Read the AGC registers for all channels */
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+
+ ret_val =
+ e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
+
+ /* Value bound check. */
+ if ((cur_agc_value >=
+ IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1)
+ || (cur_agc_value == 0))
+ return -E1000_ERR_PHY;
+
+ agc_value += cur_agc_value;
+
+ /* Update minimal AGC value. */
+ if (min_agc_value > cur_agc_value)
+ min_agc_value = cur_agc_value;
+ }
+
+ /* Remove the minimal AGC result for length < 50m */
+ if (agc_value <
+ IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
+ agc_value -= min_agc_value;
+
+ /* Get the average length of the remaining 3 channels */
+ agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
+ } else {
+ /* Get the average length of all the 4 channels. */
+ agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
+ }
+
+ /* Set the range of the calculated length. */
+ *min_length = ((e1000_igp_cable_length_table[agc_value] -
+ IGP01E1000_AGC_RANGE) > 0) ?
+ (e1000_igp_cable_length_table[agc_value] -
+ IGP01E1000_AGC_RANGE) : 0;
+ *max_length = e1000_igp_cable_length_table[agc_value] +
+ IGP01E1000_AGC_RANGE;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_polarity - Check the cable polarity
+ * @hw: Struct containing variables accessed by shared code
+ * @polarity: output parameter : 0 - Polarity is not reversed
+ * 1 - Polarity is reversed.
+ *
+ * returns: - E1000_ERR_XXX
+ * E1000_SUCCESS
+ *
+ * For PHYs older than IGP, this function simply reads the polarity bit in the
+ * Phy Status register. For IGP PHYs, this bit is valid only if the link speed
+ * is 10 Mbps. At 100 Mbps there is no polarity, so this bit reads 0. At
+ * 1000 Mbps the polarity status is in the IGP01E1000_PHY_PCS_INIT_REG.
+ */
+static s32 e1000_check_polarity(struct e1000_hw *hw,
+ e1000_rev_polarity *polarity)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_check_polarity");
+
+ if (hw->phy_type == e1000_phy_m88) {
+ /* return the Polarity bit in the Status register. */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >>
+ M88E1000_PSSR_REV_POLARITY_SHIFT) ?
+ e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+
+ } else if (hw->phy_type == e1000_phy_igp) {
+ /* Read the Status register to check the speed */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
+ * find the polarity status */
+ if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+
+ /* Read the GIG initialization PCS register (0x00B4) */
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Check the polarity bits */
+ *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ?
+ e1000_rev_polarity_reversed :
+ e1000_rev_polarity_normal;
+ } else {
+ /* For 10 Mbps, read the polarity bit in the status register. (for
+ * 100 Mbps this bit is always 0) */
+ *polarity =
+ (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
+ e1000_rev_polarity_reversed :
+ e1000_rev_polarity_normal;
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_downshift - Check if Downshift occurred
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_XXX
+ * E1000_SUCCESS
+ *
+ * For PHYs older than IGP, this function reads the Downshift bit in the Phy
+ * Specific Status register. For IGP PHYs, it reads the Downgrade bit in the
+ * Link Health register. The result is stored in hw->speed_downgraded. In IGP
+ * this bit is latched high, so the driver must read it immediately after
+ * link is established.
+ */
+static s32 e1000_check_downshift(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ e_dbg("e1000_check_downshift");
+
+ if (hw->phy_type == e1000_phy_igp) {
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ hw->speed_downgraded =
+ (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
+ } else if (hw->phy_type == e1000_phy_m88) {
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
+ M88E1000_PSSR_DOWNSHIFT_SHIFT;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_config_dsp_after_link_change
+ * @hw: Struct containing variables accessed by shared code
+ * @link_up: was link up at the time this was called
+ *
+ * returns: - E1000_ERR_PHY if fail to read/write the PHY
+ * E1000_SUCCESS otherwise.
+ *
+ * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
+ * gigabit link is achieved to improve link quality.
+ */
+static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
+{
+ s32 ret_val;
+ u16 phy_data, phy_saved_data, speed, duplex, i;
+ static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {
+ IGP01E1000_PHY_AGC_PARAM_A,
+ IGP01E1000_PHY_AGC_PARAM_B,
+ IGP01E1000_PHY_AGC_PARAM_C,
+ IGP01E1000_PHY_AGC_PARAM_D
+ };
+ u16 min_length, max_length;
+
+ e_dbg("e1000_config_dsp_after_link_change");
+
+ if (hw->phy_type != e1000_phy_igp)
+ return E1000_SUCCESS;
+
+ if (link_up) {
+ ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ e_dbg("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+
+ if (speed == SPEED_1000) {
+
+ ret_val =
+ e1000_get_cable_length(hw, &min_length,
+ &max_length);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->dsp_config_state == e1000_dsp_config_enabled)
+ && min_length >= e1000_igp_cable_length_50) {
+
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val =
+ e1000_read_phy_reg(hw,
+ dsp_reg_array[i],
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &=
+ ~IGP01E1000_PHY_EDAC_MU_INDEX;
+
+ ret_val =
+ e1000_write_phy_reg(hw,
+ dsp_reg_array
+ [i], phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+ hw->dsp_config_state =
+ e1000_dsp_config_activated;
+ }
+
+ if ((hw->ffe_config_state == e1000_ffe_config_enabled)
+ && (min_length < e1000_igp_cable_length_50)) {
+
+ u16 ffe_idle_err_timeout =
+ FFE_IDLE_ERR_COUNT_TIMEOUT_20;
+ u32 idle_errs = 0;
+
+ /* clear previous idle error counts */
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ for (i = 0; i < ffe_idle_err_timeout; i++) {
+ udelay(1000);
+ ret_val =
+ e1000_read_phy_reg(hw,
+ PHY_1000T_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ idle_errs +=
+ (phy_data &
+ SR_1000T_IDLE_ERROR_CNT);
+ if (idle_errs >
+ SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT)
+ {
+ hw->ffe_config_state =
+ e1000_ffe_config_active;
+
+ ret_val =
+ e1000_write_phy_reg(hw,
+ IGP01E1000_PHY_DSP_FFE,
+ IGP01E1000_PHY_DSP_FFE_CM_CP);
+ if (ret_val)
+ return ret_val;
+ break;
+ }
+
+ if (idle_errs)
+ ffe_idle_err_timeout =
+ FFE_IDLE_ERR_COUNT_TIMEOUT_100;
+ }
+ }
+ }
+ } else {
+ if (hw->dsp_config_state == e1000_dsp_config_activated) {
+ /* Save off the current value of register 0x2F5B to be restored at
+ * the end of the routine. */
+ ret_val =
+ e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ /* Disable the PHY transmitter */
+ ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+ if (ret_val)
+ return ret_val;
+
+ msleep(20);
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_FORCE_GIGA);
+ if (ret_val)
+ return ret_val;
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val =
+ e1000_read_phy_reg(hw, dsp_reg_array[i],
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+ phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
+
+ ret_val =
+ e1000_write_phy_reg(hw, dsp_reg_array[i],
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_RESTART_AUTONEG);
+ if (ret_val)
+ return ret_val;
+
+ msleep(20);
+
+ /* Now enable the transmitter */
+ ret_val =
+ e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ hw->dsp_config_state = e1000_dsp_config_enabled;
+ }
+
+ if (hw->ffe_config_state == e1000_ffe_config_active) {
+ /* Save off the current value of register 0x2F5B to be restored at
+ * the end of the routine. */
+ ret_val =
+ e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ /* Disable the PHY transmitter */
+ ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+ if (ret_val)
+ return ret_val;
+
+ msleep(20);
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_FORCE_GIGA);
+ if (ret_val)
+ return ret_val;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
+ IGP01E1000_PHY_DSP_FFE_DEFAULT);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_RESTART_AUTONEG);
+ if (ret_val)
+ return ret_val;
+
+ msleep(20);
+
+ /* Now enable the transmitter */
+ ret_val =
+ e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ hw->ffe_config_state = e1000_ffe_config_enabled;
+ }
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_phy_mode - Set PHY to class A mode
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Assumes the following operations will follow to enable the new class mode.
+ * 1. Do a PHY soft reset
+ * 2. Restart auto-negotiation or force link.
+ */
+static s32 e1000_set_phy_mode(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 eeprom_data;
+
+ e_dbg("e1000_set_phy_mode");
+
+ if ((hw->mac_type == e1000_82545_rev_3) &&
+ (hw->media_type == e1000_media_type_copper)) {
+ ret_val =
+ e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1,
+ &eeprom_data);
+ if (ret_val) {
+ return ret_val;
+ }
+
+ if ((eeprom_data != EEPROM_RESERVED_WORD) &&
+ (eeprom_data & EEPROM_PHY_CLASS_A)) {
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT,
+ 0x000B);
+ if (ret_val)
+ return ret_val;
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL,
+ 0x8104);
+ if (ret_val)
+ return ret_val;
+
+ hw->phy_reset_disable = false;
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_d3_lplu_state - set d3 link power state
+ * @hw: Struct containing variables accessed by shared code
+ * @active: true to enable lplu false to disable lplu.
+ *
+ * This function sets the LPLU state according to the active flag. When
+ * activating LPLU this function also disables smart speed and vice versa.
+ * LPLU will not be activated unless the device autonegotiation advertisement
+ * is limited to 10, 10/100, or 10/100/1000 Mbps at all duplexes.
+ *
+ * returns: - E1000_ERR_PHY if fail to read/write the PHY
+ * E1000_SUCCESS otherwise.
+ */
+static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+ s32 ret_val;
+ u16 phy_data;
+ e_dbg("e1000_set_d3_lplu_state");
+
+ if (hw->phy_type != e1000_phy_igp)
+ return E1000_SUCCESS;
+
+ /* During normal driver activity LPLU should not be used, or the NIC
+ * will only attain link at the lowest speeds, starting from 10 Mbps. The
+ * capability is intended for Dx power-state transitions. */
+ if (hw->mac_type == e1000_82541_rev_2
+ || hw->mac_type == e1000_82547_rev_2) {
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (!active) {
+ if (hw->mac_type == e1000_82541_rev_2 ||
+ hw->mac_type == e1000_82547_rev_2) {
+ phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
+ * Dx states where the power conservation is most important. During
+ * driver activity we should enable SmartSpeed, so performance is
+ * maintained. */
+ if (hw->smart_speed == e1000_smart_speed_on) {
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ } else if (hw->smart_speed == e1000_smart_speed_off) {
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+ } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT)
+ || (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL)
+ || (hw->autoneg_advertised ==
+ AUTONEG_ADVERTISE_10_100_ALL)) {
+
+ if (hw->mac_type == e1000_82541_rev_2 ||
+ hw->mac_type == e1000_82547_rev_2) {
+ phy_data |= IGP01E1000_GMII_FLEX_SPD;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* When LPLU is enabled we should disable SmartSpeed */
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_vco_speed
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Change VCO speed register to improve Bit Error Rate performance of SERDES.
+ */
+static s32 e1000_set_vco_speed(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 default_page = 0;
+ u16 phy_data;
+
+ e_dbg("e1000_set_vco_speed");
+
+ switch (hw->mac_type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ break;
+ default:
+ return E1000_SUCCESS;
+ }
+
+ /* Set PHY register 30, page 5, bit 8 to 0 */
+
+ ret_val =
+ e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Set PHY register 30, page 4, bit 11 to 1 */
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PHY_VCO_REG_BIT11;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page);
+ if (ret_val)
+ return ret_val;
+
+ return E1000_SUCCESS;
+}
+
+
+/**
+ * e1000_enable_mng_pass_thru - check for BMC pass through
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Verifies whether the hardware needs to allow ARPs to be processed by
+ * the host.
+ * returns: - true/false
+ */
+u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+ u32 manc;
+
+ if (hw->asf_firmware_present) {
+ manc = er32(MANC);
+
+ if (!(manc & E1000_MANC_RCV_TCO_EN) ||
+ !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
+ return false;
+ if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
+ return true;
+ }
+ return false;
+}
+
+static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 mii_status_reg;
+ u16 i;
+
+ /* Polarity reversal workaround for forced 10F/10H links. */
+
+ /* Disable the transmitter on the PHY */
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+ if (ret_val)
+ return ret_val;
+
+ /* This loop will early-out if the no-link condition has been met. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Link Status bit
+ * to be clear.
+ */
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0)
+ break;
+ msleep(100);
+ }
+
+ /* Recommended delay time after link has been lost */
+ msleep(1000);
+
+ /* Now we will re-enable the transmitter on the PHY */
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+ if (ret_val)
+ return ret_val;
+ msleep(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
+ if (ret_val)
+ return ret_val;
+ msleep(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
+ if (ret_val)
+ return ret_val;
+ msleep(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+ if (ret_val)
+ return ret_val;
+
+ /* This loop will early-out if the link condition has been met. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Link Status bit
+ * to be set.
+ */
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (mii_status_reg & MII_SR_LINK_STATUS)
+ break;
+ msleep(100);
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_auto_rd_done
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Check for EEPROM Auto Read bit done.
+ * returns: - E1000_SUCCESS (after a fixed delay; there is no failure path
+ * in this implementation)
+ */
+static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
+{
+ e_dbg("e1000_get_auto_rd_done");
+ msleep(5);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_phy_cfg_done
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Checks if the PHY configuration is done
+ * returns: - E1000_SUCCESS (after a fixed delay; there is no failure path
+ * in this implementation)
+ */
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
+{
+ e_dbg("e1000_get_phy_cfg_done");
+ msleep(10);
+ return E1000_SUCCESS;
+}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h
new file mode 100644
index 000000000000..5c9a8403668b
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h
@@ -0,0 +1,3103 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_hw.h
+ * Structures, enums, and macros for the MAC
+ */
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include "e1000_osdep.h"
+
+
+/* Forward declarations of structures used by the shared code */
+struct e1000_hw;
+struct e1000_hw_stats;
+
+/* Enumerated types specific to the e1000 hardware */
+/* Media Access Controllers */
+typedef enum {
+ e1000_undefined = 0,
+ e1000_82542_rev2_0,
+ e1000_82542_rev2_1,
+ e1000_82543,
+ e1000_82544,
+ e1000_82540,
+ e1000_82545,
+ e1000_82545_rev_3,
+ e1000_82546,
+ e1000_ce4100,
+ e1000_82546_rev_3,
+ e1000_82541,
+ e1000_82541_rev_2,
+ e1000_82547,
+ e1000_82547_rev_2,
+ e1000_num_macs
+} e1000_mac_type;
+
+typedef enum {
+ e1000_eeprom_uninitialized = 0,
+ e1000_eeprom_spi,
+ e1000_eeprom_microwire,
+ e1000_eeprom_flash,
+ e1000_eeprom_none, /* No NVM support */
+ e1000_num_eeprom_types
+} e1000_eeprom_type;
+
+/* Media Types */
+typedef enum {
+ e1000_media_type_copper = 0,
+ e1000_media_type_fiber = 1,
+ e1000_media_type_internal_serdes = 2,
+ e1000_num_media_types
+} e1000_media_type;
+
+typedef enum {
+ e1000_10_half = 0,
+ e1000_10_full = 1,
+ e1000_100_half = 2,
+ e1000_100_full = 3
+} e1000_speed_duplex_type;
+
+/* Flow Control Settings */
+typedef enum {
+ E1000_FC_NONE = 0,
+ E1000_FC_RX_PAUSE = 1,
+ E1000_FC_TX_PAUSE = 2,
+ E1000_FC_FULL = 3,
+ E1000_FC_DEFAULT = 0xFF
+} e1000_fc_type;
+
+struct e1000_shadow_ram {
+ u16 eeprom_word;
+ bool modified;
+};
+
+/* PCI bus types */
+typedef enum {
+ e1000_bus_type_unknown = 0,
+ e1000_bus_type_pci,
+ e1000_bus_type_pcix,
+ e1000_bus_type_reserved
+} e1000_bus_type;
+
+/* PCI bus speeds */
+typedef enum {
+ e1000_bus_speed_unknown = 0,
+ e1000_bus_speed_33,
+ e1000_bus_speed_66,
+ e1000_bus_speed_100,
+ e1000_bus_speed_120,
+ e1000_bus_speed_133,
+ e1000_bus_speed_reserved
+} e1000_bus_speed;
+
+/* PCI bus widths */
+typedef enum {
+ e1000_bus_width_unknown = 0,
+ e1000_bus_width_32,
+ e1000_bus_width_64,
+ e1000_bus_width_reserved
+} e1000_bus_width;
+
+/* PHY status info structure and supporting enums */
+typedef enum {
+ e1000_cable_length_50 = 0,
+ e1000_cable_length_50_80,
+ e1000_cable_length_80_110,
+ e1000_cable_length_110_140,
+ e1000_cable_length_140,
+ e1000_cable_length_undefined = 0xFF
+} e1000_cable_length;
+
+typedef enum {
+ e1000_gg_cable_length_60 = 0,
+ e1000_gg_cable_length_60_115 = 1,
+ e1000_gg_cable_length_115_150 = 2,
+ e1000_gg_cable_length_150 = 4
+} e1000_gg_cable_length;
+
+typedef enum {
+ e1000_igp_cable_length_10 = 10,
+ e1000_igp_cable_length_20 = 20,
+ e1000_igp_cable_length_30 = 30,
+ e1000_igp_cable_length_40 = 40,
+ e1000_igp_cable_length_50 = 50,
+ e1000_igp_cable_length_60 = 60,
+ e1000_igp_cable_length_70 = 70,
+ e1000_igp_cable_length_80 = 80,
+ e1000_igp_cable_length_90 = 90,
+ e1000_igp_cable_length_100 = 100,
+ e1000_igp_cable_length_110 = 110,
+ e1000_igp_cable_length_115 = 115,
+ e1000_igp_cable_length_120 = 120,
+ e1000_igp_cable_length_130 = 130,
+ e1000_igp_cable_length_140 = 140,
+ e1000_igp_cable_length_150 = 150,
+ e1000_igp_cable_length_160 = 160,
+ e1000_igp_cable_length_170 = 170,
+ e1000_igp_cable_length_180 = 180
+} e1000_igp_cable_length;
+
+typedef enum {
+ e1000_10bt_ext_dist_enable_normal = 0,
+ e1000_10bt_ext_dist_enable_lower,
+ e1000_10bt_ext_dist_enable_undefined = 0xFF
+} e1000_10bt_ext_dist_enable;
+
+typedef enum {
+ e1000_rev_polarity_normal = 0,
+ e1000_rev_polarity_reversed,
+ e1000_rev_polarity_undefined = 0xFF
+} e1000_rev_polarity;
+
+typedef enum {
+ e1000_downshift_normal = 0,
+ e1000_downshift_activated,
+ e1000_downshift_undefined = 0xFF
+} e1000_downshift;
+
+typedef enum {
+ e1000_smart_speed_default = 0,
+ e1000_smart_speed_on,
+ e1000_smart_speed_off
+} e1000_smart_speed;
+
+typedef enum {
+ e1000_polarity_reversal_enabled = 0,
+ e1000_polarity_reversal_disabled,
+ e1000_polarity_reversal_undefined = 0xFF
+} e1000_polarity_reversal;
+
+typedef enum {
+ e1000_auto_x_mode_manual_mdi = 0,
+ e1000_auto_x_mode_manual_mdix,
+ e1000_auto_x_mode_auto1,
+ e1000_auto_x_mode_auto2,
+ e1000_auto_x_mode_undefined = 0xFF
+} e1000_auto_x_mode;
+
+typedef enum {
+ e1000_1000t_rx_status_not_ok = 0,
+ e1000_1000t_rx_status_ok,
+ e1000_1000t_rx_status_undefined = 0xFF
+} e1000_1000t_rx_status;
+
+typedef enum {
+ e1000_phy_m88 = 0,
+ e1000_phy_igp,
+ e1000_phy_8211,
+ e1000_phy_8201,
+ e1000_phy_undefined = 0xFF
+} e1000_phy_type;
+
+typedef enum {
+ e1000_ms_hw_default = 0,
+ e1000_ms_force_master,
+ e1000_ms_force_slave,
+ e1000_ms_auto
+} e1000_ms_type;
+
+typedef enum {
+ e1000_ffe_config_enabled = 0,
+ e1000_ffe_config_active,
+ e1000_ffe_config_blocked
+} e1000_ffe_config;
+
+typedef enum {
+ e1000_dsp_config_disabled = 0,
+ e1000_dsp_config_enabled,
+ e1000_dsp_config_activated,
+ e1000_dsp_config_undefined = 0xFF
+} e1000_dsp_config;
+
+struct e1000_phy_info {
+ e1000_cable_length cable_length;
+ e1000_10bt_ext_dist_enable extended_10bt_distance;
+ e1000_rev_polarity cable_polarity;
+ e1000_downshift downshift;
+ e1000_polarity_reversal polarity_correction;
+ e1000_auto_x_mode mdix_mode;
+ e1000_1000t_rx_status local_rx;
+ e1000_1000t_rx_status remote_rx;
+};
+
+struct e1000_phy_stats {
+ u32 idle_errors;
+ u32 receive_errors;
+};
+
+struct e1000_eeprom_info {
+ e1000_eeprom_type type;
+ u16 word_size;
+ u16 opcode_bits;
+ u16 address_bits;
+ u16 delay_usec;
+ u16 page_size;
+};
+
+/* Flex ASF Information */
+#define E1000_HOST_IF_MAX_SIZE 2048
+
+typedef enum {
+ e1000_byte_align = 0,
+ e1000_word_align = 1,
+ e1000_dword_align = 2
+} e1000_align_type;
+
+/* Error Codes */
+#define E1000_SUCCESS 0
+#define E1000_ERR_EEPROM 1
+#define E1000_ERR_PHY 2
+#define E1000_ERR_CONFIG 3
+#define E1000_ERR_PARAM 4
+#define E1000_ERR_MAC_TYPE 5
+#define E1000_ERR_PHY_TYPE 6
+#define E1000_ERR_RESET 9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_ERR_HOST_INTERFACE_COMMAND 11
+#define E1000_BLK_PHY_RESET 12
+
+#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \
+ (((_value) & 0xff00) >> 8))
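+/* e.g. E1000_BYTE_SWAP_WORD(0x1234) == 0x3412 */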
+
+/* Function prototypes */
+/* Initialization */
+s32 e1000_reset_hw(struct e1000_hw *hw);
+s32 e1000_init_hw(struct e1000_hw *hw);
+s32 e1000_set_mac_type(struct e1000_hw *hw);
+void e1000_set_media_type(struct e1000_hw *hw);
+
+/* Link Configuration */
+s32 e1000_setup_link(struct e1000_hw *hw);
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
+void e1000_config_collision_dist(struct e1000_hw *hw);
+s32 e1000_check_for_link(struct e1000_hw *hw);
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+s32 e1000_force_mac_fc(struct e1000_hw *hw);
+
+/* PHY */
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data);
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
+s32 e1000_phy_hw_reset(struct e1000_hw *hw);
+s32 e1000_phy_reset(struct e1000_hw *hw);
+s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
+
+/* EEPROM Functions */
+s32 e1000_init_eeprom_params(struct e1000_hw *hw);
+
+/* MNG HOST IF functions */
+u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */
+
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */
+#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */
+#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */
+#define E1000_MNG_IAMT_MODE 0x3
+#define E1000_MNG_ICH_IAMT_MODE 0x2
+#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */
+
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP VLAN support enabled */
+#define E1000_VFTA_ENTRY_SHIFT 0x5
+#define E1000_VFTA_ENTRY_MASK 0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
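+/* Illustrative mapping (hypothetical helper, mirroring how the driver uses
+ * these masks): for VLAN id "vid", the filter-table word index is
+ * (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK and the bit
+ * within that word is vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK.
+ */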
+
+struct e1000_host_mng_command_header {
+ u8 command_id;
+ u8 checksum;
+ u16 reserved1;
+ u16 reserved2;
+ u16 command_length;
+};
+
+struct e1000_host_mng_command_info {
+ struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head */
+ u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can be up to 0x658 bytes long */
+};
+#ifdef __BIG_ENDIAN
+struct e1000_host_mng_dhcp_cookie {
+ u32 signature;
+ u16 vlan_id;
+ u8 reserved0;
+ u8 status;
+ u32 reserved1;
+ u8 checksum;
+ u8 reserved3;
+ u16 reserved2;
+};
+#else
+struct e1000_host_mng_dhcp_cookie {
+ u32 signature;
+ u8 status;
+ u8 reserved0;
+ u16 vlan_id;
+ u32 reserved1;
+ u16 reserved2;
+ u8 reserved3;
+ u8 checksum;
+};
+#endif
+
+bool e1000_check_mng_mode(struct e1000_hw *hw);
+s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
+s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw);
+s32 e1000_update_eeprom_checksum(struct e1000_hw *hw);
+s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
+s32 e1000_read_mac_addr(struct e1000_hw *hw);
+
+/* Filters (multicast, vlan, receive) */
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
+void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
+void e1000_rar_set(struct e1000_hw *hw, u8 *mc_addr, u32 rar_index);
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+
+/* LED functions */
+s32 e1000_setup_led(struct e1000_hw *hw);
+s32 e1000_cleanup_led(struct e1000_hw *hw);
+s32 e1000_led_on(struct e1000_hw *hw);
+s32 e1000_led_off(struct e1000_hw *hw);
+s32 e1000_blink_led_start(struct e1000_hw *hw);
+
+/* Adaptive IFS Functions */
+
+/* Everything else */
+void e1000_reset_adaptive(struct e1000_hw *hw);
+void e1000_update_adaptive(struct e1000_hw *hw);
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
+ u32 frame_len, u8 *mac_addr);
+void e1000_get_bus_info(struct e1000_hw *hw);
+void e1000_pci_set_mwi(struct e1000_hw *hw);
+void e1000_pci_clear_mwi(struct e1000_hw *hw);
+void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc);
+int e1000_pcix_get_mmrbc(struct e1000_hw *hw);
+/* Port I/O is only supported on 82544 and newer */
+void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
+
+#define E1000_READ_REG_IO(a, reg) \
+ e1000_read_reg_io((a), E1000_##reg)
+#define E1000_WRITE_REG_IO(a, reg, val) \
+ e1000_write_reg_io((a), E1000_##reg, val)
+
+/* PCI Device IDs */
+#define E1000_DEV_ID_82542 0x1000
+#define E1000_DEV_ID_82543GC_FIBER 0x1001
+#define E1000_DEV_ID_82543GC_COPPER 0x1004
+#define E1000_DEV_ID_82544EI_COPPER 0x1008
+#define E1000_DEV_ID_82544EI_FIBER 0x1009
+#define E1000_DEV_ID_82544GC_COPPER 0x100C
+#define E1000_DEV_ID_82544GC_LOM 0x100D
+#define E1000_DEV_ID_82540EM 0x100E
+#define E1000_DEV_ID_82540EM_LOM 0x1015
+#define E1000_DEV_ID_82540EP_LOM 0x1016
+#define E1000_DEV_ID_82540EP 0x1017
+#define E1000_DEV_ID_82540EP_LP 0x101E
+#define E1000_DEV_ID_82545EM_COPPER 0x100F
+#define E1000_DEV_ID_82545EM_FIBER 0x1011
+#define E1000_DEV_ID_82545GM_COPPER 0x1026
+#define E1000_DEV_ID_82545GM_FIBER 0x1027
+#define E1000_DEV_ID_82545GM_SERDES 0x1028
+#define E1000_DEV_ID_82546EB_COPPER 0x1010
+#define E1000_DEV_ID_82546EB_FIBER 0x1012
+#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D
+#define E1000_DEV_ID_82541EI 0x1013
+#define E1000_DEV_ID_82541EI_MOBILE 0x1018
+#define E1000_DEV_ID_82541ER_LOM 0x1014
+#define E1000_DEV_ID_82541ER 0x1078
+#define E1000_DEV_ID_82547GI 0x1075
+#define E1000_DEV_ID_82541GI 0x1076
+#define E1000_DEV_ID_82541GI_MOBILE 0x1077
+#define E1000_DEV_ID_82541GI_LF 0x107C
+#define E1000_DEV_ID_82546GB_COPPER 0x1079
+#define E1000_DEV_ID_82546GB_FIBER 0x107A
+#define E1000_DEV_ID_82546GB_SERDES 0x107B
+#define E1000_DEV_ID_82546GB_PCIE 0x108A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
+#define E1000_DEV_ID_82547EI 0x1019
+#define E1000_DEV_ID_82547EI_MOBILE 0x101A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E
+
+#define NODE_ADDRESS_SIZE 6
+#define ETH_LENGTH_OF_ADDRESS 6
+
+/* MAC decode size is 128K - This is the size of BAR0 */
+#define MAC_DECODE_SIZE (128 * 1024)
+
+#define E1000_82542_2_0_REV_ID 2
+#define E1000_82542_2_1_REV_ID 3
+#define E1000_REVISION_0 0
+#define E1000_REVISION_1 1
+#define E1000_REVISION_2 2
+#define E1000_REVISION_3 3
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+/* The sizes (in bytes) of an Ethernet packet */
+#define ENET_HEADER_SIZE 14
+#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */
+#define ETHERNET_FCS_SIZE 4
+#define MINIMUM_ETHERNET_PACKET_SIZE \
+ (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
+#define CRC_LENGTH ETHERNET_FCS_SIZE
+#define MAX_JUMBO_FRAME_SIZE 0x3F00
+
+/* 802.1q VLAN Packet Sizes */
+#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
+#define ETHERNET_IP_TYPE 0x0800 /* IP packets */
+#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */
+
+/* Packet Header defines */
+#define IP_PROTOCOL_TCP 6
+#define IP_PROTOCOL_UDP 0x11
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ * o RXSEQ = Receive Sequence Error
+ */
+#define POLL_IMS_ENABLE_MASK ( \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ)
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ * o RXT0 = Receiver Timer Interrupt (ring 0)
+ * o TXDW = Transmit Descriptor Written Back
+ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ * o RXSEQ = Receive Sequence Error
+ * o LSC = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC)
+
+/* Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor. We
+ * reserve one of these spots for our directed address, allowing us room for
+ * E1000_RAR_ENTRIES - 1 multicast addresses.
+ */
+#define E1000_RAR_ENTRIES 15
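+
+/* Illustrative sketch (not part of the original patch): per the comment
+ * above, slot 0 of the RAR carries the station (directed) address and the
+ * remaining E1000_RAR_ENTRIES - 1 slots can hold additional addresses.
+ */
+static inline void example_rar_init(struct e1000_hw *hw, u8 *sta_addr,
+				    u8 *extra_list, u32 n_extra)
+{
+	u32 i;
+
+	e1000_rar_set(hw, sta_addr, 0);	/* reserved directed slot */
+	for (i = 0; i < n_extra && i < E1000_RAR_ENTRIES - 1; i++)
+		e1000_rar_set(hw, extra_list + i * 6, i + 1);
+}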
+
+#define MIN_NUMBER_OF_DESCRIPTORS 8
+#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8
+
+/* Receive Descriptor */
+struct e1000_rx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le16 length; /* Length of data DMAed into data buffer */
+ __le16 csum; /* Packet checksum */
+ u8 status; /* Descriptor status */
+ u8 errors; /* Descriptor Errors */
+ __le16 special;
+};
+
+/* Receive Descriptor - Extended */
+union e1000_rx_desc_extended {
+ struct {
+ __le64 buffer_addr;
+ __le64 reserved;
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length;
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+ struct {
+ /* one buffer for protocol header(s), three data buffers */
+ __le64 buffer_addr[MAX_PS_BUFFERS];
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length0; /* length of buffer 0 */
+ __le16 vlan; /* VLAN tag */
+ } middle;
+ struct {
+ __le16 header_status;
+ __le16 length[3]; /* length of buffers 1-3 */
+ } upper;
+ __le64 reserved;
+ } wb; /* writeback */
+};
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
+#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */
+#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
+#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
+#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
+#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
+#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define E1000_RXD_SPC_PRI_SHIFT 13
+#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */
+#define E1000_RXD_SPC_CFI_SHIFT 12
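+
+/* Illustrative sketch (not part of the original patch): unpacking the
+ * "special" field of a legacy Rx descriptor with the masks above.
+ */
+static inline u16 example_rxd_vlan_id(__le16 special)
+{
+	return le16_to_cpu(special) & E1000_RXD_SPC_VLAN_MASK;
+}
+
+static inline u8 example_rxd_vlan_prio(__le16 special)
+{
+	return (le16_to_cpu(special) & E1000_RXD_SPC_PRI_MASK) >>
+	       E1000_RXD_SPC_PRI_SHIFT;
+}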
+
+#define E1000_RXDEXT_STATERR_CE 0x01000000
+#define E1000_RXDEXT_STATERR_SE 0x02000000
+#define E1000_RXDEXT_STATERR_SEQ 0x04000000
+#define E1000_RXDEXT_STATERR_CXE 0x10000000
+#define E1000_RXDEXT_STATERR_TCPE 0x20000000
+#define E1000_RXDEXT_STATERR_IPE 0x40000000
+#define E1000_RXDEXT_STATERR_RXE 0x80000000
+
+#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
+#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+ E1000_RXD_ERR_CE | \
+ E1000_RXD_ERR_SE | \
+ E1000_RXD_ERR_SEQ | \
+ E1000_RXD_ERR_CXE | \
+ E1000_RXD_ERR_RXE)
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
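+
+/* Illustrative sketch (not part of the original patch): the minimal checks
+ * a cleanup loop makes on a completed legacy Rx descriptor before handing
+ * the buffer up: written back, end-of-packet, and no frame errors.
+ */
+static inline bool example_rxd_pkt_ok(const struct e1000_rx_desc *desc)
+{
+	if (!(desc->status & E1000_RXD_STAT_DD))
+		return false;	/* hardware has not written it back yet */
+	if (!(desc->status & E1000_RXD_STAT_EOP))
+		return false;	/* frame spans multiple descriptors */
+	return !(desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK);
+}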
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 cso; /* Checksum offset */
+ u8 cmd; /* Descriptor control */
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 css; /* Checksum start */
+ __le16 special;
+ } fields;
+ } upper;
+};
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
+#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */
+#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
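+
+/* Illustrative sketch (not part of the original patch): filling one legacy
+ * Tx descriptor for a single-buffer frame. The DMA address and length are
+ * assumed to come from the caller's mapping; EOP|IFCS|RS asks the MAC to
+ * close the frame, insert the FCS, and report completion status.
+ */
+static inline void example_txd_fill(struct e1000_tx_desc *desc,
+				    dma_addr_t dma_addr, u16 len)
+{
+	desc->buffer_addr = cpu_to_le64(dma_addr);
+	desc->lower.data = cpu_to_le32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS |
+				       E1000_TXD_CMD_RS | len);
+	desc->upper.data = 0;
+}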
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+ union {
+ __le32 ip_config;
+ struct {
+ u8 ipcss; /* IP checksum start */
+ u8 ipcso; /* IP checksum offset */
+ __le16 ipcse; /* IP checksum end */
+ } ip_fields;
+ } lower_setup;
+ union {
+ __le32 tcp_config;
+ struct {
+ u8 tucss; /* TCP checksum start */
+ u8 tucso; /* TCP checksum offset */
+ __le16 tucse; /* TCP checksum end */
+ } tcp_fields;
+ } upper_setup;
+ __le32 cmd_and_length; /* Descriptor command and length */
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 hdr_len; /* Header length */
+ __le16 mss; /* Maximum segment size */
+ } fields;
+ } tcp_seg_setup;
+};
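+
+/* Illustrative sketch (not part of the original patch): a TCP checksum
+ * offload context built from the fields above. Offsets are in bytes from
+ * the start of the frame; a tucse of zero means "to end of packet", and
+ * the TCP checksum field sits 16 bytes into the TCP header.
+ */
+static inline void example_tx_csum_ctx(struct e1000_context_desc *ctx,
+				       u8 tcp_off)
+{
+	ctx->lower_setup.ip_config = 0;		/* no IP checksum insert */
+	ctx->upper_setup.tcp_fields.tucss = tcp_off;
+	ctx->upper_setup.tcp_fields.tucso = tcp_off + 16;
+	ctx->upper_setup.tcp_fields.tucse = 0;
+	ctx->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C);
+	ctx->tcp_seg_setup.data = 0;		/* no TSO */
+}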
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+ __le64 buffer_addr; /* Address of the descriptor's buffer address */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 typ_len_ext; /* Descriptor type and length extension */
+ u8 cmd; /* Descriptor control */
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 popts; /* Packet Options */
+ __le16 special; /* VLAN tag */
+ } fields;
+ } upper;
+};
+
+/* Filters */
+#define E1000_NUM_UNICAST 16 /* Unicast filter entries */
+#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
+
+/* Receive Address Register */
+struct e1000_rar {
+ volatile __le32 low; /* receive address low */
+ volatile __le32 high; /* receive address high */
+};
+
+/* Number of entries in the Multicast Table Array (MTA). */
+#define E1000_NUM_MTA_REGISTERS 128
+
+/* IPv4 Address Table Entry */
+struct e1000_ipv4_at_entry {
+ volatile u32 ipv4_addr; /* IP Address (RW) */
+ volatile u32 reserved;
+};
+
+/* Four wakeup IP addresses are supported */
+#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4
+#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX
+#define E1000_IP6AT_SIZE 1
+
+/* IPv6 Address Table Entry */
+struct e1000_ipv6_at_entry {
+ volatile u8 ipv6_addr[16];
+};
+
+/* Flexible Filter Length Table Entry */
+struct e1000_fflt_entry {
+ volatile u32 length; /* Flexible Filter Length (RW) */
+ volatile u32 reserved;
+};
+
+/* Flexible Filter Mask Table Entry */
+struct e1000_ffmt_entry {
+ volatile u32 mask; /* Flexible Filter Mask (RW) */
+ volatile u32 reserved;
+};
+
+/* Flexible Filter Value Table Entry */
+struct e1000_ffvt_entry {
+ volatile u32 value; /* Flexible Filter Value (RW) */
+ volatile u32 reserved;
+};
+
+/* Four Flexible Filters are supported */
+#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128
+
+#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX
+#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+
+#define E1000_DISABLE_SERDES_LOOPBACK 0x0400
+
+/* Register Set. (82543, 82544)
+ *
+ * Registers are defined to be 32 bits and should be accessed as 32 bit values.
+ * These registers are physically located on the NIC, but are mapped into the
+ * host memory address space.
+ *
+ * RW - register is both readable and writable
+ * RO - register is read only
+ * WO - register is write only
+ * R/clr - register is read only and is cleared when read
+ * A - register array
+ */
+#define E1000_CTRL 0x00000 /* Device Control - RW */
+#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */
+#define E1000_STATUS 0x00008 /* Device Status - RO */
+#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
+#define E1000_EERD 0x00014 /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define E1000_FLA 0x0001C /* Flash Access - RW */
+#define E1000_MDIC 0x00020 /* MDI Control - RW */
+
+extern void __iomem *ce4100_gbe_mdio_base_virt;
+#define INTEL_CE_GBE_MDIO_RCOMP_BASE (ce4100_gbe_mdio_base_virt)
+#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0)
+#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4)
+#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8)
+#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC)
+#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20)
+#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24)
+
+#define E1000_SCTL 0x00024 /* SerDes Control - RW */
+#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */
+#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
+#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
+#define E1000_FCT 0x00030 /* Flow Control Type - RW */
+#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
+#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
+#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
+#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
+#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
+#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
+
+/* Auxiliary Control Register. This register is CE4100 specific;
+ * the RMII/RGMII function is switched through this register - RW.
+ * The bit definitions of the Auxiliary Control Register follow.
+ */
+#define E1000_CTL_AUX 0x000E0
+#define E1000_CTL_AUX_END_SEL_SHIFT 10
+#define E1000_CTL_AUX_ENDIANESS_SHIFT 8
+#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0
+
+/* descriptor and packet transfer use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* descriptor use CTL_AUX.ENDIANESS, packet use default */
+#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* descriptor use default, packet use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* all use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT)
+
+#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
+#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
+
+/* longword (LW) little endian, byte (B) big endian */
+#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT)
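+
+/* Illustrative example (not part of the original patch): a CE4100 aux
+ * control value selecting RMII, with CTL_AUX.ENDIANESS applied to both
+ * descriptor and packet transfers, longwords little endian and bytes big
+ * endian.
+ */
+#define EXAMPLE_CTL_AUX_VALUE (E1000_CTL_AUX_DES_PKT | \
+			       E1000_CTL_AUX_RMII | \
+			       E1000_CTL_AUX_LWLE_BBE)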
+
+#define E1000_RCTL 0x00100 /* RX Control - RW */
+#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */
+#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */
+#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */
+#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */
+#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */
+#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */
+#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
+#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */
+#define E1000_TCTL 0x00400 /* TX Control - RW */
+#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
+#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
+#define E1000_TBT 0x00448 /* TX Burst Timer - RW */
+#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
+#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
+#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
+#define FEXTNVM_SW_CONFIG 0x0001
+#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
+#define E1000_PBS 0x01008 /* Packet Buffer Size */
+#define E1000_EEMNGCTL 0x01010 /* MNG EEPROM Control */
+#define E1000_FLASH_UPDATES 1000
+#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
+#define E1000_FLASHT 0x01028 /* FLASH Timer Register */
+#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
+#define E1000_FLSWCTL 0x01030 /* FLASH control register */
+#define E1000_FLSWDATA 0x01034 /* FLASH data register */
+#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */
+#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
+#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
+#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
+#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */
+#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */
+#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */
+#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */
+#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */
+#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */
+#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */
+#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */
+#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */
+#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */
+#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */
+#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */
+#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */
+#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */
+#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */
+#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */
+#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
+#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */
+#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
+#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */
+#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */
+#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */
+#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */
+#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */
+#define E1000_TDT 0x03818 /* TX Descriptor Tail - RW */
+#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */
+#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */
+#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */
+#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
+#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */
+#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */
+#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */
+#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */
+#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */
+#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */
+#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */
+#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
+#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC 0x04028 /* Collision Count - R/clr */
+#define E1000_DC 0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */
+#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */
+#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */
+#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */
+#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */
+#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */
+#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */
+#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */
+#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */
+#define E1000_PRC511 0x04068 /* Packets RX (256-511 bytes) - R/clr */
+#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */
+#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */
+#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */
+#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */
+#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */
+#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */
+#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */
+#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */
+#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */
+#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */
+#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */
+#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */
+#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */
+#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */
+#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */
+#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */
+#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */
+#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */
+#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */
+#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */
+#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */
+#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */
+#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */
+#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */
+#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */
+#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */
+#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */
+#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */
+#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */
+#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
+#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
+#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
+#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
+#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */
+#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */
+#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */
+#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */
+#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */
+#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
+#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
+#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
+#define E1000_RFCTL 0x05008 /* Receive Filter Control */
+#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define E1000_RA 0x05400 /* Receive Address - RW Array */
+#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
+#define E1000_WUC 0x05800 /* Wakeup Control - RW */
+#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
+#define E1000_WUS 0x05810 /* Wakeup Status - RO */
+#define E1000_MANC 0x05820 /* Management Control - RW */
+#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
+#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */
+#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */
+#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
+#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */
+#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF 0x08800 /* Host Interface */
+#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */
+#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */
+
+#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MDPHYA 0x0003C /* PHY address - RW */
+#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
+#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
+
+#define E1000_GCR 0x05B00 /* PCI-Ex Control */
+#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
+#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
+#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */
+#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */
+#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM 0x05B50 /* SW Semaphore */
+#define E1000_FWSM 0x05B54 /* FW Semaphore */
+#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
+#define E1000_HICR 0x08F00 /* Host Interface Control */
+
+/* RSS registers */
+#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */
+#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
+#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */
+#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */
+#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */
+#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */
+/* Register Set (82542)
+ *
+ * Some of the 82542 registers are located at different offsets than they are
+ * in more current versions of the 8254x. Despite the difference in location,
+ * the registers function in the same manner.
+ */
+#define E1000_82542_CTL_AUX E1000_CTL_AUX
+#define E1000_82542_CTRL E1000_CTRL
+#define E1000_82542_CTRL_DUP E1000_CTRL_DUP
+#define E1000_82542_STATUS E1000_STATUS
+#define E1000_82542_EECD E1000_EECD
+#define E1000_82542_EERD E1000_EERD
+#define E1000_82542_CTRL_EXT E1000_CTRL_EXT
+#define E1000_82542_FLA E1000_FLA
+#define E1000_82542_MDIC E1000_MDIC
+#define E1000_82542_SCTL E1000_SCTL
+#define E1000_82542_FEXTNVM E1000_FEXTNVM
+#define E1000_82542_FCAL E1000_FCAL
+#define E1000_82542_FCAH E1000_FCAH
+#define E1000_82542_FCT E1000_FCT
+#define E1000_82542_VET E1000_VET
+#define E1000_82542_RA 0x00040
+#define E1000_82542_ICR E1000_ICR
+#define E1000_82542_ITR E1000_ITR
+#define E1000_82542_ICS E1000_ICS
+#define E1000_82542_IMS E1000_IMS
+#define E1000_82542_IMC E1000_IMC
+#define E1000_82542_RCTL E1000_RCTL
+#define E1000_82542_RDTR 0x00108
+#define E1000_82542_RDBAL 0x00110
+#define E1000_82542_RDBAH 0x00114
+#define E1000_82542_RDLEN 0x00118
+#define E1000_82542_RDH 0x00120
+#define E1000_82542_RDT 0x00128
+#define E1000_82542_RDTR0 E1000_82542_RDTR
+#define E1000_82542_RDBAL0 E1000_82542_RDBAL
+#define E1000_82542_RDBAH0 E1000_82542_RDBAH
+#define E1000_82542_RDLEN0 E1000_82542_RDLEN
+#define E1000_82542_RDH0 E1000_82542_RDH
+#define E1000_82542_RDT0 E1000_82542_RDT
+#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication
+ * RX Control - RW */
+#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
+#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */
+#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */
+#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */
+#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */
+#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */
+#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */
+#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */
+#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */
+#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */
+#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */
+#define E1000_82542_RDTR1 0x00130
+#define E1000_82542_RDBAL1 0x00138
+#define E1000_82542_RDBAH1 0x0013C
+#define E1000_82542_RDLEN1 0x00140
+#define E1000_82542_RDH1 0x00148
+#define E1000_82542_RDT1 0x00150
+#define E1000_82542_FCRTH 0x00160
+#define E1000_82542_FCRTL 0x00168
+#define E1000_82542_FCTTV E1000_FCTTV
+#define E1000_82542_TXCW E1000_TXCW
+#define E1000_82542_RXCW E1000_RXCW
+#define E1000_82542_MTA 0x00200
+#define E1000_82542_TCTL E1000_TCTL
+#define E1000_82542_TCTL_EXT E1000_TCTL_EXT
+#define E1000_82542_TIPG E1000_TIPG
+#define E1000_82542_TDBAL 0x00420
+#define E1000_82542_TDBAH 0x00424
+#define E1000_82542_TDLEN 0x00428
+#define E1000_82542_TDH 0x00430
+#define E1000_82542_TDT 0x00438
+#define E1000_82542_TIDV 0x00440
+#define E1000_82542_TBT E1000_TBT
+#define E1000_82542_AIT E1000_AIT
+#define E1000_82542_VFTA 0x00600
+#define E1000_82542_LEDCTL E1000_LEDCTL
+#define E1000_82542_PBA E1000_PBA
+#define E1000_82542_PBS E1000_PBS
+#define E1000_82542_EEMNGCTL E1000_EEMNGCTL
+#define E1000_82542_EEARBC E1000_EEARBC
+#define E1000_82542_FLASHT E1000_FLASHT
+#define E1000_82542_EEWR E1000_EEWR
+#define E1000_82542_FLSWCTL E1000_FLSWCTL
+#define E1000_82542_FLSWDATA E1000_FLSWDATA
+#define E1000_82542_FLSWCNT E1000_FLSWCNT
+#define E1000_82542_FLOP E1000_FLOP
+#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL
+#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE
+#define E1000_82542_PHY_CTRL E1000_PHY_CTRL
+#define E1000_82542_ERT E1000_ERT
+#define E1000_82542_RXDCTL E1000_RXDCTL
+#define E1000_82542_RXDCTL1 E1000_RXDCTL1
+#define E1000_82542_RADV E1000_RADV
+#define E1000_82542_RSRPD E1000_RSRPD
+#define E1000_82542_TXDMAC E1000_TXDMAC
+#define E1000_82542_KABGTXD E1000_KABGTXD
+#define E1000_82542_TDFHS E1000_TDFHS
+#define E1000_82542_TDFTS E1000_TDFTS
+#define E1000_82542_TDFPC E1000_TDFPC
+#define E1000_82542_TXDCTL E1000_TXDCTL
+#define E1000_82542_TADV E1000_TADV
+#define E1000_82542_TSPMT E1000_TSPMT
+#define E1000_82542_CRCERRS E1000_CRCERRS
+#define E1000_82542_ALGNERRC E1000_ALGNERRC
+#define E1000_82542_SYMERRS E1000_SYMERRS
+#define E1000_82542_RXERRC E1000_RXERRC
+#define E1000_82542_MPC E1000_MPC
+#define E1000_82542_SCC E1000_SCC
+#define E1000_82542_ECOL E1000_ECOL
+#define E1000_82542_MCC E1000_MCC
+#define E1000_82542_LATECOL E1000_LATECOL
+#define E1000_82542_COLC E1000_COLC
+#define E1000_82542_DC E1000_DC
+#define E1000_82542_TNCRS E1000_TNCRS
+#define E1000_82542_SEC E1000_SEC
+#define E1000_82542_CEXTERR E1000_CEXTERR
+#define E1000_82542_RLEC E1000_RLEC
+#define E1000_82542_XONRXC E1000_XONRXC
+#define E1000_82542_XONTXC E1000_XONTXC
+#define E1000_82542_XOFFRXC E1000_XOFFRXC
+#define E1000_82542_XOFFTXC E1000_XOFFTXC
+#define E1000_82542_FCRUC E1000_FCRUC
+#define E1000_82542_PRC64 E1000_PRC64
+#define E1000_82542_PRC127 E1000_PRC127
+#define E1000_82542_PRC255 E1000_PRC255
+#define E1000_82542_PRC511 E1000_PRC511
+#define E1000_82542_PRC1023 E1000_PRC1023
+#define E1000_82542_PRC1522 E1000_PRC1522
+#define E1000_82542_GPRC E1000_GPRC
+#define E1000_82542_BPRC E1000_BPRC
+#define E1000_82542_MPRC E1000_MPRC
+#define E1000_82542_GPTC E1000_GPTC
+#define E1000_82542_GORCL E1000_GORCL
+#define E1000_82542_GORCH E1000_GORCH
+#define E1000_82542_GOTCL E1000_GOTCL
+#define E1000_82542_GOTCH E1000_GOTCH
+#define E1000_82542_RNBC E1000_RNBC
+#define E1000_82542_RUC E1000_RUC
+#define E1000_82542_RFC E1000_RFC
+#define E1000_82542_ROC E1000_ROC
+#define E1000_82542_RJC E1000_RJC
+#define E1000_82542_MGTPRC E1000_MGTPRC
+#define E1000_82542_MGTPDC E1000_MGTPDC
+#define E1000_82542_MGTPTC E1000_MGTPTC
+#define E1000_82542_TORL E1000_TORL
+#define E1000_82542_TORH E1000_TORH
+#define E1000_82542_TOTL E1000_TOTL
+#define E1000_82542_TOTH E1000_TOTH
+#define E1000_82542_TPR E1000_TPR
+#define E1000_82542_TPT E1000_TPT
+#define E1000_82542_PTC64 E1000_PTC64
+#define E1000_82542_PTC127 E1000_PTC127
+#define E1000_82542_PTC255 E1000_PTC255
+#define E1000_82542_PTC511 E1000_PTC511
+#define E1000_82542_PTC1023 E1000_PTC1023
+#define E1000_82542_PTC1522 E1000_PTC1522
+#define E1000_82542_MPTC E1000_MPTC
+#define E1000_82542_BPTC E1000_BPTC
+#define E1000_82542_TSCTC E1000_TSCTC
+#define E1000_82542_TSCTFC E1000_TSCTFC
+#define E1000_82542_RXCSUM E1000_RXCSUM
+#define E1000_82542_WUC E1000_WUC
+#define E1000_82542_WUFC E1000_WUFC
+#define E1000_82542_WUS E1000_WUS
+#define E1000_82542_MANC E1000_MANC
+#define E1000_82542_IPAV E1000_IPAV
+#define E1000_82542_IP4AT E1000_IP4AT
+#define E1000_82542_IP6AT E1000_IP6AT
+#define E1000_82542_WUPL E1000_WUPL
+#define E1000_82542_WUPM E1000_WUPM
+#define E1000_82542_FFLT E1000_FFLT
+#define E1000_82542_TDFH 0x08010
+#define E1000_82542_TDFT 0x08018
+#define E1000_82542_FFMT E1000_FFMT
+#define E1000_82542_FFVT E1000_FFVT
+#define E1000_82542_HOST_IF E1000_HOST_IF
+#define E1000_82542_IAM E1000_IAM
+#define E1000_82542_EEMNGCTL E1000_EEMNGCTL
+#define E1000_82542_PSRCTL E1000_PSRCTL
+#define E1000_82542_RAID E1000_RAID
+#define E1000_82542_TARC0 E1000_TARC0
+#define E1000_82542_TDBAL1 E1000_TDBAL1
+#define E1000_82542_TDBAH1 E1000_TDBAH1
+#define E1000_82542_TDLEN1 E1000_TDLEN1
+#define E1000_82542_TDH1 E1000_TDH1
+#define E1000_82542_TDT1 E1000_TDT1
+#define E1000_82542_TXDCTL1 E1000_TXDCTL1
+#define E1000_82542_TARC1 E1000_TARC1
+#define E1000_82542_RFCTL E1000_RFCTL
+#define E1000_82542_GCR E1000_GCR
+#define E1000_82542_GSCL_1 E1000_GSCL_1
+#define E1000_82542_GSCL_2 E1000_GSCL_2
+#define E1000_82542_GSCL_3 E1000_GSCL_3
+#define E1000_82542_GSCL_4 E1000_GSCL_4
+#define E1000_82542_FACTPS E1000_FACTPS
+#define E1000_82542_SWSM E1000_SWSM
+#define E1000_82542_FWSM E1000_FWSM
+#define E1000_82542_FFLT_DBG E1000_FFLT_DBG
+#define E1000_82542_IAC E1000_IAC
+#define E1000_82542_ICRXPTC E1000_ICRXPTC
+#define E1000_82542_ICRXATC E1000_ICRXATC
+#define E1000_82542_ICTXPTC E1000_ICTXPTC
+#define E1000_82542_ICTXATC E1000_ICTXATC
+#define E1000_82542_ICTXQEC E1000_ICTXQEC
+#define E1000_82542_ICTXQMTC E1000_ICTXQMTC
+#define E1000_82542_ICRXDMTC E1000_ICRXDMTC
+#define E1000_82542_ICRXOC E1000_ICRXOC
+#define E1000_82542_HICR E1000_HICR
+
+#define E1000_82542_CPUVEC E1000_CPUVEC
+#define E1000_82542_MRQC E1000_MRQC
+#define E1000_82542_RETA E1000_RETA
+#define E1000_82542_RSSRK E1000_RSSRK
+#define E1000_82542_RSSIM E1000_RSSIM
+#define E1000_82542_RSSIR E1000_RSSIR
+#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA
+#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+ u64 crcerrs;
+ u64 algnerrc;
+ u64 symerrs;
+ u64 rxerrc;
+ u64 txerrc;
+ u64 mpc;
+ u64 scc;
+ u64 ecol;
+ u64 mcc;
+ u64 latecol;
+ u64 colc;
+ u64 dc;
+ u64 tncrs;
+ u64 sec;
+ u64 cexterr;
+ u64 rlec;
+ u64 xonrxc;
+ u64 xontxc;
+ u64 xoffrxc;
+ u64 xofftxc;
+ u64 fcruc;
+ u64 prc64;
+ u64 prc127;
+ u64 prc255;
+ u64 prc511;
+ u64 prc1023;
+ u64 prc1522;
+ u64 gprc;
+ u64 bprc;
+ u64 mprc;
+ u64 gptc;
+ u64 gorcl;
+ u64 gorch;
+ u64 gotcl;
+ u64 gotch;
+ u64 rnbc;
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rlerrc;
+ u64 rjc;
+ u64 mgprc;
+ u64 mgpdc;
+ u64 mgptc;
+ u64 torl;
+ u64 torh;
+ u64 totl;
+ u64 toth;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 tsctc;
+ u64 tsctfc;
+ u64 iac;
+ u64 icrxptc;
+ u64 icrxatc;
+ u64 ictxptc;
+ u64 ictxatc;
+ u64 ictxqec;
+ u64 ictxqmtc;
+ u64 icrxdmtc;
+ u64 icrxoc;
+};
+
+/* Structure containing variables used by the shared code (e1000_hw.c) */
+struct e1000_hw {
+ u8 __iomem *hw_addr;
+ u8 __iomem *flash_address;
+ e1000_mac_type mac_type;
+ e1000_phy_type phy_type;
+ u32 phy_init_script;
+ e1000_media_type media_type;
+ void *back;
+ struct e1000_shadow_ram *eeprom_shadow_ram;
+ u32 flash_bank_size;
+ u32 flash_base_addr;
+ e1000_fc_type fc;
+ e1000_bus_speed bus_speed;
+ e1000_bus_width bus_width;
+ e1000_bus_type bus_type;
+ struct e1000_eeprom_info eeprom;
+ e1000_ms_type master_slave;
+ e1000_ms_type original_master_slave;
+ e1000_ffe_config ffe_config_state;
+ u32 asf_firmware_present;
+ u32 eeprom_semaphore_present;
+ unsigned long io_base;
+ u32 phy_id;
+ u32 phy_revision;
+ u32 phy_addr;
+ u32 original_fc;
+ u32 txcw;
+ u32 autoneg_failed;
+ u32 max_frame_size;
+ u32 min_frame_size;
+ u32 mc_filter_type;
+ u32 num_mc_addrs;
+ u32 collision_delta;
+ u32 tx_packet_delta;
+ u32 ledctl_default;
+ u32 ledctl_mode1;
+ u32 ledctl_mode2;
+ bool tx_pkt_filtering;
+ struct e1000_host_mng_dhcp_cookie mng_cookie;
+ u16 phy_spd_default;
+ u16 autoneg_advertised;
+ u16 pci_cmd_word;
+ u16 fc_high_water;
+ u16 fc_low_water;
+ u16 fc_pause_time;
+ u16 current_ifs_val;
+ u16 ifs_min_val;
+ u16 ifs_max_val;
+ u16 ifs_step_size;
+ u16 ifs_ratio;
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u8 autoneg;
+ u8 mdix;
+ u8 forced_speed_duplex;
+ u8 wait_autoneg_complete;
+ u8 dma_fairness;
+ u8 mac_addr[NODE_ADDRESS_SIZE];
+ u8 perm_mac_addr[NODE_ADDRESS_SIZE];
+ bool disable_polarity_correction;
+ bool speed_downgraded;
+ e1000_smart_speed smart_speed;
+ e1000_dsp_config dsp_config_state;
+ bool get_link_status;
+ bool serdes_has_link;
+ bool tbi_compatibility_en;
+ bool tbi_compatibility_on;
+ bool laa_is_present;
+ bool phy_reset_disable;
+ bool initialize_hw_bits_disable;
+ bool fc_send_xon;
+ bool fc_strict_ieee;
+ bool report_tx_early;
+ bool adaptive_ifs;
+ bool ifs_params_forced;
+ bool in_ifs_mode;
+ bool mng_reg_access_disabled;
+ bool leave_av_bit_off;
+ bool bad_tx_carr_stats_fd;
+ bool has_smbus;
+};
+
+#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */
+#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */
+#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */
+#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
+#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */
+#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */
+#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */
+/* Register Bit Masks */
+/* Device Control */
+#define E1000_CTRL_FD 0x00000001 /* Full duplex. 0=half; 1=full */
+#define E1000_CTRL_BEM 0x00000002 /* Endian Mode. 0=little, 1=big */
+#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx, 1=fair */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */
+#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal, 1=reset */
+#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal, 1=test */
+#define E1000_CTRL_SLE 0x00000020 /* Serial Link on. 0=dis, 1=en */
+#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
+#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
+#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
+#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
+#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */
+#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
+#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */
+#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
+#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */
+#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */
+#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
+#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
+#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */
+#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */
+#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */
+#define E1000_CTRL_RST 0x04000000 /* Global reset */
+#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
+#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */
+#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */
+
+/* Device Status */
+#define E1000_STATUS_FD 0x00000001 /* Full duplex. 0=half, 1=full */
+#define E1000_STATUS_LU 0x00000002 /* Link up. 0=no, 1=link */
+#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */
+#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
+#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */
+#define E1000_STATUS_SPEED_MASK 0x000000C0
+#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion
+ by EEPROM/Flash */
+#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */
+#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
+#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */
+#define E1000_STATUS_PCI66 0x00000800 /* In 66 MHz slot */
+#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */
+#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */
+#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */
+#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */
+#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */
+#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */
+#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
+#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */
+#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
+#define E1000_STATUS_FUSE_8 0x04000000
+#define E1000_STATUS_FUSE_9 0x08000000
+#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */
+#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */
+
+/* Constants used to interpret the masked PCI-X bus speed. */
+#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */
+#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */
+#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */
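+
+/* Illustrative sketch (not part of the original patch): decoding the
+ * masked PCI-X speed field of the Status register into the upper bound of
+ * its MHz range (0 for the reserved encoding).
+ */
+static inline int example_pcix_speed_mhz(u32 status)
+{
+	switch (status & E1000_STATUS_PCIX_SPEED) {
+	case E1000_STATUS_PCIX_SPEED_66:
+		return 66;
+	case E1000_STATUS_PCIX_SPEED_100:
+		return 100;
+	case E1000_STATUS_PCIX_SPEED_133:
+		return 133;
+	default:
+		return 0;
+	}
+}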
+
+/* EEPROM/Flash Control */
+#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */
+#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */
+#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */
+#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */
+#define E1000_EECD_FWE_MASK 0x00000030
+#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */
+#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */
+#define E1000_EECD_FWE_SHIFT 4
+#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */
+#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */
+#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */
+#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */
+#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type
+ * (0-small, 1-large) */
+#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */
+#ifndef E1000_EEPROM_GRANT_ATTEMPTS
+#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
+#endif
+#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEPROM Size */
+#define E1000_EECD_SIZE_EX_SHIFT 11
+#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */
+#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */
+#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */
+#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
+#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */
+#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
+#define E1000_EECD_SECVAL_SHIFT 22
+#define E1000_STM_OPCODE 0xDB00
+#define E1000_HICR_FW_RESET 0xC0
+
+#define E1000_SHADOW_RAM_WORDS 2048
+#define E1000_ICH_NVM_SIG_WORD 0x13
+#define E1000_ICH_NVM_SIG_MASK 0xC0
+
+/* EEPROM Read */
+#define E1000_EERD_START 0x00000001 /* Start Read */
+#define E1000_EERD_DONE 0x00000010 /* Read Done */
+#define E1000_EERD_ADDR_SHIFT 8
+#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */
+#define E1000_EERD_DATA_SHIFT 16
+#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */
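+
+/* Illustrative sketch (not part of the original patch): reading one EEPROM
+ * word through EERD per the field layout above. Assumes readl()/writel()
+ * from <linux/io.h> on a mapped BAR0; real code bounds the poll and
+ * respects per-part address-shift differences.
+ */
+static inline u16 example_eerd_read_word(u8 __iomem *regs, u8 offset)
+{
+	u32 eerd;
+
+	writel(((u32)offset << E1000_EERD_ADDR_SHIFT) | E1000_EERD_START,
+	       regs + E1000_EERD);
+	do {
+		eerd = readl(regs + E1000_EERD);
+	} while (!(eerd & E1000_EERD_DONE));
+
+	return (eerd & E1000_EERD_DATA_MASK) >> E1000_EERD_DATA_SHIFT;
+}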
+
+/* SPI EEPROM Status Register */
+#define EEPROM_STATUS_RDY_SPI 0x01
+#define EEPROM_STATUS_WEN_SPI 0x02
+#define EEPROM_STATUS_BP0_SPI 0x04
+#define EEPROM_STATUS_BP1_SPI 0x08
+#define EEPROM_STATUS_WPEN_SPI 0x80
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */
+#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */
+#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
+#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */
+#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */
+#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */
+#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */
+#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA
+#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */
+#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */
+#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */
+#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */
+#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
+#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */
+#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */
+#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */
+#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_SERDES 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
+#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000
+#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000
+#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000
+#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000
+#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000
+#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
+#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
+#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
+#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */
+#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */
+#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000
+
+/* MDI Control */
+#define E1000_MDIC_DATA_MASK 0x0000FFFF
+#define E1000_MDIC_REG_MASK 0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK 0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE 0x04000000
+#define E1000_MDIC_OP_READ 0x08000000
+#define E1000_MDIC_READY 0x10000000
+#define E1000_MDIC_INT_EN 0x20000000
+#define E1000_MDIC_ERROR 0x40000000
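+
+/* Illustrative sketch (not part of the original patch): one PHY register
+ * read through MDIC. Assumes readl()/writel() on a mapped BAR0; real code
+ * bounds the poll and inserts the required access delays.
+ */
+static inline int example_mdic_read(u8 __iomem *regs, u32 phy_addr,
+				    u32 reg_addr, u16 *data)
+{
+	u32 mdic = (reg_addr << E1000_MDIC_REG_SHIFT) |
+		   (phy_addr << E1000_MDIC_PHY_SHIFT) |
+		   E1000_MDIC_OP_READ;
+
+	writel(mdic, regs + E1000_MDIC);
+	do {
+		mdic = readl(regs + E1000_MDIC);
+	} while (!(mdic & E1000_MDIC_READY));
+
+	if (mdic & E1000_MDIC_ERROR)
+		return -1;
+	*data = mdic & E1000_MDIC_DATA_MASK;
+	return 0;
+}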
+
+#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000
+#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000
+#define INTEL_CE_GBE_MDIC_GO 0x80000000
+#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000
+
+#define E1000_KUMCTRLSTA_MASK 0x0000FFFF
+#define E1000_KUMCTRLSTA_OFFSET 0x001F0000
+#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16
+#define E1000_KUMCTRLSTA_REN 0x00200000
+
+#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000
+#define E1000_KUMCTRLSTA_OFFSET_CTRL 0x00000001
+#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002
+#define E1000_KUMCTRLSTA_OFFSET_DIAG 0x00000003
+#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS 0x00000004
+#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009
+#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010
+#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001E
+#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001F
+
+/* FIFO Control */
+#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x00000008
+#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800
+
+/* In-Band Control */
+#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500
+#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010
+
+/* Half-Duplex Control */
+#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004
+#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000
+
+#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E
+
+#define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000
+#define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000
+
+#define E1000_KUMCTRLSTA_K0S_100_EN 0x2000
+#define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000
+#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003
+
+#define E1000_KABGTXD_BGSQLBIAS 0x00050000
+
+#define E1000_PHY_CTRL_SPD_EN 0x00000001
+#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
+#define E1000_PHY_CTRL_B2B_EN 0x00000080
+
+/* LED Control */
+#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT 0
+#define E1000_LEDCTL_LED0_BLINK_RATE 0x0000020
+#define E1000_LEDCTL_LED0_IVRT 0x00000040
+#define E1000_LEDCTL_LED0_BLINK 0x00000080
+#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00
+#define E1000_LEDCTL_LED1_MODE_SHIFT 8
+#define E1000_LEDCTL_LED1_BLINK_RATE 0x0002000
+#define E1000_LEDCTL_LED1_IVRT 0x00004000
+#define E1000_LEDCTL_LED1_BLINK 0x00008000
+#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000
+#define E1000_LEDCTL_LED2_MODE_SHIFT 16
+#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000
+#define E1000_LEDCTL_LED2_IVRT 0x00400000
+#define E1000_LEDCTL_LED2_BLINK 0x00800000
+#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000
+#define E1000_LEDCTL_LED3_MODE_SHIFT 24
+#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000
+#define E1000_LEDCTL_LED3_IVRT 0x40000000
+#define E1000_LEDCTL_LED3_BLINK 0x80000000
+
+#define E1000_LEDCTL_MODE_LINK_10_1000 0x0
+#define E1000_LEDCTL_MODE_LINK_100_1000 0x1
+#define E1000_LEDCTL_MODE_LINK_UP 0x2
+#define E1000_LEDCTL_MODE_ACTIVITY 0x3
+#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4
+#define E1000_LEDCTL_MODE_LINK_10 0x5
+#define E1000_LEDCTL_MODE_LINK_100 0x6
+#define E1000_LEDCTL_MODE_LINK_1000 0x7
+#define E1000_LEDCTL_MODE_PCIX_MODE 0x8
+#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9
+#define E1000_LEDCTL_MODE_COLLISION 0xA
+#define E1000_LEDCTL_MODE_BUS_SPEED 0xB
+#define E1000_LEDCTL_MODE_BUS_SIZE 0xC
+#define E1000_LEDCTL_MODE_PAUSED 0xD
+#define E1000_LEDCTL_MODE_LED_ON 0xE
+#define E1000_LEDCTL_MODE_LED_OFF 0xF
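+
+/* Illustrative sketch (not part of the original patch): retargeting LED0
+ * to the "activity" mode in a previously read LEDCTL value; each LED has
+ * its own 4-bit mode field at the shifts defined above.
+ */
+static inline u32 example_ledctl_led0_activity(u32 ledctl)
+{
+	ledctl &= ~E1000_LEDCTL_LED0_MODE_MASK;
+	return ledctl | (E1000_LEDCTL_MODE_ACTIVITY <<
+			 E1000_LEDCTL_LED0_MODE_SHIFT);
+}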
+
+/* Receive Address */
+#define E1000_RAH_AV 0x80000000 /* Receive address valid */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
+#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
+#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
+#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
+#define E1000_ICR_RXO 0x00000040 /* rx overrun */
+#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
+#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */
+#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */
+#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
+#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
+#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
+#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXD_LOW 0x00008000
+#define E1000_ICR_SRPD 0x00010000
+#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */
+#define E1000_ICR_MNG 0x00040000 /* Manageability event */
+#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
+#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */
+#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */
+#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */
+#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */
+#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */
+#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */
+#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */
+#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
+#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
+#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
+#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
+#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
+#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW
+#define E1000_ICS_SRPD E1000_ICR_SRPD
+#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */
+#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */
+#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */
+#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
+#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
+#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICS_DSW E1000_ICR_DSW
+#define E1000_ICS_PHYINT E1000_ICR_PHYINT
+#define E1000_ICS_EPRST E1000_ICR_EPRST
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */
+#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */
+#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
+#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
+#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
+#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
+#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
+#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW
+#define E1000_IMS_SRPD E1000_ICR_SRPD
+#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */
+#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */
+#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */
+#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
+#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
+#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMS_DSW E1000_ICR_DSW
+#define E1000_IMS_PHYINT E1000_ICR_PHYINT
+#define E1000_IMS_EPRST E1000_ICR_EPRST
+
+/* Interrupt Mask Clear */
+#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */
+#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */
+#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
+#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
+#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
+#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
+#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
+#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW
+#define E1000_IMC_SRPD E1000_ICR_SRPD
+#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */
+#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */
+#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */
+#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
+#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
+#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMC_DSW E1000_ICR_DSW
+#define E1000_IMC_PHYINT E1000_ICR_PHYINT
+#define E1000_IMC_EPRST E1000_ICR_EPRST
+
+/* Receive Control */
+#define E1000_RCTL_RST 0x00000001 /* Software reset */
+#define E1000_RCTL_EN 0x00000002 /* enable */
+#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
+#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
+#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enable */
+#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
+#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
+#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
+#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */
+#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */
+#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold: 1/2 of RDLEN */
+#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold: 1/4 of RDLEN */
+#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold: 1/8 of RDLEN */
+#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
+#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */
+#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */
+#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */
+#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
+#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */
+#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */
+#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
+#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */
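+/* Usage sketch (illustrative only, not part of this header): buffer sizes
+ * above 2048 bytes require the buffer size extension bit, e.g. for
+ * 4096-byte buffers:
+ *
+ *     rctl &= ~(0x00030000 | E1000_RCTL_BSEX);  (clear size bits 17:16)
+ *     rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
+ */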
+#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
+#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
+#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
+#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */
+#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
+#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
+#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
+#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */
+#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */
+
+/* Use byte values for the following shift parameters
+ * Usage:
+ * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ * E1000_PSRCTL_BSIZE0_MASK) |
+ * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ * E1000_PSRCTL_BSIZE1_MASK) |
+ * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ * E1000_PSRCTL_BSIZE2_MASK) |
+ * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ * E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256], default=256
+ * value1 = [1024..64512], default=4096
+ * value2 = [0..64512], default=4096
+ * value3 = [0..64512], default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
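+/* Worked example (illustrative): with value0 = 256 and value1 = 4096, the
+ * recipe above yields BSIZE0 = 256 >> 7 = 0x02 and BSIZE1 = 4096 >> 2 =
+ * 0x400, so psrctl = 0x00000402 when value2 = value3 = 0.
+ */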
+
+/* SW_FW_SYNC definitions */
+#define E1000_SWFW_EEP_SM 0x0001
+#define E1000_SWFW_PHY0_SM 0x0002
+#define E1000_SWFW_PHY1_SM 0x0004
+#define E1000_SWFW_MAC_CSR_SM 0x0008
+
+/* Receive Descriptor */
+#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */
+#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */
+#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */
+#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */
+#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */
+
+/* Flow Control */
+#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */
+#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
+
+/* Header split receive */
+#define E1000_RFCTL_ISCSI_DIS 0x00000001
+#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E
+#define E1000_RFCTL_ISCSI_DWC_SHIFT 1
+#define E1000_RFCTL_NFSW_DIS 0x00000040
+#define E1000_RFCTL_NFSR_DIS 0x00000080
+#define E1000_RFCTL_NFS_VER_MASK 0x00000300
+#define E1000_RFCTL_NFS_VER_SHIFT 8
+#define E1000_RFCTL_IPV6_DIS 0x00000400
+#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800
+#define E1000_RFCTL_ACK_DIS 0x00001000
+#define E1000_RFCTL_ACKD_DIS 0x00002000
+#define E1000_RFCTL_IPFRSP_DIS 0x00004000
+#define E1000_RFCTL_EXTEN 0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+
+/* Receive Descriptor Control */
+#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */
+#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */
+#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */
+#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
+
+/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
+ still to be processed. */
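+/* Usage sketch (illustrative): a TXDCTL value is composed by placing each
+ * threshold in its field, e.g. prefetch 32, host 1, writeback 1 with
+ * descriptor granularity:
+ *
+ *     txdctl = (32 & E1000_TXDCTL_PTHRESH) |
+ *              ((1 << 8) & E1000_TXDCTL_HTHRESH) |
+ *              ((1 << 16) & E1000_TXDCTL_WTHRESH) |
+ *              E1000_TXDCTL_GRAN;
+ */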
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
+#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */
+#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW asym pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
+#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */
+#define E1000_TXCW_NP 0x00008000 /* TXCW next page */
+#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */
+#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */
+#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
+#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */
+#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
+#define E1000_RXCW_CC 0x10000000 /* Receive config change */
+#define E1000_RXCW_C 0x20000000 /* Receive config */
+#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
+#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */
+
+/* Transmit Control */
+#define E1000_TCTL_RST 0x00000001 /* software reset */
+#define E1000_TCTL_EN 0x00000002 /* enable tx */
+#define E1000_TCTL_BCE 0x00000004 /* busy check enable */
+#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
+#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
+#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
+#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */
+#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */
+#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
+#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */
+#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
+/* Extended Transmit Control */
+#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */
+#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */
+#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
+#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
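+/* Usage sketch (illustrative): IPv4 and TCP/UDP receive checksum offload
+ * are typically enabled together:
+ *
+ *     rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL;
+ */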
+
+/* Multiple Receive Queue Control */
+#define E1000_MRQC_ENABLE_MASK 0x00000003
+#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001
+#define E1000_MRQC_ENABLE_RSS_INT 0x00000004
+#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000
+#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
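+/* Usage sketch (illustrative): two-queue RSS hashing over IPv4 and
+ * IPv4/TCP headers would be selected with:
+ *
+ *     mrqc = E1000_MRQC_ENABLE_RSS_2Q |
+ *            E1000_MRQC_RSS_FIELD_IPV4 |
+ *            E1000_MRQC_RSS_FIELD_IPV4_TCP;
+ */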
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_APME 0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_SPM 0x80000000 /* Enable SPM */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
+#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */
+#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
+#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
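+/* Usage sketch (illustrative): arming wakeup on magic packets only:
+ *
+ *     wufc &= ~E1000_WUFC_ALL_FILTERS;
+ *     wufc |= E1000_WUFC_MAG;
+ */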
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */
+#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */
+#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */
+#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */
+#define E1000_WUS_BC 0x00000010 /* Broadcast Received */
+#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */
+#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */
+#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */
+#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */
+#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */
+#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */
+#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */
+#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */
+#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RMCP 026Fh Filtering */
+#define E1000_MANC_0298_EN 0x00000200 /* Enable RMCP 0298h Filtering */
+#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */
+#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */
+#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */
+#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
+#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery
+ * Filtering */
+#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */
+#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */
+#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
+#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
+#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address
+ * filtering */
+#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host
+ * memory */
+#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address
+ * filtering */
+#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */
+#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */
+#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */
+#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */
+#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */
+#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */
+#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */
+#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */
+
+#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */
+#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
+#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
+
+/* FW Semaphore Register */
+#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */
+#define E1000_FWSM_MODE_SHIFT 1
+#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */
+
+#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */
+#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */
+#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */
+#define E1000_FWSM_SKUEL_SHIFT 29
+#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */
+#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */
+#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */
+#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Corp GbE SKU */
+
+/* FFLT Debug Register */
+#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */
+
+typedef enum {
+ e1000_mng_mode_none = 0,
+ e1000_mng_mode_asf,
+ e1000_mng_mode_pt,
+ e1000_mng_mode_ipmi,
+ e1000_mng_mode_host_interface_only
+} e1000_mng_mode;
+
+/* Host Interface Control Register */
+#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */
+#define E1000_HICR_C 0x00000002 /* Driver sets this bit when the
+ * command has been written to RAM */
+#define E1000_HICR_SV 0x00000004 /* Status Validity */
+#define E1000_HICR_FWR 0x00000080 /* FW reset. Set by the Host */
+
+/* Host Interface Command Interface - Address range 0x8800-0x8EFF */
+#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */
+#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */
+#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */
+#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */
+
+struct e1000_host_command_header {
+ u8 command_id;
+ u8 command_length;
+ u8 command_options; /* I/F bits for command, status for return */
+ u8 checksum;
+};
+struct e1000_host_command_info {
+ struct e1000_host_command_header command_header; /* Command header (4 bytes); also used as the result header */
+ u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data, 0..252 bytes long */
+};
+
+/* Host SMB register #0 */
+#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */
+#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */
+#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */
+#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */
+
+/* Host SMB register #1 */
+#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN
+#define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN
+#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT
+#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT
+
+/* FW Status Register */
+#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */
+
+/* Wake Up Packet Length */
+#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */
+
+#define E1000_MDALIGN 4096
+
+/* PCI-Ex registers*/
+
+/* PCI-Ex Control Register */
+#define E1000_GCR_RXD_NO_SNOOP 0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
+#define E1000_GCR_TXD_NO_SNOOP 0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
+
+#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
+ E1000_GCR_RXDSCW_NO_SNOOP | \
+ E1000_GCR_RXDSCR_NO_SNOOP | \
+ E1000_GCR_TXD_NO_SNOOP | \
+ E1000_GCR_TXDSCW_NO_SNOOP | \
+ E1000_GCR_TXDSCR_NO_SNOOP)
+
+#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+/* Function Active and Power State to MNG */
+#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003
+#define E1000_FACTPS_LAN0_VALID 0x00000004
+#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008
+#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0
+#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6
+#define E1000_FACTPS_LAN1_VALID 0x00000100
+#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200
+#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000
+#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12
+#define E1000_FACTPS_IDE_ENABLE 0x00004000
+#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000
+#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000
+#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18
+#define E1000_FACTPS_SP_ENABLE 0x00100000
+#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000
+#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000
+#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24
+#define E1000_FACTPS_IPMI_ENABLE 0x04000000
+#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000
+#define E1000_FACTPS_MNGCG 0x20000000
+#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000
+#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000
+
+/* PCI-Ex Config Space */
+#define PCI_EX_LINK_STATUS 0x12
+#define PCI_EX_LINK_WIDTH_MASK 0x3F0
+#define PCI_EX_LINK_WIDTH_SHIFT 4
+
+/* EEPROM Commands - Microwire */
+#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode */
+#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */
+#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */
+#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */
+
+/* EEPROM Commands - SPI */
+#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
+#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
+#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
+#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */
+#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */
+#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */
+#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */
+#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */
+#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */
+#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
+
+/* EEPROM Size definitions */
+#define EEPROM_WORD_SIZE_SHIFT 6
+#define EEPROM_SIZE_SHIFT 10
+#define EEPROM_SIZE_MASK 0x1C00
+
+/* EEPROM Word Offsets */
+#define EEPROM_COMPAT 0x0003
+#define EEPROM_ID_LED_SETTINGS 0x0004
+#define EEPROM_VERSION 0x0005
+#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */
+#define EEPROM_PHY_CLASS_WORD 0x0007
+#define EEPROM_INIT_CONTROL1_REG 0x000A
+#define EEPROM_INIT_CONTROL2_REG 0x000F
+#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010
+#define EEPROM_INIT_CONTROL3_PORT_B 0x0014
+#define EEPROM_INIT_3GIO_3 0x001A
+#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020
+#define EEPROM_INIT_CONTROL3_PORT_A 0x0024
+#define EEPROM_CFG 0x0012
+#define EEPROM_FLASH_VERSION 0x0032
+#define EEPROM_CHECKSUM_REG 0x003F
+
+#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */
+#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
+ (ID_LED_OFF1_OFF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2 0x1
+#define ID_LED_DEF1_ON2 0x2
+#define ID_LED_DEF1_OFF2 0x3
+#define ID_LED_ON1_DEF2 0x4
+#define ID_LED_ON1_ON2 0x5
+#define ID_LED_ON1_OFF2 0x6
+#define ID_LED_OFF1_DEF2 0x7
+#define ID_LED_OFF1_ON2 0x8
+#define ID_LED_OFF1_OFF2 0x9
+
+#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE 0x07000000
+
+/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */
+#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F
+
+/* Mask bit for PHY class in Word 7 of the EEPROM */
+#define EEPROM_PHY_CLASS_A 0x8000
+
+/* Mask bits for fields in Word 0x0a of the EEPROM */
+#define EEPROM_WORD0A_ILOS 0x0010
+#define EEPROM_WORD0A_SWDPIO 0x01E0
+#define EEPROM_WORD0A_LRST 0x0200
+#define EEPROM_WORD0A_FD 0x0400
+#define EEPROM_WORD0A_66MHZ 0x0800
+
+/* Mask bits for fields in Word 0x0f of the EEPROM */
+#define EEPROM_WORD0F_PAUSE_MASK 0x3000
+#define EEPROM_WORD0F_PAUSE 0x1000
+#define EEPROM_WORD0F_ASM_DIR 0x2000
+#define EEPROM_WORD0F_ANE 0x0800
+#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0
+#define EEPROM_WORD0F_LPLU 0x0001
+
+/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */
+#define EEPROM_WORD1020_GIGA_DISABLE 0x0010
+#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008
+
+/* Mask bits for fields in Word 0x1a of the EEPROM */
+#define EEPROM_WORD1A_ASPM_MASK 0x000C
+
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */
+#define EEPROM_SUM 0xBABA
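+/* Verification sketch (illustrative; read_word() stands in for the
+ * driver's EEPROM read routine): the checksum word is chosen so that the
+ * first 0x40 words sum to EEPROM_SUM:
+ *
+ *     u16 i, sum = 0;
+ *     for (i = 0; i <= EEPROM_CHECKSUM_REG; i++)
+ *         sum += read_word(i);
+ *     checksum_ok = (sum == EEPROM_SUM);
+ */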
+
+/* EEPROM Map defines (WORD OFFSETS)*/
+#define EEPROM_NODE_ADDRESS_BYTE_0 0
+#define EEPROM_PBA_BYTE_1 8
+
+#define EEPROM_RESERVED_WORD 0xFFFF
+
+/* EEPROM Map Sizes (Byte Counts) */
+#define PBA_SIZE 4
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD 15
+#define E1000_CT_SHIFT 4
+/* Collision distance is a 0-based value that applies to
+ * half-duplex-capable hardware only. */
+#define E1000_COLLISION_DISTANCE 63
+#define E1000_COLLISION_DISTANCE_82542 64
+#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE
+#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE
+#define E1000_COLD_SHIFT 12
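+/* Usage sketch (illustrative): the threshold and distance values are
+ * shifted into their TCTL fields:
+ *
+ *     tctl |= (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT) |
+ *             (E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
+ */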
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define REQ_RX_DESCRIPTOR_MULTIPLE 8
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82542_TIPG_IPGT 10
+#define DEFAULT_82543_TIPG_IPGT_FIBER 9
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK 0x000003FF
+#define E1000_TIPG_IPGR1_MASK 0x000FFC00
+#define E1000_TIPG_IPGR2_MASK 0x3FF00000
+
+#define DEFAULT_82542_TIPG_IPGR1 2
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT 10
+
+#define DEFAULT_82542_TIPG_IPGR2 10
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define E1000_TIPG_IPGR2_SHIFT 20
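+/* Usage sketch (illustrative): a TIPG value for an 82543 copper part,
+ * composed from the defaults above:
+ *
+ *     tipg = DEFAULT_82543_TIPG_IPGT_COPPER |
+ *            (DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT) |
+ *            (DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT);
+ */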
+
+#define E1000_TXDMAC_DPP 0x00000001
+
+/* Adaptive IFS defines */
+#define TX_THRESHOLD_START 8
+#define TX_THRESHOLD_INCREMENT 10
+#define TX_THRESHOLD_DECREMENT 1
+#define TX_THRESHOLD_STOP 190
+#define TX_THRESHOLD_DISABLE 0
+#define TX_THRESHOLD_TIMER_MS 10000
+#define MIN_NUM_XMITS 1000
+#define IFS_MAX 80
+#define IFS_STEP 10
+#define IFS_MIN 40
+#define IFS_RATIO 4
+
+/* Extended Configuration Control and Size */
+#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002
+#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004
+#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008
+#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010
+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
+#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000
+
+#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF
+#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
+
+/* PBA constants */
+#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */
+#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */
+#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
+#define E1000_PBA_20K 0x0014
+#define E1000_PBA_22K 0x0016
+#define E1000_PBA_24K 0x0018
+#define E1000_PBA_30K 0x001E
+#define E1000_PBA_32K 0x0020
+#define E1000_PBA_34K 0x0022
+#define E1000_PBA_38K 0x0026
+#define E1000_PBA_40K 0x0028
+#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */
+
+#define E1000_PBS_16K E1000_PBA_16K
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE 0x8808
+
+/* The historical defaults for the flow control values are given below. */
+#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */
+#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */
+#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */
+
+/* PCIX Config space */
+#define PCIX_COMMAND_REGISTER 0xE6
+#define PCIX_STATUS_REGISTER_LO 0xE8
+#define PCIX_STATUS_REGISTER_HI 0xEA
+
+#define PCIX_COMMAND_MMRBC_MASK 0x000C
+#define PCIX_COMMAND_MMRBC_SHIFT 0x2
+#define PCIX_STATUS_HI_MMRBC_MASK 0x0060
+#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5
+#define PCIX_STATUS_HI_MMRBC_4K 0x3
+#define PCIX_STATUS_HI_MMRBC_2K 0x2
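+/* Usage sketch (illustrative; pcix_cmd stands for the 16-bit word read
+ * from PCIX_COMMAND_REGISTER): capping the maximum memory read byte count
+ * at 2K means rewriting the MMRBC field:
+ *
+ *     pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK;
+ *     pcix_cmd |= PCIX_STATUS_HI_MMRBC_2K << PCIX_COMMAND_MMRBC_SHIFT;
+ */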
+
+/* Number of bits required to shift right the "pause" bits from the
+ * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register.
+ */
+#define PAUSE_SHIFT 5
+
+/* Number of bits required to shift left the "SWDPIO" bits from the
+ * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register.
+ */
+#define SWDPIO_SHIFT 17
+
+/* Number of bits required to shift left the "SWDPIO_EXT" bits from the
+ * EEPROM word F (bits 7:4) to the bits 11:8 of The Extended CTRL register.
+ */
+#define SWDPIO__EXT_SHIFT 4
+
+/* Number of bits required to shift left the "ILOS" bit from the EEPROM
+ * (bit 4) to the "ILOS" (bit 7) field in the CTRL register.
+ */
+#define ILOS_SHIFT 3
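+/* Usage sketch (illustrative): each shift above relocates an EEPROM field
+ * into its register position, e.g. SWDPIO (EEPROM word 0x0a, bits 8:5)
+ * into CTRL bits 25:22:
+ *
+ *     ctrl |= (eeprom_word0a & EEPROM_WORD0A_SWDPIO) << SWDPIO_SHIFT;
+ */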
+
+#define RECEIVE_BUFFER_ALIGN_SIZE (256)
+
+/* Number of milliseconds we wait for auto-negotiation to complete */
+#define LINK_UP_TIMEOUT 500
+
+/* Number of milliseconds we wait for the EEPROM auto-read-done bit after MAC reset */
+#define AUTO_READ_DONE_TIMEOUT 10
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT 100
+
+#define E1000_TX_BUFFER_SIZE ((u32)1514)
+
+/* The carrier extension symbol, as received by the NIC. */
+#define CARRIER_EXTENSION 0x0F
+
+/* TBI_ACCEPT macro definition:
+ *
+ * This macro requires:
+ * adapter = a pointer to struct e1000_hw
+ * status = the 8 bit status field of the RX descriptor with EOP set
+ * errors = the 8 bit error field of the RX descriptor with EOP set
+ * length = the sum of all the length fields of the RX descriptors that
+ * make up the current frame
+ * last_byte = the last byte of the frame DMAed by the hardware
+ * The minimum and maximum frame lengths to accept are taken from
+ * (adapter)->min_frame_size and (adapter)->max_frame_size.
+ *
+ * This macro is a conditional that should be used in the interrupt
+ * handler's Rx processing routine when RxErrors have been detected.
+ *
+ * Typical use:
+ * ...
+ * if (TBI_ACCEPT(adapter, status, errors, length, last_byte)) {
+ * accept_frame = true;
+ * e1000_tbi_adjust_stats(adapter, MacAddress);
+ * frame_length--;
+ * } else {
+ * accept_frame = false;
+ * }
+ * ...
+ */
+
+#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \
+ ((adapter)->tbi_compatibility_on && \
+ (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
+ ((last_byte) == CARRIER_EXTENSION) && \
+ (((status) & E1000_RXD_STAT_VP) ? \
+ (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \
+ ((length) <= ((adapter)->max_frame_size + 1))) : \
+ (((length) > (adapter)->min_frame_size) && \
+ ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1)))))
+
+/* Structures, enums, and macros for the PHY */
+
+/* Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0
+#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0
+#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2
+#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2
+#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3
+#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3
+#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR
+#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CTRL 0x00 /* Control Register */
+#define PHY_STATUS 0x01 /* Status Register */
+#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
+
+#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
+#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */
+#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
+#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */
+
+#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */
+#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
+#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */
+#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */
+
+#define IGP01E1000_IEEE_REGS_PAGE 0x0000
+#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300
+#define IGP01E1000_IEEE_FORCE_GIGA 0x0140
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */
+#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */
+#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */
+#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */
+#define IGP02E1000_PHY_POWER_MGMT 0x19
+#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */
+
+/* IGP01E1000 AGC Registers - stores the cable length values*/
+#define IGP01E1000_PHY_AGC_A 0x1172
+#define IGP01E1000_PHY_AGC_B 0x1272
+#define IGP01E1000_PHY_AGC_C 0x1472
+#define IGP01E1000_PHY_AGC_D 0x1872
+
+/* IGP02E1000 AGC Registers for cable length values */
+#define IGP02E1000_PHY_AGC_A 0x11B1
+#define IGP02E1000_PHY_AGC_B 0x12B1
+#define IGP02E1000_PHY_AGC_C 0x14B1
+#define IGP02E1000_PHY_AGC_D 0x18B1
+
+/* IGP01E1000 DSP Reset Register */
+#define IGP01E1000_PHY_DSP_RESET 0x1F33
+#define IGP01E1000_PHY_DSP_SET 0x1F71
+#define IGP01E1000_PHY_DSP_FFE 0x1F35
+
+#define IGP01E1000_PHY_CHANNEL_NUM 4
+#define IGP02E1000_PHY_CHANNEL_NUM 4
+
+#define IGP01E1000_PHY_AGC_PARAM_A 0x1171
+#define IGP01E1000_PHY_AGC_PARAM_B 0x1271
+#define IGP01E1000_PHY_AGC_PARAM_C 0x1471
+#define IGP01E1000_PHY_AGC_PARAM_D 0x1871
+
+#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000
+#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000
+
+#define IGP01E1000_PHY_ANALOG_TX_STATE 0x2890
+#define IGP01E1000_PHY_ANALOG_CLASS_A 0x2000
+#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE 0x0004
+#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069
+
+#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A
+/* IGP01E1000 PCS Initialization register - stores the polarity status when
+ * speed = 1000 Mbps. */
+#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
+#define IGP01E1000_PHY_PCS_CTRL_REG 0x00B5
+
+#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
+#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
+#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
+#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */
+#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */
+#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */
+#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */
+#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+
+/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD 0x0002 /* New Page has been received */
+#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Local device is Next Page able */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */
+#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel Detection Fault */
+
+/* Next Page TX Register */
+#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
+#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges
+ * of different NP
+ */
+#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg
+ * 0 = cannot comply with msg
+ */
+#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */
+#define NPTX_NEXT_PAGE 0x8000 /* 1 = additional NP will follow
+ * 0 = sending last NP
+ */
+
+/* Link Partner Next Page Register */
+#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
+#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges
+ * of different NP
+ */
+#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg
+ * 0 = cannot comply with msg
+ */
+#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */
+#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */
+#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = additional NP will follow
+ * 0 = sending last NP
+ */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
+#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */
+ /* 0=DTE device */
+#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
+ /* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
+ /* 0=Automatic Master/Slave config */
+#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
+#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
+#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
+#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */
+#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */
+#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
+#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
+#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
+#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
+#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12
+#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100
+
+/* Extended Status Register */
+#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
+#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
+#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
+#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
+
+#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */
+#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */
+
+#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */
+ /* (0=enable, 1=disable) */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
+#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low,
+ * 0=CLK125 toggling
+ */
+#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
+ /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
+#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover,
+ * 100BASE-TX/10BASE-T:
+ * MDI Mode
+ */
+#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled
+ * all speeds.
+ */
+#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080
+ /* 1=Enable Extended 10BASE-T distance
+ * (Lower 10BASE-T RX Threshold)
+ * 0=Normal 10BASE-T RX Threshold */
+#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100
+ /* 1=5-Bit interface in 100BASE-TX
+ * 0=MII interface in 100BASE-TX */
+#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */
+#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
+
+#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1
+#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5
+#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */
+#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
+#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M;
+ * 3=110-140M;4=>140M */
+#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */
+#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
+#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */
+#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
+#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 15:14 */
+#define M88E1000_PSSR_10MBS 0x0000 /* 00=10 Mb/s */
+#define M88E1000_PSSR_100MBS 0x4000 /* 01=100 Mb/s */
+#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000 Mb/s */
+
+#define M88E1000_PSSR_REV_POLARITY_SHIFT 1
+#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5
+#define M88E1000_PSSR_MDIX_SHIFT 6
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+
+/* M88E1000 Extended PHY Specific Control Register */
+#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
+#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled.
+ * Will assert lost lock and bring
+ * link down if idle not seen
+ * within 1ms in 1000BASE-T
+ */
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300
+#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00
+
+/* IGP01E1000 Specific Port Config Register - R/W */
+#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010
+#define IGP01E1000_PSCFR_PRE_EN 0x0020
+#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
+#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK 0x0100
+#define IGP01E1000_PSCFR_DISABLE_JABBER 0x0400
+#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000
+
+/* IGP01E1000 Specific Port Status Register - R/O */
+#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C
+#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200
+#define IGP01E1000_PSSR_LINK_UP 0x0400
+#define IGP01E1000_PSSR_MDIX 0x0800
+#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */
+#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000
+#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000
+#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
+#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */
+#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */
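+/* Usage sketch (illustrative): extracting the resolved speed bits and the
+ * raw cable length field from a port status read:
+ *
+ *     speed = pssr & IGP01E1000_PSSR_SPEED_MASK;
+ *     cable = (pssr & IGP01E1000_PSSR_CABLE_LENGTH) >>
+ *             IGP01E1000_PSSR_CABLE_LENGTH_SHIFT;
+ */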
+
+/* IGP01E1000 Specific Port Control Register - R/W */
+#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010
+#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR 0x0200
+#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400
+#define IGP01E1000_PSCR_FLIP_CHIP 0x0800
+#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */
+
+/* IGP01E1000 Specific Port Link Health Register */
+#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
+#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000
+#define IGP01E1000_PLHR_MASTER_FAULT 0x2000
+#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000
+#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */
+#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */
+#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */
+#define IGP01E1000_PLHR_DATA_ERR_0 0x0100
+#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040
+#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010
+#define IGP01E1000_PLHR_VALID_CHANNEL_D 0x0008
+#define IGP01E1000_PLHR_VALID_CHANNEL_C 0x0004
+#define IGP01E1000_PLHR_VALID_CHANNEL_B 0x0002
+#define IGP01E1000_PLHR_VALID_CHANNEL_A 0x0001
+
+/* IGP01E1000 Channel Quality Register */
+#define IGP01E1000_MSE_CHANNEL_D 0x000F
+#define IGP01E1000_MSE_CHANNEL_C 0x00F0
+#define IGP01E1000_MSE_CHANNEL_B 0x0F00
+#define IGP01E1000_MSE_CHANNEL_A 0xF000
+
+#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */
+#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */
+
+/* IGP01E1000 DSP reset macros */
+#define DSP_RESET_ENABLE 0x0
+#define DSP_RESET_DISABLE 0x2
+#define E1000_MAX_DSP_RESETS 10
+
+/* IGP01E1000 & IGP02E1000 AGC Registers */
+
+#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */
+#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */
+
+/* IGP02E1000 AGC Register Length 9-bit mask */
+#define IGP02E1000_AGC_LENGTH_MASK 0x7F
+
+/* 7 bits (3 Coarse + 4 Fine) --> 128 possible values */
+#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128
+#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113
+
+/* The precision error of the cable length is +/- 10 meters */
+#define IGP01E1000_AGC_RANGE 10
+#define IGP02E1000_AGC_RANGE 15
+
+/* IGP01E1000 PCS Initialization register */
+/* Bits 6:3 of the PCS registers store the per-channel polarity */
+#define IGP01E1000_PHY_POLARITY_MASK 0x0078
+
+/* IGP01E1000 GMII FIFO Register */
+#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed
+ * on Link-Up */
+#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */
+
+/* IGP01E1000 Analog Register */
+#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1
+#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0
+#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC
+#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE
+
+#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000
+#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80
+#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070
+#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100
+#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002
+
+#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040
+#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010
+#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080
+#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500
+
+/* Bit definitions for valid PHY IDs. */
+/* I = Integrated
+ * E = External
+ */
+#define M88_VENDOR 0x0141
+#define M88E1000_E_PHY_ID 0x01410C50
+#define M88E1000_I_PHY_ID 0x01410C30
+#define M88E1011_I_PHY_ID 0x01410C20
+#define IGP01E1000_I_PHY_ID 0x02A80380
+#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID
+#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
+#define M88E1011_I_REV_4 0x04
+#define M88E1111_I_PHY_ID 0x01410CC0
+#define M88E1118_E_PHY_ID 0x01410E40
+#define L1LXT971A_PHY_ID 0x001378E0
+
+#define RTL8211B_PHY_ID 0x001CC910
+#define RTL8201N_PHY_ID 0x8200
+#define RTL_PHY_CTRL_FD 0x0100 /* Full duplex: 0=half, 1=full */
+#define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) \
+ (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
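+/* Example: PHY_REG(769, 17) encodes page 769, offset 17 as
+ * (769 << 5) | 17 = 0x6031; the IGP3 definitions below are all formed
+ * this way. */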
+
+#define IGP3_PHY_PORT_CTRL \
+ PHY_REG(769, 17) /* Port General Configuration */
+#define IGP3_PHY_RATE_ADAPT_CTRL \
+ PHY_REG(769, 25) /* Rate Adapter Control Register */
+
+#define IGP3_KMRN_FIFO_CTRL_STATS \
+ PHY_REG(770, 16) /* KMRN FIFO's control/status register */
+#define IGP3_KMRN_POWER_MNG_CTRL \
+ PHY_REG(770, 17) /* KMRN Power Management Control Register */
+#define IGP3_KMRN_INBAND_CTRL \
+ PHY_REG(770, 18) /* KMRN Inband Control Register */
+#define IGP3_KMRN_DIAG \
+ PHY_REG(770, 19) /* KMRN Diagnostic register */
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */
+#define IGP3_KMRN_ACK_TIMEOUT \
+ PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */
+
+#define IGP3_VR_CTRL \
+ PHY_REG(776, 18) /* Voltage regulator control register */
+#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */
+#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */
+
+#define IGP3_CAPABILITY \
+ PHY_REG(776, 19) /* IGP3 Capability Register */
+
+/* Capabilities for SKU Control */
+#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */
+#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */
+#define IGP3_CAP_ASF 0x0004 /* Support ASF */
+#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */
+#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */
+#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */
+#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */
+#define IGP3_CAP_RSS 0x0080 /* Support RSS */
+#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */
+#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */
+
+#define IGP3_PPC_JORDAN_EN 0x0001
+#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002
+
+#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001
+#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E
+#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020
+#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040
+
+#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. Ctrl register */
+#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */
+
+#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18)
+#define IGP3_KMRN_EC_DIS_INBAND 0x0080
+
+#define IGP03E1000_E_PHY_ID 0x02A80390
+#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */
+#define IFE_PLUS_E_PHY_ID 0x02A80320
+#define IFE_C_E_PHY_ID 0x02A80310
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */
+#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */
+#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */
+#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */
+#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */
+#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */
+#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */
+#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */
+#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */
+#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */
+#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */
+#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */
+
+#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */
+#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */
+#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */
+#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */
+#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */
+#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100 Mb/s, 0=10 Mb/s */
+#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */
+#define IFE_PESC_POLARITY_REVERSED_SHIFT 8
+
+#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */
+#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */
+#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */
+#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */
+#define IFE_PSC_FORCE_POLARITY_SHIFT 5
+#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4
+
+#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */
+#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */
+#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */
+#define IFE_PMC_MDIX_MODE_SHIFT 6
+#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */
+
+#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */
+#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */
+#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launches test pulses on the wire */
+#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */
+#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */
+#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */
+#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */
+#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */
+#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */
+#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
+
+#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */
+#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */
+#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */
+#define ICH_FLASH_SEG_SIZE_256 256
+#define ICH_FLASH_SEG_SIZE_4K 4096
+#define ICH_FLASH_SEG_SIZE_64K 65536
+
+#define ICH_CYCLE_READ 0x0
+#define ICH_CYCLE_RESERVED 0x1
+#define ICH_CYCLE_WRITE 0x2
+#define ICH_CYCLE_ERASE 0x3
+
+#define ICH_FLASH_GFPREG 0x0000
+#define ICH_FLASH_HSFSTS 0x0004
+#define ICH_FLASH_HSFCTL 0x0006
+#define ICH_FLASH_FADDR 0x0008
+#define ICH_FLASH_FDATA0 0x0010
+#define ICH_FLASH_FRACC 0x0050
+#define ICH_FLASH_FREG0 0x0054
+#define ICH_FLASH_FREG1 0x0058
+#define ICH_FLASH_FREG2 0x005C
+#define ICH_FLASH_FREG3 0x0060
+#define ICH_FLASH_FPR0 0x0074
+#define ICH_FLASH_FPR1 0x0078
+#define ICH_FLASH_SSFSTS 0x0090
+#define ICH_FLASH_SSFCTL 0x0092
+#define ICH_FLASH_PREOP 0x0094
+#define ICH_FLASH_OPTYPE 0x0096
+#define ICH_FLASH_OPMENU 0x0098
+
+#define ICH_FLASH_REG_MAPSIZE 0x00A0
+#define ICH_FLASH_SECTOR_SIZE 4096
+#define ICH_GFPREG_BASE_MASK 0x1FFF
+#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+
+/* Miscellaneous PHY bit definitions. */
+#define PHY_PREAMBLE 0xFFFFFFFF
+#define PHY_SOF 0x01
+#define PHY_OP_READ 0x02
+#define PHY_OP_WRITE 0x01
+#define PHY_TURNAROUND 0x02
+#define PHY_PREAMBLE_SIZE 32
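+/* Illustrative mapping of the constants above onto the IEEE 802.3
+ * clause-22 software MDIO frame (a sketch for reference, not driver code):
+ *   <32 preamble bits> <SOF 01> <op: 10=read, 01=write>
+ *   <5-bit PHY addr> <5-bit reg addr> <turnaround 10> <16-bit data>
+ */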
+#define MII_CR_SPEED_1000 0x0040
+#define MII_CR_SPEED_100 0x2000
+#define MII_CR_SPEED_10 0x0000
+#define E1000_PHY_ADDRESS 0x01
+#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */
+#define PHY_FORCE_TIME 20 /* 2.0 Seconds */
+#define PHY_REVISION_MASK 0xFFFFFFF0
+#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */
+#define REG4_SPEED_MASK 0x01E0
+#define REG9_SPEED_MASK 0x0300
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010
+#define ADVERTISE_1000_FULL 0x0020
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */
+#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */
+#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */
+
+#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
new file mode 100644
index 000000000000..a42421f26678
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -0,0 +1,4971 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000.h"
+#include <net/ip6_checksum.h>
+#include <linux/io.h>
+#include <linux/prefetch.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+/* Intel Media SOC GbE MDIO physical base address */
+static unsigned long ce4100_gbe_mdio_base_phy;
+/* Intel Media SOC GbE MDIO virtual base address */
+void __iomem *ce4100_gbe_mdio_base_virt;
+
+char e1000_driver_name[] = "e1000";
+static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
+#define DRV_VERSION "7.3.21-k8-NAPI"
+const char e1000_driver_version[] = DRV_VERSION;
+static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
+
+/* e1000_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * Macro expands to...
+ * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+ */
+static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
+ INTEL_E1000_ETHERNET_DEVICE(0x1000),
+ INTEL_E1000_ETHERNET_DEVICE(0x1001),
+ INTEL_E1000_ETHERNET_DEVICE(0x1004),
+ INTEL_E1000_ETHERNET_DEVICE(0x1008),
+ INTEL_E1000_ETHERNET_DEVICE(0x1009),
+ INTEL_E1000_ETHERNET_DEVICE(0x100C),
+ INTEL_E1000_ETHERNET_DEVICE(0x100D),
+ INTEL_E1000_ETHERNET_DEVICE(0x100E),
+ INTEL_E1000_ETHERNET_DEVICE(0x100F),
+ INTEL_E1000_ETHERNET_DEVICE(0x1010),
+ INTEL_E1000_ETHERNET_DEVICE(0x1011),
+ INTEL_E1000_ETHERNET_DEVICE(0x1012),
+ INTEL_E1000_ETHERNET_DEVICE(0x1013),
+ INTEL_E1000_ETHERNET_DEVICE(0x1014),
+ INTEL_E1000_ETHERNET_DEVICE(0x1015),
+ INTEL_E1000_ETHERNET_DEVICE(0x1016),
+ INTEL_E1000_ETHERNET_DEVICE(0x1017),
+ INTEL_E1000_ETHERNET_DEVICE(0x1018),
+ INTEL_E1000_ETHERNET_DEVICE(0x1019),
+ INTEL_E1000_ETHERNET_DEVICE(0x101A),
+ INTEL_E1000_ETHERNET_DEVICE(0x101D),
+ INTEL_E1000_ETHERNET_DEVICE(0x101E),
+ INTEL_E1000_ETHERNET_DEVICE(0x1026),
+ INTEL_E1000_ETHERNET_DEVICE(0x1027),
+ INTEL_E1000_ETHERNET_DEVICE(0x1028),
+ INTEL_E1000_ETHERNET_DEVICE(0x1075),
+ INTEL_E1000_ETHERNET_DEVICE(0x1076),
+ INTEL_E1000_ETHERNET_DEVICE(0x1077),
+ INTEL_E1000_ETHERNET_DEVICE(0x1078),
+ INTEL_E1000_ETHERNET_DEVICE(0x1079),
+ INTEL_E1000_ETHERNET_DEVICE(0x107A),
+ INTEL_E1000_ETHERNET_DEVICE(0x107B),
+ INTEL_E1000_ETHERNET_DEVICE(0x107C),
+ INTEL_E1000_ETHERNET_DEVICE(0x108A),
+ INTEL_E1000_ETHERNET_DEVICE(0x1099),
+ INTEL_E1000_ETHERNET_DEVICE(0x10B5),
+ INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
+ /* required last entry */
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
+
+int e1000_up(struct e1000_adapter *adapter);
+void e1000_down(struct e1000_adapter *adapter);
+void e1000_reinit_locked(struct e1000_adapter *adapter);
+void e1000_reset(struct e1000_adapter *adapter);
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *txdr);
+static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rxdr);
+static void e1000_free_tx_resources(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring);
+static void e1000_free_rx_resources(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring);
+void e1000_update_stats(struct e1000_adapter *adapter);
+
+static int e1000_init_module(void);
+static void e1000_exit_module(void);
+static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void __devexit e1000_remove(struct pci_dev *pdev);
+static int e1000_alloc_queues(struct e1000_adapter *adapter);
+static int e1000_sw_init(struct e1000_adapter *adapter);
+static int e1000_open(struct net_device *netdev);
+static int e1000_close(struct net_device *netdev);
+static void e1000_configure_tx(struct e1000_adapter *adapter);
+static void e1000_configure_rx(struct e1000_adapter *adapter);
+static void e1000_setup_rctl(struct e1000_adapter *adapter);
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring);
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring);
+static void e1000_set_rx_mode(struct net_device *netdev);
+static void e1000_update_phy_info_task(struct work_struct *work);
+static void e1000_watchdog(struct work_struct *work);
+static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
+static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev);
+static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
+static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
+static int e1000_set_mac(struct net_device *netdev, void *p);
+static irqreturn_t e1000_intr(int irq, void *data);
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring);
+static int e1000_clean(struct napi_struct *napi, int budget);
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do);
+static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do);
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count);
+static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count);
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd);
+static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
+static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
+static void e1000_tx_timeout(struct net_device *dev);
+static void e1000_reset_task(struct work_struct *work);
+static void e1000_smartspeed(struct e1000_adapter *adapter);
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+ struct sk_buff *skb);
+
+static bool e1000_vlan_used(struct e1000_adapter *adapter);
+static void e1000_vlan_mode(struct net_device *netdev, u32 features);
+static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static void e1000_restore_vlan(struct e1000_adapter *adapter);
+
+#ifdef CONFIG_PM
+static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
+static int e1000_resume(struct pci_dev *pdev);
+#endif
+static void e1000_shutdown(struct pci_dev *pdev);
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* for netdump / net console */
+static void e1000_netpoll(struct net_device *netdev);
+#endif
+
+#define COPYBREAK_DEFAULT 256
+static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+ "Maximum size of packet that is copied to a new buffer on receive");
+
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
+static void e1000_io_resume(struct pci_dev *pdev);
+
+static struct pci_error_handlers e1000_err_handler = {
+ .error_detected = e1000_io_error_detected,
+ .slot_reset = e1000_io_slot_reset,
+ .resume = e1000_io_resume,
+};
+
+static struct pci_driver e1000_driver = {
+ .name = e1000_driver_name,
+ .id_table = e1000_pci_tbl,
+ .probe = e1000_probe,
+ .remove = __devexit_p(e1000_remove),
+#ifdef CONFIG_PM
+ /* Power Management Hooks */
+ .suspend = e1000_suspend,
+ .resume = e1000_resume,
+#endif
+ .shutdown = e1000_shutdown,
+ .err_handler = &e1000_err_handler
+};
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+/**
+ * e1000_get_hw_dev - return the net_device associated with an e1000_hw
+ * @hw: pointer to the hardware structure
+ *
+ * Used by the hardware layer to print debugging information.
+ **/
+struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
+{
+ struct e1000_adapter *adapter = hw->back;
+ return adapter->netdev;
+}
+
+/**
+ * e1000_init_module - Driver Registration Routine
+ *
+ * e1000_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+
+static int __init e1000_init_module(void)
+{
+ int ret;
+
+ pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
+
+ pr_info("%s\n", e1000_copyright);
+
+ ret = pci_register_driver(&e1000_driver);
+ if (copybreak != COPYBREAK_DEFAULT) {
+ if (copybreak == 0)
+ pr_info("copybreak disabled\n");
+ else
+ pr_info("copybreak enabled for "
+ "packets <= %u bytes\n", copybreak);
+ }
+ return ret;
+}
+
+module_init(e1000_init_module);
+
+/**
+ * e1000_exit_module - Driver Exit Cleanup Routine
+ *
+ * e1000_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+
+static void __exit e1000_exit_module(void)
+{
+ pci_unregister_driver(&e1000_driver);
+}
+
+module_exit(e1000_exit_module);
+
+static int e1000_request_irq(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ irq_handler_t handler = e1000_intr;
+ int irq_flags = IRQF_SHARED;
+ int err;
+
+ err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
+ netdev);
+ if (err)
+ e_err(probe, "Unable to allocate interrupt, Error: %d\n", err);
+
+ return err;
+}
+
+static void e1000_free_irq(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ free_irq(adapter->pdev->irq, netdev);
+}
+
+/**
+ * e1000_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+
+static void e1000_irq_disable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ ew32(IMC, ~0);
+ E1000_WRITE_FLUSH();
+ synchronize_irq(adapter->pdev->irq);
+}
+
+/**
+ * e1000_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+
+static void e1000_irq_enable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ ew32(IMS, IMS_ENABLE_MASK);
+ E1000_WRITE_FLUSH();
+}
+
+static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u16 vid = hw->mng_cookie.vlan_id;
+ u16 old_vid = adapter->mng_vlan_id;
+
+ if (!e1000_vlan_used(adapter))
+ return;
+
+ if (!test_bit(vid, adapter->active_vlans)) {
+ if (hw->mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
+ e1000_vlan_rx_add_vid(netdev, vid);
+ adapter->mng_vlan_id = vid;
+ } else {
+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+ }
+ if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
+ (vid != old_vid) &&
+ !test_bit(old_vid, adapter->active_vlans))
+ e1000_vlan_rx_kill_vid(netdev, old_vid);
+ } else {
+ adapter->mng_vlan_id = vid;
+ }
+}
+
+static void e1000_init_manageability(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (adapter->en_mng_pt) {
+ u32 manc = er32(MANC);
+
+ /* disable hardware interception of ARP */
+ manc &= ~(E1000_MANC_ARP_EN);
+
+ ew32(MANC, manc);
+ }
+}
+
+static void e1000_release_manageability(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (adapter->en_mng_pt) {
+ u32 manc = er32(MANC);
+
+ /* re-enable hardware interception of ARP */
+ manc |= E1000_MANC_ARP_EN;
+
+ ew32(MANC, manc);
+ }
+}
+
+/**
+ * e1000_configure - configure the hardware for RX and TX
+ * @adapter: board private structure
+ **/
+static void e1000_configure(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ e1000_set_rx_mode(netdev);
+
+ e1000_restore_vlan(adapter);
+ e1000_init_manageability(adapter);
+
+ e1000_configure_tx(adapter);
+ e1000_setup_rctl(adapter);
+ e1000_configure_rx(adapter);
+ /* call E1000_DESC_UNUSED which always leaves
+ * at least 1 descriptor unused to make sure
+ * next_to_use != next_to_clean */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct e1000_rx_ring *ring = &adapter->rx_ring[i];
+ adapter->alloc_rx_buf(adapter, ring,
+ E1000_DESC_UNUSED(ring));
+ }
+}
+
+int e1000_up(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* hardware has been reset, we need to reload some things */
+ e1000_configure(adapter);
+
+ clear_bit(__E1000_DOWN, &adapter->flags);
+
+ napi_enable(&adapter->napi);
+
+ e1000_irq_enable(adapter);
+
+ netif_wake_queue(adapter->netdev);
+
+ /* fire a link change interrupt to start the watchdog */
+ ew32(ICS, E1000_ICS_LSC);
+ return 0;
+}
+
+/**
+ * e1000_power_up_phy - restore link in case the phy was powered down
+ * @adapter: address of board private structure
+ *
+ * The phy may be powered down to save power and turn off link when the
+ * driver is unloaded and wake on lan is not enabled (among others)
+ * *** this routine MUST be followed by a call to e1000_reset ***
+ *
+ **/
+
+void e1000_power_up_phy(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 mii_reg = 0;
+
+ /* Just clear the power down bit to wake the phy back up */
+ if (hw->media_type == e1000_media_type_copper) {
+ /* according to the manual, the phy will retain its
+ * settings across a power-down/up cycle */
+ e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
+ mii_reg &= ~MII_CR_POWER_DOWN;
+ e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
+ }
+}
+
+static void e1000_power_down_phy(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Power down the PHY so no link is implied when interface is down.
+ * The PHY cannot be powered down if any of the following is true:
+ * (a) WoL is enabled
+ * (b) AMT is active
+ * (c) SoL/IDER session is active
+ */
+ if (!adapter->wol && hw->mac_type >= e1000_82540 &&
+ hw->media_type == e1000_media_type_copper) {
+ u16 mii_reg = 0;
+
+ switch (hw->mac_type) {
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_ce4100:
+ case e1000_82546_rev_3:
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ if (er32(MANC) & E1000_MANC_SMBUS_EN)
+ goto out;
+ break;
+ default:
+ goto out;
+ }
+ e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+ e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
+ msleep(1);
+ }
+out:
+ return;
+}
+
+static void e1000_down_and_stop(struct e1000_adapter *adapter)
+{
+ set_bit(__E1000_DOWN, &adapter->flags);
+ cancel_work_sync(&adapter->reset_task);
+ cancel_delayed_work_sync(&adapter->watchdog_task);
+ cancel_delayed_work_sync(&adapter->phy_info_task);
+ cancel_delayed_work_sync(&adapter->fifo_stall_task);
+}
+
+void e1000_down(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 rctl, tctl;
+
+ /* disable receives in the hardware */
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ /* flush and sleep below */
+
+ netif_tx_disable(netdev);
+
+ /* disable transmits in the hardware */
+ tctl = er32(TCTL);
+ tctl &= ~E1000_TCTL_EN;
+ ew32(TCTL, tctl);
+ /* flush both disables and wait for them to finish */
+ E1000_WRITE_FLUSH();
+ msleep(10);
+
+ napi_disable(&adapter->napi);
+
+ e1000_irq_disable(adapter);
+
+ /*
+ * Setting DOWN must be after irq_disable to prevent
+ * a screaming interrupt. Setting DOWN also prevents
+ * tasks from rescheduling.
+ */
+ e1000_down_and_stop(adapter);
+
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+ netif_carrier_off(netdev);
+
+ e1000_reset(adapter);
+ e1000_clean_all_tx_rings(adapter);
+ e1000_clean_all_rx_rings(adapter);
+}
+
+static void e1000_reinit_safe(struct e1000_adapter *adapter)
+{
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+ mutex_lock(&adapter->mutex);
+ e1000_down(adapter);
+ e1000_up(adapter);
+ mutex_unlock(&adapter->mutex);
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+}
+
+void e1000_reinit_locked(struct e1000_adapter *adapter)
+{
+ /* if rtnl_lock is not held the call path is bogus */
+ ASSERT_RTNL();
+ WARN_ON(in_interrupt());
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+ e1000_down(adapter);
+ e1000_up(adapter);
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+}
+
+void e1000_reset(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 pba = 0, tx_space, min_tx_space, min_rx_space;
+ bool legacy_pba_adjust = false;
+ u16 hwm;
+
+ /* Repartition Pba for greater than 9k mtu
+ * To take effect CTRL.RST is required.
+ */
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ case e1000_82540:
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ legacy_pba_adjust = true;
+ pba = E1000_PBA_48K;
+ break;
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_ce4100:
+ case e1000_82546_rev_3:
+ pba = E1000_PBA_48K;
+ break;
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ legacy_pba_adjust = true;
+ pba = E1000_PBA_30K;
+ break;
+ case e1000_undefined:
+ case e1000_num_macs:
+ break;
+ }
+
+ if (legacy_pba_adjust) {
+ if (hw->max_frame_size > E1000_RXBUFFER_8192)
+ pba -= 8; /* allocate more FIFO for Tx */
+
+ if (hw->mac_type == e1000_82547) {
+ adapter->tx_fifo_head = 0;
+ adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
+ adapter->tx_fifo_size =
+ (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
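+ /* e.g. (illustrative): pba = E1000_PBA_30K (30) here, so
+ * tx_fifo_size = (40 - 30) << 10 = 10240 bytes are left for
+ * the Tx FIFO stall workaround */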
+ atomic_set(&adapter->tx_fifo_stall, 0);
+ }
+ } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
+ /* adjust PBA for jumbo frames */
+ ew32(PBA, pba);
+
+ /* To maintain wire speed transmits, the Tx FIFO should be
+ * large enough to accommodate two full transmit packets,
+ * rounded up to the next 1KB and expressed in KB. Likewise,
+ * the Rx FIFO should be large enough to accommodate at least
+ * one full receive packet and is similarly rounded up and
+ * expressed in KB. */
+ pba = er32(PBA);
+ /* upper 16 bits has Tx packet buffer allocation size in KB */
+ tx_space = pba >> 16;
+ /* lower 16 bits has Rx packet buffer allocation size in KB */
+ pba &= 0xffff;
+ /*
+ * the tx fifo also stores 16 bytes of information about the tx
+ * but don't include ethernet FCS because hardware appends it
+ */
+ min_tx_space = (hw->max_frame_size +
+ sizeof(struct e1000_tx_desc) -
+ ETH_FCS_LEN) * 2;
+ min_tx_space = ALIGN(min_tx_space, 1024);
+ min_tx_space >>= 10;
+ /* software strips receive CRC, so leave room for it */
+ min_rx_space = hw->max_frame_size;
+ min_rx_space = ALIGN(min_rx_space, 1024);
+ min_rx_space >>= 10;
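+ /* e.g. (illustrative): with a 9000-byte MTU, max_frame_size = 9018,
+ * so min_tx_space = (9018 + 16 - 4) * 2 = 18060 -> 18432 -> 18 KB
+ * and min_rx_space = 9018 -> 9216 -> 9 KB */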
+
+ /* If current Tx allocation is less than the min Tx FIFO size,
+ * and the min Tx FIFO size is less than the current Rx FIFO
+ * allocation, take space away from current Rx allocation */
+ if (tx_space < min_tx_space &&
+ ((min_tx_space - tx_space) < pba)) {
+ pba = pba - (min_tx_space - tx_space);
+
+ /* PCI/PCIx hardware has PBA alignment constraints */
+ switch (hw->mac_type) {
+ case e1000_82545 ... e1000_82546_rev_3:
+ pba &= ~(E1000_PBA_8K - 1);
+ break;
+ default:
+ break;
+ }
+
+ /* if short on rx space, rx wins and must trump tx
+ * adjustment or use Early Receive if available */
+ if (pba < min_rx_space)
+ pba = min_rx_space;
+ }
+ }
+
+ ew32(PBA, pba);
+
+ /*
+ * flow control settings:
+ * The high water mark must be low enough to fit one full frame
+ * (or the size used for early receive) above it in the Rx FIFO.
+ * Set it to the lower of:
+ * - 90% of the Rx FIFO size, and
+ * - the full Rx FIFO size minus the early receive size (for parts
+ * with ERT support assuming ERT set to E1000_ERT_2048), or
+ * - the full Rx FIFO size minus one full frame
+ */
+ hwm = min(((pba << 10) * 9 / 10),
+ ((pba << 10) - hw->max_frame_size));
+
+ hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */
+ hw->fc_low_water = hw->fc_high_water - 8;
+ hw->fc_pause_time = E1000_FC_PAUSE_TIME;
+ hw->fc_send_xon = 1;
+ hw->fc = hw->original_fc;
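+ /* e.g. (illustrative): pba = 48 (KB) and max_frame_size = 1518 give
+ * hwm = min(49152 * 9 / 10, 49152 - 1518) = 44236, so fc_high_water
+ * = 44232 (44236 & 0xFFF8) and fc_low_water = 44224 */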
+
+ /* Allow time for pending master requests to run */
+ e1000_reset_hw(hw);
+ if (hw->mac_type >= e1000_82544)
+ ew32(WUC, 0);
+
+ if (e1000_init_hw(hw))
+ e_dev_err("Hardware Error\n");
+ e1000_update_mng_vlan(adapter);
+
+ /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
+ if (hw->mac_type >= e1000_82544 &&
+ hw->autoneg == 1 &&
+ hw->autoneg_advertised == ADVERTISE_1000_FULL) {
+ u32 ctrl = er32(CTRL);
+ /* clear phy power management bit if we are in gig only mode,
+ * which if enabled will attempt negotiation to 100Mb, which
+ * can cause a loss of link at power off or driver unload */
+ ctrl &= ~E1000_CTRL_SWDPIN3;
+ ew32(CTRL, ctrl);
+ }
+
+ /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+ ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
+
+ e1000_reset_adaptive(hw);
+ e1000_phy_get_info(hw, &adapter->phy_info);
+
+ e1000_release_manageability(adapter);
+}
+
+/**
+ * e1000_dump_eeprom - dump the EEPROM for users having checksum issues
+ * @adapter: board private structure
+ **/
+static void e1000_dump_eeprom(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ethtool_eeprom eeprom;
+ const struct ethtool_ops *ops = netdev->ethtool_ops;
+ u8 *data;
+ int i;
+ u16 csum_old, csum_new = 0;
+
+ eeprom.len = ops->get_eeprom_len(netdev);
+ eeprom.offset = 0;
+
+ data = kmalloc(eeprom.len, GFP_KERNEL);
+ if (!data) {
+ pr_err("Unable to allocate memory to dump EEPROM data\n");
+ return;
+ }
+
+ ops->get_eeprom(netdev, &eeprom, data);
+
+ csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
+ (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
+ for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
+ csum_new += data[i] + (data[i + 1] << 8);
+ csum_new = EEPROM_SUM - csum_new;
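+ /* convention (for reference): the 16-bit words 0x00..EEPROM_CHECKSUM_REG
+ * must sum, modulo 2^16, to EEPROM_SUM (0xBABA); csum_new is the
+ * checksum word value that would restore that property */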
+
+ pr_err("/*********************/\n");
+ pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
+ pr_err("Calculated : 0x%04x\n", csum_new);
+
+ pr_err("Offset Values\n");
+ pr_err("======== ======\n");
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
+
+ pr_err("Include this output when contacting your support provider.\n");
+ pr_err("This is not a software error! Something bad happened to\n");
+ pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
+ pr_err("result in further problems, possibly loss of data,\n");
+ pr_err("corruption or system hangs!\n");
+ pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
+ pr_err("which is invalid and requires you to set the proper MAC\n");
+ pr_err("address manually before continuing to enable this network\n");
+ pr_err("device. Please inspect the EEPROM dump and report the\n");
+ pr_err("issue to your hardware vendor or Intel Customer Support.\n");
+ pr_err("/*********************/\n");
+
+ kfree(data);
+}
+
+/**
+ * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
+ * @pdev: PCI device information struct
+ *
+ * Return true if an adapter needs ioport resources
+ **/
+static int e1000_is_need_ioport(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case E1000_DEV_ID_82540EM:
+ case E1000_DEV_ID_82540EM_LOM:
+ case E1000_DEV_ID_82540EP:
+ case E1000_DEV_ID_82540EP_LOM:
+ case E1000_DEV_ID_82540EP_LP:
+ case E1000_DEV_ID_82541EI:
+ case E1000_DEV_ID_82541EI_MOBILE:
+ case E1000_DEV_ID_82541ER:
+ case E1000_DEV_ID_82541ER_LOM:
+ case E1000_DEV_ID_82541GI:
+ case E1000_DEV_ID_82541GI_LF:
+ case E1000_DEV_ID_82541GI_MOBILE:
+ case E1000_DEV_ID_82544EI_COPPER:
+ case E1000_DEV_ID_82544EI_FIBER:
+ case E1000_DEV_ID_82544GC_COPPER:
+ case E1000_DEV_ID_82544GC_LOM:
+ case E1000_DEV_ID_82545EM_COPPER:
+ case E1000_DEV_ID_82545EM_FIBER:
+ case E1000_DEV_ID_82546EB_COPPER:
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546EB_QUAD_COPPER:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static u32 e1000_fix_features(struct net_device *netdev, u32 features)
+{
+ /*
+ * Since there is no support for separate rx/tx vlan accel
+ * enable/disable make sure tx flag is always in same state as rx.
+ */
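+ /* e.g. (illustrative): `ethtool -K ethX rxvlan off` therefore also
+ * clears the tx VLAN flag here, so the two offloads always toggle
+ * together */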
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ return features;
+}
+
+static int e1000_set_features(struct net_device *netdev, u32 features)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ u32 changed = features ^ netdev->features;
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ e1000_vlan_mode(netdev, features);
+
+ if (!(changed & NETIF_F_RXCSUM))
+ return 0;
+
+ adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
+
+ if (netif_running(netdev))
+ e1000_reinit_locked(adapter);
+ else
+ e1000_reset(adapter);
+
+ return 0;
+}
+
+static const struct net_device_ops e1000_netdev_ops = {
+ .ndo_open = e1000_open,
+ .ndo_stop = e1000_close,
+ .ndo_start_xmit = e1000_xmit_frame,
+ .ndo_get_stats = e1000_get_stats,
+ .ndo_set_rx_mode = e1000_set_rx_mode,
+ .ndo_set_mac_address = e1000_set_mac,
+ .ndo_tx_timeout = e1000_tx_timeout,
+ .ndo_change_mtu = e1000_change_mtu,
+ .ndo_do_ioctl = e1000_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = e1000_netpoll,
+#endif
+ .ndo_fix_features = e1000_fix_features,
+ .ndo_set_features = e1000_set_features,
+};
+
+/**
+ * e1000_init_hw_struct - initialize members of hw struct
+ * @adapter: board private struct
+ * @hw: structure used by e1000_hw.c
+ *
+ * Factors out initialization of the e1000_hw struct to its own function
+ * that can be called very early at init (just after struct allocation).
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ * Returns negative error codes if MAC type setup fails.
+ */
+static int e1000_init_hw_struct(struct e1000_adapter *adapter,
+ struct e1000_hw *hw)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* PCI config space info */
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_id = pdev->subsystem_device;
+ hw->revision_id = pdev->revision;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
+
+ hw->max_frame_size = adapter->netdev->mtu +
+ ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+ hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
+
+ /* identify the MAC */
+ if (e1000_set_mac_type(hw)) {
+ e_err(probe, "Unknown MAC Type\n");
+ return -EIO;
+ }
+
+ switch (hw->mac_type) {
+ default:
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ hw->phy_init_script = 1;
+ break;
+ }
+
+ e1000_set_media_type(hw);
+ e1000_get_bus_info(hw);
+
+ hw->wait_autoneg_complete = false;
+ hw->tbi_compatibility_en = true;
+ hw->adaptive_ifs = true;
+
+ /* Copper options */
+
+ if (hw->media_type == e1000_media_type_copper) {
+ hw->mdix = AUTO_ALL_MODES;
+ hw->disable_polarity_correction = false;
+ hw->master_slave = E1000_MASTER_SLAVE;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in e1000_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * e1000_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit e1000_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct e1000_adapter *adapter;
+ struct e1000_hw *hw;
+
+ static int cards_found;
+ static int global_quad_port_a; /* global ksp3 port a indication */
+ int i, err, pci_using_dac;
+ u16 eeprom_data = 0;
+ u16 tmp = 0;
+ u16 eeprom_apme_mask = E1000_EEPROM_APME;
+ int bars, need_ioport;
+
+ /* do not allocate ioport bars when not needed */
+ need_ioport = e1000_is_need_ioport(pdev);
+ if (need_ioport) {
+ bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
+ err = pci_enable_device(pdev);
+ } else {
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ err = pci_enable_device_mem(pdev);
+ }
+ if (err)
+ return err;
+
+ err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
+ if (err)
+ goto err_pci_reg;
+
+ pci_set_master(pdev);
+ err = pci_save_state(pdev);
+ if (err)
+ goto err_alloc_etherdev;
+
+ err = -ENOMEM;
+ netdev = alloc_etherdev(sizeof(struct e1000_adapter));
+ if (!netdev)
+ goto err_alloc_etherdev;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->msg_enable = (1 << debug) - 1;
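+ /* e.g.: the default debug = NETIF_MSG_DRV | NETIF_MSG_PROBE (3)
+ * yields msg_enable = (1 << 3) - 1 = 0x7, i.e. DRV, PROBE and LINK
+ * messages */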
+ adapter->bars = bars;
+ adapter->need_ioport = need_ioport;
+
+ hw = &adapter->hw;
+ hw->back = adapter;
+
+ err = -EIO;
+ hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
+ if (!hw->hw_addr)
+ goto err_ioremap;
+
+ if (adapter->need_ioport) {
+ for (i = BAR_1; i <= BAR_5; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+ hw->io_base = pci_resource_start(pdev, i);
+ break;
+ }
+ }
+ }
+
+ /* make ready for any if (hw->...) below */
+ err = e1000_init_hw_struct(adapter, hw);
+ if (err)
+ goto err_sw_init;
+
+ /*
+ * there is a workaround being applied below that limits
+ * 64-bit DMA addresses to 64-bit hardware. There are some
+ * 32-bit adapters that Tx hang when given 64-bit DMA addresses
+ */
+ pci_using_dac = 0;
+ if ((hw->bus_type == e1000_bus_type_pcix) &&
+ !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ /*
+ * according to DMA-API-HOWTO, coherent calls will always
+ * succeed if the set call did
+ */
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ pci_using_dac = 1;
+ } else {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ pr_err("No usable DMA config, aborting\n");
+ goto err_dma;
+ }
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ }
+
+ netdev->netdev_ops = &e1000_netdev_ops;
+ e1000_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+ netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
+
+ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+ adapter->bd_number = cards_found;
+
+ /* setup the private structure */
+
+ err = e1000_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+ err = -EIO;
+ if (hw->mac_type == e1000_ce4100) {
+ ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1);
+ ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy,
+ pci_resource_len(pdev, BAR_1));
+
+ if (!ce4100_gbe_mdio_base_virt)
+ goto err_mdio_ioremap;
+ }
+
+ if (hw->mac_type >= e1000_82543) {
+ netdev->hw_features = NETIF_F_SG |
+ NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_RX;
+ netdev->features = NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_FILTER;
+ }
+
+ if ((hw->mac_type >= e1000_82544) &&
+ (hw->mac_type != e1000_82547))
+ netdev->hw_features |= NETIF_F_TSO;
+
+ netdev->features |= netdev->hw_features;
+ netdev->hw_features |= NETIF_F_RXCSUM;
+
+ if (pci_using_dac) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
+
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_HW_CSUM;
+ netdev->vlan_features |= NETIF_F_SG;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
+
+ /* initialize eeprom parameters */
+ if (e1000_init_eeprom_params(hw)) {
+ e_err(probe, "EEPROM initialization failed\n");
+ goto err_eeprom;
+ }
+
+ /* before reading the EEPROM, reset the controller to
+ * put the device in a known good starting state */
+
+ e1000_reset_hw(hw);
+
+ /* make sure the EEPROM is good */
+ if (e1000_validate_eeprom_checksum(hw) < 0) {
+ e_err(probe, "The EEPROM Checksum Is Not Valid\n");
+ e1000_dump_eeprom(adapter);
+ /*
+ * set MAC address to all zeroes to invalidate and temporarily
+ * disable this device for the user. This blocks regular
+ * traffic while still permitting ethtool ioctls from reaching
+ * the hardware as well as allowing the user to run the
+ * interface after manually setting a hw addr using
+ * `ip link set address`
+ */
+ memset(hw->mac_addr, 0, netdev->addr_len);
+ } else {
+ /* copy the MAC address out of the EEPROM */
+ if (e1000_read_mac_addr(hw))
+ e_err(probe, "EEPROM Read Error\n");
+ }
+ /* don't block initialization here due to bad MAC address */
+ memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->perm_addr))
+ e_err(probe, "Invalid MAC Address\n");
+
+ INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
+ INIT_DELAYED_WORK(&adapter->fifo_stall_task,
+ e1000_82547_tx_fifo_stall_task);
+ INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
+ INIT_WORK(&adapter->reset_task, e1000_reset_task);
+
+ e1000_check_options(adapter);
+
+ /* Initial Wake on LAN setting
+ * If APM wake is enabled in the EEPROM,
+ * enable the ACPI Magic Packet filter
+ */
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ break;
+ case e1000_82544:
+ e1000_read_eeprom(hw,
+ EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
+ eeprom_apme_mask = E1000_EEPROM_82544_APM;
+ break;
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ if (er32(STATUS) & E1000_STATUS_FUNC_1) {
+ e1000_read_eeprom(hw,
+ EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+ break;
+ }
+ /* Fall Through */
+ default:
+ e1000_read_eeprom(hw,
+ EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+ break;
+ }
+ if (eeprom_data & eeprom_apme_mask)
+ adapter->eeprom_wol |= E1000_WUFC_MAG;
+
+ /* now that we have the eeprom settings, apply the special cases
+ * where the eeprom may be wrong or the board simply won't support
+ * wake on lan on a particular port */
+ switch (pdev->device) {
+ case E1000_DEV_ID_82546GB_PCIE:
+ adapter->eeprom_wol = 0;
+ break;
+ case E1000_DEV_ID_82546EB_FIBER:
+ case E1000_DEV_ID_82546GB_FIBER:
+ /* Wake events only supported on port A for dual fiber
+ * regardless of eeprom setting */
+ if (er32(STATUS) & E1000_STATUS_FUNC_1)
+ adapter->eeprom_wol = 0;
+ break;
+ case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+ /* if quad port adapter, disable WoL on all but port A */
+ if (global_quad_port_a != 0)
+ adapter->eeprom_wol = 0;
+ else
+ adapter->quad_port_a = 1;
+ /* Reset for multiple quad port adapters */
+ if (++global_quad_port_a == 4)
+ global_quad_port_a = 0;
+ break;
+ }
+
+ /* initialize the wol settings based on the eeprom settings */
+ adapter->wol = adapter->eeprom_wol;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ /* Auto detect PHY address */
+ if (hw->mac_type == e1000_ce4100) {
+ for (i = 0; i < 32; i++) {
+ hw->phy_addr = i;
+ e1000_read_phy_reg(hw, PHY_ID2, &tmp);
+ if (tmp == 0 || tmp == 0xFF) {
+ if (i == 31)
+ goto err_eeprom;
+ continue;
+ } else {
+ break;
+ }
+ }
+ }
+
+ /* reset the hardware with the new settings */
+ e1000_reset(adapter);
+
+ strcpy(netdev->name, "eth%d");
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ e1000_vlan_mode(netdev, netdev->features);
+
+ /* print bus type/speed/width info */
+ e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
+ ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
+ ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
+ (hw->bus_speed == e1000_bus_speed_120) ? 120 :
+ (hw->bus_speed == e1000_bus_speed_100) ? 100 :
+ (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
+ ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
+ netdev->dev_addr);
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+ e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
+
+ cards_found++;
+ return 0;
+
+err_register:
+err_eeprom:
+ e1000_phy_hw_reset(hw);
+
+ if (hw->flash_address)
+ iounmap(hw->flash_address);
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+err_dma:
+err_sw_init:
+err_mdio_ioremap:
+ iounmap(ce4100_gbe_mdio_base_virt);
+ iounmap(hw->hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_selected_regions(pdev, bars);
+err_pci_reg:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * e1000_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * e1000_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+
+static void __devexit e1000_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ e1000_down_and_stop(adapter);
+ e1000_release_manageability(adapter);
+
+ unregister_netdev(netdev);
+
+ e1000_phy_hw_reset(hw);
+
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+
+ iounmap(hw->hw_addr);
+ if (hw->flash_address)
+ iounmap(hw->flash_address);
+ pci_release_selected_regions(pdev, adapter->bars);
+
+ free_netdev(netdev);
+
+ pci_disable_device(pdev);
+}
+
+/**
+ * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * e1000_sw_init initializes the Adapter private data structure.
+ * e1000_init_hw_struct MUST be called before this function
+ **/
+
+static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
+{
+ adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_queues = 1;
+
+ if (e1000_alloc_queues(adapter)) {
+ e_err(probe, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ /* Explicitly disable IRQ since the NIC can be in any state. */
+ e1000_irq_disable(adapter);
+
+ spin_lock_init(&adapter->stats_lock);
+ mutex_init(&adapter->mutex);
+
+ set_bit(__E1000_DOWN, &adapter->flags);
+
+ return 0;
+}
+
+/**
+ * e1000_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time.
+ **/
+
+static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
+{
+ adapter->tx_ring = kcalloc(adapter->num_tx_queues,
+ sizeof(struct e1000_tx_ring), GFP_KERNEL);
+ if (!adapter->tx_ring)
+ return -ENOMEM;
+
+ adapter->rx_ring = kcalloc(adapter->num_rx_queues,
+ sizeof(struct e1000_rx_ring), GFP_KERNEL);
+ if (!adapter->rx_ring) {
+ kfree(adapter->tx_ring);
+ return -ENOMEM;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog task is started,
+ * and the stack is notified that the interface is ready.
+ **/
+
+static int e1000_open(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__E1000_TESTING, &adapter->flags))
+ return -EBUSY;
+
+ netif_carrier_off(netdev);
+
+ /* allocate transmit descriptors */
+ err = e1000_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = e1000_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ e1000_power_up_phy(adapter);
+
+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+ if ((hw->mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
+ e1000_update_mng_vlan(adapter);
+ }
+
+ /* before we allocate an interrupt, we must be ready to handle it.
+ * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
+ * as soon as we call pci_request_irq, so we have to setup our
+ * clean_rx handler before we do so. */
+ e1000_configure(adapter);
+
+ err = e1000_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ /* From here on the code is the same as e1000_up() */
+ clear_bit(__E1000_DOWN, &adapter->flags);
+
+ napi_enable(&adapter->napi);
+
+ e1000_irq_enable(adapter);
+
+ netif_start_queue(netdev);
+
+ /* fire a link status change interrupt to start the watchdog */
+ ew32(ICS, E1000_ICS_LSC);
+
+ return E1000_SUCCESS;
+
+err_req_irq:
+ e1000_power_down_phy(adapter);
+ e1000_free_all_rx_resources(adapter);
+err_setup_rx:
+ e1000_free_all_tx_resources(adapter);
+err_setup_tx:
+ e1000_reset(adapter);
+
+ return err;
+}
+
+/**
+ * e1000_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+
+static int e1000_close(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+ e1000_down(adapter);
+ e1000_power_down_phy(adapter);
+ e1000_free_irq(adapter);
+
+ e1000_free_all_tx_resources(adapter);
+ e1000_free_all_rx_resources(adapter);
+
+ /* kill manageability vlan ID if supported, but not if a vlan with
+ * the same ID is registered on the host OS (let 8021q kill it) */
+ if ((hw->mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+ !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
+ e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
+ * @adapter: address of board private structure
+ * @start: address of beginning of memory
+ * @len: length of memory
+ **/
+static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
+ unsigned long len)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned long begin = (unsigned long)start;
+ unsigned long end = begin + len;
+
+ /* First rev 82545 and 82546 need to not allow any memory
+ * write location to cross 64k boundary due to errata 23 */
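+ /* e.g. (illustrative): begin = 0xF000, len = 0x2000 gives
+ * end - 1 = 0x10FFF; (begin ^ (end - 1)) >> 16 == 1, so the buffer
+ * straddles a 64 kB boundary and is rejected */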
+ if (hw->mac_type == e1000_82545 ||
+ hw->mac_type == e1000_ce4100 ||
+ hw->mac_type == e1000_82546) {
+ return ((begin ^ (end - 1)) >> 16) == 0;
+ }
+
+ return true;
+}
+
+/**
+ * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @txdr: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *txdr)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct e1000_buffer) * txdr->count;
+ txdr->buffer_info = vzalloc(size);
+ if (!txdr->buffer_info) {
+ e_err(probe, "Unable to allocate memory for the Tx descriptor "
+ "ring\n");
+ return -ENOMEM;
+ }
+
+ /* round up to nearest 4K */
+
+ txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
+ txdr->size = ALIGN(txdr->size, 4096);
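+ /* e.g. (illustrative): the default 256 descriptors * 16 bytes = 4096,
+ * already aligned; a 320-descriptor ring (5120 bytes) would round up
+ * to 8192 */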
+
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
+ if (!txdr->desc) {
+setup_tx_desc_die:
+ vfree(txdr->buffer_info);
+ e_err(probe, "Unable to allocate memory for the Tx descriptor "
+ "ring\n");
+ return -ENOMEM;
+ }
+
+ /* Fix for errata 23, can't cross 64kB boundary */
+ if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
+ void *olddesc = txdr->desc;
+ dma_addr_t olddma = txdr->dma;
+ e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
+ txdr->size, txdr->desc);
+ /* Try again, without freeing the previous */
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
+ &txdr->dma, GFP_KERNEL);
+ /* Failed allocation, critical failure */
+ if (!txdr->desc) {
+ dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+ olddma);
+ goto setup_tx_desc_die;
+ }
+
+ if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
+ /* give up */
+ dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
+ txdr->dma);
+ dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+ olddma);
+ e_err(probe, "Unable to allocate aligned memory "
+ "for the transmit descriptor ring\n");
+ vfree(txdr->buffer_info);
+ return -ENOMEM;
+ } else {
+ /* Free old allocation, new allocation was successful */
+ dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+ olddma);
+ }
+ }
+ memset(txdr->desc, 0, txdr->size);
+
+ txdr->next_to_use = 0;
+ txdr->next_to_clean = 0;
+
+ return 0;
+}
+
+/**
+ * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
+ * (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ if (err) {
+ e_err(probe, "Allocation for Tx Queue %u failed\n", i);
+ for (i--; i >= 0; i--)
+ e1000_free_tx_resources(adapter,
+ &adapter->tx_ring[i]);
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+
+static void e1000_configure_tx(struct e1000_adapter *adapter)
+{
+ u64 tdba;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 tdlen, tctl, tipg;
+ u32 ipgr1, ipgr2;
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+
+ switch (adapter->num_tx_queues) {
+ case 1:
+ default:
+ tdba = adapter->tx_ring[0].dma;
+ tdlen = adapter->tx_ring[0].count *
+ sizeof(struct e1000_tx_desc);
+ ew32(TDLEN, tdlen);
+ ew32(TDBAH, (tdba >> 32));
+ ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
+ ew32(TDT, 0);
+ ew32(TDH, 0);
+ adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
+ E1000_TDH : E1000_82542_TDH);
+ adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
+ E1000_TDT : E1000_82542_TDT);
+ break;
+ }
+
+ /* Set the default values for the Tx Inter Packet Gap timer */
+ if ((hw->media_type == e1000_media_type_fiber ||
+ hw->media_type == e1000_media_type_internal_serdes))
+ tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
+ else
+ tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ tipg = DEFAULT_82542_TIPG_IPGT;
+ ipgr1 = DEFAULT_82542_TIPG_IPGR1;
+ ipgr2 = DEFAULT_82542_TIPG_IPGR2;
+ break;
+ default:
+ ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+ ipgr2 = DEFAULT_82543_TIPG_IPGR2;
+ break;
+ }
+ tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
+ tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
+ ew32(TIPG, tipg);
+
+ /* Set the Tx Interrupt Delay register */
+
+ ew32(TIDV, adapter->tx_int_delay);
+ if (hw->mac_type >= e1000_82540)
+ ew32(TADV, adapter->tx_abs_int_delay);
+
+ /* Program the Transmit Control Register */
+
+ tctl = er32(TCTL);
+ tctl &= ~E1000_TCTL_CT;
+ tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+ (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+ e1000_config_collision_dist(hw);
+
+ /* Setup Transmit Descriptor Settings for eop descriptor */
+ adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
+
+ /* only set IDE if we are delaying interrupts using the timers */
+ if (adapter->tx_int_delay)
+ adapter->txd_cmd |= E1000_TXD_CMD_IDE;
+
+ if (hw->mac_type < e1000_82543)
+ adapter->txd_cmd |= E1000_TXD_CMD_RPS;
+ else
+ adapter->txd_cmd |= E1000_TXD_CMD_RS;
+
+ /* Cache if we're 82544 running in PCI-X because we'll
+ * need this to apply a workaround later in the send path. */
+ if (hw->mac_type == e1000_82544 &&
+ hw->bus_type == e1000_bus_type_pcix)
+ adapter->pcix_82544 = 1;
+
+ ew32(TCTL, tctl);
+}
+
+/**
+ * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rxdr: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rxdr)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size, desc_len;
+
+ size = sizeof(struct e1000_buffer) * rxdr->count;
+ rxdr->buffer_info = vzalloc(size);
+ if (!rxdr->buffer_info) {
+ e_err(probe, "Unable to allocate memory for the Rx descriptor "
+ "ring\n");
+ return -ENOMEM;
+ }
+
+ desc_len = sizeof(struct e1000_rx_desc);
+
+ /* Round up to nearest 4K */
+
+ rxdr->size = rxdr->count * desc_len;
+ rxdr->size = ALIGN(rxdr->size, 4096);
+
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
+
+ if (!rxdr->desc) {
+ e_err(probe, "Unable to allocate memory for the Rx descriptor "
+ "ring\n");
+setup_rx_desc_die:
+ vfree(rxdr->buffer_info);
+ return -ENOMEM;
+ }
+
+ /* Fix for errata 23, can't cross 64kB boundary */
+ if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
+ void *olddesc = rxdr->desc;
+ dma_addr_t olddma = rxdr->dma;
+ e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
+ rxdr->size, rxdr->desc);
+ /* Try again, without freeing the previous */
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
+ &rxdr->dma, GFP_KERNEL);
+ /* Failed allocation, critical failure */
+ if (!rxdr->desc) {
+ dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+ olddma);
+ e_err(probe, "Unable to allocate memory for the Rx "
+ "descriptor ring\n");
+ goto setup_rx_desc_die;
+ }
+
+ if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
+ /* give up */
+ dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
+ rxdr->dma);
+ dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+ olddma);
+ e_err(probe, "Unable to allocate aligned memory for "
+ "the Rx descriptor ring\n");
+ goto setup_rx_desc_die;
+ } else {
+ /* Free old allocation, new allocation was successful */
+ dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+ olddma);
+ }
+ }
+ memset(rxdr->desc, 0, rxdr->size);
+
+ rxdr->next_to_clean = 0;
+ rxdr->next_to_use = 0;
+ rxdr->rx_skb_top = NULL;
+
+ return 0;
+}
+
+/**
+ * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
+ * (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ if (err) {
+ e_err(probe, "Allocation for Rx Queue %u failed\n", i);
+ for (i--; i >= 0; i--)
+ e1000_free_rx_resources(adapter,
+ &adapter->rx_ring[i]);
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * e1000_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
+ **/
+static void e1000_setup_rctl(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ rctl = er32(RCTL);
+
+ rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+
+ rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
+ E1000_RCTL_RDMTS_HALF |
+ (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+ if (hw->tbi_compatibility_on == 1)
+ rctl |= E1000_RCTL_SBP;
+ else
+ rctl &= ~E1000_RCTL_SBP;
+
+ if (adapter->netdev->mtu <= ETH_DATA_LEN)
+ rctl &= ~E1000_RCTL_LPE;
+ else
+ rctl |= E1000_RCTL_LPE;
+
+ /* Setup buffer sizes */
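+ /* buffers larger than 2048 bytes use the extended-size (BSEX)
+ * encoding, so BSEX is set up front and cleared again only in the
+ * 2048-byte default case below */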
+ rctl &= ~E1000_RCTL_SZ_4096;
+ rctl |= E1000_RCTL_BSEX;
+ switch (adapter->rx_buffer_len) {
+ case E1000_RXBUFFER_2048:
+ default:
+ rctl |= E1000_RCTL_SZ_2048;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case E1000_RXBUFFER_4096:
+ rctl |= E1000_RCTL_SZ_4096;
+ break;
+ case E1000_RXBUFFER_8192:
+ rctl |= E1000_RCTL_SZ_8192;
+ break;
+ case E1000_RXBUFFER_16384:
+ rctl |= E1000_RCTL_SZ_16384;
+ break;
+ }
+
+ ew32(RCTL, rctl);
+}
+
+/**
+ * e1000_configure_rx - Configure 8254x Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+
+static void e1000_configure_rx(struct e1000_adapter *adapter)
+{
+ u64 rdba;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rdlen, rctl, rxcsum;
+
+ if (adapter->netdev->mtu > ETH_DATA_LEN) {
+ rdlen = adapter->rx_ring[0].count *
+ sizeof(struct e1000_rx_desc);
+ adapter->clean_rx = e1000_clean_jumbo_rx_irq;
+ adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
+ } else {
+ rdlen = adapter->rx_ring[0].count *
+ sizeof(struct e1000_rx_desc);
+ adapter->clean_rx = e1000_clean_rx_irq;
+ adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
+ }
+
+ /* disable receives while setting up the descriptors */
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+
+ /* set the Receive Delay Timer Register */
+ ew32(RDTR, adapter->rx_int_delay);
+
+ if (hw->mac_type >= e1000_82540) {
+ ew32(RADV, adapter->rx_abs_int_delay);
+ if (adapter->itr_setting != 0)
+ ew32(ITR, 1000000000 / (adapter->itr * 256));
+ }
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring */
+ switch (adapter->num_rx_queues) {
+ case 1:
+ default:
+ rdba = adapter->rx_ring[0].dma;
+ ew32(RDLEN, rdlen);
+ ew32(RDBAH, (rdba >> 32));
+ ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
+ ew32(RDT, 0);
+ ew32(RDH, 0);
+ adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
+ adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
+ break;
+ }
+
+ /* Enable 82543 Receive Checksum Offload for TCP and UDP */
+ if (hw->mac_type >= e1000_82543) {
+ rxcsum = er32(RXCSUM);
+ if (adapter->rx_csum)
+ rxcsum |= E1000_RXCSUM_TUOFL;
+ else
+ /* don't need to clear IPPCSE as it defaults to 0 */
+ rxcsum &= ~E1000_RXCSUM_TUOFL;
+ ew32(RXCSUM, rxcsum);
+ }
+
+ /* Enable Receives */
+ ew32(RCTL, rctl | E1000_RCTL_EN);
+}
+
+/**
+ * e1000_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+
+static void e1000_free_tx_resources(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ e1000_clean_tx_ring(adapter, tx_ring);
+
+ vfree(tx_ring->buffer_info);
+ tx_ring->buffer_info = NULL;
+
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * e1000_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
+}
+
+static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
+ struct e1000_buffer *buffer_info)
+{
+ if (buffer_info->dma) {
+ if (buffer_info->mapped_as_page)
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length,
+ DMA_TO_DEVICE);
+ buffer_info->dma = 0;
+ }
+ if (buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ buffer_info->time_stamp = 0;
+ /* buffer_info must be completely set up in the transmit path */
+}
+
+/**
+ * e1000_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_buffer *buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Tx ring sk_buffs */
+
+ for (i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+ e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+ }
+
+ size = sizeof(struct e1000_buffer) * tx_ring->count;
+ memset(tx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ tx_ring->last_tx_tso = 0;
+
+ writel(0, hw->hw_addr + tx_ring->tdh);
+ writel(0, hw->hw_addr + tx_ring->tdt);
+}
+
+/**
+ * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+/**
+ * e1000_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+
+static void e1000_free_rx_resources(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ e1000_clean_rx_ring(adapter, rx_ring);
+
+ vfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
+
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
+
+ rx_ring->desc = NULL;
+}
+
+/**
+ * e1000_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * e1000_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
+ **/
+
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_buffer *buffer_info;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ if (buffer_info->dma &&
+ adapter->clean_rx == e1000_clean_rx_irq) {
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ buffer_info->length,
+ DMA_FROM_DEVICE);
+ } else if (buffer_info->dma &&
+ adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ buffer_info->length,
+ DMA_FROM_DEVICE);
+ }
+
+ buffer_info->dma = 0;
+ if (buffer_info->page) {
+ put_page(buffer_info->page);
+ buffer_info->page = NULL;
+ }
+ if (buffer_info->skb) {
+ dev_kfree_skb(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ }
+
+ /* there also may be some cached data from a chained receive */
+ if (rx_ring->rx_skb_top) {
+ dev_kfree_skb(rx_ring->rx_skb_top);
+ rx_ring->rx_skb_top = NULL;
+ }
+
+ size = sizeof(struct e1000_buffer) * rx_ring->count;
+ memset(rx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ writel(0, hw->hw_addr + rx_ring->rdh);
+ writel(0, hw->hw_addr + rx_ring->rdt);
+}
+
+/**
+ * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+}
+
+/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
+ * and memory write and invalidate disabled for certain operations
+ */
+static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 rctl;
+
+ e1000_pci_clear_mwi(hw);
+
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_RST;
+ ew32(RCTL, rctl);
+ E1000_WRITE_FLUSH();
+ mdelay(5);
+
+ if (netif_running(netdev))
+ e1000_clean_all_rx_rings(adapter);
+}
+
+static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 rctl;
+
+ rctl = er32(RCTL);
+ rctl &= ~E1000_RCTL_RST;
+ ew32(RCTL, rctl);
+ E1000_WRITE_FLUSH();
+ mdelay(5);
+
+ if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+ e1000_pci_set_mwi(hw);
+
+ if (netif_running(netdev)) {
+ /* No need to loop, because 82542 supports only 1 queue */
+ struct e1000_rx_ring *ring = &adapter->rx_ring[0];
+ e1000_configure_rx(adapter);
+ adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
+ }
+}
+
+/**
+ * e1000_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int e1000_set_mac(struct net_device *netdev, void *p)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ /* 82542 2.0 needs to be in reset to write receive address registers */
+
+ if (hw->mac_type == e1000_82542_rev2_0)
+ e1000_enter_82542_rst(adapter);
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
+
+ e1000_rar_set(hw, hw->mac_addr, 0);
+
+ if (hw->mac_type == e1000_82542_rev2_0)
+ e1000_leave_82542_rst(adapter);
+
+ return 0;
+}
+
+/**
+ * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+
+static void e1000_set_rx_mode(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ bool use_uc = false;
+ u32 rctl;
+ u32 hash_value;
+ int i, rar_entries = E1000_RAR_ENTRIES;
+ int mta_reg_count = E1000_NUM_MTA_REGISTERS;
+ u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
+
+ if (!mcarray) {
+ e_err(probe, "memory allocation failed\n");
+ return;
+ }
+
+ /* Check for Promiscuous and All Multicast modes */
+
+ rctl = er32(RCTL);
+
+ if (netdev->flags & IFF_PROMISC) {
+ rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+ rctl &= ~E1000_RCTL_VFE;
+ } else {
+ if (netdev->flags & IFF_ALLMULTI)
+ rctl |= E1000_RCTL_MPE;
+ else
+ rctl &= ~E1000_RCTL_MPE;
+ /* Enable VLAN filter if there is a VLAN */
+ if (e1000_vlan_used(adapter))
+ rctl |= E1000_RCTL_VFE;
+ }
+
+ if (netdev_uc_count(netdev) > rar_entries - 1) {
+ rctl |= E1000_RCTL_UPE;
+ } else if (!(netdev->flags & IFF_PROMISC)) {
+ rctl &= ~E1000_RCTL_UPE;
+ use_uc = true;
+ }
+
+ ew32(RCTL, rctl);
+
+ /* 82542 2.0 needs to be in reset to write receive address registers */
+
+ if (hw->mac_type == e1000_82542_rev2_0)
+ e1000_enter_82542_rst(adapter);
+
+ /* load the first 14 addresses into the exact filters 1-14. Unicast
+ * addresses take precedence to avoid disabling unicast filtering
+ * when possible.
+ *
+ * RAR 0 is used for the station MAC address. If there are not
+ * 14 addresses, go ahead and clear the remaining filters.
+ */
+ i = 1;
+ if (use_uc)
+ netdev_for_each_uc_addr(ha, netdev) {
+ if (i == rar_entries)
+ break;
+ e1000_rar_set(hw, ha->addr, i++);
+ }
+
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (i == rar_entries) {
+ /* load any remaining addresses into the hash table */
+ u32 hash_reg, hash_bit, mta;
+ hash_value = e1000_hash_mc_addr(hw, ha->addr);
+ hash_reg = (hash_value >> 5) & 0x7F;
+ hash_bit = hash_value & 0x1F;
+ mta = (1 << hash_bit);
+ mcarray[hash_reg] |= mta;
+ } else {
+ e1000_rar_set(hw, ha->addr, i++);
+ }
+ }
+
+ for (; i < rar_entries; i++) {
+ E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
+ E1000_WRITE_FLUSH();
+ }
+
+ /* write the hash table completely, writing from the bottom up to
+ * avoid both stupid write-combining chipsets and flushing each write */
+ for (i = mta_reg_count - 1; i >= 0; i--) {
+ /*
+ * The 82544 has an errata where writing odd
+ * offsets overwrites the previous even offset, but writing
+ * backwards over the range solves the issue by always
+ * writing the odd offset first.
+ */
+ E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
+ }
+ E1000_WRITE_FLUSH();
+
+ if (hw->mac_type == e1000_82542_rev2_0)
+ e1000_leave_82542_rst(adapter);
+
+ kfree(mcarray);
+}
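The overflow path above spreads addresses that do not fit the exact filters across the multicast table: the 32-bit value from e1000_hash_mc_addr() indexes a 4096-bit table held in 128 32-bit MTA registers. A minimal sketch of that indexing follows; the helper name is hypothetical, and the arithmetic is copied verbatim from the loop above.

#include <stdint.h>

/* Split a multicast hash value into (register, bit) coordinates for
 * the 4096-bit MTA: bits [11:5] select one of the 128 registers,
 * bits [4:0] select the bit inside it.
 */
static void mta_index(uint32_t hash_value, uint32_t *hash_reg,
		      uint32_t *hash_bit)
{
	*hash_reg = (hash_value >> 5) & 0x7F;
	*hash_bit = hash_value & 0x1F;
}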
+
+/**
+ * e1000_update_phy_info_task - get phy info
+ * @work: work struct contained inside adapter struct
+ *
+ * Need to wait a few seconds after link up to get diagnostic information from
+ * the phy
+ */
+static void e1000_update_phy_info_task(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter,
+ phy_info_task.work);
+ if (test_bit(__E1000_DOWN, &adapter->flags))
+ return;
+ mutex_lock(&adapter->mutex);
+ e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
+ mutex_unlock(&adapter->mutex);
+}
+
+/**
+ * e1000_82547_tx_fifo_stall_task - task to complete work
+ * @work: work struct contained inside adapter struct
+ **/
+static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter,
+ fifo_stall_task.work);
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 tctl;
+
+ if (test_bit(__E1000_DOWN, &adapter->flags))
+ return;
+ mutex_lock(&adapter->mutex);
+ if (atomic_read(&adapter->tx_fifo_stall)) {
+ if ((er32(TDT) == er32(TDH)) &&
+ (er32(TDFT) == er32(TDFH)) &&
+ (er32(TDFTS) == er32(TDFHS))) {
+ tctl = er32(TCTL);
+ ew32(TCTL, tctl & ~E1000_TCTL_EN);
+ ew32(TDFT, adapter->tx_head_addr);
+ ew32(TDFH, adapter->tx_head_addr);
+ ew32(TDFTS, adapter->tx_head_addr);
+ ew32(TDFHS, adapter->tx_head_addr);
+ ew32(TCTL, tctl);
+ E1000_WRITE_FLUSH();
+
+ adapter->tx_fifo_head = 0;
+ atomic_set(&adapter->tx_fifo_stall, 0);
+ netif_wake_queue(netdev);
+ } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
+ schedule_delayed_work(&adapter->fifo_stall_task, 1);
+ }
+ }
+ mutex_unlock(&adapter->mutex);
+}
+
+bool e1000_has_link(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ bool link_active = false;
+
+ /* get_link_status is set on LSC (link status) interrupt or rx
+ * sequence error interrupt (except on the Intel CE4100, where
+ * it is forced on every poll). It will stay set until
+ * e1000_check_for_link() establishes link, for copper adapters
+ * ONLY.
+ */
+ switch (hw->media_type) {
+ case e1000_media_type_copper:
+ if (hw->mac_type == e1000_ce4100)
+ hw->get_link_status = 1;
+ if (hw->get_link_status) {
+ e1000_check_for_link(hw);
+ link_active = !hw->get_link_status;
+ } else {
+ link_active = true;
+ }
+ break;
+ case e1000_media_type_fiber:
+ e1000_check_for_link(hw);
+ link_active = !!(er32(STATUS) & E1000_STATUS_LU);
+ break;
+ case e1000_media_type_internal_serdes:
+ e1000_check_for_link(hw);
+ link_active = hw->serdes_has_link;
+ break;
+ default:
+ break;
+ }
+
+ return link_active;
+}
+
+/**
+ * e1000_watchdog - work function
+ * @work: work struct contained inside adapter struct
+ **/
+static void e1000_watchdog(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter,
+ watchdog_task.work);
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_tx_ring *txdr = adapter->tx_ring;
+ u32 link, tctl;
+
+ if (test_bit(__E1000_DOWN, &adapter->flags))
+ return;
+
+ mutex_lock(&adapter->mutex);
+ link = e1000_has_link(adapter);
+ if ((netif_carrier_ok(netdev)) && link)
+ goto link_up;
+
+ if (link) {
+ if (!netif_carrier_ok(netdev)) {
+ u32 ctrl;
+ bool txb2b = true;
+ /* update snapshot of PHY registers on LSC */
+ e1000_get_speed_and_duplex(hw,
+ &adapter->link_speed,
+ &adapter->link_duplex);
+
+ ctrl = er32(CTRL);
+ pr_info("%s NIC Link is Up %d Mbps %s, "
+ "Flow Control: %s\n",
+ netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full Duplex" : "Half Duplex",
+ ((ctrl & E1000_CTRL_TFCE) && (ctrl &
+ E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
+ E1000_CTRL_RFCE) ? "RX" : ((ctrl &
+ E1000_CTRL_TFCE) ? "TX" : "None")));
+
+ /* adjust timeout factor according to speed/duplex */
+ adapter->tx_timeout_factor = 1;
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ txb2b = false;
+ adapter->tx_timeout_factor = 16;
+ break;
+ case SPEED_100:
+ txb2b = false;
+ /* maybe add some timeout factor? */
+ break;
+ }
+
+ /* enable transmits in the hardware */
+ tctl = er32(TCTL);
+ tctl |= E1000_TCTL_EN;
+ ew32(TCTL, tctl);
+
+ netif_carrier_on(netdev);
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ schedule_delayed_work(&adapter->phy_info_task,
+ 2 * HZ);
+ adapter->smartspeed = 0;
+ }
+ } else {
+ if (netif_carrier_ok(netdev)) {
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+ pr_info("%s NIC Link is Down\n",
+ netdev->name);
+ netif_carrier_off(netdev);
+
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ schedule_delayed_work(&adapter->phy_info_task,
+ 2 * HZ);
+ }
+
+ e1000_smartspeed(adapter);
+ }
+
+link_up:
+ e1000_update_stats(adapter);
+
+ hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
+ adapter->tpt_old = adapter->stats.tpt;
+ hw->collision_delta = adapter->stats.colc - adapter->colc_old;
+ adapter->colc_old = adapter->stats.colc;
+
+ adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
+ adapter->gorcl_old = adapter->stats.gorcl;
+ adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
+ adapter->gotcl_old = adapter->stats.gotcl;
+
+ e1000_update_adaptive(hw);
+
+ if (!netif_carrier_ok(netdev)) {
+ if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
+ /* We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context). */
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+ /* exit immediately since reset is imminent */
+ goto unlock;
+ }
+ }
+
+ /* Simple mode for Interrupt Throttle Rate (ITR) */
+ if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
+ /*
+ * Symmetric Tx/Rx gets a reduced ITR=2000;
+ * Total asymmetrical Tx or Rx gets ITR=8000;
+ * everyone else falls between 2000 and 8000.
+ */
+ u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
+ u32 dif = (adapter->gotcl > adapter->gorcl ?
+ adapter->gotcl - adapter->gorcl :
+ adapter->gorcl - adapter->gotcl) / 10000;
+ u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
+ ew32(ITR, 1000000000 / (itr * 256));
+ }
+
+ /* Cause software interrupt to ensure rx ring is cleaned */
+ ew32(ICS, E1000_ICS_RXDMT0);
+
+ /* Force detection of hung controller every watchdog period */
+ adapter->detect_tx_hung = true;
+
+ /* Reschedule the task */
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
+
+unlock:
+ mutex_unlock(&adapter->mutex);
+}
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+/**
+ * e1000_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ * Stores a new ITR value based on packet and byte
+ * counts during the last interrupt. The advantage of per-interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed, and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ * This functionality is controlled by the InterruptThrottleRate module
+ * parameter (see e1000_param.c)
+ **/
+static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
+ u16 itr_setting, int packets, int bytes)
+{
+ unsigned int retval = itr_setting;
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (unlikely(hw->mac_type < e1000_82540))
+ goto update_itr_done;
+
+ if (packets == 0)
+ goto update_itr_done;
+
+ switch (itr_setting) {
+ case lowest_latency:
+ /* jumbo frames get bulk treatment */
+ if (bytes/packets > 8000)
+ retval = bulk_latency;
+ else if ((packets < 5) && (bytes > 512))
+ retval = low_latency;
+ break;
+ case low_latency: /* 50 usec aka 20000 ints/s */
+ if (bytes > 10000) {
+ /* jumbo frames need bulk latency setting */
+ if (bytes/packets > 8000)
+ retval = bulk_latency;
+ else if ((packets < 10) || ((bytes/packets) > 1200))
+ retval = bulk_latency;
+ else if ((packets > 35))
+ retval = lowest_latency;
+ } else if (bytes/packets > 2000)
+ retval = bulk_latency;
+ else if (packets <= 2 && bytes < 512)
+ retval = lowest_latency;
+ break;
+ case bulk_latency: /* 250 usec aka 4000 ints/s */
+ if (bytes > 25000) {
+ if (packets > 35)
+ retval = low_latency;
+ } else if (bytes < 6000) {
+ retval = low_latency;
+ }
+ break;
+ }
+
+update_itr_done:
+ return retval;
+}
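Two worked passes through the low_latency branch above may help; the traffic numbers are illustrative, not taken from the patch.

/* packets = 50, bytes = 40000 -> bytes/packets = 800:
 *   bytes > 10000, ratio <= 1200, packets > 35  -> lowest_latency
 * packets = 40, bytes = 60000 -> bytes/packets = 1500:
 *   bytes > 10000, ratio > 1200                 -> bulk_latency
 */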
+
+static void e1000_set_itr(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 current_itr;
+ u32 new_itr = adapter->itr;
+
+ if (unlikely(hw->mac_type < e1000_82540))
+ return;
+
+ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+ if (unlikely(adapter->link_speed != SPEED_1000)) {
+ current_itr = 0;
+ new_itr = 4000;
+ goto set_itr_now;
+ }
+
+ adapter->tx_itr = e1000_update_itr(adapter,
+ adapter->tx_itr,
+ adapter->total_tx_packets,
+ adapter->total_tx_bytes);
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
+ adapter->tx_itr = low_latency;
+
+ adapter->rx_itr = e1000_update_itr(adapter,
+ adapter->rx_itr,
+ adapter->total_rx_packets,
+ adapter->total_rx_bytes);
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
+ adapter->rx_itr = low_latency;
+
+ current_itr = max(adapter->rx_itr, adapter->tx_itr);
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = 70000;
+ break;
+ case low_latency:
+ new_itr = 20000; /* aka hwitr = ~200 */
+ break;
+ case bulk_latency:
+ new_itr = 4000;
+ break;
+ default:
+ break;
+ }
+
+set_itr_now:
+ if (new_itr != adapter->itr) {
+ /* this attempts to bias the interrupt rate towards Bulk
+ * by adding intermediate steps when interrupt rate is
+ * increasing */
+ new_itr = new_itr > adapter->itr ?
+ min(adapter->itr + (new_itr >> 2), new_itr) :
+ new_itr;
+ adapter->itr = new_itr;
+ ew32(ITR, 1000000000 / (new_itr * 256));
+ }
+}
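Both here and in the watchdog, the value written to ITR converts an interrupts-per-second target into the register's interval units; the 1000000000 / (itr * 256) expression assumes the 8254x ITR granularity of 256 ns per unit. A standalone sketch with one worked value, under that assumption (the helper name is hypothetical):

#include <stdint.h>

/* Convert a target interrupt rate into an ITR register value,
 * assuming one register unit equals 256 ns of inter-interrupt gap.
 * Example: 20000 ints/s -> 1000000000 / (20000 * 256) = 195 units,
 * i.e. roughly 195 * 256 ns ~= 50 us between interrupts, matching
 * the "50 usec aka 20000 ints/s" comment in e1000_update_itr().
 */
static uint32_t itr_rate_to_reg(uint32_t ints_per_sec)
{
	return 1000000000u / (ints_per_sec * 256u);
}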
+
+#define E1000_TX_FLAGS_CSUM 0x00000001
+#define E1000_TX_FLAGS_VLAN 0x00000002
+#define E1000_TX_FLAGS_TSO 0x00000004
+#define E1000_TX_FLAGS_IPV4 0x00000008
+#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
+#define E1000_TX_FLAGS_VLAN_SHIFT 16
+
+static int e1000_tso(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+{
+ struct e1000_context_desc *context_desc;
+ struct e1000_buffer *buffer_info;
+ unsigned int i;
+ u32 cmd_length = 0;
+ u16 ipcse = 0, tucse, mss;
+ u8 ipcss, ipcso, tucss, tucso, hdr_len;
+ int err;
+
+ if (skb_is_gso(skb)) {
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ mss = skb_shinfo(skb)->gso_size;
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ cmd_length = E1000_TXD_CMD_IP;
+ ipcse = skb_transport_offset(skb) - 1;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ ipcse = 0;
+ }
+ ipcss = skb_network_offset(skb);
+ ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
+ tucss = skb_transport_offset(skb);
+ tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
+ tucse = 0;
+
+ cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
+ E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
+
+ i = tx_ring->next_to_use;
+ context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+
+ context_desc->lower_setup.ip_fields.ipcss = ipcss;
+ context_desc->lower_setup.ip_fields.ipcso = ipcso;
+ context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
+ context_desc->upper_setup.tcp_fields.tucss = tucss;
+ context_desc->upper_setup.tcp_fields.tucso = tucso;
+ context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+ context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
+ context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
+ context_desc->cmd_and_length = cpu_to_le32(cmd_length);
+
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+
+ if (++i == tx_ring->count) i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+ }
+ return false;
+}
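For a concrete picture of the context-descriptor fields computed above, assume an untagged Ethernet II frame carrying IPv4 (20-byte header, no options) and TCP; this layout is illustrative, not one taken from the patch.

/* ipcss = skb_network_offset()       = 14 (end of Ethernet header)
 * ipcso = offset of iphdr->check     = 14 + 10 = 24
 * ipcse = skb_transport_offset() - 1 = 33 (last byte of IP header)
 * tucss = skb_transport_offset()     = 34
 * tucso = offset of tcphdr->check    = 34 + 16 = 50
 * tucse = 0 (checksum runs to the end of the packet)
 */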
+
+static bool e1000_tx_csum(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+{
+ struct e1000_context_desc *context_desc;
+ struct e1000_buffer *buffer_info;
+ unsigned int i;
+ u8 css;
+ u32 cmd_len = E1000_TXD_CMD_DEXT;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return false;
+
+ switch (skb->protocol) {
+ case cpu_to_be16(ETH_P_IP):
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ cmd_len |= E1000_TXD_CMD_TCP;
+ break;
+ case cpu_to_be16(ETH_P_IPV6):
+ /* XXX not handling all IPV6 headers */
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ cmd_len |= E1000_TXD_CMD_TCP;
+ break;
+ default:
+ if (unlikely(net_ratelimit()))
+ e_warn(drv, "checksum_partial proto=%x!\n",
+ skb->protocol);
+ break;
+ }
+
+ css = skb_checksum_start_offset(skb);
+
+ i = tx_ring->next_to_use;
+ buffer_info = &tx_ring->buffer_info[i];
+ context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+
+ context_desc->lower_setup.ip_config = 0;
+ context_desc->upper_setup.tcp_fields.tucss = css;
+ context_desc->upper_setup.tcp_fields.tucso =
+ css + skb->csum_offset;
+ context_desc->upper_setup.tcp_fields.tucse = 0;
+ context_desc->tcp_seg_setup.data = 0;
+ context_desc->cmd_and_length = cpu_to_le32(cmd_len);
+
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+
+ if (unlikely(++i == tx_ring->count)) i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+}
+
+#define E1000_MAX_TXD_PWR 12
+#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
+
+static int e1000_tx_map(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring,
+ struct sk_buff *skb, unsigned int first,
+ unsigned int max_per_txd, unsigned int nr_frags,
+ unsigned int mss)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_buffer *buffer_info;
+ unsigned int len = skb_headlen(skb);
+ unsigned int offset = 0, size, count = 0, i;
+ unsigned int f, bytecount, segs;
+
+ i = tx_ring->next_to_use;
+
+ while (len) {
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, max_per_txd);
+ /* Workaround for controller erratum --
+ * the descriptor for a non-TSO packet in a linear skb that
+ * follows a TSO packet gets written back prematurely, before
+ * the data is fully DMA'd to the controller */
+ if (!skb->data_len && tx_ring->last_tx_tso &&
+ !skb_is_gso(skb)) {
+ tx_ring->last_tx_tso = 0;
+ size -= 4;
+ }
+
+ /* Workaround for premature desc write-backs
+ * in TSO mode. Append 4-byte sentinel desc */
+ if (unlikely(mss && !nr_frags && size == len && size > 8))
+ size -= 4;
+ /* work-around for errata 10, which applies
+ * to all controllers in PCI-X mode.
+ * The fix is to make sure that the first descriptor of a
+ * packet is smaller than 2048 - 16 - 16 (or 2016) bytes.
+ */
+ if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
+ (size > 2015) && count == 0))
+ size = 2015;
+
+ /* Workaround for potential 82544 hang in PCI-X. Avoid
+ * terminating buffers within evenly-aligned dwords. */
+ if (unlikely(adapter->pcix_82544 &&
+ !((unsigned long)(skb->data + offset + size - 1) & 4) &&
+ size > 4))
+ size -= 4;
+
+ buffer_info->length = size;
+ /* set time_stamp *before* dma to help avoid a possible race */
+ buffer_info->time_stamp = jiffies;
+ buffer_info->mapped_as_page = false;
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+ goto dma_error;
+ buffer_info->next_to_watch = i;
+
+ len -= size;
+ offset += size;
+ count++;
+ if (len) {
+ i++;
+ if (unlikely(i == tx_ring->count))
+ i = 0;
+ }
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ len = frag->size;
+ offset = 0;
+
+ while (len) {
+ unsigned long bufend;
+ i++;
+ if (unlikely(i == tx_ring->count))
+ i = 0;
+
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, max_per_txd);
+ /* Workaround for premature desc write-backs
+ * in TSO mode. Append 4-byte sentinel desc */
+ if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
+ size -= 4;
+ /* Workaround for potential 82544 hang in PCI-X.
+ * Avoid terminating buffers within evenly-aligned
+ * dwords. */
+ bufend = (unsigned long)
+ page_to_phys(skb_frag_page(frag));
+ bufend += offset + size - 1;
+ if (unlikely(adapter->pcix_82544 &&
+ !(bufend & 4) &&
+ size > 4))
+ size -= 4;
+
+ buffer_info->length = size;
+ buffer_info->time_stamp = jiffies;
+ buffer_info->mapped_as_page = true;
+ buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
+ offset, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+ goto dma_error;
+ buffer_info->next_to_watch = i;
+
+ len -= size;
+ offset += size;
+ count++;
+ }
+ }
+
+ segs = skb_shinfo(skb)->gso_segs ?: 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
+
+ tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[i].segs = segs;
+ tx_ring->buffer_info[i].bytecount = bytecount;
+ tx_ring->buffer_info[first].next_to_watch = i;
+
+ return count;
+
+dma_error:
+ dev_err(&pdev->dev, "TX DMA map failed\n");
+ buffer_info->dma = 0;
+ if (count)
+ count--;
+
+ while (count--) {
+ if (i == 0)
+ i += tx_ring->count;
+ i--;
+ buffer_info = &tx_ring->buffer_info[i];
+ e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+ }
+
+ return 0;
+}
+
+static void e1000_tx_queue(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring, int tx_flags,
+ int count)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_tx_desc *tx_desc = NULL;
+ struct e1000_buffer *buffer_info;
+ u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
+ unsigned int i;
+
+ if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
+ txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
+ E1000_TXD_CMD_TSE;
+ txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+
+ if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
+ txd_upper |= E1000_TXD_POPTS_IXSM << 8;
+ }
+
+ if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
+ txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+ txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+ }
+
+ if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
+ txd_lower |= E1000_TXD_CMD_VLE;
+ txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
+ }
+
+ i = tx_ring->next_to_use;
+
+ while (count--) {
+ buffer_info = &tx_ring->buffer_info[i];
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+ tx_desc->lower.data =
+ cpu_to_le32(txd_lower | buffer_info->length);
+ tx_desc->upper.data = cpu_to_le32(txd_upper);
+ if (unlikely(++i == tx_ring->count)) i = 0;
+ }
+
+ tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+
+ tx_ring->next_to_use = i;
+ writel(i, hw->hw_addr + tx_ring->tdt);
+ /* we need this if more than one processor can write to our tail
+ * at a time; it synchronizes IO on IA64/Altix systems */
+ mmiowb();
+}
+
+/**
+ * 82547 workaround to avoid controller hang in half-duplex environment.
+ * The workaround is to avoid queuing a large packet that would span
+ * the internal Tx FIFO ring boundary by notifying the stack to resend
+ * the packet at a later time. This gives the Tx FIFO an opportunity to
+ * flush all packets. When that occurs, we reset the Tx FIFO pointers
+ * to the beginning of the Tx FIFO.
+ **/
+
+#define E1000_FIFO_HDR 0x10
+#define E1000_82547_PAD_LEN 0x3E0
+
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+ struct sk_buff *skb)
+{
+ u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
+ u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
+
+ skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
+
+ if (adapter->link_duplex != HALF_DUPLEX)
+ goto no_fifo_stall_required;
+
+ if (atomic_read(&adapter->tx_fifo_stall))
+ return 1;
+
+ if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
+ atomic_set(&adapter->tx_fifo_stall, 1);
+ return 1;
+ }
+
+no_fifo_stall_required:
+ adapter->tx_fifo_head += skb_fifo_len;
+ if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
+ adapter->tx_fifo_head -= adapter->tx_fifo_size;
+ return 0;
+}
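A worked pass through the check above, with illustrative values (the FIFO size depends on the adapter configuration and is not quoted from this patch):

/* tx_fifo_size = 0x2000, tx_fifo_head = 0x1800 -> fifo_space = 0x800
 * skb->len = 1514 -> skb_fifo_len = ALIGN(1514 + 0x10, 0x10) = 0x600
 * 0x600 < E1000_82547_PAD_LEN + fifo_space = 0x3E0 + 0x800 = 0xBE0,
 * so no stall is required and tx_fifo_head advances to 0x1E00.
 */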
+
+static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+
+ netif_stop_queue(netdev);
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it. */
+ smp_mb();
+
+ /* We need to check again in case another CPU has just
+ * made room available. */
+ if (likely(E1000_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! */
+ netif_start_queue(netdev);
+ ++adapter->restart_queue;
+ return 0;
+}
+
+static int e1000_maybe_stop_tx(struct net_device *netdev,
+ struct e1000_tx_ring *tx_ring, int size)
+{
+ if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __e1000_maybe_stop_tx(netdev, size);
+}
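The stop-then-recheck sequence above is one half of the usual lockless stop/wake pattern; the other half is the smp_mb() before the wake check in e1000_clean_tx_irq(). In schematic form (pseudo-code, not driver code):

/* Producer (xmit path):            Consumer (Tx clean path):
 *   stop queue                       advance next_to_clean
 *   smp_mb()                         smp_mb()
 *   re-read free descriptor count    re-read queue-stopped flag
 *   enough room? restart queue       stopped and room? wake queue
 *
 * The paired barriers guarantee at least one side observes the
 * other's update, so a wakeup can never be lost.
 */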
+
+#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
+static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_tx_ring *tx_ring;
+ unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
+ unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
+ unsigned int tx_flags = 0;
+ unsigned int len = skb_headlen(skb);
+ unsigned int nr_frags;
+ unsigned int mss;
+ int count = 0;
+ int tso;
+ unsigned int f;
+
+ /* This goes back to the question of how to logically map a tx queue
+ * to a flow. Right now, performance is impacted slightly negatively
+ * if using multiple tx queues. If the stack breaks away from a
+ * single qdisc implementation, we can look at this again. */
+ tx_ring = adapter->tx_ring;
+
+ if (unlikely(skb->len <= 0)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ mss = skb_shinfo(skb)->gso_size;
+ /* The controller does a simple calculation to
+ * make sure there is enough room in the FIFO before
+ * initiating the DMA for each buffer. The calc assumes
+ * ceil(buffer len / mss) <= 4, so to make sure we don't
+ * overrun the FIFO, cap the max buffer len at 4 * mss if
+ * mss drops. */
+ if (mss) {
+ u8 hdr_len;
+ max_per_txd = min(mss << 2, max_per_txd);
+ max_txd_pwr = fls(max_per_txd) - 1;
+
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ if (skb->data_len && hdr_len == len) {
+ switch (hw->mac_type) {
+ unsigned int pull_size;
+ case e1000_82544:
+ /* Make sure we have room to chop off 4 bytes,
+ * and that the end alignment will work out to
+ * this hardware's requirements.
+ * NOTE: this is a TSO-only workaround; if the
+ * end byte alignment is not correct, move us
+ * into the next dword. */
+ if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
+ break;
+ /* fall through */
+ pull_size = min((unsigned int)4, skb->data_len);
+ if (!__pskb_pull_tail(skb, pull_size)) {
+ e_err(drv, "__pskb_pull_tail "
+ "failed.\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ len = skb_headlen(skb);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+ }
+ }
+
+ /* reserve a descriptor for the offload context */
+ if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
+ count++;
+ count++;
+
+ /* Controller Erratum workaround */
+ if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
+ count++;
+
+ count += TXD_USE_COUNT(len, max_txd_pwr);
+
+ if (adapter->pcix_82544)
+ count++;
+
+ /* work-around for errata 10, which applies to all controllers
+ * in PCI-X mode, so add one more descriptor to the count
+ */
+ if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
+ (len > 2015)))
+ count++;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ for (f = 0; f < nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
+ max_txd_pwr);
+ if (adapter->pcix_82544)
+ count += nr_frags;
+
+ /* need: count + 2 desc gap to keep tail from touching
+ * head, otherwise try next time */
+ if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
+ return NETDEV_TX_BUSY;
+
+ if (unlikely((hw->mac_type == e1000_82547) &&
+ (e1000_82547_fifo_workaround(adapter, skb)))) {
+ netif_stop_queue(netdev);
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ schedule_delayed_work(&adapter->fifo_stall_task, 1);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (vlan_tx_tag_present(skb)) {
+ tx_flags |= E1000_TX_FLAGS_VLAN;
+ tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
+ }
+
+ first = tx_ring->next_to_use;
+
+ tso = e1000_tso(adapter, tx_ring, skb);
+ if (tso < 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (likely(tso)) {
+ if (likely(hw->mac_type != e1000_82544))
+ tx_ring->last_tx_tso = 1;
+ tx_flags |= E1000_TX_FLAGS_TSO;
+ } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
+ tx_flags |= E1000_TX_FLAGS_CSUM;
+
+ if (likely(skb->protocol == htons(ETH_P_IP)))
+ tx_flags |= E1000_TX_FLAGS_IPV4;
+
+ count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
+ nr_frags, mss);
+
+ if (count) {
+ e1000_tx_queue(adapter, tx_ring, tx_flags, count);
+ /* Make sure there is space in the ring for the next send. */
+ e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
+
+ } else {
+ dev_kfree_skb_any(skb);
+ tx_ring->buffer_info[first].time_stamp = 0;
+ tx_ring->next_to_use = first;
+ }
+
+ return NETDEV_TX_OK;
+}
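To make the descriptor budgeting above concrete, take an illustrative non-TSO skb with a 5000-byte linear area and one 3000-byte fragment, with max_txd_pwr = 12 (4096-byte chunks):

/* TXD_USE_COUNT(5000, 12) = (5000 >> 12) + 1 = 2 descriptors
 * TXD_USE_COUNT(3000, 12) = (3000 >> 12) + 1 = 1 descriptor
 * plus one descriptor reserved unconditionally, and one more for
 * the offload context when checksum or TSO is in play; the ring
 * is then asked for count + 2 free slots so the tail can never
 * touch the head.
 */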
+
+/**
+ * e1000_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+
+static void e1000_tx_timeout(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+}
+
+static void e1000_reset_task(struct work_struct *work)
+{
+ struct e1000_adapter *adapter =
+ container_of(work, struct e1000_adapter, reset_task);
+
+ if (test_bit(__E1000_DOWN, &adapter->flags))
+ return;
+ e1000_reinit_safe(adapter);
+}
+
+/**
+ * e1000_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the watchdog.
+ **/
+
+static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
+{
+ /* only return the current stats */
+ return &netdev->stats;
+}
+
+/**
+ * e1000_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+
+ if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
+ (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+ e_err(probe, "Invalid MTU setting\n");
+ return -EINVAL;
+ }
+
+ /* Adapter-specific max frame size limits. */
+ switch (hw->mac_type) {
+ case e1000_undefined ... e1000_82542_rev2_1:
+ if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
+ e_err(probe, "Jumbo Frames not supported.\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
+ break;
+ }
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+ /* e1000_down has a dependency on max_frame_size */
+ hw->max_frame_size = max_frame;
+ if (netif_running(netdev))
+ e1000_down(adapter);
+
+ /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ * means we reserve 2 more; this pushes us to allocate from the next
+ * larger slab size, i.e. RXBUFFER_2048 --> size-4096 slab.
+ * However, with the new *_jumbo_rx* routines, jumbo receives will use
+ * fragmented skbs. */
+
+ if (max_frame <= E1000_RXBUFFER_2048)
+ adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+ else
+#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
+ adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
+ adapter->rx_buffer_len = PAGE_SIZE;
+#endif
+
+ /* adjust allocation if LPE protects us, and we aren't using SBP */
+ if (!hw->tbi_compatibility_on &&
+ ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
+ (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
+ adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+
+ pr_info("%s changing MTU from %d to %d\n",
+ netdev->name, netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+
+ if (netif_running(netdev))
+ e1000_up(adapter);
+ else
+ e1000_reset(adapter);
+
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+
+ return 0;
+}
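Tracing the sizing logic above with two common MTUs (values illustrative):

/* MTU 1500 -> max_frame = 1500 + 14 + 4 = 1518
 *   fits E1000_RXBUFFER_2048, so 2048-byte buffers are used.
 * MTU 9000 -> max_frame = 9018
 *   selects E1000_RXBUFFER_16384 where PAGE_SIZE >= 16384,
 *   otherwise PAGE_SIZE; the *_jumbo_rx* path then assembles
 *   the frame from page fragments.
 */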
+
+/**
+ * e1000_update_stats - Update the board statistics counters
+ * @adapter: board private structure
+ **/
+
+void e1000_update_stats(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long flags;
+ u16 phy_tmp;
+
+#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
+
+ /*
+ * Prevent stats update while adapter is being reset, or if the pci
+ * connection is down.
+ */
+ if (adapter->link_speed == 0)
+ return;
+ if (pci_channel_offline(pdev))
+ return;
+
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+
+ /* these counters are modified from e1000_tbi_adjust_stats,
+ * called from the interrupt context, so they must only
+ * be written while holding adapter->stats_lock
+ */
+
+ adapter->stats.crcerrs += er32(CRCERRS);
+ adapter->stats.gprc += er32(GPRC);
+ adapter->stats.gorcl += er32(GORCL);
+ adapter->stats.gorch += er32(GORCH);
+ adapter->stats.bprc += er32(BPRC);
+ adapter->stats.mprc += er32(MPRC);
+ adapter->stats.roc += er32(ROC);
+
+ adapter->stats.prc64 += er32(PRC64);
+ adapter->stats.prc127 += er32(PRC127);
+ adapter->stats.prc255 += er32(PRC255);
+ adapter->stats.prc511 += er32(PRC511);
+ adapter->stats.prc1023 += er32(PRC1023);
+ adapter->stats.prc1522 += er32(PRC1522);
+
+ adapter->stats.symerrs += er32(SYMERRS);
+ adapter->stats.mpc += er32(MPC);
+ adapter->stats.scc += er32(SCC);
+ adapter->stats.ecol += er32(ECOL);
+ adapter->stats.mcc += er32(MCC);
+ adapter->stats.latecol += er32(LATECOL);
+ adapter->stats.dc += er32(DC);
+ adapter->stats.sec += er32(SEC);
+ adapter->stats.rlec += er32(RLEC);
+ adapter->stats.xonrxc += er32(XONRXC);
+ adapter->stats.xontxc += er32(XONTXC);
+ adapter->stats.xoffrxc += er32(XOFFRXC);
+ adapter->stats.xofftxc += er32(XOFFTXC);
+ adapter->stats.fcruc += er32(FCRUC);
+ adapter->stats.gptc += er32(GPTC);
+ adapter->stats.gotcl += er32(GOTCL);
+ adapter->stats.gotch += er32(GOTCH);
+ adapter->stats.rnbc += er32(RNBC);
+ adapter->stats.ruc += er32(RUC);
+ adapter->stats.rfc += er32(RFC);
+ adapter->stats.rjc += er32(RJC);
+ adapter->stats.torl += er32(TORL);
+ adapter->stats.torh += er32(TORH);
+ adapter->stats.totl += er32(TOTL);
+ adapter->stats.toth += er32(TOTH);
+ adapter->stats.tpr += er32(TPR);
+
+ adapter->stats.ptc64 += er32(PTC64);
+ adapter->stats.ptc127 += er32(PTC127);
+ adapter->stats.ptc255 += er32(PTC255);
+ adapter->stats.ptc511 += er32(PTC511);
+ adapter->stats.ptc1023 += er32(PTC1023);
+ adapter->stats.ptc1522 += er32(PTC1522);
+
+ adapter->stats.mptc += er32(MPTC);
+ adapter->stats.bptc += er32(BPTC);
+
+ /* used for adaptive IFS */
+
+ hw->tx_packet_delta = er32(TPT);
+ adapter->stats.tpt += hw->tx_packet_delta;
+ hw->collision_delta = er32(COLC);
+ adapter->stats.colc += hw->collision_delta;
+
+ if (hw->mac_type >= e1000_82543) {
+ adapter->stats.algnerrc += er32(ALGNERRC);
+ adapter->stats.rxerrc += er32(RXERRC);
+ adapter->stats.tncrs += er32(TNCRS);
+ adapter->stats.cexterr += er32(CEXTERR);
+ adapter->stats.tsctc += er32(TSCTC);
+ adapter->stats.tsctfc += er32(TSCTFC);
+ }
+
+ /* Fill out the OS statistics structure */
+ netdev->stats.multicast = adapter->stats.mprc;
+ netdev->stats.collisions = adapter->stats.colc;
+
+ /* Rx Errors */
+
+ /* RLEC on some newer hardware can be incorrect so build
+ * our own version based on RUC and ROC */
+ netdev->stats.rx_errors = adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc +
+ adapter->stats.cexterr;
+ adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
+ netdev->stats.rx_length_errors = adapter->stats.rlerrc;
+ netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
+ netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
+ netdev->stats.rx_missed_errors = adapter->stats.mpc;
+
+ /* Tx Errors */
+ adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
+ netdev->stats.tx_errors = adapter->stats.txerrc;
+ netdev->stats.tx_aborted_errors = adapter->stats.ecol;
+ netdev->stats.tx_window_errors = adapter->stats.latecol;
+ netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
+ if (hw->bad_tx_carr_stats_fd &&
+ adapter->link_duplex == FULL_DUPLEX) {
+ netdev->stats.tx_carrier_errors = 0;
+ adapter->stats.tncrs = 0;
+ }
+
+ /* Tx Dropped needs to be maintained elsewhere */
+
+ /* Phy Stats */
+ if (hw->media_type == e1000_media_type_copper) {
+ if ((adapter->link_speed == SPEED_1000) &&
+ (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
+ phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
+ adapter->phy_stats.idle_errors += phy_tmp;
+ }
+
+ if ((hw->mac_type <= e1000_82546) &&
+ (hw->phy_type == e1000_phy_m88) &&
+ !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
+ adapter->phy_stats.receive_errors += phy_tmp;
+ }
+
+ /* Management Stats */
+ if (hw->has_smbus) {
+ adapter->stats.mgptc += er32(MGTPTC);
+ adapter->stats.mgprc += er32(MGTPRC);
+ adapter->stats.mgpdc += er32(MGTPDC);
+ }
+
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+}
+
+/**
+ * e1000_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+
+static irqreturn_t e1000_intr(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 icr = er32(ICR);
+
+ if (unlikely(!icr))
+ return IRQ_NONE; /* Not our interrupt */
+
+ /*
+ * we might have caused the interrupt, but the above
+ * read cleared it; and just in case the driver is
+ * down, there is nothing to do, so return handled
+ */
+ if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
+ return IRQ_HANDLED;
+
+ if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
+ hw->get_link_status = 1;
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ schedule_delayed_work(&adapter->watchdog_task, 1);
+ }
+
+ /* disable interrupts, without the synchronize_irq bit */
+ ew32(IMC, ~0);
+ E1000_WRITE_FLUSH();
+
+ if (likely(napi_schedule_prep(&adapter->napi))) {
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __napi_schedule(&adapter->napi);
+ } else {
+ /* this really should not happen! If it does, it is basically a
+ * bug, but not a hard error, so enable ints and continue */
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_enable(adapter);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * e1000_clean - NAPI Rx polling callback
+ * @napi: napi struct contained inside the adapter's private structure
+ * @budget: maximum amount of work this poll is allowed to do
+ **/
+static int e1000_clean(struct napi_struct *napi, int budget)
+{
+ struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
+ int tx_clean_complete = 0, work_done = 0;
+
+ tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
+
+ adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
+
+ if (!tx_clean_complete)
+ work_done = budget;
+
+ /* If budget not fully consumed, exit the polling mode */
+ if (work_done < budget) {
+ if (likely(adapter->itr_setting & 3))
+ e1000_set_itr(adapter);
+ napi_complete(napi);
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_enable(adapter);
+ }
+
+ return work_done;
+}
+
+/**
+ * e1000_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ * @tx_ring: ring to clean
+ **/
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
+ struct e1000_tx_ring *tx_ring)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_tx_desc *tx_desc, *eop_desc;
+ struct e1000_buffer *buffer_info;
+ unsigned int i, eop;
+ unsigned int count = 0;
+ unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+
+ i = tx_ring->next_to_clean;
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = E1000_TX_DESC(*tx_ring, eop);
+
+ while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+ (count < tx_ring->count)) {
+ bool cleaned = false;
+ rmb(); /* read buffer_info after eop_desc */
+ for ( ; !cleaned; count++) {
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ cleaned = (i == eop);
+
+ if (cleaned) {
+ total_tx_packets += buffer_info->segs;
+ total_tx_bytes += buffer_info->bytecount;
+ }
+ e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+ tx_desc->upper.data = 0;
+
+ if (unlikely(++i == tx_ring->count)) i = 0;
+ }
+
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = E1000_TX_DESC(*tx_ring, eop);
+ }
+
+ tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD 32
+ if (unlikely(count && netif_carrier_ok(netdev) &&
+ E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+
+ if (netif_queue_stopped(netdev) &&
+ !(test_bit(__E1000_DOWN, &adapter->flags))) {
+ netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ }
+ }
+
+ if (adapter->detect_tx_hung) {
+ /* Detect a transmit hang in hardware; this serializes the
+ * check with the clearing of time_stamp and movement of i */
+ adapter->detect_tx_hung = false;
+ if (tx_ring->buffer_info[eop].time_stamp &&
+ time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
+ (adapter->tx_timeout_factor * HZ)) &&
+ !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+
+ /* detected Tx unit hang */
+ e_err(drv, "Detected Tx Unit Hang\n"
+ " Tx Queue <%lu>\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%x>\n"
+ " jiffies <%lx>\n"
+ " next_to_watch.status <%x>\n",
+ (unsigned long)(tx_ring - adapter->tx_ring),
+ readl(hw->hw_addr + tx_ring->tdh),
+ readl(hw->hw_addr + tx_ring->tdt),
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_ring->buffer_info[eop].time_stamp,
+ eop,
+ jiffies,
+ eop_desc->upper.fields.status);
+ netif_stop_queue(netdev);
+ }
+ }
+ adapter->total_tx_bytes += total_tx_bytes;
+ adapter->total_tx_packets += total_tx_packets;
+ netdev->stats.tx_bytes += total_tx_bytes;
+ netdev->stats.tx_packets += total_tx_packets;
+ return count < tx_ring->count;
+}
+
+/**
+ * e1000_rx_checksum - Receive Checksum Offload for 82543
+ * @adapter: board private structure
+ * @status_err: receive descriptor status and error fields
+ * @csum: receive descriptor csum field
+ * @skb: socket buffer with received data
+ **/
+
+static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
+ u32 csum, struct sk_buff *skb)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 status = (u16)status_err;
+ u8 errors = (u8)(status_err >> 24);
+
+ skb_checksum_none_assert(skb);
+
+ /* 82543 or newer only */
+ if (unlikely(hw->mac_type < e1000_82543)) return;
+ /* Ignore Checksum bit is set */
+ if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
+ /* TCP/UDP checksum error bit is set */
+ if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
+ /* let the stack verify checksum errors */
+ adapter->hw_csum_err++;
+ return;
+ }
+ /* TCP/UDP Checksum has not been calculated */
+ if (!(status & E1000_RXD_STAT_TCPCS))
+ return;
+
+ /* It must be a TCP or UDP packet with a valid checksum */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ adapter->hw_csum_good++;
+}
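The status_err word unpacked here is assembled at the call sites by shifting the descriptor's error byte into the top byte. A minimal sketch of that packing (the helper name is hypothetical; the arithmetic is copied from the caller in e1000_clean_jumbo_rx_irq() below):

#include <stdint.h>

/* Pack a receive descriptor's status and error bytes the way the
 * callers of e1000_rx_checksum() do: status in the low bits, the
 * error byte in bits 31:24 (recovered above with >> 24).
 */
static uint32_t rx_pack_status_err(uint8_t status, uint8_t errors)
{
	return (uint32_t)status | ((uint32_t)errors << 24);
}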
+
+/**
+ * e1000_consume_page - account a page fragment's bytes to an skb
+ * @bi: buffer whose page has just been attached to the skb
+ * @skb: skb being assembled
+ * @length: number of bytes consumed from the page
+ **/
+static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
+ u16 length)
+{
+ bi->page = NULL;
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+}
+
+/**
+ * e1000_receive_skb - helper function to handle rx indications
+ * @adapter: board private structure
+ * @status: descriptor status field as written by hardware
+ * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
+ * @skb: pointer to sk_buff to be indicated to stack
+ */
+static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
+ __le16 vlan, struct sk_buff *skb)
+{
+ skb->protocol = eth_type_trans(skb, adapter->netdev);
+
+ if (status & E1000_RXD_STAT_VP) {
+ u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+
+ __vlan_hwaccel_put_tag(skb, vid);
+ }
+ napi_gro_receive(&adapter->napi, skb);
+}
+
+/**
+ * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ * @rx_ring: ring to clean
+ * @work_done: amount of napi work completed this call
+ * @work_to_do: max amount of work allowed for this call to do
+ *
+ * The return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned.
+ */
+static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_rx_desc *rx_desc, *next_rxd;
+ struct e1000_buffer *buffer_info, *next_buffer;
+ unsigned long irq_flags;
+ u32 length;
+ unsigned int i;
+ int cleaned_count = 0;
+ bool cleaned = false;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (rx_desc->status & E1000_RXD_STAT_DD) {
+ struct sk_buff *skb;
+ u8 status;
+
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+ rmb(); /* read descriptor and rx_buffer_info after status DD */
+
+ status = rx_desc->status;
+ skb = buffer_info->skb;
+ buffer_info->skb = NULL;
+
+ if (++i == rx_ring->count) i = 0;
+ next_rxd = E1000_RX_DESC(*rx_ring, i);
+ prefetch(next_rxd);
+
+ next_buffer = &rx_ring->buffer_info[i];
+
+ cleaned = true;
+ cleaned_count++;
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+
+ length = le16_to_cpu(rx_desc->length);
+
+ /* errors is only valid for DD + EOP descriptors */
+ if (unlikely((status & E1000_RXD_STAT_EOP) &&
+ (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
+ u8 last_byte = *(skb->data + length - 1);
+ if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
+ last_byte)) {
+ spin_lock_irqsave(&adapter->stats_lock,
+ irq_flags);
+ e1000_tbi_adjust_stats(hw, &adapter->stats,
+ length, skb->data);
+ spin_unlock_irqrestore(&adapter->stats_lock,
+ irq_flags);
+ length--;
+ } else {
+ /* recycle both page and skb */
+ buffer_info->skb = skb;
+ /* an error means any chain goes out the window
+ * too */
+ if (rx_ring->rx_skb_top)
+ dev_kfree_skb(rx_ring->rx_skb_top);
+ rx_ring->rx_skb_top = NULL;
+ goto next_desc;
+ }
+ }
+
+#define rxtop rx_ring->rx_skb_top
+ if (!(status & E1000_RXD_STAT_EOP)) {
+ /* this descriptor is only the beginning (or middle) */
+ if (!rxtop) {
+ /* this is the beginning of a chain */
+ rxtop = skb;
+ skb_fill_page_desc(rxtop, 0, buffer_info->page,
+ 0, length);
+ } else {
+ /* this is the middle of a chain */
+ skb_fill_page_desc(rxtop,
+ skb_shinfo(rxtop)->nr_frags,
+ buffer_info->page, 0, length);
+ /* re-use the skb, only consumed the page */
+ buffer_info->skb = skb;
+ }
+ e1000_consume_page(buffer_info, rxtop, length);
+ goto next_desc;
+ } else {
+ if (rxtop) {
+ /* end of the chain */
+ skb_fill_page_desc(rxtop,
+ skb_shinfo(rxtop)->nr_frags,
+ buffer_info->page, 0, length);
+ /* re-use the current skb, we only consumed the
+ * page */
+ buffer_info->skb = skb;
+ skb = rxtop;
+ rxtop = NULL;
+ e1000_consume_page(buffer_info, skb, length);
+ } else {
+ /* no chain, got EOP, this buf is the packet;
+ * copybreak to save the put_page/alloc_page */
+ if (length <= copybreak &&
+ skb_tailroom(skb) >= length) {
+ u8 *vaddr;
+ vaddr = kmap_atomic(buffer_info->page,
+ KM_SKB_DATA_SOFTIRQ);
+ memcpy(skb_tail_pointer(skb), vaddr, length);
+ kunmap_atomic(vaddr,
+ KM_SKB_DATA_SOFTIRQ);
+ /* re-use the page, so don't erase
+ * buffer_info->page */
+ skb_put(skb, length);
+ } else {
+ skb_fill_page_desc(skb, 0,
+ buffer_info->page, 0,
+ length);
+ e1000_consume_page(buffer_info, skb,
+ length);
+ }
+ }
+ }
+
+ /* Receive Checksum Offload XXX recompute due to CRC strip? */
+ e1000_rx_checksum(adapter,
+ (u32)(status) |
+ ((u32)(rx_desc->errors) << 24),
+ le16_to_cpu(rx_desc->csum), skb);
+
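+ /* trim off the 4-byte Ethernet CRC that follows the payload */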
+ pskb_trim(skb, skb->len - 4);
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ /* eth type trans needs skb->data to point to something */
+ if (!pskb_may_pull(skb, ETH_HLEN)) {
+ e_err(drv, "pskb_may_pull failed.\n");
+ dev_kfree_skb(skb);
+ goto next_desc;
+ }
+
+ e1000_receive_skb(adapter, status, rx_desc->special, skb);
+
+next_desc:
+ rx_desc->status = 0;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+ adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+ }
+ rx_ring->next_to_clean = i;
+
+ cleaned_count = E1000_DESC_UNUSED(rx_ring);
+ if (cleaned_count)
+ adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+
+ adapter->total_rx_packets += total_rx_packets;
+ adapter->total_rx_bytes += total_rx_bytes;
+ netdev->stats.rx_bytes += total_rx_bytes;
+ netdev->stats.rx_packets += total_rx_packets;
+ return cleaned;
+}
+
+/*
+ * this should improve performance for small packets with large amounts
+ * of reassembly being done in the stack
+ */
+static void e1000_check_copybreak(struct net_device *netdev,
+ struct e1000_buffer *buffer_info,
+ u32 length, struct sk_buff **skb)
+{
+ struct sk_buff *new_skb;
+
+ if (length > copybreak)
+ return;
+
+ new_skb = netdev_alloc_skb_ip_align(netdev, length);
+ if (!new_skb)
+ return;
+
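+ /* copy from NET_IP_ALIGN bytes before the data so the new skb
+ * keeps the same IP-header alignment as the original buffer
+ */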
+ skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
+ (*skb)->data - NET_IP_ALIGN,
+ length + NET_IP_ALIGN);
+ /* save the skb in buffer_info as good */
+ buffer_info->skb = *skb;
+ *skb = new_skb;
+}
+
+/**
+ * e1000_clean_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ * @rx_ring: ring to clean
+ * @work_done: amount of napi work completed this call
+ * @work_to_do: max amount of work allowed for this call to do
+ */
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_rx_desc *rx_desc, *next_rxd;
+ struct e1000_buffer *buffer_info, *next_buffer;
+ unsigned long flags;
+ u32 length;
+ unsigned int i;
+ int cleaned_count = 0;
+ bool cleaned = false;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (rx_desc->status & E1000_RXD_STAT_DD) {
+ struct sk_buff *skb;
+ u8 status;
+
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+ rmb(); /* read descriptor and rx_buffer_info after status DD */
+
+ status = rx_desc->status;
+ skb = buffer_info->skb;
+ buffer_info->skb = NULL;
+
+ prefetch(skb->data - NET_IP_ALIGN);
+
+ if (++i == rx_ring->count)
+ i = 0;
+ next_rxd = E1000_RX_DESC(*rx_ring, i);
+ prefetch(next_rxd);
+
+ next_buffer = &rx_ring->buffer_info[i];
+
+ cleaned = true;
+ cleaned_count++;
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+
+ length = le16_to_cpu(rx_desc->length);
+ /* !EOP means multiple descriptors were used to store a single
+ * packet, if that's the case we need to toss it. In fact, we
+ * need to toss every packet with the EOP bit clear and the next
+ * frame that _does_ have the EOP bit set, as it is by
+ * definition only a frame fragment
+ */
+ if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+ adapter->discarding = true;
+
+ if (adapter->discarding) {
+ /* All receives must fit into a single buffer */
+ e_dbg("Receive packet consumed multiple buffers\n");
+ /* recycle */
+ buffer_info->skb = skb;
+ if (status & E1000_RXD_STAT_EOP)
+ adapter->discarding = false;
+ goto next_desc;
+ }
+
+ if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
+ u8 last_byte = *(skb->data + length - 1);
+ if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
+ last_byte)) {
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+ e1000_tbi_adjust_stats(hw, &adapter->stats,
+ length, skb->data);
+ spin_unlock_irqrestore(&adapter->stats_lock,
+ flags);
+ length--;
+ } else {
+ /* recycle */
+ buffer_info->skb = skb;
+ goto next_desc;
+ }
+ }
+
+ /* adjust length to remove Ethernet CRC, this must be
+ * done after the TBI_ACCEPT workaround above */
+ length -= 4;
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += length;
+ total_rx_packets++;
+
+ e1000_check_copybreak(netdev, buffer_info, length, &skb);
+
+ skb_put(skb, length);
+
+ /* Receive Checksum Offload */
+ e1000_rx_checksum(adapter,
+ (u32)(status) |
+ ((u32)(rx_desc->errors) << 24),
+ le16_to_cpu(rx_desc->csum), skb);
+
+ e1000_receive_skb(adapter, status, rx_desc->special, skb);
+
+next_desc:
+ rx_desc->status = 0;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+ adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+ }
+ rx_ring->next_to_clean = i;
+
+ cleaned_count = E1000_DESC_UNUSED(rx_ring);
+ if (cleaned_count)
+ adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+
+ adapter->total_rx_packets += total_rx_packets;
+ adapter->total_rx_bytes += total_rx_bytes;
+ netdev->stats.rx_bytes += total_rx_bytes;
+ netdev->stats.rx_packets += total_rx_packets;
+ return cleaned;
+}
+
+/**
+ * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
+ * @adapter: address of board private structure
+ * @rx_ring: pointer to receive ring structure
+ * @cleaned_count: number of buffers to allocate this pass
+ **/
+
+static void
+e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring, int cleaned_count)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_rx_desc *rx_desc;
+ struct e1000_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz = 256 - 16; /* for skb_reserve */
+
+ i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (cleaned_count--) {
+ skb = buffer_info->skb;
+ if (skb) {
+ skb_trim(skb, 0);
+ goto check_page;
+ }
+
+ skb = netdev_alloc_skb_ip_align(netdev, bufsz);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+
+ /* Fix for errata 23, can't cross 64kB boundary */
+ if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+ struct sk_buff *oldskb = skb;
+ e_err(rx_err, "skb align check failed: %u bytes at "
+ "%p\n", bufsz, skb->data);
+ /* Try again, without freeing the previous */
+ skb = netdev_alloc_skb_ip_align(netdev, bufsz);
+ /* Failed allocation, critical failure */
+ if (!skb) {
+ dev_kfree_skb(oldskb);
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+
+ if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+ /* give up */
+ dev_kfree_skb(skb);
+ dev_kfree_skb(oldskb);
+ break; /* while (cleaned_count--) */
+ }
+
+ /* Use new allocation */
+ dev_kfree_skb(oldskb);
+ }
+ buffer_info->skb = skb;
+ buffer_info->length = adapter->rx_buffer_len;
+check_page:
+ /* allocate a new page if necessary */
+ if (!buffer_info->page) {
+ buffer_info->page = alloc_page(GFP_ATOMIC);
+ if (unlikely(!buffer_info->page)) {
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+ }
+
+ if (!buffer_info->dma) {
+ buffer_info->dma = dma_map_page(&pdev->dev,
+ buffer_info->page, 0,
+ buffer_info->length,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ put_page(buffer_info->page);
+ dev_kfree_skb(skb);
+ buffer_info->page = NULL;
+ buffer_info->skb = NULL;
+ buffer_info->dma = 0;
+ adapter->alloc_rx_buff_failed++;
+ break; /* while (cleaned_count--) */
+ }
+ }
+
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
+ rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
+ }
+
+ if (likely(rx_ring->next_to_use != i)) {
+ rx_ring->next_to_use = i;
+ if (unlikely(i-- == 0))
+ i = (rx_ring->count - 1);
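+ /* after the decrement, i indexes the last descriptor actually
+ * initialized; that value (not next_to_use) is written to RDT
+ */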
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+ writel(i, adapter->hw.hw_addr + rx_ring->rdt);
+ }
+}
+
+/**
+ * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
+ * @adapter: address of board private structure
+ * @rx_ring: pointer to the receive ring to refill
+ * @cleaned_count: number of buffers to allocate this pass
+ **/
+
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_rx_desc *rx_desc;
+ struct e1000_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz = adapter->rx_buffer_len;
+
+ i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (cleaned_count--) {
+ skb = buffer_info->skb;
+ if (skb) {
+ skb_trim(skb, 0);
+ goto map_skb;
+ }
+
+ skb = netdev_alloc_skb_ip_align(netdev, bufsz);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+
+ /* Fix for errata 23, can't cross 64kB boundary */
+ if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+ struct sk_buff *oldskb = skb;
+ e_err(rx_err, "skb align check failed: %u bytes at "
+ "%p\n", bufsz, skb->data);
+ /* Try again, without freeing the previous */
+ skb = netdev_alloc_skb_ip_align(netdev, bufsz);
+ /* Failed allocation, critical failure */
+ if (!skb) {
+ dev_kfree_skb(oldskb);
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+
+ if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+ /* give up */
+ dev_kfree_skb(skb);
+ dev_kfree_skb(oldskb);
+ adapter->alloc_rx_buff_failed++;
+ break; /* while (cleaned_count--) */
+ }
+
+ /* Use new allocation */
+ dev_kfree_skb(oldskb);
+ }
+ buffer_info->skb = skb;
+ buffer_info->length = adapter->rx_buffer_len;
+map_skb:
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data,
+ buffer_info->length,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ dev_kfree_skb(skb);
+ buffer_info->skb = NULL;
+ buffer_info->dma = 0;
+ adapter->alloc_rx_buff_failed++;
+ break; /* while (cleaned_count--) */
+ }
+
+ /*
+ * XXX if it was allocated cleanly it will never map to a
+ * boundary crossing
+ */
+
+ /* Fix for errata 23, can't cross 64kB boundary */
+ if (!e1000_check_64k_bound(adapter,
+ (void *)(unsigned long)buffer_info->dma,
+ adapter->rx_buffer_len)) {
+ e_err(rx_err, "dma align check failed: %u bytes at "
+ "%p\n", adapter->rx_buffer_len,
+ (void *)(unsigned long)buffer_info->dma);
+ dev_kfree_skb(skb);
+ buffer_info->skb = NULL;
+
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+
+ adapter->alloc_rx_buff_failed++;
+ break; /* while (cleaned_count--) */
+ }
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
+ rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
+ }
+
+ if (likely(rx_ring->next_to_use != i)) {
+ rx_ring->next_to_use = i;
+ if (unlikely(i-- == 0))
+ i = (rx_ring->count - 1);
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+ writel(i, hw->hw_addr + rx_ring->rdt);
+ }
+}
+
+/**
+ * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
+ * @adapter: board private structure
+ **/
+
+static void e1000_smartspeed(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 phy_status;
+ u16 phy_ctrl;
+
+ if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
+ !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
+ return;
+
+ if (adapter->smartspeed == 0) {
+ /* If Master/Slave config fault is asserted twice,
+ * we assume back-to-back */
+ e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
+ if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
+ return;
+ e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
+ if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
+ return;
+ e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
+ if (phy_ctrl & CR_1000T_MS_ENABLE) {
+ phy_ctrl &= ~CR_1000T_MS_ENABLE;
+ e1000_write_phy_reg(hw, PHY_1000T_CTRL,
+ phy_ctrl);
+ adapter->smartspeed++;
+ if (!e1000_phy_setup_autoneg(hw) &&
+ !e1000_read_phy_reg(hw, PHY_CTRL,
+ &phy_ctrl)) {
+ phy_ctrl |= (MII_CR_AUTO_NEG_EN |
+ MII_CR_RESTART_AUTO_NEG);
+ e1000_write_phy_reg(hw, PHY_CTRL,
+ phy_ctrl);
+ }
+ }
+ return;
+ } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
+ /* If still no link, perhaps using 2/3 pair cable */
+ e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
+ phy_ctrl |= CR_1000T_MS_ENABLE;
+ e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
+ if (!e1000_phy_setup_autoneg(hw) &&
+ !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
+ phy_ctrl |= (MII_CR_AUTO_NEG_EN |
+ MII_CR_RESTART_AUTO_NEG);
+ e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
+ }
+ }
+ /* Restart process after E1000_SMARTSPEED_MAX iterations */
+ if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
+ adapter->smartspeed = 0;
+}
+
+/**
+ * e1000_ioctl - handle device-specific ioctl requests
+ * @netdev: network interface device structure
+ * @ifr: interface request structure holding the ioctl data
+ * @cmd: ioctl command number
+ **/
+
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return e1000_mii_ioctl(netdev, ifr, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * e1000_mii_ioctl - handle MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG)
+ * @netdev: network interface device structure
+ * @ifr: interface request structure holding the MII register data
+ * @cmd: ioctl command number
+ **/
+
+static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct mii_ioctl_data *data = if_mii(ifr);
+ int retval;
+ u16 mii_reg;
+ unsigned long flags;
+
+ if (hw->media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = hw->phy_addr;
+ break;
+ case SIOCGMIIREG:
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+ if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
+ &data->val_out)) {
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+ return -EIO;
+ }
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+ break;
+ case SIOCSMIIREG:
+ if (data->reg_num & ~(0x1F))
+ return -EFAULT;
+ mii_reg = data->val_in;
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+ if (e1000_write_phy_reg(hw, data->reg_num,
+ mii_reg)) {
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+ return -EIO;
+ }
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+ if (hw->media_type == e1000_media_type_copper) {
+ switch (data->reg_num) {
+ case PHY_CTRL:
+ if (mii_reg & MII_CR_POWER_DOWN)
+ break;
+ if (mii_reg & MII_CR_AUTO_NEG_EN) {
+ hw->autoneg = 1;
+ hw->autoneg_advertised = 0x2F;
+ } else {
+ u32 speed;
+ if (mii_reg & 0x40)
+ speed = SPEED_1000;
+ else if (mii_reg & 0x2000)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+ retval = e1000_set_spd_dplx(
+ adapter, speed,
+ ((mii_reg & 0x100)
+ ? DUPLEX_FULL :
+ DUPLEX_HALF));
+ if (retval)
+ return retval;
+ }
+ if (netif_running(adapter->netdev))
+ e1000_reinit_locked(adapter);
+ else
+ e1000_reset(adapter);
+ break;
+ case M88E1000_PHY_SPEC_CTRL:
+ case M88E1000_EXT_PHY_SPEC_CTRL:
+ if (e1000_phy_reset(hw))
+ return -EIO;
+ break;
+ }
+ } else {
+ switch (data->reg_num) {
+ case PHY_CTRL:
+ if (mii_reg & MII_CR_POWER_DOWN)
+ break;
+ if (netif_running(adapter->netdev))
+ e1000_reinit_locked(adapter);
+ else
+ e1000_reset(adapter);
+ break;
+ }
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return E1000_SUCCESS;
+}
+
+void e1000_pci_set_mwi(struct e1000_hw *hw)
+{
+ struct e1000_adapter *adapter = hw->back;
+ int ret_val = pci_set_mwi(adapter->pdev);
+
+ if (ret_val)
+ e_err(probe, "Error in setting MWI\n");
+}
+
+void e1000_pci_clear_mwi(struct e1000_hw *hw)
+{
+ struct e1000_adapter *adapter = hw->back;
+
+ pci_clear_mwi(adapter->pdev);
+}
+
+int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
+{
+ struct e1000_adapter *adapter = hw->back;
+ return pcix_get_mmrbc(adapter->pdev);
+}
+
+void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
+{
+ struct e1000_adapter *adapter = hw->back;
+ pcix_set_mmrbc(adapter->pdev, mmrbc);
+}
+
+void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
+{
+ outl(value, port);
+}
+
+static bool e1000_vlan_used(struct e1000_adapter *adapter)
+{
+ u16 vid;
+
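+ /* for_each_set_bit() visits only set bits, so entering the loop
+ * body at all means at least one VLAN ID is active
+ */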
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ return true;
+ return false;
+}
+
+static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
+ bool filter_on)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_disable(adapter);
+
+ if (filter_on) {
+ /* enable VLAN receive filtering */
+ rctl = er32(RCTL);
+ rctl &= ~E1000_RCTL_CFIEN;
+ if (!(adapter->netdev->flags & IFF_PROMISC))
+ rctl |= E1000_RCTL_VFE;
+ ew32(RCTL, rctl);
+ e1000_update_mng_vlan(adapter);
+ } else {
+ /* disable VLAN receive filtering */
+ rctl = er32(RCTL);
+ rctl &= ~E1000_RCTL_VFE;
+ ew32(RCTL, rctl);
+ }
+
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_enable(adapter);
+}
+
+static void e1000_vlan_mode(struct net_device *netdev, u32 features)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl;
+
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_disable(adapter);
+
+ ctrl = er32(CTRL);
+ if (features & NETIF_F_HW_VLAN_RX) {
+ /* enable VLAN tag insert/strip */
+ ctrl |= E1000_CTRL_VME;
+ } else {
+ /* disable VLAN tag insert/strip */
+ ctrl &= ~E1000_CTRL_VME;
+ }
+ ew32(CTRL, ctrl);
+
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_enable(adapter);
+}
+
+static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vfta, index;
+
+ if ((hw->mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+ (vid == adapter->mng_vlan_id))
+ return;
+
+ if (!e1000_vlan_used(adapter))
+ e1000_vlan_filter_on_off(adapter, true);
+
+ /* add VID to filter table */
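+ /* e.g. vid 1234 (0x4D2) selects VFTA[38] (1234 >> 5), bit 18
+ * (1234 & 0x1F); 128 32-bit registers cover all 4096 VIDs
+ */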
+ index = (vid >> 5) & 0x7F;
+ vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
+ vfta |= (1 << (vid & 0x1F));
+ e1000_write_vfta(hw, index, vfta);
+
+ set_bit(vid, adapter->active_vlans);
+}
+
+static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vfta, index;
+
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_disable(adapter);
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_enable(adapter);
+
+ /* remove VID from filter table */
+ index = (vid >> 5) & 0x7F;
+ vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
+ vfta &= ~(1 << (vid & 0x1F));
+ e1000_write_vfta(hw, index, vfta);
+
+ clear_bit(vid, adapter->active_vlans);
+
+ if (!e1000_vlan_used(adapter))
+ e1000_vlan_filter_on_off(adapter, false);
+}
+
+static void e1000_restore_vlan(struct e1000_adapter *adapter)
+{
+ u16 vid;
+
+ if (!e1000_vlan_used(adapter))
+ return;
+
+ e1000_vlan_filter_on_off(adapter, true);
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ e1000_vlan_rx_add_vid(adapter->netdev, vid);
+}
+
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ hw->autoneg = 0;
+
+ /* Make sure dplx is at most 1 bit and lsb of speed is not set
+ * for the switch() below to work */
+ if ((spd & 1) || (dplx & ~1))
+ goto err_inval;
+
+ /* Fiber NICs only allow 1000 Mbps Full duplex */
+ if ((hw->media_type == e1000_media_type_fiber) &&
+ (spd != SPEED_1000 ||
+ dplx != DUPLEX_FULL))
+ goto err_inval;
+
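+ /* The case sums below are unambiguous: every supported speed is
+ * even and, per ethtool's encoding, dplx is 0 (half) or 1 (full),
+ * so e.g. SPEED_100 + DUPLEX_FULL == 101 can only mean 100/Full.
+ */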
+ switch (spd + dplx) {
+ case SPEED_10 + DUPLEX_HALF:
+ hw->forced_speed_duplex = e1000_10_half;
+ break;
+ case SPEED_10 + DUPLEX_FULL:
+ hw->forced_speed_duplex = e1000_10_full;
+ break;
+ case SPEED_100 + DUPLEX_HALF:
+ hw->forced_speed_duplex = e1000_100_half;
+ break;
+ case SPEED_100 + DUPLEX_FULL:
+ hw->forced_speed_duplex = e1000_100_full;
+ break;
+ case SPEED_1000 + DUPLEX_FULL:
+ hw->autoneg = 1;
+ hw->autoneg_advertised = ADVERTISE_1000_FULL;
+ break;
+ case SPEED_1000 + DUPLEX_HALF: /* not supported */
+ default:
+ goto err_inval;
+ }
+ return 0;
+
+err_inval:
+ e_err(probe, "Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
+}
+
+static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl, ctrl_ext, rctl, status;
+ u32 wufc = adapter->wol;
+#ifdef CONFIG_PM
+ int retval = 0;
+#endif
+
+ netif_device_detach(netdev);
+
+ mutex_lock(&adapter->mutex);
+
+ if (netif_running(netdev)) {
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+ e1000_down(adapter);
+ }
+
+#ifdef CONFIG_PM
+ retval = pci_save_state(pdev);
+ if (retval) {
+ mutex_unlock(&adapter->mutex);
+ return retval;
+ }
+#endif
+
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU)
+ wufc &= ~E1000_WUFC_LNKC;
+
+ if (wufc) {
+ e1000_setup_rctl(adapter);
+ e1000_set_rx_mode(netdev);
+
+ /* turn on all-multi mode if wake on multicast is enabled */
+ if (wufc & E1000_WUFC_MC) {
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_MPE;
+ ew32(RCTL, rctl);
+ }
+
+ if (hw->mac_type >= e1000_82540) {
+ ctrl = er32(CTRL);
+ /* advertise wake from D3Cold */
+ #define E1000_CTRL_ADVD3WUC 0x00100000
+ /* phy power management enable */
+ #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
+ ctrl |= E1000_CTRL_ADVD3WUC |
+ E1000_CTRL_EN_PHY_PWR_MGMT;
+ ew32(CTRL, ctrl);
+ }
+
+ if (hw->media_type == e1000_media_type_fiber ||
+ hw->media_type == e1000_media_type_internal_serdes) {
+ /* keep the laser running in D3 */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
+ ew32(CTRL_EXT, ctrl_ext);
+ }
+
+ ew32(WUC, E1000_WUC_PME_EN);
+ ew32(WUFC, wufc);
+ } else {
+ ew32(WUC, 0);
+ ew32(WUFC, 0);
+ }
+
+ e1000_release_manageability(adapter);
+
+ *enable_wake = !!wufc;
+
+ /* make sure adapter isn't asleep if manageability is enabled */
+ if (adapter->en_mng_pt)
+ *enable_wake = true;
+
+ if (netif_running(netdev))
+ e1000_free_irq(adapter);
+
+ mutex_unlock(&adapter->mutex);
+
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int retval;
+ bool wake;
+
+ retval = __e1000_shutdown(pdev, &wake);
+ if (retval)
+ return retval;
+
+ if (wake) {
+ pci_prepare_to_sleep(pdev);
+ } else {
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+
+ return 0;
+}
+
+static int e1000_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ if (adapter->need_ioport)
+ err = pci_enable_device(pdev);
+ else
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ pr_err("Cannot enable PCI device from suspend\n");
+ return err;
+ }
+ pci_set_master(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ if (netif_running(netdev)) {
+ err = e1000_request_irq(adapter);
+ if (err)
+ return err;
+ }
+
+ e1000_power_up_phy(adapter);
+ e1000_reset(adapter);
+ ew32(WUS, ~0);
+
+ e1000_init_manageability(adapter);
+
+ if (netif_running(netdev))
+ e1000_up(adapter);
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+#endif
+
+static void e1000_shutdown(struct pci_dev *pdev)
+{
+ bool wake;
+
+ __e1000_shutdown(pdev, &wake);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void e1000_netpoll(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ disable_irq(adapter->pdev->irq);
+ e1000_intr(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+}
+#endif
+
+/**
+ * e1000_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ e1000_down(adapter);
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * e1000_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the e1000_resume routine.
+ */
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ if (adapter->need_ioport)
+ err = pci_enable_device(pdev);
+ else
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ pr_err("Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ e1000_reset(adapter);
+ ew32(WUS, ~0);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * e1000_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the e1000_resume routine.
+ */
+static void e1000_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ e1000_init_manageability(adapter);
+
+ if (netif_running(netdev)) {
+ if (e1000_up(adapter)) {
+ pr_info("can't bring device back up after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+}
+
+/* e1000_main.c */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_osdep.h b/drivers/net/ethernet/intel/e1000/e1000_osdep.h
new file mode 100644
index 000000000000..33e7c45a4fe4
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000/e1000_osdep.h
@@ -0,0 +1,109 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* glue for the OS independent part of e1000
+ * includes register access macros
+ */
+
+#ifndef _E1000_OSDEP_H_
+#define _E1000_OSDEP_H_
+
+#include <asm/io.h>
+
+#define CONFIG_RAM_BASE 0x60000
+#define GBE_CONFIG_OFFSET 0x0
+
+#define GBE_CONFIG_RAM_BASE \
+ ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
+
+#define GBE_CONFIG_BASE_VIRT \
+ ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE))
+
+#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
+ (iowrite16_rep(base + offset, data, count))
+
+#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \
+ (ioread16_rep(base + (offset << 1), data, count))
+
+#define er32(reg) \
+ (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
+ ? E1000_##reg : E1000_82542_##reg)))
+
+#define ew32(reg, value) \
+ (writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \
+ ? E1000_##reg : E1000_82542_##reg))))
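+
+/* Illustrative expansion: on a mac_type >= e1000_82543 part, er32(RCTL)
+ * reads 32 bits from hw->hw_addr + E1000_RCTL, while an 82542 uses the
+ * E1000_82542_RCTL offset instead; ew32() is the writel() counterpart.
+ */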
+
+#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+ writel((value), ((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+ ((offset) << 2))))
+
+#define E1000_READ_REG_ARRAY(a, reg, offset) ( \
+ readl((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+ ((offset) << 2)))
+
+#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
+#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
+
+#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
+ writew((value), ((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+ ((offset) << 1))))
+
+#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
+ readw((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+ ((offset) << 1)))
+
+#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
+ writeb((value), ((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+ (offset))))
+
+#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
+ readb((a)->hw_addr + \
+ (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+ (offset)))
+
+#define E1000_WRITE_FLUSH() er32(STATUS)
+
+#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \
+ writel((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_ICH_FLASH_REG(a, reg) ( \
+ readl((a)->flash_address + reg))
+
+#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \
+ writew((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \
+ readw((a)->flash_address + reg))
+
+#endif /* _E1000_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c
new file mode 100644
index 000000000000..1301eba8b57a
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000/e1000_param.c
@@ -0,0 +1,755 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2006 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define E1000_MAX_NIC 32
+
+#define OPTION_UNSET -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED 1
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
+#define E1000_PARAM(X, desc) \
+ static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
+ static unsigned int num_##X; \
+ module_param_array_named(X, X, int, &num_##X, 0); \
+ MODULE_PARM_DESC(X, desc);
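+
+/* For example, E1000_PARAM(TxDescriptors, "...") expands to a static
+ * int TxDescriptors[E1000_MAX_NIC + 1] array initialized to OPTION_UNSET,
+ * a num_TxDescriptors count, and the module_param_array_named() hookup,
+ * so users can pass per-NIC values such as TxDescriptors=1024,256.
+ */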
+
+/* Transmit Descriptor Count
+ *
+ * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
+ * Valid Range: 80-4096 for 82544 and newer
+ *
+ * Default Value: 256
+ */
+E1000_PARAM(TxDescriptors, "Number of transmit descriptors");
+
+/* Receive Descriptor Count
+ *
+ * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
+ * Valid Range: 80-4096 for 82544 and newer
+ *
+ * Default Value: 256
+ */
+E1000_PARAM(RxDescriptors, "Number of receive descriptors");
+
+/* User Specified Speed Override
+ *
+ * Valid Range: 0, 10, 100, 1000
+ * - 0 - auto-negotiate at all supported speeds
+ * - 10 - only link at 10 Mbps
+ * - 100 - only link at 100 Mbps
+ * - 1000 - only link at 1000 Mbps
+ *
+ * Default Value: 0
+ */
+E1000_PARAM(Speed, "Speed setting");
+
+/* User Specified Duplex Override
+ *
+ * Valid Range: 0-2
+ * - 0 - auto-negotiate for duplex
+ * - 1 - only link at half duplex
+ * - 2 - only link at full duplex
+ *
+ * Default Value: 0
+ */
+E1000_PARAM(Duplex, "Duplex setting");
+
+/* Auto-negotiation Advertisement Override
+ *
+ * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber)
+ *
+ * The AutoNeg value is a bit mask describing which speed and duplex
+ * combinations should be advertised during auto-negotiation.
+ * The supported speed and duplex modes are listed below
+ *
+ * Bit            7     6     5      4     3     2     1     0
+ * Speed (Mbps)  N/A   N/A   1000   N/A   100   100   10    10
+ * Duplex                    Full         Full  Half  Full  Half
+ *
+ * Default Value: 0x2F (copper); 0x20 (fiber)
+ */
+E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
+#define AUTONEG_ADV_DEFAULT 0x2F
+#define AUTONEG_ADV_MASK 0x2F
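+
+/* e.g. the default 0x2F sets bits 0-3 and 5, advertising 10/HD, 10/FD,
+ * 100/HD, 100/FD and 1000/FD (see the an_list table below)
+ */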
+
+/* User Specified Flow Control Override
+ *
+ * Valid Range: 0-3
+ * - 0 - No Flow Control
+ * - 1 - Rx only, respond to PAUSE frames but do not generate them
+ * - 2 - Tx only, generate PAUSE frames but ignore them on receive
+ * - 3 - Full Flow Control Support
+ *
+ * Default Value: Read flow control settings from the EEPROM
+ */
+E1000_PARAM(FlowControl, "Flow Control setting");
+#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
+
+/* XsumRX - Receive Checksum Offload Enable/Disable
+ *
+ * Valid Range: 0, 1
+ * - 0 - disables all checksum offload
+ * - 1 - enables receive IP/TCP/UDP checksum offload
+ * on 82543 and newer NICs
+ *
+ * Default Value: 1
+ */
+E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
+
+/* Transmit Interrupt Delay in units of 1.024 microseconds
+ * Tx interrupt delay typically needs to be set to something non-zero
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
+#define DEFAULT_TIDV 8
+#define MAX_TXDELAY 0xFFFF
+#define MIN_TXDELAY 0
+
+/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
+#define DEFAULT_TADV 32
+#define MAX_TXABSDELAY 0xFFFF
+#define MIN_TXABSDELAY 0
+
+/* Receive Interrupt Delay in units of 1.024 microseconds
+ * hardware will likely hang if you set this to anything but zero.
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
+#define DEFAULT_RDTR 0
+#define MAX_RXDELAY 0xFFFF
+#define MIN_RXDELAY 0
+
+/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
+#define DEFAULT_RADV 8
+#define MAX_RXABSDELAY 0xFFFF
+#define MIN_RXABSDELAY 0
+
+/* Interrupt Throttle Rate (interrupts/sec)
+ *
+ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
+ * 4=simplified, 2000-8000 ints mode)
+ */
+E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+#define DEFAULT_ITR 3
+#define MAX_ITR 100000
+#define MIN_ITR 100
+
+/* Enable Smart Power Down of the PHY
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled)
+ */
+E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
+
+struct e1000_option {
+ enum { enable_option, range_option, list_option } type;
+ const char *name;
+ const char *err;
+ int def;
+ union {
+ struct { /* range_option info */
+ int min;
+ int max;
+ } r;
+ struct { /* list_option info */
+ int nr;
+ const struct e1000_opt_list { int i; char *str; } *p;
+ } l;
+ } arg;
+};
+
+static int __devinit e1000_validate_option(unsigned int *value,
+ const struct e1000_option *opt,
+ struct e1000_adapter *adapter)
+{
+ if (*value == OPTION_UNSET) {
+ *value = opt->def;
+ return 0;
+ }
+
+ switch (opt->type) {
+ case enable_option:
+ switch (*value) {
+ case OPTION_ENABLED:
+ e_dev_info("%s Enabled\n", opt->name);
+ return 0;
+ case OPTION_DISABLED:
+ e_dev_info("%s Disabled\n", opt->name);
+ return 0;
+ }
+ break;
+ case range_option:
+ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+ e_dev_info("%s set to %i\n", opt->name, *value);
+ return 0;
+ }
+ break;
+ case list_option: {
+ int i;
+ const struct e1000_opt_list *ent;
+
+ for (i = 0; i < opt->arg.l.nr; i++) {
+ ent = &opt->arg.l.p[i];
+ if (*value == ent->i) {
+ if (ent->str[0] != '\0')
+ e_dev_info("%s\n", ent->str);
+ return 0;
+ }
+ }
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ e_dev_info("Invalid %s value specified (%i) %s\n",
+ opt->name, *value, opt->err);
+ *value = opt->def;
+ return -1;
+}
+
+static void e1000_check_fiber_options(struct e1000_adapter *adapter);
+static void e1000_check_copper_options(struct e1000_adapter *adapter);
+
+/**
+ * e1000_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input. If an invalid value is given, or if no user specified
+ * value exists, a default value is used. The final value is stored
+ * in a variable in the adapter structure.
+ **/
+
+void __devinit e1000_check_options(struct e1000_adapter *adapter)
+{
+ struct e1000_option opt;
+ int bd = adapter->bd_number;
+
+ if (bd >= E1000_MAX_NIC) {
+ e_dev_warn("Warning: no configuration for board #%i, "
+ "using defaults for all values\n", bd);
+ }
+
+ { /* Transmit Descriptor Count */
+ struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+ int i;
+ e1000_mac_type mac_type = adapter->hw.mac_type;
+
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Transmit Descriptors",
+ .err = "using default of "
+ __MODULE_STRING(E1000_DEFAULT_TXD),
+ .def = E1000_DEFAULT_TXD,
+ .arg = { .r = {
+ .min = E1000_MIN_TXD,
+ .max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD
+ }}
+ };
+
+ if (num_TxDescriptors > bd) {
+ tx_ring->count = TxDescriptors[bd];
+ e1000_validate_option(&tx_ring->count, &opt, adapter);
+ tx_ring->count = ALIGN(tx_ring->count,
+ REQ_TX_DESCRIPTOR_MULTIPLE);
+ } else {
+ tx_ring->count = opt.def;
+ }
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ tx_ring[i].count = tx_ring->count;
+ }
+ { /* Receive Descriptor Count */
+ struct e1000_rx_ring *rx_ring = adapter->rx_ring;
+ int i;
+ e1000_mac_type mac_type = adapter->hw.mac_type;
+
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Receive Descriptors",
+ .err = "using default of "
+ __MODULE_STRING(E1000_DEFAULT_RXD),
+ .def = E1000_DEFAULT_RXD,
+ .arg = { .r = {
+ .min = E1000_MIN_RXD,
+ .max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD
+ }}
+ };
+
+ if (num_RxDescriptors > bd) {
+ rx_ring->count = RxDescriptors[bd];
+ e1000_validate_option(&rx_ring->count, &opt, adapter);
+ rx_ring->count = ALIGN(rx_ring->count,
+ REQ_RX_DESCRIPTOR_MULTIPLE);
+ } else {
+ rx_ring->count = opt.def;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ rx_ring[i].count = rx_ring->count;
+ }
+ { /* Checksum Offload Enable/Disable */
+ opt = (struct e1000_option) {
+ .type = enable_option,
+ .name = "Checksum Offload",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+
+ if (num_XsumRX > bd) {
+ unsigned int rx_csum = XsumRX[bd];
+ e1000_validate_option(&rx_csum, &opt, adapter);
+ adapter->rx_csum = rx_csum;
+ } else {
+ adapter->rx_csum = opt.def;
+ }
+ }
+ { /* Flow Control */
+
+ static const struct e1000_opt_list fc_list[] = {
+ { E1000_FC_NONE, "Flow Control Disabled" },
+ { E1000_FC_RX_PAUSE, "Flow Control Receive Only" },
+ { E1000_FC_TX_PAUSE, "Flow Control Transmit Only" },
+ { E1000_FC_FULL, "Flow Control Enabled" },
+ { E1000_FC_DEFAULT, "Flow Control Hardware Default" }
+ };
+
+ opt = (struct e1000_option) {
+ .type = list_option,
+ .name = "Flow Control",
+ .err = "reading default settings from EEPROM",
+ .def = E1000_FC_DEFAULT,
+ .arg = { .l = { .nr = ARRAY_SIZE(fc_list),
+ .p = fc_list }}
+ };
+
+ if (num_FlowControl > bd) {
+ unsigned int fc = FlowControl[bd];
+ e1000_validate_option(&fc, &opt, adapter);
+ adapter->hw.fc = adapter->hw.original_fc = fc;
+ } else {
+ adapter->hw.fc = adapter->hw.original_fc = opt.def;
+ }
+ }
+ { /* Transmit Interrupt Delay */
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Transmit Interrupt Delay",
+ .err = "using default of " __MODULE_STRING(DEFAULT_TIDV),
+ .def = DEFAULT_TIDV,
+ .arg = { .r = { .min = MIN_TXDELAY,
+ .max = MAX_TXDELAY }}
+ };
+
+ if (num_TxIntDelay > bd) {
+ adapter->tx_int_delay = TxIntDelay[bd];
+ e1000_validate_option(&adapter->tx_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->tx_int_delay = opt.def;
+ }
+ }
+ { /* Transmit Absolute Interrupt Delay */
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Transmit Absolute Interrupt Delay",
+ .err = "using default of " __MODULE_STRING(DEFAULT_TADV),
+ .def = DEFAULT_TADV,
+ .arg = { .r = { .min = MIN_TXABSDELAY,
+ .max = MAX_TXABSDELAY }}
+ };
+
+ if (num_TxAbsIntDelay > bd) {
+ adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
+ e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->tx_abs_int_delay = opt.def;
+ }
+ }
+ { /* Receive Interrupt Delay */
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Receive Interrupt Delay",
+ .err = "using default of " __MODULE_STRING(DEFAULT_RDTR),
+ .def = DEFAULT_RDTR,
+ .arg = { .r = { .min = MIN_RXDELAY,
+ .max = MAX_RXDELAY }}
+ };
+
+ if (num_RxIntDelay > bd) {
+ adapter->rx_int_delay = RxIntDelay[bd];
+ e1000_validate_option(&adapter->rx_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->rx_int_delay = opt.def;
+ }
+ }
+ { /* Receive Absolute Interrupt Delay */
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Receive Absolute Interrupt Delay",
+ .err = "using default of " __MODULE_STRING(DEFAULT_RADV),
+ .def = DEFAULT_RADV,
+ .arg = { .r = { .min = MIN_RXABSDELAY,
+ .max = MAX_RXABSDELAY }}
+ };
+
+ if (num_RxAbsIntDelay > bd) {
+ adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
+ e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->rx_abs_int_delay = opt.def;
+ }
+ }
+ { /* Interrupt Throttling Rate */
+ opt = (struct e1000_option) {
+ .type = range_option,
+ .name = "Interrupt Throttling Rate (ints/sec)",
+ .err = "using default of " __MODULE_STRING(DEFAULT_ITR),
+ .def = DEFAULT_ITR,
+ .arg = { .r = { .min = MIN_ITR,
+ .max = MAX_ITR }}
+ };
+
+ if (num_InterruptThrottleRate > bd) {
+ adapter->itr = InterruptThrottleRate[bd];
+ switch (adapter->itr) {
+ case 0:
+ e_dev_info("%s turned off\n", opt.name);
+ break;
+ case 1:
+ e_dev_info("%s set to dynamic mode\n",
+ opt.name);
+ adapter->itr_setting = adapter->itr;
+ adapter->itr = 20000;
+ break;
+ case 3:
+ e_dev_info("%s set to dynamic conservative "
+ "mode\n", opt.name);
+ adapter->itr_setting = adapter->itr;
+ adapter->itr = 20000;
+ break;
+ case 4:
+ e_dev_info("%s set to simplified "
+ "(2000-8000) ints mode\n", opt.name);
+ adapter->itr_setting = adapter->itr;
+ break;
+ default:
+ e1000_validate_option(&adapter->itr, &opt,
+ adapter);
+ /* save the setting, because the dynamic bits
+ * change itr.
+ * clear the lower two bits because they are
+ * used as control */
+ adapter->itr_setting = adapter->itr & ~3;
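+ /* e.g. a user value of 8003 is stored as 8000 */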
+ break;
+ }
+ } else {
+ adapter->itr_setting = opt.def;
+ adapter->itr = 20000;
+ }
+ }
+ { /* Smart Power Down */
+ opt = (struct e1000_option) {
+ .type = enable_option,
+ .name = "PHY Smart Power Down",
+ .err = "defaulting to Disabled",
+ .def = OPTION_DISABLED
+ };
+
+ if (num_SmartPowerDownEnable > bd) {
+ unsigned int spd = SmartPowerDownEnable[bd];
+ e1000_validate_option(&spd, &opt, adapter);
+ adapter->smart_power_down = spd;
+ } else {
+ adapter->smart_power_down = opt.def;
+ }
+ }
+
+ switch (adapter->hw.media_type) {
+ case e1000_media_type_fiber:
+ case e1000_media_type_internal_serdes:
+ e1000_check_fiber_options(adapter);
+ break;
+ case e1000_media_type_copper:
+ e1000_check_copper_options(adapter);
+ break;
+ default:
+ BUG();
+ }
+}
+
+/**
+ * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on fiber adapters
+ **/
+
+static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
+{
+ int bd = adapter->bd_number;
+ if (num_Speed > bd) {
+ e_dev_info("Speed not valid for fiber adapters, parameter "
+ "ignored\n");
+ }
+
+ if (num_Duplex > bd) {
+ e_dev_info("Duplex not valid for fiber adapters, parameter "
+ "ignored\n");
+ }
+
+ if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
+ e_dev_info("AutoNeg other than 1000/Full is not valid for fiber"
+ "adapters, parameter ignored\n");
+ }
+}
+
+/**
+ * e1000_check_copper_options - Range Checking for Link Options, Copper Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on copper adapters
+ **/
+
+static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
+{
+ struct e1000_option opt;
+ unsigned int speed, dplx, an;
+ int bd = adapter->bd_number;
+
+ { /* Speed */
+ static const struct e1000_opt_list speed_list[] = {
+ { 0, "" },
+ { SPEED_10, "" },
+ { SPEED_100, "" },
+ { SPEED_1000, "" }};
+
+ opt = (struct e1000_option) {
+ .type = list_option,
+ .name = "Speed",
+ .err = "parameter ignored",
+ .def = 0,
+ .arg = { .l = { .nr = ARRAY_SIZE(speed_list),
+ .p = speed_list }}
+ };
+
+ if (num_Speed > bd) {
+ speed = Speed[bd];
+ e1000_validate_option(&speed, &opt, adapter);
+ } else {
+ speed = opt.def;
+ }
+ }
+ { /* Duplex */
+ static const struct e1000_opt_list dplx_list[] = {
+ { 0, "" },
+ { HALF_DUPLEX, "" },
+ { FULL_DUPLEX, "" }};
+
+ opt = (struct e1000_option) {
+ .type = list_option,
+ .name = "Duplex",
+ .err = "parameter ignored",
+ .def = 0,
+ .arg = { .l = { .nr = ARRAY_SIZE(dplx_list),
+ .p = dplx_list }}
+ };
+
+ if (num_Duplex > bd) {
+ dplx = Duplex[bd];
+ e1000_validate_option(&dplx, &opt, adapter);
+ } else {
+ dplx = opt.def;
+ }
+ }
+
+ if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
+ e_dev_info("AutoNeg specified along with Speed or Duplex, "
+ "parameter ignored\n");
+ adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+ } else { /* Autoneg */
+ static const struct e1000_opt_list an_list[] =
+ #define AA "AutoNeg advertising "
+ {{ 0x01, AA "10/HD" },
+ { 0x02, AA "10/FD" },
+ { 0x03, AA "10/FD, 10/HD" },
+ { 0x04, AA "100/HD" },
+ { 0x05, AA "100/HD, 10/HD" },
+ { 0x06, AA "100/HD, 10/FD" },
+ { 0x07, AA "100/HD, 10/FD, 10/HD" },
+ { 0x08, AA "100/FD" },
+ { 0x09, AA "100/FD, 10/HD" },
+ { 0x0a, AA "100/FD, 10/FD" },
+ { 0x0b, AA "100/FD, 10/FD, 10/HD" },
+ { 0x0c, AA "100/FD, 100/HD" },
+ { 0x0d, AA "100/FD, 100/HD, 10/HD" },
+ { 0x0e, AA "100/FD, 100/HD, 10/FD" },
+ { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
+ { 0x20, AA "1000/FD" },
+ { 0x21, AA "1000/FD, 10/HD" },
+ { 0x22, AA "1000/FD, 10/FD" },
+ { 0x23, AA "1000/FD, 10/FD, 10/HD" },
+ { 0x24, AA "1000/FD, 100/HD" },
+ { 0x25, AA "1000/FD, 100/HD, 10/HD" },
+ { 0x26, AA "1000/FD, 100/HD, 10/FD" },
+ { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
+ { 0x28, AA "1000/FD, 100/FD" },
+ { 0x29, AA "1000/FD, 100/FD, 10/HD" },
+ { 0x2a, AA "1000/FD, 100/FD, 10/FD" },
+ { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
+ { 0x2c, AA "1000/FD, 100/FD, 100/HD" },
+ { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
+ { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
+ { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }};
+
+ opt = (struct e1000_option) {
+ .type = list_option,
+ .name = "AutoNeg",
+ .err = "parameter ignored",
+ .def = AUTONEG_ADV_DEFAULT,
+ .arg = { .l = { .nr = ARRAY_SIZE(an_list),
+ .p = an_list }}
+ };
+
+ if (num_AutoNeg > bd) {
+ an = AutoNeg[bd];
+ e1000_validate_option(&an, &opt, adapter);
+ } else {
+ an = opt.def;
+ }
+ adapter->hw.autoneg_advertised = an;
+ }
+
+ switch (speed + dplx) {
+ case 0:
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ if ((num_Speed > bd) && (speed != 0 || dplx != 0))
+ e_dev_info("Speed and duplex autonegotiation "
+ "enabled\n");
+ break;
+ case HALF_DUPLEX:
+ e_dev_info("Half Duplex specified without Speed\n");
+ e_dev_info("Using Autonegotiation at Half Duplex only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
+ ADVERTISE_100_HALF;
+ break;
+ case FULL_DUPLEX:
+ e_dev_info("Full Duplex specified without Speed\n");
+ e_dev_info("Using Autonegotiation at Full Duplex only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
+ ADVERTISE_100_FULL |
+ ADVERTISE_1000_FULL;
+ break;
+ case SPEED_10:
+ e_dev_info("10 Mbps Speed specified without Duplex\n");
+ e_dev_info("Using Autonegotiation at 10 Mbps only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
+ ADVERTISE_10_FULL;
+ break;
+ case SPEED_10 + HALF_DUPLEX:
+ e_dev_info("Forcing to 10 Mbps Half Duplex\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 0;
+ adapter->hw.forced_speed_duplex = e1000_10_half;
+ adapter->hw.autoneg_advertised = 0;
+ break;
+ case SPEED_10 + FULL_DUPLEX:
+ e_dev_info("Forcing to 10 Mbps Full Duplex\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 0;
+ adapter->hw.forced_speed_duplex = e1000_10_full;
+ adapter->hw.autoneg_advertised = 0;
+ break;
+ case SPEED_100:
+ e_dev_info("100 Mbps Speed specified without Duplex\n");
+ e_dev_info("Using Autonegotiation at 100 Mbps only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
+ ADVERTISE_100_FULL;
+ break;
+ case SPEED_100 + HALF_DUPLEX:
+ e_dev_info("Forcing to 100 Mbps Half Duplex\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 0;
+ adapter->hw.forced_speed_duplex = e1000_100_half;
+ adapter->hw.autoneg_advertised = 0;
+ break;
+ case SPEED_100 + FULL_DUPLEX:
+ e_dev_info("Forcing to 100 Mbps Full Duplex\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 0;
+ adapter->hw.forced_speed_duplex = e1000_100_full;
+ adapter->hw.autoneg_advertised = 0;
+ break;
+ case SPEED_1000:
+ e_dev_info("1000 Mbps Speed specified without Duplex\n");
+ goto full_duplex_only;
+ case SPEED_1000 + HALF_DUPLEX:
+ e_dev_info("Half Duplex is not supported at 1000 Mbps\n");
+ /* fall through */
+ case SPEED_1000 + FULL_DUPLEX:
+full_duplex_only:
+ e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex "
+ "only\n");
+ adapter->hw.autoneg = adapter->fc_autoneg = 1;
+ adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
+ break;
+ default:
+ BUG();
+ }
+
+ /* Speed, AutoNeg and MDI/MDI-X must all play nice */
+ if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
+ e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. "
+ "Setting MDI-X to a compatible value.\n");
+ }
+}
+
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
new file mode 100644
index 000000000000..e1159e54334a
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -0,0 +1,1515 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/*
+ * 80003ES2LAN Gigabit Ethernet Controller (Copper)
+ * 80003ES2LAN Gigabit Ethernet Controller (Serdes)
+ */
+
+#include "e1000.h"
+
+#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
+#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02
+#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10
+#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F
+
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
+#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
+
+#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
+#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
+#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
+
+#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C
+#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004
+
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
+#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
+
+#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8
+#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9
+
+/* GG82563 PHY Specific Status Register (Page 0, Register 16) */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Disabled */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000
+ /* 1=Reverse Auto-Negotiation */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK 0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26) */
+#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M
+ 1 = 50-80M
+ 2 = 80-110M
+ 3 = 110-140M
+ 4 = >140M */
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
+
+/* Max number of times Kumeran read/write should be validated */
+#define GG82563_MAX_KMRN_RETRY 0x5
+
+/* Power Management Control Register (Page 193, Register 20) */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001
+ /* 1=Enable SERDES Electrical Idle */
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
+
+/*
+ * A table for the GG82563 cable length where the range is defined
+ * with a lower bound at "index" and the upper bound at
+ * "index + 5".
+ */
+static const u16 e1000_gg82563_cable_length_table[] = {
+ 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
+#define GG82563_CABLE_LENGTH_TABLE_SIZE \
+ ARRAY_SIZE(e1000_gg82563_cable_length_table)
+
+static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw);
+static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
+static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
+static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw);
+static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw);
+static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex);
+static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw);
+static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 data);
+static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw);
+
+/**
+ * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ return 0;
+ }
+
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan;
+
+ phy->addr = 1;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 100;
+ phy->type = e1000_phy_gg82563;
+
+ /* This can only be done after all function pointers are setup. */
+ ret_val = e1000e_get_phy_id(hw);
+
+ /* Verify phy id */
+ if (phy->id != GG82563_E_PHY_ID)
+ return -E1000_ERR_PHY;
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = er32(EECD);
+ u16 size;
+
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+ switch (nvm->override) {
+ case e1000_nvm_override_spi_large:
+ nvm->page_size = 32;
+ nvm->address_bits = 16;
+ break;
+ case e1000_nvm_override_spi_small:
+ nvm->page_size = 8;
+ nvm->address_bits = 8;
+ break;
+ default:
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+ break;
+ }
+
+ nvm->type = e1000_nvm_eeprom_spi;
+
+ size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+ E1000_EECD_SIZE_EX_SHIFT);
+
+ /*
+ * Added to a constant, "size" becomes the left-shift value
+ * for setting word_size.
+ */
+ size += NVM_WORD_SIZE_BASE_SHIFT;
+
+ /* EEPROM access above 16k is unsupported */
+ if (size > 14)
+ size = 14;
+ nvm->word_size = 1 << size;
+
+ return 0;
+}
+
+/**
+ * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs.
+ * @adapter: pointer to the board private structure
+ **/
+static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_mac_operations *func = &mac->ops;
+
+ /* Set media type */
+ switch (adapter->pdev->device) {
+ case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ break;
+ default:
+ hw->phy.media_type = e1000_media_type_copper;
+ break;
+ }
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES;
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC supported; valid only if manageability features are enabled. */
+ mac->arc_subsystem_valid =
+ (er32(FWSM) & E1000_FWSM_MODE_MASK)
+ ? true : false;
+ /* Adaptive IFS not supported */
+ mac->adaptive_ifs = false;
+
+ /* check for link */
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
+ func->check_for_link = e1000e_check_for_copper_link;
+ break;
+ case e1000_media_type_fiber:
+ func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
+ func->check_for_link = e1000e_check_for_fiber_link;
+ break;
+ case e1000_media_type_internal_serdes:
+ func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
+ func->check_for_link = e1000e_check_for_serdes_link;
+ break;
+ default:
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* set lan id for port to determine which phy lock to use */
+ hw->mac.ops.set_lan_id(hw);
+
+ return 0;
+}
+
+static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ s32 rc;
+
+ rc = e1000_init_mac_params_80003es2lan(adapter);
+ if (rc)
+ return rc;
+
+ rc = e1000_init_nvm_params_80003es2lan(hw);
+ if (rc)
+ return rc;
+
+ rc = e1000_init_phy_params_80003es2lan(hw);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+/**
+ * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * A wrapper to acquire access rights to the correct PHY.
+ **/
+static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
+{
+ u16 mask;
+
+ mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+ return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ * e1000_release_phy_80003es2lan - Release rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * A wrapper to release access rights to the correct PHY.
+ **/
+static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
+{
+ u16 mask;
+
+ mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+ e1000_release_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ * e1000_acquire_mac_csr_80003es2lan - Acquire rights to access Kumeran register
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the semaphore to access the Kumeran interface.
+ *
+ **/
+static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw)
+{
+ u16 mask;
+
+ mask = E1000_SWFW_CSR_SM;
+
+ return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ * e1000_release_mac_csr_80003es2lan - Release rights to access Kumeran Register
+ * @hw: pointer to the HW structure
+ *
+ * Release the semaphore used to access the Kumeran interface
+ **/
+static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw)
+{
+ u16 mask;
+
+ mask = E1000_SWFW_CSR_SM;
+
+ e1000_release_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the semaphore to access the EEPROM.
+ **/
+static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000e_acquire_nvm(hw);
+
+ if (ret_val)
+ e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+
+ return ret_val;
+}
+
+/**
+ * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM
+ * @hw: pointer to the HW structure
+ *
+ * Release the semaphore used to access the EEPROM.
+ **/
+static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw)
+{
+ e1000e_release_nvm(hw);
+ e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
+ * will also specify which port we're acquiring the lock for.
+ **/
+static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 16;
+ s32 i = 0;
+ s32 timeout = 50;
+
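+ /*
+ * SW_FW_SYNC carries the software ownership flags in its low 16 bits
+ * and the matching firmware flags in the high 16 bits (hence
+ * fwmask = mask << 16); both must be clear before the resource can
+ * be claimed.
+ */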
+ while (i < timeout) {
+ if (e1000e_get_hw_semaphore(hw))
+ return -E1000_ERR_SWFW_SYNC;
+
+ swfw_sync = er32(SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask)))
+ break;
+
+ /*
+ * Firmware currently using resource (fwmask)
+ * or other software thread using resource (swmask)
+ */
+ e1000e_put_hw_semaphore(hw);
+ mdelay(5);
+ i++;
+ }
+
+ if (i == timeout) {
+ e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ return -E1000_ERR_SWFW_SYNC;
+ }
+
+ swfw_sync |= swmask;
+ ew32(SW_FW_SYNC, swfw_sync);
+
+ e1000e_put_hw_semaphore(hw);
+
+ return 0;
+}
+
+/**
+ * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to release
+ *
+ * Release the SW/FW semaphore used to access the PHY or NVM. The mask
+ * will also specify which port we're releasing the lock for.
+ **/
+static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+
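+ /* The HW semaphore must be held while our SW_FW_SYNC bits are cleared */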
+ while (e1000e_get_hw_semaphore(hw) != 0)
+ ; /* Empty */
+
+ swfw_sync = er32(SW_FW_SYNC);
+ swfw_sync &= ~mask;
+ ew32(SW_FW_SYNC, swfw_sync);
+
+ e1000e_put_hw_semaphore(hw);
+}
+
+/**
+ * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the register to read
+ * @data: pointer to the data returned from the operation
+ *
+ * Read the GG82563 PHY register.
+ **/
+static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+ u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u32 page_select;
+ u16 temp;
+
+ ret_val = e1000_acquire_phy_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Select Configuration Page */
+ if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+ page_select = GG82563_PHY_PAGE_SELECT;
+ } else {
+ /*
+ * Use Alternative Page Select register to access
+ * registers 30 and 31
+ */
+ page_select = GG82563_PHY_PAGE_SELECT_ALT;
+ }
+
+ temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
+ ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
+ if (ret_val) {
+ e1000_release_phy_80003es2lan(hw);
+ return ret_val;
+ }
+
+ if (hw->dev_spec.e80003es2lan.mdic_wa_enable) {
+ /*
+ * The "ready" bit in the MDIC register may be incorrectly set
+ * before the device has completed the "Page Select" MDI
+ * transaction. So we wait 200us after each MDI command...
+ */
+ udelay(200);
+
+ /* ...and verify the command was successful. */
+ ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
+
+ if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+ ret_val = -E1000_ERR_PHY;
+ e1000_release_phy_80003es2lan(hw);
+ return ret_val;
+ }
+
+ udelay(200);
+
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ udelay(200);
+ } else {
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+ }
+
+ e1000_release_phy_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the register to write
+ * @data: value to write to the register
+ *
+ * Write to the GG82563 PHY register.
+ **/
+static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+ u32 offset, u16 data)
+{
+ s32 ret_val;
+ u32 page_select;
+ u16 temp;
+
+ ret_val = e1000_acquire_phy_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Select Configuration Page */
+ if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+ page_select = GG82563_PHY_PAGE_SELECT;
+ } else {
+ /*
+ * Use Alternative Page Select register to access
+ * registers 30 and 31
+ */
+ page_select = GG82563_PHY_PAGE_SELECT_ALT;
+ }
+
+ temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
+ ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
+ if (ret_val) {
+ e1000_release_phy_80003es2lan(hw);
+ return ret_val;
+ }
+
+ if (hw->dev_spec.e80003es2lan.mdic_wa_enable) {
+ /*
+ * The "ready" bit in the MDIC register may be incorrectly set
+ * before the device has completed the "Page Select" MDI
+ * transaction. So we wait 200us after each MDI command...
+ */
+ udelay(200);
+
+ /* ...and verify the command was successful. */
+ ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
+
+ if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+ e1000_release_phy_80003es2lan(hw);
+ return -E1000_ERR_PHY;
+ }
+
+ udelay(200);
+
+ ret_val = e1000e_write_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ udelay(200);
+ } else {
+ ret_val = e1000e_write_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+ }
+
+ e1000_release_phy_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_nvm_80003es2lan - Write to ESB2 NVM
+ * @hw: pointer to the HW structure
+ * @offset: offset within the NVM to write to
+ * @words: number of words to write
+ * @data: buffer of data to write to the NVM
+ *
+ * Write "words" of data to the ESB2 NVM.
+ **/
+static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ return e1000e_write_nvm_spi(hw, offset, words, data);
+}
+
+/**
+ * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete
+ * @hw: pointer to the HW structure
+ *
+ * Wait a specific amount of time for manageability processes to complete.
+ * This is a function pointer entry point called by the phy module.
+ **/
+static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+ u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
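+ /* Each LAN port latches its own config-done flag in EEMNGCTL */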
+ if (hw->bus.func == 1)
+ mask = E1000_NVM_CFG_DONE_PORT_1;
+
+ while (timeout) {
+ if (er32(EEMNGCTL) & mask)
+ break;
+ usleep_range(1000, 2000);
+ timeout--;
+ }
+ if (!timeout) {
+ e_dbg("MNG configuration cycle has not completed.\n");
+ return -E1000_ERR_RESET;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex
+ * @hw: pointer to the HW structure
+ *
+ * Force the speed and duplex settings onto the PHY. This is a
+ * function pointer entry point called by the phy module.
+ **/
+static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ /*
+ * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+ * forced whenever speed and duplex are forced.
+ */
+ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO;
+ ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e_dbg("GG82563 PSCR: %X\n", phy_data);
+
+ ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ /* Reset the phy to commit changes. */
+ phy_data |= MII_CR_RESET;
+
+ ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ udelay(1);
+
+ if (hw->phy.autoneg_wait_to_complete) {
+ e_dbg("Waiting for forced speed/duplex link "
+ "on GG82563 phy.\n");
+
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ /*
+ * We didn't get link.
+ * Reset the DSP and cross our fingers.
+ */
+ ret_val = e1000e_phy_reset_dsp(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Try once more */
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * Resetting the phy means we need to verify the TX_CLK corresponds
+ * to the link speed. 10Mbps -> 2.5MHz, else 25MHz.
+ */
+ phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
+ if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED)
+ phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5;
+ else
+ phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
+
+ /*
+ * In addition, we must re-enable CRS on Tx for both half and full
+ * duplex.
+ */
+ phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+ ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_cable_length_80003es2lan - Set approximate cable length
+ * @hw: pointer to the HW structure
+ *
+ * Find the approximate cable length as measured by the GG82563 PHY.
+ * This is a function pointer entry point called by the phy module.
+ **/
+static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+ u16 phy_data, index;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
+ if (ret_val)
+ goto out;
+
+ index = phy_data & GG82563_DSPD_CABLE_LENGTH;
+
+ if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+ phy->min_cable_length = e1000_gg82563_cable_length_table[index];
+ phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5];
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_link_up_info_80003es2lan - Report speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to speed buffer
+ * @duplex: pointer to duplex buffer
+ *
+ * Retrieve the current speed and duplex configuration.
+ **/
+static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 ret_val;
+
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ ret_val = e1000e_get_speed_and_duplex_copper(hw,
+ speed,
+ duplex);
+ hw->phy.ops.cfg_on_link_up(hw);
+ } else {
+ ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw,
+ speed,
+ duplex);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_reset_hw_80003es2lan - Reset the ESB2 controller
+ * @hw: pointer to the HW structure
+ *
+ * Perform a global reset to the ESB2 controller.
+ **/
+static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ /*
+ * Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000e_disable_pcie_master(hw);
+ if (ret_val)
+ e_dbg("PCI-E Master disable polling has failed.\n");
+
+ e_dbg("Masking off all interrupts\n");
+ ew32(IMC, 0xffffffff);
+
+ ew32(RCTL, 0);
+ ew32(TCTL, E1000_TCTL_PSP);
+ e1e_flush();
+
+ usleep_range(10000, 20000);
+
+ ctrl = er32(CTRL);
+
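+ /*
+ * Hold the PHY semaphore across the global reset so the other port
+ * cannot start an MDIC transaction mid-reset (the assumed intent of
+ * the acquire/release pair below).
+ */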
+ ret_val = e1000_acquire_phy_80003es2lan(hw);
+ e_dbg("Issuing a global reset to MAC\n");
+ ew32(CTRL, ctrl | E1000_CTRL_RST);
+ e1000_release_phy_80003es2lan(hw);
+
+ ret_val = e1000e_get_auto_rd_done(hw);
+ if (ret_val)
+ /* We don't want to continue accessing MAC registers. */
+ return ret_val;
+
+ /* Clear any pending interrupt events. */
+ ew32(IMC, 0xffffffff);
+ er32(ICR);
+
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_hw_80003es2lan - Initialize the ESB2 controller
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the hw bits, LED, VFTA, MTA, link and hw counters.
+ **/
+static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 reg_data;
+ s32 ret_val;
+ u16 kum_reg_data;
+ u16 i;
+
+ e1000_initialize_hw_bits_80003es2lan(hw);
+
+ /* Initialize identification LED */
+ ret_val = e1000e_id_led_init(hw);
+ if (ret_val)
+ e_dbg("Error initializing identification LED\n");
+ /* This is not fatal and we should not stop init due to this */
+
+ /* Disabling VLAN filtering */
+ e_dbg("Initializing the IEEE VLAN\n");
+ mac->ops.clear_vfta(hw);
+
+ /* Setup the receive address. */
+ e1000e_init_rx_addrs(hw, mac->rar_entry_count);
+
+ /* Zero out the Multicast HASH table */
+ e_dbg("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+ /* Setup link and flow control */
+ ret_val = e1000e_setup_link(hw);
+
+ /* Disable IBIST slave mode (far-end loopback) */
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &kum_reg_data);
+ kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+ e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ kum_reg_data);
+
+ /* Set the transmit descriptor write-back policy */
+ reg_data = er32(TXDCTL(0));
+ reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
+ ew32(TXDCTL(0), reg_data);
+
+ /* ...for both queues. */
+ reg_data = er32(TXDCTL(1));
+ reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
+ ew32(TXDCTL(1), reg_data);
+
+ /* Enable retransmit on late collisions */
+ reg_data = er32(TCTL);
+ reg_data |= E1000_TCTL_RTLC;
+ ew32(TCTL, reg_data);
+
+ /* Configure Gigabit Carry Extend Padding */
+ reg_data = er32(TCTL_EXT);
+ reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
+ reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN;
+ ew32(TCTL_EXT, reg_data);
+
+ /* Configure Transmit Inter-Packet Gap */
+ reg_data = er32(TIPG);
+ reg_data &= ~E1000_TIPG_IPGT_MASK;
+ reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+ ew32(TIPG, reg_data);
+
+ reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001);
+ reg_data &= ~0x00100000;
+ E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data);
+
+ /* default to true to enable the MDIC W/A */
+ hw->dev_spec.e80003es2lan.mdic_wa_enable = true;
+
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET >>
+ E1000_KMRNCTRLSTA_OFFSET_SHIFT,
+ &i);
+ if (!ret_val) {
+ if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
+ E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
+ hw->dev_spec.e80003es2lan.mdic_wa_enable = false;
+ }
+
+ /*
+ * Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2
+ * @hw: pointer to the HW structure
+ *
+ * Initializes required hardware-dependent bits needed for normal operation.
+ **/
+static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ /* Transmit Descriptor Control 0 */
+ reg = er32(TXDCTL(0));
+ reg |= (1 << 22);
+ ew32(TXDCTL(0), reg);
+
+ /* Transmit Descriptor Control 1 */
+ reg = er32(TXDCTL(1));
+ reg |= (1 << 22);
+ ew32(TXDCTL(1), reg);
+
+ /* Transmit Arbitration Control 0 */
+ reg = er32(TARC(0));
+ reg &= ~(0xF << 27); /* 30:27 */
+ if (hw->phy.media_type != e1000_media_type_copper)
+ reg &= ~(1 << 20);
+ ew32(TARC(0), reg);
+
+ /* Transmit Arbitration Control 1 */
+ reg = er32(TARC(1));
+ if (er32(TCTL) & E1000_TCTL_MULR)
+ reg &= ~(1 << 28);
+ else
+ reg |= (1 << 28);
+ ew32(TARC(1), reg);
+}
+
+/**
+ * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link
+ * @hw: pointer to the HW structure
+ *
+ * Setup some GG82563 PHY registers for obtaining link
+ **/
+static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u32 ctrl_ext;
+ u16 data;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+ /* Use 25MHz for both link down and 1000Base-T for Tx clock. */
+ data |= GG82563_MSCR_TX_CLK_1000MBPS_25;
+
+ ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
+
+ switch (phy->mdix) {
+ case 1:
+ data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
+ break;
+ case 2:
+ data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
+ break;
+ case 0:
+ default:
+ data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
+ break;
+ }
+
+ /*
+ * Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+ if (phy->disable_polarity_correction)
+ data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+
+ ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ /* SW Reset the PHY so all changes take effect */
+ ret_val = e1000e_commit_phy(hw);
+ if (ret_val) {
+ e_dbg("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ /* Bypass Rx and Tx FIFO's */
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
+ E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
+ E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
+ &data);
+ if (ret_val)
+ return ret_val;
+ data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
+ ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL_2, data);
+ if (ret_val)
+ return ret_val;
+
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
+ ew32(CTRL_EXT, ctrl_ext);
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * Do not init these registers when the HW is in IAMT mode, since the
+ * firmware will have already initialized them. We only initialize
+ * them if the HW is not in IAMT mode.
+ */
+ if (!e1000e_check_mng_mode(hw)) {
+ /* Enable Electrical Idle on the PHY */
+ data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
+ ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+ ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /*
+ * Workaround: Disable padding in Kumeran interface in the MAC
+ * and in the PHY to avoid CRC errors.
+ */
+ ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= GG82563_ICR_DIS_PADDING;
+ ret_val = e1e_wphy(hw, GG82563_PHY_INBAND_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ return 0;
+}
+
+/**
+ * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2
+ * @hw: pointer to the HW structure
+ *
+ * Essentially a wrapper for setting up all things "copper" related.
+ * This is a function pointer entry point called by the mac module.
+ **/
+static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 reg_data;
+
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ew32(CTRL, ctrl);
+
+ /*
+ * Set the mac to wait the maximum time between each
+ * iteration and increase the max iterations when
+ * polling the phy; this fixes erroneous timeouts at 10Mbps.
+ */
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
+ 0xFFFF);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+ reg_data |= 0x3F;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+ reg_data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+ reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000e_setup_copper_link(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up
+ * @hw: pointer to the HW structure
+ * @duplex: current duplex setting
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * 10/100 operation.
+ **/
+static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 speed;
+ u16 duplex;
+
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed,
+ &duplex);
+ if (ret_val)
+ return ret_val;
+
+ if (speed == SPEED_1000)
+ ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw);
+ else
+ ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation
+ * @hw: pointer to the HW structure
+ * @duplex: current duplex setting
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * 10/100 operation.
+ **/
+static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
+{
+ s32 ret_val;
+ u32 tipg;
+ u32 i = 0;
+ u16 reg_data, reg_data2;
+
+ reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure Transmit Inter-Packet Gap */
+ tipg = er32(TIPG);
+ tipg &= ~E1000_TIPG_IPGT_MASK;
+ tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
+ ew32(TIPG, tipg);
+
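+ /*
+ * Kumeran-side PHY reads can return inconsistent data, so read the
+ * register until two consecutive reads agree, bounded by
+ * GG82563_MAX_KMRN_RETRY.
+ */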
+ do {
+ ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
+ if (ret_val)
+ return ret_val;
+ i++;
+ } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+ if (duplex == HALF_DUPLEX)
+ reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+ else
+ reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+ ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation
+ * @hw: pointer to the HW structure
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * gigabit operation.
+ **/
+static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 reg_data, reg_data2;
+ u32 tipg;
+ u32 i = 0;
+
+ reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure Transmit Inter-Packet Gap */
+ tipg = er32(TIPG);
+ tipg &= ~E1000_TIPG_IPGT_MASK;
+ tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+ ew32(TIPG, tipg);
+
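+ /* As in the 10/100 path, read until two consecutive reads agree */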
+ do {
+ ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
+ if (ret_val)
+ return ret_val;
+ i++;
+ } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+ reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+ ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_kmrn_reg_80003es2lan - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquire semaphore, then read the PHY register at offset
+ * using the kumeran interface. The information retrieved is stored in data.
+ * Release the semaphore before exiting.
+ **/
+static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 *data)
+{
+ u32 kmrnctrlsta;
+ s32 ret_val = 0;
+
+ ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
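+ /*
+ * Writing the offset with the REN bit set starts the read; after a
+ * short settling delay the result appears in the low 16 bits of
+ * KMRNCTRLSTA.
+ */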
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
+
+ udelay(2);
+
+ kmrnctrlsta = er32(KMRNCTRLSTA);
+ *data = (u16)kmrnctrlsta;
+
+ e1000_release_mac_csr_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_kmrn_reg_80003es2lan - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquire semaphore, then write the data to PHY register
+ * at the offset using the kumeran interface. Release semaphore
+ * before exiting.
+ **/
+static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 data)
+{
+ u32 kmrnctrlsta;
+ s32 ret_val = 0;
+
+ ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
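+ /* Unlike a read, the data to write rides in the low 16 bits directly */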
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | data;
+ ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
+
+ udelay(2);
+
+ e1000_release_mac_csr_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_mac_addr_80003es2lan - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ /*
+ * If there's an alternate MAC address place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off link during a
+ * driver unload, or when wake on LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw)
+{
+ /* If the management interface is not enabled, then power down */
+ if (!(hw->mac.ops.check_mng_mode(hw) ||
+ hw->phy.ops.check_reset_block(hw)))
+ e1000_power_down_phy_copper(hw);
+}
+
+/**
+ * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
+{
+ e1000e_clear_hw_cntrs_base(hw);
+
+ er32(PRC64);
+ er32(PRC127);
+ er32(PRC255);
+ er32(PRC511);
+ er32(PRC1023);
+ er32(PRC1522);
+ er32(PTC64);
+ er32(PTC127);
+ er32(PTC255);
+ er32(PTC511);
+ er32(PTC1023);
+ er32(PTC1522);
+
+ er32(ALGNERRC);
+ er32(RXERRC);
+ er32(TNCRS);
+ er32(CEXTERR);
+ er32(TSCTC);
+ er32(TSCTFC);
+
+ er32(MGTPRC);
+ er32(MGTPDC);
+ er32(MGTPTC);
+
+ er32(IAC);
+ er32(ICRXOC);
+
+ er32(ICRXPTC);
+ er32(ICRXATC);
+ er32(ICTXPTC);
+ er32(ICTXATC);
+ er32(ICTXQEC);
+ er32(ICTXQMTC);
+ er32(ICRXDMTC);
+}
+
+static const struct e1000_mac_operations es2_mac_ops = {
+ .read_mac_addr = e1000_read_mac_addr_80003es2lan,
+ .id_led_init = e1000e_id_led_init,
+ .blink_led = e1000e_blink_led_generic,
+ .check_mng_mode = e1000e_check_mng_mode_generic,
+ /* check_for_link dependent on media type */
+ .cleanup_led = e1000e_cleanup_led_generic,
+ .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan,
+ .get_bus_info = e1000e_get_bus_info_pcie,
+ .set_lan_id = e1000_set_lan_id_multi_port_pcie,
+ .get_link_up_info = e1000_get_link_up_info_80003es2lan,
+ .led_on = e1000e_led_on_generic,
+ .led_off = e1000e_led_off_generic,
+ .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
+ .write_vfta = e1000_write_vfta_generic,
+ .clear_vfta = e1000_clear_vfta_generic,
+ .reset_hw = e1000_reset_hw_80003es2lan,
+ .init_hw = e1000_init_hw_80003es2lan,
+ .setup_link = e1000e_setup_link,
+ /* setup_physical_interface dependent on media type */
+ .setup_led = e1000e_setup_led_generic,
+};
+
+static const struct e1000_phy_operations es2_phy_ops = {
+ .acquire = e1000_acquire_phy_80003es2lan,
+ .check_polarity = e1000_check_polarity_m88,
+ .check_reset_block = e1000e_check_reset_block_generic,
+ .commit = e1000e_phy_sw_reset,
+ .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan,
+ .get_cfg_done = e1000_get_cfg_done_80003es2lan,
+ .get_cable_length = e1000_get_cable_length_80003es2lan,
+ .get_info = e1000e_get_phy_info_m88,
+ .read_reg = e1000_read_phy_reg_gg82563_80003es2lan,
+ .release = e1000_release_phy_80003es2lan,
+ .reset = e1000e_phy_hw_reset_generic,
+ .set_d0_lplu_state = NULL,
+ .set_d3_lplu_state = e1000e_set_d3_lplu_state,
+ .write_reg = e1000_write_phy_reg_gg82563_80003es2lan,
+ .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
+};
+
+static const struct e1000_nvm_operations es2_nvm_ops = {
+ .acquire = e1000_acquire_nvm_80003es2lan,
+ .read = e1000e_read_nvm_eerd,
+ .release = e1000_release_nvm_80003es2lan,
+ .update = e1000e_update_nvm_checksum_generic,
+ .valid_led_default = e1000e_valid_led_default,
+ .validate = e1000e_validate_nvm_checksum_generic,
+ .write = e1000_write_nvm_80003es2lan,
+};
+
+const struct e1000_info e1000_es2_info = {
+ .mac = e1000_80003es2lan,
+ .flags = FLAG_HAS_HW_VLAN_FILTER
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_HAS_WOL
+ | FLAG_APME_IN_CTRL3
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_RX_NEEDS_RESTART /* errata */
+ | FLAG_TARC_SET_BIT_ZERO /* errata */
+ | FLAG_APME_CHECK_PORT_B
+ | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
+ | FLAG_TIPG_MEDIUM_FOR_80003ESLAN,
+ .flags2 = FLAG2_DMA_BURST,
+ .pba = 38,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+ .get_variants = e1000_get_variants_80003es2lan,
+ .mac_ops = &es2_mac_ops,
+ .phy_ops = &es2_phy_ops,
+ .nvm_ops = &es2_nvm_ops,
+};
+
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
new file mode 100644
index 000000000000..a3e65fd26e09
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -0,0 +1,2112 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/*
+ * 82571EB Gigabit Ethernet Controller
+ * 82571EB Gigabit Ethernet Controller (Copper)
+ * 82571EB Gigabit Ethernet Controller (Fiber)
+ * 82571EB Dual Port Gigabit Mezzanine Adapter
+ * 82571EB Quad Port Gigabit Mezzanine Adapter
+ * 82571PT Gigabit PT Quad Port Server ExpressModule
+ * 82572EI Gigabit Ethernet Controller (Copper)
+ * 82572EI Gigabit Ethernet Controller (Fiber)
+ * 82572EI Gigabit Ethernet Controller
+ * 82573V Gigabit Ethernet Controller (Copper)
+ * 82573E Gigabit Ethernet Controller (Copper)
+ * 82573L Gigabit Ethernet Controller
+ * 82574L Gigabit Network Connection
+ * 82583V Gigabit Network Connection
+ */
+
+#include "e1000.h"
+
+#define ID_LED_RESERVED_F746 0xF746
+#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_OFF1_ON2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
+#define E1000_BASE1000T_STATUS 10
+#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
+#define E1000_RECEIVE_ERROR_COUNTER 21
+#define E1000_RECEIVE_ERROR_MAX 0xFFFF
+
+#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
+
+static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
+static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
+static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
+static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw);
+static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
+static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
+static s32 e1000_setup_link_82571(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
+static void e1000_clear_vfta_82571(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
+static s32 e1000_led_on_82574(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active);
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active);
+
+/**
+ * e1000_init_phy_params_82571 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ return 0;
+ }
+
+ phy->addr = 1;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 100;
+
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_82571;
+
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ phy->type = e1000_phy_igp_2;
+ break;
+ case e1000_82573:
+ phy->type = e1000_phy_m88;
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ phy->type = e1000_phy_bm;
+ phy->ops.acquire = e1000_get_hw_semaphore_82574;
+ phy->ops.release = e1000_put_hw_semaphore_82574;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574;
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ }
+
+ /* This can only be done after all function pointers are setup. */
+ ret_val = e1000_get_phy_id_82571(hw);
+ if (ret_val) {
+ e_dbg("Error getting PHY ID\n");
+ return ret_val;
+ }
+
+ /* Verify phy id */
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ if (phy->id != IGP01E1000_I_PHY_ID)
+ ret_val = -E1000_ERR_PHY;
+ break;
+ case e1000_82573:
+ if (phy->id != M88E1111_I_PHY_ID)
+ ret_val = -E1000_ERR_PHY;
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ if (phy->id != BME1000_E_PHY_ID_R2)
+ ret_val = -E1000_ERR_PHY;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ break;
+ }
+
+ if (ret_val)
+ e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id);
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_82571 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = er32(EECD);
+ u16 size;
+
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+ switch (nvm->override) {
+ case e1000_nvm_override_spi_large:
+ nvm->page_size = 32;
+ nvm->address_bits = 16;
+ break;
+ case e1000_nvm_override_spi_small:
+ nvm->page_size = 8;
+ nvm->address_bits = 8;
+ break;
+ default:
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+ break;
+ }
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ if (((eecd >> 15) & 0x3) == 0x3) {
+ nvm->type = e1000_nvm_flash_hw;
+ nvm->word_size = 2048;
+ /*
+ * Autonomous Flash update bit must be cleared due
+ * to Flash update issue.
+ */
+ eecd &= ~E1000_EECD_AUPDEN;
+ ew32(EECD, eecd);
+ break;
+ }
+ /* Fall Through */
+ default:
+ nvm->type = e1000_nvm_eeprom_spi;
+ size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+ E1000_EECD_SIZE_EX_SHIFT);
+ /*
+ * Added to a constant, "size" becomes the left-shift value
+ * for setting word_size.
+ */
+ size += NVM_WORD_SIZE_BASE_SHIFT;
+
+ /* EEPROM access above 16k is unsupported */
+ if (size > 14)
+ size = 14;
+ nvm->word_size = 1 << size;
+ break;
+ }
+
+ /* Function Pointers */
+ switch (hw->mac.type) {
+ case e1000_82574:
+ case e1000_82583:
+ nvm->ops.acquire = e1000_get_hw_semaphore_82574;
+ nvm->ops.release = e1000_put_hw_semaphore_82574;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_init_mac_params_82571 - Init MAC func ptrs.
+ * @adapter: pointer to the board private structure
+ **/
+static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_mac_operations *func = &mac->ops;
+ u32 swsm = 0;
+ u32 swsm2 = 0;
+ bool force_clear_smbi = false;
+
+ /* Set media type */
+ switch (adapter->pdev->device) {
+ case E1000_DEV_ID_82571EB_FIBER:
+ case E1000_DEV_ID_82572EI_FIBER:
+ case E1000_DEV_ID_82571EB_QUAD_FIBER:
+ hw->phy.media_type = e1000_media_type_fiber;
+ break;
+ case E1000_DEV_ID_82571EB_SERDES:
+ case E1000_DEV_ID_82572EI_SERDES:
+ case E1000_DEV_ID_82571EB_SERDES_DUAL:
+ case E1000_DEV_ID_82571EB_SERDES_QUAD:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ break;
+ default:
+ hw->phy.media_type = e1000_media_type_copper;
+ break;
+ }
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES;
+ /* Adaptive IFS supported */
+ mac->adaptive_ifs = true;
+
+ /* check for link */
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ func->setup_physical_interface = e1000_setup_copper_link_82571;
+ func->check_for_link = e1000e_check_for_copper_link;
+ func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
+ break;
+ case e1000_media_type_fiber:
+ func->setup_physical_interface =
+ e1000_setup_fiber_serdes_link_82571;
+ func->check_for_link = e1000e_check_for_fiber_link;
+ func->get_link_up_info =
+ e1000e_get_speed_and_duplex_fiber_serdes;
+ break;
+ case e1000_media_type_internal_serdes:
+ func->setup_physical_interface =
+ e1000_setup_fiber_serdes_link_82571;
+ func->check_for_link = e1000_check_for_serdes_link_82571;
+ func->get_link_up_info =
+ e1000e_get_speed_and_duplex_fiber_serdes;
+ break;
+ default:
+ return -E1000_ERR_CONFIG;
+ }
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ func->set_lan_id = e1000_set_lan_id_single_port;
+ func->check_mng_mode = e1000e_check_mng_mode_generic;
+ func->led_on = e1000e_led_on_generic;
+ func->blink_led = e1000e_blink_led_generic;
+
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /*
+ * ARC supported; valid only if manageability features are
+ * enabled.
+ */
+ mac->arc_subsystem_valid =
+ (er32(FWSM) & E1000_FWSM_MODE_MASK)
+ ? true : false;
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ func->set_lan_id = e1000_set_lan_id_single_port;
+ func->check_mng_mode = e1000_check_mng_mode_82574;
+ func->led_on = e1000_led_on_82574;
+ break;
+ default:
+ func->check_mng_mode = e1000e_check_mng_mode_generic;
+ func->led_on = e1000e_led_on_generic;
+ func->blink_led = e1000e_blink_led_generic;
+
+ /* FWSM register */
+ mac->has_fwsm = true;
+ break;
+ }
+
+ /*
+ * Ensure that the inter-port SWSM.SMBI lock bit is clear before
+ * first NVM or PHY access. This should be done for single-port
+ * devices, and for one port only on dual-port devices so that
+ * for those devices we can still use the SMBI lock to synchronize
+ * inter-port accesses to the PHY & NVM.
+ */
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ swsm2 = er32(SWSM2);
+
+ if (!(swsm2 & E1000_SWSM2_LOCK)) {
+ /* Only do this for the first interface on this card */
+ ew32(SWSM2,
+ swsm2 | E1000_SWSM2_LOCK);
+ force_clear_smbi = true;
+ } else
+ force_clear_smbi = false;
+ break;
+ default:
+ force_clear_smbi = true;
+ break;
+ }
+
+ if (force_clear_smbi) {
+ /* Make sure SWSM.SMBI is clear */
+ swsm = er32(SWSM);
+ if (swsm & E1000_SWSM_SMBI) {
+ /* This bit should not be set on a first interface, and
+ * indicates that the bootagent or EFI code has
+ * improperly left this bit enabled
+ */
+ e_dbg("Please update your 82571 Bootagent\n");
+ }
+ ew32(SWSM, swsm & ~E1000_SWSM_SMBI);
+ }
+
+ /*
+ * Initialize device specific counter of SMBI acquisition
+ * timeouts.
+ */
+ hw->dev_spec.e82571.smb_counter = 0;
+
+ return 0;
+}
+
+static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ static int global_quad_port_a; /* global port a indication */
+ struct pci_dev *pdev = adapter->pdev;
+ int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
+ s32 rc;
+
+ rc = e1000_init_mac_params_82571(adapter);
+ if (rc)
+ return rc;
+
+ rc = e1000_init_nvm_params_82571(hw);
+ if (rc)
+ return rc;
+
+ rc = e1000_init_phy_params_82571(hw);
+ if (rc)
+ return rc;
+
+ /* tag quad port adapters first, it's used below */
+ switch (pdev->device) {
+ case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82571EB_QUAD_FIBER:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
+ case E1000_DEV_ID_82571PT_QUAD_COPPER:
+ adapter->flags |= FLAG_IS_QUAD_PORT;
+ /* mark the first port */
+ if (global_quad_port_a == 0)
+ adapter->flags |= FLAG_IS_QUAD_PORT_A;
+ /* Reset for multiple quad port adapters */
+ global_quad_port_a++;
+ if (global_quad_port_a == 4)
+ global_quad_port_a = 0;
+ break;
+ default:
+ break;
+ }
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82571:
+ /* these dual ports don't have WoL on port B at all */
+ if (((pdev->device == E1000_DEV_ID_82571EB_FIBER) ||
+ (pdev->device == E1000_DEV_ID_82571EB_SERDES) ||
+ (pdev->device == E1000_DEV_ID_82571EB_COPPER)) &&
+ (is_port_b))
+ adapter->flags &= ~FLAG_HAS_WOL;
+ /* quad ports only support WoL on port A */
+ if (adapter->flags & FLAG_IS_QUAD_PORT &&
+ (!(adapter->flags & FLAG_IS_QUAD_PORT_A)))
+ adapter->flags &= ~FLAG_HAS_WOL;
+ /* Does not support WoL on any port */
+ if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)
+ adapter->flags &= ~FLAG_HAS_WOL;
+ break;
+ case e1000_82573:
+ if (pdev->device == E1000_DEV_ID_82573L) {
+ adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
+ adapter->max_hw_frame_size = DEFAULT_JUMBO;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY registers and stores the PHY ID and possibly the PHY
+ * revision in the hardware structure.
+ **/
+static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_id = 0;
+
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ /*
+ * The 82571 firmware may still be configuring the PHY.
+ * In this case, we cannot access the PHY until the
+ * configuration is done. So we explicitly set the
+ * PHY ID.
+ */
+ phy->id = IGP01E1000_I_PHY_ID;
+ break;
+ case e1000_82573:
+ return e1000e_get_phy_id(hw);
+ case e1000_82574:
+ case e1000_82583:
+ ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
+ if (ret_val)
+ return ret_val;
+
+ phy->id = (u32)(phy_id << 16);
+ udelay(20);
+ ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
+ if (ret_val)
+ return ret_val;
+
+ phy->id |= (u32)(phy_id);
+ phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ **/
+static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
+{
+ u32 swsm;
+ s32 sw_timeout = hw->nvm.word_size + 1;
+ s32 fw_timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
+ /*
+ * If we have timed out 3 times trying to acquire
+ * the inter-port SMBI semaphore, there is old code
+ * operating on the other port, and it is not
+ * releasing SMBI. Modify the number of times that
+ * we try for the semaphore to interwork with this
+ * older code.
+ */
+ if (hw->dev_spec.e82571.smb_counter > 2)
+ sw_timeout = 1;
+
+ /* Get the SW semaphore */
+ while (i < sw_timeout) {
+ swsm = er32(SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ udelay(50);
+ i++;
+ }
+
+ if (i == sw_timeout) {
+ e_dbg("Driver can't access device - SMBI bit is set.\n");
+ hw->dev_spec.e82571.smb_counter++;
+ }
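+ /*
+ * Note: on SMBI timeout we deliberately fall through and still try
+ * the FW semaphore, so an older driver on the other port that holds
+ * SMBI cannot permanently lock this port out.
+ */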
+ /* Get the FW semaphore. */
+ for (i = 0; i < fw_timeout; i++) {
+ swsm = er32(SWSM);
+ ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (er32(SWSM) & E1000_SWSM_SWESMBI)
+ break;
+
+ udelay(50);
+ }
+
+ if (i == fw_timeout) {
+ /* Release semaphores */
+ e1000_put_hw_semaphore_82571(hw);
+ e_dbg("Driver can't access the NVM\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_put_hw_semaphore_82571 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ **/
+static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
+{
+ u32 swsm;
+
+ swsm = er32(SWSM);
+ swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+ ew32(SWSM, swsm);
+}
+
+/**
+ * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore during reset.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+ s32 ret_val = 0;
+ s32 i = 0;
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+ do {
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+ break;
+
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+ usleep_range(2000, 4000);
+ i++;
+ } while (i < MDIO_OWNERSHIP_TIMEOUT);
+
+ if (i == MDIO_OWNERSHIP_TIMEOUT) {
+ /* Release semaphores */
+ e1000_put_hw_semaphore_82573(hw);
+ e_dbg("Driver can't access the PHY\n");
+ ret_val = -E1000_ERR_PHY;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_put_hw_semaphore_82573 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used during reset.
+ *
+ **/
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+}
+
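+/*
+ * Software-side lock for the 82574/82583 semaphore paths below: the mutex
+ * serializes PHY/NVM accesses from this driver, while the EXTCNF_CTRL MDIO
+ * ownership bit arbitrates with firmware.
+ */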
+static DEFINE_MUTEX(swflag_mutex);
+
+/**
+ * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ mutex_lock(&swflag_mutex);
+ ret_val = e1000_get_hw_semaphore_82573(hw);
+ if (ret_val)
+ mutex_unlock(&swflag_mutex);
+ return ret_val;
+}
+
+/**
+ * e1000_put_hw_semaphore_82574 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ *
+ **/
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
+{
+ e1000_put_hw_semaphore_82573(hw);
+ mutex_unlock(&swflag_mutex);
+}
+
+/**
+ * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag.
+ * LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+ u32 data = er32(POEMB);
+
+ if (active)
+ data |= E1000_PHY_CTRL_D0A_LPLU;
+ else
+ data &= ~E1000_PHY_CTRL_D0A_LPLU;
+
+ ew32(POEMB, data);
+ return 0;
+}
+
+/**
+ * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * when active is true, else clear lplu for D3. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+ u32 data = er32(POEMB);
+
+ if (!active) {
+ data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+ } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) {
+ data |= E1000_PHY_CTRL_NOND0A_LPLU;
+ }
+
+ ew32(POEMB, data);
+ return 0;
+}
+
+/**
+ * e1000_acquire_nvm_82571 - Request for access to the EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * To gain access to the EEPROM, first we must obtain a hardware semaphore.
+ * Then for non-82573 hardware, set the EEPROM access request bit and wait
+ * for EEPROM access grant bit. If the access grant bit is not set, release
+ * hardware semaphore.
+ **/
+static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ ret_val = e1000_get_hw_semaphore_82571(hw);
+ if (ret_val)
+ return ret_val;
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ break;
+ default:
+ ret_val = e1000e_acquire_nvm(hw);
+ break;
+ }
+
+ if (ret_val)
+ e1000_put_hw_semaphore_82571(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_release_nvm_82571 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+static void e1000_release_nvm_82571(struct e1000_hw *hw)
+{
+ e1000e_release_nvm(hw);
+ e1000_put_hw_semaphore_82571(hw);
+}
+
+/**
+ * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * For non-82573 silicon, write data to EEPROM at offset using SPI interface.
+ *
+ * If e1000e_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 ret_val;
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data);
+ break;
+ case e1000_82571:
+ case e1000_82572:
+ ret_val = e1000e_write_nvm_spi(hw, offset, words, data);
+ break;
+ default:
+ ret_val = -E1000_ERR_NVM;
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_82571 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
+{
+ u32 eecd;
+ s32 ret_val;
+ u16 i;
+
+ ret_val = e1000e_update_nvm_checksum_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * If our NVM is an EEPROM, then we're done;
+ * otherwise, commit the checksum to the flash NVM.
+ */
+ if (hw->nvm.type != e1000_nvm_flash_hw)
+ return ret_val;
+
+ /* Check for pending operations. */
+ for (i = 0; i < E1000_FLASH_UPDATES; i++) {
+ usleep_range(1000, 2000);
+ if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
+ break;
+ }
+
+ if (i == E1000_FLASH_UPDATES)
+ return -E1000_ERR_NVM;
+
+ /* Reset the firmware if using STM opcode. */
+ if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) {
+ /*
+ * The enabling of and the actual reset must be done
+ * in two write cycles.
+ */
+ ew32(HICR, E1000_HICR_FW_RESET_ENABLE);
+ e1e_flush();
+ ew32(HICR, E1000_HICR_FW_RESET);
+ }
+
+ /* Commit the write to flash */
+ eecd = er32(EECD) | E1000_EECD_FLUPD;
+ ew32(EECD, eecd);
+
+ for (i = 0; i < E1000_FLASH_UPDATES; i++) {
+ usleep_range(1000, 2000);
+ if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
+ break;
+ }
+
+ if (i == E1000_FLASH_UPDATES)
+ return -E1000_ERR_NVM;
+
+ return 0;
+}
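
The flash commit above follows a poll/trigger/poll shape: wait out any pending update, set FLUPD, then wait for the hardware to clear it again, failing on either timeout. A standalone sketch of the same pattern against a simulated register (the bit value, poll budget, and self-clearing behavior are all illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define FAKE_FLUPD    0x00080000 /* stand-in for E1000_EECD_FLUPD */
    #define FLASH_UPDATES 2000       /* stand-in for the poll budget */

    static uint32_t fake_eecd;       /* simulated EECD register */

    /* Simulated hardware: a pending update self-clears after 3 reads. */
    static uint32_t read_eecd(void)
    {
        static int countdown = 3;

        if ((fake_eecd & FAKE_FLUPD) && --countdown == 0)
            fake_eecd &= ~FAKE_FLUPD;
        return fake_eecd;
    }

    static int flash_commit(void)
    {
        int i;

        /* Poll: wait for any pending update to finish. */
        for (i = 0; i < FLASH_UPDATES; i++)
            if (!(read_eecd() & FAKE_FLUPD))
                break;
        if (i == FLASH_UPDATES)
            return -1;

        /* Trigger: start the commit. */
        fake_eecd |= FAKE_FLUPD;

        /* Poll: wait for the hardware to clear the bit. */
        for (i = 0; i < FLASH_UPDATES; i++)
            if (!(read_eecd() & FAKE_FLUPD))
                break;
        return (i == FLASH_UPDATES) ? -1 : 0;
    }

    int main(void)
    {
        printf("commit %s\n", flash_commit() ? "timed out" : "done");
        return 0;
    }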
+
+/**
+ * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw)
+{
+ if (hw->nvm.type == e1000_nvm_flash_hw)
+ e1000_fix_nvm_checksum_82571(hw);
+
+ return e1000e_validate_nvm_checksum_generic(hw);
+}
+
+/**
+ * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * After checking for invalid values, poll the EEPROM to ensure the previous
+ * command has completed before trying to write the next word. After each
+ * write, poll for completion.
+ *
+ * If e1000e_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i, eewr = 0;
+ s32 ret_val = 0;
+
+ /*
+ * A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ e_dbg("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ for (i = 0; i < words; i++) {
+ eewr = (data[i] << E1000_NVM_RW_REG_DATA) |
+ ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
+ E1000_NVM_RW_REG_START;
+
+ ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
+ if (ret_val)
+ break;
+
+ ew32(EEWR, eewr);
+
+ ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
+ if (ret_val)
+ break;
+ }
+
+ return ret_val;
+}
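
Each iteration above packs the data word, the word address, and a START flag into one 32-bit EEWR command before polling for completion. A worked standalone example of the packing; the shift and flag values are assumptions quoted for illustration (the driver takes them from a header not shown in this hunk):

    #include <stdint.h>
    #include <stdio.h>

    #define NVM_RW_REG_DATA   16 /* assumed E1000_NVM_RW_REG_DATA */
    #define NVM_RW_ADDR_SHIFT 2  /* assumed E1000_NVM_RW_ADDR_SHIFT */
    #define NVM_RW_REG_START  1  /* assumed E1000_NVM_RW_REG_START */

    int main(void)
    {
        uint16_t offset = 0x10;  /* hypothetical word address */
        uint16_t data = 0xBEEF;  /* hypothetical word to write */
        uint32_t eewr = ((uint32_t)data << NVM_RW_REG_DATA) |
                        ((uint32_t)offset << NVM_RW_ADDR_SHIFT) |
                        NVM_RW_REG_START;

        printf("EEWR = 0x%08X\n", eewr); /* prints 0xBEEF0041 */
        return 0;
    }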
+
+/**
+ * e1000_get_cfg_done_82571 - Poll for configuration done
+ * @hw: pointer to the HW structure
+ *
+ * Polls the management control register until the config done bit is set.
+ **/
+static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+
+ while (timeout) {
+ if (er32(EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0)
+ break;
+ usleep_range(1000, 2000);
+ timeout--;
+ }
+ if (!timeout) {
+ e_dbg("MNG configuration cycle has not completed.\n");
+ return -E1000_ERR_RESET;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag. When activating LPLU
+ * this function also disables smart speed and vice versa. LPLU will not be
+ * activated unless the device autonegotiation advertisement meets standards
+ * of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function
+ * pointer entry point only called by PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (active) {
+ data |= IGP02E1000_PM_D0_LPLU;
+ ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+ if (ret_val)
+ return ret_val;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
+ if (ret_val)
+ return ret_val;
+ } else {
+ data &= ~IGP02E1000_PM_D0_LPLU;
+ ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_reset_hw_82571 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state.
+ **/
+static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
+{
+ u32 ctrl, ctrl_ext;
+ s32 ret_val;
+
+ /*
+ * Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000e_disable_pcie_master(hw);
+ if (ret_val)
+ e_dbg("PCI-E Master disable polling has failed.\n");
+
+ e_dbg("Masking off all interrupts\n");
+ ew32(IMC, 0xffffffff);
+
+ ew32(RCTL, 0);
+ ew32(TCTL, E1000_TCTL_PSP);
+ e1e_flush();
+
+ usleep_range(10000, 20000);
+
+ /*
+ * Must acquire the MDIO ownership before MAC reset.
+ * Ownership defaults to firmware after a reset.
+ */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ ret_val = e1000_get_hw_semaphore_82573(hw);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ ret_val = e1000_get_hw_semaphore_82574(hw);
+ break;
+ default:
+ break;
+ }
+ if (ret_val)
+ e_dbg("Cannot acquire MDIO ownership\n");
+
+ ctrl = er32(CTRL);
+
+ e_dbg("Issuing a global reset to MAC\n");
+ ew32(CTRL, ctrl | E1000_CTRL_RST);
+
+ /* Must release MDIO ownership and mutex after MAC reset. */
+ switch (hw->mac.type) {
+ case e1000_82574:
+ case e1000_82583:
+ e1000_put_hw_semaphore_82574(hw);
+ break;
+ default:
+ break;
+ }
+
+ if (hw->nvm.type == e1000_nvm_flash_hw) {
+ udelay(10);
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ ew32(CTRL_EXT, ctrl_ext);
+ e1e_flush();
+ }
+
+ ret_val = e1000e_get_auto_rd_done(hw);
+ if (ret_val)
+ /* We don't want to continue accessing MAC registers. */
+ return ret_val;
+
+ /*
+ * Phy configuration from NVM just starts after EECD_AUTO_RD is set.
+ * Need to wait for Phy configuration completion before accessing
+ * NVM and Phy.
+ */
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ msleep(25);
+ break;
+ default:
+ break;
+ }
+
+ /* Clear any pending interrupt events. */
+ ew32(IMC, 0xffffffff);
+ er32(ICR);
+
+ if (hw->mac.type == e1000_82571) {
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ e1000e_set_laa_state_82571(hw, true);
+ }
+
+ /* Reinitialize the 82571 serdes link state machine */
+ if (hw->phy.media_type == e1000_media_type_internal_serdes)
+ hw->mac.serdes_link_state = e1000_serdes_link_down;
+
+ return 0;
+}
+
+/**
+ * e1000_init_hw_82571 - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation.
+ **/
+static s32 e1000_init_hw_82571(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 reg_data;
+ s32 ret_val;
+ u16 i, rar_count = mac->rar_entry_count;
+
+ e1000_initialize_hw_bits_82571(hw);
+
+ /* Initialize identification LED */
+ ret_val = e1000e_id_led_init(hw);
+ if (ret_val)
+ e_dbg("Error initializing identification LED\n");
+ /* This is not fatal and we should not stop init due to this */
+
+ /* Disabling VLAN filtering */
+ e_dbg("Initializing the IEEE VLAN\n");
+ mac->ops.clear_vfta(hw);
+
+ /* Setup the receive address. */
+ /*
+ * If, however, a locally administered address was assigned to the
+ * 82571, we must reserve a RAR for it to work around an issue where
+ * resetting one port will reload the MAC on the other port.
+ */
+ if (e1000e_get_laa_state_82571(hw))
+ rar_count--;
+ e1000e_init_rx_addrs(hw, rar_count);
+
+ /* Zero out the Multicast HASH table */
+ e_dbg("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+ /* Setup link and flow control */
+ ret_val = e1000_setup_link_82571(hw);
+
+ /* Set the transmit descriptor write-back policy */
+ reg_data = er32(TXDCTL(0));
+ reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB |
+ E1000_TXDCTL_COUNT_DESC;
+ ew32(TXDCTL(0), reg_data);
+
+ /* ...for both queues. */
+ switch (mac->type) {
+ case e1000_82573:
+ e1000e_enable_tx_pkt_filtering(hw);
+ /* fall through */
+ case e1000_82574:
+ case e1000_82583:
+ reg_data = er32(GCR);
+ reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
+ ew32(GCR, reg_data);
+ break;
+ default:
+ reg_data = er32(TXDCTL(1));
+ reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB |
+ E1000_TXDCTL_COUNT_DESC;
+ ew32(TXDCTL(1), reg_data);
+ break;
+ }
+
+ /*
+ * Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_82571(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits
+ * @hw: pointer to the HW structure
+ *
+ * Initializes required hardware-dependent bits needed for normal operation.
+ **/
+static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ /* Transmit Descriptor Control 0 */
+ reg = er32(TXDCTL(0));
+ reg |= (1 << 22);
+ ew32(TXDCTL(0), reg);
+
+ /* Transmit Descriptor Control 1 */
+ reg = er32(TXDCTL(1));
+ reg |= (1 << 22);
+ ew32(TXDCTL(1), reg);
+
+ /* Transmit Arbitration Control 0 */
+ reg = er32(TARC(0));
+ reg &= ~(0xF << 27); /* 30:27 */
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
+ break;
+ default:
+ break;
+ }
+ ew32(TARC(0), reg);
+
+ /* Transmit Arbitration Control 1 */
+ reg = er32(TARC(1));
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ reg &= ~((1 << 29) | (1 << 30));
+ reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
+ if (er32(TCTL) & E1000_TCTL_MULR)
+ reg &= ~(1 << 28);
+ else
+ reg |= (1 << 28);
+ ew32(TARC(1), reg);
+ break;
+ default:
+ break;
+ }
+
+ /* Device Control */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ reg = er32(CTRL);
+ reg &= ~(1 << 29);
+ ew32(CTRL, reg);
+ break;
+ default:
+ break;
+ }
+
+ /* Extended Device Control */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ reg = er32(CTRL_EXT);
+ reg &= ~(1 << 23);
+ reg |= (1 << 22);
+ ew32(CTRL_EXT, reg);
+ break;
+ default:
+ break;
+ }
+
+ if (hw->mac.type == e1000_82571) {
+ reg = er32(PBA_ECC);
+ reg |= E1000_PBA_ECC_CORR_EN;
+ ew32(PBA_ECC, reg);
+ }
+
+ /*
+ * Workaround for a hardware erratum: ensure that DMA Dynamic
+ * Clock gating is disabled on 82571 and 82572 parts.
+ */
+ if ((hw->mac.type == e1000_82571) ||
+ (hw->mac.type == e1000_82572)) {
+ reg = er32(CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN;
+ ew32(CTRL_EXT, reg);
+ }
+
+ /* PCI-Ex Control Registers */
+ switch (hw->mac.type) {
+ case e1000_82574:
+ case e1000_82583:
+ reg = er32(GCR);
+ reg |= (1 << 22);
+ ew32(GCR, reg);
+
+ /*
+ * Apply a workaround for a hardware erratum documented in the
+ * errata docs. It fixes an issue where some error-prone or
+ * unreliable PCIe completions occur, particularly with ASPM
+ * enabled. Without the fix, the issue can cause Tx timeouts.
+ */
+ reg = er32(GCR2);
+ reg |= 1;
+ ew32(GCR2, reg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * e1000_clear_vfta_82571 - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+static void e1000_clear_vfta_82571(struct e1000_hw *hw)
+{
+ u32 offset;
+ u32 vfta_value = 0;
+ u32 vfta_offset = 0;
+ u32 vfta_bit_in_reg = 0;
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ if (hw->mng_cookie.vlan_id != 0) {
+ /*
+ * The VFTA is a 4096-bit field, each bit identifying
+ * a single VLAN ID. The following operations
+ * determine which 32b entry (i.e. offset) into the
+ * array we want to set the VLAN ID (i.e. bit) of
+ * the manageability unit.
+ */
+ vfta_offset = (hw->mng_cookie.vlan_id >>
+ E1000_VFTA_ENTRY_SHIFT) &
+ E1000_VFTA_ENTRY_MASK;
+ vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
+ E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+ }
+ break;
+ default:
+ break;
+ }
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ /*
+ * If the offset we want to clear is the same offset of the
+ * manageability VLAN ID, then clear all bits except that of
+ * the manageability unit.
+ */
+ vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value);
+ e1e_flush();
+ }
+}
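
The index math above treats the VFTA as a 4096-bit table stored in 128 32-bit registers: the upper bits of the VLAN ID select the register, the low 5 bits select the bit within it. A worked standalone example for a hypothetical manageability VLAN ID of 100; the shift and mask values are assumptions for illustration (the driver defines them elsewhere):

    #include <stdint.h>
    #include <stdio.h>

    #define VFTA_ENTRY_SHIFT          5    /* assumed: 32 IDs per entry */
    #define VFTA_ENTRY_MASK           0x7F /* assumed: 128 entries */
    #define VFTA_ENTRY_BIT_SHIFT_MASK 0x1F /* assumed: bit within entry */

    int main(void)
    {
        uint16_t vlan_id = 100; /* hypothetical manageability VLAN */
        uint32_t offset = (vlan_id >> VFTA_ENTRY_SHIFT) & VFTA_ENTRY_MASK;
        uint32_t bit = 1u << (vlan_id & VFTA_ENTRY_BIT_SHIFT_MASK);

        /* VLAN 100 lands in entry 3, bit 4: VFTA[3] keeps mask 0x10. */
        printf("VFTA[%u] |= 0x%08X\n", offset, bit);
        return 0;
    }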
+
+/**
+ * e1000_check_mng_mode_82574 - Check manageability is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Reads the NVM Initialization Control Word 2 and returns true
+ * (>0) if any manageability is enabled, else false (0).
+ **/
+static bool e1000_check_mng_mode_82574(struct e1000_hw *hw)
+{
+ u16 data;
+
+ e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+ return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0;
+}
+
+/**
+ * e1000_led_on_82574 - Turn LED on
+ * @hw: pointer to the HW structure
+ *
+ * Turn LED on.
+ **/
+static s32 e1000_led_on_82574(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ u32 i;
+
+ ctrl = hw->mac.ledctl_mode2;
+ if (!(E1000_STATUS_LU & er32(STATUS))) {
+ /*
+ * If no link, then turn LED on by setting the invert bit
+ * for each LED that's "on" (0x0E) in ledctl_mode2.
+ */
+ for (i = 0; i < 4; i++)
+ if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
+ E1000_LEDCTL_MODE_LED_ON)
+ ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8));
+ }
+ ew32(LEDCTL, ctrl);
+
+ return 0;
+}
+
+/**
+ * e1000_check_phy_82574 - check 82574 phy hung state
+ * @hw: pointer to the HW structure
+ *
+ * Returns whether phy is hung or not
+ **/
+bool e1000_check_phy_82574(struct e1000_hw *hw)
+{
+ u16 status_1kbt = 0;
+ u16 receive_errors = 0;
+ bool phy_hung = false;
+ s32 ret_val = 0;
+
+ /*
+ * Read the PHY Receive Error counter first. If it is at its max (all
+ * F's), read the Base1000T status register. If both are at their max,
+ * the PHY is hung.
+ */
+ ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
+
+ if (ret_val)
+ goto out;
+ if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
+ ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt);
+ if (ret_val)
+ goto out;
+ if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
+ E1000_IDLE_ERROR_COUNT_MASK)
+ phy_hung = true;
+ }
+out:
+ return phy_hung;
+}
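
The test above declares a hang only when both counters are pegged at their maximum, so a single saturated counter never triggers a false positive on its own. A standalone sketch of that two-counter check; the max and mask values are illustrative assumptions:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RX_ERROR_MAX          0xFFFF /* assumed all-F's saturation */
    #define IDLE_ERROR_COUNT_MASK 0x00FF /* assumed idle-error field */

    static bool phy_is_hung(uint16_t rx_errors, uint16_t status_1kbt)
    {
        if (rx_errors != RX_ERROR_MAX)
            return false; /* cheap register checked first, as above */
        return (status_1kbt & IDLE_ERROR_COUNT_MASK) ==
               IDLE_ERROR_COUNT_MASK;
    }

    int main(void)
    {
        printf("%d\n", phy_is_hung(0xFFFF, 0x00FF)); /* 1: hung */
        printf("%d\n", phy_is_hung(0x0012, 0x00FF)); /* 0: healthy */
        return 0;
    }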
+
+/**
+ * e1000_setup_link_82571 - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+static s32 e1000_setup_link_82571(struct e1000_hw *hw)
+{
+ /*
+ * The 82573/82574/82583 do not have a word in the NVM to determine
+ * the default flow control setting, so we explicitly
+ * set it to full.
+ */
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ if (hw->fc.requested_mode == e1000_fc_default)
+ hw->fc.requested_mode = e1000_fc_full;
+ break;
+ default:
+ break;
+ }
+
+ return e1000e_setup_link(hw);
+}
+
+/**
+ * e1000_setup_copper_link_82571 - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Configures the link for auto-neg or forced speed and duplex. Then we
+ * check for link; once link is established, the collision distance and
+ * flow control configuration routines are called.
+ **/
+static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ew32(CTRL, ctrl);
+
+ switch (hw->phy.type) {
+ case e1000_phy_m88:
+ case e1000_phy_bm:
+ ret_val = e1000e_copper_link_setup_m88(hw);
+ break;
+ case e1000_phy_igp_2:
+ ret_val = e1000e_copper_link_setup_igp(hw);
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ }
+
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000e_setup_copper_link(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes
+ * @hw: pointer to the HW structure
+ *
+ * Configures collision distance and flow control for fiber and serdes links.
+ * Upon successful setup, poll for link.
+ **/
+static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
+{
+ switch (hw->mac.type) {
+ case e1000_82571:
+ case e1000_82572:
+ /*
+ * If SerDes loopback mode is entered, there is no form
+ * of reset to take the adapter out of that mode. So we
+ * have to explicitly take the adapter out of loopback
+ * mode. This prevents drivers from twiddling their thumbs
+ * if another tool failed to take it out of loopback mode.
+ */
+ ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+ break;
+ default:
+ break;
+ }
+
+ return e1000e_setup_fiber_serdes_link(hw);
+}
+
+/**
+ * e1000_check_for_serdes_link_82571 - Check for link (Serdes)
+ * @hw: pointer to the HW structure
+ *
+ * Reports the link state as up or down.
+ *
+ * If autonegotiation is supported by the link partner, the link state is
+ * determined by the result of autonegotiation. This is the most likely case.
+ * If autonegotiation is not supported by the link partner, and the link
+ * has a valid signal, force the link up.
+ *
+ * The link state is represented internally here by 4 states:
+ *
+ * 1) down
+ * 2) autoneg_progress
+ * 3) autoneg_complete (the link successfully autonegotiated)
+ * 4) forced_up (the link has been forced up, it did not autonegotiate)
+ *
+ **/
+static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 rxcw;
+ u32 ctrl;
+ u32 status;
+ u32 txcw;
+ u32 i;
+ s32 ret_val = 0;
+
+ ctrl = er32(CTRL);
+ status = er32(STATUS);
+ rxcw = er32(RXCW);
+
+ if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
+
+ /* Receiver is synchronized with no invalid bits. */
+ switch (mac->serdes_link_state) {
+ case e1000_serdes_link_autoneg_complete:
+ if (!(status & E1000_STATUS_LU)) {
+ /*
+ * We have lost link, retry autoneg before
+ * reporting link failure
+ */
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = false;
+ e_dbg("AN_UP -> AN_PROG\n");
+ } else {
+ mac->serdes_has_link = true;
+ }
+ break;
+
+ case e1000_serdes_link_forced_up:
+ /*
+ * If we are receiving /C/ ordered sets, re-enable
+ * auto-negotiation in the TXCW register and disable
+ * forced link in the Device Control register in an
+ * attempt to auto-negotiate with our link partner.
+ * If the partner code word is null, stop forcing
+ * and restart auto negotiation.
+ */
+ if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
+ /* Enable autoneg, and unforce link up */
+ ew32(TXCW, mac->txcw);
+ ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = false;
+ e_dbg("FORCED_UP -> AN_PROG\n");
+ } else {
+ mac->serdes_has_link = true;
+ }
+ break;
+
+ case e1000_serdes_link_autoneg_progress:
+ if (rxcw & E1000_RXCW_C) {
+ /*
+ * We received /C/ ordered sets, meaning the
+ * link partner has autonegotiated, and we can
+ * trust the Link Up (LU) status bit.
+ */
+ if (status & E1000_STATUS_LU) {
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_complete;
+ e_dbg("AN_PROG -> AN_UP\n");
+ mac->serdes_has_link = true;
+ } else {
+ /* Autoneg completed, but failed. */
+ mac->serdes_link_state =
+ e1000_serdes_link_down;
+ e_dbg("AN_PROG -> DOWN\n");
+ }
+ } else {
+ /*
+ * The link partner did not autoneg.
+ * Force link up and full duplex, and change
+ * state to forced.
+ */
+ ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ ew32(CTRL, ctrl);
+
+ /* Configure Flow Control after link up. */
+ ret_val = e1000e_config_fc_after_link_up(hw);
+ if (ret_val) {
+ e_dbg("Error config flow control\n");
+ break;
+ }
+ mac->serdes_link_state =
+ e1000_serdes_link_forced_up;
+ mac->serdes_has_link = true;
+ e_dbg("AN_PROG -> FORCED_UP\n");
+ }
+ break;
+
+ case e1000_serdes_link_down:
+ default:
+ /*
+ * The link was down but the receiver has now gained
+ * valid sync, so let's see if we can bring the link
+ * up.
+ */
+ ew32(TXCW, mac->txcw);
+ ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = false;
+ e_dbg("DOWN -> AN_PROG\n");
+ break;
+ }
+ } else {
+ if (!(rxcw & E1000_RXCW_SYNCH)) {
+ mac->serdes_has_link = false;
+ mac->serdes_link_state = e1000_serdes_link_down;
+ e_dbg("ANYSTATE -> DOWN\n");
+ } else {
+ /*
+ * Check several times; if Sync and Config are
+ * both consistently 1, simply ignore the
+ * Invalid bit and restart autonegotiation.
+ */
+ for (i = 0; i < AN_RETRY_COUNT; i++) {
+ udelay(10);
+ rxcw = er32(RXCW);
+ if ((rxcw & E1000_RXCW_IV) &&
+ !((rxcw & E1000_RXCW_SYNCH) &&
+ (rxcw & E1000_RXCW_C))) {
+ mac->serdes_has_link = false;
+ mac->serdes_link_state =
+ e1000_serdes_link_down;
+ e_dbg("ANYSTATE -> DOWN\n");
+ break;
+ }
+ }
+
+ if (i == AN_RETRY_COUNT) {
+ txcw = er32(TXCW);
+ txcw |= E1000_TXCW_ANE;
+ ew32(TXCW, txcw);
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = false;
+ e_dbg("ANYSTATE -> AN_PROG\n");
+ }
+ }
+ }
+
+ return ret_val;
+}
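
The routine above is easier to audit as a transition table over its four states. A standalone sketch condensing the rules; the Invalid-bit retry dampening and the null-code-word check in the forced-up state are omitted for brevity, so treat this as a summary of the logic rather than the driver's implementation:

    #include <stdbool.h>
    #include <stdio.h>

    enum serdes_state { DOWN, AN_PROG, AN_UP, FORCED_UP };

    struct link_inputs {
        bool synch;     /* receiver synchronized (RXCW.SYNCH) */
        bool invalid;   /* invalid bits seen (RXCW.IV) */
        bool c_ordered; /* /C/ ordered sets received (RXCW.C) */
        bool link_up;   /* STATUS.LU */
    };

    static enum serdes_state step(enum serdes_state s,
                                  struct link_inputs in)
    {
        if (!in.synch || in.invalid)
            return DOWN;                  /* lost sync: any -> DOWN */

        switch (s) {
        case AN_UP:
            return in.link_up ? AN_UP : AN_PROG;
        case FORCED_UP:
            return in.c_ordered ? AN_PROG : FORCED_UP;
        case AN_PROG:
            if (in.c_ordered)             /* partner autonegotiated */
                return in.link_up ? AN_UP : DOWN;
            return FORCED_UP;             /* partner won't: force link */
        case DOWN:
        default:
            return AN_PROG;               /* regained sync: retry AN */
        }
    }

    int main(void)
    {
        struct link_inputs in = { .synch = true, .c_ordered = true,
                                  .link_up = true };
        enum serdes_state s = step(DOWN, in); /* DOWN -> AN_PROG */

        s = step(s, in);                      /* AN_PROG -> AN_UP */
        printf("final state: %d (2 == AN_UP)\n", s);
        return 0;
    }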
+
+/**
+ * e1000_valid_led_default_82571 - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set to a valid LED configuration.
+ **/
+static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+
+ switch (hw->mac.type) {
+ case e1000_82573:
+ case e1000_82574:
+ case e1000_82583:
+ if (*data == ID_LED_RESERVED_F746)
+ *data = ID_LED_DEFAULT_82573;
+ break;
+ default:
+ if (*data == ID_LED_RESERVED_0000 ||
+ *data == ID_LED_RESERVED_FFFF)
+ *data = ID_LED_DEFAULT;
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_get_laa_state_82571 - Get locally administered address state
+ * @hw: pointer to the HW structure
+ *
+ * Retrieve and return the current locally administered address state.
+ **/
+bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
+{
+ if (hw->mac.type != e1000_82571)
+ return false;
+
+ return hw->dev_spec.e82571.laa_is_present;
+}
+
+/**
+ * e1000e_set_laa_state_82571 - Set locally administered address state
+ * @hw: pointer to the HW structure
+ * @state: enable/disable locally administered address
+ *
+ * Enable/Disable the current locally administered address state.
+ **/
+void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
+{
+ if (hw->mac.type != e1000_82571)
+ return;
+
+ hw->dev_spec.e82571.laa_is_present = state;
+
+ /* If workaround is activated... */
+ if (state)
+ /*
+ * Hold a copy of the LAA in RAR[14]. This is done so that
+ * between the time RAR[0] gets clobbered and the time it
+ * gets fixed, the actual LAA is in one of the RARs and no
+ * incoming packets directed to this port are dropped.
+ * Eventually the LAA will be in RAR[0] and RAR[14].
+ */
+ e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1);
+}
+
+/**
+ * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Verifies that the EEPROM has completed the update. After updating the
+ * EEPROM, we need to check bit 15 in word 0x23 for the checksum fix. If
+ * the checksum fix is not implemented, we need to set the bit and update
+ * the checksum. Otherwise, if bit 15 is set and the checksum is incorrect,
+ * we need to return bad checksum.
+ **/
+static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ s32 ret_val;
+ u16 data;
+
+ if (nvm->type != e1000_nvm_flash_hw)
+ return 0;
+
+ /*
+ * Check bit 4 of word 10h. If it is 0, firmware is done updating
+ * 10h-12h. Checksum may need to be fixed.
+ */
+ ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (!(data & 0x10)) {
+ /*
+ * Read 0x23 and check bit 15. This bit is a 1
+ * when the checksum has already been fixed. If
+ * the checksum is still wrong and this bit is a
+ * 1, we need to return bad checksum. Otherwise,
+ * we need to set this bit to a 1 and update the
+ * checksum.
+ */
+ ret_val = e1000_read_nvm(hw, 0x23, 1, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (!(data & 0x8000)) {
+ data |= 0x8000;
+ ret_val = e1000_write_nvm(hw, 0x23, 1, &data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000e_update_nvm_checksum(hw);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_read_mac_addr_82571 - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ if (hw->mac.type == e1000_82571) {
+ /*
+ * If there's an alternate MAC address, place it in RAR0
+ * so that it will override the silicon-installed default permanent
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ goto out;
+ }
+
+ ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_82571 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link
+ * during a driver unload, or when wake-on-LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ struct e1000_mac_info *mac = &hw->mac;
+
+ if (!(phy->ops.check_reset_block))
+ return;
+
+ /* If the management interface is not enabled, then power down */
+ if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
+ e1000_power_down_phy_copper(hw);
+}
+
+/**
+ * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
+{
+ e1000e_clear_hw_cntrs_base(hw);
+
+ er32(PRC64);
+ er32(PRC127);
+ er32(PRC255);
+ er32(PRC511);
+ er32(PRC1023);
+ er32(PRC1522);
+ er32(PTC64);
+ er32(PTC127);
+ er32(PTC255);
+ er32(PTC511);
+ er32(PTC1023);
+ er32(PTC1522);
+
+ er32(ALGNERRC);
+ er32(RXERRC);
+ er32(TNCRS);
+ er32(CEXTERR);
+ er32(TSCTC);
+ er32(TSCTFC);
+
+ er32(MGTPRC);
+ er32(MGTPDC);
+ er32(MGTPTC);
+
+ er32(IAC);
+ er32(ICRXOC);
+
+ er32(ICRXPTC);
+ er32(ICRXATC);
+ er32(ICTXPTC);
+ er32(ICTXATC);
+ er32(ICTXQEC);
+ er32(ICTXQMTC);
+ er32(ICRXDMTC);
+}
+
+static const struct e1000_mac_operations e82571_mac_ops = {
+ /* .check_mng_mode: mac type dependent */
+ /* .check_for_link: media type dependent */
+ .id_led_init = e1000e_id_led_init,
+ .cleanup_led = e1000e_cleanup_led_generic,
+ .clear_hw_cntrs = e1000_clear_hw_cntrs_82571,
+ .get_bus_info = e1000e_get_bus_info_pcie,
+ .set_lan_id = e1000_set_lan_id_multi_port_pcie,
+ /* .get_link_up_info: media type dependent */
+ /* .led_on: mac type dependent */
+ .led_off = e1000e_led_off_generic,
+ .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
+ .write_vfta = e1000_write_vfta_generic,
+ .clear_vfta = e1000_clear_vfta_82571,
+ .reset_hw = e1000_reset_hw_82571,
+ .init_hw = e1000_init_hw_82571,
+ .setup_link = e1000_setup_link_82571,
+ /* .setup_physical_interface: media type dependent */
+ .setup_led = e1000e_setup_led_generic,
+ .read_mac_addr = e1000_read_mac_addr_82571,
+};
+
+static const struct e1000_phy_operations e82_phy_ops_igp = {
+ .acquire = e1000_get_hw_semaphore_82571,
+ .check_polarity = e1000_check_polarity_igp,
+ .check_reset_block = e1000e_check_reset_block_generic,
+ .commit = NULL,
+ .force_speed_duplex = e1000e_phy_force_speed_duplex_igp,
+ .get_cfg_done = e1000_get_cfg_done_82571,
+ .get_cable_length = e1000e_get_cable_length_igp_2,
+ .get_info = e1000e_get_phy_info_igp,
+ .read_reg = e1000e_read_phy_reg_igp,
+ .release = e1000_put_hw_semaphore_82571,
+ .reset = e1000e_phy_hw_reset_generic,
+ .set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
+ .set_d3_lplu_state = e1000e_set_d3_lplu_state,
+ .write_reg = e1000e_write_phy_reg_igp,
+ .cfg_on_link_up = NULL,
+};
+
+static const struct e1000_phy_operations e82_phy_ops_m88 = {
+ .acquire = e1000_get_hw_semaphore_82571,
+ .check_polarity = e1000_check_polarity_m88,
+ .check_reset_block = e1000e_check_reset_block_generic,
+ .commit = e1000e_phy_sw_reset,
+ .force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
+ .get_cfg_done = e1000e_get_cfg_done,
+ .get_cable_length = e1000e_get_cable_length_m88,
+ .get_info = e1000e_get_phy_info_m88,
+ .read_reg = e1000e_read_phy_reg_m88,
+ .release = e1000_put_hw_semaphore_82571,
+ .reset = e1000e_phy_hw_reset_generic,
+ .set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
+ .set_d3_lplu_state = e1000e_set_d3_lplu_state,
+ .write_reg = e1000e_write_phy_reg_m88,
+ .cfg_on_link_up = NULL,
+};
+
+static const struct e1000_phy_operations e82_phy_ops_bm = {
+ .acquire = e1000_get_hw_semaphore_82571,
+ .check_polarity = e1000_check_polarity_m88,
+ .check_reset_block = e1000e_check_reset_block_generic,
+ .commit = e1000e_phy_sw_reset,
+ .force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
+ .get_cfg_done = e1000e_get_cfg_done,
+ .get_cable_length = e1000e_get_cable_length_m88,
+ .get_info = e1000e_get_phy_info_m88,
+ .read_reg = e1000e_read_phy_reg_bm2,
+ .release = e1000_put_hw_semaphore_82571,
+ .reset = e1000e_phy_hw_reset_generic,
+ .set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
+ .set_d3_lplu_state = e1000e_set_d3_lplu_state,
+ .write_reg = e1000e_write_phy_reg_bm2,
+ .cfg_on_link_up = NULL,
+};
+
+static const struct e1000_nvm_operations e82571_nvm_ops = {
+ .acquire = e1000_acquire_nvm_82571,
+ .read = e1000e_read_nvm_eerd,
+ .release = e1000_release_nvm_82571,
+ .update = e1000_update_nvm_checksum_82571,
+ .valid_led_default = e1000_valid_led_default_82571,
+ .validate = e1000_validate_nvm_checksum_82571,
+ .write = e1000_write_nvm_82571,
+};
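
These tables are the dispatch mechanism for the whole 8257x family: generic code calls through the installed function pointers, so each MAC variant plugs in its own acquire/release/read/write routines without any switch at the call sites. A standalone sketch of the pattern with illustrative stand-in types and names:

    #include <stdio.h>

    struct fake_hw;

    struct fake_nvm_ops {
        int (*acquire)(struct fake_hw *hw);
        void (*release)(struct fake_hw *hw);
    };

    struct fake_hw {
        const struct fake_nvm_ops *nvm_ops;
    };

    static int acquire_82571(struct fake_hw *hw)
    {
        (void)hw;
        puts("82571 acquire");
        return 0;
    }

    static void release_82571(struct fake_hw *hw)
    {
        (void)hw;
        puts("82571 release");
    }

    static const struct fake_nvm_ops ops_82571 = {
        .acquire = acquire_82571,
        .release = release_82571,
    };

    /* Generic caller: works for any MAC whose table is installed. */
    static int read_nvm_word(struct fake_hw *hw)
    {
        if (hw->nvm_ops->acquire(hw))
            return -1;
        /* ... perform the access here ... */
        hw->nvm_ops->release(hw);
        return 0;
    }

    int main(void)
    {
        struct fake_hw hw = { .nvm_ops = &ops_82571 };

        return read_nvm_word(&hw);
    }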
+
+const struct e1000_info e1000_82571_info = {
+ .mac = e1000_82571,
+ .flags = FLAG_HAS_HW_VLAN_FILTER
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_HAS_WOL
+ | FLAG_APME_IN_CTRL3
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_SMART_POWER_DOWN
+ | FLAG_RESET_OVERWRITES_LAA /* errata */
+ | FLAG_TARC_SPEED_MODE_BIT /* errata */
+ | FLAG_APME_CHECK_PORT_B,
+ .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */
+ | FLAG2_DMA_BURST,
+ .pba = 38,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+ .get_variants = e1000_get_variants_82571,
+ .mac_ops = &e82571_mac_ops,
+ .phy_ops = &e82_phy_ops_igp,
+ .nvm_ops = &e82571_nvm_ops,
+};
+
+const struct e1000_info e1000_82572_info = {
+ .mac = e1000_82572,
+ .flags = FLAG_HAS_HW_VLAN_FILTER
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_HAS_WOL
+ | FLAG_APME_IN_CTRL3
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_TARC_SPEED_MODE_BIT, /* errata */
+ .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */
+ | FLAG2_DMA_BURST,
+ .pba = 38,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+ .get_variants = e1000_get_variants_82571,
+ .mac_ops = &e82571_mac_ops,
+ .phy_ops = &e82_phy_ops_igp,
+ .nvm_ops = &e82571_nvm_ops,
+};
+
+const struct e1000_info e1000_82573_info = {
+ .mac = e1000_82573,
+ .flags = FLAG_HAS_HW_VLAN_FILTER
+ | FLAG_HAS_WOL
+ | FLAG_APME_IN_CTRL3
+ | FLAG_HAS_SMART_POWER_DOWN
+ | FLAG_HAS_AMT
+ | FLAG_HAS_SWSM_ON_LOAD,
+ .flags2 = FLAG2_DISABLE_ASPM_L1
+ | FLAG2_DISABLE_ASPM_L0S,
+ .pba = 20,
+ .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
+ .get_variants = e1000_get_variants_82571,
+ .mac_ops = &e82571_mac_ops,
+ .phy_ops = &e82_phy_ops_m88,
+ .nvm_ops = &e82571_nvm_ops,
+};
+
+const struct e1000_info e1000_82574_info = {
+ .mac = e1000_82574,
+ .flags = FLAG_HAS_HW_VLAN_FILTER
+ | FLAG_HAS_MSIX
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_HAS_WOL
+ | FLAG_APME_IN_CTRL3
+ | FLAG_HAS_SMART_POWER_DOWN
+ | FLAG_HAS_AMT
+ | FLAG_HAS_CTRLEXT_ON_LOAD,
+ .flags2 = FLAG2_CHECK_PHY_HANG
+ | FLAG2_DISABLE_ASPM_L0S
+ | FLAG2_NO_DISABLE_RX,
+ .pba = 32,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+ .get_variants = e1000_get_variants_82571,
+ .mac_ops = &e82571_mac_ops,
+ .phy_ops = &e82_phy_ops_bm,
+ .nvm_ops = &e82571_nvm_ops,
+};
+
+const struct e1000_info e1000_82583_info = {
+ .mac = e1000_82583,
+ .flags = FLAG_HAS_HW_VLAN_FILTER
+ | FLAG_HAS_WOL
+ | FLAG_APME_IN_CTRL3
+ | FLAG_HAS_SMART_POWER_DOWN
+ | FLAG_HAS_AMT
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_HAS_CTRLEXT_ON_LOAD,
+ .flags2 = FLAG2_DISABLE_ASPM_L0S
+ | FLAG2_NO_DISABLE_RX,
+ .pba = 32,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+ .get_variants = e1000_get_variants_82571,
+ .mac_ops = &e82571_mac_ops,
+ .phy_ops = &e82_phy_ops_bm,
+ .nvm_ops = &e82571_nvm_ops,
+};
+
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
new file mode 100644
index 000000000000..948c05db5d68
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -0,0 +1,37 @@
+################################################################################
+#
+# Intel PRO/1000 Linux driver
+# Copyright(c) 1999 - 2011 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Linux NICS <linux.nics@intel.com>
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) PRO/1000 ethernet driver
+#
+
+obj-$(CONFIG_E1000E) += e1000e.o
+
+e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \
+ lib.o phy.o param.o ethtool.o netdev.o
+
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
new file mode 100644
index 000000000000..c516a7440bec
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -0,0 +1,844 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_DEFINES_H_
+#define _E1000_DEFINES_H_
+
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define REQ_RX_DESCRIPTOR_MULTIPLE 8
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_APME 0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
+#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC E1000_WUFC_LNKC
+#define E1000_WUS_MAG E1000_WUFC_MAG
+#define E1000_WUS_EX E1000_WUFC_EX
+#define E1000_WUS_MC E1000_WUFC_MC
+#define E1000_WUS_BC E1000_WUFC_BC
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
+#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
+#define E1000_CTRL_EXT_EIAME 0x01000000
+#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
+#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
+#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
+#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
+#define E1000_CTRL_EXT_LSECCK 0x00001000
+#define E1000_CTRL_EXT_PHYPDEN 0x00100000
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
+#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
+#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
+#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
+#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+
+#define E1000_RXDEXT_STATERR_CE 0x01000000
+#define E1000_RXDEXT_STATERR_SE 0x02000000
+#define E1000_RXDEXT_STATERR_SEQ 0x04000000
+#define E1000_RXDEXT_STATERR_CXE 0x10000000
+#define E1000_RXDEXT_STATERR_RXE 0x80000000
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+ E1000_RXD_ERR_CE | \
+ E1000_RXD_ERR_SE | \
+ E1000_RXD_ERR_SEQ | \
+ E1000_RXD_ERR_CXE | \
+ E1000_RXD_ERR_RXE)
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
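
A receive path applies these masks with a single AND: if any masked error bit is set in a completed descriptor, the frame is dropped instead of being passed up the stack. A minimal standalone sketch, recomposing the legacy mask from the E1000_RXD_ERR_* values above and using a fabricated descriptor error byte:

    #include <stdint.h>
    #include <stdio.h>

    /* Same composition as E1000_RXD_ERR_FRAME_ERR_MASK above. */
    #define FRAME_ERR_MASK (0x01 | 0x02 | 0x04 | 0x10 | 0x80)

    int main(void)
    {
        uint8_t errors = 0x04; /* fabricated: sequence error set */

        if (errors & FRAME_ERR_MASK)
            puts("drop: frame error");
        else
            puts("pass frame up the stack");
        return 0;
    }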
+
+#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
+#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
+/* Enable MAC address filtering */
+#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
+/* Enable MNG packets to host memory */
+#define E1000_MANC_EN_MNG2HOST 0x00200000
+
+#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */
+#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */
+#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */
+#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */
+
+/* Receive Control */
+#define E1000_RCTL_EN 0x00000002 /* enable */
+#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
+#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
+#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */
+#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
+#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
+#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
+#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */
+#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
+#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
+#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */
+#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */
+#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */
+#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
+#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
+#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
+#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
+#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
+#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
+
+/*
+ * Use byte values for the following shift parameters
+ * Usage:
+ * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ * E1000_PSRCTL_BSIZE0_MASK) |
+ * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ * E1000_PSRCTL_BSIZE1_MASK) |
+ * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ * E1000_PSRCTL_BSIZE2_MASK) |
+ * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ * E1000_PSRCTL_BSIZE3_MASK));
+ * where value0 = [128..16256], default=256
+ * value1 = [1024..64512], default=4096
+ * value2 = [0..64512], default=4096
+ * value3 = [0..64512], default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
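
The shift directions above fold two steps into one operation: dividing the byte count by its field granularity (128 bytes for BSIZE0, 1 KB for the others) and placing the quotient in its bit field. A worked standalone example using the stated defaults (256, 4096, 4096, 0 bytes), with ROUNDUP as a local helper:

    #include <stdint.h>
    #include <stdio.h>

    #define BSIZE0_MASK 0x0000007F
    #define BSIZE1_MASK 0x00003F00
    #define BSIZE2_MASK 0x003F0000
    #define BSIZE3_MASK 0x3F000000

    #define ROUNDUP(x, g) ((((x) + (g) - 1) / (g)) * (g))

    int main(void)
    {
        uint32_t psrctl = 0;

        psrctl |= (ROUNDUP(256u, 128u) >> 7) & BSIZE0_MASK;   /* 0x00002 */
        psrctl |= (ROUNDUP(4096u, 1024u) >> 2) & BSIZE1_MASK; /* 0x00400 */
        psrctl |= (ROUNDUP(4096u, 1024u) << 6) & BSIZE2_MASK; /* 0x40000 */
        psrctl |= (ROUNDUP(0u, 1024u) << 14) & BSIZE3_MASK;   /* 0x00000 */

        printf("PSRCTL = 0x%08X\n", psrctl); /* prints 0x00040402 */
        return 0;
    }

For BSIZE1, for example, shifting 4096 right by 2 is equivalent to dividing by 1024 and then shifting the quotient left by 8 into bits 13:8.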
+
+/* SWFW_SYNC Definitions */
+#define E1000_SWFW_EEP_SM 0x1
+#define E1000_SWFW_PHY0_SM 0x2
+#define E1000_SWFW_PHY1_SM 0x4
+#define E1000_SWFW_CSR_SM 0x8
+
+/* Device Control */
+#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */
+#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
+#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
+#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
+#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
+#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
+#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
+#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
+#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_RST 0x04000000 /* Global reset */
+#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
+#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+
+/*
+ * Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+
+/* Device Status */
+#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
+#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
+#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
+
+/* Constants used to interpret the masked PCI-X bus speed. */
+
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL 0x0020
+
+/* 1000/H is not supported, nor spec-compliant. */
+#define E1000_ALL_SPEED_DUPLEX ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
+ ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
+ ADVERTISE_1000_FULL)
+#define E1000_ALL_NOT_GIG ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
+ ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
+#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
+
+/* LED Control */
+#define E1000_PHY_LED0_MODE_MASK 0x00000007
+#define E1000_PHY_LED0_IVRT 0x00000008
+#define E1000_PHY_LED0_MASK 0x0000001F
+
+#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT 0
+#define E1000_LEDCTL_LED0_IVRT 0x00000040
+#define E1000_LEDCTL_LED0_BLINK 0x00000080
+
+#define E1000_LEDCTL_MODE_LINK_UP 0x2
+#define E1000_LEDCTL_MODE_LED_ON 0xE
+#define E1000_LEDCTL_MODE_LED_OFF 0xF
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
+#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
+
+/* Transmit Control */
+#define E1000_TCTL_EN 0x00000002 /* enable Tx */
+#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
+#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
+#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
+#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
+#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
+
+/* Transmit Arbitration Count */
+
+/* SerDes Control */
+#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
+
+/* Header split receive */
+#define E1000_RFCTL_NFSW_DIS 0x00000040
+#define E1000_RFCTL_NFSR_DIS 0x00000080
+#define E1000_RFCTL_ACK_DIS 0x00001000
+#define E1000_RFCTL_EXTEN 0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD 15
+#define E1000_CT_SHIFT 4
+#define E1000_COLLISION_DISTANCE 63
+#define E1000_COLD_SHIFT 12
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK 0x000003FF
+
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT 10
+
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
+#define E1000_TIPG_IPGR2_SHIFT 20
+
+#define MAX_JUMBO_FRAME_SIZE 0x3F00
+
+/* Extended Configuration Control and Size */
+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
+#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
+#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16
+
+#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
+
+#define E1000_KABGTXD_BGSQLBIAS 0x00050000
+
+/* PBA constants */
+#define E1000_PBA_8K 0x0008 /* 8KB */
+#define E1000_PBA_16K 0x0010 /* 16KB */
+
+#define E1000_PBS_16K E1000_PBA_16K
+
+#define IFS_MAX 80
+#define IFS_MIN 40
+#define IFS_RATIO 4
+#define IFS_STEP 10
+#define MIN_NUM_XMITS 1000
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
+
+#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
+#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
+#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
+#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
+#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
+#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
+#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */
+#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */
+
+/* PBA ECC Register */
+#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */
+#define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */
+#define E1000_PBA_ECC_CORR_EN 0x00000001 /* ECC correction enable */
+#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */
+#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 for ECC */
+
+/*
+ * This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ * o RXT0 = Receiver Timer Interrupt (ring 0)
+ * o TXDW = Transmit Descriptor Written Back
+ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ * o RXSEQ = Receive Sequence Error
+ * o LSC = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC)
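+/*
+ * Hedged usage sketch (not part of this patch): the driver typically arms
+ * exactly these causes with one write to the Interrupt Mask Set register
+ * when enabling interrupts, e.g.
+ *
+ * ew32(IMS, IMS_ENABLE_MASK);
+ *
+ * where ew32() is assumed to be the register-write wrapper from the main
+ * driver header.
+ */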
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
+#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
+#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
+#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */
+#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */
+#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */
+#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */
+#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
+#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
+
+/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
+/* Enable the counting of desc. still to be processed. */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE 0x8808
+
+/* 802.1q VLAN Packet Size */
+#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
+
+/* Receive Address */
+/*
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * Technically, we have 16 spots. However, we reserve one of these spots
+ * (RAR[15]) for the directed address used by controllers with
+ * manageability enabled, leaving us room for the other 15 receive
+ * addresses.
+ */
+#define E1000_RAR_ENTRIES 15
+#define E1000_RAH_AV 0x80000000 /* Receive address valid */
+#define E1000_RAL_MAC_ADDR_LEN 4
+#define E1000_RAH_MAC_ADDR_LEN 2
+
+/* Error Codes */
+#define E1000_ERR_NVM 1
+#define E1000_ERR_PHY 2
+#define E1000_ERR_CONFIG 3
+#define E1000_ERR_PARAM 4
+#define E1000_ERR_MAC_INIT 5
+#define E1000_ERR_PHY_TYPE 6
+#define E1000_ERR_RESET 9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_ERR_HOST_INTERFACE_COMMAND 11
+#define E1000_BLK_PHY_RESET 12
+#define E1000_ERR_SWFW_SYNC 13
+#define E1000_NOT_IMPLEMENTED 14
+#define E1000_ERR_INVALID_ARGUMENT 16
+#define E1000_ERR_NO_SPACE 17
+#define E1000_ERR_NVM_PBA_SECTION 18
+
+/* Loop limit on how long we wait for auto-negotiation to complete */
+#define FIBER_LINK_UP_LIMIT 50
+#define COPPER_LINK_UP_LIMIT 10
+#define PHY_AUTO_NEG_LIMIT 45
+#define PHY_FORCE_LIMIT 20
+/* Number of 100-microsecond intervals we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT 800
+/* Number of milliseconds we wait for PHY configuration to complete after MAC reset */
+#define PHY_CFG_TIMEOUT 100
+/* Number of 2-millisecond intervals we wait to acquire MDIO ownership */
+#define MDIO_OWNERSHIP_TIMEOUT 10
+/* Number of milliseconds we wait for NVM auto read done after MAC reset */
+#define AUTO_READ_DONE_TIMEOUT 10
+
+/* Flow Control */
+#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
+
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
+#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW asym pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
+#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
+#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
+#define E1000_RXCW_C 0x20000000 /* Receive config */
+#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
+
+/* PCI Express Control */
+#define E1000_GCR_RXD_NO_SNOOP 0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
+#define E1000_GCR_TXD_NO_SNOOP 0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
+
+#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
+ E1000_GCR_RXDSCW_NO_SNOOP | \
+ E1000_GCR_RXDSCR_NO_SNOOP | \
+ E1000_GCR_TXD_NO_SNOOP | \
+ E1000_GCR_TXDSCW_NO_SNOOP | \
+ E1000_GCR_TXDSCR_NO_SNOOP)
+
+/* PHY Control Register */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000 0x0040
+#define MII_CR_SPEED_100 0x2000
+#define MII_CR_SPEED_10 0x0000
+
+/* PHY Status Register */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
+#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
+#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
+
+/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
+#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
+ /* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
+ /* 0=Automatic Master/Slave config */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
+
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CONTROL 0x00 /* Control Register */
+#define PHY_STATUS 0x01 /* Status Register */
+#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
+#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
+
+#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */
+
+/* NVM Control */
+#define E1000_EECD_SK 0x00000001 /* NVM Clock */
+#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
+#define E1000_EECD_DI 0x00000004 /* NVM Data In */
+#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
+#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
+#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
+#define E1000_EECD_PRES 0x00000100 /* NVM Present */
+#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */
+/* NVM Addressing bits based on type (0-small, 1-large) */
+#define E1000_EECD_ADDR_BITS 0x00000400
+#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
+#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
+#define E1000_EECD_SIZE_EX_SHIFT 11
+#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
+#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
+#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
+
+#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */
+#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
+#define E1000_NVM_RW_REG_START 1 /* Start operation */
+#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
+#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
+#define E1000_FLASH_UPDATES 2000
+
+/* NVM Word Offsets */
+#define NVM_COMPAT 0x0003
+#define NVM_ID_LED_SETTINGS 0x0004
+#define NVM_INIT_CONTROL2_REG 0x000F
+#define NVM_INIT_CONTROL3_PORT_B 0x0014
+#define NVM_INIT_3GIO_3 0x001A
+#define NVM_INIT_CONTROL3_PORT_A 0x0024
+#define NVM_CFG 0x0012
+#define NVM_ALT_MAC_ADDR_PTR 0x0037
+#define NVM_CHECKSUM_REG 0x003F
+
+#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
+
+#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */
+#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */
+
+/* Mask bits for fields in Word 0x0f of the NVM */
+#define NVM_WORD0F_PAUSE_MASK 0x3000
+#define NVM_WORD0F_PAUSE 0x1000
+#define NVM_WORD0F_ASM_DIR 0x2000
+
+/* Mask bits for fields in Word 0x1a of the NVM */
+#define NVM_WORD1A_ASPM_MASK 0x000C
+
+/* Mask bits for fields in Word 0x03 of the EEPROM */
+#define NVM_COMPAT_LOM 0x0800
+
+/* length of string needed to store PBA number */
+#define E1000_PBANUM_LENGTH 11
+
+/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+#define NVM_SUM 0xBABA
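+/*
+ * Hedged sketch of the validation idea (the generic NVM checksum helpers
+ * declared later follow roughly this pattern): sum words 0 through
+ * NVM_CHECKSUM_REG and require the 16-bit total to equal NVM_SUM:
+ *
+ * u16 i, word, checksum = 0;
+ * for (i = 0; i <= NVM_CHECKSUM_REG; i++) {
+ * 	e1000_read_nvm(hw, i, 1, &word);
+ * 	checksum += word;
+ * }
+ * valid = (checksum == (u16)NVM_SUM);
+ */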
+
+/* PBA (printed board assembly) number words */
+#define NVM_PBA_OFFSET_0 8
+#define NVM_PBA_OFFSET_1 9
+#define NVM_PBA_PTR_GUARD 0xFAFA
+#define NVM_WORD_SIZE_BASE_SHIFT 6
+
+/* NVM Commands - SPI */
+#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
+#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
+#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
+#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
+#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
+#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
+
+/* SPI NVM Status Register */
+#define NVM_STATUS_RDY_SPI 0x01
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
+ (ID_LED_OFF1_OFF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2 0x1
+#define ID_LED_DEF1_ON2 0x2
+#define ID_LED_DEF1_OFF2 0x3
+#define ID_LED_ON1_DEF2 0x4
+#define ID_LED_ON1_ON2 0x5
+#define ID_LED_ON1_OFF2 0x6
+#define ID_LED_OFF1_DEF2 0x7
+#define ID_LED_OFF1_ON2 0x8
+#define ID_LED_OFF1_OFF2 0x9
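+/*
+ * Worked value (illustrative only): each nibble of the ID LED word
+ * selects a behaviour for one LED state, so ID_LED_DEFAULT above
+ * expands to
+ *
+ * (0x8 << 12) | (0x9 << 8) | (0x1 << 4) | 0x1 = 0x8911
+ */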
+
+#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE 0x07000000
+
+/* PCI/PCI-X/PCI-EX Config space */
+#define PCI_HEADER_TYPE_REGISTER 0x0E
+#define PCIE_LINK_STATUS 0x12
+
+#define PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define PCIE_LINK_WIDTH_MASK 0x3F0
+#define PCIE_LINK_WIDTH_SHIFT 4
+
+#define PHY_REVISION_MASK 0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF
+
+/* Bit definitions for valid PHY IDs. */
+/*
+ * I = Integrated
+ * E = External
+ */
+#define M88E1000_E_PHY_ID 0x01410C50
+#define M88E1000_I_PHY_ID 0x01410C30
+#define M88E1011_I_PHY_ID 0x01410C20
+#define IGP01E1000_I_PHY_ID 0x02A80380
+#define M88E1111_I_PHY_ID 0x01410CC0
+#define GG82563_E_PHY_ID 0x01410CA0
+#define IGP03E1000_E_PHY_ID 0x02A80390
+#define IFE_E_PHY_ID 0x02A80330
+#define IFE_PLUS_E_PHY_ID 0x02A80320
+#define IFE_C_E_PHY_ID 0x02A80310
+#define BME1000_E_PHY_ID 0x01410CB0
+#define BME1000_E_PHY_ID_R2 0x01410CB1
+#define I82577_E_PHY_ID 0x01540050
+#define I82578_E_PHY_ID 0x004DD040
+#define I82579_E_PHY_ID 0x01540090
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
+
+#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
+ /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
+/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
+#define M88E1000_PSCR_AUTO_X_1000T 0x0040
+/* Auto crossover enabled all speeds */
+#define M88E1000_PSCR_AUTO_X_MODE 0x0060
+/*
+ * 10BASE-T Rx Threshold select (a PSCR bit not defined in this file):
+ * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
+ * 0=Normal 10BASE-T Rx Threshold
+ */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
+/* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */
+#define M88E1000_PSSR_CABLE_LENGTH 0x0380
+#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 15:14 */
+#define M88E1000_PSSR_1000MBS 0x8000 /* 10 in bits 15:14 = 1000 Mb/s */
+
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave
+ */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
+#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
+
+#define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020
+#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C
+
+/* BME1000 PHY Specific Control Register */
+#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */
+
+
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
+ ((reg) & MAX_PHY_REG_ADDRESS))
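+/*
+ * Worked example (illustrative only): with PHY_PAGE_SHIFT = 5 the page
+ * number occupies bits 15:5 and the register offset bits 4:0, so
+ *
+ * PHY_REG(778, 16) = (778 << 5) | 16 = 0x6140 | 0x10 = 0x6150
+ *
+ * which is how the per-page PHY register addresses below are formed.
+ */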
+
+/*
+ * Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define GG82563_PAGE_SHIFT 5
+#define GG82563_REG(page, reg) \
+ (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+#define GG82563_MIN_ALT_REG 30
+
+/* GG82563 Specific Registers */
+#define GG82563_PHY_SPEC_CTRL \
+ GG82563_REG(0, 16) /* PHY Specific Control */
+#define GG82563_PHY_PAGE_SELECT \
+ GG82563_REG(0, 22) /* Page Select */
+#define GG82563_PHY_SPEC_CTRL_2 \
+ GG82563_REG(0, 26) /* PHY Specific Control 2 */
+#define GG82563_PHY_PAGE_SELECT_ALT \
+ GG82563_REG(0, 29) /* Alternate Page Select */
+
+#define GG82563_PHY_MAC_SPEC_CTRL \
+ GG82563_REG(2, 21) /* MAC Specific Control Register */
+
+#define GG82563_PHY_DSP_DISTANCE \
+ GG82563_REG(5, 26) /* DSP Distance */
+
+/* Page 193 - Port Control Registers */
+#define GG82563_PHY_KMRN_MODE_CTRL \
+ GG82563_REG(193, 16) /* Kumeran Mode Control */
+#define GG82563_PHY_PWR_MGMT_CTRL \
+ GG82563_REG(193, 20) /* Power Management Control */
+
+/* Page 194 - KMRN Registers */
+#define GG82563_PHY_INBAND_CTRL \
+ GG82563_REG(194, 18) /* Inband Control */
+
+/* MDI Control */
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE 0x04000000
+#define E1000_MDIC_OP_READ 0x08000000
+#define E1000_MDIC_READY 0x10000000
+#define E1000_MDIC_ERROR 0x40000000
+
+/* SerDes Control */
+#define E1000_GEN_POLL_TIMEOUT 640
+
+#endif /* _E1000_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
new file mode 100644
index 000000000000..7877b9c26edb
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -0,0 +1,741 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _E1000_H_
+#define _E1000_H_
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/crc32.h>
+#include <linux/if_vlan.h>
+
+#include "hw.h"
+
+struct e1000_info;
+
+#define e_dbg(format, arg...) \
+ netdev_dbg(hw->adapter->netdev, format, ## arg)
+#define e_err(format, arg...) \
+ netdev_err(adapter->netdev, format, ## arg)
+#define e_info(format, arg...) \
+ netdev_info(adapter->netdev, format, ## arg)
+#define e_warn(format, arg...) \
+ netdev_warn(adapter->netdev, format, ## arg)
+#define e_notice(format, arg...) \
+ netdev_notice(adapter->netdev, format, ## arg)
+
+
+/* Interrupt modes, as used by the IntMode parameter */
+#define E1000E_INT_MODE_LEGACY 0
+#define E1000E_INT_MODE_MSI 1
+#define E1000E_INT_MODE_MSIX 2
+
+/* Tx/Rx descriptor defines */
+#define E1000_DEFAULT_TXD 256
+#define E1000_MAX_TXD 4096
+#define E1000_MIN_TXD 64
+
+#define E1000_DEFAULT_RXD 256
+#define E1000_MAX_RXD 4096
+#define E1000_MIN_RXD 64
+
+#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
+#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
+
+/* Early Receive defines */
+#define E1000_ERT_2048 0x100
+
+#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
+
+/* How many Tx descriptors do we need before calling netif_wake_queue? */
+/* How many Rx buffers do we bundle into one write to the hardware? */
+#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define AUTO_ALL_MODES 0
+#define E1000_EEPROM_APME 0x0400
+
+#define E1000_MNG_VLAN_NONE (-1)
+
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
+#define DEFAULT_JUMBO 9234
+
+/* BM/HV Specific Registers */
+#define BM_PORT_CTRL_PAGE 769
+
+#define PHY_UPPER_SHIFT 21
+#define BM_PHY_REG(page, reg) \
+ (((reg) & MAX_PHY_REG_ADDRESS) |\
+ (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
+ (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
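+/*
+ * Worked example (illustrative; BM_WUC_PAGE comes from hw.h): for a
+ * register number above the 5-bit IEEE space, BM_PHY_REG() keeps the low
+ * five bits in place and parks the excess above the page field, e.g.
+ *
+ * BM_PHY_REG(page, 128) = 0 | ((page & 0xFFFF) << 5) | (128 << 16)
+ *
+ * so BM_MTA(0) below resolves to (page << 5) | 0x800000.
+ */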
+
+/* PHY Wakeup Registers and defines */
+#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
+#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
+#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
+#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
+#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
+#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
+#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
+#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
+#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
+#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
+
+#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
+#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
+#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */
+#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */
+#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */
+#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
+#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
+
+#define HV_STATS_PAGE 778
+#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */
+#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17)
+#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */
+#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19)
+#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. Count */
+#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21)
+#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */
+#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
+#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */
+#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26)
+#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
+#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28)
+#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */
+#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30)
+
+#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
+
+/* BM PHY Copper Specific Status */
+#define BM_CS_STATUS 17
+#define BM_CS_STATUS_LINK_UP 0x0400
+#define BM_CS_STATUS_RESOLVED 0x0800
+#define BM_CS_STATUS_SPEED_MASK 0xC000
+#define BM_CS_STATUS_SPEED_1000 0x8000
+
+/* 82577 Mobile Phy Status Register */
+#define HV_M_STATUS 26
+#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
+#define HV_M_STATUS_SPEED_MASK 0x0300
+#define HV_M_STATUS_SPEED_1000 0x0200
+#define HV_M_STATUS_LINK_UP 0x0040
+
+#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
+#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
+
+/* Time to wait before putting the device into D3 if there's no link (in ms). */
+#define LINK_TIMEOUT 100
+
+#define DEFAULT_RDTR 0
+#define DEFAULT_RADV 8
+#define BURST_RDTR 0x20
+#define BURST_RADV 0x20
+
+/*
+ * In the case of WTHRESH, at least the 82571/2 hardware appears to
+ * write back 4 descriptors when WTHRESH=5 and 3 descriptors when
+ * WTHRESH=4; since we want 64 bytes at a time written back, set
+ * it to 5.
+ */
+#define E1000_TXDCTL_DMA_BURST_ENABLE \
+ (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
+ E1000_TXDCTL_COUNT_DESC | \
+ (5 << 16) | /* wthresh must be +1 more than desired */\
+ (1 << 8) | /* hthresh */ \
+ 0x1f) /* pthresh */
+
+#define E1000_RXDCTL_DMA_BURST_ENABLE \
+ (0x01000000 | /* set descriptor granularity */ \
+ (4 << 16) | /* set writeback threshold */ \
+ (4 << 8) | /* set host threshold */ \
+ 0x20) /* set prefetch threshold */
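+/*
+ * Worked values (illustrative only): the Tx macro above expands to
+ *
+ * E1000_TXDCTL_GRAN | E1000_TXDCTL_COUNT_DESC | (5 << 16) | (1 << 8) |
+ * 0x1f = 0x0145011f (GRAN=1, COUNT_DESC=1, WTHRESH=5, HTHRESH=1,
+ * PTHRESH=31), and the Rx macro to 0x01040420 (GRAN=1, WTHRESH=4,
+ * HTHRESH=4, PTHRESH=32).
+ */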
+
+#define E1000_TIDV_FPD (1 << 31)
+#define E1000_RDTR_FPD (1 << 31)
+
+enum e1000_boards {
+ board_82571,
+ board_82572,
+ board_82573,
+ board_82574,
+ board_82583,
+ board_80003es2lan,
+ board_ich8lan,
+ board_ich9lan,
+ board_ich10lan,
+ board_pchlan,
+ board_pch2lan,
+};
+
+struct e1000_ps_page {
+ struct page *page;
+ u64 dma; /* must be u64 - written to hw */
+};
+
+/*
+ * wrappers around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct e1000_buffer {
+ dma_addr_t dma;
+ struct sk_buff *skb;
+ union {
+ /* Tx */
+ struct {
+ unsigned long time_stamp;
+ u16 length;
+ u16 next_to_watch;
+ unsigned int segs;
+ unsigned int bytecount;
+ u16 mapped_as_page;
+ };
+ /* Rx */
+ struct {
+ /* arrays of page information for packet split */
+ struct e1000_ps_page *ps_pages;
+ struct page *page;
+ };
+ };
+};
+
+struct e1000_ring {
+ void *desc; /* pointer to ring memory */
+ dma_addr_t dma; /* phys address of ring */
+ unsigned int size; /* length of ring in bytes */
+ unsigned int count; /* number of desc. in ring */
+
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ u16 head;
+ u16 tail;
+
+ /* array of buffer information structs */
+ struct e1000_buffer *buffer_info;
+
+ char name[IFNAMSIZ + 5];
+ u32 ims_val;
+ u32 itr_val;
+ u16 itr_register;
+ int set_itr;
+
+ struct sk_buff *rx_skb_top;
+};
+
+/* PHY register snapshot values */
+struct e1000_phy_regs {
+ u16 bmcr; /* basic mode control register */
+ u16 bmsr; /* basic mode status register */
+ u16 advertise; /* auto-negotiation advertisement */
+ u16 lpa; /* link partner ability register */
+ u16 expansion; /* auto-negotiation expansion reg */
+ u16 ctrl1000; /* 1000BASE-T control register */
+ u16 stat1000; /* 1000BASE-T status register */
+ u16 estatus; /* extended status register */
+};
+
+/* board specific private data structure */
+struct e1000_adapter {
+ struct timer_list watchdog_timer;
+ struct timer_list phy_info_timer;
+ struct timer_list blink_timer;
+
+ struct work_struct reset_task;
+ struct work_struct watchdog_task;
+
+ const struct e1000_info *ei;
+
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u32 bd_number;
+ u32 rx_buffer_len;
+ u16 mng_vlan_id;
+ u16 link_speed;
+ u16 link_duplex;
+ u16 eeprom_vers;
+
+ /* track device up/down/testing state */
+ unsigned long state;
+
+ /* Interrupt Throttle Rate */
+ u32 itr;
+ u32 itr_setting;
+ u16 tx_itr;
+ u16 rx_itr;
+
+ /*
+ * Tx
+ */
+ struct e1000_ring *tx_ring /* One per active queue */
+ ____cacheline_aligned_in_smp;
+
+ struct napi_struct napi;
+
+ unsigned int restart_queue;
+ u32 txd_cmd;
+
+ bool detect_tx_hung;
+ u8 tx_timeout_factor;
+
+ u32 tx_int_delay;
+ u32 tx_abs_int_delay;
+
+ unsigned int total_tx_bytes;
+ unsigned int total_tx_packets;
+ unsigned int total_rx_bytes;
+ unsigned int total_rx_packets;
+
+ /* Tx stats */
+ u64 tpt_old;
+ u64 colc_old;
+ u32 gotc;
+ u64 gotc_old;
+ u32 tx_timeout_count;
+ u32 tx_fifo_head;
+ u32 tx_head_addr;
+ u32 tx_fifo_size;
+ u32 tx_dma_failed;
+
+ /*
+ * Rx
+ */
+ bool (*clean_rx) (struct e1000_adapter *adapter,
+ int *work_done, int work_to_do)
+ ____cacheline_aligned_in_smp;
+ void (*alloc_rx_buf) (struct e1000_adapter *adapter,
+ int cleaned_count, gfp_t gfp);
+ struct e1000_ring *rx_ring;
+
+ u32 rx_int_delay;
+ u32 rx_abs_int_delay;
+
+ /* Rx stats */
+ u64 hw_csum_err;
+ u64 hw_csum_good;
+ u64 rx_hdr_split;
+ u32 gorc;
+ u64 gorc_old;
+ u32 alloc_rx_buff_failed;
+ u32 rx_dma_failed;
+
+ unsigned int rx_ps_pages;
+ u16 rx_ps_bsize0;
+ u32 max_frame_size;
+ u32 min_frame_size;
+
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
+ /* structs defined in e1000_hw.h */
+ struct e1000_hw hw;
+
+ spinlock_t stats64_lock;
+ struct e1000_hw_stats stats;
+ struct e1000_phy_info phy_info;
+ struct e1000_phy_stats phy_stats;
+
+ /* Snapshot of PHY registers */
+ struct e1000_phy_regs phy_regs;
+
+ struct e1000_ring test_tx_ring;
+ struct e1000_ring test_rx_ring;
+ u32 test_icr;
+
+ u32 msg_enable;
+ unsigned int num_vectors;
+ struct msix_entry *msix_entries;
+ int int_mode;
+ u32 eiac_mask;
+
+ u32 eeprom_wol;
+ u32 wol;
+ u32 pba;
+ u32 max_hw_frame_size;
+
+ bool fc_autoneg;
+
+ unsigned int flags;
+ unsigned int flags2;
+ struct work_struct downshift_task;
+ struct work_struct update_phy_task;
+ struct work_struct print_hang_task;
+
+ bool idle_check;
+ int phy_hang_count;
+};
+
+struct e1000_info {
+ enum e1000_mac_type mac;
+ unsigned int flags;
+ unsigned int flags2;
+ u32 pba;
+ u32 max_hw_frame_size;
+ s32 (*get_variants)(struct e1000_adapter *);
+ const struct e1000_mac_operations *mac_ops;
+ const struct e1000_phy_operations *phy_ops;
+ const struct e1000_nvm_operations *nvm_ops;
+};
+
+/* hardware capability, feature, and workaround flags */
+#define FLAG_HAS_AMT (1 << 0)
+#define FLAG_HAS_FLASH (1 << 1)
+#define FLAG_HAS_HW_VLAN_FILTER (1 << 2)
+#define FLAG_HAS_WOL (1 << 3)
+#define FLAG_HAS_ERT (1 << 4)
+#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5)
+#define FLAG_HAS_SWSM_ON_LOAD (1 << 6)
+#define FLAG_HAS_JUMBO_FRAMES (1 << 7)
+#define FLAG_READ_ONLY_NVM (1 << 8)
+#define FLAG_IS_ICH (1 << 9)
+#define FLAG_HAS_MSIX (1 << 10)
+#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
+#define FLAG_IS_QUAD_PORT_A (1 << 12)
+#define FLAG_IS_QUAD_PORT (1 << 13)
+#define FLAG_TIPG_MEDIUM_FOR_80003ESLAN (1 << 14)
+#define FLAG_APME_IN_WUC (1 << 15)
+#define FLAG_APME_IN_CTRL3 (1 << 16)
+#define FLAG_APME_CHECK_PORT_B (1 << 17)
+#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18)
+#define FLAG_NO_WAKE_UCAST (1 << 19)
+#define FLAG_MNG_PT_ENABLED (1 << 20)
+#define FLAG_RESET_OVERWRITES_LAA (1 << 21)
+#define FLAG_TARC_SPEED_MODE_BIT (1 << 22)
+#define FLAG_TARC_SET_BIT_ZERO (1 << 23)
+#define FLAG_RX_NEEDS_RESTART (1 << 24)
+#define FLAG_LSC_GIG_SPEED_DROP (1 << 25)
+#define FLAG_SMART_POWER_DOWN (1 << 26)
+#define FLAG_MSI_ENABLED (1 << 27)
+/* reserved (1 << 28) */
+#define FLAG_TSO_FORCE (1 << 29)
+#define FLAG_RX_RESTART_NOW (1 << 30)
+#define FLAG_MSI_TEST_FAILED (1 << 31)
+
+#define FLAG2_CRC_STRIPPING (1 << 0)
+#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
+#define FLAG2_IS_DISCARDING (1 << 2)
+#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
+#define FLAG2_HAS_PHY_STATS (1 << 4)
+#define FLAG2_HAS_EEE (1 << 5)
+#define FLAG2_DMA_BURST (1 << 6)
+#define FLAG2_DISABLE_ASPM_L0S (1 << 7)
+#define FLAG2_DISABLE_AIM (1 << 8)
+#define FLAG2_CHECK_PHY_HANG (1 << 9)
+#define FLAG2_NO_DISABLE_RX (1 << 10)
+#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11)
+
+#define E1000_RX_DESC_PS(R, i) \
+ (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
+#define E1000_RX_DESC_EXT(R, i) \
+ (&(((union e1000_rx_desc_extended *)((R).desc))[i]))
+#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
+#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
+#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc)
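+/*
+ * Hedged usage sketch (not part of this patch): ring entries are fetched
+ * by index through these casts, e.g. in a transmit path:
+ *
+ * struct e1000_tx_desc *txd = E1000_TX_DESC(*tx_ring, i);
+ * txd->buffer_addr = cpu_to_le64(buffer_info->dma);
+ *
+ * with tx_ring and buffer_info as in the structures defined above.
+ */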
+
+enum e1000_state_t {
+ __E1000_TESTING,
+ __E1000_RESETTING,
+ __E1000_DOWN
+};
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+extern char e1000e_driver_name[];
+extern const char e1000e_driver_version[];
+
+extern void e1000e_check_options(struct e1000_adapter *adapter);
+extern void e1000e_set_ethtool_ops(struct net_device *netdev);
+
+extern int e1000e_up(struct e1000_adapter *adapter);
+extern void e1000e_down(struct e1000_adapter *adapter);
+extern void e1000e_reinit_locked(struct e1000_adapter *adapter);
+extern void e1000e_reset(struct e1000_adapter *adapter);
+extern void e1000e_power_up_phy(struct e1000_adapter *adapter);
+extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter);
+extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
+extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
+extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
+extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
+extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
+extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
+
+extern unsigned int copybreak;
+
+extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw);
+
+extern const struct e1000_info e1000_82571_info;
+extern const struct e1000_info e1000_82572_info;
+extern const struct e1000_info e1000_82573_info;
+extern const struct e1000_info e1000_82574_info;
+extern const struct e1000_info e1000_82583_info;
+extern const struct e1000_info e1000_ich8_info;
+extern const struct e1000_info e1000_ich9_info;
+extern const struct e1000_info e1000_ich10_info;
+extern const struct e1000_info e1000_pch_info;
+extern const struct e1000_info e1000_pch2_info;
+extern const struct e1000_info e1000_es2_info;
+
+extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+
+extern s32 e1000e_commit_phy(struct e1000_hw *hw);
+
+extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
+
+extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
+extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
+
+extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
+extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+ bool state);
+extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
+extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
+extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
+extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
+extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
+extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
+extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
+
+extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
+extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
+extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
+extern s32 e1000e_setup_led_generic(struct e1000_hw *hw);
+extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
+extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
+extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
+extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
+extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+extern void e1000_set_lan_id_single_port(struct e1000_hw *hw);
+extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
+extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
+extern s32 e1000e_id_led_init(struct e1000_hw *hw);
+extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
+extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
+extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
+extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
+extern s32 e1000e_setup_link(struct e1000_hw *hw);
+extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
+extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
+extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
+ u8 *mc_addr_list,
+ u32 mc_addr_count);
+extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
+extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
+extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
+extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
+extern void e1000e_config_collision_dist(struct e1000_hw *hw);
+extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
+extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
+extern s32 e1000e_blink_led_generic(struct e1000_hw *hw);
+extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
+extern void e1000e_reset_adaptive(struct e1000_hw *hw);
+extern void e1000e_update_adaptive(struct e1000_hw *hw);
+
+extern s32 e1000e_setup_copper_link(struct e1000_hw *hw);
+extern s32 e1000e_get_phy_id(struct e1000_hw *hw);
+extern void e1000e_put_hw_semaphore(struct e1000_hw *hw);
+extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
+extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
+extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
+extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
+extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
+extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
+ u16 data);
+extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
+extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+extern s32 e1000e_get_cfg_done(struct e1000_hw *hw);
+extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
+extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
+extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
+extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
+extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
+extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
+ u16 *phy_reg);
+extern s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
+ u16 *phy_reg);
+extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
+extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
+extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
+ u16 data);
+extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success);
+extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
+extern void e1000_power_up_phy_copper(struct e1000_hw *hw);
+extern void e1000_power_down_phy_copper(struct e1000_hw *hw);
+extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_check_downshift(struct e1000_hw *hw);
+extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
+ u16 data);
+extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
+ u16 data);
+extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
+extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
+extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
+extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
+extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
+extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
+
+extern s32 e1000_check_polarity_m88(struct e1000_hw *hw);
+extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
+extern s32 e1000_check_polarity_ife(struct e1000_hw *hw);
+extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
+extern s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+extern bool e1000_check_phy_82574(struct e1000_hw *hw);
+
+static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+ return hw->phy.ops.reset(hw);
+}
+
+static inline s32 e1000_check_reset_block(struct e1000_hw *hw)
+{
+ return hw->phy.ops.check_reset_block(hw);
+}
+
+static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return hw->phy.ops.read_reg(hw, offset, data);
+}
+
+static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return hw->phy.ops.write_reg(hw, offset, data);
+}
+
+static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
+{
+ return hw->phy.ops.get_cable_length(hw);
+}
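+
+/*
+ * Hedged usage sketch (not part of this patch): a link check through the
+ * wrappers above would read the IEEE status register twice, because the
+ * link bit is latched low, and then test MII_SR_LINK_STATUS:
+ *
+ * u16 bmsr;
+ * e1e_rphy(hw, PHY_STATUS, &bmsr);
+ * e1e_rphy(hw, PHY_STATUS, &bmsr);
+ * link_up = !!(bmsr & MII_SR_LINK_STATUS);
+ */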
+
+extern s32 e1000e_acquire_nvm(struct e1000_hw *hw);
+extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
+extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
+extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
+extern void e1000e_release_nvm(struct e1000_hw *hw);
+extern void e1000e_reload_nvm(struct e1000_hw *hw);
+extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
+
+static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.read_mac_addr)
+ return hw->mac.ops.read_mac_addr(hw);
+
+ return e1000_read_mac_addr_generic(hw);
+}
+
+static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
+{
+ return hw->nvm.ops.validate(hw);
+}
+
+static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
+{
+ return hw->nvm.ops.update(hw);
+}
+
+static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ return hw->nvm.ops.read(hw, offset, words, data);
+}
+
+static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ return hw->nvm.ops.write(hw, offset, words, data);
+}
+
+static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
+{
+ return hw->phy.ops.get_info(hw);
+}
+
+static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw)
+{
+ return hw->mac.ops.check_mng_mode(hw);
+}
+
+extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
+extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
+extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
+
+static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
+{
+ return readl(hw->hw_addr + reg);
+}
+
+static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
+{
+ writel(val, hw->hw_addr + reg);
+}
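+
+/*
+ * Hedged usage note: the er32()/ew32() accessors used elsewhere in the
+ * driver (e.g. er32(STATUS) in ethtool.c) are assumed to be thin macros
+ * over these helpers, roughly:
+ *
+ * #define er32(reg) __er32(hw, E1000_##reg)
+ * #define ew32(reg, val) __ew32(hw, E1000_##reg, (val))
+ */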
+
+#endif /* _E1000_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
new file mode 100644
index 000000000000..d96d0b0e08cf
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -0,0 +1,1995 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for e1000 */
+
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#include "e1000.h"
+
+enum {NETDEV_STATS, E1000_STATS};
+
+struct e1000_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int type;
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define E1000_STAT(str, m) { \
+ .stat_string = str, \
+ .type = E1000_STATS, \
+ .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
+ .stat_offset = offsetof(struct e1000_adapter, m) }
+#define E1000_NETDEV_STAT(str, m) { \
+ .stat_string = str, \
+ .type = NETDEV_STATS, \
+ .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \
+ .stat_offset = offsetof(struct rtnl_link_stats64, m) }
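+
+/*
+ * Hedged sketch (not part of this patch): the ethtool stats getter
+ * typically walks this table and reads each counter through its recorded
+ * offset, along the lines of:
+ *
+ * char *p = (e1000_gstrings_stats[i].type == E1000_STATS) ?
+ * 	(char *)adapter : (char *)&net_stats;
+ * p += e1000_gstrings_stats[i].stat_offset;
+ * data[i] = (e1000_gstrings_stats[i].sizeof_stat == sizeof(u64)) ?
+ * 	*(u64 *)p : *(u32 *)p;
+ *
+ * where net_stats stands for a local struct rtnl_link_stats64 snapshot.
+ */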
+
+static const struct e1000_stats e1000_gstrings_stats[] = {
+ E1000_STAT("rx_packets", stats.gprc),
+ E1000_STAT("tx_packets", stats.gptc),
+ E1000_STAT("rx_bytes", stats.gorc),
+ E1000_STAT("tx_bytes", stats.gotc),
+ E1000_STAT("rx_broadcast", stats.bprc),
+ E1000_STAT("tx_broadcast", stats.bptc),
+ E1000_STAT("rx_multicast", stats.mprc),
+ E1000_STAT("tx_multicast", stats.mptc),
+ E1000_NETDEV_STAT("rx_errors", rx_errors),
+ E1000_NETDEV_STAT("tx_errors", tx_errors),
+ E1000_NETDEV_STAT("tx_dropped", tx_dropped),
+ E1000_STAT("multicast", stats.mprc),
+ E1000_STAT("collisions", stats.colc),
+ E1000_NETDEV_STAT("rx_length_errors", rx_length_errors),
+ E1000_NETDEV_STAT("rx_over_errors", rx_over_errors),
+ E1000_STAT("rx_crc_errors", stats.crcerrs),
+ E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
+ E1000_STAT("rx_no_buffer_count", stats.rnbc),
+ E1000_STAT("rx_missed_errors", stats.mpc),
+ E1000_STAT("tx_aborted_errors", stats.ecol),
+ E1000_STAT("tx_carrier_errors", stats.tncrs),
+ E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
+ E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
+ E1000_STAT("tx_window_errors", stats.latecol),
+ E1000_STAT("tx_abort_late_coll", stats.latecol),
+ E1000_STAT("tx_deferred_ok", stats.dc),
+ E1000_STAT("tx_single_coll_ok", stats.scc),
+ E1000_STAT("tx_multi_coll_ok", stats.mcc),
+ E1000_STAT("tx_timeout_count", tx_timeout_count),
+ E1000_STAT("tx_restart_queue", restart_queue),
+ E1000_STAT("rx_long_length_errors", stats.roc),
+ E1000_STAT("rx_short_length_errors", stats.ruc),
+ E1000_STAT("rx_align_errors", stats.algnerrc),
+ E1000_STAT("tx_tcp_seg_good", stats.tsctc),
+ E1000_STAT("tx_tcp_seg_failed", stats.tsctfc),
+ E1000_STAT("rx_flow_control_xon", stats.xonrxc),
+ E1000_STAT("rx_flow_control_xoff", stats.xoffrxc),
+ E1000_STAT("tx_flow_control_xon", stats.xontxc),
+ E1000_STAT("tx_flow_control_xoff", stats.xofftxc),
+ E1000_STAT("rx_long_byte_count", stats.gorc),
+ E1000_STAT("rx_csum_offload_good", hw_csum_good),
+ E1000_STAT("rx_csum_offload_errors", hw_csum_err),
+ E1000_STAT("rx_header_split", rx_hdr_split),
+ E1000_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
+ E1000_STAT("tx_smbus", stats.mgptc),
+ E1000_STAT("rx_smbus", stats.mgprc),
+ E1000_STAT("dropped_smbus", stats.mgpdc),
+ E1000_STAT("rx_dma_failed", rx_dma_failed),
+ E1000_STAT("tx_dma_failed", tx_dma_failed),
+};
+
+#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN)
+static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)", "Eeprom test (offline)",
+ "Interrupt test (offline)", "Loopback test (offline)",
+ "Link test (on/offline)"
+};
+#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
+
+static int e1000_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 speed;
+
+ if (hw->phy.media_type == e1000_media_type_copper) {
+
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
+ if (hw->phy.type == e1000_phy_ife)
+ ecmd->supported &= ~SUPPORTED_1000baseT_Full;
+ ecmd->advertising = ADVERTISED_TP;
+
+ if (hw->mac.autoneg == 1) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ /* the e1000 autoneg seems to match ethtool nicely */
+ ecmd->advertising |= hw->phy.autoneg_advertised;
+ }
+
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = hw->phy.addr;
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ } else {
+ ecmd->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
+
+ ecmd->advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg);
+
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ }
+
+ speed = -1;
+ ecmd->duplex = -1;
+
+ if (netif_running(netdev)) {
+ if (netif_carrier_ok(netdev)) {
+ speed = adapter->link_speed;
+ ecmd->duplex = adapter->link_duplex - 1;
+ }
+ } else {
+ u32 status = er32(STATUS);
+ if (status & E1000_STATUS_LU) {
+ if (status & E1000_STATUS_SPEED_1000)
+ speed = SPEED_1000;
+ else if (status & E1000_STATUS_SPEED_100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
+ if (status & E1000_STATUS_FD)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ }
+ }
+
+ ethtool_cmd_speed_set(ecmd, speed);
+ ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
+ hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ /* MDI-X => 2; MDI =>1; Invalid =>0 */
+ if ((hw->phy.media_type == e1000_media_type_copper) &&
+ netif_carrier_ok(netdev))
+ ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
+ ETH_TP_MDI;
+ else
+ ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+ return 0;
+}
+
+static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
+{
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+
+ mac->autoneg = 0;
+
+ /* Make sure dplx is at most 1 bit and lsb of speed is not set
+ * for the switch() below to work */
+ if ((spd & 1) || (dplx & ~1))
+ goto err_inval;
+
+ /* Fiber NICs only allow 1000 Mbps full duplex */
+ if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
+ (spd != SPEED_1000 || dplx != DUPLEX_FULL)) {
+ goto err_inval;
+ }
+
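+ /*
+ * Illustrative note: with DUPLEX_HALF == 0 and DUPLEX_FULL == 1, the
+ * sum spd + dplx is unique per combination (e.g. SPEED_100 +
+ * DUPLEX_FULL = 101), so a flat switch() suffices.
+ */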
+ switch (spd + dplx) {
+ case SPEED_10 + DUPLEX_HALF:
+ mac->forced_speed_duplex = ADVERTISE_10_HALF;
+ break;
+ case SPEED_10 + DUPLEX_FULL:
+ mac->forced_speed_duplex = ADVERTISE_10_FULL;
+ break;
+ case SPEED_100 + DUPLEX_HALF:
+ mac->forced_speed_duplex = ADVERTISE_100_HALF;
+ break;
+ case SPEED_100 + DUPLEX_FULL:
+ mac->forced_speed_duplex = ADVERTISE_100_FULL;
+ break;
+ case SPEED_1000 + DUPLEX_FULL:
+ mac->autoneg = 1;
+ adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+ break;
+ case SPEED_1000 + DUPLEX_HALF: /* not supported */
+ default:
+ goto err_inval;
+ }
+ return 0;
+
+err_inval:
+ e_err("Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
+}
+
+static int e1000_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ /*
+ * When SoL/IDER sessions are active, autoneg/speed/duplex
+ * cannot be changed
+ */
+ if (e1000_check_reset_block(hw)) {
+ e_err("Cannot change link characteristics when SoL/IDER is "
+ "active.\n");
+ return -EINVAL;
+ }
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ hw->mac.autoneg = 1;
+ if (hw->phy.media_type == e1000_media_type_fiber)
+ hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg;
+ else
+ hw->phy.autoneg_advertised = ecmd->advertising |
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
+ ecmd->advertising = hw->phy.autoneg_advertised;
+ if (adapter->fc_autoneg)
+ hw->fc.requested_mode = e1000_fc_default;
+ } else {
+ u32 speed = ethtool_cmd_speed(ecmd);
+ if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
+ clear_bit(__E1000_RESETTING, &adapter->state);
+ return -EINVAL;
+ }
+ }
+
+ /* reset the link */
+
+ if (netif_running(adapter->netdev)) {
+ e1000e_down(adapter);
+ e1000e_up(adapter);
+ } else {
+ e1000e_reset(adapter);
+ }
+
+ clear_bit(__E1000_RESETTING, &adapter->state);
+ return 0;
+}
+
+static void e1000_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ pause->autoneg =
+ (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ if (hw->fc.current_mode == e1000_fc_rx_pause) {
+ pause->rx_pause = 1;
+ } else if (hw->fc.current_mode == e1000_fc_tx_pause) {
+ pause->tx_pause = 1;
+ } else if (hw->fc.current_mode == e1000_fc_full) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+static int e1000_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int retval = 0;
+
+ adapter->fc_autoneg = pause->autoneg;
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+ hw->fc.requested_mode = e1000_fc_default;
+ if (netif_running(adapter->netdev)) {
+ e1000e_down(adapter);
+ e1000e_up(adapter);
+ } else {
+ e1000e_reset(adapter);
+ }
+ } else {
+ if (pause->rx_pause && pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_full;
+ else if (pause->rx_pause && !pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_rx_pause;
+ else if (!pause->rx_pause && pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_tx_pause;
+ else if (!pause->rx_pause && !pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_none;
+
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ retval = hw->mac.ops.setup_link(hw);
+ /* implicit goto out */
+ } else {
+ retval = e1000e_force_mac_fc(hw);
+ if (retval)
+ goto out;
+ e1000e_set_fc_watermarks(hw);
+ }
+ }
+
+out:
+ clear_bit(__E1000_RESETTING, &adapter->state);
+ return retval;
+}
+
+static u32 e1000_get_msglevel(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void e1000_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+static int e1000_get_regs_len(struct net_device *netdev)
+{
+#define E1000_REGS_LEN 32 /* overestimate */
+ return E1000_REGS_LEN * sizeof(u32);
+}
+
+static void e1000_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u16 phy_data;
+
+ memset(p, 0, E1000_REGS_LEN * sizeof(u32));
+
+ regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
+ adapter->pdev->device;
+
+ regs_buff[0] = er32(CTRL);
+ regs_buff[1] = er32(STATUS);
+
+ regs_buff[2] = er32(RCTL);
+ regs_buff[3] = er32(RDLEN);
+ regs_buff[4] = er32(RDH);
+ regs_buff[5] = er32(RDT);
+ regs_buff[6] = er32(RDTR);
+
+ regs_buff[7] = er32(TCTL);
+ regs_buff[8] = er32(TDLEN);
+ regs_buff[9] = er32(TDH);
+ regs_buff[10] = er32(TDT);
+ regs_buff[11] = er32(TIDV);
+
+ regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */
+
+ /* ethtool doesn't use anything past this point, so all this
+ * code is likely legacy junk for apps that may or may not
+ * exist */
+ if (hw->phy.type == e1000_phy_m88) {
+ e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ regs_buff[13] = (u32)phy_data; /* cable length */
+ regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
+ regs_buff[18] = regs_buff[13]; /* cable polarity */
+ regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */
+ regs_buff[20] = regs_buff[17]; /* polarity correction */
+ /* phy receive errors */
+ regs_buff[22] = adapter->phy_stats.receive_errors;
+ regs_buff[23] = regs_buff[13]; /* mdix mode */
+ }
+ regs_buff[21] = 0; /* was idle_errors */
+ e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
+ regs_buff[24] = (u32)phy_data; /* phy local receiver status */
+ regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
+}
+
+static int e1000_get_eeprom_len(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ return adapter->hw.nvm.word_size * 2;
+}
+
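+/*
+ * The NVM is word (16-bit) addressable while the ethtool interface is
+ * byte oriented: byte offsets map to an inclusive word range (e.g.
+ * offset 3, len 4 covers words 1-3), the words are read in one burst
+ * on SPI parts or one at a time otherwise, byte-swapped to CPU order,
+ * and the final memcpy skips the leading byte of an odd offset.
+ */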
+static int e1000_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ int first_word;
+ int last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc(sizeof(u16) *
+ (last_word - first_word + 1), GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+ ret_val = e1000_read_nvm(hw, first_word,
+ last_word - first_word + 1,
+ eeprom_buff);
+ } else {
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ ret_val = e1000_read_nvm(hw, first_word + i, 1,
+ &eeprom_buff[i]);
+ if (ret_val)
+ break;
+ }
+ }
+
+ if (ret_val) {
+ /* a read error occurred, throw away the result */
+ memset(eeprom_buff, 0xff, sizeof(u16) *
+ (last_word - first_word + 1));
+ } else {
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+ }
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
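+/*
+ * Writes use the same word addressing, but a write starting or ending
+ * on an odd byte boundary must read-modify-write the partially
+ * covered first/last word so the untouched byte is preserved.  The
+ * NVM checksum is regenerated whenever the checksummed region or a
+ * shadow-RAM part (82573/82574/82583) is affected.
+ */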
+static int e1000_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ void *ptr;
+ int max_len;
+ int first_word;
+ int last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+ if (eeprom->magic !=
+     (adapter->pdev->vendor | (adapter->pdev->device << 16)))
+ return -EFAULT;
+
+ if (adapter->flags & FLAG_READ_ONLY_NVM)
+ return -EINVAL;
+
+ max_len = hw->nvm.word_size * 2;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = (void *)eeprom_buff;
+
+ if (eeprom->offset & 1) {
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]);
+ ptr++;
+ }
+ if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
+ /* need read/modify/write of last changed EEPROM word */
+ /* only the first byte of the word is being modified */
+ ret_val = e1000_read_nvm(hw, last_word, 1,
+ &eeprom_buff[last_word - first_word]);
+ }
+
+ if (ret_val)
+ goto out;
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+
+ ret_val = e1000_write_nvm(hw, first_word,
+ last_word - first_word + 1, eeprom_buff);
+
+ if (ret_val)
+ goto out;
+
+ /*
+ * Update the checksum over the first part of the EEPROM if needed
+ * and flush shadow RAM for applicable controllers
+ */
+ if ((first_word <= NVM_CHECKSUM_REG) ||
+ (hw->mac.type == e1000_82583) ||
+ (hw->mac.type == e1000_82574) ||
+ (hw->mac.type == e1000_82573))
+ ret_val = e1000e_update_nvm_checksum(hw);
+
+out:
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
+static void e1000_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ char firmware_version[32];
+
+ strncpy(drvinfo->driver, e1000e_driver_name,
+ sizeof(drvinfo->driver) - 1);
+ strncpy(drvinfo->version, e1000e_driver_version,
+ sizeof(drvinfo->version) - 1);
+
+ /*
+ * EEPROM image version # is reported as firmware version # for
+ * PCI-E controllers
+ */
+ snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
+ (adapter->eeprom_vers & 0xF000) >> 12,
+ (adapter->eeprom_vers & 0x0FF0) >> 4,
+ (adapter->eeprom_vers & 0x000F));
+
+ strncpy(drvinfo->fw_version, firmware_version,
+ sizeof(drvinfo->fw_version) - 1);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info) - 1);
+ drvinfo->regdump_len = e1000_get_regs_len(netdev);
+ drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
+}
+
+static void e1000_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+
+ ring->rx_max_pending = E1000_MAX_RXD;
+ ring->tx_max_pending = E1000_MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rx_ring->count;
+ ring->tx_pending = tx_ring->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
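+/*
+ * Ring resizing is done copy-then-swap: duplicate the ring structures,
+ * clamp and align the requested counts, allocate the new descriptor
+ * resources before freeing the old ones, and unwind through the error
+ * labels in reverse order so a failure leaves the device on its
+ * original rings.
+ */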
+static int e1000_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_ring *tx_ring, *tx_old;
+ struct e1000_ring *rx_ring, *rx_old;
+ int err;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (netif_running(adapter->netdev))
+ e1000e_down(adapter);
+
+ tx_old = adapter->tx_ring;
+ rx_old = adapter->rx_ring;
+
+ err = -ENOMEM;
+ tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL);
+ if (!tx_ring)
+ goto err_alloc_tx;
+
+ rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL);
+ if (!rx_ring)
+ goto err_alloc_rx;
+
+ adapter->tx_ring = tx_ring;
+ adapter->rx_ring = rx_ring;
+
+ rx_ring->count = max(ring->rx_pending, (u32)E1000_MIN_RXD);
+ rx_ring->count = min(rx_ring->count, (u32)(E1000_MAX_RXD));
+ rx_ring->count = ALIGN(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ tx_ring->count = max(ring->tx_pending, (u32)E1000_MIN_TXD);
+ tx_ring->count = min(tx_ring->count, (u32)(E1000_MAX_TXD));
+ tx_ring->count = ALIGN(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ if (netif_running(adapter->netdev)) {
+ /* Try to get new resources before deleting old */
+ err = e1000e_setup_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+ err = e1000e_setup_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /*
+ * restore the old in order to free it,
+ * then add in the new
+ */
+ adapter->rx_ring = rx_old;
+ adapter->tx_ring = tx_old;
+ e1000e_free_rx_resources(adapter);
+ e1000e_free_tx_resources(adapter);
+ kfree(tx_old);
+ kfree(rx_old);
+ adapter->rx_ring = rx_ring;
+ adapter->tx_ring = tx_ring;
+ err = e1000e_up(adapter);
+ if (err)
+ goto err_setup;
+ }
+
+ clear_bit(__E1000_RESETTING, &adapter->state);
+ return 0;
+err_setup_tx:
+ e1000e_free_rx_resources(adapter);
+err_setup_rx:
+ adapter->rx_ring = rx_old;
+ adapter->tx_ring = tx_old;
+ kfree(rx_ring);
+err_alloc_rx:
+ kfree(tx_ring);
+err_alloc_tx:
+ e1000e_up(adapter);
+err_setup:
+ clear_bit(__E1000_RESETTING, &adapter->state);
+ return err;
+}
+
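+/*
+ * Register diagnostics: reg_pattern_test() writes a series of bit
+ * patterns (masked to the register's writable bits) and verifies each
+ * reads back; reg_set_and_check() does a single write/read-back.  On
+ * failure the offending register offset is returned through *data.
+ */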
+static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
+ int reg, int offset, u32 mask, u32 write)
+{
+ u32 pat, val;
+ static const u32 test[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
+ E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
+ (test[pat] & write));
+ val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
+ if (val != (test[pat] & write & mask)) {
+ e_err("pattern test reg %04X failed: got 0x%08X "
+ "expected 0x%08X\n", reg + offset, val,
+ (test[pat] & write & mask));
+ *data = reg;
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
+ int reg, u32 mask, u32 write)
+{
+ u32 val;
+ __ew32(&adapter->hw, reg, write & mask);
+ val = __er32(&adapter->hw, reg);
+ if ((write & mask) != (val & mask)) {
+ e_err("set/check reg %04X test failed: got 0x%08X "
+ "expected 0x%08X\n", reg, (val & mask), (write & mask));
+ *data = reg;
+ return true;
+ }
+ return false;
+}
+#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
+ do { \
+ if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
+ return 1; \
+ } while (0)
+#define REG_PATTERN_TEST(reg, mask, write) \
+ REG_PATTERN_TEST_ARRAY(reg, 0, mask, write)
+
+#define REG_SET_AND_CHECK(reg, mask, write) \
+ do { \
+ if (reg_set_and_check(adapter, data, reg, mask, write)) \
+ return 1; \
+ } while (0)
+
+static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+ u32 value;
+ u32 before;
+ u32 after;
+ u32 i;
+ u32 toggle;
+ u32 mask;
+
+ /*
+ * The status register is Read Only, so a write should fail.
+ * Some bits that get toggled are ignored.
+ */
+ switch (mac->type) {
+ /* there are several bits on newer hardware that are r/w */
+ case e1000_82571:
+ case e1000_82572:
+ case e1000_80003es2lan:
+ toggle = 0x7FFFF3FF;
+ break;
+ default:
+ toggle = 0x7FFFF033;
+ break;
+ }
+
+ before = er32(STATUS);
+ value = (er32(STATUS) & toggle);
+ ew32(STATUS, toggle);
+ after = er32(STATUS) & toggle;
+ if (value != after) {
+ e_err("failed STATUS register test got: 0x%08X expected: "
+ "0x%08X\n", after, value);
+ *data = 1;
+ return 1;
+ }
+ /* restore previous status */
+ ew32(STATUS, before);
+
+ if (!(adapter->flags & FLAG_IS_ICH)) {
+ REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF);
+ }
+
+ REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF);
+ REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8);
+ REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
+ REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF);
+
+ REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);
+
+ before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE);
+ REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB);
+ REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);
+
+ REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+ if (!(adapter->flags & FLAG_IS_ICH))
+ REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+ REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
+ mask = 0x8003FFFF;
+ switch (mac->type) {
+ case e1000_ich10lan:
+ case e1000_pchlan:
+ case e1000_pch2lan:
+ mask |= (1 << 18);
+ break;
+ default:
+ break;
+ }
+ for (i = 0; i < mac->rar_entry_count; i++)
+ REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
+ mask, 0xFFFFFFFF);
+
+ for (i = 0; i < mac->mta_reg_count; i++)
+ REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);
+
+ *data = 0;
+ return 0;
+}
+
+static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
+{
+ u16 temp;
+ u16 checksum = 0;
+ u16 i;
+
+ *data = 0;
+ /* Read and add up the contents of the EEPROM */
+ for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
+ *data = 1;
+ return *data;
+ }
+ checksum += temp;
+ }
+
+ /* If Checksum is not Correct return error else test passed */
+ if ((checksum != (u16) NVM_SUM) && !(*data))
+ *data = 2;
+
+ return *data;
+}
+
+static irqreturn_t e1000_test_intr(int irq, void *data)
+{
+ struct net_device *netdev = (struct net_device *) data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ adapter->test_icr |= er32(ICR);
+
+ return IRQ_HANDLED;
+}
+
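+/*
+ * Interrupt diagnostic: each cause bit is exercised masked (no
+ * interrupt may post), unmasked (one must post) and, on non-shared
+ * lines, with all other causes forced (none may post).  MSI-X parts
+ * are temporarily dropped back to legacy interrupts for the test.
+ */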
+static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mask;
+ u32 shared_int = 1;
+ u32 irq = adapter->pdev->irq;
+ int i;
+ int ret_val = 0;
+ int int_mode = E1000E_INT_MODE_LEGACY;
+
+ *data = 0;
+
+ /* NOTE: we don't test MSI/MSI-X interrupts here, yet */
+ if (adapter->int_mode == E1000E_INT_MODE_MSIX) {
+ int_mode = adapter->int_mode;
+ e1000e_reset_interrupt_capability(adapter);
+ adapter->int_mode = E1000E_INT_MODE_LEGACY;
+ e1000e_set_interrupt_capability(adapter);
+ }
+ /* Hook up test interrupt handler just for this test */
+ if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
+ netdev)) {
+ shared_int = 0;
+ } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
+ netdev->name, netdev)) {
+ *data = 1;
+ ret_val = -1;
+ goto out;
+ }
+ e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
+
+ /* Disable all the interrupts */
+ ew32(IMC, 0xFFFFFFFF);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ /* Test each interrupt */
+ for (i = 0; i < 10; i++) {
+ /* Interrupt to test */
+ mask = 1 << i;
+
+ if (adapter->flags & FLAG_IS_ICH) {
+ switch (mask) {
+ case E1000_ICR_RXSEQ:
+ continue;
+ case 0x00000100:
+ if (adapter->hw.mac.type == e1000_ich8lan ||
+ adapter->hw.mac.type == e1000_ich9lan)
+ continue;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!shared_int) {
+ /*
+ * Disable the interrupt to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ ew32(IMC, mask);
+ ew32(ICS, mask);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ if (adapter->test_icr & mask) {
+ *data = 3;
+ break;
+ }
+ }
+
+ /*
+ * Enable the interrupt to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was not posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ ew32(IMS, mask);
+ ew32(ICS, mask);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ if (!(adapter->test_icr & mask)) {
+ *data = 4;
+ break;
+ }
+
+ if (!shared_int) {
+ /*
+ * Disable the other interrupts to be reported in
+ * the cause register and then force the other
+ * interrupts and see if any get posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ ew32(IMC, ~mask & 0x00007FFF);
+ ew32(ICS, ~mask & 0x00007FFF);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ if (adapter->test_icr) {
+ *data = 5;
+ break;
+ }
+ }
+ }
+
+ /* Disable all the interrupts */
+ ew32(IMC, 0xFFFFFFFF);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ /* Unhook test interrupt handler */
+ free_irq(irq, netdev);
+
+out:
+ if (int_mode == E1000E_INT_MODE_MSIX) {
+ e1000e_reset_interrupt_capability(adapter);
+ adapter->int_mode = int_mode;
+ e1000e_set_interrupt_capability(adapter);
+ }
+
+ return ret_val;
+}
+
+static void e1000_free_desc_rings(struct e1000_adapter *adapter)
+{
+ struct e1000_ring *tx_ring = &adapter->test_tx_ring;
+ struct e1000_ring *rx_ring = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ int i;
+
+ if (tx_ring->desc && tx_ring->buffer_info) {
+ for (i = 0; i < tx_ring->count; i++) {
+ if (tx_ring->buffer_info[i].dma)
+ dma_unmap_single(&pdev->dev,
+ tx_ring->buffer_info[i].dma,
+ tx_ring->buffer_info[i].length,
+ DMA_TO_DEVICE);
+ if (tx_ring->buffer_info[i].skb)
+ dev_kfree_skb(tx_ring->buffer_info[i].skb);
+ }
+ }
+
+ if (rx_ring->desc && rx_ring->buffer_info) {
+ for (i = 0; i < rx_ring->count; i++) {
+ if (rx_ring->buffer_info[i].dma)
+ dma_unmap_single(&pdev->dev,
+ rx_ring->buffer_info[i].dma,
+ 2048, DMA_FROM_DEVICE);
+ if (rx_ring->buffer_info[i].skb)
+ dev_kfree_skb(rx_ring->buffer_info[i].skb);
+ }
+ }
+
+ if (tx_ring->desc) {
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
+ tx_ring->desc = NULL;
+ }
+ if (rx_ring->desc) {
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
+ rx_ring->desc = NULL;
+ }
+
+ kfree(tx_ring->buffer_info);
+ tx_ring->buffer_info = NULL;
+ kfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
+}
+
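+/*
+ * Build minimal private Tx/Rx descriptor rings for the loopback test,
+ * bypassing the normal data path.  Each failure point returns its own
+ * small code (1-8) so a failed self-test identifies exactly which
+ * allocation or DMA mapping step went wrong.
+ */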
+static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
+{
+ struct e1000_ring *tx_ring = &adapter->test_tx_ring;
+ struct e1000_ring *rx_ring = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+ int i;
+ int ret_val;
+
+ /* Setup Tx descriptor ring and Tx buffers */
+
+ if (!tx_ring->count)
+ tx_ring->count = E1000_DEFAULT_TXD;
+
+ tx_ring->buffer_info = kcalloc(tx_ring->count,
+ sizeof(struct e1000_buffer),
+ GFP_KERNEL);
+ if (!(tx_ring->buffer_info)) {
+ ret_val = 1;
+ goto err_nomem;
+ }
+
+ tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc) {
+ ret_val = 2;
+ goto err_nomem;
+ }
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
+ ew32(TDBAH, ((u64) tx_ring->dma >> 32));
+ ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc));
+ ew32(TDH, 0);
+ ew32(TDT, 0);
+ ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR |
+ E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
+ E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
+
+ for (i = 0; i < tx_ring->count; i++) {
+ struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
+ struct sk_buff *skb;
+ unsigned int skb_size = 1024;
+
+ skb = alloc_skb(skb_size, GFP_KERNEL);
+ if (!skb) {
+ ret_val = 3;
+ goto err_nomem;
+ }
+ skb_put(skb, skb_size);
+ tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[i].length = skb->len;
+ tx_ring->buffer_info[i].dma =
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ tx_ring->buffer_info[i].dma)) {
+ ret_val = 4;
+ goto err_nomem;
+ }
+ tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
+ tx_desc->lower.data = cpu_to_le32(skb->len);
+ tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
+ E1000_TXD_CMD_IFCS |
+ E1000_TXD_CMD_RS);
+ tx_desc->upper.data = 0;
+ }
+
+ /* Setup Rx descriptor ring and Rx buffers */
+
+ if (!rx_ring->count)
+ rx_ring->count = E1000_DEFAULT_RXD;
+
+ rx_ring->buffer_info = kcalloc(rx_ring->count,
+ sizeof(struct e1000_buffer),
+ GFP_KERNEL);
+ if (!(rx_ring->buffer_info)) {
+ ret_val = 5;
+ goto err_nomem;
+ }
+
+ rx_ring->size = rx_ring->count * sizeof(union e1000_rx_desc_extended);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+ if (!rx_ring->desc) {
+ ret_val = 6;
+ goto err_nomem;
+ }
+ rx_ring->next_to_use = 0;
+ rx_ring->next_to_clean = 0;
+
+ rctl = er32(RCTL);
+ if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF));
+ ew32(RDBAH, ((u64) rx_ring->dma >> 32));
+ ew32(RDLEN, rx_ring->size);
+ ew32(RDH, 0);
+ ew32(RDT, 0);
+ rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
+ E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
+ E1000_RCTL_SBP | E1000_RCTL_SECRC |
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+ ew32(RCTL, rctl);
+
+ for (i = 0; i < rx_ring->count; i++) {
+ union e1000_rx_desc_extended *rx_desc;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL);
+ if (!skb) {
+ ret_val = 7;
+ goto err_nomem;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ rx_ring->buffer_info[i].skb = skb;
+ rx_ring->buffer_info[i].dma =
+ dma_map_single(&pdev->dev, skb->data, 2048,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ rx_ring->buffer_info[i].dma)) {
+ ret_val = 8;
+ goto err_nomem;
+ }
+ rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+ rx_desc->read.buffer_addr =
+ cpu_to_le64(rx_ring->buffer_info[i].dma);
+ memset(skb->data, 0x00, skb->len);
+ }
+
+ return 0;
+
+err_nomem:
+ e1000_free_desc_rings(adapter);
+ return ret_val;
+}
+
+static void e1000_phy_disable_receiver(struct e1000_adapter *adapter)
+{
+ /* Write out to PHY registers 29 and 30 to disable the Receiver. */
+ e1e_wphy(&adapter->hw, 29, 0x001F);
+ e1e_wphy(&adapter->hw, 30, 0x8FFC);
+ e1e_wphy(&adapter->hw, 29, 0x001A);
+ e1e_wphy(&adapter->hw, 30, 0x8FF0);
+}
+
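+/*
+ * Put the internal PHY into loopback: apply any PHY-specific
+ * workarounds, force 1000/full (100/full on IFE PHYs) with autoneg
+ * off, then mirror the forced speed/duplex into the MAC CTRL register.
+ */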
+static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_reg = 0;
+ u16 phy_reg = 0;
+ s32 ret_val = 0;
+
+ hw->mac.autoneg = 0;
+
+ if (hw->phy.type == e1000_phy_ife) {
+ /* force 100, set loopback */
+ e1e_wphy(hw, PHY_CONTROL, 0x6100);
+
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg = er32(CTRL);
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_100 |/* Force Speed to 100 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+
+ ew32(CTRL, ctrl_reg);
+ e1e_flush();
+ udelay(500);
+
+ return 0;
+ }
+
+ /* Specific PHY configuration for loopback */
+ switch (hw->phy.type) {
+ case e1000_phy_m88:
+ /* Auto-MDI/MDIX Off */
+ e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
+ /* reset to update Auto-MDI/MDIX */
+ e1e_wphy(hw, PHY_CONTROL, 0x9140);
+ /* autoneg off */
+ e1e_wphy(hw, PHY_CONTROL, 0x8140);
+ break;
+ case e1000_phy_gg82563:
+ e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
+ break;
+ case e1000_phy_bm:
+ /* Set Default MAC Interface speed to 1GB */
+ e1e_rphy(hw, PHY_REG(2, 21), &phy_reg);
+ phy_reg &= ~0x0007;
+ phy_reg |= 0x006;
+ e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
+ /* Assert SW reset for above settings to take effect */
+ e1000e_commit_phy(hw);
+ mdelay(1);
+ /* Force Full Duplex */
+ e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
+ e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C);
+ /* Set Link Up (in force link) */
+ e1e_rphy(hw, PHY_REG(776, 16), &phy_reg);
+ e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040);
+ /* Force Link */
+ e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
+ e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040);
+ /* Set Early Link Enable */
+ e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
+ e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400);
+ break;
+ case e1000_phy_82577:
+ case e1000_phy_82578:
+ /* Workaround: K1 must be disabled for stable 1Gbps operation */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ e_err("Cannot setup 1Gbps loopback.\n");
+ return ret_val;
+ }
+ e1000_configure_k1_ich8lan(hw, false);
+ hw->phy.ops.release(hw);
+ break;
+ case e1000_phy_82579:
+ /* Disable PHY energy detect power down */
+ e1e_rphy(hw, PHY_REG(0, 21), &phy_reg);
+ e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3));
+ /* Disable full chip energy detect */
+ e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
+ e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
+ /* Enable loopback on the PHY */
+#define I82577_PHY_LBK_CTRL 19
+ e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001);
+ break;
+ default:
+ break;
+ }
+
+ /* force 1000, set loopback */
+ e1e_wphy(hw, PHY_CONTROL, 0x4140);
+ mdelay(250);
+
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg = er32(CTRL);
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+
+ if (adapter->flags & FLAG_IS_ICH)
+ ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */
+
+ if (hw->phy.media_type == e1000_media_type_copper &&
+ hw->phy.type == e1000_phy_m88) {
+ ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
+ } else {
+ /*
+ * Set the ILOS bit on the fiber Nic if half duplex link is
+ * detected.
+ */
+ if ((er32(STATUS) & E1000_STATUS_FD) == 0)
+ ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
+ }
+
+ ew32(CTRL, ctrl_reg);
+
+ /*
+ * Disable the receiver on the PHY so that the PHY does not begin
+ * to autonegotiate when a cable is reconnected to the NIC.
+ */
+ if (hw->phy.type == e1000_phy_m88)
+ e1000_phy_disable_receiver(adapter);
+
+ udelay(500);
+
+ return 0;
+}
+
+static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl = er32(CTRL);
+ int link = 0;
+
+ /* special requirements for 82571/82572 fiber adapters */
+
+ /*
+ * jump through hoops to make sure link is up because serdes
+ * link is hardwired up
+ */
+ ctrl |= E1000_CTRL_SLU;
+ ew32(CTRL, ctrl);
+
+ /* disable autoneg */
+ ctrl = er32(TXCW);
+ ctrl &= ~(1 << 31);
+ ew32(TXCW, ctrl);
+
+ link = (er32(STATUS) & E1000_STATUS_LU);
+
+ if (!link) {
+ /* set invert loss of signal */
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_ILOS;
+ ew32(CTRL, ctrl);
+ }
+
+ /*
+ * special write to serdes control register to enable SerDes analog
+ * loopback
+ */
+#define E1000_SERDES_LB_ON 0x410
+ ew32(SCTL, E1000_SERDES_LB_ON);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ return 0;
+}
+
+/* only call this for fiber/serdes connections to es2lan */
+static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrlext = er32(CTRL_EXT);
+ u32 ctrl = er32(CTRL);
+
+ /*
+ * Save CTRL_EXT so it can be restored later; tx_fifo_head is
+ * otherwise unused on 80003es2lan, so borrow it as scratch storage.
+ */
+ adapter->tx_fifo_head = ctrlext;
+
+ /* clear the serdes mode bits, putting the device into mac loopback */
+ ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+ ew32(CTRL_EXT, ctrlext);
+
+ /* force speed to 1000/FD, link up */
+ ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX |
+ E1000_CTRL_SPD_1000 | E1000_CTRL_FD);
+ ew32(CTRL, ctrl);
+
+ /* set mac loopback */
+ ctrl = er32(RCTL);
+ ctrl |= E1000_RCTL_LBM_MAC;
+ ew32(RCTL, ctrl);
+
+ /* set testing mode parameters (no need to reset later) */
+#define KMRNCTRLSTA_OPMODE (0x1F << 16)
+#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582
+ ew32(KMRNCTRLSTA,
+ (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII));
+
+ return 0;
+}
+
+static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ if (hw->phy.media_type == e1000_media_type_fiber ||
+ hw->phy.media_type == e1000_media_type_internal_serdes) {
+ switch (hw->mac.type) {
+ case e1000_80003es2lan:
+ return e1000_set_es2lan_mac_loopback(adapter);
+ case e1000_82571:
+ case e1000_82572:
+ return e1000_set_82571_fiber_loopback(adapter);
+ default:
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_LBM_TCVR;
+ ew32(RCTL, rctl);
+ return 0;
+ }
+ } else if (hw->phy.media_type == e1000_media_type_copper) {
+ return e1000_integrated_phy_loopback(adapter);
+ }
+
+ return 7;
+}
+
+static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+ u16 phy_reg;
+
+ rctl = er32(RCTL);
+ rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+ ew32(RCTL, rctl);
+
+ switch (hw->mac.type) {
+ case e1000_80003es2lan:
+ if (hw->phy.media_type == e1000_media_type_fiber ||
+ hw->phy.media_type == e1000_media_type_internal_serdes) {
+ /* restore CTRL_EXT, stealing space from tx_fifo_head */
+ ew32(CTRL_EXT, adapter->tx_fifo_head);
+ adapter->tx_fifo_head = 0;
+ }
+ /* fall through */
+ case e1000_82571:
+ case e1000_82572:
+ if (hw->phy.media_type == e1000_media_type_fiber ||
+ hw->phy.media_type == e1000_media_type_internal_serdes) {
+#define E1000_SERDES_LB_OFF 0x400
+ ew32(SCTL, E1000_SERDES_LB_OFF);
+ e1e_flush();
+ usleep_range(10000, 20000);
+ break;
+ }
+ /* Fall Through */
+ default:
+ hw->mac.autoneg = 1;
+ if (hw->phy.type == e1000_phy_gg82563)
+ e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180);
+ e1e_rphy(hw, PHY_CONTROL, &phy_reg);
+ if (phy_reg & MII_CR_LOOPBACK) {
+ phy_reg &= ~MII_CR_LOOPBACK;
+ e1e_wphy(hw, PHY_CONTROL, phy_reg);
+ e1000e_commit_phy(hw);
+ }
+ break;
+ }
+}
+
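+/*
+ * Loopback frames carry 0xFF in the first half, 0xAA in the second,
+ * and 0xBE/0xAF marker bytes just past the midpoint; checking the
+ * leading fill plus both markers is enough to detect a mis-compare.
+ */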
+static void e1000_create_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
+ memset(skb->data, 0xFF, frame_size);
+ frame_size &= ~1;
+ memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+ memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+ memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
+
+static int e1000_check_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
+ frame_size &= ~1;
+ if (*(skb->data + 3) == 0xFF)
+ if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+ (*(skb->data + frame_size / 2 + 12) == 0xAF))
+ return 0;
+ return 13;
+}
+
+static int e1000_run_loopback_test(struct e1000_adapter *adapter)
+{
+ struct e1000_ring *tx_ring = &adapter->test_tx_ring;
+ struct e1000_ring *rx_ring = &adapter->test_rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+ int i, j, k, l;
+ int lc;
+ int good_cnt;
+ int ret_val = 0;
+ unsigned long time;
+
+ ew32(RDT, rx_ring->count - 1);
+
+ /*
+ * Calculate the loop count based on the largest descriptor ring.
+ * The idea is to wrap the largest ring a number of times using 64
+ * send/receive pairs during each loop.
+ */
+
+ if (rx_ring->count <= tx_ring->count)
+ lc = ((tx_ring->count / 64) * 2) + 1;
+ else
+ lc = ((rx_ring->count / 64) * 2) + 1;
+
+ k = 0;
+ l = 0;
+ for (j = 0; j <= lc; j++) { /* loop count loop */
+ for (i = 0; i < 64; i++) { /* send the packets */
+ e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
+ 1024);
+ dma_sync_single_for_device(&pdev->dev,
+ tx_ring->buffer_info[k].dma,
+ tx_ring->buffer_info[k].length,
+ DMA_TO_DEVICE);
+ k++;
+ if (k == tx_ring->count)
+ k = 0;
+ }
+ ew32(TDT, k);
+ e1e_flush();
+ msleep(200);
+ time = jiffies; /* set the start time for the receive */
+ good_cnt = 0;
+ do { /* receive the sent packets */
+ dma_sync_single_for_cpu(&pdev->dev,
+ rx_ring->buffer_info[l].dma, 2048,
+ DMA_FROM_DEVICE);
+
+ ret_val = e1000_check_lbtest_frame(
+ rx_ring->buffer_info[l].skb, 1024);
+ if (!ret_val)
+ good_cnt++;
+ l++;
+ if (l == rx_ring->count)
+ l = 0;
+ /*
+ * 20 jiffies (20 msecs at HZ=1000, 200 msecs on 2.4
+ * kernels) is more than enough time to complete the
+ * receives; if it is exceeded, break out and report
+ * an error
+ */
+ } while ((good_cnt < 64) && !time_after(jiffies, time + 20));
+ if (good_cnt != 64) {
+ ret_val = 13; /* ret_val is the same as mis-compare */
+ break;
+ }
+ if (time_after_eq(jiffies, time + 20)) {
+ ret_val = 14; /* error code for time out error */
+ break;
+ }
+ } /* end loop count loop */
+ return ret_val;
+}
+
+static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
+{
+ /*
+ * PHY loopback cannot be performed if SoL/IDER
+ * sessions are active
+ */
+ if (e1000_check_reset_block(&adapter->hw)) {
+ e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
+ *data = 0;
+ goto out;
+ }
+
+ *data = e1000_setup_desc_rings(adapter);
+ if (*data)
+ goto out;
+
+ *data = e1000_setup_loopback_test(adapter);
+ if (*data)
+ goto err_loopback;
+
+ *data = e1000_run_loopback_test(adapter);
+ e1000_loopback_cleanup(adapter);
+
+err_loopback:
+ e1000_free_desc_rings(adapter);
+out:
+ return *data;
+}
+
+static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ *data = 0;
+ if (hw->phy.media_type == e1000_media_type_internal_serdes) {
+ int i = 0;
+ hw->mac.serdes_has_link = false;
+
+ /*
+ * On some blade server designs, link establishment
+ * could take as long as 2-3 minutes
+ */
+ do {
+ hw->mac.ops.check_for_link(hw);
+ if (hw->mac.serdes_has_link)
+ return *data;
+ msleep(20);
+ } while (i++ < 3750);
+
+ *data = 1;
+ } else {
+ hw->mac.ops.check_for_link(hw);
+ if (hw->mac.autoneg)
+ /*
+ * On some Phy/switch combinations, link establishment
+ * can take a few seconds more than expected.
+ */
+ msleep(5000);
+
+ if (!(er32(STATUS) & E1000_STATUS_LU))
+ *data = 1;
+ }
+ return *data;
+}
+
+static int e1000e_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return E1000_TEST_LEN;
+ case ETH_SS_STATS:
+ return E1000_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
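+/*
+ * ethtool self-test entry point (invoked via "ethtool -t").  Offline
+ * mode runs the register, eeprom, interrupt, loopback and link tests
+ * (data[0]..data[4]) with the interface closed and the hardware reset
+ * between tests; online mode only performs the link test so traffic
+ * can keep flowing.
+ */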
+static void e1000_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ u16 autoneg_advertised;
+ u8 forced_speed_duplex;
+ u8 autoneg;
+ bool if_running = netif_running(netdev);
+
+ set_bit(__E1000_TESTING, &adapter->state);
+
+ if (!if_running) {
+ /* Get control of and reset hardware */
+ if (adapter->flags & FLAG_HAS_AMT)
+ e1000e_get_hw_control(adapter);
+
+ e1000e_power_up_phy(adapter);
+
+ adapter->hw.phy.autoneg_wait_to_complete = 1;
+ e1000e_reset(adapter);
+ adapter->hw.phy.autoneg_wait_to_complete = 0;
+ }
+
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ /* save speed, duplex, autoneg settings */
+ autoneg_advertised = adapter->hw.phy.autoneg_advertised;
+ forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
+ autoneg = adapter->hw.mac.autoneg;
+
+ e_info("offline testing starting\n");
+
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+
+ if (e1000_reg_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000e_reset(adapter);
+ if (e1000_eeprom_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000e_reset(adapter);
+ if (e1000_intr_test(adapter, &data[2]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ e1000e_reset(adapter);
+ if (e1000_loopback_test(adapter, &data[3]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* force this routine to wait until autoneg complete/timeout */
+ adapter->hw.phy.autoneg_wait_to_complete = 1;
+ e1000e_reset(adapter);
+ adapter->hw.phy.autoneg_wait_to_complete = 0;
+
+ if (e1000_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* restore speed, duplex, autoneg settings */
+ adapter->hw.phy.autoneg_advertised = autoneg_advertised;
+ adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
+ adapter->hw.mac.autoneg = autoneg;
+ e1000e_reset(adapter);
+
+ clear_bit(__E1000_TESTING, &adapter->state);
+ if (if_running)
+ dev_open(netdev);
+ } else {
+ /* Online tests */
+
+ e_info("online testing starting\n");
+
+ /* register, eeprom, intr and loopback tests not run online */
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0;
+ data[3] = 0;
+
+ if (e1000_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ clear_bit(__E1000_TESTING, &adapter->state);
+ }
+
+ if (!if_running) {
+ e1000e_reset(adapter);
+
+ if (adapter->flags & FLAG_HAS_AMT)
+ e1000e_release_hw_control(adapter);
+ }
+
+ msleep_interruptible(4 * 1000);
+}
+
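+/*
+ * Wake-on-LAN ("ethtool -s <iface> wol ...").  The adapter->wol WUFC
+ * bits map 1:1 onto the ethtool WAKE_* flags; adapters flagged
+ * FLAG_NO_WAKE_UCAST additionally lose directed (unicast) frame
+ * wakeup.
+ */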
+static void e1000_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (!(adapter->flags & FLAG_HAS_WOL) ||
+ !device_can_wakeup(&adapter->pdev->dev))
+ return;
+
+ wol->supported = WAKE_UCAST | WAKE_MCAST |
+ WAKE_BCAST | WAKE_MAGIC | WAKE_PHY;
+
+ /* apply any specific unsupported masks here */
+ if (adapter->flags & FLAG_NO_WAKE_UCAST) {
+ wol->supported &= ~WAKE_UCAST;
+
+ if (adapter->wol & E1000_WUFC_EX)
+ e_err("Interface does not support directed (unicast) "
+ "frame wake-up packets\n");
+ }
+
+ if (adapter->wol & E1000_WUFC_EX)
+ wol->wolopts |= WAKE_UCAST;
+ if (adapter->wol & E1000_WUFC_MC)
+ wol->wolopts |= WAKE_MCAST;
+ if (adapter->wol & E1000_WUFC_BC)
+ wol->wolopts |= WAKE_BCAST;
+ if (adapter->wol & E1000_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+ if (adapter->wol & E1000_WUFC_LNKC)
+ wol->wolopts |= WAKE_PHY;
+}
+
+static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (!(adapter->flags & FLAG_HAS_WOL) ||
+ !device_can_wakeup(&adapter->pdev->dev) ||
+ (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
+ WAKE_MAGIC | WAKE_PHY)))
+ return -EOPNOTSUPP;
+
+ /* these settings will always override what we currently have */
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wol |= E1000_WUFC_EX;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wol |= E1000_WUFC_MC;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wol |= E1000_WUFC_BC;
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= E1000_WUFC_MAG;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wol |= E1000_WUFC_LNKC;
+
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
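+/*
+ * LED identify ("ethtool -p").  If the MAC can blink the LED itself,
+ * blink_led runs autonomously; otherwise returning 2 from
+ * ETHTOOL_ID_ACTIVE asks the ethtool core to cycle the LED via
+ * ETHTOOL_ID_ON/OFF callbacks twice per second.
+ */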
+static int e1000_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ if (!hw->mac.ops.blink_led)
+ return 2; /* cycle on/off twice per second */
+
+ hw->mac.ops.blink_led(hw);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ if (hw->phy.type == e1000_phy_ife)
+ e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
+ hw->mac.ops.led_off(hw);
+ hw->mac.ops.cleanup_led(hw);
+ break;
+
+ case ETHTOOL_ID_ON:
+ adapter->hw.mac.ops.led_on(&adapter->hw);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ adapter->hw.mac.ops.led_off(&adapter->hw);
+ break;
+ }
+ return 0;
+}
+
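+/*
+ * Interrupt coalescing: rx_coalesce_usecs values 0-4 select driver
+ * modes rather than delays (0 disables throttling, 1 and 3 pick the
+ * dynamic modes, 4 a simplified fixed mode; 2 is rejected).  Larger
+ * values convert to an interrupt rate, e.g. rx-usecs 100 -> itr =
+ * 10000 ints/sec -> ITR = 10^9 / (10000 * 256), since the ITR
+ * register counts in 256 ns units.
+ */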
+static int e1000_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->itr_setting <= 4)
+ ec->rx_coalesce_usecs = adapter->itr_setting;
+ else
+ ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
+
+ return 0;
+}
+
+static int e1000_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
+ ((ec->rx_coalesce_usecs > 4) &&
+ (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
+ (ec->rx_coalesce_usecs == 2))
+ return -EINVAL;
+
+ if (ec->rx_coalesce_usecs == 4) {
+ adapter->itr = adapter->itr_setting = 4;
+ } else if (ec->rx_coalesce_usecs <= 3) {
+ adapter->itr = 20000;
+ adapter->itr_setting = ec->rx_coalesce_usecs;
+ } else {
+ adapter->itr = (1000000 / ec->rx_coalesce_usecs);
+ adapter->itr_setting = adapter->itr & ~3;
+ }
+
+ if (adapter->itr_setting != 0)
+ ew32(ITR, 1000000000 / (adapter->itr * 256));
+ else
+ ew32(ITR, 0);
+
+ return 0;
+}
+
+static int e1000_nway_reset(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (!adapter->hw.mac.autoneg)
+ return -EINVAL;
+
+ e1000e_reinit_locked(adapter);
+
+ return 0;
+}
+
+static void e1000_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct rtnl_link_stats64 net_stats;
+ int i;
+ char *p = NULL;
+
+ e1000e_get_stats64(netdev, &net_stats);
+ for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+ switch (e1000_gstrings_stats[i].type) {
+ case NETDEV_STATS:
+ p = (char *) &net_stats +
+ e1000_gstrings_stats[i].stat_offset;
+ break;
+ case E1000_STATS:
+ p = (char *) adapter +
+ e1000_gstrings_stats[i].stat_offset;
+ break;
+ default:
+ data[i] = 0;
+ continue;
+ }
+
+ data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+}
+
+static void e1000_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, e1000_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static const struct ethtool_ops e1000_ethtool_ops = {
+ .get_settings = e1000_get_settings,
+ .set_settings = e1000_set_settings,
+ .get_drvinfo = e1000_get_drvinfo,
+ .get_regs_len = e1000_get_regs_len,
+ .get_regs = e1000_get_regs,
+ .get_wol = e1000_get_wol,
+ .set_wol = e1000_set_wol,
+ .get_msglevel = e1000_get_msglevel,
+ .set_msglevel = e1000_set_msglevel,
+ .nway_reset = e1000_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = e1000_get_eeprom_len,
+ .get_eeprom = e1000_get_eeprom,
+ .set_eeprom = e1000_set_eeprom,
+ .get_ringparam = e1000_get_ringparam,
+ .set_ringparam = e1000_set_ringparam,
+ .get_pauseparam = e1000_get_pauseparam,
+ .set_pauseparam = e1000_set_pauseparam,
+ .self_test = e1000_diag_test,
+ .get_strings = e1000_get_strings,
+ .set_phys_id = e1000_set_phys_id,
+ .get_ethtool_stats = e1000_get_ethtool_stats,
+ .get_sset_count = e1000e_get_sset_count,
+ .get_coalesce = e1000_get_coalesce,
+ .set_coalesce = e1000_set_coalesce,
+};
+
+void e1000e_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
new file mode 100644
index 000000000000..29670397079b
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -0,0 +1,984 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include <linux/types.h>
+
+struct e1000_hw;
+struct e1000_adapter;
+
+#include "defines.h"
+
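+/*
+ * er32()/ew32() expect a local "struct e1000_hw *hw" to be in scope;
+ * e1e_flush() forces posted writes to the device by reading STATUS.
+ */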
+#define er32(reg) __er32(hw, E1000_##reg)
+#define ew32(reg,val) __ew32(hw, E1000_##reg, (val))
+#define e1e_flush() er32(STATUS)
+
+#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \
+ (writel((value), ((a)->hw_addr + reg + ((offset) << 2))))
+
+#define E1000_READ_REG_ARRAY(a, reg, offset) \
+ (readl((a)->hw_addr + reg + ((offset) << 2)))
+
+enum e1e_registers {
+ E1000_CTRL = 0x00000, /* Device Control - RW */
+ E1000_STATUS = 0x00008, /* Device Status - RO */
+ E1000_EECD = 0x00010, /* EEPROM/Flash Control - RW */
+ E1000_EERD = 0x00014, /* EEPROM Read - RW */
+ E1000_CTRL_EXT = 0x00018, /* Extended Device Control - RW */
+ E1000_FLA = 0x0001C, /* Flash Access - RW */
+ E1000_MDIC = 0x00020, /* MDI Control - RW */
+ E1000_SCTL = 0x00024, /* SerDes Control - RW */
+ E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */
+ E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */
+ E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */
+ E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */
+ E1000_FCT = 0x00030, /* Flow Control Type - RW */
+ E1000_VET = 0x00038, /* VLAN Ether Type - RW */
+ E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */
+ E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */
+ E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */
+ E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */
+ E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */
+ E1000_EIAC_82574 = 0x000DC, /* Ext. Interrupt Auto Clear - RW */
+ E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */
+ E1000_IVAR = 0x000E4, /* Interrupt Vector Allocation - RW */
+ E1000_EITR_82574_BASE = 0x000E8, /* Interrupt Throttling - RW */
+#define E1000_EITR_82574(_n) (E1000_EITR_82574_BASE + (_n << 2))
+ E1000_RCTL = 0x00100, /* Rx Control - RW */
+ E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */
+ E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */
+ E1000_RXCW = 0x00180, /* Rx Configuration Word - RO */
+ E1000_TCTL = 0x00400, /* Tx Control - RW */
+ E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */
+ E1000_TIPG = 0x00410, /* Tx Inter-packet gap -RW */
+ E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */
+ E1000_LEDCTL = 0x00E00, /* LED Control - RW */
+ E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */
+ E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */
+ E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */
+#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
+ E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */
+ E1000_PBS = 0x01008, /* Packet Buffer Size */
+ E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
+ E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */
+ E1000_FLOP = 0x0103C, /* FLASH Opcode Register */
+ E1000_PBA_ECC = 0x01100, /* PBA ECC Register */
+ E1000_ERT = 0x02008, /* Early Rx Threshold - RW */
+ E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */
+ E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */
+ E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */
+ E1000_RDBAL = 0x02800, /* Rx Descriptor Base Address Low - RW */
+ E1000_RDBAH = 0x02804, /* Rx Descriptor Base Address High - RW */
+ E1000_RDLEN = 0x02808, /* Rx Descriptor Length - RW */
+ E1000_RDH = 0x02810, /* Rx Descriptor Head - RW */
+ E1000_RDT = 0x02818, /* Rx Descriptor Tail - RW */
+ E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
+ E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
+#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8))
+ E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
+
+/* Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL_REG(current_rx_queue)
+ *
+ */
+#define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8))
+ E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */
+ E1000_TDBAL = 0x03800, /* Tx Descriptor Base Address Low - RW */
+ E1000_TDBAH = 0x03804, /* Tx Descriptor Base Address High - RW */
+ E1000_TDLEN = 0x03808, /* Tx Descriptor Length - RW */
+ E1000_TDH = 0x03810, /* Tx Descriptor Head - RW */
+ E1000_TDT = 0x03818, /* Tx Descriptor Tail - RW */
+ E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */
+ E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */
+#define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8))
+ E1000_TADV = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */
+ E1000_TARC_BASE = 0x03840, /* Tx Arbitration Count (0) */
+#define E1000_TARC(_n) (E1000_TARC_BASE + (_n << 8))
+ E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */
+ E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */
+ E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */
+ E1000_RXERRC = 0x0400C, /* Receive Error Count - R/clr */
+ E1000_MPC = 0x04010, /* Missed Packet Count - R/clr */
+ E1000_SCC = 0x04014, /* Single Collision Count - R/clr */
+ E1000_ECOL = 0x04018, /* Excessive Collision Count - R/clr */
+ E1000_MCC = 0x0401C, /* Multiple Collision Count - R/clr */
+ E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */
+ E1000_COLC = 0x04028, /* Collision Count - R/clr */
+ E1000_DC = 0x04030, /* Defer Count - R/clr */
+ E1000_TNCRS = 0x04034, /* Tx-No CRS - R/clr */
+ E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */
+ E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */
+ E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */
+ E1000_XONRXC = 0x04048, /* XON Rx Count - R/clr */
+ E1000_XONTXC = 0x0404C, /* XON Tx Count - R/clr */
+ E1000_XOFFRXC = 0x04050, /* XOFF Rx Count - R/clr */
+ E1000_XOFFTXC = 0x04054, /* XOFF Tx Count - R/clr */
+ E1000_FCRUC = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */
+ E1000_PRC64 = 0x0405C, /* Packets Rx (64 bytes) - R/clr */
+ E1000_PRC127 = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */
+ E1000_PRC255 = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */
+ E1000_PRC511 = 0x04068, /* Packets Rx (255-511 bytes) - R/clr */
+ E1000_PRC1023 = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */
+ E1000_PRC1522 = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */
+ E1000_GPRC = 0x04074, /* Good Packets Rx Count - R/clr */
+ E1000_BPRC = 0x04078, /* Broadcast Packets Rx Count - R/clr */
+ E1000_MPRC = 0x0407C, /* Multicast Packets Rx Count - R/clr */
+ E1000_GPTC = 0x04080, /* Good Packets Tx Count - R/clr */
+ E1000_GORCL = 0x04088, /* Good Octets Rx Count Low - R/clr */
+ E1000_GORCH = 0x0408C, /* Good Octets Rx Count High - R/clr */
+ E1000_GOTCL = 0x04090, /* Good Octets Tx Count Low - R/clr */
+ E1000_GOTCH = 0x04094, /* Good Octets Tx Count High - R/clr */
+ E1000_RNBC = 0x040A0, /* Rx No Buffers Count - R/clr */
+ E1000_RUC = 0x040A4, /* Rx Undersize Count - R/clr */
+ E1000_RFC = 0x040A8, /* Rx Fragment Count - R/clr */
+ E1000_ROC = 0x040AC, /* Rx Oversize Count - R/clr */
+ E1000_RJC = 0x040B0, /* Rx Jabber Count - R/clr */
+ E1000_MGTPRC = 0x040B4, /* Management Packets Rx Count - R/clr */
+ E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */
+ E1000_MGTPTC = 0x040BC, /* Management Packets Tx Count - R/clr */
+ E1000_TORL = 0x040C0, /* Total Octets Rx Low - R/clr */
+ E1000_TORH = 0x040C4, /* Total Octets Rx High - R/clr */
+ E1000_TOTL = 0x040C8, /* Total Octets Tx Low - R/clr */
+ E1000_TOTH = 0x040CC, /* Total Octets Tx High - R/clr */
+ E1000_TPR = 0x040D0, /* Total Packets Rx - R/clr */
+ E1000_TPT = 0x040D4, /* Total Packets Tx - R/clr */
+ E1000_PTC64 = 0x040D8, /* Packets Tx (64 bytes) - R/clr */
+ E1000_PTC127 = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */
+ E1000_PTC255 = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */
+ E1000_PTC511 = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */
+ E1000_PTC1023 = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */
+ E1000_PTC1522 = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */
+ E1000_MPTC = 0x040F0, /* Multicast Packets Tx Count - R/clr */
+ E1000_BPTC = 0x040F4, /* Broadcast Packets Tx Count - R/clr */
+ E1000_TSCTC = 0x040F8, /* TCP Segmentation Context Tx - R/clr */
+ E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */
+ E1000_IAC = 0x04100, /* Interrupt Assertion Count */
+ E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */
+ E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */
+ E1000_ICTXPTC = 0x0410C, /* Irq Cause Tx Packet Timer Expire Count */
+ E1000_ICTXATC = 0x04110, /* Irq Cause Tx Abs Timer Expire Count */
+ E1000_ICTXQEC = 0x04118, /* Irq Cause Tx Queue Empty Count */
+ E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */
+ E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */
+ E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */
+ E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */
+ E1000_RFCTL = 0x05008, /* Receive Filter Control */
+ E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */
+ E1000_RAL_BASE = 0x05400, /* Receive Address Low - RW */
+#define E1000_RAL(_n) (E1000_RAL_BASE + ((_n) * 8))
+#define E1000_RA (E1000_RAL(0))
+ E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */
+#define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8))
+ E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */
+ E1000_WUC = 0x05800, /* Wakeup Control - RW */
+ E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */
+ E1000_WUS = 0x05810, /* Wakeup Status - RO */
+ E1000_MANC = 0x05820, /* Management Control - RW */
+ E1000_FFLT = 0x05F00, /* Flexible Filter Length Table - RW Array */
+ E1000_HOST_IF = 0x08800, /* Host Interface */
+
+ E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */
+ E1000_MANC2H = 0x05860, /* Management Control To Host - RW */
+ E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */
+#define E1000_MDEF(_n) (E1000_MDEF_BASE + ((_n) * 4))
+ E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */
+ E1000_GCR = 0x05B00, /* PCI-Ex Control */
+ E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */
+ E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */
+ E1000_SWSM = 0x05B50, /* SW Semaphore */
+ E1000_FWSM = 0x05B54, /* FW Semaphore */
+ E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */
+ E1000_FFLT_DBG = 0x05F04, /* Debug Register */
+ E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */
+#define E1000_PCH_RAICC(_n) (E1000_PCH_RAICC_BASE + ((_n) * 4))
+#define E1000_CRC_OFFSET E1000_PCH_RAICC_BASE
+ E1000_HICR = 0x08F00, /* Host Interface Control */
+};
+
+#define E1000_MAX_PHY_ADDR 4
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
+#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
+#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
+#define IGP_PAGE_SHIFT 5
+#define PHY_REG_MASK 0x1F
+
+#define BM_WUC_PAGE 800
+#define BM_WUC_ADDRESS_OPCODE 0x11
+#define BM_WUC_DATA_OPCODE 0x12
+#define BM_WUC_ENABLE_PAGE 769
+#define BM_WUC_ENABLE_REG 17
+#define BM_WUC_ENABLE_BIT (1 << 2)
+#define BM_WUC_HOST_WU_BIT (1 << 4)
+#define BM_WUC_ME_WU_BIT (1 << 5)
+
+#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
+#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
+#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
+
+#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK 0x0078
+
+#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
+
+#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
+
+#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
+
+#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
+
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_MDIX 0x0800
+#define IGP01E1000_PSSR_SPEED_MASK 0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
+
+#define IGP02E1000_PHY_CHANNEL_NUM 4
+#define IGP02E1000_PHY_AGC_A 0x11B1
+#define IGP02E1000_PHY_AGC_B 0x12B1
+#define IGP02E1000_PHY_AGC_C 0x14B1
+#define IGP02E1000_PHY_AGC_D 0x18B1
+
+#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK 0x7F
+#define IGP02E1000_AGC_RANGE 15
+
+/* manage.c */
+#define E1000_VFTA_ENTRY_SHIFT 5
+#define E1000_VFTA_ENTRY_MASK 0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
+
+#define E1000_HICR_EN 0x01 /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define E1000_HICR_C 0x02
+#define E1000_HICR_FW_RESET_ENABLE 0x40
+#define E1000_HICR_FW_RESET 0x80
+
+#define E1000_FWSM_MODE_MASK 0xE
+#define E1000_FWSM_MODE_SHIFT 1
+
+#define E1000_MNG_IAMT_MODE 0x3
+#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
+#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
+
+/* nvm.c */
+#define E1000_STM_OPCODE 0xDB00
+
+#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
+#define E1000_KMRNCTRLSTA_REN 0x00200000
+#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */
+#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
+#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
+#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
+#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
+#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
+#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002
+#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */
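+/*
+ * A sketch of Kumeran register addressing (the driver's generic KMRN
+ * read/write helpers do this): the register offset is shifted left by
+ * E1000_KMRNCTRLSTA_OFFSET_SHIFT into bits 20:16 and masked with
+ * E1000_KMRNCTRLSTA_OFFSET; E1000_KMRNCTRLSTA_REN is set for reads, and
+ * the data travels in the low 16 bits of the KMRNCTRLSTA register.
+ */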
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
+#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */
+#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
+
+/* IFE PHY Extended Status Control */
+#define IFE_PESC_POLARITY_REVERSED 0x0100
+
+/* IFE PHY Special Control */
+#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
+#define IFE_PSC_FORCE_POLARITY 0x0020
+
+/* IFE PHY Special Control and LED Control */
+#define IFE_PSCL_PROBE_MODE 0x0020
+#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
+
+/* IFE PHY MDIX Control */
+#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */
+
+#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
+
+#define E1000_DEV_ID_82571EB_COPPER 0x105E
+#define E1000_DEV_ID_82571EB_FIBER 0x105F
+#define E1000_DEV_ID_82571EB_SERDES 0x1060
+#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
+#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
+#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5
+#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC
+#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9
+#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA
+#define E1000_DEV_ID_82572EI_COPPER 0x107D
+#define E1000_DEV_ID_82572EI_FIBER 0x107E
+#define E1000_DEV_ID_82572EI_SERDES 0x107F
+#define E1000_DEV_ID_82572EI 0x10B9
+#define E1000_DEV_ID_82573E 0x108B
+#define E1000_DEV_ID_82573E_IAMT 0x108C
+#define E1000_DEV_ID_82573L 0x109A
+#define E1000_DEV_ID_82574L 0x10D3
+#define E1000_DEV_ID_82574LA 0x10F6
+#define E1000_DEV_ID_82583V 0x150C
+
+#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
+#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
+#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
+#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
+
+#define E1000_DEV_ID_ICH8_82567V_3 0x1501
+#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
+#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
+#define E1000_DEV_ID_ICH8_IGP_C 0x104B
+#define E1000_DEV_ID_ICH8_IFE 0x104C
+#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4
+#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
+#define E1000_DEV_ID_ICH8_IGP_M 0x104D
+#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD
+#define E1000_DEV_ID_ICH9_BM 0x10E5
+#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5
+#define E1000_DEV_ID_ICH9_IGP_M 0x10BF
+#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB
+#define E1000_DEV_ID_ICH9_IGP_C 0x294C
+#define E1000_DEV_ID_ICH9_IFE 0x10C0
+#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3
+#define E1000_DEV_ID_ICH9_IFE_G 0x10C2
+#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC
+#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD
+#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
+#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE
+#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF
+#define E1000_DEV_ID_ICH10_D_BM_V 0x1525
+#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA
+#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
+#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
+#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0
+#define E1000_DEV_ID_PCH2_LV_LM 0x1502
+#define E1000_DEV_ID_PCH2_LV_V 0x1503
+
+#define E1000_REVISION_4 4
+
+#define E1000_FUNC_1 1
+
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
+
+enum e1000_mac_type {
+ e1000_82571,
+ e1000_82572,
+ e1000_82573,
+ e1000_82574,
+ e1000_82583,
+ e1000_80003es2lan,
+ e1000_ich8lan,
+ e1000_ich9lan,
+ e1000_ich10lan,
+ e1000_pchlan,
+ e1000_pch2lan,
+};
+
+enum e1000_media_type {
+ e1000_media_type_unknown = 0,
+ e1000_media_type_copper = 1,
+ e1000_media_type_fiber = 2,
+ e1000_media_type_internal_serdes = 3,
+ e1000_num_media_types
+};
+
+enum e1000_nvm_type {
+ e1000_nvm_unknown = 0,
+ e1000_nvm_none,
+ e1000_nvm_eeprom_spi,
+ e1000_nvm_flash_hw,
+ e1000_nvm_flash_sw
+};
+
+enum e1000_nvm_override {
+ e1000_nvm_override_none = 0,
+ e1000_nvm_override_spi_small,
+ e1000_nvm_override_spi_large
+};
+
+enum e1000_phy_type {
+ e1000_phy_unknown = 0,
+ e1000_phy_none,
+ e1000_phy_m88,
+ e1000_phy_igp,
+ e1000_phy_igp_2,
+ e1000_phy_gg82563,
+ e1000_phy_igp_3,
+ e1000_phy_ife,
+ e1000_phy_bm,
+ e1000_phy_82578,
+ e1000_phy_82577,
+ e1000_phy_82579,
+};
+
+enum e1000_bus_width {
+ e1000_bus_width_unknown = 0,
+ e1000_bus_width_pcie_x1,
+ e1000_bus_width_pcie_x2,
+ e1000_bus_width_pcie_x4 = 4,
+ e1000_bus_width_32,
+ e1000_bus_width_64,
+ e1000_bus_width_reserved
+};
+
+enum e1000_1000t_rx_status {
+ e1000_1000t_rx_status_not_ok = 0,
+ e1000_1000t_rx_status_ok,
+ e1000_1000t_rx_status_undefined = 0xFF
+};
+
+enum e1000_rev_polarity {
+ e1000_rev_polarity_normal = 0,
+ e1000_rev_polarity_reversed,
+ e1000_rev_polarity_undefined = 0xFF
+};
+
+enum e1000_fc_mode {
+ e1000_fc_none = 0,
+ e1000_fc_rx_pause,
+ e1000_fc_tx_pause,
+ e1000_fc_full,
+ e1000_fc_default = 0xFF
+};
+
+enum e1000_ms_type {
+ e1000_ms_hw_default = 0,
+ e1000_ms_force_master,
+ e1000_ms_force_slave,
+ e1000_ms_auto
+};
+
+enum e1000_smart_speed {
+ e1000_smart_speed_default = 0,
+ e1000_smart_speed_on,
+ e1000_smart_speed_off
+};
+
+enum e1000_serdes_link_state {
+ e1000_serdes_link_down = 0,
+ e1000_serdes_link_autoneg_progress,
+ e1000_serdes_link_autoneg_complete,
+ e1000_serdes_link_forced_up
+};
+
+/* Receive Descriptor */
+struct e1000_rx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le16 length; /* Length of data DMAed into data buffer */
+ __le16 csum; /* Packet checksum */
+ u8 status; /* Descriptor status */
+ u8 errors; /* Descriptor Errors */
+ __le16 special;
+};
+
+/* Receive Descriptor - Extended */
+union e1000_rx_desc_extended {
+ struct {
+ __le64 buffer_addr;
+ __le64 reserved;
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length;
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+ struct {
+ /* one buffer for protocol header(s), three data buffers */
+ __le64 buffer_addr[MAX_PS_BUFFERS];
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length0; /* length of buffer 0 */
+ __le16 vlan; /* VLAN tag */
+ } middle;
+ struct {
+ __le16 header_status;
+ __le16 length[3]; /* length of buffers 1-3 */
+ } upper;
+ __le64 reserved;
+ } wb; /* writeback */
+};
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 cso; /* Checksum offset */
+ u8 cmd; /* Descriptor control */
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 css; /* Checksum start */
+ __le16 special;
+ } fields;
+ } upper;
+};
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+ union {
+ __le32 ip_config;
+ struct {
+ u8 ipcss; /* IP checksum start */
+ u8 ipcso; /* IP checksum offset */
+ __le16 ipcse; /* IP checksum end */
+ } ip_fields;
+ } lower_setup;
+ union {
+ __le32 tcp_config;
+ struct {
+ u8 tucss; /* TCP checksum start */
+ u8 tucso; /* TCP checksum offset */
+ __le16 tucse; /* TCP checksum end */
+ } tcp_fields;
+ } upper_setup;
+ __le32 cmd_and_length;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 hdr_len; /* Header length */
+ __le16 mss; /* Maximum segment size */
+ } fields;
+ } tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 typ_len_ext;
+ u8 cmd;
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 popts; /* Packet Options */
+ __le16 special;
+ } fields;
+ } upper;
+};
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+ u64 crcerrs;
+ u64 algnerrc;
+ u64 symerrs;
+ u64 rxerrc;
+ u64 mpc;
+ u64 scc;
+ u64 ecol;
+ u64 mcc;
+ u64 latecol;
+ u64 colc;
+ u64 dc;
+ u64 tncrs;
+ u64 sec;
+ u64 cexterr;
+ u64 rlec;
+ u64 xonrxc;
+ u64 xontxc;
+ u64 xoffrxc;
+ u64 xofftxc;
+ u64 fcruc;
+ u64 prc64;
+ u64 prc127;
+ u64 prc255;
+ u64 prc511;
+ u64 prc1023;
+ u64 prc1522;
+ u64 gprc;
+ u64 bprc;
+ u64 mprc;
+ u64 gptc;
+ u64 gorc;
+ u64 gotc;
+ u64 rnbc;
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rjc;
+ u64 mgprc;
+ u64 mgpdc;
+ u64 mgptc;
+ u64 tor;
+ u64 tot;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 tsctc;
+ u64 tsctfc;
+ u64 iac;
+ u64 icrxptc;
+ u64 icrxatc;
+ u64 ictxptc;
+ u64 ictxatc;
+ u64 ictxqec;
+ u64 ictxqmtc;
+ u64 icrxdmtc;
+ u64 icrxoc;
+};
+
+struct e1000_phy_stats {
+ u32 idle_errors;
+ u32 receive_errors;
+};
+
+struct e1000_host_mng_dhcp_cookie {
+ u32 signature;
+ u8 status;
+ u8 reserved0;
+ u16 vlan_id;
+ u32 reserved1;
+ u16 reserved2;
+ u8 reserved3;
+ u8 checksum;
+};
+
+/* Host Interface "Rev 1" */
+struct e1000_host_command_header {
+ u8 command_id;
+ u8 command_length;
+ u8 command_options;
+ u8 checksum;
+};
+
+#define E1000_HI_MAX_DATA_LENGTH 252
+struct e1000_host_command_info {
+ struct e1000_host_command_header command_header;
+ u8 command_data[E1000_HI_MAX_DATA_LENGTH];
+};
+
+/* Host Interface "Rev 2" */
+struct e1000_host_mng_command_header {
+ u8 command_id;
+ u8 checksum;
+ u16 reserved1;
+ u16 reserved2;
+ u16 command_length;
+};
+
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
+struct e1000_host_mng_command_info {
+ struct e1000_host_mng_command_header command_header;
+ u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
+};
+
+/* Function pointers and static data for the MAC. */
+struct e1000_mac_operations {
+ s32 (*id_led_init)(struct e1000_hw *);
+ s32 (*blink_led)(struct e1000_hw *);
+ bool (*check_mng_mode)(struct e1000_hw *);
+ s32 (*check_for_link)(struct e1000_hw *);
+ s32 (*cleanup_led)(struct e1000_hw *);
+ void (*clear_hw_cntrs)(struct e1000_hw *);
+ void (*clear_vfta)(struct e1000_hw *);
+ s32 (*get_bus_info)(struct e1000_hw *);
+ void (*set_lan_id)(struct e1000_hw *);
+ s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
+ s32 (*led_on)(struct e1000_hw *);
+ s32 (*led_off)(struct e1000_hw *);
+ void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
+ s32 (*reset_hw)(struct e1000_hw *);
+ s32 (*init_hw)(struct e1000_hw *);
+ s32 (*setup_link)(struct e1000_hw *);
+ s32 (*setup_physical_interface)(struct e1000_hw *);
+ s32 (*setup_led)(struct e1000_hw *);
+ void (*write_vfta)(struct e1000_hw *, u32, u32);
+ s32 (*read_mac_addr)(struct e1000_hw *);
+};
+
+/*
+ * When to use various PHY register access functions:
+ *
+ * Func Caller
+ * Function Does Does When to use
+ * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * X_reg L,P,A n/a for simple PHY reg accesses
+ * X_reg_locked P,A L for multiple accesses of different regs
+ * on different pages
+ * X_reg_page A L,P for multiple accesses of different regs
+ * on the same page
+ *
+ * Where X=[read|write], L=locking, P=sets page, A=register access
+ *
+ */
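+/*
+ * For illustration, a caller doing several accesses on one page pairs the
+ * table above with a sequence like (a sketch; error handling omitted):
+ *
+ *	hw->phy.ops.acquire(hw);
+ *	hw->phy.ops.set_page(hw, page);
+ *	hw->phy.ops.write_reg_page(hw, reg1, val1);
+ *	hw->phy.ops.write_reg_page(hw, reg2, val2);
+ *	hw->phy.ops.release(hw);
+ */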
+struct e1000_phy_operations {
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*cfg_on_link_up)(struct e1000_hw *);
+ s32 (*check_polarity)(struct e1000_hw *);
+ s32 (*check_reset_block)(struct e1000_hw *);
+ s32 (*commit)(struct e1000_hw *);
+ s32 (*force_speed_duplex)(struct e1000_hw *);
+ s32 (*get_cfg_done)(struct e1000_hw *hw);
+ s32 (*get_cable_length)(struct e1000_hw *);
+ s32 (*get_info)(struct e1000_hw *);
+ s32 (*set_page)(struct e1000_hw *, u16);
+ s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
+ s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
+ s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *);
+ void (*release)(struct e1000_hw *);
+ s32 (*reset)(struct e1000_hw *);
+ s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
+ s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
+ s32 (*write_reg)(struct e1000_hw *, u32, u16);
+ s32 (*write_reg_locked)(struct e1000_hw *, u32, u16);
+ s32 (*write_reg_page)(struct e1000_hw *, u32, u16);
+ void (*power_up)(struct e1000_hw *);
+ void (*power_down)(struct e1000_hw *);
+};
+
+/* Function pointers for the NVM. */
+struct e1000_nvm_operations {
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
+ void (*release)(struct e1000_hw *);
+ s32 (*update)(struct e1000_hw *);
+ s32 (*valid_led_default)(struct e1000_hw *, u16 *);
+ s32 (*validate)(struct e1000_hw *);
+ s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+};
+
+struct e1000_mac_info {
+ struct e1000_mac_operations ops;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+
+ enum e1000_mac_type type;
+
+ u32 collision_delta;
+ u32 ledctl_default;
+ u32 ledctl_mode1;
+ u32 ledctl_mode2;
+ u32 mc_filter_type;
+ u32 tx_packet_delta;
+ u32 txcw;
+
+ u16 current_ifs_val;
+ u16 ifs_max_val;
+ u16 ifs_min_val;
+ u16 ifs_ratio;
+ u16 ifs_step_size;
+ u16 mta_reg_count;
+
+ /* Maximum size of the MTA register table in all supported adapters */
+ #define MAX_MTA_REG 128
+ u32 mta_shadow[MAX_MTA_REG];
+ u16 rar_entry_count;
+
+ u8 forced_speed_duplex;
+
+ bool adaptive_ifs;
+ bool has_fwsm;
+ bool arc_subsystem_valid;
+ bool autoneg;
+ bool autoneg_failed;
+ bool get_link_status;
+ bool in_ifs_mode;
+ bool serdes_has_link;
+ bool tx_pkt_filtering;
+ enum e1000_serdes_link_state serdes_link_state;
+};
+
+struct e1000_phy_info {
+ struct e1000_phy_operations ops;
+
+ enum e1000_phy_type type;
+
+ enum e1000_1000t_rx_status local_rx;
+ enum e1000_1000t_rx_status remote_rx;
+ enum e1000_ms_type ms_type;
+ enum e1000_ms_type original_ms_type;
+ enum e1000_rev_polarity cable_polarity;
+ enum e1000_smart_speed smart_speed;
+
+ u32 addr;
+ u32 id;
+ u32 reset_delay_us; /* in usec */
+ u32 revision;
+
+ enum e1000_media_type media_type;
+
+ u16 autoneg_advertised;
+ u16 autoneg_mask;
+ u16 cable_length;
+ u16 max_cable_length;
+ u16 min_cable_length;
+
+ u8 mdix;
+
+ bool disable_polarity_correction;
+ bool is_mdix;
+ bool polarity_correction;
+ bool speed_downgraded;
+ bool autoneg_wait_to_complete;
+};
+
+struct e1000_nvm_info {
+ struct e1000_nvm_operations ops;
+
+ enum e1000_nvm_type type;
+ enum e1000_nvm_override override;
+
+ u32 flash_bank_size;
+ u32 flash_base_addr;
+
+ u16 word_size;
+ u16 delay_usec;
+ u16 address_bits;
+ u16 opcode_bits;
+ u16 page_size;
+};
+
+struct e1000_bus_info {
+ enum e1000_bus_width width;
+
+ u16 func;
+};
+
+struct e1000_fc_info {
+ u32 high_water; /* Flow control high-water mark */
+ u32 low_water; /* Flow control low-water mark */
+ u16 pause_time; /* Flow control pause timer */
+ u16 refresh_time; /* Flow control refresh timer */
+ bool send_xon; /* Flow control send XON */
+ bool strict_ieee; /* Strict IEEE mode */
+ enum e1000_fc_mode current_mode; /* FC mode in effect */
+ enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+struct e1000_dev_spec_82571 {
+ bool laa_is_present;
+ u32 smb_counter;
+};
+
+struct e1000_dev_spec_80003es2lan {
+ bool mdic_wa_enable;
+};
+
+struct e1000_shadow_ram {
+ u16 value;
+ bool modified;
+};
+
+#define E1000_ICH8_SHADOW_RAM_WORDS 2048
+
+struct e1000_dev_spec_ich8lan {
+ bool kmrn_lock_loss_workaround_enabled;
+ struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
+ bool nvm_k1_enabled;
+ bool eee_disable;
+};
+
+struct e1000_hw {
+ struct e1000_adapter *adapter;
+
+ u8 __iomem *hw_addr;
+ u8 __iomem *flash_address;
+
+ struct e1000_mac_info mac;
+ struct e1000_fc_info fc;
+ struct e1000_phy_info phy;
+ struct e1000_nvm_info nvm;
+ struct e1000_bus_info bus;
+ struct e1000_host_mng_dhcp_cookie mng_cookie;
+
+ union {
+ struct e1000_dev_spec_82571 e82571;
+ struct e1000_dev_spec_80003es2lan e80003es2lan;
+ struct e1000_dev_spec_ich8lan ich8lan;
+ } dev_spec;
+};
+
+#endif
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
new file mode 100644
index 000000000000..4f709749dbce
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -0,0 +1,4152 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/*
+ * 82562G 10/100 Network Connection
+ * 82562G-2 10/100 Network Connection
+ * 82562GT 10/100 Network Connection
+ * 82562GT-2 10/100 Network Connection
+ * 82562V 10/100 Network Connection
+ * 82562V-2 10/100 Network Connection
+ * 82566DC-2 Gigabit Network Connection
+ * 82566DC Gigabit Network Connection
+ * 82566DM-2 Gigabit Network Connection
+ * 82566DM Gigabit Network Connection
+ * 82566MC Gigabit Network Connection
+ * 82566MM Gigabit Network Connection
+ * 82567LM Gigabit Network Connection
+ * 82567LF Gigabit Network Connection
+ * 82567V Gigabit Network Connection
+ * 82567LM-2 Gigabit Network Connection
+ * 82567LF-2 Gigabit Network Connection
+ * 82567V-2 Gigabit Network Connection
+ * 82567LF-3 Gigabit Network Connection
+ * 82567LM-3 Gigabit Network Connection
+ * 82567LM-4 Gigabit Network Connection
+ * 82577LM Gigabit Network Connection
+ * 82577LC Gigabit Network Connection
+ * 82578DM Gigabit Network Connection
+ * 82578DC Gigabit Network Connection
+ * 82579LM Gigabit Network Connection
+ * 82579V Gigabit Network Connection
+ */
+
+#include "e1000.h"
+
+#define ICH_FLASH_GFPREG 0x0000
+#define ICH_FLASH_HSFSTS 0x0004
+#define ICH_FLASH_HSFCTL 0x0006
+#define ICH_FLASH_FADDR 0x0008
+#define ICH_FLASH_FDATA0 0x0010
+#define ICH_FLASH_PR0 0x0074
+
+#define ICH_FLASH_READ_COMMAND_TIMEOUT 500
+#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500
+#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000
+#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+#define ICH_FLASH_CYCLE_REPEAT_COUNT 10
+
+#define ICH_CYCLE_READ 0
+#define ICH_CYCLE_WRITE 2
+#define ICH_CYCLE_ERASE 3
+
+#define FLASH_GFPREG_BASE_MASK 0x1FFF
+#define FLASH_SECTOR_ADDR_SHIFT 12
+
+#define ICH_FLASH_SEG_SIZE_256 256
+#define ICH_FLASH_SEG_SIZE_4K 4096
+#define ICH_FLASH_SEG_SIZE_8K 8192
+#define ICH_FLASH_SEG_SIZE_64K 65536
+
+#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
+/* FW established a valid mode */
+#define E1000_ICH_FWSM_FW_VALID 0x00008000
+
+#define E1000_ICH_MNG_IAMT_MODE 0x2
+
+#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_DEF1_OFF2 << 8) | \
+ (ID_LED_DEF1_ON2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+
+#define E1000_ICH_NVM_SIG_WORD 0x13
+#define E1000_ICH_NVM_SIG_MASK 0xC000
+#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
+#define E1000_ICH_NVM_SIG_VALUE 0x80
+
+#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
+
+#define E1000_FEXTNVM_SW_CONFIG 1
+#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
+
+#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
+
+#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
+
+#define E1000_ICH_RAR_ENTRIES 7
+
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
+ ((reg) & MAX_PHY_REG_ADDRESS))
+#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */
+#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */
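+/*
+ * For illustration: PHY_REG(770, 19) expands to (770 << 5) | 19 == 0x6053,
+ * the flat address the PHY register access routines use for IGP3_KMRN_DIAG.
+ */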
+
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002
+#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
+#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
+
+#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
+
+#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
+
+/* SMBus Address Phy Register */
+#define HV_SMB_ADDR PHY_REG(768, 26)
+#define HV_SMB_ADDR_MASK 0x007F
+#define HV_SMB_ADDR_PEC_EN 0x0200
+#define HV_SMB_ADDR_VALID 0x0080
+
+/* PHY Power Management Control */
+#define HV_PM_CTRL PHY_REG(770, 17)
+
+/* PHY Low Power Idle Control */
+#define I82579_LPI_CTRL PHY_REG(772, 20)
+#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
+#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
+
+/* EMI Registers */
+#define I82579_EMI_ADDR 0x10
+#define I82579_EMI_DATA 0x11
+#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
+
+/* Strapping Option Register - RO */
+#define E1000_STRAP 0x0000C
+#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
+#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
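+/*
+ * For illustration, the PHY's SMBus address is recovered from the strap
+ * register as (er32(STRAP) & E1000_STRAP_SMBUS_ADDRESS_MASK) >>
+ * E1000_STRAP_SMBUS_ADDRESS_SHIFT; see e1000_write_smbus_addr() below.
+ */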
+
+/* OEM Bits Phy Register */
+#define HV_OEM_BITS PHY_REG(768, 25)
+#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
+#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */
+#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
+
+#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
+#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
+
+/* KMRN Mode Control */
+#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
+#define HV_KMRN_MDIO_SLOW 0x0400
+
+/* KMRN FIFO Control and Status */
+#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
+
+/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
+/* Offset 04h HSFSTS */
+union ich8_hws_flash_status {
+ struct ich8_hsfsts {
+ u16 flcdone :1; /* bit 0 Flash Cycle Done */
+ u16 flcerr :1; /* bit 1 Flash Cycle Error */
+ u16 dael :1; /* bit 2 Direct Access error Log */
+ u16 berasesz :2; /* bit 4:3 Sector Erase Size */
+ u16 flcinprog :1; /* bit 5 flash cycle in Progress */
+ u16 reserved1 :2; /* bit 7:6 Reserved */
+ u16 reserved2 :6; /* bit 13:8 Reserved */
+ u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
+ u16 flockdn :1; /* bit 15 Flash Config Lock-Down */
+ } hsf_status;
+ u16 regval;
+};
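+/*
+ * Illustrative use of the union above (a sketch, not a new helper):
+ *
+ *	union ich8_hws_flash_status hsfsts;
+ *
+ *	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ *	done_ok = hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr;
+ */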
+
+/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
+/* Offset 06h FLCTL */
+union ich8_hws_flash_ctrl {
+ struct ich8_hsflctl {
+ u16 flcgo :1; /* 0 Flash Cycle Go */
+ u16 flcycle :2; /* 2:1 Flash Cycle */
+ u16 reserved :5; /* 7:3 Reserved */
+ u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
+ u16 flockdn :6; /* 15:10 Reserved */
+ } hsf_ctrl;
+ u16 regval;
+};
+
+/* ICH Flash Region Access Permissions */
+union ich8_hws_flash_regacc {
+ struct ich8_flracc {
+ u32 grra :8; /* 0:7 GbE region Read Access */
+ u32 grwa :8; /* 8:15 GbE region Write Access */
+ u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
+ u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
+ } hsf_flregacc;
+ u32 regval;
+};
+
+/* ICH Flash Protected Region */
+union ich8_flash_protected_range {
+ struct ich8_pr {
+ u32 base:13; /* 0:12 Protected Range Base */
+ u32 reserved1:2; /* 13:14 Reserved */
+ u32 rpe:1; /* 15 Read Protection Enable */
+ u32 limit:13; /* 16:28 Protected Range Limit */
+ u32 reserved2:2; /* 29:30 Reserved */
+ u32 wpe:1; /* 31 Write Protection Enable */
+ } range;
+ u32 regval;
+};
+
+static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
+static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
+static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
+static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
+ u32 offset, u8 byte);
+static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 *data);
+static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 size, u16 *data);
+static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
+static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
+static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
+static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
+static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
+static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
+static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
+static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
+static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
+static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
+static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
+static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
+static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
+static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
+
+static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
+{
+ return readw(hw->flash_address + reg);
+}
+
+static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
+{
+ return readl(hw->flash_address + reg);
+}
+
+static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
+{
+ writew(val, hw->flash_address + reg);
+}
+
+static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
+{
+ writel(val, hw->flash_address + reg);
+}
+
+#define er16flash(reg) __er16flash(hw, (reg))
+#define er32flash(reg) __er32flash(hw, (reg))
+#define ew16flash(reg, val) __ew16flash(hw, (reg), (val))
+#define ew32flash(reg, val) __ew32flash(hw, (reg), (val))
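+
+/*
+ * These helpers address the flash BAR rather than the main register BAR;
+ * e.g. the NVM init code below reads the region map with
+ * er32flash(ICH_FLASH_GFPREG).
+ */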
+
+static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
+ ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
+ ew32(CTRL, ctrl);
+ e1e_flush();
+ udelay(10);
+ ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+ ew32(CTRL, ctrl);
+}
+
+/**
+ * e1000_init_phy_params_pchlan - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize family-specific PHY parameters and function pointers.
+ **/
+static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 fwsm;
+ s32 ret_val = 0;
+
+ phy->addr = 1;
+ phy->reset_delay_us = 100;
+
+ phy->ops.set_page = e1000_set_page_igp;
+ phy->ops.read_reg = e1000_read_phy_reg_hv;
+ phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
+ phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
+ phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
+ phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
+ phy->ops.write_reg = e1000_write_phy_reg_hv;
+ phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
+ phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+ /*
+ * The MAC-PHY interconnect may still be in SMBus mode
+ * after Sx->S0. If the manageability engine (ME) is
+ * disabled, then toggle the LANPHYPC Value bit to force
+ * the interconnect to PCIe mode.
+ */
+ fwsm = er32(FWSM);
+ if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) {
+ e1000_toggle_lanphypc_value_ich8lan(hw);
+ msleep(50);
+
+ /*
+ * Gate automatic PHY configuration by hardware on
+ * non-managed 82579
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
+ }
+
+ /*
+ * Reset the PHY before any access to it. Doing so, ensures that
+ * the PHY is in a known good state before we read/write PHY registers.
+ * The generic reset is sufficient here, because we haven't determined
+ * the PHY type yet.
+ */
+ ret_val = e1000e_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
+ /* Ungate automatic PHY configuration on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
+ usleep_range(10000, 20000);
+ e1000_gate_hw_phy_config_ich8lan(hw, false);
+ }
+
+ phy->id = e1000_phy_unknown;
+ switch (hw->mac.type) {
+ default:
+ ret_val = e1000e_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+ if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
+ break;
+ /* fall-through */
+ case e1000_pch2lan:
+ /*
+ * In case the PHY needs to be in mdio slow mode,
+ * set slow mode and try to get the PHY id again.
+ */
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ goto out;
+ ret_val = e1000e_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+ break;
+ }
+ phy->type = e1000e_get_phy_type_from_id(phy->id);
+
+ switch (phy->type) {
+ case e1000_phy_82577:
+ case e1000_phy_82579:
+ phy->ops.check_polarity = e1000_check_polarity_82577;
+ phy->ops.force_speed_duplex =
+ e1000_phy_force_speed_duplex_82577;
+ phy->ops.get_cable_length = e1000_get_cable_length_82577;
+ phy->ops.get_info = e1000_get_phy_info_82577;
+ phy->ops.commit = e1000e_phy_sw_reset;
+ break;
+ case e1000_phy_82578:
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
+ phy->ops.get_cable_length = e1000e_get_cable_length_m88;
+ phy->ops.get_info = e1000e_get_phy_info_m88;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ break;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize family-specific PHY parameters and function pointers.
+ **/
+static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 i = 0;
+
+ phy->addr = 1;
+ phy->reset_delay_us = 100;
+
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
+
+ /*
+ * We may need to do this twice - once for IGP and if that fails,
+ * we'll set BM func pointers and try again
+ */
+ ret_val = e1000e_determine_phy_address(hw);
+ if (ret_val) {
+ phy->ops.write_reg = e1000e_write_phy_reg_bm;
+ phy->ops.read_reg = e1000e_read_phy_reg_bm;
+ ret_val = e1000e_determine_phy_address(hw);
+ if (ret_val) {
+ e_dbg("Cannot determine PHY addr. Erroring out\n");
+ return ret_val;
+ }
+ }
+
+ phy->id = 0;
+ while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
+ (i++ < 100)) {
+ usleep_range(1000, 2000);
+ ret_val = e1000e_get_phy_id(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Verify phy id */
+ switch (phy->id) {
+ case IGP03E1000_E_PHY_ID:
+ phy->type = e1000_phy_igp_3;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
+ phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
+ phy->ops.get_info = e1000e_get_phy_info_igp;
+ phy->ops.check_polarity = e1000_check_polarity_igp;
+ phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp;
+ break;
+ case IFE_E_PHY_ID:
+ case IFE_PLUS_E_PHY_ID:
+ case IFE_C_E_PHY_ID:
+ phy->type = e1000_phy_ife;
+ phy->autoneg_mask = E1000_ALL_NOT_GIG;
+ phy->ops.get_info = e1000_get_phy_info_ife;
+ phy->ops.check_polarity = e1000_check_polarity_ife;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
+ break;
+ case BME1000_E_PHY_ID:
+ phy->type = e1000_phy_bm;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->ops.read_reg = e1000e_read_phy_reg_bm;
+ phy->ops.write_reg = e1000e_write_phy_reg_bm;
+ phy->ops.commit = e1000e_phy_sw_reset;
+ phy->ops.get_info = e1000e_get_phy_info_m88;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize family-specific NVM parameters and function
+ * pointers.
+ **/
+static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 gfpreg, sector_base_addr, sector_end_addr;
+ u16 i;
+
+ /* Can't read flash registers if the register set isn't mapped. */
+ if (!hw->flash_address) {
+ e_dbg("ERROR: Flash registers not mapped\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ nvm->type = e1000_nvm_flash_sw;
+
+ gfpreg = er32flash(ICH_FLASH_GFPREG);
+
+ /*
+ * sector_X_addr is a "sector"-aligned address (4096 bytes)
+ * Add 1 to sector_end_addr since this sector is included in
+ * the overall size.
+ */
+ sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
+ sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
+
+ /* flash_base_addr is byte-aligned */
+ nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
+
+ /*
+ * find total size of the NVM, then cut in half since the total
+ * size represents two separate NVM banks.
+ */
+ nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
+ << FLASH_SECTOR_ADDR_SHIFT;
+ nvm->flash_bank_size /= 2;
+ /* Adjust to word count */
+ nvm->flash_bank_size /= sizeof(u16);
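+ /*
+ * For illustration, with a (hypothetical) gfpreg of 0x000A0001:
+ * sector_base_addr is 1 and sector_end_addr is 0xB, a ten-sector
+ * (40 KiB) region, so each of the two banks holds 20 KiB, i.e.
+ * 10240 words.
+ */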
+
+ nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
+
+ /* Clear shadow ram */
+ for (i = 0; i < nvm->word_size; i++) {
+ dev_spec->shadow_ram[i].modified = false;
+ dev_spec->shadow_ram[i].value = 0xFFFF;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
+ * @adapter: pointer to the adapter structure
+ *
+ * Initialize family-specific MAC parameters and function
+ * pointers.
+ **/
+static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_mac_info *mac = &hw->mac;
+
+ /* Set media type function pointer */
+ hw->phy.media_type = e1000_media_type_copper;
+
+ /* Set mta register count */
+ mac->mta_reg_count = 32;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
+ if (mac->type == e1000_ich8lan)
+ mac->rar_entry_count--;
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC subsystem not supported */
+ mac->arc_subsystem_valid = false;
+ /* Adaptive IFS supported */
+ mac->adaptive_ifs = true;
+
+ /* LED operations */
+ switch (mac->type) {
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ /* check management mode */
+ mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
+ /* ID LED init */
+ mac->ops.id_led_init = e1000e_id_led_init;
+ /* blink LED */
+ mac->ops.blink_led = e1000e_blink_led_generic;
+ /* setup LED */
+ mac->ops.setup_led = e1000e_setup_led_generic;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_ich8lan;
+ mac->ops.led_off = e1000_led_off_ich8lan;
+ break;
+ case e1000_pchlan:
+ case e1000_pch2lan:
+ /* check management mode */
+ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
+ /* ID LED init */
+ mac->ops.id_led_init = e1000_id_led_init_pchlan;
+ /* setup LED */
+ mac->ops.setup_led = e1000_setup_led_pchlan;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_pchlan;
+ mac->ops.led_off = e1000_led_off_pchlan;
+ break;
+ default:
+ break;
+ }
+
+ /* Enable PCS Lock-loss workaround for ICH8 */
+ if (mac->type == e1000_ich8lan)
+ e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
+
+ /* Gate automatic PHY configuration by hardware on managed 82579 */
+ if ((mac->type == e1000_pch2lan) &&
+ (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
+
+ return 0;
+}
+
+/**
+ * e1000_set_eee_pchlan - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ *
+ * Enable/disable EEE based on setting in dev_spec structure. The bits in
+ * the LPI Control register will remain set only if/when link is up.
+ **/
+static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 phy_reg;
+
+ if (hw->phy.type != e1000_phy_82579)
+ goto out;
+
+ ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
+ if (ret_val)
+ goto out;
+
+ if (hw->dev_spec.ich8lan.eee_disable)
+ phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
+ else
+ phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
+
+ ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the link status of the hardware has changed. If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ **/
+static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ bool link;
+ u16 phy_reg;
+
+ /*
+ * We only want to go out to the PHY registers to see if Auto-Neg
+ * has completed and/or if our link status has changed. The
+ * get_link_status flag is set upon receiving a Link Status
+ * Change or Rx Sequence Error interrupt.
+ */
+ if (!mac->get_link_status) {
+ ret_val = 0;
+ goto out;
+ }
+
+ /*
+ * First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (hw->mac.type == e1000_pchlan) {
+ ret_val = e1000_k1_gig_workaround_hv(hw, link);
+ if (ret_val)
+ goto out;
+ }
+
+ if (!link)
+ goto out; /* No link detected */
+
+ mac->get_link_status = false;
+
+ switch (hw->mac.type) {
+ case e1000_pch2lan:
+ ret_val = e1000_k1_workaround_lv(hw);
+ if (ret_val)
+ goto out;
+ /* fall-through */
+ case e1000_pchlan:
+ if (hw->phy.type == e1000_phy_82578) {
+ ret_val = e1000_link_stall_workaround_hv(hw);
+ if (ret_val)
+ goto out;
+ }
+
+ /*
+ * Workaround for PCHx parts in half-duplex:
+ * Set the number of preambles removed from the packet
+ * when it is passed from the PHY to the MAC to prevent
+ * the MAC from misinterpreting the packet type.
+ */
+ e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
+ phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
+
+ if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
+ phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
+
+ e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
+ e1000e_check_downshift(hw);
+
+ /* Enable/Disable EEE after link up */
+ ret_val = e1000_set_eee_pchlan(hw);
+ if (ret_val)
+ goto out;
+
+ /*
+ * If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg) {
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ /*
+ * Auto-Neg is enabled. Auto Speed Detection takes care
+ * of MAC speed/duplex configuration. So we only need to
+ * configure Collision Distance in the MAC.
+ */
+ e1000e_config_collision_dist(hw);
+
+ /*
+ * Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000e_config_fc_after_link_up(hw);
+ if (ret_val)
+ e_dbg("Error configuring flow control\n");
+
+out:
+ return ret_val;
+}
+
+static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ s32 rc;
+
+ rc = e1000_init_mac_params_ich8lan(adapter);
+ if (rc)
+ return rc;
+
+ rc = e1000_init_nvm_params_ich8lan(hw);
+ if (rc)
+ return rc;
+
+ switch (hw->mac.type) {
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ case e1000_ich10lan:
+ rc = e1000_init_phy_params_ich8lan(hw);
+ break;
+ case e1000_pchlan:
+ case e1000_pch2lan:
+ rc = e1000_init_phy_params_pchlan(hw);
+ break;
+ default:
+ break;
+ }
+ if (rc)
+ return rc;
+
+ /*
+ * Disable Jumbo Frame support on parts with Intel 10/100 PHY or
+ * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
+ */
+ if ((adapter->hw.phy.type == e1000_phy_ife) ||
+ ((adapter->hw.mac.type >= e1000_pch2lan) &&
+ (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
+ adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
+ adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
+
+ hw->mac.ops.blink_led = NULL;
+ }
+
+ if ((adapter->hw.mac.type == e1000_ich8lan) &&
+ (adapter->hw.phy.type != e1000_phy_ife))
+ adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
+
+ /* Enable workaround for 82579 w/ ME enabled */
+ if ((adapter->hw.mac.type == e1000_pch2lan) &&
+ (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+ adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
+
+ /* Disable EEE by default until IEEE802.3az spec is finalized */
+ if (adapter->flags2 & FLAG2_HAS_EEE)
+ adapter->hw.dev_spec.ich8lan.eee_disable = true;
+
+ return 0;
+}
+
+static DEFINE_MUTEX(nvm_mutex);
+
+/**
+ * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
+ * @hw: pointer to the HW structure
+ *
+ * Acquires the mutex for performing NVM operations.
+ **/
+static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
+{
+ mutex_lock(&nvm_mutex);
+
+ return 0;
+}
+
+/**
+ * e1000_release_nvm_ich8lan - Release NVM mutex
+ * @hw: pointer to the HW structure
+ *
+ * Releases the mutex used while performing NVM operations.
+ **/
+static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
+{
+ mutex_unlock(&nvm_mutex);
+}
+
+static DEFINE_MUTEX(swflag_mutex);
+
+/**
+ * e1000_acquire_swflag_ich8lan - Acquire software control flag
+ * @hw: pointer to the HW structure
+ *
+ * Acquires the software control flag for performing PHY and select
+ * MAC CSR accesses.
+ **/
+static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
+ s32 ret_val = 0;
+
+ mutex_lock(&swflag_mutex);
+
+ while (timeout) {
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
+ break;
+
+ mdelay(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ e_dbg("SW/FW/HW has locked the resource for too long.\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ timeout = SW_FLAG_TIMEOUT;
+
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+
+ while (timeout) {
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+ break;
+
+ mdelay(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ e_dbg("Failed to acquire the semaphore.\n");
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+out:
+ if (ret_val)
+ mutex_unlock(&swflag_mutex);
+
+ return ret_val;
+}
+
+/**
+ * e1000_release_swflag_ich8lan - Release software control flag
+ * @hw: pointer to the HW structure
+ *
+ * Releases the software control flag for performing PHY and select
+ * MAC CSR accesses.
+ **/
+static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+ } else {
+ e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
+ }
+
+ mutex_unlock(&swflag_mutex);
+}
+
+/**
+ * e1000_check_mng_mode_ich8lan - Checks management mode
+ * @hw: pointer to the HW structure
+ *
+ * This checks if the adapter has any manageability enabled.
+ * This is a function pointer entry point only called by read/write
+ * routines for the PHY and NVM parts.
+ **/
+static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
+{
+ u32 fwsm;
+
+ fwsm = er32(FWSM);
+ return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
+
+/**
+ * e1000_check_mng_mode_pchlan - Checks management mode
+ * @hw: pointer to the HW structure
+ *
+ * This checks if the adapter has iAMT enabled.
+ * This is a function pointer entry point only called by read/write
+ * routines for the PHY and NVM parts.
+ **/
+static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
+{
+ u32 fwsm;
+
+ fwsm = er32(FWSM);
+ return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
+
+/**
+ * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
+ * @hw: pointer to the HW structure
+ *
+ * Checks if firmware is blocking the reset of the PHY.
+ * This is a function pointer entry point only called by
+ * reset routines.
+ **/
+static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
+{
+ u32 fwsm;
+
+ fwsm = er32(FWSM);
+
+ return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET;
+}
+
+/**
+ * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
+ * @hw: pointer to the HW structure
+ *
+ * Assumes semaphore already acquired.
+ **/
+static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
+{
+ u16 phy_data;
+ u32 strap = er32(STRAP);
+ s32 ret_val = 0;
+
+ strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~HV_SMB_ADDR_MASK;
+ phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
+ phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
+ * @hw: pointer to the HW structure
+ *
+ * SW should configure the LCD from the NVM extended configuration region
+ * as a workaround for certain parts.
+ **/
+static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
+ s32 ret_val = 0;
+ u16 word_addr, reg_data, reg_addr, phy_page = 0;
+
+ /*
+ * Initialize the PHY from the NVM on ICH platforms. This
+ * is needed due to an issue where the NVM configuration is
+ * not properly autoloaded after power transitions.
+ * Therefore, after each PHY reset, we will load the
+ * configuration data out of the NVM manually.
+ */
+ switch (hw->mac.type) {
+ case e1000_ich8lan:
+ if (phy->type != e1000_phy_igp_3)
+ return ret_val;
+
+ if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+ break;
+ }
+ /* Fall-through */
+ case e1000_pchlan:
+ case e1000_pch2lan:
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
+ break;
+ default:
+ return ret_val;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ data = er32(FEXTNVM);
+ if (!(data & sw_cfg_mask))
+ goto out;
+
+ /*
+ * Make sure HW does not configure LCD from PHY
+ * extended configuration before SW configuration
+ */
+ data = er32(EXTCNF_CTRL);
+ if (!(hw->mac.type == e1000_pch2lan)) {
+ if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+ goto out;
+ }
+
+ cnf_size = er32(EXTCNF_SIZE);
+ cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
+ cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
+ if (!cnf_size)
+ goto out;
+
+ cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
+ cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
+
+ if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
+ (hw->mac.type == e1000_pchlan)) ||
+ (hw->mac.type == e1000_pch2lan)) {
+ /*
+ * HW configures the SMBus address and LEDs when the
+ * OEM and LCD Write Enable bits are set in the NVM.
+ * When both NVM bits are cleared, SW will configure
+ * them instead.
+ */
+ ret_val = e1000_write_smbus_addr(hw);
+ if (ret_val)
+ goto out;
+
+ data = er32(LEDCTL);
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
+ (u16)data);
+ if (ret_val)
+ goto out;
+ }
+
+ /* Configure LCD from extended configuration region. */
+
+ /* cnf_base_addr is in DWORD */
+ word_addr = (u16)(cnf_base_addr << 1);
+
+ for (i = 0; i < cnf_size; i++) {
+ ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
+ &reg_data);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
+ 1, &reg_addr);
+ if (ret_val)
+ goto out;
+
+ /* Save off the PHY page for future writes. */
+ if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
+ phy_page = reg_data;
+ continue;
+ }
+
+ reg_addr &= PHY_REG_MASK;
+ reg_addr |= phy_page;
+
+ ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
+ reg_data);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_k1_gig_workaround_hv - K1 Si workaround
+ * @hw: pointer to the HW structure
+ * @link: link up bool flag
+ *
+ * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
+ * from a lower speed. This workaround disables K1 whenever link is at 1Gig.
+ * If link is down, the function will restore the default K1 setting located
+ * in the NVM.
+ **/
+static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
+{
+ s32 ret_val = 0;
+ u16 status_reg = 0;
+ bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
+
+ if (hw->mac.type != e1000_pchlan)
+ goto out;
+
+ /* Wrap the whole flow with the sw flag */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
+ if (link) {
+ if (hw->phy.type == e1000_phy_82578) {
+ ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
+ &status_reg);
+ if (ret_val)
+ goto release;
+
+ status_reg &= BM_CS_STATUS_LINK_UP |
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_MASK;
+
+ if (status_reg == (BM_CS_STATUS_LINK_UP |
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_1000))
+ k1_enable = false;
+ }
+
+ if (hw->phy.type == e1000_phy_82577) {
+ ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
+ &status_reg);
+ if (ret_val)
+ goto release;
+
+ status_reg &= HV_M_STATUS_LINK_UP |
+ HV_M_STATUS_AUTONEG_COMPLETE |
+ HV_M_STATUS_SPEED_MASK;
+
+ if (status_reg == (HV_M_STATUS_LINK_UP |
+ HV_M_STATUS_AUTONEG_COMPLETE |
+ HV_M_STATUS_SPEED_1000))
+ k1_enable = false;
+ }
+
+ /* Link stall fix for link up */
+ ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
+ 0x0100);
+ if (ret_val)
+ goto release;
+
+ } else {
+ /* Link stall fix for link down */
+ ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
+ 0x4100);
+ if (ret_val)
+ goto release;
+ }
+
+ ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
+
+release:
+ hw->phy.ops.release(hw);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_configure_k1_ich8lan - Configure K1 power state
+ * @hw: pointer to the HW structure
+ * @k1_enable: K1 state to configure
+ *
+ * Configure the K1 power state based on the provided parameter.
+ * Assumes semaphore already acquired.
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ **/
+s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
+{
+ s32 ret_val = 0;
+ u32 ctrl_reg = 0;
+ u32 ctrl_ext = 0;
+ u32 reg = 0;
+ u16 kmrn_reg = 0;
+
+ ret_val = e1000e_read_kmrn_reg_locked(hw,
+ E1000_KMRNCTRLSTA_K1_CONFIG,
+ &kmrn_reg);
+ if (ret_val)
+ goto out;
+
+ if (k1_enable)
+ kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
+ else
+ kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
+
+ ret_val = e1000e_write_kmrn_reg_locked(hw,
+ E1000_KMRNCTRLSTA_K1_CONFIG,
+ kmrn_reg);
+ if (ret_val)
+ goto out;
+
+ udelay(20);
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_reg = er32(CTRL);
+
+ reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+ reg |= E1000_CTRL_FRCSPD;
+ ew32(CTRL, reg);
+
+ ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
+ e1e_flush();
+ udelay(20);
+ ew32(CTRL, ctrl_reg);
+ ew32(CTRL_EXT, ctrl_ext);
+ e1e_flush();
+ udelay(20);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
+ * @hw: pointer to the HW structure
+ * @d0_state: boolean indicating whether the device is entering D0 or D3
+ *
+ * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
+ * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
+ * in NVM determine whether HW should configure LPLU and Gbe Disable.
+ **/
+static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
+{
+ s32 ret_val = 0;
+ u32 mac_reg;
+ u16 oem_reg;
+
+ if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
+ return ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ if (!(hw->mac.type == e1000_pch2lan)) {
+ mac_reg = er32(EXTCNF_CTRL);
+ if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
+ goto out;
+ }
+
+ mac_reg = er32(FEXTNVM);
+ if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
+ goto out;
+
+ mac_reg = er32(PHY_CTRL);
+
+ ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
+ if (ret_val)
+ goto out;
+
+ oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
+
+ if (d0_state) {
+ if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
+ oem_reg |= HV_OEM_BITS_GBE_DIS;
+
+ if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
+ oem_reg |= HV_OEM_BITS_LPLU;
+
+ /* Set Restart auto-neg to activate the bits */
+ if (!e1000_check_reset_block(hw))
+ oem_reg |= HV_OEM_BITS_RESTART_AN;
+ } else {
+ if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
+ oem_reg |= HV_OEM_BITS_GBE_DIS;
+
+ if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
+ E1000_PHY_CTRL_NOND0A_LPLU))
+ oem_reg |= HV_OEM_BITS_LPLU;
+ }
+
+ ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
+
+out:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 data;
+
+ ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= HV_KMRN_MDIO_SLOW;
+
+ ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_hv_phy_workarounds_ich8lan - apply PHY workarounds
+ * @hw: pointer to the HW structure
+ *
+ * A series of PHY workarounds to be done after every PHY reset.
+ **/
+static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 phy_data;
+
+ if (hw->mac.type != e1000_pchlan)
+ return ret_val;
+
+ /* Set MDIO slow mode before any other MDIO access */
+ if (hw->phy.type == e1000_phy_82577) {
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ goto out;
+ }
+
+ if (((hw->phy.type == e1000_phy_82577) &&
+ ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
+ ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
+ /* Disable generation of early preamble */
+ ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
+ if (ret_val)
+ return ret_val;
+
+ /* Preamble tuning for SSC */
+ ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (hw->phy.type == e1000_phy_82578) {
+ /*
+ * Return registers to default by doing a soft reset then
+ * writing 0x3140 to the control register.
+ */
+ if (hw->phy.revision < 2) {
+ e1000e_phy_sw_reset(hw);
+ ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140);
+ }
+ }
+
+ /* Select page 0 */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ hw->phy.addr = 1;
+ ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+ hw->phy.ops.release(hw);
+ if (ret_val)
+ goto out;
+
+ /*
+ * Configure the K1 Si workaround during phy reset assuming there is
+ * link so that it disables K1 if link is in 1Gbps.
+ */
+ ret_val = e1000_k1_gig_workaround_hv(hw, true);
+ if (ret_val)
+ goto out;
+
+ /* Workaround for link disconnects on a busy hub in half duplex */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
+ if (ret_val)
+ goto release;
+ ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
+ phy_data & 0x00FF);
+release:
+ hw->phy.ops.release(hw);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
+ * @hw: pointer to the HW structure
+ **/
+void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
+{
+ u32 mac_reg;
+ u16 i, phy_reg = 0;
+ s32 ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+ ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+ if (ret_val)
+ goto release;
+
+ /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
+ for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+ mac_reg = er32(RAL(i));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
+ (u16)(mac_reg & 0xFFFF));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
+ (u16)((mac_reg >> 16) & 0xFFFF));
+
+ mac_reg = er32(RAH(i));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
+ (u16)(mac_reg & 0xFFFF));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
+ (u16)((mac_reg & E1000_RAH_AV)
+ >> 16));
+ }
+
+ e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+
+release:
+ hw->phy.ops.release(hw);
+}
+
+/**
+ * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
+ * with 82579 PHY
+ * @hw: pointer to the HW structure
+ * @enable: flag to enable/disable workaround when enabling/disabling jumbos
+ **/
+s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
+{
+ s32 ret_val = 0;
+ u16 phy_reg, data;
+ u32 mac_reg;
+ u16 i;
+
+ if (hw->mac.type != e1000_pch2lan)
+ goto out;
+
+ /* disable Rx path while enabling/disabling workaround */
+ e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
+ if (ret_val)
+ goto out;
+
+ if (enable) {
+ /*
+ * Write Rx addresses (rar_entry_count for RAL/H, +4 for
+ * SHRAL/H) and initial CRC values to the MAC
+ */
+ for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+ u8 mac_addr[ETH_ALEN] = {0};
+ u32 addr_high, addr_low;
+
+ addr_high = er32(RAH(i));
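+			/* Skip receive address registers that are not valid */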
+ if (!(addr_high & E1000_RAH_AV))
+ continue;
+ addr_low = er32(RAL(i));
+ mac_addr[0] = (addr_low & 0xFF);
+ mac_addr[1] = ((addr_low >> 8) & 0xFF);
+ mac_addr[2] = ((addr_low >> 16) & 0xFF);
+ mac_addr[3] = ((addr_low >> 24) & 0xFF);
+ mac_addr[4] = (addr_high & 0xFF);
+ mac_addr[5] = ((addr_high >> 8) & 0xFF);
+
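+			/*
+			 * Seed the MAC's CRC for this entry with the
+			 * inverted little-endian CRC-32 of the address.
+			 */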
+ ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
+ }
+
+ /* Write Rx addresses to the PHY */
+ e1000_copy_rx_addrs_to_phy_ich8lan(hw);
+
+ /* Enable jumbo frame workaround in the MAC */
+ mac_reg = er32(FFLT_DBG);
+ mac_reg &= ~(1 << 14);
+ mac_reg |= (7 << 15);
+ ew32(FFLT_DBG, mac_reg);
+
+ mac_reg = er32(RCTL);
+ mac_reg |= E1000_RCTL_SECRC;
+ ew32(RCTL, mac_reg);
+
+ ret_val = e1000e_read_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ &data);
+ if (ret_val)
+ goto out;
+ ret_val = e1000e_write_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ data | (1 << 0));
+ if (ret_val)
+ goto out;
+ ret_val = e1000e_read_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ &data);
+ if (ret_val)
+ goto out;
+ data &= ~(0xF << 8);
+ data |= (0xB << 8);
+ ret_val = e1000e_write_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ data);
+ if (ret_val)
+ goto out;
+
+ /* Enable jumbo frame workaround in the PHY */
+ e1e_rphy(hw, PHY_REG(769, 23), &data);
+ data &= ~(0x7F << 5);
+ data |= (0x37 << 5);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, PHY_REG(769, 16), &data);
+ data &= ~(1 << 13);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, PHY_REG(776, 20), &data);
+ data &= ~(0x3FF << 2);
+ data |= (0x1A << 2);
+ ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
+ if (ret_val)
+ goto out;
+ ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100);
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, HV_PM_CTRL, &data);
+ ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
+ if (ret_val)
+ goto out;
+ } else {
+ /* Write MAC register values back to h/w defaults */
+ mac_reg = er32(FFLT_DBG);
+ mac_reg &= ~(0xF << 14);
+ ew32(FFLT_DBG, mac_reg);
+
+ mac_reg = er32(RCTL);
+ mac_reg &= ~E1000_RCTL_SECRC;
+ ew32(RCTL, mac_reg);
+
+ ret_val = e1000e_read_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ &data);
+ if (ret_val)
+ goto out;
+ ret_val = e1000e_write_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ data & ~(1 << 0));
+ if (ret_val)
+ goto out;
+ ret_val = e1000e_read_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ &data);
+ if (ret_val)
+ goto out;
+ data &= ~(0xF << 8);
+ data |= (0xB << 8);
+ ret_val = e1000e_write_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ data);
+ if (ret_val)
+ goto out;
+
+ /* Write PHY register values back to h/w defaults */
+ e1e_rphy(hw, PHY_REG(769, 23), &data);
+ data &= ~(0x7F << 5);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, PHY_REG(769, 16), &data);
+ data |= (1 << 13);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, PHY_REG(776, 20), &data);
+ data &= ~(0x3FF << 2);
+ data |= (0x8 << 2);
+ ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
+ if (ret_val)
+ goto out;
+ ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, HV_PM_CTRL, &data);
+ ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
+ if (ret_val)
+ goto out;
+ }
+
+ /* re-enable Rx path after enabling/disabling workaround */
+ ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
+ * done after every PHY reset.
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ if (hw->mac.type != e1000_pch2lan)
+ goto out;
+
+ /* Set MDIO slow mode before any other MDIO access */
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_k1_workaround_lv - K1 Si workaround
+ * @hw: pointer to the HW structure
+ *
+ * Workaround to set the K1 beacon duration for 82579 parts
+ **/
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 status_reg = 0;
+ u32 mac_reg;
+ u16 phy_reg;
+
+ if (hw->mac.type != e1000_pch2lan)
+ goto out;
+
+ /* Set K1 beacon duration based on 1Gbps speed or otherwise */
+ ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
+ if (ret_val)
+ goto out;
+
+ if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
+ == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
+ mac_reg = er32(FEXTNVM4);
+ mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
+
+ ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
+ if (ret_val)
+ goto out;
+
+ if (status_reg & HV_M_STATUS_SPEED_1000) {
+ mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
+ phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
+ } else {
+ mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
+ phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
+ }
+ ew32(FEXTNVM4, mac_reg);
+ ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
+ * @hw: pointer to the HW structure
+ * @gate: boolean set to true to gate, false to ungate
+ *
+ * Gate/ungate the automatic PHY configuration via hardware; perform
+ * the configuration via software instead.
+ **/
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
+{
+ u32 extcnf_ctrl;
+
+ if (hw->mac.type != e1000_pch2lan)
+ return;
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+
+ if (gate)
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+ else
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+}
+
+/**
+ * e1000_lan_init_done_ich8lan - Check for PHY config completion
+ * @hw: pointer to the HW structure
+ *
+ * Check the appropriate indication the MAC has finished configuring the
+ * PHY after a software reset.
+ **/
+static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
+{
+ u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
+
+	/* Wait for basic configuration to complete before proceeding */
+ do {
+ data = er32(STATUS);
+ data &= E1000_STATUS_LAN_INIT_DONE;
+ udelay(100);
+ } while ((!data) && --loop);
+
+ /*
+ * If basic configuration is incomplete before the above loop
+ * count reaches 0, loading the configuration from NVM will
+ * leave the PHY in a bad state possibly resulting in no link.
+ */
+ if (loop == 0)
+ e_dbg("LAN_INIT_DONE not set, increase timeout\n");
+
+ /* Clear the Init Done bit for the next init event */
+ data = er32(STATUS);
+ data &= ~E1000_STATUS_LAN_INIT_DONE;
+ ew32(STATUS, data);
+}
+
+/**
+ * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 reg;
+
+ if (e1000_check_reset_block(hw))
+ goto out;
+
+ /* Allow time for h/w to get to quiescent state after reset */
+ usleep_range(10000, 20000);
+
+ /* Perform any necessary post-reset workarounds */
+ switch (hw->mac.type) {
+ case e1000_pchlan:
+ ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+ if (ret_val)
+ goto out;
+ break;
+ case e1000_pch2lan:
+ ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
+ if (ret_val)
+ goto out;
+ break;
+ default:
+ break;
+ }
+
+ /* Clear the host wakeup bit after lcd reset */
+ if (hw->mac.type >= e1000_pchlan) {
+ e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
+ reg &= ~BM_WUC_HOST_WU_BIT;
+ e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
+ }
+
+ /* Configure the LCD with the extended configuration region in NVM */
+ ret_val = e1000_sw_lcd_config_ich8lan(hw);
+ if (ret_val)
+ goto out;
+
+ /* Configure the LCD with the OEM bits in NVM */
+ ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+
+ if (hw->mac.type == e1000_pch2lan) {
+ /* Ungate automatic PHY configuration on non-managed 82579 */
+ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ usleep_range(10000, 20000);
+ e1000_gate_hw_phy_config_ich8lan(hw, false);
+ }
+
+ /* Set EEE LPI Update Timer to 200usec */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
+ I82579_LPI_UPDATE_TIMER);
+ if (ret_val)
+ goto release;
+ ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
+ 0x1387);
+release:
+ hw->phy.ops.release(hw);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Resets the PHY
+ * This is a function pointer entry point called by drivers
+ * or other shared routines.
+ **/
+static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ /* Gate automatic PHY configuration by hardware on non-managed 82579 */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
+
+ ret_val = e1000e_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_post_phy_reset_ich8lan(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU state according to the active flag. For PCH, if OEM write
+ * bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
+ * the phy speed. This function will manually set the LPLU bit and restart
+ * auto-neg as hw would do. D3 and D0 LPLU will call the same function
+ * since it configures the same bit.
+ **/
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
+{
+ s32 ret_val = 0;
+ u16 oem_reg;
+
+ ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
+ if (ret_val)
+ goto out;
+
+ if (active)
+ oem_reg |= HV_OEM_BITS_LPLU;
+ else
+ oem_reg &= ~HV_OEM_BITS_LPLU;
+
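+	/* Restart auto-negotiation so the new LPLU setting takes effect */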
+ oem_reg |= HV_OEM_BITS_RESTART_AN;
+ ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag. When
+ * activating LPLU this function also disables smart speed
+ * and vice versa. LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 phy_ctrl;
+ s32 ret_val = 0;
+ u16 data;
+
+ if (phy->type == e1000_phy_ife)
+ return ret_val;
+
+ phy_ctrl = er32(PHY_CTRL);
+
+ if (active) {
+ phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
+ ew32(PHY_CTRL, phy_ctrl);
+
+ if (phy->type != e1000_phy_igp_3)
+ return 0;
+
+ /*
+ * Call gig speed drop workaround on LPLU before accessing
+ * any PHY registers
+ */
+ if (hw->mac.type == e1000_ich8lan)
+ e1000e_gig_downshift_workaround_ich8lan(hw);
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+		if (ret_val)
+			return ret_val;
+
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
+ if (ret_val)
+ return ret_val;
+ } else {
+ phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
+ ew32(PHY_CTRL, phy_ctrl);
+
+ if (phy->type != e1000_phy_igp_3)
+ return 0;
+
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D3 state according to the active flag. When
+ * activating LPLU this function also disables smart speed
+ * and vice versa. LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 phy_ctrl;
+ s32 ret_val;
+ u16 data;
+
+ phy_ctrl = er32(PHY_CTRL);
+
+ if (!active) {
+ phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+ ew32(PHY_CTRL, phy_ctrl);
+
+ if (phy->type != e1000_phy_igp_3)
+ return 0;
+
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ }
+ } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+ phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
+ ew32(PHY_CTRL, phy_ctrl);
+
+ if (phy->type != e1000_phy_igp_3)
+ return 0;
+
+ /*
+ * Call gig speed drop workaround on LPLU before accessing
+ * any PHY registers
+ */
+ if (hw->mac.type == e1000_ich8lan)
+ e1000e_gig_downshift_workaround_ich8lan(hw);
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
+ * @hw: pointer to the HW structure
+ * @bank: pointer to the variable that returns the active bank
+ *
+ * Reads signature byte from the NVM using the flash access registers.
+ * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
+ **/
+static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
+{
+ u32 eecd;
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
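+	/* byte offset of the high byte of signature word 0x13 */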
+ u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
+ u8 sig_byte = 0;
+ s32 ret_val = 0;
+
+ switch (hw->mac.type) {
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ eecd = er32(EECD);
+ if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
+ E1000_EECD_SEC1VAL_VALID_MASK) {
+ if (eecd & E1000_EECD_SEC1VAL)
+ *bank = 1;
+ else
+ *bank = 0;
+
+ return 0;
+ }
+ e_dbg("Unable to determine valid NVM bank via EEC - "
+ "reading flash signature\n");
+ /* fall-thru */
+ default:
+ /* set bank to 0 in case flash read fails */
+ *bank = 0;
+
+ /* Check bank 0 */
+ ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
+ &sig_byte);
+ if (ret_val)
+ return ret_val;
+ if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+ E1000_ICH_NVM_SIG_VALUE) {
+ *bank = 0;
+ return 0;
+ }
+
+ /* Check bank 1 */
+ ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
+ bank1_offset,
+ &sig_byte);
+ if (ret_val)
+ return ret_val;
+ if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+ E1000_ICH_NVM_SIG_VALUE) {
+ *bank = 1;
+ return 0;
+ }
+
+ e_dbg("ERROR: No valid NVM bank present\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_read_nvm_ich8lan - Read word(s) from the NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the word(s) to read.
+ * @words: Size of data to read in words
+ * @data: Pointer to the word(s) to read at offset.
+ *
+ * Reads a word(s) from the NVM using the flash access registers.
+ **/
+static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 act_offset;
+ s32 ret_val = 0;
+ u32 bank = 0;
+ u16 i, word;
+
+ if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+ (words == 0)) {
+ e_dbg("nvm parameter(s) out of bounds\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+ nvm->ops.acquire(hw);
+
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+ if (ret_val) {
+ e_dbg("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
+
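+	/* Translate the word offset into the currently valid flash bank */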
+ act_offset = (bank) ? nvm->flash_bank_size : 0;
+ act_offset += offset;
+
+ ret_val = 0;
+ for (i = 0; i < words; i++) {
+ if (dev_spec->shadow_ram[offset+i].modified) {
+ data[i] = dev_spec->shadow_ram[offset+i].value;
+ } else {
+ ret_val = e1000_read_flash_word_ich8lan(hw,
+ act_offset + i,
+ &word);
+ if (ret_val)
+ break;
+ data[i] = word;
+ }
+ }
+
+ nvm->ops.release(hw);
+
+out:
+ if (ret_val)
+ e_dbg("NVM read error: %d\n", ret_val);
+
+ return ret_val;
+}
+
+/**
+ * e1000_flash_cycle_init_ich8lan - Initialize flash
+ * @hw: pointer to the HW structure
+ *
+ * This function does initial flash setup so that a new read/write/erase cycle
+ * can be started.
+ **/
+static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
+{
+ union ich8_hws_flash_status hsfsts;
+ s32 ret_val = -E1000_ERR_NVM;
+
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+
+ /* Check if the flash descriptor is valid */
+ if (hsfsts.hsf_status.fldesvalid == 0) {
+ e_dbg("Flash descriptor invalid. "
+ "SW Sequencing must be used.\n");
+ return -E1000_ERR_NVM;
+ }
+
+ /* Clear FCERR and DAEL in hw status by writing 1 */
+ hsfsts.hsf_status.flcerr = 1;
+ hsfsts.hsf_status.dael = 1;
+
+ ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+
+ /*
+ * Either we should have a hardware SPI cycle in progress
+ * bit to check against, in order to start a new cycle or
+ * FDONE bit should be changed in the hardware so that it
+ * is 1 after hardware reset, which can then be used as an
+ * indication whether a cycle is in progress or has been
+ * completed.
+ */
+
+ if (hsfsts.hsf_status.flcinprog == 0) {
+ /*
+ * There is no cycle running at present,
+ * so we can start a cycle.
+ * Begin by setting Flash Cycle Done.
+ */
+ hsfsts.hsf_status.flcdone = 1;
+ ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+ ret_val = 0;
+ } else {
+ s32 i = 0;
+
+ /*
+		 * Otherwise poll for some time so the current
+ * cycle has a chance to end before giving up.
+ */
+ for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
+ hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcinprog == 0) {
+ ret_val = 0;
+ break;
+ }
+ udelay(1);
+ }
+ if (ret_val == 0) {
+ /*
+			 * The previous cycle completed within the timeout;
+ * now set the Flash Cycle Done.
+ */
+ hsfsts.hsf_status.flcdone = 1;
+ ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+ } else {
+ e_dbg("Flash controller busy, cannot get access\n");
+ }
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
+ * @hw: pointer to the HW structure
+ * @timeout: maximum time to wait for completion, in microseconds
+ *
+ * This function starts a flash cycle and waits for its completion.
+ **/
+static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
+{
+ union ich8_hws_flash_ctrl hsflctl;
+ union ich8_hws_flash_status hsfsts;
+ s32 ret_val = -E1000_ERR_NVM;
+ u32 i = 0;
+
+ /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
+ hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+ hsflctl.hsf_ctrl.flcgo = 1;
+ ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ /* wait till FDONE bit is set to 1 */
+ do {
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcdone == 1)
+ break;
+ udelay(1);
+ } while (i++ < timeout);
+
+ if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
+ return 0;
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_flash_word_ich8lan - Read word from flash
+ * @hw: pointer to the HW structure
+ * @offset: offset to data location
+ * @data: pointer to the location for storing the data
+ *
+ * Reads the flash word at offset into data. Offset is converted
+ * to bytes before read.
+ **/
+static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
+ u16 *data)
+{
+ /* Must convert offset into bytes. */
+ offset <<= 1;
+
+ return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
+}
+
+/**
+ * e1000_read_flash_byte_ich8lan - Read byte from flash
+ * @hw: pointer to the HW structure
+ * @offset: The offset of the byte to read.
+ * @data: Pointer to a byte to store the value read.
+ *
+ * Reads a single byte from the NVM using the flash access registers.
+ **/
+static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 *data)
+{
+ s32 ret_val;
+ u16 word = 0;
+
+ ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
+ if (ret_val)
+ return ret_val;
+
+ *data = (u8)word;
+
+ return 0;
+}
+
+/**
+ * e1000_read_flash_data_ich8lan - Read byte or word from NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the byte or word to read.
+ * @size: Size of data to read, 1=byte 2=word
+ * @data: Pointer to the word to store the value read.
+ *
+ * Reads a byte or word from the NVM using the flash access registers.
+ **/
+static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 size, u16 *data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ u32 flash_data = 0;
+ s32 ret_val = -E1000_ERR_NVM;
+ u8 count = 0;
+
+ if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+
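+	/* Translate the NVM byte offset into a linear flash address */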
+ flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr;
+
+ do {
+ udelay(1);
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val != 0)
+ break;
+
+ hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+ /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+ hsflctl.hsf_ctrl.fldbcount = size - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+ ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
+
+ ret_val = e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_READ_COMMAND_TIMEOUT);
+
+ /*
+ * Check if FCERR is set to 1, if set to 1, clear it
+ * and try the whole sequence a few more times, else
+ * read in (shift in) the Flash Data0, the order is
+		 * least significant byte first
+ */
+ if (ret_val == 0) {
+ flash_data = er32flash(ICH_FLASH_FDATA0);
+ if (size == 1)
+ *data = (u8)(flash_data & 0x000000FF);
+ else if (size == 2)
+ *data = (u16)(flash_data & 0x0000FFFF);
+ break;
+ } else {
+ /*
+ * If we've gotten here, then things are probably
+ * completely hosed, but if the error condition is
+ * detected, it won't hurt to give it another try...
+ * ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr == 1) {
+ /* Repeat for some time before giving up. */
+ continue;
+ } else if (hsfsts.hsf_status.flcdone == 0) {
+ e_dbg("Timeout error - flash cycle "
+ "did not complete.\n");
+ break;
+ }
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_nvm_ich8lan - Write word(s) to the NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the word(s) to write.
+ * @words: Size of data to write in words
+ * @data: Pointer to the word(s) to write at offset.
+ *
+ * Writes the word(s) to the shadow RAM. The words are committed to the
+ * NVM when e1000_update_nvm_checksum_ich8lan() is next called.
+ **/
+static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u16 i;
+
+ if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+ (words == 0)) {
+ e_dbg("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ nvm->ops.acquire(hw);
+
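+	/*
+	 * Stage the new values in the shadow RAM; they are committed to
+	 * the flash when the NVM checksum is next updated.
+	 */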
+ for (i = 0; i < words; i++) {
+ dev_spec->shadow_ram[offset+i].modified = true;
+ dev_spec->shadow_ram[offset+i].value = data[i];
+ }
+
+ nvm->ops.release(hw);
+
+ return 0;
+}
+
+/**
+ * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
+ * @hw: pointer to the HW structure
+ *
+ * The NVM checksum is updated by calling the generic update_nvm_checksum,
+ * which writes the checksum to the shadow ram. The changes in the shadow
+ * ram are then committed to the EEPROM by processing each bank at a time
+ * checking for the modified bit and writing only the pending changes.
+ * After a successful commit, the shadow ram is cleared and is ready for
+ * future writes.
+ **/
+static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
+ s32 ret_val;
+ u16 data;
+
+ ret_val = e1000e_update_nvm_checksum_generic(hw);
+ if (ret_val)
+ goto out;
+
+ if (nvm->type != e1000_nvm_flash_sw)
+ goto out;
+
+ nvm->ops.acquire(hw);
+
+ /*
+ * We're writing to the opposite bank so if we're on bank 1,
+ * write to bank 0 etc. We also need to erase the segment that
+ * is going to be written
+ */
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+ if (ret_val) {
+ e_dbg("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
+
+ if (bank == 0) {
+ new_bank_offset = nvm->flash_bank_size;
+ old_bank_offset = 0;
+ ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
+ if (ret_val)
+ goto release;
+ } else {
+ old_bank_offset = nvm->flash_bank_size;
+ new_bank_offset = 0;
+ ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
+ if (ret_val)
+ goto release;
+ }
+
+ for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
+ /*
+ * Determine whether to write the value stored
+ * in the other NVM bank or a modified value stored
+ * in the shadow RAM
+ */
+ if (dev_spec->shadow_ram[i].modified) {
+ data = dev_spec->shadow_ram[i].value;
+ } else {
+ ret_val = e1000_read_flash_word_ich8lan(hw, i +
+ old_bank_offset,
+ &data);
+ if (ret_val)
+ break;
+ }
+
+ /*
+ * If the word is 0x13, then make sure the signature bits
+ * (15:14) are 11b until the commit has completed.
+ * This will allow us to write 10b which indicates the
+ * signature is valid. We want to do this after the write
+ * has completed so that we don't mark the segment valid
+ * while the write is still in progress
+ */
+ if (i == E1000_ICH_NVM_SIG_WORD)
+ data |= E1000_ICH_NVM_SIG_MASK;
+
+ /* Convert offset to bytes. */
+ act_offset = (i + new_bank_offset) << 1;
+
+ udelay(100);
+ /* Write the bytes to the new bank. */
+ ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+ act_offset,
+ (u8)data);
+ if (ret_val)
+ break;
+
+ udelay(100);
+ ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+ act_offset + 1,
+ (u8)(data >> 8));
+ if (ret_val)
+ break;
+ }
+
+ /*
+ * Don't bother writing the segment valid bits if sector
+ * programming failed.
+ */
+ if (ret_val) {
+ /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
+ e_dbg("Flash commit failed.\n");
+ goto release;
+ }
+
+ /*
+	 * Finally validate the new segment by setting bits 15:14
+	 * to 10b in word 0x13. This can be done without an
+ * erase as well since these bits are 11 to start with
+ * and we need to change bit 14 to 0b
+ */
+ act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
+ ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
+ if (ret_val)
+ goto release;
+
+ data &= 0xBFFF;
+ ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+ act_offset * 2 + 1,
+ (u8)(data >> 8));
+ if (ret_val)
+ goto release;
+
+ /*
+ * And invalidate the previously valid segment by setting
+	 * its signature word (0x13) high byte to 0b. This can be
+ * done without an erase because flash erase sets all bits
+ * to 1's. We can write 1's to 0's without an erase
+ */
+ act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
+ ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
+ if (ret_val)
+ goto release;
+
+ /* Great! Everything worked, we can now clear the cached entries. */
+ for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
+ dev_spec->shadow_ram[i].modified = false;
+ dev_spec->shadow_ram[i].value = 0xFFFF;
+ }
+
+release:
+ nvm->ops.release(hw);
+
+ /*
+ * Reload the EEPROM, or else modifications will not appear
+ * until after the next adapter reset.
+ */
+ if (!ret_val) {
+ e1000e_reload_nvm(hw);
+ usleep_range(10000, 20000);
+ }
+
+out:
+ if (ret_val)
+ e_dbg("NVM update error: %d\n", ret_val);
+
+ return ret_val;
+}
+
+/**
+ * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
+ * If the bit is 0, the EEPROM had been modified, but the checksum was not
+ * calculated; in that case, calculate the checksum and set bit 6.
+ **/
+static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 data;
+
+ /*
+ * Read 0x19 and check bit 6. If this bit is 0, the checksum
+ * needs to be fixed. This bit is an indication that the NVM
+ * was prepared by OEM software and did not calculate the
+ * checksum...a likely scenario.
+ */
+ ret_val = e1000_read_nvm(hw, 0x19, 1, &data);
+ if (ret_val)
+ return ret_val;
+
+ if ((data & 0x40) == 0) {
+ data |= 0x40;
+ ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000e_update_nvm_checksum(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return e1000e_validate_nvm_checksum_generic(hw);
+}
+
+/**
+ * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
+ * @hw: pointer to the HW structure
+ *
+ * To prevent malicious write/erase of the NVM, set it to be read-only
+ * so that the hardware ignores all write/erase cycles of the NVM via
+ * the flash control registers. The shadow-ram copy of the NVM will
+ * still be updated, however any updates to this copy will not stick
+ * across driver reloads.
+ **/
+void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ union ich8_flash_protected_range pr0;
+ union ich8_hws_flash_status hsfsts;
+ u32 gfpreg;
+
+ nvm->ops.acquire(hw);
+
+ gfpreg = er32flash(ICH_FLASH_GFPREG);
+
+ /* Write-protect GbE Sector of NVM */
+ pr0.regval = er32flash(ICH_FLASH_PR0);
+ pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
+ pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
+ pr0.range.wpe = true;
+ ew32flash(ICH_FLASH_PR0, pr0.regval);
+
+ /*
+ * Lock down a subset of GbE Flash Control Registers, e.g.
+ * PR0 to prevent the write-protection from being lifted.
+ * Once FLOCKDN is set, the registers protected by it cannot
+ * be written until FLOCKDN is cleared by a hardware reset.
+ */
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ hsfsts.hsf_status.flockdn = true;
+ ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+
+ nvm->ops.release(hw);
+}
+
+/**
+ * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the byte/word to read.
+ * @size: Size of data to read, 1=byte 2=word
+ * @data: The byte(s) to write to the NVM.
+ *
+ * Writes one/two bytes to the NVM using the flash access registers.
+ **/
+static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 size, u16 data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ u32 flash_data = 0;
+ s32 ret_val;
+ u8 count = 0;
+
+ if (size < 1 || size > 2 || data > size * 0xff ||
+ offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+
+ flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr;
+
+ do {
+ udelay(1);
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val)
+ break;
+
+ hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+ /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+		hsflctl.hsf_ctrl.fldbcount = size - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+ ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
+
+ if (size == 1)
+ flash_data = (u32)data & 0x00FF;
+ else
+ flash_data = (u32)data;
+
+ ew32flash(ICH_FLASH_FDATA0, flash_data);
+
+ /*
+		 * Check if FCERR is set to 1. If so, clear it and try
+		 * the whole sequence a few more times, else we are done.
+ */
+ ret_val = e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+ if (!ret_val)
+ break;
+
+ /*
+ * If we're here, then things are most likely
+ * completely hosed, but if the error condition
+ * is detected, it won't hurt to give it another
+ * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr == 1)
+ /* Repeat for some time before giving up. */
+ continue;
+ if (hsfsts.hsf_status.flcdone == 0) {
+ e_dbg("Timeout error - flash cycle "
+			      "did not complete.\n");
+ break;
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
+ * @hw: pointer to the HW structure
+ * @offset: The index of the byte to read.
+ * @data: The byte to write to the NVM.
+ *
+ * Writes a single byte to the NVM using the flash access registers.
+ **/
+static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+ u8 data)
+{
+ u16 word = (u16)data;
+
+ return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
+}
+
+/**
+ * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset of the byte to write.
+ * @byte: The byte to write to the NVM.
+ *
+ * Writes a single byte to the NVM using the flash access registers.
+ * Goes through a retry algorithm before giving up.
+ **/
+static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
+ u32 offset, u8 byte)
+{
+ s32 ret_val;
+ u16 program_retries;
+
+ ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
+ if (!ret_val)
+ return ret_val;
+
+ for (program_retries = 0; program_retries < 100; program_retries++) {
+ e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
+ udelay(100);
+ ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
+ if (!ret_val)
+ break;
+ }
+ if (program_retries == 100)
+ return -E1000_ERR_NVM;
+
+ return 0;
+}
+
+/**
+ * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
+ * @hw: pointer to the HW structure
+ * @bank: 0 for first bank, 1 for second bank, etc.
+ *
+ * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
+ * bank N is 4096 * N + flash_reg_addr.
+ **/
+static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+	/* bank size is in 16-bit words - adjust to bytes */
+ u32 flash_bank_size = nvm->flash_bank_size * 2;
+ s32 ret_val;
+ s32 count = 0;
+ s32 j, iteration, sector_size;
+
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+
+ /*
+ * Determine HW Sector size: Read BERASE bits of hw flash status
+ * register
+ * 00: The Hw sector is 256 bytes, hence we need to erase 16
+ * consecutive sectors. The start index for the nth Hw sector
+ * can be calculated as = bank * 4096 + n * 256
+ * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
+ * The start index for the nth Hw sector can be calculated
+ * as = bank * 4096
+ * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
+ * (ich9 only, otherwise error condition)
+ * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
+ */
+ switch (hsfsts.hsf_status.berasesz) {
+ case 0:
+ /* Hw sector size 256 */
+ sector_size = ICH_FLASH_SEG_SIZE_256;
+ iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
+ break;
+ case 1:
+ sector_size = ICH_FLASH_SEG_SIZE_4K;
+ iteration = 1;
+ break;
+ case 2:
+ sector_size = ICH_FLASH_SEG_SIZE_8K;
+ iteration = 1;
+ break;
+ case 3:
+ sector_size = ICH_FLASH_SEG_SIZE_64K;
+ iteration = 1;
+ break;
+ default:
+ return -E1000_ERR_NVM;
+ }
+
+ /* Start with the base address, then add the sector offset. */
+ flash_linear_addr = hw->nvm.flash_base_addr;
+ flash_linear_addr += (bank) ? flash_bank_size : 0;
+
+	for (j = 0; j < iteration; j++) {
+ do {
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * Write a value 11 (block Erase) in Flash
+ * Cycle field in hw flash control
+ */
+ hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
+ ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ /*
+ * Write the last 24 bits of an index within the
+ * block into Flash Linear address field in Flash
+ * Address.
+ */
+			ew32flash(ICH_FLASH_FADDR,
+				  flash_linear_addr + (j * sector_size));
+
+ ret_val = e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_ERASE_COMMAND_TIMEOUT);
+ if (ret_val == 0)
+ break;
+
+ /*
+ * Check if FCERR is set to 1. If 1,
+ * clear it and try the whole sequence
+ * a few more times else Done
+ */
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr == 1)
+ /* repeat for some time before giving up */
+ continue;
+ else if (hsfsts.hsf_status.flcdone == 0)
+ return ret_val;
+ } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_valid_led_default_ich8lan - Set the default LED settings
+ * @hw: pointer to the HW structure
+ * @data: Pointer to the LED settings
+ *
+ * Reads the LED default settings from the NVM to data. If the NVM LED
+ * settings is all 0's or F's, set the LED default to a valid LED default
+ * setting.
+ **/
+static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (*data == ID_LED_RESERVED_0000 ||
+ *data == ID_LED_RESERVED_FFFF)
+ *data = ID_LED_DEFAULT_ICH8LAN;
+
+ return 0;
+}
+
+/**
+ * e1000_id_led_init_pchlan - store LED configurations
+ * @hw: pointer to the HW structure
+ *
+ * PCH does not control LEDs via the LEDCTL register, rather it uses
+ * the PHY LED configuration register.
+ *
+ * PCH also does not have an "always on" or "always off" mode which
+ * complicates the ID feature. Instead of using the "on" mode to indicate
+ * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init()),
+ * use "link_up" mode. The LEDs will still ID on request if there is no
+ * link based on logic in e1000_led_[on|off]_pchlan().
+ **/
+static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
+ const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
+ u16 data, i, temp, shift;
+
+ /* Get default ID LED modes */
+ ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+ if (ret_val)
+ goto out;
+
+ mac->ledctl_default = er32(LEDCTL);
+ mac->ledctl_mode1 = mac->ledctl_default;
+ mac->ledctl_mode2 = mac->ledctl_default;
+
+ for (i = 0; i < 4; i++) {
+ temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
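+		/* LED config fields are 5 bits (mode + invert) apart */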
+ shift = (i * 5);
+ switch (temp) {
+ case ID_LED_ON1_DEF2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_ON1_OFF2:
+ mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode1 |= (ledctl_on << shift);
+ break;
+ case ID_LED_OFF1_DEF2:
+ case ID_LED_OFF1_ON2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode1 |= (ledctl_off << shift);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ switch (temp) {
+ case ID_LED_DEF1_ON2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_OFF1_ON2:
+ mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode2 |= (ledctl_on << shift);
+ break;
+ case ID_LED_DEF1_OFF2:
+ case ID_LED_ON1_OFF2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode2 |= (ledctl_off << shift);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
+ * @hw: pointer to the HW structure
+ *
+ * ICH8 uses the PCI Express bus, but does not contain a PCI Express
+ * Capability register, so the bus width is hardcoded.
+ **/
+static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+ s32 ret_val;
+
+ ret_val = e1000e_get_bus_info_pcie(hw);
+
+ /*
+ * ICH devices are "PCI Express"-ish. They have
+ * a configuration space, but do not contain
+ * PCI Express Capability registers, so bus width
+ * must be hardcoded.
+ */
+ if (bus->width == e1000_bus_width_unknown)
+ bus->width = e1000_bus_width_pcie_x1;
+
+ return ret_val;
+}
+
+/**
+ * e1000_reset_hw_ich8lan - Reset the hardware
+ * @hw: pointer to the HW structure
+ *
+ * Does a full reset of the hardware which includes a reset of the PHY and
+ * MAC.
+ **/
+static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u16 reg;
+ u32 ctrl, kab;
+ s32 ret_val;
+
+ /*
+ * Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000e_disable_pcie_master(hw);
+ if (ret_val)
+ e_dbg("PCI-E Master disable polling has failed.\n");
+
+ e_dbg("Masking off all interrupts\n");
+ ew32(IMC, 0xffffffff);
+
+ /*
+ * Disable the Transmit and Receive units. Then delay to allow
+ * any pending transactions to complete before we hit the MAC
+ * with the global reset.
+ */
+ ew32(RCTL, 0);
+ ew32(TCTL, E1000_TCTL_PSP);
+ e1e_flush();
+
+ usleep_range(10000, 20000);
+
+ /* Workaround for ICH8 bit corruption issue in FIFO memory */
+ if (hw->mac.type == e1000_ich8lan) {
+ /* Set Tx and Rx buffer allocation to 8k apiece. */
+ ew32(PBA, E1000_PBA_8K);
+ /* Set Packet Buffer Size to 16k. */
+ ew32(PBS, E1000_PBS_16K);
+ }
+
+ if (hw->mac.type == e1000_pchlan) {
+		/* Save the NVM K1 bit setting */
+ ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
+ if (ret_val)
+ return ret_val;
+
+ if (reg & E1000_NVM_K1_ENABLE)
+ dev_spec->nvm_k1_enabled = true;
+ else
+ dev_spec->nvm_k1_enabled = false;
+ }
+
+ ctrl = er32(CTRL);
+
+ if (!e1000_check_reset_block(hw)) {
+ /*
+ * Full-chip reset requires MAC and PHY reset at the same
+ * time to make sure the interface between MAC and the
+ * external PHY is reset.
+ */
+ ctrl |= E1000_CTRL_PHY_RST;
+
+ /*
+ * Gate automatic PHY configuration by hardware on
+ * non-managed 82579
+ */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
+ }
+ ret_val = e1000_acquire_swflag_ich8lan(hw);
+ e_dbg("Issuing a global reset to ich8lan\n");
+ ew32(CTRL, (ctrl | E1000_CTRL_RST));
+ /* cannot issue a flush here because it hangs the hardware */
+ msleep(20);
+
+ if (!ret_val)
+ mutex_unlock(&swflag_mutex);
+
+ if (ctrl & E1000_CTRL_PHY_RST) {
+ ret_val = hw->phy.ops.get_cfg_done(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_post_phy_reset_ich8lan(hw);
+ if (ret_val)
+ goto out;
+ }
+
+ /*
+ * For PCH, this write will make sure that any noise
+ * will be detected as a CRC error and be dropped rather than show up
+ * as a bad packet to the DMA engine.
+ */
+ if (hw->mac.type == e1000_pchlan)
+ ew32(CRC_OFFSET, 0x65656565);
+
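+	/* Mask off and clear any interrupts the reset may have raised */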
+ ew32(IMC, 0xffffffff);
+ er32(ICR);
+
+ kab = er32(KABGTXD);
+ kab |= E1000_KABGTXD_BGSQLBIAS;
+ ew32(KABGTXD, kab);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_hw_ich8lan - Initialize the hardware
+ * @hw: pointer to the HW structure
+ *
+ * Prepares the hardware for transmit and receive by doing the following:
+ * - initialize hardware bits
+ * - initialize LED identification
+ * - setup receive address registers
+ * - setup flow control
+ * - setup transmit descriptors
+ * - clear statistics
+ **/
+static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 ctrl_ext, txdctl, snoop;
+ s32 ret_val;
+ u16 i;
+
+ e1000_initialize_hw_bits_ich8lan(hw);
+
+ /* Initialize identification LED */
+ ret_val = mac->ops.id_led_init(hw);
+ if (ret_val)
+ e_dbg("Error initializing identification LED\n");
+ /* This is not fatal and we should not stop init due to this */
+
+ /* Setup the receive address. */
+ e1000e_init_rx_addrs(hw, mac->rar_entry_count);
+
+ /* Zero out the Multicast HASH table */
+ e_dbg("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+ /*
+ * The 82578 Rx buffer will stall if wakeup is enabled in host and
+ * the ME. Disable wakeup by clearing the host wakeup bit.
+ * Reset the phy after disabling host wakeup to reset the Rx buffer.
+ */
+ if (hw->phy.type == e1000_phy_82578) {
+ e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
+ i &= ~BM_WUC_HOST_WU_BIT;
+ e1e_wphy(hw, BM_PORT_GEN_CFG, i);
+ ret_val = e1000_phy_hw_reset_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Setup link and flow control */
+ ret_val = e1000_setup_link_ich8lan(hw);
+
+ /* Set the transmit descriptor write-back policy for both queues */
+ txdctl = er32(TXDCTL(0));
+ txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB;
+ txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
+ E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+ ew32(TXDCTL(0), txdctl);
+ txdctl = er32(TXDCTL(1));
+ txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB;
+ txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
+ E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+ ew32(TXDCTL(1), txdctl);
+
+ /*
+ * ICH8 has opposite polarity of no_snoop bits.
+ * By default, we should use snoop behavior.
+ */
+ if (mac->type == e1000_ich8lan)
+ snoop = PCIE_ICH8_SNOOP_ALL;
+ else
+ snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
+ e1000e_set_pcie_no_snoop(hw, snoop);
+
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+ ew32(CTRL_EXT, ctrl_ext);
+
+ /*
+ * Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs_ich8lan(hw);
+
+ return 0;
+}
+
+/**
+ * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
+ * @hw: pointer to the HW structure
+ *
+ * Sets/Clears required hardware bits necessary for correctly setting up the
+ * hardware for transmit and receive.
+ **/
+static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ /* Extended Device Control */
+ reg = er32(CTRL_EXT);
+ reg |= (1 << 22);
+ /* Enable PHY low-power state when MAC is at D3 w/o WoL */
+ if (hw->mac.type >= e1000_pchlan)
+ reg |= E1000_CTRL_EXT_PHYPDEN;
+ ew32(CTRL_EXT, reg);
+
+ /* Transmit Descriptor Control 0 */
+ reg = er32(TXDCTL(0));
+ reg |= (1 << 22);
+ ew32(TXDCTL(0), reg);
+
+ /* Transmit Descriptor Control 1 */
+ reg = er32(TXDCTL(1));
+ reg |= (1 << 22);
+ ew32(TXDCTL(1), reg);
+
+ /* Transmit Arbitration Control 0 */
+ reg = er32(TARC(0));
+ if (hw->mac.type == e1000_ich8lan)
+ reg |= (1 << 28) | (1 << 29);
+ reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
+ ew32(TARC(0), reg);
+
+ /* Transmit Arbitration Control 1 */
+ reg = er32(TARC(1));
+ if (er32(TCTL) & E1000_TCTL_MULR)
+ reg &= ~(1 << 28);
+ else
+ reg |= (1 << 28);
+ reg |= (1 << 24) | (1 << 26) | (1 << 30);
+ ew32(TARC(1), reg);
+
+ /* Device Status */
+ if (hw->mac.type == e1000_ich8lan) {
+ reg = er32(STATUS);
+		reg &= ~(1U << 31);
+ ew32(STATUS, reg);
+ }
+
+ /*
+ * work-around descriptor data corruption issue during nfs v2 udp
+ * traffic, just disable the nfs filtering capability
+ */
+ reg = er32(RFCTL);
+ reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
+ ew32(RFCTL, reg);
+}
+
+/**
+ * e1000_setup_link_ich8lan - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ if (e1000_check_reset_block(hw))
+ return 0;
+
+ /*
+ * ICH parts do not have a word in the NVM to determine
+ * the default flow control setting, so we explicitly
+ * set it to full.
+ */
+ if (hw->fc.requested_mode == e1000_fc_default) {
+ /* Workaround h/w hang when Tx flow control enabled */
+ if (hw->mac.type == e1000_pchlan)
+ hw->fc.requested_mode = e1000_fc_rx_pause;
+ else
+ hw->fc.requested_mode = e1000_fc_full;
+ }
+
+ /*
+ * Save off the requested flow control mode for use later. Depending
+ * on the link partner's capabilities, we may or may not use this mode.
+ */
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ e_dbg("After fix-ups FlowControl is now = %x\n",
+ hw->fc.current_mode);
+
+ /* Continue to configure the copper link. */
+ ret_val = e1000_setup_copper_link_ich8lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ ew32(FCTTV, hw->fc.pause_time);
+ if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82579) ||
+ (hw->phy.type == e1000_phy_82577)) {
+ ew32(FCRTV_PCH, hw->fc.refresh_time);
+
+ ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
+ hw->fc.pause_time);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return e1000e_set_fc_watermarks(hw);
+}
+
+/**
+ * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
+ * @hw: pointer to the HW structure
+ *
+ * Configures the kumeran interface to the PHY to wait the appropriate time
+ * when polling the PHY, then call the generic setup_copper_link to finish
+ * configuring the copper link.
+ **/
+static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 reg_data;
+
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ew32(CTRL, ctrl);
+
+ /*
+ * Set the mac to wait the maximum time between each iteration
+ * and increase the max iterations when polling the phy;
+ * this fixes erroneous timeouts at 10Mbps.
+ */
+ ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+ reg_data |= 0x3F;
+ ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ switch (hw->phy.type) {
+ case e1000_phy_igp_3:
+ ret_val = e1000e_copper_link_setup_igp(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ case e1000_phy_bm:
+ case e1000_phy_82578:
+ ret_val = e1000e_copper_link_setup_m88(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ case e1000_phy_82577:
+ case e1000_phy_82579:
+ ret_val = e1000_copper_link_setup_82577(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ case e1000_phy_ife:
+ ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ reg_data &= ~IFE_PMC_AUTO_MDIX;
+
+ switch (hw->phy.mdix) {
+ case 1:
+ reg_data &= ~IFE_PMC_FORCE_MDIX;
+ break;
+ case 2:
+ reg_data |= IFE_PMC_FORCE_MDIX;
+ break;
+ case 0:
+ default:
+ reg_data |= IFE_PMC_AUTO_MDIX;
+ break;
+ }
+ ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
+ if (ret_val)
+ return ret_val;
+ break;
+ default:
+ break;
+ }
+ return e1000e_setup_copper_link(hw);
+}
+
+/**
+ * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to store current link speed
+ * @duplex: pointer to store the current link duplex
+ *
+ * Calls the generic get_speed_and_duplex to retrieve the current link
+ * information and then calls the Kumeran lock loss workaround for links at
+ * gigabit speeds.
+ **/
+static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 ret_val;
+
+ ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->mac.type == e1000_ich8lan) &&
+ (hw->phy.type == e1000_phy_igp_3) &&
+ (*speed == SPEED_1000)) {
+ ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
+ * @hw: pointer to the HW structure
+ *
+ * Work-around for 82566 Kumeran PCS lock loss:
+ * On link status change (i.e. PCI reset, speed change) and link is up and
+ * speed is gigabit:
+ * 0) if workaround is optionally disabled do nothing
+ * 1) wait 1ms for Kumeran link to come up
+ * 2) check Kumeran Diagnostic register PCS lock loss bit
+ * 3) if not set the link is locked (all is good), otherwise...
+ * 4) reset the PHY
+ * 5) repeat up to 10 times
+ * Note: this is only called for IGP3 copper when speed is 1gb.
+ **/
+static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 phy_ctrl;
+ s32 ret_val;
+ u16 i, data;
+ bool link;
+
+ if (!dev_spec->kmrn_lock_loss_workaround_enabled)
+ return 0;
+
+ /*
+ * Make sure link is up before proceeding. If not just return.
+ * Attempting this while link is negotiating fouled up link
+ * stability
+ */
+ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+ if (!link)
+ return 0;
+
+ for (i = 0; i < 10; i++) {
+ /* read once to clear */
+ ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
+ if (ret_val)
+ return ret_val;
+ /* and again to get new status */
+ ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
+ if (ret_val)
+ return ret_val;
+
+ /* check for PCS lock */
+ if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
+ return 0;
+
+ /* Issue PHY reset */
+ e1000_phy_hw_reset(hw);
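+		/* Allow the PHY time to complete its reset */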
+ mdelay(5);
+ }
+ /* Disable GigE link negotiation */
+ phy_ctrl = er32(PHY_CTRL);
+ phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+ ew32(PHY_CTRL, phy_ctrl);
+
+ /*
+ * Call gig speed drop workaround on Gig disable before accessing
+ * any PHY registers
+ */
+ e1000e_gig_downshift_workaround_ich8lan(hw);
+
+ /* unable to acquire PCS lock */
+ return -E1000_ERR_PHY;
+}
+
+/**
+ * e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
+ * @hw: pointer to the HW structure
+ * @state: boolean value used to set the current Kumeran workaround state
+ *
+ * If ICH8, set the current Kumeran workaround state (enabled - true
+ * /disabled - false).
+ **/
+void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+ bool state)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+
+ if (hw->mac.type != e1000_ich8lan) {
+ e_dbg("Workaround applies to ICH8 only.\n");
+ return;
+ }
+
+ dev_spec->kmrn_lock_loss_workaround_enabled = state;
+}
+
+/**
+ * e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
+ * @hw: pointer to the HW structure
+ *
+ * Workaround for 82566 power-down on D3 entry:
+ * 1) disable gigabit link
+ * 2) write VR power-down enable
+ * 3) read it back
+ * Continue if successful, else issue LCD reset and repeat
+ **/
+void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
+{
+ u32 reg;
+ u16 data;
+ u8 retry = 0;
+
+ if (hw->phy.type != e1000_phy_igp_3)
+ return;
+
+ /* Try the workaround twice (if needed) */
+ do {
+ /* Disable link */
+ reg = er32(PHY_CTRL);
+ reg |= (E1000_PHY_CTRL_GBE_DISABLE |
+ E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+ ew32(PHY_CTRL, reg);
+
+ /*
+ * Call gig speed drop workaround on Gig disable before
+ * accessing any PHY registers
+ */
+ if (hw->mac.type == e1000_ich8lan)
+ e1000e_gig_downshift_workaround_ich8lan(hw);
+
+ /* Write VR power-down enable */
+ e1e_rphy(hw, IGP3_VR_CTRL, &data);
+ data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+ e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);
+
+ /* Read it back and test */
+ e1e_rphy(hw, IGP3_VR_CTRL, &data);
+ data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+ if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
+ break;
+
+ /* Issue PHY reset and repeat at most one more time */
+ reg = er32(CTRL);
+ ew32(CTRL, reg | E1000_CTRL_PHY_RST);
+ retry++;
+ } while (retry);
+}
+
+/**
+ * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
+ * @hw: pointer to the HW structure
+ *
+ * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
+ * LPLU, Gig disable, MDIC PHY reset):
+ * 1) Set Kumeran Near-end loopback
+ * 2) Clear Kumeran Near-end loopback
+ * Should only be called for ICH8[m] devices with any 1G Phy.
+ **/
+void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 reg_data;
+
+ if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife))
+ return;
+
+ ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+ &reg_data);
+ if (ret_val)
+ return;
+ reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
+ ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+ reg_data);
+ if (ret_val)
+ return;
+ reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
+ ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+ reg_data);
+}
+
+/**
+ * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
+ * @hw: pointer to the HW structure
+ *
+ * During S0 to Sx transition, it is possible the link remains at gig
+ * instead of negotiating to a lower speed. Before going to Sx, set
+ * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
+ * to a lower speed. For PCH and newer parts, the OEM bits PHY register
+ * (LED, GbE disable and LPLU configurations) also needs to be written.
+ **/
+void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
+{
+ u32 phy_ctrl;
+ s32 ret_val;
+
+ phy_ctrl = er32(PHY_CTRL);
+ phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
+ ew32(PHY_CTRL, phy_ctrl);
+
+ if (hw->mac.type == e1000_ich8lan)
+ e1000e_gig_downshift_workaround_ich8lan(hw);
+
+ if (hw->mac.type >= e1000_pchlan) {
+ e1000_oem_bits_config_ich8lan(hw, false);
+ e1000_phy_hw_reset_ich8lan(hw);
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+ e1000_write_smbus_addr(hw);
+ hw->phy.ops.release(hw);
+ }
+}
+
+/**
+ * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
+ * @hw: pointer to the HW structure
+ *
+ * During Sx to S0 transitions on non-managed devices or managed devices
+ * on which PHY resets are not blocked, if the PHY registers cannot be
+ * accessed properly by the s/w, toggle the LANPHYPC value to power cycle
+ * the PHY.
+ **/
+void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
+{
+ u32 fwsm;
+
+ if (hw->mac.type != e1000_pch2lan)
+ return;
+
+ fwsm = er32(FWSM);
+ if (!(fwsm & E1000_ICH_FWSM_FW_VALID) || !e1000_check_reset_block(hw)) {
+ u16 phy_id1, phy_id2;
+ s32 ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ e_dbg("Failed to acquire PHY semaphore in resume\n");
+ return;
+ }
+
+ /* Test access to the PHY registers by reading the ID regs */
+ ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1);
+ if (ret_val)
+ goto release;
+ ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2);
+ if (ret_val)
+ goto release;
+
+ if (hw->phy.id == ((u32)(phy_id1 << 16) |
+ (u32)(phy_id2 & PHY_REVISION_MASK)))
+ goto release;
+
+ e1000_toggle_lanphypc_value_ich8lan(hw);
+
+ hw->phy.ops.release(hw);
+ msleep(50);
+ e1000_phy_hw_reset(hw);
+ msleep(50);
+ return;
+ }
+
+release:
+ hw->phy.ops.release(hw);
+
+ return;
+}
+
+/**
+ * e1000_cleanup_led_ich8lan - Restore the default LED operation
+ * @hw: pointer to the HW structure
+ *
+ * Return the LED back to the default configuration.
+ **/
+static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
+{
+ if (hw->phy.type == e1000_phy_ife)
+ return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
+
+ ew32(LEDCTL, hw->mac.ledctl_default);
+ return 0;
+}
+
+/**
+ * e1000_led_on_ich8lan - Turn LEDs on
+ * @hw: pointer to the HW structure
+ *
+ * Turn on the LEDs.
+ **/
+static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
+{
+ if (hw->phy.type == e1000_phy_ife)
+ return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+ (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
+
+ ew32(LEDCTL, hw->mac.ledctl_mode2);
+ return 0;
+}
+
+/**
+ * e1000_led_off_ich8lan - Turn LEDs off
+ * @hw: pointer to the HW structure
+ *
+ * Turn off the LEDs.
+ **/
+static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
+{
+ if (hw->phy.type == e1000_phy_ife)
+ return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+ (IFE_PSCL_PROBE_MODE |
+ IFE_PSCL_PROBE_LEDS_OFF));
+
+ ew32(LEDCTL, hw->mac.ledctl_mode1);
+ return 0;
+}
+
+/**
+ * e1000_setup_led_pchlan - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use.
+ **/
+static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
+{
+ return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
+}
+
+/**
+ * e1000_cleanup_led_pchlan - Restore the default LED operation
+ * @hw: pointer to the HW structure
+ *
+ * Return the LED back to the default configuration.
+ **/
+static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
+{
+ return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
+}
+
+/**
+ * e1000_led_on_pchlan - Turn LEDs on
+ * @hw: pointer to the HW structure
+ *
+ * Turn on the LEDs.
+ **/
+static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
+{
+ u16 data = (u16)hw->mac.ledctl_mode2;
+ u32 i, led;
+
+ /*
+ * If no link, then turn LED on by setting the invert bit
+	 * for each LED whose mode is "link_up" in ledctl_mode2.
+ */
+ if (!(er32(STATUS) & E1000_STATUS_LU)) {
+ for (i = 0; i < 3; i++) {
+ led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
+ if ((led & E1000_PHY_LED0_MODE_MASK) !=
+ E1000_LEDCTL_MODE_LINK_UP)
+ continue;
+ if (led & E1000_PHY_LED0_IVRT)
+ data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
+ else
+ data |= (E1000_PHY_LED0_IVRT << (i * 5));
+ }
+ }
+
+ return e1e_wphy(hw, HV_LED_CONFIG, data);
+}
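+
+/*
+ * Sketch of the LED layout assumed above: HV_LED_CONFIG packs three 5-bit
+ * LED fields at bits 4:0, 9:5 and 14:10. For example, if LED1 (bits 9:5)
+ * is in "link_up" mode while link is down, flipping its IVRT bit inverts
+ * the (off) link indication so the LED is driven on.
+ */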
+
+/**
+ * e1000_led_off_pchlan - Turn LEDs off
+ * @hw: pointer to the HW structure
+ *
+ * Turn off the LEDs.
+ **/
+static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
+{
+ u16 data = (u16)hw->mac.ledctl_mode1;
+ u32 i, led;
+
+ /*
+ * If no link, then turn LED off by clearing the invert bit
+	 * for each LED whose mode is "link_up" in ledctl_mode1.
+ */
+ if (!(er32(STATUS) & E1000_STATUS_LU)) {
+ for (i = 0; i < 3; i++) {
+ led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
+ if ((led & E1000_PHY_LED0_MODE_MASK) !=
+ E1000_LEDCTL_MODE_LINK_UP)
+ continue;
+ if (led & E1000_PHY_LED0_IVRT)
+ data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
+ else
+ data |= (E1000_PHY_LED0_IVRT << (i * 5));
+ }
+ }
+
+ return e1e_wphy(hw, HV_LED_CONFIG, data);
+}
+
+/**
+ * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Read appropriate register for the config done bit for completion status
+ * and configure the PHY through s/w for EEPROM-less parts.
+ *
+ * NOTE: some EEPROM-less silicon will fail trying to read the config done
+ * bit, so in that case only an error is logged and execution continues. If
+ * we were to return with an error, EEPROM-less silicon would not be able
+ * to be reset or change link.
+ **/
+static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u32 bank = 0;
+ u32 status;
+
+ e1000e_get_cfg_done(hw);
+
+ /* Wait for indication from h/w that it has completed basic config */
+ if (hw->mac.type >= e1000_ich10lan) {
+ e1000_lan_init_done_ich8lan(hw);
+ } else {
+ ret_val = e1000e_get_auto_rd_done(hw);
+ if (ret_val) {
+ /*
+			 * When the auto config read does not complete, do not
+			 * return with an error. This can happen when there is
+			 * no eeprom; returning an error would prevent getting link.
+ */
+ e_dbg("Auto Read Done did not complete\n");
+ ret_val = 0;
+ }
+ }
+
+ /* Clear PHY Reset Asserted bit */
+ status = er32(STATUS);
+ if (status & E1000_STATUS_PHYRA)
+ ew32(STATUS, status & ~E1000_STATUS_PHYRA);
+ else
+ e_dbg("PHY Reset Asserted not set - needs delay\n");
+
+ /* If EEPROM is not marked present, init the IGP 3 PHY manually */
+ if (hw->mac.type <= e1000_ich9lan) {
+ if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
+ (hw->phy.type == e1000_phy_igp_3)) {
+ e1000e_phy_init_script_igp3(hw);
+ }
+ } else {
+ if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
+ /* Maybe we should do a basic PHY config */
+ e_dbg("EEPROM not present\n");
+ ret_val = -E1000_ERR_CONFIG;
+ }
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off link during a
+ * driver unload, or when wake on LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
+{
+ /* If the management interface is not enabled, then power down */
+ if (!(hw->mac.ops.check_mng_mode(hw) ||
+ hw->phy.ops.check_reset_block(hw)))
+ e1000_power_down_phy_copper(hw);
+}
+
+/**
+ * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears hardware counters specific to the silicon family and calls
+ * clear_hw_cntrs_generic to clear all general purpose counters.
+ **/
+static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
+{
+ u16 phy_data;
+ s32 ret_val;
+
+ e1000e_clear_hw_cntrs_base(hw);
+
+ er32(ALGNERRC);
+ er32(RXERRC);
+ er32(TNCRS);
+ er32(CEXTERR);
+ er32(TSCTC);
+ er32(TSCTFC);
+
+ er32(MGTPRC);
+ er32(MGTPDC);
+ er32(MGTPTC);
+
+ er32(IAC);
+ er32(ICRXOC);
+
+ /* Clear PHY statistics registers */
+ if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82579) ||
+ (hw->phy.type == e1000_phy_82577)) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+ ret_val = hw->phy.ops.set_page(hw,
+ HV_STATS_PAGE << IGP_PAGE_SHIFT);
+ if (ret_val)
+ goto release;
+ hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
+release:
+ hw->phy.ops.release(hw);
+ }
+}
+
+static const struct e1000_mac_operations ich8_mac_ops = {
+ .id_led_init = e1000e_id_led_init,
+ /* check_mng_mode dependent on mac type */
+ .check_for_link = e1000_check_for_copper_link_ich8lan,
+ /* cleanup_led dependent on mac type */
+ .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
+ .get_bus_info = e1000_get_bus_info_ich8lan,
+ .set_lan_id = e1000_set_lan_id_single_port,
+ .get_link_up_info = e1000_get_link_up_info_ich8lan,
+ /* led_on dependent on mac type */
+ /* led_off dependent on mac type */
+ .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
+ .reset_hw = e1000_reset_hw_ich8lan,
+ .init_hw = e1000_init_hw_ich8lan,
+ .setup_link = e1000_setup_link_ich8lan,
+	.setup_physical_interface = e1000_setup_copper_link_ich8lan,
+ /* id_led_init dependent on mac type */
+};
+
+static const struct e1000_phy_operations ich8_phy_ops = {
+ .acquire = e1000_acquire_swflag_ich8lan,
+ .check_reset_block = e1000_check_reset_block_ich8lan,
+ .commit = NULL,
+ .get_cfg_done = e1000_get_cfg_done_ich8lan,
+ .get_cable_length = e1000e_get_cable_length_igp_2,
+ .read_reg = e1000e_read_phy_reg_igp,
+ .release = e1000_release_swflag_ich8lan,
+ .reset = e1000_phy_hw_reset_ich8lan,
+ .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan,
+ .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan,
+ .write_reg = e1000e_write_phy_reg_igp,
+};
+
+static const struct e1000_nvm_operations ich8_nvm_ops = {
+ .acquire = e1000_acquire_nvm_ich8lan,
+ .read = e1000_read_nvm_ich8lan,
+ .release = e1000_release_nvm_ich8lan,
+ .update = e1000_update_nvm_checksum_ich8lan,
+ .valid_led_default = e1000_valid_led_default_ich8lan,
+ .validate = e1000_validate_nvm_checksum_ich8lan,
+ .write = e1000_write_nvm_ich8lan,
+};
+
+const struct e1000_info e1000_ich8_info = {
+ .mac = e1000_ich8lan,
+ .flags = FLAG_HAS_WOL
+ | FLAG_IS_ICH
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_APME_IN_WUC,
+ .pba = 8,
+ .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &ich8_nvm_ops,
+};
+
+const struct e1000_info e1000_ich9_info = {
+ .mac = e1000_ich9lan,
+ .flags = FLAG_HAS_JUMBO_FRAMES
+ | FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_ERT
+ | FLAG_HAS_FLASH
+ | FLAG_APME_IN_WUC,
+ .pba = 10,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &ich8_nvm_ops,
+};
+
+const struct e1000_info e1000_ich10_info = {
+ .mac = e1000_ich10lan,
+ .flags = FLAG_HAS_JUMBO_FRAMES
+ | FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_ERT
+ | FLAG_HAS_FLASH
+ | FLAG_APME_IN_WUC,
+ .pba = 10,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &ich8_nvm_ops,
+};
+
+const struct e1000_info e1000_pch_info = {
+ .mac = e1000_pchlan,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS,
+ .pba = 26,
+ .max_hw_frame_size = 4096,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &ich8_nvm_ops,
+};
+
+const struct e1000_info e1000_pch2_info = {
+ .mac = e1000_pch2lan,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 26,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &ich8_nvm_ops,
+};
diff --git a/drivers/net/ethernet/intel/e1000e/lib.c b/drivers/net/ethernet/intel/e1000e/lib.c
new file mode 100644
index 000000000000..0893ab107adf
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/lib.c
@@ -0,0 +1,2693 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000.h"
+
+enum e1000_mng_mode {
+ e1000_mng_mode_none = 0,
+ e1000_mng_mode_asf,
+ e1000_mng_mode_pt,
+ e1000_mng_mode_ipmi,
+ e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG 0x20000000
+
+/* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE 0x544D4149
+
+/**
+ * e1000e_get_bus_info_pcie - Get PCIe bus information
+ * @hw: pointer to the HW structure
+ *
+ * Determines and stores the system bus information for a particular
+ * network interface. The following bus information is determined and stored:
+ * bus speed, bus width, type (PCIe), and PCIe function.
+ **/
+s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_bus_info *bus = &hw->bus;
+ struct e1000_adapter *adapter = hw->adapter;
+ u16 pcie_link_status, cap_offset;
+
+ cap_offset = adapter->pdev->pcie_cap;
+ if (!cap_offset) {
+ bus->width = e1000_bus_width_unknown;
+ } else {
+ pci_read_config_word(adapter->pdev,
+ cap_offset + PCIE_LINK_STATUS,
+ &pcie_link_status);
+ bus->width = (enum e1000_bus_width)((pcie_link_status &
+ PCIE_LINK_WIDTH_MASK) >>
+ PCIE_LINK_WIDTH_SHIFT);
+ }
+
+ mac->ops.set_lan_id(hw);
+
+ return 0;
+}
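+
+/*
+ * Example, assuming PCIE_LINK_WIDTH_MASK/SHIFT select bits 9:4 of the
+ * standard PCIe Link Status register (the negotiated link width field):
+ * a link negotiated at x4 reports 4 in that field, so bus->width becomes
+ * 4, i.e. e1000_bus_width_pcie_x4 if the enum encodes the lane count.
+ */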
+
+/**
+ * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ * @hw: pointer to the HW structure
+ *
+ * Determines the LAN function id by reading memory-mapped registers
+ * and swaps the port value if requested.
+ **/
+void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+ u32 reg;
+
+ /*
+ * The status register reports the correct function number
+ * for the device regardless of function swap state.
+ */
+ reg = er32(STATUS);
+ bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+}
+
+/**
+ * e1000_set_lan_id_single_port - Set LAN id for a single port device
+ * @hw: pointer to the HW structure
+ *
+ * Sets the LAN function id to zero for a single port device.
+ **/
+void e1000_set_lan_id_single_port(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+
+ bus->func = 0;
+}
+
+/**
+ * e1000_clear_vfta_generic - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+void e1000_clear_vfta_generic(struct e1000_hw *hw)
+{
+ u32 offset;
+
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+ e1e_flush();
+ }
+}
+
+/**
+ * e1000_write_vfta_generic - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: register offset in VLAN filter table
+ * @value: register value written to VLAN filter table
+ *
+ * Writes value at the given offset in the register array which stores
+ * the VLAN filter table.
+ **/
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+ e1e_flush();
+}
+
+/**
+ * e1000e_init_rx_addrs - Initialize receive addresses
+ * @hw: pointer to the HW structure
+ * @rar_count: number of receive address registers
+ *
+ * Setup the receive address registers by setting the base receive address
+ * register to the device's MAC address and clearing all the other receive
+ * address registers to 0.
+ **/
+void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
+{
+ u32 i;
+ u8 mac_addr[ETH_ALEN] = {0};
+
+ /* Setup the receive address */
+ e_dbg("Programming MAC Address into RAR[0]\n");
+
+ e1000e_rar_set(hw, hw->mac.addr, 0);
+
+	/* Zero out the other (rar_count - 1) receive addresses */
+	e_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
+ for (i = 1; i < rar_count; i++)
+ e1000e_rar_set(hw, mac_addr, i);
+}
+
+/**
+ * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
+ * @hw: pointer to the HW structure
+ *
+ * Checks the nvm for an alternate MAC address. An alternate MAC address
+ * can be setup by pre-boot software and must be treated like a permanent
+ * address and must override the actual permanent MAC address. If an
+ * alternate MAC address is found it is programmed into RAR0, replacing
+ * the permanent address that was installed into RAR0 by the Si on reset.
+ * This function will return SUCCESS unless it encounters an error while
+ * reading the EEPROM.
+ **/
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
+{
+ u32 i;
+ s32 ret_val = 0;
+ u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+ u8 alt_mac_addr[ETH_ALEN];
+
+ ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
+ if (ret_val)
+ goto out;
+
+ /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
+ if (!((nvm_data & NVM_COMPAT_LOM) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES)))
+ goto out;
+
+ ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+ &nvm_alt_mac_addr_offset);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
+ (nvm_alt_mac_addr_offset == 0x0000))
+ /* There is no Alternate MAC Address */
+ goto out;
+
+ if (hw->bus.func == E1000_FUNC_1)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+ for (i = 0; i < ETH_ALEN; i += 2) {
+ offset = nvm_alt_mac_addr_offset + (i >> 1);
+ ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+ alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+ }
+
+ /* if multicast bit is set, the alternate address will not be used */
+ if (is_multicast_ether_addr(alt_mac_addr)) {
+ e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
+ goto out;
+ }
+
+ /*
+ * We have a valid alternate MAC address, and we want to treat it the
+ * same as the normal permanent MAC address stored by the HW into the
+ * RAR. Do this by mapping this address into RAR0.
+ */
+ e1000e_rar_set(hw, alt_mac_addr, 0);
+
+out:
+ return ret_val;
+}
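+
+/*
+ * Byte-order sketch for the NVM unpacking above: each 16-bit NVM word
+ * holds two address bytes with the earlier byte in the low half, so a
+ * word of 0x2211 yields alt_mac_addr[i] = 0x11 and
+ * alt_mac_addr[i + 1] = 0x22.
+ */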
+
+/**
+ * e1000e_rar_set - Set receive address register
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address array register at index to the address passed
+ * in by addr.
+ **/
+void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+
+ /*
+ * HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32) addr[0] |
+ ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= E1000_RAH_AV;
+
+ /*
+ * Some bridges will combine consecutive 32-bit writes into
+ * a single burst write, which will malfunction on some parts.
+ * The flushes avoid this.
+ */
+ ew32(RAL(index), rar_low);
+ e1e_flush();
+ ew32(RAH(index), rar_high);
+ e1e_flush();
+}
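+
+/*
+ * Worked example for the packing above: for a MAC address of
+ * 00:11:22:33:44:55, rar_low ends up 0x33221100 and rar_high 0x00005544
+ * (plus E1000_RAH_AV), i.e. the first address byte lands in the least
+ * significant byte of RAL.
+ */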
+
+/**
+ * e1000_hash_mc_addr - Generate a multicast hash value
+ * @hw: pointer to the HW structure
+ * @mc_addr: pointer to a multicast address
+ *
+ * Generates a multicast address hash value which is used to determine
+ * the multicast filter table array address and new table value. See
+ * e1000_mta_set_generic()
+ **/
+static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+ u32 hash_value, hash_mask;
+ u8 bit_shift = 0;
+
+ /* Register count multiplied by bits per register */
+ hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+ /*
+ * For a mc_filter_type of 0, bit_shift is the number of left-shifts
+ * where 0xFF would still fall within the hash mask.
+ */
+ while (hash_mask >> bit_shift != 0xFF)
+ bit_shift++;
+
+ /*
+ * The portion of the address that is used for the hash table
+ * is determined by the mc_filter_type setting.
+ * The algorithm is such that there is a total of 8 bits of shifting.
+ * The bit_shift for a mc_filter_type of 0 represents the number of
+ * left-shifts where the MSB of mc_addr[5] would still fall within
+ * the hash_mask. Case 0 does this exactly. Since there are a total
+ * of 8 bits of shifting, then mc_addr[4] will shift right the
+ * remaining number of bits. Thus 8 - bit_shift. The rest of the
+ * cases are a variation of this algorithm...essentially raising the
+ * number of bits to shift mc_addr[5] left, while still keeping the
+ * 8-bit shifting total.
+ *
+ * For example, given the following Destination MAC Address and an
+ * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
+ * we can see that the bit_shift for case 0 is 4. These are the hash
+ * values resulting from each mc_filter_type...
+ * [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB MSB
+ *
+ * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+ * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+ * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+ * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+ */
+ switch (hw->mac.mc_filter_type) {
+ default:
+ case 0:
+ break;
+ case 1:
+ bit_shift += 1;
+ break;
+ case 2:
+ bit_shift += 2;
+ break;
+ case 3:
+ bit_shift += 4;
+ break;
+ }
+
+ hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+ (((u16) mc_addr[5]) << bit_shift)));
+
+ return hash_value;
+}
+
+/**
+ * e1000e_update_mc_addr_list_generic - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates entire Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count)
+{
+ u32 hash_value, hash_bit, hash_reg;
+ int i;
+
+ /* clear mta_shadow */
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+ /* update mta_shadow from mc_addr_list */
+ for (i = 0; (u32) i < mc_addr_count; i++) {
+ hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
+
+ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+ hash_bit = hash_value & 0x1F;
+
+ hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ mc_addr_list += (ETH_ALEN);
+ }
+
+ /* replace the entire MTA table */
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
+ e1e_flush();
+}
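+
+/*
+ * Worked example for the MTA update above, reusing the sample address
+ * from e1000_hash_mc_addr(): with 128 MTA registers and filter type 0,
+ * 01:AA:00:12:34:56 hashes to 0x563, so hash_reg = 0x563 >> 5 = 43 and
+ * hash_bit = 0x563 & 0x1F = 3, i.e. bit 3 of mta_shadow[43] gets set.
+ */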
+
+/**
+ * e1000e_clear_hw_cntrs_base - Clear base hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the base hardware counters by reading the counter registers.
+ **/
+void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
+{
+ er32(CRCERRS);
+ er32(SYMERRS);
+ er32(MPC);
+ er32(SCC);
+ er32(ECOL);
+ er32(MCC);
+ er32(LATECOL);
+ er32(COLC);
+ er32(DC);
+ er32(SEC);
+ er32(RLEC);
+ er32(XONRXC);
+ er32(XONTXC);
+ er32(XOFFRXC);
+ er32(XOFFTXC);
+ er32(FCRUC);
+ er32(GPRC);
+ er32(BPRC);
+ er32(MPRC);
+ er32(GPTC);
+ er32(GORCL);
+ er32(GORCH);
+ er32(GOTCL);
+ er32(GOTCH);
+ er32(RNBC);
+ er32(RUC);
+ er32(RFC);
+ er32(ROC);
+ er32(RJC);
+ er32(TORL);
+ er32(TORH);
+ er32(TOTL);
+ er32(TOTH);
+ er32(TPR);
+ er32(TPT);
+ er32(MPTC);
+ er32(BPTC);
+}
+
+/**
+ * e1000e_check_for_copper_link - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the link status of the hardware has changed. If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ **/
+s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ bool link;
+
+ /*
+ * We only want to go out to the PHY registers to see if Auto-Neg
+ * has completed and/or if our link status has changed. The
+ * get_link_status flag is set upon receiving a Link Status
+ * Change or Rx Sequence Error interrupt.
+ */
+ if (!mac->get_link_status)
+ return 0;
+
+ /*
+ * First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ return ret_val; /* No link detected */
+
+ mac->get_link_status = false;
+
+ /*
+ * Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
+ e1000e_check_downshift(hw);
+
+ /*
+ * If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg) {
+ ret_val = -E1000_ERR_CONFIG;
+ return ret_val;
+ }
+
+ /*
+ * Auto-Neg is enabled. Auto Speed Detection takes care
+ * of MAC speed/duplex configuration. So we only need to
+ * configure Collision Distance in the MAC.
+ */
+ e1000e_config_collision_dist(hw);
+
+ /*
+ * Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = e1000e_config_fc_after_link_up(hw);
+ if (ret_val)
+ e_dbg("Error configuring flow control\n");
+
+ return ret_val;
+}
+
+/**
+ * e1000e_check_for_fiber_link - Check for link (Fiber)
+ * @hw: pointer to the HW structure
+ *
+ * Checks for link up on the hardware. If link is not up and we have
+ * a signal, then we need to force link up.
+ **/
+s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 rxcw;
+ u32 ctrl;
+ u32 status;
+ s32 ret_val;
+
+ ctrl = er32(CTRL);
+ status = er32(STATUS);
+ rxcw = er32(RXCW);
+
+ /*
+ * If we don't have link (auto-negotiation failed or link partner
+ * cannot auto-negotiate), the cable is plugged in (we have signal),
+ * and our link partner is not trying to auto-negotiate with us (we
+ * are receiving idles or data), we need to force link up. We also
+ * need to give auto-negotiation time to complete, in case the cable
+ * was just plugged in. The autoneg_failed flag does this.
+ */
+ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+ if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
+ (!(rxcw & E1000_RXCW_C))) {
+ if (mac->autoneg_failed == 0) {
+ mac->autoneg_failed = 1;
+ return 0;
+ }
+ e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = er32(CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ ew32(CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000e_config_fc_after_link_up(hw);
+ if (ret_val) {
+ e_dbg("Error configuring flow control\n");
+ return ret_val;
+ }
+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ /*
+ * If we are forcing link and we are receiving /C/ ordered
+ * sets, re-enable auto-negotiation in the TXCW register
+ * and disable forced link in the Device Control register
+ * in an attempt to auto-negotiate with our link partner.
+ */
+ e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+ ew32(TXCW, mac->txcw);
+ ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ mac->serdes_has_link = true;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_check_for_serdes_link - Check for link (Serdes)
+ * @hw: pointer to the HW structure
+ *
+ * Checks for link up on the hardware. If link is not up and we have
+ * a signal, then we need to force link up.
+ **/
+s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 rxcw;
+ u32 ctrl;
+ u32 status;
+ s32 ret_val;
+
+ ctrl = er32(CTRL);
+ status = er32(STATUS);
+ rxcw = er32(RXCW);
+
+ /*
+ * If we don't have link (auto-negotiation failed or link partner
+ * cannot auto-negotiate), and our link partner is not trying to
+ * auto-negotiate with us (we are receiving idles or data),
+ * we need to force link up. We also need to give auto-negotiation
+ * time to complete.
+ */
+	/* for serdes media, a signal is assumed to be present */
+ if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
+ if (mac->autoneg_failed == 0) {
+ mac->autoneg_failed = 1;
+ return 0;
+ }
+ e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = er32(CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ ew32(CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000e_config_fc_after_link_up(hw);
+ if (ret_val) {
+ e_dbg("Error configuring flow control\n");
+ return ret_val;
+ }
+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ /*
+ * If we are forcing link and we are receiving /C/ ordered
+ * sets, re-enable auto-negotiation in the TXCW register
+ * and disable forced link in the Device Control register
+ * in an attempt to auto-negotiate with our link partner.
+ */
+ e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+ ew32(TXCW, mac->txcw);
+ ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ mac->serdes_has_link = true;
+ } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
+ /*
+ * If we force link for non-auto-negotiation switch, check
+ * link status based on MAC synchronization for internal
+ * serdes media type.
+ */
+ /* SYNCH bit and IV bit are sticky. */
+ udelay(10);
+ rxcw = er32(RXCW);
+ if (rxcw & E1000_RXCW_SYNCH) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ mac->serdes_has_link = true;
+ e_dbg("SERDES: Link up - forced.\n");
+ }
+ } else {
+ mac->serdes_has_link = false;
+ e_dbg("SERDES: Link down - force failed.\n");
+ }
+ }
+
+ if (E1000_TXCW_ANE & er32(TXCW)) {
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU) {
+ /* SYNCH bit and IV bit are sticky, so reread rxcw. */
+ udelay(10);
+ rxcw = er32(RXCW);
+ if (rxcw & E1000_RXCW_SYNCH) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ mac->serdes_has_link = true;
+ e_dbg("SERDES: Link up - autoneg "
+ "completed successfully.\n");
+ } else {
+ mac->serdes_has_link = false;
+ e_dbg("SERDES: Link down - invalid"
+ "codewords detected in autoneg.\n");
+ }
+ } else {
+ mac->serdes_has_link = false;
+ e_dbg("SERDES: Link down - no sync.\n");
+ }
+ } else {
+ mac->serdes_has_link = false;
+ e_dbg("SERDES: Link down - autoneg failed\n");
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_set_default_fc_generic - Set flow control default values
+ * @hw: pointer to the HW structure
+ *
+ * Read the EEPROM for the default values for flow control and store the
+ * values.
+ **/
+static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 nvm_data;
+
+ /*
+ * Read and store word 0x0F of the EEPROM. This word contains bits
+ * that determine the hardware's default PAUSE (flow control) mode,
+ * a bit that determines whether the HW defaults to enabling or
+ * disabling auto-negotiation, and the direction of the
+ * SW defined pins. If there is no SW over-ride of the flow
+ * control setting, then the variable hw->fc will
+ * be initialized based on a value in the EEPROM.
+ */
+ ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
+
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
+ hw->fc.requested_mode = e1000_fc_none;
+ else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
+ NVM_WORD0F_ASM_DIR)
+ hw->fc.requested_mode = e1000_fc_tx_pause;
+ else
+ hw->fc.requested_mode = e1000_fc_full;
+
+ return 0;
+}
+
+/**
+ * e1000e_setup_link - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+s32 e1000e_setup_link(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+
+ /*
+ * In the case of the phy reset being blocked, we already have a link.
+ * We do not need to set it up again.
+ */
+ if (e1000_check_reset_block(hw))
+ return 0;
+
+ /*
+ * If requested flow control is set to default, set flow control
+ * based on the EEPROM flow control settings.
+ */
+ if (hw->fc.requested_mode == e1000_fc_default) {
+ ret_val = e1000_set_default_fc_generic(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /*
+ * Save off the requested flow control mode for use later. Depending
+ * on the link partner's capabilities, we may or may not use this mode.
+ */
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ e_dbg("After fix-ups FlowControl is now = %x\n",
+ hw->fc.current_mode);
+
+ /* Call the necessary media_type subroutine to configure the link. */
+ ret_val = mac->ops.setup_physical_interface(hw);
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * Initialize the flow control address, type, and PAUSE timer
+ * registers to their default values. This is done even if flow
+ * control is disabled, because it does not hurt anything to
+ * initialize these registers.
+ */
+ e_dbg("Initializing the Flow Control address, type and timer regs\n");
+ ew32(FCT, FLOW_CONTROL_TYPE);
+ ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+ ew32(FCTTV, hw->fc.pause_time);
+
+ return e1000e_set_fc_watermarks(hw);
+}
+
+/**
+ * e1000_commit_fc_settings_generic - Configure flow control
+ * @hw: pointer to the HW structure
+ *
+ * Write the flow control settings to the Transmit Config Word Register (TXCW)
+ * based on the flow control settings in e1000_mac_info.
+ **/
+static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 txcw;
+
+ /*
+ * Check for a software override of the flow control settings, and
+ * setup the device accordingly. If auto-negotiation is enabled, then
+ * software will have to set the "PAUSE" bits to the correct value in
+ * the Transmit Config Word Register (TXCW) and re-start auto-
+ * negotiation. However, if auto-negotiation is disabled, then
+ * software will have to manually configure the two flow control enable
+ * bits in the CTRL register.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we
+ * do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ */
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+ /* Flow control completely disabled by a software over-ride. */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+ break;
+ case e1000_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is disabled
+ * by a software over-ride. Since there really isn't a way to
+ * advertise that we are capable of Rx Pause ONLY, we will
+ * advertise that we support both symmetric and asymmetric Rx
+ * PAUSE. Later, we will disable the adapter's ability to send
+ * PAUSE frames.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ case e1000_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is disabled,
+ * by a software over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+ break;
+ case e1000_fc_full:
+ /*
+ * Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ default:
+ e_dbg("Flow control param set incorrectly\n");
+		return -E1000_ERR_CONFIG;
+ }
+
+ ew32(TXCW, txcw);
+ mac->txcw = txcw;
+
+ return 0;
+}
+
+/**
+ * e1000_poll_fiber_serdes_link_generic - Poll for link up
+ * @hw: pointer to the HW structure
+ *
+ * Polls for link up by reading the status register, if link fails to come
+ * up with auto-negotiation, then the link is forced if a signal is detected.
+ **/
+static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 i, status;
+ s32 ret_val;
+
+ /*
+ * If we have a signal (the cable is plugged in, or assumed true for
+ * serdes media) then poll for a "Link-Up" indication in the Device
+	 * Status Register. Time out if a link isn't seen in 500 milliseconds
+	 * (auto-negotiation should complete in less than 500 milliseconds
+	 * even if the other end is doing it in SW).
+ */
+ for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
+ usleep_range(10000, 20000);
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU)
+ break;
+ }
+ if (i == FIBER_LINK_UP_LIMIT) {
+ e_dbg("Never got a valid link from auto-neg!!!\n");
+ mac->autoneg_failed = 1;
+ /*
+ * AutoNeg failed to achieve a link, so we'll call
+ * mac->check_for_link. This routine will force the
+ * link up if we detect a signal. This will allow us to
+ * communicate with non-autonegotiating link partners.
+ */
+ ret_val = mac->ops.check_for_link(hw);
+ if (ret_val) {
+ e_dbg("Error while checking for link\n");
+ return ret_val;
+ }
+ mac->autoneg_failed = 0;
+ } else {
+ mac->autoneg_failed = 0;
+ e_dbg("Valid Link Found\n");
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes
+ * @hw: pointer to the HW structure
+ *
+ * Configures collision distance and flow control for fiber and serdes
+ * links. Upon successful setup, poll for link.
+ **/
+s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ ctrl = er32(CTRL);
+
+ /* Take the link out of reset */
+ ctrl &= ~E1000_CTRL_LRST;
+
+ e1000e_config_collision_dist(hw);
+
+ ret_val = e1000_commit_fc_settings_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * Since auto-negotiation is enabled, take the link out of reset (the
+ * link will be in reset, because we previously reset the chip). This
+ * will restart auto-negotiation. If auto-negotiation is successful
+ * then the link-up status bit will be set and the flow control enable
+ * bits (RFCE and TFCE) will be set according to their negotiated value.
+ */
+ e_dbg("Auto-negotiation enabled\n");
+
+ ew32(CTRL, ctrl);
+ e1e_flush();
+ usleep_range(1000, 2000);
+
+ /*
+ * For these adapters, the SW definable pin 1 is set when the optics
+ * detect a signal. If we have a signal, then poll for a "Link-Up"
+ * indication.
+ */
+ if (hw->phy.media_type == e1000_media_type_internal_serdes ||
+ (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
+ ret_val = e1000_poll_fiber_serdes_link_generic(hw);
+ } else {
+ e_dbg("No signal detected\n");
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_config_collision_dist - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup. Currently no func pointer exists and all
+ * implementations are handled in the generic version of this function.
+ **/
+void e1000e_config_collision_dist(struct e1000_hw *hw)
+{
+ u32 tctl;
+
+ tctl = er32(TCTL);
+
+ tctl &= ~E1000_TCTL_COLD;
+ tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+ ew32(TCTL, tctl);
+ e1e_flush();
+}
+
+/**
+ * e1000e_set_fc_watermarks - Set flow control high/low watermarks
+ * @hw: pointer to the HW structure
+ *
+ * Sets the flow control high/low threshold (watermark) registers. If
+ * flow control XON frame transmission is enabled, then set XON frame
+ * transmission as well.
+ **/
+s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
+{
+ u32 fcrtl = 0, fcrth = 0;
+
+ /*
+ * Set the flow control receive threshold registers. Normally,
+ * these registers will be set to a default threshold that may be
+ * adjusted later by the driver's runtime code. However, if the
+ * ability to transmit pause frames is not enabled, then these
+ * registers will be set to 0.
+ */
+ if (hw->fc.current_mode & e1000_fc_tx_pause) {
+ /*
+ * We need to set up the Receive Threshold high and low water
+ * marks as well as (optionally) enabling the transmission of
+ * XON frames.
+ */
+ fcrtl = hw->fc.low_water;
+ fcrtl |= E1000_FCRTL_XONE;
+ fcrth = hw->fc.high_water;
+ }
+ ew32(FCRTL, fcrtl);
+ ew32(FCRTH, fcrth);
+
+ return 0;
+}
+
+/**
+ * e1000e_force_mac_fc - Force the MAC's flow control settings
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
+ * device control register to reflect the adapter settings. TFCE and RFCE
+ * need to be explicitly set by software when a copper PHY is used because
+ * autonegotiation is managed by the PHY rather than the MAC. Software must
+ * also configure these bits when link is forced on a fiber connection.
+ **/
+s32 e1000e_force_mac_fc(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ ctrl = er32(CTRL);
+
+ /*
+ * Because we didn't get link via the internal auto-negotiation
+ * mechanism (we either forced link or we got link via PHY
+	 * auto-neg), we have to manually enable/disable transmit and
+ * receive flow control.
+ *
+ * The "Case" statement below enables/disable flow control
+ * according to the "hw->fc.current_mode" parameter.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause
+ * frames but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+	 *          but we do not receive pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) is enabled.
+ * other: No other values should be possible at this point.
+ */
+ e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+ ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+ break;
+ case e1000_fc_rx_pause:
+ ctrl &= (~E1000_CTRL_TFCE);
+ ctrl |= E1000_CTRL_RFCE;
+ break;
+ case e1000_fc_tx_pause:
+ ctrl &= (~E1000_CTRL_RFCE);
+ ctrl |= E1000_CTRL_TFCE;
+ break;
+ case e1000_fc_full:
+ ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+ break;
+ default:
+ e_dbg("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ew32(CTRL, ctrl);
+
+ return 0;
+}
+
+/**
+ * e1000e_config_fc_after_link_up - Configures flow control after link
+ * @hw: pointer to the HW structure
+ *
+ * Checks the status of auto-negotiation after link up to ensure that the
+ * speed and duplex were not forced. If the link needed to be forced, then
+ * flow control needs to be forced also. If auto-negotiation is enabled
+ * and did not fail, then we configure flow control based on our link
+ * partner.
+ **/
+s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val = 0;
+ u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+ u16 speed, duplex;
+
+ /*
+ * Check for the case where we have fiber media and auto-neg failed
+ * so we had to force link. In this case, we need to force the
+ * configuration of the MAC to match the "fc" parameter.
+ */
+ if (mac->autoneg_failed) {
+ if (hw->phy.media_type == e1000_media_type_fiber ||
+ hw->phy.media_type == e1000_media_type_internal_serdes)
+ ret_val = e1000e_force_mac_fc(hw);
+ } else {
+ if (hw->phy.media_type == e1000_media_type_copper)
+ ret_val = e1000e_force_mac_fc(hw);
+ }
+
+ if (ret_val) {
+ e_dbg("Error forcing flow control settings\n");
+ return ret_val;
+ }
+
+ /*
+ * Check for the case where we have copper media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+	 * has completed, and if so, how the PHY and link partner have
+ * flow control configured.
+ */
+ if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
+ /*
+ * Read the MII Status Register and check to see if AutoNeg
+ * has completed. We read this twice because this reg has
+ * some "sticky" (latched) bits.
+ */
+ ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+ e_dbg("Copper PHY and Auto Neg "
+ "has not completed.\n");
+ return ret_val;
+ }
+
+ /*
+ * The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (Address 4) and the Auto_Negotiation Base
+ * Page Ability Register (Address 5) to determine how
+ * flow control was negotiated.
+ */
+ ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val =
+ e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * Two bits in the Auto Negotiation Advertisement Register
+ * (Address 4) and two bits in the Auto Negotiation Base
+ * Page Ability Register (Address 5) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | e1000_fc_none
+ * 0 | 1 | 0 | DC | e1000_fc_none
+ * 0 | 1 | 1 | 0 | e1000_fc_none
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ * 1 | 0 | 0 | DC | e1000_fc_none
+ * 1 | DC | 1 | DC | e1000_fc_full
+ * 1 | 1 | 0 | 0 | e1000_fc_none
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
+ *
+ */
+ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+ /*
+			 * Now we need to check if the user selected Rx-only
+			 * PAUSE frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise Rx
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == e1000_fc_full) {
+ hw->fc.current_mode = e1000_fc_full;
+ e_dbg("Flow Control = FULL.\r\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ e_dbg("Flow Control = "
+ "Rx PAUSE frames only.\r\n");
+ }
+ }
+ /*
+ * For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ */
+ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_tx_pause;
+ e_dbg("Flow Control = Tx PAUSE frames only.\r\n");
+ }
+ /*
+ * For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ */
+ else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ e_dbg("Flow Control = Rx PAUSE frames only.\r\n");
+ } else {
+ /*
+ * Per the IEEE spec, at this point flow control
+ * should be disabled.
+ */
+ hw->fc.current_mode = e1000_fc_none;
+ e_dbg("Flow Control = NONE.\r\n");
+ }
+
+ /*
+ * Now we need to do one last check... If we auto-
+ * negotiated to HALF DUPLEX, flow control should not be
+ * enabled per IEEE 802.3 spec.
+ */
+ ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
+ if (ret_val) {
+ e_dbg("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+
+ if (duplex == HALF_DUPLEX)
+ hw->fc.current_mode = e1000_fc_none;
+
+ /*
+ * Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ ret_val = e1000e_force_mac_fc(hw);
+ if (ret_val) {
+ e_dbg("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Read the status register for the current speed/duplex and store the current
+ * speed and duplex for copper connections.
+ **/
+s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+ u32 status;
+
+ status = er32(STATUS);
+ if (status & E1000_STATUS_SPEED_1000)
+ *speed = SPEED_1000;
+ else if (status & E1000_STATUS_SPEED_100)
+ *speed = SPEED_100;
+ else
+ *speed = SPEED_10;
+
+ if (status & E1000_STATUS_FD)
+ *duplex = FULL_DUPLEX;
+ else
+ *duplex = HALF_DUPLEX;
+
+ e_dbg("%u Mbps, %s Duplex\n",
+ *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10,
+ *duplex == FULL_DUPLEX ? "Full" : "Half");
+
+ return 0;
+}
+
+/**
+ * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Sets the speed and duplex to gigabit full duplex (the only possible option)
+ * for fiber/serdes links.
+ **/
+s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+ *speed = SPEED_1000;
+ *duplex = FULL_DUPLEX;
+
+ return 0;
+}
+
+/**
+ * e1000e_get_hw_semaphore - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ **/
+s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
+{
+ u32 swsm;
+ s32 timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
+ /* Get the SW semaphore */
+ while (i < timeout) {
+ swsm = er32(SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ udelay(50);
+ i++;
+ }
+
+ if (i == timeout) {
+ e_dbg("Driver can't access device - SMBI bit is set.\n");
+ return -E1000_ERR_NVM;
+ }
+
+ /* Get the FW semaphore. */
+ for (i = 0; i < timeout; i++) {
+ swsm = er32(SWSM);
+ ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (er32(SWSM) & E1000_SWSM_SWESMBI)
+ break;
+
+ udelay(50);
+ }
+
+ if (i == timeout) {
+ /* Release semaphores */
+ e1000e_put_hw_semaphore(hw);
+ e_dbg("Driver can't access the NVM\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_put_hw_semaphore - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ **/
+void e1000e_put_hw_semaphore(struct e1000_hw *hw)
+{
+ u32 swsm;
+
+ swsm = er32(SWSM);
+ swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+ ew32(SWSM, swsm);
+}
+
+/**
+ * e1000e_get_auto_rd_done - Check for auto read completion
+ * @hw: pointer to the HW structure
+ *
+ * Check EEPROM for Auto Read done bit.
+ **/
+s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
+{
+ s32 i = 0;
+
+ while (i < AUTO_READ_DONE_TIMEOUT) {
+ if (er32(EECD) & E1000_EECD_AUTO_RD)
+ break;
+ usleep_range(1000, 2000);
+ i++;
+ }
+
+ if (i == AUTO_READ_DONE_TIMEOUT) {
+ e_dbg("Auto read by HW from NVM has not completed.\n");
+ return -E1000_ERR_RESET;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_valid_led_default - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set to a valid LED configuration.
+ **/
+s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
+ *data = ID_LED_DEFAULT;
+
+ return 0;
+}
+
+/**
+ * e1000e_id_led_init - Initialize ID LED settings
+ * @hw: pointer to the HW structure
+ *
+ * Reads the LED ID configuration from the NVM and derives the LEDCTL values
+ * used to drive the LEDs in the "on" (mode 1) and "off" (mode 2) states,
+ * saving the current LEDCTL value as the default.
+ **/
+s32 e1000e_id_led_init(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ const u32 ledctl_mask = 0x000000FF;
+ const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+ const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+ u16 data, i, temp;
+ const u16 led_mask = 0x0F;
+
+ ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+ if (ret_val)
+ return ret_val;
+
+ mac->ledctl_default = er32(LEDCTL);
+ mac->ledctl_mode1 = mac->ledctl_default;
+ mac->ledctl_mode2 = mac->ledctl_default;
+
+ for (i = 0; i < 4; i++) {
+ temp = (data >> (i << 2)) & led_mask;
+ switch (temp) {
+ case ID_LED_ON1_DEF2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_ON1_OFF2:
+ mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode1 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_OFF1_DEF2:
+ case ID_LED_OFF1_ON2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode1 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ switch (temp) {
+ case ID_LED_DEF1_ON2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_OFF1_ON2:
+ mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode2 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_DEF1_OFF2:
+ case ID_LED_ON1_OFF2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode2 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+
+ return 0;
+}
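+
+/*
+ * Worked example (illustrative): each 4-bit nibble of the NVM word selects
+ * the behaviour of one LED; (i << 2) selects nibble i and (i << 3) positions
+ * the 8-bit LEDCTL field for that LED. Assuming the usual ID_LED_* encodings
+ * (e.g. ID_LED_ON1_ON2 = 0x5, ID_LED_OFF1_ON2 = 0x8), a hypothetical
+ * data = 0x8965 would set LED0 on in both modes, LED1 on in mode 1 only,
+ * LED2 off in both modes and LED3 on in mode 2 only.
+ */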
+
+/**
+ * e1000e_setup_led_generic - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use and saves the current state
+ * of the LED so it can be later restored.
+ **/
+s32 e1000e_setup_led_generic(struct e1000_hw *hw)
+{
+ u32 ledctl;
+
+ if (hw->mac.ops.setup_led != e1000e_setup_led_generic)
+ return -E1000_ERR_CONFIG;
+
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ ledctl = er32(LEDCTL);
+ hw->mac.ledctl_default = ledctl;
+ /* Turn off LED0 */
+ ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
+ E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_LED0_MODE_MASK);
+ ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+ E1000_LEDCTL_LED0_MODE_SHIFT);
+ ew32(LEDCTL, ledctl);
+ } else if (hw->phy.media_type == e1000_media_type_copper) {
+ ew32(LEDCTL, hw->mac.ledctl_mode1);
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_cleanup_led_generic - Set LED config to default operation
+ * @hw: pointer to the HW structure
+ *
+ * Remove the current LED configuration and set the LED configuration
+ * to the default value, saved from the EEPROM.
+ **/
+s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
+{
+ ew32(LEDCTL, hw->mac.ledctl_default);
+ return 0;
+}
+
+/**
+ * e1000e_blink_led_generic - Blink LED
+ * @hw: pointer to the HW structure
+ *
+ * Blink the LEDs which are set to be on.
+ **/
+s32 e1000e_blink_led_generic(struct e1000_hw *hw)
+{
+ u32 ledctl_blink = 0;
+ u32 i;
+
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ /* always blink LED0 for PCI-E fiber */
+ ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+ (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+ } else {
+ /*
+ * set the blink bit for each LED that's "on" (0x0E)
+ * in ledctl_mode2
+ */
+ ledctl_blink = hw->mac.ledctl_mode2;
+ for (i = 0; i < 4; i++)
+ if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
+ E1000_LEDCTL_MODE_LED_ON)
+ ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
+ (i * 8));
+ }
+
+ ew32(LEDCTL, ledctl_blink);
+
+ return 0;
+}
+
+/**
+ * e1000e_led_on_generic - Turn LED on
+ * @hw: pointer to the HW structure
+ *
+ * Turn LED on.
+ **/
+s32 e1000e_led_on_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ switch (hw->phy.media_type) {
+ case e1000_media_type_fiber:
+ ctrl = er32(CTRL);
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ ew32(CTRL, ctrl);
+ break;
+ case e1000_media_type_copper:
+ ew32(LEDCTL, hw->mac.ledctl_mode2);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_led_off_generic - Turn LED off
+ * @hw: pointer to the HW structure
+ *
+ * Turn LED off.
+ **/
+s32 e1000e_led_off_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ switch (hw->phy.media_type) {
+ case e1000_media_type_fiber:
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ ew32(CTRL, ctrl);
+ break;
+ case e1000_media_type_copper:
+ ew32(LEDCTL, hw->mac.ledctl_mode1);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_set_pcie_no_snoop - Set PCI-express capabilities
+ * @hw: pointer to the HW structure
+ * @no_snoop: bitmap of snoop events
+ *
+ * Set the PCI-Express no-snoop bits for the events enabled in 'no_snoop'.
+ **/
+void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop)
+{
+ u32 gcr;
+
+ if (no_snoop) {
+ gcr = er32(GCR);
+ gcr &= ~(PCIE_NO_SNOOP_ALL);
+ gcr |= no_snoop;
+ ew32(GCR, gcr);
+ }
+}
+
+/**
+ * e1000e_disable_pcie_master - Disables PCI-express master access
+ * @hw: pointer to the HW structure
+ *
+ * Returns 0 if successful, else returns -10
+ * (-E1000_ERR_MASTER_REQUESTS_PENDING) if setting the master disable bit
+ * did not stop the pending master requests.
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests.
+ **/
+s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 timeout = MASTER_DISABLE_TIMEOUT;
+
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+ ew32(CTRL, ctrl);
+
+ while (timeout) {
+ if (!(er32(STATUS) &
+ E1000_STATUS_GIO_MASTER_ENABLE))
+ break;
+ udelay(100);
+ timeout--;
+ }
+
+ if (!timeout) {
+ e_dbg("Master requests are pending.\n");
+ return -E1000_ERR_MASTER_REQUESTS_PENDING;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing
+ * @hw: pointer to the HW structure
+ *
+ * Reset the Adaptive Interframe Spacing throttle to default values.
+ **/
+void e1000e_reset_adaptive(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ if (!mac->adaptive_ifs) {
+ e_dbg("Not in Adaptive IFS mode!\n");
+ goto out;
+ }
+
+ mac->current_ifs_val = 0;
+ mac->ifs_min_val = IFS_MIN;
+ mac->ifs_max_val = IFS_MAX;
+ mac->ifs_step_size = IFS_STEP;
+ mac->ifs_ratio = IFS_RATIO;
+
+ mac->in_ifs_mode = false;
+ ew32(AIT, 0);
+out:
+ return;
+}
+
+/**
+ * e1000e_update_adaptive - Update Adaptive Interframe Spacing
+ * @hw: pointer to the HW structure
+ *
+ * Update the Adaptive Interframe Spacing Throttle value based on the
+ * time between transmitted packets and time between collisions.
+ **/
+void e1000e_update_adaptive(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ if (!mac->adaptive_ifs) {
+ e_dbg("Not in Adaptive IFS mode!\n");
+ goto out;
+ }
+
+ if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
+ if (mac->tx_packet_delta > MIN_NUM_XMITS) {
+ mac->in_ifs_mode = true;
+ if (mac->current_ifs_val < mac->ifs_max_val) {
+ if (!mac->current_ifs_val)
+ mac->current_ifs_val = mac->ifs_min_val;
+ else
+ mac->current_ifs_val +=
+ mac->ifs_step_size;
+ ew32(AIT, mac->current_ifs_val);
+ }
+ }
+ } else {
+ if (mac->in_ifs_mode &&
+ (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
+ mac->current_ifs_val = 0;
+ mac->in_ifs_mode = false;
+ ew32(AIT, 0);
+ }
+ }
+out:
+ return;
+}
+
+/**
+ * e1000_raise_eec_clk - Raise EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Enable/Raise the EEPROM clock bit.
+ **/
+static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ *eecd = *eecd | E1000_EECD_SK;
+ ew32(EECD, *eecd);
+ e1e_flush();
+ udelay(hw->nvm.delay_usec);
+}
+
+/**
+ * e1000_lower_eec_clk - Lower EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Clear/Lower the EEPROM clock bit.
+ **/
+static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ *eecd = *eecd & ~E1000_EECD_SK;
+ ew32(EECD, *eecd);
+ e1e_flush();
+ udelay(hw->nvm.delay_usec);
+}
+
+/**
+ * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
+ * @hw: pointer to the HW structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ *
+ * We need to shift 'count' bits out to the EEPROM. So, the value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ **/
+static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = er32(EECD);
+ u32 mask;
+
+ mask = 0x01 << (count - 1);
+ if (nvm->type == e1000_nvm_eeprom_spi)
+ eecd |= E1000_EECD_DO;
+
+ do {
+ eecd &= ~E1000_EECD_DI;
+
+ if (data & mask)
+ eecd |= E1000_EECD_DI;
+
+ ew32(EECD, eecd);
+ e1e_flush();
+
+ udelay(nvm->delay_usec);
+
+ e1000_raise_eec_clk(hw, &eecd);
+ e1000_lower_eec_clk(hw, &eecd);
+
+ mask >>= 1;
+ } while (mask);
+
+ eecd &= ~E1000_EECD_DI;
+ ew32(EECD, eecd);
+}
+
+/**
+ * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
+ * @hw: pointer to the HW structure
+ * @count: number of bits to shift in
+ *
+ * In order to read a register from the EEPROM, we need to shift 'count' bits
+ * in from the EEPROM. Bits are "shifted in" by raising the clock input to
+ * the EEPROM (setting the SK bit), and then reading the value of the data out
+ * "DO" bit. During this "shifting in" process the data in "DI" bit should
+ * always be clear.
+ **/
+static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
+{
+ u32 eecd;
+ u32 i;
+ u16 data;
+
+ eecd = er32(EECD);
+
+ eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+ data = 0;
+
+ for (i = 0; i < count; i++) {
+ data <<= 1;
+ e1000_raise_eec_clk(hw, &eecd);
+
+ eecd = er32(EECD);
+
+ eecd &= ~E1000_EECD_DI;
+ if (eecd & E1000_EECD_DO)
+ data |= 1;
+
+ e1000_lower_eec_clk(hw, &eecd);
+ }
+
+ return data;
+}
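+
+/*
+ * Protocol sketch (illustrative, not driver code): a raw SPI EEPROM word
+ * read built from the two shift helpers above would clock out the READ
+ * opcode and byte address, then clock the 16 data bits back in:
+ *
+ *	e1000_shift_out_eec_bits(hw, NVM_READ_OPCODE_SPI, nvm->opcode_bits);
+ *	e1000_shift_out_eec_bits(hw, (u16)(offset * 2), nvm->address_bits);
+ *	word = e1000_shift_in_eec_bits(hw, 16);
+ *
+ * This driver normally reads through the EERD register instead, see
+ * e1000e_read_nvm_eerd() below.
+ */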
+
+/**
+ * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ * @hw: pointer to the HW structure
+ * @ee_reg: EEPROM flag for polling
+ *
+ * Polls the EEPROM status bit for either read or write completion based
+ * upon the value of 'ee_reg'.
+ **/
+s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
+{
+ u32 attempts = 100000;
+ u32 i, reg = 0;
+
+ for (i = 0; i < attempts; i++) {
+ if (ee_reg == E1000_NVM_POLL_READ)
+ reg = er32(EERD);
+ else
+ reg = er32(EEWR);
+
+ if (reg & E1000_NVM_RW_REG_DONE)
+ return 0;
+
+ udelay(5);
+ }
+
+ return -E1000_ERR_NVM;
+}
+
+/**
+ * e1000e_acquire_nvm - Generic request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+s32 e1000e_acquire_nvm(struct e1000_hw *hw)
+{
+ u32 eecd = er32(EECD);
+ s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
+
+ ew32(EECD, eecd | E1000_EECD_REQ);
+ eecd = er32(EECD);
+
+ while (timeout) {
+ if (eecd & E1000_EECD_GNT)
+ break;
+ udelay(5);
+ eecd = er32(EECD);
+ timeout--;
+ }
+
+ if (!timeout) {
+ eecd &= ~E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ e_dbg("Could not acquire NVM grant\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_standby_nvm - Return EEPROM to standby state
+ * @hw: pointer to the HW structure
+ *
+ * Return the EEPROM to a standby state.
+ **/
+static void e1000_standby_nvm(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = er32(EECD);
+
+ if (nvm->type == e1000_nvm_eeprom_spi) {
+ /* Toggle CS to flush commands */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ e1e_flush();
+ udelay(nvm->delay_usec);
+ eecd &= ~E1000_EECD_CS;
+ ew32(EECD, eecd);
+ e1e_flush();
+ udelay(nvm->delay_usec);
+ }
+}
+
+/**
+ * e1000_stop_nvm - Terminate EEPROM command
+ * @hw: pointer to the HW structure
+ *
+ * Terminates the current command by inverting the EEPROM's chip select pin.
+ **/
+static void e1000_stop_nvm(struct e1000_hw *hw)
+{
+ u32 eecd;
+
+ eecd = er32(EECD);
+ if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+ /* Pull CS high */
+ eecd |= E1000_EECD_CS;
+ e1000_lower_eec_clk(hw, &eecd);
+ }
+}
+
+/**
+ * e1000e_release_nvm - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+void e1000e_release_nvm(struct e1000_hw *hw)
+{
+ u32 eecd;
+
+ e1000_stop_nvm(hw);
+
+ eecd = er32(EECD);
+ eecd &= ~E1000_EECD_REQ;
+ ew32(EECD, eecd);
+}
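+
+/*
+ * Usage sketch (illustrative only): direct EEPROM access is bracketed by
+ * the acquire/release pair, with the EEPROM readied in between:
+ *
+ *	ret_val = nvm->ops.acquire(hw);
+ *	if (ret_val)
+ *		return ret_val;
+ *	ret_val = e1000_ready_nvm_eeprom(hw);
+ *	... issue EEPROM commands ...
+ *	nvm->ops.release(hw);
+ *
+ * e1000e_write_nvm_spi() below follows exactly this pattern.
+ */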
+
+/**
+ * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the EEPROM for reading and writing.
+ **/
+static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = er32(EECD);
+ u8 spi_stat_reg;
+
+ if (nvm->type == e1000_nvm_eeprom_spi) {
+ u16 timeout = NVM_MAX_RETRY_SPI;
+
+ /* Clear SK and CS */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ ew32(EECD, eecd);
+ e1e_flush();
+ udelay(1);
+
+ /*
+ * Read "Status Register" repeatedly until the LSB is cleared.
+ * The EEPROM will signal that the command has been completed
+ * by clearing bit 0 of the internal status register. If it's
+ * not cleared within 'timeout', then error out.
+ */
+ while (timeout) {
+ e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+ hw->nvm.opcode_bits);
+ spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
+ if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+ break;
+
+ udelay(5);
+ e1000_standby_nvm(hw);
+ timeout--;
+ }
+
+ if (!timeout) {
+ e_dbg("SPI NVM Status error\n");
+ return -E1000_ERR_NVM;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_read_nvm_eerd - Reads EEPROM using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word(s) read from the EEPROM
+ *
+ * Reads 16-bit word(s) from the EEPROM using the EERD register.
+ **/
+s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i, eerd = 0;
+ s32 ret_val = 0;
+
+ /*
+ * A check for invalid values: offset too large, too many words,
+ * too many words for the offset, and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ e_dbg("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ for (i = 0; i < words; i++) {
+ eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
+ E1000_NVM_RW_REG_START;
+
+ ew32(EERD, eerd);
+ ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+ if (ret_val)
+ break;
+
+ data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000e_write_nvm_spi - Write to EEPROM using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * Writes data to EEPROM at offset using SPI interface.
+ *
+ * If e1000e_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ s32 ret_val;
+ u16 widx = 0;
+
+ /*
+ * A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ e_dbg("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
+ }
+
+ ret_val = nvm->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ while (widx < words) {
+ u8 write_opcode = NVM_WRITE_OPCODE_SPI;
+
+ ret_val = e1000_ready_nvm_eeprom(hw);
+ if (ret_val) {
+ nvm->ops.release(hw);
+ return ret_val;
+ }
+
+ e1000_standby_nvm(hw);
+
+ /* Send the WRITE ENABLE command (8 bit opcode) */
+ e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+ nvm->opcode_bits);
+
+ e1000_standby_nvm(hw);
+
+ /*
+ * Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+ */
+ if ((nvm->address_bits == 8) && (offset >= 128))
+ write_opcode |= NVM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
+ e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+ e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+ nvm->address_bits);
+
+ /* Loop to allow for up to whole page write of eeprom */
+ while (widx < words) {
+ u16 word_out = data[widx];
+ word_out = (word_out >> 8) | (word_out << 8);
+ e1000_shift_out_eec_bits(hw, word_out, 16);
+ widx++;
+
+ if ((((offset + widx) * 2) % nvm->page_size) == 0) {
+ e1000_standby_nvm(hw);
+ break;
+ }
+ }
+ }
+
+ usleep_range(10000, 20000);
+ nvm->ops.release(hw);
+ return 0;
+}
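+
+/*
+ * Usage sketch (illustrative, 'offset' is hypothetical): after a write,
+ * the checksum word must be refreshed or later validation will fail:
+ *
+ *	u16 word = 0x1234;
+ *	ret_val = e1000e_write_nvm_spi(hw, offset, 1, &word);
+ *	if (!ret_val)
+ *		ret_val = e1000e_update_nvm_checksum_generic(hw);
+ */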
+
+/**
+ * e1000_read_pba_string_generic - Read device part number
+ * @hw: pointer to the HW structure
+ * @pba_num: pointer to device part number
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in pba_num.
+ **/
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ s32 ret_val;
+ u16 nvm_data;
+ u16 pba_ptr;
+ u16 offset;
+ u16 length;
+
+ if (pba_num == NULL) {
+ e_dbg("PBA string buffer was null\n");
+ ret_val = E1000_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ /*
+	 * If nvm_data is not the pointer guard, the PBA must be in legacy
+	 * format, which means pba_ptr is actually our second data word for
+	 * the PBA number and we can decode it into an ASCII string.
+ */
+ if (nvm_data != NVM_PBA_PTR_GUARD) {
+ e_dbg("NVM PBA number is not stored as string\n");
+
+ /* we will need 11 characters to store the PBA */
+ if (pba_num_size < 11) {
+ e_dbg("PBA string buffer too small\n");
+ return E1000_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pba_ptr */
+ pba_num[0] = (nvm_data >> 12) & 0xF;
+ pba_num[1] = (nvm_data >> 8) & 0xF;
+ pba_num[2] = (nvm_data >> 4) & 0xF;
+ pba_num[3] = nvm_data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
+ }
+
+ goto out;
+ }
+
+ ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ e_dbg("NVM PBA number section invalid length\n");
+ ret_val = E1000_ERR_NVM_PBA_SECTION;
+ goto out;
+ }
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ e_dbg("PBA string buffer too small\n");
+ ret_val = E1000_ERR_NO_SPACE;
+ goto out;
+ }
+
+ /* trim pba length from start of string */
+ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+ pba_num[offset * 2] = (u8)(nvm_data >> 8);
+ pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+ }
+ pba_num[offset * 2] = '\0';
+
+out:
+ return ret_val;
+}
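+
+/*
+ * Worked example (illustrative) of the legacy decode above: for
+ * nvm_data = 0x1234 and pba_ptr = 0x5678 the nibbles expand to
+ * "123456-078"; the fixed '0' comes from pba_num[7] = 0 being turned into
+ * the character '0' by the hex-to-ASCII loop.
+ */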
+
+/**
+ * e1000_read_mac_addr_generic - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the device MAC address from the EEPROM and stores the value.
+ * Since devices with two ports use the same EEPROM, we increment the
+ * last bit in the MAC address for the second port.
+ **/
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
+{
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
+
+ rar_high = er32(RAH(0));
+ rar_low = er32(RAL(0));
+
+ for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
+
+ for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
+
+ for (i = 0; i < ETH_ALEN; i++)
+ hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+ return 0;
+}
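+
+/*
+ * Worked example (illustrative): RAL(0) holds the first four MAC bytes and
+ * RAH(0) the last two, least significant byte first. For hypothetical
+ * values rar_low = 0xDDCCBBAA and rar_high = 0x0000FFEE, the resulting
+ * perm_addr is aa:bb:cc:dd:ee:ff.
+ */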
+
+/**
+ * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+ checksum += nvm_data;
+ }
+
+ if (checksum != (u16) NVM_SUM) {
+ e_dbg("NVM Checksum Invalid\n");
+ return -E1000_ERR_NVM;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_update_nvm_checksum_generic - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+ ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error while updating checksum.\n");
+ return ret_val;
+ }
+ checksum += nvm_data;
+ }
+ checksum = (u16) NVM_SUM - checksum;
+ ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
+ if (ret_val)
+ e_dbg("NVM Write Error while updating checksum.\n");
+
+ return ret_val;
+}
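+
+/*
+ * Illustrative arithmetic: the update routine stores
+ * NVM_SUM - sum(words 0x00..NVM_CHECKSUM_REG - 1) in the checksum word, so
+ * the validation routine's sum over words 0x00..NVM_CHECKSUM_REG (checksum
+ * word included) comes out to exactly NVM_SUM (0xBABA). For example, if the
+ * first NVM_CHECKSUM_REG words sum to 0x1111, the stored checksum is
+ * 0xBABA - 0x1111 = 0xA9A9.
+ */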
+
+/**
+ * e1000e_reload_nvm - Reloads EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ * extended control register.
+ **/
+void e1000e_reload_nvm(struct e1000_hw *hw)
+{
+ u32 ctrl_ext;
+
+ udelay(10);
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ ew32(CTRL_EXT, ctrl_ext);
+ e1e_flush();
+}
+
+/**
+ * e1000_calculate_checksum - Calculate checksum for buffer
+ * @buffer: pointer to EEPROM
+ * @length: size of EEPROM to calculate a checksum for
+ *
+ * Calculates the checksum over a buffer of the specified length and
+ * returns it.
+ **/
+static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
+{
+ u32 i;
+ u8 sum = 0;
+
+ if (!buffer)
+ return 0;
+
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
+ return (u8) (0 - sum);
+}
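+
+/*
+ * Worked example (illustrative): for buffer = {0x10, 0x20, 0x30},
+ * sum = 0x60 and the returned checksum is (u8)(0 - 0x60) = 0xA0; adding
+ * the checksum back over the same data gives 0x100, which truncates to 0
+ * in u8 arithmetic, the property the manageability code relies on.
+ */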
+
+/**
+ * e1000_mng_enable_host_if - Check that the host interface is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ * This function checks whether the HOST IF is enabled for command operation
+ * and whether the previous command has completed. It busy-waits while a
+ * previous command is still in progress.
+ **/
+static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
+{
+ u32 hicr;
+ u8 i;
+
+ if (!(hw->mac.arc_subsystem_valid)) {
+ e_dbg("ARC subsystem not valid.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = er32(HICR);
+ if ((hicr & E1000_HICR_EN) == 0) {
+ e_dbg("E1000_HOST_EN bit disabled.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+ /* check the previous command is completed */
+ for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+ hicr = er32(HICR);
+ if (!(hicr & E1000_HICR_C))
+ break;
+ mdelay(1);
+ }
+
+ if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+ e_dbg("Previous command timeout failed .\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_check_mng_mode_generic - check management mode
+ * @hw: pointer to the HW structure
+ *
+ * Reads the firmware semaphore register and returns true (>0) if
+ * manageability is enabled, else false (0).
+ **/
+bool e1000e_check_mng_mode_generic(struct e1000_hw *hw)
+{
+ u32 fwsm = er32(FWSM);
+
+ return (fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
+}
+
+/**
+ * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx
+ * @hw: pointer to the HW structure
+ *
+ * Enables packet filtering on transmit packets if manageability is enabled
+ * and host interface is enabled.
+ **/
+bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
+{
+ struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
+ u32 *buffer = (u32 *)&hw->mng_cookie;
+ u32 offset;
+ s32 ret_val, hdr_csum, csum;
+ u8 i, len;
+
+ hw->mac.tx_pkt_filtering = true;
+
+ /* No manageability, no filtering */
+ if (!e1000e_check_mng_mode(hw)) {
+ hw->mac.tx_pkt_filtering = false;
+ goto out;
+ }
+
+ /*
+ * If we can't read from the host interface for whatever
+ * reason, disable filtering.
+ */
+ ret_val = e1000_mng_enable_host_if(hw);
+ if (ret_val) {
+ hw->mac.tx_pkt_filtering = false;
+ goto out;
+ }
+
+ /* Read in the header. Length and offset are in dwords. */
+ len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
+ offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
+ for (i = 0; i < len; i++)
+ *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i);
+ hdr_csum = hdr->checksum;
+ hdr->checksum = 0;
+ csum = e1000_calculate_checksum((u8 *)hdr,
+ E1000_MNG_DHCP_COOKIE_LENGTH);
+ /*
+ * If either the checksums or signature don't match, then
+ * the cookie area isn't considered valid, in which case we
+ * take the safe route of assuming Tx filtering is enabled.
+ */
+ if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
+ hw->mac.tx_pkt_filtering = true;
+ goto out;
+ }
+
+ /* Cookie area is valid, make the final check for filtering. */
+ if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
+ hw->mac.tx_pkt_filtering = false;
+ goto out;
+ }
+
+out:
+ return hw->mac.tx_pkt_filtering;
+}
+
+/**
+ * e1000_mng_write_cmd_header - Writes manageability command header
+ * @hw: pointer to the HW structure
+ * @hdr: pointer to the host interface command header
+ *
+ * Writes the command header after computing its checksum.
+ **/
+static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr)
+{
+ u16 i, length = sizeof(struct e1000_host_mng_command_header);
+
+ /* Write the whole command header structure with new checksum. */
+
+ hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
+
+ length >>= 2;
+ /* Write the relevant command block into the ram area. */
+ for (i = 0; i < length; i++) {
+ E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i,
+ *((u32 *) hdr + i));
+ e1e_flush();
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_mng_host_if_write - Write to the manageability host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface buffer
+ * @length: size of the buffer
+ * @offset: location in the buffer to write to
+ * @sum: sum of the data (not checksum)
+ *
+ * This function writes the buffer content at the given offset on the host
+ * interface. It handles alignment so the writes are done in the most
+ * efficient way and accumulates the sum of the data in the *sum parameter.
+ **/
+static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
+ u16 length, u16 offset, u8 *sum)
+{
+ u8 *tmp;
+ u8 *bufptr = buffer;
+ u32 data = 0;
+ u16 remaining, i, j, prev_bytes;
+
+	/* 'sum' accumulates the raw data only; it is not a checksum */
+
+ if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
+ return -E1000_ERR_PARAM;
+
+ tmp = (u8 *)&data;
+ prev_bytes = offset & 0x3;
+ offset >>= 2;
+
+ if (prev_bytes) {
+ data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
+ for (j = prev_bytes; j < sizeof(u32); j++) {
+ *(tmp + j) = *bufptr++;
+ *sum += *(tmp + j);
+ }
+ E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
+ length -= j - prev_bytes;
+ offset++;
+ }
+
+ remaining = length & 0x3;
+ length -= remaining;
+
+ /* Calculate length in DWORDs */
+ length >>= 2;
+
+ /*
+ * The device driver writes the relevant command block into the
+ * ram area.
+ */
+ for (i = 0; i < length; i++) {
+ for (j = 0; j < sizeof(u32); j++) {
+ *(tmp + j) = *bufptr++;
+ *sum += *(tmp + j);
+ }
+
+ E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
+ }
+ if (remaining) {
+ for (j = 0; j < sizeof(u32); j++) {
+ if (j < remaining)
+ *(tmp + j) = *bufptr++;
+ else
+ *(tmp + j) = 0;
+
+ *sum += *(tmp + j);
+ }
+ E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface
+ * @length: size of the buffer
+ *
+ * Writes the DHCP information to the host interface.
+ **/
+s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
+{
+ struct e1000_host_mng_command_header hdr;
+ s32 ret_val;
+ u32 hicr;
+
+ hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
+ hdr.command_length = length;
+ hdr.reserved1 = 0;
+ hdr.reserved2 = 0;
+ hdr.checksum = 0;
+
+ /* Enable the host interface */
+ ret_val = e1000_mng_enable_host_if(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Populate the host interface with the contents of "buffer". */
+ ret_val = e1000_mng_host_if_write(hw, buffer, length,
+ sizeof(hdr), &(hdr.checksum));
+ if (ret_val)
+ return ret_val;
+
+ /* Write the manageability command header */
+ ret_val = e1000_mng_write_cmd_header(hw, &hdr);
+ if (ret_val)
+ return ret_val;
+
+ /* Tell the ARC a new command is pending. */
+ hicr = er32(HICR);
+ ew32(HICR, hicr | E1000_HICR_C);
+
+ return 0;
+}
+
+/**
+ * e1000e_enable_mng_pass_thru - Check if management passthrough is needed
+ * @hw: pointer to the HW structure
+ *
+ * Verifies whether the hardware needs to leave the interface enabled so
+ * that frames can be directed to and from the management interface.
+ **/
+bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+ u32 manc;
+ u32 fwsm, factps;
+ bool ret_val = false;
+
+ manc = er32(MANC);
+
+ if (!(manc & E1000_MANC_RCV_TCO_EN))
+ goto out;
+
+ if (hw->mac.has_fwsm) {
+ fwsm = er32(FWSM);
+ factps = er32(FACTPS);
+
+ if (!(factps & E1000_FACTPS_MNGCG) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
+ ret_val = true;
+ goto out;
+ }
+ } else if ((hw->mac.type == e1000_82574) ||
+ (hw->mac.type == e1000_82583)) {
+ u16 data;
+
+ factps = er32(FACTPS);
+ e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+
+ if (!(factps & E1000_FACTPS_MNGCG) &&
+ ((data & E1000_NVM_INIT_CTRL2_MNGM) ==
+ (e1000_mng_mode_pt << 13))) {
+ ret_val = true;
+ goto out;
+ }
+ } else if ((manc & E1000_MANC_SMBUS_EN) &&
+ !(manc & E1000_MANC_ASF_EN)) {
+ ret_val = true;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
new file mode 100644
index 000000000000..78c5d21fa34b
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -0,0 +1,6440 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/pm_qos_params.h>
+#include <linux/pm_runtime.h>
+#include <linux/aer.h>
+#include <linux/prefetch.h>
+
+#include "e1000.h"
+
+#define DRV_EXTRAVERSION "-k"
+
+#define DRV_VERSION "1.5.1" DRV_EXTRAVERSION
+char e1000e_driver_name[] = "e1000e";
+const char e1000e_driver_version[] = DRV_VERSION;
+
+static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
+
+static const struct e1000_info *e1000_info_tbl[] = {
+ [board_82571] = &e1000_82571_info,
+ [board_82572] = &e1000_82572_info,
+ [board_82573] = &e1000_82573_info,
+ [board_82574] = &e1000_82574_info,
+ [board_82583] = &e1000_82583_info,
+ [board_80003es2lan] = &e1000_es2_info,
+ [board_ich8lan] = &e1000_ich8_info,
+ [board_ich9lan] = &e1000_ich9_info,
+ [board_ich10lan] = &e1000_ich10_info,
+ [board_pchlan] = &e1000_pch_info,
+ [board_pch2lan] = &e1000_pch2_info,
+};
+
+struct e1000_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
+
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+
+static const struct e1000_reg_info e1000_reg_info_tbl[] = {
+
+ /* General Registers */
+ {E1000_CTRL, "CTRL"},
+ {E1000_STATUS, "STATUS"},
+ {E1000_CTRL_EXT, "CTRL_EXT"},
+
+ /* Interrupt Registers */
+ {E1000_ICR, "ICR"},
+
+ /* Rx Registers */
+ {E1000_RCTL, "RCTL"},
+ {E1000_RDLEN, "RDLEN"},
+ {E1000_RDH, "RDH"},
+ {E1000_RDT, "RDT"},
+ {E1000_RDTR, "RDTR"},
+ {E1000_RXDCTL(0), "RXDCTL"},
+ {E1000_ERT, "ERT"},
+ {E1000_RDBAL, "RDBAL"},
+ {E1000_RDBAH, "RDBAH"},
+ {E1000_RDFH, "RDFH"},
+ {E1000_RDFT, "RDFT"},
+ {E1000_RDFHS, "RDFHS"},
+ {E1000_RDFTS, "RDFTS"},
+ {E1000_RDFPC, "RDFPC"},
+
+ /* Tx Registers */
+ {E1000_TCTL, "TCTL"},
+ {E1000_TDBAL, "TDBAL"},
+ {E1000_TDBAH, "TDBAH"},
+ {E1000_TDLEN, "TDLEN"},
+ {E1000_TDH, "TDH"},
+ {E1000_TDT, "TDT"},
+ {E1000_TIDV, "TIDV"},
+ {E1000_TXDCTL(0), "TXDCTL"},
+ {E1000_TADV, "TADV"},
+ {E1000_TARC(0), "TARC"},
+ {E1000_TDFH, "TDFH"},
+ {E1000_TDFT, "TDFT"},
+ {E1000_TDFHS, "TDFHS"},
+ {E1000_TDFTS, "TDFTS"},
+ {E1000_TDFPC, "TDFPC"},
+
+ /* List Terminator */
+ {}
+};
+
+/*
+ * e1000_regdump - register printout routine
+ */
+static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
+{
+ int n = 0;
+ char rname[16];
+ u32 regs[8];
+
+ switch (reginfo->ofs) {
+ case E1000_RXDCTL(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_RXDCTL(n));
+ break;
+ case E1000_TXDCTL(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_TXDCTL(n));
+ break;
+ case E1000_TARC(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_TARC(n));
+ break;
+ default:
+ printk(KERN_INFO "%-15s %08x\n",
+ reginfo->name, __er32(hw, reginfo->ofs));
+ return;
+ }
+
+ snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
+ printk(KERN_INFO "%-15s ", rname);
+ for (n = 0; n < 2; n++)
+ printk(KERN_CONT "%08x ", regs[n]);
+ printk(KERN_CONT "\n");
+}
+
+/*
+ * e1000e_dump - Print registers, Tx-ring and Rx-ring
+ */
+static void e1000e_dump(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_reg_info *reginfo;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_tx_desc *tx_desc;
+ struct my_u0 {
+ u64 a;
+ u64 b;
+ } *u0;
+ struct e1000_buffer *buffer_info;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ union e1000_rx_desc_packet_split *rx_desc_ps;
+ union e1000_rx_desc_extended *rx_desc;
+ struct my_u1 {
+ u64 a;
+ u64 b;
+ u64 c;
+ u64 d;
+ } *u1;
+ u32 staterr;
+ int i = 0;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ printk(KERN_INFO "Device Name state "
+ "trans_start last_rx\n");
+ printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+ netdev->name, netdev->state, netdev->trans_start,
+ netdev->last_rx);
+ }
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ printk(KERN_INFO " Register Name Value\n");
+ for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ e1000_regdump(hw, reginfo);
+ }
+
+ /* Print Tx Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
+ " leng ntw timestamp\n");
+ buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
+ printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+ 0, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (unsigned long long)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (unsigned long long)buffer_info->time_stamp);
+
+ /* Print Tx Ring */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
+
+ /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
+ *
+ * Legacy Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] (Reserved on Write Back) |
+ * +--------------------------------------------------------------+
+ * 8 | Special | CSS | Status | CMD | CSO | Length |
+ * +--------------------------------------------------------------+
+ * 63 48 47 36 35 32 31 24 23 16 15 0
+ *
+ * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
+ * 63 48 47 40 39 32 31 16 15 8 7 0
+ * +----------------------------------------------------------------+
+ * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
+ * +----------------------------------------------------------------+
+ * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
+ * +----------------------------------------------------------------+
+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
+ *
+ * Extended Data Descriptor (DTYP=0x1)
+ * +----------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +----------------------------------------------------------------+
+ * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
+ * +----------------------------------------------------------------+
+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
+ */
+ printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Legacy format\n");
+ printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Ext Context format\n");
+ printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Ext Data format\n");
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
+ "%04X %3X %016llX %p",
+ (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
+ ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
+ (unsigned long long)le64_to_cpu(u0->a),
+ (unsigned long long)le64_to_cpu(u0->b),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->length, buffer_info->next_to_watch,
+ (unsigned long long)buffer_info->time_stamp,
+ buffer_info->skb);
+ if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC/U\n");
+ else if (i == tx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+ 16, 1, phys_to_virt(buffer_info->dma),
+ buffer_info->length, true);
+ }
+
+ /* Print Rx Ring Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ printk(KERN_INFO " %5d %5X %5X\n", 0,
+ rx_ring->next_to_use, rx_ring->next_to_clean);
+
+ /* Print Rx Ring */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
+ switch (adapter->rx_ps_pages) {
+ case 1:
+ case 2:
+ case 3:
+ /* [Extended] Packet Split Receive Descriptor Format
+ *
+ * +-----------------------------------------------------+
+ * 0 | Buffer Address 0 [63:0] |
+ * +-----------------------------------------------------+
+ * 8 | Buffer Address 1 [63:0] |
+ * +-----------------------------------------------------+
+ * 16 | Buffer Address 2 [63:0] |
+ * +-----------------------------------------------------+
+ * 24 | Buffer Address 3 [63:0] |
+ * +-----------------------------------------------------+
+ */
+ printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
+ "[buffer 1 63:0 ] "
+ "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
+ "[bi->skb] <-- Ext Pkt Split format\n");
+ /* [Extended] Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 13 12 8 7 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS |
+ * | Checksum | Ident | | Queue | | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+ printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
+ "[vl l0 ee es] "
+ "[ l3 l2 l1 hs] [reserved ] ---------------- "
+ "[bi->skb] <-- Ext Rx Write-Back format\n");
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
+ u1 = (struct my_u1 *)rx_desc_ps;
+ staterr =
+ le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+ if (staterr & E1000_RXD_STAT_DD) {
+ /* Descriptor Done */
+ printk(KERN_INFO "RWB[0x%03X] %016llX "
+ "%016llX %016llX %016llX "
+ "---------------- %p", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ buffer_info->skb);
+ } else {
+ printk(KERN_INFO "R [0x%03X] %016llX "
+ "%016llX %016llX %016llX %016llX %p", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb);
+
+ if (netif_msg_pktdata(adapter))
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(buffer_info->dma),
+ adapter->rx_ps_bsize0, true);
+ }
+
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+ }
+ break;
+ default:
+ case 0:
+ /* Extended Receive Descriptor (Read) Format
+ *
+ * +-----------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +-----------------------------------------------------+
+ * 8 | Reserved |
+ * +-----------------------------------------------------+
+ */
+ printk(KERN_INFO "R [desc] [buf addr 63:0 ] "
+ "[reserved 63:0 ] [bi->dma ] "
+ "[bi->skb] <-- Ext (Read) format\n");
+ /* Extended Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 24 23 4 3 0
+ * +------------------------------------------------------+
+ * | RSS Hash | | | |
+ * 0 +-------------------+ Rsvd | Reserved | MRQ RSS |
+ * | Packet | IP | | | Type |
+ * | Checksum | Ident | | | |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+ printk(KERN_INFO "RWB[desc] [cs ipid mrq] "
+ "[vt ln xe xs] "
+ "[bi->skb] <-- Ext (Write-Back) format\n");
+
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+ u1 = (struct my_u1 *)rx_desc;
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ if (staterr & E1000_RXD_STAT_DD) {
+ /* Descriptor Done */
+ printk(KERN_INFO "RWB[0x%03X] %016llX "
+ "%016llX ---------------- %p", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ buffer_info->skb);
+ } else {
+ printk(KERN_INFO "R [0x%03X] %016llX "
+ "%016llX %016llX %p", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb);
+
+ if (netif_msg_pktdata(adapter))
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16,
+ 1,
+ phys_to_virt
+ (buffer_info->dma),
+ adapter->rx_buffer_len,
+ true);
+ }
+
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+ }
+ }
+
+exit:
+ return;
+}
+
+/**
+ * e1000_desc_unused - calculate the number of unused descriptors
+ * @ring: pointer to the descriptor ring
+ **/
+static int e1000_desc_unused(struct e1000_ring *ring)
+{
+ if (ring->next_to_clean > ring->next_to_use)
+ return ring->next_to_clean - ring->next_to_use - 1;
+
+ return ring->count + ring->next_to_clean - ring->next_to_use - 1;
+}
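+
+/*
+ * Worked example (illustrative): on a 256-entry ring with
+ * next_to_clean = 10 and next_to_use = 250, the ring has wrapped, so
+ * 256 + 10 - 250 - 1 = 15 descriptors are unused. One entry is always kept
+ * free so that next_to_use == next_to_clean unambiguously means the ring
+ * is empty rather than full.
+ */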
+
+/**
+ * e1000_receive_skb - helper function to handle Rx indications
+ * @adapter: board private structure
+ * @netdev: pointer to the net device structure
+ * @skb: pointer to sk_buff to be indicated to stack
+ * @status: descriptor status field as written by hardware
+ * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
+ **/
+static void e1000_receive_skb(struct e1000_adapter *adapter,
+ struct net_device *netdev, struct sk_buff *skb,
+ u8 status, __le16 vlan)
+{
+ u16 tag = le16_to_cpu(vlan);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (status & E1000_RXD_STAT_VP)
+ __vlan_hwaccel_put_tag(skb, tag);
+
+ napi_gro_receive(&adapter->napi, skb);
+}
+
+/**
+ * e1000_rx_checksum - Receive Checksum Offload
+ * @adapter: board private structure
+ * @status_err: receive descriptor status and error fields
+ * @csum: receive descriptor csum field
+ * @sk_buff: socket buffer with received data
+ **/
+static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
+ u32 csum, struct sk_buff *skb)
+{
+ u16 status = (u16)status_err;
+ u8 errors = (u8)(status_err >> 24);
+
+ skb_checksum_none_assert(skb);
+
+ /* Ignore Checksum bit is set */
+ if (status & E1000_RXD_STAT_IXSM)
+ return;
+ /* TCP/UDP checksum error bit is set */
+ if (errors & E1000_RXD_ERR_TCPE) {
+ /* let the stack verify checksum errors */
+ adapter->hw_csum_err++;
+ return;
+ }
+
+ /* TCP/UDP Checksum has not been calculated */
+ if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
+ return;
+
+ /* It must be a TCP or UDP packet with a valid checksum */
+ if (status & E1000_RXD_STAT_TCPCS) {
+ /* TCP checksum is good */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ /*
+ * IP fragment with UDP payload
+ * Hardware complements the payload checksum, so we undo it
+ * and then put the value in host order for further stack use.
+ */
+ __sum16 sum = (__force __sum16)htons(csum);
+ skb->csum = csum_unfold(~sum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+ adapter->hw_csum_good++;
+}
+
+/**
+ * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
+ * @hw: pointer to the HW structure
+ * @tail: address of tail descriptor register
+ * @i: value to write to tail descriptor register
+ *
+ * When updating the tail register, the ME could be accessing Host CSR
+ * registers at the same time. Normally, this is handled in h/w by an
+ * arbiter, but some parts have a bug that acknowledges Host accesses later
+ * than it should, which could leave the descriptor register with an
+ * incorrect value. Work around this by checking the FWSM register, which
+ * has bit 24 set while the ME is accessing Host CSR registers; if it is
+ * set, wait and retry a number of times.
+ **/
+static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem *tail,
+					unsigned int i)
+{
+ unsigned int j = 0;
+
+ while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
+ (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
+ udelay(50);
+
+ writel(i, tail);
+
+ if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
+ return E1000_ERR_SWFW_SYNC;
+
+ return 0;
+}
+
+static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i)
+{
+ u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (e1000e_update_tail_wa(hw, tail, i)) {
+ u32 rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ e_err("ME firmware caused invalid RDT - resetting\n");
+ schedule_work(&adapter->reset_task);
+ }
+}
+
+static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i)
+{
+ u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (e1000e_update_tail_wa(hw, tail, i)) {
+ u32 tctl = er32(TCTL);
+ ew32(TCTL, tctl & ~E1000_TCTL_EN);
+ e_err("ME firmware caused invalid TDT - resetting\n");
+ schedule_work(&adapter->reset_task);
+ }
+}
+
+/**
+ * e1000_alloc_rx_buffers - Replace used receive buffers
+ * @adapter: address of board private structure
+ * @cleaned_count: number of buffers to allocate this pass
+ * @gfp: GFP flags for the allocations
+ **/
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+ int cleaned_count, gfp_t gfp)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ union e1000_rx_desc_extended *rx_desc;
+ struct e1000_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz = adapter->rx_buffer_len;
+
+ i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (cleaned_count--) {
+ skb = buffer_info->skb;
+ if (skb) {
+ skb_trim(skb, 0);
+ goto map_skb;
+ }
+
+ skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
+ if (!skb) {
+ /* Better luck next round */
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+
+ buffer_info->skb = skb;
+map_skb:
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
+ adapter->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ dev_err(&pdev->dev, "Rx DMA map failed\n");
+ adapter->rx_dma_failed++;
+ break;
+ }
+
+ rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+ rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
+
+ if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_rdt_wa(adapter, i);
+ else
+ writel(i, adapter->hw.hw_addr + rx_ring->tail);
+ }
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
+ }
+
+ rx_ring->next_to_use = i;
+}
+
+/**
+ * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
+ * @adapter: address of board private structure
+ * @cleaned_count: number of buffers to allocate this pass
+ * @gfp: GFP flags for the allocations
+ **/
+static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
+ int cleaned_count, gfp_t gfp)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ union e1000_rx_desc_packet_split *rx_desc;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ struct e1000_buffer *buffer_info;
+ struct e1000_ps_page *ps_page;
+ struct sk_buff *skb;
+ unsigned int i, j;
+
+ i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (cleaned_count--) {
+ rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+
+ for (j = 0; j < PS_PAGE_BUFFERS; j++) {
+ ps_page = &buffer_info->ps_pages[j];
+ if (j >= adapter->rx_ps_pages) {
+ /* all unused desc entries get hw null ptr */
+ rx_desc->read.buffer_addr[j + 1] =
+ ~cpu_to_le64(0);
+ continue;
+ }
+ if (!ps_page->page) {
+ ps_page->page = alloc_page(gfp);
+ if (!ps_page->page) {
+ adapter->alloc_rx_buff_failed++;
+ goto no_buffers;
+ }
+ ps_page->dma = dma_map_page(&pdev->dev,
+ ps_page->page,
+ 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ ps_page->dma)) {
+ dev_err(&adapter->pdev->dev,
+ "Rx DMA page map failed\n");
+ adapter->rx_dma_failed++;
+ goto no_buffers;
+ }
+ }
+ /*
+ * Refresh the desc even if buffer_addrs
+ * didn't change because each write-back
+ * erases this info.
+ */
+ rx_desc->read.buffer_addr[j + 1] =
+ cpu_to_le64(ps_page->dma);
+ }
+
+ skb = __netdev_alloc_skb_ip_align(netdev,
+ adapter->rx_ps_bsize0,
+ gfp);
+
+ if (!skb) {
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+
+ buffer_info->skb = skb;
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
+ adapter->rx_ps_bsize0,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ dev_err(&pdev->dev, "Rx DMA map failed\n");
+ adapter->rx_dma_failed++;
+ /* cleanup skb */
+ dev_kfree_skb_any(skb);
+ buffer_info->skb = NULL;
+ break;
+ }
+
+ rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
+
+ if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_rdt_wa(adapter, i << 1);
+ else
+ writel(i << 1,
+ adapter->hw.hw_addr + rx_ring->tail);
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
+ }
+
+no_buffers:
+ rx_ring->next_to_use = i;
+}
+
+/**
+ * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
+ * @adapter: address of board private structure
+ * @cleaned_count: number of buffers to allocate this pass
+ * @gfp: GFP flags for the allocations
+ **/
+
+static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
+ int cleaned_count, gfp_t gfp)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ union e1000_rx_desc_extended *rx_desc;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ struct e1000_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz = 256 - 16 /* for skb_reserve */;
+
+ i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (cleaned_count--) {
+ skb = buffer_info->skb;
+ if (skb) {
+ skb_trim(skb, 0);
+ goto check_page;
+ }
+
+ skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+
+ buffer_info->skb = skb;
+check_page:
+ /* allocate a new page if necessary */
+ if (!buffer_info->page) {
+ buffer_info->page = alloc_page(gfp);
+ if (unlikely(!buffer_info->page)) {
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+ }
+
+ if (!buffer_info->dma)
+ buffer_info->dma = dma_map_page(&pdev->dev,
+ buffer_info->page, 0,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
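+ /*
+ * Steady state here is mostly recycling: the skb survives from
+ * the previous use of this slot, and the page survives whenever
+ * the cleanup path copied or discarded the data instead of
+ * consuming it; only a missing DMA mapping is re-established
+ * above.
+ */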
+
+ rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+ rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
+
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
+ }
+
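+ /*
+ * Unlike the other alloc routines, this path backs i up by one
+ * before the write below, so the tail register points at the
+ * last initialized descriptor rather than at next_to_use.
+ */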
+ if (likely(rx_ring->next_to_use != i)) {
+ rx_ring->next_to_use = i;
+ if (unlikely(i-- == 0))
+ i = (rx_ring->count - 1);
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_rdt_wa(adapter, i);
+ else
+ writel(i, adapter->hw.hw_addr + rx_ring->tail);
+ }
+}
+
+/**
+ * e1000_clean_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ * @work_done: output count of packets cleaned
+ * @work_to_do: maximum number of packets to clean
+ *
+ * The return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned.
+ **/
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ int *work_done, int work_to_do)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ union e1000_rx_desc_extended *rx_desc, *next_rxd;
+ struct e1000_buffer *buffer_info, *next_buffer;
+ u32 length, staterr;
+ unsigned int i;
+ int cleaned_count = 0;
+ bool cleaned = false;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (staterr & E1000_RXD_STAT_DD) {
+ struct sk_buff *skb;
+
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+ rmb(); /* read descriptor and rx_buffer_info after status DD */
+
+ skb = buffer_info->skb;
+ buffer_info->skb = NULL;
+
+ prefetch(skb->data - NET_IP_ALIGN);
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
+ prefetch(next_rxd);
+
+ next_buffer = &rx_ring->buffer_info[i];
+
+ cleaned = true;
+ cleaned_count++;
+ dma_unmap_single(&pdev->dev,
+ buffer_info->dma,
+ adapter->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+
+ length = le16_to_cpu(rx_desc->wb.upper.length);
+
+ /*
+ * !EOP means multiple descriptors were used to store a single
+ * packet; if that's the case we need to toss it. In fact, we
+ * need to toss every packet with the EOP bit clear and the
+ * next frame that _does_ have the EOP bit set, as it is by
+ * definition only a frame fragment.
+ */
+ if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
+ adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+ if (adapter->flags2 & FLAG2_IS_DISCARDING) {
+ /* All receives must fit into a single buffer */
+ e_dbg("Receive packet consumed multiple buffers\n");
+ /* recycle */
+ buffer_info->skb = skb;
+ if (staterr & E1000_RXD_STAT_EOP)
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
+ goto next_desc;
+ }
+
+ if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
+ /* recycle */
+ buffer_info->skb = skb;
+ goto next_desc;
+ }
+
+ /* adjust length to remove Ethernet CRC */
+ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
+ length -= 4;
+
+ total_rx_bytes += length;
+ total_rx_packets++;
+
+ /*
+ * Code added for copybreak; this should improve
+ * performance for small packets with large amounts
+ * of reassembly being done in the stack.
+ */
+ if (length < copybreak) {
+ struct sk_buff *new_skb =
+ netdev_alloc_skb_ip_align(netdev, length);
+ if (new_skb) {
+ skb_copy_to_linear_data_offset(new_skb,
+ -NET_IP_ALIGN,
+ (skb->data -
+ NET_IP_ALIGN),
+ (length +
+ NET_IP_ALIGN));
+ /* save the skb in buffer_info as good */
+ buffer_info->skb = skb;
+ skb = new_skb;
+ }
+ /* else just continue with the old one */
+ }
+ /* end copybreak code */
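+ /*
+ * (Illustration, assuming the module's usual copybreak default
+ * of 256 bytes: a 60-byte TCP ACK is copied into a fresh small
+ * skb above and the full-sized buffer is recycled via
+ * buffer_info->skb, trading one memcpy for an skb allocation
+ * and DMA map on the next refill.)
+ */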
+ skb_put(skb, length);
+
+ /* Receive Checksum Offload */
+ e1000_rx_checksum(adapter, staterr,
+ le16_to_cpu(rx_desc->wb.lower.hi_dword.
+ csum_ip.csum), skb);
+
+ e1000_receive_skb(adapter, netdev, skb, staterr,
+ rx_desc->wb.upper.vlan);
+
+next_desc:
+ rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
+ adapter->alloc_rx_buf(adapter, cleaned_count,
+ GFP_ATOMIC);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+ rx_ring->next_to_clean = i;
+
+ cleaned_count = e1000_desc_unused(rx_ring);
+ if (cleaned_count)
+ adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
+
+ adapter->total_rx_bytes += total_rx_bytes;
+ adapter->total_rx_packets += total_rx_packets;
+ return cleaned;
+}
+
+static void e1000_put_txbuf(struct e1000_adapter *adapter,
+ struct e1000_buffer *buffer_info)
+{
+ if (buffer_info->dma) {
+ if (buffer_info->mapped_as_page)
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
+ buffer_info->dma = 0;
+ }
+ if (buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ buffer_info->time_stamp = 0;
+}
+
+static void e1000_print_hw_hang(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter,
+ print_hang_task);
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ unsigned int i = tx_ring->next_to_clean;
+ unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
+ struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 phy_status, phy_1000t_status, phy_ext_status;
+ u16 pci_status;
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
+ e1e_rphy(hw, PHY_STATUS, &phy_status);
+ e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
+ e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
+
+ pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
+
+ /* detected Hardware unit hang */
+ e_err("Detected Hardware Unit Hang:\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]:\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%x>\n"
+ " jiffies <%lx>\n"
+ " next_to_watch.status <%x>\n"
+ "MAC Status <%x>\n"
+ "PHY Status <%x>\n"
+ "PHY 1000BASE-T Status <%x>\n"
+ "PHY Extended Status <%x>\n"
+ "PCI Status <%x>\n",
+ readl(adapter->hw.hw_addr + tx_ring->head),
+ readl(adapter->hw.hw_addr + tx_ring->tail),
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_ring->buffer_info[eop].time_stamp,
+ eop,
+ jiffies,
+ eop_desc->upper.fields.status,
+ er32(STATUS),
+ phy_status,
+ phy_1000t_status,
+ phy_ext_status,
+ pci_status);
+}
+
+/**
+ * e1000_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ *
+ * The return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned.
+ **/
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_tx_desc *tx_desc, *eop_desc;
+ struct e1000_buffer *buffer_info;
+ unsigned int i, eop;
+ unsigned int count = 0;
+ unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+
+ i = tx_ring->next_to_clean;
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = E1000_TX_DESC(*tx_ring, eop);
+
+ while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+ (count < tx_ring->count)) {
+ bool cleaned = false;
+ rmb(); /* read buffer_info after eop_desc */
+ for (; !cleaned; count++) {
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ cleaned = (i == eop);
+
+ if (cleaned) {
+ total_tx_packets += buffer_info->segs;
+ total_tx_bytes += buffer_info->bytecount;
+ }
+
+ e1000_put_txbuf(adapter, buffer_info);
+ tx_desc->upper.data = 0;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+ if (i == tx_ring->next_to_use)
+ break;
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = E1000_TX_DESC(*tx_ring, eop);
+ }
+
+ tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD 32
+ if (count && netif_carrier_ok(netdev) &&
+ e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+
+ if (netif_queue_stopped(netdev) &&
+ !(test_bit(__E1000_DOWN, &adapter->state))) {
+ netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ }
+ }
+
+ if (adapter->detect_tx_hung) {
+ /*
+ * Detect a transmit hang in hardware, this serializes the
+ * check with the clearing of time_stamp and movement of i
+ */
+ adapter->detect_tx_hung = 0;
+ if (tx_ring->buffer_info[i].time_stamp &&
+ time_after(jiffies, tx_ring->buffer_info[i].time_stamp
+ + (adapter->tx_timeout_factor * HZ)) &&
+ !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+ schedule_work(&adapter->print_hang_task);
+ netif_stop_queue(netdev);
+ }
+ }
+ adapter->total_tx_bytes += total_tx_bytes;
+ adapter->total_tx_packets += total_tx_packets;
+ return count < tx_ring->count;
+}
+
+/**
+ * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
+ * @adapter: board private structure
+ * @work_done: output count of packets cleaned
+ * @work_to_do: maximum number of packets to clean
+ *
+ * The return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned.
+ **/
+static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+ int *work_done, int work_to_do)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ struct e1000_buffer *buffer_info, *next_buffer;
+ struct e1000_ps_page *ps_page;
+ struct sk_buff *skb;
+ unsigned int i, j;
+ u32 length, staterr;
+ int cleaned_count = 0;
+ bool cleaned = false;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (staterr & E1000_RXD_STAT_DD) {
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+ skb = buffer_info->skb;
+ rmb(); /* read descriptor and rx_buffer_info after status DD */
+
+ /* in the packet split case this is header only */
+ prefetch(skb->data - NET_IP_ALIGN);
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
+ prefetch(next_rxd);
+
+ next_buffer = &rx_ring->buffer_info[i];
+
+ cleaned = true;
+ cleaned_count++;
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+
+ /* see !EOP comment in other Rx routine */
+ if (!(staterr & E1000_RXD_STAT_EOP))
+ adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+ if (adapter->flags2 & FLAG2_IS_DISCARDING) {
+ e_dbg("Packet Split buffers didn't pick up the full "
+ "packet\n");
+ dev_kfree_skb_irq(skb);
+ if (staterr & E1000_RXD_STAT_EOP)
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
+ goto next_desc;
+ }
+
+ if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
+ length = le16_to_cpu(rx_desc->wb.middle.length0);
+
+ if (!length) {
+ e_dbg("Last part of the packet spanning multiple "
+ "descriptors\n");
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
+ /* Good Receive */
+ skb_put(skb, length);
+
+ {
+ /*
+ * this looks ugly, but it seems compiler issues make it
+ * more efficient than reusing j
+ */
+ int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
+
+ /*
+ * page alloc/put takes too long and affects small packet
+ * throughput, so unsplit small packets and save the alloc/put;
+ * kmap_* is only valid to call in softirq (napi) context
+ */
+ if (l1 && (l1 <= copybreak) &&
+ ((length + l1) <= adapter->rx_ps_bsize0)) {
+ u8 *vaddr;
+
+ ps_page = &buffer_info->ps_pages[0];
+
+ /*
+ * there is no documentation about how to call
+ * kmap_atomic, so we can't hold the mapping
+ * very long
+ */
+ dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
+ memcpy(skb_tail_pointer(skb), vaddr, l1);
+ kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
+ dma_sync_single_for_device(&pdev->dev, ps_page->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* remove the CRC */
+ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
+ l1 -= 4;
+
+ skb_put(skb, l1);
+ goto copydone;
+ } /* if */
+ }
+
+ for (j = 0; j < PS_PAGE_BUFFERS; j++) {
+ length = le16_to_cpu(rx_desc->wb.upper.length[j]);
+ if (!length)
+ break;
+
+ ps_page = &buffer_info->ps_pages[j];
+ dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ ps_page->dma = 0;
+ skb_fill_page_desc(skb, j, ps_page->page, 0, length);
+ ps_page->page = NULL;
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+ }
+
+ /* strip the ethernet crc, problem is we're using pages now so
+ * this whole operation can get a little cpu intensive
+ */
+ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
+ pskb_trim(skb, skb->len - 4);
+
+copydone:
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ e1000_rx_checksum(adapter, staterr, le16_to_cpu(
+ rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
+
+ if (rx_desc->wb.upper.header_status &
+ cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
+ adapter->rx_hdr_split++;
+
+ e1000_receive_skb(adapter, netdev, skb,
+ staterr, rx_desc->wb.middle.vlan);
+
+next_desc:
+ rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
+ buffer_info->skb = NULL;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
+ adapter->alloc_rx_buf(adapter, cleaned_count,
+ GFP_ATOMIC);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+
+ staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
+ }
+ rx_ring->next_to_clean = i;
+
+ cleaned_count = e1000_desc_unused(rx_ring);
+ if (cleaned_count)
+ adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
+
+ adapter->total_rx_bytes += total_rx_bytes;
+ adapter->total_rx_packets += total_rx_packets;
+ return cleaned;
+}
+
+/**
+ * e1000_consume_page - hand page ownership from buffer_info to an skb
+ * @bi: buffer that currently owns the page
+ * @skb: skb that takes over the page as a fragment
+ * @length: number of bytes of packet data in the page
+ **/
+static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
+ u16 length)
+{
+ bi->page = NULL;
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+}
+
+/**
+ * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ * @work_done: output count of packets cleaned
+ * @work_to_do: maximum number of packets to clean
+ *
+ * The return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned.
+ **/
+static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+ int *work_done, int work_to_do)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ union e1000_rx_desc_extended *rx_desc, *next_rxd;
+ struct e1000_buffer *buffer_info, *next_buffer;
+ u32 length, staterr;
+ unsigned int i;
+ int cleaned_count = 0;
+ bool cleaned = false;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (staterr & E1000_RXD_STAT_DD) {
+ struct sk_buff *skb;
+
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+ rmb(); /* read descriptor and rx_buffer_info after status DD */
+
+ skb = buffer_info->skb;
+ buffer_info->skb = NULL;
+
+ ++i;
+ if (i == rx_ring->count)
+ i = 0;
+ next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
+ prefetch(next_rxd);
+
+ next_buffer = &rx_ring->buffer_info[i];
+
+ cleaned = true;
+ cleaned_count++;
+ dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+
+ length = le16_to_cpu(rx_desc->wb.upper.length);
+
+ /* errors is only valid for DD + EOP descriptors */
+ if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
+ (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK))) {
+ /* recycle both page and skb */
+ buffer_info->skb = skb;
+ /* an error means any chain goes out the window too */
+ if (rx_ring->rx_skb_top)
+ dev_kfree_skb_irq(rx_ring->rx_skb_top);
+ rx_ring->rx_skb_top = NULL;
+ goto next_desc;
+ }
+
+#define rxtop (rx_ring->rx_skb_top)
+ if (!(staterr & E1000_RXD_STAT_EOP)) {
+ /* this descriptor is only the beginning (or middle) */
+ if (!rxtop) {
+ /* this is the beginning of a chain */
+ rxtop = skb;
+ skb_fill_page_desc(rxtop, 0, buffer_info->page,
+ 0, length);
+ } else {
+ /* this is the middle of a chain */
+ skb_fill_page_desc(rxtop,
+ skb_shinfo(rxtop)->nr_frags,
+ buffer_info->page, 0, length);
+ /* re-use the skb, only consumed the page */
+ buffer_info->skb = skb;
+ }
+ e1000_consume_page(buffer_info, rxtop, length);
+ goto next_desc;
+ } else {
+ if (rxtop) {
+ /* end of the chain */
+ skb_fill_page_desc(rxtop,
+ skb_shinfo(rxtop)->nr_frags,
+ buffer_info->page, 0, length);
+ /* re-use the current skb, we only consumed the
+ * page */
+ buffer_info->skb = skb;
+ skb = rxtop;
+ rxtop = NULL;
+ e1000_consume_page(buffer_info, skb, length);
+ } else {
+ /* no chain, got EOP, this buf is the packet
+ * copybreak to save the put_page/alloc_page */
+ if (length <= copybreak &&
+ skb_tailroom(skb) >= length) {
+ u8 *vaddr;
+ vaddr = kmap_atomic(buffer_info->page,
+ KM_SKB_DATA_SOFTIRQ);
+ memcpy(skb_tail_pointer(skb), vaddr,
+ length);
+ kunmap_atomic(vaddr,
+ KM_SKB_DATA_SOFTIRQ);
+ /* re-use the page, so don't erase
+ * buffer_info->page */
+ skb_put(skb, length);
+ } else {
+ skb_fill_page_desc(skb, 0,
+ buffer_info->page, 0,
+ length);
+ e1000_consume_page(buffer_info, skb,
+ length);
+ }
+ }
+ }
+
+ /* Receive Checksum Offload XXX recompute due to CRC strip? */
+ e1000_rx_checksum(adapter, staterr,
+ le16_to_cpu(rx_desc->wb.lower.hi_dword.
+ csum_ip.csum), skb);
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ /* eth type trans needs skb->data to point to something */
+ if (!pskb_may_pull(skb, ETH_HLEN)) {
+ e_err("pskb_may_pull failed.\n");
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
+ e1000_receive_skb(adapter, netdev, skb, staterr,
+ rx_desc->wb.upper.vlan);
+
+next_desc:
+ rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+ adapter->alloc_rx_buf(adapter, cleaned_count,
+ GFP_ATOMIC);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+ rx_ring->next_to_clean = i;
+
+ cleaned_count = e1000_desc_unused(rx_ring);
+ if (cleaned_count)
+ adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
+
+ adapter->total_rx_bytes += total_rx_bytes;
+ adapter->total_rx_packets += total_rx_packets;
+ return cleaned;
+}
+
+/**
+ * e1000_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ **/
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
+{
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ struct e1000_buffer *buffer_info;
+ struct e1000_ps_page *ps_page;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned int i, j;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ if (buffer_info->dma) {
+ if (adapter->clean_rx == e1000_clean_rx_irq)
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_ps_bsize0,
+ DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+ }
+
+ if (buffer_info->page) {
+ put_page(buffer_info->page);
+ buffer_info->page = NULL;
+ }
+
+ if (buffer_info->skb) {
+ dev_kfree_skb(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+
+ for (j = 0; j < PS_PAGE_BUFFERS; j++) {
+ ps_page = &buffer_info->ps_pages[j];
+ if (!ps_page->page)
+ break;
+ dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ ps_page->dma = 0;
+ put_page(ps_page->page);
+ ps_page->page = NULL;
+ }
+ }
+
+ /* there also may be some cached data from a chained receive */
+ if (rx_ring->rx_skb_top) {
+ dev_kfree_skb(rx_ring->rx_skb_top);
+ rx_ring->rx_skb_top = NULL;
+ }
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
+
+ writel(0, adapter->hw.hw_addr + rx_ring->head);
+ writel(0, adapter->hw.hw_addr + rx_ring->tail);
+}
+
+static void e1000e_downshift_workaround(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter, downshift_task);
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
+ e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
+}
+
+/**
+ * e1000_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t e1000_intr_msi(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 icr = er32(ICR);
+
+ /*
+ * read ICR disables interrupts using IAM
+ */
+
+ if (icr & E1000_ICR_LSC) {
+ hw->mac.get_link_status = 1;
+ /*
+ * ICH8 workaround-- Call gig speed drop workaround on cable
+ * disconnect (LSC) before accessing any PHY registers
+ */
+ if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
+ (!(er32(STATUS) & E1000_STATUS_LU)))
+ schedule_work(&adapter->downshift_task);
+
+ /*
+ * 80003ES2LAN workaround-- For packet buffer work-around on
+ * link down event; disable receives here in the ISR and reset
+ * adapter in watchdog
+ */
+ if (netif_carrier_ok(netdev) &&
+ adapter->flags & FLAG_RX_NEEDS_RESTART) {
+ /* disable receives */
+ u32 rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ adapter->flags |= FLAG_RX_RESTART_NOW;
+ }
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+ if (napi_schedule_prep(&adapter->napi)) {
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __napi_schedule(&adapter->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * e1000_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t e1000_intr(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl, icr = er32(ICR);
+
+ if (!icr || test_bit(__E1000_DOWN, &adapter->state))
+ return IRQ_NONE; /* Not our interrupt */
+
+ /*
+ * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+ * not set, then the adapter didn't send an interrupt
+ */
+ if (!(icr & E1000_ICR_INT_ASSERTED))
+ return IRQ_NONE;
+
+ /*
+ * Interrupt Auto-Mask...upon reading ICR,
+ * interrupts are masked. No need for the
+ * IMC write
+ */
+
+ if (icr & E1000_ICR_LSC) {
+ hw->mac.get_link_status = 1;
+ /*
+ * ICH8 workaround-- Call gig speed drop workaround on cable
+ * disconnect (LSC) before accessing any PHY registers
+ */
+ if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
+ (!(er32(STATUS) & E1000_STATUS_LU)))
+ schedule_work(&adapter->downshift_task);
+
+ /*
+ * 80003ES2LAN workaround--
+ * For packet buffer work-around on link down event;
+ * disable receives here in the ISR and
+ * reset adapter in watchdog
+ */
+ if (netif_carrier_ok(netdev) &&
+ (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
+ /* disable receives */
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ adapter->flags |= FLAG_RX_RESTART_NOW;
+ }
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+ if (napi_schedule_prep(&adapter->napi)) {
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __napi_schedule(&adapter->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t e1000_msix_other(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 icr = er32(ICR);
+
+ if (!(icr & E1000_ICR_INT_ASSERTED)) {
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ ew32(IMS, E1000_IMS_OTHER);
+ return IRQ_NONE;
+ }
+
+ if (icr & adapter->eiac_mask)
+ ew32(ICS, (icr & adapter->eiac_mask));
+
+ if (icr & E1000_ICR_OTHER) {
+ if (!(icr & E1000_ICR_LSC))
+ goto no_link_interrupt;
+ hw->mac.get_link_status = 1;
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+no_link_interrupt:
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+
+ if (!e1000_clean_tx_irq(adapter))
+ /* Ring was not completely cleaned, so fire another interrupt */
+ ew32(ICS, tx_ring->ims_val);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ /* Write the ITR value calculated at the end of the
+ * previous interrupt.
+ */
+ if (adapter->rx_ring->set_itr) {
+ writel(1000000000 / (adapter->rx_ring->itr_val * 256),
+ adapter->hw.hw_addr + adapter->rx_ring->itr_register);
+ adapter->rx_ring->set_itr = 0;
+ }
+
+ if (napi_schedule_prep(&adapter->napi)) {
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __napi_schedule(&adapter->napi);
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * e1000_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * e1000_configure_msix sets up the hardware to properly
+ * generate MSI-X interrupts.
+ **/
+static void e1000_configure_msix(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ int vector = 0;
+ u32 ctrl_ext, ivar = 0;
+
+ adapter->eiac_mask = 0;
+
+ /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
+ if (hw->mac.type == e1000_82574) {
+ u32 rfctl = er32(RFCTL);
+ rfctl |= E1000_RFCTL_ACK_DIS;
+ ew32(RFCTL, rfctl);
+ }
+
+#define E1000_IVAR_INT_ALLOC_VALID 0x8
+ /* Configure Rx vector */
+ rx_ring->ims_val = E1000_IMS_RXQ0;
+ adapter->eiac_mask |= rx_ring->ims_val;
+ if (rx_ring->itr_val)
+ writel(1000000000 / (rx_ring->itr_val * 256),
+ hw->hw_addr + rx_ring->itr_register);
+ else
+ writel(1, hw->hw_addr + rx_ring->itr_register);
+ ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
+
+ /* Configure Tx vector */
+ tx_ring->ims_val = E1000_IMS_TXQ0;
+ vector++;
+ if (tx_ring->itr_val)
+ writel(1000000000 / (tx_ring->itr_val * 256),
+ hw->hw_addr + tx_ring->itr_register);
+ else
+ writel(1, hw->hw_addr + tx_ring->itr_register);
+ adapter->eiac_mask |= tx_ring->ims_val;
+ ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
+
+ /* set vector for Other Causes, e.g. link changes */
+ vector++;
+ ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
+ if (rx_ring->itr_val)
+ writel(1000000000 / (rx_ring->itr_val * 256),
+ hw->hw_addr + E1000_EITR_82574(vector));
+ else
+ writel(1, hw->hw_addr + E1000_EITR_82574(vector));
+
+ /* Cause Tx interrupts on every write back */
+ ivar |= (1 << 31);
+
+ ew32(IVAR, ivar);
+
+ /* enable MSI-X PBA support */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
+
+ /* Auto-Mask Other interrupts upon ICR read */
+#define E1000_EIAC_MASK_82574 0x01F00000
+ ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
+ ctrl_ext |= E1000_CTRL_EXT_EIAME;
+ ew32(CTRL_EXT, ctrl_ext);
+ e1e_flush();
+}
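+
+/*
+ * For reference, with the single Rx/Tx queue layout above (Rx on
+ * vector 0, Tx on vector 1, other causes on vector 2), the final
+ * IVAR value works out to:
+ * (0x8 | 0) | ((0x8 | 1) << 8) | ((0x8 | 2) << 16) | (1 << 31)
+ * = 0x800a0908
+ */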
+
+void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
+{
+ if (adapter->msix_entries) {
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else if (adapter->flags & FLAG_MSI_ENABLED) {
+ pci_disable_msi(adapter->pdev);
+ adapter->flags &= ~FLAG_MSI_ENABLED;
+ }
+}
+
+/**
+ * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
+ * @adapter: board private structure
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
+{
+ int err;
+ int i;
+
+ switch (adapter->int_mode) {
+ case E1000E_INT_MODE_MSIX:
+ if (adapter->flags & FLAG_HAS_MSIX) {
+ adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
+ adapter->msix_entries = kcalloc(adapter->num_vectors,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (adapter->msix_entries) {
+ for (i = 0; i < adapter->num_vectors; i++)
+ adapter->msix_entries[i].entry = i;
+
+ err = pci_enable_msix(adapter->pdev,
+ adapter->msix_entries,
+ adapter->num_vectors);
+ if (err == 0)
+ return;
+ }
+ /* MSI-X failed, so fall through and try MSI */
+ e_err("Failed to initialize MSI-X interrupts. "
+ "Falling back to MSI interrupts.\n");
+ e1000e_reset_interrupt_capability(adapter);
+ }
+ adapter->int_mode = E1000E_INT_MODE_MSI;
+ /* Fall through */
+ case E1000E_INT_MODE_MSI:
+ if (!pci_enable_msi(adapter->pdev)) {
+ adapter->flags |= FLAG_MSI_ENABLED;
+ } else {
+ adapter->int_mode = E1000E_INT_MODE_LEGACY;
+ e_err("Failed to initialize MSI interrupts. Falling "
+ "back to legacy interrupts.\n");
+ }
+ /* Fall through */
+ case E1000E_INT_MODE_LEGACY:
+ /* Don't do anything; this is the system default */
+ break;
+ }
+
+ /* store the number of vectors being used */
+ adapter->num_vectors = 1;
+}
+
+/**
+ * e1000_request_msix - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
+ * kernel.
+ **/
+static int e1000_request_msix(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err = 0, vector = 0;
+
+ if (strlen(netdev->name) < (IFNAMSIZ - 5))
+ snprintf(adapter->rx_ring->name,
+ sizeof(adapter->rx_ring->name) - 1,
+ "%s-rx-0", netdev->name);
+ else
+ memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
+ err = request_irq(adapter->msix_entries[vector].vector,
+ e1000_intr_msix_rx, 0, adapter->rx_ring->name,
+ netdev);
+ if (err)
+ goto out;
+ adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
+ adapter->rx_ring->itr_val = adapter->itr;
+ vector++;
+
+ if (strlen(netdev->name) < (IFNAMSIZ - 5))
+ snprintf(adapter->tx_ring->name,
+ sizeof(adapter->tx_ring->name) - 1,
+ "%s-tx-0", netdev->name);
+ else
+ memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
+ err = request_irq(adapter->msix_entries[vector].vector,
+ e1000_intr_msix_tx, 0, adapter->tx_ring->name,
+ netdev);
+ if (err)
+ goto out;
+ adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
+ adapter->tx_ring->itr_val = adapter->itr;
+ vector++;
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ e1000_msix_other, 0, netdev->name, netdev);
+ if (err)
+ goto out;
+
+ e1000_configure_msix(adapter);
+ return 0;
+out:
+ return err;
+}
+
+/**
+ * e1000_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int e1000_request_irq(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ if (adapter->msix_entries) {
+ err = e1000_request_msix(adapter);
+ if (!err)
+ return err;
+ /* fall back to MSI */
+ e1000e_reset_interrupt_capability(adapter);
+ adapter->int_mode = E1000E_INT_MODE_MSI;
+ e1000e_set_interrupt_capability(adapter);
+ }
+ if (adapter->flags & FLAG_MSI_ENABLED) {
+ err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
+ netdev->name, netdev);
+ if (!err)
+ return err;
+
+ /* fall back to legacy interrupt */
+ e1000e_reset_interrupt_capability(adapter);
+ adapter->int_mode = E1000E_INT_MODE_LEGACY;
+ }
+
+ err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
+ netdev->name, netdev);
+ if (err)
+ e_err("Unable to allocate interrupt, Error: %d\n", err);
+
+ return err;
+}
+
+static void e1000_free_irq(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (adapter->msix_entries) {
+ int vector = 0;
+
+ free_irq(adapter->msix_entries[vector].vector, netdev);
+ vector++;
+
+ free_irq(adapter->msix_entries[vector].vector, netdev);
+ vector++;
+
+ /* Other Causes interrupt vector */
+ free_irq(adapter->msix_entries[vector].vector, netdev);
+ return;
+ }
+
+ free_irq(adapter->pdev->irq, netdev);
+}
+
+/**
+ * e1000_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void e1000_irq_disable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ ew32(IMC, ~0);
+ if (adapter->msix_entries)
+ ew32(EIAC_82574, 0);
+ e1e_flush();
+
+ if (adapter->msix_entries) {
+ int i;
+ for (i = 0; i < adapter->num_vectors; i++)
+ synchronize_irq(adapter->msix_entries[i].vector);
+ } else {
+ synchronize_irq(adapter->pdev->irq);
+ }
+}
+
+/**
+ * e1000_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static void e1000_irq_enable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (adapter->msix_entries) {
+ ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
+ ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
+ } else {
+ ew32(IMS, IMS_ENABLE_MASK);
+ }
+ e1e_flush();
+}
+
+/**
+ * e1000e_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is open.
+ **/
+void e1000e_get_hw_control(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_ext;
+ u32 swsm;
+
+ /* Let firmware know the driver has taken over */
+ if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
+ swsm = er32(SWSM);
+ ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
+ } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
+ ctrl_ext = er32(CTRL_EXT);
+ ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+ }
+}
+
+/**
+ * e1000e_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is closed.
+ **/
+void e1000e_release_hw_control(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_ext;
+ u32 swsm;
+
+ /* Let firmware take over control of h/w */
+ if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
+ swsm = er32(SWSM);
+ ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
+ } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
+ ctrl_ext = er32(CTRL_EXT);
+ ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+ }
+}
+
+/**
+ * e1000_alloc_ring_dma - allocate DMA memory for a ring structure
+ * @adapter: board private structure
+ * @ring: ring struct for which to allocate DMA memory
+ **/
+static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
+ struct e1000_ring *ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
+ GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
+{
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ int err = -ENOMEM, size;
+
+ size = sizeof(struct e1000_buffer) * tx_ring->count;
+ tx_ring->buffer_info = vzalloc(size);
+ if (!tx_ring->buffer_info)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
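+ /*
+ * (e.g. with the driver's customary default of 256 Tx
+ * descriptors at 16 bytes each -- the default count is an
+ * assumption here -- tx_ring->size is exactly 4096 and the
+ * ALIGN() above is a no-op.)
+ */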
+
+ err = e1000_alloc_ring_dma(adapter, tx_ring);
+ if (err)
+ goto err;
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ return 0;
+err:
+ vfree(tx_ring->buffer_info);
+ e_err("Unable to allocate memory for the transmit descriptor ring\n");
+ return err;
+}
+
+/**
+ * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
+{
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ struct e1000_buffer *buffer_info;
+ int i, size, desc_len, err = -ENOMEM;
+
+ size = sizeof(struct e1000_buffer) * rx_ring->count;
+ rx_ring->buffer_info = vzalloc(size);
+ if (!rx_ring->buffer_info)
+ goto err;
+
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
+ sizeof(struct e1000_ps_page),
+ GFP_KERNEL);
+ if (!buffer_info->ps_pages)
+ goto err_pages;
+ }
+
+ desc_len = sizeof(union e1000_rx_desc_packet_split);
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * desc_len;
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
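+ /*
+ * The ring is always sized for the 32-byte packet-split
+ * descriptor, the largest of the formats used, so one
+ * allocation can back the legacy, extended, and packet-split
+ * receive paths; e.g. 256 descriptors -> 8192 bytes.
+ */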
+
+ err = e1000_alloc_ring_dma(adapter, rx_ring);
+ if (err)
+ goto err_pages;
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ rx_ring->rx_skb_top = NULL;
+
+ return 0;
+
+err_pages:
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ kfree(buffer_info->ps_pages);
+ }
+err:
+ vfree(rx_ring->buffer_info);
+ e_err("Unable to allocate memory for the receive descriptor ring\n");
+ return err;
+}
+
+/**
+ * e1000_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ **/
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
+{
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_buffer *buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ for (i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+ e1000_put_txbuf(adapter, buffer_info);
+ }
+
+ size = sizeof(struct e1000_buffer) * tx_ring->count;
+ memset(tx_ring->buffer_info, 0, size);
+
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ writel(0, adapter->hw.hw_addr + tx_ring->head);
+ writel(0, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+/**
+ * e1000e_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+void e1000e_free_tx_resources(struct e1000_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+
+ e1000_clean_tx_ring(adapter);
+
+ vfree(tx_ring->buffer_info);
+ tx_ring->buffer_info = NULL;
+
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
+ tx_ring->desc = NULL;
+}
+
+/**
+ * e1000e_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+void e1000e_free_rx_resources(struct e1000_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ int i;
+
+ e1000_clean_rx_ring(adapter);
+
+ for (i = 0; i < rx_ring->count; i++)
+ kfree(rx_ring->buffer_info[i].ps_pages);
+
+ vfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
+
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
+ rx_ring->desc = NULL;
+}
+
+/**
+ * e1000_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput. This functionality is controlled
+ * by the InterruptThrottleRate module parameter.
+ **/
+static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
+ u16 itr_setting, int packets,
+ int bytes)
+{
+ unsigned int retval = itr_setting;
+
+ if (packets == 0)
+ goto update_itr_done;
+
+ switch (itr_setting) {
+ case lowest_latency:
+ /* handle TSO and jumbo frames */
+ if (bytes/packets > 8000)
+ retval = bulk_latency;
+ else if ((packets < 5) && (bytes > 512))
+ retval = low_latency;
+ break;
+ case low_latency: /* 50 usec aka 20000 ints/s */
+ if (bytes > 10000) {
+ /* this if handles the TSO accounting */
+ if (bytes/packets > 8000)
+ retval = bulk_latency;
+ else if ((packets < 10) || ((bytes/packets) > 1200))
+ retval = bulk_latency;
+ else if ((packets > 35))
+ retval = lowest_latency;
+ } else if (bytes/packets > 2000) {
+ retval = bulk_latency;
+ } else if (packets <= 2 && bytes < 512) {
+ retval = lowest_latency;
+ }
+ break;
+ case bulk_latency: /* 250 usec aka 4000 ints/s */
+ if (bytes > 25000) {
+ if (packets > 35)
+ retval = low_latency;
+ } else if (bytes < 6000) {
+ retval = low_latency;
+ }
+ break;
+ }
+
+update_itr_done:
+ return retval;
+}
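+
+/*
+ * Worked example: an interval of 30 packets / 40000 bytes while in
+ * low_latency gives bytes/packets ~= 1333, which exceeds the 1200
+ * threshold above, so the function steps down to bulk_latency.
+ */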
+
+static void e1000_set_itr(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 current_itr;
+ u32 new_itr = adapter->itr;
+
+ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+ if (adapter->link_speed != SPEED_1000) {
+ current_itr = 0;
+ new_itr = 4000;
+ goto set_itr_now;
+ }
+
+ if (adapter->flags2 & FLAG2_DISABLE_AIM) {
+ new_itr = 0;
+ goto set_itr_now;
+ }
+
+ adapter->tx_itr = e1000_update_itr(adapter,
+ adapter->tx_itr,
+ adapter->total_tx_packets,
+ adapter->total_tx_bytes);
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
+ adapter->tx_itr = low_latency;
+
+ adapter->rx_itr = e1000_update_itr(adapter,
+ adapter->rx_itr,
+ adapter->total_rx_packets,
+ adapter->total_rx_bytes);
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
+ adapter->rx_itr = low_latency;
+
+ current_itr = max(adapter->rx_itr, adapter->tx_itr);
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = 70000;
+ break;
+ case low_latency:
+ new_itr = 20000; /* aka hwitr = ~200 */
+ break;
+ case bulk_latency:
+ new_itr = 4000;
+ break;
+ default:
+ break;
+ }
+
+set_itr_now:
+ if (new_itr != adapter->itr) {
+ /*
+ * this attempts to bias the interrupt rate towards Bulk
+ * by adding intermediate steps when interrupt rate is
+ * increasing
+ */
+ new_itr = new_itr > adapter->itr ?
+ min(adapter->itr + (new_itr >> 2), new_itr) :
+ new_itr;
+ adapter->itr = new_itr;
+ adapter->rx_ring->itr_val = new_itr;
+ if (adapter->msix_entries)
+ adapter->rx_ring->set_itr = 1;
+ else
+ if (new_itr)
+ ew32(ITR, 1000000000 / (new_itr * 256));
+ else
+ ew32(ITR, 0);
+ }
+}
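+
+/*
+ * Conversion note: new_itr above is in interrupts/sec while the ITR
+ * register counts 256 ns units, hence the 1000000000 / (new_itr * 256)
+ * write; e.g. 20000 ints/s -> register value 195, and 195 * 256 ns is
+ * roughly 50 us between interrupts.
+ */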
+
+/**
+ * e1000_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ **/
+static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
+{
+ adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+ if (!adapter->tx_ring)
+ goto err;
+
+ adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+ if (!adapter->rx_ring)
+ goto err;
+
+ return 0;
+err:
+ e_err("Unable to allocate memory for queues\n");
+ kfree(adapter->rx_ring);
+ kfree(adapter->tx_ring);
+ return -ENOMEM;
+}
+
+/**
+ * e1000_clean - NAPI Rx polling callback
+ * @napi: struct associated with this polling callback
+ * @budget: number of packets driver is allowed to process this poll
+ **/
+static int e1000_clean(struct napi_struct *napi, int budget)
+{
+ struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *poll_dev = adapter->netdev;
+ int tx_cleaned = 1, work_done = 0;
+
+ adapter = netdev_priv(poll_dev);
+
+ if (adapter->msix_entries &&
+ !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
+ goto clean_rx;
+
+ tx_cleaned = e1000_clean_tx_irq(adapter);
+
+clean_rx:
+ adapter->clean_rx(adapter, &work_done, budget);
+
+ if (!tx_cleaned)
+ work_done = budget;
+
+ /* If budget not fully consumed, exit the polling mode */
+ if (work_done < budget) {
+ if (adapter->itr_setting & 3)
+ e1000_set_itr(adapter);
+ napi_complete(napi);
+ if (!test_bit(__E1000_DOWN, &adapter->state)) {
+ if (adapter->msix_entries)
+ ew32(IMS, adapter->rx_ring->ims_val);
+ else
+ e1000_irq_enable(adapter);
+ }
+ }
+
+ return work_done;
+}
+
+static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vfta, index;
+
+ /* don't update vlan cookie if already programmed */
+ if ((adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+ (vid == adapter->mng_vlan_id))
+ return;
+
+ /* add VID to filter table */
+ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+ index = (vid >> 5) & 0x7F;
+ vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
+ vfta |= (1 << (vid & 0x1F));
+ hw->mac.ops.write_vfta(hw, index, vfta);
+ }
+
+ set_bit(vid, adapter->active_vlans);
+}
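+
+/*
+ * VFTA indexing example: vid 100 -> index = (100 >> 5) & 0x7F = 3,
+ * bit = 100 & 0x1F = 4, so VFTA[3] |= (1 << 4); 128 32-bit registers
+ * cover all 4096 VLAN IDs.
+ */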
+
+static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vfta, index;
+
+ if ((adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+ (vid == adapter->mng_vlan_id)) {
+ /* release control to f/w */
+ e1000e_release_hw_control(adapter);
+ return;
+ }
+
+ /* remove VID from filter table */
+ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+ index = (vid >> 5) & 0x7F;
+ vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
+ vfta &= ~(1 << (vid & 0x1F));
+ hw->mac.ops.write_vfta(hw, index, vfta);
+ }
+
+ clear_bit(vid, adapter->active_vlans);
+}
+
+/**
+ * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
+ * @adapter: board private structure
+ **/
+static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+ /* disable VLAN receive filtering */
+ rctl = er32(RCTL);
+ rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
+ ew32(RCTL, rctl);
+
+ if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
+ e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+ }
+ }
+}
+
+/**
+ * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
+ * @adapter: board private structure
+ **/
+static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+ /* enable VLAN receive filtering */
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_VFE;
+ rctl &= ~E1000_RCTL_CFIEN;
+ ew32(RCTL, rctl);
+ }
+}
+
+/**
+ * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
+ * @adapter: board private structure
+ **/
+static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl;
+
+ /* disable VLAN tag insert/strip */
+ ctrl = er32(CTRL);
+ ctrl &= ~E1000_CTRL_VME;
+ ew32(CTRL, ctrl);
+}
+
+/**
+ * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
+ * @adapter: board private structure
+ **/
+static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl;
+
+ /* enable VLAN tag insert/strip */
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_VME;
+ ew32(CTRL, ctrl);
+}
+
+static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u16 vid = adapter->hw.mng_cookie.vlan_id;
+ u16 old_vid = adapter->mng_vlan_id;
+
+ if (adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+ e1000_vlan_rx_add_vid(netdev, vid);
+ adapter->mng_vlan_id = vid;
+ }
+
+ if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
+ e1000_vlan_rx_kill_vid(netdev, old_vid);
+}
+
+static void e1000_restore_vlan(struct e1000_adapter *adapter)
+{
+ u16 vid;
+
+ e1000_vlan_rx_add_vid(adapter->netdev, 0);
+
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ e1000_vlan_rx_add_vid(adapter->netdev, vid);
+}
+
+static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 manc, manc2h, mdef, i, j;
+
+ if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
+ return;
+
+ manc = er32(MANC);
+
+ /*
+ * enable receiving management packets to the host. this will probably
+ * generate destination unreachable messages from the host OS, but
+ * the packets will be handled on SMBUS
+ */
+ manc |= E1000_MANC_EN_MNG2HOST;
+ manc2h = er32(MANC2H);
+
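+ /*
+ * (Background note: the 623/664 ports referenced below are the
+ * standard ASF/RMCP management ports -- UDP 623, and 664 for
+ * secure RMCP -- which is why they matter for IPMI
+ * pass-through.)
+ */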
+ switch (hw->mac.type) {
+ default:
+ manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ /*
+ * Check if IPMI pass-through decision filter already exists;
+ * if so, enable it.
+ */
+ for (i = 0, j = 0; i < 8; i++) {
+ mdef = er32(MDEF(i));
+
+ /* Ignore filters with anything other than IPMI ports */
+ if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+ continue;
+
+ /* Enable this decision filter in MANC2H */
+ if (mdef)
+ manc2h |= (1 << i);
+
+ j |= mdef;
+ }
+
+ if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+ break;
+
+ /* Create new decision filter in an empty filter */
+ for (i = 0, j = 0; i < 8; i++)
+ if (er32(MDEF(i)) == 0) {
+ ew32(MDEF(i), (E1000_MDEF_PORT_623 |
+ E1000_MDEF_PORT_664));
+ manc2h |= (1 << i);
+ j++;
+ break;
+ }
+
+ if (!j)
+ e_warn("Unable to create IPMI pass-through filter\n");
+ break;
+ }
+
+ ew32(MANC2H, manc2h);
+ ew32(MANC, manc);
+}
+
+/**
+ * e1000_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void e1000_configure_tx(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ u64 tdba;
+ u32 tdlen, tctl, tipg, tarc;
+ u32 ipgr1, ipgr2;
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ tdba = tx_ring->dma;
+ tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
+ ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
+ ew32(TDBAH, (tdba >> 32));
+ ew32(TDLEN, tdlen);
+ ew32(TDH, 0);
+ ew32(TDT, 0);
+ tx_ring->head = E1000_TDH;
+ tx_ring->tail = E1000_TDT;
+
+ /* Set the default values for the Tx Inter Packet Gap timer */
+ tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */
+ ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */
+ ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */
+
+ if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
+ ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */
+
+ tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
+ tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
+ ew32(TIPG, tipg);
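+ /*
+ * (Assuming the usual IPGR1/IPGR2 field shifts of 10 and 20,
+ * the copper defaults above (8/8/6) pack to
+ * 8 | (8 << 10) | (6 << 20) = 0x00602008.)
+ */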
+
+ /* Set the Tx Interrupt Delay register */
+ ew32(TIDV, adapter->tx_int_delay);
+ /* Tx irq moderation */
+ ew32(TADV, adapter->tx_abs_int_delay);
+
+ if (adapter->flags2 & FLAG2_DMA_BURST) {
+ u32 txdctl = er32(TXDCTL(0));
+ txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
+ E1000_TXDCTL_WTHRESH);
+ /*
+ * set up some performance related parameters to encourage the
+ * hardware to use the bus more efficiently in bursts, depends
+ * on the tx_int_delay to be enabled,
+ * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time
+ * hthresh = 1 ==> prefetch when one or more available
+ * pthresh = 0x1f ==> prefetch if internal cache 31 or less
+ * BEWARE: this seems to work but should be considered first if
+ * there are Tx hangs or other Tx related bugs
+ */
+ txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
+ ew32(TXDCTL(0), txdctl);
+ /* erratum work around: set txdctl the same for both queues */
+ ew32(TXDCTL(1), txdctl);
+ }
+
+ /* Program the Transmit Control Register */
+ tctl = er32(TCTL);
+ tctl &= ~E1000_TCTL_CT;
+ tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+ (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+ if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
+ tarc = er32(TARC(0));
+ /*
+ * set the speed mode bit, we'll clear it if we're not at
+ * gigabit link later
+ */
+#define SPEED_MODE_BIT (1 << 21)
+ tarc |= SPEED_MODE_BIT;
+ ew32(TARC(0), tarc);
+ }
+
+ /* errata: program both queues to unweighted RR */
+ if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
+ tarc = er32(TARC(0));
+ tarc |= 1;
+ ew32(TARC(0), tarc);
+ tarc = er32(TARC(1));
+ tarc |= 1;
+ ew32(TARC(1), tarc);
+ }
+
+ /* Setup Transmit Descriptor Settings for eop descriptor */
+ adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
+
+ /* only set IDE if we are delaying interrupts using the timers */
+ if (adapter->tx_int_delay)
+ adapter->txd_cmd |= E1000_TXD_CMD_IDE;
+
+ /* enable Report Status bit */
+ adapter->txd_cmd |= E1000_TXD_CMD_RS;
+
+ ew32(TCTL, tctl);
+
+ e1000e_config_collision_dist(hw);
+}
+
+/**
+ * e1000_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
+ **/
+#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+ (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
+static void e1000_setup_rctl(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl, rfctl;
+ u32 pages = 0;
+
+ /* Workaround Si errata on 82579 - configure jumbo frame flow */
+ if (hw->mac.type == e1000_pch2lan) {
+ s32 ret_val;
+
+ if (adapter->netdev->mtu > ETH_DATA_LEN)
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
+ else
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+
+ if (ret_val)
+ e_dbg("failed to enable jumbo frame workaround mode\n");
+ }
+
+ /* Program MC offset vector base */
+ rctl = er32(RCTL);
+ rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+ rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+ /* Do not Store bad packets */
+ rctl &= ~E1000_RCTL_SBP;
+
+ /* Enable Long Packet receive */
+ if (adapter->netdev->mtu <= ETH_DATA_LEN)
+ rctl &= ~E1000_RCTL_LPE;
+ else
+ rctl |= E1000_RCTL_LPE;
+
+ /* Some systems expect that the CRC is included in SMBUS traffic. The
+ * hardware strips the CRC before sending to both SMBUS (BMC) and to
+ * host memory when this is enabled
+ */
+ if (adapter->flags2 & FLAG2_CRC_STRIPPING)
+ rctl |= E1000_RCTL_SECRC;
+
+ /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
+ if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
+ u16 phy_data;
+
+ e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
+ phy_data &= 0xfff8;
+ phy_data |= (1 << 2);
+ e1e_wphy(hw, PHY_REG(770, 26), phy_data);
+
+ e1e_rphy(hw, 22, &phy_data);
+ phy_data &= 0x0fff;
+ phy_data |= (1 << 14);
+ e1e_wphy(hw, 0x10, 0x2823);
+ e1e_wphy(hw, 0x11, 0x0003);
+ e1e_wphy(hw, 22, phy_data);
+ }
+
+ /* Setup buffer sizes */
+ rctl &= ~E1000_RCTL_SZ_4096;
+ rctl |= E1000_RCTL_BSEX;
+ switch (adapter->rx_buffer_len) {
+ case 2048:
+ default:
+ rctl |= E1000_RCTL_SZ_2048;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case 4096:
+ rctl |= E1000_RCTL_SZ_4096;
+ break;
+ case 8192:
+ rctl |= E1000_RCTL_SZ_8192;
+ break;
+ case 16384:
+ rctl |= E1000_RCTL_SZ_16384;
+ break;
+ }
+
+ /* Enable Extended Status in all Receive Descriptors */
+ rfctl = er32(RFCTL);
+ rfctl |= E1000_RFCTL_EXTEN;
+
+ /*
+ * 82571 and greater support packet-split where the protocol
+ * header is placed in skb->data and the packet data is
+ * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
+ * In the case of a non-split, skb->data is linearly filled,
+ * followed by the page buffers. Therefore, skb->data is
+ * sized to hold the largest protocol header.
+ *
+ * allocations using alloc_page take too long for regular MTU
+ * so only enable packet split for jumbo frames
+ *
+ * Using pages when the page size is greater than 16k wastes
+ * a lot of memory, since we allocate 3 pages at all times
+ * per packet.
+ */
+ pages = PAGE_USE_COUNT(adapter->netdev->mtu);
+ if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) &&
+ (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
+ adapter->rx_ps_pages = pages;
+ else
+ adapter->rx_ps_pages = 0;
+
+ if (adapter->rx_ps_pages) {
+ u32 psrctl = 0;
+
+ /*
+ * disable packet split support for IPv6 extension headers,
+ * because some malformed IPv6 headers can hang the Rx
+ */
+ rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
+ E1000_RFCTL_NEW_IPV6_EXT_DIS);
+
+ /* Enable Packet split descriptors */
+ rctl |= E1000_RCTL_DTYP_PS;
+
+ psrctl |= adapter->rx_ps_bsize0 >>
+ E1000_PSRCTL_BSIZE0_SHIFT;
+
+ switch (adapter->rx_ps_pages) {
+ case 3:
+ psrctl |= PAGE_SIZE <<
+ E1000_PSRCTL_BSIZE3_SHIFT;
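+			/* fall through */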
+ case 2:
+ psrctl |= PAGE_SIZE <<
+ E1000_PSRCTL_BSIZE2_SHIFT;
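+			/* fall through */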
+ case 1:
+ psrctl |= PAGE_SIZE >>
+ E1000_PSRCTL_BSIZE1_SHIFT;
+ break;
+ }
+
+ ew32(PSRCTL, psrctl);
+ }
+
+ ew32(RFCTL, rfctl);
+ ew32(RCTL, rctl);
+ /* just started the receive unit, no need to restart */
+ adapter->flags &= ~FLAG_RX_RESTART_NOW;
+}
+
+/**
+ * e1000_configure_rx - Configure Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void e1000_configure_rx(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ u64 rdba;
+ u32 rdlen, rctl, rxcsum, ctrl_ext;
+
+ if (adapter->rx_ps_pages) {
+ /* this is a 32 byte descriptor */
+ rdlen = rx_ring->count *
+ sizeof(union e1000_rx_desc_packet_split);
+ adapter->clean_rx = e1000_clean_rx_irq_ps;
+ adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
+ } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
+ rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
+ adapter->clean_rx = e1000_clean_jumbo_rx_irq;
+ adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
+ } else {
+ rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
+ adapter->clean_rx = e1000_clean_rx_irq;
+ adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
+ }
+
+ /* disable receives while setting up the descriptors */
+ rctl = er32(RCTL);
+ if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ if (adapter->flags2 & FLAG2_DMA_BURST) {
+ /*
+ * set the writeback threshold (only takes effect if the RDTR
+ * is set). set GRAN=1 and write back up to 0x4 worth, and
+ * enable prefetching of 0x20 Rx descriptors
+ * granularity = 01
+ * wthresh = 04,
+ * hthresh = 04,
+ * pthresh = 0x20
+ */
+ ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
+ ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
+
+ /*
+ * override the delay timers for enabling bursting, only if
+ * the value was not set by the user via module options
+ */
+ if (adapter->rx_int_delay == DEFAULT_RDTR)
+ adapter->rx_int_delay = BURST_RDTR;
+ if (adapter->rx_abs_int_delay == DEFAULT_RADV)
+ adapter->rx_abs_int_delay = BURST_RADV;
+ }
+
+ /* set the Receive Delay Timer Register */
+ ew32(RDTR, adapter->rx_int_delay);
+
+ /* irq moderation */
+ ew32(RADV, adapter->rx_abs_int_delay);
+ if ((adapter->itr_setting != 0) && (adapter->itr != 0))
+ ew32(ITR, 1000000000 / (adapter->itr * 256));
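+	/*
+	 * the ITR register holds the interval between interrupts in 256 ns
+	 * units, so the conversion above turns interrupts/sec into register
+	 * units, e.g. itr = 20000 programs 1000000000 / (20000 * 256) ~= 195
+	 */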
+
+ ctrl_ext = er32(CTRL_EXT);
+ /* Auto-Mask interrupts upon ICR access */
+ ctrl_ext |= E1000_CTRL_EXT_IAME;
+ ew32(IAM, 0xffffffff);
+ ew32(CTRL_EXT, ctrl_ext);
+ e1e_flush();
+
+ /*
+ * Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ rdba = rx_ring->dma;
+ ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
+ ew32(RDBAH, (rdba >> 32));
+ ew32(RDLEN, rdlen);
+ ew32(RDH, 0);
+ ew32(RDT, 0);
+ rx_ring->head = E1000_RDH;
+ rx_ring->tail = E1000_RDT;
+
+ /* Enable Receive Checksum Offload for TCP and UDP */
+ rxcsum = er32(RXCSUM);
+ if (adapter->netdev->features & NETIF_F_RXCSUM) {
+ rxcsum |= E1000_RXCSUM_TUOFL;
+
+ /*
+ * IPv4 payload checksum for UDP fragments must be
+ * used in conjunction with packet-split.
+ */
+ if (adapter->rx_ps_pages)
+ rxcsum |= E1000_RXCSUM_IPPCSE;
+ } else {
+ rxcsum &= ~E1000_RXCSUM_TUOFL;
+ /* no need to clear IPPCSE as it defaults to 0 */
+ }
+ ew32(RXCSUM, rxcsum);
+
+ /*
+ * Enable early receives on supported devices, only takes effect when
+ * packet size is equal or larger than the specified value (in 8 byte
+ * units), e.g. using jumbo frames when setting to E1000_ERT_2048
+ */
+ if ((adapter->flags & FLAG_HAS_ERT) ||
+ (adapter->hw.mac.type == e1000_pch2lan)) {
+ if (adapter->netdev->mtu > ETH_DATA_LEN) {
+ u32 rxdctl = er32(RXDCTL(0));
+ ew32(RXDCTL(0), rxdctl | 0x3);
+ if (adapter->flags & FLAG_HAS_ERT)
+ ew32(ERT, E1000_ERT_2048 | (1 << 13));
+ /*
+ * With jumbo frames and early-receive enabled,
+ * excessive C-state transition latencies result in
+ * dropped transactions.
+ */
+ pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
+ } else {
+ pm_qos_update_request(&adapter->netdev->pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
+ }
+ }
+
+ /* Enable Receives */
+ ew32(RCTL, rctl);
+}
+
+/**
+ * e1000_update_mc_addr_list - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates the Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count)
+{
+ hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
+}
+
+/**
+ * e1000_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+static void e1000_set_multi(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u8 *mta_list;
+ u32 rctl;
+
+ /* Check for Promiscuous and All Multicast modes */
+
+ rctl = er32(RCTL);
+
+ if (netdev->flags & IFF_PROMISC) {
+ rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+ rctl &= ~E1000_RCTL_VFE;
+ /* Do not hardware filter VLANs in promisc mode */
+ e1000e_vlan_filter_disable(adapter);
+ } else {
+ if (netdev->flags & IFF_ALLMULTI) {
+ rctl |= E1000_RCTL_MPE;
+ rctl &= ~E1000_RCTL_UPE;
+ } else {
+ rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+ }
+ e1000e_vlan_filter_enable(adapter);
+ }
+
+ ew32(RCTL, rctl);
+
+ if (!netdev_mc_empty(netdev)) {
+ int i = 0;
+
+		mta_list = kmalloc(netdev_mc_count(netdev) * ETH_ALEN,
+				   GFP_ATOMIC);
+ if (!mta_list)
+ return;
+
+ /* prepare a packed array of only addresses. */
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+
+ e1000_update_mc_addr_list(hw, mta_list, i);
+ kfree(mta_list);
+ } else {
+ /*
+ * if we're called from probe, we might not have
+ * anything to do here, so clear out the list
+ */
+ e1000_update_mc_addr_list(hw, NULL, 0);
+ }
+
+ if (netdev->features & NETIF_F_HW_VLAN_RX)
+ e1000e_vlan_strip_enable(adapter);
+ else
+ e1000e_vlan_strip_disable(adapter);
+}
+
+/**
+ * e1000_configure - configure the hardware for Rx and Tx
+ * @adapter: private board structure
+ **/
+static void e1000_configure(struct e1000_adapter *adapter)
+{
+ e1000_set_multi(adapter->netdev);
+
+ e1000_restore_vlan(adapter);
+ e1000_init_manageability_pt(adapter);
+
+ e1000_configure_tx(adapter);
+ e1000_setup_rctl(adapter);
+ e1000_configure_rx(adapter);
+ adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring),
+ GFP_KERNEL);
+}
+
+/**
+ * e1000e_power_up_phy - restore link in case the phy was powered down
+ * @adapter: address of board private structure
+ *
+ * The phy may be powered down to save power and turn off link when the
+ * driver is unloaded and wake on lan is not enabled (among others)
+ * *** this routine MUST be followed by a call to e1000e_reset ***
+ **/
+void e1000e_power_up_phy(struct e1000_adapter *adapter)
+{
+ if (adapter->hw.phy.ops.power_up)
+ adapter->hw.phy.ops.power_up(&adapter->hw);
+
+ adapter->hw.mac.ops.setup_link(&adapter->hw);
+}
+
+/**
+ * e1000_power_down_phy - Power down the PHY
+ * @adapter: board private structure
+ *
+ * Power down the PHY so no link is implied when interface is down.
+ * The PHY cannot be powered down if management or WoL is active.
+ */
+static void e1000_power_down_phy(struct e1000_adapter *adapter)
+{
+ /* WoL is enabled */
+ if (adapter->wol)
+ return;
+
+ if (adapter->hw.phy.ops.power_down)
+ adapter->hw.phy.ops.power_down(&adapter->hw);
+}
+
+/**
+ * e1000e_reset - bring the hardware into a known good state
+ * @adapter: board private structure
+ *
+ * This function boots the hardware and enables some settings that
+ * require a configuration cycle of the hardware - those cannot be
+ * set/changed during runtime. After reset the device needs to be
+ * properly configured for Rx, Tx etc.
+ */
+void e1000e_reset(struct e1000_adapter *adapter)
+{
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+ struct e1000_fc_info *fc = &adapter->hw.fc;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 tx_space, min_tx_space, min_rx_space;
+ u32 pba = adapter->pba;
+ u16 hwm;
+
+ /* reset Packet Buffer Allocation to default */
+ ew32(PBA, pba);
+
+ if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
+ /*
+ * To maintain wire speed transmits, the Tx FIFO should be
+ * large enough to accommodate two full transmit packets,
+ * rounded up to the next 1KB and expressed in KB. Likewise,
+ * the Rx FIFO should be large enough to accommodate at least
+ * one full receive packet and is similarly rounded up and
+ * expressed in KB.
+ */
+ pba = er32(PBA);
+ /* upper 16 bits has Tx packet buffer allocation size in KB */
+ tx_space = pba >> 16;
+ /* lower 16 bits has Rx packet buffer allocation size in KB */
+ pba &= 0xffff;
+ /*
+ * the Tx fifo also stores 16 bytes of information about the Tx
+ * but don't include ethernet FCS because hardware appends it
+ */
+ min_tx_space = (adapter->max_frame_size +
+ sizeof(struct e1000_tx_desc) -
+ ETH_FCS_LEN) * 2;
+ min_tx_space = ALIGN(min_tx_space, 1024);
+ min_tx_space >>= 10;
+ /* software strips receive CRC, so leave room for it */
+ min_rx_space = adapter->max_frame_size;
+ min_rx_space = ALIGN(min_rx_space, 1024);
+ min_rx_space >>= 10;
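+		/*
+		 * worked example, assuming a 9018-byte max frame and 16-byte
+		 * legacy Tx descriptors: min_tx_space = (9018 + 16 - 4) * 2 =
+		 * 18060 -> rounded up to 18432 -> 18 KB, and min_rx_space =
+		 * 9018 -> 9216 -> 9 KB
+		 */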
+
+ /*
+ * If current Tx allocation is less than the min Tx FIFO size,
+ * and the min Tx FIFO size is less than the current Rx FIFO
+ * allocation, take space away from current Rx allocation
+ */
+ if ((tx_space < min_tx_space) &&
+ ((min_tx_space - tx_space) < pba)) {
+ pba -= min_tx_space - tx_space;
+
+ /*
+ * if short on Rx space, Rx wins and must trump Tx
+ * adjustment or use Early Receive if available
+ */
+ if ((pba < min_rx_space) &&
+ (!(adapter->flags & FLAG_HAS_ERT)))
+ /* ERT enabled in e1000_configure_rx */
+ pba = min_rx_space;
+ }
+
+ ew32(PBA, pba);
+ }
+
+ /*
+ * flow control settings
+ *
+ * The high water mark must be low enough to fit one full frame
+ * (or the size used for early receive) above it in the Rx FIFO.
+ * Set it to the lower of:
+ * - 90% of the Rx FIFO size, and
+ * - the full Rx FIFO size minus the early receive size (for parts
+ * with ERT support assuming ERT set to E1000_ERT_2048), or
+ * - the full Rx FIFO size minus one full frame
+ */
+ if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
+ fc->pause_time = 0xFFFF;
+ else
+ fc->pause_time = E1000_FC_PAUSE_TIME;
+ fc->send_xon = 1;
+ fc->current_mode = fc->requested_mode;
+
+ switch (hw->mac.type) {
+ default:
+ if ((adapter->flags & FLAG_HAS_ERT) &&
+ (adapter->netdev->mtu > ETH_DATA_LEN))
+ hwm = min(((pba << 10) * 9 / 10),
+ ((pba << 10) - (E1000_ERT_2048 << 3)));
+ else
+ hwm = min(((pba << 10) * 9 / 10),
+ ((pba << 10) - adapter->max_frame_size));
+
+ fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
+ fc->low_water = fc->high_water - 8;
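+		/*
+		 * e.g. with a 20 KB Rx FIFO and a 1522-byte max frame this is
+		 * min(20480 * 9 / 10, 20480 - 1522) = 18432; XOFF is sent once
+		 * the FIFO fills past high_water, XON once it drains 8 bytes
+		 * below it
+		 */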
+ break;
+ case e1000_pchlan:
+ /*
+ * Workaround PCH LOM adapter hangs with certain network
+ * loads. If hangs persist, try disabling Tx flow control.
+ */
+ if (adapter->netdev->mtu > ETH_DATA_LEN) {
+ fc->high_water = 0x3500;
+ fc->low_water = 0x1500;
+ } else {
+ fc->high_water = 0x5000;
+ fc->low_water = 0x3000;
+ }
+ fc->refresh_time = 0x1000;
+ break;
+ case e1000_pch2lan:
+ fc->high_water = 0x05C20;
+ fc->low_water = 0x05048;
+ fc->pause_time = 0x0650;
+ fc->refresh_time = 0x0400;
+ if (adapter->netdev->mtu > ETH_DATA_LEN) {
+ pba = 14;
+ ew32(PBA, pba);
+ }
+ break;
+ }
+
+ /*
+ * Disable Adaptive Interrupt Moderation if 2 full packets cannot
+ * fit in receive buffer and early-receive not supported.
+ */
+ if (adapter->itr_setting & 0x3) {
+ if (((adapter->max_frame_size * 2) > (pba << 10)) &&
+ !(adapter->flags & FLAG_HAS_ERT)) {
+ if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
+ dev_info(&adapter->pdev->dev,
+ "Interrupt Throttle Rate turned off\n");
+ adapter->flags2 |= FLAG2_DISABLE_AIM;
+ ew32(ITR, 0);
+ }
+ } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
+ dev_info(&adapter->pdev->dev,
+ "Interrupt Throttle Rate turned on\n");
+ adapter->flags2 &= ~FLAG2_DISABLE_AIM;
+ adapter->itr = 20000;
+ ew32(ITR, 1000000000 / (adapter->itr * 256));
+ }
+ }
+
+ /* Allow time for pending master requests to run */
+ mac->ops.reset_hw(hw);
+
+ /*
+ * For parts with AMT enabled, let the firmware know
+ * that the network interface is in control
+ */
+ if (adapter->flags & FLAG_HAS_AMT)
+ e1000e_get_hw_control(adapter);
+
+ ew32(WUC, 0);
+
+ if (mac->ops.init_hw(hw))
+ e_err("Hardware Error\n");
+
+ e1000_update_mng_vlan(adapter);
+
+ /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+ ew32(VET, ETH_P_8021Q);
+
+ e1000e_reset_adaptive(hw);
+
+ if (!netif_running(adapter->netdev) &&
+ !test_bit(__E1000_TESTING, &adapter->state)) {
+ e1000_power_down_phy(adapter);
+ return;
+ }
+
+ e1000_get_phy_info(hw);
+
+ if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
+ !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
+ u16 phy_data = 0;
+ /*
+ * speed up time to link by disabling smart power down, ignore
+ * the return value of this function because there is nothing
+ * different we would do if it failed
+ */
+ e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
+ phy_data &= ~IGP02E1000_PM_SPD;
+ e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
+ }
+}
+
+int e1000e_up(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* hardware has been reset, we need to reload some things */
+ e1000_configure(adapter);
+
+ clear_bit(__E1000_DOWN, &adapter->state);
+
+ napi_enable(&adapter->napi);
+ if (adapter->msix_entries)
+ e1000_configure_msix(adapter);
+ e1000_irq_enable(adapter);
+
+ netif_start_queue(adapter->netdev);
+
+ /* fire a link change interrupt to start the watchdog */
+ if (adapter->msix_entries)
+ ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+ else
+ ew32(ICS, E1000_ICS_LSC);
+
+ return 0;
+}
+
+static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (!(adapter->flags2 & FLAG2_DMA_BURST))
+ return;
+
+ /* flush pending descriptor writebacks to memory */
+ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+ ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+ /* execute the writes immediately */
+ e1e_flush();
+}
+
+static void e1000e_update_stats(struct e1000_adapter *adapter);
+
+void e1000e_down(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 tctl, rctl;
+
+ /*
+ * signal that we're down so the interrupt handler does not
+ * reschedule our watchdog timer
+ */
+ set_bit(__E1000_DOWN, &adapter->state);
+
+ /* disable receives in the hardware */
+ rctl = er32(RCTL);
+ if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ /* flush and sleep below */
+
+ netif_stop_queue(netdev);
+
+ /* disable transmits in the hardware */
+ tctl = er32(TCTL);
+ tctl &= ~E1000_TCTL_EN;
+ ew32(TCTL, tctl);
+
+ /* flush both disables and wait for them to finish */
+ e1e_flush();
+ usleep_range(10000, 20000);
+
+ napi_disable(&adapter->napi);
+ e1000_irq_disable(adapter);
+
+ del_timer_sync(&adapter->watchdog_timer);
+ del_timer_sync(&adapter->phy_info_timer);
+
+ netif_carrier_off(netdev);
+
+ spin_lock(&adapter->stats64_lock);
+ e1000e_update_stats(adapter);
+ spin_unlock(&adapter->stats64_lock);
+
+ e1000e_flush_descriptors(adapter);
+ e1000_clean_tx_ring(adapter);
+ e1000_clean_rx_ring(adapter);
+
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+
+ if (!pci_channel_offline(adapter->pdev))
+ e1000e_reset(adapter);
+
+ /*
+ * TODO: for power management, we could drop the link and
+ * pci_disable_device here.
+ */
+}
+
+void e1000e_reinit_locked(struct e1000_adapter *adapter)
+{
+ might_sleep();
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+ e1000e_down(adapter);
+ e1000e_up(adapter);
+ clear_bit(__E1000_RESETTING, &adapter->state);
+}
+
+/**
+ * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * e1000_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
+ adapter->rx_ps_bsize0 = 128;
+ adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+ spin_lock_init(&adapter->stats64_lock);
+
+ e1000e_set_interrupt_capability(adapter);
+
+ if (e1000_alloc_queues(adapter))
+ return -ENOMEM;
+
+ /* Explicitly disable IRQ since the NIC can be in any state. */
+ e1000_irq_disable(adapter);
+
+ set_bit(__E1000_DOWN, &adapter->state);
+ return 0;
+}
+
+/**
+ * e1000_intr_msi_test - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t e1000_intr_msi_test(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 icr = er32(ICR);
+
+ e_dbg("icr is %08X\n", icr);
+ if (icr & E1000_ICR_RXSEQ) {
+ adapter->flags &= ~FLAG_MSI_TEST_FAILED;
+ wmb();
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * e1000_test_msi_interrupt - Returns 0 for successful test
+ * @adapter: board private struct
+ *
+ * code flow taken from tg3.c
+ **/
+static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ /* poll_enable hasn't been called yet, so don't need disable */
+ /* clear any pending events */
+ er32(ICR);
+
+ /* free the real vector and request a test handler */
+ e1000_free_irq(adapter);
+ e1000e_reset_interrupt_capability(adapter);
+
+	/*
+	 * Assume the test fails; if it succeeds, the test MSI irq
+	 * handler will clear this flag
+	 */
+ adapter->flags |= FLAG_MSI_TEST_FAILED;
+
+ err = pci_enable_msi(adapter->pdev);
+ if (err)
+ goto msi_test_failed;
+
+ err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
+ netdev->name, netdev);
+ if (err) {
+ pci_disable_msi(adapter->pdev);
+ goto msi_test_failed;
+ }
+
+ wmb();
+
+ e1000_irq_enable(adapter);
+
+ /* fire an unusual interrupt on the test handler */
+ ew32(ICS, E1000_ICS_RXSEQ);
+ e1e_flush();
+ msleep(50);
+
+ e1000_irq_disable(adapter);
+
+ rmb();
+
+ if (adapter->flags & FLAG_MSI_TEST_FAILED) {
+ adapter->int_mode = E1000E_INT_MODE_LEGACY;
+ e_info("MSI interrupt test failed, using legacy interrupt.\n");
+	} else {
+		e_dbg("MSI interrupt test succeeded!\n");
+	}
+
+ free_irq(adapter->pdev->irq, netdev);
+ pci_disable_msi(adapter->pdev);
+
+msi_test_failed:
+ e1000e_set_interrupt_capability(adapter);
+ return e1000_request_irq(adapter);
+}
+
+/**
+ * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
+ * @adapter: board private struct
+ *
+ * code flow taken from tg3.c, called with e1000 interrupts disabled.
+ **/
+static int e1000_test_msi(struct e1000_adapter *adapter)
+{
+ int err;
+ u16 pci_cmd;
+
+ if (!(adapter->flags & FLAG_MSI_ENABLED))
+ return 0;
+
+ /* disable SERR in case the MSI write causes a master abort */
+ pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
+ if (pci_cmd & PCI_COMMAND_SERR)
+ pci_write_config_word(adapter->pdev, PCI_COMMAND,
+ pci_cmd & ~PCI_COMMAND_SERR);
+
+ err = e1000_test_msi_interrupt(adapter);
+
+ /* re-enable SERR */
+ if (pci_cmd & PCI_COMMAND_SERR) {
+ pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
+ pci_cmd |= PCI_COMMAND_SERR;
+ pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
+ }
+
+ return err;
+}
+
+/**
+ * e1000_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int e1000_open(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__E1000_TESTING, &adapter->state))
+ return -EBUSY;
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ netif_carrier_off(netdev);
+
+ /* allocate transmit descriptors */
+ err = e1000e_setup_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = e1000e_setup_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ /*
+ * If AMT is enabled, let the firmware know that the network
+ * interface is now open and reset the part to a known state.
+ */
+ if (adapter->flags & FLAG_HAS_AMT) {
+ e1000e_get_hw_control(adapter);
+ e1000e_reset(adapter);
+ }
+
+ e1000e_power_up_phy(adapter);
+
+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+ if ((adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
+ e1000_update_mng_vlan(adapter);
+
+ /* DMA latency requirement to workaround early-receive/jumbo issue */
+ if ((adapter->flags & FLAG_HAS_ERT) ||
+ (adapter->hw.mac.type == e1000_pch2lan))
+ pm_qos_add_request(&adapter->netdev->pm_qos_req,
+ PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
+ /*
+ * before we allocate an interrupt, we must be ready to handle it.
+ * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
+ * as soon as we call pci_request_irq, so we have to setup our
+ * clean_rx handler before we do so.
+ */
+ e1000_configure(adapter);
+
+ err = e1000_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ /*
+ * Work around PCIe errata with MSI interrupts causing some chipsets to
+ * ignore e1000e MSI messages, which means we need to test our MSI
+ * interrupt now
+ */
+ if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
+ err = e1000_test_msi(adapter);
+ if (err) {
+ e_err("Interrupt allocation failed\n");
+ goto err_req_irq;
+ }
+ }
+
+ /* From here on the code is the same as e1000e_up() */
+ clear_bit(__E1000_DOWN, &adapter->state);
+
+ napi_enable(&adapter->napi);
+
+ e1000_irq_enable(adapter);
+
+ netif_start_queue(netdev);
+
+ adapter->idle_check = true;
+ pm_runtime_put(&pdev->dev);
+
+ /* fire a link status change interrupt to start the watchdog */
+ if (adapter->msix_entries)
+ ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+ else
+ ew32(ICS, E1000_ICS_LSC);
+
+ return 0;
+
+err_req_irq:
+ e1000e_release_hw_control(adapter);
+ e1000_power_down_phy(adapter);
+ e1000e_free_rx_resources(adapter);
+err_setup_rx:
+ e1000e_free_tx_resources(adapter);
+err_setup_tx:
+ e1000e_reset(adapter);
+ pm_runtime_put_sync(&pdev->dev);
+
+ return err;
+}
+
+/**
+ * e1000_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int e1000_close(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ if (!test_bit(__E1000_DOWN, &adapter->state)) {
+ e1000e_down(adapter);
+ e1000_free_irq(adapter);
+ }
+ e1000_power_down_phy(adapter);
+
+ e1000e_free_tx_resources(adapter);
+ e1000e_free_rx_resources(adapter);
+
+ /*
+ * kill manageability vlan ID if supported, but not if a vlan with
+ * the same ID is registered on the host OS (let 8021q kill it)
+ */
+ if (adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
+ e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+
+ /*
+ * If AMT is enabled, let the firmware know that the network
+ * interface is now closed
+ */
+ if ((adapter->flags & FLAG_HAS_AMT) &&
+ !test_bit(__E1000_TESTING, &adapter->state))
+ e1000e_release_hw_control(adapter);
+
+ if ((adapter->flags & FLAG_HAS_ERT) ||
+ (adapter->hw.mac.type == e1000_pch2lan))
+ pm_qos_remove_request(&adapter->netdev->pm_qos_req);
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ return 0;
+}
+
+/**
+ * e1000_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int e1000_set_mac(struct net_device *netdev, void *p)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
+
+ e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+
+ if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
+		/* activate the workaround */
+ e1000e_set_laa_state_82571(&adapter->hw, 1);
+
+ /*
+ * Hold a copy of the LAA in RAR[14] This is done so that
+ * between the time RAR[0] gets clobbered and the time it
+ * gets fixed (in e1000_watchdog), the actual LAA is in one
+ * of the RARs and no incoming packets directed to this port
+ * are dropped. Eventually the LAA will be in RAR[0] and
+ * RAR[14]
+ */
+ e1000e_rar_set(&adapter->hw,
+ adapter->hw.mac.addr,
+ adapter->hw.mac.rar_entry_count - 1);
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_update_phy_task - work thread to update phy
+ * @work: pointer to our work struct
+ *
+ * this worker thread exists because we must acquire a
+ * semaphore to read the phy; we may sleep while waiting for
+ * that semaphore, and we cannot sleep in timer context.
+ **/
+static void e1000e_update_phy_task(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter, update_phy_task);
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
+ e1000_get_phy_info(&adapter->hw);
+}
+
+/*
+ * Need to wait a few seconds after link up to get diagnostic information from
+ * the phy
+ */
+static void e1000_update_phy_info(unsigned long data)
+{
+ struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
+ schedule_work(&adapter->update_phy_task);
+}
+
+/**
+ * e1000e_update_phy_stats - Update the PHY statistics counters
+ * @adapter: board private structure
+ *
+ * Read/clear the upper 16-bit PHY statistics registers and read/accumulate
+ * the lower 16-bit values
+ **/
+static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ s32 ret_val;
+ u16 phy_data;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+
+ /*
+ * A page set is expensive so check if already on desired page.
+ * If not, set to the page with the PHY status registers.
+ */
+ hw->phy.addr = 1;
+ ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
+ &phy_data);
+ if (ret_val)
+ goto release;
+ if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
+ ret_val = hw->phy.ops.set_page(hw,
+ HV_STATS_PAGE << IGP_PAGE_SHIFT);
+ if (ret_val)
+ goto release;
+ }
+
+ /* Single Collision Count */
+ hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
+ if (!ret_val)
+ adapter->stats.scc += phy_data;
+
+ /* Excessive Collision Count */
+ hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
+ if (!ret_val)
+ adapter->stats.ecol += phy_data;
+
+ /* Multiple Collision Count */
+ hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
+ if (!ret_val)
+ adapter->stats.mcc += phy_data;
+
+ /* Late Collision Count */
+ hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
+ if (!ret_val)
+ adapter->stats.latecol += phy_data;
+
+ /* Collision Count - also used for adaptive IFS */
+ hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
+ if (!ret_val)
+ hw->mac.collision_delta = phy_data;
+
+ /* Defer Count */
+ hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
+ if (!ret_val)
+ adapter->stats.dc += phy_data;
+
+ /* Transmit with no CRS */
+ hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
+ if (!ret_val)
+ adapter->stats.tncrs += phy_data;
+
+release:
+ hw->phy.ops.release(hw);
+}
+
+/**
+ * e1000e_update_stats - Update the board statistics counters
+ * @adapter: board private structure
+ **/
+static void e1000e_update_stats(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /*
+ * Prevent stats update while adapter is being reset, or if the pci
+ * connection is down.
+ */
+ if (adapter->link_speed == 0)
+ return;
+ if (pci_channel_offline(pdev))
+ return;
+
+ adapter->stats.crcerrs += er32(CRCERRS);
+ adapter->stats.gprc += er32(GPRC);
+ adapter->stats.gorc += er32(GORCL);
+ er32(GORCH); /* Clear gorc */
+ adapter->stats.bprc += er32(BPRC);
+ adapter->stats.mprc += er32(MPRC);
+ adapter->stats.roc += er32(ROC);
+
+ adapter->stats.mpc += er32(MPC);
+
+ /* Half-duplex statistics */
+ if (adapter->link_duplex == HALF_DUPLEX) {
+ if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
+ e1000e_update_phy_stats(adapter);
+ } else {
+ adapter->stats.scc += er32(SCC);
+ adapter->stats.ecol += er32(ECOL);
+ adapter->stats.mcc += er32(MCC);
+ adapter->stats.latecol += er32(LATECOL);
+ adapter->stats.dc += er32(DC);
+
+ hw->mac.collision_delta = er32(COLC);
+
+ if ((hw->mac.type != e1000_82574) &&
+ (hw->mac.type != e1000_82583))
+ adapter->stats.tncrs += er32(TNCRS);
+ }
+ adapter->stats.colc += hw->mac.collision_delta;
+ }
+
+ adapter->stats.xonrxc += er32(XONRXC);
+ adapter->stats.xontxc += er32(XONTXC);
+ adapter->stats.xoffrxc += er32(XOFFRXC);
+ adapter->stats.xofftxc += er32(XOFFTXC);
+ adapter->stats.gptc += er32(GPTC);
+ adapter->stats.gotc += er32(GOTCL);
+ er32(GOTCH); /* Clear gotc */
+ adapter->stats.rnbc += er32(RNBC);
+ adapter->stats.ruc += er32(RUC);
+
+ adapter->stats.mptc += er32(MPTC);
+ adapter->stats.bptc += er32(BPTC);
+
+ /* used for adaptive IFS */
+
+ hw->mac.tx_packet_delta = er32(TPT);
+ adapter->stats.tpt += hw->mac.tx_packet_delta;
+
+ adapter->stats.algnerrc += er32(ALGNERRC);
+ adapter->stats.rxerrc += er32(RXERRC);
+ adapter->stats.cexterr += er32(CEXTERR);
+ adapter->stats.tsctc += er32(TSCTC);
+ adapter->stats.tsctfc += er32(TSCTFC);
+
+ /* Fill out the OS statistics structure */
+ netdev->stats.multicast = adapter->stats.mprc;
+ netdev->stats.collisions = adapter->stats.colc;
+
+ /* Rx Errors */
+
+ /*
+ * RLEC on some newer hardware can be incorrect so build
+ * our own version based on RUC and ROC
+ */
+ netdev->stats.rx_errors = adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc +
+ adapter->stats.cexterr;
+ netdev->stats.rx_length_errors = adapter->stats.ruc +
+ adapter->stats.roc;
+ netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
+ netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
+ netdev->stats.rx_missed_errors = adapter->stats.mpc;
+
+ /* Tx Errors */
+ netdev->stats.tx_errors = adapter->stats.ecol +
+ adapter->stats.latecol;
+ netdev->stats.tx_aborted_errors = adapter->stats.ecol;
+ netdev->stats.tx_window_errors = adapter->stats.latecol;
+ netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
+
+ /* Tx Dropped needs to be maintained elsewhere */
+
+ /* Management Stats */
+ adapter->stats.mgptc += er32(MGTPTC);
+ adapter->stats.mgprc += er32(MGTPRC);
+ adapter->stats.mgpdc += er32(MGTPDC);
+}
+
+/**
+ * e1000_phy_read_status - Update the PHY register status snapshot
+ * @adapter: board private structure
+ **/
+static void e1000_phy_read_status(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_phy_regs *phy = &adapter->phy_regs;
+
+ if ((er32(STATUS) & E1000_STATUS_LU) &&
+ (adapter->hw.phy.media_type == e1000_media_type_copper)) {
+ int ret_val;
+
+ ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
+ ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
+ ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
+ ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
+ ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
+ ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
+ ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
+ ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
+ if (ret_val)
+ e_warn("Error reading PHY register\n");
+ } else {
+ /*
+ * Do not read PHY registers if link is not up
+ * Set values to typical power-on defaults
+ */
+ phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
+ phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
+ BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
+ BMSR_ERCAP);
+ phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
+ ADVERTISE_ALL | ADVERTISE_CSMA);
+ phy->lpa = 0;
+ phy->expansion = EXPANSION_ENABLENPAGE;
+ phy->ctrl1000 = ADVERTISE_1000FULL;
+ phy->stat1000 = 0;
+ phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
+ }
+}
+
+static void e1000_print_link_info(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl = er32(CTRL);
+
+ /* Link status message must follow this format for user tools */
+ printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
+ "Flow Control: %s\n",
+ adapter->netdev->name,
+ adapter->link_speed,
+ (adapter->link_duplex == FULL_DUPLEX) ?
+ "Full Duplex" : "Half Duplex",
+ ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
+ "Rx/Tx" :
+ ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
+ ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
+}
+
+static bool e1000e_has_link(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+	bool link_active = false;
+ s32 ret_val = 0;
+
+ /*
+ * get_link_status is set on LSC (link status) interrupt or
+ * Rx sequence error interrupt. get_link_status will stay
+ * false until the check_for_link establishes link
+ * for copper adapters ONLY
+ */
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ if (hw->mac.get_link_status) {
+ ret_val = hw->mac.ops.check_for_link(hw);
+ link_active = !hw->mac.get_link_status;
+ } else {
+			link_active = true;
+ }
+ break;
+ case e1000_media_type_fiber:
+ ret_val = hw->mac.ops.check_for_link(hw);
+ link_active = !!(er32(STATUS) & E1000_STATUS_LU);
+ break;
+ case e1000_media_type_internal_serdes:
+ ret_val = hw->mac.ops.check_for_link(hw);
+ link_active = adapter->hw.mac.serdes_has_link;
+ break;
+ default:
+ case e1000_media_type_unknown:
+ break;
+ }
+
+ if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
+ (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
+ /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
+ e_info("Gigabit has been disabled, downgrading speed\n");
+ }
+
+ return link_active;
+}
+
+static void e1000e_enable_receives(struct e1000_adapter *adapter)
+{
+ /* make sure the receive unit is started */
+ if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
+ (adapter->flags & FLAG_RX_RESTART_NOW)) {
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl = er32(RCTL);
+ ew32(RCTL, rctl | E1000_RCTL_EN);
+ adapter->flags &= ~FLAG_RX_RESTART_NOW;
+ }
+}
+
+static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+	/*
+	 * With 82574 controllers, the PHY needs to be checked periodically
+	 * for a hung state; it is reset if two successive checks return true
+	 */
+ if (e1000_check_phy_82574(hw))
+ adapter->phy_hang_count++;
+ else
+ adapter->phy_hang_count = 0;
+
+ if (adapter->phy_hang_count > 1) {
+ adapter->phy_hang_count = 0;
+ schedule_work(&adapter->reset_task);
+ }
+}
+
+/**
+ * e1000_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void e1000_watchdog(unsigned long data)
+{
+ struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+
+ /* Do the rest outside of interrupt context */
+ schedule_work(&adapter->watchdog_task);
+
+ /* TODO: make this use queue_delayed_work() */
+}
+
+static void e1000_watchdog_task(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter, watchdog_task);
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+ struct e1000_phy_info *phy = &adapter->hw.phy;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 link, tctl;
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
+ link = e1000e_has_link(adapter);
+ if ((netif_carrier_ok(netdev)) && link) {
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
+ e1000e_enable_receives(adapter);
+ goto link_up;
+ }
+
+ if ((e1000e_enable_tx_pkt_filtering(hw)) &&
+ (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
+ e1000_update_mng_vlan(adapter);
+
+ if (link) {
+ if (!netif_carrier_ok(netdev)) {
+			bool txb2b = true;
+
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
+ /* update snapshot of PHY registers on LSC */
+ e1000_phy_read_status(adapter);
+ mac->ops.get_link_up_info(&adapter->hw,
+ &adapter->link_speed,
+ &adapter->link_duplex);
+ e1000_print_link_info(adapter);
+ /*
+ * On supported PHYs, check for duplex mismatch only
+ * if link has autonegotiated at 10/100 half
+ */
+ if ((hw->phy.type == e1000_phy_igp_3 ||
+ hw->phy.type == e1000_phy_bm) &&
+			    hw->mac.autoneg &&
+ (adapter->link_speed == SPEED_10 ||
+ adapter->link_speed == SPEED_100) &&
+ (adapter->link_duplex == HALF_DUPLEX)) {
+ u16 autoneg_exp;
+
+ e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
+
+ if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
+					e_info("Autonegotiated half duplex but"
+					       " link partner cannot autoneg."
+					       " Try forcing full duplex if"
+					       " link gets many collisions.\n");
+ }
+
+ /* adjust timeout factor according to speed/duplex */
+ adapter->tx_timeout_factor = 1;
+ switch (adapter->link_speed) {
+ case SPEED_10:
+				txb2b = false;
+ adapter->tx_timeout_factor = 16;
+ break;
+ case SPEED_100:
+				txb2b = false;
+ adapter->tx_timeout_factor = 10;
+ break;
+ }
+
+ /*
+ * workaround: re-program speed mode bit after
+ * link-up event
+ */
+ if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
+ !txb2b) {
+ u32 tarc0;
+ tarc0 = er32(TARC(0));
+ tarc0 &= ~SPEED_MODE_BIT;
+ ew32(TARC(0), tarc0);
+ }
+
+ /*
+ * disable TSO for pcie and 10/100 speeds, to avoid
+ * some hardware issues
+ */
+ if (!(adapter->flags & FLAG_TSO_FORCE)) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
+ e_info("10/100 speed: disabling TSO\n");
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ break;
+ case SPEED_1000:
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ break;
+ default:
+ /* oops */
+ break;
+ }
+ }
+
+ /*
+ * enable transmits in the hardware, need to do this
+ * after setting TARC(0)
+ */
+ tctl = er32(TCTL);
+ tctl |= E1000_TCTL_EN;
+ ew32(TCTL, tctl);
+
+ /*
+ * Perform any post-link-up configuration before
+ * reporting link up.
+ */
+ if (phy->ops.cfg_on_link_up)
+ phy->ops.cfg_on_link_up(hw);
+
+ netif_carrier_on(netdev);
+
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->phy_info_timer,
+ round_jiffies(jiffies + 2 * HZ));
+ }
+ } else {
+ if (netif_carrier_ok(netdev)) {
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+ /* Link status message must follow this format */
+ printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
+ adapter->netdev->name);
+ netif_carrier_off(netdev);
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->phy_info_timer,
+ round_jiffies(jiffies + 2 * HZ));
+
+ if (adapter->flags & FLAG_RX_NEEDS_RESTART)
+ schedule_work(&adapter->reset_task);
+ else
+ pm_schedule_suspend(netdev->dev.parent,
+ LINK_TIMEOUT);
+ }
+ }
+
+link_up:
+ spin_lock(&adapter->stats64_lock);
+ e1000e_update_stats(adapter);
+
+ mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
+ adapter->tpt_old = adapter->stats.tpt;
+ mac->collision_delta = adapter->stats.colc - adapter->colc_old;
+ adapter->colc_old = adapter->stats.colc;
+
+ adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
+ adapter->gorc_old = adapter->stats.gorc;
+ adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
+ adapter->gotc_old = adapter->stats.gotc;
+ spin_unlock(&adapter->stats64_lock);
+
+ e1000e_update_adaptive(&adapter->hw);
+
+ if (!netif_carrier_ok(netdev) &&
+ (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
+ /*
+ * We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context).
+ */
+ schedule_work(&adapter->reset_task);
+ /* return immediately since reset is imminent */
+ return;
+ }
+
+ /* Simple mode for Interrupt Throttle Rate (ITR) */
+ if (adapter->itr_setting == 4) {
+ /*
+ * Symmetric Tx/Rx gets a reduced ITR=2000;
+ * Total asymmetrical Tx or Rx gets ITR=8000;
+ * everyone else is between 2000-8000.
+ */
+ u32 goc = (adapter->gotc + adapter->gorc) / 10000;
+ u32 dif = (adapter->gotc > adapter->gorc ?
+ adapter->gotc - adapter->gorc :
+ adapter->gorc - adapter->gotc) / 10000;
+ u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
+ ew32(ITR, 1000000000 / (itr * 256));
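+		/*
+		 * e.g. purely asymmetric traffic gives dif == goc, so
+		 * itr = 6000 + 2000 = 8000; fully symmetric traffic gives
+		 * dif = 0, so itr = 2000; mixed loads land in between
+		 */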
+ }
+
+ /* Cause software interrupt to ensure Rx ring is cleaned */
+ if (adapter->msix_entries)
+ ew32(ICS, adapter->rx_ring->ims_val);
+ else
+ ew32(ICS, E1000_ICS_RXDMT0);
+
+ /* flush pending descriptors to memory before detecting Tx hang */
+ e1000e_flush_descriptors(adapter);
+
+ /* Force detection of hung controller every watchdog period */
+ adapter->detect_tx_hung = 1;
+
+ /*
+ * With 82571 controllers, LAA may be overwritten due to controller
+ * reset from the other port. Set the appropriate LAA in RAR[0]
+ */
+ if (e1000e_get_laa_state_82571(hw))
+ e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
+
+ if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
+ e1000e_check_82574_phy_workaround(adapter);
+
+ /* Reset the timer */
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 2 * HZ));
+}
+
+#define E1000_TX_FLAGS_CSUM 0x00000001
+#define E1000_TX_FLAGS_VLAN 0x00000002
+#define E1000_TX_FLAGS_TSO 0x00000004
+#define E1000_TX_FLAGS_IPV4 0x00000008
+#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
+#define E1000_TX_FLAGS_VLAN_SHIFT 16
+
+static int e1000_tso(struct e1000_adapter *adapter,
+ struct sk_buff *skb)
+{
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_context_desc *context_desc;
+ struct e1000_buffer *buffer_info;
+ unsigned int i;
+ u32 cmd_length = 0;
+ u16 ipcse = 0, tucse, mss;
+ u8 ipcss, ipcso, tucss, tucso, hdr_len;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ if (skb_header_cloned(skb)) {
+ int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+
+ if (err)
+ return err;
+ }
+
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ mss = skb_shinfo(skb)->gso_size;
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 0, IPPROTO_TCP, 0);
+ cmd_length = E1000_TXD_CMD_IP;
+ ipcse = skb_transport_offset(skb) - 1;
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ ipcse = 0;
+ }
+ ipcss = skb_network_offset(skb);
+ ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
+ tucss = skb_transport_offset(skb);
+ tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
+ tucse = 0;
+
+ cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
+ E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
+
+ i = tx_ring->next_to_use;
+ context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+
+ context_desc->lower_setup.ip_fields.ipcss = ipcss;
+ context_desc->lower_setup.ip_fields.ipcso = ipcso;
+ context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
+ context_desc->upper_setup.tcp_fields.tucss = tucss;
+ context_desc->upper_setup.tcp_fields.tucso = tucso;
+ context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+ context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
+ context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
+ context_desc->cmd_and_length = cpu_to_le32(cmd_length);
+
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+ return 1;
+}
+
+static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
+{
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_context_desc *context_desc;
+ struct e1000_buffer *buffer_info;
+ unsigned int i;
+ u8 css;
+ u32 cmd_len = E1000_TXD_CMD_DEXT;
+ __be16 protocol;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return false;
+
+ if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
+ protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+ else
+ protocol = skb->protocol;
+
+ switch (protocol) {
+ case cpu_to_be16(ETH_P_IP):
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ cmd_len |= E1000_TXD_CMD_TCP;
+ break;
+ case cpu_to_be16(ETH_P_IPV6):
+ /* XXX not handling all IPV6 headers */
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ cmd_len |= E1000_TXD_CMD_TCP;
+ break;
+ default:
+ if (unlikely(net_ratelimit()))
+ e_warn("checksum_partial proto=%x!\n",
+ be16_to_cpu(protocol));
+ break;
+ }
+
+ css = skb_checksum_start_offset(skb);
+
+ i = tx_ring->next_to_use;
+ buffer_info = &tx_ring->buffer_info[i];
+ context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+
+ context_desc->lower_setup.ip_config = 0;
+ context_desc->upper_setup.tcp_fields.tucss = css;
+ context_desc->upper_setup.tcp_fields.tucso =
+ css + skb->csum_offset;
+ context_desc->upper_setup.tcp_fields.tucse = 0;
+ context_desc->tcp_seg_setup.data = 0;
+ context_desc->cmd_and_length = cpu_to_le32(cmd_len);
+
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+	return true;
+}
+
+#define E1000_MAX_PER_TXD 8192
+#define E1000_MAX_TXD_PWR 12
+
+static int e1000_tx_map(struct e1000_adapter *adapter,
+ struct sk_buff *skb, unsigned int first,
+ unsigned int max_per_txd, unsigned int nr_frags,
+ unsigned int mss)
+{
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_buffer *buffer_info;
+ unsigned int len = skb_headlen(skb);
+ unsigned int offset = 0, size, count = 0, i;
+ unsigned int f, bytecount, segs;
+
+ i = tx_ring->next_to_use;
+
+ while (len) {
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, max_per_txd);
+
+ buffer_info->length = size;
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
+ buffer_info->mapped_as_page = false;
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+ goto dma_error;
+
+ len -= size;
+ offset += size;
+ count++;
+
+ if (len) {
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ len = frag->size;
+ offset = 0;
+
+ while (len) {
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, max_per_txd);
+
+ buffer_info->length = size;
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+ buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
+ offset, size, DMA_TO_DEVICE);
+ buffer_info->mapped_as_page = true;
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+ goto dma_error;
+
+ len -= size;
+ offset += size;
+ count++;
+ }
+ }
+
+ segs = skb_shinfo(skb)->gso_segs ? : 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
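+	/*
+	 * e.g. a 3-segment TSO skb whose linear area holds only the headers
+	 * is charged skb->len plus two extra copies of those headers, since
+	 * each segment goes out on the wire with its own copy
+	 */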
+
+ tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[i].segs = segs;
+ tx_ring->buffer_info[i].bytecount = bytecount;
+ tx_ring->buffer_info[first].next_to_watch = i;
+
+ return count;
+
+dma_error:
+ dev_err(&pdev->dev, "Tx DMA map failed\n");
+ buffer_info->dma = 0;
+ if (count)
+ count--;
+
+ while (count--) {
+ if (i == 0)
+ i += tx_ring->count;
+ i--;
+ buffer_info = &tx_ring->buffer_info[i];
+ e1000_put_txbuf(adapter, buffer_info);
+ }
+
+ return 0;
+}
+
+static void e1000_tx_queue(struct e1000_adapter *adapter,
+ int tx_flags, int count)
+{
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_tx_desc *tx_desc = NULL;
+ struct e1000_buffer *buffer_info;
+ u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
+ unsigned int i;
+
+ if (tx_flags & E1000_TX_FLAGS_TSO) {
+ txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
+ E1000_TXD_CMD_TSE;
+ txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+
+ if (tx_flags & E1000_TX_FLAGS_IPV4)
+ txd_upper |= E1000_TXD_POPTS_IXSM << 8;
+ }
+
+ if (tx_flags & E1000_TX_FLAGS_CSUM) {
+ txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+ txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+ }
+
+ if (tx_flags & E1000_TX_FLAGS_VLAN) {
+ txd_lower |= E1000_TXD_CMD_VLE;
+ txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
+ }
+
+ i = tx_ring->next_to_use;
+
+ do {
+ buffer_info = &tx_ring->buffer_info[i];
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+ tx_desc->lower.data =
+ cpu_to_le32(txd_lower | buffer_info->length);
+ tx_desc->upper.data = cpu_to_le32(txd_upper);
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ } while (--count > 0);
+
+ tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
+
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ tx_ring->next_to_use = i;
+
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_tdt_wa(adapter, i);
+ else
+ writel(i, adapter->hw.hw_addr + tx_ring->tail);
+
+ /*
+ * we need this if more than one processor can write to our tail
+ * at a time, it synchronizes IO on IA64/Altix systems
+ */
+ mmiowb();
+}
+
+#define MINIMUM_DHCP_PACKET_SIZE 282
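+/*
+ * 282 is assumed to be the smallest frame that can carry a DHCP message:
+ * 14 (Ethernet) + 20 (IPv4) + 8 (UDP) + 236 (BOOTP fixed fields) +
+ * 4 (DHCP magic cookie) bytes.
+ */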
+static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
+ struct sk_buff *skb)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 length, offset;
+
+ if (vlan_tx_tag_present(skb)) {
+ if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+ (adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
+ return 0;
+ }
+
+ if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
+ return 0;
+
+ if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
+ return 0;
+
+ {
+ const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
+ struct udphdr *udp;
+
+ if (ip->protocol != IPPROTO_UDP)
+ return 0;
+
+ udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
+ if (ntohs(udp->dest) != 67)
+ return 0;
+
+ offset = (u8 *)udp + 8 - skb->data;
+ length = skb->len - offset;
+ return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
+ }
+}
+
+static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ netif_stop_queue(netdev);
+ /*
+ * Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it.
+ */
+ smp_mb();
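+	/*
+	 * the barrier makes the stopped queue visible before we re-check the
+	 * free-descriptor count below; it pairs with the Tx cleanup path,
+	 * which frees descriptors and then restarts a stopped queue
+	 */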
+
+ /*
+ * We need to check again in a case another CPU has just
+ * made room available.
+ */
+ if (e1000_desc_unused(adapter->tx_ring) < size)
+ return -EBUSY;
+
+ /* A reprieve! */
+ netif_start_queue(netdev);
+ ++adapter->restart_queue;
+ return 0;
+}
+
+static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (e1000_desc_unused(adapter->tx_ring) >= size)
+ return 0;
+ return __e1000_maybe_stop_tx(netdev, size);
+}
+
+#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
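+/*
+ * TXD_USE_COUNT(S, X) is a cheap upper bound on the descriptors needed for an
+ * S-byte buffer split into 2^X-byte chunks; it over-counts by one when S is
+ * an exact multiple, e.g. TXD_USE_COUNT(4096, 12) = 2.
+ */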
+static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ unsigned int first;
+ unsigned int max_per_txd = E1000_MAX_PER_TXD;
+ unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
+ unsigned int tx_flags = 0;
+ unsigned int len = skb_headlen(skb);
+ unsigned int nr_frags;
+ unsigned int mss;
+ int count = 0;
+ int tso;
+ unsigned int f;
+
+ if (test_bit(__E1000_DOWN, &adapter->state)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb->len <= 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ mss = skb_shinfo(skb)->gso_size;
+	/*
+	 * Before initiating DMA for each buffer, the controller checks that
+	 * there is enough FIFO room by requiring ceil(buffer len / mss) <= 4,
+	 * so cap the max per-descriptor length at 4 * mss when mss drops to
+	 * make sure we don't overrun the FIFO.
+	 */
+ if (mss) {
+ u8 hdr_len;
+ max_per_txd = min(mss << 2, max_per_txd);
+ max_txd_pwr = fls(max_per_txd) - 1;
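+		/*
+		 * e.g. mss = 1460 caps max_per_txd at 5840 bytes and
+		 * max_txd_pwr at fls(5840) - 1 = 12, keeping every DMA
+		 * buffer within four MSS-sized chunks
+		 */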
+
+ /*
+ * TSO Workaround for 82571/2/3 Controllers -- if skb->data
+ * points to just header, pull a few bytes of payload from
+ * frags into skb->data
+ */
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ /*
+		 * we do this workaround for ES2LAN, but it is
+		 * unnecessary; avoiding it could save a lot of cycles
+ */
+ if (skb->data_len && (hdr_len == len)) {
+ unsigned int pull_size;
+
+ pull_size = min((unsigned int)4, skb->data_len);
+ if (!__pskb_pull_tail(skb, pull_size)) {
+ e_err("__pskb_pull_tail failed.\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ len = skb_headlen(skb);
+ }
+ }
+
+ /* reserve a descriptor for the offload context */
+ if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
+ count++;
+ count++;
+
+ count += TXD_USE_COUNT(len, max_txd_pwr);
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ for (f = 0; f < nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
+ max_txd_pwr);
+
+ if (adapter->hw.mac.tx_pkt_filtering)
+ e1000_transfer_dhcp_info(adapter, skb);
+
+ /*
+ * need: count + 2 desc gap to keep tail from touching
+ * head, otherwise try next time
+ */
+ if (e1000_maybe_stop_tx(netdev, count + 2))
+ return NETDEV_TX_BUSY;
+
+ if (vlan_tx_tag_present(skb)) {
+ tx_flags |= E1000_TX_FLAGS_VLAN;
+ tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
+ }
+
+ first = tx_ring->next_to_use;
+
+ tso = e1000_tso(adapter, skb);
+ if (tso < 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (tso)
+ tx_flags |= E1000_TX_FLAGS_TSO;
+ else if (e1000_tx_csum(adapter, skb))
+ tx_flags |= E1000_TX_FLAGS_CSUM;
+
+ /*
+	 * The old method was to assume an IPv4 packet by default if TSO was
+	 * enabled. 82571 hardware supports TSO for IPv6 as well, so we can
+	 * no longer assume; we must check the protocol.
+ */
+ if (skb->protocol == htons(ETH_P_IP))
+ tx_flags |= E1000_TX_FLAGS_IPV4;
+
+ /* if count is 0 then mapping error has occurred */
+ count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
+ if (count) {
+ e1000_tx_queue(adapter, tx_flags, count);
+ /* Make sure there is space in the ring for the next send. */
+ e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
+
+ } else {
+ dev_kfree_skb_any(skb);
+ tx_ring->buffer_info[first].time_stamp = 0;
+ tx_ring->next_to_use = first;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * e1000_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void e1000_tx_timeout(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+}
+
+static void e1000_reset_task(struct work_struct *work)
+{
+ struct e1000_adapter *adapter;
+ adapter = container_of(work, struct e1000_adapter, reset_task);
+
+ /* don't run the task if already down */
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
+ if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
+ (adapter->flags & FLAG_RX_RESTART_NOW))) {
+ e1000e_dump(adapter);
+ e_err("Reset adapter\n");
+ }
+ e1000e_reinit_locked(adapter);
+}
+
+/**
+ * e1000_get_stats64 - Get System Network Statistics
+ * @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
+ *
+ * Returns the address of the device statistics structure.
+ **/
+struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ memset(stats, 0, sizeof(struct rtnl_link_stats64));
+ spin_lock(&adapter->stats64_lock);
+ e1000e_update_stats(adapter);
+ /* Fill out the OS statistics structure */
+ stats->rx_bytes = adapter->stats.gorc;
+ stats->rx_packets = adapter->stats.gprc;
+ stats->tx_bytes = adapter->stats.gotc;
+ stats->tx_packets = adapter->stats.gptc;
+ stats->multicast = adapter->stats.mprc;
+ stats->collisions = adapter->stats.colc;
+
+ /* Rx Errors */
+
+ /*
+ * RLEC on some newer hardware can be incorrect so build
+ * our own version based on RUC and ROC
+ */
+ stats->rx_errors = adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc +
+ adapter->stats.cexterr;
+ stats->rx_length_errors = adapter->stats.ruc +
+ adapter->stats.roc;
+ stats->rx_crc_errors = adapter->stats.crcerrs;
+ stats->rx_frame_errors = adapter->stats.algnerrc;
+ stats->rx_missed_errors = adapter->stats.mpc;
+
+ /* Tx Errors */
+ stats->tx_errors = adapter->stats.ecol +
+ adapter->stats.latecol;
+ stats->tx_aborted_errors = adapter->stats.ecol;
+ stats->tx_window_errors = adapter->stats.latecol;
+ stats->tx_carrier_errors = adapter->stats.tncrs;
+
+ /* Tx Dropped needs to be maintained elsewhere */
+
+ spin_unlock(&adapter->stats64_lock);
+ return stats;
+}
+
+/**
+ * e1000_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* Jumbo frame support */
+ if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+ !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+ e_err("Jumbo Frames not supported.\n");
+ return -EINVAL;
+ }
+
+ /* Supported frame sizes */
+ if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
+ (max_frame > adapter->max_hw_frame_size)) {
+ e_err("Unsupported MTU setting\n");
+ return -EINVAL;
+ }
+
+ /* Jumbo frame workaround on 82579 requires CRC be stripped */
+ if ((adapter->hw.mac.type == e1000_pch2lan) &&
+ !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
+ (new_mtu > ETH_DATA_LEN)) {
+ e_err("Jumbo Frames not supported on 82579 when CRC "
+ "stripping is disabled.\n");
+ return -EINVAL;
+ }
+
+ /* 82573 Errata 17 */
+ if (((adapter->hw.mac.type == e1000_82573) ||
+ (adapter->hw.mac.type == e1000_82574)) &&
+ (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
+ adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
+ e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
+ }
+
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+ /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
+ adapter->max_frame_size = max_frame;
+ e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+ if (netif_running(netdev))
+ e1000e_down(adapter);
+
+ /*
+ * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+	 * means we reserve 2 more; this pushes us to allocate from the next
+	 * larger slab size, i.e. RXBUFFER_2048 --> size-4096 slab.
+	 * However, with the new *_jumbo_rx* routines, jumbo receives will
+	 * use fragmented skbs.
+ */
+
+ if (max_frame <= 2048)
+ adapter->rx_buffer_len = 2048;
+ else
+ adapter->rx_buffer_len = 4096;
+
+ /* adjust allocation if LPE protects us, and we aren't using SBP */
+ if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
+ (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
+ adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
+ + ETH_FCS_LEN;
+
+ if (netif_running(netdev))
+ e1000e_up(adapter);
+ else
+ e1000e_reset(adapter);
+
+ clear_bit(__E1000_RESETTING, &adapter->state);
+
+ return 0;
+}
+
+static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ if (adapter->hw.phy.media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = adapter->hw.phy.addr;
+ break;
+ case SIOCGMIIREG:
+ e1000_phy_read_status(adapter);
+
+ switch (data->reg_num & 0x1F) {
+ case MII_BMCR:
+ data->val_out = adapter->phy_regs.bmcr;
+ break;
+ case MII_BMSR:
+ data->val_out = adapter->phy_regs.bmsr;
+ break;
+ case MII_PHYSID1:
+ data->val_out = (adapter->hw.phy.id >> 16);
+ break;
+ case MII_PHYSID2:
+ data->val_out = (adapter->hw.phy.id & 0xFFFF);
+ break;
+ case MII_ADVERTISE:
+ data->val_out = adapter->phy_regs.advertise;
+ break;
+ case MII_LPA:
+ data->val_out = adapter->phy_regs.lpa;
+ break;
+ case MII_EXPANSION:
+ data->val_out = adapter->phy_regs.expansion;
+ break;
+ case MII_CTRL1000:
+ data->val_out = adapter->phy_regs.ctrl1000;
+ break;
+ case MII_STAT1000:
+ data->val_out = adapter->phy_regs.stat1000;
+ break;
+ case MII_ESTATUS:
+ data->val_out = adapter->phy_regs.estatus;
+ break;
+ default:
+ return -EIO;
+ }
+ break;
+ case SIOCSMIIREG:
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return e1000_mii_ioctl(netdev, ifr, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 i, mac_reg;
+ u16 phy_reg, wuc_enable;
+ int retval = 0;
+
+ /* copy MAC RARs to PHY RARs */
+ e1000_copy_rx_addrs_to_phy_ich8lan(hw);
+
+ retval = hw->phy.ops.acquire(hw);
+ if (retval) {
+ e_err("Could not acquire PHY\n");
+ return retval;
+ }
+
+	/* Enable access to wakeup registers and set the page to BM_WUC_PAGE */
+ retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
+ if (retval)
+ goto out;
+
+ /* copy MAC MTA to PHY MTA - only needed for pchlan */
+ for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
+ mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
+ hw->phy.ops.write_reg_page(hw, BM_MTA(i),
+ (u16)(mac_reg & 0xFFFF));
+ hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
+ (u16)((mac_reg >> 16) & 0xFFFF));
+ }
+
+ /* configure PHY Rx Control register */
+ hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
+ mac_reg = er32(RCTL);
+ if (mac_reg & E1000_RCTL_UPE)
+ phy_reg |= BM_RCTL_UPE;
+ if (mac_reg & E1000_RCTL_MPE)
+ phy_reg |= BM_RCTL_MPE;
+ phy_reg &= ~(BM_RCTL_MO_MASK);
+ if (mac_reg & E1000_RCTL_MO_3)
+ phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
+ << BM_RCTL_MO_SHIFT);
+ if (mac_reg & E1000_RCTL_BAM)
+ phy_reg |= BM_RCTL_BAM;
+ if (mac_reg & E1000_RCTL_PMCF)
+ phy_reg |= BM_RCTL_PMCF;
+ mac_reg = er32(CTRL);
+ if (mac_reg & E1000_CTRL_RFCE)
+ phy_reg |= BM_RCTL_RFCE;
+ hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
+
+ /* enable PHY wakeup in MAC register */
+ ew32(WUFC, wufc);
+ ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
+
+ /* configure and enable PHY wakeup in PHY registers */
+ hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
+ hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
+
+ /* activate PHY wakeup */
+ wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
+ retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
+ if (retval)
+ e_err("Could not set PHY Host Wakeup bit\n");
+out:
+ hw->phy.ops.release(hw);
+
+ return retval;
+}
+
+static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
+ bool runtime)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl, ctrl_ext, rctl, status;
+ /* Runtime suspend should only enable wakeup for link changes */
+ u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
+ int retval = 0;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
+ e1000e_down(adapter);
+ e1000_free_irq(adapter);
+ }
+ e1000e_reset_interrupt_capability(adapter);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU)
+ wufc &= ~E1000_WUFC_LNKC;
+
+ if (wufc) {
+ e1000_setup_rctl(adapter);
+ e1000_set_multi(netdev);
+
+ /* turn on all-multi mode if wake on multicast is enabled */
+ if (wufc & E1000_WUFC_MC) {
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_MPE;
+ ew32(RCTL, rctl);
+ }
+
+ ctrl = er32(CTRL);
+ /* advertise wake from D3Cold */
+ #define E1000_CTRL_ADVD3WUC 0x00100000
+ /* phy power management enable */
+ #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
+ ctrl |= E1000_CTRL_ADVD3WUC;
+ if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
+ ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
+ ew32(CTRL, ctrl);
+
+ if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
+ adapter->hw.phy.media_type ==
+ e1000_media_type_internal_serdes) {
+ /* keep the laser running in D3 */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
+ ew32(CTRL_EXT, ctrl_ext);
+ }
+
+ if (adapter->flags & FLAG_IS_ICH)
+ e1000_suspend_workarounds_ich8lan(&adapter->hw);
+
+ /* Allow time for pending master requests to run */
+ e1000e_disable_pcie_master(&adapter->hw);
+
+ if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
+ /* enable wakeup by the PHY */
+ retval = e1000_init_phy_wakeup(adapter, wufc);
+ if (retval)
+ return retval;
+ } else {
+ /* enable wakeup by the MAC */
+ ew32(WUFC, wufc);
+ ew32(WUC, E1000_WUC_PME_EN);
+ }
+ } else {
+ ew32(WUC, 0);
+ ew32(WUFC, 0);
+ }
+
+ *enable_wake = !!wufc;
+
+ /* make sure adapter isn't asleep if manageability is enabled */
+ if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
+ (hw->mac.ops.check_mng_mode(hw)))
+ *enable_wake = true;
+
+ if (adapter->hw.phy.type == e1000_phy_igp_3)
+ e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
+
+ /*
+ * Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant.
+ */
+ e1000e_release_hw_control(adapter);
+
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
+{
+ if (sleep && wake) {
+ pci_prepare_to_sleep(pdev);
+ return;
+ }
+
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
+ bool wake)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ /*
+ * The pci-e switch on some quad port adapters will report a
+ * correctable error when the MAC transitions from D0 to D3. To
+ * prevent this we need to mask off the correctable errors on the
+ * downstream port of the pci-e switch.
+ */
+ if (adapter->flags & FLAG_IS_QUAD_PORT) {
+ struct pci_dev *us_dev = pdev->bus->self;
+ int pos = pci_pcie_cap(us_dev);
+ u16 devctl;
+
+ pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
+ pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
+ (devctl & ~PCI_EXP_DEVCTL_CERE));
+
+ e1000_power_off(pdev, sleep, wake);
+
+ pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
+ } else {
+ e1000_power_off(pdev, sleep, wake);
+ }
+}
+
+#ifdef CONFIG_PCIEASPM
+static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+ pci_disable_link_state_locked(pdev, state);
+}
+#else
+static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+ int pos;
+ u16 reg16;
+
+ /*
+ * Both device and parent should have the same ASPM setting.
+ * Disable ASPM in downstream component first and then upstream.
+ */
+ pos = pci_pcie_cap(pdev);
+ pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+ reg16 &= ~state;
+ pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+
+ if (!pdev->bus->self)
+ return;
+
+ pos = pci_pcie_cap(pdev->bus->self);
+ pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
+ reg16 &= ~state;
+ pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
+}
+#endif
+static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+ dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
+ (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
+ (state & PCIE_LINK_STATE_L1) ? "L1" : "");
+
+ __e1000e_disable_aspm(pdev, state);
+}
+
+#ifdef CONFIG_PM
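+/*
+ * The Tx ring's buffer_info array is only allocated while the interface
+ * is set up, so use it as a readiness flag for the PM callbacks below.
+ */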
+static bool e1000e_pm_ready(struct e1000_adapter *adapter)
+{
+ return !!adapter->tx_ring->buffer_info;
+}
+
+static int __e1000_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 aspm_disable_flag = 0;
+	int err;
+
+ if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
+ aspm_disable_flag = PCIE_LINK_STATE_L0S;
+ if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+ aspm_disable_flag |= PCIE_LINK_STATE_L1;
+ if (aspm_disable_flag)
+ e1000e_disable_aspm(pdev, aspm_disable_flag);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ e1000e_set_interrupt_capability(adapter);
+ if (netif_running(netdev)) {
+ err = e1000_request_irq(adapter);
+ if (err)
+ return err;
+ }
+
+ if (hw->mac.type == e1000_pch2lan)
+ e1000_resume_workarounds_pchlan(&adapter->hw);
+
+ e1000e_power_up_phy(adapter);
+
+ /* report the system wakeup cause from S3/S4 */
+ if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
+ u16 phy_data;
+
+ e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
+ if (phy_data) {
+ e_info("PHY Wakeup cause - %s\n",
+ phy_data & E1000_WUS_EX ? "Unicast Packet" :
+ phy_data & E1000_WUS_MC ? "Multicast Packet" :
+ phy_data & E1000_WUS_BC ? "Broadcast Packet" :
+ phy_data & E1000_WUS_MAG ? "Magic Packet" :
+ phy_data & E1000_WUS_LNKC ? "Link Status "
+ " Change" : "other");
+ }
+ e1e_wphy(&adapter->hw, BM_WUS, ~0);
+ } else {
+ u32 wus = er32(WUS);
+ if (wus) {
+ e_info("MAC Wakeup cause - %s\n",
+ wus & E1000_WUS_EX ? "Unicast Packet" :
+ wus & E1000_WUS_MC ? "Multicast Packet" :
+ wus & E1000_WUS_BC ? "Broadcast Packet" :
+ wus & E1000_WUS_MAG ? "Magic Packet" :
+ wus & E1000_WUS_LNKC ? "Link Status Change" :
+ "other");
+ }
+ ew32(WUS, ~0);
+ }
+
+ e1000e_reset(adapter);
+
+ e1000_init_manageability_pt(adapter);
+
+ if (netif_running(netdev))
+ e1000e_up(adapter);
+
+ netif_device_attach(netdev);
+
+ /*
+ * If the controller has AMT, do not set DRV_LOAD until the interface
+ * is up. For all other cases, let the f/w know that the h/w is now
+ * under the control of the driver.
+ */
+ if (!(adapter->flags & FLAG_HAS_AMT))
+ e1000e_get_hw_control(adapter);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int e1000_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int retval;
+ bool wake;
+
+ retval = __e1000_shutdown(pdev, &wake, false);
+ if (!retval)
+ e1000_complete_shutdown(pdev, true, wake);
+
+ return retval;
+}
+
+static int e1000_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (e1000e_pm_ready(adapter))
+ adapter->idle_check = true;
+
+ return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+static int e1000_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (e1000e_pm_ready(adapter)) {
+ bool wake;
+
+ __e1000_shutdown(pdev, &wake, true);
+ }
+
+ return 0;
+}
+
+static int e1000_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (!e1000e_pm_ready(adapter))
+ return 0;
+
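+	/*
+	 * On the first idle callback after resume, schedule a suspend
+	 * attempt in one second if the link is down; always report busy
+	 * so the device is not suspended immediately.
+	 */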
+ if (adapter->idle_check) {
+ adapter->idle_check = false;
+ if (!e1000e_has_link(adapter))
+ pm_schedule_suspend(dev, MSEC_PER_SEC);
+ }
+
+ return -EBUSY;
+}
+
+static int e1000_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (!e1000e_pm_ready(adapter))
+ return 0;
+
+ adapter->idle_check = !dev->power.runtime_auto;
+ return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
+
+static void e1000_shutdown(struct pci_dev *pdev)
+{
+ bool wake = false;
+
+ __e1000_shutdown(pdev, &wake, false);
+
+ if (system_state == SYSTEM_POWER_OFF)
+ e1000_complete_shutdown(pdev, false, wake);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+
+static irqreturn_t e1000_intr_msix(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->msix_entries) {
+ int vector, msix_irq;
+
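+		/* the MSI-X vectors were requested in a fixed order: Rx, Tx, then "other" */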
+ vector = 0;
+ msix_irq = adapter->msix_entries[vector].vector;
+ disable_irq(msix_irq);
+ e1000_intr_msix_rx(msix_irq, netdev);
+ enable_irq(msix_irq);
+
+ vector++;
+ msix_irq = adapter->msix_entries[vector].vector;
+ disable_irq(msix_irq);
+ e1000_intr_msix_tx(msix_irq, netdev);
+ enable_irq(msix_irq);
+
+ vector++;
+ msix_irq = adapter->msix_entries[vector].vector;
+ disable_irq(msix_irq);
+ e1000_msix_other(msix_irq, netdev);
+ enable_irq(msix_irq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void e1000_netpoll(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ switch (adapter->int_mode) {
+ case E1000E_INT_MODE_MSIX:
+ e1000_intr_msix(adapter->pdev->irq, netdev);
+ break;
+ case E1000E_INT_MODE_MSI:
+ disable_irq(adapter->pdev->irq);
+ e1000_intr_msi(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+ break;
+ default: /* E1000E_INT_MODE_LEGACY */
+ disable_irq(adapter->pdev->irq);
+ e1000_intr(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+ break;
+ }
+}
+#endif
+
+/**
+ * e1000_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ e1000e_down(adapter);
+ pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * e1000_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the e1000_resume routine.
+ */
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 aspm_disable_flag = 0;
+ int err;
+ pci_ers_result_t result;
+
+ if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
+ aspm_disable_flag = PCIE_LINK_STATE_L0S;
+ if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+ aspm_disable_flag |= PCIE_LINK_STATE_L1;
+ if (aspm_disable_flag)
+ e1000e_disable_aspm(pdev, aspm_disable_flag);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pdev->state_saved = true;
+ pci_restore_state(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ e1000e_reset(adapter);
+ ew32(WUS, ~0);
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ return result;
+}
+
+/**
+ * e1000_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the e1000_resume routine.
+ */
+static void e1000_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ e1000_init_manageability_pt(adapter);
+
+ if (netif_running(netdev)) {
+ if (e1000e_up(adapter)) {
+ dev_err(&pdev->dev,
+ "can't bring device back up after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+
+ /*
+ * If the controller has AMT, do not set DRV_LOAD until the interface
+ * is up. For all other cases, let the f/w know that the h/w is now
+ * under the control of the driver.
+ */
+ if (!(adapter->flags & FLAG_HAS_AMT))
+ e1000e_get_hw_control(adapter);
+}
+
+static void e1000_print_device_info(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 ret_val;
+ u8 pba_str[E1000_PBANUM_LENGTH];
+
+ /* print bus type/speed/width info */
+ e_info("(PCI Express:2.5GT/s:%s) %pM\n",
+ /* bus width */
+ ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+ "Width x1"),
+ /* MAC address */
+ netdev->dev_addr);
+ e_info("Intel(R) PRO/%s Network Connection\n",
+ (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
+ ret_val = e1000_read_pba_string_generic(hw, pba_str,
+ E1000_PBANUM_LENGTH);
+ if (ret_val)
+ strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
+ e_info("MAC: %d, PHY: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, pba_str);
+}
+
+static void e1000_eeprom_checks(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int ret_val;
+ u16 buf = 0;
+
+ if (hw->mac.type != e1000_82573)
+ return;
+
+ ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
+ if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
+ /* Deep Smart Power Down (DSPD) */
+ dev_warn(&adapter->pdev->dev,
+ "Warning: detected DSPD enabled in EEPROM\n");
+ }
+}
+
+static int e1000_set_features(struct net_device *netdev, u32 features)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ u32 changed = features ^ netdev->features;
+
+ if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
+ adapter->flags |= FLAG_TSO_FORCE;
+
+ if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX |
+ NETIF_F_RXCSUM)))
+ return 0;
+
+ if (netif_running(netdev))
+ e1000e_reinit_locked(adapter);
+ else
+ e1000e_reset(adapter);
+
+ return 0;
+}
+
+static const struct net_device_ops e1000e_netdev_ops = {
+ .ndo_open = e1000_open,
+ .ndo_stop = e1000_close,
+ .ndo_start_xmit = e1000_xmit_frame,
+ .ndo_get_stats64 = e1000e_get_stats64,
+ .ndo_set_rx_mode = e1000_set_multi,
+ .ndo_set_mac_address = e1000_set_mac,
+ .ndo_change_mtu = e1000_change_mtu,
+ .ndo_do_ioctl = e1000_ioctl,
+ .ndo_tx_timeout = e1000_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+
+ .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = e1000_netpoll,
+#endif
+ .ndo_set_features = e1000_set_features,
+};
+
+/**
+ * e1000_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in e1000_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * e1000_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit e1000_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct e1000_adapter *adapter;
+ struct e1000_hw *hw;
+ const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
+ resource_size_t mmio_start, mmio_len;
+ resource_size_t flash_start, flash_len;
+
+ static int cards_found;
+ u16 aspm_disable_flag = 0;
+ int i, err, pci_using_dac;
+ u16 eeprom_data = 0;
+ u16 eeprom_apme_mask = E1000_EEPROM_APME;
+
+ if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
+ aspm_disable_flag = PCIE_LINK_STATE_L0S;
+ if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
+ aspm_disable_flag |= PCIE_LINK_STATE_L1;
+ if (aspm_disable_flag)
+ e1000e_disable_aspm(pdev, aspm_disable_flag);
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ pci_using_dac = 0;
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err)
+ pci_using_dac = 1;
+ } else {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA "
+ "configuration, aborting\n");
+ goto err_dma;
+ }
+ }
+ }
+
+ err = pci_request_selected_regions_exclusive(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM),
+ e1000e_driver_name);
+ if (err)
+ goto err_pci_reg;
+
+ /* AER (Advanced Error Reporting) hooks */
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+ /* PCI config space info */
+ err = pci_save_state(pdev);
+ if (err)
+ goto err_alloc_etherdev;
+
+ err = -ENOMEM;
+ netdev = alloc_etherdev(sizeof(struct e1000_adapter));
+ if (!netdev)
+ goto err_alloc_etherdev;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ netdev->irq = pdev->irq;
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ hw = &adapter->hw;
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->ei = ei;
+ adapter->pba = ei->pba;
+ adapter->flags = ei->flags;
+ adapter->flags2 = ei->flags2;
+ adapter->hw.adapter = adapter;
+ adapter->hw.mac.type = ei->mac;
+ adapter->max_hw_frame_size = ei->max_hw_frame_size;
+ adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
+
+ mmio_start = pci_resource_start(pdev, 0);
+ mmio_len = pci_resource_len(pdev, 0);
+
+ err = -EIO;
+ adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
+ if (!adapter->hw.hw_addr)
+ goto err_ioremap;
+
+ if ((adapter->flags & FLAG_HAS_FLASH) &&
+ (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+ flash_start = pci_resource_start(pdev, 1);
+ flash_len = pci_resource_len(pdev, 1);
+ adapter->hw.flash_address = ioremap(flash_start, flash_len);
+ if (!adapter->hw.flash_address)
+ goto err_flashmap;
+ }
+
+ /* construct the net_device struct */
+ netdev->netdev_ops = &e1000e_netdev_ops;
+ e1000e_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+ netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
+ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+ netdev->mem_start = mmio_start;
+ netdev->mem_end = mmio_start + mmio_len;
+
+ adapter->bd_number = cards_found++;
+
+ e1000e_check_options(adapter);
+
+ /* setup adapter struct */
+ err = e1000_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+ memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
+ memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
+ memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
+
+ err = ei->get_variants(adapter);
+ if (err)
+ goto err_hw_init;
+
+ if ((adapter->flags & FLAG_IS_ICH) &&
+ (adapter->flags & FLAG_READ_ONLY_NVM))
+ e1000e_write_protect_nvm_ich8lan(&adapter->hw);
+
+ hw->mac.ops.get_bus_info(&adapter->hw);
+
+ adapter->hw.phy.autoneg_wait_to_complete = 0;
+
+ /* Copper options */
+ if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+ adapter->hw.phy.mdix = AUTO_ALL_MODES;
+ adapter->hw.phy.disable_polarity_correction = 0;
+ adapter->hw.phy.ms_type = e1000_ms_hw_default;
+ }
+
+ if (e1000_check_reset_block(&adapter->hw))
+ e_info("PHY reset is blocked due to SOL/IDER session.\n");
+
+ /* Set initial default active device features */
+ netdev->features = (NETIF_F_SG |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM);
+
+ /* Set user-changeable features (subset of all device features) */
+ netdev->hw_features = netdev->features;
+
+ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
+ netdev->features |= NETIF_F_HW_VLAN_FILTER;
+
+ netdev->vlan_features |= (NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_HW_CSUM);
+
+ if (pci_using_dac) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
+
+ if (e1000e_enable_mng_pass_thru(&adapter->hw))
+ adapter->flags |= FLAG_MNG_PT_ENABLED;
+
+ /*
+ * before reading the NVM, reset the controller to
+ * put the device in a known good starting state
+ */
+ adapter->hw.mac.ops.reset_hw(&adapter->hw);
+
+ /*
+ * systems with ASPM and others may see the checksum fail on the first
+ * attempt. Let's give it a few tries
+ */
+ for (i = 0;; i++) {
+ if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
+ break;
+ if (i == 2) {
+ e_err("The NVM Checksum Is Not Valid\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+ }
+
+ e1000_eeprom_checks(adapter);
+
+ /* copy the MAC address */
+ if (e1000e_read_mac_addr(&adapter->hw))
+ e_err("NVM Read Error while reading MAC address\n");
+
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->perm_addr)) {
+ e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = e1000_watchdog;
+ adapter->watchdog_timer.data = (unsigned long) adapter;
+
+ init_timer(&adapter->phy_info_timer);
+ adapter->phy_info_timer.function = e1000_update_phy_info;
+ adapter->phy_info_timer.data = (unsigned long) adapter;
+
+ INIT_WORK(&adapter->reset_task, e1000_reset_task);
+ INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
+ INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
+ INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
+ INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
+
+ /* Initialize link parameters. User can change them with ethtool */
+ adapter->hw.mac.autoneg = 1;
+ adapter->fc_autoneg = 1;
+ adapter->hw.fc.requested_mode = e1000_fc_default;
+ adapter->hw.fc.current_mode = e1000_fc_default;
+ adapter->hw.phy.autoneg_advertised = 0x2f;
+
+ /* ring size defaults */
+ adapter->rx_ring->count = 256;
+ adapter->tx_ring->count = 256;
+
+ /*
+ * Initial Wake on LAN setting - If APM wake is enabled in
+ * the EEPROM, enable the ACPI Magic Packet filter
+ */
+ if (adapter->flags & FLAG_APME_IN_WUC) {
+ /* APME bit in EEPROM is mapped to WUC.APME */
+ eeprom_data = er32(WUC);
+ eeprom_apme_mask = E1000_WUC_APME;
+ if ((hw->mac.type > e1000_ich10lan) &&
+ (eeprom_data & E1000_WUC_PHY_WAKE))
+ adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
+ } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
+ if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
+ (adapter->hw.bus.func == 1))
+ e1000_read_nvm(&adapter->hw,
+ NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+ else
+ e1000_read_nvm(&adapter->hw,
+ NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+ }
+
+ /* fetch WoL from EEPROM */
+ if (eeprom_data & eeprom_apme_mask)
+ adapter->eeprom_wol |= E1000_WUFC_MAG;
+
+ /*
+ * now that we have the eeprom settings, apply the special cases
+ * where the eeprom may be wrong or the board simply won't support
+ * wake on lan on a particular port
+ */
+ if (!(adapter->flags & FLAG_HAS_WOL))
+ adapter->eeprom_wol = 0;
+
+ /* initialize the wol settings based on the eeprom settings */
+ adapter->wol = adapter->eeprom_wol;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ /* save off EEPROM version number */
+ e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
+
+ /* reset the hardware with the new settings */
+ e1000e_reset(adapter);
+
+ /*
+ * If the controller has AMT, do not set DRV_LOAD until the interface
+ * is up. For all other cases, let the f/w know that the h/w is now
+ * under the control of the driver.
+ */
+ if (!(adapter->flags & FLAG_HAS_AMT))
+ e1000e_get_hw_control(adapter);
+
+ strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+ e1000_print_device_info(adapter);
+
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return 0;
+
+err_register:
+ if (!(adapter->flags & FLAG_HAS_AMT))
+ e1000e_release_hw_control(adapter);
+err_eeprom:
+ if (!e1000_check_reset_block(&adapter->hw))
+ e1000_phy_hw_reset(&adapter->hw);
+err_hw_init:
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+err_sw_init:
+ if (adapter->hw.flash_address)
+ iounmap(adapter->hw.flash_address);
+ e1000e_reset_interrupt_capability(adapter);
+err_flashmap:
+ iounmap(adapter->hw.hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * e1000_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * e1000_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void __devexit e1000_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ bool down = test_bit(__E1000_DOWN, &adapter->state);
+
+ /*
+	 * The timers may reschedule themselves, so explicitly prevent
+	 * them from being rescheduled.
+ */
+ if (!down)
+ set_bit(__E1000_DOWN, &adapter->state);
+ del_timer_sync(&adapter->watchdog_timer);
+ del_timer_sync(&adapter->phy_info_timer);
+
+ cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->watchdog_task);
+ cancel_work_sync(&adapter->downshift_task);
+ cancel_work_sync(&adapter->update_phy_task);
+ cancel_work_sync(&adapter->print_hang_task);
+
+ if (!(netdev->flags & IFF_UP))
+ e1000_power_down_phy(adapter);
+
+ /* Don't lie to e1000_close() down the road. */
+ if (!down)
+ clear_bit(__E1000_DOWN, &adapter->state);
+ unregister_netdev(netdev);
+
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_get_noresume(&pdev->dev);
+
+ /*
+ * Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant.
+ */
+ e1000e_release_hw_control(adapter);
+
+ e1000e_reset_interrupt_capability(adapter);
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+
+ iounmap(adapter->hw.hw_addr);
+ if (adapter->hw.flash_address)
+ iounmap(adapter->hw.flash_address);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
+ free_netdev(netdev);
+
+ /* AER disable */
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+/* PCI Error Recovery (ERS) */
+static struct pci_error_handlers e1000_err_handler = {
+ .error_detected = e1000_io_error_detected,
+ .slot_reset = e1000_io_slot_reset,
+ .resume = e1000_io_resume,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
+ board_80003es2lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
+ board_80003es2lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
+ board_80003es2lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
+ board_80003es2lan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
+
+ { } /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops e1000_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
+ SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
+ e1000_runtime_resume, e1000_idle)
+};
+#endif
+
+/* PCI Device API Driver */
+static struct pci_driver e1000_driver = {
+ .name = e1000e_driver_name,
+ .id_table = e1000_pci_tbl,
+ .probe = e1000_probe,
+ .remove = __devexit_p(e1000_remove),
+#ifdef CONFIG_PM
+ .driver.pm = &e1000_pm_ops,
+#endif
+ .shutdown = e1000_shutdown,
+ .err_handler = &e1000_err_handler
+};
+
+/**
+ * e1000_init_module - Driver Registration Routine
+ *
+ * e1000_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init e1000_init_module(void)
+{
+ int ret;
+ pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
+ e1000e_driver_version);
+ pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
+ ret = pci_register_driver(&e1000_driver);
+
+ return ret;
+}
+module_init(e1000_init_module);
+
+/**
+ * e1000_exit_module - Driver Exit Cleanup Routine
+ *
+ * e1000_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit e1000_exit_module(void)
+{
+ pci_unregister_driver(&e1000_driver);
+}
+module_exit(e1000_exit_module);
+
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/* netdev.c */
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
new file mode 100644
index 000000000000..4dd9b63273f6
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -0,0 +1,478 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+
+#include "e1000.h"
+
+/*
+ * This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define E1000_MAX_NIC 32
+
+#define OPTION_UNSET -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED 1
+
+#define COPYBREAK_DEFAULT 256
+unsigned int copybreak = COPYBREAK_DEFAULT;
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+ "Maximum size of packet that is copied to a new buffer on receive");
+
+/*
+ * All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
+#define E1000_PARAM(X, desc) \
+ static int __devinitdata X[E1000_MAX_NIC+1] \
+ = E1000_PARAM_INIT; \
+ static unsigned int num_##X; \
+ module_param_array_named(X, X, int, &num_##X, 0); \
+ MODULE_PARM_DESC(X, desc);
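+
+/*
+ * For example, E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay")
+ * declares the per-board array TxIntDelay[], its element count
+ * num_TxIntDelay, and a module parameter array of the same name.
+ */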
+
+/*
+ * Transmit Interrupt Delay in units of 1.024 microseconds
+ * Tx interrupt delay typically needs to be set to something non-zero
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
+#define DEFAULT_TIDV 8
+#define MAX_TXDELAY 0xFFFF
+#define MIN_TXDELAY 0
+
+/*
+ * Transmit Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
+#define DEFAULT_TADV 32
+#define MAX_TXABSDELAY 0xFFFF
+#define MIN_TXABSDELAY 0
+
+/*
+ * Receive Interrupt Delay in units of 1.024 microseconds
+ * Hardware will likely hang if you set this to anything but zero.
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
+#define MAX_RXDELAY 0xFFFF
+#define MIN_RXDELAY 0
+
+/*
+ * Receive Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
+#define MAX_RXABSDELAY 0xFFFF
+#define MIN_RXABSDELAY 0
+
+/*
+ * Interrupt Throttle Rate (interrupts/sec)
+ *
+ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative, 4=simplified)
+ */
+E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+#define DEFAULT_ITR 3
+#define MAX_ITR 100000
+#define MIN_ITR 100
+
+/* IntMode (Interrupt Mode)
+ *
+ * Valid Range: 0 - 2
+ *
+ * Default Value: 2 (MSI-X)
+ */
+E1000_PARAM(IntMode, "Interrupt Mode");
+#define MAX_INTMODE 2
+#define MIN_INTMODE 0
+
+/*
+ * Enable Smart Power Down of the PHY
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled)
+ */
+E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
+
+/*
+ * Enable Kumeran Lock Loss workaround
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
+
+/*
+ * Write Protect NVM
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
+
+/*
+ * Enable CRC Stripping
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \
+ "the CRC");
+
+struct e1000_option {
+ enum { enable_option, range_option, list_option } type;
+ const char *name;
+ const char *err;
+ int def;
+ union {
+ struct { /* range_option info */
+ int min;
+ int max;
+ } r;
+ struct { /* list_option info */
+ int nr;
+ struct e1000_opt_list { int i; char *str; } *p;
+ } l;
+ } arg;
+};
+
+static int __devinit e1000_validate_option(unsigned int *value,
+ const struct e1000_option *opt,
+ struct e1000_adapter *adapter)
+{
+ if (*value == OPTION_UNSET) {
+ *value = opt->def;
+ return 0;
+ }
+
+ switch (opt->type) {
+ case enable_option:
+ switch (*value) {
+ case OPTION_ENABLED:
+ e_info("%s Enabled\n", opt->name);
+ return 0;
+ case OPTION_DISABLED:
+ e_info("%s Disabled\n", opt->name);
+ return 0;
+ }
+ break;
+ case range_option:
+ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+ e_info("%s set to %i\n", opt->name, *value);
+ return 0;
+ }
+ break;
+ case list_option: {
+ int i;
+ struct e1000_opt_list *ent;
+
+ for (i = 0; i < opt->arg.l.nr; i++) {
+ ent = &opt->arg.l.p[i];
+ if (*value == ent->i) {
+ if (ent->str[0] != '\0')
+ e_info("%s\n", ent->str);
+ return 0;
+ }
+ }
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ e_info("Invalid %s value specified (%i) %s\n", opt->name, *value,
+ opt->err);
+ *value = opt->def;
+ return -1;
+}
+
+/**
+ * e1000e_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input. If an invalid value is given, or if no user specified
+ * value exists, a default value is used. The final value is stored
+ * in a variable in the adapter structure.
+ **/
+void __devinit e1000e_check_options(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int bd = adapter->bd_number;
+
+ if (bd >= E1000_MAX_NIC) {
+ e_notice("Warning: no configuration for board #%i\n", bd);
+ e_notice("Using defaults for all values\n");
+ }
+
+ { /* Transmit Interrupt Delay */
+ static const struct e1000_option opt = {
+ .type = range_option,
+ .name = "Transmit Interrupt Delay",
+ .err = "using default of "
+ __MODULE_STRING(DEFAULT_TIDV),
+ .def = DEFAULT_TIDV,
+ .arg = { .r = { .min = MIN_TXDELAY,
+ .max = MAX_TXDELAY } }
+ };
+
+ if (num_TxIntDelay > bd) {
+ adapter->tx_int_delay = TxIntDelay[bd];
+ e1000_validate_option(&adapter->tx_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->tx_int_delay = opt.def;
+ }
+ }
+ { /* Transmit Absolute Interrupt Delay */
+ static const struct e1000_option opt = {
+ .type = range_option,
+ .name = "Transmit Absolute Interrupt Delay",
+ .err = "using default of "
+ __MODULE_STRING(DEFAULT_TADV),
+ .def = DEFAULT_TADV,
+ .arg = { .r = { .min = MIN_TXABSDELAY,
+ .max = MAX_TXABSDELAY } }
+ };
+
+ if (num_TxAbsIntDelay > bd) {
+ adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
+ e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->tx_abs_int_delay = opt.def;
+ }
+ }
+ { /* Receive Interrupt Delay */
+ static struct e1000_option opt = {
+ .type = range_option,
+ .name = "Receive Interrupt Delay",
+ .err = "using default of "
+ __MODULE_STRING(DEFAULT_RDTR),
+ .def = DEFAULT_RDTR,
+ .arg = { .r = { .min = MIN_RXDELAY,
+ .max = MAX_RXDELAY } }
+ };
+
+ if (num_RxIntDelay > bd) {
+ adapter->rx_int_delay = RxIntDelay[bd];
+ e1000_validate_option(&adapter->rx_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->rx_int_delay = opt.def;
+ }
+ }
+ { /* Receive Absolute Interrupt Delay */
+ static const struct e1000_option opt = {
+ .type = range_option,
+ .name = "Receive Absolute Interrupt Delay",
+ .err = "using default of "
+ __MODULE_STRING(DEFAULT_RADV),
+ .def = DEFAULT_RADV,
+ .arg = { .r = { .min = MIN_RXABSDELAY,
+ .max = MAX_RXABSDELAY } }
+ };
+
+ if (num_RxAbsIntDelay > bd) {
+ adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
+ e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
+ adapter);
+ } else {
+ adapter->rx_abs_int_delay = opt.def;
+ }
+ }
+ { /* Interrupt Throttling Rate */
+ static const struct e1000_option opt = {
+ .type = range_option,
+ .name = "Interrupt Throttling Rate (ints/sec)",
+ .err = "using default of "
+ __MODULE_STRING(DEFAULT_ITR),
+ .def = DEFAULT_ITR,
+ .arg = { .r = { .min = MIN_ITR,
+ .max = MAX_ITR } }
+ };
+
+ if (num_InterruptThrottleRate > bd) {
+ adapter->itr = InterruptThrottleRate[bd];
+ switch (adapter->itr) {
+ case 0:
+ e_info("%s turned off\n", opt.name);
+ break;
+ case 1:
+ e_info("%s set to dynamic mode\n", opt.name);
+ adapter->itr_setting = adapter->itr;
+ adapter->itr = 20000;
+ break;
+ case 3:
+ e_info("%s set to dynamic conservative mode\n",
+ opt.name);
+ adapter->itr_setting = adapter->itr;
+ adapter->itr = 20000;
+ break;
+ case 4:
+ e_info("%s set to simplified (2000-8000 ints) "
+ "mode\n", opt.name);
+ adapter->itr_setting = 4;
+ break;
+ default:
+ /*
+ * Save the setting, because the dynamic bits
+ * change itr.
+ */
+ if (e1000_validate_option(&adapter->itr, &opt,
+ adapter) &&
+ (adapter->itr == 3)) {
+ /*
+ * In case of invalid user value,
+ * default to conservative mode.
+ */
+ adapter->itr_setting = adapter->itr;
+ adapter->itr = 20000;
+ } else {
+ /*
+ * Clear the lower two bits because
+ * they are used as control.
+ */
+ adapter->itr_setting =
+ adapter->itr & ~3;
+ }
+ break;
+ }
+ } else {
+ adapter->itr_setting = opt.def;
+ adapter->itr = 20000;
+ }
+ }
+ { /* Interrupt Mode */
+ static struct e1000_option opt = {
+ .type = range_option,
+ .name = "Interrupt Mode",
+ .err = "defaulting to 2 (MSI-X)",
+ .def = E1000E_INT_MODE_MSIX,
+ .arg = { .r = { .min = MIN_INTMODE,
+ .max = MAX_INTMODE } }
+ };
+
+ if (num_IntMode > bd) {
+ unsigned int int_mode = IntMode[bd];
+ e1000_validate_option(&int_mode, &opt, adapter);
+ adapter->int_mode = int_mode;
+ } else {
+ adapter->int_mode = opt.def;
+ }
+ }
+ { /* Smart Power Down */
+ static const struct e1000_option opt = {
+ .type = enable_option,
+ .name = "PHY Smart Power Down",
+ .err = "defaulting to Disabled",
+ .def = OPTION_DISABLED
+ };
+
+ if (num_SmartPowerDownEnable > bd) {
+ unsigned int spd = SmartPowerDownEnable[bd];
+ e1000_validate_option(&spd, &opt, adapter);
+ if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN)
+ && spd)
+ adapter->flags |= FLAG_SMART_POWER_DOWN;
+ }
+ }
+ { /* CRC Stripping */
+ static const struct e1000_option opt = {
+ .type = enable_option,
+ .name = "CRC Stripping",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+
+ if (num_CrcStripping > bd) {
+ unsigned int crc_stripping = CrcStripping[bd];
+ e1000_validate_option(&crc_stripping, &opt, adapter);
+ if (crc_stripping == OPTION_ENABLED)
+ adapter->flags2 |= FLAG2_CRC_STRIPPING;
+ } else {
+ adapter->flags2 |= FLAG2_CRC_STRIPPING;
+ }
+ }
+ { /* Kumeran Lock Loss Workaround */
+ static const struct e1000_option opt = {
+ .type = enable_option,
+ .name = "Kumeran Lock Loss Workaround",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+
+ if (num_KumeranLockLoss > bd) {
+ unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
+ e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
+ if (hw->mac.type == e1000_ich8lan)
+ e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
+ kmrn_lock_loss);
+ } else {
+ if (hw->mac.type == e1000_ich8lan)
+ e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
+ opt.def);
+ }
+ }
+ { /* Write-protect NVM */
+ static const struct e1000_option opt = {
+ .type = enable_option,
+ .name = "Write-protect NVM",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+
+ if (adapter->flags & FLAG_IS_ICH) {
+ if (num_WriteProtectNVM > bd) {
+ unsigned int write_protect_nvm = WriteProtectNVM[bd];
+ e1000_validate_option(&write_protect_nvm, &opt,
+ adapter);
+ if (write_protect_nvm)
+ adapter->flags |= FLAG_READ_ONLY_NVM;
+ } else {
+ if (opt.def)
+ adapter->flags |= FLAG_READ_ONLY_NVM;
+ }
+ }
+ }
+}
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
new file mode 100644
index 000000000000..8666476cb9be
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -0,0 +1,3377 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/delay.h>
+
+#include "e1000.h"
+
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
+static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
+static s32 e1000_wait_autoneg(struct e1000_hw *hw);
+static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg);
+static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read, bool page_set);
+static u32 e1000_get_phy_addr_for_hv_page(u32 page);
+static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read);
+
+/* Cable length tables */
+static const u16 e1000_m88_cable_length_table[] = {
+ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
+ ARRAY_SIZE(e1000_m88_cable_length_table)
+
+static const u16 e1000_igp_2_cable_length_table[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
+ 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
+ 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
+ 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
+ 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
+ 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
+ 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
+ 124};
+#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
+ ARRAY_SIZE(e1000_igp_2_cable_length_table)
+
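+ /*
+ * BM/HV PHY register "offsets" encode both the page and the register
+ * number; these helpers unpack the two parts.
+ */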
+#define BM_PHY_REG_PAGE(offset) \
+ ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
+#define BM_PHY_REG_NUM(offset) \
+ ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
+ (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
+ ~MAX_PHY_REG_ADDRESS)))
+
+#define HV_INTC_FC_PAGE_START 768
+#define I82578_ADDR_REG 29
+#define I82577_ADDR_REG 16
+#define I82577_CFG_REG 22
+#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
+#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
+#define I82577_CTRL_REG 23
+
+/* 82577 specific PHY registers */
+#define I82577_PHY_CTRL_2 18
+#define I82577_PHY_STATUS_2 26
+#define I82577_PHY_DIAG_STATUS 31
+
+/* I82577 PHY Status 2 */
+#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
+#define I82577_PHY_STATUS2_MDIX 0x0800
+#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
+#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
+
+/* I82577 PHY Control 2 */
+#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400
+#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200
+
+/* I82577 PHY Diagnostics Status */
+#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
+#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
+
+/* BM PHY Copper Specific Control 1 */
+#define BM_CS_CTRL1 16
+
+#define HV_MUX_DATA_CTRL PHY_REG(776, 16)
+#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400
+#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004
+
+/**
+ * e1000e_check_reset_block_generic - Check if PHY reset is blocked
+ * @hw: pointer to the HW structure
+ *
+ * Read the PHY management control register and check whether a PHY reset
+ * is blocked. If a reset is not blocked return 0, otherwise
+ * return E1000_BLK_PHY_RESET (12).
+ **/
+s32 e1000e_check_reset_block_generic(struct e1000_hw *hw)
+{
+ u32 manc;
+
+ manc = er32(MANC);
+
+ return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
+ E1000_BLK_PHY_RESET : 0;
+}
+
+/**
+ * e1000e_get_phy_id - Retrieve the PHY ID and revision
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY registers and stores the PHY ID and possibly the PHY
+ * revision in the hardware structure.
+ **/
+s32 e1000e_get_phy_id(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+ u16 phy_id;
+ u16 retry_count = 0;
+
+ if (!(phy->ops.read_reg))
+ goto out;
+
+ while (retry_count < 2) {
+ ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
+ if (ret_val)
+ goto out;
+
+ phy->id = (u32)(phy_id << 16);
+ udelay(20);
+ ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
+ if (ret_val)
+ goto out;
+
+ phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+ phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
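+ /*
+ * An ID of all zeros, or of all ones outside the revision field,
+ * means the read was bad or the PHY is not ready, so retry.
+ */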
+ if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
+ goto out;
+
+ retry_count++;
+ }
+out:
+ return ret_val;
+}
+
+/**
+ * e1000e_phy_reset_dsp - Reset PHY DSP
+ * @hw: pointer to the HW structure
+ *
+ * Reset the digital signal processor.
+ **/
+s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ ret_val = e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
+ if (ret_val)
+ return ret_val;
+
+ return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0);
+}
+
+/**
+ * e1000e_read_phy_reg_mdic - Read MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the MDI control register in the PHY at offset and stores the
+ * information read to data.
+ **/
+s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, mdic = 0;
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ e_dbg("PHY Address %d is out of range\n", offset);
+ return -E1000_ERR_PARAM;
+ }
+
+ /*
+ * Set up Op-code, Phy Address, and register offset in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
+
+ ew32(MDIC, mdic);
+
+ /*
+ * Poll the ready bit to see if the MDI read completed.
+ * The timeout is increased because testing showed failures
+ * with a lower timeout.
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ udelay(50);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ e_dbg("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+ *data = (u16) mdic;
+
+ /*
+ * Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ udelay(100);
+
+ return 0;
+}
+
+/**
+ * e1000e_write_phy_reg_mdic - Write MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write to register at offset
+ *
+ * Writes data to MDI control register in the PHY at offset.
+ **/
+s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, mdic = 0;
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ e_dbg("PHY Address %d is out of range\n", offset);
+ return -E1000_ERR_PARAM;
+ }
+
+ /*
+ * Set up Op-code, Phy Address, and register offset in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to write the desired data.
+ */
+ mdic = (((u32)data) |
+ (offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
+
+ ew32(MDIC, mdic);
+
+ /*
+ * Poll the ready bit to see if the MDI write completed.
+ * The timeout is increased because testing showed failures
+ * with a lower timeout.
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ udelay(50);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ e_dbg("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+
+ /*
+ * Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ udelay(100);
+
+ return 0;
+}
+
+/**
+ * e1000e_read_phy_reg_m88 - Read m88 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Releases any acquired
+ * semaphores before exiting.
+ **/
+s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000e_write_phy_reg_m88 - Write m88 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_set_page_igp - Set page as on IGP-like PHY(s)
+ * @hw: pointer to the HW structure
+ * @page: page to set (shifted left when necessary)
+ *
+ * Sets PHY page required for PHY register access. Assumes semaphore is
+ * already acquired. Note, this function sets phy.addr to 1 so the caller
+ * must set it appropriately (if necessary) after this function returns.
+ **/
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
+{
+ e_dbg("Setting page 0x%x\n", page);
+
+ hw->phy.addr = 1;
+
+ return e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page);
+}
+
+/**
+ * __e1000e_read_phy_reg_igp - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphores before exiting.
+ **/
+static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked)
+{
+ s32 ret_val = 0;
+
+ if (!locked) {
+ if (!(hw->phy.ops.acquire))
+ goto out;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+ }
+
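+ /*
+ * Offsets above MAX_PHY_MULTI_PAGE_REG live on a non-default page;
+ * select that page before issuing the MDIC access.
+ */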
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ ret_val = e1000e_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (u16)offset);
+ if (ret_val)
+ goto release;
+ }
+
+ ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+release:
+ if (!locked)
+ hw->phy.ops.release(hw);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000e_read_phy_reg_igp - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset and stores the
+ * retrieved information in data.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000e_read_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ * e1000e_read_phy_reg_igp_locked - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired.
+ **/
+s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000e_read_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ * __e1000e_write_phy_reg_igp - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked)
+{
+ s32 ret_val = 0;
+
+ if (!locked) {
+ if (!(hw->phy.ops.acquire))
+ goto out;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+ }
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ ret_val = e1000e_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (u16)offset);
+ if (ret_val)
+ goto release;
+ }
+
+ ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+release:
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000e_write_phy_reg_igp - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000e_write_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ * e1000e_write_phy_reg_igp_locked - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000e_write_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ * __e1000_read_kmrn_reg - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary. Then reads the PHY register at offset
+ * using the kumeran interface. The information retrieved is stored in data.
+ * Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked)
+{
+ u32 kmrnctrlsta;
+ s32 ret_val = 0;
+
+ if (!locked) {
+ if (!(hw->phy.ops.acquire))
+ goto out;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+ }
+
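+ /*
+ * Program the register offset with the read-enable (REN) bit set;
+ * the MAC performs the Kumeran read and latches the result back
+ * into the KMRNCTRLSTA register.
+ */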
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
+
+ udelay(2);
+
+ kmrnctrlsta = er32(KMRNCTRLSTA);
+ *data = (u16)kmrnctrlsta;
+
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000e_read_kmrn_reg - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset using the
+ * kumeran interface. The information retrieved is stored in data.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ * e1000e_read_kmrn_reg_locked - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the kumeran interface. The
+ * information retrieved is stored in data.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ * __e1000_write_kmrn_reg - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary. Then writes the data to the PHY register
+ * at the offset using the kumeran interface. Releases any acquired semaphores
+ * before exiting.
+ **/
+static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked)
+{
+ u32 kmrnctrlsta;
+ s32 ret_val = 0;
+
+ if (!locked) {
+ if (!(hw->phy.ops.acquire))
+ goto out;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+ }
+
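+ /*
+ * Without the REN bit this is a Kumeran write: the low 16 bits of
+ * KMRNCTRLSTA carry the data for the given register offset.
+ */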
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | data;
+ ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
+
+ udelay(2);
+
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000e_write_kmrn_reg - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to the PHY register at the offset
+ * using the kumeran interface. Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ * e1000e_write_kmrn_reg_locked - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Write the data to PHY register at the offset using the kumeran interface.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up Carrier-sense on Transmit and downshift values.
+ **/
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
+ ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data |= I82577_CFG_ASSERT_CRS_ON_TX;
+
+ /* Enable downshift */
+ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
+
+ ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000e_copper_link_setup_m88 - Setup m88 PHYs for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up MDI/MDI-X and polarity for m88 PHYs. If necessary, the transmit
+ * clock and downshift values are also set.
+ **/
+s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
+ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* For BM PHY this bit is downshift enable */
+ if (phy->type != e1000_phy_bm)
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+ /*
+ * Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+ switch (phy->mdix) {
+ case 1:
+ phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+ break;
+ case 2:
+ phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+ break;
+ case 3:
+ phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+ break;
+ case 0:
+ default:
+ phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+ break;
+ }
+
+ /*
+ * Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+ if (phy->disable_polarity_correction == 1)
+ phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+ /* Enable downshift on BM (disabled by default) */
+ if (phy->type == e1000_phy_bm)
+ phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT;
+
+ ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if ((phy->type == e1000_phy_m88) &&
+ (phy->revision < E1000_REVISION_4) &&
+ (phy->id != BME1000_E_PHY_ID_R2)) {
+ /*
+ * Force TX_CLK in the Extended PHY Specific Control Register
+ * to 25MHz clock.
+ */
+ ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+ if ((phy->revision == 2) &&
+ (phy->id == M88E1111_I_PHY_ID)) {
+ /* 82573L PHY - set the downshift counter to 5x. */
+ phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
+ phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+ } else {
+ /* Configure Master and Slave downshift values */
+ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+ phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+ }
+ ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) {
+ /* Set PHY page 0, register 29 to 0x0003 */
+ ret_val = e1e_wphy(hw, 29, 0x0003);
+ if (ret_val)
+ return ret_val;
+
+ /* Set PHY page 0, register 30 to 0x0000 */
+ ret_val = e1e_wphy(hw, 30, 0x0000);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Commit the changes. */
+ ret_val = e1000e_commit_phy(hw);
+ if (ret_val) {
+ e_dbg("Error committing the PHY changes\n");
+ return ret_val;
+ }
+
+ if (phy->type == e1000_phy_82578) {
+ ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* 82578 PHY - set the downshift count to 1x. */
+ phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
+ phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
+ ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000e_copper_link_setup_igp - Setup igp PHYs for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
+ * igp PHYs.
+ **/
+s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ ret_val = e1000_phy_hw_reset(hw);
+ if (ret_val) {
+ e_dbg("Error resetting the PHY.\n");
+ return ret_val;
+ }
+
+ /*
+ * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
+ * timeout issues when LFS is enabled.
+ */
+ msleep(100);
+
+ /* disable lplu d0 during driver init */
+ ret_val = e1000_set_d0_lplu_state(hw, false);
+ if (ret_val) {
+ e_dbg("Error Disabling LPLU D0\n");
+ return ret_val;
+ }
+ /* Configure mdi-mdix settings */
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+ switch (phy->mdix) {
+ case 1:
+ data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 2:
+ data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 0:
+ default:
+ data |= IGP01E1000_PSCR_AUTO_MDIX;
+ break;
+ }
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ /* set auto-master slave resolution settings */
+ if (hw->mac.autoneg) {
+ /*
+ * when autonegotiation advertisement is only 1000Mbps then we
+ * should disable SmartSpeed and enable Auto MasterSlave
+ * resolution as hardware default.
+ */
+ if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
+ /* Disable SmartSpeed */
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+
+ /* Set auto Master/Slave resolution process */
+ ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~CR_1000T_MS_ENABLE;
+ ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ /* load defaults for future use */
+ phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
+ ((data & CR_1000T_MS_VALUE) ?
+ e1000_ms_force_master :
+ e1000_ms_force_slave) :
+ e1000_ms_auto;
+
+ switch (phy->ms_type) {
+ case e1000_ms_force_master:
+ data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_force_slave:
+ data |= CR_1000T_MS_ENABLE;
+ data &= ~(CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_auto:
+ data &= ~CR_1000T_MS_ENABLE;
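+ /* fall-through */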
+ default:
+ break;
+ }
+ ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
+ * @hw: pointer to the HW structure
+ *
+ * Reads the MII auto-neg advertisement register and/or the 1000T control
+ * register and if the PHY is already set up for auto-negotiation, then
+ * returns successful. Otherwise, sets up advertisement and flow control to
+ * the appropriate values for the desired auto-negotiation.
+ **/
+static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 mii_autoneg_adv_reg;
+ u16 mii_1000t_ctrl_reg = 0;
+
+ phy->autoneg_advertised &= phy->autoneg_mask;
+
+ /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+ ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /*
+ * Need to parse both autoneg_advertised and fc and set up
+ * the appropriate PHY registers. First we will parse for
+ * autoneg_advertised software override. Since we can advertise
+ * a plethora of combinations, we need to check each bit
+ * individually.
+ */
+
+ /*
+ * First we clear all the 10/100 mb speed bits in the Auto-Neg
+ * Advertisement Register (Address 4) and the 1000 mb speed bits in
+ * the 1000Base-T Control Register (Address 9).
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
+ NWAY_AR_100TX_HD_CAPS |
+ NWAY_AR_10T_FD_CAPS |
+ NWAY_AR_10T_HD_CAPS);
+ mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+
+ e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);
+
+ /* Do we want to advertise 10 Mb Half Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
+ e_dbg("Advertise 10mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+ }
+
+ /* Do we want to advertise 10 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
+ e_dbg("Advertise 10mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Half Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
+ e_dbg("Advertise 100mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
+ e_dbg("Advertise 100mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+ }
+
+ /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+ if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
+ e_dbg("Advertise 1000mb Half duplex request denied!\n");
+
+ /* Do we want to advertise 1000 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
+ e_dbg("Advertise 1000mb Full duplex\n");
+ mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+ }
+
+ /*
+ * Check for a software override of the flow control settings, and
+ * setup the PHY advertisement registers accordingly. If
+ * auto-negotiation is enabled, then software will have to set the
+ * "PAUSE" bits to the correct value in the Auto-Negotiation
+ * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
+ * negotiation.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: No software override. The flow control configuration
+ * in the EEPROM is used.
+ */
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+ /*
+ * Flow control (Rx & Tx) is completely disabled by a
+ * software over-ride.
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case e1000_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled, and Tx Flow control is
+ * disabled, by a software over-ride.
+ *
+ * Since there really isn't a way to advertise that we are
+ * capable of Rx Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric Rx PAUSE. Later
+ * (in e1000e_config_fc_after_link_up) we will disable the
+ * hw's ability to send PAUSE frames.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case e1000_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled, by a software over-ride.
+ */
+ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+ mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+ break;
+ case e1000_fc_full:
+ /*
+ * Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ default:
+ e_dbg("Flow control param set incorrectly\n");
+ ret_val = -E1000_ERR_CONFIG;
+ return ret_val;
+ }
+
+ ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL)
+ ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
+
+ return ret_val;
+}
+
+/**
+ * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Performs initial bounds checking on the autoneg advertisement parameter,
+ * then configures the advertised capabilities (defaulting to the full
+ * capability if none were requested). Sets up the PHY to autoneg and
+ * restarts the negotiation process with the link partner. If
+ * autoneg_wait_to_complete is set, waits for autoneg to complete before
+ * exiting.
+ **/
+static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_ctrl;
+
+ /*
+ * Perform some bounds checking on the autoneg advertisement
+ * parameter.
+ */
+ phy->autoneg_advertised &= phy->autoneg_mask;
+
+ /*
+ * If autoneg_advertised is zero, we assume it was not defaulted
+ * by the calling code so we set to advertise full capability.
+ */
+ if (phy->autoneg_advertised == 0)
+ phy->autoneg_advertised = phy->autoneg_mask;
+
+ e_dbg("Reconfiguring auto-neg advertisement params\n");
+ ret_val = e1000_phy_setup_autoneg(hw);
+ if (ret_val) {
+ e_dbg("Error Setting up Auto-Negotiation\n");
+ return ret_val;
+ }
+ e_dbg("Restarting Auto-Neg\n");
+
+ /*
+ * Restart auto-negotiation by setting the Auto Neg Enable bit and
+ * the Auto Neg Restart bit in the PHY control register.
+ */
+ ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+ ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * Does the user want to wait for Auto-Neg to complete here, or
+ * check at a later time (for example, callback routine).
+ */
+ if (phy->autoneg_wait_to_complete) {
+ ret_val = e1000_wait_autoneg(hw);
+ if (ret_val) {
+ e_dbg("Error while waiting for autoneg to complete\n");
+ return ret_val;
+ }
+ }
+
+ hw->mac.get_link_status = 1;
+
+ return ret_val;
+}
+
+/**
+ * e1000e_setup_copper_link - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Calls the appropriate function to configure the link for auto-neg or forced
+ * speed and duplex. Then we check for link; once link is established, the
+ * collision distance and flow control are configured. If link is not
+ * established, a debug message is logged and the status is returned.
+ **/
+s32 e1000e_setup_copper_link(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ bool link;
+
+ if (hw->mac.autoneg) {
+ /*
+ * Setup autoneg and flow control advertisement and perform
+ * autonegotiation.
+ */
+ ret_val = e1000_copper_link_autoneg(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /*
+ * PHY will be set to 10H, 10F, 100H or 100F
+ * depending on user settings.
+ */
+ e_dbg("Forcing Speed and Duplex\n");
+ ret_val = e1000_phy_force_speed_duplex(hw);
+ if (ret_val) {
+ e_dbg("Error Forcing Speed and Duplex\n");
+ return ret_val;
+ }
+ }
+
+ /*
+ * Check link status. Wait up to 100 microseconds for link to become
+ * valid.
+ */
+ ret_val = e1000e_phy_has_link_generic(hw,
+ COPPER_LINK_UP_LIMIT,
+ 10,
+ &link);
+ if (ret_val)
+ return ret_val;
+
+ if (link) {
+ e_dbg("Valid link established!!!\n");
+ e1000e_config_collision_dist(hw);
+ ret_val = e1000e_config_fc_after_link_up(hw);
+ } else {
+ e_dbg("Unable to establish link!!!\n");
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000e_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex. Clears the
+ * auto-crossover to force MDI manually. Waits for link, retrying once if
+ * it does not come up, and returns the resulting status.
+ **/
+s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * Clear Auto-Crossover to force MDI manually. IGP requires MDI
+ * forced whenever speed and duplex are forced.
+ */
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e_dbg("IGP PSCR: %X\n", phy_data);
+
+ udelay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ e_dbg("Waiting for forced speed/duplex link on IGP phy.\n");
+
+ ret_val = e1000e_phy_has_link_generic(hw,
+ PHY_FORCE_LIMIT,
+ 100000,
+ &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ e_dbg("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = e1000e_phy_has_link_generic(hw,
+ PHY_FORCE_LIMIT,
+ 100000,
+ &link);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000e_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex. Clears the
+ * auto-crossover to force MDI manually. Resets the PHY to commit the
+ * changes. If time expires while waiting for link up, we reset the DSP.
+ * After reset, TX_CLK and CRS on Tx must be set. Returns 0 upon
+ * successful completion, else the corresponding error code.
+ **/
+s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ /*
+ * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+ * forced whenever speed and duplex are forced.
+ */
+ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+ ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e_dbg("M88E1000 PSCR: %X\n", phy_data);
+
+ ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Reset the phy to commit changes. */
+ ret_val = e1000e_commit_phy(hw);
+ if (ret_val)
+ return ret_val;
+
+ if (phy->autoneg_wait_to_complete) {
+ e_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
+
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ if (hw->phy.type != e1000_phy_m88) {
+ e_dbg("Link taking longer than expected.\n");
+ } else {
+ /*
+ * We didn't get link.
+ * Reset the DSP and cross our fingers.
+ */
+ ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
+ 0x001d);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000e_phy_reset_dsp(hw);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ /* Try once more */
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (hw->phy.type != e1000_phy_m88)
+ return 0;
+
+ ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * Resetting the phy means we need to re-force TX_CLK in the
+ * Extended PHY Specific Control Register to 25MHz clock from
+ * the reset value of 2.5MHz.
+ */
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+ ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * In addition, we must re-enable CRS on Tx for both half and full
+ * duplex.
+ */
+ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+ ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
+ * @hw: pointer to the HW structure
+ *
+ * Forces the speed and duplex settings of the PHY.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ ret_val = e1e_rphy(hw, PHY_CONTROL, &data);
+ if (ret_val)
+ goto out;
+
+ e1000e_phy_force_speed_duplex_setup(hw, &data);
+
+ ret_val = e1e_wphy(hw, PHY_CONTROL, data);
+ if (ret_val)
+ goto out;
+
+ /* Disable MDI-X support for 10/100 */
+ ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
+ if (ret_val)
+ goto out;
+
+ data &= ~IFE_PMC_AUTO_MDIX;
+ data &= ~IFE_PMC_FORCE_MDIX;
+
+ ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data);
+ if (ret_val)
+ goto out;
+
+ e_dbg("IFE PMC: %X\n", data);
+
+ udelay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ e_dbg("Waiting for forced speed/duplex link on IFE phy.\n");
+
+ ret_val = e1000e_phy_has_link_generic(hw,
+ PHY_FORCE_LIMIT,
+ 100000,
+ &link);
+ if (ret_val)
+ goto out;
+
+ if (!link)
+ e_dbg("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = e1000e_phy_has_link_generic(hw,
+ PHY_FORCE_LIMIT,
+ 100000,
+ &link);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ * @hw: pointer to the HW structure
+ * @phy_ctrl: pointer to current value of PHY_CONTROL
+ *
+ * Forces speed and duplex on the PHY by doing the following: disable flow
+ * control, force speed/duplex on the MAC, disable auto speed detection,
+ * disable auto-negotiation, configure duplex, configure speed, configure
+ * the collision distance, write configuration to CTRL register. The
+ * caller must write to the PHY_CONTROL register for these settings to
+ * take effect.
+ **/
+void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 ctrl;
+
+ /* Turn off flow control when forcing speed/duplex */
+ hw->fc.current_mode = e1000_fc_none;
+
+ /* Force speed/duplex on the mac */
+ ctrl = er32(CTRL);
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~E1000_CTRL_SPD_SEL;
+
+ /* Disable Auto Speed Detection */
+ ctrl &= ~E1000_CTRL_ASDE;
+
+ /* Disable autoneg on the phy */
+ *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+
+ /* Forcing Full or Half Duplex? */
+ if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
+ ctrl &= ~E1000_CTRL_FD;
+ *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+ e_dbg("Half Duplex\n");
+ } else {
+ ctrl |= E1000_CTRL_FD;
+ *phy_ctrl |= MII_CR_FULL_DUPLEX;
+ e_dbg("Full Duplex\n");
+ }
+
+ /* Forcing 10mb or 100mb? */
+ if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
+ ctrl |= E1000_CTRL_SPD_100;
+ *phy_ctrl |= MII_CR_SPEED_100;
+ *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+ e_dbg("Forcing 100mb\n");
+ } else {
+ ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+ *phy_ctrl |= MII_CR_SPEED_10;
+ *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+ e_dbg("Forcing 10mb\n");
+ }
+
+ e1000e_config_collision_dist(hw);
+
+ ew32(CTRL, ctrl);
+}
+
+/**
+ * e1000e_set_d3_lplu_state - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns a negative error code
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * and SmartSpeed is disabled when active is true, else clear lplu for D3
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (!active) {
+ data &= ~IGP02E1000_PM_D3_LPLU;
+ ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+ if (ret_val)
+ return ret_val;
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ return ret_val;
+ }
+ } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+ data |= IGP02E1000_PM_D3_LPLU;
+ ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+ if (ret_val)
+ return ret_val;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000e_check_downshift - Checks whether a downshift in speed occurred
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns a negative error code
+ *
+ * A downshift is detected by querying the PHY link health.
+ **/
+s32 e1000e_check_downshift(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, offset, mask;
+
+ switch (phy->type) {
+ case e1000_phy_m88:
+ case e1000_phy_gg82563:
+ case e1000_phy_bm:
+ case e1000_phy_82578:
+ offset = M88E1000_PHY_SPEC_STATUS;
+ mask = M88E1000_PSSR_DOWNSHIFT;
+ break;
+ case e1000_phy_igp_2:
+ case e1000_phy_igp_3:
+ offset = IGP01E1000_PHY_LINK_HEALTH;
+ mask = IGP01E1000_PLHR_SS_DOWNGRADE;
+ break;
+ default:
+ /* speed downshift not supported */
+ phy->speed_downgraded = false;
+ return 0;
+ }
+
+ ret_val = e1e_rphy(hw, offset, &phy_data);
+
+ if (!ret_val)
+ phy->speed_downgraded = (phy_data & mask);
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_polarity_m88 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal;
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_polarity_igp - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY port status register, and the
+ * current speed (since there is no polarity at 100Mbps).
+ **/
+s32 e1000_check_polarity_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data, offset, mask;
+
+ /*
+ * Polarity is determined based on the speed of
+ * our connection.
+ */
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+ offset = IGP01E1000_PHY_PCS_INIT_REG;
+ mask = IGP01E1000_PHY_POLARITY_MASK;
+ } else {
+ /*
+ * This really only applies to 10Mbps since
+ * there is no polarity for 100Mbps (always 0).
+ */
+ offset = IGP01E1000_PHY_PORT_STATUS;
+ mask = IGP01E1000_PSSR_POLARITY_REVERSED;
+ }
+
+ ret_val = e1e_rphy(hw, offset, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = (data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal;
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_polarity_ife - Check cable polarity for IFE PHY
+ * @hw: pointer to the HW structure
+ *
+ * Polarity is determined based on whether the polarity reversal feature is
+ * enabled.
+ **/
+s32 e1000_check_polarity_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, offset, mask;
+
+ /*
+ * Polarity is determined based on the reversal feature being enabled.
+ */
+ if (phy->polarity_correction) {
+ offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
+ mask = IFE_PESC_POLARITY_REVERSED;
+ } else {
+ offset = IFE_PHY_SPECIAL_CONTROL;
+ mask = IFE_PSC_FORCE_POLARITY;
+ }
+
+ ret_val = e1e_rphy(hw, offset, &phy_data);
+
+ if (!ret_val)
+ phy->cable_polarity = (phy_data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal;
+
+ return ret_val;
+}
+
+/**
+ * e1000_wait_autoneg - Wait for auto-neg completion
+ * @hw: pointer to the HW structure
+ *
+ * Waits for auto-negotiation to complete or for the auto-negotiation time
+ * limit to expire, whichever happens first.
+ **/
+static s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 i, phy_status;
+
+ /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+ for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
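+ /*
+ * Read PHY_STATUS twice: some of its bits are latched, so the
+ * second read returns the current state.
+ */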
+ ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ if (phy_status & MII_SR_AUTONEG_COMPLETE)
+ break;
+ msleep(100);
+ }
+
+ /*
+ * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+ * has completed.
+ */
+ return ret_val;
+}
+
+/**
+ * e1000e_phy_has_link_generic - Polls PHY for link
+ * @hw: pointer to the HW structure
+ * @iterations: number of times to poll for link
+ * @usec_interval: delay between polling attempts
+ * @success: pointer to whether polling was successful or not
+ *
+ * Polls the PHY status register for link, 'iterations' number of times.
+ **/
+s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success)
+{
+ s32 ret_val = 0;
+ u16 i, phy_status;
+
+ for (i = 0; i < iterations; i++) {
+ /*
+ * Some PHYs require the PHY_STATUS register to be read
+ * twice due to the link bit being sticky. No harm doing
+ * it across the board.
+ */
+ ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ /*
+ * If the first read fails, another entity may have
+ * ownership of the resources, wait and try again to
+ * see if they have relinquished the resources yet.
+ */
+ udelay(usec_interval);
+ ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ if (phy_status & MII_SR_LINK_STATUS)
+ break;
+ if (usec_interval >= 1000)
+ mdelay(usec_interval/1000);
+ else
+ udelay(usec_interval);
+ }
+
+ *success = (i < iterations);
+
+ return ret_val;
+}
+
+/**
+ * e1000e_get_cable_length_m88 - Determine cable length for m88 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY specific status register to retrieve the cable length
+ * information. The cable length is determined by averaging the minimum and
+ * maximum values to get the "average" cable length. The m88 PHY has five
+ * possible cable length values, which are:
+ * Register Value Cable Length
+ * 0 < 50 meters
+ * 1 50 - 80 meters
+ * 2 80 - 110 meters
+ * 3 110 - 140 meters
+ * 4 > 140 meters
+ **/
+s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, index;
+
+ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ goto out;
+
+ index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
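+ /*
+ * The register value selects a [min, max] bucket from the table;
+ * report the midpoint of that bucket as the estimate.
+ */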
+ phy->min_cable_length = e1000_m88_cable_length_table[index];
+ phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000e_get_cable_length_igp_2 - Determine cable length for igp2 PHY
+ * @hw: pointer to the HW structure
+ *
+ * The automatic gain control (agc) normalizes the amplitude of the
+ * received signal, adjusting for the attenuation produced by the
+ * cable. By reading the AGC registers, which represent the
+ * combination of coarse and fine gain value, the value can be put
+ * into a lookup table to obtain the approximate cable length
+ * for each channel.
+ **/
+s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, i, agc_value = 0;
+ u16 cur_agc_index, max_agc_index = 0;
+ u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+ static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
+ IGP02E1000_PHY_AGC_A,
+ IGP02E1000_PHY_AGC_B,
+ IGP02E1000_PHY_AGC_C,
+ IGP02E1000_PHY_AGC_D
+ };
+
+ /* Read the AGC registers for all channels */
+ for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = e1e_rphy(hw, agc_reg_array[i], &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * Getting bits 15:9, which represent the combination of
+ * coarse and fine gain values. The result is a number
+ * that can be put into the lookup table to obtain the
+ * approximate cable length.
+ */
+ cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+ IGP02E1000_AGC_LENGTH_MASK;
+
+ /* Array index bound check. */
+ if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+ (cur_agc_index == 0))
+ return -E1000_ERR_PHY;
+
+ /* Remove min & max AGC values from calculation. */
+ if (e1000_igp_2_cable_length_table[min_agc_index] >
+ e1000_igp_2_cable_length_table[cur_agc_index])
+ min_agc_index = cur_agc_index;
+ if (e1000_igp_2_cable_length_table[max_agc_index] <
+ e1000_igp_2_cable_length_table[cur_agc_index])
+ max_agc_index = cur_agc_index;
+
+ agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+ }
+
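+ /*
+ * Drop the shortest and longest readings, then average the two
+ * remaining channels.
+ */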
+ agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+ e1000_igp_2_cable_length_table[max_agc_index]);
+ agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+ /* Calculate cable length with the error range of +/- 10 meters. */
+ phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+ (agc_value - IGP02E1000_AGC_RANGE) : 0;
+ phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+ return ret_val;
+}
+
+/**
+ * e1000e_get_phy_info_m88 - Retrieve PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Valid for only copper links. Read the PHY status register (sticky read)
+ * to verify that link is up. Read the PHY special control register to
+ * determine the polarity and 10base-T extended distance. Read the PHY
+ * special status register to determine MDI/MDIx and current speed. If
+ * speed is 1000, then determine cable length, local and remote receiver.
+ **/
+s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ if (phy->media_type != e1000_media_type_copper) {
+ e_dbg("Phy info is only valid for copper media\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ e_dbg("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy->polarity_correction = (phy_data &
+ M88E1000_PSCR_POLARITY_REVERSAL);
+
+ ret_val = e1000_check_polarity_m88(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX);
+
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+ ret_val = e1000_get_cable_length(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+ } else {
+ /* Set values to "undefined" */
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000e_get_phy_info_igp - Retrieve igp PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up. If link is up, then
+ * set/determine 10base-T extended distance and polarity correction. Read
+ * PHY port status to determine MDI/MDIx and speed. Based on the speed,
+ * determine the cable length and the local and remote receiver status.
+ **/
+s32 e1000e_get_phy_info_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ e_dbg("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ phy->polarity_correction = true;
+
+ ret_val = e1000_check_polarity_igp(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->is_mdix = (data & IGP01E1000_PSSR_MDIX);
+
+ if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+ ret_val = e1000_get_cable_length(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+ } else {
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_ife - Retrieves various IFE PHY states
+ * @hw: pointer to the HW structure
+ *
+ * Populates "phy" structure with various feature states.
+ **/
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (!link) {
+ e_dbg("Phy info is only valid if link is up\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data);
+ if (ret_val)
+ goto out;
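+ /* A set IFE_PSC_AUTO_POLARITY_DISABLE bit means correction is off. */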
+ phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE)
+ ? false : true;
+
+ if (phy->polarity_correction) {
+ ret_val = e1000_check_polarity_ife(hw);
+ if (ret_val)
+ goto out;
+ } else {
+ /* Polarity is forced */
+ phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal;
+ }
+
+ ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
+ if (ret_val)
+ goto out;
+
+ phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false;
+
+ /* The following parameters are undefined for 10/100 operation. */
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000e_phy_sw_reset - PHY software reset
+ * @hw: pointer to the HW structure
+ *
+ * Does a software reset of the PHY by reading the PHY control register,
+ * setting the reset bit and writing the register back to the PHY.
+ **/
+s32 e1000e_phy_sw_reset(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_ctrl;
+
+ ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ phy_ctrl |= MII_CR_RESET;
+ ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ udelay(1);
+
+ return ret_val;
+}
+
+/**
+ * e1000e_phy_hw_reset_generic - PHY hardware reset
+ * @hw: pointer to the HW structure
+ *
+ * Verify the reset block is not blocking us from resetting. Acquire
+ * semaphore (if necessary) and read/set/write the device control reset
+ * bit in the PHY. Wait the appropriate delay time for the device to
+ * reset and release the semaphore (if necessary).
+ **/
+s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u32 ctrl;
+
+ ret_val = e1000_check_reset_block(hw);
+ if (ret_val)
+ return 0;
+
+ ret_val = phy->ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ctrl = er32(CTRL);
+ ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
+ e1e_flush();
+
+ udelay(phy->reset_delay_us);
+
+ ew32(CTRL, ctrl);
+ e1e_flush();
+
+ udelay(150);
+
+ phy->ops.release(hw);
+
+ return e1000_get_phy_cfg_done(hw);
+}
+
+/**
+ * e1000e_get_cfg_done - Generic configuration done
+ * @hw: pointer to the HW structure
+ *
+ * Generic function to wait 10 milli-seconds for configuration to complete
+ * and return success.
+ **/
+s32 e1000e_get_cfg_done(struct e1000_hw *hw)
+{
+ mdelay(10);
+ return 0;
+}
+
+/**
+ * e1000e_phy_init_script_igp3 - Inits the IGP3 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
+ **/
+s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw)
+{
+ e_dbg("Running IGP 3 PHY init script\n");
+
+ /* PHY init IGP 3 */
+ /* Enable rise/fall, 10-mode work in class-A */
+ e1e_wphy(hw, 0x2F5B, 0x9018);
+ /* Remove all caps from Replica path filter */
+ e1e_wphy(hw, 0x2F52, 0x0000);
+ /* Bias trimming for ADC, AFE and Driver (Default) */
+ e1e_wphy(hw, 0x2FB1, 0x8B24);
+ /* Increase Hybrid poly bias */
+ e1e_wphy(hw, 0x2FB2, 0xF8F0);
+ /* Add 4% to Tx amplitude in Gig mode */
+ e1e_wphy(hw, 0x2010, 0x10B0);
+ /* Disable trimming (TTT) */
+ e1e_wphy(hw, 0x2011, 0x0000);
+ /* Poly DC correction to 94.6% + 2% for all channels */
+ e1e_wphy(hw, 0x20DD, 0x249A);
+ /* ABS DC correction to 95.9% */
+ e1e_wphy(hw, 0x20DE, 0x00D3);
+ /* BG temp curve trim */
+ e1e_wphy(hw, 0x28B4, 0x04CE);
+ /* Increasing ADC OPAMP stage 1 currents to max */
+ e1e_wphy(hw, 0x2F70, 0x29E4);
+ /* Force 1000 (required for enabling PHY regs configuration) */
+ e1e_wphy(hw, 0x0000, 0x0140);
+ /* Set upd_freq to 6 */
+ e1e_wphy(hw, 0x1F30, 0x1606);
+ /* Disable NPDFE */
+ e1e_wphy(hw, 0x1F31, 0xB814);
+ /* Disable adaptive fixed FFE (Default) */
+ e1e_wphy(hw, 0x1F35, 0x002A);
+ /* Enable FFE hysteresis */
+ e1e_wphy(hw, 0x1F3E, 0x0067);
+ /* Fixed FFE for short cable lengths */
+ e1e_wphy(hw, 0x1F54, 0x0065);
+ /* Fixed FFE for medium cable lengths */
+ e1e_wphy(hw, 0x1F55, 0x002A);
+ /* Fixed FFE for long cable lengths */
+ e1e_wphy(hw, 0x1F56, 0x002A);
+ /* Enable Adaptive Clip Threshold */
+ e1e_wphy(hw, 0x1F72, 0x3FB0);
+ /* AHT reset limit to 1 */
+ e1e_wphy(hw, 0x1F76, 0xC0FF);
+ /* Set AHT master delay to 127 msec */
+ e1e_wphy(hw, 0x1F77, 0x1DEC);
+ /* Set scan bits for AHT */
+ e1e_wphy(hw, 0x1F78, 0xF9EF);
+ /* Set AHT Preset bits */
+ e1e_wphy(hw, 0x1F79, 0x0210);
+ /* Change integ_factor of channel A to 3 */
+ e1e_wphy(hw, 0x1895, 0x0003);
+ /* Change prop_factor of channels BCD to 8 */
+ e1e_wphy(hw, 0x1796, 0x0008);
+ /* Change cg_icount + enable integbp for channels BCD */
+ e1e_wphy(hw, 0x1798, 0xD008);
+ /*
+ * Change cg_icount + enable integbp + change prop_factor_master
+ * to 8 for channel A
+ */
+ e1e_wphy(hw, 0x1898, 0xD918);
+ /* Disable AHT in Slave mode on channel A */
+ e1e_wphy(hw, 0x187A, 0x0800);
+ /*
+ * Enable LPLU and disable AN to 1000 in non-D0a states,
+ * Enable SPD+B2B
+ */
+ e1e_wphy(hw, 0x0019, 0x008D);
+ /* Enable restart AN on an1000_dis change */
+ e1e_wphy(hw, 0x001B, 0x2080);
+ /* Enable wh_fifo read clock in 10/100 modes */
+ e1e_wphy(hw, 0x0014, 0x0045);
+ /* Restart AN, Speed selection is 1000 */
+ e1e_wphy(hw, 0x0000, 0x1340);
+
+ return 0;
+}
+
+/* Internal function pointers */
+
+/**
+ * e1000_get_phy_cfg_done - Generic PHY configuration done
+ * @hw: pointer to the HW structure
+ *
+ * Return success if silicon family did not implement a family specific
+ * get_cfg_done function.
+ **/
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.get_cfg_done)
+ return hw->phy.ops.get_cfg_done(hw);
+
+ return 0;
+}
+
+/**
+ * e1000_phy_force_speed_duplex - Generic force PHY speed/duplex
+ * @hw: pointer to the HW structure
+ *
+ * When the silicon family has not implemented a forced speed/duplex
+ * function for the PHY, simply return 0.
+ **/
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.force_speed_duplex)
+ return hw->phy.ops.force_speed_duplex(hw);
+
+ return 0;
+}
+
+/**
+ * e1000e_get_phy_type_from_id - Get PHY type from id
+ * @phy_id: phy_id read from the phy
+ *
+ * Returns the phy type from the id.
+ **/
+enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
+{
+ enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+ switch (phy_id) {
+ case M88E1000_I_PHY_ID:
+ case M88E1000_E_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ case M88E1011_I_PHY_ID:
+ phy_type = e1000_phy_m88;
+ break;
+ case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
+ phy_type = e1000_phy_igp_2;
+ break;
+ case GG82563_E_PHY_ID:
+ phy_type = e1000_phy_gg82563;
+ break;
+ case IGP03E1000_E_PHY_ID:
+ phy_type = e1000_phy_igp_3;
+ break;
+ case IFE_E_PHY_ID:
+ case IFE_PLUS_E_PHY_ID:
+ case IFE_C_E_PHY_ID:
+ phy_type = e1000_phy_ife;
+ break;
+ case BME1000_E_PHY_ID:
+ case BME1000_E_PHY_ID_R2:
+ phy_type = e1000_phy_bm;
+ break;
+ case I82578_E_PHY_ID:
+ phy_type = e1000_phy_82578;
+ break;
+ case I82577_E_PHY_ID:
+ phy_type = e1000_phy_82577;
+ break;
+ case I82579_E_PHY_ID:
+ phy_type = e1000_phy_82579;
+ break;
+ default:
+ phy_type = e1000_phy_unknown;
+ break;
+ }
+ return phy_type;
+}
+
+/**
+ * e1000e_determine_phy_address - Determines PHY address.
+ * @hw: pointer to the HW structure
+ *
+ * This uses a trial and error method to loop through possible PHY
+ * addresses. It tests each by reading the PHY ID registers and
+ * checking for a match.
+ **/
+s32 e1000e_determine_phy_address(struct e1000_hw *hw)
+{
+ s32 ret_val = -E1000_ERR_PHY_TYPE;
+ u32 phy_addr = 0;
+ u32 i;
+ enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+ hw->phy.id = phy_type;
+
+ for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
+ hw->phy.addr = phy_addr;
+ i = 0;
+
+ do {
+ e1000e_get_phy_id(hw);
+ phy_type = e1000e_get_phy_type_from_id(hw->phy.id);
+
+ /*
+ * If phy_type is valid, break - we found our
+ * PHY address
+ */
+ if (phy_type != e1000_phy_unknown) {
+ ret_val = 0;
+ goto out;
+ }
+ usleep_range(1000, 2000);
+ i++;
+ } while (i < 10);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address
+ * @page: page to access
+ * @reg: register within the page
+ *
+ * Returns the phy address for the page requested.
+ **/
+static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
+{
+ u32 phy_addr = 2;
+
+ if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31))
+ phy_addr = 1;
+
+ return phy_addr;
+}
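+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the driver API):
+ * composes the combined page/register offset that the BM accessors below
+ * decode with "offset >> IGP_PAGE_SHIFT" for the page and
+ * "MAX_PHY_REG_ADDRESS & offset" for the register number.
+ */
+static inline u32 example_bm_phy_offset(u32 page, u32 reg)
+{
+ return (page << IGP_PAGE_SHIFT) | (reg & MAX_PHY_REG_ADDRESS);
+}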
+
+/**
+ * e1000e_write_phy_reg_bm - Write BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+ u32 page = offset >> IGP_PAGE_SHIFT;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+ false, false);
+ goto out;
+ }
+
+ hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ u32 page_shift, page_select;
+
+ /*
+ * Page select is register 31 for phy address 1 and 22 for
+ * phy address 2 and 3. Page select is shifted only for
+ * phy address 1.
+ */
+ if (hw->phy.addr == 1) {
+ page_shift = IGP_PAGE_SHIFT;
+ page_select = IGP01E1000_PHY_PAGE_SELECT;
+ } else {
+ page_shift = 0;
+ page_select = BM_PHY_PAGE_SELECT;
+ }
+
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
+ (page << page_shift));
+ if (ret_val)
+ goto out;
+ }
+
+ ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+out:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000e_read_phy_reg_bm - Read BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Releases any acquired
+ * semaphores before exiting.
+ **/
+s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u32 page = offset >> IGP_PAGE_SHIFT;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+ true, false);
+ goto out;
+ }
+
+ hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ u32 page_shift, page_select;
+
+ /*
+ * Page select is register 31 for phy address 1 and 22 for
+ * phy address 2 and 3. Page select is shifted only for
+ * phy address 1.
+ */
+ if (hw->phy.addr == 1) {
+ page_shift = IGP_PAGE_SHIFT;
+ page_select = IGP01E1000_PHY_PAGE_SELECT;
+ } else {
+ page_shift = 0;
+ page_select = BM_PHY_PAGE_SELECT;
+ }
+
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
+ (page << page_shift));
+ if (ret_val)
+ goto out;
+ }
+
+ ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+out:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000e_read_phy_reg_bm2 - Read BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Releases any acquired
+ * semaphores before exiting.
+ **/
+s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+ true, false);
+ goto out;
+ }
+
+ hw->phy.addr = 1;
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
+ page);
+
+ if (ret_val)
+ goto out;
+ }
+
+ ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+out:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000e_write_phy_reg_bm2 - Write BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+ u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+ false, false);
+ goto out;
+ }
+
+ hw->phy.addr = 1;
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
+ page);
+
+ if (ret_val)
+ goto out;
+ }
+
+ ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+out:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
+ * @hw: pointer to the HW structure
+ * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
+ *
+ * Assumes semaphore already acquired and phy_reg points to a valid memory
+ * address to store contents of the BM_WUC_ENABLE_REG register.
+ **/
+s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
+{
+ s32 ret_val;
+ u16 temp;
+
+ /* All page select, port ctrl and wakeup registers use phy address 1 */
+ hw->phy.addr = 1;
+
+ /* Select Port Control Registers page */
+ ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
+ if (ret_val) {
+ e_dbg("Could not set Port Control page\n");
+ goto out;
+ }
+
+ ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
+ if (ret_val) {
+ e_dbg("Could not read PHY register %d.%d\n",
+ BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+ goto out;
+ }
+
+ /*
+ * Enable both PHY wakeup mode and Wakeup register page writes.
+ * Prevent a power state change by disabling ME and Host PHY wakeup.
+ */
+ temp = *phy_reg;
+ temp |= BM_WUC_ENABLE_BIT;
+ temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
+
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp);
+ if (ret_val) {
+ e_dbg("Could not write PHY register %d.%d\n",
+ BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+ goto out;
+ }
+
+ /* Select Host Wakeup Registers page */
+ ret_val = e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT));
+
+ /* caller now able to write registers on the Wakeup registers page */
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
+ * @hw: pointer to the HW structure
+ * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
+ *
+ * Restore BM_WUC_ENABLE_REG to its original value.
+ *
+ * Assumes semaphore already acquired and *phy_reg is the contents of the
+ * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
+ * caller.
+ **/
+s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
+{
+ s32 ret_val = 0;
+
+ /* Select Port Control Registers page */
+ ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
+ if (ret_val) {
+ e_dbg("Could not set Port Control page\n");
+ goto out;
+ }
+
+ /* Restore 769.17 to its original value */
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg);
+ if (ret_val)
+ e_dbg("Could not restore PHY register %d.%d\n",
+ BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read or written
+ * @data: pointer to the data to read or write
+ * @read: determines if operation is read or write
+ * @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ * Read the PHY register at offset and store the retrieved information in
+ * data, or write data to PHY register at offset. Note the procedure to
+ * access the PHY wakeup registers is different than reading the other PHY
+ * registers. It works as such:
+ * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1
+ * 2) Set page to 800 for host (801 if the access is for manageability)
+ * 3) Write the address using the address opcode (0x11)
+ * 4) Read or write the data using the data opcode (0x12)
+ * 5) Restore 769.17.2 to its original value
+ *
+ * Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and
+ * step 5 is done by e1000_disable_phy_wakeup_reg_access_bm().
+ *
+ * Assumes semaphore is already acquired. When page_set==true, assumes
+ * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
+ * is responsible for calls to
+ * e1000_[enable|disable]_phy_wakeup_reg_access_bm()).
+ **/
+static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read, bool page_set)
+{
+ s32 ret_val;
+ u16 reg = BM_PHY_REG_NUM(offset);
+ u16 page = BM_PHY_REG_PAGE(offset);
+ u16 phy_reg = 0;
+
+ /* Gig must be disabled for MDIO accesses to Host Wakeup reg page */
+ if ((hw->mac.type == e1000_pchlan) &&
+ (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
+ e_dbg("Attempting to access page %d while gig enabled.\n",
+ page);
+
+ if (!page_set) {
+ /* Enable access to PHY wakeup registers */
+ ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+ if (ret_val) {
+ e_dbg("Could not enable PHY wakeup reg access\n");
+ goto out;
+ }
+ }
+
+ e_dbg("Accessing PHY page %d reg 0x%x\n", page, reg);
+
+ /* Write the Wakeup register page offset value using opcode 0x11 */
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
+ if (ret_val) {
+ e_dbg("Could not write address opcode to page %d\n", page);
+ goto out;
+ }
+
+ if (read) {
+ /* Read the Wakeup register page value using opcode 0x12 */
+ ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
+ data);
+ } else {
+ /* Write the Wakeup register page value using opcode 0x12 */
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
+ *data);
+ }
+
+ if (ret_val) {
+ e_dbg("Could not access PHY reg %d.%d\n", page, reg);
+ goto out;
+ }
+
+ if (!page_set)
+ ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+
+out:
+ return ret_val;
+}
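+
+/*
+ * Usage sketch (hypothetical caller): batch wakeup-register accesses under
+ * a single semaphore acquisition by enabling access once and then using the
+ * page-preset accessor. "wuc_offset" is an assumed offset that encodes
+ * BM_WUC_PAGE, and e1000_read_phy_reg_page_hv() is assumed visible via the
+ * PHY header.
+ */
+static s32 example_read_wuc_reg(struct e1000_hw *hw, u32 wuc_offset,
+ u16 *data)
+{
+ u16 wuc_enable;
+ s32 ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* steps 1 and 2 of the procedure documented above */
+ ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
+ if (ret_val)
+ goto release;
+
+ /* steps 3 and 4; page already set, so the page_set==true path is used */
+ ret_val = e1000_read_phy_reg_page_hv(hw, wuc_offset, data);
+
+ /* step 5: restore the original BM_WUC_ENABLE_REG contents */
+ e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
+release:
+ hw->phy.ops.release(hw);
+ return ret_val;
+}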
+
+/**
+ * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * After a PHY power down to save power, to turn off link during a driver
+ * unload, or when wake on lan is not enabled, restore the link to its
+ * previous settings.
+ **/
+void e1000_power_up_phy_copper(struct e1000_hw *hw)
+{
+ u16 mii_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ e1e_rphy(hw, PHY_CONTROL, &mii_reg);
+ mii_reg &= ~MII_CR_POWER_DOWN;
+ e1e_wphy(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * e1000_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Power down the PHY to save power, to turn off link during a driver
+ * unload, or when wake on lan is not enabled.
+ **/
+void e1000_power_down_phy_copper(struct e1000_hw *hw)
+{
+ u16 mii_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ e1e_rphy(hw, PHY_CONTROL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+ e1e_wphy(hw, PHY_CONTROL, mii_reg);
+ usleep_range(1000, 2000);
+}
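+
+/*
+ * Usage sketch (hypothetical suspend path): power the copper PHY down only
+ * when wake on lan does not need it, per the conditions described above.
+ */
+static void example_suspend_copper_phy(struct e1000_hw *hw, bool wol_enabled)
+{
+ if (!wol_enabled)
+ e1000_power_down_phy_copper(hw);
+}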
+
+/**
+ * e1000e_commit_phy - Soft PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Performs a soft PHY reset on those that apply. This is a function pointer
+ * entry point called by drivers.
+ **/
+s32 e1000e_commit_phy(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.commit)
+ return hw->phy.ops.commit(hw);
+
+ return 0;
+}
+
+/**
+ * e1000_set_d0_lplu_state - Sets low power link up state for D0
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns a negative error code
+ *
+ * The low power link up (lplu) state is set to the power management level D0
+ * and SmartSpeed is disabled when active is true, else clear lplu for D0
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained. This is a function pointer entry point called by drivers.
+ **/
+static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
+{
+ if (hw->phy.ops.set_d0_lplu_state)
+ return hw->phy.ops.set_d0_lplu_state(hw, active);
+
+ return 0;
+}
+
+/**
+ * __e1000_read_phy_reg_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ * @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphore before exiting.
+ **/
+static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked, bool page_set)
+{
+ s32 ret_val;
+ u16 page = BM_PHY_REG_PAGE(offset);
+ u16 reg = BM_PHY_REG_NUM(offset);
+ u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+
+ if (!locked) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+ true, page_set);
+ goto out;
+ }
+
+ if (page > 0 && page < HV_INTC_FC_PAGE_START) {
+ ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
+ data, true);
+ goto out;
+ }
+
+ if (!page_set) {
+ if (page == HV_INTC_FC_PAGE_START)
+ page = 0;
+
+ if (reg > MAX_PHY_MULTI_PAGE_REG) {
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_set_page_igp(hw,
+ (page << IGP_PAGE_SHIFT));
+
+ hw->phy.addr = phy_addr;
+
+ if (ret_val)
+ goto out;
+ }
+ }
+
+ e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
+ page << IGP_PAGE_SHIFT, reg);
+
+ ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
+ data);
+out:
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset and stores
+ * the retrieved information in data. Release the acquired semaphore
+ * before exiting.
+ **/
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_hv(hw, offset, data, false, false);
+}
+
+/**
+ * e1000_read_phy_reg_hv_locked - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired.
+ **/
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_hv(hw, offset, data, true, false);
+}
+
+/**
+ * e1000_read_phy_reg_page_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired and page already set.
+ **/
+s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_hv(hw, offset, data, true, true);
+}
+
+/**
+ * __e1000_write_phy_reg_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
+ * @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked, bool page_set)
+{
+ s32 ret_val;
+ u16 page = BM_PHY_REG_PAGE(offset);
+ u16 reg = BM_PHY_REG_NUM(offset);
+ u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+
+ if (!locked) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+ false, page_set);
+ goto out;
+ }
+
+ if (page > 0 && page < HV_INTC_FC_PAGE_START) {
+ ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
+ &data, false);
+ goto out;
+ }
+
+ if (!page_set) {
+ if (page == HV_INTC_FC_PAGE_START)
+ page = 0;
+
+ /*
+ * Work around MDIO accesses being disabled after entering IEEE
+ * Power Down (when bit 11 of the PHY Control register is set)
+ */
+ if ((hw->phy.type == e1000_phy_82578) &&
+ (hw->phy.revision >= 1) &&
+ (hw->phy.addr == 2) &&
+ ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) {
+ u16 data2 = 0x7EFF;
+ ret_val = e1000_access_phy_debug_regs_hv(hw,
+ (1 << 6) | 0x3,
+ &data2, false);
+ if (ret_val)
+ goto out;
+ }
+
+ if (reg > MAX_PHY_MULTI_PAGE_REG) {
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_set_page_igp(hw,
+ (page << IGP_PAGE_SHIFT));
+
+ hw->phy.addr = phy_addr;
+
+ if (ret_val)
+ goto out;
+ }
+ }
+
+ e_dbg("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
+ page << IGP_PAGE_SHIFT, reg);
+
+ ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
+ data);
+
+out:
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to PHY register at the offset.
+ * Release the acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_hv(hw, offset, data, false, false);
+}
+
+/**
+ * e1000_write_phy_reg_hv_locked - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset. Assumes semaphore
+ * already acquired.
+ **/
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_hv(hw, offset, data, true, false);
+}
+
+/**
+ * e1000_write_phy_reg_page_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset. Assumes semaphore
+ * already acquired and page already set.
+ **/
+s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_hv(hw, offset, data, true, true);
+}
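+
+/*
+ * Usage sketch (hypothetical caller): take the semaphore once and use the
+ * _locked accessors for several reads rather than paying an acquire/release
+ * per register. PHY_CONTROL and BM_CS_STATUS are registers already used
+ * elsewhere in this file; the pairing here is only for illustration.
+ */
+static s32 example_read_two_hv_regs(struct e1000_hw *hw, u16 *ctrl,
+ u16 *cs_status)
+{
+ s32 ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg_hv_locked(hw, PHY_CONTROL, ctrl);
+ if (!ret_val)
+ ret_val = e1000_read_phy_reg_hv_locked(hw, BM_CS_STATUS, cs_status);
+
+ hw->phy.ops.release(hw);
+ return ret_val;
+}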
+
+/**
+ * e1000_get_phy_addr_for_hv_page - Get PHY address based on page
+ * @page: page to be accessed
+ **/
+static u32 e1000_get_phy_addr_for_hv_page(u32 page)
+{
+ u32 phy_addr = 2;
+
+ if (page >= HV_INTC_FC_PAGE_START)
+ phy_addr = 1;
+
+ return phy_addr;
+}
+
+/**
+ * e1000_access_phy_debug_regs_hv - Read HV PHY vendor specific high registers
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read or written
+ * @data: pointer to the data to be read or written
+ * @read: determines if operation is read or write
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired. Note that the procedure
+ * to access these regs uses the address port and data port to read/write.
+ * These accesses are done with PHY address 2 and without using pages.
+ **/
+static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read)
+{
+ s32 ret_val;
+ u32 addr_reg = 0;
+ u32 data_reg = 0;
+
+ /* The address register differs between desktop (82578) and mobile (82577) PHYs */
+ addr_reg = (hw->phy.type == e1000_phy_82578) ?
+ I82578_ADDR_REG : I82577_ADDR_REG;
+ data_reg = addr_reg + 1;
+
+ /* All operations in this function are phy address 2 */
+ hw->phy.addr = 2;
+
+ /* masking with 0x3F to remove the page from offset */
+ ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
+ if (ret_val) {
+ e_dbg("Could not write the Address Offset port register\n");
+ goto out;
+ }
+
+ /* Read or write the data value next */
+ if (read)
+ ret_val = e1000e_read_phy_reg_mdic(hw, data_reg, data);
+ else
+ ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data);
+
+ if (ret_val) {
+ e_dbg("Could not access the Data port register\n");
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_link_stall_workaround_hv - Si workaround
+ * @hw: pointer to the HW structure
+ *
+ * This function works around a Si bug where the link partner can get
+ * a link up indication before the PHY does. If small packets are sent
+ * by the link partner they can be placed in the packet buffer without
+ * being properly accounted for by the PHY and will stall preventing
+ * further packets from being received. The workaround is to clear the
+ * packet buffer after the PHY detects link up.
+ **/
+s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 data;
+
+ if (hw->phy.type != e1000_phy_82578)
+ goto out;
+
+ /* Do not apply workaround if PHY loopback (bit 14) is set */
+ e1e_rphy(hw, PHY_CONTROL, &data);
+ if (data & PHY_CONTROL_LB)
+ goto out;
+
+ /* check if link is up and at 1Gbps */
+ ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
+ if (ret_val)
+ goto out;
+
+ data &= BM_CS_STATUS_LINK_UP |
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_MASK;
+
+ if (data != (BM_CS_STATUS_LINK_UP |
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_1000))
+ goto out;
+
+ mdelay(200);
+
+ /* flush the packets in the fifo buffer */
+ ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
+ HV_MUX_DATA_CTRL_FORCE_SPEED);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
+
+out:
+ return ret_val;
+}
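+
+/*
+ * Illustrative call site (a sketch, assuming a link-check path that has just
+ * observed link-up): the workaround above must run after link-up detection
+ * so the stalled packet buffer can be flushed; it is a no-op for PHYs other
+ * than the 82578.
+ */
+static s32 example_after_link_up(struct e1000_hw *hw)
+{
+ return e1000_link_stall_workaround_hv(hw);
+}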
+
+/**
+ * e1000_check_polarity_82577 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal;
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex.
+ **/
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ goto out;
+
+ e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ goto out;
+
+ udelay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ e_dbg("Waiting for forced speed/duplex link on 82577 phy\n");
+
+ ret_val = e1000e_phy_has_link_generic(hw,
+ PHY_FORCE_LIMIT,
+ 100000,
+ &link);
+ if (ret_val)
+ goto out;
+
+ if (!link)
+ e_dbg("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = e1000e_phy_has_link_generic(hw,
+ PHY_FORCE_LIMIT,
+ 100000,
+ &link);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_82577 - Retrieve I82577 PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up. If link is up, then
+ * set/determine 10base-T extended distance and polarity correction. Read
+ * PHY port status to determine MDI/MDIx and speed. Based on the speed,
+ * determine the cable length, and the local and remote receiver status.
+ **/
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (!link) {
+ e_dbg("Phy info is only valid if link is up\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ phy->polarity_correction = true;
+
+ ret_val = e1000_check_polarity_82577(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
+ if (ret_val)
+ goto out;
+
+ phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false;
+
+ if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
+ I82577_PHY_STATUS2_SPEED_1000MBPS) {
+ ret_val = hw->phy.ops.get_cable_length(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
+ if (ret_val)
+ goto out;
+
+ phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+ } else {
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the diagnostic status register and verifies result is valid before
+ * placing it in the phy->cable_length field.
+ **/
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, length;
+
+ ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+ if (ret_val)
+ goto out;
+
+ length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
+ I82577_DSTATUS_CABLE_LENGTH_SHIFT;
+
+ if (length == E1000_CABLE_LENGTH_UNDEFINED)
+ ret_val = -E1000_ERR_PHY;
+
+ phy->cable_length = length;
+
+out:
+ return ret_val;
+}
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
new file mode 100644
index 000000000000..c6e4621b6262
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -0,0 +1,37 @@
+################################################################################
+#
+# Intel 82575 PCI-Express Ethernet Linux driver
+# Copyright(c) 1999 - 2011 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Linux NICS <linux.nics@intel.com>
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) 82575 PCI-Express ethernet driver
+#
+
+obj-$(CONFIG_IGB) += igb.o
+
+igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
+ e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o
+
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
new file mode 100644
index 000000000000..c0857bdfb03a
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -0,0 +1,2084 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_82575
+ * e1000_82576
+ */
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#include "e1000_mac.h"
+#include "e1000_82575.h"
+
+static s32 igb_get_invariants_82575(struct e1000_hw *);
+static s32 igb_acquire_phy_82575(struct e1000_hw *);
+static void igb_release_phy_82575(struct e1000_hw *);
+static s32 igb_acquire_nvm_82575(struct e1000_hw *);
+static void igb_release_nvm_82575(struct e1000_hw *);
+static s32 igb_check_for_link_82575(struct e1000_hw *);
+static s32 igb_get_cfg_done_82575(struct e1000_hw *);
+static s32 igb_init_hw_82575(struct e1000_hw *);
+static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
+static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
+static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *);
+static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
+static s32 igb_reset_hw_82575(struct e1000_hw *);
+static s32 igb_reset_hw_82580(struct e1000_hw *);
+static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
+static s32 igb_setup_copper_link_82575(struct e1000_hw *);
+static s32 igb_setup_serdes_link_82575(struct e1000_hw *);
+static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
+static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
+static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
+static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
+ u16 *);
+static s32 igb_get_phy_id_82575(struct e1000_hw *);
+static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
+static bool igb_sgmii_active_82575(struct e1000_hw *);
+static s32 igb_reset_init_script_82575(struct e1000_hw *);
+static s32 igb_read_mac_addr_82575(struct e1000_hw *);
+static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
+static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
+static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
+static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
+static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw,
+ u16 offset);
+static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
+ u16 offset);
+static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
+static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
+static const u16 e1000_82580_rxpbs_table[] =
+ { 36, 72, 144, 1, 2, 4, 8, 16,
+ 35, 70, 140 };
+#define E1000_82580_RXPBS_TABLE_SIZE \
+ (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
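+
+/*
+ * Lookup sketch (hypothetical helper): reads of the table above are assumed
+ * to be indexed by the RXPBS register field and must be bounds-checked
+ * against E1000_82580_RXPBS_TABLE_SIZE before use.
+ */
+static u16 example_82580_rxpbs_lookup(u32 field)
+{
+ if (field >= E1000_82580_RXPBS_TABLE_SIZE)
+ return 0; /* out of range: no defined packet buffer size */
+ return e1000_82580_rxpbs_table[field];
+}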
+
+/**
+ * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
+ * @hw: pointer to the HW structure
+ *
+ * Called to determine if the I2C pins are being used for I2C or as an
+ * external MDIO interface since the two options are mutually exclusive.
+ **/
+static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
+{
+ u32 reg = 0;
+ bool ext_mdio = false;
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+ case e1000_82576:
+ reg = rd32(E1000_MDIC);
+ ext_mdio = !!(reg & E1000_MDIC_DEST);
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ reg = rd32(E1000_MDICNFG);
+ ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
+ break;
+ default:
+ break;
+ }
+ return ext_mdio;
+}
+
+static s32 igb_get_invariants_82575(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+ u32 eecd;
+ s32 ret_val;
+ u16 size;
+ u32 ctrl_ext = 0;
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82575EB_COPPER:
+ case E1000_DEV_ID_82575EB_FIBER_SERDES:
+ case E1000_DEV_ID_82575GB_QUAD_COPPER:
+ mac->type = e1000_82575;
+ break;
+ case E1000_DEV_ID_82576:
+ case E1000_DEV_ID_82576_NS:
+ case E1000_DEV_ID_82576_NS_SERDES:
+ case E1000_DEV_ID_82576_FIBER:
+ case E1000_DEV_ID_82576_SERDES:
+ case E1000_DEV_ID_82576_QUAD_COPPER:
+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+ case E1000_DEV_ID_82576_SERDES_QUAD:
+ mac->type = e1000_82576;
+ break;
+ case E1000_DEV_ID_82580_COPPER:
+ case E1000_DEV_ID_82580_FIBER:
+ case E1000_DEV_ID_82580_QUAD_FIBER:
+ case E1000_DEV_ID_82580_SERDES:
+ case E1000_DEV_ID_82580_SGMII:
+ case E1000_DEV_ID_82580_COPPER_DUAL:
+ case E1000_DEV_ID_DH89XXCC_SGMII:
+ case E1000_DEV_ID_DH89XXCC_SERDES:
+ case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+ case E1000_DEV_ID_DH89XXCC_SFP:
+ mac->type = e1000_82580;
+ break;
+ case E1000_DEV_ID_I350_COPPER:
+ case E1000_DEV_ID_I350_FIBER:
+ case E1000_DEV_ID_I350_SERDES:
+ case E1000_DEV_ID_I350_SGMII:
+ mac->type = e1000_i350;
+ break;
+ default:
+ return -E1000_ERR_MAC_INIT;
+ }
+
+ /* Set media type */
+ /*
+ * The 82575 uses bits 22:23 for link mode. The mode can be changed
+ * based on the EEPROM. We cannot rely upon device ID. There
+ * is no distinguishable difference between fiber and internal
+ * SerDes mode on the 82575. There can be an external PHY attached
+ * on the SGMII interface. For this, we'll set sgmii_active to true.
+ */
+ phy->media_type = e1000_media_type_copper;
+ dev_spec->sgmii_active = false;
+
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
+ case E1000_CTRL_EXT_LINK_MODE_SGMII:
+ dev_spec->sgmii_active = true;
+ break;
+ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+ case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ break;
+ default:
+ break;
+ }
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
+ if (mac->type == e1000_82576)
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+ if (mac->type == e1000_82580)
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+ if (mac->type == e1000_i350)
+ mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
+ /* reset */
+ if (mac->type >= e1000_82580)
+ mac->ops.reset_hw = igb_reset_hw_82580;
+ else
+ mac->ops.reset_hw = igb_reset_hw_82575;
+ /* Set if part includes ASF firmware */
+ mac->asf_firmware_present = true;
+ /* Set if manageability features are enabled. */
+ mac->arc_subsystem_valid =
+ (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
+ ? true : false;
+ /* enable EEE on i350 parts */
+ if (mac->type == e1000_i350)
+ dev_spec->eee_disable = false;
+ else
+ dev_spec->eee_disable = true;
+ /* physical interface link setup */
+ mac->ops.setup_physical_interface =
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? igb_setup_copper_link_82575
+ : igb_setup_serdes_link_82575;
+
+ /* NVM initialization */
+ eecd = rd32(E1000_EECD);
+
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+ switch (nvm->override) {
+ case e1000_nvm_override_spi_large:
+ nvm->page_size = 32;
+ nvm->address_bits = 16;
+ break;
+ case e1000_nvm_override_spi_small:
+ nvm->page_size = 8;
+ nvm->address_bits = 8;
+ break;
+ default:
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+ break;
+ }
+
+ nvm->type = e1000_nvm_eeprom_spi;
+
+ size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+ E1000_EECD_SIZE_EX_SHIFT);
+
+ /*
+ * Added to a constant, "size" becomes the left-shift value
+ * for setting word_size.
+ */
+ size += NVM_WORD_SIZE_BASE_SHIFT;
+
+ /*
+ * Check for invalid size
+ */
+ if ((hw->mac.type == e1000_82576) && (size > 15)) {
+ printk("igb: The NVM size is not valid, "
+ "defaulting to 32K.\n");
+ size = 15;
+ }
+ nvm->word_size = 1 << size;
+ if (nvm->word_size == (1 << 15))
+ nvm->page_size = 128;
+
+ /* NVM Function Pointers */
+ nvm->ops.acquire = igb_acquire_nvm_82575;
+ if (nvm->word_size < (1 << 15))
+ nvm->ops.read = igb_read_nvm_eerd;
+ else
+ nvm->ops.read = igb_read_nvm_spi;
+
+ nvm->ops.release = igb_release_nvm_82575;
+ switch (hw->mac.type) {
+ case e1000_82580:
+ nvm->ops.validate = igb_validate_nvm_checksum_82580;
+ nvm->ops.update = igb_update_nvm_checksum_82580;
+ break;
+ case e1000_i350:
+ nvm->ops.validate = igb_validate_nvm_checksum_i350;
+ nvm->ops.update = igb_update_nvm_checksum_i350;
+ break;
+ default:
+ nvm->ops.validate = igb_validate_nvm_checksum;
+ nvm->ops.update = igb_update_nvm_checksum;
+ }
+ nvm->ops.write = igb_write_nvm_spi;
+
+ /* if part supports SR-IOV then initialize mailbox parameters */
+ switch (mac->type) {
+ case e1000_82576:
+ case e1000_i350:
+ igb_init_mbx_params_pf(hw);
+ break;
+ default:
+ break;
+ }
+
+ /* setup PHY parameters */
+ if (phy->media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ return 0;
+ }
+
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 100;
+
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+
+ /* PHY function pointers */
+ if (igb_sgmii_active_82575(hw)) {
+ phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
+ } else {
+ phy->ops.reset = igb_phy_hw_reset;
+ ctrl_ext &= ~E1000_CTRL_I2C_ENA;
+ }
+
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+ igb_reset_mdicnfg_82580(hw);
+
+ if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
+ phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
+ phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
+ } else if (hw->mac.type >= e1000_82580) {
+ phy->ops.read_reg = igb_read_phy_reg_82580;
+ phy->ops.write_reg = igb_write_phy_reg_82580;
+ } else {
+ phy->ops.read_reg = igb_read_phy_reg_igp;
+ phy->ops.write_reg = igb_write_phy_reg_igp;
+ }
+
+ /* set lan id */
+ hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
+ E1000_STATUS_FUNC_SHIFT;
+
+ /* Set phy->phy_addr and phy->id. */
+ ret_val = igb_get_phy_id_82575(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Verify phy id and set remaining function pointers */
+ switch (phy->id) {
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ phy->type = e1000_phy_m88;
+ phy->ops.get_phy_info = igb_get_phy_info_m88;
+
+ if (phy->id == I347AT4_E_PHY_ID ||
+ phy->id == M88E1112_E_PHY_ID)
+ phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
+ else
+ phy->ops.get_cable_length = igb_get_cable_length_m88;
+
+ phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+ break;
+ case IGP03E1000_E_PHY_ID:
+ phy->type = e1000_phy_igp_3;
+ phy->ops.get_phy_info = igb_get_phy_info_igp;
+ phy->ops.get_cable_length = igb_get_cable_length_igp_2;
+ phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
+ phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
+ phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
+ break;
+ case I82580_I_PHY_ID:
+ case I350_I_PHY_ID:
+ phy->type = e1000_phy_82580;
+ phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
+ phy->ops.get_cable_length = igb_get_cable_length_82580;
+ phy->ops.get_phy_info = igb_get_phy_info_82580;
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ }
+
+ return 0;
+}
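+
+/*
+ * Dispatch sketch (hypothetical caller, not the driver's actual init path):
+ * once igb_get_invariants_82575() has filled in the ops tables, reset and
+ * physical link setup go through the function pointers it selected.
+ */
+static s32 example_reset_then_setup_link(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ ret_val = hw->mac.ops.reset_hw(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* copper vs. serdes variant was chosen by media type above */
+ return hw->mac.ops.setup_physical_interface(hw);
+}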
+
+/**
+ * igb_acquire_phy_82575 - Acquire rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * Acquire access rights to the correct PHY. This is a
+ * function pointer entry point called by the api module.
+ **/
+static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
+{
+ u16 mask = E1000_SWFW_PHY0_SM;
+
+ if (hw->bus.func == E1000_FUNC_1)
+ mask = E1000_SWFW_PHY1_SM;
+ else if (hw->bus.func == E1000_FUNC_2)
+ mask = E1000_SWFW_PHY2_SM;
+ else if (hw->bus.func == E1000_FUNC_3)
+ mask = E1000_SWFW_PHY3_SM;
+
+ return igb_acquire_swfw_sync_82575(hw, mask);
+}
+
+/**
+ * igb_release_phy_82575 - Release rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * A wrapper to release access rights to the correct PHY. This is a
+ * function pointer entry point called by the api module.
+ **/
+static void igb_release_phy_82575(struct e1000_hw *hw)
+{
+ u16 mask = E1000_SWFW_PHY0_SM;
+
+ if (hw->bus.func == E1000_FUNC_1)
+ mask = E1000_SWFW_PHY1_SM;
+ else if (hw->bus.func == E1000_FUNC_2)
+ mask = E1000_SWFW_PHY2_SM;
+ else if (hw->bus.func == E1000_FUNC_3)
+ mask = E1000_SWFW_PHY3_SM;
+
+ igb_release_swfw_sync_82575(hw, mask);
+}
+
+/**
+ * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the serial gigabit media independent
+ * interface and stores the retrieved information in data.
+ **/
+static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+ u16 *data)
+{
+ s32 ret_val = -E1000_ERR_PARAM;
+
+ if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+ hw_dbg("PHY Address %u is out of range\n", offset);
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = igb_read_phy_reg_i2c(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset using the serial gigabit
+ * media independent interface.
+ **/
+static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+ u16 data)
+{
+ s32 ret_val = -E1000_ERR_PARAM;
+
+ if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+ hw_dbg("PHY Address %d is out of range\n", offset);
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = igb_write_phy_reg_i2c(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_get_phy_id_82575 - Retrieve PHY addr and id
+ * @hw: pointer to the HW structure
+ *
+ * Retrieves the PHY address and ID for PHYs that do and do not use the
+ * SGMII interface.
+ **/
+static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+ u16 phy_id;
+ u32 ctrl_ext;
+ u32 mdic;
+
+ /*
+ * For SGMII PHYs, we try the list of possible addresses until
+ * we find one that works. For non-SGMII PHYs
+ * (e.g. integrated copper PHYs), an address of 1 should
+ * work. The result of this function should mean phy->phy_addr
+ * and phy->id are set correctly.
+ */
+ if (!(igb_sgmii_active_82575(hw))) {
+ phy->addr = 1;
+ ret_val = igb_get_phy_id(hw);
+ goto out;
+ }
+
+ if (igb_sgmii_uses_mdio_82575(hw)) {
+ switch (hw->mac.type) {
+ case e1000_82575:
+ case e1000_82576:
+ mdic = rd32(E1000_MDIC);
+ mdic &= E1000_MDIC_PHY_MASK;
+ phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ mdic = rd32(E1000_MDICNFG);
+ mdic &= E1000_MDICNFG_PHY_MASK;
+ phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+ ret_val = igb_get_phy_id(hw);
+ goto out;
+ }
+
+ /* Power on sgmii phy if it is disabled */
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
+ wrfl();
+ msleep(300);
+
+ /*
+ * The address field in the I2CCMD register is 3 bits and 0 is invalid.
+ * Therefore, we need to test 1-7
+ */
+ for (phy->addr = 1; phy->addr < 8; phy->addr++) {
+ ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
+ if (ret_val == 0) {
+ hw_dbg("Vendor ID 0x%08X read at address %u\n",
+ phy_id, phy->addr);
+ /*
+ * At the time of this writing, the M88 part is
+ * the only supported SGMII PHY product.
+ */
+ if (phy_id == M88_VENDOR)
+ break;
+ } else {
+ hw_dbg("PHY address %u was unreadable\n", phy->addr);
+ }
+ }
+
+ /* A valid PHY type couldn't be found. */
+ if (phy->addr == 8) {
+ phy->addr = 0;
+ ret_val = -E1000_ERR_PHY;
+ } else {
+ ret_val = igb_get_phy_id(hw);
+ }
+
+ /* restore previous sfp cage power state (on both paths) */
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Resets the PHY using the serial gigabit media independent interface.
+ **/
+static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ /*
+ * This isn't a true "hard" reset, but is the only reset
+ * available to us at this time.
+ */
+
+ hw_dbg("Soft resetting SGMII attached PHY...\n");
+
+ /*
+ * SFP documentation requires the following to configure the SFP module
+ * to work on SGMII. No further documentation is given.
+ */
+ ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
+ if (ret_val)
+ goto out;
+
+ ret_val = igb_phy_sw_reset(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag. When
+ * activating LPLU this function also disables smart speed
+ * and vice versa. LPLU will not be activated unless the
+ * device's autonegotiation advertisement is either 10, 10/100,
+ * or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+ if (ret_val)
+ goto out;
+
+ if (active) {
+ data |= IGP02E1000_PM_D0_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ if (ret_val)
+ goto out;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ goto out;
+ } else {
+ data &= ~IGP02E1000_PM_D0_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG, &data);
+ if (ret_val)
+ goto out;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG, data);
+ if (ret_val)
+ goto out;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG, &data);
+ if (ret_val)
+ goto out;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG, data);
+ if (ret_val)
+ goto out;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_acquire_nvm_82575 - Request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the necessary semaphores for exclusive access to the EEPROM.
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ ret_val = igb_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+ if (ret_val)
+ goto out;
+
+ ret_val = igb_acquire_nvm(hw);
+
+ if (ret_val)
+ igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_release_nvm_82575 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ * then release the semaphores acquired.
+ **/
+static void igb_release_nvm_82575(struct e1000_hw *hw)
+{
+ igb_release_nvm(hw);
+ igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+}
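+
+/*
+ * Usage sketch (hypothetical helper): a single NVM word read bracketed by
+ * the acquire/release pair above, assuming the (hw, offset, words, data)
+ * signature that igb_read_nvm_eerd and igb_read_nvm_spi share.
+ */
+static s32 example_read_nvm_word(struct e1000_hw *hw, u16 offset, u16 *data)
+{
+ s32 ret_val;
+
+ ret_val = hw->nvm.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->nvm.ops.read(hw, offset, 1, data);
+
+ hw->nvm.ops.release(hw);
+ return ret_val;
+}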
+
+/**
+ * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
+ * will also specify which port we're acquiring the lock for.
+ **/
+static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 16;
+ s32 ret_val = 0;
+ s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+
+ while (i < timeout) {
+ if (igb_get_hw_semaphore(hw)) {
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync = rd32(E1000_SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask)))
+ break;
+
+ /*
+ * Firmware currently using resource (fwmask)
+ * or other software thread using resource (swmask)
+ */
+ igb_put_hw_semaphore(hw);
+ mdelay(5);
+ i++;
+ }
+
+ if (i == timeout) {
+ hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync |= swmask;
+ wr32(E1000_SW_FW_SYNC, swfw_sync);
+
+ igb_put_hw_semaphore(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_release_swfw_sync_82575 - Release SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Release the SW/FW semaphore used to access the PHY or NVM. The mask
+ * will also specify which port we're releasing the lock for.
+ **/
+static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+
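+ /* release must not fail, so spin until the HW semaphore is ours */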
+ while (igb_get_hw_semaphore(hw) != 0)
+ ; /* Empty */
+
+ swfw_sync = rd32(E1000_SW_FW_SYNC);
+ swfw_sync &= ~mask;
+ wr32(E1000_SW_FW_SYNC, swfw_sync);
+
+ igb_put_hw_semaphore(hw);
+}
+
+/**
+ * igb_get_cfg_done_82575 - Read config done bit
+ * @hw: pointer to the HW structure
+ *
+ * Read the management control register for the config done bit for
+ * completion status. NOTE: silicon which is EEPROM-less will fail trying
+ * to read the config done bit, so the error is *ONLY* logged and 0 is
+ * returned. If we were to return an error, EEPROM-less silicon could
+ * never be reset or change link.
+ **/
+static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+ s32 ret_val = 0;
+ u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+ if (hw->bus.func == E1000_FUNC_1)
+ mask = E1000_NVM_CFG_DONE_PORT_1;
+ else if (hw->bus.func == E1000_FUNC_2)
+ mask = E1000_NVM_CFG_DONE_PORT_2;
+ else if (hw->bus.func == E1000_FUNC_3)
+ mask = E1000_NVM_CFG_DONE_PORT_3;
+
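+ /* poll EEMNGCTL for this port's config-done bit, 1 ms per try */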
+ while (timeout) {
+ if (rd32(E1000_EEMNGCTL) & mask)
+ break;
+ msleep(1);
+ timeout--;
+ }
+ if (!timeout)
+ hw_dbg("MNG configuration cycle has not completed.\n");
+
+ /* If EEPROM is not marked present, init the PHY manually */
+ if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
+ (hw->phy.type == e1000_phy_igp_3))
+ igb_phy_init_script_igp3(hw);
+
+ return ret_val;
+}
+
+/**
+ * igb_check_for_link_82575 - Check for link
+ * @hw: pointer to the HW structure
+ *
+ * For non-copper media, use the pcs register to determine link; otherwise
+ * use the generic copper link check.
+ **/
+static s32 igb_check_for_link_82575(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 speed, duplex;
+
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
+ &duplex);
+ /*
+ * Use this flag to determine if link needs to be checked or
+ * not. If we have link, clear the flag so that we do not
+ * continue to check for link.
+ */
+ hw->mac.get_link_status = !hw->mac.serdes_has_link;
+ } else {
+ ret_val = igb_check_for_copper_link(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
+ * @hw: pointer to the HW structure
+ **/
+void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+ !igb_sgmii_active_82575(hw))
+ return;
+
+ /* Enable PCS to turn on link */
+ reg = rd32(E1000_PCS_CFG0);
+ reg |= E1000_PCS_CFG_PCS_EN;
+ wr32(E1000_PCS_CFG0, reg);
+
+ /* Power up the laser */
+ reg = rd32(E1000_CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_SDP3_DATA;
+ wr32(E1000_CTRL_EXT, reg);
+
+ /* flush the write to verify completion */
+ wrfl();
+ msleep(1);
+}
+
+/**
+ * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Using the physical coding sub-layer (PCS), retrieve the current speed and
+ * duplex, then store the values in the pointers provided.
+ **/
+static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 pcs;
+
+ /* Set up defaults for the return values of this function */
+ mac->serdes_has_link = false;
+ *speed = 0;
+ *duplex = 0;
+
+ /*
+ * Read the PCS Status register for link state. For non-copper mode,
+ * the MAC's STATUS register is not accurate, so the PCS status
+ * register is used instead.
+ */
+ pcs = rd32(E1000_PCS_LSTAT);
+
+ /*
+ * The link up bit determines when link is up on autoneg. The sync ok
+ * gets set once both sides sync up and agree upon link. Stable link
+ * can be determined by checking for both link up and link sync ok
+ */
+ if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
+ mac->serdes_has_link = true;
+
+ /* Detect and store PCS speed */
+ if (pcs & E1000_PCS_LSTS_SPEED_1000) {
+ *speed = SPEED_1000;
+ } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
+ *speed = SPEED_100;
+ } else {
+ *speed = SPEED_10;
+ }
+
+ /* Detect and store PCS duplex */
+ if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
+ *duplex = FULL_DUPLEX;
+ } else {
+ *duplex = HALF_DUPLEX;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * igb_shutdown_serdes_link_82575 - Remove link during power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of fiber serdes, shut down optics and PCS on driver unload
+ * when management pass thru is not enabled.
+ **/
+void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ if (hw->phy.media_type != e1000_media_type_internal_serdes &&
+ !igb_sgmii_active_82575(hw))
+ return;
+
+ if (!igb_enable_mng_pass_thru(hw)) {
+ /* Disable PCS to turn off link */
+ reg = rd32(E1000_PCS_CFG0);
+ reg &= ~E1000_PCS_CFG_PCS_EN;
+ wr32(E1000_PCS_CFG0, reg);
+
+ /* shutdown the laser */
+ reg = rd32(E1000_CTRL_EXT);
+ reg |= E1000_CTRL_EXT_SDP3_DATA;
+ wr32(E1000_CTRL_EXT, reg);
+
+ /* flush the write to verify completion */
+ wrfl();
+ msleep(1);
+ }
+}
+
+/**
+ * igb_reset_hw_82575 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state. This is a
+ * function pointer entry point called by the api module.
+ **/
+static s32 igb_reset_hw_82575(struct e1000_hw *hw)
+{
+ u32 ctrl, icr;
+ s32 ret_val;
+
+ /*
+ * Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = igb_disable_pcie_master(hw);
+ if (ret_val)
+ hw_dbg("PCI-E Master disable polling has failed.\n");
+
+ /* set the completion timeout for the interface */
+ ret_val = igb_set_pcie_completion_timeout(hw);
+ if (ret_val)
+ hw_dbg("PCI-E Set completion timeout has failed.\n");
+
+ hw_dbg("Masking off all interrupts\n");
+ wr32(E1000_IMC, 0xffffffff);
+
+ wr32(E1000_RCTL, 0);
+ wr32(E1000_TCTL, E1000_TCTL_PSP);
+ wrfl();
+
+ msleep(10);
+
+ ctrl = rd32(E1000_CTRL);
+
+ hw_dbg("Issuing a global reset to MAC\n");
+ wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+ ret_val = igb_get_auto_rd_done(hw);
+ if (ret_val) {
+ /*
+ * When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+ hw_dbg("Auto Read Done did not complete\n");
+ }
+
+ /* If EEPROM is not present, run manual init scripts */
+ if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
+ igb_reset_init_script_82575(hw);
+
+ /* Clear any pending interrupt events. */
+ wr32(E1000_IMC, 0xffffffff);
+ icr = rd32(E1000_ICR);
+
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = igb_check_alt_mac_addr(hw);
+
+ return ret_val;
+}
+
+/**
+ * igb_init_hw_82575 - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation.
+ **/
+static s32 igb_init_hw_82575(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ u16 i, rar_count = mac->rar_entry_count;
+
+ /* Initialize identification LED */
+ ret_val = igb_id_led_init(hw);
+ if (ret_val) {
+ hw_dbg("Error initializing identification LED\n");
+ /* This is not fatal and we should not stop init due to this */
+ }
+
+ /* Disabling VLAN filtering */
+ hw_dbg("Initializing the IEEE VLAN\n");
+ igb_clear_vfta(hw);
+
+ /* Setup the receive address */
+ igb_init_rx_addrs(hw, rar_count);
+
+ /* Zero out the Multicast HASH table */
+ hw_dbg("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ array_wr32(E1000_MTA, i, 0);
+
+ /* Zero out the Unicast HASH table */
+ hw_dbg("Zeroing the UTA\n");
+ for (i = 0; i < mac->uta_reg_count; i++)
+ array_wr32(E1000_UTA, i, 0);
+
+ /* Setup link and flow control */
+ ret_val = igb_setup_link(hw);
+
+ /*
+ * Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ igb_clear_hw_cntrs_82575(hw);
+
+ return ret_val;
+}
+
+/**
+ * igb_setup_copper_link_82575 - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Configures the link for auto-neg or forced speed and duplex. Then we check
+ * for link, once link is established calls to configure collision distance
+ * and flow control are called.
+ **/
+static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
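+ /* force the link up at the MAC and clear any forced speed/duplex
+ * so that the values indicated by the PHY are used instead */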
+ ctrl = rd32(E1000_CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ wr32(E1000_CTRL, ctrl);
+
+ ret_val = igb_setup_serdes_link_82575(hw);
+ if (ret_val)
+ goto out;
+
+ if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
+ /* allow time for the SFP cage to power up the phy */
+ msleep(300);
+
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+ hw_dbg("Error resetting the PHY.\n");
+ goto out;
+ }
+ }
+ switch (hw->phy.type) {
+ case e1000_phy_m88:
+ if (hw->phy.id == I347AT4_E_PHY_ID ||
+ hw->phy.id == M88E1112_E_PHY_ID)
+ ret_val = igb_copper_link_setup_m88_gen2(hw);
+ else
+ ret_val = igb_copper_link_setup_m88(hw);
+ break;
+ case e1000_phy_igp_3:
+ ret_val = igb_copper_link_setup_igp(hw);
+ break;
+ case e1000_phy_82580:
+ ret_val = igb_copper_link_setup_82580(hw);
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ break;
+ }
+
+ if (ret_val)
+ goto out;
+
+ ret_val = igb_setup_copper_link(hw);
+out:
+ return ret_val;
+}
+
+/**
+ * igb_setup_serdes_link_82575 - Setup link for serdes
+ * @hw: pointer to the HW structure
+ *
+ * Configure the physical coding sub-layer (PCS) link. The PCS link is
+ * used on copper connections where the serialized gigabit media independent
+ * interface (sgmii), or serdes fiber is being used. Configures the link
+ * for auto-negotiation or forces speed/duplex.
+ **/
+static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
+{
+ u32 ctrl_ext, ctrl_reg, reg;
+ bool pcs_autoneg;
+ s32 ret_val = E1000_SUCCESS;
+ u16 data;
+
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+ !igb_sgmii_active_82575(hw))
+ return ret_val;
+
+ /*
+ * On the 82575, SerDes loopback mode persists until it is
+ * explicitly turned off or a power cycle is performed. A read to
+ * the register does not indicate its status. Therefore, we ensure
+ * loopback mode is disabled during initialization.
+ */
+ wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+
+ /* power on the sfp cage if present */
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+
+ ctrl_reg = rd32(E1000_CTRL);
+ ctrl_reg |= E1000_CTRL_SLU;
+
+ if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
+ /* set both sw defined pins */
+ ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
+
+ /* Set switch control to serdes energy detect */
+ reg = rd32(E1000_CONNSW);
+ reg |= E1000_CONNSW_ENRGSRC;
+ wr32(E1000_CONNSW, reg);
+ }
+
+ reg = rd32(E1000_PCS_LCTL);
+
+ /* default pcs_autoneg to the same setting as mac autoneg */
+ pcs_autoneg = hw->mac.autoneg;
+
+ switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
+ case E1000_CTRL_EXT_LINK_MODE_SGMII:
+ /* sgmii mode lets the phy handle forcing speed/duplex */
+ pcs_autoneg = true;
+ /* autoneg time out should be disabled for SGMII mode */
+ reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
+ break;
+ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+ /* disable PCS autoneg and support parallel detect only */
+ pcs_autoneg = false;
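+ /* fall through */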
+ default:
+ if (hw->mac.type == e1000_82575 ||
+ hw->mac.type == e1000_82576) {
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
+ pcs_autoneg = false;
+ }
+
+ /*
+ * non-SGMII modes only support a speed of 1000/Full for the
+ * link, so it is best to just force the MAC and let the pcs
+ * link either autoneg or be forced to 1000/Full
+ */
+ ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
+ E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+
+ /* set speed of 1000/Full if speed/duplex is forced */
+ reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
+ break;
+ }
+
+ wr32(E1000_CTRL, ctrl_reg);
+
+ /*
+ * New SerDes mode allows for forcing speed or autonegotiating speed
+ * at 1gb. Autoneg should be the default for most drivers, as it is
+ * the mode compatible with older link partners and switches.
+ * However, both modes are supported by the hardware and some
+ * drivers/tools.
+ */
+ reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
+ E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
+
+ /*
+ * We force flow control to prevent the CTRL register values from being
+ * overwritten by the autonegotiated flow control values
+ */
+ reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+
+ if (pcs_autoneg) {
+ /* Set PCS register for autoneg */
+ reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
+ E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
+ hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
+ } else {
+ /* Set PCS register for forced link */
+ reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
+
+ hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
+ }
+
+ wr32(E1000_PCS_LCTL, reg);
+
+ if (!igb_sgmii_active_82575(hw))
+ igb_force_mac_fc(hw);
+
+ return ret_val;
+}
+
+/**
+ * igb_sgmii_active_82575 - Return sgmii state
+ * @hw: pointer to the HW structure
+ *
+ * 82575 silicon has a serialized gigabit media independent interface (sgmii)
+ * which can be enabled for use in the embedded applications. Simply
+ * return the current state of the sgmii interface.
+ **/
+static bool igb_sgmii_active_82575(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+ return dev_spec->sgmii_active;
+}
+
+/**
+ * igb_reset_init_script_82575 - Inits HW defaults after reset
+ * @hw: pointer to the HW structure
+ *
+ * Inits recommended HW defaults after a reset when there is no EEPROM
+ * detected. This is only for the 82575.
+ **/
+static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
+{
+ if (hw->mac.type == e1000_82575) {
+ hw_dbg("Running reset init script for 82575\n");
+ /* SerDes configuration via SERDESCTRL */
+ igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
+ igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
+ igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
+ igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);
+
+ /* CCM configuration via CCMCTL register */
+ igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
+ igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);
+
+ /* PCIe lanes configuration */
+ igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
+ igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
+ igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
+ igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);
+
+ /* PCIe PLL Configuration */
+ igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
+ igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
+ igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
+ }
+
+ return 0;
+}
+
+/**
+ * igb_read_mac_addr_82575 - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ /*
+ * If there's an alternate MAC address place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = igb_check_alt_mac_addr(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = igb_read_mac_addr(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_power_down_phy_copper_82575 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or when wake on lan is not enabled, remove the link.
+ **/
+void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
+{
+ /* If the management interface is not enabled, then power down */
+ if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
+ igb_power_down_phy_copper(hw);
+}
+
+/**
+ * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
+{
+ igb_clear_hw_cntrs_base(hw);
+
+ rd32(E1000_PRC64);
+ rd32(E1000_PRC127);
+ rd32(E1000_PRC255);
+ rd32(E1000_PRC511);
+ rd32(E1000_PRC1023);
+ rd32(E1000_PRC1522);
+ rd32(E1000_PTC64);
+ rd32(E1000_PTC127);
+ rd32(E1000_PTC255);
+ rd32(E1000_PTC511);
+ rd32(E1000_PTC1023);
+ rd32(E1000_PTC1522);
+
+ rd32(E1000_ALGNERRC);
+ rd32(E1000_RXERRC);
+ rd32(E1000_TNCRS);
+ rd32(E1000_CEXTERR);
+ rd32(E1000_TSCTC);
+ rd32(E1000_TSCTFC);
+
+ rd32(E1000_MGTPRC);
+ rd32(E1000_MGTPDC);
+ rd32(E1000_MGTPTC);
+
+ rd32(E1000_IAC);
+ rd32(E1000_ICRXOC);
+
+ rd32(E1000_ICRXPTC);
+ rd32(E1000_ICRXATC);
+ rd32(E1000_ICTXPTC);
+ rd32(E1000_ICTXATC);
+ rd32(E1000_ICTXQEC);
+ rd32(E1000_ICTXQMTC);
+ rd32(E1000_ICRXDMTC);
+
+ rd32(E1000_CBTMPC);
+ rd32(E1000_HTDPMC);
+ rd32(E1000_CBRMPC);
+ rd32(E1000_RPTHC);
+ rd32(E1000_HGPTC);
+ rd32(E1000_HTCBDPC);
+ rd32(E1000_HGORCL);
+ rd32(E1000_HGORCH);
+ rd32(E1000_HGOTCL);
+ rd32(E1000_HGOTCH);
+ rd32(E1000_LENERRS);
+
+ /* This register should not be read in copper configurations */
+ if (hw->phy.media_type == e1000_media_type_internal_serdes ||
+ igb_sgmii_active_82575(hw))
+ rd32(E1000_SCVPC);
+}
+
+/**
+ * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
+ * @hw: pointer to the HW structure
+ *
+ * After rx enable, if manageability is enabled, then there is likely some
+ * bad data at the start of the fifo and possibly in the DMA fifo. This
+ * function clears the fifos and flushes any packets that came in as rx was
+ * being enabled.
+ **/
+void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
+{
+ u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
+ int i, ms_wait;
+
+ if (hw->mac.type != e1000_82575 ||
+ !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
+ return;
+
+ /* Disable all RX queues */
+ for (i = 0; i < 4; i++) {
+ rxdctl[i] = rd32(E1000_RXDCTL(i));
+ wr32(E1000_RXDCTL(i),
+ rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
+ }
+ /* Poll all queues to verify they have shut down */
+ for (ms_wait = 0; ms_wait < 10; ms_wait++) {
+ msleep(1);
+ rx_enabled = 0;
+ for (i = 0; i < 4; i++)
+ rx_enabled |= rd32(E1000_RXDCTL(i));
+ if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
+ break;
+ }
+
+ if (ms_wait == 10)
+ hw_dbg("Queue disable timed out after 10ms\n");
+
+ /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
+ * incoming packets are rejected. Then set RCTL.EN and wait 2ms so
+ * that any packet that was arriving while RCTL.EN was being set is
+ * flushed out.
+ */
+ rfctl = rd32(E1000_RFCTL);
+ wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
+
+ rlpml = rd32(E1000_RLPML);
+ wr32(E1000_RLPML, 0);
+
+ rctl = rd32(E1000_RCTL);
+ temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
+ temp_rctl |= E1000_RCTL_LPE;
+
+ wr32(E1000_RCTL, temp_rctl);
+ wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
+ wrfl();
+ msleep(2);
+
+ /* Enable RX queues that were previously enabled and restore our
+ * previous state
+ */
+ for (i = 0; i < 4; i++)
+ wr32(E1000_RXDCTL(i), rxdctl[i]);
+ wr32(E1000_RCTL, rctl);
+ wrfl();
+
+ wr32(E1000_RLPML, rlpml);
+ wr32(E1000_RFCTL, rfctl);
+
+ /* Flush receive errors generated by workaround */
+ rd32(E1000_ROC);
+ rd32(E1000_RNBC);
+ rd32(E1000_MPC);
+}
+
+/**
+ * igb_set_pcie_completion_timeout - set pci-e completion timeout
+ * @hw: pointer to the HW structure
+ *
+ * The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
+ * however the hardware default for these parts is 500us to 1ms which is less
+ * than the 10ms recommended by the pci-e spec. To address this we need to
+ * increase the value into the 10ms to 200ms range for capability version 1
+ * config, or the 16ms to 55ms range for version 2.
+ **/
+static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
+{
+ u32 gcr = rd32(E1000_GCR);
+ s32 ret_val = 0;
+ u16 pcie_devctl2;
+
+ /* only take action if timeout value is defaulted to 0 */
+ if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
+ goto out;
+
+ /*
+ * if the capabilities version is type 1 we can write the
+ * timeout of 10ms to 200ms through the GCR register
+ */
+ if (!(gcr & E1000_GCR_CAP_VER2)) {
+ gcr |= E1000_GCR_CMPL_TMOUT_10ms;
+ goto out;
+ }
+
+ /*
+ * for version 2 capabilities we need to write the config space
+ * directly in order to set the completion timeout value for
+ * 16ms to 55ms
+ */
+ ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+ &pcie_devctl2);
+ if (ret_val)
+ goto out;
+
+ pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
+
+ ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+ &pcie_devctl2);
+out:
+ /* disable completion timeout resend */
+ gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
+
+ wr32(E1000_GCR, gcr);
+ return ret_val;
+}
+
+/**
+ * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ * @pf: Physical Function pool - do not set anti-spoofing for the PF
+ *
+ * enables/disables L2 switch anti-spoofing functionality.
+ **/
+void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
+{
+ u32 dtxswc;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ dtxswc = rd32(E1000_DTXSWC);
+ if (enable) {
+ dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
+ E1000_DTXSWC_VLAN_SPOOF_MASK);
+ /* The PF can spoof - it has to in order to
+ * support emulation mode NICs */
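+ /* the XOR clears the PF's own bits from the masks just set:
+ * its MAC check at bit pf and its VLAN check at bit
+ * (pf + MAX_NUM_VFS) */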
+ dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+ } else {
+ dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
+ E1000_DTXSWC_VLAN_SPOOF_MASK);
+ }
+ wr32(E1000_DTXSWC, dtxswc);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ *
+ * enables/disables L2 switch loopback functionality.
+ **/
+void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
+{
+ u32 dtxswc = rd32(E1000_DTXSWC);
+
+ if (enable)
+ dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ else
+ dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+
+ wr32(E1000_DTXSWC, dtxswc);
+}
+
+/**
+ * igb_vmdq_set_replication_pf - enable or disable vmdq replication
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ *
+ * enables/disables replication of packets across multiple pools.
+ **/
+void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
+{
+ u32 vt_ctl = rd32(E1000_VT_CTL);
+
+ if (enable)
+ vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
+ else
+ vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
+
+ wr32(E1000_VT_CTL, vt_ctl);
+}
+
+/**
+ * igb_read_phy_reg_82580 - Read 82580 MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the MDI control register in the PHY at offset and stores the
+ * information read to data.
+ **/
+static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = igb_read_phy_reg_mdic(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_write_phy_reg_82580 - Write 82580 MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write to register at offset
+ *
+ * Writes data to MDI control register in the PHY at offset.
+ **/
+static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = igb_write_phy_reg_mdic(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
+ * @hw: pointer to the HW structure
+ *
+ * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ * the values found in the EEPROM. This addresses an issue in which these
+ * bits are not restored from EEPROM after reset.
+ **/
+static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u32 mdicnfg;
+ u16 nvm_data = 0;
+
+ if (hw->mac.type != e1000_82580)
+ goto out;
+ if (!igb_sgmii_active_82575(hw))
+ goto out;
+
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+ NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+ &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ mdicnfg = rd32(E1000_MDICNFG);
+ if (nvm_data & NVM_WORD24_EXT_MDIO)
+ mdicnfg |= E1000_MDICNFG_EXT_MDIO;
+ if (nvm_data & NVM_WORD24_COM_MDIO)
+ mdicnfg |= E1000_MDICNFG_COM_MDIO;
+ wr32(E1000_MDICNFG, mdicnfg);
+out:
+ return ret_val;
+}
+
+/**
+ * igb_reset_hw_82580 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets function or entire device (all ports, etc.)
+ * to a known state.
+ **/
+static s32 igb_reset_hw_82580(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ /* BH SW mailbox bit in SW_FW_SYNC */
+ u16 swmbsw_mask = E1000_SW_SYNCH_MB;
+ u32 ctrl, icr;
+ bool global_device_reset = hw->dev_spec._82575.global_device_reset;
+
+ hw->dev_spec._82575.global_device_reset = false;
+
+ /* Get current control state. */
+ ctrl = rd32(E1000_CTRL);
+
+ /*
+ * Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = igb_disable_pcie_master(hw);
+ if (ret_val)
+ hw_dbg("PCI-E Master disable polling has failed.\n");
+
+ hw_dbg("Masking off all interrupts\n");
+ wr32(E1000_IMC, 0xffffffff);
+ wr32(E1000_RCTL, 0);
+ wr32(E1000_TCTL, E1000_TCTL_PSP);
+ wrfl();
+
+ msleep(10);
+
+ /* Determine whether or not a global dev reset is requested */
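+ /* (fall back to a plain port reset if the SW mailbox semaphore
+ * cannot be taken) */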
+ if (global_device_reset &&
+ igb_acquire_swfw_sync_82575(hw, swmbsw_mask))
+ global_device_reset = false;
+
+ if (global_device_reset &&
+ !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
+ ctrl |= E1000_CTRL_DEV_RST;
+ else
+ ctrl |= E1000_CTRL_RST;
+
+ wr32(E1000_CTRL, ctrl);
+ wrfl();
+
+ /* Add delay to ensure DEV_RST has time to complete */
+ if (global_device_reset)
+ msleep(5);
+
+ ret_val = igb_get_auto_rd_done(hw);
+ if (ret_val) {
+ /*
+ * When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+ hw_dbg("Auto Read Done did not complete\n");
+ }
+
+ /* If EEPROM is not present, run manual init scripts */
+ if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
+ igb_reset_init_script_82575(hw);
+
+ /* clear global device reset status bit */
+ wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);
+
+ /* Clear any pending interrupt events. */
+ wr32(E1000_IMC, 0xffffffff);
+ icr = rd32(E1000_ICR);
+
+ ret_val = igb_reset_mdicnfg_82580(hw);
+ if (ret_val)
+ hw_dbg("Could not reset MDICNFG based on EEPROM\n");
+
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = igb_check_alt_mac_addr(hw);
+
+ /* Release semaphore */
+ if (global_device_reset)
+ igb_release_swfw_sync_82575(hw, swmbsw_mask);
+
+ return ret_val;
+}
+
+/**
+ * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
+ * @data: data received by reading RXPBS register
+ *
+ * The 82580 uses a table-based approach for packet buffer allocation sizes.
+ * This function converts the retrieved value into the correct table value:
+ * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
+ * 0x0 36 72 144 1 2 4 8 16
+ * 0x8 35 70 140 rsv rsv rsv rsv rsv
+ **/
+u16 igb_rxpbs_adjust_82580(u32 data)
+{
+ u16 ret_val = 0;
+
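+ /* values at or beyond E1000_82580_RXPBS_TABLE_SIZE have no table
+ * entry and map to 0 */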
+ if (data < E1000_82580_RXPBS_TABLE_SIZE)
+ ret_val = e1000_82580_rxpbs_table[data];
+
+ return ret_val;
+}
+
+/**
+ * igb_validate_nvm_checksum_with_offset - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ * @offset: offset in words of the checksum protected region
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+{
+ s32 ret_val = 0;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
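+ /* Sum every word of the region, including the stored checksum
+ * word; a valid image totals NVM_SUM (0xBABA) */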
+ for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
+
+ if (checksum != (u16) NVM_SUM) {
+ hw_dbg("NVM Checksum Invalid\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_update_nvm_checksum_with_offset - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ * @offset: offset in words of the checksum protected region
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
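+ /* Sum the words ahead of the checksum location, then store the
+ * value that brings the region's total to NVM_SUM */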
+ for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error while updating checksum.\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
+ checksum = (u16) NVM_SUM - checksum;
+ ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
+ &checksum);
+ if (ret_val)
+ hw_dbg("NVM Write Error while updating checksum.\n");
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM section checksum by reading/adding each word of
+ * the EEPROM and then verifies that the sum of the EEPROM is
+ * equal to 0xBABA.
+ **/
+static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 eeprom_regions_count = 1;
+ u16 j, nvm_data;
+ u16 nvm_offset;
+
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
+ /* if checksums compatibility bit is set validate checksums
+ * for all 4 ports. */
+ eeprom_regions_count = 4;
+ }
+
+ for (j = 0; j < eeprom_regions_count; j++) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+ ret_val = igb_validate_nvm_checksum_with_offset(hw,
+ nvm_offset);
+ if (ret_val != 0)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_update_nvm_checksum_82580 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM section checksums for all 4 ports by reading/adding
+ * each word of the EEPROM up to the checksum. Then calculates the EEPROM
+ * checksum and writes the value to the EEPROM.
+ **/
+static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 j, nvm_data;
+ u16 nvm_offset;
+
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error while updating checksum"
+ " compatibility bit.\n");
+ goto out;
+ }
+
+ if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
+ /* set compatibility bit to validate checksums appropriately */
+ nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
+ ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
+ &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Write Error while updating checksum"
+ " compatibility bit.\n");
+ goto out;
+ }
+ }
+
+ for (j = 0; j < 4; j++) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+ ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM section checksum by reading/adding each word of
+ * the EEPROM and then verifies that the sum of the EEPROM is
+ * equal to 0xBABA.
+ **/
+static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 j;
+ u16 nvm_offset;
+
+ for (j = 0; j < 4; j++) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+ ret_val = igb_validate_nvm_checksum_with_offset(hw,
+ nvm_offset);
+ if (ret_val != 0)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_update_nvm_checksum_i350 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM section checksums for all 4 ports by reading/adding
+ * each word of the EEPROM up to the checksum. Then calculates the EEPROM
+ * checksum and writes the value to the EEPROM.
+ **/
+static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 j;
+ u16 nvm_offset;
+
+ for (j = 0; j < 4; j++) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+ ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
+ if (ret_val != 0)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_set_eee_i350 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ *
+ * Enable/disable EEE based on the setting in the dev_spec structure.
+ **/
+s32 igb_set_eee_i350(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u32 ipcnfg, eeer, ctrl_ext;
+
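+ /* EEE is configured only when the i350 is in copper (GMII) mode;
+ * any non-zero link mode (serdes/SGMII/KX) is skipped */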
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ if ((hw->mac.type != e1000_i350) ||
+ (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK))
+ goto out;
+ ipcnfg = rd32(E1000_IPCNFG);
+ eeer = rd32(E1000_EEER);
+
+ /* enable or disable per user setting */
+ if (!(hw->dev_spec._82575.eee_disable)) {
+ ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
+ E1000_IPCNFG_EEE_100M_AN);
+ eeer |= (E1000_EEER_TX_LPI_EN |
+ E1000_EEER_RX_LPI_EN |
+ E1000_EEER_LPI_FC);
+
+ } else {
+ ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
+ E1000_IPCNFG_EEE_100M_AN);
+ eeer &= ~(E1000_EEER_TX_LPI_EN |
+ E1000_EEER_RX_LPI_EN |
+ E1000_EEER_LPI_FC);
+ }
+ wr32(E1000_IPCNFG, ipcnfg);
+ wr32(E1000_EEER, eeer);
+out:
+
+ return ret_val;
+}
+
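+/* Default op tables for 82575-family parts; the remaining entries are
+ * expected to be filled in at probe time by igb_get_invariants_82575
+ * (not shown in this hunk), which also selects per-device variants. */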
+static struct e1000_mac_operations e1000_mac_ops_82575 = {
+ .init_hw = igb_init_hw_82575,
+ .check_for_link = igb_check_for_link_82575,
+ .rar_set = igb_rar_set,
+ .read_mac_addr = igb_read_mac_addr_82575,
+ .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
+};
+
+static struct e1000_phy_operations e1000_phy_ops_82575 = {
+ .acquire = igb_acquire_phy_82575,
+ .get_cfg_done = igb_get_cfg_done_82575,
+ .release = igb_release_phy_82575,
+};
+
+static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
+ .acquire = igb_acquire_nvm_82575,
+ .read = igb_read_nvm_eerd,
+ .release = igb_release_nvm_82575,
+ .write = igb_write_nvm_spi,
+};
+
+const struct e1000_info e1000_82575_info = {
+ .get_invariants = igb_get_invariants_82575,
+ .mac_ops = &e1000_mac_ops_82575,
+ .phy_ops = &e1000_phy_ops_82575,
+ .nvm_ops = &e1000_nvm_ops_82575,
+};
+
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
new file mode 100644
index 000000000000..08a757eb6608
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -0,0 +1,260 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_82575_H_
+#define _E1000_82575_H_
+
+extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
+extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
+extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
+extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
+
+#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_DEF1_DEF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_ON2))
+
+#define E1000_RAR_ENTRIES_82575 16
+#define E1000_RAR_ENTRIES_82576 24
+#define E1000_RAR_ENTRIES_82580 24
+#define E1000_RAR_ENTRIES_I350 32
+
+#define E1000_SW_SYNCH_MB 0x00000100
+#define E1000_STAT_DEV_RST_SET 0x00100000
+#define E1000_CTRL_DEV_RST 0x20000000
+
+/* SRRCTL bit definitions */
+#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
+#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
+#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define E1000_SRRCTL_DROP_EN 0x80000000
+#define E1000_SRRCTL_TIMESTAMP 0x40000000
+
+#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
+#define E1000_MRQC_ENABLE_VMDQ 0x00000003
+#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
+#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
+
+#define E1000_EICR_TX_QUEUE ( \
+ E1000_EICR_TX_QUEUE0 | \
+ E1000_EICR_TX_QUEUE1 | \
+ E1000_EICR_TX_QUEUE2 | \
+ E1000_EICR_TX_QUEUE3)
+
+#define E1000_EICR_RX_QUEUE ( \
+ E1000_EICR_RX_QUEUE0 | \
+ E1000_EICR_RX_QUEUE1 | \
+ E1000_EICR_RX_QUEUE2 | \
+ E1000_EICR_RX_QUEUE3)
+
+/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
+#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
+
+/* Receive Descriptor - Advanced */
+union e1000_adv_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ __le16 pkt_info; /* RSS type, Packet type */
+ __le16 hdr_info; /* Split Header,
+ * header buffer length */
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
+#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
+#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
+#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
+
+/* Transmit Descriptor - Advanced */
+union e1000_adv_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */
+#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
+#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
+#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
+#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
+#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
+#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+
+/* Context descriptors */
+struct e1000_adv_tx_context_desc {
+ __le32 vlan_macip_lens;
+ __le32 seqnum_seed;
+ __le32 type_tucmd_mlhl;
+ __le32 mss_l4len_idx;
+};
+
+#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */
+/* IPSec Encrypt Enable for ESP */
+#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+/* Adv ctxt IPSec SA IDX mask */
+/* Adv ctxt IPSec ESP len mask */
+
+/* Additional Transmit Descriptor Control definitions */
+#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */
+/* Tx Queue Arbitration Priority 0=low, 1=high */
+
+/* Additional Receive Descriptor Control definitions */
+#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
+
+/* Direct Cache Access (DCA) definitions */
+#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */
+#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
+
+#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
+#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
+#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
+
+#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+
+/* Additional DCA related definitions, note change in position of CPUID */
+#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
+#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
+#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
+#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
+
+/* ETQF register bit definitions */
+#define E1000_ETQF_FILTER_ENABLE (1 << 26)
+#define E1000_ETQF_1588 (1 << 30)
+
+/* FTQF register bit definitions */
+#define E1000_FTQF_VF_BP 0x00008000
+#define E1000_FTQF_1588_TIME_STAMP 0x08000000
+#define E1000_FTQF_MASK 0xF0000000
+#define E1000_FTQF_MASK_PROTO_BP 0x10000000
+#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
+
+#define E1000_NVM_APME_82575 0x0400
+#define MAX_NUM_VFS 8
+
+#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */
+#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */
+#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
+#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
+
+/* Easy defines for setting default pool, would normally be left a zero */
+#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
+#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
+
+/* Other useful VMD_CTL register defines */
+#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
+#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
+#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
+
+/* Per VM Offload register setup */
+#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
+#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
+#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
+#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
+#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
+#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
+#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
+#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
+#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
+#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+
+#define E1000_VLVF_ARRAY_SIZE 32
+#define E1000_VLVF_VLANID_MASK 0x00000FFF
+#define E1000_VLVF_POOLSEL_SHIFT 12
+#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
+#define E1000_VLVF_LVLAN 0x00100000
+#define E1000_VLVF_VLANID_ENABLE 0x80000000
+
+#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
+#define E1000_IOVCTL 0x05BBC
+#define E1000_IOVCTL_REUSE_VFQ 0x00000001
+
+#define E1000_RPLOLR_STRVLAN 0x40000000
+#define E1000_RPLOLR_STRCRC 0x80000000
+
+#define E1000_DTXCTL_8023LL 0x0004
+#define E1000_DTXCTL_VLAN_ADDED 0x0008
+#define E1000_DTXCTL_OOS_ENABLE 0x0010
+#define E1000_DTXCTL_MDP_EN 0x0020
+#define E1000_DTXCTL_SPOOF_INT 0x0040
+
+#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14)
+
+#define ALL_QUEUES 0xFFFF
+
+/* RX packet buffer size defines */
+#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
+void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int);
+void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
+void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
+u16 igb_rxpbs_adjust_82580(u32 data);
+s32 igb_set_eee_i350(struct e1000_hw *);
+
+#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
new file mode 100644
index 000000000000..7b8ddd830f19
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -0,0 +1,834 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_DEFINES_H_
+#define _E1000_DEFINES_H_
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define REQ_RX_DESCRIPTOR_MULTIPLE 8
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
+/* Physical Func Reset Done Indication */
+#define E1000_CTRL_EXT_PFRSTD 0x00004000
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_EIAME 0x01000000
+#define E1000_CTRL_EXT_IRCA 0x00000001
+/* Interrupt delay cancellation */
+/* Driver loaded bit for FW */
+#define E1000_CTRL_EXT_DRV_LOAD 0x10000000
+/* Interrupt acknowledge Auto-mask */
+/* Clear Interrupt timers after IMS clear */
+/* packet buffer parity error detection enabled */
+/* descriptor FIFO parity error detection enable */
+#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
+#define E1000_I2CCMD_REG_ADDR_SHIFT 16
+#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
+#define E1000_I2CCMD_OPCODE_READ 0x08000000
+#define E1000_I2CCMD_OPCODE_WRITE 0x00000000
+#define E1000_I2CCMD_READY 0x20000000
+#define E1000_I2CCMD_ERROR 0x80000000
+#define E1000_MAX_SGMII_PHY_REG_ADDR 255
+#define E1000_I2CCMD_PHY_TIMEOUT 200
+#define E1000_IVAR_VALID 0x80
+#define E1000_GPIE_NSICR 0x00000001
+#define E1000_GPIE_MSIX_MODE 0x00000010
+#define E1000_GPIE_EIAME 0x40000000
+#define E1000_GPIE_PBA 0x80000000
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
+#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+#define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */
+
+#define E1000_RXDEXT_STATERR_CE 0x01000000
+#define E1000_RXDEXT_STATERR_SE 0x02000000
+#define E1000_RXDEXT_STATERR_SEQ 0x04000000
+#define E1000_RXDEXT_STATERR_CXE 0x10000000
+#define E1000_RXDEXT_STATERR_TCPE 0x20000000
+#define E1000_RXDEXT_STATERR_IPE 0x40000000
+#define E1000_RXDEXT_STATERR_RXE 0x80000000
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
+
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */
+/* Enable Neighbor Discovery Filtering */
+#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
+/* Enable MAC address filtering */
+#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
+
+/* Receive Control */
+#define E1000_RCTL_EN 0x00000002 /* enable */
+#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
+#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
+#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enable */
+#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
+#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
+#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
+#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
+#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
+#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
+#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
+#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
+#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
+#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
+#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
+
+/*
+ * Use byte values for the following shift parameters
+ * Usage:
+ * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ * E1000_PSRCTL_BSIZE0_MASK) |
+ * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ * E1000_PSRCTL_BSIZE1_MASK) |
+ * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ * E1000_PSRCTL_BSIZE2_MASK) |
+ * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ * E1000_PSRCTL_BSIZE3_MASK));
+ * where value0 = [128..16256], default=256
+ * value1 = [1024..64512], default=4096
+ * value2 = [0..64512], default=4096
+ * value3 = [0..64512], default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
+
+/* SWFW_SYNC Definitions */
+#define E1000_SWFW_EEP_SM 0x1
+#define E1000_SWFW_PHY0_SM 0x2
+#define E1000_SWFW_PHY1_SM 0x4
+#define E1000_SWFW_PHY2_SM 0x20
+#define E1000_SWFW_PHY3_SM 0x40
+
+/* FACTPS Definitions */
+/* Device Control */
+#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */
+#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
+#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
+#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
+#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
+#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+/* Defined polarity of Dock/Undock indication in SDP[0] */
+/* Reset both PHY ports, through PHYRST_N pin */
+/* enable link status from external LINK_0 and LINK_1 pins */
+#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_RST 0x04000000 /* Global reset */
+#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
+#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+/* Initiate an interrupt to manageability engine */
+#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */
+
+/* Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+
+#define E1000_CONNSW_ENRGSRC 0x4
+#define E1000_PCS_CFG_PCS_EN 8
+#define E1000_PCS_LCTL_FLV_LINK_UP 1
+#define E1000_PCS_LCTL_FSV_100 2
+#define E1000_PCS_LCTL_FSV_1000 4
+#define E1000_PCS_LCTL_FDV_FULL 8
+#define E1000_PCS_LCTL_FSD 0x10
+#define E1000_PCS_LCTL_FORCE_LINK 0x20
+#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
+#define E1000_PCS_LCTL_AN_ENABLE 0x10000
+#define E1000_PCS_LCTL_AN_RESTART 0x20000
+#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
+#define E1000_ENABLE_SERDES_LOOPBACK 0x0410
+
+#define E1000_PCS_LSTS_LINK_OK 1
+#define E1000_PCS_LSTS_SPEED_100 2
+#define E1000_PCS_LSTS_SPEED_1000 4
+#define E1000_PCS_LSTS_DUPLEX_FULL 8
+#define E1000_PCS_LSTS_SYNK_OK 0x10
+
+/* Device Status */
+#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
+#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+/* Change in Dock/Undock state. Clear on write '0'. */
+/* Status of Master requests. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000
+/* BMC external code execution disabled */
+
+/* Constants used to interpret the masked PCI-X bus speed. */
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL 0x0020
+
+/* 1000/H is not supported, nor spec-compliant. */
+#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
+ ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
+ ADVERTISE_1000_FULL)
+#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
+ ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
+#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \
+ ADVERTISE_1000_FULL)
+#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
+
+/* LED Control */
+#define E1000_LEDCTL_LED0_MODE_SHIFT 0
+#define E1000_LEDCTL_LED0_BLINK 0x00000080
+
+#define E1000_LEDCTL_MODE_LED_ON 0xE
+#define E1000_LEDCTL_MODE_LED_OFF 0xF
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
+#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+/* Extended desc bits for Linksec and timesync */
+
+/* Transmit Control */
+#define E1000_TCTL_EN 0x00000002 /* enable tx */
+#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
+#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
+#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
+#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
+
+/* DMA Coalescing register fields */
+#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing
+ * Watchdog Timer */
+#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive
+ * Threshold */
+#define E1000_DMACR_DMACTHR_SHIFT 16
+#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe
+ * transactions */
+#define E1000_DMACR_DMAC_LX_SHIFT 28
+#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
+
+#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit
+ * Threshold */
+
+#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
+
+#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate
+ * Threshold */
+#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in
+ * current window */
+
+#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic
+ * Current Cnt */
+
+#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold
+ * High val */
+#define E1000_FCRTC_RTH_COAL_SHIFT 4
+#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */
+
+/* SerDes Control */
+#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
+#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+
+/* Header split receive */
+#define E1000_RFCTL_LEF 0x00040000
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD 15
+#define E1000_CT_SHIFT 4
+#define E1000_COLLISION_DISTANCE 63
+#define E1000_COLD_SHIFT 12
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
+
+#define MAX_JUMBO_FRAME_SIZE 0x3F00
+
+/* PBA constants */
+#define E1000_PBA_34K 0x0022
+#define E1000_PBA_64K 0x0040 /* 64KB */
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
+#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
+#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
+#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
+#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
+#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
+/* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_INT_ASSERTED 0x80000000
+/* LAN connected device generates an interrupt */
+#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
+
+/* Extended Interrupt Cause Read */
+#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
+#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
+#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */
+#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */
+#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */
+#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
+#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
+#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
+#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+/* TCP Timer */
+
+/*
+ * This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ * o RXT0 = Receiver Timer Interrupt (ring 0)
+ * o TXDW = Transmit Descriptor Written Back
+ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ * o RXSEQ = Receive Sequence Error
+ * o LSC = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC | \
+ E1000_IMS_DOUTSYNC)
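+/*
+ * Illustrative usage (an assumption, not shown in this patch): the
+ * driver's interrupt enable path would typically unmask these causes
+ * with a single register write, e.g. wr32(E1000_IMS, IMS_ENABLE_MASK).
+ */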
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
+#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
+#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
+
+/* Extended Interrupt Mask Set */
+#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
+
+/* Extended Interrupt Cause Set */
+
+/* Transmit Descriptor Control */
+/* Enable the counting of descriptors still to be processed. */
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE 0x8808
+
+/* 802.1q VLAN Packet Size */
+#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
+
+/* Receive Address */
+/*
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * Technically, we have 16 spots. However, we reserve one of these spots
+ * (RAR[15]) for our directed address used by controllers with
+ * manageability enabled, allowing us room for 15 multicast addresses.
+ */
+#define E1000_RAH_AV 0x80000000 /* Receive address valid */
+#define E1000_RAL_MAC_ADDR_LEN 4
+#define E1000_RAH_MAC_ADDR_LEN 2
+#define E1000_RAH_POOL_MASK 0x03FC0000
+#define E1000_RAH_POOL_1 0x00040000
+
+/* Error Codes */
+#define E1000_SUCCESS 0
+#define E1000_ERR_NVM 1
+#define E1000_ERR_PHY 2
+#define E1000_ERR_CONFIG 3
+#define E1000_ERR_PARAM 4
+#define E1000_ERR_MAC_INIT 5
+#define E1000_ERR_RESET 9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_BLK_PHY_RESET 12
+#define E1000_ERR_SWFW_SYNC 13
+#define E1000_NOT_IMPLEMENTED 14
+#define E1000_ERR_MBX 15
+#define E1000_ERR_INVALID_ARGUMENT 16
+#define E1000_ERR_NO_SPACE 17
+#define E1000_ERR_NVM_PBA_SECTION 18
+
+/* Loop limit on how long we wait for auto-negotiation to complete */
+#define COPPER_LINK_UP_LIMIT 10
+#define PHY_AUTO_NEG_LIMIT 45
+#define PHY_FORCE_LIMIT 20
+/* Number of 100-microsecond intervals we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT 800
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT 100
+/* Number of 2-millisecond intervals we wait to acquire MDIO ownership. */
+/* Number of milliseconds for NVM auto read done after MAC reset. */
+#define AUTO_READ_DONE_TIMEOUT 10
+
+/* Flow Control */
+#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
+
+#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */
+#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestamping */
+
+#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestamping */
+
+#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
+#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
+#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
+#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
+
+#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
+#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
+#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
+#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
+#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
+#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
+
+#define E1000_TIMINCA_16NS_SHIFT 24
+
+#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
+#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
+#define E1000_MDICNFG_PHY_MASK 0x03E00000
+#define E1000_MDICNFG_PHY_SHIFT 21
+
+/* PCI Express Control */
+#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
+#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
+#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000
+#define E1000_GCR_CAP_VER2 0x00040000
+
+/* mPHY Address Control and Data Registers */
+#define E1000_MPHY_ADDR_CTL 0x0024 /* mPHY Address Control Register */
+#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000
+#define E1000_MPHY_DATA 0x0E10 /* mPHY Data Register */
+
+/* mPHY PCS CLK Register */
+#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* mPHY PCS CLK AFE CSR Offset */
+/* mPHY Near End Digital Loopback Override Bit */
+#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
+
+/* PHY Control Register */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000 0x0040
+#define MII_CR_SPEED_100 0x2000
+#define MII_CR_SPEED_10 0x0000
+
+/* PHY Status Register */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
+#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
+#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
+
+/* Autoneg Expansion Register */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
+#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
+ /* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
+ /* 0=Automatic Master/Slave config */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
+
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CONTROL 0x00 /* Control Register */
+#define PHY_STATUS 0x01 /* Status Register */
+#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
+#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+
+/* NVM Control */
+#define E1000_EECD_SK 0x00000001 /* NVM Clock */
+#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
+#define E1000_EECD_DI 0x00000004 /* NVM Data In */
+#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
+#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
+#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
+#define E1000_EECD_PRES 0x00000100 /* NVM Present */
+/* NVM Addressing bits based on type 0=small, 1=large */
+#define E1000_EECD_ADDR_BITS 0x00000400
+#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
+#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
+#define E1000_EECD_SIZE_EX_SHIFT 11
+
+/* Offset to data in NVM read/write registers */
+#define E1000_NVM_RW_REG_DATA 16
+#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
+#define E1000_NVM_RW_REG_START 1 /* Start operation */
+#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
+
+/* NVM Word Offsets */
+#define NVM_COMPAT 0x0003
+#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */
+#define NVM_INIT_CONTROL2_REG 0x000F
+#define NVM_INIT_CONTROL3_PORT_B 0x0014
+#define NVM_INIT_CONTROL3_PORT_A 0x0024
+#define NVM_ALT_MAC_ADDR_PTR 0x0037
+#define NVM_CHECKSUM_REG 0x003F
+#define NVM_COMPATIBILITY_REG_3 0x0003
+#define NVM_COMPATIBILITY_BIT_MASK 0x8000
+
+#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
+#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
+#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */
+#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */
+
+#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0)
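+/*
+ * For reference, values computed from the macro above: function 0 maps
+ * to offset 0, function 1 to 0x80, function 2 to 0xC0, function 3 to
+ * 0x100.
+ */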
+
+/* Mask bits for fields in Word 0x24 of the NVM */
+#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */
+#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */
+
+/* Mask bits for fields in Word 0x0f of the NVM */
+#define NVM_WORD0F_PAUSE_MASK 0x3000
+#define NVM_WORD0F_ASM_DIR 0x2000
+
+/* Mask bits for fields in Word 0x1a of the NVM */
+
+/* length of string needed to store part num */
+#define E1000_PBANUM_LENGTH 11
+
+/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+#define NVM_SUM 0xBABA
+
+#define NVM_PBA_OFFSET_0 8
+#define NVM_PBA_OFFSET_1 9
+#define NVM_PBA_PTR_GUARD 0xFAFA
+#define NVM_WORD_SIZE_BASE_SHIFT 6
+
+/* NVM Commands - Microwire */
+
+/* NVM Commands - SPI */
+#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
+#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
+#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
+#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
+#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
+#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
+
+/* SPI NVM Status Register */
+#define NVM_STATUS_RDY_SPI 0x01
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
+ (ID_LED_OFF1_OFF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2 0x1
+#define ID_LED_DEF1_ON2 0x2
+#define ID_LED_DEF1_OFF2 0x3
+#define ID_LED_ON1_DEF2 0x4
+#define ID_LED_ON1_ON2 0x5
+#define ID_LED_ON1_OFF2 0x6
+#define ID_LED_OFF1_DEF2 0x7
+#define ID_LED_OFF1_ON2 0x8
+#define ID_LED_OFF1_OFF2 0x9
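+/*
+ * For reference, computed from the values above: ID_LED_DEFAULT
+ * evaluates to (0x8 << 12) | (0x9 << 8) | (0x1 << 4) | 0x1 = 0x8911.
+ */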
+
+#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE 0x07000000
+
+/* PCI/PCI-X/PCI-EX Config space */
+#define PCIE_DEVICE_CONTROL2 0x28
+#define PCIE_DEVICE_CONTROL2_16ms 0x0005
+
+#define PHY_REVISION_MASK 0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF
+
+/* Bit definitions for valid PHY IDs. */
+/*
+ * I = Integrated
+ * E = External
+ */
+#define M88E1111_I_PHY_ID 0x01410CC0
+#define M88E1112_E_PHY_ID 0x01410C90
+#define I347AT4_E_PHY_ID 0x01410DC0
+#define IGP03E1000_E_PHY_ID 0x02A80390
+#define I82580_I_PHY_ID 0x015403A0
+#define I350_I_PHY_ID 0x015403B0
+#define M88_VENDOR 0x0141
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
+
+#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+/* 1=CLK125 low, 0=CLK125 toggling */
+#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
+ /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
+/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
+#define M88E1000_PSCR_AUTO_X_1000T 0x0040
+/* Auto crossover enabled all speeds */
+#define M88E1000_PSCR_AUTO_X_MODE 0x0060
+/*
+ * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold);
+ * 0=Normal 10BASE-T Rx Threshold
+ */
+/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
+/*
+ * 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-110M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define M88E1000_PSSR_CABLE_LENGTH 0x0380
+#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
+#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
+
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+
+/* M88E1000 Extended PHY Specific Control Register */
+/*
+ * 1 = Lost lock detect enabled.
+ * Will assert lost lock and bring
+ * link down if idle not seen
+ * within 1ms in 1000BASE-T
+ */
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave
+ */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
+#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
+
+/* Intel i347-AT4 Registers */
+
+#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */
+#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */
+#define I347AT4_PAGE_SELECT 0x16
+
+/* i347-AT4 Extended PHY Specific Control Register */
+
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
+#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000
+#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000
+#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000
+#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000
+#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000
+#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000
+#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000
+#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000
+#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000
+
+/* i347-AT4 PHY Cable Diagnostics Control */
+#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */
+
+/* Marvell 1112 only registers */
+#define M88E1112_VCT_DSP_DISTANCE 0x001A
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
+
+/* MDI Control */
+#define E1000_MDIC_DATA_MASK 0x0000FFFF
+#define E1000_MDIC_REG_MASK 0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK 0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE 0x04000000
+#define E1000_MDIC_OP_READ 0x08000000
+#define E1000_MDIC_READY 0x10000000
+#define E1000_MDIC_INT_EN 0x20000000
+#define E1000_MDIC_ERROR 0x40000000
+#define E1000_MDIC_DEST 0x80000000
+
+/* Thermal Sensor */
+#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */
+#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */
+
+/* Energy Efficient Ethernet */
+#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */
+#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */
+#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */
+#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */
+#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */
+
+/* SerDes Control */
+#define E1000_GEN_CTL_READY 0x80000000
+#define E1000_GEN_CTL_ADDRESS_SHIFT 8
+#define E1000_GEN_POLL_TIMEOUT 640
+
+#define E1000_VFTA_ENTRY_SHIFT 5
+#define E1000_VFTA_ENTRY_MASK 0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
+
+/* DMA Coalescing register fields */
+#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based
+ on DMA coal */
+
+/* Tx Rate-Scheduler Config fields */
+#define E1000_RTTBCNRC_RS_ENA 0x80000000
+#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF
+#define E1000_RTTBCNRC_RF_INT_SHIFT 14
+#define E1000_RTTBCNRC_RF_INT_MASK \
+ (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
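+/*
+ * Reading of the masks above (an interpretation, not stated in this
+ * file): the rate factor is a fixed-point value whose fractional part
+ * occupies bits 13:0 and whose integer part occupies bits 27:14.
+ */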
+
+#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
new file mode 100644
index 000000000000..4519a1367170
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -0,0 +1,529 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+
+#include "e1000_regs.h"
+#include "e1000_defines.h"
+
+struct e1000_hw;
+
+#define E1000_DEV_ID_82576 0x10C9
+#define E1000_DEV_ID_82576_FIBER 0x10E6
+#define E1000_DEV_ID_82576_SERDES 0x10E7
+#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
+#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526
+#define E1000_DEV_ID_82576_NS 0x150A
+#define E1000_DEV_ID_82576_NS_SERDES 0x1518
+#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
+#define E1000_DEV_ID_82575EB_COPPER 0x10A7
+#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
+#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
+#define E1000_DEV_ID_82580_COPPER 0x150E
+#define E1000_DEV_ID_82580_FIBER 0x150F
+#define E1000_DEV_ID_82580_SERDES 0x1510
+#define E1000_DEV_ID_82580_SGMII 0x1511
+#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
+#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
+#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
+#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
+#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
+#define E1000_DEV_ID_DH89XXCC_SFP 0x0440
+#define E1000_DEV_ID_I350_COPPER 0x1521
+#define E1000_DEV_ID_I350_FIBER 0x1522
+#define E1000_DEV_ID_I350_SERDES 0x1523
+#define E1000_DEV_ID_I350_SGMII 0x1524
+
+#define E1000_REVISION_2 2
+#define E1000_REVISION_4 4
+
+#define E1000_FUNC_0 0
+#define E1000_FUNC_1 1
+#define E1000_FUNC_2 2
+#define E1000_FUNC_3 3
+
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9
+
+enum e1000_mac_type {
+ e1000_undefined = 0,
+ e1000_82575,
+ e1000_82576,
+ e1000_82580,
+ e1000_i350,
+ e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
+};
+
+enum e1000_media_type {
+ e1000_media_type_unknown = 0,
+ e1000_media_type_copper = 1,
+ e1000_media_type_internal_serdes = 2,
+ e1000_num_media_types
+};
+
+enum e1000_nvm_type {
+ e1000_nvm_unknown = 0,
+ e1000_nvm_none,
+ e1000_nvm_eeprom_spi,
+ e1000_nvm_flash_hw,
+ e1000_nvm_flash_sw
+};
+
+enum e1000_nvm_override {
+ e1000_nvm_override_none = 0,
+ e1000_nvm_override_spi_small,
+ e1000_nvm_override_spi_large,
+};
+
+enum e1000_phy_type {
+ e1000_phy_unknown = 0,
+ e1000_phy_none,
+ e1000_phy_m88,
+ e1000_phy_igp,
+ e1000_phy_igp_2,
+ e1000_phy_gg82563,
+ e1000_phy_igp_3,
+ e1000_phy_ife,
+ e1000_phy_82580,
+};
+
+enum e1000_bus_type {
+ e1000_bus_type_unknown = 0,
+ e1000_bus_type_pci,
+ e1000_bus_type_pcix,
+ e1000_bus_type_pci_express,
+ e1000_bus_type_reserved
+};
+
+enum e1000_bus_speed {
+ e1000_bus_speed_unknown = 0,
+ e1000_bus_speed_33,
+ e1000_bus_speed_66,
+ e1000_bus_speed_100,
+ e1000_bus_speed_120,
+ e1000_bus_speed_133,
+ e1000_bus_speed_2500,
+ e1000_bus_speed_5000,
+ e1000_bus_speed_reserved
+};
+
+enum e1000_bus_width {
+ e1000_bus_width_unknown = 0,
+ e1000_bus_width_pcie_x1,
+ e1000_bus_width_pcie_x2,
+ e1000_bus_width_pcie_x4 = 4,
+ e1000_bus_width_pcie_x8 = 8,
+ e1000_bus_width_32,
+ e1000_bus_width_64,
+ e1000_bus_width_reserved
+};
+
+enum e1000_1000t_rx_status {
+ e1000_1000t_rx_status_not_ok = 0,
+ e1000_1000t_rx_status_ok,
+ e1000_1000t_rx_status_undefined = 0xFF
+};
+
+enum e1000_rev_polarity {
+ e1000_rev_polarity_normal = 0,
+ e1000_rev_polarity_reversed,
+ e1000_rev_polarity_undefined = 0xFF
+};
+
+enum e1000_fc_mode {
+ e1000_fc_none = 0,
+ e1000_fc_rx_pause,
+ e1000_fc_tx_pause,
+ e1000_fc_full,
+ e1000_fc_default = 0xFF
+};
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+ u64 crcerrs;
+ u64 algnerrc;
+ u64 symerrs;
+ u64 rxerrc;
+ u64 mpc;
+ u64 scc;
+ u64 ecol;
+ u64 mcc;
+ u64 latecol;
+ u64 colc;
+ u64 dc;
+ u64 tncrs;
+ u64 sec;
+ u64 cexterr;
+ u64 rlec;
+ u64 xonrxc;
+ u64 xontxc;
+ u64 xoffrxc;
+ u64 xofftxc;
+ u64 fcruc;
+ u64 prc64;
+ u64 prc127;
+ u64 prc255;
+ u64 prc511;
+ u64 prc1023;
+ u64 prc1522;
+ u64 gprc;
+ u64 bprc;
+ u64 mprc;
+ u64 gptc;
+ u64 gorc;
+ u64 gotc;
+ u64 rnbc;
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rjc;
+ u64 mgprc;
+ u64 mgpdc;
+ u64 mgptc;
+ u64 tor;
+ u64 tot;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 tsctc;
+ u64 tsctfc;
+ u64 iac;
+ u64 icrxptc;
+ u64 icrxatc;
+ u64 ictxptc;
+ u64 ictxatc;
+ u64 ictxqec;
+ u64 ictxqmtc;
+ u64 icrxdmtc;
+ u64 icrxoc;
+ u64 cbtmpc;
+ u64 htdpmc;
+ u64 cbrdpc;
+ u64 cbrmpc;
+ u64 rpthc;
+ u64 hgptc;
+ u64 htcbdpc;
+ u64 hgorc;
+ u64 hgotc;
+ u64 lenerrs;
+ u64 scvpc;
+ u64 hrmpc;
+ u64 doosync;
+ u64 o2bgptc;
+ u64 o2bspc;
+ u64 b2ospc;
+ u64 b2ogprc;
+};
+
+struct e1000_phy_stats {
+ u32 idle_errors;
+ u32 receive_errors;
+};
+
+struct e1000_host_mng_dhcp_cookie {
+ u32 signature;
+ u8 status;
+ u8 reserved0;
+ u16 vlan_id;
+ u32 reserved1;
+ u16 reserved2;
+ u8 reserved3;
+ u8 checksum;
+};
+
+/* Host Interface "Rev 1" */
+struct e1000_host_command_header {
+ u8 command_id;
+ u8 command_length;
+ u8 command_options;
+ u8 checksum;
+};
+
+#define E1000_HI_MAX_DATA_LENGTH 252
+struct e1000_host_command_info {
+ struct e1000_host_command_header command_header;
+ u8 command_data[E1000_HI_MAX_DATA_LENGTH];
+};
+
+/* Host Interface "Rev 2" */
+struct e1000_host_mng_command_header {
+ u8 command_id;
+ u8 checksum;
+ u16 reserved1;
+ u16 reserved2;
+ u16 command_length;
+};
+
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
+struct e1000_host_mng_command_info {
+ struct e1000_host_mng_command_header command_header;
+ u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
+};
+
+#include "e1000_mac.h"
+#include "e1000_phy.h"
+#include "e1000_nvm.h"
+#include "e1000_mbx.h"
+
+struct e1000_mac_operations {
+ s32 (*check_for_link)(struct e1000_hw *);
+ s32 (*reset_hw)(struct e1000_hw *);
+ s32 (*init_hw)(struct e1000_hw *);
+ bool (*check_mng_mode)(struct e1000_hw *);
+ s32 (*setup_physical_interface)(struct e1000_hw *);
+ void (*rar_set)(struct e1000_hw *, u8 *, u32);
+ s32 (*read_mac_addr)(struct e1000_hw *);
+ s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
+};
+
+struct e1000_phy_operations {
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*check_polarity)(struct e1000_hw *);
+ s32 (*check_reset_block)(struct e1000_hw *);
+ s32 (*force_speed_duplex)(struct e1000_hw *);
+ s32 (*get_cfg_done)(struct e1000_hw *hw);
+ s32 (*get_cable_length)(struct e1000_hw *);
+ s32 (*get_phy_info)(struct e1000_hw *);
+ s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
+ void (*release)(struct e1000_hw *);
+ s32 (*reset)(struct e1000_hw *);
+ s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
+ s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
+ s32 (*write_reg)(struct e1000_hw *, u32, u16);
+};
+
+struct e1000_nvm_operations {
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
+ void (*release)(struct e1000_hw *);
+ s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+ s32 (*update)(struct e1000_hw *);
+ s32 (*validate)(struct e1000_hw *);
+};
+
+struct e1000_info {
+ s32 (*get_invariants)(struct e1000_hw *);
+ struct e1000_mac_operations *mac_ops;
+ struct e1000_phy_operations *phy_ops;
+ struct e1000_nvm_operations *nvm_ops;
+};
+
+extern const struct e1000_info e1000_82575_info;
+
+struct e1000_mac_info {
+ struct e1000_mac_operations ops;
+
+ u8 addr[6];
+ u8 perm_addr[6];
+
+ enum e1000_mac_type type;
+
+ u32 ledctl_default;
+ u32 ledctl_mode1;
+ u32 ledctl_mode2;
+ u32 mc_filter_type;
+ u32 txcw;
+
+ u16 mta_reg_count;
+ u16 uta_reg_count;
+
+ /* Maximum size of the MTA register table in all supported adapters */
+ #define MAX_MTA_REG 128
+ u32 mta_shadow[MAX_MTA_REG];
+ u16 rar_entry_count;
+
+ u8 forced_speed_duplex;
+
+ bool adaptive_ifs;
+ bool arc_subsystem_valid;
+ bool asf_firmware_present;
+ bool autoneg;
+ bool autoneg_failed;
+ bool disable_hw_init_bits;
+ bool get_link_status;
+ bool ifs_params_forced;
+ bool in_ifs_mode;
+ bool report_tx_early;
+ bool serdes_has_link;
+ bool tx_pkt_filtering;
+};
+
+struct e1000_phy_info {
+ struct e1000_phy_operations ops;
+
+ enum e1000_phy_type type;
+
+ enum e1000_1000t_rx_status local_rx;
+ enum e1000_1000t_rx_status remote_rx;
+ enum e1000_ms_type ms_type;
+ enum e1000_ms_type original_ms_type;
+ enum e1000_rev_polarity cable_polarity;
+ enum e1000_smart_speed smart_speed;
+
+ u32 addr;
+ u32 id;
+ u32 reset_delay_us; /* in usec */
+ u32 revision;
+
+ enum e1000_media_type media_type;
+
+ u16 autoneg_advertised;
+ u16 autoneg_mask;
+ u16 cable_length;
+ u16 max_cable_length;
+ u16 min_cable_length;
+
+ u8 mdix;
+
+ bool disable_polarity_correction;
+ bool is_mdix;
+ bool polarity_correction;
+ bool reset_disable;
+ bool speed_downgraded;
+ bool autoneg_wait_to_complete;
+};
+
+struct e1000_nvm_info {
+ struct e1000_nvm_operations ops;
+ enum e1000_nvm_type type;
+ enum e1000_nvm_override override;
+
+ u32 flash_bank_size;
+ u32 flash_base_addr;
+
+ u16 word_size;
+ u16 delay_usec;
+ u16 address_bits;
+ u16 opcode_bits;
+ u16 page_size;
+};
+
+struct e1000_bus_info {
+ enum e1000_bus_type type;
+ enum e1000_bus_speed speed;
+ enum e1000_bus_width width;
+
+ u32 snoop;
+
+ u16 func;
+ u16 pci_cmd_word;
+};
+
+struct e1000_fc_info {
+ u32 high_water; /* Flow control high-water mark */
+ u32 low_water; /* Flow control low-water mark */
+ u16 pause_time; /* Flow control pause timer */
+ bool send_xon; /* Flow control send XON */
+ bool strict_ieee; /* Strict IEEE mode */
+ enum e1000_fc_mode current_mode; /* Type of flow control */
+ enum e1000_fc_mode requested_mode;
+};
+
+struct e1000_mbx_operations {
+ s32 (*init_params)(struct e1000_hw *hw);
+ s32 (*read)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*check_for_msg)(struct e1000_hw *, u16);
+ s32 (*check_for_ack)(struct e1000_hw *, u16);
+ s32 (*check_for_rst)(struct e1000_hw *, u16);
+};
+
+struct e1000_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct e1000_mbx_info {
+ struct e1000_mbx_operations ops;
+ struct e1000_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u16 size;
+};
+
+struct e1000_dev_spec_82575 {
+ bool sgmii_active;
+ bool global_device_reset;
+ bool eee_disable;
+};
+
+struct e1000_hw {
+ void *back;
+
+ u8 __iomem *hw_addr;
+ u8 __iomem *flash_address;
+ unsigned long io_base;
+
+ struct e1000_mac_info mac;
+ struct e1000_fc_info fc;
+ struct e1000_phy_info phy;
+ struct e1000_nvm_info nvm;
+ struct e1000_bus_info bus;
+ struct e1000_mbx_info mbx;
+ struct e1000_host_mng_dhcp_cookie mng_cookie;
+
+ union {
+ struct e1000_dev_spec_82575 _82575;
+ } dev_spec;
+
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 vendor_id;
+
+ u8 revision_id;
+};
+
+extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
+#define hw_dbg(format, arg...) \
+ netdev_dbg(igb_get_hw_dev(hw), format, ##arg)
+
+/* These functions must be implemented by drivers */
+s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
new file mode 100644
index 000000000000..872119d91afd
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -0,0 +1,1426 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "e1000_mac.h"
+
+#include "igb.h"
+
+static s32 igb_set_default_fc(struct e1000_hw *hw);
+static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
+
+/**
+ * igb_get_bus_info_pcie - Get PCIe bus information
+ * @hw: pointer to the HW structure
+ *
+ * Determines and stores the system bus information for a particular
+ * network interface. The following bus information is determined and stored:
+ * bus speed, bus width, type (PCIe), and PCIe function.
+ **/
+s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+ s32 ret_val;
+ u32 reg;
+ u16 pcie_link_status;
+
+ bus->type = e1000_bus_type_pci_express;
+
+ ret_val = igb_read_pcie_cap_reg(hw,
+ PCI_EXP_LNKSTA,
+ &pcie_link_status);
+ if (ret_val) {
+ bus->width = e1000_bus_width_unknown;
+ bus->speed = e1000_bus_speed_unknown;
+ } else {
+ switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
+ case PCI_EXP_LNKSTA_CLS_2_5GB:
+ bus->speed = e1000_bus_speed_2500;
+ break;
+ case PCI_EXP_LNKSTA_CLS_5_0GB:
+ bus->speed = e1000_bus_speed_5000;
+ break;
+ default:
+ bus->speed = e1000_bus_speed_unknown;
+ break;
+ }
+
+ bus->width = (enum e1000_bus_width)((pcie_link_status &
+ PCI_EXP_LNKSTA_NLW) >>
+ PCI_EXP_LNKSTA_NLW_SHIFT);
+ }
+
+ reg = rd32(E1000_STATUS);
+ bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+
+ return 0;
+}
+
+/**
+ * igb_clear_vfta - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+void igb_clear_vfta(struct e1000_hw *hw)
+{
+ u32 offset;
+
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ array_wr32(E1000_VFTA, offset, 0);
+ wrfl();
+ }
+}
+
+/**
+ * igb_write_vfta - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: register offset in VLAN filter table
+ * @value: register value written to VLAN filter table
+ *
+ * Writes value at the given offset in the register array which stores
+ * the VLAN filter table.
+ **/
+static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ array_wr32(E1000_VFTA, offset, value);
+ wrfl();
+}
+
+/**
+ * igb_init_rx_addrs - Initialize receive addresses
+ * @hw: pointer to the HW structure
+ * @rar_count: number of receive address registers
+ *
+ * Sets up the receive address registers by setting the base receive address
+ * register to the device's MAC address and clearing all the other receive
+ * address registers to 0.
+ **/
+void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
+{
+ u32 i;
+ u8 mac_addr[ETH_ALEN] = {0};
+
+ /* Setup the receive address */
+ hw_dbg("Programming MAC Address into RAR[0]\n");
+
+ hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
+
+ /* Zero out the other (rar_entry_count - 1) receive addresses */
+ hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
+ for (i = 1; i < rar_count; i++)
+ hw->mac.ops.rar_set(hw, mac_addr, i);
+}
+
+/**
+ * igb_vfta_set - enable or disable vlan in VLAN filter table
+ * @hw: pointer to the HW structure
+ * @vid: VLAN id to add or remove
+ * @add: if true add filter, if false remove
+ *
+ * Sets or clears a bit in the VLAN filter table array based on the VLAN
+ * id and whether we are adding or removing the filter
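+ *
+ * Worked example (illustrative only): vid 100 selects dword index
+ * 100 >> 5 = 3 and bit 100 & 0x1F = 4, i.e. bit 4 of VFTA[3].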
+ **/
+s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
+{
+ u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
+ u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+ u32 vfta = array_rd32(E1000_VFTA, index);
+ s32 ret_val = 0;
+
+ /* bit was set/cleared before we started */
+ if ((!!(vfta & mask)) == add) {
+ ret_val = -E1000_ERR_CONFIG;
+ } else {
+ if (add)
+ vfta |= mask;
+ else
+ vfta &= ~mask;
+ }
+
+ igb_write_vfta(hw, index, vfta);
+
+ return ret_val;
+}
+
+/**
+ * igb_check_alt_mac_addr - Check for alternate MAC addr
+ * @hw: pointer to the HW structure
+ *
+ * Checks the NVM for an alternate MAC address. An alternate MAC address
+ * can be set up by pre-boot software and must be treated like a permanent
+ * address that overrides the actual permanent MAC address. If an
+ * alternate MAC address is found, it is saved in the hw struct and
+ * programmed into RAR0, and the function returns success; otherwise the
+ * function returns an error.
+ **/
+s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
+{
+ u32 i;
+ s32 ret_val = 0;
+ u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+ u8 alt_mac_addr[ETH_ALEN];
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+ &nvm_alt_mac_addr_offset);
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
+ (nvm_alt_mac_addr_offset == 0x0000))
+ /* There is no Alternate MAC Address */
+ goto out;
+
+ if (hw->bus.func == E1000_FUNC_1)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+ if (hw->bus.func == E1000_FUNC_2)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
+
+ if (hw->bus.func == E1000_FUNC_3)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
+ for (i = 0; i < ETH_ALEN; i += 2) {
+ offset = nvm_alt_mac_addr_offset + (i >> 1);
+ ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+ alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+ }
+
+ /* if multicast bit is set, the alternate address will not be used */
+ if (is_multicast_ether_addr(alt_mac_addr)) {
+ hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
+ goto out;
+ }
+
+ /*
+ * We have a valid alternate MAC address, and we want to treat it the
+ * same as the normal permanent MAC address stored by the HW into the
+ * RAR. Do this by mapping this address into RAR0.
+ */
+ hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_rar_set - Set receive address register
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address array register at index to the address passed
+ * in by addr.
+ **/
+void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+
+ /*
+ * HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32) addr[0] |
+ ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= E1000_RAH_AV;
+
+ /*
+ * Some bridges will combine consecutive 32-bit writes into
+ * a single burst write, which will malfunction on some parts.
+ * The flushes avoid this.
+ */
+ wr32(E1000_RAL(index), rar_low);
+ wrfl();
+ wr32(E1000_RAH(index), rar_high);
+ wrfl();
+}
+
+/**
+ * igb_mta_set - Set multicast filter table address
+ * @hw: pointer to the HW structure
+ * @hash_value: determines the MTA register and bit to set
+ *
+ * The multicast table address is a register array of 32-bit registers.
+ * The hash_value is used to determine what register the bit is in; the
+ * current value is read, the new bit is OR'd in, and the new value is
+ * written back into the register.
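+ *
+ * Worked example (illustrative only): with 128 MTA registers, a
+ * hash_value of 0x563 selects register (0x563 >> 5) & 0x7F = 0x2B
+ * and bit 0x563 & 0x1F = 3 within it.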
+ **/
+void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
+{
+ u32 hash_bit, hash_reg, mta;
+
+ /*
+ * The MTA is a register array of 32-bit registers. It is
+ * treated like an array of (32*mta_reg_count) bits. We want to
+ * set bit BitArray[hash_value]. So we figure out what register
+ * the bit is in, read it, OR in the new bit, then write
+ * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
+ * mask to bits 31:5 of the hash value which gives us the
+ * register we're modifying. The hash bit within that register
+ * is determined by the lower 5 bits of the hash value.
+ */
+ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+ hash_bit = hash_value & 0x1F;
+
+ mta = array_rd32(E1000_MTA, hash_reg);
+
+ mta |= (1 << hash_bit);
+
+ array_wr32(E1000_MTA, hash_reg, mta);
+ wrfl();
+}
+
+/**
+ * igb_hash_mc_addr - Generate a multicast hash value
+ * @hw: pointer to the HW structure
+ * @mc_addr: pointer to a multicast address
+ *
+ * Generates a multicast address hash value which is used to determine
+ * the multicast filter table array address and new table value. See
+ * igb_mta_set()
+ **/
+static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+ u32 hash_value, hash_mask;
+ u8 bit_shift = 0;
+
+ /* Register count multiplied by bits per register */
+ hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+ /*
+ * For a mc_filter_type of 0, bit_shift is the number of left-shifts
+ * where 0xFF would still fall within the hash mask.
+ */
+ while (hash_mask >> bit_shift != 0xFF)
+ bit_shift++;
+
+ /*
+ * The portion of the address that is used for the hash table
+ * is determined by the mc_filter_type setting.
+ * The algorithm is such that there is a total of 8 bits of shifting.
+ * The bit_shift for a mc_filter_type of 0 represents the number of
+ * left-shifts where the MSB of mc_addr[5] would still fall within
+ * the hash_mask. Case 0 does this exactly. Since there are a total
+ * of 8 bits of shifting, then mc_addr[4] will shift right the
+ * remaining number of bits. Thus 8 - bit_shift. The rest of the
+ * cases are a variation of this algorithm...essentially raising the
+ * number of bits to shift mc_addr[5] left, while still keeping the
+ * 8-bit shifting total.
+ *
+ * For example, given the following Destination MAC Address and an
+ * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
+ * we can see that the bit_shift for case 0 is 4. These are the hash
+ * values resulting from each mc_filter_type...
+ * [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB MSB
+ *
+ * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+ * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+ * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+ * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+ */
+ switch (hw->mac.mc_filter_type) {
+ default:
+ case 0:
+ break;
+ case 1:
+ bit_shift += 1;
+ break;
+ case 2:
+ bit_shift += 2;
+ break;
+ case 3:
+ bit_shift += 4;
+ break;
+ }
+
+ hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+ (((u16) mc_addr[5]) << bit_shift)));
+
+ return hash_value;
+}
+
+/**
+ * igb_update_mc_addr_list - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates entire Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void igb_update_mc_addr_list(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count)
+{
+ u32 hash_value, hash_bit, hash_reg;
+ int i;
+
+ /* clear mta_shadow */
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+ /* update mta_shadow from mc_addr_list */
+ for (i = 0; (u32) i < mc_addr_count; i++) {
+ hash_value = igb_hash_mc_addr(hw, mc_addr_list);
+
+ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+ hash_bit = hash_value & 0x1F;
+
+ hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ mc_addr_list += (ETH_ALEN);
+ }
+
+ /* replace the entire MTA table */
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
+ wrfl();
+}
+
+/**
+ * igb_clear_hw_cntrs_base - Clear base hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the base hardware counters by reading the counter registers.
+ **/
+void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
+{
+ rd32(E1000_CRCERRS);
+ rd32(E1000_SYMERRS);
+ rd32(E1000_MPC);
+ rd32(E1000_SCC);
+ rd32(E1000_ECOL);
+ rd32(E1000_MCC);
+ rd32(E1000_LATECOL);
+ rd32(E1000_COLC);
+ rd32(E1000_DC);
+ rd32(E1000_SEC);
+ rd32(E1000_RLEC);
+ rd32(E1000_XONRXC);
+ rd32(E1000_XONTXC);
+ rd32(E1000_XOFFRXC);
+ rd32(E1000_XOFFTXC);
+ rd32(E1000_FCRUC);
+ rd32(E1000_GPRC);
+ rd32(E1000_BPRC);
+ rd32(E1000_MPRC);
+ rd32(E1000_GPTC);
+ rd32(E1000_GORCL);
+ rd32(E1000_GORCH);
+ rd32(E1000_GOTCL);
+ rd32(E1000_GOTCH);
+ rd32(E1000_RNBC);
+ rd32(E1000_RUC);
+ rd32(E1000_RFC);
+ rd32(E1000_ROC);
+ rd32(E1000_RJC);
+ rd32(E1000_TORL);
+ rd32(E1000_TORH);
+ rd32(E1000_TOTL);
+ rd32(E1000_TOTH);
+ rd32(E1000_TPR);
+ rd32(E1000_TPT);
+ rd32(E1000_MPTC);
+ rd32(E1000_BPTC);
+}
+
+/**
+ * igb_check_for_copper_link - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the link status of the hardware has changed. If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ **/
+s32 igb_check_for_copper_link(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ bool link;
+
+ /*
+ * We only want to go out to the PHY registers to see if Auto-Neg
+ * has completed and/or if our link status has changed. The
+ * get_link_status flag is set upon receiving a Link Status
+ * Change or Rx Sequence Error interrupt.
+ */
+ if (!mac->get_link_status) {
+ ret_val = 0;
+ goto out;
+ }
+
+ /*
+ * First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+ ret_val = igb_phy_has_link(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (!link)
+ goto out; /* No link detected */
+
+ mac->get_link_status = false;
+
+ /*
+ * Check if there was a downshift; this must be checked
+ * immediately after link-up.
+ */
+ igb_check_downshift(hw);
+
+ /*
+ * If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg) {
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ /*
+ * Auto-Neg is enabled. Auto Speed Detection takes care
+ * of MAC speed/duplex configuration. So we only need to
+ * configure Collision Distance in the MAC.
+ */
+ igb_config_collision_dist(hw);
+
+ /*
+ * Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = igb_config_fc_after_link_up(hw);
+ if (ret_val)
+ hw_dbg("Error configuring flow control\n");
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_setup_link - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+s32 igb_setup_link(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ /*
+ * In the case of the phy reset being blocked, we already have a link.
+ * We do not need to set it up again.
+ */
+ if (igb_check_reset_block(hw))
+ goto out;
+
+ /*
+ * If requested flow control is set to default, set flow control
+ * based on the EEPROM flow control settings.
+ */
+ if (hw->fc.requested_mode == e1000_fc_default) {
+ ret_val = igb_set_default_fc(hw);
+ if (ret_val)
+ goto out;
+ }
+
+ /*
+ * We want to save off the original Flow Control configuration just
+ * in case we get disconnected and then reconnected into a different
+ * hub or switch with different Flow Control capabilities.
+ */
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
+
+ /* Call the necessary media_type subroutine to configure the link. */
+ ret_val = hw->mac.ops.setup_physical_interface(hw);
+ if (ret_val)
+ goto out;
+
+ /*
+ * Initialize the flow control address, type, and PAUSE timer
+ * registers to their default values. This is done even if flow
+ * control is disabled, because it does not hurt anything to
+ * initialize these registers.
+ */
+ hw_dbg("Initializing the Flow Control address, type and timer regs\n");
+ wr32(E1000_FCT, FLOW_CONTROL_TYPE);
+ wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+ wr32(E1000_FCTTV, hw->fc.pause_time);
+
+ ret_val = igb_set_fc_watermarks(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_config_collision_dist - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup. Currently no func pointer exists and all
+ * implementations are handled in the generic version of this function.
+ **/
+void igb_config_collision_dist(struct e1000_hw *hw)
+{
+ u32 tctl;
+
+ tctl = rd32(E1000_TCTL);
+
+ tctl &= ~E1000_TCTL_COLD;
+ tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+ wr32(E1000_TCTL, tctl);
+ wrfl();
+}
+
+/**
+ * igb_set_fc_watermarks - Set flow control high/low watermarks
+ * @hw: pointer to the HW structure
+ *
+ * Sets the flow control high/low threshold (watermark) registers. If
+ * flow control XON frame transmission is enabled, then set XON frame
+ * transmission as well.
+ **/
+static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u32 fcrtl = 0, fcrth = 0;
+
+ /*
+ * Set the flow control receive threshold registers. Normally,
+ * these registers will be set to a default threshold that may be
+ * adjusted later by the driver's runtime code. However, if the
+ * ability to transmit pause frames is not enabled, then these
+ * registers will be set to 0.
+ */
+ if (hw->fc.current_mode & e1000_fc_tx_pause) {
+ /*
+ * We need to set up the Receive Threshold high and low water
+ * marks as well as (optionally) enabling the transmission of
+ * XON frames.
+ */
+ fcrtl = hw->fc.low_water;
+ if (hw->fc.send_xon)
+ fcrtl |= E1000_FCRTL_XONE;
+
+ fcrth = hw->fc.high_water;
+ }
+ wr32(E1000_FCRTL, fcrtl);
+ wr32(E1000_FCRTH, fcrth);
+
+ return ret_val;
+}
+
+/**
+ * igb_set_default_fc - Set flow control default values
+ * @hw: pointer to the HW structure
+ *
+ * Read the EEPROM for the default values for flow control and store the
+ * values.
+ **/
+static s32 igb_set_default_fc(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 nvm_data;
+
+ /*
+ * Read and store word 0x0F of the EEPROM. This word contains bits
+ * that determine the hardware's default PAUSE (flow control) mode,
+ * a bit that determines whether the HW defaults to enabling or
+ * disabling auto-negotiation, and the direction of the
+ * SW defined pins. If there is no SW over-ride of the flow
+ * control setting, then the variable hw->fc will
+ * be initialized based on a value in the EEPROM.
+ */
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
+
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
+ hw->fc.requested_mode = e1000_fc_none;
+ else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
+ NVM_WORD0F_ASM_DIR)
+ hw->fc.requested_mode = e1000_fc_tx_pause;
+ else
+ hw->fc.requested_mode = e1000_fc_full;
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_force_mac_fc - Force the MAC's flow control settings
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
+ * device control register to reflect the adapter settings. TFCE and RFCE
+ * need to be explicitly set by software when a copper PHY is used because
+ * autonegotiation is managed by the PHY rather than the MAC. Software must
+ * also configure these bits when link is forced on a fiber connection.
+ **/
+s32 igb_force_mac_fc(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val = 0;
+
+ ctrl = rd32(E1000_CTRL);
+
+ /*
+ * Because we didn't get link via the internal auto-negotiation
+ * mechanism (we either forced link or we got link via PHY
+ * auto-neg), we have to manually enable/disable transmit and
+ * receive flow control.
+ *
+ * The switch statement below enables/disables flow control
+ * according to the "hw->fc.current_mode" parameter.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause
+ * frames but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause
+ * frames but we do not receive pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) is enabled.
+ * other: No other values should be possible at this point.
+ */
+ hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+ ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+ break;
+ case e1000_fc_rx_pause:
+ ctrl &= (~E1000_CTRL_TFCE);
+ ctrl |= E1000_CTRL_RFCE;
+ break;
+ case e1000_fc_tx_pause:
+ ctrl &= (~E1000_CTRL_RFCE);
+ ctrl |= E1000_CTRL_TFCE;
+ break;
+ case e1000_fc_full:
+ ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+ break;
+ default:
+ hw_dbg("Flow control param set incorrectly\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ wr32(E1000_CTRL, ctrl);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_config_fc_after_link_up - Configures flow control after link
+ * @hw: pointer to the HW structure
+ *
+ * Checks the status of auto-negotiation after link up to ensure that the
+ * speed and duplex were not forced. If the link needed to be forced, then
+ * flow control needs to be forced also. If auto-negotiation is enabled
+ * and did not fail, then we configure flow control based on our link
+ * partner.
+ **/
+s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val = 0;
+ u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+ u16 speed, duplex;
+
+ /*
+ * Check for the case where we have fiber media and auto-neg failed
+ * so we had to force link. In this case, we need to force the
+ * configuration of the MAC to match the "fc" parameter.
+ */
+ if (mac->autoneg_failed) {
+ if (hw->phy.media_type == e1000_media_type_internal_serdes)
+ ret_val = igb_force_mac_fc(hw);
+ } else {
+ if (hw->phy.media_type == e1000_media_type_copper)
+ ret_val = igb_force_mac_fc(hw);
+ }
+
+ if (ret_val) {
+ hw_dbg("Error forcing flow control settings\n");
+ goto out;
+ }
+
+ /*
+ * Check for the case where we have copper media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
+ /*
+ * Read the MII Status Register and check to see if AutoNeg
+ * has completed. We read this twice because this reg has
+ * some "sticky" (latched) bits.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+ &mii_status_reg);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+ &mii_status_reg);
+ if (ret_val)
+ goto out;
+
+ if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+ hw_dbg("Copper PHY and Auto Neg has not completed.\n");
+ goto out;
+ }
+
+ /*
+ * The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (Address 4) and the Auto_Negotiation Base
+ * Page Ability Register (Address 5) to determine how
+ * flow control was negotiated.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
+ &mii_nway_adv_reg);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
+ &mii_nway_lp_ability_reg);
+ if (ret_val)
+ goto out;
+
+ /*
+ * Two bits in the Auto Negotiation Advertisement Register
+ * (Address 4) and two bits in the Auto Negotiation Base
+ * Page Ability Register (Address 5) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | e1000_fc_none
+ * 0 | 1 | 0 | DC | e1000_fc_none
+ * 0 | 1 | 1 | 0 | e1000_fc_none
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ * 1 | 0 | 0 | DC | e1000_fc_none
+ * 1 | DC | 1 | DC | e1000_fc_full
+ * 1 | 1 | 0 | 0 | e1000_fc_none
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | e1000_fc_full
+ *
+ */
+ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+ /*
+ * Now we need to check if the user selected Rx-only
+ * pause frames. In this case, we had to advertise
+ * full flow control because we could not advertise Rx
+ * only. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == e1000_fc_full) {
+ hw->fc.current_mode = e1000_fc_full;
+ hw_dbg("Flow Control = FULL.\r\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ hw_dbg("Flow Control = "
+ "RX PAUSE frames only.\r\n");
+ }
+ }
+ /*
+ * For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ */
+ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_tx_pause;
+ hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
+ }
+ /*
+ * For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ */
+ else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
+ }
+ /*
+ * Per the IEEE spec, at this point flow control should be
+ * disabled. However, we want to consider that we could
+ * be connected to a legacy switch that doesn't advertise
+ * desired flow control, but can be forced on the link
+ * partner. So if we advertised no flow control, that is
+ * what we will resolve to. If we advertised some kind of
+ * receive capability (Rx Pause Only or Full Flow Control)
+ * and the link partner advertised none, we will configure
+ * ourselves to enable Rx Flow Control only. We can do
+ * this safely for two reasons: If the link partner really
+ * didn't want flow control enabled, and we enable Rx, no
+ * harm done since we won't be receiving any PAUSE frames
+ * anyway. If the intent on the link partner was to have
+ * flow control enabled, then by us enabling RX only, we
+ * can at least receive pause frames and process them.
+ * This is a good idea because in most cases, since we are
+ * predominantly a server NIC, more times than not we will
+ * be asked to delay transmission of packets than asking
+ * our link partner to pause transmission of frames.
+ */
+ else if ((hw->fc.requested_mode == e1000_fc_none ||
+ hw->fc.requested_mode == e1000_fc_tx_pause) ||
+ hw->fc.strict_ieee) {
+ hw->fc.current_mode = e1000_fc_none;
+ hw_dbg("Flow Control = NONE.\r\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
+ }
+
+ /*
+ * Now we need to do one last check... If we auto-
+ * negotiated to HALF DUPLEX, flow control should not be
+ * enabled per IEEE 802.3 spec.
+ */
+ ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ hw_dbg("Error getting link speed and duplex\n");
+ goto out;
+ }
+
+ if (duplex == HALF_DUPLEX)
+ hw->fc.current_mode = e1000_fc_none;
+
+ /*
+ * Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ ret_val = igb_force_mac_fc(hw);
+ if (ret_val) {
+ hw_dbg("Error forcing flow control settings\n");
+ goto out;
+ }
+ }
+
+out:
+ return ret_val;
+}
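+
+/*
+ * Editor's sketch, not part of this patch: the core of the PAUSE/ASM_DIR
+ * resolution table above, restated as a standalone helper (before the
+ * requested-mode downgrade and legacy-switch fallback applied above).
+ * It assumes the e1000_fc_mode enum from e1000_hw.h and the NWAY_* masks
+ * already used in igb_config_fc_after_link_up().
+ */
+static inline enum e1000_fc_mode igb_example_resolve_fc(u16 adv, u16 lp)
+{
+ /* both PAUSE bits set: symmetric flow control, ASM_DIR is irrelevant */
+ if ((adv & NWAY_AR_PAUSE) && (lp & NWAY_LPAR_PAUSE))
+ return e1000_fc_full;
+ /* we advertised asymmetric only and the partner allows it: Tx pause */
+ if (!(adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
+ (lp & NWAY_LPAR_PAUSE) && (lp & NWAY_LPAR_ASM_DIR))
+ return e1000_fc_tx_pause;
+ /* the partner advertised asymmetric only toward us: Rx pause */
+ if ((adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
+ !(lp & NWAY_LPAR_PAUSE) && (lp & NWAY_LPAR_ASM_DIR))
+ return e1000_fc_rx_pause;
+ /* per the IEEE spec, everything else resolves to no flow control */
+ return e1000_fc_none;
+}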
+
+/**
+ * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Read the status register for the current speed/duplex and store the current
+ * speed and duplex for copper connections.
+ **/
+s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ u32 status;
+
+ status = rd32(E1000_STATUS);
+ if (status & E1000_STATUS_SPEED_1000) {
+ *speed = SPEED_1000;
+ hw_dbg("1000 Mbps, ");
+ } else if (status & E1000_STATUS_SPEED_100) {
+ *speed = SPEED_100;
+ hw_dbg("100 Mbps, ");
+ } else {
+ *speed = SPEED_10;
+ hw_dbg("10 Mbps, ");
+ }
+
+ if (status & E1000_STATUS_FD) {
+ *duplex = FULL_DUPLEX;
+ hw_dbg("Full Duplex\n");
+ } else {
+ *duplex = HALF_DUPLEX;
+ hw_dbg("Half Duplex\n");
+ }
+
+ return 0;
+}
+
+/**
+ * igb_get_hw_semaphore - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ **/
+s32 igb_get_hw_semaphore(struct e1000_hw *hw)
+{
+ u32 swsm;
+ s32 ret_val = 0;
+ s32 timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
+ /* Get the SW semaphore */
+ while (i < timeout) {
+ swsm = rd32(E1000_SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ udelay(50);
+ i++;
+ }
+
+ if (i == timeout) {
+ hw_dbg("Driver can't access device - SMBI bit is set.\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+ /* Get the FW semaphore. */
+ for (i = 0; i < timeout; i++) {
+ swsm = rd32(E1000_SWSM);
+ wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
+ break;
+
+ udelay(50);
+ }
+
+ if (i == timeout) {
+ /* Release semaphores */
+ igb_put_hw_semaphore(hw);
+ hw_dbg("Driver can't access the NVM\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
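+
+/*
+ * Editor's sketch, not part of this patch: the semaphore pair above is
+ * meant to bracket PHY/NVM access -- acquire, touch the shared resource,
+ * release. The access itself is elided here.
+ */
+static inline s32 igb_example_locked_access(struct e1000_hw *hw)
+{
+ s32 ret_val = igb_get_hw_semaphore(hw);
+
+ if (ret_val)
+ return ret_val; /* SMBI or SWESMBI never latched */
+
+ /* ... PHY or NVM register access goes here ... */
+
+ igb_put_hw_semaphore(hw);
+ return 0;
+}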
+
+/**
+ * igb_put_hw_semaphore - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ **/
+void igb_put_hw_semaphore(struct e1000_hw *hw)
+{
+ u32 swsm;
+
+ swsm = rd32(E1000_SWSM);
+
+ swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+
+ wr32(E1000_SWSM, swsm);
+}
+
+/**
+ * igb_get_auto_rd_done - Check for auto read completion
+ * @hw: pointer to the HW structure
+ *
+ * Check EEPROM for Auto Read done bit.
+ **/
+s32 igb_get_auto_rd_done(struct e1000_hw *hw)
+{
+ s32 i = 0;
+ s32 ret_val = 0;
+
+ while (i < AUTO_READ_DONE_TIMEOUT) {
+ if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
+ break;
+ msleep(1);
+ i++;
+ }
+
+ if (i == AUTO_READ_DONE_TIMEOUT) {
+ hw_dbg("Auto read by HW from NVM has not completed.\n");
+ ret_val = -E1000_ERR_RESET;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_valid_led_default - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set it to a valid LED configuration.
+ **/
+static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
+ switch (hw->phy.media_type) {
+ case e1000_media_type_internal_serdes:
+ *data = ID_LED_DEFAULT_82575_SERDES;
+ break;
+ case e1000_media_type_copper:
+ default:
+ *data = ID_LED_DEFAULT;
+ break;
+ }
+ }
+out:
+ return ret_val;
+}
+
+/**
+ * igb_id_led_init - Initialize LED identification modes
+ * @hw: pointer to the HW structure
+ *
+ * Reads the default LED configuration from the NVM and derives the
+ * LEDCTL values used for the LED-on (mode1) and LED-off (mode2)
+ * states.
+ **/
+s32 igb_id_led_init(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ const u32 ledctl_mask = 0x000000FF;
+ const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+ const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+ u16 data, i, temp;
+ const u16 led_mask = 0x0F;
+
+ ret_val = igb_valid_led_default(hw, &data);
+ if (ret_val)
+ goto out;
+
+ mac->ledctl_default = rd32(E1000_LEDCTL);
+ mac->ledctl_mode1 = mac->ledctl_default;
+ mac->ledctl_mode2 = mac->ledctl_default;
+
+ for (i = 0; i < 4; i++) {
+ temp = (data >> (i << 2)) & led_mask;
+ switch (temp) {
+ case ID_LED_ON1_DEF2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_ON1_OFF2:
+ mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode1 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_OFF1_DEF2:
+ case ID_LED_OFF1_ON2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode1 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ switch (temp) {
+ case ID_LED_DEF1_ON2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_OFF1_ON2:
+ mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode2 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_DEF1_OFF2:
+ case ID_LED_ON1_OFF2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ mac->ledctl_mode2 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_cleanup_led - Set LED config to default operation
+ * @hw: pointer to the HW structure
+ *
+ * Remove the current LED configuration and set the LED configuration
+ * to the default value, saved from the EEPROM.
+ **/
+s32 igb_cleanup_led(struct e1000_hw *hw)
+{
+ wr32(E1000_LEDCTL, hw->mac.ledctl_default);
+ return 0;
+}
+
+/**
+ * igb_blink_led - Blink LED
+ * @hw: pointer to the HW structure
+ *
+ * Blink the led's which are set to be on.
+ **/
+s32 igb_blink_led(struct e1000_hw *hw)
+{
+ u32 ledctl_blink = 0;
+ u32 i;
+
+ /*
+ * set the blink bit for each LED that's "on" (0x0E)
+ * in ledctl_mode2
+ */
+ ledctl_blink = hw->mac.ledctl_mode2;
+ for (i = 0; i < 4; i++)
+ if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
+ E1000_LEDCTL_MODE_LED_ON)
+ ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
+ (i * 8));
+
+ wr32(E1000_LEDCTL, ledctl_blink);
+
+ return 0;
+}
+
+/**
+ * igb_led_off - Turn LED off
+ * @hw: pointer to the HW structure
+ *
+ * Turn LED off.
+ **/
+s32 igb_led_off(struct e1000_hw *hw)
+{
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * igb_disable_pcie_master - Disables PCI-express master access
+ * @hw: pointer to the HW structure
+ *
+ * Returns 0 if successful, else returns -E1000_ERR_MASTER_REQUESTS_PENDING
+ * (-10) if the master disable bit has not caused the master requests to
+ * be disabled.
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests.
+ **/
+s32 igb_disable_pcie_master(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 timeout = MASTER_DISABLE_TIMEOUT;
+ s32 ret_val = 0;
+
+ if (hw->bus.type != e1000_bus_type_pci_express)
+ goto out;
+
+ ctrl = rd32(E1000_CTRL);
+ ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+ wr32(E1000_CTRL, ctrl);
+
+ while (timeout) {
+ if (!(rd32(E1000_STATUS) &
+ E1000_STATUS_GIO_MASTER_ENABLE))
+ break;
+ udelay(100);
+ timeout--;
+ }
+
+ if (!timeout) {
+ hw_dbg("Master requests are pending.\n");
+ ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_validate_mdi_setting - Verify MDI/MDIx settings
+ * @hw: pointer to the HW structure
+ *
+ * Verify that when auto-negotiation is not in use, MDI/MDIx is correctly
+ * set; in that case the PHY must be forced to MDI mode only.
+ **/
+s32 igb_validate_mdi_setting(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
+ hw_dbg("Invalid MDI setting detected\n");
+ hw->phy.mdix = 1;
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_write_8bit_ctrl_reg - Write an 8-bit CTRL register
+ * @hw: pointer to the HW structure
+ * @reg: 32bit register offset such as E1000_SCTL
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes an address/data control type register. There are several of these
+ * and they all have the format address << 8 | data and bit 31 is polled for
+ * completion.
+ **/
+s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
+ u32 offset, u8 data)
+{
+ u32 i, regvalue = 0;
+ s32 ret_val = 0;
+
+ /* Set up the address and data */
+ regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
+ wr32(reg, regvalue);
+
+ /* Poll the ready bit to see if the write completed */
+ for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
+ udelay(5);
+ regvalue = rd32(reg);
+ if (regvalue & E1000_GEN_CTL_READY)
+ break;
+ }
+ if (!(regvalue & E1000_GEN_CTL_READY)) {
+ hw_dbg("Reg %08x did not indicate ready\n", reg);
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
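+
+/*
+ * Editor's sketch, not part of this patch: a caller hands this helper a
+ * register such as E1000_SCTL plus an offset/data pair; the offset lands
+ * at E1000_GEN_CTL_ADDRESS_SHIFT and bit 31 (E1000_GEN_CTL_READY) is
+ * polled for completion. The offset and data values below are purely
+ * hypothetical.
+ */
+static inline s32 igb_example_sctl_write(struct e1000_hw *hw)
+{
+ /* hypothetical offset 0x10, data 0x01 */
+ return igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x10, 0x01);
+}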
+
+/**
+ * igb_enable_mng_pass_thru - Enable processing of ARPs
+ * @hw: pointer to the HW structure
+ *
+ * Verifies that the hardware needs to leave the interface enabled so that
+ * frames can be directed to and from the management interface.
+ **/
+bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+ u32 manc;
+ u32 fwsm, factps;
+ bool ret_val = false;
+
+ if (!hw->mac.asf_firmware_present)
+ goto out;
+
+ manc = rd32(E1000_MANC);
+
+ if (!(manc & E1000_MANC_RCV_TCO_EN))
+ goto out;
+
+ if (hw->mac.arc_subsystem_valid) {
+ fwsm = rd32(E1000_FWSM);
+ factps = rd32(E1000_FACTPS);
+
+ if (!(factps & E1000_FACTPS_MNGCG) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
+ ret_val = true;
+ goto out;
+ }
+ } else {
+ if ((manc & E1000_MANC_SMBUS_EN) &&
+ !(manc & E1000_MANC_ASF_EN)) {
+ ret_val = true;
+ goto out;
+ }
+ }
+
+out:
+ return ret_val;
+}
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
new file mode 100644
index 000000000000..4927f61fbbc8
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -0,0 +1,90 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_MAC_H_
+#define _E1000_MAC_H_
+
+#include "e1000_hw.h"
+
+#include "e1000_phy.h"
+#include "e1000_nvm.h"
+#include "e1000_defines.h"
+
+/*
+ * Functions that should not be called directly from drivers but can be used
+ * by other files in this 'shared code'
+ */
+s32 igb_blink_led(struct e1000_hw *hw);
+s32 igb_check_for_copper_link(struct e1000_hw *hw);
+s32 igb_cleanup_led(struct e1000_hw *hw);
+s32 igb_config_fc_after_link_up(struct e1000_hw *hw);
+s32 igb_disable_pcie_master(struct e1000_hw *hw);
+s32 igb_force_mac_fc(struct e1000_hw *hw);
+s32 igb_get_auto_rd_done(struct e1000_hw *hw);
+s32 igb_get_bus_info_pcie(struct e1000_hw *hw);
+s32 igb_get_hw_semaphore(struct e1000_hw *hw);
+s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+s32 igb_id_led_init(struct e1000_hw *hw);
+s32 igb_led_off(struct e1000_hw *hw);
+void igb_update_mc_addr_list(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count);
+s32 igb_setup_link(struct e1000_hw *hw);
+s32 igb_validate_mdi_setting(struct e1000_hw *hw);
+s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
+ u32 offset, u8 data);
+
+void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
+void igb_clear_vfta(struct e1000_hw *hw);
+s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add);
+void igb_config_collision_dist(struct e1000_hw *hw);
+void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
+void igb_mta_set(struct e1000_hw *hw, u32 hash_value);
+void igb_put_hw_semaphore(struct e1000_hw *hw);
+void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
+
+bool igb_enable_mng_pass_thru(struct e1000_hw *hw);
+
+enum e1000_mng_mode {
+ e1000_mng_mode_none = 0,
+ e1000_mng_mode_asf,
+ e1000_mng_mode_pt,
+ e1000_mng_mode_ipmi,
+ e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG 0x20000000
+
+#define E1000_FWSM_MODE_MASK 0xE
+#define E1000_FWSM_MODE_SHIFT 1
+
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
+
+extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
+
+#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
new file mode 100644
index 000000000000..74f2f11ac290
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -0,0 +1,446 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_mbx.h"
+
+/**
+ * igb_read_mbx - Reads a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read the message from the buffer
+ **/
+s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ if (mbx->ops.read)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * igb_write_mbx - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = 0;
+
+ if (size > mbx->size)
+ ret_val = -E1000_ERR_MBX;
+
+ else if (mbx->ops.write)
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * igb_check_for_msg - checks to see if someone sent us mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (mbx->ops.check_for_msg)
+ ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * igb_check_for_ack - checks to see if someone sent us ACK
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (mbx->ops.check_for_ack)
+ ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * igb_check_for_rst - checks to see if other side has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (mbx->ops.check_for_rst)
+ ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * igb_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ if (!countdown || !mbx->ops.check_for_msg)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ udelay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? 0 : -E1000_ERR_MBX;
+}
+
+/**
+ * igb_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message acknowledgement
+ **/
+static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ if (!countdown || !mbx->ops.check_for_ack)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ udelay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? 0 : -E1000_ERR_MBX;
+}
+
+/**
+ * igb_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (!mbx->ops.read)
+ goto out;
+
+ ret_val = igb_poll_for_msg(hw, mbx_id);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * igb_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ /* exit if either we can't write or there isn't a defined timeout */
+ if (!mbx->ops.write || !mbx->timeout)
+ goto out;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = igb_poll_for_ack(hw, mbx_id);
+out:
+ return ret_val;
+}
+
+static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
+{
+ u32 mbvficr = rd32(E1000_MBVFICR);
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (mbvficr & mask) {
+ ret_val = 0;
+ wr32(E1000_MBVFICR, mask);
+ }
+
+ return ret_val;
+}
+
+/**
+ * igb_check_for_msg_pf - checks to see if the VF has sent mail
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
+ ret_val = 0;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * igb_check_for_ack_pf - checks to see if the VF has ACKed
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
+ ret_val = 0;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * igb_check_for_rst_pf - checks to see if the VF has reset
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
+{
+ u32 vflre = rd32(E1000_VFLRE);
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (vflre & (1 << vf_number)) {
+ ret_val = 0;
+ wr32(E1000_VFLRE, (1 << vf_number));
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * igb_obtain_mbx_lock_pf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if we obtained the mailbox lock
+ **/
+static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+ u32 p2v_mailbox;
+
+ /* Take ownership of the buffer */
+ wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
+
+ /* verify the PF has reserved the mailbox */
+ p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
+ if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
+ ret_val = 0;
+
+ return ret_val;
+}
+
+/**
+ * igb_write_mbx_pf - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ igb_check_for_msg_pf(hw, vf_number);
+ igb_check_for_ack_pf(hw, vf_number);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ array_wr32(E1000_VMBMEM(vf_number), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer */
+ wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+ return ret_val;
+}
+
+/**
+ * igb_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/
+static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = array_rd32(E1000_VMBMEM(vf_number), i);
+
+ /* Acknowledge the message and release buffer */
+ wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * igb_init_mbx_params_pf - set initial values for PF mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for the PF mailbox.
+ **/
+s32 igb_init_mbx_params_pf(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = E1000_VFMAILBOX_SIZE;
+
+ mbx->ops.read = igb_read_mbx_pf;
+ mbx->ops.write = igb_write_mbx_pf;
+ mbx->ops.read_posted = igb_read_posted_mbx;
+ mbx->ops.write_posted = igb_write_posted_mbx;
+ mbx->ops.check_for_msg = igb_check_for_msg_pf;
+ mbx->ops.check_for_ack = igb_check_for_ack_pf;
+ mbx->ops.check_for_rst = igb_check_for_rst_pf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+
+ return 0;
+}
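+
+/*
+ * Editor's sketch, not part of this patch: once igb_init_mbx_params_pf()
+ * has filled hw->mbx, the PF reaches a VF through the ops table. Note
+ * that the init above leaves timeout at 0, so the posted (blocking) ops
+ * fail fast until a caller sets a real timeout/usec_delay; plain
+ * ops.write works as-is. E1000_PF_CONTROL_MSG comes from e1000_mbx.h and
+ * VF index 0 is arbitrary.
+ */
+static inline s32 igb_example_notify_vf0(struct e1000_hw *hw)
+{
+ u32 msg = E1000_PF_CONTROL_MSG;
+
+ return hw->mbx.ops.write(hw, &msg, 1, 0);
+}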
+
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
new file mode 100644
index 000000000000..eddb0f83dcea
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -0,0 +1,77 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_MBX_H_
+#define _E1000_MBX_H_
+
+#include "e1000_hw.h"
+
+#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
+#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
+#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+
+/* If it's an E1000_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is E1000_PF_*.
+ * Message ACK's are the value or'd with 0xF0000000
+ */
+#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ clear to send requests */
+#define E1000_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_VF_RESET 0x01 /* VF requests reset */
+#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
+#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
+#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
+#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */
+#define E1000_VF_SET_PROMISC 0x06 /* VF requests to clear VMOLR.ROPE/MPME */
+#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
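+
+/* Editor's note, not part of this patch: a reply reuses the request
+ * opcode or'd with an ACK/NACK flag, so a successful MAC-set reply is
+ * (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_ACK) == 0x80000002, with any
+ * extra info placed in bits 23:16 (E1000_VT_MSGINFO_MASK).
+ */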
+
+s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 igb_check_for_msg(struct e1000_hw *, u16);
+s32 igb_check_for_ack(struct e1000_hw *, u16);
+s32 igb_check_for_rst(struct e1000_hw *, u16);
+s32 igb_init_mbx_params_pf(struct e1000_hw *);
+
+#endif /* _E1000_MBX_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
new file mode 100644
index 000000000000..40407124e722
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -0,0 +1,713 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+
+#include "e1000_mac.h"
+#include "e1000_nvm.h"
+
+/**
+ * igb_raise_eec_clk - Raise EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Enable/Raise the EEPROM clock bit.
+ **/
+static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ *eecd = *eecd | E1000_EECD_SK;
+ wr32(E1000_EECD, *eecd);
+ wrfl();
+ udelay(hw->nvm.delay_usec);
+}
+
+/**
+ * igb_lower_eec_clk - Lower EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Clear/Lower the EEPROM clock bit.
+ **/
+static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+ *eecd = *eecd & ~E1000_EECD_SK;
+ wr32(E1000_EECD, *eecd);
+ wrfl();
+ udelay(hw->nvm.delay_usec);
+}
+
+/**
+ * igb_shift_out_eec_bits - Shift data bits out to the EEPROM
+ * @hw: pointer to the HW structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ *
+ * We need to shift 'count' bits out to the EEPROM. So, the value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ **/
+static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = rd32(E1000_EECD);
+ u32 mask;
+
+ mask = 0x01 << (count - 1);
+ if (nvm->type == e1000_nvm_eeprom_spi)
+ eecd |= E1000_EECD_DO;
+
+ do {
+ eecd &= ~E1000_EECD_DI;
+
+ if (data & mask)
+ eecd |= E1000_EECD_DI;
+
+ wr32(E1000_EECD, eecd);
+ wrfl();
+
+ udelay(nvm->delay_usec);
+
+ igb_raise_eec_clk(hw, &eecd);
+ igb_lower_eec_clk(hw, &eecd);
+
+ mask >>= 1;
+ } while (mask);
+
+ eecd &= ~E1000_EECD_DI;
+ wr32(E1000_EECD, eecd);
+}
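+
+/*
+ * Editor's note, not part of this patch: bits leave MSB first. For the
+ * SPI READ opcode (NVM_READ_OPCODE_SPI == 0x03) with opcode_bits == 8,
+ * the mask starts at 0x80, so the DI line carries 0,0,0,0,0,0,1,1 with
+ * one raise/lower clock pulse per bit.
+ */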
+
+/**
+ * igb_shift_in_eec_bits - Shift data bits in from the EEPROM
+ * @hw: pointer to the HW structure
+ * @count: number of bits to shift in
+ *
+ * In order to read a register from the EEPROM, we need to shift 'count' bits
+ * in from the EEPROM. Bits are "shifted in" by raising the clock input to
+ * the EEPROM (setting the SK bit), and then reading the value of the data out
+ * "DO" bit. During this "shifting in" process the data in "DI" bit should
+ * always be clear.
+ **/
+static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
+{
+ u32 eecd;
+ u32 i;
+ u16 data;
+
+ eecd = rd32(E1000_EECD);
+
+ eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+ data = 0;
+
+ for (i = 0; i < count; i++) {
+ data <<= 1;
+ igb_raise_eec_clk(hw, &eecd);
+
+ eecd = rd32(E1000_EECD);
+
+ eecd &= ~E1000_EECD_DI;
+ if (eecd & E1000_EECD_DO)
+ data |= 1;
+
+ igb_lower_eec_clk(hw, &eecd);
+ }
+
+ return data;
+}
+
+/**
+ * igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ * @hw: pointer to the HW structure
+ * @ee_reg: EEPROM flag for polling
+ *
+ * Polls the EEPROM status bit for either read or write completion based
+ * upon the value of 'ee_reg'.
+ **/
+static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
+{
+ u32 attempts = 100000;
+ u32 i, reg = 0;
+ s32 ret_val = -E1000_ERR_NVM;
+
+ for (i = 0; i < attempts; i++) {
+ if (ee_reg == E1000_NVM_POLL_READ)
+ reg = rd32(E1000_EERD);
+ else
+ reg = rd32(E1000_EEWR);
+
+ if (reg & E1000_NVM_RW_REG_DONE) {
+ ret_val = 0;
+ break;
+ }
+
+ udelay(5);
+ }
+
+ return ret_val;
+}
+
+/**
+ * igb_acquire_nvm - Generic request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+s32 igb_acquire_nvm(struct e1000_hw *hw)
+{
+ u32 eecd = rd32(E1000_EECD);
+ s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
+ s32 ret_val = 0;
+
+ wr32(E1000_EECD, eecd | E1000_EECD_REQ);
+ eecd = rd32(E1000_EECD);
+
+ while (timeout) {
+ if (eecd & E1000_EECD_GNT)
+ break;
+ udelay(5);
+ eecd = rd32(E1000_EECD);
+ timeout--;
+ }
+
+ if (!timeout) {
+ eecd &= ~E1000_EECD_REQ;
+ wr32(E1000_EECD, eecd);
+ hw_dbg("Could not acquire NVM grant\n");
+ ret_val = -E1000_ERR_NVM;
+ }
+
+ return ret_val;
+}
+
+/**
+ * igb_standby_nvm - Return EEPROM to standby state
+ * @hw: pointer to the HW structure
+ *
+ * Return the EEPROM to a standby state.
+ **/
+static void igb_standby_nvm(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = rd32(E1000_EECD);
+
+ if (nvm->type == e1000_nvm_eeprom_spi) {
+ /* Toggle CS to flush commands */
+ eecd |= E1000_EECD_CS;
+ wr32(E1000_EECD, eecd);
+ wrfl();
+ udelay(nvm->delay_usec);
+ eecd &= ~E1000_EECD_CS;
+ wr32(E1000_EECD, eecd);
+ wrfl();
+ udelay(nvm->delay_usec);
+ }
+}
+
+/**
+ * e1000_stop_nvm - Terminate EEPROM command
+ * @hw: pointer to the HW structure
+ *
+ * Terminates the current command by inverting the EEPROM's chip select pin.
+ **/
+static void e1000_stop_nvm(struct e1000_hw *hw)
+{
+ u32 eecd;
+
+ eecd = rd32(E1000_EECD);
+ if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+ /* Pull CS high */
+ eecd |= E1000_EECD_CS;
+ igb_lower_eec_clk(hw, &eecd);
+ }
+}
+
+/**
+ * igb_release_nvm - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+void igb_release_nvm(struct e1000_hw *hw)
+{
+ u32 eecd;
+
+ e1000_stop_nvm(hw);
+
+ eecd = rd32(E1000_EECD);
+ eecd &= ~E1000_EECD_REQ;
+ wr32(E1000_EECD, eecd);
+}
+
+/**
+ * igb_ready_nvm_eeprom - Prepares EEPROM for read/write
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the EEPROM for reading and writing.
+ **/
+static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = rd32(E1000_EECD);
+ s32 ret_val = 0;
+ u16 timeout = 0;
+ u8 spi_stat_reg;
+
+ if (nvm->type == e1000_nvm_eeprom_spi) {
+ /* Clear SK and CS */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ wr32(E1000_EECD, eecd);
+ wrfl();
+ udelay(1);
+ timeout = NVM_MAX_RETRY_SPI;
+
+ /*
+ * Read "Status Register" repeatedly until the LSB is cleared.
+ * The EEPROM will signal that the command has been completed
+ * by clearing bit 0 of the internal status register. If it's
+ * not cleared within 'timeout', then error out.
+ */
+ while (timeout) {
+ igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+ hw->nvm.opcode_bits);
+ spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8);
+ if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+ break;
+
+ udelay(5);
+ igb_standby_nvm(hw);
+ timeout--;
+ }
+
+ if (!timeout) {
+ hw_dbg("SPI NVM Status error\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_read_nvm_spi - Read EEPROM using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM.
+ **/
+s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i = 0;
+ s32 ret_val;
+ u16 word_in;
+ u8 read_opcode = NVM_READ_OPCODE_SPI;
+
+ /*
+ * A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ hw_dbg("nvm parameter(s) out of bounds\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+ ret_val = nvm->ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = igb_ready_nvm_eeprom(hw);
+ if (ret_val)
+ goto release;
+
+ igb_standby_nvm(hw);
+
+ if ((nvm->address_bits == 8) && (offset >= 128))
+ read_opcode |= NVM_A8_OPCODE_SPI;
+
+ /* Send the READ command (opcode + addr) */
+ igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+ igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
+
+ /*
+ * Read the data. SPI NVMs increment the address with each byte
+ * read and will roll over if reading beyond the end. This allows
+ * us to read the whole NVM from any offset.
+ */
+ for (i = 0; i < words; i++) {
+ word_in = igb_shift_in_eec_bits(hw, 16);
+ data[i] = (word_in >> 8) | (word_in << 8);
+ }
+
+release:
+ nvm->ops.release(hw);
+
+out:
+ return ret_val;
+}
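+
+/*
+ * Editor's note, not part of this patch: the swap in the loop above is
+ * needed because the SPI EEPROM emits each 16-bit word low byte first
+ * while igb_shift_in_eec_bits() assembles MSB-first; bytes 0x34 then
+ * 0x12 on the wire arrive as word_in == 0x3412 and are stored as
+ * data[i] == 0x1234. The offset is doubled in the READ command above
+ * because the part is byte addressed.
+ */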
+
+/**
+ * igb_read_nvm_eerd - Reads EEPROM using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i, eerd = 0;
+ s32 ret_val = 0;
+
+ /*
+ * A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ hw_dbg("nvm parameter(s) out of bounds\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
+ eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
+ E1000_NVM_RW_REG_START;
+
+ wr32(E1000_EERD, eerd);
+ ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+ if (ret_val)
+ break;
+
+ data[i] = (rd32(E1000_EERD) >>
+ E1000_NVM_RW_REG_DATA);
+ }
+
+out:
+ return ret_val;
+}
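+
+/*
+ * Editor's sketch, not part of this patch: reading a single word through
+ * the EERD path, here the checksum word (NVM_CHECKSUM_REG comes from
+ * e1000_defines.h).
+ */
+static inline s32 igb_example_read_checksum_word(struct e1000_hw *hw,
+ u16 *csum)
+{
+ return igb_read_nvm_eerd(hw, NVM_CHECKSUM_REG, 1, csum);
+}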
+
+/**
+ * igb_write_nvm_spi - Write to EEPROM using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * Writes data to EEPROM at offset using SPI interface.
+ *
+ * If igb_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ s32 ret_val;
+ u16 widx = 0;
+
+ /*
+ * A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ hw_dbg("nvm parameter(s) out of bounds\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+ ret_val = hw->nvm.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ msleep(10);
+
+ while (widx < words) {
+ u8 write_opcode = NVM_WRITE_OPCODE_SPI;
+
+ ret_val = igb_ready_nvm_eeprom(hw);
+ if (ret_val)
+ goto release;
+
+ igb_standby_nvm(hw);
+
+ /* Send the WRITE ENABLE command (8 bit opcode) */
+ igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+ nvm->opcode_bits);
+
+ igb_standby_nvm(hw);
+
+ /*
+ * Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+ */
+ if ((nvm->address_bits == 8) && (offset >= 128))
+ write_opcode |= NVM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
+ igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+ igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+ nvm->address_bits);
+
+ /* Loop to allow for up to whole page write of eeprom */
+ while (widx < words) {
+ u16 word_out = data[widx];
+ word_out = (word_out >> 8) | (word_out << 8);
+ igb_shift_out_eec_bits(hw, word_out, 16);
+ widx++;
+
+ if ((((offset + widx) * 2) % nvm->page_size) == 0) {
+ igb_standby_nvm(hw);
+ break;
+ }
+ }
+ }
+
+ msleep(10);
+release:
+ hw->nvm.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_read_part_string - Read device part number
+ * @hw: pointer to the HW structure
+ * @part_num: pointer to device part number
+ * @part_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in part_num.
+ **/
+s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size)
+{
+ s32 ret_val;
+ u16 nvm_data;
+ u16 pointer;
+ u16 offset;
+ u16 length;
+
+ if (part_num == NULL) {
+ hw_dbg("PBA string buffer was null\n");
+ ret_val = E1000_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer);
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ /*
+ * If nvm_data is not the pointer guard, the PBA must be in legacy
+ * format, which means "pointer" is actually the second data word of
+ * the PBA number and we can decode it into an ASCII string.
+ */
+ if (nvm_data != NVM_PBA_PTR_GUARD) {
+ hw_dbg("NVM PBA number is not stored as string\n");
+
+ /* we will need 11 characters to store the PBA */
+ if (part_num_size < 11) {
+ hw_dbg("PBA string buffer too small\n");
+ return E1000_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pointer */
+ part_num[0] = (nvm_data >> 12) & 0xF;
+ part_num[1] = (nvm_data >> 8) & 0xF;
+ part_num[2] = (nvm_data >> 4) & 0xF;
+ part_num[3] = nvm_data & 0xF;
+ part_num[4] = (pointer >> 12) & 0xF;
+ part_num[5] = (pointer >> 8) & 0xF;
+ part_num[6] = '-';
+ part_num[7] = 0;
+ part_num[8] = (pointer >> 4) & 0xF;
+ part_num[9] = pointer & 0xF;
+
+ /* put a null character on the end of our string */
+ part_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (part_num[offset] < 0xA)
+ part_num[offset] += '0';
+ else if (part_num[offset] < 0x10)
+ part_num[offset] += 'A' - 0xA;
+ }
+
+ goto out;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, pointer, 1, &length);
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ hw_dbg("NVM PBA number section invalid length\n");
+ ret_val = E1000_ERR_NVM_PBA_SECTION;
+ goto out;
+ }
+ /* check if part_num buffer is big enough */
+ if (part_num_size < (((u32)length * 2) - 1)) {
+ hw_dbg("PBA string buffer too small\n");
+ ret_val = E1000_ERR_NO_SPACE;
+ goto out;
+ }
+
+ /* trim pba length from start of string */
+ pointer++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ goto out;
+ }
+ part_num[offset * 2] = (u8)(nvm_data >> 8);
+ part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+ }
+ part_num[offset * 2] = '\0';
+
+out:
+ return ret_val;
+}
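+
+/*
+ * Editor's note, not part of this patch: for the legacy format above,
+ * nvm_data == 0x1234 and pointer == 0x5678 decode to "123456-078" --
+ * six hex digits, a dash, a hard-coded 0 nibble at index 7, then the
+ * last two digits, all converted by the hex-to-ASCII loop.
+ */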
+
+/**
+ * igb_read_mac_addr - Read device MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the device MAC address from receive address register 0 (loaded
+ * by the hardware from the EEPROM) and stores it as both the permanent
+ * and the current address.
+ **/
+s32 igb_read_mac_addr(struct e1000_hw *hw)
+{
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
+
+ rar_high = rd32(E1000_RAH(0));
+ rar_low = rd32(E1000_RAL(0));
+
+ for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
+
+ for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
+
+ for (i = 0; i < ETH_ALEN; i++)
+ hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+ return 0;
+}
+
+/**
+ * igb_validate_nvm_checksum - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 igb_validate_nvm_checksum(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
+
+ if (checksum != (u16) NVM_SUM) {
+ hw_dbg("NVM Checksum Invalid\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
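+
+/*
+ * Editor's note, not part of this patch: NVM_SUM is 0xBABA, so the
+ * checksum word at NVM_CHECKSUM_REG (0x3F) is chosen to make the 16-bit
+ * sum of words 0x00 through 0x3F wrap to exactly 0xBABA; e.g. if words
+ * 0x00-0x3E sum to 0x1234, the stored checksum must be
+ * 0xBABA - 0x1234 == 0xA886, which is what igb_update_nvm_checksum()
+ * below computes.
+ */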
+
+/**
+ * igb_update_nvm_checksum - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+s32 igb_update_nvm_checksum(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error while updating checksum.\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
+ checksum = (u16) NVM_SUM - checksum;
+ ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
+ if (ret_val)
+ hw_dbg("NVM Write Error while updating checksum.\n");
+
+out:
+ return ret_val;
+}
+
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
new file mode 100644
index 000000000000..a2a7ca9fa733
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -0,0 +1,43 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_NVM_H_
+#define _E1000_NVM_H_
+
+s32 igb_acquire_nvm(struct e1000_hw *hw);
+void igb_release_nvm(struct e1000_hw *hw);
+s32 igb_read_mac_addr(struct e1000_hw *hw);
+s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
+s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
+ u32 part_num_size);
+s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
+s32 igb_update_nvm_checksum(struct e1000_hw *hw);
+
+#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
new file mode 100644
index 000000000000..7edf31efe756
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -0,0 +1,2347 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+
+#include "e1000_mac.h"
+#include "e1000_phy.h"
+
+static s32 igb_phy_setup_autoneg(struct e1000_hw *hw);
+static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
+ u16 *phy_ctrl);
+static s32 igb_wait_autoneg(struct e1000_hw *hw);
+
+/* Cable length tables */
+static const u16 e1000_m88_cable_length_table[] =
+ { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
+ (sizeof(e1000_m88_cable_length_table) / \
+ sizeof(e1000_m88_cable_length_table[0]))
+
+static const u16 e1000_igp_2_cable_length_table[] =
+ { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
+ 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
+ 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
+ 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
+ 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
+ 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
+ 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
+ 104, 109, 114, 118, 121, 124};
+#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
+ (sizeof(e1000_igp_2_cable_length_table) / \
+ sizeof(e1000_igp_2_cable_length_table[0]))
+
+/**
+ * igb_check_reset_block - Check if PHY reset is blocked
+ * @hw: pointer to the HW structure
+ *
+ * Read the PHY management control register and check whether a PHY reset
+ * is blocked. If a reset is not blocked return 0, otherwise
+ * return E1000_BLK_PHY_RESET (12).
+ **/
+s32 igb_check_reset_block(struct e1000_hw *hw)
+{
+ u32 manc;
+
+ manc = rd32(E1000_MANC);
+
+ return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
+ E1000_BLK_PHY_RESET : 0;
+}
+
+/**
+ * igb_get_phy_id - Retrieve the PHY ID and revision
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY registers and stores the PHY ID and possibly the PHY
+ * revision in the hardware structure.
+ **/
+s32 igb_get_phy_id(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+ u16 phy_id;
+
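+ /*
+ * The PHY ID is split across two MII registers: PHY_ID1 supplies
+ * the upper 16 bits and PHY_ID2 the lower bits, of which the low
+ * nibble is the revision.
+ */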
+ ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+ if (ret_val)
+ goto out;
+
+ phy->id = (u32)(phy_id << 16);
+ udelay(20);
+ ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
+ if (ret_val)
+ goto out;
+
+ phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+ phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_phy_reset_dsp - Reset PHY DSP
+ * @hw: pointer to the HW structure
+ *
+ * Reset the digital signal processor.
+ **/
+static s32 igb_phy_reset_dsp(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ if (!(hw->phy.ops.write_reg))
+ goto out;
+
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_read_phy_reg_mdic - Read MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the MDI control register in the PHY at offset and stores the
+ * information read to data.
+ **/
+s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, mdic = 0;
+ s32 ret_val = 0;
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ hw_dbg("PHY Address %d is out of range\n", offset);
+ ret_val = -E1000_ERR_PARAM;
+ goto out;
+ }
+
+ /*
+ * Set up Op-code, Phy Address, and register offset in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
+
+ wr32(E1000_MDIC, mdic);
+
+ /*
+ * Poll the ready bit to see if the MDI read completed.
+ * The timeout was increased because testing showed failures
+ * with the lower timeout.
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ udelay(50);
+ mdic = rd32(E1000_MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ hw_dbg("MDI Read did not complete\n");
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ hw_dbg("MDI Error\n");
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+ *data = (u16) mdic;
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_write_phy_reg_mdic - Write MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write to register at offset
+ *
+ * Writes data to MDI control register in the PHY at offset.
+ **/
+s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, mdic = 0;
+ s32 ret_val = 0;
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ hw_dbg("PHY Address %d is out of range\n", offset);
+ ret_val = -E1000_ERR_PARAM;
+ goto out;
+ }
+
+ /*
+ * Set up Op-code, Phy Address, and register offset in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ mdic = (((u32)data) |
+ (offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
+
+ wr32(E1000_MDIC, mdic);
+
+ /*
+ * Poll the ready bit to see if the MDI write completed.
+ * The timeout was increased because testing showed failures
+ * with the lower timeout.
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ udelay(50);
+ mdic = rd32(E1000_MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ hw_dbg("MDI Write did not complete\n");
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ hw_dbg("MDI Error\n");
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_read_phy_reg_i2c - Read PHY register using i2c
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the i2c interface and stores the
+ * retrieved information in data.
+ **/
+s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, i2ccmd = 0;
+
+
+ /*
+ * Set up Op-code, Phy Address, and register address in the I2CCMD
+ * register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+ (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+ (E1000_I2CCMD_OPCODE_READ));
+
+ wr32(E1000_I2CCMD, i2ccmd);
+
+ /* Poll the ready bit to see if the I2C read completed */
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+ udelay(50);
+ i2ccmd = rd32(E1000_I2CCMD);
+ if (i2ccmd & E1000_I2CCMD_READY)
+ break;
+ }
+ if (!(i2ccmd & E1000_I2CCMD_READY)) {
+ hw_dbg("I2CCMD Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (i2ccmd & E1000_I2CCMD_ERROR) {
+ hw_dbg("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+
+ /* Need to byte-swap the 16-bit value. */
+ *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
+
+ return 0;
+}
+
+/**
+ * igb_write_phy_reg_i2c - Write PHY register using i2c
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset using the i2c interface.
+ **/
+s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 i, i2ccmd = 0;
+ u16 phy_data_swapped;
+
+ /* Prevent overwriting the SFP I2C EEPROM, which is at the A0 address. */
+ if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) {
+ hw_dbg("PHY I2C Address %d is out of range.\n",
+ hw->phy.addr);
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Swap the data bytes for the I2C interface */
+ phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
+
+ /*
+ * Set up Op-code, Phy Address, and register address in the I2CCMD
+ * register. The MAC will take care of interfacing with the
+ * PHY to perform the desired write.
+ */
+ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+ (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+ E1000_I2CCMD_OPCODE_WRITE |
+ phy_data_swapped);
+
+ wr32(E1000_I2CCMD, i2ccmd);
+
+ /* Poll the ready bit to see if the I2C write completed */
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+ udelay(50);
+ i2ccmd = rd32(E1000_I2CCMD);
+ if (i2ccmd & E1000_I2CCMD_READY)
+ break;
+ }
+ if (!(i2ccmd & E1000_I2CCMD_READY)) {
+ hw_dbg("I2CCMD Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (i2ccmd & E1000_I2CCMD_ERROR) {
+ hw_dbg("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+
+ return 0;
+}
+
+/**
+ * igb_read_phy_reg_igp - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires the semaphore, if necessary, then reads the PHY register at
+ * offset and stores the retrieved information in data. Releases any
+ * acquired semaphores before exiting.
+ **/
+s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val = 0;
+
+ if (!(hw->phy.ops.acquire))
+ goto out;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
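+ /*
+ * Registers beyond MAX_PHY_MULTI_PAGE_REG live on other pages, so
+ * select the right page via IGP01E1000_PHY_PAGE_SELECT first.
+ */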
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ ret_val = igb_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (u16)offset);
+ if (ret_val) {
+ hw->phy.ops.release(hw);
+ goto out;
+ }
+ }
+
+ ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_write_phy_reg_igp - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires the semaphore, if necessary, then writes the data to the PHY
+ * register at the offset. Releases any acquired semaphores before exiting.
+ **/
+s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val = 0;
+
+ if (!(hw->phy.ops.acquire))
+ goto out;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ ret_val = igb_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (u16)offset);
+ if (ret_val) {
+ hw->phy.ops.release(hw);
+ goto out;
+ }
+ }
+
+ ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_copper_link_setup_82580 - Setup 82580 PHY for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up Carrier-sense on Transmit and downshift values.
+ **/
+s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+
+
+ if (phy->reset_disable) {
+ ret_val = 0;
+ goto out;
+ }
+
+ if (phy->type == e1000_phy_82580) {
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+ hw_dbg("Error resetting the PHY.\n");
+ goto out;
+ }
+ }
+
+ /* Enable CRS on TX. This must be set for half-duplex operation. */
+ ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data |= I82580_CFG_ASSERT_CRS_ON_TX;
+
+ /* Enable downshift */
+ phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
+
+ ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_copper_link_setup_m88 - Setup m88 PHYs for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up MDI/MDI-X and polarity for m88 PHYs. If necessary, the transmit
+ * clock and downshift values are also set.
+ **/
+s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+
+ if (phy->reset_disable) {
+ ret_val = 0;
+ goto out;
+ }
+
+ /* Enable CRS on TX. This must be set for half-duplex operation. */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+ /*
+ * Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+ switch (phy->mdix) {
+ case 1:
+ phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+ break;
+ case 2:
+ phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+ break;
+ case 3:
+ phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+ break;
+ case 0:
+ default:
+ phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+ break;
+ }
+
+ /*
+ * Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+ if (phy->disable_polarity_correction == 1)
+ phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ goto out;
+
+ if (phy->revision < E1000_REVISION_4) {
+ /*
+ * Force TX_CLK in the Extended PHY Specific Control Register
+ * to 25MHz clock.
+ */
+ ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+ if ((phy->revision == E1000_REVISION_2) &&
+ (phy->id == M88E1111_I_PHY_ID)) {
+ /* 82573L PHY - set the downshift counter to 5x. */
+ phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
+ phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+ } else {
+ /* Configure Master and Slave downshift values */
+ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+ phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+ }
+ ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ goto out;
+ }
+
+ /* Commit the changes. */
+ ret_val = igb_phy_sw_reset(hw);
+ if (ret_val) {
+ hw_dbg("Error committing the PHY changes\n");
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_copper_link_setup_m88_gen2 - Setup m88 gen2 PHYs for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHYs.
+ * Also enables and sets the downshift parameters.
+ **/
+s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+
+ if (phy->reset_disable) {
+ ret_val = 0;
+ goto out;
+ }
+
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ goto out;
+
+ /*
+ * Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+ switch (phy->mdix) {
+ case 1:
+ phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+ break;
+ case 2:
+ phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+ break;
+ case 3:
+ /* M88E1112 does not support this mode */
+ if (phy->id != M88E1112_E_PHY_ID) {
+ phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+ break;
+ }
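+ /* fall through */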
+ case 0:
+ default:
+ phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+ break;
+ }
+
+ /*
+ * Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+ if (phy->disable_polarity_correction == 1)
+ phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+ /* Enable downshift and setting it to X6 */
+ phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
+ phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
+ phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
+
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ goto out;
+
+ /* Commit the changes. */
+ ret_val = igb_phy_sw_reset(hw);
+ if (ret_val) {
+ hw_dbg("Error committing the PHY changes\n");
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_copper_link_setup_igp - Setup igp PHYs for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
+ * igp PHYs.
+ **/
+s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ if (phy->reset_disable) {
+ ret_val = 0;
+ goto out;
+ }
+
+ ret_val = phy->ops.reset(hw);
+ if (ret_val) {
+ hw_dbg("Error resetting the PHY.\n");
+ goto out;
+ }
+
+ /*
+ * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
+ * timeout issues when LFS is enabled.
+ */
+ msleep(100);
+
+ /*
+ * The NVM settings will configure LPLU in D3 for
+ * non-IGP1 PHYs.
+ */
+ if (phy->type == e1000_phy_igp) {
+ /* disable lplu d3 during driver init */
+ if (phy->ops.set_d3_lplu_state)
+ ret_val = phy->ops.set_d3_lplu_state(hw, false);
+ if (ret_val) {
+ hw_dbg("Error Disabling LPLU D3\n");
+ goto out;
+ }
+ }
+
+ /* disable lplu d0 during driver init */
+ ret_val = phy->ops.set_d0_lplu_state(hw, false);
+ if (ret_val) {
+ hw_dbg("Error Disabling LPLU D0\n");
+ goto out;
+ }
+ /* Configure mdi-mdix settings */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
+ if (ret_val)
+ goto out;
+
+ data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+ switch (phy->mdix) {
+ case 1:
+ data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 2:
+ data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 0:
+ default:
+ data |= IGP01E1000_PSCR_AUTO_MDIX;
+ break;
+ }
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
+ if (ret_val)
+ goto out;
+
+ /* set auto-master slave resolution settings */
+ if (hw->mac.autoneg) {
+ /*
+ * when autonegotiation advertisement is only 1000Mbps then we
+ * should disable SmartSpeed and enable Auto MasterSlave
+ * resolution as hardware default.
+ */
+ if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
+ /* Disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ goto out;
+
+ /* Set auto Master/Slave resolution process */
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
+ if (ret_val)
+ goto out;
+
+ data &= ~CR_1000T_MS_ENABLE;
+ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
+ if (ret_val)
+ goto out;
+ }
+
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
+ if (ret_val)
+ goto out;
+
+ /* load defaults for future use */
+ phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
+ ((data & CR_1000T_MS_VALUE) ?
+ e1000_ms_force_master :
+ e1000_ms_force_slave) :
+ e1000_ms_auto;
+
+ switch (phy->ms_type) {
+ case e1000_ms_force_master:
+ data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_force_slave:
+ data |= CR_1000T_MS_ENABLE;
+ data &= ~(CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_auto:
+ data &= ~CR_1000T_MS_ENABLE;
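+ /* fall through */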
+ default:
+ break;
+ }
+ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_copper_link_autoneg - Setup/Enable autoneg for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Performs initial bounds checking on the autoneg advertisement parameter,
+ * then configures the PHY to advertise the full capability. Sets up the
+ * PHY for autoneg and restarts the negotiation process with the link
+ * partner. If autoneg_wait_to_complete is set, waits for autoneg to
+ * complete before exiting.
+ **/
+static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_ctrl;
+
+ /*
+ * Perform some bounds checking on the autoneg advertisement
+ * parameter.
+ */
+ phy->autoneg_advertised &= phy->autoneg_mask;
+
+ /*
+ * If autoneg_advertised is zero, we assume it was not defaulted
+ * by the calling code so we set to advertise full capability.
+ */
+ if (phy->autoneg_advertised == 0)
+ phy->autoneg_advertised = phy->autoneg_mask;
+
+ hw_dbg("Reconfiguring auto-neg advertisement params\n");
+ ret_val = igb_phy_setup_autoneg(hw);
+ if (ret_val) {
+ hw_dbg("Error Setting up Auto-Negotiation\n");
+ goto out;
+ }
+ hw_dbg("Restarting Auto-Neg\n");
+
+ /*
+ * Restart auto-negotiation by setting the Auto Neg Enable bit and
+ * the Auto Neg Restart bit in the PHY control register.
+ */
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+ if (ret_val)
+ goto out;
+
+ phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+ if (ret_val)
+ goto out;
+
+ /*
+ * Does the user want to wait for Auto-Neg to complete here, or
+ * check at a later time (for example, callback routine).
+ */
+ if (phy->autoneg_wait_to_complete) {
+ ret_val = igb_wait_autoneg(hw);
+ if (ret_val) {
+ hw_dbg("Error while waiting for "
+ "autoneg to complete\n");
+ goto out;
+ }
+ }
+
+ hw->mac.get_link_status = true;
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_phy_setup_autoneg - Configure PHY for auto-negotiation
+ * @hw: pointer to the HW structure
+ *
+ * Reads the MII auto-neg advertisement register and/or the 1000T control
+ * register. If the PHY is already set up for auto-negotiation, returns
+ * successfully. Otherwise, sets up the advertisement and flow control to
+ * the appropriate values for the desired auto-negotiation.
+ **/
+static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 mii_autoneg_adv_reg;
+ u16 mii_1000t_ctrl_reg = 0;
+
+ phy->autoneg_advertised &= phy->autoneg_mask;
+
+ /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+ ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+ if (ret_val)
+ goto out;
+
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
+ &mii_1000t_ctrl_reg);
+ if (ret_val)
+ goto out;
+ }
+
+ /*
+ * Need to parse both autoneg_advertised and fc and set up
+ * the appropriate PHY registers. First we will parse for
+ * autoneg_advertised software override. Since we can advertise
+ * a plethora of combinations, we need to check each bit
+ * individually.
+ */
+
+ /*
+ * First we clear all the 10/100 mb speed bits in the Auto-Neg
+ * Advertisement Register (Address 4) and the 1000 mb speed bits in
+ * the 1000Base-T Control Register (Address 9).
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
+ NWAY_AR_100TX_HD_CAPS |
+ NWAY_AR_10T_FD_CAPS |
+ NWAY_AR_10T_HD_CAPS);
+ mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+
+ hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);
+
+ /* Do we want to advertise 10 Mb Half Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
+ hw_dbg("Advertise 10mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+ }
+
+ /* Do we want to advertise 10 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
+ hw_dbg("Advertise 10mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Half Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
+ hw_dbg("Advertise 100mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
+ hw_dbg("Advertise 100mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+ }
+
+ /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+ if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
+ hw_dbg("Advertise 1000mb Half duplex request denied!\n");
+
+ /* Do we want to advertise 1000 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
+ hw_dbg("Advertise 1000mb Full duplex\n");
+ mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+ }
+
+ /*
+ * Check for a software override of the flow control settings, and
+ * setup the PHY advertisement registers accordingly. If
+ * auto-negotiation is enabled, then software will have to set the
+ * "PAUSE" bits to the correct value in the Auto-Negotiation
+ * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
+ * negotiation.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ * other: No software override. The flow control configuration
+ * in the EEPROM is used.
+ */
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+ /*
+ * Flow control (RX & TX) is completely disabled by a
+ * software over-ride.
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case e1000_fc_rx_pause:
+ /*
+ * RX Flow control is enabled, and TX Flow control is
+ * disabled, by a software over-ride.
+ *
+ * Since there really isn't a way to advertise that we are
+ * capable of RX Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric RX PAUSE. Later
+ * (in e1000_config_fc_after_link_up) we will disable the
+ * hw's ability to send PAUSE frames.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case e1000_fc_tx_pause:
+ /*
+ * TX Flow control is enabled, and RX Flow control is
+ * disabled, by a software over-ride.
+ */
+ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+ mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+ break;
+ case e1000_fc_full:
+ /*
+ * Flow control (both RX and TX) is enabled by a software
+ * over-ride.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ default:
+ hw_dbg("Flow control param set incorrectly\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+ if (ret_val)
+ goto out;
+
+ hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+ ret_val = phy->ops.write_reg(hw,
+ PHY_1000T_CTRL,
+ mii_1000t_ctrl_reg);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_setup_copper_link - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Calls the appropriate function to configure the link for auto-neg or forced
+ * speed and duplex. Then we check for link; once link is established, the
+ * collision distance and flow control are configured. If link is not
+ * established, we return -E1000_ERR_PHY (-2).
+ **/
+s32 igb_setup_copper_link(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ bool link;
+
+
+ if (hw->mac.autoneg) {
+ /*
+ * Setup autoneg and flow control advertisement and perform
+ * autonegotiation.
+ */
+ ret_val = igb_copper_link_autoneg(hw);
+ if (ret_val)
+ goto out;
+ } else {
+ /*
+ * PHY will be set to 10H, 10F, 100H or 100F
+ * depending on user settings.
+ */
+ hw_dbg("Forcing Speed and Duplex\n");
+ ret_val = hw->phy.ops.force_speed_duplex(hw);
+ if (ret_val) {
+ hw_dbg("Error Forcing Speed and Duplex\n");
+ goto out;
+ }
+ }
+
+ /*
+ * Check link status. Wait up to 100 microseconds for link to become
+ * valid.
+ */
+ ret_val = igb_phy_has_link(hw,
+ COPPER_LINK_UP_LIMIT,
+ 10,
+ &link);
+ if (ret_val)
+ goto out;
+
+ if (link) {
+ hw_dbg("Valid link established!!!\n");
+ igb_config_collision_dist(hw);
+ ret_val = igb_config_fc_after_link_up(hw);
+ } else {
+ hw_dbg("Unable to establish link!!!\n");
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex. Clears the
+ * auto-crossover to force MDI manually. Waits for link and returns
+ * success if link comes up, else -E1000_ERR_PHY (-2).
+ **/
+s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ goto out;
+
+ igb_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ goto out;
+
+ /*
+ * Clear Auto-Crossover to force MDI manually. IGP requires MDI
+ * forced whenever speed and duplex are forced.
+ */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+ if (ret_val)
+ goto out;
+
+ hw_dbg("IGP PSCR: %X\n", phy_data);
+
+ udelay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n");
+
+ ret_val = igb_phy_has_link(hw,
+ PHY_FORCE_LIMIT,
+ 100000,
+ &link);
+ if (ret_val)
+ goto out;
+
+ if (!link)
+ hw_dbg("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = igb_phy_has_link(hw,
+ PHY_FORCE_LIMIT,
+ 100000,
+ &link);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex. Clears the
+ * auto-crossover to force MDI manually. Resets the PHY to commit the
+ * changes. If time expires while waiting for link up, we reset the DSP.
+ * After reset, TX_CLK and CRS on TX must be set. Returns success upon
+ * completion, else the corresponding error code.
+ **/
+s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ /*
+ * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+ * forced whenever speed and duplex are forced.
+ */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ goto out;
+
+ hw_dbg("M88E1000 PSCR: %X\n", phy_data);
+
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ goto out;
+
+ igb_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ goto out;
+
+ /* Reset the phy to commit changes. */
+ ret_val = igb_phy_sw_reset(hw);
+ if (ret_val)
+ goto out;
+
+ if (phy->autoneg_wait_to_complete) {
+ hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
+
+ ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
+ if (ret_val)
+ goto out;
+
+ if (!link) {
+ if (hw->phy.type != e1000_phy_m88 ||
+ hw->phy.id == I347AT4_E_PHY_ID ||
+ hw->phy.id == M88E1112_E_PHY_ID) {
+ hw_dbg("Link taking longer than expected.\n");
+ } else {
+
+ /*
+ * We didn't get link.
+ * Reset the DSP and cross our fingers.
+ */
+ ret_val = phy->ops.write_reg(hw,
+ M88E1000_PHY_PAGE_SELECT,
+ 0x001d);
+ if (ret_val)
+ goto out;
+ ret_val = igb_phy_reset_dsp(hw);
+ if (ret_val)
+ goto out;
+ }
+ }
+
+ /* Try once more */
+ ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ goto out;
+ }
+
+ if (hw->phy.type != e1000_phy_m88 ||
+ hw->phy.id == I347AT4_E_PHY_ID ||
+ hw->phy.id == M88E1112_E_PHY_ID)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ goto out;
+
+ /*
+ * Resetting the phy means we need to re-force TX_CLK in the
+ * Extended PHY Specific Control Register to 25MHz clock from
+ * the reset value of 2.5MHz.
+ */
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+ ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ goto out;
+
+ /*
+ * In addition, we must re-enable CRS on Tx for both half and full
+ * duplex.
+ */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ * @hw: pointer to the HW structure
+ * @phy_ctrl: pointer to current value of PHY_CONTROL
+ *
+ * Forces speed and duplex on the PHY by doing the following: disable flow
+ * control, force speed/duplex on the MAC, disable auto speed detection,
+ * disable auto-negotiation, configure duplex, configure speed, configure
+ * the collision distance, write configuration to CTRL register. The
+ * caller must write to the PHY_CONTROL register for these settings to
+ * take effect.
+ **/
+static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
+ u16 *phy_ctrl)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 ctrl;
+
+ /* Turn off flow control when forcing speed/duplex */
+ hw->fc.current_mode = e1000_fc_none;
+
+ /* Force speed/duplex on the mac */
+ ctrl = rd32(E1000_CTRL);
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~E1000_CTRL_SPD_SEL;
+
+ /* Disable Auto Speed Detection */
+ ctrl &= ~E1000_CTRL_ASDE;
+
+ /* Disable autoneg on the phy */
+ *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+
+ /* Forcing Full or Half Duplex? */
+ if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
+ ctrl &= ~E1000_CTRL_FD;
+ *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+ hw_dbg("Half Duplex\n");
+ } else {
+ ctrl |= E1000_CTRL_FD;
+ *phy_ctrl |= MII_CR_FULL_DUPLEX;
+ hw_dbg("Full Duplex\n");
+ }
+
+ /* Forcing 10mb or 100mb? */
+ if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
+ ctrl |= E1000_CTRL_SPD_100;
+ *phy_ctrl |= MII_CR_SPEED_100;
+ *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+ hw_dbg("Forcing 100mb\n");
+ } else {
+ ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+ *phy_ctrl |= MII_CR_SPEED_10;
+ *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+ hw_dbg("Forcing 10mb\n");
+ }
+
+ igb_config_collision_dist(hw);
+
+ wr32(E1000_CTRL, ctrl);
+}
+
+/**
+ * igb_set_d3_lplu_state - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Returns 0 on success, a negative error code on failure.
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * and SmartSpeed is disabled when active is true, else clear lplu for D3
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+ u16 data;
+
+ if (!(hw->phy.ops.read_reg))
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+ if (ret_val)
+ goto out;
+
+ if (!active) {
+ data &= ~IGP02E1000_PM_D3_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ if (ret_val)
+ goto out;
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ goto out;
+ } else if (phy->smart_speed == e1000_smart_speed_off) {
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ if (ret_val)
+ goto out;
+ }
+ } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+ data |= IGP02E1000_PM_D3_LPLU;
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ data);
+ if (ret_val)
+ goto out;
+
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ data);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_check_downshift - Checks whether a downshift in speed occurred
+ * @hw: pointer to the HW structure
+ *
+ * Returns 0 on success, a negative error code on failure.
+ *
+ * A downshift is detected by querying the PHY link health.
+ **/
+s32 igb_check_downshift(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, offset, mask;
+
+ switch (phy->type) {
+ case e1000_phy_m88:
+ case e1000_phy_gg82563:
+ offset = M88E1000_PHY_SPEC_STATUS;
+ mask = M88E1000_PSSR_DOWNSHIFT;
+ break;
+ case e1000_phy_igp_2:
+ case e1000_phy_igp:
+ case e1000_phy_igp_3:
+ offset = IGP01E1000_PHY_LINK_HEALTH;
+ mask = IGP01E1000_PLHR_SS_DOWNGRADE;
+ break;
+ default:
+ /* speed downshift not supported */
+ phy->speed_downgraded = false;
+ ret_val = 0;
+ goto out;
+ }
+
+ ret_val = phy->ops.read_reg(hw, offset, &phy_data);
+
+ if (!ret_val)
+ phy->speed_downgraded = (phy_data & mask) ? true : false;
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_check_polarity_m88 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY specific status register.
+ **/
+static s32 igb_check_polarity_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal;
+
+ return ret_val;
+}
+
+/**
+ * igb_check_polarity_igp - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY port status register, and the
+ * current speed (since there is no polarity at 100Mbps).
+ **/
+static s32 igb_check_polarity_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data, offset, mask;
+
+ /*
+ * Polarity is determined based on the speed of
+ * our connection.
+ */
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+ if (ret_val)
+ goto out;
+
+ if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+ offset = IGP01E1000_PHY_PCS_INIT_REG;
+ mask = IGP01E1000_PHY_POLARITY_MASK;
+ } else {
+ /*
+ * This really only applies to 10Mbps since
+ * there is no polarity for 100Mbps (always 0).
+ */
+ offset = IGP01E1000_PHY_PORT_STATUS;
+ mask = IGP01E1000_PSSR_POLARITY_REVERSED;
+ }
+
+ ret_val = phy->ops.read_reg(hw, offset, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = (data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal;
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_wait_autoneg - Wait for auto-neg completion
+ * @hw: pointer to the HW structure
+ *
+ * Waits for auto-negotiation to complete or for the auto-negotiation time
+ * limit to expire, whichever happens first.
+ **/
+static s32 igb_wait_autoneg(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 i, phy_status;
+
+ /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+ for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
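+ /*
+ * Mirror igb_phy_has_link(): PHY_STATUS is read twice because some
+ * of its bits (e.g. link status) are latched, so only the second
+ * read reflects the current state.
+ */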
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ if (phy_status & MII_SR_AUTONEG_COMPLETE)
+ break;
+ msleep(100);
+ }
+
+ /*
+ * Timing out (PHY_AUTO_NEG_LIMIT expiring) doesn't guarantee
+ * auto-negotiation has completed.
+ */
+ return ret_val;
+}
+
+/**
+ * igb_phy_has_link - Polls PHY for link
+ * @hw: pointer to the HW structure
+ * @iterations: number of times to poll for link
+ * @usec_interval: delay between polling attempts
+ * @success: pointer to whether polling was successful or not
+ *
+ * Polls the PHY status register for link, 'iterations' number of times.
+ **/
+s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success)
+{
+ s32 ret_val = 0;
+ u16 i, phy_status;
+
+ for (i = 0; i < iterations; i++) {
+ /*
+ * Some PHYs require the PHY_STATUS register to be read
+ * twice due to the link bit being sticky. No harm doing
+ * it across the board.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val) {
+ /*
+ * If the first read fails, another entity may have
+ * ownership of the resources, wait and try again to
+ * see if they have relinquished the resources yet.
+ */
+ udelay(usec_interval);
+ }
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ if (phy_status & MII_SR_LINK_STATUS)
+ break;
+ if (usec_interval >= 1000)
+ mdelay(usec_interval/1000);
+ else
+ udelay(usec_interval);
+ }
+
+ *success = (i < iterations) ? true : false;
+
+ return ret_val;
+}
+
+/**
+ * igb_get_cable_length_m88 - Determine cable length for m88 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY specific status register to retrieve the cable length
+ * information. The cable length is determined by averaging the minimum and
+ * maximum values to get the "average" cable length. The m88 PHY reports
+ * five possible cable length ranges:
+ * Register Value Cable Length
+ * 0 < 50 meters
+ * 1 50 - 80 meters
+ * 2 80 - 110 meters
+ * 3 110 - 140 meters
+ * 4 > 140 meters
+ **/
+s32 igb_get_cable_length_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, index;
+
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ goto out;
+
+ index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+ phy->min_cable_length = e1000_m88_cable_length_table[index];
+ phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+ return ret_val;
+}
+
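+/**
+ * igb_get_cable_length_m88_gen2 - Determine cable length for m88 gen2 PHYs
+ * @hw: pointer to the HW structure
+ *
+ * Reads the cable diagnostic registers of the i347-AT4 and M88E1112 PHYs
+ * and populates the minimum, maximum and average cable length fields.
+ **/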
+s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, phy_data2, index, default_page, is_cm;
+
+ switch (hw->phy.id) {
+ case I347AT4_E_PHY_ID:
+ /* Remember the original page select and set it to 7 */
+ ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+ &default_page);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07);
+ if (ret_val)
+ goto out;
+
+ /* Get cable length from PHY Cable Diagnostics Control Reg */
+ ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr),
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ /* Check if the unit of cable length is meters or cm */
+ ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2);
+ if (ret_val)
+ goto out;
+
+ is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+ /* Populate the phy structure with cable length in meters */
+ phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->cable_length = phy_data / (is_cm ? 100 : 1);
+
+ /* Reset the page select to its original value */
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+ default_page);
+ if (ret_val)
+ goto out;
+ break;
+ case M88E1112_E_PHY_ID:
+ /* Remember the original page select and set it to 5 */
+ ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+ &default_page);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+ phy->min_cable_length = e1000_m88_cable_length_table[index];
+ phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+ phy->cable_length = (phy->min_cable_length +
+ phy->max_cable_length) / 2;
+
+ /* Reset the page select to its original value */
+ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+ default_page);
+ if (ret_val)
+ goto out;
+
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY
+ * @hw: pointer to the HW structure
+ *
+ * The automatic gain control (agc) normalizes the amplitude of the
+ * received signal, adjusting for the attenuation produced by the
+ * cable. By reading the AGC registers, which represent the
+ * combination of coarse and fine gain value, the value can be put
+ * into a lookup table to obtain the approximate cable length
+ * for each channel.
+ **/
+s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+ u16 phy_data, i, agc_value = 0;
+ u16 cur_agc_index, max_agc_index = 0;
+ u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+ static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
+ IGP02E1000_PHY_AGC_A,
+ IGP02E1000_PHY_AGC_B,
+ IGP02E1000_PHY_AGC_C,
+ IGP02E1000_PHY_AGC_D
+ };
+
+ /* Read the AGC registers for all channels */
+ for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
+ if (ret_val)
+ goto out;
+
+ /*
+ * Getting bits 15:9, which represent the combination of
+ * coarse and fine gain values. The result is a number
+ * that can be put into the lookup table to obtain the
+ * approximate cable length.
+ */
+ cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+ IGP02E1000_AGC_LENGTH_MASK;
+
+ /* Array index bound check. */
+ if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+ (cur_agc_index == 0)) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+ /* Remove min & max AGC values from calculation. */
+ if (e1000_igp_2_cable_length_table[min_agc_index] >
+ e1000_igp_2_cable_length_table[cur_agc_index])
+ min_agc_index = cur_agc_index;
+ if (e1000_igp_2_cable_length_table[max_agc_index] <
+ e1000_igp_2_cable_length_table[cur_agc_index])
+ max_agc_index = cur_agc_index;
+
+ agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+ }
+
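+ /*
+ * Drop the lowest and highest readings and average the remaining
+ * (IGP02E1000_PHY_CHANNEL_NUM - 2) channels to reduce the effect
+ * of outliers.
+ */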
+ agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+ e1000_igp_2_cable_length_table[max_agc_index]);
+ agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+ /* Calculate cable length with the error range of +/- 10 meters. */
+ phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+ (agc_value - IGP02E1000_AGC_RANGE) : 0;
+ phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
+
+ phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_get_phy_info_m88 - Retrieve PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Valid only for copper links. Read the PHY status register (sticky read)
+ * to verify that link is up. Read the PHY special control register to
+ * determine the polarity and 10base-T extended distance. Read the PHY
+ * special status register to determine MDI/MDIx and current speed. If
+ * speed is 1000, then determine cable length, local and remote receiver.
+ **/
+s32 igb_get_phy_info_m88(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ if (phy->media_type != e1000_media_type_copper) {
+ hw_dbg("Phy info is only valid for copper media\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ ret_val = igb_phy_has_link(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (!link) {
+ hw_dbg("Phy info is only valid if link is up\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
+ ? true : false;
+
+ ret_val = igb_check_polarity_m88(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false;
+
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+ ret_val = phy->ops.get_cable_length(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+ } else {
+ /* Set values to "undefined" */
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_get_phy_info_igp - Retrieve igp PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up. If link is up, then
+ * set/determine 10base-T extended distance and polarity correction. Read
+ * PHY port status to determine MDI/MDIx and speed. Based on the speed,
+ * determine the cable length, local and remote receiver.
+ **/
+s32 igb_get_phy_info_igp(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ ret_val = igb_phy_has_link(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (!link) {
+ hw_dbg("Phy info is only valid if link is up\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ phy->polarity_correction = true;
+
+ ret_val = igb_check_polarity_igp(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+ if (ret_val)
+ goto out;
+
+ phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false;
+
+ if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+ ret_val = phy->ops.get_cable_length(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+ if (ret_val)
+ goto out;
+
+ phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+ } else {
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_phy_sw_reset - PHY software reset
+ * @hw: pointer to the HW structure
+ *
+ * Does a software reset of the PHY by reading the PHY control register,
+ * setting the reset bit, and writing the register back to the PHY.
+ **/
+s32 igb_phy_sw_reset(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 phy_ctrl;
+
+ if (!(hw->phy.ops.read_reg))
+ goto out;
+
+ ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+ if (ret_val)
+ goto out;
+
+ phy_ctrl |= MII_CR_RESET;
+ ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+ if (ret_val)
+ goto out;
+
+ udelay(1);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_phy_hw_reset - PHY hardware reset
+ * @hw: pointer to the HW structure
+ *
+ * Verify the reset block is not blocking us from resetting. Acquire the
+ * semaphore (if necessary) and read/set/write the device control reset
+ * bit for the PHY. Wait the appropriate delay time for the device to
+ * reset and release the semaphore (if necessary).
+ **/
+s32 igb_phy_hw_reset(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u32 ctrl;
+
+ ret_val = igb_check_reset_block(hw);
+ if (ret_val) {
+ ret_val = 0;
+ goto out;
+ }
+
+ ret_val = phy->ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
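+ /*
+ * Assert the PHY reset bit in the device control register, hold it
+ * for reset_delay_us, then deassert and give the PHY 150 usec to
+ * come back up before polling for configuration done.
+ */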
+ ctrl = rd32(E1000_CTRL);
+ wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
+ wrfl();
+
+ udelay(phy->reset_delay_us);
+
+ wr32(E1000_CTRL, ctrl);
+ wrfl();
+
+ udelay(150);
+
+ phy->ops.release(hw);
+
+ ret_val = phy->ops.get_cfg_done(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_phy_init_script_igp3 - Inits the IGP3 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
+ **/
+s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
+{
+ hw_dbg("Running IGP 3 PHY init script\n");
+
+ /* PHY init IGP 3 */
+ /* Enable rise/fall, 10-mode work in class-A */
+ hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018);
+ /* Remove all caps from Replica path filter */
+ hw->phy.ops.write_reg(hw, 0x2F52, 0x0000);
+ /* Bias trimming for ADC, AFE and Driver (Default) */
+ hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
+ /* Increase Hybrid poly bias */
+ hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
+ /* Add 4% to TX amplitude in Giga mode */
+ hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
+ /* Disable trimming (TTT) */
+ hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
+ /* Poly DC correction to 94.6% + 2% for all channels */
+ hw->phy.ops.write_reg(hw, 0x20DD, 0x249A);
+ /* ABS DC correction to 95.9% */
+ hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3);
+ /* BG temp curve trim */
+ hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE);
+ /* Increasing ADC OPAMP stage 1 currents to max */
+ hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4);
+ /* Force 1000 (required for enabling PHY regs configuration) */
+ hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
+ /* Set upd_freq to 6 */
+ hw->phy.ops.write_reg(hw, 0x1F30, 0x1606);
+ /* Disable NPDFE */
+ hw->phy.ops.write_reg(hw, 0x1F31, 0xB814);
+ /* Disable adaptive fixed FFE (Default) */
+ hw->phy.ops.write_reg(hw, 0x1F35, 0x002A);
+ /* Enable FFE hysteresis */
+ hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067);
+ /* Fixed FFE for short cable lengths */
+ hw->phy.ops.write_reg(hw, 0x1F54, 0x0065);
+ /* Fixed FFE for medium cable lengths */
+ hw->phy.ops.write_reg(hw, 0x1F55, 0x002A);
+ /* Fixed FFE for long cable lengths */
+ hw->phy.ops.write_reg(hw, 0x1F56, 0x002A);
+ /* Enable Adaptive Clip Threshold */
+ hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0);
+ /* AHT reset limit to 1 */
+ hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF);
+ /* Set AHT master delay to 127 msec */
+ hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC);
+ /* Set scan bits for AHT */
+ hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF);
+ /* Set AHT Preset bits */
+ hw->phy.ops.write_reg(hw, 0x1F79, 0x0210);
+ /* Change integ_factor of channel A to 3 */
+ hw->phy.ops.write_reg(hw, 0x1895, 0x0003);
+ /* Change prop_factor of channels BCD to 8 */
+ hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
+ /* Change cg_icount + enable integbp for channels BCD */
+ hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
+ /*
+ * Change cg_icount + enable integbp + change prop_factor_master
+ * to 8 for channel A
+ */
+ hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
+ /* Disable AHT in Slave mode on channel A */
+ hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
+ /*
+ * Enable LPLU and disable AN to 1000 in non-D0a states,
+ * Enable SPD+B2B
+ */
+ hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
+ /* Enable restart AN on an1000_dis change */
+ hw->phy.ops.write_reg(hw, 0x001B, 0x2080);
+ /* Enable wh_fifo read clock in 10/100 modes */
+ hw->phy.ops.write_reg(hw, 0x0014, 0x0045);
+ /* Restart AN, Speed selection is 1000 */
+ hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
+
+ return 0;
+}
+
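+/*
+ * Illustrative sketch, not part of the original script above: the same
+ * init sequence could be driven from a table. The helper and table names
+ * here are made up; register/value pairs are copied from
+ * igb_phy_init_script_igp3().
+ */
+#if 0
+static const struct { u16 reg; u16 val; } igp3_init_script[] = {
+	{ 0x2F5B, 0x9018 },	/* rise/fall, 10-mode class-A */
+	{ 0x2F52, 0x0000 },	/* clear Replica path filter caps */
+	/* ... remaining pairs exactly as written out above ... */
+};
+
+static void igb_run_igp3_script(struct e1000_hw *hw)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(igp3_init_script); i++)
+		hw->phy.ops.write_reg(hw, igp3_init_script[i].reg,
+				      igp3_init_script[i].val);
+}
+#endif
+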
+/**
+ * igb_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, restore the link to its previous settings.
+ **/
+void igb_power_up_phy_copper(struct e1000_hw *hw)
+{
+ u16 mii_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+ mii_reg &= ~MII_CR_POWER_DOWN;
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * igb_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Power down PHY to save power when interface is down and wake on lan
+ * is not enabled.
+ **/
+void igb_power_down_phy_copper(struct e1000_hw *hw)
+{
+ u16 mii_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+ msleep(1);
+}
+
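+/*
+ * Example, illustrative values only: the two helpers above toggle only
+ * MII_CR_POWER_DOWN (0x0800) in PHY_CONTROL and preserve the rest of the
+ * register, so a control value of 0x1140 reads back as 0x1940 after
+ * power down and 0x1140 again after power up.
+ */
+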
+/**
+ * igb_check_polarity_82580 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Returns 0 on success, -E1000_ERR_PHY (-2) on failure.
+ *
+ * Polarity is determined based on the PHY specific status register.
+ **/
+static s32 igb_check_polarity_82580(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal;
+
+ return ret_val;
+}
+
+/**
+ * igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex. Clears the
+ * auto-crossover to force MDI manually. Waits for link and returns
+ * 0 on success, else -E1000_ERR_PHY (-2).
+ **/
+s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ if (ret_val)
+ goto out;
+
+ igb_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+ if (ret_val)
+ goto out;
+
+ /*
+ * Clear Auto-Crossover to force MDI manually. 82580 requires MDI
+ * forced whenever speed and duplex are forced.
+ */
+ ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~I82580_PHY_CTRL2_AUTO_MDIX;
+ phy_data &= ~I82580_PHY_CTRL2_FORCE_MDI_MDIX;
+
+ ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
+ if (ret_val)
+ goto out;
+
+ hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data);
+
+ udelay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n");
+
+ ret_val = igb_phy_has_link(hw,
+ PHY_FORCE_LIMIT,
+ 100000,
+ &link);
+ if (ret_val)
+ goto out;
+
+ if (!link)
+ hw_dbg("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = igb_phy_has_link(hw,
+ PHY_FORCE_LIMIT,
+ 100000,
+ &link);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_get_phy_info_82580 - Retrieve I82580 PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up. If link is up, then
+ * set/determine 10base-T extended distance and polarity correction. Read
+ * PHY port status to determine MDI/MDIx and speed. Based on the speed,
+ * determine the cable length and the local and remote receiver status.
+ **/
+s32 igb_get_phy_info_82580(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ ret_val = igb_phy_has_link(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (!link) {
+ hw_dbg("Phy info is only valid if link is up\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ phy->polarity_correction = true;
+
+ ret_val = igb_check_polarity_82580(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
+ if (ret_val)
+ goto out;
+
+ phy->is_mdix = !!(data & I82580_PHY_STATUS2_MDIX);
+
+ if ((data & I82580_PHY_STATUS2_SPEED_MASK) ==
+ I82580_PHY_STATUS2_SPEED_1000MBPS) {
+ ret_val = hw->phy.ops.get_cable_length(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+ if (ret_val)
+ goto out;
+
+ phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
+ } else {
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_get_cable_length_82580 - Determine cable length for 82580 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the diagnostic status register and verifies result is valid before
+ * placing it in the phy_cable_length field.
+ **/
+s32 igb_get_cable_length_82580(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, length;
+
+ ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data);
+ if (ret_val)
+ goto out;
+
+ length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >>
+ I82580_DSTATUS_CABLE_LENGTH_SHIFT;
+
+ if (length == E1000_CABLE_LENGTH_UNDEFINED)
+ ret_val = -E1000_ERR_PHY;
+
+ phy->cable_length = length;
+
+out:
+ return ret_val;
+}
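+
+/*
+ * Worked example for the extraction above, with an illustrative value:
+ * a diagnostic status of 0x0190 gives (0x0190 & 0x03FC) >> 2 = 0x64,
+ * i.e. a 100 meter cable. A raw field of 0xFF
+ * (E1000_CABLE_LENGTH_UNDEFINED) is rejected with -E1000_ERR_PHY instead.
+ */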
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
new file mode 100644
index 000000000000..8510797b9d81
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -0,0 +1,136 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_PHY_H_
+#define _E1000_PHY_H_
+
+enum e1000_ms_type {
+ e1000_ms_hw_default = 0,
+ e1000_ms_force_master,
+ e1000_ms_force_slave,
+ e1000_ms_auto
+};
+
+enum e1000_smart_speed {
+ e1000_smart_speed_default = 0,
+ e1000_smart_speed_on,
+ e1000_smart_speed_off
+};
+
+s32 igb_check_downshift(struct e1000_hw *hw);
+s32 igb_check_reset_block(struct e1000_hw *hw);
+s32 igb_copper_link_setup_igp(struct e1000_hw *hw);
+s32 igb_copper_link_setup_m88(struct e1000_hw *hw);
+s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw);
+s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+s32 igb_get_cable_length_m88(struct e1000_hw *hw);
+s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw);
+s32 igb_get_cable_length_igp_2(struct e1000_hw *hw);
+s32 igb_get_phy_id(struct e1000_hw *hw);
+s32 igb_get_phy_info_igp(struct e1000_hw *hw);
+s32 igb_get_phy_info_m88(struct e1000_hw *hw);
+s32 igb_phy_sw_reset(struct e1000_hw *hw);
+s32 igb_phy_hw_reset(struct e1000_hw *hw);
+s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32 igb_setup_copper_link(struct e1000_hw *hw);
+s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success);
+void igb_power_up_phy_copper(struct e1000_hw *hw);
+void igb_power_down_phy_copper(struct e1000_hw *hw);
+s32 igb_phy_init_script_igp3(struct e1000_hw *hw);
+s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
+s32 igb_copper_link_setup_82580(struct e1000_hw *hw);
+s32 igb_get_phy_info_82580(struct e1000_hw *hw);
+s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
+s32 igb_get_cable_length_82580(struct e1000_hw *hw);
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
+#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
+#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK 0x0078
+#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
+#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
+
+#define I82580_ADDR_REG 16
+#define I82580_CFG_REG 22
+#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15)
+#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
+#define I82580_CTRL_REG 23
+#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10)
+
+/* 82580 specific PHY registers */
+#define I82580_PHY_CTRL_2 18
+#define I82580_PHY_LBK_CTRL 19
+#define I82580_PHY_STATUS_2 26
+#define I82580_PHY_DIAG_STATUS 31
+
+/* I82580 PHY Status 2 */
+#define I82580_PHY_STATUS2_REV_POLARITY 0x0400
+#define I82580_PHY_STATUS2_MDIX 0x0800
+#define I82580_PHY_STATUS2_SPEED_MASK 0x0300
+#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200
+#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100
+
+/* I82580 PHY Control 2 */
+#define I82580_PHY_CTRL2_AUTO_MDIX 0x0400
+#define I82580_PHY_CTRL2_FORCE_MDI_MDIX 0x0200
+
+/* I82580 PHY Diagnostics Status */
+#define I82580_DSTATUS_CABLE_LENGTH 0x03FC
+#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2
+/* Enable flexible speed on link-up */
+#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
+#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_MDIX 0x0800
+#define IGP01E1000_PSSR_SPEED_MASK 0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
+#define IGP02E1000_PHY_CHANNEL_NUM 4
+#define IGP02E1000_PHY_AGC_A 0x11B1
+#define IGP02E1000_PHY_AGC_B 0x12B1
+#define IGP02E1000_PHY_AGC_C 0x14B1
+#define IGP02E1000_PHY_AGC_D 0x18B1
+#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK 0x7F
+#define IGP02E1000_AGC_RANGE 15
+
+#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
+
+#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
new file mode 100644
index 000000000000..0990f6d860c7
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -0,0 +1,354 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_REGS_H_
+#define _E1000_REGS_H_
+
+#define E1000_CTRL 0x00000 /* Device Control - RW */
+#define E1000_STATUS 0x00008 /* Device Status - RO */
+#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
+#define E1000_EERD 0x00014 /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define E1000_MDIC 0x00020 /* MDI Control - RW */
+#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */
+#define E1000_SCTL 0x00024 /* SerDes Control - RW */
+#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
+#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
+#define E1000_FCT 0x00030 /* Flow Control Type - RW */
+#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
+#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
+#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
+#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
+#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
+#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
+#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
+#define E1000_RCTL 0x00100 /* RX Control - RW */
+#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
+#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
+#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - WO */
+#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
+#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
+#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */
+#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
+#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
+#define E1000_TCTL 0x00400 /* TX Control - RW */
+#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
+#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
+#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
+#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
+#define E1000_PBS 0x01008 /* Packet Buffer Size */
+#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
+#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
+#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
+#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */
+#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
+#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
+
+/* IEEE 1588 TIMESYNCH */
+#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
+#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
+#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
+#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */
+#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */
+#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
+#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
+#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
+#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
+#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
+#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
+#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
+
+/* Filtering Registers */
+#define E1000_SAQF(_n) (0x5980 + 4 * (_n))
+#define E1000_DAQF(_n) (0x59A0 + 4 * (_n))
+#define E1000_SPQF(_n) (0x59C0 + 4 * (_n))
+#define E1000_FTQF(_n) (0x59E0 + 4 * (_n))
+#define E1000_SAQF0 E1000_SAQF(0)
+#define E1000_DAQF0 E1000_DAQF(0)
+#define E1000_SPQF0 E1000_SPQF(0)
+#define E1000_FTQF0 E1000_FTQF(0)
+#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
+#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
+
+#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
+
+/* DMA Coalescing registers */
+#define E1000_DMACR 0x02508 /* Control Register */
+#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */
+#define E1000_DMCTLX 0x02514 /* Time to Lx Request */
+#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
+#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
+#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
+#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
+
+/* TX Rate Limit Registers */
+#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
+#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
+
+/* Split and Replication RX Control - RW */
+#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+/*
+ * Convenience macros
+ *
+ * Note: "_n" is the queue number for the register.
+ *
+ * Example usage:
+ * E1000_RDBAL(current_rx_queue)
+ */
+#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \
+ : (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) \
+ : (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) \
+ : (0x0C008 + ((_n) * 0x40)))
+#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) \
+ : (0x0C00C + ((_n) * 0x40)))
+#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) \
+ : (0x0C010 + ((_n) * 0x40)))
+#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) \
+ : (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) \
+ : (0x0C028 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) \
+ : (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) \
+ : (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) \
+ : (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) \
+ : (0x0E010 + ((_n) * 0x40)))
+#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) \
+ : (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \
+ : (0x0E028 + ((_n) * 0x40)))
+#define E1000_DCA_TXCTRL(_n) (0x03814 + ((_n) << 8))
+#define E1000_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
+#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \
+ : (0x0E038 + ((_n) * 0x40)))
+#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
+ : (0x0E03C + ((_n) * 0x40)))
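+/*
+ * Illustrative expansion, not in the original file: the first four queues
+ * sit in the legacy 0x100-spaced block, higher queues in the 0x40-spaced
+ * block, e.g.
+ *	E1000_RDT(2) == 0x02818 + 2 * 0x100 == 0x02A18
+ *	E1000_RDT(6) == 0x0C018 + 6 * 0x40  == 0x0C198
+ */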
+#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
+#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */
+#define E1000_DTXCTL 0x03590 /* DMA TX Control - RW */
+#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC 0x04028 /* Collision Count - R/clr */
+#define E1000_DC 0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */
+#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */
+#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */
+#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */
+#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */
+#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */
+#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */
+#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */
+#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */
+#define E1000_PRC511 0x04068 /* Packets RX (256-511 bytes) - R/clr */
+#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */
+#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */
+#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */
+#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */
+#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */
+#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */
+#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */
+#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */
+#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */
+#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */
+#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */
+#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */
+#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */
+#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */
+#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */
+#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */
+#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */
+#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */
+#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */
+#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */
+#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */
+#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */
+#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */
+#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */
+#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */
+#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */
+#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */
+#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */
+#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */
+#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
+#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
+#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
+#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
+/* Interrupt Cause Rx Packet Timer Expire Count */
+#define E1000_ICRXPTC 0x04104
+/* Interrupt Cause Rx Absolute Timer Expire Count */
+#define E1000_ICRXATC 0x04108
+/* Interrupt Cause Tx Packet Timer Expire Count */
+#define E1000_ICTXPTC 0x0410C
+/* Interrupt Cause Tx Absolute Timer Expire Count */
+#define E1000_ICTXATC 0x04110
+/* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQEC 0x04118
+/* Interrupt Cause Tx Queue Minimum Threshold Count */
+#define E1000_ICTXQMTC 0x0411C
+/* Interrupt Cause Rx Descriptor Minimum Threshold Count */
+#define E1000_ICRXDMTC 0x04120
+#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
+#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */
+#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
+#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */
+#define E1000_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */
+#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */
+#define E1000_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */
+#define E1000_RPTHC 0x04104 /* Rx Packets To Host */
+#define E1000_HGPTC 0x04118 /* Host Good Packets TX Count */
+#define E1000_HTCBDPC 0x04124 /* Host TX Circuit Breaker Dropped Count */
+#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */
+#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */
+#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
+#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
+#define E1000_LENERRS 0x04138 /* Length Errors Count */
+#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */
+#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */
+#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
+#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */
+#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */
+#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
+#define E1000_RLPML 0x05004 /* RX Long Packet Max Length */
+#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
+#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define E1000_RA 0x05400 /* Receive Address - RW Array */
+#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */
+#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
+#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x054E0 + (((_i) - 16) * 8)))
+#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x054E4 + (((_i) - 16) * 8)))
+#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
+#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
+#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
+#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
+#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
+#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
+#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
+#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
+#define E1000_WUC 0x05800 /* Wakeup Control - RW */
+#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
+#define E1000_WUS 0x05810 /* Wakeup Status - RO */
+#define E1000_MANC 0x05820 /* Management Control - RW */
+#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
+#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
+
+#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
+#define E1000_CCMCTL 0x05B48 /* CCM Control Register */
+#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */
+#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */
+#define E1000_GCR 0x05B00 /* PCI-Ex Control */
+#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM 0x05B50 /* SW Semaphore */
+#define E1000_FWSM 0x05B54 /* FW Semaphore */
+#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
+
+/* RSS registers */
+#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
+#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */
+#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/
+#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */
+/* MSI-X Allocation Register (_i) - RW */
+#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4))
+/* Redirection Table - RW Array */
+#define E1000_RETA(_i) (0x05C00 + ((_i) * 4))
+#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */
+
+/* VT Registers */
+#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */
+#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */
+#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */
+#define E1000_VFRE 0x00C8C /* VF Receive Enables */
+#define E1000_VFTE 0x00C90 /* VF Transmit Enables */
+#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
+#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
+#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */
+#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
+#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
+#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
+/* These act per VF, so array-friendly macros are used */
+#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
+#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
+#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
+#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
+ * Filter - RW */
+#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
+
+#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
+#define rd32(reg) (readl(hw->hw_addr + reg))
+#define wrfl() ((void)rd32(E1000_STATUS))
+
+#define array_wr32(reg, offset, value) \
+ (writel(value, hw->hw_addr + reg + ((offset) << 2)))
+#define array_rd32(reg, offset) \
+ (readl(hw->hw_addr + reg + ((offset) << 2)))
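+
+/*
+ * Typical use is a read-modify-write followed by a posted-write flush, as
+ * in the PHY reset path in e1000_phy.c (sketch; assumes a local
+ * struct e1000_hw *hw in scope):
+ *
+ *	u32 ctrl = rd32(E1000_CTRL);
+ *	wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
+ *	wrfl();		(reads E1000_STATUS to push the write out)
+ */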
+
+/* Energy Efficient Ethernet "EEE" register */
+#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
+#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */
+
+/* Thermal Sensor Register */
+#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
+
+/* OS2BMC Registers */
+#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */
+#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */
+#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
+#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
+
+#endif
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
new file mode 100644
index 000000000000..77793a9debcc
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -0,0 +1,426 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _IGB_H_
+#define _IGB_H_
+
+#include "e1000_mac.h"
+#include "e1000_82575.h"
+
+#include <linux/clocksource.h>
+#include <linux/timecompare.h>
+#include <linux/net_tstamp.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+struct igb_adapter;
+
+/* (1000000000ns / (6000ints/s * 1024ns)) << 2 = 648 */
+#define IGB_START_ITR 648
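+/* spelled out: 1000000000 / (6000 * 1024) = 162 (truncated); 162 << 2 = 648 */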
+
+/* TX/RX descriptor defines */
+#define IGB_DEFAULT_TXD 256
+#define IGB_DEFAULT_TX_WORK 128
+#define IGB_MIN_TXD 80
+#define IGB_MAX_TXD 4096
+
+#define IGB_DEFAULT_RXD 256
+#define IGB_MIN_RXD 80
+#define IGB_MAX_RXD 4096
+
+#define IGB_DEFAULT_ITR 3 /* dynamic */
+#define IGB_MAX_ITR_USECS 10000
+#define IGB_MIN_ITR_USECS 10
+#define NON_Q_VECTORS 1
+#define MAX_Q_VECTORS 8
+
+/* Transmit and receive queues */
+#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \
+ (hw->mac.type > e1000_82575 ? 8 : 4))
+#define IGB_MAX_TX_QUEUES 16
+
+#define IGB_MAX_VF_MC_ENTRIES 30
+#define IGB_MAX_VF_FUNCTIONS 8
+#define IGB_MAX_VFTA_ENTRIES 128
+
+struct vf_data_storage {
+ unsigned char vf_mac_addresses[ETH_ALEN];
+ u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
+ u16 num_vf_mc_hashes;
+ u16 vlans_enabled;
+ u32 flags;
+ unsigned long last_nack;
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
+ u16 tx_rate;
+};
+
+#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
+#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
+#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
+#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */
+
+/* RX descriptor control thresholds.
+ * PTHRESH - MAC will consider prefetch if it has fewer than this number of
+ * descriptors available in its onboard memory.
+ * Setting this to 0 disables RX descriptor prefetch.
+ * HTHRESH - MAC will only prefetch if there are at least this many descriptors
+ * available in host memory.
+ * If PTHRESH is 0, this should also be 0.
+ * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
+ * descriptors until either it has this many to write back, or the
+ * ITR timer expires.
+ */
+#define IGB_RX_PTHRESH 8
+#define IGB_RX_HTHRESH 8
+#define IGB_TX_PTHRESH 8
+#define IGB_TX_HTHRESH 1
+#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
+ adapter->msix_entries) ? 1 : 4)
+#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
+ adapter->msix_entries) ? 1 : 16)
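+
+/*
+ * Sketch of how these thresholds are packed into a queue's RXDCTL/TXDCTL
+ * register; bit positions (PTHRESH 4:0, HTHRESH 12:8, WTHRESH 20:16) are
+ * assumed from the usual 82575-family layout, illustrative only:
+ *
+ *	u32 rxdctl = IGB_RX_PTHRESH |
+ *		     (IGB_RX_HTHRESH << 8) |
+ *		     (IGB_RX_WTHRESH << 16);
+ */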
+
+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
+/* Supported Rx Buffer Sizes */
+#define IGB_RXBUFFER_512 512
+#define IGB_RXBUFFER_16384 16384
+#define IGB_RX_HDR_LEN IGB_RXBUFFER_512
+
+/* How many Tx Descriptors do we need to call netif_wake_queue? */
+#define IGB_TX_QUEUE_WAKE 16
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define AUTO_ALL_MODES 0
+#define IGB_EEPROM_APME 0x0400
+
+#ifndef IGB_MASTER_SLAVE
+/* Switch to override PHY master/slave setting */
+#define IGB_MASTER_SLAVE e1000_ms_hw_default
+#endif
+
+#define IGB_MNG_VLAN_NONE -1
+
+#define IGB_TX_FLAGS_CSUM 0x00000001
+#define IGB_TX_FLAGS_VLAN 0x00000002
+#define IGB_TX_FLAGS_TSO 0x00000004
+#define IGB_TX_FLAGS_IPV4 0x00000008
+#define IGB_TX_FLAGS_TSTAMP 0x00000010
+#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IGB_TX_FLAGS_VLAN_SHIFT 16
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct igb_tx_buffer {
+ union e1000_adv_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+ struct sk_buff *skb;
+ unsigned int bytecount;
+ u16 gso_segs;
+ dma_addr_t dma;
+ u32 length;
+ u32 tx_flags;
+};
+
+struct igb_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ dma_addr_t page_dma;
+ u32 page_offset;
+};
+
+struct igb_tx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 restart_queue;
+ u64 restart_queue2;
+};
+
+struct igb_rx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 drops;
+ u64 csum_err;
+ u64 alloc_failed;
+};
+
+struct igb_q_vector {
+ struct igb_adapter *adapter; /* backlink */
+ struct igb_ring *rx_ring;
+ struct igb_ring *tx_ring;
+ struct napi_struct napi;
+
+ u32 eims_value;
+ u16 cpu;
+ u16 tx_work_limit;
+
+ u16 itr_val;
+ u8 set_itr;
+ void __iomem *itr_register;
+
+ char name[IFNAMSIZ + 9];
+};
+
+struct igb_ring {
+ struct igb_q_vector *q_vector; /* backlink to q_vector */
+ struct net_device *netdev; /* back pointer to net_device */
+ struct device *dev; /* device pointer for dma mapping */
+ union { /* array of buffer info structs */
+ struct igb_tx_buffer *tx_buffer_info;
+ struct igb_rx_buffer *rx_buffer_info;
+ };
+ void *desc; /* descriptor ring memory */
+ unsigned long flags; /* ring specific flags */
+ void __iomem *tail; /* pointer to ring tail register */
+
+ u16 count; /* number of desc. in the ring */
+ u8 queue_index; /* logical index of the ring*/
+ u8 reg_idx; /* physical index of the ring */
+ u32 size; /* length of desc. ring in bytes */
+
+ /* everything past this point is written often */
+ u16 next_to_clean ____cacheline_aligned_in_smp;
+ u16 next_to_use;
+
+ unsigned int total_bytes;
+ unsigned int total_packets;
+
+ union {
+ /* TX */
+ struct {
+ struct igb_tx_queue_stats tx_stats;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync tx_syncp2;
+ bool detect_tx_hung;
+ };
+ /* RX */
+ struct {
+ struct igb_rx_queue_stats rx_stats;
+ struct u64_stats_sync rx_syncp;
+ };
+ };
+ /* Items past this point are only used during ring alloc / free */
+ dma_addr_t dma; /* phys address of the ring */
+};
+
+#define IGB_RING_FLAG_RX_CSUM 0x00000001 /* RX CSUM enabled */
+#define IGB_RING_FLAG_RX_SCTP_CSUM 0x00000002 /* SCTP CSUM offload enabled */
+
+#define IGB_RING_FLAG_TX_CTX_IDX 0x00000001 /* HW requires context index */
+
+#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
+
+#define IGB_RX_DESC(R, i) \
+ (&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
+#define IGB_TX_DESC(R, i) \
+ (&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
+#define IGB_TX_CTXTDESC(R, i) \
+ (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
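+
+/*
+ * Usage sketch for the accessors above, illustrative: they cast the raw
+ * ring memory and index it per descriptor, e.g.
+ *	union e1000_adv_rx_desc *rx_desc =
+ *		IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ */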
+
+/* igb_desc_unused - calculate the number of unused descriptors */
+static inline int igb_desc_unused(struct igb_ring *ring)
+{
+ if (ring->next_to_clean > ring->next_to_use)
+ return ring->next_to_clean - ring->next_to_use - 1;
+
+ return ring->count + ring->next_to_clean - ring->next_to_use - 1;
+}
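+
+/*
+ * Example, illustrative values: with count = 256, next_to_clean = 10 and
+ * next_to_use = 250 the ring has wrapped, so 256 + 10 - 250 - 1 = 15
+ * descriptors are free; one slot is always left empty so a full ring can
+ * be told apart from an empty one.
+ */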
+
+/* board specific private data structure */
+struct igb_adapter {
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+ struct net_device *netdev;
+
+ unsigned long state;
+ unsigned int flags;
+
+ unsigned int num_q_vectors;
+ struct msix_entry *msix_entries;
+
+ /* Interrupt Throttle Rate */
+ u32 rx_itr_setting;
+ u32 tx_itr_setting;
+ u16 tx_itr;
+ u16 rx_itr;
+
+ /* TX */
+ u16 tx_work_limit;
+ u32 tx_timeout_count;
+ int num_tx_queues;
+ struct igb_ring *tx_ring[16];
+
+ /* RX */
+ int num_rx_queues;
+ struct igb_ring *rx_ring[16];
+
+ u32 max_frame_size;
+ u32 min_frame_size;
+
+ struct timer_list watchdog_timer;
+ struct timer_list phy_info_timer;
+
+ u16 mng_vlan_id;
+ u32 bd_number;
+ u32 wol;
+ u32 en_mng_pt;
+ u16 link_speed;
+ u16 link_duplex;
+
+ struct work_struct reset_task;
+ struct work_struct watchdog_task;
+ bool fc_autoneg;
+ u8 tx_timeout_factor;
+ struct timer_list blink_timer;
+ unsigned long led_status;
+
+ /* OS defined structs */
+ struct pci_dev *pdev;
+ struct cyclecounter cycles;
+ struct timecounter clock;
+ struct timecompare compare;
+ struct hwtstamp_config hwtstamp_config;
+
+ spinlock_t stats64_lock;
+ struct rtnl_link_stats64 stats64;
+
+ /* structs defined in e1000_hw.h */
+ struct e1000_hw hw;
+ struct e1000_hw_stats stats;
+ struct e1000_phy_info phy_info;
+ struct e1000_phy_stats phy_stats;
+
+ u32 test_icr;
+ struct igb_ring test_tx_ring;
+ struct igb_ring test_rx_ring;
+
+ int msg_enable;
+
+ struct igb_q_vector *q_vector[MAX_Q_VECTORS];
+ u32 eims_enable_mask;
+ u32 eims_other;
+
+ /* to not mess up cache alignment, always add to the bottom */
+ u32 eeprom_wol;
+
+ u16 tx_ring_count;
+ u16 rx_ring_count;
+ unsigned int vfs_allocated_count;
+ struct vf_data_storage *vf_data;
+ int vf_rate_link_speed;
+ u32 rss_queues;
+ u32 wvbr;
+};
+
+#define IGB_FLAG_HAS_MSI (1 << 0)
+#define IGB_FLAG_DCA_ENABLED (1 << 1)
+#define IGB_FLAG_QUAD_PORT_A (1 << 2)
+#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
+#define IGB_FLAG_DMAC (1 << 4)
+
+/* DMA Coalescing defines */
+#define IGB_MIN_TXPBSIZE 20408
+#define IGB_TX_BUF_4096 4096
+#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
+
+#define IGB_82576_TSYNC_SHIFT 19
+#define IGB_82580_TSYNC_SHIFT 24
+#define IGB_TS_HDR_LEN 16
+
+enum e1000_state_t {
+ __IGB_TESTING,
+ __IGB_RESETTING,
+ __IGB_DOWN
+};
+
+enum igb_boards {
+ board_82575,
+};
+
+extern char igb_driver_name[];
+extern char igb_driver_version[];
+
+extern int igb_up(struct igb_adapter *);
+extern void igb_down(struct igb_adapter *);
+extern void igb_reinit_locked(struct igb_adapter *);
+extern void igb_reset(struct igb_adapter *);
+extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
+extern int igb_setup_tx_resources(struct igb_ring *);
+extern int igb_setup_rx_resources(struct igb_ring *);
+extern void igb_free_tx_resources(struct igb_ring *);
+extern void igb_free_rx_resources(struct igb_ring *);
+extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
+extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+extern void igb_setup_tctl(struct igb_adapter *);
+extern void igb_setup_rctl(struct igb_adapter *);
+extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
+extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
+ struct igb_tx_buffer *);
+extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
+extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
+extern bool igb_has_link(struct igb_adapter *adapter);
+extern void igb_set_ethtool_ops(struct net_device *);
+extern void igb_power_up_link(struct igb_adapter *);
+
+static inline s32 igb_reset_phy(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.reset)
+ return hw->phy.ops.reset(hw);
+
+ return 0;
+}
+
+static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ if (hw->phy.ops.read_reg)
+ return hw->phy.ops.read_reg(hw, offset, data);
+
+ return 0;
+}
+
+static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ if (hw->phy.ops.write_reg)
+ return hw->phy.ops.write_reg(hw, offset, data);
+
+ return 0;
+}
+
+static inline s32 igb_get_phy_info(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.get_phy_info)
+ return hw->phy.ops.get_phy_info(hw);
+
+ return 0;
+}
+
+#endif /* _IGB_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
new file mode 100644
index 000000000000..f227fc57eb11
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -0,0 +1,2202 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for igb */
+
+#include <linux/vmalloc.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/ethtool.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "igb.h"
+
+struct igb_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define IGB_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
+ .stat_offset = offsetof(struct igb_adapter, _stat) \
+}
+static const struct igb_stats igb_gstrings_stats[] = {
+ IGB_STAT("rx_packets", stats.gprc),
+ IGB_STAT("tx_packets", stats.gptc),
+ IGB_STAT("rx_bytes", stats.gorc),
+ IGB_STAT("tx_bytes", stats.gotc),
+ IGB_STAT("rx_broadcast", stats.bprc),
+ IGB_STAT("tx_broadcast", stats.bptc),
+ IGB_STAT("rx_multicast", stats.mprc),
+ IGB_STAT("tx_multicast", stats.mptc),
+ IGB_STAT("multicast", stats.mprc),
+ IGB_STAT("collisions", stats.colc),
+ IGB_STAT("rx_crc_errors", stats.crcerrs),
+ IGB_STAT("rx_no_buffer_count", stats.rnbc),
+ IGB_STAT("rx_missed_errors", stats.mpc),
+ IGB_STAT("tx_aborted_errors", stats.ecol),
+ IGB_STAT("tx_carrier_errors", stats.tncrs),
+ IGB_STAT("tx_window_errors", stats.latecol),
+ IGB_STAT("tx_abort_late_coll", stats.latecol),
+ IGB_STAT("tx_deferred_ok", stats.dc),
+ IGB_STAT("tx_single_coll_ok", stats.scc),
+ IGB_STAT("tx_multi_coll_ok", stats.mcc),
+ IGB_STAT("tx_timeout_count", tx_timeout_count),
+ IGB_STAT("rx_long_length_errors", stats.roc),
+ IGB_STAT("rx_short_length_errors", stats.ruc),
+ IGB_STAT("rx_align_errors", stats.algnerrc),
+ IGB_STAT("tx_tcp_seg_good", stats.tsctc),
+ IGB_STAT("tx_tcp_seg_failed", stats.tsctfc),
+ IGB_STAT("rx_flow_control_xon", stats.xonrxc),
+ IGB_STAT("rx_flow_control_xoff", stats.xoffrxc),
+ IGB_STAT("tx_flow_control_xon", stats.xontxc),
+ IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
+ IGB_STAT("rx_long_byte_count", stats.gorc),
+ IGB_STAT("tx_dma_out_of_sync", stats.doosync),
+ IGB_STAT("tx_smbus", stats.mgptc),
+ IGB_STAT("rx_smbus", stats.mgprc),
+ IGB_STAT("dropped_smbus", stats.mgpdc),
+ IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
+ IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
+ IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
+ IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
+};
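+
+/*
+ * How the table above is consumed (sketch of the usual ethtool
+ * get_ethtool_stats pattern; 'adapter' and 'data' belong to the caller):
+ *
+ *	char *p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
+ *	data[i] = (igb_gstrings_stats[i].sizeof_stat ==
+ *		   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ */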
+
+#define IGB_NETDEV_STAT(_net_stat) { \
+ .stat_string = __stringify(_net_stat), \
+ .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \
+ .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \
+}
+static const struct igb_stats igb_gstrings_net_stats[] = {
+ IGB_NETDEV_STAT(rx_errors),
+ IGB_NETDEV_STAT(tx_errors),
+ IGB_NETDEV_STAT(tx_dropped),
+ IGB_NETDEV_STAT(rx_length_errors),
+ IGB_NETDEV_STAT(rx_over_errors),
+ IGB_NETDEV_STAT(rx_frame_errors),
+ IGB_NETDEV_STAT(rx_fifo_errors),
+ IGB_NETDEV_STAT(tx_fifo_errors),
+ IGB_NETDEV_STAT(tx_heartbeat_errors)
+};
+
+#define IGB_GLOBAL_STATS_LEN \
+ (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
+#define IGB_NETDEV_STATS_LEN \
+ (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
+#define IGB_RX_QUEUE_STATS_LEN \
+ (sizeof(struct igb_rx_queue_stats) / sizeof(u64))
+
+#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */
+
+#define IGB_QUEUE_STATS_LEN \
+ ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
+ IGB_RX_QUEUE_STATS_LEN) + \
+ (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
+ IGB_TX_QUEUE_STATS_LEN))
+#define IGB_STATS_LEN \
+ (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)
+
+static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)", "Eeprom test (offline)",
+ "Interrupt test (offline)", "Loopback test (offline)",
+ "Link test (on/offline)"
+};
+#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
+
+static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 status;
+
+ if (hw->phy.media_type == e1000_media_type_copper) {
+
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full|
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
+ ecmd->advertising = ADVERTISED_TP;
+
+ if (hw->mac.autoneg == 1) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ /* the e1000 autoneg seems to match ethtool nicely */
+ ecmd->advertising |= hw->phy.autoneg_advertised;
+ }
+
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = hw->phy.addr;
+ } else {
+ ecmd->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
+
+ ecmd->advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg);
+
+ ecmd->port = PORT_FIBRE;
+ }
+
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ status = rd32(E1000_STATUS);
+
+ if (status & E1000_STATUS_LU) {
+
+ if ((status & E1000_STATUS_SPEED_1000) ||
+ hw->phy.media_type != e1000_media_type_copper)
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ else if (status & E1000_STATUS_SPEED_100)
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
+ else
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
+
+ if ((status & E1000_STATUS_FD) ||
+ hw->phy.media_type != e1000_media_type_copper)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ } else {
+ ethtool_cmd_speed_set(ecmd, -1);
+ ecmd->duplex = -1;
+ }
+
+ ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ return 0;
+}
+
+static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* When SoL/IDER sessions are active, autoneg/speed/duplex
+ * cannot be changed */
+ if (igb_check_reset_block(hw)) {
+ dev_err(&adapter->pdev->dev, "Cannot change link "
+ "characteristics when SoL/IDER is active.\n");
+ return -EINVAL;
+ }
+
+ while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+ msleep(1);
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ hw->mac.autoneg = 1;
+ hw->phy.autoneg_advertised = ecmd->advertising |
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
+ ecmd->advertising = hw->phy.autoneg_advertised;
+ if (adapter->fc_autoneg)
+ hw->fc.requested_mode = e1000_fc_default;
+ } else {
+ u32 speed = ethtool_cmd_speed(ecmd);
+ if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
+ clear_bit(__IGB_RESETTING, &adapter->state);
+ return -EINVAL;
+ }
+ }
+
+ /* reset the link */
+ if (netif_running(adapter->netdev)) {
+ igb_down(adapter);
+ igb_up(adapter);
+ } else
+ igb_reset(adapter);
+
+ clear_bit(__IGB_RESETTING, &adapter->state);
+ return 0;
+}
+
+static u32 igb_get_link(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+
+ /*
+ * If the link is not reported up to netdev, interrupts are disabled,
+ * and so the physical link state may have changed since we last
+ * looked. Set get_link_status to make sure that the true link
+ * state is interrogated, rather than pulling a cached and possibly
+ * stale link state from the driver.
+ */
+ if (!netif_carrier_ok(netdev))
+ mac->get_link_status = 1;
+
+ return igb_has_link(adapter);
+}
+
+static void igb_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ pause->autoneg =
+ (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ if (hw->fc.current_mode == e1000_fc_rx_pause)
+ pause->rx_pause = 1;
+ else if (hw->fc.current_mode == e1000_fc_tx_pause)
+ pause->tx_pause = 1;
+ else if (hw->fc.current_mode == e1000_fc_full) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+static int igb_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int retval = 0;
+
+ adapter->fc_autoneg = pause->autoneg;
+
+ while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+ msleep(1);
+
+ if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+ hw->fc.requested_mode = e1000_fc_default;
+ if (netif_running(adapter->netdev)) {
+ igb_down(adapter);
+ igb_up(adapter);
+ } else {
+ igb_reset(adapter);
+ }
+ } else {
+ if (pause->rx_pause && pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_full;
+ else if (pause->rx_pause && !pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_rx_pause;
+ else if (!pause->rx_pause && pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_tx_pause;
+ else if (!pause->rx_pause && !pause->tx_pause)
+ hw->fc.requested_mode = e1000_fc_none;
+
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ retval = ((hw->phy.media_type == e1000_media_type_copper) ?
+ igb_force_mac_fc(hw) : igb_setup_link(hw));
+ }
+
+ clear_bit(__IGB_RESETTING, &adapter->state);
+ return retval;
+}
+
+static u32 igb_get_msglevel(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void igb_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+static int igb_get_regs_len(struct net_device *netdev)
+{
+#define IGB_REGS_LEN 551
+ return IGB_REGS_LEN * sizeof(u32);
+}
+
+static void igb_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u8 i;
+
+ memset(p, 0, IGB_REGS_LEN * sizeof(u32));
+
+ regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+
+ /* General Registers */
+ regs_buff[0] = rd32(E1000_CTRL);
+ regs_buff[1] = rd32(E1000_STATUS);
+ regs_buff[2] = rd32(E1000_CTRL_EXT);
+ regs_buff[3] = rd32(E1000_MDIC);
+ regs_buff[4] = rd32(E1000_SCTL);
+ regs_buff[5] = rd32(E1000_CONNSW);
+ regs_buff[6] = rd32(E1000_VET);
+ regs_buff[7] = rd32(E1000_LEDCTL);
+ regs_buff[8] = rd32(E1000_PBA);
+ regs_buff[9] = rd32(E1000_PBS);
+ regs_buff[10] = rd32(E1000_FRTIMER);
+ regs_buff[11] = rd32(E1000_TCPTIMER);
+
+ /* NVM Register */
+ regs_buff[12] = rd32(E1000_EECD);
+
+ /* Interrupt */
+ /* Reading EICS for EICR because they read the
+ * same, but EICS does not clear on read */
+ regs_buff[13] = rd32(E1000_EICS);
+ regs_buff[14] = rd32(E1000_EICS);
+ regs_buff[15] = rd32(E1000_EIMS);
+ regs_buff[16] = rd32(E1000_EIMC);
+ regs_buff[17] = rd32(E1000_EIAC);
+ regs_buff[18] = rd32(E1000_EIAM);
+ /* Reading ICS for ICR because they read the
+ * same, but ICS does not clear on read */
+ regs_buff[19] = rd32(E1000_ICS);
+ regs_buff[20] = rd32(E1000_ICS);
+ regs_buff[21] = rd32(E1000_IMS);
+ regs_buff[22] = rd32(E1000_IMC);
+ regs_buff[23] = rd32(E1000_IAC);
+ regs_buff[24] = rd32(E1000_IAM);
+ regs_buff[25] = rd32(E1000_IMIRVP);
+
+ /* Flow Control */
+ regs_buff[26] = rd32(E1000_FCAL);
+ regs_buff[27] = rd32(E1000_FCAH);
+ regs_buff[28] = rd32(E1000_FCTTV);
+ regs_buff[29] = rd32(E1000_FCRTL);
+ regs_buff[30] = rd32(E1000_FCRTH);
+ regs_buff[31] = rd32(E1000_FCRTV);
+
+ /* Receive */
+ regs_buff[32] = rd32(E1000_RCTL);
+ regs_buff[33] = rd32(E1000_RXCSUM);
+ regs_buff[34] = rd32(E1000_RLPML);
+ regs_buff[35] = rd32(E1000_RFCTL);
+ regs_buff[36] = rd32(E1000_MRQC);
+ regs_buff[37] = rd32(E1000_VT_CTL);
+
+ /* Transmit */
+ regs_buff[38] = rd32(E1000_TCTL);
+ regs_buff[39] = rd32(E1000_TCTL_EXT);
+ regs_buff[40] = rd32(E1000_TIPG);
+ regs_buff[41] = rd32(E1000_DTXCTL);
+
+ /* Wake Up */
+ regs_buff[42] = rd32(E1000_WUC);
+ regs_buff[43] = rd32(E1000_WUFC);
+ regs_buff[44] = rd32(E1000_WUS);
+ regs_buff[45] = rd32(E1000_IPAV);
+ regs_buff[46] = rd32(E1000_WUPL);
+
+ /* MAC */
+ regs_buff[47] = rd32(E1000_PCS_CFG0);
+ regs_buff[48] = rd32(E1000_PCS_LCTL);
+ regs_buff[49] = rd32(E1000_PCS_LSTAT);
+ regs_buff[50] = rd32(E1000_PCS_ANADV);
+ regs_buff[51] = rd32(E1000_PCS_LPAB);
+ regs_buff[52] = rd32(E1000_PCS_NPTX);
+ regs_buff[53] = rd32(E1000_PCS_LPABNP);
+
+ /* Statistics */
+ regs_buff[54] = adapter->stats.crcerrs;
+ regs_buff[55] = adapter->stats.algnerrc;
+ regs_buff[56] = adapter->stats.symerrs;
+ regs_buff[57] = adapter->stats.rxerrc;
+ regs_buff[58] = adapter->stats.mpc;
+ regs_buff[59] = adapter->stats.scc;
+ regs_buff[60] = adapter->stats.ecol;
+ regs_buff[61] = adapter->stats.mcc;
+ regs_buff[62] = adapter->stats.latecol;
+ regs_buff[63] = adapter->stats.colc;
+ regs_buff[64] = adapter->stats.dc;
+ regs_buff[65] = adapter->stats.tncrs;
+ regs_buff[66] = adapter->stats.sec;
+ regs_buff[67] = adapter->stats.htdpmc;
+ regs_buff[68] = adapter->stats.rlec;
+ regs_buff[69] = adapter->stats.xonrxc;
+ regs_buff[70] = adapter->stats.xontxc;
+ regs_buff[71] = adapter->stats.xoffrxc;
+ regs_buff[72] = adapter->stats.xofftxc;
+ regs_buff[73] = adapter->stats.fcruc;
+ regs_buff[74] = adapter->stats.prc64;
+ regs_buff[75] = adapter->stats.prc127;
+ regs_buff[76] = adapter->stats.prc255;
+ regs_buff[77] = adapter->stats.prc511;
+ regs_buff[78] = adapter->stats.prc1023;
+ regs_buff[79] = adapter->stats.prc1522;
+ regs_buff[80] = adapter->stats.gprc;
+ regs_buff[81] = adapter->stats.bprc;
+ regs_buff[82] = adapter->stats.mprc;
+ regs_buff[83] = adapter->stats.gptc;
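+ /* gorc/gotc, tor/tot and hgorc/hgotc are 64-bit counters, so each
+ * takes two entries and the following index is skipped */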
+ regs_buff[84] = adapter->stats.gorc;
+ regs_buff[86] = adapter->stats.gotc;
+ regs_buff[88] = adapter->stats.rnbc;
+ regs_buff[89] = adapter->stats.ruc;
+ regs_buff[90] = adapter->stats.rfc;
+ regs_buff[91] = adapter->stats.roc;
+ regs_buff[92] = adapter->stats.rjc;
+ regs_buff[93] = adapter->stats.mgprc;
+ regs_buff[94] = adapter->stats.mgpdc;
+ regs_buff[95] = adapter->stats.mgptc;
+ regs_buff[96] = adapter->stats.tor;
+ regs_buff[98] = adapter->stats.tot;
+ regs_buff[100] = adapter->stats.tpr;
+ regs_buff[101] = adapter->stats.tpt;
+ regs_buff[102] = adapter->stats.ptc64;
+ regs_buff[103] = adapter->stats.ptc127;
+ regs_buff[104] = adapter->stats.ptc255;
+ regs_buff[105] = adapter->stats.ptc511;
+ regs_buff[106] = adapter->stats.ptc1023;
+ regs_buff[107] = adapter->stats.ptc1522;
+ regs_buff[108] = adapter->stats.mptc;
+ regs_buff[109] = adapter->stats.bptc;
+ regs_buff[110] = adapter->stats.tsctc;
+ regs_buff[111] = adapter->stats.iac;
+ regs_buff[112] = adapter->stats.rpthc;
+ regs_buff[113] = adapter->stats.hgptc;
+ regs_buff[114] = adapter->stats.hgorc;
+ regs_buff[116] = adapter->stats.hgotc;
+ regs_buff[118] = adapter->stats.lenerrs;
+ regs_buff[119] = adapter->stats.scvpc;
+ regs_buff[120] = adapter->stats.hrmpc;
+
+ for (i = 0; i < 4; i++)
+ regs_buff[121 + i] = rd32(E1000_SRRCTL(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[125 + i] = rd32(E1000_PSRTYPE(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[129 + i] = rd32(E1000_RDBAL(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[133 + i] = rd32(E1000_RDBAH(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[137 + i] = rd32(E1000_RDLEN(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[141 + i] = rd32(E1000_RDH(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[145 + i] = rd32(E1000_RDT(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[149 + i] = rd32(E1000_RXDCTL(i));
+
+ for (i = 0; i < 10; i++)
+ regs_buff[153 + i] = rd32(E1000_EITR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[163 + i] = rd32(E1000_IMIR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[171 + i] = rd32(E1000_IMIREXT(i));
+ for (i = 0; i < 16; i++)
+ regs_buff[179 + i] = rd32(E1000_RAL(i));
+ for (i = 0; i < 16; i++)
+ regs_buff[195 + i] = rd32(E1000_RAH(i));
+
+ for (i = 0; i < 4; i++)
+ regs_buff[211 + i] = rd32(E1000_TDBAL(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[215 + i] = rd32(E1000_TDBAH(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[219 + i] = rd32(E1000_TDLEN(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[223 + i] = rd32(E1000_TDH(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[227 + i] = rd32(E1000_TDT(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[231 + i] = rd32(E1000_TXDCTL(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[235 + i] = rd32(E1000_TDWBAL(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[239 + i] = rd32(E1000_TDWBAH(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i));
+
+ for (i = 0; i < 4; i++)
+ regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i));
+ for (i = 0; i < 32; i++)
+ regs_buff[255 + i] = rd32(E1000_WUPM_REG(i));
+ for (i = 0; i < 128; i++)
+ regs_buff[287 + i] = rd32(E1000_FFMT_REG(i));
+ for (i = 0; i < 128; i++)
+ regs_buff[415 + i] = rd32(E1000_FFVT_REG(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[543 + i] = rd32(E1000_FFLT_REG(i));
+
+ regs_buff[547] = rd32(E1000_TDFH);
+ regs_buff[548] = rd32(E1000_TDFT);
+ regs_buff[549] = rd32(E1000_TDFHS);
+ regs_buff[550] = rd32(E1000_TDFPC);
+ regs_buff[551] = adapter->stats.o2bgptc;
+ regs_buff[552] = adapter->stats.b2ospc;
+ regs_buff[553] = adapter->stats.o2bspc;
+ regs_buff[554] = adapter->stats.b2ogprc;
+}
+
+static int igb_get_eeprom_len(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
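+ /* the NVM size is tracked in 16-bit words; ethtool expects bytes */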
+ return adapter->hw.nvm.word_size * 2;
+}
+
+static int igb_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ int first_word, last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
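+ /* encode vendor/device ID so userspace can match a saved image
+ * to this NIC */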
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
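+ /* the NVM is addressed in 16-bit words, so widen the requested
+ * byte range out to whole words */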
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc(sizeof(u16) *
+ (last_word - first_word + 1), GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ if (hw->nvm.type == e1000_nvm_eeprom_spi)
+ ret_val = hw->nvm.ops.read(hw, first_word,
+ last_word - first_word + 1,
+ eeprom_buff);
+ else {
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ ret_val = hw->nvm.ops.read(hw, first_word + i, 1,
+ &eeprom_buff[i]);
+ if (ret_val)
+ break;
+ }
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
+ eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static int igb_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ void *ptr;
+ int max_len, first_word, last_word, ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ return -EFAULT;
+
+ max_len = hw->nvm.word_size * 2;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = (void *)eeprom_buff;
+
+ if (eeprom->offset & 1) {
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ ret_val = hw->nvm.ops.read(hw, first_word, 1,
+ &eeprom_buff[0]);
+ ptr++;
+ }
+ if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
+ /* need read/modify/write of last changed EEPROM word */
+ /* only the first byte of the word is being modified */
+ ret_val = hw->nvm.ops.read(hw, last_word, 1,
+ &eeprom_buff[last_word - first_word]);
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+
+ ret_val = hw->nvm.ops.write(hw, first_word,
+ last_word - first_word + 1, eeprom_buff);
+
+ /* Update the checksum over the first part of the EEPROM if needed
+ * and flush shadow RAM on parts that maintain one */
+ if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
+ hw->nvm.ops.update(hw);
+
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
+static void igb_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ char firmware_version[32];
+ u16 eeprom_data;
+
+ strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
+ strncpy(drvinfo->version, igb_driver_version,
+ sizeof(drvinfo->version) - 1);
+
+ /* EEPROM image version # is reported as firmware version # for
+ * 82575 controllers */
+ adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data);
+ sprintf(firmware_version, "%d.%d-%d",
+ (eeprom_data & 0xF000) >> 12,
+ (eeprom_data & 0x0FF0) >> 4,
+ eeprom_data & 0x000F);
+
+ strncpy(drvinfo->fw_version, firmware_version,
+ sizeof(drvinfo->fw_version) - 1);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info) - 1);
+ drvinfo->n_stats = IGB_STATS_LEN;
+ drvinfo->testinfo_len = IGB_TEST_LEN;
+ drvinfo->regdump_len = igb_get_regs_len(netdev);
+ drvinfo->eedump_len = igb_get_eeprom_len(netdev);
+}
+
+static void igb_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ ring->rx_max_pending = IGB_MAX_RXD;
+ ring->tx_max_pending = IGB_MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = adapter->rx_ring_count;
+ ring->tx_pending = adapter->tx_ring_count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int igb_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct igb_ring *temp_ring;
+ int i, err = 0;
+ u16 new_rx_count, new_tx_count;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD);
+ new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD);
+ new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD);
+ new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD);
+ new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ if ((new_tx_count == adapter->tx_ring_count) &&
+ (new_rx_count == adapter->rx_ring_count)) {
+ /* nothing to do */
+ return 0;
+ }
+
+ while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+ msleep(1);
+
+ if (!netif_running(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->count = new_tx_count;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->count = new_rx_count;
+ adapter->tx_ring_count = new_tx_count;
+ adapter->rx_ring_count = new_rx_count;
+ goto clear_reset;
+ }
+
+ if (adapter->num_tx_queues > adapter->num_rx_queues)
+ temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
+ else
+ temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
+
+ if (!temp_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
+
+ igb_down(adapter);
+
+ /*
+ * We can't just free everything and then setup again,
+ * because the ISRs in MSI-X mode get passed pointers
+ * to the tx and rx ring structs.
+ */
+ if (new_tx_count != adapter->tx_ring_count) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ memcpy(&temp_ring[i], adapter->tx_ring[i],
+ sizeof(struct igb_ring));
+
+ temp_ring[i].count = new_tx_count;
+ err = igb_setup_tx_resources(&temp_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ igb_free_tx_resources(&temp_ring[i]);
+ }
+ goto err_setup;
+ }
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ igb_free_tx_resources(adapter->tx_ring[i]);
+
+ memcpy(adapter->tx_ring[i], &temp_ring[i],
+ sizeof(struct igb_ring));
+ }
+
+ adapter->tx_ring_count = new_tx_count;
+ }
+
+ if (new_rx_count != adapter->rx_ring_count) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ memcpy(&temp_ring[i], adapter->rx_ring[i],
+ sizeof(struct igb_ring));
+
+ temp_ring[i].count = new_rx_count;
+ err = igb_setup_rx_resources(&temp_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ igb_free_rx_resources(&temp_ring[i]);
+ }
+ goto err_setup;
+ }
+
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ igb_free_rx_resources(adapter->rx_ring[i]);
+
+ memcpy(adapter->rx_ring[i], &temp_ring[i],
+ sizeof(struct igb_ring));
+ }
+
+ adapter->rx_ring_count = new_rx_count;
+ }
+err_setup:
+ igb_up(adapter);
+ vfree(temp_ring);
+clear_reset:
+ clear_bit(__IGB_RESETTING, &adapter->state);
+ return err;
+}
+
+/* ethtool register test data */
+struct igb_reg_test {
+ u16 reg;
+ u16 reg_offset;
+ u16 array_len;
+ u16 test_type;
+ u32 mask;
+ u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x100 bytes apart, or in contiguous tables. We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define WRITE_NO_TEST 3
+#define TABLE32_TEST 4
+#define TABLE64_TEST_LO 5
+#define TABLE64_TEST_HI 6
+
+/* i350 reg test */
+static struct igb_reg_test reg_test_i350[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 },
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ /* RDH is read-only for i350, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
+ 0xC3FFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 16, TABLE64_TEST_HI,
+ 0xC3FFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
+/* 82580 reg test */
+static struct igb_reg_test reg_test_82580[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ /* RDH is read-only for 82580, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
+ 0x83FFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 8, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 8, TABLE64_TEST_HI,
+ 0x83FFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
+/* 82576 reg test */
+static struct igb_reg_test reg_test_82576[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ /* Enable all RX queues before testing. */
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ /* RDH is read-only for 82576, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
+ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
+/* 82575 register test */
+static struct igb_reg_test reg_test_82575[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ /* Enable all four RX queues before testing. */
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ /* RDH is read-only for 82575, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
+static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
+ int reg, u32 mask, u32 write)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 pat, val;
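+ /* alternating-bit patterns plus all-zeros/all-ones catch stuck
+ * and shorted register bits */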
+ static const u32 _test[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+ };
+ for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
+ wr32(reg, (_test[pat] & write));
+ val = rd32(reg) & mask;
+ if (val != (_test[pat] & write & mask)) {
+ dev_err(&adapter->pdev->dev, "pattern test reg %04X "
+ "failed: got 0x%08X expected 0x%08X\n",
+ reg, val, (_test[pat] & write & mask));
+ *data = reg;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
+ int reg, u32 mask, u32 write)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 val;
+ wr32(reg, write & mask);
+ val = rd32(reg);
+ if ((write & mask) != (val & mask)) {
+ dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:"
+ " got 0x%08X expected 0x%08X\n", reg,
+ (val & mask), (write & mask));
+ *data = reg;
+ return 1;
+ }
+
+ return 0;
+}
+
+#define REG_PATTERN_TEST(reg, mask, write) \
+ do { \
+ if (reg_pattern_test(adapter, data, reg, mask, write)) \
+ return 1; \
+ } while (0)
+
+#define REG_SET_AND_CHECK(reg, mask, write) \
+ do { \
+ if (reg_set_and_check(adapter, data, reg, mask, write)) \
+ return 1; \
+ } while (0)
+
+static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_reg_test *test;
+ u32 value, before, after;
+ u32 i, toggle;
+
+ switch (adapter->hw.mac.type) {
+ case e1000_i350:
+ test = reg_test_i350;
+ toggle = 0x7FEFF3FF;
+ break;
+ case e1000_82580:
+ test = reg_test_82580;
+ toggle = 0x7FEFF3FF;
+ break;
+ case e1000_82576:
+ test = reg_test_82576;
+ toggle = 0x7FFFF3FF;
+ break;
+ default:
+ test = reg_test_82575;
+ toggle = 0x7FFFF3FF;
+ break;
+ }
+
+ /* Because the status register is such a special case,
+ * we handle it separately from the rest of the register
+ * tests. Some bits are read-only, some toggle, and some
+ * are writable on newer MACs.
+ */
+ before = rd32(E1000_STATUS);
+ value = (rd32(E1000_STATUS) & toggle);
+ wr32(E1000_STATUS, toggle);
+ after = rd32(E1000_STATUS) & toggle;
+ if (value != after) {
+ dev_err(&adapter->pdev->dev, "failed STATUS register test "
+ "got: 0x%08X expected: 0x%08X\n", after, value);
+ *data = 1;
+ return 1;
+ }
+ /* restore previous status */
+ wr32(E1000_STATUS, before);
+
+ /* Perform the remainder of the register test, looping through
+ * the test table until we either fail or reach the null entry.
+ */
+ while (test->reg) {
+ for (i = 0; i < test->array_len; i++) {
+ switch (test->test_type) {
+ case PATTERN_TEST:
+ REG_PATTERN_TEST(test->reg +
+ (i * test->reg_offset),
+ test->mask,
+ test->write);
+ break;
+ case SET_READ_TEST:
+ REG_SET_AND_CHECK(test->reg +
+ (i * test->reg_offset),
+ test->mask,
+ test->write);
+ break;
+ case WRITE_NO_TEST:
+ writel(test->write,
+ (adapter->hw.hw_addr + test->reg)
+ + (i * test->reg_offset));
+ break;
+ case TABLE32_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 4),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_LO:
+ REG_PATTERN_TEST(test->reg + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_HI:
+ REG_PATTERN_TEST((test->reg + 4) + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ }
+ }
+ test++;
+ }
+
+ *data = 0;
+ return 0;
+}
+
+static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
+{
+ u16 temp;
+ u16 checksum = 0;
+ u16 i;
+
+ *data = 0;
+ /* Read and add up the contents of the EEPROM */
+ for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 0) {
+ *data = 1;
+ break;
+ }
+ checksum += temp;
+ }
+
+ /* If the checksum is not correct, return an error; otherwise the test passed */
+ if ((checksum != (u16) NVM_SUM) && !(*data))
+ *data = 2;
+
+ return *data;
+}
+
+static irqreturn_t igb_test_intr(int irq, void *data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *) data;
+ struct e1000_hw *hw = &adapter->hw;
+
+ adapter->test_icr |= rd32(E1000_ICR);
+
+ return IRQ_HANDLED;
+}
+
+static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 mask, ics_mask, i = 0, shared_int = true;
+ u32 irq = adapter->pdev->irq;
+
+ *data = 0;
+
+ /* Hook up test interrupt handler just for this test */
+ if (adapter->msix_entries) {
+ if (request_irq(adapter->msix_entries[0].vector,
+ igb_test_intr, 0, netdev->name, adapter)) {
+ *data = 1;
+ return -1;
+ }
+ } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
+ shared_int = false;
+ if (request_irq(irq,
+ igb_test_intr, 0, netdev->name, adapter)) {
+ *data = 1;
+ return -1;
+ }
+ } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED,
+ netdev->name, adapter)) {
+ shared_int = false;
+ } else if (request_irq(irq, igb_test_intr, IRQF_SHARED,
+ netdev->name, adapter)) {
+ *data = 1;
+ return -1;
+ }
+ dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
+ (shared_int ? "shared" : "unshared"));
+
+ /* Disable all the interrupts */
+ wr32(E1000_IMC, ~0);
+ wrfl();
+ msleep(10);
+
+ /* Define all writable bits for ICS */
+ switch (hw->mac.type) {
+ case e1000_82575:
+ ics_mask = 0x37F47EDD;
+ break;
+ case e1000_82576:
+ ics_mask = 0x77D4FBFD;
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ ics_mask = 0x77DCFED5;
+ break;
+ default:
+ ics_mask = 0x7FFFFFFF;
+ break;
+ }
+
+ /* Test each interrupt */
+ for (; i < 31; i++) {
+ /* Interrupt to test */
+ mask = 1 << i;
+
+ if (!(mask & ics_mask))
+ continue;
+
+ if (!shared_int) {
+ /* Disable the interrupt to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+
+ /* Flush any pending interrupts */
+ wr32(E1000_ICR, ~0);
+
+ wr32(E1000_IMC, mask);
+ wr32(E1000_ICS, mask);
+ wrfl();
+ msleep(10);
+
+ if (adapter->test_icr & mask) {
+ *data = 3;
+ break;
+ }
+ }
+
+ /* Enable the interrupt to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was not posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+
+ /* Flush any pending interrupts */
+ wr32(E1000_ICR, ~0);
+
+ wr32(E1000_IMS, mask);
+ wr32(E1000_ICS, mask);
+ wrfl();
+ msleep(10);
+
+ if (!(adapter->test_icr & mask)) {
+ *data = 4;
+ break;
+ }
+
+ if (!shared_int) {
+ /* Disable the other interrupts to be reported in
+ * the cause register and then force the other
+ * interrupts and see if any get posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+
+ /* Flush any pending interrupts */
+ wr32(E1000_ICR, ~0);
+
+ wr32(E1000_IMC, ~mask);
+ wr32(E1000_ICS, ~mask);
+ wrfl();
+ msleep(10);
+
+ if (adapter->test_icr & mask) {
+ *data = 5;
+ break;
+ }
+ }
+ }
+
+ /* Disable all the interrupts */
+ wr32(E1000_IMC, ~0);
+ wrfl();
+ msleep(10);
+
+ /* Unhook test interrupt handler */
+ if (adapter->msix_entries)
+ free_irq(adapter->msix_entries[0].vector, adapter);
+ else
+ free_irq(irq, adapter);
+
+ return *data;
+}
+
+static void igb_free_desc_rings(struct igb_adapter *adapter)
+{
+ igb_free_tx_resources(&adapter->test_tx_ring);
+ igb_free_rx_resources(&adapter->test_rx_ring);
+}
+
+static int igb_setup_desc_rings(struct igb_adapter *adapter)
+{
+ struct igb_ring *tx_ring = &adapter->test_tx_ring;
+ struct igb_ring *rx_ring = &adapter->test_rx_ring;
+ struct e1000_hw *hw = &adapter->hw;
+ int ret_val;
+
+ /* Setup Tx descriptor ring and Tx buffers */
+ tx_ring->count = IGB_DEFAULT_TXD;
+ tx_ring->dev = &adapter->pdev->dev;
+ tx_ring->netdev = adapter->netdev;
+ tx_ring->reg_idx = adapter->vfs_allocated_count;
+
+ if (igb_setup_tx_resources(tx_ring)) {
+ ret_val = 1;
+ goto err_nomem;
+ }
+
+ igb_setup_tctl(adapter);
+ igb_configure_tx_ring(adapter, tx_ring);
+
+ /* Setup Rx descriptor ring and Rx buffers */
+ rx_ring->count = IGB_DEFAULT_RXD;
+ rx_ring->dev = &adapter->pdev->dev;
+ rx_ring->netdev = adapter->netdev;
+ rx_ring->reg_idx = adapter->vfs_allocated_count;
+
+ if (igb_setup_rx_resources(rx_ring)) {
+ ret_val = 3;
+ goto err_nomem;
+ }
+
+ /* set the default queue to queue 0 of PF */
+ wr32(E1000_MRQC, adapter->vfs_allocated_count << 3);
+
+ /* enable receive ring */
+ igb_setup_rctl(adapter);
+ igb_configure_rx_ring(adapter, rx_ring);
+
+ igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));
+
+ return 0;
+
+err_nomem:
+ igb_free_desc_rings(adapter);
+ return ret_val;
+}
+
+static void igb_phy_disable_receiver(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Write out to PHY registers 29 and 30 to disable the Receiver. */
+ igb_write_phy_reg(hw, 29, 0x001F);
+ igb_write_phy_reg(hw, 30, 0x8FFC);
+ igb_write_phy_reg(hw, 29, 0x001A);
+ igb_write_phy_reg(hw, 30, 0x8FF0);
+}
+
+static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_reg = 0;
+
+ hw->mac.autoneg = false;
+
+ if (hw->phy.type == e1000_phy_m88) {
+ /* Auto-MDI/MDIX Off */
+ igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
+ /* reset to update Auto-MDI/MDIX */
+ igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
+ /* autoneg off */
+ igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
+ } else if (hw->phy.type == e1000_phy_82580) {
+ /* enable MII loopback */
+ igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
+ }
+
+ ctrl_reg = rd32(E1000_CTRL);
+
+ /* force 1000, set loopback */
+ igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
+
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg = rd32(E1000_CTRL);
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+ E1000_CTRL_FD | /* Force Duplex to FULL */
+ E1000_CTRL_SLU); /* Set link up enable bit */
+
+ if (hw->phy.type == e1000_phy_m88)
+ ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
+
+ wr32(E1000_CTRL, ctrl_reg);
+
+ /* Disable the receiver on the PHY so that the PHY does not begin to
+ * autoneg when a cable is reconnected to the NIC.
+ */
+ if (hw->phy.type == e1000_phy_m88)
+ igb_phy_disable_receiver(adapter);
+
+ udelay(500);
+
+ return 0;
+}
+
+static int igb_set_phy_loopback(struct igb_adapter *adapter)
+{
+ return igb_integrated_phy_loopback(adapter);
+}
+
+static int igb_setup_loopback_test(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg;
+
+ reg = rd32(E1000_CTRL_EXT);
+
+ /* use CTRL_EXT to identify link type as SGMII can appear as copper */
+ if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
+ if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+
+ /* Enable DH89xxCC MPHY for near end loopback */
+ reg = rd32(E1000_MPHY_ADDR_CTL);
+ reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
+ E1000_MPHY_PCS_CLK_REG_OFFSET;
+ wr32(E1000_MPHY_ADDR_CTL, reg);
+
+ reg = rd32(E1000_MPHY_DATA);
+ reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
+ wr32(E1000_MPHY_DATA, reg);
+ }
+
+ reg = rd32(E1000_RCTL);
+ reg |= E1000_RCTL_LBM_TCVR;
+ wr32(E1000_RCTL, reg);
+
+ wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);
+
+ reg = rd32(E1000_CTRL);
+ reg &= ~(E1000_CTRL_RFCE |
+ E1000_CTRL_TFCE |
+ E1000_CTRL_LRST);
+ reg |= E1000_CTRL_SLU |
+ E1000_CTRL_FD;
+ wr32(E1000_CTRL, reg);
+
+ /* Unset switch control to serdes energy detect */
+ reg = rd32(E1000_CONNSW);
+ reg &= ~E1000_CONNSW_ENRGSRC;
+ wr32(E1000_CONNSW, reg);
+
+ /* Set PCS register for forced speed */
+ reg = rd32(E1000_PCS_LCTL);
+ reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg */
+ reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */
+ E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */
+ E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */
+ E1000_PCS_LCTL_FSD | /* Force Speed */
+ E1000_PCS_LCTL_FORCE_LINK; /* Force Link */
+ wr32(E1000_PCS_LCTL, reg);
+
+ return 0;
+ }
+
+ return igb_set_phy_loopback(adapter);
+}
+
+static void igb_loopback_cleanup(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+ u16 phy_reg;
+
+ if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+ u32 reg;
+
+ /* Disable near end loopback on DH89xxCC */
+ reg = rd32(E1000_MPHY_ADDR_CTL);
+ reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
+ E1000_MPHY_PCS_CLK_REG_OFFSET;
+ wr32(E1000_MPHY_ADDR_CTL, reg);
+
+ reg = rd32(E1000_MPHY_DATA);
+ reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
+ wr32(E1000_MPHY_DATA, reg);
+ }
+
+ rctl = rd32(E1000_RCTL);
+ rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+ wr32(E1000_RCTL, rctl);
+
+ hw->mac.autoneg = true;
+ igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
+ if (phy_reg & MII_CR_LOOPBACK) {
+ phy_reg &= ~MII_CR_LOOPBACK;
+ igb_write_phy_reg(hw, PHY_CONTROL, phy_reg);
+ igb_phy_sw_reset(hw);
+ }
+}
+
+static void igb_create_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
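+ /* frame layout: first half 0xFF, second half 0xAA with 0xBE/0xAF
+ * markers that igb_check_lbtest_frame() looks for */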
+ memset(skb->data, 0xFF, frame_size);
+ frame_size /= 2;
+ memset(&skb->data[frame_size], 0xAA, frame_size - 1);
+ memset(&skb->data[frame_size + 10], 0xBE, 1);
+ memset(&skb->data[frame_size + 12], 0xAF, 1);
+}
+
+static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
+{
+ frame_size /= 2;
+ if (*(skb->data + 3) == 0xFF) {
+ if ((*(skb->data + frame_size + 10) == 0xBE) &&
+ (*(skb->data + frame_size + 12) == 0xAF)) {
+ return 0;
+ }
+ }
+ return 13;
+}
+
+static int igb_clean_test_rings(struct igb_ring *rx_ring,
+ struct igb_ring *tx_ring,
+ unsigned int size)
+{
+ union e1000_adv_rx_desc *rx_desc;
+ struct igb_rx_buffer *rx_buffer_info;
+ struct igb_tx_buffer *tx_buffer_info;
+ int rx_ntc, tx_ntc, count = 0;
+ u32 staterr;
+
+ /* initialize next to clean and descriptor values */
+ rx_ntc = rx_ring->next_to_clean;
+ tx_ntc = tx_ring->next_to_clean;
+ rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ while (staterr & E1000_RXD_STAT_DD) {
+ /* check rx buffer */
+ rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
+
+ /* unmap rx buffer, will be remapped by alloc_rx_buffers */
+ dma_unmap_single(rx_ring->dev,
+ rx_buffer_info->dma,
+ IGB_RX_HDR_LEN,
+ DMA_FROM_DEVICE);
+ rx_buffer_info->dma = 0;
+
+ /* verify contents of skb */
+ if (!igb_check_lbtest_frame(rx_buffer_info->skb, size))
+ count++;
+
+ /* unmap buffer on tx side */
+ tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
+ igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+
+ /* increment rx/tx next to clean counters */
+ rx_ntc++;
+ if (rx_ntc == rx_ring->count)
+ rx_ntc = 0;
+ tx_ntc++;
+ if (tx_ntc == tx_ring->count)
+ tx_ntc = 0;
+
+ /* fetch next descriptor */
+ rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+
+ /* re-map buffers to ring, store next to clean values */
+ igb_alloc_rx_buffers(rx_ring, count);
+ rx_ring->next_to_clean = rx_ntc;
+ tx_ring->next_to_clean = tx_ntc;
+
+ return count;
+}
+
+static int igb_run_loopback_test(struct igb_adapter *adapter)
+{
+ struct igb_ring *tx_ring = &adapter->test_tx_ring;
+ struct igb_ring *rx_ring = &adapter->test_rx_ring;
+ int i, j, lc, good_cnt, ret_val = 0;
+ unsigned int size = IGB_RX_HDR_LEN;
+ netdev_tx_t tx_ret_val;
+ struct sk_buff *skb;
+
+ /* allocate test skb */
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return 11;
+
+ /* place data into test skb */
+ igb_create_lbtest_frame(skb, size);
+ skb_put(skb, size);
+
+ /*
+ * Calculate the loop count based on the largest descriptor ring
+ * The idea is to wrap the largest ring a number of times using 64
+ * send/receive pairs during each loop
+ */
+
+ if (rx_ring->count <= tx_ring->count)
+ lc = ((tx_ring->count / 64) * 2) + 1;
+ else
+ lc = ((rx_ring->count / 64) * 2) + 1;
+
+ for (j = 0; j <= lc; j++) { /* loop count loop */
+ /* reset count of good packets */
+ good_cnt = 0;
+
+ /* place 64 packets on the transmit queue */
+ for (i = 0; i < 64; i++) {
+ skb_get(skb);
+ tx_ret_val = igb_xmit_frame_ring(skb, tx_ring);
+ if (tx_ret_val == NETDEV_TX_OK)
+ good_cnt++;
+ }
+
+ if (good_cnt != 64) {
+ ret_val = 12;
+ break;
+ }
+
+ /* allow 200 milliseconds for packets to go from tx to rx */
+ msleep(200);
+
+ good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
+ if (good_cnt != 64) {
+ ret_val = 13;
+ break;
+ }
+ } /* end loop count loop */
+
+ /* free the original skb */
+ kfree_skb(skb);
+
+ return ret_val;
+}
+
+static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
+{
+ /* PHY loopback cannot be performed if SoL/IDER
+ * sessions are active */
+ if (igb_check_reset_block(&adapter->hw)) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot do PHY loopback test "
+ "when SoL/IDER is active.\n");
+ *data = 0;
+ goto out;
+ }
+ *data = igb_setup_desc_rings(adapter);
+ if (*data)
+ goto out;
+ *data = igb_setup_loopback_test(adapter);
+ if (*data)
+ goto err_loopback;
+ *data = igb_run_loopback_test(adapter);
+ igb_loopback_cleanup(adapter);
+
+err_loopback:
+ igb_free_desc_rings(adapter);
+out:
+ return *data;
+}
+
+static int igb_link_test(struct igb_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ *data = 0;
+ if (hw->phy.media_type == e1000_media_type_internal_serdes) {
+ int i = 0;
+ hw->mac.serdes_has_link = false;
+
+ /* On some blade server designs, link establishment can take a
+ * long time; poll for up to 75 seconds (3750 * 20 msec) */
+ do {
+ hw->mac.ops.check_for_link(&adapter->hw);
+ if (hw->mac.serdes_has_link)
+ return *data;
+ msleep(20);
+ } while (i++ < 3750);
+
+ *data = 1;
+ } else {
+ hw->mac.ops.check_for_link(&adapter->hw);
+ if (hw->mac.autoneg)
+ msleep(4000);
+
+ if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
+ *data = 1;
+ }
+ return *data;
+}
+
+static void igb_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ u16 autoneg_advertised;
+ u8 forced_speed_duplex, autoneg;
+ bool if_running = netif_running(netdev);
+
+ set_bit(__IGB_TESTING, &adapter->state);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ /* save speed, duplex, autoneg settings */
+ autoneg_advertised = adapter->hw.phy.autoneg_advertised;
+ forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
+ autoneg = adapter->hw.mac.autoneg;
+
+ dev_info(&adapter->pdev->dev, "offline testing starting\n");
+
+ /* power up link for link test */
+ igb_power_up_link(adapter);
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result */
+ if (igb_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ igb_reset(adapter);
+
+ if (igb_reg_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ igb_reset(adapter);
+ if (igb_eeprom_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ igb_reset(adapter);
+ if (igb_intr_test(adapter, &data[2]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ igb_reset(adapter);
+ /* power up link for loopback test */
+ igb_power_up_link(adapter);
+ if (igb_loopback_test(adapter, &data[3]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* restore speed, duplex, autoneg settings */
+ adapter->hw.phy.autoneg_advertised = autoneg_advertised;
+ adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
+ adapter->hw.mac.autoneg = autoneg;
+
+ /* force this routine to wait until autoneg completes or times out */
+ adapter->hw.phy.autoneg_wait_to_complete = true;
+ igb_reset(adapter);
+ adapter->hw.phy.autoneg_wait_to_complete = false;
+
+ clear_bit(__IGB_TESTING, &adapter->state);
+ if (if_running)
+ dev_open(netdev);
+ } else {
+ dev_info(&adapter->pdev->dev, "online testing starting\n");
+
+ /* PHY is powered down when interface is down */
+ if (if_running && igb_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ else
+ data[4] = 0;
+
+ /* Online tests aren't run; pass by default */
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0;
+ data[3] = 0;
+
+ clear_bit(__IGB_TESTING, &adapter->state);
+ }
+ msleep_interruptible(4 * 1000);
+}
+
+static int igb_wol_exclusion(struct igb_adapter *adapter,
+ struct ethtool_wolinfo *wol)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int retval = 1; /* fail by default */
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82575GB_QUAD_COPPER:
+ /* WoL not supported */
+ wol->supported = 0;
+ break;
+ case E1000_DEV_ID_82575EB_FIBER_SERDES:
+ case E1000_DEV_ID_82576_FIBER:
+ case E1000_DEV_ID_82576_SERDES:
+ /* Wake events not supported on port B */
+ if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) {
+ wol->supported = 0;
+ break;
+ }
+ /* return success for non excluded adapter ports */
+ retval = 0;
+ break;
+ case E1000_DEV_ID_82576_QUAD_COPPER:
+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+ /* quad port adapters only support WoL on port A */
+ if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
+ wol->supported = 0;
+ break;
+ }
+ /* return success for non excluded adapter ports */
+ retval = 0;
+ break;
+ default:
+ /* Dual port cards only support WoL on port A unless it was
+ * enabled in the EEPROM for port B, so exclude FUNC_1 ports
+ * from having WoL enabled */
+ if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
+ !adapter->eeprom_wol) {
+ wol->supported = 0;
+ break;
+ }
+
+ retval = 0;
+ }
+
+ return retval;
+}
+
+static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = WAKE_UCAST | WAKE_MCAST |
+ WAKE_BCAST | WAKE_MAGIC |
+ WAKE_PHY;
+ wol->wolopts = 0;
+
+ /* this function will set ->supported = 0 and return 1 if wol is not
+ * supported by this hardware */
+ if (igb_wol_exclusion(adapter, wol) ||
+ !device_can_wakeup(&adapter->pdev->dev))
+ return;
+
+ /* apply any specific unsupported masks here */
+ switch (adapter->hw.device_id) {
+ default:
+ break;
+ }
+
+ if (adapter->wol & E1000_WUFC_EX)
+ wol->wolopts |= WAKE_UCAST;
+ if (adapter->wol & E1000_WUFC_MC)
+ wol->wolopts |= WAKE_MCAST;
+ if (adapter->wol & E1000_WUFC_BC)
+ wol->wolopts |= WAKE_BCAST;
+ if (adapter->wol & E1000_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+ if (adapter->wol & E1000_WUFC_LNKC)
+ wol->wolopts |= WAKE_PHY;
+}
+
+static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
+ return -EOPNOTSUPP;
+
+ if (igb_wol_exclusion(adapter, wol) ||
+ !device_can_wakeup(&adapter->pdev->dev))
+ return wol->wolopts ? -EOPNOTSUPP : 0;
+
+ /* these settings will always override what we currently have */
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wol |= E1000_WUFC_EX;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wol |= E1000_WUFC_MC;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wol |= E1000_WUFC_BC;
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= E1000_WUFC_MAG;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wol |= E1000_WUFC_LNKC;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
+/* bit defines for adapter->led_status */
+#define IGB_LED_ON 0
+
+static int igb_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ igb_blink_led(hw);
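+ /* a positive return value asks the ethtool core to drive the
+ * blink timing via ETHTOOL_ID_ON/ETHTOOL_ID_OFF callbacks */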
+ return 2;
+ case ETHTOOL_ID_ON:
+ igb_blink_led(hw);
+ break;
+ case ETHTOOL_ID_OFF:
+ igb_led_off(hw);
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ igb_led_off(hw);
+ clear_bit(IGB_LED_ON, &adapter->led_status);
+ igb_cleanup_led(hw);
+ break;
+ }
+
+ return 0;
+}
+
+static int igb_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ int i;
+
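+ /* accepted values: 0 disables moderation, 1 and 3 select dynamic
+ * modes, otherwise usecs within [IGB_MIN_ITR_USECS,
+ * IGB_MAX_ITR_USECS]; 2 is reserved */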
+ if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
+ ((ec->rx_coalesce_usecs > 3) &&
+ (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
+ (ec->rx_coalesce_usecs == 2))
+ return -EINVAL;
+
+ if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
+ ((ec->tx_coalesce_usecs > 3) &&
+ (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
+ (ec->tx_coalesce_usecs == 2))
+ return -EINVAL;
+
+ if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
+ return -EINVAL;
+
+ /* If ITR is disabled, disable DMAC */
+ if (ec->rx_coalesce_usecs == 0) {
+ if (adapter->flags & IGB_FLAG_DMAC)
+ adapter->flags &= ~IGB_FLAG_DMAC;
+ }
+
+ /* store the Rx interval: values of 1-3 select dynamic modes,
+ * larger values are kept scaled by 4 (usecs << 2) */
+ if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs;
+ else
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
+
+ /* with paired queues Tx shares the Rx setting; otherwise store
+ * the Tx interval the same way */
+ if (adapter->flags & IGB_FLAG_QUEUE_PAIRS)
+ adapter->tx_itr_setting = adapter->rx_itr_setting;
+ else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs;
+ else
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+ q_vector->tx_work_limit = adapter->tx_work_limit;
+ if (q_vector->rx_ring)
+ q_vector->itr_val = adapter->rx_itr_setting;
+ else
+ q_vector->itr_val = adapter->tx_itr_setting;
+ if (q_vector->itr_val && q_vector->itr_val <= 3)
+ q_vector->itr_val = IGB_START_ITR;
+ q_vector->set_itr = 1;
+ }
+
+ return 0;
+}
+
+static int igb_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->rx_itr_setting <= 3)
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting;
+ else
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+
+ if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
+ if (adapter->tx_itr_setting <= 3)
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+ else
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
+ }
+
+ return 0;
+}
+
+static int igb_nway_reset(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ if (netif_running(netdev))
+ igb_reinit_locked(adapter);
+ return 0;
+}
+
+static int igb_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return IGB_STATS_LEN;
+ case ETH_SS_TEST:
+ return IGB_TEST_LEN;
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static void igb_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct rtnl_link_stats64 *net_stats = &adapter->stats64;
+ unsigned int start;
+ struct igb_ring *ring;
+ int i, j;
+ char *p;
+
+ spin_lock(&adapter->stats64_lock);
+ igb_update_stats(adapter, net_stats);
+
+ for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
+ p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
+ data[i] = (igb_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) {
+ p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset;
+ data[i] = (igb_gstrings_net_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
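+ /* the u64_stats begin/retry pairs below guard against torn
+ * 64-bit reads on 32-bit hosts */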
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ u64 restart2;
+
+ ring = adapter->tx_ring[j];
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+ data[i] = ring->tx_stats.packets;
+ data[i+1] = ring->tx_stats.bytes;
+ data[i+2] = ring->tx_stats.restart_queue;
+ } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->tx_syncp2);
+ restart2 = ring->tx_stats.restart_queue2;
+ } while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start));
+ data[i+2] += restart2;
+
+ i += IGB_TX_QUEUE_STATS_LEN;
+ }
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ ring = adapter->rx_ring[j];
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+ data[i] = ring->rx_stats.packets;
+ data[i+1] = ring->rx_stats.bytes;
+ data[i+2] = ring->rx_stats.drops;
+ data[i+3] = ring->rx_stats.csum_err;
+ data[i+4] = ring->rx_stats.alloc_failed;
+ } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+ i += IGB_RX_QUEUE_STATS_LEN;
+ }
+ spin_unlock(&adapter->stats64_lock);
+}
+
+static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *igb_gstrings_test,
+ IGB_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, igb_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) {
+ memcpy(p, igb_gstrings_net_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ sprintf(p, "tx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_restart", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ sprintf(p, "rx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_drops", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_csum_err", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_alloc_failed", i);
+ p += ETH_GSTRING_LEN;
+ }
+/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
+ break;
+ }
+}
+
+static const struct ethtool_ops igb_ethtool_ops = {
+ .get_settings = igb_get_settings,
+ .set_settings = igb_set_settings,
+ .get_drvinfo = igb_get_drvinfo,
+ .get_regs_len = igb_get_regs_len,
+ .get_regs = igb_get_regs,
+ .get_wol = igb_get_wol,
+ .set_wol = igb_set_wol,
+ .get_msglevel = igb_get_msglevel,
+ .set_msglevel = igb_set_msglevel,
+ .nway_reset = igb_nway_reset,
+ .get_link = igb_get_link,
+ .get_eeprom_len = igb_get_eeprom_len,
+ .get_eeprom = igb_get_eeprom,
+ .set_eeprom = igb_set_eeprom,
+ .get_ringparam = igb_get_ringparam,
+ .set_ringparam = igb_set_ringparam,
+ .get_pauseparam = igb_get_pauseparam,
+ .set_pauseparam = igb_set_pauseparam,
+ .self_test = igb_diag_test,
+ .get_strings = igb_get_strings,
+ .set_phys_id = igb_set_phys_id,
+ .get_sset_count = igb_get_sset_count,
+ .get_ethtool_stats = igb_get_ethtool_stats,
+ .get_coalesce = igb_get_coalesce,
+ .set_coalesce = igb_set_coalesce,
+};
+
+void igb_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
new file mode 100644
index 000000000000..862dd7c0cc70
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -0,0 +1,6927 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/netdevice.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/net_tstamp.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/if_ether.h>
+#include <linux/aer.h>
+#include <linux/prefetch.h>
+#ifdef CONFIG_IGB_DCA
+#include <linux/dca.h>
+#endif
+#include "igb.h"
+
+#define MAJ 3
+#define MIN 0
+#define BUILD 6
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
+__stringify(BUILD) "-k"
+char igb_driver_name[] = "igb";
+char igb_driver_version[] = DRV_VERSION;
+static const char igb_driver_string[] =
+ "Intel(R) Gigabit Ethernet Network Driver";
+static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";
+
+static const struct e1000_info *igb_info_tbl[] = {
+ [board_82575] = &e1000_82575_info,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
+ /* required last entry */
+ {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
+
+void igb_reset(struct igb_adapter *);
+static int igb_setup_all_tx_resources(struct igb_adapter *);
+static int igb_setup_all_rx_resources(struct igb_adapter *);
+static void igb_free_all_tx_resources(struct igb_adapter *);
+static void igb_free_all_rx_resources(struct igb_adapter *);
+static void igb_setup_mrqc(struct igb_adapter *);
+static int igb_probe(struct pci_dev *, const struct pci_device_id *);
+static void __devexit igb_remove(struct pci_dev *pdev);
+static void igb_init_hw_timer(struct igb_adapter *adapter);
+static int igb_sw_init(struct igb_adapter *);
+static int igb_open(struct net_device *);
+static int igb_close(struct net_device *);
+static void igb_configure_tx(struct igb_adapter *);
+static void igb_configure_rx(struct igb_adapter *);
+static void igb_clean_all_tx_rings(struct igb_adapter *);
+static void igb_clean_all_rx_rings(struct igb_adapter *);
+static void igb_clean_tx_ring(struct igb_ring *);
+static void igb_clean_rx_ring(struct igb_ring *);
+static void igb_set_rx_mode(struct net_device *);
+static void igb_update_phy_info(unsigned long);
+static void igb_watchdog(unsigned long);
+static void igb_watchdog_task(struct work_struct *);
+static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
+static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
+static int igb_change_mtu(struct net_device *, int);
+static int igb_set_mac(struct net_device *, void *);
+static void igb_set_uta(struct igb_adapter *adapter);
+static irqreturn_t igb_intr(int irq, void *);
+static irqreturn_t igb_intr_msi(int irq, void *);
+static irqreturn_t igb_msix_other(int irq, void *);
+static irqreturn_t igb_msix_ring(int irq, void *);
+#ifdef CONFIG_IGB_DCA
+static void igb_update_dca(struct igb_q_vector *);
+static void igb_setup_dca(struct igb_adapter *);
+#endif /* CONFIG_IGB_DCA */
+static int igb_poll(struct napi_struct *, int);
+static bool igb_clean_tx_irq(struct igb_q_vector *);
+static bool igb_clean_rx_irq(struct igb_q_vector *, int);
+static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
+static void igb_tx_timeout(struct net_device *);
+static void igb_reset_task(struct work_struct *);
+static void igb_vlan_mode(struct net_device *netdev, u32 features);
+static void igb_vlan_rx_add_vid(struct net_device *, u16);
+static void igb_vlan_rx_kill_vid(struct net_device *, u16);
+static void igb_restore_vlan(struct igb_adapter *);
+static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
+static void igb_ping_all_vfs(struct igb_adapter *);
+static void igb_msg_task(struct igb_adapter *);
+static void igb_vmm_control(struct igb_adapter *);
+static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
+static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
+static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
+static int igb_ndo_set_vf_vlan(struct net_device *netdev,
+ int vf, u16 vlan, u8 qos);
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
+ struct ifla_vf_info *ivi);
+static void igb_check_vf_rate_limit(struct igb_adapter *);
+
+#ifdef CONFIG_PM
+static int igb_suspend(struct pci_dev *, pm_message_t);
+static int igb_resume(struct pci_dev *);
+#endif
+static void igb_shutdown(struct pci_dev *);
+#ifdef CONFIG_IGB_DCA
+static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
+static struct notifier_block dca_notifier = {
+ .notifier_call = igb_notify_dca,
+ .next = NULL,
+ .priority = 0
+};
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* for netdump / net console */
+static void igb_netpoll(struct net_device *);
+#endif
+#ifdef CONFIG_PCI_IOV
+static unsigned int max_vfs = 0;
+module_param(max_vfs, uint, 0);
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
+ "per physical function");
+#endif /* CONFIG_PCI_IOV */
+
+static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
+static void igb_io_resume(struct pci_dev *);
+
+static struct pci_error_handlers igb_err_handler = {
+ .error_detected = igb_io_error_detected,
+ .slot_reset = igb_io_slot_reset,
+ .resume = igb_io_resume,
+};
+
+static struct pci_driver igb_driver = {
+ .name = igb_driver_name,
+ .id_table = igb_pci_tbl,
+ .probe = igb_probe,
+ .remove = __devexit_p(igb_remove),
+#ifdef CONFIG_PM
+ /* Power Management Hooks */
+ .suspend = igb_suspend,
+ .resume = igb_resume,
+#endif
+ .shutdown = igb_shutdown,
+ .err_handler = &igb_err_handler
+};
+
+MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
+MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+struct igb_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+static const struct igb_reg_info igb_reg_info_tbl[] = {
+
+ /* General Registers */
+ {E1000_CTRL, "CTRL"},
+ {E1000_STATUS, "STATUS"},
+ {E1000_CTRL_EXT, "CTRL_EXT"},
+
+ /* Interrupt Registers */
+ {E1000_ICR, "ICR"},
+
+ /* RX Registers */
+ {E1000_RCTL, "RCTL"},
+ {E1000_RDLEN(0), "RDLEN"},
+ {E1000_RDH(0), "RDH"},
+ {E1000_RDT(0), "RDT"},
+ {E1000_RXDCTL(0), "RXDCTL"},
+ {E1000_RDBAL(0), "RDBAL"},
+ {E1000_RDBAH(0), "RDBAH"},
+
+ /* TX Registers */
+ {E1000_TCTL, "TCTL"},
+ {E1000_TDBAL(0), "TDBAL"},
+ {E1000_TDBAH(0), "TDBAH"},
+ {E1000_TDLEN(0), "TDLEN"},
+ {E1000_TDH(0), "TDH"},
+ {E1000_TDT(0), "TDT"},
+ {E1000_TXDCTL(0), "TXDCTL"},
+ {E1000_TDFH, "TDFH"},
+ {E1000_TDFT, "TDFT"},
+ {E1000_TDFHS, "TDFHS"},
+ {E1000_TDFPC, "TDFPC"},
+
+ /* List Terminator */
+ {}
+};
+
+/*
+ * igb_regdump - register printout routine
+ */
+static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
+{
+ int n = 0;
+ char rname[16];
+ u32 regs[8];
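+	/* regs[] holds per-queue values; only entries 0-3 are filled below */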
+
+ switch (reginfo->ofs) {
+ case E1000_RDLEN(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDLEN(n));
+ break;
+ case E1000_RDH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDH(n));
+ break;
+ case E1000_RDT(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDT(n));
+ break;
+ case E1000_RXDCTL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RXDCTL(n));
+ break;
+ case E1000_RDBAL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDBAL(n));
+ break;
+ case E1000_RDBAH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDBAH(n));
+ break;
+ case E1000_TDBAL(0):
+ for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDBAL(n));
+ break;
+ case E1000_TDBAH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDBAH(n));
+ break;
+ case E1000_TDLEN(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDLEN(n));
+ break;
+ case E1000_TDH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDH(n));
+ break;
+ case E1000_TDT(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDT(n));
+ break;
+ case E1000_TXDCTL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TXDCTL(n));
+ break;
+ default:
+ printk(KERN_INFO "%-15s %08x\n",
+ reginfo->name, rd32(reginfo->ofs));
+ return;
+ }
+
+ snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
+ printk(KERN_INFO "%-15s ", rname);
+ for (n = 0; n < 4; n++)
+ printk(KERN_CONT "%08x ", regs[n]);
+ printk(KERN_CONT "\n");
+}
+
+/*
+ * igb_dump - Print registers, tx-rings and rx-rings
+ */
+static void igb_dump(struct igb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_reg_info *reginfo;
+ int n = 0;
+ struct igb_ring *tx_ring;
+ union e1000_adv_tx_desc *tx_desc;
+ struct my_u0 { u64 a; u64 b; } *u0;
+ struct igb_ring *rx_ring;
+ union e1000_adv_rx_desc *rx_desc;
+ u32 staterr;
+ int i = 0;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ printk(KERN_INFO "Device Name state "
+ "trans_start last_rx\n");
+ printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+ netdev->name,
+ netdev->state,
+ netdev->trans_start,
+ netdev->last_rx);
+ }
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ printk(KERN_INFO " Register Name Value\n");
+ for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ igb_regdump(hw, reginfo);
+ }
+
+ /* Print TX Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
+ " leng ntw timestamp\n");
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ struct igb_tx_buffer *buffer_info;
+ tx_ring = adapter->tx_ring[n];
+ buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
+ printk(KERN_INFO " %5d %5X %5X %016llX %04X %p %016llX\n",
+ n, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp);
+ }
+
+ /* Print TX Rings */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+ /* Transmit Descriptor Formats
+ *
+ * Advanced Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN |
+ * +--------------------------------------------------------------+
+ * 63 46 45 40 39 38 36 35 32 31 24 15 0
+ */
+
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "T [desc] [address 63:0 ] "
+ "[PlPOCIStDDM Ln] [bi->dma ] "
+ "leng ntw timestamp bi->skb\n");
+
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ struct igb_tx_buffer *buffer_info;
+ tx_desc = IGB_TX_DESC(tx_ring, i);
+ buffer_info = &tx_ring->tx_buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
+ " %04X %p %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp,
+ buffer_info->skb);
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC/U\n");
+ else if (i == tx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1, phys_to_virt(buffer_info->dma),
+ buffer_info->length, true);
+ }
+ }
+
+ /* Print RX Rings Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ printk(KERN_INFO " %5d %5X %5X\n", n,
+ rx_ring->next_to_use, rx_ring->next_to_clean);
+ }
+
+ /* Print RX Rings */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+
+ /* Advanced Receive Descriptor (Read) Format
+ * 63 1 0
+ * +-----------------------------------------------------+
+ * 0 | Packet Buffer Address [63:1] |A0/NSE|
+ * +----------------------------------------------+------+
+ * 8 | Header Buffer Address [63:1] | DD |
+ * +-----------------------------------------------------+
+ *
+ *
+ * Advanced Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 30 21 20 17 16 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
+ * | Checksum Ident | | | | Type | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "R [desc] [ PktBuf A0] "
+ "[ HeadBuf DD] [bi->dma ] [bi->skb] "
+ "<-- Adv Rx Read format\n");
+ printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
+ "[vl er S cks ln] ---------------- [bi->skb] "
+ "<-- Adv Rx Write-Back format\n");
+
+ for (i = 0; i < rx_ring->count; i++) {
+ struct igb_rx_buffer *buffer_info;
+ buffer_info = &rx_ring->rx_buffer_info[i];
+ rx_desc = IGB_RX_DESC(rx_ring, i);
+ u0 = (struct my_u0 *)rx_desc;
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ if (staterr & E1000_RXD_STAT_DD) {
+ /* Descriptor Done */
+ printk(KERN_INFO "RWB[0x%03X] %016llX "
+ "%016llX ---------------- %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ buffer_info->skb);
+ } else {
+ printk(KERN_INFO "R [0x%03X] %016llX "
+ "%016llX %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)buffer_info->dma,
+ buffer_info->skb);
+
+ if (netif_msg_pktdata(adapter)) {
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1,
+ phys_to_virt(buffer_info->dma),
+ IGB_RX_HDR_LEN, true);
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1,
+ phys_to_virt(
+ buffer_info->page_dma +
+ buffer_info->page_offset),
+ PAGE_SIZE/2, true);
+ }
+ }
+
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ }
+ }
+
+exit:
+ return;
+}
+
+
+/**
+ * igb_read_clock - read raw cycle counter (to be used by time counter)
+ */
+static cycle_t igb_read_clock(const struct cyclecounter *tc)
+{
+ struct igb_adapter *adapter =
+ container_of(tc, struct igb_adapter, cycles);
+ struct e1000_hw *hw = &adapter->hw;
+ u64 stamp = 0;
+ int shift = 0;
+
+ /*
+ * The timestamp latches on lowest register read. For the 82580
+ * the lowest register is SYSTIMR instead of SYSTIML. However we never
+	 * adjusted TIMINCA, so SYSTIMR will just read as all 0s; ignore it.
+ */
+ if (hw->mac.type == e1000_82580) {
+ stamp = rd32(E1000_SYSTIMR) >> 8;
+ shift = IGB_82580_TSYNC_SHIFT;
+ }
+
+ stamp |= (u64)rd32(E1000_SYSTIML) << shift;
+ stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
+ return stamp;
+}
+
+/**
+ * igb_get_hw_dev - return device
+ * used by hardware layer to print debugging information
+ **/
+struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
+{
+ struct igb_adapter *adapter = hw->back;
+ return adapter->netdev;
+}
+
+/**
+ * igb_init_module - Driver Registration Routine
+ *
+ * igb_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init igb_init_module(void)
+{
+ int ret;
+ printk(KERN_INFO "%s - version %s\n",
+ igb_driver_string, igb_driver_version);
+
+ printk(KERN_INFO "%s\n", igb_copyright);
+
+#ifdef CONFIG_IGB_DCA
+ dca_register_notify(&dca_notifier);
+#endif
+ ret = pci_register_driver(&igb_driver);
+ return ret;
+}
+
+module_init(igb_init_module);
+
+/**
+ * igb_exit_module - Driver Exit Cleanup Routine
+ *
+ * igb_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit igb_exit_module(void)
+{
+#ifdef CONFIG_IGB_DCA
+ dca_unregister_notify(&dca_notifier);
+#endif
+ pci_unregister_driver(&igb_driver);
+}
+
+module_exit(igb_exit_module);
+
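+/*
+ * Q_IDX_82576(i) interleaves PF queues across the two banks of eight:
+ * even i maps into queues 0-7 and odd i into queues 8-15, e.g.
+ * 0 -> 0, 1 -> 8, 2 -> 1, 3 -> 9.
+ */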
+#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
+/**
+ * igb_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ **/
+static void igb_cache_ring_register(struct igb_adapter *adapter)
+{
+ int i = 0, j = 0;
+ u32 rbase_offset = adapter->vfs_allocated_count;
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ /* The queues are allocated for virtualization such that VF 0
+ * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
+ * In order to avoid collision we start at the first free queue
+ * and continue consuming queues in the same sequence
+ */
+ if (adapter->vfs_allocated_count) {
+ for (; i < adapter->rss_queues; i++)
+ adapter->rx_ring[i]->reg_idx = rbase_offset +
+ Q_IDX_82576(i);
+ }
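+		/* fall through - remaining queues use the default linear mapping */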
+ case e1000_82575:
+ case e1000_82580:
+ case e1000_i350:
+ default:
+ for (; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = rbase_offset + i;
+ for (; j < adapter->num_tx_queues; j++)
+ adapter->tx_ring[j]->reg_idx = rbase_offset + j;
+ break;
+ }
+}
+
+static void igb_free_queues(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ kfree(adapter->tx_ring[i]);
+ adapter->tx_ring[i] = NULL;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ kfree(adapter->rx_ring[i]);
+ adapter->rx_ring[i] = NULL;
+ }
+ adapter->num_rx_queues = 0;
+ adapter->num_tx_queues = 0;
+}
+
+/**
+ * igb_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time.
+ **/
+static int igb_alloc_queues(struct igb_adapter *adapter)
+{
+ struct igb_ring *ring;
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+ if (!ring)
+ goto err;
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = i;
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+ /* For 82575, context index must be unique per ring. */
+ if (adapter->hw.mac.type == e1000_82575)
+ ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
+ adapter->tx_ring[i] = ring;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+ if (!ring)
+ goto err;
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = i;
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+ ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
+ /* set flag indicating ring supports SCTP checksum offload */
+ if (adapter->hw.mac.type >= e1000_82576)
+ ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
+ adapter->rx_ring[i] = ring;
+ }
+
+ igb_cache_ring_register(adapter);
+
+ return 0;
+
+err:
+ igb_free_queues(adapter);
+
+ return -ENOMEM;
+}
+
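+/* marker for a vector with no Rx or Tx queue of that type assigned */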
+#define IGB_N0_QUEUE -1
+static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
+{
+ u32 msixbm = 0;
+ struct igb_adapter *adapter = q_vector->adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ivar, index;
+ int rx_queue = IGB_N0_QUEUE;
+ int tx_queue = IGB_N0_QUEUE;
+
+ if (q_vector->rx_ring)
+ rx_queue = q_vector->rx_ring->reg_idx;
+ if (q_vector->tx_ring)
+ tx_queue = q_vector->tx_ring->reg_idx;
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+		/* The 82575 assigns vectors using a bitmask, which matches the
+		 * bitmask for the EICR/EIMS/EIMC registers. To assign one
+		 * or more queues to a vector, we write the appropriate bits
+		 * into the MSIXBM register for that vector. */
+ if (rx_queue > IGB_N0_QUEUE)
+ msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
+ if (tx_queue > IGB_N0_QUEUE)
+ msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
+ if (!adapter->msix_entries && msix_vector == 0)
+ msixbm |= E1000_EIMS_OTHER;
+ array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
+ q_vector->eims_value = msixbm;
+ break;
+ case e1000_82576:
+		/* The 82576 uses a table-based method for assigning vectors.
+		 * Each queue has a single entry in the table to which we write
+		 * a vector number along with a "valid" bit. Sadly, the layout
+		 * of the table is somewhat counterintuitive. */
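+		/* Each IVAR0 entry n packs four vector slots, one per byte:
+		 * byte 0 = Rx queue n, byte 1 = Tx queue n,
+		 * byte 2 = Rx queue n+8, byte 3 = Tx queue n+8. */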
+ if (rx_queue > IGB_N0_QUEUE) {
+ index = (rx_queue & 0x7);
+ ivar = array_rd32(E1000_IVAR0, index);
+ if (rx_queue < 8) {
+ /* vector goes into low byte of register */
+ ivar = ivar & 0xFFFFFF00;
+ ivar |= msix_vector | E1000_IVAR_VALID;
+ } else {
+ /* vector goes into third byte of register */
+ ivar = ivar & 0xFF00FFFF;
+ ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
+ }
+ array_wr32(E1000_IVAR0, index, ivar);
+ }
+ if (tx_queue > IGB_N0_QUEUE) {
+ index = (tx_queue & 0x7);
+ ivar = array_rd32(E1000_IVAR0, index);
+ if (tx_queue < 8) {
+ /* vector goes into second byte of register */
+ ivar = ivar & 0xFFFF00FF;
+ ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
+ } else {
+ /* vector goes into high byte of register */
+ ivar = ivar & 0x00FFFFFF;
+ ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
+ }
+ array_wr32(E1000_IVAR0, index, ivar);
+ }
+ q_vector->eims_value = 1 << msix_vector;
+ break;
+ case e1000_82580:
+ case e1000_i350:
+		/* The 82580 uses the same table-based approach as the 82576,
+		 * but has only half as many entries: each IVAR entry serves a
+		 * pair of queues, and the queue's low bit selects which bytes
+		 * of the entry hold the vector. */
+ if (rx_queue > IGB_N0_QUEUE) {
+ index = (rx_queue >> 1);
+ ivar = array_rd32(E1000_IVAR0, index);
+ if (rx_queue & 0x1) {
+ /* vector goes into third byte of register */
+ ivar = ivar & 0xFF00FFFF;
+ ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
+ } else {
+ /* vector goes into low byte of register */
+ ivar = ivar & 0xFFFFFF00;
+ ivar |= msix_vector | E1000_IVAR_VALID;
+ }
+ array_wr32(E1000_IVAR0, index, ivar);
+ }
+ if (tx_queue > IGB_N0_QUEUE) {
+ index = (tx_queue >> 1);
+ ivar = array_rd32(E1000_IVAR0, index);
+ if (tx_queue & 0x1) {
+ /* vector goes into high byte of register */
+ ivar = ivar & 0x00FFFFFF;
+ ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
+ } else {
+ /* vector goes into second byte of register */
+ ivar = ivar & 0xFFFF00FF;
+ ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
+ }
+ array_wr32(E1000_IVAR0, index, ivar);
+ }
+ q_vector->eims_value = 1 << msix_vector;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ /* add q_vector eims value to global eims_enable_mask */
+ adapter->eims_enable_mask |= q_vector->eims_value;
+
+ /* configure q_vector to set itr on first interrupt */
+ q_vector->set_itr = 1;
+}
+
+/**
+ * igb_configure_msix - Configure MSI-X hardware
+ *
+ * igb_configure_msix sets up the hardware to properly
+ * generate MSI-X interrupts.
+ **/
+static void igb_configure_msix(struct igb_adapter *adapter)
+{
+ u32 tmp;
+ int i, vector = 0;
+ struct e1000_hw *hw = &adapter->hw;
+
+ adapter->eims_enable_mask = 0;
+
+ /* set vector for other causes, i.e. link changes */
+ switch (hw->mac.type) {
+ case e1000_82575:
+ tmp = rd32(E1000_CTRL_EXT);
+ /* enable MSI-X PBA support*/
+ tmp |= E1000_CTRL_EXT_PBA_CLR;
+
+ /* Auto-Mask interrupts upon ICR read. */
+ tmp |= E1000_CTRL_EXT_EIAME;
+ tmp |= E1000_CTRL_EXT_IRCA;
+
+ wr32(E1000_CTRL_EXT, tmp);
+
+ /* enable msix_other interrupt */
+ array_wr32(E1000_MSIXBM(0), vector++,
+ E1000_EIMS_OTHER);
+ adapter->eims_other = E1000_EIMS_OTHER;
+
+ break;
+
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ /* Turn on MSI-X capability first, or our settings
+ * won't stick. And it will take days to debug. */
+ wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
+ E1000_GPIE_PBA | E1000_GPIE_EIAME |
+ E1000_GPIE_NSICR);
+
+ /* enable msix_other interrupt */
+ adapter->eims_other = 1 << vector;
+ tmp = (vector++ | E1000_IVAR_VALID) << 8;
+
+ wr32(E1000_IVAR_MISC, tmp);
+ break;
+ default:
+ /* do nothing, since nothing else supports MSI-X */
+ break;
+ } /* switch (hw->mac.type) */
+
+ adapter->eims_enable_mask |= adapter->eims_other;
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ igb_assign_vector(adapter->q_vector[i], vector++);
+
+ wrfl();
+}
+
+/**
+ * igb_request_msix - Initialize MSI-X interrupts
+ *
+ * igb_request_msix allocates MSI-X vectors and requests interrupts from the
+ * kernel.
+ **/
+static int igb_request_msix(struct igb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ int i, err = 0, vector = 0;
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ igb_msix_other, 0, netdev->name, adapter);
+ if (err)
+ goto out;
+ vector++;
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+
+ q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
+
+ if (q_vector->rx_ring && q_vector->tx_ring)
+ sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
+ q_vector->rx_ring->queue_index);
+ else if (q_vector->tx_ring)
+ sprintf(q_vector->name, "%s-tx-%u", netdev->name,
+ q_vector->tx_ring->queue_index);
+ else if (q_vector->rx_ring)
+ sprintf(q_vector->name, "%s-rx-%u", netdev->name,
+ q_vector->rx_ring->queue_index);
+ else
+ sprintf(q_vector->name, "%s-unused", netdev->name);
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ igb_msix_ring, 0, q_vector->name,
+ q_vector);
+ if (err)
+ goto out;
+ vector++;
+ }
+
+ igb_configure_msix(adapter);
+ return 0;
+out:
+ return err;
+}
+
+static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
+{
+ if (adapter->msix_entries) {
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
+ pci_disable_msi(adapter->pdev);
+ }
+}
+
+/**
+ * igb_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void igb_free_q_vectors(struct igb_adapter *adapter)
+{
+ int v_idx;
+
+ for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+ adapter->q_vector[v_idx] = NULL;
+ if (!q_vector)
+ continue;
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ }
+ adapter->num_q_vectors = 0;
+}
+
+/**
+ * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
+ *
+ * This function resets the device so that it has no Rx queues, Tx queues,
+ * or MSI-X interrupts allocated.
+ */
+static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
+{
+ igb_free_queues(adapter);
+ igb_free_q_vectors(adapter);
+ igb_reset_interrupt_capability(adapter);
+}
+
+/**
+ * igb_set_interrupt_capability - set MSI or MSI-X if supported
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int igb_set_interrupt_capability(struct igb_adapter *adapter)
+{
+ int err;
+ int numvecs, i;
+
+ /* Number of supported queues. */
+ adapter->num_rx_queues = adapter->rss_queues;
+ if (adapter->vfs_allocated_count)
+ adapter->num_tx_queues = 1;
+ else
+ adapter->num_tx_queues = adapter->rss_queues;
+
+ /* start with one vector for every rx queue */
+ numvecs = adapter->num_rx_queues;
+
+ /* if tx handler is separate add 1 for every tx queue */
+ if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
+ numvecs += adapter->num_tx_queues;
+
+ /* store the number of vectors reserved for queues */
+ adapter->num_q_vectors = numvecs;
+
+ /* add 1 vector for link status interrupts */
+ numvecs++;
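+	/* e.g. 4 Rx queues with unpaired Tx handlers: 4 + 4 + 1 = 9 vectors */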
+ adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!adapter->msix_entries)
+ goto msi_only;
+
+ for (i = 0; i < numvecs; i++)
+ adapter->msix_entries[i].entry = i;
+
+ err = pci_enable_msix(adapter->pdev,
+ adapter->msix_entries,
+ numvecs);
+ if (err == 0)
+ goto out;
+
+ igb_reset_interrupt_capability(adapter);
+
+ /* If we can't do MSI-X, try MSI */
+msi_only:
+#ifdef CONFIG_PCI_IOV
+ /* disable SR-IOV for non MSI-X configurations */
+ if (adapter->vf_data) {
+ struct e1000_hw *hw = &adapter->hw;
+ /* disable iov and allow time for transactions to clear */
+ pci_disable_sriov(adapter->pdev);
+ msleep(500);
+
+ kfree(adapter->vf_data);
+ adapter->vf_data = NULL;
+ wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
+ wrfl();
+ msleep(100);
+ dev_info(&adapter->pdev->dev, "IOV Disabled\n");
+ }
+#endif
+ adapter->vfs_allocated_count = 0;
+ adapter->rss_queues = 1;
+ adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_q_vectors = 1;
+ if (!pci_enable_msi(adapter->pdev))
+ adapter->flags |= IGB_FLAG_HAS_MSI;
+out:
+ /* Notify the stack of the (possibly) reduced queue counts. */
+ netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
+ return netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_queues);
+}
+
+/**
+ * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int igb_alloc_q_vectors(struct igb_adapter *adapter)
+{
+ struct igb_q_vector *q_vector;
+ struct e1000_hw *hw = &adapter->hw;
+ int v_idx;
+
+ for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
+ q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
+ if (!q_vector)
+ goto err_out;
+ q_vector->adapter = adapter;
+ q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
+ q_vector->itr_val = IGB_START_ITR;
+ netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
+ adapter->q_vector[v_idx] = q_vector;
+ }
+ return 0;
+
+err_out:
+ igb_free_q_vectors(adapter);
+ return -ENOMEM;
+}
+
+static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
+ int ring_idx, int v_idx)
+{
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
+ q_vector->rx_ring = adapter->rx_ring[ring_idx];
+ q_vector->rx_ring->q_vector = q_vector;
+ q_vector->itr_val = adapter->rx_itr_setting;
+ if (q_vector->itr_val && q_vector->itr_val <= 3)
+ q_vector->itr_val = IGB_START_ITR;
+}
+
+static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
+ int ring_idx, int v_idx)
+{
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
+ q_vector->tx_ring = adapter->tx_ring[ring_idx];
+ q_vector->tx_ring->q_vector = q_vector;
+ q_vector->itr_val = adapter->tx_itr_setting;
+ q_vector->tx_work_limit = adapter->tx_work_limit;
+ if (q_vector->itr_val && q_vector->itr_val <= 3)
+ q_vector->itr_val = IGB_START_ITR;
+}
+
+/**
+ * igb_map_ring_to_vector - maps allocated queues to vectors
+ *
+ * This function maps the recently allocated queues to vectors.
+ **/
+static int igb_map_ring_to_vector(struct igb_adapter *adapter)
+{
+ int i;
+ int v_idx = 0;
+
+ if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
+ (adapter->num_q_vectors < adapter->num_tx_queues))
+ return -ENOMEM;
+
+ if (adapter->num_q_vectors >=
+ (adapter->num_rx_queues + adapter->num_tx_queues)) {
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ igb_map_rx_ring_to_vector(adapter, i, v_idx++);
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ igb_map_tx_ring_to_vector(adapter, i, v_idx++);
+ } else {
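+		/* fewer vectors than rings: let each Tx ring share a vector
+		 * with an Rx ring */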
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ if (i < adapter->num_tx_queues)
+ igb_map_tx_ring_to_vector(adapter, i, v_idx);
+ igb_map_rx_ring_to_vector(adapter, i, v_idx++);
+ }
+ for (; i < adapter->num_tx_queues; i++)
+ igb_map_tx_ring_to_vector(adapter, i, v_idx++);
+ }
+ return 0;
+}
+
+/**
+ * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
+ *
+ * This function initializes the interrupts and allocates all of the queues.
+ **/
+static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+
+ err = igb_set_interrupt_capability(adapter);
+ if (err)
+ return err;
+
+ err = igb_alloc_q_vectors(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ err = igb_alloc_queues(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ goto err_alloc_queues;
+ }
+
+ err = igb_map_ring_to_vector(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
+ goto err_map_queues;
+ }
+
+ return 0;
+err_map_queues:
+ igb_free_queues(adapter);
+err_alloc_queues:
+ igb_free_q_vectors(adapter);
+err_alloc_q_vectors:
+ igb_reset_interrupt_capability(adapter);
+ return err;
+}
+
+/**
+ * igb_request_irq - initialize interrupts
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int igb_request_irq(struct igb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err = 0;
+
+ if (adapter->msix_entries) {
+ err = igb_request_msix(adapter);
+ if (!err)
+ goto request_done;
+ /* fall back to MSI */
+ igb_clear_interrupt_scheme(adapter);
+ if (!pci_enable_msi(adapter->pdev))
+ adapter->flags |= IGB_FLAG_HAS_MSI;
+ igb_free_all_tx_resources(adapter);
+ igb_free_all_rx_resources(adapter);
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_queues = 1;
+ adapter->num_q_vectors = 1;
+ err = igb_alloc_q_vectors(adapter);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Unable to allocate memory for vectors\n");
+ goto request_done;
+ }
+ err = igb_alloc_queues(adapter);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Unable to allocate memory for queues\n");
+ igb_free_q_vectors(adapter);
+ goto request_done;
+ }
+ igb_setup_all_tx_resources(adapter);
+ igb_setup_all_rx_resources(adapter);
+ } else {
+ igb_assign_vector(adapter->q_vector[0], 0);
+ }
+
+ if (adapter->flags & IGB_FLAG_HAS_MSI) {
+ err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
+ netdev->name, adapter);
+ if (!err)
+ goto request_done;
+
+ /* fall back to legacy interrupts */
+ igb_reset_interrupt_capability(adapter);
+ adapter->flags &= ~IGB_FLAG_HAS_MSI;
+ }
+
+ err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
+ netdev->name, adapter);
+
+ if (err)
+ dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
+ err);
+
+request_done:
+ return err;
+}
+
+static void igb_free_irq(struct igb_adapter *adapter)
+{
+ if (adapter->msix_entries) {
+ int vector = 0, i;
+
+ free_irq(adapter->msix_entries[vector++].vector, adapter);
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+ free_irq(adapter->msix_entries[vector++].vector,
+ q_vector);
+ }
+ } else {
+ free_irq(adapter->pdev->irq, adapter);
+ }
+}
+
+/**
+ * igb_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void igb_irq_disable(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /*
+	 * We need to be careful when disabling interrupts. The VFs are also
+	 * mapped into these registers, and clearing the bits can cause
+	 * issues for the VF drivers, so we only clear what we set.
+ */
+ if (adapter->msix_entries) {
+ u32 regval = rd32(E1000_EIAM);
+ wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
+ wr32(E1000_EIMC, adapter->eims_enable_mask);
+ regval = rd32(E1000_EIAC);
+ wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
+ }
+
+ wr32(E1000_IAM, 0);
+ wr32(E1000_IMC, ~0);
+ wrfl();
+ if (adapter->msix_entries) {
+ int i;
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ synchronize_irq(adapter->msix_entries[i].vector);
+ } else {
+ synchronize_irq(adapter->pdev->irq);
+ }
+}
+
+/**
+ * igb_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static void igb_irq_enable(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (adapter->msix_entries) {
+ u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
+ u32 regval = rd32(E1000_EIAC);
+ wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
+ regval = rd32(E1000_EIAM);
+ wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
+ wr32(E1000_EIMS, adapter->eims_enable_mask);
+ if (adapter->vfs_allocated_count) {
+ wr32(E1000_MBVFIMR, 0xFF);
+ ims |= E1000_IMS_VMMB;
+ }
+ if (adapter->hw.mac.type == e1000_82580)
+ ims |= E1000_IMS_DRSTA;
+
+ wr32(E1000_IMS, ims);
+ } else {
+ wr32(E1000_IMS, IMS_ENABLE_MASK |
+ E1000_IMS_DRSTA);
+ wr32(E1000_IAM, IMS_ENABLE_MASK |
+ E1000_IMS_DRSTA);
+ }
+}
+
+static void igb_update_mng_vlan(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 vid = adapter->hw.mng_cookie.vlan_id;
+ u16 old_vid = adapter->mng_vlan_id;
+
+ if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+ /* add VID to filter table */
+ igb_vfta_set(hw, vid, true);
+ adapter->mng_vlan_id = vid;
+ } else {
+ adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
+ }
+
+ if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
+ (vid != old_vid) &&
+ !test_bit(old_vid, adapter->active_vlans)) {
+ /* remove VID from filter table */
+ igb_vfta_set(hw, old_vid, false);
+ }
+}
+
+/**
+ * igb_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded.
+ *
+ **/
+static void igb_release_hw_control(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_ext;
+
+ /* Let firmware take over control of h/w */
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ wr32(E1000_CTRL_EXT,
+ ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+}
+
+/**
+ * igb_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded.
+ *
+ **/
+static void igb_get_hw_control(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_ext;
+
+ /* Let firmware know the driver has taken over */
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ wr32(E1000_CTRL_EXT,
+ ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+}
+
+/**
+ * igb_configure - configure the hardware for RX and TX
+ * @adapter: private board structure
+ **/
+static void igb_configure(struct igb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ igb_get_hw_control(adapter);
+ igb_set_rx_mode(netdev);
+
+ igb_restore_vlan(adapter);
+
+ igb_setup_tctl(adapter);
+ igb_setup_mrqc(adapter);
+ igb_setup_rctl(adapter);
+
+ igb_configure_tx(adapter);
+ igb_configure_rx(adapter);
+
+ igb_rx_fifo_flush_82575(&adapter->hw);
+
+ /* call igb_desc_unused which always leaves
+ * at least 1 descriptor unused to make sure
+ * next_to_use != next_to_clean */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *ring = adapter->rx_ring[i];
+ igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
+ }
+}
+
+/**
+ * igb_power_up_link - Power up the phy/serdes link
+ * @adapter: address of board private structure
+ **/
+void igb_power_up_link(struct igb_adapter *adapter)
+{
+ if (adapter->hw.phy.media_type == e1000_media_type_copper)
+ igb_power_up_phy_copper(&adapter->hw);
+ else
+ igb_power_up_serdes_link_82575(&adapter->hw);
+}
+
+/**
+ * igb_power_down_link - Power down the phy/serdes link
+ * @adapter: address of board private structure
+ */
+static void igb_power_down_link(struct igb_adapter *adapter)
+{
+ if (adapter->hw.phy.media_type == e1000_media_type_copper)
+ igb_power_down_phy_copper_82575(&adapter->hw);
+ else
+ igb_shutdown_serdes_link_82575(&adapter->hw);
+}
+
+/**
+ * igb_up - Open the interface and prepare it to handle traffic
+ * @adapter: board private structure
+ **/
+int igb_up(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+
+ /* hardware has been reset, we need to reload some things */
+ igb_configure(adapter);
+
+ clear_bit(__IGB_DOWN, &adapter->state);
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+ napi_enable(&q_vector->napi);
+ }
+ if (adapter->msix_entries)
+ igb_configure_msix(adapter);
+ else
+ igb_assign_vector(adapter->q_vector[0], 0);
+
+ /* Clear any pending interrupts. */
+ rd32(E1000_ICR);
+ igb_irq_enable(adapter);
+
+ /* notify VFs that reset has been completed */
+ if (adapter->vfs_allocated_count) {
+ u32 reg_data = rd32(E1000_CTRL_EXT);
+ reg_data |= E1000_CTRL_EXT_PFRSTD;
+ wr32(E1000_CTRL_EXT, reg_data);
+ }
+
+ netif_tx_start_all_queues(adapter->netdev);
+
+ /* start the watchdog. */
+ hw->mac.get_link_status = 1;
+ schedule_work(&adapter->watchdog_task);
+
+ return 0;
+}
+
+void igb_down(struct igb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 tctl, rctl;
+ int i;
+
+ /* signal that we're down so the interrupt handler does not
+ * reschedule our watchdog timer */
+ set_bit(__IGB_DOWN, &adapter->state);
+
+ /* disable receives in the hardware */
+ rctl = rd32(E1000_RCTL);
+ wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
+ /* flush and sleep below */
+
+ netif_tx_stop_all_queues(netdev);
+
+ /* disable transmits in the hardware */
+ tctl = rd32(E1000_TCTL);
+ tctl &= ~E1000_TCTL_EN;
+ wr32(E1000_TCTL, tctl);
+ /* flush both disables and wait for them to finish */
+ wrfl();
+ msleep(10);
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+ napi_disable(&q_vector->napi);
+ }
+
+ igb_irq_disable(adapter);
+
+ del_timer_sync(&adapter->watchdog_timer);
+ del_timer_sync(&adapter->phy_info_timer);
+
+ netif_carrier_off(netdev);
+
+ /* record the stats before reset*/
+ spin_lock(&adapter->stats64_lock);
+ igb_update_stats(adapter, &adapter->stats64);
+ spin_unlock(&adapter->stats64_lock);
+
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+
+ if (!pci_channel_offline(adapter->pdev))
+ igb_reset(adapter);
+ igb_clean_all_tx_rings(adapter);
+ igb_clean_all_rx_rings(adapter);
+#ifdef CONFIG_IGB_DCA
+
+ /* since we reset the hardware DCA settings were cleared */
+ igb_setup_dca(adapter);
+#endif
+}
+
+void igb_reinit_locked(struct igb_adapter *adapter)
+{
+ WARN_ON(in_interrupt());
+ while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+ msleep(1);
+ igb_down(adapter);
+ igb_up(adapter);
+ clear_bit(__IGB_RESETTING, &adapter->state);
+}
+
+void igb_reset(struct igb_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_fc_info *fc = &hw->fc;
+ u32 pba = 0, tx_space, min_tx_space, min_rx_space;
+ u16 hwm;
+
+	/* Repartition the PBA for MTUs greater than 9k.
+	 * CTRL.RST is required for the change to take effect.
+	 */
+ switch (mac->type) {
+ case e1000_i350:
+ case e1000_82580:
+ pba = rd32(E1000_RXPBS);
+ pba = igb_rxpbs_adjust_82580(pba);
+ break;
+ case e1000_82576:
+ pba = rd32(E1000_RXPBS);
+ pba &= E1000_RXPBS_SIZE_MASK_82576;
+ break;
+ case e1000_82575:
+ default:
+ pba = E1000_PBA_34K;
+ break;
+ }
+
+ if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+ (mac->type < e1000_82576)) {
+ /* adjust PBA for jumbo frames */
+ wr32(E1000_PBA, pba);
+
+ /* To maintain wire speed transmits, the Tx FIFO should be
+ * large enough to accommodate two full transmit packets,
+ * rounded up to the next 1KB and expressed in KB. Likewise,
+ * the Rx FIFO should be large enough to accommodate at least
+ * one full receive packet and is similarly rounded up and
+ * expressed in KB. */
+ pba = rd32(E1000_PBA);
+ /* upper 16 bits has Tx packet buffer allocation size in KB */
+ tx_space = pba >> 16;
+ /* lower 16 bits has Rx packet buffer allocation size in KB */
+ pba &= 0xffff;
+		/* the Tx FIFO also stores 16 bytes of information about each
+		 * packet, but does not include the Ethernet FCS because the
+		 * hardware appends it */
+ min_tx_space = (adapter->max_frame_size +
+ sizeof(union e1000_adv_tx_desc) -
+ ETH_FCS_LEN) * 2;
+ min_tx_space = ALIGN(min_tx_space, 1024);
+ min_tx_space >>= 10;
+ /* software strips receive CRC, so leave room for it */
+ min_rx_space = adapter->max_frame_size;
+ min_rx_space = ALIGN(min_rx_space, 1024);
+ min_rx_space >>= 10;
+
+ /* If current Tx allocation is less than the min Tx FIFO size,
+ * and the min Tx FIFO size is less than the current Rx FIFO
+ * allocation, take space away from current Rx allocation */
+ if (tx_space < min_tx_space &&
+ ((min_tx_space - tx_space) < pba)) {
+ pba = pba - (min_tx_space - tx_space);
+
+ /* if short on rx space, rx wins and must trump tx
+ * adjustment */
+ if (pba < min_rx_space)
+ pba = min_rx_space;
+ }
+ wr32(E1000_PBA, pba);
+ }
+
+ /* flow control settings */
+ /* The high water mark must be low enough to fit one full frame
+ * (or the size used for early receive) above it in the Rx FIFO.
+ * Set it to the lower of:
+ * - 90% of the Rx FIFO size, or
+	 *   - the full Rx FIFO size minus two full frames */
+ hwm = min(((pba << 10) * 9 / 10),
+ ((pba << 10) - 2 * adapter->max_frame_size));
+
+ fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
+ fc->low_water = fc->high_water - 16;
+ fc->pause_time = 0xFFFF;
+ fc->send_xon = 1;
+ fc->current_mode = fc->requested_mode;
+
+	/* disable transmit and receive for all VFs */
+ if (adapter->vfs_allocated_count) {
+ int i;
+ for (i = 0 ; i < adapter->vfs_allocated_count; i++)
+ adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
+
+ /* ping all the active vfs to let them know we are going down */
+ igb_ping_all_vfs(adapter);
+
+ /* disable transmits and receives */
+ wr32(E1000_VFRE, 0);
+ wr32(E1000_VFTE, 0);
+ }
+
+ /* Allow time for pending master requests to run */
+ hw->mac.ops.reset_hw(hw);
+ wr32(E1000_WUC, 0);
+
+ if (hw->mac.ops.init_hw(hw))
+ dev_err(&pdev->dev, "Hardware Error\n");
+ if (hw->mac.type > e1000_82580) {
+ if (adapter->flags & IGB_FLAG_DMAC) {
+ u32 reg;
+
+ /*
+			 * The DMA Coalescing high water mark needs to be
+			 * higher than the Rx threshold. The Rx threshold is
+			 * currently pba - 6, so we should use a high water
+			 * mark of pba - 4. */
+ hwm = (pba - 4) << 10;
+
+ reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT)
+ & E1000_DMACR_DMACTHR_MASK);
+
+			/* transition to L0s or L1 if available */
+ reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
+
+			/* watchdog timer ~= 1000 usec, in units of 32 usec */
+ reg |= (1000 >> 5);
+ wr32(E1000_DMACR, reg);
+
+			/* no lower threshold to disable coalescing
+			 * (smart FIFO); UTRESH = 0 */
+ wr32(E1000_DMCRTRH, 0);
+
+			/* write the DMA Coalescing high water mark computed above */
+ wr32(E1000_FCRTC, hwm);
+
+ /*
+			 * This sets the time to wait before requesting a
+			 * transition to a low power state to the number of
+			 * usecs needed to receive a 512 byte frame at
+			 * gigabit line rate.
+ */
+ reg = rd32(E1000_DMCTLX);
+ reg |= IGB_DMCTLX_DCFLUSH_DIS;
+
+ /* Delay 255 usec before entering Lx state. */
+ reg |= 0xFF;
+ wr32(E1000_DMCTLX, reg);
+
+ /* free space in Tx packet buffer to wake from DMAC */
+ wr32(E1000_DMCTXTH,
+ (IGB_MIN_TXPBSIZE -
+ (IGB_TX_BUF_4096 + adapter->max_frame_size))
+ >> 6);
+
+ /* make low power state decision controlled by DMAC */
+ reg = rd32(E1000_PCIEMISC);
+ reg |= E1000_PCIEMISC_LX_DECISION;
+ wr32(E1000_PCIEMISC, reg);
+ } /* end if IGB_FLAG_DMAC set */
+ }
+ if (hw->mac.type == e1000_82580) {
+ u32 reg = rd32(E1000_PCIEMISC);
+ wr32(E1000_PCIEMISC,
+ reg & ~E1000_PCIEMISC_LX_DECISION);
+ }
+ if (!netif_running(adapter->netdev))
+ igb_power_down_link(adapter);
+
+ igb_update_mng_vlan(adapter);
+
+ /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+ wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
+
+ igb_get_phy_info(hw);
+}
+
+static u32 igb_fix_features(struct net_device *netdev, u32 features)
+{
+ /*
+ * Since there is no support for separate rx/tx vlan accel
+ * enable/disable make sure tx flag is always in same state as rx.
+ */
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ return features;
+}
+
+static int igb_set_features(struct net_device *netdev, u32 features)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ int i;
+ u32 changed = netdev->features ^ features;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ if (features & NETIF_F_RXCSUM)
+ adapter->rx_ring[i]->flags |= IGB_RING_FLAG_RX_CSUM;
+ else
+ adapter->rx_ring[i]->flags &= ~IGB_RING_FLAG_RX_CSUM;
+ }
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ igb_vlan_mode(netdev, features);
+
+ return 0;
+}
+
+static const struct net_device_ops igb_netdev_ops = {
+ .ndo_open = igb_open,
+ .ndo_stop = igb_close,
+ .ndo_start_xmit = igb_xmit_frame,
+ .ndo_get_stats64 = igb_get_stats64,
+ .ndo_set_rx_mode = igb_set_rx_mode,
+ .ndo_set_mac_address = igb_set_mac,
+ .ndo_change_mtu = igb_change_mtu,
+ .ndo_do_ioctl = igb_ioctl,
+ .ndo_tx_timeout = igb_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
+ .ndo_set_vf_mac = igb_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
+ .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
+ .ndo_get_vf_config = igb_ndo_get_vf_config,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = igb_netpoll,
+#endif
+ .ndo_fix_features = igb_fix_features,
+ .ndo_set_features = igb_set_features,
+};
+
+/**
+ * igb_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in igb_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * igb_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit igb_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct igb_adapter *adapter;
+ struct e1000_hw *hw;
+ u16 eeprom_data = 0;
+ s32 ret_val;
+ static int global_quad_port_a; /* global quad port a indication */
+ const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
+ unsigned long mmio_start, mmio_len;
+ int err, pci_using_dac;
+ u16 eeprom_apme_mask = IGB_EEPROM_APME;
+ u8 part_str[E1000_PBANUM_LENGTH];
+
+ /* Catch broken hardware that put the wrong VF device ID in
+ * the PCIe SR-IOV capability.
+ */
+ if (pdev->is_virtfn) {
+ WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
+ pci_name(pdev), pdev->vendor, pdev->device);
+ return -EINVAL;
+ }
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ pci_using_dac = 0;
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err)
+ pci_using_dac = 1;
+ } else {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA "
+ "configuration, aborting\n");
+ goto err_dma;
+ }
+ }
+ }
+
+ err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+ IORESOURCE_MEM),
+ igb_driver_name);
+ if (err)
+ goto err_pci_reg;
+
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+
+ err = -ENOMEM;
+ netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
+ IGB_MAX_TX_QUEUES);
+ if (!netdev)
+ goto err_alloc_etherdev;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ hw = &adapter->hw;
+ hw->back = adapter;
+ adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
+
+ mmio_start = pci_resource_start(pdev, 0);
+ mmio_len = pci_resource_len(pdev, 0);
+
+ err = -EIO;
+ hw->hw_addr = ioremap(mmio_start, mmio_len);
+ if (!hw->hw_addr)
+ goto err_ioremap;
+
+ netdev->netdev_ops = &igb_netdev_ops;
+ igb_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+
+ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+ netdev->mem_start = mmio_start;
+ netdev->mem_end = mmio_start + mmio_len;
+
+ /* PCI config space info */
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->revision_id = pdev->revision;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+
+ /* Copy the default MAC, PHY and NVM function pointers */
+ memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
+ memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
+ memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
+ /* Initialize skew-specific constants */
+ err = ei->get_invariants(hw);
+ if (err)
+ goto err_sw_init;
+
+ /* setup the private structure */
+ err = igb_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+ igb_get_bus_info_pcie(hw);
+
+ hw->phy.autoneg_wait_to_complete = false;
+
+ /* Copper options */
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ hw->phy.mdix = AUTO_ALL_MODES;
+ hw->phy.disable_polarity_correction = false;
+ hw->phy.ms_type = e1000_ms_hw_default;
+ }
+
+ if (igb_check_reset_block(hw))
+ dev_info(&pdev->dev,
+ "PHY reset is blocked due to SOL/IDER session.\n");
+
+ netdev->hw_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_RX;
+
+ netdev->features = netdev->hw_features |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_FILTER;
+
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_IP_CSUM;
+ netdev->vlan_features |= NETIF_F_IPV6_CSUM;
+ netdev->vlan_features |= NETIF_F_SG;
+
+ if (pci_using_dac) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
+
+ if (hw->mac.type >= e1000_82576) {
+ netdev->hw_features |= NETIF_F_SCTP_CSUM;
+ netdev->features |= NETIF_F_SCTP_CSUM;
+ }
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
+
+ /* before reading the NVM, reset the controller to put the device in a
+ * known good starting state */
+ hw->mac.ops.reset_hw(hw);
+
+ /* make sure the NVM is good */
+ if (hw->nvm.ops.validate(hw) < 0) {
+ dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ /* copy the MAC address out of the NVM */
+ if (hw->mac.ops.read_mac_addr(hw))
+ dev_err(&pdev->dev, "NVM Read Error\n");
+
+ memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->perm_addr)) {
+ dev_err(&pdev->dev, "Invalid MAC Address\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ setup_timer(&adapter->watchdog_timer, igb_watchdog,
+ (unsigned long) adapter);
+ setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
+ (unsigned long) adapter);
+
+ INIT_WORK(&adapter->reset_task, igb_reset_task);
+ INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
+
+ /* Initialize link properties that are user-changeable */
+ adapter->fc_autoneg = true;
+ hw->mac.autoneg = true;
+ hw->phy.autoneg_advertised = 0x2f;
+
+ hw->fc.requested_mode = e1000_fc_default;
+ hw->fc.current_mode = e1000_fc_default;
+
+ igb_validate_mdi_setting(hw);
+
+	/* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
+	 * enable the ACPI Magic Packet filter.
+ */
+
+ if (hw->bus.func == 0)
+ hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+ else if (hw->mac.type >= e1000_82580)
+ hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+ NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+ &eeprom_data);
+ else if (hw->bus.func == 1)
+ hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+
+ if (eeprom_data & eeprom_apme_mask)
+ adapter->eeprom_wol |= E1000_WUFC_MAG;
+
+ /* now that we have the eeprom settings, apply the special cases where
+ * the eeprom may be wrong or the board simply won't support wake on
+ * lan on a particular port */
+ switch (pdev->device) {
+ case E1000_DEV_ID_82575GB_QUAD_COPPER:
+ adapter->eeprom_wol = 0;
+ break;
+ case E1000_DEV_ID_82575EB_FIBER_SERDES:
+ case E1000_DEV_ID_82576_FIBER:
+ case E1000_DEV_ID_82576_SERDES:
+ /* Wake events only supported on port A for dual fiber
+ * regardless of eeprom setting */
+ if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
+ adapter->eeprom_wol = 0;
+ break;
+ case E1000_DEV_ID_82576_QUAD_COPPER:
+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+ /* if quad port adapter, disable WoL on all but port A */
+ if (global_quad_port_a != 0)
+ adapter->eeprom_wol = 0;
+ else
+ adapter->flags |= IGB_FLAG_QUAD_PORT_A;
+ /* Reset for multiple quad port adapters */
+ if (++global_quad_port_a == 4)
+ global_quad_port_a = 0;
+ break;
+ }
+
+ /* initialize the wol settings based on the eeprom settings */
+ adapter->wol = adapter->eeprom_wol;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ /* reset the hardware with the new settings */
+ igb_reset(adapter);
+
+ /* let the f/w know that the h/w is now under the control of the
+ * driver. */
+ igb_get_hw_control(adapter);
+
+ strcpy(netdev->name, "eth%d");
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ igb_vlan_mode(netdev, netdev->features);
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+#ifdef CONFIG_IGB_DCA
+ if (dca_add_requester(&pdev->dev) == 0) {
+ adapter->flags |= IGB_FLAG_DCA_ENABLED;
+ dev_info(&pdev->dev, "DCA enabled\n");
+ igb_setup_dca(adapter);
+ }
+
+#endif
+ /* do hw tstamp init after resetting */
+ igb_init_hw_timer(adapter);
+
+ dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
+ /* print bus type/speed/width info */
+ dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
+ netdev->name,
+ ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+ (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
+ "unknown"),
+ ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+ (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
+ (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
+ "unknown"),
+ netdev->dev_addr);
+
+ ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
+ if (ret_val)
+ strcpy(part_str, "Unknown");
+ dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
+ dev_info(&pdev->dev,
+ "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
+ adapter->msix_entries ? "MSI-X" :
+ (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
+ adapter->num_rx_queues, adapter->num_tx_queues);
+ switch (hw->mac.type) {
+ case e1000_i350:
+ igb_set_eee_i350(hw);
+ break;
+ default:
+ break;
+ }
+ return 0;
+
+err_register:
+ igb_release_hw_control(adapter);
+err_eeprom:
+ if (!igb_check_reset_block(hw))
+ igb_reset_phy(hw);
+
+ if (hw->flash_address)
+ iounmap(hw->flash_address);
+err_sw_init:
+ igb_clear_interrupt_scheme(adapter);
+ iounmap(hw->hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * igb_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * igb_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void __devexit igb_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ /*
+ * The watchdog timer may be rescheduled, so explicitly
+ * disable watchdog from being rescheduled.
+ */
+ set_bit(__IGB_DOWN, &adapter->state);
+ del_timer_sync(&adapter->watchdog_timer);
+ del_timer_sync(&adapter->phy_info_timer);
+
+ cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->watchdog_task);
+
+#ifdef CONFIG_IGB_DCA
+ if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
+ dev_info(&pdev->dev, "DCA disabled\n");
+ dca_remove_requester(&pdev->dev);
+ adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
+ wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
+ }
+#endif
+
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant. */
+ igb_release_hw_control(adapter);
+
+ unregister_netdev(netdev);
+
+ igb_clear_interrupt_scheme(adapter);
+
+#ifdef CONFIG_PCI_IOV
+ /* reclaim resources allocated to VFs */
+ if (adapter->vf_data) {
+ /* disable iov and allow time for transactions to clear */
+ pci_disable_sriov(pdev);
+ msleep(500);
+
+ kfree(adapter->vf_data);
+ adapter->vf_data = NULL;
+ wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
+ wrfl();
+ msleep(100);
+ dev_info(&pdev->dev, "IOV Disabled\n");
+ }
+#endif
+
+ iounmap(hw->hw_addr);
+ if (hw->flash_address)
+ iounmap(hw->flash_address);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
+ free_netdev(netdev);
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+/**
+ * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
+ * @adapter: board private structure to initialize
+ *
+ * This function initializes the vf specific data storage and then attempts to
+ * allocate the VFs. The reason for ordering it this way is because it is much
+ * more expensive time-wise to disable SR-IOV than it is to allocate and free
+ * the memory for the VFs.
+ **/
+static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
+{
+#ifdef CONFIG_PCI_IOV
+ struct pci_dev *pdev = adapter->pdev;
+
+ if (adapter->vfs_allocated_count) {
+ adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
+ sizeof(struct vf_data_storage),
+ GFP_KERNEL);
+ /* if allocation failed then we do not support SR-IOV */
+ if (!adapter->vf_data) {
+ adapter->vfs_allocated_count = 0;
+ dev_err(&pdev->dev, "Unable to allocate memory for VF "
+ "Data Storage\n");
+ }
+ }
+
+ if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
+ kfree(adapter->vf_data);
+ adapter->vf_data = NULL;
+#endif /* CONFIG_PCI_IOV */
+ adapter->vfs_allocated_count = 0;
+#ifdef CONFIG_PCI_IOV
+ } else {
+ unsigned char mac_addr[ETH_ALEN];
+ int i;
+ dev_info(&pdev->dev, "%d vfs allocated\n",
+ adapter->vfs_allocated_count);
+ for (i = 0; i < adapter->vfs_allocated_count; i++) {
+ random_ether_addr(mac_addr);
+ igb_set_vf_mac(adapter, i, mac_addr);
+ }
+ /* DMA Coalescing is not supported in IOV mode. */
+ if (adapter->flags & IGB_FLAG_DMAC)
+ adapter->flags &= ~IGB_FLAG_DMAC;
+ }
+#endif /* CONFIG_PCI_IOV */
+}
+
+
+/**
+ * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
+ * @adapter: board private structure to initialize
+ *
+ * igb_init_hw_timer initializes the function pointer and values for the hw
+ * timer found in hardware.
+ **/
+static void igb_init_hw_timer(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ switch (hw->mac.type) {
+ case e1000_i350:
+ case e1000_82580:
+ memset(&adapter->cycles, 0, sizeof(adapter->cycles));
+ adapter->cycles.read = igb_read_clock;
+ adapter->cycles.mask = CLOCKSOURCE_MASK(64);
+ adapter->cycles.mult = 1;
+ /*
+ * The 82580 timesync advances the system timer by 8ns every 8ns,
+ * and the value cannot be shifted. Instead we need to shift
+ * the registers to generate a 64bit timer value. As a result
+ * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
+ * 24 in order to generate a larger value for synchronization.
+ */
+ adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
+ /* disable system timer temporarily by setting bit 31 */
+ wr32(E1000_TSAUXC, 0x80000000);
+ wrfl();
+
+ /* Set registers so that rollover occurs soon to test this. */
+ wr32(E1000_SYSTIMR, 0x00000000);
+ wr32(E1000_SYSTIML, 0x80000000);
+ wr32(E1000_SYSTIMH, 0x000000FF);
+ wrfl();
+
+ /* enable system timer by clearing bit 31 */
+ wr32(E1000_TSAUXC, 0x0);
+ wrfl();
+
+ timecounter_init(&adapter->clock,
+ &adapter->cycles,
+ ktime_to_ns(ktime_get_real()));
+ /*
+ * Synchronize our NIC clock against system wall clock. NIC
+ * time stamp reading requires ~3us per sample, each sample
+ * was pretty stable even under load => only require 10
+ * samples for each offset comparison.
+ */
+ memset(&adapter->compare, 0, sizeof(adapter->compare));
+ adapter->compare.source = &adapter->clock;
+ adapter->compare.target = ktime_get_real;
+ adapter->compare.num_samples = 10;
+ timecompare_update(&adapter->compare, 0);
+ break;
+ case e1000_82576:
+ /*
+ * Initialize hardware timer: we keep it running just in case
+ * that some program needs it later on.
+ */
+ memset(&adapter->cycles, 0, sizeof(adapter->cycles));
+ adapter->cycles.read = igb_read_clock;
+ adapter->cycles.mask = CLOCKSOURCE_MASK(64);
+ adapter->cycles.mult = 1;
+ /*
+ * Scale the NIC clock cycle by a large factor so that
+ * relatively small clock corrections can be added or
+ * subtracted at each clock tick. The drawbacks of a large
+ * factor are a) that the clock register overflows more quickly
+ * (not such a big deal) and b) that the increment per tick has
+ * to fit into 24 bits. As a result we need to use a shift of
+ * 19 so we can fit a value of 16 into the TIMINCA register.
+ */
+ adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
+ wr32(E1000_TIMINCA,
+ (1 << E1000_TIMINCA_16NS_SHIFT) |
+ (16 << IGB_82576_TSYNC_SHIFT));
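+ /* SYSTIM thus advances by 16 << 19 every 16ns, i.e. 2^19 counts per
+ * nanosecond, letting the timecounter above recover nanoseconds with
+ * mult == 1 as cycles >> 19 */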
+
+ /* Set registers so that rollover occurs soon to test this. */
+ wr32(E1000_SYSTIML, 0x00000000);
+ wr32(E1000_SYSTIMH, 0xFF800000);
+ wrfl();
+
+ timecounter_init(&adapter->clock,
+ &adapter->cycles,
+ ktime_to_ns(ktime_get_real()));
+ /*
+ * Synchronize our NIC clock against system wall clock. NIC
+ * time stamp reading requires ~3us per sample, each sample
+ * was pretty stable even under load => only require 10
+ * samples for each offset comparison.
+ */
+ memset(&adapter->compare, 0, sizeof(adapter->compare));
+ adapter->compare.source = &adapter->clock;
+ adapter->compare.target = ktime_get_real;
+ adapter->compare.num_samples = 10;
+ timecompare_update(&adapter->compare, 0);
+ break;
+ case e1000_82575:
+ /* 82575 does not support timesync */
+ default:
+ break;
+ }
+
+}
+
+/**
+ * igb_sw_init - Initialize general software structures (struct igb_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * igb_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int __devinit igb_sw_init(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = IGB_DEFAULT_TXD;
+ adapter->rx_ring_count = IGB_DEFAULT_RXD;
+
+ /* set default ITR values */
+ adapter->rx_itr_setting = IGB_DEFAULT_ITR;
+ adapter->tx_itr_setting = IGB_DEFAULT_ITR;
+
+ /* set default work limits */
+ adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
+
+ adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
+ VLAN_HLEN;
+ adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
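+ /* e.g. with the default 1500-byte MTU: 1500 + 14 + 4 + 4 == 1522;
+ * min_frame_size is the 64-byte Ethernet minimum (ETH_ZLEN 60 + 4 FCS) */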
+
+ spin_lock_init(&adapter->stats64_lock);
+#ifdef CONFIG_PCI_IOV
+ switch (hw->mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ if (max_vfs > 7) {
+ dev_warn(&pdev->dev,
+ "Maximum of 7 VFs per PF, using max\n");
+ adapter->vfs_allocated_count = 7;
+ } else
+ adapter->vfs_allocated_count = max_vfs;
+ break;
+ default:
+ break;
+ }
+#endif /* CONFIG_PCI_IOV */
+ adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
+ /* i350 cannot do RSS and SR-IOV at the same time */
+ if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
+ adapter->rss_queues = 1;
+
+ /*
+ * if rss_queues > 4 or vfs are going to be allocated with rss_queues
+ * then we should combine the queues into a queue pair in order to
+ * conserve interrupts due to limited supply
+ */
+ if ((adapter->rss_queues > 4) ||
+ ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
+ adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
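+ /* e.g. 8 RSS queues with separate Tx and Rx vectors would ask for
+ * 16 MSI-X interrupts; pairing Tx and Rx halves that demand */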
+
+ /* This call may decrease the number of queues */
+ if (igb_init_interrupt_scheme(adapter)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ igb_probe_vfs(adapter);
+
+ /* Explicitly disable IRQ since the NIC can be in any state. */
+ igb_irq_disable(adapter);
+
+ if (hw->mac.type == e1000_i350)
+ adapter->flags &= ~IGB_FLAG_DMAC;
+
+ set_bit(__IGB_DOWN, &adapter->state);
+ return 0;
+}
+
+/**
+ * igb_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int igb_open(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+ int i;
+
+ /* disallow open during test */
+ if (test_bit(__IGB_TESTING, &adapter->state))
+ return -EBUSY;
+
+ netif_carrier_off(netdev);
+
+ /* allocate transmit descriptors */
+ err = igb_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = igb_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ igb_power_up_link(adapter);
+
+ /* before we allocate an interrupt, we must be ready to handle it.
+ * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
+ * as soon as we call pci_request_irq, so we have to setup our
+ * clean_rx handler before we do so. */
+ igb_configure(adapter);
+
+ err = igb_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ /* From here on the code is the same as igb_up() */
+ clear_bit(__IGB_DOWN, &adapter->state);
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+ napi_enable(&q_vector->napi);
+ }
+
+ /* Clear any pending interrupts. */
+ rd32(E1000_ICR);
+
+ igb_irq_enable(adapter);
+
+ /* notify VFs that reset has been completed */
+ if (adapter->vfs_allocated_count) {
+ u32 reg_data = rd32(E1000_CTRL_EXT);
+ reg_data |= E1000_CTRL_EXT_PFRSTD;
+ wr32(E1000_CTRL_EXT, reg_data);
+ }
+
+ netif_tx_start_all_queues(netdev);
+
+ /* start the watchdog. */
+ hw->mac.get_link_status = 1;
+ schedule_work(&adapter->watchdog_task);
+
+ return 0;
+
+err_req_irq:
+ igb_release_hw_control(adapter);
+ igb_power_down_link(adapter);
+ igb_free_all_rx_resources(adapter);
+err_setup_rx:
+ igb_free_all_tx_resources(adapter);
+err_setup_tx:
+ igb_reset(adapter);
+
+ return err;
+}
+
+/**
+ * igb_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int igb_close(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
+ igb_down(adapter);
+
+ igb_free_irq(adapter);
+
+ igb_free_all_tx_resources(adapter);
+ igb_free_all_rx_resources(adapter);
+
+ return 0;
+}
+
+/**
+ * igb_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+int igb_setup_tx_resources(struct igb_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int size;
+
+ size = sizeof(struct igb_tx_buffer) * tx_ring->count;
+ tx_ring->tx_buffer_info = vzalloc(size);
+ if (!tx_ring->tx_buffer_info)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
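+ /* e.g. the default 256 descriptors (IGB_DEFAULT_TXD) at 16 bytes per
+ * union e1000_adv_tx_desc == 4096, exactly one page */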
+
+ tx_ring->desc = dma_alloc_coherent(dev,
+ tx_ring->size,
+ &tx_ring->dma,
+ GFP_KERNEL);
+
+ if (!tx_ring->desc)
+ goto err;
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ return 0;
+
+err:
+ vfree(tx_ring->tx_buffer_info);
+ dev_err(dev,
+ "Unable to allocate memory for the transmit descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * igb_setup_all_tx_resources - wrapper to allocate Tx resources
+ * (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ err = igb_setup_tx_resources(adapter->tx_ring[i]);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Allocation for Tx Queue %u failed\n", i);
+ for (i--; i >= 0; i--)
+ igb_free_tx_resources(adapter->tx_ring[i]);
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * igb_setup_tctl - configure the transmit control registers
+ * @adapter: Board private structure
+ **/
+void igb_setup_tctl(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 tctl;
+
+ /* disable queue 0 which is enabled by default on 82575 and 82576 */
+ wr32(E1000_TXDCTL(0), 0);
+
+ /* Program the Transmit Control Register */
+ tctl = rd32(E1000_TCTL);
+ tctl &= ~E1000_TCTL_CT;
+ tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+ (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+ igb_config_collision_dist(hw);
+
+ /* Enable transmits */
+ tctl |= E1000_TCTL_EN;
+
+ wr32(E1000_TCTL, tctl);
+}
+
+/**
+ * igb_configure_tx_ring - Configure transmit ring after Reset
+ * @adapter: board private structure
+ * @ring: tx ring to configure
+ *
+ * Configure a transmit ring after a reset.
+ **/
+void igb_configure_tx_ring(struct igb_adapter *adapter,
+ struct igb_ring *ring)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 txdctl = 0;
+ u64 tdba = ring->dma;
+ int reg_idx = ring->reg_idx;
+
+ /* disable the queue */
+ wr32(E1000_TXDCTL(reg_idx), 0);
+ wrfl();
+ mdelay(10);
+
+ wr32(E1000_TDLEN(reg_idx),
+ ring->count * sizeof(union e1000_adv_tx_desc));
+ wr32(E1000_TDBAL(reg_idx),
+ tdba & 0x00000000ffffffffULL);
+ wr32(E1000_TDBAH(reg_idx), tdba >> 32);
+
+ ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
+ wr32(E1000_TDH(reg_idx), 0);
+ writel(0, ring->tail);
+
+ txdctl |= IGB_TX_PTHRESH;
+ txdctl |= IGB_TX_HTHRESH << 8;
+ txdctl |= IGB_TX_WTHRESH << 16;
+
+ txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+ wr32(E1000_TXDCTL(reg_idx), txdctl);
+}
+
+/**
+ * igb_configure_tx - Configure transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void igb_configure_tx(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
+}
+
+/**
+ * igb_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int igb_setup_rx_resources(struct igb_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ int size, desc_len;
+
+ size = sizeof(struct igb_rx_buffer) * rx_ring->count;
+ rx_ring->rx_buffer_info = vzalloc(size);
+ if (!rx_ring->rx_buffer_info)
+ goto err;
+
+ desc_len = sizeof(union e1000_adv_rx_desc);
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * desc_len;
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ rx_ring->desc = dma_alloc_coherent(dev,
+ rx_ring->size,
+ &rx_ring->dma,
+ GFP_KERNEL);
+
+ if (!rx_ring->desc)
+ goto err;
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ return 0;
+
+err:
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the receive descriptor"
+ " ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * igb_setup_all_rx_resources - wrapper to allocate Rx resources
+ * (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = igb_setup_rx_resources(adapter->rx_ring[i]);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Allocation for Rx Queue %u failed\n", i);
+ for (i--; i >= 0; i--)
+ igb_free_rx_resources(adapter->rx_ring[i]);
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * igb_setup_mrqc - configure the multiple receive queue control registers
+ * @adapter: Board private structure
+ **/
+static void igb_setup_mrqc(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mrqc, rxcsum;
+ u32 j, num_rx_queues, shift = 0, shift2 = 0;
+ union e1000_reta {
+ u32 dword;
+ u8 bytes[4];
+ } reta;
+ static const u8 rsshash[40] = {
+ 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
+ 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
+ 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
+ 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
+
+ /* Fill out hash function seeds */
+ for (j = 0; j < 10; j++) {
+ u32 rsskey = rsshash[(j * 4)];
+ rsskey |= rsshash[(j * 4) + 1] << 8;
+ rsskey |= rsshash[(j * 4) + 2] << 16;
+ rsskey |= rsshash[(j * 4) + 3] << 24;
+ array_wr32(E1000_RSSRK(0), j, rsskey);
+ }
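+ /* the 40-byte rsshash key above is now packed little-endian into
+ * the ten 32-bit RSSRK registers */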
+
+ num_rx_queues = adapter->rss_queues;
+
+ if (adapter->vfs_allocated_count) {
+ /* 82575 and 82576 support 2 RSS queues for VMDq */
+ switch (hw->mac.type) {
+ case e1000_i350:
+ case e1000_82580:
+ num_rx_queues = 1;
+ shift = 0;
+ break;
+ case e1000_82576:
+ shift = 3;
+ num_rx_queues = 2;
+ break;
+ case e1000_82575:
+ shift = 2;
+ shift2 = 6;
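+ /* fall through */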
+ default:
+ break;
+ }
+ } else {
+ if (hw->mac.type == e1000_82575)
+ shift = 6;
+ }
+
+ for (j = 0; j < (32 * 4); j++) {
+ reta.bytes[j & 3] = (j % num_rx_queues) << shift;
+ if (shift2)
+ reta.bytes[j & 3] |= num_rx_queues << shift2;
+ if ((j & 3) == 3)
+ wr32(E1000_RETA(j >> 2), reta.dword);
+ }
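+ /* e.g. on 82576 with no VFs (shift == 0), rss_queues == 4 fills the
+ * table with 0, 1, 2, 3 repeating, spreading flows over four queues */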
+
+ /*
+ * Disable raw packet checksumming so that RSS hash is placed in
+ * descriptor on writeback. No need to enable TCP/UDP/IP checksum
+ * offloads as they are enabled by default
+ */
+ rxcsum = rd32(E1000_RXCSUM);
+ rxcsum |= E1000_RXCSUM_PCSD;
+
+ if (adapter->hw.mac.type >= e1000_82576)
+ /* Enable Receive Checksum Offload for SCTP */
+ rxcsum |= E1000_RXCSUM_CRCOFL;
+
+ /* Don't need to set TUOFL or IPOFL, they default to 1 */
+ wr32(E1000_RXCSUM, rxcsum);
+
+ /* If VMDq is enabled then we set the appropriate mode for that, else
+ * we default to RSS so that an RSS hash is calculated per packet even
+ * if we are only using one queue */
+ if (adapter->vfs_allocated_count) {
+ if (hw->mac.type > e1000_82575) {
+ /* Set the default pool for the PF's first queue */
+ u32 vtctl = rd32(E1000_VT_CTL);
+ vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
+ E1000_VT_CTL_DISABLE_DEF_POOL);
+ vtctl |= adapter->vfs_allocated_count <<
+ E1000_VT_CTL_DEFAULT_POOL_SHIFT;
+ wr32(E1000_VT_CTL, vtctl);
+ }
+ if (adapter->rss_queues > 1)
+ mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
+ else
+ mrqc = E1000_MRQC_ENABLE_VMDQ;
+ } else {
+ mrqc = E1000_MRQC_ENABLE_RSS_4Q;
+ }
+ igb_vmm_control(adapter);
+
+ /*
+ * Generate RSS hash based on TCP port numbers and/or
+ * IPv4/v6 src and dst addresses since UDP cannot be
+ * hashed reliably due to IP fragmentation
+ */
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
+ E1000_MRQC_RSS_FIELD_IPV4_TCP |
+ E1000_MRQC_RSS_FIELD_IPV6 |
+ E1000_MRQC_RSS_FIELD_IPV6_TCP |
+ E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
+
+ wr32(E1000_MRQC, mrqc);
+}
+
+/**
+ * igb_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
+ **/
+void igb_setup_rctl(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ rctl = rd32(E1000_RCTL);
+
+ rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+ rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+
+ rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
+ (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+ /*
+ * enable stripping of CRC. It's unlikely this will break BMC
+ * redirection as it did with e1000. Newer features require
+ * that the HW strips the CRC.
+ */
+ rctl |= E1000_RCTL_SECRC;
+
+ /* disable store bad packets and clear size bits. */
+ rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
+
+ /* enable LPE to prevent packets larger than max_frame_size */
+ rctl |= E1000_RCTL_LPE;
+
+ /* disable queue 0 to prevent tail write w/o re-config */
+ wr32(E1000_RXDCTL(0), 0);
+
+ /* Attention!!! For SR-IOV PF driver operations you must enable
+ * queue drop for all VF and PF queues to prevent head of line blocking
+ * if an un-trusted VF does not provide descriptors to hardware.
+ */
+ if (adapter->vfs_allocated_count) {
+ /* set all queue drop enable bits */
+ wr32(E1000_QDE, ALL_QUEUES);
+ }
+
+ wr32(E1000_RCTL, rctl);
+}
+
+static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
+ int vfn)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vmolr;
+
+ /* if it isn't the PF check to see if VFs are enabled and
+ * increase the size to support vlan tags */
+ if (vfn < adapter->vfs_allocated_count &&
+ adapter->vf_data[vfn].vlans_enabled)
+ size += VLAN_TAG_SIZE;
+
+ vmolr = rd32(E1000_VMOLR(vfn));
+ vmolr &= ~E1000_VMOLR_RLPML_MASK;
+ vmolr |= size | E1000_VMOLR_LPE;
+ wr32(E1000_VMOLR(vfn), vmolr);
+
+ return 0;
+}
+
+/**
+ * igb_rlpml_set - set maximum receive packet size
+ * @adapter: board private structure
+ *
+ * Configure maximum receivable packet size.
+ **/
+static void igb_rlpml_set(struct igb_adapter *adapter)
+{
+ u32 max_frame_size = adapter->max_frame_size;
+ struct e1000_hw *hw = &adapter->hw;
+ u16 pf_id = adapter->vfs_allocated_count;
+
+ if (pf_id) {
+ igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
+ /*
+ * If we're in VMDQ or SR-IOV mode, then set global RLPML
+ * to our max jumbo frame size, in case we need to enable
+ * jumbo frames on one of the rings later.
+ * This will not pass over-length frames into the default
+ * queue because it's gated by the VMOLR.RLPML.
+ */
+ max_frame_size = MAX_JUMBO_FRAME_SIZE;
+ }
+
+ wr32(E1000_RLPML, max_frame_size);
+}
+
+static inline void igb_set_vmolr(struct igb_adapter *adapter,
+ int vfn, bool aupe)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vmolr;
+
+ /*
+ * This register exists only on 82576 and newer so if we are older then
+ * we should exit and do nothing
+ */
+ if (hw->mac.type < e1000_82576)
+ return;
+
+ vmolr = rd32(E1000_VMOLR(vfn));
+ vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+ if (aupe)
+ vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
+ else
+ vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
+
+ /* clear all bits that might not be set */
+ vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
+
+ if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
+ vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
+ /*
+ * for VMDq only allow the VFs and pool 0 to accept broadcast and
+ * multicast packets
+ */
+ if (vfn <= adapter->vfs_allocated_count)
+ vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
+
+ wr32(E1000_VMOLR(vfn), vmolr);
+}
+
+/**
+ * igb_configure_rx_ring - Configure a receive ring after Reset
+ * @adapter: board private structure
+ * @ring: receive ring to be configured
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+void igb_configure_rx_ring(struct igb_adapter *adapter,
+ struct igb_ring *ring)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u64 rdba = ring->dma;
+ int reg_idx = ring->reg_idx;
+ u32 srrctl = 0, rxdctl = 0;
+
+ /* disable the queue */
+ wr32(E1000_RXDCTL(reg_idx), 0);
+
+ /* Set DMA base address registers */
+ wr32(E1000_RDBAL(reg_idx),
+ rdba & 0x00000000ffffffffULL);
+ wr32(E1000_RDBAH(reg_idx), rdba >> 32);
+ wr32(E1000_RDLEN(reg_idx),
+ ring->count * sizeof(union e1000_adv_rx_desc));
+
+ /* initialize head and tail */
+ ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
+ wr32(E1000_RDH(reg_idx), 0);
+ writel(0, ring->tail);
+
+ /* set descriptor configuration */
+ srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
+ srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+#else
+ srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+#endif
+ srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ if (hw->mac.type == e1000_82580)
+ srrctl |= E1000_SRRCTL_TIMESTAMP;
+ /* Only set Drop Enable if we are supporting multiple queues */
+ if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
+ srrctl |= E1000_SRRCTL_DROP_EN;
+
+ wr32(E1000_SRRCTL(reg_idx), srrctl);
+
+ /* set filtering for VMDQ pools */
+ igb_set_vmolr(adapter, reg_idx & 0x7, true);
+
+ rxdctl |= IGB_RX_PTHRESH;
+ rxdctl |= IGB_RX_HTHRESH << 8;
+ rxdctl |= IGB_RX_WTHRESH << 16;
+
+ /* enable receive descriptor fetching */
+ rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+ wr32(E1000_RXDCTL(reg_idx), rxdctl);
+}
+
+/**
+ * igb_configure_rx - Configure receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void igb_configure_rx(struct igb_adapter *adapter)
+{
+ int i;
+
+ /* set UTA to appropriate mode */
+ igb_set_uta(adapter);
+
+ /* set the correct pool for the PF default MAC address in entry 0 */
+ igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
+ adapter->vfs_allocated_count);
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
+}
+
+/**
+ * igb_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void igb_free_tx_resources(struct igb_ring *tx_ring)
+{
+ igb_clean_tx_ring(tx_ring);
+
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * igb_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void igb_free_all_tx_resources(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ igb_free_tx_resources(adapter->tx_ring[i]);
+}
+
+void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
+ struct igb_tx_buffer *tx_buffer)
+{
+ if (tx_buffer->skb) {
+ dev_kfree_skb_any(tx_buffer->skb);
+ if (tx_buffer->dma)
+ dma_unmap_single(ring->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
+ } else if (tx_buffer->dma) {
+ dma_unmap_page(ring->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
+ }
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ tx_buffer->dma = 0;
+ /* buffer_info must be completely set up in the transmit path */
+}
+
+/**
+ * igb_clean_tx_ring - Free Tx Buffers
+ * @tx_ring: ring to be cleaned
+ **/
+static void igb_clean_tx_ring(struct igb_ring *tx_ring)
+{
+ struct igb_tx_buffer *buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ if (!tx_ring->tx_buffer_info)
+ return;
+ /* Free all the Tx ring sk_buffs */
+
+ for (i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->tx_buffer_info[i];
+ igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
+ }
+
+ size = sizeof(struct igb_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+}
+
+/**
+ * igb_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ igb_clean_tx_ring(adapter->tx_ring[i]);
+}
+
+/**
+ * igb_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void igb_free_rx_resources(struct igb_ring *rx_ring)
+{
+ igb_clean_rx_ring(rx_ring);
+
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!rx_ring->desc)
+ return;
+
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
+
+ rx_ring->desc = NULL;
+}
+
+/**
+ * igb_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void igb_free_all_rx_resources(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ igb_free_rx_resources(adapter->rx_ring[i]);
+}
+
+/**
+ * igb_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+static void igb_clean_rx_ring(struct igb_ring *rx_ring)
+{
+ unsigned long size;
+ u16 i;
+
+ if (!rx_ring->rx_buffer_info)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
+ if (buffer_info->dma) {
+ dma_unmap_single(rx_ring->dev,
+ buffer_info->dma,
+ IGB_RX_HDR_LEN,
+ DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+ }
+
+ if (buffer_info->skb) {
+ dev_kfree_skb(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ if (buffer_info->page_dma) {
+ dma_unmap_page(rx_ring->dev,
+ buffer_info->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ buffer_info->page_dma = 0;
+ }
+ if (buffer_info->page) {
+ put_page(buffer_info->page);
+ buffer_info->page = NULL;
+ buffer_info->page_offset = 0;
+ }
+ }
+
+ size = sizeof(struct igb_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+/**
+ * igb_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ igb_clean_rx_ring(adapter->rx_ring[i]);
+}
+
+/**
+ * igb_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int igb_set_mac(struct net_device *netdev, void *p)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+ /* set the correct pool for the new PF MAC address in entry 0 */
+ igb_rar_set_qsel(adapter, hw->mac.addr, 0,
+ adapter->vfs_allocated_count);
+
+ return 0;
+}
+
+/**
+ * igb_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ * 0 on no addresses written
+ * X on writing X addresses to MTA
+ **/
+static int igb_write_mc_addr_list(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u8 *mta_list;
+ int i;
+
+ if (netdev_mc_empty(netdev)) {
+ /* nothing to program, so clear mc list */
+ igb_update_mc_addr_list(hw, NULL, 0);
+ igb_restore_vf_multicasts(adapter);
+ return 0;
+ }
+
+ mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
+ if (!mta_list)
+ return -ENOMEM;
+
+ /* The shared function expects a packed array of only addresses. */
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+
+ igb_update_mc_addr_list(hw, mta_list, i);
+ kfree(mta_list);
+
+ return netdev_mc_count(netdev);
+}
+
+/**
+ * igb_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ * 0 on no addresses written
+ * X on writing X addresses to the RAR table
+ **/
+static int igb_write_uc_addr_list(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned int vfn = adapter->vfs_allocated_count;
+ unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
+ int count = 0;
+
+ /* return ENOMEM indicating insufficient memory for addresses */
+ if (netdev_uc_count(netdev) > rar_entries)
+ return -ENOMEM;
+
+ if (!netdev_uc_empty(netdev) && rar_entries) {
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_uc_addr(ha, netdev) {
+ if (!rar_entries)
+ break;
+ igb_rar_set_qsel(adapter, ha->addr,
+ rar_entries--,
+ vfn);
+ count++;
+ }
+ }
+ /* write the addresses in reverse order to avoid write combining */
+ for (; rar_entries > 0 ; rar_entries--) {
+ wr32(E1000_RAH(rar_entries), 0);
+ wr32(E1000_RAL(rar_entries), 0);
+ }
+ wrfl();
+
+ return count;
+}
+
+/**
+ * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+static void igb_set_rx_mode(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned int vfn = adapter->vfs_allocated_count;
+ u32 rctl, vmolr = 0;
+ int count;
+
+ /* Check for Promiscuous and All Multicast modes */
+ rctl = rd32(E1000_RCTL);
+
+ /* clear the affected bits */
+ rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
+
+ if (netdev->flags & IFF_PROMISC) {
+ rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+ vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
+ } else {
+ if (netdev->flags & IFF_ALLMULTI) {
+ rctl |= E1000_RCTL_MPE;
+ vmolr |= E1000_VMOLR_MPME;
+ } else {
+ /*
+ * Write addresses to the MTA, if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ count = igb_write_mc_addr_list(netdev);
+ if (count < 0) {
+ rctl |= E1000_RCTL_MPE;
+ vmolr |= E1000_VMOLR_MPME;
+ } else if (count) {
+ vmolr |= E1000_VMOLR_ROMPE;
+ }
+ }
+ /*
+ * Write addresses to available RAR registers, if there is not
+ * sufficient space to store all the addresses then enable
+ * unicast promiscuous mode
+ */
+ count = igb_write_uc_addr_list(netdev);
+ if (count < 0) {
+ rctl |= E1000_RCTL_UPE;
+ vmolr |= E1000_VMOLR_ROPE;
+ }
+ rctl |= E1000_RCTL_VFE;
+ }
+ wr32(E1000_RCTL, rctl);
+
+ /*
+ * In order to support SR-IOV and eventually VMDq it is necessary to set
+ * the VMOLR to enable the appropriate modes. Without this workaround
+ * we will have issues with VLAN tag stripping not being done for frames
+ * that are only arriving because we are the default pool
+ */
+ if (hw->mac.type < e1000_82576)
+ return;
+
+ vmolr |= rd32(E1000_VMOLR(vfn)) &
+ ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
+ wr32(E1000_VMOLR(vfn), vmolr);
+ igb_restore_vf_multicasts(adapter);
+}
+
+static void igb_check_wvbr(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 wvbr = 0;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ if (!(wvbr = rd32(E1000_WVBR)))
+ return;
+ break;
+ default:
+ break;
+ }
+
+ adapter->wvbr |= wvbr;
+}
+
+#define IGB_STAGGERED_QUEUE_OFFSET 8
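+/* each VF owns two queues staggered by this offset, so a spoof event
+ * from VF j can raise bit j or bit j + 8 in WVBR */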
+
+static void igb_spoof_check(struct igb_adapter *adapter)
+{
+ int j;
+
+ if (!adapter->wvbr)
+ return;
+
+ for (j = 0; j < adapter->vfs_allocated_count; j++) {
+ if (adapter->wvbr & (1 << j) ||
+ adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
+ dev_warn(&adapter->pdev->dev,
+ "Spoof event(s) detected on VF %d\n", j);
+ adapter->wvbr &=
+ ~((1 << j) |
+ (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
+ }
+ }
+}
+
+/* Need to wait a few seconds after link up to get diagnostic information from
+ * the phy */
+static void igb_update_phy_info(unsigned long data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *) data;
+ igb_get_phy_info(&adapter->hw);
+}
+
+/**
+ * igb_has_link - check shared code for link and determine up/down
+ * @adapter: pointer to driver private info
+ **/
+bool igb_has_link(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ bool link_active = false;
+ s32 ret_val = 0;
+
+ /* get_link_status is set on LSC (link status) interrupt or
+ * rx sequence error interrupt. get_link_status will stay
+ * false until the e1000_check_for_link establishes link
+ * for copper adapters ONLY
+ */
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ if (hw->mac.get_link_status) {
+ ret_val = hw->mac.ops.check_for_link(hw);
+ link_active = !hw->mac.get_link_status;
+ } else {
+ link_active = true;
+ }
+ break;
+ case e1000_media_type_internal_serdes:
+ ret_val = hw->mac.ops.check_for_link(hw);
+ link_active = hw->mac.serdes_has_link;
+ break;
+ default:
+ case e1000_media_type_unknown:
+ break;
+ }
+
+ return link_active;
+}
+
+static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
+{
+ bool ret = false;
+ u32 ctrl_ext, thstat;
+
+ /* check for thermal sensor event on i350, copper only */
+ if (hw->mac.type == e1000_i350) {
+ thstat = rd32(E1000_THSTAT);
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+
+ if ((hw->phy.media_type == e1000_media_type_copper) &&
+ !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+ ret = !!(thstat & event);
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * igb_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void igb_watchdog(unsigned long data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ /* Do the rest outside of interrupt context */
+ schedule_work(&adapter->watchdog_task);
+}
+
+static void igb_watchdog_task(struct work_struct *work)
+{
+ struct igb_adapter *adapter = container_of(work,
+ struct igb_adapter,
+ watchdog_task);
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 link;
+ int i;
+
+ link = igb_has_link(adapter);
+ if (link) {
+ if (!netif_carrier_ok(netdev)) {
+ u32 ctrl;
+ hw->mac.ops.get_speed_and_duplex(hw,
+ &adapter->link_speed,
+ &adapter->link_duplex);
+
+ ctrl = rd32(E1000_CTRL);
+ /* Links status message must follow this format */
+ printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
+ "Flow Control: %s\n",
+ netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full Duplex" : "Half Duplex",
+ ((ctrl & E1000_CTRL_TFCE) &&
+ (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
+ ((ctrl & E1000_CTRL_RFCE) ? "RX" :
+ ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
+
+ /* check for thermal sensor event */
+ if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
+ printk(KERN_INFO "igb: %s The network adapter "
+ "link speed was downshifted "
+ "because it overheated.\n",
+ netdev->name);
+ }
+
+ /* adjust timeout factor according to speed/duplex */
+ adapter->tx_timeout_factor = 1;
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adapter->tx_timeout_factor = 14;
+ break;
+ case SPEED_100:
+ /* maybe add some timeout factor ? */
+ break;
+ }
+
+ netif_carrier_on(netdev);
+
+ igb_ping_all_vfs(adapter);
+ igb_check_vf_rate_limit(adapter);
+
+ /* link state has changed, schedule phy info update */
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+ mod_timer(&adapter->phy_info_timer,
+ round_jiffies(jiffies + 2 * HZ));
+ }
+ } else {
+ if (netif_carrier_ok(netdev)) {
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+
+ /* check for thermal sensor event */
+ if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
+ printk(KERN_ERR "igb: %s The network adapter "
+ "was stopped because it "
+ "overheated.\n",
+ netdev->name);
+ }
+
+ /* Links status message must follow this format */
+ printk(KERN_INFO "igb: %s NIC Link is Down\n",
+ netdev->name);
+ netif_carrier_off(netdev);
+
+ igb_ping_all_vfs(adapter);
+
+ /* link state has changed, schedule phy info update */
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+ mod_timer(&adapter->phy_info_timer,
+ round_jiffies(jiffies + 2 * HZ));
+ }
+ }
+
+ spin_lock(&adapter->stats64_lock);
+ igb_update_stats(adapter, &adapter->stats64);
+ spin_unlock(&adapter->stats64_lock);
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igb_ring *tx_ring = adapter->tx_ring[i];
+ if (!netif_carrier_ok(netdev)) {
+ /* We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context). */
+ if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+ /* return immediately since reset is imminent */
+ return;
+ }
+ }
+
+ /* Force detection of hung controller every watchdog period */
+ tx_ring->detect_tx_hung = true;
+ }
+
+ /* Cause software interrupt to ensure rx ring is cleaned */
+ if (adapter->msix_entries) {
+ u32 eics = 0;
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+ eics |= q_vector->eims_value;
+ }
+ wr32(E1000_EICS, eics);
+ } else {
+ wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ }
+
+ igb_spoof_check(adapter);
+
+ /* Reset the timer */
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 2 * HZ));
+}
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+/**
+ * igb_update_ring_itr - update the dynamic ITR value based on packet size
+ *
+ * Stores a new ITR value based strictly on packet size. This
+ * algorithm is less sophisticated than that used in igb_update_itr,
+ * due to the difficulty of synchronizing statistics across multiple
+ * receive rings. The divisors and thresholds used by this function
+ * were determined based on theoretical maximum wire speed and testing
+ * data, in order to minimize response time while increasing bulk
+ * throughput.
+ * This functionality is controlled by the InterruptThrottleRate module
+ * parameter (see igb_param.c)
+ * NOTE: This function is called only when operating in a multiqueue
+ * receive environment.
+ * @q_vector: pointer to q_vector
+ **/
+static void igb_update_ring_itr(struct igb_q_vector *q_vector)
+{
+ int new_val = q_vector->itr_val;
+ int avg_wire_size = 0;
+ struct igb_adapter *adapter = q_vector->adapter;
+ struct igb_ring *ring;
+ unsigned int packets;
+
+ /* For non-gigabit speeds, just fix the interrupt rate at 4000
+ * ints/sec - ITR timer value of 120 ticks.
+ */
+ if (adapter->link_speed != SPEED_1000) {
+ new_val = 976;
+ goto set_itr_val;
+ }
+
+ ring = q_vector->rx_ring;
+ if (ring) {
+ packets = ACCESS_ONCE(ring->total_packets);
+
+ if (packets)
+ avg_wire_size = ring->total_bytes / packets;
+ }
+
+ ring = q_vector->tx_ring;
+ if (ring) {
+ packets = ACCESS_ONCE(ring->total_packets);
+
+ if (packets)
+ avg_wire_size = max_t(u32, avg_wire_size,
+ ring->total_bytes / packets);
+ }
+
+ /* if avg_wire_size isn't set no work was done */
+ if (!avg_wire_size)
+ goto clear_counts;
+
+ /* Add 24 bytes to size to account for CRC, preamble, and gap */
+ avg_wire_size += 24;
+
+ /* Don't starve jumbo frames */
+ avg_wire_size = min(avg_wire_size, 3000);
+
+ /* Give a little boost to mid-size frames */
+ if ((avg_wire_size > 300) && (avg_wire_size < 1200))
+ new_val = avg_wire_size / 3;
+ else
+ new_val = avg_wire_size / 2;
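+ /* e.g. 60-byte frames: (60 + 24) / 2 = 42; 1500-byte frames:
+ * (1500 + 24) / 2 = 762, each in 0.256 usec units */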
+
+ /* when in itr mode 3 do not exceed 20K ints/sec */
+ if (adapter->rx_itr_setting == 3 && new_val < 196)
+ new_val = 196;
+
+set_itr_val:
+ if (new_val != q_vector->itr_val) {
+ q_vector->itr_val = new_val;
+ q_vector->set_itr = 1;
+ }
+clear_counts:
+ if (q_vector->rx_ring) {
+ q_vector->rx_ring->total_bytes = 0;
+ q_vector->rx_ring->total_packets = 0;
+ }
+ if (q_vector->tx_ring) {
+ q_vector->tx_ring->total_bytes = 0;
+ q_vector->tx_ring->total_packets = 0;
+ }
+}
+
+/**
+ * igb_update_itr - update the dynamic ITR value based on statistics
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
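+ * For example, 15 packets totalling 22500 bytes at low_latency gives
+ * bytes/packets == 1500 (> 1200) and moves the setting to bulk_latency.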
+ * this functionality is controlled by the InterruptThrottleRate module
+ * parameter (see igb_param.c)
+ * NOTE: These calculations are only valid when operating in a single-
+ * queue environment.
+ * @adapter: pointer to adapter
+ * @itr_setting: current q_vector->itr_val
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ **/
+static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
+ int packets, int bytes)
+{
+ unsigned int retval = itr_setting;
+
+ if (packets == 0)
+ goto update_itr_done;
+
+ switch (itr_setting) {
+ case lowest_latency:
+ /* handle TSO and jumbo frames */
+ if (bytes/packets > 8000)
+ retval = bulk_latency;
+ else if ((packets < 5) && (bytes > 512))
+ retval = low_latency;
+ break;
+ case low_latency: /* 50 usec aka 20000 ints/s */
+ if (bytes > 10000) {
+ /* this if handles the TSO accounting */
+ if (bytes/packets > 8000) {
+ retval = bulk_latency;
+ } else if ((packets < 10) || ((bytes/packets) > 1200)) {
+ retval = bulk_latency;
+ } else if ((packets > 35)) {
+ retval = lowest_latency;
+ }
+ } else if (bytes/packets > 2000) {
+ retval = bulk_latency;
+ } else if (packets <= 2 && bytes < 512) {
+ retval = lowest_latency;
+ }
+ break;
+ case bulk_latency: /* 250 usec aka 4000 ints/s */
+ if (bytes > 25000) {
+ if (packets > 35)
+ retval = low_latency;
+ } else if (bytes < 1500) {
+ retval = low_latency;
+ }
+ break;
+ }
+
+update_itr_done:
+ return retval;
+}
+
+static void igb_set_itr(struct igb_adapter *adapter)
+{
+ struct igb_q_vector *q_vector = adapter->q_vector[0];
+ u16 current_itr;
+ u32 new_itr = q_vector->itr_val;
+
+ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+ if (adapter->link_speed != SPEED_1000) {
+ current_itr = 0;
+ new_itr = 4000;
+ goto set_itr_now;
+ }
+
+ adapter->rx_itr = igb_update_itr(adapter,
+ adapter->rx_itr,
+ q_vector->rx_ring->total_packets,
+ q_vector->rx_ring->total_bytes);
+
+ adapter->tx_itr = igb_update_itr(adapter,
+ adapter->tx_itr,
+ q_vector->tx_ring->total_packets,
+ q_vector->tx_ring->total_bytes);
+ current_itr = max(adapter->rx_itr, adapter->tx_itr);
+
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
+ current_itr = low_latency;
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = 56; /* aka 70,000 ints/sec */
+ break;
+ case low_latency:
+ new_itr = 196; /* aka 20,000 ints/sec */
+ break;
+ case bulk_latency:
+ new_itr = 980; /* aka 4,000 ints/sec */
+ break;
+ default:
+ break;
+ }
+
+set_itr_now:
+ q_vector->rx_ring->total_bytes = 0;
+ q_vector->rx_ring->total_packets = 0;
+ q_vector->tx_ring->total_bytes = 0;
+ q_vector->tx_ring->total_packets = 0;
+
+ if (new_itr != q_vector->itr_val) {
+ /* this attempts to bias the interrupt rate towards Bulk
+ * by adding intermediate steps when interrupt rate is
+ * increasing */
+ new_itr = new_itr > q_vector->itr_val ?
+ max((new_itr * q_vector->itr_val) /
+ (new_itr + (q_vector->itr_val >> 2)),
+ new_itr) :
+ new_itr;
+ /* Don't write the value here; it resets the adapter's
+ * internal timer, and causes us to delay far longer than
+ * we should between interrupts. Instead, we write the ITR
+ * value at the beginning of the next interrupt so the timing
+ * ends up being correct.
+ */
+ q_vector->itr_val = new_itr;
+ q_vector->set_itr = 1;
+ }
+}
+
+void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
+ u32 type_tucmd, u32 mss_l4len_idx)
+{
+ struct e1000_adv_tx_context_desc *context_desc;
+ u16 i = tx_ring->next_to_use;
+
+ context_desc = IGB_TX_CTXTDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ /* set bits to identify this as an advanced context descriptor */
+ type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
+
+ /* For 82575, context index must be unique per ring. */
+ if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
+ mss_l4len_idx |= tx_ring->reg_idx << 4;
+
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+}
+
+static inline int igb_tso(struct igb_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol, u8 *hdr_len)
+{
+ int err;
+ u32 vlan_macip_lens, type_tucmd;
+ u32 mss_l4len_idx, l4len;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
+
+ if (protocol == __constant_htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ l4len = tcp_hdrlen(skb);
+ *hdr_len = skb_transport_offset(skb) + l4len;
+
+ /* MSS L4LEN IDX */
+ mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
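+ /* e.g. a 20-byte TCP header and a 1448-byte MSS pack as
+ * (20 << 8) | (1448 << 16) */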
+
+ /* VLAN MACLEN IPLEN */
+ vlan_macip_lens = skb_network_header_len(skb);
+ vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IGB_TX_FLAGS_VLAN_MASK;
+
+ igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
+
+ return 1;
+}
+
+static inline bool igb_tx_csum(struct igb_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol)
+{
+ u32 vlan_macip_lens = 0;
+ u32 mss_l4len_idx = 0;
+ u32 type_tucmd = 0;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ if (!(tx_flags & IGB_TX_FLAGS_VLAN))
+ return false;
+ } else {
+ u8 l4_hdr = 0;
+ switch (protocol) {
+ case __constant_htons(ETH_P_IP):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but proto=%x!\n",
+ protocol);
+ }
+ break;
+ }
+
+ switch (l4_hdr) {
+ case IPPROTO_TCP:
+ type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+ mss_l4len_idx = tcp_hdrlen(skb) <<
+ E1000_ADVTXD_L4LEN_SHIFT;
+ break;
+ case IPPROTO_SCTP:
+ type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
+ mss_l4len_idx = sizeof(struct sctphdr) <<
+ E1000_ADVTXD_L4LEN_SHIFT;
+ break;
+ case IPPROTO_UDP:
+ mss_l4len_idx = sizeof(struct udphdr) <<
+ E1000_ADVTXD_L4LEN_SHIFT;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but l4 proto=%x!\n",
+ l4_hdr);
+ }
+ break;
+ }
+ }
+
+ vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IGB_TX_FLAGS_VLAN_MASK;
+
+ igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
+
+ return (skb->ip_summed == CHECKSUM_PARTIAL);
+}
+
+static __le32 igb_tx_cmd_type(u32 tx_flags)
+{
+ /* set type for advanced descriptor with frame checksum insertion */
+ __le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
+ E1000_ADVTXD_DCMD_IFCS |
+ E1000_ADVTXD_DCMD_DEXT);
+
+ /* set HW vlan bit if vlan is present */
+ if (tx_flags & IGB_TX_FLAGS_VLAN)
+ cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
+
+ /* set timestamp bit if present */
+ if (tx_flags & IGB_TX_FLAGS_TSTAMP)
+ cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
+
+ /* set segmentation bits for TSO */
+ if (tx_flags & IGB_TX_FLAGS_TSO)
+ cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
+
+ return cmd_type;
+}
+
+static __le32 igb_tx_olinfo_status(u32 tx_flags, unsigned int paylen,
+ struct igb_ring *tx_ring)
+{
+ u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
+
+ /* 82575 requires a unique index per ring if any offload is enabled */
+ if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
+ (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX))
+ olinfo_status |= tx_ring->reg_idx << 4;
+
+ /* insert L4 checksum */
+ if (tx_flags & IGB_TX_FLAGS_CSUM) {
+ olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+
+ /* insert IPv4 checksum */
+ if (tx_flags & IGB_TX_FLAGS_IPV4)
+ olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
+ }
+
+ return cpu_to_le32(olinfo_status);
+}
+
+/*
+ * The largest size we can write to the descriptor is 65535. In order to
+ * maintain a power of two alignment we have to limit ourselves to 32K.
+ */
+#define IGB_MAX_TXD_PWR 15
+#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR)
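+
+/*
+ * For illustration: IGB_MAX_DATA_PER_TXD is 1 << 15 = 32768 bytes, so a
+ * hypothetical 48KB buffer is written by igb_tx_map() below as one 32KB
+ * descriptor followed by one 16KB descriptor, even though the 16-bit
+ * length field could in principle describe up to 65535 bytes.
+ */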
+
+static void igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
+ struct igb_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len)
+{
+ struct igb_tx_buffer *tx_buffer_info;
+ union e1000_adv_tx_desc *tx_desc;
+ dma_addr_t dma;
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int data_len = skb->data_len;
+ unsigned int size = skb_headlen(skb);
+ unsigned int paylen = skb->len - hdr_len;
+ __le32 cmd_type;
+ u16 i = tx_ring->next_to_use;
+ u16 gso_segs;
+
+ if (tx_flags & IGB_TX_FLAGS_TSO)
+ gso_segs = skb_shinfo(skb)->gso_segs;
+ else
+ gso_segs = 1;
+
+ /* multiply data chunks by size of headers */
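+	/* (e.g. 4 gso segments with a 66-byte header account for
+	 *  paylen + 4 * 66 bytes on the wire) */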
+ first->bytecount = paylen + (gso_segs * hdr_len);
+ first->gso_segs = gso_segs;
+ first->skb = skb;
+
+ tx_desc = IGB_TX_DESC(tx_ring, i);
+
+ tx_desc->read.olinfo_status =
+ igb_tx_olinfo_status(tx_flags, paylen, tx_ring);
+
+ cmd_type = igb_tx_cmd_type(tx_flags);
+
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ first->length = size;
+ first->dma = dma;
+ first->tx_flags = tx_flags;
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ for (;;) {
+ while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
+ tx_desc->read.cmd_type_len =
+ cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);
+
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = IGB_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ dma += IGB_MAX_DATA_PER_TXD;
+ size -= IGB_MAX_DATA_PER_TXD;
+
+ tx_desc->read.olinfo_status = 0;
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ }
+
+ if (likely(!data_len))
+ break;
+
+ tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = IGB_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ size = frag->size;
+ data_len -= size;
+
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ tx_buffer_info->length = size;
+ tx_buffer_info->dma = dma;
+
+ tx_desc->read.olinfo_status = 0;
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ frag++;
+ }
+
+ /* write last descriptor with RS and EOP bits */
+ cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
+ tx_desc->read.cmd_type_len = cmd_type;
+
+ /* set the timestamp */
+ first->time_stamp = jiffies;
+
+ /*
+ * Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch. (Only applicable for weak-ordered
+ * memory model archs, such as IA-64).
+ *
+ * We also need this memory barrier to make certain all of the
+ * status bits have been updated before next_to_watch is written.
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ writel(i, tx_ring->tail);
+
+	/* we need this if more than one processor can write to our tail
+	 * at a time; it synchronizes IO on IA64/Altix systems */
+ mmiowb();
+
+ return;
+
+dma_error:
+ dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_buffer_info map */
+ for (;;) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+ if (tx_buffer_info == first)
+ break;
+ if (i == 0)
+ i = tx_ring->count;
+ i--;
+ }
+
+ tx_ring->next_to_use = i;
+}
+
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
+{
+ struct net_device *netdev = tx_ring->netdev;
+
+ netif_stop_subqueue(netdev, tx_ring->queue_index);
+
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it. */
+ smp_mb();
+
+ /* We need to check again in a case another CPU has just
+ * made room available. */
+ if (igb_desc_unused(tx_ring) < size)
+ return -EBUSY;
+
+ /* A reprieve! */
+ netif_wake_subqueue(netdev, tx_ring->queue_index);
+
+ u64_stats_update_begin(&tx_ring->tx_syncp2);
+ tx_ring->tx_stats.restart_queue2++;
+ u64_stats_update_end(&tx_ring->tx_syncp2);
+
+ return 0;
+}
+
+static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
+{
+ if (igb_desc_unused(tx_ring) >= size)
+ return 0;
+ return __igb_maybe_stop_tx(tx_ring, size);
+}
+
+netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
+ struct igb_ring *tx_ring)
+{
+ struct igb_tx_buffer *first;
+ int tso;
+ u32 tx_flags = 0;
+ __be16 protocol = vlan_get_protocol(skb);
+ u8 hdr_len = 0;
+
+ /* need: 1 descriptor per page,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for skb->data,
+ * + 1 desc for context descriptor,
+ * otherwise try next time */
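+	/* (e.g. 3 frags -> 3 + 1 + 1 + 2 = 7 = nr_frags + 4) */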
+ if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+ /* this is a hard error */
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= IGB_TX_FLAGS_TSTAMP;
+ }
+
+ if (vlan_tx_tag_present(skb)) {
+ tx_flags |= IGB_TX_FLAGS_VLAN;
+ tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
+ }
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+
+ tso = igb_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
+ if (tso < 0) {
+ goto out_drop;
+ } else if (tso) {
+ tx_flags |= IGB_TX_FLAGS_TSO | IGB_TX_FLAGS_CSUM;
+ if (protocol == htons(ETH_P_IP))
+ tx_flags |= IGB_TX_FLAGS_IPV4;
+ } else if (igb_tx_csum(tx_ring, skb, tx_flags, protocol) &&
+ (skb->ip_summed == CHECKSUM_PARTIAL)) {
+ tx_flags |= IGB_TX_FLAGS_CSUM;
+ }
+
+ igb_tx_map(tx_ring, skb, first, tx_flags, hdr_len);
+
+ /* Make sure there is space in the ring for the next send. */
+ igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
+
+ return NETDEV_TX_OK;
+
+out_drop:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
+ struct sk_buff *skb)
+{
+ unsigned int r_idx = skb->queue_mapping;
+
+ if (r_idx >= adapter->num_tx_queues)
+ r_idx = r_idx % adapter->num_tx_queues;
+
+ return adapter->tx_ring[r_idx];
+}
+
+static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb->len <= 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /*
+ * The minimum packet size with TCTL.PSP set is 17 so pad the skb
+ * in order to meet this minimum size requirement.
+ */
+ if (skb->len < 17) {
+ if (skb_padto(skb, 17))
+ return NETDEV_TX_OK;
+ skb->len = 17;
+ }
+
+ return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
+}
+
+/**
+ * igb_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void igb_tx_timeout(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Do the reset outside of interrupt context */
+ adapter->tx_timeout_count++;
+
+ if (hw->mac.type == e1000_82580)
+ hw->dev_spec._82575.global_device_reset = true;
+
+ schedule_work(&adapter->reset_task);
+ wr32(E1000_EICS,
+ (adapter->eims_enable_mask & ~adapter->eims_other));
+}
+
+static void igb_reset_task(struct work_struct *work)
+{
+ struct igb_adapter *adapter;
+ adapter = container_of(work, struct igb_adapter, reset_task);
+
+ igb_dump(adapter);
+ netdev_err(adapter->netdev, "Reset adapter\n");
+ igb_reinit_locked(adapter);
+}
+
+/**
+ * igb_get_stats64 - Get System Network Statistics
+ * @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
+ *
+ **/
+static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ spin_lock(&adapter->stats64_lock);
+ igb_update_stats(adapter, &adapter->stats64);
+ memcpy(stats, &adapter->stats64, sizeof(*stats));
+ spin_unlock(&adapter->stats64_lock);
+
+ return stats;
+}
+
+/**
+ * igb_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int igb_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+ if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+ dev_err(&pdev->dev, "Invalid MTU setting\n");
+ return -EINVAL;
+ }
+
+#define MAX_STD_JUMBO_FRAME_SIZE 9238
+ if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
+ dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
+ return -EINVAL;
+ }
+
+ while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+ msleep(1);
+
+ /* igb_down has a dependency on max_frame_size */
+ adapter->max_frame_size = max_frame;
+
+ if (netif_running(netdev))
+ igb_down(adapter);
+
+ dev_info(&pdev->dev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+
+ if (netif_running(netdev))
+ igb_up(adapter);
+ else
+ igb_reset(adapter);
+
+ clear_bit(__IGB_RESETTING, &adapter->state);
+
+ return 0;
+}
+
+/**
+ * igb_update_stats - Update the board statistics counters
+ * @adapter: board private structure
+ * @net_stats: rtnl_link_stats64 struct to fill out
+ **/
+void igb_update_stats(struct igb_adapter *adapter,
+ struct rtnl_link_stats64 *net_stats)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ u32 reg, mpc;
+ u16 phy_tmp;
+ int i;
+ u64 bytes, packets;
+ unsigned int start;
+ u64 _bytes, _packets;
+
+#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
+
+ /*
+ * Prevent stats update while adapter is being reset, or if the pci
+ * connection is down.
+ */
+ if (adapter->link_speed == 0)
+ return;
+ if (pci_channel_offline(pdev))
+ return;
+
+ bytes = 0;
+ packets = 0;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
+ struct igb_ring *ring = adapter->rx_ring[i];
+
+ ring->rx_stats.drops += rqdpc_tmp;
+ net_stats->rx_fifo_errors += rqdpc_tmp;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+ _bytes = ring->rx_stats.bytes;
+ _packets = ring->rx_stats.packets;
+ } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+ bytes += _bytes;
+ packets += _packets;
+ }
+
+ net_stats->rx_bytes = bytes;
+ net_stats->rx_packets = packets;
+
+ bytes = 0;
+ packets = 0;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igb_ring *ring = adapter->tx_ring[i];
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+ _bytes = ring->tx_stats.bytes;
+ _packets = ring->tx_stats.packets;
+ } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+ bytes += _bytes;
+ packets += _packets;
+ }
+ net_stats->tx_bytes = bytes;
+ net_stats->tx_packets = packets;
+
+ /* read stats registers */
+ adapter->stats.crcerrs += rd32(E1000_CRCERRS);
+ adapter->stats.gprc += rd32(E1000_GPRC);
+ adapter->stats.gorc += rd32(E1000_GORCL);
+ rd32(E1000_GORCH); /* clear GORCL */
+ adapter->stats.bprc += rd32(E1000_BPRC);
+ adapter->stats.mprc += rd32(E1000_MPRC);
+ adapter->stats.roc += rd32(E1000_ROC);
+
+ adapter->stats.prc64 += rd32(E1000_PRC64);
+ adapter->stats.prc127 += rd32(E1000_PRC127);
+ adapter->stats.prc255 += rd32(E1000_PRC255);
+ adapter->stats.prc511 += rd32(E1000_PRC511);
+ adapter->stats.prc1023 += rd32(E1000_PRC1023);
+ adapter->stats.prc1522 += rd32(E1000_PRC1522);
+ adapter->stats.symerrs += rd32(E1000_SYMERRS);
+ adapter->stats.sec += rd32(E1000_SEC);
+
+ mpc = rd32(E1000_MPC);
+ adapter->stats.mpc += mpc;
+ net_stats->rx_fifo_errors += mpc;
+ adapter->stats.scc += rd32(E1000_SCC);
+ adapter->stats.ecol += rd32(E1000_ECOL);
+ adapter->stats.mcc += rd32(E1000_MCC);
+ adapter->stats.latecol += rd32(E1000_LATECOL);
+ adapter->stats.dc += rd32(E1000_DC);
+ adapter->stats.rlec += rd32(E1000_RLEC);
+ adapter->stats.xonrxc += rd32(E1000_XONRXC);
+ adapter->stats.xontxc += rd32(E1000_XONTXC);
+ adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
+ adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
+ adapter->stats.fcruc += rd32(E1000_FCRUC);
+ adapter->stats.gptc += rd32(E1000_GPTC);
+ adapter->stats.gotc += rd32(E1000_GOTCL);
+ rd32(E1000_GOTCH); /* clear GOTCL */
+ adapter->stats.rnbc += rd32(E1000_RNBC);
+ adapter->stats.ruc += rd32(E1000_RUC);
+ adapter->stats.rfc += rd32(E1000_RFC);
+ adapter->stats.rjc += rd32(E1000_RJC);
+ adapter->stats.tor += rd32(E1000_TORH);
+ adapter->stats.tot += rd32(E1000_TOTH);
+ adapter->stats.tpr += rd32(E1000_TPR);
+
+ adapter->stats.ptc64 += rd32(E1000_PTC64);
+ adapter->stats.ptc127 += rd32(E1000_PTC127);
+ adapter->stats.ptc255 += rd32(E1000_PTC255);
+ adapter->stats.ptc511 += rd32(E1000_PTC511);
+ adapter->stats.ptc1023 += rd32(E1000_PTC1023);
+ adapter->stats.ptc1522 += rd32(E1000_PTC1522);
+
+ adapter->stats.mptc += rd32(E1000_MPTC);
+ adapter->stats.bptc += rd32(E1000_BPTC);
+
+ adapter->stats.tpt += rd32(E1000_TPT);
+ adapter->stats.colc += rd32(E1000_COLC);
+
+ adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
+ /* read internal phy specific stats */
+ reg = rd32(E1000_CTRL_EXT);
+ if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
+ adapter->stats.rxerrc += rd32(E1000_RXERRC);
+ adapter->stats.tncrs += rd32(E1000_TNCRS);
+ }
+
+ adapter->stats.tsctc += rd32(E1000_TSCTC);
+ adapter->stats.tsctfc += rd32(E1000_TSCTFC);
+
+ adapter->stats.iac += rd32(E1000_IAC);
+ adapter->stats.icrxoc += rd32(E1000_ICRXOC);
+ adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
+ adapter->stats.icrxatc += rd32(E1000_ICRXATC);
+ adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
+ adapter->stats.ictxatc += rd32(E1000_ICTXATC);
+ adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
+ adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
+ adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
+
+ /* Fill out the OS statistics structure */
+ net_stats->multicast = adapter->stats.mprc;
+ net_stats->collisions = adapter->stats.colc;
+
+ /* Rx Errors */
+
+ /* RLEC on some newer hardware can be incorrect so build
+ * our own version based on RUC and ROC */
+ net_stats->rx_errors = adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc +
+ adapter->stats.cexterr;
+ net_stats->rx_length_errors = adapter->stats.ruc +
+ adapter->stats.roc;
+ net_stats->rx_crc_errors = adapter->stats.crcerrs;
+ net_stats->rx_frame_errors = adapter->stats.algnerrc;
+ net_stats->rx_missed_errors = adapter->stats.mpc;
+
+ /* Tx Errors */
+ net_stats->tx_errors = adapter->stats.ecol +
+ adapter->stats.latecol;
+ net_stats->tx_aborted_errors = adapter->stats.ecol;
+ net_stats->tx_window_errors = adapter->stats.latecol;
+ net_stats->tx_carrier_errors = adapter->stats.tncrs;
+
+ /* Tx Dropped needs to be maintained elsewhere */
+
+ /* Phy Stats */
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ if ((adapter->link_speed == SPEED_1000) &&
+ (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
+ phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
+ adapter->phy_stats.idle_errors += phy_tmp;
+ }
+ }
+
+ /* Management Stats */
+ adapter->stats.mgptc += rd32(E1000_MGTPTC);
+ adapter->stats.mgprc += rd32(E1000_MGTPRC);
+ adapter->stats.mgpdc += rd32(E1000_MGTPDC);
+
+ /* OS2BMC Stats */
+ reg = rd32(E1000_MANC);
+ if (reg & E1000_MANC_EN_BMC2OS) {
+ adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
+ adapter->stats.o2bspc += rd32(E1000_O2BSPC);
+ adapter->stats.b2ospc += rd32(E1000_B2OSPC);
+ adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
+ }
+}
+
+static irqreturn_t igb_msix_other(int irq, void *data)
+{
+ struct igb_adapter *adapter = data;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 icr = rd32(E1000_ICR);
+ /* reading ICR causes bit 31 of EICR to be cleared */
+
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
+ if (icr & E1000_ICR_DOUTSYNC) {
+ /* HW is reporting DMA is out of sync */
+ adapter->stats.doosync++;
+ /* The DMA Out of Sync is also indication of a spoof event
+ * in IOV mode. Check the Wrong VM Behavior register to
+ * see if it is really a spoof event. */
+ igb_check_wvbr(adapter);
+ }
+
+ /* Check for a mailbox event */
+ if (icr & E1000_ICR_VMMB)
+ igb_msg_task(adapter);
+
+ if (icr & E1000_ICR_LSC) {
+ hw->mac.get_link_status = 1;
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+ if (adapter->vfs_allocated_count)
+ wr32(E1000_IMS, E1000_IMS_LSC |
+ E1000_IMS_VMMB |
+ E1000_IMS_DOUTSYNC);
+ else
+ wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
+ wr32(E1000_EIMS, adapter->eims_other);
+
+ return IRQ_HANDLED;
+}
+
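+/*
+ * igb_write_itr - push a pending interrupt throttle value out to hardware
+ * @q_vector: vector whose EITR register should be updated
+ *
+ * The interval lives in bits 14:2 of the register, hence the 0x7FFC mask
+ * below; 82575 expects the value replicated in both 16-bit halves, while
+ * later parts take a single copy plus a control bit (0x8000000).
+ */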
+static void igb_write_itr(struct igb_q_vector *q_vector)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ u32 itr_val = q_vector->itr_val & 0x7FFC;
+
+ if (!q_vector->set_itr)
+ return;
+
+ if (!itr_val)
+ itr_val = 0x4;
+
+ if (adapter->hw.mac.type == e1000_82575)
+ itr_val |= itr_val << 16;
+ else
+ itr_val |= 0x8000000;
+
+ writel(itr_val, q_vector->itr_register);
+ q_vector->set_itr = 0;
+}
+
+static irqreturn_t igb_msix_ring(int irq, void *data)
+{
+ struct igb_q_vector *q_vector = data;
+
+ /* Write the ITR value calculated from the previous interrupt. */
+ igb_write_itr(q_vector);
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_IGB_DCA
+static void igb_update_dca(struct igb_q_vector *q_vector)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ int cpu = get_cpu();
+
+ if (q_vector->cpu == cpu)
+ goto out_no_update;
+
+ if (q_vector->tx_ring) {
+ int q = q_vector->tx_ring->reg_idx;
+ u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
+ if (hw->mac.type == e1000_82575) {
+ dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
+ dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+ } else {
+ dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
+ dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
+ E1000_DCA_TXCTRL_CPUID_SHIFT;
+ }
+ dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
+ wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
+ }
+ if (q_vector->rx_ring) {
+ int q = q_vector->rx_ring->reg_idx;
+ u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
+ if (hw->mac.type == e1000_82575) {
+ dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
+ dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+ } else {
+ dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
+ dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
+ E1000_DCA_RXCTRL_CPUID_SHIFT;
+ }
+ dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
+ dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
+ dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
+ wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
+ }
+ q_vector->cpu = cpu;
+out_no_update:
+ put_cpu();
+}
+
+static void igb_setup_dca(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+
+ if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
+ return;
+
+ /* Always use CB2 mode, difference is masked in the CB driver. */
+ wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ adapter->q_vector[i]->cpu = -1;
+ igb_update_dca(adapter->q_vector[i]);
+ }
+}
+
+static int __igb_notify_dca(struct device *dev, void *data)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned long event = *(unsigned long *)data;
+
+ switch (event) {
+ case DCA_PROVIDER_ADD:
+ /* if already enabled, don't do it again */
+ if (adapter->flags & IGB_FLAG_DCA_ENABLED)
+ break;
+ if (dca_add_requester(dev) == 0) {
+ adapter->flags |= IGB_FLAG_DCA_ENABLED;
+ dev_info(&pdev->dev, "DCA enabled\n");
+ igb_setup_dca(adapter);
+ break;
+ }
+ /* Fall Through since DCA is disabled. */
+ case DCA_PROVIDER_REMOVE:
+ if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
+ /* without this a class_device is left
+ * hanging around in the sysfs model */
+ dca_remove_requester(dev);
+ dev_info(&pdev->dev, "DCA disabled\n");
+ adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
+ wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
+ void *p)
+{
+ int ret_val;
+
+ ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
+ __igb_notify_dca);
+
+ return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
+}
+#endif /* CONFIG_IGB_DCA */
+
+static void igb_ping_all_vfs(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ping;
+ int i;
+
+	for (i = 0; i < adapter->vfs_allocated_count; i++) {
+ ping = E1000_PF_CONTROL_MSG;
+ if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
+ ping |= E1000_VT_MSGTYPE_CTS;
+ igb_write_mbx(hw, &ping, 1, i);
+ }
+}
+
+static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vmolr = rd32(E1000_VMOLR(vf));
+ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+
+ vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
+ IGB_VF_FLAG_MULTI_PROMISC);
+ vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
+
+ if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
+ vmolr |= E1000_VMOLR_MPME;
+ vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
+ *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
+ } else {
+ /*
+ * if we have hashes and we are clearing a multicast promisc
+ * flag we need to write the hashes to the MTA as this step
+ * was previously skipped
+ */
+ if (vf_data->num_vf_mc_hashes > 30) {
+ vmolr |= E1000_VMOLR_MPME;
+ } else if (vf_data->num_vf_mc_hashes) {
+ int j;
+ vmolr |= E1000_VMOLR_ROMPE;
+ for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
+ igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+ }
+ }
+
+ wr32(E1000_VMOLR(vf), vmolr);
+
+ /* there are flags left unprocessed, likely not supported */
+ if (*msgbuf & E1000_VT_MSGINFO_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int igb_set_vf_multicasts(struct igb_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
+ u16 *hash_list = (u16 *)&msgbuf[1];
+ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+ int i;
+
+ /* salt away the number of multicast addresses assigned
+	 * to this VF for later use to restore when the PF multicast
+ * list changes
+ */
+ vf_data->num_vf_mc_hashes = n;
+
+ /* only up to 30 hash values supported */
+ if (n > 30)
+ n = 30;
+
+ /* store the hashes for later use */
+ for (i = 0; i < n; i++)
+ vf_data->vf_mc_hashes[i] = hash_list[i];
+
+ /* Flush and reset the mta with the new values */
+ igb_set_rx_mode(adapter->netdev);
+
+ return 0;
+}
+
+static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct vf_data_storage *vf_data;
+ int i, j;
+
+ for (i = 0; i < adapter->vfs_allocated_count; i++) {
+ u32 vmolr = rd32(E1000_VMOLR(i));
+ vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
+
+ vf_data = &adapter->vf_data[i];
+
+ if ((vf_data->num_vf_mc_hashes > 30) ||
+ (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
+ vmolr |= E1000_VMOLR_MPME;
+ } else if (vf_data->num_vf_mc_hashes) {
+ vmolr |= E1000_VMOLR_ROMPE;
+ for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
+ igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+ }
+ wr32(E1000_VMOLR(i), vmolr);
+ }
+}
+
+static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 pool_mask, reg, vid;
+ int i;
+
+ pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
+
+ /* Find the vlan filter for this id */
+ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
+ reg = rd32(E1000_VLVF(i));
+
+ /* remove the vf from the pool */
+ reg &= ~pool_mask;
+
+ /* if pool is empty then remove entry from vfta */
+ if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
+ (reg & E1000_VLVF_VLANID_ENABLE)) {
+			vid = reg & E1000_VLVF_VLANID_MASK;
+			igb_vfta_set(hw, vid, false);
+			reg = 0;
+ }
+
+ wr32(E1000_VLVF(i), reg);
+ }
+
+ adapter->vf_data[vf].vlans_enabled = 0;
+}
+
+static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg, i;
+
+ /* The vlvf table only exists on 82576 hardware and newer */
+ if (hw->mac.type < e1000_82576)
+ return -1;
+
+ /* we only need to do this if VMDq is enabled */
+ if (!adapter->vfs_allocated_count)
+ return -1;
+
+ /* Find the vlan filter for this id */
+ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
+ reg = rd32(E1000_VLVF(i));
+ if ((reg & E1000_VLVF_VLANID_ENABLE) &&
+ vid == (reg & E1000_VLVF_VLANID_MASK))
+ break;
+ }
+
+ if (add) {
+ if (i == E1000_VLVF_ARRAY_SIZE) {
+ /* Did not find a matching VLAN ID entry that was
+ * enabled. Search for a free filter entry, i.e.
+ * one without the enable bit set
+ */
+ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
+ reg = rd32(E1000_VLVF(i));
+ if (!(reg & E1000_VLVF_VLANID_ENABLE))
+ break;
+ }
+ }
+ if (i < E1000_VLVF_ARRAY_SIZE) {
+ /* Found an enabled/available entry */
+ reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
+
+ /* if !enabled we need to set this up in vfta */
+ if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
+ /* add VID to filter table */
+ igb_vfta_set(hw, vid, true);
+ reg |= E1000_VLVF_VLANID_ENABLE;
+ }
+ reg &= ~E1000_VLVF_VLANID_MASK;
+ reg |= vid;
+ wr32(E1000_VLVF(i), reg);
+
+ /* do not modify RLPML for PF devices */
+ if (vf >= adapter->vfs_allocated_count)
+ return 0;
+
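+			/* grow the vf's rx packet size limit by 4 bytes
+			 * to make room for one VLAN tag */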
+ if (!adapter->vf_data[vf].vlans_enabled) {
+ u32 size;
+ reg = rd32(E1000_VMOLR(vf));
+ size = reg & E1000_VMOLR_RLPML_MASK;
+ size += 4;
+ reg &= ~E1000_VMOLR_RLPML_MASK;
+ reg |= size;
+ wr32(E1000_VMOLR(vf), reg);
+ }
+
+ adapter->vf_data[vf].vlans_enabled++;
+ return 0;
+ }
+ } else {
+ if (i < E1000_VLVF_ARRAY_SIZE) {
+ /* remove vf from the pool */
+ reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
+ /* if pool is empty then remove entry from vfta */
+ if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
+ reg = 0;
+ igb_vfta_set(hw, vid, false);
+ }
+ wr32(E1000_VLVF(i), reg);
+
+ /* do not modify RLPML for PF devices */
+ if (vf >= adapter->vfs_allocated_count)
+ return 0;
+
+ adapter->vf_data[vf].vlans_enabled--;
+ if (!adapter->vf_data[vf].vlans_enabled) {
+ u32 size;
+ reg = rd32(E1000_VMOLR(vf));
+ size = reg & E1000_VMOLR_RLPML_MASK;
+ size -= 4;
+ reg &= ~E1000_VMOLR_RLPML_MASK;
+ reg |= size;
+ wr32(E1000_VMOLR(vf), reg);
+ }
+ }
+ }
+ return 0;
+}
+
+static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (vid)
+ wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
+ else
+ wr32(E1000_VMVIR(vf), 0);
+}
+
+static int igb_ndo_set_vf_vlan(struct net_device *netdev,
+ int vf, u16 vlan, u8 qos)
+{
+ int err = 0;
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
+ return -EINVAL;
+ if (vlan || qos) {
+ err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
+ if (err)
+ goto out;
+ igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+ igb_set_vmolr(adapter, vf, !vlan);
+ adapter->vf_data[vf].pf_vlan = vlan;
+ adapter->vf_data[vf].pf_qos = qos;
+ dev_info(&adapter->pdev->dev,
+ "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF VLAN has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ } else {
+ igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
+ false, vf);
+ igb_set_vmvir(adapter, vlan, vf);
+ igb_set_vmolr(adapter, vf, true);
+ adapter->vf_data[vf].pf_vlan = 0;
+ adapter->vf_data[vf].pf_qos = 0;
+ }
+out:
+ return err;
+}
+
+static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+ int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
+ int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
+
+ return igb_vlvf_set(adapter, vid, add, vf);
+}
+
+static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
+{
+ /* clear flags - except flag that indicates PF has set the MAC */
+ adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
+ adapter->vf_data[vf].last_nack = jiffies;
+
+ /* reset offloads to defaults */
+ igb_set_vmolr(adapter, vf, true);
+
+ /* reset vlans for device */
+ igb_clear_vf_vfta(adapter, vf);
+ if (adapter->vf_data[vf].pf_vlan)
+ igb_ndo_set_vf_vlan(adapter->netdev, vf,
+ adapter->vf_data[vf].pf_vlan,
+ adapter->vf_data[vf].pf_qos);
+ else
+ igb_clear_vf_vfta(adapter, vf);
+
+ /* reset multicast table array for vf */
+ adapter->vf_data[vf].num_vf_mc_hashes = 0;
+
+ /* Flush and reset the mta with the new values */
+ igb_set_rx_mode(adapter->netdev);
+}
+
+static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
+{
+ unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
+
+ /* generate a new mac address as we were hotplug removed/added */
+ if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
+ random_ether_addr(vf_mac);
+
+ /* process remaining reset events */
+ igb_vf_reset(adapter, vf);
+}
+
+static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
+ int rar_entry = hw->mac.rar_entry_count - (vf + 1);
+ u32 reg, msgbuf[3];
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+ /* process all the same items cleared in a function level reset */
+ igb_vf_reset(adapter, vf);
+
+ /* set vf mac address */
+ igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
+
+ /* enable transmit and receive for vf */
+ reg = rd32(E1000_VFTE);
+ wr32(E1000_VFTE, reg | (1 << vf));
+ reg = rd32(E1000_VFRE);
+ wr32(E1000_VFRE, reg | (1 << vf));
+
+ adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
+ memcpy(addr, vf_mac, 6);
+ igb_write_mbx(hw, msgbuf, 3, vf);
+}
+
+static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
+{
+ /*
+ * The VF MAC Address is stored in a packed array of bytes
+ * starting at the second 32 bit word of the msg array
+ */
+	unsigned char *addr = (unsigned char *)&msg[1];
+ int err = -1;
+
+ if (is_valid_ether_addr(addr))
+ err = igb_set_vf_mac(adapter, vf, addr);
+
+ return err;
+}
+
+static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+ u32 msg = E1000_VT_MSGTYPE_NACK;
+
+ /* if device isn't clear to send it shouldn't be reading either */
+ if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
+ time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
+ igb_write_mbx(hw, &msg, 1, vf);
+ vf_data->last_nack = jiffies;
+ }
+}
+
+static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ u32 msgbuf[E1000_VFMAILBOX_SIZE];
+ struct e1000_hw *hw = &adapter->hw;
+ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+ s32 retval;
+
+ retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
+
+ if (retval) {
+ /* if receive failed revoke VF CTS stats and restart init */
+ dev_err(&pdev->dev, "Error receiving message from VF\n");
+ vf_data->flags &= ~IGB_VF_FLAG_CTS;
+ if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
+ return;
+ goto out;
+ }
+
+ /* this is a message we already processed, do nothing */
+ if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
+ return;
+
+ /*
+ * until the vf completes a reset it should not be
+ * allowed to start any configuration.
+ */
+
+ if (msgbuf[0] == E1000_VF_RESET) {
+ igb_vf_reset_msg(adapter, vf);
+ return;
+ }
+
+ if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
+ if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
+ return;
+ retval = -1;
+ goto out;
+ }
+
+ switch ((msgbuf[0] & 0xFFFF)) {
+ case E1000_VF_SET_MAC_ADDR:
+ retval = -EINVAL;
+ if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
+ retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
+ else
+ dev_warn(&pdev->dev,
+ "VF %d attempted to override administratively "
+ "set MAC address\nReload the VF driver to "
+ "resume operations\n", vf);
+ break;
+ case E1000_VF_SET_PROMISC:
+ retval = igb_set_vf_promisc(adapter, msgbuf, vf);
+ break;
+ case E1000_VF_SET_MULTICAST:
+ retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
+ break;
+ case E1000_VF_SET_LPE:
+ retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
+ break;
+ case E1000_VF_SET_VLAN:
+ retval = -1;
+ if (vf_data->pf_vlan)
+ dev_warn(&pdev->dev,
+ "VF %d attempted to override administratively "
+ "set VLAN tag\nReload the VF driver to "
+ "resume operations\n", vf);
+ else
+ retval = igb_set_vf_vlan(adapter, msgbuf, vf);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
+ retval = -1;
+ break;
+ }
+
+ msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
+out:
+ /* notify the VF of the results of what it sent us */
+ if (retval)
+ msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
+
+ igb_write_mbx(hw, msgbuf, 1, vf);
+}
+
+static void igb_msg_task(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vf;
+
+ for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
+ /* process any reset requests */
+ if (!igb_check_for_rst(hw, vf))
+ igb_vf_reset_event(adapter, vf);
+
+ /* process any messages pending */
+ if (!igb_check_for_msg(hw, vf))
+ igb_rcv_msg_from_vf(adapter, vf);
+
+ /* process any acks */
+ if (!igb_check_for_ack(hw, vf))
+ igb_rcv_ack_from_vf(adapter, vf);
+ }
+}
+
+/**
+ * igb_set_uta - Set unicast filter table address
+ * @adapter: board private structure
+ *
+ * The unicast table address is a register array of 32-bit registers.
+ * The table is meant to be used in a way similar to how the MTA is used
+ * however due to certain limitations in the hardware it is necessary to
+ * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
+ * enable bit to allow vlan tag stripping when promiscuous mode is enabled
+ **/
+static void igb_set_uta(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+
+ /* The UTA table only exists on 82576 hardware and newer */
+ if (hw->mac.type < e1000_82576)
+ return;
+
+ /* we only need to do this if VMDq is enabled */
+ if (!adapter->vfs_allocated_count)
+ return;
+
+ for (i = 0; i < hw->mac.uta_reg_count; i++)
+ array_wr32(E1000_UTA, i, ~0);
+}
+
+/**
+ * igb_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t igb_intr_msi(int irq, void *data)
+{
+ struct igb_adapter *adapter = data;
+ struct igb_q_vector *q_vector = adapter->q_vector[0];
+ struct e1000_hw *hw = &adapter->hw;
+ /* read ICR disables interrupts using IAM */
+ u32 icr = rd32(E1000_ICR);
+
+ igb_write_itr(q_vector);
+
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
+ if (icr & E1000_ICR_DOUTSYNC) {
+ /* HW is reporting DMA is out of sync */
+ adapter->stats.doosync++;
+ }
+
+ if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
+ hw->mac.get_link_status = 1;
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * igb_intr - Legacy Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t igb_intr(int irq, void *data)
+{
+ struct igb_adapter *adapter = data;
+ struct igb_q_vector *q_vector = adapter->q_vector[0];
+ struct e1000_hw *hw = &adapter->hw;
+ /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
+ * need for the IMC write */
+ u32 icr = rd32(E1000_ICR);
+ if (!icr)
+ return IRQ_NONE; /* Not our interrupt */
+
+ igb_write_itr(q_vector);
+
+ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+ * not set, then the adapter didn't send an interrupt */
+ if (!(icr & E1000_ICR_INT_ASSERTED))
+ return IRQ_NONE;
+
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
+ if (icr & E1000_ICR_DOUTSYNC) {
+ /* HW is reporting DMA is out of sync */
+ adapter->stats.doosync++;
+ }
+
+ if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
+ hw->mac.get_link_status = 1;
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ struct e1000_hw *hw = &adapter->hw;
+
+ if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
+ (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
+ if (!adapter->msix_entries)
+ igb_set_itr(adapter);
+ else
+ igb_update_ring_itr(q_vector);
+ }
+
+ if (!test_bit(__IGB_DOWN, &adapter->state)) {
+ if (adapter->msix_entries)
+ wr32(E1000_EIMS, q_vector->eims_value);
+ else
+ igb_irq_enable(adapter);
+ }
+}
+
+/**
+ * igb_poll - NAPI Rx polling callback
+ * @napi: napi polling structure
+ * @budget: count of how many packets we should handle
+ **/
+static int igb_poll(struct napi_struct *napi, int budget)
+{
+ struct igb_q_vector *q_vector = container_of(napi,
+ struct igb_q_vector,
+ napi);
+ bool clean_complete = true;
+
+#ifdef CONFIG_IGB_DCA
+ if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
+ igb_update_dca(q_vector);
+#endif
+ if (q_vector->tx_ring)
+ clean_complete = igb_clean_tx_irq(q_vector);
+
+ if (q_vector->rx_ring)
+ clean_complete &= igb_clean_rx_irq(q_vector, budget);
+
+ /* If all work not completed, return budget and keep polling */
+ if (!clean_complete)
+ return budget;
+
+ /* If not enough Rx work done, exit the polling mode */
+ napi_complete(napi);
+ igb_ring_irq_enable(q_vector);
+
+ return 0;
+}
+
+/**
+ * igb_systim_to_hwtstamp - convert system time value to hw timestamp
+ * @adapter: board private structure
+ * @shhwtstamps: timestamp structure to update
+ * @regval: unsigned 64bit system time value.
+ *
+ * We need to convert the system time value stored in the RX/TXSTMP registers
+ * into a hwtstamp which can be used by the upper level timestamping functions
+ */
+static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
+ struct skb_shared_hwtstamps *shhwtstamps,
+ u64 regval)
+{
+ u64 ns;
+
+ /*
+ * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
+ * 24 to match clock shift we setup earlier.
+ */
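+	/* (e.g. a raw reading of 5, i.e. 5 ns on 82580, becomes 5 << 24
+	 *  so the timecounter, set up with shift 24, scales it back) */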
+ if (adapter->hw.mac.type == e1000_82580)
+ regval <<= IGB_82580_TSYNC_SHIFT;
+
+ ns = timecounter_cyc2time(&adapter->clock, regval);
+ timecompare_update(&adapter->compare, ns);
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
+}
+
+/**
+ * igb_tx_hwtstamp - utility function which checks for TX time stamp
+ * @q_vector: pointer to q_vector containing needed info
+ * @buffer_info: pointer to igb_tx_buffer structure
+ *
+ * If we were asked to do hardware stamping and such a time stamp is
+ * available, then it must have been for this skb here because we
+ * only allow one such packet into the queue.
+ */
+static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
+ struct igb_tx_buffer *buffer_info)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 regval;
+
+	/* exit early if the skb was not hw timestamped or no TX stamp is valid */
+ if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
+ !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
+ return;
+
+ regval = rd32(E1000_TXSTMPL);
+ regval |= (u64)rd32(E1000_TXSTMPH) << 32;
+
+ igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+ skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
+}
+
+/**
+ * igb_clean_tx_irq - Reclaim resources after transmit completes
+ * @q_vector: pointer to q_vector containing needed info
+ * returns true if ring is completely cleaned
+ **/
+static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ struct igb_ring *tx_ring = q_vector->tx_ring;
+ struct igb_tx_buffer *tx_buffer;
+ union e1000_adv_tx_desc *tx_desc, *eop_desc;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = q_vector->tx_work_limit;
+ unsigned int i = tx_ring->next_to_clean;
+
+ if (test_bit(__IGB_DOWN, &adapter->state))
+ return true;
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ tx_desc = IGB_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
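+	/* i is tracked as (index - count) so the wrap check in the loop
+	 * below is a cheap !i test rather than a compare against count */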
+
+ for (; budget; budget--) {
+ eop_desc = tx_buffer->next_to_watch;
+
+ /* prevent any other reads prior to eop_desc */
+ rmb();
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* if DD is not set pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buffer->next_to_watch = NULL;
+
+ /* update the statistics for this packet */
+ total_bytes += tx_buffer->bytecount;
+ total_packets += tx_buffer->gso_segs;
+
+ /* retrieve hardware timestamp */
+ igb_tx_hwtstamp(q_vector, tx_buffer);
+
+ /* free the skb */
+ dev_kfree_skb_any(tx_buffer->skb);
+ tx_buffer->skb = NULL;
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
+
+ /* clear last DMA location and unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer->dma = 0;
+
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IGB_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (tx_buffer->dma) {
+ dma_unmap_page(tx_ring->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
+ }
+ }
+
+ /* clear last DMA location */
+ tx_buffer->dma = 0;
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IGB_TX_DESC(tx_ring, 0);
+ }
+ }
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->tx_syncp);
+ tx_ring->tx_stats.bytes += total_bytes;
+ tx_ring->tx_stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->tx_syncp);
+ tx_ring->total_bytes += total_bytes;
+ tx_ring->total_packets += total_packets;
+
+ if (tx_ring->detect_tx_hung) {
+ struct e1000_hw *hw = &adapter->hw;
+
+ eop_desc = tx_buffer->next_to_watch;
+
+		/* Detect a transmit hang in hardware; this serializes the
+		 * check with the clearing of time_stamp and movement of i */
+ tx_ring->detect_tx_hung = false;
+ if (eop_desc &&
+ time_after(jiffies, tx_buffer->time_stamp +
+ (adapter->tx_timeout_factor * HZ)) &&
+ !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
+
+ /* detected Tx unit hang */
+ dev_err(tx_ring->dev,
+ "Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%p>\n"
+ " jiffies <%lx>\n"
+ " desc.status <%x>\n",
+ tx_ring->queue_index,
+ rd32(E1000_TDH(tx_ring->reg_idx)),
+ readl(tx_ring->tail),
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_buffer->time_stamp,
+ eop_desc,
+ jiffies,
+ eop_desc->wb.status);
+ netif_stop_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+
+ /* we are about to reset, no point in enabling stuff */
+ return true;
+ }
+ }
+
+ if (unlikely(total_packets &&
+ netif_carrier_ok(tx_ring->netdev) &&
+ igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index) &&
+ !(test_bit(__IGB_DOWN, &adapter->state))) {
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+
+ u64_stats_update_begin(&tx_ring->tx_syncp);
+ tx_ring->tx_stats.restart_queue++;
+ u64_stats_update_end(&tx_ring->tx_syncp);
+ }
+ }
+
+ return !!budget;
+}
+
+static inline void igb_rx_checksum(struct igb_ring *ring,
+ u32 status_err, struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+	/* bail if rx checksum was disabled through ethtool or the
+	 * Ignore Checksum bit is set */
+ if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
+ (status_err & E1000_RXD_STAT_IXSM))
+ return;
+
+ /* TCP/UDP checksum error bit is set */
+ if (status_err &
+ (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
+ /*
+		 * work around an errata with SCTP packets where the TCPE
+		 * (aka L4E) bit is set incorrectly on 64 byte (60 byte
+		 * without CRC) packets; just let the stack verify crc32c
+ */
+ if ((skb->len == 60) &&
+ (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) {
+ u64_stats_update_begin(&ring->rx_syncp);
+ ring->rx_stats.csum_err++;
+ u64_stats_update_end(&ring->rx_syncp);
+ }
+ /* let the stack verify checksum errors */
+ return;
+ }
+ /* It must be a TCP or UDP packet with a valid checksum */
+ if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
+}
+
+static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
+ struct sk_buff *skb)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ u64 regval;
+
+ /*
+ * If this bit is set, then the RX registers contain the time stamp. No
+ * other packet will be time stamped until we read these registers, so
+ * read the registers to make them available again. Because only one
+ * packet can be time stamped at a time, we know that the register
+ * values must belong to this one here and therefore we don't need to
+ * compare any of the additional attributes stored for it.
+ *
+ * If nothing went wrong, then it should have a shared tx_flags that we
+ * can turn into a skb_shared_hwtstamps.
+ */
+ if (staterr & E1000_RXDADV_STAT_TSIP) {
+ u32 *stamp = (u32 *)skb->data;
+ regval = le32_to_cpu(*(stamp + 2));
+ regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
+ skb_pull(skb, IGB_TS_HDR_LEN);
+ } else {
+		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+ return;
+
+ regval = rd32(E1000_RXSTMPL);
+ regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+ }
+
+ igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+}
+
+static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
+{
+ /* HW will not DMA in data larger than the given buffer, even if it
+ * parses the (NFS, of course) header to be larger. In that case, it
+ * fills the header buffer and spills the rest into the page.
+ */
+ u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
+ E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
+ if (hlen > IGB_RX_HDR_LEN)
+ hlen = IGB_RX_HDR_LEN;
+ return hlen;
+}
+
+static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
+{
+ struct igb_ring *rx_ring = q_vector->rx_ring;
+ union e1000_adv_rx_desc *rx_desc;
+ const int current_node = numa_node_id();
+ unsigned int total_bytes = 0, total_packets = 0;
+ u32 staterr;
+ u16 cleaned_count = igb_desc_unused(rx_ring);
+ u16 i = rx_ring->next_to_clean;
+
+ rx_desc = IGB_RX_DESC(rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ while (staterr & E1000_RXD_STAT_DD) {
+ struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
+ struct sk_buff *skb = buffer_info->skb;
+ union e1000_adv_rx_desc *next_rxd;
+
+ buffer_info->skb = NULL;
+ prefetch(skb->data);
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+
+ next_rxd = IGB_RX_DESC(rx_ring, i);
+ prefetch(next_rxd);
+
+ /*
+ * This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * RXD_STAT_DD bit is set
+ */
+ rmb();
+
+ if (!skb_is_nonlinear(skb)) {
+ __skb_put(skb, igb_get_hlen(rx_desc));
+ dma_unmap_single(rx_ring->dev, buffer_info->dma,
+ IGB_RX_HDR_LEN,
+ DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+ }
+
+ if (rx_desc->wb.upper.length) {
+ u16 length = le16_to_cpu(rx_desc->wb.upper.length);
+
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ buffer_info->page,
+ buffer_info->page_offset,
+ length);
+
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+
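+			/* keep the page for reuse only if we are its sole
+			 * user and it is local to this NUMA node; otherwise
+			 * our reference passes to the skb with the frag */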
+ if ((page_count(buffer_info->page) != 1) ||
+ (page_to_nid(buffer_info->page) != current_node))
+ buffer_info->page = NULL;
+ else
+ get_page(buffer_info->page);
+
+ dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
+ buffer_info->page_dma = 0;
+ }
+
+ if (!(staterr & E1000_RXD_STAT_EOP)) {
+ struct igb_rx_buffer *next_buffer;
+ next_buffer = &rx_ring->rx_buffer_info[i];
+ buffer_info->skb = next_buffer->skb;
+ buffer_info->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ goto next_desc;
+ }
+
+ if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
+ dev_kfree_skb_any(skb);
+ goto next_desc;
+ }
+
+ if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
+ igb_rx_hwtstamp(q_vector, staterr, skb);
+ total_bytes += skb->len;
+ total_packets++;
+
+ igb_rx_checksum(rx_ring, staterr, skb);
+
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+ if (staterr & E1000_RXD_STAT_VP) {
+ u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
+
+ __vlan_hwaccel_put_tag(skb, vid);
+ }
+ napi_gro_receive(&q_vector->napi, skb);
+
+ budget--;
+next_desc:
+ if (!budget)
+ break;
+
+ cleaned_count++;
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
+ igb_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+
+ rx_ring->next_to_clean = i;
+ u64_stats_update_begin(&rx_ring->rx_syncp);
+ rx_ring->rx_stats.packets += total_packets;
+ rx_ring->rx_stats.bytes += total_bytes;
+ u64_stats_update_end(&rx_ring->rx_syncp);
+ rx_ring->total_packets += total_packets;
+ rx_ring->total_bytes += total_bytes;
+
+ if (cleaned_count)
+ igb_alloc_rx_buffers(rx_ring, cleaned_count);
+
+ return !!budget;
+}
+
+static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *bi)
+{
+ struct sk_buff *skb = bi->skb;
+ dma_addr_t dma = bi->dma;
+
+ if (dma)
+ return true;
+
+ if (likely(!skb)) {
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ IGB_RX_HDR_LEN);
+ bi->skb = skb;
+ if (!skb) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ /* initialize skb for ring */
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ }
+
+ dma = dma_map_single(rx_ring->dev, skb->data,
+ IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ bi->dma = dma;
+ return true;
+}
+
+static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *bi)
+{
+ struct page *page = bi->page;
+ dma_addr_t page_dma = bi->page_dma;
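+	/* alternate between the two halves of the page: each page is
+	 * shared by two receive buffers, half a page apiece */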
+ unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
+
+ if (page_dma)
+ return true;
+
+ if (!page) {
+ page = netdev_alloc_page(rx_ring->netdev);
+ bi->page = page;
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+ }
+
+ page_dma = dma_map_page(rx_ring->dev, page,
+ page_offset, PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(rx_ring->dev, page_dma)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ bi->page_dma = page_dma;
+ bi->page_offset = page_offset;
+ return true;
+}
+
+/**
+ * igb_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @rx_ring: rx descriptor ring to allocate new receive buffers for
+ * @cleaned_count: number of buffers to allocate
+ **/
+void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
+{
+ union e1000_adv_rx_desc *rx_desc;
+ struct igb_rx_buffer *bi;
+ u16 i = rx_ring->next_to_use;
+
+ rx_desc = IGB_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
+ i -= rx_ring->count;
+
+ while (cleaned_count--) {
+ if (!igb_alloc_mapped_skb(rx_ring, bi))
+ break;
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info. */
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+
+ if (!igb_alloc_mapped_page(rx_ring, bi))
+ break;
+
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+
+ rx_desc++;
+ bi++;
+ i++;
+ if (unlikely(!i)) {
+ rx_desc = IGB_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_buffer_info;
+ i -= rx_ring->count;
+ }
+
+ /* clear the hdr_addr for the next_to_use descriptor */
+ rx_desc->read.hdr_addr = 0;
+ }
+
+ i += rx_ring->count;
+
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+ writel(i, rx_ring->tail);
+ }
+}
+
+/**
+ * igb_mii_ioctl - handle MII register ioctls
+ * @netdev: network interface device structure
+ * @ifr: interface request structure
+ * @cmd: ioctl command (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG)
+ **/
+static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ if (adapter->hw.phy.media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = adapter->hw.phy.addr;
+ break;
+ case SIOCGMIIREG:
+ if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+ &data->val_out))
+ return -EIO;
+ break;
+ case SIOCSMIIREG:
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+/**
+ * igb_hwtstamp_ioctl - control hardware time stamping
+ * @netdev: network interface device structure
+ * @ifr: interface request structure carrying the hwtstamp_config
+ * @cmd: ioctl command (SIOCSHWTSTAMP)
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware
+ * filters. Not all combinations are supported, in particular event
+ * type has to be specified. Matching the kind of event packet is
+ * not supported, with the exception of "all V2 events regardless of
+ * level 2 or 4".
+ *
+ **/
+static int igb_hwtstamp_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct hwtstamp_config config;
+ u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
+ u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+ u32 tsync_rx_cfg = 0;
+ bool is_l4 = false;
+ bool is_l2 = false;
+ u32 regval;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+		tsync_tx_ctl = 0;
+		/* fall through - both TX_OFF and TX_ON are valid requests */
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tsync_rx_ctl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_ALL:
+ /*
+ * register TSYNCRXCFG must be set, therefore it is not
+ * possible to time stamp both Sync and Delay_Req messages
+ * => fall back to time stamping all packets
+ */
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
+ is_l2 = true;
+ is_l4 = true;
+ config.rx_filter = HWTSTAMP_FILTER_SOME;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
+ is_l2 = true;
+ is_l4 = true;
+ config.rx_filter = HWTSTAMP_FILTER_SOME;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ is_l2 = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (hw->mac.type == e1000_82575) {
+ if (tsync_rx_ctl || tsync_tx_ctl)
+ return -EINVAL;
+ return 0;
+ }
+
+ /*
+ * Per-packet timestamping only works if all packets are
+ * timestamped, so enable timestamping of all packets as long
+ * as one rx filter was configured.
+ */
+ if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
+ tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ }
+
+ /* enable/disable TX */
+ regval = rd32(E1000_TSYNCTXCTL);
+ regval &= ~E1000_TSYNCTXCTL_ENABLED;
+ regval |= tsync_tx_ctl;
+ wr32(E1000_TSYNCTXCTL, regval);
+
+ /* enable/disable RX */
+ regval = rd32(E1000_TSYNCRXCTL);
+ regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
+ regval |= tsync_rx_ctl;
+ wr32(E1000_TSYNCRXCTL, regval);
+
+ /* define which PTP packets are time stamped */
+ wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
+
+ /* define ethertype filter for timestamped packets */
+ if (is_l2)
+ wr32(E1000_ETQF(3),
+ (E1000_ETQF_FILTER_ENABLE | /* enable filter */
+ E1000_ETQF_1588 | /* enable timestamping */
+ ETH_P_1588)); /* 1588 eth protocol type */
+ else
+ wr32(E1000_ETQF(3), 0);
+
+#define PTP_PORT 319
+ /* L4 Queue Filter[3]: filter by destination port and protocol */
+ if (is_l4) {
+ u32 ftqf = (IPPROTO_UDP /* UDP */
+ | E1000_FTQF_VF_BP /* VF not compared */
+ | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
+ | E1000_FTQF_MASK); /* mask all inputs */
+ ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
+
+ wr32(E1000_IMIR(3), htons(PTP_PORT));
+ wr32(E1000_IMIREXT(3),
+ (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
+ if (hw->mac.type == e1000_82576) {
+ /* enable source port check */
+ wr32(E1000_SPQF(3), htons(PTP_PORT));
+ ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
+ }
+ wr32(E1000_FTQF(3), ftqf);
+ } else {
+ wr32(E1000_FTQF(3), E1000_FTQF_MASK);
+ }
+ wrfl();
+
+ adapter->hwtstamp_config = config;
+
+ /* clear TX/RX time stamp registers, just to be sure */
+ regval = rd32(E1000_TXSTMPH);
+ regval = rd32(E1000_RXSTMPH);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
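+/*
+ * Usage sketch (illustrative only): enabling hardware time stamping from
+ * userspace through the handler above, using struct hwtstamp_config from
+ * <linux/net_tstamp.h>. The interface name "eth0" is an assumption.
+ *
+ *     struct hwtstamp_config cfg = { 0 };
+ *     struct ifreq ifr = { 0 };
+ *     int fd = socket(AF_INET, SOCK_DGRAM, 0);
+ *
+ *     cfg.tx_type = HWTSTAMP_TX_ON;
+ *     cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ *     strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *     ifr.ifr_data = (char *)&cfg;
+ *     ioctl(fd, SIOCSHWTSTAMP, &ifr);
+ *
+ * On return, cfg.rx_filter reports the filter actually applied, which may
+ * be broader than the one requested (see the fallbacks above).
+ */
+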
+/**
+ * igb_ioctl - entry point for the supported SIOC* ioctl commands
+ * @netdev: network interface device structure
+ * @ifr: interface request structure
+ * @cmd: ioctl command
+ **/
+static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return igb_mii_ioctl(netdev, ifr, cmd);
+ case SIOCSHWTSTAMP:
+ return igb_hwtstamp_ioctl(netdev, ifr, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ struct igb_adapter *adapter = hw->back;
+ u16 cap_offset;
+
+ cap_offset = adapter->pdev->pcie_cap;
+ if (!cap_offset)
+ return -E1000_ERR_CONFIG;
+
+ pci_read_config_word(adapter->pdev, cap_offset + reg, value);
+
+ return 0;
+}
+
+s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ struct igb_adapter *adapter = hw->back;
+ u16 cap_offset;
+
+ cap_offset = adapter->pdev->pcie_cap;
+ if (!cap_offset)
+ return -E1000_ERR_CONFIG;
+
+ pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
+
+ return 0;
+}
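+
+/*
+ * Usage sketch (illustrative): the reg argument is an offset relative to
+ * the PCIe capability, so the generic constants from pci_regs.h apply,
+ * e.g. reading the Device Control register (max_payload is a hypothetical
+ * local):
+ *
+ *     u16 devctl;
+ *
+ *     if (!igb_read_pcie_cap_reg(hw, PCI_EXP_DEVCTL, &devctl))
+ *             max_payload = (devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
+ */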
+
+static void igb_vlan_mode(struct net_device *netdev, u32 features)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl, rctl;
+
+ igb_irq_disable(adapter);
+
+ if (features & NETIF_F_HW_VLAN_RX) {
+ /* enable VLAN tag insert/strip */
+ ctrl = rd32(E1000_CTRL);
+ ctrl |= E1000_CTRL_VME;
+ wr32(E1000_CTRL, ctrl);
+
+ /* Disable CFI check */
+ rctl = rd32(E1000_RCTL);
+ rctl &= ~E1000_RCTL_CFIEN;
+ wr32(E1000_RCTL, rctl);
+ } else {
+ /* disable VLAN tag insert/strip */
+ ctrl = rd32(E1000_CTRL);
+ ctrl &= ~E1000_CTRL_VME;
+ wr32(E1000_CTRL, ctrl);
+ }
+
+ igb_rlpml_set(adapter);
+
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+ igb_irq_enable(adapter);
+}
+
+static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int pf_id = adapter->vfs_allocated_count;
+
+ /* attempt to add filter to vlvf array */
+ igb_vlvf_set(adapter, vid, true, pf_id);
+
+ /* add the filter since PF can receive vlans w/o entry in vlvf */
+ igb_vfta_set(hw, vid, true);
+
+ set_bit(vid, adapter->active_vlans);
+}
+
+static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int pf_id = adapter->vfs_allocated_count;
+ s32 err;
+
+ igb_irq_disable(adapter);
+
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+ igb_irq_enable(adapter);
+
+ /* remove vlan from VLVF table array */
+ err = igb_vlvf_set(adapter, vid, false, pf_id);
+
+ /* if vid was not present in VLVF just remove it from table */
+ if (err)
+ igb_vfta_set(hw, vid, false);
+
+ clear_bit(vid, adapter->active_vlans);
+}
+
+static void igb_restore_vlan(struct igb_adapter *adapter)
+{
+ u16 vid;
+
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ igb_vlan_rx_add_vid(adapter->netdev, vid);
+}
+
+int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+
+ mac->autoneg = 0;
+
+ /* Make sure dplx is at most 1 bit and lsb of speed is not set
+ * for the switch() below to work */
+ if ((spd & 1) || (dplx & ~1))
+ goto err_inval;
+
+ /* Fiber NICs only allow 1000 Mbps full duplex */
+ if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
+ (spd != SPEED_1000 ||
+ dplx != DUPLEX_FULL))
+ goto err_inval;
+
+ switch (spd + dplx) {
+ case SPEED_10 + DUPLEX_HALF:
+ mac->forced_speed_duplex = ADVERTISE_10_HALF;
+ break;
+ case SPEED_10 + DUPLEX_FULL:
+ mac->forced_speed_duplex = ADVERTISE_10_FULL;
+ break;
+ case SPEED_100 + DUPLEX_HALF:
+ mac->forced_speed_duplex = ADVERTISE_100_HALF;
+ break;
+ case SPEED_100 + DUPLEX_FULL:
+ mac->forced_speed_duplex = ADVERTISE_100_FULL;
+ break;
+ case SPEED_1000 + DUPLEX_FULL:
+ mac->autoneg = 1;
+ adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+ break;
+ case SPEED_1000 + DUPLEX_HALF: /* not supported */
+ default:
+ goto err_inval;
+ }
+ return 0;
+
+err_inval:
+ dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
+}
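+
+/*
+ * Why the switch on spd + dplx above is safe: the ethtool speeds are 10,
+ * 100 and 1000, all with the low bit clear, and duplex is at most 1
+ * (DUPLEX_HALF = 0, DUPLEX_FULL = 1), so every valid pair sums to a
+ * unique value; e.g. 100 + 1 = 101 can only mean 100 Mbps full duplex.
+ * The (spd & 1) || (dplx & ~1) guard enforces exactly these constraints.
+ */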
+
+static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl, rctl, status;
+ u32 wufc = adapter->wol;
+#ifdef CONFIG_PM
+ int retval = 0;
+#endif
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ igb_close(netdev);
+
+ igb_clear_interrupt_scheme(adapter);
+
+#ifdef CONFIG_PM
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+#endif
+
+ status = rd32(E1000_STATUS);
+ if (status & E1000_STATUS_LU)
+ wufc &= ~E1000_WUFC_LNKC;
+
+ if (wufc) {
+ igb_setup_rctl(adapter);
+ igb_set_rx_mode(netdev);
+
+ /* turn on all-multi mode if wake on multicast is enabled */
+ if (wufc & E1000_WUFC_MC) {
+ rctl = rd32(E1000_RCTL);
+ rctl |= E1000_RCTL_MPE;
+ wr32(E1000_RCTL, rctl);
+ }
+
+ ctrl = rd32(E1000_CTRL);
+ /* advertise wake from D3Cold */
+ #define E1000_CTRL_ADVD3WUC 0x00100000
+ /* phy power management enable */
+ #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
+ ctrl |= E1000_CTRL_ADVD3WUC;
+ wr32(E1000_CTRL, ctrl);
+
+ /* Allow time for pending master requests to run */
+ igb_disable_pcie_master(hw);
+
+ wr32(E1000_WUC, E1000_WUC_PME_EN);
+ wr32(E1000_WUFC, wufc);
+ } else {
+ wr32(E1000_WUC, 0);
+ wr32(E1000_WUFC, 0);
+ }
+
+ *enable_wake = wufc || adapter->en_mng_pt;
+ if (!*enable_wake)
+ igb_power_down_link(adapter);
+ else
+ igb_power_up_link(adapter);
+
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant. */
+ igb_release_hw_control(adapter);
+
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int retval;
+ bool wake;
+
+ retval = __igb_shutdown(pdev, &wake);
+ if (retval)
+ return retval;
+
+ if (wake) {
+ pci_prepare_to_sleep(pdev);
+ } else {
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+
+ return 0;
+}
+
+static int igb_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "igb: Cannot enable PCI device from suspend\n");
+ return err;
+ }
+ pci_set_master(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ if (igb_init_interrupt_scheme(adapter)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ igb_reset(adapter);
+
+ /* let the f/w know that the h/w is now under the control of the
+ * driver. */
+ igb_get_hw_control(adapter);
+
+ wr32(E1000_WUS, ~0);
+
+ if (netif_running(netdev)) {
+ err = igb_open(netdev);
+ if (err)
+ return err;
+ }
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+#endif
+
+static void igb_shutdown(struct pci_dev *pdev)
+{
+ bool wake;
+
+ __igb_shutdown(pdev, &wake);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void igb_netpoll(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+
+ if (!adapter->msix_entries) {
+ struct igb_q_vector *q_vector = adapter->q_vector[0];
+ igb_irq_disable(adapter);
+ napi_schedule(&q_vector->napi);
+ return;
+ }
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+ wr32(E1000_EIMC, q_vector->eims_value);
+ napi_schedule(&q_vector->napi);
+ }
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+/**
+ * igb_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ igb_down(adapter);
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * igb_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the igb_resume routine.
+ */
+static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ pci_ers_result_t result;
+ int err;
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ igb_reset(adapter);
+ wr32(E1000_WUS, ~0);
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ err = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
+ "failed 0x%0x\n", err);
+ /* non-fatal, continue */
+ }
+
+ return result;
+}
+
+/**
+ * igb_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * its OK to resume normal operation. Implementation resembles the
+ * second-half of the igb_resume routine.
+ */
+static void igb_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev)) {
+ if (igb_up(adapter)) {
+ dev_err(&pdev->dev, "igb_up failed after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+
+ /* let the f/w know that the h/w is now under the control of the
+ * driver. */
+ igb_get_hw_control(adapter);
+}
+
+static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
+ u8 qsel)
+{
+ u32 rar_low, rar_high;
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+ /* Indicate to hardware the Address is Valid. */
+ rar_high |= E1000_RAH_AV;
+
+ if (hw->mac.type == e1000_82575)
+ rar_high |= E1000_RAH_POOL_1 * qsel;
+ else
+ rar_high |= E1000_RAH_POOL_1 << qsel;
+
+ wr32(E1000_RAL(index), rar_low);
+ wrfl();
+ wr32(E1000_RAH(index), rar_high);
+ wrfl();
+}
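+
+/*
+ * Worked example (illustrative): for the address 00:11:22:33:44:55 the
+ * packing above produces rar_low = 0x33221100 and rar_high = 0x00005544
+ * before the valid and pool bits are or'd in, i.e. byte 0 of the MAC
+ * address ends up in the least significant byte of RAL.
+ */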
+
+static int igb_set_vf_mac(struct igb_adapter *adapter,
+ int vf, unsigned char *mac_addr)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ /* VF MAC addresses start at the end of the receive addresses and move
+ * towards the first, so a collision should not be possible */
+ int rar_entry = hw->mac.rar_entry_count - (vf + 1);
+
+ memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
+
+ igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
+
+ return 0;
+}
+
+static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
+ return -EINVAL;
+ adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
+ dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
+ dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
+ " change effective.");
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ return igb_set_vf_mac(adapter, vf, mac);
+}
+
+static int igb_link_mbps(int internal_link_speed)
+{
+ switch (internal_link_speed) {
+ case SPEED_100:
+ return 100;
+ case SPEED_1000:
+ return 1000;
+ default:
+ return 0;
+ }
+}
+
+static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
+ int link_speed)
+{
+ int rf_dec, rf_int;
+ u32 bcnrc_val;
+
+ if (tx_rate != 0) {
+ /* Calculate the rate factor values to set */
+ rf_int = link_speed / tx_rate;
+ rf_dec = (link_speed - (rf_int * tx_rate));
+ rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
+
+ bcnrc_val = E1000_RTTBCNRC_RS_ENA;
+ bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
+ E1000_RTTBCNRC_RF_INT_MASK);
+ bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
+ } else {
+ bcnrc_val = 0;
+ }
+
+ wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
+ wr32(E1000_RTTBCNRC, bcnrc_val);
+}
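+
+/*
+ * Worked example (illustrative, assuming the usual 14-bit fractional
+ * shift for E1000_RTTBCNRC_RF_INT_SHIFT): with link_speed = 1000 and
+ * tx_rate = 300, the code above yields rf_int = 3 and
+ * rf_dec = 100 * 16384 / 300 = 5461, a fixed-point rate factor of about
+ * 3.333; the hardware divides the link rate by this factor to arrive
+ * back at roughly 300 Mbps for the VF's queue.
+ */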
+
+static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
+{
+ int actual_link_speed, i;
+ bool reset_rate = false;
+
+ /* VF TX rate limit was not set or not supported */
+ if ((adapter->vf_rate_link_speed == 0) ||
+ (adapter->hw.mac.type != e1000_82576))
+ return;
+
+ actual_link_speed = igb_link_mbps(adapter->link_speed);
+ if (actual_link_speed != adapter->vf_rate_link_speed) {
+ reset_rate = true;
+ adapter->vf_rate_link_speed = 0;
+ dev_info(&adapter->pdev->dev,
+ "Link speed has been changed. VF Transmit "
+ "rate is disabled\n");
+ }
+
+ for (i = 0; i < adapter->vfs_allocated_count; i++) {
+ if (reset_rate)
+ adapter->vf_data[i].tx_rate = 0;
+
+ igb_set_vf_rate_limit(&adapter->hw, i,
+ adapter->vf_data[i].tx_rate,
+ actual_link_speed);
+ }
+}
+
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int actual_link_speed;
+
+ if (hw->mac.type != e1000_82576)
+ return -EOPNOTSUPP;
+
+ actual_link_speed = igb_link_mbps(adapter->link_speed);
+ if ((vf >= adapter->vfs_allocated_count) ||
+ (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
+ (tx_rate < 0) || (tx_rate > actual_link_speed))
+ return -EINVAL;
+
+ adapter->vf_rate_link_speed = actual_link_speed;
+ adapter->vf_data[vf].tx_rate = (u16)tx_rate;
+ igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+
+ return 0;
+}
+
+static int igb_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ if (vf >= adapter->vfs_allocated_count)
+ return -EINVAL;
+ ivi->vf = vf;
+ memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
+ ivi->tx_rate = adapter->vf_data[vf].tx_rate;
+ ivi->vlan = adapter->vf_data[vf].pf_vlan;
+ ivi->qos = adapter->vf_data[vf].pf_qos;
+ return 0;
+}
+
+static void igb_vmm_control(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg;
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+ default:
+ /* replication is not supported for 82575 */
+ return;
+ case e1000_82576:
+ /* notify HW that the MAC is adding vlan tags */
+ reg = rd32(E1000_DTXCTL);
+ reg |= E1000_DTXCTL_VLAN_ADDED;
+ wr32(E1000_DTXCTL, reg);
+ /* fall through */
+ case e1000_82580:
+ /* enable replication vlan tag stripping */
+ reg = rd32(E1000_RPLOLR);
+ reg |= E1000_RPLOLR_STRVLAN;
+ wr32(E1000_RPLOLR, reg);
+ /* fall through */
+ case e1000_i350:
+ /* none of the above registers are supported by i350 */
+ break;
+ }
+
+ if (adapter->vfs_allocated_count) {
+ igb_vmdq_set_loopback_pf(hw, true);
+ igb_vmdq_set_replication_pf(hw, true);
+ igb_vmdq_set_anti_spoofing_pf(hw, true,
+ adapter->vfs_allocated_count);
+ } else {
+ igb_vmdq_set_loopback_pf(hw, false);
+ igb_vmdq_set_replication_pf(hw, false);
+ }
+}
+
+/* igb_main.c */
diff --git a/drivers/net/ethernet/intel/igbvf/Makefile b/drivers/net/ethernet/intel/igbvf/Makefile
new file mode 100644
index 000000000000..0fa3db3dd8b6
--- /dev/null
+++ b/drivers/net/ethernet/intel/igbvf/Makefile
@@ -0,0 +1,38 @@
+################################################################################
+#
+# Intel(R) 82576 Virtual Function Linux driver
+# Copyright(c) 2009 - 2010 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) 82576 VF ethernet driver
+#
+
+obj-$(CONFIG_IGBVF) += igbvf.o
+
+igbvf-objs := vf.o \
+ mbx.o \
+ ethtool.o \
+ netdev.o
+
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h
new file mode 100644
index 000000000000..79f2604673fe
--- /dev/null
+++ b/drivers/net/ethernet/intel/igbvf/defines.h
@@ -0,0 +1,125 @@
+/*******************************************************************************
+
+ Intel(R) 82576 Virtual Function Linux driver
+ Copyright(c) 1999 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_DEFINES_H_
+#define _E1000_DEFINES_H_
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define REQ_RX_DESCRIPTOR_MULTIPLE 8
+
+/* IVAR valid bit */
+#define E1000_IVAR_VALID 0x80
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
+#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+
+#define E1000_RXDEXT_STATERR_CE 0x01000000
+#define E1000_RXDEXT_STATERR_SE 0x02000000
+#define E1000_RXDEXT_STATERR_SEQ 0x04000000
+#define E1000_RXDEXT_STATERR_CXE 0x10000000
+#define E1000_RXDEXT_STATERR_TCPE 0x20000000
+#define E1000_RXDEXT_STATERR_IPE 0x40000000
+#define E1000_RXDEXT_STATERR_RXE 0x80000000
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
+
+/* Device Control */
+#define E1000_CTRL_RST 0x04000000 /* Global reset */
+
+/* Device Status */
+#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
+#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+
+#define MAX_JUMBO_FRAME_SIZE 0x3F00
+
+/* 802.1q VLAN Packet Size */
+#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
+
+/* Error Codes */
+#define E1000_SUCCESS 0
+#define E1000_ERR_CONFIG 3
+#define E1000_ERR_MAC_INIT 5
+#define E1000_ERR_MBX 15
+
+#ifndef ETH_ADDR_LEN
+#define ETH_ADDR_LEN 6
+#endif
+
+/* SRRCTL bit definitions */
+#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
+#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
+#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
+#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
+#define E1000_SRRCTL_DROP_EN 0x80000000
+
+#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
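+
+/*
+ * Example (illustrative): BSIZEPKT is programmed in 1 KB units, hence the
+ * "shift right" note above; a 2048 byte receive buffer is encoded as
+ * 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT = 2.
+ */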
+
+/* Additional Descriptor Control definitions */
+#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */
+#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
+
+/* Direct Cache Access (DCA) definitions */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+
+#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+#endif /* _E1000_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
new file mode 100644
index 000000000000..b0b14d63dfbf
--- /dev/null
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -0,0 +1,534 @@
+/*******************************************************************************
+
+ Intel(R) 82576 Virtual Function Linux driver
+ Copyright(c) 2009 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for igbvf */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+
+#include "igbvf.h"
+#include <linux/if_vlan.h>
+
+struct igbvf_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+ int base_stat_offset;
+};
+
+#define IGBVF_STAT(current, base) \
+ sizeof(((struct igbvf_adapter *)0)->current), \
+ offsetof(struct igbvf_adapter, current), \
+ offsetof(struct igbvf_adapter, base)
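+
+/*
+ * Expansion example (illustrative): IGBVF_STAT(stats.gprc, stats.base_gprc)
+ * supplies the sizeof_stat, stat_offset and base_stat_offset initializers,
+ * letting igbvf_get_ethtool_stats() below report each counter as
+ * (current - base) without naming the fields individually.
+ */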
+
+static const struct igbvf_stats igbvf_gstrings_stats[] = {
+ { "rx_packets", IGBVF_STAT(stats.gprc, stats.base_gprc) },
+ { "tx_packets", IGBVF_STAT(stats.gptc, stats.base_gptc) },
+ { "rx_bytes", IGBVF_STAT(stats.gorc, stats.base_gorc) },
+ { "tx_bytes", IGBVF_STAT(stats.gotc, stats.base_gotc) },
+ { "multicast", IGBVF_STAT(stats.mprc, stats.base_mprc) },
+ { "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) },
+ { "lbrx_packets", IGBVF_STAT(stats.gprlbc, stats.base_gprlbc) },
+ { "tx_restart_queue", IGBVF_STAT(restart_queue, zero_base) },
+ { "rx_long_byte_count", IGBVF_STAT(stats.gorc, stats.base_gorc) },
+ { "rx_csum_offload_good", IGBVF_STAT(hw_csum_good, zero_base) },
+ { "rx_csum_offload_errors", IGBVF_STAT(hw_csum_err, zero_base) },
+ { "rx_header_split", IGBVF_STAT(rx_hdr_split, zero_base) },
+ { "alloc_rx_buff_failed", IGBVF_STAT(alloc_rx_buff_failed, zero_base) },
+};
+
+#define IGBVF_GLOBAL_STATS_LEN ARRAY_SIZE(igbvf_gstrings_stats)
+
+static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Link test (on/offline)"
+};
+
+#define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test)
+
+static int igbvf_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 status;
+
+ ecmd->supported = SUPPORTED_1000baseT_Full;
+
+ ecmd->advertising = ADVERTISED_1000baseT_Full;
+
+ ecmd->port = -1;
+ ecmd->transceiver = XCVR_DUMMY1;
+
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU) {
+ if (status & E1000_STATUS_SPEED_1000)
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ else if (status & E1000_STATUS_SPEED_100)
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
+ else
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
+
+ if (status & E1000_STATUS_FD)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ } else {
+ ethtool_cmd_speed_set(ecmd, -1);
+ ecmd->duplex = -1;
+ }
+
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static int igbvf_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ return -EOPNOTSUPP;
+}
+
+static void igbvf_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ return;
+}
+
+static int igbvf_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ return -EOPNOTSUPP;
+}
+
+static u32 igbvf_get_rx_csum(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ return !(adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED);
+}
+
+static int igbvf_set_rx_csum(struct net_device *netdev, u32 data)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ if (data)
+ adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
+ else
+ adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;
+
+ return 0;
+}
+
+static u32 igbvf_get_tx_csum(struct net_device *netdev)
+{
+ return (netdev->features & NETIF_F_IP_CSUM) != 0;
+}
+
+static int igbvf_set_tx_csum(struct net_device *netdev, u32 data)
+{
+ if (data)
+ netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ else
+ netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ return 0;
+}
+
+static int igbvf_set_tso(struct net_device *netdev, u32 data)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ if (data) {
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ } else {
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ }
+
+ dev_info(&adapter->pdev->dev, "TSO is %s\n",
+ data ? "Enabled" : "Disabled");
+ return 0;
+}
+
+static u32 igbvf_get_msglevel(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void igbvf_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+static int igbvf_get_regs_len(struct net_device *netdev)
+{
+#define IGBVF_REGS_LEN 8
+ return IGBVF_REGS_LEN * sizeof(u32);
+}
+
+static void igbvf_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+
+ memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
+
+ regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
+ adapter->pdev->device;
+
+ regs_buff[0] = er32(CTRL);
+ regs_buff[1] = er32(STATUS);
+
+ regs_buff[2] = er32(RDLEN(0));
+ regs_buff[3] = er32(RDH(0));
+ regs_buff[4] = er32(RDT(0));
+
+ regs_buff[5] = er32(TDLEN(0));
+ regs_buff[6] = er32(TDH(0));
+ regs_buff[7] = er32(TDT(0));
+}
+
+static int igbvf_get_eeprom_len(struct net_device *netdev)
+{
+ return 0;
+}
+
+static int igbvf_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ return -EOPNOTSUPP;
+}
+
+static int igbvf_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ return -EOPNOTSUPP;
+}
+
+static void igbvf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ char firmware_version[32] = "N/A";
+
+ strncpy(drvinfo->driver, igbvf_driver_name, 32);
+ strncpy(drvinfo->version, igbvf_driver_version, 32);
+ strncpy(drvinfo->fw_version, firmware_version, 32);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ drvinfo->regdump_len = igbvf_get_regs_len(netdev);
+ drvinfo->eedump_len = igbvf_get_eeprom_len(netdev);
+}
+
+static void igbvf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct igbvf_ring *tx_ring = adapter->tx_ring;
+ struct igbvf_ring *rx_ring = adapter->rx_ring;
+
+ ring->rx_max_pending = IGBVF_MAX_RXD;
+ ring->tx_max_pending = IGBVF_MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rx_ring->count;
+ ring->tx_pending = tx_ring->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int igbvf_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct igbvf_ring *temp_ring;
+ int err = 0;
+ u32 new_rx_count, new_tx_count;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD);
+ new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD);
+ new_tx_count = min(new_tx_count, (u32)IGBVF_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ if ((new_tx_count == adapter->tx_ring->count) &&
+ (new_rx_count == adapter->rx_ring->count)) {
+ /* nothing to do */
+ return 0;
+ }
+
+ while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
+ msleep(1);
+
+ if (!netif_running(adapter->netdev)) {
+ adapter->tx_ring->count = new_tx_count;
+ adapter->rx_ring->count = new_rx_count;
+ goto clear_reset;
+ }
+
+ temp_ring = vmalloc(sizeof(struct igbvf_ring));
+ if (!temp_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
+
+ igbvf_down(adapter);
+
+ /*
+ * We can't just free everything and then setup again,
+ * because the ISRs in MSI-X mode get passed pointers
+ * to the tx and rx ring structs.
+ */
+ if (new_tx_count != adapter->tx_ring->count) {
+ memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring));
+
+ temp_ring->count = new_tx_count;
+ err = igbvf_setup_tx_resources(adapter, temp_ring);
+ if (err)
+ goto err_setup;
+
+ igbvf_free_tx_resources(adapter->tx_ring);
+
+ memcpy(adapter->tx_ring, temp_ring, sizeof(struct igbvf_ring));
+ }
+
+ if (new_rx_count != adapter->rx_ring->count) {
+ memcpy(temp_ring, adapter->rx_ring, sizeof(struct igbvf_ring));
+
+ temp_ring->count = new_rx_count;
+ err = igbvf_setup_rx_resources(adapter, temp_ring);
+ if (err)
+ goto err_setup;
+
+ igbvf_free_rx_resources(adapter->rx_ring);
+
+ memcpy(adapter->rx_ring, temp_ring, sizeof(struct igbvf_ring));
+ }
+err_setup:
+ igbvf_up(adapter);
+ vfree(temp_ring);
+clear_reset:
+ clear_bit(__IGBVF_RESETTING, &adapter->state);
+ return err;
+}
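+
+/*
+ * Usage sketch (illustrative): this handler is reached through ethtool's
+ * ring controls, e.g. "ethtool -G eth0 rx 512 tx 512"; the requested
+ * counts are clamped to the IGBVF_{MIN,MAX}_*XD bounds and aligned to
+ * REQ_*_DESCRIPTOR_MULTIPLE before being applied.
+ */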
+
+static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ *data = 0;
+
+ hw->mac.ops.check_for_link(hw);
+
+ if (!(er32(STATUS) & E1000_STATUS_LU))
+ *data = 1;
+
+ return *data;
+}
+
+static void igbvf_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ set_bit(__IGBVF_TESTING, &adapter->state);
+
+ /*
+ * Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result
+ */
+ if (igbvf_link_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ clear_bit(__IGBVF_TESTING, &adapter->state);
+ msleep_interruptible(4 * 1000);
+}
+
+static void igbvf_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+}
+
+static int igbvf_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ return -EOPNOTSUPP;
+}
+
+static int igbvf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->itr_setting <= 3)
+ ec->rx_coalesce_usecs = adapter->itr_setting;
+ else
+ ec->rx_coalesce_usecs = adapter->itr_setting >> 2;
+
+ return 0;
+}
+
+static int igbvf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
+ ((ec->rx_coalesce_usecs > 3) &&
+ (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
+ (ec->rx_coalesce_usecs == 2))
+ return -EINVAL;
+
+ /* convert to rate of irq's per second */
+ if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
+ adapter->itr = IGBVF_START_ITR;
+ adapter->itr_setting = ec->rx_coalesce_usecs;
+ } else {
+ adapter->itr = ec->rx_coalesce_usecs << 2;
+ adapter->itr_setting = adapter->itr;
+ }
+
+ writel(adapter->itr,
+ hw->hw_addr + adapter->rx_ring[0].itr_register);
+
+ return 0;
+}
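+
+/*
+ * Worked example (illustrative): the interrupt throttle register counts
+ * in roughly 256 ns units, so a request of rx-usecs 100 takes the else
+ * branch above and programs 100 << 2 = 400 units; requests of 1 or 3
+ * select the driver's preset ITR modes instead (2 is rejected).
+ */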
+
+static int igbvf_nway_reset(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ if (netif_running(netdev))
+ igbvf_reinit_locked(adapter);
+ return 0;
+}
+
+static void igbvf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ igbvf_update_stats(adapter);
+ for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) {
+ char *p = (char *)adapter +
+ igbvf_gstrings_stats[i].stat_offset;
+ char *b = (char *)adapter +
+ igbvf_gstrings_stats[i].base_stat_offset;
+ data[i] = ((igbvf_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) :
+ (*(u32 *)p - *(u32 *)b));
+ }
+}
+
+static int igbvf_get_sset_count(struct net_device *dev, int stringset)
+{
+ switch (stringset) {
+ case ETH_SS_TEST:
+ return IGBVF_TEST_LEN;
+ case ETH_SS_STATS:
+ return IGBVF_GLOBAL_STATS_LEN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *igbvf_gstrings_test, sizeof(igbvf_gstrings_test));
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, igbvf_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static const struct ethtool_ops igbvf_ethtool_ops = {
+ .get_settings = igbvf_get_settings,
+ .set_settings = igbvf_set_settings,
+ .get_drvinfo = igbvf_get_drvinfo,
+ .get_regs_len = igbvf_get_regs_len,
+ .get_regs = igbvf_get_regs,
+ .get_wol = igbvf_get_wol,
+ .set_wol = igbvf_set_wol,
+ .get_msglevel = igbvf_get_msglevel,
+ .set_msglevel = igbvf_set_msglevel,
+ .nway_reset = igbvf_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = igbvf_get_eeprom_len,
+ .get_eeprom = igbvf_get_eeprom,
+ .set_eeprom = igbvf_set_eeprom,
+ .get_ringparam = igbvf_get_ringparam,
+ .set_ringparam = igbvf_set_ringparam,
+ .get_pauseparam = igbvf_get_pauseparam,
+ .set_pauseparam = igbvf_set_pauseparam,
+ .get_rx_csum = igbvf_get_rx_csum,
+ .set_rx_csum = igbvf_set_rx_csum,
+ .get_tx_csum = igbvf_get_tx_csum,
+ .set_tx_csum = igbvf_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = igbvf_set_tso,
+ .self_test = igbvf_diag_test,
+ .get_sset_count = igbvf_get_sset_count,
+ .get_strings = igbvf_get_strings,
+ .get_ethtool_stats = igbvf_get_ethtool_stats,
+ .get_coalesce = igbvf_get_coalesce,
+ .set_coalesce = igbvf_set_coalesce,
+};
+
+void igbvf_set_ethtool_ops(struct net_device *netdev)
+{
+ /* have to "undeclare" const on this struct to remove warnings */
+ SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igbvf_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
new file mode 100644
index 000000000000..fd4a7b780fdd
--- /dev/null
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -0,0 +1,326 @@
+/*******************************************************************************
+
+ Intel(R) 82576 Virtual Function Linux driver
+ Copyright(c) 2009 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _IGBVF_H_
+#define _IGBVF_H_
+
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+
+#include "vf.h"
+
+/* Forward declarations */
+struct igbvf_info;
+struct igbvf_adapter;
+
+/* Interrupt defines */
+#define IGBVF_START_ITR 648 /* ~6000 ints/sec */
+
+/* Interrupt modes, as used by the IntMode parameter */
+#define IGBVF_INT_MODE_LEGACY 0
+#define IGBVF_INT_MODE_MSI 1
+#define IGBVF_INT_MODE_MSIX 2
+
+/* Tx/Rx descriptor defines */
+#define IGBVF_DEFAULT_TXD 256
+#define IGBVF_MAX_TXD 4096
+#define IGBVF_MIN_TXD 80
+
+#define IGBVF_DEFAULT_RXD 256
+#define IGBVF_MAX_RXD 4096
+#define IGBVF_MIN_RXD 80
+
+#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */
+#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */
+
+/* RX descriptor control thresholds.
+ * PTHRESH - MAC will consider prefetch if it has fewer than this number of
+ * descriptors available in its onboard memory.
+ * Setting this to 0 disables RX descriptor prefetch.
+ * HTHRESH - MAC will only prefetch if there are at least this many descriptors
+ * available in host memory.
+ * If PTHRESH is 0, this should also be 0.
+ * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
+ * descriptors until either it has this many to write back, or the
+ * ITR timer expires.
+ */
+#define IGBVF_RX_PTHRESH 16
+#define IGBVF_RX_HTHRESH 8
+#define IGBVF_RX_WTHRESH 1
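+
+/*
+ * Sketch (illustrative; the register layout is an assumption): the three
+ * thresholds are conventionally packed into a single RXDCTL write at bit
+ * offsets 0, 8 and 16:
+ *
+ *     rxdctl |= IGBVF_RX_PTHRESH;
+ *     rxdctl |= IGBVF_RX_HTHRESH << 8;
+ *     rxdctl |= IGBVF_RX_WTHRESH << 16;
+ */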
+
+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
+#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */
+
+/* How many Tx Descriptors do we need to call netif_wake_queue? */
+#define IGBVF_TX_QUEUE_WAKE 32
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define AUTO_ALL_MODES 0
+#define IGBVF_EEPROM_APME 0x0400
+
+#define IGBVF_MNG_VLAN_NONE (-1)
+
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
+enum igbvf_boards {
+ board_vf,
+ board_i350_vf,
+};
+
+struct igbvf_queue_stats {
+ u64 packets;
+ u64 bytes;
+};
+
+/*
+ * wrappers around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct igbvf_buffer {
+ dma_addr_t dma;
+ struct sk_buff *skb;
+ union {
+ /* Tx */
+ struct {
+ unsigned long time_stamp;
+ u16 length;
+ u16 next_to_watch;
+ u16 mapped_as_page;
+ };
+ /* Rx */
+ struct {
+ struct page *page;
+ u64 page_dma;
+ unsigned int page_offset;
+ };
+ };
+};
+
+union igbvf_desc {
+ union e1000_adv_rx_desc rx_desc;
+ union e1000_adv_tx_desc tx_desc;
+ struct e1000_adv_tx_context_desc tx_context_desc;
+};
+
+struct igbvf_ring {
+ struct igbvf_adapter *adapter; /* backlink */
+ union igbvf_desc *desc; /* pointer to ring memory */
+ dma_addr_t dma; /* phys address of ring */
+ unsigned int size; /* length of ring in bytes */
+ unsigned int count; /* number of desc. in ring */
+
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ u16 head;
+ u16 tail;
+
+ /* array of buffer information structs */
+ struct igbvf_buffer *buffer_info;
+ struct napi_struct napi;
+
+ char name[IFNAMSIZ + 5];
+ u32 eims_value;
+ u32 itr_val;
+ u16 itr_register;
+ int set_itr;
+
+ struct sk_buff *rx_skb_top;
+
+ struct igbvf_queue_stats stats;
+};
+
+/* board specific private data structure */
+struct igbvf_adapter {
+ struct timer_list watchdog_timer;
+ struct timer_list blink_timer;
+
+ struct work_struct reset_task;
+ struct work_struct watchdog_task;
+
+ const struct igbvf_info *ei;
+
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u32 bd_number;
+ u32 rx_buffer_len;
+ u32 polling_interval;
+ u16 mng_vlan_id;
+ u16 link_speed;
+ u16 link_duplex;
+
+ spinlock_t tx_queue_lock; /* prevent concurrent tail updates */
+
+ /* track device up/down/testing state */
+ unsigned long state;
+
+ /* Interrupt Throttle Rate */
+ u32 itr;
+ u32 itr_setting;
+ u16 tx_itr;
+ u16 rx_itr;
+
+ /*
+ * Tx
+ */
+ struct igbvf_ring *tx_ring /* One per active queue */
+ ____cacheline_aligned_in_smp;
+
+ unsigned int restart_queue;
+ u32 txd_cmd;
+
+ u32 tx_int_delay;
+ u32 tx_abs_int_delay;
+
+ unsigned int total_tx_bytes;
+ unsigned int total_tx_packets;
+ unsigned int total_rx_bytes;
+ unsigned int total_rx_packets;
+
+ /* Tx stats */
+ u32 tx_timeout_count;
+ u32 tx_fifo_head;
+ u32 tx_head_addr;
+ u32 tx_fifo_size;
+ u32 tx_dma_failed;
+
+ /*
+ * Rx
+ */
+ struct igbvf_ring *rx_ring;
+
+ u32 rx_int_delay;
+ u32 rx_abs_int_delay;
+
+ /* Rx stats */
+ u64 hw_csum_err;
+ u64 hw_csum_good;
+ u64 rx_hdr_split;
+ u32 alloc_rx_buff_failed;
+ u32 rx_dma_failed;
+
+ unsigned int rx_ps_hdr_size;
+ u32 max_frame_size;
+ u32 min_frame_size;
+
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct net_device_stats net_stats;
+ spinlock_t stats_lock; /* prevent concurrent stats updates */
+
+ /* structs defined in e1000_hw.h */
+ struct e1000_hw hw;
+
+ /* The VF counters don't clear on read so we have to get a base
+ * count on driver start up and always subtract that base on
+ * the first update, thus the flag.
+ */
+ struct e1000_vf_stats stats;
+ u64 zero_base;
+
+ struct igbvf_ring test_tx_ring;
+ struct igbvf_ring test_rx_ring;
+ u32 test_icr;
+
+ u32 msg_enable;
+ struct msix_entry *msix_entries;
+ int int_mode;
+ u32 eims_enable_mask;
+ u32 eims_other;
+ u32 int_counter0;
+ u32 int_counter1;
+
+ u32 eeprom_wol;
+ u32 wol;
+ u32 pba;
+
+ bool fc_autoneg;
+
+ unsigned long led_status;
+
+ unsigned int flags;
+ unsigned long last_reset;
+};
+
+struct igbvf_info {
+ enum e1000_mac_type mac;
+ unsigned int flags;
+ u32 pba;
+ void (*init_ops)(struct e1000_hw *);
+ s32 (*get_variants)(struct igbvf_adapter *);
+};
+
+/* hardware capability, feature, and workaround flags */
+#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0)
+
+#define IGBVF_RX_DESC_ADV(R, i) \
+ (&((((R).desc))[i].rx_desc))
+#define IGBVF_TX_DESC_ADV(R, i) \
+ (&((((R).desc))[i].tx_desc))
+#define IGBVF_TX_CTXTDESC_ADV(R, i) \
+ (&((((R).desc))[i].tx_context_desc))
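+
+/*
+ * Usage example (illustrative): the macros take the ring by value, so a
+ * caller dereferences its ring pointer at the call site, e.g.
+ *
+ *     union e1000_adv_rx_desc *rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
+ */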
+
+enum igbvf_state_t {
+ __IGBVF_TESTING,
+ __IGBVF_RESETTING,
+ __IGBVF_DOWN
+};
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+extern char igbvf_driver_name[];
+extern const char igbvf_driver_version[];
+
+extern void igbvf_check_options(struct igbvf_adapter *);
+extern void igbvf_set_ethtool_ops(struct net_device *);
+
+extern int igbvf_up(struct igbvf_adapter *);
+extern void igbvf_down(struct igbvf_adapter *);
+extern void igbvf_reinit_locked(struct igbvf_adapter *);
+extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
+extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
+extern void igbvf_free_rx_resources(struct igbvf_ring *);
+extern void igbvf_free_tx_resources(struct igbvf_ring *);
+extern void igbvf_update_stats(struct igbvf_adapter *);
+
+extern unsigned int copybreak;
+
+#endif /* _IGBVF_H_ */
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
new file mode 100644
index 000000000000..3d6f4cc3998a
--- /dev/null
+++ b/drivers/net/ethernet/intel/igbvf/mbx.c
@@ -0,0 +1,350 @@
+/*******************************************************************************
+
+ Intel(R) 82576 Virtual Function Linux driver
+ Copyright(c) 2009 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "mbx.h"
+
+/**
+ * e1000_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+static s32 e1000_poll_for_msg(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ if (!mbx->ops.check_for_msg)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_msg(hw)) {
+ countdown--;
+ udelay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
+}
+
+/**
+ * e1000_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ *
+ * returns SUCCESS if it successfully received a message acknowledgement
+ **/
+static s32 e1000_poll_for_ack(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ if (!mbx->ops.check_for_ack)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_ack(hw)) {
+ countdown--;
+ udelay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
+}
+
+/**
+ * e1000_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+static s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (!mbx->ops.read)
+ goto out;
+
+ ret_val = e1000_poll_for_msg(hw);
+
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+static s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ /* exit if we either can't write or there isn't a defined timeout */
+ if (!mbx->ops.write || !mbx->timeout)
+ goto out;
+
+ /* send msg*/
+ ret_val = mbx->ops.write(hw, msg, size);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = e1000_poll_for_ack(hw);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_read_v2p_mailbox - read v2p mailbox
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to read the v2p mailbox without losing the read to
+ * clear status bits.
+ **/
+static u32 e1000_read_v2p_mailbox(struct e1000_hw *hw)
+{
+ u32 v2p_mailbox = er32(V2PMAILBOX(0));
+
+ v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox;
+ hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS;
+
+ return v2p_mailbox;
+}
+
+/**
+ * e1000_check_for_bit_vf - Determine if a status bit was set
+ * @hw: pointer to the HW structure
+ * @mask: bitmask for bits to be tested and cleared
+ *
+ * This function is used to check for the read to clear bits within
+ * the V2P mailbox.
+ **/
+static s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask)
+{
+ u32 v2p_mailbox = e1000_read_v2p_mailbox(hw);
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (v2p_mailbox & mask)
+ ret_val = E1000_SUCCESS;
+
+ hw->dev_spec.vf.v2p_mailbox &= ~mask;
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_msg_vf - checks to see if the PF has sent mail
+ * @hw: pointer to the HW structure
+ *
+ * returns SUCCESS if the PF has set the Status bit or else ERR_MBX
+ **/
+static s32 e1000_check_for_msg_vf(struct e1000_hw *hw)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) {
+ ret_val = E1000_SUCCESS;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @hw: pointer to the HW structure
+ *
+ * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
+ **/
+static s32 e1000_check_for_ack_vf(struct e1000_hw *hw)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) {
+ ret_val = E1000_SUCCESS;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_rst_vf - checks to see if the PF has reset
+ * @hw: pointer to the HW structure
+ *
+ * returns true if the PF has set the reset done bit or else false
+ **/
+static s32 e1000_check_for_rst_vf(struct e1000_hw *hw)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD |
+ E1000_V2PMAILBOX_RSTI))) {
+ ret_val = E1000_SUCCESS;
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_obtain_mbx_lock_vf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ /* Take ownership of the buffer */
+ ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
+
+ /* reserve mailbox for vf use */
+ if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU)
+ ret_val = E1000_SUCCESS;
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_mbx_vf - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
+{
+ s32 err;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ err = e1000_obtain_mbx_lock_vf(hw);
+ if (err)
+ goto out_no_write;
+
+ /* flush any ack or msg as we are going to overwrite mailbox */
+ e1000_check_for_ack_vf(hw);
+ e1000_check_for_msg_vf(hw);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ array_ew32(VMBMEM(0), i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* Drop VFU and interrupt the PF to tell it a message has been sent */
+ ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_REQ);
+
+out_no_write:
+ return err;
+}
+
+/**
+ * e1000_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns SUCCESS if it successfully read a message from the buffer
+ **/
+static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
+{
+ s32 err;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ err = e1000_obtain_mbx_lock_vf(hw);
+ if (err)
+ goto out_no_read;
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = array_er32(VMBMEM(0), i);
+
+ /* Acknowledge receipt and release mailbox, then we're done */
+ ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return err;
+}
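
The ops table below also wires in posted variants (e1000_read_posted_mbx and e1000_write_posted_mbx, defined earlier in this file and not part of this hunk), which poll the other side before completing. A minimal sketch of the polling half, assuming it honours the mbx->timeout retry count and mbx->usec_delay spacing set up below:

/* Minimal sketch of the posted-receive polling pattern, assuming the
 * timeout/usec_delay semantics described above; the driver's real
 * e1000_read_posted_mbx lives earlier in this file. */
static s32 example_poll_for_msg(struct e1000_hw *hw)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	int countdown = mbx->timeout;

	while (countdown && mbx->ops.check_for_msg(hw)) {
		countdown--;
		udelay(mbx->usec_delay);
	}
	return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
}
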
+
+/**
+ * e1000_init_mbx_params_vf - set initial values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+s32 e1000_init_mbx_params_vf(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+
+ /* start mailbox as timed out and let the reset_hw call set the timeout
+ * value to begin communications */
+ mbx->timeout = 0;
+ mbx->usec_delay = E1000_VF_MBX_INIT_DELAY;
+
+ mbx->size = E1000_VFMAILBOX_SIZE;
+
+ mbx->ops.read = e1000_read_mbx_vf;
+ mbx->ops.write = e1000_write_mbx_vf;
+ mbx->ops.read_posted = e1000_read_posted_mbx;
+ mbx->ops.write_posted = e1000_write_posted_mbx;
+ mbx->ops.check_for_msg = e1000_check_for_msg_vf;
+ mbx->ops.check_for_ack = e1000_check_for_ack_vf;
+ mbx->ops.check_for_rst = e1000_check_for_rst_vf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+
+ return E1000_SUCCESS;
+}
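
With the ops table populated, the rest of the driver talks to the PF through hw->mbx.ops rather than these statics. A hedged usage sketch, posting a reset request and collecting the PF's echoed reply (message values per mbx.h below); error handling is abbreviated:

/* Hedged usage sketch: request a VF reset through the ops table and
 * read back the PF's reply; the PF echoes the command with ACK or
 * NACK folded into the top bits. */
static s32 example_request_reset(struct e1000_hw *hw)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	u32 msg = E1000_VF_RESET;
	s32 err;

	err = mbx->ops.write_posted(hw, &msg, 1);
	if (err)
		return err;
	return mbx->ops.read_posted(hw, &msg, 1);
}
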
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h
new file mode 100644
index 000000000000..c2883c45d477
--- /dev/null
+++ b/drivers/net/ethernet/intel/igbvf/mbx.h
@@ -0,0 +1,75 @@
+/*******************************************************************************
+
+ Intel(R) 82576 Virtual Function Linux driver
+ Copyright(c) 1999 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_MBX_H_
+#define _E1000_MBX_H_
+
+#include "vf.h"
+
+#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
+#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */
+#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */
+#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
+#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+
+/* If it is an E1000_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is E1000_PF_*.
+ * Message ACKs are the value or'd with 0xF0000000
+ */
+#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ * clear to send requests */
+
+/* We have a total wait time of 1s for vf mailbox posted messages */
+#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mailbox timeout */
+#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */
+
+#define E1000_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_VF_RESET 0x01 /* VF requests reset */
+#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+
+#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
+s32 e1000_init_mbx_params_vf(struct e1000_hw *);
+
+#endif /* _E1000_MBX_H_ */
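
Putting the defines together: the low 16 bits carry the command, bits 23:16 carry optional extra info, and the PF folds ACK/NACK into the top bits of the echoed reply. A small sketch of encode and decode under those rules; the multicast-count usage of MSGINFO is an assumption for illustration:

/* Sketch of message encode/decode per the defines above; packing a
 * multicast count into MSGINFO is an assumed usage for illustration
 * (the count must fit the 8-bit info field). */
static u32 example_encode_set_multicast(u16 mc_count)
{
	return E1000_VF_SET_MULTICAST |
	       ((u32)mc_count << E1000_VT_MSGINFO_SHIFT);
}

static int example_pf_acked(u32 reply, u32 sent)
{
	u32 cmd = sent & ~E1000_VT_MSGINFO_MASK;	/* drop the info byte */

	return (reply & ~E1000_VT_MSGINFO_MASK) ==
	       (cmd | E1000_VT_MSGTYPE_ACK);
}
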
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
new file mode 100644
index 000000000000..b3d760b08a5f
--- /dev/null
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -0,0 +1,2856 @@
+/*******************************************************************************
+
+ Intel(R) 82576 Virtual Function Linux driver
+ Copyright(c) 2009 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/prefetch.h>
+
+#include "igbvf.h"
+
+#define DRV_VERSION "2.0.0-k"
+char igbvf_driver_name[] = "igbvf";
+const char igbvf_driver_version[] = DRV_VERSION;
+static const char igbvf_driver_string[] =
+ "Intel(R) Virtual Function Network Driver";
+static const char igbvf_copyright[] =
+ "Copyright (c) 2009 - 2010 Intel Corporation.";
+
+static int igbvf_poll(struct napi_struct *napi, int budget);
+static void igbvf_reset(struct igbvf_adapter *);
+static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
+static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
+
+static struct igbvf_info igbvf_vf_info = {
+ .mac = e1000_vfadapt,
+ .flags = 0,
+ .pba = 10,
+ .init_ops = e1000_init_function_pointers_vf,
+};
+
+static struct igbvf_info igbvf_i350_vf_info = {
+ .mac = e1000_vfadapt_i350,
+ .flags = 0,
+ .pba = 10,
+ .init_ops = e1000_init_function_pointers_vf,
+};
+
+static const struct igbvf_info *igbvf_info_tbl[] = {
+ [board_vf] = &igbvf_vf_info,
+ [board_i350_vf] = &igbvf_i350_vf_info,
+};
+
+/**
+ * igbvf_desc_unused - calculate if we have unused descriptors
+ * @ring: address of the ring structure
+ **/
+static int igbvf_desc_unused(struct igbvf_ring *ring)
+{
+ if (ring->next_to_clean > ring->next_to_use)
+ return ring->next_to_clean - ring->next_to_use - 1;
+
+ return ring->count + ring->next_to_clean - ring->next_to_use - 1;
+}
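
The arithmetic deliberately leaves one slot unused so a full ring can be distinguished from an empty one. A standalone spot check with hypothetical index values:

/* Standalone check of the ring-free arithmetic (hypothetical values). */
#include <assert.h>

struct ring { int count, next_to_clean, next_to_use; };

static int desc_unused(const struct ring *r)
{
	if (r->next_to_clean > r->next_to_use)
		return r->next_to_clean - r->next_to_use - 1;
	return r->count + r->next_to_clean - r->next_to_use - 1;
}

int main(void)
{
	struct ring r = { .count = 256, .next_to_clean = 10, .next_to_use = 250 };

	assert(desc_unused(&r) == 15);	/* 256 + 10 - 250 - 1 */
	r.next_to_use = 9;		/* producer just behind consumer */
	assert(desc_unused(&r) == 0);	/* ring counts as full */
	return 0;
}
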
+
+/**
+ * igbvf_receive_skb - helper function to handle Rx indications
+ * @adapter: board private structure
+ * @status: descriptor status field as written by hardware
+ * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
+ * @skb: pointer to sk_buff to be indicated to stack
+ **/
+static void igbvf_receive_skb(struct igbvf_adapter *adapter,
+ struct net_device *netdev,
+ struct sk_buff *skb,
+ u32 status, u16 vlan)
+{
+ if (status & E1000_RXD_STAT_VP) {
+ u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+
+ __vlan_hwaccel_put_tag(skb, vid);
+ }
+ netif_receive_skb(skb);
+}
+
+static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
+ u32 status_err, struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+ /* skip if the Ignore Checksum bit is set or checksum was disabled
+ * through ethtool */
+ if ((status_err & E1000_RXD_STAT_IXSM) ||
+ (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
+ return;
+
+ /* TCP/UDP checksum error bit is set */
+ if (status_err &
+ (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
+ /* let the stack verify checksum errors */
+ adapter->hw_csum_err++;
+ return;
+ }
+
+ /* It must be a TCP or UDP packet with a valid checksum */
+ if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ adapter->hw_csum_good++;
+}
+
+/**
+ * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @rx_ring: address of ring structure to repopulate
+ * @cleaned_count: number of buffers to repopulate
+ **/
+static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
+ int cleaned_count)
+{
+ struct igbvf_adapter *adapter = rx_ring->adapter;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ union e1000_adv_rx_desc *rx_desc;
+ struct igbvf_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ int bufsz;
+
+ i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
+
+ if (adapter->rx_ps_hdr_size)
+ bufsz = adapter->rx_ps_hdr_size;
+ else
+ bufsz = adapter->rx_buffer_len;
+
+ while (cleaned_count--) {
+ rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
+
+ if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
+ if (!buffer_info->page) {
+ buffer_info->page = alloc_page(GFP_ATOMIC);
+ if (!buffer_info->page) {
+ adapter->alloc_rx_buff_failed++;
+ goto no_buffers;
+ }
+ buffer_info->page_offset = 0;
+ } else {
+ buffer_info->page_offset ^= PAGE_SIZE / 2;
+ }
+ buffer_info->page_dma =
+ dma_map_page(&pdev->dev, buffer_info->page,
+ buffer_info->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ }
+
+ if (!buffer_info->skb) {
+ skb = netdev_alloc_skb_ip_align(netdev, bufsz);
+ if (!skb) {
+ adapter->alloc_rx_buff_failed++;
+ goto no_buffers;
+ }
+
+ buffer_info->skb = skb;
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
+ bufsz,
+ DMA_FROM_DEVICE);
+ }
+ /* Refresh the desc even if buffer_addrs didn't change because
+ * each write-back erases this info. */
+ if (adapter->rx_ps_hdr_size) {
+ rx_desc->read.pkt_addr =
+ cpu_to_le64(buffer_info->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
+ } else {
+ rx_desc->read.pkt_addr =
+ cpu_to_le64(buffer_info->dma);
+ rx_desc->read.hdr_addr = 0;
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
+ if (i == 0)
+ i = (rx_ring->count - 1);
+ else
+ i--;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+ writel(i, adapter->hw.hw_addr + rx_ring->tail);
+ }
+}
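
With header split active, each page is carved into two halves and the XOR above alternates between them, so a page is only retired once both halves are in flight. The toggle in isolation, assuming 4 KiB pages:

/* The half-page toggle in isolation (assumes 4 KiB pages). */
#include <assert.h>
#define PAGE_SIZE 4096

int main(void)
{
	unsigned int offset = 0;

	offset ^= PAGE_SIZE / 2;	/* first flip: use the upper half */
	assert(offset == 2048);
	offset ^= PAGE_SIZE / 2;	/* second flip: back to the lower half */
	assert(offset == 0);
	return 0;
}
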
+
+/**
+ * igbvf_clean_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ * @work_done: incremented by the number of packets processed
+ * @work_to_do: NAPI budget for this poll
+ *
+ * the return value indicates whether actual cleaning was done, there
+ * is no guarantee that everything was cleaned
+ **/
+static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
+ int *work_done, int work_to_do)
+{
+ struct igbvf_ring *rx_ring = adapter->rx_ring;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ union e1000_adv_rx_desc *rx_desc, *next_rxd;
+ struct igbvf_buffer *buffer_info, *next_buffer;
+ struct sk_buff *skb;
+ bool cleaned = false;
+ int cleaned_count = 0;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int i;
+ u32 length, hlen, staterr;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ while (staterr & E1000_RXD_STAT_DD) {
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+ rmb(); /* read descriptor and rx_buffer_info after status DD */
+
+ buffer_info = &rx_ring->buffer_info[i];
+
+ /* HW will not DMA in data larger than the given buffer, even
+ * if it parses the (NFS, of course) header to be larger. In
+ * that case, it fills the header buffer and spills the rest
+ * into the page.
+ */
+ hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
+ E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
+ if (hlen > adapter->rx_ps_hdr_size)
+ hlen = adapter->rx_ps_hdr_size;
+
+ length = le16_to_cpu(rx_desc->wb.upper.length);
+ cleaned = true;
+ cleaned_count++;
+
+ skb = buffer_info->skb;
+ prefetch(skb->data - NET_IP_ALIGN);
+ buffer_info->skb = NULL;
+ if (!adapter->rx_ps_hdr_size) {
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+ skb_put(skb, length);
+ goto send_up;
+ }
+
+ if (!skb_shinfo(skb)->nr_frags) {
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_ps_hdr_size,
+ DMA_FROM_DEVICE);
+ skb_put(skb, hlen);
+ }
+
+ if (length) {
+ dma_unmap_page(&pdev->dev, buffer_info->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ buffer_info->page_dma = 0;
+
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ buffer_info->page,
+ buffer_info->page_offset,
+ length);
+
+ if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
+ (page_count(buffer_info->page) != 1))
+ buffer_info->page = NULL;
+ else
+ get_page(buffer_info->page);
+
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+ }
+send_up:
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
+ prefetch(next_rxd);
+ next_buffer = &rx_ring->buffer_info[i];
+
+ if (!(staterr & E1000_RXD_STAT_EOP)) {
+ buffer_info->skb = next_buffer->skb;
+ buffer_info->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ goto next_desc;
+ }
+
+ if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
+ total_bytes += skb->len;
+ total_packets++;
+
+ igbvf_rx_checksum_adv(adapter, staterr, skb);
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ igbvf_receive_skb(adapter, netdev, skb, staterr,
+ rx_desc->wb.upper.vlan);
+
+next_desc:
+ rx_desc->wb.upper.status_error = 0;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
+ igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+
+ rx_ring->next_to_clean = i;
+ cleaned_count = igbvf_desc_unused(rx_ring);
+
+ if (cleaned_count)
+ igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
+
+ adapter->total_rx_packets += total_packets;
+ adapter->total_rx_bytes += total_bytes;
+ adapter->net_stats.rx_bytes += total_bytes;
+ adapter->net_stats.rx_packets += total_packets;
+ return cleaned;
+}
+
+static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
+ struct igbvf_buffer *buffer_info)
+{
+ if (buffer_info->dma) {
+ if (buffer_info->mapped_as_page)
+ dma_unmap_page(&adapter->pdev->dev,
+ buffer_info->dma,
+ buffer_info->length,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(&adapter->pdev->dev,
+ buffer_info->dma,
+ buffer_info->length,
+ DMA_TO_DEVICE);
+ buffer_info->dma = 0;
+ }
+ if (buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ buffer_info->time_stamp = 0;
+}
+
+/**
+ * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @tx_ring: ring to allocate resources for
+ *
+ * Return 0 on success, negative on failure
+ **/
+int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
+ struct igbvf_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct igbvf_buffer) * tx_ring->count;
+ tx_ring->buffer_info = vzalloc(size);
+ if (!tx_ring->buffer_info)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+
+ if (!tx_ring->desc)
+ goto err;
+
+ tx_ring->adapter = adapter;
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ return 0;
+err:
+ vfree(tx_ring->buffer_info);
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate memory for the transmit descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rx_ring: ring to allocate resources for
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
+ struct igbvf_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size, desc_len;
+
+ size = sizeof(struct igbvf_buffer) * rx_ring->count;
+ rx_ring->buffer_info = vzalloc(size);
+ if (!rx_ring->buffer_info)
+ goto err;
+
+ desc_len = sizeof(union e1000_adv_rx_desc);
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * desc_len;
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc)
+ goto err;
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ rx_ring->adapter = adapter;
+
+ return 0;
+
+err:
+ vfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate memory for the receive descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * igbvf_clean_tx_ring - Free Tx Buffers
+ * @tx_ring: ring to be cleaned
+ **/
+static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
+{
+ struct igbvf_adapter *adapter = tx_ring->adapter;
+ struct igbvf_buffer *buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ if (!tx_ring->buffer_info)
+ return;
+
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+ igbvf_put_txbuf(adapter, buffer_info);
+ }
+
+ size = sizeof(struct igbvf_buffer) * tx_ring->count;
+ memset(tx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ writel(0, adapter->hw.hw_addr + tx_ring->head);
+ writel(0, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+/**
+ * igbvf_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: ring to free resources from
+ *
+ * Free all transmit software resources
+ **/
+void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
+{
+ struct pci_dev *pdev = tx_ring->adapter->pdev;
+
+ igbvf_clean_tx_ring(tx_ring);
+
+ vfree(tx_ring->buffer_info);
+ tx_ring->buffer_info = NULL;
+
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * igbvf_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
+{
+ struct igbvf_adapter *adapter = rx_ring->adapter;
+ struct igbvf_buffer *buffer_info;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+ unsigned int i;
+
+ if (!rx_ring->buffer_info)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ if (buffer_info->dma) {
+ if (adapter->rx_ps_hdr_size) {
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_ps_hdr_size,
+ DMA_FROM_DEVICE);
+ } else {
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ }
+ buffer_info->dma = 0;
+ }
+
+ if (buffer_info->skb) {
+ dev_kfree_skb(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+
+ if (buffer_info->page) {
+ if (buffer_info->page_dma)
+ dma_unmap_page(&pdev->dev,
+ buffer_info->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ put_page(buffer_info->page);
+ buffer_info->page = NULL;
+ buffer_info->page_dma = 0;
+ buffer_info->page_offset = 0;
+ }
+ }
+
+ size = sizeof(struct igbvf_buffer) * rx_ring->count;
+ memset(rx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ writel(0, adapter->hw.hw_addr + rx_ring->head);
+ writel(0, adapter->hw.hw_addr + rx_ring->tail);
+}
+
+/**
+ * igbvf_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
+{
+ struct pci_dev *pdev = rx_ring->adapter->pdev;
+
+ igbvf_clean_rx_ring(rx_ring);
+
+ vfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
+
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
+ rx_ring->desc = NULL;
+}
+
+/**
+ * igbvf_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput. This functionality is controlled
+ * by the InterruptThrottleRate module parameter.
+ **/
+static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter,
+ u16 itr_setting, int packets,
+ int bytes)
+{
+ unsigned int retval = itr_setting;
+
+ if (packets == 0)
+ goto update_itr_done;
+
+ switch (itr_setting) {
+ case lowest_latency:
+ /* handle TSO and jumbo frames */
+ if (bytes/packets > 8000)
+ retval = bulk_latency;
+ else if ((packets < 5) && (bytes > 512))
+ retval = low_latency;
+ break;
+ case low_latency: /* 50 usec aka 20000 ints/s */
+ if (bytes > 10000) {
+ /* this if handles the TSO accounting */
+ if (bytes/packets > 8000)
+ retval = bulk_latency;
+ else if ((packets < 10) || ((bytes/packets) > 1200))
+ retval = bulk_latency;
+ else if ((packets > 35))
+ retval = lowest_latency;
+ } else if (bytes/packets > 2000) {
+ retval = bulk_latency;
+ } else if (packets <= 2 && bytes < 512) {
+ retval = lowest_latency;
+ }
+ break;
+ case bulk_latency: /* 250 usec aka 4000 ints/s */
+ if (bytes > 25000) {
+ if (packets > 35)
+ retval = low_latency;
+ } else if (bytes < 6000) {
+ retval = low_latency;
+ }
+ break;
+ }
+
+update_itr_done:
+ return retval;
+}
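
To make the thresholds concrete, here is the low_latency branch restated as a standalone classifier for spot checking sample intervals; the enum values are assumed to mirror the driver's, and the sample traffic figures are illustrative:

/* Standalone restatement of the low_latency branch above (enum values
 * assumed to mirror the driver's); sample figures are illustrative. */
#include <assert.h>

enum latency { lowest_latency, low_latency, bulk_latency };

static enum latency classify_low_latency(int packets, int bytes)
{
	if (bytes > 10000) {
		if (bytes / packets > 8000)
			return bulk_latency;	/* TSO-sized bursts */
		if (packets < 10 || bytes / packets > 1200)
			return bulk_latency;
		if (packets > 35)
			return lowest_latency;
	} else if (bytes / packets > 2000) {
		return bulk_latency;
	} else if (packets <= 2 && bytes < 512) {
		return lowest_latency;	/* e.g. a lone ping */
	}
	return low_latency;		/* no change */
}

int main(void)
{
	assert(classify_low_latency(50, 450000) == bulk_latency);	/* 9000 B/pkt */
	assert(classify_low_latency(2, 128) == lowest_latency);
	return 0;
}
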
+
+static void igbvf_set_itr(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 current_itr;
+ u32 new_itr = adapter->itr;
+
+ adapter->tx_itr = igbvf_update_itr(adapter, adapter->tx_itr,
+ adapter->total_tx_packets,
+ adapter->total_tx_bytes);
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
+ adapter->tx_itr = low_latency;
+
+ adapter->rx_itr = igbvf_update_itr(adapter, adapter->rx_itr,
+ adapter->total_rx_packets,
+ adapter->total_rx_bytes);
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
+ adapter->rx_itr = low_latency;
+
+ current_itr = max(adapter->rx_itr, adapter->tx_itr);
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = 70000;
+ break;
+ case low_latency:
+ new_itr = 20000; /* aka hwitr = ~200 */
+ break;
+ case bulk_latency:
+ new_itr = 4000;
+ break;
+ default:
+ break;
+ }
+
+ if (new_itr != adapter->itr) {
+ /*
+ * this attempts to bias the interrupt rate towards Bulk
+ * by adding intermediate steps when interrupt rate is
+ * increasing
+ */
+ new_itr = new_itr > adapter->itr ?
+ min(adapter->itr + (new_itr >> 2), new_itr) :
+ new_itr;
+ adapter->itr = new_itr;
+ adapter->rx_ring->itr_val = 1952;
+
+ if (adapter->msix_entries)
+ adapter->rx_ring->set_itr = 1;
+ else
+ ew32(ITR, 1952);
+ }
+}
+
+/**
+ * igbvf_clean_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring: ring structure to clean descriptors from
+ *
+ * returns true if ring is completely cleaned
+ **/
+static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
+{
+ struct igbvf_adapter *adapter = tx_ring->adapter;
+ struct net_device *netdev = adapter->netdev;
+ struct igbvf_buffer *buffer_info;
+ struct sk_buff *skb;
+ union e1000_adv_tx_desc *tx_desc, *eop_desc;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int i, eop, count = 0;
+ bool cleaned = false;
+
+ i = tx_ring->next_to_clean;
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
+
+ while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+ (count < tx_ring->count)) {
+ rmb(); /* read buffer_info after eop_desc status */
+ for (cleaned = false; !cleaned; count++) {
+ tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ cleaned = (i == eop);
+ skb = buffer_info->skb;
+
+ if (skb) {
+ unsigned int segs, bytecount;
+
+ /* gso_segs is currently only valid for tcp */
+ segs = skb_shinfo(skb)->gso_segs ?: 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) +
+ skb->len;
+ total_packets += segs;
+ total_bytes += bytecount;
+ }
+
+ igbvf_put_txbuf(adapter, buffer_info);
+ tx_desc->wb.status = 0;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
+ }
+
+ tx_ring->next_to_clean = i;
+
+ if (unlikely(count &&
+ netif_carrier_ok(netdev) &&
+ igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (netif_queue_stopped(netdev) &&
+ !(test_bit(__IGBVF_DOWN, &adapter->state))) {
+ netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ }
+ }
+
+ adapter->net_stats.tx_bytes += total_bytes;
+ adapter->net_stats.tx_packets += total_packets;
+ return count < tx_ring->count;
+}
+
+static irqreturn_t igbvf_msix_other(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ adapter->int_counter1++;
+
+ netif_carrier_off(netdev);
+ hw->mac.get_link_status = 1;
+ if (!test_bit(__IGBVF_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+
+ ew32(EIMS, adapter->eims_other);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct igbvf_ring *tx_ring = adapter->tx_ring;
+
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+
+ /* auto mask will automatically reenable the interrupt when we write
+ * EICS */
+ if (!igbvf_clean_tx_irq(tx_ring))
+ /* Ring was not completely cleaned, so fire another interrupt */
+ ew32(EICS, tx_ring->eims_value);
+ else
+ ew32(EIMS, tx_ring->eims_value);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ adapter->int_counter0++;
+
+ /* Write the ITR value calculated at the end of the
+ * previous interrupt.
+ */
+ if (adapter->rx_ring->set_itr) {
+ writel(adapter->rx_ring->itr_val,
+ adapter->hw.hw_addr + adapter->rx_ring->itr_register);
+ adapter->rx_ring->set_itr = 0;
+ }
+
+ if (napi_schedule_prep(&adapter->rx_ring->napi)) {
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __napi_schedule(&adapter->rx_ring->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define IGBVF_NO_QUEUE -1
+
+static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
+ int tx_queue, int msix_vector)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ivar, index;
+
+ /* 82576 uses a table-based method for assigning vectors.
+ * Each queue has a single entry in the table to which we write
+ * a vector number along with a "valid" bit. Sadly, the layout
+ * of the table is somewhat counterintuitive.
+ */
+ if (rx_queue > IGBVF_NO_QUEUE) {
+ index = (rx_queue >> 1);
+ ivar = array_er32(IVAR0, index);
+ if (rx_queue & 0x1) {
+ /* vector goes into third byte of register */
+ ivar = ivar & 0xFF00FFFF;
+ ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
+ } else {
+ /* vector goes into low byte of register */
+ ivar = ivar & 0xFFFFFF00;
+ ivar |= msix_vector | E1000_IVAR_VALID;
+ }
+ adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
+ array_ew32(IVAR0, index, ivar);
+ }
+ if (tx_queue > IGBVF_NO_QUEUE) {
+ index = (tx_queue >> 1);
+ ivar = array_er32(IVAR0, index);
+ if (tx_queue & 0x1) {
+ /* vector goes into high byte of register */
+ ivar = ivar & 0x00FFFFFF;
+ ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
+ } else {
+ /* vector goes into second byte of register */
+ ivar = ivar & 0xFFFF00FF;
+ ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
+ }
+ adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
+ array_ew32(IVAR0, index, ivar);
+ }
+}
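
Spelling out the layout: each IVAR0 dword covers two queues, with Rx vectors landing in bytes 0 and 2 and Tx vectors in bytes 1 and 3, each tagged with the valid bit. A standalone check of the Rx placement; E1000_IVAR_VALID is assumed to be 0x80, as in the PF-side igb definitions:

/* Standalone check of the IVAR byte placement described above;
 * E1000_IVAR_VALID assumed to be 0x80 as in the PF igb driver. */
#include <assert.h>
#include <stdint.h>

#define E1000_IVAR_VALID 0x80

static uint32_t place_rx_vector(uint32_t ivar, int rx_queue, int vector)
{
	if (rx_queue & 0x1) {		/* odd queue: third byte */
		ivar &= 0xFF00FFFF;
		ivar |= (uint32_t)(vector | E1000_IVAR_VALID) << 16;
	} else {			/* even queue: low byte */
		ivar &= 0xFFFFFF00;
		ivar |= (uint32_t)(vector | E1000_IVAR_VALID);
	}
	return ivar;
}

int main(void)
{
	assert(place_rx_vector(0, 0, 1) == 0x00000081);
	assert(place_rx_vector(0, 1, 1) == 0x00810000);
	return 0;
}
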
+
+/**
+ * igbvf_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * igbvf_configure_msix sets up the hardware to properly
+ * generate MSI-X interrupts.
+ **/
+static void igbvf_configure_msix(struct igbvf_adapter *adapter)
+{
+ u32 tmp;
+ struct e1000_hw *hw = &adapter->hw;
+ struct igbvf_ring *tx_ring = adapter->tx_ring;
+ struct igbvf_ring *rx_ring = adapter->rx_ring;
+ int vector = 0;
+
+ adapter->eims_enable_mask = 0;
+
+ igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
+ adapter->eims_enable_mask |= tx_ring->eims_value;
+ if (tx_ring->itr_val)
+ writel(tx_ring->itr_val,
+ hw->hw_addr + tx_ring->itr_register);
+ else
+ writel(1952, hw->hw_addr + tx_ring->itr_register);
+
+ igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
+ adapter->eims_enable_mask |= rx_ring->eims_value;
+ if (rx_ring->itr_val)
+ writel(rx_ring->itr_val,
+ hw->hw_addr + rx_ring->itr_register);
+ else
+ writel(1952, hw->hw_addr + rx_ring->itr_register);
+
+ /* set vector for other causes, i.e. link changes */
+
+ tmp = (vector++ | E1000_IVAR_VALID);
+
+ ew32(IVAR_MISC, tmp);
+
+ adapter->eims_enable_mask = (1 << (vector)) - 1;
+ adapter->eims_other = 1 << (vector - 1);
+ e1e_flush();
+}
+
+static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
+{
+ if (adapter->msix_entries) {
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ }
+}
+
+/**
+ * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
+ * @adapter: board private structure
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
+{
+ int err = -ENOMEM;
+ int i;
+
+ /* we allocate 3 vectors: one for Tx, one for Rx, one for PF messages */
+ adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (adapter->msix_entries) {
+ for (i = 0; i < 3; i++)
+ adapter->msix_entries[i].entry = i;
+
+ err = pci_enable_msix(adapter->pdev,
+ adapter->msix_entries, 3);
+ }
+
+ if (err) {
+ /* MSI-X failed */
+ dev_err(&adapter->pdev->dev,
+ "Failed to initialize MSI-X interrupts.\n");
+ igbvf_reset_interrupt_capability(adapter);
+ }
+}
+
+/**
+ * igbvf_request_msix - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
+ * kernel.
+ **/
+static int igbvf_request_msix(struct igbvf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err = 0, vector = 0;
+
+ if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
+ sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
+ sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
+ } else {
+ memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
+ memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
+ }
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
+ netdev);
+ if (err)
+ goto out;
+
+ adapter->tx_ring->itr_register = E1000_EITR(vector);
+ adapter->tx_ring->itr_val = 1952;
+ vector++;
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
+ netdev);
+ if (err)
+ goto out;
+
+ adapter->rx_ring->itr_register = E1000_EITR(vector);
+ adapter->rx_ring->itr_val = 1952;
+ vector++;
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ igbvf_msix_other, 0, netdev->name, netdev);
+ if (err)
+ goto out;
+
+ igbvf_configure_msix(adapter);
+ return 0;
+out:
+ return err;
+}
+
+/**
+ * igbvf_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ **/
+static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
+ if (!adapter->tx_ring)
+ return -ENOMEM;
+
+ adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
+ if (!adapter->rx_ring) {
+ kfree(adapter->tx_ring);
+ return -ENOMEM;
+ }
+
+ netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);
+
+ return 0;
+}
+
+/**
+ * igbvf_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int igbvf_request_irq(struct igbvf_adapter *adapter)
+{
+ int err = -1;
+
+ /* igbvf supports msi-x only */
+ if (adapter->msix_entries)
+ err = igbvf_request_msix(adapter);
+
+ if (!err)
+ return err;
+
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate interrupt, Error: %d\n", err);
+
+ return err;
+}
+
+static void igbvf_free_irq(struct igbvf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int vector;
+
+ if (adapter->msix_entries) {
+ for (vector = 0; vector < 3; vector++)
+ free_irq(adapter->msix_entries[vector].vector, netdev);
+ }
+}
+
+/**
+ * igbvf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void igbvf_irq_disable(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ ew32(EIMC, ~0);
+
+ if (adapter->msix_entries)
+ ew32(EIAC, 0);
+}
+
+/**
+ * igbvf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static void igbvf_irq_enable(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ ew32(EIAC, adapter->eims_enable_mask);
+ ew32(EIAM, adapter->eims_enable_mask);
+ ew32(EIMS, adapter->eims_enable_mask);
+}
+
+/**
+ * igbvf_poll - NAPI Rx polling callback
+ * @napi: struct associated with this polling callback
+ * @budget: maximum number of packets the driver may process this poll
+ **/
+static int igbvf_poll(struct napi_struct *napi, int budget)
+{
+ struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
+ struct igbvf_adapter *adapter = rx_ring->adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ int work_done = 0;
+
+ igbvf_clean_rx_irq(adapter, &work_done, budget);
+
+ /* If not enough Rx work done, exit the polling mode */
+ if (work_done < budget) {
+ napi_complete(napi);
+
+ if (adapter->itr_setting & 3)
+ igbvf_set_itr(adapter);
+
+ if (!test_bit(__IGBVF_DOWN, &adapter->state))
+ ew32(EIMS, adapter->rx_ring->eims_value);
+ }
+
+ return work_done;
+}
+
+/**
+ * igbvf_set_rlpml - set receive large packet maximum length
+ * @adapter: board private structure
+ *
+ * Configure the maximum size of packets that will be received
+ */
+static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
+{
+ int max_frame_size;
+ struct e1000_hw *hw = &adapter->hw;
+
+ max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;
+ e1000_rlpml_set_vf(hw, max_frame_size);
+}
+
+static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (hw->mac.ops.set_vfta(hw, vid, true))
+ dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
+ else
+ set_bit(vid, adapter->active_vlans);
+}
+
+static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ igbvf_irq_disable(adapter);
+
+ if (!test_bit(__IGBVF_DOWN, &adapter->state))
+ igbvf_irq_enable(adapter);
+
+ if (hw->mac.ops.set_vfta(hw, vid, false))
+ dev_err(&adapter->pdev->dev,
+ "Failed to remove vlan id %d\n", vid);
+ else
+ clear_bit(vid, adapter->active_vlans);
+}
+
+static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
+{
+ u16 vid;
+
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ igbvf_vlan_rx_add_vid(adapter->netdev, vid);
+}
+
+/**
+ * igbvf_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void igbvf_configure_tx(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct igbvf_ring *tx_ring = adapter->tx_ring;
+ u64 tdba;
+ u32 txdctl, dca_txctrl;
+
+ /* disable transmits */
+ txdctl = er32(TXDCTL(0));
+ ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
+ e1e_flush();
+ msleep(10);
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
+ tdba = tx_ring->dma;
+ ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
+ ew32(TDBAH(0), (tdba >> 32));
+ ew32(TDH(0), 0);
+ ew32(TDT(0), 0);
+ tx_ring->head = E1000_TDH(0);
+ tx_ring->tail = E1000_TDT(0);
+
+ /* Turn off Relaxed Ordering on head write-backs. The writebacks
+ * MUST be delivered in order or it will completely screw up
+ * our bookkeeping.
+ */
+ dca_txctrl = er32(DCA_TXCTRL(0));
+ dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
+ ew32(DCA_TXCTRL(0), dca_txctrl);
+
+ /* enable transmits */
+ txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+ ew32(TXDCTL(0), txdctl);
+
+ /* Setup Transmit Descriptor Settings for eop descriptor */
+ adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;
+
+ /* enable Report Status bit */
+ adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
+}
+
+/**
+ * igbvf_setup_srrctl - configure the receive control registers
+ * @adapter: Board private structure
+ **/
+static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 srrctl = 0;
+
+ srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
+ E1000_SRRCTL_BSIZEHDR_MASK |
+ E1000_SRRCTL_BSIZEPKT_MASK);
+
+ /* Enable queue drop to avoid head of line blocking */
+ srrctl |= E1000_SRRCTL_DROP_EN;
+
+ /* Setup buffer sizes */
+ srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
+ E1000_SRRCTL_BSIZEPKT_SHIFT;
+
+ if (adapter->rx_buffer_len < 2048) {
+ adapter->rx_ps_hdr_size = 0;
+ srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ } else {
+ adapter->rx_ps_hdr_size = 128;
+ srrctl |= adapter->rx_ps_hdr_size <<
+ E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ }
+
+ ew32(SRRCTL(0), srrctl);
+}
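
Working the size math for the default case: rx_buffer_len starts at 1522 bytes (1500-byte MTU plus VLAN and FCS), ALIGN rounds that to 2048, and BSIZEPKT encodes it in 1 KiB units, so the field is 2 and the single-buffer path is taken. A spot check, with the 1 KiB shift assumed from the igb hardware definitions:

/* Spot check of the SRRCTL size math above; the shift is assumed from
 * the igb hardware definitions (BSIZEPKT in 1 KiB units). */
#include <assert.h>

#define ALIGN_UP(x, a)			(((x) + (a) - 1) & ~((a) - 1))
#define E1000_SRRCTL_BSIZEPKT_SHIFT	10	/* assumed: 1 KiB granularity */

int main(void)
{
	int rx_buffer_len = 1522;	/* default 1500 MTU + VLAN + FCS */

	/* 1522 rounds up to 2048 bytes, i.e. two 1 KiB units */
	assert((ALIGN_UP(rx_buffer_len, 1024) >> E1000_SRRCTL_BSIZEPKT_SHIFT) == 2);
	/* below 2048 the driver stays in single-buffer mode */
	assert(rx_buffer_len < 2048);
	return 0;
}
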
+
+/**
+ * igbvf_configure_rx - Configure Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void igbvf_configure_rx(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct igbvf_ring *rx_ring = adapter->rx_ring;
+ u64 rdba;
+ u32 rdlen, rxdctl;
+
+ /* disable receives */
+ rxdctl = er32(RXDCTL(0));
+ ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
+ e1e_flush();
+ msleep(10);
+
+ rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
+
+ /*
+ * Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ rdba = rx_ring->dma;
+ ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
+ ew32(RDBAH(0), (rdba >> 32));
+ ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
+ rx_ring->head = E1000_RDH(0);
+ rx_ring->tail = E1000_RDT(0);
+ ew32(RDH(0), 0);
+ ew32(RDT(0), 0);
+
+ rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+ rxdctl &= 0xFFF00000;
+ rxdctl |= IGBVF_RX_PTHRESH;
+ rxdctl |= IGBVF_RX_HTHRESH << 8;
+ rxdctl |= IGBVF_RX_WTHRESH << 16;
+
+ igbvf_set_rlpml(adapter);
+
+ /* enable receives */
+ ew32(RXDCTL(0), rxdctl);
+}
+
+/**
+ * igbvf_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+static void igbvf_set_multi(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u8 *mta_list = NULL;
+ int i;
+
+ if (!netdev_mc_empty(netdev)) {
+ mta_list = kmalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
+ if (!mta_list) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate multicast filter list\n");
+ return;
+ }
+ }
+
+ /* prepare a packed array of only addresses. */
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+
+ hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
+ kfree(mta_list);
+}
+
+/**
+ * igbvf_configure - configure the hardware for Rx and Tx
+ * @adapter: private board structure
+ **/
+static void igbvf_configure(struct igbvf_adapter *adapter)
+{
+ igbvf_set_multi(adapter->netdev);
+
+ igbvf_restore_vlan(adapter);
+
+ igbvf_configure_tx(adapter);
+ igbvf_setup_srrctl(adapter);
+ igbvf_configure_rx(adapter);
+ igbvf_alloc_rx_buffers(adapter->rx_ring,
+ igbvf_desc_unused(adapter->rx_ring));
+}
+
+/**
+ * igbvf_reset - bring the hardware into a known good state
+ * @adapter: board private structure
+ *
+ * This function boots the hardware and enables some settings that
+ * require a configuration cycle of the hardware - those cannot be
+ * set/changed during runtime. After reset the device needs to be
+ * properly configured for Rx, Tx etc.
+ */
+static void igbvf_reset(struct igbvf_adapter *adapter)
+{
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Allow time for pending master requests to run */
+ if (mac->ops.reset_hw(hw))
+ dev_err(&adapter->pdev->dev, "PF still resetting\n");
+
+ mac->ops.init_hw(hw);
+
+ if (is_valid_ether_addr(adapter->hw.mac.addr)) {
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
+ }
+
+ adapter->last_reset = jiffies;
+}
+
+int igbvf_up(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* hardware has been reset, we need to reload some things */
+ igbvf_configure(adapter);
+
+ clear_bit(__IGBVF_DOWN, &adapter->state);
+
+ napi_enable(&adapter->rx_ring->napi);
+ if (adapter->msix_entries)
+ igbvf_configure_msix(adapter);
+
+ /* Clear any pending interrupts. */
+ er32(EICR);
+ igbvf_irq_enable(adapter);
+
+ /* start the watchdog */
+ hw->mac.get_link_status = 1;
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+
+ return 0;
+}
+
+void igbvf_down(struct igbvf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rxdctl, txdctl;
+
+ /*
+ * signal that we're down so the interrupt handler does not
+ * reschedule our watchdog timer
+ */
+ set_bit(__IGBVF_DOWN, &adapter->state);
+
+ /* disable receives in the hardware */
+ rxdctl = er32(RXDCTL(0));
+ ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
+
+ netif_stop_queue(netdev);
+
+ /* disable transmits in the hardware */
+ txdctl = er32(TXDCTL(0));
+ ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
+
+ /* flush both disables and wait for them to finish */
+ e1e_flush();
+ msleep(10);
+
+ napi_disable(&adapter->rx_ring->napi);
+
+ igbvf_irq_disable(adapter);
+
+ del_timer_sync(&adapter->watchdog_timer);
+
+ netif_carrier_off(netdev);
+
+ /* record the stats before reset */
+ igbvf_update_stats(adapter);
+
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+
+ igbvf_reset(adapter);
+ igbvf_clean_tx_ring(adapter->tx_ring);
+ igbvf_clean_rx_ring(adapter->rx_ring);
+}
+
+void igbvf_reinit_locked(struct igbvf_adapter *adapter)
+{
+ might_sleep();
+ while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
+ msleep(1);
+ igbvf_down(adapter);
+ igbvf_up(adapter);
+ clear_bit(__IGBVF_RESETTING, &adapter->state);
+}
+
+/**
+ * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * igbvf_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ s32 rc;
+
+ adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
+ adapter->rx_ps_hdr_size = 0;
+ adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+ adapter->tx_int_delay = 8;
+ adapter->tx_abs_int_delay = 32;
+ adapter->rx_int_delay = 0;
+ adapter->rx_abs_int_delay = 8;
+ adapter->itr_setting = 3;
+ adapter->itr = 20000;
+
+ /* Set various function pointers */
+ adapter->ei->init_ops(&adapter->hw);
+
+ rc = adapter->hw.mac.ops.init_params(&adapter->hw);
+ if (rc)
+ return rc;
+
+ rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
+ if (rc)
+ return rc;
+
+ igbvf_set_interrupt_capability(adapter);
+
+ if (igbvf_alloc_queues(adapter))
+ return -ENOMEM;
+
+ spin_lock_init(&adapter->tx_queue_lock);
+
+ /* Explicitly disable IRQ since the NIC can be in any state. */
+ igbvf_irq_disable(adapter);
+
+ spin_lock_init(&adapter->stats_lock);
+
+ set_bit(__IGBVF_DOWN, &adapter->state);
+ return 0;
+}
+
+static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ adapter->stats.last_gprc = er32(VFGPRC);
+ adapter->stats.last_gorc = er32(VFGORC);
+ adapter->stats.last_gptc = er32(VFGPTC);
+ adapter->stats.last_gotc = er32(VFGOTC);
+ adapter->stats.last_mprc = er32(VFMPRC);
+ adapter->stats.last_gotlbc = er32(VFGOTLBC);
+ adapter->stats.last_gptlbc = er32(VFGPTLBC);
+ adapter->stats.last_gorlbc = er32(VFGORLBC);
+ adapter->stats.last_gprlbc = er32(VFGPRLBC);
+
+ adapter->stats.base_gprc = er32(VFGPRC);
+ adapter->stats.base_gorc = er32(VFGORC);
+ adapter->stats.base_gptc = er32(VFGPTC);
+ adapter->stats.base_gotc = er32(VFGOTC);
+ adapter->stats.base_mprc = er32(VFMPRC);
+ adapter->stats.base_gotlbc = er32(VFGOTLBC);
+ adapter->stats.base_gptlbc = er32(VFGPTLBC);
+ adapter->stats.base_gorlbc = er32(VFGORLBC);
+ adapter->stats.base_gprlbc = er32(VFGPRLBC);
+}
+
+/**
+ * igbvf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int igbvf_open(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__IGBVF_TESTING, &adapter->state))
+ return -EBUSY;
+
+ /* allocate transmit descriptors */
+ err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
+ if (err)
+ goto err_setup_rx;
+
+ /*
+ * before we allocate an interrupt, we must be ready to handle it.
+ * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
+ * as soon as we call request_irq, so we have to set up our
+ * clean_rx handler before we do so.
+ */
+ igbvf_configure(adapter);
+
+ err = igbvf_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ /* From here on the code is the same as igbvf_up() */
+ clear_bit(__IGBVF_DOWN, &adapter->state);
+
+ napi_enable(&adapter->rx_ring->napi);
+
+ /* clear any pending interrupts */
+ er32(EICR);
+
+ igbvf_irq_enable(adapter);
+
+ /* start the watchdog */
+ hw->mac.get_link_status = 1;
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+
+ return 0;
+
+err_req_irq:
+ igbvf_free_rx_resources(adapter->rx_ring);
+err_setup_rx:
+ igbvf_free_tx_resources(adapter->tx_ring);
+err_setup_tx:
+ igbvf_reset(adapter);
+
+ return err;
+}
+
+/**
+ * igbvf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int igbvf_close(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
+ igbvf_down(adapter);
+
+ igbvf_free_irq(adapter);
+
+ igbvf_free_tx_resources(adapter->tx_ring);
+ igbvf_free_rx_resources(adapter->rx_ring);
+
+ return 0;
+}
+
+/**
+ * igbvf_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int igbvf_set_mac(struct net_device *netdev, void *p)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+ hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
+
+ if (memcmp(addr->sa_data, hw->mac.addr, 6))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+ return 0;
+}
+
+#define UPDATE_VF_COUNTER(reg, name) \
+ { \
+ u32 current_counter = er32(reg); \
+ if (current_counter < adapter->stats.last_##name) \
+ adapter->stats.name += 0x100000000LL; \
+ adapter->stats.last_##name = current_counter; \
+ adapter->stats.name &= 0xFFFFFFFF00000000LL; \
+ adapter->stats.name |= current_counter; \
+ }
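
The macro extends the hardware's free-running 32-bit counters to 64 bits in software: a backwards step means the counter wrapped, so the upper word is bumped and the live value is spliced into the lower word. A standalone restatement with a worked wrap-around (sample values are hypothetical, not real register reads):

/* Standalone sketch of the 32-bit rollover handling in
 * UPDATE_VF_COUNTER, with hypothetical sample values. */
#include <stdint.h>
#include <stdio.h>

static void update_counter(uint64_t *sw, uint32_t *last, uint32_t current)
{
	if (current < *last)		/* hardware counter wrapped past 2^32 */
		*sw += 0x100000000ULL;	/* bump the software epoch */
	*last = current;
	*sw &= 0xFFFFFFFF00000000ULL;	/* keep epoch, drop stale low bits */
	*sw |= current;			/* splice in the live hardware value */
}

int main(void)
{
	uint64_t gprc = 0xFFFFFFF0;	/* near the 32-bit limit */
	uint32_t last = 0xFFFFFFF0;

	update_counter(&gprc, &last, 0x10);	/* hw wrapped to 0x10 */
	printf("gprc = 0x%llx\n", (unsigned long long)gprc);	/* 0x100000010 */
	return 0;
}
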
+
+/**
+ * igbvf_update_stats - Update the board statistics counters
+ * @adapter: board private structure
+ **/
+void igbvf_update_stats(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /*
+ * Prevent stats update while adapter is being reset, link is down
+ * or if the pci connection is down.
+ */
+ if (adapter->link_speed == 0)
+ return;
+
+ if (test_bit(__IGBVF_RESETTING, &adapter->state))
+ return;
+
+ if (pci_channel_offline(pdev))
+ return;
+
+ UPDATE_VF_COUNTER(VFGPRC, gprc);
+ UPDATE_VF_COUNTER(VFGORC, gorc);
+ UPDATE_VF_COUNTER(VFGPTC, gptc);
+ UPDATE_VF_COUNTER(VFGOTC, gotc);
+ UPDATE_VF_COUNTER(VFMPRC, mprc);
+ UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
+ UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
+ UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
+ UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);
+
+ /* Fill out the OS statistics structure */
+ adapter->net_stats.multicast = adapter->stats.mprc;
+}
+
+static void igbvf_print_link_info(struct igbvf_adapter *adapter)
+{
+ dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n",
+ adapter->link_speed,
+ ((adapter->link_duplex == FULL_DUPLEX) ?
+ "Full Duplex" : "Half Duplex"));
+}
+
+static bool igbvf_has_link(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ s32 ret_val = E1000_SUCCESS;
+ bool link_active;
+
+ /* If interface is down, stay link down */
+ if (test_bit(__IGBVF_DOWN, &adapter->state))
+ return false;
+
+ ret_val = hw->mac.ops.check_for_link(hw);
+ link_active = !hw->mac.get_link_status;
+
+ /* if check for link returns error we will need to reset */
+ if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ)))
+ schedule_work(&adapter->reset_task);
+
+ return link_active;
+}
+
+/**
+ * igbvf_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void igbvf_watchdog(unsigned long data)
+{
+ struct igbvf_adapter *adapter = (struct igbvf_adapter *) data;
+
+ /* Do the rest outside of interrupt context */
+ schedule_work(&adapter->watchdog_task);
+}
+
+static void igbvf_watchdog_task(struct work_struct *work)
+{
+ struct igbvf_adapter *adapter = container_of(work,
+ struct igbvf_adapter,
+ watchdog_task);
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+ struct igbvf_ring *tx_ring = adapter->tx_ring;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 link;
+ int tx_pending = 0;
+
+ link = igbvf_has_link(adapter);
+
+ if (link) {
+ if (!netif_carrier_ok(netdev)) {
+ mac->ops.get_link_up_info(&adapter->hw,
+ &adapter->link_speed,
+ &adapter->link_duplex);
+ igbvf_print_link_info(adapter);
+
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ }
+ } else {
+ if (netif_carrier_ok(netdev)) {
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+ dev_info(&adapter->pdev->dev, "Link is Down\n");
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ }
+
+ if (netif_carrier_ok(netdev)) {
+ igbvf_update_stats(adapter);
+ } else {
+ tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
+ tx_ring->count);
+ if (tx_pending) {
+ /*
+ * We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context).
+ */
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+ }
+ }
+
+ /* Cause software interrupt to ensure Rx ring is cleaned */
+ ew32(EICS, adapter->rx_ring->eims_value);
+
+ /* Reset the timer */
+ if (!test_bit(__IGBVF_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + (2 * HZ)));
+}
+
+#define IGBVF_TX_FLAGS_CSUM 0x00000001
+#define IGBVF_TX_FLAGS_VLAN 0x00000002
+#define IGBVF_TX_FLAGS_TSO 0x00000004
+#define IGBVF_TX_FLAGS_IPV4 0x00000008
+#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IGBVF_TX_FLAGS_VLAN_SHIFT 16
+
+static int igbvf_tso(struct igbvf_adapter *adapter,
+ struct igbvf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+ struct e1000_adv_tx_context_desc *context_desc;
+ unsigned int i;
+ int err;
+ struct igbvf_buffer *buffer_info;
+ u32 info = 0, tu_cmd = 0;
+ u32 mss_l4len_idx, l4len;
+ *hdr_len = 0;
+
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "igbvf_tso returning an error\n");
+ return err;
+ }
+ }
+
+ l4len = tcp_hdrlen(skb);
+ *hdr_len += l4len;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ i = tx_ring->next_to_use;
+
+ buffer_info = &tx_ring->buffer_info[i];
+ context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
+ /* VLAN MACLEN IPLEN */
+ if (tx_flags & IGBVF_TX_FLAGS_VLAN)
+ info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
+ info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
+ *hdr_len += skb_network_offset(skb);
+ info |= (skb_transport_header(skb) - skb_network_header(skb));
+ *hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
+ context_desc->vlan_macip_lens = cpu_to_le32(info);
+
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
+
+ if (skb->protocol == htons(ETH_P_IP))
+ tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
+ tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+
+ context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
+
+ /* MSS L4LEN IDX */
+ mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
+ mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
+
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+ context_desc->seqnum_seed = 0;
+
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+ buffer_info->dma = 0;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ return true;
+}
+
+static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
+ struct igbvf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags)
+{
+ struct e1000_adv_tx_context_desc *context_desc;
+ unsigned int i;
+ struct igbvf_buffer *buffer_info;
+ u32 info = 0, tu_cmd = 0;
+
+ if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
+ (tx_flags & IGBVF_TX_FLAGS_VLAN)) {
+ i = tx_ring->next_to_use;
+ buffer_info = &tx_ring->buffer_info[i];
+ context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
+
+ if (tx_flags & IGBVF_TX_FLAGS_VLAN)
+ info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
+
+ info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ info |= (skb_transport_header(skb) -
+ skb_network_header(skb));
+
+ context_desc->vlan_macip_lens = cpu_to_le32(info);
+
+ tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ default:
+ break;
+ }
+ }
+
+ context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
+ context_desc->seqnum_seed = 0;
+ context_desc->mss_l4len_idx = 0;
+
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+ buffer_info->dma = 0;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+ }
+
+ return false;
+}
+
+static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ /* if there are enough descriptors then we don't need to worry */
+ if (igbvf_desc_unused(adapter->tx_ring) >= size)
+ return 0;
+
+ netif_stop_queue(netdev);
+
+ smp_mb();
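+ /*
+ * The barrier pairs with the Tx cleanup path: it makes the stopped
+ * state visible before we re-read the free count, so a concurrent
+ * clean cannot free descriptors between the check and the stop
+ * without one side noticing the other.
+ */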
+
+ /* We need to check again just in case room has been made available */
+ if (igbvf_desc_unused(adapter->tx_ring) < size)
+ return -EBUSY;
+
+ netif_wake_queue(netdev);
+
+ ++adapter->restart_queue;
+ return 0;
+}
+
+#define IGBVF_MAX_TXD_PWR 16
+#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR)
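+
+/*
+ * Each data descriptor can carry at most IGBVF_MAX_DATA_PER_TXD - 1 =
+ * 65535 bytes; igbvf_tx_map_adv() BUG()s on anything larger.
+ */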
+
+static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
+ struct igbvf_ring *tx_ring,
+ struct sk_buff *skb,
+ unsigned int first)
+{
+ struct igbvf_buffer *buffer_info;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned int len = skb_headlen(skb);
+ unsigned int count = 0, i;
+ unsigned int f;
+
+ i = tx_ring->next_to_use;
+
+ buffer_info = &tx_ring->buffer_info[i];
+ BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
+ buffer_info->length = len;
+ /* set time_stamp *before* dma to help avoid a possible race */
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+ buffer_info->mapped_as_page = false;
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+ goto dma_error;
+
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+ struct skb_frag_struct *frag;
+
+ count++;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ len = frag->size;
+
+ buffer_info = &tx_ring->buffer_info[i];
+ BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
+ buffer_info->length = len;
+ buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
+ buffer_info->mapped_as_page = true;
+ buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+ goto dma_error;
+ }
+
+ tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[first].next_to_watch = i;
+
+ return ++count;
+
+dma_error:
+ dev_err(&pdev->dev, "TX DMA map failed\n");
+
+ /* clear timestamp and dma mappings for failed buffer_info mapping */
+ buffer_info->dma = 0;
+ buffer_info->time_stamp = 0;
+ buffer_info->length = 0;
+ buffer_info->next_to_watch = 0;
+ buffer_info->mapped_as_page = false;
+ if (count)
+ count--;
+
+ /* clear timestamp and dma mappings for remaining portion of packet */
+ while (count--) {
+ if (i == 0)
+ i += tx_ring->count;
+ i--;
+ buffer_info = &tx_ring->buffer_info[i];
+ igbvf_put_txbuf(adapter, buffer_info);
+ }
+
+ return 0;
+}
+
+static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
+ struct igbvf_ring *tx_ring,
+ int tx_flags, int count, u32 paylen,
+ u8 hdr_len)
+{
+ union e1000_adv_tx_desc *tx_desc = NULL;
+ struct igbvf_buffer *buffer_info;
+ u32 olinfo_status = 0, cmd_type_len;
+ unsigned int i;
+
+ cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
+ E1000_ADVTXD_DCMD_DEXT);
+
+ if (tx_flags & IGBVF_TX_FLAGS_VLAN)
+ cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
+
+ if (tx_flags & IGBVF_TX_FLAGS_TSO) {
+ cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
+
+ /* insert tcp checksum */
+ olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+
+ /* insert ip checksum */
+ if (tx_flags & IGBVF_TX_FLAGS_IPV4)
+ olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
+
+ } else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
+ olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+ }
+
+ olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
+
+ i = tx_ring->next_to_use;
+ while (count--) {
+ buffer_info = &tx_ring->buffer_info[i];
+ tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
+ tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
+ tx_desc->read.cmd_type_len =
+ cpu_to_le32(cmd_type_len | buffer_info->length);
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+ tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+
+ tx_ring->next_to_use = i;
+ writel(i, adapter->hw.hw_addr + tx_ring->tail);
+ /* we need this if more than one processor can write to our tail
+ * at a time, it synchronizes IO on IA64/Altix systems */
+ mmiowb();
+}
+
+static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
+ struct net_device *netdev,
+ struct igbvf_ring *tx_ring)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ unsigned int first, tx_flags = 0;
+ u8 hdr_len = 0;
+ int count = 0;
+ int tso = 0;
+
+ if (test_bit(__IGBVF_DOWN, &adapter->state)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb->len <= 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /*
+ * need: count + 4 free descriptors:
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for skb->data,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+ if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
+ /* this is a hard error */
+ return NETDEV_TX_BUSY;
+ }
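+ /*
+ * For example, an skb with three page fragments needs 3 + 4 = 7 free
+ * descriptors: one per fragment, one for skb->data, one context
+ * descriptor, plus the two-descriptor gap.
+ */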
+
+ if (vlan_tx_tag_present(skb)) {
+ tx_flags |= IGBVF_TX_FLAGS_VLAN;
+ tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
+ }
+
+ if (skb->protocol == htons(ETH_P_IP))
+ tx_flags |= IGBVF_TX_FLAGS_IPV4;
+
+ first = tx_ring->next_to_use;
+
+ tso = skb_is_gso(skb) ?
+ igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
+ if (unlikely(tso < 0)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (tso)
+ tx_flags |= IGBVF_TX_FLAGS_TSO;
+ else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+ (skb->ip_summed == CHECKSUM_PARTIAL))
+ tx_flags |= IGBVF_TX_FLAGS_CSUM;
+
+ /*
+ * count reflects descriptors mapped, if 0 then mapping error
+ * has occurred and we need to rewind the descriptor queue
+ */
+ count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);
+
+ if (count) {
+ igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
+ skb->len, hdr_len);
+ /* Make sure there is space in the ring for the next send. */
+ igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
+ } else {
+ dev_kfree_skb_any(skb);
+ tx_ring->buffer_info[first].time_stamp = 0;
+ tx_ring->next_to_use = first;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct igbvf_ring *tx_ring;
+
+ if (test_bit(__IGBVF_DOWN, &adapter->state)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ tx_ring = &adapter->tx_ring[0];
+
+ return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
+}
+
+/**
+ * igbvf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void igbvf_tx_timeout(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+}
+
+static void igbvf_reset_task(struct work_struct *work)
+{
+ struct igbvf_adapter *adapter;
+ adapter = container_of(work, struct igbvf_adapter, reset_task);
+
+ igbvf_reinit_locked(adapter);
+}
+
+/**
+ * igbvf_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ /* only return the current stats */
+ return &adapter->net_stats;
+}
+
+/**
+ * igbvf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+ dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
+ return -EINVAL;
+ }
+
+#define MAX_STD_JUMBO_FRAME_SIZE 9234
+ if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
+ dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
+ return -EINVAL;
+ }
+
+ while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
+ msleep(1);
+ /* igbvf_down has a dependency on max_frame_size */
+ adapter->max_frame_size = max_frame;
+ if (netif_running(netdev))
+ igbvf_down(adapter);
+
+ /*
+ * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ * means we reserve 2 more, this pushes us to allocate from the next
+ * larger slab size.
+ * i.e. RXBUFFER_2048 --> size-4096 slab
+ * However with the new *_jumbo_rx* routines, jumbo receives will use
+ * fragmented skbs
+ */
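+ /*
+ * For example a 2048-byte buffer plus the 16-byte reservation and
+ * 2 bytes of NET_IP_ALIGN gives a 2066-byte allocation, which slab
+ * rounds up to the size-4096 cache.
+ */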
+
+ if (max_frame <= 1024)
+ adapter->rx_buffer_len = 1024;
+ else if (max_frame <= 2048)
+ adapter->rx_buffer_len = 2048;
+ else
+#if (PAGE_SIZE / 2) > 16384
+ adapter->rx_buffer_len = 16384;
+#else
+ adapter->rx_buffer_len = PAGE_SIZE / 2;
+#endif
+
+ /* adjust allocation if LPE protects us, and we aren't using SBP */
+ if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
+ (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
+ adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
+ ETH_FCS_LEN;
+
+ dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+
+ if (netif_running(netdev))
+ igbvf_up(adapter);
+ else
+ igbvf_reset(adapter);
+
+ clear_bit(__IGBVF_RESETTING, &adapter->state);
+
+ return 0;
+}
+
+static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_PM
+ int retval = 0;
+#endif
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
+ igbvf_down(adapter);
+ igbvf_free_irq(adapter);
+ }
+
+#ifdef CONFIG_PM
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+#endif
+
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int igbvf_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ u32 err;
+
+ pci_restore_state(pdev);
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ if (netif_running(netdev)) {
+ err = igbvf_request_irq(adapter);
+ if (err)
+ return err;
+ }
+
+ igbvf_reset(adapter);
+
+ if (netif_running(netdev))
+ igbvf_up(adapter);
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+#endif
+
+static void igbvf_shutdown(struct pci_dev *pdev)
+{
+ igbvf_suspend(pdev, PMSG_SUSPEND);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void igbvf_netpoll(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ disable_irq(adapter->pdev->irq);
+
+ igbvf_clean_tx_irq(adapter->tx_ring);
+
+ enable_irq(adapter->pdev->irq);
+}
+#endif
+
+/**
+ * igbvf_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ igbvf_down(adapter);
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * igbvf_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold boot. The implementation
+ * resembles the first half of the igbvf_resume routine.
+ */
+static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+
+ igbvf_reset(adapter);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * igbvf_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. The implementation resembles the
+ * second half of the igbvf_resume routine.
+ */
+static void igbvf_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev)) {
+ if (igbvf_up(adapter)) {
+ dev_err(&pdev->dev,
+ "can't bring device back up after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+}
+
+static void igbvf_print_device_info(struct igbvf_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
+ dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
+ dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
+}
+
+static const struct net_device_ops igbvf_netdev_ops = {
+ .ndo_open = igbvf_open,
+ .ndo_stop = igbvf_close,
+ .ndo_start_xmit = igbvf_xmit_frame,
+ .ndo_get_stats = igbvf_get_stats,
+ .ndo_set_rx_mode = igbvf_set_multi,
+ .ndo_set_mac_address = igbvf_set_mac,
+ .ndo_change_mtu = igbvf_change_mtu,
+ .ndo_do_ioctl = igbvf_ioctl,
+ .ndo_tx_timeout = igbvf_tx_timeout,
+ .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = igbvf_netpoll,
+#endif
+};
+
+/**
+ * igbvf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in igbvf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * igbvf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit igbvf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct igbvf_adapter *adapter;
+ struct e1000_hw *hw;
+ const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
+
+ static int cards_found;
+ int err, pci_using_dac;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ pci_using_dac = 0;
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err)
+ pci_using_dac = 1;
+ } else {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA "
+ "configuration, aborting\n");
+ goto err_dma;
+ }
+ }
+ }
+
+ err = pci_request_regions(pdev, igbvf_driver_name);
+ if (err)
+ goto err_pci_reg;
+
+ pci_set_master(pdev);
+
+ err = -ENOMEM;
+ netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
+ if (!netdev)
+ goto err_alloc_etherdev;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ hw = &adapter->hw;
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->ei = ei;
+ adapter->pba = ei->pba;
+ adapter->flags = ei->flags;
+ adapter->hw.back = adapter;
+ adapter->hw.mac.type = ei->mac;
+ adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
+
+ /* PCI config space info */
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+ hw->revision_id = pdev->revision;
+
+ err = -EIO;
+ adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+
+ if (!adapter->hw.hw_addr)
+ goto err_ioremap;
+
+ if (ei->get_variants) {
+ err = ei->get_variants(adapter);
+ if (err)
+ goto err_ioremap;
+ }
+
+ /* setup adapter struct */
+ err = igbvf_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+ /* construct the net_device struct */
+ netdev->netdev_ops = &igbvf_netdev_ops;
+
+ igbvf_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+ adapter->bd_number = cards_found++;
+
+ netdev->features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
+ netdev->features |= NETIF_F_IPV6_CSUM;
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_IP_CSUM;
+ netdev->vlan_features |= NETIF_F_IPV6_CSUM;
+ netdev->vlan_features |= NETIF_F_SG;
+
+ /* reset the controller to put the device in a known good state */
+ err = hw->mac.ops.reset_hw(hw);
+ if (err) {
+ dev_info(&pdev->dev,
+ "PF still in reset state, assigning new address."
+ " Is the PF interface up?\n");
+ dev_hw_addr_random(adapter->netdev, hw->mac.addr);
+ } else {
+ err = hw->mac.ops.read_mac_addr(hw);
+ if (err) {
+ dev_err(&pdev->dev, "Error reading MAC address\n");
+ goto err_hw_init;
+ }
+ }
+
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->perm_addr)) {
+ dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
+ netdev->dev_addr);
+ err = -EIO;
+ goto err_hw_init;
+ }
+
+ setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
+ (unsigned long) adapter);
+
+ INIT_WORK(&adapter->reset_task, igbvf_reset_task);
+ INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);
+
+ /* ring size defaults */
+ adapter->rx_ring->count = 1024;
+ adapter->tx_ring->count = 1024;
+
+ /* reset the hardware with the new settings */
+ igbvf_reset(adapter);
+
+ strcpy(netdev->name, "eth%d");
+ err = register_netdev(netdev);
+ if (err)
+ goto err_hw_init;
+
+ /* tell the stack to leave us alone until igbvf_open() is called */
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ igbvf_print_device_info(adapter);
+
+ igbvf_initialize_last_counter_stats(adapter);
+
+ return 0;
+
+err_hw_init:
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+err_sw_init:
+ igbvf_reset_interrupt_capability(adapter);
+ iounmap(adapter->hw.hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * igbvf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * igbvf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. The could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void __devexit igbvf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ /*
+ * The watchdog timer may reschedule itself, so explicitly
+ * prevent it from being rescheduled before tearing it down.
+ */
+ set_bit(__IGBVF_DOWN, &adapter->state);
+ del_timer_sync(&adapter->watchdog_timer);
+
+ cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->watchdog_task);
+
+ unregister_netdev(netdev);
+
+ igbvf_reset_interrupt_capability(adapter);
+
+ /*
+ * it is important to delete the napi struct prior to freeing the
+ * rx ring so that you do not end up with null pointer refs
+ */
+ netif_napi_del(&adapter->rx_ring->napi);
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+
+ iounmap(hw->hw_addr);
+ if (hw->flash_address)
+ iounmap(hw->flash_address);
+ pci_release_regions(pdev);
+
+ free_netdev(netdev);
+
+ pci_disable_device(pdev);
+}
+
+/* PCI Error Recovery (ERS) */
+static struct pci_error_handlers igbvf_err_handler = {
+ .error_detected = igbvf_io_error_detected,
+ .slot_reset = igbvf_io_slot_reset,
+ .resume = igbvf_io_resume,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
+ { } /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
+
+/* PCI Device API Driver */
+static struct pci_driver igbvf_driver = {
+ .name = igbvf_driver_name,
+ .id_table = igbvf_pci_tbl,
+ .probe = igbvf_probe,
+ .remove = __devexit_p(igbvf_remove),
+#ifdef CONFIG_PM
+ /* Power Management Hooks */
+ .suspend = igbvf_suspend,
+ .resume = igbvf_resume,
+#endif
+ .shutdown = igbvf_shutdown,
+ .err_handler = &igbvf_err_handler
+};
+
+/**
+ * igbvf_init_module - Driver Registration Routine
+ *
+ * igbvf_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init igbvf_init_module(void)
+{
+ int ret;
+ printk(KERN_INFO "%s - version %s\n",
+ igbvf_driver_string, igbvf_driver_version);
+ printk(KERN_INFO "%s\n", igbvf_copyright);
+
+ ret = pci_register_driver(&igbvf_driver);
+
+ return ret;
+}
+module_init(igbvf_init_module);
+
+/**
+ * igbvf_exit_module - Driver Exit Cleanup Routine
+ *
+ * igbvf_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit igbvf_exit_module(void)
+{
+ pci_unregister_driver(&igbvf_driver);
+}
+module_exit(igbvf_exit_module);
+
+MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
+MODULE_DESCRIPTION("Intel(R) 82576 Virtual Function Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/* netdev.c */
diff --git a/drivers/net/ethernet/intel/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h
new file mode 100644
index 000000000000..77e18d3d6b15
--- /dev/null
+++ b/drivers/net/ethernet/intel/igbvf/regs.h
@@ -0,0 +1,108 @@
+/*******************************************************************************
+
+ Intel(R) 82576 Virtual Function Linux driver
+ Copyright(c) 2009 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_REGS_H_
+#define _E1000_REGS_H_
+
+#define E1000_CTRL 0x00000 /* Device Control - RW */
+#define E1000_STATUS 0x00008 /* Device Status - RO */
+#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
+#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */
+#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
+#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
+#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
+#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
+/*
+ * Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL(current_rx_queue)
+ */
+#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
+ (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
+ (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
+ (0x0C008 + ((_n) * 0x40)))
+#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
+ (0x0C00C + ((_n) * 0x40)))
+#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
+ (0x0C010 + ((_n) * 0x40)))
+#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
+ (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
+ (0x0C028 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
+ (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
+ (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
+ (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
+ (0x0E010 + ((_n) * 0x40)))
+#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
+ (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
+ (0x0E028 + ((_n) * 0x40)))
+#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8))
+#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8))
+#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x054E0 + ((_i - 16) * 8)))
+#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x054E4 + ((_i - 16) * 8)))
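+
+/*
+ * For example, E1000_RDT(0) expands to 0x02818, while E1000_RDT(4) falls
+ * into the second register block: 0x0C018 + (4 * 0x40) = 0x0C118.
+ */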
+
+/* Statistics registers */
+#define E1000_VFGPRC 0x00F10
+#define E1000_VFGORC 0x00F18
+#define E1000_VFMPRC 0x00F3C
+#define E1000_VFGPTC 0x00F14
+#define E1000_VFGOTC 0x00F34
+#define E1000_VFGOTLBC 0x00F50
+#define E1000_VFGPTLBC 0x00F44
+#define E1000_VFGORLBC 0x00F48
+#define E1000_VFGPRLBC 0x00F40
+
+/* These act per VF so an array friendly macro is used */
+#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n)))
+#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
+
+/* Define macros for handling registers */
+#define er32(reg) readl(hw->hw_addr + E1000_##reg)
+#define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg)
+#define array_er32(reg, offset) \
+ readl(hw->hw_addr + E1000_##reg + (offset << 2))
+#define array_ew32(reg, offset, val) \
+ writel((val), hw->hw_addr + E1000_##reg + (offset << 2))
+#define e1e_flush() er32(STATUS)
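+
+/*
+ * These macros expect a local "hw" pointer in scope: er32(STATUS) expands
+ * to readl(hw->hw_addr + E1000_STATUS), and e1e_flush() reads STATUS to
+ * force any posted writes to complete.
+ */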
+
+#endif
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
new file mode 100644
index 000000000000..af3822f9ea9a
--- /dev/null
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -0,0 +1,402 @@
+/*******************************************************************************
+
+ Intel(R) 82576 Virtual Function Linux driver
+ Copyright(c) 2009 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "vf.h"
+
+static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
+static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+static s32 e1000_init_hw_vf(struct e1000_hw *hw);
+static s32 e1000_reset_hw_vf(struct e1000_hw *hw);
+
+static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *,
+ u32, u32, u32);
+static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32);
+static s32 e1000_read_mac_addr_vf(struct e1000_hw *);
+static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool);
+
+/**
+ * e1000_init_mac_params_vf - Inits MAC params
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_mac_params_vf(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ /* MTA registers are a PF feature; the VF only needs the
+ * register count to compute multicast hash values */
+ mac->mta_reg_count = 128;
+ /* VFs have no direct access to RAR entries */
+ mac->rar_entry_count = 1;
+
+ /* Function pointers */
+ /* reset */
+ mac->ops.reset_hw = e1000_reset_hw_vf;
+ /* hw initialization */
+ mac->ops.init_hw = e1000_init_hw_vf;
+ /* check for link */
+ mac->ops.check_for_link = e1000_check_for_link_vf;
+ /* link info */
+ mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
+ /* set mac address */
+ mac->ops.rar_set = e1000_rar_set_vf;
+ /* read mac address */
+ mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
+ /* set vlan filter table array */
+ mac->ops.set_vfta = e1000_set_vfta_vf;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_function_pointers_vf - Inits function pointers
+ * @hw: pointer to the HW structure
+ **/
+void e1000_init_function_pointers_vf(struct e1000_hw *hw)
+{
+ hw->mac.ops.init_params = e1000_init_mac_params_vf;
+ hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
+}
+
+/**
+ * e1000_get_link_up_info_vf - Gets link info.
+ * @hw: pointer to the HW structure
+ * @speed: pointer to 16 bit value to store link speed.
+ * @duplex: pointer to 16 bit value to store duplex.
+ *
+ * Since we cannot read the PHY and get accurate link info, we must rely upon
+ * the status register's data which is often stale and inaccurate.
+ **/
+static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 status;
+
+ status = er32(STATUS);
+ if (status & E1000_STATUS_SPEED_1000)
+ *speed = SPEED_1000;
+ else if (status & E1000_STATUS_SPEED_100)
+ *speed = SPEED_100;
+ else
+ *speed = SPEED_10;
+
+ if (status & E1000_STATUS_FD)
+ *duplex = FULL_DUPLEX;
+ else
+ *duplex = HALF_DUPLEX;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_reset_hw_vf - Resets the HW
+ * @hw: pointer to the HW structure
+ *
+ * VFs provide a function-level reset. This is done using bit 26 of ctrl_reg
+ * (E1000_CTRL_RST). This is all the reset we can perform on a VF.
+ **/
+static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ u32 timeout = E1000_VF_INIT_TIMEOUT;
+ u32 ret_val = -E1000_ERR_MAC_INIT;
+ u32 msgbuf[3];
+ u8 *addr = (u8 *)(&msgbuf[1]);
+ u32 ctrl;
+
+ /* assert vf queue/interrupt reset */
+ ctrl = er32(CTRL);
+ ew32(CTRL, ctrl | E1000_CTRL_RST);
+
+ /* we cannot initialize while the RSTI / RSTD bits are asserted */
+ while (!mbx->ops.check_for_rst(hw) && timeout) {
+ timeout--;
+ udelay(5);
+ }
+
+ if (timeout) {
+ /* mailbox timeout can now become active */
+ mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT;
+
+ /* notify pf of vf reset completion */
+ msgbuf[0] = E1000_VF_RESET;
+ mbx->ops.write_posted(hw, msgbuf, 1);
+
+ msleep(10);
+
+ /* set our "perm_addr" based on info provided by PF */
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+ if (!ret_val) {
+ if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
+ memcpy(hw->mac.perm_addr, addr, 6);
+ else
+ ret_val = -E1000_ERR_MAC_INIT;
+ }
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_hw_vf - Inits the HW
+ * @hw: pointer to the HW structure
+ *
+ * Not much to do here except clear the PF Reset indication if there is one.
+ **/
+static s32 e1000_init_hw_vf(struct e1000_hw *hw)
+{
+ /* attempt to set and restore our mac address */
+ e1000_rar_set_vf(hw, hw->mac.addr, 0);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_hash_mc_addr_vf - Generate a multicast hash value
+ * @hw: pointer to the HW structure
+ * @mc_addr: pointer to a multicast address
+ *
+ * Generates a multicast address hash value which is used to determine
+ * the multicast filter table array address and new table value. See
+ * e1000_mta_set_generic()
+ **/
+static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
+{
+ u32 hash_value, hash_mask;
+ u8 bit_shift = 0;
+
+ /* Register count multiplied by bits per register */
+ hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+ /*
+ * The bit_shift is the number of left-shifts
+ * where 0xFF would still fall within the hash mask.
+ */
+ while (hash_mask >> bit_shift != 0xFF)
+ bit_shift++;
+
+ hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+ (((u16) mc_addr[5]) << bit_shift)));
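+ /*
+ * For example, with mta_reg_count = 128 the mask is 0xFFF and
+ * bit_shift works out to 4, so an address ending in ...:34:56
+ * hashes to 0xFFF & ((0x34 >> 4) | (0x56 << 4)) = 0x563.
+ */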
+
+ return hash_value;
+}
+
+/**
+ * e1000_update_mc_addr_list_vf - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ * @rar_used_count: the first RAR register free to program
+ * @rar_count: total number of supported Receive Address Registers
+ *
+ * Updates the Receive Address Registers and Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ * The parameter rar_count will usually be hw->mac.rar_entry_count
+ * unless there are workarounds that change this.
+ **/
+static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count,
+ u32 rar_used_count, u32 rar_count)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[E1000_VFMAILBOX_SIZE];
+ u16 *hash_list = (u16 *)&msgbuf[1];
+ u32 hash_value;
+ u32 cnt, i;
+
+ /* Each entry in the list uses 1 16 bit word. We have 30
+ * 16 bit words available in our HW msg buffer (minus 1 for the
+ * msg type). That's 30 hash values if we pack 'em right. If
+ * there are more than 30 MC addresses to add then punt the
+ * extras for now and then add code to handle more than 30 later.
+ * It would be unusual for a server to request that many multicast
+ * addresses except in large enterprise network environments.
+ */
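+ /*
+ * The resulting message layout is: msgbuf[0] holds
+ * E1000_VF_SET_MULTICAST with the entry count in the MSGINFO field,
+ * and msgbuf[1..15] carry up to 30 packed 16-bit hash values.
+ */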
+
+ cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
+ msgbuf[0] = E1000_VF_SET_MULTICAST;
+ msgbuf[0] |= cnt << E1000_VT_MSGINFO_SHIFT;
+
+ for (i = 0; i < cnt; i++) {
+ hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list);
+ hash_list[i] = hash_value & 0x0FFFF;
+ mc_addr_list += ETH_ADDR_LEN;
+ }
+
+ mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE);
+}
+
+/**
+ * e1000_set_vfta_vf - Set/Unset vlan filter table array entry
+ * @hw: pointer to the HW structure
+ * @vid: determines the vfta register and bit to set/unset
+ * @set: if true then set bit, else clear bit
+ **/
+static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[2];
+ s32 err;
+
+ msgbuf[0] = E1000_VF_SET_VLAN;
+ msgbuf[1] = vid;
+ /* Setting the 8 bit field MSG INFO to true indicates "add" */
+ if (set)
+ msgbuf[0] |= 1 << E1000_VT_MSGINFO_SHIFT;
+
+ mbx->ops.write_posted(hw, msgbuf, 2);
+
+ err = mbx->ops.read_posted(hw, msgbuf, 2);
+
+ msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
+
+ /* if nacked the vlan was rejected */
+ if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK)))
+ err = -E1000_ERR_MAC_INIT;
+
+ return err;
+}
+
+/**
+ * e1000_rlpml_set_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ **/
+void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[2];
+
+ msgbuf[0] = E1000_VF_SET_LPE;
+ msgbuf[1] = max_size;
+
+ mbx->ops.write_posted(hw, msgbuf, 2);
+}
+
+/**
+ * e1000_rar_set_vf - set device MAC address
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ **/
+static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[3];
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+
+ memset(msgbuf, 0, 12);
+ msgbuf[0] = E1000_VF_SET_MAC_ADDR;
+ memcpy(msg_addr, addr, 6);
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+
+ msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
+
+ /* if nacked the address was rejected, use "perm_addr" */
+ if (!ret_val &&
+ (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK)))
+ e1000_read_mac_addr_vf(hw);
+}
+
+/**
+ * e1000_read_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < ETH_ADDR_LEN; i++)
+ hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_for_link_vf - Check for link for a virtual interface
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the underlying PF is still talking to the VF and,
+ * if it is, reports the link state from the hardware; otherwise it
+ * reports link down and returns an error.
+ **/
+static s32 e1000_check_for_link_vf(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val = E1000_SUCCESS;
+ u32 in_msg = 0;
+
+ /*
+ * We only want to run this if a reset has been asserted.
+ * In this case that could mean a link change, a device reset,
+ * or a virtual function reset.
+ */
+
+ /* If we were hit with a reset or timeout drop the link */
+ if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
+ mac->get_link_status = true;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ if (!(er32(STATUS) & E1000_STATUS_LU))
+ goto out;
+
+ /* if the read failed it could just be a mailbox collision; best to wait
+ * until we are called again rather than report an error */
+ if (mbx->ops.read(hw, &in_msg, 1))
+ goto out;
+
+ /* if incoming message isn't clear to send we are waiting on response */
+ if (!(in_msg & E1000_VT_MSGTYPE_CTS)) {
+ /* if the message is not CTS but is a NACK we must have lost CTS status */
+ if (in_msg & E1000_VT_MSGTYPE_NACK)
+ ret_val = -E1000_ERR_MAC_INIT;
+ goto out;
+ }
+
+ /* the pf is talking, if we timed out in the past we reinit */
+ if (!mbx->timeout) {
+ ret_val = -E1000_ERR_MAC_INIT;
+ goto out;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link */
+ mac->get_link_status = false;
+
+out:
+ return ret_val;
+}
+
diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
new file mode 100644
index 000000000000..d7ed58fcd9bb
--- /dev/null
+++ b/drivers/net/ethernet/intel/igbvf/vf.h
@@ -0,0 +1,266 @@
+/*******************************************************************************
+
+ Intel(R) 82576 Virtual Function Linux driver
+ Copyright(c) 2009 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_VF_H_
+#define _E1000_VF_H_
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+
+#include "regs.h"
+#include "defines.h"
+
+struct e1000_hw;
+
+#define E1000_DEV_ID_82576_VF 0x10CA
+#define E1000_DEV_ID_I350_VF 0x1520
+#define E1000_REVISION_0 0
+#define E1000_REVISION_1 1
+#define E1000_REVISION_2 2
+#define E1000_REVISION_3 3
+#define E1000_REVISION_4 4
+
+#define E1000_FUNC_0 0
+#define E1000_FUNC_1 1
+
+/*
+ * Receive Address Register Count
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * These entries are also used for MAC-based filtering.
+ */
+#define E1000_RAR_ENTRIES_VF 1
+
+/* Receive Descriptor - Advanced */
+union e1000_adv_rx_desc {
+ struct {
+ u64 pkt_addr; /* Packet buffer address */
+ u64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ u32 data;
+ struct {
+ u16 pkt_info; /* RSS/Packet type */
+ u16 hdr_info; /* Split Header,
+ * hdr buffer length */
+ } hs_rss;
+ } lo_dword;
+ union {
+ u32 rss; /* RSS Hash */
+ struct {
+ u16 ip_id; /* IP id */
+ u16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ u32 status_error; /* ext status/error */
+ u16 length; /* Packet length */
+ u16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
+#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
+
+/* Transmit Descriptor - Advanced */
+union e1000_adv_tx_desc {
+ struct {
+ u64 buffer_addr; /* Address of descriptor's data buf */
+ u32 cmd_type_len;
+ u32 olinfo_status;
+ } read;
+ struct {
+ u64 rsvd; /* Reserved */
+ u32 nxtseq_seed;
+ u32 status;
+ } wb;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
+#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
+#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
+#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
+#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
+#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+
+/* Context descriptors */
+struct e1000_adv_tx_context_desc {
+ u32 vlan_macip_lens;
+ u32 seqnum_seed;
+ u32 type_tucmd_mlhl;
+ u32 mss_l4len_idx;
+};
+
+#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+
+enum e1000_mac_type {
+ e1000_undefined = 0,
+ e1000_vfadapt,
+ e1000_vfadapt_i350,
+ e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
+};
+
+struct e1000_vf_stats {
+ u64 base_gprc;
+ u64 base_gptc;
+ u64 base_gorc;
+ u64 base_gotc;
+ u64 base_mprc;
+ u64 base_gotlbc;
+ u64 base_gptlbc;
+ u64 base_gorlbc;
+ u64 base_gprlbc;
+
+ u32 last_gprc;
+ u32 last_gptc;
+ u32 last_gorc;
+ u32 last_gotc;
+ u32 last_mprc;
+ u32 last_gotlbc;
+ u32 last_gptlbc;
+ u32 last_gorlbc;
+ u32 last_gprlbc;
+
+ u64 gprc;
+ u64 gptc;
+ u64 gorc;
+ u64 gotc;
+ u64 mprc;
+ u64 gotlbc;
+ u64 gptlbc;
+ u64 gorlbc;
+ u64 gprlbc;
+};
+
+#include "mbx.h"
+
+struct e1000_mac_operations {
+ /* Function pointers for the MAC. */
+ s32 (*init_params)(struct e1000_hw *);
+ s32 (*check_for_link)(struct e1000_hw *);
+ void (*clear_vfta)(struct e1000_hw *);
+ s32 (*get_bus_info)(struct e1000_hw *);
+ s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
+ void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32);
+ s32 (*reset_hw)(struct e1000_hw *);
+ s32 (*init_hw)(struct e1000_hw *);
+ s32 (*setup_link)(struct e1000_hw *);
+ void (*write_vfta)(struct e1000_hw *, u32, u32);
+ void (*mta_set)(struct e1000_hw *, u32);
+ void (*rar_set)(struct e1000_hw *, u8*, u32);
+ s32 (*read_mac_addr)(struct e1000_hw *);
+ s32 (*set_vfta)(struct e1000_hw *, u16, bool);
+};
+
+struct e1000_mac_info {
+ struct e1000_mac_operations ops;
+ u8 addr[6];
+ u8 perm_addr[6];
+
+ enum e1000_mac_type type;
+
+ u16 mta_reg_count;
+ u16 rar_entry_count;
+
+ bool get_link_status;
+};
+
+struct e1000_mbx_operations {
+ s32 (*init_params)(struct e1000_hw *hw);
+ s32 (*read)(struct e1000_hw *, u32 *, u16);
+ s32 (*write)(struct e1000_hw *, u32 *, u16);
+ s32 (*read_posted)(struct e1000_hw *, u32 *, u16);
+ s32 (*write_posted)(struct e1000_hw *, u32 *, u16);
+ s32 (*check_for_msg)(struct e1000_hw *);
+ s32 (*check_for_ack)(struct e1000_hw *);
+ s32 (*check_for_rst)(struct e1000_hw *);
+};
+
+struct e1000_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct e1000_mbx_info {
+ struct e1000_mbx_operations ops;
+ struct e1000_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u16 size;
+};
+
+struct e1000_dev_spec_vf {
+ u32 vf_number;
+ u32 v2p_mailbox;
+};
+
+struct e1000_hw {
+ void *back;
+
+ u8 __iomem *hw_addr;
+ u8 __iomem *flash_address;
+ unsigned long io_base;
+
+ struct e1000_mac_info mac;
+ struct e1000_mbx_info mbx;
+
+ union {
+ struct e1000_dev_spec_vf vf;
+ } dev_spec;
+
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 vendor_id;
+
+ u8 revision_id;
+};
+
+/* These functions must be implemented by drivers */
+void e1000_rlpml_set_vf(struct e1000_hw *, u16);
+void e1000_init_function_pointers_vf(struct e1000_hw *hw);
+
+#endif /* _E1000_VF_H_ */
diff --git a/drivers/net/ethernet/intel/ixgb/Makefile b/drivers/net/ethernet/intel/ixgb/Makefile
new file mode 100644
index 000000000000..0b20c5e62ffe
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgb/Makefile
@@ -0,0 +1,35 @@
+################################################################################
+#
+# Intel PRO/10GbE Linux driver
+# Copyright(c) 1999 - 2008 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Linux NICS <linux.nics@intel.com>
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) PRO/10GbE ethernet driver
+#
+
+obj-$(CONFIG_IXGB) += ixgb.o
+
+ixgb-objs := ixgb_main.o ixgb_hw.o ixgb_ee.o ixgb_ethtool.o ixgb_param.o
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
new file mode 100644
index 000000000000..cb23448fe2fa
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -0,0 +1,219 @@
+/*******************************************************************************
+
+ Intel PRO/10GbE Linux driver
+ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGB_H_
+#define _IXGB_H_
+
+#include <linux/stddef.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/capability.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <net/pkt_sched.h>
+#include <linux/list.h>
+#include <linux/reboot.h>
+#include <net/checksum.h>
+
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+#define BAR_0 0
+#define BAR_1 1
+#define BAR_5 5
+
+struct ixgb_adapter;
+#include "ixgb_hw.h"
+#include "ixgb_ee.h"
+#include "ixgb_ids.h"
+
+#define PFX "ixgb: "
+
+#ifdef _DEBUG_DRIVER_
+#define IXGB_DBG(fmt, args...) printk(KERN_DEBUG PFX fmt, ##args)
+#else
+#define IXGB_DBG(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG PFX fmt, ##args); \
+} while (0)
+#endif
+
+/* TX/RX descriptor defines */
+#define DEFAULT_TXD 256
+#define MAX_TXD 4096
+#define MIN_TXD 64
+
+/* the hardware cannot reliably support more than 512 descriptors owned by
+ * the hardware descriptor cache; otherwise an unreliable ring under heavy
+ * receive load may result */
+#define DEFAULT_RXD 512
+#define MAX_RXD 512
+#define MIN_RXD 64
+
+/* Supported Rx Buffer Sizes */
+#define IXGB_RXBUFFER_2048 2048
+#define IXGB_RXBUFFER_4096 4096
+#define IXGB_RXBUFFER_8192 8192
+#define IXGB_RXBUFFER_16384 16384
+
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define IXGB_RX_BUFFER_WRITE 8 /* Must be power of 2 */
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct ixgb_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ unsigned long time_stamp;
+ u16 length;
+ u16 next_to_watch;
+ u16 mapped_as_page;
+};
+
+struct ixgb_desc_ring {
+ /* pointer to the descriptor ring memory */
+ void *desc;
+ /* physical address of the descriptor ring */
+ dma_addr_t dma;
+ /* length of descriptor ring in bytes */
+ unsigned int size;
+ /* number of descriptors in the ring */
+ unsigned int count;
+ /* next descriptor to associate a buffer with */
+ unsigned int next_to_use;
+ /* next descriptor to check for DD status bit */
+ unsigned int next_to_clean;
+ /* array of buffer information structs */
+ struct ixgb_buffer *buffer_info;
+};
+
+#define IXGB_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
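+
+/*
+ * For example, with count = 256, next_to_clean = 10 and next_to_use = 250
+ * the free region wraps past the end of the ring, and IXGB_DESC_UNUSED
+ * evaluates to (256 + 10) - 250 - 1 = 15.
+ */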
+
+#define IXGB_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
+#define IXGB_RX_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_rx_desc)
+#define IXGB_TX_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_tx_desc)
+#define IXGB_CONTEXT_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_context_desc)
+
+/* board specific private data structure */
+
+struct ixgb_adapter {
+ struct timer_list watchdog_timer;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u32 bd_number;
+ u32 rx_buffer_len;
+ u32 part_num;
+ u16 link_speed;
+ u16 link_duplex;
+ struct work_struct tx_timeout_task;
+
+ /* TX */
+ struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
+ unsigned int restart_queue;
+ unsigned long timeo_start;
+ u32 tx_cmd_type;
+ u64 hw_csum_tx_good;
+ u64 hw_csum_tx_error;
+ u32 tx_int_delay;
+ u32 tx_timeout_count;
+ bool tx_int_delay_enable;
+ bool detect_tx_hung;
+
+ /* RX */
+ struct ixgb_desc_ring rx_ring;
+ u64 hw_csum_rx_error;
+ u64 hw_csum_rx_good;
+ u32 rx_int_delay;
+ bool rx_csum;
+
+ /* OS defined structs */
+ struct napi_struct napi;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
+ /* structs defined in ixgb_hw.h */
+ struct ixgb_hw hw;
+ u16 msg_enable;
+ struct ixgb_hw_stats stats;
+ u32 alloc_rx_buff_failed;
+ bool have_msi;
+ unsigned long flags;
+};
+
+enum ixgb_state_t {
+ /* TBD
+ __IXGB_TESTING,
+ __IXGB_RESETTING,
+ */
+ __IXGB_DOWN
+};
+
+/* Exported from other modules */
+extern void ixgb_check_options(struct ixgb_adapter *adapter);
+extern void ixgb_set_ethtool_ops(struct net_device *netdev);
+extern char ixgb_driver_name[];
+extern const char ixgb_driver_version[];
+
+extern void ixgb_set_speed_duplex(struct net_device *netdev);
+
+extern int ixgb_up(struct ixgb_adapter *adapter);
+extern void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
+extern void ixgb_reset(struct ixgb_adapter *adapter);
+extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
+extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
+extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
+extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
+extern void ixgb_update_stats(struct ixgb_adapter *adapter);
+
+#endif /* _IXGB_H_ */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c
new file mode 100644
index 000000000000..2ed925f38811
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c
@@ -0,0 +1,607 @@
+/*******************************************************************************
+
+ Intel PRO/10GbE Linux driver
+ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "ixgb_hw.h"
+#include "ixgb_ee.h"
+/* Local prototypes */
+static u16 ixgb_shift_in_bits(struct ixgb_hw *hw);
+
+static void ixgb_shift_out_bits(struct ixgb_hw *hw,
+ u16 data,
+ u16 count);
+static void ixgb_standby_eeprom(struct ixgb_hw *hw);
+
+static bool ixgb_wait_eeprom_command(struct ixgb_hw *hw);
+
+static void ixgb_cleanup_eeprom(struct ixgb_hw *hw);
+
+/******************************************************************************
+ * Raises the EEPROM's clock input.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * eecd_reg - EECD's current value
+ *****************************************************************************/
+static void
+ixgb_raise_clock(struct ixgb_hw *hw,
+ u32 *eecd_reg)
+{
+ /* Raise the clock input to the EEPROM (by setting the SK bit), and then
+ * wait 50 microseconds.
+ */
+ *eecd_reg = *eecd_reg | IXGB_EECD_SK;
+ IXGB_WRITE_REG(hw, EECD, *eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
+ udelay(50);
+}
+
+/******************************************************************************
+ * Lowers the EEPROM's clock input.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * eecd_reg - EECD's current value
+ *****************************************************************************/
+static void
+ixgb_lower_clock(struct ixgb_hw *hw,
+ u32 *eecd_reg)
+{
+ /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
+ * wait 50 microseconds.
+ */
+ *eecd_reg = *eecd_reg & ~IXGB_EECD_SK;
+ IXGB_WRITE_REG(hw, EECD, *eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
+ udelay(50);
+}
+
+/******************************************************************************
+ * Shift data bits out to the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * data - data to send to the EEPROM
+ * count - number of bits to shift out
+ *****************************************************************************/
+static void
+ixgb_shift_out_bits(struct ixgb_hw *hw,
+ u16 data,
+ u16 count)
+{
+ u32 eecd_reg;
+ u32 mask;
+
+	/* We need to shift "count" bits out to the EEPROM. So, the value in
+	 * the "data" parameter will be shifted out to the EEPROM one bit at
+	 * a time. In order to do this, "data" must be broken down into bits.
+	 */
+ mask = 0x01 << (count - 1);
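+	/* e.g. count == 5 gives mask == 0x10, so bits go out MSB first */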
+ eecd_reg = IXGB_READ_REG(hw, EECD);
+ eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
+ do {
+ /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
+ * and then raising and then lowering the clock (the SK bit controls
+ * the clock input to the EEPROM). A "0" is shifted out to the EEPROM
+ * by setting "DI" to "0" and then raising and then lowering the clock.
+ */
+ eecd_reg &= ~IXGB_EECD_DI;
+
+ if (data & mask)
+ eecd_reg |= IXGB_EECD_DI;
+
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
+
+ udelay(50);
+
+ ixgb_raise_clock(hw, &eecd_reg);
+ ixgb_lower_clock(hw, &eecd_reg);
+
+ mask = mask >> 1;
+
+ } while (mask);
+
+ /* We leave the "DI" bit set to "0" when we leave this routine. */
+ eecd_reg &= ~IXGB_EECD_DI;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+}
+
+/******************************************************************************
+ * Shift data bits in from the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static u16
+ixgb_shift_in_bits(struct ixgb_hw *hw)
+{
+ u32 eecd_reg;
+ u32 i;
+ u16 data;
+
+ /* In order to read a register from the EEPROM, we need to shift 16 bits
+ * in from the EEPROM. Bits are "shifted in" by raising the clock input to
+ * the EEPROM (setting the SK bit), and then reading the value of the "DO"
+ * bit. During this "shifting in" process the "DI" bit should always be
+	 * clear.
+ */
+
+ eecd_reg = IXGB_READ_REG(hw, EECD);
+
+ eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
+ data = 0;
+
+ for (i = 0; i < 16; i++) {
+ data = data << 1;
+ ixgb_raise_clock(hw, &eecd_reg);
+
+ eecd_reg = IXGB_READ_REG(hw, EECD);
+
+ eecd_reg &= ~(IXGB_EECD_DI);
+ if (eecd_reg & IXGB_EECD_DO)
+ data |= 1;
+
+ ixgb_lower_clock(hw, &eecd_reg);
+ }
+
+ return data;
+}
+
+/******************************************************************************
+ * Prepares EEPROM for access
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
+ * function should be called before issuing a command to the EEPROM.
+ *****************************************************************************/
+static void
+ixgb_setup_eeprom(struct ixgb_hw *hw)
+{
+ u32 eecd_reg;
+
+ eecd_reg = IXGB_READ_REG(hw, EECD);
+
+ /* Clear SK and DI */
+ eecd_reg &= ~(IXGB_EECD_SK | IXGB_EECD_DI);
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+
+ /* Set CS */
+ eecd_reg |= IXGB_EECD_CS;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+}
+
+/******************************************************************************
+ * Returns EEPROM to a "standby" state
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+ixgb_standby_eeprom(struct ixgb_hw *hw)
+{
+ u32 eecd_reg;
+
+ eecd_reg = IXGB_READ_REG(hw, EECD);
+
+ /* Deselect EEPROM */
+ eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK);
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
+ udelay(50);
+
+ /* Clock high */
+ eecd_reg |= IXGB_EECD_SK;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
+ udelay(50);
+
+ /* Select EEPROM */
+ eecd_reg |= IXGB_EECD_CS;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
+ udelay(50);
+
+ /* Clock low */
+ eecd_reg &= ~IXGB_EECD_SK;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
+ udelay(50);
+}
+
+/******************************************************************************
+ * Raises then lowers the EEPROM's clock pin
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+ixgb_clock_eeprom(struct ixgb_hw *hw)
+{
+ u32 eecd_reg;
+
+ eecd_reg = IXGB_READ_REG(hw, EECD);
+
+ /* Rising edge of clock */
+ eecd_reg |= IXGB_EECD_SK;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
+ udelay(50);
+
+ /* Falling edge of clock */
+ eecd_reg &= ~IXGB_EECD_SK;
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
+ udelay(50);
+}
+
+/******************************************************************************
+ * Terminates a command by lowering the EEPROM's chip select pin
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+ixgb_cleanup_eeprom(struct ixgb_hw *hw)
+{
+ u32 eecd_reg;
+
+ eecd_reg = IXGB_READ_REG(hw, EECD);
+
+ eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_DI);
+
+ IXGB_WRITE_REG(hw, EECD, eecd_reg);
+
+ ixgb_clock_eeprom(hw);
+}
+
+/******************************************************************************
+ * Waits for the EEPROM to finish the current command.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * The command is done when the EEPROM's data out pin goes high.
+ *
+ * Returns:
+ * true: EEPROM data pin is high before timeout.
+ * false: Time expired.
+ *****************************************************************************/
+static bool
+ixgb_wait_eeprom_command(struct ixgb_hw *hw)
+{
+ u32 eecd_reg;
+ u32 i;
+
+	/* Toggle the CS line. This in effect tells the EEPROM to actually execute
+ * the command in question.
+ */
+ ixgb_standby_eeprom(hw);
+
+	/* Now read DO repeatedly until it is high (equal to '1'). The EEPROM will
+ * signal that the command has been completed by raising the DO signal.
+ * If DO does not go high in 10 milliseconds, then error out.
+ */
+ for (i = 0; i < 200; i++) {
+ eecd_reg = IXGB_READ_REG(hw, EECD);
+
+ if (eecd_reg & IXGB_EECD_DO)
+ return true;
+
+ udelay(50);
+ }
+ ASSERT(0);
+ return false;
+}
+
+/******************************************************************************
+ * Verifies that the EEPROM has a valid checksum
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Reads the first 64 16-bit words of the EEPROM and sums the values read.
+ * If the sum of the 64 16-bit words is 0xBABA, the EEPROM's checksum is
+ * valid.
+ *
+ * Returns:
+ * true: Checksum is valid
+ * false: Checksum is not valid.
+ *****************************************************************************/
+bool
+ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
+{
+ u16 checksum = 0;
+ u16 i;
+
+ for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++)
+ checksum += ixgb_read_eeprom(hw, i);
+
+	return checksum == (u16) EEPROM_SUM;
+}
+
+/******************************************************************************
+ * Calculates the EEPROM checksum and writes it to the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sums the first 63 16-bit words of the EEPROM. Subtracts the sum from
+ * 0xBABA. Writes the difference to word offset 63 of the EEPROM.
+ *****************************************************************************/
+void
+ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
+{
+ u16 checksum = 0;
+ u16 i;
+
+ for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
+ checksum += ixgb_read_eeprom(hw, i);
+
+ checksum = (u16) EEPROM_SUM - checksum;
+
+ ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum);
+}
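+
+/* Worked example (illustrative numbers): if words 0-62 sum to 0x1234, then
+ * 0xBABA - 0x1234 = 0xA886 is written to word 63, and a later sum over all
+ * 64 words gives 0xBABA modulo 2^16, which is exactly what
+ * ixgb_validate_eeprom_checksum() tests. */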
+
+/******************************************************************************
+ * Writes a 16 bit word to a given offset in the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * reg - offset within the EEPROM to be written to
+ * data - 16 bit word to be written to the EEPROM
+ *
+ * If ixgb_update_eeprom_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ *
+ *****************************************************************************/
+void
+ixgb_write_eeprom(struct ixgb_hw *hw, u16 offset, u16 data)
+{
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+ /* Prepare the EEPROM for writing */
+ ixgb_setup_eeprom(hw);
+
+ /* Send the 9-bit EWEN (write enable) command to the EEPROM (5-bit opcode
+ * plus 4-bit dummy). This puts the EEPROM into write/erase mode.
+ */
+ ixgb_shift_out_bits(hw, EEPROM_EWEN_OPCODE, 5);
+ ixgb_shift_out_bits(hw, 0, 4);
+
+ /* Prepare the EEPROM */
+ ixgb_standby_eeprom(hw);
+
+ /* Send the Write command (3-bit opcode + 6-bit addr) */
+ ixgb_shift_out_bits(hw, EEPROM_WRITE_OPCODE, 3);
+ ixgb_shift_out_bits(hw, offset, 6);
+
+ /* Send the data */
+ ixgb_shift_out_bits(hw, data, 16);
+
+ ixgb_wait_eeprom_command(hw);
+
+ /* Recover from write */
+ ixgb_standby_eeprom(hw);
+
+ /* Send the 9-bit EWDS (write disable) command to the EEPROM (5-bit
+ * opcode plus 4-bit dummy). This takes the EEPROM out of write/erase
+ * mode.
+ */
+ ixgb_shift_out_bits(hw, EEPROM_EWDS_OPCODE, 5);
+ ixgb_shift_out_bits(hw, 0, 4);
+
+ /* Done with writing */
+ ixgb_cleanup_eeprom(hw);
+
+ /* clear the init_ctrl_reg_1 to signify that the cache is invalidated */
+ ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
+}
+
+/******************************************************************************
+ * Reads a 16 bit word from the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of 16 bit word in the EEPROM to read
+ *
+ * Returns:
+ * The 16-bit value read from the eeprom
+ *****************************************************************************/
+u16
+ixgb_read_eeprom(struct ixgb_hw *hw,
+ u16 offset)
+{
+ u16 data;
+
+ /* Prepare the EEPROM for reading */
+ ixgb_setup_eeprom(hw);
+
+ /* Send the READ command (opcode + addr) */
+ ixgb_shift_out_bits(hw, EEPROM_READ_OPCODE, 3);
+	/* The EEPROM holds 64 words, so there are 6 address bits */
+ ixgb_shift_out_bits(hw, offset, 6);
+
+ /* Read the data */
+ data = ixgb_shift_in_bits(hw);
+
+ /* End this read operation */
+ ixgb_standby_eeprom(hw);
+
+ return data;
+}
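+
+/* As an illustration, reading offset 0x0D (the device id word) clocks out
+ * the 3 opcode bits 1-1-0 followed by the 6 address bits 0-0-1-1-0-1, MSB
+ * first, and then clocks 16 data bits in from DO. */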
+
+/******************************************************************************
+ * Reads eeprom and stores data in shared structure.
+ * Validates eeprom checksum and eeprom signature.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Returns:
+ * true: if eeprom read is successful
+ * false: otherwise.
+ *****************************************************************************/
+bool
+ixgb_get_eeprom_data(struct ixgb_hw *hw)
+{
+ u16 i;
+ u16 checksum = 0;
+ struct ixgb_ee_map_type *ee_map;
+
+ ENTER();
+
+ ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+ pr_debug("Reading eeprom data\n");
+ for (i = 0; i < IXGB_EEPROM_SIZE ; i++) {
+ u16 ee_data;
+ ee_data = ixgb_read_eeprom(hw, i);
+ checksum += ee_data;
+ hw->eeprom[i] = cpu_to_le16(ee_data);
+ }
+
+ if (checksum != (u16) EEPROM_SUM) {
+ pr_debug("Checksum invalid\n");
+ /* clear the init_ctrl_reg_1 to signify that the cache is
+ * invalidated */
+ ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
+ return false;
+ }
+
+ if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
+ != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
+ pr_debug("Signature invalid\n");
+ return false;
+ }
+
+ return true;
+}
+
+/******************************************************************************
+ * Local function to check if the EEPROM signature is good.
+ * If the cached signature is not valid, calls ixgb_get_eeprom_data to
+ * re-read and re-validate the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Returns:
+ * true: eeprom signature was good and the eeprom read was successful
+ * false: otherwise.
+ ******************************************************************************/
+static bool
+ixgb_check_and_get_eeprom_data(struct ixgb_hw *hw)
+{
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+ if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
+ == cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
+ return true;
+ } else {
+ return ixgb_get_eeprom_data(hw);
+ }
+}
+
+/******************************************************************************
+ * return a word from the eeprom
+ *
+ * hw - Struct containing variables accessed by shared code
+ * index - Offset of eeprom word
+ *
+ * Returns:
+ * Word at indexed offset in eeprom, if valid, 0 otherwise.
+ ******************************************************************************/
+__le16
+ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index)
+{
+
+	if ((index < IXGB_EEPROM_SIZE) &&
+	    ixgb_check_and_get_eeprom_data(hw))
+		return hw->eeprom[index];
+
+ return 0;
+}
+
+/******************************************************************************
+ * return the mac address from EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ * mac_addr - buffer that receives the Ethernet address; written only if
+ *            the EEPROM contents are valid
+ *
+ * Returns: None.
+ ******************************************************************************/
+void
+ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
+ u8 *mac_addr)
+{
+ int i;
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+ ENTER();
+
+	if (ixgb_check_and_get_eeprom_data(hw)) {
+		for (i = 0; i < ETH_ALEN; i++)
+			mac_addr[i] = ee_map->mac_addr[i];
+ pr_debug("eeprom mac address = %pM\n", mac_addr);
+ }
+}
+
+/******************************************************************************
+ * return the Printed Board Assembly number from EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Returns:
+ * PBA number if EEPROM contents are valid, 0 otherwise
+ ******************************************************************************/
+u32
+ixgb_get_ee_pba_number(struct ixgb_hw *hw)
+{
+	if (ixgb_check_and_get_eeprom_data(hw))
+ return le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG])
+ | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16);
+
+ return 0;
+}
+
+/******************************************************************************
+ * return the Device Id from EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Returns:
+ * Device Id if EEPROM contents are valid, 0 otherwise
+ ******************************************************************************/
+u16
+ixgb_get_ee_device_id(struct ixgb_hw *hw)
+{
+ struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
+
+	if (ixgb_check_and_get_eeprom_data(hw))
+ return le16_to_cpu(ee_map->device_id);
+
+ return 0;
+}
+
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.h b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h
new file mode 100644
index 000000000000..5680f64314b8
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h
@@ -0,0 +1,104 @@
+/*******************************************************************************
+
+ Intel PRO/10GbE Linux driver
+ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGB_EE_H_
+#define _IXGB_EE_H_
+
+#define IXGB_EEPROM_SIZE 64 /* Size in words */
+
+/* EEPROM Commands */
+#define EEPROM_READ_OPCODE 0x6 /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE 0x5 /* EEPROM write opcode */
+#define EEPROM_ERASE_OPCODE 0x7 /* EEPROM erase opcode */
+#define EEPROM_EWEN_OPCODE 0x13 /* EEPROM erase/write enable */
+#define EEPROM_EWDS_OPCODE 0x10 /* EEPROM erase/write disable */
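+
+/* EWEN/EWDS are clocked out as a 5-bit opcode plus 4 dummy bits, e.g. EWEN
+ * (0x13) goes out as 1-0-0-1-1 followed by 0-0-0-0; see ixgb_write_eeprom() */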
+
+/* EEPROM MAP (Word Offsets) */
+#define EEPROM_IA_1_2_REG 0x0000
+#define EEPROM_IA_3_4_REG 0x0001
+#define EEPROM_IA_5_6_REG 0x0002
+#define EEPROM_COMPATIBILITY_REG 0x0003
+#define EEPROM_PBA_1_2_REG 0x0008
+#define EEPROM_PBA_3_4_REG 0x0009
+#define EEPROM_INIT_CONTROL1_REG 0x000A
+#define EEPROM_SUBSYS_ID_REG 0x000B
+#define EEPROM_SUBVEND_ID_REG 0x000C
+#define EEPROM_DEVICE_ID_REG 0x000D
+#define EEPROM_VENDOR_ID_REG 0x000E
+#define EEPROM_INIT_CONTROL2_REG 0x000F
+#define EEPROM_SWDPINS_REG 0x0020
+#define EEPROM_CIRCUIT_CTRL_REG 0x0021
+#define EEPROM_D0_D3_POWER_REG 0x0022
+#define EEPROM_FLASH_VERSION 0x0032
+#define EEPROM_CHECKSUM_REG 0x003F
+
+/* Mask bits for fields in Word 0x0a of the EEPROM */
+
+#define EEPROM_ICW1_SIGNATURE_MASK 0xC000
+#define EEPROM_ICW1_SIGNATURE_VALID 0x4000
+#define EEPROM_ICW1_SIGNATURE_CLEAR 0x0000
+
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */
+#define EEPROM_SUM 0xBABA
+
+/* EEPROM Map Sizes (Byte Counts) */
+#define PBA_SIZE 4
+
+/* EEPROM Map defines (WORD OFFSETS)*/
+
+/* EEPROM structure */
+struct ixgb_ee_map_type {
+ u8 mac_addr[ETH_ALEN];
+ __le16 compatibility;
+ __le16 reserved1[4];
+ __le32 pba_number;
+ __le16 init_ctrl_reg_1;
+ __le16 subsystem_id;
+ __le16 subvendor_id;
+ __le16 device_id;
+ __le16 vendor_id;
+ __le16 init_ctrl_reg_2;
+ __le16 oem_reserved[16];
+ __le16 swdpins_reg;
+ __le16 circuit_ctrl_reg;
+ u8 d3_power;
+ u8 d0_power;
+ __le16 reserved2[28];
+ __le16 checksum;
+};
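+
+/* The fields above total 128 bytes, i.e. exactly IXGB_EEPROM_SIZE (64)
+ * 16-bit words, which is what allows hw->eeprom[] to be cast to this map
+ * throughout ixgb_ee.c. */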
+
+/* EEPROM Functions */
+u16 ixgb_read_eeprom(struct ixgb_hw *hw, u16 reg);
+
+bool ixgb_validate_eeprom_checksum(struct ixgb_hw *hw);
+
+void ixgb_update_eeprom_checksum(struct ixgb_hw *hw);
+
+void ixgb_write_eeprom(struct ixgb_hw *hw, u16 reg, u16 data);
+
+#endif /* _IXGB_EE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
new file mode 100644
index 000000000000..ab404e71f86a
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -0,0 +1,662 @@
+/*******************************************************************************
+
+ Intel PRO/10GbE Linux driver
+ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for ixgb */
+
+#include "ixgb.h"
+
+#include <asm/uaccess.h>
+
+#define IXGB_ALL_RAR_ENTRIES 16
+
+enum {NETDEV_STATS, IXGB_STATS};
+
+struct ixgb_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int type;
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define IXGB_STAT(m) IXGB_STATS, \
+ FIELD_SIZEOF(struct ixgb_adapter, m), \
+ offsetof(struct ixgb_adapter, m)
+#define IXGB_NETDEV_STAT(m) NETDEV_STATS, \
+ FIELD_SIZEOF(struct net_device, m), \
+ offsetof(struct net_device, m)
+
+static struct ixgb_stats ixgb_gstrings_stats[] = {
+ {"rx_packets", IXGB_NETDEV_STAT(stats.rx_packets)},
+ {"tx_packets", IXGB_NETDEV_STAT(stats.tx_packets)},
+ {"rx_bytes", IXGB_NETDEV_STAT(stats.rx_bytes)},
+ {"tx_bytes", IXGB_NETDEV_STAT(stats.tx_bytes)},
+ {"rx_errors", IXGB_NETDEV_STAT(stats.rx_errors)},
+ {"tx_errors", IXGB_NETDEV_STAT(stats.tx_errors)},
+ {"rx_dropped", IXGB_NETDEV_STAT(stats.rx_dropped)},
+ {"tx_dropped", IXGB_NETDEV_STAT(stats.tx_dropped)},
+ {"multicast", IXGB_NETDEV_STAT(stats.multicast)},
+ {"collisions", IXGB_NETDEV_STAT(stats.collisions)},
+
+/* { "rx_length_errors", IXGB_NETDEV_STAT(stats.rx_length_errors) }, */
+ {"rx_over_errors", IXGB_NETDEV_STAT(stats.rx_over_errors)},
+ {"rx_crc_errors", IXGB_NETDEV_STAT(stats.rx_crc_errors)},
+ {"rx_frame_errors", IXGB_NETDEV_STAT(stats.rx_frame_errors)},
+ {"rx_no_buffer_count", IXGB_STAT(stats.rnbc)},
+ {"rx_fifo_errors", IXGB_NETDEV_STAT(stats.rx_fifo_errors)},
+ {"rx_missed_errors", IXGB_NETDEV_STAT(stats.rx_missed_errors)},
+ {"tx_aborted_errors", IXGB_NETDEV_STAT(stats.tx_aborted_errors)},
+ {"tx_carrier_errors", IXGB_NETDEV_STAT(stats.tx_carrier_errors)},
+ {"tx_fifo_errors", IXGB_NETDEV_STAT(stats.tx_fifo_errors)},
+ {"tx_heartbeat_errors", IXGB_NETDEV_STAT(stats.tx_heartbeat_errors)},
+ {"tx_window_errors", IXGB_NETDEV_STAT(stats.tx_window_errors)},
+ {"tx_deferred_ok", IXGB_STAT(stats.dc)},
+ {"tx_timeout_count", IXGB_STAT(tx_timeout_count) },
+ {"tx_restart_queue", IXGB_STAT(restart_queue) },
+ {"rx_long_length_errors", IXGB_STAT(stats.roc)},
+ {"rx_short_length_errors", IXGB_STAT(stats.ruc)},
+ {"tx_tcp_seg_good", IXGB_STAT(stats.tsctc)},
+ {"tx_tcp_seg_failed", IXGB_STAT(stats.tsctfc)},
+ {"rx_flow_control_xon", IXGB_STAT(stats.xonrxc)},
+ {"rx_flow_control_xoff", IXGB_STAT(stats.xoffrxc)},
+ {"tx_flow_control_xon", IXGB_STAT(stats.xontxc)},
+ {"tx_flow_control_xoff", IXGB_STAT(stats.xofftxc)},
+ {"rx_csum_offload_good", IXGB_STAT(hw_csum_rx_good)},
+ {"rx_csum_offload_errors", IXGB_STAT(hw_csum_rx_error)},
+ {"tx_csum_offload_good", IXGB_STAT(hw_csum_tx_good)},
+ {"tx_csum_offload_errors", IXGB_STAT(hw_csum_tx_error)}
+};
+
+#define IXGB_STATS_LEN ARRAY_SIZE(ixgb_gstrings_stats)
+
+static int
+ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ if (netif_carrier_ok(adapter->netdev)) {
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ethtool_cmd_speed_set(ecmd, -1);
+ ecmd->duplex = -1;
+ }
+
+ ecmd->autoneg = AUTONEG_DISABLE;
+ return 0;
+}
+
+void ixgb_set_speed_duplex(struct net_device *netdev)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ /* be optimistic about our link, since we were up before */
+ adapter->link_speed = 10000;
+ adapter->link_duplex = FULL_DUPLEX;
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+}
+
+static int
+ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ u32 speed = ethtool_cmd_speed(ecmd);
+
+ if (ecmd->autoneg == AUTONEG_ENABLE ||
+ (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
+ return -EINVAL;
+
+ if (netif_running(adapter->netdev)) {
+ ixgb_down(adapter, true);
+ ixgb_reset(adapter);
+ ixgb_up(adapter);
+ ixgb_set_speed_duplex(netdev);
+ } else
+ ixgb_reset(adapter);
+
+ return 0;
+}
+
+static void
+ixgb_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ struct ixgb_hw *hw = &adapter->hw;
+
+ pause->autoneg = AUTONEG_DISABLE;
+
+ if (hw->fc.type == ixgb_fc_rx_pause)
+ pause->rx_pause = 1;
+ else if (hw->fc.type == ixgb_fc_tx_pause)
+ pause->tx_pause = 1;
+ else if (hw->fc.type == ixgb_fc_full) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+static int
+ixgb_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ struct ixgb_hw *hw = &adapter->hw;
+
+ if (pause->autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+
+ if (pause->rx_pause && pause->tx_pause)
+ hw->fc.type = ixgb_fc_full;
+ else if (pause->rx_pause && !pause->tx_pause)
+ hw->fc.type = ixgb_fc_rx_pause;
+ else if (!pause->rx_pause && pause->tx_pause)
+ hw->fc.type = ixgb_fc_tx_pause;
+ else if (!pause->rx_pause && !pause->tx_pause)
+ hw->fc.type = ixgb_fc_none;
+
+ if (netif_running(adapter->netdev)) {
+ ixgb_down(adapter, true);
+ ixgb_up(adapter);
+ ixgb_set_speed_duplex(netdev);
+ } else
+ ixgb_reset(adapter);
+
+ return 0;
+}
+
+static u32
+ixgb_get_msglevel(struct net_device *netdev)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void
+ixgb_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+#define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_
+
+static int
+ixgb_get_regs_len(struct net_device *netdev)
+{
+#define IXGB_REG_DUMP_LEN 136*sizeof(u32)
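+	/* 136 = 5 general + 4 interrupt + 12 receive + 32 RAR (16 entries x
+	 * 2 registers) + 10 transmit + 13 physical + 60 statistics words;
+	 * see the numbered comments in ixgb_get_regs() below */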
+ return IXGB_REG_DUMP_LEN;
+}
+
+static void
+ixgb_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ struct ixgb_hw *hw = &adapter->hw;
+ u32 *reg = p;
+ u32 *reg_start = reg;
+ u8 i;
+
+	/* the 1 (one) below indicates an attempt at versioning; if the
+	 * interface in ethtool or the driver changes, this 1 should be
+	 * incremented */
+ regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id;
+
+ /* General Registers */
+ *reg++ = IXGB_READ_REG(hw, CTRL0); /* 0 */
+ *reg++ = IXGB_READ_REG(hw, CTRL1); /* 1 */
+ *reg++ = IXGB_READ_REG(hw, STATUS); /* 2 */
+ *reg++ = IXGB_READ_REG(hw, EECD); /* 3 */
+ *reg++ = IXGB_READ_REG(hw, MFS); /* 4 */
+
+ /* Interrupt */
+ *reg++ = IXGB_READ_REG(hw, ICR); /* 5 */
+ *reg++ = IXGB_READ_REG(hw, ICS); /* 6 */
+ *reg++ = IXGB_READ_REG(hw, IMS); /* 7 */
+ *reg++ = IXGB_READ_REG(hw, IMC); /* 8 */
+
+ /* Receive */
+ *reg++ = IXGB_READ_REG(hw, RCTL); /* 9 */
+ *reg++ = IXGB_READ_REG(hw, FCRTL); /* 10 */
+ *reg++ = IXGB_READ_REG(hw, FCRTH); /* 11 */
+ *reg++ = IXGB_READ_REG(hw, RDBAL); /* 12 */
+ *reg++ = IXGB_READ_REG(hw, RDBAH); /* 13 */
+ *reg++ = IXGB_READ_REG(hw, RDLEN); /* 14 */
+ *reg++ = IXGB_READ_REG(hw, RDH); /* 15 */
+ *reg++ = IXGB_READ_REG(hw, RDT); /* 16 */
+ *reg++ = IXGB_READ_REG(hw, RDTR); /* 17 */
+ *reg++ = IXGB_READ_REG(hw, RXDCTL); /* 18 */
+ *reg++ = IXGB_READ_REG(hw, RAIDC); /* 19 */
+ *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */
+
+	/* there are 16 RAR entries in hardware; only 3 are used, but dump all 16 */
+ for (i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) {
+ *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
+ *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
+ }
+
+ /* Transmit */
+ *reg++ = IXGB_READ_REG(hw, TCTL); /* 53 */
+ *reg++ = IXGB_READ_REG(hw, TDBAL); /* 54 */
+ *reg++ = IXGB_READ_REG(hw, TDBAH); /* 55 */
+ *reg++ = IXGB_READ_REG(hw, TDLEN); /* 56 */
+ *reg++ = IXGB_READ_REG(hw, TDH); /* 57 */
+ *reg++ = IXGB_READ_REG(hw, TDT); /* 58 */
+ *reg++ = IXGB_READ_REG(hw, TIDV); /* 59 */
+ *reg++ = IXGB_READ_REG(hw, TXDCTL); /* 60 */
+ *reg++ = IXGB_READ_REG(hw, TSPMT); /* 61 */
+ *reg++ = IXGB_READ_REG(hw, PAP); /* 62 */
+
+ /* Physical */
+ *reg++ = IXGB_READ_REG(hw, PCSC1); /* 63 */
+ *reg++ = IXGB_READ_REG(hw, PCSC2); /* 64 */
+ *reg++ = IXGB_READ_REG(hw, PCSS1); /* 65 */
+ *reg++ = IXGB_READ_REG(hw, PCSS2); /* 66 */
+ *reg++ = IXGB_READ_REG(hw, XPCSS); /* 67 */
+ *reg++ = IXGB_READ_REG(hw, UCCR); /* 68 */
+ *reg++ = IXGB_READ_REG(hw, XPCSTC); /* 69 */
+ *reg++ = IXGB_READ_REG(hw, MACA); /* 70 */
+ *reg++ = IXGB_READ_REG(hw, APAE); /* 71 */
+ *reg++ = IXGB_READ_REG(hw, ARD); /* 72 */
+ *reg++ = IXGB_READ_REG(hw, AIS); /* 73 */
+ *reg++ = IXGB_READ_REG(hw, MSCA); /* 74 */
+ *reg++ = IXGB_READ_REG(hw, MSRWD); /* 75 */
+
+ /* Statistics */
+ *reg++ = IXGB_GET_STAT(adapter, tprl); /* 76 */
+ *reg++ = IXGB_GET_STAT(adapter, tprh); /* 77 */
+ *reg++ = IXGB_GET_STAT(adapter, gprcl); /* 78 */
+ *reg++ = IXGB_GET_STAT(adapter, gprch); /* 79 */
+ *reg++ = IXGB_GET_STAT(adapter, bprcl); /* 80 */
+ *reg++ = IXGB_GET_STAT(adapter, bprch); /* 81 */
+ *reg++ = IXGB_GET_STAT(adapter, mprcl); /* 82 */
+ *reg++ = IXGB_GET_STAT(adapter, mprch); /* 83 */
+ *reg++ = IXGB_GET_STAT(adapter, uprcl); /* 84 */
+ *reg++ = IXGB_GET_STAT(adapter, uprch); /* 85 */
+ *reg++ = IXGB_GET_STAT(adapter, vprcl); /* 86 */
+ *reg++ = IXGB_GET_STAT(adapter, vprch); /* 87 */
+ *reg++ = IXGB_GET_STAT(adapter, jprcl); /* 88 */
+ *reg++ = IXGB_GET_STAT(adapter, jprch); /* 89 */
+ *reg++ = IXGB_GET_STAT(adapter, gorcl); /* 90 */
+ *reg++ = IXGB_GET_STAT(adapter, gorch); /* 91 */
+ *reg++ = IXGB_GET_STAT(adapter, torl); /* 92 */
+ *reg++ = IXGB_GET_STAT(adapter, torh); /* 93 */
+ *reg++ = IXGB_GET_STAT(adapter, rnbc); /* 94 */
+ *reg++ = IXGB_GET_STAT(adapter, ruc); /* 95 */
+ *reg++ = IXGB_GET_STAT(adapter, roc); /* 96 */
+ *reg++ = IXGB_GET_STAT(adapter, rlec); /* 97 */
+ *reg++ = IXGB_GET_STAT(adapter, crcerrs); /* 98 */
+ *reg++ = IXGB_GET_STAT(adapter, icbc); /* 99 */
+ *reg++ = IXGB_GET_STAT(adapter, ecbc); /* 100 */
+ *reg++ = IXGB_GET_STAT(adapter, mpc); /* 101 */
+ *reg++ = IXGB_GET_STAT(adapter, tptl); /* 102 */
+ *reg++ = IXGB_GET_STAT(adapter, tpth); /* 103 */
+ *reg++ = IXGB_GET_STAT(adapter, gptcl); /* 104 */
+ *reg++ = IXGB_GET_STAT(adapter, gptch); /* 105 */
+ *reg++ = IXGB_GET_STAT(adapter, bptcl); /* 106 */
+ *reg++ = IXGB_GET_STAT(adapter, bptch); /* 107 */
+ *reg++ = IXGB_GET_STAT(adapter, mptcl); /* 108 */
+ *reg++ = IXGB_GET_STAT(adapter, mptch); /* 109 */
+ *reg++ = IXGB_GET_STAT(adapter, uptcl); /* 110 */
+ *reg++ = IXGB_GET_STAT(adapter, uptch); /* 111 */
+ *reg++ = IXGB_GET_STAT(adapter, vptcl); /* 112 */
+ *reg++ = IXGB_GET_STAT(adapter, vptch); /* 113 */
+ *reg++ = IXGB_GET_STAT(adapter, jptcl); /* 114 */
+ *reg++ = IXGB_GET_STAT(adapter, jptch); /* 115 */
+ *reg++ = IXGB_GET_STAT(adapter, gotcl); /* 116 */
+ *reg++ = IXGB_GET_STAT(adapter, gotch); /* 117 */
+ *reg++ = IXGB_GET_STAT(adapter, totl); /* 118 */
+ *reg++ = IXGB_GET_STAT(adapter, toth); /* 119 */
+ *reg++ = IXGB_GET_STAT(adapter, dc); /* 120 */
+ *reg++ = IXGB_GET_STAT(adapter, plt64c); /* 121 */
+ *reg++ = IXGB_GET_STAT(adapter, tsctc); /* 122 */
+ *reg++ = IXGB_GET_STAT(adapter, tsctfc); /* 123 */
+ *reg++ = IXGB_GET_STAT(adapter, ibic); /* 124 */
+ *reg++ = IXGB_GET_STAT(adapter, rfc); /* 125 */
+ *reg++ = IXGB_GET_STAT(adapter, lfc); /* 126 */
+ *reg++ = IXGB_GET_STAT(adapter, pfrc); /* 127 */
+ *reg++ = IXGB_GET_STAT(adapter, pftc); /* 128 */
+ *reg++ = IXGB_GET_STAT(adapter, mcfrc); /* 129 */
+ *reg++ = IXGB_GET_STAT(adapter, mcftc); /* 130 */
+ *reg++ = IXGB_GET_STAT(adapter, xonrxc); /* 131 */
+ *reg++ = IXGB_GET_STAT(adapter, xontxc); /* 132 */
+ *reg++ = IXGB_GET_STAT(adapter, xoffrxc); /* 133 */
+ *reg++ = IXGB_GET_STAT(adapter, xofftxc); /* 134 */
+ *reg++ = IXGB_GET_STAT(adapter, rjc); /* 135 */
+
+ regs->len = (reg - reg_start) * sizeof(u32);
+}
+
+static int
+ixgb_get_eeprom_len(struct net_device *netdev)
+{
+ /* return size in bytes */
+ return IXGB_EEPROM_SIZE << 1;
+}
+
+static int
+ixgb_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ struct ixgb_hw *hw = &adapter->hw;
+ __le16 *eeprom_buff;
+ int i, max_len, first_word, last_word;
+ int ret_val = 0;
+
+ if (eeprom->len == 0) {
+ ret_val = -EINVAL;
+ goto geeprom_error;
+ }
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ max_len = ixgb_get_eeprom_len(netdev);
+
+ if (eeprom->offset > eeprom->offset + eeprom->len) {
+ ret_val = -EINVAL;
+ goto geeprom_error;
+ }
+
+ if ((eeprom->offset + eeprom->len) > max_len)
+ eeprom->len = (max_len - eeprom->offset);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc(sizeof(__le16) *
+ (last_word - first_word + 1), GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+	/* the EEPROM is known to be good, since the driver loaded successfully */
+ for (i = 0; i <= (last_word - first_word); i++)
+ eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i));
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+ kfree(eeprom_buff);
+
+geeprom_error:
+ return ret_val;
+}
+
+static int
+ixgb_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ struct ixgb_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ void *ptr;
+ int max_len, first_word, last_word;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ return -EFAULT;
+
+ max_len = ixgb_get_eeprom_len(netdev);
+
+ if (eeprom->offset > eeprom->offset + eeprom->len)
+ return -EINVAL;
+
+ if ((eeprom->offset + eeprom->len) > max_len)
+ eeprom->len = (max_len - eeprom->offset);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = (void *)eeprom_buff;
+
+ if (eeprom->offset & 1) {
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ eeprom_buff[0] = ixgb_read_eeprom(hw, first_word);
+ ptr++;
+ }
+ if ((eeprom->offset + eeprom->len) & 1) {
+ /* need read/modify/write of last changed EEPROM word */
+ /* only the first byte of the word is being modified */
+ eeprom_buff[last_word - first_word]
+ = ixgb_read_eeprom(hw, last_word);
+ }
+
+ memcpy(ptr, bytes, eeprom->len);
+ for (i = 0; i <= (last_word - first_word); i++)
+ ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]);
+
+ /* Update the checksum over the first part of the EEPROM if needed */
+ if (first_word <= EEPROM_CHECKSUM_REG)
+ ixgb_update_eeprom_checksum(hw);
+
+ kfree(eeprom_buff);
+ return 0;
+}
+
+static void
+ixgb_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+
+ strncpy(drvinfo->driver, ixgb_driver_name, 32);
+ strncpy(drvinfo->version, ixgb_driver_version, 32);
+ strncpy(drvinfo->fw_version, "N/A", 32);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ drvinfo->n_stats = IXGB_STATS_LEN;
+ drvinfo->regdump_len = ixgb_get_regs_len(netdev);
+ drvinfo->eedump_len = ixgb_get_eeprom_len(netdev);
+}
+
+static void
+ixgb_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ struct ixgb_desc_ring *txdr = &adapter->tx_ring;
+ struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
+
+ ring->rx_max_pending = MAX_RXD;
+ ring->tx_max_pending = MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rxdr->count;
+ ring->tx_pending = txdr->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int
+ixgb_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ struct ixgb_desc_ring *txdr = &adapter->tx_ring;
+ struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
+ struct ixgb_desc_ring tx_old, tx_new, rx_old, rx_new;
+ int err;
+
+ tx_old = adapter->tx_ring;
+ rx_old = adapter->rx_ring;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ if (netif_running(adapter->netdev))
+ ixgb_down(adapter, true);
+
+	rxdr->count = max(ring->rx_pending, (u32)MIN_RXD);
+	rxdr->count = min(rxdr->count, (u32)MAX_RXD);
+ rxdr->count = ALIGN(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+	txdr->count = max(ring->tx_pending, (u32)MIN_TXD);
+	txdr->count = min(txdr->count, (u32)MAX_TXD);
+ txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ if (netif_running(adapter->netdev)) {
+ /* Try to get new resources before deleting old */
+ if ((err = ixgb_setup_rx_resources(adapter)))
+ goto err_setup_rx;
+ if ((err = ixgb_setup_tx_resources(adapter)))
+ goto err_setup_tx;
+
+ /* save the new, restore the old in order to free it,
+ * then restore the new back again */
+
+ rx_new = adapter->rx_ring;
+ tx_new = adapter->tx_ring;
+ adapter->rx_ring = rx_old;
+ adapter->tx_ring = tx_old;
+ ixgb_free_rx_resources(adapter);
+ ixgb_free_tx_resources(adapter);
+ adapter->rx_ring = rx_new;
+ adapter->tx_ring = tx_new;
+ if ((err = ixgb_up(adapter)))
+ return err;
+ ixgb_set_speed_duplex(netdev);
+ }
+
+ return 0;
+err_setup_tx:
+ ixgb_free_rx_resources(adapter);
+err_setup_rx:
+ adapter->rx_ring = rx_old;
+ adapter->tx_ring = tx_old;
+ ixgb_up(adapter);
+ return err;
+}
+
+static int
+ixgb_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 2;
+
+ case ETHTOOL_ID_ON:
+ ixgb_led_on(&adapter->hw);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ case ETHTOOL_ID_INACTIVE:
+ ixgb_led_off(&adapter->hw);
+ }
+
+ return 0;
+}
+
+static int
+ixgb_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return IXGB_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+ixgb_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ int i;
+ char *p = NULL;
+
+ ixgb_update_stats(adapter);
+ for (i = 0; i < IXGB_STATS_LEN; i++) {
+ switch (ixgb_gstrings_stats[i].type) {
+ case NETDEV_STATS:
+ p = (char *) netdev +
+ ixgb_gstrings_stats[i].stat_offset;
+ break;
+ case IXGB_STATS:
+ p = (char *) adapter +
+ ixgb_gstrings_stats[i].stat_offset;
+ break;
+ }
+
+ data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+}
+
+static void
+ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ int i;
+
+	switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < IXGB_STATS_LEN; i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ ixgb_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ }
+}
+
+static const struct ethtool_ops ixgb_ethtool_ops = {
+ .get_settings = ixgb_get_settings,
+ .set_settings = ixgb_set_settings,
+ .get_drvinfo = ixgb_get_drvinfo,
+ .get_regs_len = ixgb_get_regs_len,
+ .get_regs = ixgb_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = ixgb_get_eeprom_len,
+ .get_eeprom = ixgb_get_eeprom,
+ .set_eeprom = ixgb_set_eeprom,
+ .get_ringparam = ixgb_get_ringparam,
+ .set_ringparam = ixgb_set_ringparam,
+ .get_pauseparam = ixgb_get_pauseparam,
+ .set_pauseparam = ixgb_set_pauseparam,
+ .get_msglevel = ixgb_get_msglevel,
+ .set_msglevel = ixgb_set_msglevel,
+ .get_strings = ixgb_get_strings,
+ .set_phys_id = ixgb_set_phys_id,
+ .get_sset_count = ixgb_get_sset_count,
+ .get_ethtool_stats = ixgb_get_ethtool_stats,
+};
+
+void ixgb_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &ixgb_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
new file mode 100644
index 000000000000..99b69adb4a0f
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
@@ -0,0 +1,1262 @@
+/*******************************************************************************
+
+ Intel PRO/10GbE Linux driver
+ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ixgb_hw.c
+ * Shared functions for accessing and configuring the adapter
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "ixgb_hw.h"
+#include "ixgb_ids.h"
+
+#include <linux/etherdevice.h>
+
+/* Local function prototypes */
+
+static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 *mc_addr);
+
+static void ixgb_mta_set(struct ixgb_hw *hw, u32 hash_value);
+
+static void ixgb_get_bus_info(struct ixgb_hw *hw);
+
+static bool ixgb_link_reset(struct ixgb_hw *hw);
+
+static void ixgb_optics_reset(struct ixgb_hw *hw);
+
+static void ixgb_optics_reset_bcm(struct ixgb_hw *hw);
+
+static ixgb_phy_type ixgb_identify_phy(struct ixgb_hw *hw);
+
+static void ixgb_clear_hw_cntrs(struct ixgb_hw *hw);
+
+static void ixgb_clear_vfta(struct ixgb_hw *hw);
+
+static void ixgb_init_rx_addrs(struct ixgb_hw *hw);
+
+static u16 ixgb_read_phy_reg(struct ixgb_hw *hw,
+ u32 reg_address,
+ u32 phy_address,
+ u32 device_type);
+
+static bool ixgb_setup_fc(struct ixgb_hw *hw);
+
+static bool mac_addr_valid(u8 *mac_addr);
+
+static u32 ixgb_mac_reset(struct ixgb_hw *hw)
+{
+ u32 ctrl_reg;
+
+ ctrl_reg = IXGB_CTRL0_RST |
+ IXGB_CTRL0_SDP3_DIR | /* All pins are Output=1 */
+ IXGB_CTRL0_SDP2_DIR |
+ IXGB_CTRL0_SDP1_DIR |
+ IXGB_CTRL0_SDP0_DIR |
+ IXGB_CTRL0_SDP3 | /* Initial value 1101 */
+ IXGB_CTRL0_SDP2 |
+ IXGB_CTRL0_SDP0;
+
+#ifdef HP_ZX1
+ /* Workaround for 82597EX reset errata */
+ IXGB_WRITE_REG_IO(hw, CTRL0, ctrl_reg);
+#else
+ IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
+#endif
+
+ /* Delay a few ms just to allow the reset to complete */
+ msleep(IXGB_DELAY_AFTER_RESET);
+ ctrl_reg = IXGB_READ_REG(hw, CTRL0);
+#ifdef DBG
+ /* Make sure the self-clearing global reset bit did self clear */
+ ASSERT(!(ctrl_reg & IXGB_CTRL0_RST));
+#endif
+
+ if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID) {
+ ctrl_reg = /* Enable interrupt from XFP and SerDes */
+ IXGB_CTRL1_GPI0_EN |
+ IXGB_CTRL1_SDP6_DIR |
+ IXGB_CTRL1_SDP7_DIR |
+ IXGB_CTRL1_SDP6 |
+ IXGB_CTRL1_SDP7;
+ IXGB_WRITE_REG(hw, CTRL1, ctrl_reg);
+ ixgb_optics_reset_bcm(hw);
+ }
+
+ if (hw->phy_type == ixgb_phy_type_txn17401)
+ ixgb_optics_reset(hw);
+
+ return ctrl_reg;
+}
+
+/******************************************************************************
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+bool
+ixgb_adapter_stop(struct ixgb_hw *hw)
+{
+ u32 ctrl_reg;
+ u32 icr_reg;
+
+ ENTER();
+
+ /* If we are stopped or resetting exit gracefully and wait to be
+ * started again before accessing the hardware.
+ */
+ if (hw->adapter_stopped) {
+ pr_debug("Exiting because the adapter is already stopped!!!\n");
+ return false;
+ }
+
+ /* Set the Adapter Stopped flag so other driver functions stop
+ * touching the Hardware.
+ */
+ hw->adapter_stopped = true;
+
+ /* Clear interrupt mask to stop board from generating interrupts */
+ pr_debug("Masking off all interrupts\n");
+ IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF);
+
+ /* Disable the Transmit and Receive units. Then delay to allow
+ * any pending transactions to complete before we hit the MAC with
+ * the global reset.
+ */
+ IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN);
+ IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN);
+ IXGB_WRITE_FLUSH(hw);
+ msleep(IXGB_DELAY_BEFORE_RESET);
+
+ /* Issue a global reset to the MAC. This will reset the chip's
+	 * transmit, receive, DMA, and link units. It will not affect
+ * the current PCI configuration. The global reset bit is self-
+ * clearing, and should clear within a microsecond.
+ */
+ pr_debug("Issuing a global reset to MAC\n");
+
+ ctrl_reg = ixgb_mac_reset(hw);
+
+ /* Clear interrupt mask to stop board from generating interrupts */
+ pr_debug("Masking off all interrupts\n");
+ IXGB_WRITE_REG(hw, IMC, 0xffffffff);
+
+ /* Clear any pending interrupt events. */
+ icr_reg = IXGB_READ_REG(hw, ICR);
+
+ return ctrl_reg & IXGB_CTRL0_RST;
+}
+
+/******************************************************************************
+ * Identifies the vendor of the optics module on the adapter. The SR adapters
+ * support two different types of XPAK optics, so it is necessary to determine
+ * which optics are present before applying any optics-specific workarounds.
+ *
+ * hw - Struct containing variables accessed by shared code.
+ *
+ * Returns: the vendor of the XPAK optics module.
+ *****************************************************************************/
+static ixgb_xpak_vendor
+ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
+{
+ u32 i;
+ u16 vendor_name[5];
+ ixgb_xpak_vendor xpak_vendor;
+
+ ENTER();
+
+ /* Read the first few bytes of the vendor string from the XPAK NVR
+ * registers. These are standard XENPAK/XPAK registers, so all XPAK
+ * devices should implement them. */
+ for (i = 0; i < 5; i++) {
+ vendor_name[i] = ixgb_read_phy_reg(hw,
+ MDIO_PMA_PMD_XPAK_VENDOR_NAME
+ + i, IXGB_PHY_ADDRESS,
+ MDIO_MMD_PMAPMD);
+ }
+
+ /* Determine the actual vendor */
+ if (vendor_name[0] == 'I' &&
+ vendor_name[1] == 'N' &&
+ vendor_name[2] == 'T' &&
+ vendor_name[3] == 'E' && vendor_name[4] == 'L') {
+ xpak_vendor = ixgb_xpak_vendor_intel;
+ } else {
+ xpak_vendor = ixgb_xpak_vendor_infineon;
+ }
+
+ return xpak_vendor;
+}
+
+/******************************************************************************
+ * Determine the physical layer module on the adapter.
+ *
+ * hw - Struct containing variables accessed by shared code. The device_id
+ * field must be (correctly) populated before calling this routine.
+ *
+ * Returns: the phy type of the adapter.
+ *****************************************************************************/
+static ixgb_phy_type
+ixgb_identify_phy(struct ixgb_hw *hw)
+{
+ ixgb_phy_type phy_type;
+ ixgb_xpak_vendor xpak_vendor;
+
+ ENTER();
+
+ /* Infer the transceiver/phy type from the device id */
+ switch (hw->device_id) {
+ case IXGB_DEVICE_ID_82597EX:
+ pr_debug("Identified TXN17401 optics\n");
+ phy_type = ixgb_phy_type_txn17401;
+ break;
+
+ case IXGB_DEVICE_ID_82597EX_SR:
+ /* The SR adapters carry two different types of XPAK optics
+ * modules; read the vendor identifier to determine the exact
+ * type of optics. */
+ xpak_vendor = ixgb_identify_xpak_vendor(hw);
+ if (xpak_vendor == ixgb_xpak_vendor_intel) {
+ pr_debug("Identified TXN17201 optics\n");
+ phy_type = ixgb_phy_type_txn17201;
+ } else {
+ pr_debug("Identified G6005 optics\n");
+ phy_type = ixgb_phy_type_g6005;
+ }
+ break;
+ case IXGB_DEVICE_ID_82597EX_LR:
+ pr_debug("Identified G6104 optics\n");
+ phy_type = ixgb_phy_type_g6104;
+ break;
+ case IXGB_DEVICE_ID_82597EX_CX4:
+ pr_debug("Identified CX4\n");
+ xpak_vendor = ixgb_identify_xpak_vendor(hw);
+ if (xpak_vendor == ixgb_xpak_vendor_intel) {
+ pr_debug("Identified TXN17201 optics\n");
+ phy_type = ixgb_phy_type_txn17201;
+ } else {
+ pr_debug("Identified G6005 optics\n");
+ phy_type = ixgb_phy_type_g6005;
+ }
+ break;
+ default:
+ pr_debug("Unknown physical layer module\n");
+ phy_type = ixgb_phy_type_unknown;
+ break;
+ }
+
+ /* update phy type for sun specific board */
+ if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID)
+ phy_type = ixgb_phy_type_bcm;
+
+ return phy_type;
+}
+
+/******************************************************************************
+ * Performs basic configuration of the adapter.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Resets the controller.
+ * Reads and validates the EEPROM.
+ * Initializes the receive address registers.
+ * Initializes the multicast table.
+ * Clears all on-chip counters.
+ * Calls a routine to set up the flow control settings.
+ * Leaves the transmit and receive units disabled and uninitialized.
+ *
+ * Returns:
+ * true if successful,
+ * false if unrecoverable problems were encountered.
+ *****************************************************************************/
+bool
+ixgb_init_hw(struct ixgb_hw *hw)
+{
+ u32 i;
+ u32 ctrl_reg;
+ bool status;
+
+ ENTER();
+
+ /* Issue a global reset to the MAC. This will reset the chip's
+	 * transmit, receive, DMA, and link units. It will not affect
+ * the current PCI configuration. The global reset bit is self-
+ * clearing, and should clear within a microsecond.
+ */
+ pr_debug("Issuing a global reset to MAC\n");
+
+ ctrl_reg = ixgb_mac_reset(hw);
+
+ pr_debug("Issuing an EE reset to MAC\n");
+#ifdef HP_ZX1
+ /* Workaround for 82597EX reset errata */
+ IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST);
+#else
+ IXGB_WRITE_REG(hw, CTRL1, IXGB_CTRL1_EE_RST);
+#endif
+
+ /* Delay a few ms just to allow the reset to complete */
+ msleep(IXGB_DELAY_AFTER_EE_RESET);
+
+ if (!ixgb_get_eeprom_data(hw))
+ return false;
+
+ /* Use the device id to determine the type of phy/transceiver. */
+ hw->device_id = ixgb_get_ee_device_id(hw);
+ hw->phy_type = ixgb_identify_phy(hw);
+
+ /* Setup the receive addresses.
+ * Receive Address Registers (RARs 0 - 15).
+ */
+ ixgb_init_rx_addrs(hw);
+
+ /*
+ * Check that a valid MAC address has been set.
+ * If it is not valid, we fail hardware init.
+ */
+ if (!mac_addr_valid(hw->curr_mac_addr)) {
+ pr_debug("MAC address invalid after ixgb_init_rx_addrs\n");
+		return false;
+ }
+
+ /* tell the routines in this file they can access hardware again */
+ hw->adapter_stopped = false;
+
+ /* Fill in the bus_info structure */
+ ixgb_get_bus_info(hw);
+
+ /* Zero out the Multicast HASH table */
+ pr_debug("Zeroing the MTA\n");
+ for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
+ IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
+
+ /* Zero out the VLAN Filter Table Array */
+ ixgb_clear_vfta(hw);
+
+ /* Zero all of the hardware counters */
+ ixgb_clear_hw_cntrs(hw);
+
+	/* Call a subroutine to set up flow control. */
+ status = ixgb_setup_fc(hw);
+
+ /* 82597EX errata: Call check-for-link in case lane deskew is locked */
+ ixgb_check_for_link(hw);
+
+ return status;
+}
+
+/******************************************************************************
+ * Initializes receive address filters.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ *****************************************************************************/
+static void
+ixgb_init_rx_addrs(struct ixgb_hw *hw)
+{
+ u32 i;
+
+ ENTER();
+
+ /*
+ * If the current mac address is valid, assume it is a software override
+ * to the permanent address.
+ * Otherwise, use the permanent address from the eeprom.
+ */
+ if (!mac_addr_valid(hw->curr_mac_addr)) {
+
+ /* Get the MAC address from the eeprom for later reference */
+ ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr);
+
+ pr_debug("Keeping Permanent MAC Addr = %pM\n",
+ hw->curr_mac_addr);
+ } else {
+
+ /* Setup the receive address. */
+ pr_debug("Overriding MAC Address in RAR[0]\n");
+ pr_debug("New MAC Addr = %pM\n", hw->curr_mac_addr);
+
+ ixgb_rar_set(hw, hw->curr_mac_addr, 0);
+ }
+
+ /* Zero out the other 15 receive addresses. */
+ pr_debug("Clearing RAR[1-15]\n");
+ for (i = 1; i < IXGB_RAR_ENTRIES; i++) {
+ /* Write high reg first to disable the AV bit first */
+ IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+ IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+ }
+}
+
+/******************************************************************************
+ * Updates the MAC's list of multicast addresses.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr_list - the list of new multicast addresses
+ * mc_addr_count - number of addresses
+ * pad - number of bytes between addresses in the list
+ *
+ * The given list replaces any existing list. Clears the last 15 receive
+ * address registers and the multicast table. Uses receive address registers
+ * for the first 15 multicast addresses, and hashes the rest into the
+ * multicast table.
+ *****************************************************************************/
+void
+ixgb_mc_addr_list_update(struct ixgb_hw *hw,
+ u8 *mc_addr_list,
+ u32 mc_addr_count,
+ u32 pad)
+{
+ u32 hash_value;
+ u32 i;
+ u32 rar_used_count = 1; /* RAR[0] is used for our MAC address */
+ u8 *mca;
+
+ ENTER();
+
+	/* Record the new number of MC addresses we were asked to use. */
+ hw->num_mc_addrs = mc_addr_count;
+
+ /* Clear RAR[1] through RAR[IXGB_RAR_ENTRIES - 1] */
+ pr_debug("Clearing RAR[1-%d]\n", IXGB_RAR_ENTRIES - 1);
+ for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
+ IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+ IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+ }
+
+ /* Clear the MTA */
+ pr_debug("Clearing MTA\n");
+ for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
+ IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
+
+ /* Add the new addresses */
+ pr_debug("Adding the multicast addresses:\n");
+ mca = mc_addr_list;
+ for (i = 0; i < mc_addr_count; i++) {
+ pr_debug("MC Addr #%d = %pM\n", i, mca);
+
+ /* Place this multicast address in the RAR if there is room,
+ * else put it in the MTA
+ */
+ if (rar_used_count < IXGB_RAR_ENTRIES) {
+ ixgb_rar_set(hw, mca, rar_used_count);
+ pr_debug("Added a multicast address to RAR[%d]\n", i);
+ rar_used_count++;
+ } else {
+ hash_value = ixgb_hash_mc_addr(hw, mca);
+
+ pr_debug("Hash value = 0x%03X\n", hash_value);
+
+ ixgb_mta_set(hw, hash_value);
+ }
+
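+ /* Advance to the next entry: ETH_ALEN (6) address bytes plus
+ * any pad bytes between entries.
+ */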
+ mca += ETH_ALEN + pad;
+ }
+
+ pr_debug("MC Update Complete\n");
+}
+
+/******************************************************************************
+ * Hashes an address to determine its location in the multicast table
+ *
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr - the multicast address to hash
+ *
+ * Returns:
+ * The hash value
+ *****************************************************************************/
+static u32
+ixgb_hash_mc_addr(struct ixgb_hw *hw,
+ u8 *mc_addr)
+{
+ u32 hash_value = 0;
+
+ ENTER();
+
+ /* The portion of the address that is used for the hash table is
+ * determined by the mc_filter_type setting.
+ */
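+ /* The four filter types correspond to the RCTL MO (multicast
+ * offset) settings IXGB_RCTL_MO_47_36 through IXGB_RCTL_MO_43_32
+ * defined in ixgb_hw.h.
+ */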
+ switch (hw->mc_filter_type) {
+ /* [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB MSB - According to H/W docs */
+ case 0:
+ /* [47:36] i.e. 0x563 for above example address */
+ hash_value =
+ ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
+ break;
+ case 1: /* [46:35] i.e. 0xAC6 for above example address */
+ hash_value =
+ ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
+ break;
+ case 2: /* [45:34] i.e. 0x5D8 for above example address */
+ hash_value =
+ ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
+ break;
+ case 3: /* [43:32] i.e. 0x634 for above example address */
+ hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
+ break;
+ default:
+ /* Invalid mc_filter_type, what should we do? */
+ pr_debug("MC filter type param set incorrectly\n");
+ ASSERT(0);
+ break;
+ }
+
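+ /* Mask to 12 bits; the hash indexes a 4096-bit table built from
+ * IXGB_MC_TBL_SIZE (128) 32-bit registers.
+ */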
+ hash_value &= 0xFFF;
+ return hash_value;
+}
+
+/******************************************************************************
+ * Sets the bit in the multicast table corresponding to the hash value.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ *****************************************************************************/
+static void
+ixgb_mta_set(struct ixgb_hw *hw,
+ u32 hash_value)
+{
+ u32 hash_bit, hash_reg;
+ u32 mta_reg;
+
+ /* The MTA is a register array of 128 32-bit registers.
+ * It is treated like an array of 4096 bits. We want to set
+ * bit BitArray[hash_value]. So we figure out what register
+ * the bit is in, read it, OR in the new bit, then write
+ * back the new value. The register is determined by the
+ * upper 7 bits of the hash value and the bit within that
+ * register is determined by the lower 5 bits of the value.
+ */
+ hash_reg = (hash_value >> 5) & 0x7F;
+ hash_bit = hash_value & 0x1F;
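+ /* For example, hash_value 0x563 selects bit 3 (0x563 & 0x1F) of
+ * MTA register 0x2B (0x563 >> 5).
+ */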
+
+ mta_reg = IXGB_READ_REG_ARRAY(hw, MTA, hash_reg);
+
+ mta_reg |= (1 << hash_bit);
+
+ IXGB_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta_reg);
+}
+
+/******************************************************************************
+ * Puts an ethernet address into a receive address register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * addr - Address to put into receive address register
+ * index - Receive address register to write
+ *****************************************************************************/
+void
+ixgb_rar_set(struct ixgb_hw *hw,
+ u8 *addr,
+ u32 index)
+{
+ u32 rar_low, rar_high;
+
+ ENTER();
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
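+ /* For example, the address 00:AA:11:22:33:44 produces
+ * rar_low = 0x2211AA00 and rar_high = 0x80004433 (AV bit set).
+ */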
+ rar_low = ((u32) addr[0] |
+ ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) |
+ ((u32)addr[3] << 24));
+
+ rar_high = ((u32) addr[4] |
+ ((u32)addr[5] << 8) |
+ IXGB_RAH_AV);
+
+ IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
+ IXGB_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
+}
+
+/******************************************************************************
+ * Writes a value to the specified offset in the VLAN filter table.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - Offset in VLAN filter table to write
+ * value - Value to write into VLAN filter table
+ *****************************************************************************/
+void
+ixgb_write_vfta(struct ixgb_hw *hw,
+ u32 offset,
+ u32 value)
+{
+ IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+}
+
+/******************************************************************************
+ * Clears the VLAN filter table
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+ixgb_clear_vfta(struct ixgb_hw *hw)
+{
+ u32 offset;
+
+ for (offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
+ IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
+}
+
+/******************************************************************************
+ * Configures the flow control settings based on SW configuration.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+
+static bool
+ixgb_setup_fc(struct ixgb_hw *hw)
+{
+ u32 ctrl_reg;
+ u32 pap_reg = 0; /* by default, assume no pause time */
+ bool status = true;
+
+ ENTER();
+
+ /* Get the current control reg 0 settings */
+ ctrl_reg = IXGB_READ_REG(hw, CTRL0);
+
+ /* Clear the Receive Pause Enable and Transmit Pause Enable bits */
+ ctrl_reg &= ~(IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
+
+ /* The possible values of the "flow_control" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.type) {
+ case ixgb_fc_none: /* 0 */
+ /* Set CMDC bit to disable Rx Flow control */
+ ctrl_reg |= (IXGB_CTRL0_CMDC);
+ break;
+ case ixgb_fc_rx_pause: /* 1 */
+ /* Rx flow control is enabled, and Tx flow control is
+ * disabled.
+ */
+ ctrl_reg |= (IXGB_CTRL0_RPE);
+ break;
+ case ixgb_fc_tx_pause: /* 2 */
+ /* Tx flow control is enabled, and Rx flow control is
+ * disabled by a software override.
+ */
+ ctrl_reg |= (IXGB_CTRL0_TPE);
+ pap_reg = hw->fc.pause_time;
+ break;
+ case ixgb_fc_full: /* 3 */
+ /* Flow control (both Rx and Tx) is enabled by a software
+ * override.
+ */
+ ctrl_reg |= (IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
+ pap_reg = hw->fc.pause_time;
+ break;
+ default:
+ /* We should never get here. The value should be 0-3. */
+ pr_debug("Flow control param set incorrectly\n");
+ ASSERT(0);
+ break;
+ }
+
+ /* Write the new settings */
+ IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
+
+ if (pap_reg != 0)
+ IXGB_WRITE_REG(hw, PAP, pap_reg);
+
+ /* Set the flow control receive threshold registers. Normally,
+ * these registers will be set to a default threshold that may be
+ * adjusted later by the driver's runtime code. However, if the
+ * ability to transmit pause frames is not enabled, then these
+ * registers will be set to 0.
+ */
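+ /* For reference, the historical defaults in ixgb_hw.h are 32KB
+ * high water and 16KB low water (FC_DEFAULT_HI_THRESH and
+ * FC_DEFAULT_LO_THRESH).
+ */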
+ if (!(hw->fc.type & ixgb_fc_tx_pause)) {
+ IXGB_WRITE_REG(hw, FCRTL, 0);
+ IXGB_WRITE_REG(hw, FCRTH, 0);
+ } else {
+ /* We need to set up the Receive Threshold high and low water
+ * marks as well as (optionally) enable the transmission of XON
+ * frames.
+ */
+ if (hw->fc.send_xon) {
+ IXGB_WRITE_REG(hw, FCRTL,
+ (hw->fc.low_water | IXGB_FCRTL_XONE));
+ } else {
+ IXGB_WRITE_REG(hw, FCRTL, hw->fc.low_water);
+ }
+ IXGB_WRITE_REG(hw, FCRTH, hw->fc.high_water);
+ }
+ return status;
+}
+
+/******************************************************************************
+ * Reads a word from a device over the Management Data Interface (MDI) bus.
+ * This interface is used to manage Physical layer devices.
+ *
+ * hw - Struct containing variables accessed by hw code
+ * reg_address - Offset of device register being read.
+ * phy_address - Address of device on MDI.
+ * device_type - Also known as the Device ID or DID.
+ *
+ * Returns: Data word (16 bits) from MDI device.
+ *
+ * The 82597EX has support for several MDI access methods. This routine
+ * uses the new protocol MDI Single Command and Address Operation.
+ * This requires that first an address cycle command is sent, followed by a
+ * read command.
+ *****************************************************************************/
+static u16
+ixgb_read_phy_reg(struct ixgb_hw *hw,
+ u32 reg_address,
+ u32 phy_address,
+ u32 device_type)
+{
+ u32 i;
+ u32 data;
+ u32 command = 0;
+
+ ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
+ ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
+ ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
+
+ /* Setup and write the address cycle command */
+ command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
+ (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
+ (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));
+
+ IXGB_WRITE_REG(hw, MSCA, command);
+
+ /* Check every 10 usec to see if the address cycle completed.
+ * The COMMAND bit will clear when the operation is complete.
+ * This may take as long as 64 usecs (we'll wait 100 usecs max)
+ * from the CPU Write to the Ready bit assertion.
+ */
+ for (i = 0; i < 10; i++) {
+ udelay(10);
+ command = IXGB_READ_REG(hw, MSCA);
+ if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
+
+ /* Address cycle complete, setup and write the read command */
+ command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
+ (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
+ (IXGB_MSCA_READ | IXGB_MSCA_MDI_COMMAND));
+
+ IXGB_WRITE_REG(hw, MSCA, command);
+
+ /* Check every 10 usec to see if the read command completed.
+ * The COMMAND bit will clear when the operation is complete.
+ * The read may take as long as 64 usecs (we'll wait 100 usecs max)
+ * from the CPU Write to the Ready bit assertion.
+ */
+ for (i = 0; i < 10; i++) {
+ udelay(10);
+ command = IXGB_READ_REG(hw, MSCA);
+ if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
+
+ /* Operation is complete, get the data from the MDIO Read/Write Data
+ * register and return.
+ */
+ data = IXGB_READ_REG(hw, MSRWD);
+ data >>= IXGB_MSRWD_READ_DATA_SHIFT;
+ return (u16)data;
+}
+
+/******************************************************************************
+ * Writes a word to a device over the Management Data Interface (MDI) bus.
+ * This interface is used to manage Physical layer devices.
+ *
+ * hw - Struct containing variables accessed by hw code
+ * reg_address - Offset of device register being written.
+ * phy_address - Address of device on MDI.
+ * device_type - Also known as the Device ID or DID.
+ * data - 16-bit value to be written
+ *
+ * Returns: void.
+ *
+ * The 82597EX has support for several MDI access methods. This routine
+ * uses the new protocol MDI Single Command and Address Operation.
+ * This requires that first an address cycle command is sent, followed by a
+ * write command.
+ *****************************************************************************/
+static void
+ixgb_write_phy_reg(struct ixgb_hw *hw,
+ u32 reg_address,
+ u32 phy_address,
+ u32 device_type,
+ u16 data)
+{
+ u32 i;
+ u32 command = 0;
+
+ ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
+ ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
+ ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
+
+ /* Put the data in the MDIO Read/Write Data register */
+ IXGB_WRITE_REG(hw, MSRWD, (u32)data);
+
+ /* Setup and write the address cycle command */
+ command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
+ (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
+ (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));
+
+ IXGB_WRITE_REG(hw, MSCA, command);
+
+ /* Check every 10 usec to see if the address cycle completed.
+ * The COMMAND bit will clear when the operation is complete.
+ * This may take as long as 64 usecs (we'll wait 100 usecs max)
+ * from the CPU Write to the Ready bit assertion.
+ */
+ for (i = 0; i < 10; i++) {
+ udelay(10);
+ command = IXGB_READ_REG(hw, MSCA);
+ if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
+
+ /* Address cycle complete, setup and write the write command */
+ command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
+ (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
+ (IXGB_MSCA_WRITE | IXGB_MSCA_MDI_COMMAND));
+
+ IXGB_WRITE_REG(hw, MSCA, command);
+
+ /* Check every 10 usec to see if the write command completed.
+ * The COMMAND bit will clear when the operation is complete.
+ * The write may take as long as 64 usecs (we'll wait 100 usecs max)
+ * from the CPU Write to the Ready bit assertion.
+ */
+ for (i = 0; i < 10; i++) {
+ udelay(10);
+ command = IXGB_READ_REG(hw, MSCA);
+ if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
+
+ /* Operation is complete, return. */
+}
+
+/******************************************************************************
+ * Checks to see if the link status of the hardware has changed.
+ *
+ * hw - Struct containing variables accessed by hw code
+ *
+ * Called by any function that needs to check the link status of the adapter.
+ *****************************************************************************/
+void
+ixgb_check_for_link(struct ixgb_hw *hw)
+{
+ u32 status_reg;
+ u32 xpcss_reg;
+
+ ENTER();
+
+ xpcss_reg = IXGB_READ_REG(hw, XPCSS);
+ status_reg = IXGB_READ_REG(hw, STATUS);
+
+ if ((xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
+ (status_reg & IXGB_STATUS_LU)) {
+ hw->link_up = true;
+ } else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
+ (status_reg & IXGB_STATUS_LU)) {
+ pr_debug("XPCSS Not Aligned while Status:LU is set\n");
+ hw->link_up = ixgb_link_reset(hw);
+ } else {
+ /*
+ * 82597EX errata. Since the lane deskew problem may prevent
+ * link, reset the link before reporting link down.
+ */
+ hw->link_up = ixgb_link_reset(hw);
+ }
+ /* Anything else for 10 Gig?? */
+}
+
+/******************************************************************************
+ * Check for a bad link condition that may have occurred.
+ * The indication is that the RFC / LFC registers may be incrementing
+ * continually. A full adapter reset is required to recover.
+ *
+ * hw - Struct containing variables accessed by hw code
+ *
+ * Called by any function that needs to check the link status of the adapter.
+ *****************************************************************************/
+bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
+{
+ u32 newLFC, newRFC;
+ bool bad_link_returncode = false;
+
+ if (hw->phy_type == ixgb_phy_type_txn17401) {
+ newLFC = IXGB_READ_REG(hw, LFC);
+ newRFC = IXGB_READ_REG(hw, RFC);
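+ /* More than 250 new local or remote faults since the last
+ * check is taken as a bad link.
+ */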
+ if ((hw->lastLFC + 250 < newLFC) ||
+ (hw->lastRFC + 250 < newRFC)) {
+ pr_debug("BAD LINK! too many LFC/RFC since last check\n");
+ bad_link_returncode = true;
+ }
+ hw->lastLFC = newLFC;
+ hw->lastRFC = newRFC;
+ }
+
+ return bad_link_returncode;
+}
+
+/******************************************************************************
+ * Clears all hardware statistics counters.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
+{
+ volatile u32 temp_reg;
+
+ ENTER();
+
+ /* if we are stopped or resetting exit gracefully */
+ if (hw->adapter_stopped) {
+ pr_debug("Exiting because the adapter is stopped!!!\n");
+ return;
+ }
+
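+ /* The statistics registers are read-to-clear, so reading each one
+ * resets it; the volatile temporary keeps the compiler from
+ * optimizing the reads away.
+ */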
+ temp_reg = IXGB_READ_REG(hw, TPRL);
+ temp_reg = IXGB_READ_REG(hw, TPRH);
+ temp_reg = IXGB_READ_REG(hw, GPRCL);
+ temp_reg = IXGB_READ_REG(hw, GPRCH);
+ temp_reg = IXGB_READ_REG(hw, BPRCL);
+ temp_reg = IXGB_READ_REG(hw, BPRCH);
+ temp_reg = IXGB_READ_REG(hw, MPRCL);
+ temp_reg = IXGB_READ_REG(hw, MPRCH);
+ temp_reg = IXGB_READ_REG(hw, UPRCL);
+ temp_reg = IXGB_READ_REG(hw, UPRCH);
+ temp_reg = IXGB_READ_REG(hw, VPRCL);
+ temp_reg = IXGB_READ_REG(hw, VPRCH);
+ temp_reg = IXGB_READ_REG(hw, JPRCL);
+ temp_reg = IXGB_READ_REG(hw, JPRCH);
+ temp_reg = IXGB_READ_REG(hw, GORCL);
+ temp_reg = IXGB_READ_REG(hw, GORCH);
+ temp_reg = IXGB_READ_REG(hw, TORL);
+ temp_reg = IXGB_READ_REG(hw, TORH);
+ temp_reg = IXGB_READ_REG(hw, RNBC);
+ temp_reg = IXGB_READ_REG(hw, RUC);
+ temp_reg = IXGB_READ_REG(hw, ROC);
+ temp_reg = IXGB_READ_REG(hw, RLEC);
+ temp_reg = IXGB_READ_REG(hw, CRCERRS);
+ temp_reg = IXGB_READ_REG(hw, ICBC);
+ temp_reg = IXGB_READ_REG(hw, ECBC);
+ temp_reg = IXGB_READ_REG(hw, MPC);
+ temp_reg = IXGB_READ_REG(hw, TPTL);
+ temp_reg = IXGB_READ_REG(hw, TPTH);
+ temp_reg = IXGB_READ_REG(hw, GPTCL);
+ temp_reg = IXGB_READ_REG(hw, GPTCH);
+ temp_reg = IXGB_READ_REG(hw, BPTCL);
+ temp_reg = IXGB_READ_REG(hw, BPTCH);
+ temp_reg = IXGB_READ_REG(hw, MPTCL);
+ temp_reg = IXGB_READ_REG(hw, MPTCH);
+ temp_reg = IXGB_READ_REG(hw, UPTCL);
+ temp_reg = IXGB_READ_REG(hw, UPTCH);
+ temp_reg = IXGB_READ_REG(hw, VPTCL);
+ temp_reg = IXGB_READ_REG(hw, VPTCH);
+ temp_reg = IXGB_READ_REG(hw, JPTCL);
+ temp_reg = IXGB_READ_REG(hw, JPTCH);
+ temp_reg = IXGB_READ_REG(hw, GOTCL);
+ temp_reg = IXGB_READ_REG(hw, GOTCH);
+ temp_reg = IXGB_READ_REG(hw, TOTL);
+ temp_reg = IXGB_READ_REG(hw, TOTH);
+ temp_reg = IXGB_READ_REG(hw, DC);
+ temp_reg = IXGB_READ_REG(hw, PLT64C);
+ temp_reg = IXGB_READ_REG(hw, TSCTC);
+ temp_reg = IXGB_READ_REG(hw, TSCTFC);
+ temp_reg = IXGB_READ_REG(hw, IBIC);
+ temp_reg = IXGB_READ_REG(hw, RFC);
+ temp_reg = IXGB_READ_REG(hw, LFC);
+ temp_reg = IXGB_READ_REG(hw, PFRC);
+ temp_reg = IXGB_READ_REG(hw, PFTC);
+ temp_reg = IXGB_READ_REG(hw, MCFRC);
+ temp_reg = IXGB_READ_REG(hw, MCFTC);
+ temp_reg = IXGB_READ_REG(hw, XONRXC);
+ temp_reg = IXGB_READ_REG(hw, XONTXC);
+ temp_reg = IXGB_READ_REG(hw, XOFFRXC);
+ temp_reg = IXGB_READ_REG(hw, XOFFTXC);
+ temp_reg = IXGB_READ_REG(hw, RJC);
+}
+
+/******************************************************************************
+ * Turns on the software controllable LED
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void
+ixgb_led_on(struct ixgb_hw *hw)
+{
+ u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
+
+ /* To turn on the LED, clear software-definable pin 0 (SDP0). */
+ ctrl0_reg &= ~IXGB_CTRL0_SDP0;
+ IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
+}
+
+/******************************************************************************
+ * Turns off the software controllable LED
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void
+ixgb_led_off(struct ixgb_hw *hw)
+{
+ u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
+
+ /* To turn off the LED, set software-definable pin 0 (SDP0). */
+ ctrl0_reg |= IXGB_CTRL0_SDP0;
+ IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
+}
+
+/******************************************************************************
+ * Gets the current PCI bus type, speed, and width of the hardware
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+ixgb_get_bus_info(struct ixgb_hw *hw)
+{
+ u32 status_reg;
+
+ status_reg = IXGB_READ_REG(hw, STATUS);
+
+ hw->bus.type = (status_reg & IXGB_STATUS_PCIX_MODE) ?
+ ixgb_bus_type_pcix : ixgb_bus_type_pci;
+
+ if (hw->bus.type == ixgb_bus_type_pci) {
+ hw->bus.speed = (status_reg & IXGB_STATUS_PCI_SPD) ?
+ ixgb_bus_speed_66 : ixgb_bus_speed_33;
+ } else {
+ switch (status_reg & IXGB_STATUS_PCIX_SPD_MASK) {
+ case IXGB_STATUS_PCIX_SPD_66:
+ hw->bus.speed = ixgb_bus_speed_66;
+ break;
+ case IXGB_STATUS_PCIX_SPD_100:
+ hw->bus.speed = ixgb_bus_speed_100;
+ break;
+ case IXGB_STATUS_PCIX_SPD_133:
+ hw->bus.speed = ixgb_bus_speed_133;
+ break;
+ default:
+ hw->bus.speed = ixgb_bus_speed_reserved;
+ break;
+ }
+ }
+
+ hw->bus.width = (status_reg & IXGB_STATUS_BUS64) ?
+ ixgb_bus_width_64 : ixgb_bus_width_32;
+}
+
+/******************************************************************************
+ * Tests a MAC address to ensure it is a valid Individual Address
+ *
+ * mac_addr - pointer to MAC address.
+ *
+ *****************************************************************************/
+static bool
+mac_addr_valid(u8 *mac_addr)
+{
+ bool is_valid = true;
+
+ ENTER();
+
+ /* Make sure it is not a multicast address */
+ if (is_multicast_ether_addr(mac_addr)) {
+ pr_debug("MAC address is multicast\n");
+ is_valid = false;
+ }
+ /* Not a broadcast address */
+ else if (is_broadcast_ether_addr(mac_addr)) {
+ pr_debug("MAC address is broadcast\n");
+ is_valid = false;
+ }
+ /* Reject the zero address */
+ else if (is_zero_ether_addr(mac_addr)) {
+ pr_debug("MAC address is all zeros\n");
+ is_valid = false;
+ }
+ return is_valid;
+}
+
+/******************************************************************************
+ * Resets the 10GbE link. Waits the settle time and returns the state of
+ * the link.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static bool
+ixgb_link_reset(struct ixgb_hw *hw)
+{
+ bool link_status = false;
+ u8 wait_retries = MAX_RESET_ITERATIONS;
+ u8 lrst_retries = MAX_RESET_ITERATIONS;
+
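+ /* Retry the link reset up to MAX_RESET_ITERATIONS times, polling
+ * briefly for link-up and lane alignment after each attempt.
+ */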
+ do {
+ /* Reset the link */
+ IXGB_WRITE_REG(hw, CTRL0,
+ IXGB_READ_REG(hw, CTRL0) | IXGB_CTRL0_LRST);
+
+ /* Wait for link-up and lane re-alignment */
+ do {
+ udelay(IXGB_DELAY_USECS_AFTER_LINK_RESET);
+ link_status =
+ (IXGB_READ_REG(hw, STATUS) & IXGB_STATUS_LU) &&
+ (IXGB_READ_REG(hw, XPCSS) & IXGB_XPCSS_ALIGN_STATUS);
+ } while (!link_status && --wait_retries);
+
+ } while (!link_status && --lrst_retries);
+
+ return link_status;
+}
+
+/******************************************************************************
+ * Resets the 10GbE optics module.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+ixgb_optics_reset(struct ixgb_hw *hw)
+{
+ if (hw->phy_type == ixgb_phy_type_txn17401) {
+ u16 mdio_reg;
+
+ ixgb_write_phy_reg(hw,
+ MDIO_CTRL1,
+ IXGB_PHY_ADDRESS,
+ MDIO_MMD_PMAPMD,
+ MDIO_CTRL1_RESET);
+
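+ /* Read the control register back; the returned value is
+ * intentionally unused.
+ */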
+ mdio_reg = ixgb_read_phy_reg(hw,
+ MDIO_CTRL1,
+ IXGB_PHY_ADDRESS,
+ MDIO_MMD_PMAPMD);
+ }
+}
+
+/******************************************************************************
+ * Resets the 10GbE optics module for Sun variant NIC.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+
+#define IXGB_BCM8704_USER_PMD_TX_CTRL_REG 0xC803
+#define IXGB_BCM8704_USER_PMD_TX_CTRL_REG_VAL 0x0164
+#define IXGB_BCM8704_USER_CTRL_REG 0xC800
+#define IXGB_BCM8704_USER_CTRL_REG_VAL 0x7FBF
+#define IXGB_BCM8704_USER_DEV3_ADDR 0x0003
+#define IXGB_SUN_PHY_ADDRESS 0x0000
+#define IXGB_SUN_PHY_RESET_DELAY 305
+
+static void
+ixgb_optics_reset_bcm(struct ixgb_hw *hw)
+{
+ u32 ctrl = IXGB_READ_REG(hw, CTRL0);
+ ctrl &= ~IXGB_CTRL0_SDP2;
+ ctrl |= IXGB_CTRL0_SDP3;
+ IXGB_WRITE_REG(hw, CTRL0, ctrl);
+ IXGB_WRITE_FLUSH(hw);
+
+ /* SerDes needs extra delay */
+ msleep(IXGB_SUN_PHY_RESET_DELAY);
+
+ /* Broadcom BCM8704 configuration */
+ /* Reference clock config */
+ ixgb_write_phy_reg(hw,
+ IXGB_BCM8704_USER_PMD_TX_CTRL_REG,
+ IXGB_SUN_PHY_ADDRESS,
+ IXGB_BCM8704_USER_DEV3_ADDR,
+ IXGB_BCM8704_USER_PMD_TX_CTRL_REG_VAL);
+ /* we must read the registers twice */
+ ixgb_read_phy_reg(hw,
+ IXGB_BCM8704_USER_PMD_TX_CTRL_REG,
+ IXGB_SUN_PHY_ADDRESS,
+ IXGB_BCM8704_USER_DEV3_ADDR);
+ ixgb_read_phy_reg(hw,
+ IXGB_BCM8704_USER_PMD_TX_CTRL_REG,
+ IXGB_SUN_PHY_ADDRESS,
+ IXGB_BCM8704_USER_DEV3_ADDR);
+
+ ixgb_write_phy_reg(hw,
+ IXGB_BCM8704_USER_CTRL_REG,
+ IXGB_SUN_PHY_ADDRESS,
+ IXGB_BCM8704_USER_DEV3_ADDR,
+ IXGB_BCM8704_USER_CTRL_REG_VAL);
+ ixgb_read_phy_reg(hw,
+ IXGB_BCM8704_USER_CTRL_REG,
+ IXGB_SUN_PHY_ADDRESS,
+ IXGB_BCM8704_USER_DEV3_ADDR);
+ ixgb_read_phy_reg(hw,
+ IXGB_BCM8704_USER_CTRL_REG,
+ IXGB_SUN_PHY_ADDRESS,
+ IXGB_BCM8704_USER_DEV3_ADDR);
+
+ /* SerDes needs extra delay */
+ msleep(IXGB_SUN_PHY_RESET_DELAY);
+}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
new file mode 100644
index 000000000000..2a99a35c33aa
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
@@ -0,0 +1,799 @@
+/*******************************************************************************
+
+ Intel PRO/10GbE Linux driver
+ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGB_HW_H_
+#define _IXGB_HW_H_
+
+#include <linux/mdio.h>
+
+#include "ixgb_osdep.h"
+
+/* Enums */
+typedef enum {
+ ixgb_mac_unknown = 0,
+ ixgb_82597,
+ ixgb_num_macs
+} ixgb_mac_type;
+
+/* Types of physical layer modules */
+typedef enum {
+ ixgb_phy_type_unknown = 0,
+ ixgb_phy_type_g6005, /* 850nm, MM fiber, XPAK transceiver */
+ ixgb_phy_type_g6104, /* 1310nm, SM fiber, XPAK transceiver */
+ ixgb_phy_type_txn17201, /* 850nm, MM fiber, XPAK transceiver */
+ ixgb_phy_type_txn17401, /* 1310nm, SM fiber, XENPAK transceiver */
+ ixgb_phy_type_bcm /* SUN specific board */
+} ixgb_phy_type;
+
+/* XPAK transceiver vendors, for the SR adapters */
+typedef enum {
+ ixgb_xpak_vendor_intel,
+ ixgb_xpak_vendor_infineon
+} ixgb_xpak_vendor;
+
+/* Media Types */
+typedef enum {
+ ixgb_media_type_unknown = 0,
+ ixgb_media_type_fiber = 1,
+ ixgb_media_type_copper = 2,
+ ixgb_num_media_types
+} ixgb_media_type;
+
+/* Flow Control Settings */
+typedef enum {
+ ixgb_fc_none = 0,
+ ixgb_fc_rx_pause = 1,
+ ixgb_fc_tx_pause = 2,
+ ixgb_fc_full = 3,
+ ixgb_fc_default = 0xFF
+} ixgb_fc_type;
+
+/* PCI bus types */
+typedef enum {
+ ixgb_bus_type_unknown = 0,
+ ixgb_bus_type_pci,
+ ixgb_bus_type_pcix
+} ixgb_bus_type;
+
+/* PCI bus speeds */
+typedef enum {
+ ixgb_bus_speed_unknown = 0,
+ ixgb_bus_speed_33,
+ ixgb_bus_speed_66,
+ ixgb_bus_speed_100,
+ ixgb_bus_speed_133,
+ ixgb_bus_speed_reserved
+} ixgb_bus_speed;
+
+/* PCI bus widths */
+typedef enum {
+ ixgb_bus_width_unknown = 0,
+ ixgb_bus_width_32,
+ ixgb_bus_width_64
+} ixgb_bus_width;
+
+#define IXGB_EEPROM_SIZE 64 /* Size in words */
+
+#define SPEED_10000 10000
+#define FULL_DUPLEX 2
+
+#define MIN_NUMBER_OF_DESCRIPTORS 8
+#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 /* 13 bits in RDLEN/TDLEN, 128B aligned */
+
+#define IXGB_DELAY_BEFORE_RESET 10 /* allow 10ms after idling rx/tx units */
+#define IXGB_DELAY_AFTER_RESET 1 /* allow 1ms after the reset */
+#define IXGB_DELAY_AFTER_EE_RESET 10 /* allow 10ms after the EEPROM reset */
+
+#define IXGB_DELAY_USECS_AFTER_LINK_RESET 13 /* allow 13 microseconds after the reset */
+ /* NOTE: this is MICROSECONDS */
+#define MAX_RESET_ITERATIONS 8 /* number of iterations to get things right */
+
+/* General Registers */
+#define IXGB_CTRL0 0x00000 /* Device Control Register 0 - RW */
+#define IXGB_CTRL1 0x00008 /* Device Control Register 1 - RW */
+#define IXGB_STATUS 0x00010 /* Device Status Register - RO */
+#define IXGB_EECD 0x00018 /* EEPROM/Flash Control/Data Register - RW */
+#define IXGB_MFS 0x00020 /* Maximum Frame Size - RW */
+
+/* Interrupt */
+#define IXGB_ICR 0x00080 /* Interrupt Cause Read - R/clr */
+#define IXGB_ICS 0x00088 /* Interrupt Cause Set - RW */
+#define IXGB_IMS 0x00090 /* Interrupt Mask Set/Read - RW */
+#define IXGB_IMC 0x00098 /* Interrupt Mask Clear - WO */
+
+/* Receive */
+#define IXGB_RCTL 0x00100 /* RX Control - RW */
+#define IXGB_FCRTL 0x00108 /* Flow Control Receive Threshold Low - RW */
+#define IXGB_FCRTH 0x00110 /* Flow Control Receive Threshold High - RW */
+#define IXGB_RDBAL 0x00118 /* RX Descriptor Base Low - RW */
+#define IXGB_RDBAH 0x0011C /* RX Descriptor Base High - RW */
+#define IXGB_RDLEN 0x00120 /* RX Descriptor Length - RW */
+#define IXGB_RDH 0x00128 /* RX Descriptor Head - RW */
+#define IXGB_RDT 0x00130 /* RX Descriptor Tail - RW */
+#define IXGB_RDTR 0x00138 /* RX Delay Timer Ring - RW */
+#define IXGB_RXDCTL 0x00140 /* Receive Descriptor Control - RW */
+#define IXGB_RAIDC 0x00148 /* Receive Adaptive Interrupt Delay Control - RW */
+#define IXGB_RXCSUM 0x00158 /* Receive Checksum Control - RW */
+#define IXGB_RA 0x00180 /* Receive Address Array Base - RW */
+#define IXGB_RAL 0x00180 /* Receive Address Low [0:15] - RW */
+#define IXGB_RAH 0x00184 /* Receive Address High [0:15] - RW */
+#define IXGB_MTA 0x00200 /* Multicast Table Array [0:127] - RW */
+#define IXGB_VFTA 0x00400 /* VLAN Filter Table Array [0:127] - RW */
+#define IXGB_REQ_RX_DESCRIPTOR_MULTIPLE 8
+
+/* Transmit */
+#define IXGB_TCTL 0x00600 /* TX Control - RW */
+#define IXGB_TDBAL 0x00608 /* TX Descriptor Base Low - RW */
+#define IXGB_TDBAH 0x0060C /* TX Descriptor Base High - RW */
+#define IXGB_TDLEN 0x00610 /* TX Descriptor Length - RW */
+#define IXGB_TDH 0x00618 /* TX Descriptor Head - RW */
+#define IXGB_TDT 0x00620 /* TX Descriptor Tail - RW */
+#define IXGB_TIDV 0x00628 /* TX Interrupt Delay Value - RW */
+#define IXGB_TXDCTL 0x00630 /* Transmit Descriptor Control - RW */
+#define IXGB_TSPMT 0x00638 /* TCP Segmentation PAD & Min Threshold - RW */
+#define IXGB_PAP 0x00640 /* Pause and Pace - RW */
+#define IXGB_REQ_TX_DESCRIPTOR_MULTIPLE 8
+
+/* Physical */
+#define IXGB_PCSC1 0x00700 /* PCS Control 1 - RW */
+#define IXGB_PCSC2 0x00708 /* PCS Control 2 - RW */
+#define IXGB_PCSS1 0x00710 /* PCS Status 1 - RO */
+#define IXGB_PCSS2 0x00718 /* PCS Status 2 - RO */
+#define IXGB_XPCSS 0x00720 /* 10GBASE-X PCS Status (or XGXS Lane Status) - RO */
+#define IXGB_UCCR 0x00728 /* Unilink Circuit Control Register */
+#define IXGB_XPCSTC 0x00730 /* 10GBASE-X PCS Test Control */
+#define IXGB_MACA 0x00738 /* MDI Autoscan Command and Address - RW */
+#define IXGB_APAE 0x00740 /* Autoscan PHY Address Enable - RW */
+#define IXGB_ARD 0x00748 /* Autoscan Read Data - RO */
+#define IXGB_AIS 0x00750 /* Autoscan Interrupt Status - RO */
+#define IXGB_MSCA 0x00758 /* MDI Single Command and Address - RW */
+#define IXGB_MSRWD 0x00760 /* MDI Single Read and Write Data - RW, RO */
+
+/* Wake-up */
+#define IXGB_WUFC 0x00808 /* Wake Up Filter Control - RW */
+#define IXGB_WUS 0x00810 /* Wake Up Status - RO */
+#define IXGB_FFLT 0x01000 /* Flexible Filter Length Table - RW */
+#define IXGB_FFMT 0x01020 /* Flexible Filter Mask Table - RW */
+#define IXGB_FTVT 0x01420 /* Flexible Filter Value Table - RW */
+
+/* Statistics */
+#define IXGB_TPRL 0x02000 /* Total Packets Received (Low) */
+#define IXGB_TPRH 0x02004 /* Total Packets Received (High) */
+#define IXGB_GPRCL 0x02008 /* Good Packets Received Count (Low) */
+#define IXGB_GPRCH 0x0200C /* Good Packets Received Count (High) */
+#define IXGB_BPRCL 0x02010 /* Broadcast Packets Received Count (Low) */
+#define IXGB_BPRCH 0x02014 /* Broadcast Packets Received Count (High) */
+#define IXGB_MPRCL 0x02018 /* Multicast Packets Received Count (Low) */
+#define IXGB_MPRCH 0x0201C /* Multicast Packets Received Count (High) */
+#define IXGB_UPRCL 0x02020 /* Unicast Packets Received Count (Low) */
+#define IXGB_UPRCH 0x02024 /* Unicast Packets Received Count (High) */
+#define IXGB_VPRCL 0x02028 /* VLAN Packets Received Count (Low) */
+#define IXGB_VPRCH 0x0202C /* VLAN Packets Received Count (High) */
+#define IXGB_JPRCL 0x02030 /* Jumbo Packets Received Count (Low) */
+#define IXGB_JPRCH 0x02034 /* Jumbo Packets Received Count (High) */
+#define IXGB_GORCL 0x02038 /* Good Octets Received Count (Low) */
+#define IXGB_GORCH 0x0203C /* Good Octets Received Count (High) */
+#define IXGB_TORL 0x02040 /* Total Octets Received (Low) */
+#define IXGB_TORH 0x02044 /* Total Octets Received (High) */
+#define IXGB_RNBC 0x02048 /* Receive No Buffers Count */
+#define IXGB_RUC 0x02050 /* Receive Undersize Count */
+#define IXGB_ROC 0x02058 /* Receive Oversize Count */
+#define IXGB_RLEC 0x02060 /* Receive Length Error Count */
+#define IXGB_CRCERRS 0x02068 /* CRC Error Count */
+#define IXGB_ICBC 0x02070 /* Illegal control byte in mid-packet Count */
+#define IXGB_ECBC 0x02078 /* Error Control byte in mid-packet Count */
+#define IXGB_MPC 0x02080 /* Missed Packets Count */
+#define IXGB_TPTL 0x02100 /* Total Packets Transmitted (Low) */
+#define IXGB_TPTH 0x02104 /* Total Packets Transmitted (High) */
+#define IXGB_GPTCL 0x02108 /* Good Packets Transmitted Count (Low) */
+#define IXGB_GPTCH 0x0210C /* Good Packets Transmitted Count (High) */
+#define IXGB_BPTCL 0x02110 /* Broadcast Packets Transmitted Count (Low) */
+#define IXGB_BPTCH 0x02114 /* Broadcast Packets Transmitted Count (High) */
+#define IXGB_MPTCL 0x02118 /* Multicast Packets Transmitted Count (Low) */
+#define IXGB_MPTCH 0x0211C /* Multicast Packets Transmitted Count (High) */
+#define IXGB_UPTCL 0x02120 /* Unicast Packets Transmitted Count (Low) */
+#define IXGB_UPTCH 0x02124 /* Unicast Packets Transmitted Count (High) */
+#define IXGB_VPTCL 0x02128 /* VLAN Packets Transmitted Count (Low) */
+#define IXGB_VPTCH 0x0212C /* VLAN Packets Transmitted Count (High) */
+#define IXGB_JPTCL 0x02130 /* Jumbo Packets Transmitted Count (Low) */
+#define IXGB_JPTCH 0x02134 /* Jumbo Packets Transmitted Count (High) */
+#define IXGB_GOTCL 0x02138 /* Good Octets Transmitted Count (Low) */
+#define IXGB_GOTCH 0x0213C /* Good Octets Transmitted Count (High) */
+#define IXGB_TOTL 0x02140 /* Total Octets Transmitted Count (Low) */
+#define IXGB_TOTH 0x02144 /* Total Octets Transmitted Count (High) */
+#define IXGB_DC 0x02148 /* Defer Count */
+#define IXGB_PLT64C 0x02150 /* Packet Transmitted was less than 64 bytes Count */
+#define IXGB_TSCTC 0x02170 /* TCP Segmentation Context Transmitted Count */
+#define IXGB_TSCTFC 0x02178 /* TCP Segmentation Context Tx Fail Count */
+#define IXGB_IBIC 0x02180 /* Illegal byte during Idle stream count */
+#define IXGB_RFC 0x02188 /* Remote Fault Count */
+#define IXGB_LFC 0x02190 /* Local Fault Count */
+#define IXGB_PFRC 0x02198 /* Pause Frame Receive Count */
+#define IXGB_PFTC 0x021A0 /* Pause Frame Transmit Count */
+#define IXGB_MCFRC 0x021A8 /* MAC Control Frames (non-Pause) Received Count */
+#define IXGB_MCFTC 0x021B0 /* MAC Control Frames (non-Pause) Transmitted Count */
+#define IXGB_XONRXC 0x021B8 /* XON Received Count */
+#define IXGB_XONTXC 0x021C0 /* XON Transmitted Count */
+#define IXGB_XOFFRXC 0x021C8 /* XOFF Received Count */
+#define IXGB_XOFFTXC 0x021D0 /* XOFF Transmitted Count */
+#define IXGB_RJC 0x021D8 /* Receive Jabber Count */
+
+/* CTRL0 Bit Masks */
+#define IXGB_CTRL0_LRST 0x00000008
+#define IXGB_CTRL0_JFE 0x00000010
+#define IXGB_CTRL0_XLE 0x00000020
+#define IXGB_CTRL0_MDCS 0x00000040
+#define IXGB_CTRL0_CMDC 0x00000080
+#define IXGB_CTRL0_SDP0 0x00040000
+#define IXGB_CTRL0_SDP1 0x00080000
+#define IXGB_CTRL0_SDP2 0x00100000
+#define IXGB_CTRL0_SDP3 0x00200000
+#define IXGB_CTRL0_SDP0_DIR 0x00400000
+#define IXGB_CTRL0_SDP1_DIR 0x00800000
+#define IXGB_CTRL0_SDP2_DIR 0x01000000
+#define IXGB_CTRL0_SDP3_DIR 0x02000000
+#define IXGB_CTRL0_RST 0x04000000
+#define IXGB_CTRL0_RPE 0x08000000
+#define IXGB_CTRL0_TPE 0x10000000
+#define IXGB_CTRL0_VME 0x40000000
+
+/* CTRL1 Bit Masks */
+#define IXGB_CTRL1_GPI0_EN 0x00000001
+#define IXGB_CTRL1_GPI1_EN 0x00000002
+#define IXGB_CTRL1_GPI2_EN 0x00000004
+#define IXGB_CTRL1_GPI3_EN 0x00000008
+#define IXGB_CTRL1_SDP4 0x00000010
+#define IXGB_CTRL1_SDP5 0x00000020
+#define IXGB_CTRL1_SDP6 0x00000040
+#define IXGB_CTRL1_SDP7 0x00000080
+#define IXGB_CTRL1_SDP4_DIR 0x00000100
+#define IXGB_CTRL1_SDP5_DIR 0x00000200
+#define IXGB_CTRL1_SDP6_DIR 0x00000400
+#define IXGB_CTRL1_SDP7_DIR 0x00000800
+#define IXGB_CTRL1_EE_RST 0x00002000
+#define IXGB_CTRL1_RO_DIS 0x00020000
+#define IXGB_CTRL1_PCIXHM_MASK 0x00C00000
+#define IXGB_CTRL1_PCIXHM_1_2 0x00000000
+#define IXGB_CTRL1_PCIXHM_5_8 0x00400000
+#define IXGB_CTRL1_PCIXHM_3_4 0x00800000
+#define IXGB_CTRL1_PCIXHM_7_8 0x00C00000
+
+/* STATUS Bit Masks */
+#define IXGB_STATUS_LU 0x00000002
+#define IXGB_STATUS_AIP 0x00000004
+#define IXGB_STATUS_TXOFF 0x00000010
+#define IXGB_STATUS_XAUIME 0x00000020
+#define IXGB_STATUS_RES 0x00000040
+#define IXGB_STATUS_RIS 0x00000080
+#define IXGB_STATUS_RIE 0x00000100
+#define IXGB_STATUS_RLF 0x00000200
+#define IXGB_STATUS_RRF 0x00000400
+#define IXGB_STATUS_PCI_SPD 0x00000800
+#define IXGB_STATUS_BUS64 0x00001000
+#define IXGB_STATUS_PCIX_MODE 0x00002000
+#define IXGB_STATUS_PCIX_SPD_MASK 0x0000C000
+#define IXGB_STATUS_PCIX_SPD_66 0x00000000
+#define IXGB_STATUS_PCIX_SPD_100 0x00004000
+#define IXGB_STATUS_PCIX_SPD_133 0x00008000
+#define IXGB_STATUS_REV_ID_MASK 0x000F0000
+#define IXGB_STATUS_REV_ID_SHIFT 16
+
+/* EECD Bit Masks */
+#define IXGB_EECD_SK 0x00000001
+#define IXGB_EECD_CS 0x00000002
+#define IXGB_EECD_DI 0x00000004
+#define IXGB_EECD_DO 0x00000008
+#define IXGB_EECD_FWE_MASK 0x00000030
+#define IXGB_EECD_FWE_DIS 0x00000010
+#define IXGB_EECD_FWE_EN 0x00000020
+
+/* MFS */
+#define IXGB_MFS_SHIFT 16
+
+/* Interrupt Register Bit Masks (used for ICR, ICS, IMS, and IMC) */
+#define IXGB_INT_TXDW 0x00000001
+#define IXGB_INT_TXQE 0x00000002
+#define IXGB_INT_LSC 0x00000004
+#define IXGB_INT_RXSEQ 0x00000008
+#define IXGB_INT_RXDMT0 0x00000010
+#define IXGB_INT_RXO 0x00000040
+#define IXGB_INT_RXT0 0x00000080
+#define IXGB_INT_AUTOSCAN 0x00000200
+#define IXGB_INT_GPI0 0x00000800
+#define IXGB_INT_GPI1 0x00001000
+#define IXGB_INT_GPI2 0x00002000
+#define IXGB_INT_GPI3 0x00004000
+
+/* RCTL Bit Masks */
+#define IXGB_RCTL_RXEN 0x00000002
+#define IXGB_RCTL_SBP 0x00000004
+#define IXGB_RCTL_UPE 0x00000008
+#define IXGB_RCTL_MPE 0x00000010
+#define IXGB_RCTL_RDMTS_MASK 0x00000300
+#define IXGB_RCTL_RDMTS_1_2 0x00000000
+#define IXGB_RCTL_RDMTS_1_4 0x00000100
+#define IXGB_RCTL_RDMTS_1_8 0x00000200
+#define IXGB_RCTL_MO_MASK 0x00003000
+#define IXGB_RCTL_MO_47_36 0x00000000
+#define IXGB_RCTL_MO_46_35 0x00001000
+#define IXGB_RCTL_MO_45_34 0x00002000
+#define IXGB_RCTL_MO_43_32 0x00003000
+#define IXGB_RCTL_MO_SHIFT 12
+#define IXGB_RCTL_BAM 0x00008000
+#define IXGB_RCTL_BSIZE_MASK 0x00030000
+#define IXGB_RCTL_BSIZE_2048 0x00000000
+#define IXGB_RCTL_BSIZE_4096 0x00010000
+#define IXGB_RCTL_BSIZE_8192 0x00020000
+#define IXGB_RCTL_BSIZE_16384 0x00030000
+#define IXGB_RCTL_VFE 0x00040000
+#define IXGB_RCTL_CFIEN 0x00080000
+#define IXGB_RCTL_CFI 0x00100000
+#define IXGB_RCTL_RPDA_MASK 0x00600000
+#define IXGB_RCTL_RPDA_MC_MAC 0x00000000
+#define IXGB_RCTL_MC_ONLY 0x00400000
+#define IXGB_RCTL_CFF 0x00800000
+#define IXGB_RCTL_SECRC 0x04000000
+#define IXGB_RDT_FPDB 0x80000000
+
+#define IXGB_RCTL_IDLE_RX_UNIT 0
+
+/* FCRTL Bit Masks */
+#define IXGB_FCRTL_XONE 0x80000000
+
+/* RXDCTL Bit Masks */
+#define IXGB_RXDCTL_PTHRESH_MASK 0x000001FF
+#define IXGB_RXDCTL_PTHRESH_SHIFT 0
+#define IXGB_RXDCTL_HTHRESH_MASK 0x0003FE00
+#define IXGB_RXDCTL_HTHRESH_SHIFT 9
+#define IXGB_RXDCTL_WTHRESH_MASK 0x07FC0000
+#define IXGB_RXDCTL_WTHRESH_SHIFT 18
+
+/* RAIDC Bit Masks */
+#define IXGB_RAIDC_HIGHTHRS_MASK 0x0000003F
+#define IXGB_RAIDC_DELAY_MASK 0x000FF800
+#define IXGB_RAIDC_DELAY_SHIFT 11
+#define IXGB_RAIDC_POLL_MASK 0x1FF00000
+#define IXGB_RAIDC_POLL_SHIFT 20
+#define IXGB_RAIDC_RXT_GATE 0x40000000
+#define IXGB_RAIDC_EN 0x80000000
+
+#define IXGB_RAIDC_POLL_1000_INTERRUPTS_PER_SECOND 1220
+#define IXGB_RAIDC_POLL_5000_INTERRUPTS_PER_SECOND 244
+#define IXGB_RAIDC_POLL_10000_INTERRUPTS_PER_SECOND 122
+#define IXGB_RAIDC_POLL_20000_INTERRUPTS_PER_SECOND 61
+
+/* RXCSUM Bit Masks */
+#define IXGB_RXCSUM_IPOFL 0x00000100
+#define IXGB_RXCSUM_TUOFL 0x00000200
+
+/* RAH Bit Masks */
+#define IXGB_RAH_ASEL_MASK 0x00030000
+#define IXGB_RAH_ASEL_DEST 0x00000000
+#define IXGB_RAH_ASEL_SRC 0x00010000
+#define IXGB_RAH_AV 0x80000000
+
+/* TCTL Bit Masks */
+#define IXGB_TCTL_TCE 0x00000001
+#define IXGB_TCTL_TXEN 0x00000002
+#define IXGB_TCTL_TPDE 0x00000004
+
+#define IXGB_TCTL_IDLE_TX_UNIT 0
+
+/* TXDCTL Bit Masks */
+#define IXGB_TXDCTL_PTHRESH_MASK 0x0000007F
+#define IXGB_TXDCTL_HTHRESH_MASK 0x00007F00
+#define IXGB_TXDCTL_HTHRESH_SHIFT 8
+#define IXGB_TXDCTL_WTHRESH_MASK 0x007F0000
+#define IXGB_TXDCTL_WTHRESH_SHIFT 16
+
+/* TSPMT Bit Masks */
+#define IXGB_TSPMT_TSMT_MASK 0x0000FFFF
+#define IXGB_TSPMT_TSPBP_MASK 0xFFFF0000
+#define IXGB_TSPMT_TSPBP_SHIFT 16
+
+/* PAP Bit Masks */
+#define IXGB_PAP_TXPC_MASK 0x0000FFFF
+#define IXGB_PAP_TXPV_MASK 0x000F0000
+#define IXGB_PAP_TXPV_10G 0x00000000
+#define IXGB_PAP_TXPV_1G 0x00010000
+#define IXGB_PAP_TXPV_2G 0x00020000
+#define IXGB_PAP_TXPV_3G 0x00030000
+#define IXGB_PAP_TXPV_4G 0x00040000
+#define IXGB_PAP_TXPV_5G 0x00050000
+#define IXGB_PAP_TXPV_6G 0x00060000
+#define IXGB_PAP_TXPV_7G 0x00070000
+#define IXGB_PAP_TXPV_8G 0x00080000
+#define IXGB_PAP_TXPV_9G 0x00090000
+#define IXGB_PAP_TXPV_WAN 0x000F0000
+
+/* PCSC1 Bit Masks */
+#define IXGB_PCSC1_LOOPBACK 0x00004000
+
+/* PCSC2 Bit Masks */
+#define IXGB_PCSC2_PCS_TYPE_MASK 0x00000003
+#define IXGB_PCSC2_PCS_TYPE_10GBX 0x00000001
+
+/* PCSS1 Bit Masks */
+#define IXGB_PCSS1_LOCAL_FAULT 0x00000080
+#define IXGB_PCSS1_RX_LINK_STATUS 0x00000004
+
+/* PCSS2 Bit Masks */
+#define IXGB_PCSS2_DEV_PRES_MASK 0x0000C000
+#define IXGB_PCSS2_DEV_PRES 0x00004000
+#define IXGB_PCSS2_TX_LF 0x00000800
+#define IXGB_PCSS2_RX_LF 0x00000400
+#define IXGB_PCSS2_10GBW 0x00000004
+#define IXGB_PCSS2_10GBX 0x00000002
+#define IXGB_PCSS2_10GBR 0x00000001
+
+/* XPCSS Bit Masks */
+#define IXGB_XPCSS_ALIGN_STATUS 0x00001000
+#define IXGB_XPCSS_PATTERN_TEST 0x00000800
+#define IXGB_XPCSS_LANE_3_SYNC 0x00000008
+#define IXGB_XPCSS_LANE_2_SYNC 0x00000004
+#define IXGB_XPCSS_LANE_1_SYNC 0x00000002
+#define IXGB_XPCSS_LANE_0_SYNC 0x00000001
+
+/* XPCSTC Bit Masks */
+#define IXGB_XPCSTC_BERT_TRIG 0x00200000
+#define IXGB_XPCSTC_BERT_SST 0x00100000
+#define IXGB_XPCSTC_BERT_PSZ_MASK 0x000C0000
+#define IXGB_XPCSTC_BERT_PSZ_SHIFT 17
+#define IXGB_XPCSTC_BERT_PSZ_INF 0x00000003
+#define IXGB_XPCSTC_BERT_PSZ_68 0x00000001
+#define IXGB_XPCSTC_BERT_PSZ_1028 0x00000000
+
+/* MSCA bit Masks */
+/* New Protocol Address */
+#define IXGB_MSCA_NP_ADDR_MASK 0x0000FFFF
+#define IXGB_MSCA_NP_ADDR_SHIFT 0
+/* Either Device Type or Register Address, depending on ST_CODE */
+#define IXGB_MSCA_DEV_TYPE_MASK 0x001F0000
+#define IXGB_MSCA_DEV_TYPE_SHIFT 16
+#define IXGB_MSCA_PHY_ADDR_MASK 0x03E00000
+#define IXGB_MSCA_PHY_ADDR_SHIFT 21
+#define IXGB_MSCA_OP_CODE_MASK 0x0C000000
+/* OP_CODE == 00, Address cycle, New Protocol */
+/* OP_CODE == 01, Write operation */
+/* OP_CODE == 10, Read operation */
+/* OP_CODE == 11, Read, auto increment, New Protocol */
+#define IXGB_MSCA_ADDR_CYCLE 0x00000000
+#define IXGB_MSCA_WRITE 0x04000000
+#define IXGB_MSCA_READ 0x08000000
+#define IXGB_MSCA_READ_AUTOINC 0x0C000000
+#define IXGB_MSCA_OP_CODE_SHIFT 26
+#define IXGB_MSCA_ST_CODE_MASK 0x30000000
+/* ST_CODE == 00, New Protocol */
+/* ST_CODE == 01, Old Protocol */
+#define IXGB_MSCA_NEW_PROTOCOL 0x00000000
+#define IXGB_MSCA_OLD_PROTOCOL 0x10000000
+#define IXGB_MSCA_ST_CODE_SHIFT 28
+/* Initiate command, self-clearing when command completes */
+#define IXGB_MSCA_MDI_COMMAND 0x40000000
+/* MDI In Progress Enable. */
+#define IXGB_MSCA_MDI_IN_PROG_EN 0x80000000
+
+/* MSRWD bit masks */
+#define IXGB_MSRWD_WRITE_DATA_MASK 0x0000FFFF
+#define IXGB_MSRWD_WRITE_DATA_SHIFT 0
+#define IXGB_MSRWD_READ_DATA_MASK 0xFFFF0000
+#define IXGB_MSRWD_READ_DATA_SHIFT 16
+
+/* Definitions for the optics devices on the MDIO bus. */
+#define IXGB_PHY_ADDRESS 0x0 /* Single PHY, multiple "Devices" */
+
+#define MDIO_PMA_PMD_XPAK_VENDOR_NAME 0x803A /* XPAK/XENPAK devices only */
+
+/* Vendor-specific MDIO registers */
+#define G6XXX_PMA_PMD_VS1 0xC001 /* Vendor-specific register */
+#define G6XXX_XGXS_XAUI_VS2 0x18 /* Vendor-specific register */
+
+#define G6XXX_PMA_PMD_VS1_PLL_RESET 0x80
+#define G6XXX_PMA_PMD_VS1_REMOVE_PLL_RESET 0x00
+#define G6XXX_XGXS_XAUI_VS2_INPUT_MASK 0x0F /* XAUI lanes synchronized */
+
+/* Layout of a single receive descriptor. The controller assumes that this
+ * structure is packed into 16 bytes, which is a safe assumption with most
+ * compilers. However, some compilers may insert padding between the fields,
+ * in which case the structure must be packed in some compiler-specific
+ * manner. */
+struct ixgb_rx_desc {
+ __le64 buff_addr;
+ __le16 length;
+ __le16 reserved;
+ u8 status;
+ u8 errors;
+ __le16 special;
+};
+
+#define IXGB_RX_DESC_STATUS_DD 0x01
+#define IXGB_RX_DESC_STATUS_EOP 0x02
+#define IXGB_RX_DESC_STATUS_IXSM 0x04
+#define IXGB_RX_DESC_STATUS_VP 0x08
+#define IXGB_RX_DESC_STATUS_TCPCS 0x20
+#define IXGB_RX_DESC_STATUS_IPCS 0x40
+#define IXGB_RX_DESC_STATUS_PIF 0x80
+
+#define IXGB_RX_DESC_ERRORS_CE 0x01
+#define IXGB_RX_DESC_ERRORS_SE 0x02
+#define IXGB_RX_DESC_ERRORS_P 0x08
+#define IXGB_RX_DESC_ERRORS_TCPE 0x20
+#define IXGB_RX_DESC_ERRORS_IPE 0x40
+#define IXGB_RX_DESC_ERRORS_RXE 0x80
+
+#define IXGB_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define IXGB_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define IXGB_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority is in upper 3 of 16 */
+
+/* Layout of a single transmit descriptor. The controller assumes that this
+ * structure is packed into 16 bytes, which is a safe assumption with most
+ * compilers. However, some compilers may insert padding between the fields,
+ * in which case the structure must be packed in some compiler-specific
+ * manner. */
+struct ixgb_tx_desc {
+ __le64 buff_addr;
+ __le32 cmd_type_len;
+ u8 status;
+ u8 popts;
+ __le16 vlan;
+};
+
+#define IXGB_TX_DESC_LENGTH_MASK 0x000FFFFF
+#define IXGB_TX_DESC_TYPE_MASK 0x00F00000
+#define IXGB_TX_DESC_TYPE_SHIFT 20
+#define IXGB_TX_DESC_CMD_MASK 0xFF000000
+#define IXGB_TX_DESC_CMD_SHIFT 24
+#define IXGB_TX_DESC_CMD_EOP 0x01000000
+#define IXGB_TX_DESC_CMD_TSE 0x04000000
+#define IXGB_TX_DESC_CMD_RS 0x08000000
+#define IXGB_TX_DESC_CMD_VLE 0x40000000
+#define IXGB_TX_DESC_CMD_IDE 0x80000000
+
+#define IXGB_TX_DESC_TYPE 0x00100000
+
+#define IXGB_TX_DESC_STATUS_DD 0x01
+
+#define IXGB_TX_DESC_POPTS_IXSM 0x01
+#define IXGB_TX_DESC_POPTS_TXSM 0x02
+#define IXGB_TX_DESC_SPECIAL_PRI_SHIFT IXGB_RX_DESC_SPECIAL_PRI_SHIFT /* Priority is in upper 3 of 16 */
+
+struct ixgb_context_desc {
+ u8 ipcss;
+ u8 ipcso;
+ __le16 ipcse;
+ u8 tucss;
+ u8 tucso;
+ __le16 tucse;
+ __le32 cmd_type_len;
+ u8 status;
+ u8 hdr_len;
+ __le16 mss;
+};
+
+#define IXGB_CONTEXT_DESC_CMD_TCP 0x01000000
+#define IXGB_CONTEXT_DESC_CMD_IP 0x02000000
+#define IXGB_CONTEXT_DESC_CMD_TSE 0x04000000
+#define IXGB_CONTEXT_DESC_CMD_RS 0x08000000
+#define IXGB_CONTEXT_DESC_CMD_IDE 0x80000000
+
+#define IXGB_CONTEXT_DESC_TYPE 0x00000000
+
+#define IXGB_CONTEXT_DESC_STATUS_DD 0x01
+
+/* Filters */
+#define IXGB_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
+#define IXGB_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
+#define IXGB_RAR_ENTRIES 3 /* Number of entries in Rx Address array */
+
+#define IXGB_MEMORY_REGISTER_BASE_ADDRESS 0
+#define ENET_HEADER_SIZE 14
+#define ENET_FCS_LENGTH 4
+#define IXGB_MAX_NUM_MULTICAST_ADDRESSES 128
+#define IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS 60
+#define IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS 1514
+#define IXGB_MAX_JUMBO_FRAME_SIZE 0x3F00
+
+/* Phy Addresses */
+#define IXGB_OPTICAL_PHY_ADDR 0x0 /* Optical Module phy address */
+#define IXGB_XAUII_PHY_ADDR 0x1 /* XAUI transceiver phy address */
+#define IXGB_DIAG_PHY_ADDR 0x1F /* Diagnostic Device phy address */
+
+/* This structure takes a 64k flash and maps it for identification commands */
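+/* The filler sizes place cmd2 at offset 0x2AAA and cmd1 at offset 0x5555,
+ * the usual JEDEC flash command addresses.
+ */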
+struct ixgb_flash_buffer {
+ u8 manufacturer_id;
+ u8 device_id;
+ u8 filler1[0x2AA8];
+ u8 cmd2;
+ u8 filler2[0x2AAA];
+ u8 cmd1;
+ u8 filler3[0xAAAA];
+};
+
+/* Flow control parameters */
+struct ixgb_fc {
+ u32 high_water; /* Flow Control High-water */
+ u32 low_water; /* Flow Control Low-water */
+ u16 pause_time; /* Flow Control Pause timer */
+ bool send_xon; /* Flow control send XON */
+ ixgb_fc_type type; /* Type of flow control */
+};
+
+/* The historical defaults for the flow control values are given below. */
+#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */
+#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */
+#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */
+
+/* Phy definitions */
+#define IXGB_MAX_PHY_REG_ADDRESS 0xFFFF
+#define IXGB_MAX_PHY_ADDRESS 31
+#define IXGB_MAX_PHY_DEV_TYPE 31
+
+/* Bus parameters */
+struct ixgb_bus {
+ ixgb_bus_speed speed;
+ ixgb_bus_width width;
+ ixgb_bus_type type;
+};
+
+struct ixgb_hw {
+ u8 __iomem *hw_addr; /* Base Address of the hardware */
+ void *back; /* Pointer to OS-dependent struct */
+ struct ixgb_fc fc; /* Flow control parameters */
+ struct ixgb_bus bus; /* Bus parameters */
+ u32 phy_id; /* Phy Identifier */
+ u32 phy_addr; /* XGMII address of Phy */
+ ixgb_mac_type mac_type; /* Identifier for MAC controller */
+ ixgb_phy_type phy_type; /* Transceiver/phy identifier */
+ u32 max_frame_size; /* Maximum frame size supported */
+ u32 mc_filter_type; /* Multicast filter hash type */
+ u32 num_mc_addrs; /* Number of current Multicast addrs */
+ u8 curr_mac_addr[ETH_ALEN]; /* Individual address currently programmed in MAC */
+ u32 num_tx_desc; /* Number of Transmit descriptors */
+ u32 num_rx_desc; /* Number of Receive descriptors */
+ u32 rx_buffer_size; /* Size of Receive buffer */
+ bool link_up; /* true if link is valid */
+ bool adapter_stopped; /* State of adapter */
+ u16 device_id; /* device id from PCI configuration space */
+ u16 vendor_id; /* vendor id from PCI configuration space */
+ u8 revision_id; /* revision id from PCI configuration space */
+ u16 subsystem_vendor_id; /* subsystem vendor id from PCI configuration space */
+ u16 subsystem_id; /* subsystem id from PCI configuration space */
+ u32 bar0; /* Base Address registers */
+ u32 bar1;
+ u32 bar2;
+ u32 bar3;
+ u16 pci_cmd_word; /* PCI command register id from PCI configuration space */
+ __le16 eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */
+ unsigned long io_base; /* Our I/O mapped location */
+ u32 lastLFC;
+ u32 lastRFC;
+};
+
+/* Statistics reported by the hardware */
+struct ixgb_hw_stats {
+ u64 tprl;
+ u64 tprh;
+ u64 gprcl;
+ u64 gprch;
+ u64 bprcl;
+ u64 bprch;
+ u64 mprcl;
+ u64 mprch;
+ u64 uprcl;
+ u64 uprch;
+ u64 vprcl;
+ u64 vprch;
+ u64 jprcl;
+ u64 jprch;
+ u64 gorcl;
+ u64 gorch;
+ u64 torl;
+ u64 torh;
+ u64 rnbc;
+ u64 ruc;
+ u64 roc;
+ u64 rlec;
+ u64 crcerrs;
+ u64 icbc;
+ u64 ecbc;
+ u64 mpc;
+ u64 tptl;
+ u64 tpth;
+ u64 gptcl;
+ u64 gptch;
+ u64 bptcl;
+ u64 bptch;
+ u64 mptcl;
+ u64 mptch;
+ u64 uptcl;
+ u64 uptch;
+ u64 vptcl;
+ u64 vptch;
+ u64 jptcl;
+ u64 jptch;
+ u64 gotcl;
+ u64 gotch;
+ u64 totl;
+ u64 toth;
+ u64 dc;
+ u64 plt64c;
+ u64 tsctc;
+ u64 tsctfc;
+ u64 ibic;
+ u64 rfc;
+ u64 lfc;
+ u64 pfrc;
+ u64 pftc;
+ u64 mcfrc;
+ u64 mcftc;
+ u64 xonrxc;
+ u64 xontxc;
+ u64 xoffrxc;
+ u64 xofftxc;
+ u64 rjc;
+};
+
+/* Function Prototypes */
+extern bool ixgb_adapter_stop(struct ixgb_hw *hw);
+extern bool ixgb_init_hw(struct ixgb_hw *hw);
+extern bool ixgb_adapter_start(struct ixgb_hw *hw);
+extern void ixgb_check_for_link(struct ixgb_hw *hw);
+extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
+
+extern void ixgb_rar_set(struct ixgb_hw *hw,
+ u8 *addr,
+ u32 index);
+
+
+/* Filters (multicast, vlan, receive) */
+extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw,
+ u8 *mc_addr_list,
+ u32 mc_addr_count,
+ u32 pad);
+
+/* Vfta functions */
+extern void ixgb_write_vfta(struct ixgb_hw *hw,
+ u32 offset,
+ u32 value);
+
+/* Access functions to eeprom data */
+void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr);
+u32 ixgb_get_ee_pba_number(struct ixgb_hw *hw);
+u16 ixgb_get_ee_device_id(struct ixgb_hw *hw);
+bool ixgb_get_eeprom_data(struct ixgb_hw *hw);
+__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index);
+
+/* Everything else */
+void ixgb_led_on(struct ixgb_hw *hw);
+void ixgb_led_off(struct ixgb_hw *hw);
+void ixgb_write_pci_cfg(struct ixgb_hw *hw,
+ u32 reg,
+ u16 *value);
+
+
+#endif /* _IXGB_HW_H_ */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
new file mode 100644
index 000000000000..2a58847f46e8
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
@@ -0,0 +1,53 @@
+/*******************************************************************************
+
+ Intel PRO/10GbE Linux driver
+ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGB_IDS_H_
+#define _IXGB_IDS_H_
+
+/* The Device and Vendor IDs for 10 Gigabit MACs */
+
+#define INTEL_VENDOR_ID 0x8086
+#define INTEL_SUBVENDOR_ID 0x8086
+#define SUN_VENDOR_ID 0x108E
+#define SUN_SUBVENDOR_ID 0x108E
+
+#define IXGB_DEVICE_ID_82597EX 0x1048
+#define IXGB_DEVICE_ID_82597EX_SR 0x1A48
+#define IXGB_DEVICE_ID_82597EX_LR 0x1B48
+#define IXGB_SUBDEVICE_ID_A11F 0xA11F
+#define IXGB_SUBDEVICE_ID_A01F 0xA01F
+
+#define IXGB_DEVICE_ID_82597EX_CX4 0x109E
+#define IXGB_SUBDEVICE_ID_A00C 0xA00C
+#define IXGB_SUBDEVICE_ID_A01C 0xA01C
+#define IXGB_SUBDEVICE_ID_7036 0x7036
+
+#endif /* _IXGB_IDS_H_ */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
new file mode 100644
index 000000000000..88558b1aac07
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -0,0 +1,2378 @@
+/*******************************************************************************
+
+ Intel PRO/10GbE Linux driver
+ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/prefetch.h>
+#include "ixgb.h"
+
+char ixgb_driver_name[] = "ixgb";
+static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
+
+#define DRIVERNAPI "-NAPI"
+#define DRV_VERSION "1.0.135-k2" DRIVERNAPI
+const char ixgb_driver_version[] = DRV_VERSION;
+static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";
+
+#define IXGB_CB_LENGTH 256
+static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+ "Maximum size of packet that is copied to a new buffer on receive");
+
+/* ixgb_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = {
+ {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+
+ /* required last entry */
+ {0,}
+};
+
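+/* expose the ID table to userspace (modinfo aliases) so udev/modprobe
+ * can autoload this module when a matching device appears */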
+MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
+
+/* Local Function Prototypes */
+static int ixgb_init_module(void);
+static void ixgb_exit_module(void);
+static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void __devexit ixgb_remove(struct pci_dev *pdev);
+static int ixgb_sw_init(struct ixgb_adapter *adapter);
+static int ixgb_open(struct net_device *netdev);
+static int ixgb_close(struct net_device *netdev);
+static void ixgb_configure_tx(struct ixgb_adapter *adapter);
+static void ixgb_configure_rx(struct ixgb_adapter *adapter);
+static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
+static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
+static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
+static void ixgb_set_multi(struct net_device *netdev);
+static void ixgb_watchdog(unsigned long data);
+static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev);
+static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
+static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
+static int ixgb_set_mac(struct net_device *netdev, void *p);
+static irqreturn_t ixgb_intr(int irq, void *data);
+static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
+
+static int ixgb_clean(struct napi_struct *, int);
+static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
+static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);
+
+static void ixgb_tx_timeout(struct net_device *dev);
+static void ixgb_tx_timeout_task(struct work_struct *work);
+
+static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
+static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
+static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* for netdump / net console */
+static void ixgb_netpoll(struct net_device *dev);
+#endif
+
+static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
+ enum pci_channel_state state);
+static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev);
+static void ixgb_io_resume(struct pci_dev *pdev);
+
+static struct pci_error_handlers ixgb_err_handler = {
+ .error_detected = ixgb_io_error_detected,
+ .slot_reset = ixgb_io_slot_reset,
+ .resume = ixgb_io_resume,
+};
+
+static struct pci_driver ixgb_driver = {
+ .name = ixgb_driver_name,
+ .id_table = ixgb_pci_tbl,
+ .probe = ixgb_probe,
+ .remove = __devexit_p(ixgb_remove),
+ .err_handler = &ixgb_err_handler
+};
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+static int debug = DEFAULT_DEBUG_LEVEL_SHIFT;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+/**
+ * ixgb_init_module - Driver Registration Routine
+ *
+ * ixgb_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+
+static int __init
+ixgb_init_module(void)
+{
+ pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
+ pr_info("%s\n", ixgb_copyright);
+
+ return pci_register_driver(&ixgb_driver);
+}
+
+module_init(ixgb_init_module);
+
+/**
+ * ixgb_exit_module - Driver Exit Cleanup Routine
+ *
+ * ixgb_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+
+static void __exit
+ixgb_exit_module(void)
+{
+ pci_unregister_driver(&ixgb_driver);
+}
+
+module_exit(ixgb_exit_module);
+
+/**
+ * ixgb_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+
+static void
+ixgb_irq_disable(struct ixgb_adapter *adapter)
+{
+ IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
+ IXGB_WRITE_FLUSH(&adapter->hw);
+ synchronize_irq(adapter->pdev->irq);
+}
+
+/**
+ * ixgb_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+
+static void
+ixgb_irq_enable(struct ixgb_adapter *adapter)
+{
+ u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
+ IXGB_INT_TXDW | IXGB_INT_LSC;
+ if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
+ val |= IXGB_INT_GPI0;
+ IXGB_WRITE_REG(&adapter->hw, IMS, val);
+ IXGB_WRITE_FLUSH(&adapter->hw);
+}
+
+int
+ixgb_up(struct ixgb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err, irq_flags = IRQF_SHARED;
+ int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
+ struct ixgb_hw *hw = &adapter->hw;
+
+ /* hardware has been reset, we need to reload some things */
+
+ ixgb_rar_set(hw, netdev->dev_addr, 0);
+ ixgb_set_multi(netdev);
+
+ ixgb_restore_vlan(adapter);
+
+ ixgb_configure_tx(adapter);
+ ixgb_setup_rctl(adapter);
+ ixgb_configure_rx(adapter);
+ ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));
+
+ /* disable interrupts and get the hardware into a known state */
+ IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
+
+ /* only enable MSI if bus is in PCI-X mode */
+ if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
+ err = pci_enable_msi(adapter->pdev);
+ if (!err) {
+ adapter->have_msi = 1;
+ irq_flags = 0;
+ }
+ /* proceed to try to request regular interrupt */
+ }
+
+ err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
+ netdev->name, netdev);
+ if (err) {
+ if (adapter->have_msi)
+ pci_disable_msi(adapter->pdev);
+ netif_err(adapter, probe, adapter->netdev,
+ "Unable to allocate interrupt Error: %d\n", err);
+ return err;
+ }
+
+ if ((hw->max_frame_size != max_frame) ||
+ (hw->max_frame_size !=
+ (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
+
+ hw->max_frame_size = max_frame;
+
+ IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
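+ /* MFS holds the maximum frame size shifted up by IXGB_MFS_SHIFT;
+ * the read in the test above shifts back down symmetrically */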
+
+ if (hw->max_frame_size >
+ IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
+ u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
+
+ if (!(ctrl0 & IXGB_CTRL0_JFE)) {
+ ctrl0 |= IXGB_CTRL0_JFE;
+ IXGB_WRITE_REG(hw, CTRL0, ctrl0);
+ }
+ }
+ }
+
+ clear_bit(__IXGB_DOWN, &adapter->flags);
+
+ napi_enable(&adapter->napi);
+ ixgb_irq_enable(adapter);
+
+ netif_wake_queue(netdev);
+
+ mod_timer(&adapter->watchdog_timer, jiffies);
+
+ return 0;
+}
+
+void
+ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ /* prevent the interrupt handler from restarting watchdog */
+ set_bit(__IXGB_DOWN, &adapter->flags);
+
+ napi_disable(&adapter->napi);
+ /* waiting for NAPI to complete can re-enable interrupts */
+ ixgb_irq_disable(adapter);
+ free_irq(adapter->pdev->irq, netdev);
+
+ if (adapter->have_msi)
+ pci_disable_msi(adapter->pdev);
+
+ if (kill_watchdog)
+ del_timer_sync(&adapter->watchdog_timer);
+
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ ixgb_reset(adapter);
+ ixgb_clean_tx_ring(adapter);
+ ixgb_clean_rx_ring(adapter);
+}
+
+void
+ixgb_reset(struct ixgb_adapter *adapter)
+{
+ struct ixgb_hw *hw = &adapter->hw;
+
+ ixgb_adapter_stop(hw);
+ if (!ixgb_init_hw(hw))
+ netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");
+
+ /* restore frame size information */
+ IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
+ if (hw->max_frame_size >
+ IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
+ u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
+ if (!(ctrl0 & IXGB_CTRL0_JFE)) {
+ ctrl0 |= IXGB_CTRL0_JFE;
+ IXGB_WRITE_REG(hw, CTRL0, ctrl0);
+ }
+ }
+}
+
+static u32
+ixgb_fix_features(struct net_device *netdev, u32 features)
+{
+ /*
+ * Tx VLAN insertion does not work per HW design when Rx stripping is
+ * disabled.
+ */
+ if (!(features & NETIF_F_HW_VLAN_RX))
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ return features;
+}
+
+static int
+ixgb_set_features(struct net_device *netdev, u32 features)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ u32 changed = features ^ netdev->features;
+
+ if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_RX)))
+ return 0;
+
+ adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
+
+ if (netif_running(netdev)) {
+ ixgb_down(adapter, true);
+ ixgb_up(adapter);
+ ixgb_set_speed_duplex(netdev);
+ } else
+ ixgb_reset(adapter);
+
+ return 0;
+}
+
+static const struct net_device_ops ixgb_netdev_ops = {
+ .ndo_open = ixgb_open,
+ .ndo_stop = ixgb_close,
+ .ndo_start_xmit = ixgb_xmit_frame,
+ .ndo_get_stats = ixgb_get_stats,
+ .ndo_set_rx_mode = ixgb_set_multi,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = ixgb_set_mac,
+ .ndo_change_mtu = ixgb_change_mtu,
+ .ndo_tx_timeout = ixgb_tx_timeout,
+ .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ixgb_netpoll,
+#endif
+ .ndo_fix_features = ixgb_fix_features,
+ .ndo_set_features = ixgb_set_features,
+};
+
+/**
+ * ixgb_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ixgb_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ixgb_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+
+static int __devinit
+ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev = NULL;
+ struct ixgb_adapter *adapter;
+ static int cards_found = 0;
+ int pci_using_dac;
+ int i;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_using_dac = 0;
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err)
+ pci_using_dac = 1;
+ } else {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ pr_err("No usable DMA configuration, aborting\n");
+ goto err_dma_mask;
+ }
+ }
+ }
+
+ err = pci_request_regions(pdev, ixgb_driver_name);
+ if (err)
+ goto err_request_regions;
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->hw.back = adapter;
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);
+
+ adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
+ if (!adapter->hw.hw_addr) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ for (i = BAR_1; i <= BAR_5; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+ adapter->hw.io_base = pci_resource_start(pdev, i);
+ break;
+ }
+ }
+
+ netdev->netdev_ops = &ixgb_netdev_ops;
+ ixgb_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+ netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);
+
+ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+ adapter->bd_number = cards_found;
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+
+ /* setup the private structure */
+
+ err = ixgb_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+ netdev->hw_features = NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX;
+ netdev->features = netdev->hw_features |
+ NETIF_F_HW_VLAN_FILTER;
+ netdev->hw_features |= NETIF_F_RXCSUM;
+
+ if (pci_using_dac) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
+
+ /* make sure the EEPROM is good */
+
+ if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
+ netif_err(adapter, probe, adapter->netdev,
+ "The EEPROM Checksum Is Not Valid\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
+ memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->perm_addr)) {
+ netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = ixgb_watchdog;
+ adapter->watchdog_timer.data = (unsigned long)adapter;
+
+ INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
+
+ strcpy(netdev->name, "eth%d");
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+ netif_info(adapter, probe, adapter->netdev,
+ "Intel(R) PRO/10GbE Network Connection\n");
+ ixgb_check_options(adapter);
+ /* reset the hardware with the new settings */
+
+ ixgb_reset(adapter);
+
+ cards_found++;
+ return 0;
+
+err_register:
+err_sw_init:
+err_eeprom:
+ iounmap(adapter->hw.hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_request_regions:
+err_dma_mask:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * ixgb_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ixgb_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+
+static void __devexit
+ixgb_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+
+ cancel_work_sync(&adapter->tx_timeout_task);
+
+ unregister_netdev(netdev);
+
+ iounmap(adapter->hw.hw_addr);
+ pci_release_regions(pdev);
+
+ free_netdev(netdev);
+ pci_disable_device(pdev);
+}
+
+/**
+ * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * ixgb_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+
+static int __devinit
+ixgb_sw_init(struct ixgb_adapter *adapter)
+{
+ struct ixgb_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* PCI config space info */
+
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_id = pdev->subsystem_device;
+
+ hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
+ adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */
+
+ if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
+ (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
+ (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
+ (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
+ hw->mac_type = ixgb_82597;
+ else {
+ /* should never have loaded on this device */
+ netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
+ }
+
+ /* enable flow control to be programmed */
+ hw->fc.send_xon = 1;
+
+ set_bit(__IXGB_DOWN, &adapter->flags);
+ return 0;
+}
+
+/**
+ * ixgb_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+
+static int
+ixgb_open(struct net_device *netdev)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ /* allocate transmit descriptors */
+ err = ixgb_setup_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ netif_carrier_off(netdev);
+
+ /* allocate receive descriptors */
+
+ err = ixgb_setup_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ err = ixgb_up(adapter);
+ if (err)
+ goto err_up;
+
+ netif_start_queue(netdev);
+
+ return 0;
+
+err_up:
+ ixgb_free_rx_resources(adapter);
+err_setup_rx:
+ ixgb_free_tx_resources(adapter);
+err_setup_tx:
+ ixgb_reset(adapter);
+
+ return err;
+}
+
+/**
+ * ixgb_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+
+static int
+ixgb_close(struct net_device *netdev)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+
+ ixgb_down(adapter, true);
+
+ ixgb_free_tx_resources(adapter);
+ ixgb_free_rx_resources(adapter);
+
+ return 0;
+}
+
+/**
+ * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int
+ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
+{
+ struct ixgb_desc_ring *txdr = &adapter->tx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct ixgb_buffer) * txdr->count;
+ txdr->buffer_info = vzalloc(size);
+ if (!txdr->buffer_info) {
+ netif_err(adapter, probe, adapter->netdev,
+ "Unable to allocate transmit descriptor ring memory\n");
+ return -ENOMEM;
+ }
+
+ /* round up to nearest 4K */
+
+ txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
+ txdr->size = ALIGN(txdr->size, 4096);
+
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
+ if (!txdr->desc) {
+ vfree(txdr->buffer_info);
+ netif_err(adapter, probe, adapter->netdev,
+ "Unable to allocate transmit descriptor memory\n");
+ return -ENOMEM;
+ }
+ memset(txdr->desc, 0, txdr->size);
+
+ txdr->next_to_use = 0;
+ txdr->next_to_clean = 0;
+
+ return 0;
+}
+
+/**
+ * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+
+static void
+ixgb_configure_tx(struct ixgb_adapter *adapter)
+{
+ u64 tdba = adapter->tx_ring.dma;
+ u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
+ u32 tctl;
+ struct ixgb_hw *hw = &adapter->hw;
+
+ /* Setup the Base and Length of the Tx Descriptor Ring
+ * tx_ring.dma can be either a 32 or 64 bit value
+ */
+
+ IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
+ IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));
+
+ IXGB_WRITE_REG(hw, TDLEN, tdlen);
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+
+ IXGB_WRITE_REG(hw, TDH, 0);
+ IXGB_WRITE_REG(hw, TDT, 0);
+
+ /* don't set up txdctl, it induces performance problems if configured
+ * incorrectly */
+ /* Set the Tx Interrupt Delay register */
+
+ IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
+
+ /* Program the Transmit Control Register */
+
+ tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
+ IXGB_WRITE_REG(hw, TCTL, tctl);
+
+ /* Setup Transmit Descriptor Settings for this adapter */
+ adapter->tx_cmd_type =
+ IXGB_TX_DESC_TYPE |
+ (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
+}
+
+/**
+ * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+int
+ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
+{
+ struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct ixgb_buffer) * rxdr->count;
+ rxdr->buffer_info = vzalloc(size);
+ if (!rxdr->buffer_info) {
+ netif_err(adapter, probe, adapter->netdev,
+ "Unable to allocate receive descriptor ring\n");
+ return -ENOMEM;
+ }
+
+ /* Round up to nearest 4K */
+
+ rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
+ rxdr->size = ALIGN(rxdr->size, 4096);
+
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
+
+ if (!rxdr->desc) {
+ vfree(rxdr->buffer_info);
+ netif_err(adapter, probe, adapter->netdev,
+ "Unable to allocate receive descriptors\n");
+ return -ENOMEM;
+ }
+ memset(rxdr->desc, 0, rxdr->size);
+
+ rxdr->next_to_clean = 0;
+ rxdr->next_to_use = 0;
+
+ return 0;
+}
+
+/**
+ * ixgb_setup_rctl - configure the receive control register
+ * @adapter: Board private structure
+ **/
+
+static void
+ixgb_setup_rctl(struct ixgb_adapter *adapter)
+{
+ u32 rctl;
+
+ rctl = IXGB_READ_REG(&adapter->hw, RCTL);
+
+ rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
+
+ rctl |=
+ IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
+ IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
+ (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
+
+ rctl |= IXGB_RCTL_SECRC;
+
+ if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
+ rctl |= IXGB_RCTL_BSIZE_2048;
+ else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
+ rctl |= IXGB_RCTL_BSIZE_4096;
+ else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
+ rctl |= IXGB_RCTL_BSIZE_8192;
+ else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
+ rctl |= IXGB_RCTL_BSIZE_16384;
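+ /* the ladder above selects the smallest hardware receive buffer
+ * size that still holds a full rx_buffer_len frame */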
+
+ IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
+}
+
+/**
+ * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+
+static void
+ixgb_configure_rx(struct ixgb_adapter *adapter)
+{
+ u64 rdba = adapter->rx_ring.dma;
+ u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
+ struct ixgb_hw *hw = &adapter->hw;
+ u32 rctl;
+ u32 rxcsum;
+
+ /* make sure receives are disabled while setting up the descriptors */
+
+ rctl = IXGB_READ_REG(hw, RCTL);
+ IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);
+
+ /* set the Receive Delay Timer Register */
+
+ IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
+
+ /* Setup the Base and Length of the Rx Descriptor Ring */
+
+ IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
+ IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));
+
+ IXGB_WRITE_REG(hw, RDLEN, rdlen);
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers */
+ IXGB_WRITE_REG(hw, RDH, 0);
+ IXGB_WRITE_REG(hw, RDT, 0);
+
+ /* due to the hardware errata with RXDCTL, we are unable to use any of
+ * the performance enhancing features of it without causing other
+ * subtle bugs, some of the bugs could include receive length
+ * corruption at high data rates (WTHRESH > 0) and/or receive
+ * descriptor ring irregularities (particularly in hardware cache) */
+ IXGB_WRITE_REG(hw, RXDCTL, 0);
+
+ /* Enable Receive Checksum Offload for TCP and UDP */
+ if (adapter->rx_csum) {
+ rxcsum = IXGB_READ_REG(hw, RXCSUM);
+ rxcsum |= IXGB_RXCSUM_TUOFL;
+ IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
+ }
+
+ /* Enable Receives */
+
+ IXGB_WRITE_REG(hw, RCTL, rctl);
+}
+
+/**
+ * ixgb_free_tx_resources - Free Tx Resources
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+
+void
+ixgb_free_tx_resources(struct ixgb_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ ixgb_clean_tx_ring(adapter);
+
+ vfree(adapter->tx_ring.buffer_info);
+ adapter->tx_ring.buffer_info = NULL;
+
+ dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
+ adapter->tx_ring.desc, adapter->tx_ring.dma);
+
+ adapter->tx_ring.desc = NULL;
+}
+
+static void
+ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
+ struct ixgb_buffer *buffer_info)
+{
+ if (buffer_info->dma) {
+ if (buffer_info->mapped_as_page)
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
+ buffer_info->dma = 0;
+ }
+
+ if (buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ buffer_info->time_stamp = 0;
+ /* length and next_to_watch are always re-initialized on the Tx
+ * path before each use, so they need not be cleared here:
+ * buffer_info->length = 0;
+ * buffer_info->next_to_watch = 0; */
+}
+
+/**
+ * ixgb_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ **/
+
+static void
+ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
+{
+ struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+ struct ixgb_buffer *buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Tx ring sk_buffs */
+
+ for (i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+ ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
+ }
+
+ size = sizeof(struct ixgb_buffer) * tx_ring->count;
+ memset(tx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ IXGB_WRITE_REG(&adapter->hw, TDH, 0);
+ IXGB_WRITE_REG(&adapter->hw, TDT, 0);
+}
+
+/**
+ * ixgb_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+
+void
+ixgb_free_rx_resources(struct ixgb_adapter *adapter)
+{
+ struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+
+ ixgb_clean_rx_ring(adapter);
+
+ vfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
+
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
+
+ rx_ring->desc = NULL;
+}
+
+/**
+ * ixgb_clean_rx_ring - Free Rx Buffers
+ * @adapter: board private structure
+ **/
+
+static void
+ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
+{
+ struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
+ struct ixgb_buffer *buffer_info;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Rx ring sk_buffs */
+
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ if (buffer_info->dma) {
+ dma_unmap_single(&pdev->dev,
+ buffer_info->dma,
+ buffer_info->length,
+ DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+ buffer_info->length = 0;
+ }
+
+ if (buffer_info->skb) {
+ dev_kfree_skb(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ }
+
+ size = sizeof(struct ixgb_buffer) * rx_ring->count;
+ memset(rx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ IXGB_WRITE_REG(&adapter->hw, RDH, 0);
+ IXGB_WRITE_REG(&adapter->hw, RDT, 0);
+}
+
+/**
+ * ixgb_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int
+ixgb_set_mac(struct net_device *netdev, void *p)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+ ixgb_rar_set(&adapter->hw, addr->sa_data, 0);
+
+ return 0;
+}
+
+/**
+ * ixgb_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+
+static void
+ixgb_set_multi(struct net_device *netdev)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ struct ixgb_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u32 rctl;
+
+ /* Check for Promiscuous and All Multicast modes */
+
+ rctl = IXGB_READ_REG(hw, RCTL);
+
+ if (netdev->flags & IFF_PROMISC) {
+ rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
+ /* disable VLAN filtering */
+ rctl &= ~IXGB_RCTL_CFIEN;
+ rctl &= ~IXGB_RCTL_VFE;
+ } else {
+ if (netdev->flags & IFF_ALLMULTI) {
+ rctl |= IXGB_RCTL_MPE;
+ rctl &= ~IXGB_RCTL_UPE;
+ } else {
+ rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
+ }
+ /* enable VLAN filtering */
+ rctl |= IXGB_RCTL_VFE;
+ rctl &= ~IXGB_RCTL_CFIEN;
+ }
+
+ if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
+ rctl |= IXGB_RCTL_MPE;
+ IXGB_WRITE_REG(hw, RCTL, rctl);
+ } else {
+ u8 *mta = kmalloc(IXGB_MAX_NUM_MULTICAST_ADDRESSES *
+ ETH_ALEN, GFP_ATOMIC);
+ u8 *addr;
+ if (!mta) {
+ pr_err("allocation of multicast memory failed\n");
+ goto alloc_failed;
+ }
+
+ IXGB_WRITE_REG(hw, RCTL, rctl);
+
+ addr = mta;
+ netdev_for_each_mc_addr(ha, netdev) {
+ memcpy(addr, ha->addr, ETH_ALEN);
+ addr += ETH_ALEN;
+ }
+
+ ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
+ kfree(mta);
+ }
+
+alloc_failed:
+ if (netdev->features & NETIF_F_HW_VLAN_RX)
+ ixgb_vlan_strip_enable(adapter);
+ else
+ ixgb_vlan_strip_disable(adapter);
+}
+
+/**
+ * ixgb_watchdog - Timer Call-back
+ * @data: pointer to our adapter struct cast into an unsigned long
+ **/
+
+static void
+ixgb_watchdog(unsigned long data)
+{
+ struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
+ struct net_device *netdev = adapter->netdev;
+ struct ixgb_desc_ring *txdr = &adapter->tx_ring;
+
+ ixgb_check_for_link(&adapter->hw);
+
+ if (ixgb_check_for_bad_link(&adapter->hw)) {
+ /* force the reset path */
+ netif_stop_queue(netdev);
+ }
+
+ if (adapter->hw.link_up) {
+ if (!netif_carrier_ok(netdev)) {
+ netdev_info(netdev,
+ "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
+ (adapter->hw.fc.type == ixgb_fc_full) ?
+ "RX/TX" :
+ (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
+ "RX" :
+ (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
+ "TX" : "None");
+ adapter->link_speed = 10000;
+ adapter->link_duplex = FULL_DUPLEX;
+ netif_carrier_on(netdev);
+ }
+ } else {
+ if (netif_carrier_ok(netdev)) {
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+ netdev_info(netdev, "NIC Link is Down\n");
+ netif_carrier_off(netdev);
+ }
+ }
+
+ ixgb_update_stats(adapter);
+
+ if (!netif_carrier_ok(netdev)) {
+ if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
+ /* We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context). */
+ schedule_work(&adapter->tx_timeout_task);
+ /* return immediately since reset is imminent */
+ return;
+ }
+ }
+
+ /* Force detection of hung controller every watchdog period */
+ adapter->detect_tx_hung = true;
+
+ /* generate an interrupt to force clean up of any stragglers */
+ IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);
+
+ /* Reset the timer */
+ mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
+}
+
+#define IXGB_TX_FLAGS_CSUM 0x00000001
+#define IXGB_TX_FLAGS_VLAN 0x00000002
+#define IXGB_TX_FLAGS_TSO 0x00000004
+
+static int
+ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
+{
+ struct ixgb_context_desc *context_desc;
+ unsigned int i;
+ u8 ipcss, ipcso, tucss, tucso, hdr_len;
+ u16 ipcse, tucse, mss;
+ int err;
+
+ if (likely(skb_is_gso(skb))) {
+ struct ixgb_buffer *buffer_info;
+ struct iphdr *iph;
+
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ mss = skb_shinfo(skb)->gso_size;
+ iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP, 0);
+ ipcss = skb_network_offset(skb);
+ ipcso = (void *)&(iph->check) - (void *)skb->data;
+ ipcse = skb_transport_offset(skb) - 1;
+ tucss = skb_transport_offset(skb);
+ tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
+ tucse = 0;
+
+ i = adapter->tx_ring.next_to_use;
+ context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
+ buffer_info = &adapter->tx_ring.buffer_info[i];
+ WARN_ON(buffer_info->dma != 0);
+
+ context_desc->ipcss = ipcss;
+ context_desc->ipcso = ipcso;
+ context_desc->ipcse = cpu_to_le16(ipcse);
+ context_desc->tucss = tucss;
+ context_desc->tucso = tucso;
+ context_desc->tucse = cpu_to_le16(tucse);
+ context_desc->mss = cpu_to_le16(mss);
+ context_desc->hdr_len = hdr_len;
+ context_desc->status = 0;
+ context_desc->cmd_type_len = cpu_to_le32(
+ IXGB_CONTEXT_DESC_TYPE
+ | IXGB_CONTEXT_DESC_CMD_TSE
+ | IXGB_CONTEXT_DESC_CMD_IP
+ | IXGB_CONTEXT_DESC_CMD_TCP
+ | IXGB_CONTEXT_DESC_CMD_IDE
+ | (skb->len - (hdr_len)));
+
+ if (++i == adapter->tx_ring.count) i = 0;
+ adapter->tx_ring.next_to_use = i;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static bool
+ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
+{
+ struct ixgb_context_desc *context_desc;
+ unsigned int i;
+ u8 css, cso;
+
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ struct ixgb_buffer *buffer_info;
+ css = skb_checksum_start_offset(skb);
+ cso = css + skb->csum_offset;
+
+ i = adapter->tx_ring.next_to_use;
+ context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
+ buffer_info = &adapter->tx_ring.buffer_info[i];
+ WARN_ON(buffer_info->dma != 0);
+
+ context_desc->tucss = css;
+ context_desc->tucso = cso;
+ context_desc->tucse = 0;
+ /* zero out any previously existing data in one instruction */
+ *(u32 *)&(context_desc->ipcss) = 0;
+ context_desc->status = 0;
+ context_desc->hdr_len = 0;
+ context_desc->mss = 0;
+ context_desc->cmd_type_len =
+ cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
+ | IXGB_TX_DESC_CMD_IDE);
+
+ if (++i == adapter->tx_ring.count) i = 0;
+ adapter->tx_ring.next_to_use = i;
+
+ return true;
+ }
+
+ return false;
+}
+
+#define IXGB_MAX_TXD_PWR 14
+#define IXGB_MAX_DATA_PER_TXD (1<<IXGB_MAX_TXD_PWR)
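+/* each data descriptor can carry at most 2^14 = 16384 bytes */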
+
+static int
+ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
+ unsigned int first)
+{
+ struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ struct ixgb_buffer *buffer_info;
+ int len = skb_headlen(skb);
+ unsigned int offset = 0, size, count = 0, i;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ unsigned int f;
+
+ i = tx_ring->next_to_use;
+
+ while (len) {
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, IXGB_MAX_DATA_PER_TXD);
+ /* Workaround for premature desc write-backs
+ * in TSO mode. Append 4-byte sentinel desc */
+ if (unlikely(mss && !nr_frags && size == len && size > 8))
+ size -= 4;
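+ /* the 4 bytes trimmed off here become their own final
+ * descriptor on the next pass of the loop: the sentinel */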
+
+ buffer_info->length = size;
+ WARN_ON(buffer_info->dma != 0);
+ buffer_info->time_stamp = jiffies;
+ buffer_info->mapped_as_page = false;
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+ goto dma_error;
+ buffer_info->next_to_watch = 0;
+
+ len -= size;
+ offset += size;
+ count++;
+ if (len) {
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ len = frag->size;
+ offset = 0;
+
+ while (len) {
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ buffer_info = &tx_ring->buffer_info[i];
+ size = min(len, IXGB_MAX_DATA_PER_TXD);
+
+ /* Workaround for premature desc write-backs
+ * in TSO mode. Append 4-byte sentinel desc */
+ if (unlikely(mss && (f == (nr_frags - 1))
+ && size == len && size > 8))
+ size -= 4;
+
+ buffer_info->length = size;
+ buffer_info->time_stamp = jiffies;
+ buffer_info->mapped_as_page = true;
+ buffer_info->dma =
+ skb_frag_dma_map(&pdev->dev, frag, offset, size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
+ goto dma_error;
+ buffer_info->next_to_watch = 0;
+
+ len -= size;
+ offset += size;
+ count++;
+ }
+ }
+ tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[first].next_to_watch = i;
+
+ return count;
+
+dma_error:
+ dev_err(&pdev->dev, "TX DMA map failed\n");
+ buffer_info->dma = 0;
+ if (count)
+ count--;
+
+ while (count--) {
+ if (i == 0)
+ i += tx_ring->count;
+ i--;
+ buffer_info = &tx_ring->buffer_info[i];
+ ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
+ }
+
+ return 0;
+}
+
+static void
+ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id, int tx_flags)
+{
+ struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+ struct ixgb_tx_desc *tx_desc = NULL;
+ struct ixgb_buffer *buffer_info;
+ u32 cmd_type_len = adapter->tx_cmd_type;
+ u8 status = 0;
+ u8 popts = 0;
+ unsigned int i;
+
+ if (tx_flags & IXGB_TX_FLAGS_TSO) {
+ cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
+ popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
+ }
+
+ if (tx_flags & IXGB_TX_FLAGS_CSUM)
+ popts |= IXGB_TX_DESC_POPTS_TXSM;
+
+ if (tx_flags & IXGB_TX_FLAGS_VLAN)
+ cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
+
+ i = tx_ring->next_to_use;
+
+ while (count--) {
+ buffer_info = &tx_ring->buffer_info[i];
+ tx_desc = IXGB_TX_DESC(*tx_ring, i);
+ tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
+ tx_desc->cmd_type_len =
+ cpu_to_le32(cmd_type_len | buffer_info->length);
+ tx_desc->status = status;
+ tx_desc->popts = popts;
+ tx_desc->vlan = cpu_to_le16(vlan_id);
+
+ if (++i == tx_ring->count) i = 0;
+ }
+
+ tx_desc->cmd_type_len |=
+ cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+
+ tx_ring->next_to_use = i;
+ IXGB_WRITE_REG(&adapter->hw, TDT, i);
+}
+
+static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+
+ netif_stop_queue(netdev);
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it. */
+ smp_mb();
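+ /* this barrier pairs with the smp_mb() in ixgb_clean_tx_irq(): the
+ * cleaner either sees the stopped queue, or we see the descriptors
+ * it just freed in the recheck below */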
+
+ /* We need to check again in case another CPU has just
+ * made room available. */
+ if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! */
+ netif_start_queue(netdev);
+ ++adapter->restart_queue;
+ return 0;
+}
+
+static int ixgb_maybe_stop_tx(struct net_device *netdev,
+ struct ixgb_desc_ring *tx_ring, int size)
+{
+ if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __ixgb_maybe_stop_tx(netdev, size);
+}
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
+ (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
+#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
+ MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
+ + 1 /* one more needed for sentinel TSO workaround */
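+/* Worked example, assuming 4 KiB pages and MAX_SKB_FRAGS == 16 (the exact
+ * fragment count varies with kernel configuration, so this is illustrative):
+ * head: TXD_USE_COUNT(16384) = 1
+ * frags: 16 * TXD_USE_COUNT(4096) = 16
+ * context descriptor + TSO sentinel = 2
+ * giving DESC_NEEDED = 19 descriptors reserved per worst-case skb.
+ */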
+
+static netdev_tx_t
+ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ unsigned int first;
+ unsigned int tx_flags = 0;
+ int vlan_id = 0;
+ int count = 0;
+ int tso;
+
+ if (test_bit(__IXGB_DOWN, &adapter->flags)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb->len <= 0) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
+ DESC_NEEDED)))
+ return NETDEV_TX_BUSY;
+
+ if (vlan_tx_tag_present(skb)) {
+ tx_flags |= IXGB_TX_FLAGS_VLAN;
+ vlan_id = vlan_tx_tag_get(skb);
+ }
+
+ first = adapter->tx_ring.next_to_use;
+
+ tso = ixgb_tso(adapter, skb);
+ if (tso < 0) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (likely(tso))
+ tx_flags |= IXGB_TX_FLAGS_TSO;
+ else if (ixgb_tx_csum(adapter, skb))
+ tx_flags |= IXGB_TX_FLAGS_CSUM;
+
+ count = ixgb_tx_map(adapter, skb, first);
+
+ if (count) {
+ ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
+ /* Make sure there is space in the ring for the next send. */
+ ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
+
+ } else {
+ dev_kfree_skb_any(skb);
+ adapter->tx_ring.buffer_info[first].time_stamp = 0;
+ adapter->tx_ring.next_to_use = first;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ixgb_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+
+static void
+ixgb_tx_timeout(struct net_device *netdev)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->tx_timeout_task);
+}
+
+static void
+ixgb_tx_timeout_task(struct work_struct *work)
+{
+ struct ixgb_adapter *adapter =
+ container_of(work, struct ixgb_adapter, tx_timeout_task);
+
+ adapter->tx_timeout_count++;
+ ixgb_down(adapter, true);
+ ixgb_up(adapter);
+}
+
+/**
+ * ixgb_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+
+static struct net_device_stats *
+ixgb_get_stats(struct net_device *netdev)
+{
+ return &netdev->stats;
+}
+
+/**
+ * ixgb_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int
+ixgb_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
+ int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
+
+ /* MTU < 68 is an error for IPv4 traffic, just don't allow it */
+ if ((new_mtu < 68) ||
+ (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
+ netif_err(adapter, probe, adapter->netdev,
+ "Invalid MTU setting %d\n", new_mtu);
+ return -EINVAL;
+ }
+
+ if (old_max_frame == max_frame)
+ return 0;
+
+ if (netif_running(netdev))
+ ixgb_down(adapter, true);
+
+ adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */
+
+ netdev->mtu = new_mtu;
+
+ if (netif_running(netdev))
+ ixgb_up(adapter);
+
+ return 0;
+}
+
+/**
+ * ixgb_update_stats - Update the board statistics counters.
+ * @adapter: board private structure
+ **/
+
+void
+ixgb_update_stats(struct ixgb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* Prevent stats update while adapter is being reset */
+ if (pci_channel_offline(pdev))
+ return;
+
+ if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
+ u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
+ u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
+ u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
+ u64 bcast = ((u64)bcast_h << 32) | bcast_l;
+
+ multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
+ /* fix up multicast stats by removing broadcasts */
+ if (multi >= bcast)
+ multi -= bcast;
+
+ adapter->stats.mprcl += (multi & 0xFFFFFFFF);
+ adapter->stats.mprch += (multi >> 32);
+ adapter->stats.bprcl += bcast_l;
+ adapter->stats.bprch += bcast_h;
+ } else {
+ adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
+ adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
+ adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
+ adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
+ }
+ adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
+ adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
+ adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
+ adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
+ adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
+ adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
+ adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
+ adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
+ adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
+ adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
+ adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
+ adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
+ adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
+ adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
+ adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
+ adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
+ adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
+ adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
+ adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
+ adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
+ adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
+ adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
+ adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
+ adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
+ adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
+ adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
+ adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
+ adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
+ adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
+ adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
+ adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
+ adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
+ adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
+ adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
+ adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
+ adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
+ adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
+ adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
+ adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
+ adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
+ adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
+ adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
+ adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
+ adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
+ adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
+ adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
+ adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
+ adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
+ adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
+ adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
+ adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
+ adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
+ adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
+ adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
+ adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
+ adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
+
+ /* Fill out the OS statistics structure */
+
+ netdev->stats.rx_packets = adapter->stats.gprcl;
+ netdev->stats.tx_packets = adapter->stats.gptcl;
+ netdev->stats.rx_bytes = adapter->stats.gorcl;
+ netdev->stats.tx_bytes = adapter->stats.gotcl;
+ netdev->stats.multicast = adapter->stats.mprcl;
+ netdev->stats.collisions = 0;
+
+ /* ignore RLEC as it reports errors for padded (< 64 bytes) frames
+ * with a length in the type/len field */
+ netdev->stats.rx_errors =
+ /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
+ adapter->stats.ruc +
+ adapter->stats.roc /*+ adapter->stats.rlec */ +
+ adapter->stats.icbc +
+ adapter->stats.ecbc + adapter->stats.mpc;
+
+ /* see above
+ * netdev->stats.rx_length_errors = adapter->stats.rlec;
+ */
+
+ netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
+ netdev->stats.rx_fifo_errors = adapter->stats.mpc;
+ netdev->stats.rx_missed_errors = adapter->stats.mpc;
+ netdev->stats.rx_over_errors = adapter->stats.mpc;
+
+ netdev->stats.tx_errors = 0;
+ netdev->stats.rx_frame_errors = 0;
+ netdev->stats.tx_aborted_errors = 0;
+ netdev->stats.tx_carrier_errors = 0;
+ netdev->stats.tx_fifo_errors = 0;
+ netdev->stats.tx_heartbeat_errors = 0;
+ netdev->stats.tx_window_errors = 0;
+}
+
+#define IXGB_MAX_INTR 10
+/**
+ * ixgb_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+
+static irqreturn_t
+ixgb_intr(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ struct ixgb_hw *hw = &adapter->hw;
+ u32 icr = IXGB_READ_REG(hw, ICR);
+
+ if (unlikely(!icr))
+ return IRQ_NONE; /* Not our interrupt */
+
+ if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
+ if (!test_bit(__IXGB_DOWN, &adapter->flags))
+ mod_timer(&adapter->watchdog_timer, jiffies);
+
+ if (napi_schedule_prep(&adapter->napi)) {
+ /* Disable interrupts and register for poll. The flush
+ * of the posted write is intentionally left out. */
+ IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
+ __napi_schedule(&adapter->napi);
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * ixgb_clean - NAPI Rx polling callback
+ * @napi: napi polling context (embedded in our adapter structure)
+ * @budget: maximum number of Rx packets to process in this poll
+ **/
+
+static int
+ixgb_clean(struct napi_struct *napi, int budget)
+{
+ struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
+ int work_done = 0;
+
+ ixgb_clean_tx_irq(adapter);
+ ixgb_clean_rx_irq(adapter, &work_done, budget);
+
+ /* If budget not fully consumed, exit the polling mode */
+ if (work_done < budget) {
+ napi_complete(napi);
+ if (!test_bit(__IXGB_DOWN, &adapter->flags))
+ ixgb_irq_enable(adapter);
+ }
+
+ return work_done;
+}
+
+/**
+ * ixgb_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ **/
+
+static bool
+ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
+{
+ struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+ struct net_device *netdev = adapter->netdev;
+ struct ixgb_tx_desc *tx_desc, *eop_desc;
+ struct ixgb_buffer *buffer_info;
+ unsigned int i, eop;
+ bool cleaned = false;
+
+ i = tx_ring->next_to_clean;
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = IXGB_TX_DESC(*tx_ring, eop);
+
+ while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
+
+ rmb(); /* read buffer_info after eop_desc */
+ for (cleaned = false; !cleaned; ) {
+ tx_desc = IXGB_TX_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+
+ if (tx_desc->popts &
+ (IXGB_TX_DESC_POPTS_TXSM |
+ IXGB_TX_DESC_POPTS_IXSM))
+ adapter->hw_csum_tx_good++;
+
+ ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
+
+ *(u32 *)&(tx_desc->status) = 0;
+
+ cleaned = (i == eop);
+ if (++i == tx_ring->count) i = 0;
+ }
+
+ eop = tx_ring->buffer_info[i].next_to_watch;
+ eop_desc = IXGB_TX_DESC(*tx_ring, eop);
+ }
+
+ tx_ring->next_to_clean = i;
+
+ if (unlikely(cleaned && netif_carrier_ok(netdev) &&
+ IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean. */
+ smp_mb();
+
+ if (netif_queue_stopped(netdev) &&
+ !(test_bit(__IXGB_DOWN, &adapter->flags))) {
+ netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ }
+ }
+
+ if (adapter->detect_tx_hung) {
+ /* detect a transmit hang in hardware, this serializes the
+ * check with the clearing of time_stamp and movement of i */
+ adapter->detect_tx_hung = false;
+ if (tx_ring->buffer_info[eop].time_stamp &&
+ time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
+ && !(IXGB_READ_REG(&adapter->hw, STATUS) &
+ IXGB_STATUS_TXOFF)) {
+ /* detected Tx unit hang */
+ netif_err(adapter, drv, adapter->netdev,
+ "Detected Tx Unit Hang\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%x>\n"
+ " jiffies <%lx>\n"
+ " next_to_watch.status <%x>\n",
+ IXGB_READ_REG(&adapter->hw, TDH),
+ IXGB_READ_REG(&adapter->hw, TDT),
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_ring->buffer_info[eop].time_stamp,
+ eop,
+ jiffies,
+ eop_desc->status);
+ netif_stop_queue(netdev);
+ }
+ }
+
+ return cleaned;
+}
+
+/**
+ * ixgb_rx_checksum - Receive Checksum Offload for 82597.
+ * @adapter: board private structure
+ * @rx_desc: receive descriptor
+ * @skb: socket buffer with received data
+ **/
+
+static void
+ixgb_rx_checksum(struct ixgb_adapter *adapter,
+ struct ixgb_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ /* the Ignore Checksum bit is set, or the hardware did not
+ * calculate a TCP checksum for this packet
+ */
+ if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
+ (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
+ skb_checksum_none_assert(skb);
+ return;
+ }
+
+ /* At this point we know the hardware did the TCP checksum */
+ /* now look at the TCP checksum error bit */
+ if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
+ /* let the stack verify checksum errors */
+ skb_checksum_none_assert(skb);
+ adapter->hw_csum_rx_error++;
+ } else {
+ /* TCP checksum is good */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ adapter->hw_csum_rx_good++;
+ }
+}
+
+/*
+ * this should improve performance for small packets with large amounts
+ * of reassembly being done in the stack
+ */
+static void ixgb_check_copybreak(struct net_device *netdev,
+ struct ixgb_buffer *buffer_info,
+ u32 length, struct sk_buff **skb)
+{
+ struct sk_buff *new_skb;
+
+ if (length > copybreak)
+ return;
+
+ new_skb = netdev_alloc_skb_ip_align(netdev, length);
+ if (!new_skb)
+ return;
+
+ skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
+ (*skb)->data - NET_IP_ALIGN,
+ length + NET_IP_ALIGN);
+ /* save the skb in buffer_info as good */
+ buffer_info->skb = *skb;
+ *skb = new_skb;
+}
+
+/**
+ * ixgb_clean_rx_irq - Send received data up the network stack
+ * @adapter: board private structure
+ **/
+
+static bool
+ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
+{
+ struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct ixgb_rx_desc *rx_desc, *next_rxd;
+ struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
+ u32 length;
+ unsigned int i, j;
+ int cleaned_count = 0;
+ bool cleaned = false;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = IXGB_RX_DESC(*rx_ring, i);
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
+ struct sk_buff *skb;
+ u8 status;
+
+ if (*work_done >= work_to_do)
+ break;
+
+ (*work_done)++;
+ rmb(); /* read descriptor and rx_buffer_info after status DD */
+ status = rx_desc->status;
+ skb = buffer_info->skb;
+ buffer_info->skb = NULL;
+
+ prefetch(skb->data - NET_IP_ALIGN);
+
+ if (++i == rx_ring->count)
+ i = 0;
+ next_rxd = IXGB_RX_DESC(*rx_ring, i);
+ prefetch(next_rxd);
+
+ j = i + 1;
+ if (j == rx_ring->count)
+ j = 0;
+ next2_buffer = &rx_ring->buffer_info[j];
+ prefetch(next2_buffer);
+
+ next_buffer = &rx_ring->buffer_info[i];
+
+ cleaned = true;
+ cleaned_count++;
+
+ dma_unmap_single(&pdev->dev,
+ buffer_info->dma,
+ buffer_info->length,
+ DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+
+ length = le16_to_cpu(rx_desc->length);
+ rx_desc->length = 0;
+
+ if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
+
+ /* All receives must fit into a single buffer */
+
+ IXGB_DBG("Receive packet consumed multiple buffers "
+ "length<%x>\n", length);
+
+ dev_kfree_skb_irq(skb);
+ goto rxdesc_done;
+ }
+
+ if (unlikely(rx_desc->errors &
+ (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
+ IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
+ dev_kfree_skb_irq(skb);
+ goto rxdesc_done;
+ }
+
+ ixgb_check_copybreak(netdev, buffer_info, length, &skb);
+
+ /* Good Receive */
+ skb_put(skb, length);
+
+ /* Receive Checksum Offload */
+ ixgb_rx_checksum(adapter, rx_desc, skb);
+
+ skb->protocol = eth_type_trans(skb, netdev);
+ if (status & IXGB_RX_DESC_STATUS_VP)
+ __vlan_hwaccel_put_tag(skb,
+ le16_to_cpu(rx_desc->special));
+
+ netif_receive_skb(skb);
+
+rxdesc_done:
+ /* clean up descriptor, might be written over by hw */
+ rx_desc->status = 0;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
+ ixgb_alloc_rx_buffers(adapter, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+ }
+
+ rx_ring->next_to_clean = i;
+
+ cleaned_count = IXGB_DESC_UNUSED(rx_ring);
+ if (cleaned_count)
+ ixgb_alloc_rx_buffers(adapter, cleaned_count);
+
+ return cleaned;
+}
+
+/**
+ * ixgb_alloc_rx_buffers - Replace used receive buffers
+ * @adapter: address of board private structure
+ * @cleaned_count: how many receive buffers to allocate and map
+ **/
+
+static void
+ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
+{
+ struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct ixgb_rx_desc *rx_desc;
+ struct ixgb_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ long cleancount;
+
+ i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
+ cleancount = IXGB_DESC_UNUSED(rx_ring);
+
+ /* leave three descriptors unused */
+ while (--cleancount > 2 && cleaned_count--) {
+		/* recycle! it's good for you */
+ skb = buffer_info->skb;
+ if (skb) {
+ skb_trim(skb, 0);
+ goto map_skb;
+ }
+
+ skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+
+ buffer_info->skb = skb;
+ buffer_info->length = adapter->rx_buffer_len;
+map_skb:
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data,
+ adapter->rx_buffer_len,
+ DMA_FROM_DEVICE);
+
+ rx_desc = IXGB_RX_DESC(*rx_ring, i);
+ rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
+		/* guarantee the DD bit is not set before h/w gets the
+		 * descriptor; this is the rest of the workaround for the
+		 * h/w double writeback. */
+ rx_desc->status = 0;
+
+		if (++i == rx_ring->count)
+			i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
+ }
+
+ if (likely(rx_ring->next_to_use != i)) {
+ rx_ring->next_to_use = i;
+ if (unlikely(i-- == 0))
+ i = (rx_ring->count - 1);
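+		/* i now indexes the last descriptor actually filled;
+		 * RDT is written with that index rather than next_to_use,
+		 * so the tail never points at a descriptor the driver has
+		 * not initialized.
+		 */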
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs, such
+ * as IA-64). */
+ wmb();
+ IXGB_WRITE_REG(&adapter->hw, RDT, i);
+ }
+}
+
+static void
+ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
+{
+ u32 ctrl;
+
+ /* enable VLAN tag insert/strip */
+ ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
+ ctrl |= IXGB_CTRL0_VME;
+ IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
+}
+
+static void
+ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
+{
+ u32 ctrl;
+
+ /* disable VLAN tag insert/strip */
+ ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
+ ctrl &= ~IXGB_CTRL0_VME;
+ IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
+}
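+
+/* Illustrative example of the VFTA mapping used below: each of the 128
+ * VFTA registers holds 32 VLAN bits, so e.g. vid 100 lands in register
+ * index (100 >> 5) & 0x7F == 3, bit (100 & 0x1F) == 4.
+ */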
+
+static void
+ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ u32 vfta, index;
+
+ /* add VID to filter table */
+
+ index = (vid >> 5) & 0x7F;
+ vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
+ vfta |= (1 << (vid & 0x1F));
+ ixgb_write_vfta(&adapter->hw, index, vfta);
+ set_bit(vid, adapter->active_vlans);
+}
+
+static void
+ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ u32 vfta, index;
+
+ /* remove VID from filter table */
+
+ index = (vid >> 5) & 0x7F;
+ vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
+ vfta &= ~(1 << (vid & 0x1F));
+ ixgb_write_vfta(&adapter->hw, index, vfta);
+ clear_bit(vid, adapter->active_vlans);
+}
+
+static void
+ixgb_restore_vlan(struct ixgb_adapter *adapter)
+{
+ u16 vid;
+
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ ixgb_vlan_rx_add_vid(adapter->netdev, vid);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+
+static void ixgb_netpoll(struct net_device *dev)
+{
+ struct ixgb_adapter *adapter = netdev_priv(dev);
+
+ disable_irq(adapter->pdev->irq);
+ ixgb_intr(adapter->pdev->irq, dev);
+ enable_irq(adapter->pdev->irq);
+}
+#endif
+
+/**
+ * ixgb_io_error_detected() - called when PCI error is detected
+ * @pdev: pointer to pci device with error
+ * @state: pci channel state after error
+ *
+ * This callback is called by the PCI subsystem whenever
+ * a PCI bus error is detected.
+ */
+static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
+ enum pci_channel_state state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ ixgb_down(adapter, true);
+
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * ixgb_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: pointer to pci device with error
+ *
+ * This callback is called after the PCI bus has been reset.
+ * Basically, this tries to restart the card from scratch.
+ * This is a shortened version of the device probe/discovery code,
+ * it resembles the first-half of the ixgb_probe() routine.
+ */
+static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+
+ if (pci_enable_device(pdev)) {
+ netif_err(adapter, probe, adapter->netdev,
+ "Cannot re-enable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ /* Perform card reset only on one instance of the card */
+	if (PCI_FUNC(pdev->devfn) != 0)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ pci_set_master(pdev);
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ ixgb_reset(adapter);
+
+ /* Make sure the EEPROM is good */
+ if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
+ netif_err(adapter, probe, adapter->netdev,
+ "After reset, the EEPROM checksum is not valid\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
+ memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->perm_addr)) {
+ netif_err(adapter, probe, adapter->netdev,
+ "After reset, invalid MAC address\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * ixgb_io_resume - called when it's OK to resume normal operations
+ * @pdev: pointer to pci device with error
+ *
+ * The error recovery driver tells us that it's OK to resume
+ * normal operation. Implementation resembles the second-half
+ * of the ixgb_probe() routine.
+ */
+static void ixgb_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+
+ pci_set_master(pdev);
+
+ if (netif_running(netdev)) {
+ if (ixgb_up(adapter)) {
+ pr_err("can't bring device back up after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+ mod_timer(&adapter->watchdog_timer, jiffies);
+}
+
+/* ixgb_main.c */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h
new file mode 100644
index 000000000000..8fc905192231
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h
@@ -0,0 +1,64 @@
+/*******************************************************************************
+
+ Intel PRO/10GbE Linux driver
+ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* glue for the OS independent part of ixgb
+ * includes register access macros
+ */
+
+#ifndef _IXGB_OSDEP_H_
+#define _IXGB_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/if_ether.h>
+
+#undef ASSERT
+#define ASSERT(x) BUG_ON(!(x))
+
+#define ENTER() pr_debug("%s\n", __func__);
+
+#define IXGB_WRITE_REG(a, reg, value) ( \
+ writel((value), ((a)->hw_addr + IXGB_##reg)))
+
+#define IXGB_READ_REG(a, reg) ( \
+ readl((a)->hw_addr + IXGB_##reg))
+
+#define IXGB_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+ writel((value), ((a)->hw_addr + IXGB_##reg + ((offset) << 2))))
+
+#define IXGB_READ_REG_ARRAY(a, reg, offset) ( \
+ readl((a)->hw_addr + IXGB_##reg + ((offset) << 2)))
+
+#define IXGB_WRITE_FLUSH(a) IXGB_READ_REG(a, STATUS)
+
+#define IXGB_MEMCPY memcpy
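+
+/* Example expansion (illustrative): IXGB_READ_REG(&adapter->hw, STATUS)
+ * becomes readl((&adapter->hw)->hw_addr + IXGB_STATUS); the reg token is
+ * pasted onto the IXGB_ prefix to form the register offset, and the
+ * array variants add a dword-scaled (offset << 2) displacement.
+ */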
+
+#endif /* _IXGB_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_param.c b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
new file mode 100644
index 000000000000..07d83ab46e21
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
@@ -0,0 +1,469 @@
+/*******************************************************************************
+
+ Intel PRO/10GbE Linux driver
+ Copyright(c) 1999 - 2008 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "ixgb.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define IXGB_MAX_NIC 8
+
+#define OPTION_UNSET -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED 1
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define IXGB_PARAM_INIT { [0 ... IXGB_MAX_NIC] = OPTION_UNSET }
+#define IXGB_PARAM(X, desc) \
+ static int __devinitdata X[IXGB_MAX_NIC+1] \
+ = IXGB_PARAM_INIT; \
+ static unsigned int num_##X = 0; \
+ module_param_array_named(X, X, int, &num_##X, 0); \
+ MODULE_PARM_DESC(X, desc);
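+
+/* For illustration, IXGB_PARAM(TxDescriptors, "...") expands to roughly:
+ *
+ *	static int __devinitdata TxDescriptors[IXGB_MAX_NIC+1] =
+ *		{ [0 ... IXGB_MAX_NIC] = OPTION_UNSET };
+ *	static unsigned int num_TxDescriptors = 0;
+ *	module_param_array_named(TxDescriptors, TxDescriptors, int,
+ *				 &num_TxDescriptors, 0);
+ *
+ * so per-port values can be passed at load time, e.g.
+ * "modprobe ixgb TxDescriptors=128,256" for boards 0 and 1.
+ */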
+
+/* Transmit Descriptor Count
+ *
+ * Valid Range: 64-4096
+ *
+ * Default Value: 256
+ */
+
+IXGB_PARAM(TxDescriptors, "Number of transmit descriptors");
+
+/* Receive Descriptor Count
+ *
+ * Valid Range: 64-4096
+ *
+ * Default Value: 1024
+ */
+
+IXGB_PARAM(RxDescriptors, "Number of receive descriptors");
+
+/* User Specified Flow Control Override
+ *
+ * Valid Range: 0-3
+ * - 0 - No Flow Control
+ * - 1 - Rx only, respond to PAUSE frames but do not generate them
+ * - 2 - Tx only, generate PAUSE frames but ignore them on receive
+ * - 3 - Full Flow Control Support
+ *
+ * Default Value: 2 - Tx only (silicon bug avoidance)
+ */
+
+IXGB_PARAM(FlowControl, "Flow Control setting");
+
+/* XsumRX - Receive Checksum Offload Enable/Disable
+ *
+ * Valid Range: 0, 1
+ * - 0 - disables all checksum offload
+ * - 1 - enables receive IP/TCP/UDP checksum offload
+ * on 82597 based NICs
+ *
+ * Default Value: 1
+ */
+
+IXGB_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
+
+/* Transmit Interrupt Delay in units of 0.8192 microseconds
+ *
+ * Valid Range: 0-65535
+ *
+ * Default Value: 32
+ */
+
+IXGB_PARAM(TxIntDelay, "Transmit Interrupt Delay");
+
+/* Receive Interrupt Delay in units of 0.8192 microseconds
+ *
+ * Valid Range: 0-65535
+ *
+ * Default Value: 72
+ */
+
+IXGB_PARAM(RxIntDelay, "Receive Interrupt Delay");
+
+/* Receive Flow control high threshold (when we send a pause frame)
+ * (FCRTH)
+ *
+ * Valid Range: 1,536 - 262,136 (0x600 - 0x3FFF8, 8 byte granularity)
+ *
+ * Default Value: 196,608 (0x30000)
+ */
+
+IXGB_PARAM(RxFCHighThresh, "Receive Flow Control High Threshold");
+
+/* Receive Flow control low threshold (when we send a resume frame)
+ * (FCRTL)
+ *
+ * Valid Range: 64 - 262,136 (0x40 - 0x3FFF8, 8 byte granularity)
+ * must be less than high threshold by at least 8 bytes
+ *
+ * Default Value: 163,840 (0x28000)
+ */
+
+IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold");
+
+/* Flow control request timeout (how long to pause the link partner's tx)
+ * (PAP 15:0)
+ *
+ * Valid Range: 1 - 65535
+ *
+ * Default Value: 65535 (0xffff) (we'll send an xon if we recover)
+ */
+
+IXGB_PARAM(FCReqTimeout, "Flow Control Request Timeout");
+
+/* Interrupt Delay Enable
+ *
+ * Valid Range: 0, 1
+ *
+ * - 0 - disables transmit interrupt delay
+ * - 1 - enables transmit interrupt delay
+ *
+ * Default Value: 1
+ */
+
+IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable");
+
+
+#define DEFAULT_TIDV 32
+#define MAX_TIDV 0xFFFF
+#define MIN_TIDV 0
+
+#define DEFAULT_RDTR 72
+#define MAX_RDTR 0xFFFF
+#define MIN_RDTR 0
+
+#define XSUMRX_DEFAULT OPTION_ENABLED
+
+#define DEFAULT_FCRTL 0x28000
+#define DEFAULT_FCRTH 0x30000
+#define MIN_FCRTL 0
+#define MAX_FCRTL 0x3FFE8
+#define MIN_FCRTH 8
+#define MAX_FCRTH 0x3FFF0
+
+#define MIN_FCPAUSE 1
+#define MAX_FCPAUSE 0xffff
+#define DEFAULT_FCPAUSE 0xFFFF /* this may be too long */
+
+struct ixgb_option {
+ enum { enable_option, range_option, list_option } type;
+ const char *name;
+ const char *err;
+ int def;
+ union {
+ struct { /* range_option info */
+ int min;
+ int max;
+ } r;
+ struct { /* list_option info */
+ int nr;
+ const struct ixgb_opt_list {
+ int i;
+ const char *str;
+ } *p;
+ } l;
+ } arg;
+};
+
+static int __devinit
+ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
+{
+ if (*value == OPTION_UNSET) {
+ *value = opt->def;
+ return 0;
+ }
+
+ switch (opt->type) {
+ case enable_option:
+ switch (*value) {
+ case OPTION_ENABLED:
+ pr_info("%s Enabled\n", opt->name);
+ return 0;
+ case OPTION_DISABLED:
+ pr_info("%s Disabled\n", opt->name);
+ return 0;
+ }
+ break;
+ case range_option:
+ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+ pr_info("%s set to %i\n", opt->name, *value);
+ return 0;
+ }
+ break;
+ case list_option: {
+ int i;
+ const struct ixgb_opt_list *ent;
+
+ for (i = 0; i < opt->arg.l.nr; i++) {
+ ent = &opt->arg.l.p[i];
+ if (*value == ent->i) {
+ if (ent->str[0] != '\0')
+ pr_info("%s\n", ent->str);
+ return 0;
+ }
+ }
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ pr_info("Invalid %s specified (%i) %s\n", opt->name, *value, opt->err);
+ *value = opt->def;
+ return -1;
+}
+
+/**
+ * ixgb_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input. If an invalid value is given, or if no user specified
+ * value exists, a default value is used. The final value is stored
+ * in a variable in the adapter structure.
+ **/
+
+void __devinit
+ixgb_check_options(struct ixgb_adapter *adapter)
+{
+ int bd = adapter->bd_number;
+ if (bd >= IXGB_MAX_NIC) {
+ pr_notice("Warning: no configuration for board #%i\n", bd);
+ pr_notice("Using defaults for all values\n");
+ }
+
+ { /* Transmit Descriptor Count */
+ static const struct ixgb_option opt = {
+ .type = range_option,
+ .name = "Transmit Descriptors",
+ .err = "using default of " __MODULE_STRING(DEFAULT_TXD),
+ .def = DEFAULT_TXD,
+ .arg = { .r = { .min = MIN_TXD,
+ .max = MAX_TXD}}
+ };
+ struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+
+ if (num_TxDescriptors > bd) {
+ tx_ring->count = TxDescriptors[bd];
+ ixgb_validate_option(&tx_ring->count, &opt);
+ } else {
+ tx_ring->count = opt.def;
+ }
+ tx_ring->count = ALIGN(tx_ring->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
+ }
+ { /* Receive Descriptor Count */
+ static const struct ixgb_option opt = {
+ .type = range_option,
+ .name = "Receive Descriptors",
+ .err = "using default of " __MODULE_STRING(DEFAULT_RXD),
+ .def = DEFAULT_RXD,
+ .arg = { .r = { .min = MIN_RXD,
+ .max = MAX_RXD}}
+ };
+ struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
+
+ if (num_RxDescriptors > bd) {
+ rx_ring->count = RxDescriptors[bd];
+ ixgb_validate_option(&rx_ring->count, &opt);
+ } else {
+ rx_ring->count = opt.def;
+ }
+ rx_ring->count = ALIGN(rx_ring->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
+ }
+ { /* Receive Checksum Offload Enable */
+ static const struct ixgb_option opt = {
+ .type = enable_option,
+ .name = "Receive Checksum Offload",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+
+ if (num_XsumRX > bd) {
+ unsigned int rx_csum = XsumRX[bd];
+ ixgb_validate_option(&rx_csum, &opt);
+ adapter->rx_csum = rx_csum;
+ } else {
+ adapter->rx_csum = opt.def;
+ }
+ }
+ { /* Flow Control */
+
+ static const struct ixgb_opt_list fc_list[] = {
+ { ixgb_fc_none, "Flow Control Disabled" },
+ { ixgb_fc_rx_pause, "Flow Control Receive Only" },
+ { ixgb_fc_tx_pause, "Flow Control Transmit Only" },
+ { ixgb_fc_full, "Flow Control Enabled" },
+ { ixgb_fc_default, "Flow Control Hardware Default" }
+ };
+
+ static const struct ixgb_option opt = {
+ .type = list_option,
+ .name = "Flow Control",
+ .err = "reading default settings from EEPROM",
+ .def = ixgb_fc_tx_pause,
+ .arg = { .l = { .nr = ARRAY_SIZE(fc_list),
+ .p = fc_list }}
+ };
+
+ if (num_FlowControl > bd) {
+ unsigned int fc = FlowControl[bd];
+ ixgb_validate_option(&fc, &opt);
+ adapter->hw.fc.type = fc;
+ } else {
+ adapter->hw.fc.type = opt.def;
+ }
+ }
+ { /* Receive Flow Control High Threshold */
+ static const struct ixgb_option opt = {
+ .type = range_option,
+ .name = "Rx Flow Control High Threshold",
+ .err = "using default of " __MODULE_STRING(DEFAULT_FCRTH),
+ .def = DEFAULT_FCRTH,
+ .arg = { .r = { .min = MIN_FCRTH,
+ .max = MAX_FCRTH}}
+ };
+
+ if (num_RxFCHighThresh > bd) {
+ adapter->hw.fc.high_water = RxFCHighThresh[bd];
+ ixgb_validate_option(&adapter->hw.fc.high_water, &opt);
+ } else {
+ adapter->hw.fc.high_water = opt.def;
+ }
+		if (!(adapter->hw.fc.type & ixgb_fc_tx_pause))
+ pr_info("Ignoring RxFCHighThresh when no RxFC\n");
+ }
+ { /* Receive Flow Control Low Threshold */
+ static const struct ixgb_option opt = {
+ .type = range_option,
+ .name = "Rx Flow Control Low Threshold",
+ .err = "using default of " __MODULE_STRING(DEFAULT_FCRTL),
+ .def = DEFAULT_FCRTL,
+ .arg = { .r = { .min = MIN_FCRTL,
+ .max = MAX_FCRTL}}
+ };
+
+ if (num_RxFCLowThresh > bd) {
+ adapter->hw.fc.low_water = RxFCLowThresh[bd];
+ ixgb_validate_option(&adapter->hw.fc.low_water, &opt);
+ } else {
+ adapter->hw.fc.low_water = opt.def;
+ }
+		if (!(adapter->hw.fc.type & ixgb_fc_tx_pause))
+ pr_info("Ignoring RxFCLowThresh when no RxFC\n");
+ }
+	{ /* Flow Control Pause Time Request */
+ static const struct ixgb_option opt = {
+ .type = range_option,
+ .name = "Flow Control Pause Time Request",
+ .err = "using default of "__MODULE_STRING(DEFAULT_FCPAUSE),
+ .def = DEFAULT_FCPAUSE,
+ .arg = { .r = { .min = MIN_FCPAUSE,
+ .max = MAX_FCPAUSE}}
+ };
+
+ if (num_FCReqTimeout > bd) {
+ unsigned int pause_time = FCReqTimeout[bd];
+ ixgb_validate_option(&pause_time, &opt);
+ adapter->hw.fc.pause_time = pause_time;
+ } else {
+ adapter->hw.fc.pause_time = opt.def;
+ }
+		if (!(adapter->hw.fc.type & ixgb_fc_tx_pause))
+ pr_info("Ignoring FCReqTimeout when no RxFC\n");
+ }
+	/* high, low, and spacing check for rx flow control thresholds */
+ if (adapter->hw.fc.type & ixgb_fc_tx_pause) {
+ /* high must be greater than low */
+ if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
+ /* set defaults */
+ pr_info("RxFCHighThresh must be >= (RxFCLowThresh + 8), Using Defaults\n");
+ adapter->hw.fc.high_water = DEFAULT_FCRTH;
+ adapter->hw.fc.low_water = DEFAULT_FCRTL;
+ }
+ }
+ { /* Receive Interrupt Delay */
+ static const struct ixgb_option opt = {
+ .type = range_option,
+ .name = "Receive Interrupt Delay",
+ .err = "using default of " __MODULE_STRING(DEFAULT_RDTR),
+ .def = DEFAULT_RDTR,
+ .arg = { .r = { .min = MIN_RDTR,
+ .max = MAX_RDTR}}
+ };
+
+ if (num_RxIntDelay > bd) {
+ adapter->rx_int_delay = RxIntDelay[bd];
+ ixgb_validate_option(&adapter->rx_int_delay, &opt);
+ } else {
+ adapter->rx_int_delay = opt.def;
+ }
+ }
+ { /* Transmit Interrupt Delay */
+ static const struct ixgb_option opt = {
+ .type = range_option,
+ .name = "Transmit Interrupt Delay",
+ .err = "using default of " __MODULE_STRING(DEFAULT_TIDV),
+ .def = DEFAULT_TIDV,
+ .arg = { .r = { .min = MIN_TIDV,
+ .max = MAX_TIDV}}
+ };
+
+ if (num_TxIntDelay > bd) {
+ adapter->tx_int_delay = TxIntDelay[bd];
+ ixgb_validate_option(&adapter->tx_int_delay, &opt);
+ } else {
+ adapter->tx_int_delay = opt.def;
+ }
+ }
+
+ { /* Transmit Interrupt Delay Enable */
+ static const struct ixgb_option opt = {
+ .type = enable_option,
+ .name = "Tx Interrupt Delay Enable",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+
+ if (num_IntDelayEnable > bd) {
+ unsigned int ide = IntDelayEnable[bd];
+ ixgb_validate_option(&ide, &opt);
+ adapter->tx_int_delay_enable = ide;
+ } else {
+ adapter->tx_int_delay_enable = opt.def;
+ }
+ }
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
new file mode 100644
index 000000000000..7d7387fbdecd
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -0,0 +1,42 @@
+################################################################################
+#
+# Intel 10 Gigabit PCI Express Linux driver
+# Copyright(c) 1999 - 2010 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Linux NICS <linux.nics@intel.com>
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) 10GbE PCI Express ethernet driver
+#
+
+obj-$(CONFIG_IXGBE) += ixgbe.o
+
+ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
+ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
+ ixgbe_mbx.o ixgbe_x540.o
+
+ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
+ ixgbe_dcb_82599.o ixgbe_dcb_nl.o
+
+ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
new file mode 100644
index 000000000000..38940d72991d
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -0,0 +1,626 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_H_
+#define _IXGBE_H_
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/cpumask.h>
+#include <linux/aer.h>
+#include <linux/if_vlan.h>
+
+#include "ixgbe_type.h"
+#include "ixgbe_common.h"
+#include "ixgbe_dcb.h"
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#define IXGBE_FCOE
+#include "ixgbe_fcoe.h"
+#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
+#ifdef CONFIG_IXGBE_DCA
+#include <linux/dca.h>
+#endif
+
+/* common prefix used by pr_<> macros */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+/* TX/RX descriptor defines */
+#define IXGBE_DEFAULT_TXD 512
+#define IXGBE_DEFAULT_TX_WORK 256
+#define IXGBE_MAX_TXD 4096
+#define IXGBE_MIN_TXD 64
+
+#define IXGBE_DEFAULT_RXD 512
+#define IXGBE_MAX_RXD 4096
+#define IXGBE_MIN_RXD 64
+
+/* flow control */
+#define IXGBE_MIN_FCRTL 0x40
+#define IXGBE_MAX_FCRTL 0x7FF80
+#define IXGBE_MIN_FCRTH 0x600
+#define IXGBE_MAX_FCRTH 0x7FFF0
+#define IXGBE_DEFAULT_FCPAUSE 0xFFFF
+#define IXGBE_MIN_FCPAUSE 0
+#define IXGBE_MAX_FCPAUSE 0xFFFF
+
+/* Supported Rx Buffer Sizes */
+#define IXGBE_RXBUFFER_512 512 /* Used for packet split */
+#define IXGBE_RXBUFFER_2K 2048
+#define IXGBE_RXBUFFER_3K 3072
+#define IXGBE_RXBUFFER_4K 4096
+#define IXGBE_RXBUFFER_7K 7168
+#define IXGBE_RXBUFFER_8K 8192
+#define IXGBE_RXBUFFER_15K 15360
+#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
+
+/*
+ * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
+ * this adds up to 512 bytes of extra data, meaning the smallest allocation
+ * we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab
+ */
+#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define IXGBE_TX_FLAGS_CSUM (u32)(1)
+#define IXGBE_TX_FLAGS_HW_VLAN (u32)(1 << 1)
+#define IXGBE_TX_FLAGS_SW_VLAN (u32)(1 << 2)
+#define IXGBE_TX_FLAGS_TSO (u32)(1 << 3)
+#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 4)
+#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5)
+#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
+#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
+#define IXGBE_TX_FLAGS_MAPPED_AS_PAGE (u32)(1 << 8)
+#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
+#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
+
+#define IXGBE_MAX_RSC_INT_RATE 162760
+
+#define IXGBE_MAX_VF_MC_ENTRIES 30
+#define IXGBE_MAX_VF_FUNCTIONS 64
+#define IXGBE_MAX_VFTA_ENTRIES 128
+#define MAX_EMULATION_MAC_ADDRS 16
+#define IXGBE_MAX_PF_MACVLANS 15
+#define VMDQ_P(p) ((p) + adapter->num_vfs)
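+
+/* e.g. with num_vfs == 4, VMDQ_P(0) == 4: pool indices owned by the PF
+ * are offset past the pools reserved for the VFs (illustrative note).
+ */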
+
+struct vf_data_storage {
+ unsigned char vf_mac_addresses[ETH_ALEN];
+ u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
+ u16 num_vf_mc_hashes;
+ u16 default_vf_vlan_id;
+ u16 vlans_enabled;
+ bool clear_to_send;
+ bool pf_set_mac;
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
+ u16 tx_rate;
+ struct pci_dev *vfdev;
+};
+
+struct vf_macvlans {
+ struct list_head l;
+ int vf;
+ int rar_entry;
+ bool free;
+ bool is_macvlan;
+ u8 vf_macvlan[ETH_ALEN];
+};
+
+#define IXGBE_MAX_TXD_PWR 14
+#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
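+
+/* Worked example (illustrative): with IXGBE_MAX_DATA_PER_TXD = 16K and
+ * 4K pages, TXD_USE_COUNT(PAGE_SIZE) == 1, so a worst-case skb needs
+ * roughly MAX_SKB_FRAGS + 4 descriptors; the extra 4 leave headroom for
+ * the linear data and context descriptors.
+ */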
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct ixgbe_tx_buffer {
+ union ixgbe_adv_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+ dma_addr_t dma;
+ u32 length;
+ u32 tx_flags;
+ struct sk_buff *skb;
+ u32 bytecount;
+ u16 gso_segs;
+};
+
+struct ixgbe_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ dma_addr_t page_dma;
+ unsigned int page_offset;
+};
+
+struct ixgbe_queue_stats {
+ u64 packets;
+ u64 bytes;
+};
+
+struct ixgbe_tx_queue_stats {
+ u64 restart_queue;
+ u64 tx_busy;
+ u64 completed;
+ u64 tx_done_old;
+};
+
+struct ixgbe_rx_queue_stats {
+ u64 rsc_count;
+ u64 rsc_flush;
+ u64 non_eop_descs;
+ u64 alloc_rx_page_failed;
+ u64 alloc_rx_buff_failed;
+};
+
+enum ixgbe_ring_state_t {
+ __IXGBE_TX_FDIR_INIT_DONE,
+ __IXGBE_TX_DETECT_HANG,
+ __IXGBE_HANG_CHECK_ARMED,
+ __IXGBE_RX_PS_ENABLED,
+ __IXGBE_RX_RSC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+ test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+ set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+ clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+ test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+ set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+ clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_rsc_enabled(ring) \
+ test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define set_ring_rsc_enabled(ring) \
+ set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define clear_ring_rsc_enabled(ring) \
+ clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+struct ixgbe_ring {
+ struct ixgbe_ring *next; /* pointer to next ring in q_vector */
+ void *desc; /* descriptor ring memory */
+ struct device *dev; /* device for DMA mapping */
+ struct net_device *netdev; /* netdev ring belongs to */
+ union {
+ struct ixgbe_tx_buffer *tx_buffer_info;
+ struct ixgbe_rx_buffer *rx_buffer_info;
+ };
+ unsigned long state;
+ u8 __iomem *tail;
+
+	u16 count;			/* number of descriptors */
+ u16 rx_buf_len;
+
+ u8 queue_index; /* needed for multiqueue queue management */
+ u8 reg_idx; /* holds the special value that gets
+ * the hardware register offset
+ * associated with this ring, which is
+ * different for DCB and RSS modes
+ */
+ u8 atr_sample_rate;
+ u8 atr_count;
+
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ u8 dcb_tc;
+ struct ixgbe_queue_stats stats;
+ struct u64_stats_sync syncp;
+ union {
+ struct ixgbe_tx_queue_stats tx_stats;
+ struct ixgbe_rx_queue_stats rx_stats;
+ };
+ int numa_node;
+ unsigned int size; /* length in bytes */
+ dma_addr_t dma; /* phys. address of descriptor ring */
+ struct rcu_head rcu;
+ struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
+} ____cacheline_internodealigned_in_smp;
+
+enum ixgbe_ring_f_enum {
+ RING_F_NONE = 0,
+ RING_F_VMDQ, /* SR-IOV uses the same ring feature */
+ RING_F_RSS,
+ RING_F_FDIR,
+#ifdef IXGBE_FCOE
+ RING_F_FCOE,
+#endif /* IXGBE_FCOE */
+
+ RING_F_ARRAY_SIZE /* must be last in enum set */
+};
+
+#define IXGBE_MAX_RSS_INDICES 16
+#define IXGBE_MAX_VMDQ_INDICES 64
+#define IXGBE_MAX_FDIR_INDICES 64
+#ifdef IXGBE_FCOE
+#define IXGBE_MAX_FCOE_INDICES 8
+#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
+#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
+#else
+#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
+#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
+#endif /* IXGBE_FCOE */
+struct ixgbe_ring_feature {
+ int indices;
+ int mask;
+} ____cacheline_internodealigned_in_smp;
+
+struct ixgbe_ring_container {
+ struct ixgbe_ring *ring; /* pointer to linked list of rings */
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u16 work_limit; /* total work allowed per interrupt */
+ u8 count; /* total number of rings in vector */
+ u8 itr; /* current ITR setting for ring */
+};
+
+#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
+ ? 8 : 1)
+#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct ixgbe_q_vector {
+ struct ixgbe_adapter *adapter;
+#ifdef CONFIG_IXGBE_DCA
+ int cpu; /* CPU for DCA */
+#endif
+ u16 v_idx; /* index of q_vector within array, also used for
+ * finding the bit in EICR and friends that
+ * represents the vector for this ring */
+ u16 itr; /* Interrupt throttle rate written to EITR */
+ struct ixgbe_ring_container rx, tx;
+
+ struct napi_struct napi;
+ cpumask_var_t affinity_mask;
+ char name[IFNAMSIZ + 9];
+};
+
+/*
+ * microsecond interval values for various ITR rates, shifted left by 2
+ * to fit the ITR register, whose lowest 3 bits are reserved as 0
+ */
+#define IXGBE_MIN_RSC_ITR 24
+#define IXGBE_100K_ITR 40
+#define IXGBE_20K_ITR 200
+#define IXGBE_10K_ITR 400
+#define IXGBE_8K_ITR 500
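+
+/* Illustrative derivation: for a target rate of 20K interrupts/sec the
+ * interval is 1000000 / 20000 = 50us, and 50 << 2 == 200, which is the
+ * IXGBE_20K_ITR value above.
+ */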
+
+static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
+{
+ u16 ntc = ring->next_to_clean;
+ u16 ntu = ring->next_to_use;
+
+ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
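+
+/* Worked example (illustrative): with count = 512, next_to_clean = 4
+ * and next_to_use = 10, six descriptors are in flight and the helper
+ * returns 512 + 4 - 10 - 1 = 505; one slot is always left unused so
+ * the ring never appears full and empty at the same time.
+ */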
+
+#define IXGBE_RX_DESC_ADV(R, i) \
+ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
+#define IXGBE_TX_DESC_ADV(R, i) \
+ (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
+#define IXGBE_TX_CTXTDESC_ADV(R, i) \
+ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
+
+#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
+#ifdef IXGBE_FCOE
+/* Use 3K as the baby jumbo frame size for FCoE */
+#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072
+#endif /* IXGBE_FCOE */
+
+#define OTHER_VECTOR 1
+#define NON_Q_VECTORS (OTHER_VECTOR)
+
+#define MAX_MSIX_VECTORS_82599 64
+#define MAX_MSIX_Q_VECTORS_82599 64
+#define MAX_MSIX_VECTORS_82598 18
+#define MAX_MSIX_Q_VECTORS_82598 16
+
+#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599
+#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
+
+#define MIN_MSIX_Q_VECTORS 2
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+
+/* board specific private data structure */
+struct ixgbe_adapter {
+ unsigned long state;
+
+ /* Some features need tri-state capability,
+ * thus the additional *_CAPABLE flags.
+ */
+ u32 flags;
+#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
+#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1)
+#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2)
+#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3)
+#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6)
+#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7)
+#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8)
+#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9)
+#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10)
+#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11)
+#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12)
+#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13)
+#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14)
+#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16)
+#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17)
+#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18)
+#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
+#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20)
+#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
+#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 23)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 24)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 25)
+#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 26)
+#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 27)
+#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 28)
+#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 29)
+
+ u32 flags2;
+#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
+#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
+#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
+#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3)
+#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 4)
+#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5)
+#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6)
+#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
+
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u16 bd_number;
+ struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+
+ /* DCB parameters */
+ struct ieee_pfc *ixgbe_ieee_pfc;
+ struct ieee_ets *ixgbe_ieee_ets;
+ struct ixgbe_dcb_config dcb_cfg;
+ struct ixgbe_dcb_config temp_dcb_cfg;
+ u8 dcb_set_bitmap;
+ u8 dcbx_cap;
+ enum ixgbe_fc_mode last_lfc_mode;
+
+ /* Interrupt Throttle Rate */
+ u32 rx_itr_setting;
+ u32 tx_itr_setting;
+ u16 eitr_low;
+ u16 eitr_high;
+
+ /* Work limits */
+ u16 tx_work_limit;
+
+ /* TX */
+ struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
+ int num_tx_queues;
+ u32 tx_timeout_count;
+ bool detect_tx_hung;
+
+ u64 restart_queue;
+ u64 lsc_int;
+
+ /* RX */
+ struct ixgbe_ring *rx_ring[MAX_RX_QUEUES] ____cacheline_aligned_in_smp;
+ int num_rx_queues;
+ int num_rx_pools; /* == num_rx_queues in 82598 */
+ int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
+ u64 hw_csum_rx_error;
+ u64 hw_rx_no_dma_resources;
+ u64 non_eop_descs;
+ int num_msix_vectors;
+ int max_msix_q_vectors; /* true count of q_vectors for device */
+ struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
+ struct msix_entry *msix_entries;
+
+ u32 alloc_rx_page_failed;
+ u32 alloc_rx_buff_failed;
+
+/* default to trying for four seconds */
+#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
+
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
+ u32 test_icr;
+ struct ixgbe_ring test_tx_ring;
+ struct ixgbe_ring test_rx_ring;
+
+ /* structs defined in ixgbe_hw.h */
+ struct ixgbe_hw hw;
+ u16 msg_enable;
+ struct ixgbe_hw_stats stats;
+
+ /* Interrupt Throttle Rate */
+ u32 rx_eitr_param;
+ u32 tx_eitr_param;
+
+ u64 tx_busy;
+ unsigned int tx_ring_count;
+ unsigned int rx_ring_count;
+
+ u32 link_speed;
+ bool link_up;
+ unsigned long link_check_timeout;
+
+ struct work_struct service_task;
+ struct timer_list service_timer;
+ u32 fdir_pballoc;
+ u32 atr_sample_rate;
+ unsigned long fdir_overflow; /* number of times ATR was backed off */
+ spinlock_t fdir_perfect_lock;
+#ifdef IXGBE_FCOE
+ struct ixgbe_fcoe fcoe;
+#endif /* IXGBE_FCOE */
+ u64 rsc_total_count;
+ u64 rsc_total_flush;
+ u32 wol;
+ u16 eeprom_version;
+ u16 eeprom_cap;
+
+ int node;
+ u32 led_reg;
+ u32 interrupt_event;
+
+ /* SR-IOV */
+ DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
+ unsigned int num_vfs;
+ struct vf_data_storage *vfinfo;
+ int vf_rate_link_speed;
+ struct vf_macvlans vf_mvs;
+ struct vf_macvlans *mv_list;
+ bool antispoofing_enabled;
+
+ struct hlist_head fdir_filter_list;
+ union ixgbe_atr_input fdir_mask;
+ int fdir_filter_count;
+};
+
+struct ixgbe_fdir_filter {
+ struct hlist_node fdir_node;
+ union ixgbe_atr_input filter;
+ u16 sw_idx;
+ u16 action;
+};
+
+enum ixgbe_state_t {
+ __IXGBE_TESTING,
+ __IXGBE_RESETTING,
+ __IXGBE_DOWN,
+ __IXGBE_SERVICE_SCHED,
+ __IXGBE_IN_SFP_INIT,
+};
+
+struct ixgbe_rsc_cb {
+ dma_addr_t dma;
+ u16 skb_cnt;
+ bool delay_unmap;
+};
+#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
+
+enum ixgbe_boards {
+ board_82598,
+ board_82599,
+ board_X540,
+};
+
+extern struct ixgbe_info ixgbe_82598_info;
+extern struct ixgbe_info ixgbe_82599_info;
+extern struct ixgbe_info ixgbe_X540_info;
+#ifdef CONFIG_IXGBE_DCB
+extern const struct dcbnl_rtnl_ops dcbnl_ops;
+extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
+ struct ixgbe_dcb_config *dst_dcb_cfg,
+ int tc_max);
+#endif
+
+extern char ixgbe_driver_name[];
+extern const char ixgbe_driver_version[];
+
+extern void ixgbe_up(struct ixgbe_adapter *adapter);
+extern void ixgbe_down(struct ixgbe_adapter *adapter);
+extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
+extern void ixgbe_reset(struct ixgbe_adapter *adapter);
+extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
+extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
+extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
+extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
+extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
+extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *);
+extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
+extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
+extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
+extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
+ struct ixgbe_adapter *,
+ struct ixgbe_ring *);
+extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
+ struct ixgbe_tx_buffer *);
+extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
+extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
+extern int ethtool_ioctl(struct ifreq *ifr);
+extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
+ u8 queue);
+extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input_mask);
+extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id, u8 queue);
+extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id);
+extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ union ixgbe_atr_input *mask);
+extern void ixgbe_set_rx_mode(struct net_device *netdev);
+extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
+extern void ixgbe_do_reset(struct net_device *netdev);
+#ifdef IXGBE_FCOE
+extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
+extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, u8 *hdr_len);
+extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
+extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb,
+ u32 staterr);
+extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc);
+extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc);
+extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+extern int ixgbe_fcoe_enable(struct net_device *netdev);
+extern int ixgbe_fcoe_disable(struct net_device *netdev);
+#ifdef CONFIG_IXGBE_DCB
+extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
+extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
+#endif /* CONFIG_IXGBE_DCB */
+extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
+#endif /* IXGBE_FCOE */
+
+#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
new file mode 100644
index 000000000000..e02e911057de
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -0,0 +1,1334 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "ixgbe.h"
+#include "ixgbe_phy.h"
+
+#define IXGBE_82598_MAX_TX_QUEUES 32
+#define IXGBE_82598_MAX_RX_QUEUES 64
+#define IXGBE_82598_RAR_ENTRIES 16
+#define IXGBE_82598_MC_TBL_SIZE 128
+#define IXGBE_82598_VFT_TBL_SIZE 128
+#define IXGBE_82598_RX_PB_SIZE 512
+
+static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
+static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data);
+
+/**
+ * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
+ * @hw: pointer to the HW structure
+ *
+ * The defaults for 82598 should be in the range of 50us to 50ms;
+ * however, the hardware default for these parts is 500us to 1ms, which is
+ * less than the 10ms recommended by the pci-e spec. To address this we
+ * need to increase the value to either the 10ms to 250ms range for
+ * capability version 1 configs, or the 16ms to 55ms range for version 2.
+ **/
+static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
+ u16 pcie_devctl2;
+
+ /* only take action if timeout value is defaulted to 0 */
+ if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
+ goto out;
+
+ /*
+	 * if the capabilities version is type 1 we can write the
+ * timeout of 10ms to 250ms through the GCR register
+ */
+ if (!(gcr & IXGBE_GCR_CAP_VER2)) {
+ gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
+ goto out;
+ }
+
+ /*
+ * for version 2 capabilities we need to write the config space
+ * directly in order to set the completion timeout value for
+ * 16ms to 55ms
+ */
+ pci_read_config_word(adapter->pdev,
+ IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2);
+ pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
+ pci_write_config_word(adapter->pdev,
+ IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
+out:
+ /* disable completion timeout resend */
+ gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
+ IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
+}
+
+/**
+ * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
+ * @hw: pointer to hardware structure
+ *
+ * Read PCIe configuration space, and get the MSI-X vector count from
+ * the capabilities table.
+ **/
+static u16 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ u16 msix_count;
+ pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82598_CAPS,
+ &msix_count);
+ msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
+
+ /* MSI-X count is zero-based in HW, so increment to give proper value */
+ msix_count++;
+
+ return msix_count;
+}
+
+/**
+ * ixgbe_get_invariants_82598 - Collect invariant device data
+ * @hw: pointer to hardware structure
+ *
+ * Identifies the PHY and fills in the 82598 MAC invariants: multicast
+ * and VLAN table sizes, RAR entries, queue counts and MSI-X vector count.
+ **/
+static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+
+ /* Call PHY identify routine to get the phy type */
+ ixgbe_identify_phy_generic(hw);
+
+ mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
+ mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
+ mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
+ mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
+ mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during get_invariants because the PHY/SFP type was
+ * not known. Perform the SFP init if necessary.
+ *
+ **/
+static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+ u16 list_offset, data_offset;
+
+ /* Identify the PHY */
+ phy->ops.identify(hw);
+
+ /* Overwrite the link function pointers if copper PHY */
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+ mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
+ mac->ops.get_link_capabilities =
+ &ixgbe_get_copper_link_capabilities_generic;
+ }
+
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
+ phy->ops.check_link = &ixgbe_check_phy_link_tnx;
+ phy->ops.get_firmware_version =
+ &ixgbe_get_phy_firmware_version_tnx;
+ break;
+ case ixgbe_phy_nl:
+ phy->ops.reset = &ixgbe_reset_phy_nl;
+
+ /* Call SFP+ identify routine to get the SFP+ module type */
+ ret_val = phy->ops.identify_sfp(hw);
+ if (ret_val != 0)
+ goto out;
+ else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
+ ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* Check to see if SFP+ module is supported */
+ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
+ &list_offset,
+ &data_offset);
+ if (ret_val != 0) {
+ ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+ break;
+ default:
+ break;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware using the generic start_hw function.
+ * Disables relaxed ordering, then sets the pcie completion timeout.
+ *
+ **/
+static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
+{
+ u32 regval;
+ u32 i;
+ s32 ret_val = 0;
+
+ ret_val = ixgbe_start_hw_generic(hw);
+
+ /* Disable relaxed ordering */
+ for (i = 0; ((i < hw->mac.max_tx_queues) &&
+ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+ regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
+ }
+
+ for (i = 0; ((i < hw->mac.max_rx_queues) &&
+ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+ IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+ }
+
+ hw->mac.rx_pb_size = IXGBE_82598_RX_PB_SIZE;
+
+ /* set the completion timeout for interface */
+ if (ret_val == 0)
+ ixgbe_set_pcie_completion_timeout(hw);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_link_capabilities_82598 - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: boolean auto-negotiation value
+ *
+ * Determines the link capabilities by reading the AUTOC register.
+ **/
+static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ s32 status = 0;
+ u32 autoc = 0;
+
+ /*
+ * Determine link capabilities based on the stored value of AUTOC,
+ * which represents EEPROM defaults. If AUTOC value has not been
+ * stored, use the current register value.
+ */
+ if (hw->mac.orig_link_settings_stored)
+ autoc = hw->mac.orig_autoc;
+ else
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = false;
+ break;
+
+ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ *autoneg = false;
+ break;
+
+ case IXGBE_AUTOC_LMS_1G_AN:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ break;
+
+ case IXGBE_AUTOC_LMS_KX4_AN:
+ case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = true;
+ break;
+
+ default:
+ status = IXGBE_ERR_LINK_SETUP;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_media_type_82598 - Determines media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ **/
+static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
+{
+ enum ixgbe_media_type media_type;
+
+ /* Detect if there is a copper PHY attached. */
+ switch (hw->phy.type) {
+ case ixgbe_phy_cu_unknown:
+ case ixgbe_phy_tn:
+ media_type = ixgbe_media_type_copper;
+ goto out;
+ default:
+ break;
+ }
+
+ /* Media type for I82598 is based on device ID */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598:
+ case IXGBE_DEV_ID_82598_BX:
+ /* Default device ID is mezzanine card KX/KX4 */
+ media_type = ixgbe_media_type_backplane;
+ break;
+ case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+ case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+ case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+ case IXGBE_DEV_ID_82598EB_XF_LR:
+ case IXGBE_DEV_ID_82598EB_SFP_LOM:
+ media_type = ixgbe_media_type_fiber;
+ break;
+ case IXGBE_DEV_ID_82598EB_CX4:
+ case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+ media_type = ixgbe_media_type_cx4;
+ break;
+ case IXGBE_DEV_ID_82598AT:
+ case IXGBE_DEV_ID_82598AT2:
+ media_type = ixgbe_media_type_copper;
+ break;
+ default:
+ media_type = ixgbe_media_type_unknown;
+ break;
+ }
+out:
+ return media_type;
+}
+
+/**
+ * ixgbe_fc_enable_82598 - Enable flow control
+ * @hw: pointer to hardware structure
+ * @packetbuf_num: packet buffer number (0-7)
+ *
+ * Enable flow control according to the current settings.
+ **/
+static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
+{
+ s32 ret_val = 0;
+ u32 fctrl_reg;
+ u32 rmcs_reg;
+ u32 reg;
+ u32 link_speed = 0;
+ bool link_up;
+
+#ifdef CONFIG_DCB
+ if (hw->fc.requested_mode == ixgbe_fc_pfc)
+ goto out;
+
+#endif /* CONFIG_DCB */
+	/*
+	 * On 82598, having Rx FC on causes resets while doing 1G,
+	 * so if it's on, turn it off once we know link_speed. For
+	 * more details see the 82598 Specification Update.
+	 */
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_full:
+ hw->fc.requested_mode = ixgbe_fc_tx_pause;
+ break;
+ case ixgbe_fc_rx_pause:
+ hw->fc.requested_mode = ixgbe_fc_none;
+ break;
+ default:
+ /* no change */
+ break;
+ }
+ }
+
+ /* Negotiate the fc mode to use */
+ ret_val = ixgbe_fc_autoneg(hw);
+ if (ret_val == IXGBE_ERR_FLOW_CONTROL)
+ goto out;
+
+ /* Disable any previous flow control settings */
+ fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
+
+ rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+ rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
+
+ /*
+ * The possible values of fc.current_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+#ifdef CONFIG_DCB
+ * 4: Priority Flow Control is enabled.
+#endif
+ * other: Invalid.
+ */
+ switch (hw->fc.current_mode) {
+ case ixgbe_fc_none:
+ /*
+ * Flow control is disabled by software override or autoneg.
+ * The code below will actually disable it in the HW.
+ */
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ fctrl_reg |= IXGBE_FCTRL_RFCE;
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+ break;
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ fctrl_reg |= IXGBE_FCTRL_RFCE;
+ rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+ break;
+#ifdef CONFIG_DCB
+ case ixgbe_fc_pfc:
+ goto out;
+#endif /* CONFIG_DCB */
+ default:
+ hw_dbg(hw, "Flow control param set incorrectly\n");
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ }
+
+ /* Set 802.3x based flow control settings. */
+ fctrl_reg |= IXGBE_FCTRL_DPF;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
+ IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
+
+ /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+ if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+ reg = hw->fc.low_water << 6;
+ if (hw->fc.send_xon)
+ reg |= IXGBE_FCRTL_XONE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
+
+ reg = hw->fc.high_water[packetbuf_num] << 6;
+ reg |= IXGBE_FCRTH_FCEN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
+ }
+
+ /* Configure pause time (2 TCs per register) */
+ reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
+ if ((packetbuf_num & 1) == 0)
+ reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
+ else
+ reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
+
+out:
+ return ret_val;
+}
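+
+/*
+ * Editor's note: an illustrative-only sketch of how the hw->fc fields
+ * consumed above might be populated before calling the fc_enable op.
+ * The numeric values are example assumptions, not recommendations.
+ */
+#if 0	/* illustrative only */
+static s32 example_enable_symmetric_fc_82598(struct ixgbe_hw *hw)
+{
+	hw->fc.requested_mode = ixgbe_fc_full;
+	hw->fc.pause_time = 0x680;	/* example pause quanta */
+	hw->fc.send_xon = true;
+	hw->fc.low_water = 0x40;	/* example thresholds */
+	hw->fc.high_water[0] = 0x80;
+
+	return hw->mac.ops.fc_enable(hw, 0);	/* packet buffer 0 */
+}
+#endif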
+
+/**
+ * ixgbe_start_mac_link_82598 - Configures MAC link settings
+ * @hw: pointer to hardware structure
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Configures link settings based on values in the ixgbe_hw struct.
+ * Restarts the link. Performs autonegotiation if needed.
+ **/
+static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete)
+{
+ u32 autoc_reg;
+ u32 links_reg;
+ u32 i;
+ s32 status = 0;
+
+ /* Restart link */
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+ /* Only poll for autoneg to complete if specified to do so */
+ if (autoneg_wait_to_complete) {
+ if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_AN ||
+ (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+ links_reg = 0; /* Just in case Autoneg time = 0 */
+ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+ break;
+ msleep(100);
+ }
+ if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+ status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ hw_dbg(hw, "Autonegotiation did not complete.\n");
+ }
+ }
+ }
+
+	/* Add delay to filter out noise during initial link setup */
+ msleep(50);
+
+ return status;
+}
+
+/**
+ * ixgbe_validate_link_ready - Function looks for phy link
+ * @hw: pointer to hardware structure
+ *
+ * Function indicates success when phy link is available. If phy is not ready
+ * within 5 seconds of the MAC indicating link, the function returns an error.
+ **/
+static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
+{
+ u32 timeout;
+ u16 an_reg;
+
+ if (hw->device_id != IXGBE_DEV_ID_82598AT2)
+ return 0;
+
+ for (timeout = 0;
+ timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
+ hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg);
+
+ if ((an_reg & MDIO_AN_STAT1_COMPLETE) &&
+ (an_reg & MDIO_STAT1_LSTATUS))
+ break;
+
+ msleep(100);
+ }
+
+ if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
+ hw_dbg(hw, "Link was indicated but link is down\n");
+ return IXGBE_ERR_LINK_SETUP;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_check_mac_link_82598 - Get link/speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *link_up,
+ bool link_up_wait_to_complete)
+{
+ u32 links_reg;
+ u32 i;
+ u16 link_reg, adapt_comp_reg;
+
+ /*
+ * SERDES PHY requires us to read link status from register 0xC79F.
+ * Bit 0 set indicates link is up/ready; clear indicates link down.
+ * 0xC00C is read to check that the XAUI lanes are active. Bit 0
+ * clear indicates active; set indicates inactive.
+ */
+ if (hw->phy.type == ixgbe_phy_nl) {
+ hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
+ hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
+ hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD,
+ &adapt_comp_reg);
+ if (link_up_wait_to_complete) {
+ for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+ if ((link_reg & 1) &&
+ ((adapt_comp_reg & 1) == 0)) {
+ *link_up = true;
+ break;
+ } else {
+ *link_up = false;
+ }
+ msleep(100);
+ hw->phy.ops.read_reg(hw, 0xC79F,
+ MDIO_MMD_PMAPMD,
+ &link_reg);
+ hw->phy.ops.read_reg(hw, 0xC00C,
+ MDIO_MMD_PMAPMD,
+ &adapt_comp_reg);
+ }
+ } else {
+ if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
+ *link_up = true;
+ else
+ *link_up = false;
+ }
+
+		if (!*link_up)
+ goto out;
+ }
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (link_up_wait_to_complete) {
+ for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+ if (links_reg & IXGBE_LINKS_UP) {
+ *link_up = true;
+ break;
+ } else {
+ *link_up = false;
+ }
+ msleep(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ }
+ } else {
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = true;
+ else
+ *link_up = false;
+ }
+
+ if (links_reg & IXGBE_LINKS_SPEED)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && *link_up &&
+	    (ixgbe_validate_link_ready(hw) != 0))
+ *link_up = false;
+
+out:
+ return 0;
+}
+
+/**
+ * ixgbe_setup_mac_link_82598 - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if auto-negotiation enabled
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Sets the link speed in the AUTOC register and restarts link.
+ **/
+static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ s32 status = 0;
+ ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+ u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc = curr_autoc;
+ u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
+
+	/* Check to see if speed passed in is supported. */
+	status = ixgbe_get_link_capabilities_82598(hw, &link_capabilities,
+						   &autoneg);
+	if (status != 0)
+		return status;
+
+ speed &= link_capabilities;
+
+ if (speed == IXGBE_LINK_SPEED_UNKNOWN)
+ status = IXGBE_ERR_LINK_SETUP;
+
+ /* Set KX4/KX support according to speed requested */
+ else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+ autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ autoc |= IXGBE_AUTOC_KX4_SUPP;
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ autoc |= IXGBE_AUTOC_KX_SUPP;
+ if (autoc != curr_autoc)
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+ }
+
+ if (status == 0) {
+ /*
+ * Setup and restart the link based on the new values in
+ * ixgbe_hw This will write the AUTOC register based on the new
+ * stored values
+ */
+ status = ixgbe_start_mac_link_82598(hw,
+ autoneg_wait_to_complete);
+ }
+
+ return status;
+}
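+
+/*
+ * Editor's note: illustrative-only sketch of forcing a 1GbE link through
+ * the setup_link op installed in mac_ops_82598 below; the wrapper name is
+ * hypothetical.
+ */
+#if 0	/* illustrative only */
+static s32 example_force_1g_82598(struct ixgbe_hw *hw)
+{
+	/* speed, autoneg, autoneg_wait_to_complete */
+	return hw->mac.ops.setup_link(hw, IXGBE_LINK_SPEED_1GB_FULL,
+				      true, true);
+}
+#endif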
+
+/**
+ * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true if waiting is needed to complete
+ *
+ * Restarts link on the PHY and MAC based on the settings passed in.
+ **/
+static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ s32 status;
+
+ /* Setup the PHY according to input speed */
+ status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+ /* Set up MAC */
+ ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
+
+ return status;
+}
+
+/**
+ * ixgbe_reset_hw_82598 - Performs hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, performs a PHY reset, and performs a link
+ * (MAC) reset.
+ **/
+static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ s32 phy_status = 0;
+ u32 ctrl;
+ u32 gheccr;
+ u32 i;
+ u32 autoc;
+ u8 analog_val;
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != 0)
+ goto reset_hw_out;
+
+ /*
+ * Power up the Atlas Tx lanes if they are currently powered down.
+ * Atlas Tx lanes are powered down for MAC loopback tests, but
+ * they are not automatically restored on reset.
+ */
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
+ if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
+ /* Enable Tx Atlas so packets can be transmitted again */
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+ analog_val);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+ analog_val);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+ analog_val);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+ &analog_val);
+ analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+ analog_val);
+ }
+
+ /* Reset PHY */
+	if (!hw->phy.reset_disable) {
+ /* PHY ops must be identified and initialized prior to reset */
+
+ /* Init PHY and function pointers, perform SFP setup */
+ phy_status = hw->phy.ops.init(hw);
+ if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto reset_hw_out;
+ if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto mac_reset_top;
+
+ hw->phy.ops.reset(hw);
+ }
+
+mac_reset_top:
+ /*
+ * Issue global reset to the MAC. This needs to be a SW reset.
+ * If link reset is used, it might reset the MAC when mng is using it
+ */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ udelay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST))
+ break;
+ }
+ if (ctrl & IXGBE_CTRL_RST) {
+ status = IXGBE_ERR_RESET_FAILED;
+ hw_dbg(hw, "Reset polling failed to complete.\n");
+ }
+
+ msleep(50);
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
+ gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
+ IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
+
+ /*
+ * Store the original AUTOC value if it has not been
+ * stored off yet. Otherwise restore the stored original
+	 * AUTOC value since the reset operation sets back to defaults.
+ */
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	if (!hw->mac.orig_link_settings_stored) {
+ hw->mac.orig_autoc = autoc;
+ hw->mac.orig_link_settings_stored = true;
+ } else if (autoc != hw->mac.orig_autoc) {
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+ }
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table
+ */
+ hw->mac.ops.init_rx_addrs(hw);
+
+reset_hw_out:
+ if (phy_status)
+ status = phy_status;
+
+ return status;
+}
+
+/**
+ * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @vmdq: VMDq set index
+ **/
+static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+ rar_high &= ~IXGBE_RAH_VIND_MASK;
+ rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+ return 0;
+}
+
+/**
+ * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
+ **/
+static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+ if (rar_high & IXGBE_RAH_VIND_MASK) {
+ rar_high &= ~IXGBE_RAH_VIND_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_set_vfta_82598 - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VFTA
+ * @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ u32 regindex;
+ u32 bitindex;
+ u32 bits;
+ u32 vftabyte;
+
+ if (vlan > 4095)
+ return IXGBE_ERR_PARAM;
+
+ /* Determine 32-bit word position in array */
+ regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
+
+ /* Determine the location of the (VMD) queue index */
+ vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
+ bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
+
+ /* Set the nibble for VMD queue index */
+ bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
+ bits &= (~(0x0F << bitindex));
+ bits |= (vind << bitindex);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
+
+ /* Determine the location of the bit for this VLAN id */
+ bitindex = vlan & 0x1F; /* lower five bits */
+
+ bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+ if (vlan_on)
+ /* Turn on this VLAN id */
+ bits |= (1 << bitindex);
+ else
+ /* Turn off this VLAN id */
+ bits &= ~(1 << bitindex);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
+
+ return 0;
+}
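+
+/*
+ * Editor's note: a worked example of the VFTA index math above
+ * (illustrative). For vlan = 100: regindex = (100 >> 5) & 0x7F = 3 and the
+ * VLAN enable bit is bitindex = 100 & 0x1F = 4 within IXGBE_VFTA(3). For
+ * the VMDq nibble, vftabyte = (100 >> 3) & 0x03 = 0 and the nibble sits at
+ * bit offset (100 & 0x7) << 2 = 16 within IXGBE_VFTAVIND(0, 3).
+ */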
+
+/**
+ * ixgbe_clear_vfta_82598 - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
+{
+ u32 offset;
+ u32 vlanbyte;
+
+ for (offset = 0; offset < hw->mac.vft_size; offset++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+ for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
+ for (offset = 0; offset < hw->mac.vft_size; offset++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
+ 0);
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to read
+ * @val: read value
+ *
+ * Performs read operation to Atlas analog register specified.
+ **/
+static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+ u32 atlas_ctl;
+
+ IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
+ IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(10);
+ atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
+ *val = (u8)atlas_ctl;
+
+ return 0;
+}
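+
+/*
+ * Editor's note (illustrative): as the two accessors above and below show,
+ * the ATLASCTL command word carries the analog register address in bits
+ * 15:8 and the 8-bit data in bits 7:0, so writing 0x12 to register 0xC4
+ * issues (0xC4 << 8) | 0x12 = 0xC412.
+ */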
+
+/**
+ * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
+ * @hw: pointer to hardware structure
+ * @reg: atlas register to write
+ * @val: value to write
+ *
+ * Performs write operation to Atlas analog register specified.
+ **/
+static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+ u32 atlas_ctl;
+
+ atlas_ctl = (reg << 8) | val;
+ IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(10);
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs an 8 bit read operation to the SFP module's EEPROM over I2C.
+ **/
+static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data)
+{
+ s32 status = 0;
+ u16 sfp_addr = 0;
+ u16 sfp_data = 0;
+ u16 sfp_stat = 0;
+ u32 i;
+
+ if (hw->phy.type == ixgbe_phy_nl) {
+ /*
+ * phy SDA/SCL registers are at addresses 0xC30A to
+ * 0xC30D. These registers are used to talk to the SFP+
+ * module's EEPROM through the SDA/SCL (I2C) interface.
+ */
+ sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
+ sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
+ hw->phy.ops.write_reg(hw,
+ IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
+ MDIO_MMD_PMAPMD,
+ sfp_addr);
+
+ /* Poll status */
+ for (i = 0; i < 100; i++) {
+ hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
+ MDIO_MMD_PMAPMD,
+ &sfp_stat);
+ sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
+ if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
+ break;
+ usleep_range(10000, 20000);
+ }
+
+ if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
+ hw_dbg(hw, "EEPROM read did not pass.\n");
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ goto out;
+ }
+
+ /* Read data */
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
+ MDIO_MMD_PMAPMD, &sfp_data);
+
+ *eeprom_data = (u8)(sfp_data >> 8);
+ } else {
+ status = IXGBE_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return status;
+}
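+
+/*
+ * Editor's note (illustrative): the address word above packs the I2C
+ * device address into bits 15:8 and the byte offset into bits 7:0, with
+ * the read mask then OR'd in. Assuming the conventional SFP EEPROM device
+ * address 0xA0, reading byte offset 0x60 would start from
+ * sfp_addr = (0xA0 << 8) + 0x60 = 0xA060, before the read mask is applied.
+ */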
+
+/**
+ * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
+{
+ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
+ u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+ u16 ext_ability = 0;
+
+ hw->phy.ops.identify(hw);
+
+ /* Copper PHY must be checked before AUTOC LMS to determine correct
+ * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ case ixgbe_phy_cu_unknown:
+ hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE,
+ MDIO_MMD_PMAPMD, &ext_ability);
+ if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+ goto out;
+ default:
+ break;
+ }
+
+ switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+ case IXGBE_AUTOC_LMS_1G_AN:
+ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ else
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
+ break;
+ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+ else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ else /* XAUI */
+ physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ break;
+ case IXGBE_AUTOC_LMS_KX4_AN:
+ case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ break;
+ default:
+ break;
+ }
+
+ if (hw->phy.type == ixgbe_phy_nl) {
+ hw->phy.ops.identify_sfp(hw);
+
+ switch (hw->phy.sfp_type) {
+ case ixgbe_sfp_type_da_cu:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+ break;
+ case ixgbe_sfp_type_sr:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ break;
+ case ixgbe_sfp_type_lr:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ break;
+ default:
+ physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ break;
+ }
+ }
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+ break;
+ case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+ case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ break;
+ case IXGBE_DEV_ID_82598EB_XF_LR:
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ break;
+ default:
+ break;
+ }
+
+out:
+ return physical_layer;
+}
+
+/**
+ * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
+ * port devices.
+ * @hw: pointer to the HW structure
+ *
+ * Calls common function and corrects issue with some single port devices
+ * that enable LAN1 but not LAN0.
+ **/
+static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bus_info *bus = &hw->bus;
+ u16 pci_gen = 0;
+ u16 pci_ctrl2 = 0;
+
+ ixgbe_set_lan_id_multi_port_pcie(hw);
+
+ /* check if LAN0 is disabled */
+ hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
+ if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
+
+ hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
+
+		/* if LAN0 is completely disabled, force the function to 0 */
+ if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
+ !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
+ !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
+
+ bus->func = 0;
+ }
+ }
+}
+
+/**
+ * ixgbe_set_rxpba_82598 - Configure packet buffers
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to configure
+ * @headroom: reserve n KB of headroom (not used on 82598)
+ * @strategy: packet buffer allocation strategy
+ *
+ * Configure packet buffers.
+ **/
+static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom,
+ int strategy)
+{
+ u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
+ u8 i = 0;
+
+ if (!num_pb)
+ return;
+
+ /* Setup Rx packet buffer sizes */
+ switch (strategy) {
+ case PBA_STRATEGY_WEIGHTED:
+ /* Setup the first four at 80KB */
+ rxpktsize = IXGBE_RXPBSIZE_80KB;
+ for (; i < 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ /* Setup the last four at 48KB...don't re-init i */
+ rxpktsize = IXGBE_RXPBSIZE_48KB;
+ /* Fall Through */
+ case PBA_STRATEGY_EQUAL:
+ default:
+ /* Divide the remaining Rx packet buffer evenly among the TCs */
+ for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ break;
+ }
+
+ /* Setup Tx packet buffer sizes */
+ for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
+}
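+
+/*
+ * Editor's note (illustrative): with PBA_STRATEGY_WEIGHTED above, the
+ * eight Rx packet buffers split as 4 x 80KB + 4 x 48KB = 512KB, the same
+ * total as the PBA_STRATEGY_EQUAL default of 8 x 64KB.
+ */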
+
+static struct ixgbe_mac_operations mac_ops_82598 = {
+ .init_hw = &ixgbe_init_hw_generic,
+ .reset_hw = &ixgbe_reset_hw_82598,
+ .start_hw = &ixgbe_start_hw_82598,
+ .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
+ .get_media_type = &ixgbe_get_media_type_82598,
+ .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
+ .enable_rx_dma = &ixgbe_enable_rx_dma_generic,
+ .get_mac_addr = &ixgbe_get_mac_addr_generic,
+ .stop_adapter = &ixgbe_stop_adapter_generic,
+ .get_bus_info = &ixgbe_get_bus_info_generic,
+ .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598,
+ .read_analog_reg8 = &ixgbe_read_analog_reg8_82598,
+ .write_analog_reg8 = &ixgbe_write_analog_reg8_82598,
+ .setup_link = &ixgbe_setup_mac_link_82598,
+ .set_rxpba = &ixgbe_set_rxpba_82598,
+ .check_link = &ixgbe_check_mac_link_82598,
+ .get_link_capabilities = &ixgbe_get_link_capabilities_82598,
+ .led_on = &ixgbe_led_on_generic,
+ .led_off = &ixgbe_led_off_generic,
+ .blink_led_start = &ixgbe_blink_led_start_generic,
+ .blink_led_stop = &ixgbe_blink_led_stop_generic,
+ .set_rar = &ixgbe_set_rar_generic,
+ .clear_rar = &ixgbe_clear_rar_generic,
+ .set_vmdq = &ixgbe_set_vmdq_82598,
+ .clear_vmdq = &ixgbe_clear_vmdq_82598,
+ .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
+ .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
+ .enable_mc = &ixgbe_enable_mc_generic,
+ .disable_mc = &ixgbe_disable_mc_generic,
+ .clear_vfta = &ixgbe_clear_vfta_82598,
+ .set_vfta = &ixgbe_set_vfta_82598,
+ .fc_enable = &ixgbe_fc_enable_82598,
+ .set_fw_drv_ver = NULL,
+ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
+ .release_swfw_sync = &ixgbe_release_swfw_sync,
+};
+
+static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
+ .init_params = &ixgbe_init_eeprom_params_generic,
+ .read = &ixgbe_read_eerd_generic,
+ .read_buffer = &ixgbe_read_eerd_buffer_generic,
+ .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
+ .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
+ .update_checksum = &ixgbe_update_eeprom_checksum_generic,
+};
+
+static struct ixgbe_phy_operations phy_ops_82598 = {
+ .identify = &ixgbe_identify_phy_generic,
+ .identify_sfp = &ixgbe_identify_sfp_module_generic,
+ .init = &ixgbe_init_phy_ops_82598,
+ .reset = &ixgbe_reset_phy_generic,
+ .read_reg = &ixgbe_read_phy_reg_generic,
+ .write_reg = &ixgbe_write_phy_reg_generic,
+ .setup_link = &ixgbe_setup_phy_link_generic,
+ .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
+ .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598,
+ .check_overtemp = &ixgbe_tn_check_overtemp,
+};
+
+struct ixgbe_info ixgbe_82598_info = {
+ .mac = ixgbe_mac_82598EB,
+ .get_invariants = &ixgbe_get_invariants_82598,
+ .mac_ops = &mac_ops_82598,
+ .eeprom_ops = &eeprom_ops_82598,
+ .phy_ops = &phy_ops_82598,
+};
+
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
new file mode 100644
index 000000000000..4ae26a748da0
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -0,0 +1,2176 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "ixgbe.h"
+#include "ixgbe_phy.h"
+#include "ixgbe_mbx.h"
+
+#define IXGBE_82599_MAX_TX_QUEUES 128
+#define IXGBE_82599_MAX_RX_QUEUES 128
+#define IXGBE_82599_RAR_ENTRIES 128
+#define IXGBE_82599_MC_TBL_SIZE 128
+#define IXGBE_82599_VFT_TBL_SIZE 128
+#define IXGBE_82599_RX_PB_SIZE 512
+
+static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
+static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
+static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete);
+static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
+static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
+static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
+
+static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+
+ /* enable the laser control functions for SFP+ fiber */
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
+ mac->ops.disable_tx_laser =
+ &ixgbe_disable_tx_laser_multispeed_fiber;
+ mac->ops.enable_tx_laser =
+ &ixgbe_enable_tx_laser_multispeed_fiber;
+ mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
+ } else {
+ mac->ops.disable_tx_laser = NULL;
+ mac->ops.enable_tx_laser = NULL;
+ mac->ops.flap_tx_laser = NULL;
+ }
+
+ if (hw->phy.multispeed_fiber) {
+ /* Set up dual speed SFP+ support */
+ mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+ } else {
+ if ((mac->ops.get_media_type(hw) ==
+ ixgbe_media_type_backplane) &&
+ (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
+ hw->phy.smart_speed == ixgbe_smart_speed_on) &&
+ !ixgbe_verify_lesm_fw_enabled_82599(hw))
+ mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
+ else
+ mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
+ }
+}
+
+static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
+{
+ s32 ret_val = 0;
+ u32 reg_anlp1 = 0;
+ u32 i = 0;
+ u16 list_offset, data_offset, data_value;
+
+ if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
+ ixgbe_init_mac_link_ops_82599(hw);
+
+ hw->phy.ops.reset = NULL;
+
+ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
+ &data_offset);
+ if (ret_val != 0)
+ goto setup_sfp_out;
+
+ /* PHY config will finish before releasing the semaphore */
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val != 0) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto setup_sfp_out;
+ }
+
+ hw->eeprom.ops.read(hw, ++data_offset, &data_value);
+ while (data_value != 0xffff) {
+ IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
+ IXGBE_WRITE_FLUSH(hw);
+ hw->eeprom.ops.read(hw, ++data_offset, &data_value);
+ }
+
+ /* Release the semaphore */
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ /*
+		 * Delay obtaining semaphore again to allow FW access;
+		 * semaphore_delay is in ms, usleep_range() needs us.
+ */
+ usleep_range(hw->eeprom.semaphore_delay * 1000,
+ hw->eeprom.semaphore_delay * 2000);
+
+ /* Now restart DSP by setting Restart_AN and clearing LMS */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
+ IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
+ IXGBE_AUTOC_AN_RESTART));
+
+ /* Wait for AN to leave state 0 */
+ for (i = 0; i < 10; i++) {
+ usleep_range(4000, 8000);
+ reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+ if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
+ break;
+ }
+ if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
+ hw_dbg(hw, "sfp module setup not complete\n");
+ ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
+ goto setup_sfp_out;
+ }
+
+ /* Restart DSP by setting Restart_AN and return to SFI mode */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
+ IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
+ IXGBE_AUTOC_AN_RESTART));
+ }
+
+setup_sfp_out:
+ return ret_val;
+}
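+
+/*
+ * Editor's note (illustrative): the SFP init sequence consumed above is a
+ * flat list of 16-bit words in the EEPROM, written verbatim to
+ * IXGBE_CORECTL until the 0xffff sentinel terminates the list.
+ */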
+
+static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+
+ ixgbe_init_mac_link_ops_82599(hw);
+
+ mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
+ mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
+ mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
+ mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
+ mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during get_invariants because the PHY/SFP type was
+ * not known. Perform the SFP init if necessary.
+ *
+ **/
+static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+
+ /* Identify the PHY or SFP module */
+ ret_val = phy->ops.identify(hw);
+
+ /* Setup function pointers based on detected SFP module and speeds */
+ ixgbe_init_mac_link_ops_82599(hw);
+
+ /* If copper media, overwrite with copper function pointers */
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+ mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
+ mac->ops.get_link_capabilities =
+ &ixgbe_get_copper_link_capabilities_generic;
+ }
+
+ /* Set necessary function pointers based on phy type */
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ phy->ops.check_link = &ixgbe_check_phy_link_tnx;
+ phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
+ phy->ops.get_firmware_version =
+ &ixgbe_get_phy_firmware_version_tnx;
+ break;
+ default:
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_link_capabilities_82599 - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @negotiation: true when autoneg or autotry is enabled
+ *
+ * Determines the link capabilities by reading the AUTOC register.
+ **/
+static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *negotiation)
+{
+ s32 status = 0;
+ u32 autoc = 0;
+
+ /* Determine 1G link capabilities off of SFP+ type */
+ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *negotiation = true;
+ goto out;
+ }
+
+ /*
+ * Determine link capabilities based on the stored value of AUTOC,
+ * which represents EEPROM defaults. If AUTOC value has not been
+ * stored, use the current register value.
+ */
+ if (hw->mac.orig_link_settings_stored)
+ autoc = hw->mac.orig_autoc;
+ else
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *negotiation = false;
+ break;
+
+ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ *negotiation = false;
+ break;
+
+ case IXGBE_AUTOC_LMS_1G_AN:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *negotiation = true;
+ break;
+
+ case IXGBE_AUTOC_LMS_10G_SERIAL:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ *negotiation = false;
+ break;
+
+ case IXGBE_AUTOC_LMS_KX4_KX_KR:
+ case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ if (autoc & IXGBE_AUTOC_KR_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ *negotiation = true;
+ break;
+
+ case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ if (autoc & IXGBE_AUTOC_KR_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ *negotiation = true;
+ break;
+
+ case IXGBE_AUTOC_LMS_SGMII_1G_100M:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
+ *negotiation = false;
+ break;
+
+ default:
+ status = IXGBE_ERR_LINK_SETUP;
+ goto out;
+ }
+
+ if (hw->phy.multispeed_fiber) {
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ *negotiation = true;
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_get_media_type_82599 - Get media type
+ * @hw: pointer to hardware structure
+ *
+ * Returns the media type (fiber, copper, backplane)
+ **/
+static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
+{
+ enum ixgbe_media_type media_type;
+
+ /* Detect if there is a copper PHY attached. */
+ switch (hw->phy.type) {
+ case ixgbe_phy_cu_unknown:
+ case ixgbe_phy_tn:
+ media_type = ixgbe_media_type_copper;
+ goto out;
+ default:
+ break;
+ }
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_KX4:
+ case IXGBE_DEV_ID_82599_KX4_MEZZ:
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ case IXGBE_DEV_ID_82599_KR:
+ case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
+ case IXGBE_DEV_ID_82599_XAUI_LOM:
+ /* Default device ID is mezzanine card KX/KX4 */
+ media_type = ixgbe_media_type_backplane;
+ break;
+ case IXGBE_DEV_ID_82599_SFP:
+ case IXGBE_DEV_ID_82599_SFP_FCOE:
+ case IXGBE_DEV_ID_82599_SFP_EM:
+ case IXGBE_DEV_ID_82599_SFP_SF2:
+ case IXGBE_DEV_ID_82599EN_SFP:
+ media_type = ixgbe_media_type_fiber;
+ break;
+ case IXGBE_DEV_ID_82599_CX4:
+ media_type = ixgbe_media_type_cx4;
+ break;
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ media_type = ixgbe_media_type_copper;
+ break;
+ case IXGBE_DEV_ID_82599_LS:
+ media_type = ixgbe_media_type_fiber_lco;
+ break;
+ default:
+ media_type = ixgbe_media_type_unknown;
+ break;
+ }
+out:
+ return media_type;
+}
+
+/**
+ * ixgbe_start_mac_link_82599 - Setup MAC link settings
+ * @hw: pointer to hardware structure
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Configures link settings based on values in the ixgbe_hw struct.
+ * Restarts the link. Performs autonegotiation if needed.
+ **/
+static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete)
+{
+ u32 autoc_reg;
+ u32 links_reg;
+ u32 i;
+ s32 status = 0;
+
+ /* Restart link */
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+ /* Only poll for autoneg to complete if specified to do so */
+ if (autoneg_wait_to_complete) {
+ if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_KX_KR ||
+ (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+ (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+ links_reg = 0; /* Just in case Autoneg time = 0 */
+ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+ break;
+ msleep(100);
+ }
+ if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+ status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ hw_dbg(hw, "Autoneg did not complete.\n");
+ }
+ }
+ }
+
+	/* Add delay to filter out noise during initial link setup */
+ msleep(50);
+
+ return status;
+}
+
+/**
+ * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively shutting down the Tx
+ * laser on the PHY, effectively halting physical link.
+ **/
+static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ /* Disable tx laser; allow 100us to go dark per spec */
+ esdp_reg |= IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(100);
+}
+
+/**
+ * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively turning on the Tx
+ * laser on the PHY, effectively starting physical link.
+ **/
+static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ /* Enable tx laser; allow 100ms to light up */
+ esdp_reg &= ~IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ msleep(100);
+}
+
+/**
+ * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * When the driver changes the link speeds that it can support,
+ * it sets autotry_restart to true to indicate that we need to
+ * initiate a new autotry session with the link partner. To do
+ * so, we set the speed then disable and re-enable the tx laser, to
+ * alert the link partner that it also needs to restart autotry on its
+ * end. This is consistent with true clause 37 autoneg, which also
+ * involves a loss of signal.
+ **/
+static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ if (hw->mac.autotry_restart) {
+ ixgbe_disable_tx_laser_multispeed_fiber(hw);
+ ixgbe_enable_tx_laser_multispeed_fiber(hw);
+ hw->mac.autotry_restart = false;
+ }
+}
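+
+/*
+ * Editor's note: illustrative-only sketch of the autotry handshake
+ * described above; a speed change would set autotry_restart before
+ * flapping the laser. The wrapper name is hypothetical.
+ */
+#if 0	/* illustrative only */
+static void example_restart_autotry(struct ixgbe_hw *hw)
+{
+	hw->mac.autotry_restart = true;
+	if (hw->mac.ops.flap_tx_laser)
+		hw->mac.ops.flap_tx_laser(hw);
+}
+#endif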
+
+/**
+ * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Sets the link speed in the AUTOC register and restarts link.
+ **/
+static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ s32 status = 0;
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ u32 speedcnt = 0;
+ u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ u32 i = 0;
+ bool link_up = false;
+ bool negotiation;
+
+ /* Mask off requested but non-supported speeds */
+ status = hw->mac.ops.get_link_capabilities(hw, &link_speed,
+ &negotiation);
+ if (status != 0)
+ return status;
+
+ speed &= link_speed;
+
+ /*
+ * Try each speed one by one, highest priority first. We do this in
+ * software because 10gb fiber doesn't support speed autonegotiation.
+ */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ speedcnt++;
+ highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* If we already have link at this speed, just jump out */
+ status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+ false);
+ if (status != 0)
+ return status;
+
+ if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
+ goto out;
+
+ /* Set the module link speed */
+ esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Allow module to change analog characteristics (1G->10G) */
+ msleep(40);
+
+ status = ixgbe_setup_mac_link_82599(hw,
+ IXGBE_LINK_SPEED_10GB_FULL,
+ autoneg,
+ autoneg_wait_to_complete);
+ if (status != 0)
+ return status;
+
+ /* Flap the tx laser if it has not already been done */
+ hw->mac.ops.flap_tx_laser(hw);
+
+ /*
+ * Wait for the controller to acquire link. Per IEEE 802.3ap,
+ * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * attempted. 82599 uses the same timing for 10g SFI.
+ */
+ for (i = 0; i < 5; i++) {
+ /* Wait for the link partner to also set speed */
+ msleep(100);
+
+ /* If we have link, just jump out */
+ status = hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false);
+ if (status != 0)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+ }
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+ speedcnt++;
+ if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
+ highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* If we already have link at this speed, just jump out */
+ status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+ false);
+ if (status != 0)
+ return status;
+
+ if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
+ goto out;
+
+ /* Set the module link speed */
+ esdp_reg &= ~IXGBE_ESDP_SDP5;
+ esdp_reg |= IXGBE_ESDP_SDP5_DIR;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Allow module to change analog characteristics (10G->1G) */
+ msleep(40);
+
+ status = ixgbe_setup_mac_link_82599(hw,
+ IXGBE_LINK_SPEED_1GB_FULL,
+ autoneg,
+ autoneg_wait_to_complete);
+ if (status != 0)
+ return status;
+
+ /* Flap the tx laser if it has not already been done */
+ hw->mac.ops.flap_tx_laser(hw);
+
+ /* Wait for the link partner to also set speed */
+ msleep(100);
+
+ /* If we have link, just jump out */
+ status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+ false);
+ if (status != 0)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+
+ /*
+	 * We didn't get link. Configure back to the highest speed we tried
+	 * (if there was more than one). We call ourselves back with just the
+ * single highest speed that the user requested.
+ */
+ if (speedcnt > 1)
+ status = ixgbe_setup_mac_link_multispeed_fiber(hw,
+ highest_link_speed,
+ autoneg,
+ autoneg_wait_to_complete);
+
+out:
+ /* Set autoneg_advertised value based on input link speed */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Implements the Intel SmartSpeed algorithm.
+ **/
+static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ s32 status = 0;
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ s32 i, j;
+ bool link_up = false;
+ u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ /* Set autoneg_advertised value based on input link speed */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_100_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+
+ /*
+ * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
+ * autoneg advertisement if link is unable to be established at the
+ * highest negotiated rate. This can sometimes happen due to integrity
+ * issues with the physical media connection.
+ */
+
+ /* First, try to get link with full advertisement */
+ hw->phy.smart_speed_active = false;
+ for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
+ status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+ if (status != 0)
+ goto out;
+
+ /*
+ * Wait for the controller to acquire link. Per IEEE 802.3ap,
+ * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
+ * Table 9 in the AN MAS.
+ */
+ for (i = 0; i < 5; i++) {
+ mdelay(100);
+
+ /* If we have link, just jump out */
+ status = hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false);
+ if (status != 0)
+ goto out;
+
+ if (link_up)
+ goto out;
+ }
+ }
+
+ /*
+ * We didn't get link. If we advertised KR plus one of KX4/KX
+ * (or BX4/BX), then disable KR and try again.
+ */
+ if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
+ ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
+ goto out;
+
+ /* Turn SmartSpeed on to disable KR support */
+ hw->phy.smart_speed_active = true;
+ status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+ if (status != 0)
+ goto out;
+
+ /*
+ * Wait for the controller to acquire link. 600ms will allow for
+ * the AN link_fail_inhibit_timer as well for multiple cycles of
+ * parallel detect, both 10g and 1g. This allows for the maximum
+ * connect attempts as defined in the AN MAS table 73-7.
+ */
+ for (i = 0; i < 6; i++) {
+ mdelay(100);
+
+ /* If we have link, just jump out */
+ status = hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false);
+ if (status != 0)
+ goto out;
+
+ if (link_up)
+ goto out;
+ }
+
+ /* We didn't get link. Turn SmartSpeed back off. */
+ hw->phy.smart_speed_active = false;
+ status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+
+out:
+ if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
+ hw_dbg(hw, "Smartspeed has downgraded the link speed from "
+ "the maximum advertised\n");
+ return status;
+}
+
+/**
+ * ixgbe_setup_mac_link_82599 - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Sets the link speed in the AUTOC register and restarts link.
+ **/
+static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ s32 status = 0;
+ u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ u32 start_autoc = autoc;
+ u32 orig_autoc = 0;
+ u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
+ u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+ u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+ u32 links_reg;
+ u32 i;
+ ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+
+ /* Check to see if speed passed in is supported. */
+	status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
+						   &autoneg);
+ if (status != 0)
+ goto out;
+
+ speed &= link_capabilities;
+
+ if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
+ status = IXGBE_ERR_LINK_SETUP;
+ goto out;
+ }
+
+ /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
+ if (hw->mac.orig_link_settings_stored)
+ orig_autoc = hw->mac.orig_autoc;
+ else
+ orig_autoc = autoc;
+
+ if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+ /* Set KX4/KX/KR support according to speed requested */
+ autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
+ autoc |= IXGBE_AUTOC_KX4_SUPP;
+ if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
+		    !hw->phy.smart_speed_active)
+ autoc |= IXGBE_AUTOC_KR_SUPP;
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ autoc |= IXGBE_AUTOC_KX_SUPP;
+ } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
+ (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
+ link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
+ /* Switch from 1G SFI to 10G SFI if requested */
+ if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
+ (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
+ autoc &= ~IXGBE_AUTOC_LMS_MASK;
+ autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
+ }
+ } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
+ (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
+ /* Switch from 10G SFI to 1G SFI if requested */
+ if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
+ (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
+ autoc &= ~IXGBE_AUTOC_LMS_MASK;
+ if (autoneg)
+ autoc |= IXGBE_AUTOC_LMS_1G_AN;
+ else
+ autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
+ }
+ }
+
+ if (autoc != start_autoc) {
+ /* Restart link */
+ autoc |= IXGBE_AUTOC_AN_RESTART;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+
+ /* Only poll for autoneg to complete if specified to do so */
+ if (autoneg_wait_to_complete) {
+ if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+				links_reg = 0; /* Just in case Autoneg time = 0 */
+ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+ links_reg =
+ IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+ break;
+ msleep(100);
+ }
+ if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+ status =
+ IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ hw_dbg(hw, "Autoneg did not "
+ "complete.\n");
+ }
+ }
+ }
+
+ /* Add delay to filter out noises during initial link setup */
+ msleep(50);
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true if waiting is needed to complete
+ *
+ * Restarts link on PHY and MAC based on settings passed in.
+ **/
+static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ s32 status;
+
+ /* Setup the PHY according to input speed */
+ status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+ /* Set up MAC */
+ ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
+
+ return status;
+}
+
+/**
+ * ixgbe_reset_hw_82599 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
+ * reset.
+ **/
+static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
+{
+ ixgbe_link_speed link_speed;
+ s32 status;
+ u32 ctrl, i, autoc, autoc2;
+ bool link_up = false;
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != 0)
+ goto reset_hw_out;
+
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
+
+ /* PHY ops must be identified and initialized prior to reset */
+
+ /* Identify PHY and related function pointers */
+ status = hw->phy.ops.init(hw);
+
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto reset_hw_out;
+
+ /* Setup SFP module if there is one present. */
+ if (hw->phy.sfp_setup_needed) {
+ status = hw->mac.ops.setup_sfp(hw);
+ hw->phy.sfp_setup_needed = false;
+ }
+
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto reset_hw_out;
+
+ /* Reset PHY */
+	if (!hw->phy.reset_disable && hw->phy.ops.reset)
+ hw->phy.ops.reset(hw);
+
+mac_reset_top:
+ /*
+ * Issue global reset to the MAC. Needs to be SW reset if link is up.
+ * If link reset is used when link is up, it might reset the PHY when
+ * mng is using it. If link is down or the flag to force full link
+ * reset is set, then perform link reset.
+ */
+ ctrl = IXGBE_CTRL_LNK_RST;
+ if (!hw->force_full_reset) {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (link_up)
+ ctrl = IXGBE_CTRL_RST;
+ }
+
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ udelay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ status = IXGBE_ERR_RESET_FAILED;
+ hw_dbg(hw, "Reset polling failed to complete.\n");
+ }
+
+ msleep(50);
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /*
+ * Store the original AUTOC/AUTOC2 values if they have not been
+ * stored off yet. Otherwise restore the stored original
+ * values since the reset operation sets back to defaults.
+ */
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+	if (!hw->mac.orig_link_settings_stored) {
+ hw->mac.orig_autoc = autoc;
+ hw->mac.orig_autoc2 = autoc2;
+ hw->mac.orig_link_settings_stored = true;
+ } else {
+ if (autoc != hw->mac.orig_autoc)
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
+ IXGBE_AUTOC_AN_RESTART));
+
+ if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
+ (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
+ autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
+ autoc2 |= (hw->mac.orig_autoc2 &
+ IXGBE_AUTOC2_UPPER_MASK);
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
+ }
+ }
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ /* Store the permanent SAN mac address */
+ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+ /* Add the SAN MAC address to the RAR only if it's a valid address */
+ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+ hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+ /* Reserve the last RAR for the SAN MAC address */
+ hw->mac.num_rar_entries--;
+ }
+
+ /* Store the alternative WWNN/WWPN prefix */
+ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+ &hw->mac.wwpn_prefix);
+
+reset_hw_out:
+ return status;
+}
+
+/**
+ * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
+{
+ int i;
+ u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+ fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
+
+ /*
+ * Before starting the reinitialization process,
+ * FDIRCMD.CMD must be zero.
+ */
+ for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
+ if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+ IXGBE_FDIRCMD_CMD_MASK))
+ break;
+ udelay(10);
+ }
+ if (i >= IXGBE_FDIRCMD_CMD_POLL) {
+ hw_dbg(hw, "Flow Director previous command isn't complete, "
+ "aborting table re-initialization.\n");
+ return IXGBE_ERR_FDIR_REINIT_FAILED;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
+ IXGBE_WRITE_FLUSH(hw);
+ /*
+ * The 82599 adapter's Flow Director init flow cannot simply be
+ * restarted. Work around the 82599 silicon errata by performing the
+ * following steps before re-writing the FDIRCTRL control register
+ * with the same value:
+ * - write 1 to bit 8 of the FDIRCMD register, then
+ * - write 0 to bit 8 of the FDIRCMD register
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+ IXGBE_FDIRCMD_CLEARHT));
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+ ~IXGBE_FDIRCMD_CLEARHT));
+ IXGBE_WRITE_FLUSH(hw);
+ /*
+ * Clear FDIR Hash register to clear any leftover hashes
+ * waiting to be programmed.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
+ IXGBE_WRITE_FLUSH(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll init-done after we write FDIRCTRL register */
+ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+ IXGBE_FDIRCTRL_INIT_DONE)
+ break;
+ udelay(10);
+ }
+ if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
+ hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
+ return IXGBE_ERR_FDIR_REINIT_FAILED;
+ }
+
+ /* Clear FDIR statistics registers (read to clear) */
+ IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
+ IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
+ IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+ IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+ IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
+
+ return 0;
+}
+
+/**
+ * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register
+ **/
+static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+ int i;
+
+ /* Prime the keys for hashing */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
+
+ /*
+ * Poll init-done after we write the register. Estimated times:
+ * 10G: PBALLOC = 11b, timing is 60us
+ * 1G: PBALLOC = 11b, timing is 600us
+ * 100M: PBALLOC = 11b, timing is 6ms
+ *
+ * Multiply these timings by 4 if under full Rx load
+ *
+ * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+ * 1 msec per poll time. If we're at line rate and drop to 100M, then
+ * this might not finish in our poll time, but we can live with that
+ * for now.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+ IXGBE_FDIRCTRL_INIT_DONE)
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ if (i >= IXGBE_FDIR_INIT_DONE_POLL)
+ hw_dbg(hw, "Flow Director poll time exceeded!\n");
+}
+
+/**
+ * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ * contains just the value of the Rx packet buffer allocation
+ **/
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+ /*
+ * Continue setup of fdirctrl register bits:
+ * Move the flexible bytes to use the ethertype - shift 6 words
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 filters are left
+ */
+ fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+ (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+ (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+
+ /* write hashes and fdirctrl register, poll for completion */
+ ixgbe_fdir_enable_82599(hw, fdirctrl);
+
+ return 0;
+}
+
+/**
+ * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ * contains just the value of the Rx packet buffer allocation
+ **/
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+ /*
+ * Continue setup of fdirctrl register bits:
+ * Turn perfect match filtering on
+ * Report hash in RSS field of Rx wb descriptor
+ * Initialize the drop queue
+ * Move the flexible bytes to use the ethertype - shift 6 words
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 (0x4 * 16) filters are left
+ */
+ fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
+ IXGBE_FDIRCTRL_REPORT_STATUS |
+ (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
+ (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+ (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+ (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+
+ /* write hashes and fdirctrl register, poll for completion */
+ ixgbe_fdir_enable_82599(hw, fdirctrl);
+
+ return 0;
+}
+
+/*
+ * These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply invoking IXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15
+ */
+#define IXGBE_ATR_COMMON_HASH_KEY \
+ (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
+#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+ common_hash ^= lo_hash_dword >> n; \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+ sig_hash ^= lo_hash_dword << (16 - n); \
+ if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+ common_hash ^= hi_hash_dword >> n; \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+ sig_hash ^= hi_hash_dword << (16 - n); \
+} while (0)
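+
+/*
+ * Because the hash keys are compile-time constants, every conditional in
+ * the macro above folds away and each iteration compiles down to at most
+ * two XOR statements. As a purely hypothetical illustration (which
+ * statements survive depends on the actual key bit values): if only bucket
+ * key bit 3 and signature key bit 19 were set,
+ * IXGBE_COMPUTE_SIG_HASH_ITERATION(3) would reduce to just:
+ *
+ *	bucket_hash ^= lo_hash_dword >> 3;
+ *	sig_hash ^= hi_hash_dword << 13;
+ */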
+
+/**
+ * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
+ * @input: unique input dword
+ * @common: compressed common input dword
+ *
+ * This function is almost identical to the function above but contains
+ * several optomizations such as unwinding all of the loops, letting the
+ * compiler work out all of the conditional ifs since the keys are static
+ * defines, and computing two keys at once since the hashed dword stream
+ * will be the same for both keys.
+ **/
+static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common)
+{
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
+
+ /* record the flow_vm_vlan bits as they are a key part of the hash */
+ flow_vm_vlan = ntohl(input.dword);
+
+ /* generate common hash dword */
+ hi_hash_dword = ntohl(common.dword);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
+
+ /*
+ * Apply the flow ID/VM pool/VLAN ID bits to the lo hash dword. We had
+ * to delay this because bit 0 of the stream should not be processed,
+ * so we do not add the VLAN until after bit 0 has been processed.
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process the remaining 30 bits of the key */
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
+
+ /* combine common_hash result with signature and bucket hashes */
+ bucket_hash ^= common_hash;
+ bucket_hash &= IXGBE_ATR_HASH_MASK;
+
+ sig_hash ^= common_hash << 16;
+ sig_hash &= IXGBE_ATR_HASH_MASK << 16;
+
+ /* return completed signature hash */
+ return sig_hash ^ bucket_hash;
+}
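+
+/*
+ * For reference, a rolled-up sketch equivalent to the unrolled macro calls
+ * above (the driver unrolls the loop so the constant key bits fold away at
+ * compile time):
+ *
+ *	u32 n;
+ *
+ *	for (n = 0; n < 16; n++) {
+ *		if (IXGBE_ATR_COMMON_HASH_KEY & (1u << n))
+ *			common_hash ^= lo_hash_dword >> n;
+ *		else if (IXGBE_ATR_BUCKET_HASH_KEY & (1u << n))
+ *			bucket_hash ^= lo_hash_dword >> n;
+ *		else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (1u << n))
+ *			sig_hash ^= lo_hash_dword << (16 - n);
+ *		if (IXGBE_ATR_COMMON_HASH_KEY & (1u << (n + 16)))
+ *			common_hash ^= hi_hash_dword >> n;
+ *		else if (IXGBE_ATR_BUCKET_HASH_KEY & (1u << (n + 16)))
+ *			bucket_hash ^= hi_hash_dword >> n;
+ *		else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (1u << (n + 16)))
+ *			sig_hash ^= hi_hash_dword << (16 - n);
+ *		if (n == 0)	/* fold VLAN bits in only after bit 0 */
+ *			lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+ *	}
+ */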
+
+/**
+ * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
+ * @hw: pointer to hardware structure
+ * @input: unique input dword
+ * @common: compressed common input dword
+ * @queue: queue index to direct traffic to
+ **/
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
+ u8 queue)
+{
+ u64 fdirhashcmd;
+ u32 fdircmd;
+
+ /*
+ * Get the flow_type in order to program FDIRCMD properly; the lowest
+ * 2 bits are FDIRCMD.L4TYPE and the third lowest bit is FDIRCMD.IPV6
+ */
+ switch (input.formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ case IXGBE_ATR_FLOW_TYPE_TCPV6:
+ case IXGBE_ATR_FLOW_TYPE_UDPV6:
+ case IXGBE_ATR_FLOW_TYPE_SCTPV6:
+ break;
+ default:
+ hw_dbg(hw, " Error on flow type input\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ /* configure FDIRCMD register */
+ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+ /*
+ * The lower 32 bits of fdirhashcmd are for FDIRHASH; the upper 32 bits
+ * are for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
+ */
+ fdirhashcmd = (u64)fdircmd << 32;
+ fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
+ IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
+
+ hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+
+ return 0;
+}
+
+#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+} while (0)
+
+/**
+ * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
+ * @input: input bitstream to compute the hash on
+ * @input_mask: mask for the input bitstream
+ *
+ * This function serves two main purposes. First, it applies the input_mask
+ * to the input, resulting in a cleaned up data stream.
+ * Second, it computes the hash and stores it in the bkt_hash field at
+ * the end of the input byte stream. This way it will be available for
+ * future use without needing to recompute the hash.
+ **/
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ union ixgbe_atr_input *input_mask)
+{
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 bucket_hash = 0;
+
+ /* Apply masks to input data */
+ input->dword_stream[0] &= input_mask->dword_stream[0];
+ input->dword_stream[1] &= input_mask->dword_stream[1];
+ input->dword_stream[2] &= input_mask->dword_stream[2];
+ input->dword_stream[3] &= input_mask->dword_stream[3];
+ input->dword_stream[4] &= input_mask->dword_stream[4];
+ input->dword_stream[5] &= input_mask->dword_stream[5];
+ input->dword_stream[6] &= input_mask->dword_stream[6];
+ input->dword_stream[7] &= input_mask->dword_stream[7];
+ input->dword_stream[8] &= input_mask->dword_stream[8];
+ input->dword_stream[9] &= input_mask->dword_stream[9];
+ input->dword_stream[10] &= input_mask->dword_stream[10];
+
+ /* record the flow_vm_vlan bits as they are a key part of the hash */
+ flow_vm_vlan = ntohl(input->dword_stream[0]);
+
+ /* generate common hash dword */
+ hi_hash_dword = ntohl(input->dword_stream[1] ^
+ input->dword_stream[2] ^
+ input->dword_stream[3] ^
+ input->dword_stream[4] ^
+ input->dword_stream[5] ^
+ input->dword_stream[6] ^
+ input->dword_stream[7] ^
+ input->dword_stream[8] ^
+ input->dword_stream[9] ^
+ input->dword_stream[10]);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
+
+ /*
+ * Apply the flow ID/VM pool/VLAN ID bits to the lo hash dword. We had
+ * to delay this because bit 0 of the stream should not be processed,
+ * so we do not add the VLAN until after bit 0 has been processed.
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process the remaining 30 bits of the key */
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
+
+ /*
+ * Limit hash to 13 bits since max bucket count is 8K.
+ * Store result at the end of the input stream.
+ */
+ input->formatted.bkt_hash = bucket_hash & 0x1FFF;
+}
+
+/**
+ * ixgbe_get_fdirtcpm_82599 - generate a bit-swapped TCP/UDP port mask from atr_input_masks
+ * @input_mask: mask to be bit swapped
+ *
+ * The source and destination port masks for flow director are bit
+ * swapped in that bit 15 affects bit 0, bit 14 affects bit 1, bit 13
+ * affects bit 2, and so on. In order to generate a correctly swapped
+ * value we need to bit swap the mask, and that is what this function
+ * accomplishes.
+ **/
+static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
+{
+ u32 mask = ntohs(input_mask->formatted.dst_port);
+ mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
+ mask |= ntohs(input_mask->formatted.src_port);
+ mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+ mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+ mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+ return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
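+
+/*
+ * Worked example: the four swap steps above reverse the bits within each
+ * 16-bit half of the word, so the two port fields stay in place. With a
+ * src_port mask of 0x00FF and a dst_port mask of 0x0000, mask starts as
+ * 0x000000FF; the 1-, 2- and 4-bit swaps leave the solid byte unchanged,
+ * and the final byte swap within each half yields 0x0000FF00 - bit 0 has
+ * moved to bit 15, as the hardware expects.
+ */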
+
+/*
+ * These two macros are meant to address the fact that we have registers
+ * that are either all or partly big-endian. As a result, on big-endian
+ * systems the value ends up byte swapped to little-endian before it is
+ * byte swapped again and written to the hardware in the original
+ * big-endian format.
+ */
+#define IXGBE_STORE_AS_BE32(_value) \
+ (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+ (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+
+#define IXGBE_WRITE_REG_BE32(a, reg, value) \
+ IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
+
+#define IXGBE_STORE_AS_BE16(_value) \
+ ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8))
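+
+/*
+ * A worked example of the double swap, assuming IXGBE_WRITE_REG resolves
+ * to a little-endian MMIO write such as writel(): take a __be32 value
+ * whose bytes in memory are 11 22 33 44. On a little-endian host that u32
+ * reads as 0x44332211; ntohl() gives 0x11223344 and IXGBE_STORE_AS_BE32()
+ * swaps it straight back to 0x44332211, so bytes 11 22 33 44 reach the
+ * register. On a big-endian host ntohl() is a no-op,
+ * IXGBE_STORE_AS_BE32() produces 0x44332211, and the swap performed by
+ * the little-endian MMIO write restores the same 11 22 33 44 byte order.
+ */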
+
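+/**
+ * ixgbe_fdir_set_input_mask_82599 - Program the Flow Director field masks
+ * @hw: pointer to hardware structure
+ * @input_mask: mask describing which packet fields the filters compare;
+ *	per the rules below, an all-zero field is masked out entirely
+ **/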
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input_mask)
+{
+ /* mask IPv6 since it is currently not supported */
+ u32 fdirm = IXGBE_FDIRM_DIPv6;
+ u32 fdirtcpm;
+
+ /*
+ * Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
+ *
+ * This also assumes IPv4 only. IPv6 masking isn't supported at this
+ * point in time.
+ */
+
+ /* verify bucket hash is cleared on hash generation */
+ if (input_mask->formatted.bkt_hash)
+ hw_dbg(hw, " bucket hash should always be 0 in mask\n");
+
+ /* Program FDIRM and verify partial masks */
+ switch (input_mask->formatted.vm_pool & 0x7F) {
+ case 0x0:
+ fdirm |= IXGBE_FDIRM_POOL;
+ /* fall through */
+ case 0x7F:
+ break;
+ default:
+ hw_dbg(hw, " Error on vm pool mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
+ case 0x0:
+ fdirm |= IXGBE_FDIRM_L4P;
+ if (input_mask->formatted.dst_port ||
+ input_mask->formatted.src_port) {
+ hw_dbg(hw, " Error on src/dst port mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ /* fall through */
+ case IXGBE_ATR_L4TYPE_MASK:
+ break;
+ default:
+ hw_dbg(hw, " Error on flow type mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) {
+ case 0x0000:
+ /* mask VLAN ID, fall through to mask VLAN priority */
+ fdirm |= IXGBE_FDIRM_VLANID;
+ case 0x0FFF:
+ /* mask VLAN priority */
+ fdirm |= IXGBE_FDIRM_VLANP;
+ break;
+ case 0xE000:
+ /* mask VLAN ID only, fall through */
+ fdirm |= IXGBE_FDIRM_VLANID;
+ case 0xEFFF:
+ /* no VLAN fields masked */
+ break;
+ default:
+ hw_dbg(hw, " Error on VLAN mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (input_mask->formatted.flex_bytes & 0xFFFF) {
+ case 0x0000:
+ /* Mask Flex Bytes, fall through */
+ fdirm |= IXGBE_FDIRM_FLEX;
+ case 0xFFFF:
+ break;
+ default:
+ hw_dbg(hw, " Error on flexible byte mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+
+ /* store the TCP/UDP port masks, bit reversed from port layout */
+ fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
+
+ /* write both the same so that UDP and TCP use the same mask */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+
+ /* store source and destination IP masks (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+ ~input_mask->formatted.src_ip[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+ ~input_mask->formatted.dst_ip[0]);
+
+ return 0;
+}
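+
+/*
+ * A minimal usage sketch (the field values are illustrative assumptions,
+ * not taken from the driver): program the masks so that perfect filters
+ * match on L4 type, both ports and both IPv4 addresses, while the VLAN,
+ * VM pool and flex bytes are ignored:
+ *
+ *	union ixgbe_atr_input mask;
+ *
+ *	memset(&mask, 0, sizeof(mask));
+ *	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_MASK;
+ *	mask.formatted.src_port = htons(0xFFFF);
+ *	mask.formatted.dst_port = htons(0xFFFF);
+ *	mask.formatted.src_ip[0] = htonl(0xFFFFFFFF);
+ *	mask.formatted.dst_ip[0] = htonl(0xFFFFFFFF);
+ *	ixgbe_fdir_set_input_mask_82599(hw, &mask);
+ */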
+
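+/**
+ * ixgbe_fdir_write_perfect_filter_82599 - Program a perfect filter
+ * @hw: pointer to hardware structure
+ * @input: filter fields to match, with bkt_hash already computed (e.g. by
+ *	ixgbe_atr_compute_perfect_hash_82599)
+ * @soft_id: software index used to track the filter
+ * @queue: Rx queue to direct matching traffic to
+ **/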
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id, u8 queue)
+{
+ u32 fdirport, fdirvlan, fdirhash, fdircmd;
+
+ /* currently IPv6 is not supported, must be programmed with 0 */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
+ input->formatted.src_ip[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
+ input->formatted.src_ip[1]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
+ input->formatted.src_ip[2]);
+
+ /* record the source address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+
+ /* record the first 32 bits of the destination address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+
+ /* record source and destination port (little-endian) */
+ fdirport = ntohs(input->formatted.dst_port);
+ fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+ fdirport |= ntohs(input->formatted.src_port);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+
+ /* record VLAN (little-endian) and flex_bytes (big-endian) */
+ fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
+ fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+ fdirvlan |= ntohs(input->formatted.vlan_id);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+ /* configure FDIRHASH register */
+ fdirhash = input->formatted.bkt_hash;
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+ /*
+ * flush all previous writes to make certain registers are
+ * programmed prior to issuing the command
+ */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* configure FDIRCMD register */
+ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ if (queue == IXGBE_FDIR_DROP_QUEUE)
+ fdircmd |= IXGBE_FDIRCMD_DROP;
+ fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+ fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+
+ return 0;
+}
+
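+/**
+ * ixgbe_fdir_erase_perfect_filter_82599 - Remove a perfect filter
+ * @hw: pointer to hardware structure
+ * @input: filter fields identifying the filter, bkt_hash included
+ * @soft_id: software index of the filter to remove
+ **/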
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id)
+{
+ u32 fdirhash;
+ u32 fdircmd = 0;
+ u32 retry_count;
+ s32 err = 0;
+
+ /* configure FDIRHASH register */
+ fdirhash = input->formatted.bkt_hash;
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+ /* flush hash to HW */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Query if filter is present */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
+
+ for (retry_count = 10; retry_count; retry_count--) {
+ /* allow 10us for query to process */
+ udelay(10);
+ /* verify query completed successfully */
+ fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+ if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
+ break;
+ }
+
+ if (!retry_count)
+ err = IXGBE_ERR_FDIR_REINIT_FAILED;
+
+ /* if filter exists in hardware then remove it */
+ if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to read
+ * @val: read value
+ *
+ * Performs a read operation on the specified Omer analog register.
+ **/
+static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+ u32 core_ctl;
+
+ IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
+ (reg << 8));
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(10);
+ core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
+ *val = (u8)core_ctl;
+
+ return 0;
+}
+
+/**
+ * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to write
+ * @val: value to write
+ *
+ * Performs a write operation on the specified Omer analog register.
+ **/
+static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+ u32 core_ctl;
+
+ core_ctl = (reg << 8) | val;
+ IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(10);
+
+ return 0;
+}
+
+/**
+ * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware using the generic start_hw function and the
+ * generation-specific (gen2) start_hw function, then performs any
+ * revision-specific operations.
+ **/
+static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
+{
+ s32 ret_val = 0;
+
+ ret_val = ixgbe_start_hw_generic(hw);
+ if (ret_val != 0)
+ goto out;
+
+ ret_val = ixgbe_start_hw_gen2(hw);
+ if (ret_val != 0)
+ goto out;
+
+ /* We need to run link autotry after the driver loads */
+ hw->mac.autotry_restart = true;
+ hw->mac.rx_pb_size = IXGBE_82599_RX_PB_SIZE;
+
+ if (ret_val == 0)
+ ret_val = ixgbe_verify_fw_version_82599(hw);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_identify_phy_82599 - Get physical layer module
+ * @hw: pointer to hardware structure
+ *
+ * Determines the physical layer module found on the current adapter.
+ * If PHY already detected, maintains current PHY type in hw struct,
+ * otherwise executes the PHY detection routine.
+ **/
+static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ /* Detect the PHY; returns success if it is already detected */
+ status = ixgbe_identify_phy_generic(hw);
+ if (status != 0) {
+ /* 82599 10GBASE-T requires an external PHY */
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
+ goto out;
+ else
+ status = ixgbe_identify_sfp_module_generic(hw);
+ }
+
+ /* Set PHY type none if no PHY detected */
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ hw->phy.type = ixgbe_phy_none;
+ status = 0;
+ }
+
+ /* Return error if SFP module has been detected but is not supported */
+ if (hw->phy.type == ixgbe_phy_sfp_unsupported)
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
+{
+ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+ u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
+ u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+ u16 ext_ability = 0;
+ u8 comp_codes_10g = 0;
+ u8 comp_codes_1g = 0;
+
+ hw->phy.ops.identify(hw);
+
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ case ixgbe_phy_cu_unknown:
+ hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
+ &ext_ability);
+ if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+ goto out;
+ default:
+ break;
+ }
+
+ switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+ case IXGBE_AUTOC_LMS_1G_AN:
+ case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+ if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
+ IXGBE_PHYSICAL_LAYER_1000BASE_BX;
+ goto out;
+ }
+ /* SFI mode so read SFP module */
+ goto sfp_check;
+ case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+ if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+ else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
+ goto out;
+ case IXGBE_AUTOC_LMS_10G_SERIAL:
+ if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+ goto out;
+ } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
+ goto sfp_check;
+ break;
+ case IXGBE_AUTOC_LMS_KX4_KX_KR:
+ case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
+ if (autoc & IXGBE_AUTOC_KX_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ if (autoc & IXGBE_AUTOC_KX4_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+ if (autoc & IXGBE_AUTOC_KR_SUPP)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+ goto out;
+ default:
+ goto out;
+ }
+
+sfp_check:
+ /*
+ * SFP check must be done last since DA modules are sometimes used to
+ * test KR mode - we need to id KR mode correctly before SFP module.
+ * Call identify_sfp because the pluggable module may have changed.
+ */
+ hw->phy.ops.identify_sfp(hw);
+ if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+ goto out;
+
+ switch (hw->phy.type) {
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+ break;
+ case ixgbe_phy_sfp_ftl_active:
+ case ixgbe_phy_sfp_active_unknown:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
+ break;
+ case ixgbe_phy_sfp_avago:
+ case ixgbe_phy_sfp_ftl:
+ case ixgbe_phy_sfp_intel:
+ case ixgbe_phy_sfp_unknown:
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
+ if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ break;
+ default:
+ break;
+ }
+
+out:
+ return physical_layer;
+}
+
+/**
+ * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
+ * @hw: pointer to hardware structure
+ * @regval: register value to write to RXCTRL
+ *
+ * Enables the Rx DMA unit for 82599
+ **/
+static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
+{
+#define IXGBE_MAX_SECRX_POLL 30
+ int i;
+ int secrxreg;
+
+ /*
+ * Workaround for 82599 silicon errata when enabling the Rx datapath.
+ * If traffic is incoming before we enable the Rx unit, it could hang
+ * the Rx DMA unit. Therefore, make sure the security engine is
+ * completely disabled prior to enabling the Rx unit.
+ */
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+ for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
+ if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
+ break;
+ else
+ /* Use interrupt-safe sleep just in case */
+ udelay(10);
+ }
+
+ /* For informational purposes only */
+ if (i >= IXGBE_MAX_SECRX_POLL)
+ hw_dbg(hw, "Rx unit being enabled before security "
+ "path fully disabled. Continuing with init.\n");
+
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_verify_fw_version_82599 - verify fw version for 82599
+ * @hw: pointer to hardware structure
+ *
+ * Verifies that the installed firmware version is 0.6 or higher
+ * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
+ *
+ * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
+ * if the FW version is not supported.
+ **/
+static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_EEPROM_VERSION;
+ u16 fw_offset, fw_ptp_cfg_offset;
+ u16 fw_version = 0;
+
+ /* firmware check is only necessary for SFI devices */
+ if (hw->phy.media_type != ixgbe_media_type_fiber) {
+ status = 0;
+ goto fw_version_out;
+ }
+
+ /* get the offset to the Firmware Module block */
+ hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+
+ if ((fw_offset == 0) || (fw_offset == 0xFFFF))
+ goto fw_version_out;
+
+ /* get the offset to the Pass Through Patch Configuration block */
+ hw->eeprom.ops.read(hw, (fw_offset +
+ IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
+ &fw_ptp_cfg_offset);
+
+ if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
+ goto fw_version_out;
+
+ /* get the firmware version */
+ hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
+ IXGBE_FW_PATCH_VERSION_4),
+ &fw_version);
+
+ if (fw_version > 0x5)
+ status = 0;
+
+fw_version_out:
+ return status;
+}
+
+/**
+ * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
+ * @hw: pointer to hardware structure
+ *
+ * Returns true if the LESM FW module is present and enabled. Otherwise
+ * returns false. Smart Speed must be disabled if LESM FW module is enabled.
+ **/
+static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
+{
+ bool lesm_enabled = false;
+ u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
+ s32 status;
+
+ /* get the offset to the Firmware Module block */
+ status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+
+ if ((status != 0) ||
+ (fw_offset == 0) || (fw_offset == 0xFFFF))
+ goto out;
+
+ /* get the offset to the LESM Parameters block */
+ status = hw->eeprom.ops.read(hw, (fw_offset +
+ IXGBE_FW_LESM_PARAMETERS_PTR),
+ &fw_lesm_param_offset);
+
+ if ((status != 0) ||
+ (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
+ goto out;
+
+ /* get the lesm state word */
+ status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
+ IXGBE_FW_LESM_STATE_1),
+ &fw_lesm_state);
+
+ if ((status == 0) &&
+ (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
+ lesm_enabled = true;
+
+out:
+ return lesm_enabled;
+}
+
+/**
+ * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
+ * fastest available method
+ *
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Retrieves 16 bit word(s) read from EEPROM
+ **/
+static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+ /*
+ * If EEPROM is detected and can be addressed using 14 bits,
+ * use EERD; otherwise use bit-bang
+ */
+ if ((eeprom->type == ixgbe_eeprom_spi) &&
+ (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
+ ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
+ data);
+ else
+ ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
+ words,
+ data);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_read_eeprom_82599 - Read EEPROM word using
+ * fastest available method
+ *
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM
+ **/
+static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+ u16 offset, u16 *data)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+ /*
+ * If EEPROM is detected and can be addressed using 14 bits,
+ * use EERD; otherwise use bit-bang
+ */
+ if ((eeprom->type == ixgbe_eeprom_spi) &&
+ (offset <= IXGBE_EERD_MAX_ADDR))
+ ret_val = ixgbe_read_eerd_generic(hw, offset, data);
+ else
+ ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
+
+ return ret_val;
+}
+
+static struct ixgbe_mac_operations mac_ops_82599 = {
+ .init_hw = &ixgbe_init_hw_generic,
+ .reset_hw = &ixgbe_reset_hw_82599,
+ .start_hw = &ixgbe_start_hw_82599,
+ .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
+ .get_media_type = &ixgbe_get_media_type_82599,
+ .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599,
+ .enable_rx_dma = &ixgbe_enable_rx_dma_82599,
+ .get_mac_addr = &ixgbe_get_mac_addr_generic,
+ .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
+ .get_device_caps = &ixgbe_get_device_caps_generic,
+ .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
+ .stop_adapter = &ixgbe_stop_adapter_generic,
+ .get_bus_info = &ixgbe_get_bus_info_generic,
+ .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
+ .read_analog_reg8 = &ixgbe_read_analog_reg8_82599,
+ .write_analog_reg8 = &ixgbe_write_analog_reg8_82599,
+ .setup_link = &ixgbe_setup_mac_link_82599,
+ .set_rxpba = &ixgbe_set_rxpba_generic,
+ .check_link = &ixgbe_check_mac_link_generic,
+ .get_link_capabilities = &ixgbe_get_link_capabilities_82599,
+ .led_on = &ixgbe_led_on_generic,
+ .led_off = &ixgbe_led_off_generic,
+ .blink_led_start = &ixgbe_blink_led_start_generic,
+ .blink_led_stop = &ixgbe_blink_led_stop_generic,
+ .set_rar = &ixgbe_set_rar_generic,
+ .clear_rar = &ixgbe_clear_rar_generic,
+ .set_vmdq = &ixgbe_set_vmdq_generic,
+ .clear_vmdq = &ixgbe_clear_vmdq_generic,
+ .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
+ .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
+ .enable_mc = &ixgbe_enable_mc_generic,
+ .disable_mc = &ixgbe_disable_mc_generic,
+ .clear_vfta = &ixgbe_clear_vfta_generic,
+ .set_vfta = &ixgbe_set_vfta_generic,
+ .fc_enable = &ixgbe_fc_enable_generic,
+ .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
+ .init_uta_tables = &ixgbe_init_uta_tables_generic,
+ .setup_sfp = &ixgbe_setup_sfp_modules_82599,
+ .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
+ .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
+ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
+ .release_swfw_sync = &ixgbe_release_swfw_sync,
+};
+
+static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
+ .init_params = &ixgbe_init_eeprom_params_generic,
+ .read = &ixgbe_read_eeprom_82599,
+ .read_buffer = &ixgbe_read_eeprom_buffer_82599,
+ .write = &ixgbe_write_eeprom_generic,
+ .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic,
+ .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
+ .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
+ .update_checksum = &ixgbe_update_eeprom_checksum_generic,
+};
+
+static struct ixgbe_phy_operations phy_ops_82599 = {
+ .identify = &ixgbe_identify_phy_82599,
+ .identify_sfp = &ixgbe_identify_sfp_module_generic,
+ .init = &ixgbe_init_phy_ops_82599,
+ .reset = &ixgbe_reset_phy_generic,
+ .read_reg = &ixgbe_read_phy_reg_generic,
+ .write_reg = &ixgbe_write_phy_reg_generic,
+ .setup_link = &ixgbe_setup_phy_link_generic,
+ .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
+ .read_i2c_byte = &ixgbe_read_i2c_byte_generic,
+ .write_i2c_byte = &ixgbe_write_i2c_byte_generic,
+ .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
+ .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
+ .check_overtemp = &ixgbe_tn_check_overtemp,
+};
+
+struct ixgbe_info ixgbe_82599_info = {
+ .mac = ixgbe_mac_82599EB,
+ .get_invariants = &ixgbe_get_invariants_82599,
+ .mac_ops = &mac_ops_82599,
+ .eeprom_ops = &eeprom_ops_82599,
+ .phy_ops = &phy_ops_82599,
+ .mbx_ops = &mbx_ops_generic,
+};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
new file mode 100644
index 000000000000..35fa444556b3
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -0,0 +1,3526 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/netdevice.h>
+
+#include "ixgbe.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
+static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
+static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
+static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
+static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
+static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
+ u16 count);
+static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
+static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
+static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
+static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
+
+static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
+static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
+static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
+static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
+static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
+static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
+static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
+static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
+static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+ u16 offset);
+static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+
+/**
+ * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by filling the bus info structure and media type,
+ * clearing all on-chip counters, initializing the receive address
+ * registers, multicast table and VLAN filter table, calling the routine
+ * to set up link and flow control settings, and leaving the transmit
+ * and receive units disabled and uninitialized
+ **/
+s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
+{
+ u32 ctrl_ext;
+
+ /* Set the media type */
+ hw->phy.media_type = hw->mac.ops.get_media_type(hw);
+
+ /* Identify the PHY */
+ hw->phy.ops.identify(hw);
+
+ /* Clear the VLAN filter table */
+ hw->mac.ops.clear_vfta(hw);
+
+ /* Clear statistics registers */
+ hw->mac.ops.clear_hw_cntrs(hw);
+
+ /* Set No Snoop Disable */
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Setup flow control */
+ ixgbe_setup_fc(hw, 0);
+
+ /* Clear adapter stopped flag */
+ hw->adapter_stopped = false;
+
+ return 0;
+}
+
+/**
+ * ixgbe_start_hw_gen2 - Init sequence for common device family
+ * @hw: pointer to hw structure
+ *
+ * Performs the init sequence common to the second generation
+ * of 10 GbE devices.
+ * Devices in the second generation:
+ * 82599
+ * X540
+ **/
+s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u32 regval;
+
+ /* Clear the rate limiters */
+ for (i = 0; i < hw->mac.max_tx_queues; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
+ }
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Disable relaxed ordering */
+ for (i = 0; i < hw->mac.max_tx_queues; i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
+ regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
+ }
+
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+ IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_init_hw_generic - Generic hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the hardware by resetting it, filling the bus info
+ * structure and media type, clearing all on-chip counters, initializing
+ * the receive address registers, multicast table and VLAN filter table,
+ * calling the routine to set up link and flow control settings, and
+ * leaving the transmit and receive units disabled and uninitialized
+ **/
+s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ /* Reset the hardware */
+ status = hw->mac.ops.reset_hw(hw);
+
+ if (status == 0) {
+ /* Start the HW */
+ status = hw->mac.ops.start_hw(hw);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
+ * @hw: pointer to hardware structure
+ *
+ * Clears all hardware statistics counters by reading them from the hardware.
+ * Statistics counters are clear on read.
+ **/
+s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
+{
+ u16 i = 0;
+
+ IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+ IXGBE_READ_REG(hw, IXGBE_ILLERRC);
+ IXGBE_READ_REG(hw, IXGBE_ERRBC);
+ IXGBE_READ_REG(hw, IXGBE_MSPDC);
+ for (i = 0; i < 8; i++)
+ IXGBE_READ_REG(hw, IXGBE_MPC(i));
+
+ IXGBE_READ_REG(hw, IXGBE_MLFC);
+ IXGBE_READ_REG(hw, IXGBE_MRFC);
+ IXGBE_READ_REG(hw, IXGBE_RLEC);
+ IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+ IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+ if (hw->mac.type >= ixgbe_mac_82599EB) {
+ IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+ IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+ } else {
+ IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+ IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+ }
+
+ for (i = 0; i < 8; i++) {
+ IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+ IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+ if (hw->mac.type >= ixgbe_mac_82599EB) {
+ IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+ } else {
+ IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ }
+ }
+ if (hw->mac.type >= ixgbe_mac_82599EB)
+ for (i = 0; i < 8; i++)
+ IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
+ IXGBE_READ_REG(hw, IXGBE_PRC64);
+ IXGBE_READ_REG(hw, IXGBE_PRC127);
+ IXGBE_READ_REG(hw, IXGBE_PRC255);
+ IXGBE_READ_REG(hw, IXGBE_PRC511);
+ IXGBE_READ_REG(hw, IXGBE_PRC1023);
+ IXGBE_READ_REG(hw, IXGBE_PRC1522);
+ IXGBE_READ_REG(hw, IXGBE_GPRC);
+ IXGBE_READ_REG(hw, IXGBE_BPRC);
+ IXGBE_READ_REG(hw, IXGBE_MPRC);
+ IXGBE_READ_REG(hw, IXGBE_GPTC);
+ IXGBE_READ_REG(hw, IXGBE_GORCL);
+ IXGBE_READ_REG(hw, IXGBE_GORCH);
+ IXGBE_READ_REG(hw, IXGBE_GOTCL);
+ IXGBE_READ_REG(hw, IXGBE_GOTCH);
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ for (i = 0; i < 8; i++)
+ IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+ IXGBE_READ_REG(hw, IXGBE_RUC);
+ IXGBE_READ_REG(hw, IXGBE_RFC);
+ IXGBE_READ_REG(hw, IXGBE_ROC);
+ IXGBE_READ_REG(hw, IXGBE_RJC);
+ IXGBE_READ_REG(hw, IXGBE_MNGPRC);
+ IXGBE_READ_REG(hw, IXGBE_MNGPDC);
+ IXGBE_READ_REG(hw, IXGBE_MNGPTC);
+ IXGBE_READ_REG(hw, IXGBE_TORL);
+ IXGBE_READ_REG(hw, IXGBE_TORH);
+ IXGBE_READ_REG(hw, IXGBE_TPR);
+ IXGBE_READ_REG(hw, IXGBE_TPT);
+ IXGBE_READ_REG(hw, IXGBE_PTC64);
+ IXGBE_READ_REG(hw, IXGBE_PTC127);
+ IXGBE_READ_REG(hw, IXGBE_PTC255);
+ IXGBE_READ_REG(hw, IXGBE_PTC511);
+ IXGBE_READ_REG(hw, IXGBE_PTC1023);
+ IXGBE_READ_REG(hw, IXGBE_PTC1522);
+ IXGBE_READ_REG(hw, IXGBE_MPTC);
+ IXGBE_READ_REG(hw, IXGBE_BPTC);
+ for (i = 0; i < 16; i++) {
+ IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+ IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+ if (hw->mac.type >= ixgbe_mac_82599EB) {
+ IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
+ IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+ } else {
+ IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+ }
+ }
+
+ if (hw->mac.type == ixgbe_mac_X540) {
+ if (hw->phy.id == 0)
+ hw->phy.ops.identify(hw);
+ hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i);
+ hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i);
+ hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i);
+ hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the EEPROM.
+ **/
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ s32 ret_val;
+ u16 data;
+ u16 pba_ptr;
+ u16 offset;
+ u16 length;
+
+ if (pba_num == NULL) {
+ hw_dbg(hw, "PBA string buffer was null\n");
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
+ if (ret_val) {
+ hw_dbg(hw, "NVM Read Error\n");
+ return ret_val;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
+ if (ret_val) {
+ hw_dbg(hw, "NVM Read Error\n");
+ return ret_val;
+ }
+
+ /*
+ * If data is not the pointer guard, the PBA must be in legacy format,
+ * which means pba_ptr is actually our second data word for the PBA
+ * number and we can decode it into an ASCII string
+ */
+ if (data != IXGBE_PBANUM_PTR_GUARD) {
+ hw_dbg(hw, "NVM PBA number is not stored as string\n");
+
+ /* we will need 11 characters to store the PBA */
+ if (pba_num_size < 11) {
+ hw_dbg(hw, "PBA string buffer too small\n");
+ return IXGBE_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pba_ptr */
+ pba_num[0] = (data >> 12) & 0xF;
+ pba_num[1] = (data >> 8) & 0xF;
+ pba_num[2] = (data >> 4) & 0xF;
+ pba_num[3] = data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
+ }
+
+ return 0;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
+ if (ret_val) {
+ hw_dbg(hw, "NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ hw_dbg(hw, "NVM PBA number section invalid length\n");
+ return IXGBE_ERR_PBA_SECTION;
+ }
+
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ hw_dbg(hw, "PBA string buffer too small\n");
+ return IXGBE_ERR_NO_SPACE;
+ }
+
+ /* trim pba length from start of string */
+ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
+ if (ret_val) {
+ hw_dbg(hw, "NVM Read Error\n");
+ return ret_val;
+ }
+ pba_num[offset * 2] = (u8)(data >> 8);
+ pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
+ }
+ pba_num[offset * 2] = '\0';
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_mac_addr_generic - Generic get MAC address
+ * @hw: pointer to hardware structure
+ * @mac_addr: Adapter MAC address
+ *
+ * Reads the adapter's MAC address from the first Receive Address Register (RAR0).
+ * A reset of the adapter must be performed prior to calling this function
+ * in order for the MAC address to have been loaded from the EEPROM into RAR0
+ **/
+s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
+
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
+ rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
+
+ for (i = 0; i < 4; i++)
+ mac_addr[i] = (u8)(rar_low >> (i*8));
+
+ for (i = 0; i < 2; i++)
+ mac_addr[i+4] = (u8)(rar_high >> (i*8));
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_bus_info_generic - Generic set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
+ **/
+s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u16 link_status;
+
+ hw->bus.type = ixgbe_bus_type_pci_express;
+
+ /* Get the negotiated link width and speed from PCI config space */
+ pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS,
+ &link_status);
+
+ switch (link_status & IXGBE_PCI_LINK_WIDTH) {
+ case IXGBE_PCI_LINK_WIDTH_1:
+ hw->bus.width = ixgbe_bus_width_pcie_x1;
+ break;
+ case IXGBE_PCI_LINK_WIDTH_2:
+ hw->bus.width = ixgbe_bus_width_pcie_x2;
+ break;
+ case IXGBE_PCI_LINK_WIDTH_4:
+ hw->bus.width = ixgbe_bus_width_pcie_x4;
+ break;
+ case IXGBE_PCI_LINK_WIDTH_8:
+ hw->bus.width = ixgbe_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus.width = ixgbe_bus_width_unknown;
+ break;
+ }
+
+ switch (link_status & IXGBE_PCI_LINK_SPEED) {
+ case IXGBE_PCI_LINK_SPEED_2500:
+ hw->bus.speed = ixgbe_bus_speed_2500;
+ break;
+ case IXGBE_PCI_LINK_SPEED_5000:
+ hw->bus.speed = ixgbe_bus_speed_5000;
+ break;
+ default:
+ hw->bus.speed = ixgbe_bus_speed_unknown;
+ break;
+ }
+
+ mac->ops.set_lan_id(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ * @hw: pointer to the HW structure
+ *
+ * Determines the LAN function id by reading memory-mapped registers
+ * and swaps the port value if requested.
+ **/
+void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bus_info *bus = &hw->bus;
+ u32 reg;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
+ bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
+ bus->lan_id = bus->func;
+
+ /* check for a port swap */
+ reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+ if (reg & IXGBE_FACTPS_LFS)
+ bus->func ^= 0x1;
+}
+
+/**
+ * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
+{
+ u32 reg_val;
+ u16 i;
+
+ /*
+ * Set the adapter_stopped flag so other driver functions stop touching
+ * the hardware
+ */
+ hw->adapter_stopped = true;
+
+ /* Disable the receive unit */
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
+
+ /* Clear interrupt mask to stop interrupts from being generated */
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
+
+ /* Clear any pending interrupts, flush previous writes */
+ IXGBE_READ_REG(hw, IXGBE_EICR);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ for (i = 0; i < hw->mac.max_tx_queues; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
+
+ /* Disable the receive unit by stopping each queue */
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ reg_val &= ~IXGBE_RXDCTL_ENABLE;
+ reg_val |= IXGBE_RXDCTL_SWFLSH;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
+ }
+
+ /* flush all of the queue disables */
+ IXGBE_WRITE_FLUSH(hw);
+ usleep_range(1000, 2000);
+
+ /*
+ * Prevent the PCI-E bus from hanging by disabling PCI-E master
+ * access and verifying there are no pending requests
+ */
+ return ixgbe_disable_pcie_master(hw);
+}
+
+/**
+ * ixgbe_led_on_generic - Turns on the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @index: led number to turn on
+ **/
+s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
+{
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ /* To turn on the LED, set mode to ON. */
+ led_reg &= ~IXGBE_LED_MODE_MASK(index);
+ led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_led_off_generic - Turns off the software controllable LEDs.
+ * @hw: pointer to hardware structure
+ * @index: led number to turn off
+ **/
+s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
+{
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ /* To turn off the LED, set mode to OFF. */
+ led_reg &= ~IXGBE_LED_MODE_MASK(index);
+ led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 eec;
+ u16 eeprom_size;
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->type = ixgbe_eeprom_none;
+ /*
+ * Set the default semaphore delay to 10ms, which is a
+ * well-tested value
+ */
+ eeprom->semaphore_delay = 10;
+ /* Clear EEPROM page size, it will be initialized as needed */
+ eeprom->word_page_size = 0;
+
+ /*
+ * Check for EEPROM present first.
+ * If not present, leave as none
+ */
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ if (eec & IXGBE_EEC_PRES) {
+ eeprom->type = ixgbe_eeprom_spi;
+
+ /*
+ * SPI EEPROM is assumed here. This code would need to
+ * change if a future EEPROM is not SPI.
+ */
+ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+ IXGBE_EEC_SIZE_SHIFT);
+ eeprom->word_size = 1 << (eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ }
+
+ if (eec & IXGBE_EEC_ADDR_SIZE)
+ eeprom->address_bits = 16;
+ else
+ eeprom->address_bits = 8;
+ hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: "
+ "%d\n", eeprom->type, eeprom->word_size,
+ eeprom->address_bits);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to write
+ * @words: number of words
+ * @data: 16 bit word(s) to write to EEPROM
+ *
+ * Writes 16 bit word(s) to EEPROM through the bit-bang method
+ **/
+s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status = 0;
+ u16 i, count;
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ if (offset + words > hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ /*
+ * The EEPROM page size cannot be queried from the chip. We detect it
+ * lazily here, which is only worth doing when writing a large buffer.
+ */
+ if ((hw->eeprom.word_page_size == 0) &&
+ (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
+ ixgbe_detect_eeprom_page_size_generic(hw, offset);
+
+ /*
+ * We cannot hold the synchronization semaphores for too long without
+ * starving other entities, yet it is more efficient to write in
+ * bursts than to synchronize access for each word.
+ */
+ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
+ count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
+ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
+ count, &data[i]);
+
+ if (status != 0)
+ break;
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of word(s)
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * If ixgbe_eeprom_update_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+ u16 word;
+ u16 page_size;
+ u16 i;
+ u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
+
+ /* Prepare the EEPROM for writing */
+ status = ixgbe_acquire_eeprom(hw);
+
+ if (status == 0) {
+ if (ixgbe_ready_eeprom(hw) != 0) {
+ ixgbe_release_eeprom(hw);
+ status = IXGBE_ERR_EEPROM;
+ }
+ }
+
+ if (status == 0) {
+ for (i = 0; i < words; i++) {
+ ixgbe_standby_eeprom(hw);
+
+ /* Send the WRITE ENABLE command (8-bit opcode) */
+ ixgbe_shift_out_eeprom_bits(hw,
+ IXGBE_EEPROM_WREN_OPCODE_SPI,
+ IXGBE_EEPROM_OPCODE_BITS);
+
+ ixgbe_standby_eeprom(hw);
+
+ /*
+ * Some SPI eeproms use the 8th address bit embedded
+ * in the opcode
+ */
+ if ((hw->eeprom.address_bits == 8) &&
+ ((offset + i) >= 128))
+ write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
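+ /*
+ * The driver addresses the EEPROM in 16-bit words, but the device
+ * is byte-addressed on the SPI bus, hence the (offset + i) * 2
+ * conversion below.
+ */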
+ ixgbe_shift_out_eeprom_bits(hw, write_opcode,
+ IXGBE_EEPROM_OPCODE_BITS);
+ ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
+ hw->eeprom.address_bits);
+
+ page_size = hw->eeprom.word_page_size;
+
+ /* Send the data in burst via SPI */
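+ /*
+ * The burst must stop at a page boundary: during a page write the
+ * EEPROM's internal address counter wraps within the current page
+ * instead of advancing to the next one.
+ */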
+ do {
+ word = data[i];
+ word = (word >> 8) | (word << 8);
+ ixgbe_shift_out_eeprom_bits(hw, word, 16);
+
+ if (page_size == 0)
+ break;
+
+ /* do not wrap around page */
+ if (((offset + i) & (page_size - 1)) ==
+ (page_size - 1))
+ break;
+ } while (++i < words);
+
+ ixgbe_standby_eeprom(hw);
+ usleep_range(10000, 20000);
+ }
+ /* Done with writing - release the EEPROM */
+ ixgbe_release_eeprom(hw);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @data: 16 bit word to be written to the EEPROM
+ *
+ * If ixgbe_eeprom_update_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status;
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @words: number of word(s)
+ * @data: 16 bit word(s) read from EEPROM
+ *
+ * Reads 16 bit word(s) from EEPROM through bit-bang method
+ **/
+s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status = 0;
+ u16 i, count;
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ if (offset + words > hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ /*
+ * We cannot hold the synchronization semaphores for too long without
+ * starving other entities, yet it is more efficient to read in
+ * bursts than to synchronize access for each word.
+ */
+ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
+ count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
+ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
+
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
+ count, &data[i]);
+
+ if (status != 0)
+ break;
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @words: number of word(s)
+ * @data: read 16 bit word(s) from EEPROM
+ *
+ * Reads 16 bit word(s) from EEPROM through bit-bang method
+ **/
+static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+ u16 word_in;
+ u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
+ u16 i;
+
+ /* Prepare the EEPROM for reading */
+ status = ixgbe_acquire_eeprom(hw);
+
+ if (status == 0) {
+ if (ixgbe_ready_eeprom(hw) != 0) {
+ ixgbe_release_eeprom(hw);
+ status = IXGBE_ERR_EEPROM;
+ }
+ }
+
+ if (status == 0) {
+ for (i = 0; i < words; i++) {
+ ixgbe_standby_eeprom(hw);
+ /*
+ * Some SPI eeproms use the 8th address bit embedded
+ * in the opcode
+ */
+ if ((hw->eeprom.address_bits == 8) &&
+ ((offset + i) >= 128))
+ read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+ /* Send the READ command (opcode + addr) */
+ ixgbe_shift_out_eeprom_bits(hw, read_opcode,
+ IXGBE_EEPROM_OPCODE_BITS);
+ ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
+ hw->eeprom.address_bits);
+
+ /* Read the data. */
+ word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
+ data[i] = (word_in >> 8) | (word_in << 8);
+ }
+
+ /* End this read operation */
+ ixgbe_release_eeprom(hw);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: read 16 bit value from EEPROM
+ *
+ * Reads 16 bit value from EEPROM through bit-bang method
+ **/
+s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 *data)
+{
+ s32 status;
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of word(s)
+ * @data: 16 bit word(s) from the EEPROM
+ *
+ * Reads 16 bit word(s) from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ u32 eerd;
+ s32 status = 0;
+ u32 i;
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
+ eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
+ IXGBE_EEPROM_RW_REG_START;
+
+ IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
+
+ if (status == 0) {
+ data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
+ IXGBE_EEPROM_RW_REG_DATA);
+ } else {
+ hw_dbg(hw, "Eeprom read timed out\n");
+ goto out;
+ }
+ }
+out:
+ return status;
+}
+
+/**
+ * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be used as a scratch pad
+ *
+ * Discovers the EEPROM page size by writing marching data at the given
+ * offset. This function is called only when we are writing a new large
+ * buffer at the given offset, so the data will be overwritten anyway.
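+ *
+ * For example, if the true page size is 32 words and the write starts
+ * on a page boundary, streaming IXGBE_EEPROM_PAGE_SIZE_MAX marching
+ * words (0, 1, 2, ...) wraps within the page, so reading back the word
+ * at @offset yields IXGBE_EEPROM_PAGE_SIZE_MAX - 32, and the
+ * subtraction below recovers the page size.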
+ **/
+static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+ u16 offset)
+{
+ u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
+ s32 status = 0;
+ u16 i;
+
+ for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
+ data[i] = i;
+
+ hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
+ IXGBE_EEPROM_PAGE_SIZE_MAX, data);
+ hw->eeprom.word_page_size = 0;
+ if (status != 0)
+ goto out;
+
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+ if (status != 0)
+ goto out;
+
+ /*
+ * When writing in burst more than the actual page size
+ * EEPROM address wraps around current page.
+ */
+ hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
+
+ hw_dbg(hw, "Detected EEPROM page size = %d words.",
+ hw->eeprom.word_page_size);
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_eerd_generic - Read EEPROM word using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
+}
+
+/**
+ * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: 16 bit word(s) to write to the EEPROM
+ *
+ * Writes 16 bit word(s) to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ u32 eewr;
+ s32 status = 0;
+ u16 i;
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
+ eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+ (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
+ IXGBE_EEPROM_RW_REG_START;
+
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+ if (status != 0) {
+ hw_dbg(hw, "Eeprom write EEWR timed out\n");
+ goto out;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
+
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+ if (status != 0) {
+ hw_dbg(hw, "Eeprom write EEWR timed out\n");
+ goto out;
+ }
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Writes a 16 bit word to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
+}
+
+/**
+ * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
+ * @hw: pointer to hardware structure
+ * @ee_reg: EEPROM flag for polling
+ *
+ * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
+ * read or write is done respectively.
+ **/
+static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+{
+ u32 i;
+ u32 reg;
+ s32 status = IXGBE_ERR_EEPROM;
+
+ for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
+ if (ee_reg == IXGBE_NVM_POLL_READ)
+ reg = IXGBE_READ_REG(hw, IXGBE_EERD);
+ else
+ reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
+
+ if (reg & IXGBE_EEPROM_RW_REG_DONE) {
+ status = 0;
+ break;
+ }
+ udelay(5);
+ }
+ return status;
+}
+
+/**
+ * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ *
+ * Prepares EEPROM for access using bit-bang method. This function should
+ * be called before issuing a command to the EEPROM.
+ **/
+static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ u32 eec;
+ u32 i;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ if (status == 0) {
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ /* Request EEPROM Access */
+ eec |= IXGBE_EEC_REQ;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+
+ for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ if (eec & IXGBE_EEC_GNT)
+ break;
+ udelay(5);
+ }
+
+ /* Release if grant not acquired */
+ if (!(eec & IXGBE_EEC_GNT)) {
+ eec &= ~IXGBE_EEC_REQ;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ hw_dbg(hw, "Could not acquire EEPROM grant\n");
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ status = IXGBE_ERR_EEPROM;
+ }
+
+ /* Setup EEPROM for Read/Write */
+ if (status == 0) {
+ /* Clear CS and SK */
+ eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(1);
+ }
+ }
+ return status;
+}
+
+/**
+ * ixgbe_get_eeprom_semaphore - Get hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
+ **/
+static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_EEPROM;
+ u32 timeout = 2000;
+ u32 i;
+ u32 swsm;
+
+ /* Get SMBI software semaphore between device drivers first */
+ for (i = 0; i < timeout; i++) {
+ /*
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ if (!(swsm & IXGBE_SWSM_SMBI)) {
+ status = 0;
+ break;
+ }
+ udelay(50);
+ }
+
+ if (i == timeout) {
+ hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore "
+ "not granted.\n");
+ /*
+ * this release is particularly important because our attempts
+ * above to get the semaphore may have succeeded, and if there
+ * was a timeout, we should unconditionally clear the semaphore
+ * bits to free the driver to make progress
+ */
+ ixgbe_release_eeprom_semaphore(hw);
+
+ udelay(50);
+ /*
+ * one last try
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ if (!(swsm & IXGBE_SWSM_SMBI))
+ status = 0;
+ }
+
+ /* Now get the semaphore between SW/FW through the SWESMBI bit */
+ if (status == 0) {
+ for (i = 0; i < timeout; i++) {
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+
+ /* Set the SW EEPROM semaphore bit to request access */
+ swsm |= IXGBE_SWSM_SWESMBI;
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
+ /*
+ * If we set the bit successfully then we got the
+ * semaphore.
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ if (swsm & IXGBE_SWSM_SWESMBI)
+ break;
+
+ udelay(50);
+ }
+
+ /*
+ * Release semaphores and return error if SW EEPROM semaphore
+ * was not granted because we don't have access to the EEPROM
+ */
+ if (i >= timeout) {
+ hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
+ "not granted.\n");
+ ixgbe_release_eeprom_semaphore(hw);
+ status = IXGBE_ERR_EEPROM;
+ }
+ } else {
+ hw_dbg(hw, "Software semaphore SMBI between device drivers "
+ "not granted.\n");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_release_eeprom_semaphore - Release hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function clears hardware semaphore bits.
+ **/
+static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
+{
+ u32 swsm;
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+
+ /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
+ swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_ready_eeprom - Polls for EEPROM ready
+ * @hw: pointer to hardware structure
+ **/
+static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ u16 i;
+ u8 spi_stat_reg;
+
+ /*
+ * Read "Status Register" repeatedly until the LSB is cleared. The
+ * EEPROM will signal that the command has been completed by clearing
+ * bit 0 of the internal status register. If it's not cleared within
+ * 5 milliseconds, then error out.
+ */
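+ /* i counts elapsed microseconds: step by 5 to match the udelay(5) */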
+ for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
+ ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
+ IXGBE_EEPROM_OPCODE_BITS);
+ spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
+ if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
+ break;
+
+ udelay(5);
+ ixgbe_standby_eeprom(hw);
+ }
+
+ /*
+ * On some parts, SPI write time could vary from 0-20 ms on 3.3V
+ * devices (and only 0-5 ms on 5V devices)
+ */
+ if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
+ hw_dbg(hw, "SPI EEPROM Status error\n");
+ status = IXGBE_ERR_EEPROM;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
+ * @hw: pointer to hardware structure
+ **/
+static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
+{
+ u32 eec;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ /* Toggle CS to flush commands */
+ eec |= IXGBE_EEC_CS;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(1);
+ eec &= ~IXGBE_EEC_CS;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(1);
+}
+
+/**
+ * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
+ * @hw: pointer to hardware structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ **/
+static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
+ u16 count)
+{
+ u32 eec;
+ u32 mask;
+ u32 i;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ /*
+ * Mask is used to shift "count" bits of "data" out to the EEPROM
+ * one bit at a time. Determine the starting bit based on count
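+ * (for count = 8 the mask starts at 0x80, so data is clocked out MSB
+ * first).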
+ */
+ mask = 0x01 << (count - 1);
+
+ for (i = 0; i < count; i++) {
+ /*
+ * A "1" is shifted out to the EEPROM by setting bit "DI" to a
+ * "1", and then raising and then lowering the clock (the SK
+ * bit controls the clock input to the EEPROM). A "0" is
+ * shifted out to the EEPROM by setting "DI" to "0" and then
+ * raising and then lowering the clock.
+ */
+ if (data & mask)
+ eec |= IXGBE_EEC_DI;
+ else
+ eec &= ~IXGBE_EEC_DI;
+
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+
+ udelay(1);
+
+ ixgbe_raise_eeprom_clk(hw, &eec);
+ ixgbe_lower_eeprom_clk(hw, &eec);
+
+ /*
+ * Shift mask to signify next bit of data to shift in to the
+ * EEPROM
+ */
+ mask = mask >> 1;
+ }
+
+ /* We leave the "DI" bit set to "0" when we leave this routine. */
+ eec &= ~IXGBE_EEC_DI;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
+ * @hw: pointer to hardware structure
+ * @count: number of bits to shift in
+ **/
+static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
+{
+ u32 eec;
+ u32 i;
+ u16 data = 0;
+
+ /*
+ * In order to read a register from the EEPROM, we need to shift
+ * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
+ * the clock input to the EEPROM (setting the SK bit), and then reading
+ * the value of the "DO" bit. During this "shifting in" process the
+ * "DI" bit should always be clear.
+ */
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
+
+ for (i = 0; i < count; i++) {
+ data = data << 1;
+ ixgbe_raise_eeprom_clk(hw, &eec);
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ eec &= ~(IXGBE_EEC_DI);
+ if (eec & IXGBE_EEC_DO)
+ data |= 1;
+
+ ixgbe_lower_eeprom_clk(hw, &eec);
+ }
+
+ return data;
+}
+
+/**
+ * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
+ * @hw: pointer to hardware structure
+ * @eec: EEC register's current value
+ **/
+static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
+{
+ /*
+ * Raise the clock input to the EEPROM
+ * (setting the SK bit), then delay
+ */
+ *eec = *eec | IXGBE_EEC_SK;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(1);
+}
+
+/**
+ * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
+ * @hw: pointer to hardware structure
+ * @eec: EEC register's current value
+ **/
+static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
+{
+ /*
+ * Lower the clock input to the EEPROM (clearing the SK bit), then
+ * delay
+ */
+ *eec = *eec & ~IXGBE_EEC_SK;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(1);
+}
+
+/**
+ * ixgbe_release_eeprom - Release EEPROM, release semaphores
+ * @hw: pointer to hardware structure
+ **/
+static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
+{
+ u32 eec;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ eec |= IXGBE_EEC_CS; /* Pull CS high */
+ eec &= ~IXGBE_EEC_SK; /* Lower SCK */
+
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+
+ udelay(1);
+
+ /* Stop requesting EEPROM access */
+ eec &= ~IXGBE_EEC_REQ;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+ /*
+ * Delay before attempting to obtain the semaphore again to allow FW
+ * access; semaphore_delay is in ms but usleep_range() takes us.
+ */
+ usleep_range(hw->eeprom.semaphore_delay * 1000,
+ hw->eeprom.semaphore_delay * 2000);
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
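+ *
+ * The checksum is defined so that the covered words plus the stored
+ * checksum word sum to IXGBE_EEPROM_SUM modulo 2^16, hence the final
+ * subtraction below.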
+ **/
+u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+{
+ u16 i;
+ u16 j;
+ u16 checksum = 0;
+ u16 length = 0;
+ u16 pointer = 0;
+ u16 word = 0;
+
+ /* Include 0x0-0x3F in the checksum */
+ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
+ if (hw->eeprom.ops.read(hw, i, &word) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+ checksum += word;
+ }
+
+ /* Include all data from pointers except for the fw pointer */
+ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+ hw->eeprom.ops.read(hw, i, &pointer);
+
+ /* Make sure the pointer seems valid */
+ if (pointer != 0xFFFF && pointer != 0) {
+ hw->eeprom.ops.read(hw, pointer, &length);
+
+ if (length != 0xFFFF && length != 0) {
+ for (j = pointer+1; j <= pointer+length; j++) {
+ hw->eeprom.ops.read(hw, j, &word);
+ checksum += word;
+ }
+ }
+ }
+ }
+
+ checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+ return checksum;
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+ u16 *checksum_val)
+{
+ s32 status;
+ u16 checksum;
+ u16 read_checksum = 0;
+
+ /*
+ * Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+
+ if (status == 0) {
+ checksum = hw->eeprom.ops.calc_checksum(hw);
+
+ hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
+
+ /*
+ * Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum)
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+ } else {
+ hw_dbg(hw, "EEPROM read failed\n");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 checksum;
+
+ /*
+ * Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+
+ if (status == 0) {
+ checksum = hw->eeprom.ops.calc_checksum(hw);
+ status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
+ checksum);
+ } else {
+ hw_dbg(hw, "EEPROM read failed\n");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_validate_mac_addr - Validate MAC address
+ * @mac_addr: pointer to MAC address.
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address
+ **/
+s32 ixgbe_validate_mac_addr(u8 *mac_addr)
+{
+ s32 status = 0;
+
+ /* Make sure it is not a multicast address */
+ if (IXGBE_IS_MULTICAST(mac_addr))
+ status = IXGBE_ERR_INVALID_MAC_ADDR;
+ /* Not a broadcast address */
+ else if (IXGBE_IS_BROADCAST(mac_addr))
+ status = IXGBE_ERR_INVALID_MAC_ADDR;
+ /* Reject the zero address */
+ else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
+ status = IXGBE_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
+
+/**
+ * ixgbe_set_rar_generic - Set Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq "set" or "pool" index
+ * @enable_addr: set flag that address is active
+ *
+ * Puts an ethernet address into a receive address register.
+ **/
+s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr)
+{
+ u32 rar_low, rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ /* Make sure we are using a valid rar index range */
+ if (index >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", index);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ /* setup VMDq pool selection before this RAR gets enabled */
+ hw->mac.ops.set_vmdq(hw, index, vmdq);
+
+ /*
+ * HW expects these in little endian so we reverse the byte
+ * order from network order (big endian) to little endian
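+ * (e.g. 00:11:22:33:44:55 yields RAL = 0x33221100 and 0x5544 in the
+ * low 16 bits of RAH).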
+ */
+ rar_low = ((u32)addr[0] |
+ ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) |
+ ((u32)addr[3] << 24));
+ /*
+ * Some parts put the VMDq setting in the extra RAH bits,
+ * so save everything except the lower 16 bits that hold part
+ * of the address and the address valid bit.
+ */
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+ rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+ rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
+
+ if (enable_addr != 0)
+ rar_high |= IXGBE_RAH_AV;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+
+ return 0;
+}
+
+/**
+ * ixgbe_clear_rar_generic - Remove Rx address register
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ *
+ * Clears an ethernet address from a receive address register.
+ **/
+s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
+{
+ u32 rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ /* Make sure we are using a valid rar index range */
+ if (index >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", index);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ /*
+ * Some parts put the VMDq setting in the extra RAH bits,
+ * so save everything except the lower 16 bits that hold part
+ * of the address and the address valid bit.
+ */
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+ rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+
+ /* clear VMDq pool/queue selection for this RAR */
+ hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
+
+ return 0;
+}
+
+/**
+ * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
+ * @hw: pointer to hardware structure
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ **/
+s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ /*
+ * If the current mac address is valid, assume it is a software override
+ * to the permanent address.
+ * Otherwise, use the permanent address from the eeprom.
+ */
+ if (ixgbe_validate_mac_addr(hw->mac.addr) ==
+ IXGBE_ERR_INVALID_MAC_ADDR) {
+ /* Get the MAC address from the RAR0 for later reference */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+ hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
+ } else {
+ /* Setup the receive address. */
+ hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
+ hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
+
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+ /* clear VMDq pool/queue selection for RAR 0 */
+ hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
+ }
+ hw->addr_ctrl.overflow_promisc = 0;
+
+ hw->addr_ctrl.rar_used_count = 1;
+
+ /* Zero out the other receive addresses. */
+ hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
+ for (i = 1; i < rar_entries; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+ }
+
+ /* Clear the MTA */
+ hw->addr_ctrl.mta_in_use = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+ hw_dbg(hw, " Clearing MTA\n");
+ for (i = 0; i < hw->mac.mcft_size; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+
+ if (hw->mac.ops.init_uta_tables)
+ hw->mac.ops.init_uta_tables(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_mta_vector - Determines bit-vector in multicast table to set
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts 12 bits from a multicast address to determine which
+ * bit-vector to set in the multicast table. The hardware uses 12 bits
+ * of incoming rx multicast addresses to determine the bit-vector to
+ * check in the MTA. Which of the 4 combinations of 12 bits the
+ * hardware uses is set
+ * by the MO field of the MCSTCTRL. The MO field is set during initialization
+ * to mc_filter_type.
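+ *
+ * For example, with mc_filter_type 0 an address ending in the bytes
+ * 0xA4, 0x5E gives mc_addr[4] = 0xA4 and mc_addr[5] = 0x5E, so the
+ * vector is (0xA4 >> 4) | (0x5E << 4) = 0x5EA.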
+ **/
+static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector = 0;
+
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ hw_dbg(hw, "MC filter type param set incorrectly\n");
+ break;
+ }
+
+ /* vector can only be 12-bits or boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
+
+/**
+ * ixgbe_set_mta - Set bit-vector in multicast table
+ * @hw: pointer to hardware structure
+ * @mc_addr: Multicast address to set in the table
+ *
+ * Sets the bit-vector in the multicast table.
+ **/
+static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector;
+ u32 vector_bit;
+ u32 vector_reg;
+
+ hw->addr_ctrl.mta_in_use++;
+
+ vector = ixgbe_mta_vector(hw, mc_addr);
+ hw_dbg(hw, " bit-vector = 0x%03X\n", vector);
+
+ /*
+ * The MTA is a register array of 128 32-bit registers. It is treated
+ * like an array of 4096 bits. We want to set bit
+ * BitArray[vector_value]. So we figure out what register the bit is
+ * in, read it, OR in the new bit, then write back the new value. The
+ * register is determined by the upper 7 bits of the vector value and
+ * the bit within that register is determined by the lower 5 bits of
+ * the value.
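+ * Continuing the example above, vector 0x5EA selects register 0x2F
+ * (0x5EA >> 5) and bit 0x0A (0x5EA & 0x1F).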
+ */
+ vector_reg = (vector >> 5) & 0x7F;
+ vector_bit = vector & 0x1F;
+ hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
+}
+
+/**
+ * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
+ * @hw: pointer to hardware structure
+ * @netdev: pointer to net device structure
+ *
+ * The given list replaces any existing list. Clears the MC addrs from receive
+ * address registers and the multicast table. Uses unused receive address
+ * registers for the first multicast addresses, and hashes the rest into the
+ * multicast table.
+ **/
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+ struct net_device *netdev)
+{
+ struct netdev_hw_addr *ha;
+ u32 i;
+
+ /*
+ * Set the new number of MC addresses that we are being requested to
+ * use.
+ */
+ hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
+ hw->addr_ctrl.mta_in_use = 0;
+
+ /* Clear mta_shadow */
+ hw_dbg(hw, " Clearing MTA\n");
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+ /* Update mta shadow */
+ netdev_for_each_mc_addr(ha, netdev) {
+ hw_dbg(hw, " Adding the multicast addresses:\n");
+ ixgbe_set_mta(hw, ha->addr);
+ }
+
+ /* Enable mta */
+ for (i = 0; i < hw->mac.mcft_size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
+ hw->mac.mta_shadow[i]);
+
+ if (hw->addr_ctrl.mta_in_use > 0)
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
+ IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+
+ hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
+ return 0;
+}
+
+/**
+ * ixgbe_enable_mc_generic - Enable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Enables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
+
+ if (a->mta_in_use > 0)
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
+ hw->mac.mc_filter_type);
+
+ return 0;
+}
+
+/**
+ * ixgbe_disable_mc_generic - Disable multicast address in RAR
+ * @hw: pointer to hardware structure
+ *
+ * Disables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
+
+ if (a->mta_in_use > 0)
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+ return 0;
+}
+
+/**
+ * ixgbe_fc_enable_generic - Enable flow control
+ * @hw: pointer to hardware structure
+ * @packetbuf_num: packet buffer number (0-7)
+ *
+ * Enable flow control according to the current settings.
+ **/
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
+{
+ s32 ret_val = 0;
+ u32 mflcn_reg, fccfg_reg;
+ u32 reg;
+ u32 fcrtl, fcrth;
+
+#ifdef CONFIG_DCB
+ if (hw->fc.requested_mode == ixgbe_fc_pfc)
+ goto out;
+
+#endif /* CONFIG_DCB */
+ /* Negotiate the fc mode to use */
+ ret_val = ixgbe_fc_autoneg(hw);
+ if (ret_val == IXGBE_ERR_FLOW_CONTROL)
+ goto out;
+
+ /* Disable any previous flow control settings */
+ mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
+
+ fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+ fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
+
+ /*
+ * The possible values of fc.current_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+#ifdef CONFIG_DCB
+ * 4: Priority Flow Control is enabled.
+#endif
+ * other: Invalid.
+ */
+ switch (hw->fc.current_mode) {
+ case ixgbe_fc_none:
+ /*
+ * Flow control is disabled by software override or autoneg.
+ * The code below will actually disable it in the HW.
+ */
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ mflcn_reg |= IXGBE_MFLCN_RFCE;
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
+ break;
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ mflcn_reg |= IXGBE_MFLCN_RFCE;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
+ break;
+#ifdef CONFIG_DCB
+ case ixgbe_fc_pfc:
+ goto out;
+#endif /* CONFIG_DCB */
+ default:
+ hw_dbg(hw, "Flow control param set incorrectly\n");
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ }
+
+ /* Set 802.3x based flow control settings. */
+ mflcn_reg |= IXGBE_MFLCN_DPF;
+ IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
+ IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
+
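+ /*
+ * Scale the water marks: << 10 multiplies by 1024, converting the
+ * stored KB values to the byte counts FCRTH/FCRTL expect.
+ */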
+ fcrth = hw->fc.high_water[packetbuf_num] << 10;
+ fcrtl = hw->fc.low_water << 10;
+
+ if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+ fcrth |= IXGBE_FCRTH_FCEN;
+ if (hw->fc.send_xon)
+ fcrtl |= IXGBE_FCRTL_XONE;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
+
+ /* Configure pause time (2 TCs per register) */
+ reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
+ if ((packetbuf_num & 1) == 0)
+ reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
+ else
+ reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
+
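+ /*
+ * Set the refresh threshold to half the pause time so XOFF is
+ * renewed before the link partner's pause timer expires.
+ */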
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg - Configure flow control
+ * @hw: pointer to hardware structure
+ *
+ * Compares our advertised flow control capabilities to those advertised by
+ * our link partner, and determines the proper flow control mode to use.
+ **/
+s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ if (hw->fc.disable_fc_autoneg)
+ goto out;
+
+ /*
+ * AN should have completed when the cable was plugged in.
+ * Look for reasons to bail out. Bail out if:
+ * - FC autoneg is disabled, or if
+ * - link is not up.
+ *
+ * Since we're being called from an LSC, link is already known to be up.
+ * So use link_up_wait_to_complete=false.
+ */
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (!link_up) {
+ ret_val = IXGBE_ERR_FLOW_CONTROL;
+ goto out;
+ }
+
+ switch (hw->phy.media_type) {
+ /* Autoneg flow control on fiber adapters */
+ case ixgbe_media_type_fiber:
+ if (speed == IXGBE_LINK_SPEED_1GB_FULL)
+ ret_val = ixgbe_fc_autoneg_fiber(hw);
+ break;
+
+ /* Autoneg flow control on backplane adapters */
+ case ixgbe_media_type_backplane:
+ ret_val = ixgbe_fc_autoneg_backplane(hw);
+ break;
+
+ /* Autoneg flow control on copper adapters */
+ case ixgbe_media_type_copper:
+ if (ixgbe_device_supports_autoneg_fc(hw) == 0)
+ ret_val = ixgbe_fc_autoneg_copper(hw);
+ break;
+
+ default:
+ break;
+ }
+
+out:
+ if (ret_val == 0) {
+ hw->fc.fc_was_autonegged = true;
+ } else {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+ return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
+ * @hw: pointer to hardware structure
+ *
+ * Enables flow control on 1 gig fiber.
+ **/
+static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
+{
+ u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
+ s32 ret_val;
+
+ /*
+ * On multispeed fiber at 1g, bail out if
+ * - link is up but AN did not complete, or if
+ * - link is up and AN completed but timed out
+ */
+
+ linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+ if (!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) ||
+ (linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT)) {
+ ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+
+ pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+
+ ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
+ pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
+ IXGBE_PCS1GANA_ASM_PAUSE,
+ IXGBE_PCS1GANA_SYM_PAUSE,
+ IXGBE_PCS1GANA_ASM_PAUSE);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
+{
+ u32 links2, anlp1_reg, autoc_reg, links;
+ s32 ret_val;
+
+ /*
+ * On backplane, bail out if
+ * - backplane autoneg was not completed, or if
+ * - we are 82599 and link partner is not AN enabled
+ */
+ links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+ if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+ }
+ /*
+ * Read the 10g AN autoc and LP ability registers and resolve
+ * local flow control settings accordingly
+ */
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+
+ ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
+ anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
+ IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
+{
+ u16 technology_ability_reg = 0;
+ u16 lp_technology_ability_reg = 0;
+
+ hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
+ MDIO_MMD_AN,
+ &technology_ability_reg);
+ hw->phy.ops.read_reg(hw, MDIO_AN_LPA,
+ MDIO_MMD_AN,
+ &lp_technology_ability_reg);
+
+ return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
+ (u32)lp_technology_ability_reg,
+ IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
+ IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
+}
+
+/**
+ * ixgbe_negotiate_fc - Negotiate flow control
+ * @hw: pointer to hardware structure
+ * @adv_reg: flow control advertised settings
+ * @lp_reg: link partner's flow control settings
+ * @adv_sym: symmetric pause bit in advertisement
+ * @adv_asm: asymmetric pause bit in advertisement
+ * @lp_sym: symmetric pause bit in link partner advertisement
+ * @lp_asm: asymmetric pause bit in link partner advertisement
+ *
+ * Find the intersection between advertised settings and link partner's
+ * advertised settings
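+ *
+ * Resolution follows the IEEE 802.3 pause table: both sides symmetric
+ * resolves to full (or rx-only if that is all we requested); local
+ * asymmetric-only against a partner advertising both resolves to
+ * tx-only; local both against a partner advertising asymmetric-only
+ * resolves to rx-only; anything else disables flow control.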
+ **/
+static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
+{
+ if ((!(adv_reg)) || (!(lp_reg)))
+ return IXGBE_ERR_FC_NOT_NEGOTIATED;
+
+ if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
+ /*
+ * Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_full) {
+ hw->fc.current_mode = ixgbe_fc_full;
+ hw_dbg(hw, "Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
+ }
+ } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ hw->fc.current_mode = ixgbe_fc_tx_pause;
+ hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
+ } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_none;
+ hw_dbg(hw, "Flow Control = NONE.\n");
+ }
+ return 0;
+}
+
+/**
+ * ixgbe_setup_fc - Set up flow control
+ * @hw: pointer to hardware structure
+ * @packetbuf_num: packet buffer number (0-7)
+ *
+ * Called at init time to set up flow control.
+ **/
+static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
+{
+ s32 ret_val = 0;
+ u32 reg = 0, reg_bp = 0;
+ u16 reg_cu = 0;
+
+#ifdef CONFIG_DCB
+ if (hw->fc.requested_mode == ixgbe_fc_pfc) {
+ hw->fc.current_mode = hw->fc.requested_mode;
+ goto out;
+ }
+
+#endif /* CONFIG_DCB */
+ /* Validate the packetbuf configuration */
+ if (packetbuf_num < 0 || packetbuf_num > 7) {
+ hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
+ "is 0-7\n", packetbuf_num);
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /*
+ * Validate the water mark configuration. Zero water marks are invalid
+ * because it causes the controller to just blast out fc packets.
+ */
+ if (!hw->fc.low_water ||
+ !hw->fc.high_water[packetbuf_num] ||
+ !hw->fc.pause_time) {
+ hw_dbg(hw, "Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /*
+ * Validate the requested mode. Strict IEEE mode does not allow
+ * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
+ */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict "
+ "IEEE mode\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /*
+ * 10gig parts do not have a word in the EEPROM to determine the
+ * default flow control setting, so we explicitly set it to full.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ /*
+ * Set up the 1G and 10G flow control advertisement registers so the
+ * HW will be able to do fc autoneg once the cable is plugged in. If
+ * we link at 10G, the 1G advertisement is harmless and vice versa.
+ */
+
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber:
+ case ixgbe_media_type_backplane:
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ break;
+
+ case ixgbe_media_type_copper:
+ hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
+ MDIO_MMD_AN, &reg_cu);
+ break;
+
+ default:
+ break;
+ }
+
+ /*
+ * The possible values of fc.requested_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+#ifdef CONFIG_DCB
+ * 4: Priority Flow Control is enabled.
+#endif
+ * other: Invalid.
+ */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_none:
+ /* Flow control completely disabled by software override. */
+ reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE);
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE);
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
+ reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
+ reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
+ } else if (hw->phy.media_type == ixgbe_media_type_copper) {
+ reg_cu |= (IXGBE_TAF_ASM_PAUSE);
+ reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
+ }
+ break;
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE);
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
+ break;
+#ifdef CONFIG_DCB
+ case ixgbe_fc_pfc:
+ goto out;
+#endif /* CONFIG_DCB */
+ default:
+ hw_dbg(hw, "Flow control param set incorrectly\n");
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ }
+
+ if (hw->mac.type != ixgbe_mac_X540) {
+ /*
+ * Enable auto-negotiation between the MAC & PHY;
+ * the MAC will advertise clause 37 flow control.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
+
+ /* Disable AN timeout */
+ if (hw->fc.strict_ieee)
+ reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
+ hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+ }
+
+ /*
+ * AUTOC restart handles negotiation of 1G and 10G on backplane
+ * and copper. There is no need to set the PCS1GCTL register.
+ */
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ reg_bp |= IXGBE_AUTOC_AN_RESTART;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+ } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
+ (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
+ hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
+ MDIO_MMD_AN, reg_cu);
+ }
+
+ hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_disable_pcie_master - Disable PCI-express master access
+ * @hw: pointer to hardware structure
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests. Returns IXGBE_ERR_MASTER_REQUESTS_PENDING if the master
+ * disable bit fails to stop all pending requests, else 0 to signify
+ * that master requests are disabled.
+ **/
+static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ s32 status = 0;
+ u32 i;
+ u16 value;
+
+ /* Always set this bit to ensure any future transactions are blocked */
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
+
+ /* Exit if master requests are blocked */
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+ goto out;
+
+ /* Poll for master request bit to clear */
+ for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ udelay(100);
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+ goto out;
+ }
+
+ /*
+ * Two consecutive resets are required via CTRL.RST per datasheet
+ * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
+ * of this need. The first reset prevents new master requests from
+ * being issued by our device. We then must wait 1usec or more for any
+ * remaining completions from the PCIe bus to trickle in, and then reset
+ * again to clear out any effects they may have had on our device.
+ */
+ hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
+ hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+
+ /*
+ * Before proceeding, make sure that the PCIe block does not have
+ * transactions pending.
+ */
+ for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ udelay(100);
+ pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
+ &value);
+ if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+ goto out;
+ }
+
+ hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
+ status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore through the GSSR register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
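+ *
+ * In GSSR the firmware's semaphore bits mirror the software bits
+ * shifted left by 5 positions, hence fwmask = mask << 5 below.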
+ **/
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+{
+ u32 gssr;
+ u32 swmask = mask;
+ u32 fwmask = mask << 5;
+ s32 timeout = 200;
+
+ while (timeout) {
+ /*
+ * SW EEPROM semaphore bit is used for access to all
+ * SW_FW_SYNC/GSSR bits (not just EEPROM)
+ */
+ if (ixgbe_get_eeprom_semaphore(hw))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
+ if (!(gssr & (fwmask | swmask)))
+ break;
+
+ /*
+ * Firmware currently using resource (fwmask) or other software
+ * thread currently using resource (swmask)
+ */
+ ixgbe_release_eeprom_semaphore(hw);
+ usleep_range(5000, 10000);
+ timeout--;
+ }
+
+ if (!timeout) {
+ hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+
+ gssr |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+
+ ixgbe_release_eeprom_semaphore(hw);
+ return 0;
+}
+
+/**
+ * ixgbe_release_swfw_sync - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore through the GSSR register for the specified
+ * function (CSR, PHY0, PHY1, EEPROM, Flash)
+ **/
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+{
+ u32 gssr;
+ u32 swmask = mask;
+
+ ixgbe_get_eeprom_semaphore(hw);
+
+ gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
+ gssr &= ~swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+
+ ixgbe_release_eeprom_semaphore(hw);
+}
+
+/**
+ * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
+ * @hw: pointer to hardware structure
+ * @regval: register value to write to RXCTRL
+ *
+ * Enables the Rx DMA unit
+ **/
+s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
+{
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
+
+ return 0;
+}
+
+/**
+ * ixgbe_blink_led_start_generic - Blink LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to blink
+ **/
+s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
+{
+ ixgbe_link_speed speed = 0;
+ bool link_up = 0;
+ u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ /*
+ * Link must be up to auto-blink the LEDs;
+ * force it if link is down.
+ */
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+
+ if (!link_up) {
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+ autoc_reg |= IXGBE_AUTOC_FLU;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ usleep_range(10000, 20000);
+ }
+
+ led_reg &= ~IXGBE_LED_MODE_MASK(index);
+ led_reg |= IXGBE_LED_BLINK(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to stop blinking
+ **/
+s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
+{
+ u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ autoc_reg &= ~IXGBE_AUTOC_FLU;
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+ led_reg &= ~IXGBE_LED_MODE_MASK(index);
+ led_reg &= ~IXGBE_LED_BLINK(index);
+ led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_offset: SAN MAC address offset
+ *
+ * Reads the EEPROM location for the SAN MAC address pointer and stores
+ * the value found there in *san_mac_offset. This is used in both the
+ * get and set mac_addr routines.
+ **/
+static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+ u16 *san_mac_offset)
+{
+ /*
+ * First read the EEPROM pointer to see if the MAC addresses are
+ * available.
+ */
+ hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Reads the SAN MAC address from the EEPROM, if it's available. This is
+ * per-port, so set_lan_id() must be called before reading the addresses.
+ * set_lan_id() is called by identify_sfp(), but this cannot be relied
+ * upon for non-SFP connections, so we must call it here.
+ **/
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+ u16 san_mac_data, san_mac_offset;
+ u8 i;
+
+ /*
+ * First read the EEPROM pointer to see if the MAC addresses are
+ * available. If they're not, no point in calling set_lan_id() here.
+ */
+ ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+
+ if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
+ /*
+ * No addresses available in this EEPROM. It's not an
+ * error though, so just wipe the local address and return.
+ */
+ for (i = 0; i < 6; i++)
+ san_mac_addr[i] = 0xFF;
+
+ goto san_mac_addr_out;
+ }
+
+ /* make sure we know which port we need to program */
+ hw->mac.ops.set_lan_id(hw);
+ /* apply the port offset to the address offset */
+ (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
+ (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+ for (i = 0; i < 3; i++) {
+ hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
+ san_mac_addr[i * 2] = (u8)(san_mac_data);
+ san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
+ san_mac_offset++;
+ }
+
+san_mac_addr_out:
+ return 0;
+}
+
+/**
+ * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
+ * @hw: pointer to hardware structure
+ *
+ * Read PCIe configuration space, and get the MSI-X vector count from
+ * the capabilities table.
+ **/
+u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ u16 msix_count;
+ pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS,
+ &msix_count);
+ msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
+
+ /* MSI-X count is zero-based in HW, so increment to give proper value */
+ msix_count++;
+
+ return msix_count;
+}
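+
+/*
+ * Usage sketch (illustrative, hypothetical caller): size an MSI-X request
+ * from the capability count, for example
+ *
+ *	vectors = min_t(u32, ixgbe_get_pcie_msix_count_generic(hw),
+ *			num_online_cpus() + 1);
+ */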
+
+/**
+ * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to disassociate
+ * @vmdq: VMDq pool index to remove from the rar
+ **/
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 mpsar_lo, mpsar_hi;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+
+ if (!mpsar_lo && !mpsar_hi)
+ goto done;
+
+ if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
+ if (mpsar_lo) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+ mpsar_lo = 0;
+ }
+ if (mpsar_hi) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+ mpsar_hi = 0;
+ }
+ } else if (vmdq < 32) {
+ mpsar_lo &= ~(1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
+ } else {
+ mpsar_hi &= ~(1 << (vmdq - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
+ }
+
+ /* was that the last pool using this rar? */
+ if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+ hw->mac.ops.clear_rar(hw, rar);
+done:
+ return 0;
+}
+
+/**
+ * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @vmdq: VMDq pool index
+ **/
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 mpsar;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ if (vmdq < 32) {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar |= 1 << vmdq;
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
+ } else {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+ mpsar |= 1 << (vmdq - 32);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
+ }
+ return 0;
+}
+
+/**
+ * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < 128; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
+
+ return 0;
+}
+
+/**
+ * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ *
+ * Returns the VLVF index where this VLAN id should be placed, or
+ * IXGBE_ERR_NO_SPACE if the table is full.
+ **/
+static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
+{
+ u32 bits = 0;
+ u32 first_empty_slot = 0;
+ s32 regindex;
+
+ /* short cut the special case */
+ if (vlan == 0)
+ return 0;
+
+ /*
+ * Search for the vlan id in the VLVF entries. Save off the first empty
+ * slot found along the way
+ */
+ for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
+ if (!bits && !(first_empty_slot))
+ first_empty_slot = regindex;
+ else if ((bits & 0x0FFF) == vlan)
+ break;
+ }
+
+ /*
+ * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
+ * in the VLVF. Else use the first empty VLVF register for this
+ * vlan id.
+ */
+ if (regindex >= IXGBE_VLVF_ENTRIES) {
+ if (first_empty_slot)
+ regindex = first_empty_slot;
+ else {
+ hw_dbg(hw, "No space in VLVF.\n");
+ regindex = IXGBE_ERR_NO_SPACE;
+ }
+ }
+
+ return regindex;
+}
+
+/**
+ * ixgbe_set_vfta_generic - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ s32 regindex;
+ u32 bitindex;
+ u32 vfta;
+ u32 bits;
+ u32 vt;
+ u32 targetbit;
+ bool vfta_changed = false;
+
+ if (vlan > 4095)
+ return IXGBE_ERR_PARAM;
+
+ /*
+ * this is a 2 part operation - first the VFTA, then the
+ * VLVF and VLVFB if VT Mode is set
+ * We don't write the VFTA until we know the VLVF part succeeded.
+ */
+
+ /* Part 1
+ * The VFTA is a bitstring made up of 128 32-bit registers
+ * that enable the particular VLAN id, much like the MTA:
+ * bits[11-5]: which register
+ * bits[4-0]: which bit in the register
+ */
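+ /*
+ * Worked example (illustrative): vlan 2050 = 0x802 gives
+ * regindex = (0x802 >> 5) & 0x7F = 64 and bitindex = 0x802 & 0x1F = 2,
+ * so bit 2 of VFTA[64] controls VLAN 2050.
+ */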
+ regindex = (vlan >> 5) & 0x7F;
+ bitindex = vlan & 0x1F;
+ targetbit = (1 << bitindex);
+ vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+
+ if (vlan_on) {
+ if (!(vfta & targetbit)) {
+ vfta |= targetbit;
+ vfta_changed = true;
+ }
+ } else {
+ if ((vfta & targetbit)) {
+ vfta &= ~targetbit;
+ vfta_changed = true;
+ }
+ }
+
+ /* Part 2
+ * If VT Mode is set
+ * Either vlan_on
+ * make sure the vlan is in VLVF
+ * set the vind bit in the matching VLVFB
+ * Or !vlan_on
+ * clear the pool bit and possibly the vind
+ */
+ vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ if (vt & IXGBE_VT_CTL_VT_ENABLE) {
+ s32 vlvf_index;
+
+ vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
+ if (vlvf_index < 0)
+ return vlvf_index;
+
+ if (vlan_on) {
+ /* set the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index*2));
+ bits |= (1 << vind);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index*2),
+ bits);
+ } else {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index*2)+1));
+ bits |= (1 << (vind-32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index*2)+1),
+ bits);
+ }
+ } else {
+ /* clear the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index*2));
+ bits &= ~(1 << vind);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index*2),
+ bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index*2)+1));
+ } else {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index*2)+1));
+ bits &= ~(1 << (vind-32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index*2)+1),
+ bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index*2));
+ }
+ }
+
+ /*
+ * If there are still bits set in the VLVFB registers
+ * for the VLAN ID indicated we need to see if the
+ * caller is requesting that we clear the VFTA entry bit.
+ * If the caller has requested that we clear the VFTA
+ * entry bit but there are still pools/VFs using this VLAN
+ * ID entry then ignore the request. We're not worried
+ * about the case where we're turning the VFTA VLAN ID
+ * entry bit on, only when requested to turn it off as
+ * there may be multiple pools and/or VFs using the
+ * VLAN ID entry. In that case we cannot clear the
+ * VFTA bit until all pools/VFs using that VLAN ID have also
+ * been cleared. This will be indicated by "bits" being
+ * zero.
+ */
+ if (bits) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
+ (IXGBE_VLVF_VIEN | vlan));
+ if (!vlan_on) {
+ /* someone wants to clear the vfta entry
+ * but some pools/VFs are still using it.
+ * Ignore it. */
+ vfta_changed = false;
+ }
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+ }
+ }
+
+ if (vfta_changed)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
+
+ return 0;
+}
+
+/**
+ * ixgbe_clear_vfta_generic - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
+{
+ u32 offset;
+
+ for (offset = 0; offset < hw->mac.vft_size; offset++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+ for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_check_mac_link_generic - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: pointer to bool, set true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ u32 links_reg, links_orig;
+ u32 i;
+
+ /* snapshot the old state */
+ links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+ if (links_orig != links_reg) {
+ hw_dbg(hw, "LINKS changed from %08X to %08X\n",
+ links_orig, links_reg);
+ }
+
+ if (link_up_wait_to_complete) {
+ for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+ if (links_reg & IXGBE_LINKS_UP) {
+ *link_up = true;
+ break;
+ } else {
+ *link_up = false;
+ }
+ msleep(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ }
+ } else {
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = true;
+ else
+ *link_up = false;
+ }
+
+ if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_10G_82599)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_1G_82599)
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_100_82599)
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
+ * the EEPROM
+ * @hw: pointer to hardware structure
+ * @wwnn_prefix: the alternative WWNN prefix
+ * @wwpn_prefix: the alternative WWPN prefix
+ *
+ * This function reads the alternative SAN MAC address block in the EEPROM
+ * to check for alternative WWNN/WWPN prefix support.
+ **/
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
+{
+ u16 offset, caps;
+ u16 alt_san_mac_blk_offset;
+
+ /* clear output first */
+ *wwnn_prefix = 0xFFFF;
+ *wwpn_prefix = 0xFFFF;
+
+ /* check if alternative SAN MAC is supported */
+ hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
+ &alt_san_mac_blk_offset);
+
+ if ((alt_san_mac_blk_offset == 0) ||
+ (alt_san_mac_blk_offset == 0xFFFF))
+ goto wwn_prefix_out;
+
+ /* check capability in alternative san mac address block */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+ hw->eeprom.ops.read(hw, offset, &caps);
+ if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+ goto wwn_prefix_out;
+
+ /* get the corresponding prefix for WWNN/WWPN */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+ hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+ hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+
+wwn_prefix_out:
+ return 0;
+}
+
+/**
+ * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
+ * control
+ * @hw: pointer to hardware structure
+ *
+ * There are several PHYs that do not support autoneg flow control. This
+ * function checks the device ID to see if the associated PHY supports
+ * autoneg flow control.
+ **/
+static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+{
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X540T:
+ return 0;
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ return 0;
+ default:
+ return IXGBE_ERR_FC_NOT_SUPPORTED;
+ }
+}
+
+/**
+ * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for anti-spoofing
+ * @pf: Physical Function pool - do not enable anti-spoofing for the PF
+ *
+ **/
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
+{
+ int j;
+ int pf_target_reg = pf >> 3;
+ int pf_target_shift = pf % 8;
+ u32 pfvfspoof = 0;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ if (enable)
+ pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
+
+ /*
+ * PFVFSPOOF register array is size 8 with 8 bits assigned to
+ * MAC anti-spoof enables in each register array element.
+ */
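+ /*
+ * Illustrative mapping: for pf = 10, pf_target_reg = 10 >> 3 = 1 and
+ * pf_target_shift = 10 % 8 = 2, so bit 2 of PFVFSPOOF[1] carries the
+ * PF's MAC anti-spoof enable.
+ */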
+ for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
+
+ /* If not enabling anti-spoofing then done */
+ if (!enable)
+ return;
+
+ /*
+ * The PF should be allowed to spoof so that it can support
+ * emulation mode NICs. Reset the bit assigned to the PF
+ */
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
+ pfvfspoof &= ~(1 << pf_target_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
+}
+
+/**
+ * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for VLAN anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
+ *
+ **/
+void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
+{
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
+ u32 pfvfspoof;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+ if (enable)
+ pfvfspoof |= (1 << vf_target_shift);
+ else
+ pfvfspoof &= ~(1 << vf_target_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
+}
+
+/**
+ * ixgbe_get_device_caps_generic - Get additional device capabilities
+ * @hw: pointer to hardware structure
+ * @device_caps: the EEPROM word with the extra device capabilities
+ *
+ * This function will read the EEPROM location for the device capabilities,
+ * and return the word through device_caps.
+ **/
+s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
+{
+ hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
+
+ return 0;
+}
+
+/**
+ * ixgbe_set_rxpba_generic - Initialize RX packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
+ int num_pb,
+ u32 headroom,
+ int strategy)
+{
+ u32 pbsize = hw->mac.rx_pb_size;
+ int i = 0;
+ u32 rxpktsize, txpktsize, txpbthresh;
+
+ /* Reserve headroom */
+ pbsize -= headroom;
+
+ if (!num_pb)
+ num_pb = 1;
+
+ /* Divide remaining packet buffer space amongst the number
+ * of packet buffers requested using supplied strategy.
+ */
+ switch (strategy) {
+ case (PBA_STRATEGY_WEIGHTED):
+ /* pba_80_48 strategy weight first half of packet buffer with
+ * 5/8 of the packet buffer space.
+ */
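+ /*
+ * Worked example (illustrative): with pbsize = 512 (KB, a typical
+ * 82599 value) and num_pb = 8, rxpktsize = (512 * 5 * 2) / (8 * 8)
+ * = 80 for each of the first four buffers, leaving 512 - 4 * 80 =
+ * 192 KB, which the EQUAL case below splits as 192 / 4 = 48 KB per
+ * buffer, i.e. the 80/48 split the strategy is named for.
+ */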
+ rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8));
+ pbsize -= rxpktsize * (num_pb / 2);
+ rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
+ for (; i < (num_pb / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ /* Fall through to configure remaining packet buffers */
+ case (PBA_STRATEGY_EQUAL):
+ /* Divide the remaining Rx packet buffer evenly among the TCs */
+ rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
+ for (; i < num_pb; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Setup Tx packet buffer and threshold equally for all TCs
+ * TXPBTHRESH register is set in K so divide by 1024 and subtract
+ * 10 since the largest packet we support is just over 9K.
+ */
+ txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
+ txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
+ for (i = 0; i < num_pb; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
+ }
+
+ /* Clear unused TCs, if any, to zero buffer size */
+ for (; i < IXGBE_MAX_PB; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
+ }
+}
+
+/**
+ * ixgbe_calculate_checksum - Calculate checksum for buffer
+ * @buffer: pointer to the buffer to checksum
+ * @length: number of bytes to include in the checksum
+ *
+ * Calculates an 8-bit, two's-complement checksum over the buffer and
+ * returns it.
+ **/
+static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
+{
+ u32 i;
+ u8 sum = 0;
+
+ if (!buffer)
+ return 0;
+
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
+ return (u8) (0 - sum);
+}
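+
+/*
+ * Illustrative property: the returned value is the two's complement of the
+ * byte sum, so a buffer followed by its own checksum always sums to zero
+ * modulo 256, which is how the receiver can validate a checksummed message.
+ */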
+
+/**
+ * ixgbe_host_interface_command - Issue command to manageability block
+ * @hw: pointer to the HW structure
+ * @buffer: contains the command to write and where the return status will
+ * be placed
+ * @length: length of buffer, must be a multiple of 4 bytes
+ *
+ * Communicates with the manageability block. Returns 0 on success,
+ * IXGBE_ERR_HOST_INTERFACE_COMMAND otherwise.
+ **/
+static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u8 *buffer,
+ u32 length)
+{
+ u32 hicr, i;
+ u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
+ u8 buf_len;
+ u16 dword_len;
+
+ s32 ret_val = 0;
+
+ if (length == 0 || length & 0x3 ||
+ length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ hw_dbg(hw, "Buffer length failure.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+ if ((hicr & IXGBE_HICR_EN) == 0) {
+ hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Calculate length in DWORDs */
+ dword_len = length >> 2;
+
+ /*
+ * The device driver writes the relevant command block
+ * into the ram area.
+ */
+ for (i = 0; i < dword_len; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+ i, *((u32 *)buffer + i));
+
+ /* Setting this bit tells the ARC that a new command is pending. */
+ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
+
+ for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
+ hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+ if (!(hicr & IXGBE_HICR_C))
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ /* Check command successful completion. */
+ if (i == IXGBE_HI_COMMAND_TIMEOUT ||
+ (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
+ hw_dbg(hw, "Command failed with no valid status.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Calculate length in DWORDs */
+ dword_len = hdr_size >> 2;
+
+ /* first pull in the header so we know the buffer length */
+ for (i = 0; i < dword_len; i++)
+ *((u32 *)buffer + i) =
+ IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
+
+ /* If there is anything in the data position, pull it in */
+ buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
+ if (buf_len == 0)
+ goto out;
+
+ if (length < (buf_len + hdr_size)) {
+ hw_dbg(hw, "Buffer not large enough for reply message.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Calculate length in DWORDs, rounding up for non-DWORD lengths */
+ dword_len = (buf_len + 3) >> 2;
+
+ /* Pull in the rest of the buffer (i is where we left off) */
+ for (; i < (hdr_size >> 2) + dword_len; i++)
+ *((u32 *)buffer + i) =
+ IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @min: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ *
+ * Sends the driver version number to the firmware through the manageability
+ * block. Returns 0 on success, IXGBE_ERR_SWFW_SYNC if the semaphore cannot
+ * be acquired, or IXGBE_ERR_HOST_INTERFACE_COMMAND if the command fails.
+ **/
+s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub)
+{
+ struct ixgbe_hic_drv_info fw_cmd;
+ int i;
+ s32 ret_val = 0;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM) != 0) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
+ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ fw_cmd.port_num = (u8)hw->bus.func;
+ fw_cmd.ver_maj = maj;
+ fw_cmd.ver_min = min;
+ fw_cmd.ver_build = build;
+ fw_cmd.ver_sub = sub;
+ fw_cmd.hdr.checksum = 0;
+ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+ fw_cmd.pad = 0;
+ fw_cmd.pad2 = 0;
+
+ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+ ret_val = ixgbe_host_interface_command(hw, (u8 *)&fw_cmd,
+ sizeof(fw_cmd));
+ if (ret_val != 0)
+ continue;
+
+ if (fw_cmd.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS)
+ ret_val = 0;
+ else
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+ break;
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe FIFO
+ * @hw: pointer to the hardware structure
+ *
+ * The 82599 and X540 MACs can experience issues if TX work is still pending
+ * when a reset occurs. This function flushes the PCIe buffers on the
+ * system to prevent that.
+ **/
+void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
+{
+ u32 gcr_ext, hlreg0;
+
+ /*
+ * If double reset is not requested then all transactions should
+ * already be clear and as such there is no work to do
+ */
+ if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
+ return;
+
+ /*
+ * Set loopback enable to prevent any transmits from being sent
+ * should the link come up. This assumes that the RXCTRL.RXEN bit
+ * has already been cleared.
+ */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
+
+ /* initiate cleaning flow for buffers in the PCIe transaction layer */
+ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
+ gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
+
+ /* Flush all writes and allow 20usec for all transactions to clear */
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(20);
+
+ /* restore previous register values */
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
new file mode 100644
index 000000000000..863f9c1f145b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -0,0 +1,145 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_COMMON_H_
+#define _IXGBE_COMMON_H_
+
+#include "ixgbe_type.h"
+#include "ixgbe.h"
+
+u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
+s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
+void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
+
+s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
+s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 *data);
+s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+ u16 *checksum_val);
+s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ u32 enable_addr);
+s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+ struct net_device *netdev);
+s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
+s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
+
+s32 ixgbe_validate_mac_addr(u8 *mac_addr);
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
+ u32 vind, bool vlan_on);
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix);
+s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
+void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
+s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
+s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub);
+void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
+
+void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
+ u32 headroom, int strategy);
+
+#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+
+#ifndef writeq
+#define writeq(val, addr) do { writel((u32) (val), addr); \
+ writel((u32) ((val) >> 32), ((addr) + 4)); } while (0)
+#endif
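+
+/*
+ * Note: the do { ... } while (0) wrapper makes the two 32-bit writes expand
+ * as a single statement, so the fallback stays correct in contexts such as
+ * "if (cond) writeq(val, addr); else ...".
+ */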
+
+#define IXGBE_WRITE_REG64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
+
+#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
+
+#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) (\
+ writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
+
+#define IXGBE_READ_REG_ARRAY(a, reg, offset) (\
+ readl((a)->hw_addr + (reg) + ((offset) << 2)))
+
+#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
+
+#define hw_dbg(hw, format, arg...) \
+ netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg)
+#define e_dev_info(format, arg...) \
+ dev_info(&adapter->pdev->dev, format, ## arg)
+#define e_dev_warn(format, arg...) \
+ dev_warn(&adapter->pdev->dev, format, ## arg)
+#define e_dev_err(format, arg...) \
+ dev_err(&adapter->pdev->dev, format, ## arg)
+#define e_dev_notice(format, arg...) \
+ dev_notice(&adapter->pdev->dev, format, ## arg)
+#define e_info(msglvl, format, arg...) \
+ netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_err(msglvl, format, arg...) \
+ netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_warn(msglvl, format, arg...) \
+ netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_crit(msglvl, format, arg...) \
+ netif_crit(adapter, msglvl, adapter->netdev, format, ## arg)
+#endif /* _IXGBE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
new file mode 100644
index 000000000000..318caf4bf623
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -0,0 +1,366 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+#include "ixgbe.h"
+#include "ixgbe_type.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82598.h"
+#include "ixgbe_dcb_82599.h"
+
+/**
+ * ixgbe_ieee_credits - Calculate IEEE traffic class credits
+ * @bw: bandwidth percentage for each traffic class
+ * @refill: refill credits to program for each traffic class
+ * @max: max credits to program for each traffic class
+ * @max_frame: maximum frame size in bytes
+ *
+ * Calculates the IEEE traffic class credits from the configured bandwidth
+ * percentages. Credits are the smallest unit programmable into the
+ * underlying hardware. The IEEE 802.1Qaz specification does not use
+ * bandwidth groups, so this is much simplified from the CEE case.
+ */
+static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
+ __u16 *max, int max_frame)
+{
+ int min_percent = 100;
+ int min_credit, multiplier;
+ int i;
+
+ min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
+ DCB_CREDIT_QUANTUM;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ if (bw[i] < min_percent && bw[i])
+ min_percent = bw[i];
+ }
+
+ multiplier = (min_credit / min_percent) + 1;
+
+ /* Find out the hw credits for each TC */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL);
+
+ if (val < min_credit)
+ val = min_credit;
+ refill[i] = val;
+
+ max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit;
+ }
+ return 0;
+}
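+
+/*
+ * Worked example (illustrative): max_frame = 1518 gives
+ * min_credit = ((1518 / 2) + 63) / 64 = 12. With bw = {50, 30, 20, 0, ...},
+ * min_percent = 20 and multiplier = (12 / 20) + 1 = 1, so refill becomes
+ * {50, 30, 20, 12, ...} and max becomes {2047, 1228, 819, 12, ...}.
+ */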
+
+/**
+ * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
+ * @hw: pointer to hardware structure
+ * @dcb_config: struct containing DCB settings
+ * @max_frame: maximum frame size in bytes
+ * @direction: configuring either Tx or Rx
+ *
+ * This function calculates the credits allocated to each traffic class.
+ * It should be called only after the rules are checked by
+ * ixgbe_dcb_check_config().
+ */
+s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config,
+ int max_frame, u8 direction)
+{
+ struct tc_bw_alloc *p;
+ int min_credit;
+ int min_multiplier;
+ int min_percent = 100;
+ s32 ret_val = 0;
+ /* Initialization values, defaults chosen for Tx settings */
+ u32 credit_refill = 0;
+ u32 credit_max = 0;
+ u16 link_percentage = 0;
+ u8 bw_percent = 0;
+ u8 i;
+
+ if (dcb_config == NULL) {
+ ret_val = DCB_ERR_CONFIG;
+ goto out;
+ }
+
+ min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
+ DCB_CREDIT_QUANTUM;
+
+ /* Find smallest link percentage */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ p = &dcb_config->tc_config[i].path[direction];
+ bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
+ link_percentage = p->bwg_percent;
+
+ link_percentage = (link_percentage * bw_percent) / 100;
+
+ if (link_percentage && link_percentage < min_percent)
+ min_percent = link_percentage;
+ }
+
+ /*
+ * The ratio between traffic classes will control the bandwidth
+ * percentages seen on the wire. To calculate this ratio we use
+ * a multiplier. It is required that the refill credits must be
+ * larger than the max frame size so here we find the smallest
+ * multiplier that will allow all bandwidth percentages to be
+ * greater than the max frame size.
+ */
+ min_multiplier = (min_credit / min_percent) + 1;
+
+ /* Find out the link percentage for each TC first */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ p = &dcb_config->tc_config[i].path[direction];
+ bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
+
+ link_percentage = p->bwg_percent;
+ /* Must be careful of integer division for very small numbers */
+ link_percentage = (link_percentage * bw_percent) / 100;
+ if (p->bwg_percent > 0 && link_percentage == 0)
+ link_percentage = 1;
+
+ /* Save link_percentage for reference */
+ p->link_percent = (u8)link_percentage;
+
+ /* Calculate credit refill ratio using multiplier */
+ credit_refill = min(link_percentage * min_multiplier,
+ MAX_CREDIT_REFILL);
+ p->data_credits_refill = (u16)credit_refill;
+
+ /* Calculate maximum credit for the TC */
+ credit_max = (link_percentage * MAX_CREDIT) / 100;
+
+ /*
+ * Adjustment based on rule checking, if the percentage
+ * of a TC is too small, the maximum credit may not be
+ * enough to send out a jumbo frame in data plane arbitration.
+ */
+ if (credit_max && (credit_max < min_credit))
+ credit_max = min_credit;
+
+ if (direction == DCB_TX_CONFIG) {
+ /*
+ * Adjustment based on rule checking, if the
+ * percentage of a TC is too small, the maximum
+ * credit may not be enough to send out a TSO
+ * packet in descriptor plane arbitration.
+ */
+ if ((hw->mac.type == ixgbe_mac_82598EB) &&
+ credit_max &&
+ (credit_max < MINIMUM_CREDIT_FOR_TSO))
+ credit_max = MINIMUM_CREDIT_FOR_TSO;
+
+ dcb_config->tc_config[i].desc_credits_max =
+ (u16)credit_max;
+ }
+
+ p->data_credits_max = (u16)credit_max;
+ }
+
+out:
+ return ret_val;
+}
+
+void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
+{
+ int i;
+
+ *pfc_en = 0;
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+ *pfc_en |= !!(cfg->tc_config[i].dcb_pfc & 0xF) << i;
+}
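+
+/*
+ * Illustrative result: if only TC0 and TC3 have PFC enabled, the loop above
+ * leaves *pfc_en = 0x09 (bits 0 and 3 set).
+ */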
+
+void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,
+ u16 *refill)
+{
+ struct tc_bw_alloc *p;
+ int i;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ p = &cfg->tc_config[i].path[direction];
+ refill[i] = p->data_credits_refill;
+ }
+}
+
+void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max)
+{
+ int i;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+ max[i] = cfg->tc_config[i].desc_credits_max;
+}
+
+void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction,
+ u8 *bwgid)
+{
+ struct tc_bw_alloc *p;
+ int i;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ p = &cfg->tc_config[i].path[direction];
+ bwgid[i] = p->bwg_id;
+ }
+}
+
+void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
+ u8 *ptype)
+{
+ struct tc_bw_alloc *p;
+ int i;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ p = &cfg->tc_config[i].path[direction];
+ ptype[i] = p->prio_type;
+ }
+}
+
+void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
+{
+ int i, up;
+ unsigned long bitmap;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ bitmap = cfg->tc_config[i].path[direction].up_to_tc_bitmap;
+ for_each_set_bit(up, &bitmap, MAX_USER_PRIORITY)
+ map[up] = i;
+ }
+}
+
+/**
+ * ixgbe_dcb_hw_config - Config and enable DCB
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure DCB settings and enable DCB mode.
+ */
+s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config)
+{
+ s32 ret = 0;
+ u8 pfc_en;
+ u8 ptype[MAX_TRAFFIC_CLASS];
+ u8 bwgid[MAX_TRAFFIC_CLASS];
+ u8 prio_tc[MAX_TRAFFIC_CLASS];
+ u16 refill[MAX_TRAFFIC_CLASS];
+ u16 max[MAX_TRAFFIC_CLASS];
+
+ /* Unpack CEE standard containers */
+ ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en);
+ ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill);
+ ixgbe_dcb_unpack_max(dcb_config, max);
+ ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid);
+ ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype);
+ ixgbe_dcb_unpack_map(dcb_config, DCB_TX_CONFIG, prio_tc);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max,
+ bwgid, ptype);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ ret = ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
+ bwgid, ptype, prio_tc);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/* Helper routines to abstract HW specifics from DCB netlink ops */
+s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
+{
+ int ret = -EINVAL;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
+{
+ __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
+ __u8 prio_type[IEEE_8021QAZ_MAX_TCS];
+ int i;
+
+ /* naively give each TC a bwg to map onto CEE hardware */
+ __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+ /* Map TSA onto CEE prio type */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ prio_type[i] = 2;
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ prio_type[i] = 0;
+ break;
+ default:
+ /* Hardware only supports the strict priority and ETS
+ * transmission selection algorithms; if we receive any
+ * other value from dcbnl, throw an error.
+ */
+ return -EINVAL;
+ }
+ }
+
+ ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame);
+ return ixgbe_dcb_hw_ets_config(hw, refill, max,
+ bwg_id, prio_type, ets->prio_tc);
+}
+
+s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
+ u16 *refill, u16 *max, u8 *bwg_id,
+ u8 *prio_type, u8 *prio_tc)
+{
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max,
+ prio_type);
+ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
+ bwg_id, prio_type);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
+ bwg_id, prio_type, prio_tc);
+ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
+ prio_type, prio_tc);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
new file mode 100644
index 000000000000..e162775064da
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -0,0 +1,168 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _DCB_CONFIG_H_
+#define _DCB_CONFIG_H_
+
+#include <linux/dcbnl.h>
+#include "ixgbe_type.h"
+
+/* DCB data structures */
+
+#define IXGBE_MAX_PACKET_BUFFERS 8
+#define MAX_USER_PRIORITY 8
+#define MAX_BW_GROUP 8
+#define BW_PERCENT 100
+
+#define DCB_TX_CONFIG 0
+#define DCB_RX_CONFIG 1
+
+/* DCB error Codes */
+#define DCB_SUCCESS 0
+#define DCB_ERR_CONFIG -1
+#define DCB_ERR_PARAM -2
+
+/* Transmit and receive Errors */
+/* Error in bandwidth group allocation */
+#define DCB_ERR_BW_GROUP -3
+/* Error in traffic class bandwidth allocation */
+#define DCB_ERR_TC_BW -4
+/* Traffic class has both link strict and group strict enabled */
+#define DCB_ERR_LS_GS -5
+/* Link strict traffic class has non zero bandwidth */
+#define DCB_ERR_LS_BW_NONZERO -6
+/* Link strict bandwidth group has non zero bandwidth */
+#define DCB_ERR_LS_BWG_NONZERO -7
+/* Traffic class has zero bandwidth */
+#define DCB_ERR_TC_BW_ZERO -8
+
+#define DCB_NOT_IMPLEMENTED 0x7FFFFFFF
+
+struct dcb_pfc_tc_debug {
+ u8 tc;
+ u8 pause_status;
+ u64 pause_quanta;
+};
+
+enum strict_prio_type {
+ prio_none = 0,
+ prio_group,
+ prio_link
+};
+
+/* DCB capability definitions */
+#define IXGBE_DCB_PG_SUPPORT 0x00000001
+#define IXGBE_DCB_PFC_SUPPORT 0x00000002
+#define IXGBE_DCB_BCN_SUPPORT 0x00000004
+#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008
+#define IXGBE_DCB_GSP_SUPPORT 0x00000010
+
+#define IXGBE_DCB_8_TC_SUPPORT 0x80
+
+struct dcb_support {
+ /* DCB capabilities */
+ u32 capabilities;
+
+ /* Each bit represents a number of TCs configurable in the hw.
+ * If 8 traffic classes can be configured, the value is 0x80.
+ */
+ u8 traffic_classes;
+ u8 pfc_traffic_classes;
+};
+
+/* Traffic class bandwidth allocation per direction */
+struct tc_bw_alloc {
+ u8 bwg_id; /* Bandwidth Group (BWG) ID */
+ u8 bwg_percent; /* % of BWG's bandwidth */
+ u8 link_percent; /* % of link bandwidth */
+ u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
+ u16 data_credits_refill; /* Credit refill amount in 64B granularity */
+ u16 data_credits_max; /* Max credits for a configured packet buffer
+ * in 64B granularity.*/
+ enum strict_prio_type prio_type; /* Link or Group Strict Priority */
+};
+
+enum dcb_pfc_type {
+ pfc_disabled = 0,
+ pfc_enabled_full,
+ pfc_enabled_tx,
+ pfc_enabled_rx
+};
+
+/* Traffic class configuration */
+struct tc_configuration {
+ struct tc_bw_alloc path[2]; /* One each for Tx/Rx */
+ enum dcb_pfc_type dcb_pfc; /* Class based flow control setting */
+
+ u16 desc_credits_max; /* For Tx Descriptor arbitration */
+ u8 tc; /* Traffic class (TC) */
+};
+
+struct dcb_num_tcs {
+ u8 pg_tcs;
+ u8 pfc_tcs;
+};
+
+struct ixgbe_dcb_config {
+ struct dcb_support support;
+ struct dcb_num_tcs num_tcs;
+ struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
+ u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
+ bool pfc_mode_enable;
+
+ u32 dcb_cfg_version; /* Not used...OS-specific? */
+ u32 link_speed; /* For bandwidth allocation validation purpose */
+};
+
+/* DCB driver APIs */
+void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en);
+void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *);
+void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);
+void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *);
+
+/* DCB credits calculation */
+s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
+ struct ixgbe_dcb_config *, int, u8);
+
+/* DCB hw initialization */
+s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max);
+s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
+ u8 *bwg_id, u8 *prio_type, u8 *tc_prio);
+s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio);
+s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+
+/* DCB definitions for credit calculation */
+#define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */
+#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */
+#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */
+#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */
+#define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1024 / 64B */
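+
+/*
+ * Worked example (illustrative): for a 9728-byte jumbo frame the credit
+ * floor used by the calculation code is
+ * ((9728 / 2) + DCB_CREDIT_QUANTUM - 1) / DCB_CREDIT_QUANTUM = 76 credits,
+ * i.e. 76 * 64B = 4864B, half the maximum frame size.
+ */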
+
+#endif /* _DCB_CONFIG_H */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
new file mode 100644
index 000000000000..fcd0e479721f
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -0,0 +1,296 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe.h"
+#include "ixgbe_type.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82598.h"
+
+/**
+ * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits for each traffic class
+ * @max: max credits for each traffic class
+ * @prio_type: priority type (link or group strict) for each traffic class
+ *
+ * Configure Rx Data Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *prio_type)
+{
+ u32 reg = 0;
+ u32 credit_refill = 0;
+ u32 credit_max = 0;
+ u8 i = 0;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
+ IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+ /* Enable Arbiter */
+ reg &= ~IXGBE_RMCS_ARBDIS;
+ /* Enable Receive Recycle within the BWG */
+ reg |= IXGBE_RMCS_RRM;
+ /* Enable Deficit Fixed Priority arbitration*/
+ reg |= IXGBE_RMCS_DFP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ credit_refill = refill[i];
+ credit_max = max[i];
+
+ reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
+
+ if (prio_type[i] == prio_link)
+ reg |= IXGBE_RT2CR_LSP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
+ }
+
+ reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+ reg |= IXGBE_RDRXCTL_RDMTS_1_2;
+ reg |= IXGBE_RDRXCTL_MPBEN;
+ reg |= IXGBE_RDRXCTL_MCEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ /* Make sure there are enough descriptors before arbitration */
+ reg &= ~IXGBE_RXCTRL_DMBYPS;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg);
+
+ return 0;
+}
+
+/**
+ * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits for each traffic class
+ * @max: max credits for each traffic class
+ * @bwg_id: bandwidth group ID for each traffic class
+ * @prio_type: priority type for each traffic class
+ *
+ * Configure Tx Descriptor Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type)
+{
+ u32 reg, max_credits;
+ u8 i;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);
+
+ /* Enable arbiter */
+ reg &= ~IXGBE_DPMCS_ARBDIS;
+ /* Enable DFP and Recycle mode */
+ reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
+ reg |= IXGBE_DPMCS_TSOEF;
+ /* Configure Max TSO packet size 34KB including payload and headers */
+ reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
+
+ IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ max_credits = max[i];
+ reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
+ reg |= refill[i];
+ reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
+
+ if (prio_type[i] == prio_group)
+ reg |= IXGBE_TDTQ2TCCR_GSP;
+
+ if (prio_type[i] == prio_link)
+ reg |= IXGBE_TDTQ2TCCR_LSP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits for each traffic class
+ * @max: max credits for each traffic class
+ * @bwg_id: bandwidth group ID for each traffic class
+ * @prio_type: priority type for each traffic class
+ *
+ * Configure Tx Data Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type)
+{
+ u32 reg;
+ u8 i;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
+ /* Enable Data Plane Arbiter */
+ reg &= ~IXGBE_PDPMCS_ARBDIS;
+ /* Enable DFP and Transmit Recycle Mode */
+ reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM);
+
+ IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ reg = refill[i];
+ reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
+ reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
+
+ if (prio_type[i] == prio_group)
+ reg |= IXGBE_TDPT2TCCR_GSP;
+
+ if (prio_type[i] == prio_link)
+ reg |= IXGBE_TDPT2TCCR_LSP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
+ }
+
+ /* Enable Tx packet buffer division */
+ reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
+ reg |= IXGBE_DTXCTL_ENDBUBD;
+ IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg);
+
+ return 0;
+}
+
+/**
+ * ixgbe_dcb_config_pfc_82598 - Config priority flow control
+ * @hw: pointer to hardware structure
+ * @pfc_en: bitmask of enabled PFC traffic classes
+ *
+ * Configure Priority Flow Control for each traffic class.
+ */
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
+{
+ u32 reg;
+ u8 i;
+
+ if (pfc_en) {
+ /* Enable Transmit Priority Flow Control */
+ reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+ reg &= ~IXGBE_RMCS_TFCE_802_3X;
+ /* correct the reporting of our flow control status */
+ reg |= IXGBE_RMCS_TFCE_PRIORITY;
+ IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
+
+ /* Enable Receive Priority Flow Control */
+ reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ reg &= ~IXGBE_FCTRL_RFCE;
+ reg |= IXGBE_FCTRL_RPFCE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
+
+ /* Configure pause time */
+ for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
+
+ /* Configure flow control refresh threshold value */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
+ }
+
+ /*
+ * Configure flow control thresholds and enable priority flow control
+ * for each traffic class.
+ */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ int enabled = pfc_en & (1 << i);
+
+ reg = hw->fc.low_water << 10;
+
+ if (enabled == pfc_enabled_tx ||
+ enabled == pfc_enabled_full)
+ reg |= IXGBE_FCRTL_XONE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
+
+ reg = hw->fc.high_water[i] << 10;
+ if (enabled == pfc_enabled_tx ||
+ enabled == pfc_enabled_full)
+ reg |= IXGBE_FCRTH_FCEN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics
+ * @hw: pointer to hardware structure
+ *
+ * Configure queue statistics registers; all queues belonging to the same
+ * traffic class use a single set of queue statistics counters.
+ */
+static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
+{
+ u32 reg = 0;
+ u8 i = 0;
+ u8 j = 0;
+
+ /* Receive Queues stats setting - 8 queues per statistics reg */
+ for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i));
+ reg |= ((0x1010101) * j);
+ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1));
+ reg |= ((0x1010101) * j);
+ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
+ }
+ /* Transmit Queues stats setting - 4 queues per statistics reg */
+ for (i = 0; i < 8; i++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
+ reg |= ((0x1010101) * i);
+ IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_dcb_hw_config_82598 - Config and enable DCB
+ * @hw: pointer to hardware structure
+ * @pfc_en: bitmask of enabled PFC traffic classes
+ * @refill: refill credits for each traffic class
+ * @max: max credits for each traffic class
+ * @bwg_id: bandwidth group ID for each traffic class
+ * @prio_type: priority type for each traffic class
+ *
+ * Configure DCB settings and enable DCB mode.
+ */
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type)
+{
+ ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
+ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_pfc_82598(hw, pfc_en);
+ ixgbe_dcb_config_tc_stats_82598(hw);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
new file mode 100644
index 000000000000..2f318935561a
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
@@ -0,0 +1,97 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _DCB_82598_CONFIG_H_
+#define _DCB_82598_CONFIG_H_
+
+/* DCB register definitions */
+
+#define IXGBE_DPMCS_MTSOS_SHIFT 16
+#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, 1 DFP - Deficit Fixed Priority */
+#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */
+#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */
+#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */
+
+#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */
+
+#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */
+#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */
+
+#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet buffers enable */
+#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores (RSS) enable */
+
+#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12
+#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9
+#define IXGBE_TDTQ2TCCR_GSP 0x40000000
+#define IXGBE_TDTQ2TCCR_LSP 0x80000000
+
+#define IXGBE_TDPT2TCCR_MCL_SHIFT 12
+#define IXGBE_TDPT2TCCR_BWG_SHIFT 9
+#define IXGBE_TDPT2TCCR_GSP 0x40000000
+#define IXGBE_TDPT2TCCR_LSP 0x80000000
+
+#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, 1 for DFP - Deficit Fixed Priority */
+#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */
+#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */
+
+#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */
+
+#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
+#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
+#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
+#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
+
+#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000
+
+/* DCB hardware-specific driver APIs */
+
+/* DCB PFC functions */
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en);
+
+/* DCB hw initialization */
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type);
+
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
+
+#endif /* _DCB_82598_CONFIG_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
new file mode 100644
index 000000000000..32cd97bc794d
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -0,0 +1,376 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe.h"
+#include "ixgbe_type.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82599.h"
+
+/**
+ * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
+ * @prio_tc: priority to tc assignments indexed by priority
+ *
+ * Configure Rx Packet Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type,
+ u8 *prio_tc)
+{
+ u32 reg = 0;
+ u32 credit_refill = 0;
+ u32 credit_max = 0;
+ u8 i = 0;
+
+ /*
+ * Disable the arbiter before changing parameters
+ * (always enable recycle mode; WSP)
+ */
+ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+
+ /* Map all traffic classes to their UP */
+ reg = 0;
+ for (i = 0; i < MAX_USER_PRIORITY; i++)
+ reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
+ IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ credit_refill = refill[i];
+ credit_max = max[i];
+ reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
+
+ reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
+
+ if (prio_type[i] == prio_link)
+ reg |= IXGBE_RTRPT4C_LSP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
+ }
+
+ /*
+ * Configure Rx packet plane (recycle mode; WSP) and
+ * enable arbiter
+ */
+ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
+ IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+
+ return 0;
+}
+
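The RTRUP2TC write above packs one 3-bit traffic-class field per user priority (IXGBE_RTRUP2TC_UP_SHIFT is 3, defined in ixgbe_dcb_82599.h later in this diff), so all eight priorities fit in the low 24 bits. A standalone sketch of the packing, plus the reverse lookup:

/* Standalone sketch: packing the RTRUP2TC user-priority-to-TC map.
 * UP_SHIFT mirrors IXGBE_RTRUP2TC_UP_SHIFT (3) from the header below. */
#include <stdint.h>
#include <stdio.h>

#define UP_SHIFT 3		/* 3 bits per user priority */
#define MAX_USER_PRIORITY 8

static uint32_t pack_up2tc(const uint8_t *prio_tc)
{
	uint32_t reg = 0;

	for (int up = 0; up < MAX_USER_PRIORITY; up++)
		reg |= (uint32_t)prio_tc[up] << (up * UP_SHIFT);
	return reg;
}

int main(void)
{
	/* e.g. priorities 0-3 on TC0, 4-7 on TC1 */
	uint8_t prio_tc[MAX_USER_PRIORITY] = { 0, 0, 0, 0, 1, 1, 1, 1 };
	uint32_t reg = pack_up2tc(prio_tc);

	printf("RTRUP2TC = 0x%06x\n", reg);
	/* reverse lookup: TC for user priority 5 */
	printf("UP5 -> TC%u\n", (reg >> (5 * UP_SHIFT)) & 0x7);
	return 0;
}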
+/**
+ * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
+ *
+ * Configure Tx Descriptor Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type)
+{
+ u32 reg, max_credits;
+ u8 i;
+
+ /* Clear the per-Tx queue credits; we use per-TC instead */
+ for (i = 0; i < 128; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
+ }
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ max_credits = max[i];
+ reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
+ reg |= refill[i];
+ reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
+
+ if (prio_type[i] == prio_group)
+ reg |= IXGBE_RTTDT2C_GSP;
+
+ if (prio_type[i] == prio_link)
+ reg |= IXGBE_RTTDT2C_LSP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
+ }
+
+ /*
+ * Configure Tx descriptor plane (recycle mode; WSP) and
+ * enable arbiter
+ */
+ reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+ return 0;
+}
+
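Each RTTDT2C write above packs one traffic class's descriptor-plane credits into a single word: refill credits in the low bits, the BWG index at bit 9 (IXGBE_RTTDT2C_BWG_SHIFT), the max-credit limit at bit 12 (IXGBE_RTTDT2C_MCL_SHIFT), and GSP/LSP in bits 30/31. A standalone sketch; the field widths are only inferred from the shifts, so treat the layout as illustrative rather than authoritative:

/* Standalone sketch of the per-TC credit word packed by the RTTDT2C
 * writes above. Shifts mirror the header below; field widths are
 * inferred from the shifts, so this layout is illustrative. */
#include <stdint.h>
#include <stdio.h>

#define MCL_SHIFT 12		/* IXGBE_RTTDT2C_MCL_SHIFT */
#define BWG_SHIFT 9		/* IXGBE_RTTDT2C_BWG_SHIFT */
#define GSP 0x40000000u		/* group strict priority */
#define LSP 0x80000000u		/* link strict priority */

static uint32_t pack_tc_credits(uint16_t refill, uint16_t max,
				uint8_t bwg_id, int group_strict,
				int link_strict)
{
	uint32_t reg = (uint32_t)max << MCL_SHIFT;

	reg |= refill;				/* bits below BWG_SHIFT */
	reg |= (uint32_t)bwg_id << BWG_SHIFT;	/* 3-bit BWG index */
	if (group_strict)
		reg |= GSP;
	if (link_strict)
		reg |= LSP;
	return reg;
}

int main(void)
{
	printf("RTTDT2C = 0x%08x\n", pack_tc_credits(64, 512, 1, 1, 0));
	return 0;
}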
+/**
+ * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
+ * @prio_tc: priority to tc assignments indexed by priority
+ *
+ * Configure Tx Packet Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type,
+ u8 *prio_tc)
+{
+ u32 reg;
+ u8 i;
+
+ /*
+ * Disable the arbiter before changing parameters
+ * (always enable recycle mode; SP; arb delay)
+ */
+ reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
+ (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
+ IXGBE_RTTPCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
+
+ /* Map all traffic classes to their UP */
+ reg = 0;
+ for (i = 0; i < MAX_USER_PRIORITY; i++)
+ reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
+ IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
+
+ /* Configure traffic class credits and priority */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ reg = refill[i];
+ reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
+ reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
+
+ if (prio_type[i] == prio_group)
+ reg |= IXGBE_RTTPT2C_GSP;
+
+ if (prio_type[i] == prio_link)
+ reg |= IXGBE_RTTPT2C_LSP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
+ }
+
+ /*
+ * Configure Tx packet plane (recycle mode; SP; arb delay) and
+ * enable arbiter
+ */
+ reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
+ (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
+
+ return 0;
+}
+
+/**
+ * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
+ * @hw: pointer to hardware structure
+ * @pfc_en: enabled pfc bitmask
+ * @prio_tc: priority to tc assignments indexed by priority
+ *
+ * Configure Priority Flow Control (PFC) for each traffic class.
+ */
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
+{
+ u32 i, j, reg;
+ u8 max_tc = 0;
+
+ for (i = 0; i < MAX_USER_PRIORITY; i++)
+ if (prio_tc[i] > max_tc)
+ max_tc = prio_tc[i];
+
+ /* Configure PFC Tx thresholds per TC */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ int enabled = 0;
+
+ if (i > max_tc) {
+ reg = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
+ continue;
+ }
+
+ for (j = 0; j < MAX_USER_PRIORITY; j++) {
+ if ((prio_tc[j] == i) && (pfc_en & (1 << j))) {
+ enabled = 1;
+ break;
+ }
+ }
+
+ reg = hw->fc.low_water << 10;
+
+ if (enabled)
+ reg |= IXGBE_FCRTL_XONE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
+
+ reg = hw->fc.high_water[i] << 10;
+ if (enabled)
+ reg |= IXGBE_FCRTH_FCEN;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
+ }
+
+ if (pfc_en) {
+ /* Configure pause time (2 TCs per register) */
+ reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
+ for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+ /* Configure flow control refresh threshold value */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+ reg = IXGBE_FCCFG_TFCE_PRIORITY;
+ IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
+ /*
+ * Enable Receive PFC
+ * 82599 will always honor XOFF frames we receive when
+ * we are in PFC mode however X540 only honors enabled
+ * traffic classes.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ reg &= ~IXGBE_MFLCN_RFCE;
+ reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
+
+ if (hw->mac.type == ixgbe_mac_X540) {
+ reg &= ~IXGBE_MFLCN_RPFCE_MASK;
+ reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
+
+ } else {
+ /* X540 devices have an RX bit that should be cleared
+ * if PFC is disabled on all TCs but the PFC feature is
+ * enabled.
+ */
+ if (hw->mac.type == ixgbe_mac_X540) {
+ reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ reg &= ~IXGBE_MFLCN_RPFCE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
+ }
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+ hw->mac.ops.fc_enable(hw, i);
+ }
+
+ return 0;
+}
+
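The inner j-loop above decides whether PFC is on for a traffic class by checking every user priority mapped to that class against the pfc_en bitmask. The same test as a standalone helper:

/* Standalone version of the "is PFC enabled for this TC" test used in
 * the loop above: a TC is PFC-enabled if any user priority mapped to
 * it has its bit set in the pfc_en bitmask. */
#include <stdio.h>
#include <stdint.h>

#define MAX_USER_PRIORITY 8

static int tc_pfc_enabled(uint8_t pfc_en, const uint8_t *prio_tc, uint8_t tc)
{
	for (int up = 0; up < MAX_USER_PRIORITY; up++) {
		if (prio_tc[up] == tc && (pfc_en & (1u << up)))
			return 1;
	}
	return 0;
}

int main(void)
{
	uint8_t prio_tc[MAX_USER_PRIORITY] = { 0, 0, 0, 1, 1, 1, 2, 3 };

	/* PFC requested on priorities 3 and 4 only -> TC1 is enabled */
	printf("TC0: %d TC1: %d\n",
	       tc_pfc_enabled(0x18, prio_tc, 0),
	       tc_pfc_enabled(0x18, prio_tc, 1)); /* prints "TC0: 0 TC1: 1" */
	return 0;
}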
+/**
+ * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
+ * @hw: pointer to hardware structure
+ *
+ * Configure queue statistics registers; all queues belonging to the same
+ * traffic class use a single set of queue statistics counters.
+ */
+static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
+{
+ u32 reg = 0;
+ u8 i = 0;
+
+ /*
+ * Receive Queues stats setting
+ * 32 RQSMR registers, each configuring 4 queues.
+ * Set all 16 queues of each TC to the same stat
+ * with TC 'n' going to stat 'n'.
+ */
+ for (i = 0; i < 32; i++) {
+ reg = 0x01010101 * (i / 4);
+ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
+ }
+ /*
+ * Transmit Queues stats setting
+ * 32 TQSM registers, each controlling 4 queues.
+ * Set all queues of each TC to the same stat
+ * with TC 'n' going to stat 'n'.
+ * Tx queues are allocated non-uniformly to TCs:
+ * 32, 32, 16, 16, 8, 8, 8, 8.
+ */
+ for (i = 0; i < 32; i++) {
+ if (i < 8)
+ reg = 0x00000000;
+ else if (i < 16)
+ reg = 0x01010101;
+ else if (i < 20)
+ reg = 0x02020202;
+ else if (i < 24)
+ reg = 0x03030303;
+ else if (i < 26)
+ reg = 0x04040404;
+ else if (i < 28)
+ reg = 0x05050505;
+ else if (i < 30)
+ reg = 0x06060606;
+ else
+ reg = 0x07070707;
+ IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
+ }
+
+ return 0;
+}
+
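The if-ladder above encodes the 82599's non-uniform Tx queue allocation (32, 32, 16, 16, 8, 8, 8, 8 queues for TC0-TC7) as a TQSM-register-index to TC lookup; since each TQSM register covers 4 queues, the per-TC register counts are 8, 8, 4, 4, 2, 2, 2, 2. The same mapping as a standalone table-driven helper, spot-checked against the ladder's boundaries:

/* Standalone restatement of the TQSM index -> TC ladder above, driven
 * by the 32/32/16/16/8/8/8/8 Tx queue allocation (4 queues per TQSM
 * register, hence 8/8/4/4/2/2/2/2 registers per TC). */
#include <assert.h>
#include <stdint.h>

static uint8_t tqsm_index_to_tc(int i)
{
	static const int regs_per_tc[8] = { 8, 8, 4, 4, 2, 2, 2, 2 };
	uint8_t tc = 0;

	while (i >= regs_per_tc[tc]) {
		i -= regs_per_tc[tc];
		tc++;
	}
	return tc;
}

int main(void)
{
	/* boundaries from the ladder: 8, 16, 20, 24, 26, 28, 30 */
	assert(tqsm_index_to_tc(0) == 0);
	assert(tqsm_index_to_tc(8) == 1);
	assert(tqsm_index_to_tc(16) == 2);
	assert(tqsm_index_to_tc(25) == 4);
	assert(tqsm_index_to_tc(31) == 7);
	return 0;
}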
+/**
+ * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
+ * @hw: pointer to hardware structure
+ * @pfc_en: enabled PFC bitmask
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
+ * @prio_tc: priority to tc assignments indexed by priority
+ *
+ * Configure DCB settings and enable DCB mode.
+ */
+s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
+{
+ ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
+ prio_type, prio_tc);
+ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
+ bwg_id, prio_type, prio_tc);
+ ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
+ ixgbe_dcb_config_tc_stats_82599(hw);
+
+ return 0;
+}
+
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
new file mode 100644
index 000000000000..a59d5dc59d04
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -0,0 +1,123 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _DCB_82599_CONFIG_H_
+#define _DCB_82599_CONFIG_H_
+
+/* DCB register definitions */
+#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin,
+ * 1 WSP - Weighted Strict Priority
+ */
+#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin,
+ * 1 WRR - Weighted Round Robin
+ */
+#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */
+#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
+#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */
+#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must
+ * clear!
+ */
+#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */
+
+/* Receive UP2TC mapping */
+#define IXGBE_RTRUP2TC_UP_SHIFT 3
+/* Transmit UP2TC mapping */
+#define IXGBE_RTTUP2TC_UP_SHIFT 3
+
+#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */
+#define IXGBE_RTRPT4C_BWG_SHIFT 9 /* Offset to BWG index */
+#define IXGBE_RTRPT4C_GSP 0x40000000 /* GSP enable bit */
+#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */
+
+#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet
+ * buffers enable
+ */
+#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores
+ * (RSS) enable
+ */
+
+/* RTRPCS Bit Masks */
+#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */
+/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
+#define IXGBE_RTRPCS_RAC 0x00000004
+#define IXGBE_RTRPCS_ARBDIS 0x00000040 /* Arbitration disable bit */
+
+/* RTTDT2C Bit Masks */
+#define IXGBE_RTTDT2C_MCL_SHIFT 12
+#define IXGBE_RTTDT2C_BWG_SHIFT 9
+#define IXGBE_RTTDT2C_GSP 0x40000000
+#define IXGBE_RTTDT2C_LSP 0x80000000
+
+#define IXGBE_RTTPT2C_MCL_SHIFT 12
+#define IXGBE_RTTPT2C_BWG_SHIFT 9
+#define IXGBE_RTTPT2C_GSP 0x40000000
+#define IXGBE_RTTPT2C_LSP 0x80000000
+
+/* RTTPCS Bit Masks */
+#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin,
+ * 1 SP - Strict Priority
+ */
+#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */
+#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */
+#define IXGBE_RTTPCS_ARBD_SHIFT 22
+#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */
+
+/* SECTXMINIFG DCB */
+#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */
+
+/* DCB hardware-specific driver APIs */
+
+/* DCB PFC functions */
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc);
+
+/* DCB hw initialization */
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type,
+ u8 *prio_tc);
+
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type,
+ u8 *prio_tc);
+
+s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type,
+ u8 *prio_tc);
+
+#endif /* _DCB_82599_CONFIG_H_ */
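The RTTPCS definitions above combine into the two control words written in ixgbe_dcb_config_tx_data_arbiter_82599(): one with ARBDIS set while the credits are reprogrammed, one without it to re-enable the arbiter. A standalone sketch of the composition:

/* Standalone sketch: composing the RTTPCS control words used to
 * disable the Tx packet-plane arbiter for reconfiguration and then
 * re-enable it (see ixgbe_dcb_config_tx_data_arbiter_82599 above). */
#include <stdint.h>
#include <stdio.h>

#define RTTPCS_TPPAC 0x00000020u	/* strict priority */
#define RTTPCS_ARBDIS 0x00000040u	/* arbiter disable */
#define RTTPCS_TPRM 0x00000100u		/* transmit recycle mode */
#define RTTPCS_ARBD_SHIFT 22
#define RTTPCS_ARBD_DCB 0x4u		/* arbitration delay, DCB mode */

int main(void)
{
	uint32_t base = RTTPCS_TPPAC | RTTPCS_TPRM |
			(RTTPCS_ARBD_DCB << RTTPCS_ARBD_SHIFT);

	printf("disable: 0x%08x\n", base | RTTPCS_ARBDIS); /* 0x01000160 */
	printf("enable:  0x%08x\n", base);                 /* 0x01000120 */
	return 0;
}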
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
new file mode 100644
index 000000000000..be66bb679d5a
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -0,0 +1,797 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe.h"
+#include <linux/dcbnl.h>
+#include "ixgbe_dcb_82598.h"
+#include "ixgbe_dcb_82599.h"
+
+/* Callbacks for DCB netlink in the kernel */
+#define BIT_DCB_MODE 0x01
+#define BIT_PFC 0x02
+#define BIT_PG_RX 0x04
+#define BIT_PG_TX 0x08
+#define BIT_APP_UPCHG 0x10
+#define BIT_LINKSPEED 0x80
+
+/* Responses for the DCB_C_SET_ALL command */
+#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */
+#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */
+#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */
+
+int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
+ struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max)
+{
+ struct tc_configuration *src_tc_cfg = NULL;
+ struct tc_configuration *dst_tc_cfg = NULL;
+ int i;
+
+ if (!src_dcb_cfg || !dst_dcb_cfg)
+ return -EINVAL;
+
+ for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
+ src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
+ dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
+
+ dst_tc_cfg->path[DCB_TX_CONFIG].prio_type =
+ src_tc_cfg->path[DCB_TX_CONFIG].prio_type;
+
+ dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id =
+ src_tc_cfg->path[DCB_TX_CONFIG].bwg_id;
+
+ dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent =
+ src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent;
+
+ dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap =
+ src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap;
+
+ dst_tc_cfg->path[DCB_RX_CONFIG].prio_type =
+ src_tc_cfg->path[DCB_RX_CONFIG].prio_type;
+
+ dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id =
+ src_tc_cfg->path[DCB_RX_CONFIG].bwg_id;
+
+ dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent =
+ src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent;
+
+ dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap =
+ src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap;
+ }
+
+ for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) {
+ dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG]
+ [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
+ [DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
+ dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG]
+ [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
+ [DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
+ }
+
+ for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) {
+ dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc =
+ src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc;
+ }
+
+ dst_dcb_cfg->pfc_mode_enable = src_dcb_cfg->pfc_mode_enable;
+
+ return 0;
+}
+
+static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
+}
+
+static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
+{
+ u8 err = 0;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ /* Fail command if not in CEE mode */
+ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return 1;
+
+ /* verify there is something to do; if not, exit */
+ if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ return err;
+
+ if (state > 0)
+ err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs);
+ else
+ err = ixgbe_setup_tc(netdev, 0);
+
+ return err;
+}
+
+static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
+ u8 *perm_addr)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int i, j;
+
+ memset(perm_addr, 0xff, MAX_ADDR_LEN);
+
+ for (i = 0; i < netdev->addr_len; i++)
+ perm_addr[i] = adapter->hw.mac.perm_addr[i];
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ for (j = 0; j < netdev->addr_len; j++, i++)
+ perm_addr[i] = adapter->hw.mac.san_addr[j];
+ break;
+ default:
+ break;
+ }
+}
+
+static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
+ u8 prio, u8 bwg_id, u8 bw_pct,
+ u8 up_map)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ /* Abort a bad configuration */
+ if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
+ return;
+
+ if (prio != DCB_ATTR_VALUE_UNDEFINED)
+ adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
+ if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
+ adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id;
+ if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
+ adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent =
+ bw_pct;
+ if (up_map != DCB_ATTR_VALUE_UNDEFINED)
+ adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap =
+ up_map;
+
+ if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type !=
+ adapter->dcb_cfg.tc_config[tc].path[0].prio_type) ||
+ (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id !=
+ adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) ||
+ (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
+ adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
+ (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
+ adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
+ adapter->dcb_set_bitmap |= BIT_PG_TX;
+
+ if (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
+ adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)
+ adapter->dcb_set_bitmap |= BIT_PFC;
+}
+
+static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
+ u8 bw_pct)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
+
+ if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
+ adapter->dcb_cfg.bw_percentage[0][bwg_id])
+ adapter->dcb_set_bitmap |= BIT_PG_TX;
+}
+
+static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
+ u8 prio, u8 bwg_id, u8 bw_pct,
+ u8 up_map)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ /* Abort bad configurations */
+ if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
+ return;
+
+ if (prio != DCB_ATTR_VALUE_UNDEFINED)
+ adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
+ if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
+ adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id;
+ if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
+ adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent =
+ bw_pct;
+ if (up_map != DCB_ATTR_VALUE_UNDEFINED)
+ adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap =
+ up_map;
+
+ if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type !=
+ adapter->dcb_cfg.tc_config[tc].path[1].prio_type) ||
+ (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id !=
+ adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) ||
+ (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
+ adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
+ (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
+ adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
+ adapter->dcb_set_bitmap |= BIT_PG_RX;
+
+ if (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
+ adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)
+ adapter->dcb_set_bitmap |= BIT_PFC;
+}
+
+static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
+ u8 bw_pct)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
+
+ if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
+ adapter->dcb_cfg.bw_percentage[1][bwg_id])
+ adapter->dcb_set_bitmap |= BIT_PG_RX;
+}
+
+static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
+ u8 *prio, u8 *bwg_id, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type;
+ *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id;
+ *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent;
+ *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap;
+}
+
+static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
+ u8 *bw_pct)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id];
+}
+
+static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
+ u8 *prio, u8 *bwg_id, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type;
+ *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id;
+ *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent;
+ *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap;
+}
+
+static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
+ u8 *bw_pct)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id];
+}
+
+static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
+ u8 setting)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
+ if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
+ adapter->dcb_cfg.tc_config[priority].dcb_pfc) {
+ adapter->dcb_set_bitmap |= BIT_PFC;
+ adapter->temp_dcb_cfg.pfc_mode_enable = true;
+ }
+}
+
+static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
+ u8 *setting)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
+}
+
+static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int ret, i;
+#ifdef IXGBE_FCOE
+ struct dcb_app app = {
+ .selector = DCB_APP_IDTYPE_ETHTYPE,
+ .protocol = ETH_P_FCOE,
+ };
+ u8 up = dcb_getapp(netdev, &app);
+#endif
+
+ /* Fail command if not in CEE mode */
+ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return 1;
+
+ ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
+ MAX_TRAFFIC_CLASS);
+ if (ret)
+ return DCB_NO_HW_CHG;
+
+#ifdef IXGBE_FCOE
+ if (up && (up != (1 << adapter->fcoe.up)))
+ adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
+
+ /*
+ * Only take down the adapter if an app change occurred. FCoE
+ * may shuffle Tx rings in this case, and that cannot currently
+ * be done without a reset.
+ */
+ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
+ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ adapter->fcoe.up = ffs(up) - 1;
+
+ if (netif_running(netdev))
+ netdev->netdev_ops->ndo_stop(netdev);
+ ixgbe_clear_interrupt_scheme(adapter);
+ }
+#endif
+
+ if (adapter->dcb_cfg.pfc_mode_enable) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (adapter->hw.fc.current_mode != ixgbe_fc_pfc)
+ adapter->last_lfc_mode =
+ adapter->hw.fc.current_mode;
+ break;
+ default:
+ break;
+ }
+ adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
+ } else {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ adapter->hw.fc.requested_mode = ixgbe_fc_none;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+ break;
+ default:
+ break;
+ }
+ }
+
+#ifdef IXGBE_FCOE
+ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
+ ixgbe_init_interrupt_scheme(adapter);
+ if (netif_running(netdev))
+ netdev->netdev_ops->ndo_open(netdev);
+ ret = DCB_HW_CHG_RST;
+ }
+#endif
+
+ if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
+ u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
+ u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
+ /* Priority to TC mapping in CEE case default to 1:1 */
+ u8 prio_tc[MAX_USER_PRIORITY];
+ int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+#ifdef IXGBE_FCOE
+ if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+ max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif
+
+ ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
+ max_frame, DCB_TX_CONFIG);
+ ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
+ max_frame, DCB_RX_CONFIG);
+
+ ixgbe_dcb_unpack_refill(&adapter->dcb_cfg,
+ DCB_TX_CONFIG, refill);
+ ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max);
+ ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg,
+ DCB_TX_CONFIG, bwg_id);
+ ixgbe_dcb_unpack_prio(&adapter->dcb_cfg,
+ DCB_TX_CONFIG, prio_type);
+ ixgbe_dcb_unpack_map(&adapter->dcb_cfg,
+ DCB_TX_CONFIG, prio_tc);
+
+ ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
+ bwg_id, prio_type, prio_tc);
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
+ }
+
+ if (adapter->dcb_set_bitmap & BIT_PFC) {
+ u8 pfc_en;
+ u8 prio_tc[MAX_USER_PRIORITY];
+
+ ixgbe_dcb_unpack_map(&adapter->dcb_cfg,
+ DCB_TX_CONFIG, prio_tc);
+ ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
+ ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en, prio_tc);
+ ret = DCB_HW_CHG;
+ }
+
+ if (adapter->dcb_cfg.pfc_mode_enable)
+ adapter->hw.fc.current_mode = ixgbe_fc_pfc;
+
+ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
+ adapter->dcb_set_bitmap = 0x00;
+ return ret;
+}
+
+static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_UP2TC:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_GSP:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_BCN:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = adapter->dcbx_cap;
+ break;
+ default:
+ *cap = false;
+ break;
+ }
+
+ return 0;
+}
+
+static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ u8 rval = 0;
+
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ switch (tcid) {
+ case DCB_NUMTCS_ATTR_PG:
+ *num = adapter->dcb_cfg.num_tcs.pg_tcs;
+ break;
+ case DCB_NUMTCS_ATTR_PFC:
+ *num = adapter->dcb_cfg.num_tcs.pfc_tcs;
+ break;
+ default:
+ rval = -EINVAL;
+ break;
+ }
+ } else {
+ rval = -EINVAL;
+ }
+
+ return rval;
+}
+
+static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
+{
+ return -EINVAL;
+}
+
+static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->dcb_cfg.pfc_mode_enable;
+}
+
+static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ adapter->temp_dcb_cfg.pfc_mode_enable = state;
+ if (adapter->temp_dcb_cfg.pfc_mode_enable !=
+ adapter->dcb_cfg.pfc_mode_enable)
+ adapter->dcb_set_bitmap |= BIT_PFC;
+}
+
+/**
+ * ixgbe_dcbnl_getapp - retrieve the DCBX application user priority
+ * @netdev: the corresponding netdev
+ * @idtype: identifies the id as an ether type or a TCP/UDP port number
+ * @id: either an ether type or a TCP/UDP port number
+ *
+ * Returns: a non-zero 802.1p user priority bitmap on success; otherwise
+ * returns 0 (the invalid user priority bitmap) to indicate an error.
+ */
+static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ };
+
+ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return 0;
+
+ return dcb_getapp(netdev, &app);
+}
+
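dcb_getapp() returns the user priority as a one-hot bitmap, which the CEE set_all path above turns into a priority number with ffs(up) - 1. A standalone illustration of that conversion, using the libc ffs() from <strings.h>:

/* Standalone illustration of the bitmap -> priority conversion the
 * driver applies to dcb_getapp() results (adapter->fcoe.up = ffs(up) - 1). */
#include <assert.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	/* priority 3 encoded as a one-hot 802.1p bitmap */
	unsigned char up_bitmap = 1 << 3;

	assert(ffs(up_bitmap) - 1 == 3);
	/* 0 is the invalid bitmap; ffs(0) == 0, so guard before converting */
	assert(ffs(0) == 0);
	return 0;
}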
+static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
+
+ /* No IEEE ETS settings available */
+ if (!my_ets)
+ return -EINVAL;
+
+ ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs;
+ ets->cbs = my_ets->cbs;
+ memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
+ memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
+ return 0;
+}
+
+static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ int i;
+ __u8 max_tc = 0;
+
+ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ if (!adapter->ixgbe_ieee_ets) {
+ adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets),
+ GFP_KERNEL);
+ if (!adapter->ixgbe_ieee_ets)
+ return -ENOMEM;
+ }
+
+ memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ if (ets->prio_tc[i] > max_tc)
+ max_tc = ets->prio_tc[i];
+ }
+
+ if (max_tc)
+ max_tc++;
+
+ if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs)
+ return -EINVAL;
+
+ if (max_tc != netdev_get_num_tc(dev))
+ ixgbe_setup_tc(dev, max_tc);
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ netdev_set_prio_tc_map(dev, i, ets->prio_tc[i]);
+
+ return ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
+}
+
+static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
+ int i;
+
+ /* No IEEE PFC settings available */
+ if (!my_pfc)
+ return -EINVAL;
+
+ pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs;
+ pfc->pfc_en = my_pfc->pfc_en;
+ pfc->mbc = my_pfc->mbc;
+ pfc->delay = my_pfc->delay;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ pfc->requests[i] = adapter->stats.pxoffrxc[i];
+ pfc->indications[i] = adapter->stats.pxofftxc[i];
+ }
+
+ return 0;
+}
+
+static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ u8 *prio_tc;
+
+ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ if (!adapter->ixgbe_ieee_pfc) {
+ adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc),
+ GFP_KERNEL);
+ if (!adapter->ixgbe_ieee_pfc)
+ return -ENOMEM;
+ }
+
+ prio_tc = adapter->ixgbe_ieee_ets->prio_tc;
+ memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
+ return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc);
+}
+
+#ifdef IXGBE_FCOE
+static void ixgbe_dcbnl_devreset(struct net_device *dev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ if (netif_running(dev))
+ dev->netdev_ops->ndo_stop(dev);
+
+ ixgbe_clear_interrupt_scheme(adapter);
+ ixgbe_init_interrupt_scheme(adapter);
+
+ if (netif_running(dev))
+ dev->netdev_ops->ndo_open(dev);
+}
+#endif
+
+static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int err = -EINVAL;
+
+ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return err;
+
+ err = dcb_ieee_setapp(dev, app);
+
+#ifdef IXGBE_FCOE
+ if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ app->protocol == ETH_P_FCOE) {
+ u8 app_mask = dcb_ieee_getapp_mask(dev, app);
+
+ if (app_mask & (1 << adapter->fcoe.up))
+ return err;
+
+ adapter->fcoe.up = app->priority;
+ ixgbe_dcbnl_devreset(dev);
+ }
+#endif
+ return 0;
+}
+
+static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int err;
+
+ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ err = dcb_ieee_delapp(dev, app);
+
+#ifdef IXGBE_FCOE
+ if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ app->protocol == ETH_P_FCOE) {
+ u8 app_mask = dcb_ieee_getapp_mask(dev, app);
+
+ if (app_mask & (1 << adapter->fcoe.up))
+ return err;
+
+ adapter->fcoe.up = app_mask ?
+ ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC;
+ ixgbe_dcbnl_devreset(dev);
+ }
+#endif
+ return err;
+}
+
+static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ return adapter->dcbx_cap;
+}
+
+static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ieee_ets ets = {0};
+ struct ieee_pfc pfc = {0};
+
+ /* no support for LLD_MANAGED modes or CEE+IEEE */
+ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
+ ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
+ !(mode & DCB_CAP_DCBX_HOST))
+ return 1;
+
+ if (mode == adapter->dcbx_cap)
+ return 0;
+
+ adapter->dcbx_cap = mode;
+
+ /* ETS and PFC defaults */
+ ets.ets_cap = 8;
+ pfc.pfc_cap = 8;
+
+ if (mode & DCB_CAP_DCBX_VER_IEEE) {
+ ixgbe_dcbnl_ieee_setets(dev, &ets);
+ ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
+ } else if (mode & DCB_CAP_DCBX_VER_CEE) {
+ adapter->dcb_set_bitmap |= (BIT_PFC | BIT_PG_TX | BIT_PG_RX);
+ ixgbe_dcbnl_set_all(dev);
+ } else {
+ /* Drop into single-TC strict-priority mode, as this
+ * indicates both the CEE and IEEE DCBX versions are
+ * disabled.
+ */
+ ixgbe_dcbnl_ieee_setets(dev, &ets);
+ ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
+ ixgbe_setup_tc(dev, 0);
+ }
+
+ return 0;
+}
+
+const struct dcbnl_rtnl_ops dcbnl_ops = {
+ .ieee_getets = ixgbe_dcbnl_ieee_getets,
+ .ieee_setets = ixgbe_dcbnl_ieee_setets,
+ .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc,
+ .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc,
+ .ieee_setapp = ixgbe_dcbnl_ieee_setapp,
+ .ieee_delapp = ixgbe_dcbnl_ieee_delapp,
+ .getstate = ixgbe_dcbnl_get_state,
+ .setstate = ixgbe_dcbnl_set_state,
+ .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,
+ .setpgtccfgtx = ixgbe_dcbnl_set_pg_tc_cfg_tx,
+ .setpgbwgcfgtx = ixgbe_dcbnl_set_pg_bwg_cfg_tx,
+ .setpgtccfgrx = ixgbe_dcbnl_set_pg_tc_cfg_rx,
+ .setpgbwgcfgrx = ixgbe_dcbnl_set_pg_bwg_cfg_rx,
+ .getpgtccfgtx = ixgbe_dcbnl_get_pg_tc_cfg_tx,
+ .getpgbwgcfgtx = ixgbe_dcbnl_get_pg_bwg_cfg_tx,
+ .getpgtccfgrx = ixgbe_dcbnl_get_pg_tc_cfg_rx,
+ .getpgbwgcfgrx = ixgbe_dcbnl_get_pg_bwg_cfg_rx,
+ .setpfccfg = ixgbe_dcbnl_set_pfc_cfg,
+ .getpfccfg = ixgbe_dcbnl_get_pfc_cfg,
+ .setall = ixgbe_dcbnl_set_all,
+ .getcap = ixgbe_dcbnl_getcap,
+ .getnumtcs = ixgbe_dcbnl_getnumtcs,
+ .setnumtcs = ixgbe_dcbnl_setnumtcs,
+ .getpfcstate = ixgbe_dcbnl_getpfcstate,
+ .setpfcstate = ixgbe_dcbnl_setpfcstate,
+ .getapp = ixgbe_dcbnl_getapp,
+ .getdcbx = ixgbe_dcbnl_getdcbx,
+ .setdcbx = ixgbe_dcbnl_setdcbx,
+};
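The ops table above is the entry point the dcbnl netlink core dispatches into. Attaching it to the net_device happens outside this hunk; a hedged sketch of the hookup, with the exact location in the driver's probe path being an assumption here:

/* Hedged sketch of how the table above is hooked up; the surrounding
 * code is not part of this hunk, so treat placement as illustrative. */
#ifdef CONFIG_IXGBE_DCB
	netdev->dcbnl_ops = &dcbnl_ops;
#endif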
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
new file mode 100644
index 000000000000..10ea29f66405
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -0,0 +1,2549 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for ixgbe */
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+
+#include "ixgbe.h"
+
+#define IXGBE_ALL_RAR_ENTRIES 16
+
+enum {NETDEV_STATS, IXGBE_STATS};
+
+struct ixgbe_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int type;
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define IXGBE_STAT(m) IXGBE_STATS, \
+ sizeof(((struct ixgbe_adapter *)0)->m), \
+ offsetof(struct ixgbe_adapter, m)
+#define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \
+ sizeof(((struct rtnl_link_stats64 *)0)->m), \
+ offsetof(struct rtnl_link_stats64, m)
+
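The IXGBE_STAT/IXGBE_NETDEV_STAT macros above record only where each counter lives (source struct, size, byte offset), so the string table stays pure data and the ethtool dump can fetch values generically via pointer arithmetic. A standalone miniature of the pattern with a hypothetical stand-in struct:

/* Standalone miniature of the stat-table pattern above: record the
 * byte offset and size of each counter, then fetch it generically.
 * "fake_adapter" is a stand-in, not a driver type. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fake_adapter {
	uint64_t tx_busy;
	uint64_t restart_queue;
};

struct stat_desc {
	const char *name;
	int size;
	int offset;
};

#define STAT(m) { #m, sizeof(((struct fake_adapter *)0)->m), \
		  offsetof(struct fake_adapter, m) }

static const struct stat_desc stats[] = { STAT(tx_busy), STAT(restart_queue) };

int main(void)
{
	struct fake_adapter a = { .tx_busy = 7, .restart_queue = 42 };

	for (size_t i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
		const char *p = (const char *)&a + stats[i].offset;

		printf("%s = %llu\n", stats[i].name,
		       (unsigned long long)*(const uint64_t *)p);
	}
	return 0;
}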
+static struct ixgbe_stats ixgbe_gstrings_stats[] = {
+ {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
+ {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
+ {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
+ {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
+ {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
+ {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
+ {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
+ {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
+ {"lsc_int", IXGBE_STAT(lsc_int)},
+ {"tx_busy", IXGBE_STAT(tx_busy)},
+ {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
+ {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
+ {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
+ {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
+ {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
+ {"multicast", IXGBE_NETDEV_STAT(multicast)},
+ {"broadcast", IXGBE_STAT(stats.bprc)},
+ {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
+ {"collisions", IXGBE_NETDEV_STAT(collisions)},
+ {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
+ {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
+ {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
+ {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
+ {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
+ {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
+ {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
+ {"fdir_overflow", IXGBE_STAT(fdir_overflow)},
+ {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
+ {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
+ {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
+ {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
+ {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
+ {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
+ {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
+ {"tx_restart_queue", IXGBE_STAT(restart_queue)},
+ {"rx_long_length_errors", IXGBE_STAT(stats.roc)},
+ {"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
+ {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
+ {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
+ {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
+ {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
+ {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
+ {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
+ {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
+ {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
+ {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
+ {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
+ {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
+ {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
+#ifdef IXGBE_FCOE
+ {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
+ {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
+ {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
+ {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
+ {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
+ {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
+#endif /* IXGBE_FCOE */
+};
+
+#define IXGBE_QUEUE_STATS_LEN \
+ ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \
+ ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \
+ (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
+#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+#define IXGBE_PB_STATS_LEN ( \
+ (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \
+ IXGBE_FLAG_DCB_ENABLED) ? \
+ (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
+ / sizeof(u64) : 0)
+#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
+ IXGBE_PB_STATS_LEN + \
+ IXGBE_QUEUE_STATS_LEN)
+
+static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)", "Eeprom test (offline)",
+ "Interrupt test (offline)", "Loopback test (offline)",
+ "Link test (on/offline)"
+};
+#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+
+static int ixgbe_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = 0;
+ bool link_up;
+
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->autoneg = AUTONEG_ENABLE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ if ((hw->phy.media_type == ixgbe_media_type_copper) ||
+ (hw->phy.multispeed_fiber)) {
+ ecmd->supported |= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
+ ecmd->supported |= SUPPORTED_100baseT_Full;
+ break;
+ default:
+ break;
+ }
+
+ ecmd->advertising = ADVERTISED_Autoneg;
+ if (hw->phy.autoneg_advertised) {
+ if (hw->phy.autoneg_advertised &
+ IXGBE_LINK_SPEED_100_FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ if (hw->phy.autoneg_advertised &
+ IXGBE_LINK_SPEED_10GB_FULL)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (hw->phy.autoneg_advertised &
+ IXGBE_LINK_SPEED_1GB_FULL)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ } else {
+ /*
+ * Default advertised modes in case
+ * phy.autoneg_advertised isn't set.
+ */
+ ecmd->advertising |= (ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full);
+ if (hw->mac.type == ixgbe_mac_X540)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ }
+
+ if (hw->phy.media_type == ixgbe_media_type_copper) {
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->port = PORT_TP;
+ } else {
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
+ }
+ } else if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ /* Set as FIBRE until SERDES defined in kernel */
+ if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
+ ecmd->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE);
+ ecmd->advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
+ (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
+ ecmd->supported |= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_FIBRE);
+ ecmd->advertising = (ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_Autoneg |
+ ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ } else {
+ ecmd->supported |= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE);
+ ecmd->advertising = (ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ }
+ } else {
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising = (ADVERTISED_10000baseT_Full |
+ ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ }
+
+ /* Get PHY type */
+ switch (adapter->hw.phy.type) {
+ case ixgbe_phy_tn:
+ case ixgbe_phy_aq:
+ case ixgbe_phy_cu_unknown:
+ /* Copper 10G-BASET */
+ ecmd->port = PORT_TP;
+ break;
+ case ixgbe_phy_qt:
+ ecmd->port = PORT_FIBRE;
+ break;
+ case ixgbe_phy_nl:
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
+ case ixgbe_phy_sfp_ftl:
+ case ixgbe_phy_sfp_avago:
+ case ixgbe_phy_sfp_intel:
+ case ixgbe_phy_sfp_unknown:
+ switch (adapter->hw.phy.sfp_type) {
+ /* SFP+ devices, further checking needed */
+ case ixgbe_sfp_type_da_cu:
+ case ixgbe_sfp_type_da_cu_core0:
+ case ixgbe_sfp_type_da_cu_core1:
+ ecmd->port = PORT_DA;
+ break;
+ case ixgbe_sfp_type_sr:
+ case ixgbe_sfp_type_lr:
+ case ixgbe_sfp_type_srlr_core0:
+ case ixgbe_sfp_type_srlr_core1:
+ ecmd->port = PORT_FIBRE;
+ break;
+ case ixgbe_sfp_type_not_present:
+ ecmd->port = PORT_NONE;
+ break;
+ case ixgbe_sfp_type_1g_cu_core0:
+ case ixgbe_sfp_type_1g_cu_core1:
+ ecmd->port = PORT_TP;
+ ecmd->supported = SUPPORTED_TP;
+ ecmd->advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_TP);
+ break;
+ case ixgbe_sfp_type_unknown:
+ default:
+ ecmd->port = PORT_OTHER;
+ break;
+ }
+ break;
+ case ixgbe_phy_xaui:
+ ecmd->port = PORT_NONE;
+ break;
+ case ixgbe_phy_unknown:
+ case ixgbe_phy_generic:
+ case ixgbe_phy_sfp_unsupported:
+ default:
+ ecmd->port = PORT_OTHER;
+ break;
+ }
+
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (link_up) {
+ switch (link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ break;
+ case IXGBE_LINK_SPEED_100_FULL:
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
+ break;
+ default:
+ break;
+ }
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ethtool_cmd_speed_set(ecmd, -1);
+ ecmd->duplex = -1;
+ }
+
+ return 0;
+}
+
+static int ixgbe_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 advertised, old;
+ s32 err = 0;
+
+ if ((hw->phy.media_type == ixgbe_media_type_copper) ||
+ (hw->phy.multispeed_fiber)) {
+ /*
+ * this function does not support duplex forcing, but can
+ * limit the advertising of the adapter to the specified speed
+ */
+ if (ecmd->autoneg == AUTONEG_DISABLE)
+ return -EINVAL;
+
+ if (ecmd->advertising & ~ecmd->supported)
+ return -EINVAL;
+
+ old = hw->phy.autoneg_advertised;
+ advertised = 0;
+ if (ecmd->advertising & ADVERTISED_10000baseT_Full)
+ advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (ecmd->advertising & ADVERTISED_1000baseT_Full)
+ advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ advertised |= IXGBE_LINK_SPEED_100_FULL;
+
+ if (old == advertised)
+ return err;
+ /* this sets the link speed and restarts auto-neg */
+ hw->mac.autotry_restart = true;
+ err = hw->mac.ops.setup_link(hw, advertised, true, true);
+ if (err) {
+ e_info(probe, "setup link failed with code %d\n", err);
+ hw->mac.ops.setup_link(hw, old, true, true);
+ }
+ } else {
+ /* in this case we currently only support 10Gb/FULL */
+ u32 speed = ethtool_cmd_speed(ecmd);
+ if ((ecmd->autoneg == AUTONEG_ENABLE) ||
+ (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
+ (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static void ixgbe_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (hw->fc.disable_fc_autoneg)
+ pause->autoneg = 0;
+ else
+ pause->autoneg = 1;
+
+ if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
+ pause->rx_pause = 1;
+ } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
+ pause->tx_pause = 1;
+ } else if (hw->fc.current_mode == ixgbe_fc_full) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+#ifdef CONFIG_DCB
+ } else if (hw->fc.current_mode == ixgbe_fc_pfc) {
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
+#endif
+ }
+}
+
+static int ixgbe_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_fc_info fc;
+
+#ifdef CONFIG_DCB
+ if (adapter->dcb_cfg.pfc_mode_enable ||
+ ((hw->mac.type == ixgbe_mac_82598EB) &&
+ (adapter->flags & IXGBE_FLAG_DCB_ENABLED)))
+ return -EINVAL;
+
+#endif
+ fc = hw->fc;
+
+ if (pause->autoneg != AUTONEG_ENABLE)
+ fc.disable_fc_autoneg = true;
+ else
+ fc.disable_fc_autoneg = false;
+
+ if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
+ fc.requested_mode = ixgbe_fc_full;
+ else if (pause->rx_pause && !pause->tx_pause)
+ fc.requested_mode = ixgbe_fc_rx_pause;
+ else if (!pause->rx_pause && pause->tx_pause)
+ fc.requested_mode = ixgbe_fc_tx_pause;
+ else if (!pause->rx_pause && !pause->tx_pause)
+ fc.requested_mode = ixgbe_fc_none;
+ else
+ return -EINVAL;
+
+#ifdef CONFIG_DCB
+ adapter->last_lfc_mode = fc.requested_mode;
+#endif
+
+ /* if the thing changed then we'll update and use new autoneg */
+ if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
+ hw->fc = fc;
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+ }
+
+ return 0;
+}
+
+static u32 ixgbe_get_msglevel(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+static int ixgbe_get_regs_len(struct net_device *netdev)
+{
+#define IXGBE_REGS_LEN 1129
+ return IXGBE_REGS_LEN * sizeof(u32);
+}
+
+#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
+
+static void ixgbe_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u8 i;
+
+ memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
+
+ regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+
+ /* General Registers */
+ regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
+ regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
+ regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
+ regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
+
+ /* NVM Register */
+ regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
+ regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
+ regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
+ regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
+ regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
+ regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
+ regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
+ regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
+ regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
+ regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
+
+ /* Interrupt */
+ /* don't read EICR because it can clear interrupt causes, instead
+ * read EICS which is a shadow but doesn't clear EICR */
+ regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
+ regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
+ regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
+ regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
+ regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
+ regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
+ regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
+ regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
+ regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
+ regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
+ regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
+ regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
+
+ /* Flow Control */
+ regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
+ regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
+ regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
+ regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
+ regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
+ for (i = 0; i < 8; i++) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
+ regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
+ regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
+ break;
+ default:
+ break;
+ }
+ }
+ regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
+ regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
+
+ /* Receive DMA */
+ for (i = 0; i < 64; i++)
+ regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
+ for (i = 0; i < 64; i++)
+ regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
+ for (i = 0; i < 64; i++)
+ regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
+ for (i = 0; i < 64; i++)
+ regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
+ for (i = 0; i < 64; i++)
+ regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
+ for (i = 0; i < 64; i++)
+ regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ for (i = 0; i < 16; i++)
+ regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+ for (i = 0; i < 16; i++)
+ regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+ for (i = 0; i < 8; i++)
+ regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+ regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
+
+ /* Receive */
+ regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+ regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
+ for (i = 0; i < 16; i++)
+ regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
+ for (i = 0; i < 16; i++)
+ regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
+ regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
+ regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
+ regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
+ for (i = 0; i < 8; i++)
+ regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
+ regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
+
+ /* Transmit */
+ for (i = 0; i < 32; i++)
+ regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
+ for (i = 0; i < 32; i++)
+ regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
+ for (i = 0; i < 32; i++)
+ regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
+ for (i = 0; i < 32; i++)
+ regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
+ for (i = 0; i < 32; i++)
+ regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
+ for (i = 0; i < 32; i++)
+ regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+ for (i = 0; i < 32; i++)
+ regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
+ for (i = 0; i < 32; i++)
+ regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
+ regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
+ for (i = 0; i < 16; i++)
+ regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+ regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
+ for (i = 0; i < 8; i++)
+ regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
+ regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
+
+ /* Wake Up */
+ regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
+ regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
+ regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
+ regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
+ regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
+ regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
+ regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
+ regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
+ regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
+
+ /* DCB */
+ regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
+ regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
+ regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
+ regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
+ for (i = 0; i < 8; i++)
+ regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
+
+ /* Statistics */
+ regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
+ regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
+ regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
+ regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
+ for (i = 0; i < 8; i++)
+ regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
+ regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
+ regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
+ regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
+ regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
+ regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
+ regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
+ regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
+ for (i = 0; i < 8; i++)
+ regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
+ for (i = 0; i < 8; i++)
+ regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
+ for (i = 0; i < 8; i++)
+ regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
+ for (i = 0; i < 8; i++)
+ regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
+ regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
+ regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
+ regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
+ regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
+ regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
+ regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
+ regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
+ regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
+ regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
+ regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
+ regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
+ regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
+ for (i = 0; i < 8; i++)
+ regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
+ regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
+ regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
+ regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
+ regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
+ regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
+ regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
+ regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
+ regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
+ regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
+ regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
+ regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
+ regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
+ regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
+ regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
+ regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
+ regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
+ regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
+ regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
+ regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
+ for (i = 0; i < 16; i++)
+ regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
+ for (i = 0; i < 16; i++)
+ regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
+ for (i = 0; i < 16; i++)
+ regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
+ for (i = 0; i < 16; i++)
+ regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
+
+ /* MAC */
+ regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
+ regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
+ regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+ regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
+ regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
+ regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+ regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
+ regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
+ regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
+ regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
+ regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
+ regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
+ regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
+ regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
+ regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
+ regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
+ regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
+ regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
+ regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
+ regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
+ regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
+ regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
+ regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
+ regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+ regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
+ regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
+
+ /* Diagnostic */
+ regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
+ for (i = 0; i < 8; i++)
+ regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
+ regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
+ for (i = 0; i < 4; i++)
+ regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
+ regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
+ regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
+ for (i = 0; i < 8; i++)
+ regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
+ regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
+ for (i = 0; i < 4; i++)
+ regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
+ regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
+ regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
+ regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
+ regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
+ regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
+ regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
+ regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
+ regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
+ regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
+ regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
+ regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
+ for (i = 0; i < 8; i++)
+ regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
+ regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
+ regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
+ regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
+ regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
+ regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
+ regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
+ regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
+ regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
+ regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
+
+	/* 82599/X540-specific registers */
+ regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+}
+
+static int ixgbe_get_eeprom_len(struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->hw.eeprom.word_size * 2;
+}
+
+static int ixgbe_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ int first_word, last_word, eeprom_len;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
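+	/* the EEPROM is 16-bit word addressed: convert the byte-based
+	 * offset/len from ethtool into an inclusive word range, then fix
+	 * up an odd starting offset when copying back out below */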
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_len = last_word - first_word + 1;
+
+ eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
+ eeprom_buff);
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < eeprom_len; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static void ixgbe_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ char firmware_version[32];
+
+ strncpy(drvinfo->driver, ixgbe_driver_name,
+ sizeof(drvinfo->driver) - 1);
+ strncpy(drvinfo->version, ixgbe_driver_version,
+ sizeof(drvinfo->version) - 1);
+
+ snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
+ (adapter->eeprom_version & 0xF000) >> 12,
+ (adapter->eeprom_version & 0x0FF0) >> 4,
+ adapter->eeprom_version & 0x000F);
+
+	strncpy(drvinfo->fw_version, firmware_version,
+		sizeof(drvinfo->fw_version) - 1);
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info) - 1);
+ drvinfo->n_stats = IXGBE_STATS_LEN;
+ drvinfo->testinfo_len = IXGBE_TEST_LEN;
+ drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
+}
+
+static void ixgbe_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
+
+ ring->rx_max_pending = IXGBE_MAX_RXD;
+ ring->tx_max_pending = IXGBE_MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rx_ring->count;
+ ring->tx_pending = tx_ring->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int ixgbe_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
+ int i, err = 0;
+ u32 new_rx_count, new_tx_count;
+ bool need_update = false;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
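+	/* clamp the requested counts to [IXGBE_MIN_*, IXGBE_MAX_*] and
+	 * round up to the required descriptor multiple; typically reached
+	 * from userspace via e.g. "ethtool -G ethX rx 4096 tx 4096" */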
+ new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD);
+ new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD);
+ new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ if ((new_tx_count == adapter->tx_ring[0]->count) &&
+ (new_rx_count == adapter->rx_ring[0]->count)) {
+ /* nothing to do */
+ return 0;
+ }
+
+ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (!netif_running(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->count = new_tx_count;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->count = new_rx_count;
+ adapter->tx_ring_count = new_tx_count;
+ adapter->rx_ring_count = new_rx_count;
+ goto clear_reset;
+ }
+
+ temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
+ if (!temp_tx_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
+
+ if (new_tx_count != adapter->tx_ring_count) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
+ sizeof(struct ixgbe_ring));
+ temp_tx_ring[i].count = new_tx_count;
+ err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbe_free_tx_resources(&temp_tx_ring[i]);
+ }
+ goto clear_reset;
+ }
+ }
+ need_update = true;
+ }
+
+ temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
+ if (!temp_rx_ring) {
+ err = -ENOMEM;
+ goto err_setup;
+ }
+
+ if (new_rx_count != adapter->rx_ring_count) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
+ sizeof(struct ixgbe_ring));
+ temp_rx_ring[i].count = new_rx_count;
+ err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbe_free_rx_resources(&temp_rx_ring[i]);
+ }
+ goto err_setup;
+ }
+ }
+ need_update = true;
+ }
+
+ /* if rings need to be updated, here's the place to do it in one shot */
+ if (need_update) {
+ ixgbe_down(adapter);
+
+ /* tx */
+ if (new_tx_count != adapter->tx_ring_count) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ixgbe_free_tx_resources(adapter->tx_ring[i]);
+ memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
+ sizeof(struct ixgbe_ring));
+ }
+ adapter->tx_ring_count = new_tx_count;
+ }
+
+ /* rx */
+ if (new_rx_count != adapter->rx_ring_count) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ ixgbe_free_rx_resources(adapter->rx_ring[i]);
+ memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
+ sizeof(struct ixgbe_ring));
+ }
+ adapter->rx_ring_count = new_rx_count;
+ }
+ ixgbe_up(adapter);
+ }
+
+ vfree(temp_rx_ring);
+err_setup:
+ vfree(temp_tx_ring);
+clear_reset:
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
+ return err;
+}
+
+static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return IXGBE_TEST_LEN;
+ case ETH_SS_STATS:
+ return IXGBE_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void ixgbe_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct rtnl_link_stats64 temp;
+ const struct rtnl_link_stats64 *net_stats;
+ unsigned int start;
+ struct ixgbe_ring *ring;
+ int i, j;
+ char *p = NULL;
+
+ ixgbe_update_stats(adapter);
+ net_stats = dev_get_stats(netdev, &temp);
+ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+ switch (ixgbe_gstrings_stats[i].type) {
+ case NETDEV_STATS:
+ p = (char *) net_stats +
+ ixgbe_gstrings_stats[i].stat_offset;
+ break;
+ case IXGBE_STATS:
+ p = (char *) adapter +
+ ixgbe_gstrings_stats[i].stat_offset;
+ break;
+ }
+
+ data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ ring = adapter->tx_ring[j];
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ data[i] = ring->stats.packets;
+ data[i+1] = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ i += 2;
+ }
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ ring = adapter->rx_ring[j];
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ data[i] = ring->stats.packets;
+ data[i+1] = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ i += 2;
+ }
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
+ data[i++] = adapter->stats.pxontxc[j];
+ data[i++] = adapter->stats.pxofftxc[j];
+ }
+ for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
+ data[i++] = adapter->stats.pxonrxc[j];
+ data[i++] = adapter->stats.pxoffrxc[j];
+ }
+ }
+}
+
+static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ char *p = (char *)data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *ixgbe_gstrings_test,
+ IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ sprintf(p, "tx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ sprintf(p, "rx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
+ sprintf(p, "tx_pb_%u_pxon", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_pb_%u_pxoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
+ sprintf(p, "rx_pb_%u_pxon", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_pb_%u_pxoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
+ break;
+ }
+}
+
+static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool link_up;
+	u32 link_speed = 0;
+
+	*data = 0;
+
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
+ if (link_up)
+ return *data;
+ else
+ *data = 1;
+ return *data;
+}
+
+/* ethtool register test data */
+struct ixgbe_reg_test {
+ u16 reg;
+ u8 array_len;
+ u8 test_type;
+ u32 mask;
+ u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x40 bytes apart, or in contiguous tables. We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define WRITE_NO_TEST 3
+#define TABLE32_TEST 4
+#define TABLE64_TEST_LO 5
+#define TABLE64_TEST_HI 6
+
+/* default 82599 register test */
+static const struct ixgbe_reg_test reg_test_82599[] = {
+ { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
+ { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+ { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
+ { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
+ { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+ { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
+ { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
+ { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
+/* default 82598 register test */
+static const struct ixgbe_reg_test reg_test_82598[] = {
+ { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
+ { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ /* Enable all four RX queues before testing. */
+ { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
+ /* RDH is read-only for 82598, only test RDT. */
+ { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
+ { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+ { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
+ { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
+ { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
+ { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
+ { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
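+/* Write each test pattern (masked by 'write') to the register, read it
+ * back, and compare against pattern & write & mask; the original value
+ * is restored whether or not the test passes. On mismatch the failing
+ * register offset is stored in *data and true is returned. */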
+static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
+ u32 mask, u32 write)
+{
+ u32 pat, val, before;
+ static const u32 test_pattern[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+
+ for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
+ before = readl(adapter->hw.hw_addr + reg);
+ writel((test_pattern[pat] & write),
+ (adapter->hw.hw_addr + reg));
+ val = readl(adapter->hw.hw_addr + reg);
+ if (val != (test_pattern[pat] & write & mask)) {
+ e_err(drv, "pattern test reg %04X failed: got "
+ "0x%08X expected 0x%08X\n",
+ reg, val, (test_pattern[pat] & write & mask));
+ *data = reg;
+ writel(before, adapter->hw.hw_addr + reg);
+ return 1;
+ }
+ writel(before, adapter->hw.hw_addr + reg);
+ }
+ return 0;
+}
+
+static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
+ u32 mask, u32 write)
+{
+	u32 val, before;
+
+	before = readl(adapter->hw.hw_addr + reg);
+ writel((write & mask), (adapter->hw.hw_addr + reg));
+ val = readl(adapter->hw.hw_addr + reg);
+ if ((write & mask) != (val & mask)) {
+ e_err(drv, "set/check reg %04X test failed: got 0x%08X "
+ "expected 0x%08X\n", reg, (val & mask), (write & mask));
+ *data = reg;
+ writel(before, (adapter->hw.hw_addr + reg));
+ return 1;
+ }
+ writel(before, (adapter->hw.hw_addr + reg));
+ return 0;
+}
+
+#define REG_PATTERN_TEST(reg, mask, write) \
+ do { \
+ if (reg_pattern_test(adapter, data, reg, mask, write)) \
+ return 1; \
+	} while (0)
+
+#define REG_SET_AND_CHECK(reg, mask, write) \
+ do { \
+ if (reg_set_and_check(adapter, data, reg, mask, write)) \
+ return 1; \
+	} while (0)
+
+static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+ const struct ixgbe_reg_test *test;
+ u32 value, before, after;
+ u32 i, toggle;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ toggle = 0x7FFFF3FF;
+ test = reg_test_82598;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ toggle = 0x7FFFF30F;
+ test = reg_test_82599;
+ break;
+	default:
+		*data = 1;
+		return 1;
+ }
+
+ /*
+ * Because the status register is such a special case,
+ * we handle it separately from the rest of the register
+ * tests. Some bits are read-only, some toggle, and some
+ * are writeable on newer MACs.
+ */
+ before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
+ value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
+ after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
+ if (value != after) {
+ e_err(drv, "failed STATUS register test got: 0x%08X "
+ "expected: 0x%08X\n", after, value);
+ *data = 1;
+ return 1;
+ }
+ /* restore previous status */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);
+
+ /*
+ * Perform the remainder of the register test, looping through
+ * the test table until we either fail or reach the null entry.
+ */
+ while (test->reg) {
+ for (i = 0; i < test->array_len; i++) {
+ switch (test->test_type) {
+ case PATTERN_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case SET_READ_TEST:
+ REG_SET_AND_CHECK(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case WRITE_NO_TEST:
+ writel(test->write,
+ (adapter->hw.hw_addr + test->reg)
+ + (i * 0x40));
+ break;
+ case TABLE32_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 4),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_LO:
+ REG_PATTERN_TEST(test->reg + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_HI:
+ REG_PATTERN_TEST((test->reg + 4) + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ }
+ }
+ test++;
+ }
+
+ *data = 0;
+ return 0;
+}
+
+static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	if (hw->eeprom.ops.validate_checksum(hw, NULL))
+ *data = 1;
+ else
+ *data = 0;
+ return *data;
+}
+
+static irqreturn_t ixgbe_test_intr(int irq, void *data)
+{
+ struct net_device *netdev = (struct net_device *) data;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
+
+ return IRQ_HANDLED;
+}
+
+static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 mask, i = 0, shared_int = true;
+ u32 irq = adapter->pdev->irq;
+
+ *data = 0;
+
+ /* Hook up test interrupt handler just for this test */
+ if (adapter->msix_entries) {
+ /* NOTE: we don't test MSI-X interrupts here, yet */
+ return 0;
+ } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+ shared_int = false;
+ if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
+ netdev)) {
+ *data = 1;
+ return -1;
+ }
+ } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
+ netdev->name, netdev)) {
+ shared_int = false;
+ } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
+ netdev->name, netdev)) {
+ *data = 1;
+ return -1;
+ }
+ e_info(hw, "testing %s interrupt\n", shared_int ?
+ "shared" : "unshared");
+
+ /* Disable all the interrupts */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ usleep_range(10000, 20000);
+
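+	/* the loop below forces cause bits 0-9 one at a time through EICS
+	 * and checks what lands in test_icr; mask writes are clamped to the
+	 * low 15 bits (0x00007FFF), presumably to leave the remaining cause
+	 * bits untouched */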
+ /* Test each interrupt */
+ for (; i < 10; i++) {
+ /* Interrupt to test */
+ mask = 1 << i;
+
+ if (!shared_int) {
+ /*
+ * Disable the interrupts to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
+ ~mask & 0x00007FFF);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
+ ~mask & 0x00007FFF);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ usleep_range(10000, 20000);
+
+ if (adapter->test_icr & mask) {
+ *data = 3;
+ break;
+ }
+ }
+
+ /*
+ * Enable the interrupt to be reported in the cause
+ * register and then force the same interrupt and see
+ * if one gets posted. If an interrupt was not posted
+ * to the bus, the test failed.
+ */
+ adapter->test_icr = 0;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ usleep_range(10000, 20000);
+
+		if (!(adapter->test_icr & mask)) {
+ *data = 4;
+ break;
+ }
+
+ if (!shared_int) {
+ /*
+ * Disable the other interrupts to be reported in
+ * the cause register and then force the other
+ * interrupts and see if any get posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
+ ~mask & 0x00007FFF);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
+ ~mask & 0x00007FFF);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ usleep_range(10000, 20000);
+
+ if (adapter->test_icr) {
+ *data = 5;
+ break;
+ }
+ }
+ }
+
+ /* Disable all the interrupts */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ usleep_range(10000, 20000);
+
+ /* Unhook test interrupt handler */
+ free_irq(irq, netdev);
+
+ return *data;
+}
+
+static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
+ struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg_ctl;
+
+ /* shut down the DMA engines now so they can be reinitialized later */
+
+ /* first Rx */
+ reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ reg_ctl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
+ ixgbe_disable_rx_queue(adapter, rx_ring);
+
+ /* now Tx */
+ reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
+ reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ reg_ctl &= ~IXGBE_DMATXCTL_TE;
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
+ break;
+ default:
+ break;
+ }
+
+ ixgbe_reset(adapter);
+
+ ixgbe_free_tx_resources(&adapter->test_tx_ring);
+ ixgbe_free_rx_resources(&adapter->test_rx_ring);
+}
+
+static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
+ struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
+ u32 rctl, reg_data;
+ int ret_val;
+ int err;
+
+ /* Setup Tx descriptor ring and Tx buffers */
+ tx_ring->count = IXGBE_DEFAULT_TXD;
+ tx_ring->queue_index = 0;
+ tx_ring->dev = &adapter->pdev->dev;
+ tx_ring->netdev = adapter->netdev;
+ tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
+ tx_ring->numa_node = adapter->node;
+
+ err = ixgbe_setup_tx_resources(tx_ring);
+ if (err)
+ return 1;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
+ reg_data |= IXGBE_DMATXCTL_TE;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
+ break;
+ default:
+ break;
+ }
+
+ ixgbe_configure_tx_ring(adapter, tx_ring);
+
+ /* Setup Rx Descriptor ring and Rx buffers */
+ rx_ring->count = IXGBE_DEFAULT_RXD;
+ rx_ring->queue_index = 0;
+ rx_ring->dev = &adapter->pdev->dev;
+ rx_ring->netdev = adapter->netdev;
+ rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
+ rx_ring->rx_buf_len = IXGBE_RXBUFFER_2K;
+ rx_ring->numa_node = adapter->node;
+
+ err = ixgbe_setup_rx_resources(rx_ring);
+ if (err) {
+ ret_val = 4;
+ goto err_nomem;
+ }
+
+ rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
+
+ ixgbe_configure_rx_ring(adapter, rx_ring);
+
+ rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
+
+ return 0;
+
+err_nomem:
+ ixgbe_free_desc_rings(adapter);
+ return ret_val;
+}
+
+static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg_data;
+
+ /* X540 needs to set the MACC.FLU bit to force link up */
+ if (adapter->hw.mac.type == ixgbe_mac_X540) {
+ reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
+ reg_data |= IXGBE_MACC_FLU;
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
+ }
+
+ /* right now we only support MAC loopback in the driver */
+ reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ /* Setup MAC loopback */
+ reg_data |= IXGBE_HLREG0_LPBK;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
+
+ reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
+
+ reg_data = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ reg_data &= ~IXGBE_AUTOC_LMS_MASK;
+ reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
+ IXGBE_WRITE_FLUSH(hw);
+ usleep_range(10000, 20000);
+
+ /* Disable Atlas Tx lanes; re-enabled in reset path */
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ u8 atlas;
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
+ atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
+ atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
+ atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
+
+ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
+ atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
+ hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
+ }
+
+ return 0;
+}
+
+static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
+{
+ u32 reg_data;
+
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+ reg_data &= ~IXGBE_HLREG0_LPBK;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+}
+
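+/* The loopback frame is 0xFF-filled, with the second half overwritten
+ * with 0xAA and single 0xBE/0xAF marker bytes at half+10 and half+12;
+ * ixgbe_check_lbtest_frame() looks for exactly those markers. */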
+static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
+ memset(skb->data, 0xFF, frame_size);
+ frame_size &= ~1;
+ memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+ memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+ memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
+
+static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
+ frame_size &= ~1;
+ if (*(skb->data + 3) == 0xFF) {
+ if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+ (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+ return 0;
+ }
+ }
+ return 13;
+}
+
+static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
+ struct ixgbe_ring *tx_ring,
+ unsigned int size)
+{
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbe_rx_buffer *rx_buffer_info;
+ struct ixgbe_tx_buffer *tx_buffer_info;
+ const int bufsz = rx_ring->rx_buf_len;
+ u32 staterr;
+ u16 rx_ntc, tx_ntc, count = 0;
+
+ /* initialize next to clean and descriptor values */
+ rx_ntc = rx_ring->next_to_clean;
+ tx_ntc = tx_ring->next_to_clean;
+ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ while (staterr & IXGBE_RXD_STAT_DD) {
+ /* check Rx buffer */
+ rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
+
+ /* unmap Rx buffer, will be remapped by alloc_rx_buffers */
+ dma_unmap_single(rx_ring->dev,
+ rx_buffer_info->dma,
+ bufsz,
+ DMA_FROM_DEVICE);
+ rx_buffer_info->dma = 0;
+
+ /* verify contents of skb */
+ if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size))
+ count++;
+
+ /* unmap buffer on Tx side */
+ tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
+ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+
+ /* increment Rx/Tx next to clean counters */
+ rx_ntc++;
+ if (rx_ntc == rx_ring->count)
+ rx_ntc = 0;
+ tx_ntc++;
+ if (tx_ntc == tx_ring->count)
+ tx_ntc = 0;
+
+ /* fetch next descriptor */
+ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+
+ /* re-map buffers to ring, store next to clean values */
+ ixgbe_alloc_rx_buffers(rx_ring, count);
+ rx_ring->next_to_clean = rx_ntc;
+ tx_ring->next_to_clean = tx_ntc;
+
+ return count;
+}
+
+static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
+ struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
+ int i, j, lc, good_cnt, ret_val = 0;
+ unsigned int size = 1024;
+ netdev_tx_t tx_ret_val;
+ struct sk_buff *skb;
+
+ /* allocate test skb */
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return 11;
+
+ /* place data into test skb */
+ ixgbe_create_lbtest_frame(skb, size);
+ skb_put(skb, size);
+
+ /*
+ * Calculate the loop count based on the largest descriptor ring
+ * The idea is to wrap the largest ring a number of times using 64
+ * send/receive pairs during each loop
+ */
+
+ if (rx_ring->count <= tx_ring->count)
+ lc = ((tx_ring->count / 64) * 2) + 1;
+ else
+ lc = ((rx_ring->count / 64) * 2) + 1;
+
+ for (j = 0; j <= lc; j++) {
+ /* reset count of good packets */
+ good_cnt = 0;
+
+		/* place 64 packets on the transmit queue */
+ for (i = 0; i < 64; i++) {
+ skb_get(skb);
+ tx_ret_val = ixgbe_xmit_frame_ring(skb,
+ adapter,
+ tx_ring);
+ if (tx_ret_val == NETDEV_TX_OK)
+ good_cnt++;
+ }
+
+ if (good_cnt != 64) {
+ ret_val = 12;
+ break;
+ }
+
+ /* allow 200 milliseconds for packets to go from Tx to Rx */
+ msleep(200);
+
+ good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
+ if (good_cnt != 64) {
+ ret_val = 13;
+ break;
+ }
+ }
+
+ /* free the original skb */
+ kfree_skb(skb);
+
+ return ret_val;
+}
+
+static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+ *data = ixgbe_setup_desc_rings(adapter);
+ if (*data)
+ goto out;
+ *data = ixgbe_setup_loopback_test(adapter);
+ if (*data)
+ goto err_loopback;
+ *data = ixgbe_run_loopback_test(adapter);
+ ixgbe_loopback_cleanup(adapter);
+
+err_loopback:
+ ixgbe_free_desc_rings(adapter);
+out:
+ return *data;
+}
+
+static void ixgbe_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ bool if_running = netif_running(netdev);
+
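+	/* result slots: data[0] registers, data[1] eeprom, data[2]
+	 * interrupts, data[3] loopback, data[4] link; non-zero = failure */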
+ set_bit(__IXGBE_TESTING, &adapter->state);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ e_info(hw, "offline testing starting\n");
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result */
+ if (ixgbe_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ int i;
+ for (i = 0; i < adapter->num_vfs; i++) {
+ if (adapter->vfinfo[i].clear_to_send) {
+				netdev_warn(netdev,
+					    "offline diagnostic is not supported when VFs are present\n");
+ data[0] = 1;
+ data[1] = 1;
+ data[2] = 1;
+ data[3] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__IXGBE_TESTING,
+ &adapter->state);
+ goto skip_ol_tests;
+ }
+ }
+ }
+
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ ixgbe_reset(adapter);
+
+ e_info(hw, "register testing starting\n");
+ if (ixgbe_reg_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbe_reset(adapter);
+ e_info(hw, "eeprom testing starting\n");
+ if (ixgbe_eeprom_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbe_reset(adapter);
+ e_info(hw, "interrupt testing starting\n");
+ if (ixgbe_intr_test(adapter, &data[2]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* If SRIOV or VMDq is enabled then skip MAC
+ * loopback diagnostic. */
+ if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
+ IXGBE_FLAG_VMDQ_ENABLED)) {
+			e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
+ data[3] = 0;
+ goto skip_loopback;
+ }
+
+ ixgbe_reset(adapter);
+ e_info(hw, "loopback testing starting\n");
+ if (ixgbe_loopback_test(adapter, &data[3]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+skip_loopback:
+ ixgbe_reset(adapter);
+
+ clear_bit(__IXGBE_TESTING, &adapter->state);
+ if (if_running)
+ dev_open(netdev);
+ } else {
+ e_info(hw, "online testing starting\n");
+ /* Online tests */
+ if (ixgbe_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* offline tests aren't run in online mode; pass by default */
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0;
+ data[3] = 0;
+
+ clear_bit(__IXGBE_TESTING, &adapter->state);
+ }
+skip_ol_tests:
+ msleep_interruptible(4 * 1000);
+}
+
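+/* Returns 0 if the device/subdevice combination supports WoL (leaving
+ * wol->supported alone), or 1 after clearing wol->supported otherwise. */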
+static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
+ struct ethtool_wolinfo *wol)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int retval = 1;
+ u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
+
+ /* WOL not supported except for the following */
+	switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_SFP:
+ /* Only this subdevice supports WOL */
+ if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) {
+ wol->supported = 0;
+ break;
+ }
+ retval = 0;
+ break;
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ /* All except this subdevice support WOL */
+ if (hw->subsystem_device_id ==
+ IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
+ wol->supported = 0;
+ break;
+ }
+ retval = 0;
+ break;
+ case IXGBE_DEV_ID_82599_KX4:
+ retval = 0;
+ break;
+ case IXGBE_DEV_ID_X540T:
+		/* check eeprom to see if WoL is enabled for this port */
+ if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
+ ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
+ (hw->bus.func == 0))) {
+ retval = 0;
+ break;
+ }
+
+ /* All others not supported */
+ wol->supported = 0;
+ break;
+ default:
+ wol->supported = 0;
+ }
+
+ return retval;
+}
+
+static void ixgbe_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = WAKE_UCAST | WAKE_MCAST |
+ WAKE_BCAST | WAKE_MAGIC;
+ wol->wolopts = 0;
+
+ if (ixgbe_wol_exclusion(adapter, wol) ||
+ !device_can_wakeup(&adapter->pdev->dev))
+ return;
+
+ if (adapter->wol & IXGBE_WUFC_EX)
+ wol->wolopts |= WAKE_UCAST;
+ if (adapter->wol & IXGBE_WUFC_MC)
+ wol->wolopts |= WAKE_MCAST;
+ if (adapter->wol & IXGBE_WUFC_BC)
+ wol->wolopts |= WAKE_BCAST;
+ if (adapter->wol & IXGBE_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+ return -EOPNOTSUPP;
+
+ if (ixgbe_wol_exclusion(adapter, wol))
+ return wol->wolopts ? -EOPNOTSUPP : 0;
+
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wol |= IXGBE_WUFC_EX;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wol |= IXGBE_WUFC_MC;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wol |= IXGBE_WUFC_BC;
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= IXGBE_WUFC_MAG;
+
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
+static int ixgbe_nway_reset(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+
+ return 0;
+}
+
+static int ixgbe_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
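+	/* on ETHTOOL_ID_ACTIVE, save LEDCTL and return 2: a positive return
+	 * asks the ethtool core to drive blinking via the ON/OFF states,
+	 * likely at that many cycles per second */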
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ return 2;
+
+ case ETHTOOL_ID_ON:
+ hw->mac.ops.led_on(hw, IXGBE_LED_ON);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ hw->mac.ops.led_off(hw, IXGBE_LED_ON);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ /* Restore LED settings */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
+ break;
+ }
+
+ return 0;
+}
+
+static int ixgbe_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
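+	/* itr settings of 0 and 1 are special control values and are
+	 * reported as-is; larger values are stored as usecs << 2, so
+	 * shift back down when reporting to ethtool */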
+ ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
+
+ /* only valid if in constant ITR mode */
+ if (adapter->rx_itr_setting <= 1)
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting;
+ else
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+
+ /* if in mixed tx/rx queues per vector mode, report only rx settings */
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
+ return 0;
+
+ /* only valid if in constant ITR mode */
+ if (adapter->tx_itr_setting <= 1)
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+ else
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
+
+ return 0;
+}
+
+/*
+ * This function must be called before setting the new value of
+ * rx_itr_setting.
+ */
+static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
+ struct ethtool_coalesce *ec)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
+ return false;
+
+ /* if interrupt rate is too high then disable RSC */
+ if (ec->rx_coalesce_usecs != 1 &&
+ ec->rx_coalesce_usecs <= (IXGBE_MIN_RSC_ITR >> 2)) {
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+ e_info(probe, "rx-usecs set too low, disabling RSC\n");
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+ return true;
+ }
+ } else {
+ /* check the feature flag value and enable RSC if necessary */
+ if ((netdev->features & NETIF_F_LRO) &&
+ !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
+ e_info(probe, "rx-usecs set to %d, re-enabling RSC\n",
+ ec->rx_coalesce_usecs);
+ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ return true;
+ }
+ }
+ return false;
+}
+
+static int ixgbe_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_q_vector *q_vector;
+ int i;
+ int num_vectors;
+ u16 tx_itr_param, rx_itr_param;
+ bool need_reset = false;
+
+ /* don't accept tx specific changes if we've got mixed RxTx vectors */
+	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count &&
+	    ec->tx_coalesce_usecs)
+ return -EINVAL;
+
+ if (ec->tx_max_coalesced_frames_irq)
+ adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
+
+ if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
+ (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
+ return -EINVAL;
+
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_update_rsc(adapter, ec);
+
+ if (ec->rx_coalesce_usecs > 1)
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
+ else
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs;
+
+ if (adapter->rx_itr_setting == 1)
+ rx_itr_param = IXGBE_20K_ITR;
+ else
+ rx_itr_param = adapter->rx_itr_setting;
+
+ if (ec->tx_coalesce_usecs > 1)
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+ else
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs;
+
+ if (adapter->tx_itr_setting == 1)
+ tx_itr_param = IXGBE_10K_ITR;
+ else
+ tx_itr_param = adapter->tx_itr_setting;
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ else
+ num_vectors = 1;
+
+ for (i = 0; i < num_vectors; i++) {
+ q_vector = adapter->q_vector[i];
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+ if (q_vector->tx.count && !q_vector->rx.count)
+ /* tx only */
+ q_vector->itr = tx_itr_param;
+ else
+ /* rx only or mixed */
+ q_vector->itr = rx_itr_param;
+ ixgbe_write_eitr(q_vector);
+ }
+
+ /*
+ * do reset here at the end to make sure EITR==0 case is handled
+ * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
+ * also locks in RSC enable/disable which requires reset
+ */
+ if (need_reset)
+ ixgbe_do_reset(netdev);
+
+ return 0;
+}
+
+static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ union ixgbe_atr_input *mask = &adapter->fdir_mask;
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct hlist_node *node, *node2;
+ struct ixgbe_fdir_filter *rule = NULL;
+
+ /* report total rule count */
+ cmd->data = (1024 << adapter->fdir_pballoc) - 2;
+
+ hlist_for_each_entry_safe(rule, node, node2,
+ &adapter->fdir_filter_list, fdir_node) {
+ if (fsp->location <= rule->sw_idx)
+ break;
+ }
+
+ if (!rule || fsp->location != rule->sw_idx)
+ return -EINVAL;
+
+ /* fill out the flow spec entry */
+
+ /* set flow type field */
+ switch (rule->filter.formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ fsp->flow_type = TCP_V4_FLOW;
+ break;
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
+ fsp->flow_type = UDP_V4_FLOW;
+ break;
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ fsp->flow_type = SCTP_V4_FLOW;
+ break;
+ case IXGBE_ATR_FLOW_TYPE_IPV4:
+ fsp->flow_type = IP_USER_FLOW;
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fsp->h_u.usr_ip4_spec.proto = 0;
+ fsp->m_u.usr_ip4_spec.proto = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
+ fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
+ fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
+ fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
+ fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
+ fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
+ fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
+ fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
+ fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
+ fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
+ fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
+ fsp->flow_type |= FLOW_EXT;
+
+ /* record action */
+ if (rule->action == IXGBE_FDIR_DROP_QUEUE)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = rule->action;
+
+ return 0;
+}
+
+static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct hlist_node *node, *node2;
+ struct ixgbe_fdir_filter *rule;
+ int cnt = 0;
+
+ /* report total rule count */
+ cmd->data = (1024 << adapter->fdir_pballoc) - 2;
+
+ hlist_for_each_entry_safe(rule, node, node2,
+ &adapter->fdir_filter_list, fdir_node) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[cnt] = rule->sw_idx;
+ cnt++;
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = adapter->fdir_filter_count;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+ struct ixgbe_fdir_filter *input,
+ u16 sw_idx)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct hlist_node *node, *node2, *parent;
+ struct ixgbe_fdir_filter *rule;
+ int err = -EINVAL;
+
+ parent = NULL;
+ rule = NULL;
+
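+	/* the filter list is kept sorted by sw_idx; walk it to find either
+	 * an existing rule at sw_idx or the node to insert after */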
+ hlist_for_each_entry_safe(rule, node, node2,
+ &adapter->fdir_filter_list, fdir_node) {
+ /* hash found, or no matching entry */
+ if (rule->sw_idx >= sw_idx)
+ break;
+ parent = node;
+ }
+
+ /* if there is an old rule occupying our place remove it */
+ if (rule && (rule->sw_idx == sw_idx)) {
+ if (!input || (rule->filter.formatted.bkt_hash !=
+ input->filter.formatted.bkt_hash)) {
+ err = ixgbe_fdir_erase_perfect_filter_82599(hw,
+ &rule->filter,
+ sw_idx);
+ }
+
+ hlist_del(&rule->fdir_node);
+ kfree(rule);
+ adapter->fdir_filter_count--;
+ }
+
+ /*
+ * If no input this was a delete, err should be 0 if a rule was
+ * successfully found and removed from the list else -EINVAL
+ */
+ if (!input)
+ return err;
+
+ /* initialize node and set software index */
+ INIT_HLIST_NODE(&input->fdir_node);
+
+ /* add filter to the list */
+ if (parent)
+ hlist_add_after(parent, &input->fdir_node);
+ else
+ hlist_add_head(&input->fdir_node,
+ &adapter->fdir_filter_list);
+
+ /* update counts */
+ adapter->fdir_filter_count++;
+
+ return 0;
+}
+
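+/* Map an ethtool flow spec onto an ATR flow type; returns 1 on success,
+ * 0 if the spec cannot be represented. */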
+static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
+ u8 *flow_type)
+{
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ break;
+ case UDP_V4_FLOW:
+ *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
+ break;
+ case SCTP_V4_FLOW:
+ *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
+ break;
+ case IP_USER_FLOW:
+ switch (fsp->h_u.usr_ip4_spec.proto) {
+ case IPPROTO_TCP:
+ *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ break;
+ case IPPROTO_UDP:
+ *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
+ break;
+ case IPPROTO_SCTP:
+ *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
+ break;
+ case 0:
+ if (!fsp->m_u.usr_ip4_spec.proto) {
+ *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
+ break;
+ }
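+			/* fall through */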
+ default:
+ return 0;
+ }
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_fdir_filter *input;
+ union ixgbe_atr_input mask;
+ int err;
+
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ return -EOPNOTSUPP;
+
+ /*
+ * Don't allow programming if the action is a queue greater than
+ * the number of online Rx queues.
+ */
+ if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
+ (fsp->ring_cookie >= adapter->num_rx_queues))
+ return -EINVAL;
+
+ /* Don't allow indexes to exist outside of available space */
+ if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
+ e_err(drv, "Location out of range\n");
+ return -EINVAL;
+ }
+
+ input = kzalloc(sizeof(*input), GFP_ATOMIC);
+ if (!input)
+ return -ENOMEM;
+
+ memset(&mask, 0, sizeof(union ixgbe_atr_input));
+
+ /* set SW index */
+ input->sw_idx = fsp->location;
+
+ /* record flow type */
+ if (!ixgbe_flowspec_to_flow_type(fsp,
+ &input->filter.formatted.flow_type)) {
+ e_err(drv, "Unrecognized flow type\n");
+ goto err_out;
+ }
+
+ mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+ IXGBE_ATR_L4TYPE_MASK;
+
+ if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
+ mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+
+ /* Copy input into formatted structures */
+ input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+ mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
+ input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+ mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
+ input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+ mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+ input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+ mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+
+ if (fsp->flow_type & FLOW_EXT) {
+ input->filter.formatted.vm_pool =
+ (unsigned char)ntohl(fsp->h_ext.data[1]);
+ mask.formatted.vm_pool =
+ (unsigned char)ntohl(fsp->m_ext.data[1]);
+ input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
+ mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
+ input->filter.formatted.flex_bytes =
+ fsp->h_ext.vlan_etype;
+ mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
+ }
+
+ /* determine if we need to drop or route the packet */
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
+ input->action = IXGBE_FDIR_DROP_QUEUE;
+ else
+ input->action = fsp->ring_cookie;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+
+ if (hlist_empty(&adapter->fdir_filter_list)) {
+ /* save mask and program input mask into HW */
+ memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
+ err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
+ if (err) {
+ e_err(drv, "Error writing mask\n");
+ goto err_out_w_lock;
+ }
+ } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
+ e_err(drv, "Only one mask supported per port\n");
+ goto err_out_w_lock;
+ }
+
+ /* apply mask and compute/store hash */
+ ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
+
+ /* program filters to filter memory */
+ err = ixgbe_fdir_write_perfect_filter_82599(hw,
+ &input->filter, input->sw_idx,
+ (input->action == IXGBE_FDIR_DROP_QUEUE) ?
+ IXGBE_FDIR_DROP_QUEUE :
+ adapter->rx_ring[input->action]->reg_idx);
+ if (err)
+ goto err_out_w_lock;
+
+ ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
+
+ spin_unlock(&adapter->fdir_perfect_lock);
+
+ return err;
+err_out_w_lock:
+ spin_unlock(&adapter->fdir_perfect_lock);
+err_out:
+ kfree(input);
+ return -EINVAL;
+}
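+
+/*
+ * Illustrative usage (interface name and values are placeholders): a
+ * perfect filter is typically installed from user space with
+ *
+ *   ethtool -N eth0 flow-type tcp4 src-ip 192.168.0.1 dst-port 80 \
+ *           action 4 loc 1
+ *
+ * which arrives here as ETHTOOL_SRXCLSRLINS with fsp->location = 1 and
+ * fsp->ring_cookie = 4; "action -1" maps to RX_CLS_FLOW_DISC and hence
+ * IXGBE_FDIR_DROP_QUEUE.
+ */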
+
+static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ int err;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+ err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
+ spin_unlock(&adapter->fdir_perfect_lock);
+
+ return err;
+}
+
+static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static const struct ethtool_ops ixgbe_ethtool_ops = {
+ .get_settings = ixgbe_get_settings,
+ .set_settings = ixgbe_set_settings,
+ .get_drvinfo = ixgbe_get_drvinfo,
+ .get_regs_len = ixgbe_get_regs_len,
+ .get_regs = ixgbe_get_regs,
+ .get_wol = ixgbe_get_wol,
+ .set_wol = ixgbe_set_wol,
+ .nway_reset = ixgbe_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = ixgbe_get_eeprom_len,
+ .get_eeprom = ixgbe_get_eeprom,
+ .get_ringparam = ixgbe_get_ringparam,
+ .set_ringparam = ixgbe_set_ringparam,
+ .get_pauseparam = ixgbe_get_pauseparam,
+ .set_pauseparam = ixgbe_set_pauseparam,
+ .get_msglevel = ixgbe_get_msglevel,
+ .set_msglevel = ixgbe_set_msglevel,
+ .self_test = ixgbe_diag_test,
+ .get_strings = ixgbe_get_strings,
+ .set_phys_id = ixgbe_set_phys_id,
+ .get_sset_count = ixgbe_get_sset_count,
+ .get_ethtool_stats = ixgbe_get_ethtool_stats,
+ .get_coalesce = ixgbe_get_coalesce,
+ .set_coalesce = ixgbe_set_coalesce,
+ .get_rxnfc = ixgbe_get_rxnfc,
+ .set_rxnfc = ixgbe_set_rxnfc,
+};
+
+void ixgbe_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
new file mode 100644
index 000000000000..323f4529992d
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -0,0 +1,835 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe.h"
+#include <linux/if_ether.h>
+#include <linux/gfp.h>
+#include <linux/if_vlan.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_fcoe.h>
+#include <scsi/libfc.h>
+#include <scsi/libfcoe.h>
+
+/**
+ * ixgbe_fcoe_clear_ddp - clear the given ddp context
+ * @ddp: ptr to the ixgbe_fcoe_ddp
+ *
+ * Returns : none
+ *
+ */
+static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
+{
+ ddp->len = 0;
+ ddp->err = 1;
+ ddp->udl = NULL;
+ ddp->udp = 0UL;
+ ddp->sgl = NULL;
+ ddp->sgc = 0;
+}
+
+/**
+ * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
+ * @netdev: the corresponding net_device
+ * @xid: the xid whose corresponding ddp context will be freed
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
+ * and it is expected to be called by ULD, i.e., FCP layer of libfc
+ * to release the corresponding ddp context when the I/O is done.
+ *
+ * Returns : data length already ddp-ed in bytes
+ */
+int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
+{
+ int len = 0;
+ struct ixgbe_fcoe *fcoe;
+ struct ixgbe_adapter *adapter;
+ struct ixgbe_fcoe_ddp *ddp;
+ u32 fcbuff;
+
+ if (!netdev)
+ goto out_ddp_put;
+
+ if (xid >= IXGBE_FCOE_DDP_MAX)
+ goto out_ddp_put;
+
+ adapter = netdev_priv(netdev);
+ fcoe = &adapter->fcoe;
+ ddp = &fcoe->ddp[xid];
+ if (!ddp->udl)
+ goto out_ddp_put;
+
+ len = ddp->len;
+ /* if there is an error, force to invalidate ddp context */
+ if (ddp->err) {
+ spin_lock_bh(&fcoe->lock);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
+ (xid | IXGBE_FCFLTRW_WE));
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
+ (xid | IXGBE_FCDMARW_WE));
+
+ /* guaranteed to be invalidated after 100us */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
+ (xid | IXGBE_FCDMARW_RE));
+ fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
+ spin_unlock_bh(&fcoe->lock);
+ if (fcbuff & IXGBE_FCBUFF_VALID)
+ udelay(100);
+ }
+ if (ddp->sgl)
+ pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
+ DMA_FROM_DEVICE);
+ if (ddp->pool) {
+ pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
+ ddp->pool = NULL;
+ }
+
+ ixgbe_fcoe_clear_ddp(ddp);
+
+out_ddp_put:
+ return len;
+}
+
+/**
+ * ixgbe_fcoe_ddp_setup - called to set up ddp context
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ * @target_mode: 1 to set up DDP for target mode, 0 for initiator mode
+ *
+ * Returns : 1 for success and 0 for no ddp
+ */
+static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc,
+ int target_mode)
+{
+ struct ixgbe_adapter *adapter;
+ struct ixgbe_hw *hw;
+ struct ixgbe_fcoe *fcoe;
+ struct ixgbe_fcoe_ddp *ddp;
+ struct scatterlist *sg;
+ unsigned int i, j, dmacount;
+ unsigned int len;
+ static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
+ unsigned int firstoff = 0;
+ unsigned int lastsize;
+ unsigned int thisoff = 0;
+ unsigned int thislen = 0;
+ u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
+ dma_addr_t addr = 0;
+ struct pci_pool *pool;
+
+ if (!netdev || !sgl)
+ return 0;
+
+ adapter = netdev_priv(netdev);
+ if (xid >= IXGBE_FCOE_DDP_MAX) {
+ e_warn(drv, "xid=0x%x out-of-range\n", xid);
+ return 0;
+ }
+
+ /* no DDP if we are already down or resetting */
+ if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_RESETTING, &adapter->state))
+ return 0;
+
+ fcoe = &adapter->fcoe;
+ if (!fcoe->pool) {
+ e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
+ return 0;
+ }
+
+ ddp = &fcoe->ddp[xid];
+ if (ddp->sgl) {
+ e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
+ xid, ddp->sgl, ddp->sgc);
+ return 0;
+ }
+ ixgbe_fcoe_clear_ddp(ddp);
+
+ /* setup dma from scsi command sgl */
+ dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+ if (dmacount == 0) {
+ e_err(drv, "xid 0x%x DMA map error\n", xid);
+ return 0;
+ }
+
+ /* alloc the udl from per cpu ddp pool */
+ pool = *per_cpu_ptr(fcoe->pool, get_cpu());
+ ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
+ if (!ddp->udl) {
+ e_err(drv, "failed allocated ddp context\n");
+ goto out_noddp_unmap;
+ }
+ ddp->pool = pool;
+ ddp->sgl = sgl;
+ ddp->sgc = sgc;
+
+ j = 0;
+ for_each_sg(sgl, sg, dmacount, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ while (len) {
+ /* max number of buffers allowed in one DDP context */
+ if (j >= IXGBE_BUFFCNT_MAX) {
+ e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
+ "not enough descriptors\n",
+ xid, i, j, dmacount, (u64)addr);
+ goto out_noddp_free;
+ }
+
+ /* get the offset and length of the current buffer */
+ thisoff = addr & ((dma_addr_t)bufflen - 1);
+ thislen = min((bufflen - thisoff), len);
+ /*
+ * all but the 1st buffer (j == 0)
+ * must be aligned on bufflen
+ */
+ if ((j != 0) && (thisoff))
+ goto out_noddp_free;
+ /*
+ * all but the last buffer
+ * ((i == (dmacount - 1)) && (thislen == len))
+ * must end at bufflen
+ */
+ if (((i != (dmacount - 1)) || (thislen != len))
+ && ((thislen + thisoff) != bufflen))
+ goto out_noddp_free;
+
+ ddp->udl[j] = (u64)(addr - thisoff);
+ /* only the first buffer may have a non-zero offset */
+ if (j == 0)
+ firstoff = thisoff;
+ len -= thislen;
+ addr += thislen;
+ j++;
+ }
+ }
+ /* only the last buffer may have non-full bufflen */
+ lastsize = thisoff + thislen;
+
+ /*
+ * lastsize can not be a full bufflen.
+ * If it is, chain in the shared extra buffer and report lastsize = 1.
+ */
+ if (lastsize == bufflen) {
+ if (j >= IXGBE_BUFFCNT_MAX) {
+ printk_once("Will NOT use DDP since there are not "
+ "enough user buffers. We need an extra "
+ "buffer because lastsize is bufflen. "
+ "xid=%x:%d,%d,%d:addr=%llx\n",
+ xid, i, j, dmacount, (u64)addr);
+
+ goto out_noddp_free;
+ }
+
+ ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
+ j++;
+ lastsize = 1;
+ }
+ put_cpu();
+
+ fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
+ fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
+ fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
+ /* Set WRCONTX bit to allow DDP for target */
+ if (target_mode)
+ fcbuff |= (IXGBE_FCBUFF_WRCONTX);
+ fcbuff |= (IXGBE_FCBUFF_VALID);
+
+ fcdmarw = xid;
+ fcdmarw |= IXGBE_FCDMARW_WE;
+ fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);
+
+ fcfltrw = xid;
+ fcfltrw |= IXGBE_FCFLTRW_WE;
+
+ /* program DMA context */
+ hw = &adapter->hw;
+ spin_lock_bh(&fcoe->lock);
+
+ /* turn on last frame indication for target mode as the target is
+ * supposed to send FCP_RSP when it is done. */
+ if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
+ set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
+ fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
+ fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
+ IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
+ IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
+ IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
+ /* program filter context */
+ IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
+ IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
+
+ spin_unlock_bh(&fcoe->lock);
+
+ return 1;
+
+out_noddp_free:
+ pci_pool_free(pool, ddp->udl, ddp->udp);
+ ixgbe_fcoe_clear_ddp(ddp);
+
+out_noddp_unmap:
+ pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+ put_cpu();
+ return 0;
+}
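+
+/*
+ * Worked example of the alignment rules above (addresses hypothetical):
+ * with bufflen = 4KB, an sg element at DMA address 0x10000200 of length
+ * 0x2e00 is split into three DDP buffers:
+ *
+ *   j=0: udl[0] = 0x10000000, thisoff = 0x200, thislen = 0xe00
+ *   j=1: udl[1] = 0x10001000, thisoff = 0,     thislen = 0x1000
+ *   j=2: udl[2] = 0x10002000, thisoff = 0,     thislen = 0x1000
+ *
+ * Only j=0 may start mid-buffer (firstoff = 0x200), and every buffer but
+ * the last must end on the 4KB boundary. Since the final buffer here
+ * ends exactly at bufflen, the shared extra_ddp_buffer is appended and
+ * lastsize is reported as 1 to work around the hardware erratum.
+ */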
+
+/**
+ * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
+ * and is expected to be called from ULD, e.g., FCP layer of libfc
+ * to set up ddp for the corresponding xid of the given sglist for
+ * the corresponding I/O.
+ *
+ * Returns : 1 for success and 0 for no ddp
+ */
+int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc)
+{
+ return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
+}
+
+/**
+ * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
+ * and is expected to be called from ULD, e.g., FCP layer of libfc
+ * to set up ddp for the corresponding xid of the given sglist for
+ * the corresponding I/O. The DDP in target mode is a write I/O request
+ * from the initiator.
+ *
+ * Returns : 1 for success and 0 for no ddp
+ */
+int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc)
+{
+ return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
+}
+
+/**
+ * ixgbe_fcoe_ddp - check ddp status and mark it done
+ * @adapter: ixgbe adapter
+ * @rx_desc: advanced rx descriptor
+ * @skb: the skb holding the received data
+ * @staterr: the status/error bits from the rx descriptor
+ *
+ * This checks ddp status.
+ *
+ * Returns : < 0 indicates an error or not a FCiE ddp, 0 indicates
+ * not passing the skb to ULD, > 0 indicates is the length of data
+ * being ddped.
+ */
+int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb,
+ u32 staterr)
+{
+ u16 xid;
+ u32 fctl;
+ u32 fceofe, fcerr, fcstat;
+ int rc = -EINVAL;
+ struct ixgbe_fcoe *fcoe;
+ struct ixgbe_fcoe_ddp *ddp;
+ struct fc_frame_header *fh;
+ struct fcoe_crc_eof *crc;
+
+ fcerr = (staterr & IXGBE_RXDADV_ERR_FCERR);
+ fceofe = (staterr & IXGBE_RXDADV_ERR_FCEOFE);
+ if (fcerr == IXGBE_FCERR_BADCRC)
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
+ fh = (struct fc_frame_header *)(skb->data +
+ sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
+ else
+ fh = (struct fc_frame_header *)(skb->data +
+ sizeof(struct fcoe_hdr));
+ fctl = ntoh24(fh->fh_f_ctl);
+ if (fctl & FC_FC_EX_CTX)
+ xid = be16_to_cpu(fh->fh_ox_id);
+ else
+ xid = be16_to_cpu(fh->fh_rx_id);
+
+ if (xid >= IXGBE_FCOE_DDP_MAX)
+ goto ddp_out;
+
+ fcoe = &adapter->fcoe;
+ ddp = &fcoe->ddp[xid];
+ if (!ddp->udl)
+ goto ddp_out;
+
+ if (fcerr | fceofe)
+ goto ddp_out;
+
+ fcstat = (staterr & IXGBE_RXDADV_STAT_FCSTAT);
+ if (fcstat) {
+ /* update length of DDPed data */
+ ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+ /* unmap the sg list when FCP_RSP is received */
+ if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) {
+ pci_unmap_sg(adapter->pdev, ddp->sgl,
+ ddp->sgc, DMA_FROM_DEVICE);
+ ddp->err = (fcerr | fceofe);
+ ddp->sgl = NULL;
+ ddp->sgc = 0;
+ }
+ /* return 0 to bypass going to ULD for DDPed data */
+ if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP)
+ rc = 0;
+ else if (ddp->len)
+ rc = ddp->len;
+ }
+ /* In target mode, check the last data frame of the sequence.
+ * For DDP in target mode, data is already DDPed but the header
+ * indication of the last data frame would allow us to tell if we
+ * got all the data and the ULP can send FCP_RSP back. As this is
+ * not a full fcoe frame, we fill the trailer here so it won't be
+ * dropped by the ULP stack.
+ */
+ if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
+ (fctl & FC_FC_END_SEQ)) {
+ crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
+ crc->fcoe_eof = FC_EOF_T;
+ }
+ddp_out:
+ return rc;
+}
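+
+/*
+ * Note: the xid used for the DDP context lookup above is taken from
+ * ox_id when FC_FC_EX_CTX is set in the frame's F_CTL (the frame comes
+ * from the exchange responder) and from rx_id otherwise, so the lookup
+ * uses whichever exchange id the local end assigned at DDP setup time.
+ */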
+
+/**
+ * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
+ * @tx_ring: tx desc ring
+ * @skb: associated skb
+ * @tx_flags: tx flags
+ * @hdr_len: hdr_len to be returned
+ *
+ * This sets up large send offload for FCoE
+ *
+ * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
+ */
+int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, u8 *hdr_len)
+{
+ struct fc_frame_header *fh;
+ u32 vlan_macip_lens;
+ u32 fcoe_sof_eof = 0;
+ u32 mss_l4len_idx;
+ u8 sof, eof;
+
+ if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
+ dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
+ skb_shinfo(skb)->gso_type);
+ return -EINVAL;
+ }
+
+ /* reset the headers to point at the fcoe/fc headers */
+ skb_set_network_header(skb, skb->mac_len);
+ skb_set_transport_header(skb, skb->mac_len +
+ sizeof(struct fcoe_hdr));
+
+ /* sets up SOF and ORIS */
+ sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
+ switch (sof) {
+ case FC_SOF_I2:
+ fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
+ break;
+ case FC_SOF_I3:
+ fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
+ IXGBE_ADVTXD_FCOEF_ORIS;
+ break;
+ case FC_SOF_N2:
+ break;
+ case FC_SOF_N3:
+ fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
+ break;
+ default:
+ dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
+ return -EINVAL;
+ }
+
+ /* the first byte of the last dword is EOF */
+ skb_copy_bits(skb, skb->len - 4, &eof, 1);
+ /* sets up EOF and ORIE */
+ switch (eof) {
+ case FC_EOF_N:
+ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
+ break;
+ case FC_EOF_T:
+ /* lso needs ORIE */
+ if (skb_is_gso(skb))
+ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
+ IXGBE_ADVTXD_FCOEF_ORIE;
+ else
+ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
+ break;
+ case FC_EOF_NI:
+ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
+ break;
+ case FC_EOF_A:
+ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
+ break;
+ default:
+ dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
+ return -EINVAL;
+ }
+
+ /* sets up PARINC indicating data offset */
+ fh = (struct fc_frame_header *)skb_transport_header(skb);
+ if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
+ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;
+
+ /* include trailer in headlen as it is replicated per frame */
+ *hdr_len = sizeof(struct fcoe_crc_eof);
+
+ /* hdr_len includes fc_hdr if FCoE LSO is enabled */
+ if (skb_is_gso(skb))
+ *hdr_len += (skb_transport_offset(skb) +
+ sizeof(struct fc_frame_header));
+
+ /* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
+ mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+ mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+ /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+ vlan_macip_lens = skb_transport_offset(skb) +
+ sizeof(struct fc_frame_header);
+ vlan_macip_lens |= (skb_transport_offset(skb) - 4)
+ << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+ /* write context desc */
+ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
+ IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx);
+
+ return skb_is_gso(skb);
+}
+
+static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
+{
+ unsigned int cpu;
+ struct pci_pool **pool;
+
+ for_each_possible_cpu(cpu) {
+ pool = per_cpu_ptr(fcoe->pool, cpu);
+ if (*pool)
+ pci_pool_destroy(*pool);
+ }
+ free_percpu(fcoe->pool);
+ fcoe->pool = NULL;
+}
+
+static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ unsigned int cpu;
+ struct pci_pool **pool;
+ char pool_name[32];
+
+ fcoe->pool = alloc_percpu(struct pci_pool *);
+ if (!fcoe->pool)
+ return;
+
+ /* allocate pci pool for each cpu */
+ for_each_possible_cpu(cpu) {
+ snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
+ pool = per_cpu_ptr(fcoe->pool, cpu);
+ *pool = pci_pool_create(pool_name,
+ adapter->pdev, IXGBE_FCPTR_MAX,
+ IXGBE_FCPTR_ALIGN, PAGE_SIZE);
+ if (!*pool) {
+ e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
+ ixgbe_fcoe_ddp_pools_free(fcoe);
+ return;
+ }
+ }
+}
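+
+/*
+ * Usage note: ixgbe_fcoe_ddp_setup() picks the pool for the current CPU
+ * via per_cpu_ptr(fcoe->pool, get_cpu()), so each CPU allocates UDL
+ * lists from its own pci_pool and the pools themselves need no
+ * cross-CPU locking.
+ */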
+
+/**
+ * ixgbe_configure_fcoe - configures registers for fcoe at start
+ * @adapter: ptr to ixgbe adapter
+ *
+ * This sets up FCoE related registers
+ *
+ * Returns : none
+ */
+void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
+{
+ int i, fcoe_q, fcoe_i;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+
+ if (!fcoe->pool) {
+ spin_lock_init(&fcoe->lock);
+
+ ixgbe_fcoe_ddp_pools_alloc(adapter);
+ if (!fcoe->pool) {
+ e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
+ return;
+ }
+
+ /* Extra buffer to be shared by all DDPs for HW work around */
+ fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+ if (fcoe->extra_ddp_buffer == NULL) {
+ e_err(drv, "failed to allocated extra DDP buffer\n");
+ goto out_ddp_pools;
+ }
+
+ fcoe->extra_ddp_buffer_dma =
+ dma_map_single(&adapter->pdev->dev,
+ fcoe->extra_ddp_buffer,
+ IXGBE_FCBUFF_MIN,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ fcoe->extra_ddp_buffer_dma)) {
+ e_err(drv, "failed to map extra DDP buffer\n");
+ goto out_extra_ddp_buffer;
+ }
+ }
+
+ /* Enable L2 eth type filter for FCoE */
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
+ (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
+ /* Enable L2 eth type filter for FIP */
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
+ (ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
+ if (adapter->ring_feature[RING_F_FCOE].indices) {
+ /* Use multiple rx queues for FCoE by redirection table */
+ for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
+ fcoe_i = f->mask + i % f->indices;
+ fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
+ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
+ } else {
+ /* Use single rx queue for FCoE */
+ fcoe_i = f->mask;
+ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
+ IXGBE_ETQS_QUEUE_EN |
+ (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
+ }
+ /* send FIP frames to the first FCoE queue */
+ fcoe_i = f->mask;
+ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
+ IXGBE_ETQS_QUEUE_EN |
+ (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, IXGBE_FCRXCTRL_FCCRCBO |
+ (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
+ return;
+
+out_extra_ddp_buffer:
+ kfree(fcoe->extra_ddp_buffer);
+out_ddp_pools:
+ ixgbe_fcoe_ddp_pools_free(fcoe);
+}
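+
+/*
+ * Note: with multiple FCoE rx queues the IXGBE_FCRETA redirection table
+ * programmed above spreads traffic round-robin; entry i holds the
+ * hardware register index of ring (f->mask + i % f->indices).
+ */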
+
+/**
+ * ixgbe_cleanup_fcoe - release all fcoe ddp context resources
+ * @adapter: ixgbe adapter
+ *
+ * Cleans up outstanding ddp context resources
+ *
+ * Returns : none
+ */
+void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
+{
+ int i;
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+ if (!fcoe->pool)
+ return;
+
+ for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
+ ixgbe_fcoe_ddp_put(adapter->netdev, i);
+ dma_unmap_single(&adapter->pdev->dev,
+ fcoe->extra_ddp_buffer_dma,
+ IXGBE_FCBUFF_MIN,
+ DMA_FROM_DEVICE);
+ kfree(fcoe->extra_ddp_buffer);
+ ixgbe_fcoe_ddp_pools_free(fcoe);
+}
+
+/**
+ * ixgbe_fcoe_enable - turn on FCoE offload feature
+ * @netdev: the corresponding netdev
+ *
+ * Turns on FCoE offload feature in 82599.
+ *
+ * Returns : 0 indicates success or -EINVAL on failure
+ */
+int ixgbe_fcoe_enable(struct net_device *netdev)
+{
+ int rc = -EINVAL;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
+ goto out_enable;
+
+ atomic_inc(&fcoe->refcnt);
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ goto out_enable;
+
+ e_info(drv, "Enabling FCoE offload features.\n");
+ if (netif_running(netdev))
+ netdev->netdev_ops->ndo_stop(netdev);
+
+ ixgbe_clear_interrupt_scheme(adapter);
+
+ adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
+ adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
+ netdev->features |= NETIF_F_FCOE_CRC;
+ netdev->features |= NETIF_F_FSO;
+ netdev->features |= NETIF_F_FCOE_MTU;
+ netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+
+ ixgbe_init_interrupt_scheme(adapter);
+ netdev_features_change(netdev);
+
+ if (netif_running(netdev))
+ netdev->netdev_ops->ndo_open(netdev);
+ rc = 0;
+
+out_enable:
+ return rc;
+}
+
+/**
+ * ixgbe_fcoe_disable - turn off FCoE offload feature
+ * @netdev: the corresponding netdev
+ *
+ * Turns off FCoE offload feature in 82599.
+ *
+ * Returns : 0 indicates success or -EINVAL on failure
+ */
+int ixgbe_fcoe_disable(struct net_device *netdev)
+{
+ int rc = -EINVAL;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
+ goto out_disable;
+
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ goto out_disable;
+
+ if (!atomic_dec_and_test(&fcoe->refcnt))
+ goto out_disable;
+
+ e_info(drv, "Disabling FCoE offload features.\n");
+ netdev->features &= ~NETIF_F_FCOE_CRC;
+ netdev->features &= ~NETIF_F_FSO;
+ netdev->features &= ~NETIF_F_FCOE_MTU;
+ netdev->fcoe_ddp_xid = 0;
+ netdev_features_change(netdev);
+
+ if (netif_running(netdev))
+ netdev->netdev_ops->ndo_stop(netdev);
+
+ ixgbe_clear_interrupt_scheme(adapter);
+ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+ adapter->ring_feature[RING_F_FCOE].indices = 0;
+ ixgbe_cleanup_fcoe(adapter);
+ ixgbe_init_interrupt_scheme(adapter);
+
+ if (netif_running(netdev))
+ netdev->netdev_ops->ndo_open(netdev);
+ rc = 0;
+
+out_disable:
+ return rc;
+}
+
+/**
+ * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
+ * @netdev: the corresponding net_device
+ * @wwn: the world wide name
+ * @type: the type of world wide name
+ *
+ * Returns the node or port world wide name if both the prefix and the san
+ * mac address are valid; the wwn is then formed based on the NAA-2 format
+ * for an IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3).
+ *
+ * Returns : 0 on success
+ */
+int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
+{
+ int rc = -EINVAL;
+ u16 prefix = 0xffff;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_mac_info *mac = &adapter->hw.mac;
+
+ switch (type) {
+ case NETDEV_FCOE_WWNN:
+ prefix = mac->wwnn_prefix;
+ break;
+ case NETDEV_FCOE_WWPN:
+ prefix = mac->wwpn_prefix;
+ break;
+ default:
+ break;
+ }
+
+ if ((prefix != 0xffff) &&
+ is_valid_ether_addr(mac->san_addr)) {
+ *wwn = ((u64) prefix << 48) |
+ ((u64) mac->san_addr[0] << 40) |
+ ((u64) mac->san_addr[1] << 32) |
+ ((u64) mac->san_addr[2] << 24) |
+ ((u64) mac->san_addr[3] << 16) |
+ ((u64) mac->san_addr[4] << 8) |
+ ((u64) mac->san_addr[5]);
+ rc = 0;
+ }
+ return rc;
+}
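+
+/*
+ * Worked example (values hypothetical): with wwnn_prefix = 0x2000 and
+ * san_addr = 00:1b:21:aa:bb:cc, the name formed above is
+ *
+ *   *wwn = 0x2000001b21aabbcc, i.e. 20:00:00:1b:21:aa:bb:cc
+ */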
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
new file mode 100644
index 000000000000..99de145e290d
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -0,0 +1,81 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_FCOE_H
+#define _IXGBE_FCOE_H
+
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_fcoe.h>
+
+/* shift bits within STAT of FCSTAT */
+#define IXGBE_RXDADV_FCSTAT_SHIFT 4
+
+/* ddp user buffer */
+#define IXGBE_BUFFCNT_MAX 256 /* 8 bits bufcnt */
+#define IXGBE_FCPTR_ALIGN 16
+#define IXGBE_FCPTR_MAX (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t))
+#define IXGBE_FCBUFF_4KB 0x0
+#define IXGBE_FCBUFF_8KB 0x1
+#define IXGBE_FCBUFF_16KB 0x2
+#define IXGBE_FCBUFF_64KB 0x3
+#define IXGBE_FCBUFF_MAX 65536 /* 64KB max */
+#define IXGBE_FCBUFF_MIN 4096 /* 4KB min */
+#define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */
+
+/* Default traffic class to use for FCoE */
+#define IXGBE_FCOE_DEFTC 3
+
+/* fcerr */
+#define IXGBE_FCERR_BADCRC 0x00100000
+
+/* FCoE DDP for target mode */
+#define __IXGBE_FCOE_TARGET 1
+
+struct ixgbe_fcoe_ddp {
+ int len;
+ u32 err;
+ unsigned int sgc;
+ struct scatterlist *sgl;
+ dma_addr_t udp;
+ u64 *udl;
+ struct pci_pool *pool;
+};
+
+struct ixgbe_fcoe {
+ struct pci_pool **pool;
+ atomic_t refcnt;
+ spinlock_t lock;
+ struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
+ unsigned char *extra_ddp_buffer;
+ dma_addr_t extra_ddp_buffer_dma;
+ unsigned long mode;
+#ifdef CONFIG_IXGBE_DCB
+ u8 up;
+#endif
+};
+
+#endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
new file mode 100644
index 000000000000..1519a23421af
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -0,0 +1,7860 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/pkt_sched.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/ethtool.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/prefetch.h>
+#include <scsi/fc/fc_fcoe.h>
+
+#include "ixgbe.h"
+#include "ixgbe_common.h"
+#include "ixgbe_dcb_82599.h"
+#include "ixgbe_sriov.h"
+
+char ixgbe_driver_name[] = "ixgbe";
+static const char ixgbe_driver_string[] =
+ "Intel(R) 10 Gigabit PCI Express Network Driver";
+#define MAJ 3
+#define MIN 6
+#define BUILD 7
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
+ __stringify(BUILD) "-k"
+const char ixgbe_driver_version[] = DRV_VERSION;
+static const char ixgbe_copyright[] =
+ "Copyright (c) 1999-2011 Intel Corporation.";
+
+static const struct ixgbe_info *ixgbe_info_tbl[] = {
+ [board_82598] = &ixgbe_82598_info,
+ [board_82599] = &ixgbe_82599_info,
+ [board_X540] = &ixgbe_X540_info,
+};
+
+/* ixgbe_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
+ /* required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
+
+#ifdef CONFIG_IXGBE_DCA
+static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
+ void *p);
+static struct notifier_block dca_notifier = {
+ .notifier_call = ixgbe_notify_dca,
+ .next = NULL,
+ .priority = 0
+};
+#endif
+
+#ifdef CONFIG_PCI_IOV
+static unsigned int max_vfs;
+module_param(max_vfs, uint, 0);
+MODULE_PARM_DESC(max_vfs,
+ "Maximum number of virtual functions to allocate per physical function");
+#endif /* CONFIG_PCI_IOV */
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+
+static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
+{
+ if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
+ !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
+ schedule_work(&adapter->service_task);
+}
+
+static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
+{
+ BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
+
+ /* flush memory to make sure state is correct before next watchdog */
+ smp_mb__before_clear_bit();
+ clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
+}
+
+struct ixgbe_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
+
+ /* General Registers */
+ {IXGBE_CTRL, "CTRL"},
+ {IXGBE_STATUS, "STATUS"},
+ {IXGBE_CTRL_EXT, "CTRL_EXT"},
+
+ /* Interrupt Registers */
+ {IXGBE_EICR, "EICR"},
+
+ /* RX Registers */
+ {IXGBE_SRRCTL(0), "SRRCTL"},
+ {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
+ {IXGBE_RDLEN(0), "RDLEN"},
+ {IXGBE_RDH(0), "RDH"},
+ {IXGBE_RDT(0), "RDT"},
+ {IXGBE_RXDCTL(0), "RXDCTL"},
+ {IXGBE_RDBAL(0), "RDBAL"},
+ {IXGBE_RDBAH(0), "RDBAH"},
+
+ /* TX Registers */
+ {IXGBE_TDBAL(0), "TDBAL"},
+ {IXGBE_TDBAH(0), "TDBAH"},
+ {IXGBE_TDLEN(0), "TDLEN"},
+ {IXGBE_TDH(0), "TDH"},
+ {IXGBE_TDT(0), "TDT"},
+ {IXGBE_TXDCTL(0), "TXDCTL"},
+
+ /* List Terminator */
+ {}
+};
+
+/*
+ * ixgbe_regdump - register printout routine
+ */
+static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
+{
+ int i = 0, j = 0;
+ char rname[16];
+ u32 regs[64];
+
+ switch (reginfo->ofs) {
+ case IXGBE_SRRCTL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+ break;
+ case IXGBE_DCA_RXCTRL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ break;
+ case IXGBE_RDLEN(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
+ break;
+ case IXGBE_RDH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
+ break;
+ case IXGBE_RDT(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
+ break;
+ case IXGBE_RXDCTL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ break;
+ case IXGBE_RDBAL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
+ break;
+ case IXGBE_RDBAH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
+ break;
+ case IXGBE_TDBAL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
+ break;
+ case IXGBE_TDBAH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
+ break;
+ case IXGBE_TDLEN(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
+ break;
+ case IXGBE_TDH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
+ break;
+ case IXGBE_TDT(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
+ break;
+ case IXGBE_TXDCTL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+ break;
+ default:
+ pr_info("%-15s %08x\n", reginfo->name,
+ IXGBE_READ_REG(hw, reginfo->ofs));
+ return;
+ }
+
+ for (i = 0; i < 8; i++) {
+ snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
+ pr_err("%-15s", rname);
+ for (j = 0; j < 8; j++)
+ pr_cont(" %08x", regs[i*8+j]);
+ pr_cont("\n");
+ }
+
+}
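+
+/*
+ * Example output (values hypothetical): a per-queue register such as RDT
+ * is dumped as eight rows of eight queues each, e.g.
+ *
+ *   RDT[0-7]         000001ff 000001ff 00000000 ... (8 values per row)
+ */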
+
+/*
+ * ixgbe_dump - Print registers, tx-rings and rx-rings
+ */
+static void ixgbe_dump(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_reg_info *reginfo;
+ int n = 0;
+ struct ixgbe_ring *tx_ring;
+ struct ixgbe_tx_buffer *tx_buffer_info;
+ union ixgbe_adv_tx_desc *tx_desc;
+ struct my_u0 { u64 a; u64 b; } *u0;
+ struct ixgbe_ring *rx_ring;
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbe_rx_buffer *rx_buffer_info;
+ u32 staterr;
+ int i = 0;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ pr_info("Device Name state "
+ "trans_start last_rx\n");
+ pr_info("%-15s %016lX %016lX %016lX\n",
+ netdev->name,
+ netdev->state,
+ netdev->trans_start,
+ netdev->last_rx);
+ }
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ pr_info(" Register Name Value\n");
+ for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ ixgbe_regdump(hw, reginfo);
+ }
+
+ /* Print TX Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ tx_buffer_info =
+ &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
+ pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
+ n, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)tx_buffer_info->dma,
+ tx_buffer_info->length,
+ tx_buffer_info->next_to_watch,
+ (u64)tx_buffer_info->time_stamp);
+ }
+
+ /* Print TX Rings */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+ /* Transmit Descriptor Formats
+ *
+ * Advanced Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
+ * +--------------------------------------------------------------+
+ * 63 46 45 40 39 36 35 32 31 24 23 20 19 0
+ */
+
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ pr_info("------------------------------------\n");
+ pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("T [desc] [address 63:0 ] "
+ "[PlPOIdStDDt Ln] [bi->dma ] "
+ "leng ntw timestamp bi->skb\n");
+
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ pr_info("T [0x%03X] %016llX %016llX %016llX"
+ " %04X %p %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)tx_buffer_info->dma,
+ tx_buffer_info->length,
+ tx_buffer_info->next_to_watch,
+ (u64)tx_buffer_info->time_stamp,
+ tx_buffer_info->skb);
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ pr_cont(" NTC/U\n");
+ else if (i == tx_ring->next_to_use)
+ pr_cont(" NTU\n");
+ else if (i == tx_ring->next_to_clean)
+ pr_cont(" NTC\n");
+ else
+ pr_cont("\n");
+
+ if (netif_msg_pktdata(adapter) &&
+ tx_buffer_info->dma != 0)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(tx_buffer_info->dma),
+ tx_buffer_info->length, true);
+ }
+ }
+
+ /* Print RX Rings Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ pr_info("Queue [NTU] [NTC]\n");
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ pr_info("%5d %5X %5X\n",
+ n, rx_ring->next_to_use, rx_ring->next_to_clean);
+ }
+
+ /* Print RX Rings */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+
+ /* Advanced Receive Descriptor (Read) Format
+ * 63 1 0
+ * +-----------------------------------------------------+
+ * 0 | Packet Buffer Address [63:1] |A0/NSE|
+ * +----------------------------------------------+------+
+ * 8 | Header Buffer Address [63:1] | DD |
+ * +-----------------------------------------------------+
+ *
+ *
+ * Advanced Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 30 21 20 16 15 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
+ * | Checksum Ident | | | | Type | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ pr_info("------------------------------------\n");
+ pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("R [desc] [ PktBuf A0] "
+ "[ HeadBuf DD] [bi->dma ] [bi->skb] "
+ "<-- Adv Rx Read format\n");
+ pr_info("RWB[desc] [PcsmIpSHl PtRs] "
+ "[vl er S cks ln] ---------------- [bi->skb] "
+ "<-- Adv Rx Write-Back format\n");
+
+ for (i = 0; i < rx_ring->count; i++) {
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+ u0 = (struct my_u0 *)rx_desc;
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ if (staterr & IXGBE_RXD_STAT_DD) {
+ /* Descriptor Done */
+ pr_info("RWB[0x%03X] %016llX "
+ "%016llX ---------------- %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ rx_buffer_info->skb);
+ } else {
+ pr_info("R [0x%03X] %016llX "
+ "%016llX %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)rx_buffer_info->dma,
+ rx_buffer_info->skb);
+
+ if (netif_msg_pktdata(adapter)) {
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(rx_buffer_info->dma),
+ rx_ring->rx_buf_len, true);
+
+ if (rx_ring->rx_buf_len
+ < IXGBE_RXBUFFER_2K)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(
+ rx_buffer_info->page_dma +
+ rx_buffer_info->page_offset
+ ),
+ PAGE_SIZE/2, true);
+ }
+ }
+
+ if (i == rx_ring->next_to_use)
+ pr_cont(" NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ pr_cont(" NTC\n");
+ else
+ pr_cont("\n");
+
+ }
+ }
+
+exit:
+ return;
+}
+
+static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
+{
+ u32 ctrl_ext;
+
+ /* Let firmware take over control of h/w */
+ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
+ ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+}
+
+static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
+{
+ u32 ctrl_ext;
+
+ /* Let firmware know the driver has taken over */
+ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
+ ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
+}
+
+/*
+ * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * @adapter: pointer to adapter struct
+ * @direction: 0 for Rx, 1 for Tx, -1 for other causes
+ * @queue: queue to map the corresponding interrupt to
+ * @msix_vector: the vector to map to the corresponding queue
+ *
+ */
+static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
+ u8 queue, u8 msix_vector)
+{
+ u32 ivar, index;
+ struct ixgbe_hw *hw = &adapter->hw;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ if (direction == -1)
+ direction = 0;
+ index = (((direction * 64) + queue) >> 2) & 0x1F;
+ ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
+ ivar &= ~(0xFF << (8 * (queue & 0x3)));
+ ivar |= (msix_vector << (8 * (queue & 0x3)));
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ index = ((queue & 1) * 8);
+ ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
+ ivar &= ~(0xFF << index);
+ ivar |= (msix_vector << index);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
+ break;
+ } else {
+ /* tx or rx causes */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ index = ((16 * (queue & 1)) + (8 * direction));
+ ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
+ ivar &= ~(0xFF << index);
+ ivar |= (msix_vector << index);
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
+ break;
+ }
+ default:
+ break;
+ }
+}
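+
+/*
+ * Worked example (values hypothetical): mapping Rx queue 5 to MSI-X
+ * vector 3 on 82599/X540 takes the "tx or rx causes" branch:
+ *
+ *   index = (16 * (5 & 1)) + (8 * 0) = 16
+ *   register = IVAR(5 >> 1) = IVAR(2)
+ *
+ * so (3 | IXGBE_IVAR_ALLOC_VAL) lands in bits 23:16 of IVAR(2).
+ */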
+
+static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
+ u64 qmask)
+{
+ u32 mask;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ mask = (qmask & 0xFFFFFFFF);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
+ mask = (qmask >> 32);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
+ break;
+ default:
+ break;
+ }
+}
+
+static inline void ixgbe_unmap_tx_resource(struct ixgbe_ring *ring,
+ struct ixgbe_tx_buffer *tx_buffer)
+{
+ if (tx_buffer->dma) {
+ if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_MAPPED_AS_PAGE)
+ dma_unmap_page(ring->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(ring->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
+ }
+ tx_buffer->dma = 0;
+}
+
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *tx_buffer_info)
+{
+ ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
+ if (tx_buffer_info->skb)
+ dev_kfree_skb_any(tx_buffer_info->skb);
+ tx_buffer_info->skb = NULL;
+ /* tx_buffer_info must be completely set up in the transmit path */
+}
+
+static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
+ u32 data = 0;
+ u32 xoff[8] = {0};
+ int i;
+
+ if ((hw->fc.current_mode == ixgbe_fc_full) ||
+ (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+ break;
+ default:
+ data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+ }
+ hwstats->lxoffrxc += data;
+
+ /* refill credits (no tx hang) if we received xoff */
+ if (!data)
+ return;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ clear_bit(__IXGBE_HANG_CHECK_ARMED,
+ &adapter->tx_ring[i]->state);
+ return;
+ } else if (!(adapter->dcb_cfg.pfc_mode_enable))
+ return;
+
+ /* update stats for each tc, only valid with PFC enabled */
+ for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ break;
+ default:
+ xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+ }
+ hwstats->pxoffrxc[i] += xoff[i];
+ }
+
+ /* disarm tx queues that have received xoff frames */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ u8 tc = tx_ring->dcb_tc;
+
+ if (xoff[tc])
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
+ }
+}
+
+static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
+{
+ return ring->tx_stats.completed;
+}
+
+static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
+ u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
+
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+
+ return 0;
+}
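+
+/*
+ * Worked example (values hypothetical): with ring->count = 512,
+ * head = 500 and tail = 10 the ring has wrapped, so the function
+ * reports 10 + 512 - 500 = 22 descriptors still pending.
+ */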
+
+static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
+{
+ u32 tx_done = ixgbe_get_tx_completed(tx_ring);
+ u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
+ u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
+ bool ret = false;
+
+ clear_check_for_tx_hang(tx_ring);
+
+ /*
+ * Check for a hung queue, but be thorough. This verifies
+ * that a transmit has been completed since the previous
+ * check AND there is at least one packet pending. The
+ * ARMED bit is set to indicate a potential hang. The
+ * bit is cleared if a pause frame is received to remove
+ * false hang detection due to PFC or 802.3x frames. By
+ * requiring this to fail twice we avoid races with
+ * pfc clearing the ARMED bit and conditions where we
+ * run the check_tx_hang logic with a transmit completion
+ * pending but without time to complete it yet.
+ */
+ if ((tx_done_old == tx_done) && tx_pending) {
+ /* make sure it is true for two checks in a row */
+ ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
+ &tx_ring->state);
+ } else {
+ /* update completed stats and continue */
+ tx_ring->tx_stats.tx_done_old = tx_done;
+ /* reset the countdown */
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
+ }
+
+ return ret;
+}
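+
+/*
+ * Illustration: the first check that sees an unchanged completion count
+ * with work still pending only arms __IXGBE_HANG_CHECK_ARMED (the
+ * test_and_set_bit() returns the old value, false); a second consecutive
+ * check in the same state returns true and triggers the reset path in
+ * the caller.
+ */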
+
+/**
+ * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
+ * @adapter: driver private struct
+ **/
+static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
+{
+ /* Do the reset outside of interrupt context */
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ ixgbe_service_event_schedule(adapter);
+ }
+}
+
+/**
+ * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
+ * @q_vector: structure containing interrupt and ring information
+ * @tx_ring: tx ring to clean
+ **/
+static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring *tx_ring)
+{
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ struct ixgbe_tx_buffer *tx_buffer;
+ union ixgbe_adv_tx_desc *tx_desc;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = q_vector->tx.work_limit;
+ u16 i = tx_ring->next_to_clean;
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
+
+ for (; budget; budget--) {
+ union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* if DD is not set pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+ break;
+
+ /* count the packet as being completed */
+ tx_ring->tx_stats.completed++;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buffer->next_to_watch = NULL;
+
+ /* prevent any other reads prior to eop_desc being verified */
+ rmb();
+
+ do {
+ ixgbe_unmap_tx_resource(tx_ring, tx_buffer);
+ tx_desc->wb.status = 0;
+ if (likely(tx_desc == eop_desc)) {
+ eop_desc = NULL;
+ dev_kfree_skb_any(tx_buffer->skb);
+ tx_buffer->skb = NULL;
+
+ total_bytes += tx_buffer->bytecount;
+ total_packets += tx_buffer->gso_segs;
+ }
+
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
+ }
+
+ } while (eop_desc);
+ }
+
+ tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
+
+ if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
+ /* schedule immediate reset if we believe we hung */
+ struct ixgbe_hw *hw = &adapter->hw;
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
+ e_err(drv, "Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+ " TDH, TDT <%x>, <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "tx_buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->queue_index,
+ IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
+ IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
+ tx_ring->next_to_use, i,
+ tx_ring->tx_buffer_info[i].time_stamp, jiffies);
+
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ e_info(probe,
+ "tx hang %d detected on queue %d, resetting adapter\n",
+ adapter->tx_timeout_count + 1, tx_ring->queue_index);
+
+ ixgbe_tx_timeout_reset(adapter);
+
+ /* the adapter is about to reset, no point in enabling stuff */
+ return true;
+ }
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+ (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
+ !test_bit(__IXGBE_DOWN, &adapter->state)) {
+ netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ }
+ }
+
+ return !!budget;
+}
+
+#ifdef CONFIG_IXGBE_DCA
+static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ int cpu)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rxctrl;
+ u8 reg_idx = rx_ring->reg_idx;
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
+ rxctrl |= dca3_get_tag(rx_ring->dev, cpu);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
+ rxctrl |= (dca3_get_tag(rx_ring->dev, cpu) <<
+ IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
+ break;
+ default:
+ break;
+ }
+ rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
+ rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
+ rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
+}
+
+static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring,
+ int cpu)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 txctrl;
+ u8 reg_idx = tx_ring->reg_idx;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
+ txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
+ txctrl |= dca3_get_tag(tx_ring->dev, cpu);
+ txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
+ txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
+ txctrl |= (dca3_get_tag(tx_ring->dev, cpu) <<
+ IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+ txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
+{
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ struct ixgbe_ring *ring;
+ int cpu = get_cpu();
+
+ if (q_vector->cpu == cpu)
+ goto out_no_update;
+
+ for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+ ixgbe_update_tx_dca(adapter, ring, cpu);
+
+ for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+ ixgbe_update_rx_dca(adapter, ring, cpu);
+
+ q_vector->cpu = cpu;
+out_no_update:
+ put_cpu();
+}
+
+static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
+{
+ int num_q_vectors;
+ int i;
+
+ if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
+ return;
+
+ /* always use CB2 mode, difference is masked in the CB driver */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ else
+ num_q_vectors = 1;
+
+ for (i = 0; i < num_q_vectors; i++) {
+ adapter->q_vector[i]->cpu = -1;
+ ixgbe_update_dca(adapter->q_vector[i]);
+ }
+}
+
+static int __ixgbe_notify_dca(struct device *dev, void *data)
+{
+ struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long event = *(unsigned long *)data;
+
+ if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
+ return 0;
+
+ switch (event) {
+ case DCA_PROVIDER_ADD:
+ /* if we're already enabled, don't do it again */
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ break;
+ if (dca_add_requester(dev) == 0) {
+ adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+ ixgbe_setup_dca(adapter);
+ break;
+ }
+ /* Fall Through since DCA is disabled. */
+ case DCA_PROVIDER_REMOVE:
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
+ dca_remove_requester(dev);
+ adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
+ }
+ break;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_IXGBE_DCA */
+
+static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+}
+
+/**
+ * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
+ * @adapter: address of board private structure
+ * @rx_desc: advanced rx descriptor
+ *
+ * Returns: true if it is an FCoE packet
+ */
+static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+
+ return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+ ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
+ (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
+ IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
+}
+
+/**
+ * ixgbe_receive_skb - Send a completed packet up the stack
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @status: hardware indication of status of receive
+ * @ring: rx descriptor ring (for a specific queue) the packet came in on
+ * @rx_desc: rx descriptor
+ **/
+static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
+ struct sk_buff *skb, u8 status,
+ struct ixgbe_ring *ring,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ struct napi_struct *napi = &q_vector->napi;
+ bool is_vlan = (status & IXGBE_RXD_STAT_VP);
+ u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
+
+ if (is_vlan && (tag & VLAN_VID_MASK))
+ __vlan_hwaccel_put_tag(skb, tag);
+
+ if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
+ napi_gro_receive(napi, skb);
+ else
+ netif_rx(skb);
+}
+
+/**
+ * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @adapter: address of board private structure
+ * @rx_desc: advanced rx descriptor
+ * @skb: skb currently being received and modified
+ * @status_err: status error value of last descriptor in packet
+ **/
+static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb,
+ u32 status_err)
+{
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Rx csum disabled */
+ if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+ return;
+
+ /* if the IP checksum was checked and an IP error was flagged */
+ if ((status_err & IXGBE_RXD_STAT_IPCS) &&
+ (status_err & IXGBE_RXDADV_ERR_IPE)) {
+ adapter->hw_csum_rx_error++;
+ return;
+ }
+
+ if (!(status_err & IXGBE_RXD_STAT_L4CS))
+ return;
+
+ if (status_err & IXGBE_RXDADV_ERR_TCPE) {
+ u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+
+ /*
+ * 82599 errata, UDP frames with a 0 checksum can be marked as
+ * checksum errors.
+ */
+ if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
+ (adapter->hw.mac.type == ixgbe_mac_82599EB))
+ return;
+
+ adapter->hw_csum_rx_error++;
+ return;
+ }
+
+ /* It must be a TCP or UDP packet with a valid checksum */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
+{
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+}
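
The wmb()/writel() pair above is a publish operation: descriptor
contents must be globally visible before the tail value that hands them
to hardware. A user-space analogue (assumed, not driver code) expresses
the same ordering with a C11 release store standing in for the barrier
plus MMIO write.

#include <stdatomic.h>
#include <stdint.h>

struct desc { uint64_t addr; uint32_t len; };

struct rxq {
	struct desc ring[256];
	_Atomic uint32_t tail;		/* stands in for the RDT register */
};

static void publish_buffer(struct rxq *q, uint32_t idx,
			   uint64_t dma, uint32_t len)
{
	q->ring[idx].addr = dma;	/* plain stores, like the rx_desc setup */
	q->ring[idx].len = len;
	/* the release ordering plays the role of wmb(): no prior store
	 * may be reordered past this publication of the new tail */
	atomic_store_explicit(&q->tail, idx, memory_order_release);
}

int main(void)
{
	static struct rxq q;

	publish_buffer(&q, 0, 0x1000, 2048);
	return 0;
}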
+
+/**
+ * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
+{
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbe_rx_buffer *bi;
+ struct sk_buff *skb;
+ u16 i = rx_ring->next_to_use;
+
+ /* do nothing if no valid netdev defined */
+ if (!rx_ring->netdev)
+ return;
+
+ while (cleaned_count--) {
+ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
+ skb = bi->skb;
+
+ if (!skb) {
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_buf_len);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ goto no_buffers;
+ }
+ /* initialize queue mapping */
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ bi->skb = skb;
+ }
+
+ if (!bi->dma) {
+ bi->dma = dma_map_single(rx_ring->dev,
+ skb->data,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev, bi->dma)) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ bi->dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ if (!bi->page) {
+ bi->page = netdev_alloc_page(rx_ring->netdev);
+ if (!bi->page) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ goto no_buffers;
+ }
+ }
+
+ if (!bi->page_dma) {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= PAGE_SIZE / 2;
+ bi->page_dma = dma_map_page(rx_ring->dev,
+ bi->page,
+ bi->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ bi->page_dma)) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ bi->page_dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info. */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+ } else {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ rx_desc->read.hdr_addr = 0;
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
+ ixgbe_release_rx_desc(rx_ring, i);
+ }
+}
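
The page_offset XOR in the allocation path deserves a closer look: each
remap flips between the two halves of one page, so a page can be reused
once before it is released. A minimal sketch, assuming a 4096-byte page:

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096, page_offset = 0;

	for (int i = 0; i < 4; i++) {
		page_offset ^= page_size / 2;	/* same trick as bi->page_offset */
		printf("mapping %d at offset %u\n", i, page_offset);
	}
	return 0;	/* offsets alternate 2048, 0, 2048, 0 */
}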
+
+static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
+{
+ /* HW will not DMA in data larger than the given buffer, even if it
+ * parses the (NFS, of course) header to be larger. In that case, it
+ * fills the header buffer and spills the rest into the page.
+ */
+ u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
+ u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+ IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+ if (hlen > IXGBE_RX_HDR_SIZE)
+ hlen = IXGBE_RX_HDR_SIZE;
+ return hlen;
+}
+
+/**
+ * ixgbe_transform_rsc_queue - change rsc queue into a full packet
+ * @skb: pointer to the last skb in the rsc queue
+ *
+ * This function changes a queue full of hw rsc buffers into a completed
+ * packet. It uses the ->prev pointers to find the first packet and then
+ * turns it into the frag list owner.
+ **/
+static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
+{
+ unsigned int frag_list_size = 0;
+ unsigned int skb_cnt = 1;
+
+ while (skb->prev) {
+ struct sk_buff *prev = skb->prev;
+ frag_list_size += skb->len;
+ skb->prev = NULL;
+ skb = prev;
+ skb_cnt++;
+ }
+
+ skb_shinfo(skb)->frag_list = skb->next;
+ skb->next = NULL;
+ skb->len += frag_list_size;
+ skb->data_len += frag_list_size;
+ skb->truesize += frag_list_size;
+ IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
+
+ return skb;
+}
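
The ->prev walk above is easiest to see on a toy chain. The sketch below
(hypothetical types, not struct sk_buff) follows prev pointers from the
last buffer to the first while summing the lengths that would be folded
into the head.

#include <stdio.h>

struct buf {
	struct buf *prev;
	unsigned int len;
};

static struct buf *to_head(struct buf *last, unsigned int *extra)
{
	*extra = 0;
	while (last->prev) {
		struct buf *prev = last->prev;

		*extra += last->len;	/* bytes that move onto the head */
		last->prev = NULL;	/* mirrors the skb->prev = NULL */
		last = prev;
	}
	return last;
}

int main(void)
{
	struct buf a = { NULL, 100 }, b = { &a, 200 }, c = { &b, 300 };
	unsigned int extra;
	struct buf *head = to_head(&c, &extra);

	printf("head len %u, extra %u\n", head->len, extra);	/* 100, 500 */
	return 0;
}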
+
+static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
+ IXGBE_RXDADV_RSCCNT_MASK);
+}
+
+static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring *rx_ring,
+ int budget)
+{
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
+ struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
+ struct sk_buff *skb;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ const int current_node = numa_node_id();
+#ifdef IXGBE_FCOE
+ int ddp_bytes = 0;
+#endif /* IXGBE_FCOE */
+ u32 staterr;
+ u16 i;
+ u16 cleaned_count = 0;
+ bool pkt_is_rsc = false;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ while (staterr & IXGBE_RXD_STAT_DD) {
+ u32 upper_len = 0;
+
+ rmb(); /* read descriptor and rx_buffer_info after status DD */
+
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
+ skb = rx_buffer_info->skb;
+ rx_buffer_info->skb = NULL;
+ prefetch(skb->data);
+
+ if (ring_is_rsc_enabled(rx_ring))
+ pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
+
+ /* a linear skb still holds only the header buffer; once it goes non-linear we are building it from multiple pages */
+ if (!skb_is_nonlinear(skb)) {
+ u16 hlen;
+ if (pkt_is_rsc &&
+ !(staterr & IXGBE_RXD_STAT_EOP) &&
+ !skb->prev) {
+ /*
+ * When HWRSC is enabled, delay unmapping
+ * of the first packet. It carries the
+ * header information, and HW may still
+ * access the header after the writeback.
+ * Only unmap it when EOP is reached.
+ */
+ IXGBE_RSC_CB(skb)->delay_unmap = true;
+ IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
+ } else {
+ dma_unmap_single(rx_ring->dev,
+ rx_buffer_info->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ }
+ rx_buffer_info->dma = 0;
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ hlen = ixgbe_get_hlen(rx_desc);
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+ } else {
+ hlen = le16_to_cpu(rx_desc->wb.upper.length);
+ }
+
+ skb_put(skb, hlen);
+ } else {
+ /* assume packet split since header is unmapped */
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+ }
+
+ if (upper_len) {
+ dma_unmap_page(rx_ring->dev,
+ rx_buffer_info->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ rx_buffer_info->page_dma = 0;
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buffer_info->page,
+ rx_buffer_info->page_offset,
+ upper_len);
+
+ if ((page_count(rx_buffer_info->page) == 1) &&
+ (page_to_nid(rx_buffer_info->page) == current_node))
+ get_page(rx_buffer_info->page);
+ else
+ rx_buffer_info->page = NULL;
+
+ skb->len += upper_len;
+ skb->data_len += upper_len;
+ skb->truesize += upper_len;
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+
+ next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
+ prefetch(next_rxd);
+ cleaned_count++;
+
+ if (pkt_is_rsc) {
+ u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
+ IXGBE_RXDADV_NEXTP_SHIFT;
+ next_buffer = &rx_ring->rx_buffer_info[nextp];
+ } else {
+ next_buffer = &rx_ring->rx_buffer_info[i];
+ }
+
+ if (!(staterr & IXGBE_RXD_STAT_EOP)) {
+ if (ring_is_ps_enabled(rx_ring)) {
+ rx_buffer_info->skb = next_buffer->skb;
+ rx_buffer_info->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ } else {
+ skb->next = next_buffer->skb;
+ skb->next->prev = skb;
+ }
+ rx_ring->rx_stats.non_eop_descs++;
+ goto next_desc;
+ }
+
+ if (skb->prev) {
+ skb = ixgbe_transform_rsc_queue(skb);
+ /* if we got here without RSC the packet is invalid */
+ if (!pkt_is_rsc) {
+ __pskb_trim(skb, 0);
+ rx_buffer_info->skb = skb;
+ goto next_desc;
+ }
+ }
+
+ if (ring_is_rsc_enabled(rx_ring)) {
+ if (IXGBE_RSC_CB(skb)->delay_unmap) {
+ dma_unmap_single(rx_ring->dev,
+ IXGBE_RSC_CB(skb)->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ IXGBE_RSC_CB(skb)->dma = 0;
+ IXGBE_RSC_CB(skb)->delay_unmap = false;
+ }
+ }
+ if (pkt_is_rsc) {
+ if (ring_is_ps_enabled(rx_ring))
+ rx_ring->rx_stats.rsc_count +=
+ skb_shinfo(skb)->nr_frags;
+ else
+ rx_ring->rx_stats.rsc_count +=
+ IXGBE_RSC_CB(skb)->skb_cnt;
+ rx_ring->rx_stats.rsc_flush++;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
+ if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
+ dev_kfree_skb_any(skb);
+ goto next_desc;
+ }
+
+ ixgbe_rx_checksum(adapter, rx_desc, skb, staterr);
+ if (adapter->netdev->features & NETIF_F_RXHASH)
+ ixgbe_rx_hash(rx_desc, skb);
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+#ifdef IXGBE_FCOE
+ /* if DDPed, do not pass to the ULD unless this is FCP_RSP or an error */
+ if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
+ ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb,
+ staterr);
+ if (!ddp_bytes) {
+ dev_kfree_skb_any(skb);
+ goto next_desc;
+ }
+ }
+#endif /* IXGBE_FCOE */
+ ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
+
+ budget--;
+next_desc:
+ rx_desc->wb.upper.status_error = 0;
+
+ if (!budget)
+ break;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
+ ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+
+ rx_ring->next_to_clean = i;
+ cleaned_count = ixgbe_desc_unused(rx_ring);
+
+ if (cleaned_count)
+ ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
+
+#ifdef IXGBE_FCOE
+ /* include DDPed FCoE data */
+ if (ddp_bytes > 0) {
+ unsigned int mss;
+
+ mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
+ sizeof(struct fc_frame_header) -
+ sizeof(struct fcoe_crc_eof);
+ if (mss > 512)
+ mss &= ~511;
+ total_rx_bytes += ddp_bytes;
+ total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
+ }
+#endif /* IXGBE_FCOE */
+
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+ q_vector->rx.total_packets += total_rx_packets;
+ q_vector->rx.total_bytes += total_rx_bytes;
+
+ return !!budget;
+}
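
The FCoE accounting at the bottom of the function converts DDPed bytes
into an estimated packet count. A worked example with hypothetical
numbers (the real mss is derived from the netdev MTU and the FC/FCoE
header sizes):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int mss = 1986;	/* hypothetical FC payload estimate */
	unsigned int ddp_bytes = 65536;

	if (mss > 512)
		mss &= ~511U;		/* round down to a 512-byte multiple */
	printf("mss=%u packets=%u\n", mss, DIV_ROUND_UP(ddp_bytes, mss));
	return 0;			/* mss=1536 packets=43 */
}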
+
+/**
+ * ixgbe_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
+ * interrupts.
+ **/
+static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_q_vector *q_vector;
+ int q_vectors, v_idx;
+ u32 mask;
+
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ /* Populate MSIX to EITR Select */
+ if (adapter->num_vfs > 32) {
+ u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
+ }
+
+ /*
+ * Populate the IVAR table and set the ITR values to the
+ * corresponding register.
+ */
+ for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ struct ixgbe_ring *ring;
+ q_vector = adapter->q_vector[v_idx];
+
+ for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+ ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
+
+ for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+ ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
+
+ if (q_vector->tx.ring && !q_vector->rx.ring) {
+ /* tx only vector */
+ if (adapter->tx_itr_setting == 1)
+ q_vector->itr = IXGBE_10K_ITR;
+ else
+ q_vector->itr = adapter->tx_itr_setting;
+ } else {
+ /* rx or rx/tx vector */
+ if (adapter->rx_itr_setting == 1)
+ q_vector->itr = IXGBE_20K_ITR;
+ else
+ q_vector->itr = adapter->rx_itr_setting;
+ }
+
+ ixgbe_write_eitr(q_vector);
+ }
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
+ v_idx);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ ixgbe_set_ivar(adapter, -1, 1, v_idx);
+ break;
+ default:
+ break;
+ }
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
+
+ /* set up to autoclear timer, and the vectors */
+ mask = IXGBE_EIMS_ENABLE_MASK;
+ mask &= ~(IXGBE_EIMS_OTHER |
+ IXGBE_EIMS_MAILBOX |
+ IXGBE_EIMS_LSC);
+
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
+}
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+/**
+ * ixgbe_update_itr - update the dynamic ITR value based on statistics
+ * @q_vector: structure containing interrupt and ring information
+ * @ring_container: structure containing ring performance data
+ *
+ * Stores a new ITR value based on packet and byte
+ * counts during the last interrupt. The advantage of per-interrupt
+ * computation is faster updates and a more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed, and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ * This functionality is controlled by the InterruptThrottleRate module
+ * parameter (see ixgbe_param.c).
+ **/
+static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring_container *ring_container)
+{
+ u64 bytes_perint;
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ int bytes = ring_container->total_bytes;
+ int packets = ring_container->total_packets;
+ u32 timepassed_us;
+ u8 itr_setting = ring_container->itr;
+
+ if (packets == 0)
+ return;
+
+ /* simple throttle rate management
+ * 0-20MB/s lowest (100000 ints/s)
+ * 20-100MB/s low (20000 ints/s)
+ * 100-1249MB/s bulk (8000 ints/s)
+ */
+ /* what was last interrupt timeslice? */
+ timepassed_us = q_vector->itr >> 2;
+ bytes_perint = bytes / timepassed_us; /* bytes/usec */
+
+ switch (itr_setting) {
+ case lowest_latency:
+ if (bytes_perint > adapter->eitr_low)
+ itr_setting = low_latency;
+ break;
+ case low_latency:
+ if (bytes_perint > adapter->eitr_high)
+ itr_setting = bulk_latency;
+ else if (bytes_perint <= adapter->eitr_low)
+ itr_setting = lowest_latency;
+ break;
+ case bulk_latency:
+ if (bytes_perint <= adapter->eitr_high)
+ itr_setting = low_latency;
+ break;
+ }
+
+ /* clear work counters since we have the values we need */
+ ring_container->total_bytes = 0;
+ ring_container->total_packets = 0;
+
+ /* write updated itr to ring container */
+ ring_container->itr = itr_setting;
+}
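
The bucket transitions above form a small hysteresis machine: a ring
climbs toward bulk_latency as bytes/usec grows and falls back as it
shrinks. A stand-alone sketch with placeholder thresholds standing in
for eitr_low/eitr_high:

#include <stdio.h>

enum latency { LOWEST, LOW, BULK };

static enum latency update(enum latency cur, unsigned long bytes_perint,
			   unsigned long lo, unsigned long hi)
{
	switch (cur) {
	case LOWEST:
		return bytes_perint > lo ? LOW : LOWEST;
	case LOW:
		if (bytes_perint > hi)
			return BULK;
		return bytes_perint <= lo ? LOWEST : LOW;
	case BULK:
		return bytes_perint <= hi ? LOW : BULK;
	}
	return cur;
}

int main(void)
{
	enum latency s = LOWEST;
	unsigned long samples[] = { 10, 50, 200 };	/* bytes/usec */

	for (unsigned int i = 0; i < 3; i++) {
		s = update(s, samples[i], 20, 100);	/* lo=20, hi=100 */
		printf("sample %lu -> state %d\n", samples[i], s);
	}
	return 0;	/* stays LOWEST, then LOW, then BULK */
}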
+
+/**
+ * ixgbe_write_eitr - write EITR register in hardware specific way
+ * @q_vector: structure containing interrupt and ring information
+ *
+ * This function is made to be called by ethtool and by the driver
+ * when it needs to update EITR registers at runtime. Hardware
+ * specific quirks/differences are taken care of here.
+ */
+void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
+{
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int v_idx = q_vector->v_idx;
+ u32 itr_reg = q_vector->itr;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ /* must write high and low 16 bits to reset counter */
+ itr_reg |= (itr_reg << 16);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ /*
+ * set the WDIS bit to not clear the timer bits and cause an
+ * immediate assertion of the interrupt
+ */
+ itr_reg |= IXGBE_EITR_CNT_WDIS;
+ break;
+ default:
+ break;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
+}
+
+static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
+{
+ u32 new_itr = q_vector->itr;
+ u8 current_itr;
+
+ ixgbe_update_itr(q_vector, &q_vector->tx);
+ ixgbe_update_itr(q_vector, &q_vector->rx);
+
+ current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = IXGBE_100K_ITR;
+ break;
+ case low_latency:
+ new_itr = IXGBE_20K_ITR;
+ break;
+ case bulk_latency:
+ new_itr = IXGBE_8K_ITR;
+ break;
+ default:
+ break;
+ }
+
+ if (new_itr != q_vector->itr) {
+ /* do an exponential smoothing */
+ new_itr = (10 * new_itr * q_vector->itr) /
+ ((9 * new_itr) + q_vector->itr);
+
+ /* save the algorithm value here */
+ q_vector->itr = new_itr & IXGBE_MAX_EITR;
+
+ ixgbe_write_eitr(q_vector);
+ }
+}
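
The smoothing step is a weighted harmonic move toward the bucket's
target: new = (10 * target * old) / (9 * target + old). With arbitrary
numbers, a current interval of 1000 pulled toward 200 converges over a
few iterations:

#include <stdio.h>

int main(void)
{
	unsigned int itr = 1000;	/* current interval */
	unsigned int target = 200;	/* bucket's ideal interval */

	for (int step = 0; step < 5; step++) {
		itr = (10 * target * itr) / ((9 * target) + itr);
		printf("step %d: itr=%u\n", step, itr);
	}
	return 0;	/* 714, 568, 479, 420, ... toward 200 */
}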
+
+/**
+ * ixgbe_check_overtemp_subtask - check for over temperature
+ * @adapter: pointer to adapter
+ **/
+static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 eicr = adapter->interrupt_event;
+
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ return;
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+ !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
+ return;
+
+ adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ /*
+ * Since the warning interrupt is shared by both ports,
+ * we cannot tell whether it was meant for our port.
+ * We may also have missed the interrupt, so we always
+ * have to check whether we got an LSC.
+ */
+ if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
+ !(eicr & IXGBE_EICR_LSC))
+ return;
+
+ if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
+ u32 autoneg;
+ bool link_up = false;
+
+ hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+
+ if (link_up)
+ return;
+ }
+
+ /* if this was not caused by overtemp there is nothing to do */
+ if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
+ return;
+
+ break;
+ default:
+ if (!(eicr & IXGBE_EICR_GPI_SDP0))
+ return;
+ break;
+ }
+ e_crit(drv,
+ "Network adapter has been stopped because it has over heated. "
+ "Restart the computer. If the problem persists, "
+ "power off the system and replace the adapter\n");
+
+ adapter->interrupt_event = 0;
+}
+
+static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
+ (eicr & IXGBE_EICR_GPI_SDP1)) {
+ e_crit(probe, "Fan has stopped, replace the adapter\n");
+ /* write to clear the interrupt */
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
+ }
+}
+
+static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
+{
+ if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
+ return;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ /*
+ * Need to check link state, so complete the overtemp
+ * check in the service task
+ */
+ if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) &&
+ (!test_bit(__IXGBE_DOWN, &adapter->state))) {
+ adapter->interrupt_event = eicr;
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
+ ixgbe_service_event_schedule(adapter);
+ return;
+ }
+ return;
+ case ixgbe_mac_X540:
+ if (!(eicr & IXGBE_EICR_TS))
+ return;
+ break;
+ default:
+ return;
+ }
+
+ e_crit(drv,
+ "Network adapter has been stopped because it has over heated. "
+ "Restart the computer. If the problem persists, "
+ "power off the system and replace the adapter\n");
+}
+
+static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (eicr & IXGBE_EICR_GPI_SDP2) {
+ /* Clear the interrupt */
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
+ ixgbe_service_event_schedule(adapter);
+ }
+ }
+
+ if (eicr & IXGBE_EICR_GPI_SDP1) {
+ /* Clear the interrupt */
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
+ ixgbe_service_event_schedule(adapter);
+ }
+ }
+}
+
+static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ adapter->lsc_int++;
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
+ IXGBE_WRITE_FLUSH(hw);
+ ixgbe_service_event_schedule(adapter);
+ }
+}
+
+static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
+ u64 qmask)
+{
+ u32 mask;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ mask = (qmask & 0xFFFFFFFF);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+ mask = (qmask >> 32);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+ break;
+ default:
+ break;
+ }
+ /* skip the flush */
+}
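
On the dual-register MACs the 64-bit queue mask is split across
EIMS_EX(0) and EIMS_EX(1), skipping the write when either half is zero.
A quick sketch of the split:

#include <stdint.h>
#include <stdio.h>

static void enable_queues(uint64_t qmask)
{
	uint32_t mask = qmask & 0xFFFFFFFF;

	if (mask)
		printf("EIMS_EX(0) <- %#x\n", (unsigned)mask);
	mask = qmask >> 32;
	if (mask)
		printf("EIMS_EX(1) <- %#x\n", (unsigned)mask);
}

int main(void)
{
	enable_queues(0x0000000300000001ULL);	/* queues 0, 32 and 33 */
	return 0;
}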
+
+static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
+ u64 qmask)
+{
+ u32 mask;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ mask = (qmask & 0xFFFFFFFF);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
+ mask = (qmask >> 32);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
+ break;
+ default:
+ break;
+ }
+ /* skip the flush */
+}
+
+/**
+ * ixgbe_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
+ bool flush)
+{
+ u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+
+ /* don't reenable LSC while waiting for link */
+ if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
+ mask &= ~IXGBE_EIMS_LSC;
+
+ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ mask |= IXGBE_EIMS_GPI_SDP0;
+ break;
+ case ixgbe_mac_X540:
+ mask |= IXGBE_EIMS_TS;
+ break;
+ default:
+ break;
+ }
+ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
+ mask |= IXGBE_EIMS_GPI_SDP1;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ mask |= IXGBE_EIMS_GPI_SDP1;
+ mask |= IXGBE_EIMS_GPI_SDP2;
+ case ixgbe_mac_X540:
+ mask |= IXGBE_EIMS_ECC;
+ mask |= IXGBE_EIMS_MAILBOX;
+ break;
+ default:
+ break;
+ }
+ if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
+ !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
+ mask |= IXGBE_EIMS_FLOW_DIR;
+
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
+ if (queues)
+ ixgbe_irq_enable_queues(adapter, ~0);
+ if (flush)
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+}
+
+static irqreturn_t ixgbe_msix_other(int irq, void *data)
+{
+ struct ixgbe_adapter *adapter = data;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 eicr;
+
+ /*
+ * Workaround for Silicon errata. Use clear-by-write instead
+ * of clear-by-read. Reading with EICS will return the
+ * interrupt causes without clearing them, which will later
+ * be done with the write to EICR.
+ */
+ eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
+
+ if (eicr & IXGBE_EICR_LSC)
+ ixgbe_check_lsc(adapter);
+
+ if (eicr & IXGBE_EICR_MAILBOX)
+ ixgbe_msg_task(adapter);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (eicr & IXGBE_EICR_ECC)
+ e_info(link, "Received unrecoverable ECC Err, please "
+ "reboot\n");
+ /* Handle Flow Director Full threshold interrupt */
+ if (eicr & IXGBE_EICR_FLOW_DIR) {
+ int reinit_count = 0;
+ int i;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
+ if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &ring->state))
+ reinit_count++;
+ }
+ if (reinit_count) {
+ /* no more flow director interrupts until after init */
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
+ adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
+ ixgbe_service_event_schedule(adapter);
+ }
+ }
+ ixgbe_check_sfp_event(adapter, eicr);
+ ixgbe_check_overtemp_event(adapter, eicr);
+ break;
+ default:
+ break;
+ }
+
+ ixgbe_check_fan_failure(adapter, eicr);
+
+ /* re-enable the original interrupt state, no lsc, no queues */
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable(adapter, false, false);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
+{
+ struct ixgbe_q_vector *q_vector = data;
+
+ /* EIAM disabled interrupts (on this vector) for us */
+
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
+ int r_idx)
+{
+ struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+ struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
+
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = q_vector->rx.ring;
+ q_vector->rx.ring = rx_ring;
+ q_vector->rx.count++;
+}
+
+static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
+ int t_idx)
+{
+ struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+ struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
+
+ tx_ring->q_vector = q_vector;
+ tx_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = tx_ring;
+ q_vector->tx.count++;
+ q_vector->tx.work_limit = a->tx_work_limit;
+}
+
+/**
+ * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code. Ideally, we'd have
+ * one vector per ring/queue, but on a constrained vector budget, we
+ * group the rings as "efficiently" as possible. You would add new
+ * mapping configurations in here.
+ **/
+static void ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
+{
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int rxr_remaining = adapter->num_rx_queues, rxr_idx = 0;
+ int txr_remaining = adapter->num_tx_queues, txr_idx = 0;
+ int v_start = 0;
+
+ /* only one q_vector if MSI-X is disabled. */
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+ q_vectors = 1;
+
+ /*
+ * If we don't have enough vectors for a 1-to-1 mapping, we'll have to
+ * group them so there are multiple queues per vector.
+ *
+ * Re-adjusting *qpv takes care of the remainder.
+ */
+ for (; v_start < q_vectors && rxr_remaining; v_start++) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_start);
+ for (; rqpv; rqpv--, rxr_idx++, rxr_remaining--)
+ map_vector_to_rxq(adapter, v_start, rxr_idx);
+ }
+
+ /*
+ * If there are not enough q_vectors for each ring to have its own
+ * vector then we must pair up Rx/Tx rings on each vector
+ */
+ if ((v_start + txr_remaining) > q_vectors)
+ v_start = 0;
+
+ for (; v_start < q_vectors && txr_remaining; v_start++) {
+ int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_start);
+ for (; tqpv; tqpv--, txr_idx++, txr_remaining--)
+ map_vector_to_txq(adapter, v_start, txr_idx);
+ }
+}
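
The DIV_ROUND_UP re-evaluation above spreads a remainder as evenly as
possible: with, say, 10 Rx rings and 4 vectors the per-vector counts
come out 3, 3, 2, 2. A worked example:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int remaining = 10, q_vectors = 4;

	for (int v = 0; v < q_vectors && remaining; v++) {
		int per_vec = DIV_ROUND_UP(remaining, q_vectors - v);

		printf("vector %d gets %d rings\n", v, per_vec);
		remaining -= per_vec;
	}
	return 0;	/* 3, 3, 2, 2 */
}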
+
+/**
+ * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int vector, err;
+ int ri = 0, ti = 0;
+
+ for (vector = 0; vector < q_vectors; vector++) {
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
+ struct msix_entry *entry = &adapter->msix_entries[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "TxRx", ri++);
+ ti++;
+ } else if (q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "rx", ri++);
+ } else if (q_vector->tx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "tx", ti++);
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
+ err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
+ q_vector->name, q_vector);
+ if (err) {
+ e_err(probe, "request_irq failed for MSIX interrupt "
+ "Error: %d\n", err);
+ goto free_queue_irqs;
+ }
+ /* If Flow Director is enabled, set interrupt affinity */
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ /* assign the mask for this irq */
+ irq_set_affinity_hint(entry->vector,
+ q_vector->affinity_mask);
+ }
+ }
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ ixgbe_msix_other, 0, netdev->name, adapter);
+ if (err) {
+ e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
+ goto free_queue_irqs;
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ irq_set_affinity_hint(adapter->msix_entries[vector].vector,
+ NULL);
+ free_irq(adapter->msix_entries[vector].vector,
+ adapter->q_vector[vector]);
+ }
+ adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ return err;
+}
+
+/**
+ * ixgbe_intr - legacy mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t ixgbe_intr(int irq, void *data)
+{
+ struct ixgbe_adapter *adapter = data;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
+ u32 eicr;
+
+ /*
+ * Workaround for silicon errata on 82598. Mask the interrupts
+ * before the read of EICR.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
+
+ /* for NAPI, use EIAM to auto-mask tx/rx interrupt bits on read;
+ * therefore no explicit interrupt disable is necessary */
+ eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+ if (!eicr) {
+ /*
+ * Shared interrupt alert!
+ * Make sure interrupts are enabled because the read will
+ * have disabled interrupts due to EIAM. Finish the
+ * workaround of the silicon errata on 82598 by unmasking
+ * the interrupt that we masked before the EICR read.
+ */
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable(adapter, true, true);
+ return IRQ_NONE; /* Not our interrupt */
+ }
+
+ if (eicr & IXGBE_EICR_LSC)
+ ixgbe_check_lsc(adapter);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ ixgbe_check_sfp_event(adapter, eicr);
+ /* Fall through */
+ case ixgbe_mac_X540:
+ if (eicr & IXGBE_EICR_ECC)
+ e_info(link, "Received unrecoverable ECC err, please "
+ "reboot\n");
+ ixgbe_check_overtemp_event(adapter, eicr);
+ break;
+ default:
+ break;
+ }
+
+ ixgbe_check_fan_failure(adapter, eicr);
+
+ if (napi_schedule_prep(&(q_vector->napi))) {
+ /* would disable interrupts here but EIAM disabled it */
+ __napi_schedule(&(q_vector->napi));
+ }
+
+ /*
+ * re-enable link (maybe) and non-queue interrupts, no flush.
+ * ixgbe_poll will re-enable the queue interrupts
+ */
+
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable(adapter, false, false);
+
+ return IRQ_HANDLED;
+}
+
+static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
+{
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int i;
+
+ /* legacy and MSI only use one vector */
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+ q_vectors = 1;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ adapter->rx_ring[i]->q_vector = NULL;
+ adapter->rx_ring[i]->next = NULL;
+ }
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ adapter->tx_ring[i]->q_vector = NULL;
+ adapter->tx_ring[i]->next = NULL;
+ }
+
+ for (i = 0; i < q_vectors; i++) {
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
+ memset(&q_vector->rx, 0, sizeof(struct ixgbe_ring_container));
+ memset(&q_vector->tx, 0, sizeof(struct ixgbe_ring_container));
+ }
+}
+
+/**
+ * ixgbe_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ /* map all of the rings to the q_vectors */
+ ixgbe_map_rings_to_vectors(adapter);
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ err = ixgbe_request_msix_irqs(adapter);
+ else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
+ err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
+ netdev->name, adapter);
+ else
+ err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
+ netdev->name, adapter);
+
+ if (err) {
+ e_err(probe, "request_irq failed, Error %d\n", err);
+
+ /* place q_vectors and rings back into a known good state */
+ ixgbe_reset_q_vectors(adapter);
+ }
+
+ return err;
+}
+
+static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
+{
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ int i, q_vectors;
+
+ q_vectors = adapter->num_msix_vectors;
+ i = q_vectors - 1;
+ free_irq(adapter->msix_entries[i].vector, adapter);
+ i--;
+
+ for (; i >= 0; i--) {
+ /* free only the irqs that were actually requested */
+ if (!adapter->q_vector[i]->rx.ring &&
+ !adapter->q_vector[i]->tx.ring)
+ continue;
+
+ /* clear the affinity_mask in the IRQ descriptor */
+ irq_set_affinity_hint(adapter->msix_entries[i].vector,
+ NULL);
+
+ free_irq(adapter->msix_entries[i].vector,
+ adapter->q_vector[i]);
+ }
+ } else {
+ free_irq(adapter->pdev->irq, adapter);
+ }
+
+ /* clear q_vector state information */
+ ixgbe_reset_q_vectors(adapter);
+}
+
+/**
+ * ixgbe_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
+{
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
+ break;
+ default:
+ break;
+ }
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ int i;
+ for (i = 0; i < adapter->num_msix_vectors; i++)
+ synchronize_irq(adapter->msix_entries[i].vector);
+ } else {
+ synchronize_irq(adapter->pdev->irq);
+ }
+}
+
+/**
+ * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
+ * @adapter: board private structure
+ **/
+static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
+
+ /* rx/tx vector */
+ if (adapter->rx_itr_setting == 1)
+ q_vector->itr = IXGBE_20K_ITR;
+ else
+ q_vector->itr = adapter->rx_itr_setting;
+
+ ixgbe_write_eitr(q_vector);
+
+ ixgbe_set_ivar(adapter, 0, 0, 0);
+ ixgbe_set_ivar(adapter, 1, 0, 0);
+
+ e_info(hw, "Legacy interrupt IVAR setup done\n");
+}
+
+/**
+ * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
+ * @adapter: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
+ **/
+void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 tdba = ring->dma;
+ int wait_loop = 10;
+ u32 txdctl = IXGBE_TXDCTL_ENABLE;
+ u8 reg_idx = ring->reg_idx;
+
+ /* disable queue to avoid issues while updating state */
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
+ IXGBE_WRITE_FLUSH(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
+ (tdba & DMA_BIT_MASK(32)));
+ IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
+ ring->count * sizeof(union ixgbe_adv_tx_desc));
+ IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
+ ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
+
+ /*
+ * set WTHRESH to encourage burst writeback; it should not be set
+ * higher than 1 when ITR is 0, as it could cause false TX hangs.
+ *
+ * In order to avoid issues, WTHRESH + PTHRESH should always be equal
+ * to or less than the number of on-chip descriptors, which is
+ * currently 40.
+ */
+ if (!adapter->tx_itr_setting || !adapter->rx_itr_setting)
+ txdctl |= (1 << 16); /* WTHRESH = 1 */
+ else
+ txdctl |= (8 << 16); /* WTHRESH = 8 */
+
+ /* PTHRESH=32 is needed to avoid a Tx hang with DFP enabled. */
+ txdctl |= (1 << 8) | /* HTHRESH = 1 */
+ 32; /* PTHRESH = 32 */
+
+ /* reinitialize flowdirector state */
+ if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
+ adapter->atr_sample_rate) {
+ ring->atr_sample_rate = adapter->atr_sample_rate;
+ ring->atr_count = 0;
+ set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
+ } else {
+ ring->atr_sample_rate = 0;
+ }
+
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
+
+ /* enable queue */
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
+
+ /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
+
+ /* poll to verify queue is enabled */
+ do {
+ usleep_range(1000, 2000);
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+ } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!wait_loop)
+ e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
+}
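
The TXDCTL value assembled above packs three thresholds into one
register: PTHRESH in the low bits, HTHRESH at shift 8, WTHRESH at shift
16, plus the enable flag. A sketch of the packing (the enable bit
position is assumed here; the threshold shifts come from the code
above):

#include <stdint.h>
#include <stdio.h>

#define TXDCTL_ENABLE (1u << 25)	/* assumed IXGBE_TXDCTL_ENABLE bit */

int main(void)
{
	uint32_t txdctl = TXDCTL_ENABLE;

	txdctl |= (8u << 16);	/* WTHRESH = 8 */
	txdctl |= (1u << 8);	/* HTHRESH = 1 */
	txdctl |= 32u;		/* PTHRESH = 32 */
	printf("txdctl = %#010x\n", (unsigned)txdctl);
	return 0;		/* 0x02080120 */
}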
+
+static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rttdcs;
+ u32 reg;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ /* disable the arbiter while setting MTQC */
+ rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ rttdcs |= IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+
+ /* set transmit pool layout */
+ switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ case (IXGBE_FLAG_SRIOV_ENABLED):
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+ (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
+ break;
+ default:
+ if (!tcs)
+ reg = IXGBE_MTQC_64Q_1PB;
+ else if (tcs <= 4)
+ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ else
+ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+
+ /* Enable Security TX Buffer IFG for multiple pb */
+ if (tcs) {
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg |= IXGBE_SECTX_DCB;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+ }
+ break;
+ }
+
+ /* re-enable the arbiter */
+ rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+}
+
+/**
+ * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 dmatxctl;
+ u32 i;
+
+ ixgbe_setup_mtqc(adapter);
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ /* DMATXCTL.EN must be before Tx queues are enabled */
+ dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ dmatxctl |= IXGBE_DMATXCTL_TE;
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
+ }
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
+}
+
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
+
+static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring)
+{
+ u32 srrctl;
+ u8 reg_idx = rx_ring->reg_idx;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB: {
+ struct ixgbe_ring_feature *feature = adapter->ring_feature;
+ const int mask = feature[RING_F_RSS].mask;
+ reg_idx = reg_idx & mask;
+ }
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ default:
+ break;
+ }
+
+ srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
+
+ srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
+ srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
+ if (adapter->num_vfs)
+ srrctl |= IXGBE_SRRCTL_DROP_EN;
+
+ srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK;
+
+ if (ring_is_ps_enabled(rx_ring)) {
+#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
+ srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#else
+ srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#endif
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ } else {
+ srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ }
+
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
+}
+
+static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
+ 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
+ 0x6A3E67EA, 0x14364D17, 0x3BED200D};
+ u32 mrqc = 0, reta = 0;
+ u32 rxcsum;
+ int i, j;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+ int maxq = adapter->ring_feature[RING_F_RSS].indices;
+
+ if (tcs)
+ maxq = min(maxq, adapter->num_tx_queues / tcs);
+
+ /* Fill out hash function seeds */
+ for (i = 0; i < 10; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
+
+ /* Fill out redirection table */
+ for (i = 0, j = 0; i < 128; i++, j++) {
+ if (j == maxq)
+ j = 0;
+ /* reta = 4-byte sliding window of
+ * 0x00..(indices-1)(indices-1)00..etc. */
+ reta = (reta << 8) | (j * 0x11);
+ if ((i & 3) == 3)
+ IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+ }
+
+ /* Disable indicating checksum in descriptor, enables RSS hash */
+ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+ rxcsum |= IXGBE_RXCSUM_PCSD;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB &&
+ (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
+ mrqc = IXGBE_MRQC_RSSEN;
+ } else {
+ int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
+ | IXGBE_FLAG_SRIOV_ENABLED);
+
+ switch (mask) {
+ case (IXGBE_FLAG_RSS_ENABLED):
+ if (!tcs)
+ mrqc = IXGBE_MRQC_RSSEN;
+ else if (tcs <= 4)
+ mrqc = IXGBE_MRQC_RTRSS4TCEN;
+ else
+ mrqc = IXGBE_MRQC_RTRSS8TCEN;
+ break;
+ case (IXGBE_FLAG_SRIOV_ENABLED):
+ mrqc = IXGBE_MRQC_VMDQEN;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Perform hash on these packet types */
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
+ | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
+ | IXGBE_MRQC_RSS_FIELD_IPV6
+ | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+}
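
The RETA fill is a 4-byte sliding window: each iteration shifts in one
entry (j * 0x11) and every fourth entry flushes a 32-bit register. With
a hypothetical maxq of 4 each register comes out 0x00112233:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reta = 0;
	int maxq = 4;

	for (int i = 0, j = 0; i < 8; i++, j++) {
		if (j == maxq)
			j = 0;
		reta = (reta << 8) | (j * 0x11);
		if ((i & 3) == 3)
			printf("RETA(%d) <- %#010x\n", i >> 2,
			       (unsigned)reta);
	}
	return 0;
}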
+
+/**
+ * ixgbe_configure_rscctl - enable RSC for the indicated ring
+ * @adapter: address of board private structure
+ * @ring: ring on which to enable RSC
+ **/
+static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rscctrl;
+ int rx_buf_len;
+ u8 reg_idx = ring->reg_idx;
+
+ if (!ring_is_rsc_enabled(ring))
+ return;
+
+ rx_buf_len = ring->rx_buf_len;
+ rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
+ rscctrl |= IXGBE_RSCCTL_RSCEN;
+ /*
+ * we must limit the number of descriptors so that the
+ * total size of max desc * buf_len is not greater
+ * than 65535
+ */
+ if (ring_is_ps_enabled(ring)) {
+#if (MAX_SKB_FRAGS > 16)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+#elif (MAX_SKB_FRAGS > 8)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+#elif (MAX_SKB_FRAGS > 4)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+#else
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
+#endif
+ } else {
+ if (rx_buf_len < IXGBE_RXBUFFER_4K)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+ else if (rx_buf_len < IXGBE_RXBUFFER_8K)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+ else
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
+}
+
+/**
+ * ixgbe_set_uta - Set unicast filter table address
+ * @adapter: board private structure
+ *
+ * The unicast table address is a register array of 32-bit registers.
+ * The table is meant to be used in a way similar to how the MTA is used;
+ * however, due to certain limitations in the hardware it is necessary to
+ * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
+ * enable bit to allow vlan tag stripping when promiscuous mode is enabled
+ **/
+static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ /* The UTA table only exists on 82599 hardware and newer */
+ if (hw->mac.type < ixgbe_mac_82599EB)
+ return;
+
+ /* we only need to do this if VMDq is enabled */
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return;
+
+ for (i = 0; i < 128; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
+}
+
+#define IXGBE_MAX_RX_DESC_POLL 10
+static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
+
+ do {
+ usleep_range(1000, 2000);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop) {
+ e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
+ "the polling period\n", reg_idx);
+ }
+}
+
+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
+
+ /* the hardware may take up to 100us to really disable the rx queue */
+ do {
+ udelay(10);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop) {
+ e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
+ "the polling period\n", reg_idx);
+ }
+}
+
+void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 rdba = ring->dma;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ /* disable queue to avoid issues while updating state */
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ ixgbe_disable_rx_queue(adapter, ring);
+
+ IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
+ IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
+ ring->count * sizeof(union ixgbe_adv_rx_desc));
+ IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
+ ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
+
+ ixgbe_configure_srrctl(adapter, ring);
+ ixgbe_configure_rscctl(adapter, ring);
+
+ /* If operating in IOV mode set RLPML for X540 */
+ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
+ hw->mac.type == ixgbe_mac_X540) {
+ rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+ rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
+ ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
+ }
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ /*
+ * enable cache-line-friendly hardware writes:
+ * PTHRESH=32 descriptors (half the internal cache);
+ * this also removes the ugly rx_no_buffer_count increment
+ * HTHRESH=4 descriptors (to minimize latency on fetch)
+ * WTHRESH=8 burst writeback up to two cache lines
+ */
+ rxdctl &= ~0x3FFFFF;
+ rxdctl |= 0x080420;
+ }
+
+ /* enable receive descriptor ring */
+ rxdctl |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+ ixgbe_rx_desc_queue_enable(adapter, ring);
+ ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
+}
+
+static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int p;
+
+ /* PSRTYPE must be initialized in non 82598 adapters */
+ u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_L2HDR |
+ IXGBE_PSRTYPE_IPV6HDR;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
+ psrtype |= (adapter->num_rx_queues_per_pool << 29);
+
+ for (p = 0; p < adapter->num_rx_pools; p++)
+ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
+ psrtype);
+}
+
+static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gcr_ext;
+ u32 vt_reg_bits;
+ u32 reg_offset, vf_shift;
+ u32 vmdctl;
+
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return;
+
+ vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
+ vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
+
+ vf_shift = adapter->num_vfs % 32;
+ reg_offset = (adapter->num_vfs > 32) ? 1 : 0;
+
+ /* Enable only the PF's pool for Tx/Rx */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+
+ /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
+ hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+
+ /*
+ * Set up VF register offsets for selected VT Mode,
+ * i.e. 32 or 64 VFs for SR-IOV
+ */
+ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
+ gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
+
+ /* enable Tx loopback for VF/PF communication */
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+ /* Enable MAC Anti-Spoofing */
+ hw->mac.ops.set_mac_anti_spoofing(hw,
+ (adapter->antispoofing_enabled =
+ (adapter->num_vfs != 0)),
+ adapter->num_vfs);
+}
+
+static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ int rx_buf_len;
+ struct ixgbe_ring *rx_ring;
+ int i;
+ u32 mhadd, hlreg0;
+
+ /* Decide whether to use packet split mode or not; on by default */
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+
+ /* Do not use packet split if we're in SR-IOV Mode */
+ if (adapter->num_vfs)
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+
+ /* Disable packet split due to 82599 erratum #45 */
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+
+#ifdef IXGBE_FCOE
+ /* adjust max frame to be able to do baby jumbo for FCoE */
+ if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+ (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
+ max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+
+#endif /* IXGBE_FCOE */
+ mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
+ if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
+ mhadd &= ~IXGBE_MHADD_MFS_MASK;
+ mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
+ }
+
+ /* MHADD allows an extra 4 bytes past max_frame for vlan tagged frames */
+ max_frame += VLAN_HLEN;
+
+ /* Set the RX buffer length according to the mode */
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ rx_buf_len = IXGBE_RX_HDR_SIZE;
+ } else {
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
+ (netdev->mtu <= ETH_DATA_LEN))
+ rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ /*
+ * Make best use of allocation by using all but 1K of a
+ * power of 2 allocation that will be used for skb->head.
+ */
+ else if (max_frame <= IXGBE_RXBUFFER_3K)
+ rx_buf_len = IXGBE_RXBUFFER_3K;
+ else if (max_frame <= IXGBE_RXBUFFER_7K)
+ rx_buf_len = IXGBE_RXBUFFER_7K;
+ else if (max_frame <= IXGBE_RXBUFFER_15K)
+ rx_buf_len = IXGBE_RXBUFFER_15K;
+ else
+ rx_buf_len = IXGBE_MAX_RXBUFFER;
+ }
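+
+ /*
+ * Worked example (editorial, assuming packet split and RSC are
+ * disabled and FCoE is off): a 1500-byte MTU yields
+ * max_frame = 1522 after the VLAN adjustment above and selects
+ * MAXIMUM_ETHERNET_VLAN_SIZE, while a 9000-byte MTU yields 9022
+ * and falls into the IXGBE_RXBUFFER_15K bucket.
+ */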
+
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
+ hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+ /*
+ * Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rx_ring = adapter->rx_ring[i];
+ rx_ring->rx_buf_len = rx_buf_len;
+
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
+ set_ring_ps_enabled(rx_ring);
+ else
+ clear_ring_ps_enabled(rx_ring);
+
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+ set_ring_rsc_enabled(rx_ring);
+ else
+ clear_ring_rsc_enabled(rx_ring);
+
+#ifdef IXGBE_FCOE
+ if (netdev->features & NETIF_F_FCOE_MTU) {
+ struct ixgbe_ring_feature *f;
+ f = &adapter->ring_feature[RING_F_FCOE];
+ if ((i >= f->mask) && (i < f->mask + f->indices)) {
+ clear_ring_ps_enabled(rx_ring);
+ if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
+ rx_ring->rx_buf_len =
+ IXGBE_FCOE_JUMBO_FRAME_SIZE;
+ } else if (!ring_is_rsc_enabled(rx_ring) &&
+ !ring_is_ps_enabled(rx_ring)) {
+ rx_ring->rx_buf_len =
+ IXGBE_FCOE_JUMBO_FRAME_SIZE;
+ }
+ }
+#endif /* IXGBE_FCOE */
+ }
+}
+
+static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ /*
+ * For VMDq support of different descriptor types or
+ * buffer sizes through the use of multiple SRRCTL
+ * registers, RDRXCTL.MVMEN must be set to 1
+ *
+ * also, the manual doesn't mention it clearly but DCA hints
+ * will only use queue 0's tags unless this bit is set. Side
+ * effects of setting this bit are only that SRRCTL must be
+ * fully programmed [0..15]
+ */
+ rdrxctl |= IXGBE_RDRXCTL_MVMEN;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ /* Disable RSC for ACK packets */
+ IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
+ (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
+ rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
+ /* hardware requires some bits to be set by default */
+ rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
+ rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
+ break;
+ default:
+ /* We should do nothing since we don't know this hardware */
+ return;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+}
+
+/**
+ * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ u32 rxctrl;
+
+ /* disable receives while setting up the descriptors */
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
+
+ ixgbe_setup_psrtype(adapter);
+ ixgbe_setup_rdrxctl(adapter);
+
+ /* Program registers for the distribution of queues */
+ ixgbe_setup_mrqc(adapter);
+
+ ixgbe_set_uta(adapter);
+
+ /* set_rx_buffer_len must be called before ring initialization */
+ ixgbe_set_rx_buffer_len(adapter);
+
+ /*
+ * Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
+
+ /* disable drop enable for 82598 parts */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ rxctrl |= IXGBE_RXCTRL_DMBYPS;
+
+ /* enable all receives */
+ rxctrl |= IXGBE_RXCTRL_RXEN;
+ hw->mac.ops.enable_rx_dma(hw, rxctrl);
+}
+
+static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int pool_ndx = adapter->num_vfs;
+
+ /* add VID to filter table */
+ hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
+ set_bit(vid, adapter->active_vlans);
+}
+
+static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int pool_ndx = adapter->num_vfs;
+
+ /* remove VID from filter table */
+ hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
+ clear_bit(vid, adapter->active_vlans);
+}
+
+/**
+ * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
+ * @adapter: driver data
+ */
+static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vlnctrl;
+
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+}
+
+/**
+ * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
+ * @adapter: driver data
+ */
+static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vlnctrl;
+
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+}
+
+/**
+ * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
+ * @adapter: driver data
+ */
+static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vlnctrl;
+ int i, j;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlnctrl &= ~IXGBE_VLNCTRL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ j = adapter->rx_ring[i]->reg_idx;
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+ vlnctrl &= ~IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
+ * @adapter: driver data
+ */
+static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vlnctrl;
+ int i, j;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlnctrl |= IXGBE_VLNCTRL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ j = adapter->rx_ring[i]->reg_idx;
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+ vlnctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
+{
+ u16 vid;
+
+ ixgbe_vlan_rx_add_vid(adapter->netdev, 0);
+
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
+}
+
+/**
+ * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ * 0 on no addresses written
+ * X on writing X addresses to the RAR table
+ **/
+static int ixgbe_write_uc_addr_list(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ unsigned int vfn = adapter->num_vfs;
+ unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS;
+ int count = 0;
+
+ /* return -ENOMEM if there are more addresses than available RAR entries */
+ if (netdev_uc_count(netdev) > rar_entries)
+ return -ENOMEM;
+
+ if (!netdev_uc_empty(netdev) && rar_entries) {
+ struct netdev_hw_addr *ha;
+ /* return error if we do not support writing to RAR table */
+ if (!hw->mac.ops.set_rar)
+ return -ENOMEM;
+
+ netdev_for_each_uc_addr(ha, netdev) {
+ if (!rar_entries)
+ break;
+ hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
+ vfn, IXGBE_RAH_AV);
+ count++;
+ }
+ }
+ /* write the addresses in reverse order to avoid write combining */
+ for (; rar_entries > 0 ; rar_entries--)
+ hw->mac.ops.clear_rar(hw, rar_entries);
+
+ return count;
+}
+
+/**
+ * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_method entry point is called whenever the unicast/multicast
+ * address list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast and
+ * promiscuous mode.
+ **/
+void ixgbe_set_rx_mode(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+ int count;
+
+ /* Check for Promiscuous and All Multicast modes */
+
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+
+ /* set all bits that we expect to always be set */
+ fctrl |= IXGBE_FCTRL_BAM;
+ fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
+ fctrl |= IXGBE_FCTRL_PMCF;
+
+ /* clear the bits we are changing the status of */
+ fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+
+ if (netdev->flags & IFF_PROMISC) {
+ hw->addr_ctrl.user_set_promisc = true;
+ fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+ vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
+ /* don't hardware filter vlans in promisc mode */
+ ixgbe_vlan_filter_disable(adapter);
+ } else {
+ if (netdev->flags & IFF_ALLMULTI) {
+ fctrl |= IXGBE_FCTRL_MPE;
+ vmolr |= IXGBE_VMOLR_MPE;
+ } else {
+ /*
+ * Write addresses to the MTA, if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ }
+ ixgbe_vlan_filter_enable(adapter);
+ hw->addr_ctrl.user_set_promisc = false;
+ /*
+ * Write addresses to available RAR registers, if there is not
+ * sufficient space to store all the addresses then enable
+ * unicast promiscuous mode
+ */
+ count = ixgbe_write_uc_addr_list(netdev);
+ if (count < 0) {
+ fctrl |= IXGBE_FCTRL_UPE;
+ vmolr |= IXGBE_VMOLR_ROPE;
+ }
+ }
+
+ if (adapter->num_vfs) {
+ ixgbe_restore_vf_multicasts(adapter);
+ vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
+ ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
+ IXGBE_VMOLR_ROPE);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+ if (netdev->features & NETIF_F_HW_VLAN_RX)
+ ixgbe_vlan_strip_enable(adapter);
+ else
+ ixgbe_vlan_strip_disable(adapter);
+}
+
+static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
+{
+ int q_idx;
+ struct ixgbe_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ /* legacy and MSI only use one vector */
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+ q_vectors = 1;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ q_vector = adapter->q_vector[q_idx];
+ napi_enable(&q_vector->napi);
+ }
+}
+
+static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
+{
+ int q_idx;
+ struct ixgbe_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ /* legacy and MSI only use one vector */
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+ q_vectors = 1;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ q_vector = adapter->q_vector[q_idx];
+ napi_disable(&q_vector->napi);
+ }
+}
+
+#ifdef CONFIG_IXGBE_DCB
+/*
+ * ixgbe_configure_dcb - Configure DCB hardware
+ * @adapter: ixgbe adapter struct
+ *
+ * This is called by the driver on open to configure the DCB hardware.
+ * This is also called by the DCB netlink interface when reconfiguring
+ * the DCB state.
+ */
+static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ netif_set_gso_max_size(adapter->netdev, 65536);
+ return;
+ }
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ netif_set_gso_max_size(adapter->netdev, 32768);
+
+ /* Enable VLAN tag insert/strip */
+ adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
+
+ hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
+
+ /* reconfigure the hardware */
+ if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
+#ifdef IXGBE_FCOE
+ if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+ max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif
+ ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
+ DCB_TX_CONFIG);
+ ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
+ DCB_RX_CONFIG);
+ ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
+ } else {
+ struct net_device *dev = adapter->netdev;
+
+ if (adapter->ixgbe_ieee_ets) {
+ struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
+ int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
+ }
+
+ if (adapter->ixgbe_ieee_pfc) {
+ struct ieee_pfc *pfc = adapter->ixgbe_ieee_pfc;
+ u8 *prio_tc = adapter->ixgbe_ieee_ets->prio_tc;
+
+ ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en,
+ prio_tc);
+ }
+ }
+
+ /* Enable RSS Hash per TC */
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ int i;
+ u32 reg = 0;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ u8 msb = 0;
+ u8 cnt = adapter->netdev->tc_to_txq[i].count;
+
+ while (cnt >>= 1)
+ msb++;
+
+ reg |= msb << IXGBE_RQTC_SHIFT_TC(i);
+ }
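+
+ /*
+ * Example (editorial): a TC whose tc_to_txq count is 4 yields
+ * msb = 2, i.e. two RSS-hash bits select among its 4 queues;
+ * a count of 1 yields msb = 0.
+ */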
+ IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg);
+ }
+}
+#endif
+
+/* Additional bittime to account for IXGBE framing */
+#define IXGBE_ETH_FRAMING 20
+
+/*
+ * ixgbe_hpbthresh - calculate high water mark for flow control
+ *
+ * @adapter: board private structure to calculate for
+ * @pb: packet buffer to calculate
+ */
+static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *dev = adapter->netdev;
+ int link, tc, kb, marker;
+ u32 dv_id, rx_pba;
+
+ /* Calculate max LAN frame size */
+ tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
+
+#ifdef IXGBE_FCOE
+ /* FCoE traffic class uses FCOE jumbo frames */
+ if (dev->features & NETIF_F_FCOE_MTU) {
+ int fcoe_pb = 0;
+
+#ifdef CONFIG_IXGBE_DCB
+ fcoe_pb = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
+
+#endif
+ if (fcoe_pb == pb && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE)
+ tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+ }
+#endif
+
+ /* Calculate delay value for device */
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
+ dv_id = IXGBE_DV_X540(link, tc);
+ break;
+ default:
+ dv_id = IXGBE_DV(link, tc);
+ break;
+ }
+
+ /* Loopback switch introduces additional latency */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ dv_id += IXGBE_B2BT(tc);
+
+ /* Delay value is calculated in bit times convert to KB */
+ kb = IXGBE_BT2KB(dv_id);
+ rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
+
+ marker = rx_pba - kb;
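+
+ /*
+ * Worked example (editorial, values assumed): with a 160 KB
+ * packet buffer (rx_pba = 160) and a delay value equivalent to
+ * 72 KB of worst-case in-flight data (kb = 72), XOFF is signalled
+ * at marker = 88 KB, leaving 72 KB of headroom for data already
+ * on the wire.
+ */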
+
+ /* It is possible that the packet buffer is not large enough
+ * to provide the required headroom. In this case warn the
+ * user and do the best we can.
+ */
+ if (marker < 0) {
+ e_warn(drv, "Packet Buffer(%i) can not provide enough"
+ "headroom to support flow control."
+ "Decrease MTU or number of traffic classes\n", pb);
+ marker = tc + 1;
+ }
+
+ return marker;
+}
+
+/*
+ * ixgbe_lpbthresh - calculate low water mark for flow control
+ *
+ * @adapter: board private structure to calculate for
+ */
+static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *dev = adapter->netdev;
+ int tc;
+ u32 dv_id;
+
+ /* Calculate max LAN frame size */
+ tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* Calculate delay value for device */
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
+ dv_id = IXGBE_LOW_DV_X540(tc);
+ break;
+ default:
+ dv_id = IXGBE_LOW_DV(tc);
+ break;
+ }
+
+ /* Delay value is calculated in bit times convert to KB */
+ return IXGBE_BT2KB(dv_id);
+}
+
+/*
+ * ixgbe_pbthresh_setup - calculate and setup high low water marks
+ */
+static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int num_tc = netdev_get_num_tc(adapter->netdev);
+ int i;
+
+ if (!num_tc)
+ num_tc = 1;
+
+ hw->fc.low_water = ixgbe_lpbthresh(adapter);
+
+ for (i = 0; i < num_tc; i++) {
+ hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
+
+ /* Low water marks must not be larger than high water marks */
+ if (hw->fc.low_water > hw->fc.high_water[i])
+ hw->fc.low_water = 0;
+ }
+}
+
+static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int hdrm;
+ u8 tc = netdev_get_num_tc(adapter->netdev);
+
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+ adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+ hdrm = 32 << adapter->fdir_pballoc;
+ else
+ hdrm = 0;
+
+ hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
+ ixgbe_pbthresh_setup(adapter);
+}
+
+static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct hlist_node *node, *node2;
+ struct ixgbe_fdir_filter *filter;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+
+ if (!hlist_empty(&adapter->fdir_filter_list))
+ ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
+
+ hlist_for_each_entry_safe(filter, node, node2,
+ &adapter->fdir_filter_list, fdir_node) {
+ ixgbe_fdir_write_perfect_filter_82599(hw,
+ &filter->filter,
+ filter->sw_idx,
+ (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
+ IXGBE_FDIR_DROP_QUEUE :
+ adapter->rx_ring[filter->action]->reg_idx);
+ }
+
+ spin_unlock(&adapter->fdir_perfect_lock);
+}
+
+static void ixgbe_configure(struct ixgbe_adapter *adapter)
+{
+ ixgbe_configure_pb(adapter);
+#ifdef CONFIG_IXGBE_DCB
+ ixgbe_configure_dcb(adapter);
+#endif
+
+ ixgbe_set_rx_mode(adapter->netdev);
+ ixgbe_restore_vlan(adapter);
+
+#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ ixgbe_configure_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ ixgbe_init_fdir_signature_82599(&adapter->hw,
+ adapter->fdir_pballoc);
+ } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
+ ixgbe_init_fdir_perfect_82599(&adapter->hw,
+ adapter->fdir_pballoc);
+ ixgbe_fdir_filter_restore(adapter);
+ }
+
+ ixgbe_configure_virtualization(adapter);
+
+ ixgbe_configure_tx(adapter);
+ ixgbe_configure_rx(adapter);
+}
+
+static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
+{
+ switch (hw->phy.type) {
+ case ixgbe_phy_sfp_avago:
+ case ixgbe_phy_sfp_ftl:
+ case ixgbe_phy_sfp_intel:
+ case ixgbe_phy_sfp_unknown:
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
+ case ixgbe_phy_sfp_active_unknown:
+ case ixgbe_phy_sfp_ftl_active:
+ return true;
+ case ixgbe_phy_nl:
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return true;
+ /* fall through */
+ default:
+ return false;
+ }
+}
+
+/**
+ * ixgbe_sfp_link_config - set up SFP+ link
+ * @adapter: pointer to private adapter struct
+ **/
+static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
+{
+ /*
+ * We are assuming the worst case scenario here, and that
+ * is that an SFP was inserted/removed after the reset
+ * but before SFP detection was enabled. As such the best
+ * solution is to just start searching as soon as we start up.
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
+
+ adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
+}
+
+/**
+ * ixgbe_non_sfp_link_config - set up non-SFP+ link
+ * @hw: pointer to private hardware struct
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
+{
+ u32 autoneg;
+ bool negotiation, link_up = false;
+ u32 ret = IXGBE_ERR_LINK_SETUP;
+
+ if (hw->mac.ops.check_link)
+ ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+
+ if (ret)
+ goto link_cfg_out;
+
+ autoneg = hw->phy.autoneg_advertised;
+ if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
+ ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
+ &negotiation);
+ if (ret)
+ goto link_cfg_out;
+
+ if (hw->mac.ops.setup_link)
+ ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
+link_cfg_out:
+ return ret;
+}
+
+static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gpie = 0;
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
+ IXGBE_GPIE_OCD;
+ gpie |= IXGBE_GPIE_EIAME;
+ /*
+ * use EIAM to auto-mask when MSI-X interrupt is asserted
+ * this saves a register write for every interrupt
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ default:
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+ break;
+ }
+ } else {
+ /* legacy interrupts, use EIAM to auto-mask when reading EICR,
+ * specifically only auto mask tx and rx interrupts */
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+ }
+
+ /* XXX: to interrupt immediately for EICS writes, enable this */
+ /* gpie |= IXGBE_GPIE_EIMEN; */
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ gpie |= IXGBE_GPIE_VTMODE_64;
+ }
+
+ /* Enable Thermal over heat sensor interrupt */
+ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ gpie |= IXGBE_SDP0_GPIEN;
+ break;
+ case ixgbe_mac_X540:
+ gpie |= IXGBE_EIMS_TS;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Enable fan failure interrupt */
+ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
+ gpie |= IXGBE_SDP1_GPIEN;
+
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ gpie |= IXGBE_SDP1_GPIEN;
+ gpie |= IXGBE_SDP2_GPIEN;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+}
+
+static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+ u32 ctrl_ext;
+
+ ixgbe_get_hw_control(adapter);
+ ixgbe_setup_gpie(adapter);
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ ixgbe_configure_msix(adapter);
+ else
+ ixgbe_configure_msi_and_legacy(adapter);
+
+ /* enable the optics for both multispeed fiber and 82599 SFP+ fiber */
+ if (hw->mac.ops.enable_tx_laser &&
+ ((hw->phy.multispeed_fiber) ||
+ ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+ (hw->mac.type == ixgbe_mac_82599EB))))
+ hw->mac.ops.enable_tx_laser(hw);
+
+ clear_bit(__IXGBE_DOWN, &adapter->state);
+ ixgbe_napi_enable_all(adapter);
+
+ if (ixgbe_is_sfp(hw)) {
+ ixgbe_sfp_link_config(adapter);
+ } else {
+ err = ixgbe_non_sfp_link_config(hw);
+ if (err)
+ e_err(probe, "link_config FAILED %d\n", err);
+ }
+
+ /* clear any pending interrupts, may auto mask */
+ IXGBE_READ_REG(hw, IXGBE_EICR);
+ ixgbe_irq_enable(adapter, true, true);
+
+ /*
+ * If this adapter has a fan, check to see if we had a failure
+ * before we enabled the interrupt.
+ */
+ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
+ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ e_crit(drv, "Fan has stopped, replace the adapter\n");
+ }
+
+ /* enable transmits */
+ netif_tx_start_all_queues(adapter->netdev);
+
+ /* bring the link up in the watchdog, this could race with our first
+ * link up interrupt but shouldn't be a problem */
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ mod_timer(&adapter->service_timer, jiffies);
+
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+}
+
+void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
+{
+ WARN_ON(in_interrupt());
+ /* put off any impending NetWatchDogTimeout */
+ adapter->netdev->trans_start = jiffies;
+
+ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+ ixgbe_down(adapter);
+ /*
+ * If SR-IOV enabled then wait a bit before bringing the adapter
+ * back up to give the VFs time to respond to the reset. The
+ * two second wait is based upon the watchdog timer cycle in
+ * the VF driver.
+ */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ msleep(2000);
+ ixgbe_up(adapter);
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
+}
+
+void ixgbe_up(struct ixgbe_adapter *adapter)
+{
+ /* hardware has been reset, we need to reload some things */
+ ixgbe_configure(adapter);
+
+ ixgbe_up_complete(adapter);
+}
+
+void ixgbe_reset(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ /* lock SFP init bit to prevent race conditions with the watchdog */
+ while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ usleep_range(1000, 2000);
+
+ /* clear all SFP and link config related flags while holding SFP_INIT */
+ adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
+ IXGBE_FLAG2_SFP_NEEDS_RESET);
+ adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
+
+ err = hw->mac.ops.init_hw(hw);
+ switch (err) {
+ case 0:
+ case IXGBE_ERR_SFP_NOT_PRESENT:
+ case IXGBE_ERR_SFP_NOT_SUPPORTED:
+ break;
+ case IXGBE_ERR_MASTER_REQUESTS_PENDING:
+ e_dev_err("master disable timed out\n");
+ break;
+ case IXGBE_ERR_EEPROM_VERSION:
+ /* We are running on a pre-production device, log a warning */
+ e_dev_warn("This device is a pre-production adapter/LOM. "
+ "Please be aware there may be issuesassociated with "
+ "your hardware. If you are experiencing problems "
+ "please contact your Intel or hardware "
+ "representative who provided you with this "
+ "hardware.\n");
+ break;
+ default:
+ e_dev_err("Hardware Error: %d\n", err);
+ }
+
+ clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
+
+ /* reprogram the RAR[0] in case user changed it. */
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+ IXGBE_RAH_AV);
+}
+
+/**
+ * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ unsigned long size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_buffer_info)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ struct ixgbe_rx_buffer *rx_buffer_info;
+
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ if (rx_buffer_info->dma) {
+ dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_buffer_info->dma = 0;
+ }
+ if (rx_buffer_info->skb) {
+ struct sk_buff *skb = rx_buffer_info->skb;
+ rx_buffer_info->skb = NULL;
+ do {
+ struct sk_buff *this = skb;
+ if (IXGBE_RSC_CB(this)->delay_unmap) {
+ dma_unmap_single(dev,
+ IXGBE_RSC_CB(this)->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ IXGBE_RSC_CB(this)->dma = 0;
+ IXGBE_RSC_CB(skb)->delay_unmap = false;
+ }
+ skb = skb->prev;
+ dev_kfree_skb(this);
+ } while (skb);
+ }
+ if (!rx_buffer_info->page)
+ continue;
+ if (rx_buffer_info->page_dma) {
+ dma_unmap_page(dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
+ rx_buffer_info->page_dma = 0;
+ }
+ put_page(rx_buffer_info->page);
+ rx_buffer_info->page = NULL;
+ rx_buffer_info->page_offset = 0;
+ }
+
+ size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+/**
+ * ixgbe_clean_tx_ring - Free Tx Buffers
+ * @tx_ring: ring to be cleaned
+ **/
+static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
+{
+ struct ixgbe_tx_buffer *tx_buffer_info;
+ unsigned long size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!tx_ring->tx_buffer_info)
+ return;
+
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tx_ring->count; i++) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+ }
+
+ size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+}
+
+/**
+ * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbe_clean_rx_ring(adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbe_clean_tx_ring(adapter->tx_ring[i]);
+}
+
+static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
+{
+ struct hlist_node *node, *node2;
+ struct ixgbe_fdir_filter *filter;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+
+ hlist_for_each_entry_safe(filter, node, node2,
+ &adapter->fdir_filter_list, fdir_node) {
+ hlist_del(&filter->fdir_node);
+ kfree(filter);
+ }
+ adapter->fdir_filter_count = 0;
+
+ spin_unlock(&adapter->fdir_perfect_lock);
+}
+
+void ixgbe_down(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rxctrl;
+ int i;
+
+ /* signal that we are down to the interrupt handler */
+ set_bit(__IXGBE_DOWN, &adapter->state);
+
+ /* disable receives */
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
+
+ /* disable all enabled rx queues */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ /* this call also flushes the previous write */
+ ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+
+ usleep_range(10000, 20000);
+
+ netif_tx_stop_all_queues(netdev);
+
+ /* call carrier off first to avoid false dev_watchdog timeouts */
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ ixgbe_irq_disable(adapter);
+
+ ixgbe_napi_disable_all(adapter);
+
+ adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
+ IXGBE_FLAG2_RESET_REQUESTED);
+ adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
+
+ del_timer_sync(&adapter->service_timer);
+
+ if (adapter->num_vfs) {
+ /* Clear EITR Select mapping */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
+
+ /* Mark all the VFs as inactive */
+ for (i = 0 ; i < adapter->num_vfs; i++)
+ adapter->vfinfo[i].clear_to_send = 0;
+
+ /* ping all the active vfs to let them know we are going down */
+ ixgbe_ping_all_vfs(adapter);
+
+ /* Disable all VFTE/VFRE TX/RX */
+ ixgbe_disable_tx_rx(adapter);
+ }
+
+ /* disable transmits in the hardware now that interrupts are off */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+ }
+
+ /* Disable the Tx DMA engine on 82599 and X540 */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
+ (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
+ ~IXGBE_DMATXCTL_TE));
+ break;
+ default:
+ break;
+ }
+
+ if (!pci_channel_offline(adapter->pdev))
+ ixgbe_reset(adapter);
+
+ /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
+ if (hw->mac.ops.disable_tx_laser &&
+ ((hw->phy.multispeed_fiber) ||
+ ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+ (hw->mac.type == ixgbe_mac_82599EB))))
+ hw->mac.ops.disable_tx_laser(hw);
+
+ ixgbe_clean_all_tx_rings(adapter);
+ ixgbe_clean_all_rx_rings(adapter);
+
+#ifdef CONFIG_IXGBE_DCA
+ /* since we reset the hardware DCA settings were cleared */
+ ixgbe_setup_dca(adapter);
+#endif
+}
+
+/**
+ * ixgbe_poll - NAPI Rx polling callback
+ * @napi: structure for representing this polling device
+ * @budget: how many packets driver is allowed to clean
+ *
+ * This function is used for legacy and MSI, NAPI mode
+ **/
+static int ixgbe_poll(struct napi_struct *napi, int budget)
+{
+ struct ixgbe_q_vector *q_vector =
+ container_of(napi, struct ixgbe_q_vector, napi);
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ struct ixgbe_ring *ring;
+ int per_ring_budget;
+ bool clean_complete = true;
+
+#ifdef CONFIG_IXGBE_DCA
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ ixgbe_update_dca(q_vector);
+#endif
+
+ for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
+ clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
+
+ /* attempt to distribute budget to each queue fairly, but don't allow
+ * the budget to go below 1 because we'll exit polling */
+ if (q_vector->rx.count > 1)
+ per_ring_budget = max(budget/q_vector->rx.count, 1);
+ else
+ per_ring_budget = budget;
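+
+ /*
+ * Example (editorial): with the default NAPI budget of 64 and a
+ * q_vector driving 4 Rx rings, each ring may clean at most
+ * max(64 / 4, 1) = 16 packets per poll.
+ */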
+
+ for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
+ clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
+ per_ring_budget);
+
+ /* If all work not completed, return budget and keep polling */
+ if (!clean_complete)
+ return budget;
+
+ /* all work done, exit the polling mode */
+ napi_complete(napi);
+ if (adapter->rx_itr_setting & 1)
+ ixgbe_set_itr(q_vector);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+
+ return 0;
+}
+
+/**
+ * ixgbe_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void ixgbe_tx_timeout(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ ixgbe_tx_timeout_reset(adapter);
+}
+
+/**
+ * ixgbe_set_rss_queues: Allocate queues for RSS
+ * @adapter: board private structure to initialize
+ *
+ * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
+ * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
+ *
+ **/
+static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
+{
+ bool ret = false;
+ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
+
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+ f->mask = 0xF;
+ adapter->num_rx_queues = f->indices;
+ adapter->num_tx_queues = f->indices;
+ ret = true;
+ }
+
+ return ret;
+}
+
+/**
+ * ixgbe_set_fdir_queues: Allocate queues for Flow Director
+ * @adapter: board private structure to initialize
+ *
+ * Flow Director is an advanced Rx filter, attempting to get Rx flows back
+ * to the original CPU that initiated the Tx session. This runs in addition
+ * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
+ * Rx load across CPUs using RSS.
+ *
+ **/
+static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
+{
+ bool ret = false;
+ struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
+
+ f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
+ f_fdir->mask = 0;
+
+ /* Flow Director must have RSS enabled */
+ if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+ (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
+ adapter->num_tx_queues = f_fdir->indices;
+ adapter->num_rx_queues = f_fdir->indices;
+ ret = true;
+ } else {
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ }
+ return ret;
+}
+
+#ifdef IXGBE_FCOE
+/**
+ * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE)
+ * @adapter: board private structure to initialize
+ *
+ * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
+ * The ring feature mask is not used as a mask for FCoE, as it can take any 8
+ * rx queues out of the max number of rx queues, instead, it is used as the
+ * index of the first rx queue used by FCoE.
+ *
+ **/
+static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ return false;
+
+ f->indices = min((int)num_online_cpus(), f->indices);
+
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+ e_info(probe, "FCoE enabled with RSS\n");
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+ ixgbe_set_fdir_queues(adapter);
+ else
+ ixgbe_set_rss_queues(adapter);
+ }
+
+ /* adding FCoE rx rings to the end */
+ f->mask = adapter->num_rx_queues;
+ adapter->num_rx_queues += f->indices;
+ adapter->num_tx_queues += f->indices;
+
+ return true;
+}
+#endif /* IXGBE_FCOE */
+
+/* Artificial max queue cap per traffic class in DCB mode */
+#define DCB_QUEUE_CAP 8
+
+#ifdef CONFIG_IXGBE_DCB
+static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
+{
+ int per_tc_q, q, i, offset = 0;
+ struct net_device *dev = adapter->netdev;
+ int tcs = netdev_get_num_tc(dev);
+
+ if (!tcs)
+ return false;
+
+ /* Map queue offset and counts onto allocated tx queues */
+ per_tc_q = min(dev->num_tx_queues / tcs, (unsigned int)DCB_QUEUE_CAP);
+ q = min((int)num_online_cpus(), per_tc_q);
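+
+ /*
+ * Example (editorial, values assumed): with 64 hardware Tx queues,
+ * tcs = 8 and 16 online CPUs, per_tc_q = min(64 / 8, 8) = 8 and
+ * q = min(16, 8) = 8, so each TC is given 8 queue pairs at
+ * offsets 0, 8, 16, ..., 56.
+ */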
+
+ for (i = 0; i < tcs; i++) {
+ netdev_set_tc_queue(dev, i, q, offset);
+ offset += q;
+ }
+
+ adapter->num_tx_queues = q * tcs;
+ adapter->num_rx_queues = q * tcs;
+
+#ifdef IXGBE_FCOE
+ /* FCoE enabled queues require special configuration indexed
+ * by feature specific indices and mask. Here we map FCoE
+ * indices onto the DCB queue pairs allowing FCoE to own
+ * configuration later.
+ */
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ int tc;
+ struct ixgbe_ring_feature *f =
+ &adapter->ring_feature[RING_F_FCOE];
+
+ tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
+ f->indices = dev->tc_to_txq[tc].count;
+ f->mask = dev->tc_to_txq[tc].offset;
+ }
+#endif
+
+ return true;
+}
+#endif
+
+/**
+ * ixgbe_set_sriov_queues: Allocate queues for IOV use
+ * @adapter: board private structure to initialize
+ *
+ * IOV doesn't actually use anything, so just NAK the
+ * request for now and let the other queue routines
+ * figure out what to do.
+ */
+static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
+{
+ return false;
+}
+
+/*
+ * ixgbe_set_num_queues: Allocate queues for device, feature dependent
+ * @adapter: board private structure to initialize
+ *
+ * This is the top level queue allocation routine. The order here is very
+ * important, starting with the "most" number of features turned on at once,
+ * and ending with the smallest set of features. This way large combinations
+ * can be allocated if they're turned on, and smaller combinations are the
+ * fallthrough conditions.
+ *
+ **/
+static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
+{
+ /* Start with base case */
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_queues_per_pool = 1;
+
+ if (ixgbe_set_sriov_queues(adapter))
+ goto done;
+
+#ifdef CONFIG_IXGBE_DCB
+ if (ixgbe_set_dcb_queues(adapter))
+ goto done;
+
+#endif
+#ifdef IXGBE_FCOE
+ if (ixgbe_set_fcoe_queues(adapter))
+ goto done;
+
+#endif /* IXGBE_FCOE */
+ if (ixgbe_set_fdir_queues(adapter))
+ goto done;
+
+ if (ixgbe_set_rss_queues(adapter))
+ goto done;
+
+ /* fallback to base case */
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+
+done:
+ /* Notify the stack of the (possibly) reduced queue counts. */
+ netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
+ return netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_queues);
+}
+
+static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
+ int vectors)
+{
+ int err, vector_threshold;
+
+ /* We'll want at least 3 (vector_threshold):
+ * 1) TxQ[0] Cleanup
+ * 2) RxQ[0] Cleanup
+ * 3) Other (Link Status Change, etc.)
+ * 4) TCP Timer (optional)
+ */
+ vector_threshold = MIN_MSIX_COUNT;
+
+ /* The more we get, the more we will assign to Tx/Rx Cleanup
+ * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+ * Right now, we simply care about how many we'll get; we'll
+ * set them up later while requesting IRQs.
+ */
+ while (vectors >= vector_threshold) {
+ err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+ vectors);
+ if (!err) /* Success in acquiring all requested vectors. */
+ break;
+ else if (err < 0)
+ vectors = 0; /* Nasty failure, quit now */
+ else /* err == number of vectors we should try again with */
+ vectors = err;
+ }
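+
+ /*
+ * Editorial note on the loop above: at this point in the kernel,
+ * pci_enable_msix() returns 0 on success, a negative errno on hard
+ * failure, or the positive number of vectors that could have been
+ * allocated; e.g. requesting 10 when only 6 are free returns 6,
+ * and the loop retries with vectors = 6.
+ */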
+
+ if (vectors < vector_threshold) {
+ /* Can't allocate enough MSI-X interrupts? Oh well.
+ * This just means we'll go with either a single MSI
+ * vector or fall back to legacy interrupts.
+ */
+ netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
+ "Unable to allocate MSI-X interrupts\n");
+ adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else {
+ adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
+ /*
+ * Adjust for only the vectors we'll use, which is minimum
+ * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = min(vectors,
+ adapter->max_msix_q_vectors + NON_Q_VECTORS);
+ }
+}
+
+/**
+ * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for RSS to the assigned rings.
+ *
+ **/
+static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
+{
+ int i;
+
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ return false;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = i;
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->reg_idx = i;
+
+ return true;
+}
+
+#ifdef CONFIG_IXGBE_DCB
+
+/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
+static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
+ unsigned int *tx, unsigned int *rx)
+{
+ struct net_device *dev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 num_tcs = netdev_get_num_tc(dev);
+
+ *tx = 0;
+ *rx = 0;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ *tx = tc << 2;
+ *rx = tc << 3;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (num_tcs > 4) {
+ if (tc < 3) {
+ *tx = tc << 5;
+ *rx = tc << 4;
+ } else if (tc < 5) {
+ *tx = ((tc + 2) << 4);
+ *rx = tc << 4;
+ } else if (tc < num_tcs) {
+ *tx = ((tc + 8) << 3);
+ *rx = tc << 4;
+ }
+ } else {
+ *rx = tc << 5;
+ switch (tc) {
+ case 0:
+ *tx = 0;
+ break;
+ case 1:
+ *tx = 64;
+ break;
+ case 2:
+ *tx = 96;
+ break;
+ case 3:
+ *tx = 112;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
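+
+/*
+ * Editorial example of the mapping above for 82599/X540 with
+ * num_tcs = 8: tc = 1 gives a Tx base of 32 (1 << 5) and an Rx
+ * base of 16 (1 << 4); tc = 5 gives a Tx base of 104
+ * ((5 + 8) << 3) and an Rx base of 80.
+ */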
+
+/**
+ * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for DCB to the assigned rings.
+ *
+ **/
+static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
+{
+ struct net_device *dev = adapter->netdev;
+ int i, j, k;
+ u8 num_tcs = netdev_get_num_tc(dev);
+
+ if (!num_tcs)
+ return false;
+
+ for (i = 0, k = 0; i < num_tcs; i++) {
+ unsigned int tx_s, rx_s;
+ u16 count = dev->tc_to_txq[i].count;
+
+ ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
+ for (j = 0; j < count; j++, k++) {
+ adapter->tx_ring[k]->reg_idx = tx_s + j;
+ adapter->rx_ring[k]->reg_idx = rx_s + j;
+ adapter->tx_ring[k]->dcb_tc = i;
+ adapter->rx_ring[k]->dcb_tc = i;
+ }
+ }
+
+ return true;
+}
+#endif
+
+/**
+ * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for Flow Director to the assigned rings.
+ *
+ **/
+static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
+{
+ int i;
+ bool ret = false;
+
+ if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+ (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = i;
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->reg_idx = i;
+ ret = true;
+ }
+
+ return ret;
+}
+
+#ifdef IXGBE_FCOE
+/**
+ * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
+ *
+ */
+static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+ int i;
+ u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
+
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ return false;
+
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+ ixgbe_cache_ring_fdir(adapter);
+ else
+ ixgbe_cache_ring_rss(adapter);
+
+ fcoe_rx_i = f->mask;
+ fcoe_tx_i = f->mask;
+ }
+ for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
+ adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
+ adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
+ }
+ return true;
+}
+
+#endif /* IXGBE_FCOE */
+/**
+ * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
+ * @adapter: board private structure to initialize
+ *
+ * SR-IOV doesn't use any descriptor rings but changes the default if
+ * no other mapping is used.
+ *
+ */
+static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
+{
+ adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
+ adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
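+
+ /*
+ * Editorial example (layout assumed from the math above): each VF
+ * pool spans two queue registers here, so with num_vfs = 32 the
+ * PF's ring pair begins at register index 64.
+ */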
+ if (adapter->num_vfs)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * ixgbe_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ *
+ * Note, the order the various feature calls is important. It must start with
+ * the "most" features enabled at the same time, then trickle down to the
+ * least amount of features turned on at once.
+ **/
+static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
+{
+ /* start with default case */
+ adapter->rx_ring[0]->reg_idx = 0;
+ adapter->tx_ring[0]->reg_idx = 0;
+
+ if (ixgbe_cache_ring_sriov(adapter))
+ return;
+
+#ifdef CONFIG_IXGBE_DCB
+ if (ixgbe_cache_ring_dcb(adapter))
+ return;
+#endif
+
+#ifdef IXGBE_FCOE
+ if (ixgbe_cache_ring_fcoe(adapter))
+ return;
+#endif /* IXGBE_FCOE */
+
+ if (ixgbe_cache_ring_fdir(adapter))
+ return;
+
+ if (ixgbe_cache_ring_rss(adapter))
+ return;
+}
+
+/**
+ * ixgbe_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time. Rings are allocated on the
+ * adapter's NUMA node when possible, falling back to any node.
+ **/
+static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
+{
+ int rx = 0, tx = 0, nid = adapter->node;
+
+ if (nid < 0 || !node_online(nid))
+ nid = first_online_node;
+
+ for (; tx < adapter->num_tx_queues; tx++) {
+ struct ixgbe_ring *ring;
+
+ ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
+ if (!ring)
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ goto err_allocation;
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = tx;
+ ring->numa_node = nid;
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ adapter->tx_ring[tx] = ring;
+ }
+
+ for (; rx < adapter->num_rx_queues; rx++) {
+ struct ixgbe_ring *ring;
+
+ ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
+ if (!ring)
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ goto err_allocation;
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rx;
+ ring->numa_node = nid;
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ adapter->rx_ring[rx] = ring;
+ }
+
+ ixgbe_cache_ring_register(adapter);
+
+ return 0;
+
+err_allocation:
+ while (tx)
+ kfree(adapter->tx_ring[--tx]);
+
+ while (rx)
+ kfree(adapter->rx_ring[--rx]);
+ return -ENOMEM;
+}
+
+/**
+ * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err = 0;
+ int vector, v_budget;
+
+ /*
+ * It's easy to be greedy for MSI-X vectors, but it really
+ * doesn't do us much good if we have a lot more vectors
+ * than CPUs. So let's be conservative and only ask for
+ * (roughly) the same number of vectors as there are CPUs.
+ */
+ v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
+ (int)num_online_cpus()) + NON_Q_VECTORS;
+
+ /*
+ * At the same time, hardware can only support a maximum of
+ * hw.mac->max_msix_vectors vectors. With features
+ * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
+ * descriptor queues supported by our device. Thus, we cap it off in
+ * those rare cases where the cpu count also exceeds our vector limit.
+ */
+ v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
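+
+ /*
+ * Example (editorial, values assumed): on a 16-CPU system with
+ * 16 Rx and 16 Tx queues, v_budget starts at
+ * min(32, 16) + NON_Q_VECTORS and is then clamped to the
+ * hardware's max_msix_vectors limit above.
+ */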
+
+ /* A failure in MSI-X entry allocation isn't fatal, but it does
+ * mean we disable MSI-X capabilities of the adapter. */
+ adapter->msix_entries = kcalloc(v_budget,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (adapter->msix_entries) {
+ for (vector = 0; vector < v_budget; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+ ixgbe_acquire_msix_vectors(adapter, v_budget);
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ goto out;
+ }
+
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ e_err(probe,
+ "ATR is not supported while multiple "
+ "queues are disabled. Disabling Flow Director\n");
+ }
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->atr_sample_rate = 0;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
+
+ err = ixgbe_set_num_queues(adapter);
+ if (err)
+ return err;
+
+ err = pci_enable_msi(adapter->pdev);
+ if (!err) {
+ adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
+ } else {
+ netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
+ "Unable to allocate MSI interrupt, "
+ "falling back to legacy. Error: %d\n", err);
+ /* reset err */
+ err = 0;
+ }
+
+out:
+ return err;
+}
+
+/**
+ * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
+{
+ int v_idx, num_q_vectors;
+ struct ixgbe_q_vector *q_vector;
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ else
+ num_q_vectors = 1;
+
+ for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+ q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
+ GFP_KERNEL, adapter->node);
+ if (!q_vector)
+ q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
+ GFP_KERNEL);
+ if (!q_vector)
+ goto err_out;
+
+ q_vector->adapter = adapter;
+ q_vector->v_idx = v_idx;
+
+ /* Allocate the affinity_hint cpumask, configure the mask */
+ if (!alloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
+ goto err_out;
+ cpumask_set_cpu(v_idx, q_vector->affinity_mask);
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ ixgbe_poll, 64);
+ adapter->q_vector[v_idx] = q_vector;
+ }
+
+ return 0;
+
+err_out:
+ while (v_idx) {
+ v_idx--;
+ q_vector = adapter->q_vector[v_idx];
+ netif_napi_del(&q_vector->napi);
+ free_cpumask_var(q_vector->affinity_mask);
+ kfree(q_vector);
+ adapter->q_vector[v_idx] = NULL;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
+{
+ int v_idx, num_q_vectors;
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ else
+ num_q_vectors = 1;
+
+ for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
+ adapter->q_vector[v_idx] = NULL;
+ netif_napi_del(&q_vector->napi);
+ free_cpumask_var(q_vector->affinity_mask);
+ kfree(q_vector);
+ }
+}
+
+static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
+{
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+ adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
+ pci_disable_msi(adapter->pdev);
+ }
+}
+
+/**
+ * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
+ * @adapter: board private structure to initialize
+ *
+ * We determine which interrupt scheme to use based on...
+ * - Kernel support (MSI, MSI-X)
+ * - which can be user-defined (via MODULE_PARAM)
+ * - Hardware queue count (num_*_queues)
+ * - defined by miscellaneous hardware support/features (RSS, etc.)
+ **/
+int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
+{
+ int err;
+
+ /* Number of supported queues */
+ err = ixgbe_set_num_queues(adapter);
+ if (err)
+ return err;
+
+ err = ixgbe_set_interrupt_capability(adapter);
+ if (err) {
+ e_dev_err("Unable to setup interrupt capabilities\n");
+ goto err_set_interrupt;
+ }
+
+ err = ixgbe_alloc_q_vectors(adapter);
+ if (err) {
+ e_dev_err("Unable to allocate memory for queue vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ err = ixgbe_alloc_queues(adapter);
+ if (err) {
+ e_dev_err("Unable to allocate memory for queues\n");
+ goto err_alloc_queues;
+ }
+
+ e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
+ (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
+ adapter->num_rx_queues, adapter->num_tx_queues);
+
+ set_bit(__IXGBE_DOWN, &adapter->state);
+
+ return 0;
+
+err_alloc_queues:
+ ixgbe_free_q_vectors(adapter);
+err_alloc_q_vectors:
+ ixgbe_reset_interrupt_capability(adapter);
+err_set_interrupt:
+ return err;
+}
+
+/**
+ * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @adapter: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ kfree(adapter->tx_ring[i]);
+ adapter->tx_ring[i] = NULL;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+ /* ixgbe_get_stats64() might access this ring, we must wait
+ * a grace period before freeing it.
+ */
+ kfree_rcu(ring, rcu);
+ adapter->rx_ring[i] = NULL;
+ }
+
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+
+ ixgbe_free_q_vectors(adapter);
+ ixgbe_reset_interrupt_capability(adapter);
+}
+
+/**
+ * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * ixgbe_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned int rss;
+#ifdef CONFIG_IXGBE_DCB
+ int j;
+ struct tc_configuration *tc;
+#endif
+
+ /* PCI config space info */
+
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->revision_id = pdev->revision;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+
+ /* Set capability flags */
+ rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
+ adapter->ring_feature[RING_F_RSS].indices = rss;
+ adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ if (hw->device_id == IXGBE_DEV_ID_82598AT)
+ adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
+ adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
+ break;
+ case ixgbe_mac_X540:
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
+ /* fall through */
+ case ixgbe_mac_82599EB:
+ adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
+ adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
+ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
+ /* Flow Director hash filters enabled */
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->atr_sample_rate = 20;
+ adapter->ring_feature[RING_F_FDIR].indices =
+ IXGBE_MAX_FDIR_INDICES;
+ adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
+#ifdef IXGBE_FCOE
+ adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+ adapter->ring_feature[RING_F_FCOE].indices = 0;
+#ifdef CONFIG_IXGBE_DCB
+ /* Default traffic class to use for FCoE */
+ adapter->fcoe.up = IXGBE_FCOE_DEFTC;
+#endif
+#endif /* IXGBE_FCOE */
+ break;
+ default:
+ break;
+ }
+
+ /* n-tuple support exists, always init our spinlock */
+ spin_lock_init(&adapter->fdir_perfect_lock);
+
+#ifdef CONFIG_IXGBE_DCB
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
+ adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
+ break;
+ default:
+ adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
+ break;
+ }
+
+ /* Configure DCB traffic classes */
+ for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+ tc = &adapter->dcb_cfg.tc_config[j];
+ tc->path[DCB_TX_CONFIG].bwg_id = 0;
+ tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
+ tc->path[DCB_RX_CONFIG].bwg_id = 0;
+ tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
+ tc->dcb_pfc = pfc_disabled;
+ }
+
+ /* Initialize default user to priority mapping, UPx->TC0 */
+ tc = &adapter->dcb_cfg.tc_config[0];
+ tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+ tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+
+ adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
+ adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
+ adapter->dcb_cfg.pfc_mode_enable = false;
+ adapter->dcb_set_bitmap = 0x00;
+ adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
+ ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
+ MAX_TRAFFIC_CLASS);
+
+#endif
+
+ /* default flow control settings */
+ hw->fc.requested_mode = ixgbe_fc_full;
+ hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
+#ifdef CONFIG_DCB
+ adapter->last_lfc_mode = hw->fc.current_mode;
+#endif
+ ixgbe_pbthresh_setup(adapter);
+ hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
+ hw->fc.send_xon = true;
+ hw->fc.disable_fc_autoneg = false;
+
+ /* enable itr by default in dynamic mode */
+ adapter->rx_itr_setting = 1;
+ adapter->tx_itr_setting = 1;
+
+ /* set defaults for eitr in MegaBytes */
+ adapter->eitr_low = 10;
+ adapter->eitr_high = 20;
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
+ adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
+
+ /* set default work limits */
+ adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
+
+ /* initialize eeprom parameters */
+ if (ixgbe_init_eeprom_params_generic(hw)) {
+ e_dev_err("EEPROM initialization failed\n");
+ return -EIO;
+ }
+
+ /* enable rx csum by default */
+ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+
+ /* get assigned NUMA node */
+ adapter->node = dev_to_node(&pdev->dev);
+
+ set_bit(__IXGBE_DOWN, &adapter->state);
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int size;
+
+ size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
+ tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
+ if (!tx_ring->tx_buffer_info)
+ tx_ring->tx_buffer_info = vzalloc(size);
+ if (!tx_ring->tx_buffer_info)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc)
+ goto err;
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ return 0;
+
+err:
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
+ return -ENOMEM;
+}
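+
+/* Illustrative sketch (not part of this driver): the power-of-two
+ * round-up performed by ALIGN() above. For example, 512 16-byte
+ * descriptors occupy 8192 bytes, already a 4K multiple; 513 descriptors
+ * (8208 bytes) round up to 12288.
+ */
+#if 0
+static unsigned long align_up(unsigned long x, unsigned long a)
+{
+ /* a must be a power of two, as 4096 is */
+ return (x + a - 1) & ~(a - 1);
+}
+#endif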
+
+/**
+ * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
+ if (!err)
+ continue;
+ e_err(probe, "Allocation for Tx Queue %u failed\n", i);
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ int size;
+
+ size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
+ rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
+ if (!rx_ring->rx_buffer_info)
+ rx_ring->rx_buffer_info = vzalloc(size);
+ if (!rx_ring->rx_buffer_info)
+ goto err;
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc)
+ goto err;
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ return 0;
+err:
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
+ if (!err)
+ continue;
+ e_err(probe, "Allocation for Rx Queue %u failed\n", i);
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
+{
+ ixgbe_clean_tx_ring(tx_ring);
+
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ if (adapter->tx_ring[i]->desc)
+ ixgbe_free_tx_resources(adapter->tx_ring[i]);
+}
+
+/**
+ * ixgbe_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
+{
+ ixgbe_clean_rx_ring(rx_ring);
+
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!rx_ring->desc)
+ return;
+
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
+
+ rx_ring->desc = NULL;
+}
+
+/**
+ * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ if (adapter->rx_ring[i]->desc)
+ ixgbe_free_rx_resources(adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbe_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED &&
+ hw->mac.type != ixgbe_mac_X540) {
+ if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+ return -EINVAL;
+ } else {
+ if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
+ return -EINVAL;
+ }
+
+ e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
+ /* must set new MTU before calling down or up */
+ netdev->mtu = new_mtu;
+
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+
+ return 0;
+}
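+
+/* Worked example for the max_frame check above: ETH_HLEN is 14 and
+ * ETH_FCS_LEN is 4, so an MTU of 1500 yields a 1518-byte frame, within
+ * the 1522-byte MAXIMUM_ETHERNET_VLAN_SIZE limit applied when SR-IOV is
+ * enabled on non-X540 MACs; a 9000-byte MTU yields 9018 bytes, which only
+ * the jumbo-frame limit accepts.
+ */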
+
+/**
+ * ixgbe_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int ixgbe_open(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__IXGBE_TESTING, &adapter->state))
+ return -EBUSY;
+
+ netif_carrier_off(netdev);
+
+ /* allocate transmit descriptors */
+ err = ixgbe_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = ixgbe_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ ixgbe_configure(adapter);
+
+ err = ixgbe_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ ixgbe_up_complete(adapter);
+
+ return 0;
+
+err_req_irq:
+err_setup_rx:
+ ixgbe_free_all_rx_resources(adapter);
+err_setup_tx:
+ ixgbe_free_all_tx_resources(adapter);
+ ixgbe_reset(adapter);
+
+ return err;
+}
+
+/**
+ * ixgbe_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int ixgbe_close(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ ixgbe_down(adapter);
+ ixgbe_free_irq(adapter);
+
+ ixgbe_fdir_filter_exit(adapter);
+
+ ixgbe_free_all_tx_resources(adapter);
+ ixgbe_free_all_rx_resources(adapter);
+
+ ixgbe_release_hw_control(adapter);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ixgbe_resume(struct pci_dev *pdev)
+{
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ u32 err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ /*
+ * pci_restore_state clears dev->state_saved so call
+ * pci_save_state to restore it.
+ */
+ pci_save_state(pdev);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ e_dev_err("Cannot enable PCI device from suspend\n");
+ return err;
+ }
+ pci_set_master(pdev);
+
+ pci_wake_from_d3(pdev, false);
+
+ err = ixgbe_init_interrupt_scheme(adapter);
+ if (err) {
+ e_dev_err("Cannot initialize interrupts for device\n");
+ return err;
+ }
+
+ ixgbe_reset(adapter);
+
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+
+ if (netif_running(netdev)) {
+ err = ixgbe_open(netdev);
+ if (err)
+ return err;
+ }
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
+{
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 ctrl, fctrl;
+ u32 wufc = adapter->wol;
+#ifdef CONFIG_PM
+ int retval = 0;
+#endif
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ ixgbe_down(adapter);
+ ixgbe_free_irq(adapter);
+ ixgbe_free_all_tx_resources(adapter);
+ ixgbe_free_all_rx_resources(adapter);
+ }
+
+ ixgbe_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_DCB
+ kfree(adapter->ixgbe_ieee_pfc);
+ kfree(adapter->ixgbe_ieee_ets);
+#endif
+
+#ifdef CONFIG_PM
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+#endif
+ if (wufc) {
+ ixgbe_set_rx_mode(netdev);
+
+ /* turn on all-multi mode if wake on multicast is enabled */
+ if (wufc & IXGBE_WUFC_MC) {
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ fctrl |= IXGBE_FCTRL_MPE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ }
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ ctrl |= IXGBE_CTRL_GIO_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+
+ IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
+ }
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ pci_wake_from_d3(pdev, false);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ pci_wake_from_d3(pdev, !!wufc);
+ break;
+ default:
+ break;
+ }
+
+ *enable_wake = !!wufc;
+
+ ixgbe_release_hw_control(adapter);
+
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int retval;
+ bool wake;
+
+ retval = __ixgbe_shutdown(pdev, &wake);
+ if (retval)
+ return retval;
+
+ if (wake) {
+ pci_prepare_to_sleep(pdev);
+ } else {
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static void ixgbe_shutdown(struct pci_dev *pdev)
+{
+ bool wake;
+
+ __ixgbe_shutdown(pdev, &wake);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+/**
+ * ixgbe_update_stats - Update the board statistics counters.
+ * @adapter: board private structure
+ **/
+void ixgbe_update_stats(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
+ u64 total_mpc = 0;
+ u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
+ u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
+ u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+ u64 bytes = 0, packets = 0;
+
+ if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_RESETTING, &adapter->state))
+ return;
+
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+ u64 rsc_count = 0;
+ u64 rsc_flush = 0;
+ for (i = 0; i < 16; i++)
+ adapter->hw_rx_no_dma_resources +=
+ IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
+ rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
+ }
+ adapter->rsc_total_count = rsc_count;
+ adapter->rsc_total_flush = rsc_flush;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+ non_eop_descs += rx_ring->rx_stats.non_eop_descs;
+ alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+ alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+ bytes += rx_ring->stats.bytes;
+ packets += rx_ring->stats.packets;
+ }
+ adapter->non_eop_descs = non_eop_descs;
+ adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+ adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+ netdev->stats.rx_bytes = bytes;
+ netdev->stats.rx_packets = packets;
+
+ bytes = 0;
+ packets = 0;
+ /* gather some stats to the adapter struct that are per queue */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ restart_queue += tx_ring->tx_stats.restart_queue;
+ tx_busy += tx_ring->tx_stats.tx_busy;
+ bytes += tx_ring->stats.bytes;
+ packets += tx_ring->stats.packets;
+ }
+ adapter->restart_queue = restart_queue;
+ adapter->tx_busy = tx_busy;
+ netdev->stats.tx_bytes = bytes;
+ netdev->stats.tx_packets = packets;
+
+ hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+
+ /* 8 register reads */
+ for (i = 0; i < 8; i++) {
+ /* for packet buffers not used, the register should read 0 */
+ mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+ missed_rx += mpc;
+ hwstats->mpc[i] += mpc;
+ total_mpc += hwstats->mpc[i];
+ hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+ hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+ hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+ hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+ hwstats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ hwstats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* 16 register reads */
+ for (i = 0; i < 16; i++) {
+ hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+ hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+ if ((hw->mac.type == ixgbe_mac_82599EB) ||
+ (hw->mac.type == ixgbe_mac_X540)) {
+ hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
+ hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */
+ }
+ }
+
+ hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+ /* work around hardware counting issue */
+ hwstats->gprc -= missed_rx;
+
+ ixgbe_update_xoff_received(adapter);
+
+ /* 82598 hardware only has a 32 bit counter in the high register */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+ hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+ hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+ hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+ break;
+ case ixgbe_mac_X540:
+ /* OS2BMC stats are X540 only */
+ hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
+ hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
+ hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
+ hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
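+ /* fall through */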
+ case ixgbe_mac_82599EB:
+ hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
+ IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
+ hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
+ IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
+ hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
+ IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
+ hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+ hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+ hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+#ifdef IXGBE_FCOE
+ hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
+ hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
+ hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
+ hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
+ hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
+ hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+#endif /* IXGBE_FCOE */
+ break;
+ default:
+ break;
+ }
+ bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
+ hwstats->bprc += bprc;
+ hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ hwstats->mprc -= bprc;
+ hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
+ hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
+ hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
+ hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
+ hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
+ hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
+ hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
+ hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
+ lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+ hwstats->lxontxc += lxon;
+ lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+ hwstats->lxofftxc += lxoff;
+ hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
+ hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
+ /*
+ * 82598 errata - tx of flow control packets is included in tx counters
+ */
+ xon_off_tot = lxon + lxoff;
+ hwstats->gptc -= xon_off_tot;
+ hwstats->mptc -= xon_off_tot;
+ hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
+ hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
+ hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
+ hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
+ hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
+ hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
+ hwstats->ptc64 -= xon_off_tot;
+ hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
+ hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
+ hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
+ hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
+ hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
+ hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
+
+ /* Fill out the OS statistics structure */
+ netdev->stats.multicast = hwstats->mprc;
+
+ /* Rx Errors */
+ netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
+ netdev->stats.rx_dropped = 0;
+ netdev->stats.rx_length_errors = hwstats->rlec;
+ netdev->stats.rx_crc_errors = hwstats->crcerrs;
+ netdev->stats.rx_missed_errors = total_mpc;
+}
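+
+/* Illustrative sketch (not part of this driver; reg_read() is a
+ * hypothetical register accessor): the read-low-then-high pattern used
+ * above. On 82599/X540 several counters are split across low and high
+ * registers that clear on read, so the high half must be read even when
+ * only the low half is accumulated.
+ */
+#if 0
+static void accumulate_split_counter(u64 *total, u32 lo_reg, u32 hi_reg)
+{
+ *total += reg_read(lo_reg); /* accumulate the low 32 bits */
+ (void)reg_read(hi_reg); /* read the high half so the counter clears */
+}
+#endif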
+
+/**
+ * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
+ * @adapter: pointer to the device adapter structure
+ **/
+static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
+ return;
+
+ adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
+
+ /* if interface is down do nothing */
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ return;
+
+ /* do nothing if we are not using signature filters */
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
+ return;
+
+ adapter->fdir_overflow++;
+
+ if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ set_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &(adapter->tx_ring[i]->state));
+ /* re-enable flow director interrupts */
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
+ } else {
+ e_err(probe, "failed to finish FDIR re-initialization, "
+ "ignored adding FDIR ATR filters\n");
+ }
+}
+
+/**
+ * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
+ * @adapter: pointer to the device adapter structure
+ *
+ * This function serves two purposes. First it strobes the interrupt lines
+ * in order to make certain interrupts are occurring. Secondly it sets the
+ * bits needed to check for TX hangs. As a result we should immediately
+ * determine if a hang has occurred.
+ */
+static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 eics = 0;
+ int i;
+
+ /* If we're down or resetting, just bail */
+ if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_RESETTING, &adapter->state))
+ return;
+
+ /* Force detection of hung controller */
+ if (netif_carrier_ok(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ set_check_for_tx_hang(adapter->tx_ring[i]);
+ }
+
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+ /*
+ * for legacy and MSI interrupts don't set any bits
+ * that are enabled for EIAM, because this operation
+ * would set *both* EIMS and EICS for any bit in EIAM
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_EICS,
+ (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
+ } else {
+ /* get one bit for every active tx/rx interrupt vector */
+ for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+ struct ixgbe_q_vector *qv = adapter->q_vector[i];
+ if (qv->rx.ring || qv->tx.ring)
+ eics |= ((u64)1 << i);
+ }
+ }
+
+ /* Cause software interrupt to ensure rings are cleaned */
+ ixgbe_irq_rearm_queues(adapter, eics);
+
+}
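+
+/* Worked example for the EICS mask built above: with four MSI-X queue
+ * vectors that each carry at least one ring, the loop yields eics = 0xf;
+ * a vector with neither an Rx nor a Tx ring contributes no bit and is
+ * not strobed.
+ */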
+
+/**
+ * ixgbe_watchdog_update_link - update the link status
+ * @adapter: pointer to the device adapter structure
+ **/
+static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = adapter->link_speed;
+ bool link_up = adapter->link_up;
+ int i;
+
+ if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
+ return;
+
+ if (hw->mac.ops.check_link) {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ } else {
+ /* always assume link is up, if no check link function */
+ link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ link_up = true;
+ }
+ if (link_up) {
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+ hw->mac.ops.fc_enable(hw, i);
+ } else {
+ hw->mac.ops.fc_enable(hw, 0);
+ }
+ }
+
+ if (link_up ||
+ time_after(jiffies, (adapter->link_check_timeout +
+ IXGBE_TRY_LINK_TIMEOUT))) {
+ adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ adapter->link_up = link_up;
+ adapter->link_speed = link_speed;
+}
+
+/**
+ * ixgbe_watchdog_link_is_up - update netif_carrier status and
+ * print link up message
+ * @adapter: pointer to the device adapter structure
+ **/
+static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = adapter->link_speed;
+ bool flow_rx, flow_tx;
+
+ /* only continue if link was previously down */
+ if (netif_carrier_ok(netdev))
+ return;
+
+ adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB: {
+ u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
+ flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
+ flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
+ }
+ break;
+ case ixgbe_mac_X540:
+ case ixgbe_mac_82599EB: {
+ u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+ flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
+ flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
+ }
+ break;
+ default:
+ flow_tx = false;
+ flow_rx = false;
+ break;
+ }
+ e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
+ (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
+ "10 Gbps" :
+ (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
+ "1 Gbps" :
+ (link_speed == IXGBE_LINK_SPEED_100_FULL ?
+ "100 Mbps" :
+ "unknown speed"))),
+ ((flow_rx && flow_tx) ? "RX/TX" :
+ (flow_rx ? "RX" :
+ (flow_tx ? "TX" : "None"))));
+
+ netif_carrier_on(netdev);
+ ixgbe_check_vf_rate_limit(adapter);
+}
+
+/**
+ * ixgbe_watchdog_link_is_down - update netif_carrier status and
+ * print link down message
+ * @adapter: pointer to the adapter structure
+ **/
+static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ adapter->link_up = false;
+ adapter->link_speed = 0;
+
+ /* only continue if link was up previously */
+ if (!netif_carrier_ok(netdev))
+ return;
+
+ /* poll for SFP+ cable when link is down */
+ if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
+ adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
+
+ e_info(drv, "NIC Link is Down\n");
+ netif_carrier_off(netdev);
+}
+
+/**
+ * ixgbe_watchdog_flush_tx - flush queues on link down
+ * @adapter: pointer to the device adapter structure
+ **/
+static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
+{
+ int i;
+ int some_tx_pending = 0;
+
+ if (!netif_carrier_ok(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ if (tx_ring->next_to_use != tx_ring->next_to_clean) {
+ some_tx_pending = 1;
+ break;
+ }
+ }
+
+ if (some_tx_pending) {
+ /* We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context).
+ */
+ adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ }
+ }
+}
+
+static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
+{
+ u32 ssvpc;
+
+ /* Do not perform spoof check for 82598 */
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ return;
+
+ ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
+
+ /*
+ * The SSVPC register is cleared on read; if it reads zero, no
+ * spoofed packets were seen in the last interval.
+ */
+ if (!ssvpc)
+ return;
+
+ e_warn(drv, "%d Spoofed packets detected\n", ssvpc);
+}
+
+/**
+ * ixgbe_watchdog_subtask - check and bring link up
+ * @adapter: pointer to the device adapter structure
+ **/
+static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
+{
+ /* if interface is down do nothing */
+ if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_RESETTING, &adapter->state))
+ return;
+
+ ixgbe_watchdog_update_link(adapter);
+
+ if (adapter->link_up)
+ ixgbe_watchdog_link_is_up(adapter);
+ else
+ ixgbe_watchdog_link_is_down(adapter);
+
+ ixgbe_spoof_check(adapter);
+ ixgbe_update_stats(adapter);
+
+ ixgbe_watchdog_flush_tx(adapter);
+}
+
+/**
+ * ixgbe_sfp_detection_subtask - poll for SFP+ cable
+ * @adapter: the ixgbe adapter structure
+ **/
+static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ s32 err;
+
+ /* not searching for SFP so there is nothing to do here */
+ if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
+ !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
+ return;
+
+ /* someone else is in init, wait until next service event */
+ if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ return;
+
+ err = hw->phy.ops.identify_sfp(hw);
+ if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto sfp_out;
+
+ if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
+ /* If no cable is present, then we need to reset
+ * the next time we find a good cable. */
+ adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
+ }
+
+ /* exit on error */
+ if (err)
+ goto sfp_out;
+
+ /* exit if reset not needed */
+ if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
+ goto sfp_out;
+
+ adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
+
+ /*
+ * A module may be identified correctly, but the EEPROM may not have
+ * support for that module. setup_sfp() will fail in that case, so
+ * we should not allow that module to load.
+ */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ err = hw->phy.ops.reset(hw);
+ else
+ err = hw->mac.ops.setup_sfp(hw);
+
+ if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto sfp_out;
+
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
+ e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
+
+sfp_out:
+ clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
+
+ if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
+ (adapter->netdev->reg_state == NETREG_REGISTERED)) {
+ e_dev_err("failed to initialize because an unsupported "
+ "SFP+ module type was detected.\n");
+ e_dev_err("Reload the driver after installing a "
+ "supported module.\n");
+ unregister_netdev(adapter->netdev);
+ }
+}
+
+/**
+ * ixgbe_sfp_link_config_subtask - set up link SFP after module install
+ * @adapter: the ixgbe adapter structure
+ **/
+static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 autoneg;
+ bool negotiation;
+
+ if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
+ return;
+
+ /* someone else is in init, wait until next service event */
+ if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ return;
+
+ adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
+
+ autoneg = hw->phy.autoneg_advertised;
+ if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
+ hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
+ hw->mac.autotry_restart = false;
+ if (hw->mac.ops.setup_link)
+ hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
+
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
+}
+
+/**
+ * ixgbe_service_timer - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void ixgbe_service_timer(unsigned long data)
+{
+ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+ unsigned long next_event_offset;
+
+ /* poll faster when waiting for link */
+ if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
+ next_event_offset = HZ / 10;
+ else
+ next_event_offset = HZ * 2;
+
+ /* Reset the timer */
+ mod_timer(&adapter->service_timer, next_event_offset + jiffies);
+
+ ixgbe_service_event_schedule(adapter);
+}
+
+static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
+{
+ if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
+ return;
+
+ adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
+
+ /* If we're already down or resetting, just bail */
+ if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_RESETTING, &adapter->state))
+ return;
+
+ ixgbe_dump(adapter);
+ netdev_err(adapter->netdev, "Reset adapter\n");
+ adapter->tx_timeout_count++;
+
+ ixgbe_reinit_locked(adapter);
+}
+
+/**
+ * ixgbe_service_task - manages and runs subtasks
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbe_service_task(struct work_struct *work)
+{
+ struct ixgbe_adapter *adapter = container_of(work,
+ struct ixgbe_adapter,
+ service_task);
+
+ ixgbe_reset_subtask(adapter);
+ ixgbe_sfp_detection_subtask(adapter);
+ ixgbe_sfp_link_config_subtask(adapter);
+ ixgbe_check_overtemp_subtask(adapter);
+ ixgbe_watchdog_subtask(adapter);
+ ixgbe_fdir_reinit_subtask(adapter);
+ ixgbe_check_hang_subtask(adapter);
+
+ ixgbe_service_event_complete(adapter);
+}
+
+void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
+ u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
+{
+ struct ixgbe_adv_tx_context_desc *context_desc;
+ u16 i = tx_ring->next_to_use;
+
+ context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ /* set bits to identify this as an advanced context descriptor */
+ type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+}
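+
+/* Illustrative sketch (not part of this driver): the compare-based ring
+ * index advance used above, which wraps without a costly modulo and works
+ * for ring sizes that are not powers of two.
+ */
+#if 0
+static u16 ring_next(u16 i, u16 count)
+{
+ i++;
+ return (i < count) ? i : 0; /* wrap to the start of the ring */
+}
+#endif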
+
+static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol, u8 *hdr_len)
+{
+ int err;
+ u32 vlan_macip_lens, type_tucmd;
+ u32 mss_l4len_idx, l4len;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+
+ if (protocol == __constant_htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ l4len = tcp_hdrlen(skb);
+ *hdr_len = skb_transport_offset(skb) + l4len;
+
+ /* mss_l4len_idx: use 1 as index for TSO */
+ mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+ mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+ /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+ vlan_macip_lens = skb_network_header_len(skb);
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
+ mss_l4len_idx);
+
+ return 1;
+}
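+
+/* Worked example for the mss_l4len_idx packing above: with a 20-byte TCP
+ * header, a 1448-byte MSS and context index 1, the word becomes
+ * (20 << IXGBE_ADVTXD_L4LEN_SHIFT) | (1448 << IXGBE_ADVTXD_MSS_SHIFT) |
+ * (1 << IXGBE_ADVTXD_IDX_SHIFT); each shift places its value in a
+ * distinct bit range of the 32-bit descriptor field.
+ */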
+
+static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags,
+ __be16 protocol)
+{
+ u32 vlan_macip_lens = 0;
+ u32 mss_l4len_idx = 0;
+ u32 type_tucmd = 0;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ if (!(tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
+ !(tx_flags & IXGBE_TX_FLAGS_TXSW))
+ return false;
+ } else {
+ u8 l4_hdr = 0;
+ switch (protocol) {
+ case __constant_htons(ETH_P_IP):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but proto=%x!\n",
+ skb->protocol);
+ }
+ break;
+ }
+
+ switch (l4_hdr) {
+ case IPPROTO_TCP:
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ mss_l4len_idx = tcp_hdrlen(skb) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ case IPPROTO_SCTP:
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ mss_l4len_idx = sizeof(struct sctphdr) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ case IPPROTO_UDP:
+ mss_l4len_idx = sizeof(struct udphdr) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but l4 proto=%x!\n",
+ skb->protocol);
+ }
+ break;
+ }
+ }
+
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
+ type_tucmd, mss_l4len_idx);
+
+ return (skb->ip_summed == CHECKSUM_PARTIAL);
+}
+
+static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
+{
+ /* set type for advanced descriptor with frame checksum insertion */
+ __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_IFCS |
+ IXGBE_ADVTXD_DCMD_DEXT);
+
+ /* set HW vlan bit if vlan is present */
+ if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
+ cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
+
+ /* set segmentation enable bits for TSO/FSO */
+#ifdef IXGBE_FCOE
+ if ((tx_flags & IXGBE_TX_FLAGS_TSO) || (tx_flags & IXGBE_TX_FLAGS_FSO))
+#else
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+#endif
+ cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
+
+ return cmd_type;
+}
+
+static __le32 ixgbe_tx_olinfo_status(u32 tx_flags, unsigned int paylen)
+{
+ __le32 olinfo_status =
+ cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+ if (tx_flags & IXGBE_TX_FLAGS_TSO) {
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM |
+ (1 << IXGBE_ADVTXD_IDX_SHIFT));
+ /* enable IPv4 checksum for TSO */
+ if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
+ }
+
+ /* enable L4 checksum for TSO and TX checksum offload */
+ if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
+
+#ifdef IXGBE_FCOE
+ /* use index 1 context for FCOE/FSO */
+ if (tx_flags & IXGBE_TX_FLAGS_FCOE)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC |
+ (1 << IXGBE_ADVTXD_IDX_SHIFT));
+
+#endif
+ /*
+ * Check Context must be set if Tx switch is enabled, which it
+ * always is in the case where virtual functions are running.
+ */
+ if (tx_flags & IXGBE_TX_FLAGS_TXSW)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
+
+ return olinfo_status;
+}
+
+#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
+ IXGBE_TXD_CMD_RS)
+
+static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
+ struct sk_buff *skb,
+ struct ixgbe_tx_buffer *first,
+ u32 tx_flags,
+ const u8 hdr_len)
+{
+ struct device *dev = tx_ring->dev;
+ struct ixgbe_tx_buffer *tx_buffer_info;
+ union ixgbe_adv_tx_desc *tx_desc;
+ dma_addr_t dma;
+ __le32 cmd_type, olinfo_status;
+ struct skb_frag_struct *frag;
+ unsigned int f = 0;
+ unsigned int data_len = skb->data_len;
+ unsigned int size = skb_headlen(skb);
+ u32 offset = 0;
+ u32 paylen = skb->len - hdr_len;
+ u16 i = tx_ring->next_to_use;
+ u16 gso_segs;
+
+#ifdef IXGBE_FCOE
+ if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
+ if (data_len >= sizeof(struct fcoe_crc_eof)) {
+ data_len -= sizeof(struct fcoe_crc_eof);
+ } else {
+ size -= sizeof(struct fcoe_crc_eof) - data_len;
+ data_len = 0;
+ }
+ }
+
+#endif
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_error;
+
+ cmd_type = ixgbe_tx_cmd_type(tx_flags);
+ olinfo_status = ixgbe_tx_olinfo_status(tx_flags, paylen);
+
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
+
+ for (;;) {
+ while (size > IXGBE_MAX_DATA_PER_TXD) {
+ tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
+ tx_desc->read.cmd_type_len =
+ cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
+ tx_desc->read.olinfo_status = olinfo_status;
+
+ offset += IXGBE_MAX_DATA_PER_TXD;
+ size -= IXGBE_MAX_DATA_PER_TXD;
+
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
+ i = 0;
+ }
+ }
+
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ tx_buffer_info->length = offset + size;
+ tx_buffer_info->tx_flags = tx_flags;
+ tx_buffer_info->dma = dma;
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
+ tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+ tx_desc->read.olinfo_status = olinfo_status;
+
+ if (!data_len)
+ break;
+
+ frag = &skb_shinfo(skb)->frags[f];
+#ifdef IXGBE_FCOE
+ size = min_t(unsigned int, data_len, frag->size);
+#else
+ size = frag->size;
+#endif
+ data_len -= size;
+ f++;
+
+ offset = 0;
+ tx_flags |= IXGBE_TX_FLAGS_MAPPED_AS_PAGE;
+
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_error;
+
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
+ i = 0;
+ }
+ }
+
+ tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD);
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+ gso_segs = skb_shinfo(skb)->gso_segs;
+#ifdef IXGBE_FCOE
+ /* adjust for FCoE Sequence Offload */
+ else if (tx_flags & IXGBE_TX_FLAGS_FSO)
+ gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
+ skb_shinfo(skb)->gso_size);
+#endif /* IXGBE_FCOE */
+ else
+ gso_segs = 1;
+
+ /* multiply data chunks by size of headers */
+ tx_buffer_info->bytecount = paylen + (gso_segs * hdr_len);
+ tx_buffer_info->gso_segs = gso_segs;
+ tx_buffer_info->skb = skb;
+
+ /* set the timestamp */
+ first->time_stamp = jiffies;
+
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ /* notify HW of packet */
+ writel(i, tx_ring->tail);
+
+ return;
+dma_error:
+ dev_err(dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_buffer_info map */
+ for (;;) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
+ if (tx_buffer_info == first)
+ break;
+ if (i == 0)
+ i = tx_ring->count;
+ i--;
+ }
+
+ dev_kfree_skb_any(skb);
+
+ tx_ring->next_to_use = i;
+}
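+
+/* Illustrative sketch (not part of this driver): how the inner loop above
+ * splits one DMA mapping across descriptors. Assuming the usual
+ * IXGBE_MAX_DATA_PER_TXD of 16384 bytes, a 40000-byte mapping needs
+ * ceil(40000 / 16384) = 3 descriptors: 16384 + 16384 + 7232 bytes.
+ */
+#if 0
+static unsigned int descs_needed(unsigned int len, unsigned int max_per_txd)
+{
+ return (len + max_per_txd - 1) / max_per_txd; /* ceiling division */
+}
+#endif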
+
+static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol)
+{
+ struct ixgbe_q_vector *q_vector = ring->q_vector;
+ union ixgbe_atr_hash_dword input = { .dword = 0 };
+ union ixgbe_atr_hash_dword common = { .dword = 0 };
+ union {
+ unsigned char *network;
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ } hdr;
+ struct tcphdr *th;
+ __be16 vlan_id;
+
+ /* if ring doesn't have an interrupt vector, cannot perform ATR */
+ if (!q_vector)
+ return;
+
+ /* do nothing if sampling is disabled */
+ if (!ring->atr_sample_rate)
+ return;
+
+ ring->atr_count++;
+
+ /* snag network header to get L4 type and address */
+ hdr.network = skb_network_header(skb);
+
+ /* Currently only IPv4/IPv6 with TCP is supported */
+ if ((protocol != __constant_htons(ETH_P_IPV6) ||
+ hdr.ipv6->nexthdr != IPPROTO_TCP) &&
+ (protocol != __constant_htons(ETH_P_IP) ||
+ hdr.ipv4->protocol != IPPROTO_TCP))
+ return;
+
+ th = tcp_hdr(skb);
+
+ /* skip this packet since it is invalid or the socket is closing */
+ if (!th || th->fin)
+ return;
+
+ /* sample on all syn packets or once every atr sample count */
+ if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
+ return;
+
+ /* reset sample count */
+ ring->atr_count = 0;
+
+ vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
+
+ /*
+ * src and dst are inverted, think how the receiver sees them
+ *
+ * The input is broken into two sections, a non-compressed section
+ * containing vm_pool, vlan_id, and flow_type. The rest of the data
+ * is XORed together and stored in the compressed dword.
+ */
+ input.formatted.vlan_id = vlan_id;
+
+ /*
+ * since src port and flex bytes occupy the same word XOR them together
+ * and write the value to source port portion of compressed dword
+ */
+ if (tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
+ common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
+ else
+ common.port.src ^= th->dest ^ protocol;
+ common.port.dst ^= th->source;
+
+ if (protocol == __constant_htons(ETH_P_IP)) {
+ input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
+ } else {
+ input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
+ common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
+ hdr.ipv6->saddr.s6_addr32[1] ^
+ hdr.ipv6->saddr.s6_addr32[2] ^
+ hdr.ipv6->saddr.s6_addr32[3] ^
+ hdr.ipv6->daddr.s6_addr32[0] ^
+ hdr.ipv6->daddr.s6_addr32[1] ^
+ hdr.ipv6->daddr.s6_addr32[2] ^
+ hdr.ipv6->daddr.s6_addr32[3];
+ }
+
+ /* This assumes the Rx queue and Tx queue are bound to the same CPU */
+ ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
+ input, common, ring->queue_index);
+}
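+
+/* Illustrative sketch (not part of this driver): the XOR folding used
+ * above to compress the IPv6 source/destination pair into a single 32-bit
+ * word for the signature filter. XOR is commutative, so the eight words
+ * may be folded in any order.
+ */
+#if 0
+static u32 fold_ipv6(const u32 saddr[4], const u32 daddr[4])
+{
+ u32 fold = 0;
+ int i;
+
+ for (i = 0; i < 4; i++)
+ fold ^= saddr[i] ^ daddr[i];
+ return fold;
+}
+#endif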
+
+static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
+{
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it. */
+ smp_mb();
+
+ /* We need to check again in case another CPU has just
+ * made room available. */
+ if (likely(ixgbe_desc_unused(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ return 0;
+}
+
+static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
+{
+ if (likely(ixgbe_desc_unused(tx_ring) >= size))
+ return 0;
+ return __ixgbe_maybe_stop_tx(tx_ring, size);
+}
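+
+/* Illustrative sketch (not part of this driver; the txq type and the
+ * queue helpers are hypothetical): the stop-then-recheck pattern above.
+ * Stopping first and re-testing after a full barrier closes the race
+ * where the Tx cleanup path frees descriptors between the fullness test
+ * and the stop.
+ */
+#if 0
+static int maybe_stop(struct txq *q, u16 needed)
+{
+ if (free_slots(q) >= needed)
+ return 0;
+
+ stop_queue(q);
+ smp_mb(); /* order the stop against the re-check */
+
+ if (free_slots(q) < needed)
+ return -EBUSY; /* still full; stay stopped */
+
+ start_queue(q); /* cleanup raced with us; resume */
+ return 0;
+}
+#endif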
+
+static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
+ smp_processor_id();
+#ifdef IXGBE_FCOE
+ __be16 protocol = vlan_get_protocol(skb);
+
+ if (((protocol == htons(ETH_P_FCOE)) ||
+ (protocol == htons(ETH_P_FIP))) &&
+ (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
+ txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
+ txq += adapter->ring_feature[RING_F_FCOE].mask;
+ return txq;
+ }
+#endif
+
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ while (unlikely(txq >= dev->real_num_tx_queues))
+ txq -= dev->real_num_tx_queues;
+ return txq;
+ }
+
+ return skb_tx_hash(dev, skb);
+}
+
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
+ struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring)
+{
+ struct ixgbe_tx_buffer *first;
+ int tso;
+ u32 tx_flags = 0;
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+ unsigned short f;
+#endif
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
+ __be16 protocol = skb->protocol;
+ u8 hdr_len = 0;
+
+ /*
+ * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/IXGBE_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+ count += skb_shinfo(skb)->nr_frags;
+#endif
+ if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
+ tx_ring->tx_stats.tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
+#ifdef CONFIG_PCI_IOV
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ tx_flags |= IXGBE_TX_FLAGS_TXSW;
+
+#endif
+ /* if we have a HW VLAN tag being added default to the HW one */
+ if (vlan_tx_tag_present(skb)) {
+ tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
+ /* else if it is a SW VLAN check the next protocol and store the tag */
+ } else if (protocol == __constant_htons(ETH_P_8021Q)) {
+ struct vlan_hdr *vhdr, _vhdr;
+ vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+ if (!vhdr)
+ goto out_drop;
+
+ protocol = vhdr->h_vlan_encapsulated_proto;
+ tx_flags |= ntohs(vhdr->h_vlan_TCI) << IXGBE_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
+ }
+
+ /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
+ if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
+ ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
+ (skb->priority != TC_PRIO_CONTROL))) {
+ tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
+ tx_flags |= (skb->priority & 0x7) <<
+ IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
+ if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
+ struct vlan_ethhdr *vhdr;
+ if (skb_header_cloned(skb) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ goto out_drop;
+ vhdr = (struct vlan_ethhdr *)skb->data;
+ vhdr->h_vlan_TCI = htons(tx_flags >>
+ IXGBE_TX_FLAGS_VLAN_SHIFT);
+ } else {
+ tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
+ }
+ }
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+
+#ifdef IXGBE_FCOE
+ /* setup tx offload for FCoE */
+ if ((protocol == __constant_htons(ETH_P_FCOE)) &&
+ (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
+ tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len);
+ if (tso < 0)
+ goto out_drop;
+ else if (tso)
+ tx_flags |= IXGBE_TX_FLAGS_FSO |
+ IXGBE_TX_FLAGS_FCOE;
+ else
+ tx_flags |= IXGBE_TX_FLAGS_FCOE;
+
+ goto xmit_fcoe;
+ }
+
+#endif /* IXGBE_FCOE */
+ /* setup IPv4/IPv6 offloads */
+ if (protocol == __constant_htons(ETH_P_IP))
+ tx_flags |= IXGBE_TX_FLAGS_IPV4;
+
+ tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
+ if (tso < 0)
+ goto out_drop;
+ else if (tso)
+ tx_flags |= IXGBE_TX_FLAGS_TSO;
+ else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol))
+ tx_flags |= IXGBE_TX_FLAGS_CSUM;
+
+ /* add the ATR filter if ATR is on */
+ if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
+ ixgbe_atr(tx_ring, skb, tx_flags, protocol);
+
+#ifdef IXGBE_FCOE
+xmit_fcoe:
+#endif /* IXGBE_FCOE */
+ ixgbe_tx_map(tx_ring, skb, first, tx_flags, hdr_len);
+
+ ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ return NETDEV_TX_OK;
+
+out_drop:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
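+
+/* Worked example for the "count + 3" reservation above: a TSO skb with a
+ * 256-byte linear area and four page fragments needs 1 + 4 data
+ * descriptors, plus 1 context descriptor and the 2-descriptor gap, so at
+ * least 8 free ring entries must exist before mapping starts.
+ */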
+
+static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_ring *tx_ring;
+
+ tx_ring = adapter->tx_ring[skb->queue_mapping];
+ return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
+}
+
+/**
+ * ixgbe_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbe_set_mac(struct net_device *netdev, void *p)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+ IXGBE_RAH_AV);
+
+ return 0;
+}
+
+static int
+ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u16 value;
+ int rc;
+
+ if (prtad != hw->phy.mdio.prtad)
+ return -EINVAL;
+ rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
+ if (!rc)
+ rc = value;
+ return rc;
+}
+
+static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
+ u16 addr, u16 value)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (prtad != hw->phy.mdio.prtad)
+ return -EINVAL;
+ return hw->phy.ops.write_reg(hw, addr, devad, value);
+}
+
+static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
+}
+
+/**
+ * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
+ * netdev->dev_addrs
+ * @dev: network interface device structure
+ *
+ * Returns non-zero on failure
+ **/
+static int ixgbe_add_sanmac_netdev(struct net_device *dev)
+{
+ int err = 0;
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_mac_info *mac = &adapter->hw.mac;
+
+ if (is_valid_ether_addr(mac->san_addr)) {
+ rtnl_lock();
+ err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
+ rtnl_unlock();
+ }
+ return err;
+}
+
+/**
+ * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
+ * netdev->dev_addrs
+ * @dev: network interface device structure
+ *
+ * Returns non-zero on failure
+ **/
+static int ixgbe_del_sanmac_netdev(struct net_device *dev)
+{
+ int err = 0;
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_mac_info *mac = &adapter->hw.mac;
+
+ if (is_valid_ether_addr(mac->san_addr)) {
+ rtnl_lock();
+ err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
+ rtnl_unlock();
+ }
+ return err;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void ixgbe_netpoll(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ /* if interface is down do nothing */
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ return;
+
+ adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ for (i = 0; i < num_q_vectors; i++) {
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
+ ixgbe_msix_clean_rings(0, q_vector);
+ }
+ } else {
+ ixgbe_intr(adapter->pdev->irq, netdev);
+ }
+ adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
+}
+#endif
+
+static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
+ u64 bytes, packets;
+ unsigned int start;
+
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ stats->rx_packets += packets;
+ stats->rx_bytes += bytes;
+ }
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
+ u64 bytes, packets;
+ unsigned int start;
+
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+ }
+ }
+ rcu_read_unlock();
+ /* following stats updated by ixgbe_watchdog_task() */
+ stats->multicast = netdev->stats.multicast;
+ stats->rx_errors = netdev->stats.rx_errors;
+ stats->rx_length_errors = netdev->stats.rx_length_errors;
+ stats->rx_crc_errors = netdev->stats.rx_crc_errors;
+ stats->rx_missed_errors = netdev->stats.rx_missed_errors;
+ return stats;
+}
+
+/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
+ * @adapter: pointer to ixgbe_adapter
+ * @tc: number of traffic classes currently enabled
+ *
+ * Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm
+ * each 802.1Q priority maps to a packet buffer that exists.
+ */
+static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg, rsave;
+ int i;
+
+ /* The 82598 has a static priority to TC mapping that cannot
+ * be changed, so no validation is needed.
+ */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+ rsave = reg;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ u8 up2tc = (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)) & 0x7;
+
+ /* If up2tc is out of bounds, default that priority to TC 0 */
+ if (up2tc > tc)
+ reg &= ~(0x7 << (i * IXGBE_RTRUP2TC_UP_SHIFT));
+ }
+
+ if (reg != rsave)
+ IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
+}
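
Review note: RTRUP2TC packs one 3-bit traffic-class index per 802.1Q user
priority, stepped by IXGBE_RTRUP2TC_UP_SHIFT. A hedged sketch of that
layout's encode/decode, with invented helper names:

static u8 example_up2tc_get(u32 reg, int up)
{
        return (reg >> (up * IXGBE_RTRUP2TC_UP_SHIFT)) & 0x7;
}

static u32 example_up2tc_set(u32 reg, int up, u8 tc)
{
        reg &= ~(0x7 << (up * IXGBE_RTRUP2TC_UP_SHIFT));
        return reg | ((u32)(tc & 0x7) << (up * IXGBE_RTRUP2TC_UP_SHIFT));
}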
+
+
+/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
+ * classes.
+ *
+ * @dev: net device to configure
+ * @tc: number of traffic classes to enable
+ */
+int ixgbe_setup_tc(struct net_device *dev, u8 tc)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* Multiple traffic classes require multiple queues */
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+ e_err(drv, "Enable failed, needs MSI-X\n");
+ return -EINVAL;
+ }
+
+ /* Hardware supports up to 8 traffic classes */
+ if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
+ (hw->mac.type == ixgbe_mac_82598EB && tc < MAX_TRAFFIC_CLASS))
+ return -EINVAL;
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match packet buffer alignment. Unfortunately, the
+ * hardware is not flexible enough to do this dynamically.
+ */
+ if (netif_running(dev))
+ ixgbe_close(dev);
+ ixgbe_clear_interrupt_scheme(adapter);
+
+ if (tc) {
+ netdev_set_num_tc(dev, tc);
+ adapter->last_lfc_mode = adapter->hw.fc.current_mode;
+
+ adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ adapter->hw.fc.requested_mode = ixgbe_fc_none;
+ } else {
+ netdev_reset_tc(dev);
+
+ adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+ adapter->temp_dcb_cfg.pfc_mode_enable = false;
+ adapter->dcb_cfg.pfc_mode_enable = false;
+ }
+
+ ixgbe_init_interrupt_scheme(adapter);
+ ixgbe_validate_rtr(adapter, tc);
+ if (netif_running(dev))
+ ixgbe_open(dev);
+
+ return 0;
+}
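
Review note: ixgbe_setup_tc() resizes the device for tc classes; the
per-class queue ranges are advertised to the stack elsewhere via
netdev_set_tc_queue(). A hedged sketch of that stack-side mapping (the
four-queues-per-TC split is invented for illustration):

static void example_map_tc_queues(struct net_device *dev, u8 tc)
{
        int i;

        netdev_set_num_tc(dev, tc);
        /* tell the core which Tx queue range backs each traffic class */
        for (i = 0; i < tc; i++)
                netdev_set_tc_queue(dev, i, 4, i * 4);
}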
+
+void ixgbe_do_reset(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+}
+
+static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+#ifdef CONFIG_DCB
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
+ data &= ~NETIF_F_HW_VLAN_RX;
+#endif
+
+ /* RXHASH is only valid with RSS enabled, so clear it otherwise */
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ data &= ~NETIF_F_RXHASH;
+
+ /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+ if (!(data & NETIF_F_RXCSUM))
+ data &= ~NETIF_F_LRO;
+
+ /* Turn off LRO if not RSC capable or invalid ITR settings */
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
+ data &= ~NETIF_F_LRO;
+ } else if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
+ (adapter->rx_itr_setting != 1 &&
+ adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE)) {
+ data &= ~NETIF_F_LRO;
+ e_info(probe, "rx-usecs set too low, not enabling RSC\n");
+ }
+
+ return data;
+}
+
+static int ixgbe_set_features(struct net_device *netdev, u32 data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ bool need_reset = false;
+
+ /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+ if (!(data & NETIF_F_RXCSUM))
+ adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
+ else
+ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+
+ /* Make sure RSC matches LRO, reset if change */
+ if (!!(data & NETIF_F_LRO) !=
+ !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
+ adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X540:
+ case ixgbe_mac_82599EB:
+ need_reset = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /*
+ * Check if Flow Director n-tuple support was enabled or disabled. If
+ * the state changed, we need to reset.
+ */
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+ /* turn off ATR, enable perfect filters and reset */
+ if (data & NETIF_F_NTUPLE) {
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ need_reset = true;
+ }
+ } else if (!(data & NETIF_F_NTUPLE)) {
+ /* turn off Flow Director, set ATR and reset */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+ !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ need_reset = true;
+ }
+
+ if (need_reset)
+ ixgbe_do_reset(netdev);
+
+ return 0;
+}
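
Review note: these two hooks cooperate through netdev_update_features(): the
core first filters the requested bits through .ndo_fix_features so the driver
can veto bad combinations, then commits the survivors via .ndo_set_features.
A simplified, hedged sketch of that sequence — not the real core
implementation:

static void example_update_features(struct net_device *dev, u32 wanted)
{
        u32 features = wanted;

        if (dev->netdev_ops->ndo_fix_features)
                features = dev->netdev_ops->ndo_fix_features(dev, features);

        if (features != dev->features) {
                if (dev->netdev_ops->ndo_set_features)
                        dev->netdev_ops->ndo_set_features(dev, features);
                dev->features = features;
        }
}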
+
+static const struct net_device_ops ixgbe_netdev_ops = {
+ .ndo_open = ixgbe_open,
+ .ndo_stop = ixgbe_close,
+ .ndo_start_xmit = ixgbe_xmit_frame,
+ .ndo_select_queue = ixgbe_select_queue,
+ .ndo_set_rx_mode = ixgbe_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = ixgbe_set_mac,
+ .ndo_change_mtu = ixgbe_change_mtu,
+ .ndo_tx_timeout = ixgbe_tx_timeout,
+ .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
+ .ndo_do_ioctl = ixgbe_ioctl,
+ .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
+ .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
+ .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
+ .ndo_get_stats64 = ixgbe_get_stats64,
+ .ndo_setup_tc = ixgbe_setup_tc,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ixgbe_netpoll,
+#endif
+#ifdef IXGBE_FCOE
+ .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
+ .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
+ .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
+ .ndo_fcoe_enable = ixgbe_fcoe_enable,
+ .ndo_fcoe_disable = ixgbe_fcoe_disable,
+ .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
+#endif /* IXGBE_FCOE */
+ .ndo_set_features = ixgbe_set_features,
+ .ndo_fix_features = ixgbe_fix_features,
+};
+
+static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
+ const struct ixgbe_info *ii)
+{
+#ifdef CONFIG_PCI_IOV
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ /* The 82599 supports up to 64 VFs per physical function
+ * but this implementation limits allocation to 63 so that
+ * basic networking resources are still available to the
+ * physical function
+ */
+ adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
+ ixgbe_enable_sriov(adapter, ii);
+#endif /* CONFIG_PCI_IOV */
+}
+
+/**
+ * ixgbe_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ixgbe_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ixgbe_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit ixgbe_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct ixgbe_adapter *adapter = NULL;
+ struct ixgbe_hw *hw;
+ const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
+ static int cards_found;
+ int i, err, pci_using_dac;
+ u8 part_str[IXGBE_PBANUM_LENGTH];
+ unsigned int indices = num_possible_cpus();
+#ifdef IXGBE_FCOE
+ u16 device_caps;
+#endif
+ u32 eec;
+ u16 wol_cap;
+
+ /* Catch broken hardware that put the wrong VF device ID in
+ * the PCIe SR-IOV capability.
+ */
+ if (pdev->is_virtfn) {
+ WARN(1, "%s (%hx:%hx) should not be a VF!\n",
+ pci_name(pdev), pdev->vendor, pdev->device);
+ return -EINVAL;
+ }
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ pci_using_dac = 1;
+ } else {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
+ }
+ }
+ pci_using_dac = 0;
+ }
+
+ err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+ IORESOURCE_MEM), ixgbe_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed 0x%x\n", err);
+ goto err_pci_reg;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+
+#ifdef CONFIG_IXGBE_DCB
+ indices *= MAX_TRAFFIC_CLASS;
+#endif
+
+ if (ii->mac == ixgbe_mac_82598EB)
+ indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
+ else
+ indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
+
+#ifdef IXGBE_FCOE
+ indices += min_t(unsigned int, num_possible_cpus(),
+ IXGBE_MAX_FCOE_INDICES);
+#endif
+ netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ pci_set_drvdata(pdev, adapter);
+
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ hw = &adapter->hw;
+ hw->back = adapter;
+ adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->hw_addr) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ for (i = 1; i <= 5; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ }
+
+ netdev->netdev_ops = &ixgbe_netdev_ops;
+ ixgbe_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+ adapter->bd_number = cards_found;
+
+ /* Setup hw api */
+ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+ hw->mac.type = ii->mac;
+
+ /* EEPROM */
+ memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
+ if (!(eec & (1 << 8)))
+ hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
+
+ /* PHY */
+ memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+ /* ixgbe_identify_phy_generic will set prtad and mmds properly */
+ hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
+ hw->phy.mdio.mmds = 0;
+ hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ hw->phy.mdio.dev = netdev;
+ hw->phy.mdio.mdio_read = ixgbe_mdio_read;
+ hw->phy.mdio.mdio_write = ixgbe_mdio_write;
+
+ ii->get_invariants(hw);
+
+ /* setup the private structure */
+ err = ixgbe_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+ /* Make it possible for the adapter to be woken up via WOL */
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * If there is a fan on this device and it has failed, log the
+ * failure.
+ */
+ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
+ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ e_crit(probe, "Fan has stopped, replace the adapter\n");
+ }
+
+ /* reset_hw fills in the perm_addr as well */
+ hw->phy.reset_if_overtemp = true;
+ err = hw->mac.ops.reset_hw(hw);
+ hw->phy.reset_if_overtemp = false;
+ if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
+ hw->mac.type == ixgbe_mac_82598EB) {
+ err = 0;
+ } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+ e_dev_err("failed to load because an unsupported SFP+ "
+ "module type was detected.\n");
+ e_dev_err("Reload the driver after installing a supported "
+ "module.\n");
+ goto err_sw_init;
+ } else if (err) {
+ e_dev_err("HW Init failed: %d\n", err);
+ goto err_sw_init;
+ }
+
+ ixgbe_probe_vf(adapter, ii);
+
+ netdev->features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM;
+
+ netdev->hw_features = netdev->features;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ netdev->features |= NETIF_F_SCTP_CSUM;
+ netdev->hw_features |= NETIF_F_SCTP_CSUM |
+ NETIF_F_NTUPLE;
+ break;
+ default:
+ break;
+ }
+
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_IP_CSUM;
+ netdev->vlan_features |= NETIF_F_IPV6_CSUM;
+ netdev->vlan_features |= NETIF_F_SG;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
+ IXGBE_FLAG_DCB_ENABLED);
+
+#ifdef CONFIG_IXGBE_DCB
+ netdev->dcbnl_ops = &dcbnl_ops;
+#endif
+
+#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
+ if (hw->mac.ops.get_device_caps) {
+ hw->mac.ops.get_device_caps(hw, &device_caps);
+ if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
+ adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+ }
+ }
+ if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
+ netdev->vlan_features |= NETIF_F_FCOE_CRC;
+ netdev->vlan_features |= NETIF_F_FSO;
+ netdev->vlan_features |= NETIF_F_FCOE_MTU;
+ }
+#endif /* IXGBE_FCOE */
+ if (pci_using_dac) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
+
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
+ netdev->hw_features |= NETIF_F_LRO;
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+ netdev->features |= NETIF_F_LRO;
+
+ /* make sure the EEPROM is good */
+ if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
+ e_dev_err("The EEPROM Checksum Is Not Valid\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
+
+ if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
+ e_dev_err("invalid MAC address\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
+ if (hw->mac.ops.disable_tx_laser &&
+ ((hw->phy.multispeed_fiber) ||
+ ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+ (hw->mac.type == ixgbe_mac_82599EB))))
+ hw->mac.ops.disable_tx_laser(hw);
+
+ setup_timer(&adapter->service_timer, &ixgbe_service_timer,
+ (unsigned long) adapter);
+
+ INIT_WORK(&adapter->service_task, ixgbe_service_task);
+ clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
+
+ err = ixgbe_init_interrupt_scheme(adapter);
+ if (err)
+ goto err_sw_init;
+
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
+ netdev->hw_features &= ~NETIF_F_RXHASH;
+ netdev->features &= ~NETIF_F_RXHASH;
+ }
+
+ /* WOL is not supported on all devices; only the following are */
+ adapter->wol = 0;
+ switch (pdev->device) {
+ case IXGBE_DEV_ID_82599_SFP:
+ /* Only this subdevice supports WOL */
+ if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
+ adapter->wol = IXGBE_WUFC_MAG;
+ break;
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ /* All except this subdevice support WOL */
+ if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
+ adapter->wol = IXGBE_WUFC_MAG;
+ break;
+ case IXGBE_DEV_ID_82599_KX4:
+ adapter->wol = IXGBE_WUFC_MAG;
+ break;
+ case IXGBE_DEV_ID_X540T:
+ /* Check the EEPROM to see if WOL is enabled */
+ hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
+ wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
+
+ if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
+ ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
+ (hw->bus.func == 0)))
+ adapter->wol = IXGBE_WUFC_MAG;
+ break;
+ }
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ /* pick up the PCI bus settings for reporting later */
+ hw->mac.ops.get_bus_info(hw);
+
+ /* print bus type/speed/width info */
+ e_dev_info("(PCI Express:%s:%s) %pM\n",
+ (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
+ hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
+ "Unknown"),
+ (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
+ hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
+ hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
+ "Unknown"),
+ netdev->dev_addr);
+
+ err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
+ if (err)
+ strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
+ if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
+ e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, hw->phy.sfp_type,
+ part_str);
+ else
+ e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, part_str);
+
+ if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
+ e_dev_warn("PCI-Express bandwidth available for this card is "
+ "not sufficient for optimal performance.\n");
+ e_dev_warn("For optimal performance a x8 PCI-Express slot "
+ "is required.\n");
+ }
+
+ /* save off EEPROM version number */
+ hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);
+
+ /* reset the hardware with the new settings */
+ err = hw->mac.ops.start_hw(hw);
+
+ if (err == IXGBE_ERR_EEPROM_VERSION) {
+ /* We are running on a pre-production device, log a warning */
+ e_dev_warn("This device is a pre-production adapter/LOM. "
+ "Please be aware there may be issues associated "
+ "with your hardware. If you are experiencing "
+ "problems please contact your Intel or hardware "
+ "representative who provided you with this "
+ "hardware.\n");
+ }
+ strcpy(netdev->name, "eth%d");
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+#ifdef CONFIG_IXGBE_DCA
+ if (dca_add_requester(&pdev->dev) == 0) {
+ adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+ ixgbe_setup_dca(adapter);
+ }
+#endif
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
+ for (i = 0; i < adapter->num_vfs; i++)
+ ixgbe_vf_configuration(pdev, (i | 0x10000000));
+ }
+
+ /* firmware requires the driver version to be 0xFFFFFFFF
+ * since the OS does not support the feature
+ */
+ if (hw->mac.ops.set_fw_drv_ver)
+ hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
+ 0xFF);
+
+ /* add san mac addr to netdev */
+ ixgbe_add_sanmac_netdev(netdev);
+
+ e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
+ cards_found++;
+ return 0;
+
+err_register:
+ ixgbe_release_hw_control(adapter);
+ ixgbe_clear_interrupt_scheme(adapter);
+err_sw_init:
+err_eeprom:
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
+ adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
+ iounmap(hw->hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
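
Review note: the error path above follows the standard kernel goto-unwind
idiom: each label releases only what was acquired before the failing step,
in reverse order. A condensed, hedged skeleton of the shape (the example_*
helpers are hypothetical):

static int example_probe(struct pci_dev *pdev)
{
        int err;

        err = pci_enable_device_mem(pdev);
        if (err)
                return err;

        err = example_claim_regions(pdev);      /* hypothetical */
        if (err)
                goto err_disable;

        err = example_setup_hw(pdev);           /* hypothetical */
        if (err)
                goto err_release;

        return 0;

err_release:
        example_release_regions(pdev);          /* hypothetical */
err_disable:
        pci_disable_device(pdev);
        return err;
}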
+
+/**
+ * ixgbe_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ixgbe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void __devexit ixgbe_remove(struct pci_dev *pdev)
+{
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ set_bit(__IXGBE_DOWN, &adapter->state);
+ cancel_work_sync(&adapter->service_task);
+
+#ifdef CONFIG_IXGBE_DCA
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
+ adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
+ dca_remove_requester(&pdev->dev);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
+ }
+
+#endif
+#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ ixgbe_cleanup_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
+
+ /* remove the added san mac */
+ ixgbe_del_sanmac_netdev(netdev);
+
+ if (netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(netdev);
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ if (!(ixgbe_check_vf_assignment(adapter)))
+ ixgbe_disable_sriov(adapter);
+ else
+ e_dev_warn("Unloading driver while VFs are assigned "
+ "- VFs will not be deallocated\n");
+ }
+
+ ixgbe_clear_interrupt_scheme(adapter);
+
+ ixgbe_release_hw_control(adapter);
+
+ iounmap(adapter->hw.hw_addr);
+ pci_release_selected_regions(pdev, pci_select_bars(pdev,
+ IORESOURCE_MEM));
+
+ e_dev_info("complete\n");
+
+ free_netdev(netdev);
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+/**
+ * ixgbe_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ ixgbe_down(adapter);
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * ixgbe_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
+{
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ pci_ers_result_t result;
+ int err;
+
+ if (pci_enable_device_mem(pdev)) {
+ e_err(probe, "Cannot re-enable PCI device after reset.\n");
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ pci_wake_from_d3(pdev, false);
+
+ ixgbe_reset(adapter);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ err = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (err) {
+ e_dev_err("pci_cleanup_aer_uncorrect_error_status "
+ "failed 0x%0x\n", err);
+ /* non-fatal, continue */
+ }
+
+ return result;
+}
+
+/**
+ * ixgbe_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation.
+ */
+static void ixgbe_io_resume(struct pci_dev *pdev)
+{
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ if (netif_running(netdev))
+ ixgbe_up(adapter);
+
+ netif_device_attach(netdev);
+}
+
+static struct pci_error_handlers ixgbe_err_handler = {
+ .error_detected = ixgbe_io_error_detected,
+ .slot_reset = ixgbe_io_slot_reset,
+ .resume = ixgbe_io_resume,
+};
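
Review note: the AER core invokes these callbacks in a fixed order; a hedged
summary of the recovery flow this driver implements:

/*
 * 1. ->error_detected(pdev, state): detach the netdev, quiesce DMA, and
 *    vote PCI_ERS_RESULT_NEED_RESET (or DISCONNECT on permanent failure).
 * 2. ->slot_reset(pdev): after the core resets the slot, re-enable the
 *    device, restore config space, re-init the hardware, and report
 *    PCI_ERS_RESULT_RECOVERED.
 * 3. ->resume(pdev): once every driver on the segment has recovered,
 *    bring traffic back up and re-attach the netdev.
 */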
+
+static struct pci_driver ixgbe_driver = {
+ .name = ixgbe_driver_name,
+ .id_table = ixgbe_pci_tbl,
+ .probe = ixgbe_probe,
+ .remove = __devexit_p(ixgbe_remove),
+#ifdef CONFIG_PM
+ .suspend = ixgbe_suspend,
+ .resume = ixgbe_resume,
+#endif
+ .shutdown = ixgbe_shutdown,
+ .err_handler = &ixgbe_err_handler
+};
+
+/**
+ * ixgbe_init_module - Driver Registration Routine
+ *
+ * ixgbe_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init ixgbe_init_module(void)
+{
+ int ret;
+ pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
+ pr_info("%s\n", ixgbe_copyright);
+
+#ifdef CONFIG_IXGBE_DCA
+ dca_register_notify(&dca_notifier);
+#endif
+
+ ret = pci_register_driver(&ixgbe_driver);
+ return ret;
+}
+
+module_init(ixgbe_init_module);
+
+/**
+ * ixgbe_exit_module - Driver Exit Cleanup Routine
+ *
+ * ixgbe_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit ixgbe_exit_module(void)
+{
+#ifdef CONFIG_IXGBE_DCA
+ dca_unregister_notify(&dca_notifier);
+#endif
+ pci_unregister_driver(&ixgbe_driver);
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
+}
+
+#ifdef CONFIG_IXGBE_DCA
+static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
+ void *p)
+{
+ int ret_val;
+
+ ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
+ __ixgbe_notify_dca);
+
+ return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
+}
+
+#endif /* CONFIG_IXGBE_DCA */
+
+module_exit(ixgbe_exit_module);
+
+/* ixgbe_main.c */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
new file mode 100644
index 000000000000..1ff0eefcfd0a
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -0,0 +1,471 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "ixgbe_type.h"
+#include "ixgbe_common.h"
+#include "ixgbe_mbx.h"
+
+/**
+ * ixgbe_read_mbx - Reads a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read a message from the buffer
+ **/
+s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ if (mbx->ops.read)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = 0;
+
+ if (size > mbx->size)
+ ret_val = IXGBE_ERR_MBX;
+
+ else if (mbx->ops.write)
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg - checks to see if someone sent us mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbx->ops.check_for_msg)
+ ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack - checks to see if someone sent us ACK
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbx->ops.check_for_ack)
+ ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst - checks to see if other side has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbx->ops.check_for_rst)
+ ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to poll
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ if (!countdown || !mbx->ops.check_for_msg)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ udelay(mbx->usec_delay);
+ }
+
+out:
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to poll
+ *
+ * returns SUCCESS if it successfully received a message acknowledgement
+ **/
+static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ if (!countdown || !mbx->ops.check_for_ack)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ udelay(mbx->usec_delay);
+ }
+
+out:
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!mbx->ops.read)
+ goto out;
+
+ ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+
+ /* if a notification arrived, read the message; otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ /* exit if either we can't write or there isn't a defined timeout */
+ if (!mbx->ops.write || !mbx->timeout)
+ goto out;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = ixgbe_poll_for_ack(hw, mbx_id);
+out:
+ return ret_val;
+}
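
Review note: write_posted/read_posted compose the raw read/write/poll
primitives into a blocking request/response pair. A hedged sketch of a
PF-side caller dispatching through the ops table (the ping itself is
invented for illustration):

static s32 example_pf_ping_vf(struct ixgbe_hw *hw, u16 vf)
{
        u32 msg = IXGBE_PF_CONTROL_MSG; /* defined in ixgbe_mbx.h */

        /* ops.write_posted dispatches to ixgbe_write_posted_mbx */
        return hw->mbx.ops.write_posted(hw, &msg, 1, vf);
}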
+
+static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
+{
+ u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbvficr & mask) {
+ ret_val = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
+ index)) {
+ ret_val = 0;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
+ index)) {
+ ret_val = 0;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst_pf - checks to see if the VF has reset
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ u32 reg_offset = (vf_number < 32) ? 0 : 1;
+ u32 vf_shift = vf_number % 32;
+ u32 vflre = 0;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+ break;
+ case ixgbe_mac_X540:
+ vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
+ break;
+ default:
+ break;
+ }
+
+ if (vflre & (1 << vf_shift)) {
+ ret_val = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ u32 p2v_mailbox;
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
+
+ /* reserve mailbox for vf use */
+ p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
+ if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
+ ret_val = 0;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx_pf - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbe_check_for_msg_pf(hw, vf_number);
+ ixgbe_check_for_ack_pf(hw, vf_number);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+ return ret_val;
+}
+
+/**
+ * ixgbe_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/
+static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
+
+ /* Acknowledge the message and release buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+#ifdef CONFIG_PCI_IOV
+/**
+ * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540)
+ return;
+
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+}
+#endif /* CONFIG_PCI_IOV */
+
+struct ixgbe_mbx_operations mbx_ops_generic = {
+ .read = ixgbe_read_mbx_pf,
+ .write = ixgbe_write_mbx_pf,
+ .read_posted = ixgbe_read_posted_mbx,
+ .write_posted = ixgbe_write_posted_mbx,
+ .check_for_msg = ixgbe_check_for_msg_pf,
+ .check_for_ack = ixgbe_check_for_ack_pf,
+ .check_for_rst = ixgbe_check_for_rst_pf,
+};
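
Review note: a MAC-specific init routine is expected to copy this table into
hw->mbx.ops before any mailbox traffic; the exported ixgbe_read_mbx()/
ixgbe_write_mbx() wrappers then dispatch through it. A hedged sketch of the
wiring, with an invented function name:

static void example_init_mbx(struct ixgbe_hw *hw)
{
        hw->mbx.ops = mbx_ops_generic;          /* struct copy */
#ifdef CONFIG_PCI_IOV
        ixgbe_init_mbx_params_pf(hw);           /* size, timeout, stats */
#endif
}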
+
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
new file mode 100644
index 000000000000..b239bdac38da
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -0,0 +1,93 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "ixgbe_type.h"
+
+#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX -100
+
+#define IXGBE_VFMAILBOX 0x002FC
+#define IXGBE_VFMBMEM 0x00200
+
+#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is IXGBE_PF_*.
+ * Message results are the original value or'd with one of the
+ * IXGBE_VT_MSGTYPE_* flag bits below (the 0xF0000000 nibble).
+ */
+#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET 0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD 3
+
+#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+#ifdef CONFIG_PCI_IOV
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+#endif /* CONFIG_PCI_IOV */
+
+extern struct ixgbe_mbx_operations mbx_ops_generic;
+
+#endif /* _IXGBE_MBX_H_ */
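
Review note: a mailbox word carries the message type in its low bits and
per-message extra info in bits 23:16; the PF replies with the same word or'd
with an ACK or NACK flag. A hedged encode/decode sketch using the masks
above (helper names invented):

static u32 example_encode(u32 type, u32 info)
{
        return type | (info << IXGBE_VT_MSGINFO_SHIFT);
}

static bool example_was_acked(u32 reply)
{
        return (reply & IXGBE_VT_MSGTYPE_ACK) != 0;
}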
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
new file mode 100644
index 000000000000..9a56fd74e673
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -0,0 +1,1714 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+static void ixgbe_i2c_start(struct ixgbe_hw *hw);
+static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
+static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
+static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
+static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
+static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
+static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
+static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
+static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
+static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
+static bool ixgbe_get_i2c_data(u32 *i2cctl);
+static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
+static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
+static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
+
+/**
+ * ixgbe_identify_phy_generic - Get physical layer module
+ * @hw: pointer to hardware structure
+ *
+ * Determines the physical layer module found on the current adapter.
+ **/
+s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u32 phy_addr;
+ u16 ext_ability = 0;
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
+ hw->phy.mdio.prtad = phy_addr;
+ if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) {
+ ixgbe_get_phy_id(hw);
+ hw->phy.type =
+ ixgbe_get_phy_type_from_id(hw->phy.id);
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ hw->phy.ops.read_reg(hw,
+ MDIO_PMA_EXTABLE,
+ MDIO_MMD_PMAPMD,
+ &ext_ability);
+ if (ext_ability &
+ (MDIO_PMA_EXTABLE_10GBT |
+ MDIO_PMA_EXTABLE_1000BT))
+ hw->phy.type =
+ ixgbe_phy_cu_unknown;
+ else
+ hw->phy.type =
+ ixgbe_phy_generic;
+ }
+
+ status = 0;
+ break;
+ }
+ }
+ /* clear value if nothing found */
+ if (status != 0)
+ hw->phy.mdio.prtad = 0;
+ } else {
+ status = 0;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_phy_id - Get the phy ID
+ * @hw: pointer to hardware structure
+ *
+ **/
+static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
+{
+ u32 status;
+ u16 phy_id_high = 0;
+ u16 phy_id_low = 0;
+
+ status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
+ &phy_id_high);
+
+ if (status == 0) {
+ hw->phy.id = (u32)(phy_id_high << 16);
+ status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
+ &phy_id_low);
+ hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
+ hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
+ }
+ return status;
+}
+
+/**
+ * ixgbe_get_phy_type_from_id - Get the phy type
+ * @phy_id: the phy ID
+ *
+ **/
+static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
+{
+ enum ixgbe_phy_type phy_type;
+
+ switch (phy_id) {
+ case TN1010_PHY_ID:
+ phy_type = ixgbe_phy_tn;
+ break;
+ case X540_PHY_ID:
+ phy_type = ixgbe_phy_aq;
+ break;
+ case QT2022_PHY_ID:
+ phy_type = ixgbe_phy_qt;
+ break;
+ case ATH_PHY_ID:
+ phy_type = ixgbe_phy_nl;
+ break;
+ default:
+ phy_type = ixgbe_phy_unknown;
+ break;
+ }
+
+ return phy_type;
+}
+
+/**
+ * ixgbe_reset_phy_generic - Performs a PHY reset
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u16 ctrl = 0;
+ s32 status = 0;
+
+ if (hw->phy.type == ixgbe_phy_unknown)
+ status = ixgbe_identify_phy_generic(hw);
+
+ if (status != 0 || hw->phy.type == ixgbe_phy_none)
+ goto out;
+
+ /* Don't reset PHY if it's shut down due to overtemp. */
+ if (!hw->phy.reset_if_overtemp &&
+ (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
+ goto out;
+
+ /*
+ * Perform soft PHY reset to the PHY_XS.
+ * This will cause a soft reset to the PHY
+ */
+ hw->phy.ops.write_reg(hw, MDIO_CTRL1,
+ MDIO_MMD_PHYXS,
+ MDIO_CTRL1_RESET);
+
+ /*
+ * Poll for reset bit to self-clear indicating reset is complete.
+ * Some PHYs could take up to 3 seconds to complete and need about
+ * 1.7 usec delay after the reset is complete.
+ */
+ for (i = 0; i < 30; i++) {
+ msleep(100);
+ hw->phy.ops.read_reg(hw, MDIO_CTRL1,
+ MDIO_MMD_PHYXS, &ctrl);
+ if (!(ctrl & MDIO_CTRL1_RESET)) {
+ udelay(2);
+ break;
+ }
+ }
+
+ if (ctrl & MDIO_CTRL1_RESET) {
+ status = IXGBE_ERR_RESET_FAILED;
+ hw_dbg(hw, "PHY reset polling failed to complete.\n");
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
+ * @phy_data: Pointer to read data from PHY register
+ **/
+s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+{
+ u32 command;
+ u32 i;
+ u32 data;
+ s32 status = 0;
+ u16 gssr;
+
+ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+ gssr = IXGBE_GSSR_PHY1_SM;
+ else
+ gssr = IXGBE_GSSR_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ if (status == 0) {
+ /* Setup and write the address cycle command */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY address command did not complete.\n");
+ status = IXGBE_ERR_PHY;
+ }
+
+ if (status == 0) {
+ /*
+ * Address cycle complete, setup and write the read
+ * command
+ */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.mdio.prtad <<
+ IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle
+ * completed. The MDI Command bit will clear when the
+ * operation is complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY read command didn't complete\n");
+ status = IXGBE_ERR_PHY;
+ } else {
+ /*
+ * Read operation is complete. Get the data
+ * from MSRWD
+ */
+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+ *phy_data = (u16)(data);
+ }
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ }
+
+ return status;
+}
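
Review note: the routine above runs a clause-45 MDIO transaction as two MSCA
cycles: an address cycle that latches reg_addr, then a read cycle that pulls
the result out of MSRWD. Callers reach it through the PHY ops table; a
hedged usage sketch (helper name invented):

static s32 example_read_devid(struct ixgbe_hw *hw, u16 *id)
{
        /* read_reg is wired to ixgbe_read_phy_reg_generic on these parts */
        return hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, id);
}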
+
+/**
+ * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ **/
+s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ u32 command;
+ u32 i;
+ s32 status = 0;
+ u16 gssr;
+
+ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+ gssr = IXGBE_GSSR_PHY1_SM;
+ else
+ gssr = IXGBE_GSSR_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ if (status == 0) {
+ /* Put the data in the MDI single read and write data register*/
+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
+
+ /* Setup and write the address cycle command */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY address cmd didn't complete\n");
+ status = IXGBE_ERR_PHY;
+ }
+
+ if (status == 0) {
+ /*
+ * Address cycle complete, setup and write the write
+ * command
+ */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.mdio.prtad <<
+ IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /*
+ * Check every 10 usec to see if the address cycle
+ * completed. The MDI Command bit will clear when the
+ * operation is complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY address cmd didn't complete\n");
+ status = IXGBE_ERR_PHY;
+ }
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_phy_link_generic - Set and restart autoneg
+ * @hw: pointer to hardware structure
+ *
+ * Restarts autonegotiation on the PHY and waits for completion.
+ **/
+s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ u32 time_out;
+ u32 max_time_out = 10;
+ u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+ bool autoneg = false;
+ ixgbe_link_speed speed;
+
+ ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ /* Set or unset auto-negotiation 10G advertisement */
+ hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
+ MDIO_MMD_AN,
+ &autoneg_reg);
+
+ autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
+
+ hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
+ MDIO_MMD_AN,
+ autoneg_reg);
+ }
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+ /* Set or unset auto-negotiation 1G advertisement */
+ hw->phy.ops.read_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ MDIO_MMD_AN,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ MDIO_MMD_AN,
+ autoneg_reg);
+ }
+
+ if (speed & IXGBE_LINK_SPEED_100_FULL) {
+ /* Set or unset auto-negotiation 100M advertisement */
+ hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
+ MDIO_MMD_AN,
+ &autoneg_reg);
+
+ autoneg_reg &= ~(ADVERTISE_100FULL |
+ ADVERTISE_100HALF);
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+ autoneg_reg |= ADVERTISE_100FULL;
+
+ hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
+ MDIO_MMD_AN,
+ autoneg_reg);
+ }
+
+ /* Restart PHY autonegotiation and wait for completion */
+ hw->phy.ops.read_reg(hw, MDIO_CTRL1,
+ MDIO_MMD_AN, &autoneg_reg);
+
+ autoneg_reg |= MDIO_AN_CTRL1_RESTART;
+
+ hw->phy.ops.write_reg(hw, MDIO_CTRL1,
+ MDIO_MMD_AN, autoneg_reg);
+
+ /* Wait for autonegotiation to finish */
+ for (time_out = 0; time_out < max_time_out; time_out++) {
+ udelay(10);
+ /* Read the autoneg status to check for completion */
+ status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
+ MDIO_MMD_AN,
+ &autoneg_reg);
+
+ autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
+ if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) {
+ break;
+ }
+ }
+
+ if (time_out == max_time_out) {
+ status = IXGBE_ERR_LINK_SETUP;
+ hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true to wait for autonegotiation to complete
+ **/
+s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+
+ /*
+ * Clear autoneg_advertised and set new values based on input link
+ * speed.
+ */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_100_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+
+ /* Setup link based on the new speed settings */
+ hw->phy.ops.setup_link(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: boolean auto-negotiation value
+ *
+ * Determines the link capabilities by reading the AUTOC register.
+ */
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ s32 status = IXGBE_ERR_LINK_SETUP;
+ u16 speed_ability;
+
+ *speed = 0;
+ *autoneg = true;
+
+ status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
+ &speed_ability);
+
+ if (status == 0) {
+ if (speed_ability & MDIO_SPEED_10G)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (speed_ability & MDIO_PMA_SPEED_1000)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (speed_ability & MDIO_PMA_SPEED_100)
+ *speed |= IXGBE_LINK_SPEED_100_FULL;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_check_phy_link_tnx - Determine link and speed status
+ * @hw: pointer to hardware structure
+ *
+ * Reads the VS1 register to determine if link is up and the current speed for
+ * the PHY.
+ **/
+s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up)
+{
+ s32 status = 0;
+ u32 time_out;
+ u32 max_time_out = 10;
+ u16 phy_link = 0;
+ u16 phy_speed = 0;
+ u16 phy_data = 0;
+
+ /* Initialize speed and link to default case */
+ *link_up = false;
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+ /*
+ * Check current speed and link status of the PHY register.
+ * This is a vendor specific register and may have to
+ * be changed for other copper PHYs.
+ */
+ for (time_out = 0; time_out < max_time_out; time_out++) {
+ udelay(10);
+ status = hw->phy.ops.read_reg(hw,
+ MDIO_STAT1,
+ MDIO_MMD_VEND1,
+ &phy_data);
+ phy_link = phy_data &
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
+ phy_speed = phy_data &
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
+ if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
+ *link_up = true;
+ if (phy_speed ==
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_phy_link_tnx - Set and restart autoneg
+ * @hw: pointer to hardware structure
+ *
+ * Restarts autonegotiation on the PHY and waits for it to complete.
+ **/
+s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ u32 time_out;
+ u32 max_time_out = 10;
+ u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+ bool autoneg = false;
+ ixgbe_link_speed speed;
+
+ ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ /* Set or unset auto-negotiation 10G advertisement */
+ hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
+ MDIO_MMD_AN,
+ &autoneg_reg);
+
+ autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
+
+ hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
+ MDIO_MMD_AN,
+ autoneg_reg);
+ }
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+ /* Set or unset auto-negotiation 1G advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
+ MDIO_MMD_AN,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
+ MDIO_MMD_AN,
+ autoneg_reg);
+ }
+
+ if (speed & IXGBE_LINK_SPEED_100_FULL) {
+ /* Set or unset auto-negotiation 100M advertisement */
+ hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
+ MDIO_MMD_AN,
+ &autoneg_reg);
+
+ autoneg_reg &= ~(ADVERTISE_100FULL |
+ ADVERTISE_100HALF);
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+ autoneg_reg |= ADVERTISE_100FULL;
+
+ hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
+ MDIO_MMD_AN,
+ autoneg_reg);
+ }
+
+ /* Restart PHY autonegotiation and wait for completion */
+ hw->phy.ops.read_reg(hw, MDIO_CTRL1,
+ MDIO_MMD_AN, &autoneg_reg);
+
+ autoneg_reg |= MDIO_AN_CTRL1_RESTART;
+
+ hw->phy.ops.write_reg(hw, MDIO_CTRL1,
+ MDIO_MMD_AN, autoneg_reg);
+
+ /* Wait for autonegotiation to finish */
+ for (time_out = 0; time_out < max_time_out; time_out++) {
+ udelay(10);
+ /* Read the autonegotiation completion status */
+ status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
+ MDIO_MMD_AN,
+ &autoneg_reg);
+
+ autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
+ if (autoneg_reg == MDIO_AN_STAT1_COMPLETE)
+ break;
+ }
+
+ if (time_out == max_time_out) {
+ status = IXGBE_ERR_LINK_SETUP;
+ hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+ **/
+s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
+ u16 *firmware_version)
+{
+ s32 status = 0;
+
+ status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
+ MDIO_MMD_VEND1,
+ firmware_version);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+ **/
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+ u16 *firmware_version)
+{
+ s32 status = 0;
+
+ status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
+ MDIO_MMD_VEND1,
+ firmware_version);
+
+ return status;
+}
+
+/**
+ * ixgbe_reset_phy_nl - Performs a PHY reset
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
+{
+ u16 phy_offset, control, eword, edata, block_crc;
+ bool end_data = false;
+ u16 list_offset, data_offset;
+ u16 phy_data = 0;
+ s32 ret_val = 0;
+ u32 i;
+
+ hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);
+
+ /* reset the PHY and poll for completion */
+ hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
+ (phy_data | MDIO_CTRL1_RESET));
+
+ for (i = 0; i < 100; i++) {
+ hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
+ &phy_data);
+ if ((phy_data & MDIO_CTRL1_RESET) == 0)
+ break;
+ usleep_range(10000, 20000);
+ }
+
+ if ((phy_data & MDIO_CTRL1_RESET) != 0) {
+ hw_dbg(hw, "PHY reset did not complete.\n");
+ ret_val = IXGBE_ERR_PHY;
+ goto out;
+ }
+
+ /* Get init offsets */
+ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
+ &data_offset);
+ if (ret_val != 0)
+ goto out;
+
+ ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
+ data_offset++;
+ while (!end_data) {
+ /*
+ * Read control word from PHY init contents offset
+ */
+ ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
+ control = (eword & IXGBE_CONTROL_MASK_NL) >>
+ IXGBE_CONTROL_SHIFT_NL;
+ edata = eword & IXGBE_DATA_MASK_NL;
+ switch (control) {
+ case IXGBE_DELAY_NL:
+ data_offset++;
+ hw_dbg(hw, "DELAY: %d MS\n", edata);
+ usleep_range(edata * 1000, edata * 2000);
+ break;
+ case IXGBE_DATA_NL:
+ hw_dbg(hw, "DATA:\n");
+ data_offset++;
+ hw->eeprom.ops.read(hw, data_offset++,
+ &phy_offset);
+ for (i = 0; i < edata; i++) {
+ hw->eeprom.ops.read(hw, data_offset, &eword);
+ hw->phy.ops.write_reg(hw, phy_offset,
+ MDIO_MMD_PMAPMD, eword);
+ hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
+ phy_offset);
+ data_offset++;
+ phy_offset++;
+ }
+ break;
+ case IXGBE_CONTROL_NL:
+ data_offset++;
+ hw_dbg(hw, "CONTROL:\n");
+ if (edata == IXGBE_CONTROL_EOL_NL) {
+ hw_dbg(hw, "EOL\n");
+ end_data = true;
+ } else if (edata == IXGBE_CONTROL_SOL_NL) {
+ hw_dbg(hw, "SOL\n");
+ } else {
+ hw_dbg(hw, "Bad control value\n");
+ ret_val = IXGBE_ERR_PHY;
+ goto out;
+ }
+ break;
+ default:
+ hw_dbg(hw, "Bad control type\n");
+ ret_val = IXGBE_ERR_PHY;
+ goto out;
+ }
+ }
+
+out:
+ return ret_val;
+}
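(Editor's sketch, not part of the patch.) The init block parsed above packs an opcode and a payload into each EEPROM word, split with IXGBE_CONTROL_MASK_NL/IXGBE_CONTROL_SHIFT_NL and IXGBE_DATA_MASK_NL from ixgbe_type.h, which is outside this hunk; the mask values below are assumptions for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    #define CONTROL_MASK_NL  0xF000u  /* assumed: top nibble is the opcode */
    #define CONTROL_SHIFT_NL 12
    #define DATA_MASK_NL     0x0FFFu  /* assumed: low 12 bits are payload */

    int main(void)
    {
            uint16_t eword = 0x1064;  /* hypothetical word read from EEPROM */
            uint16_t control = (eword & CONTROL_MASK_NL) >> CONTROL_SHIFT_NL;
            uint16_t edata = eword & DATA_MASK_NL;

            printf("control opcode %u, payload 0x%03x\n", control, edata);
            return 0;
    }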
+
+/**
+ * ixgbe_identify_sfp_module_generic - Identifies SFP modules
+ * @hw: pointer to hardware structure
+ *
+ * Searches for and identifies the SFP module and assigns appropriate PHY type.
+ **/
+s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u32 vendor_oui = 0;
+ enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
+ u8 identifier = 0;
+ u8 comp_codes_1g = 0;
+ u8 comp_codes_10g = 0;
+ u8 oui_bytes[3] = {0, 0, 0};
+ u8 cable_tech = 0;
+ u8 cable_spec = 0;
+ u16 enforce_sfp = 0;
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ goto out;
+ }
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_IDENTIFIER,
+ &identifier);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ /* LAN ID is needed for sfp_type determination */
+ hw->mac.ops.set_lan_id(hw);
+
+ if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ } else {
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_1GBE_COMP_CODES,
+ &comp_codes_1g);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_10GBE_COMP_CODES,
+ &comp_codes_10g);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_CABLE_TECHNOLOGY,
+ &cable_tech);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ /* ID Module
+ * =========
+ * 0 SFP_DA_CU
+ * 1 SFP_SR
+ * 2 SFP_LR
+ * 3 SFP_DA_CORE0 - 82599-specific
+ * 4 SFP_DA_CORE1 - 82599-specific
+ * 5 SFP_SR/LR_CORE0 - 82599-specific
+ * 6 SFP_SR/LR_CORE1 - 82599-specific
+ * 7 SFP_act_lmt_DA_CORE0 - 82599-specific
+ * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
+ * 9 SFP_1g_cu_CORE0 - 82599-specific
+ * 10 SFP_1g_cu_CORE1 - 82599-specific
+ */
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+ hw->phy.sfp_type = ixgbe_sfp_type_sr;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+ hw->phy.sfp_type = ixgbe_sfp_type_lr;
+ else
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+ } else if (hw->mac.type == ixgbe_mac_82599EB) {
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_cu_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_cu_core1;
+ } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
+ hw->phy.ops.read_i2c_eeprom(
+ hw, IXGBE_SFF_CABLE_SPEC_COMP,
+ &cable_spec);
+ if (cable_spec &
+ IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core1;
+ } else {
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_unknown;
+ }
+ } else if (comp_codes_10g &
+ (IXGBE_SFF_10GBASESR_CAPABLE |
+ IXGBE_SFF_10GBASELR_CAPABLE)) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_srlr_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_srlr_core1;
+ } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_cu_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_cu_core1;
+ } else {
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+ }
+ }
+
+ if (hw->phy.sfp_type != stored_sfp_type)
+ hw->phy.sfp_setup_needed = true;
+
+ /* Determine if the SFP+ PHY is dual speed or not. */
+ hw->phy.multispeed_fiber = false;
+ if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
+ ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+ hw->phy.multispeed_fiber = true;
+
+ /* Determine PHY vendor */
+ if (hw->phy.type != ixgbe_phy_nl) {
+ hw->phy.id = identifier;
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_VENDOR_OUI_BYTE0,
+ &oui_bytes[0]);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_VENDOR_OUI_BYTE1,
+ &oui_bytes[1]);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_VENDOR_OUI_BYTE2,
+ &oui_bytes[2]);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ vendor_oui =
+ ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
+ (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
+ (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
+
+ switch (vendor_oui) {
+ case IXGBE_SFF_VENDOR_OUI_TYCO:
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+ hw->phy.type =
+ ixgbe_phy_sfp_passive_tyco;
+ break;
+ case IXGBE_SFF_VENDOR_OUI_FTL:
+ if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+ hw->phy.type = ixgbe_phy_sfp_ftl_active;
+ else
+ hw->phy.type = ixgbe_phy_sfp_ftl;
+ break;
+ case IXGBE_SFF_VENDOR_OUI_AVAGO:
+ hw->phy.type = ixgbe_phy_sfp_avago;
+ break;
+ case IXGBE_SFF_VENDOR_OUI_INTEL:
+ hw->phy.type = ixgbe_phy_sfp_intel;
+ break;
+ default:
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+ hw->phy.type =
+ ixgbe_phy_sfp_passive_unknown;
+ else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+ hw->phy.type =
+ ixgbe_phy_sfp_active_unknown;
+ else
+ hw->phy.type = ixgbe_phy_sfp_unknown;
+ break;
+ }
+ }
+
+ /* Allow any DA cable vendor */
+ if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
+ IXGBE_SFF_DA_ACTIVE_CABLE)) {
+ status = 0;
+ goto out;
+ }
+
+ /* Verify supported 1G SFP modules */
+ if (comp_codes_10g == 0 &&
+ !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) {
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* Anything else 82598-based is supported */
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ status = 0;
+ goto out;
+ }
+
+ hw->mac.ops.get_device_caps(hw, &enforce_sfp);
+ if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
+ !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) {
+ /* Make sure we're a supported PHY type */
+ if (hw->phy.type == ixgbe_phy_sfp_intel) {
+ status = 0;
+ } else {
+ hw_dbg(hw, "SFP+ module not supported\n");
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+ } else {
+ status = 0;
+ }
+ }
+
+out:
+ return status;
+
+err_read_i2c_eeprom:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ if (hw->phy.type != ixgbe_phy_nl) {
+ hw->phy.id = 0;
+ hw->phy.type = ixgbe_phy_unknown;
+ }
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+}
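(Editor's sketch, not part of the patch.) The vendor check above builds a 32-bit OUI from the three SFF-8472 bytes at EEPROM offsets 0x25-0x27, using the byte0/byte1/byte2 shifts of 24/16/8 defined in ixgbe_phy.h below. A standalone restatement with a hypothetical Finisar module:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t oui_bytes[3] = { 0x00, 0x90, 0x65 };  /* hypothetical */
            uint32_t vendor_oui = ((uint32_t)oui_bytes[0] << 24) |
                                  ((uint32_t)oui_bytes[1] << 16) |
                                  ((uint32_t)oui_bytes[2] << 8);

            /* 0x00906500 matches IXGBE_SFF_VENDOR_OUI_FTL in ixgbe_phy.h */
            printf("vendor OUI: 0x%08x\n", (unsigned)vendor_oui);
            return 0;
    }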
+
+/**
+ * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
+ * @hw: pointer to hardware structure
+ * @list_offset: offset to the SFP ID list
+ * @data_offset: offset to the SFP data block
+ *
+ * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if
+ * so it returns the offsets to the phy init sequence block.
+ **/
+s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+ u16 *list_offset,
+ u16 *data_offset)
+{
+ u16 sfp_id;
+ u16 sfp_type = hw->phy.sfp_type;
+
+ if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+ if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+
+ if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
+ (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+ /*
+ * Active limiting cables and 1G PHYs must be initialized as
+ * SR modules
+ */
+ if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_cu_core0)
+ sfp_type = ixgbe_sfp_type_srlr_core0;
+ else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_cu_core1)
+ sfp_type = ixgbe_sfp_type_srlr_core1;
+
+ /* Read offset to PHY init contents */
+ hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
+
+ if ((!*list_offset) || (*list_offset == 0xFFFF))
+ return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+
+ /* Shift offset to first ID word */
+ (*list_offset)++;
+
+ /*
+ * Find the matching SFP ID in the EEPROM
+ * and program the init sequence
+ */
+ hw->eeprom.ops.read(hw, *list_offset, &sfp_id);
+
+ while (sfp_id != IXGBE_PHY_INIT_END_NL) {
+ if (sfp_id == sfp_type) {
+ (*list_offset)++;
+ hw->eeprom.ops.read(hw, *list_offset, data_offset);
+ if ((!*data_offset) || (*data_offset == 0xFFFF)) {
+ hw_dbg(hw, "SFP+ module not supported\n");
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ } else {
+ break;
+ }
+ } else {
+ (*list_offset) += 2;
+ if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
+ return IXGBE_ERR_PHY;
+ }
+ }
+
+ if (sfp_id == IXGBE_PHY_INIT_END_NL) {
+ hw_dbg(hw, "No matching SFP+ module found\n");
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+
+ return 0;
+}
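(Editor's sketch, not part of the patch.) The walk above implies a simple EEPROM layout: starting at *list_offset, words come in (sfp_id, data_offset) pairs with a stride of two, terminated by IXGBE_PHY_INIT_END_NL. A sketch of that traversal over a fabricated word array; the terminator value is an assumption, the real one is in ixgbe_type.h:

    #include <stdint.h>
    #include <stdio.h>

    #define PHY_INIT_END_NL 0xFFFF  /* assumed terminator value */

    static const uint16_t eeprom[] = {
            /* sfp_id, data_offset pairs */
            3, 0x0040,          /* hypothetical: da_cu -> init data at 0x40 */
            5, 0x0060,          /* hypothetical: srlr_core0 */
            PHY_INIT_END_NL,
    };

    int main(void)
    {
            uint16_t i = 0, wanted = 5;

            while (eeprom[i] != PHY_INIT_END_NL) {
                    if (eeprom[i] == wanted) {
                            printf("init data offset 0x%04x\n", eeprom[i + 1]);
                            return 0;
                    }
                    i += 2;  /* skip to the next (id, offset) pair */
            }
            printf("no matching SFP+ module\n");
            return 1;
    }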
+
+/**
+ * ixgbe_read_i2c_eeprom_generic - Reads an 8 bit EEPROM byte over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs a byte read operation from the SFP module's EEPROM over the I2C
+ * interface.
+ **/
+s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data)
+{
+ return hw->phy.ops.read_i2c_byte(hw, byte_offset,
+ IXGBE_I2C_EEPROM_DEV_ADDR,
+ eeprom_data);
+}
+
+/**
+ * ixgbe_write_i2c_eeprom_generic - Writes an 8 bit EEPROM byte over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to write
+ * @eeprom_data: value to write
+ *
+ * Performs a byte write operation to the SFP module's EEPROM over the I2C
+ * interface.
+ **/
+s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 eeprom_data)
+{
+ return hw->phy.ops.write_i2c_byte(hw, byte_offset,
+ IXGBE_I2C_EEPROM_DEV_ADDR,
+ eeprom_data);
+}
+
+/**
+ * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: I2C bus address of the target device
+ * @data: value read
+ *
+ * Performs a byte read operation from the SFP module's EEPROM over the I2C
+ * interface at a specified device address.
+ **/
+s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ s32 status = 0;
+ u32 max_retry = 10;
+ u32 retry = 0;
+ u16 swfw_mask = 0;
+ bool nack = true;
+ *data = 0;
+
+ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+ swfw_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ swfw_mask = IXGBE_GSSR_PHY0_SM;
+
+ do {
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) {
+ status = IXGBE_ERR_SWFW_SYNC;
+ goto read_byte_out;
+ }
+
+ ixgbe_i2c_start(hw);
+
+ /* Device Address and write indication */
+ status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != 0)
+ goto fail;
+
+ ixgbe_i2c_start(hw);
+
+ /* Device Address and read indication */
+ status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1));
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_clock_in_i2c_byte(hw, data);
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_clock_out_i2c_bit(hw, nack);
+ if (status != 0)
+ goto fail;
+
+ ixgbe_i2c_stop(hw);
+ break;
+
+fail:
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msleep(100);
+ ixgbe_i2c_bus_clear(hw);
+ retry++;
+ if (retry < max_retry)
+ hw_dbg(hw, "I2C byte read error - Retrying.\n");
+ else
+ hw_dbg(hw, "I2C byte read error.\n");
+
+ } while (retry < max_retry);
+
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+read_byte_out:
+ return status;
+}
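(Editor's sketch, not part of the patch.) The read above is the classic two-phase I2C random read: address the device with the write bit clear to latch byte_offset, issue a repeated START, then re-address with the read bit set (dev_addr | 0x1). For the SFP EEPROM at IXGBE_I2C_EEPROM_DEV_ADDR that gives:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t dev_addr = 0xA0;  /* IXGBE_I2C_EEPROM_DEV_ADDR */

            printf("write-phase address byte: 0x%02x\n", dev_addr);        /* 0xA0 */
            printf("read-phase address byte:  0x%02x\n", dev_addr | 0x1);  /* 0xA1 */
            return 0;
    }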
+
+/**
+ * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: I2C bus address of the target device
+ * @data: value to write
+ *
+ * Performs a byte write operation to the SFP module's EEPROM over the I2C
+ * interface at a specified device address.
+ **/
+s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ s32 status = 0;
+ u32 max_retry = 1;
+ u32 retry = 0;
+ u16 swfw_mask = 0;
+
+ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+ swfw_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ swfw_mask = IXGBE_GSSR_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) {
+ status = IXGBE_ERR_SWFW_SYNC;
+ goto write_byte_out;
+ }
+
+ do {
+ ixgbe_i2c_start(hw);
+
+ status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_clock_out_i2c_byte(hw, data);
+ if (status != 0)
+ goto fail;
+
+ status = ixgbe_get_i2c_ack(hw);
+ if (status != 0)
+ goto fail;
+
+ ixgbe_i2c_stop(hw);
+ break;
+
+fail:
+ ixgbe_i2c_bus_clear(hw);
+ retry++;
+ if (retry < max_retry)
+ hw_dbg(hw, "I2C byte write error - Retrying.\n");
+ else
+ hw_dbg(hw, "I2C byte write error.\n");
+ } while (retry < max_retry);
+
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+write_byte_out:
+ return status;
+}
+
+/**
+ * ixgbe_i2c_start - Sets I2C start condition
+ * @hw: pointer to hardware structure
+ *
+ * Sets I2C start condition (High -> Low on SDA while SCL is High)
+ **/
+static void ixgbe_i2c_start(struct ixgbe_hw *hw)
+{
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+
+ /* Start condition must begin with data and clock high */
+ ixgbe_set_i2c_data(hw, &i2cctl, 1);
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Setup time for start condition (4.7us) */
+ udelay(IXGBE_I2C_T_SU_STA);
+
+ ixgbe_set_i2c_data(hw, &i2cctl, 0);
+
+ /* Hold time for start condition (4us) */
+ udelay(IXGBE_I2C_T_HD_STA);
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ udelay(IXGBE_I2C_T_LOW);
+}
+
+/**
+ * ixgbe_i2c_stop - Sets I2C stop condition
+ * @hw: pointer to hardware structure
+ *
+ * Sets I2C stop condition (Low -> High on SDA while SCL is High)
+ **/
+static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
+{
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+
+ /* Stop condition must begin with data low and clock high */
+ ixgbe_set_i2c_data(hw, &i2cctl, 0);
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Setup time for stop condition (4us) */
+ udelay(IXGBE_I2C_T_SU_STO);
+
+ ixgbe_set_i2c_data(hw, &i2cctl, 1);
+
+ /* bus free time between stop and start (4.7us)*/
+ udelay(IXGBE_I2C_T_BUF);
+}
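(Editor's sketch, not part of the patch.) START and STOP bracket every transfer: both are SDA transitions while SCL is held high, separated by the setup/hold times defined in ixgbe_phy.h (values in microseconds). A stubbed-out sketch that only prints the line transitions so the ordering is visible:

    #include <stdio.h>

    #define T_SU_STA 5      /* start setup, ~4.7us (IXGBE_I2C_T_SU_STA) */
    #define T_HD_STA 4      /* start hold, ~4.0us (IXGBE_I2C_T_HD_STA) */
    #define T_SU_STO 4      /* stop setup (IXGBE_I2C_T_SU_STO) */
    #define T_BUF    5      /* bus free between STOP and START (IXGBE_I2C_T_BUF) */

    static void sda(int v) { printf("SDA=%d\n", v); }
    static void scl(int v) { printf("SCL=%d\n", v); }
    static void wait_us(int us) { printf("  (wait %dus)\n", us); }

    int main(void)
    {
            /* START: SDA falls while SCL is high */
            sda(1); scl(1); wait_us(T_SU_STA);
            sda(0); wait_us(T_HD_STA);
            scl(0);

            /* STOP: SDA rises while SCL is high */
            sda(0); scl(1); wait_us(T_SU_STO);
            sda(1); wait_us(T_BUF);
            return 0;
    }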
+
+/**
+ * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C
+ * @hw: pointer to hardware structure
+ * @data: pointer to the byte read
+ *
+ * Clocks in one data byte via the I2C data/clock lines
+ **/
+static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
+{
+ s32 i;
+ bool bit = false;
+
+ for (i = 7; i >= 0; i--) {
+ ixgbe_clock_in_i2c_bit(hw, &bit);
+ *data |= bit << i;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C
+ * @hw: pointer to hardware structure
+ * @data: data byte clocked out
+ *
+ * Clocks out one byte data via I2C data/clock
+ **/
+static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
+{
+ s32 status = 0;
+ s32 i;
+ u32 i2cctl;
+ bool bit = false;
+
+ for (i = 7; i >= 0; i--) {
+ bit = (data >> i) & 0x1;
+ status = ixgbe_clock_out_i2c_bit(hw, bit);
+
+ if (status != 0)
+ break;
+ }
+
+ /* Release SDA line (set high) */
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ i2cctl |= IXGBE_I2C_DATA_OUT;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_i2c_ack - Polls for I2C ACK
+ * @hw: pointer to hardware structure
+ *
+ * Raises SCL and polls SDA for the target's ACK (SDA pulled low)
+ **/
+static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ u32 i = 0;
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 timeout = 10;
+ bool ack = true;
+
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ udelay(IXGBE_I2C_T_HIGH);
+
+ /* Poll for ACK. Note that ACK in I2C spec is
+ * transition from 1 to 0 */
+ for (i = 0; i < timeout; i++) {
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ ack = ixgbe_get_i2c_data(&i2cctl);
+
+ udelay(1);
+ if (ack == 0)
+ break;
+ }
+
+ if (ack == 1) {
+ hw_dbg(hw, "I2C ack was not received.\n");
+ status = IXGBE_ERR_I2C;
+ }
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ udelay(IXGBE_I2C_T_LOW);
+
+ return status;
+}
+
+/**
+ * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
+ * @hw: pointer to hardware structure
+ * @data: read data value
+ *
+ * Clocks in one bit via I2C data/clock
+ **/
+static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
+{
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ udelay(IXGBE_I2C_T_HIGH);
+
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ *data = ixgbe_get_i2c_data(&i2cctl);
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ udelay(IXGBE_I2C_T_LOW);
+
+ return 0;
+}
+
+/**
+ * ixgbe_clock_out_i2c_bit - Clocks out one bit via I2C data/clock
+ * @hw: pointer to hardware structure
+ * @data: data value to write
+ *
+ * Clocks out one bit via I2C data/clock
+ **/
+static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
+{
+ s32 status;
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+
+ status = ixgbe_set_i2c_data(hw, &i2cctl, data);
+ if (status == 0) {
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ udelay(IXGBE_I2C_T_HIGH);
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us.
+ * This also takes care of the data hold time.
+ */
+ udelay(IXGBE_I2C_T_LOW);
+ } else {
+ status = IXGBE_ERR_I2C;
+ hw_dbg(hw, "I2C data was not set to %X\n", data);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_raise_i2c_clk - Raises the I2C SCL clock
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Raises the I2C clock line '0'->'1'
+ **/
+static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
+{
+ *i2cctl |= IXGBE_I2C_CLK_OUT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* SCL rise time (1000ns) */
+ udelay(IXGBE_I2C_T_RISE);
+}
+
+/**
+ * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Lowers the I2C clock line '1'->'0'
+ **/
+static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
+{
+ *i2cctl &= ~IXGBE_I2C_CLK_OUT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* SCL fall time (300ns) */
+ udelay(IXGBE_I2C_T_FALL);
+}
+
+/**
+ * ixgbe_set_i2c_data - Sets the I2C data bit
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ * @data: I2C data value (0 or 1) to set
+ *
+ * Sets the I2C data bit
+ **/
+static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
+{
+ s32 status = 0;
+
+ if (data)
+ *i2cctl |= IXGBE_I2C_DATA_OUT;
+ else
+ *i2cctl &= ~IXGBE_I2C_DATA_OUT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
+ udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
+
+ /* Verify data was set correctly */
+ *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ if (data != ixgbe_get_i2c_data(i2cctl)) {
+ status = IXGBE_ERR_I2C;
+ hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_i2c_data - Reads the I2C SDA data bit
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Returns the I2C data bit value
+ **/
+static bool ixgbe_get_i2c_data(u32 *i2cctl)
+{
+ bool data;
+
+ if (*i2cctl & IXGBE_I2C_DATA_IN)
+ data = 1;
+ else
+ data = 0;
+
+ return data;
+}
+
+/**
+ * ixgbe_i2c_bus_clear - Clears the I2C bus
+ * @hw: pointer to hardware structure
+ *
+ * Clears the I2C bus by sending nine clock pulses.
+ * Used when data line is stuck low.
+ **/
+static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
+{
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ u32 i;
+
+ ixgbe_i2c_start(hw);
+
+ ixgbe_set_i2c_data(hw, &i2cctl, 1);
+
+ for (i = 0; i < 9; i++) {
+ ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+ /* Min high period of clock is 4us */
+ udelay(IXGBE_I2C_T_HIGH);
+
+ ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+ /* Min low period of clock is 4.7us*/
+ udelay(IXGBE_I2C_T_LOW);
+ }
+
+ ixgbe_i2c_start(hw);
+
+ /* Put the i2c bus back to default state */
+ ixgbe_i2c_stop(hw);
+}
+
+/**
+ * ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
+ * @hw: pointer to hardware structure
+ *
+ * Checks if the LASI temp alarm status was triggered due to overtemp
+ **/
+s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ u16 phy_data = 0;
+
+ if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
+ goto out;
+
+ /* Check that the LASI temp alarm status was triggered */
+ hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
+ MDIO_MMD_PMAPMD, &phy_data);
+
+ if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
+ goto out;
+
+ status = IXGBE_ERR_OVERTEMP;
+out:
+ return status;
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
new file mode 100644
index 000000000000..197bdd13106a
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -0,0 +1,131 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_PHY_H_
+#define _IXGBE_PHY_H_
+
+#include "ixgbe_type.h"
+#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
+
+/* EEPROM byte offsets */
+#define IXGBE_SFF_IDENTIFIER 0x0
+#define IXGBE_SFF_IDENTIFIER_SFP 0x3
+#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
+#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
+#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
+#define IXGBE_SFF_1GBE_COMP_CODES 0x6
+#define IXGBE_SFF_10GBE_COMP_CODES 0x3
+#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
+#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
+
+/* Bitmasks */
+#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
+#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
+#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
+#define IXGBE_SFF_1GBASET_CAPABLE 0x8
+#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
+#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_I2C_EEPROM_READ_MASK 0x100
+#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
+#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
+#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
+#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
+#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
+
+/* Flow control defines */
+#define IXGBE_TAF_SYM_PAUSE 0x400
+#define IXGBE_TAF_ASM_PAUSE 0x800
+
+/* Bit-shift macros */
+#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24
+#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16
+#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8
+
+/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
+#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600
+#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500
+#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
+#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100
+
+/* I2C SDA and SCL timing parameters for standard mode */
+#define IXGBE_I2C_T_HD_STA 4
+#define IXGBE_I2C_T_LOW 5
+#define IXGBE_I2C_T_HIGH 4
+#define IXGBE_I2C_T_SU_STA 5
+#define IXGBE_I2C_T_HD_DATA 5
+#define IXGBE_I2C_T_SU_DATA 1
+#define IXGBE_I2C_T_RISE 1
+#define IXGBE_I2C_T_FALL 1
+#define IXGBE_I2C_T_SU_STO 4
+#define IXGBE_I2C_T_BUF 5
+
+#define IXGBE_TN_LASI_STATUS_REG 0x9005
+#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
+
+s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
+s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
+s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data);
+s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data);
+s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
+
+/* PHY specific */
+s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up);
+s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
+s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
+ u16 *firmware_version);
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+ u16 *firmware_version);
+
+s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
+s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+ u16 *list_offset,
+ u16 *data_offset);
+s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
+s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
+s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data);
+s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 eeprom_data);
+#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
new file mode 100644
index 000000000000..468ddd0873da
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -0,0 +1,892 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#ifdef NETIF_F_HW_VLAN_TX
+#include <linux/if_vlan.h>
+#endif
+
+#include "ixgbe.h"
+#include "ixgbe_type.h"
+#include "ixgbe_sriov.h"
+
+#ifdef CONFIG_PCI_IOV
+static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pci_dev *pvfdev;
+ u16 vf_devfn = 0;
+ int device_id;
+ int vfs_found = 0;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ device_id = IXGBE_DEV_ID_82599_VF;
+ break;
+ case ixgbe_mac_X540:
+ device_id = IXGBE_DEV_ID_X540_VF;
+ break;
+ default:
+ device_id = 0;
+ break;
+ }
+
+ vf_devfn = pdev->devfn + 0x80;
+ pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
+ while (pvfdev) {
+ if (pvfdev->devfn == vf_devfn)
+ vfs_found++;
+ vf_devfn += 2;
+ pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
+ device_id, pvfdev);
+ }
+
+ return vfs_found;
+}
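(Editor's sketch, not part of the patch.) The scan above relies on fixed devfn arithmetic: on these devices the first VF appears 0x80 devfn slots past the PF and consecutive VFs are spaced two apart. A standalone sketch with a hypothetical PF devfn:

    #include <stdio.h>

    int main(void)
    {
            unsigned int pf_devfn = 0x00;  /* hypothetical PF at 00:00.0 */
            unsigned int vfn;

            for (vfn = 0; vfn < 4; vfn++)
                    printf("VF %u -> devfn 0x%02x\n",
                           vfn, pf_devfn + 0x80 + vfn * 2);
            return 0;
    }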
+
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
+ const struct ixgbe_info *ii)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err = 0;
+ int num_vf_macvlans, i;
+ struct vf_macvlans *mv_list;
+ int pre_existing_vfs = 0;
+
+ pre_existing_vfs = ixgbe_find_enabled_vfs(adapter);
+ if (!pre_existing_vfs && !adapter->num_vfs)
+ return;
+
+ /* If there are pre-existing VFs then we have to force
+ * use of that many because they were not deleted the last
+ * time someone removed the PF driver. That would have
+ * been because they were allocated to guest VMs and can't
+ * be removed. Go ahead and just re-enable the old amount.
+ * If the user wants to change the number of VFs they can
+ * use ethtool while making sure no VFs are allocated to
+ * guest VMs... i.e. the right way.
+ */
+ if (pre_existing_vfs) {
+ adapter->num_vfs = pre_existing_vfs;
+ dev_warn(&adapter->pdev->dev, "Virtual Functions already "
+ "enabled for this device - Please reload all "
+ "VF drivers to avoid spoofed packet errors\n");
+ } else {
+ err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ }
+ if (err) {
+ e_err(probe, "Failed to enable PCI sriov: %d\n", err);
+ goto err_novfs;
+ }
+ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
+
+ e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
+
+ num_vf_macvlans = hw->mac.num_rar_entries -
+ (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
+
+ adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
+ sizeof(struct vf_macvlans),
+ GFP_KERNEL);
+ if (mv_list) {
+ /* Initialize list of VF macvlans */
+ INIT_LIST_HEAD(&adapter->vf_mvs.l);
+ for (i = 0; i < num_vf_macvlans; i++) {
+ mv_list->vf = -1;
+ mv_list->free = true;
+ mv_list->rar_entry = hw->mac.num_rar_entries -
+ (i + adapter->num_vfs + 1);
+ list_add(&mv_list->l, &adapter->vf_mvs.l);
+ mv_list++;
+ }
+ }
+
+ /* If call to enable VFs succeeded then allocate memory
+ * for per VF control structures.
+ */
+ adapter->vfinfo =
+ kcalloc(adapter->num_vfs,
+ sizeof(struct vf_data_storage), GFP_KERNEL);
+ if (adapter->vfinfo) {
+ /* Now that we're sure SR-IOV is enabled
+ * and memory allocated set up the mailbox parameters
+ */
+ ixgbe_init_mbx_params_pf(hw);
+ memcpy(&hw->mbx.ops, ii->mbx_ops,
+ sizeof(hw->mbx.ops));
+
+ /* Disable RSC when in SR-IOV mode */
+ adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
+ IXGBE_FLAG2_RSC_ENABLED);
+ return;
+ }
+
+ /* Oh oh */
+ e_err(probe, "Unable to allocate memory for VF Data Storage - "
+ "SRIOV disabled\n");
+ pci_disable_sriov(adapter->pdev);
+
+err_novfs:
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+ adapter->num_vfs = 0;
+}
+#endif /* #ifdef CONFIG_PCI_IOV */
+
+void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gcr;
+ u32 gpie;
+ u32 vmdctl;
+ int i;
+
+#ifdef CONFIG_PCI_IOV
+ /* disable iov and allow time for transactions to clear */
+ pci_disable_sriov(adapter->pdev);
+#endif
+
+ /* turn off device IOV mode */
+ gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ gcr &= ~(IXGBE_GCR_EXT_SRIOV);
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+ /* set default pool back to 0 */
+ vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* take a breather then clean up driver data */
+ msleep(100);
+
+ /* Release reference to VF devices */
+ for (i = 0; i < adapter->num_vfs; i++) {
+ if (adapter->vfinfo[i].vfdev)
+ pci_dev_put(adapter->vfinfo[i].vfdev);
+ }
+ kfree(adapter->vfinfo);
+ kfree(adapter->mv_list);
+ adapter->vfinfo = NULL;
+
+ adapter->num_vfs = 0;
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+}
+
+static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
+ int entries, u16 *hash_list, u32 vf)
+{
+ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ u32 vector_bit;
+ u32 vector_reg;
+ u32 mta_reg;
+
+ /* only so many hash values supported */
+ entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
+
+ /*
+ * salt away the number of multicast addresses assigned
+ * to this VF for later use to restore when the PF multicast
+ * list changes
+ */
+ vfinfo->num_vf_mc_hashes = entries;
+
+ /*
+ * VFs are limited to using the MTA hash table for their multicast
+ * addresses
+ */
+ for (i = 0; i < entries; i++) {
+ vfinfo->vf_mc_hashes[i] = hash_list[i];
+ }
+
+ for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
+ vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
+ vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
+ mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
+ mta_reg |= (1 << vector_bit);
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+ }
+
+ return 0;
+}
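(Editor's sketch, not part of the patch.) The hash handling above splits each 12-bit multicast hash into an MTA register index (bits 11:5, selecting one of 128 32-bit registers) and a bit position within that register (bits 4:0). A worked sketch with a hypothetical hash value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t hash = 0x0ABC;                    /* hypothetical */
            uint32_t vector_reg = (hash >> 5) & 0x7F;  /* MTA register index: 85 */
            uint32_t vector_bit = hash & 0x1F;         /* bit within register: 28 */

            printf("MTA[%u] |= 1 << %u\n", vector_reg, vector_bit);
            return 0;
    }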
+
+static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct list_head *pos;
+ struct vf_macvlans *entry;
+
+ list_for_each(pos, &adapter->vf_mvs.l) {
+ entry = list_entry(pos, struct vf_macvlans, l);
+ if (entry->free == false)
+ hw->mac.ops.set_rar(hw, entry->rar_entry,
+ entry->vf_macvlan,
+ entry->vf, IXGBE_RAH_AV);
+ }
+}
+
+void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct vf_data_storage *vfinfo;
+ int i, j;
+ u32 vector_bit;
+ u32 vector_reg;
+ u32 mta_reg;
+
+ for (i = 0; i < adapter->num_vfs; i++) {
+ vfinfo = &adapter->vfinfo[i];
+ for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
+ hw->addr_ctrl.mta_in_use++;
+ vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
+ vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
+ mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
+ mta_reg |= (1 << vector_bit);
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+ }
+ }
+
+ /* Restore any VF macvlans */
+ ixgbe_restore_vf_macvlans(adapter);
+}
+
+static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
+ u32 vf)
+{
+ return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
+}
+
+static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int new_mtu = msgbuf[1];
+ u32 max_frs;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* Only X540 supports jumbo frames in IOV mode */
+ if (adapter->hw.mac.type != ixgbe_mac_X540)
+ return;
+
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
+ e_err(drv, "VF mtu %d out of range\n", new_mtu);
+ return;
+ }
+
+ max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
+ IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
+ if (max_frs < new_mtu) {
+ max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
+ }
+
+ e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
+}
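(Editor's sketch, not part of the patch.) The range check above converts MTU to wire frame size by adding the 14-byte Ethernet header and 4-byte FCS, so the standard 1500-byte MTU maps to a 1518-byte frame:

    #include <stdio.h>

    #define ETH_HLEN    14  /* Ethernet header, as in <linux/if_ether.h> */
    #define ETH_FCS_LEN  4  /* frame check sequence */

    int main(void)
    {
            int new_mtu = 1500;  /* standard MTU */
            int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

            printf("mtu %d -> max frame %d bytes\n", new_mtu, max_frame);  /* 1518 */
            return 0;
    }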
+
+static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
+{
+ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+ vmolr |= (IXGBE_VMOLR_ROMPE |
+ IXGBE_VMOLR_BAM);
+ if (aupe)
+ vmolr |= IXGBE_VMOLR_AUPE;
+ else
+ vmolr &= ~IXGBE_VMOLR_AUPE;
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+}
+
+static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (vid)
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
+ (vid | IXGBE_VMVIR_VLANA_DEFAULT));
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+}
+
+static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+
+ /* reset offloads to defaults */
+ if (adapter->vfinfo[vf].pf_vlan) {
+ ixgbe_set_vf_vlan(adapter, true,
+ adapter->vfinfo[vf].pf_vlan, vf);
+ ixgbe_set_vmvir(adapter,
+ (adapter->vfinfo[vf].pf_vlan |
+ (adapter->vfinfo[vf].pf_qos <<
+ VLAN_PRIO_SHIFT)), vf);
+ ixgbe_set_vmolr(hw, vf, false);
+ } else {
+ ixgbe_set_vmvir(adapter, 0, vf);
+ ixgbe_set_vmolr(hw, vf, true);
+ }
+
+ /* reset multicast table array for vf */
+ adapter->vfinfo[vf].num_vf_mc_hashes = 0;
+
+ /* Flush and reset the mta with the new values */
+ ixgbe_set_rx_mode(adapter->netdev);
+
+ hw->mac.ops.clear_rar(hw, rar_entry);
+}
+
+static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
+ int vf, unsigned char *mac_addr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+
+ memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+ hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+
+ return 0;
+}
+
+static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
+ int vf, int index, unsigned char *mac_addr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct list_head *pos;
+ struct vf_macvlans *entry;
+
+ if (index <= 1) {
+ list_for_each(pos, &adapter->vf_mvs.l) {
+ entry = list_entry(pos, struct vf_macvlans, l);
+ if (entry->vf == vf) {
+ entry->vf = -1;
+ entry->free = true;
+ entry->is_macvlan = false;
+ hw->mac.ops.clear_rar(hw, entry->rar_entry);
+ }
+ }
+ }
+
+ /*
+ * If index was zero then we were asked to clear the uc list
+ * for the VF. We're done.
+ */
+ if (!index)
+ return 0;
+
+ entry = NULL;
+
+ list_for_each(pos, &adapter->vf_mvs.l) {
+ entry = list_entry(pos, struct vf_macvlans, l);
+ if (entry->free)
+ break;
+ }
+
+ /*
+ * If we traversed the entire list and didn't find a free entry
+ * then we're out of space on the RAR table. Also entry may
+ * be NULL because the original memory allocation for the list
+ * failed, which is not fatal but does mean we can't support
+ * VF requests for MACVLAN because we couldn't allocate
+ * memory for the list management required.
+ */
+ if (!entry || !entry->free)
+ return -ENOSPC;
+
+ entry->free = false;
+ entry->is_macvlan = true;
+ entry->vf = vf;
+ memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
+
+ hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+
+ return 0;
+}
+
+int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter)
+{
+ int i;
+ for (i = 0; i < adapter->num_vfs; i++) {
+ if (adapter->vfinfo[i].vfdev->dev_flags &
+ PCI_DEV_FLAGS_ASSIGNED)
+ return true;
+ }
+ return false;
+}
+
+int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
+{
+ unsigned char vf_mac_addr[6];
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ unsigned int vfn = (event_mask & 0x3f);
+ struct pci_dev *pvfdev;
+ unsigned int device_id;
+ u16 thisvf_devfn = (pdev->devfn + 0x80 + (vfn << 1)) |
+ (pdev->devfn & 1);
+
+ bool enable = ((event_mask & 0x10000000U) != 0);
+
+ if (enable) {
+ random_ether_addr(vf_mac_addr);
+ e_info(probe, "IOV: VF %d is enabled MAC %pM\n",
+ vfn, vf_mac_addr);
+ /*
+ * Store away the VF's "permanent" MAC address; the VF will
+ * ask for it later.
+ */
+ memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ device_id = IXGBE_DEV_ID_82599_VF;
+ break;
+ case ixgbe_mac_X540:
+ device_id = IXGBE_DEV_ID_X540_VF;
+ break;
+ default:
+ device_id = 0;
+ break;
+ }
+
+ pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
+ while (pvfdev) {
+ if (pvfdev->devfn == thisvf_devfn)
+ break;
+ pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
+ device_id, pvfdev);
+ }
+ if (pvfdev)
+ adapter->vfinfo[vfn].vfdev = pvfdev;
+ else
+ e_err(drv, "Couldn't find pci dev ptr for VF %4.4x\n",
+ thisvf_devfn);
+ }
+
+ return 0;
+}
+
+static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg;
+ u32 reg_offset, vf_shift;
+
+ vf_shift = vf % 32;
+ reg_offset = vf / 32;
+
+ /* enable transmit and receive for vf */
+ reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
+ reg |= (1 << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+ reg |= (1 << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
+
+ /* Enable counting of spoofed packets in the SSVPC register */
+ reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
+ reg |= (1 << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
+
+ ixgbe_vf_reset_event(adapter, vf);
+}
+
+static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
+{
+ u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
+ u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+ struct ixgbe_hw *hw = &adapter->hw;
+ s32 retval;
+ int entries;
+ u16 *hash_list;
+ int add, vid, index;
+ u8 *new_mac;
+
+ retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
+
+ if (retval)
+ pr_err("Error receiving message from VF\n");
+
+ /* this is a message we already processed, do nothing */
+ if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
+ return retval;
+
+ /*
+ * until the vf completes a virtual function reset it should not be
+ * allowed to start any configuration.
+ */
+
+ if (msgbuf[0] == IXGBE_VF_RESET) {
+ unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
+ new_mac = (u8 *)(&msgbuf[1]);
+ e_info(probe, "VF Reset msg received from vf %d\n", vf);
+ adapter->vfinfo[vf].clear_to_send = false;
+ ixgbe_vf_reset_msg(adapter, vf);
+ adapter->vfinfo[vf].clear_to_send = true;
+
+ if (is_valid_ether_addr(new_mac) &&
+ !adapter->vfinfo[vf].pf_set_mac)
+ ixgbe_set_vf_mac(adapter, vf, vf_mac);
+ else
+ ixgbe_set_vf_mac(adapter,
+ vf, adapter->vfinfo[vf].vf_mac_addresses);
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
+ memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ /*
+ * Piggyback the multicast filter type so VF can compute the
+ * correct vectors
+ */
+ msgbuf[3] = hw->mac.mc_filter_type;
+ ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
+
+ return retval;
+ }
+
+ if (!adapter->vfinfo[vf].clear_to_send) {
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+ ixgbe_write_mbx(hw, msgbuf, 1, vf);
+ return retval;
+ }
+
+ switch ((msgbuf[0] & 0xFFFF)) {
+ case IXGBE_VF_SET_MAC_ADDR:
+ new_mac = ((u8 *)(&msgbuf[1]));
+ if (is_valid_ether_addr(new_mac) &&
+ !adapter->vfinfo[vf].pf_set_mac) {
+ ixgbe_set_vf_mac(adapter, vf, new_mac);
+ } else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses,
+ new_mac, ETH_ALEN)) {
+ e_warn(drv, "VF %d attempted to override "
+ "administratively set MAC address\nReload "
+ "the VF driver to resume operations\n", vf);
+ retval = -1;
+ }
+ break;
+ case IXGBE_VF_SET_MULTICAST:
+ entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+ >> IXGBE_VT_MSGINFO_SHIFT;
+ hash_list = (u16 *)&msgbuf[1];
+ retval = ixgbe_set_vf_multicasts(adapter, entries,
+ hash_list, vf);
+ break;
+ case IXGBE_VF_SET_LPE:
+ ixgbe_set_vf_lpe(adapter, msgbuf);
+ break;
+ case IXGBE_VF_SET_VLAN:
+ add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+ >> IXGBE_VT_MSGINFO_SHIFT;
+ vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
+ if (adapter->vfinfo[vf].pf_vlan) {
+ e_warn(drv, "VF %d attempted to override "
+ "administratively set VLAN configuration\n"
+ "Reload the VF driver to resume operations\n",
+ vf);
+ retval = -1;
+ } else {
+ retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
+ }
+ break;
+ case IXGBE_VF_SET_MACVLAN:
+ index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
+ IXGBE_VT_MSGINFO_SHIFT;
+ /*
+ * If the VF is allowed to set MAC filters then turn off
+ * anti-spoofing to avoid false positives. An index
+ * greater than 0 will indicate the VF is setting a
+ * macvlan MAC filter.
+ */
+ if (index > 0 && adapter->antispoofing_enabled) {
+ hw->mac.ops.set_mac_anti_spoofing(hw, false,
+ adapter->num_vfs);
+ hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
+ adapter->antispoofing_enabled = false;
+ }
+ retval = ixgbe_set_vf_macvlan(adapter, vf, index,
+ (unsigned char *)(&msgbuf[1]));
+ break;
+ default:
+ e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
+ retval = IXGBE_ERR_MBX;
+ break;
+ }
+
+ /* notify the VF of the results of what it sent us */
+ if (retval)
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
+
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
+
+ ixgbe_write_mbx(hw, msgbuf, 1, vf);
+
+ return retval;
+}
+
+static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 msg = IXGBE_VT_MSGTYPE_NACK;
+
+ /* if device isn't clear to send it shouldn't be reading either */
+ if (!adapter->vfinfo[vf].clear_to_send)
+ ixgbe_write_mbx(hw, &msg, 1, vf);
+}
+
+void ixgbe_msg_task(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vf;
+
+ for (vf = 0; vf < adapter->num_vfs; vf++) {
+ /* process any reset requests */
+ if (!ixgbe_check_for_rst(hw, vf))
+ ixgbe_vf_reset_event(adapter, vf);
+
+ /* process any messages pending */
+ if (!ixgbe_check_for_msg(hw, vf))
+ ixgbe_rcv_msg_from_vf(adapter, vf);
+
+ /* process any acks */
+ if (!ixgbe_check_for_ack(hw, vf))
+ ixgbe_rcv_ack_from_vf(adapter, vf);
+ }
+}
+
+void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* disable transmit and receive for all vfs */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
+}
+
+void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 ping;
+ int i;
+
+ for (i = 0 ; i < adapter->num_vfs; i++) {
+ ping = IXGBE_PF_CONTROL_MSG;
+ if (adapter->vfinfo[i].clear_to_send)
+ ping |= IXGBE_VT_MSGTYPE_CTS;
+ ixgbe_write_mbx(hw, &ping, 1, i);
+ }
+}
+
+int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
+ return -EINVAL;
+ adapter->vfinfo[vf].pf_set_mac = true;
+ dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
+ dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
+ " change effective.");
+ if (test_bit(__IXGBE_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ return ixgbe_set_vf_mac(adapter, vf, mac);
+}
+
+int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
+{
+ int err = 0;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
+ return -EINVAL;
+ if (vlan || qos) {
+ err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
+ if (err)
+ goto out;
+ ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+ ixgbe_set_vmolr(hw, vf, false);
+ if (adapter->antispoofing_enabled)
+ hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+ adapter->vfinfo[vf].pf_vlan = vlan;
+ adapter->vfinfo[vf].pf_qos = qos;
+ dev_info(&adapter->pdev->dev,
+ "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
+ if (test_bit(__IXGBE_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF VLAN has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ } else {
+ err = ixgbe_set_vf_vlan(adapter, false,
+ adapter->vfinfo[vf].pf_vlan, vf);
+ ixgbe_set_vmvir(adapter, vlan, vf);
+ ixgbe_set_vmolr(hw, vf, true);
+ hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
+ adapter->vfinfo[vf].pf_vlan = 0;
+ adapter->vfinfo[vf].pf_qos = 0;
+ }
+out:
+ return err;
+}
+
+static int ixgbe_link_mbps(int internal_link_speed)
+{
+ switch (internal_link_speed) {
+ case IXGBE_LINK_SPEED_100_FULL:
+ return 100;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ return 1000;
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ return 10000;
+ default:
+ return 0;
+ }
+}
+
+static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
+ int link_speed)
+{
+ int rf_dec, rf_int;
+ u32 bcnrc_val;
+
+ if (tx_rate != 0) {
+ /* Calculate the rate factor values to set */
+ rf_int = link_speed / tx_rate;
+ rf_dec = (link_speed - (rf_int * tx_rate));
+ rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
+
+ bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
+ bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) &
+ IXGBE_RTTBCNRC_RF_INT_MASK);
+ bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
+ } else {
+ bcnrc_val = 0;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
+ /*
+ * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+ * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
+ * and 0x004 otherwise.
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
+ break;
+ case ixgbe_mac_X540:
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
+ break;
+ default:
+ break;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+}
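(Editor's sketch, not part of the patch.) The rate factor computed above is a fixed-point divisor: the hardware scales the link speed down by rf_int plus a binary fraction. The fractional width below is an assumption (IXGBE_RTTBCNRC_RF_INT_SHIFT lives in ixgbe_type.h, outside this hunk); a worked sketch capping a 10G link at 3 Gb/s:

    #include <stdio.h>

    #define RF_INT_SHIFT 14  /* assumed fractional width */

    int main(void)
    {
            int link_speed = 10000;  /* 10G link, in Mb/s */
            int tx_rate = 3000;      /* requested VF cap, in Mb/s */

            int rf_int = link_speed / tx_rate;                  /* 3 */
            int rf_dec = link_speed - rf_int * tx_rate;         /* 1000 */
            rf_dec = (rf_dec * (1 << RF_INT_SHIFT)) / tx_rate;  /* ~5461 */

            printf("rate factor = %d + %d/%d\n",
                   rf_int, rf_dec, 1 << RF_INT_SHIFT);
            return 0;
    }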
+
+void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
+{
+ int actual_link_speed, i;
+ bool reset_rate = false;
+
+ /* VF Tx rate limit was not set */
+ if (adapter->vf_rate_link_speed == 0)
+ return;
+
+ actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
+ if (actual_link_speed != adapter->vf_rate_link_speed) {
+ reset_rate = true;
+ adapter->vf_rate_link_speed = 0;
+ dev_info(&adapter->pdev->dev,
+ "Link speed has been changed. VF Transmit rate "
+ "is disabled\n");
+ }
+
+ for (i = 0; i < adapter->num_vfs; i++) {
+ if (reset_rate)
+ adapter->vfinfo[i].tx_rate = 0;
+
+ ixgbe_set_vf_rate_limit(&adapter->hw, i,
+ adapter->vfinfo[i].tx_rate,
+ actual_link_speed);
+ }
+}
+
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int actual_link_speed;
+
+ actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
+ if ((vf >= adapter->num_vfs) || (!adapter->link_up) ||
+ (tx_rate > actual_link_speed) || (actual_link_speed != 10000) ||
+ ((tx_rate != 0) && (tx_rate <= 10)))
+ /* rate limit cannot be set to 10Mb or less in 10Gb adapters */
+ return -EINVAL;
+
+ adapter->vf_rate_link_speed = actual_link_speed;
+ adapter->vfinfo[vf].tx_rate = (u16)tx_rate;
+ ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+
+ return 0;
+}
+
+int ixgbe_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+ ivi->vf = vf;
+ memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
+ ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
+ ivi->vlan = adapter->vfinfo[vf].pf_vlan;
+ ivi->qos = adapter->vfinfo[vf].pf_qos;
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
new file mode 100644
index 000000000000..278184757b69
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -0,0 +1,49 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_SRIOV_H_
+#define _IXGBE_SRIOV_H_
+
+void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
+void ixgbe_msg_task(struct ixgbe_adapter *adapter);
+int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
+void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
+void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
+void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
+int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
+int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
+ u8 qos);
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+int ixgbe_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi);
+void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
+void ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
+ const struct ixgbe_info *ii);
+int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);
+
+#endif /* _IXGBE_SRIOV_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
new file mode 100644
index 000000000000..d1d689471523
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -0,0 +1,2955 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_TYPE_H_
+#define _IXGBE_TYPE_H_
+
+#include <linux/types.h>
+#include <linux/mdio.h>
+#include <linux/netdevice.h>
+
+/* Vendor ID */
+#define IXGBE_INTEL_VENDOR_ID 0x8086
+
+/* Device IDs */
+#define IXGBE_DEV_ID_82598 0x10B6
+#define IXGBE_DEV_ID_82598_BX 0x1508
+#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
+#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
+#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB
+#define IXGBE_DEV_ID_82598AT 0x10C8
+#define IXGBE_DEV_ID_82598AT2 0x150B
+#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
+#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
+#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1
+#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
+#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
+#define IXGBE_DEV_ID_82599_KX4 0x10F7
+#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
+#define IXGBE_DEV_ID_82599_KR 0x1517
+#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
+#define IXGBE_DEV_ID_82599_CX4 0x10F9
+#define IXGBE_DEV_ID_82599_SFP 0x10FB
+#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
+#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
+#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
+#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
+#define IXGBE_DEV_ID_82599EN_SFP 0x1557
+#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
+#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
+#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
+#define IXGBE_DEV_ID_82599_LS 0x154F
+#define IXGBE_DEV_ID_X540T 0x1528
+
+/* VF Device IDs */
+#define IXGBE_DEV_ID_82599_VF 0x10ED
+#define IXGBE_DEV_ID_X540_VF 0x1515
+
+/* General Registers */
+#define IXGBE_CTRL 0x00000
+#define IXGBE_STATUS 0x00008
+#define IXGBE_CTRL_EXT 0x00018
+#define IXGBE_ESDP 0x00020
+#define IXGBE_EODSDP 0x00028
+#define IXGBE_I2CCTL 0x00028
+#define IXGBE_LEDCTL 0x00200
+#define IXGBE_FRTIMER 0x00048
+#define IXGBE_TCPTIMER 0x0004C
+#define IXGBE_CORESPARE 0x00600
+#define IXGBE_EXVET 0x05078
+
+/* NVM Registers */
+#define IXGBE_EEC 0x10010
+#define IXGBE_EERD 0x10014
+#define IXGBE_EEWR 0x10018
+#define IXGBE_FLA 0x1001C
+#define IXGBE_EEMNGCTL 0x10110
+#define IXGBE_EEMNGDATA 0x10114
+#define IXGBE_FLMNGCTL 0x10118
+#define IXGBE_FLMNGDATA 0x1011C
+#define IXGBE_FLMNGCNT 0x10120
+#define IXGBE_FLOP 0x1013C
+#define IXGBE_GRC 0x10200
+
+/* General Receive Control */
+#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
+#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
+
+#define IXGBE_VPDDIAG0 0x10204
+#define IXGBE_VPDDIAG1 0x10208
+
+/* I2CCTL Bit Masks */
+#define IXGBE_I2C_CLK_IN 0x00000001
+#define IXGBE_I2C_CLK_OUT 0x00000002
+#define IXGBE_I2C_DATA_IN 0x00000004
+#define IXGBE_I2C_DATA_OUT 0x00000008
+
+/* Interrupt Registers */
+#define IXGBE_EICR 0x00800
+#define IXGBE_EICS 0x00808
+#define IXGBE_EIMS 0x00880
+#define IXGBE_EIMC 0x00888
+#define IXGBE_EIAC 0x00810
+#define IXGBE_EIAM 0x00890
+#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4)
+#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4)
+#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4)
+#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4)
+/*
+ * 82598 EITR is 16 bits wide, but we set the limits based on the max
+ * supported by all ixgbe hardware. 82599 EITR is only 12 bits,
+ * with the lower 3 always zero.
+ */
+#define IXGBE_MAX_INT_RATE 488281
+#define IXGBE_MIN_INT_RATE 956
+#define IXGBE_MAX_EITR 0x00000FF8
+#define IXGBE_MIN_EITR 8
+#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
+ (0x012300 + (((_i) - 24) * 4)))
+#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
+#define IXGBE_EITR_LLI_MOD 0x00008000
+#define IXGBE_EITR_CNT_WDIS 0x80000000
+#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
+#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */
+#define IXGBE_EITRSEL 0x00894
+#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */
+#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */
+#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
+#define IXGBE_GPIE 0x00898
+
+/* Flow Control Registers */
+#define IXGBE_FCADBUL 0x03210
+#define IXGBE_FCADBUH 0x03214
+#define IXGBE_FCAMACL 0x04328
+#define IXGBE_FCAMACH 0x0432C
+#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_PFCTOP 0x03008
+#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTV 0x032A0
+#define IXGBE_FCCFG 0x03D00
+#define IXGBE_TFCS 0x0CE00
+
+/* Receive DMA Registers */
+#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
+ (0x0D000 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
+ (0x0D004 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
+ (0x0D008 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
+ (0x0D010 + (((_i) - 64) * 0x40)))
+#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
+ (0x0D018 + (((_i) - 64) * 0x40)))
+#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
+ (0x0D028 + (((_i) - 64) * 0x40)))
+#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
+ (0x0D02C + (((_i) - 64) * 0x40)))
+#define IXGBE_RSCDBU 0x03028
+#define IXGBE_RDDCC 0x02F20
+#define IXGBE_RXMEMWRAP 0x03190
+#define IXGBE_STARCTRL 0x03024
+/*
+ * Split and Replication Receive Control Registers
+ * 00-15 : 0x02100 + n*4
+ * 16-63 : 0x01014 + n*0x40
+ * 64-127: 0x0D014 + (n-64)*0x40
+ */
+#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
+ (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+ (0x0D014 + (((_i) - 64) * 0x40))))
+/*
+ * Rx DCA Control Register:
+ * 00-15 : 0x02200 + n*4
+ * 16-63 : 0x0100C + n*0x40
+ * 64-127: 0x0D00C + (n-64)*0x40
+ */
+#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
+ (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
+ (0x0D00C + (((_i) - 64) * 0x40))))
+#define IXGBE_RDRXCTL 0x02F00
+#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
+ /* 8 of these 0x03C00 - 0x03C1C */
+#define IXGBE_RXCTRL 0x03000
+#define IXGBE_DROPEN 0x03D04
+#define IXGBE_RXPBSIZE_SHIFT 10
+
+/* Receive Registers */
+#define IXGBE_RXCSUM 0x05000
+#define IXGBE_RFCTL 0x05008
+#define IXGBE_DRECCCTL 0x02F08
+#define IXGBE_DRECCCTL_DISABLE 0
+/* Multicast Table Array - 128 entries */
+#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
+#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x0A200 + ((_i) * 8)))
+#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x0A204 + ((_i) * 8)))
+#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
+#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
+/* Packet split receive type */
+#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
+ (0x0EA00 + ((_i) * 4)))
+/* array of 4096 1-bit vlan filters */
+#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
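+/*
+ * Illustration: VLAN id n maps to register VFTA[n >> 5], bit (n & 0x1F);
+ * 128 32-bit registers cover all 4096 ids.
+ */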
+/*array of 4096 4-bit vlan vmdq indices */
+#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
+#define IXGBE_FCTRL 0x05080
+#define IXGBE_VLNCTRL 0x05088
+#define IXGBE_MCSTCTRL 0x05090
+#define IXGBE_MRQC 0x05818
+#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */
+#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */
+#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */
+#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */
+#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */
+#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */
+#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */
+#define IXGBE_RQTC 0x0EC70
+#define IXGBE_MTQC 0x08120
+#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
+#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
+#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
+#define IXGBE_VT_CTL 0x051B0
+#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */
+#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */
+#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */
+#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */
+#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
+#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
+#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4))
+#define IXGBE_QDE 0x2F04
+#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */
+#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */
+#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4))
+#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4))
+#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
+#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
+#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
+#define IXGBE_RXFECCERR0 0x051B8
+#define IXGBE_LLITHRESH 0x0EC90
+#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_IMIRVP 0x05AC0
+#define IXGBE_VMD_CTL 0x0581C
+#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
+#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
+
+/* Flow Director registers */
+#define IXGBE_FDIRCTRL 0x0EE00
+#define IXGBE_FDIRHKEY 0x0EE68
+#define IXGBE_FDIRSKEY 0x0EE6C
+#define IXGBE_FDIRDIP4M 0x0EE3C
+#define IXGBE_FDIRSIP4M 0x0EE40
+#define IXGBE_FDIRTCPM 0x0EE44
+#define IXGBE_FDIRUDPM 0x0EE48
+#define IXGBE_FDIRIP6M 0x0EE74
+#define IXGBE_FDIRM 0x0EE70
+
+/* Flow Director Stats registers */
+#define IXGBE_FDIRFREE 0x0EE38
+#define IXGBE_FDIRLEN 0x0EE4C
+#define IXGBE_FDIRUSTAT 0x0EE50
+#define IXGBE_FDIRFSTAT 0x0EE54
+#define IXGBE_FDIRMATCH 0x0EE58
+#define IXGBE_FDIRMISS 0x0EE5C
+
+/* Flow Director Programming registers */
+#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */
+#define IXGBE_FDIRIPSA 0x0EE18
+#define IXGBE_FDIRIPDA 0x0EE1C
+#define IXGBE_FDIRPORT 0x0EE20
+#define IXGBE_FDIRVLAN 0x0EE24
+#define IXGBE_FDIRHASH 0x0EE28
+#define IXGBE_FDIRCMD 0x0EE2C
+
+/* Transmit DMA registers */
+#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/
+#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
+#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
+#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40))
+#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40))
+#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40))
+#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
+#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
+#define IXGBE_DTXCTL 0x07E00
+
+#define IXGBE_DMATXCTL 0x04A80
+#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */
+#define IXGBE_PFDTXGSWC 0x08220
+#define IXGBE_DTXMXSZRQ 0x08100
+#define IXGBE_DTXTCPFLGL 0x04A88
+#define IXGBE_DTXTCPFLGH 0x04A8C
+#define IXGBE_LBDRPEN 0x0CA00
+#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */
+
+#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */
+#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
+#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
+#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
+
+#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
+
+/* Anti-spoofing defines */
+#define IXGBE_SPOOF_MACAS_MASK 0xFF
+#define IXGBE_SPOOF_VLANAS_MASK 0xFF00
+#define IXGBE_SPOOF_VLANAS_SHIFT 8
+#define IXGBE_PFVFSPOOF_REG_COUNT 8
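+/*
+ * Each of the 8 PFVFSPOOF registers covers 8 VFs (vf >> 3 selects the
+ * register): MACAS bits 7:0 enable MAC anti-spoofing and VLANAS bits
+ * 15:8 enable VLAN anti-spoofing, one bit per VF.
+ */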
+
+#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
+/* Tx DCA Control register : 128 of these (0-127) */
+#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
+#define IXGBE_TIPG 0x0CB00
+#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_MNGTXMAP 0x0CD10
+#define IXGBE_TIPG_FIBER_DEFAULT 3
+#define IXGBE_TXPBSIZE_SHIFT 10
+
+/* Wake up registers */
+#define IXGBE_WUC 0x05800
+#define IXGBE_WUFC 0x05808
+#define IXGBE_WUS 0x05810
+#define IXGBE_IPAV 0x05838
+#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */
+#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */
+
+#define IXGBE_WUPL 0x05900
+#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
+#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */
+#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host
+ * Filter Table */
+
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
+#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128
+#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */
+#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
+#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */
+
+/* Wake Up Filter Control */
+#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */
+
+#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
+#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
+#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
+#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
+#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */
+#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */
+#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
+
+/* Wake Up Status */
+#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC
+#define IXGBE_WUS_MAG IXGBE_WUFC_MAG
+#define IXGBE_WUS_EX IXGBE_WUFC_EX
+#define IXGBE_WUS_MC IXGBE_WUFC_MC
+#define IXGBE_WUS_BC IXGBE_WUFC_BC
+#define IXGBE_WUS_ARP IXGBE_WUFC_ARP
+#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4
+#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6
+#define IXGBE_WUS_MNG IXGBE_WUFC_MNG
+#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0
+#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1
+#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2
+#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3
+#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4
+#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5
+#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS
+
+/* Wake Up Packet Length */
+#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
+
+/* DCB registers */
+#define MAX_TRAFFIC_CLASS 8
+#define X540_TRAFFIC_CLASS 4
+#define IXGBE_RMCS 0x03D00
+#define IXGBE_DPMCS 0x07F40
+#define IXGBE_PDPMCS 0x0CD00
+#define IXGBE_RUPPBMR 0x050A0
+#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+
+
+/* Security Control Registers */
+#define IXGBE_SECTXCTRL 0x08800
+#define IXGBE_SECTXSTAT 0x08804
+#define IXGBE_SECTXBUFFAF 0x08808
+#define IXGBE_SECTXMINIFG 0x08810
+#define IXGBE_SECRXCTRL 0x08D00
+#define IXGBE_SECRXSTAT 0x08D04
+
+/* Security Bit Fields and Masks */
+#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001
+#define IXGBE_SECTXCTRL_TX_DIS 0x00000002
+#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004
+
+#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001
+#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002
+
+#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001
+#define IXGBE_SECRXCTRL_RX_DIS 0x00000002
+
+#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001
+#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002
+
+/* LinkSec (MacSec) Registers */
+#define IXGBE_LSECTXCAP 0x08A00
+#define IXGBE_LSECRXCAP 0x08F00
+#define IXGBE_LSECTXCTRL 0x08A04
+#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */
+#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */
+#define IXGBE_LSECTXSA 0x08A10
+#define IXGBE_LSECTXPN0 0x08A14
+#define IXGBE_LSECTXPN1 0x08A18
+#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */
+#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */
+#define IXGBE_LSECRXCTRL 0x08F04
+#define IXGBE_LSECRXSCL 0x08F08
+#define IXGBE_LSECRXSCH 0x08F0C
+#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */
+#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */
+#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m))))
+#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */
+#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */
+#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */
+#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */
+#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */
+#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */
+#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */
+#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */
+#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */
+#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */
+#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */
+#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */
+#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */
+#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */
+#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */
+#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */
+#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */
+#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */
+#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */
+
+/* LinkSec (MacSec) Bit Fields and Masks */
+#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000
+#define IXGBE_LSECTXCAP_SUM_SHIFT 16
+#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000
+#define IXGBE_LSECRXCAP_SUM_SHIFT 16
+
+#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003
+#define IXGBE_LSECTXCTRL_DISABLE 0x0
+#define IXGBE_LSECTXCTRL_AUTH 0x1
+#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2
+#define IXGBE_LSECTXCTRL_AISCI 0x00000020
+#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00
+#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8
+
+#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C
+#define IXGBE_LSECRXCTRL_EN_SHIFT 2
+#define IXGBE_LSECRXCTRL_DISABLE 0x0
+#define IXGBE_LSECRXCTRL_CHECK 0x1
+#define IXGBE_LSECRXCTRL_STRICT 0x2
+#define IXGBE_LSECRXCTRL_DROP 0x3
+#define IXGBE_LSECRXCTRL_PLSH 0x00000040
+#define IXGBE_LSECRXCTRL_RP 0x00000080
+#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33
+
+/* IpSec Registers */
+#define IXGBE_IPSTXIDX 0x08900
+#define IXGBE_IPSTXSALT 0x08904
+#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXIDX 0x08E00
+#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXSPI 0x08E14
+#define IXGBE_IPSRXIPIDX 0x08E18
+#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXSALT 0x08E2C
+#define IXGBE_IPSRXMOD 0x08E30
+
+#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4
+
+/* DCB registers */
+#define IXGBE_RTRPCS 0x02430
+#define IXGBE_RTTDCS 0x04900
+#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
+#define IXGBE_RTTPCS 0x0CD00
+#define IXGBE_RTRUP2TC 0x03020
+#define IXGBE_RTTUP2TC 0x0C800
+#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDQSEL 0x04904
+#define IXGBE_RTTDT1C 0x04908
+#define IXGBE_RTTDT1S 0x0490C
+#define IXGBE_RTTDTECC 0x04990
+#define IXGBE_RTTDTECC_NO_BCN 0x00000100
+#define IXGBE_RTTBCNRC 0x04984
+#define IXGBE_RTTBCNRC_RS_ENA 0x80000000
+#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF
+#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14
+#define IXGBE_RTTBCNRC_RF_INT_MASK \
+ (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
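+/*
+ * The rate factor is link_speed / tx_rate in 14.14 fixed point: the
+ * integer part sits in bits 27:14 and the fraction in bits 13:0, so
+ * e.g. a factor of ~33.33 is encoded as (33 << 14) | 5461.
+ */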
+#define IXGBE_RTTBCNRM 0x04980
+
+/* FCoE DMA Context Registers */
+#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
+#define IXGBE_FCPTRH 0x02414 /* FC User Desc. PTR High */
+#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */
+#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */
+#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */
+#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4))
+#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */
+#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */
+#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */
+#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */
+#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */
+#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3
+#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8
+#define IXGBE_FCBUFF_OFFSET_SHIFT 16
+#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */
+#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */
+#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */
+#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */
+#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
+
+/* FCoE SOF/EOF */
+#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */
+#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */
+#define IXGBE_REOFF 0x05158 /* Rx FC EOF */
+#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */
+/* FCoE Filter Context Registers */
+#define IXGBE_FCFLT 0x05108 /* FC FLT Context */
+#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */
+#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */
+#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */
+#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */
+#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */
+#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */
+#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */
+#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */
+#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */
+/* FCoE Receive Control */
+#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */
+#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */
+#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */
+#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */
+#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */
+#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */
+#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. Header */
+#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */
+#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */
+#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */
+#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
+/* FCoE Redirection */
+#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */
+#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */
+#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */
+#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */
+#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */
+#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */
+
+/* Stats registers */
+#define IXGBE_CRCERRS 0x04000
+#define IXGBE_ILLERRC 0x04004
+#define IXGBE_ERRBC 0x04008
+#define IXGBE_MSPDC 0x04010
+#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/
+#define IXGBE_MLFC 0x04034
+#define IXGBE_MRFC 0x04038
+#define IXGBE_RLEC 0x04040
+#define IXGBE_LXONTXC 0x03F60
+#define IXGBE_LXONRXC 0x0CF60
+#define IXGBE_LXOFFTXC 0x03F68
+#define IXGBE_LXOFFRXC 0x0CF68
+#define IXGBE_LXONRXCNT 0x041A4
+#define IXGBE_LXOFFRXCNT 0x041A8
+#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/
+#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/
+#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/
+#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/
+#define IXGBE_PRC64 0x0405C
+#define IXGBE_PRC127 0x04060
+#define IXGBE_PRC255 0x04064
+#define IXGBE_PRC511 0x04068
+#define IXGBE_PRC1023 0x0406C
+#define IXGBE_PRC1522 0x04070
+#define IXGBE_GPRC 0x04074
+#define IXGBE_BPRC 0x04078
+#define IXGBE_MPRC 0x0407C
+#define IXGBE_GPTC 0x04080
+#define IXGBE_GORCL 0x04088
+#define IXGBE_GORCH 0x0408C
+#define IXGBE_GOTCL 0x04090
+#define IXGBE_GOTCH 0x04094
+#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/
+#define IXGBE_RUC 0x040A4
+#define IXGBE_RFC 0x040A8
+#define IXGBE_ROC 0x040AC
+#define IXGBE_RJC 0x040B0
+#define IXGBE_MNGPRC 0x040B4
+#define IXGBE_MNGPDC 0x040B8
+#define IXGBE_MNGPTC 0x0CF90
+#define IXGBE_TORL 0x040C0
+#define IXGBE_TORH 0x040C4
+#define IXGBE_TPR 0x040D0
+#define IXGBE_TPT 0x040D4
+#define IXGBE_PTC64 0x040D8
+#define IXGBE_PTC127 0x040DC
+#define IXGBE_PTC255 0x040E0
+#define IXGBE_PTC511 0x040E4
+#define IXGBE_PTC1023 0x040E8
+#define IXGBE_PTC1522 0x040EC
+#define IXGBE_MPTC 0x040F0
+#define IXGBE_BPTC 0x040F4
+#define IXGBE_XEC 0x04120
+#define IXGBE_SSVPC 0x08780
+
+#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
+#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
+ (0x08600 + ((_i) * 4)))
+#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4))
+
+#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
+#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
+#define IXGBE_FCCRC 0x05118 /* Count of Good Eth CRC w/ Bad FC CRC */
+#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */
+#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */
+#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */
+#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */
+#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */
+#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */
+#define IXGBE_O2BGPTC 0x041C4
+#define IXGBE_O2BSPC 0x087B0
+#define IXGBE_B2OSPC 0x041C0
+#define IXGBE_B2OGPRC 0x02F90
+#define IXGBE_PCRC8ECL 0x0E810
+#define IXGBE_PCRC8ECH 0x0E811
+#define IXGBE_PCRC8ECH_MASK 0x1F
+#define IXGBE_LDPCECL 0x0E820
+#define IXGBE_LDPCECH 0x0E821
+
+/* Management */
+#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MANC 0x05820
+#define IXGBE_MFVAL 0x05824
+#define IXGBE_MANC2H 0x05860
+#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MIPAF 0x058B0
+#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */
+#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */
+#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */
+#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_LSWFW 0x15014
+
+/* ARC Subsystem registers */
+#define IXGBE_HICR 0x15F00
+#define IXGBE_FWSTS 0x15F0C
+#define IXGBE_HSMC0R 0x15F04
+#define IXGBE_HSMC1R 0x15F08
+#define IXGBE_SWSR 0x15F10
+#define IXGBE_HFDR 0x15FE8
+#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */
+
+#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define IXGBE_HICR_C 0x02
+#define IXGBE_HICR_SV 0x04 /* Status Validity */
+#define IXGBE_HICR_FW_RESET_ENABLE 0x40
+#define IXGBE_HICR_FW_RESET 0x80
+
+/* PCI-E registers */
+#define IXGBE_GCR 0x11000
+#define IXGBE_GTV 0x11004
+#define IXGBE_FUNCTAG 0x11008
+#define IXGBE_GLT 0x1100C
+#define IXGBE_GSCL_1 0x11010
+#define IXGBE_GSCL_2 0x11014
+#define IXGBE_GSCL_3 0x11018
+#define IXGBE_GSCL_4 0x1101C
+#define IXGBE_GSCN_0 0x11020
+#define IXGBE_GSCN_1 0x11024
+#define IXGBE_GSCN_2 0x11028
+#define IXGBE_GSCN_3 0x1102C
+#define IXGBE_FACTPS 0x10150
+#define IXGBE_PCIEANACTL 0x11040
+#define IXGBE_SWSM 0x10140
+#define IXGBE_FWSM 0x10148
+#define IXGBE_GSSR 0x10160
+#define IXGBE_MREVID 0x11064
+#define IXGBE_DCA_ID 0x11070
+#define IXGBE_DCA_CTRL 0x11074
+#define IXGBE_SWFW_SYNC IXGBE_GSSR
+
+/* PCIe registers 82599-specific */
+#define IXGBE_GCR_EXT 0x11050
+#define IXGBE_GSCL_5_82599 0x11030
+#define IXGBE_GSCL_6_82599 0x11034
+#define IXGBE_GSCL_7_82599 0x11038
+#define IXGBE_GSCL_8_82599 0x1103C
+#define IXGBE_PHYADR_82599 0x11040
+#define IXGBE_PHYDAT_82599 0x11044
+#define IXGBE_PHYCTL_82599 0x11048
+#define IXGBE_PBACLR_82599 0x11068
+#define IXGBE_CIAA_82599 0x11088
+#define IXGBE_CIAD_82599 0x1108C
+#define IXGBE_PICAUSE 0x110B0
+#define IXGBE_PIENA 0x110B8
+#define IXGBE_CDQ_MBR_82599 0x110B4
+#define IXGBE_PCIESPARE 0x110BC
+#define IXGBE_MISC_REG_82599 0x110F0
+#define IXGBE_ECC_CTRL_0_82599 0x11100
+#define IXGBE_ECC_CTRL_1_82599 0x11104
+#define IXGBE_ECC_STATUS_82599 0x110E0
+#define IXGBE_BAR_CTRL_82599 0x110F4
+
+/* PCI Express Control */
+#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000
+#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000
+#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
+#define IXGBE_GCR_CAP_VER2 0x00040000
+
+#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
+#define IXGBE_GCR_EXT_BUFFERS_CLEAR 0x40000000
+#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
+#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
+#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
+#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
+ IXGBE_GCR_EXT_VT_MODE_64)
+
+/* Time Sync Registers */
+#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
+#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
+#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */
+#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */
+#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */
+#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */
+#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */
+#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */
+#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */
+#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */
+#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */
+#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */
+#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */
+#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */
+#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */
+#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */
+#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */
+#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */
+#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */
+#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */
+#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */
+#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
+#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */
+#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */
+#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */
+
+/* Diagnostic Registers */
+#define IXGBE_RDSTATCTL 0x02C20
+#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
+#define IXGBE_RDHMPN 0x02F08
+#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4))
+#define IXGBE_RDPROBE 0x02F20
+#define IXGBE_RDMAM 0x02F30
+#define IXGBE_RDMAD 0x02F34
+#define IXGBE_TDSTATCTL 0x07C20
+#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
+#define IXGBE_TDHMPN 0x07F08
+#define IXGBE_TDHMPN2 0x082FC
+#define IXGBE_TXDESCIC 0x082CC
+#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4))
+#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4))
+#define IXGBE_TDPROBE 0x07F20
+#define IXGBE_TXBUFCTRL 0x0C600
+#define IXGBE_TXBUFDATA0 0x0C610
+#define IXGBE_TXBUFDATA1 0x0C614
+#define IXGBE_TXBUFDATA2 0x0C618
+#define IXGBE_TXBUFDATA3 0x0C61C
+#define IXGBE_RXBUFCTRL 0x03600
+#define IXGBE_RXBUFDATA0 0x03610
+#define IXGBE_RXBUFDATA1 0x03614
+#define IXGBE_RXBUFDATA2 0x03618
+#define IXGBE_RXBUFDATA3 0x0361C
+#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_RFVAL 0x050A4
+#define IXGBE_MDFTC1 0x042B8
+#define IXGBE_MDFTC2 0x042C0
+#define IXGBE_MDFTFIFO1 0x042C4
+#define IXGBE_MDFTFIFO2 0x042C8
+#define IXGBE_MDFTS 0x042CC
+#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 4 of these 3700-370C*/
+#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 4 of these 3710-371C*/
+#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 4 of these 3720-372C*/
+#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 4 of these 3730-373C*/
+#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 4 of these C700-C70C*/
+#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 4 of these C710-C71C*/
+#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 4 of these C720-C72C*/
+#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 4 of these C730-C73C*/
+#define IXGBE_PCIEECCCTL 0x1106C
+#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-311C*/
+#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-313C*/
+#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-315C*/
+#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-317C*/
+#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C11C*/
+#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C13C*/
+#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C15C*/
+#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C17C*/
+#define IXGBE_PCIEECCCTL0 0x11100
+#define IXGBE_PCIEECCCTL1 0x11104
+#define IXGBE_RXDBUECC 0x03F70
+#define IXGBE_TXDBUECC 0x0CF70
+#define IXGBE_RXDBUEST 0x03F74
+#define IXGBE_TXDBUEST 0x0CF74
+#define IXGBE_PBTXECC 0x0C300
+#define IXGBE_PBRXECC 0x03300
+#define IXGBE_GHECCR 0x110B0
+
+/* MAC Registers */
+#define IXGBE_PCS1GCFIG 0x04200
+#define IXGBE_PCS1GLCTL 0x04208
+#define IXGBE_PCS1GLSTA 0x0420C
+#define IXGBE_PCS1GDBG0 0x04210
+#define IXGBE_PCS1GDBG1 0x04214
+#define IXGBE_PCS1GANA 0x04218
+#define IXGBE_PCS1GANLP 0x0421C
+#define IXGBE_PCS1GANNP 0x04220
+#define IXGBE_PCS1GANLPNP 0x04224
+#define IXGBE_HLREG0 0x04240
+#define IXGBE_HLREG1 0x04244
+#define IXGBE_PAP 0x04248
+#define IXGBE_MACA 0x0424C
+#define IXGBE_APAE 0x04250
+#define IXGBE_ARD 0x04254
+#define IXGBE_AIS 0x04258
+#define IXGBE_MSCA 0x0425C
+#define IXGBE_MSRWD 0x04260
+#define IXGBE_MLADD 0x04264
+#define IXGBE_MHADD 0x04268
+#define IXGBE_MAXFRS 0x04268
+#define IXGBE_TREG 0x0426C
+#define IXGBE_PCSS1 0x04288
+#define IXGBE_PCSS2 0x0428C
+#define IXGBE_XPCSS 0x04290
+#define IXGBE_MFLCN 0x04294
+#define IXGBE_SERDESC 0x04298
+#define IXGBE_MACS 0x0429C
+#define IXGBE_AUTOC 0x042A0
+#define IXGBE_LINKS 0x042A4
+#define IXGBE_LINKS2 0x04324
+#define IXGBE_AUTOC2 0x042A8
+#define IXGBE_AUTOC3 0x042AC
+#define IXGBE_ANLP1 0x042B0
+#define IXGBE_ANLP2 0x042B4
+#define IXGBE_MACC 0x04330
+#define IXGBE_ATLASCTL 0x04800
+#define IXGBE_MMNGC 0x042D0
+#define IXGBE_ANLPNP1 0x042D4
+#define IXGBE_ANLPNP2 0x042D8
+#define IXGBE_KRPCSFC 0x042E0
+#define IXGBE_KRPCSS 0x042E4
+#define IXGBE_FECS1 0x042E8
+#define IXGBE_FECS2 0x042EC
+#define IXGBE_SMADARCTL 0x14F10
+#define IXGBE_MPVC 0x04318
+#define IXGBE_SGMIIC 0x04314
+
+/* Statistics Registers */
+#define IXGBE_RXNFGPC 0x041B0
+#define IXGBE_RXNFGBCL 0x041B4
+#define IXGBE_RXNFGBCH 0x041B8
+#define IXGBE_RXDGPC 0x02F50
+#define IXGBE_RXDGBCL 0x02F54
+#define IXGBE_RXDGBCH 0x02F58
+#define IXGBE_RXDDGPC 0x02F5C
+#define IXGBE_RXDDGBCL 0x02F60
+#define IXGBE_RXDDGBCH 0x02F64
+#define IXGBE_RXLPBKGPC 0x02F68
+#define IXGBE_RXLPBKGBCL 0x02F6C
+#define IXGBE_RXLPBKGBCH 0x02F70
+#define IXGBE_RXDLPBKGPC 0x02F74
+#define IXGBE_RXDLPBKGBCL 0x02F78
+#define IXGBE_RXDLPBKGBCH 0x02F7C
+#define IXGBE_TXDGPC 0x087A0
+#define IXGBE_TXDGBCL 0x087A4
+#define IXGBE_TXDGBCH 0x087A8
+
+#define IXGBE_RXDSTATCTRL 0x02F40
+
+/* Copper Pond 2 link timeout */
+#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50
+
+/* Omer CORECTL */
+#define IXGBE_CORECTL 0x014F00
+/* BARCTRL */
+#define IXGBE_BARCTRL 0x110F4
+#define IXGBE_BARCTRL_FLSIZE 0x0700
+#define IXGBE_BARCTRL_FLSIZE_SHIFT 8
+#define IXGBE_BARCTRL_CSRSIZE 0x2000
+
+/* RSCCTL Bit Masks */
+#define IXGBE_RSCCTL_RSCEN 0x01
+#define IXGBE_RSCCTL_MAXDESC_1 0x00
+#define IXGBE_RSCCTL_MAXDESC_4 0x04
+#define IXGBE_RSCCTL_MAXDESC_8 0x08
+#define IXGBE_RSCCTL_MAXDESC_16 0x0C
+
+/* RSCDBU Bit Masks */
+#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F
+#define IXGBE_RSCDBU_RSCACKDIS 0x00000080
+
+/* RDRXCTL Bit Masks */
+#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */
+#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */
+#define IXGBE_RDRXCTL_MVMEN 0x00000020
+#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
+#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
+#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */
+#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */
+#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */
+#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */
+
+/* RQTC Bit Masks and Shifts */
+#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
+#define IXGBE_RQTC_TC0_MASK (0x7 << 0)
+#define IXGBE_RQTC_TC1_MASK (0x7 << 4)
+#define IXGBE_RQTC_TC2_MASK (0x7 << 8)
+#define IXGBE_RQTC_TC3_MASK (0x7 << 12)
+#define IXGBE_RQTC_TC4_MASK (0x7 << 16)
+#define IXGBE_RQTC_TC5_MASK (0x7 << 20)
+#define IXGBE_RQTC_TC6_MASK (0x7 << 24)
+#define IXGBE_RQTC_TC7_MASK (0x7 << 28)
+
+/* PSRTYPE.RQPL Bit masks and shift */
+#define IXGBE_PSRTYPE_RQPL_MASK 0x7
+#define IXGBE_PSRTYPE_RQPL_SHIFT 29
+
+/* CTRL Bit Masks */
+#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
+#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */
+#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
+
+/* FACTPS */
+#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */
+
+/* MHADD Bit Masks */
+#define IXGBE_MHADD_MFS_MASK 0xFFFF0000
+#define IXGBE_MHADD_MFS_SHIFT 16
+
+/* Extended Device Control */
+#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */
+#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */
+#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
+#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
+
+/* Direct Cache Access (DCA) definitions */
+#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
+#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
+
+#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
+#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
+
+#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DESC_WRO_EN (1 << 13) /* DCA Rx wr Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DESC_HSRO_EN (1 << 15) /* DCA Rx Split Header RO */
+
+#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
+
+/* MSCA Bit Masks */
+#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Address (new protocol) */
+#define IXGBE_MSCA_NP_ADDR_SHIFT 0
+#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Device Type (new protocol) */
+#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old protocol) */
+#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */
+#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/
+#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */
+#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */
+#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */
+#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */
+#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (read) */
+#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (read, auto inc)*/
+#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */
+#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */
+#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */
+#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old protocol) */
+#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */
+#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */
+
+/* MSRWD bit masks */
+#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF
+#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
+#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000
+#define IXGBE_MSRWD_READ_DATA_SHIFT 16
+
+/* Atlas registers */
+#define IXGBE_ATLAS_PDN_LPBK 0x24
+#define IXGBE_ATLAS_PDN_10G 0xB
+#define IXGBE_ATLAS_PDN_1G 0xC
+#define IXGBE_ATLAS_PDN_AN 0xD
+
+/* Atlas bit masks */
+#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000
+#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10
+#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0
+#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0
+#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0
+
+/* Omer bit masks */
+#define IXGBE_CORECTL_WRITE_CMD 0x00010000
+
+/* MDIO definitions */
+
+#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
+
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0 - 10G, 1 - 1G */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010
+
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
+
+/* MII clause 22/28 definitions */
+#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
+#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
+#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
+#define IXGBE_MII_AUTONEG_REG 0x0
+
+#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
+#define IXGBE_MAX_PHY_ADDR 32
+
+/* PHY IDs*/
+#define TN1010_PHY_ID 0x00A19410
+#define TNX_FW_REV 0xB
+#define X540_PHY_ID 0x01540200
+#define QT2022_PHY_ID 0x0043A400
+#define ATH_PHY_ID 0x03429050
+#define AQ_FW_REV 0x20
+
+/* PHY Types */
+#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
+
+/* Special PHY Init Routine */
+#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
+#define IXGBE_PHY_INIT_END_NL 0xFFFF
+#define IXGBE_CONTROL_MASK_NL 0xF000
+#define IXGBE_DATA_MASK_NL 0x0FFF
+#define IXGBE_CONTROL_SHIFT_NL 12
+#define IXGBE_DELAY_NL 0
+#define IXGBE_DATA_NL 1
+#define IXGBE_CONTROL_NL 0x000F
+#define IXGBE_CONTROL_EOL_NL 0x0FFF
+#define IXGBE_CONTROL_SOL_NL 0x0000
+
+/* General purpose Interrupt Enable */
+#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
+#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
+#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */
+#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
+#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
+#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
+#define IXGBE_GPIE_EIAME 0x40000000
+#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
+#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
+#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */
+#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */
+#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */
+#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */
+
+/* Packet Buffer Initialization */
+#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */
+#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
+#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
+#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
+#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
+#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */
+#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer*/
+#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer*/
+
+#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
+#define IXGBE_MAX_PB 8
+
+/* Packet buffer allocation strategies */
+enum {
+ PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */
+#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL
+ PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */
+#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED
+};
+
+/* Transmit Flow Control status */
+#define IXGBE_TFCS_TXOFF 0x00000001
+#define IXGBE_TFCS_TXOFF0 0x00000100
+#define IXGBE_TFCS_TXOFF1 0x00000200
+#define IXGBE_TFCS_TXOFF2 0x00000400
+#define IXGBE_TFCS_TXOFF3 0x00000800
+#define IXGBE_TFCS_TXOFF4 0x00001000
+#define IXGBE_TFCS_TXOFF5 0x00002000
+#define IXGBE_TFCS_TXOFF6 0x00004000
+#define IXGBE_TFCS_TXOFF7 0x00008000
+
+/* TCP Timer */
+#define IXGBE_TCPTIMER_KS 0x00000100
+#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200
+#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400
+#define IXGBE_TCPTIMER_LOOP 0x00000800
+#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF
+
+/* HLREG0 Bit Masks */
+#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */
+#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */
+#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */
+#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */
+#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */
+#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */
+#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */
+#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */
+#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */
+#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */
+#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */
+#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */
+#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */
+#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */
+#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */
+
+/* VMD_CTL bitmasks */
+#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001
+#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002
+
+/* VT_CTL bitmasks */
+#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */
+#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */
+#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */
+#define IXGBE_VT_CTL_POOL_SHIFT 7
+#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
+
+/* VMOLR bitmasks */
+#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */
+#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */
+#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */
+#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */
+#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */
+
+/* VFRE bitmask */
+#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
+
+#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+/* RDHMPN and TDHMPN bitmasks */
+#define IXGBE_RDHMPN_RDICADDR 0x007FF800
+#define IXGBE_RDHMPN_RDICRDREQ 0x00800000
+#define IXGBE_RDHMPN_RDICADDR_SHIFT 11
+#define IXGBE_TDHMPN_TDICADDR 0x003FF800
+#define IXGBE_TDHMPN_TDICRDREQ 0x00800000
+#define IXGBE_TDHMPN_TDICADDR_SHIFT 11
+
+#define IXGBE_RDMAM_MEM_SEL_SHIFT 13
+#define IXGBE_RDMAM_DWORD_SHIFT 9
+#define IXGBE_RDMAM_DESC_COMP_FIFO 1
+#define IXGBE_RDMAM_DFC_CMD_FIFO 2
+#define IXGBE_RDMAM_TCN_STATUS_RAM 4
+#define IXGBE_RDMAM_WB_COLL_FIFO 5
+#define IXGBE_RDMAM_QSC_CNT_RAM 6
+#define IXGBE_RDMAM_QSC_QUEUE_CNT 8
+#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA
+#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135
+#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4
+#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48
+#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7
+#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256
+#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9
+#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8
+#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4
+#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64
+#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4
+#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32
+#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4
+#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128
+#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8
+
+#define IXGBE_TXDESCIC_READY 0x80000000
+
+/* Receive Checksum Control */
+#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
+#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+
+/* FCRTL Bit Masks */
+#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */
+#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */
+
+/* PAP bit masks*/
+#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */
+
+/* RMCS Bit Masks */
+#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */
+/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
+#define IXGBE_RMCS_RAC 0x00000004
+#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */
+#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx link FC ena */
+#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */
+#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */
+
+/* FCCFG Bit Masks */
+#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */
+#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */
+
+/* Interrupt register bitmasks */
+
+/* Extended Interrupt Cause Read */
+#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
+#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */
+#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */
+#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */
+#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
+#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
+#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
+#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
+#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
+#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
+#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */
+#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */
+#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
+#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
+#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
+#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+
+/* Extended Interrupt Cause Set */
+#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
+#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */
+#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
+#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Set */
+#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
+#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
+#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */
+#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
+#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Clear */
+#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */
+#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */
+#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */
+#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
+#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
+#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */
+#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+#define IXGBE_EIMS_ENABLE_MASK ( \
+ IXGBE_EIMS_RTX_QUEUE | \
+ IXGBE_EIMS_LSC | \
+ IXGBE_EIMS_TCP_TIMER | \
+ IXGBE_EIMS_OTHER)
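+
+/*
+ * Example (sketch): a typical enable sequence clears all pending causes
+ * through EIMC before setting the default mask (the IXGBE_EIMC and
+ * IXGBE_EIMS offsets and the register accessors are defined elsewhere
+ * in the driver; IXGBE_IRQ_CLEAR_MASK is defined below):
+ *
+ * IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
+ * IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_ENABLE_MASK);
+ */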
+
+/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
+#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
+#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
+#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */
+#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */
+#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */
+#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */
+#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */
+#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */
+#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */
+#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */
+#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */
+#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */
+#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */
+#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */
+#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */
+#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */
+#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass check of control bits */
+#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */
+#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */
+#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */
+#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */
+#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */
+
+#define IXGBE_MAX_FTQF_FILTERS 128
+#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003
+#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000
+#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001
+#define IXGBE_FTQF_PROTOCOL_SCTP 2
+#define IXGBE_FTQF_PRIORITY_MASK 0x00000007
+#define IXGBE_FTQF_PRIORITY_SHIFT 2
+#define IXGBE_FTQF_POOL_MASK 0x0000003F
+#define IXGBE_FTQF_POOL_SHIFT 8
+#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F
+#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25
+#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E
+#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D
+#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B
+#define IXGBE_FTQF_DEST_PORT_MASK 0x17
+#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F
+#define IXGBE_FTQF_POOL_MASK_EN 0x40000000
+#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000
+
+/* Interrupt clear mask */
+#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF
+
+/* Interrupt Vector Allocation Registers */
+#define IXGBE_IVAR_REG_NUM 25
+#define IXGBE_IVAR_REG_NUM_82599 64
+#define IXGBE_IVAR_TXRX_ENTRY 96
+#define IXGBE_IVAR_RX_ENTRY 64
+#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i))
+#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i))
+#define IXGBE_IVAR_TX_ENTRY 32
+
+#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */
+#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */
+
+#define IXGBE_MSIX_VECTOR(_i) (0 + (_i))
+
+#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
+
+/* ETYPE Queue Filter/Select Bit Masks */
+#define IXGBE_MAX_ETQF_FILTERS 8
+#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */
+#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */
+#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
+#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
+#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
+
+#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */
+#define IXGBE_ETQS_RX_QUEUE_SHIFT 16
+#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */
+#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */
+
+/*
+ * ETQF filter list: one static filter per filter consumer, to avoid
+ * filter collisions later. Add new filters here.
+ *
+ * Current filters:
+ * EAPOL 802.1x (0x888e): Filter 0
+ * FCoE (0x8906): Filter 2
+ * 1588 (0x88f7): Filter 3
+ * FIP (0x8914): Filter 4
+ */
+#define IXGBE_ETQF_FILTER_EAPOL 0
+#define IXGBE_ETQF_FILTER_FCOE 2
+#define IXGBE_ETQF_FILTER_1588 3
+#define IXGBE_ETQF_FILTER_FIP 4
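+
+/*
+ * Example (sketch): programming the static 1588 filter, assuming the
+ * IXGBE_ETQF(_i) register macro defined elsewhere in this header and
+ * ETH_P_1588 from <linux/if_ether.h>:
+ *
+ * IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
+ *                 IXGBE_ETQF_FILTER_EN | IXGBE_ETQF_1588 | ETH_P_1588);
+ */
+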
+/* VLAN Control Bit Masks */
+#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
+#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
+#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */
+#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */
+#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */
+
+/* VLAN pool filtering masks */
+#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */
+#define IXGBE_VLVF_ENTRIES 64
+#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
+
+/* Per VF Port VLAN insertion rules */
+#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
+#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
+
+/* STATUS Bit Masks */
+#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
+#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/
+#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */
+
+#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */
+#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
+
+/* ESDP Bit Masks */
+#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */
+#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */
+#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */
+#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */
+#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
+#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
+#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
+#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */
+#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
+
+/* LEDCTL Bit Masks */
+#define IXGBE_LED_IVRT_BASE 0x00000040
+#define IXGBE_LED_BLINK_BASE 0x00000080
+#define IXGBE_LED_MODE_MASK_BASE 0x0000000F
+#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i)))
+#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i))
+#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
+#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
+#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
+
+/* LED modes */
+#define IXGBE_LED_LINK_UP 0x0
+#define IXGBE_LED_LINK_10G 0x1
+#define IXGBE_LED_MAC 0x2
+#define IXGBE_LED_FILTER 0x3
+#define IXGBE_LED_LINK_ACTIVE 0x4
+#define IXGBE_LED_LINK_1G 0x5
+#define IXGBE_LED_ON 0xE
+#define IXGBE_LED_OFF 0xF
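+
+/*
+ * Example (sketch): forcing LED "index" on, in the style of the generic
+ * led_on helper (the IXGBE_LEDCTL offset is defined elsewhere in this
+ * header):
+ *
+ * u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ * led_reg &= ~IXGBE_LED_MODE_MASK(index);
+ * led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
+ * IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+ */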
+
+/* AUTOC Bit Masks */
+#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000
+#define IXGBE_AUTOC_KX4_SUPP 0x80000000
+#define IXGBE_AUTOC_KX_SUPP 0x40000000
+#define IXGBE_AUTOC_PAUSE 0x30000000
+#define IXGBE_AUTOC_ASM_PAUSE 0x20000000
+#define IXGBE_AUTOC_SYM_PAUSE 0x10000000
+#define IXGBE_AUTOC_RF 0x08000000
+#define IXGBE_AUTOC_PD_TMR 0x06000000
+#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
+#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000
+#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000
+#define IXGBE_AUTOC_FECA 0x00040000
+#define IXGBE_AUTOC_FECR 0x00020000
+#define IXGBE_AUTOC_KR_SUPP 0x00010000
+#define IXGBE_AUTOC_AN_RESTART 0x00001000
+#define IXGBE_AUTOC_FLU 0x00000001
+#define IXGBE_AUTOC_LMS_SHIFT 13
+#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200
+#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
+#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180
+#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
+#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000
+#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000
+#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16
+#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+
+#define IXGBE_MACC_FLU 0x00000001
+#define IXGBE_MACC_FSV_10G 0x00030000
+#define IXGBE_MACC_FS 0x00040000
+#define IXGBE_MAC_RX2TX_LPBK 0x00000002
+
+/* LINKS Bit Masks */
+#define IXGBE_LINKS_KX_AN_COMP 0x80000000
+#define IXGBE_LINKS_UP 0x40000000
+#define IXGBE_LINKS_SPEED 0x20000000
+#define IXGBE_LINKS_MODE 0x18000000
+#define IXGBE_LINKS_RX_MODE 0x06000000
+#define IXGBE_LINKS_TX_MODE 0x01800000
+#define IXGBE_LINKS_XGXS_EN 0x00400000
+#define IXGBE_LINKS_SGMII_EN 0x02000000
+#define IXGBE_LINKS_PCS_1G_EN 0x00200000
+#define IXGBE_LINKS_1G_AN_EN 0x00100000
+#define IXGBE_LINKS_KX_AN_IDLE 0x00080000
+#define IXGBE_LINKS_1G_SYNC 0x00040000
+#define IXGBE_LINKS_10G_ALIGN 0x00020000
+#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000
+#define IXGBE_LINKS_TL_FAULT 0x00001000
+#define IXGBE_LINKS_SIGNAL 0x00000F00
+
+#define IXGBE_LINKS_SPEED_82599 0x30000000
+#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
+#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
+#define IXGBE_LINKS_SPEED_100_82599 0x10000000
+#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
+#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
+
+#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040
+
+/* PCS1GLSTA Bit Masks */
+#define IXGBE_PCS1GLSTA_LINK_OK 1
+#define IXGBE_PCS1GLSTA_SYNK_OK 0x10
+#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000
+#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000
+#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000
+#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000
+#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000
+
+#define IXGBE_PCS1GANA_SYM_PAUSE 0x80
+#define IXGBE_PCS1GANA_ASM_PAUSE 0x100
+
+/* PCS1GLCTL Bit Masks */
+#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg timeout enable */
+#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1
+#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20
+#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40
+#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000
+#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000
+
+/* ANLP1 Bit Masks */
+#define IXGBE_ANLP1_PAUSE 0x0C00
+#define IXGBE_ANLP1_SYM_PAUSE 0x0400
+#define IXGBE_ANLP1_ASM_PAUSE 0x0800
+#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000
+
+/* SW Semaphore Register bitmasks */
+#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
+#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
+
+/* SW_FW_SYNC/GSSR definitions */
+#define IXGBE_GSSR_EEP_SM 0x0001
+#define IXGBE_GSSR_PHY0_SM 0x0002
+#define IXGBE_GSSR_PHY1_SM 0x0004
+#define IXGBE_GSSR_MAC_CSR_SM 0x0008
+#define IXGBE_GSSR_FLASH_SM 0x0010
+#define IXGBE_GSSR_SW_MNG_SM 0x0400
+
+/* FW Status register bitmask */
+#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */
+
+/* EEC Register */
+#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */
+#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */
+#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */
+#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */
+#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */
+#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */
+#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */
+#define IXGBE_EEC_FWE_SHIFT 4
+#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */
+#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */
+#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
+#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
+#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
+#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */
+#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
+/* EEPROM Addressing bits based on type (0-small, 1-large) */
+#define IXGBE_EEC_ADDR_SIZE 0x00000400
+#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
+#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD allows 14 bits for addr. */
+
+#define IXGBE_EEC_SIZE_SHIFT 11
+#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
+#define IXGBE_EEPROM_OPCODE_BITS 8
+
+/* Part Number String Length */
+#define IXGBE_PBANUM_LENGTH 11
+
+/* Checksum and EEPROM pointers */
+#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
+#define IXGBE_EEPROM_CHECKSUM 0x3F
+#define IXGBE_EEPROM_SUM 0xBABA
+#define IXGBE_PCIE_ANALOG_PTR 0x03
+#define IXGBE_ATLAS0_CONFIG_PTR 0x04
+#define IXGBE_PHY_PTR 0x04
+#define IXGBE_ATLAS1_CONFIG_PTR 0x05
+#define IXGBE_OPTION_ROM_PTR 0x05
+#define IXGBE_PCIE_GENERAL_PTR 0x06
+#define IXGBE_PCIE_CONFIG0_PTR 0x07
+#define IXGBE_PCIE_CONFIG1_PTR 0x08
+#define IXGBE_CORE0_PTR 0x09
+#define IXGBE_CORE1_PTR 0x0A
+#define IXGBE_MAC0_PTR 0x0B
+#define IXGBE_MAC1_PTR 0x0C
+#define IXGBE_CSR0_CONFIG_PTR 0x0D
+#define IXGBE_CSR1_CONFIG_PTR 0x0E
+#define IXGBE_FW_PTR 0x0F
+#define IXGBE_PBANUM0_PTR 0x15
+#define IXGBE_PBANUM1_PTR 0x16
+#define IXGBE_FREE_SPACE_PTR 0x3E
+#define IXGBE_SAN_MAC_ADDR_PTR 0x28
+#define IXGBE_DEVICE_CAPS 0x2C
+#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
+#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
+#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
+
+/* MSI-X capability fields masks */
+#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF
+
+/* Legacy EEPROM word offsets */
+#define IXGBE_ISCSI_BOOT_CAPS 0x0033
+#define IXGBE_ISCSI_SETUP_PORT_0 0x0030
+#define IXGBE_ISCSI_SETUP_PORT_1 0x0034
+
+/* EEPROM Commands - SPI */
+#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */
+#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01
+#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
+#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
+#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */
+#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */
+/* EEPROM reset Write Enable latch */
+#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04
+#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */
+#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */
+#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */
+#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */
+#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
+
+/* EEPROM Read Register */
+#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */
+#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */
+#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */
+#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
+#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */
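+
+/*
+ * Example (sketch): a word read through EERD in the style of the generic
+ * EEPROM helpers (the IXGBE_EERD offset is defined elsewhere in this
+ * header; "offset" and "data" are illustrative):
+ *
+ * IXGBE_WRITE_REG(hw, IXGBE_EERD,
+ *                 (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+ *                 IXGBE_EEPROM_RW_REG_START);
+ * ... poll until IXGBE_EEPROM_RW_REG_DONE is set, then:
+ * *data = (u16)(IXGBE_READ_REG(hw, IXGBE_EERD) >> IXGBE_EEPROM_RW_REG_DATA);
+ */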
+
+#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
+
+#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
+#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
+#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
+
+#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
+#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
+#endif
+
+#ifndef IXGBE_EERD_EEWR_ATTEMPTS
+/* Number of 5-microsecond intervals we wait for an EERD read or
+ * EEWR write to complete */
+#define IXGBE_EERD_EEWR_ATTEMPTS 100000
+#endif
+
+#ifndef IXGBE_FLUDONE_ATTEMPTS
+/* # attempts we wait for flash update to complete */
+#define IXGBE_FLUDONE_ATTEMPTS 20000
+#endif
+
+#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */
+#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */
+#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */
+#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */
+
+#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0
+#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
+#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
+#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
+#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
+#define IXGBE_FW_LESM_STATE_1 0x1
+#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
+#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
+#define IXGBE_FW_PATCH_VERSION_4 0x7
+#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */
+#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */
+#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */
+#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
+
+#define IXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */
+#define IXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */
+#define IXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */
+
+/* PCI Bus Info */
+#define IXGBE_PCI_DEVICE_STATUS 0xAA
+#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
+#define IXGBE_PCI_LINK_STATUS 0xB2
+#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
+#define IXGBE_PCI_LINK_WIDTH 0x3F0
+#define IXGBE_PCI_LINK_WIDTH_1 0x10
+#define IXGBE_PCI_LINK_WIDTH_2 0x20
+#define IXGBE_PCI_LINK_WIDTH_4 0x40
+#define IXGBE_PCI_LINK_WIDTH_8 0x80
+#define IXGBE_PCI_LINK_SPEED 0xF
+#define IXGBE_PCI_LINK_SPEED_2500 0x1
+#define IXGBE_PCI_LINK_SPEED_5000 0x2
+#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
+#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
+
+/* Number of 100 microseconds we wait for PCI Express master disable */
+#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+
+/* Check whether an address is multicast. This check is little-endian specific. */
+#define IXGBE_IS_MULTICAST(Address) \
+ (bool)(((u8 *)(Address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define IXGBE_IS_BROADCAST(Address) \
+ ((((u8 *)(Address))[0] == ((u8)0xff)) && \
+ (((u8 *)(Address))[1] == ((u8)0xff)))
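+
+/*
+ * Example (sketch): broadcast addresses also satisfy the multicast test,
+ * so a pure multicast check combines both macros:
+ *
+ * if (IXGBE_IS_MULTICAST(mac_addr) && !IXGBE_IS_BROADCAST(mac_addr))
+ *         ... add mac_addr to the multicast table ...
+ */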
+
+/* RAH */
+#define IXGBE_RAH_VIND_MASK 0x003C0000
+#define IXGBE_RAH_VIND_SHIFT 18
+#define IXGBE_RAH_AV 0x80000000
+#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF
+
+/* Header split receive */
+#define IXGBE_RFCTL_ISCSI_DIS 0x00000001
+#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E
+#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1
+#define IXGBE_RFCTL_NFSW_DIS 0x00000040
+#define IXGBE_RFCTL_NFSR_DIS 0x00000080
+#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300
+#define IXGBE_RFCTL_NFS_VER_SHIFT 8
+#define IXGBE_RFCTL_NFS_VER_2 0
+#define IXGBE_RFCTL_NFS_VER_3 1
+#define IXGBE_RFCTL_NFS_VER_4 2
+#define IXGBE_RFCTL_IPV6_DIS 0x00000400
+#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800
+#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000
+#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000
+#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+
+/* Transmit Config masks */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
+#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */
+#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
+/* Enable short packet padding to 64 bytes */
+#define IXGBE_TX_PAD_ENABLE 0x00000400
+#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */
+/* This allows for 16K packets + 4K for VLAN */
+#define IXGBE_MAX_FRAME_SZ 0x40040000
+
+#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */
+#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */
+
+/* Receive Config masks */
+#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
+#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. write-back flushing */
+#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */
+#define IXGBE_RXDCTL_RLPML_EN 0x00008000
+#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
+
+#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
+#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
+#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
+#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
+#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */
+#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */
+/* Receive Priority Flow Control Enable */
+#define IXGBE_FCTRL_RPFCE 0x00004000
+#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */
+#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */
+#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */
+#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */
+#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */
+#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF0 /* Receive FC Mask */
+
+#define IXGBE_MFLCN_RPFCE_SHIFT 4
+
+/* Multiple Receive Queue Control */
+#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */
+#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */
+#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */
+#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */
+#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */
+#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */
+#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */
+#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */
+#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */
+#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */
+#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */
+#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000
+#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000
+#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
+#define IXGBE_MRQC_L3L4TXSWEN 0x00008000
+
+/* Queue Drop Enable */
+#define IXGBE_QDE_ENABLE 0x00000001
+#define IXGBE_QDE_IDX_MASK 0x00007F00
+#define IXGBE_QDE_IDX_SHIFT 8
+
+#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
+#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
+#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+
+#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
+#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000
+#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
+
+/* Multiple Transmit Queue Command Register */
+#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
+#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
+#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 packet buffer */
+#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
+#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
+#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
+#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA or 4 TQ if VT_ENA */
+
+/* Receive Descriptor bit definitions */
+#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
+#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
+#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
+#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
+#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
+#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
+#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */
+#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
+#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
+#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
+#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
+#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
+#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
+#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
+#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
+#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
+#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
+#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
+#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */
+#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */
+#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */
+#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */
+#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */
+#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */
+#define IXGBE_RXDADV_ERR_HBO 0x00800000 /* Header Buffer Overflow */
+#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
+#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
+#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
+#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
+#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
+#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
+#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define IXGBE_RXD_PRI_SHIFT 13
+#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
+#define IXGBE_RXD_CFI_SHIFT 12
+
+#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */
+#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */
+#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
+#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
+#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */
+#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
+#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
+#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
+#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
+
+/* PSRTYPE bit definitions */
+#define IXGBE_PSRTYPE_TCPHDR 0x00000010
+#define IXGBE_PSRTYPE_UDPHDR 0x00000020
+#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
+#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
+#define IXGBE_PSRTYPE_L2HDR 0x00001000
+
+/* SRRCTL bit definitions */
+#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* packet buffer size in 1 KB units */
+#define IXGBE_SRRCTL_RDMTS_SHIFT 22
+#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
+#define IXGBE_SRRCTL_DROP_EN 0x10000000
+#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
+#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
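+
+/*
+ * Example (sketch): a one-buffer receive queue programs the packet
+ * buffer size in 1 KB units together with the descriptor type
+ * (IXGBE_SRRCTL(_i) is defined elsewhere in this header; rx_buf_len
+ * and reg_idx are illustrative):
+ *
+ * u32 srrctl = rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ * srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ * IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
+ */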
+
+#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000
+#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
+
+#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
+#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
+#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
+#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
+#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
+#define IXGBE_RXDADV_RSCCNT_SHIFT 17
+#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
+#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
+#define IXGBE_RXDADV_SPH 0x8000
+
+/* RSS Hash results */
+#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000
+#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
+#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
+#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004
+#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
+#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
+
+/* RSS Packet Types as indicated in the receive descriptor. */
+#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000
+#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
+#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
+#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
+#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
+#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
+#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
+
+/* Security Processing bit Indication */
+#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000
+#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
+
+/* Masks to determine if packets should be dropped due to frame errors */
+#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXD_ERR_CE | \
+ IXGBE_RXD_ERR_LE | \
+ IXGBE_RXD_ERR_PE | \
+ IXGBE_RXD_ERR_OSE | \
+ IXGBE_RXD_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXDADV_ERR_CE | \
+ IXGBE_RXDADV_ERR_LE | \
+ IXGBE_RXDADV_ERR_PE | \
+ IXGBE_RXDADV_ERR_OSE | \
+ IXGBE_RXDADV_ERR_USE)
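+
+/*
+ * Example (sketch): the receive clean-up path drops a frame when any of
+ * the masked error bits is set in the advanced descriptor write-back
+ * (union ixgbe_adv_rx_desc is defined below):
+ *
+ * u32 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ * if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)
+ *         ... drop the frame ...
+ */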
+
+/* Multicast bit mask */
+#define IXGBE_MCSTCTRL_MFE 0x4
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
+
+/* Vlan-specific macros */
+#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
+#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
+
+/* SR-IOV specific macros */
+#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
+#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4))
+#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
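+
+/*
+ * Example (sketch): each MBVFICR register carries the mailbox causes for
+ * 16 VFs, so the register for a given VF number is reached via the index
+ * macro above:
+ *
+ * u32 vficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(IXGBE_MBVFICR_INDEX(vf_number)));
+ */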
+
+enum ixgbe_fdir_pballoc_type {
+ IXGBE_FDIR_PBALLOC_NONE = 0,
+ IXGBE_FDIR_PBALLOC_64K = 1,
+ IXGBE_FDIR_PBALLOC_128K = 2,
+ IXGBE_FDIR_PBALLOC_256K = 3,
+};
+#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT 16
+
+/* Flow Director register values */
+#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001
+#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002
+#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003
+#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008
+#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010
+#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020
+#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080
+#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8
+#define IXGBE_FDIRCTRL_FLEX_SHIFT 16
+#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000
+#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24
+#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000
+#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28
+
+#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16
+#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16
+#define IXGBE_FDIRIP6M_DIPM_SHIFT 16
+#define IXGBE_FDIRM_VLANID 0x00000001
+#define IXGBE_FDIRM_VLANP 0x00000002
+#define IXGBE_FDIRM_POOL 0x00000004
+#define IXGBE_FDIRM_L4P 0x00000008
+#define IXGBE_FDIRM_FLEX 0x00000010
+#define IXGBE_FDIRM_DIPv6 0x00000020
+
+#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
+#define IXGBE_FDIRFREE_FREE_SHIFT 0
+#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000
+#define IXGBE_FDIRFREE_COLL_SHIFT 16
+#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F
+#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0
+#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000
+#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16
+#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF
+#define IXGBE_FDIRUSTAT_ADD_SHIFT 0
+#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000
+#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16
+#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF
+#define IXGBE_FDIRFSTAT_FADD_SHIFT 0
+#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00
+#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8
+#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16
+#define IXGBE_FDIRVLAN_FLEX_SHIFT 16
+#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15
+#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16
+
+#define IXGBE_FDIRCMD_CMD_MASK 0x00000003
+#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001
+#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002
+#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003
+#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004
+#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008
+#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010
+#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020
+#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040
+#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060
+#define IXGBE_FDIRCMD_IPV6 0x00000080
+#define IXGBE_FDIRCMD_CLEARHT 0x00000100
+#define IXGBE_FDIRCMD_DROP 0x00000200
+#define IXGBE_FDIRCMD_INT 0x00000400
+#define IXGBE_FDIRCMD_LAST 0x00000800
+#define IXGBE_FDIRCMD_COLLISION 0x00001000
+#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
+#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
+#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
+#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
+#define IXGBE_FDIR_INIT_DONE_POLL 10
+#define IXGBE_FDIRCMD_CMD_POLL 10
+
+#define IXGBE_FDIR_DROP_QUEUE 127
+
+/* Manageability Host Interface defines */
+#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
+#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
+#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */
+
+/* CEM Support */
+#define FW_CEM_HDR_LEN 0x4
+#define FW_CEM_CMD_DRIVER_INFO 0xDD
+#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
+#define FW_CEM_CMD_RESERVED 0x0
+#define FW_CEM_UNUSED_VER 0x0
+#define FW_CEM_MAX_RETRIES 3
+#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+
+/* Host Interface Command Structures */
+struct ixgbe_hic_hdr {
+ u8 cmd;
+ u8 buf_len;
+ union {
+ u8 cmd_resv;
+ u8 ret_status;
+ } cmd_or_resp;
+ u8 checksum;
+};
+
+struct ixgbe_hic_drv_info {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_num;
+ u8 ver_sub;
+ u8 ver_build;
+ u8 ver_min;
+ u8 ver_maj;
+ u8 pad; /* end spacing to ensure length is a multiple of dwords */
+ u16 pad2; /* end spacing to ensure length is a multiple of dwords */
+};
+
+/* Transmit Descriptor - Advanced */
+union ixgbe_adv_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+/* Receive Descriptor - Advanced */
+union ixgbe_adv_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info; /* RSS, Pkt type */
+ __le16 hdr_info; /* Splithdr, hdrlen */
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
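+
+/*
+ * Example (sketch): write-back fields are little-endian, so consumers go
+ * through the usual helpers:
+ *
+ * if (le32_to_cpu(rx_desc->wb.upper.status_error) & IXGBE_RXDADV_STAT_DD) {
+ *         u16 pkt_len = le16_to_cpu(rx_desc->wb.upper.length);
+ *         ... the descriptor has been written back ...
+ * }
+ */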
+
+/* Context descriptors */
+struct ixgbe_adv_tx_context_desc {
+ __le32 vlan_macip_lens;
+ __le32 seqnum_seed;
+ __le32 type_tucmd_mlhl;
+ __le32 mss_l4len_idx;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length (bytes) */
+#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */
+#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */
+#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */
+#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
+#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
+#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
+#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
+#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
+#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
+#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
+#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
+#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
+#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */
+#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */
+#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
+#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */
+#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */
+#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
+#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
+#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 /* ESP Encrypt Enable */
+#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */
+#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */
+#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */
+#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */
+#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation: End */
+#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation: Start */
+#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */
+#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */
+#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */
+#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */
+#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
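+
+/*
+ * Example (sketch): an advanced data descriptor's cmd_type_len is the
+ * buffer length OR-ed with the descriptor type and per-packet command
+ * bits:
+ *
+ * u32 cmd_type = len | IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT |
+ *                IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_EOP |
+ *                IXGBE_ADVTXD_DCMD_RS;
+ * tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ */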
+
+/* Autonegotiation advertised speeds */
+typedef u32 ixgbe_autoneg_advertised;
+/* Link speed */
+typedef u32 ixgbe_link_speed;
+#define IXGBE_LINK_SPEED_UNKNOWN 0
+#define IXGBE_LINK_SPEED_100_FULL 0x0008
+#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
+#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
+#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
+ IXGBE_LINK_SPEED_10GB_FULL)
+#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
+ IXGBE_LINK_SPEED_1GB_FULL | \
+ IXGBE_LINK_SPEED_10GB_FULL)
+
+
+/* Physical layer type */
+typedef u32 ixgbe_physical_layer;
+#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002
+#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
+
+/* Flow control data sheet defined values.
+ * Calculation and defines taken from 802.1Qbb Annex O
+ */
+
+/* BitTimes (BT) conversion */
+#define IXGBE_BT2KB(BT) ((BT + 1023) / (8 * 1024))
+#define IXGBE_B2BT(BT) (BT * 8)
+
+/* Calculate Delay to respond to PFC */
+#define IXGBE_PFC_D 672
+
+/* Calculate Cable Delay */
+#define IXGBE_CABLE_DC 5556 /* Delay Copper */
+#define IXGBE_CABLE_DO 5000 /* Delay Optical */
+
+/* Calculate Interface Delay X540 */
+#define IXGBE_PHY_DC 25600 /* Delay 10G BASET */
+#define IXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */
+#define IXGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */
+
+#define IXGBE_ID_X540 (IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC)
+
+/* Calculate Interface Delay 82598, 82599 */
+#define IXGBE_PHY_D 12800
+#define IXGBE_MAC_D 4096
+#define IXGBE_XAUI_D (2 * 1024)
+
+#define IXGBE_ID (IXGBE_MAC_D + IXGBE_XAUI_D + IXGBE_PHY_D)
+
+/* Calculate Delay incurred from higher layer */
+#define IXGBE_HD 6144
+
+/* Calculate PCI Bus delay for low thresholds */
+#define IXGBE_PCI_DELAY 10000
+
+/* Calculate X540 delay value in bit times */
+#define IXGBE_FILL_RATE (36 / 25) /* note: integer division truncates to 1 */
+
+#define IXGBE_DV_X540(LINK, TC) (IXGBE_FILL_RATE * \
+ (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + \
+ (2 * IXGBE_ID_X540) + \
+ IXGBE_HD + IXGBE_B2BT(TC)))
+
+/* Calculate 82599, 82598 delay value in bit times */
+#define IXGBE_DV(LINK, TC) (IXGBE_FILL_RATE * \
+ (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + (2 * IXGBE_ID) + \
+ IXGBE_HD + IXGBE_B2BT(TC)))
+
+/* Calculate low threshold delay values */
+#define IXGBE_LOW_DV_X540(TC) (2 * IXGBE_B2BT(TC) + \
+ (IXGBE_FILL_RATE * IXGBE_PCI_DELAY))
+#define IXGBE_LOW_DV(TC) (2 * IXGBE_LOW_DV_X540(TC))
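+
+/*
+ * Example (sketch): the delay value is computed in bit times from the
+ * maximum frame size and then converted to the KB units the flow-control
+ * threshold registers expect (max_frame in bytes, illustrative):
+ *
+ * u32 dv_id = IXGBE_DV(max_frame, max_frame);
+ * u32 high_water_kb = IXGBE_BT2KB(dv_id);
+ */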
+
+/* Software ATR hash keys */
+#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
+#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
+
+/* Software ATR input stream values and masks */
+#define IXGBE_ATR_HASH_MASK 0x7fff
+#define IXGBE_ATR_L4TYPE_MASK 0x3
+#define IXGBE_ATR_L4TYPE_UDP 0x1
+#define IXGBE_ATR_L4TYPE_TCP 0x2
+#define IXGBE_ATR_L4TYPE_SCTP 0x3
+#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+enum ixgbe_atr_flow_type {
+ IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
+ IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
+ IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
+ IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+ IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
+ IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
+ IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
+ IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+};
+
+/* Flow Director ATR input struct. */
+union ixgbe_atr_input {
+ /*
+ * Byte layout in order, all values with MSB first:
+ *
+ * vm_pool - 1 byte
+ * flow_type - 1 byte
+ * vlan_id - 2 bytes
+ * dst_ip - 16 bytes
+ * src_ip - 16 bytes
+ * src_port - 2 bytes
+ * dst_port - 2 bytes
+ * flex_bytes - 2 bytes
+ * bkt_hash - 2 bytes
+ */
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ __be32 dst_ip[4];
+ __be32 src_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 flex_bytes;
+ __be16 bkt_hash;
+ } formatted;
+ __be32 dword_stream[11];
+};
+
+/* Flow Director compressed ATR hash input struct */
+union ixgbe_atr_hash_dword {
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ } formatted;
+ __be32 ip;
+ struct {
+ __be16 src;
+ __be16 dst;
+ } port;
+ __be16 flex_bytes;
+ __be32 dword;
+};
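+
+/*
+ * Example (sketch): the compressed form packs VLAN and flow type into a
+ * single dword for signature-hash computation (vlan_id here is an
+ * illustrative __be16 value):
+ *
+ * union ixgbe_atr_hash_dword input = { .dword = 0 };
+ * input.formatted.vlan_id = vlan_id;
+ * input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ */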
+
+enum ixgbe_eeprom_type {
+ ixgbe_eeprom_uninitialized = 0,
+ ixgbe_eeprom_spi,
+ ixgbe_flash,
+ ixgbe_eeprom_none /* No NVM support */
+};
+
+enum ixgbe_mac_type {
+ ixgbe_mac_unknown = 0,
+ ixgbe_mac_82598EB,
+ ixgbe_mac_82599EB,
+ ixgbe_mac_X540,
+ ixgbe_num_macs
+};
+
+enum ixgbe_phy_type {
+ ixgbe_phy_unknown = 0,
+ ixgbe_phy_none,
+ ixgbe_phy_tn,
+ ixgbe_phy_aq,
+ ixgbe_phy_cu_unknown,
+ ixgbe_phy_qt,
+ ixgbe_phy_xaui,
+ ixgbe_phy_nl,
+ ixgbe_phy_sfp_passive_tyco,
+ ixgbe_phy_sfp_passive_unknown,
+ ixgbe_phy_sfp_active_unknown,
+ ixgbe_phy_sfp_avago,
+ ixgbe_phy_sfp_ftl,
+ ixgbe_phy_sfp_ftl_active,
+ ixgbe_phy_sfp_unknown,
+ ixgbe_phy_sfp_intel,
+ ixgbe_phy_sfp_unsupported,
+ ixgbe_phy_generic
+};
+
+/*
+ * SFP+ module type IDs:
+ *
+ * ID Module Type
+ * =============
+ * 0 SFP_DA_CU
+ * 1 SFP_SR
+ * 2 SFP_LR
+ * 3 SFP_DA_CU_CORE0 - 82599-specific
+ * 4 SFP_DA_CU_CORE1 - 82599-specific
+ * 5 SFP_SR/LR_CORE0 - 82599-specific
+ * 6 SFP_SR/LR_CORE1 - 82599-specific
+ * 7 SFP_da_act_lmt_CORE0 - 82599-specific
+ * 8 SFP_da_act_lmt_CORE1 - 82599-specific
+ * 9 SFP_1g_cu_CORE0 - 82599-specific
+ * 10 SFP_1g_cu_CORE1 - 82599-specific
+ */
+enum ixgbe_sfp_type {
+ ixgbe_sfp_type_da_cu = 0,
+ ixgbe_sfp_type_sr = 1,
+ ixgbe_sfp_type_lr = 2,
+ ixgbe_sfp_type_da_cu_core0 = 3,
+ ixgbe_sfp_type_da_cu_core1 = 4,
+ ixgbe_sfp_type_srlr_core0 = 5,
+ ixgbe_sfp_type_srlr_core1 = 6,
+ ixgbe_sfp_type_da_act_lmt_core0 = 7,
+ ixgbe_sfp_type_da_act_lmt_core1 = 8,
+ ixgbe_sfp_type_1g_cu_core0 = 9,
+ ixgbe_sfp_type_1g_cu_core1 = 10,
+ ixgbe_sfp_type_not_present = 0xFFFE,
+ ixgbe_sfp_type_unknown = 0xFFFF
+};
+
+enum ixgbe_media_type {
+ ixgbe_media_type_unknown = 0,
+ ixgbe_media_type_fiber,
+ ixgbe_media_type_fiber_lco,
+ ixgbe_media_type_copper,
+ ixgbe_media_type_backplane,
+ ixgbe_media_type_cx4,
+ ixgbe_media_type_virtual
+};
+
+/* Flow Control Settings */
+enum ixgbe_fc_mode {
+ ixgbe_fc_none = 0,
+ ixgbe_fc_rx_pause,
+ ixgbe_fc_tx_pause,
+ ixgbe_fc_full,
+#ifdef CONFIG_DCB
+ ixgbe_fc_pfc,
+#endif
+ ixgbe_fc_default
+};
+
+/* Smart Speed Settings */
+#define IXGBE_SMARTSPEED_MAX_RETRIES 3
+enum ixgbe_smart_speed {
+ ixgbe_smart_speed_auto = 0,
+ ixgbe_smart_speed_on,
+ ixgbe_smart_speed_off
+};
+
+/* PCI bus types */
+enum ixgbe_bus_type {
+ ixgbe_bus_type_unknown = 0,
+ ixgbe_bus_type_pci,
+ ixgbe_bus_type_pcix,
+ ixgbe_bus_type_pci_express,
+ ixgbe_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum ixgbe_bus_speed {
+ ixgbe_bus_speed_unknown = 0,
+ ixgbe_bus_speed_33 = 33,
+ ixgbe_bus_speed_66 = 66,
+ ixgbe_bus_speed_100 = 100,
+ ixgbe_bus_speed_120 = 120,
+ ixgbe_bus_speed_133 = 133,
+ ixgbe_bus_speed_2500 = 2500,
+ ixgbe_bus_speed_5000 = 5000,
+ ixgbe_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum ixgbe_bus_width {
+ ixgbe_bus_width_unknown = 0,
+ ixgbe_bus_width_pcie_x1 = 1,
+ ixgbe_bus_width_pcie_x2 = 2,
+ ixgbe_bus_width_pcie_x4 = 4,
+ ixgbe_bus_width_pcie_x8 = 8,
+ ixgbe_bus_width_32 = 32,
+ ixgbe_bus_width_64 = 64,
+ ixgbe_bus_width_reserved
+};
+
+struct ixgbe_addr_filter_info {
+ u32 num_mc_addrs;
+ u32 rar_used_count;
+ u32 mta_in_use;
+ u32 overflow_promisc;
+ bool uc_set_promisc;
+ bool user_set_promisc;
+};
+
+/* Bus parameters */
+struct ixgbe_bus_info {
+ enum ixgbe_bus_speed speed;
+ enum ixgbe_bus_width width;
+ enum ixgbe_bus_type type;
+
+ u16 func;
+ u16 lan_id;
+};
+
+/* Flow control parameters */
+struct ixgbe_fc_info {
+ u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
+ u32 low_water; /* Flow Control Low-water */
+ u16 pause_time; /* Flow Control Pause timer */
+ bool send_xon; /* Flow control send XON */
+ bool strict_ieee; /* Strict IEEE mode */
+ bool disable_fc_autoneg; /* Do not autonegotiate FC */
+ bool fc_was_autonegged; /* Is current_mode the result of autonegotiation? */
+ enum ixgbe_fc_mode current_mode; /* FC mode in effect */
+ enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+/* Statistics counters collected by the MAC */
+struct ixgbe_hw_stats {
+ u64 crcerrs;
+ u64 illerrc;
+ u64 errbc;
+ u64 mspdc;
+ u64 mpctotal;
+ u64 mpc[8];
+ u64 mlfc;
+ u64 mrfc;
+ u64 rlec;
+ u64 lxontxc;
+ u64 lxonrxc;
+ u64 lxofftxc;
+ u64 lxoffrxc;
+ u64 pxontxc[8];
+ u64 pxonrxc[8];
+ u64 pxofftxc[8];
+ u64 pxoffrxc[8];
+ u64 prc64;
+ u64 prc127;
+ u64 prc255;
+ u64 prc511;
+ u64 prc1023;
+ u64 prc1522;
+ u64 gprc;
+ u64 bprc;
+ u64 mprc;
+ u64 gptc;
+ u64 gorc;
+ u64 gotc;
+ u64 rnbc[8];
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rjc;
+ u64 mngprc;
+ u64 mngpdc;
+ u64 mngptc;
+ u64 tor;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 xec;
+ u64 rqsmr[16];
+ u64 tqsmr[8];
+ u64 qprc[16];
+ u64 qptc[16];
+ u64 qbrc[16];
+ u64 qbtc[16];
+ u64 qprdc[16];
+ u64 pxon2offc[8];
+ u64 fdirustat_add;
+ u64 fdirustat_remove;
+ u64 fdirfstat_fadd;
+ u64 fdirfstat_fremove;
+ u64 fdirmatch;
+ u64 fdirmiss;
+ u64 fccrc;
+ u64 fcoerpdc;
+ u64 fcoeprc;
+ u64 fcoeptc;
+ u64 fcoedwrc;
+ u64 fcoedwtc;
+ u64 b2ospc;
+ u64 b2ogprc;
+ u64 o2bgptc;
+ u64 o2bspc;
+};
+
+/* forward declaration */
+struct ixgbe_hw;
+
+/* iterator type for walking multicast address lists */
+typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+ u32 *vmdq);
+
+/* Function pointer table */
+struct ixgbe_eeprom_operations {
+ s32 (*init_params)(struct ixgbe_hw *);
+ s32 (*read)(struct ixgbe_hw *, u16, u16 *);
+ s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+ s32 (*write)(struct ixgbe_hw *, u16, u16);
+ s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+ s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
+ s32 (*update_checksum)(struct ixgbe_hw *);
+ u16 (*calc_checksum)(struct ixgbe_hw *);
+};
+
+struct ixgbe_mac_operations {
+ s32 (*init_hw)(struct ixgbe_hw *);
+ s32 (*reset_hw)(struct ixgbe_hw *);
+ s32 (*start_hw)(struct ixgbe_hw *);
+ s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+ enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+ u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
+ s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+ s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
+ s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
+ s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
+ s32 (*stop_adapter)(struct ixgbe_hw *);
+ s32 (*get_bus_info)(struct ixgbe_hw *);
+ void (*set_lan_id)(struct ixgbe_hw *);
+ s32 (*read_analog_reg8)(struct ixgbe_hw *, u32, u8 *);
+ s32 (*write_analog_reg8)(struct ixgbe_hw *, u32, u8);
+ s32 (*setup_sfp)(struct ixgbe_hw *);
+ s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
+ s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
+ void (*release_swfw_sync)(struct ixgbe_hw *, u16);
+
+ /* Link */
+ void (*disable_tx_laser)(struct ixgbe_hw *);
+ void (*enable_tx_laser)(struct ixgbe_hw *);
+ void (*flap_tx_laser)(struct ixgbe_hw *);
+ s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
+ s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+ s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+ bool *);
+
+ /* Packet Buffer Manipulation */
+ void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
+
+ /* LED */
+ s32 (*led_on)(struct ixgbe_hw *, u32);
+ s32 (*led_off)(struct ixgbe_hw *, u32);
+ s32 (*blink_led_start)(struct ixgbe_hw *, u32);
+ s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
+
+ /* RAR, Multicast, VLAN */
+ s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
+ s32 (*clear_rar)(struct ixgbe_hw *, u32);
+ s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+ s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
+ s32 (*init_rx_addrs)(struct ixgbe_hw *);
+ s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
+ s32 (*enable_mc)(struct ixgbe_hw *);
+ s32 (*disable_mc)(struct ixgbe_hw *);
+ s32 (*clear_vfta)(struct ixgbe_hw *);
+ s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+ s32 (*init_uta_tables)(struct ixgbe_hw *);
+ void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
+ void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
+
+ /* Flow Control */
+ s32 (*fc_enable)(struct ixgbe_hw *, s32);
+
+ /* Manageability interface */
+ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
+};
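+/*
+ * The ops tables above are filled in per MAC type at probe time; callers
+ * dispatch indirectly, e.g. hw->mac.ops.reset_hw(hw) or
+ * hw->eeprom.ops.read(hw, offset, &data), instead of testing the MAC
+ * type at each call site.
+ */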
+
+struct ixgbe_phy_operations {
+ s32 (*identify)(struct ixgbe_hw *);
+ s32 (*identify_sfp)(struct ixgbe_hw *);
+ s32 (*init)(struct ixgbe_hw *);
+ s32 (*reset)(struct ixgbe_hw *);
+ s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
+ s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
+ s32 (*setup_link)(struct ixgbe_hw *);
+ s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
+ bool);
+ s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
+ s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
+ s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
+ s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+ s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8, u8 *);
+ s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
+ s32 (*check_overtemp)(struct ixgbe_hw *);
+};
+
+struct ixgbe_eeprom_info {
+ struct ixgbe_eeprom_operations ops;
+ enum ixgbe_eeprom_type type;
+ u32 semaphore_delay;
+ u16 word_size;
+ u16 address_bits;
+ u16 word_page_size;
+};
+
+#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
+struct ixgbe_mac_info {
+ struct ixgbe_mac_operations ops;
+ enum ixgbe_mac_type type;
+ u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ /* prefix for World Wide Node Name (WWNN) */
+ u16 wwnn_prefix;
+ /* prefix for World Wide Port Name (WWPN) */
+ u16 wwpn_prefix;
+#define IXGBE_MAX_MTA 128
+ u32 mta_shadow[IXGBE_MAX_MTA];
+ s32 mc_filter_type;
+ u32 mcft_size;
+ u32 vft_size;
+ u32 num_rar_entries;
+ u32 rar_highwater;
+ u32 rx_pb_size;
+ u32 max_tx_queues;
+ u32 max_rx_queues;
+ u32 max_msix_vectors;
+ u32 orig_autoc;
+ u32 orig_autoc2;
+ bool orig_link_settings_stored;
+ bool autotry_restart;
+ u8 flags;
+};
+
+struct ixgbe_phy_info {
+ struct ixgbe_phy_operations ops;
+ struct mdio_if_info mdio;
+ enum ixgbe_phy_type type;
+ u32 id;
+ enum ixgbe_sfp_type sfp_type;
+ bool sfp_setup_needed;
+ u32 revision;
+ enum ixgbe_media_type media_type;
+ bool reset_disable;
+ ixgbe_autoneg_advertised autoneg_advertised;
+ enum ixgbe_smart_speed smart_speed;
+ bool smart_speed_active;
+ bool multispeed_fiber;
+ bool reset_if_overtemp;
+};
+
+#include "ixgbe_mbx.h"
+
+struct ixgbe_mbx_operations {
+ s32 (*init_params)(struct ixgbe_hw *hw);
+ s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*check_for_msg)(struct ixgbe_hw *, u16);
+ s32 (*check_for_ack)(struct ixgbe_hw *, u16);
+ s32 (*check_for_rst)(struct ixgbe_hw *, u16);
+};
+
+struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+ struct ixgbe_mbx_operations ops;
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u32 v2p_mailbox;
+ u16 size;
+};
+
+struct ixgbe_hw {
+ u8 __iomem *hw_addr;
+ void *back;
+ struct ixgbe_mac_info mac;
+ struct ixgbe_addr_filter_info addr_ctrl;
+ struct ixgbe_fc_info fc;
+ struct ixgbe_phy_info phy;
+ struct ixgbe_eeprom_info eeprom;
+ struct ixgbe_bus_info bus;
+ struct ixgbe_mbx_info mbx;
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ bool adapter_stopped;
+ bool force_full_reset;
+};
+
+struct ixgbe_info {
+ enum ixgbe_mac_type mac;
+ s32 (*get_invariants)(struct ixgbe_hw *);
+ struct ixgbe_mac_operations *mac_ops;
+ struct ixgbe_eeprom_operations *eeprom_ops;
+ struct ixgbe_phy_operations *phy_ops;
+ struct ixgbe_mbx_operations *mbx_ops;
+};
+
+/* Error Codes */
+#define IXGBE_ERR_EEPROM -1
+#define IXGBE_ERR_EEPROM_CHECKSUM -2
+#define IXGBE_ERR_PHY -3
+#define IXGBE_ERR_CONFIG -4
+#define IXGBE_ERR_PARAM -5
+#define IXGBE_ERR_MAC_TYPE -6
+#define IXGBE_ERR_UNKNOWN_PHY -7
+#define IXGBE_ERR_LINK_SETUP -8
+#define IXGBE_ERR_ADAPTER_STOPPED -9
+#define IXGBE_ERR_INVALID_MAC_ADDR -10
+#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11
+#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12
+#define IXGBE_ERR_INVALID_LINK_SETTINGS -13
+#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14
+#define IXGBE_ERR_RESET_FAILED -15
+#define IXGBE_ERR_SWFW_SYNC -16
+#define IXGBE_ERR_PHY_ADDR_INVALID -17
+#define IXGBE_ERR_I2C -18
+#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
+#define IXGBE_ERR_SFP_NOT_PRESENT -20
+#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
+#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
+#define IXGBE_ERR_FDIR_REINIT_FAILED -23
+#define IXGBE_ERR_EEPROM_VERSION -24
+#define IXGBE_ERR_NO_SPACE -25
+#define IXGBE_ERR_OVERTEMP -26
+#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
+#define IXGBE_ERR_FC_NOT_SUPPORTED -28
+#define IXGBE_ERR_FLOW_CONTROL -29
+#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
+#define IXGBE_ERR_PBA_SECTION -31
+#define IXGBE_ERR_INVALID_ARGUMENT -32
+#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
+#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
+
+#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
new file mode 100644
index 000000000000..e5101e91b6b5
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -0,0 +1,883 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "ixgbe.h"
+#include "ixgbe_phy.h"
+
+#define IXGBE_X540_MAX_TX_QUEUES 128
+#define IXGBE_X540_MAX_RX_QUEUES 128
+#define IXGBE_X540_RAR_ENTRIES 128
+#define IXGBE_X540_MC_TBL_SIZE 128
+#define IXGBE_X540_VFT_TBL_SIZE 128
+#define IXGBE_X540_RX_PB_SIZE 384
+
+static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
+static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
+static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
+static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
+
+static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
+{
+ return ixgbe_media_type_copper;
+}
+
+static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+
+ /* Call PHY identify routine to get the phy type */
+ ixgbe_identify_phy_generic(hw);
+
+ mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
+ mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
+ mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
+ mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
+ mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ **/
+static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+}
+
+/**
+ * ixgbe_reset_hw_X540 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, performs a PHY reset, and performs a link
+ * (MAC) reset.
+ **/
+static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u32 ctrl, i;
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != 0)
+ goto reset_hw_out;
+
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
+
+mac_reset_top:
+ ctrl = IXGBE_CTRL_RST;
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ udelay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ status = IXGBE_ERR_RESET_FAILED;
+ hw_dbg(hw, "Reset polling failed to complete.\n");
+ }
+ msleep(100);
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Set the Rx packet buffer size. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
+ IXGBE_X540_RX_PB_SIZE << IXGBE_RXPBSIZE_SHIFT);
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = IXGBE_X540_RAR_ENTRIES;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ /* Store the permanent SAN mac address */
+ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+ /* Add the SAN MAC address to the RAR only if it's a valid address */
+ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+ hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+ /* Reserve the last RAR for the SAN MAC address */
+ hw->mac.num_rar_entries--;
+ }
+
+ /* Store the alternative WWNN/WWPN prefix */
+ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+ &hw->mac.wwpn_prefix);
+
+reset_hw_out:
+ return status;
+}
+
+/**
+ * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware using the generic start_hw function followed by
+ * the generation-2 start_hw function, then performs any
+ * revision-specific operations.
+ **/
+static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
+{
+ s32 ret_val = 0;
+
+ ret_val = ixgbe_start_hw_generic(hw);
+ if (ret_val != 0)
+ goto out;
+
+ ret_val = ixgbe_start_hw_gen2(hw);
+ hw->mac.rx_pb_size = IXGBE_X540_RX_PB_SIZE;
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
+{
+ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u16 ext_ability = 0;
+
+ hw->phy.ops.identify(hw);
+
+ hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
+ &ext_ability);
+ if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+
+ return physical_layer;
+}
+
+/**
+ * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 eec;
+ u16 eeprom_size;
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->semaphore_delay = 10;
+ eeprom->type = ixgbe_flash;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+ IXGBE_EEC_SIZE_SHIFT);
+ eeprom->word_size = 1 << (eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+ hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
+ eeprom->type, eeprom->word_size);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ s32 status = 0;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
+ status = ixgbe_read_eerd_generic(hw, offset, data);
+ else
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
+
+/**
+ * ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Reads one or more 16 bit words from the EEPROM using the EERD register.
+ **/
+static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ s32 status = 0;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
+ status = ixgbe_read_eerd_buffer_generic(hw, offset,
+ words, data);
+ else
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
+
+/**
+ * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Writes a 16 bit word to the EEPROM using the EEWR register.
+ **/
+static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status = 0;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
+ status = ixgbe_write_eewr_generic(hw, offset, data);
+ else
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
+
+/**
+ * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: word(s) to write to the EEPROM
+ *
+ * Writes one or more 16 bit words to the EEPROM using the EEWR register.
+ **/
+static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ s32 status = 0;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
+ status = ixgbe_write_eewr_buffer_generic(hw, offset,
+ words, data);
+ else
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ *
+ * This function does not use synchronization for EERD and EEWR. It can
+ * be used internally by functions which utilize
+ * ixgbe_acquire_swfw_sync_X540.
+ **/
+static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+ u16 i;
+ u16 j;
+ u16 checksum = 0;
+ u16 length = 0;
+ u16 pointer = 0;
+ u16 word = 0;
+
+ /*
+ * Do not use hw->eeprom.ops.read because we do not want to take
+ * the synchronization semaphores here. Instead use
+ * ixgbe_read_eerd_generic
+ */
+
+ /* Include 0x0-0x3F in the checksum */
+ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
+ if (ixgbe_read_eerd_generic(hw, i, &word) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+ checksum += word;
+ }
+
+ /*
+ * Include all data from pointers 0x3, 0x6-0xE. This excludes the
+ * FW, PHY module, and PCIe Expansion/Option ROM pointers.
+ */
+ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+ if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
+ continue;
+
+ if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+
+ /* Skip pointer section if the pointer is invalid. */
+ if (pointer == 0xFFFF || pointer == 0 ||
+ pointer >= hw->eeprom.word_size)
+ continue;
+
+ if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+
+ /* Skip pointer section if length is invalid. */
+ if (length == 0xFFFF || length == 0 ||
+ (pointer + length) >= hw->eeprom.word_size)
+ continue;
+
+ for (j = pointer + 1; j <= pointer + length; j++) {
+ if (ixgbe_read_eerd_generic(hw, j, &word) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+ checksum += word;
+ }
+ }
+
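+ /*
+ * The stored checksum word is defined so that the 16-bit sum of all
+ * checksummed words, including the checksum itself, equals
+ * IXGBE_EEPROM_SUM; derive that value here.
+ */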
+ checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+ return checksum;
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ **/
+static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
+ u16 *checksum_val)
+{
+ s32 status;
+ u16 checksum;
+ u16 read_checksum = 0;
+
+ /*
+ * Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+
+ if (status != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ goto out;
+ }
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
+ checksum = hw->eeprom.ops.calc_checksum(hw);
+
+ /*
+ * Do not use hw->eeprom.ops.read because we do not want to take
+ * the synchronization semaphores twice here.
+ */
+ ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
+ &read_checksum);
+
+ /*
+ * Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum)
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+out:
+ return status;
+}
+
+/**
+ * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After the EEPROM has been written to shadow RAM using the EEWR register,
+ * software recalculates the checksum, writes it back to the EEPROM, and
+ * instructs the hardware to update the flash.
+ **/
+static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 checksum;
+
+ /*
+ * Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+
+ if (status != 0)
+ hw_dbg(hw, "EEPROM read failed\n");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
+ checksum = hw->eeprom.ops.calc_checksum(hw);
+
+ /*
+ * Do not use hw->eeprom.ops.write because we do not want to
+ * take the synchronization semaphores twice here.
+ */
+ status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM,
+ checksum);
+
+ if (status == 0)
+ status = ixgbe_update_flash_X540(hw);
+ else
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+ return status;
+}
+
+/**
+ * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
+ * @hw: pointer to hardware structure
+ *
+ * Sets FLUP (bit 23) of the EEC register to instruct the hardware to copy
+ * the EEPROM from shadow RAM to the flash device.
+ **/
+static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
+{
+ u32 flup;
+ s32 status;
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == IXGBE_ERR_EEPROM) {
+ hw_dbg(hw, "Flash update time out\n");
+ goto out;
+ }
+
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == 0)
+ hw_dbg(hw, "Flash update complete\n");
+ else
+ hw_dbg(hw, "Flash update time out\n");
+
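+ /* on silicon rev. 0 a valid second flash sector must be updated too */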
+ if (hw->revision_id == 0) {
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ if (flup & IXGBE_EEC_SEC1VAL) {
+ flup |= IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+ }
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == 0)
+ hw_dbg(hw, "Flash update complete\n");
+ else
+ hw_dbg(hw, "Flash update time out\n");
+ }
+out:
+ return status;
+}
+
+/**
+ * ixgbe_poll_flash_update_done_X540 - Poll flash update status
+ * @hw: pointer to hardware structure
+ *
+ * Polls FLUDONE (bit 26) of the EEC register to determine when the
+ * flash update is done.
+ **/
+static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u32 reg;
+ s32 status = IXGBE_ERR_EEPROM;
+
+ for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_EEC);
+ if (reg & IXGBE_EEC_FLUDONE) {
+ status = 0;
+ break;
+ }
+ udelay(5);
+ }
+ return status;
+}
+
+/**
+ * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore through the SW_FW_SYNC register for
+ * the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 5;
+ u32 hwmask = 0;
+ u32 timeout = 200;
+ u32 i;
+
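+ /*
+ * The EEPROM and Flash share a hardware interface, so a request for
+ * the EEPROM semaphore must also wait for any Flash owner.
+ */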
+ if (swmask == IXGBE_GSSR_EEP_SM)
+ hwmask = IXGBE_GSSR_FLASH_SM;
+
+ for (i = 0; i < timeout; i++) {
+ /*
+ * SW NVM semaphore bit is used for access to all
+ * SW_FW_SYNC bits (not just NVM)
+ */
+ if (ixgbe_get_swfw_sync_semaphore(hw))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask | hwmask))) {
+ swfw_sync |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ break;
+ } else {
+ /*
+ * Firmware currently using resource (fwmask),
+ * hardware currently using resource (hwmask),
+ * or other software thread currently using
+ * resource (swmask)
+ */
+ ixgbe_release_swfw_sync_semaphore(hw);
+ usleep_range(5000, 10000);
+ }
+ }
+
+ /*
+ * If the resource is not released by the FW/HW, the SW can assume that
+ * the FW/HW has malfunctioned. In that case the SW should set the
+ * SW bit(s) of the requested resource(s) while ignoring the
+ * corresponding FW/HW bits in the SW_FW_SYNC register.
+ */
+ if (i >= timeout) {
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (swfw_sync & (fwmask | hwmask)) {
+ if (ixgbe_get_swfw_sync_semaphore(hw))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ swfw_sync |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ }
+ }
+
+ usleep_range(5000, 10000);
+ return 0;
+}
+
+/**
+ * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore through the SW_FW_SYNC register
+ * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+
+ ixgbe_get_swfw_sync_semaphore(hw);
+
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync &= ~swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+
+ ixgbe_release_swfw_sync_semaphore(hw);
+ usleep_range(5000, 10000);
+}
+
+/**
+ * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * Sets the hardware semaphores so SW/FW can gain control of shared resources
+ **/
+static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_EEPROM;
+ u32 timeout = 2000;
+ u32 i;
+ u32 swsm;
+
+ /* Get SMBI software semaphore between device drivers first */
+ for (i = 0; i < timeout; i++) {
+ /*
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ if (!(swsm & IXGBE_SWSM_SMBI)) {
+ status = 0;
+ break;
+ }
+ udelay(50);
+ }
+
+ /* Now get the semaphore between SW/FW through the REGSMP bit */
+ if (!status) {
+ for (i = 0; i < timeout; i++) {
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (!(swsm & IXGBE_SWFW_REGSMP))
+ break;
+
+ udelay(50);
+ }
+ } else {
+ hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function clears hardware semaphore bits.
+ **/
+static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+ u32 swsm;
+
+ /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm &= ~IXGBE_SWSM_SMBI;
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swsm &= ~IXGBE_SWFW_REGSMP;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_blink_led_start_X540 - Blink LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to blink
+ *
+ * Devices that implement the version 2 interface:
+ * X540
+ **/
+static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
+{
+ u32 macc_reg;
+ u32 ledctl_reg;
+
+ /*
+ * In order for the blink bit in the LED control register
+ * to work, link and speed must be forced in the MAC. We
+ * will reverse this when we stop the blinking.
+ */
+ macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+
+ /* Set the LED to LINK_UP + BLINK. */
+ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
+ ledctl_reg |= IXGBE_LED_BLINK(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to stop blinking
+ *
+ * Devices that implement the version 2 interface:
+ * X540
+ **/
+static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
+{
+ u32 macc_reg;
+ u32 ledctl_reg;
+
+ /* Restore the LED to its default value. */
+ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
+ ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
+ ledctl_reg &= ~IXGBE_LED_BLINK(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
+
+ /* Unforce link and speed in the MAC. */
+ macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS);
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static struct ixgbe_mac_operations mac_ops_X540 = {
+ .init_hw = &ixgbe_init_hw_generic,
+ .reset_hw = &ixgbe_reset_hw_X540,
+ .start_hw = &ixgbe_start_hw_X540,
+ .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
+ .get_media_type = &ixgbe_get_media_type_X540,
+ .get_supported_physical_layer =
+ &ixgbe_get_supported_physical_layer_X540,
+ .enable_rx_dma = &ixgbe_enable_rx_dma_generic,
+ .get_mac_addr = &ixgbe_get_mac_addr_generic,
+ .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
+ .get_device_caps = &ixgbe_get_device_caps_generic,
+ .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
+ .stop_adapter = &ixgbe_stop_adapter_generic,
+ .get_bus_info = &ixgbe_get_bus_info_generic,
+ .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
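+ /* analog register access is not needed on X540 (integrated copper PHY) */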
+ .read_analog_reg8 = NULL,
+ .write_analog_reg8 = NULL,
+ .setup_link = &ixgbe_setup_mac_link_X540,
+ .set_rxpba = &ixgbe_set_rxpba_generic,
+ .check_link = &ixgbe_check_mac_link_generic,
+ .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
+ .led_on = &ixgbe_led_on_generic,
+ .led_off = &ixgbe_led_off_generic,
+ .blink_led_start = &ixgbe_blink_led_start_X540,
+ .blink_led_stop = &ixgbe_blink_led_stop_X540,
+ .set_rar = &ixgbe_set_rar_generic,
+ .clear_rar = &ixgbe_clear_rar_generic,
+ .set_vmdq = &ixgbe_set_vmdq_generic,
+ .clear_vmdq = &ixgbe_clear_vmdq_generic,
+ .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
+ .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
+ .enable_mc = &ixgbe_enable_mc_generic,
+ .disable_mc = &ixgbe_disable_mc_generic,
+ .clear_vfta = &ixgbe_clear_vfta_generic,
+ .set_vfta = &ixgbe_set_vfta_generic,
+ .fc_enable = &ixgbe_fc_enable_generic,
+ .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
+ .init_uta_tables = &ixgbe_init_uta_tables_generic,
+ .setup_sfp = NULL,
+ .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
+ .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
+ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
+ .release_swfw_sync = &ixgbe_release_swfw_sync_X540,
+};
+
+static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
+ .init_params = &ixgbe_init_eeprom_params_X540,
+ .read = &ixgbe_read_eerd_X540,
+ .read_buffer = &ixgbe_read_eerd_buffer_X540,
+ .write = &ixgbe_write_eewr_X540,
+ .write_buffer = &ixgbe_write_eewr_buffer_X540,
+ .calc_checksum = &ixgbe_calc_eeprom_checksum_X540,
+ .validate_checksum = &ixgbe_validate_eeprom_checksum_X540,
+ .update_checksum = &ixgbe_update_eeprom_checksum_X540,
+};
+
+static struct ixgbe_phy_operations phy_ops_X540 = {
+ .identify = &ixgbe_identify_phy_generic,
+ .identify_sfp = &ixgbe_identify_sfp_module_generic,
+ .init = NULL,
+ .reset = NULL,
+ .read_reg = &ixgbe_read_phy_reg_generic,
+ .write_reg = &ixgbe_write_phy_reg_generic,
+ .setup_link = &ixgbe_setup_phy_link_generic,
+ .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
+ .read_i2c_byte = &ixgbe_read_i2c_byte_generic,
+ .write_i2c_byte = &ixgbe_write_i2c_byte_generic,
+ .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
+ .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
+ .check_overtemp = &ixgbe_tn_check_overtemp,
+ .get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
+};
+
+struct ixgbe_info ixgbe_X540_info = {
+ .mac = ixgbe_mac_X540,
+ .get_invariants = &ixgbe_get_invariants_X540,
+ .mac_ops = &mac_ops_X540,
+ .eeprom_ops = &eeprom_ops_X540,
+ .phy_ops = &phy_ops_X540,
+ .mbx_ops = &mbx_ops_generic,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile
new file mode 100644
index 000000000000..1f35d229e71a
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbevf/Makefile
@@ -0,0 +1,38 @@
+################################################################################
+#
+# Intel 82599 Virtual Function driver
+# Copyright(c) 1999 - 2010 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) 82599 VF ethernet driver
+#
+
+obj-$(CONFIG_IXGBEVF) += ixgbevf.o
+
+ixgbevf-objs := vf.o \
+ mbx.o \
+ ethtool.o \
+ ixgbevf_main.o
+
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
new file mode 100644
index 000000000000..78abb6f1a866
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -0,0 +1,297 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_DEFINES_H_
+#define _IXGBEVF_DEFINES_H_
+
+/* Device IDs */
+#define IXGBE_DEV_ID_82599_VF 0x10ED
+#define IXGBE_DEV_ID_X540_VF 0x1515
+
+#define IXGBE_VF_IRQ_CLEAR_MASK 7
+#define IXGBE_VF_MAX_TX_QUEUES 1
+#define IXGBE_VF_MAX_RX_QUEUES 1
+#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
+
+/* Link speed */
+typedef u32 ixgbe_link_speed;
+#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
+#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
+
+#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
+#define IXGBE_LINKS_UP 0x40000000
+#define IXGBE_LINKS_SPEED_82599 0x30000000
+#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
+#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
+
+/* Interrupt Vector Allocation Registers */
+#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
+
+#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+/* Receive Config masks */
+#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
+#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
+#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */
+#define IXGBE_RXDCTL_RLPML_EN 0x00008000
+
+/* DCA Control */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+
+/* PSRTYPE bit definitions */
+#define IXGBE_PSRTYPE_TCPHDR 0x00000010
+#define IXGBE_PSRTYPE_UDPHDR 0x00000020
+#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
+#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
+#define IXGBE_PSRTYPE_L2HDR 0x00001000
+
+/* SRRCTL bit definitions */
+#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
+#define IXGBE_SRRCTL_RDMTS_SHIFT 22
+#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
+#define IXGBE_SRRCTL_DROP_EN 0x10000000
+#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
+#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
+
+/* Receive Descriptor bit definitions */
+#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
+#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
+#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
+#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
+#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
+#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
+#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
+#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
+#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
+#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
+#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
+#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
+#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
+#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
+#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
+#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
+#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
+#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */
+#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_HBO 0x00800000 /* Header Buffer Overflow */
+#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
+#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
+#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
+#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
+#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
+#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
+#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define IXGBE_RXD_PRI_SHIFT 13
+#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
+#define IXGBE_RXD_CFI_SHIFT 12
+
+#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */
+#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */
+#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
+#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
+#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */
+#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
+#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
+#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
+#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
+
+#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
+#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
+#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
+#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
+#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
+#define IXGBE_RXDADV_RSCCNT_SHIFT 17
+#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
+#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
+#define IXGBE_RXDADV_SPH 0x8000
+
+#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXD_ERR_CE | \
+ IXGBE_RXD_ERR_LE | \
+ IXGBE_RXD_ERR_PE | \
+ IXGBE_RXD_ERR_OSE | \
+ IXGBE_RXD_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXDADV_ERR_CE | \
+ IXGBE_RXDADV_ERR_LE | \
+ IXGBE_RXDADV_ERR_PE | \
+ IXGBE_RXDADV_ERR_OSE | \
+ IXGBE_RXDADV_ERR_USE)
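+
+/*
+ * Receive cleanup paths typically fold the individual error bits into a
+ * single test, e.g.
+ *
+ *	if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)
+ *		discard the frame
+ */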
+
+#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
+#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
+#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+
+/* Transmit Descriptor - Advanced */
+union ixgbe_adv_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+/* Receive Descriptor - Advanced */
+union ixgbe_adv_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info; /* RSS, Pkt type */
+ __le16 hdr_info; /* Splithdr, hdrlen */
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
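+/*
+ * A descriptor slot is posted by software in the "read" layout (buffer
+ * addresses) and overwritten by hardware in the "wb" layout once the
+ * packet has been received.
+ */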
+
+/* Context descriptors */
+struct ixgbe_adv_tx_context_desc {
+ __le32 vlan_macip_lens;
+ __le32 seqnum_seed;
+ __le32 type_tucmd_mlhl;
+ __le32 mss_l4len_idx;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
+#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
+#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
+#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
+#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
+#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
+#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
+#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
+#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+
+/* Interrupt register bitmasks */
+
+/* Extended Interrupt Cause Read */
+#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
+#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
+#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+
+/* Extended Interrupt Cause Set */
+#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Set */
+#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Clear */
+#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+#define IXGBE_EIMS_ENABLE_MASK ( \
+ IXGBE_EIMS_RTX_QUEUE | \
+ IXGBE_EIMS_MAILBOX | \
+ IXGBE_EIMS_OTHER)
+
+#define IXGBE_EITR_CNT_WDIS 0x80000000
+
+/* Error Codes */
+#define IXGBE_ERR_INVALID_MAC_ADDR -1
+#define IXGBE_ERR_RESET_FAILED -2
+
+#endif /* _IXGBEVF_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
new file mode 100644
index 000000000000..e1d9e3b63448
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -0,0 +1,696 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for ixgbevf */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <linux/if_vlan.h>
+#include <linux/uaccess.h>
+
+#include "ixgbevf.h"
+
+#define IXGBE_ALL_RAR_ENTRIES 16
+
+#ifdef ETHTOOL_GSTATS
+struct ixgbe_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+ int base_stat_offset;
+ int saved_reset_offset;
+};
+
+#define IXGBEVF_STAT(m, b, r) sizeof(((struct ixgbevf_adapter *)0)->m), \
+ offsetof(struct ixgbevf_adapter, m), \
+ offsetof(struct ixgbevf_adapter, b), \
+ offsetof(struct ixgbevf_adapter, r)
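+/*
+ * Each stat is described by three adapter offsets: the live counter, the
+ * base sampled when the counter was last initialized, and the value
+ * saved across the most recent reset; the reported value is
+ * live - base + saved, so counts stay monotonic across VF resets.
+ */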
+static struct ixgbe_stats ixgbe_gstrings_stats[] = {
+ {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
+ stats.saved_reset_vfgprc)},
+ {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
+ stats.saved_reset_vfgptc)},
+ {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc,
+ stats.saved_reset_vfgorc)},
+ {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
+ stats.saved_reset_vfgotc)},
+ {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)},
+ {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
+ stats.saved_reset_vfmprc)},
+ {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base,
+ zero_base)},
+ {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base,
+ zero_base)},
+ {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
+ zero_base)},
+ {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base, zero_base)},
+};
+
+#define IXGBE_QUEUE_STATS_LEN 0
+#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+
+#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#endif /* ETHTOOL_GSTATS */
+#ifdef ETHTOOL_TEST
+static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)",
+ "Link test (on/offline)"
+};
+#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#endif /* ETHTOOL_TEST */
+
+static int ixgbevf_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = 0;
+ bool link_up;
+
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_DUMMY1;
+ ecmd->port = -1;
+
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+
+ if (link_up) {
+ ethtool_cmd_speed_set(
+ ecmd,
+ (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ SPEED_10000 : SPEED_1000);
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ethtool_cmd_speed_set(ecmd, -1);
+ ecmd->duplex = -1;
+ }
+
+ return 0;
+}
+
+static u32 ixgbevf_get_msglevel(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
+
+static char *ixgbevf_reg_names[] = {
+ "IXGBE_VFCTRL",
+ "IXGBE_VFSTATUS",
+ "IXGBE_VFLINKS",
+ "IXGBE_VFRXMEMWRAP",
+ "IXGBE_VFFRTIMER",
+ "IXGBE_VTEICR",
+ "IXGBE_VTEICS",
+ "IXGBE_VTEIMS",
+ "IXGBE_VTEIMC",
+ "IXGBE_VTEIAC",
+ "IXGBE_VTEIAM",
+ "IXGBE_VTEITR",
+ "IXGBE_VTIVAR",
+ "IXGBE_VTIVAR_MISC",
+ "IXGBE_VFRDBAL0",
+ "IXGBE_VFRDBAL1",
+ "IXGBE_VFRDBAH0",
+ "IXGBE_VFRDBAH1",
+ "IXGBE_VFRDLEN0",
+ "IXGBE_VFRDLEN1",
+ "IXGBE_VFRDH0",
+ "IXGBE_VFRDH1",
+ "IXGBE_VFRDT0",
+ "IXGBE_VFRDT1",
+ "IXGBE_VFRXDCTL0",
+ "IXGBE_VFRXDCTL1",
+ "IXGBE_VFSRRCTL0",
+ "IXGBE_VFSRRCTL1",
+ "IXGBE_VFPSRTYPE",
+ "IXGBE_VFTDBAL0",
+ "IXGBE_VFTDBAL1",
+ "IXGBE_VFTDBAH0",
+ "IXGBE_VFTDBAH1",
+ "IXGBE_VFTDLEN0",
+ "IXGBE_VFTDLEN1",
+ "IXGBE_VFTDH0",
+ "IXGBE_VFTDH1",
+ "IXGBE_VFTDT0",
+ "IXGBE_VFTDT1",
+ "IXGBE_VFTXDCTL0",
+ "IXGBE_VFTXDCTL1",
+ "IXGBE_VFTDWBAL0",
+ "IXGBE_VFTDWBAL1",
+ "IXGBE_VFTDWBAH0",
+ "IXGBE_VFTDWBAH1"
+};
+
+static int ixgbevf_get_regs_len(struct net_device *netdev)
+{
+ return ARRAY_SIZE(ixgbevf_reg_names) * sizeof(u32);
+}
+
+static void ixgbevf_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs,
+ void *p)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u32 regs_len = ixgbevf_get_regs_len(netdev);
+ u8 i;
+
+ memset(p, 0, regs_len);
+
+ regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+
+ /* General Registers */
+ regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
+ regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
+ regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
+ regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);
+
+ /* Interrupt */
+ /* don't read EICR because it can clear interrupt causes, instead
+ * read EICS which is a shadow but doesn't clear EICR */
+ regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+ regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
+ regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
+ regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
+ regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
+ regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
+ regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+
+ /* Receive DMA */
+ for (i = 0; i < 2; i++)
+ regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
+
+ /* Receive */
+ regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);
+
+ /* Transmit */
+ for (i = 0; i < 2; i++)
+ regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
+
+ for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
+ hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
+}
+
+static void ixgbevf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, ixgbevf_driver_name, 32);
+ strlcpy(drvinfo->version, ixgbevf_driver_version, 32);
+
+ strlcpy(drvinfo->fw_version, "N/A", 4);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+}
+
+static void ixgbevf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring = adapter->tx_ring;
+ struct ixgbevf_ring *rx_ring = adapter->rx_ring;
+
+ ring->rx_max_pending = IXGBEVF_MAX_RXD;
+ ring->tx_max_pending = IXGBEVF_MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rx_ring->count;
+ ring->tx_pending = tx_ring->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int ixgbevf_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
+ int i, err = 0;
+ u32 new_rx_count, new_tx_count;
+
+	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
+ new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
+ new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
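+	/*
+	 * For illustration (assuming the usual descriptor multiple of 8 for
+	 * IXGBE_REQ_*_DESCRIPTOR_MULTIPLE): a request for 1000 Rx
+	 * descriptors is clamped to [64, 4096] and left at 1000 by ALIGN(),
+	 * while a request for 1001 would round up to 1008.
+	 */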
+
+ if ((new_tx_count == adapter->tx_ring->count) &&
+ (new_rx_count == adapter->rx_ring->count)) {
+ /* nothing to do */
+ return 0;
+ }
+
+ while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
+ msleep(1);
+
+ /*
+ * If the adapter isn't up and running then just set the
+ * new parameters and scurry for the exits.
+ */
+ if (!netif_running(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i].count = new_tx_count;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i].count = new_rx_count;
+ adapter->tx_ring_count = new_tx_count;
+ adapter->rx_ring_count = new_rx_count;
+ goto clear_reset;
+ }
+
+ tx_ring = kcalloc(adapter->num_tx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!tx_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
+
+ rx_ring = kcalloc(adapter->num_rx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!rx_ring) {
+ err = -ENOMEM;
+ goto err_rx_setup;
+ }
+
+ ixgbevf_down(adapter);
+
+ memcpy(tx_ring, adapter->tx_ring,
+ adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tx_ring[i].count = new_tx_count;
+ err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_tx_resources(adapter,
+ &tx_ring[i]);
+ }
+ goto err_tx_ring_setup;
+ }
+ tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
+ }
+
+ memcpy(rx_ring, adapter->rx_ring,
+ adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rx_ring[i].count = new_rx_count;
+ err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_rx_resources(adapter,
+ &rx_ring[i]);
+ }
+ goto err_rx_ring_setup;
+ }
+ rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
+ }
+
+ /*
+ * Only switch to new rings if all the prior allocations
+ * and ring setups have succeeded.
+ */
+ kfree(adapter->tx_ring);
+ adapter->tx_ring = tx_ring;
+ adapter->tx_ring_count = new_tx_count;
+
+ kfree(adapter->rx_ring);
+ adapter->rx_ring = rx_ring;
+ adapter->rx_ring_count = new_rx_count;
+
+ /* success! */
+ ixgbevf_up(adapter);
+
+ goto clear_reset;
+
+err_rx_ring_setup:
+	for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
+
+err_tx_ring_setup:
+ kfree(rx_ring);
+
+err_rx_setup:
+ kfree(tx_ring);
+
+clear_reset:
+ clear_bit(__IXGBEVF_RESETTING, &adapter->state);
+ return err;
+}
+
+static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
+{
+ switch (stringset) {
+ case ETH_SS_TEST:
+ return IXGBE_TEST_LEN;
+ case ETH_SS_STATS:
+ return IXGBE_GLOBAL_STATS_LEN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ ixgbevf_update_stats(adapter);
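+	/* each exported stat is the current counter, less the base value it
+	 * was zeroed against, plus whatever had accumulated before the last
+	 * reset (base_stat_offset / saved_reset_offset) */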
+	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+		char *p = (char *)adapter +
+			ixgbe_gstrings_stats[i].stat_offset;
+		char *b = (char *)adapter +
+			ixgbe_gstrings_stats[i].base_stat_offset;
+		char *r = (char *)adapter +
+			ixgbe_gstrings_stats[i].saved_reset_offset;
+		bool is64 = ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64);
+
+		data[i] = (is64 ? *(u64 *)p : *(u32 *)p) -
+			  (is64 ? *(u64 *)b : *(u32 *)b) +
+			  (is64 ? *(u64 *)r : *(u32 *)r);
+	}
+}
+
+static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ char *p = (char *)data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *ixgbe_gstrings_test,
+ IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool link_up;
+	u32 link_speed = 0;
+
+	*data = 0;
+
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
+ if (!link_up)
+ *data = 1;
+
+ return *data;
+}
+
+/* ethtool register test data */
+struct ixgbevf_reg_test {
+ u16 reg;
+ u8 array_len;
+ u8 test_type;
+ u32 mask;
+ u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x40 bytes apart, or in contiguous tables. We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define WRITE_NO_TEST 3
+#define TABLE32_TEST 4
+#define TABLE64_TEST_LO 5
+#define TABLE64_TEST_HI 6
+
+/* default VF register test */
+static const struct ixgbevf_reg_test reg_test_vf[] = {
+ { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+ { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
+ { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
+ { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+ { 0, 0, 0, 0 }
+};
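+
+/*
+ * For illustration: the first entry above exercises VFRDBAL(0) and
+ * VFRDBAL(1) (array_len of 2, the registers being spaced 0x40 apart).
+ * Each pattern is masked by the write mask before being written, so
+ * 0x5A5A5A5A becomes 0x5A5A5A00 under 0xFFFFFF80, and the read-back
+ * is expected to match that value under the read mask.
+ */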
+
+static const u32 register_test_patterns[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+};
+
+#define REG_PATTERN_TEST(R, M, W) \
+{ \
+ u32 pat, val, before; \
+ for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \
+ before = readl(adapter->hw.hw_addr + R); \
+ writel((register_test_patterns[pat] & W), \
+ (adapter->hw.hw_addr + R)); \
+ val = readl(adapter->hw.hw_addr + R); \
+ if (val != (register_test_patterns[pat] & W & M)) { \
+ hw_dbg(&adapter->hw, \
+ "pattern test reg %04X failed: got " \
+ "0x%08X expected 0x%08X\n", \
+ R, val, (register_test_patterns[pat] & W & M)); \
+ *data = R; \
+ writel(before, adapter->hw.hw_addr + R); \
+ return 1; \
+ } \
+ writel(before, adapter->hw.hw_addr + R); \
+ } \
+}
+
+#define REG_SET_AND_CHECK(R, M, W) \
+{ \
+ u32 val, before; \
+ before = readl(adapter->hw.hw_addr + R); \
+ writel((W & M), (adapter->hw.hw_addr + R)); \
+ val = readl(adapter->hw.hw_addr + R); \
+ if ((W & M) != (val & M)) { \
+ printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
+ "expected 0x%08X\n", R, (val & M), (W & M)); \
+ *data = R; \
+ writel(before, (adapter->hw.hw_addr + R)); \
+ return 1; \
+ } \
+ writel(before, (adapter->hw.hw_addr + R)); \
+}
+
+static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
+{
+ const struct ixgbevf_reg_test *test;
+ u32 i;
+
+ test = reg_test_vf;
+
+ /*
+ * Perform the register test, looping through the test table
+ * until we either fail or reach the null entry.
+ */
+ while (test->reg) {
+ for (i = 0; i < test->array_len; i++) {
+ switch (test->test_type) {
+ case PATTERN_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case SET_READ_TEST:
+ REG_SET_AND_CHECK(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case WRITE_NO_TEST:
+ writel(test->write,
+ (adapter->hw.hw_addr + test->reg)
+ + (i * 0x40));
+ break;
+ case TABLE32_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 4),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_LO:
+ REG_PATTERN_TEST(test->reg + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_HI:
+ REG_PATTERN_TEST((test->reg + 4) + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ }
+ }
+ test++;
+ }
+
+ *data = 0;
+ return *data;
+}
+
+static void ixgbevf_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ bool if_running = netif_running(netdev);
+
+ set_bit(__IXGBEVF_TESTING, &adapter->state);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ hw_dbg(&adapter->hw, "offline testing starting\n");
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result */
+ if (ixgbevf_link_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ ixgbevf_reset(adapter);
+
+ hw_dbg(&adapter->hw, "register testing starting\n");
+ if (ixgbevf_reg_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbevf_reset(adapter);
+
+ clear_bit(__IXGBEVF_TESTING, &adapter->state);
+ if (if_running)
+ dev_open(netdev);
+ } else {
+ hw_dbg(&adapter->hw, "online testing starting\n");
+ /* Online tests */
+ if (ixgbevf_link_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* Online tests aren't run; pass by default */
+ data[0] = 0;
+
+ clear_bit(__IXGBEVF_TESTING, &adapter->state);
+ }
+ msleep_interruptible(4 * 1000);
+}
+
+static int ixgbevf_nway_reset(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev)) {
+ if (!adapter->dev_closed)
+ ixgbevf_reinit_locked(adapter);
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops ixgbevf_ethtool_ops = {
+ .get_settings = ixgbevf_get_settings,
+ .get_drvinfo = ixgbevf_get_drvinfo,
+ .get_regs_len = ixgbevf_get_regs_len,
+ .get_regs = ixgbevf_get_regs,
+ .nway_reset = ixgbevf_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = ixgbevf_get_ringparam,
+ .set_ringparam = ixgbevf_set_ringparam,
+ .get_msglevel = ixgbevf_get_msglevel,
+ .set_msglevel = ixgbevf_set_msglevel,
+ .self_test = ixgbevf_diag_test,
+ .get_sset_count = ixgbevf_get_sset_count,
+ .get_strings = ixgbevf_get_strings,
+ .get_ethtool_stats = ixgbevf_get_ethtool_stats,
+};
+
+void ixgbevf_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
new file mode 100644
index 000000000000..e6c9d1a927a9
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -0,0 +1,320 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_H_
+#define _IXGBEVF_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/u64_stats_sync.h>
+
+#include "vf.h"
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct ixgbevf_tx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ unsigned long time_stamp;
+ u16 length;
+ u16 next_to_watch;
+ u16 mapped_as_page;
+};
+
+struct ixgbevf_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ dma_addr_t page_dma;
+ unsigned int page_offset;
+};
+
+struct ixgbevf_ring {
+ struct ixgbevf_adapter *adapter; /* backlink */
+ void *desc; /* descriptor ring memory */
+ dma_addr_t dma; /* phys. address of descriptor ring */
+ unsigned int size; /* length in bytes */
+	unsigned int count;		/* number of descriptors */
+ unsigned int next_to_use;
+ unsigned int next_to_clean;
+
+ int queue_index; /* needed for multiqueue queue management */
+ union {
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ struct ixgbevf_rx_buffer *rx_buffer_info;
+ };
+
+ u64 total_bytes;
+ u64 total_packets;
+ struct u64_stats_sync syncp;
+
+ u16 head;
+ u16 tail;
+
+ u16 reg_idx; /* holds the special value that gets the hardware register
+ * offset associated with this ring, which is different
+ * for DCB and RSS modes */
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ /* cpu for tx queue */
+ int cpu;
+#endif
+
+ u64 v_idx; /* maps directly to the index for this ring in the hardware
+ * vector array, can also be used for finding the bit in EICR
+ * and friends that represents the vector for this ring */
+
+ u16 work_limit; /* max work per interrupt */
+ u16 rx_buf_len;
+};
+
+enum ixgbevf_ring_f_enum {
+ RING_F_NONE = 0,
+ RING_F_ARRAY_SIZE /* must be last in enum set */
+};
+
+struct ixgbevf_ring_feature {
+ int indices;
+ int mask;
+};
+
+/* How many Rx buffers do we bundle into one write to the hardware? */
+#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define MAX_RX_QUEUES 1
+#define MAX_TX_QUEUES 1
+
+#define IXGBEVF_DEFAULT_TXD 1024
+#define IXGBEVF_DEFAULT_RXD 512
+#define IXGBEVF_MAX_TXD 4096
+#define IXGBEVF_MIN_TXD 64
+#define IXGBEVF_MAX_RXD 4096
+#define IXGBEVF_MIN_RXD 64
+
+/* Supported Rx Buffer Sizes */
+#define IXGBEVF_RXBUFFER_64 64 /* Used for packet split */
+#define IXGBEVF_RXBUFFER_128 128 /* Used for packet split */
+#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
+#define IXGBEVF_RXBUFFER_2048 2048
+#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
+
+#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define IXGBE_TX_FLAGS_CSUM (u32)(1)
+#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
+#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
+#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
+#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4)
+#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5)
+#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
+#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct ixgbevf_q_vector {
+ struct ixgbevf_adapter *adapter;
+ struct napi_struct napi;
+ DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
+ DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
+ u8 rxr_count; /* Rx ring count assigned to this vector */
+ u8 txr_count; /* Tx ring count assigned to this vector */
+ u8 tx_itr;
+ u8 rx_itr;
+ u32 eitr;
+ int v_idx; /* vector index in list */
+};
+
+/* Helper macros to switch between ints/sec and what the register uses.
+ * And yes, it's the same math going both ways. The lowest value
+ * supported by all of the ixgbe hardware is 8.
+ */
+#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
+ ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
+#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
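+
+/*
+ * For illustration (not part of the driver): at 8000 ints/s, the
+ * bulk_latency rate used by the ITR algorithm, the register value is
+ * 1000000000 / (8000 * 256) = 488; feeding 488 back through the same
+ * macro returns roughly 8000, which is why one macro serves both
+ * directions. A rate of 0 falls back to the hardware minimum of 8.
+ */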
+
+#define IXGBE_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
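+
+/*
+ * Worked example: with count = 512, next_to_clean = 2 and
+ * next_to_use = 5, three descriptors are in flight and the macro
+ * yields 512 + 2 - 5 - 1 = 508; one slot is always held back so a
+ * full ring is distinguishable from an empty one.
+ */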
+
+#define IXGBE_RX_DESC_ADV(R, i) \
+ (&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
+#define IXGBE_TX_DESC_ADV(R, i) \
+ (&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
+#define IXGBE_TX_CTXTDESC_ADV(R, i) \
+ (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+
+#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
+
+#define OTHER_VECTOR 1
+#define NON_Q_VECTORS (OTHER_VECTOR)
+
+#define MAX_MSIX_Q_VECTORS 2
+#define MAX_MSIX_COUNT 2
+
+#define MIN_MSIX_Q_VECTORS 2
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+
+/* board specific private data structure */
+struct ixgbevf_adapter {
+ struct timer_list watchdog_timer;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u16 bd_number;
+ struct work_struct reset_task;
+ struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+ char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
+
+ /* Interrupt Throttle Rate */
+ u32 itr_setting;
+ u16 eitr_low;
+ u16 eitr_high;
+
+ /* TX */
+ struct ixgbevf_ring *tx_ring; /* One per active queue */
+ int num_tx_queues;
+ u64 restart_queue;
+ u64 hw_csum_tx_good;
+ u64 lsc_int;
+ u64 hw_tso_ctxt;
+ u64 hw_tso6_ctxt;
+ u32 tx_timeout_count;
+
+ /* RX */
+ struct ixgbevf_ring *rx_ring; /* One per active queue */
+ int num_rx_queues;
+ int num_rx_pools; /* == num_rx_queues in 82598 */
+ int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
+ u64 hw_csum_rx_error;
+ u64 hw_rx_no_dma_resources;
+ u64 hw_csum_rx_good;
+ u64 non_eop_descs;
+ int num_msix_vectors;
+ int max_msix_q_vectors; /* true count of q_vectors for device */
+ struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE];
+ struct msix_entry *msix_entries;
+
+ u64 rx_hdr_split;
+ u32 alloc_rx_page_failed;
+ u32 alloc_rx_buff_failed;
+
+ /* Some features need tri-state capability,
+ * thus the additional *_CAPABLE flags.
+ */
+ u32 flags;
+#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
+#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
+#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
+#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
+#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5)
+#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 6)
+#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
+#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 8)
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
+ /* structs defined in ixgbe_vf.h */
+ struct ixgbe_hw hw;
+ u16 msg_enable;
+ struct ixgbevf_hw_stats stats;
+ u64 zero_base;
+ /* Interrupt Throttle Rate */
+ u32 eitr_param;
+
+ unsigned long state;
+ u32 *config_space;
+ u64 tx_busy;
+ unsigned int tx_ring_count;
+ unsigned int rx_ring_count;
+
+ u32 link_speed;
+ bool link_up;
+ unsigned long link_check_timeout;
+
+ struct work_struct watchdog_task;
+ bool netdev_registered;
+ bool dev_closed;
+};
+
+enum ixgbevf_state_t {
+ __IXGBEVF_TESTING,
+ __IXGBEVF_RESETTING,
+ __IXGBEVF_DOWN
+};
+
+enum ixgbevf_boards {
+ board_82599_vf,
+ board_X540_vf,
+};
+
+extern struct ixgbevf_info ixgbevf_82599_vf_info;
+extern struct ixgbevf_info ixgbevf_X540_vf_info;
+extern struct ixgbe_mbx_operations ixgbevf_mbx_ops;
+
+/* needed by ethtool.c */
+extern char ixgbevf_driver_name[];
+extern const char ixgbevf_driver_version[];
+
+extern int ixgbevf_up(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
+extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
+
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *ifr);
+
+#endif
+extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
+extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
+
+#ifdef DEBUG
+extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
+#define hw_dbg(hw, format, arg...) \
+ printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
+#else
+#define hw_dbg(hw, format, arg...) do {} while (0)
+#endif
+
+#endif /* _IXGBEVF_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
new file mode 100644
index 000000000000..4930c4605493
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -0,0 +1,3574 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/******************************************************************************
+ Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
+******************************************************************************/
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/ethtool.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/prefetch.h>
+
+#include "ixgbevf.h"
+
+char ixgbevf_driver_name[] = "ixgbevf";
+static const char ixgbevf_driver_string[] =
+ "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
+
+#define DRV_VERSION "2.1.0-k"
+const char ixgbevf_driver_version[] = DRV_VERSION;
+static char ixgbevf_copyright[] =
+ "Copyright (c) 2009 - 2010 Intel Corporation.";
+
+static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
+ [board_82599_vf] = &ixgbevf_82599_vf_info,
+ [board_X540_vf] = &ixgbevf_X540_vf_info,
+};
+
+/* ixgbevf_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id ixgbevf_pci_tbl[] = {
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
+ board_82599_vf},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
+ board_X540_vf},
+
+ /* required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+
+/* forward decls */
+static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
+static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
+ u32 itr_reg);
+
+static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
+ struct ixgbevf_ring *rx_ring,
+ u32 val)
+{
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
+}
+
+/**
+ * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
+ * @adapter: pointer to adapter struct
+ * @direction: 0 for Rx, 1 for Tx, -1 for other causes
+ * @queue: queue to map the corresponding interrupt to
+ * @msix_vector: the vector to map to the corresponding queue
+ **/
+static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
+ u8 queue, u8 msix_vector)
+{
+ u32 ivar, index;
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	if (direction == -1) {
+ /* other causes */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+ ivar &= ~0xFF;
+ ivar |= msix_vector;
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
+ } else {
+ /* tx or rx causes */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ index = ((16 * (queue & 1)) + (8 * direction));
+ ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
+ ivar &= ~(0xFF << index);
+ ivar |= (msix_vector << index);
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
+ }
+}
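+
+/*
+ * For illustration: each 32-bit VTIVAR register holds four 8-bit
+ * entries, two queues per register with the Rx cause in the low byte
+ * of each pair. Mapping the Tx cause of queue 1 (direction 1) lands
+ * at index = 16 * (1 & 1) + 8 * 1 = 24, the top byte of VTIVAR(0).
+ */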
+
+static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_tx_buffer
+ *tx_buffer_info)
+{
+ if (tx_buffer_info->dma) {
+ if (tx_buffer_info->mapped_as_page)
+ dma_unmap_page(&adapter->pdev->dev,
+ tx_buffer_info->dma,
+ tx_buffer_info->length,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(&adapter->pdev->dev,
+ tx_buffer_info->dma,
+ tx_buffer_info->length,
+ DMA_TO_DEVICE);
+ tx_buffer_info->dma = 0;
+ }
+ if (tx_buffer_info->skb) {
+ dev_kfree_skb_any(tx_buffer_info->skb);
+ tx_buffer_info->skb = NULL;
+ }
+ tx_buffer_info->time_stamp = 0;
+ /* tx_buffer_info must be completely set up in the transmit path */
+}
+
+#define IXGBE_MAX_TXD_PWR 14
+#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
+ (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
+#ifdef MAX_SKB_FRAGS
+#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
+ MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
+#else
+#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
+#endif
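+
+/*
+ * Worked example (assuming 4 KiB pages and a MAX_SKB_FRAGS of 18):
+ * the linear data costs TXD_USE_COUNT(16384) = 1 descriptor, each
+ * page fragment costs TXD_USE_COUNT(4096) = 1, so DESC_NEEDED comes
+ * to 1 + 18 + 1 = 20 descriptors including the context descriptor.
+ */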
+
+static void ixgbevf_tx_timeout(struct net_device *netdev);
+
+/**
+ * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ * @tx_ring: tx ring to clean
+ **/
+static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ unsigned int i, eop, count = 0;
+ unsigned int total_bytes = 0, total_packets = 0;
+
+ i = tx_ring->next_to_clean;
+ eop = tx_ring->tx_buffer_info[i].next_to_watch;
+ eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+
+ while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
+ (count < tx_ring->work_limit)) {
+ bool cleaned = false;
+ rmb(); /* read buffer_info after eop_desc */
+ /* eop could change between read and DD-check */
+ if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
+ goto cont_loop;
+ for ( ; !cleaned; count++) {
+ struct sk_buff *skb;
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ cleaned = (i == eop);
+ skb = tx_buffer_info->skb;
+
+ if (cleaned && skb) {
+ unsigned int segs, bytecount;
+
+ /* gso_segs is currently only valid for tcp */
+ segs = skb_shinfo(skb)->gso_segs ?: 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) +
+ skb->len;
+ total_packets += segs;
+ total_bytes += bytecount;
+ }
+
+ ixgbevf_unmap_and_free_tx_resource(adapter,
+ tx_buffer_info);
+
+ tx_desc->wb.status = 0;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+cont_loop:
+ eop = tx_ring->tx_buffer_info[i].next_to_watch;
+ eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ }
+
+ tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(count && netif_carrier_ok(netdev) &&
+ (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+#ifdef HAVE_TX_MQ
+ if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
+ !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+ netif_wake_subqueue(netdev, tx_ring->queue_index);
+ ++adapter->restart_queue;
+ }
+#else
+ if (netif_queue_stopped(netdev) &&
+ !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+ netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ }
+#endif
+ }
+
+ /* re-arm the interrupt */
+ if ((count >= tx_ring->work_limit) &&
+ (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
+ }
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->total_bytes += total_bytes;
+ tx_ring->total_packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ return count < tx_ring->work_limit;
+}
+
+/**
+ * ixgbevf_receive_skb - Send a completed packet up the stack
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @status: hardware indication of status of receive
+ * @ring: rx descriptor ring (for a specific queue) to setup
+ * @rx_desc: rx descriptor
+ **/
+static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
+ struct sk_buff *skb, u8 status,
+ struct ixgbevf_ring *ring,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ bool is_vlan = (status & IXGBE_RXD_STAT_VP);
+ u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
+
+ if (is_vlan && test_bit(tag, adapter->active_vlans))
+ __vlan_hwaccel_put_tag(skb, tag);
+
+ if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
+ napi_gro_receive(&q_vector->napi, skb);
+ else
+ netif_rx(skb);
+}
+
+/**
+ * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @adapter: address of board private structure
+ * @status_err: hardware indication of status of receive
+ * @skb: skb currently being received and modified
+ **/
+static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
+ u32 status_err, struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+ /* Rx csum disabled */
+ if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+ return;
+
+ /* if IP and error */
+ if ((status_err & IXGBE_RXD_STAT_IPCS) &&
+ (status_err & IXGBE_RXDADV_ERR_IPE)) {
+ adapter->hw_csum_rx_error++;
+ return;
+ }
+
+ if (!(status_err & IXGBE_RXD_STAT_L4CS))
+ return;
+
+ if (status_err & IXGBE_RXDADV_ERR_TCPE) {
+ adapter->hw_csum_rx_error++;
+ return;
+ }
+
+ /* It must be a TCP or UDP packet with a valid checksum */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ adapter->hw_csum_rx_good++;
+}
+
+/**
+ * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @adapter: address of board private structure
+ **/
+static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring,
+ int cleaned_count)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbevf_rx_buffer *bi;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
+
+ i = rx_ring->next_to_use;
+ bi = &rx_ring->rx_buffer_info[i];
+
+ while (cleaned_count--) {
+ rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+
+ if (!bi->page_dma &&
+ (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
+ if (!bi->page) {
+ bi->page = netdev_alloc_page(adapter->netdev);
+ if (!bi->page) {
+ adapter->alloc_rx_page_failed++;
+ goto no_buffers;
+ }
+ bi->page_offset = 0;
+ } else {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= (PAGE_SIZE / 2);
+ }
+
+ bi->page_dma = dma_map_page(&pdev->dev, bi->page,
+ bi->page_offset,
+ (PAGE_SIZE / 2),
+ DMA_FROM_DEVICE);
+ }
+
+ skb = bi->skb;
+ if (!skb) {
+ skb = netdev_alloc_skb(adapter->netdev,
+ bufsz);
+
+ if (!skb) {
+ adapter->alloc_rx_buff_failed++;
+ goto no_buffers;
+ }
+
+			/*
+			 * Make buffer alignment 2 beyond a 16 byte boundary;
+			 * this will result in a 16 byte aligned IP header
+			 * after the 14 byte MAC header is removed.
+			 */
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ bi->skb = skb;
+ }
+ if (!bi->dma) {
+ bi->dma = dma_map_single(&pdev->dev, skb->data,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ }
+ /* Refresh the desc even if buffer_addrs didn't change because
+ * each write-back erases this info. */
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+ } else {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ bi = &rx_ring->rx_buffer_info[i];
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
+ if (i-- == 0)
+ i = (rx_ring->count - 1);
+
+ ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
+ }
+}
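+
+/*
+ * For illustration: in packet-split mode each page is handed out in
+ * halves; page_offset toggles between 0 and PAGE_SIZE / 2 on reuse,
+ * so a 4 KiB page can back two successive receive buffers before a
+ * fresh page has to be allocated.
+ */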
+
+static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
+ u64 qmask)
+{
+ u32 mask;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ mask = (qmask & 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+}
+
+static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
+}
+
+static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+}
+
+static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
+ struct ixgbevf_ring *rx_ring,
+ int *work_done, int work_to_do)
+{
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct pci_dev *pdev = adapter->pdev;
+ union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
+ struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
+ struct sk_buff *skb;
+ unsigned int i;
+ u32 len, staterr;
+ u16 hdr_info;
+ bool cleaned = false;
+ int cleaned_count = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
+ while (staterr & IXGBE_RXD_STAT_DD) {
+ u32 upper_len = 0;
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+
+ rmb(); /* read descriptor and rx_buffer_info after status DD */
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
+ len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+ IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+ if (hdr_info & IXGBE_RXDADV_SPH)
+ adapter->rx_hdr_split++;
+ if (len > IXGBEVF_RX_HDR_SIZE)
+ len = IXGBEVF_RX_HDR_SIZE;
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+ } else {
+ len = le16_to_cpu(rx_desc->wb.upper.length);
+ }
+ cleaned = true;
+ skb = rx_buffer_info->skb;
+ prefetch(skb->data - NET_IP_ALIGN);
+ rx_buffer_info->skb = NULL;
+
+ if (rx_buffer_info->dma) {
+ dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_buffer_info->dma = 0;
+ skb_put(skb, len);
+ }
+
+ if (upper_len) {
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
+ rx_buffer_info->page_dma = 0;
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buffer_info->page,
+ rx_buffer_info->page_offset,
+ upper_len);
+
+ if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
+ (page_count(rx_buffer_info->page) != 1))
+ rx_buffer_info->page = NULL;
+ else
+ get_page(rx_buffer_info->page);
+
+ skb->len += upper_len;
+ skb->data_len += upper_len;
+ skb->truesize += upper_len;
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+
+ next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ prefetch(next_rxd);
+ cleaned_count++;
+
+ next_buffer = &rx_ring->rx_buffer_info[i];
+
+ if (!(staterr & IXGBE_RXD_STAT_EOP)) {
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ rx_buffer_info->skb = next_buffer->skb;
+ rx_buffer_info->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ } else {
+ skb->next = next_buffer->skb;
+ skb->next->prev = skb;
+ }
+ adapter->non_eop_descs++;
+ goto next_desc;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
+ if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
+ ixgbevf_rx_checksum(adapter, staterr, skb);
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ /*
+ * Work around issue of some types of VM to VM loop back
+ * packets not getting split correctly
+ */
+ if (staterr & IXGBE_RXD_STAT_LB) {
+ u32 header_fixup_len = skb_headlen(skb);
+ if (header_fixup_len < 14)
+ skb_push(skb, header_fixup_len);
+ }
+ skb->protocol = eth_type_trans(skb, adapter->netdev);
+
+ ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
+
+next_desc:
+ rx_desc->wb.upper.status_error = 0;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
+ ixgbevf_alloc_rx_buffers(adapter, rx_ring,
+ cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+
+ rx_ring->next_to_clean = i;
+ cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+
+ if (cleaned_count)
+ ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->total_packets += total_rx_packets;
+ rx_ring->total_bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+
+ return cleaned;
+}
+
+/**
+ * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function is optimized for cleaning one queue only on a single
+ * q_vector.
+ **/
+static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
+{
+ struct ixgbevf_q_vector *q_vector =
+ container_of(napi, struct ixgbevf_q_vector, napi);
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *rx_ring = NULL;
+ int work_done = 0;
+ long r_idx;
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = &(adapter->rx_ring[r_idx]);
+
+ ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+
+ /* If all Rx work done, exit the polling mode */
+ if (work_done < budget) {
+ napi_complete(napi);
+ if (adapter->itr_setting & 1)
+ ixgbevf_set_itr_msix(q_vector);
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
+ }
+
+ return work_done;
+}
+
+/**
+ * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean more than one rx queue associated with a
+ * q_vector.
+ **/
+static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
+{
+ struct ixgbevf_q_vector *q_vector =
+ container_of(napi, struct ixgbevf_q_vector, napi);
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *rx_ring = NULL;
+ int work_done = 0, i;
+ long r_idx;
+ u64 enable_mask = 0;
+
+ /* attempt to distribute budget to each queue fairly, but don't allow
+ * the budget to go below 1 because we'll exit polling */
+ budget /= (q_vector->rxr_count ?: 1);
+ budget = max(budget, 1);
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+ enable_mask |= rx_ring->v_idx;
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+#ifndef HAVE_NETDEV_NAPI_LIST
+ if (!netif_running(adapter->netdev))
+ work_done = 0;
+
+#endif
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = &(adapter->rx_ring[r_idx]);
+
+ /* If all Rx work done, exit the polling mode */
+ if (work_done < budget) {
+ napi_complete(napi);
+ if (adapter->itr_setting & 1)
+ ixgbevf_set_itr_msix(q_vector);
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable_queues(adapter, enable_mask);
+ }
+
+ return work_done;
+}
+
+/**
+ * ixgbevf_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
+ * interrupts.
+ **/
+static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbevf_q_vector *q_vector;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, j, q_vectors, v_idx, r_idx;
+ u32 mask;
+
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ /*
+ * Populate the IVAR table and set the ITR values to the
+ * corresponding register.
+ */
+ for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ q_vector = adapter->q_vector[v_idx];
+ /* XXX for_each_set_bit(...) */
+ r_idx = find_first_bit(q_vector->rxr_idx,
+ adapter->num_rx_queues);
+
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ j = adapter->rx_ring[r_idx].reg_idx;
+ ixgbevf_set_ivar(adapter, 0, j, v_idx);
+ r_idx = find_next_bit(q_vector->rxr_idx,
+ adapter->num_rx_queues,
+ r_idx + 1);
+ }
+ r_idx = find_first_bit(q_vector->txr_idx,
+ adapter->num_tx_queues);
+
+ for (i = 0; i < q_vector->txr_count; i++) {
+ j = adapter->tx_ring[r_idx].reg_idx;
+ ixgbevf_set_ivar(adapter, 1, j, v_idx);
+ r_idx = find_next_bit(q_vector->txr_idx,
+ adapter->num_tx_queues,
+ r_idx + 1);
+ }
+
+ /* if this is a tx only vector halve the interrupt rate */
+ if (q_vector->txr_count && !q_vector->rxr_count)
+ q_vector->eitr = (adapter->eitr_param >> 1);
+ else if (q_vector->rxr_count)
+ /* rx only */
+ q_vector->eitr = adapter->eitr_param;
+
+ ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
+ }
+
+ ixgbevf_set_ivar(adapter, -1, 1, v_idx);
+
+ /* set up to autoclear timer, and the vectors */
+ mask = IXGBE_EIMS_ENABLE_MASK;
+ mask &= ~IXGBE_EIMS_OTHER;
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
+}
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+/**
+ * ixgbevf_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @eitr: eitr setting (ints/s) that produced the last timeslice
+ * @itr_setting: current latency class (one of enum latency_range)
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ * Computes and returns a new latency class based on the packet and
+ * byte counts during the last interrupt. The advantage of per
+ * interrupt computation is faster updates and a more accurate ITR
+ * for the current traffic pattern. Constants in this function were
+ * computed based on theoretical maximum wire speed, and thresholds
+ * were set based on testing data as well as attempting to minimize
+ * response time while increasing bulk throughput.
+ **/
+static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
+ u32 eitr, u8 itr_setting,
+ int packets, int bytes)
+{
+ unsigned int retval = itr_setting;
+ u32 timepassed_us;
+ u64 bytes_perint;
+
+ if (packets == 0)
+ goto update_itr_done;
+
+ /* simple throttlerate management
+ * 0-20MB/s lowest (100000 ints/s)
+ * 20-100MB/s low (20000 ints/s)
+ * 100-1249MB/s bulk (8000 ints/s)
+ */
+ /* what was last interrupt timeslice? */
+	timepassed_us = 1000000 / eitr;
+ bytes_perint = bytes / timepassed_us; /* bytes/usec */
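+	/*
+	 * Worked example: at eitr = 20000 ints/s the last timeslice was
+	 * 1000000 / 20000 = 50 usec, so 100 kB received in that slice
+	 * gives bytes_perint = 2000 bytes/usec.
+	 */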
+
+ switch (itr_setting) {
+ case lowest_latency:
+ if (bytes_perint > adapter->eitr_low)
+ retval = low_latency;
+ break;
+ case low_latency:
+ if (bytes_perint > adapter->eitr_high)
+ retval = bulk_latency;
+ else if (bytes_perint <= adapter->eitr_low)
+ retval = lowest_latency;
+ break;
+ case bulk_latency:
+ if (bytes_perint <= adapter->eitr_high)
+ retval = low_latency;
+ break;
+ }
+
+update_itr_done:
+ return retval;
+}
+
+/**
+ * ixgbevf_write_eitr - write VTEITR register in hardware specific way
+ * @adapter: pointer to adapter struct
+ * @v_idx: vector index into q_vector array
+ * @itr_reg: new throttle rate in ints/s; converted to register format here
+ *
+ * This function is made to be called by ethtool and by the driver
+ * when it needs to update VTEITR registers at runtime. Hardware
+ * specific quirks/differences are taken care of here.
+ */
+static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
+ u32 itr_reg)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);
+
+	/*
+	 * Set the WDIS bit so this write does not clear the timer bits
+	 * and cause an immediate assertion of the interrupt.
+	 */
+ itr_reg |= IXGBE_EITR_CNT_WDIS;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
+}
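+
+/*
+ * For illustration: a request of 9200 ints/s becomes
+ * 1000000000 / (9200 * 256) = 424 in register units before the WDIS
+ * bit is OR'd in.
+ */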
+
+static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
+{
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ u32 new_itr;
+ u8 current_itr, ret_itr;
+ int i, r_idx, v_idx = q_vector->v_idx;
+ struct ixgbevf_ring *rx_ring, *tx_ring;
+
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+ tx_ring = &(adapter->tx_ring[r_idx]);
+ ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
+ q_vector->tx_itr,
+ tx_ring->total_packets,
+ tx_ring->total_bytes);
+ /* if the result for this queue would decrease interrupt
+ * rate for this vector then use that result */
+ q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
+ q_vector->tx_itr - 1 : ret_itr);
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx + 1);
+ }
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
+ q_vector->rx_itr,
+ rx_ring->total_packets,
+ rx_ring->total_bytes);
+ /* if the result for this queue would decrease interrupt
+ * rate for this vector then use that result */
+ q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
+ q_vector->rx_itr - 1 : ret_itr);
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+ current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = 100000;
+ break;
+ case low_latency:
+ new_itr = 20000; /* aka hwitr = ~200 */
+ break;
+ case bulk_latency:
+ default:
+ new_itr = 8000;
+ break;
+ }
+
+	if (new_itr != q_vector->eitr) {
+		/* do the exponential smoothing against the previous value */
+		u32 smoothed = ((q_vector->eitr * 90) / 100) +
+			       ((new_itr * 10) / 100);
+
+		/* save the algorithm value here, not the smoothed one */
+		q_vector->eitr = new_itr;
+		/* ixgbevf_write_eitr() converts ints/s to register format */
+		ixgbevf_write_eitr(adapter, v_idx, smoothed);
+	}
+}
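+
+/*
+ * For illustration: if the previous rate was 8000 ints/s and the
+ * algorithm now asks for 20000 ints/s, the smoothed value written to
+ * hardware is (8000 * 90 + 20000 * 10) / 100 = 9200 ints/s, ramping
+ * the throttle rate rather than jumping it.
+ */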
+
+static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 eicr;
+ u32 msg;
+
+ eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
+
+ if (!hw->mbx.ops.check_for_ack(hw)) {
+ /*
+ * checking for the ack clears the PFACK bit. Place
+ * it back in the v2p_mailbox cache so that anyone
+ * polling for an ack will not miss it. Also
+ * avoid the read below because the code to read
+ * the mailbox will also clear the ack bit. This was
+ * causing lost acks. Just cache the bit and exit
+ * the IRQ handler.
+ */
+ hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+ goto out;
+ }
+
+ /* Not an ack interrupt, go ahead and read the message */
+ hw->mbx.ops.read(hw, &msg, 1);
+
+ if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 1));
+
+out:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
+{
+ struct ixgbevf_q_vector *q_vector = data;
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *tx_ring;
+ int i, r_idx;
+
+ if (!q_vector->txr_count)
+ return IRQ_HANDLED;
+
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+ tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring->total_bytes = 0;
+ tx_ring->total_packets = 0;
+ ixgbevf_clean_tx_irq(adapter, tx_ring);
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx + 1);
+ }
+
+ if (adapter->itr_setting & 1)
+ ixgbevf_set_itr_msix(q_vector);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
+ * @irq: unused
+ * @data: pointer to our q_vector struct for this interrupt vector
+ **/
+static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
+{
+ struct ixgbevf_q_vector *q_vector = data;
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbevf_ring *rx_ring;
+ int r_idx;
+ int i;
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring->total_bytes = 0;
+ rx_ring->total_packets = 0;
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+ if (!q_vector->rxr_count)
+ return IRQ_HANDLED;
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ /* disable interrupts on this vector only */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
+{
+ ixgbevf_msix_clean_rx(irq, data);
+ ixgbevf_msix_clean_tx(irq, data);
+
+ return IRQ_HANDLED;
+}
+
+static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
+ int r_idx)
+{
+ struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
+
+ set_bit(r_idx, q_vector->rxr_idx);
+ q_vector->rxr_count++;
+ a->rx_ring[r_idx].v_idx = 1 << v_idx;
+}
+
+static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
+ int t_idx)
+{
+ struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
+
+ set_bit(t_idx, q_vector->txr_idx);
+ q_vector->txr_count++;
+ a->tx_ring[t_idx].v_idx = 1 << v_idx;
+}
+
+/**
+ * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code. Ideally, we'd have
+ * one vector per ring/queue, but on a constrained vector budget, we
+ * group the rings as "efficiently" as possible. You would add new
+ * mapping configurations in here.
+ **/
+static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
+{
+ int q_vectors;
+ int v_start = 0;
+ int rxr_idx = 0, txr_idx = 0;
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int i, j;
+ int rqpv, tqpv;
+ int err = 0;
+
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ /*
+ * The ideal configuration...
+ * We have enough vectors to map one per queue.
+ */
+ if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
+ for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+ map_vector_to_rxq(adapter, v_start, rxr_idx);
+
+ for (; txr_idx < txr_remaining; v_start++, txr_idx++)
+ map_vector_to_txq(adapter, v_start, txr_idx);
+ goto out;
+ }
+
+ /*
+ * If we don't have enough vectors for a 1-to-1
+ * mapping, we'll have to group them so there are
+ * multiple queues per vector.
+ */
+ /* Re-adjusting *qpv takes care of the remainder. */
+ for (i = v_start; i < q_vectors; i++) {
+ rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
+ for (j = 0; j < rqpv; j++) {
+ map_vector_to_rxq(adapter, i, rxr_idx);
+ rxr_idx++;
+ rxr_remaining--;
+ }
+ }
+ for (i = v_start; i < q_vectors; i++) {
+ tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
+ for (j = 0; j < tqpv; j++) {
+ map_vector_to_txq(adapter, i, txr_idx);
+ txr_idx++;
+ txr_remaining--;
+ }
+ }
+
+out:
+ return err;
+}
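+
+/*
+ * For illustration (a hypothetical constrained case; this VF exposes
+ * a single queue pair today): with 2 Rx and 2 Tx queues but only 2
+ * q_vectors, the fallback path assigns DIV_ROUND_UP(2, 2) = 1 Rx
+ * queue and 1 Tx queue to vector 0 and the remaining pair to
+ * vector 1.
+ */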
+
+/**
+ * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ irqreturn_t (*handler)(int, void *);
+ int i, vector, q_vectors, err;
+ int ri = 0, ti = 0;
+
+	/* Decrement for the non-queue (mailbox/other) vector */
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
+ ? &ixgbevf_msix_clean_many : \
+ (_v)->rxr_count ? &ixgbevf_msix_clean_rx : \
+ (_v)->txr_count ? &ixgbevf_msix_clean_tx : \
+ NULL)
+ for (vector = 0; vector < q_vectors; vector++) {
+ handler = SET_HANDLER(adapter->q_vector[vector]);
+
+ if (handler == &ixgbevf_msix_clean_rx) {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "rx", ri++);
+ } else if (handler == &ixgbevf_msix_clean_tx) {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "tx", ti++);
+ } else if (handler == &ixgbevf_msix_clean_many) {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "TxRx", vector);
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
+ err = request_irq(adapter->msix_entries[vector].vector,
+ handler, 0, adapter->name[vector],
+ adapter->q_vector[vector]);
+ if (err) {
+ hw_dbg(&adapter->hw,
+ "request_irq failed for MSIX interrupt "
+ "Error: %d\n", err);
+ goto free_queue_irqs;
+ }
+ }
+
+ sprintf(adapter->name[vector], "%s:mbx", netdev->name);
+ err = request_irq(adapter->msix_entries[vector].vector,
+ &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
+ if (err) {
+ hw_dbg(&adapter->hw,
+ "request_irq for msix_mbx failed: %d\n", err);
+ goto free_queue_irqs;
+ }
+
+ return 0;
+
+free_queue_irqs:
+	for (i = vector - 1; i >= 0; i--)
+		free_irq(adapter->msix_entries[i].vector,
+			 adapter->q_vector[i]);
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ return err;
+}
+
+static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
+{
+ int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (i = 0; i < q_vectors; i++) {
+ struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
+ bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
+ bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
+ q_vector->rxr_count = 0;
+ q_vector->txr_count = 0;
+ q_vector->eitr = adapter->eitr_param;
+ }
+}
+
+/**
+ * ixgbevf_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
+{
+ int err = 0;
+
+ err = ixgbevf_request_msix_irqs(adapter);
+
+ if (err)
+ hw_dbg(&adapter->hw,
+ "request_irq failed, Error %d\n", err);
+
+ return err;
+}
+
+static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i, q_vectors;
+
+ q_vectors = adapter->num_msix_vectors;
+
+ i = q_vectors - 1;
+
+ free_irq(adapter->msix_entries[i].vector, netdev);
+ i--;
+
+ for (; i >= 0; i--) {
+ free_irq(adapter->msix_entries[i].vector,
+ adapter->q_vector[i]);
+ }
+
+ ixgbevf_reset_q_vectors(adapter);
+}
+
+/**
+ * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
+{
+ int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ for (i = 0; i < adapter->num_msix_vectors; i++)
+ synchronize_irq(adapter->msix_entries[i].vector);
+}
+
+/**
+ * ixgbevf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
+ bool queues, bool flush)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 mask;
+ u64 qmask;
+
+ mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+ qmask = ~0;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+ if (queues)
+ ixgbevf_irq_enable_queues(adapter, qmask);
+
+ if (flush)
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
+{
+ u64 tdba;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 i, j, tdlen, txctrl;
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbevf_ring *ring = &adapter->tx_ring[i];
+ j = ring->reg_idx;
+ tdba = ring->dma;
+ tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
+ (tdba & DMA_BIT_MASK(32)));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
+ adapter->tx_ring[i].head = IXGBE_VFTDH(j);
+ adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
+ /* Disable the Tx Head Writeback RO bit, since it breaks
+ * internal bookkeeping if descriptors complete out of order.
+ */
+ txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
+ txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
+ }
+}
+
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
+
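+/*
+ * SRRCTL encodes the Rx packet buffer size in 1 KB units (BSIZEPKT)
+ * and, when packet split is enabled, the header buffer size in
+ * 64 byte units (BSIZEHDR).
+ */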
+static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
+{
+ struct ixgbevf_ring *rx_ring;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 srrctl;
+
+ rx_ring = &adapter->rx_ring[index];
+
+ srrctl = IXGBE_SRRCTL_DROP_EN;
+
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ u16 bufsz = IXGBEVF_RXBUFFER_2048;
+ /* grow the amount we can receive on large page machines */
+ if (bufsz < (PAGE_SIZE / 2))
+ bufsz = (PAGE_SIZE / 2);
+ /* cap the bufsz at our largest descriptor size */
+ bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);
+
+ srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
+ IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK);
+ } else {
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+ if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
+ srrctl |= IXGBEVF_RXBUFFER_2048 >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= rx_ring->rx_buf_len >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
+}
+
+/**
+ * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
+{
+ u64 rdba;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ int i, j;
+ u32 rdlen;
+ int rx_buf_len;
+
+ /* Decide whether to use packet split mode or not */
+ if (netdev->mtu > ETH_DATA_LEN) {
+ if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+ } else {
+ if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ }
+
+ /* Set the RX buffer length according to the mode */
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ /* PSRTYPE must be initialized in 82599 */
+ u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_IPV6HDR |
+ IXGBE_PSRTYPE_L2HDR;
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+ rx_buf_len = IXGBEVF_RX_HDR_SIZE;
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+ if (netdev->mtu <= ETH_DATA_LEN)
+ rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ else
+ rx_buf_len = ALIGN(max_frame, 1024);
+ }
+
+ rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rdba = adapter->rx_ring[i].dma;
+ j = adapter->rx_ring[i].reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
+ (rdba & DMA_BIT_MASK(32)));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
+ adapter->rx_ring[i].head = IXGBE_VFRDH(j);
+ adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
+ adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+
+ ixgbevf_configure_srrctl(adapter, j);
+ }
+}
+
+static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* add VID to filter table */
+ if (hw->mac.ops.set_vfta)
+ hw->mac.ops.set_vfta(hw, vid, 0, true);
+ set_bit(vid, adapter->active_vlans);
+}
+
+static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* remove VID from filter table */
+ if (hw->mac.ops.set_vfta)
+ hw->mac.ops.set_vfta(hw, vid, 0, false);
+ clear_bit(vid, adapter->active_vlans);
+}
+
+static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
+{
+ u16 vid;
+
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
+}
+
+static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int count = 0;
+
+ if (netdev_uc_count(netdev) > 10) {
+ printk(KERN_ERR "Too many unicast filters - No Space\n");
+ return -ENOSPC;
+ }
+
+ if (!netdev_uc_empty(netdev)) {
+ struct netdev_hw_addr *ha;
+ netdev_for_each_uc_addr(ha, netdev) {
+ hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
+ udelay(200);
+ }
+ } else {
+ /*
+ * If the list is empty then send message to PF driver to
+ * clear all macvlans on this VF.
+ */
+ hw->mac.ops.set_uc_addr(hw, 0, NULL);
+ }
+
+ return count;
+}
+
+/**
+ * ixgbevf_set_rx_mode - Multicast set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the multicast address
+ * list, unicast address list or the network interface flags are updated.
+ * This routine is responsible for configuring the hardware for proper
+ * multicast mode and unicast filtering.
+ **/
+static void ixgbevf_set_rx_mode(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* reprogram multicast list */
+ if (hw->mac.ops.update_mc_addr_list)
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
+
+ ixgbevf_write_uc_addr_list(netdev);
+}
+
+static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
+{
+ int q_idx;
+ struct ixgbevf_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ struct napi_struct *napi;
+ q_vector = adapter->q_vector[q_idx];
+ if (!q_vector->rxr_count)
+ continue;
+ napi = &q_vector->napi;
+ if (q_vector->rxr_count > 1)
+ napi->poll = &ixgbevf_clean_rxonly_many;
+
+ napi_enable(napi);
+ }
+}
+
+static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
+{
+ int q_idx;
+ struct ixgbevf_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ q_vector = adapter->q_vector[q_idx];
+ if (!q_vector->rxr_count)
+ continue;
+ napi_disable(&q_vector->napi);
+ }
+}
+
+static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ ixgbevf_set_rx_mode(netdev);
+
+ ixgbevf_restore_vlan(adapter);
+
+ ixgbevf_configure_tx(adapter);
+ ixgbevf_configure_rx(adapter);
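+ /* pre-fill each Rx ring with buffers and publish the initial tail
+ * so the hardware can start fetching descriptors */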
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbevf_ring *ring = &adapter->rx_ring[i];
+ ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
+ ring->next_to_use = ring->count - 1;
+ writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
+ }
+}
+
+#define IXGBE_MAX_RX_DESC_POLL 10
+static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+ int rxr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int j = adapter->rx_ring[rxr].reg_idx;
+ int k;
+
+ for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
+ if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
+ break;
+ else
+ msleep(1);
+ }
+ if (k >= IXGBE_MAX_RX_DESC_POLL) {
+ hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
+ "not set within the polling period\n", rxr);
+ }
+
+ ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
+ (adapter->rx_ring[rxr].count - 1));
+}
+
+static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
+{
+ /* Only save pre-reset stats if there are some */
+ if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
+ adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
+ adapter->stats.base_vfgprc;
+ adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
+ adapter->stats.base_vfgptc;
+ adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
+ adapter->stats.base_vfgorc;
+ adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
+ adapter->stats.base_vfgotc;
+ adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
+ adapter->stats.base_vfmprc;
+ }
+}
+
+static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
+ adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
+ adapter->stats.last_vfgorc |=
+ (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
+ adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
+ adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
+ adapter->stats.last_vfgotc |=
+ (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
+ adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
+
+ adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
+ adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
+ adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
+ adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
+ adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
+}
+
+static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, j = 0;
+ int num_rx_rings = adapter->num_rx_queues;
+ u32 txdctl, rxdctl;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ j = adapter->tx_ring[i].reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+ /* enable WTHRESH=8 descriptors, to encourage burst writeback */
+ txdctl |= (8 << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ j = adapter->tx_ring[i].reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
+ }
+
+ for (i = 0; i < num_rx_rings; i++) {
+ j = adapter->rx_ring[i].reg_idx;
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+ rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
+ if (hw->mac.type == ixgbe_mac_X540_vf) {
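+ /* X540 VFs may program the per-queue max Rx packet
+ * length (RLPML) directly */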
+ rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+ rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
+ IXGBE_RXDCTL_RLPML_EN);
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
+ ixgbevf_rx_desc_queue_enable(adapter, i);
+ }
+
+ ixgbevf_configure_msix(adapter);
+
+ if (hw->mac.ops.set_rar) {
+ if (is_valid_ether_addr(hw->mac.addr))
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+ else
+ hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
+ }
+
+ clear_bit(__IXGBEVF_DOWN, &adapter->state);
+ ixgbevf_napi_enable_all(adapter);
+
+ /* enable transmits */
+ netif_tx_start_all_queues(netdev);
+
+ ixgbevf_save_reset_stats(adapter);
+ ixgbevf_init_last_counter_stats(adapter);
+
+ /* bring the link up in the watchdog; this could race with our first
+ * link up interrupt but shouldn't be a problem */
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ mod_timer(&adapter->watchdog_timer, jiffies);
+ return 0;
+}
+
+int ixgbevf_up(struct ixgbevf_adapter *adapter)
+{
+ int err;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ ixgbevf_configure(adapter);
+
+ err = ixgbevf_up_complete(adapter);
+
+ /* clear any pending interrupts, may auto mask */
+ IXGBE_READ_REG(hw, IXGBE_VTEICR);
+
+ ixgbevf_irq_enable(adapter, true, true);
+
+ return err;
+}
+
+/**
+ * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
+ **/
+static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+ unsigned int i;
+
+ if (!rx_ring->rx_buffer_info)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ struct ixgbevf_rx_buffer *rx_buffer_info;
+
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ if (rx_buffer_info->dma) {
+ dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_buffer_info->dma = 0;
+ }
+ if (rx_buffer_info->skb) {
+ struct sk_buff *skb = rx_buffer_info->skb;
+ rx_buffer_info->skb = NULL;
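+ /* packet split may chain partial buffers via skb->prev;
+ * walk and free the whole chain */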
+ do {
+ struct sk_buff *this = skb;
+ skb = skb->prev;
+ dev_kfree_skb(this);
+ } while (skb);
+ }
+ if (!rx_buffer_info->page)
+ continue;
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
+ rx_buffer_info->page_dma = 0;
+ put_page(rx_buffer_info->page);
+ rx_buffer_info->page = NULL;
+ rx_buffer_info->page_offset = 0;
+ }
+
+ size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ if (rx_ring->head)
+ writel(0, adapter->hw.hw_addr + rx_ring->head);
+ if (rx_ring->tail)
+ writel(0, adapter->hw.hw_addr + rx_ring->tail);
+}
+
+/**
+ * ixgbevf_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ if (!tx_ring->tx_buffer_info)
+ return;
+
+ /* Free all the Tx ring sk_buffs */
+
+ for (i = 0; i < tx_ring->count; i++) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ }
+
+ size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_buffer_info, 0, size);
+
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ if (tx_ring->head)
+ writel(0, adapter->hw.hw_addr + tx_ring->head);
+ if (tx_ring->tail)
+ writel(0, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+/**
+ * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+void ixgbevf_down(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 txdctl;
+ int i, j;
+
+ /* signal that we are down to the interrupt handler */
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+ netif_tx_disable(netdev);
+
+ msleep(10);
+
+ netif_tx_stop_all_queues(netdev);
+
+ ixgbevf_irq_disable(adapter);
+
+ ixgbevf_napi_disable_all(adapter);
+
+ del_timer_sync(&adapter->watchdog_timer);
+ /* can't call flush scheduled work here because it can deadlock
+ * if linkwatch_event tries to acquire the rtnl_lock which we are
+ * holding */
+ while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
+ msleep(1);
+
+ /* disable transmits in the hardware now that interrupts are off */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ j = adapter->tx_ring[i].reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
+ (txdctl & ~IXGBE_TXDCTL_ENABLE));
+ }
+
+ netif_carrier_off(netdev);
+
+ if (!pci_channel_offline(adapter->pdev))
+ ixgbevf_reset(adapter);
+
+ ixgbevf_clean_all_tx_rings(adapter);
+ ixgbevf_clean_all_rx_rings(adapter);
+}
+
+void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ WARN_ON(in_interrupt());
+
+ while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
+ msleep(1);
+
+ /*
+ * Check if PF is up before re-init. If not then skip until
+ * later when the PF is up and ready to service requests from
+ * the VF via mailbox. If the VF is up and running then the
+ * watchdog task will continue to schedule reset tasks until
+ * the PF is up and running.
+ */
+ if (!hw->mac.ops.reset_hw(hw)) {
+ ixgbevf_down(adapter);
+ ixgbevf_up(adapter);
+ }
+
+ clear_bit(__IXGBEVF_RESETTING, &adapter->state);
+}
+
+void ixgbevf_reset(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+
+ if (hw->mac.ops.reset_hw(hw))
+ hw_dbg(hw, "PF still resetting\n");
+ else
+ hw->mac.ops.init_hw(hw);
+
+ if (is_valid_ether_addr(adapter->hw.mac.addr)) {
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
+ }
+}
+
+static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
+ int vectors)
+{
+ int err, vector_threshold;
+
+ /* We'll want at least 3 (vector_threshold):
+ * 1) TxQ[0] Cleanup
+ * 2) RxQ[0] Cleanup
+ * 3) Other (Link Status Change, etc.)
+ */
+ vector_threshold = MIN_MSIX_COUNT;
+
+ /* The more we get, the more we will assign to Tx/Rx Cleanup
+ * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+ * Right now, we simply care about how many we'll get; we'll
+ * set them up later while requesting irq's.
+ */
+ while (vectors >= vector_threshold) {
+ err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+ vectors);
+ if (!err) /* Success in acquiring all requested vectors. */
+ break;
+ else if (err < 0)
+ vectors = 0; /* Nasty failure, quit now */
+ else /* err == number of vectors we should try again with */
+ vectors = err;
+ }
+
+ if (vectors < vector_threshold) {
+ /* Can't allocate enough MSI-X interrupts? Oh well.
+ * This just means we'll go with either a single MSI
+ * vector or fall back to legacy interrupts.
+ */
+ hw_dbg(&adapter->hw,
+ "Unable to allocate MSI-X interrupts\n");
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else {
+ /*
+ * Adjust for only the vectors we'll use, which is minimum
+ * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = vectors;
+ }
+}
+
+/**
+ * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
+ * @adapter: board private structure to initialize
+ *
+ * This is the top level queue allocation routine. The order here is very
+ * important, starting with the "most" number of features turned on at once,
+ * and ending with the smallest set of features. This way large combinations
+ * can be allocated if they're turned on, and smaller combinations are the
+ * fallthrough conditions.
+ *
+ **/
+static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
+{
+ /* Start with base case */
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_queues_per_pool = 1;
+}
+
+/**
+ * ixgbevf_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time.
+ **/
+static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ adapter->tx_ring = kcalloc(adapter->num_tx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!adapter->tx_ring)
+ goto err_tx_ring_allocation;
+
+ adapter->rx_ring = kcalloc(adapter->num_rx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!adapter->rx_ring)
+ goto err_rx_ring_allocation;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ adapter->tx_ring[i].count = adapter->tx_ring_count;
+ adapter->tx_ring[i].queue_index = i;
+ adapter->tx_ring[i].reg_idx = i;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ adapter->rx_ring[i].count = adapter->rx_ring_count;
+ adapter->rx_ring[i].queue_index = i;
+ adapter->rx_ring[i].reg_idx = i;
+ }
+
+ return 0;
+
+err_rx_ring_allocation:
+ kfree(adapter->tx_ring);
+err_tx_ring_allocation:
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
+{
+ int err = 0;
+ int vector, v_budget;
+
+ /*
+ * It's easy to be greedy for MSI-X vectors, but it really
+ * doesn't do us much good if we have a lot more vectors
+ * than CPUs. So let's be conservative and only ask for
+ * (roughly) twice the number of vectors as there are CPUs.
+ */
+ v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
+ (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+
+ /* A failure in MSI-X entry allocation isn't fatal, but it does
+ * mean we disable MSI-X capabilities of the adapter. */
+ adapter->msix_entries = kcalloc(v_budget,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!adapter->msix_entries) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (vector = 0; vector < v_budget; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+ ixgbevf_acquire_msix_vectors(adapter, v_budget);
+
+out:
+ return err;
+}
+
+/**
+ * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ struct ixgbevf_q_vector *q_vector;
+ int napi_vectors;
+ int (*poll)(struct napi_struct *, int);
+
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ napi_vectors = adapter->num_rx_queues;
+ poll = &ixgbevf_clean_rxonly;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
+ if (!q_vector)
+ goto err_out;
+ q_vector->adapter = adapter;
+ q_vector->v_idx = q_idx;
+ q_vector->eitr = adapter->eitr_param;
+ if (q_idx < napi_vectors)
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ (*poll), 64);
+ adapter->q_vector[q_idx] = q_vector;
+ }
+
+ return 0;
+
+err_out:
+ while (q_idx) {
+ q_idx--;
+ q_vector = adapter->q_vector[q_idx];
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ adapter->q_vector[q_idx] = NULL;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ int napi_vectors;
+
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ napi_vectors = adapter->num_rx_queues;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
+
+ adapter->q_vector[q_idx] = NULL;
+ if (q_idx < napi_vectors)
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ }
+}
+
+/**
+ * ixgbevf_reset_interrupt_capability - Reset MSIX setup
+ * @adapter: board private structure
+ *
+ **/
+static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
+{
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+}
+
+/**
+ * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
+ * @adapter: board private structure to initialize
+ *
+ **/
+static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
+{
+ int err;
+
+ /* Number of supported queues */
+ ixgbevf_set_num_queues(adapter);
+
+ err = ixgbevf_set_interrupt_capability(adapter);
+ if (err) {
+ hw_dbg(&adapter->hw,
+ "Unable to setup interrupt capabilities\n");
+ goto err_set_interrupt;
+ }
+
+ err = ixgbevf_alloc_q_vectors(adapter);
+ if (err) {
+ hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
+ "vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ err = ixgbevf_alloc_queues(adapter);
+ if (err) {
+ printk(KERN_ERR "Unable to allocate memory for queues\n");
+ goto err_alloc_queues;
+ }
+
+ hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
+ "Tx Queue count = %u\n",
+ (adapter->num_rx_queues > 1) ? "Enabled" :
+ "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
+
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+ return 0;
+err_alloc_queues:
+ ixgbevf_free_q_vectors(adapter);
+err_alloc_q_vectors:
+ ixgbevf_reset_interrupt_capability(adapter);
+err_set_interrupt:
+ return err;
+}
+
+/**
+ * ixgbevf_sw_init - Initialize general software structures
+ * (struct ixgbevf_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * ixgbevf_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+
+ /* PCI config space info */
+
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->revision_id = pdev->revision;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+
+ hw->mbx.ops.init_params(hw);
+ hw->mac.max_tx_queues = MAX_TX_QUEUES;
+ hw->mac.max_rx_queues = MAX_RX_QUEUES;
+ err = hw->mac.ops.reset_hw(hw);
+ if (err) {
+ dev_info(&pdev->dev,
+ "PF still in reset state, assigning new address\n");
+ dev_hw_addr_random(adapter->netdev, hw->mac.addr);
+ } else {
+ err = hw->mac.ops.init_hw(hw);
+ if (err) {
+ printk(KERN_ERR "init_shared_code failed: %d\n", err);
+ goto out;
+ }
+ }
+
+ /* Enable dynamic interrupt throttling rates */
+ adapter->eitr_param = 20000;
+ adapter->itr_setting = 1;
+
+ /* set defaults for the dynamic ITR throughput thresholds (MB/s) */
+ adapter->eitr_low = 10;
+ adapter->eitr_high = 20;
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
+ adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
+
+ /* enable rx csum by default */
+ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+out:
+ return err;
+}
+
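+/*
+ * The VF statistics registers are free-running counters the VF cannot
+ * reset: GPRC/GPTC/MPRC are 32 bits wide, GORC/GOTC are 36 bits split
+ * across _LSB/_MSB registers. The macros below accumulate a 64-bit
+ * software counter by detecting wraparound (current < last) and adding
+ * the carry, e.g. last = 0xFFFFFFF0 and current = 0x10 means the 32-bit
+ * register wrapped, so 0x100000000 is added to the running total.
+ */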
+#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
+ { \
+ u32 current_counter = IXGBE_READ_REG(hw, reg); \
+ if (current_counter < last_counter) \
+ counter += 0x100000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFFF00000000LL; \
+ counter |= current_counter; \
+ }
+
+#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+ { \
+ u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
+ u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
+ u64 current_counter = (current_counter_msb << 32) | \
+ current_counter_lsb; \
+ if (current_counter < last_counter) \
+ counter += 0x1000000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFF000000000LL; \
+ counter |= current_counter; \
+ }
+/**
+ * ixgbevf_update_stats - Update the board statistics counters.
+ * @adapter: board private structure
+ **/
+void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
+ adapter->stats.vfgprc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
+ adapter->stats.vfgptc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
+ adapter->stats.last_vfgorc,
+ adapter->stats.vfgorc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
+ adapter->stats.last_vfgotc,
+ adapter->stats.vfgotc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
+ adapter->stats.vfmprc);
+}
+
+/**
+ * ixgbevf_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void ixgbevf_watchdog(unsigned long data)
+{
+ struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 eics = 0;
+ int i;
+
+ /*
+ * Do the watchdog outside of interrupt context due to the lovely
+ * delays that some of the newer hardware requires
+ */
+
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+ goto watchdog_short_circuit;
+
+ /* get one bit for every active tx/rx interrupt vector */
+ for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+ struct ixgbevf_q_vector *qv = adapter->q_vector[i];
+ if (qv->rxr_count || qv->txr_count)
+ eics |= (1 << i);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
+
+watchdog_short_circuit:
+ schedule_work(&adapter->watchdog_task);
+}
+
+/**
+ * ixgbevf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void ixgbevf_tx_timeout(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+}
+
+static void ixgbevf_reset_task(struct work_struct *work)
+{
+ struct ixgbevf_adapter *adapter;
+ adapter = container_of(work, struct ixgbevf_adapter, reset_task);
+
+ /* If we're already down or resetting, just bail */
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+ test_bit(__IXGBEVF_RESETTING, &adapter->state))
+ return;
+
+ adapter->tx_timeout_count++;
+
+ ixgbevf_reinit_locked(adapter);
+}
+
+/**
+ * ixgbevf_watchdog_task - worker thread to bring link up
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbevf_watchdog_task(struct work_struct *work)
+{
+ struct ixgbevf_adapter *adapter = container_of(work,
+ struct ixgbevf_adapter,
+ watchdog_task);
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = adapter->link_speed;
+ bool link_up = adapter->link_up;
+
+ adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
+
+ /*
+ * Always check the link on the watchdog because we have
+ * no LSC interrupt
+ */
+ if (hw->mac.ops.check_link) {
+ if ((hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false)) != 0) {
+ adapter->link_up = link_up;
+ adapter->link_speed = link_speed;
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ schedule_work(&adapter->reset_task);
+ goto pf_has_reset;
+ }
+ } else {
+ /* always assume the link is up if there is
+ * no check_link function */
+ link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ link_up = true;
+ }
+ adapter->link_up = link_up;
+ adapter->link_speed = link_speed;
+
+ if (link_up) {
+ if (!netif_carrier_ok(netdev)) {
+ hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
+ (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ 10 : 1);
+ netif_carrier_on(netdev);
+ netif_tx_wake_all_queues(netdev);
+ }
+ } else {
+ adapter->link_up = false;
+ adapter->link_speed = 0;
+ if (netif_carrier_ok(netdev)) {
+ hw_dbg(&adapter->hw, "NIC Link is Down\n");
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ }
+ }
+
+ ixgbevf_update_stats(adapter);
+
+pf_has_reset:
+ /* Reset the timer */
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + (2 * HZ)));
+
+ adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
+}
+
+/**
+ * ixgbevf_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ ixgbevf_clean_tx_ring(adapter, tx_ring);
+
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ if (adapter->tx_ring[i].desc)
+ ixgbevf_free_tx_resources(adapter,
+ &adapter->tx_ring[i]);
+
+}
+
+/**
+ * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
+ tx_ring->tx_buffer_info = vzalloc(size);
+ if (!tx_ring->tx_buffer_info)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc)
+ goto err;
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ tx_ring->work_limit = tx_ring->count;
+ return 0;
+
+err:
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+ hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
+ "descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ if (!err)
+ continue;
+ hw_dbg(&adapter->hw,
+ "Allocation for Tx Queue %u failed\n", i);
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
+ rx_ring->rx_buffer_info = vzalloc(size);
+ if (!rx_ring->rx_buffer_info) {
+ hw_dbg(&adapter->hw,
+ "Unable to vmalloc buffer memory for "
+ "the receive descriptor ring\n");
+ goto alloc_failed;
+ }
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc) {
+ hw_dbg(&adapter->hw,
+ "Unable to allocate memory for "
+ "the receive descriptor ring\n");
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ goto alloc_failed;
+ }
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ return 0;
+alloc_failed:
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ if (!err)
+ continue;
+ hw_dbg(&adapter->hw,
+ "Allocation for Rx Queue %u failed\n", i);
+ break;
+ }
+ return err;
+}
+
+/**
+ * ixgbevf_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ ixgbevf_clean_rx_ring(adapter, rx_ring);
+
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
+
+ rx_ring->desc = NULL;
+}
+
+/**
+ * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ if (adapter->rx_ring[i].desc)
+ ixgbevf_free_rx_resources(adapter,
+ &adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbevf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int ixgbevf_open(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__IXGBEVF_TESTING, &adapter->state))
+ return -EBUSY;
+
+ if (hw->adapter_stopped) {
+ ixgbevf_reset(adapter);
+ /* if adapter is still stopped then PF isn't up and
+ * the VF can't start. */
+ if (hw->adapter_stopped) {
+ err = IXGBE_ERR_MBX;
+ printk(KERN_ERR "Unable to start - perhaps the PF"
+ " Driver isn't up yet\n");
+ goto err_setup_reset;
+ }
+ }
+
+ /* allocate transmit descriptors */
+ err = ixgbevf_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = ixgbevf_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ ixgbevf_configure(adapter);
+
+ /*
+ * Map the Tx/Rx rings to the vectors we were allotted. Since
+ * request_irq is called later in this function, the rings must
+ * be mapped to vectors *before* up_complete.
+ */
+ ixgbevf_map_rings_to_vectors(adapter);
+
+ err = ixgbevf_up_complete(adapter);
+ if (err)
+ goto err_up;
+
+ /* clear any pending interrupts, may auto mask */
+ IXGBE_READ_REG(hw, IXGBE_VTEICR);
+ err = ixgbevf_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ ixgbevf_irq_enable(adapter, true, true);
+
+ return 0;
+
+err_req_irq:
+ ixgbevf_down(adapter);
+err_up:
+ ixgbevf_free_irq(adapter);
+err_setup_rx:
+ ixgbevf_free_all_rx_resources(adapter);
+err_setup_tx:
+ ixgbevf_free_all_tx_resources(adapter);
+ ixgbevf_reset(adapter);
+
+err_setup_reset:
+
+ return err;
+}
+
+/**
+ * ixgbevf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int ixgbevf_close(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ ixgbevf_down(adapter);
+ ixgbevf_free_irq(adapter);
+
+ ixgbevf_free_all_tx_resources(adapter);
+ ixgbevf_free_all_rx_resources(adapter);
+
+ return 0;
+}
+
+static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+ struct ixgbe_adv_tx_context_desc *context_desc;
+ unsigned int i;
+ int err;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ u32 vlan_macip_lens = 0, type_tucmd_mlhl;
+ u32 mss_l4len_idx, l4len;
+
+ if (skb_is_gso(skb)) {
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+ l4len = tcp_hdrlen(skb);
+ *hdr_len += l4len;
+
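+ /* seed the TCP checksum with the pseudo-header (zero length);
+ * the hardware computes the real checksum for each segment */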
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ adapter->hw_tso_ctxt++;
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ adapter->hw_tso6_ctxt++;
+ }
+
+ i = tx_ring->next_to_use;
+
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+
+ /* VLAN MACLEN IPLEN */
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ vlan_macip_lens |=
+ (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
+ vlan_macip_lens |= ((skb_network_offset(skb)) <<
+ IXGBE_ADVTXD_MACLEN_SHIFT);
+ *hdr_len += skb_network_offset(skb);
+ vlan_macip_lens |=
+ (skb_transport_header(skb) - skb_network_header(skb));
+ *hdr_len +=
+ (skb_transport_header(skb) - skb_network_header(skb));
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
+ IXGBE_ADVTXD_DTYP_CTXT);
+
+ if (skb->protocol == htons(ETH_P_IP))
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+
+ /* MSS L4LEN IDX */
+ mss_l4len_idx =
+ (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
+ mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
+ /* use index 1 for TSO */
+ mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+ }
+
+ return false;
+}
+
+static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags)
+{
+ struct ixgbe_adv_tx_context_desc *context_desc;
+ unsigned int i;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL ||
+ (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
+ i = tx_ring->next_to_use;
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ vlan_macip_lens |= (tx_flags &
+ IXGBE_TX_FLAGS_VLAN_MASK);
+ vlan_macip_lens |= (skb_network_offset(skb) <<
+ IXGBE_ADVTXD_MACLEN_SHIFT);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ vlan_macip_lens |= (skb_transport_header(skb) -
+ skb_network_header(skb));
+
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+
+ type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
+ IXGBE_ADVTXD_DTYP_CTXT);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ type_tucmd_mlhl |=
+ IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ /* XXX what about other V6 headers?? */
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ type_tucmd_mlhl |=
+ IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ printk(KERN_WARNING
+ "partial checksum but "
+ "proto=%x!\n",
+ skb->protocol);
+ }
+ break;
+ }
+ }
+
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+ /* use index zero for tx checksum offload */
+ context_desc->mss_l4len_idx = 0;
+
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ adapter->hw_csum_tx_good++;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+ }
+
+ return false;
+}
+
+static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags,
+ unsigned int first)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ unsigned int len;
+ unsigned int total = skb->len;
+ unsigned int offset = 0, size;
+ int count = 0;
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ unsigned int f;
+ int i;
+
+ i = tx_ring->next_to_use;
+
+ len = min(skb_headlen(skb), total);
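+ /* map the linear head in chunks of at most IXGBE_MAX_DATA_PER_TXD */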
+ while (len) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+
+ tx_buffer_info->length = size;
+ tx_buffer_info->mapped_as_page = false;
+ tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+ goto dma_error;
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ len -= size;
+ total -= size;
+ offset += size;
+ count++;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ len = min((unsigned int)frag->size, total);
+ offset = 0;
+
+ while (len) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+
+ tx_buffer_info->length = size;
+ tx_buffer_info->dma =
+ skb_frag_dma_map(&adapter->pdev->dev, frag,
+ offset, size, DMA_TO_DEVICE);
+ tx_buffer_info->mapped_as_page = true;
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+ goto dma_error;
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ len -= size;
+ total -= size;
+ offset += size;
+ count++;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+ if (total == 0)
+ break;
+ }
+
+ if (i == 0)
+ i = tx_ring->count - 1;
+ else
+ i = i - 1;
+ tx_ring->tx_buffer_info[i].skb = skb;
+ tx_ring->tx_buffer_info[first].next_to_watch = i;
+
+ return count;
+
+dma_error:
+ dev_err(&pdev->dev, "TX DMA map failed\n");
+
+ /* clear timestamp and dma mappings for failed tx_buffer_info map */
+ tx_buffer_info->dma = 0;
+ tx_buffer_info->time_stamp = 0;
+ tx_buffer_info->next_to_watch = 0;
+ count--;
+
+ /* clear timestamp and dma mappings for remaining portion of packet */
+ while (count >= 0) {
+ count--;
+ i--;
+ if (i < 0)
+ i += tx_ring->count;
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ }
+
+ return count;
+}
+
+static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring, int tx_flags,
+ int count, u32 paylen, u8 hdr_len)
+{
+ union ixgbe_adv_tx_desc *tx_desc = NULL;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ u32 olinfo_status = 0, cmd_type_len = 0;
+ unsigned int i;
+
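+ /* the final descriptor carries EOP (end of packet), RS (request
+ * status writeback) and IFCS (insert Ethernet FCS) */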
+ u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
+
+ cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
+
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+
+ if (tx_flags & IXGBE_TX_FLAGS_TSO) {
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+
+ olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+ IXGBE_ADVTXD_POPTS_SHIFT;
+
+ /* use index 1 context for tso */
+ olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+ if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+ olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
+ IXGBE_ADVTXD_POPTS_SHIFT;
+
+ } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+ olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+ IXGBE_ADVTXD_POPTS_SHIFT;
+
+ olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+ i = tx_ring->next_to_use;
+ while (count--) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
+ tx_desc->read.cmd_type_len =
+ cpu_to_le32(cmd_type_len | tx_buffer_info->length);
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+ tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ tx_ring->next_to_use = i;
+ writel(i, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
+ struct ixgbevf_ring *tx_ring, int size)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ netif_stop_subqueue(netdev, tx_ring->queue_index);
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it. */
+ smp_mb();
+
+ /* We need to check again in a case another CPU has just
+ * made room available. */
+ if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(netdev, tx_ring->queue_index);
+ ++adapter->restart_queue;
+ return 0;
+}
+
+static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
+ struct ixgbevf_ring *tx_ring, int size)
+{
+ if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
+}
+
+static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring;
+ unsigned int first;
+ unsigned int tx_flags = 0;
+ u8 hdr_len = 0;
+ int r_idx = 0, tso;
+ int count = 0;
+
+ unsigned int f;
+
+ tx_ring = &adapter->tx_ring[r_idx];
+
+ if (vlan_tx_tag_present(skb)) {
+ tx_flags |= vlan_tx_tag_get(skb);
+ tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= IXGBE_TX_FLAGS_VLAN;
+ }
+
+ /* four things can cause us to need a context descriptor */
+ if (skb_is_gso(skb) ||
+ (skb->ip_summed == CHECKSUM_PARTIAL) ||
+ (tx_flags & IXGBE_TX_FLAGS_VLAN))
+ count++;
+
+ count += TXD_USE_COUNT(skb_headlen(skb));
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+
+ if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
+ adapter->tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
+ first = tx_ring->next_to_use;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ tx_flags |= IXGBE_TX_FLAGS_IPV4;
+ tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+ if (tso < 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (tso)
+ tx_flags |= IXGBE_TX_FLAGS_TSO;
+ else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+ (skb->ip_summed == CHECKSUM_PARTIAL))
+ tx_flags |= IXGBE_TX_FLAGS_CSUM;
+
+ ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
+ ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
+ skb->len, hdr_len);
+
+ ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ixgbevf_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbevf_set_mac(struct net_device *netdev, void *p)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+ if (hw->mac.ops.set_rar)
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+
+ return 0;
+}
+
+/**
+ * ixgbevf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
+ u32 msg[2];
+
+ if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+ max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
+
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if ((new_mtu < 68) || (max_frame > max_possible_frame))
+ return -EINVAL;
+
+ hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ /* must set new MTU before calling down or up */
+ netdev->mtu = new_mtu;
+
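+ /* notify the PF of the new max frame size via the mailbox (LPE) */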
+ msg[0] = IXGBE_VF_SET_LPE;
+ msg[1] = max_frame;
+ hw->mbx.ops.write_posted(hw, msg, 2);
+
+ if (netif_running(netdev))
+ ixgbevf_reinit_locked(adapter);
+
+ return 0;
+}
+
+static void ixgbevf_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ ixgbevf_down(adapter);
+ ixgbevf_free_irq(adapter);
+ ixgbevf_free_all_tx_resources(adapter);
+ ixgbevf_free_all_rx_resources(adapter);
+ }
+
+#ifdef CONFIG_PM
+ pci_save_state(pdev);
+#endif
+
+ pci_disable_device(pdev);
+}
+
+static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ unsigned int start;
+ u64 bytes, packets;
+ const struct ixgbevf_ring *ring;
+ int i;
+
+ ixgbevf_update_stats(adapter);
+
+ stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
+
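+ /* read per-ring counters under the u64_stats seqcount so the
+ * 64-bit values are consistent on 32-bit hosts */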
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ ring = &adapter->rx_ring[i];
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ bytes = ring->total_bytes;
+ packets = ring->total_packets;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ stats->rx_bytes += bytes;
+ stats->rx_packets += packets;
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ring = &adapter->tx_ring[i];
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ bytes = ring->total_bytes;
+ packets = ring->total_packets;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ stats->tx_bytes += bytes;
+ stats->tx_packets += packets;
+ }
+
+ return stats;
+}
+
+static int ixgbevf_set_features(struct net_device *netdev, u32 features)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (features & NETIF_F_RXCSUM)
+ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+ else
+ adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
+
+ return 0;
+}
+
+static const struct net_device_ops ixgbe_netdev_ops = {
+ .ndo_open = ixgbevf_open,
+ .ndo_stop = ixgbevf_close,
+ .ndo_start_xmit = ixgbevf_xmit_frame,
+ .ndo_set_rx_mode = ixgbevf_set_rx_mode,
+ .ndo_get_stats64 = ixgbevf_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = ixgbevf_set_mac,
+ .ndo_change_mtu = ixgbevf_change_mtu,
+ .ndo_tx_timeout = ixgbevf_tx_timeout,
+ .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
+ .ndo_set_features = ixgbevf_set_features,
+};
+
+static void ixgbevf_assign_netdev_ops(struct net_device *dev)
+{
+ dev->netdev_ops = &ixgbe_netdev_ops;
+ ixgbevf_set_ethtool_ops(dev);
+ dev->watchdog_timeo = 5 * HZ;
+}
+
+/**
+ * ixgbevf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ixgbevf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit ixgbevf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct ixgbevf_adapter *adapter = NULL;
+ struct ixgbe_hw *hw = NULL;
+ const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
+ static int cards_found;
+ int err, pci_using_dac;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ pci_using_dac = 1;
+ } else {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA "
+ "configuration, aborting\n");
+ goto err_dma;
+ }
+ }
+ pci_using_dac = 0;
+ }
+
+ err = pci_request_regions(pdev, ixgbevf_driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
+ goto err_pci_reg;
+ }
+
+ pci_set_master(pdev);
+
+#ifdef HAVE_TX_MQ
+ netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
+ MAX_TX_QUEUES);
+#else
+ netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
+#endif
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ hw = &adapter->hw;
+ hw->back = adapter;
+ adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+
+ /*
+ * call save state here in standalone driver because it relies on
+ * adapter struct to exist, and needs to call netdev_priv
+ */
+ pci_save_state(pdev);
+
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->hw_addr) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ ixgbevf_assign_netdev_ops(netdev);
+
+ adapter->bd_number = cards_found;
+
+ /* Setup hw api */
+ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+ hw->mac.type = ii->mac;
+
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+ sizeof(struct ixgbe_mbx_operations));
+
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+ adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
+
+	/* setup the private structure */
+	err = ixgbevf_sw_init(adapter);
+	if (err)
+		goto err_sw_init;
+
+ netdev->hw_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXCSUM;
+
+ netdev->features = netdev->hw_features |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_IP_CSUM;
+ netdev->vlan_features |= NETIF_F_IPV6_CSUM;
+ netdev->vlan_features |= NETIF_F_SG;
+
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* The HW MAC address was set and/or determined in sw_init */
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+		dev_err(&pdev->dev, "invalid MAC address\n");
+ err = -EIO;
+ goto err_sw_init;
+ }
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = ixgbevf_watchdog;
+ adapter->watchdog_timer.data = (unsigned long)adapter;
+
+ INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
+ INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
+
+ err = ixgbevf_init_interrupt_scheme(adapter);
+ if (err)
+ goto err_sw_init;
+
+ /* pick up the PCI bus settings for reporting later */
+ if (hw->mac.ops.get_bus_info)
+ hw->mac.ops.get_bus_info(hw);
+
+ strcpy(netdev->name, "eth%d");
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ adapter->netdev_registered = true;
+
+ netif_carrier_off(netdev);
+
+ ixgbevf_init_last_counter_stats(adapter);
+
+	/* print the MAC address */
+	hw_dbg(hw, "%pM\n", netdev->dev_addr);
+
+ hw_dbg(hw, "MAC: %d\n", hw->mac.type);
+
+ hw_dbg(hw, "LRO is disabled\n");
+
+ hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
+ cards_found++;
+ return 0;
+
+err_register:
+err_sw_init:
+ ixgbevf_reset_interrupt_capability(adapter);
+ iounmap(hw->hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
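
The error labels above form the usual probe unwind ladder: each failure path jumps to a label that releases only what was acquired before the failing step, in reverse order. The shape in miniature, with hypothetical acquire/release helpers standing in for the PCI and netdev calls:

	static int demo_probe(void)
	{
		int err;

		err = acquire_a();	/* e.g. pci_enable_device() */
		if (err)
			return err;

		err = acquire_b();	/* e.g. pci_request_regions() */
		if (err)
			goto err_b;

		err = acquire_c();	/* e.g. ioremap() */
		if (err)
			goto err_c;

		return 0;

	err_c:
		release_b();
	err_b:
		release_a();
		return err;
	}
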
+
+/**
+ * ixgbevf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ixgbevf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void __devexit ixgbevf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+ del_timer_sync(&adapter->watchdog_timer);
+
+ cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->watchdog_task);
+
+ if (adapter->netdev_registered) {
+ unregister_netdev(netdev);
+ adapter->netdev_registered = false;
+ }
+
+ ixgbevf_reset_interrupt_capability(adapter);
+
+ iounmap(adapter->hw.hw_addr);
+ pci_release_regions(pdev);
+
+ hw_dbg(&adapter->hw, "Remove complete\n");
+
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+
+ free_netdev(netdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver ixgbevf_driver = {
+ .name = ixgbevf_driver_name,
+ .id_table = ixgbevf_pci_tbl,
+ .probe = ixgbevf_probe,
+ .remove = __devexit_p(ixgbevf_remove),
+ .shutdown = ixgbevf_shutdown,
+};
+
+/**
+ * ixgbevf_init_module - Driver Registration Routine
+ *
+ * ixgbevf_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init ixgbevf_init_module(void)
+{
+ int ret;
+ printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
+ ixgbevf_driver_version);
+
+ printk(KERN_INFO "%s\n", ixgbevf_copyright);
+
+ ret = pci_register_driver(&ixgbevf_driver);
+ return ret;
+}
+
+module_init(ixgbevf_init_module);
+
+/**
+ * ixgbevf_exit_module - Driver Exit Cleanup Routine
+ *
+ * ixgbevf_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit ixgbevf_exit_module(void)
+{
+ pci_unregister_driver(&ixgbevf_driver);
+}
+
+#ifdef DEBUG
+/**
+ * ixgbevf_get_hw_dev_name - return device name string
+ * used by hardware layer to print debugging information
+ **/
+char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
+{
+ struct ixgbevf_adapter *adapter = hw->back;
+ return adapter->netdev->name;
+}
+
+#endif
+module_exit(ixgbevf_exit_module);
+
+/* ixgbevf_main.c */
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
new file mode 100644
index 000000000000..7a8833125770
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -0,0 +1,341 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "mbx.h"
+
+/**
+ * ixgbevf_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if it successfully received a message notification
+ **/
+static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ while (countdown && mbx->ops.check_for_msg(hw)) {
+ countdown--;
+ udelay(mbx->udelay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbevf_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if it successfully received a message acknowledgement
+ **/
+static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ while (countdown && mbx->ops.check_for_ack(hw)) {
+ countdown--;
+ udelay(mbx->udelay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbevf_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ ret_val = ixgbevf_poll_for_msg(hw);
+
+	/* if a message was received, read it; otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = ixgbevf_poll_for_ack(hw);
+
+ return ret_val;
+}
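
Together the two posted helpers give callers a synchronous request/reply primitive over the PF-VF mailbox: write_posted blocks until the PF ACKs the write, read_posted blocks until the PF posts a message. A sketch of a typical exchange, modeled on the IXGBE_VF_SET_LPE request sent from ixgbevf_change_mtu(); the reply step here is illustrative, since many requests are fire-and-forget:

	static s32 demo_post_request(struct ixgbe_hw *hw, u32 max_frame)
	{
		u32 msg[2];
		s32 err;

		msg[0] = IXGBE_VF_SET_LPE;	/* request type in word 0 */
		msg[1] = max_frame;		/* payload in word 1 */

		err = hw->mbx.ops.write_posted(hw, msg, 2);
		if (err)
			return err;		/* PF never ACKed */

		/* requests that expect a reply then block on read_posted */
		return hw->mbx.ops.read_posted(hw, msg, 2);
	}
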
+
+/**
+ * ixgbevf_read_v2p_mailbox - read v2p mailbox
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to read the v2p mailbox without losing the read to
+ * clear status bits.
+ **/
+static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
+{
+ u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
+
+ v2p_mailbox |= hw->mbx.v2p_mailbox;
+ hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
+
+ return v2p_mailbox;
+}
+
+/**
+ * ixgbevf_check_for_bit_vf - Determine if a status bit was set
+ * @hw: pointer to the HW structure
+ * @mask: bitmask for bits to be tested and cleared
+ *
+ * This function is used to check for the read to clear bits within
+ * the V2P mailbox.
+ **/
+static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw);
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (v2p_mailbox & mask)
+ ret_val = 0;
+
+ hw->mbx.v2p_mailbox &= ~mask;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_msg_vf - checks to see if the PF has sent mail
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_msg_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
+ ret_val = 0;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set the ACK bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
+ ret_val = 0;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_rst_vf - checks to see if the PF has reset
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set the RSTD or RSTI bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
+ IXGBE_VFMAILBOX_RSTI))) {
+ ret_val = 0;
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_obtain_mbx_lock_vf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if we obtained the mailbox lock or else ERR_MBX
+ **/
+static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+
+ /* reserve mailbox for vf use */
+ if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
+ ret_val = 0;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_mbx_vf - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied message into the buffer
+ **/
+static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbevf_check_for_msg_vf(hw);
+ ixgbevf_check_for_ack_vf(hw);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* Drop VFU and interrupt the PF to tell it a message has been sent */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+
+out_no_write:
+ return ret_val;
+}
+
+/**
+ * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully read message from buffer
+ **/
+static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ s32 ret_val = 0;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+ /* Acknowledge receipt and release mailbox, then we're done */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * ixgbevf_init_mbx_params_vf - set initial values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ /* start mailbox as timed out and let the reset_hw call set the timeout
+ * value to begin communications */
+ mbx->timeout = 0;
+ mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+
+ return 0;
+}
+
+struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
+ .init_params = ixgbevf_init_mbx_params_vf,
+ .read = ixgbevf_read_mbx_vf,
+ .write = ixgbevf_write_mbx_vf,
+ .read_posted = ixgbevf_read_posted_mbx,
+ .write_posted = ixgbevf_write_posted_mbx,
+ .check_for_msg = ixgbevf_check_for_msg_vf,
+ .check_for_ack = ixgbevf_check_for_ack_vf,
+ .check_for_rst = ixgbevf_check_for_rst_vf,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
new file mode 100644
index 000000000000..ea393eb03f3a
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -0,0 +1,99 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "vf.h"
+
+#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX -100
+
+#define IXGBE_VFMAILBOX 0x002FC
+#define IXGBE_VFMBMEM 0x00200
+
+/* Define mailbox register bits */
+#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
+#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
+#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
+#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
+#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
+#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
+
+#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF.  The reverse is true if it is IXGBE_PF_*.
+ * Message replies are the request type or'd with IXGBE_VT_MSGTYPE_ACK
+ * (or IXGBE_VT_MSGTYPE_NACK if the request was rejected)
+ */
+#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ * clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET 0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD 3
+
+#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+/* forward declaration of the HW struct */
+struct ixgbe_hw;
+
+#endif /* _IXGBE_MBX_H_ */
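
Every PF reply repeats the request type in word 0 with IXGBE_VT_MSGTYPE_ACK or IXGBE_VT_MSGTYPE_NACK or'd in, and CTS may also be set. A VF therefore validates a reply by masking CTS and comparing, as vf.c does below; a condensed, illustrative helper (not part of the patch itself):

	/* 'reply' is word 0 read back from the mailbox; 'req' is e.g.
	 * IXGBE_VF_SET_MAC_ADDR */
	static bool demo_pf_acked(u32 reply, u32 req)
	{
		reply &= ~IXGBE_VT_MSGTYPE_CTS;	/* CTS may ride along */
		return reply == (req | IXGBE_VT_MSGTYPE_ACK);
	}
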
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
new file mode 100644
index 000000000000..189200eeca26
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -0,0 +1,85 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_REGS_H_
+#define _IXGBEVF_REGS_H_
+
+#define IXGBE_VFCTRL 0x00000
+#define IXGBE_VFSTATUS 0x00008
+#define IXGBE_VFLINKS 0x00010
+#define IXGBE_VFFRTIMER 0x00048
+#define IXGBE_VFRXMEMWRAP 0x03190
+#define IXGBE_VTEICR 0x00100
+#define IXGBE_VTEICS 0x00104
+#define IXGBE_VTEIMS 0x00108
+#define IXGBE_VTEIMC 0x0010C
+#define IXGBE_VTEIAC 0x00110
+#define IXGBE_VTEIAM 0x00114
+#define IXGBE_VTEITR(x) (0x00820 + (4 * x))
+#define IXGBE_VTIVAR(x) (0x00120 + (4 * x))
+#define IXGBE_VTIVAR_MISC 0x00140
+#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x))
+#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x))
+#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x))
+#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x))
+#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x))
+#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x))
+#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x))
+#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x))
+#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x))
+#define IXGBE_VFPSRTYPE 0x00300
+#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x))
+#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x))
+#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x))
+#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x))
+#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x))
+#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x))
+#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x))
+#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x))
+#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x))
+#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x))
+#define IXGBE_VFGPRC 0x0101C
+#define IXGBE_VFGPTC 0x0201C
+#define IXGBE_VFGORC_LSB 0x01020
+#define IXGBE_VFGORC_MSB 0x01024
+#define IXGBE_VFGOTC_LSB 0x02020
+#define IXGBE_VFGOTC_MSB 0x02024
+#define IXGBE_VFMPRC 0x01034
+
+#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+
+#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
+
+#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+ writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
+
+#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
+ readl((a)->hw_addr + (reg) + ((offset) << 2)))
+
+#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
+
+#endif /* _IXGBEVF_REGS_H_ */
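
These accessors are thin readl()/writel() wrappers over the BAR 0 mapping in hw->hw_addr, and IXGBE_WRITE_FLUSH posts outstanding writes by issuing a harmless status read. A sketch of the usual read-modify-write pattern, mirroring ixgbevf_stop_hw_vf() in vf.c below (IXGBE_RXDCTL_ENABLE comes from defines.h):

	static void demo_disable_rx_queue(struct ixgbe_hw *hw, int queue)
	{
		u32 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(queue));

		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(queue), rxdctl);
		IXGBE_WRITE_FLUSH(hw);	/* make sure the write is posted */
	}
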
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
new file mode 100644
index 000000000000..aa3682e8c473
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -0,0 +1,426 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "vf.h"
+
+/**
+ * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware.  For the VF this only clears the adapter_stopped
+ * flag; bus info, counters, receive address registers, multicast and VLAN
+ * filter tables and link settings are all managed by the PF.
+ **/
+static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
+{
+ /* Clear adapter stopped flag */
+ hw->adapter_stopped = false;
+
+ return 0;
+}
+
+/**
+ * ixgbevf_init_hw_vf - virtual function hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting the hardware and then starting
+ * the hardware
+ **/
+static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
+{
+ s32 status = hw->mac.ops.start_hw(hw);
+
+ hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+ return status;
+}
+
+/**
+ * ixgbevf_reset_hw_vf - Performs hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts.
+ **/
+static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 timeout = IXGBE_VF_INIT_TIMEOUT;
+ s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
+ u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ hw->mac.ops.stop_adapter(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* we cannot reset while the RSTI / RSTD bits are asserted */
+ while (!mbx->ops.check_for_rst(hw) && timeout) {
+ timeout--;
+ udelay(5);
+ }
+
+ if (!timeout)
+ return IXGBE_ERR_RESET_FAILED;
+
+ /* mailbox timeout can now become active */
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+
+ msgbuf[0] = IXGBE_VF_RESET;
+ mbx->ops.write_posted(hw, msgbuf, 1);
+
+ msleep(10);
+
+ /* set our "perm_addr" based on info provided by PF */
+	/* also set up the mc_filter_type which is piggybacked
+ * on the mac address in word 3 */
+ ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
+ if (ret_val)
+ return ret_val;
+
+ if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
+ return IXGBE_ERR_INVALID_MAC_ADDR;
+
+ memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
+
+ return 0;
+}
+
+/**
+ * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
+{
+ u32 number_of_queues;
+ u32 reg_val;
+ u16 i;
+
+ /*
+ * Set the adapter_stopped flag so other driver functions stop touching
+ * the hardware
+ */
+ hw->adapter_stopped = true;
+
+	/* Disable the receive unit by stopping each queue */
+ number_of_queues = hw->mac.max_rx_queues;
+ for (i = 0; i < number_of_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ if (reg_val & IXGBE_RXDCTL_ENABLE) {
+ reg_val &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
+ }
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+
+	/* Clear interrupt mask to stop interrupts from being generated */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+ /* Clear any pending interrupts */
+ IXGBE_READ_REG(hw, IXGBE_VTEICR);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ number_of_queues = hw->mac.max_tx_queues;
+ for (i = 0; i < number_of_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ if (reg_val & IXGBE_TXDCTL_ENABLE) {
+ reg_val &= ~IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbevf_mta_vector - Determines bit-vector in multicast table to set
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts the 12 bits from a multicast address that determine which
+ * bit-vector to set in the multicast table.  The hardware uses the same
+ * 12 bits of incoming rx multicast addresses to determine which bit-vector
+ * to check in the MTA.  Which of the 4 combinations of 12 bits the hardware
+ * uses is set by the MO field of the MCSTCTRL.  The MO field is set during
+ * initialization to mc_filter_type.
+ **/
+static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector = 0;
+
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ break;
+ }
+
+ /* vector can only be 12-bits or boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
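
A worked example for mc_filter_type 0 (bits [47:36]): for the multicast address 01:00:5e:00:00:01, mc_addr[4] is 0x00 and mc_addr[5] is 0x01, so vector = (0x00 >> 4) | (0x01 << 4) = 0x010. The PF would then set bit 16 of MTA register 0 (register index vector >> 5, bit position vector & 0x1F) for this address.
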
+
+/**
+ * ixgbevf_get_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to storage for retrieved MAC address
+ **/
+static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+ memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+
+ return 0;
+}
+
+static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[3];
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+
+ memset(msgbuf, 0, sizeof(msgbuf));
+ /*
+ * If index is one then this is the start of a new list and needs
+	 * indication to the PF so it can do its own list management.
+ * If it is zero then that tells the PF to just clear all of
+ * this VF's macvlans and there is no new list.
+ */
+ msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
+ msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
+ if (addr)
+ memcpy(msg_addr, addr, 6);
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ if (!ret_val)
+ if (msgbuf[0] ==
+ (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
+ ret_val = -ENOMEM;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_set_rar_vf - set device MAC address
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: Unused in this implementation
+ **/
+static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
+ u32 vmdq)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[3];
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+
+ memset(msgbuf, 0, sizeof(msgbuf));
+ msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
+ memcpy(msg_addr, addr, 6);
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /* if nacked the address was rejected, use "perm_addr" */
+ if (!ret_val &&
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
+ ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @netdev: pointer to net device structure
+ *
+ * Updates the Multicast Table Array.
+ **/
+static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
+ struct net_device *netdev)
+{
+ struct netdev_hw_addr *ha;
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+ u16 *vector_list = (u16 *)&msgbuf[1];
+ u32 cnt, i;
+
+ /* Each entry in the list uses 1 16 bit word. We have 30
+ * 16 bit words available in our HW msg buffer (minus 1 for the
+ * msg type). That's 30 hash values if we pack 'em right. If
+ * there are more than 30 MC addresses to add then punt the
+ * extras for now and then add code to handle more than 30 later.
+	 * It would be unusual for a server to request that many multicast
+	 * addresses except in large enterprise network environments.
+ */
+
+ cnt = netdev_mc_count(netdev);
+ if (cnt > 30)
+ cnt = 30;
+ msgbuf[0] = IXGBE_VF_SET_MULTICAST;
+ msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
+
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (i == cnt)
+ break;
+ vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
+ }
+
+ mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
+
+ return 0;
+}
+
+/**
+ * ixgbevf_set_vfta_vf - Set/Unset vlan filter table address
+ * @hw: pointer to the HW structure
+ * @vlan: 12 bit VLAN ID
+ * @vind: unused by VF drivers
+ * @vlan_on: if true then set bit, else clear bit
+ **/
+static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[2];
+
+ msgbuf[0] = IXGBE_VF_SET_VLAN;
+ msgbuf[1] = vlan;
+ /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+ msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
+
+ return mbx->ops.write_posted(hw, msgbuf, 2);
+}
+
+/**
+ * ixgbevf_setup_mac_link_vf - Setup MAC link settings
+ * @hw: pointer to hardware structure
+ * @speed: Unused in this implementation
+ * @autoneg: Unused in this implementation
+ * @autoneg_wait_to_complete: Unused in this implementation
+ *
+ * Do nothing and return success. VF drivers are not allowed to change
+ * global settings. Maintained for driver compatibility.
+ **/
+static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ return 0;
+}
+
+/**
+ * ixgbevf_check_mac_link_vf - Get link/speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up,
+ bool autoneg_wait_to_complete)
+{
+ u32 links_reg;
+
+ if (!(hw->mbx.ops.check_for_rst(hw))) {
+ *link_up = false;
+ *speed = 0;
+ return -1;
+ }
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = true;
+ else
+ *link_up = false;
+
+ if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_10G_82599)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ return 0;
+}
+
+static struct ixgbe_mac_operations ixgbevf_mac_ops = {
+ .init_hw = ixgbevf_init_hw_vf,
+ .reset_hw = ixgbevf_reset_hw_vf,
+ .start_hw = ixgbevf_start_hw_vf,
+ .get_mac_addr = ixgbevf_get_mac_addr_vf,
+ .stop_adapter = ixgbevf_stop_hw_vf,
+ .setup_link = ixgbevf_setup_mac_link_vf,
+ .check_link = ixgbevf_check_mac_link_vf,
+ .set_rar = ixgbevf_set_rar_vf,
+ .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
+ .set_uc_addr = ixgbevf_set_uc_addr_vf,
+ .set_vfta = ixgbevf_set_vfta_vf,
+};
+
+struct ixgbevf_info ixgbevf_82599_vf_info = {
+ .mac = ixgbe_mac_82599_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
+
+struct ixgbevf_info ixgbevf_X540_vf_info = {
+ .mac = ixgbe_mac_X540_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
new file mode 100644
index 000000000000..10306b492ee6
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -0,0 +1,174 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2010 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef __IXGBE_VF_H__
+#define __IXGBE_VF_H__
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+
+#include "defines.h"
+#include "regs.h"
+#include "mbx.h"
+
+struct ixgbe_hw;
+
+/* iterator type for walking multicast address lists */
+typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+ u32 *vmdq);
+struct ixgbe_mac_operations {
+ s32 (*init_hw)(struct ixgbe_hw *);
+ s32 (*reset_hw)(struct ixgbe_hw *);
+ s32 (*start_hw)(struct ixgbe_hw *);
+ s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+ enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+ u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
+ s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+ s32 (*stop_adapter)(struct ixgbe_hw *);
+ s32 (*get_bus_info)(struct ixgbe_hw *);
+
+ /* Link */
+ s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
+ s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+ s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+ bool *);
+
+ /* RAR, Multicast, VLAN */
+ s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
+ s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
+ s32 (*init_rx_addrs)(struct ixgbe_hw *);
+ s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
+ s32 (*enable_mc)(struct ixgbe_hw *);
+ s32 (*disable_mc)(struct ixgbe_hw *);
+ s32 (*clear_vfta)(struct ixgbe_hw *);
+ s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+};
+
+enum ixgbe_mac_type {
+ ixgbe_mac_unknown = 0,
+ ixgbe_mac_82599_vf,
+ ixgbe_mac_X540_vf,
+ ixgbe_num_macs
+};
+
+struct ixgbe_mac_info {
+ struct ixgbe_mac_operations ops;
+ u8 addr[6];
+ u8 perm_addr[6];
+
+ enum ixgbe_mac_type type;
+
+ s32 mc_filter_type;
+
+ bool get_link_status;
+ u32 max_tx_queues;
+ u32 max_rx_queues;
+ u32 max_msix_vectors;
+};
+
+struct ixgbe_mbx_operations {
+ s32 (*init_params)(struct ixgbe_hw *hw);
+ s32 (*read)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*write)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*check_for_msg)(struct ixgbe_hw *);
+ s32 (*check_for_ack)(struct ixgbe_hw *);
+ s32 (*check_for_rst)(struct ixgbe_hw *);
+};
+
+struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+ struct ixgbe_mbx_operations ops;
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 udelay;
+ u32 v2p_mailbox;
+ u16 size;
+};
+
+struct ixgbe_hw {
+ void *back;
+
+ u8 __iomem *hw_addr;
+
+ struct ixgbe_mac_info mac;
+ struct ixgbe_mbx_info mbx;
+
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 vendor_id;
+
+ u8 revision_id;
+ bool adapter_stopped;
+};
+
+struct ixgbevf_hw_stats {
+ u64 base_vfgprc;
+ u64 base_vfgptc;
+ u64 base_vfgorc;
+ u64 base_vfgotc;
+ u64 base_vfmprc;
+
+ u64 last_vfgprc;
+ u64 last_vfgptc;
+ u64 last_vfgorc;
+ u64 last_vfgotc;
+ u64 last_vfmprc;
+
+ u64 vfgprc;
+ u64 vfgptc;
+ u64 vfgorc;
+ u64 vfgotc;
+ u64 vfmprc;
+
+ u64 saved_reset_vfgprc;
+ u64 saved_reset_vfgptc;
+ u64 saved_reset_vfgorc;
+ u64 saved_reset_vfgotc;
+ u64 saved_reset_vfmprc;
+};
+
+struct ixgbevf_info {
+ enum ixgbe_mac_type mac;
+ struct ixgbe_mac_operations *mac_ops;
+};
+
+#endif /* __IXGBE_VF_H__ */
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
new file mode 100644
index 000000000000..48a0a23f342f
--- /dev/null
+++ b/drivers/net/ethernet/jme.c
@@ -0,0 +1,3236 @@
+/*
+ * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
+ *
+ * Copyright 2008 JMicron Technology Corporation
+ * http://www.jmicron.com/
+ * Copyright (c) 2009 - 2010 Guo-Fu Tseng <cooldavid@cooldavid.org>
+ *
+ * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_vlan.h>
+#include <linux/slab.h>
+#include <net/ip6_checksum.h>
+#include "jme.h"
+
+static int force_pseudohp = -1;
+static int no_pseudohp = -1;
+static int no_extplug = -1;
+module_param(force_pseudohp, int, 0);
+MODULE_PARM_DESC(force_pseudohp,
+ "Enable pseudo hot-plug feature manually by driver instead of BIOS.");
+module_param(no_pseudohp, int, 0);
+MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature.");
+module_param(no_extplug, int, 0);
+MODULE_PARM_DESC(no_extplug,
+ "Do not use external plug signal for pseudo hot-plug.");
+
+static int
+jme_mdio_read(struct net_device *netdev, int phy, int reg)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ int i, val, again = (reg == MII_BMSR) ? 1 : 0;
+
+read_again:
+ jwrite32(jme, JME_SMI, SMI_OP_REQ |
+ smi_phy_addr(phy) |
+ smi_reg_addr(reg));
+
+ wmb();
+ for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
+ udelay(20);
+ val = jread32(jme, JME_SMI);
+ if ((val & SMI_OP_REQ) == 0)
+ break;
+ }
+
+ if (i == 0) {
+ pr_err("phy(%d) read timeout : %d\n", phy, reg);
+ return 0;
+ }
+
+ if (again--)
+ goto read_again;
+
+ return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
+}
+
+static void
+jme_mdio_write(struct net_device *netdev,
+ int phy, int reg, int val)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ int i;
+
+ jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
+ ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
+ smi_phy_addr(phy) | smi_reg_addr(reg));
+
+ wmb();
+ for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
+ udelay(20);
+ if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
+ break;
+ }
+
+ if (i == 0)
+ pr_err("phy(%d) write timeout : %d\n", phy, reg);
+}
+
+static inline void
+jme_reset_phy_processor(struct jme_adapter *jme)
+{
+ u32 val;
+
+ jme_mdio_write(jme->dev,
+ jme->mii_if.phy_id,
+ MII_ADVERTISE, ADVERTISE_ALL |
+ ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+
+ if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
+ jme_mdio_write(jme->dev,
+ jme->mii_if.phy_id,
+ MII_CTRL1000,
+ ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+
+ val = jme_mdio_read(jme->dev,
+ jme->mii_if.phy_id,
+ MII_BMCR);
+
+ jme_mdio_write(jme->dev,
+ jme->mii_if.phy_id,
+ MII_BMCR, val | BMCR_RESET);
+}
+
+static void
+jme_setup_wakeup_frame(struct jme_adapter *jme,
+ const u32 *mask, u32 crc, int fnr)
+{
+ int i;
+
+ /*
+ * Setup CRC pattern
+ */
+ jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
+ wmb();
+ jwrite32(jme, JME_WFODP, crc);
+ wmb();
+
+ /*
+ * Setup Mask
+ */
+ for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
+ jwrite32(jme, JME_WFOI,
+ ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
+ (fnr & WFOI_FRAME_SEL));
+ wmb();
+ jwrite32(jme, JME_WFODP, mask[i]);
+ wmb();
+ }
+}
+
+static inline void
+jme_mac_rxclk_off(struct jme_adapter *jme)
+{
+ jme->reg_gpreg1 |= GPREG1_RXCLKOFF;
+ jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_mac_rxclk_on(struct jme_adapter *jme)
+{
+ jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF;
+ jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_mac_txclk_off(struct jme_adapter *jme)
+{
+ jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC);
+ jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_mac_txclk_on(struct jme_adapter *jme)
+{
+ u32 speed = jme->reg_ghc & GHC_SPEED;
+ if (speed == GHC_SPEED_1000M)
+ jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
+ else
+ jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
+ jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_reset_ghc_speed(struct jme_adapter *jme)
+{
+ jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX);
+ jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_reset_250A2_workaround(struct jme_adapter *jme)
+{
+ jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
+ GPREG1_RSSPATCH);
+ jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_assert_ghc_reset(struct jme_adapter *jme)
+{
+ jme->reg_ghc |= GHC_SWRST;
+ jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_clear_ghc_reset(struct jme_adapter *jme)
+{
+ jme->reg_ghc &= ~GHC_SWRST;
+ jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_reset_mac_processor(struct jme_adapter *jme)
+{
+ static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
+ u32 crc = 0xCDCDCDCD;
+ u32 gpreg0;
+ int i;
+
+ jme_reset_ghc_speed(jme);
+ jme_reset_250A2_workaround(jme);
+
+ jme_mac_rxclk_on(jme);
+ jme_mac_txclk_on(jme);
+ udelay(1);
+ jme_assert_ghc_reset(jme);
+ udelay(1);
+ jme_mac_rxclk_off(jme);
+ jme_mac_txclk_off(jme);
+ udelay(1);
+ jme_clear_ghc_reset(jme);
+ udelay(1);
+ jme_mac_rxclk_on(jme);
+ jme_mac_txclk_on(jme);
+ udelay(1);
+ jme_mac_rxclk_off(jme);
+ jme_mac_txclk_off(jme);
+
+ jwrite32(jme, JME_RXDBA_LO, 0x00000000);
+ jwrite32(jme, JME_RXDBA_HI, 0x00000000);
+ jwrite32(jme, JME_RXQDC, 0x00000000);
+ jwrite32(jme, JME_RXNDA, 0x00000000);
+ jwrite32(jme, JME_TXDBA_LO, 0x00000000);
+ jwrite32(jme, JME_TXDBA_HI, 0x00000000);
+ jwrite32(jme, JME_TXQDC, 0x00000000);
+ jwrite32(jme, JME_TXNDA, 0x00000000);
+
+ jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
+ jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
+ for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
+ jme_setup_wakeup_frame(jme, mask, crc, i);
+ if (jme->fpgaver)
+ gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
+ else
+ gpreg0 = GPREG0_DEFAULT;
+ jwrite32(jme, JME_GPREG0, gpreg0);
+}
+
+static inline void
+jme_clear_pm(struct jme_adapter *jme)
+{
+ jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
+}
+
+static int
+jme_reload_eeprom(struct jme_adapter *jme)
+{
+ u32 val;
+ int i;
+
+ val = jread32(jme, JME_SMBCSR);
+
+ if (val & SMBCSR_EEPROMD) {
+ val |= SMBCSR_CNACK;
+ jwrite32(jme, JME_SMBCSR, val);
+ val |= SMBCSR_RELOAD;
+ jwrite32(jme, JME_SMBCSR, val);
+ mdelay(12);
+
+ for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) {
+ mdelay(1);
+ if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
+ break;
+ }
+
+ if (i == 0) {
+ pr_err("eeprom reload timeout\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static void
+jme_load_macaddr(struct net_device *netdev)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ unsigned char macaddr[6];
+ u32 val;
+
+ spin_lock_bh(&jme->macaddr_lock);
+ val = jread32(jme, JME_RXUMA_LO);
+ macaddr[0] = (val >> 0) & 0xFF;
+ macaddr[1] = (val >> 8) & 0xFF;
+ macaddr[2] = (val >> 16) & 0xFF;
+ macaddr[3] = (val >> 24) & 0xFF;
+ val = jread32(jme, JME_RXUMA_HI);
+ macaddr[4] = (val >> 0) & 0xFF;
+ macaddr[5] = (val >> 8) & 0xFF;
+ memcpy(netdev->dev_addr, macaddr, 6);
+ spin_unlock_bh(&jme->macaddr_lock);
+}
+
+static inline void
+jme_set_rx_pcc(struct jme_adapter *jme, int p)
+{
+ switch (p) {
+ case PCC_OFF:
+ jwrite32(jme, JME_PCCRX0,
+ ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
+ ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
+ break;
+ case PCC_P1:
+ jwrite32(jme, JME_PCCRX0,
+ ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
+ ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
+ break;
+ case PCC_P2:
+ jwrite32(jme, JME_PCCRX0,
+ ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
+ ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
+ break;
+ case PCC_P3:
+ jwrite32(jme, JME_PCCRX0,
+ ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
+ ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
+ break;
+ default:
+ break;
+ }
+ wmb();
+
+ if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
+ netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p);
+}
+
+static void
+jme_start_irq(struct jme_adapter *jme)
+{
+ register struct dynpcc_info *dpi = &(jme->dpi);
+
+ jme_set_rx_pcc(jme, PCC_P1);
+ dpi->cur = PCC_P1;
+ dpi->attempt = PCC_P1;
+ dpi->cnt = 0;
+
+ jwrite32(jme, JME_PCCTX,
+ ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
+ ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
+ PCCTXQ0_EN
+ );
+
+ /*
+ * Enable Interrupts
+ */
+ jwrite32(jme, JME_IENS, INTR_ENABLE);
+}
+
+static inline void
+jme_stop_irq(struct jme_adapter *jme)
+{
+ /*
+ * Disable Interrupts
+ */
+ jwrite32f(jme, JME_IENC, INTR_ENABLE);
+}
+
+static u32
+jme_linkstat_from_phy(struct jme_adapter *jme)
+{
+ u32 phylink, bmsr;
+
+ phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
+ bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
+ if (bmsr & BMSR_ANCOMP)
+ phylink |= PHY_LINK_AUTONEG_COMPLETE;
+
+ return phylink;
+}
+
+static inline void
+jme_set_phyfifo_5level(struct jme_adapter *jme)
+{
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
+}
+
+static inline void
+jme_set_phyfifo_8level(struct jme_adapter *jme)
+{
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
+}
+
+static int
+jme_check_link(struct net_device *netdev, int testonly)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr;
+ char linkmsg[64];
+ int rc = 0;
+
+ linkmsg[0] = '\0';
+
+ if (jme->fpgaver)
+ phylink = jme_linkstat_from_phy(jme);
+ else
+ phylink = jread32(jme, JME_PHY_LINK);
+
+ if (phylink & PHY_LINK_UP) {
+ if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
+ /*
+ * If we did not enable AN
+ * Speed/Duplex Info should be obtained from SMI
+ */
+ phylink = PHY_LINK_UP;
+
+ bmcr = jme_mdio_read(jme->dev,
+ jme->mii_if.phy_id,
+ MII_BMCR);
+
+ phylink |= ((bmcr & BMCR_SPEED1000) &&
+ (bmcr & BMCR_SPEED100) == 0) ?
+ PHY_LINK_SPEED_1000M :
+ (bmcr & BMCR_SPEED100) ?
+ PHY_LINK_SPEED_100M :
+ PHY_LINK_SPEED_10M;
+
+ phylink |= (bmcr & BMCR_FULLDPLX) ?
+ PHY_LINK_DUPLEX : 0;
+
+ strcat(linkmsg, "Forced: ");
+ } else {
+ /*
+ * Keep polling for speed/duplex resolve complete
+ */
+ while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
+ --cnt) {
+
+ udelay(1);
+
+ if (jme->fpgaver)
+ phylink = jme_linkstat_from_phy(jme);
+ else
+ phylink = jread32(jme, JME_PHY_LINK);
+ }
+ if (!cnt)
+				pr_err("Waiting for speed/duplex resolve timed out\n");
+
+ strcat(linkmsg, "ANed: ");
+ }
+
+ if (jme->phylink == phylink) {
+ rc = 1;
+ goto out;
+ }
+ if (testonly)
+ goto out;
+
+ jme->phylink = phylink;
+
+ /*
+ * The speed/duplex setting of jme->reg_ghc already cleared
+ * by jme_reset_mac_processor()
+ */
+ switch (phylink & PHY_LINK_SPEED_MASK) {
+ case PHY_LINK_SPEED_10M:
+ jme->reg_ghc |= GHC_SPEED_10M;
+ strcat(linkmsg, "10 Mbps, ");
+ break;
+ case PHY_LINK_SPEED_100M:
+ jme->reg_ghc |= GHC_SPEED_100M;
+ strcat(linkmsg, "100 Mbps, ");
+ break;
+ case PHY_LINK_SPEED_1000M:
+ jme->reg_ghc |= GHC_SPEED_1000M;
+ strcat(linkmsg, "1000 Mbps, ");
+ break;
+ default:
+ break;
+ }
+
+ if (phylink & PHY_LINK_DUPLEX) {
+ jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
+ jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX);
+ jme->reg_ghc |= GHC_DPX;
+ } else {
+ jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
+ TXMCS_BACKOFF |
+ TXMCS_CARRIERSENSE |
+ TXMCS_COLLISION);
+ jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX);
+ }
+
+ jwrite32(jme, JME_GHC, jme->reg_ghc);
+
+ if (is_buggy250(jme->pdev->device, jme->chiprev)) {
+ jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
+ GPREG1_RSSPATCH);
+ if (!(phylink & PHY_LINK_DUPLEX))
+ jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH;
+ switch (phylink & PHY_LINK_SPEED_MASK) {
+ case PHY_LINK_SPEED_10M:
+ jme_set_phyfifo_8level(jme);
+ jme->reg_gpreg1 |= GPREG1_RSSPATCH;
+ break;
+ case PHY_LINK_SPEED_100M:
+ jme_set_phyfifo_5level(jme);
+ jme->reg_gpreg1 |= GPREG1_RSSPATCH;
+ break;
+ case PHY_LINK_SPEED_1000M:
+ jme_set_phyfifo_8level(jme);
+ break;
+ default:
+ break;
+ }
+ }
+ jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
+
+ strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
+ "Full-Duplex, " :
+ "Half-Duplex, ");
+ strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
+ "MDI-X" :
+ "MDI");
+ netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg);
+ netif_carrier_on(netdev);
+ } else {
+ if (testonly)
+ goto out;
+
+ netif_info(jme, link, jme->dev, "Link is down\n");
+ jme->phylink = 0;
+ netif_carrier_off(netdev);
+ }
+
+out:
+ return rc;
+}
+
+static int
+jme_setup_tx_resources(struct jme_adapter *jme)
+{
+ struct jme_ring *txring = &(jme->txring[0]);
+
+ txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
+ TX_RING_ALLOC_SIZE(jme->tx_ring_size),
+ &(txring->dmaalloc),
+ GFP_ATOMIC);
+
+ if (!txring->alloc)
+ goto err_set_null;
+
+ /*
+	 * Align to a 16-byte boundary
+ */
+ txring->desc = (void *)ALIGN((unsigned long)(txring->alloc),
+ RING_DESC_ALIGN);
+ txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
+ txring->next_to_use = 0;
+ atomic_set(&txring->next_to_clean, 0);
+ atomic_set(&txring->nr_free, jme->tx_ring_size);
+
+ txring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
+ jme->tx_ring_size, GFP_ATOMIC);
+ if (unlikely(!(txring->bufinf)))
+ goto err_free_txring;
+
+ /*
+ * Initialize Transmit Descriptors
+ */
+ memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
+ memset(txring->bufinf, 0,
+ sizeof(struct jme_buffer_info) * jme->tx_ring_size);
+
+ return 0;
+
+err_free_txring:
+ dma_free_coherent(&(jme->pdev->dev),
+ TX_RING_ALLOC_SIZE(jme->tx_ring_size),
+ txring->alloc,
+ txring->dmaalloc);
+
+err_set_null:
+ txring->desc = NULL;
+ txring->dmaalloc = 0;
+ txring->dma = 0;
+ txring->bufinf = NULL;
+
+ return -ENOMEM;
+}
+
+static void
+jme_free_tx_resources(struct jme_adapter *jme)
+{
+ int i;
+ struct jme_ring *txring = &(jme->txring[0]);
+ struct jme_buffer_info *txbi;
+
+ if (txring->alloc) {
+ if (txring->bufinf) {
+ for (i = 0 ; i < jme->tx_ring_size ; ++i) {
+ txbi = txring->bufinf + i;
+ if (txbi->skb) {
+ dev_kfree_skb(txbi->skb);
+ txbi->skb = NULL;
+ }
+ txbi->mapping = 0;
+ txbi->len = 0;
+ txbi->nr_desc = 0;
+ txbi->start_xmit = 0;
+ }
+ kfree(txring->bufinf);
+ }
+
+ dma_free_coherent(&(jme->pdev->dev),
+ TX_RING_ALLOC_SIZE(jme->tx_ring_size),
+ txring->alloc,
+ txring->dmaalloc);
+
+ txring->alloc = NULL;
+ txring->desc = NULL;
+ txring->dmaalloc = 0;
+ txring->dma = 0;
+ txring->bufinf = NULL;
+ }
+ txring->next_to_use = 0;
+ atomic_set(&txring->next_to_clean, 0);
+ atomic_set(&txring->nr_free, 0);
+}
+
+static inline void
+jme_enable_tx_engine(struct jme_adapter *jme)
+{
+ /*
+ * Select Queue 0
+ */
+ jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
+ wmb();
+
+ /*
+ * Set up TX Queue 0 DMA Base Address
+ */
+ jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
+ jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
+ jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
+
+ /*
+ * Set up TX Descriptor Count
+ */
+ jwrite32(jme, JME_TXQDC, jme->tx_ring_size);
+
+ /*
+ * Enable TX Engine
+ */
+ wmb();
+ jwrite32f(jme, JME_TXCS, jme->reg_txcs |
+ TXCS_SELECT_QUEUE0 |
+ TXCS_ENABLE);
+
+ /*
+ * Start clock for TX MAC Processor
+ */
+ jme_mac_txclk_on(jme);
+}
+
+static inline void
+jme_restart_tx_engine(struct jme_adapter *jme)
+{
+ /*
+ * Restart TX Engine
+ */
+ jwrite32(jme, JME_TXCS, jme->reg_txcs |
+ TXCS_SELECT_QUEUE0 |
+ TXCS_ENABLE);
+}
+
+static inline void
+jme_disable_tx_engine(struct jme_adapter *jme)
+{
+ int i;
+ u32 val;
+
+ /*
+ * Disable TX Engine
+ */
+ jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
+ wmb();
+
+ val = jread32(jme, JME_TXCS);
+ for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
+ mdelay(1);
+ val = jread32(jme, JME_TXCS);
+ rmb();
+ }
+
+ if (!i)
+ pr_err("Disable TX engine timeout\n");
+
+ /*
+ * Stop clock for TX MAC Processor
+ */
+ jme_mac_txclk_off(jme);
+}
+
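+/*
+ * Rewrite RX descriptor i with its buffer address and length, then
+ * hand it back to the hardware by setting the OWN and INT flags.
+ */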
+static void
+jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
+{
+ struct jme_ring *rxring = &(jme->rxring[0]);
+ register struct rxdesc *rxdesc = rxring->desc;
+ struct jme_buffer_info *rxbi = rxring->bufinf;
+ rxdesc += i;
+ rxbi += i;
+
+ rxdesc->dw[0] = 0;
+ rxdesc->dw[1] = 0;
+ rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32);
+ rxdesc->desc1.bufaddrl = cpu_to_le32(
+ (__u64)rxbi->mapping & 0xFFFFFFFFUL);
+ rxdesc->desc1.datalen = cpu_to_le16(rxbi->len);
+ if (jme->dev->features & NETIF_F_HIGHDMA)
+ rxdesc->desc1.flags = RXFLAG_64BIT;
+ wmb();
+ rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT;
+}
+
+static int
+jme_make_new_rx_buf(struct jme_adapter *jme, int i)
+{
+ struct jme_ring *rxring = &(jme->rxring[0]);
+ struct jme_buffer_info *rxbi = rxring->bufinf + i;
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+
+ skb = netdev_alloc_skb(jme->dev,
+ jme->dev->mtu + RX_EXTRA_LEN);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ mapping = pci_map_page(jme->pdev, virt_to_page(skb->data),
+ offset_in_page(skb->data), skb_tailroom(skb),
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ if (likely(rxbi->mapping))
+ pci_unmap_page(jme->pdev, rxbi->mapping,
+ rxbi->len, PCI_DMA_FROMDEVICE);
+
+ rxbi->skb = skb;
+ rxbi->len = skb_tailroom(skb);
+ rxbi->mapping = mapping;
+ return 0;
+}
+
+static void
+jme_free_rx_buf(struct jme_adapter *jme, int i)
+{
+ struct jme_ring *rxring = &(jme->rxring[0]);
+ struct jme_buffer_info *rxbi = rxring->bufinf;
+ rxbi += i;
+
+ if (rxbi->skb) {
+ pci_unmap_page(jme->pdev,
+ rxbi->mapping,
+ rxbi->len,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(rxbi->skb);
+ rxbi->skb = NULL;
+ rxbi->mapping = 0;
+ rxbi->len = 0;
+ }
+}
+
+static void
+jme_free_rx_resources(struct jme_adapter *jme)
+{
+ int i;
+ struct jme_ring *rxring = &(jme->rxring[0]);
+
+ if (rxring->alloc) {
+ if (rxring->bufinf) {
+ for (i = 0 ; i < jme->rx_ring_size ; ++i)
+ jme_free_rx_buf(jme, i);
+ kfree(rxring->bufinf);
+ }
+
+ dma_free_coherent(&(jme->pdev->dev),
+ RX_RING_ALLOC_SIZE(jme->rx_ring_size),
+ rxring->alloc,
+ rxring->dmaalloc);
+ rxring->alloc = NULL;
+ rxring->desc = NULL;
+ rxring->dmaalloc = 0;
+ rxring->dma = 0;
+ rxring->bufinf = NULL;
+ }
+ rxring->next_to_use = 0;
+ atomic_set(&rxring->next_to_clean, 0);
+}
+
+static int
+jme_setup_rx_resources(struct jme_adapter *jme)
+{
+ int i;
+ struct jme_ring *rxring = &(jme->rxring[0]);
+
+ rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
+ RX_RING_ALLOC_SIZE(jme->rx_ring_size),
+ &(rxring->dmaalloc),
+ GFP_ATOMIC);
+ if (!rxring->alloc)
+ goto err_set_null;
+
+ /*
+ * Align the descriptor base to a 16-byte boundary
+ */
+ rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc),
+ RING_DESC_ALIGN);
+ rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
+ rxring->next_to_use = 0;
+ atomic_set(&rxring->next_to_clean, 0);
+
+ rxring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
+ jme->rx_ring_size, GFP_ATOMIC);
+ if (unlikely(!(rxring->bufinf)))
+ goto err_free_rxring;
+
+ /*
+ * Initialize Receive Descriptors
+ */
+ memset(rxring->bufinf, 0,
+ sizeof(struct jme_buffer_info) * jme->rx_ring_size);
+ for (i = 0 ; i < jme->rx_ring_size ; ++i) {
+ if (unlikely(jme_make_new_rx_buf(jme, i))) {
+ jme_free_rx_resources(jme);
+ return -ENOMEM;
+ }
+
+ jme_set_clean_rxdesc(jme, i);
+ }
+
+ return 0;
+
+err_free_rxring:
+ dma_free_coherent(&(jme->pdev->dev),
+ RX_RING_ALLOC_SIZE(jme->rx_ring_size),
+ rxring->alloc,
+ rxring->dmaalloc);
+err_set_null:
+ rxring->desc = NULL;
+ rxring->dmaalloc = 0;
+ rxring->dma = 0;
+ rxring->bufinf = NULL;
+
+ return -ENOMEM;
+}
+
+static inline void
+jme_enable_rx_engine(struct jme_adapter *jme)
+{
+ /*
+ * Select Queue 0
+ */
+ jwrite32(jme, JME_RXCS, jme->reg_rxcs |
+ RXCS_QUEUESEL_Q0);
+ wmb();
+
+ /*
+ * Set up RX DMA Base Address
+ */
+ jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
+ jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
+ jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
+
+ /*
+ * Setup RX Descriptor Count
+ */
+ jwrite32(jme, JME_RXQDC, jme->rx_ring_size);
+
+ /*
+ * Setup Unicast Filter
+ */
+ jme_set_unicastaddr(jme->dev);
+ jme_set_multi(jme->dev);
+
+ /*
+ * Enable RX Engine
+ */
+ wmb();
+ jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
+ RXCS_QUEUESEL_Q0 |
+ RXCS_ENABLE |
+ RXCS_QST);
+
+ /*
+ * Start clock for RX MAC Processor
+ */
+ jme_mac_rxclk_on(jme);
+}
+
+static inline void
+jme_restart_rx_engine(struct jme_adapter *jme)
+{
+ /*
+ * Start RX Engine
+ */
+ jwrite32(jme, JME_RXCS, jme->reg_rxcs |
+ RXCS_QUEUESEL_Q0 |
+ RXCS_ENABLE |
+ RXCS_QST);
+}
+
+static inline void
+jme_disable_rx_engine(struct jme_adapter *jme)
+{
+ int i;
+ u32 val;
+
+ /*
+ * Disable RX Engine
+ */
+ jwrite32(jme, JME_RXCS, jme->reg_rxcs);
+ wmb();
+
+ val = jread32(jme, JME_RXCS);
+ for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
+ mdelay(1);
+ val = jread32(jme, JME_RXCS);
+ rmb();
+ }
+
+ if (!i)
+ pr_err("Disable RX engine timeout\n");
+
+ /*
+ * Stop clock for RX MAC Processor
+ */
+ jme_mac_rxclk_off(jme);
+}
+
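+/*
+ * Return the raw UDP checksum field of an IPv4/UDP frame, or 0xFFFF
+ * if the frame is not IPv4/UDP. jme_rxsum_ok() uses this to tell a
+ * real UDP checksum failure from a datagram that simply carries no
+ * checksum (checksum field == 0).
+ */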
+static u16
+jme_udpsum(struct sk_buff *skb)
+{
+ u16 csum = 0xFFFFu;
+
+ if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
+ return csum;
+ if (skb->protocol != htons(ETH_P_IP))
+ return csum;
+ skb_set_network_header(skb, ETH_HLEN);
+ if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
+ (skb->len < (ETH_HLEN +
+ (ip_hdr(skb)->ihl << 2) +
+ sizeof(struct udphdr)))) {
+ skb_reset_network_header(skb);
+ return csum;
+ }
+ skb_set_transport_header(skb,
+ ETH_HLEN + (ip_hdr(skb)->ihl << 2));
+ csum = udp_hdr(skb)->check;
+ skb_reset_transport_header(skb);
+ skb_reset_network_header(skb);
+
+ return csum;
+}
+
+static int
+jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
+{
+ if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
+ return false;
+
+ if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
+ == RXWBFLAG_TCPON)) {
+ if (flags & RXWBFLAG_IPV4)
+ netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n");
+ return false;
+ }
+
+ if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
+ == RXWBFLAG_UDPON) && jme_udpsum(skb)) {
+ if (flags & RXWBFLAG_IPV4)
+ netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
+ return false;
+ }
+
+ if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
+ == RXWBFLAG_IPV4)) {
+ netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void
+jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
+{
+ struct jme_ring *rxring = &(jme->rxring[0]);
+ struct rxdesc *rxdesc = rxring->desc;
+ struct jme_buffer_info *rxbi = rxring->bufinf;
+ struct sk_buff *skb;
+ int framesize;
+
+ rxdesc += idx;
+ rxbi += idx;
+
+ skb = rxbi->skb;
+ pci_dma_sync_single_for_cpu(jme->pdev,
+ rxbi->mapping,
+ rxbi->len,
+ PCI_DMA_FROMDEVICE);
+
+ if (unlikely(jme_make_new_rx_buf(jme, idx))) {
+ pci_dma_sync_single_for_device(jme->pdev,
+ rxbi->mapping,
+ rxbi->len,
+ PCI_DMA_FROMDEVICE);
+
+ ++(NET_STAT(jme).rx_dropped);
+ } else {
+ framesize = le16_to_cpu(rxdesc->descwb.framesize)
+ - RX_PREPAD_SIZE;
+
+ skb_reserve(skb, RX_PREPAD_SIZE);
+ skb_put(skb, framesize);
+ skb->protocol = eth_type_trans(skb, jme->dev);
+
+ if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+
+ if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
+ u16 vid = le16_to_cpu(rxdesc->descwb.vlan);
+
+ __vlan_hwaccel_put_tag(skb, vid);
+ NET_STAT(jme).rx_bytes += 4;
+ }
+ jme->jme_rx(skb);
+
+ if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) ==
+ cpu_to_le16(RXWBFLAG_DEST_MUL))
+ ++(NET_STAT(jme).multicast);
+
+ NET_STAT(jme).rx_bytes += framesize;
+ ++(NET_STAT(jme).rx_packets);
+ }
+
+ jme_set_clean_rxdesc(jme, idx);
+}
+
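+/*
+ * Reap up to limit frames from RX ring 0. Errored and
+ * multi-descriptor frames are recycled in place; good frames are
+ * passed up the stack via jme_alloc_and_feed_skb(). Returns the
+ * unused budget.
+ */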
+static int
+jme_process_receive(struct jme_adapter *jme, int limit)
+{
+ struct jme_ring *rxring = &(jme->rxring[0]);
+ struct rxdesc *rxdesc = rxring->desc;
+ int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;
+
+ if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
+ goto out_inc;
+
+ if (unlikely(atomic_read(&jme->link_changing) != 1))
+ goto out_inc;
+
+ if (unlikely(!netif_carrier_ok(jme->dev)))
+ goto out_inc;
+
+ i = atomic_read(&rxring->next_to_clean);
+ while (limit > 0) {
+ rxdesc = rxring->desc;
+ rxdesc += i;
+
+ if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
+ !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
+ goto out;
+ --limit;
+
+ rmb();
+ desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;
+
+ if (unlikely(desccnt > 1 ||
+ rxdesc->descwb.errstat & RXWBERR_ALLERR)) {
+
+ if (rxdesc->descwb.errstat & RXWBERR_CRCERR)
+ ++(NET_STAT(jme).rx_crc_errors);
+ else if (rxdesc->descwb.errstat & RXWBERR_OVERUN)
+ ++(NET_STAT(jme).rx_fifo_errors);
+ else
+ ++(NET_STAT(jme).rx_errors);
+
+ if (desccnt > 1)
+ limit -= desccnt - 1;
+
+ for (j = i, ccnt = desccnt ; ccnt-- ; ) {
+ jme_set_clean_rxdesc(jme, j);
+ j = (j + 1) & (mask);
+ }
+
+ } else {
+ jme_alloc_and_feed_skb(jme, i);
+ }
+
+ i = (i + desccnt) & (mask);
+ }
+
+out:
+ atomic_set(&rxring->next_to_clean, i);
+
+out_inc:
+ atomic_inc(&jme->rx_cleaning);
+
+ return limit > 0 ? limit : 0;
+}
+
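+/*
+ * Vote for coalescing level atmp: reset the counter if we are already
+ * at that level, otherwise count consecutive votes for it.
+ * jme_dynamic_pcc() commits a switch only after enough votes.
+ */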
+static void
+jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
+{
+ if (likely(atmp == dpi->cur)) {
+ dpi->cnt = 0;
+ return;
+ }
+
+ if (dpi->attempt == atmp) {
+ ++(dpi->cnt);
+ } else {
+ dpi->attempt = atmp;
+ dpi->cnt = 0;
+ }
+}
+
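+/*
+ * Pick the RX coalescing level (P1..P3) from the byte and packet
+ * rates seen since the last PCC timer tick, and commit the change
+ * once the same level has won more than five consecutive ticks.
+ */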
+static void
+jme_dynamic_pcc(struct jme_adapter *jme)
+{
+ register struct dynpcc_info *dpi = &(jme->dpi);
+
+ if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
+ jme_attempt_pcc(dpi, PCC_P3);
+ else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD ||
+ dpi->intr_cnt > PCC_INTR_THRESHOLD)
+ jme_attempt_pcc(dpi, PCC_P2);
+ else
+ jme_attempt_pcc(dpi, PCC_P1);
+
+ if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
+ if (dpi->attempt < dpi->cur)
+ tasklet_schedule(&jme->rxclean_task);
+ jme_set_rx_pcc(jme, dpi->attempt);
+ dpi->cur = dpi->attempt;
+ dpi->cnt = 0;
+ }
+}
+
+static void
+jme_start_pcc_timer(struct jme_adapter *jme)
+{
+ struct dynpcc_info *dpi = &(jme->dpi);
+ dpi->last_bytes = NET_STAT(jme).rx_bytes;
+ dpi->last_pkts = NET_STAT(jme).rx_packets;
+ dpi->intr_cnt = 0;
+ jwrite32(jme, JME_TMCSR,
+ TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
+}
+
+static inline void
+jme_stop_pcc_timer(struct jme_adapter *jme)
+{
+ jwrite32(jme, JME_TMCSR, 0);
+}
+
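+/*
+ * Run from the PCC tasklet while JME_FLAG_SHUTDOWN is set: once the
+ * PHY reports link down, mask interrupts and rearm TIMER2.
+ */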
+static void
+jme_shutdown_nic(struct jme_adapter *jme)
+{
+ u32 phylink;
+
+ phylink = jme_linkstat_from_phy(jme);
+
+ if (!(phylink & PHY_LINK_UP)) {
+ /*
+ * Disable all interrupts before arming the timer
+ */
+ jme_stop_irq(jme);
+ jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE);
+ }
+}
+
+static void
+jme_pcc_tasklet(unsigned long arg)
+{
+ struct jme_adapter *jme = (struct jme_adapter *)arg;
+ struct net_device *netdev = jme->dev;
+
+ if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
+ jme_shutdown_nic(jme);
+ return;
+ }
+
+ if (unlikely(!netif_carrier_ok(netdev) ||
+ (atomic_read(&jme->link_changing) != 1)
+ )) {
+ jme_stop_pcc_timer(jme);
+ return;
+ }
+
+ if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
+ jme_dynamic_pcc(jme);
+
+ jme_start_pcc_timer(jme);
+}
+
+static inline void
+jme_polling_mode(struct jme_adapter *jme)
+{
+ jme_set_rx_pcc(jme, PCC_OFF);
+}
+
+static inline void
+jme_interrupt_mode(struct jme_adapter *jme)
+{
+ jme_set_rx_pcc(jme, PCC_P1);
+}
+
+static inline int
+jme_pseudo_hotplug_enabled(struct jme_adapter *jme)
+{
+ u32 apmc;
+ apmc = jread32(jme, JME_APMC);
+ return apmc & JME_APMC_PSEUDO_HP_EN;
+}
+
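+/*
+ * Arm the pseudo hot-plug shutdown: enable PCIe shutdown in APMC,
+ * set JME_FLAG_SHUTDOWN and start the countdown timer.
+ */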
+static void
+jme_start_shutdown_timer(struct jme_adapter *jme)
+{
+ u32 apmc;
+
+ apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN;
+ apmc &= ~JME_APMC_EPIEN_CTRL;
+ if (!no_extplug) {
+ jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN);
+ wmb();
+ }
+ jwrite32f(jme, JME_APMC, apmc);
+
+ jwrite32f(jme, JME_TIMER2, 0);
+ set_bit(JME_FLAG_SHUTDOWN, &jme->flags);
+ jwrite32(jme, JME_TMCSR,
+ TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT));
+}
+
+static void
+jme_stop_shutdown_timer(struct jme_adapter *jme)
+{
+ u32 apmc;
+
+ jwrite32f(jme, JME_TMCSR, 0);
+ jwrite32f(jme, JME_TIMER2, 0);
+ clear_bit(JME_FLAG_SHUTDOWN, &jme->flags);
+
+ apmc = jread32(jme, JME_APMC);
+ apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL);
+ jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS);
+ wmb();
+ jwrite32f(jme, JME_APMC, apmc);
+}
+
+static void
+jme_link_change_tasklet(unsigned long arg)
+{
+ struct jme_adapter *jme = (struct jme_adapter *)arg;
+ struct net_device *netdev = jme->dev;
+ int rc;
+
+ while (!atomic_dec_and_test(&jme->link_changing)) {
+ atomic_inc(&jme->link_changing);
+ netif_info(jme, intr, jme->dev, "Failed to get link change lock\n");
+ while (atomic_read(&jme->link_changing) != 1)
+ netif_info(jme, intr, jme->dev, "Waiting for link change lock\n");
+ }
+
+ if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
+ goto out;
+
+ jme->old_mtu = netdev->mtu;
+ netif_stop_queue(netdev);
+ if (jme_pseudo_hotplug_enabled(jme))
+ jme_stop_shutdown_timer(jme);
+
+ jme_stop_pcc_timer(jme);
+ tasklet_disable(&jme->txclean_task);
+ tasklet_disable(&jme->rxclean_task);
+ tasklet_disable(&jme->rxempty_task);
+
+ if (netif_carrier_ok(netdev)) {
+ jme_disable_rx_engine(jme);
+ jme_disable_tx_engine(jme);
+ jme_reset_mac_processor(jme);
+ jme_free_rx_resources(jme);
+ jme_free_tx_resources(jme);
+
+ if (test_bit(JME_FLAG_POLL, &jme->flags))
+ jme_polling_mode(jme);
+
+ netif_carrier_off(netdev);
+ }
+
+ jme_check_link(netdev, 0);
+ if (netif_carrier_ok(netdev)) {
+ rc = jme_setup_rx_resources(jme);
+ if (rc) {
+ pr_err("Allocating resources for RX error, Device STOPPED!\n");
+ goto out_enable_tasklet;
+ }
+
+ rc = jme_setup_tx_resources(jme);
+ if (rc) {
+ pr_err("Allocating resources for TX error, Device STOPPED!\n");
+ goto err_out_free_rx_resources;
+ }
+
+ jme_enable_rx_engine(jme);
+ jme_enable_tx_engine(jme);
+
+ netif_start_queue(netdev);
+
+ if (test_bit(JME_FLAG_POLL, &jme->flags))
+ jme_interrupt_mode(jme);
+
+ jme_start_pcc_timer(jme);
+ } else if (jme_pseudo_hotplug_enabled(jme)) {
+ jme_start_shutdown_timer(jme);
+ }
+
+ goto out_enable_tasklet;
+
+err_out_free_rx_resources:
+ jme_free_rx_resources(jme);
+out_enable_tasklet:
+ tasklet_enable(&jme->txclean_task);
+ tasklet_hi_enable(&jme->rxclean_task);
+ tasklet_hi_enable(&jme->rxempty_task);
+out:
+ atomic_inc(&jme->link_changing);
+}
+
+static void
+jme_rx_clean_tasklet(unsigned long arg)
+{
+ struct jme_adapter *jme = (struct jme_adapter *)arg;
+ struct dynpcc_info *dpi = &(jme->dpi);
+
+ jme_process_receive(jme, jme->rx_ring_size);
+ ++(dpi->intr_cnt);
+}
+
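+/*
+ * NAPI poll handler: reap received frames, restart the RX engine if
+ * it ran empty, and switch back to interrupt mode once the ring is
+ * drained within budget.
+ */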
+static int
+jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
+{
+ struct jme_adapter *jme = jme_napi_priv(holder);
+ int rest;
+
+ rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));
+
+ while (atomic_read(&jme->rx_empty) > 0) {
+ atomic_dec(&jme->rx_empty);
+ ++(NET_STAT(jme).rx_dropped);
+ jme_restart_rx_engine(jme);
+ }
+ atomic_inc(&jme->rx_empty);
+
+ if (rest) {
+ JME_RX_COMPLETE(netdev, holder);
+ jme_interrupt_mode(jme);
+ }
+
+ JME_NAPI_WEIGHT_SET(budget, rest);
+ return JME_NAPI_WEIGHT_VAL(budget) - rest;
+}
+
+static void
+jme_rx_empty_tasklet(unsigned long arg)
+{
+ struct jme_adapter *jme = (struct jme_adapter *)arg;
+
+ if (unlikely(atomic_read(&jme->link_changing) != 1))
+ return;
+
+ if (unlikely(!netif_carrier_ok(jme->dev)))
+ return;
+
+ netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");
+
+ jme_rx_clean_tasklet(arg);
+
+ while (atomic_read(&jme->rx_empty) > 0) {
+ atomic_dec(&jme->rx_empty);
+ ++(NET_STAT(jme).rx_dropped);
+ jme_restart_rx_engine(jme);
+ }
+ atomic_inc(&jme->rx_empty);
+}
+
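+/*
+ * Wake the TX queue once enough descriptors have been reclaimed to
+ * reach jme->tx_wake_threshold.
+ */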
+static void
+jme_wake_queue_if_stopped(struct jme_adapter *jme)
+{
+ struct jme_ring *txring = &(jme->txring[0]);
+
+ smp_wmb();
+ if (unlikely(netif_queue_stopped(jme->dev) &&
+ atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
+ netif_info(jme, tx_done, jme->dev, "TX Queue Woken\n");
+ netif_wake_queue(jme->dev);
+ }
+}
+
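+/*
+ * Reclaim completed TX descriptors: for each transmitted skb (OWN
+ * bit cleared by hardware), unmap its fragments, free it, update the
+ * counters and return its descriptors to the free pool.
+ */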
+static void
+jme_tx_clean_tasklet(unsigned long arg)
+{
+ struct jme_adapter *jme = (struct jme_adapter *)arg;
+ struct jme_ring *txring = &(jme->txring[0]);
+ struct txdesc *txdesc = txring->desc;
+ struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
+ int i, j, cnt = 0, max, err, mask;
+
+ tx_dbg(jme, "Into txclean\n");
+
+ if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
+ goto out;
+
+ if (unlikely(atomic_read(&jme->link_changing) != 1))
+ goto out;
+
+ if (unlikely(!netif_carrier_ok(jme->dev)))
+ goto out;
+
+ max = jme->tx_ring_size - atomic_read(&txring->nr_free);
+ mask = jme->tx_ring_mask;
+
+ for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {
+
+ ctxbi = txbi + i;
+
+ if (likely(ctxbi->skb &&
+ !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {
+
+ tx_dbg(jme, "txclean: %d+%d@%lu\n",
+ i, ctxbi->nr_desc, jiffies);
+
+ err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;
+
+ for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
+ ttxbi = txbi + ((i + j) & (mask));
+ txdesc[(i + j) & (mask)].dw[0] = 0;
+
+ pci_unmap_page(jme->pdev,
+ ttxbi->mapping,
+ ttxbi->len,
+ PCI_DMA_TODEVICE);
+
+ ttxbi->mapping = 0;
+ ttxbi->len = 0;
+ }
+
+ dev_kfree_skb(ctxbi->skb);
+
+ cnt += ctxbi->nr_desc;
+
+ if (unlikely(err)) {
+ ++(NET_STAT(jme).tx_carrier_errors);
+ } else {
+ ++(NET_STAT(jme).tx_packets);
+ NET_STAT(jme).tx_bytes += ctxbi->len;
+ }
+
+ ctxbi->skb = NULL;
+ ctxbi->len = 0;
+ ctxbi->start_xmit = 0;
+
+ } else {
+ break;
+ }
+
+ i = (i + ctxbi->nr_desc) & mask;
+
+ ctxbi->nr_desc = 0;
+ }
+
+ tx_dbg(jme, "txclean: done %d@%lu\n", i, jiffies);
+ atomic_set(&txring->next_to_clean, i);
+ atomic_add(cnt, &txring->nr_free);
+
+ jme_wake_queue_if_stopped(jme);
+
+out:
+ atomic_inc(&jme->tx_cleaning);
+}
+
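+/*
+ * Common interrupt body for INTx and MSI: mask interrupts, ack the
+ * pending sources, kick the matching tasklets (or NAPI when polling)
+ * and unmask again.
+ */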
+static void
+jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
+{
+ /*
+ * Disable interrupt
+ */
+ jwrite32f(jme, JME_IENC, INTR_ENABLE);
+
+ if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
+ /*
+ * A link change event is critical;
+ * all other events are ignored
+ */
+ jwrite32(jme, JME_IEVE, intrstat);
+ tasklet_schedule(&jme->linkch_task);
+ goto out_reenable;
+ }
+
+ if (intrstat & INTR_TMINTR) {
+ jwrite32(jme, JME_IEVE, INTR_TMINTR);
+ tasklet_schedule(&jme->pcc_task);
+ }
+
+ if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
+ jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
+ tasklet_schedule(&jme->txclean_task);
+ }
+
+ if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
+ jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO |
+ INTR_PCCRX0 |
+ INTR_RX0EMP)) |
+ INTR_RX0);
+ }
+
+ if (test_bit(JME_FLAG_POLL, &jme->flags)) {
+ if (intrstat & INTR_RX0EMP)
+ atomic_inc(&jme->rx_empty);
+
+ if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
+ if (likely(JME_RX_SCHEDULE_PREP(jme))) {
+ jme_polling_mode(jme);
+ JME_RX_SCHEDULE(jme);
+ }
+ }
+ } else {
+ if (intrstat & INTR_RX0EMP) {
+ atomic_inc(&jme->rx_empty);
+ tasklet_hi_schedule(&jme->rxempty_task);
+ } else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
+ tasklet_hi_schedule(&jme->rxclean_task);
+ }
+ }
+
+out_reenable:
+ /*
+ * Re-enable interrupt
+ */
+ jwrite32f(jme, JME_IENS, INTR_ENABLE);
+}
+
+static irqreturn_t
+jme_intr(int irq, void *dev_id)
+{
+ struct net_device *netdev = dev_id;
+ struct jme_adapter *jme = netdev_priv(netdev);
+ u32 intrstat;
+
+ intrstat = jread32(jme, JME_IEVE);
+
+ /*
+ * Check if it's really an interrupt for us
+ */
+ if (unlikely((intrstat & INTR_ENABLE) == 0))
+ return IRQ_NONE;
+
+ /*
+ * Check if the device still exists
+ */
+ if (unlikely(intrstat == ~((typeof(intrstat))0)))
+ return IRQ_NONE;
+
+ jme_intr_msi(jme, intrstat);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+jme_msi(int irq, void *dev_id)
+{
+ struct net_device *netdev = dev_id;
+ struct jme_adapter *jme = netdev_priv(netdev);
+ u32 intrstat;
+
+ intrstat = jread32(jme, JME_IEVE);
+
+ jme_intr_msi(jme, intrstat);
+
+ return IRQ_HANDLED;
+}
+
+static void
+jme_reset_link(struct jme_adapter *jme)
+{
+ jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
+}
+
+static void
+jme_restart_an(struct jme_adapter *jme)
+{
+ u32 bmcr;
+
+ spin_lock_bh(&jme->phy_lock);
+ bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+ spin_unlock_bh(&jme->phy_lock);
+}
+
+static int
+jme_request_irq(struct jme_adapter *jme)
+{
+ int rc;
+ struct net_device *netdev = jme->dev;
+ irq_handler_t handler = jme_intr;
+ int irq_flags = IRQF_SHARED;
+
+ if (!pci_enable_msi(jme->pdev)) {
+ set_bit(JME_FLAG_MSI, &jme->flags);
+ handler = jme_msi;
+ irq_flags = 0;
+ }
+
+ rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
+ netdev);
+ if (rc) {
+ netdev_err(netdev,
+ "Unable to request %s interrupt (return: %d)\n",
+ test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
+ rc);
+
+ if (test_bit(JME_FLAG_MSI, &jme->flags)) {
+ pci_disable_msi(jme->pdev);
+ clear_bit(JME_FLAG_MSI, &jme->flags);
+ }
+ } else {
+ netdev->irq = jme->pdev->irq;
+ }
+
+ return rc;
+}
+
+static void
+jme_free_irq(struct jme_adapter *jme)
+{
+ free_irq(jme->pdev->irq, jme->dev);
+ if (test_bit(JME_FLAG_MSI, &jme->flags)) {
+ pci_disable_msi(jme->pdev);
+ clear_bit(JME_FLAG_MSI, &jme->flags);
+ jme->dev->irq = jme->pdev->irq;
+ }
+}
+
+static inline void
+jme_new_phy_on(struct jme_adapter *jme)
+{
+ u32 reg;
+
+ reg = jread32(jme, JME_PHY_PWR);
+ reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
+ PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
+ jwrite32(jme, JME_PHY_PWR, reg);
+
+ pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
+ reg &= ~PE1_GPREG0_PBG;
+ reg |= PE1_GPREG0_ENBG;
+ pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
+}
+
+static inline void
+jme_new_phy_off(struct jme_adapter *jme)
+{
+ u32 reg;
+
+ reg = jread32(jme, JME_PHY_PWR);
+ reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
+ PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
+ jwrite32(jme, JME_PHY_PWR, reg);
+
+ pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
+ reg &= ~PE1_GPREG0_PBG;
+ reg |= PE1_GPREG0_PDD3COLD;
+ pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
+}
+
+static inline void
+jme_phy_on(struct jme_adapter *jme)
+{
+ u32 bmcr;
+
+ bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
+ bmcr &= ~BMCR_PDOWN;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+
+ if (new_phy_power_ctrl(jme->chip_main_rev))
+ jme_new_phy_on(jme);
+}
+
+static inline void
+jme_phy_off(struct jme_adapter *jme)
+{
+ u32 bmcr;
+
+ bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
+ bmcr |= BMCR_PDOWN;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+
+ if (new_phy_power_ctrl(jme->chip_main_rev))
+ jme_new_phy_off(jme);
+}
+
+static int
+jme_open(struct net_device *netdev)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ int rc;
+
+ jme_clear_pm(jme);
+ JME_NAPI_ENABLE(jme);
+
+ tasklet_enable(&jme->linkch_task);
+ tasklet_enable(&jme->txclean_task);
+ tasklet_hi_enable(&jme->rxclean_task);
+ tasklet_hi_enable(&jme->rxempty_task);
+
+ rc = jme_request_irq(jme);
+ if (rc)
+ goto err_out;
+
+ jme_start_irq(jme);
+
+ jme_phy_on(jme);
+ if (test_bit(JME_FLAG_SSET, &jme->flags))
+ jme_set_settings(netdev, &jme->old_ecmd);
+ else
+ jme_reset_phy_processor(jme);
+
+ jme_reset_link(jme);
+
+ return 0;
+
+err_out:
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+ return rc;
+}
+
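+/*
+ * Force the PHY to 100 Mbps half duplex; used by jme_powersave_phy()
+ * when wake-on-LAN is enabled.
+ */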
+static void
+jme_set_100m_half(struct jme_adapter *jme)
+{
+ u32 bmcr, tmp;
+
+ jme_phy_on(jme);
+ bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
+ tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
+ BMCR_SPEED1000 | BMCR_FULLDPLX);
+ tmp |= BMCR_SPEED100;
+
+ if (bmcr != tmp)
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);
+
+ if (jme->fpgaver)
+ jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
+ else
+ jwrite32(jme, JME_GHC, GHC_SPEED_100M);
+}
+
+#define JME_WAIT_LINK_TIME 2000 /* 2000ms */
+static void
+jme_wait_link(struct jme_adapter *jme)
+{
+ u32 phylink, to = JME_WAIT_LINK_TIME;
+
+ mdelay(1000);
+ phylink = jme_linkstat_from_phy(jme);
+ while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
+ mdelay(10);
+ phylink = jme_linkstat_from_phy(jme);
+ }
+}
+
+static void
+jme_powersave_phy(struct jme_adapter *jme)
+{
+ if (jme->reg_pmcs) {
+ jme_set_100m_half(jme);
+ if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
+ jme_wait_link(jme);
+ jme_clear_pm(jme);
+ } else {
+ jme_phy_off(jme);
+ }
+}
+
+static int
+jme_close(struct net_device *netdev)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+
+ jme_stop_irq(jme);
+ jme_free_irq(jme);
+
+ JME_NAPI_DISABLE(jme);
+
+ tasklet_disable(&jme->linkch_task);
+ tasklet_disable(&jme->txclean_task);
+ tasklet_disable(&jme->rxclean_task);
+ tasklet_disable(&jme->rxempty_task);
+
+ jme_disable_rx_engine(jme);
+ jme_disable_tx_engine(jme);
+ jme_reset_mac_processor(jme);
+ jme_free_rx_resources(jme);
+ jme_free_tx_resources(jme);
+ jme->phylink = 0;
+ jme_phy_off(jme);
+
+ return 0;
+}
+
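+/*
+ * Reserve nr_frags + 2 descriptors: one control descriptor at idx
+ * (filled in by jme_fill_tx_desc()), one for the linear part of the
+ * skb, and one per page fragment. Returns the start index, or -1 if
+ * the ring is too full.
+ */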
+static int
+jme_alloc_txdesc(struct jme_adapter *jme,
+ struct sk_buff *skb)
+{
+ struct jme_ring *txring = &(jme->txring[0]);
+ int idx, nr_alloc, mask = jme->tx_ring_mask;
+
+ idx = txring->next_to_use;
+ nr_alloc = skb_shinfo(skb)->nr_frags + 2;
+
+ if (unlikely(atomic_read(&txring->nr_free) < nr_alloc))
+ return -1;
+
+ atomic_sub(nr_alloc, &txring->nr_free);
+
+ txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;
+
+ return idx;
+}
+
+static void
+jme_fill_tx_map(struct pci_dev *pdev,
+ struct txdesc *txdesc,
+ struct jme_buffer_info *txbi,
+ struct page *page,
+ u32 page_offset,
+ u32 len,
+ u8 hidma)
+{
+ dma_addr_t dmaaddr;
+
+ dmaaddr = pci_map_page(pdev,
+ page,
+ page_offset,
+ len,
+ PCI_DMA_TODEVICE);
+
+ pci_dma_sync_single_for_device(pdev,
+ dmaaddr,
+ len,
+ PCI_DMA_TODEVICE);
+
+ txdesc->dw[0] = 0;
+ txdesc->dw[1] = 0;
+ txdesc->desc2.flags = TXFLAG_OWN;
+ txdesc->desc2.flags |= (hidma) ? TXFLAG_64BIT : 0;
+ txdesc->desc2.datalen = cpu_to_le16(len);
+ txdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32);
+ txdesc->desc2.bufaddrl = cpu_to_le32(
+ (__u64)dmaaddr & 0xFFFFFFFFUL);
+
+ txbi->mapping = dmaaddr;
+ txbi->len = len;
+}
+
+static void
+jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
+{
+ struct jme_ring *txring = &(jme->txring[0]);
+ struct txdesc *txdesc = txring->desc, *ctxdesc;
+ struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+ u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
+ int i, nr_frags = skb_shinfo(skb)->nr_frags;
+ int mask = jme->tx_ring_mask;
+ struct skb_frag_struct *frag;
+ u32 len;
+
+ for (i = 0 ; i < nr_frags ; ++i) {
+ frag = &skb_shinfo(skb)->frags[i];
+ ctxdesc = txdesc + ((idx + i + 2) & (mask));
+ ctxbi = txbi + ((idx + i + 2) & (mask));
+
+ jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+ skb_frag_page(frag),
+ frag->page_offset, frag->size, hidma);
+ }
+
+ len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
+ ctxdesc = txdesc + ((idx + 1) & (mask));
+ ctxbi = txbi + ((idx + 1) & (mask));
+ jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
+ offset_in_page(skb->data), len, hidma);
+}
+
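+/*
+ * TSO rewrites the TCP/IP headers in place, so a GSO skb with cloned
+ * header data must be uncloned first. Frees the skb and returns -1
+ * on failure.
+ */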
+static int
+jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
+{
+ if (unlikely(skb_shinfo(skb)->gso_size &&
+ skb_header_cloned(skb) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
+ dev_kfree_skb(skb);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
+{
+ *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
+ if (*mss) {
+ *flags |= TXFLAG_LSEN;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ } else {
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
+ &ip6h->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ }
+
+ return 0;
+ }
+
+ return 1;
+}
+
+static void
+jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
+{
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 ip_proto;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ ip_proto = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ ip_proto = 0;
+ break;
+ }
+
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ *flags |= TXFLAG_TCPCS;
+ break;
+ case IPPROTO_UDP:
+ *flags |= TXFLAG_UDPCS;
+ break;
+ default:
+ netif_err(jme, tx_err, jme->dev, "Unsupported upper-layer protocol\n");
+ break;
+ }
+ }
+}
+
+static inline void
+jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
+{
+ if (vlan_tx_tag_present(skb)) {
+ *flags |= TXFLAG_TAGON;
+ *vlan = cpu_to_le16(vlan_tx_tag_get(skb));
+ }
+}
+
+static int
+jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
+{
+ struct jme_ring *txring = &(jme->txring[0]);
+ struct txdesc *txdesc;
+ struct jme_buffer_info *txbi;
+ u8 flags;
+
+ txdesc = (struct txdesc *)txring->desc + idx;
+ txbi = txring->bufinf + idx;
+
+ txdesc->dw[0] = 0;
+ txdesc->dw[1] = 0;
+ txdesc->dw[2] = 0;
+ txdesc->dw[3] = 0;
+ txdesc->desc1.pktsize = cpu_to_le16(skb->len);
+ /*
+ * Set the OWN bit last. If the kernel transmits faster than the
+ * NIC sends, the NIC may try to send this descriptor before we
+ * tell it to start sending this TX queue. By then all other
+ * fields must already be filled in correctly.
+ */
+ wmb();
+ flags = TXFLAG_OWN | TXFLAG_INT;
+ /*
+ * Set checksum flags when not doing TSO
+ */
+ if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
+ jme_tx_csum(jme, skb, &flags);
+ jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
+ jme_map_tx_skb(jme, skb, idx);
+ txdesc->desc1.flags = flags;
+ /*
+ * Set the TX buffer info after telling the NIC to send,
+ * for better tx_clean timing
+ */
+ wmb();
+ txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
+ txbi->skb = skb;
+ txbi->len = skb->len;
+ txbi->start_xmit = jiffies;
+ if (!txbi->start_xmit)
+ txbi->start_xmit = (0UL-1);
+
+ return 0;
+}
+
+static void
+jme_stop_queue_if_full(struct jme_adapter *jme)
+{
+ struct jme_ring *txring = &(jme->txring[0]);
+ struct jme_buffer_info *txbi = txring->bufinf;
+ int idx = atomic_read(&txring->next_to_clean);
+
+ txbi += idx;
+
+ smp_wmb();
+ if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
+ netif_stop_queue(jme->dev);
+ netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n");
+ smp_wmb();
+ if (atomic_read(&txring->nr_free)
+ >= (jme->tx_wake_threshold)) {
+ netif_wake_queue(jme->dev);
+ netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Woken\n");
+ }
+ }
+
+ if (unlikely(txbi->start_xmit &&
+ (jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
+ txbi->skb)) {
+ netif_stop_queue(jme->dev);
+ netif_info(jme, tx_queued, jme->dev,
+ "TX Queue Stopped %d@%lu\n", idx, jiffies);
+ }
+}
+
+/*
+ * This function is already protected by netif_tx_lock()
+ */
+
+static netdev_tx_t
+jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ int idx;
+
+ if (unlikely(jme_expand_header(jme, skb))) {
+ ++(NET_STAT(jme).tx_dropped);
+ return NETDEV_TX_OK;
+ }
+
+ idx = jme_alloc_txdesc(jme, skb);
+
+ if (unlikely(idx < 0)) {
+ netif_stop_queue(netdev);
+ netif_err(jme, tx_err, jme->dev,
+ "BUG! Tx ring full when queue awake!\n");
+
+ return NETDEV_TX_BUSY;
+ }
+
+ jme_fill_tx_desc(jme, skb, idx);
+
+ jwrite32(jme, JME_TXCS, jme->reg_txcs |
+ TXCS_SELECT_QUEUE0 |
+ TXCS_QUEUE0S |
+ TXCS_ENABLE);
+
+ tx_dbg(jme, "xmit: %d+%d@%lu\n",
+ idx, skb_shinfo(skb)->nr_frags + 2, jiffies);
+ jme_stop_queue_if_full(jme);
+
+ return NETDEV_TX_OK;
+}
+
+static void
+jme_set_unicastaddr(struct net_device *netdev)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ u32 val;
+
+ val = (netdev->dev_addr[3] & 0xff) << 24 |
+ (netdev->dev_addr[2] & 0xff) << 16 |
+ (netdev->dev_addr[1] & 0xff) << 8 |
+ (netdev->dev_addr[0] & 0xff);
+ jwrite32(jme, JME_RXUMA_LO, val);
+ val = (netdev->dev_addr[5] & 0xff) << 8 |
+ (netdev->dev_addr[4] & 0xff);
+ jwrite32(jme, JME_RXUMA_HI, val);
+}
+
+static int
+jme_set_macaddr(struct net_device *netdev, void *p)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ spin_lock_bh(&jme->macaddr_lock);
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ jme_set_unicastaddr(netdev);
+ spin_unlock_bh(&jme->macaddr_lock);
+
+ return 0;
+}
+
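+/*
+ * Program the RX filter: promiscuous, all-multicast, or a 64-bit
+ * multicast hash computed with ether_crc().
+ */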
+static void
+jme_set_multi(struct net_device *netdev)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ u32 mc_hash[2] = {};
+
+ spin_lock_bh(&jme->rxmcs_lock);
+
+ jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;
+
+ if (netdev->flags & IFF_PROMISC) {
+ jme->reg_rxmcs |= RXMCS_ALLFRAME;
+ } else if (netdev->flags & IFF_ALLMULTI) {
+ jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
+ } else if (netdev->flags & IFF_MULTICAST) {
+ struct netdev_hw_addr *ha;
+ int bit_nr;
+
+ jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
+ netdev_for_each_mc_addr(ha, netdev) {
+ bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F;
+ mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
+ }
+
+ jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
+ jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
+ }
+
+ wmb();
+ jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
+
+ spin_unlock_bh(&jme->rxmcs_lock);
+}
+
+static int
+jme_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ if (new_mtu == jme->old_mtu)
+ return 0;
+
+ if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
+ ((new_mtu) < IPV6_MIN_MTU))
+ return -EINVAL;
+
+ if (new_mtu > 4000) {
+ jme->reg_rxcs &= ~RXCS_FIFOTHNP;
+ jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
+ jme_restart_rx_engine(jme);
+ } else {
+ jme->reg_rxcs &= ~RXCS_FIFOTHNP;
+ jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
+ jme_restart_rx_engine(jme);
+ }
+
+ netdev->mtu = new_mtu;
+ netdev_update_features(netdev);
+
+ jme_reset_link(jme);
+
+ return 0;
+}
+
+static void
+jme_tx_timeout(struct net_device *netdev)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ jme->phylink = 0;
+ jme_reset_phy_processor(jme);
+ if (test_bit(JME_FLAG_SSET, &jme->flags))
+ jme_set_settings(netdev, &jme->old_ecmd);
+
+ /*
+ * Force to Reset the link again
+ */
+ jme_reset_link(jme);
+}
+
+static inline void jme_pause_rx(struct jme_adapter *jme)
+{
+ atomic_dec(&jme->link_changing);
+
+ jme_set_rx_pcc(jme, PCC_OFF);
+ if (test_bit(JME_FLAG_POLL, &jme->flags)) {
+ JME_NAPI_DISABLE(jme);
+ } else {
+ tasklet_disable(&jme->rxclean_task);
+ tasklet_disable(&jme->rxempty_task);
+ }
+}
+
+static inline void jme_resume_rx(struct jme_adapter *jme)
+{
+ struct dynpcc_info *dpi = &(jme->dpi);
+
+ if (test_bit(JME_FLAG_POLL, &jme->flags)) {
+ JME_NAPI_ENABLE(jme);
+ } else {
+ tasklet_hi_enable(&jme->rxclean_task);
+ tasklet_hi_enable(&jme->rxempty_task);
+ }
+ dpi->cur = PCC_P1;
+ dpi->attempt = PCC_P1;
+ dpi->cnt = 0;
+ jme_set_rx_pcc(jme, PCC_P1);
+
+ atomic_inc(&jme->link_changing);
+}
+
+static void
+jme_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(jme->pdev));
+}
+
+static int
+jme_get_regs_len(struct net_device *netdev)
+{
+ return JME_REG_LEN;
+}
+
+static void
+mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len)
+{
+ int i;
+
+ for (i = 0 ; i < len ; i += 4)
+ p[i >> 2] = jread32(jme, reg + i);
+}
+
+static void
+mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr)
+{
+ int i;
+ u16 *p16 = (u16 *)p;
+
+ for (i = 0 ; i < reg_nr ; ++i)
+ p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
+}
+
+static void
+jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ u32 *p32 = (u32 *)p;
+
+ memset(p, 0xFF, JME_REG_LEN);
+
+ regs->version = 1;
+ mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);
+
+ p32 += 0x100 >> 2;
+ mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);
+
+ p32 += 0x100 >> 2;
+ mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);
+
+ p32 += 0x100 >> 2;
+ mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
+
+ p32 += 0x100 >> 2;
+ mdio_memcpy(jme, p32, JME_PHY_REG_NR);
+}
+
+static int
+jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ ecmd->tx_coalesce_usecs = PCC_TX_TO;
+ ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
+
+ if (test_bit(JME_FLAG_POLL, &jme->flags)) {
+ ecmd->use_adaptive_rx_coalesce = false;
+ ecmd->rx_coalesce_usecs = 0;
+ ecmd->rx_max_coalesced_frames = 0;
+ return 0;
+ }
+
+ ecmd->use_adaptive_rx_coalesce = true;
+
+ switch (jme->dpi.cur) {
+ case PCC_P1:
+ ecmd->rx_coalesce_usecs = PCC_P1_TO;
+ ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
+ break;
+ case PCC_P2:
+ ecmd->rx_coalesce_usecs = PCC_P2_TO;
+ ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
+ break;
+ case PCC_P3:
+ ecmd->rx_coalesce_usecs = PCC_P3_TO;
+ ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int
+jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ struct dynpcc_info *dpi = &(jme->dpi);
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ if (ecmd->use_adaptive_rx_coalesce &&
+ test_bit(JME_FLAG_POLL, &jme->flags)) {
+ clear_bit(JME_FLAG_POLL, &jme->flags);
+ jme->jme_rx = netif_rx;
+ dpi->cur = PCC_P1;
+ dpi->attempt = PCC_P1;
+ dpi->cnt = 0;
+ jme_set_rx_pcc(jme, PCC_P1);
+ jme_interrupt_mode(jme);
+ } else if (!(ecmd->use_adaptive_rx_coalesce) &&
+ !(test_bit(JME_FLAG_POLL, &jme->flags))) {
+ set_bit(JME_FLAG_POLL, &jme->flags);
+ jme->jme_rx = netif_receive_skb;
+ jme_interrupt_mode(jme);
+ }
+
+ return 0;
+}
+
+static void
+jme_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *ecmd)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ u32 val;
+
+ ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
+ ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;
+
+ spin_lock_bh(&jme->phy_lock);
+ val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
+ spin_unlock_bh(&jme->phy_lock);
+
+ ecmd->autoneg =
+ (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
+}
+
+static int
+jme_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *ecmd)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ u32 val;
+
+ if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^
+ (ecmd->tx_pause != 0)) {
+
+ if (ecmd->tx_pause)
+ jme->reg_txpfc |= TXPFC_PF_EN;
+ else
+ jme->reg_txpfc &= ~TXPFC_PF_EN;
+
+ jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
+ }
+
+ spin_lock_bh(&jme->rxmcs_lock);
+ if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^
+ (ecmd->rx_pause != 0)) {
+
+ if (ecmd->rx_pause)
+ jme->reg_rxmcs |= RXMCS_FLOWCTRL;
+ else
+ jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;
+
+ jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
+ }
+ spin_unlock_bh(&jme->rxmcs_lock);
+
+ spin_lock_bh(&jme->phy_lock);
+ val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
+ if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^
+ (ecmd->autoneg != 0)) {
+
+ if (ecmd->autoneg)
+ val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+ else
+ val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id,
+ MII_ADVERTISE, val);
+ }
+ spin_unlock_bh(&jme->phy_lock);
+
+ return 0;
+}
+
+static void
+jme_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ wol->supported = WAKE_MAGIC | WAKE_PHY;
+
+ wol->wolopts = 0;
+
+ if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
+ wol->wolopts |= WAKE_PHY;
+
+ if (jme->reg_pmcs & PMCS_MFEN)
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int
+jme_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ if (wol->wolopts & (WAKE_MAGICSECURE |
+ WAKE_UCAST |
+ WAKE_MCAST |
+ WAKE_BCAST |
+ WAKE_ARP))
+ return -EOPNOTSUPP;
+
+ jme->reg_pmcs = 0;
+
+ if (wol->wolopts & WAKE_PHY)
+ jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ jme->reg_pmcs |= PMCS_MFEN;
+
+ jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+ device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
+
+ return 0;
+}
+
+static int
+jme_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ int rc;
+
+ spin_lock_bh(&jme->phy_lock);
+ rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
+ spin_unlock_bh(&jme->phy_lock);
+ return rc;
+}
+
+static int
+jme_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ int rc, fdc = 0;
+
+ if (ethtool_cmd_speed(ecmd) == SPEED_1000
+ && ecmd->autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+
+ /*
+ * Check if the user changed only the duplex while force_media
+ * is set; the hardware would not generate a link change
+ * interrupt in that case.
+ */
+ if (jme->mii_if.force_media &&
+ ecmd->autoneg != AUTONEG_ENABLE &&
+ (jme->mii_if.full_duplex != ecmd->duplex))
+ fdc = 1;
+
+ spin_lock_bh(&jme->phy_lock);
+ rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
+ spin_unlock_bh(&jme->phy_lock);
+
+ if (!rc) {
+ if (fdc)
+ jme_reset_link(jme);
+ jme->old_ecmd = *ecmd;
+ set_bit(JME_FLAG_SSET, &jme->flags);
+ }
+
+ return rc;
+}
+
+static int
+jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+ int rc;
+ struct jme_adapter *jme = netdev_priv(netdev);
+ struct mii_ioctl_data *mii_data = if_mii(rq);
+ unsigned int duplex_chg;
+
+ if (cmd == SIOCSMIIREG) {
+ u16 val = mii_data->val_in;
+ if (!(val & (BMCR_RESET|BMCR_ANENABLE)) &&
+ (val & BMCR_SPEED1000))
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&jme->phy_lock);
+ rc = generic_mii_ioctl(&jme->mii_if, mii_data, cmd, &duplex_chg);
+ spin_unlock_bh(&jme->phy_lock);
+
+ if (!rc && (cmd == SIOCSMIIREG)) {
+ if (duplex_chg)
+ jme_reset_link(jme);
+ jme_get_settings(netdev, &jme->old_ecmd);
+ set_bit(JME_FLAG_SSET, &jme->flags);
+ }
+
+ return rc;
+}
+
+static u32
+jme_get_link(struct net_device *netdev)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
+}
+
+static u32
+jme_get_msglevel(struct net_device *netdev)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ return jme->msg_enable;
+}
+
+static void
+jme_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ jme->msg_enable = value;
+}
+
+static u32
+jme_fix_features(struct net_device *netdev, u32 features)
+{
+ if (netdev->mtu > 1900)
+ features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
+ return features;
+}
+
+static int
+jme_set_features(struct net_device *netdev, u32 features)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ spin_lock_bh(&jme->rxmcs_lock);
+ if (features & NETIF_F_RXCSUM)
+ jme->reg_rxmcs |= RXMCS_CHECKSUM;
+ else
+ jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
+ jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
+ spin_unlock_bh(&jme->rxmcs_lock);
+
+ return 0;
+}
+
+static int
+jme_nway_reset(struct net_device *netdev)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ jme_restart_an(jme);
+ return 0;
+}
+
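+/*
+ * Read one byte from the EEPROM through the SMBus interface, polling
+ * the busy bits for up to JME_SMB_BUSY_TIMEOUT ms per step.
+ */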
+static u8
+jme_smb_read(struct jme_adapter *jme, unsigned int addr)
+{
+ u32 val;
+ int to;
+
+ val = jread32(jme, JME_SMBCSR);
+ to = JME_SMB_BUSY_TIMEOUT;
+ while ((val & SMBCSR_BUSY) && --to) {
+ msleep(1);
+ val = jread32(jme, JME_SMBCSR);
+ }
+ if (!to) {
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
+ return 0xFF;
+ }
+
+ jwrite32(jme, JME_SMBINTF,
+ ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
+ SMBINTF_HWRWN_READ |
+ SMBINTF_HWCMD);
+
+ val = jread32(jme, JME_SMBINTF);
+ to = JME_SMB_BUSY_TIMEOUT;
+ while ((val & SMBINTF_HWCMD) && --to) {
+ msleep(1);
+ val = jread32(jme, JME_SMBINTF);
+ }
+ if (!to) {
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
+ return 0xFF;
+ }
+
+ return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
+}
+
+static void
+jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
+{
+ u32 val;
+ int to;
+
+ val = jread32(jme, JME_SMBCSR);
+ to = JME_SMB_BUSY_TIMEOUT;
+ while ((val & SMBCSR_BUSY) && --to) {
+ msleep(1);
+ val = jread32(jme, JME_SMBCSR);
+ }
+ if (!to) {
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
+ return;
+ }
+
+ jwrite32(jme, JME_SMBINTF,
+ ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
+ ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
+ SMBINTF_HWRWN_WRITE |
+ SMBINTF_HWCMD);
+
+ val = jread32(jme, JME_SMBINTF);
+ to = JME_SMB_BUSY_TIMEOUT;
+ while ((val & SMBINTF_HWCMD) && --to) {
+ msleep(1);
+ val = jread32(jme, JME_SMBINTF);
+ }
+ if (!to) {
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
+ return;
+ }
+
+ mdelay(2);
+}
+
+static int
+jme_get_eeprom_len(struct net_device *netdev)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ u32 val;
+ val = jread32(jme, JME_SMBCSR);
+ return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0;
+}
+
+static int
+jme_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ int i, offset = eeprom->offset, len = eeprom->len;
+
+ /*
+ * ethtool will check the boundary for us
+ */
+ eeprom->magic = JME_EEPROM_MAGIC;
+ for (i = 0 ; i < len ; ++i)
+ data[i] = jme_smb_read(jme, i + offset);
+
+ return 0;
+}
+
+static int
+jme_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ int i, offset = eeprom->offset, len = eeprom->len;
+
+ if (eeprom->magic != JME_EEPROM_MAGIC)
+ return -EINVAL;
+
+ /*
+ * ethtool will check the boundary for us
+ */
+ for (i = 0 ; i < len ; ++i)
+ jme_smb_write(jme, i + offset, data[i]);
+
+ return 0;
+}
+
+static const struct ethtool_ops jme_ethtool_ops = {
+ .get_drvinfo = jme_get_drvinfo,
+ .get_regs_len = jme_get_regs_len,
+ .get_regs = jme_get_regs,
+ .get_coalesce = jme_get_coalesce,
+ .set_coalesce = jme_set_coalesce,
+ .get_pauseparam = jme_get_pauseparam,
+ .set_pauseparam = jme_set_pauseparam,
+ .get_wol = jme_get_wol,
+ .set_wol = jme_set_wol,
+ .get_settings = jme_get_settings,
+ .set_settings = jme_set_settings,
+ .get_link = jme_get_link,
+ .get_msglevel = jme_get_msglevel,
+ .set_msglevel = jme_set_msglevel,
+ .nway_reset = jme_nway_reset,
+ .get_eeprom_len = jme_get_eeprom_len,
+ .get_eeprom = jme_get_eeprom,
+ .set_eeprom = jme_set_eeprom,
+};
+
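+/*
+ * Pick the widest usable DMA mask: 64-bit, then 40-bit (JMC250 only),
+ * then 32-bit. Returns 1 if DAC addressing can be used, 0 for plain
+ * 32-bit DMA, or -1 on failure.
+ */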
+static int
+jme_pci_dma64(struct pci_dev *pdev)
+{
+ if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
+ if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+ return 1;
+
+ if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
+ if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)))
+ return 1;
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
+ if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+ return 0;
+
+ return -1;
+}
+
+static inline void
+jme_phy_init(struct jme_adapter *jme)
+{
+ u16 reg26;
+
+ reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
+}
+
+static inline void
+jme_check_hw_ver(struct jme_adapter *jme)
+{
+ u32 chipmode;
+
+ chipmode = jread32(jme, JME_CHIPMODE);
+
+ jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
+ jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
+ jme->chip_main_rev = jme->chiprev & 0xF;
+ jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
+}
+
+static const struct net_device_ops jme_netdev_ops = {
+ .ndo_open = jme_open,
+ .ndo_stop = jme_close,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = jme_ioctl,
+ .ndo_start_xmit = jme_start_xmit,
+ .ndo_set_mac_address = jme_set_macaddr,
+ .ndo_set_rx_mode = jme_set_multi,
+ .ndo_change_mtu = jme_change_mtu,
+ .ndo_tx_timeout = jme_tx_timeout,
+ .ndo_fix_features = jme_fix_features,
+ .ndo_set_features = jme_set_features,
+};
+
+static int __devinit
+jme_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rc = 0, using_dac, i;
+ struct net_device *netdev;
+ struct jme_adapter *jme;
+ u16 bmcr, bmsr;
+ u32 apmc;
+
+ /*
+ * set up PCI device basics
+ */
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ pr_err("Cannot enable PCI device\n");
+ goto err_out;
+ }
+
+ using_dac = jme_pci_dma64(pdev);
+ if (using_dac < 0) {
+ pr_err("Cannot set PCI DMA Mask\n");
+ rc = -EIO;
+ goto err_out_disable_pdev;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ pr_err("No PCI resource region found\n");
+ rc = -ENOMEM;
+ goto err_out_disable_pdev;
+ }
+
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc) {
+ pr_err("Cannot obtain PCI resource region\n");
+ goto err_out_disable_pdev;
+ }
+
+ pci_set_master(pdev);
+
+ /*
+ * alloc and init net device
+ */
+ netdev = alloc_etherdev(sizeof(*jme));
+ if (!netdev) {
+ pr_err("Cannot allocate netdev structure\n");
+ rc = -ENOMEM;
+ goto err_out_release_regions;
+ }
+ netdev->netdev_ops = &jme_netdev_ops;
+ netdev->ethtool_ops = &jme_ethtool_ops;
+ netdev->watchdog_timeo = TX_TIMEOUT;
+ netdev->hw_features = NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXCSUM;
+ netdev->features = NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX;
+ if (using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ pci_set_drvdata(pdev, netdev);
+
+ /*
+ * init adapter info
+ */
+ jme = netdev_priv(netdev);
+ jme->pdev = pdev;
+ jme->dev = netdev;
+ jme->jme_rx = netif_rx;
+ jme->old_mtu = netdev->mtu = 1500;
+ jme->phylink = 0;
+ jme->tx_ring_size = 1 << 10;
+ jme->tx_ring_mask = jme->tx_ring_size - 1;
+ jme->tx_wake_threshold = 1 << 9;
+ jme->rx_ring_size = 1 << 9;
+ jme->rx_ring_mask = jme->rx_ring_size - 1;
+ jme->msg_enable = JME_DEF_MSG_ENABLE;
+ jme->regs = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!(jme->regs)) {
+ pr_err("Mapping PCI resource region error\n");
+ rc = -ENOMEM;
+ goto err_out_free_netdev;
+ }
+
+ if (no_pseudohp) {
+ apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
+ jwrite32(jme, JME_APMC, apmc);
+ } else if (force_pseudohp) {
+ apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN;
+ jwrite32(jme, JME_APMC, apmc);
+ }
+
+ NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)
+
+ spin_lock_init(&jme->phy_lock);
+ spin_lock_init(&jme->macaddr_lock);
+ spin_lock_init(&jme->rxmcs_lock);
+
+ atomic_set(&jme->link_changing, 1);
+ atomic_set(&jme->rx_cleaning, 1);
+ atomic_set(&jme->tx_cleaning, 1);
+ atomic_set(&jme->rx_empty, 1);
+
+ tasklet_init(&jme->pcc_task,
+ jme_pcc_tasklet,
+ (unsigned long) jme);
+ tasklet_init(&jme->linkch_task,
+ jme_link_change_tasklet,
+ (unsigned long) jme);
+ tasklet_init(&jme->txclean_task,
+ jme_tx_clean_tasklet,
+ (unsigned long) jme);
+ tasklet_init(&jme->rxclean_task,
+ jme_rx_clean_tasklet,
+ (unsigned long) jme);
+ tasklet_init(&jme->rxempty_task,
+ jme_rx_empty_tasklet,
+ (unsigned long) jme);
+ tasklet_disable_nosync(&jme->linkch_task);
+ tasklet_disable_nosync(&jme->txclean_task);
+ tasklet_disable_nosync(&jme->rxclean_task);
+ tasklet_disable_nosync(&jme->rxempty_task);
+ jme->dpi.cur = PCC_P1;
+
+ jme->reg_ghc = 0;
+ jme->reg_rxcs = RXCS_DEFAULT;
+ jme->reg_rxmcs = RXMCS_DEFAULT;
+ jme->reg_txpfc = 0;
+ jme->reg_pmcs = PMCS_MFEN;
+ jme->reg_gpreg1 = GPREG1_DEFAULT;
+
+ if (jme->reg_rxmcs & RXMCS_CHECKSUM)
+ netdev->features |= NETIF_F_RXCSUM;
+
+ /*
+ * Get Max Read Req Size from PCI Config Space
+ */
+ pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs);
+ jme->mrrs &= PCI_DCSR_MRRS_MASK;
+ switch (jme->mrrs) {
+ case MRRS_128B:
+ jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
+ break;
+ case MRRS_256B:
+ jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
+ break;
+ default:
+ jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
+ break;
+ }
+
+ /*
+ * Must be checked before reset_mac_processor
+ */
+ jme_check_hw_ver(jme);
+ jme->mii_if.dev = netdev;
+ if (jme->fpgaver) {
+ jme->mii_if.phy_id = 0;
+ for (i = 1 ; i < 32 ; ++i) {
+ bmcr = jme_mdio_read(netdev, i, MII_BMCR);
+ bmsr = jme_mdio_read(netdev, i, MII_BMSR);
+ if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
+ jme->mii_if.phy_id = i;
+ break;
+ }
+ }
+
+ if (!jme->mii_if.phy_id) {
+ rc = -EIO;
+ pr_err("Can not find phy_id\n");
+ goto err_out_unmap;
+ }
+
+ jme->reg_ghc |= GHC_LINK_POLL;
+ } else {
+ jme->mii_if.phy_id = 1;
+ }
+ if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
+ jme->mii_if.supports_gmii = true;
+ else
+ jme->mii_if.supports_gmii = false;
+ jme->mii_if.phy_id_mask = 0x1F;
+ jme->mii_if.reg_num_mask = 0x1F;
+ jme->mii_if.mdio_read = jme_mdio_read;
+ jme->mii_if.mdio_write = jme_mdio_write;
+
+ jme_clear_pm(jme);
+ pci_set_power_state(jme->pdev, PCI_D0);
+ device_set_wakeup_enable(&pdev->dev, true);
+
+ jme_set_phyfifo_5level(jme);
+ jme->pcirev = pdev->revision;
+ if (!jme->fpgaver)
+ jme_phy_init(jme);
+ jme_phy_off(jme);
+
+ /*
+ * Reset MAC processor and reload EEPROM for MAC Address
+ */
+ jme_reset_mac_processor(jme);
+ rc = jme_reload_eeprom(jme);
+ if (rc) {
+ pr_err("Reload eeprom for reading MAC Address error\n");
+ goto err_out_unmap;
+ }
+ jme_load_macaddr(netdev);
+
+ /*
+ * Tell the stack that we are not ready to work until open()
+ * is called
+ */
+ netif_carrier_off(netdev);
+
+ rc = register_netdev(netdev);
+ if (rc) {
+ pr_err("Cannot register net device\n");
+ goto err_out_unmap;
+ }
+
+ netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
+ (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
+ "JMC250 Gigabit Ethernet" :
+ (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
+ "JMC260 Fast Ethernet" : "Unknown",
+ (jme->fpgaver != 0) ? " (FPGA)" : "",
+ (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
+ jme->pcirev, netdev->dev_addr);
+
+ return 0;
+
+err_out_unmap:
+ iounmap(jme->regs);
+err_out_free_netdev:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+err_out_release_regions:
+ pci_release_regions(pdev);
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+err_out:
+ return rc;
+}
+
+static void __devexit
+jme_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ unregister_netdev(netdev);
+ iounmap(jme->regs);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static void
+jme_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ jme_powersave_phy(jme);
+ pci_pme_active(pdev, true);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int
+jme_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ atomic_dec(&jme->link_changing);
+
+ netif_device_detach(netdev);
+ netif_stop_queue(netdev);
+ jme_stop_irq(jme);
+
+ tasklet_disable(&jme->txclean_task);
+ tasklet_disable(&jme->rxclean_task);
+ tasklet_disable(&jme->rxempty_task);
+
+ if (netif_carrier_ok(netdev)) {
+ if (test_bit(JME_FLAG_POLL, &jme->flags))
+ jme_polling_mode(jme);
+
+ jme_stop_pcc_timer(jme);
+ jme_disable_rx_engine(jme);
+ jme_disable_tx_engine(jme);
+ jme_reset_mac_processor(jme);
+ jme_free_rx_resources(jme);
+ jme_free_tx_resources(jme);
+ netif_carrier_off(netdev);
+ jme->phylink = 0;
+ }
+
+ tasklet_enable(&jme->txclean_task);
+ tasklet_hi_enable(&jme->rxclean_task);
+ tasklet_hi_enable(&jme->rxempty_task);
+
+ jme_powersave_phy(jme);
+
+ return 0;
+}
+
+static int
+jme_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ jme_clear_pm(jme);
+ jme_phy_on(jme);
+ if (test_bit(JME_FLAG_SSET, &jme->flags))
+ jme_set_settings(netdev, &jme->old_ecmd);
+ else
+ jme_reset_phy_processor(jme);
+
+ jme_start_irq(jme);
+ netif_device_attach(netdev);
+
+ atomic_inc(&jme->link_changing);
+
+ jme_reset_link(jme);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume);
+#define JME_PM_OPS (&jme_pm_ops)
+
+#else
+
+#define JME_PM_OPS NULL
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = {
+ { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
+ { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
+ { }
+};
+
+static struct pci_driver jme_driver = {
+ .name = DRV_NAME,
+ .id_table = jme_pci_tbl,
+ .probe = jme_init_one,
+ .remove = __devexit_p(jme_remove_one),
+ .shutdown = jme_shutdown,
+ .driver.pm = JME_PM_OPS,
+};
+
+static int __init
+jme_init_module(void)
+{
+ pr_info("JMicron JMC2XX ethernet driver version %s\n", DRV_VERSION);
+ return pci_register_driver(&jme_driver);
+}
+
+static void __exit
+jme_cleanup_module(void)
+{
+ pci_unregister_driver(&jme_driver);
+}
+
+module_init(jme_init_module);
+module_exit(jme_cleanup_module);
+
+MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
+MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
+
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
new file mode 100644
index 000000000000..02ea27c1dcb5
--- /dev/null
+++ b/drivers/net/ethernet/jme.h
@@ -0,0 +1,1260 @@
+/*
+ * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
+ *
+ * Copyright 2008 JMicron Technology Corporation
+ * http://www.jmicron.com/
+ * Copyright (c) 2009 - 2010 Guo-Fu Tseng <cooldavid@cooldavid.org>
+ *
+ * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __JME_H_INCLUDED__
+#define __JME_H_INCLUDED__
+#include <linux/interrupt.h>
+
+#define DRV_NAME "jme"
+#define DRV_VERSION "1.0.8"
+#define PFX DRV_NAME ": "
+
+#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250
+#define PCI_DEVICE_ID_JMICRON_JMC260 0x0260
+
+/*
+ * Message related definitions
+ */
+#define JME_DEF_MSG_ENABLE \
+ (NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR | \
+ NETIF_MSG_HW)
+
+#ifdef TX_DEBUG
+#define tx_dbg(priv, fmt, args...) \
+ printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ##args)
+#else
+#define tx_dbg(priv, fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ##args); \
+} while (0)
+#endif
+
+/*
+ * Extra PCI Configuration space interface
+ */
+#define PCI_DCSR_MRRS 0x59
+#define PCI_DCSR_MRRS_MASK 0x70
+
+enum pci_dcsr_mrrs_vals {
+ MRRS_128B = 0x00,
+ MRRS_256B = 0x10,
+ MRRS_512B = 0x20,
+ MRRS_1024B = 0x30,
+ MRRS_2048B = 0x40,
+ MRRS_4096B = 0x50,
+};
+
+#define PCI_SPI 0xB0
+
+enum pci_spi_bits {
+ SPI_EN = 0x10,
+ SPI_MISO = 0x08,
+ SPI_MOSI = 0x04,
+ SPI_SCLK = 0x02,
+ SPI_CS = 0x01,
+};
+
+struct jme_spi_op {
+ void __user *uwbuf;
+ void __user *urbuf;
+ __u8 wn; /* Number of write actions */
+ __u8 rn; /* Number of read actions */
+ __u8 bitn; /* Number of bits per action */
+	__u8	spd; /* The maximum acceptable speed of the controller, in MHz. */
+ __u8 mode; /* CPOL, CPHA, and Duplex mode of SPI */
+
+ /* Internal use only */
+ u8 *kwbuf;
+ u8 *krbuf;
+ u8 sr;
+ u16 halfclk; /* Half of clock cycle calculated from spd, in ns */
+};
+
+enum jme_spi_op_bits {
+ SPI_MODE_CPHA = 0x01,
+ SPI_MODE_CPOL = 0x02,
+ SPI_MODE_DUP = 0x80,
+};
+
+#define HALF_US 500 /* 500 ns */
+
+#define PCI_PRIV_PE1 0xE4
+
+enum pci_priv_pe1_bit_masks {
+ PE1_ASPMSUPRT = 0x00000003, /*
+ * RW:
+ * Aspm_support[1:0]
+ * (R/W Port of 5C[11:10])
+ */
+ PE1_MULTIFUN = 0x00000004, /* RW: Multi_fun_bit */
+ PE1_RDYDMA = 0x00000008, /* RO: ~link.rdy_for_dma */
+ PE1_ASPMOPTL = 0x00000030, /* RW: link.rx10s_option[1:0] */
+ PE1_ASPMOPTH = 0x000000C0, /* RW: 10_req=[3]?HW:[2] */
+ PE1_GPREG0 = 0x0000FF00, /*
+ * SRW:
+ * Cfg_gp_reg0
+ * [7:6] phy_giga BG control
+ * [5] CREQ_N as CREQ_N1 (CPPE# as CREQ#)
+ * [4:0] Reserved
+ */
+ PE1_GPREG0_PBG = 0x0000C000, /* phy_giga BG control */
+ PE1_GPREG1 = 0x00FF0000, /* RW: Cfg_gp_reg1 */
+ PE1_REVID = 0xFF000000, /* RO: Rev ID */
+};
+
+enum pci_priv_pe1_values {
+ PE1_GPREG0_ENBG = 0x00000000, /* en BG */
+ PE1_GPREG0_PDD3COLD = 0x00004000, /* giga_PD + d3cold */
+ PE1_GPREG0_PDPCIESD = 0x00008000, /* giga_PD + pcie_shutdown */
+ PE1_GPREG0_PDPCIEIDDQ = 0x0000C000, /* giga_PD + pcie_iddq */
+};
+
+/*
+ * Dynamic(adaptive)/Static PCC values
+ */
+enum dynamic_pcc_values {
+ PCC_OFF = 0,
+ PCC_P1 = 1,
+ PCC_P2 = 2,
+ PCC_P3 = 3,
+
+ PCC_OFF_TO = 0,
+ PCC_P1_TO = 1,
+ PCC_P2_TO = 64,
+ PCC_P3_TO = 128,
+
+ PCC_OFF_CNT = 0,
+ PCC_P1_CNT = 1,
+ PCC_P2_CNT = 16,
+ PCC_P3_CNT = 32,
+};
+struct dynpcc_info {
+ unsigned long last_bytes;
+ unsigned long last_pkts;
+ unsigned long intr_cnt;
+ unsigned char cur;
+ unsigned char attempt;
+ unsigned char cnt;
+};
+#define PCC_INTERVAL_US 100000
+#define PCC_INTERVAL (HZ / (1000000 / PCC_INTERVAL_US))
+#define PCC_P3_THRESHOLD (2 * 1024 * 1024)
+#define PCC_P2_THRESHOLD 800
+#define PCC_INTR_THRESHOLD 800
+#define PCC_TX_TO 1000
+#define PCC_TX_CNT 8
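+
+/*
+ * Worked example (illustrative, inferred from the values above): with
+ * PCC_INTERVAL_US = 100000 the dynamic PCC level is re-evaluated every
+ * 100 ms (HZ / 10 jiffies), so PCC_P3_THRESHOLD of 2 MiB per interval
+ * corresponds to roughly 20 MB/s (~160 Mbit/s) of receive traffic before
+ * the most aggressive coalescing level P3 is selected.
+ */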
+
+/*
+ * TX/RX Descriptors
+ *
+ * TX/RX Ring DESC Count Must be multiple of 16 and <= 1024
+ */
+#define RING_DESC_ALIGN 16 /* Descriptor alignment */
+#define TX_DESC_SIZE 16
+#define TX_RING_NR 8
+#define TX_RING_ALLOC_SIZE(s) ((s * TX_DESC_SIZE) + RING_DESC_ALIGN)
+
+struct txdesc {
+ union {
+ __u8 all[16];
+ __le32 dw[4];
+ struct {
+ /* DW0 */
+ __le16 vlan;
+ __u8 rsv1;
+ __u8 flags;
+
+ /* DW1 */
+ __le16 datalen;
+ __le16 mss;
+
+ /* DW2 */
+ __le16 pktsize;
+ __le16 rsv2;
+
+ /* DW3 */
+ __le32 bufaddr;
+ } desc1;
+ struct {
+ /* DW0 */
+ __le16 rsv1;
+ __u8 rsv2;
+ __u8 flags;
+
+ /* DW1 */
+ __le16 datalen;
+ __le16 rsv3;
+
+ /* DW2 */
+ __le32 bufaddrh;
+
+ /* DW3 */
+ __le32 bufaddrl;
+ } desc2;
+ struct {
+ /* DW0 */
+ __u8 ehdrsz;
+ __u8 rsv1;
+ __u8 rsv2;
+ __u8 flags;
+
+ /* DW1 */
+ __le16 trycnt;
+ __le16 segcnt;
+
+ /* DW2 */
+ __le16 pktsz;
+ __le16 rsv3;
+
+ /* DW3 */
+ __le32 bufaddrl;
+ } descwb;
+ };
+};
+
+enum jme_txdesc_flags_bits {
+ TXFLAG_OWN = 0x80,
+ TXFLAG_INT = 0x40,
+ TXFLAG_64BIT = 0x20,
+ TXFLAG_TCPCS = 0x10,
+ TXFLAG_UDPCS = 0x08,
+ TXFLAG_IPCS = 0x04,
+ TXFLAG_LSEN = 0x02,
+ TXFLAG_TAGON = 0x01,
+};
+
+#define TXDESC_MSS_SHIFT 2
+enum jme_txwbdesc_flags_bits {
+ TXWBFLAG_OWN = 0x80,
+ TXWBFLAG_INT = 0x40,
+ TXWBFLAG_TMOUT = 0x20,
+ TXWBFLAG_TRYOUT = 0x10,
+ TXWBFLAG_COL = 0x08,
+
+ TXWBFLAG_ALLERR = TXWBFLAG_TMOUT |
+ TXWBFLAG_TRYOUT |
+ TXWBFLAG_COL,
+};
+
+#define RX_DESC_SIZE 16
+#define RX_RING_NR 4
+#define RX_RING_ALLOC_SIZE(s) ((s * RX_DESC_SIZE) + RING_DESC_ALIGN)
+#define RX_BUF_DMA_ALIGN 8
+#define RX_PREPAD_SIZE 10
+#define ETH_CRC_LEN 2
+#define RX_VLANHDR_LEN 2
+#define RX_EXTRA_LEN (RX_PREPAD_SIZE + \
+ ETH_HLEN + \
+ ETH_CRC_LEN + \
+ RX_VLANHDR_LEN + \
+ RX_BUF_DMA_ALIGN)
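+
+/*
+ * Example: with the values above, RX_EXTRA_LEN reserves
+ * 10 (prepad) + 14 (ETH_HLEN) + 2 (CRC) + 2 (VLAN) + 8 (DMA align)
+ * = 36 bytes of headroom on top of the MTU-sized payload.
+ */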
+
+struct rxdesc {
+ union {
+ __u8 all[16];
+ __le32 dw[4];
+ struct {
+ /* DW0 */
+ __le16 rsv2;
+ __u8 rsv1;
+ __u8 flags;
+
+ /* DW1 */
+ __le16 datalen;
+ __le16 wbcpl;
+
+ /* DW2 */
+ __le32 bufaddrh;
+
+ /* DW3 */
+ __le32 bufaddrl;
+ } desc1;
+ struct {
+ /* DW0 */
+ __le16 vlan;
+ __le16 flags;
+
+ /* DW1 */
+ __le16 framesize;
+ __u8 errstat;
+ __u8 desccnt;
+
+ /* DW2 */
+ __le32 rsshash;
+
+ /* DW3 */
+ __u8 hashfun;
+ __u8 hashtype;
+ __le16 resrv;
+ } descwb;
+ };
+};
+
+enum jme_rxdesc_flags_bits {
+ RXFLAG_OWN = 0x80,
+ RXFLAG_INT = 0x40,
+ RXFLAG_64BIT = 0x20,
+};
+
+enum jme_rxwbdesc_flags_bits {
+ RXWBFLAG_OWN = 0x8000,
+ RXWBFLAG_INT = 0x4000,
+ RXWBFLAG_MF = 0x2000,
+ RXWBFLAG_64BIT = 0x2000,
+ RXWBFLAG_TCPON = 0x1000,
+ RXWBFLAG_UDPON = 0x0800,
+ RXWBFLAG_IPCS = 0x0400,
+ RXWBFLAG_TCPCS = 0x0200,
+ RXWBFLAG_UDPCS = 0x0100,
+ RXWBFLAG_TAGON = 0x0080,
+ RXWBFLAG_IPV4 = 0x0040,
+ RXWBFLAG_IPV6 = 0x0020,
+ RXWBFLAG_PAUSE = 0x0010,
+ RXWBFLAG_MAGIC = 0x0008,
+ RXWBFLAG_WAKEUP = 0x0004,
+ RXWBFLAG_DEST = 0x0003,
+ RXWBFLAG_DEST_UNI = 0x0001,
+ RXWBFLAG_DEST_MUL = 0x0002,
+ RXWBFLAG_DEST_BRO = 0x0003,
+};
+
+enum jme_rxwbdesc_desccnt_mask {
+ RXWBDCNT_WBCPL = 0x80,
+ RXWBDCNT_DCNT = 0x7F,
+};
+
+enum jme_rxwbdesc_errstat_bits {
+ RXWBERR_LIMIT = 0x80,
+ RXWBERR_MIIER = 0x40,
+ RXWBERR_NIBON = 0x20,
+ RXWBERR_COLON = 0x10,
+ RXWBERR_ABORT = 0x08,
+ RXWBERR_SHORT = 0x04,
+ RXWBERR_OVERUN = 0x02,
+ RXWBERR_CRCERR = 0x01,
+ RXWBERR_ALLERR = 0xFF,
+};
+
+/*
+ * Buffer information corresponding to ring descriptors.
+ */
+struct jme_buffer_info {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ int len;
+ int nr_desc;
+ unsigned long start_xmit;
+};
+
+/*
+ * The structure holding buffer information and ring descriptors all together.
+ */
+struct jme_ring {
+ void *alloc; /* pointer to allocated memory */
+ void *desc; /* pointer to ring memory */
+ dma_addr_t dmaalloc; /* phys address of ring alloc */
+ dma_addr_t dma; /* phys address for ring dma */
+
+ /* Buffer information corresponding to each descriptor */
+ struct jme_buffer_info *bufinf;
+
+ int next_to_use;
+ atomic_t next_to_clean;
+ atomic_t nr_free;
+};
+
+#define NET_STAT(priv) (priv->dev->stats)
+#define NETDEV_GET_STATS(netdev, fun_ptr)
+#define DECLARE_NET_DEVICE_STATS
+
+#define DECLARE_NAPI_STRUCT struct napi_struct napi;
+#define NETIF_NAPI_SET(dev, napis, pollfn, q) \
+ netif_napi_add(dev, napis, pollfn, q);
+#define JME_NAPI_HOLDER(holder) struct napi_struct *holder
+#define JME_NAPI_WEIGHT(w) int w
+#define JME_NAPI_WEIGHT_VAL(w) w
+#define JME_NAPI_WEIGHT_SET(w, r)
+#define JME_RX_COMPLETE(dev, napis) napi_complete(napis)
+#define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi);
+#define JME_NAPI_DISABLE(priv) \
+ if (!napi_disable_pending(&priv->napi)) \
+ napi_disable(&priv->napi);
+#define JME_RX_SCHEDULE_PREP(priv) \
+ napi_schedule_prep(&priv->napi)
+#define JME_RX_SCHEDULE(priv) \
+ __napi_schedule(&priv->napi);
+
+/*
+ * JME Adapter Private data
+ */
+struct jme_adapter {
+ struct pci_dev *pdev;
+ struct net_device *dev;
+ void __iomem *regs;
+ struct mii_if_info mii_if;
+ struct jme_ring rxring[RX_RING_NR];
+ struct jme_ring txring[TX_RING_NR];
+ spinlock_t phy_lock;
+ spinlock_t macaddr_lock;
+ spinlock_t rxmcs_lock;
+ struct tasklet_struct rxempty_task;
+ struct tasklet_struct rxclean_task;
+ struct tasklet_struct txclean_task;
+ struct tasklet_struct linkch_task;
+ struct tasklet_struct pcc_task;
+ unsigned long flags;
+ u32 reg_txcs;
+ u32 reg_txpfc;
+ u32 reg_rxcs;
+ u32 reg_rxmcs;
+ u32 reg_ghc;
+ u32 reg_pmcs;
+ u32 reg_gpreg1;
+ u32 phylink;
+ u32 tx_ring_size;
+ u32 tx_ring_mask;
+ u32 tx_wake_threshold;
+ u32 rx_ring_size;
+ u32 rx_ring_mask;
+ u8 mrrs;
+ unsigned int fpgaver;
+ u8 chiprev;
+ u8 chip_main_rev;
+ u8 chip_sub_rev;
+ u8 pcirev;
+ u32 msg_enable;
+ struct ethtool_cmd old_ecmd;
+ unsigned int old_mtu;
+ struct dynpcc_info dpi;
+ atomic_t intr_sem;
+ atomic_t link_changing;
+ atomic_t tx_cleaning;
+ atomic_t rx_cleaning;
+ atomic_t rx_empty;
+ int (*jme_rx)(struct sk_buff *skb);
+ DECLARE_NAPI_STRUCT
+ DECLARE_NET_DEVICE_STATS
+};
+
+enum jme_flags_bits {
+ JME_FLAG_MSI = 1,
+ JME_FLAG_SSET = 2,
+ JME_FLAG_POLL = 5,
+ JME_FLAG_SHUTDOWN = 6,
+};
+
+#define TX_TIMEOUT (5 * HZ)
+#define JME_REG_LEN 0x500
+#define MAX_ETHERNET_JUMBO_PACKET_SIZE 9216
+
+static inline struct jme_adapter*
+jme_napi_priv(struct napi_struct *napi)
+{
+ struct jme_adapter *jme;
+ jme = container_of(napi, struct jme_adapter, napi);
+ return jme;
+}
+
+/*
+ * Memory-Mapped I/O Registers
+ */
+enum jme_iomap_offsets {
+ JME_MAC = 0x0000,
+ JME_PHY = 0x0400,
+ JME_MISC = 0x0800,
+ JME_RSS = 0x0C00,
+};
+
+enum jme_iomap_lens {
+ JME_MAC_LEN = 0x80,
+ JME_PHY_LEN = 0x58,
+ JME_MISC_LEN = 0x98,
+ JME_RSS_LEN = 0xFF,
+};
+
+enum jme_iomap_regs {
+ JME_TXCS = JME_MAC | 0x00, /* Transmit Control and Status */
+ JME_TXDBA_LO = JME_MAC | 0x04, /* Transmit Queue Desc Base Addr */
+ JME_TXDBA_HI = JME_MAC | 0x08, /* Transmit Queue Desc Base Addr */
+ JME_TXQDC = JME_MAC | 0x0C, /* Transmit Queue Desc Count */
+ JME_TXNDA = JME_MAC | 0x10, /* Transmit Queue Next Desc Addr */
+ JME_TXMCS = JME_MAC | 0x14, /* Transmit MAC Control Status */
+ JME_TXPFC = JME_MAC | 0x18, /* Transmit Pause Frame Control */
+ JME_TXTRHD = JME_MAC | 0x1C, /* Transmit Timer/Retry@Half-Dup */
+
+ JME_RXCS = JME_MAC | 0x20, /* Receive Control and Status */
+ JME_RXDBA_LO = JME_MAC | 0x24, /* Receive Queue Desc Base Addr */
+ JME_RXDBA_HI = JME_MAC | 0x28, /* Receive Queue Desc Base Addr */
+ JME_RXQDC = JME_MAC | 0x2C, /* Receive Queue Desc Count */
+ JME_RXNDA = JME_MAC | 0x30, /* Receive Queue Next Desc Addr */
+ JME_RXMCS = JME_MAC | 0x34, /* Receive MAC Control Status */
+ JME_RXUMA_LO = JME_MAC | 0x38, /* Receive Unicast MAC Address */
+ JME_RXUMA_HI = JME_MAC | 0x3C, /* Receive Unicast MAC Address */
+ JME_RXMCHT_LO = JME_MAC | 0x40, /* Recv Multicast Addr HashTable */
+ JME_RXMCHT_HI = JME_MAC | 0x44, /* Recv Multicast Addr HashTable */
+ JME_WFODP = JME_MAC | 0x48, /* Wakeup Frame Output Data Port */
+ JME_WFOI = JME_MAC | 0x4C, /* Wakeup Frame Output Interface */
+
+ JME_SMI = JME_MAC | 0x50, /* Station Management Interface */
+ JME_GHC = JME_MAC | 0x54, /* Global Host Control */
+ JME_PMCS = JME_MAC | 0x60, /* Power Management Control/Stat */
+
+
+ JME_PHY_PWR = JME_PHY | 0x24, /* New PHY Power Ctrl Register */
+ JME_PHY_CS = JME_PHY | 0x28, /* PHY Ctrl and Status Register */
+ JME_PHY_LINK = JME_PHY | 0x30, /* PHY Link Status Register */
+ JME_SMBCSR = JME_PHY | 0x40, /* SMB Control and Status */
+ JME_SMBINTF = JME_PHY | 0x44, /* SMB Interface */
+
+
+ JME_TMCSR = JME_MISC | 0x00, /* Timer Control/Status Register */
+ JME_GPREG0 = JME_MISC | 0x08, /* General purpose REG-0 */
+ JME_GPREG1 = JME_MISC | 0x0C, /* General purpose REG-1 */
+ JME_IEVE = JME_MISC | 0x20, /* Interrupt Event Status */
+ JME_IREQ = JME_MISC | 0x24, /* Intr Req Status(For Debug) */
+ JME_IENS = JME_MISC | 0x28, /* Intr Enable - Setting Port */
+ JME_IENC = JME_MISC | 0x2C, /* Interrupt Enable - Clear Port */
+ JME_PCCRX0 = JME_MISC | 0x30, /* PCC Control for RX Queue 0 */
+ JME_PCCTX = JME_MISC | 0x40, /* PCC Control for TX Queues */
+ JME_CHIPMODE = JME_MISC | 0x44, /* Identify FPGA Version */
+ JME_SHBA_HI = JME_MISC | 0x48, /* Shadow Register Base HI */
+ JME_SHBA_LO = JME_MISC | 0x4C, /* Shadow Register Base LO */
+ JME_TIMER1 = JME_MISC | 0x70, /* Timer1 */
+ JME_TIMER2 = JME_MISC | 0x74, /* Timer2 */
+ JME_APMC = JME_MISC | 0x7C, /* Aggressive Power Mode Control */
+ JME_PCCSRX0 = JME_MISC | 0x80, /* PCC Status of RX0 */
+};
+
+/*
+ * TX Control/Status Bits
+ */
+enum jme_txcs_bits {
+ TXCS_QUEUE7S = 0x00008000,
+ TXCS_QUEUE6S = 0x00004000,
+ TXCS_QUEUE5S = 0x00002000,
+ TXCS_QUEUE4S = 0x00001000,
+ TXCS_QUEUE3S = 0x00000800,
+ TXCS_QUEUE2S = 0x00000400,
+ TXCS_QUEUE1S = 0x00000200,
+ TXCS_QUEUE0S = 0x00000100,
+ TXCS_FIFOTH = 0x000000C0,
+ TXCS_DMASIZE = 0x00000030,
+ TXCS_BURST = 0x00000004,
+ TXCS_ENABLE = 0x00000001,
+};
+
+enum jme_txcs_value {
+ TXCS_FIFOTH_16QW = 0x000000C0,
+ TXCS_FIFOTH_12QW = 0x00000080,
+ TXCS_FIFOTH_8QW = 0x00000040,
+ TXCS_FIFOTH_4QW = 0x00000000,
+
+ TXCS_DMASIZE_64B = 0x00000000,
+ TXCS_DMASIZE_128B = 0x00000010,
+ TXCS_DMASIZE_256B = 0x00000020,
+ TXCS_DMASIZE_512B = 0x00000030,
+
+ TXCS_SELECT_QUEUE0 = 0x00000000,
+ TXCS_SELECT_QUEUE1 = 0x00010000,
+ TXCS_SELECT_QUEUE2 = 0x00020000,
+ TXCS_SELECT_QUEUE3 = 0x00030000,
+ TXCS_SELECT_QUEUE4 = 0x00040000,
+ TXCS_SELECT_QUEUE5 = 0x00050000,
+ TXCS_SELECT_QUEUE6 = 0x00060000,
+ TXCS_SELECT_QUEUE7 = 0x00070000,
+
+ TXCS_DEFAULT = TXCS_FIFOTH_4QW |
+ TXCS_BURST,
+};
+
+#define JME_TX_DISABLE_TIMEOUT 10 /* 10 msec */
+
+/*
+ * TX MAC Control/Status Bits
+ */
+enum jme_txmcs_bit_masks {
+ TXMCS_IFG2 = 0xC0000000,
+ TXMCS_IFG1 = 0x30000000,
+ TXMCS_TTHOLD = 0x00000300,
+ TXMCS_FBURST = 0x00000080,
+ TXMCS_CARRIEREXT = 0x00000040,
+ TXMCS_DEFER = 0x00000020,
+ TXMCS_BACKOFF = 0x00000010,
+ TXMCS_CARRIERSENSE = 0x00000008,
+ TXMCS_COLLISION = 0x00000004,
+ TXMCS_CRC = 0x00000002,
+ TXMCS_PADDING = 0x00000001,
+};
+
+enum jme_txmcs_values {
+ TXMCS_IFG2_6_4 = 0x00000000,
+ TXMCS_IFG2_8_5 = 0x40000000,
+ TXMCS_IFG2_10_6 = 0x80000000,
+ TXMCS_IFG2_12_7 = 0xC0000000,
+
+ TXMCS_IFG1_8_4 = 0x00000000,
+ TXMCS_IFG1_12_6 = 0x10000000,
+ TXMCS_IFG1_16_8 = 0x20000000,
+ TXMCS_IFG1_20_10 = 0x30000000,
+
+ TXMCS_TTHOLD_1_8 = 0x00000000,
+ TXMCS_TTHOLD_1_4 = 0x00000100,
+ TXMCS_TTHOLD_1_2 = 0x00000200,
+ TXMCS_TTHOLD_FULL = 0x00000300,
+
+ TXMCS_DEFAULT = TXMCS_IFG2_8_5 |
+ TXMCS_IFG1_16_8 |
+ TXMCS_TTHOLD_FULL |
+ TXMCS_DEFER |
+ TXMCS_CRC |
+ TXMCS_PADDING,
+};
+
+enum jme_txpfc_bits_masks {
+ TXPFC_VLAN_TAG = 0xFFFF0000,
+ TXPFC_VLAN_EN = 0x00008000,
+ TXPFC_PF_EN = 0x00000001,
+};
+
+enum jme_txtrhd_bits_masks {
+ TXTRHD_TXPEN = 0x80000000,
+ TXTRHD_TXP = 0x7FFFFF00,
+ TXTRHD_TXREN = 0x00000080,
+ TXTRHD_TXRL = 0x0000007F,
+};
+
+enum jme_txtrhd_shifts {
+ TXTRHD_TXP_SHIFT = 8,
+ TXTRHD_TXRL_SHIFT = 0,
+};
+
+enum jme_txtrhd_values {
+ TXTRHD_FULLDUPLEX = 0x00000000,
+ TXTRHD_HALFDUPLEX = TXTRHD_TXPEN |
+ ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
+ TXTRHD_TXREN |
+ ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL),
+};
+
+/*
+ * RX Control/Status Bits
+ */
+enum jme_rxcs_bit_masks {
+ /* FIFO full threshold for transmitting Tx Pause Packet */
+ RXCS_FIFOTHTP = 0x30000000,
+ /* FIFO threshold for processing next packet */
+ RXCS_FIFOTHNP = 0x0C000000,
+ RXCS_DMAREQSZ = 0x03000000, /* DMA Request Size */
+ RXCS_QUEUESEL = 0x00030000, /* Queue selection */
+ RXCS_RETRYGAP = 0x0000F000, /* RX Desc full retry gap */
+ RXCS_RETRYCNT = 0x00000F00, /* RX Desc full retry counter */
+ RXCS_WAKEUP = 0x00000040, /* Enable receive wakeup packet */
+ RXCS_MAGIC = 0x00000020, /* Enable receive magic packet */
+ RXCS_SHORT = 0x00000010, /* Enable receive short packet */
+	RXCS_ABORT	= 0x00000008, /* Enable receive error packet */
+ RXCS_QST = 0x00000004, /* Receive queue start */
+ RXCS_SUSPEND = 0x00000002,
+ RXCS_ENABLE = 0x00000001,
+};
+
+enum jme_rxcs_values {
+ RXCS_FIFOTHTP_16T = 0x00000000,
+ RXCS_FIFOTHTP_32T = 0x10000000,
+ RXCS_FIFOTHTP_64T = 0x20000000,
+ RXCS_FIFOTHTP_128T = 0x30000000,
+
+ RXCS_FIFOTHNP_16QW = 0x00000000,
+ RXCS_FIFOTHNP_32QW = 0x04000000,
+ RXCS_FIFOTHNP_64QW = 0x08000000,
+ RXCS_FIFOTHNP_128QW = 0x0C000000,
+
+ RXCS_DMAREQSZ_16B = 0x00000000,
+ RXCS_DMAREQSZ_32B = 0x01000000,
+ RXCS_DMAREQSZ_64B = 0x02000000,
+ RXCS_DMAREQSZ_128B = 0x03000000,
+
+ RXCS_QUEUESEL_Q0 = 0x00000000,
+ RXCS_QUEUESEL_Q1 = 0x00010000,
+ RXCS_QUEUESEL_Q2 = 0x00020000,
+ RXCS_QUEUESEL_Q3 = 0x00030000,
+
+ RXCS_RETRYGAP_256ns = 0x00000000,
+ RXCS_RETRYGAP_512ns = 0x00001000,
+ RXCS_RETRYGAP_1024ns = 0x00002000,
+ RXCS_RETRYGAP_2048ns = 0x00003000,
+ RXCS_RETRYGAP_4096ns = 0x00004000,
+ RXCS_RETRYGAP_8192ns = 0x00005000,
+ RXCS_RETRYGAP_16384ns = 0x00006000,
+ RXCS_RETRYGAP_32768ns = 0x00007000,
+
+ RXCS_RETRYCNT_0 = 0x00000000,
+ RXCS_RETRYCNT_4 = 0x00000100,
+ RXCS_RETRYCNT_8 = 0x00000200,
+ RXCS_RETRYCNT_12 = 0x00000300,
+ RXCS_RETRYCNT_16 = 0x00000400,
+ RXCS_RETRYCNT_20 = 0x00000500,
+ RXCS_RETRYCNT_24 = 0x00000600,
+ RXCS_RETRYCNT_28 = 0x00000700,
+ RXCS_RETRYCNT_32 = 0x00000800,
+ RXCS_RETRYCNT_36 = 0x00000900,
+ RXCS_RETRYCNT_40 = 0x00000A00,
+ RXCS_RETRYCNT_44 = 0x00000B00,
+ RXCS_RETRYCNT_48 = 0x00000C00,
+ RXCS_RETRYCNT_52 = 0x00000D00,
+ RXCS_RETRYCNT_56 = 0x00000E00,
+ RXCS_RETRYCNT_60 = 0x00000F00,
+
+ RXCS_DEFAULT = RXCS_FIFOTHTP_128T |
+ RXCS_FIFOTHNP_128QW |
+ RXCS_DMAREQSZ_128B |
+ RXCS_RETRYGAP_256ns |
+ RXCS_RETRYCNT_32,
+};
+
+#define JME_RX_DISABLE_TIMEOUT 10 /* 10 msec */
+
+/*
+ * RX MAC Control/Status Bits
+ */
+enum jme_rxmcs_bits {
+ RXMCS_ALLFRAME = 0x00000800,
+ RXMCS_BRDFRAME = 0x00000400,
+ RXMCS_MULFRAME = 0x00000200,
+ RXMCS_UNIFRAME = 0x00000100,
+ RXMCS_ALLMULFRAME = 0x00000080,
+ RXMCS_MULFILTERED = 0x00000040,
+ RXMCS_RXCOLLDEC = 0x00000020,
+ RXMCS_FLOWCTRL = 0x00000008,
+ RXMCS_VTAGRM = 0x00000004,
+ RXMCS_PREPAD = 0x00000002,
+ RXMCS_CHECKSUM = 0x00000001,
+
+ RXMCS_DEFAULT = RXMCS_VTAGRM |
+ RXMCS_PREPAD |
+ RXMCS_FLOWCTRL |
+ RXMCS_CHECKSUM,
+};
+
+/*
+ * Wakeup Frame setup interface registers
+ */
+#define WAKEUP_FRAME_NR 8
+#define WAKEUP_FRAME_MASK_DWNR 4
+
+enum jme_wfoi_bit_masks {
+ WFOI_MASK_SEL = 0x00000070,
+ WFOI_CRC_SEL = 0x00000008,
+ WFOI_FRAME_SEL = 0x00000007,
+};
+
+enum jme_wfoi_shifts {
+ WFOI_MASK_SHIFT = 4,
+};
+
+/*
+ * SMI Related definitions
+ */
+enum jme_smi_bit_mask {
+ SMI_DATA_MASK = 0xFFFF0000,
+ SMI_REG_ADDR_MASK = 0x0000F800,
+ SMI_PHY_ADDR_MASK = 0x000007C0,
+ SMI_OP_WRITE = 0x00000020,
+	/* Set to 1; hardware clears it to 0 once the request is done */
+	SMI_OP_REQ		= 0x00000010,
+	SMI_OP_MDIO		= 0x00000008, /* Software access In/Out */
+ SMI_OP_MDOE = 0x00000004, /* Software Output Enable */
+ SMI_OP_MDC = 0x00000002, /* Software CLK Control */
+ SMI_OP_MDEN = 0x00000001, /* Software access Enable */
+};
+
+enum jme_smi_bit_shift {
+ SMI_DATA_SHIFT = 16,
+ SMI_REG_ADDR_SHIFT = 11,
+ SMI_PHY_ADDR_SHIFT = 6,
+};
+
+static inline u32 smi_reg_addr(int x)
+{
+ return (x << SMI_REG_ADDR_SHIFT) & SMI_REG_ADDR_MASK;
+}
+
+static inline u32 smi_phy_addr(int x)
+{
+ return (x << SMI_PHY_ADDR_SHIFT) & SMI_PHY_ADDR_MASK;
+}
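+
+/*
+ * Illustrative sketch (not part of the driver): a complete SMI write
+ * command word would be composed from the fields above roughly as
+ *
+ *	val = ((u32)data << SMI_DATA_SHIFT) |
+ *	      smi_phy_addr(phy) | smi_reg_addr(reg) |
+ *	      SMI_OP_WRITE | SMI_OP_REQ;
+ *
+ * after which the hardware clears SMI_OP_REQ when the cycle completes.
+ */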
+
+#define JME_PHY_TIMEOUT 100 /* 100 msec */
+#define JME_PHY_REG_NR 32
+
+/*
+ * Global Host Control
+ */
+enum jme_ghc_bit_mask {
+ GHC_SWRST = 0x40000000,
+ GHC_TO_CLK_SRC = 0x00C00000,
+ GHC_TXMAC_CLK_SRC = 0x00300000,
+ GHC_DPX = 0x00000040,
+ GHC_SPEED = 0x00000030,
+ GHC_LINK_POLL = 0x00000001,
+};
+
+enum jme_ghc_speed_val {
+ GHC_SPEED_10M = 0x00000010,
+ GHC_SPEED_100M = 0x00000020,
+ GHC_SPEED_1000M = 0x00000030,
+};
+
+enum jme_ghc_to_clk {
+ GHC_TO_CLK_OFF = 0x00000000,
+ GHC_TO_CLK_GPHY = 0x00400000,
+ GHC_TO_CLK_PCIE = 0x00800000,
+ GHC_TO_CLK_INVALID = 0x00C00000,
+};
+
+enum jme_ghc_txmac_clk {
+ GHC_TXMAC_CLK_OFF = 0x00000000,
+ GHC_TXMAC_CLK_GPHY = 0x00100000,
+ GHC_TXMAC_CLK_PCIE = 0x00200000,
+ GHC_TXMAC_CLK_INVALID = 0x00300000,
+};
+
+/*
+ * Power management control and status register
+ */
+enum jme_pmcs_bit_masks {
+ PMCS_STMASK = 0xFFFF0000,
+ PMCS_WF7DET = 0x80000000,
+ PMCS_WF6DET = 0x40000000,
+ PMCS_WF5DET = 0x20000000,
+ PMCS_WF4DET = 0x10000000,
+ PMCS_WF3DET = 0x08000000,
+ PMCS_WF2DET = 0x04000000,
+ PMCS_WF1DET = 0x02000000,
+ PMCS_WF0DET = 0x01000000,
+ PMCS_LFDET = 0x00040000,
+ PMCS_LRDET = 0x00020000,
+ PMCS_MFDET = 0x00010000,
+ PMCS_ENMASK = 0x0000FFFF,
+ PMCS_WF7EN = 0x00008000,
+ PMCS_WF6EN = 0x00004000,
+ PMCS_WF5EN = 0x00002000,
+ PMCS_WF4EN = 0x00001000,
+ PMCS_WF3EN = 0x00000800,
+ PMCS_WF2EN = 0x00000400,
+ PMCS_WF1EN = 0x00000200,
+ PMCS_WF0EN = 0x00000100,
+ PMCS_LFEN = 0x00000004,
+ PMCS_LREN = 0x00000002,
+ PMCS_MFEN = 0x00000001,
+};
+
+/*
+ * New PHY Power Control Register
+ */
+enum jme_phy_pwr_bit_masks {
+ PHY_PWR_DWN1SEL = 0x01000000, /* Phy_giga.p_PWR_DOWN1_SEL */
+ PHY_PWR_DWN1SW = 0x02000000, /* Phy_giga.p_PWR_DOWN1_SW */
+ PHY_PWR_DWN2 = 0x04000000, /* Phy_giga.p_PWR_DOWN2 */
+ PHY_PWR_CLKSEL = 0x08000000, /*
+ * XTL_OUT Clock select
+ * (an internal free-running clock)
+ * 0: xtl_out = phy_giga.A_XTL25_O
+ * 1: xtl_out = phy_giga.PD_OSC
+ */
+};
+
+/*
+ * Giga PHY Status Registers
+ */
+enum jme_phy_link_bit_mask {
+ PHY_LINK_SPEED_MASK = 0x0000C000,
+ PHY_LINK_DUPLEX = 0x00002000,
+ PHY_LINK_SPEEDDPU_RESOLVED = 0x00000800,
+ PHY_LINK_UP = 0x00000400,
+ PHY_LINK_AUTONEG_COMPLETE = 0x00000200,
+ PHY_LINK_MDI_STAT = 0x00000040,
+};
+
+enum jme_phy_link_speed_val {
+ PHY_LINK_SPEED_10M = 0x00000000,
+ PHY_LINK_SPEED_100M = 0x00004000,
+ PHY_LINK_SPEED_1000M = 0x00008000,
+};
+
+#define JME_SPDRSV_TIMEOUT 500 /* 500 us */
+
+/*
+ * SMB Control and Status
+ */
+enum jme_smbcsr_bit_mask {
+ SMBCSR_CNACK = 0x00020000,
+ SMBCSR_RELOAD = 0x00010000,
+ SMBCSR_EEPROMD = 0x00000020,
+ SMBCSR_INITDONE = 0x00000010,
+ SMBCSR_BUSY = 0x0000000F,
+};
+
+enum jme_smbintf_bit_mask {
+ SMBINTF_HWDATR = 0xFF000000,
+ SMBINTF_HWDATW = 0x00FF0000,
+ SMBINTF_HWADDR = 0x0000FF00,
+ SMBINTF_HWRWN = 0x00000020,
+ SMBINTF_HWCMD = 0x00000010,
+ SMBINTF_FASTM = 0x00000008,
+ SMBINTF_GPIOSCL = 0x00000004,
+ SMBINTF_GPIOSDA = 0x00000002,
+ SMBINTF_GPIOEN = 0x00000001,
+};
+
+enum jme_smbintf_vals {
+ SMBINTF_HWRWN_READ = 0x00000020,
+ SMBINTF_HWRWN_WRITE = 0x00000000,
+};
+
+enum jme_smbintf_shifts {
+ SMBINTF_HWDATR_SHIFT = 24,
+ SMBINTF_HWDATW_SHIFT = 16,
+ SMBINTF_HWADDR_SHIFT = 8,
+};
+
+#define JME_EEPROM_RELOAD_TIMEOUT 2000 /* 2000 msec */
+#define JME_SMB_BUSY_TIMEOUT 20 /* 20 msec */
+#define JME_SMB_LEN 256
+#define JME_EEPROM_MAGIC 0x250
+
+/*
+ * Timer Control/Status Register
+ */
+enum jme_tmcsr_bit_masks {
+ TMCSR_SWIT = 0x80000000,
+ TMCSR_EN = 0x01000000,
+ TMCSR_CNT = 0x00FFFFFF,
+};
+
+/*
+ * General Purpose REG-0
+ */
+enum jme_gpreg0_masks {
+ GPREG0_DISSH = 0xFF000000,
+ GPREG0_PCIRLMT = 0x00300000,
+ GPREG0_PCCNOMUTCLR = 0x00040000,
+ GPREG0_LNKINTPOLL = 0x00001000,
+ GPREG0_PCCTMR = 0x00000300,
+ GPREG0_PHYADDR = 0x0000001F,
+};
+
+enum jme_gpreg0_vals {
+ GPREG0_DISSH_DW7 = 0x80000000,
+ GPREG0_DISSH_DW6 = 0x40000000,
+ GPREG0_DISSH_DW5 = 0x20000000,
+ GPREG0_DISSH_DW4 = 0x10000000,
+ GPREG0_DISSH_DW3 = 0x08000000,
+ GPREG0_DISSH_DW2 = 0x04000000,
+ GPREG0_DISSH_DW1 = 0x02000000,
+ GPREG0_DISSH_DW0 = 0x01000000,
+ GPREG0_DISSH_ALL = 0xFF000000,
+
+ GPREG0_PCIRLMT_8 = 0x00000000,
+ GPREG0_PCIRLMT_6 = 0x00100000,
+ GPREG0_PCIRLMT_5 = 0x00200000,
+ GPREG0_PCIRLMT_4 = 0x00300000,
+
+ GPREG0_PCCTMR_16ns = 0x00000000,
+ GPREG0_PCCTMR_256ns = 0x00000100,
+ GPREG0_PCCTMR_1us = 0x00000200,
+ GPREG0_PCCTMR_1ms = 0x00000300,
+
+ GPREG0_PHYADDR_1 = 0x00000001,
+
+ GPREG0_DEFAULT = GPREG0_PCIRLMT_4 |
+ GPREG0_PCCTMR_1us |
+ GPREG0_PHYADDR_1,
+};
+
+/*
+ * General Purpose REG-1
+ */
+enum jme_gpreg1_bit_masks {
+ GPREG1_RXCLKOFF = 0x04000000,
+ GPREG1_PCREQN = 0x00020000,
+ GPREG1_HALFMODEPATCH = 0x00000040, /* For Chip revision 0x11 only */
+ GPREG1_RSSPATCH = 0x00000020, /* For Chip revision 0x11 only */
+ GPREG1_INTRDELAYUNIT = 0x00000018,
+ GPREG1_INTRDELAYENABLE = 0x00000007,
+};
+
+enum jme_gpreg1_vals {
+ GPREG1_INTDLYUNIT_16NS = 0x00000000,
+ GPREG1_INTDLYUNIT_256NS = 0x00000008,
+ GPREG1_INTDLYUNIT_1US = 0x00000010,
+ GPREG1_INTDLYUNIT_16US = 0x00000018,
+
+ GPREG1_INTDLYEN_1U = 0x00000001,
+ GPREG1_INTDLYEN_2U = 0x00000002,
+ GPREG1_INTDLYEN_3U = 0x00000003,
+ GPREG1_INTDLYEN_4U = 0x00000004,
+ GPREG1_INTDLYEN_5U = 0x00000005,
+ GPREG1_INTDLYEN_6U = 0x00000006,
+ GPREG1_INTDLYEN_7U = 0x00000007,
+
+ GPREG1_DEFAULT = GPREG1_PCREQN,
+};
+
+/*
+ * Interrupt Status Bits
+ */
+enum jme_interrupt_bits {
+ INTR_SWINTR = 0x80000000,
+ INTR_TMINTR = 0x40000000,
+ INTR_LINKCH = 0x20000000,
+ INTR_PAUSERCV = 0x10000000,
+ INTR_MAGICRCV = 0x08000000,
+ INTR_WAKERCV = 0x04000000,
+ INTR_PCCRX0TO = 0x02000000,
+ INTR_PCCRX1TO = 0x01000000,
+ INTR_PCCRX2TO = 0x00800000,
+ INTR_PCCRX3TO = 0x00400000,
+ INTR_PCCTXTO = 0x00200000,
+ INTR_PCCRX0 = 0x00100000,
+ INTR_PCCRX1 = 0x00080000,
+ INTR_PCCRX2 = 0x00040000,
+ INTR_PCCRX3 = 0x00020000,
+ INTR_PCCTX = 0x00010000,
+ INTR_RX3EMP = 0x00008000,
+ INTR_RX2EMP = 0x00004000,
+ INTR_RX1EMP = 0x00002000,
+ INTR_RX0EMP = 0x00001000,
+ INTR_RX3 = 0x00000800,
+ INTR_RX2 = 0x00000400,
+ INTR_RX1 = 0x00000200,
+ INTR_RX0 = 0x00000100,
+ INTR_TX7 = 0x00000080,
+ INTR_TX6 = 0x00000040,
+ INTR_TX5 = 0x00000020,
+ INTR_TX4 = 0x00000010,
+ INTR_TX3 = 0x00000008,
+ INTR_TX2 = 0x00000004,
+ INTR_TX1 = 0x00000002,
+ INTR_TX0 = 0x00000001,
+};
+
+static const u32 INTR_ENABLE = INTR_SWINTR |
+ INTR_TMINTR |
+ INTR_LINKCH |
+ INTR_PCCRX0TO |
+ INTR_PCCRX0 |
+ INTR_PCCTXTO |
+ INTR_PCCTX |
+ INTR_RX0EMP;
+
+/*
+ * PCC Control Registers
+ */
+enum jme_pccrx_masks {
+ PCCRXTO_MASK = 0xFFFF0000,
+ PCCRX_MASK = 0x0000FF00,
+};
+
+enum jme_pcctx_masks {
+ PCCTXTO_MASK = 0xFFFF0000,
+ PCCTX_MASK = 0x0000FF00,
+ PCCTX_QS_MASK = 0x000000FF,
+};
+
+enum jme_pccrx_shifts {
+ PCCRXTO_SHIFT = 16,
+ PCCRX_SHIFT = 8,
+};
+
+enum jme_pcctx_shifts {
+ PCCTXTO_SHIFT = 16,
+ PCCTX_SHIFT = 8,
+};
+
+enum jme_pcctx_bits {
+ PCCTXQ0_EN = 0x00000001,
+ PCCTXQ1_EN = 0x00000002,
+ PCCTXQ2_EN = 0x00000004,
+ PCCTXQ3_EN = 0x00000008,
+ PCCTXQ4_EN = 0x00000010,
+ PCCTXQ5_EN = 0x00000020,
+ PCCTXQ6_EN = 0x00000040,
+ PCCTXQ7_EN = 0x00000080,
+};
+
+/*
+ * Chip Mode Register
+ */
+enum jme_chipmode_bit_masks {
+ CM_FPGAVER_MASK = 0xFFFF0000,
+ CM_CHIPREV_MASK = 0x0000FF00,
+ CM_CHIPMODE_MASK = 0x0000000F,
+};
+
+enum jme_chipmode_shifts {
+ CM_FPGAVER_SHIFT = 16,
+ CM_CHIPREV_SHIFT = 8,
+};
+
+/*
+ * Aggressive Power Mode Control
+ */
+enum jme_apmc_bits {
+ JME_APMC_PCIE_SD_EN = 0x40000000,
+ JME_APMC_PSEUDO_HP_EN = 0x20000000,
+ JME_APMC_EPIEN = 0x04000000,
+ JME_APMC_EPIEN_CTRL = 0x03000000,
+};
+
+enum jme_apmc_values {
+ JME_APMC_EPIEN_CTRL_EN = 0x02000000,
+ JME_APMC_EPIEN_CTRL_DIS = 0x01000000,
+};
+
+#define APMC_PHP_SHUTDOWN_DELAY (10 * 1000 * 1000)
+
+#ifdef REG_DEBUG
+static char *MAC_REG_NAME[] = {
+ "JME_TXCS", "JME_TXDBA_LO", "JME_TXDBA_HI", "JME_TXQDC",
+ "JME_TXNDA", "JME_TXMCS", "JME_TXPFC", "JME_TXTRHD",
+ "JME_RXCS", "JME_RXDBA_LO", "JME_RXDBA_HI", "JME_RXQDC",
+ "JME_RXNDA", "JME_RXMCS", "JME_RXUMA_LO", "JME_RXUMA_HI",
+ "JME_RXMCHT_LO", "JME_RXMCHT_HI", "JME_WFODP", "JME_WFOI",
+ "JME_SMI", "JME_GHC", "UNKNOWN", "UNKNOWN",
+ "JME_PMCS"};
+
+static char *PE_REG_NAME[] = {
+ "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN",
+ "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN",
+ "UNKNOWN", "UNKNOWN", "JME_PHY_CS", "UNKNOWN",
+ "JME_PHY_LINK", "UNKNOWN", "UNKNOWN", "UNKNOWN",
+ "JME_SMBCSR", "JME_SMBINTF"};
+
+static char *MISC_REG_NAME[] = {
+ "JME_TMCSR", "JME_GPIO", "JME_GPREG0", "JME_GPREG1",
+ "JME_IEVE", "JME_IREQ", "JME_IENS", "JME_IENC",
+ "JME_PCCRX0", "JME_PCCRX1", "JME_PCCRX2", "JME_PCCRX3",
+ "JME_PCCTX0", "JME_CHIPMODE", "JME_SHBA_HI", "JME_SHBA_LO",
+ "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN",
+ "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN",
+ "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN",
+ "JME_TIMER1", "JME_TIMER2", "UNKNOWN", "JME_APMC",
+ "JME_PCCSRX0"};
+
+static inline void reg_dbg(const struct jme_adapter *jme,
+ const char *msg, u32 val, u32 reg)
+{
+ const char *regname;
+ switch (reg & 0xF00) {
+ case 0x000:
+ regname = MAC_REG_NAME[(reg & 0xFF) >> 2];
+ break;
+ case 0x400:
+ regname = PE_REG_NAME[(reg & 0xFF) >> 2];
+ break;
+ case 0x800:
+ regname = MISC_REG_NAME[(reg & 0xFF) >> 2];
+ break;
+ default:
+ regname = PE_REG_NAME[0];
+ }
+ printk(KERN_DEBUG "%s: %-20s %08x@%s\n", jme->dev->name,
+ msg, val, regname);
+}
+#else
+static inline void reg_dbg(const struct jme_adapter *jme,
+ const char *msg, u32 val, u32 reg) {}
+#endif
+
+/*
+ * Read/Write Memory-Mapped I/O Registers
+ */
+static inline u32 jread32(struct jme_adapter *jme, u32 reg)
+{
+ return readl(jme->regs + reg);
+}
+
+static inline void jwrite32(struct jme_adapter *jme, u32 reg, u32 val)
+{
+ reg_dbg(jme, "REG WRITE", val, reg);
+ writel(val, jme->regs + reg);
+ reg_dbg(jme, "VAL AFTER WRITE", readl(jme->regs + reg), reg);
+}
+
+static inline void jwrite32f(struct jme_adapter *jme, u32 reg, u32 val)
+{
+ /*
+ * Read after write should cause flush
+ */
+ reg_dbg(jme, "REG WRITE FLUSH", val, reg);
+ writel(val, jme->regs + reg);
+ readl(jme->regs + reg);
+ reg_dbg(jme, "VAL AFTER WRITE", readl(jme->regs + reg), reg);
+}
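+
+/*
+ * Usage sketch (illustrative): the flushing variant matters when a
+ * following delay must start only after the device has seen the write,
+ * e.g. around a software reset:
+ *
+ *	jwrite32f(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
+ *	udelay(2);
+ *	jwrite32f(jme, JME_GHC, jme->reg_ghc);
+ */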
+
+/*
+ * PHY Regs
+ */
+enum jme_phy_reg17_bit_masks {
+ PREG17_SPEED = 0xC000,
+ PREG17_DUPLEX = 0x2000,
+ PREG17_SPDRSV = 0x0800,
+ PREG17_LNKUP = 0x0400,
+ PREG17_MDI = 0x0040,
+};
+
+enum jme_phy_reg17_vals {
+ PREG17_SPEED_10M = 0x0000,
+ PREG17_SPEED_100M = 0x4000,
+ PREG17_SPEED_1000M = 0x8000,
+};
+
+#define BMSR_ANCOMP 0x0020
+
+/*
+ * Workaround
+ */
+static inline int is_buggy250(unsigned short device, u8 chiprev)
+{
+ return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11;
+}
+
+static inline int new_phy_power_ctrl(u8 chip_main_rev)
+{
+ return chip_main_rev >= 5;
+}
+
+/*
+ * Function prototypes
+ */
+static int jme_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd);
+static void jme_set_unicastaddr(struct net_device *netdev);
+static void jme_set_multi(struct net_device *netdev);
+
+#endif
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
new file mode 100644
index 000000000000..d8430f487b84
--- /dev/null
+++ b/drivers/net/ethernet/korina.c
@@ -0,0 +1,1250 @@
+/*
+ * Driver for the IDT RC32434 (Korina) on-chip ethernet controller.
+ *
+ * Copyright 2004 IDT Inc. (rischelp@idt.com)
+ * Copyright 2006 Felix Fietkau <nbd@openwrt.org>
+ * Copyright 2008 Florian Fainelli <florian@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Writing to a DMA status register:
+ *
+ * When writing to the status register, you should mask the bit you have
+ * been testing the status register with. Both Tx and Rx DMA registers
+ * should stick to this procedure.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/ctype.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+
+#include <asm/bootinfo.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <asm/mach-rc32434/rb.h>
+#include <asm/mach-rc32434/rc32434.h>
+#include <asm/mach-rc32434/eth.h>
+#include <asm/mach-rc32434/dma_v.h>
+
+#define DRV_NAME "korina"
+#define DRV_VERSION "0.10"
+#define DRV_RELDATE "04Mar2008"
+
+#define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \
+ ((dev)->dev_addr[1]))
+#define STATION_ADDRESS_LOW(dev) (((dev)->dev_addr[2] << 24) | \
+ ((dev)->dev_addr[3] << 16) | \
+ ((dev)->dev_addr[4] << 8) | \
+ ((dev)->dev_addr[5]))
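+
+/*
+ * Example: for MAC address 00:11:22:33:44:55 the macros above yield
+ * STATION_ADDRESS_HIGH = 0x00000011 and STATION_ADDRESS_LOW = 0x22334455.
+ */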
+
+#define MII_CLOCK 1250000 /* no more than 2.5MHz */
+
+/* the following must be powers of two */
+#define KORINA_NUM_RDS 64 /* number of receive descriptors */
+#define KORINA_NUM_TDS 64 /* number of transmit descriptors */
+
+/* KORINA_RBSIZE is the hardware's default maximum receive
+ * frame size in bytes. Having this hardcoded means that there
+ * is no support for MTU sizes greater than 1500. */
+#define KORINA_RBSIZE 1536 /* size of one resource buffer = Ether MTU */
+#define KORINA_RDS_MASK (KORINA_NUM_RDS - 1)
+#define KORINA_TDS_MASK (KORINA_NUM_TDS - 1)
+#define RD_RING_SIZE (KORINA_NUM_RDS * sizeof(struct dma_desc))
+#define TD_RING_SIZE (KORINA_NUM_TDS * sizeof(struct dma_desc))
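+
+/*
+ * Because the ring sizes are powers of two, descriptor indices wrap
+ * with a mask instead of a modulo, e.g.:
+ *
+ *	next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK;
+ */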
+
+#define TX_TIMEOUT (6000 * HZ / 1000)
+
+enum chain_status { desc_filled, desc_empty };
+#define IS_DMA_FINISHED(X) (((X) & (DMA_DESC_FINI)) != 0)
+#define IS_DMA_DONE(X) (((X) & (DMA_DESC_DONE)) != 0)
+#define RCVPKT_LENGTH(X) (((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT)
+
+/* Information that needs to be kept for each board. */
+struct korina_private {
+ struct eth_regs *eth_regs;
+ struct dma_reg *rx_dma_regs;
+ struct dma_reg *tx_dma_regs;
+ struct dma_desc *td_ring; /* transmit descriptor ring */
+ struct dma_desc *rd_ring; /* receive descriptor ring */
+
+ struct sk_buff *tx_skb[KORINA_NUM_TDS];
+ struct sk_buff *rx_skb[KORINA_NUM_RDS];
+
+ int rx_next_done;
+ int rx_chain_head;
+ int rx_chain_tail;
+ enum chain_status rx_chain_status;
+
+ int tx_next_done;
+ int tx_chain_head;
+ int tx_chain_tail;
+ enum chain_status tx_chain_status;
+ int tx_count;
+ int tx_full;
+
+ int rx_irq;
+ int tx_irq;
+ int ovr_irq;
+ int und_irq;
+
+ spinlock_t lock; /* NIC xmit lock */
+
+ int dma_halt_cnt;
+ int dma_run_cnt;
+ struct napi_struct napi;
+ struct timer_list media_check_timer;
+ struct mii_if_info mii_if;
+ struct work_struct restart_task;
+ struct net_device *dev;
+ int phy_addr;
+};
+
+extern unsigned int idt_cpu_freq;
+
+static inline void korina_start_dma(struct dma_reg *ch, u32 dma_addr)
+{
+ writel(0, &ch->dmandptr);
+ writel(dma_addr, &ch->dmadptr);
+}
+
+static inline void korina_abort_dma(struct net_device *dev,
+ struct dma_reg *ch)
+{
+ if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) {
+ writel(0x10, &ch->dmac);
+
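+		/*
+		 * Touch trans_start while busy-waiting for HALT so the
+		 * netdev tx watchdog does not fire during the spin.
+		 */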
+ while (!(readl(&ch->dmas) & DMA_STAT_HALT))
+ dev->trans_start = jiffies;
+
+ writel(0, &ch->dmas);
+ }
+
+ writel(0, &ch->dmadptr);
+ writel(0, &ch->dmandptr);
+}
+
+static inline void korina_chain_dma(struct dma_reg *ch, u32 dma_addr)
+{
+ writel(dma_addr, &ch->dmandptr);
+}
+
+static void korina_abort_tx(struct net_device *dev)
+{
+ struct korina_private *lp = netdev_priv(dev);
+
+ korina_abort_dma(dev, lp->tx_dma_regs);
+}
+
+static void korina_abort_rx(struct net_device *dev)
+{
+ struct korina_private *lp = netdev_priv(dev);
+
+ korina_abort_dma(dev, lp->rx_dma_regs);
+}
+
+static void korina_start_rx(struct korina_private *lp,
+ struct dma_desc *rd)
+{
+ korina_start_dma(lp->rx_dma_regs, CPHYSADDR(rd));
+}
+
+static void korina_chain_rx(struct korina_private *lp,
+ struct dma_desc *rd)
+{
+ korina_chain_dma(lp->rx_dma_regs, CPHYSADDR(rd));
+}
+
+/* transmit packet */
+static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct korina_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ u32 length;
+ u32 chain_prev, chain_next;
+ struct dma_desc *td;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ td = &lp->td_ring[lp->tx_chain_tail];
+
+ /* stop queue when full, drop pkts if queue already full */
+ if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
+ lp->tx_full = 1;
+
+ if (lp->tx_count == (KORINA_NUM_TDS - 2))
+ netif_stop_queue(dev);
+ else {
+ dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ lp->tx_count++;
+
+ lp->tx_skb[lp->tx_chain_tail] = skb;
+
+ length = skb->len;
+ dma_cache_wback((u32)skb->data, skb->len);
+
+ /* Setup the transmit descriptor. */
+ dma_cache_inv((u32) td, sizeof(*td));
+ td->ca = CPHYSADDR(skb->data);
+ chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK;
+ chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK;
+
+ if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
+ if (lp->tx_chain_status == desc_empty) {
+ /* Update tail */
+ td->control = DMA_COUNT(length) |
+ DMA_DESC_COF | DMA_DESC_IOF;
+ /* Move tail */
+ lp->tx_chain_tail = chain_next;
+ /* Write to NDPTR */
+ writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
+ &lp->tx_dma_regs->dmandptr);
+ /* Move head to tail */
+ lp->tx_chain_head = lp->tx_chain_tail;
+ } else {
+ /* Update tail */
+ td->control = DMA_COUNT(length) |
+ DMA_DESC_COF | DMA_DESC_IOF;
+ /* Link to prev */
+ lp->td_ring[chain_prev].control &=
+ ~DMA_DESC_COF;
+ /* Link to prev */
+ lp->td_ring[chain_prev].link = CPHYSADDR(td);
+ /* Move tail */
+ lp->tx_chain_tail = chain_next;
+ /* Write to NDPTR */
+ writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
+ &(lp->tx_dma_regs->dmandptr));
+ /* Move head to tail */
+ lp->tx_chain_head = lp->tx_chain_tail;
+ lp->tx_chain_status = desc_empty;
+ }
+ } else {
+ if (lp->tx_chain_status == desc_empty) {
+ /* Update tail */
+ td->control = DMA_COUNT(length) |
+ DMA_DESC_COF | DMA_DESC_IOF;
+ /* Move tail */
+ lp->tx_chain_tail = chain_next;
+ lp->tx_chain_status = desc_filled;
+ } else {
+ /* Update tail */
+ td->control = DMA_COUNT(length) |
+ DMA_DESC_COF | DMA_DESC_IOF;
+ lp->td_ring[chain_prev].control &=
+ ~DMA_DESC_COF;
+ lp->td_ring[chain_prev].link = CPHYSADDR(td);
+ lp->tx_chain_tail = chain_next;
+ }
+ }
+ dma_cache_wback((u32) td, sizeof(*td));
+
+ dev->trans_start = jiffies;
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static int mdio_read(struct net_device *dev, int mii_id, int reg)
+{
+ struct korina_private *lp = netdev_priv(dev);
+ int ret;
+
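+	/*
+	 * mii_id is recomputed from lp->rx_irq, ignoring the caller's
+	 * value (0x2c apparently identifying the second on-chip MAC).
+	 */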
+ mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);
+
+ writel(0, &lp->eth_regs->miimcfg);
+ writel(0, &lp->eth_regs->miimcmd);
+ writel(mii_id | reg, &lp->eth_regs->miimaddr);
+ writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
+
+ ret = (int)(readl(&lp->eth_regs->miimrdd));
+ return ret;
+}
+
+static void mdio_write(struct net_device *dev, int mii_id, int reg, int val)
+{
+ struct korina_private *lp = netdev_priv(dev);
+
+ mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);
+
+ writel(0, &lp->eth_regs->miimcfg);
+ writel(1, &lp->eth_regs->miimcmd);
+ writel(mii_id | reg, &lp->eth_regs->miimaddr);
+ writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
+ writel(val, &lp->eth_regs->miimwtd);
+}
+
+/* Ethernet Rx DMA interrupt */
+static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct korina_private *lp = netdev_priv(dev);
+ u32 dmas, dmasm;
+ irqreturn_t retval;
+
+ dmas = readl(&lp->rx_dma_regs->dmas);
+ if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
+ dmasm = readl(&lp->rx_dma_regs->dmasm);
+ writel(dmasm | (DMA_STAT_DONE |
+ DMA_STAT_HALT | DMA_STAT_ERR),
+ &lp->rx_dma_regs->dmasm);
+
+ napi_schedule(&lp->napi);
+
+ if (dmas & DMA_STAT_ERR)
+ printk(KERN_ERR "%s: DMA error\n", dev->name);
+
+ retval = IRQ_HANDLED;
+ } else
+ retval = IRQ_NONE;
+
+ return retval;
+}
+
+static int korina_rx(struct net_device *dev, int limit)
+{
+ struct korina_private *lp = netdev_priv(dev);
+ struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
+ struct sk_buff *skb, *skb_new;
+ u8 *pkt_buf;
+ u32 devcs, pkt_len, dmas;
+ int count;
+
+ dma_cache_inv((u32)rd, sizeof(*rd));
+
+ for (count = 0; count < limit; count++) {
+ skb = lp->rx_skb[lp->rx_next_done];
+ skb_new = NULL;
+
+ devcs = rd->devcs;
+
+ if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
+ break;
+
+ /* Update statistics counters */
+ if (devcs & ETH_RX_CRC)
+ dev->stats.rx_crc_errors++;
+ if (devcs & ETH_RX_LOR)
+ dev->stats.rx_length_errors++;
+ if (devcs & ETH_RX_LE)
+ dev->stats.rx_length_errors++;
+ if (devcs & ETH_RX_OVR)
+ dev->stats.rx_fifo_errors++;
+ if (devcs & ETH_RX_CV)
+ dev->stats.rx_frame_errors++;
+ if (devcs & ETH_RX_CES)
+ dev->stats.rx_length_errors++;
+ if (devcs & ETH_RX_MP)
+ dev->stats.multicast++;
+
+ if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
+ /* check that this is a whole packet
+ * WARNING: DMA_FD bit incorrectly set
+ * in Rc32434 (errata ref #077) */
+ dev->stats.rx_errors++;
+ dev->stats.rx_dropped++;
+ } else if ((devcs & ETH_RX_ROK)) {
+ pkt_len = RCVPKT_LENGTH(devcs);
+
+ /* must be the (first and) last
+ * descriptor then */
+ pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
+
+ /* invalidate the cache */
+ dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
+
+ /* Malloc up new buffer. */
+ skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
+
+ if (!skb_new)
+ break;
+ /* Do not count the CRC */
+ skb_put(skb, pkt_len - 4);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* Pass the packet to upper layers */
+ netif_receive_skb(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+
+			/* multicast frames were already counted above */
+ lp->rx_skb[lp->rx_next_done] = skb_new;
+ }
+
+ rd->devcs = 0;
+
+ /* Restore descriptor's curr_addr */
+ if (skb_new)
+ rd->ca = CPHYSADDR(skb_new->data);
+ else
+ rd->ca = CPHYSADDR(skb->data);
+
+ rd->control = DMA_COUNT(KORINA_RBSIZE) |
+ DMA_DESC_COD | DMA_DESC_IOD;
+ lp->rd_ring[(lp->rx_next_done - 1) &
+ KORINA_RDS_MASK].control &=
+ ~DMA_DESC_COD;
+
+ lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
+ dma_cache_wback((u32)rd, sizeof(*rd));
+ rd = &lp->rd_ring[lp->rx_next_done];
+ writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
+ }
+
+ dmas = readl(&lp->rx_dma_regs->dmas);
+
+ if (dmas & DMA_STAT_HALT) {
+ writel(~(DMA_STAT_HALT | DMA_STAT_ERR),
+ &lp->rx_dma_regs->dmas);
+
+ lp->dma_halt_cnt++;
+ rd->devcs = 0;
+ skb = lp->rx_skb[lp->rx_next_done];
+ rd->ca = CPHYSADDR(skb->data);
+ dma_cache_wback((u32)rd, sizeof(*rd));
+ korina_chain_rx(lp, rd);
+ }
+
+ return count;
+}
+
+static int korina_poll(struct napi_struct *napi, int budget)
+{
+ struct korina_private *lp =
+ container_of(napi, struct korina_private, napi);
+ struct net_device *dev = lp->dev;
+ int work_done;
+
+ work_done = korina_rx(dev, budget);
+ if (work_done < budget) {
+ napi_complete(napi);
+
+ writel(readl(&lp->rx_dma_regs->dmasm) &
+ ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
+ &lp->rx_dma_regs->dmasm);
+ }
+ return work_done;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+static void korina_multicast_list(struct net_device *dev)
+{
+ struct korina_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ struct netdev_hw_addr *ha;
+ u32 recognise = ETH_ARC_AB; /* always accept broadcasts */
+ int i;
+
+ /* Set promiscuous mode */
+ if (dev->flags & IFF_PROMISC)
+ recognise |= ETH_ARC_PRO;
+
+ else if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 4))
+ /* All multicast and broadcast */
+ recognise |= ETH_ARC_AM;
+
+ /* Build the hash table */
+ if (netdev_mc_count(dev) > 4) {
+ u16 hash_table[4];
+ u32 crc;
+
+ for (i = 0; i < 4; i++)
+ hash_table[i] = 0;
+
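+		/*
+		 * Use the top 6 bits of the little-endian CRC as the hash
+		 * index: bits [5:4] select one of the four 16-bit words,
+		 * bits [3:0] the bit within it (MSB first).
+		 */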
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
+ crc >>= 26;
+ hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+ }
+ /* Accept filtered multicast */
+ recognise |= ETH_ARC_AFM;
+
+ /* Fill the MAC hash tables with their values */
+ writel((u32)(hash_table[1] << 16 | hash_table[0]),
+ &lp->eth_regs->ethhash0);
+ writel((u32)(hash_table[3] << 16 | hash_table[2]),
+ &lp->eth_regs->ethhash1);
+ }
+
+ spin_lock_irqsave(&lp->lock, flags);
+ writel(recognise, &lp->eth_regs->etharc);
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+static void korina_tx(struct net_device *dev)
+{
+ struct korina_private *lp = netdev_priv(dev);
+ struct dma_desc *td = &lp->td_ring[lp->tx_next_done];
+ u32 devcs;
+ u32 dmas;
+
+ spin_lock(&lp->lock);
+
+ /* Process all desc that are done */
+ while (IS_DMA_FINISHED(td->control)) {
+ if (lp->tx_full == 1) {
+ netif_wake_queue(dev);
+ lp->tx_full = 0;
+ }
+
+ devcs = lp->td_ring[lp->tx_next_done].devcs;
+ if ((devcs & (ETH_TX_FD | ETH_TX_LD)) !=
+ (ETH_TX_FD | ETH_TX_LD)) {
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+
+ /* Should never happen */
+ printk(KERN_ERR "%s: split tx ignored\n",
+ dev->name);
+ } else if (devcs & ETH_TX_TOK) {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes +=
+ lp->tx_skb[lp->tx_next_done]->len;
+ } else {
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+
+ /* Underflow */
+ if (devcs & ETH_TX_UND)
+ dev->stats.tx_fifo_errors++;
+
+ /* Oversized frame */
+ if (devcs & ETH_TX_OF)
+ dev->stats.tx_aborted_errors++;
+
+ /* Excessive deferrals */
+ if (devcs & ETH_TX_ED)
+ dev->stats.tx_carrier_errors++;
+
+ /* Collisions: medium busy */
+ if (devcs & ETH_TX_EC)
+ dev->stats.collisions++;
+
+ /* Late collision */
+ if (devcs & ETH_TX_LC)
+ dev->stats.tx_window_errors++;
+ }
+
+ /* We must always free the original skb */
+ if (lp->tx_skb[lp->tx_next_done]) {
+ dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]);
+ lp->tx_skb[lp->tx_next_done] = NULL;
+ }
+
+ lp->td_ring[lp->tx_next_done].control = DMA_DESC_IOF;
+ lp->td_ring[lp->tx_next_done].devcs = ETH_TX_FD | ETH_TX_LD;
+ lp->td_ring[lp->tx_next_done].link = 0;
+ lp->td_ring[lp->tx_next_done].ca = 0;
+ lp->tx_count--;
+
+ /* Go on to next transmission */
+ lp->tx_next_done = (lp->tx_next_done + 1) & KORINA_TDS_MASK;
+ td = &lp->td_ring[lp->tx_next_done];
+
+ }
+
+ /* Clear the DMA status register */
+ dmas = readl(&lp->tx_dma_regs->dmas);
+ writel(~dmas, &lp->tx_dma_regs->dmas);
+
+ writel(readl(&lp->tx_dma_regs->dmasm) &
+ ~(DMA_STAT_FINI | DMA_STAT_ERR),
+ &lp->tx_dma_regs->dmasm);
+
+ spin_unlock(&lp->lock);
+}
+
+static irqreturn_t
+korina_tx_dma_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct korina_private *lp = netdev_priv(dev);
+ u32 dmas, dmasm;
+ irqreturn_t retval;
+
+ dmas = readl(&lp->tx_dma_regs->dmas);
+
+ if (dmas & (DMA_STAT_FINI | DMA_STAT_ERR)) {
+ dmasm = readl(&lp->tx_dma_regs->dmasm);
+ writel(dmasm | (DMA_STAT_FINI | DMA_STAT_ERR),
+ &lp->tx_dma_regs->dmasm);
+
+ korina_tx(dev);
+
+ if (lp->tx_chain_status == desc_filled &&
+ (readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
+ writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
+ &(lp->tx_dma_regs->dmandptr));
+ lp->tx_chain_status = desc_empty;
+ lp->tx_chain_head = lp->tx_chain_tail;
+ dev->trans_start = jiffies;
+ }
+ if (dmas & DMA_STAT_ERR)
+ printk(KERN_ERR "%s: DMA error\n", dev->name);
+
+ retval = IRQ_HANDLED;
+ } else
+ retval = IRQ_NONE;
+
+ return retval;
+}
+
+
+static void korina_check_media(struct net_device *dev, unsigned int init_media)
+{
+ struct korina_private *lp = netdev_priv(dev);
+
+ mii_check_media(&lp->mii_if, 0, init_media);
+
+ if (lp->mii_if.full_duplex)
+ writel(readl(&lp->eth_regs->ethmac2) | ETH_MAC2_FD,
+ &lp->eth_regs->ethmac2);
+ else
+ writel(readl(&lp->eth_regs->ethmac2) & ~ETH_MAC2_FD,
+ &lp->eth_regs->ethmac2);
+}
+
+static void korina_poll_media(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct korina_private *lp = netdev_priv(dev);
+
+ korina_check_media(dev, 0);
+ mod_timer(&lp->media_check_timer, jiffies + HZ);
+}
+
+static void korina_set_carrier(struct mii_if_info *mii)
+{
+ if (mii->force_media) {
+ /* autoneg is off: Link is always assumed to be up */
+ if (!netif_carrier_ok(mii->dev))
+ netif_carrier_on(mii->dev);
+	} else /* Let the MII library update carrier status */
+ korina_check_media(mii->dev, 0);
+}
+
+static int korina_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct korina_private *lp = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(rq);
+ int rc;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+ spin_lock_irq(&lp->lock);
+ rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL);
+ spin_unlock_irq(&lp->lock);
+ korina_set_carrier(&lp->mii_if);
+
+ return rc;
+}
+
+/* ethtool helpers */
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct korina_private *lp = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, lp->dev->name);
+}
+
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct korina_private *lp = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&lp->lock);
+ rc = mii_ethtool_gset(&lp->mii_if, cmd);
+ spin_unlock_irq(&lp->lock);
+
+ return rc;
+}
+
+static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct korina_private *lp = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&lp->lock);
+ rc = mii_ethtool_sset(&lp->mii_if, cmd);
+ spin_unlock_irq(&lp->lock);
+ korina_set_carrier(&lp->mii_if);
+
+ return rc;
+}
+
+static u32 netdev_get_link(struct net_device *dev)
+{
+ struct korina_private *lp = netdev_priv(dev);
+
+ return mii_link_ok(&lp->mii_if);
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_settings = netdev_get_settings,
+ .set_settings = netdev_set_settings,
+ .get_link = netdev_get_link,
+};
+
+static int korina_alloc_ring(struct net_device *dev)
+{
+ struct korina_private *lp = netdev_priv(dev);
+ struct sk_buff *skb;
+ int i;
+
+ /* Initialize the transmit descriptors */
+ for (i = 0; i < KORINA_NUM_TDS; i++) {
+ lp->td_ring[i].control = DMA_DESC_IOF;
+ lp->td_ring[i].devcs = ETH_TX_FD | ETH_TX_LD;
+ lp->td_ring[i].ca = 0;
+ lp->td_ring[i].link = 0;
+ }
+ lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail =
+ lp->tx_full = lp->tx_count = 0;
+ lp->tx_chain_status = desc_empty;
+
+ /* Initialize the receive descriptors */
+ for (i = 0; i < KORINA_NUM_RDS; i++) {
+ skb = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
+ if (!skb)
+ return -ENOMEM;
+ lp->rx_skb[i] = skb;
+ lp->rd_ring[i].control = DMA_DESC_IOD |
+ DMA_COUNT(KORINA_RBSIZE);
+ lp->rd_ring[i].devcs = 0;
+ lp->rd_ring[i].ca = CPHYSADDR(skb->data);
+ lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]);
+ }
+
+ /* loop back receive descriptors, so the last
+ * descriptor points to the first one */
+ lp->rd_ring[i - 1].link = CPHYSADDR(&lp->rd_ring[0]);
+ lp->rd_ring[i - 1].control |= DMA_DESC_COD;
+
+ lp->rx_next_done = 0;
+ lp->rx_chain_head = 0;
+ lp->rx_chain_tail = 0;
+ lp->rx_chain_status = desc_empty;
+
+ return 0;
+}
+
+static void korina_free_ring(struct net_device *dev)
+{
+ struct korina_private *lp = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < KORINA_NUM_RDS; i++) {
+ lp->rd_ring[i].control = 0;
+ if (lp->rx_skb[i])
+ dev_kfree_skb_any(lp->rx_skb[i]);
+ lp->rx_skb[i] = NULL;
+ }
+
+ for (i = 0; i < KORINA_NUM_TDS; i++) {
+ lp->td_ring[i].control = 0;
+ if (lp->tx_skb[i])
+ dev_kfree_skb_any(lp->tx_skb[i]);
+ lp->tx_skb[i] = NULL;
+ }
+}
+
+/*
+ * Initialize the RC32434 ethernet controller.
+ */
+static int korina_init(struct net_device *dev)
+{
+ struct korina_private *lp = netdev_priv(dev);
+
+ /* Disable DMA */
+ korina_abort_tx(dev);
+ korina_abort_rx(dev);
+
+ /* reset ethernet logic */
+ writel(0, &lp->eth_regs->ethintfc);
+ while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP))
+ dev->trans_start = jiffies;
+
+ /* Enable Ethernet Interface */
+ writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc);
+
+ /* Allocate rings */
+ if (korina_alloc_ring(dev)) {
+ printk(KERN_ERR "%s: descriptor allocation failed\n", dev->name);
+ korina_free_ring(dev);
+ return -ENOMEM;
+ }
+
+ writel(0, &lp->rx_dma_regs->dmas);
+ /* Start Rx DMA */
+ korina_start_rx(lp, &lp->rd_ring[0]);
+
+ writel(readl(&lp->tx_dma_regs->dmasm) &
+ ~(DMA_STAT_FINI | DMA_STAT_ERR),
+ &lp->tx_dma_regs->dmasm);
+ writel(readl(&lp->rx_dma_regs->dmasm) &
+ ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
+ &lp->rx_dma_regs->dmasm);
+
+ /* Accept only packets destined for this Ethernet device address */
+ writel(ETH_ARC_AB, &lp->eth_regs->etharc);
+
+ /* Set all Ether station address registers to their initial values */
+ writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0);
+ writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0);
+
+ writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1);
+ writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1);
+
+ writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2);
+ writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2);
+
+ writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3);
+ writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3);
+
+
+ /* Frame Length Checking, Pad Enable, CRC Enable, Full Duplex set */
+ writel(ETH_MAC2_PE | ETH_MAC2_CEN | ETH_MAC2_FD,
+ &lp->eth_regs->ethmac2);
+
+ /* Back-to-back inter-packet gap */
+ writel(0x15, &lp->eth_regs->ethipgt);
+ /* Non-back-to-back inter-packet gap */
+ writel(0x12, &lp->eth_regs->ethipgr);
+
+ /* Management Clock Prescaler Divisor
+ * Clock independent setting */
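+ /* The "+ 1) & ~1" below forces an even divisor, rounding odd
+ * quotients up; presumably the prescaler hardware requires an
+ * even value. */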
+ writel(((idt_cpu_freq) / MII_CLOCK + 1) & ~1,
+ &lp->eth_regs->ethmcp);
+
+ /* don't transmit until fifo contains 48b */
+ writel(48, &lp->eth_regs->ethfifott);
+
+ writel(ETH_MAC1_RE, &lp->eth_regs->ethmac1);
+
+ napi_enable(&lp->napi);
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/*
+ * Restart the RC32434 ethernet controller.
+ */
+static void korina_restart_task(struct work_struct *work)
+{
+ struct korina_private *lp = container_of(work,
+ struct korina_private, restart_task);
+ struct net_device *dev = lp->dev;
+
+ /*
+ * Disable interrupts
+ */
+ disable_irq(lp->rx_irq);
+ disable_irq(lp->tx_irq);
+ disable_irq(lp->ovr_irq);
+ disable_irq(lp->und_irq);
+
+ writel(readl(&lp->tx_dma_regs->dmasm) |
+ DMA_STAT_FINI | DMA_STAT_ERR,
+ &lp->tx_dma_regs->dmasm);
+ writel(readl(&lp->rx_dma_regs->dmasm) |
+ DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
+ &lp->rx_dma_regs->dmasm);
+
+ korina_free_ring(dev);
+
+ napi_disable(&lp->napi);
+
+ if (korina_init(dev) < 0) {
+ printk(KERN_ERR "%s: cannot restart device\n", dev->name);
+ return;
+ }
+ korina_multicast_list(dev);
+
+ enable_irq(lp->und_irq);
+ enable_irq(lp->ovr_irq);
+ enable_irq(lp->tx_irq);
+ enable_irq(lp->rx_irq);
+}
+
+static void korina_clear_and_restart(struct net_device *dev, u32 value)
+{
+ struct korina_private *lp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ writel(value, &lp->eth_regs->ethintfc);
+ schedule_work(&lp->restart_task);
+}
+
+/* Ethernet Tx Underflow interrupt */
+static irqreturn_t korina_und_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct korina_private *lp = netdev_priv(dev);
+ unsigned int und;
+
+ spin_lock(&lp->lock);
+
+ und = readl(&lp->eth_regs->ethintfc);
+
+ if (und & ETH_INT_FC_UND)
+ korina_clear_and_restart(dev, und & ~ETH_INT_FC_UND);
+
+ spin_unlock(&lp->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void korina_tx_timeout(struct net_device *dev)
+{
+ struct korina_private *lp = netdev_priv(dev);
+
+ schedule_work(&lp->restart_task);
+}
+
+/* Ethernet Rx Overflow interrupt */
+static irqreturn_t
+korina_ovr_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct korina_private *lp = netdev_priv(dev);
+ unsigned int ovr;
+
+ spin_lock(&lp->lock);
+ ovr = readl(&lp->eth_regs->ethintfc);
+
+ if (ovr & ETH_INT_FC_OVR)
+ korina_clear_and_restart(dev, ovr & ~ETH_INT_FC_OVR);
+
+ spin_unlock(&lp->lock);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void korina_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ korina_tx_dma_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static int korina_open(struct net_device *dev)
+{
+ struct korina_private *lp = netdev_priv(dev);
+ int ret;
+
+ /* Initialize */
+ ret = korina_init(dev);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: cannot open device\n", dev->name);
+ goto out;
+ }
+
+ /* Install the interrupt handlers
+ * that handle the Done, Finished,
+ * Ovr and Und events */
+ ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt,
+ IRQF_DISABLED, "Korina ethernet Rx", dev);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n",
+ dev->name, lp->rx_irq);
+ goto err_release;
+ }
+ ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt,
+ IRQF_DISABLED, "Korina ethernet Tx", dev);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n",
+ dev->name, lp->tx_irq);
+ goto err_free_rx_irq;
+ }
+
+ /* Install handler for overrun error. */
+ ret = request_irq(lp->ovr_irq, korina_ovr_interrupt,
+ IRQF_DISABLED, "Ethernet Overflow", dev);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: unable to get OVR IRQ %d\n",
+ dev->name, lp->ovr_irq);
+ goto err_free_tx_irq;
+ }
+
+ /* Install handler for underflow error. */
+ ret = request_irq(lp->und_irq, korina_und_interrupt,
+ IRQF_DISABLED, "Ethernet Underflow", dev);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: unable to get UND IRQ %d\n",
+ dev->name, lp->und_irq);
+ goto err_free_ovr_irq;
+ }
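+ /* start polling the PHY for link state changes immediately */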
+ mod_timer(&lp->media_check_timer, jiffies + 1);
+out:
+ return ret;
+
+err_free_ovr_irq:
+ free_irq(lp->ovr_irq, dev);
+err_free_tx_irq:
+ free_irq(lp->tx_irq, dev);
+err_free_rx_irq:
+ free_irq(lp->rx_irq, dev);
+err_release:
+ korina_free_ring(dev);
+ goto out;
+}
+
+static int korina_close(struct net_device *dev)
+{
+ struct korina_private *lp = netdev_priv(dev);
+ u32 tmp;
+
+ del_timer(&lp->media_check_timer);
+
+ /* Disable interrupts */
+ disable_irq(lp->rx_irq);
+ disable_irq(lp->tx_irq);
+ disable_irq(lp->ovr_irq);
+ disable_irq(lp->und_irq);
+
+ korina_abort_tx(dev);
+ tmp = readl(&lp->tx_dma_regs->dmasm);
+ tmp = tmp | DMA_STAT_FINI | DMA_STAT_ERR;
+ writel(tmp, &lp->tx_dma_regs->dmasm);
+
+ korina_abort_rx(dev);
+ tmp = readl(&lp->rx_dma_regs->dmasm);
+ tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
+ writel(tmp, &lp->rx_dma_regs->dmasm);
+
+ korina_free_ring(dev);
+
+ napi_disable(&lp->napi);
+
+ cancel_work_sync(&lp->restart_task);
+
+ free_irq(lp->rx_irq, dev);
+ free_irq(lp->tx_irq, dev);
+ free_irq(lp->ovr_irq, dev);
+ free_irq(lp->und_irq, dev);
+
+ return 0;
+}
+
+static const struct net_device_ops korina_netdev_ops = {
+ .ndo_open = korina_open,
+ .ndo_stop = korina_close,
+ .ndo_start_xmit = korina_send_packet,
+ .ndo_set_rx_mode = korina_multicast_list,
+ .ndo_tx_timeout = korina_tx_timeout,
+ .ndo_do_ioctl = korina_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = korina_poll_controller,
+#endif
+};
+
+static int korina_probe(struct platform_device *pdev)
+{
+ struct korina_device *bif = platform_get_drvdata(pdev);
+ struct korina_private *lp;
+ struct net_device *dev;
+ struct resource *r;
+ int rc;
+
+ dev = alloc_etherdev(sizeof(struct korina_private));
+ if (!dev) {
+ printk(KERN_ERR DRV_NAME ": alloc_etherdev failed\n");
+ return -ENOMEM;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ lp = netdev_priv(dev);
+
+ bif->dev = dev;
+ memcpy(dev->dev_addr, bif->mac, 6);
+
+ lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
+ lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
+ lp->ovr_irq = platform_get_irq_byname(pdev, "korina_ovr");
+ lp->und_irq = platform_get_irq_byname(pdev, "korina_und");
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
+ dev->base_addr = r->start;
+ lp->eth_regs = ioremap_nocache(r->start, resource_size(r));
+ if (!lp->eth_regs) {
+ printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
+ rc = -ENXIO;
+ goto probe_err_out;
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
+ lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r));
+ if (!lp->rx_dma_regs) {
+ printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
+ rc = -ENXIO;
+ goto probe_err_dma_rx;
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
+ lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r));
+ if (!lp->tx_dma_regs) {
+ printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
+ rc = -ENXIO;
+ goto probe_err_dma_tx;
+ }
+
+ lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
+ if (!lp->td_ring) {
+ printk(KERN_ERR DRV_NAME ": cannot allocate descriptors\n");
+ rc = -ENXIO;
+ goto probe_err_td_ring;
+ }
+
+ dma_cache_inv((unsigned long)(lp->td_ring),
+ TD_RING_SIZE + RD_RING_SIZE);
+
+ /* now convert TD_RING pointer to KSEG1 */
+ lp->td_ring = (struct dma_desc *)KSEG1ADDR(lp->td_ring);
+ lp->rd_ring = &lp->td_ring[KORINA_NUM_TDS];
+
+ spin_lock_init(&lp->lock);
+ /* just use the rx dma irq */
+ dev->irq = lp->rx_irq;
+ lp->dev = dev;
+
+ dev->netdev_ops = &korina_netdev_ops;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ netif_napi_add(dev, &lp->napi, korina_poll, 64);
+
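+ /* PHY 5 on the internal MII bus; bit 8 appears to select the
+ * second ethernet interface, identified here by its Rx IRQ. */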
+ lp->phy_addr = (((lp->rx_irq == 0x2c ? 1 : 0) << 8) | 0x05);
+ lp->mii_if.dev = dev;
+ lp->mii_if.mdio_read = mdio_read;
+ lp->mii_if.mdio_write = mdio_write;
+ lp->mii_if.phy_id = lp->phy_addr;
+ lp->mii_if.phy_id_mask = 0x1f;
+ lp->mii_if.reg_num_mask = 0x1f;
+
+ rc = register_netdev(dev);
+ if (rc < 0) {
+ printk(KERN_ERR DRV_NAME
+ ": cannot register net device: %d\n", rc);
+ goto probe_err_register;
+ }
+ setup_timer(&lp->media_check_timer, korina_poll_media, (unsigned long) dev);
+
+ INIT_WORK(&lp->restart_task, korina_restart_task);
+
+ printk(KERN_INFO "%s: " DRV_NAME "-" DRV_VERSION " " DRV_RELDATE "\n",
+ dev->name);
+out:
+ return rc;
+
+probe_err_register:
+ kfree(lp->td_ring);
+probe_err_td_ring:
+ iounmap(lp->tx_dma_regs);
+probe_err_dma_tx:
+ iounmap(lp->rx_dma_regs);
+probe_err_dma_rx:
+ iounmap(lp->eth_regs);
+probe_err_out:
+ free_netdev(dev);
+ goto out;
+}
+
+static int korina_remove(struct platform_device *pdev)
+{
+ struct korina_device *bif = platform_get_drvdata(pdev);
+ struct korina_private *lp = netdev_priv(bif->dev);
+
+ /* unregister first; the device must not be reachable while its
+ * register mappings are torn down */
+ unregister_netdev(bif->dev);
+
+ iounmap(lp->eth_regs);
+ iounmap(lp->rx_dma_regs);
+ iounmap(lp->tx_dma_regs);
+
+ platform_set_drvdata(pdev, NULL);
+ free_netdev(bif->dev);
+
+ return 0;
+}
+
+static struct platform_driver korina_driver = {
+ .driver.name = "korina",
+ .probe = korina_probe,
+ .remove = korina_remove,
+};
+
+static int __init korina_init_module(void)
+{
+ return platform_driver_register(&korina_driver);
+}
+
+static void korina_cleanup_module(void)
+{
+ return platform_driver_unregister(&korina_driver);
+}
+
+module_init(korina_init_module);
+module_exit(korina_cleanup_module);
+
+MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
+MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_DESCRIPTION("IDT RC32434 (Korina) Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
new file mode 100644
index 000000000000..6bb2b9506cad
--- /dev/null
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -0,0 +1,805 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <linux/in.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/ethtool.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <asm/checksum.h>
+
+#include <lantiq_soc.h>
+#include <xway_dma.h>
+#include <lantiq_platform.h>
+
+#define LTQ_ETOP_MDIO 0x11804
+#define MDIO_REQUEST 0x80000000
+#define MDIO_READ 0x40000000
+#define MDIO_ADDR_MASK 0x1f
+#define MDIO_ADDR_OFFSET 0x15
+#define MDIO_REG_MASK 0x1f
+#define MDIO_REG_OFFSET 0x10
+#define MDIO_VAL_MASK 0xffff
+
+#define PPE32_CGEN 0x800
+#define LQ_PPE32_ENET_MAC_CFG 0x1840
+
+#define LTQ_ETOP_ENETS0 0x11850
+#define LTQ_ETOP_MAC_DA0 0x1186C
+#define LTQ_ETOP_MAC_DA1 0x11870
+#define LTQ_ETOP_CFG 0x16020
+#define LTQ_ETOP_IGPLEN 0x16080
+
+#define MAX_DMA_CHAN 0x8
+#define MAX_DMA_CRC_LEN 0x4
+#define MAX_DMA_DATA_LEN 0x600
+
+#define ETOP_FTCU BIT(28)
+#define ETOP_MII_MASK 0xf
+#define ETOP_MII_NORMAL 0xd
+#define ETOP_MII_REVERSE 0xe
+#define ETOP_PLEN_UNDER 0x40
+#define ETOP_CGEN 0x800
+
+/* use 2 static channels for TX/RX */
+#define LTQ_ETOP_TX_CHANNEL 1
+#define LTQ_ETOP_RX_CHANNEL 6
+#define IS_TX(x) (x == LTQ_ETOP_TX_CHANNEL)
+#define IS_RX(x) (x == LTQ_ETOP_RX_CHANNEL)
+
+#define ltq_etop_r32(x) ltq_r32(ltq_etop_membase + (x))
+#define ltq_etop_w32(x, y) ltq_w32(x, ltq_etop_membase + (y))
+#define ltq_etop_w32_mask(x, y, z) \
+ ltq_w32_mask(x, y, ltq_etop_membase + (z))
+
+#define DRV_VERSION "1.0"
+
+static void __iomem *ltq_etop_membase;
+
+struct ltq_etop_chan {
+ int idx;
+ int tx_free;
+ struct net_device *netdev;
+ struct napi_struct napi;
+ struct ltq_dma_channel dma;
+ struct sk_buff *skb[LTQ_DESC_NUM];
+};
+
+struct ltq_etop_priv {
+ struct net_device *netdev;
+ struct ltq_eth_data *pldata;
+ struct resource *res;
+
+ struct mii_bus *mii_bus;
+ struct phy_device *phydev;
+
+ struct ltq_etop_chan ch[MAX_DMA_CHAN];
+ int tx_free[MAX_DMA_CHAN >> 1];
+
+ spinlock_t lock;
+};
+
+static int
+ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
+{
+ ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN);
+ if (!ch->skb[ch->dma.desc])
+ return -ENOMEM;
+ ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
+ ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN,
+ DMA_FROM_DEVICE);
+ ch->dma.desc_base[ch->dma.desc].addr =
+ CPHYSADDR(ch->skb[ch->dma.desc]->data);
+ ch->dma.desc_base[ch->dma.desc].ctl =
+ LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
+ MAX_DMA_DATA_LEN;
+ skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN);
+ return 0;
+}
+
+static void
+ltq_etop_hw_receive(struct ltq_etop_chan *ch)
+{
+ struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
+ struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
+ struct sk_buff *skb = ch->skb[ch->dma.desc];
+ int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - MAX_DMA_CRC_LEN;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (ltq_etop_alloc_skb(ch)) {
+ netdev_err(ch->netdev,
+ "failed to allocate new rx buffer, stopping DMA\n");
+ ltq_dma_close(&ch->dma);
+ }
+ ch->dma.desc++;
+ ch->dma.desc %= LTQ_DESC_NUM;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ skb_put(skb, len);
+ skb->dev = ch->netdev;
+ skb->protocol = eth_type_trans(skb, ch->netdev);
+ netif_receive_skb(skb);
+}
+
+static int
+ltq_etop_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct ltq_etop_chan *ch = container_of(napi,
+ struct ltq_etop_chan, napi);
+ int rx = 0;
+ int complete = 0;
+
+ while ((rx < budget) && !complete) {
+ struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
+
+ if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
+ ltq_etop_hw_receive(ch);
+ rx++;
+ } else {
+ complete = 1;
+ }
+ }
+ if (complete || !rx) {
+ napi_complete(&ch->napi);
+ ltq_dma_ack_irq(&ch->dma);
+ }
+ return rx;
+}
+
+static int
+ltq_etop_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct ltq_etop_chan *ch =
+ container_of(napi, struct ltq_etop_chan, napi);
+ struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
+ struct netdev_queue *txq =
+ netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ while ((ch->dma.desc_base[ch->tx_free].ctl &
+ (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
+ dev_kfree_skb_any(ch->skb[ch->tx_free]);
+ ch->skb[ch->tx_free] = NULL;
+ memset(&ch->dma.desc_base[ch->tx_free], 0,
+ sizeof(struct ltq_dma_desc));
+ ch->tx_free++;
+ ch->tx_free %= LTQ_DESC_NUM;
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (netif_tx_queue_stopped(txq))
+ netif_tx_start_queue(txq);
+ napi_complete(&ch->napi);
+ ltq_dma_ack_irq(&ch->dma);
+ return 1;
+}
+
+static irqreturn_t
+ltq_etop_dma_irq(int irq, void *_priv)
+{
+ struct ltq_etop_priv *priv = _priv;
+ int ch = irq - LTQ_DMA_CH0_INT;
+
+ napi_schedule(&priv->ch[ch].napi);
+ return IRQ_HANDLED;
+}
+
+static void
+ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
+{
+ struct ltq_etop_priv *priv = netdev_priv(dev);
+
+ ltq_dma_free(&ch->dma);
+ if (ch->dma.irq)
+ free_irq(ch->dma.irq, priv);
+ if (IS_RX(ch->idx)) {
+ int desc;
+ for (desc = 0; desc < LTQ_DESC_NUM; desc++)
+ dev_kfree_skb_any(ch->skb[desc]);
+ }
+}
+
+static void
+ltq_etop_hw_exit(struct net_device *dev)
+{
+ struct ltq_etop_priv *priv = netdev_priv(dev);
+ int i;
+
+ ltq_pmu_disable(PMU_PPE);
+ for (i = 0; i < MAX_DMA_CHAN; i++)
+ if (IS_TX(i) || IS_RX(i))
+ ltq_etop_free_channel(dev, &priv->ch[i]);
+}
+
+static int
+ltq_etop_hw_init(struct net_device *dev)
+{
+ struct ltq_etop_priv *priv = netdev_priv(dev);
+ int i;
+
+ ltq_pmu_enable(PMU_PPE);
+
+ switch (priv->pldata->mii_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ ltq_etop_w32_mask(ETOP_MII_MASK,
+ ETOP_MII_REVERSE, LTQ_ETOP_CFG);
+ break;
+
+ case PHY_INTERFACE_MODE_MII:
+ ltq_etop_w32_mask(ETOP_MII_MASK,
+ ETOP_MII_NORMAL, LTQ_ETOP_CFG);
+ break;
+
+ default:
+ netdev_err(dev, "unknown mii mode %d\n",
+ priv->pldata->mii_mode);
+ return -ENOTSUPP;
+ }
+
+ /* enable crc generation */
+ ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);
+
+ ltq_dma_init_port(DMA_PORT_ETOP);
+
+ for (i = 0; i < MAX_DMA_CHAN; i++) {
+ int irq = LTQ_DMA_CH0_INT + i;
+ struct ltq_etop_chan *ch = &priv->ch[i];
+
+ ch->idx = ch->dma.nr = i;
+
+ if (IS_TX(i)) {
+ ltq_dma_alloc_tx(&ch->dma);
+ request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
+ "etop_tx", priv);
+ } else if (IS_RX(i)) {
+ ltq_dma_alloc_rx(&ch->dma);
+ for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
+ ch->dma.desc++)
+ if (ltq_etop_alloc_skb(ch))
+ return -ENOMEM;
+ ch->dma.desc = 0;
+ request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
+ "etop_rx", priv);
+ }
+ ch->dma.irq = irq;
+ }
+ return 0;
+}
+
+static void
+ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, "Lantiq ETOP");
+ strcpy(info->bus_info, "internal");
+ strcpy(info->version, DRV_VERSION);
+}
+
+static int
+ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ltq_etop_priv *priv = netdev_priv(dev);
+
+ return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int
+ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ltq_etop_priv *priv = netdev_priv(dev);
+
+ return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static int
+ltq_etop_nway_reset(struct net_device *dev)
+{
+ struct ltq_etop_priv *priv = netdev_priv(dev);
+
+ return phy_start_aneg(priv->phydev);
+}
+
+static const struct ethtool_ops ltq_etop_ethtool_ops = {
+ .get_drvinfo = ltq_etop_get_drvinfo,
+ .get_settings = ltq_etop_get_settings,
+ .set_settings = ltq_etop_set_settings,
+ .nway_reset = ltq_etop_nway_reset,
+};
+
+static int
+ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data)
+{
+ u32 val = MDIO_REQUEST |
+ ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
+ ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) |
+ phy_data;
+
+ while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
+ ;
+ ltq_etop_w32(val, LTQ_ETOP_MDIO);
+ return 0;
+}
+
+static int
+ltq_etop_mdio_rd(struct mii_bus *bus, int phy_addr, int phy_reg)
+{
+ u32 val = MDIO_REQUEST | MDIO_READ |
+ ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
+ ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET);
+
+ while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
+ ;
+ ltq_etop_w32(val, LTQ_ETOP_MDIO);
+ while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
+ ;
+ val = ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_VAL_MASK;
+ return val;
+}
+
+static void
+ltq_etop_mdio_link(struct net_device *dev)
+{
+ /* nothing to do */
+}
+
+static int
+ltq_etop_mdio_probe(struct net_device *dev)
+{
+ struct ltq_etop_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
+ int phy_addr;
+
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+ if (priv->mii_bus->phy_map[phy_addr]) {
+ phydev = priv->mii_bus->phy_map[phy_addr];
+ break;
+ }
+ }
+
+ if (!phydev) {
+ netdev_err(dev, "no PHY found\n");
+ return -ENODEV;
+ }
+
+ phydev = phy_connect(dev, dev_name(&phydev->dev), &ltq_etop_mdio_link,
+ 0, priv->pldata->mii_mode);
+
+ if (IS_ERR(phydev)) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ return PTR_ERR(phydev);
+ }
+
+ phydev->supported &= (SUPPORTED_10baseT_Half
+ | SUPPORTED_10baseT_Full
+ | SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full
+ | SUPPORTED_Autoneg
+ | SUPPORTED_MII
+ | SUPPORTED_TP);
+
+ phydev->advertising = phydev->supported;
+ priv->phydev = phydev;
+ pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n",
+ dev->name, phydev->drv->name,
+ dev_name(&phydev->dev), phydev->irq);
+
+ return 0;
+}
+
+static int
+ltq_etop_mdio_init(struct net_device *dev)
+{
+ struct ltq_etop_priv *priv = netdev_priv(dev);
+ int i;
+ int err;
+
+ priv->mii_bus = mdiobus_alloc();
+ if (!priv->mii_bus) {
+ netdev_err(dev, "failed to allocate mii bus\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ priv->mii_bus->priv = dev;
+ priv->mii_bus->read = ltq_etop_mdio_rd;
+ priv->mii_bus->write = ltq_etop_mdio_wr;
+ priv->mii_bus->name = "ltq_mii";
+ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
+ priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!priv->mii_bus->irq) {
+ err = -ENOMEM;
+ goto err_out_free_mdiobus;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; ++i)
+ priv->mii_bus->irq[i] = PHY_POLL;
+
+ if (mdiobus_register(priv->mii_bus)) {
+ err = -ENXIO;
+ goto err_out_free_mdio_irq;
+ }
+
+ if (ltq_etop_mdio_probe(dev)) {
+ err = -ENXIO;
+ goto err_out_unregister_bus;
+ }
+ return 0;
+
+err_out_unregister_bus:
+ mdiobus_unregister(priv->mii_bus);
+err_out_free_mdio_irq:
+ kfree(priv->mii_bus->irq);
+err_out_free_mdiobus:
+ mdiobus_free(priv->mii_bus);
+err_out:
+ return err;
+}
+
+static void
+ltq_etop_mdio_cleanup(struct net_device *dev)
+{
+ struct ltq_etop_priv *priv = netdev_priv(dev);
+
+ phy_disconnect(priv->phydev);
+ mdiobus_unregister(priv->mii_bus);
+ kfree(priv->mii_bus->irq);
+ mdiobus_free(priv->mii_bus);
+}
+
+static int
+ltq_etop_open(struct net_device *dev)
+{
+ struct ltq_etop_priv *priv = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < MAX_DMA_CHAN; i++) {
+ struct ltq_etop_chan *ch = &priv->ch[i];
+
+ if (!IS_TX(i) && !IS_RX(i))
+ continue;
+ ltq_dma_open(&ch->dma);
+ napi_enable(&ch->napi);
+ }
+ phy_start(priv->phydev);
+ netif_tx_start_all_queues(dev);
+ return 0;
+}
+
+static int
+ltq_etop_stop(struct net_device *dev)
+{
+ struct ltq_etop_priv *priv = netdev_priv(dev);
+ int i;
+
+ netif_tx_stop_all_queues(dev);
+ phy_stop(priv->phydev);
+ for (i = 0; i < MAX_DMA_CHAN; i++) {
+ struct ltq_etop_chan *ch = &priv->ch[i];
+
+ if (!IS_RX(i) && !IS_TX(i))
+ continue;
+ napi_disable(&ch->napi);
+ ltq_dma_close(&ch->dma);
+ }
+ return 0;
+}
+
+static int
+ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ int queue = skb_get_queue_mapping(skb);
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
+ struct ltq_etop_priv *priv = netdev_priv(dev);
+ struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1];
+ struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
+ int len;
+ unsigned long flags;
+ u32 byte_offset;
+
+ len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
+
+ if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
+ dev_kfree_skb_any(skb);
+ netdev_err(dev, "tx ring full\n");
+ netif_tx_stop_queue(txq);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* dma needs to start on a 16 byte aligned address */
+ byte_offset = CPHYSADDR(skb->data) % 16;
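+ /* the mapped address is rounded down by byte_offset below, and
+ * LTQ_DMA_TX_OFFSET() tells the engine how many bytes to skip */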
+ ch->skb[ch->dma.desc] = skb;
+
+ dev->trans_start = jiffies;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
+ DMA_TO_DEVICE)) - byte_offset;
+ wmb();
+ desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
+ LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
+ ch->dma.desc++;
+ ch->dma.desc %= LTQ_DESC_NUM;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
+ netif_tx_stop_queue(txq);
+
+ return NETDEV_TX_OK;
+}
+
+static int
+ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int ret = eth_change_mtu(dev, new_mtu);
+
+ if (!ret) {
+ struct ltq_etop_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu,
+ LTQ_ETOP_IGPLEN);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ return ret;
+}
+
+static int
+ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct ltq_etop_priv *priv = netdev_priv(dev);
+
+ /* TODO: mii-tool reports "No MII transceiver present!" ?! */
+ return phy_mii_ioctl(priv->phydev, rq, cmd);
+}
+
+static int
+ltq_etop_set_mac_address(struct net_device *dev, void *p)
+{
+ int ret = eth_mac_addr(dev, p);
+
+ if (!ret) {
+ struct ltq_etop_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ /* store the mac for the unicast filter */
+ spin_lock_irqsave(&priv->lock, flags);
+ ltq_etop_w32(*((u32 *)dev->dev_addr), LTQ_ETOP_MAC_DA0);
+ ltq_etop_w32(*((u16 *)&dev->dev_addr[4]) << 16,
+ LTQ_ETOP_MAC_DA1);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ return ret;
+}
+
+static void
+ltq_etop_set_multicast_list(struct net_device *dev)
+{
+ struct ltq_etop_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ /* ensure that the unicast filter is not enabled in promiscuous mode */
+ spin_lock_irqsave(&priv->lock, flags);
+ if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI))
+ ltq_etop_w32_mask(ETOP_FTCU, 0, LTQ_ETOP_ENETS0);
+ else
+ ltq_etop_w32_mask(0, ETOP_FTCU, LTQ_ETOP_ENETS0);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static u16
+ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ /* we are currently only using the first queue */
+ return 0;
+}
+
+static int
+ltq_etop_init(struct net_device *dev)
+{
+ struct ltq_etop_priv *priv = netdev_priv(dev);
+ struct sockaddr mac;
+ int err;
+
+ ether_setup(dev);
+ dev->watchdog_timeo = 10 * HZ;
+ err = ltq_etop_hw_init(dev);
+ if (err)
+ goto err_hw;
+ ltq_etop_change_mtu(dev, 1500);
+
+ memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
+ if (!is_valid_ether_addr(mac.sa_data)) {
+ pr_warn("etop: invalid MAC, using random\n");
+ random_ether_addr(mac.sa_data);
+ }
+
+ err = ltq_etop_set_mac_address(dev, &mac);
+ if (err)
+ goto err_netdev;
+ ltq_etop_set_multicast_list(dev);
+ err = ltq_etop_mdio_init(dev);
+ if (err)
+ goto err_netdev;
+ return 0;
+
+err_netdev:
+ unregister_netdev(dev);
+ free_netdev(dev);
+err_hw:
+ ltq_etop_hw_exit(dev);
+ return err;
+}
+
+static void
+ltq_etop_tx_timeout(struct net_device *dev)
+{
+ int err;
+
+ ltq_etop_hw_exit(dev);
+ err = ltq_etop_hw_init(dev);
+ if (err)
+ goto err_hw;
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+ return;
+
+err_hw:
+ ltq_etop_hw_exit(dev);
+ netdev_err(dev, "failed to restart etop after TX timeout\n");
+}
+
+static const struct net_device_ops ltq_eth_netdev_ops = {
+ .ndo_open = ltq_etop_open,
+ .ndo_stop = ltq_etop_stop,
+ .ndo_start_xmit = ltq_etop_tx,
+ .ndo_change_mtu = ltq_etop_change_mtu,
+ .ndo_do_ioctl = ltq_etop_ioctl,
+ .ndo_set_mac_address = ltq_etop_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = ltq_etop_set_multicast_list,
+ .ndo_select_queue = ltq_etop_select_queue,
+ .ndo_init = ltq_etop_init,
+ .ndo_tx_timeout = ltq_etop_tx_timeout,
+};
+
+static int __init
+ltq_etop_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct ltq_etop_priv *priv;
+ struct resource *res;
+ int err;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get etop resource\n");
+ err = -ENOENT;
+ goto err_out;
+ }
+
+ res = devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), dev_name(&pdev->dev));
+ if (!res) {
+ dev_err(&pdev->dev, "failed to request etop resource\n");
+ err = -EBUSY;
+ goto err_out;
+ }
+
+ ltq_etop_membase = devm_ioremap_nocache(&pdev->dev,
+ res->start, resource_size(res));
+ if (!ltq_etop_membase) {
+ dev_err(&pdev->dev, "failed to remap etop engine %d\n",
+ pdev->id);
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ strcpy(dev->name, "eth%d");
+ dev->netdev_ops = &ltq_eth_netdev_ops;
+ dev->ethtool_ops = &ltq_etop_ethtool_ops;
+ priv = netdev_priv(dev);
+ priv->res = res;
+ priv->pldata = dev_get_platdata(&pdev->dev);
+ priv->netdev = dev;
+ spin_lock_init(&priv->lock);
+
+ for (i = 0; i < MAX_DMA_CHAN; i++) {
+ if (IS_TX(i))
+ netif_napi_add(dev, &priv->ch[i].napi,
+ ltq_etop_poll_tx, 8);
+ else if (IS_RX(i))
+ netif_napi_add(dev, &priv->ch[i].napi,
+ ltq_etop_poll_rx, 32);
+ priv->ch[i].netdev = dev;
+ }
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_free;
+
+ platform_set_drvdata(pdev, dev);
+ return 0;
+
+err_free:
+ free_netdev(dev);
+err_out:
+ return err;
+}
+
+static int __devexit
+ltq_etop_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+
+ if (dev) {
+ netif_tx_stop_all_queues(dev);
+ ltq_etop_hw_exit(dev);
+ ltq_etop_mdio_cleanup(dev);
+ unregister_netdev(dev);
+ }
+ return 0;
+}
+
+static struct platform_driver ltq_mii_driver = {
+ .remove = __devexit_p(ltq_etop_remove),
+ .driver = {
+ .name = "ltq_etop",
+ .owner = THIS_MODULE,
+ },
+};
+
+int __init
+init_ltq_etop(void)
+{
+ int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);
+
+ if (ret)
+ pr_err("ltq_etop: Error registering platform driver!\n");
+ return ret;
+}
+
+static void __exit
+exit_ltq_etop(void)
+{
+ platform_driver_unregister(&ltq_mii_driver);
+}
+
+module_init(init_ltq_etop);
+module_exit(exit_ltq_etop);
+
+MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
+MODULE_DESCRIPTION("Lantiq SoC ETOP");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
new file mode 100644
index 000000000000..0029934748bc
--- /dev/null
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -0,0 +1,111 @@
+#
+# Marvell device configuration
+#
+
+config NET_VENDOR_MARVELL
+ bool "Marvell devices"
+ default y
+ depends on PCI || CPU_PXA168 || MV64X60 || PPC32 || PLAT_ORION || INET
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Marvell devices. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+if NET_VENDOR_MARVELL
+
+config MV643XX_ETH
+ tristate "Marvell Discovery (643XX) and Orion ethernet support"
+ depends on (MV64X60 || PPC32 || PLAT_ORION) && INET
+ select INET_LRO
+ select PHYLIB
+ ---help---
+ This driver supports the gigabit ethernet MACs in the
+ Marvell Discovery PPC/MIPS chipset family (MV643XX) and
+ in the Marvell Orion ARM SoC family.
+
+ Some boards that use the Discovery chipset are the Momenco
+ Ocelot C and Jaguar ATX and Pegasos II.
+
+config PXA168_ETH
+ tristate "Marvell pxa168 ethernet support"
+ depends on CPU_PXA168
+ select PHYLIB
+ ---help---
+ This driver supports the pxa168 Ethernet ports.
+
+ To compile this driver as a module, choose M here. The module
+ will be called pxa168_eth.
+
+config SKGE
+ tristate "Marvell Yukon Gigabit Ethernet support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This driver supports the Marvell Yukon or SysKonnect SK-98xx/SK-95xx
+ and related Gigabit Ethernet adapters. It is a new smaller driver
+ with better performance and more complete ethtool support.
+
+ It does not support the link failover and network management
+ features that "portable" vendor supplied sk98lin driver does.
+
+ This driver supports adapters based on the original Yukon chipset:
+ Marvell 88E8001, Belkin F5D5005, CNet GigaCard, DLink DGE-530T,
+ Linksys EG1032/EG1064, 3Com 3C940/3C940B, SysKonnect SK-9871/9872.
+
+ It does not support the newer Yukon2 chipset: a separate driver,
+ sky2, is provided for these adapters.
+
+ To compile this driver as a module, choose M here: the module
+ will be called skge. This is recommended.
+
+config SKGE_DEBUG
+ bool "Debugging interface"
+ depends on SKGE && DEBUG_FS
+ ---help---
+ This option adds the ability to dump driver state for debugging.
+ The file /sys/kernel/debug/skge/ethX displays the state of the internal
+ transmit and receive rings.
+
+ If unsure, say N.
+
+config SKGE_GENESIS
+ bool "Support for older SysKonnect Genesis boards"
+ depends on SKGE
+ ---help---
+ This enables support for the older and uncommon SysKonnect Genesis
+ chips, which support MII via an external transceiver, instead of
+ an internal one. Disabling this option will save some memory
+ by making code smaller. If unsure say Y.
+
+config SKY2
+ tristate "Marvell Yukon 2 support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This driver supports Gigabit Ethernet adapters based on the
+ Marvell Yukon 2 chipset:
+ Marvell 88E8021/88E8022/88E8035/88E8036/88E8038/88E8050/88E8052/
+ 88E8053/88E8055/88E8061/88E8062, SysKonnect SK-9E21D/SK-9S21
+
+ There is a companion driver for the older Marvell Yukon and
+ SysKonnect Genesis based adapters: skge.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sky2. This is recommended.
+
+config SKY2_DEBUG
+ bool "Debugging interface"
+ depends on SKY2 && DEBUG_FS
+ ---help---
+ This option adds the ability to dump driver state for debugging.
+ The file /sys/kernel/debug/sky2/ethX displays the state of the internal
+ transmit and receive rings.
+
+ If unsure, say N.
+
+endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
new file mode 100644
index 000000000000..57e3234a37ba
--- /dev/null
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Marvell device drivers.
+#
+
+obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
+obj-$(CONFIG_SKGE) += skge.o
+obj-$(CONFIG_SKY2) += sky2.o
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
new file mode 100644
index 000000000000..7325737fe93b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -0,0 +1,3022 @@
+/*
+ * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
+ * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
+ *
+ * Based on the 64360 driver from:
+ * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
+ * Rabeeh Khoury <rabeeh@marvell.com>
+ *
+ * Copyright (C) 2003 PMC-Sierra, Inc.,
+ * written by Manish Lachwani
+ *
+ * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
+ *
+ * Copyright (C) 2004-2006 MontaVista Software, Inc.
+ * Dale Farnsworth <dale@farnsworth.org>
+ *
+ * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
+ * <sjhill@realitydiluted.com>
+ *
+ * Copyright (C) 2007-2008 Marvell Semiconductor
+ * Lennert Buytenhek <buytenh@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/phy.h>
+#include <linux/mv643xx_eth.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/inet_lro.h>
+#include <linux/slab.h>
+#include <asm/system.h>
+
+static char mv643xx_eth_driver_name[] = "mv643xx_eth";
+static char mv643xx_eth_driver_version[] = "1.4";
+
+
+/*
+ * Registers shared between all ports.
+ */
+#define PHY_ADDR 0x0000
+#define SMI_REG 0x0004
+#define SMI_BUSY 0x10000000
+#define SMI_READ_VALID 0x08000000
+#define SMI_OPCODE_READ 0x04000000
+#define SMI_OPCODE_WRITE 0x00000000
+#define ERR_INT_CAUSE 0x0080
+#define ERR_INT_SMI_DONE 0x00000010
+#define ERR_INT_MASK 0x0084
+#define WINDOW_BASE(w) (0x0200 + ((w) << 3))
+#define WINDOW_SIZE(w) (0x0204 + ((w) << 3))
+#define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2))
+#define WINDOW_BAR_ENABLE 0x0290
+#define WINDOW_PROTECT(w) (0x0294 + ((w) << 4))
+
+/*
+ * Main per-port registers. These live at offset 0x0400 for
+ * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
+ */
+#define PORT_CONFIG 0x0000
+#define UNICAST_PROMISCUOUS_MODE 0x00000001
+#define PORT_CONFIG_EXT 0x0004
+#define MAC_ADDR_LOW 0x0014
+#define MAC_ADDR_HIGH 0x0018
+#define SDMA_CONFIG 0x001c
+#define TX_BURST_SIZE_16_64BIT 0x01000000
+#define TX_BURST_SIZE_4_64BIT 0x00800000
+#define BLM_TX_NO_SWAP 0x00000020
+#define BLM_RX_NO_SWAP 0x00000010
+#define RX_BURST_SIZE_16_64BIT 0x00000008
+#define RX_BURST_SIZE_4_64BIT 0x00000004
+#define PORT_SERIAL_CONTROL 0x003c
+#define SET_MII_SPEED_TO_100 0x01000000
+#define SET_GMII_SPEED_TO_1000 0x00800000
+#define SET_FULL_DUPLEX_MODE 0x00200000
+#define MAX_RX_PACKET_9700BYTE 0x000a0000
+#define DISABLE_AUTO_NEG_SPEED_GMII 0x00002000
+#define DO_NOT_FORCE_LINK_FAIL 0x00000400
+#define SERIAL_PORT_CONTROL_RESERVED 0x00000200
+#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL 0x00000008
+#define DISABLE_AUTO_NEG_FOR_DUPLEX 0x00000004
+#define FORCE_LINK_PASS 0x00000002
+#define SERIAL_PORT_ENABLE 0x00000001
+#define PORT_STATUS 0x0044
+#define TX_FIFO_EMPTY 0x00000400
+#define TX_IN_PROGRESS 0x00000080
+#define PORT_SPEED_MASK 0x00000030
+#define PORT_SPEED_1000 0x00000010
+#define PORT_SPEED_100 0x00000020
+#define PORT_SPEED_10 0x00000000
+#define FLOW_CONTROL_ENABLED 0x00000008
+#define FULL_DUPLEX 0x00000004
+#define LINK_UP 0x00000002
+#define TXQ_COMMAND 0x0048
+#define TXQ_FIX_PRIO_CONF 0x004c
+#define TX_BW_RATE 0x0050
+#define TX_BW_MTU 0x0058
+#define TX_BW_BURST 0x005c
+#define INT_CAUSE 0x0060
+#define INT_TX_END 0x07f80000
+#define INT_TX_END_0 0x00080000
+#define INT_RX 0x000003fc
+#define INT_RX_0 0x00000004
+#define INT_EXT 0x00000002
+#define INT_CAUSE_EXT 0x0064
+#define INT_EXT_LINK_PHY 0x00110000
+#define INT_EXT_TX 0x000000ff
+#define INT_MASK 0x0068
+#define INT_MASK_EXT 0x006c
+#define TX_FIFO_URGENT_THRESHOLD 0x0074
+#define TXQ_FIX_PRIO_CONF_MOVED 0x00dc
+#define TX_BW_RATE_MOVED 0x00e0
+#define TX_BW_MTU_MOVED 0x00e8
+#define TX_BW_BURST_MOVED 0x00ec
+#define RXQ_CURRENT_DESC_PTR(q) (0x020c + ((q) << 4))
+#define RXQ_COMMAND 0x0280
+#define TXQ_CURRENT_DESC_PTR(q) (0x02c0 + ((q) << 2))
+#define TXQ_BW_TOKENS(q) (0x0300 + ((q) << 4))
+#define TXQ_BW_CONF(q) (0x0304 + ((q) << 4))
+#define TXQ_BW_WRR_CONF(q) (0x0308 + ((q) << 4))
+
+/*
+ * Misc per-port registers.
+ */
+#define MIB_COUNTERS(p) (0x1000 + ((p) << 7))
+#define SPECIAL_MCAST_TABLE(p) (0x1400 + ((p) << 10))
+#define OTHER_MCAST_TABLE(p) (0x1500 + ((p) << 10))
+#define UNICAST_TABLE(p) (0x1600 + ((p) << 10))
+
+
+/*
+ * SDMA configuration register default value.
+ */
+#if defined(__BIG_ENDIAN)
+#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
+ (RX_BURST_SIZE_4_64BIT | \
+ TX_BURST_SIZE_4_64BIT)
+#elif defined(__LITTLE_ENDIAN)
+#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
+ (RX_BURST_SIZE_4_64BIT | \
+ BLM_RX_NO_SWAP | \
+ BLM_TX_NO_SWAP | \
+ TX_BURST_SIZE_4_64BIT)
+#else
+#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
+#endif
+
+
+/*
+ * Misc definitions.
+ */
+#define DEFAULT_RX_QUEUE_SIZE 128
+#define DEFAULT_TX_QUEUE_SIZE 256
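+/* extra headroom reserved on rx buffers, apparently so that skb->data
+ * ends up cache-line aligned for DMA; consumed by rxq_refill() */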
+#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
+
+
+/*
+ * RX/TX descriptors.
+ */
+#if defined(__BIG_ENDIAN)
+struct rx_desc {
+ u16 byte_cnt; /* Descriptor buffer byte count */
+ u16 buf_size; /* Buffer size */
+ u32 cmd_sts; /* Descriptor command status */
+ u32 next_desc_ptr; /* Next descriptor pointer */
+ u32 buf_ptr; /* Descriptor buffer pointer */
+};
+
+struct tx_desc {
+ u16 byte_cnt; /* buffer byte count */
+ u16 l4i_chk; /* CPU provided TCP checksum */
+ u32 cmd_sts; /* Command/status field */
+ u32 next_desc_ptr; /* Pointer to next descriptor */
+ u32 buf_ptr; /* pointer to buffer for this descriptor*/
+};
+#elif defined(__LITTLE_ENDIAN)
+struct rx_desc {
+ u32 cmd_sts; /* Descriptor command status */
+ u16 buf_size; /* Buffer size */
+ u16 byte_cnt; /* Descriptor buffer byte count */
+ u32 buf_ptr; /* Descriptor buffer pointer */
+ u32 next_desc_ptr; /* Next descriptor pointer */
+};
+
+struct tx_desc {
+ u32 cmd_sts; /* Command/status field */
+ u16 l4i_chk; /* CPU provided TCP checksum */
+ u16 byte_cnt; /* buffer byte count */
+ u32 buf_ptr; /* pointer to buffer for this descriptor*/
+ u32 next_desc_ptr; /* Pointer to next descriptor */
+};
+#else
+#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
+#endif
+
+/* RX & TX descriptor command */
+#define BUFFER_OWNED_BY_DMA 0x80000000
+
+/* RX & TX descriptor status */
+#define ERROR_SUMMARY 0x00000001
+
+/* RX descriptor status */
+#define LAYER_4_CHECKSUM_OK 0x40000000
+#define RX_ENABLE_INTERRUPT 0x20000000
+#define RX_FIRST_DESC 0x08000000
+#define RX_LAST_DESC 0x04000000
+#define RX_IP_HDR_OK 0x02000000
+#define RX_PKT_IS_IPV4 0x01000000
+#define RX_PKT_IS_ETHERNETV2 0x00800000
+#define RX_PKT_LAYER4_TYPE_MASK 0x00600000
+#define RX_PKT_LAYER4_TYPE_TCP_IPV4 0x00000000
+#define RX_PKT_IS_VLAN_TAGGED 0x00080000
+
+/* TX descriptor command */
+#define TX_ENABLE_INTERRUPT 0x00800000
+#define GEN_CRC 0x00400000
+#define TX_FIRST_DESC 0x00200000
+#define TX_LAST_DESC 0x00100000
+#define ZERO_PADDING 0x00080000
+#define GEN_IP_V4_CHECKSUM 0x00040000
+#define GEN_TCP_UDP_CHECKSUM 0x00020000
+#define UDP_FRAME 0x00010000
+#define MAC_HDR_EXTRA_4_BYTES 0x00008000
+#define MAC_HDR_EXTRA_8_BYTES 0x00000200
+
+#define TX_IHL_SHIFT 11
+
+
+/* global *******************************************************************/
+struct mv643xx_eth_shared_private {
+ /*
+ * Ethernet controller base address.
+ */
+ void __iomem *base;
+
+ /*
+ * Points at the right SMI instance to use.
+ */
+ struct mv643xx_eth_shared_private *smi;
+
+ /*
+ * Provides access to local SMI interface.
+ */
+ struct mii_bus *smi_bus;
+
+ /*
+ * If we have access to the error interrupt pin (which is
+ * somewhat misnamed as it not only reflects internal errors
+ * but also reflects SMI completion), use that to wait for
+ * SMI access completion instead of polling the SMI busy bit.
+ */
+ int err_interrupt;
+ wait_queue_head_t smi_busy_wait;
+
+ /*
+ * Per-port MBUS window access register value.
+ */
+ u32 win_protect;
+
+ /*
+ * Hardware-specific parameters.
+ */
+ unsigned int t_clk;
+ int extended_rx_coal_limit;
+ int tx_bw_control;
+ int tx_csum_limit;
+};
+
+#define TX_BW_CONTROL_ABSENT 0
+#define TX_BW_CONTROL_OLD_LAYOUT 1
+#define TX_BW_CONTROL_NEW_LAYOUT 2
+
+static int mv643xx_eth_open(struct net_device *dev);
+static int mv643xx_eth_stop(struct net_device *dev);
+
+
+/* per-port *****************************************************************/
+struct mib_counters {
+ u64 good_octets_received;
+ u32 bad_octets_received;
+ u32 internal_mac_transmit_err;
+ u32 good_frames_received;
+ u32 bad_frames_received;
+ u32 broadcast_frames_received;
+ u32 multicast_frames_received;
+ u32 frames_64_octets;
+ u32 frames_65_to_127_octets;
+ u32 frames_128_to_255_octets;
+ u32 frames_256_to_511_octets;
+ u32 frames_512_to_1023_octets;
+ u32 frames_1024_to_max_octets;
+ u64 good_octets_sent;
+ u32 good_frames_sent;
+ u32 excessive_collision;
+ u32 multicast_frames_sent;
+ u32 broadcast_frames_sent;
+ u32 unrec_mac_control_received;
+ u32 fc_sent;
+ u32 good_fc_received;
+ u32 bad_fc_received;
+ u32 undersize_received;
+ u32 fragments_received;
+ u32 oversize_received;
+ u32 jabber_received;
+ u32 mac_receive_error;
+ u32 bad_crc_event;
+ u32 collision;
+ u32 late_collision;
+};
+
+struct lro_counters {
+ u32 lro_aggregated;
+ u32 lro_flushed;
+ u32 lro_no_desc;
+};
+
+struct rx_queue {
+ int index;
+
+ int rx_ring_size;
+
+ int rx_desc_count;
+ int rx_curr_desc;
+ int rx_used_desc;
+
+ struct rx_desc *rx_desc_area;
+ dma_addr_t rx_desc_dma;
+ int rx_desc_area_size;
+ struct sk_buff **rx_skb;
+
+ struct net_lro_mgr lro_mgr;
+ struct net_lro_desc lro_arr[8];
+};
+
+struct tx_queue {
+ int index;
+
+ int tx_ring_size;
+
+ int tx_desc_count;
+ int tx_curr_desc;
+ int tx_used_desc;
+
+ struct tx_desc *tx_desc_area;
+ dma_addr_t tx_desc_dma;
+ int tx_desc_area_size;
+
+ struct sk_buff_head tx_skb;
+
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+ unsigned long tx_dropped;
+};
+
+struct mv643xx_eth_private {
+ struct mv643xx_eth_shared_private *shared;
+ void __iomem *base;
+ int port_num;
+
+ struct net_device *dev;
+
+ struct phy_device *phy;
+
+ struct timer_list mib_counters_timer;
+ spinlock_t mib_counters_lock;
+ struct mib_counters mib_counters;
+
+ struct lro_counters lro_counters;
+
+ struct work_struct tx_timeout_task;
+
+ struct napi_struct napi;
+ u32 int_mask;
+ u8 oom;
+ u8 work_link;
+ u8 work_tx;
+ u8 work_tx_end;
+ u8 work_rx;
+ u8 work_rx_refill;
+
+ int skb_size;
+ struct sk_buff_head rx_recycle;
+
+ /*
+ * RX state.
+ */
+ int rx_ring_size;
+ unsigned long rx_desc_sram_addr;
+ int rx_desc_sram_size;
+ int rxq_count;
+ struct timer_list rx_oom;
+ struct rx_queue rxq[8];
+
+ /*
+ * TX state.
+ */
+ int tx_ring_size;
+ unsigned long tx_desc_sram_addr;
+ int tx_desc_sram_size;
+ int txq_count;
+ struct tx_queue txq[8];
+};
+
+
+/* port register accessors **************************************************/
+static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
+{
+ return readl(mp->shared->base + offset);
+}
+
+static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
+{
+ return readl(mp->base + offset);
+}
+
+static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
+{
+ writel(data, mp->shared->base + offset);
+}
+
+static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
+{
+ writel(data, mp->base + offset);
+}
+
+
+/* rxq/txq helper functions *************************************************/
+static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
+{
+ return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
+}
+
+static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
+{
+ return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
+}
+
+static void rxq_enable(struct rx_queue *rxq)
+{
+ struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+ wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
+}
+
+static void rxq_disable(struct rx_queue *rxq)
+{
+ struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+ u8 mask = 1 << rxq->index;
+
+ wrlp(mp, RXQ_COMMAND, mask << 8);
+ while (rdlp(mp, RXQ_COMMAND) & mask)
+ udelay(10);
+}
+
+static void txq_reset_hw_ptr(struct tx_queue *txq)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ u32 addr;
+
+ addr = (u32)txq->tx_desc_dma;
+ addr += txq->tx_curr_desc * sizeof(struct tx_desc);
+ wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
+}
+
+static void txq_enable(struct tx_queue *txq)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ wrlp(mp, TXQ_COMMAND, 1 << txq->index);
+}
+
+static void txq_disable(struct tx_queue *txq)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ u8 mask = 1 << txq->index;
+
+ wrlp(mp, TXQ_COMMAND, mask << 8);
+ while (rdlp(mp, TXQ_COMMAND) & mask)
+ udelay(10);
+}
+
+static void txq_maybe_wake(struct tx_queue *txq)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
+
+ if (netif_tx_queue_stopped(nq)) {
+ __netif_tx_lock(nq, smp_processor_id());
+ if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
+ netif_tx_wake_queue(nq);
+ __netif_tx_unlock(nq);
+ }
+}
+
+
+/* rx napi ******************************************************************/
+static int
+mv643xx_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph,
+ u64 *hdr_flags, void *priv)
+{
+ unsigned long cmd_sts = (unsigned long)priv;
+
+ /*
+ * Make sure that this packet is Ethernet II, is not VLAN
+ * tagged, is IPv4, has a valid IP header, and is TCP.
+ */
+ if ((cmd_sts & (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
+ RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_MASK |
+ RX_PKT_IS_VLAN_TAGGED)) !=
+ (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
+ RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_TCP_IPV4))
+ return -1;
+
+ skb_reset_network_header(skb);
+ skb_set_transport_header(skb, ip_hdrlen(skb));
+ *iphdr = ip_hdr(skb);
+ *tcph = tcp_hdr(skb);
+ *hdr_flags = LRO_IPV4 | LRO_TCP;
+
+ return 0;
+}
+
+static int rxq_process(struct rx_queue *rxq, int budget)
+{
+ struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+ struct net_device_stats *stats = &mp->dev->stats;
+ int lro_flush_needed;
+ int rx;
+
+ lro_flush_needed = 0;
+ rx = 0;
+ while (rx < budget && rxq->rx_desc_count) {
+ struct rx_desc *rx_desc;
+ unsigned int cmd_sts;
+ struct sk_buff *skb;
+ u16 byte_cnt;
+
+ rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
+
+ cmd_sts = rx_desc->cmd_sts;
+ if (cmd_sts & BUFFER_OWNED_BY_DMA)
+ break;
+ rmb();
+
+ skb = rxq->rx_skb[rxq->rx_curr_desc];
+ rxq->rx_skb[rxq->rx_curr_desc] = NULL;
+
+ rxq->rx_curr_desc++;
+ if (rxq->rx_curr_desc == rxq->rx_ring_size)
+ rxq->rx_curr_desc = 0;
+
+ dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
+ rx_desc->buf_size, DMA_FROM_DEVICE);
+ rxq->rx_desc_count--;
+ rx++;
+
+ mp->work_rx_refill |= 1 << rxq->index;
+
+ byte_cnt = rx_desc->byte_cnt;
+
+ /*
+ * Update statistics.
+ *
+ * Note that the descriptor byte count includes 2 dummy
+ * bytes automatically inserted by the hardware at the
+ * start of the packet (which we don't count), and a 4
+ * byte CRC at the end of the packet (which we do count).
+ */
+ stats->rx_packets++;
+ stats->rx_bytes += byte_cnt - 2;
+
+ /*
+ * In case we received a packet without first / last bits
+ * on, or the error summary bit is set, the packet needs
+ * to be dropped.
+ */
+ if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
+ != (RX_FIRST_DESC | RX_LAST_DESC))
+ goto err;
+
+ /*
+ * The -4 is for the CRC in the trailer of the
+ * received packet
+ */
+ skb_put(skb, byte_cnt - 2 - 4);
+
+ if (cmd_sts & LAYER_4_CHECKSUM_OK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->protocol = eth_type_trans(skb, mp->dev);
+
+ if (skb->dev->features & NETIF_F_LRO &&
+ skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ lro_receive_skb(&rxq->lro_mgr, skb, (void *)cmd_sts);
+ lro_flush_needed = 1;
+ } else
+ netif_receive_skb(skb);
+
+ continue;
+
+err:
+ stats->rx_dropped++;
+
+ if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+ (RX_FIRST_DESC | RX_LAST_DESC)) {
+ if (net_ratelimit())
+ netdev_err(mp->dev,
+ "received packet spanning multiple descriptors\n");
+ }
+
+ if (cmd_sts & ERROR_SUMMARY)
+ stats->rx_errors++;
+
+ dev_kfree_skb(skb);
+ }
+
+ if (lro_flush_needed)
+ lro_flush_all(&rxq->lro_mgr);
+
+ if (rx < budget)
+ mp->work_rx &= ~(1 << rxq->index);
+
+ return rx;
+}
+
+static int rxq_refill(struct rx_queue *rxq, int budget)
+{
+ struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+ int refilled;
+
+ refilled = 0;
+ while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
+ struct sk_buff *skb;
+ int rx;
+ struct rx_desc *rx_desc;
+ int size;
+
+ skb = __skb_dequeue(&mp->rx_recycle);
+ if (skb == NULL)
+ skb = dev_alloc_skb(mp->skb_size);
+
+ if (skb == NULL) {
+ mp->oom = 1;
+ goto oom;
+ }
+
+ if (SKB_DMA_REALIGN)
+ skb_reserve(skb, SKB_DMA_REALIGN);
+
+ refilled++;
+ rxq->rx_desc_count++;
+
+ rx = rxq->rx_used_desc++;
+ if (rxq->rx_used_desc == rxq->rx_ring_size)
+ rxq->rx_used_desc = 0;
+
+ rx_desc = rxq->rx_desc_area + rx;
+
+ size = skb->end - skb->data;
+ rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
+ skb->data, size,
+ DMA_FROM_DEVICE);
+ rx_desc->buf_size = size;
+ rxq->rx_skb[rx] = skb;
+ wmb();
+ rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
+ wmb();
+
+ /*
+ * The hardware automatically prepends 2 bytes of
+ * dummy data to each received packet, so that the
+ * IP header ends up 16-byte aligned.
+ */
+ skb_reserve(skb, 2);
+ }
+
+ if (refilled < budget)
+ mp->work_rx_refill &= ~(1 << rxq->index);
+
+oom:
+ return refilled;
+}
+
+
+/* tx ***********************************************************************/
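+/*
+ * The TX DMA engine apparently mishandles fragments that are both tiny
+ * (<= 8 bytes) and not 8-byte aligned; mv643xx_eth_xmit() linearizes
+ * any skb containing such a fragment before queueing it.
+ */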
+static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
+{
+ int frag;
+
+ for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
+ if (fragp->size <= 8 && fragp->page_offset & 7)
+ return 1;
+ }
+
+ return 0;
+}
+
+static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int frag;
+
+ for (frag = 0; frag < nr_frags; frag++) {
+ skb_frag_t *this_frag;
+ int tx_index;
+ struct tx_desc *desc;
+
+ this_frag = &skb_shinfo(skb)->frags[frag];
+ tx_index = txq->tx_curr_desc++;
+ if (txq->tx_curr_desc == txq->tx_ring_size)
+ txq->tx_curr_desc = 0;
+ desc = &txq->tx_desc_area[tx_index];
+
+ /*
+ * The last fragment will generate an interrupt
+ * which will free the skb on TX completion.
+ */
+ if (frag == nr_frags - 1) {
+ desc->cmd_sts = BUFFER_OWNED_BY_DMA |
+ ZERO_PADDING | TX_LAST_DESC |
+ TX_ENABLE_INTERRUPT;
+ } else {
+ desc->cmd_sts = BUFFER_OWNED_BY_DMA;
+ }
+
+ desc->l4i_chk = 0;
+ desc->byte_cnt = this_frag->size;
+ desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
+ this_frag, 0,
+ this_frag->size,
+ DMA_TO_DEVICE);
+ }
+}
+
+static inline __be16 sum16_as_be(__sum16 sum)
+{
+ return (__force __be16)sum;
+}
+
+static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int tx_index;
+ struct tx_desc *desc;
+ u32 cmd_sts;
+ u16 l4i_chk;
+ int length;
+
+ cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
+ l4i_chk = 0;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ int hdr_len;
+ int tag_bytes;
+
+ BUG_ON(skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_8021Q));
+
+ hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
+ tag_bytes = hdr_len - ETH_HLEN;
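+ /* Checksum offload copes only with 0, 4, 8 or 12 extra MAC
+ * header (VLAN tag) bytes -- that is the "tag_bytes & ~12"
+ * test below; anything else falls back to software
+ * checksumming. */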
+ if (skb->len - hdr_len > mp->shared->tx_csum_limit ||
+ unlikely(tag_bytes & ~12)) {
+ if (skb_checksum_help(skb) == 0)
+ goto no_csum;
+ kfree_skb(skb);
+ return 1;
+ }
+
+ if (tag_bytes & 4)
+ cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
+ if (tag_bytes & 8)
+ cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
+
+ cmd_sts |= GEN_TCP_UDP_CHECKSUM |
+ GEN_IP_V4_CHECKSUM |
+ ip_hdr(skb)->ihl << TX_IHL_SHIFT;
+
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_UDP:
+ cmd_sts |= UDP_FRAME;
+ l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
+ break;
+ case IPPROTO_TCP:
+ l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
+ break;
+ default:
+ BUG();
+ }
+ } else {
+no_csum:
+ /* Errata BTS #50, IHL must be 5 if no HW checksum */
+ cmd_sts |= 5 << TX_IHL_SHIFT;
+ }
+
+ tx_index = txq->tx_curr_desc++;
+ if (txq->tx_curr_desc == txq->tx_ring_size)
+ txq->tx_curr_desc = 0;
+ desc = &txq->tx_desc_area[tx_index];
+
+ if (nr_frags) {
+ txq_submit_frag_skb(txq, skb);
+ length = skb_headlen(skb);
+ } else {
+ cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
+ length = skb->len;
+ }
+
+ desc->l4i_chk = l4i_chk;
+ desc->byte_cnt = length;
+ desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
+ length, DMA_TO_DEVICE);
+
+ __skb_queue_tail(&txq->tx_skb, skb);
+
+ skb_tx_timestamp(skb);
+
+ /* ensure all other descriptors are written before first cmd_sts */
+ wmb();
+ desc->cmd_sts = cmd_sts;
+
+ /* clear TX_END status */
+ mp->work_tx_end &= ~(1 << txq->index);
+
+ /* ensure all descriptors are written before poking hardware */
+ wmb();
+ txq_enable(txq);
+
+ txq->tx_desc_count += nr_frags + 1;
+
+ return 0;
+}
+
+static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ int length, queue;
+ struct tx_queue *txq;
+ struct netdev_queue *nq;
+
+ queue = skb_get_queue_mapping(skb);
+ txq = mp->txq + queue;
+ nq = netdev_get_tx_queue(dev, queue);
+
+ if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
+ txq->tx_dropped++;
+ netdev_printk(KERN_DEBUG, dev,
+ "failed to linearize skb with tiny unaligned fragment\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
+ if (net_ratelimit())
+ netdev_err(dev, "tx queue full?!\n");
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ length = skb->len;
+
+ if (!txq_submit_skb(txq, skb)) {
+ int entries_left;
+
+ txq->tx_bytes += length;
+ txq->tx_packets++;
+
+ entries_left = txq->tx_ring_size - txq->tx_desc_count;
+ if (entries_left < MAX_SKB_FRAGS + 1)
+ netif_tx_stop_queue(nq);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+
+/* tx napi ******************************************************************/
+static void txq_kick(struct tx_queue *txq)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
+ u32 hw_desc_ptr;
+ u32 expected_ptr;
+
+ __netif_tx_lock(nq, smp_processor_id());
+
+ if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
+ goto out;
+
+ hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
+ expected_ptr = (u32)txq->tx_desc_dma +
+ txq->tx_curr_desc * sizeof(struct tx_desc);
+
+ if (hw_desc_ptr != expected_ptr)
+ txq_enable(txq);
+
+out:
+ __netif_tx_unlock(nq);
+
+ mp->work_tx_end &= ~(1 << txq->index);
+}
+
+static int txq_reclaim(struct tx_queue *txq, int budget, int force)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
+ int reclaimed;
+
+ __netif_tx_lock(nq, smp_processor_id());
+
+ reclaimed = 0;
+ while (reclaimed < budget && txq->tx_desc_count > 0) {
+ int tx_index;
+ struct tx_desc *desc;
+ u32 cmd_sts;
+ struct sk_buff *skb;
+
+ tx_index = txq->tx_used_desc;
+ desc = &txq->tx_desc_area[tx_index];
+ cmd_sts = desc->cmd_sts;
+
+ if (cmd_sts & BUFFER_OWNED_BY_DMA) {
+ if (!force)
+ break;
+ desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
+ }
+
+ txq->tx_used_desc = tx_index + 1;
+ if (txq->tx_used_desc == txq->tx_ring_size)
+ txq->tx_used_desc = 0;
+
+ reclaimed++;
+ txq->tx_desc_count--;
+
+ skb = NULL;
+ if (cmd_sts & TX_LAST_DESC)
+ skb = __skb_dequeue(&txq->tx_skb);
+
+ if (cmd_sts & ERROR_SUMMARY) {
+ netdev_info(mp->dev, "tx error\n");
+ mp->dev->stats.tx_errors++;
+ }
+
+ if (cmd_sts & TX_FIRST_DESC) {
+ dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
+ desc->byte_cnt, DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
+ desc->byte_cnt, DMA_TO_DEVICE);
+ }
+
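+ /*
+ * Instead of freeing a completed TX skb outright, try to
+ * recycle it into the RX ring and spare the next refill
+ * an allocation.
+ */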
+ if (skb != NULL) {
+ if (skb_queue_len(&mp->rx_recycle) <
+ mp->rx_ring_size &&
+ skb_recycle_check(skb, mp->skb_size))
+ __skb_queue_head(&mp->rx_recycle, skb);
+ else
+ dev_kfree_skb(skb);
+ }
+ }
+
+ __netif_tx_unlock(nq);
+
+ if (reclaimed < budget)
+ mp->work_tx &= ~(1 << txq->index);
+
+ return reclaimed;
+}
+
+
+/* tx rate control **********************************************************/
+/*
+ * Set total maximum TX rate (shared by all TX queues for this port)
+ * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
+ */
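+/*
+ * E.g. the 1 Gb/s, 16 MB burst defaults programmed from port_start()
+ * work out, with the default 133 MHz t_clk, to a token rate of
+ * (1000000000 / 1000) * 64 / (133000000 / 1000) = 481 and a bucket
+ * size of (16777216 + 255) >> 8 = 65536, clamped to 65535.
+ */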
+static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
+{
+ int token_rate;
+ int mtu;
+ int bucket_size;
+
+ token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
+ if (token_rate > 1023)
+ token_rate = 1023;
+
+ mtu = (mp->dev->mtu + 255) >> 8;
+ if (mtu > 63)
+ mtu = 63;
+
+ bucket_size = (burst + 255) >> 8;
+ if (bucket_size > 65535)
+ bucket_size = 65535;
+
+ switch (mp->shared->tx_bw_control) {
+ case TX_BW_CONTROL_OLD_LAYOUT:
+ wrlp(mp, TX_BW_RATE, token_rate);
+ wrlp(mp, TX_BW_MTU, mtu);
+ wrlp(mp, TX_BW_BURST, bucket_size);
+ break;
+ case TX_BW_CONTROL_NEW_LAYOUT:
+ wrlp(mp, TX_BW_RATE_MOVED, token_rate);
+ wrlp(mp, TX_BW_MTU_MOVED, mtu);
+ wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
+ break;
+ }
+}
+
+static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ int token_rate;
+ int bucket_size;
+
+ token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
+ if (token_rate > 1023)
+ token_rate = 1023;
+
+ bucket_size = (burst + 255) >> 8;
+ if (bucket_size > 65535)
+ bucket_size = 65535;
+
+ wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
+ wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
+}
+
+static void txq_set_fixed_prio_mode(struct tx_queue *txq)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ int off;
+ u32 val;
+
+ /*
+ * Turn on fixed priority mode.
+ */
+ off = 0;
+ switch (mp->shared->tx_bw_control) {
+ case TX_BW_CONTROL_OLD_LAYOUT:
+ off = TXQ_FIX_PRIO_CONF;
+ break;
+ case TX_BW_CONTROL_NEW_LAYOUT:
+ off = TXQ_FIX_PRIO_CONF_MOVED;
+ break;
+ }
+
+ if (off) {
+ val = rdlp(mp, off);
+ val |= 1 << txq->index;
+ wrlp(mp, off, val);
+ }
+}
+
+
+/* mii management interface *************************************************/
+static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
+{
+ struct mv643xx_eth_shared_private *msp = dev_id;
+
+ if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
+ writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
+ wake_up(&msp->smi_busy_wait);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int smi_is_done(struct mv643xx_eth_shared_private *msp)
+{
+ return !(readl(msp->base + SMI_REG) & SMI_BUSY);
+}
+
+static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
+{
+ if (msp->err_interrupt == NO_IRQ) {
+ int i;
+
+ for (i = 0; !smi_is_done(msp); i++) {
+ if (i == 10)
+ return -ETIMEDOUT;
+ msleep(10);
+ }
+
+ return 0;
+ }
+
+ if (!smi_is_done(msp)) {
+ wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
+ msecs_to_jiffies(100));
+ if (!smi_is_done(msp))
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
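+/*
+ * SMI frames are assembled below as: opcode, the 5-bit register
+ * number in bits 25:21, the 5-bit PHY address in bits 20:16 and,
+ * for writes, 16 bits of data in the low half of the word.
+ */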
+static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
+{
+ struct mv643xx_eth_shared_private *msp = bus->priv;
+ void __iomem *smi_reg = msp->base + SMI_REG;
+ int ret;
+
+ if (smi_wait_ready(msp)) {
+ pr_warn("SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
+
+ if (smi_wait_ready(msp)) {
+ pr_warn("SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = readl(smi_reg);
+ if (!(ret & SMI_READ_VALID)) {
+ pr_warn("SMI bus read not valid\n");
+ return -ENODEV;
+ }
+
+ return ret & 0xffff;
+}
+
+static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
+{
+ struct mv643xx_eth_shared_private *msp = bus->priv;
+ void __iomem *smi_reg = msp->base + SMI_REG;
+
+ if (smi_wait_ready(msp)) {
+ pr_warn("SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ writel(SMI_OPCODE_WRITE | (reg << 21) |
+ (addr << 16) | (val & 0xffff), smi_reg);
+
+ if (smi_wait_ready(msp)) {
+ pr_warn("SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+
+/* statistics ***************************************************************/
+static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ unsigned long tx_packets = 0;
+ unsigned long tx_bytes = 0;
+ unsigned long tx_dropped = 0;
+ int i;
+
+ for (i = 0; i < mp->txq_count; i++) {
+ struct tx_queue *txq = mp->txq + i;
+
+ tx_packets += txq->tx_packets;
+ tx_bytes += txq->tx_bytes;
+ tx_dropped += txq->tx_dropped;
+ }
+
+ stats->tx_packets = tx_packets;
+ stats->tx_bytes = tx_bytes;
+ stats->tx_dropped = tx_dropped;
+
+ return stats;
+}
+
+static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp)
+{
+ u32 lro_aggregated = 0;
+ u32 lro_flushed = 0;
+ u32 lro_no_desc = 0;
+ int i;
+
+ for (i = 0; i < mp->rxq_count; i++) {
+ struct rx_queue *rxq = mp->rxq + i;
+
+ lro_aggregated += rxq->lro_mgr.stats.aggregated;
+ lro_flushed += rxq->lro_mgr.stats.flushed;
+ lro_no_desc += rxq->lro_mgr.stats.no_desc;
+ }
+
+ mp->lro_counters.lro_aggregated = lro_aggregated;
+ mp->lro_counters.lro_flushed = lro_flushed;
+ mp->lro_counters.lro_no_desc = lro_no_desc;
+}
+
+static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
+{
+ return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
+}
+
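+/*
+ * The hardware MIB counters are clear-on-read: "clearing" them is
+ * simply a matter of reading every register, and the update function
+ * below accumulates into software counters, re-arming its timer
+ * every 30 seconds so the 32-bit hardware registers don't wrap
+ * unnoticed.
+ */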
+static void mib_counters_clear(struct mv643xx_eth_private *mp)
+{
+ int i;
+
+ for (i = 0; i < 0x80; i += 4)
+ mib_read(mp, i);
+}
+
+static void mib_counters_update(struct mv643xx_eth_private *mp)
+{
+ struct mib_counters *p = &mp->mib_counters;
+
+ spin_lock_bh(&mp->mib_counters_lock);
+ p->good_octets_received += mib_read(mp, 0x00);
+ p->bad_octets_received += mib_read(mp, 0x08);
+ p->internal_mac_transmit_err += mib_read(mp, 0x0c);
+ p->good_frames_received += mib_read(mp, 0x10);
+ p->bad_frames_received += mib_read(mp, 0x14);
+ p->broadcast_frames_received += mib_read(mp, 0x18);
+ p->multicast_frames_received += mib_read(mp, 0x1c);
+ p->frames_64_octets += mib_read(mp, 0x20);
+ p->frames_65_to_127_octets += mib_read(mp, 0x24);
+ p->frames_128_to_255_octets += mib_read(mp, 0x28);
+ p->frames_256_to_511_octets += mib_read(mp, 0x2c);
+ p->frames_512_to_1023_octets += mib_read(mp, 0x30);
+ p->frames_1024_to_max_octets += mib_read(mp, 0x34);
+ p->good_octets_sent += mib_read(mp, 0x38);
+ p->good_frames_sent += mib_read(mp, 0x40);
+ p->excessive_collision += mib_read(mp, 0x44);
+ p->multicast_frames_sent += mib_read(mp, 0x48);
+ p->broadcast_frames_sent += mib_read(mp, 0x4c);
+ p->unrec_mac_control_received += mib_read(mp, 0x50);
+ p->fc_sent += mib_read(mp, 0x54);
+ p->good_fc_received += mib_read(mp, 0x58);
+ p->bad_fc_received += mib_read(mp, 0x5c);
+ p->undersize_received += mib_read(mp, 0x60);
+ p->fragments_received += mib_read(mp, 0x64);
+ p->oversize_received += mib_read(mp, 0x68);
+ p->jabber_received += mib_read(mp, 0x6c);
+ p->mac_receive_error += mib_read(mp, 0x70);
+ p->bad_crc_event += mib_read(mp, 0x74);
+ p->collision += mib_read(mp, 0x78);
+ p->late_collision += mib_read(mp, 0x7c);
+ spin_unlock_bh(&mp->mib_counters_lock);
+
+ mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
+}
+
+static void mib_counters_timer_wrapper(unsigned long _mp)
+{
+ struct mv643xx_eth_private *mp = (void *)_mp;
+
+ mib_counters_update(mp);
+}
+
+
+/* interrupt coalescing *****************************************************/
+/*
+ * Hardware coalescing parameters are set in units of 64 t_clk
+ * cycles. I.e.:
+ *
+ * coal_delay_in_usec = 64000000 * register_value / t_clk_rate
+ *
+ * register_value = coal_delay_in_usec * t_clk_rate / 64000000
+ *
+ * In the ->set*() methods, we round the computed register value
+ * to the nearest integer.
+ */
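+/*
+ * E.g. with the default 133 MHz t_clk, the 250 usec RX delay set at
+ * probe time corresponds to a register value of roughly
+ * 250 * 133000000 / 64000000 ~= 520.
+ */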
+static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
+{
+ u32 val = rdlp(mp, SDMA_CONFIG);
+ u64 temp;
+
+ if (mp->shared->extended_rx_coal_limit)
+ temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
+ else
+ temp = (val & 0x003fff00) >> 8;
+
+ temp *= 64000000;
+ do_div(temp, mp->shared->t_clk);
+
+ return (unsigned int)temp;
+}
+
+static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
+{
+ u64 temp;
+ u32 val;
+
+ temp = (u64)usec * mp->shared->t_clk;
+ temp += 31999999;
+ do_div(temp, 64000000);
+
+ val = rdlp(mp, SDMA_CONFIG);
+ if (mp->shared->extended_rx_coal_limit) {
+ if (temp > 0xffff)
+ temp = 0xffff;
+ val &= ~0x023fff80;
+ val |= (temp & 0x8000) << 10;
+ val |= (temp & 0x7fff) << 7;
+ } else {
+ if (temp > 0x3fff)
+ temp = 0x3fff;
+ val &= ~0x003fff00;
+ val |= (temp & 0x3fff) << 8;
+ }
+ wrlp(mp, SDMA_CONFIG, val);
+}
+
+static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
+{
+ u64 temp;
+
+ temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
+ temp *= 64000000;
+ do_div(temp, mp->shared->t_clk);
+
+ return (unsigned int)temp;
+}
+
+static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
+{
+ u64 temp;
+
+ temp = (u64)usec * mp->shared->t_clk;
+ temp += 31999999;
+ do_div(temp, 64000000);
+
+ if (temp > 0x3fff)
+ temp = 0x3fff;
+
+ wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
+}
+
+
+/* ethtool ******************************************************************/
+struct mv643xx_eth_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int netdev_off;
+ int mp_off;
+};
+
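+/*
+ * Exactly one of netdev_off/mp_off is valid for each entry; the
+ * other one is -1, and mv643xx_eth_get_ethtool_stats() reads the
+ * statistic through whichever offset is non-negative.
+ */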
+#define SSTAT(m) \
+ { #m, FIELD_SIZEOF(struct net_device_stats, m), \
+ offsetof(struct net_device, stats.m), -1 }
+
+#define MIBSTAT(m) \
+ { #m, FIELD_SIZEOF(struct mib_counters, m), \
+ -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
+
+#define LROSTAT(m) \
+ { #m, FIELD_SIZEOF(struct lro_counters, m), \
+ -1, offsetof(struct mv643xx_eth_private, lro_counters.m) }
+
+static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
+ SSTAT(rx_packets),
+ SSTAT(tx_packets),
+ SSTAT(rx_bytes),
+ SSTAT(tx_bytes),
+ SSTAT(rx_errors),
+ SSTAT(tx_errors),
+ SSTAT(rx_dropped),
+ SSTAT(tx_dropped),
+ MIBSTAT(good_octets_received),
+ MIBSTAT(bad_octets_received),
+ MIBSTAT(internal_mac_transmit_err),
+ MIBSTAT(good_frames_received),
+ MIBSTAT(bad_frames_received),
+ MIBSTAT(broadcast_frames_received),
+ MIBSTAT(multicast_frames_received),
+ MIBSTAT(frames_64_octets),
+ MIBSTAT(frames_65_to_127_octets),
+ MIBSTAT(frames_128_to_255_octets),
+ MIBSTAT(frames_256_to_511_octets),
+ MIBSTAT(frames_512_to_1023_octets),
+ MIBSTAT(frames_1024_to_max_octets),
+ MIBSTAT(good_octets_sent),
+ MIBSTAT(good_frames_sent),
+ MIBSTAT(excessive_collision),
+ MIBSTAT(multicast_frames_sent),
+ MIBSTAT(broadcast_frames_sent),
+ MIBSTAT(unrec_mac_control_received),
+ MIBSTAT(fc_sent),
+ MIBSTAT(good_fc_received),
+ MIBSTAT(bad_fc_received),
+ MIBSTAT(undersize_received),
+ MIBSTAT(fragments_received),
+ MIBSTAT(oversize_received),
+ MIBSTAT(jabber_received),
+ MIBSTAT(mac_receive_error),
+ MIBSTAT(bad_crc_event),
+ MIBSTAT(collision),
+ MIBSTAT(late_collision),
+ LROSTAT(lro_aggregated),
+ LROSTAT(lro_flushed),
+ LROSTAT(lro_no_desc),
+};
+
+static int
+mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp,
+ struct ethtool_cmd *cmd)
+{
+ int err;
+
+ err = phy_read_status(mp->phy);
+ if (err == 0)
+ err = phy_ethtool_gset(mp->phy, cmd);
+
+ /*
+ * The MAC does not support 1000baseT_Half.
+ */
+ cmd->supported &= ~SUPPORTED_1000baseT_Half;
+ cmd->advertising &= ~ADVERTISED_1000baseT_Half;
+
+ return err;
+}
+
+static int
+mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
+ struct ethtool_cmd *cmd)
+{
+ u32 port_status;
+
+ port_status = rdlp(mp, PORT_STATUS);
+
+ cmd->supported = SUPPORTED_MII;
+ cmd->advertising = ADVERTISED_MII;
+ switch (port_status & PORT_SPEED_MASK) {
+ case PORT_SPEED_10:
+ ethtool_cmd_speed_set(cmd, SPEED_10);
+ break;
+ case PORT_SPEED_100:
+ ethtool_cmd_speed_set(cmd, SPEED_100);
+ break;
+ case PORT_SPEED_1000:
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
+ break;
+ default:
+ ethtool_cmd_speed_set(cmd, -1);
+ break;
+ }
+ cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
+ cmd->port = PORT_MII;
+ cmd->phy_address = 0;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->maxtxpkt = 1;
+ cmd->maxrxpkt = 1;
+
+ return 0;
+}
+
+static int
+mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ if (mp->phy != NULL)
+ return mv643xx_eth_get_settings_phy(mp, cmd);
+ else
+ return mv643xx_eth_get_settings_phyless(mp, cmd);
+}
+
+static int
+mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ if (mp->phy == NULL)
+ return -EINVAL;
+
+ /*
+ * The MAC does not support 1000baseT_Half.
+ */
+ cmd->advertising &= ~ADVERTISED_1000baseT_Half;
+
+ return phy_ethtool_sset(mp->phy, cmd);
+}
+
+static void mv643xx_eth_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32);
+ strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
+ strncpy(drvinfo->fw_version, "N/A", 32);
+ strncpy(drvinfo->bus_info, "platform", 32);
+ drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
+}
+
+static int mv643xx_eth_nway_reset(struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ if (mp->phy == NULL)
+ return -EINVAL;
+
+ return genphy_restart_aneg(mp->phy);
+}
+
+static int
+mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ ec->rx_coalesce_usecs = get_rx_coal(mp);
+ ec->tx_coalesce_usecs = get_tx_coal(mp);
+
+ return 0;
+}
+
+static int
+mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ set_rx_coal(mp, ec->rx_coalesce_usecs);
+ set_tx_coal(mp, ec->tx_coalesce_usecs);
+
+ return 0;
+}
+
+static void
+mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ er->rx_max_pending = 4096;
+ er->tx_max_pending = 4096;
+ er->rx_mini_max_pending = 0;
+ er->rx_jumbo_max_pending = 0;
+
+ er->rx_pending = mp->rx_ring_size;
+ er->tx_pending = mp->tx_ring_size;
+ er->rx_mini_pending = 0;
+ er->rx_jumbo_pending = 0;
+}
+
+static int
+mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ if (er->rx_mini_pending || er->rx_jumbo_pending)
+ return -EINVAL;
+
+ mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
+ mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096;
+
+ if (netif_running(dev)) {
+ mv643xx_eth_stop(dev);
+ if (mv643xx_eth_open(dev)) {
+ netdev_err(dev,
+ "fatal error on re-opening device after ring param change\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+
+static int
+mv643xx_eth_set_features(struct net_device *dev, u32 features)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ u32 rx_csum = features & NETIF_F_RXCSUM;
+
+ wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
+
+ return 0;
+}
+
+static void mv643xx_eth_get_strings(struct net_device *dev,
+ uint32_t stringset, uint8_t *data)
+{
+ int i;
+
+ if (stringset == ETH_SS_STATS) {
+ for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ mv643xx_eth_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ }
+}
+
+static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ uint64_t *data)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ int i;
+
+ mv643xx_eth_get_stats(dev);
+ mib_counters_update(mp);
+ mv643xx_eth_grab_lro_stats(mp);
+
+ for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
+ const struct mv643xx_eth_stats *stat;
+ void *p;
+
+ stat = mv643xx_eth_stats + i;
+
+ if (stat->netdev_off >= 0)
+ p = ((void *)mp->dev) + stat->netdev_off;
+ else
+ p = ((void *)mp) + stat->mp_off;
+
+ data[i] = (stat->sizeof_stat == 8) ?
+ *(uint64_t *)p : *(uint32_t *)p;
+ }
+}
+
+static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ARRAY_SIZE(mv643xx_eth_stats);
+
+ return -EOPNOTSUPP;
+}
+
+static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
+ .get_settings = mv643xx_eth_get_settings,
+ .set_settings = mv643xx_eth_set_settings,
+ .get_drvinfo = mv643xx_eth_get_drvinfo,
+ .nway_reset = mv643xx_eth_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = mv643xx_eth_get_coalesce,
+ .set_coalesce = mv643xx_eth_set_coalesce,
+ .get_ringparam = mv643xx_eth_get_ringparam,
+ .set_ringparam = mv643xx_eth_set_ringparam,
+ .get_strings = mv643xx_eth_get_strings,
+ .get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
+ .get_sset_count = mv643xx_eth_get_sset_count,
+};
+
+
+/* address handling *********************************************************/
+static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
+{
+ unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
+ unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);
+
+ addr[0] = (mac_h >> 24) & 0xff;
+ addr[1] = (mac_h >> 16) & 0xff;
+ addr[2] = (mac_h >> 8) & 0xff;
+ addr[3] = mac_h & 0xff;
+ addr[4] = (mac_l >> 8) & 0xff;
+ addr[5] = mac_l & 0xff;
+}
+
+static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
+{
+ wrlp(mp, MAC_ADDR_HIGH,
+ (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
+ wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
+}
+
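+/*
+ * The hardware unicast filter discriminates only on the low nibble
+ * of the last address byte, so every address on the uc list has to
+ * match dev_addr in the first 5 1/2 bytes; if one doesn't, we
+ * return 0 and the caller falls back to unicast promiscuous mode.
+ */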
+static u32 uc_addr_filter_mask(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ u32 nibbles;
+
+ if (dev->flags & IFF_PROMISC)
+ return 0;
+
+ nibbles = 1 << (dev->dev_addr[5] & 0x0f);
+ netdev_for_each_uc_addr(ha, dev) {
+ if (memcmp(dev->dev_addr, ha->addr, 5))
+ return 0;
+ if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
+ return 0;
+
+ nibbles |= 1 << (ha->addr[5] & 0x0f);
+ }
+
+ return nibbles;
+}
+
+static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ u32 port_config;
+ u32 nibbles;
+ int i;
+
+ uc_addr_set(mp, dev->dev_addr);
+
+ port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
+
+ nibbles = uc_addr_filter_mask(dev);
+ if (!nibbles) {
+ port_config |= UNICAST_PROMISCUOUS_MODE;
+ nibbles = 0xffff;
+ }
+
+ for (i = 0; i < 16; i += 4) {
+ int off = UNICAST_TABLE(mp->port_num) + i;
+ u32 v;
+
+ v = 0;
+ if (nibbles & 1)
+ v |= 0x00000001;
+ if (nibbles & 2)
+ v |= 0x00000100;
+ if (nibbles & 4)
+ v |= 0x00010000;
+ if (nibbles & 8)
+ v |= 0x01000000;
+ nibbles >>= 4;
+
+ wrl(mp, off, v);
+ }
+
+ wrlp(mp, PORT_CONFIG, port_config);
+}
+
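+/*
+ * 8-bit CRC over the six address bytes, using the polynomial
+ * x^8 + x^2 + x + 1 (0x107); the result indexes the 256-entry
+ * 'other' multicast hash table.
+ */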
+static int addr_crc(unsigned char *addr)
+{
+ int crc = 0;
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ int j;
+
+ crc = (crc ^ addr[i]) << 8;
+ for (j = 7; j >= 0; j--) {
+ if (crc & (0x100 << j))
+ crc ^= 0x107 << j;
+ }
+ }
+
+ return crc;
+}
+
+static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ u32 *mc_spec;
+ u32 *mc_other;
+ struct netdev_hw_addr *ha;
+ int i;
+
+ if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+ int port_num;
+ u32 accept;
+
+oom:
+ port_num = mp->port_num;
+ accept = 0x01010101;
+ for (i = 0; i < 0x100; i += 4) {
+ wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
+ wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
+ }
+ return;
+ }
+
+ mc_spec = kmalloc(0x200, GFP_ATOMIC);
+ if (mc_spec == NULL)
+ goto oom;
+ mc_other = mc_spec + (0x100 >> 2);
+
+ memset(mc_spec, 0, 0x100);
+ memset(mc_other, 0, 0x100);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ u8 *a = ha->addr;
+ u32 *table;
+ int entry;
+
+ if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
+ table = mc_spec;
+ entry = a[5];
+ } else {
+ table = mc_other;
+ entry = addr_crc(a);
+ }
+
+ table[entry >> 2] |= 1 << (8 * (entry & 3));
+ }
+
+ for (i = 0; i < 0x100; i += 4) {
+ wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
+ wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
+ }
+
+ kfree(mc_spec);
+}
+
+static void mv643xx_eth_set_rx_mode(struct net_device *dev)
+{
+ mv643xx_eth_program_unicast_filter(dev);
+ mv643xx_eth_program_multicast_filter(dev);
+}
+
+static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = addr;
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+
+ netif_addr_lock_bh(dev);
+ mv643xx_eth_program_unicast_filter(dev);
+ netif_addr_unlock_bh(dev);
+
+ return 0;
+}
+
+
+/* rx/tx queue initialisation ***********************************************/
+static int rxq_init(struct mv643xx_eth_private *mp, int index)
+{
+ struct rx_queue *rxq = mp->rxq + index;
+ struct rx_desc *rx_desc;
+ int size;
+ int i;
+
+ rxq->index = index;
+
+ rxq->rx_ring_size = mp->rx_ring_size;
+
+ rxq->rx_desc_count = 0;
+ rxq->rx_curr_desc = 0;
+ rxq->rx_used_desc = 0;
+
+ size = rxq->rx_ring_size * sizeof(struct rx_desc);
+
+ if (index == 0 && size <= mp->rx_desc_sram_size) {
+ rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
+ mp->rx_desc_sram_size);
+ rxq->rx_desc_dma = mp->rx_desc_sram_addr;
+ } else {
+ rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
+ size, &rxq->rx_desc_dma,
+ GFP_KERNEL);
+ }
+
+ if (rxq->rx_desc_area == NULL) {
+ netdev_err(mp->dev,
+ "can't allocate rx ring (%d bytes)\n", size);
+ goto out;
+ }
+ memset(rxq->rx_desc_area, 0, size);
+
+ rxq->rx_desc_area_size = size;
+ rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
+ GFP_KERNEL);
+ if (rxq->rx_skb == NULL) {
+ netdev_err(mp->dev, "can't allocate rx skb ring\n");
+ goto out_free;
+ }
+
+ rx_desc = (struct rx_desc *)rxq->rx_desc_area;
+ for (i = 0; i < rxq->rx_ring_size; i++) {
+ int nexti;
+
+ nexti = i + 1;
+ if (nexti == rxq->rx_ring_size)
+ nexti = 0;
+
+ rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
+ nexti * sizeof(struct rx_desc);
+ }
+
+ rxq->lro_mgr.dev = mp->dev;
+ memset(&rxq->lro_mgr.stats, 0, sizeof(rxq->lro_mgr.stats));
+ rxq->lro_mgr.features = LRO_F_NAPI;
+ rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
+ rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+ rxq->lro_mgr.max_desc = ARRAY_SIZE(rxq->lro_arr);
+ rxq->lro_mgr.max_aggr = 32;
+ rxq->lro_mgr.frag_align_pad = 0;
+ rxq->lro_mgr.lro_arr = rxq->lro_arr;
+ rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header;
+
+ memset(&rxq->lro_arr, 0, sizeof(rxq->lro_arr));
+
+ return 0;
+
+
+out_free:
+ if (index == 0 && size <= mp->rx_desc_sram_size)
+ iounmap(rxq->rx_desc_area);
+ else
+ dma_free_coherent(mp->dev->dev.parent, size,
+ rxq->rx_desc_area,
+ rxq->rx_desc_dma);
+
+out:
+ return -ENOMEM;
+}
+
+static void rxq_deinit(struct rx_queue *rxq)
+{
+ struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+ int i;
+
+ rxq_disable(rxq);
+
+ for (i = 0; i < rxq->rx_ring_size; i++) {
+ if (rxq->rx_skb[i]) {
+ dev_kfree_skb(rxq->rx_skb[i]);
+ rxq->rx_desc_count--;
+ }
+ }
+
+ if (rxq->rx_desc_count) {
+ netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
+ rxq->rx_desc_count);
+ }
+
+ if (rxq->index == 0 &&
+ rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
+ iounmap(rxq->rx_desc_area);
+ else
+ dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
+ rxq->rx_desc_area, rxq->rx_desc_dma);
+
+ kfree(rxq->rx_skb);
+}
+
+static int txq_init(struct mv643xx_eth_private *mp, int index)
+{
+ struct tx_queue *txq = mp->txq + index;
+ struct tx_desc *tx_desc;
+ int size;
+ int i;
+
+ txq->index = index;
+
+ txq->tx_ring_size = mp->tx_ring_size;
+
+ txq->tx_desc_count = 0;
+ txq->tx_curr_desc = 0;
+ txq->tx_used_desc = 0;
+
+ size = txq->tx_ring_size * sizeof(struct tx_desc);
+
+ if (index == 0 && size <= mp->tx_desc_sram_size) {
+ txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
+ mp->tx_desc_sram_size);
+ txq->tx_desc_dma = mp->tx_desc_sram_addr;
+ } else {
+ txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
+ size, &txq->tx_desc_dma,
+ GFP_KERNEL);
+ }
+
+ if (txq->tx_desc_area == NULL) {
+ netdev_err(mp->dev,
+ "can't allocate tx ring (%d bytes)\n", size);
+ return -ENOMEM;
+ }
+ memset(txq->tx_desc_area, 0, size);
+
+ txq->tx_desc_area_size = size;
+
+ tx_desc = (struct tx_desc *)txq->tx_desc_area;
+ for (i = 0; i < txq->tx_ring_size; i++) {
+ struct tx_desc *txd = tx_desc + i;
+ int nexti;
+
+ nexti = i + 1;
+ if (nexti == txq->tx_ring_size)
+ nexti = 0;
+
+ txd->cmd_sts = 0;
+ txd->next_desc_ptr = txq->tx_desc_dma +
+ nexti * sizeof(struct tx_desc);
+ }
+
+ skb_queue_head_init(&txq->tx_skb);
+
+ return 0;
+}
+
+static void txq_deinit(struct tx_queue *txq)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+
+ txq_disable(txq);
+ txq_reclaim(txq, txq->tx_ring_size, 1);
+
+ BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
+
+ if (txq->index == 0 &&
+ txq->tx_desc_area_size <= mp->tx_desc_sram_size)
+ iounmap(txq->tx_desc_area);
+ else
+ dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+ txq->tx_desc_area, txq->tx_desc_dma);
+}
+
+
+/* netdev ops and related ***************************************************/
+static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
+{
+ u32 int_cause;
+ u32 int_cause_ext;
+
+ int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
+ if (int_cause == 0)
+ return 0;
+
+ int_cause_ext = 0;
+ if (int_cause & INT_EXT) {
+ int_cause &= ~INT_EXT;
+ int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
+ }
+
+ if (int_cause) {
+ wrlp(mp, INT_CAUSE, ~int_cause);
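+ /*
+ * Record TX_END work only for queues whose enable bit
+ * in TXQ_COMMAND has dropped, i.e. queues that the
+ * hardware has actually stopped.
+ */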
+ mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
+ ~(rdlp(mp, TXQ_COMMAND) & 0xff);
+ mp->work_rx |= (int_cause & INT_RX) >> 2;
+ }
+
+ int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
+ if (int_cause_ext) {
+ wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
+ if (int_cause_ext & INT_EXT_LINK_PHY)
+ mp->work_link = 1;
+ mp->work_tx |= int_cause_ext & INT_EXT_TX;
+ }
+
+ return 1;
+}
+
+static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ if (unlikely(!mv643xx_eth_collect_events(mp)))
+ return IRQ_NONE;
+
+ wrlp(mp, INT_MASK, 0);
+ napi_schedule(&mp->napi);
+
+ return IRQ_HANDLED;
+}
+
+static void handle_link_event(struct mv643xx_eth_private *mp)
+{
+ struct net_device *dev = mp->dev;
+ u32 port_status;
+ int speed;
+ int duplex;
+ int fc;
+
+ port_status = rdlp(mp, PORT_STATUS);
+ if (!(port_status & LINK_UP)) {
+ if (netif_carrier_ok(dev)) {
+ int i;
+
+ netdev_info(dev, "link down\n");
+
+ netif_carrier_off(dev);
+
+ for (i = 0; i < mp->txq_count; i++) {
+ struct tx_queue *txq = mp->txq + i;
+
+ txq_reclaim(txq, txq->tx_ring_size, 1);
+ txq_reset_hw_ptr(txq);
+ }
+ }
+ return;
+ }
+
+ switch (port_status & PORT_SPEED_MASK) {
+ case PORT_SPEED_10:
+ speed = 10;
+ break;
+ case PORT_SPEED_100:
+ speed = 100;
+ break;
+ case PORT_SPEED_1000:
+ speed = 1000;
+ break;
+ default:
+ speed = -1;
+ break;
+ }
+ duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
+ fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
+
+ netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
+ speed, duplex ? "full" : "half", fc ? "en" : "dis");
+
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+}
+
+static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
+{
+ struct mv643xx_eth_private *mp;
+ int work_done;
+
+ mp = container_of(napi, struct mv643xx_eth_private, napi);
+
+ if (unlikely(mp->oom)) {
+ mp->oom = 0;
+ del_timer(&mp->rx_oom);
+ }
+
+ work_done = 0;
+ while (work_done < budget) {
+ u8 queue_mask;
+ int queue;
+ int work_tbd;
+
+ if (mp->work_link) {
+ mp->work_link = 0;
+ handle_link_event(mp);
+ work_done++;
+ continue;
+ }
+
+ queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
+ if (likely(!mp->oom))
+ queue_mask |= mp->work_rx_refill;
+
+ if (!queue_mask) {
+ if (mv643xx_eth_collect_events(mp))
+ continue;
+ break;
+ }
+
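+ /* fls() picks the highest-numbered queue with work pending. */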
+ queue = fls(queue_mask) - 1;
+ queue_mask = 1 << queue;
+
+ work_tbd = budget - work_done;
+ if (work_tbd > 16)
+ work_tbd = 16;
+
+ if (mp->work_tx_end & queue_mask) {
+ txq_kick(mp->txq + queue);
+ } else if (mp->work_tx & queue_mask) {
+ work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
+ txq_maybe_wake(mp->txq + queue);
+ } else if (mp->work_rx & queue_mask) {
+ work_done += rxq_process(mp->rxq + queue, work_tbd);
+ } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
+ work_done += rxq_refill(mp->rxq + queue, work_tbd);
+ } else {
+ BUG();
+ }
+ }
+
+ if (work_done < budget) {
+ if (mp->oom)
+ mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
+ napi_complete(napi);
+ wrlp(mp, INT_MASK, mp->int_mask);
+ }
+
+ return work_done;
+}
+
+static inline void oom_timer_wrapper(unsigned long data)
+{
+ struct mv643xx_eth_private *mp = (void *)data;
+
+ napi_schedule(&mp->napi);
+}
+
+static void phy_reset(struct mv643xx_eth_private *mp)
+{
+ int data;
+
+ data = phy_read(mp->phy, MII_BMCR);
+ if (data < 0)
+ return;
+
+ data |= BMCR_RESET;
+ if (phy_write(mp->phy, MII_BMCR, data) < 0)
+ return;
+
+ do {
+ data = phy_read(mp->phy, MII_BMCR);
+ } while (data >= 0 && data & BMCR_RESET);
+}
+
+static void port_start(struct mv643xx_eth_private *mp)
+{
+ u32 pscr;
+ int i;
+
+ /*
+ * Perform PHY reset, if there is a PHY.
+ */
+ if (mp->phy != NULL) {
+ struct ethtool_cmd cmd;
+
+ mv643xx_eth_get_settings(mp->dev, &cmd);
+ phy_reset(mp);
+ mv643xx_eth_set_settings(mp->dev, &cmd);
+ }
+
+ /*
+ * Configure basic link parameters.
+ */
+ pscr = rdlp(mp, PORT_SERIAL_CONTROL);
+
+ pscr |= SERIAL_PORT_ENABLE;
+ wrlp(mp, PORT_SERIAL_CONTROL, pscr);
+
+ pscr |= DO_NOT_FORCE_LINK_FAIL;
+ if (mp->phy == NULL)
+ pscr |= FORCE_LINK_PASS;
+ wrlp(mp, PORT_SERIAL_CONTROL, pscr);
+
+ /*
+ * Configure TX path and queues.
+ */
+ tx_set_rate(mp, 1000000000, 16777216);
+ for (i = 0; i < mp->txq_count; i++) {
+ struct tx_queue *txq = mp->txq + i;
+
+ txq_reset_hw_ptr(txq);
+ txq_set_rate(txq, 1000000000, 16777216);
+ txq_set_fixed_prio_mode(txq);
+ }
+
+ /*
+ * Deliver all unmatched unicast, TCP, UDP, BPDU and broadcast
+ * frames to RX queue #0, and include the pseudo-header when
+ * calculating receive checksums.
+ */
+ mv643xx_eth_set_features(mp->dev, mp->dev->features);
+
+ /*
+ * Treat BPDUs as normal multicasts, and disable partition mode.
+ */
+ wrlp(mp, PORT_CONFIG_EXT, 0x00000000);
+
+ /*
+ * Add configured unicast addresses to address filter table.
+ */
+ mv643xx_eth_program_unicast_filter(mp->dev);
+
+ /*
+ * Enable the receive queues.
+ */
+ for (i = 0; i < mp->rxq_count; i++) {
+ struct rx_queue *rxq = mp->rxq + i;
+ u32 addr;
+
+ addr = (u32)rxq->rx_desc_dma;
+ addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
+ wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);
+
+ rxq_enable(rxq);
+ }
+}
+
+static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
+{
+ int skb_size;
+
+ /*
+ * Reserve 2+14 bytes for an ethernet header (the hardware
+ * automatically prepends 2 bytes of dummy data to each
+ * received packet), 16 bytes for up to four VLAN tags, and
+ * 4 bytes for the trailing FCS -- 36 bytes total.
+ */
+ skb_size = mp->dev->mtu + 36;
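+ /* e.g. 1536 bytes for the default MTU of 1500 */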
+
+ /*
+ * Make sure that the skb size is a multiple of 8 bytes, as
+ * the lower three bits of the receive descriptor's buffer
+ * size field are ignored by the hardware.
+ */
+ mp->skb_size = (skb_size + 7) & ~7;
+
+ /*
+ * If NET_SKB_PAD is smaller than a cache line,
+ * netdev_alloc_skb() will cause skb->data to be misaligned
+ * to a cache line boundary. If this is the case, include
+ * some extra space to allow re-aligning the data area.
+ */
+ mp->skb_size += SKB_DMA_REALIGN;
+}
+
+static int mv643xx_eth_open(struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ int err;
+ int i;
+
+ wrlp(mp, INT_CAUSE, 0);
+ wrlp(mp, INT_CAUSE_EXT, 0);
+ rdlp(mp, INT_CAUSE_EXT);
+
+ err = request_irq(dev->irq, mv643xx_eth_irq,
+ IRQF_SHARED, dev->name, dev);
+ if (err) {
+ netdev_err(dev, "can't assign irq\n");
+ return -EAGAIN;
+ }
+
+ mv643xx_eth_recalc_skb_size(mp);
+
+ napi_enable(&mp->napi);
+
+ skb_queue_head_init(&mp->rx_recycle);
+
+ mp->int_mask = INT_EXT;
+
+ for (i = 0; i < mp->rxq_count; i++) {
+ err = rxq_init(mp, i);
+ if (err) {
+ while (--i >= 0)
+ rxq_deinit(mp->rxq + i);
+ goto out;
+ }
+
+ rxq_refill(mp->rxq + i, INT_MAX);
+ mp->int_mask |= INT_RX_0 << i;
+ }
+
+ if (mp->oom) {
+ mp->rx_oom.expires = jiffies + (HZ / 10);
+ add_timer(&mp->rx_oom);
+ }
+
+ for (i = 0; i < mp->txq_count; i++) {
+ err = txq_init(mp, i);
+ if (err) {
+ while (--i >= 0)
+ txq_deinit(mp->txq + i);
+ goto out_free;
+ }
+ mp->int_mask |= INT_TX_END_0 << i;
+ }
+
+ port_start(mp);
+
+ wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
+ wrlp(mp, INT_MASK, mp->int_mask);
+
+ return 0;
+
+
+out_free:
+ for (i = 0; i < mp->rxq_count; i++)
+ rxq_deinit(mp->rxq + i);
+out:
+ free_irq(dev->irq, dev);
+
+ return err;
+}
+
+static void port_reset(struct mv643xx_eth_private *mp)
+{
+ unsigned int data;
+ int i;
+
+ for (i = 0; i < mp->rxq_count; i++)
+ rxq_disable(mp->rxq + i);
+ for (i = 0; i < mp->txq_count; i++)
+ txq_disable(mp->txq + i);
+
+ while (1) {
+ u32 ps = rdlp(mp, PORT_STATUS);
+
+ if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
+ break;
+ udelay(10);
+ }
+
+ /* Clear the Enable bit in the Port Serial Control register */
+ data = rdlp(mp, PORT_SERIAL_CONTROL);
+ data &= ~(SERIAL_PORT_ENABLE |
+ DO_NOT_FORCE_LINK_FAIL |
+ FORCE_LINK_PASS);
+ wrlp(mp, PORT_SERIAL_CONTROL, data);
+}
+
+static int mv643xx_eth_stop(struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ int i;
+
+ wrlp(mp, INT_MASK_EXT, 0x00000000);
+ wrlp(mp, INT_MASK, 0x00000000);
+ rdlp(mp, INT_MASK);
+
+ napi_disable(&mp->napi);
+
+ del_timer_sync(&mp->rx_oom);
+
+ netif_carrier_off(dev);
+
+ free_irq(dev->irq, dev);
+
+ port_reset(mp);
+ mv643xx_eth_get_stats(dev);
+ mib_counters_update(mp);
+ del_timer_sync(&mp->mib_counters_timer);
+
+ skb_queue_purge(&mp->rx_recycle);
+
+ for (i = 0; i < mp->rxq_count; i++)
+ rxq_deinit(mp->rxq + i);
+ for (i = 0; i < mp->txq_count; i++)
+ txq_deinit(mp->txq + i);
+
+ return 0;
+}
+
+static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ if (mp->phy != NULL)
+ return phy_mii_ioctl(mp->phy, ifr, cmd);
+
+ return -EOPNOTSUPP;
+}
+
+static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ if (new_mtu < 64 || new_mtu > 9500)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ mv643xx_eth_recalc_skb_size(mp);
+ tx_set_rate(mp, 1000000000, 16777216);
+
+ if (!netif_running(dev))
+ return 0;
+
+ /*
+ * Stop and then re-open the interface. This will allocate RX
+ * skbs of the new MTU.
+ * Note that the re-open can fail if the system is out of
+ * memory, in which case all we can do is log an error.
+ */
+ mv643xx_eth_stop(dev);
+ if (mv643xx_eth_open(dev)) {
+ netdev_err(dev,
+ "fatal error on re-opening device after MTU change\n");
+ }
+
+ return 0;
+}
+
+static void tx_timeout_task(struct work_struct *ugly)
+{
+ struct mv643xx_eth_private *mp;
+
+ mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
+ if (netif_running(mp->dev)) {
+ netif_tx_stop_all_queues(mp->dev);
+ port_reset(mp);
+ port_start(mp);
+ netif_tx_wake_all_queues(mp->dev);
+ }
+}
+
+static void mv643xx_eth_tx_timeout(struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ netdev_info(dev, "tx timeout\n");
+
+ schedule_work(&mp->tx_timeout_task);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void mv643xx_eth_netpoll(struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ wrlp(mp, INT_MASK, 0x00000000);
+ rdlp(mp, INT_MASK);
+
+ mv643xx_eth_irq(dev->irq, dev);
+
+ wrlp(mp, INT_MASK, mp->int_mask);
+}
+#endif
+
+
+/* platform glue ************************************************************/
+static void
+mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
+ struct mbus_dram_target_info *dram)
+{
+ void __iomem *base = msp->base;
+ u32 win_enable;
+ u32 win_protect;
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ writel(0, base + WINDOW_BASE(i));
+ writel(0, base + WINDOW_SIZE(i));
+ if (i < 4)
+ writel(0, base + WINDOW_REMAP_HIGH(i));
+ }
+
+ win_enable = 0x3f;
+ win_protect = 0;
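+ /*
+ * The enable mask appears to be active-low: start with all
+ * six windows disabled and clear one bit for each chip
+ * select programmed below.
+ */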
+
+ for (i = 0; i < dram->num_cs; i++) {
+ struct mbus_dram_window *cs = dram->cs + i;
+
+ writel((cs->base & 0xffff0000) |
+ (cs->mbus_attr << 8) |
+ dram->mbus_dram_target_id, base + WINDOW_BASE(i));
+ writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
+
+ win_enable &= ~(1 << i);
+ win_protect |= 3 << (2 * i);
+ }
+
+ writel(win_enable, base + WINDOW_BAR_ENABLE);
+ msp->win_protect = win_protect;
+}
+
+static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
+{
+ /*
+ * Check whether we have a 14-bit coal limit field in bits
+ * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
+ * SDMA config register.
+ */
+ writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
+ if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
+ msp->extended_rx_coal_limit = 1;
+ else
+ msp->extended_rx_coal_limit = 0;
+
+ /*
+ * Check whether the MAC supports TX rate control, and if
+ * yes, whether its associated registers are in the old or
+ * the new place.
+ */
+ writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
+ if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
+ msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
+ } else {
+ writel(7, msp->base + 0x0400 + TX_BW_RATE);
+ if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
+ msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
+ else
+ msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
+ }
+}
+
+static int mv643xx_eth_shared_probe(struct platform_device *pdev)
+{
+ static int mv643xx_eth_version_printed;
+ struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
+ struct mv643xx_eth_shared_private *msp;
+ struct resource *res;
+ int ret;
+
+ if (!mv643xx_eth_version_printed++)
+ pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
+ mv643xx_eth_driver_version);
+
+ ret = -EINVAL;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL)
+ goto out;
+
+ ret = -ENOMEM;
+ msp = kzalloc(sizeof(*msp), GFP_KERNEL);
+ if (msp == NULL)
+ goto out;
+
+ msp->base = ioremap(res->start, resource_size(res));
+ if (msp->base == NULL)
+ goto out_free;
+
+ /*
+ * Set up and register SMI bus.
+ */
+ if (pd == NULL || pd->shared_smi == NULL) {
+ msp->smi_bus = mdiobus_alloc();
+ if (msp->smi_bus == NULL)
+ goto out_unmap;
+
+ msp->smi_bus->priv = msp;
+ msp->smi_bus->name = "mv643xx_eth smi";
+ msp->smi_bus->read = smi_bus_read;
+ msp->smi_bus->write = smi_bus_write;
+ snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
+ msp->smi_bus->parent = &pdev->dev;
+ msp->smi_bus->phy_mask = 0xffffffff;
+ if (mdiobus_register(msp->smi_bus) < 0)
+ goto out_free_mii_bus;
+ msp->smi = msp;
+ } else {
+ msp->smi = platform_get_drvdata(pd->shared_smi);
+ }
+
+ msp->err_interrupt = NO_IRQ;
+ init_waitqueue_head(&msp->smi_busy_wait);
+
+ /*
+ * Check whether the error interrupt is hooked up.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res != NULL) {
+ int err;
+
+ err = request_irq(res->start, mv643xx_eth_err_irq,
+ IRQF_SHARED, "mv643xx_eth", msp);
+ if (!err) {
+ writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK);
+ msp->err_interrupt = res->start;
+ }
+ }
+
+ /*
+ * (Re-)program MBUS remapping windows if we are asked to.
+ */
+ if (pd != NULL && pd->dram != NULL)
+ mv643xx_eth_conf_mbus_windows(msp, pd->dram);
+
+ /*
+ * Detect hardware parameters.
+ */
+ msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
+ msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
+ pd->tx_csum_limit : 9 * 1024;
+ infer_hw_params(msp);
+
+ platform_set_drvdata(pdev, msp);
+
+ return 0;
+
+out_free_mii_bus:
+ mdiobus_free(msp->smi_bus);
+out_unmap:
+ iounmap(msp->base);
+out_free:
+ kfree(msp);
+out:
+ return ret;
+}
+
+static int mv643xx_eth_shared_remove(struct platform_device *pdev)
+{
+ struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
+ struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
+
+ if (pd == NULL || pd->shared_smi == NULL) {
+ mdiobus_unregister(msp->smi_bus);
+ mdiobus_free(msp->smi_bus);
+ }
+ if (msp->err_interrupt != NO_IRQ)
+ free_irq(msp->err_interrupt, msp);
+ iounmap(msp->base);
+ kfree(msp);
+
+ return 0;
+}
+
+static struct platform_driver mv643xx_eth_shared_driver = {
+ .probe = mv643xx_eth_shared_probe,
+ .remove = mv643xx_eth_shared_remove,
+ .driver = {
+ .name = MV643XX_ETH_SHARED_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
+{
+ int addr_shift = 5 * mp->port_num;
+ u32 data;
+
+ data = rdl(mp, PHY_ADDR);
+ data &= ~(0x1f << addr_shift);
+ data |= (phy_addr & 0x1f) << addr_shift;
+ wrl(mp, PHY_ADDR, data);
+}
+
+static int phy_addr_get(struct mv643xx_eth_private *mp)
+{
+ unsigned int data;
+
+ data = rdl(mp, PHY_ADDR);
+
+ return (data >> (5 * mp->port_num)) & 0x1f;
+}
+
+static void set_params(struct mv643xx_eth_private *mp,
+ struct mv643xx_eth_platform_data *pd)
+{
+ struct net_device *dev = mp->dev;
+
+ if (is_valid_ether_addr(pd->mac_addr))
+ memcpy(dev->dev_addr, pd->mac_addr, 6);
+ else
+ uc_addr_get(mp, dev->dev_addr);
+
+ mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
+ if (pd->rx_queue_size)
+ mp->rx_ring_size = pd->rx_queue_size;
+ mp->rx_desc_sram_addr = pd->rx_sram_addr;
+ mp->rx_desc_sram_size = pd->rx_sram_size;
+
+ mp->rxq_count = pd->rx_queue_count ? : 1;
+
+ mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
+ if (pd->tx_queue_size)
+ mp->tx_ring_size = pd->tx_queue_size;
+ mp->tx_desc_sram_addr = pd->tx_sram_addr;
+ mp->tx_desc_sram_size = pd->tx_sram_size;
+
+ mp->txq_count = pd->tx_queue_count ? : 1;
+}
+
+static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
+ int phy_addr)
+{
+ struct mii_bus *bus = mp->shared->smi->smi_bus;
+ struct phy_device *phydev;
+ int start;
+ int num;
+ int i;
+
+ if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
+ start = phy_addr_get(mp) & 0x1f;
+ num = 32;
+ } else {
+ start = phy_addr & 0x1f;
+ num = 1;
+ }
+
+ phydev = NULL;
+ for (i = 0; i < num; i++) {
+ int addr = (start + i) & 0x1f;
+
+ if (bus->phy_map[addr] == NULL)
+ mdiobus_scan(bus, addr);
+
+ if (phydev == NULL) {
+ phydev = bus->phy_map[addr];
+ if (phydev != NULL)
+ phy_addr_set(mp, addr);
+ }
+ }
+
+ return phydev;
+}
+
+static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
+{
+ struct phy_device *phy = mp->phy;
+
+ phy_reset(mp);
+
+ phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII);
+
+ if (speed == 0) {
+ phy->autoneg = AUTONEG_ENABLE;
+ phy->speed = 0;
+ phy->duplex = 0;
+ phy->advertising = phy->supported | ADVERTISED_Autoneg;
+ } else {
+ phy->autoneg = AUTONEG_DISABLE;
+ phy->advertising = 0;
+ phy->speed = speed;
+ phy->duplex = duplex;
+ }
+ phy_start_aneg(phy);
+}
+
+static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
+{
+ u32 pscr;
+
+ pscr = rdlp(mp, PORT_SERIAL_CONTROL);
+ if (pscr & SERIAL_PORT_ENABLE) {
+ pscr &= ~SERIAL_PORT_ENABLE;
+ wrlp(mp, PORT_SERIAL_CONTROL, pscr);
+ }
+
+ pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
+ if (mp->phy == NULL) {
+ pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
+ if (speed == SPEED_1000)
+ pscr |= SET_GMII_SPEED_TO_1000;
+ else if (speed == SPEED_100)
+ pscr |= SET_MII_SPEED_TO_100;
+
+ pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;
+
+ pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
+ if (duplex == DUPLEX_FULL)
+ pscr |= SET_FULL_DUPLEX_MODE;
+ }
+
+ wrlp(mp, PORT_SERIAL_CONTROL, pscr);
+}
+
+static const struct net_device_ops mv643xx_eth_netdev_ops = {
+ .ndo_open = mv643xx_eth_open,
+ .ndo_stop = mv643xx_eth_stop,
+ .ndo_start_xmit = mv643xx_eth_xmit,
+ .ndo_set_rx_mode = mv643xx_eth_set_rx_mode,
+ .ndo_set_mac_address = mv643xx_eth_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = mv643xx_eth_ioctl,
+ .ndo_change_mtu = mv643xx_eth_change_mtu,
+ .ndo_set_features = mv643xx_eth_set_features,
+ .ndo_tx_timeout = mv643xx_eth_tx_timeout,
+ .ndo_get_stats = mv643xx_eth_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = mv643xx_eth_netpoll,
+#endif
+};
+
+static int mv643xx_eth_probe(struct platform_device *pdev)
+{
+ struct mv643xx_eth_platform_data *pd;
+ struct mv643xx_eth_private *mp;
+ struct net_device *dev;
+ struct resource *res;
+ int err;
+
+ pd = pdev->dev.platform_data;
+ if (pd == NULL) {
+ dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
+ return -ENODEV;
+ }
+
+ if (pd->shared == NULL) {
+ dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
+ return -ENODEV;
+ }
+
+ dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
+ if (!dev)
+ return -ENOMEM;
+
+ mp = netdev_priv(dev);
+ platform_set_drvdata(pdev, mp);
+
+ mp->shared = platform_get_drvdata(pd->shared);
+ mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
+ mp->port_num = pd->port_number;
+
+ mp->dev = dev;
+
+ set_params(mp, pd);
+ netif_set_real_num_tx_queues(dev, mp->txq_count);
+ netif_set_real_num_rx_queues(dev, mp->rxq_count);
+
+ if (pd->phy_addr != MV643XX_ETH_PHY_NONE)
+ mp->phy = phy_scan(mp, pd->phy_addr);
+
+ if (mp->phy != NULL)
+ phy_init(mp, pd->speed, pd->duplex);
+
+ SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
+
+ init_pscr(mp, pd->speed, pd->duplex);
+
+
+ mib_counters_clear(mp);
+
+ init_timer(&mp->mib_counters_timer);
+ mp->mib_counters_timer.data = (unsigned long)mp;
+ mp->mib_counters_timer.function = mib_counters_timer_wrapper;
+ mp->mib_counters_timer.expires = jiffies + 30 * HZ;
+ add_timer(&mp->mib_counters_timer);
+
+ spin_lock_init(&mp->mib_counters_lock);
+
+ INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
+
+ netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);
+
+ init_timer(&mp->rx_oom);
+ mp->rx_oom.data = (unsigned long)mp;
+ mp->rx_oom.function = oom_timer_wrapper;
+
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ BUG_ON(!res);
+ dev->irq = res->start;
+
+ dev->netdev_ops = &mv643xx_eth_netdev_ops;
+
+ dev->watchdog_timeo = 2 * HZ;
+ dev->base_addr = 0;
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_LRO;
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
+
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (mp->shared->win_protect)
+ wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
+
+ netif_carrier_off(dev);
+
+ wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
+
+ set_rx_coal(mp, 250);
+ set_tx_coal(mp, 0);
+
+ err = register_netdev(dev);
+ if (err)
+ goto out;
+
+ netdev_notice(dev, "port %d with MAC address %pM\n",
+ mp->port_num, dev->dev_addr);
+
+ if (mp->tx_desc_sram_size > 0)
+ netdev_notice(dev, "configured with sram\n");
+
+ return 0;
+
+out:
+ free_netdev(dev);
+
+ return err;
+}
+
+static int mv643xx_eth_remove(struct platform_device *pdev)
+{
+ struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
+
+ unregister_netdev(mp->dev);
+ if (mp->phy != NULL)
+ phy_detach(mp->phy);
+ cancel_work_sync(&mp->tx_timeout_task);
+ free_netdev(mp->dev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static void mv643xx_eth_shutdown(struct platform_device *pdev)
+{
+ struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
+
+ /* Mask all interrupts on ethernet port */
+ wrlp(mp, INT_MASK, 0);
+ rdlp(mp, INT_MASK);
+
+ if (netif_running(mp->dev))
+ port_reset(mp);
+}
+
+static struct platform_driver mv643xx_eth_driver = {
+ .probe = mv643xx_eth_probe,
+ .remove = mv643xx_eth_remove,
+ .shutdown = mv643xx_eth_shutdown,
+ .driver = {
+ .name = MV643XX_ETH_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mv643xx_eth_init_module(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&mv643xx_eth_shared_driver);
+ if (!rc) {
+ rc = platform_driver_register(&mv643xx_eth_driver);
+ if (rc)
+ platform_driver_unregister(&mv643xx_eth_shared_driver);
+ }
+
+ return rc;
+}
+module_init(mv643xx_eth_init_module);
+
+static void __exit mv643xx_eth_cleanup_module(void)
+{
+ platform_driver_unregister(&mv643xx_eth_driver);
+ platform_driver_unregister(&mv643xx_eth_shared_driver);
+}
+module_exit(mv643xx_eth_cleanup_module);
+
+MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
+ "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
+MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
+MODULE_ALIAS("platform:" MV643XX_ETH_NAME);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
new file mode 100644
index 000000000000..d17d0624c5e6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -0,0 +1,1663 @@
+/*
+ * PXA168 ethernet driver.
+ * Most of the code is derived from mv643xx ethernet driver.
+ *
+ * Copyright (C) 2010 Marvell International Ltd.
+ * Sachin Sanap <ssanap@marvell.com>
+ * Zhangfei Gao <zgao6@marvell.com>
+ * Philip Rakity <prakity@marvell.com>
+ * Mark Brown <markb@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/etherdevice.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/cacheflush.h>
+#include <linux/pxa168_eth.h>
+
+#define DRIVER_NAME "pxa168-eth"
+#define DRIVER_VERSION "0.3"
+
+/*
+ * Registers
+ */
+
+#define PHY_ADDRESS 0x0000
+#define SMI 0x0010
+#define PORT_CONFIG 0x0400
+#define PORT_CONFIG_EXT 0x0408
+#define PORT_COMMAND 0x0410
+#define PORT_STATUS 0x0418
+#define HTPR 0x0428
+#define SDMA_CONFIG 0x0440
+#define SDMA_CMD 0x0448
+#define INT_CAUSE 0x0450
+#define INT_W_CLEAR 0x0454
+#define INT_MASK 0x0458
+#define ETH_F_RX_DESC_0 0x0480
+#define ETH_C_RX_DESC_0 0x04A0
+#define ETH_C_TX_DESC_1 0x04E4
+
+/* smi register */
+#define SMI_BUSY (1 << 28) /* SMI unit busy */
+#define SMI_R_VALID (1 << 27) /* SMI read data valid */
+#define SMI_OP_W (0 << 26) /* Write operation */
+#define SMI_OP_R (1 << 26) /* Read operation */
+
+#define PHY_WAIT_ITERATIONS 10
+
+#define PXA168_ETH_PHY_ADDR_DEFAULT 0
+/* RX & TX descriptor command */
+#define BUF_OWNED_BY_DMA (1 << 31)
+
+/* RX descriptor status */
+#define RX_EN_INT (1 << 23)
+#define RX_FIRST_DESC (1 << 17)
+#define RX_LAST_DESC (1 << 16)
+#define RX_ERROR (1 << 15)
+
+/* TX descriptor command */
+#define TX_EN_INT (1 << 23)
+#define TX_GEN_CRC (1 << 22)
+#define TX_ZERO_PADDING (1 << 18)
+#define TX_FIRST_DESC (1 << 17)
+#define TX_LAST_DESC (1 << 16)
+#define TX_ERROR (1 << 15)
+
+/* SDMA_CMD */
+#define SDMA_CMD_AT (1 << 31)
+#define SDMA_CMD_TXDL (1 << 24)
+#define SDMA_CMD_TXDH (1 << 23)
+#define SDMA_CMD_AR (1 << 15)
+#define SDMA_CMD_ERD (1 << 7)
+
+/* Bit definitions of the Port Config Reg */
+#define PCR_HS (1 << 12)
+#define PCR_EN (1 << 7)
+#define PCR_PM (1 << 0)
+
+/* Bit definitions of the Port Config Extend Reg */
+#define PCXR_2BSM (1 << 28)
+#define PCXR_DSCP_EN (1 << 21)
+#define PCXR_MFL_1518 (0 << 14)
+#define PCXR_MFL_1536 (1 << 14)
+#define PCXR_MFL_2048 (2 << 14)
+#define PCXR_MFL_64K (3 << 14)
+#define PCXR_FLP (1 << 11)
+#define PCXR_PRIO_TX_OFF 3
+#define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF)
+
+/* Bit definitions of the SDMA Config Reg */
+#define SDCR_BSZ_OFF 12
+#define SDCR_BSZ8 (3 << SDCR_BSZ_OFF)
+#define SDCR_BSZ4 (2 << SDCR_BSZ_OFF)
+#define SDCR_BSZ2 (1 << SDCR_BSZ_OFF)
+#define SDCR_BSZ1 (0 << SDCR_BSZ_OFF)
+#define SDCR_BLMR (1 << 6)
+#define SDCR_BLMT (1 << 7)
+#define SDCR_RIFB (1 << 9)
+#define SDCR_RC_OFF 2
+#define SDCR_RC_MAX_RETRANS (0xf << SDCR_RC_OFF)
+
+/*
+ * The bit definitions of the Interrupt Cause Reg
+ * and the Interrupt Mask Reg are the same.
+ */
+#define ICR_RXBUF (1 << 0)
+#define ICR_TXBUF_H (1 << 2)
+#define ICR_TXBUF_L (1 << 3)
+#define ICR_TXEND_H (1 << 6)
+#define ICR_TXEND_L (1 << 7)
+#define ICR_RXERR (1 << 8)
+#define ICR_TXERR_H (1 << 10)
+#define ICR_TXERR_L (1 << 11)
+#define ICR_TX_UDR (1 << 13)
+#define ICR_MII_CH (1 << 28)
+
+#define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\
+ ICR_TXERR_H | ICR_TXERR_L |\
+ ICR_TXEND_H | ICR_TXEND_L |\
+ ICR_RXBUF | ICR_RXERR | ICR_MII_CH)
+
+#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
+
+#define NUM_RX_DESCS 64
+#define NUM_TX_DESCS 64
+
+#define HASH_ADD 0
+#define HASH_DELETE 1
+#define HASH_ADDR_TABLE_SIZE 0x4000 /* 16K (1/2K address - PCR_HS == 1) */
+#define HOP_NUMBER 12
+
+/* Bit definitions for Port status */
+#define PORT_SPEED_100 (1 << 0)
+#define FULL_DUPLEX (1 << 1)
+#define FLOW_CONTROL_ENABLED (1 << 2)
+#define LINK_UP (1 << 3)
+
+/* Bit definitions for work to be done */
+#define WORK_LINK (1 << 0)
+#define WORK_TX_DONE (1 << 1)
+
+/*
+ * Misc definitions.
+ */
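+/*
+ * Extra headroom reserved so that skb->data can be re-aligned to a
+ * cache line boundary when NET_SKB_PAD is smaller than a cache line;
+ * see pxa168_eth_recalc_skb_size() below.
+ */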
+#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
+
+struct rx_desc {
+ u32 cmd_sts; /* Descriptor command status */
+ u16 byte_cnt; /* Descriptor buffer byte count */
+ u16 buf_size; /* Buffer size */
+ u32 buf_ptr; /* Descriptor buffer pointer */
+ u32 next_desc_ptr; /* Next descriptor pointer */
+};
+
+struct tx_desc {
+ u32 cmd_sts; /* Command/status field */
+ u16 reserved;
+ u16 byte_cnt; /* buffer byte count */
+ u32 buf_ptr; /* pointer to buffer for this descriptor */
+ u32 next_desc_ptr; /* Pointer to next descriptor */
+};
+
+struct pxa168_eth_private {
+ int port_num; /* User Ethernet port number */
+
+ int rx_resource_err; /* Rx ring resource error flag */
+
+ /* Next available and first returning Rx resource */
+ int rx_curr_desc_q, rx_used_desc_q;
+
+ /* Next available and first returning Tx resource */
+ int tx_curr_desc_q, tx_used_desc_q;
+
+ struct rx_desc *p_rx_desc_area;
+ dma_addr_t rx_desc_dma;
+ int rx_desc_area_size;
+ struct sk_buff **rx_skb;
+
+ struct tx_desc *p_tx_desc_area;
+ dma_addr_t tx_desc_dma;
+ int tx_desc_area_size;
+ struct sk_buff **tx_skb;
+
+ struct work_struct tx_timeout_task;
+
+ struct net_device *dev;
+ struct napi_struct napi;
+ u8 work_todo;
+ int skb_size;
+
+ struct net_device_stats stats;
+ /* Size of Tx Ring per queue */
+ int tx_ring_size;
+ /* Number of tx descriptors in use */
+ int tx_desc_count;
+ /* Size of Rx Ring per queue */
+ int rx_ring_size;
+ /* Number of rx descriptors in use */
+ int rx_desc_count;
+
+ /*
+ * Used in case RX Ring is empty, which can occur when
+ * system does not have resources (skb's)
+ */
+ struct timer_list timeout;
+ struct mii_bus *smi_bus;
+ struct phy_device *phy;
+
+ /* clock */
+ struct clk *clk;
+ struct pxa168_eth_platform_data *pd;
+ /*
+ * Ethernet controller base address.
+ */
+ void __iomem *base;
+
+ /* Pointer to the hardware address filter table */
+ void *htpr;
+ dma_addr_t htpr_dma;
+};
+
+struct addr_table_entry {
+ __le32 lo;
+ __le32 hi;
+};
+
+/* Bit fields of a Hash Table Entry */
+enum hash_table_entry {
+ HASH_ENTRY_VALID = 1,
+ SKIP = 2,
+ HASH_ENTRY_RECEIVE_DISCARD = 4,
+ HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
+};
+
+static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static int pxa168_init_hw(struct pxa168_eth_private *pep);
+static void eth_port_reset(struct net_device *dev);
+static void eth_port_start(struct net_device *dev);
+static int pxa168_eth_open(struct net_device *dev);
+static int pxa168_eth_stop(struct net_device *dev);
+static int ethernet_phy_setup(struct net_device *dev);
+
+static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
+{
+ return readl(pep->base + offset);
+}
+
+static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
+{
+ writel(data, pep->base + offset);
+}
+
+static void abort_dma(struct pxa168_eth_private *pep)
+{
+ int delay;
+ int max_retries = 40;
+
+ do {
+ wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
+ udelay(100);
+
+ delay = 10;
+ while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
+ && delay-- > 0) {
+ udelay(10);
+ }
+ } while (max_retries-- > 0 && delay <= 0);
+
+ if (max_retries <= 0)
+ printk(KERN_ERR "%s : DMA Stuck\n", __func__);
+}
+
+static int ethernet_phy_get(struct pxa168_eth_private *pep)
+{
+ unsigned int reg_data;
+
+ reg_data = rdl(pep, PHY_ADDRESS);
+
+ return (reg_data >> (5 * pep->port_num)) & 0x1f;
+}
+
+static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
+{
+ u32 reg_data;
+ int addr_shift = 5 * pep->port_num;
+
+ reg_data = rdl(pep, PHY_ADDRESS);
+ reg_data &= ~(0x1f << addr_shift);
+ reg_data |= (phy_addr & 0x1f) << addr_shift;
+ wrl(pep, PHY_ADDRESS, reg_data);
+}
+
+static void ethernet_phy_reset(struct pxa168_eth_private *pep)
+{
+ int data;
+
+ data = phy_read(pep->phy, MII_BMCR);
+ if (data < 0)
+ return;
+
+ data |= BMCR_RESET;
+ if (phy_write(pep->phy, MII_BMCR, data) < 0)
+ return;
+
+ do {
+ data = phy_read(pep->phy, MII_BMCR);
+ } while (data >= 0 && data & BMCR_RESET);
+}
+
+static void rxq_refill(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct rx_desc *p_used_rx_desc;
+ int used_rx_desc;
+
+ while (pep->rx_desc_count < pep->rx_ring_size) {
+ int size;
+
+ skb = dev_alloc_skb(pep->skb_size);
+ if (!skb)
+ break;
+ if (SKB_DMA_REALIGN)
+ skb_reserve(skb, SKB_DMA_REALIGN);
+ pep->rx_desc_count++;
+ /* Get 'used' Rx descriptor */
+ used_rx_desc = pep->rx_used_desc_q;
+ p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
+ size = skb->end - skb->data;
+ p_used_rx_desc->buf_ptr = dma_map_single(NULL,
+ skb->data,
+ size,
+ DMA_FROM_DEVICE);
+ p_used_rx_desc->buf_size = size;
+ pep->rx_skb[used_rx_desc] = skb;
+
+ /* Return the descriptor to DMA ownership */
+ wmb();
+ p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
+ wmb();
+
+ /* Move the used descriptor pointer to the next descriptor */
+ pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
+
+ /* Any Rx return cancels the Rx resource error status */
+ pep->rx_resource_err = 0;
+
+ skb_reserve(skb, ETH_HW_IP_ALIGN);
+ }
+
+ /*
+ * If the RX ring is out of skb's, set a timer to retry the
+ * allocation at a later time.
+ */
+ if (pep->rx_desc_count == 0) {
+ pep->timeout.expires = jiffies + (HZ / 10);
+ add_timer(&pep->timeout);
+ }
+}
+
+static inline void rxq_refill_timer_wrapper(unsigned long data)
+{
+ struct pxa168_eth_private *pep = (void *)data;
+ napi_schedule(&pep->napi);
+}
+
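+/* Reverse the bit order within each nibble of a byte, e.g. 0x12 -> 0x84. */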
+static inline u8 flip_8_bits(u8 x)
+{
+ return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
+ | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
+ | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
+ | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
+}
+
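+/* Swap the two nibbles of every byte in the MAC address, e.g. 0x12 -> 0x21. */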
+static void nibble_swap_every_byte(unsigned char *mac_addr)
+{
+ int i;
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
+ ((mac_addr[i] & 0xf0) >> 4);
+ }
+}
+
+static void inverse_every_nibble(unsigned char *mac_addr)
+{
+ int i;
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = flip_8_bits(mac_addr[i]);
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * Calculate the hash table index for the given MAC address.
+ * Inputs
+ * mac_addr_orig - MAC address.
+ * Outputs
+ * return the calculated hash table index.
+ */
+static u32 hash_function(unsigned char *mac_addr_orig)
+{
+ u32 hash_result;
+ u32 addr0;
+ u32 addr1;
+ u32 addr2;
+ u32 addr3;
+ unsigned char mac_addr[ETH_ALEN];
+
+ /* Make a copy of the MAC address since we are going to perform
+ * bit operations on it
+ */
+ memcpy(mac_addr, mac_addr_orig, ETH_ALEN);
+
+ nibble_swap_every_byte(mac_addr);
+ inverse_every_nibble(mac_addr);
+
+ addr0 = (mac_addr[5] >> 2) & 0x3f;
+ addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
+ addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
+ addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);
+
+ hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
+ hash_result = hash_result & 0x07ff;
+ return hash_result;
+}
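+
+/*
+ * The 11-bit result indexes one of the 0x800 entries of the hash
+ * table; add_del_hash_entry() below probes up to HOP_NUMBER
+ * consecutive entries (wrapping at 0x7ff) to resolve collisions.
+ */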
+
+/*
+ * ----------------------------------------------------------------------------
+ * This function will add/delete an entry in the address table.
+ * Inputs
+ * pep - ethernet private data.
+ * mac_addr - MAC address.
+ * skip - if 1, skip this address. Used when deleting an entry which is
+ * part of a chain in the hash table. We can't just delete such an
+ * entry since that would break the chain; instead, the tables need
+ * to be defragmented from time to time.
+ * rd - 0 Discard packet upon match.
+ * - 1 Receive packet upon match.
+ * Outputs
+ * address table entry is added/deleted.
+ * 0 if success.
+ * -ENOSPC if table full
+ */
+static int add_del_hash_entry(struct pxa168_eth_private *pep,
+ unsigned char *mac_addr,
+ u32 rd, u32 skip, int del)
+{
+ struct addr_table_entry *entry, *start;
+ u32 new_high;
+ u32 new_low;
+ u32 i;
+
+ new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
+ | (((mac_addr[1] >> 0) & 0xf) << 11)
+ | (((mac_addr[0] >> 4) & 0xf) << 7)
+ | (((mac_addr[0] >> 0) & 0xf) << 3)
+ | (((mac_addr[3] >> 4) & 0x1) << 31)
+ | (((mac_addr[3] >> 0) & 0xf) << 27)
+ | (((mac_addr[2] >> 4) & 0xf) << 23)
+ | (((mac_addr[2] >> 0) & 0xf) << 19)
+ | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
+ | HASH_ENTRY_VALID;
+
+ new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
+ | (((mac_addr[5] >> 0) & 0xf) << 11)
+ | (((mac_addr[4] >> 4) & 0xf) << 7)
+ | (((mac_addr[4] >> 0) & 0xf) << 3)
+ | (((mac_addr[3] >> 5) & 0x7) << 0);
+
+ /*
+ * Pick the appropriate table, start scanning for free/reusable
+ * entries at the index obtained by hashing the specified MAC address
+ */
+ start = pep->htpr;
+ entry = start + hash_function(mac_addr);
+ for (i = 0; i < HOP_NUMBER; i++) {
+ if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
+ break;
+ } else {
+ /* if same address put in same position */
+ if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
+ (new_low & 0xfffffff8)) &&
+ (le32_to_cpu(entry->hi) == new_high)) {
+ break;
+ }
+ }
+ if (entry == start + 0x7ff)
+ entry = start;
+ else
+ entry++;
+ }
+
+ if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
+ (le32_to_cpu(entry->hi) != new_high) && del)
+ return 0;
+
+ if (i == HOP_NUMBER) {
+ if (!del) {
+ printk(KERN_INFO "%s: hash table section is full; "
+ "consider moving to the 16kB implementation\n",
+ __FILE__);
+ return -ENOSPC;
+ } else
+ return 0;
+ }
+
+ /*
+ * Update the selected entry
+ */
+ if (del) {
+ entry->hi = 0;
+ entry->lo = 0;
+ } else {
+ entry->hi = cpu_to_le32(new_high);
+ entry->lo = cpu_to_le32(new_low);
+ }
+
+ return 0;
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * Create an address table entry from the MAC address info
+ * found in the specified net_device struct
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
+ unsigned char *oaddr,
+ unsigned char *addr)
+{
+ /* Delete old entry */
+ if (oaddr)
+ add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
+ /* Add new entry */
+ add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
+}
+
+static int init_hash_table(struct pxa168_eth_private *pep)
+{
+ /*
+ * Hardware expects the CPU to build a hash table based on a
+ * predefined hash function and to populate it based on hardware
+ * addresses. The location of the hash table is identified by a
+ * 32-bit pointer stored in the HTPR internal register. Two possible
+ * sizes exist for the hash table: 8kB (256kB of DRAM required,
+ * 4 x 64 kB banks) and 1/2kB (16kB of DRAM required, 4 x 4 kB
+ * banks). We currently only support the 1/2kB variant.
+ */
+ /* TODO: Add support for the 8kB hash table and an alternative hash
+ * function. The driver could dynamically switch to them if the
+ * 1/2kB hash table is full.
+ */
+ if (pep->htpr == NULL) {
+ pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
+ HASH_ADDR_TABLE_SIZE,
+ &pep->htpr_dma, GFP_KERNEL);
+ if (pep->htpr == NULL)
+ return -ENOMEM;
+ }
+ memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
+ wrl(pep, HTPR, pep->htpr_dma);
+ return 0;
+}
+
+static void pxa168_eth_set_rx_mode(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ u32 val;
+
+ val = rdl(pep, PORT_CONFIG);
+ if (dev->flags & IFF_PROMISC)
+ val |= PCR_PM;
+ else
+ val &= ~PCR_PM;
+ wrl(pep, PORT_CONFIG, val);
+
+ /*
+ * Remove the old list of MAC address and add dev->addr
+ * and multicast address.
+ */
+ memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
+ update_hash_table_mac_address(pep, NULL, dev->dev_addr);
+
+ netdev_for_each_mc_addr(ha, dev)
+ update_hash_table_mac_address(pep, NULL, ha->addr);
+}
+
+static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = addr;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ unsigned char old_mac[ETH_ALEN];
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EINVAL;
+ memcpy(old_mac, dev->dev_addr, ETH_ALEN);
+ memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ netif_addr_lock_bh(dev);
+ update_hash_table_mac_address(pep, old_mac, dev->dev_addr);
+ netif_addr_unlock_bh(dev);
+ return 0;
+}
+
+static void eth_port_start(struct net_device *dev)
+{
+ unsigned int val = 0;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int tx_curr_desc, rx_curr_desc;
+
+ /* Perform PHY reset, if there is a PHY. */
+ if (pep->phy != NULL) {
+ struct ethtool_cmd cmd;
+
+ pxa168_get_settings(pep->dev, &cmd);
+ ethernet_phy_reset(pep);
+ pxa168_set_settings(pep->dev, &cmd);
+ }
+
+ /* Assignment of Tx CTRP of given queue */
+ tx_curr_desc = pep->tx_curr_desc_q;
+ wrl(pep, ETH_C_TX_DESC_1,
+ (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));
+
+ /* Assignment of Rx CRDP of given queue */
+ rx_curr_desc = pep->rx_curr_desc_q;
+ wrl(pep, ETH_C_RX_DESC_0,
+ (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
+
+ wrl(pep, ETH_F_RX_DESC_0,
+ (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
+
+ /* Clear all interrupts */
+ wrl(pep, INT_CAUSE, 0);
+
+ /* Enable all interrupts for receive, transmit and error. */
+ wrl(pep, INT_MASK, ALL_INTS);
+
+ val = rdl(pep, PORT_CONFIG);
+ val |= PCR_EN;
+ wrl(pep, PORT_CONFIG, val);
+
+ /* Start RX DMA engine */
+ val = rdl(pep, SDMA_CMD);
+ val |= SDMA_CMD_ERD;
+ wrl(pep, SDMA_CMD, val);
+}
+
+static void eth_port_reset(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ unsigned int val = 0;
+
+ /* Stop all interrupts for receive, transmit and error. */
+ wrl(pep, INT_MASK, 0);
+
+ /* Clear all interrupts */
+ wrl(pep, INT_CAUSE, 0);
+
+ /* Stop RX DMA */
+ val = rdl(pep, SDMA_CMD);
+ val &= ~SDMA_CMD_ERD; /* abort dma command */
+
+ /* Abort any transmit and receive operations and put DMA
+ * in idle state.
+ */
+ abort_dma(pep);
+
+ /* Disable port */
+ val = rdl(pep, PORT_CONFIG);
+ val &= ~PCR_EN;
+ wrl(pep, PORT_CONFIG, val);
+}
+
+/*
+ * txq_reclaim - Free the tx desc data for completed descriptors.
+ * If force is non-zero, frees uncompleted descriptors as well.
+ * Returns the number of descriptors freed, or -1 if nothing was
+ * freed and the first pending descriptor is still owned by DMA.
+ */
+static int txq_reclaim(struct net_device *dev, int force)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct tx_desc *desc;
+ u32 cmd_sts;
+ struct sk_buff *skb;
+ int tx_index;
+ dma_addr_t addr;
+ int count;
+ int released = 0;
+
+ netif_tx_lock(dev);
+
+ pep->work_todo &= ~WORK_TX_DONE;
+ while (pep->tx_desc_count > 0) {
+ tx_index = pep->tx_used_desc_q;
+ desc = &pep->p_tx_desc_area[tx_index];
+ cmd_sts = desc->cmd_sts;
+ if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
+ if (released == 0)
+ released = -1;
+ goto txq_reclaim_end;
+ }
+ pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
+ pep->tx_desc_count--;
+ addr = desc->buf_ptr;
+ count = desc->byte_cnt;
+ skb = pep->tx_skb[tx_index];
+ if (skb)
+ pep->tx_skb[tx_index] = NULL;
+
+ if (cmd_sts & TX_ERROR) {
+ if (net_ratelimit())
+ printk(KERN_ERR "%s: Error in TX\n", dev->name);
+ dev->stats.tx_errors++;
+ }
+ dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
+ if (skb)
+ dev_kfree_skb_irq(skb);
+ released++;
+ }
+txq_reclaim_end:
+ netif_tx_unlock(dev);
+ return released;
+}
+
+static void pxa168_eth_tx_timeout(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ printk(KERN_INFO "%s: TX timeout desc_count %d\n",
+ dev->name, pep->tx_desc_count);
+
+ schedule_work(&pep->tx_timeout_task);
+}
+
+static void pxa168_eth_tx_timeout_task(struct work_struct *work)
+{
+ struct pxa168_eth_private *pep = container_of(work,
+ struct pxa168_eth_private,
+ tx_timeout_task);
+ struct net_device *dev = pep->dev;
+ pxa168_eth_stop(dev);
+ pxa168_eth_open(dev);
+}
+
+static int rxq_process(struct net_device *dev, int budget)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ unsigned int received_packets = 0;
+ struct sk_buff *skb;
+
+ while (budget-- > 0) {
+ int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
+ struct rx_desc *rx_desc;
+ unsigned int cmd_sts;
+
+ /* Do not process Rx ring in case of Rx ring resource error */
+ if (pep->rx_resource_err)
+ break;
+ rx_curr_desc = pep->rx_curr_desc_q;
+ rx_used_desc = pep->rx_used_desc_q;
+ rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
+ cmd_sts = rx_desc->cmd_sts;
+ rmb();
+ if (cmd_sts & (BUF_OWNED_BY_DMA))
+ break;
+ skb = pep->rx_skb[rx_curr_desc];
+ pep->rx_skb[rx_curr_desc] = NULL;
+
+ rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
+ pep->rx_curr_desc_q = rx_next_curr_desc;
+
+ /* Rx descriptors exhausted: set the Rx ring resource error flag */
+ if (rx_next_curr_desc == rx_used_desc)
+ pep->rx_resource_err = 1;
+ pep->rx_desc_count--;
+ dma_unmap_single(NULL, rx_desc->buf_ptr,
+ rx_desc->buf_size,
+ DMA_FROM_DEVICE);
+ received_packets++;
+ /*
+ * Update statistics.
+ * Note: the byte count includes the 4-byte CRC.
+ */
+ stats->rx_packets++;
+ stats->rx_bytes += rx_desc->byte_cnt;
+ /*
+ * If the packet was received without both the first and last
+ * descriptor bits set, or with the error summary bit set, it
+ * needs to be dropped.
+ */
+ if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+ (RX_FIRST_DESC | RX_LAST_DESC))
+ || (cmd_sts & RX_ERROR)) {
+
+ stats->rx_dropped++;
+ if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+ (RX_FIRST_DESC | RX_LAST_DESC)) {
+ if (net_ratelimit())
+ printk(KERN_ERR
+ "%s: Rx pkt on multiple desc\n",
+ dev->name);
+ }
+ if (cmd_sts & RX_ERROR)
+ stats->rx_errors++;
+ dev_kfree_skb_irq(skb);
+ } else {
+ /*
+ * The -4 is for the CRC in the trailer of the
+ * received packet
+ */
+ skb_put(skb, rx_desc->byte_cnt - 4);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ }
+ }
+ /* Fill RX ring with skb's */
+ rxq_refill(dev);
+ return received_packets;
+}
+
+static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
+ struct net_device *dev)
+{
+ u32 icr;
+ int ret = 0;
+
+ icr = rdl(pep, INT_CAUSE);
+ if (icr == 0)
+ return IRQ_NONE;
+
+ wrl(pep, INT_CAUSE, ~icr);
+ if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
+ pep->work_todo |= WORK_TX_DONE;
+ ret = 1;
+ }
+ if (icr & ICR_RXBUF)
+ ret = 1;
+ if (icr & ICR_MII_CH) {
+ pep->work_todo |= WORK_LINK;
+ ret = 1;
+ }
+ return ret;
+}
+
+static void handle_link_event(struct pxa168_eth_private *pep)
+{
+ struct net_device *dev = pep->dev;
+ u32 port_status;
+ int speed;
+ int duplex;
+ int fc;
+
+ port_status = rdl(pep, PORT_STATUS);
+ if (!(port_status & LINK_UP)) {
+ if (netif_carrier_ok(dev)) {
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ netif_carrier_off(dev);
+ txq_reclaim(dev, 1);
+ }
+ return;
+ }
+ if (port_status & PORT_SPEED_100)
+ speed = 100;
+ else
+ speed = 10;
+
+ duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
+ fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
+ printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
+ "flow control %sabled\n", dev->name,
+ speed, duplex ? "full" : "half", fc ? "en" : "dis");
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+}
+
+static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if (unlikely(!pxa168_eth_collect_events(pep, dev)))
+ return IRQ_NONE;
+ /* Disable interrupts */
+ wrl(pep, INT_MASK, 0);
+ napi_schedule(&pep->napi);
+ return IRQ_HANDLED;
+}
+
+static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
+{
+ int skb_size;
+
+ /*
+ * Reserve 2+14 bytes for an ethernet header (the hardware
+ * automatically prepends 2 bytes of dummy data to each
+ * received packet), 16 bytes for up to four VLAN tags, and
+ * 4 bytes for the trailing FCS -- 36 bytes total.
+ */
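+ /* e.g. the default MTU of 1500 gives 1500 + 36 = 1536 bytes. */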
+ skb_size = pep->dev->mtu + 36;
+
+ /*
+ * Make sure that the skb size is a multiple of 8 bytes, as
+ * the lower three bits of the receive descriptor's buffer
+ * size field are ignored by the hardware.
+ */
+ pep->skb_size = (skb_size + 7) & ~7;
+
+ /*
+ * If NET_SKB_PAD is smaller than a cache line,
+ * netdev_alloc_skb() will cause skb->data to be misaligned
+ * to a cache line boundary. If this is the case, include
+ * some extra space to allow re-aligning the data area.
+ */
+ pep->skb_size += SKB_DMA_REALIGN;
+
+}
+
+static int set_port_config_ext(struct pxa168_eth_private *pep)
+{
+ int skb_size;
+
+ pxa168_eth_recalc_skb_size(pep);
+ if (pep->skb_size <= 1518)
+ skb_size = PCXR_MFL_1518;
+ else if (pep->skb_size <= 1536)
+ skb_size = PCXR_MFL_1536;
+ else if (pep->skb_size <= 2048)
+ skb_size = PCXR_MFL_2048;
+ else
+ skb_size = PCXR_MFL_64K;
+
+ /* Extended Port Configuration */
+ wrl(pep,
+ PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */
+ PCXR_DSCP_EN | /* Enable DSCP in IP */
+ skb_size | PCXR_FLP | /* do not force link pass */
+ PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */
+
+ return 0;
+}
+
+static int pxa168_init_hw(struct pxa168_eth_private *pep)
+{
+ int err = 0;
+
+ /* Disable interrupts */
+ wrl(pep, INT_MASK, 0);
+ wrl(pep, INT_CAUSE, 0);
+ /* Write to ICR to clear interrupts. */
+ wrl(pep, INT_W_CLEAR, 0);
+ /* Abort any transmit and receive operations and put DMA
+ * in idle state.
+ */
+ abort_dma(pep);
+ /* Initialize address hash table */
+ err = init_hash_table(pep);
+ if (err)
+ return err;
+ /* SDMA configuration */
+ wrl(pep, SDMA_CONFIG, SDCR_BSZ8 | /* Burst size = 32 bytes */
+ SDCR_RIFB | /* Rx interrupt on frame */
+ SDCR_BLMT | /* Little endian transmit */
+ SDCR_BLMR | /* Little endian receive */
+ SDCR_RC_MAX_RETRANS); /* Max retransmit count */
+ /* Port Configuration */
+ wrl(pep, PORT_CONFIG, PCR_HS); /* Hash size is 1/2kb */
+ set_port_config_ext(pep);
+
+ return err;
+}
+
+static int rxq_init(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct rx_desc *p_rx_desc;
+ int size = 0, i = 0;
+ int rx_desc_num = pep->rx_ring_size;
+
+ /* Allocate RX skb rings */
+ pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
+ GFP_KERNEL);
+ if (!pep->rx_skb) {
+ printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name);
+ return -ENOMEM;
+ }
+ /* Allocate RX ring */
+ pep->rx_desc_count = 0;
+ size = pep->rx_ring_size * sizeof(struct rx_desc);
+ pep->rx_desc_area_size = size;
+ pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+ &pep->rx_desc_dma, GFP_KERNEL);
+ if (!pep->p_rx_desc_area) {
+ printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
+ dev->name, size);
+ goto out;
+ }
+ memset((void *)pep->p_rx_desc_area, 0, size);
+ /* initialize the next_desc_ptr links in the Rx descriptors ring */
+ p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
+ for (i = 0; i < rx_desc_num; i++) {
+ p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
+ ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
+ }
+ /* Save Rx desc pointer to driver struct. */
+ pep->rx_curr_desc_q = 0;
+ pep->rx_used_desc_q = 0;
+ pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
+ return 0;
+out:
+ kfree(pep->rx_skb);
+ return -ENOMEM;
+}
+
+static void rxq_deinit(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int curr;
+
+ /* Free preallocated skb's on RX rings */
+ for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
+ if (pep->rx_skb[curr]) {
+ dev_kfree_skb(pep->rx_skb[curr]);
+ pep->rx_desc_count--;
+ }
+ }
+ if (pep->rx_desc_count)
+ printk(KERN_ERR
+ "Error in freeing Rx Ring. %d skb's still\n",
+ pep->rx_desc_count);
+ /* Free RX ring */
+ if (pep->p_rx_desc_area)
+ dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
+ pep->p_rx_desc_area, pep->rx_desc_dma);
+ kfree(pep->rx_skb);
+}
+
+static int txq_init(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct tx_desc *p_tx_desc;
+ int size = 0, i = 0;
+ int tx_desc_num = pep->tx_ring_size;
+
+ pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
+ GFP_KERNEL);
+ if (!pep->tx_skb) {
+ printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name);
+ return -ENOMEM;
+ }
+ /* Allocate TX ring */
+ pep->tx_desc_count = 0;
+ size = pep->tx_ring_size * sizeof(struct tx_desc);
+ pep->tx_desc_area_size = size;
+ pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+ &pep->tx_desc_dma, GFP_KERNEL);
+ if (!pep->p_tx_desc_area) {
+ printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
+ dev->name, size);
+ goto out;
+ }
+ memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
+ /* Initialize the next_desc_ptr links in the Tx descriptors ring */
+ p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;
+ for (i = 0; i < tx_desc_num; i++) {
+ p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
+ ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
+ }
+ pep->tx_curr_desc_q = 0;
+ pep->tx_used_desc_q = 0;
+ pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
+ return 0;
+out:
+ kfree(pep->tx_skb);
+ return -ENOMEM;
+}
+
+static void txq_deinit(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ /* Free outstanding skb's on TX ring */
+ txq_reclaim(dev, 1);
+ BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
+ /* Free TX ring */
+ if (pep->p_tx_desc_area)
+ dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
+ pep->p_tx_desc_area, pep->tx_desc_dma);
+ kfree(pep->tx_skb);
+}
+
+static int pxa168_eth_open(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int err;
+
+ err = request_irq(dev->irq, pxa168_eth_int_handler,
+ IRQF_DISABLED, dev->name, dev);
+ if (err) {
+ dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
+ return -EAGAIN;
+ }
+ pep->rx_resource_err = 0;
+ err = rxq_init(dev);
+ if (err != 0)
+ goto out_free_irq;
+ err = txq_init(dev);
+ if (err != 0)
+ goto out_free_rx_skb;
+ pep->rx_used_desc_q = 0;
+ pep->rx_curr_desc_q = 0;
+
+ /* Fill RX ring with skb's */
+ rxq_refill(dev);
+ pep->rx_used_desc_q = 0;
+ pep->rx_curr_desc_q = 0;
+ netif_carrier_off(dev);
+ eth_port_start(dev);
+ napi_enable(&pep->napi);
+ return 0;
+out_free_rx_skb:
+ rxq_deinit(dev);
+out_free_irq:
+ free_irq(dev->irq, dev);
+ return err;
+}
+
+static int pxa168_eth_stop(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ eth_port_reset(dev);
+
+ /* Disable interrupts */
+ wrl(pep, INT_MASK, 0);
+ wrl(pep, INT_CAUSE, 0);
+ /* Write to ICR to clear interrupts. */
+ wrl(pep, INT_W_CLEAR, 0);
+ napi_disable(&pep->napi);
+ del_timer_sync(&pep->timeout);
+ netif_carrier_off(dev);
+ free_irq(dev->irq, dev);
+ rxq_deinit(dev);
+ txq_deinit(dev);
+
+ return 0;
+}
+
+static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
+{
+ int retval;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if ((mtu > 9500) || (mtu < 68))
+ return -EINVAL;
+
+ dev->mtu = mtu;
+ retval = set_port_config_ext(pep);
+
+ if (!netif_running(dev))
+ return 0;
+
+ /*
+ * Stop and then re-open the interface. This will allocate RX
+ * skb's of the new MTU. There is a risk that the re-open will
+ * fail if memory is exhausted.
+ */
+ pxa168_eth_stop(dev);
+ if (pxa168_eth_open(dev)) {
+ dev_printk(KERN_ERR, &dev->dev,
+ "fatal error on re-opening device after "
+ "MTU change\n");
+ }
+
+ return 0;
+}
+
+static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
+{
+ int tx_desc_curr;
+
+ tx_desc_curr = pep->tx_curr_desc_q;
+ pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
+ BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
+ pep->tx_desc_count++;
+
+ return tx_desc_curr;
+}
+
+static int pxa168_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct pxa168_eth_private *pep =
+ container_of(napi, struct pxa168_eth_private, napi);
+ struct net_device *dev = pep->dev;
+ int work_done = 0;
+
+ if (unlikely(pep->work_todo & WORK_LINK)) {
+ pep->work_todo &= ~(WORK_LINK);
+ handle_link_event(pep);
+ }
+ /*
+ * We call txq_reclaim on every poll: interrupts are disabled while
+ * in NAPI context, so we would otherwise miss the TX_DONE interrupt,
+ * which is not reflected in the interrupt status register.
+ */
+ txq_reclaim(dev, 0);
+ if (netif_queue_stopped(dev)
+ && pep->tx_ring_size - pep->tx_desc_count > 1) {
+ netif_wake_queue(dev);
+ }
+ work_done = rxq_process(dev, budget);
+ if (work_done < budget) {
+ napi_complete(napi);
+ wrl(pep, INT_MASK, ALL_INTS);
+ }
+
+ return work_done;
+}
+
+static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct tx_desc *desc;
+ int tx_index;
+ int length;
+
+ tx_index = eth_alloc_tx_desc_index(pep);
+ desc = &pep->p_tx_desc_area[tx_index];
+ length = skb->len;
+ pep->tx_skb[tx_index] = skb;
+ desc->byte_cnt = length;
+ desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+
+ skb_tx_timestamp(skb);
+
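+ /* Ensure the descriptor fields above are visible before the DMA
+ * ownership bit is set below. */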
+ wmb();
+ desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
+ TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
+ wmb();
+ wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
+
+ stats->tx_bytes += length;
+ stats->tx_packets++;
+ dev->trans_start = jiffies;
+ if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
+ /* We handled the current skb, but now we are out of space.*/
+ netif_stop_queue(dev);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static int smi_wait_ready(struct pxa168_eth_private *pep)
+{
+ int i = 0;
+
+ /* wait for the SMI register to become available */
+ for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
+ if (i == PHY_WAIT_ITERATIONS)
+ return -ETIMEDOUT;
+ msleep(10);
+ }
+
+ return 0;
+}
+
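+/*
+ * SMI register layout as programmed below: bits 25:21 hold the PHY
+ * register number, bits 20:16 the PHY address and bits 15:0 the data;
+ * SMI_OP_R/SMI_OP_W select the operation, while SMI_BUSY and
+ * SMI_R_VALID report status.
+ */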
+static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct pxa168_eth_private *pep = bus->priv;
+ int i = 0;
+ int val;
+
+ if (smi_wait_ready(pep)) {
+ printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+ wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
+ /* now wait for the data to be valid */
+ for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
+ if (i == PHY_WAIT_ITERATIONS) {
+ printk(KERN_WARNING
+ "pxa168_eth: SMI bus read not valid\n");
+ return -ENODEV;
+ }
+ msleep(10);
+ }
+
+ return val & 0xffff;
+}
+
+static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
+ u16 value)
+{
+ struct pxa168_eth_private *pep = bus->priv;
+
+ if (smi_wait_ready(pep)) {
+ printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
+ SMI_OP_W | (value & 0xffff));
+
+ if (smi_wait_ready(pep)) {
+ printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ if (pep->phy != NULL)
+ return phy_mii_ioctl(pep->phy, ifr, cmd);
+
+ return -EOPNOTSUPP;
+}
+
+static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
+{
+ struct mii_bus *bus = pep->smi_bus;
+ struct phy_device *phydev;
+ int start;
+ int num;
+ int i;
+
+ if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {
+ /* Scan entire range */
+ start = ethernet_phy_get(pep);
+ num = 32;
+ } else {
+ /* Use phy addr specific to platform */
+ start = phy_addr & 0x1f;
+ num = 1;
+ }
+ phydev = NULL;
+ for (i = 0; i < num; i++) {
+ int addr = (start + i) & 0x1f;
+ if (bus->phy_map[addr] == NULL)
+ mdiobus_scan(bus, addr);
+
+ if (phydev == NULL) {
+ phydev = bus->phy_map[addr];
+ if (phydev != NULL)
+ ethernet_phy_set_addr(pep, addr);
+ }
+ }
+
+ return phydev;
+}
+
+static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
+{
+ struct phy_device *phy = pep->phy;
+ ethernet_phy_reset(pep);
+
+ phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII);
+
+ if (speed == 0) {
+ phy->autoneg = AUTONEG_ENABLE;
+ phy->speed = 0;
+ phy->duplex = 0;
+ phy->supported &= PHY_BASIC_FEATURES;
+ phy->advertising = phy->supported | ADVERTISED_Autoneg;
+ } else {
+ phy->autoneg = AUTONEG_DISABLE;
+ phy->advertising = 0;
+ phy->speed = speed;
+ phy->duplex = duplex;
+ }
+ phy_start_aneg(phy);
+}
+
+static int ethernet_phy_setup(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if (pep->pd->init)
+ pep->pd->init();
+ pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
+ if (pep->phy != NULL)
+ phy_init(pep, pep->pd->speed, pep->pd->duplex);
+ update_hash_table_mac_address(pep, NULL, dev->dev_addr);
+
+ return 0;
+}
+
+static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int err;
+
+ err = phy_read_status(pep->phy);
+ if (err == 0)
+ err = phy_ethtool_gset(pep->phy, cmd);
+
+ return err;
+}
+
+static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ return phy_ethtool_sset(pep->phy, cmd);
+}
+
+static void pxa168_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, DRIVER_NAME, 32);
+ strncpy(info->version, DRIVER_VERSION, 32);
+ strncpy(info->fw_version, "N/A", 32);
+ strncpy(info->bus_info, "N/A", 32);
+}
+
+static const struct ethtool_ops pxa168_ethtool_ops = {
+ .get_settings = pxa168_get_settings,
+ .set_settings = pxa168_set_settings,
+ .get_drvinfo = pxa168_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+static const struct net_device_ops pxa168_eth_netdev_ops = {
+ .ndo_open = pxa168_eth_open,
+ .ndo_stop = pxa168_eth_stop,
+ .ndo_start_xmit = pxa168_eth_start_xmit,
+ .ndo_set_rx_mode = pxa168_eth_set_rx_mode,
+ .ndo_set_mac_address = pxa168_eth_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = pxa168_eth_do_ioctl,
+ .ndo_change_mtu = pxa168_eth_change_mtu,
+ .ndo_tx_timeout = pxa168_eth_tx_timeout,
+};
+
+static int pxa168_eth_probe(struct platform_device *pdev)
+{
+ struct pxa168_eth_private *pep = NULL;
+ struct net_device *dev = NULL;
+ struct resource *res;
+ struct clk *clk;
+ int err;
+
+ printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");
+
+ clk = clk_get(&pdev->dev, "MFUCLK");
+ if (IS_ERR(clk)) {
+ printk(KERN_ERR "%s: Fast Ethernet failed to get clock\n",
+ DRIVER_NAME);
+ return -ENODEV;
+ }
+ clk_enable(clk);
+
+ dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_clk;
+ }
+
+ platform_set_drvdata(pdev, dev);
+ pep = netdev_priv(dev);
+ pep->dev = dev;
+ pep->clk = clk;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ err = -ENODEV;
+ goto err_netdev;
+ }
+ pep->base = ioremap(res->start, resource_size(res));
+ if (pep->base == NULL) {
+ err = -ENOMEM;
+ goto err_netdev;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ BUG_ON(!res);
+ dev->irq = res->start;
+ dev->netdev_ops = &pxa168_eth_netdev_ops;
+ dev->watchdog_timeo = 2 * HZ;
+ dev->base_addr = 0;
+ SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);
+
+ INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
+
+ printk(KERN_INFO "%s: Using random MAC address\n", DRIVER_NAME);
+ random_ether_addr(dev->dev_addr);
+
+ pep->pd = pdev->dev.platform_data;
+ pep->rx_ring_size = NUM_RX_DESCS;
+ if (pep->pd->rx_queue_size)
+ pep->rx_ring_size = pep->pd->rx_queue_size;
+
+ pep->tx_ring_size = NUM_TX_DESCS;
+ if (pep->pd->tx_queue_size)
+ pep->tx_ring_size = pep->pd->tx_queue_size;
+
+ pep->port_num = pep->pd->port_number;
+ /* Hardware supports only 3 ports */
+ BUG_ON(pep->port_num > 2);
+ netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
+
+ memset(&pep->timeout, 0, sizeof(struct timer_list));
+ init_timer(&pep->timeout);
+ pep->timeout.function = rxq_refill_timer_wrapper;
+ pep->timeout.data = (unsigned long)pep;
+
+ pep->smi_bus = mdiobus_alloc();
+ if (pep->smi_bus == NULL) {
+ err = -ENOMEM;
+ goto err_base;
+ }
+ pep->smi_bus->priv = pep;
+ pep->smi_bus->name = "pxa168_eth smi";
+ pep->smi_bus->read = pxa168_smi_read;
+ pep->smi_bus->write = pxa168_smi_write;
+ snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
+ pep->smi_bus->parent = &pdev->dev;
+ pep->smi_bus->phy_mask = 0xffffffff;
+ err = mdiobus_register(pep->smi_bus);
+ if (err)
+ goto err_free_mdio;
+
+ pxa168_init_hw(pep);
+ err = ethernet_phy_setup(dev);
+ if (err)
+ goto err_mdiobus;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ err = register_netdev(dev);
+ if (err)
+ goto err_mdiobus;
+ return 0;
+
+err_mdiobus:
+ mdiobus_unregister(pep->smi_bus);
+err_free_mdio:
+ mdiobus_free(pep->smi_bus);
+err_base:
+ iounmap(pep->base);
+err_netdev:
+ free_netdev(dev);
+err_clk:
+ clk_disable(clk);
+ clk_put(clk);
+ return err;
+}
+
+static int pxa168_eth_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if (pep->htpr) {
+ dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
+ pep->htpr, pep->htpr_dma);
+ pep->htpr = NULL;
+ }
+ if (pep->clk) {
+ clk_disable(pep->clk);
+ clk_put(pep->clk);
+ pep->clk = NULL;
+ }
+ if (pep->phy != NULL)
+ phy_detach(pep->phy);
+
+ iounmap(pep->base);
+ pep->base = NULL;
+ mdiobus_unregister(pep->smi_bus);
+ mdiobus_free(pep->smi_bus);
+ unregister_netdev(dev);
+ cancel_work_sync(&pep->tx_timeout_task);
+ free_netdev(dev);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static void pxa168_eth_shutdown(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ eth_port_reset(dev);
+}
+
+#ifdef CONFIG_PM
+static int pxa168_eth_resume(struct platform_device *pdev)
+{
+ return -ENOSYS;
+}
+
+static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return -ENOSYS;
+}
+
+#else
+#define pxa168_eth_resume NULL
+#define pxa168_eth_suspend NULL
+#endif
+
+static struct platform_driver pxa168_eth_driver = {
+ .probe = pxa168_eth_probe,
+ .remove = pxa168_eth_remove,
+ .shutdown = pxa168_eth_shutdown,
+ .resume = pxa168_eth_resume,
+ .suspend = pxa168_eth_suspend,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init pxa168_init_module(void)
+{
+ return platform_driver_register(&pxa168_eth_driver);
+}
+
+static void __exit pxa168_cleanup_module(void)
+{
+ platform_driver_unregister(&pxa168_eth_driver);
+}
+
+module_init(pxa168_init_module);
+module_exit(pxa168_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
+MODULE_ALIAS("platform:pxa168_eth");
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
new file mode 100644
index 000000000000..32db4c877ff1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -0,0 +1,4165 @@
+/*
+ * New driver for Marvell Yukon chipset and SysKonnect Gigabit
+ * Ethernet adapters. Based on earlier sk98lin, e100 and
+ * FreeBSD if_sk drivers.
+ *
+ * This driver intentionally does not support all the features
+ * of the original driver such as link fail-over and link management because
+ * those should be done at higher levels.
+ *
+ * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/in.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/mii.h>
+#include <linux/slab.h>
+#include <linux/dmi.h>
+#include <linux/prefetch.h>
+#include <asm/irq.h>
+
+#include "skge.h"
+
+#define DRV_NAME "skge"
+#define DRV_VERSION "1.14"
+
+#define DEFAULT_TX_RING_SIZE 128
+#define DEFAULT_RX_RING_SIZE 512
+#define MAX_TX_RING_SIZE 1024
+#define TX_LOW_WATER (MAX_SKB_FRAGS + 1)
+#define MAX_RX_RING_SIZE 4096
+#define RX_COPY_THRESHOLD 128
+#define RX_BUF_SIZE 1536
+#define PHY_RETRIES 1000
+#define ETH_JUMBO_MTU 9000
+#define TX_WATCHDOG (5 * HZ)
+#define NAPI_WEIGHT 64
+#define BLINK_MS 250
+#define LINK_HZ HZ
+
+#define SKGE_EEPROM_MAGIC 0x9933aabb
+
+
+MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver");
+MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static const u32 default_msg = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN);
+
+static int debug = -1; /* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static DEFINE_PCI_DEVICE_TABLE(skge_id_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x1700) }, /* 3Com 3C940 */
+ { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x80EB) }, /* 3Com 3C940B */
+#ifdef CONFIG_SKGE_GENESIS
+ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4300) }, /* SK-9xx */
+#endif
+ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4320) }, /* SK-98xx V2.0 */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* D-Link DGE-530T (rev.B) */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4c00) }, /* D-Link DGE-530T */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302) }, /* D-Link DGE-530T Rev C1 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, /* Marvell Yukon 88E8001/8003/8010 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
+ { PCI_DEVICE(PCI_VENDOR_ID_CNET, 0x434E) }, /* CNet PowerG-2000 */
+ { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, 0x1064) }, /* Linksys EG1064 v2 */
+ { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 }, /* Linksys EG1032 v2 */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, skge_id_table);
+
+static int skge_up(struct net_device *dev);
+static int skge_down(struct net_device *dev);
+static void skge_phy_reset(struct skge_port *skge);
+static void skge_tx_clean(struct net_device *dev);
+static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
+static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
+static void genesis_get_stats(struct skge_port *skge, u64 *data);
+static void yukon_get_stats(struct skge_port *skge, u64 *data);
+static void yukon_init(struct skge_hw *hw, int port);
+static void genesis_mac_init(struct skge_hw *hw, int port);
+static void genesis_link_up(struct skge_port *skge);
+static void skge_set_multicast(struct net_device *dev);
+static irqreturn_t skge_intr(int irq, void *dev_id);
+
+/* Avoid conditionals by using arrays */
+static const int txqaddr[] = { Q_XA1, Q_XA2 };
+static const int rxqaddr[] = { Q_R1, Q_R2 };
+static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
+static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
+static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
+static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 };
+
+static inline bool is_genesis(const struct skge_hw *hw)
+{
+#ifdef CONFIG_SKGE_GENESIS
+ return hw->chip_id == CHIP_ID_GENESIS;
+#else
+ return false;
+#endif
+}
+
+static int skge_get_regs_len(struct net_device *dev)
+{
+ return 0x4000;
+}
+
+/*
+ * Returns a copy of the whole control register region.
+ * Note: skip the RAM address register because accessing it will
+ * cause bus hangs!
+ */
+static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ const struct skge_port *skge = netdev_priv(dev);
+ const void __iomem *io = skge->hw->regs;
+
+ regs->version = 1;
+ memset(p, 0, regs->len);
+ memcpy_fromio(p, io, B3_RAM_ADDR);
+
+ memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
+ regs->len - B3_RI_WTO_R1);
+}
+
+/* Wake on LAN is only supported on Yukon chips with rev 1 or above */
+static u32 wol_supported(const struct skge_hw *hw)
+{
+ if (is_genesis(hw))
+ return 0;
+
+ if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
+ return 0;
+
+ return WAKE_MAGIC | WAKE_PHY;
+}
+
+static void skge_wol_init(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ u16 ctrl;
+
+ skge_write16(hw, B0_CTST, CS_RST_CLR);
+ skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
+
+ /* Turn on Vaux */
+ skge_write8(hw, B0_POWER_CTRL,
+ PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
+
+ /* Workaround for COMA mode -- clear PHY reset */
+ if (hw->chip_id == CHIP_ID_YUKON_LITE &&
+ hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
+ u32 reg = skge_read32(hw, B2_GP_IO);
+ reg |= GP_DIR_9;
+ reg &= ~GP_IO_9;
+ skge_write32(hw, B2_GP_IO, reg);
+ }
+
+ skge_write32(hw, SK_REG(port, GPHY_CTRL),
+ GPC_DIS_SLEEP |
+ GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 |
+ GPC_ANEG_1 | GPC_RST_SET);
+
+ skge_write32(hw, SK_REG(port, GPHY_CTRL),
+ GPC_DIS_SLEEP |
+ GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 |
+ GPC_ANEG_1 | GPC_RST_CLR);
+
+ skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
+
+ /* Force to 10/100; skge_reset will re-enable on resume */
+ gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
+ (PHY_AN_100FULL | PHY_AN_100HALF |
+ PHY_AN_10FULL | PHY_AN_10HALF | PHY_AN_CSMA));
+ /* no 1000 HD/FD */
+ gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0);
+ gm_phy_write(hw, port, PHY_MARV_CTRL,
+ PHY_CT_RESET | PHY_CT_SPS_LSB | PHY_CT_ANE |
+ PHY_CT_RE_CFG | PHY_CT_DUP_MD);
+
+
+ /* Set GMAC to no flow control and auto update for speed/duplex */
+ gma_write16(hw, port, GM_GP_CTRL,
+ GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
+ GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
+
+ /* Set WOL address */
+ memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
+ skge->netdev->dev_addr, ETH_ALEN);
+
+ /* Turn on appropriate WOL control bits */
+ skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
+ ctrl = 0;
+ if (skge->wol & WAKE_PHY)
+ ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
+ else
+ ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
+
+ if (skge->wol & WAKE_MAGIC)
+ ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
+ else
+ ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;
+
+ ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
+ skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
+
+ /* block receiver */
+ skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
+}
+
+static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct skge_port *skge = netdev_priv(dev);
+
+ wol->supported = wol_supported(skge->hw);
+ wol->wolopts = skge->wol;
+}
+
+static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+
+ if ((wol->wolopts & ~wol_supported(hw)) ||
+ !device_can_wakeup(&hw->pdev->dev))
+ return -EOPNOTSUPP;
+
+ skge->wol = wol->wolopts;
+
+ device_set_wakeup_enable(&hw->pdev->dev, skge->wol);
+
+ return 0;
+}
+
+/* Determine supported/advertised modes based on hardware.
+ * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
+ */
+static u32 skge_supported_modes(const struct skge_hw *hw)
+{
+ u32 supported;
+
+ if (hw->copper) {
+ supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
+
+ if (is_genesis(hw))
+ supported &= ~(SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full);
+
+ else if (hw->chip_id == CHIP_ID_YUKON)
+ supported &= ~SUPPORTED_1000baseT_Half;
+ } else
+ supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
+
+ return supported;
+}
+
+static int skge_get_settings(struct net_device *dev,
+ struct ethtool_cmd *ecmd)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+
+ ecmd->transceiver = XCVR_INTERNAL;
+ ecmd->supported = skge_supported_modes(hw);
+
+ if (hw->copper) {
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = hw->phy_addr;
+ } else
+ ecmd->port = PORT_FIBRE;
+
+ ecmd->advertising = skge->advertising;
+ ecmd->autoneg = skge->autoneg;
+ ethtool_cmd_speed_set(ecmd, skge->speed);
+ ecmd->duplex = skge->duplex;
+ return 0;
+}
+
+static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ const struct skge_hw *hw = skge->hw;
+ u32 supported = skge_supported_modes(hw);
+ int err = 0;
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ ecmd->advertising = supported;
+ skge->duplex = -1;
+ skge->speed = -1;
+ } else {
+ u32 setting;
+ u32 speed = ethtool_cmd_speed(ecmd);
+
+ switch (speed) {
+ case SPEED_1000:
+ if (ecmd->duplex == DUPLEX_FULL)
+ setting = SUPPORTED_1000baseT_Full;
+ else if (ecmd->duplex == DUPLEX_HALF)
+ setting = SUPPORTED_1000baseT_Half;
+ else
+ return -EINVAL;
+ break;
+ case SPEED_100:
+ if (ecmd->duplex == DUPLEX_FULL)
+ setting = SUPPORTED_100baseT_Full;
+ else if (ecmd->duplex == DUPLEX_HALF)
+ setting = SUPPORTED_100baseT_Half;
+ else
+ return -EINVAL;
+ break;
+
+ case SPEED_10:
+ if (ecmd->duplex == DUPLEX_FULL)
+ setting = SUPPORTED_10baseT_Full;
+ else if (ecmd->duplex == DUPLEX_HALF)
+ setting = SUPPORTED_10baseT_Half;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((setting & supported) == 0)
+ return -EINVAL;
+
+ skge->speed = speed;
+ skge->duplex = ecmd->duplex;
+ }
+
+ skge->autoneg = ecmd->autoneg;
+ skge->advertising = ecmd->advertising;
+
+ if (netif_running(dev)) {
+ skge_down(dev);
+ err = skge_up(dev);
+ if (err) {
+ dev_close(dev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void skge_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct skge_port *skge = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->fw_version, "N/A");
+ strcpy(info->bus_info, pci_name(skge->hw->pdev));
+}
+
+static const struct skge_stat {
+ char name[ETH_GSTRING_LEN];
+ u16 xmac_offset;
+ u16 gma_offset;
+} skge_stats[] = {
+ { "tx_bytes", XM_TXO_OK_HI, GM_TXO_OK_HI },
+ { "rx_bytes", XM_RXO_OK_HI, GM_RXO_OK_HI },
+
+ { "tx_broadcast", XM_TXF_BC_OK, GM_TXF_BC_OK },
+ { "rx_broadcast", XM_RXF_BC_OK, GM_RXF_BC_OK },
+ { "tx_multicast", XM_TXF_MC_OK, GM_TXF_MC_OK },
+ { "rx_multicast", XM_RXF_MC_OK, GM_RXF_MC_OK },
+ { "tx_unicast", XM_TXF_UC_OK, GM_TXF_UC_OK },
+ { "rx_unicast", XM_RXF_UC_OK, GM_RXF_UC_OK },
+ { "tx_mac_pause", XM_TXF_MPAUSE, GM_TXF_MPAUSE },
+ { "rx_mac_pause", XM_RXF_MPAUSE, GM_RXF_MPAUSE },
+
+ { "collisions", XM_TXF_SNG_COL, GM_TXF_SNG_COL },
+ { "multi_collisions", XM_TXF_MUL_COL, GM_TXF_MUL_COL },
+ { "aborted", XM_TXF_ABO_COL, GM_TXF_ABO_COL },
+ { "late_collision", XM_TXF_LAT_COL, GM_TXF_LAT_COL },
+ { "fifo_underrun", XM_TXE_FIFO_UR, GM_TXE_FIFO_UR },
+ { "fifo_overflow", XM_RXE_FIFO_OV, GM_RXE_FIFO_OV },
+
+ { "rx_toolong", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR },
+ { "rx_jabber", XM_RXF_JAB_PKT, GM_RXF_JAB_PKT },
+ { "rx_runt", XM_RXE_RUNT, GM_RXE_FRAG },
+ { "rx_too_long", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR },
+ { "rx_fcs_error", XM_RXF_FCS_ERR, GM_RXF_FCS_ERR },
+};
+
+static int skge_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(skge_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void skge_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct skge_port *skge = netdev_priv(dev);
+
+ if (is_genesis(skge->hw))
+ genesis_get_stats(skge, data);
+ else
+ yukon_get_stats(skge, data);
+}
+
+/* Use hardware MIB variables for critical path statistics and
+ * transmit feedback that is not reported at interrupt time.
+ * Other errors are accounted for in the interrupt handler.
+ */
+static struct net_device_stats *skge_get_stats(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ u64 data[ARRAY_SIZE(skge_stats)];
+
+ if (is_genesis(skge->hw))
+ genesis_get_stats(skge, data);
+ else
+ yukon_get_stats(skge, data);
+
+ dev->stats.tx_bytes = data[0];
+ dev->stats.rx_bytes = data[1];
+ dev->stats.tx_packets = data[2] + data[4] + data[6];
+ dev->stats.rx_packets = data[3] + data[5] + data[7];
+ dev->stats.multicast = data[3] + data[5];
+ dev->stats.collisions = data[10];
+ dev->stats.tx_aborted_errors = data[12];
+
+ return &dev->stats;
+}
+
+static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(skge_stats); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ skge_stats[i].name, ETH_GSTRING_LEN);
+ break;
+ }
+}
+
+static void skge_get_ring_param(struct net_device *dev,
+ struct ethtool_ringparam *p)
+{
+ struct skge_port *skge = netdev_priv(dev);
+
+ p->rx_max_pending = MAX_RX_RING_SIZE;
+ p->tx_max_pending = MAX_TX_RING_SIZE;
+ p->rx_mini_max_pending = 0;
+ p->rx_jumbo_max_pending = 0;
+
+ p->rx_pending = skge->rx_ring.count;
+ p->tx_pending = skge->tx_ring.count;
+ p->rx_mini_pending = 0;
+ p->rx_jumbo_pending = 0;
+}
+
+static int skge_set_ring_param(struct net_device *dev,
+ struct ethtool_ringparam *p)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ int err = 0;
+
+ if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
+ p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE)
+ return -EINVAL;
+
+ skge->rx_ring.count = p->rx_pending;
+ skge->tx_ring.count = p->tx_pending;
+
+ if (netif_running(dev)) {
+ skge_down(dev);
+ err = skge_up(dev);
+ if (err)
+ dev_close(dev);
+ }
+
+ return err;
+}
+
+static u32 skge_get_msglevel(struct net_device *netdev)
+{
+ struct skge_port *skge = netdev_priv(netdev);
+ return skge->msg_enable;
+}
+
+static void skge_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct skge_port *skge = netdev_priv(netdev);
+ skge->msg_enable = value;
+}
+
+static int skge_nway_reset(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+
+ if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev))
+ return -EINVAL;
+
+ skge_phy_reset(skge);
+ return 0;
+}
+
+static void skge_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *ecmd)
+{
+ struct skge_port *skge = netdev_priv(dev);
+
+ ecmd->rx_pause = ((skge->flow_control == FLOW_MODE_SYMMETRIC) ||
+ (skge->flow_control == FLOW_MODE_SYM_OR_REM));
+ ecmd->tx_pause = (ecmd->rx_pause ||
+ (skge->flow_control == FLOW_MODE_LOC_SEND));
+
+ ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause;
+}
+
+static int skge_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *ecmd)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct ethtool_pauseparam old;
+ int err = 0;
+
+ skge_get_pauseparam(dev, &old);
+
+ if (ecmd->autoneg != old.autoneg)
+ skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC;
+ else {
+ if (ecmd->rx_pause && ecmd->tx_pause)
+ skge->flow_control = FLOW_MODE_SYMMETRIC;
+ else if (ecmd->rx_pause && !ecmd->tx_pause)
+ skge->flow_control = FLOW_MODE_SYM_OR_REM;
+ else if (!ecmd->rx_pause && ecmd->tx_pause)
+ skge->flow_control = FLOW_MODE_LOC_SEND;
+ else
+ skge->flow_control = FLOW_MODE_NONE;
+ }
+
+ if (netif_running(dev)) {
+ skge_down(dev);
+ err = skge_up(dev);
+ if (err) {
+ dev_close(dev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/* Chip internal frequency for clock calculations */
+static inline u32 hwkhz(const struct skge_hw *hw)
+{
+ return is_genesis(hw) ? 53125 : 78125;
+}
+
+/* Chip HZ to microseconds */
+static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks)
+{
+ return (ticks * 1000) / hwkhz(hw);
+}
+
+/* Microseconds to chip HZ */
+static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
+{
+ return hwkhz(hw) * usec / 1000;
+}
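+
+/*
+ * For example, on Yukon (78.125 MHz core clock) a 25 usec coalescing
+ * delay corresponds to 78125 * 25 / 1000 = 1953 clock ticks.
+ */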
+
+static int skge_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+
+ ecmd->rx_coalesce_usecs = 0;
+ ecmd->tx_coalesce_usecs = 0;
+
+ if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) {
+ u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI));
+ u32 msk = skge_read32(hw, B2_IRQM_MSK);
+
+ if (msk & rxirqmask[port])
+ ecmd->rx_coalesce_usecs = delay;
+ if (msk & txirqmask[port])
+ ecmd->tx_coalesce_usecs = delay;
+ }
+
+ return 0;
+}
+
+/* Note: interrupt timer is per board, but can turn on/off per port */
+static int skge_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ u32 msk = skge_read32(hw, B2_IRQM_MSK);
+ u32 delay = 25;
+
+ if (ecmd->rx_coalesce_usecs == 0)
+ msk &= ~rxirqmask[port];
+ else if (ecmd->rx_coalesce_usecs < 25 ||
+ ecmd->rx_coalesce_usecs > 33333)
+ return -EINVAL;
+ else {
+ msk |= rxirqmask[port];
+ delay = ecmd->rx_coalesce_usecs;
+ }
+
+ if (ecmd->tx_coalesce_usecs == 0)
+ msk &= ~txirqmask[port];
+ else if (ecmd->tx_coalesce_usecs < 25 ||
+ ecmd->tx_coalesce_usecs > 33333)
+ return -EINVAL;
+ else {
+ msk |= txirqmask[port];
+ delay = min(delay, ecmd->tx_coalesce_usecs);
+ }
+
+ skge_write32(hw, B2_IRQM_MSK, msk);
+ if (msk == 0)
+ skge_write32(hw, B2_IRQM_CTRL, TIM_STOP);
+ else {
+ skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay));
+ skge_write32(hw, B2_IRQM_CTRL, TIM_START);
+ }
+ return 0;
+}
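+
+/* The interrupt moderation timer (B2_IRQM) is a single board-wide
+ * resource, so only one delay value can be programmed; when both
+ * directions request moderation, the smaller delay wins via the
+ * min() above.
+ */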
+
+enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST };
+static void skge_led(struct skge_port *skge, enum led_mode mode)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+
+ spin_lock_bh(&hw->phy_lock);
+ if (is_genesis(hw)) {
+ switch (mode) {
+ case LED_MODE_OFF:
+ if (hw->phy_type == SK_PHY_BCOM)
+ xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF);
+ else {
+ skge_write32(hw, SK_REG(port, TX_LED_VAL), 0);
+ skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF);
+ }
+ skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
+ skge_write32(hw, SK_REG(port, RX_LED_VAL), 0);
+ skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF);
+ break;
+
+ case LED_MODE_ON:
+ skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
+ skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);
+
+ skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
+ skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
+
+ break;
+
+ case LED_MODE_TST:
+ skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON);
+ skge_write32(hw, SK_REG(port, RX_LED_VAL), 100);
+ skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
+
+ if (hw->phy_type == SK_PHY_BCOM)
+ xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON);
+ else {
+ skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON);
+ skge_write32(hw, SK_REG(port, TX_LED_VAL), 100);
+ skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
+ }
+
+ }
+ } else {
+ switch (mode) {
+ case LED_MODE_OFF:
+ gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
+ gm_phy_write(hw, port, PHY_MARV_LED_OVER,
+ PHY_M_LED_MO_DUP(MO_LED_OFF) |
+ PHY_M_LED_MO_10(MO_LED_OFF) |
+ PHY_M_LED_MO_100(MO_LED_OFF) |
+ PHY_M_LED_MO_1000(MO_LED_OFF) |
+ PHY_M_LED_MO_RX(MO_LED_OFF));
+ break;
+ case LED_MODE_ON:
+ gm_phy_write(hw, port, PHY_MARV_LED_CTRL,
+ PHY_M_LED_PULS_DUR(PULS_170MS) |
+ PHY_M_LED_BLINK_RT(BLINK_84MS) |
+ PHY_M_LEDC_TX_CTRL |
+ PHY_M_LEDC_DP_CTRL);
+
+ gm_phy_write(hw, port, PHY_MARV_LED_OVER,
+ PHY_M_LED_MO_RX(MO_LED_OFF) |
+ (skge->speed == SPEED_100 ?
+ PHY_M_LED_MO_100(MO_LED_ON) : 0));
+ break;
+ case LED_MODE_TST:
+ gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
+ gm_phy_write(hw, port, PHY_MARV_LED_OVER,
+ PHY_M_LED_MO_DUP(MO_LED_ON) |
+ PHY_M_LED_MO_10(MO_LED_ON) |
+ PHY_M_LED_MO_100(MO_LED_ON) |
+ PHY_M_LED_MO_1000(MO_LED_ON) |
+ PHY_M_LED_MO_RX(MO_LED_ON));
+ }
+ }
+ spin_unlock_bh(&hw->phy_lock);
+}
+
+/* Blink LEDs to help locate the board */
+static int skge_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct skge_port *skge = netdev_priv(dev);
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 2; /* cycle on/off twice per second */
+
+ case ETHTOOL_ID_ON:
+ skge_led(skge, LED_MODE_TST);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ skge_led(skge, LED_MODE_OFF);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ /* back to regular LED state */
+ skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF);
+ }
+
+ return 0;
+}
+
+static int skge_get_eeprom_len(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ u32 reg2;
+
+ pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2);
+ return 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
+}
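+
+/* The VPD ROM size field decodes to a power of two: a field value
+ * of 0 yields 1 << 8 = 256 bytes, 3 yields 2048 bytes (assuming
+ * PCI_VPD_ROM_SZ selects the bits from bit 14 upward).
+ */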
+
+static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset)
+{
+ u32 val;
+
+ pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset);
+
+ do {
+ pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
+ } while (!(offset & PCI_VPD_ADDR_F));
+
+ pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val);
+ return val;
+}
+
+static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val)
+{
+ pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val);
+ pci_write_config_word(pdev, cap + PCI_VPD_ADDR,
+ offset | PCI_VPD_ADDR_F);
+
+ do {
+ pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
+ } while (offset & PCI_VPD_ADDR_F);
+}
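+
+/* Standard PCI VPD handshake: a read posts the address with the
+ * flag clear and polls until hardware sets PCI_VPD_ADDR_F to signal
+ * valid data; a write posts the data and the address with the flag
+ * set, then polls until hardware clears it. Note neither loop has a
+ * timeout, so a wedged device would spin here forever.
+ */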
+
+static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct pci_dev *pdev = skge->hw->pdev;
+ int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
+ int length = eeprom->len;
+ u16 offset = eeprom->offset;
+
+ if (!cap)
+ return -EINVAL;
+
+ eeprom->magic = SKGE_EEPROM_MAGIC;
+
+ while (length > 0) {
+ u32 val = skge_vpd_read(pdev, cap, offset);
+ int n = min_t(int, length, sizeof(val));
+
+ memcpy(data, &val, n);
+ length -= n;
+ data += n;
+ offset += n;
+ }
+ return 0;
+}
+
+static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct pci_dev *pdev = skge->hw->pdev;
+ int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
+ int length = eeprom->len;
+ u16 offset = eeprom->offset;
+
+ if (!cap)
+ return -EINVAL;
+
+ if (eeprom->magic != SKGE_EEPROM_MAGIC)
+ return -EINVAL;
+
+ while (length > 0) {
+ u32 val;
+ int n = min_t(int, length, sizeof(val));
+
+ if (n < sizeof(val))
+ val = skge_vpd_read(pdev, cap, offset);
+ memcpy(&val, data, n);
+
+ skge_vpd_write(pdev, cap, offset, val);
+
+ length -= n;
+ data += n;
+ offset += n;
+ }
+ return 0;
+}
+
+static const struct ethtool_ops skge_ethtool_ops = {
+ .get_settings = skge_get_settings,
+ .set_settings = skge_set_settings,
+ .get_drvinfo = skge_get_drvinfo,
+ .get_regs_len = skge_get_regs_len,
+ .get_regs = skge_get_regs,
+ .get_wol = skge_get_wol,
+ .set_wol = skge_set_wol,
+ .get_msglevel = skge_get_msglevel,
+ .set_msglevel = skge_set_msglevel,
+ .nway_reset = skge_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = skge_get_eeprom_len,
+ .get_eeprom = skge_get_eeprom,
+ .set_eeprom = skge_set_eeprom,
+ .get_ringparam = skge_get_ring_param,
+ .set_ringparam = skge_set_ring_param,
+ .get_pauseparam = skge_get_pauseparam,
+ .set_pauseparam = skge_set_pauseparam,
+ .get_coalesce = skge_get_coalesce,
+ .set_coalesce = skge_set_coalesce,
+ .get_strings = skge_get_strings,
+ .set_phys_id = skge_set_phys_id,
+ .get_sset_count = skge_get_sset_count,
+ .get_ethtool_stats = skge_get_ethtool_stats,
+};
+
+/*
+ * Allocate ring elements and chain them together
+ * One-to-one association of board descriptors with ring elements
+ */
+static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
+{
+ struct skge_tx_desc *d;
+ struct skge_element *e;
+ int i;
+
+ ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL);
+ if (!ring->start)
+ return -ENOMEM;
+
+ for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
+ e->desc = d;
+ if (i == ring->count - 1) {
+ e->next = ring->start;
+ d->next_offset = base;
+ } else {
+ e->next = e + 1;
+ d->next_offset = base + (i+1) * sizeof(*d);
+ }
+ }
+ ring->to_use = ring->to_clean = ring->start;
+
+ return 0;
+}
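+
+/* E.g. with count = 4 and descriptors of size s at bus address base,
+ * next_offset chains base -> base+s -> base+2s -> base+3s and back
+ * to base, giving the hardware a circular list that mirrors the
+ * software e->next ring.
+ */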
+
+/* Allocate and setup a new buffer for receiving */
+static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
+ struct sk_buff *skb, unsigned int bufsize)
+{
+ struct skge_rx_desc *rd = e->desc;
+ u64 map;
+
+ map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
+ PCI_DMA_FROMDEVICE);
+
+ rd->dma_lo = map;
+ rd->dma_hi = map >> 32;
+ e->skb = skb;
+ rd->csum1_start = ETH_HLEN;
+ rd->csum2_start = ETH_HLEN;
+ rd->csum1 = 0;
+ rd->csum2 = 0;
+
+ wmb();
+
+ rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
+ dma_unmap_addr_set(e, mapaddr, map);
+ dma_unmap_len_set(e, maplen, bufsize);
+}
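+
+/* The wmb() above is the ownership handoff barrier: the address and
+ * checksum fields must be visible before the control word sets
+ * BMU_OWN, otherwise the BMU could DMA using a half-initialized
+ * descriptor.
+ */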
+
+/* Resume receiving using an existing skb.
+ * Note: the DMA address is not changed by the chip,
+ * and the MTU is not changed while the receiver is active.
+ */
+static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
+{
+ struct skge_rx_desc *rd = e->desc;
+
+ rd->csum2 = 0;
+ rd->csum2_start = ETH_HLEN;
+
+ wmb();
+
+ rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
+}
+
+
+/* Free all buffers in receive ring, assumes receiver stopped */
+static void skge_rx_clean(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ struct skge_ring *ring = &skge->rx_ring;
+ struct skge_element *e;
+
+ e = ring->start;
+ do {
+ struct skge_rx_desc *rd = e->desc;
+ rd->control = 0;
+ if (e->skb) {
+ pci_unmap_single(hw->pdev,
+ dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(e->skb);
+ e->skb = NULL;
+ }
+ } while ((e = e->next) != ring->start);
+}
+
+
+/* Allocate buffers for receive ring
+ * For receive: to_clean is next received frame.
+ */
+static int skge_rx_fill(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_ring *ring = &skge->rx_ring;
+ struct skge_element *e;
+
+ e = ring->start;
+ do {
+ struct sk_buff *skb;
+
+ skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN,
+ GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ skge_rx_setup(skge, e, skb, skge->rx_buf_size);
+ } while ((e = e->next) != ring->start);
+
+ ring->to_clean = ring->start;
+ return 0;
+}
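+
+/* skb_reserve(skb, NET_IP_ALIGN) offsets the 14-byte Ethernet
+ * header so that the IP header lands on a 4-byte boundary
+ * (NET_IP_ALIGN is 2 on most architectures).
+ */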
+
+static const char *skge_pause(enum pause_status status)
+{
+ switch (status) {
+ case FLOW_STAT_NONE:
+ return "none";
+ case FLOW_STAT_REM_SEND:
+ return "rx only";
+ case FLOW_STAT_LOC_SEND:
+ return "tx_only";
+ case FLOW_STAT_SYMMETRIC: /* Both station may send PAUSE */
+ return "both";
+ default:
+ return "indeterminated";
+ }
+}
+
+
+static void skge_link_up(struct skge_port *skge)
+{
+ skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
+ LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
+
+ netif_carrier_on(skge->netdev);
+ netif_wake_queue(skge->netdev);
+
+ netif_info(skge, link, skge->netdev,
+ "Link is up at %d Mbps, %s duplex, flow control %s\n",
+ skge->speed,
+ skge->duplex == DUPLEX_FULL ? "full" : "half",
+ skge_pause(skge->flow_status));
+}
+
+static void skge_link_down(struct skge_port *skge)
+{
+ skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
+ netif_carrier_off(skge->netdev);
+ netif_stop_queue(skge->netdev);
+
+ netif_info(skge, link, skge->netdev, "Link is down\n");
+}
+
+static void xm_link_down(struct skge_hw *hw, int port)
+{
+ struct net_device *dev = hw->dev[port];
+ struct skge_port *skge = netdev_priv(dev);
+
+ xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
+
+ if (netif_carrier_ok(dev))
+ skge_link_down(skge);
+}
+
+static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
+{
+ int i;
+
+ xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
+ *val = xm_read16(hw, port, XM_PHY_DATA);
+
+ if (hw->phy_type == SK_PHY_XMAC)
+ goto ready;
+
+ for (i = 0; i < PHY_RETRIES; i++) {
+ if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
+ goto ready;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+ ready:
+ *val = xm_read16(hw, port, XM_PHY_DATA);
+
+ return 0;
+}
+
+static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
+{
+ u16 v = 0;
+ if (__xm_phy_read(hw, port, reg, &v))
+ pr_warning("%s: phy read timed out\n", hw->dev[port]->name);
+ return v;
+}
+
+static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
+{
+ int i;
+
+ xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
+ for (i = 0; i < PHY_RETRIES; i++) {
+ if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
+ goto ready;
+ udelay(1);
+ }
+ return -EIO;
+
+ ready:
+ xm_write16(hw, port, XM_PHY_DATA, val);
+ for (i = 0; i < PHY_RETRIES; i++) {
+ if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static void genesis_init(struct skge_hw *hw)
+{
+ /* set blink source counter */
+ skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100);
+ skge_write8(hw, B2_BSC_CTRL, BSC_START);
+
+ /* configure mac arbiter */
+ skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
+
+ /* configure mac arbiter timeout values */
+ skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53);
+ skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53);
+ skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53);
+ skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53);
+
+ skge_write8(hw, B3_MA_RCINI_RX1, 0);
+ skge_write8(hw, B3_MA_RCINI_RX2, 0);
+ skge_write8(hw, B3_MA_RCINI_TX1, 0);
+ skge_write8(hw, B3_MA_RCINI_TX2, 0);
+
+ /* configure packet arbiter timeout */
+ skge_write16(hw, B3_PA_CTRL, PA_RST_CLR);
+ skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
+ skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
+ skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
+ skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
+}
+
+static void genesis_reset(struct skge_hw *hw, int port)
+{
+ static const u8 zero[8] = { 0 };
+ u32 reg;
+
+ skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
+
+ /* reset the statistics module */
+ xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
+ xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
+ xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */
+ xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */
+ xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */
+
+ /* disable Broadcom PHY IRQ */
+ if (hw->phy_type == SK_PHY_BCOM)
+ xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);
+
+ xm_outhash(hw, port, XM_HSM, zero);
+
+ /* Flush TX and RX fifo */
+ reg = xm_read32(hw, port, XM_MODE);
+ xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF);
+ xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF);
+}
+
+/* Convert mode to MII values */
+static const u16 phy_pause_map[] = {
+ [FLOW_MODE_NONE] = 0,
+ [FLOW_MODE_LOC_SEND] = PHY_AN_PAUSE_ASYM,
+ [FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP,
+ [FLOW_MODE_SYM_OR_REM] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
+};
+
+/* special defines for FIBER (88E1011S only) */
+static const u16 fiber_pause_map[] = {
+ [FLOW_MODE_NONE] = PHY_X_P_NO_PAUSE,
+ [FLOW_MODE_LOC_SEND] = PHY_X_P_ASYM_MD,
+ [FLOW_MODE_SYMMETRIC] = PHY_X_P_SYM_MD,
+ [FLOW_MODE_SYM_OR_REM] = PHY_X_P_BOTH_MD,
+};
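+
+/* Both tables encode the standard autoneg pause bits (pause capable
+ * and asymmetric direction, cf. the IEEE 802.3 pause resolution
+ * tables); the fiber variant uses the 1000BASE-X encoding of the
+ * same two bits.
+ */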
+
+
+/* Check status of Broadcom phy link */
+static void bcom_check_link(struct skge_hw *hw, int port)
+{
+ struct net_device *dev = hw->dev[port];
+ struct skge_port *skge = netdev_priv(dev);
+ u16 status;
+
+ /* read twice because of latch */
+ xm_phy_read(hw, port, PHY_BCOM_STAT);
+ status = xm_phy_read(hw, port, PHY_BCOM_STAT);
+
+ if ((status & PHY_ST_LSYNC) == 0) {
+ xm_link_down(hw, port);
+ return;
+ }
+
+ if (skge->autoneg == AUTONEG_ENABLE) {
+ u16 lpa, aux;
+
+ if (!(status & PHY_ST_AN_OVER))
+ return;
+
+ lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
+ if (lpa & PHY_B_AN_RF) {
+ netdev_notice(dev, "remote fault\n");
+ return;
+ }
+
+ aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);
+
+ /* Check Duplex mismatch */
+ switch (aux & PHY_B_AS_AN_RES_MSK) {
+ case PHY_B_RES_1000FD:
+ skge->duplex = DUPLEX_FULL;
+ break;
+ case PHY_B_RES_1000HD:
+ skge->duplex = DUPLEX_HALF;
+ break;
+ default:
+ netdev_notice(dev, "duplex mismatch\n");
+ return;
+ }
+
+ /* We are using IEEE 802.3z/D5.0 Table 37-4 */
+ switch (aux & PHY_B_AS_PAUSE_MSK) {
+ case PHY_B_AS_PAUSE_MSK:
+ skge->flow_status = FLOW_STAT_SYMMETRIC;
+ break;
+ case PHY_B_AS_PRR:
+ skge->flow_status = FLOW_STAT_REM_SEND;
+ break;
+ case PHY_B_AS_PRT:
+ skge->flow_status = FLOW_STAT_LOC_SEND;
+ break;
+ default:
+ skge->flow_status = FLOW_STAT_NONE;
+ }
+ skge->speed = SPEED_1000;
+ }
+
+ if (!netif_carrier_ok(dev))
+ genesis_link_up(skge);
+}
+
+/* The Broadcom 5400 only supports gigabit! SysKonnect did not add an
+ * additional PHY for 10 or 100 Mbit operation
+ */
+static void bcom_phy_init(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ int i;
+ u16 id1, r, ext, ctl;
+
+ /* magic workaround patterns for Broadcom */
+ static const struct {
+ u16 reg;
+ u16 val;
+ } A1hack[] = {
+ { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
+ { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
+ { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
+ { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
+ }, C0hack[] = {
+ { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
+ { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
+ };
+
+ /* read Id from external PHY (all have the same address) */
+ id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);
+
+ /* Optimize MDIO transfer by suppressing preamble. */
+ r = xm_read16(hw, port, XM_MMU_CMD);
+ r |= XM_MMU_NO_PRE;
+ xm_write16(hw, port, XM_MMU_CMD, r);
+
+ switch (id1) {
+ case PHY_BCOM_ID1_C0:
+ /*
+ * Workaround BCOM Errata for the C0 type.
+ * Write magic patterns to reserved registers.
+ */
+ for (i = 0; i < ARRAY_SIZE(C0hack); i++)
+ xm_phy_write(hw, port,
+ C0hack[i].reg, C0hack[i].val);
+
+ break;
+ case PHY_BCOM_ID1_A1:
+ /*
+ * Workaround BCOM Errata for the A1 type.
+ * Write magic patterns to reserved registers.
+ */
+ for (i = 0; i < ARRAY_SIZE(A1hack); i++)
+ xm_phy_write(hw, port,
+ A1hack[i].reg, A1hack[i].val);
+ break;
+ }
+
+ /*
+ * Workaround BCOM Errata (#10523) for all BCom PHYs.
+ * Disable Power Management after reset.
+ */
+ r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
+ r |= PHY_B_AC_DIS_PM;
+ xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r);
+
+ /* Dummy read */
+ xm_read16(hw, port, XM_ISRC);
+
+ ext = PHY_B_PEC_EN_LTR; /* enable tx led */
+ ctl = PHY_CT_SP1000; /* always 1000mbit */
+
+ if (skge->autoneg == AUTONEG_ENABLE) {
+ /*
+ * Workaround BCOM Errata #1 for the C5 type.
+ * 1000Base-T Link Acquisition Failure in Slave Mode
+ * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
+ */
+ u16 adv = PHY_B_1000C_RD;
+ if (skge->advertising & ADVERTISED_1000baseT_Half)
+ adv |= PHY_B_1000C_AHD;
+ if (skge->advertising & ADVERTISED_1000baseT_Full)
+ adv |= PHY_B_1000C_AFD;
+ xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv);
+
+ ctl |= PHY_CT_ANE | PHY_CT_RE_CFG;
+ } else {
+ if (skge->duplex == DUPLEX_FULL)
+ ctl |= PHY_CT_DUP_MD;
+ /* Force to slave */
+ xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE);
+ }
+
+ /* Set autonegotiation pause parameters */
+ xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV,
+ phy_pause_map[skge->flow_control] | PHY_AN_CSMA);
+
+ /* Handle Jumbo frames */
+ if (hw->dev[port]->mtu > ETH_DATA_LEN) {
+ xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
+ PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK);
+
+ ext |= PHY_B_PEC_HIGH_LA;
+
+ }
+
+ xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
+ xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);
+
+ /* Use link status change interrupt */
+ xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
+}
+
+static void xm_phy_init(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ u16 ctrl = 0;
+
+ if (skge->autoneg == AUTONEG_ENABLE) {
+ if (skge->advertising & ADVERTISED_1000baseT_Half)
+ ctrl |= PHY_X_AN_HD;
+ if (skge->advertising & ADVERTISED_1000baseT_Full)
+ ctrl |= PHY_X_AN_FD;
+
+ ctrl |= fiber_pause_map[skge->flow_control];
+
+ xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl);
+
+ /* Restart Auto-negotiation */
+ ctrl = PHY_CT_ANE | PHY_CT_RE_CFG;
+ } else {
+ /* Set DuplexMode in Config register */
+ if (skge->duplex == DUPLEX_FULL)
+ ctrl |= PHY_CT_DUP_MD;
+ /*
+ * Do NOT enable Auto-negotiation here. This would hold
+ * the link down because no IDLEs are transmitted
+ */
+ }
+
+ xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl);
+
+ /* Poll PHY for status changes */
+ mod_timer(&skge->link_timer, jiffies + LINK_HZ);
+}
+
+static int xm_check_link(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ u16 status;
+
+ /* read twice because of latch */
+ xm_phy_read(hw, port, PHY_XMAC_STAT);
+ status = xm_phy_read(hw, port, PHY_XMAC_STAT);
+
+ if ((status & PHY_ST_LSYNC) == 0) {
+ xm_link_down(hw, port);
+ return 0;
+ }
+
+ if (skge->autoneg == AUTONEG_ENABLE) {
+ u16 lpa, res;
+
+ if (!(status & PHY_ST_AN_OVER))
+ return 0;
+
+ lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
+ if (lpa & PHY_B_AN_RF) {
+ netdev_notice(dev, "remote fault\n");
+ return 0;
+ }
+
+ res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI);
+
+ /* Check Duplex mismatch */
+ switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) {
+ case PHY_X_RS_FD:
+ skge->duplex = DUPLEX_FULL;
+ break;
+ case PHY_X_RS_HD:
+ skge->duplex = DUPLEX_HALF;
+ break;
+ default:
+ netdev_notice(dev, "duplex mismatch\n");
+ return 0;
+ }
+
+ /* We are using IEEE 802.3z/D5.0 Table 37-4 */
+ if ((skge->flow_control == FLOW_MODE_SYMMETRIC ||
+ skge->flow_control == FLOW_MODE_SYM_OR_REM) &&
+ (lpa & PHY_X_P_SYM_MD))
+ skge->flow_status = FLOW_STAT_SYMMETRIC;
+ else if (skge->flow_control == FLOW_MODE_SYM_OR_REM &&
+ (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
+ /* Enable PAUSE receive, disable PAUSE transmit */
+ skge->flow_status = FLOW_STAT_REM_SEND;
+ else if (skge->flow_control == FLOW_MODE_LOC_SEND &&
+ (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
+ /* Disable PAUSE receive, enable PAUSE transmit */
+ skge->flow_status = FLOW_STAT_LOC_SEND;
+ else
+ skge->flow_status = FLOW_STAT_NONE;
+
+ skge->speed = SPEED_1000;
+ }
+
+ if (!netif_carrier_ok(dev))
+ genesis_link_up(skge);
+ return 1;
+}
+
+/* Poll to check for link coming up.
+ *
+ * Since the internal PHY is wired to a level-triggered pin, we
+ * can't get an interrupt when carrier is detected, so we need to
+ * poll for the link coming up.
+ */
+static void xm_link_timer(unsigned long arg)
+{
+ struct skge_port *skge = (struct skge_port *) arg;
+ struct net_device *dev = skge->netdev;
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ int i;
+ unsigned long flags;
+
+ if (!netif_running(dev))
+ return;
+
+ spin_lock_irqsave(&hw->phy_lock, flags);
+
+ /*
+ * Verify the link by checking the GPIO register three times.
+ * This pin carries the signal from the link_sync pin.
+ */
+ for (i = 0; i < 3; i++) {
+ if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS)
+ goto link_down;
+ }
+
+ /* Re-enable interrupt to detect link down */
+ if (xm_check_link(dev)) {
+ u16 msk = xm_read16(hw, port, XM_IMSK);
+ msk &= ~XM_IS_INP_ASS;
+ xm_write16(hw, port, XM_IMSK, msk);
+ xm_read16(hw, port, XM_ISRC);
+ } else {
+link_down:
+ mod_timer(&skge->link_timer,
+ round_jiffies(jiffies + LINK_HZ));
+ }
+ spin_unlock_irqrestore(&hw->phy_lock, flags);
+}
+
+static void genesis_mac_init(struct skge_hw *hw, int port)
+{
+ struct net_device *dev = hw->dev[port];
+ struct skge_port *skge = netdev_priv(dev);
+ int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN;
+ int i;
+ u32 r;
+ static const u8 zero[6] = { 0 };
+
+ for (i = 0; i < 10; i++) {
+ skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
+ MFF_SET_MAC_RST);
+ if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
+ goto reset_ok;
+ udelay(1);
+ }
+
+ netdev_warn(dev, "genesis reset failed\n");
+
+ reset_ok:
+ /* Unreset the XMAC. */
+ skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
+
+ /*
+ * Perform additional initialization for external PHYs,
+ * namely for the 1000baseTX cards that use the XMAC's
+ * GMII mode.
+ */
+ if (hw->phy_type != SK_PHY_XMAC) {
+ /* Take external Phy out of reset */
+ r = skge_read32(hw, B2_GP_IO);
+ if (port == 0)
+ r |= GP_DIR_0|GP_IO_0;
+ else
+ r |= GP_DIR_2|GP_IO_2;
+
+ skge_write32(hw, B2_GP_IO, r);
+
+ /* Enable GMII interface */
+ xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
+ }
+
+
+ switch (hw->phy_type) {
+ case SK_PHY_XMAC:
+ xm_phy_init(skge);
+ break;
+ case SK_PHY_BCOM:
+ bcom_phy_init(skge);
+ bcom_check_link(hw, port);
+ }
+
+ /* Set Station Address */
+ xm_outaddr(hw, port, XM_SA, dev->dev_addr);
+
+ /* We don't use match addresses so clear them */
+ for (i = 1; i < 16; i++)
+ xm_outaddr(hw, port, XM_EXM(i), zero);
+
+ /* Clear MIB counters */
+ xm_write16(hw, port, XM_STAT_CMD,
+ XM_SC_CLR_RXC | XM_SC_CLR_TXC);
+ /* Clear two times according to Errata #3 */
+ xm_write16(hw, port, XM_STAT_CMD,
+ XM_SC_CLR_RXC | XM_SC_CLR_TXC);
+
+ /* configure Rx High Water Mark (XM_RX_HI_WM) */
+ xm_write16(hw, port, XM_RX_HI_WM, 1450);
+
+ /* We don't need the FCS appended to the packet. */
+ r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS;
+ if (jumbo)
+ r |= XM_RX_BIG_PK_OK;
+
+ if (skge->duplex == DUPLEX_HALF) {
+ /*
+ * If in manual half duplex mode the other side might be in
+ * full duplex mode, so ignore if a carrier extension is not seen
+ * on frames received
+ */
+ r |= XM_RX_DIS_CEXT;
+ }
+ xm_write16(hw, port, XM_RX_CMD, r);
+
+ /* We want short frames padded to 60 bytes. */
+ xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD);
+
+ /* Increase threshold for jumbo frames on dual port */
+ if (hw->ports > 1 && jumbo)
+ xm_write16(hw, port, XM_TX_THR, 1020);
+ else
+ xm_write16(hw, port, XM_TX_THR, 512);
+
+ /*
+ * Enable the reception of all error frames. This is
+ * a necessary evil due to the design of the XMAC. The
+ * XMAC's receive FIFO is only 8K in size, however jumbo
+ * frames can be up to 9000 bytes in length. When bad
+ * frame filtering is enabled, the XMAC's RX FIFO operates
+ * in 'store and forward' mode. For this to work, the
+ * entire frame has to fit into the FIFO, but that means
+ * that jumbo frames larger than 8192 bytes will be
+ * truncated. Disabling all bad frame filtering causes
+ * the RX FIFO to operate in streaming mode, in which
+ * case the XMAC will start transferring frames out of the
+ * RX FIFO as soon as the FIFO threshold is reached.
+ */
+ xm_write32(hw, port, XM_MODE, XM_DEF_MODE);
+
+
+ /*
+ * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
+ * - Enable all bits excepting 'Octets Rx OK Low CntOv'
+ * and 'Octets Rx OK Hi Cnt Ov'.
+ */
+ xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK);
+
+ /*
+ * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
+ * - Enable all bits excepting 'Octets Tx OK Low CntOv'
+ * and 'Octets Tx OK Hi Cnt Ov'.
+ */
+ xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK);
+
+ /* Configure MAC arbiter */
+ skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
+
+ /* configure timeout values */
+ skge_write8(hw, B3_MA_TOINI_RX1, 72);
+ skge_write8(hw, B3_MA_TOINI_RX2, 72);
+ skge_write8(hw, B3_MA_TOINI_TX1, 72);
+ skge_write8(hw, B3_MA_TOINI_TX2, 72);
+
+ skge_write8(hw, B3_MA_RCINI_RX1, 0);
+ skge_write8(hw, B3_MA_RCINI_RX2, 0);
+ skge_write8(hw, B3_MA_RCINI_TX1, 0);
+ skge_write8(hw, B3_MA_RCINI_TX2, 0);
+
+ /* Configure Rx MAC FIFO */
+ skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
+ skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
+ skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);
+
+ /* Configure Tx MAC FIFO */
+ skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
+ skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
+ skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);
+
+ if (jumbo) {
+ /* Enable frame flushing if jumbo frames used */
+ skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH);
+ } else {
+ /* enable timeout timers if normal frames */
+ skge_write16(hw, B3_PA_CTRL,
+ (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
+ }
+}
+
+static void genesis_stop(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ unsigned retries = 1000;
+ u16 cmd;
+
+ /* Disable Tx and Rx */
+ cmd = xm_read16(hw, port, XM_MMU_CMD);
+ cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
+ xm_write16(hw, port, XM_MMU_CMD, cmd);
+
+ genesis_reset(hw, port);
+
+ /* Clear Tx packet arbiter timeout IRQ */
+ skge_write16(hw, B3_PA_CTRL,
+ port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);
+
+ /* Reset the MAC */
+ skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
+ do {
+ skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);
+ if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST))
+ break;
+ } while (--retries > 0);
+
+ /* For external PHYs there must be special handling */
+ if (hw->phy_type != SK_PHY_XMAC) {
+ u32 reg = skge_read32(hw, B2_GP_IO);
+ if (port == 0) {
+ reg |= GP_DIR_0;
+ reg &= ~GP_IO_0;
+ } else {
+ reg |= GP_DIR_2;
+ reg &= ~GP_IO_2;
+ }
+ skge_write32(hw, B2_GP_IO, reg);
+ skge_read32(hw, B2_GP_IO);
+ }
+
+ xm_write16(hw, port, XM_MMU_CMD,
+ xm_read16(hw, port, XM_MMU_CMD)
+ & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));
+
+ xm_read16(hw, port, XM_MMU_CMD);
+}
+
+
+static void genesis_get_stats(struct skge_port *skge, u64 *data)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ int i;
+ unsigned long timeout = jiffies + HZ;
+
+ xm_write16(hw, port,
+ XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC);
+
+ /* wait for update to complete */
+ while (xm_read16(hw, port, XM_STAT_CMD)
+ & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) {
+ if (time_after(jiffies, timeout))
+ break;
+ udelay(10);
+ }
+
+ /* special case for 64 bit octet counter */
+ data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32
+ | xm_read32(hw, port, XM_TXO_OK_LO);
+ data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32
+ | xm_read32(hw, port, XM_RXO_OK_LO);
+
+ for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
+ data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset);
+}
+
+static void genesis_mac_intr(struct skge_hw *hw, int port)
+{
+ struct net_device *dev = hw->dev[port];
+ struct skge_port *skge = netdev_priv(dev);
+ u16 status = xm_read16(hw, port, XM_ISRC);
+
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "mac interrupt status 0x%x\n", status);
+
+ if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) {
+ xm_link_down(hw, port);
+ mod_timer(&skge->link_timer, jiffies + 1);
+ }
+
+ if (status & XM_IS_TXF_UR) {
+ xm_write32(hw, port, XM_MODE, XM_MD_FTF);
+ ++dev->stats.tx_fifo_errors;
+ }
+}
+
+static void genesis_link_up(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ u16 cmd, msk;
+ u32 mode;
+
+ cmd = xm_read16(hw, port, XM_MMU_CMD);
+
+ /*
+ * Enabling pause frame reception is required for 1000BT
+ * because the XMAC is not reset when the link goes down
+ */
+ if (skge->flow_status == FLOW_STAT_NONE ||
+ skge->flow_status == FLOW_STAT_LOC_SEND)
+ /* Disable Pause Frame Reception */
+ cmd |= XM_MMU_IGN_PF;
+ else
+ /* Enable Pause Frame Reception */
+ cmd &= ~XM_MMU_IGN_PF;
+
+ xm_write16(hw, port, XM_MMU_CMD, cmd);
+
+ mode = xm_read32(hw, port, XM_MODE);
+ if (skge->flow_status == FLOW_STAT_SYMMETRIC ||
+ skge->flow_status == FLOW_STAT_LOC_SEND) {
+ /*
+ * Configure Pause Frame Generation
+ * Use internal and external Pause Frame Generation.
+ * Sending pause frames is edge triggered.
+ * Send a Pause frame with the maximum pause time if
+ * an internal or external FIFO full condition occurs.
+ * Send a zero pause time frame to re-start transmission.
+ */
+ /* XM_PAUSE_DA = '010000C28001' (default) */
+ /* XM_MAC_PTIME = 0xffff (maximum) */
+ /* remember this value is defined in big endian (!) */
+ xm_write16(hw, port, XM_MAC_PTIME, 0xffff);
+
+ mode |= XM_PAUSE_MODE;
+ skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
+ } else {
+ /*
+ * Disabling pause frame generation is required for 1000BT
+ * because the XMAC is not reset when the link goes down
+ */
+ /* Disable Pause Mode in Mode Register */
+ mode &= ~XM_PAUSE_MODE;
+
+ skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
+ }
+
+ xm_write32(hw, port, XM_MODE, mode);
+
+ /* Turn on detection of Tx underrun */
+ msk = xm_read16(hw, port, XM_IMSK);
+ msk &= ~XM_IS_TXF_UR;
+ xm_write16(hw, port, XM_IMSK, msk);
+
+ xm_read16(hw, port, XM_ISRC);
+
+ /* get MMU Command Reg. */
+ cmd = xm_read16(hw, port, XM_MMU_CMD);
+ if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL)
+ cmd |= XM_MMU_GMII_FD;
+
+ /*
+ * Workaround BCOM Errata (#10523) for all BCom PHYs:
+ * Enable Power Management after link up
+ */
+ if (hw->phy_type == SK_PHY_BCOM) {
+ xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
+ xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
+ & ~PHY_B_AC_DIS_PM);
+ xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
+ }
+
+ /* enable Rx/Tx */
+ xm_write16(hw, port, XM_MMU_CMD,
+ cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
+ skge_link_up(skge);
+}
+
+
+static inline void bcom_phy_intr(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ u16 isrc;
+
+ isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "phy interrupt status 0x%x\n", isrc);
+
+ if (isrc & PHY_B_IS_PSE)
+ pr_err("%s: uncorrectable pair swap error\n",
+ hw->dev[port]->name);
+
+ /* Workaround BCom Errata:
+ * enable and disable loopback mode if "NO HCD" occurs.
+ */
+ if (isrc & PHY_B_IS_NO_HDCL) {
+ u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL);
+ xm_phy_write(hw, port, PHY_BCOM_CTRL,
+ ctrl | PHY_CT_LOOP);
+ xm_phy_write(hw, port, PHY_BCOM_CTRL,
+ ctrl & ~PHY_CT_LOOP);
+ }
+
+ if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
+ bcom_check_link(hw, port);
+
+}
+
+static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
+{
+ int i;
+
+ gma_write16(hw, port, GM_SMI_DATA, val);
+ gma_write16(hw, port, GM_SMI_CTRL,
+ GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
+ for (i = 0; i < PHY_RETRIES; i++) {
+ udelay(1);
+
+ if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
+ return 0;
+ }
+
+ pr_warning("%s: phy write timeout\n", hw->dev[port]->name);
+ return -EIO;
+}
+
+static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
+{
+ int i;
+
+ gma_write16(hw, port, GM_SMI_CTRL,
+ GM_SMI_CT_PHY_AD(hw->phy_addr)
+ | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
+
+ for (i = 0; i < PHY_RETRIES; i++) {
+ udelay(1);
+ if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
+ goto ready;
+ }
+
+ return -ETIMEDOUT;
+ ready:
+ *val = gma_read16(hw, port, GM_SMI_DATA);
+ return 0;
+}
+
+static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
+{
+ u16 v = 0;
+ if (__gm_phy_read(hw, port, reg, &v))
+ pr_warning("%s: phy read timeout\n", hw->dev[port]->name);
+ return v;
+}
+
+/* Marvell Phy Initialization */
+static void yukon_init(struct skge_hw *hw, int port)
+{
+ struct skge_port *skge = netdev_priv(hw->dev[port]);
+ u16 ctrl, ct1000, adv;
+
+ if (skge->autoneg == AUTONEG_ENABLE) {
+ u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
+
+ ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
+ PHY_M_EC_MAC_S_MSK);
+ ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
+
+ ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
+
+ gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
+ }
+
+ ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
+ if (skge->autoneg == AUTONEG_DISABLE)
+ ctrl &= ~PHY_CT_ANE;
+
+ ctrl |= PHY_CT_RESET;
+ gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
+
+ ctrl = 0;
+ ct1000 = 0;
+ adv = PHY_AN_CSMA;
+
+ if (skge->autoneg == AUTONEG_ENABLE) {
+ if (hw->copper) {
+ if (skge->advertising & ADVERTISED_1000baseT_Full)
+ ct1000 |= PHY_M_1000C_AFD;
+ if (skge->advertising & ADVERTISED_1000baseT_Half)
+ ct1000 |= PHY_M_1000C_AHD;
+ if (skge->advertising & ADVERTISED_100baseT_Full)
+ adv |= PHY_M_AN_100_FD;
+ if (skge->advertising & ADVERTISED_100baseT_Half)
+ adv |= PHY_M_AN_100_HD;
+ if (skge->advertising & ADVERTISED_10baseT_Full)
+ adv |= PHY_M_AN_10_FD;
+ if (skge->advertising & ADVERTISED_10baseT_Half)
+ adv |= PHY_M_AN_10_HD;
+
+ /* Set Flow-control capabilities */
+ adv |= phy_pause_map[skge->flow_control];
+ } else {
+ if (skge->advertising & ADVERTISED_1000baseT_Full)
+ adv |= PHY_M_AN_1000X_AFD;
+ if (skge->advertising & ADVERTISED_1000baseT_Half)
+ adv |= PHY_M_AN_1000X_AHD;
+
+ adv |= fiber_pause_map[skge->flow_control];
+ }
+
+ /* Restart Auto-negotiation */
+ ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
+ } else {
+ /* forced speed/duplex settings */
+ ct1000 = PHY_M_1000C_MSE;
+
+ if (skge->duplex == DUPLEX_FULL)
+ ctrl |= PHY_CT_DUP_MD;
+
+ switch (skge->speed) {
+ case SPEED_1000:
+ ctrl |= PHY_CT_SP1000;
+ break;
+ case SPEED_100:
+ ctrl |= PHY_CT_SP100;
+ break;
+ }
+
+ ctrl |= PHY_CT_RESET;
+ }
+
+ gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
+
+ gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
+ gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
+
+ /* Enable phy interrupt on autonegotiation complete (or link up) */
+ if (skge->autoneg == AUTONEG_ENABLE)
+ gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK);
+ else
+ gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
+}
+
+static void yukon_reset(struct skge_hw *hw, int port)
+{
+ gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */
+ gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
+ gma_write16(hw, port, GM_MC_ADDR_H2, 0);
+ gma_write16(hw, port, GM_MC_ADDR_H3, 0);
+ gma_write16(hw, port, GM_MC_ADDR_H4, 0);
+
+ gma_write16(hw, port, GM_RX_CTRL,
+ gma_read16(hw, port, GM_RX_CTRL)
+ | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
+}
+
+/* Apparently, early versions of Yukon-Lite had wrong chip_id? */
+static int is_yukon_lite_a0(struct skge_hw *hw)
+{
+ u32 reg;
+ int ret;
+
+ if (hw->chip_id != CHIP_ID_YUKON)
+ return 0;
+
+ reg = skge_read32(hw, B2_FAR);
+ skge_write8(hw, B2_FAR + 3, 0xff);
+ ret = (skge_read8(hw, B2_FAR + 3) != 0);
+ skge_write32(hw, B2_FAR, reg);
+ return ret;
+}
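+
+/* Detection trick: the top byte of B2_FAR apparently reads back as
+ * zero on a genuine Yukon but is writable on Yukon-Lite rev. A0, so
+ * a nonzero read-back after writing 0xff identifies the A0 part;
+ * the original register value is restored afterwards.
+ */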
+
+static void yukon_mac_init(struct skge_hw *hw, int port)
+{
+ struct skge_port *skge = netdev_priv(hw->dev[port]);
+ int i;
+ u32 reg;
+ const u8 *addr = hw->dev[port]->dev_addr;
+
+ /* WA code for COMA mode -- set PHY reset */
+ if (hw->chip_id == CHIP_ID_YUKON_LITE &&
+ hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
+ reg = skge_read32(hw, B2_GP_IO);
+ reg |= GP_DIR_9 | GP_IO_9;
+ skge_write32(hw, B2_GP_IO, reg);
+ }
+
+ /* hard reset */
+ skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
+ skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
+
+ /* WA code for COMA mode -- clear PHY reset */
+ if (hw->chip_id == CHIP_ID_YUKON_LITE &&
+ hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
+ reg = skge_read32(hw, B2_GP_IO);
+ reg |= GP_DIR_9;
+ reg &= ~GP_IO_9;
+ skge_write32(hw, B2_GP_IO, reg);
+ }
+
+ /* Set hardware config mode */
+ reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
+ GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
+ reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
+
+ /* Clear GMC reset */
+ skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
+ skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
+ skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
+
+ if (skge->autoneg == AUTONEG_DISABLE) {
+ reg = GM_GPCR_AU_ALL_DIS;
+ gma_write16(hw, port, GM_GP_CTRL,
+ gma_read16(hw, port, GM_GP_CTRL) | reg);
+
+ switch (skge->speed) {
+ case SPEED_1000:
+ reg &= ~GM_GPCR_SPEED_100;
+ reg |= GM_GPCR_SPEED_1000;
+ break;
+ case SPEED_100:
+ reg &= ~GM_GPCR_SPEED_1000;
+ reg |= GM_GPCR_SPEED_100;
+ break;
+ case SPEED_10:
+ reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
+ break;
+ }
+
+ if (skge->duplex == DUPLEX_FULL)
+ reg |= GM_GPCR_DUP_FULL;
+ } else
+ reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
+
+ switch (skge->flow_control) {
+ case FLOW_MODE_NONE:
+ skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
+ reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
+ break;
+ case FLOW_MODE_LOC_SEND:
+ /* disable Rx flow-control */
+ reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
+ break;
+ case FLOW_MODE_SYMMETRIC:
+ case FLOW_MODE_SYM_OR_REM:
+ /* enable Tx & Rx flow-control */
+ break;
+ }
+
+ gma_write16(hw, port, GM_GP_CTRL, reg);
+ skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
+
+ yukon_init(hw, port);
+
+ /* MIB clear */
+ reg = gma_read16(hw, port, GM_PHY_ADDR);
+ gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
+
+ for (i = 0; i < GM_MIB_CNT_SIZE; i++)
+ gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
+ gma_write16(hw, port, GM_PHY_ADDR, reg);
+
+ /* transmit control */
+ gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
+
+ /* receive control reg: unicast + multicast + no FCS */
+ gma_write16(hw, port, GM_RX_CTRL,
+ GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
+
+ /* transmit flow control */
+ gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
+
+ /* transmit parameter */
+ gma_write16(hw, port, GM_TX_PARAM,
+ TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
+ TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
+ TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));
+
+ /* configure the Serial Mode Register */
+ reg = DATA_BLIND_VAL(DATA_BLIND_DEF)
+ | GM_SMOD_VLAN_ENA
+ | IPG_DATA_VAL(IPG_DATA_DEF);
+
+ if (hw->dev[port]->mtu > ETH_DATA_LEN)
+ reg |= GM_SMOD_JUMBO_ENA;
+
+ gma_write16(hw, port, GM_SERIAL_MODE, reg);
+
+ /* physical address: used for pause frames */
+ gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
+ /* virtual address for data */
+ gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
+
+ /* Clear interrupt masks for counter overflows */
+ gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
+ gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
+ gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
+
+ /* Initialize Mac Fifo */
+
+ /* Configure Rx MAC FIFO */
+ skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
+ reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
+
+ /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
+ if (is_yukon_lite_a0(hw))
+ reg &= ~GMF_RX_F_FL_ON;
+
+ skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
+ skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
+ /*
+ * because Pause Packet Truncation in GMAC is not working
+ * we have to increase the Flush Threshold to 64 bytes
+ * in order to flush pause packets in Rx FIFO on Yukon-1
+ */
+ skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);
+
+ /* Configure Tx MAC FIFO */
+ skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
+ skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
+}
+
+/* Go into power down mode */
+static void yukon_suspend(struct skge_hw *hw, int port)
+{
+ u16 ctrl;
+
+ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+ ctrl |= PHY_M_PC_POL_R_DIS;
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+ ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
+ ctrl |= PHY_CT_RESET;
+ gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
+
+ /* switch IEEE compatible power down mode on */
+ ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
+ ctrl |= PHY_CT_PDOWN;
+ gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
+}
+
+static void yukon_stop(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+
+ skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
+ yukon_reset(hw, port);
+
+ gma_write16(hw, port, GM_GP_CTRL,
+ gma_read16(hw, port, GM_GP_CTRL)
+ & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
+ gma_read16(hw, port, GM_GP_CTRL);
+
+ yukon_suspend(hw, port);
+
+ /* set GPHY Control reset */
+ skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
+ skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
+}
+
+static void yukon_get_stats(struct skge_port *skge, u64 *data)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ int i;
+
+ data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
+ | gma_read32(hw, port, GM_TXO_OK_LO);
+ data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
+ | gma_read32(hw, port, GM_RXO_OK_LO);
+
+ for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
+ data[i] = gma_read32(hw, port,
+ skge_stats[i].gma_offset);
+}
+
+static void yukon_mac_intr(struct skge_hw *hw, int port)
+{
+ struct net_device *dev = hw->dev[port];
+ struct skge_port *skge = netdev_priv(dev);
+ u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
+
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "mac interrupt status 0x%x\n", status);
+
+ if (status & GM_IS_RX_FF_OR) {
+ ++dev->stats.rx_fifo_errors;
+ skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
+ }
+
+ if (status & GM_IS_TX_FF_UR) {
+ ++dev->stats.tx_fifo_errors;
+ skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
+ }
+
+}
+
+static u16 yukon_speed(const struct skge_hw *hw, u16 aux)
+{
+ switch (aux & PHY_M_PS_SPEED_MSK) {
+ case PHY_M_PS_SPEED_1000:
+ return SPEED_1000;
+ case PHY_M_PS_SPEED_100:
+ return SPEED_100;
+ default:
+ return SPEED_10;
+ }
+}
+
+static void yukon_link_up(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ u16 reg;
+
+ /* Enable Transmit FIFO Underrun */
+ skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
+
+ reg = gma_read16(hw, port, GM_GP_CTRL);
+ if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
+ reg |= GM_GPCR_DUP_FULL;
+
+ /* enable Rx/Tx */
+ reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
+ gma_write16(hw, port, GM_GP_CTRL, reg);
+
+ gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
+ skge_link_up(skge);
+}
+
+static void yukon_link_down(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ u16 ctrl;
+
+ ctrl = gma_read16(hw, port, GM_GP_CTRL);
+ ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
+ gma_write16(hw, port, GM_GP_CTRL, ctrl);
+
+ if (skge->flow_status == FLOW_STAT_REM_SEND) {
+ ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
+ ctrl |= PHY_M_AN_ASP;
+ /* restore Asymmetric Pause bit */
+ gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl);
+ }
+
+ skge_link_down(skge);
+
+ yukon_init(hw, port);
+}
+
+static void yukon_phy_intr(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ const char *reason = NULL;
+ u16 istatus, phystat;
+
+ istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
+ phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
+
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "phy interrupt status 0x%x 0x%x\n", istatus, phystat);
+
+ if (istatus & PHY_M_IS_AN_COMPL) {
+ if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
+ & PHY_M_AN_RF) {
+ reason = "remote fault";
+ goto failed;
+ }
+
+ if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
+ reason = "master/slave fault";
+ goto failed;
+ }
+
+ if (!(phystat & PHY_M_PS_SPDUP_RES)) {
+ reason = "speed/duplex";
+ goto failed;
+ }
+
+ skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
+ ? DUPLEX_FULL : DUPLEX_HALF;
+ skge->speed = yukon_speed(hw, phystat);
+
+ /* We are using IEEE 802.3z/D5.0 Table 37-4 */
+ switch (phystat & PHY_M_PS_PAUSE_MSK) {
+ case PHY_M_PS_PAUSE_MSK:
+ skge->flow_status = FLOW_STAT_SYMMETRIC;
+ break;
+ case PHY_M_PS_RX_P_EN:
+ skge->flow_status = FLOW_STAT_REM_SEND;
+ break;
+ case PHY_M_PS_TX_P_EN:
+ skge->flow_status = FLOW_STAT_LOC_SEND;
+ break;
+ default:
+ skge->flow_status = FLOW_STAT_NONE;
+ }
+
+ if (skge->flow_status == FLOW_STAT_NONE ||
+ (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
+ skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
+ else
+ skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
+ yukon_link_up(skge);
+ return;
+ }
+
+ if (istatus & PHY_M_IS_LSP_CHANGE)
+ skge->speed = yukon_speed(hw, phystat);
+
+ if (istatus & PHY_M_IS_DUP_CHANGE)
+ skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
+ if (istatus & PHY_M_IS_LST_CHANGE) {
+ if (phystat & PHY_M_PS_LINK_UP)
+ yukon_link_up(skge);
+ else
+ yukon_link_down(skge);
+ }
+ return;
+ failed:
+ pr_err("%s: autonegotiation failed (%s)\n", skge->netdev->name, reason);
+
+ /* XXX restart autonegotiation? */
+}
+
+static void skge_phy_reset(struct skge_port *skge)
+{
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ struct net_device *dev = hw->dev[port];
+
+ netif_stop_queue(skge->netdev);
+ netif_carrier_off(skge->netdev);
+
+ spin_lock_bh(&hw->phy_lock);
+ if (is_genesis(hw)) {
+ genesis_reset(hw, port);
+ genesis_mac_init(hw, port);
+ } else {
+ yukon_reset(hw, port);
+ yukon_init(hw, port);
+ }
+ spin_unlock_bh(&hw->phy_lock);
+
+ skge_set_multicast(dev);
+}
+
+/* Basic MII support */
+static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(ifr);
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ int err = -EOPNOTSUPP;
+
+ if (!netif_running(dev))
+ return -ENODEV; /* Phy still in reset */
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = hw->phy_addr;
+
+ /* fallthru */
+ case SIOCGMIIREG: {
+ u16 val = 0;
+ spin_lock_bh(&hw->phy_lock);
+
+ if (is_genesis(hw))
+ err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
+ else
+ err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
+ spin_unlock_bh(&hw->phy_lock);
+ data->val_out = val;
+ break;
+ }
+
+ case SIOCSMIIREG:
+ spin_lock_bh(&hw->phy_lock);
+ if (is_genesis(hw))
+ err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
+ data->val_in);
+ else
+ err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
+ data->val_in);
+ spin_unlock_bh(&hw->phy_lock);
+ break;
+ }
+ return err;
+}
+
+static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
+{
+ u32 end;
+
+ start /= 8;
+ len /= 8;
+ end = start + len - 1;
+
+ skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
+ skge_write32(hw, RB_ADDR(q, RB_START), start);
+ skge_write32(hw, RB_ADDR(q, RB_WP), start);
+ skge_write32(hw, RB_ADDR(q, RB_RP), start);
+ skge_write32(hw, RB_ADDR(q, RB_END), end);
+
+ if (q == Q_R1 || q == Q_R2) {
+ /* Set thresholds on receive queues */
+ skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
+ start + (2*len)/3);
+ skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
+ start + (len/3));
+ } else {
+ /* Enable store & forward on Tx queue's because
+ * Tx FIFO is only 4K on Genesis and 1K on Yukon
+ */
+ skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
+ }
+
+ skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
+}
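+
+/* RAM buffer registers count in 8-byte quadwords, hence the /8;
+ * e.g. a 64 KB chunk at offset 0 gives start = 0, end = 8191.
+ * The UTPP/LTPP writes presumably set the upper/lower pause
+ * thresholds at 2/3 and 1/3 of the receive queue.
+ */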
+
+/* Setup Bus Memory Interface */
+static void skge_qset(struct skge_port *skge, u16 q,
+ const struct skge_element *e)
+{
+ struct skge_hw *hw = skge->hw;
+ u32 watermark = 0x600;
+ u64 base = skge->dma + (e->desc - skge->mem);
+
+ /* optimization to reduce window on 32-bit/33 MHz */
+ if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0)
+ watermark /= 2;
+
+ skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET);
+ skge_write32(hw, Q_ADDR(q, Q_F), watermark);
+ skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
+ skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
+}
+
+static int skge_up(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ u32 chunk, ram_addr;
+ size_t rx_size, tx_size;
+ int err;
+
+ if (!is_valid_ether_addr(dev->dev_addr))
+ return -EINVAL;
+
+ netif_info(skge, ifup, skge->netdev, "enabling interface\n");
+
+ if (dev->mtu > RX_BUF_SIZE)
+ skge->rx_buf_size = dev->mtu + ETH_HLEN;
+ else
+ skge->rx_buf_size = RX_BUF_SIZE;
+
+
+ rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
+ tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
+ skge->mem_size = tx_size + rx_size;
+ skge->mem = pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma);
+ if (!skge->mem)
+ return -ENOMEM;
+
+ BUG_ON(skge->dma & 7);
+
+ if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
+ dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
+ err = -EINVAL;
+ goto free_pci_mem;
+ }
+
+ memset(skge->mem, 0, skge->mem_size);
+
+ err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
+ if (err)
+ goto free_pci_mem;
+
+ err = skge_rx_fill(dev);
+ if (err)
+ goto free_rx_ring;
+
+ err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
+ skge->dma + rx_size);
+ if (err)
+ goto free_rx_ring;
+
+ if (hw->ports == 1) {
+ err = request_irq(hw->pdev->irq, skge_intr, IRQF_SHARED,
+ dev->name, hw);
+ if (err) {
+ netdev_err(dev, "Unable to allocate interrupt %d error: %d\n",
+ hw->pdev->irq, err);
+ goto free_tx_ring;
+ }
+ }
+
+ /* Initialize MAC */
+ spin_lock_bh(&hw->phy_lock);
+ if (is_genesis(hw))
+ genesis_mac_init(hw, port);
+ else
+ yukon_mac_init(hw, port);
+ spin_unlock_bh(&hw->phy_lock);
+
+ /* Configure RAM buffers - split equally between ports and Tx/Rx */
+ chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2);
+ ram_addr = hw->ram_offset + 2 * chunk * port;
+
+ skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
+ skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);
+
+ BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean);
+ skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
+ skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);
+
+ /* Start receiver BMU */
+ wmb();
+ skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
+ skge_led(skge, LED_MODE_ON);
+
+ spin_lock_irq(&hw->hw_lock);
+ hw->intr_mask |= portmask[port];
+ skge_write32(hw, B0_IMSK, hw->intr_mask);
+ skge_read32(hw, B0_IMSK);
+ spin_unlock_irq(&hw->hw_lock);
+
+ napi_enable(&skge->napi);
+ return 0;
+
+ free_tx_ring:
+ kfree(skge->tx_ring.start);
+ free_rx_ring:
+ skge_rx_clean(skge);
+ kfree(skge->rx_ring.start);
+ free_pci_mem:
+ pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
+ skge->mem = NULL;
+
+ return err;
+}
+
+/* stop receiver */
+static void skge_rx_stop(struct skge_hw *hw, int port)
+{
+ skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP);
+ skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL),
+ RB_RST_SET|RB_DIS_OP_MD);
+ skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);
+}
+
+static int skge_down(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+
+ if (skge->mem == NULL)
+ return 0;
+
+ netif_info(skge, ifdown, skge->netdev, "disabling interface\n");
+
+ netif_tx_disable(dev);
+
+ if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC)
+ del_timer_sync(&skge->link_timer);
+
+ napi_disable(&skge->napi);
+ netif_carrier_off(dev);
+
+ spin_lock_irq(&hw->hw_lock);
+ hw->intr_mask &= ~portmask[port];
+ skge_write32(hw, B0_IMSK, (hw->ports == 1) ? 0 : hw->intr_mask);
+ skge_read32(hw, B0_IMSK);
+ spin_unlock_irq(&hw->hw_lock);
+
+ if (hw->ports == 1)
+ free_irq(hw->pdev->irq, hw);
+
+ skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
+ if (is_genesis(hw))
+ genesis_stop(skge);
+ else
+ yukon_stop(skge);
+
+ /* Stop transmitter */
+ skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
+ skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
+ RB_RST_SET|RB_DIS_OP_MD);
+
+
+ /* Disable Force Sync bit and Enable Alloc bit */
+ skge_write8(hw, SK_REG(port, TXA_CTRL),
+ TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
+
+ /* Stop Interval Timer and Limit Counter of Tx Arbiter */
+ skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
+ skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
+
+ /* Reset PCI FIFO */
+ skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
+ skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
+
+ /* Reset the RAM Buffer async Tx queue */
+ skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET);
+
+ skge_rx_stop(hw, port);
+
+ if (is_genesis(hw)) {
+ skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
+ skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
+ } else {
+ skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
+ skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
+ }
+
+ skge_led(skge, LED_MODE_OFF);
+
+ netif_tx_lock_bh(dev);
+ skge_tx_clean(dev);
+ netif_tx_unlock_bh(dev);
+
+ skge_rx_clean(skge);
+
+ kfree(skge->rx_ring.start);
+ kfree(skge->tx_ring.start);
+ pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
+ skge->mem = NULL;
+ return 0;
+}
+
+static inline int skge_avail(const struct skge_ring *ring)
+{
+ smp_mb();
+ return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
+ + (ring->to_clean - ring->to_use) - 1;
+}
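+
+/* One slot is always left unused so that to_use == to_clean means
+ * empty rather than full; e.g. an idle ring with count = 512
+ * reports 511 free slots.
+ */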
+
+static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ struct skge_element *e;
+ struct skge_tx_desc *td;
+ int i;
+ u32 control, len;
+ u64 map;
+
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
+ if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1))
+ return NETDEV_TX_BUSY;
+
+ e = skge->tx_ring.to_use;
+ td = e->desc;
+ BUG_ON(td->control & BMU_OWN);
+ e->skb = skb;
+ len = skb_headlen(skb);
+ map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ dma_unmap_addr_set(e, mapaddr, map);
+ dma_unmap_len_set(e, maplen, len);
+
+ td->dma_lo = map;
+ td->dma_hi = map >> 32;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const int offset = skb_checksum_start_offset(skb);
+
+ /* This seems backwards, but it is what the sk98lin
+ * driver does. Looks like the hardware is wrong?
+ */
+ if (ipip_hdr(skb)->protocol == IPPROTO_UDP &&
+ hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
+ control = BMU_TCP_CHECK;
+ else
+ control = BMU_UDP_CHECK;
+
+ td->csum_offs = 0;
+ td->csum_start = offset;
+ td->csum_write = offset + skb->csum_offset;
+ } else
+ control = BMU_CHECK;
+
+ if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */
+ control |= BMU_EOF | BMU_IRQ_EOF;
+ else {
+ struct skge_tx_desc *tf = td;
+
+ control |= BMU_STFWD;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
+ frag->size, DMA_TO_DEVICE);
+
+ e = e->next;
+ e->skb = skb;
+ tf = e->desc;
+ BUG_ON(tf->control & BMU_OWN);
+
+ tf->dma_lo = map;
+ tf->dma_hi = (u64) map >> 32;
+ dma_unmap_addr_set(e, mapaddr, map);
+ dma_unmap_len_set(e, maplen, frag->size);
+
+ tf->control = BMU_OWN | BMU_SW | control | frag->size;
+ }
+ tf->control |= BMU_EOF | BMU_IRQ_EOF;
+ }
+	/* Make sure all the descriptors are written */
+ wmb();
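+	/* The start-of-frame descriptor is handed to the BMU last: the
+	 * hardware walks the chain from it, so every fragment descriptor
+	 * must already be owned by the BMU when it sees the frame.
+	 */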
+ td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
+ wmb();
+
+ skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
+
+ netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
+ "tx queued, slot %td, len %d\n",
+ e - skge->tx_ring.start, skb->len);
+
+ skge->tx_ring.to_use = e->next;
+ smp_wmb();
+
+ if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
+ netdev_dbg(dev, "transmit queue full\n");
+ netif_stop_queue(dev);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+
+/* Free resources associated with this ring element */
+static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
+ u32 control)
+{
+ struct pci_dev *pdev = skge->hw->pdev;
+
+ /* skb header vs. fragment */
+ if (control & BMU_STF)
+ pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
+ PCI_DMA_TODEVICE);
+
+ if (control & BMU_EOF) {
+ netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
+ "tx done slot %td\n", e - skge->tx_ring.start);
+
+ dev_kfree_skb(e->skb);
+ }
+}
+
+/* Free all buffers in transmit ring */
+static void skge_tx_clean(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_element *e;
+
+ for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
+ struct skge_tx_desc *td = e->desc;
+ skge_tx_free(skge, e, td->control);
+ td->control = 0;
+ }
+
+ skge->tx_ring.to_clean = e;
+}
+
+static void skge_tx_timeout(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+
+ netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n");
+
+ skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
+ skge_tx_clean(dev);
+ netif_wake_queue(dev);
+}
+
+static int skge_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int err;
+
+ if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
+ return -EINVAL;
+
+ if (!netif_running(dev)) {
+ dev->mtu = new_mtu;
+ return 0;
+ }
+
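+	/* Receive buffers are sized from the MTU when the ring is refilled
+	 * in skge_up(), so a running port needs a full restart cycle.
+	 */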
+ skge_down(dev);
+
+ dev->mtu = new_mtu;
+
+ err = skge_up(dev);
+ if (err)
+ dev_close(dev);
+
+ return err;
+}
+
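+/* 01:80:c2:00:00:01 is the IEEE 802.3x destination address of MAC
+ * control (PAUSE) frames; it must pass the multicast filter whenever
+ * receive flow control is active.
+ */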
+static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
+
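+/* Both MAC families hash multicast addresses into a 64-bit filter:
+ * Genesis (XMAC) uses the low six bits of the inverted little-endian
+ * CRC, while Yukon (GMAC, below) uses the low six bits of the
+ * big-endian CRC directly.
+ */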
+static void genesis_add_filter(u8 filter[8], const u8 *addr)
+{
+ u32 crc, bit;
+
+ crc = ether_crc_le(ETH_ALEN, addr);
+ bit = ~crc & 0x3f;
+ filter[bit/8] |= 1 << (bit%8);
+}
+
+static void genesis_set_multicast(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ struct netdev_hw_addr *ha;
+ u32 mode;
+ u8 filter[8];
+
+ mode = xm_read32(hw, port, XM_MODE);
+ mode |= XM_MD_ENA_HASH;
+ if (dev->flags & IFF_PROMISC)
+ mode |= XM_MD_ENA_PROM;
+ else
+ mode &= ~XM_MD_ENA_PROM;
+
+ if (dev->flags & IFF_ALLMULTI)
+ memset(filter, 0xff, sizeof(filter));
+ else {
+ memset(filter, 0, sizeof(filter));
+
+ if (skge->flow_status == FLOW_STAT_REM_SEND ||
+ skge->flow_status == FLOW_STAT_SYMMETRIC)
+ genesis_add_filter(filter, pause_mc_addr);
+
+ netdev_for_each_mc_addr(ha, dev)
+ genesis_add_filter(filter, ha->addr);
+ }
+
+ xm_write32(hw, port, XM_MODE, mode);
+ xm_outhash(hw, port, XM_HSM, filter);
+}
+
+static void yukon_add_filter(u8 filter[8], const u8 *addr)
+{
+ u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f;
+ filter[bit/8] |= 1 << (bit%8);
+}
+
+static void yukon_set_multicast(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ int port = skge->port;
+ struct netdev_hw_addr *ha;
+ int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
+ skge->flow_status == FLOW_STAT_SYMMETRIC);
+ u16 reg;
+ u8 filter[8];
+
+ memset(filter, 0, sizeof(filter));
+
+ reg = gma_read16(hw, port, GM_RX_CTRL);
+ reg |= GM_RXCR_UCF_ENA;
+
+ if (dev->flags & IFF_PROMISC) /* promiscuous */
+ reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
+ else if (dev->flags & IFF_ALLMULTI) /* all multicast */
+ memset(filter, 0xff, sizeof(filter));
+ else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */
+ reg &= ~GM_RXCR_MCF_ENA;
+ else {
+ reg |= GM_RXCR_MCF_ENA;
+
+ if (rx_pause)
+ yukon_add_filter(filter, pause_mc_addr);
+
+ netdev_for_each_mc_addr(ha, dev)
+ yukon_add_filter(filter, ha->addr);
+ }
+
+ gma_write16(hw, port, GM_MC_ADDR_H1,
+ (u16)filter[0] | ((u16)filter[1] << 8));
+ gma_write16(hw, port, GM_MC_ADDR_H2,
+ (u16)filter[2] | ((u16)filter[3] << 8));
+ gma_write16(hw, port, GM_MC_ADDR_H3,
+ (u16)filter[4] | ((u16)filter[5] << 8));
+ gma_write16(hw, port, GM_MC_ADDR_H4,
+ (u16)filter[6] | ((u16)filter[7] << 8));
+
+ gma_write16(hw, port, GM_RX_CTRL, reg);
+}
+
+static inline u16 phy_length(const struct skge_hw *hw, u32 status)
+{
+ if (is_genesis(hw))
+ return status >> XMR_FS_LEN_SHIFT;
+ else
+ return status >> GMR_FS_LEN_SHIFT;
+}
+
+static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
+{
+ if (is_genesis(hw))
+ return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0;
+ else
+ return (status & GMR_FS_ANY_ERR) ||
+ (status & GMR_FS_RX_OK) == 0;
+}
+
+static void skge_set_multicast(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+
+ if (is_genesis(skge->hw))
+ genesis_set_multicast(dev);
+ else
+ yukon_set_multicast(dev);
+}
+
+
+/* Get receive buffer from descriptor.
+ * Handles copying of small buffers and reallocation failures.
+ */
+static struct sk_buff *skge_rx_get(struct net_device *dev,
+ struct skge_element *e,
+ u32 control, u32 status, u16 csum)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct sk_buff *skb;
+ u16 len = control & BMU_BBC;
+
+ netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev,
+ "rx slot %td status 0x%x len %d\n",
+ e - skge->rx_ring.start, status, len);
+
+ if (len > skge->rx_buf_size)
+ goto error;
+
+ if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
+ goto error;
+
+ if (bad_phy_status(skge->hw, status))
+ goto error;
+
+ if (phy_length(skge->hw, status) != len)
+ goto error;
+
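+	/* Copybreak: small frames are copied into a fresh skb so the large
+	 * receive buffer can be recycled in place; for bigger frames it is
+	 * cheaper to hand the buffer up the stack and map a replacement.
+	 * Either allocation failure recycles the buffer and drops the frame.
+	 */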
+ if (len < RX_COPY_THRESHOLD) {
+ skb = netdev_alloc_skb_ip_align(dev, len);
+ if (!skb)
+ goto resubmit;
+
+ pci_dma_sync_single_for_cpu(skge->hw->pdev,
+ dma_unmap_addr(e, mapaddr),
+ len, PCI_DMA_FROMDEVICE);
+ skb_copy_from_linear_data(e->skb, skb->data, len);
+ pci_dma_sync_single_for_device(skge->hw->pdev,
+ dma_unmap_addr(e, mapaddr),
+ len, PCI_DMA_FROMDEVICE);
+ skge_rx_reuse(e, skge->rx_buf_size);
+ } else {
+ struct sk_buff *nskb;
+
+ nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
+ if (!nskb)
+ goto resubmit;
+
+ pci_unmap_single(skge->hw->pdev,
+ dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
+ PCI_DMA_FROMDEVICE);
+ skb = e->skb;
+ prefetch(skb->data);
+ skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
+ }
+
+ skb_put(skb, len);
+
+ if (dev->features & NETIF_F_RXCSUM) {
+ skb->csum = csum;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ return skb;
+error:
+
+ netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev,
+ "rx err, slot %td control 0x%x status 0x%x\n",
+ e - skge->rx_ring.start, control, status);
+
+ if (is_genesis(skge->hw)) {
+ if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
+ dev->stats.rx_length_errors++;
+ if (status & XMR_FS_FRA_ERR)
+ dev->stats.rx_frame_errors++;
+ if (status & XMR_FS_FCS_ERR)
+ dev->stats.rx_crc_errors++;
+ } else {
+ if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
+ dev->stats.rx_length_errors++;
+ if (status & GMR_FS_FRAGMENT)
+ dev->stats.rx_frame_errors++;
+ if (status & GMR_FS_CRC_ERR)
+ dev->stats.rx_crc_errors++;
+ }
+
+resubmit:
+ skge_rx_reuse(e, skge->rx_buf_size);
+ return NULL;
+}
+
+/* Free all buffers in Tx ring which are no longer owned by device */
+static void skge_tx_done(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_ring *ring = &skge->tx_ring;
+ struct skge_element *e;
+
+ skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
+
+ for (e = ring->to_clean; e != ring->to_use; e = e->next) {
+ u32 control = ((const struct skge_tx_desc *) e->desc)->control;
+
+ if (control & BMU_OWN)
+ break;
+
+ skge_tx_free(skge, e, control);
+ }
+ skge->tx_ring.to_clean = e;
+
+	/* Can run lockless until we need to synchronize to restart the queue. */
+ smp_mb();
+
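+	/* Re-check under the tx lock: the xmit path can refill the ring and
+	 * stop the queue between the lockless test and taking the lock, so
+	 * only wake the queue when both conditions still hold.
+	 */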
+ if (unlikely(netif_queue_stopped(dev) &&
+ skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
+ netif_tx_lock(dev);
+ if (unlikely(netif_queue_stopped(dev) &&
+ skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
+ netif_wake_queue(dev);
+ }
+ netif_tx_unlock(dev);
+ }
+}
+
+static int skge_poll(struct napi_struct *napi, int to_do)
+{
+ struct skge_port *skge = container_of(napi, struct skge_port, napi);
+ struct net_device *dev = skge->netdev;
+ struct skge_hw *hw = skge->hw;
+ struct skge_ring *ring = &skge->rx_ring;
+ struct skge_element *e;
+ int work_done = 0;
+
+ skge_tx_done(dev);
+
+ skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
+
+ for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
+ struct skge_rx_desc *rd = e->desc;
+ struct sk_buff *skb;
+ u32 control;
+
+ rmb();
+ control = rd->control;
+ if (control & BMU_OWN)
+ break;
+
+ skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
+ if (likely(skb)) {
+ napi_gro_receive(napi, skb);
+ ++work_done;
+ }
+ }
+ ring->to_clean = e;
+
+ /* restart receiver */
+ wmb();
+ skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
+
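+	/* Budget not exhausted, so all pending work is done: leave polling
+	 * mode and re-enable this port's interrupt sources.  hw_lock
+	 * serializes the mask update against skge_intr().
+	 */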
+ if (work_done < to_do) {
+ unsigned long flags;
+
+ napi_gro_flush(napi);
+ spin_lock_irqsave(&hw->hw_lock, flags);
+ __napi_complete(napi);
+ hw->intr_mask |= napimask[skge->port];
+ skge_write32(hw, B0_IMSK, hw->intr_mask);
+ skge_read32(hw, B0_IMSK);
+ spin_unlock_irqrestore(&hw->hw_lock, flags);
+ }
+
+ return work_done;
+}
+
+/* Parity errors seem to happen when Genesis is connected to a switch
+ * with no other ports present. Heartbeat error??
+ */
+static void skge_mac_parity(struct skge_hw *hw, int port)
+{
+ struct net_device *dev = hw->dev[port];
+
+ ++dev->stats.tx_heartbeat_errors;
+
+ if (is_genesis(hw))
+ skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
+ MFF_CLR_PERR);
+ else
+ /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
+ skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T),
+ (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
+ ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
+}
+
+static void skge_mac_intr(struct skge_hw *hw, int port)
+{
+ if (is_genesis(hw))
+ genesis_mac_intr(hw, port);
+ else
+ yukon_mac_intr(hw, port);
+}
+
+/* Handle device specific framing and timeout interrupts */
+static void skge_error_irq(struct skge_hw *hw)
+{
+ struct pci_dev *pdev = hw->pdev;
+ u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);
+
+ if (is_genesis(hw)) {
+ /* clear xmac errors */
+ if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
+ skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT);
+ if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
+ skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT);
+ } else {
+ /* Timestamp (unused) overflow */
+ if (hwstatus & IS_IRQ_TIST_OV)
+ skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
+ }
+
+ if (hwstatus & IS_RAM_RD_PAR) {
+ dev_err(&pdev->dev, "Ram read data parity error\n");
+ skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
+ }
+
+ if (hwstatus & IS_RAM_WR_PAR) {
+ dev_err(&pdev->dev, "Ram write data parity error\n");
+ skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
+ }
+
+ if (hwstatus & IS_M1_PAR_ERR)
+ skge_mac_parity(hw, 0);
+
+ if (hwstatus & IS_M2_PAR_ERR)
+ skge_mac_parity(hw, 1);
+
+ if (hwstatus & IS_R1_PAR_ERR) {
+ dev_err(&pdev->dev, "%s: receive queue parity error\n",
+ hw->dev[0]->name);
+ skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
+ }
+
+ if (hwstatus & IS_R2_PAR_ERR) {
+ dev_err(&pdev->dev, "%s: receive queue parity error\n",
+ hw->dev[1]->name);
+ skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
+ }
+
+ if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
+ u16 pci_status, pci_cmd;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+ pci_read_config_word(pdev, PCI_STATUS, &pci_status);
+
+ dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n",
+ pci_cmd, pci_status);
+
+ /* Write the error bits back to clear them. */
+ pci_status &= PCI_STATUS_ERROR_BITS;
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ pci_write_config_word(pdev, PCI_COMMAND,
+ pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+ pci_write_config_word(pdev, PCI_STATUS, pci_status);
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+		/* If the error is still set, just ignore it */
+ hwstatus = skge_read32(hw, B0_HWE_ISRC);
+ if (hwstatus & IS_IRQ_STAT) {
+			dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring it)\n");
+ hw->intr_mask &= ~IS_HW_ERR;
+ }
+ }
+}
+
+/*
+ * Interrupts from the PHY are handled in a tasklet (softirq)
+ * because accessing PHY registers requires a spin wait, which might
+ * cause excess interrupt latency.
+ */
+static void skge_extirq(unsigned long arg)
+{
+ struct skge_hw *hw = (struct skge_hw *) arg;
+ int port;
+
+ for (port = 0; port < hw->ports; port++) {
+ struct net_device *dev = hw->dev[port];
+
+ if (netif_running(dev)) {
+ struct skge_port *skge = netdev_priv(dev);
+
+ spin_lock(&hw->phy_lock);
+ if (!is_genesis(hw))
+ yukon_phy_intr(skge);
+ else if (hw->phy_type == SK_PHY_BCOM)
+ bcom_phy_intr(skge);
+ spin_unlock(&hw->phy_lock);
+ }
+ }
+
+ spin_lock_irq(&hw->hw_lock);
+ hw->intr_mask |= IS_EXT_REG;
+ skge_write32(hw, B0_IMSK, hw->intr_mask);
+ skge_read32(hw, B0_IMSK);
+ spin_unlock_irq(&hw->hw_lock);
+}
+
+static irqreturn_t skge_intr(int irq, void *dev_id)
+{
+ struct skge_hw *hw = dev_id;
+ u32 status;
+ int handled = 0;
+
+ spin_lock(&hw->hw_lock);
+ /* Reading this register masks IRQ */
+ status = skge_read32(hw, B0_SP_ISRC);
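+	/* status == 0 means a shared interrupt that is not ours; ~0 means
+	 * register reads are failing, typically a removed adapter.
+	 */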
+ if (status == 0 || status == ~0)
+ goto out;
+
+ handled = 1;
+ status &= hw->intr_mask;
+ if (status & IS_EXT_REG) {
+ hw->intr_mask &= ~IS_EXT_REG;
+ tasklet_schedule(&hw->phy_task);
+ }
+
+ if (status & (IS_XA1_F|IS_R1_F)) {
+ struct skge_port *skge = netdev_priv(hw->dev[0]);
+ hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
+ napi_schedule(&skge->napi);
+ }
+
+ if (status & IS_PA_TO_TX1)
+ skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
+
+ if (status & IS_PA_TO_RX1) {
+ ++hw->dev[0]->stats.rx_over_errors;
+ skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
+ }
+
+ if (status & IS_MAC1)
+ skge_mac_intr(hw, 0);
+
+ if (hw->dev[1]) {
+ struct skge_port *skge = netdev_priv(hw->dev[1]);
+
+ if (status & (IS_XA2_F|IS_R2_F)) {
+ hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
+ napi_schedule(&skge->napi);
+ }
+
+ if (status & IS_PA_TO_RX2) {
+ ++hw->dev[1]->stats.rx_over_errors;
+ skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
+ }
+
+ if (status & IS_PA_TO_TX2)
+ skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
+
+ if (status & IS_MAC2)
+ skge_mac_intr(hw, 1);
+ }
+
+ if (status & IS_HW_ERR)
+ skge_error_irq(hw);
+
+ skge_write32(hw, B0_IMSK, hw->intr_mask);
+ skge_read32(hw, B0_IMSK);
+out:
+ spin_unlock(&hw->hw_lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void skge_netpoll(struct net_device *dev)
+{
+ struct skge_port *skge = netdev_priv(dev);
+
+ disable_irq(dev->irq);
+ skge_intr(dev->irq, skge->hw);
+ enable_irq(dev->irq);
+}
+#endif
+
+static int skge_set_mac_address(struct net_device *dev, void *p)
+{
+ struct skge_port *skge = netdev_priv(dev);
+ struct skge_hw *hw = skge->hw;
+ unsigned port = skge->port;
+ const struct sockaddr *addr = p;
+ u16 ctrl;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+
+ if (!netif_running(dev)) {
+ memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
+ memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
+ } else {
+ /* disable Rx */
+ spin_lock_bh(&hw->phy_lock);
+ ctrl = gma_read16(hw, port, GM_GP_CTRL);
+ gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA);
+
+ memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
+ memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
+
+ if (is_genesis(hw))
+ xm_outaddr(hw, port, XM_SA, dev->dev_addr);
+ else {
+ gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
+ gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
+ }
+
+ gma_write16(hw, port, GM_GP_CTRL, ctrl);
+ spin_unlock_bh(&hw->phy_lock);
+ }
+
+ return 0;
+}
+
+static const struct {
+ u8 id;
+ const char *name;
+} skge_chips[] = {
+ { CHIP_ID_GENESIS, "Genesis" },
+ { CHIP_ID_YUKON, "Yukon" },
+ { CHIP_ID_YUKON_LITE, "Yukon-Lite"},
+ { CHIP_ID_YUKON_LP, "Yukon-LP"},
+};
+
+static const char *skge_board_name(const struct skge_hw *hw)
+{
+ int i;
+ static char buf[16];
+
+ for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
+ if (skge_chips[i].id == hw->chip_id)
+ return skge_chips[i].name;
+
+ snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id);
+ return buf;
+}
+
+
+/*
+ * Setup the board data structure, but don't bring up
+ * the port(s)
+ */
+static int skge_reset(struct skge_hw *hw)
+{
+ u32 reg;
+ u16 ctst, pci_status;
+ u8 t8, mac_cfg, pmd_type;
+ int i;
+
+ ctst = skge_read16(hw, B0_CTST);
+
+ /* do a SW reset */
+ skge_write8(hw, B0_CTST, CS_RST_SET);
+ skge_write8(hw, B0_CTST, CS_RST_CLR);
+
+ /* clear PCI errors, if any */
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ skge_write8(hw, B2_TST_CTRL2, 0);
+
+ pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
+ pci_write_config_word(hw->pdev, PCI_STATUS,
+ pci_status | PCI_STATUS_ERROR_BITS);
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+ skge_write8(hw, B0_CTST, CS_MRST_CLR);
+
+ /* restore CLK_RUN bits (for Yukon-Lite) */
+ skge_write16(hw, B0_CTST,
+ ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));
+
+ hw->chip_id = skge_read8(hw, B2_CHIP_ID);
+ hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
+ pmd_type = skge_read8(hw, B2_PMD_TYP);
+ hw->copper = (pmd_type == 'T' || pmd_type == '1');
+
+ switch (hw->chip_id) {
+ case CHIP_ID_GENESIS:
+#ifdef CONFIG_SKGE_GENESIS
+ switch (hw->phy_type) {
+ case SK_PHY_XMAC:
+ hw->phy_addr = PHY_ADDR_XMAC;
+ break;
+ case SK_PHY_BCOM:
+ hw->phy_addr = PHY_ADDR_BCOM;
+ break;
+ default:
+ dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n",
+ hw->phy_type);
+ return -EOPNOTSUPP;
+ }
+ break;
+#else
+ dev_err(&hw->pdev->dev, "Genesis chip detected but not configured\n");
+ return -EOPNOTSUPP;
+#endif
+
+ case CHIP_ID_YUKON:
+ case CHIP_ID_YUKON_LITE:
+ case CHIP_ID_YUKON_LP:
+ if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
+ hw->copper = 1;
+
+ hw->phy_addr = PHY_ADDR_MARV;
+ break;
+
+ default:
+ dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
+ hw->chip_id);
+ return -EOPNOTSUPP;
+ }
+
+ mac_cfg = skge_read8(hw, B2_MAC_CFG);
+ hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
+ hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;
+
+	/* read the adapter's RAM size */
+ t8 = skge_read8(hw, B2_E_0);
+ if (is_genesis(hw)) {
+ if (t8 == 3) {
+ /* special case: 4 x 64k x 36, offset = 0x80000 */
+ hw->ram_size = 0x100000;
+ hw->ram_offset = 0x80000;
+ } else
+ hw->ram_size = t8 * 512;
+ } else if (t8 == 0)
+ hw->ram_size = 0x20000;
+ else
+ hw->ram_size = t8 * 4096;
+
+ hw->intr_mask = IS_HW_ERR;
+
+	/* Use the PHY IRQ for everything except fiber-based Genesis boards */
+ if (!(is_genesis(hw) && hw->phy_type == SK_PHY_XMAC))
+ hw->intr_mask |= IS_EXT_REG;
+
+ if (is_genesis(hw))
+ genesis_init(hw);
+ else {
+ /* switch power to VCC (WA for VAUX problem) */
+ skge_write8(hw, B0_POWER_CTRL,
+ PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
+
+ /* avoid boards with stuck Hardware error bits */
+ if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
+ (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
+ dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n");
+ hw->intr_mask &= ~IS_HW_ERR;
+ }
+
+ /* Clear PHY COMA */
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg);
+ reg &= ~PCI_PHY_COMA;
+ pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg);
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+ for (i = 0; i < hw->ports; i++) {
+ skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
+ skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
+ }
+ }
+
+ /* turn off hardware timer (unused) */
+ skge_write8(hw, B2_TI_CTRL, TIM_STOP);
+ skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
+ skge_write8(hw, B0_LED, LED_STAT_ON);
+
+ /* enable the Tx Arbiters */
+ for (i = 0; i < hw->ports; i++)
+ skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
+
+ /* Initialize ram interface */
+ skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);
+
+ skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53);
+ skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53);
+
+ skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK);
+
+	/* Set interrupt moderation for transmit only;
+	 * receive interrupts are avoided by NAPI
+	 */
+ skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F);
+ skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
+ skge_write32(hw, B2_IRQM_CTRL, TIM_START);
+
+	/* Leave the IRQ disabled until the first port is brought up. */
+ skge_write32(hw, B0_IMSK, 0);
+
+ for (i = 0; i < hw->ports; i++) {
+ if (is_genesis(hw))
+ genesis_reset(hw, i);
+ else
+ yukon_reset(hw, i);
+ }
+
+ return 0;
+}
+
+
+#ifdef CONFIG_SKGE_DEBUG
+
+static struct dentry *skge_debug;
+
+static int skge_debug_show(struct seq_file *seq, void *v)
+{
+ struct net_device *dev = seq->private;
+ const struct skge_port *skge = netdev_priv(dev);
+ const struct skge_hw *hw = skge->hw;
+ const struct skge_element *e;
+
+ if (!netif_running(dev))
+ return -ENETDOWN;
+
+ seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC),
+ skge_read32(hw, B0_IMSK));
+
+ seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring));
+ for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
+ const struct skge_tx_desc *t = e->desc;
+ seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n",
+ t->control, t->dma_hi, t->dma_lo, t->status,
+ t->csum_offs, t->csum_write, t->csum_start);
+ }
+
+ seq_printf(seq, "\nRx Ring:\n");
+ for (e = skge->rx_ring.to_clean; ; e = e->next) {
+ const struct skge_rx_desc *r = e->desc;
+
+ if (r->control & BMU_OWN)
+ break;
+
+ seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n",
+ r->control, r->dma_hi, r->dma_lo, r->status,
+ r->timestamp, r->csum1, r->csum1_start);
+ }
+
+ return 0;
+}
+
+static int skge_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, skge_debug_show, inode->i_private);
+}
+
+static const struct file_operations skge_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = skge_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * Use network device events to create/remove/rename
+ * debugfs file entries
+ */
+static int skge_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = ptr;
+ struct skge_port *skge;
+ struct dentry *d;
+
+ if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug)
+ goto done;
+
+ skge = netdev_priv(dev);
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ if (skge->debugfs) {
+ d = debugfs_rename(skge_debug, skge->debugfs,
+ skge_debug, dev->name);
+ if (d)
+ skge->debugfs = d;
+ else {
+ netdev_info(dev, "rename failed\n");
+ debugfs_remove(skge->debugfs);
+ }
+ }
+ break;
+
+ case NETDEV_GOING_DOWN:
+ if (skge->debugfs) {
+ debugfs_remove(skge->debugfs);
+ skge->debugfs = NULL;
+ }
+ break;
+
+ case NETDEV_UP:
+ d = debugfs_create_file(dev->name, S_IRUGO,
+ skge_debug, dev,
+ &skge_debug_fops);
+ if (!d || IS_ERR(d))
+ netdev_info(dev, "debugfs create failed\n");
+ else
+ skge->debugfs = d;
+ break;
+ }
+
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block skge_notifier = {
+ .notifier_call = skge_device_event,
+};
+
+
+static __init void skge_debug_init(void)
+{
+ struct dentry *ent;
+
+ ent = debugfs_create_dir("skge", NULL);
+ if (!ent || IS_ERR(ent)) {
+ pr_info("debugfs create directory failed\n");
+ return;
+ }
+
+ skge_debug = ent;
+ register_netdevice_notifier(&skge_notifier);
+}
+
+static __exit void skge_debug_cleanup(void)
+{
+ if (skge_debug) {
+ unregister_netdevice_notifier(&skge_notifier);
+ debugfs_remove(skge_debug);
+ skge_debug = NULL;
+ }
+}
+
+#else
+#define skge_debug_init()
+#define skge_debug_cleanup()
+#endif
+
+static const struct net_device_ops skge_netdev_ops = {
+ .ndo_open = skge_up,
+ .ndo_stop = skge_down,
+ .ndo_start_xmit = skge_xmit_frame,
+ .ndo_do_ioctl = skge_ioctl,
+ .ndo_get_stats = skge_get_stats,
+ .ndo_tx_timeout = skge_tx_timeout,
+ .ndo_change_mtu = skge_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = skge_set_multicast,
+ .ndo_set_mac_address = skge_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = skge_netpoll,
+#endif
+};
+
+
+/* Initialize network device */
+static struct net_device *skge_devinit(struct skge_hw *hw, int port,
+ int highmem)
+{
+ struct skge_port *skge;
+ struct net_device *dev = alloc_etherdev(sizeof(*skge));
+
+ if (!dev) {
+ dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
+ return NULL;
+ }
+
+ SET_NETDEV_DEV(dev, &hw->pdev->dev);
+ dev->netdev_ops = &skge_netdev_ops;
+ dev->ethtool_ops = &skge_ethtool_ops;
+ dev->watchdog_timeo = TX_WATCHDOG;
+ dev->irq = hw->pdev->irq;
+
+ if (highmem)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ skge = netdev_priv(dev);
+ netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT);
+ skge->netdev = dev;
+ skge->hw = hw;
+ skge->msg_enable = netif_msg_init(debug, default_msg);
+
+ skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
+ skge->rx_ring.count = DEFAULT_RX_RING_SIZE;
+
+ /* Auto speed and flow control */
+ skge->autoneg = AUTONEG_ENABLE;
+ skge->flow_control = FLOW_MODE_SYM_OR_REM;
+ skge->duplex = -1;
+ skge->speed = -1;
+ skge->advertising = skge_supported_modes(hw);
+
+ if (device_can_wakeup(&hw->pdev->dev)) {
+ skge->wol = wol_supported(hw) & WAKE_MAGIC;
+ device_set_wakeup_enable(&hw->pdev->dev, skge->wol);
+ }
+
+ hw->dev[port] = dev;
+
+ skge->port = port;
+
+ /* Only used for Genesis XMAC */
+ if (is_genesis(hw))
+ setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);
+ else {
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM;
+ dev->features |= dev->hw_features;
+ }
+
+ /* read the mac address */
+ memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ return dev;
+}
+
+static void __devinit skge_show_addr(struct net_device *dev)
+{
+ const struct skge_port *skge = netdev_priv(dev);
+
+ netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
+}
+
+static int only_32bit_dma;
+
+static int __devinit skge_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev, *dev1;
+ struct skge_hw *hw;
+ int err, using_dac = 0;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
+ goto err_out;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "cannot obtain PCI resources\n");
+ goto err_out_disable_pdev;
+ }
+
+ pci_set_master(pdev);
+
+ if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ using_dac = 1;
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ using_dac = 0;
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ }
+
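+	/* Prefer 64-bit DMA; using_dac later enables NETIF_F_HIGHDMA on the
+	 * netdevs.  Otherwise fall back to a 32-bit mask, and fail the probe
+	 * only if neither mask is usable.
+	 */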
+ if (err) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto err_out_free_regions;
+ }
+
+#ifdef __BIG_ENDIAN
+ /* byte swap descriptors in hardware */
+ {
+ u32 reg;
+
+ pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
+ reg |= PCI_REV_DESC;
+ pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
+ }
+#endif
+
+ err = -ENOMEM;
+ /* space for skge@pci:0000:04:00.0 */
+ hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
+ + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
+ if (!hw) {
+ dev_err(&pdev->dev, "cannot allocate hardware struct\n");
+ goto err_out_free_regions;
+ }
+ sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
+
+ hw->pdev = pdev;
+ spin_lock_init(&hw->hw_lock);
+ spin_lock_init(&hw->phy_lock);
+ tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw);
+
+ hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
+ if (!hw->regs) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ goto err_out_free_hw;
+ }
+
+ err = skge_reset(hw);
+ if (err)
+ goto err_out_iounmap;
+
+ pr_info("%s addr 0x%llx irq %d chip %s rev %d\n",
+ DRV_VERSION,
+ (unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
+ skge_board_name(hw), hw->chip_rev);
+
+ dev = skge_devinit(hw, 0, using_dac);
+ if (!dev)
+ goto err_out_led_off;
+
+	/* Some motherboards are broken and have a zero MAC address in ROM. */
+ if (!is_valid_ether_addr(dev->dev_addr))
+ dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n");
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register net device\n");
+ goto err_out_free_netdev;
+ }
+
+ skge_show_addr(dev);
+
+ if (hw->ports > 1) {
+ dev1 = skge_devinit(hw, 1, using_dac);
+ if (!dev1) {
+ err = -ENOMEM;
+ goto err_out_unregister;
+ }
+
+ err = register_netdev(dev1);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register second net device\n");
+ goto err_out_free_dev1;
+ }
+
+ err = request_irq(pdev->irq, skge_intr, IRQF_SHARED,
+ hw->irq_name, hw);
+ if (err) {
+ dev_err(&pdev->dev, "cannot assign irq %d\n",
+ pdev->irq);
+ goto err_out_unregister_dev1;
+ }
+
+ skge_show_addr(dev1);
+ }
+ pci_set_drvdata(pdev, hw);
+
+ return 0;
+
+err_out_unregister_dev1:
+ unregister_netdev(dev1);
+err_out_free_dev1:
+ free_netdev(dev1);
+err_out_unregister:
+ unregister_netdev(dev);
+err_out_free_netdev:
+ free_netdev(dev);
+err_out_led_off:
+ skge_write16(hw, B0_LED, LED_STAT_OFF);
+err_out_iounmap:
+ iounmap(hw->regs);
+err_out_free_hw:
+ kfree(hw);
+err_out_free_regions:
+ pci_release_regions(pdev);
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+err_out:
+ return err;
+}
+
+static void __devexit skge_remove(struct pci_dev *pdev)
+{
+ struct skge_hw *hw = pci_get_drvdata(pdev);
+ struct net_device *dev0, *dev1;
+
+ if (!hw)
+ return;
+
+ dev1 = hw->dev[1];
+ if (dev1)
+ unregister_netdev(dev1);
+ dev0 = hw->dev[0];
+ unregister_netdev(dev0);
+
+ tasklet_disable(&hw->phy_task);
+
+ spin_lock_irq(&hw->hw_lock);
+ hw->intr_mask = 0;
+
+	if (hw->ports > 1) {
+		skge_write32(hw, B0_IMSK, 0);
+		skge_read32(hw, B0_IMSK);
+	}
+ spin_unlock_irq(&hw->hw_lock);
+
+ skge_write16(hw, B0_LED, LED_STAT_OFF);
+ skge_write8(hw, B0_CTST, CS_RST_SET);
+
+ if (hw->ports > 1)
+ free_irq(pdev->irq, hw);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ if (dev1)
+ free_netdev(dev1);
+ free_netdev(dev0);
+
+ iounmap(hw->regs);
+ kfree(hw);
+ pci_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM
+static int skge_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct skge_hw *hw = pci_get_drvdata(pdev);
+ int i;
+
+ if (!hw)
+ return 0;
+
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ struct skge_port *skge = netdev_priv(dev);
+
+ if (netif_running(dev))
+ skge_down(dev);
+
+ if (skge->wol)
+ skge_wol_init(skge);
+ }
+
+ skge_write32(hw, B0_IMSK, 0);
+
+ return 0;
+}
+
+static int skge_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct skge_hw *hw = pci_get_drvdata(pdev);
+ int i, err;
+
+ if (!hw)
+ return 0;
+
+ err = skge_reset(hw);
+ if (err)
+ goto out;
+
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+
+ if (netif_running(dev)) {
+ err = skge_up(dev);
+
+ if (err) {
+ netdev_err(dev, "could not up: %d\n", err);
+ dev_close(dev);
+ goto out;
+ }
+ }
+ }
+out:
+ return err;
+}
+
+static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume);
+#define SKGE_PM_OPS (&skge_pm_ops)
+
+#else
+
+#define SKGE_PM_OPS NULL
+#endif
+
+static void skge_shutdown(struct pci_dev *pdev)
+{
+ struct skge_hw *hw = pci_get_drvdata(pdev);
+ int i;
+
+ if (!hw)
+ return;
+
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ struct skge_port *skge = netdev_priv(dev);
+
+ if (skge->wol)
+ skge_wol_init(skge);
+ }
+
+ pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static struct pci_driver skge_driver = {
+ .name = DRV_NAME,
+ .id_table = skge_id_table,
+ .probe = skge_probe,
+ .remove = __devexit_p(skge_remove),
+ .shutdown = skge_shutdown,
+ .driver.pm = SKGE_PM_OPS,
+};
+
+static struct dmi_system_id skge_32bit_dma_boards[] = {
+ {
+ .ident = "Gigabyte nForce boards",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"),
+ DMI_MATCH(DMI_BOARD_NAME, "nForce"),
+ },
+ },
+ {}
+};
+
+static int __init skge_init_module(void)
+{
+ if (dmi_check_system(skge_32bit_dma_boards))
+ only_32bit_dma = 1;
+ skge_debug_init();
+ return pci_register_driver(&skge_driver);
+}
+
+static void __exit skge_cleanup_module(void)
+{
+ pci_unregister_driver(&skge_driver);
+ skge_debug_cleanup();
+}
+
+module_init(skge_init_module);
+module_exit(skge_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/skge.h b/drivers/net/ethernet/marvell/skge.h
new file mode 100644
index 000000000000..a2eb34115844
--- /dev/null
+++ b/drivers/net/ethernet/marvell/skge.h
@@ -0,0 +1,2584 @@
+/*
+ * Definitions for the new Marvell Yukon / SysKonnect driver.
+ */
+#ifndef _SKGE_H
+#define _SKGE_H
+#include <linux/interrupt.h>
+
+/* PCI config registers */
+#define PCI_DEV_REG1 0x40
+#define PCI_PHY_COMA 0x8000000
+#define PCI_VIO 0x2000000
+
+#define PCI_DEV_REG2 0x44
+#define  PCI_VPD_ROM_SZ	(7L<<14)	/* VPD ROM size 0=256, 1=512, ... */
+#define  PCI_REV_DESC	(1<<2)		/* Reverse Descriptor bytes */
+
+#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
+ PCI_STATUS_SIG_SYSTEM_ERROR | \
+ PCI_STATUS_REC_MASTER_ABORT | \
+ PCI_STATUS_REC_TARGET_ABORT | \
+ PCI_STATUS_PARITY)
+
+enum csr_regs {
+ B0_RAP = 0x0000,
+ B0_CTST = 0x0004,
+ B0_LED = 0x0006,
+ B0_POWER_CTRL = 0x0007,
+ B0_ISRC = 0x0008,
+ B0_IMSK = 0x000c,
+ B0_HWE_ISRC = 0x0010,
+ B0_HWE_IMSK = 0x0014,
+ B0_SP_ISRC = 0x0018,
+ B0_XM1_IMSK = 0x0020,
+ B0_XM1_ISRC = 0x0028,
+ B0_XM1_PHY_ADDR = 0x0030,
+ B0_XM1_PHY_DATA = 0x0034,
+ B0_XM2_IMSK = 0x0040,
+ B0_XM2_ISRC = 0x0048,
+ B0_XM2_PHY_ADDR = 0x0050,
+ B0_XM2_PHY_DATA = 0x0054,
+ B0_R1_CSR = 0x0060,
+ B0_R2_CSR = 0x0064,
+ B0_XS1_CSR = 0x0068,
+ B0_XA1_CSR = 0x006c,
+ B0_XS2_CSR = 0x0070,
+ B0_XA2_CSR = 0x0074,
+
+ B2_MAC_1 = 0x0100,
+ B2_MAC_2 = 0x0108,
+ B2_MAC_3 = 0x0110,
+ B2_CONN_TYP = 0x0118,
+ B2_PMD_TYP = 0x0119,
+ B2_MAC_CFG = 0x011a,
+ B2_CHIP_ID = 0x011b,
+ B2_E_0 = 0x011c,
+ B2_E_1 = 0x011d,
+ B2_E_2 = 0x011e,
+ B2_E_3 = 0x011f,
+ B2_FAR = 0x0120,
+ B2_FDP = 0x0124,
+ B2_LD_CTRL = 0x0128,
+ B2_LD_TEST = 0x0129,
+ B2_TI_INI = 0x0130,
+ B2_TI_VAL = 0x0134,
+ B2_TI_CTRL = 0x0138,
+ B2_TI_TEST = 0x0139,
+ B2_IRQM_INI = 0x0140,
+ B2_IRQM_VAL = 0x0144,
+ B2_IRQM_CTRL = 0x0148,
+ B2_IRQM_TEST = 0x0149,
+ B2_IRQM_MSK = 0x014c,
+ B2_IRQM_HWE_MSK = 0x0150,
+ B2_TST_CTRL1 = 0x0158,
+ B2_TST_CTRL2 = 0x0159,
+ B2_GP_IO = 0x015c,
+ B2_I2C_CTRL = 0x0160,
+ B2_I2C_DATA = 0x0164,
+ B2_I2C_IRQ = 0x0168,
+ B2_I2C_SW = 0x016c,
+ B2_BSC_INI = 0x0170,
+ B2_BSC_VAL = 0x0174,
+ B2_BSC_CTRL = 0x0178,
+ B2_BSC_STAT = 0x0179,
+ B2_BSC_TST = 0x017a,
+
+ B3_RAM_ADDR = 0x0180,
+ B3_RAM_DATA_LO = 0x0184,
+ B3_RAM_DATA_HI = 0x0188,
+ B3_RI_WTO_R1 = 0x0190,
+ B3_RI_WTO_XA1 = 0x0191,
+ B3_RI_WTO_XS1 = 0x0192,
+ B3_RI_RTO_R1 = 0x0193,
+ B3_RI_RTO_XA1 = 0x0194,
+ B3_RI_RTO_XS1 = 0x0195,
+ B3_RI_WTO_R2 = 0x0196,
+ B3_RI_WTO_XA2 = 0x0197,
+ B3_RI_WTO_XS2 = 0x0198,
+ B3_RI_RTO_R2 = 0x0199,
+ B3_RI_RTO_XA2 = 0x019a,
+ B3_RI_RTO_XS2 = 0x019b,
+ B3_RI_TO_VAL = 0x019c,
+ B3_RI_CTRL = 0x01a0,
+ B3_RI_TEST = 0x01a2,
+ B3_MA_TOINI_RX1 = 0x01b0,
+ B3_MA_TOINI_RX2 = 0x01b1,
+ B3_MA_TOINI_TX1 = 0x01b2,
+ B3_MA_TOINI_TX2 = 0x01b3,
+ B3_MA_TOVAL_RX1 = 0x01b4,
+ B3_MA_TOVAL_RX2 = 0x01b5,
+ B3_MA_TOVAL_TX1 = 0x01b6,
+ B3_MA_TOVAL_TX2 = 0x01b7,
+ B3_MA_TO_CTRL = 0x01b8,
+ B3_MA_TO_TEST = 0x01ba,
+ B3_MA_RCINI_RX1 = 0x01c0,
+ B3_MA_RCINI_RX2 = 0x01c1,
+ B3_MA_RCINI_TX1 = 0x01c2,
+ B3_MA_RCINI_TX2 = 0x01c3,
+ B3_MA_RCVAL_RX1 = 0x01c4,
+ B3_MA_RCVAL_RX2 = 0x01c5,
+ B3_MA_RCVAL_TX1 = 0x01c6,
+ B3_MA_RCVAL_TX2 = 0x01c7,
+ B3_MA_RC_CTRL = 0x01c8,
+ B3_MA_RC_TEST = 0x01ca,
+ B3_PA_TOINI_RX1 = 0x01d0,
+ B3_PA_TOINI_RX2 = 0x01d4,
+ B3_PA_TOINI_TX1 = 0x01d8,
+ B3_PA_TOINI_TX2 = 0x01dc,
+ B3_PA_TOVAL_RX1 = 0x01e0,
+ B3_PA_TOVAL_RX2 = 0x01e4,
+ B3_PA_TOVAL_TX1 = 0x01e8,
+ B3_PA_TOVAL_TX2 = 0x01ec,
+ B3_PA_CTRL = 0x01f0,
+ B3_PA_TEST = 0x01f2,
+};
+
+/* B0_CTST 16 bit Control/Status register */
+enum {
+	CS_CLK_RUN_HOT	= 1<<13,/* CLK_RUN hot mode (YUKON-Lite only) */
+ CS_CLK_RUN_RST = 1<<12,/* CLK_RUN reset (YUKON-Lite only) */
+ CS_CLK_RUN_ENA = 1<<11,/* CLK_RUN enable (YUKON-Lite only) */
+ CS_VAUX_AVAIL = 1<<10,/* VAUX available (YUKON only) */
+ CS_BUS_CLOCK = 1<<9, /* Bus Clock 0/1 = 33/66 MHz */
+ CS_BUS_SLOT_SZ = 1<<8, /* Slot Size 0/1 = 32/64 bit slot */
+ CS_ST_SW_IRQ = 1<<7, /* Set IRQ SW Request */
+ CS_CL_SW_IRQ = 1<<6, /* Clear IRQ SW Request */
+ CS_STOP_DONE = 1<<5, /* Stop Master is finished */
+ CS_STOP_MAST = 1<<4, /* Command Bit to stop the master */
+ CS_MRST_CLR = 1<<3, /* Clear Master reset */
+ CS_MRST_SET = 1<<2, /* Set Master reset */
+ CS_RST_CLR = 1<<1, /* Clear Software reset */
+ CS_RST_SET = 1, /* Set Software reset */
+
+/* B0_LED 8 Bit LED register */
+/* Bit 7.. 2: reserved */
+ LED_STAT_ON = 1<<1, /* Status LED on */
+ LED_STAT_OFF = 1, /* Status LED off */
+
+/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */
+ PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */
+ PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */
+ PC_VCC_ENA = 1<<5, /* Switch VCC Enable */
+ PC_VCC_DIS = 1<<4, /* Switch VCC Disable */
+ PC_VAUX_ON = 1<<3, /* Switch VAUX On */
+ PC_VAUX_OFF = 1<<2, /* Switch VAUX Off */
+ PC_VCC_ON = 1<<1, /* Switch VCC On */
+ PC_VCC_OFF = 1<<0, /* Switch VCC Off */
+};
+
+/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */
+enum {
+ IS_ALL_MSK = 0xbffffffful, /* All Interrupt bits */
+ IS_HW_ERR = 1<<31, /* Interrupt HW Error */
+ /* Bit 30: reserved */
+ IS_PA_TO_RX1 = 1<<29, /* Packet Arb Timeout Rx1 */
+ IS_PA_TO_RX2 = 1<<28, /* Packet Arb Timeout Rx2 */
+ IS_PA_TO_TX1 = 1<<27, /* Packet Arb Timeout Tx1 */
+ IS_PA_TO_TX2 = 1<<26, /* Packet Arb Timeout Tx2 */
+ IS_I2C_READY = 1<<25, /* IRQ on end of I2C Tx */
+ IS_IRQ_SW = 1<<24, /* SW forced IRQ */
+ IS_EXT_REG = 1<<23, /* IRQ from LM80 or PHY (GENESIS only) */
+ /* IRQ from PHY (YUKON only) */
+ IS_TIMINT = 1<<22, /* IRQ from Timer */
+ IS_MAC1 = 1<<21, /* IRQ from MAC 1 */
+ IS_LNK_SYNC_M1 = 1<<20, /* Link Sync Cnt wrap MAC 1 */
+ IS_MAC2 = 1<<19, /* IRQ from MAC 2 */
+ IS_LNK_SYNC_M2 = 1<<18, /* Link Sync Cnt wrap MAC 2 */
+/* Receive Queue 1 */
+ IS_R1_B = 1<<17, /* Q_R1 End of Buffer */
+ IS_R1_F = 1<<16, /* Q_R1 End of Frame */
+ IS_R1_C = 1<<15, /* Q_R1 Encoding Error */
+/* Receive Queue 2 */
+ IS_R2_B = 1<<14, /* Q_R2 End of Buffer */
+ IS_R2_F = 1<<13, /* Q_R2 End of Frame */
+ IS_R2_C = 1<<12, /* Q_R2 Encoding Error */
+/* Synchronous Transmit Queue 1 */
+ IS_XS1_B = 1<<11, /* Q_XS1 End of Buffer */
+ IS_XS1_F = 1<<10, /* Q_XS1 End of Frame */
+ IS_XS1_C = 1<<9, /* Q_XS1 Encoding Error */
+/* Asynchronous Transmit Queue 1 */
+ IS_XA1_B = 1<<8, /* Q_XA1 End of Buffer */
+ IS_XA1_F = 1<<7, /* Q_XA1 End of Frame */
+ IS_XA1_C = 1<<6, /* Q_XA1 Encoding Error */
+/* Synchronous Transmit Queue 2 */
+ IS_XS2_B = 1<<5, /* Q_XS2 End of Buffer */
+ IS_XS2_F = 1<<4, /* Q_XS2 End of Frame */
+ IS_XS2_C = 1<<3, /* Q_XS2 Encoding Error */
+/* Asynchronous Transmit Queue 2 */
+ IS_XA2_B = 1<<2, /* Q_XA2 End of Buffer */
+ IS_XA2_F = 1<<1, /* Q_XA2 End of Frame */
+ IS_XA2_C = 1<<0, /* Q_XA2 Encoding Error */
+
+ IS_TO_PORT1 = IS_PA_TO_RX1 | IS_PA_TO_TX1,
+ IS_TO_PORT2 = IS_PA_TO_RX2 | IS_PA_TO_TX2,
+
+ IS_PORT_1 = IS_XA1_F| IS_R1_F | IS_TO_PORT1 | IS_MAC1,
+ IS_PORT_2 = IS_XA2_F| IS_R2_F | IS_TO_PORT2 | IS_MAC2,
+};
+
+
+/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
+enum {
+ IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */
+ IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */
+ IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */
+ IS_IRQ_STAT = 1<<10, /* IRQ status exception */
+ IS_NO_STAT_M1 = 1<<9, /* No Rx Status from MAC 1 */
+ IS_NO_STAT_M2 = 1<<8, /* No Rx Status from MAC 2 */
+ IS_NO_TIST_M1 = 1<<7, /* No Time Stamp from MAC 1 */
+ IS_NO_TIST_M2 = 1<<6, /* No Time Stamp from MAC 2 */
+ IS_RAM_RD_PAR = 1<<5, /* RAM Read Parity Error */
+ IS_RAM_WR_PAR = 1<<4, /* RAM Write Parity Error */
+ IS_M1_PAR_ERR = 1<<3, /* MAC 1 Parity Error */
+ IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */
+ IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */
+ IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */
+
+ IS_ERR_MSK = IS_IRQ_MST_ERR | IS_IRQ_STAT
+ | IS_RAM_RD_PAR | IS_RAM_WR_PAR
+ | IS_M1_PAR_ERR | IS_M2_PAR_ERR
+ | IS_R1_PAR_ERR | IS_R2_PAR_ERR,
+};
+
+/* B2_TST_CTRL1 8 bit Test Control Register 1 */
+enum {
+ TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */
+ TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */
+ TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */
+ TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */
+ TST_FRC_APERR_M = 1<<3, /* force ADDRPERR on MST */
+ TST_FRC_APERR_T = 1<<2, /* force ADDRPERR on TRG */
+ TST_CFG_WRITE_ON = 1<<1, /* Enable Config Reg WR */
+ TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */
+};
+
+/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */
+enum {
+ CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */
+ /* Bit 3.. 2: reserved */
+ CFG_DIS_M2_CLK = 1<<1, /* Disable Clock for 2nd MAC */
+ CFG_SNG_MAC = 1<<0, /* MAC Config: 0=2 MACs / 1=1 MAC*/
+};
+
+/* B2_CHIP_ID 8 bit Chip Identification Number */
+enum {
+ CHIP_ID_GENESIS = 0x0a, /* Chip ID for GENESIS */
+ CHIP_ID_YUKON = 0xb0, /* Chip ID for YUKON */
+ CHIP_ID_YUKON_LITE = 0xb1, /* Chip ID for YUKON-Lite (Rev. A1-A3) */
+ CHIP_ID_YUKON_LP = 0xb2, /* Chip ID for YUKON-LP */
+ CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */
+ CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */
+ CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */
+
+ CHIP_REV_YU_LITE_A1 = 3, /* Chip Rev. for YUKON-Lite A1,A2 */
+ CHIP_REV_YU_LITE_A3 = 7, /* Chip Rev. for YUKON-Lite A3 */
+};
+
+/* B2_TI_CTRL 8 bit Timer control */
+/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */
+enum {
+ TIM_START = 1<<2, /* Start Timer */
+ TIM_STOP = 1<<1, /* Stop Timer */
+ TIM_CLR_IRQ = 1<<0, /* Clear Timer IRQ (!IRQM) */
+};
+
+/* B2_TI_TEST 8 Bit Timer Test */
+/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */
+/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */
+enum {
+ TIM_T_ON = 1<<2, /* Test mode on */
+ TIM_T_OFF = 1<<1, /* Test mode off */
+ TIM_T_STEP = 1<<0, /* Test step */
+};
+
+/* B2_GP_IO 32 bit General Purpose I/O Register */
+enum {
+ GP_DIR_9 = 1<<25, /* IO_9 direct, 0=In/1=Out */
+ GP_DIR_8 = 1<<24, /* IO_8 direct, 0=In/1=Out */
+ GP_DIR_7 = 1<<23, /* IO_7 direct, 0=In/1=Out */
+ GP_DIR_6 = 1<<22, /* IO_6 direct, 0=In/1=Out */
+ GP_DIR_5 = 1<<21, /* IO_5 direct, 0=In/1=Out */
+ GP_DIR_4 = 1<<20, /* IO_4 direct, 0=In/1=Out */
+ GP_DIR_3 = 1<<19, /* IO_3 direct, 0=In/1=Out */
+ GP_DIR_2 = 1<<18, /* IO_2 direct, 0=In/1=Out */
+ GP_DIR_1 = 1<<17, /* IO_1 direct, 0=In/1=Out */
+ GP_DIR_0 = 1<<16, /* IO_0 direct, 0=In/1=Out */
+
+ GP_IO_9 = 1<<9, /* IO_9 pin */
+ GP_IO_8 = 1<<8, /* IO_8 pin */
+ GP_IO_7 = 1<<7, /* IO_7 pin */
+ GP_IO_6 = 1<<6, /* IO_6 pin */
+ GP_IO_5 = 1<<5, /* IO_5 pin */
+ GP_IO_4 = 1<<4, /* IO_4 pin */
+ GP_IO_3 = 1<<3, /* IO_3 pin */
+ GP_IO_2 = 1<<2, /* IO_2 pin */
+ GP_IO_1 = 1<<1, /* IO_1 pin */
+ GP_IO_0 = 1<<0, /* IO_0 pin */
+};
+
+/* Descriptor Bit Definition */
+/* TxCtrl Transmit Buffer Control Field */
+/* RxCtrl Receive Buffer Control Field */
+enum {
+ BMU_OWN = 1<<31, /* OWN bit: 0=host/1=BMU */
+ BMU_STF = 1<<30, /* Start of Frame */
+ BMU_EOF = 1<<29, /* End of Frame */
+ BMU_IRQ_EOB = 1<<28, /* Req "End of Buffer" IRQ */
+ BMU_IRQ_EOF = 1<<27, /* Req "End of Frame" IRQ */
+ /* TxCtrl specific bits */
+ BMU_STFWD = 1<<26, /* (Tx) Store & Forward Frame */
+ BMU_NO_FCS = 1<<25, /* (Tx) Disable MAC FCS (CRC) generation */
+ BMU_SW = 1<<24, /* (Tx) 1 bit res. for SW use */
+ /* RxCtrl specific bits */
+ BMU_DEV_0 = 1<<26, /* (Rx) Transfer data to Dev0 */
+ BMU_STAT_VAL = 1<<25, /* (Rx) Rx Status Valid */
+ BMU_TIST_VAL = 1<<24, /* (Rx) Rx TimeStamp Valid */
+ /* Bit 23..16: BMU Check Opcodes */
+ BMU_CHECK = 0x55<<16, /* Default BMU check */
+ BMU_TCP_CHECK = 0x56<<16, /* Descr with TCP ext */
+ BMU_UDP_CHECK = 0x57<<16, /* Descr with UDP ext (YUKON only) */
+ BMU_BBC = 0xffffL, /* Bit 15.. 0: Buffer Byte Counter */
+};
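+/* A typical Tx control word, as built in skge_xmit_frame(), is
+ * BMU_OWN | BMU_SW | BMU_STF | <check opcode> | <length>: ownership,
+ * software flag and start-of-frame in the top bits, the checksum
+ * opcode in bits 23..16 and the buffer byte count in bits 15..0.
+ */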
+
+/* B2_BSC_CTRL 8 bit Blink Source Counter Control */
+enum {
+ BSC_START = 1<<1, /* Start Blink Source Counter */
+ BSC_STOP = 1<<0, /* Stop Blink Source Counter */
+};
+
+/* B2_BSC_STAT 8 bit Blink Source Counter Status */
+enum {
+ BSC_SRC = 1<<0, /* Blink Source, 0=Off / 1=On */
+};
+
+/* B2_BSC_TST 16 bit Blink Source Counter Test Reg */
+enum {
+ BSC_T_ON = 1<<2, /* Test mode on */
+ BSC_T_OFF = 1<<1, /* Test mode off */
+ BSC_T_STEP = 1<<0, /* Test step */
+};
+
+/* B3_RAM_ADDR 32 bit RAM Address, to read or write */
+ /* Bit 31..19: reserved */
+#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */
+/* RAM Interface Registers */
+
+/* B3_RI_CTRL 16 bit RAM Iface Control Register */
+enum {
+ RI_CLR_RD_PERR = 1<<9, /* Clear IRQ RAM Read Parity Err */
+ RI_CLR_WR_PERR = 1<<8, /* Clear IRQ RAM Write Parity Err*/
+
+ RI_RST_CLR = 1<<1, /* Clear RAM Interface Reset */
+ RI_RST_SET = 1<<0, /* Set RAM Interface Reset */
+};
+
+/* MAC Arbiter Registers */
+/* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */
+enum {
+ MA_FOE_ON = 1<<3, /* XMAC Fast Output Enable ON */
+ MA_FOE_OFF = 1<<2, /* XMAC Fast Output Enable OFF */
+ MA_RST_CLR = 1<<1, /* Clear MAC Arbiter Reset */
+ MA_RST_SET = 1<<0, /* Set MAC Arbiter Reset */
+
+};
+
+/* Timeout values */
+#define SK_MAC_TO_53 72 /* MAC arbiter timeout */
+#define SK_PKT_TO_53 0x2000 /* Packet arbiter timeout */
+#define SK_PKT_TO_MAX 0xffff /* Maximum value */
+#define SK_RI_TO_53 36 /* RAM interface timeout */
+
+/* Packet Arbiter Registers */
+/* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */
+enum {
+ PA_CLR_TO_TX2 = 1<<13,/* Clear IRQ Packet Timeout TX2 */
+ PA_CLR_TO_TX1 = 1<<12,/* Clear IRQ Packet Timeout TX1 */
+ PA_CLR_TO_RX2 = 1<<11,/* Clear IRQ Packet Timeout RX2 */
+ PA_CLR_TO_RX1 = 1<<10,/* Clear IRQ Packet Timeout RX1 */
+ PA_ENA_TO_TX2 = 1<<9, /* Enable Timeout Timer TX2 */
+ PA_DIS_TO_TX2 = 1<<8, /* Disable Timeout Timer TX2 */
+ PA_ENA_TO_TX1 = 1<<7, /* Enable Timeout Timer TX1 */
+ PA_DIS_TO_TX1 = 1<<6, /* Disable Timeout Timer TX1 */
+ PA_ENA_TO_RX2 = 1<<5, /* Enable Timeout Timer RX2 */
+ PA_DIS_TO_RX2 = 1<<4, /* Disable Timeout Timer RX2 */
+ PA_ENA_TO_RX1 = 1<<3, /* Enable Timeout Timer RX1 */
+ PA_DIS_TO_RX1 = 1<<2, /* Disable Timeout Timer RX1 */
+	PA_RST_CLR	= 1<<1,	/* Clear Packet Arbiter Reset */
+	PA_RST_SET	= 1<<0,	/* Set Packet Arbiter Reset */
+};
+
+#define PA_ENA_TO_ALL (PA_ENA_TO_RX1 | PA_ENA_TO_RX2 |\
+ PA_ENA_TO_TX1 | PA_ENA_TO_TX2)
+
+
+/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
+/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */
+/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */
+/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */
+/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */
+
+#define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 0: Max TXA Timer/Cnt Val */
+
+/* TXA_CTRL 8 bit Tx Arbiter Control Register */
+enum {
+ TXA_ENA_FSYNC = 1<<7, /* Enable force of sync Tx queue */
+ TXA_DIS_FSYNC = 1<<6, /* Disable force of sync Tx queue */
+ TXA_ENA_ALLOC = 1<<5, /* Enable alloc of free bandwidth */
+ TXA_DIS_ALLOC = 1<<4, /* Disable alloc of free bandwidth */
+ TXA_START_RC = 1<<3, /* Start sync Rate Control */
+ TXA_STOP_RC = 1<<2, /* Stop sync Rate Control */
+ TXA_ENA_ARB = 1<<1, /* Enable Tx Arbiter */
+ TXA_DIS_ARB = 1<<0, /* Disable Tx Arbiter */
+};
+
+/*
+ * Bank 4 - 5
+ */
+/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
+enum {
+ TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/
+ TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */
+ TXA_LIM_INI = 0x0208,/* 32 bit Tx Arb Limit Counter Init Val */
+ TXA_LIM_VAL = 0x020c,/* 32 bit Tx Arb Limit Counter Value */
+ TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */
+ TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */
+ TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */
+};
+
+
+enum {
+ B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */
+ B7_CFG_SPC = 0x0380,/* copy of the Configuration register */
+ B8_RQ1_REGS = 0x0400,/* Receive Queue 1 */
+ B8_RQ2_REGS = 0x0480,/* Receive Queue 2 */
+ B8_TS1_REGS = 0x0600,/* Transmit sync queue 1 */
+ B8_TA1_REGS = 0x0680,/* Transmit async queue 1 */
+ B8_TS2_REGS = 0x0700,/* Transmit sync queue 2 */
+	B8_TA2_REGS	= 0x0780,/* Transmit async queue 2 */
+ B16_RAM_REGS = 0x0800,/* RAM Buffer Registers */
+};
+
+/* Queue Register Offsets, use Q_ADDR() to access */
+enum {
+ B8_Q_REGS = 0x0400, /* base of Queue registers */
+ Q_D = 0x00, /* 8*32 bit Current Descriptor */
+ Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */
+ Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */
+ Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */
+ Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */
+ Q_BC = 0x30, /* 32 bit Current Byte Counter */
+ Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */
+ Q_F = 0x38, /* 32 bit Flag Register */
+ Q_T1 = 0x3c, /* 32 bit Test Register 1 */
+ Q_T1_TR = 0x3c, /* 8 bit Test Register 1 Transfer SM */
+ Q_T1_WR = 0x3d, /* 8 bit Test Register 1 Write Descriptor SM */
+ Q_T1_RD = 0x3e, /* 8 bit Test Register 1 Read Descriptor SM */
+ Q_T1_SV = 0x3f, /* 8 bit Test Register 1 Supervisor SM */
+ Q_T2 = 0x40, /* 32 bit Test Register 2 */
+ Q_T3 = 0x44, /* 32 bit Test Register 3 */
+
+};
+#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
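+/* Example: Q_ADDR(Q_R1, Q_CSR) = 0x0400 + 0x0000 + 0x34 = 0x0434,
+ * the BMU control/status register of receive queue 1; this is how
+ * skge_rx_stop() and the xmit path address per-queue registers.
+ */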
+
+/* RAM Buffer Register Offsets */
+enum {
+
+ RB_START= 0x00,/* 32 bit RAM Buffer Start Address */
+ RB_END = 0x04,/* 32 bit RAM Buffer End Address */
+ RB_WP = 0x08,/* 32 bit RAM Buffer Write Pointer */
+ RB_RP = 0x0c,/* 32 bit RAM Buffer Read Pointer */
+ RB_RX_UTPP= 0x10,/* 32 bit Rx Upper Threshold, Pause Packet */
+ RB_RX_LTPP= 0x14,/* 32 bit Rx Lower Threshold, Pause Packet */
+ RB_RX_UTHP= 0x18,/* 32 bit Rx Upper Threshold, High Prio */
+ RB_RX_LTHP= 0x1c,/* 32 bit Rx Lower Threshold, High Prio */
+ /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */
+ RB_PC = 0x20,/* 32 bit RAM Buffer Packet Counter */
+ RB_LEV = 0x24,/* 32 bit RAM Buffer Level Register */
+ RB_CTRL = 0x28,/* 32 bit RAM Buffer Control Register */
+ RB_TST1 = 0x29,/* 8 bit RAM Buffer Test Register 1 */
+ RB_TST2 = 0x2a,/* 8 bit RAM Buffer Test Register 2 */
+};
+
+/* Receive and Transmit Queues */
+enum {
+ Q_R1 = 0x0000, /* Receive Queue 1 */
+ Q_R2 = 0x0080, /* Receive Queue 2 */
+ Q_XS1 = 0x0200, /* Synchronous Transmit Queue 1 */
+ Q_XA1 = 0x0280, /* Asynchronous Transmit Queue 1 */
+ Q_XS2 = 0x0300, /* Synchronous Transmit Queue 2 */
+ Q_XA2 = 0x0380, /* Asynchronous Transmit Queue 2 */
+};
+
+/* Different MAC Types */
+enum {
+ SK_MAC_XMAC = 0, /* Xaqti XMAC II */
+ SK_MAC_GMAC = 1, /* Marvell GMAC */
+};
+
+/* Different PHY Types */
+enum {
+ SK_PHY_XMAC = 0,/* integrated in XMAC II */
+ SK_PHY_BCOM = 1,/* Broadcom BCM5400 */
+ SK_PHY_LONE = 2,/* Level One LXT1000 [not supported]*/
+ SK_PHY_NAT = 3,/* National DP83891 [not supported] */
+ SK_PHY_MARV_COPPER= 4,/* Marvell 88E1011S */
+ SK_PHY_MARV_FIBER = 5,/* Marvell 88E1011S working on fiber */
+};
+
+/* PHY addresses (bits 12..8 of PHY address reg) */
+enum {
+ PHY_ADDR_XMAC = 0<<8,
+ PHY_ADDR_BCOM = 1<<8,
+
+/* GPHY address (bits 15..11 of SMI control reg) */
+ PHY_ADDR_MARV = 0,
+};
+
+#define RB_ADDR(offs, queue) ((u16)B16_RAM_REGS + (u16)(queue) + (offs))
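+/* The operands are simply added, so argument order does not matter;
+ * e.g. RB_ADDR(Q_XA1, RB_CTRL) = 0x0800 + 0x0280 + 0x28 = 0x0aa8,
+ * the RAM buffer control register of the first port's async Tx queue.
+ */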
+
+/* Receive MAC FIFO, Receive LED, and Link_Sync regs (GENESIS only) */
+enum {
+ RX_MFF_EA = 0x0c00,/* 32 bit Receive MAC FIFO End Address */
+ RX_MFF_WP = 0x0c04,/* 32 bit Receive MAC FIFO Write Pointer */
+
+ RX_MFF_RP = 0x0c0c,/* 32 bit Receive MAC FIFO Read Pointer */
+ RX_MFF_PC = 0x0c10,/* 32 bit Receive MAC FIFO Packet Cnt */
+ RX_MFF_LEV = 0x0c14,/* 32 bit Receive MAC FIFO Level */
+ RX_MFF_CTRL1 = 0x0c18,/* 16 bit Receive MAC FIFO Control Reg 1*/
+ RX_MFF_STAT_TO = 0x0c1a,/* 8 bit Receive MAC Status Timeout */
+ RX_MFF_TIST_TO = 0x0c1b,/* 8 bit Receive MAC Time Stamp Timeout */
+ RX_MFF_CTRL2 = 0x0c1c,/* 8 bit Receive MAC FIFO Control Reg 2*/
+ RX_MFF_TST1 = 0x0c1d,/* 8 bit Receive MAC FIFO Test Reg 1 */
+ RX_MFF_TST2 = 0x0c1e,/* 8 bit Receive MAC FIFO Test Reg 2 */
+
+ RX_LED_INI = 0x0c20,/* 32 bit Receive LED Cnt Init Value */
+ RX_LED_VAL = 0x0c24,/* 32 bit Receive LED Cnt Current Value */
+ RX_LED_CTRL = 0x0c28,/* 8 bit Receive LED Cnt Control Reg */
+ RX_LED_TST = 0x0c29,/* 8 bit Receive LED Cnt Test Register */
+
+ LNK_SYNC_INI = 0x0c30,/* 32 bit Link Sync Cnt Init Value */
+ LNK_SYNC_VAL = 0x0c34,/* 32 bit Link Sync Cnt Current Value */
+ LNK_SYNC_CTRL = 0x0c38,/* 8 bit Link Sync Cnt Control Register */
+ LNK_SYNC_TST = 0x0c39,/* 8 bit Link Sync Cnt Test Register */
+ LNK_LED_REG = 0x0c3c,/* 8 bit Link LED Register */
+};
+
+/* Receive and Transmit MAC FIFO Registers (GENESIS only) */
+/* RX_MFF_CTRL1 16 bit Receive MAC FIFO Control Reg 1 */
+enum {
+ MFF_ENA_RDY_PAT = 1<<13, /* Enable Ready Patch */
+ MFF_DIS_RDY_PAT = 1<<12, /* Disable Ready Patch */
+ MFF_ENA_TIM_PAT = 1<<11, /* Enable Timing Patch */
+ MFF_DIS_TIM_PAT = 1<<10, /* Disable Timing Patch */
+ MFF_ENA_ALM_FUL = 1<<9, /* Enable AlmostFull Sign */
+ MFF_DIS_ALM_FUL = 1<<8, /* Disable AlmostFull Sign */
+ MFF_ENA_PAUSE = 1<<7, /* Enable Pause Signaling */
+ MFF_DIS_PAUSE = 1<<6, /* Disable Pause Signaling */
+ MFF_ENA_FLUSH = 1<<5, /* Enable Frame Flushing */
+ MFF_DIS_FLUSH = 1<<4, /* Disable Frame Flushing */
+ MFF_ENA_TIST = 1<<3, /* Enable Time Stamp Gener */
+ MFF_DIS_TIST = 1<<2, /* Disable Time Stamp Gener */
+ MFF_CLR_INTIST = 1<<1, /* Clear IRQ No Time Stamp */
+ MFF_CLR_INSTAT = 1<<0, /* Clear IRQ No Status */
+ MFF_RX_CTRL_DEF = MFF_ENA_TIM_PAT,
+};
+
+/* TX_MFF_CTRL1 16 bit Transmit MAC FIFO Control Reg 1 */
+enum {
+ MFF_CLR_PERR = 1<<15, /* Clear Parity Error IRQ */
+
+ MFF_ENA_PKT_REC = 1<<13, /* Enable Packet Recovery */
+ MFF_DIS_PKT_REC = 1<<12, /* Disable Packet Recovery */
+
+ MFF_ENA_W4E = 1<<7, /* Enable Wait for Empty */
+ MFF_DIS_W4E = 1<<6, /* Disable Wait for Empty */
+
+ MFF_ENA_LOOPB = 1<<3, /* Enable Loopback */
+ MFF_DIS_LOOPB = 1<<2, /* Disable Loopback */
+ MFF_CLR_MAC_RST = 1<<1, /* Clear XMAC Reset */
+ MFF_SET_MAC_RST = 1<<0, /* Set XMAC Reset */
+
+ MFF_TX_CTRL_DEF = MFF_ENA_PKT_REC | (u16) MFF_ENA_TIM_PAT | MFF_ENA_FLUSH,
+};
+
+
+/* RX_MFF_TST2 8 bit Receive MAC FIFO Test Register 2 */
+/* TX_MFF_TST2 8 bit Transmit MAC FIFO Test Register 2 */
+enum {
+ MFF_WSP_T_ON = 1<<6, /* Tx: Write Shadow Ptr TestOn */
+ MFF_WSP_T_OFF = 1<<5, /* Tx: Write Shadow Ptr TstOff */
+ MFF_WSP_INC = 1<<4, /* Tx: Write Shadow Ptr Increment */
+ MFF_PC_DEC = 1<<3, /* Packet Counter Decrement */
+ MFF_PC_T_ON = 1<<2, /* Packet Counter Test On */
+ MFF_PC_T_OFF = 1<<1, /* Packet Counter Test Off */
+ MFF_PC_INC = 1<<0, /* Packet Counter Increment */
+};
+
+/* RX_MFF_TST1 8 bit Receive MAC FIFO Test Register 1 */
+/* TX_MFF_TST1 8 bit Transmit MAC FIFO Test Register 1 */
+enum {
+ MFF_WP_T_ON = 1<<6, /* Write Pointer Test On */
+ MFF_WP_T_OFF = 1<<5, /* Write Pointer Test Off */
+ MFF_WP_INC = 1<<4, /* Write Pointer Increm */
+
+ MFF_RP_T_ON = 1<<2, /* Read Pointer Test On */
+ MFF_RP_T_OFF = 1<<1, /* Read Pointer Test Off */
+ MFF_RP_DEC = 1<<0, /* Read Pointer Decrement */
+};
+
+/* RX_MFF_CTRL2 8 bit Receive MAC FIFO Control Reg 2 */
+/* TX_MFF_CTRL2 8 bit Transmit MAC FIFO Control Reg 2 */
+enum {
+ MFF_ENA_OP_MD = 1<<3, /* Enable Operation Mode */
+ MFF_DIS_OP_MD = 1<<2, /* Disable Operation Mode */
+ MFF_RST_CLR = 1<<1, /* Clear MAC FIFO Reset */
+ MFF_RST_SET = 1<<0, /* Set MAC FIFO Reset */
+};
+
+
+/* Link LED Counter Registers (GENESIS only) */
+
+/* RX_LED_CTRL 8 bit Receive LED Cnt Control Reg */
+/* TX_LED_CTRL 8 bit Transmit LED Cnt Control Reg */
+/* LNK_SYNC_CTRL 8 bit Link Sync Cnt Control Register */
+enum {
+ LED_START = 1<<2, /* Start Timer */
+ LED_STOP = 1<<1, /* Stop Timer */
+ LED_STATE = 1<<0, /* Rx/Tx: LED State, 1=LED on */
+};
+
+/* RX_LED_TST 8 bit Receive LED Cnt Test Register */
+/* TX_LED_TST 8 bit Transmit LED Cnt Test Register */
+/* LNK_SYNC_TST 8 bit Link Sync Cnt Test Register */
+enum {
+ LED_T_ON = 1<<2, /* LED Counter Test mode On */
+ LED_T_OFF = 1<<1, /* LED Counter Test mode Off */
+ LED_T_STEP = 1<<0, /* LED Counter Step */
+};
+
+/* LNK_LED_REG 8 bit Link LED Register */
+enum {
+ LED_BLK_ON = 1<<5, /* Link LED Blinking On */
+ LED_BLK_OFF = 1<<4, /* Link LED Blinking Off */
+ LED_SYNC_ON = 1<<3, /* Use Sync Wire to switch LED */
+ LED_SYNC_OFF = 1<<2, /* Disable Sync Wire Input */
+ LED_ON = 1<<1, /* switch LED on */
+ LED_OFF = 1<<0, /* switch LED off */
+};
+
+/* Receive GMAC FIFO (YUKON) */
+enum {
+ RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */
+ RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */
+ RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */
+ RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */
+ RX_GMF_FL_THR = 0x0c50,/* 32 bit Rx GMAC FIFO Flush Threshold */
+ RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */
+ RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */
+ RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */
+ RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */
+};
+
+
+/* TXA_TEST 8 bit Tx Arbiter Test Register */
+enum {
+ TXA_INT_T_ON = 1<<5, /* Tx Arb Interval Timer Test On */
+ TXA_INT_T_OFF = 1<<4, /* Tx Arb Interval Timer Test Off */
+ TXA_INT_T_STEP = 1<<3, /* Tx Arb Interval Timer Step */
+ TXA_LIM_T_ON = 1<<2, /* Tx Arb Limit Timer Test On */
+ TXA_LIM_T_OFF = 1<<1, /* Tx Arb Limit Timer Test Off */
+ TXA_LIM_T_STEP = 1<<0, /* Tx Arb Limit Timer Step */
+};
+
+/* TXA_STAT 8 bit Tx Arbiter Status Register */
+enum {
+ TXA_PRIO_XS = 1<<0, /* sync queue has prio to send */
+};
+
+
+/* Q_BC 32 bit Current Byte Counter */
+
+/* BMU Control Status Registers */
+/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */
+/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */
+/* B0_XA1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */
+/* B0_XS1_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */
+/* B0_XA2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */
+/* B0_XS2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */
+/* Q_CSR 32 bit BMU Control/Status Register */
+
+enum {
+ CSR_SV_IDLE = 1<<24, /* BMU SM Idle */
+
+ CSR_DESC_CLR = 1<<21, /* Clear Reset for Descr */
+ CSR_DESC_SET = 1<<20, /* Set Reset for Descr */
+ CSR_FIFO_CLR = 1<<19, /* Clear Reset for FIFO */
+ CSR_FIFO_SET = 1<<18, /* Set Reset for FIFO */
+ CSR_HPI_RUN = 1<<17, /* Release HPI SM */
+ CSR_HPI_RST = 1<<16, /* Reset HPI SM to Idle */
+ CSR_SV_RUN = 1<<15, /* Release Supervisor SM */
+ CSR_SV_RST = 1<<14, /* Reset Supervisor SM */
+ CSR_DREAD_RUN = 1<<13, /* Release Descr Read SM */
+ CSR_DREAD_RST = 1<<12, /* Reset Descr Read SM */
+ CSR_DWRITE_RUN = 1<<11, /* Release Descr Write SM */
+ CSR_DWRITE_RST = 1<<10, /* Reset Descr Write SM */
+ CSR_TRANS_RUN = 1<<9, /* Release Transfer SM */
+ CSR_TRANS_RST = 1<<8, /* Reset Transfer SM */
+ CSR_ENA_POL = 1<<7, /* Enable Descr Polling */
+ CSR_DIS_POL = 1<<6, /* Disable Descr Polling */
+ CSR_STOP = 1<<5, /* Stop Rx/Tx Queue */
+ CSR_START = 1<<4, /* Start Rx/Tx Queue */
+ CSR_IRQ_CL_P = 1<<3, /* (Rx) Clear Parity IRQ */
+ CSR_IRQ_CL_B = 1<<2, /* Clear EOB IRQ */
+ CSR_IRQ_CL_F = 1<<1, /* Clear EOF IRQ */
+ CSR_IRQ_CL_C = 1<<0, /* Clear ERR IRQ */
+};
+
+#define CSR_SET_RESET (CSR_DESC_SET | CSR_FIFO_SET | CSR_HPI_RST |\
+ CSR_SV_RST | CSR_DREAD_RST | CSR_DWRITE_RST |\
+ CSR_TRANS_RST)
+#define CSR_CLR_RESET (CSR_DESC_CLR | CSR_FIFO_CLR | CSR_HPI_RUN |\
+ CSR_SV_RUN | CSR_DREAD_RUN | CSR_DWRITE_RUN |\
+ CSR_TRANS_RUN)
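+
+/*
+ * Usage sketch (not taken verbatim from the driver): a BMU is reset by
+ * setting and then clearing all of its state machine resets in one shot.
+ * Assuming the Q_ADDR() helper and the Q_R1/Q_CSR offsets defined
+ * earlier in this header, plus a skge_write32() register accessor:
+ *
+ *    skge_write32(hw, Q_ADDR(Q_R1, Q_CSR), CSR_SET_RESET);
+ *    skge_write32(hw, Q_ADDR(Q_R1, Q_CSR), CSR_CLR_RESET);
+ */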
+
+/* Q_F 32 bit Flag Register */
+enum {
+ F_ALM_FULL = 1<<27, /* Rx FIFO: almost full */
+ F_EMPTY = 1<<27, /* Tx FIFO: empty flag */
+ F_FIFO_EOF = 1<<26, /* Tag (EOF Flag) bit in FIFO */
+ F_WM_REACHED = 1<<25, /* Watermark reached */
+
+ F_FIFO_LEVEL = 0x1fL<<16, /* Bit 20..16: # of Qwords in FIFO */
+ F_WATER_MARK = 0x0007ffL, /* Bit 10.. 0: Watermark */
+};
+
+/* RAM Buffer Register Offsets, use RB_ADDR(Offs, Queue) to access */
+/* RB_START 32 bit RAM Buffer Start Address */
+/* RB_END 32 bit RAM Buffer End Address */
+/* RB_WP 32 bit RAM Buffer Write Pointer */
+/* RB_RP 32 bit RAM Buffer Read Pointer */
+/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */
+/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */
+/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */
+/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */
+/* RB_PC 32 bit RAM Buffer Packet Counter */
+/* RB_LEV 32 bit RAM Buffer Level Register */
+
+#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */
+/* RB_TST2 8 bit RAM Buffer Test Register 2 */
+/* RB_TST1 8 bit RAM Buffer Test Register 1 */
+
+/* RB_CTRL 8 bit RAM Buffer Control Register */
+enum {
+ RB_ENA_STFWD = 1<<5, /* Enable Store & Forward */
+ RB_DIS_STFWD = 1<<4, /* Disable Store & Forward */
+ RB_ENA_OP_MD = 1<<3, /* Enable Operation Mode */
+ RB_DIS_OP_MD = 1<<2, /* Disable Operation Mode */
+ RB_RST_CLR = 1<<1, /* Clear RAM Buf STM Reset */
+ RB_RST_SET = 1<<0, /* Set RAM Buf STM Reset */
+};
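+
+/*
+ * Usage sketch: RAM buffer registers are reached through RB_ADDR();
+ * programming the buffer window of Rx queue 1 might look like this
+ * (Q_R1, RB_START and RB_END are assumed to be defined elsewhere in
+ * the driver, skge_write32() is an assumed register accessor):
+ *
+ *    skge_write32(hw, RB_ADDR(RB_START, Q_R1), start);
+ *    skge_write32(hw, RB_ADDR(RB_END, Q_R1), end);
+ */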
+
+/* Transmit MAC FIFO and Transmit LED Registers (GENESIS only) */
+enum {
+ TX_MFF_EA = 0x0d00,/* 32 bit Transmit MAC FIFO End Address */
+ TX_MFF_WP = 0x0d04,/* 32 bit Transmit MAC FIFO WR Pointer */
+ TX_MFF_WSP = 0x0d08,/* 32 bit Transmit MAC FIFO WR Shadow Ptr */
+ TX_MFF_RP = 0x0d0c,/* 32 bit Transmit MAC FIFO RD Pointer */
+ TX_MFF_PC = 0x0d10,/* 32 bit Transmit MAC FIFO Packet Cnt */
+ TX_MFF_LEV = 0x0d14,/* 32 bit Transmit MAC FIFO Level */
+ TX_MFF_CTRL1 = 0x0d18,/* 16 bit Transmit MAC FIFO Ctrl Reg 1 */
+ TX_MFF_WAF = 0x0d1a,/* 8 bit Transmit MAC Wait after flush */
+
+ TX_MFF_CTRL2 = 0x0d1c,/* 8 bit Transmit MAC FIFO Ctrl Reg 2 */
+ TX_MFF_TST1 = 0x0d1d,/* 8 bit Transmit MAC FIFO Test Reg 1 */
+ TX_MFF_TST2 = 0x0d1e,/* 8 bit Transmit MAC FIFO Test Reg 2 */
+
+ TX_LED_INI = 0x0d20,/* 32 bit Transmit LED Cnt Init Value */
+ TX_LED_VAL = 0x0d24,/* 32 bit Transmit LED Cnt Current Val */
+ TX_LED_CTRL = 0x0d28,/* 8 bit Transmit LED Cnt Control Reg */
+ TX_LED_TST = 0x0d29,/* 8 bit Transmit LED Cnt Test Reg */
+};
+
+/* Counter and Timer constants, for a host clock of 62.5 MHz */
+#define SK_XMIT_DUR 0x002faf08UL /* 50 ms */
+#define SK_BLK_DUR 0x01dcd650UL /* 500 ms */
+
+#define SK_DPOLL_DEF 0x00ee6b28UL /* 250 ms at 62.5 MHz */
+
+#define SK_DPOLL_MAX 0x00ffffffUL /* 268 ms at 62.5 MHz */
+ /* 215 ms at 78.12 MHz */
+
+#define SK_FACT_62 100 /* is given in percent */
+#define SK_FACT_53 85 /* on GENESIS: 53.12 MHz */
+#define SK_FACT_78 125 /* on YUKON: 78.12 MHz */
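+
+/*
+ * Example (sketch): the timer constants above are given for the nominal
+ * 62.5 MHz host clock; on a different clock they are rescaled with the
+ * percentage factors, e.g. the 500 ms blink duration on a 78.12 MHz
+ * YUKON host clock:
+ *
+ *    u32 dur = (u32)((u64)SK_BLK_DUR * SK_FACT_78 / SK_FACT_62);
+ */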
+
+
+/* Transmit GMAC FIFO (YUKON only) */
+enum {
+ TX_GMF_EA = 0x0d40,/* 32 bit Tx GMAC FIFO End Address */
+ TX_GMF_AE_THR = 0x0d44,/* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/
+ TX_GMF_CTRL_T = 0x0d48,/* 32 bit Tx GMAC FIFO Control/Test */
+
+ TX_GMF_WP = 0x0d60,/* 32 bit Tx GMAC FIFO Write Pointer */
+ TX_GMF_WSP = 0x0d64,/* 32 bit Tx GMAC FIFO Write Shadow Ptr. */
+ TX_GMF_WLEV = 0x0d68,/* 32 bit Tx GMAC FIFO Write Level */
+
+ TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */
+ TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */
+ TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */
+
+ /* Descriptor Poll Timer Registers */
+ B28_DPT_INI = 0x0e00,/* 24 bit Descriptor Poll Timer Init Val */
+ B28_DPT_VAL = 0x0e04,/* 24 bit Descriptor Poll Timer Curr Val */
+ B28_DPT_CTRL = 0x0e08,/* 8 bit Descriptor Poll Timer Ctrl Reg */
+
+ B28_DPT_TST = 0x0e0a,/* 8 bit Descriptor Poll Timer Test Reg */
+
+ /* Time Stamp Timer Registers (YUKON only) */
+ GMAC_TI_ST_VAL = 0x0e14,/* 32 bit Time Stamp Timer Curr Val */
+ GMAC_TI_ST_CTRL = 0x0e18,/* 8 bit Time Stamp Timer Ctrl Reg */
+ GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */
+};
+
+
+enum {
+ LINKLED_OFF = 0x01,
+ LINKLED_ON = 0x02,
+ LINKLED_LINKSYNC_OFF = 0x04,
+ LINKLED_LINKSYNC_ON = 0x08,
+ LINKLED_BLINK_OFF = 0x10,
+ LINKLED_BLINK_ON = 0x20,
+};
+
+/* GMAC and GPHY Control Registers (YUKON only) */
+enum {
+ GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */
+ GPHY_CTRL = 0x0f04,/* 32 bit GPHY Control Reg */
+ GMAC_IRQ_SRC = 0x0f08,/* 8 bit GMAC Interrupt Source Reg */
+ GMAC_IRQ_MSK = 0x0f0c,/* 8 bit GMAC Interrupt Mask Reg */
+ GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */
+
+/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
+
+ WOL_REG_OFFS = 0x20,/* HW-Bug: addresses are +0x20 off from the spec. */
+
+ WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */
+ WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */
+ WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */
+ WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */
+ WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */
+
+/* WOL Pattern Length Registers (YUKON only) */
+
+ WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */
+ WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */
+
+/* WOL Pattern Counter Registers (YUKON only) */
+
+ WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */
+ WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */
+};
+#define WOL_REGS(port, x) (x + (port)*0x80)
+
+enum {
+ WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */
+ WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */
+};
+#define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400)
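+
+/*
+ * Usage sketch: the WOL block of the second link sits 0x80 above the
+ * first, so per-port access goes through WOL_REGS(). Assuming a
+ * skge_write16() accessor and the WOL_CTL_* bits defined further below,
+ * arming magic packet wakeup could look like:
+ *
+ *    skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT),
+ *                 WOL_CTL_ENA_PME_ON_MAGIC_PKT | WOL_CTL_ENA_MAGIC_PKT_UNIT);
+ */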
+
+enum {
+ BASE_XMAC_1 = 0x2000,/* XMAC 1 registers */
+ BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */
+ BASE_XMAC_2 = 0x3000,/* XMAC 2 registers */
+ BASE_GMAC_2 = 0x3800,/* GMAC 2 registers */
+};
+
+/*
+ * Receive Frame Status Encoding
+ */
+enum {
+ XMR_FS_LEN = 0x3fff<<18, /* Bit 31..18: Rx Frame Length */
+ XMR_FS_LEN_SHIFT = 18,
+ XMR_FS_2L_VLAN = 1<<17, /* Bit 17: tagged with 2-level VLAN ID */
+ XMR_FS_1_VLAN = 1<<16, /* Bit 16: tagged with 1-level VLAN ID */
+ XMR_FS_BC = 1<<15, /* Bit 15: Broadcast Frame */
+ XMR_FS_MC = 1<<14, /* Bit 14: Multicast Frame */
+ XMR_FS_UC = 1<<13, /* Bit 13: Unicast Frame */
+
+ XMR_FS_BURST = 1<<11, /* Bit 11: Burst Mode */
+ XMR_FS_CEX_ERR = 1<<10, /* Bit 10: Carrier Ext. Error */
+ XMR_FS_802_3 = 1<<9, /* Bit 9: 802.3 Frame */
+ XMR_FS_COL_ERR = 1<<8, /* Bit 8: Collision Error */
+ XMR_FS_CAR_ERR = 1<<7, /* Bit 7: Carrier Event Error */
+ XMR_FS_LEN_ERR = 1<<6, /* Bit 6: In-Range Length Error */
+ XMR_FS_FRA_ERR = 1<<5, /* Bit 5: Framing Error */
+ XMR_FS_RUNT = 1<<4, /* Bit 4: Runt Frame */
+ XMR_FS_LNG_ERR = 1<<3, /* Bit 3: Giant (Jumbo) Frame */
+ XMR_FS_FCS_ERR = 1<<2, /* Bit 2: Frame Check Sequ Err */
+ XMR_FS_ERR = 1<<1, /* Bit 1: Frame Error */
+ XMR_FS_MCTRL = 1<<0, /* Bit 0: MAC Control Packet */
+
+/*
+ * XMR_FS_ERR will be set if
+ * XMR_FS_FCS_ERR, XMR_FS_LNG_ERR, XMR_FS_RUNT,
+ * XMR_FS_FRA_ERR, XMR_FS_LEN_ERR, or XMR_FS_CEX_ERR
+ * is set. XMR_FS_LNG_ERR and XMR_FS_LEN_ERR will issue
+ * XMR_FS_ERR unless the corresponding bit in the Receive Command
+ * Register is set.
+ */
+};
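+
+/*
+ * Usage sketch: receive status words are normally screened with the
+ * summary bit first, then the individual cause bits; the error
+ * accounting against the generic net_device statistics shown here is
+ * illustrative only:
+ *
+ *    if (status & XMR_FS_ERR) {
+ *            if (status & XMR_FS_FCS_ERR)
+ *                    dev->stats.rx_crc_errors++;
+ *    }
+ */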
+
+/*
+ * XMAC-PHY Registers, indirectly addressed through the XMAC
+ */
+enum {
+ PHY_XMAC_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
+ PHY_XMAC_STAT = 0x01,/* 16 bit r/w PHY Status Register */
+ PHY_XMAC_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
+ PHY_XMAC_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
+ PHY_XMAC_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
+ PHY_XMAC_AUNE_LP = 0x05,/* 16 bit r/o Link Partner Ability Reg */
+ PHY_XMAC_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
+ PHY_XMAC_NEPG = 0x07,/* 16 bit r/w Next Page Register */
+ PHY_XMAC_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
+
+ PHY_XMAC_EXT_STAT = 0x0f,/* 16 bit r/o Ext Status Register */
+ PHY_XMAC_RES_ABI = 0x10,/* 16 bit r/o PHY Resolved Ability */
+};
+/*
+ * Broadcom-PHY Registers, indirectly addressed through the XMAC
+ */
+enum {
+ PHY_BCOM_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
+ PHY_BCOM_STAT = 0x01,/* 16 bit r/o PHY Status Register */
+ PHY_BCOM_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
+ PHY_BCOM_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
+ PHY_BCOM_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
+ PHY_BCOM_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */
+ PHY_BCOM_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
+ PHY_BCOM_NEPG = 0x07,/* 16 bit r/w Next Page Register */
+ PHY_BCOM_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
+ /* Broadcom-specific registers */
+ PHY_BCOM_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
+ PHY_BCOM_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
+ PHY_BCOM_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
+ PHY_BCOM_P_EXT_CTRL = 0x10,/* 16 bit r/w PHY Extended Ctrl Reg */
+ PHY_BCOM_P_EXT_STAT = 0x11,/* 16 bit r/o PHY Extended Stat Reg */
+ PHY_BCOM_RE_CTR = 0x12,/* 16 bit r/w Receive Error Counter */
+ PHY_BCOM_FC_CTR = 0x13,/* 16 bit r/w False Carrier Sense Cnt */
+ PHY_BCOM_RNO_CTR = 0x14,/* 16 bit r/w Receiver NOT_OK Cnt */
+
+ PHY_BCOM_AUX_CTRL = 0x18,/* 16 bit r/w Auxiliary Control Reg */
+ PHY_BCOM_AUX_STAT = 0x19,/* 16 bit r/o Auxiliary Stat Summary */
+ PHY_BCOM_INT_STAT = 0x1a,/* 16 bit r/o Interrupt Status Reg */
+ PHY_BCOM_INT_MASK = 0x1b,/* 16 bit r/w Interrupt Mask Reg */
+};
+
+/*
+ * Marvell-PHY Registers, indirectly addressed through the GMAC
+ */
+enum {
+ PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
+ PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */
+ PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
+ PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
+ PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
+ PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */
+ PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
+ PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */
+ PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
+ /* Marvell-specific registers */
+ PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
+ PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
+ PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
+ PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */
+ PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */
+ PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */
+ PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */
+ PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */
+ PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */
+ PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */
+ PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */
+ PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */
+ PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */
+ PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */
+ PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. Stat Reg */
+ PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */
+ PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */
+ PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+ PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */
+ PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */
+ PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */
+ PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */
+ PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */
+};
+
+enum {
+ PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */
+ PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */
+ PHY_CT_SPS_LSB = 1<<13, /* Bit 13: Speed select, lower bit */
+ PHY_CT_ANE = 1<<12, /* Bit 12: Auto-Negotiation Enabled */
+ PHY_CT_PDOWN = 1<<11, /* Bit 11: Power Down Mode */
+ PHY_CT_ISOL = 1<<10, /* Bit 10: Isolate Mode */
+ PHY_CT_RE_CFG = 1<<9, /* Bit 9: (sc) Restart Auto-Negotiation */
+ PHY_CT_DUP_MD = 1<<8, /* Bit 8: Duplex Mode */
+ PHY_CT_COL_TST = 1<<7, /* Bit 7: Collision Test enabled */
+ PHY_CT_SPS_MSB = 1<<6, /* Bit 6: Speed select, upper bit */
+};
+
+enum {
+ PHY_CT_SP1000 = PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */
+ PHY_CT_SP100 = PHY_CT_SPS_LSB, /* enable speed of 100 Mbps */
+ PHY_CT_SP10 = 0, /* enable speed of 10 Mbps */
+};
+
+enum {
+ PHY_ST_EXT_ST = 1<<8, /* Bit 8: Extended Status Present */
+
+ PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */
+ PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */
+ PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occurred */
+ PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */
+ PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */
+ PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */
+ PHY_ST_EXT_REG = 1<<0, /* Bit 0: Extended Register available */
+};
+
+enum {
+ PHY_I1_OUI_MSK = 0x3f<<10, /* Bit 15..10: Organization Unique ID */
+ PHY_I1_MOD_NUM = 0x3f<<4, /* Bit 9.. 4: Model Number */
+ PHY_I1_REV_MSK = 0xf, /* Bit 3.. 0: Revision Number */
+};
+
+/* different Broadcom PHY Ids */
+enum {
+ PHY_BCOM_ID1_A1 = 0x6041,
+ PHY_BCOM_ID1_B2 = 0x6043,
+ PHY_BCOM_ID1_C0 = 0x6044,
+ PHY_BCOM_ID1_C5 = 0x6047,
+};
+
+/* different Marvell PHY Ids */
+enum {
+ PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */
+ PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
+ PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */
+ PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
+ PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
+};
+
+/* Advertisement register bits */
+enum {
+ PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
+ PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
+ PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */
+
+ PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */
+ PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */
+ PHY_AN_100BASE4 = 1<<9, /* Bit 9: Try for 100Base-T4 */
+ PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */
+ PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */
+ PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */
+ PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */
+ PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */
+ PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
+ PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA,
+ PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL |
+ PHY_AN_100HALF | PHY_AN_100FULL,
+};
+
+/* Xmac Specific */
+enum {
+ PHY_X_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
+ PHY_X_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
+ PHY_X_AN_RFB = 3<<12,/* Bit 13..12: Remote Fault Bits */
+
+ PHY_X_AN_PAUSE = 3<<7,/* Bit 8.. 7: Pause Bits */
+ PHY_X_AN_HD = 1<<6, /* Bit 6: Half Duplex */
+ PHY_X_AN_FD = 1<<5, /* Bit 5: Full Duplex */
+};
+
+/* Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding */
+enum {
+ PHY_X_P_NO_PAUSE= 0<<7,/* Bit 8..7: no Pause Mode */
+ PHY_X_P_SYM_MD = 1<<7, /* Bit 8..7: symmetric Pause Mode */
+ PHY_X_P_ASYM_MD = 2<<7,/* Bit 8..7: asymmetric Pause Mode */
+ PHY_X_P_BOTH_MD = 3<<7,/* Bit 8..7: both Pause Mode */
+};
+
+
+/***** PHY_XMAC_EXT_STAT 16 bit r/w Extended Status Register *****/
+enum {
+ PHY_X_EX_FD = 1<<15, /* Bit 15: Device Supports Full Duplex */
+ PHY_X_EX_HD = 1<<14, /* Bit 14: Device Supports Half Duplex */
+};
+
+/***** PHY_XMAC_RES_ABI 16 bit r/o PHY Resolved Ability *****/
+enum {
+ PHY_X_RS_PAUSE = 3<<7, /* Bit 8..7: selected Pause Mode */
+ PHY_X_RS_HD = 1<<6, /* Bit 6: Half Duplex Mode selected */
+ PHY_X_RS_FD = 1<<5, /* Bit 5: Full Duplex Mode selected */
+ PHY_X_RS_ABLMIS = 1<<4, /* Bit 4: duplex or pause cap mismatch */
+ PHY_X_RS_PAUMIS = 1<<3, /* Bit 3: pause capability mismatch */
+};
+
+/* Remote Fault Bits (PHY_X_AN_RFB) encoding */
+enum {
+ X_RFB_OK = 0<<12,/* Bit 13..12 No errors, Link OK */
+ X_RFB_LF = 1<<12,/* Bit 13..12 Link Failure */
+ X_RFB_OFF = 2<<12,/* Bit 13..12 Offline */
+ X_RFB_AN_ERR = 3<<12,/* Bit 13..12 Auto-Negotiation Error */
+};
+
+/* Broadcom-Specific */
+/***** PHY_BCOM_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
+enum {
+ PHY_B_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */
+ PHY_B_1000C_MSE = 1<<12, /* Bit 12: Master/Slave Enable */
+ PHY_B_1000C_MSC = 1<<11, /* Bit 11: M/S Configuration */
+ PHY_B_1000C_RD = 1<<10, /* Bit 10: Repeater/DTE */
+ PHY_B_1000C_AFD = 1<<9, /* Bit 9: Advertise Full Duplex */
+ PHY_B_1000C_AHD = 1<<8, /* Bit 8: Advertise Half Duplex */
+};
+
+/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
+/***** PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
+enum {
+ PHY_B_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
+ PHY_B_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
+ PHY_B_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
+ PHY_B_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */
+ PHY_B_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */
+ PHY_B_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */
+ /* Bit 9..8: reserved */
+ PHY_B_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */
+};
+
+/***** PHY_BCOM_EXT_STAT 16 bit r/o Extended Status Register *****/
+enum {
+ PHY_B_ES_X_FD_CAP = 1<<15, /* Bit 15: 1000Base-X FD capable */
+ PHY_B_ES_X_HD_CAP = 1<<14, /* Bit 14: 1000Base-X HD capable */
+ PHY_B_ES_T_FD_CAP = 1<<13, /* Bit 13: 1000Base-T FD capable */
+ PHY_B_ES_T_HD_CAP = 1<<12, /* Bit 12: 1000Base-T HD capable */
+};
+
+/***** PHY_BCOM_P_EXT_CTRL 16 bit r/w PHY Extended Control Reg *****/
+enum {
+ PHY_B_PEC_MAC_PHY = 1<<15, /* Bit 15: 10BIT/GMI-Interface */
+ PHY_B_PEC_DIS_CROSS = 1<<14, /* Bit 14: Disable MDI Crossover */
+ PHY_B_PEC_TX_DIS = 1<<13, /* Bit 13: Tx output Disabled */
+ PHY_B_PEC_INT_DIS = 1<<12, /* Bit 12: Interrupts Disabled */
+ PHY_B_PEC_F_INT = 1<<11, /* Bit 11: Force Interrupt */
+ PHY_B_PEC_BY_45 = 1<<10, /* Bit 10: Bypass 4B5B-Decoder */
+ PHY_B_PEC_BY_SCR = 1<<9, /* Bit 9: Bypass Scrambler */
+ PHY_B_PEC_BY_MLT3 = 1<<8, /* Bit 8: Bypass MLT3 Encoder */
+ PHY_B_PEC_BY_RXA = 1<<7, /* Bit 7: Bypass Rx Alignm. */
+ PHY_B_PEC_RES_SCR = 1<<6, /* Bit 6: Reset Scrambler */
+ PHY_B_PEC_EN_LTR = 1<<5, /* Bit 5: Ena LED Traffic Mode */
+ PHY_B_PEC_LED_ON = 1<<4, /* Bit 4: Force LED's on */
+ PHY_B_PEC_LED_OFF = 1<<3, /* Bit 3: Force LED's off */
+ PHY_B_PEC_EX_IPG = 1<<2, /* Bit 2: Extend Tx IPG Mode */
+ PHY_B_PEC_3_LED = 1<<1, /* Bit 1: Three Link LED mode */
+ PHY_B_PEC_HIGH_LA = 1<<0, /* Bit 0: GMII FIFO Elasticity */
+};
+
+/***** PHY_BCOM_P_EXT_STAT 16 bit r/o PHY Extended Status Reg *****/
+enum {
+ PHY_B_PES_CROSS_STAT = 1<<13, /* Bit 13: MDI Crossover Status */
+ PHY_B_PES_INT_STAT = 1<<12, /* Bit 12: Interrupt Status */
+ PHY_B_PES_RRS = 1<<11, /* Bit 11: Remote Receiver Stat. */
+ PHY_B_PES_LRS = 1<<10, /* Bit 10: Local Receiver Stat. */
+ PHY_B_PES_LOCKED = 1<<9, /* Bit 9: Locked */
+ PHY_B_PES_LS = 1<<8, /* Bit 8: Link Status */
+ PHY_B_PES_RF = 1<<7, /* Bit 7: Remote Fault */
+ PHY_B_PES_CE_ER = 1<<6, /* Bit 6: Carrier Ext Error */
+ PHY_B_PES_BAD_SSD = 1<<5, /* Bit 5: Bad SSD */
+ PHY_B_PES_BAD_ESD = 1<<4, /* Bit 4: Bad ESD */
+ PHY_B_PES_RX_ER = 1<<3, /* Bit 3: Receive Error */
+ PHY_B_PES_TX_ER = 1<<2, /* Bit 2: Transmit Error */
+ PHY_B_PES_LOCK_ER = 1<<1, /* Bit 1: Lock Error */
+ PHY_B_PES_MLT3_ER = 1<<0, /* Bit 0: MLT3 code Error */
+};
+
+/* PHY_BCOM_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/
+/* PHY_BCOM_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
+enum {
+ PHY_B_AN_RF = 1<<13, /* Bit 13: Remote Fault */
+
+ PHY_B_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */
+ PHY_B_AN_PC = 1<<10, /* Bit 10: Pause Capable */
+};
+
+
+/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/
+enum {
+ PHY_B_FC_CTR = 0xff, /* Bit 7..0: False Carrier Counter */
+
+/***** PHY_BCOM_RNO_CTR 16 bit r/w Receive NOT_OK Counter *****/
+ PHY_B_RC_LOC_MSK = 0xff00, /* Bit 15..8: Local Rx NOT_OK cnt */
+ PHY_B_RC_REM_MSK = 0x00ff, /* Bit 7..0: Remote Rx NOT_OK cnt */
+
+/***** PHY_BCOM_AUX_CTRL 16 bit r/w Auxiliary Control Reg *****/
+ PHY_B_AC_L_SQE = 1<<15, /* Bit 15: Low Squelch */
+ PHY_B_AC_LONG_PACK = 1<<14, /* Bit 14: Rx Long Packets */
+ PHY_B_AC_ER_CTRL = 3<<12,/* Bit 13..12: Edgerate Control */
+ /* Bit 11: reserved */
+ PHY_B_AC_TX_TST = 1<<10, /* Bit 10: Tx test bit, always 1 */
+ /* Bit 9.. 8: reserved */
+ PHY_B_AC_DIS_PRF = 1<<7, /* Bit 7: dis part resp filter */
+ /* Bit 6: reserved */
+ PHY_B_AC_DIS_PM = 1<<5, /* Bit 5: dis power management */
+ /* Bit 4: reserved */
+ PHY_B_AC_DIAG = 1<<3, /* Bit 3: Diagnostic Mode */
+};
+
+/***** PHY_BCOM_AUX_STAT 16 bit r/o Auxiliary Status Reg *****/
+enum {
+ PHY_B_AS_AN_C = 1<<15, /* Bit 15: AutoNeg complete */
+ PHY_B_AS_AN_CA = 1<<14, /* Bit 14: AN Complete Ack */
+ PHY_B_AS_ANACK_D = 1<<13, /* Bit 13: AN Ack Detect */
+ PHY_B_AS_ANAB_D = 1<<12, /* Bit 12: AN Ability Detect */
+ PHY_B_AS_NPW = 1<<11, /* Bit 11: AN Next Page Wait */
+ PHY_B_AS_AN_RES_MSK = 7<<8,/* Bit 10..8: AN HCD */
+ PHY_B_AS_PDF = 1<<7, /* Bit 7: Parallel Detect. Fault */
+ PHY_B_AS_RF = 1<<6, /* Bit 6: Remote Fault */
+ PHY_B_AS_ANP_R = 1<<5, /* Bit 5: AN Page Received */
+ PHY_B_AS_LP_ANAB = 1<<4, /* Bit 4: LP AN Ability */
+ PHY_B_AS_LP_NPAB = 1<<3, /* Bit 3: LP Next Page Ability */
+ PHY_B_AS_LS = 1<<2, /* Bit 2: Link Status */
+ PHY_B_AS_PRR = 1<<1, /* Bit 1: Pause Resolution-Rx */
+ PHY_B_AS_PRT = 1<<0, /* Bit 0: Pause Resolution-Tx */
+};
+#define PHY_B_AS_PAUSE_MSK (PHY_B_AS_PRR | PHY_B_AS_PRT)
+
+/***** PHY_BCOM_INT_STAT 16 bit r/o Interrupt Status Reg *****/
+/***** PHY_BCOM_INT_MASK 16 bit r/w Interrupt Mask Reg *****/
+enum {
+ PHY_B_IS_PSE = 1<<14, /* Bit 14: Pair Swap Error */
+ PHY_B_IS_MDXI_SC = 1<<13, /* Bit 13: MDIX Status Change */
+ PHY_B_IS_HCT = 1<<12, /* Bit 12: counter above 32k */
+ PHY_B_IS_LCT = 1<<11, /* Bit 11: counter above 128 */
+ PHY_B_IS_AN_PR = 1<<10, /* Bit 10: Page Received */
+ PHY_B_IS_NO_HDCL = 1<<9, /* Bit 9: No HCD Link */
+ PHY_B_IS_NO_HDC = 1<<8, /* Bit 8: No HCD */
+ PHY_B_IS_NEG_USHDC = 1<<7, /* Bit 7: Negotiated Unsup. HCD */
+ PHY_B_IS_SCR_S_ER = 1<<6, /* Bit 6: Scrambler Sync Error */
+ PHY_B_IS_RRS_CHANGE = 1<<5, /* Bit 5: Remote Rx Stat Change */
+ PHY_B_IS_LRS_CHANGE = 1<<4, /* Bit 4: Local Rx Stat Change */
+ PHY_B_IS_DUP_CHANGE = 1<<3, /* Bit 3: Duplex Mode Change */
+ PHY_B_IS_LSP_CHANGE = 1<<2, /* Bit 2: Link Speed Change */
+ PHY_B_IS_LST_CHANGE = 1<<1, /* Bit 1: Link Status Changed */
+ PHY_B_IS_CRC_ER = 1<<0, /* Bit 0: CRC Error */
+};
+#define PHY_B_DEF_MSK \
+ (~(PHY_B_IS_PSE | PHY_B_IS_AN_PR | PHY_B_IS_DUP_CHANGE | \
+ PHY_B_IS_LSP_CHANGE | PHY_B_IS_LST_CHANGE))
+
+/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */
+enum {
+ PHY_B_P_NO_PAUSE = 0<<10,/* Bit 11..10: no Pause Mode */
+ PHY_B_P_SYM_MD = 1<<10, /* Bit 11..10: symmetric Pause Mode */
+ PHY_B_P_ASYM_MD = 2<<10,/* Bit 11..10: asymmetric Pause Mode */
+ PHY_B_P_BOTH_MD = 3<<10,/* Bit 11..10: both Pause Mode */
+};
+/*
+ * Resolved Duplex mode and Capabilities (Aux Status Summary Reg)
+ */
+enum {
+ PHY_B_RES_1000FD = 7<<8,/* Bit 10..8: 1000Base-T Full Dup. */
+ PHY_B_RES_1000HD = 6<<8,/* Bit 10..8: 1000Base-T Half Dup. */
+};
+
+/* Marvell-Specific */
+enum {
+ PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */
+ PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */
+ PHY_M_AN_RF = 1<<13, /* Remote Fault */
+
+ PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */
+ PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */
+ PHY_M_AN_100_T4 = 1<<9, /* Not cap. 100Base-T4 (always 0) */
+ PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */
+ PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */
+ PHY_M_AN_10_FD = 1<<6, /* Advertise 10Base-TX Full Duplex */
+ PHY_M_AN_10_HD = 1<<5, /* Advertise 10Base-TX Half Duplex */
+ PHY_M_AN_SEL_MSK = 0x1f, /* Bit 4.. 0: Selector Field Mask */
+};
+
+/* special defines for FIBER (88E1011S only) */
+enum {
+ PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */
+ PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */
+ PHY_M_AN_1000X_AHD = 1<<6, /* Advertise 1000Base-X Half Duplex */
+ PHY_M_AN_1000X_AFD = 1<<5, /* Advertise 1000Base-X Full Duplex */
+};
+
+/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */
+enum {
+ PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */
+ PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */
+ PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */
+ PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 7: both Pause Mode */
+};
+
+/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
+enum {
+ PHY_M_1000C_TEST= 7<<13,/* Bit 15..13: Test Modes */
+ PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */
+ PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */
+ PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */
+ PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */
+ PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */
+};
+
+/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/
+enum {
+ PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */
+ PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */
+ PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */
+ PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */
+ PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit 9.. 8: Energy Detect Mask */
+ PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */
+ PHY_M_PC_MDIX_MSK = 3<<5,/* Bit 6.. 5: MDI/MDIX Config. Mask */
+ PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */
+ PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */
+ PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */
+ PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */
+ PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */
+};
+
+enum {
+ PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */
+ PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */
+};
+
+enum {
+ PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */
+ PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */
+ PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */
+};
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+enum {
+ PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */
+ PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */
+ PHY_M_PC_DIS_NLP_CK = 1<<13, /* Disable Normal Link Pulse (NLP) Check */
+ PHY_M_PC_ENA_LIP_NP = 1<<12, /* Enable Link Partner Next Page Reg. */
+ PHY_M_PC_DIS_NLP_GN = 1<<11, /* Disable Normal Link Pulse Generation */
+
+ PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */
+ PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */
+
+ PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */
+ PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */
+};
+
+/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/
+enum {
+ PHY_M_PS_SPEED_MSK = 3<<14, /* Bit 15..14: Speed Mask */
+ PHY_M_PS_SPEED_1000 = 1<<15, /* 10 = 1000 Mbps */
+ PHY_M_PS_SPEED_100 = 1<<14, /* 01 = 100 Mbps */
+ PHY_M_PS_SPEED_10 = 0, /* 00 = 10 Mbps */
+ PHY_M_PS_FULL_DUP = 1<<13, /* Full Duplex */
+ PHY_M_PS_PAGE_REC = 1<<12, /* Page Received */
+ PHY_M_PS_SPDUP_RES = 1<<11, /* Speed & Duplex Resolved */
+ PHY_M_PS_LINK_UP = 1<<10, /* Link Up */
+ PHY_M_PS_CABLE_MSK = 7<<7, /* Bit 9.. 7: Cable Length Mask */
+ PHY_M_PS_MDI_X_STAT = 1<<6, /* MDI Crossover Stat (1=MDIX) */
+ PHY_M_PS_DOWNS_STAT = 1<<5, /* Downshift Status (1=downsh.) */
+ PHY_M_PS_ENDET_STAT = 1<<4, /* Energy Detect Status (1=act) */
+ PHY_M_PS_TX_P_EN = 1<<3, /* Tx Pause Enabled */
+ PHY_M_PS_RX_P_EN = 1<<2, /* Rx Pause Enabled */
+ PHY_M_PS_POL_REV = 1<<1, /* Polarity Reversed */
+ PHY_M_PS_JABBER = 1<<0, /* Jabber */
+};
+
+#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN)
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+enum {
+ PHY_M_PS_DTE_DETECT = 1<<15, /* Data Terminal Equipment (DTE) Detected */
+ PHY_M_PS_RES_SPEED = 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps) */
+};
+
+enum {
+ PHY_M_IS_AN_ERROR = 1<<15, /* Auto-Negotiation Error */
+ PHY_M_IS_LSP_CHANGE = 1<<14, /* Link Speed Changed */
+ PHY_M_IS_DUP_CHANGE = 1<<13, /* Duplex Mode Changed */
+ PHY_M_IS_AN_PR = 1<<12, /* Page Received */
+ PHY_M_IS_AN_COMPL = 1<<11, /* Auto-Negotiation Completed */
+ PHY_M_IS_LST_CHANGE = 1<<10, /* Link Status Changed */
+ PHY_M_IS_SYMB_ERROR = 1<<9, /* Symbol Error */
+ PHY_M_IS_FALSE_CARR = 1<<8, /* False Carrier */
+ PHY_M_IS_FIFO_ERROR = 1<<7, /* FIFO Overflow/Underrun Error */
+ PHY_M_IS_MDI_CHANGE = 1<<6, /* MDI Crossover Changed */
+ PHY_M_IS_DOWNSH_DET = 1<<5, /* Downshift Detected */
+ PHY_M_IS_END_CHANGE = 1<<4, /* Energy Detect Changed */
+
+ PHY_M_IS_DTE_CHANGE = 1<<2, /* DTE Power Det. Status Changed */
+ PHY_M_IS_POL_CHANGE = 1<<1, /* Polarity Changed */
+ PHY_M_IS_JABBER = 1<<0, /* Jabber */
+
+ PHY_M_IS_DEF_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_LSP_CHANGE |
+ PHY_M_IS_LST_CHANGE | PHY_M_IS_FIFO_ERROR,
+
+ PHY_M_IS_AN_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL,
+};
+
+/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/
+enum {
+ PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */
+ PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */
+
+ PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */
+ PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */
+ /* (88E1011 only) */
+ PHY_M_EC_S_DSC_MSK = 3<<8, /* Bit 9.. 8: Slave Downshift Counter */
+ /* (88E1011 only) */
+ PHY_M_EC_M_DSC_MSK2 = 7<<9, /* Bit 11.. 9: Master Downshift Counter */
+ /* (88E1111 only) */
+ PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */
+ /* !!! Errata in spec. (1 = disable) */
+ PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control*/
+ PHY_M_EC_MAC_S_MSK = 7<<4, /* Bit 6.. 4: Def. MAC interface speed */
+ PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */
+ PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */
+ PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */
+ PHY_M_EC_TRANS_DIS = 1<<0, /* Transmitter Disable (88E1111 only) */
+};
+
+#define PHY_M_EC_M_DSC(x) ((u16)(x)<<10) /* 00=1x; 01=2x; 10=3x; 11=4x */
+#define PHY_M_EC_S_DSC(x) ((u16)(x)<<8) /* 00=dis; 01=1x; 10=2x; 11=3x */
+#define PHY_M_EC_MAC_S(x) ((u16)(x)<<4) /* 01X=0; 110=2.5; 111=25 (MHz) */
+
+#define PHY_M_EC_M_DSC_2(x) ((u16)(x)<<9) /* 000=1x; 001=2x; 010=3x; 011=4x */
+ /* 100=5x; 101=6x; 110=7x; 111=8x */
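+
+/*
+ * Usage sketch: the downshift counters are read-modify-write fields.
+ * Assuming a gm_phy_read()/gm_phy_write() accessor pair for the
+ * indirectly addressed PHY registers:
+ *
+ *    u16 ctrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
+ *    ctrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK);
+ *    ctrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
+ *    gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ctrl);
+ */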
+enum {
+ MAC_TX_CLK_0_MHZ = 2,
+ MAC_TX_CLK_2_5_MHZ = 6,
+ MAC_TX_CLK_25_MHZ = 7,
+};
+
+/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/
+enum {
+ PHY_M_LEDC_DIS_LED = 1<<15, /* Disable LED */
+ PHY_M_LEDC_PULS_MSK = 7<<12,/* Bit 14..12: Pulse Stretch Mask */
+ PHY_M_LEDC_F_INT = 1<<11, /* Force Interrupt */
+ PHY_M_LEDC_BL_R_MSK = 7<<8,/* Bit 10.. 8: Blink Rate Mask */
+ PHY_M_LEDC_DP_C_LSB = 1<<7, /* Duplex Control (LSB, 88E1111 only) */
+ PHY_M_LEDC_TX_C_LSB = 1<<6, /* Tx Control (LSB, 88E1111 only) */
+ PHY_M_LEDC_LK_C_MSK = 7<<3,/* Bit 5.. 3: Link Control Mask */
+ /* (88E1111 only) */
+};
+#define PHY_M_LED_PULS_DUR(x) (((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK)
+#define PHY_M_LED_BLINK_RT(x) (((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK)
+
+enum {
+ PHY_M_LEDC_LINK_MSK = 3<<3, /* Bit 4.. 3: Link Control Mask */
+ /* (88E1011 only) */
+ PHY_M_LEDC_DP_CTRL = 1<<2, /* Duplex Control */
+ PHY_M_LEDC_DP_C_MSB = 1<<2, /* Duplex Control (MSB, 88E1111 only) */
+ PHY_M_LEDC_RX_CTRL = 1<<1, /* Rx Activity / Link */
+ PHY_M_LEDC_TX_CTRL = 1<<0, /* Tx Activity / Link */
+ PHY_M_LEDC_TX_C_MSB = 1<<0, /* Tx Control (MSB, 88E1111 only) */
+};
+
+enum {
+ PULS_NO_STR = 0, /* no pulse stretching */
+ PULS_21MS = 1, /* 21 ms to 42 ms */
+ PULS_42MS = 2, /* 42 ms to 84 ms */
+ PULS_84MS = 3, /* 84 ms to 170 ms */
+ PULS_170MS = 4, /* 170 ms to 340 ms */
+ PULS_340MS = 5, /* 340 ms to 670 ms */
+ PULS_670MS = 6, /* 670 ms to 1.3 s */
+ PULS_1300MS = 7, /* 1.3 s to 2.7 s */
+};
+
+
+enum {
+ BLINK_42MS = 0, /* 42 ms */
+ BLINK_84MS = 1, /* 84 ms */
+ BLINK_170MS = 2, /* 170 ms */
+ BLINK_340MS = 3, /* 340 ms */
+ BLINK_670MS = 4, /* 670 ms */
+};
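+
+/*
+ * Example (sketch): pulse stretching and blink rate combine into a
+ * single PHY_MARV_LED_CTRL value through the masking macros above:
+ *
+ *    u16 ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS) |
+ *                  PHY_M_LED_BLINK_RT(BLINK_84MS);
+ */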
+
+/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/
+#define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */
+ /* Bit 13..12: reserved */
+#define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */
+#define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */
+#define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */
+#define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */
+#define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */
+#define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */
+
+enum {
+ MO_LED_NORM = 0,
+ MO_LED_BLINK = 1,
+ MO_LED_OFF = 2,
+ MO_LED_ON = 3,
+};
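+
+/*
+ * Example (sketch): each two-bit field of the override register takes
+ * one of the MO_LED_* codes, e.g. forcing every LED on for a locate
+ * test:
+ *
+ *    u16 over = PHY_M_LED_MO_DUP(MO_LED_ON) | PHY_M_LED_MO_10(MO_LED_ON) |
+ *               PHY_M_LED_MO_100(MO_LED_ON) | PHY_M_LED_MO_1000(MO_LED_ON) |
+ *               PHY_M_LED_MO_RX(MO_LED_ON) | PHY_M_LED_MO_TX(MO_LED_ON);
+ */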
+
+/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/
+enum {
+ PHY_M_EC2_FI_IMPED = 1<<6, /* Fiber Input Impedance */
+ PHY_M_EC2_FO_IMPED = 1<<5, /* Fiber Output Impedance */
+ PHY_M_EC2_FO_M_CLK = 1<<4, /* Fiber Mode Clock Enable */
+ PHY_M_EC2_FO_BOOST = 1<<3, /* Fiber Output Boost */
+ PHY_M_EC2_FO_AM_MSK = 7, /* Bit 2.. 0: Fiber Output Amplitude */
+};
+
+/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/
+enum {
+ PHY_M_FC_AUTO_SEL = 1<<15, /* Fiber/Copper Auto Sel. Dis. */
+ PHY_M_FC_AN_REG_ACC = 1<<14, /* Fiber/Copper AN Reg. Access */
+ PHY_M_FC_RESOLUTION = 1<<13, /* Fiber/Copper Resolution */
+ PHY_M_SER_IF_AN_BP = 1<<12, /* Ser. IF AN Bypass Enable */
+ PHY_M_SER_IF_BP_ST = 1<<11, /* Ser. IF AN Bypass Status */
+ PHY_M_IRQ_POLARITY = 1<<10, /* IRQ polarity */
+ PHY_M_DIS_AUT_MED = 1<<9, /* Disable Aut. Medium Reg. Selection */
+ /* (88E1111 only) */
+ /* Bit 9.. 4: reserved (88E1011 only) */
+ PHY_M_UNDOC1 = 1<<7, /* undocumented bit !! */
+ PHY_M_DTE_POW_STAT = 1<<4, /* DTE Power Status (88E1111 only) */
+ PHY_M_MODE_MASK = 0xf, /* Bit 3.. 0: copy of HWCFG MODE[3:0] */
+};
+
+/***** PHY_MARV_CABLE_DIAG 16 bit r/o Cable Diagnostic Reg *****/
+enum {
+ PHY_M_CABD_ENA_TEST = 1<<15, /* Enable Test (Page 0) */
+ PHY_M_CABD_DIS_WAIT = 1<<15, /* Disable Waiting Period (Page 1) */
+ /* (88E1111 only) */
+ PHY_M_CABD_STAT_MSK = 3<<13, /* Bit 14..13: Status Mask */
+ PHY_M_CABD_AMPL_MSK = 0x1f<<8, /* Bit 12.. 8: Amplitude Mask */
+ /* (88E1111 only) */
+ PHY_M_CABD_DIST_MSK = 0xff, /* Bit 7.. 0: Distance Mask */
+};
+
+/* values for Cable Diagnostic Status (11=fail; 00=OK; 10=open; 01=short) */
+enum {
+ CABD_STAT_NORMAL= 0,
+ CABD_STAT_SHORT = 1,
+ CABD_STAT_OPEN = 2,
+ CABD_STAT_FAIL = 3,
+};
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+/***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/
+ /* Bit 15..12: reserved (used internally) */
+enum {
+ PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */
+ PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit 7.. 4: LED1 Mask (ACT) */
+ PHY_M_FELP_LED0_MSK = 0xf, /* Bit 3.. 0: LED0 Mask (SPEED) */
+};
+
+#define PHY_M_FELP_LED2_CTRL(x) (((x)<<8) & PHY_M_FELP_LED2_MSK)
+#define PHY_M_FELP_LED1_CTRL(x) (((x)<<4) & PHY_M_FELP_LED1_MSK)
+#define PHY_M_FELP_LED0_CTRL(x) (((x)<<0) & PHY_M_FELP_LED0_MSK)
+
+enum {
+ LED_PAR_CTRL_COLX = 0x00,
+ LED_PAR_CTRL_ERROR = 0x01,
+ LED_PAR_CTRL_DUPLEX = 0x02,
+ LED_PAR_CTRL_DP_COL = 0x03,
+ LED_PAR_CTRL_SPEED = 0x04,
+ LED_PAR_CTRL_LINK = 0x05,
+ LED_PAR_CTRL_TX = 0x06,
+ LED_PAR_CTRL_RX = 0x07,
+ LED_PAR_CTRL_ACT = 0x08,
+ LED_PAR_CTRL_LNK_RX = 0x09,
+ LED_PAR_CTRL_LNK_AC = 0x0a,
+ LED_PAR_CTRL_ACT_BL = 0x0b,
+ LED_PAR_CTRL_TX_BL = 0x0c,
+ LED_PAR_CTRL_RX_BL = 0x0d,
+ LED_PAR_CTRL_COL_BL = 0x0e,
+ LED_PAR_CTRL_INACT = 0x0f
+};
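+
+/*
+ * Example (sketch): on the 88E3082 each LED is given one of the
+ * LED_PAR_CTRL_* functions above, packed with the PHY_M_FELP_LED*_CTRL()
+ * macros:
+ *
+ *    u16 ledpar = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_LINK) |
+ *                 PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT) |
+ *                 PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED);
+ */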
+
+/***** PHY_MARV_FE_SPEC_2 16 bit r/w Specific Control Reg. 2 *****/
+enum {
+ PHY_M_FESC_DIS_WAIT = 1<<2, /* Disable TDR Waiting Period */
+ PHY_M_FESC_ENA_MCLK = 1<<1, /* Enable MAC Rx Clock in sleep mode */
+ PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */
+};
+
+
+/***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/
+enum {
+ PHY_M_LEDC_LOS_MSK = 0xf<<12, /* Bit 15..12: LOS LED Ctrl. Mask */
+ PHY_M_LEDC_INIT_MSK = 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */
+ PHY_M_LEDC_STA1_MSK = 0xf<<4, /* Bit 7.. 4: STAT1 LED Ctrl. Mask */
+ PHY_M_LEDC_STA0_MSK = 0xf, /* Bit 3.. 0: STAT0 LED Ctrl. Mask */
+};
+
+#define PHY_M_LEDC_LOS_CTRL(x) (((x)<<12) & PHY_M_LEDC_LOS_MSK)
+#define PHY_M_LEDC_INIT_CTRL(x) (((x)<<8) & PHY_M_LEDC_INIT_MSK)
+#define PHY_M_LEDC_STA1_CTRL(x) (((x)<<4) & PHY_M_LEDC_STA1_MSK)
+#define PHY_M_LEDC_STA0_CTRL(x) (((x)<<0) & PHY_M_LEDC_STA0_MSK)
+
+/* GMAC registers */
+/* Port Registers */
+enum {
+ GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */
+ GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */
+ GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */
+ GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */
+ GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */
+ GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */
+ GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */
+/* Source Address Registers */
+ GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */
+ GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */
+ GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */
+ GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */
+ GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */
+ GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */
+
+/* Multicast Address Hash Registers */
+ GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */
+ GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */
+ GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */
+ GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */
+
+/* Interrupt Source Registers */
+ GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */
+ GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */
+ GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */
+
+/* Interrupt Mask Registers */
+ GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */
+ GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */
+ GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. IRQ Mask */
+
+/* Serial Management Interface (SMI) Registers */
+ GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */
+ GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */
+ GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */
+};
+
+/* MIB Counters */
+#define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */
+#define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */
+
+/*
+ * MIB Counters base address definitions (low word) -
+ * use offset 4 for access to high word (32 bit r/o)
+ */
+enum {
+ GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */
+ GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */
+ GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */
+ GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */
+ GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. Error */
+ /* GM_MIB_CNT_BASE + 40: reserved */
+ GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */
+ GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */
+ GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */
+ GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */
+ GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */
+ GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */
+ GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */
+ GM_RXF_127B = GM_MIB_CNT_BASE + 104, /* 65-127 Byte Rx Frame */
+ GM_RXF_255B = GM_MIB_CNT_BASE + 112, /* 128-255 Byte Rx Frame */
+ GM_RXF_511B = GM_MIB_CNT_BASE + 120, /* 256-511 Byte Rx Frame */
+ GM_RXF_1023B = GM_MIB_CNT_BASE + 128, /* 512-1023 Byte Rx Frame */
+ GM_RXF_1518B = GM_MIB_CNT_BASE + 136, /* 1024-1518 Byte Rx Frame */
+ GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144, /* 1519-MaxSize Byte Rx Frame */
+ GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152, /* Rx Frame too Long Error */
+ GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160, /* Rx Jabber Packet Frame */
+ /* GM_MIB_CNT_BASE + 168: reserved */
+ GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176, /* Rx FIFO overflow Event */
+ /* GM_MIB_CNT_BASE + 184: reserved */
+ GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192, /* Unicast Frames Xmitted OK */
+ GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200, /* Broadcast Frames Xmitted OK */
+ GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208, /* Pause MAC Ctrl Frames Xmitted */
+ GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216, /* Multicast Frames Xmitted OK */
+ GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224, /* Octets Transmitted OK Low */
+ GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232, /* Octets Transmitted OK High */
+ GM_TXF_64B = GM_MIB_CNT_BASE + 240, /* 64 Byte Tx Frame */
+ GM_TXF_127B = GM_MIB_CNT_BASE + 248, /* 65-127 Byte Tx Frame */
+ GM_TXF_255B = GM_MIB_CNT_BASE + 256, /* 128-255 Byte Tx Frame */
+ GM_TXF_511B = GM_MIB_CNT_BASE + 264, /* 256-511 Byte Tx Frame */
+ GM_TXF_1023B = GM_MIB_CNT_BASE + 272, /* 512-1023 Byte Tx Frame */
+ GM_TXF_1518B = GM_MIB_CNT_BASE + 280, /* 1024-1518 Byte Tx Frame */
+ GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288, /* 1519-MaxSize Byte Tx Frame */
+
+ GM_TXF_COL = GM_MIB_CNT_BASE + 304, /* Tx Collision */
+ GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312, /* Tx Late Collision */
+ GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320, /* Tx aborted due to Exces. Col. */
+ GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328, /* Tx Multiple Collision */
+ GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336, /* Tx Single Collision */
+ GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344, /* Tx FIFO Underrun Event */
+};
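+
+/*
+ * Usage sketch: the 64 bit octet counters are split over two MIB slots;
+ * assuming a gma_read32() accessor for per-port GMAC registers, the
+ * good-octets-received count is assembled as:
+ *
+ *    u64 octets = ((u64)gma_read32(hw, port, GM_RXO_OK_HI) << 32) |
+ *                 gma_read32(hw, port, GM_RXO_OK_LO);
+ */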
+
+/* GMAC Bit Definitions */
+/* GM_GP_STAT 16 bit r/o General Purpose Status Register */
+enum {
+ GM_GPSR_SPEED = 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */
+ GM_GPSR_DUPLEX = 1<<14, /* Bit 14: Duplex Mode (1 = Full) */
+ GM_GPSR_FC_TX_DIS = 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */
+ GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */
+ GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */
+ GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */
+ GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */
+ GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */
+
+ GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */
+ GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */
+ GM_GPSR_PART_MODE = 1<<3, /* Bit 3: Partition mode */
+ GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */
+ GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */
+};
+
+/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */
+enum {
+ GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */
+ GM_GPCR_FC_TX_DIS = 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */
+ GM_GPCR_TX_ENA = 1<<12, /* Bit 12: Enable Transmit */
+ GM_GPCR_RX_ENA = 1<<11, /* Bit 11: Enable Receive */
+ GM_GPCR_BURST_ENA = 1<<10, /* Bit 10: Enable Burst Mode */
+ GM_GPCR_LOOP_ENA = 1<<9, /* Bit 9: Enable MAC Loopback Mode */
+ GM_GPCR_PART_ENA = 1<<8, /* Bit 8: Enable Partition Mode */
+ GM_GPCR_GIGS_ENA = 1<<7, /* Bit 7: Gigabit Speed (1000 Mbps) */
+ GM_GPCR_FL_PASS = 1<<6, /* Bit 6: Force Link Pass */
+ GM_GPCR_DUP_FULL = 1<<5, /* Bit 5: Full Duplex Mode */
+ GM_GPCR_FC_RX_DIS = 1<<4, /* Bit 4: Disable Rx Flow-Control Mode */
+ GM_GPCR_SPEED_100 = 1<<3, /* Bit 3: Port Speed 100 Mbps */
+ GM_GPCR_AU_DUP_DIS = 1<<2, /* Bit 2: Disable Auto-Update Duplex */
+ GM_GPCR_AU_FCT_DIS = 1<<1, /* Bit 1: Disable Auto-Update Flow-C. */
+ GM_GPCR_AU_SPD_DIS = 1<<0, /* Bit 0: Disable Auto-Update Speed */
+};
+
+#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
+#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS | GM_GPCR_AU_SPD_DIS)
+
+/* GM_TX_CTRL 16 bit r/w Transmit Control Register */
+enum {
+ GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */
+ GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */
+ GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */
+ GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */
+};
+
+#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK)
+#define TX_COL_DEF 0x04 /* late collision after 64 byte */
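+
+/*
+ * Usage sketch: transmit control is typically initialized with just the
+ * default collision threshold (gma_write16() is an assumed accessor):
+ *
+ *    gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
+ */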
+
+/* GM_RX_CTRL 16 bit r/w Receive Control Register */
+enum {
+ GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */
+ GM_RXCR_MCF_ENA = 1<<14, /* Bit 14: Enable Multicast filtering */
+ GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */
+ GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */
+};
+
+/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */
+enum {
+ GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */
+ GM_TXPA_JAMIPG_MSK = 0x1f<<9, /* Bit 13..9: Jam IPG */
+ GM_TXPA_JAMDAT_MSK = 0x1f<<4, /* Bit 8..4: IPG Jam to Data */
+
+ TX_JAM_LEN_DEF = 0x03,
+ TX_JAM_IPG_DEF = 0x0b,
+ TX_IPG_JAM_DEF = 0x1c,
+};
+
+#define TX_JAM_LEN_VAL(x) (((x)<<14) & GM_TXPA_JAMLEN_MSK)
+#define TX_JAM_IPG_VAL(x) (((x)<<9) & GM_TXPA_JAMIPG_MSK)
+#define TX_IPG_JAM_DATA(x) (((x)<<4) & GM_TXPA_JAMDAT_MSK)
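+
+/*
+ * Example (sketch): the three jam fields combine into one GM_TX_PARAM
+ * value, here with their defaults:
+ *
+ *    u16 txpa = TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
+ *               TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
+ *               TX_IPG_JAM_DATA(TX_IPG_JAM_DEF);
+ */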
+
+
+/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */
+enum {
+ GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */
+ GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */
+ GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */
+ GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */
+ GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
+};
+
+#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
+#define DATA_BLIND_DEF 0x04
+
+#define IPG_DATA_VAL(x) ((x) & GM_SMOD_IPG_MSK)
+#define IPG_DATA_DEF 0x1e
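+
+/*
+ * Example (sketch): a typical serial mode setup enables VLAN-sized
+ * frames and programs the default data blinder and IPG:
+ *
+ *    u16 smod = DATA_BLIND_VAL(DATA_BLIND_DEF) |
+ *               GM_SMOD_VLAN_ENA |
+ *               IPG_DATA_VAL(IPG_DATA_DEF);
+ */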
+
+/* GM_SMI_CTRL 16 bit r/w SMI Control Register */
+enum {
+ GM_SMI_CT_PHY_A_MSK = 0x1f<<11, /* Bit 15..11: PHY Device Address */
+ GM_SMI_CT_REG_A_MSK = 0x1f<<6, /* Bit 10.. 6: PHY Register Address */
+ GM_SMI_CT_OP_RD = 1<<5, /* Bit 5: OpCode Read (0=Write)*/
+ GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */
+ GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */
+};
+
+#define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK)
+#define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK)
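+
+/*
+ * Usage sketch: an indirect PHY read is started by writing a command
+ * word and polling for GM_SMI_CT_RD_VAL; gma_write16()/gma_read16()
+ * are assumed per-port GMAC accessors:
+ *
+ *    u16 val;
+ *
+ *    gma_write16(hw, port, GM_SMI_CTRL,
+ *                GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) |
+ *                GM_SMI_CT_REG_AD(PHY_MARV_STAT) | GM_SMI_CT_OP_RD);
+ *    while (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL))
+ *            cpu_relax();
+ *    val = gma_read16(hw, port, GM_SMI_DATA);
+ */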
+
+/* GM_PHY_ADDR 16 bit r/w GPHY Address Register */
+enum {
+ GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */
+ GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */
+};
+
+/* Receive Frame Status Encoding */
+enum {
+ GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */
+ GMR_FS_LEN_SHIFT = 16,
+ GMR_FS_VLAN = 1<<13, /* Bit 13: VLAN Packet */
+ GMR_FS_JABBER = 1<<12, /* Bit 12: Jabber Packet */
+ GMR_FS_UN_SIZE = 1<<11, /* Bit 11: Undersize Packet */
+ GMR_FS_MC = 1<<10, /* Bit 10: Multicast Packet */
+ GMR_FS_BC = 1<<9, /* Bit 9: Broadcast Packet */
+ GMR_FS_RX_OK = 1<<8, /* Bit 8: Receive OK (Good Packet) */
+ GMR_FS_GOOD_FC = 1<<7, /* Bit 7: Good Flow-Control Packet */
+ GMR_FS_BAD_FC = 1<<6, /* Bit 6: Bad Flow-Control Packet */
+ GMR_FS_MII_ERR = 1<<5, /* Bit 5: MII Error */
+ GMR_FS_LONG_ERR = 1<<4, /* Bit 4: Too Long Packet */
+ GMR_FS_FRAGMENT = 1<<3, /* Bit 3: Fragment */
+
+ GMR_FS_CRC_ERR = 1<<1, /* Bit 1: CRC Error */
+ GMR_FS_RX_FF_OV = 1<<0, /* Bit 0: Rx FIFO Overflow */
+
+/*
+ * GMR_FS_ANY_ERR: error summary (analogous to XMR_FS_ERR above)
+ */
+ GMR_FS_ANY_ERR = GMR_FS_CRC_ERR | GMR_FS_LONG_ERR |
+ GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC |
+ GMR_FS_JABBER,
+/* Rx GMAC FIFO Flush Mask (default) */
+ RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV | GMR_FS_MII_ERR |
+ GMR_FS_BAD_FC | GMR_FS_UN_SIZE | GMR_FS_JABBER,
+};
+
+/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
+enum {
+ GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */
+ GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */
+ GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */
+
+ GMF_RP_TST_ON = 1<<10, /* Read Pointer Test On */
+ GMF_RP_TST_OFF = 1<<9, /* Read Pointer Test Off */
+ GMF_RP_STEP = 1<<8, /* Read Pointer Step/Increment */
+ GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */
+ GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */
+ GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */
+ GMF_CLI_RX_FC = 1<<4, /* Clear IRQ Rx Frame Complete */
+ GMF_OPER_ON = 1<<3, /* Operational Mode On */
+ GMF_OPER_OFF = 1<<2, /* Operational Mode Off */
+ GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */
+ GMF_RST_SET = 1<<0, /* Set GMAC FIFO Reset */
+
+ RX_GMF_FL_THR_DEF = 0xa, /* flush threshold (default) */
+};
+
+
+/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */
+enum {
+ GMF_WSP_TST_ON = 1<<18, /* Write Shadow Pointer Test On */
+ GMF_WSP_TST_OFF = 1<<17, /* Write Shadow Pointer Test Off */
+ GMF_WSP_STEP = 1<<16, /* Write Shadow Pointer Step/Increment */
+
+ GMF_CLI_TX_FU = 1<<6, /* Clear IRQ Tx FIFO Underrun */
+ GMF_CLI_TX_FC = 1<<5, /* Clear IRQ Tx Frame Complete */
+ GMF_CLI_TX_PE = 1<<4, /* Clear IRQ Tx Parity Error */
+};
+
+/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */
+enum {
+ GMT_ST_START = 1<<2, /* Start Time Stamp Timer */
+ GMT_ST_STOP = 1<<1, /* Stop Time Stamp Timer */
+ GMT_ST_CLR_IRQ = 1<<0, /* Clear Time Stamp Timer IRQ */
+};
+
+/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */
+enum {
+ GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */
+ GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */
+ GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */
+ GMC_F_LOOPB_OFF = 1<<4, /* FIFO Loopback Off */
+ GMC_PAUSE_ON = 1<<3, /* Pause On */
+ GMC_PAUSE_OFF = 1<<2, /* Pause Off */
+ GMC_RST_CLR = 1<<1, /* Clear GMAC Reset */
+ GMC_RST_SET = 1<<0, /* Set GMAC Reset */
+};
+
+/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */
+enum {
+ GPC_SEL_BDT = 1<<28, /* Select Bi-Dir. Transfer for MDC/MDIO */
+ GPC_INT_POL_HI = 1<<27, /* IRQ Polarity is Active HIGH */
+ GPC_75_OHM = 1<<26, /* Use 75 Ohm Termination instead of 50 */
+ GPC_DIS_FC = 1<<25, /* Disable Automatic Fiber/Copper Detection */
+ GPC_DIS_SLEEP = 1<<24, /* Disable Energy Detect */
+ GPC_HWCFG_M_3 = 1<<23, /* HWCFG_MODE[3] */
+ GPC_HWCFG_M_2 = 1<<22, /* HWCFG_MODE[2] */
+ GPC_HWCFG_M_1 = 1<<21, /* HWCFG_MODE[1] */
+ GPC_HWCFG_M_0 = 1<<20, /* HWCFG_MODE[0] */
+ GPC_ANEG_0 = 1<<19, /* ANEG[0] */
+ GPC_ENA_XC = 1<<18, /* Enable MDI crossover */
+ GPC_DIS_125 = 1<<17, /* Disable 125 MHz clock */
+ GPC_ANEG_3 = 1<<16, /* ANEG[3] */
+ GPC_ANEG_2 = 1<<15, /* ANEG[2] */
+ GPC_ANEG_1 = 1<<14, /* ANEG[1] */
+ GPC_ENA_PAUSE = 1<<13, /* Enable Pause (SYM_OR_REM) */
+ GPC_PHYADDR_4 = 1<<12, /* Bit 4 of Phy Addr */
+ GPC_PHYADDR_3 = 1<<11, /* Bit 3 of Phy Addr */
+ GPC_PHYADDR_2 = 1<<10, /* Bit 2 of Phy Addr */
+ GPC_PHYADDR_1 = 1<<9, /* Bit 1 of Phy Addr */
+ GPC_PHYADDR_0 = 1<<8, /* Bit 0 of Phy Addr */
+ /* Bits 7..2: reserved */
+ GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */
+ GPC_RST_SET = 1<<0, /* Set GPHY Reset */
+};
+
+#define GPC_HWCFG_GMII_COP (GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0)
+#define GPC_HWCFG_GMII_FIB (GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0)
+#define GPC_ANEG_ADV_ALL_M (GPC_ANEG_3 | GPC_ANEG_2 | GPC_ANEG_1 | GPC_ANEG_0)
+
+/* forced speed and duplex mode (don't mix with other ANEG bits) */
+#define GPC_FRC10MBIT_HALF 0
+#define GPC_FRC10MBIT_FULL GPC_ANEG_0
+#define GPC_FRC100MBIT_HALF GPC_ANEG_1
+#define GPC_FRC100MBIT_FULL (GPC_ANEG_0 | GPC_ANEG_1)
+
+/* auto-negotiation with limited advertised speeds */
+/* mix only with master/slave settings (for copper) */
+#define GPC_ADV_1000_HALF GPC_ANEG_2
+#define GPC_ADV_1000_FULL GPC_ANEG_3
+#define GPC_ADV_ALL (GPC_ANEG_2 | GPC_ANEG_3)
+
+/* master/slave settings */
+/* only for copper with 1000 Mbps */
+#define GPC_FORCE_MASTER 0
+#define GPC_FORCE_SLAVE GPC_ANEG_0
+#define GPC_PREF_MASTER GPC_ANEG_1
+#define GPC_PREF_SLAVE (GPC_ANEG_1 | GPC_ANEG_0)
+
+/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */
+/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */
+enum {
+ GM_IS_TX_CO_OV = 1<<5, /* Transmit Counter Overflow IRQ */
+ GM_IS_RX_CO_OV = 1<<4, /* Receive Counter Overflow IRQ */
+ GM_IS_TX_FF_UR = 1<<3, /* Transmit FIFO Underrun */
+ GM_IS_TX_COMPL = 1<<2, /* Frame Transmission Complete */
+ GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
+ GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
+
+#define GMAC_DEF_MSK (GM_IS_RX_FF_OR | GM_IS_TX_FF_UR)
+
+/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
+ /* Bits 15.. 2: reserved */
+ GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */
+ GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */
+
+
+/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */
+ WOL_CTL_LINK_CHG_OCC = 1<<15,
+ WOL_CTL_MAGIC_PKT_OCC = 1<<14,
+ WOL_CTL_PATTERN_OCC = 1<<13,
+ WOL_CTL_CLEAR_RESULT = 1<<12,
+ WOL_CTL_ENA_PME_ON_LINK_CHG = 1<<11,
+ WOL_CTL_DIS_PME_ON_LINK_CHG = 1<<10,
+ WOL_CTL_ENA_PME_ON_MAGIC_PKT = 1<<9,
+ WOL_CTL_DIS_PME_ON_MAGIC_PKT = 1<<8,
+ WOL_CTL_ENA_PME_ON_PATTERN = 1<<7,
+ WOL_CTL_DIS_PME_ON_PATTERN = 1<<6,
+ WOL_CTL_ENA_LINK_CHG_UNIT = 1<<5,
+ WOL_CTL_DIS_LINK_CHG_UNIT = 1<<4,
+ WOL_CTL_ENA_MAGIC_PKT_UNIT = 1<<3,
+ WOL_CTL_DIS_MAGIC_PKT_UNIT = 1<<2,
+ WOL_CTL_ENA_PATTERN_UNIT = 1<<1,
+ WOL_CTL_DIS_PATTERN_UNIT = 1<<0,
+};
+
+#define WOL_CTL_DEFAULT \
+ (WOL_CTL_DIS_PME_ON_LINK_CHG | \
+ WOL_CTL_DIS_PME_ON_PATTERN | \
+ WOL_CTL_DIS_PME_ON_MAGIC_PKT | \
+ WOL_CTL_DIS_LINK_CHG_UNIT | \
+ WOL_CTL_DIS_PATTERN_UNIT | \
+ WOL_CTL_DIS_MAGIC_PKT_UNIT)
+
+/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */
+#define WOL_CTL_PATT_ENA(x) (1 << (x))
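+
+/*
+ * Example (illustrative): enabling match patterns 0 and 2 would write
+ * WOL_CTL_PATT_ENA(0) | WOL_CTL_PATT_ENA(2) == 0x05 to WOL_MATCH_CTL.
+ */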
+
+
+/* XMAC II registers */
+enum {
+ XM_MMU_CMD = 0x0000, /* 16 bit r/w MMU Command Register */
+ XM_POFF = 0x0008, /* 32 bit r/w Packet Offset Register */
+ XM_BURST = 0x000c, /* 32 bit r/w Burst Register for half duplex*/
+ XM_1L_VLAN_TAG = 0x0010, /* 16 bit r/w One Level VLAN Tag ID */
+ XM_2L_VLAN_TAG = 0x0014, /* 16 bit r/w Two Level VLAN Tag ID */
+ XM_TX_CMD = 0x0020, /* 16 bit r/w Transmit Command Register */
+ XM_TX_RT_LIM = 0x0024, /* 16 bit r/w Transmit Retry Limit Register */
+ XM_TX_STIME = 0x0028, /* 16 bit r/w Transmit Slottime Register */
+ XM_TX_IPG = 0x002c, /* 16 bit r/w Transmit Inter Packet Gap */
+ XM_RX_CMD = 0x0030, /* 16 bit r/w Receive Command Register */
+ XM_PHY_ADDR = 0x0034, /* 16 bit r/w PHY Address Register */
+ XM_PHY_DATA = 0x0038, /* 16 bit r/w PHY Data Register */
+ XM_GP_PORT = 0x0040, /* 32 bit r/w General Purpose Port Register */
+ XM_IMSK = 0x0044, /* 16 bit r/w Interrupt Mask Register */
+ XM_ISRC = 0x0048, /* 16 bit r/o Interrupt Status Register */
+ XM_HW_CFG = 0x004c, /* 16 bit r/w Hardware Config Register */
+ XM_TX_LO_WM = 0x0060, /* 16 bit r/w Tx FIFO Low Water Mark */
+ XM_TX_HI_WM = 0x0062, /* 16 bit r/w Tx FIFO High Water Mark */
+ XM_TX_THR = 0x0064, /* 16 bit r/w Tx Request Threshold */
+ XM_HT_THR = 0x0066, /* 16 bit r/w Host Request Threshold */
+ XM_PAUSE_DA = 0x0068, /* NA reg r/w Pause Destination Address */
+ XM_CTL_PARA = 0x0070, /* 32 bit r/w Control Parameter Register */
+ XM_MAC_OPCODE = 0x0074, /* 16 bit r/w Opcode for MAC control frames */
+ XM_MAC_PTIME = 0x0076, /* 16 bit r/w Pause time for MAC ctrl frames*/
+ XM_TX_STAT = 0x0078, /* 32 bit r/o Tx Status LIFO Register */
+
+ XM_EXM_START = 0x0080, /* r/w Start Address of the EXM Regs */
+#define XM_EXM(reg) (XM_EXM_START + ((reg) << 3))
+};
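+
+/*
+ * The EXM registers are spaced 8 bytes apart, so e.g. (illustrative)
+ * XM_EXM(0) == 0x0080 and XM_EXM(1) == 0x0088.
+ */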
+
+enum {
+ XM_SRC_CHK = 0x0100, /* NA reg r/w Source Check Address Register */
+ XM_SA = 0x0108, /* NA reg r/w Station Address Register */
+ XM_HSM = 0x0110, /* 64 bit r/w Hash Match Address Registers */
+ XM_RX_LO_WM = 0x0118, /* 16 bit r/w Receive Low Water Mark */
+ XM_RX_HI_WM = 0x011a, /* 16 bit r/w Receive High Water Mark */
+ XM_RX_THR = 0x011c, /* 32 bit r/w Receive Request Threshold */
+ XM_DEV_ID = 0x0120, /* 32 bit r/o Device ID Register */
+ XM_MODE = 0x0124, /* 32 bit r/w Mode Register */
+ XM_LSA = 0x0128, /* NA reg r/o Last Source Register */
+ XM_TS_READ = 0x0130, /* 32 bit r/o Time Stamp Read Register */
+ XM_TS_LOAD = 0x0134, /* 32 bit r/o Time Stamp Load Value */
+ XM_STAT_CMD = 0x0200, /* 16 bit r/w Statistics Command Register */
+ XM_RX_CNT_EV = 0x0204, /* 32 bit r/o Rx Counter Event Register */
+ XM_TX_CNT_EV = 0x0208, /* 32 bit r/o Tx Counter Event Register */
+ XM_RX_EV_MSK = 0x020c, /* 32 bit r/w Rx Counter Event Mask */
+ XM_TX_EV_MSK = 0x0210, /* 32 bit r/w Tx Counter Event Mask */
+ XM_TXF_OK = 0x0280, /* 32 bit r/o Frames Transmitted OK Counter */
+ XM_TXO_OK_HI = 0x0284, /* 32 bit r/o Octets Transmitted OK High Cnt*/
+ XM_TXO_OK_LO = 0x0288, /* 32 bit r/o Octets Transmitted OK Low Cnt */
+ XM_TXF_BC_OK = 0x028c, /* 32 bit r/o Broadcast Frames Xmitted OK */
+ XM_TXF_MC_OK = 0x0290, /* 32 bit r/o Multicast Frames Xmitted OK */
+ XM_TXF_UC_OK = 0x0294, /* 32 bit r/o Unicast Frames Xmitted OK */
+ XM_TXF_LONG = 0x0298, /* 32 bit r/o Tx Long Frame Counter */
+ XM_TXE_BURST = 0x029c, /* 32 bit r/o Tx Burst Event Counter */
+ XM_TXF_MPAUSE = 0x02a0, /* 32 bit r/o Tx Pause MAC Ctrl Frame Cnt */
+ XM_TXF_MCTRL = 0x02a4, /* 32 bit r/o Tx MAC Ctrl Frame Counter */
+ XM_TXF_SNG_COL = 0x02a8, /* 32 bit r/o Tx Single Collision Counter */
+ XM_TXF_MUL_COL = 0x02ac, /* 32 bit r/o Tx Multiple Collision Counter */
+ XM_TXF_ABO_COL = 0x02b0, /* 32 bit r/o Tx aborted due to Exces. Col. */
+ XM_TXF_LAT_COL = 0x02b4, /* 32 bit r/o Tx Late Collision Counter */
+ XM_TXF_DEF = 0x02b8, /* 32 bit r/o Tx Deferred Frame Counter */
+ XM_TXF_EX_DEF = 0x02bc, /* 32 bit r/o Tx Excessive Deferral Counter */
+ XM_TXE_FIFO_UR = 0x02c0, /* 32 bit r/o Tx FIFO Underrun Event Cnt */
+ XM_TXE_CS_ERR = 0x02c4, /* 32 bit r/o Tx Carrier Sense Error Cnt */
+ XM_TXP_UTIL = 0x02c8, /* 32 bit r/o Tx Utilization in % */
+ XM_TXF_64B = 0x02d0, /* 32 bit r/o 64 Byte Tx Frame Counter */
+ XM_TXF_127B = 0x02d4, /* 32 bit r/o 65-127 Byte Tx Frame Counter */
+ XM_TXF_255B = 0x02d8, /* 32 bit r/o 128-255 Byte Tx Frame Counter */
+ XM_TXF_511B = 0x02dc, /* 32 bit r/o 256-511 Byte Tx Frame Counter */
+ XM_TXF_1023B = 0x02e0, /* 32 bit r/o 512-1023 Byte Tx Frame Counter*/
+ XM_TXF_MAX_SZ = 0x02e4, /* 32 bit r/o 1024-MaxSize Byte Tx Frame Cnt*/
+ XM_RXF_OK = 0x0300, /* 32 bit r/o Frames Received OK */
+ XM_RXO_OK_HI = 0x0304, /* 32 bit r/o Octets Received OK High Cnt */
+ XM_RXO_OK_LO = 0x0308, /* 32 bit r/o Octets Received OK Low Counter*/
+ XM_RXF_BC_OK = 0x030c, /* 32 bit r/o Broadcast Frames Received OK */
+ XM_RXF_MC_OK = 0x0310, /* 32 bit r/o Multicast Frames Received OK */
+ XM_RXF_UC_OK = 0x0314, /* 32 bit r/o Unicast Frames Received OK */
+ XM_RXF_MPAUSE = 0x0318, /* 32 bit r/o Rx Pause MAC Ctrl Frame Cnt */
+ XM_RXF_MCTRL = 0x031c, /* 32 bit r/o Rx MAC Ctrl Frame Counter */
+ XM_RXF_INV_MP = 0x0320, /* 32 bit r/o Rx invalid Pause Frame Cnt */
+ XM_RXF_INV_MOC = 0x0324, /* 32 bit r/o Rx Frames with inv. MAC Opcode*/
+ XM_RXE_BURST = 0x0328, /* 32 bit r/o Rx Burst Event Counter */
+ XM_RXE_FMISS = 0x032c, /* 32 bit r/o Rx Missed Frames Event Cnt */
+ XM_RXF_FRA_ERR = 0x0330, /* 32 bit r/o Rx Framing Error Counter */
+ XM_RXE_FIFO_OV = 0x0334, /* 32 bit r/o Rx FIFO overflow Event Cnt */
+ XM_RXF_JAB_PKT = 0x0338, /* 32 bit r/o Rx Jabber Packet Frame Cnt */
+ XM_RXE_CAR_ERR = 0x033c, /* 32 bit r/o Rx Carrier Event Error Cnt */
+ XM_RXF_LEN_ERR = 0x0340, /* 32 bit r/o Rx in Range Length Error */
+ XM_RXE_SYM_ERR = 0x0344, /* 32 bit r/o Rx Symbol Error Counter */
+ XM_RXE_SHT_ERR = 0x0348, /* 32 bit r/o Rx Short Event Error Cnt */
+ XM_RXE_RUNT = 0x034c, /* 32 bit r/o Rx Runt Event Counter */
+ XM_RXF_LNG_ERR = 0x0350, /* 32 bit r/o Rx Frame too Long Error Cnt */
+ XM_RXF_FCS_ERR = 0x0354, /* 32 bit r/o Rx Frame Check Seq. Error Cnt */
+ XM_RXF_CEX_ERR = 0x035c, /* 32 bit r/o Rx Carrier Ext Error Frame Cnt*/
+ XM_RXP_UTIL = 0x0360, /* 32 bit r/o Rx Utilization in % */
+ XM_RXF_64B = 0x0368, /* 32 bit r/o 64 Byte Rx Frame Counter */
+ XM_RXF_127B = 0x036c, /* 32 bit r/o 65-127 Byte Rx Frame Counter */
+ XM_RXF_255B = 0x0370, /* 32 bit r/o 128-255 Byte Rx Frame Counter */
+ XM_RXF_511B = 0x0374, /* 32 bit r/o 256-511 Byte Rx Frame Counter */
+ XM_RXF_1023B = 0x0378, /* 32 bit r/o 512-1023 Byte Rx Frame Counter*/
+ XM_RXF_MAX_SZ = 0x037c, /* 32 bit r/o 1024-MaxSize Byte Rx Frame Cnt*/
+};
+
+/* XM_MMU_CMD 16 bit r/w MMU Command Register */
+enum {
+ XM_MMU_PHY_RDY = 1<<12, /* Bit 12: PHY Read Ready */
+ XM_MMU_PHY_BUSY = 1<<11, /* Bit 11: PHY Busy */
+ XM_MMU_IGN_PF = 1<<10, /* Bit 10: Ignore Pause Frame */
+ XM_MMU_MAC_LB = 1<<9, /* Bit 9: Enable MAC Loopback */
+ XM_MMU_FRC_COL = 1<<7, /* Bit 7: Force Collision */
+ XM_MMU_SIM_COL = 1<<6, /* Bit 6: Simulate Collision */
+ XM_MMU_NO_PRE = 1<<5, /* Bit 5: No MDIO Preamble */
+ XM_MMU_GMII_FD = 1<<4, /* Bit 4: GMII uses Full Duplex */
+ XM_MMU_RAT_CTRL = 1<<3, /* Bit 3: Enable Rate Control */
+ XM_MMU_GMII_LOOP= 1<<2, /* Bit 2: PHY is in Loopback Mode */
+ XM_MMU_ENA_RX = 1<<1, /* Bit 1: Enable Receiver */
+ XM_MMU_ENA_TX = 1<<0, /* Bit 0: Enable Transmitter */
+};
+
+
+/* XM_TX_CMD 16 bit r/w Transmit Command Register */
+enum {
+ XM_TX_BK2BK = 1<<6, /* Bit 6: Ignore Carrier Sense (Tx Bk2Bk)*/
+ XM_TX_ENC_BYP = 1<<5, /* Bit 5: Set Encoder in Bypass Mode */
+ XM_TX_SAM_LINE = 1<<4, /* Bit 4: (sc) Start utilization calculation */
+ XM_TX_NO_GIG_MD = 1<<3, /* Bit 3: Disable Carrier Extension */
+ XM_TX_NO_PRE = 1<<2, /* Bit 2: Disable Preamble Generation */
+ XM_TX_NO_CRC = 1<<1, /* Bit 1: Disable CRC Generation */
+ XM_TX_AUTO_PAD = 1<<0, /* Bit 0: Enable Automatic Padding */
+};
+
+/* XM_TX_RT_LIM 16 bit r/w Transmit Retry Limit Register */
+#define XM_RT_LIM_MSK 0x1f /* Bit 4..0: Tx Retry Limit */
+
+
+/* XM_TX_STIME 16 bit r/w Transmit Slottime Register */
+#define XM_STIME_MSK 0x7f /* Bit 6..0: Tx Slottime bits */
+
+
+/* XM_TX_IPG 16 bit r/w Transmit Inter Packet Gap */
+#define XM_IPG_MSK 0xff /* Bit 7..0: IPG value bits */
+
+
+/* XM_RX_CMD 16 bit r/w Receive Command Register */
+enum {
+ XM_RX_LENERR_OK = 1<<8, /* Bit 8: don't set Rx Err bit for */
+ /* in-range error packets */
+ XM_RX_BIG_PK_OK = 1<<7, /* Bit 7: don't set Rx Err bit for */
+ /* jumbo packets */
+ XM_RX_IPG_CAP = 1<<6, /* Bit 6: replace type field with IPG */
+ XM_RX_TP_MD = 1<<5, /* Bit 5: Enable transparent Mode */
+ XM_RX_STRIP_FCS = 1<<4, /* Bit 4: Enable FCS Stripping */
+ XM_RX_SELF_RX = 1<<3, /* Bit 3: Enable Rx of own packets */
+ XM_RX_SAM_LINE = 1<<2, /* Bit 2: (sc) Start utilization calculation */
+ XM_RX_STRIP_PAD = 1<<1, /* Bit 1: Strip pad bytes of Rx frames */
+ XM_RX_DIS_CEXT = 1<<0, /* Bit 0: Disable carrier ext. check */
+};
+
+
+/* XM_GP_PORT 32 bit r/w General Purpose Port Register */
+enum {
+ XM_GP_ANIP = 1<<6, /* Bit 6: (ro) Auto-Neg. in progress */
+ XM_GP_FRC_INT = 1<<5, /* Bit 5: (sc) Force Interrupt */
+ XM_GP_RES_MAC = 1<<3, /* Bit 3: (sc) Reset MAC and FIFOs */
+ XM_GP_RES_STAT = 1<<2, /* Bit 2: (sc) Reset the statistics module */
+ XM_GP_INP_ASS = 1<<0, /* Bit 0: (ro) GP Input Pin asserted */
+};
+
+
+/* XM_IMSK 16 bit r/w Interrupt Mask Register */
+/* XM_ISRC 16 bit r/o Interrupt Status Register */
+enum {
+ XM_IS_LNK_AE = 1<<14, /* Bit 14: Link Asynchronous Event */
+ XM_IS_TX_ABORT = 1<<13, /* Bit 13: Transmit Abort, late Col. etc */
+ XM_IS_FRC_INT = 1<<12, /* Bit 12: Force INT bit set in GP */
+ XM_IS_INP_ASS = 1<<11, /* Bit 11: Input Asserted, GP bit 0 set */
+ XM_IS_LIPA_RC = 1<<10, /* Bit 10: Link Partner requests config */
+ XM_IS_RX_PAGE = 1<<9, /* Bit 9: Page Received */
+ XM_IS_TX_PAGE = 1<<8, /* Bit 8: Next Page Loaded for Transmit */
+ XM_IS_AND = 1<<7, /* Bit 7: Auto-Negotiation Done */
+ XM_IS_TSC_OV = 1<<6, /* Bit 6: Time Stamp Counter Overflow */
+ XM_IS_RXC_OV = 1<<5, /* Bit 5: Rx Counter Event Overflow */
+ XM_IS_TXC_OV = 1<<4, /* Bit 4: Tx Counter Event Overflow */
+ XM_IS_RXF_OV = 1<<3, /* Bit 3: Receive FIFO Overflow */
+ XM_IS_TXF_UR = 1<<2, /* Bit 2: Transmit FIFO Underrun */
+ XM_IS_TX_COMP = 1<<1, /* Bit 1: Frame Tx Complete */
+ XM_IS_RX_COMP = 1<<0, /* Bit 0: Frame Rx Complete */
+
+ XM_IMSK_DISABLE = 0xffff,
+};
+
+/* XM_HW_CFG 16 bit r/w Hardware Config Register */
+enum {
+ XM_HW_GEN_EOP = 1<<3, /* Bit 3: generate End of Packet pulse */
+ XM_HW_COM4SIG = 1<<2, /* Bit 2: use Comma Detect for Sig. Det.*/
+ XM_HW_GMII_MD = 1<<0, /* Bit 0: GMII Interface selected */
+};
+
+
+/* XM_TX_LO_WM 16 bit r/w Tx FIFO Low Water Mark */
+/* XM_TX_HI_WM 16 bit r/w Tx FIFO High Water Mark */
+#define XM_TX_WM_MSK 0x01ff /* Bit 9.. 0 Tx FIFO Watermark bits */
+
+/* XM_TX_THR 16 bit r/w Tx Request Threshold */
+/* XM_HT_THR 16 bit r/w Host Request Threshold */
+/* XM_RX_THR 16 bit r/w Rx Request Threshold */
+#define XM_THR_MSK 0x03ff /* Bit 10.. 0 Rx/Tx Request Threshold bits */
+
+
+/* XM_TX_STAT 32 bit r/o Tx Status LIFO Register */
+enum {
+ XM_ST_VALID = (1UL<<31), /* Bit 31: Status Valid */
+ XM_ST_BYTE_CNT = (0x3fffL<<17), /* Bit 30..17: Tx frame Length */
+ XM_ST_RETRY_CNT = (0x1fL<<12), /* Bit 16..12: Retry Count */
+ XM_ST_EX_COL = 1<<11, /* Bit 11: Excessive Collisions */
+ XM_ST_EX_DEF = 1<<10, /* Bit 10: Excessive Deferral */
+ XM_ST_BURST = 1<<9, /* Bit 9: packet xmitted in burst mode*/
+ XM_ST_DEFER = 1<<8, /* Bit 8: packet was deferred */
+ XM_ST_BC = 1<<7, /* Bit 7: Broadcast packet */
+ XM_ST_MC = 1<<6, /* Bit 6: Multicast packet */
+ XM_ST_UC = 1<<5, /* Bit 5: Unicast packet */
+ XM_ST_TX_UR = 1<<4, /* Bit 4: FIFO Underrun occurred */
+ XM_ST_CS_ERR = 1<<3, /* Bit 3: Carrier Sense Error */
+ XM_ST_LAT_COL = 1<<2, /* Bit 2: Late Collision Error */
+ XM_ST_MUL_COL = 1<<1, /* Bit 1: Multiple Collisions */
+ XM_ST_SGN_COL = 1<<0, /* Bit 0: Single Collision */
+};
+
+/* XM_RX_LO_WM 16 bit r/w Receive Low Water Mark */
+/* XM_RX_HI_WM 16 bit r/w Receive High Water Mark */
+#define XM_RX_WM_MSK 0x03ff /* Bit 11.. 0: Rx FIFO Watermark bits */
+
+
+/* XM_DEV_ID 32 bit r/o Device ID Register */
+#define XM_DEV_OUI (0x00ffffffUL<<8) /* Bit 31..8: Device OUI */
+#define XM_DEV_REV (0x07L << 5) /* Bit 7..5: Chip Rev Num */
+
+
+/* XM_MODE 32 bit r/w Mode Register */
+enum {
+ XM_MD_ENA_REJ = 1<<26, /* Bit 26: Enable Frame Reject */
+ XM_MD_SPOE_E = 1<<25, /* Bit 25: Send Pause on Edge */
+ /* extern generated */
+ XM_MD_TX_REP = 1<<24, /* Bit 24: Transmit Repeater Mode */
+ XM_MD_SPOFF_I = 1<<23, /* Bit 23: Send Pause on FIFO full */
+ /* intern generated */
+ XM_MD_LE_STW = 1<<22, /* Bit 22: Rx Stat Word in Little Endian */
+ XM_MD_TX_CONT = 1<<21, /* Bit 21: Send Continuous */
+ XM_MD_TX_PAUSE = 1<<20, /* Bit 20: (sc) Send Pause Frame */
+ XM_MD_ATS = 1<<19, /* Bit 19: Append Time Stamp */
+ XM_MD_SPOL_I = 1<<18, /* Bit 18: Send Pause on Low */
+ /* intern generated */
+ XM_MD_SPOH_I = 1<<17, /* Bit 17: Send Pause on High */
+ /* intern generated */
+ XM_MD_CAP = 1<<16, /* Bit 16: Check Address Pair */
+ XM_MD_ENA_HASH = 1<<15, /* Bit 15: Enable Hashing */
+ XM_MD_CSA = 1<<14, /* Bit 14: Check Station Address */
+ XM_MD_CAA = 1<<13, /* Bit 13: Check Address Array */
+ XM_MD_RX_MCTRL = 1<<12, /* Bit 12: Rx MAC Control Frame */
+ XM_MD_RX_RUNT = 1<<11, /* Bit 11: Rx Runt Frames */
+ XM_MD_RX_IRLE = 1<<10, /* Bit 10: Rx in Range Len Err Frame */
+ XM_MD_RX_LONG = 1<<9, /* Bit 9: Rx Long Frame */
+ XM_MD_RX_CRCE = 1<<8, /* Bit 8: Rx CRC Error Frame */
+ XM_MD_RX_ERR = 1<<7, /* Bit 7: Rx Error Frame */
+ XM_MD_DIS_UC = 1<<6, /* Bit 6: Disable Rx Unicast */
+ XM_MD_DIS_MC = 1<<5, /* Bit 5: Disable Rx Multicast */
+ XM_MD_DIS_BC = 1<<4, /* Bit 4: Disable Rx Broadcast */
+ XM_MD_ENA_PROM = 1<<3, /* Bit 3: Enable Promiscuous */
+ XM_MD_ENA_BE = 1<<2, /* Bit 2: Enable Big Endian */
+ XM_MD_FTF = 1<<1, /* Bit 1: (sc) Flush Tx FIFO */
+ XM_MD_FRF = 1<<0, /* Bit 0: (sc) Flush Rx FIFO */
+};
+
+#define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I)
+#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\
+ XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA)
+
+/* XM_STAT_CMD 16 bit r/w Statistics Command Register */
+enum {
+ XM_SC_SNP_RXC = 1<<5, /* Bit 5: (sc) Snap Rx Counters */
+ XM_SC_SNP_TXC = 1<<4, /* Bit 4: (sc) Snap Tx Counters */
+ XM_SC_CP_RXC = 1<<3, /* Bit 3: Copy Rx Counters Continuously */
+ XM_SC_CP_TXC = 1<<2, /* Bit 2: Copy Tx Counters Continuously */
+ XM_SC_CLR_RXC = 1<<1, /* Bit 1: (sc) Clear Rx Counters */
+ XM_SC_CLR_TXC = 1<<0, /* Bit 0: (sc) Clear Tx Counters */
+};
+
+
+/* XM_RX_CNT_EV 32 bit r/o Rx Counter Event Register */
+/* XM_RX_EV_MSK 32 bit r/w Rx Counter Event Mask */
+enum {
+ XMR_MAX_SZ_OV = 1<<31, /* Bit 31: 1024-MaxSize Rx Cnt Ov*/
+ XMR_1023B_OV = 1<<30, /* Bit 30: 512-1023Byte Rx Cnt Ov*/
+ XMR_511B_OV = 1<<29, /* Bit 29: 256-511 Byte Rx Cnt Ov*/
+ XMR_255B_OV = 1<<28, /* Bit 28: 128-255 Byte Rx Cnt Ov*/
+ XMR_127B_OV = 1<<27, /* Bit 27: 65-127 Byte Rx Cnt Ov */
+ XMR_64B_OV = 1<<26, /* Bit 26: 64 Byte Rx Cnt Ov */
+ XMR_UTIL_OV = 1<<25, /* Bit 25: Rx Util Cnt Overflow */
+ XMR_UTIL_UR = 1<<24, /* Bit 24: Rx Util Cnt Underrun */
+ XMR_CEX_ERR_OV = 1<<23, /* Bit 23: CEXT Err Cnt Ov */
+ XMR_FCS_ERR_OV = 1<<21, /* Bit 21: Rx FCS Error Cnt Ov */
+ XMR_LNG_ERR_OV = 1<<20, /* Bit 20: Rx too Long Err Cnt Ov*/
+ XMR_RUNT_OV = 1<<19, /* Bit 19: Runt Event Cnt Ov */
+ XMR_SHT_ERR_OV = 1<<18, /* Bit 18: Rx Short Ev Err Cnt Ov*/
+ XMR_SYM_ERR_OV = 1<<17, /* Bit 17: Rx Sym Err Cnt Ov */
+ XMR_CAR_ERR_OV = 1<<15, /* Bit 15: Rx Carr Ev Err Cnt Ov */
+ XMR_JAB_PKT_OV = 1<<14, /* Bit 14: Rx Jabb Packet Cnt Ov */
+ XMR_FIFO_OV = 1<<13, /* Bit 13: Rx FIFO Ov Ev Cnt Ov */
+ XMR_FRA_ERR_OV = 1<<12, /* Bit 12: Rx Framing Err Cnt Ov */
+ XMR_FMISS_OV = 1<<11, /* Bit 11: Rx Missed Ev Cnt Ov */
+ XMR_BURST = 1<<10, /* Bit 10: Rx Burst Event Cnt Ov */
+ XMR_INV_MOC = 1<<9, /* Bit 9: Rx with inv. MAC OC Ov*/
+ XMR_INV_MP = 1<<8, /* Bit 8: Rx inv Pause Frame Ov */
+ XMR_MCTRL_OV = 1<<7, /* Bit 7: Rx MAC Ctrl-F Cnt Ov */
+ XMR_MPAUSE_OV = 1<<6, /* Bit 6: Rx Pause MAC Ctrl-F Ov*/
+ XMR_UC_OK_OV = 1<<5, /* Bit 5: Rx Unicast Frame CntOv*/
+ XMR_MC_OK_OV = 1<<4, /* Bit 4: Rx Multicast Cnt Ov */
+ XMR_BC_OK_OV = 1<<3, /* Bit 3: Rx Broadcast Cnt Ov */
+ XMR_OK_LO_OV = 1<<2, /* Bit 2: Octets Rx OK Low CntOv*/
+ XMR_OK_HI_OV = 1<<1, /* Bit 1: Octets Rx OK Hi Cnt Ov*/
+ XMR_OK_OV = 1<<0, /* Bit 0: Frames Received Ok Ov */
+};
+
+#define XMR_DEF_MSK (XMR_OK_LO_OV | XMR_OK_HI_OV)
+
+/* XM_TX_CNT_EV 32 bit r/o Tx Counter Event Register */
+/* XM_TX_EV_MSK 32 bit r/w Tx Counter Event Mask */
+enum {
+ XMT_MAX_SZ_OV = 1<<25, /* Bit 25: 1024-MaxSize Tx Cnt Ov*/
+ XMT_1023B_OV = 1<<24, /* Bit 24: 512-1023Byte Tx Cnt Ov*/
+ XMT_511B_OV = 1<<23, /* Bit 23: 256-511 Byte Tx Cnt Ov*/
+ XMT_255B_OV = 1<<22, /* Bit 22: 128-255 Byte Tx Cnt Ov*/
+ XMT_127B_OV = 1<<21, /* Bit 21: 65-127 Byte Tx Cnt Ov */
+ XMT_64B_OV = 1<<20, /* Bit 20: 64 Byte Tx Cnt Ov */
+ XMT_UTIL_OV = 1<<19, /* Bit 19: Tx Util Cnt Overflow */
+ XMT_UTIL_UR = 1<<18, /* Bit 18: Tx Util Cnt Underrun */
+ XMT_CS_ERR_OV = 1<<17, /* Bit 17: Tx Carr Sen Err Cnt Ov*/
+ XMT_FIFO_UR_OV = 1<<16, /* Bit 16: Tx FIFO Ur Ev Cnt Ov */
+ XMT_EX_DEF_OV = 1<<15, /* Bit 15: Tx Ex Deferral Cnt Ov */
+ XMT_DEF = 1<<14, /* Bit 14: Tx Deferred Cnt Ov */
+ XMT_LAT_COL_OV = 1<<13, /* Bit 13: Tx Late Col Cnt Ov */
+ XMT_ABO_COL_OV = 1<<12, /* Bit 12: Tx abort due to Ex Col Ov*/
+ XMT_MUL_COL_OV = 1<<11, /* Bit 11: Tx Mult Col Cnt Ov */
+ XMT_SNG_COL = 1<<10, /* Bit 10: Tx Single Col Cnt Ov */
+ XMT_MCTRL_OV = 1<<9, /* Bit 9: Tx MAC Ctrl Counter Ov*/
+ XMT_MPAUSE = 1<<8, /* Bit 8: Tx Pause MAC Ctrl-F Ov*/
+ XMT_BURST = 1<<7, /* Bit 7: Tx Burst Event Cnt Ov */
+ XMT_LONG = 1<<6, /* Bit 6: Tx Long Frame Cnt Ov */
+ XMT_UC_OK_OV = 1<<5, /* Bit 5: Tx Unicast Cnt Ov */
+ XMT_MC_OK_OV = 1<<4, /* Bit 4: Tx Multicast Cnt Ov */
+ XMT_BC_OK_OV = 1<<3, /* Bit 3: Tx Broadcast Cnt Ov */
+ XMT_OK_LO_OV = 1<<2, /* Bit 2: Octets Tx OK Low CntOv*/
+ XMT_OK_HI_OV = 1<<1, /* Bit 1: Octets Tx OK Hi Cnt Ov*/
+ XMT_OK_OV = 1<<0, /* Bit 0: Frames Tx Ok Ov */
+};
+
+#define XMT_DEF_MSK (XMT_OK_LO_OV | XMT_OK_HI_OV)
+
+struct skge_rx_desc {
+ u32 control;
+ u32 next_offset;
+ u32 dma_lo;
+ u32 dma_hi;
+ u32 status;
+ u32 timestamp;
+ u16 csum2;
+ u16 csum1;
+ u16 csum2_start;
+ u16 csum1_start;
+};
+
+struct skge_tx_desc {
+ u32 control;
+ u32 next_offset;
+ u32 dma_lo;
+ u32 dma_hi;
+ u32 status;
+ u32 csum_offs;
+ u16 csum_write;
+ u16 csum_start;
+ u32 rsvd;
+};
+
+struct skge_element {
+ struct skge_element *next;
+ void *desc;
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
+};
+
+struct skge_ring {
+ struct skge_element *to_clean;
+ struct skge_element *to_use;
+ struct skge_element *start;
+ unsigned long count;
+};
+
+
+struct skge_hw {
+ void __iomem *regs;
+ struct pci_dev *pdev;
+ spinlock_t hw_lock;
+ u32 intr_mask;
+ struct net_device *dev[2];
+
+ u8 chip_id;
+ u8 chip_rev;
+ u8 copper;
+ u8 ports;
+ u8 phy_type;
+
+ u32 ram_size;
+ u32 ram_offset;
+ u16 phy_addr;
+ spinlock_t phy_lock;
+ struct tasklet_struct phy_task;
+
+ char irq_name[0]; /* skge@pci:0000:04:00.0 */
+};
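+
+/*
+ * Since irq_name is a zero-length (flexible) array, struct skge_hw is
+ * meant to be allocated with room for the name appended, along the
+ * lines of (sketch, not necessarily the driver's exact code):
+ *
+ *	hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
+ *		     + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
+ */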
+
+enum pause_control {
+ FLOW_MODE_NONE = 1, /* No Flow-Control */
+ FLOW_MODE_LOC_SEND = 2, /* Local station sends PAUSE */
+ FLOW_MODE_SYMMETRIC = 3, /* Both stations may send PAUSE */
+ FLOW_MODE_SYM_OR_REM = 4, /* Both stations may send PAUSE or
+ * just the remote station may send PAUSE
+ */
+};
+
+enum pause_status {
+ FLOW_STAT_INDETERMINATED = 0, /* indeterminate */
+ FLOW_STAT_NONE, /* No Flow Control */
+ FLOW_STAT_REM_SEND, /* Remote Station sends PAUSE */
+ FLOW_STAT_LOC_SEND, /* Local station sends PAUSE */
+ FLOW_STAT_SYMMETRIC, /* Both stations may send PAUSE */
+};
+
+
+struct skge_port {
+ struct skge_hw *hw;
+ struct net_device *netdev;
+ struct napi_struct napi;
+ int port;
+ u32 msg_enable;
+
+ struct skge_ring tx_ring;
+
+ struct skge_ring rx_ring ____cacheline_aligned_in_smp;
+ unsigned int rx_buf_size;
+
+ struct timer_list link_timer;
+ enum pause_control flow_control;
+ enum pause_status flow_status;
+ u8 blink_on;
+ u8 wol;
+ u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */
+ u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
+ u16 speed; /* SPEED_1000, SPEED_100, ... */
+ u32 advertising;
+
+ void *mem; /* PCI memory for rings */
+ dma_addr_t dma;
+ unsigned long mem_size;
+#ifdef CONFIG_SKGE_DEBUG
+ struct dentry *debugfs;
+#endif
+};
+
+
+/* Register accessor for memory mapped device */
+static inline u32 skge_read32(const struct skge_hw *hw, int reg)
+{
+ return readl(hw->regs + reg);
+}
+
+static inline u16 skge_read16(const struct skge_hw *hw, int reg)
+{
+ return readw(hw->regs + reg);
+}
+
+static inline u8 skge_read8(const struct skge_hw *hw, int reg)
+{
+ return readb(hw->regs + reg);
+}
+
+static inline void skge_write32(const struct skge_hw *hw, int reg, u32 val)
+{
+ writel(val, hw->regs + reg);
+}
+
+static inline void skge_write16(const struct skge_hw *hw, int reg, u16 val)
+{
+ writew(val, hw->regs + reg);
+}
+
+static inline void skge_write8(const struct skge_hw *hw, int reg, u8 val)
+{
+ writeb(val, hw->regs + reg);
+}
+
+/* MAC Related Registers inside the device. */
+#define SK_REG(port,reg) (((port)<<7)+(u16)(reg))
+#define SK_XMAC_REG(port, reg) \
+ ((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1)
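+
+/*
+ * Each port's MAC register block is 128 bytes wide, so for example
+ * SK_REG(1, reg) == SK_REG(0, reg) + 0x80 (illustrative).
+ */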
+
+static inline u32 xm_read32(const struct skge_hw *hw, int port, int reg)
+{
+ u32 v;
+ v = skge_read16(hw, SK_XMAC_REG(port, reg));
+ v |= (u32)skge_read16(hw, SK_XMAC_REG(port, reg+2)) << 16;
+ return v;
+}
+
+static inline u16 xm_read16(const struct skge_hw *hw, int port, int reg)
+{
+ return skge_read16(hw, SK_XMAC_REG(port,reg));
+}
+
+static inline void xm_write32(const struct skge_hw *hw, int port, int r, u32 v)
+{
+ skge_write16(hw, SK_XMAC_REG(port,r), v & 0xffff);
+ skge_write16(hw, SK_XMAC_REG(port,r+2), v >> 16);
+}
+
+static inline void xm_write16(const struct skge_hw *hw, int port, int r, u16 v)
+{
+ skge_write16(hw, SK_XMAC_REG(port,r), v);
+}
+
+static inline void xm_outhash(const struct skge_hw *hw, int port, int reg,
+ const u8 *hash)
+{
+ xm_write16(hw, port, reg, (u16)hash[0] | ((u16)hash[1] << 8));
+ xm_write16(hw, port, reg+2, (u16)hash[2] | ((u16)hash[3] << 8));
+ xm_write16(hw, port, reg+4, (u16)hash[4] | ((u16)hash[5] << 8));
+ xm_write16(hw, port, reg+6, (u16)hash[6] | ((u16)hash[7] << 8));
+}
+
+static inline void xm_outaddr(const struct skge_hw *hw, int port, int reg,
+ const u8 *addr)
+{
+ xm_write16(hw, port, reg, (u16)addr[0] | ((u16)addr[1] << 8));
+ xm_write16(hw, port, reg+2, (u16)addr[2] | ((u16)addr[3] << 8));
+ xm_write16(hw, port, reg+4, (u16)addr[4] | ((u16)addr[5] << 8));
+}
+
+#define SK_GMAC_REG(port,reg) \
+ (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg))
+
+static inline u16 gma_read16(const struct skge_hw *hw, int port, int reg)
+{
+ return skge_read16(hw, SK_GMAC_REG(port,reg));
+}
+
+static inline u32 gma_read32(const struct skge_hw *hw, int port, int reg)
+{
+ return (u32) skge_read16(hw, SK_GMAC_REG(port,reg))
+ | ((u32)skge_read16(hw, SK_GMAC_REG(port,reg+4)) << 16);
+}
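+
+/* Note: unlike the XMAC (xm_read32 above, halves at reg and reg+2), the
+ * GMAC exposes the high half of a 32 bit value 4 bytes after the low half.
+ */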
+
+static inline void gma_write16(const struct skge_hw *hw, int port, int r, u16 v)
+{
+ skge_write16(hw, SK_GMAC_REG(port,r), v);
+}
+
+static inline void gma_set_addr(struct skge_hw *hw, int port, int reg,
+ const u8 *addr)
+{
+ gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8));
+ gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
+ gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
+}
+
+#endif
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
new file mode 100644
index 000000000000..a3ce9b6d36af
--- /dev/null
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -0,0 +1,5162 @@
+/*
+ * New driver for Marvell Yukon 2 chipset.
+ * Based on the earlier sk98lin and skge drivers.
+ *
+ * This driver intentionally does not support all the features
+ * of the original driver such as link fail-over and link management because
+ * those should be done at higher levels.
+ *
+ * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/crc32.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/slab.h>
+#include <net/ip.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/if_vlan.h>
+#include <linux/prefetch.h>
+#include <linux/debugfs.h>
+#include <linux/mii.h>
+
+#include <asm/irq.h>
+
+#include "sky2.h"
+
+#define DRV_NAME "sky2"
+#define DRV_VERSION "1.29"
+
+/*
+ * The Yukon II chipset takes 64 bit command blocks (called list elements)
+ * that are organized into three (receive, transmit, status) different rings
+ * similar to Tigon3.
+ */
+
+#define RX_LE_SIZE 1024
+#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
+#define RX_MAX_PENDING (RX_LE_SIZE/6 - 2)
+#define RX_DEF_PENDING RX_MAX_PENDING
+
+/* This is the worst case number of transmit list elements for a single skb:
+ VLAN:GSO + CKSUM + Data + skb_frags * DMA */
+#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
+#define TX_MIN_PENDING (MAX_SKB_TX_LE+1)
+#define TX_MAX_PENDING 1024
+#define TX_DEF_PENDING 127
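+
+/*
+ * For example, assuming a 64 bit dma_addr_t and MAX_SKB_FRAGS == 18
+ * (4 KB pages), MAX_SKB_TX_LE works out to 2 + 2 * (18 + 1) = 40 list
+ * elements, making TX_MIN_PENDING 41.
+ */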
+
+#define TX_WATCHDOG (5 * HZ)
+#define NAPI_WEIGHT 64
+#define PHY_RETRIES 1000
+
+#define SKY2_EEPROM_MAGIC 0x9955aabb
+
+#define RING_NEXT(x, s) (((x)+1) & ((s)-1))
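+
+/* RING_NEXT assumes s is a power of two, e.g. RING_NEXT(1023, 1024) == 0
+ * and RING_NEXT(5, 1024) == 6 (illustrative).
+ */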
+
+static const u32 default_msg =
+ NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
+ | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
+ | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
+
+static int debug = -1; /* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static int copybreak __read_mostly = 128;
+module_param(copybreak, int, 0);
+MODULE_PARM_DESC(copybreak, "Receive copy threshold");
+
+static int disable_msi = 0;
+module_param(disable_msi, int, 0);
+MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
+
+static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
+ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
+ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E01) }, /* SK-9E21M */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) }, /* DGE-550T */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) }, /* 88E8062 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) }, /* 88E8021 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) }, /* 88E8022 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) }, /* 88E8061 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) }, /* 88E8062 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) }, /* 88E8035 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4355) }, /* 88E8040T */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4357) }, /* 88E8042 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) }, /* 88E8070 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, sky2_id_table);
+
+/* Avoid conditionals by using array */
+static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
+static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
+static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
+
+static void sky2_set_multicast(struct net_device *dev);
+static irqreturn_t sky2_intr(int irq, void *dev_id);
+
+/* Access to PHY via serial interconnect */
+static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
+{
+ int i;
+
+ gma_write16(hw, port, GM_SMI_DATA, val);
+ gma_write16(hw, port, GM_SMI_CTRL,
+ GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));
+
+ for (i = 0; i < PHY_RETRIES; i++) {
+ u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
+ if (ctrl == 0xffff)
+ goto io_error;
+
+ if (!(ctrl & GM_SMI_CT_BUSY))
+ return 0;
+
+ udelay(10);
+ }
+
+ dev_warn(&hw->pdev->dev, "%s: phy write timeout\n", hw->dev[port]->name);
+ return -ETIMEDOUT;
+
+io_error:
+ dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
+ return -EIO;
+}
+
+static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val)
+{
+ int i;
+
+ gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
+ | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
+
+ for (i = 0; i < PHY_RETRIES; i++) {
+ u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
+ if (ctrl == 0xffff)
+ goto io_error;
+
+ if (ctrl & GM_SMI_CT_RD_VAL) {
+ *val = gma_read16(hw, port, GM_SMI_DATA);
+ return 0;
+ }
+
+ udelay(10);
+ }
+
+ dev_warn(&hw->pdev->dev, "%s: phy read timeout\n", hw->dev[port]->name);
+ return -ETIMEDOUT;
+io_error:
+ dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
+ return -EIO;
+}
+
+static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
+{
+ u16 v;
+ __gm_phy_read(hw, port, reg, &v);
+ return v;
+}
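+
+/* Note: gm_phy_read ignores __gm_phy_read's return code; callers that
+ * need to distinguish a timeout or I/O error use __gm_phy_read directly.
+ */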
+
+
+static void sky2_power_on(struct sky2_hw *hw)
+{
+ /* switch power to VCC (WA for VAUX problem) */
+ sky2_write8(hw, B0_POWER_CTRL,
+ PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
+
+ /* disable Core Clock Division */
+ sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
+
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
+ /* enable bits are inverted */
+ sky2_write8(hw, B2_Y2_CLK_GATE,
+ Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
+ Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
+ Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
+ else
+ sky2_write8(hw, B2_Y2_CLK_GATE, 0);
+
+ if (hw->flags & SKY2_HW_ADV_POWER_CTL) {
+ u32 reg;
+
+ sky2_pci_write32(hw, PCI_DEV_REG3, 0);
+
+ reg = sky2_pci_read32(hw, PCI_DEV_REG4);
+ /* set all bits to 0 except bits 15..12 and 8 */
+ reg &= P_ASPM_CONTROL_MSK;
+ sky2_pci_write32(hw, PCI_DEV_REG4, reg);
+
+ reg = sky2_pci_read32(hw, PCI_DEV_REG5);
+ /* set all bits to 0 except bits 28 & 27 */
+ reg &= P_CTL_TIM_VMAIN_AV_MSK;
+ sky2_pci_write32(hw, PCI_DEV_REG5, reg);
+
+ sky2_pci_write32(hw, PCI_CFG_REG_1, 0);
+
+ sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);
+
+ /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */
+ reg = sky2_read32(hw, B2_GP_IO);
+ reg |= GLB_GPIO_STAT_RACE_DIS;
+ sky2_write32(hw, B2_GP_IO, reg);
+
+ sky2_read32(hw, B2_GP_IO);
+ }
+
+ /* Turn on "driver loaded" LED */
+ sky2_write16(hw, B0_CTST, Y2_LED_STAT_ON);
+}
+
+static void sky2_power_aux(struct sky2_hw *hw)
+{
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
+ sky2_write8(hw, B2_Y2_CLK_GATE, 0);
+ else
+ /* enable bits are inverted */
+ sky2_write8(hw, B2_Y2_CLK_GATE,
+ Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
+ Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
+ Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
+
+ /* switch power to VAUX if supported and PME from D3cold */
+ if ( (sky2_read32(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
+ pci_pme_capable(hw->pdev, PCI_D3cold))
+ sky2_write8(hw, B0_POWER_CTRL,
+ (PC_VAUX_ENA | PC_VCC_ENA |
+ PC_VAUX_ON | PC_VCC_OFF));
+
+ /* turn off "driver loaded" LED */
+ sky2_write16(hw, B0_CTST, Y2_LED_STAT_OFF);
+}
+
+static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
+{
+ u16 reg;
+
+ /* disable all GMAC IRQ's */
+ sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
+
+ gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
+ gma_write16(hw, port, GM_MC_ADDR_H2, 0);
+ gma_write16(hw, port, GM_MC_ADDR_H3, 0);
+ gma_write16(hw, port, GM_MC_ADDR_H4, 0);
+
+ reg = gma_read16(hw, port, GM_RX_CTRL);
+ reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
+ gma_write16(hw, port, GM_RX_CTRL, reg);
+}
+
+/* flow control to advertise bits */
+static const u16 copper_fc_adv[] = {
+ [FC_NONE] = 0,
+ [FC_TX] = PHY_M_AN_ASP,
+ [FC_RX] = PHY_M_AN_PC,
+ [FC_BOTH] = PHY_M_AN_PC | PHY_M_AN_ASP,
+};
+
+/* flow control to advertise bits when using 1000BaseX */
+static const u16 fiber_fc_adv[] = {
+ [FC_NONE] = PHY_M_P_NO_PAUSE_X,
+ [FC_TX] = PHY_M_P_ASYM_MD_X,
+ [FC_RX] = PHY_M_P_SYM_MD_X,
+ [FC_BOTH] = PHY_M_P_BOTH_MD_X,
+};
+
+/* flow control to GMA disable bits */
+static const u16 gm_fc_disable[] = {
+ [FC_NONE] = GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS,
+ [FC_TX] = GM_GPCR_FC_RX_DIS,
+ [FC_RX] = GM_GPCR_FC_TX_DIS,
+ [FC_BOTH] = 0,
+};
+
+
+static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
+{
+ struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
+ u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;
+
+ if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) &&
+ !(hw->flags & SKY2_HW_NEWER_PHY)) {
+ u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
+
+ ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
+ PHY_M_EC_MAC_S_MSK);
+ ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
+
+ /* on PHY 88E1040 Rev.D0 (and newer) downshift control changed */
+ if (hw->chip_id == CHIP_ID_YUKON_EC)
+ /* set downshift counter to 3x and enable downshift */
+ ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
+ else
+ /* set master & slave downshift counter to 1x */
+ ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
+
+ gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
+ }
+
+ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+ if (sky2_is_copper(hw)) {
+ if (!(hw->flags & SKY2_HW_GIGABIT)) {
+ /* enable automatic crossover */
+ ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
+
+ if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+ hw->chip_rev == CHIP_REV_YU_FE2_A0) {
+ u16 spec;
+
+ /* Enable Class A driver for FE+ A0 */
+ spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2);
+ spec |= PHY_M_FESC_SEL_CL_A;
+ gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
+ }
+ } else {
+ if (hw->chip_id >= CHIP_ID_YUKON_OPT) {
+ u16 ctrl2 = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL_2);
+
+ /* enable PHY Reverse Auto-Negotiation */
+ ctrl2 |= 1u << 13;
+
+ /* Write PHY changes (SW-reset must follow) */
+ gm_phy_write(hw, port, PHY_MARV_EXT_CTRL_2, ctrl2);
+ }
+
+
+ /* disable energy detect */
+ ctrl &= ~PHY_M_PC_EN_DET_MSK;
+
+ /* enable automatic crossover */
+ ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
+
+ /* downshift on PHY 88E1112 and 88E1149 is changed */
+ if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) &&
+ (hw->flags & SKY2_HW_NEWER_PHY)) {
+ /* set downshift counter to 3x and enable downshift */
+ ctrl &= ~PHY_M_PC_DSC_MSK;
+ ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
+ }
+ }
+ } else {
+ /* workaround for deviation #4.88 (CRC errors) */
+ /* disable Automatic Crossover */
+
+ ctrl &= ~PHY_M_PC_MDIX_MSK;
+ }
+
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+ /* special setup for PHY 88E1112 Fiber */
+ if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) {
+ pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
+
+ /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
+ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+ ctrl &= ~PHY_M_MAC_MD_MSK;
+ ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+ if (hw->pmd_type == 'P') {
+ /* select page 1 to access Fiber registers */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
+
+ /* for SFP-module set SIGDET polarity to low */
+ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+ ctrl |= PHY_M_FIB_SIGD_POL;
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+ }
+
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
+ }
+
+ ctrl = PHY_CT_RESET;
+ ct1000 = 0;
+ adv = PHY_AN_CSMA;
+ reg = 0;
+
+ if (sky2->flags & SKY2_FLAG_AUTO_SPEED) {
+ if (sky2_is_copper(hw)) {
+ if (sky2->advertising & ADVERTISED_1000baseT_Full)
+ ct1000 |= PHY_M_1000C_AFD;
+ if (sky2->advertising & ADVERTISED_1000baseT_Half)
+ ct1000 |= PHY_M_1000C_AHD;
+ if (sky2->advertising & ADVERTISED_100baseT_Full)
+ adv |= PHY_M_AN_100_FD;
+ if (sky2->advertising & ADVERTISED_100baseT_Half)
+ adv |= PHY_M_AN_100_HD;
+ if (sky2->advertising & ADVERTISED_10baseT_Full)
+ adv |= PHY_M_AN_10_FD;
+ if (sky2->advertising & ADVERTISED_10baseT_Half)
+ adv |= PHY_M_AN_10_HD;
+
+ } else { /* special defines for FIBER (88E1040S only) */
+ if (sky2->advertising & ADVERTISED_1000baseT_Full)
+ adv |= PHY_M_AN_1000X_AFD;
+ if (sky2->advertising & ADVERTISED_1000baseT_Half)
+ adv |= PHY_M_AN_1000X_AHD;
+ }
+
+ /* Restart Auto-negotiation */
+ ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
+ } else {
+ /* forced speed/duplex settings */
+ ct1000 = PHY_M_1000C_MSE;
+
+ /* Disable auto update for duplex flow control and duplex */
+ reg |= GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_SPD_DIS;
+
+ switch (sky2->speed) {
+ case SPEED_1000:
+ ctrl |= PHY_CT_SP1000;
+ reg |= GM_GPCR_SPEED_1000;
+ break;
+ case SPEED_100:
+ ctrl |= PHY_CT_SP100;
+ reg |= GM_GPCR_SPEED_100;
+ break;
+ }
+
+ if (sky2->duplex == DUPLEX_FULL) {
+ reg |= GM_GPCR_DUP_FULL;
+ ctrl |= PHY_CT_DUP_MD;
+ } else if (sky2->speed < SPEED_1000)
+ sky2->flow_mode = FC_NONE;
+ }
+
+ if (sky2->flags & SKY2_FLAG_AUTO_PAUSE) {
+ if (sky2_is_copper(hw))
+ adv |= copper_fc_adv[sky2->flow_mode];
+ else
+ adv |= fiber_fc_adv[sky2->flow_mode];
+ } else {
+ reg |= GM_GPCR_AU_FCT_DIS;
+ reg |= gm_fc_disable[sky2->flow_mode];
+
+ /* Forward pause packets to GMAC? */
+ if (sky2->flow_mode & FC_RX)
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
+ else
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
+ }
+
+ gma_write16(hw, port, GM_GP_CTRL, reg);
+
+ if (hw->flags & SKY2_HW_GIGABIT)
+ gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
+
+ gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
+ gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
+
+ /* Setup Phy LED's */
+ ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
+ ledover = 0;
+
+ switch (hw->chip_id) {
+ case CHIP_ID_YUKON_FE:
+ /* on 88E3082 these bits are at 11..9 (shifted left) */
+ ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;
+
+ ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);
+
+ /* delete ACT LED control bits */
+ ctrl &= ~PHY_M_FELP_LED1_MSK;
+ /* change ACT LED control to blink mode */
+ ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
+ gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
+ break;
+
+ case CHIP_ID_YUKON_FE_P:
+ /* Enable Link Partner Next Page */
+ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+ ctrl |= PHY_M_PC_ENA_LIP_NP;
+
+ /* disable Energy Detect and enable scrambler */
+ ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB);
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+ /* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */
+ ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) |
+ PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) |
+ PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED);
+
+ gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
+ break;
+
+ case CHIP_ID_YUKON_XL:
+ pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
+
+ /* select page 3 to access LED control register */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
+
+ /* set LED Function Control register */
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+ (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
+ PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
+ PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
+ PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */
+
+ /* set Polarity Control register */
+ gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
+ (PHY_M_POLC_LS1_P_MIX(4) |
+ PHY_M_POLC_IS0_P_MIX(4) |
+ PHY_M_POLC_LOS_CTRL(2) |
+ PHY_M_POLC_INIT_CTRL(2) |
+ PHY_M_POLC_STA1_CTRL(2) |
+ PHY_M_POLC_STA0_CTRL(2)));
+
+ /* restore page register */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
+ break;
+
+ case CHIP_ID_YUKON_EC_U:
+ case CHIP_ID_YUKON_EX:
+ case CHIP_ID_YUKON_SUPR:
+ pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
+
+ /* select page 3 to access LED control register */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
+
+ /* set LED Function Control register */
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+ (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
+ PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */
+ PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
+ PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */
+
+ /* set Blink Rate in LED Timer Control Register */
+ gm_phy_write(hw, port, PHY_MARV_INT_MASK,
+ ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
+ /* restore page register */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
+ break;
+
+ default:
+ /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
+ ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
+
+ /* turn off the Rx LED (LED_RX) */
+ ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
+ }
+
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_UL_2) {
+ /* apply fixes in PHY AFE */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);
+
+ /* increase differential signal amplitude in 10BASE-T */
+ gm_phy_write(hw, port, 0x18, 0xaa99);
+ gm_phy_write(hw, port, 0x17, 0x2011);
+
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+ /* fix for IEEE A/B Symmetry failure in 1000BASE-T */
+ gm_phy_write(hw, port, 0x18, 0xa204);
+ gm_phy_write(hw, port, 0x17, 0x2002);
+ }
+
+ /* set page register to 0 */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
+ } else if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+ hw->chip_rev == CHIP_REV_YU_FE2_A0) {
+ /* apply workaround for integrated resistors calibration */
+ gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17);
+ gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60);
+ } else if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
+ /* apply fixes in PHY AFE */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);
+
+ /* apply RDAC termination workaround */
+ gm_phy_write(hw, port, 24, 0x2800);
+ gm_phy_write(hw, port, 23, 0x2001);
+
+ /* set page register back to 0 */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
+ } else if (hw->chip_id != CHIP_ID_YUKON_EX &&
+ hw->chip_id < CHIP_ID_YUKON_SUPR) {
+ /* no effect on Yukon-XL */
+ gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
+
+ if (!(sky2->flags & SKY2_FLAG_AUTO_SPEED) ||
+ sky2->speed == SPEED_100) {
+ /* turn on 100 Mbps LED (LED_LINK100) */
+ ledover |= PHY_M_LED_MO_100(MO_LED_ON);
+ }
+
+ if (ledover)
+ gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
+
+ } else if (hw->chip_id == CHIP_ID_YUKON_PRM &&
+ (sky2_read8(hw, B2_MAC_CFG) & 0xf) == 0x7) {
+ int i;
+ /* This is a PHY register setup workaround copied from the vendor driver. */
+ static const struct {
+ u16 reg, val;
+ } eee_afe[] = {
+ { 0x156, 0x58ce },
+ { 0x153, 0x99eb },
+ { 0x141, 0x8064 },
+ /* { 0x155, 0x130b },*/
+ { 0x000, 0x0000 },
+ { 0x151, 0x8433 },
+ { 0x14b, 0x8c44 },
+ { 0x14c, 0x0f90 },
+ { 0x14f, 0x39aa },
+ /* { 0x154, 0x2f39 },*/
+ { 0x14d, 0xba33 },
+ { 0x144, 0x0048 },
+ { 0x152, 0x2010 },
+ /* { 0x158, 0x1223 },*/
+ { 0x140, 0x4444 },
+ { 0x154, 0x2f3b },
+ { 0x158, 0xb203 },
+ { 0x157, 0x2029 },
+ };
+
+ /* Start Workaround for OptimaEEE Rev.Z0 */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fb);
+
+ gm_phy_write(hw, port, 1, 0x4099);
+ gm_phy_write(hw, port, 3, 0x1120);
+ gm_phy_write(hw, port, 11, 0x113c);
+ gm_phy_write(hw, port, 14, 0x8100);
+ gm_phy_write(hw, port, 15, 0x112a);
+ gm_phy_write(hw, port, 17, 0x1008);
+
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fc);
+ gm_phy_write(hw, port, 1, 0x20b0);
+
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);
+
+ for (i = 0; i < ARRAY_SIZE(eee_afe); i++) {
+ /* apply AFE settings */
+ gm_phy_write(hw, port, 17, eee_afe[i].val);
+ gm_phy_write(hw, port, 16, eee_afe[i].reg | 1u<<13);
+ }
+
+ /* End Workaround for OptimaEEE */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
+
+ /* Enable 10Base-Te (EEE) */
+ if (hw->chip_id >= CHIP_ID_YUKON_PRM) {
+ reg = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
+ gm_phy_write(hw, port, PHY_MARV_EXT_CTRL,
+ reg | PHY_M_10B_TE_ENABLE);
+ }
+ }
+
+ /* Enable phy interrupt on auto-negotiation complete (or link up) */
+ if (sky2->flags & SKY2_FLAG_AUTO_SPEED)
+ gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
+ else
+ gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
+}
+
+static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
+static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
+
+static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
+{
+ u32 reg1;
+
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
+ reg1 &= ~phy_power[port];
+
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
+ reg1 |= coma_mode[port];
+
+ sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+ sky2_pci_read32(hw, PCI_DEV_REG1);
+
+ if (hw->chip_id == CHIP_ID_YUKON_FE)
+ gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE);
+ else if (hw->flags & SKY2_HW_ADV_POWER_CTL)
+ sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
+}
+
+static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
+{
+ u32 reg1;
+ u16 ctrl;
+
+ /* release GPHY Control reset */
+ sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
+
+ /* release GMAC reset */
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
+
+ if (hw->flags & SKY2_HW_NEWER_PHY) {
+ /* select page 2 to access MAC control register */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
+
+ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+ /* allow GMII Power Down */
+ ctrl &= ~PHY_M_MAC_GMIF_PUP;
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+ /* set page register back to 0 */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
+ }
+
+ /* setup General Purpose Control Register */
+ gma_write16(hw, port, GM_GP_CTRL,
+ GM_GPCR_FL_PASS | GM_GPCR_SPEED_100 |
+ GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS |
+ GM_GPCR_AU_SPD_DIS);
+
+ if (hw->chip_id != CHIP_ID_YUKON_EC) {
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+ /* select page 2 to access MAC control register */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
+
+ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+ /* enable Power Down */
+ ctrl |= PHY_M_PC_POW_D_ENA;
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+ /* set page register back to 0 */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
+ }
+
+ /* set IEEE compatible Power Down Mode (dev. #4.99) */
+ gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
+ }
+
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
+ reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */
+ sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+}
+
+/* configure IPG according to used link speed */
+static void sky2_set_ipg(struct sky2_port *sky2)
+{
+ u16 reg;
+
+ reg = gma_read16(sky2->hw, sky2->port, GM_SERIAL_MODE);
+ reg &= ~GM_SMOD_IPG_MSK;
+ if (sky2->speed > SPEED_100)
+ reg |= IPG_DATA_VAL(IPG_DATA_DEF_1000);
+ else
+ reg |= IPG_DATA_VAL(IPG_DATA_DEF_10_100);
+ gma_write16(sky2->hw, sky2->port, GM_SERIAL_MODE, reg);
+}
+
+/* Enable Rx/Tx */
+static void sky2_enable_rx_tx(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ u16 reg;
+
+ reg = gma_read16(hw, port, GM_GP_CTRL);
+ reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
+ gma_write16(hw, port, GM_GP_CTRL, reg);
+}
+
+/* Force a renegotiation */
+static void sky2_phy_reinit(struct sky2_port *sky2)
+{
+ spin_lock_bh(&sky2->phy_lock);
+ sky2_phy_init(sky2->hw, sky2->port);
+ sky2_enable_rx_tx(sky2);
+ spin_unlock_bh(&sky2->phy_lock);
+}
+
+/* Put device in state to listen for Wake On LAN */
+static void sky2_wol_init(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ enum flow_control save_mode;
+ u16 ctrl;
+
+ /* Bring hardware out of reset */
+ sky2_write16(hw, B0_CTST, CS_RST_CLR);
+ sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
+
+ sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
+
+ /* Force to 10/100
+ * sky2_reset will re-enable on resume
+ */
+ save_mode = sky2->flow_mode;
+ ctrl = sky2->advertising;
+
+ sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
+ sky2->flow_mode = FC_NONE;
+
+ spin_lock_bh(&sky2->phy_lock);
+ sky2_phy_power_up(hw, port);
+ sky2_phy_init(hw, port);
+ spin_unlock_bh(&sky2->phy_lock);
+
+ sky2->flow_mode = save_mode;
+ sky2->advertising = ctrl;
+
+ /* Set GMAC to no flow control and auto update for speed/duplex */
+ gma_write16(hw, port, GM_GP_CTRL,
+ GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
+ GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
+
+ /* Set WOL address */
+ memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
+ sky2->netdev->dev_addr, ETH_ALEN);
+
+ /* Turn on appropriate WOL control bits */
+ sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
+ ctrl = 0;
+ if (sky2->wol & WAKE_PHY)
+ ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
+ else
+ ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
+
+ if (sky2->wol & WAKE_MAGIC)
+ ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
+ else
+ ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;
+
+ ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
+ sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
+
+ /* Disable PiG firmware */
+ sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF);
+
+ /* block receiver */
+ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
+}
+
+static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
+{
+ struct net_device *dev = hw->dev[port];
+
+ if ( (hw->chip_id == CHIP_ID_YUKON_EX &&
+ hw->chip_rev != CHIP_REV_YU_EX_A0) ||
+ hw->chip_id >= CHIP_ID_YUKON_FE_P) {
+ /* Yukon-Extreme B0 and further Extreme devices */
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
+ } else if (dev->mtu > ETH_DATA_LEN) {
+ /* set Tx GMAC FIFO Almost Empty Threshold */
+ sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
+ (ECU_JUMBO_WM << 16) | ECU_AE_THR);
+
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
+ } else
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
+}
+
+static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
+{
+ struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
+ u16 reg;
+ u32 rx_reg;
+ int i;
+ const u8 *addr = hw->dev[port]->dev_addr;
+
+ sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
+ sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
+
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
+
+ if (hw->chip_id == CHIP_ID_YUKON_XL &&
+ hw->chip_rev == CHIP_REV_YU_XL_A0 &&
+ port == 1) {
+ /* WA DEV_472 -- looks like crossed wires on port 2 */
+ /* clear GMAC 1 Control reset */
+ sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
+ do {
+ sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
+ sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
+ } while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
+ gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
+ gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
+ }
+
+ sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
+
+ /* Enable Transmit FIFO Underrun */
+ sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
+
+ spin_lock_bh(&sky2->phy_lock);
+ sky2_phy_power_up(hw, port);
+ sky2_phy_init(hw, port);
+ spin_unlock_bh(&sky2->phy_lock);
+
+ /* MIB clear */
+ reg = gma_read16(hw, port, GM_PHY_ADDR);
+ gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
+
+ for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4)
+ gma_read16(hw, port, i);
+ gma_write16(hw, port, GM_PHY_ADDR, reg);
+
+ /* transmit control */
+ gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
+
+ /* receive control reg: unicast + multicast + no FCS */
+ gma_write16(hw, port, GM_RX_CTRL,
+ GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
+
+ /* transmit flow control */
+ gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
+
+ /* transmit parameter */
+ gma_write16(hw, port, GM_TX_PARAM,
+ TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
+ TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
+ TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
+ TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
+
+ /* serial mode register */
+ reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
+ GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF_1000);
+
+ if (hw->dev[port]->mtu > ETH_DATA_LEN)
+ reg |= GM_SMOD_JUMBO_ENA;
+
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
+ hw->chip_rev == CHIP_REV_YU_EC_U_B1)
+ reg |= GM_NEW_FLOW_CTRL;
+
+ gma_write16(hw, port, GM_SERIAL_MODE, reg);
+
+ /* virtual address for data */
+ gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
+
+ /* physical address: used for pause frames */
+ gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
+
+ /* ignore counter overflows */
+ gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
+ gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
+ gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
+
+ /* Configure Rx MAC FIFO */
+ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
+ rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
+ if (hw->chip_id == CHIP_ID_YUKON_EX ||
+ hw->chip_id == CHIP_ID_YUKON_FE_P)
+ rx_reg |= GMF_RX_OVER_ON;
+
+ sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg);
+
+ if (hw->chip_id == CHIP_ID_YUKON_XL) {
+ /* Hardware errata - clear flush mask */
+ sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 0);
+ } else {
+ /* Flush Rx MAC FIFO on any flow control or error */
+ sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
+ }
+
+	/* Set threshold to 0xa (64 bytes) + 1 to work around the pause bug */
+ reg = RX_GMF_FL_THR_DEF + 1;
+ /* Another magic mystery workaround from sk98lin */
+ if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+ hw->chip_rev == CHIP_REV_YU_FE2_A0)
+ reg = 0x178;
+ sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg);
+
+ /* Configure Tx MAC FIFO */
+ sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
+ sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
+
+ /* On chips without ram buffer, pause is controlled by MAC level */
+ if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
+ /* Pause threshold is scaled by 8 in bytes */
+ if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+ hw->chip_rev == CHIP_REV_YU_FE2_A0)
+ reg = 1568 / 8;
+ else
+ reg = 1024 / 8;
+ sky2_write16(hw, SK_REG(port, RX_GMF_UP_THR), reg);
+ sky2_write16(hw, SK_REG(port, RX_GMF_LP_THR), 768 / 8);
+
+ sky2_set_tx_stfwd(hw, port);
+ }
+
+ if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+ hw->chip_rev == CHIP_REV_YU_FE2_A0) {
+ /* disable dynamic watermark */
+ reg = sky2_read16(hw, SK_REG(port, TX_GMF_EA));
+ reg &= ~TX_DYN_WM_ENA;
+ sky2_write16(hw, SK_REG(port, TX_GMF_EA), reg);
+ }
+}
+
+/* Assign Ram Buffer allocation to queue */
+static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
+{
+ u32 end;
+
+ /* convert from K bytes to qwords used for hw register */
+ start *= 1024/8;
+ space *= 1024/8;
+ end = start + space - 1;
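+	/* e.g. a 24K allocation becomes 24 * 128 = 3072 qwords, so a
+	 * queue starting at 0 spans qword addresses 0..3071
+	 */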
+
+ sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
+ sky2_write32(hw, RB_ADDR(q, RB_START), start);
+ sky2_write32(hw, RB_ADDR(q, RB_END), end);
+ sky2_write32(hw, RB_ADDR(q, RB_WP), start);
+ sky2_write32(hw, RB_ADDR(q, RB_RP), start);
+
+ if (q == Q_R1 || q == Q_R2) {
+ u32 tp = space - space/4;
+
+		/* On receive queues, set the thresholds:
+		 * give the receiver priority when > 3/4 full,
+		 * send pause when down to 2K
+		 */
+ sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
+ sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
+
+ tp = space - 2048/8;
+ sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
+ sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
+ } else {
+		/* Enable store & forward on Tx queues because
+		 * the Tx FIFO is only 1K on Yukon
+		 */
+ sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
+ }
+
+ sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
+ sky2_read8(hw, RB_ADDR(q, RB_CTRL));
+}
+
+/* Setup Bus Memory Interface */
+static void sky2_qset(struct sky2_hw *hw, u16 q)
+{
+ sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
+ sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
+ sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
+ sky2_write32(hw, Q_ADDR(q, Q_WM), BMU_WM_DEFAULT);
+}
+
+/* Setup prefetch unit registers. This is the interface between
+ * hardware and driver list elements
+ */
+static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
+ dma_addr_t addr, u32 last)
+{
+ sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
+ sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
+ sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), upper_32_bits(addr));
+ sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), lower_32_bits(addr));
+ sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
+ sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);
+
+ sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
+}
+
+static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot)
+{
+ struct sky2_tx_le *le = sky2->tx_le + *slot;
+
+ *slot = RING_NEXT(*slot, sky2->tx_ring_size);
+ le->ctrl = 0;
+ return le;
+}
+
+static void tx_init(struct sky2_port *sky2)
+{
+ struct sky2_tx_le *le;
+
+ sky2->tx_prod = sky2->tx_cons = 0;
+ sky2->tx_tcpsum = 0;
+ sky2->tx_last_mss = 0;
+
+ le = get_tx_le(sky2, &sky2->tx_prod);
+ le->addr = 0;
+ le->opcode = OP_ADDR64 | HW_OWNER;
+ sky2->tx_last_upper = 0;
+}
+
+/* Update chip's next pointer */
+static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
+{
+	/* Make sure writes to descriptors are complete before we tell hardware */
+ wmb();
+ sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
+
+	/* Synchronize I/O since the next processor may write to the tail */
+ mmiowb();
+}
+
+
+static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
+{
+ struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
+ sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
+ le->ctrl = 0;
+ return le;
+}
+
+static unsigned sky2_get_rx_threshold(struct sky2_port *sky2)
+{
+ unsigned size;
+
+ /* Space needed for frame data + headers rounded up */
+ size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
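+	/* e.g. MTU 1500: roundup(1500 + 14 + 4, 8) = 1520 bytes, so the
+	 * threshold below is (1520 - 8) / 4 = 378 32-bit words
+	 */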
+
+ /* Stopping point for hardware truncation */
+ return (size - 8) / sizeof(u32);
+}
+
+static unsigned sky2_get_rx_data_size(struct sky2_port *sky2)
+{
+ struct rx_ring_info *re;
+ unsigned size;
+
+ /* Space needed for frame data + headers rounded up */
+ size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
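+	/* e.g. a 9000-byte jumbo MTU needs 9024 bytes: two full pages
+	 * (assuming 4K pages) plus an 832-byte linear part
+	 */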
+
+ sky2->rx_nfrags = size >> PAGE_SHIFT;
+ BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
+
+ /* Compute residue after pages */
+ size -= sky2->rx_nfrags << PAGE_SHIFT;
+
+ /* Optimize to handle small packets and headers */
+ if (size < copybreak)
+ size = copybreak;
+ if (size < ETH_HLEN)
+ size = ETH_HLEN;
+
+ return size;
+}
+
+/* Build description to hardware for one receive segment */
+static void sky2_rx_add(struct sky2_port *sky2, u8 op,
+ dma_addr_t map, unsigned len)
+{
+ struct sky2_rx_le *le;
+
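+	/* With 64-bit DMA addresses, an extra ADDR64 list element
+	 * carries the upper 32 bits ahead of the data element.
+	 */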
+ if (sizeof(dma_addr_t) > sizeof(u32)) {
+ le = sky2_next_rx(sky2);
+ le->addr = cpu_to_le32(upper_32_bits(map));
+ le->opcode = OP_ADDR64 | HW_OWNER;
+ }
+
+ le = sky2_next_rx(sky2);
+ le->addr = cpu_to_le32(lower_32_bits(map));
+ le->length = cpu_to_le16(len);
+ le->opcode = op | HW_OWNER;
+}
+
+/* Build description to hardware for one possibly fragmented skb */
+static void sky2_rx_submit(struct sky2_port *sky2,
+ const struct rx_ring_info *re)
+{
+ int i;
+
+ sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size);
+
+ for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++)
+ sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE);
+}
+
+
+static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
+ unsigned size)
+{
+ struct sk_buff *skb = re->skb;
+ int i;
+
+ re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(pdev, re->data_addr))
+ goto mapping_error;
+
+ dma_unmap_len_set(re, data_size, size);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0,
+ frag->size,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(&pdev->dev, re->frag_addr[i]))
+ goto map_page_error;
+ }
+ return 0;
+
+map_page_error:
+ while (--i >= 0) {
+ pci_unmap_page(pdev, re->frag_addr[i],
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_FROMDEVICE);
+ }
+
+ pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
+ PCI_DMA_FROMDEVICE);
+
+mapping_error:
+ if (net_ratelimit())
+ dev_warn(&pdev->dev, "%s: rx mapping error\n",
+ skb->dev->name);
+ return -EIO;
+}
+
+static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
+{
+ struct sk_buff *skb = re->skb;
+ int i;
+
+ pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
+ PCI_DMA_FROMDEVICE);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ pci_unmap_page(pdev, re->frag_addr[i],
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_FROMDEVICE);
+}
+
+/* Tell chip where to start receive checksum.
+ * Actually has two checksums, but set both the same to avoid possible byte
+ * order problems.
+ */
+static void rx_set_checksum(struct sky2_port *sky2)
+{
+ struct sky2_rx_le *le = sky2_next_rx(sky2);
+
+ le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
+ le->ctrl = 0;
+ le->opcode = OP_TCPSTART | HW_OWNER;
+
+ sky2_write32(sky2->hw,
+ Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ (sky2->netdev->features & NETIF_F_RXCSUM)
+ ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+}
+
+/* Enable/disable receive hash calculation (RSS) */
+static void rx_set_rss(struct net_device *dev, u32 features)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ int i, nkeys = 4;
+
+ /* Supports IPv6 and other modes */
+ if (hw->flags & SKY2_HW_NEW_LE) {
+ nkeys = 10;
+ sky2_write32(hw, SK_REG(sky2->port, RSS_CFG), HASH_ALL);
+ }
+
+ /* Program RSS initial values */
+ if (features & NETIF_F_RXHASH) {
+ u32 key[nkeys];
+
+ get_random_bytes(key, nkeys * sizeof(u32));
+ for (i = 0; i < nkeys; i++)
+ sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4),
+ key[i]);
+
+ /* Need to turn on (undocumented) flag to make hashing work */
+ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T),
+ RX_STFW_ENA);
+
+ sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ BMU_ENA_RX_RSS_HASH);
+ } else
+ sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ BMU_DIS_RX_RSS_HASH);
+}
+
+/*
+ * The RX Stop command will not work for Yukon-2 if the BMU does not
+ * reach the end of packet and since we can't make sure that we have
+ * incoming data, we must reset the BMU while it is not doing a DMA
+ * transfer. Since it is possible that the RX path is still active,
+ * the RX RAM buffer will be stopped first, so any possible incoming
+ * data will not trigger a DMA. After the RAM buffer is stopped, the
+ * BMU is polled until any DMA in progress is ended and only then it
+ * will be reset.
+ */
+static void sky2_rx_stop(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned rxq = rxqaddr[sky2->port];
+ int i;
+
+ /* disable the RAM Buffer receive queue */
+ sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
+
+ for (i = 0; i < 0xffff; i++)
+ if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
+ == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
+ goto stopped;
+
+ netdev_warn(sky2->netdev, "receiver stop failed\n");
+stopped:
+ sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
+
+ /* reset the Rx prefetch unit */
+ sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
+ mmiowb();
+}
+
+/* Clean out receive buffer area, assumes receiver hardware stopped */
+static void sky2_rx_clean(struct sky2_port *sky2)
+{
+ unsigned i;
+
+ memset(sky2->rx_le, 0, RX_LE_BYTES);
+ for (i = 0; i < sky2->rx_pending; i++) {
+ struct rx_ring_info *re = sky2->rx_ring + i;
+
+ if (re->skb) {
+ sky2_rx_unmap_skb(sky2->hw->pdev, re);
+ kfree_skb(re->skb);
+ re->skb = NULL;
+ }
+ }
+}
+
+/* Basic MII support */
+static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(ifr);
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ int err = -EOPNOTSUPP;
+
+ if (!netif_running(dev))
+ return -ENODEV; /* Phy still in reset */
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = PHY_ADDR_MARV;
+
+ /* fallthru */
+ case SIOCGMIIREG: {
+ u16 val = 0;
+
+ spin_lock_bh(&sky2->phy_lock);
+ err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
+ spin_unlock_bh(&sky2->phy_lock);
+
+ data->val_out = val;
+ break;
+ }
+
+ case SIOCSMIIREG:
+ spin_lock_bh(&sky2->phy_lock);
+ err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
+ data->val_in);
+ spin_unlock_bh(&sky2->phy_lock);
+ break;
+ }
+ return err;
+}
+
+#define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO)
+
+static void sky2_vlan_mode(struct net_device *dev, u32 features)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ u16 port = sky2->port;
+
+ if (features & NETIF_F_HW_VLAN_RX)
+ sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
+ RX_VLAN_STRIP_ON);
+ else
+ sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
+ RX_VLAN_STRIP_OFF);
+
+ if (features & NETIF_F_HW_VLAN_TX) {
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+ TX_VLAN_TAG_ON);
+
+ dev->vlan_features |= SKY2_VLAN_OFFLOADS;
+ } else {
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+ TX_VLAN_TAG_OFF);
+
+ /* Can't do transmit offload of vlan without hw vlan */
+ dev->vlan_features &= ~SKY2_VLAN_OFFLOADS;
+ }
+}
+
+/* Amount of required worst case padding in rx buffer */
+static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
+{
+ return (hw->flags & SKY2_HW_RAM_BUFFER) ? 8 : 2;
+}
+
+/*
+ * Allocate an skb for receiving. If the MTU is large enough
+ * make the skb non-linear with a fragment list of pages.
+ */
+static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2, gfp_t gfp)
+{
+ struct sk_buff *skb;
+ int i;
+
+ skb = __netdev_alloc_skb(sky2->netdev,
+ sky2->rx_data_size + sky2_rx_pad(sky2->hw),
+ gfp);
+ if (!skb)
+ goto nomem;
+
+ if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) {
+ unsigned char *start;
+ /*
+		 * Workaround for a FIFO bug that causes a hang
+		 * if the receive buffer is not 64-byte aligned.
+		 * The buffer returned from netdev_alloc_skb is
+		 * aligned unless slab debugging is enabled.
+ */
+ start = PTR_ALIGN(skb->data, 8);
+ skb_reserve(skb, start - skb->data);
+ } else
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ for (i = 0; i < sky2->rx_nfrags; i++) {
+ struct page *page = alloc_page(gfp);
+
+ if (!page)
+ goto free_partial;
+ skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
+ }
+
+ return skb;
+free_partial:
+ kfree_skb(skb);
+nomem:
+ return NULL;
+}
+
+static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
+{
+ sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
+}
+
+static int sky2_alloc_rx_skbs(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned i;
+
+ sky2->rx_data_size = sky2_get_rx_data_size(sky2);
+
+ /* Fill Rx ring */
+ for (i = 0; i < sky2->rx_pending; i++) {
+ struct rx_ring_info *re = sky2->rx_ring + i;
+
+ re->skb = sky2_rx_alloc(sky2, GFP_KERNEL);
+ if (!re->skb)
+ return -ENOMEM;
+
+ if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) {
+ dev_kfree_skb(re->skb);
+ re->skb = NULL;
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Setup receiver buffer pool.
+ * In the normal case this ends up creating one list element per skb
+ * in the receive ring. In the worst case, with a large MTU and each
+ * allocation falling in a different 64-bit region, that results
+ * in 6 list elements per ring entry.
+ * One element is used for checksum enable/disable, and one
+ * extra to avoid wrap.
+ */
+static void sky2_rx_start(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ struct rx_ring_info *re;
+ unsigned rxq = rxqaddr[sky2->port];
+ unsigned i, thresh;
+
+ sky2->rx_put = sky2->rx_next = 0;
+ sky2_qset(hw, rxq);
+
+ /* On PCI express lowering the watermark gives better performance */
+ if (pci_is_pcie(hw->pdev))
+ sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX);
+
+	/* These chips have no RAM buffer;
+	 * MAC Rx RAM read is controlled by hardware */
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
+ hw->chip_rev > CHIP_REV_YU_EC_U_A0)
+ sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
+
+ sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
+
+ if (!(hw->flags & SKY2_HW_NEW_LE))
+ rx_set_checksum(sky2);
+
+ if (!(hw->flags & SKY2_HW_RSS_BROKEN))
+ rx_set_rss(sky2->netdev, sky2->netdev->features);
+
+ /* submit Rx ring */
+ for (i = 0; i < sky2->rx_pending; i++) {
+ re = sky2->rx_ring + i;
+ sky2_rx_submit(sky2, re);
+ }
+
+ /*
+ * The receiver hangs if it receives frames larger than the
+ * packet buffer. As a workaround, truncate oversize frames, but
+	 * the register is limited to 9 bits, so for frames > 2052
+	 * you had better get the MTU right!
+ */
+ thresh = sky2_get_rx_threshold(sky2);
+ if (thresh > 0x1ff)
+ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
+ else {
+ sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh);
+ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
+ }
+
+ /* Tell chip about available buffers */
+ sky2_rx_update(sky2, rxq);
+
+ if (hw->chip_id == CHIP_ID_YUKON_EX ||
+ hw->chip_id == CHIP_ID_YUKON_SUPR) {
+ /*
+ * Disable flushing of non ASF packets;
+ * must be done after initializing the BMUs;
+ * drivers without ASF support should do this too, otherwise
+ * it may happen that they cannot run on ASF devices;
+ * remember that the MAC FIFO isn't reset during initialization.
+ */
+ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_MACSEC_FLUSH_OFF);
+ }
+
+ if (hw->chip_id >= CHIP_ID_YUKON_SUPR) {
+ /* Enable RX Home Address & Routing Header checksum fix */
+ sky2_write16(hw, SK_REG(sky2->port, RX_GMF_FL_CTRL),
+ RX_IPV6_SA_MOB_ENA | RX_IPV6_DA_MOB_ENA);
+
+ /* Enable TX Home Address & Routing Header checksum fix */
+ sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST),
+ TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN);
+ }
+}
+
+static int sky2_alloc_buffers(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+
+ /* must be power of 2 */
+ sky2->tx_le = pci_alloc_consistent(hw->pdev,
+ sky2->tx_ring_size *
+ sizeof(struct sky2_tx_le),
+ &sky2->tx_le_map);
+ if (!sky2->tx_le)
+ goto nomem;
+
+ sky2->tx_ring = kcalloc(sky2->tx_ring_size, sizeof(struct tx_ring_info),
+ GFP_KERNEL);
+ if (!sky2->tx_ring)
+ goto nomem;
+
+ sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
+ &sky2->rx_le_map);
+ if (!sky2->rx_le)
+ goto nomem;
+ memset(sky2->rx_le, 0, RX_LE_BYTES);
+
+ sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info),
+ GFP_KERNEL);
+ if (!sky2->rx_ring)
+ goto nomem;
+
+ return sky2_alloc_rx_skbs(sky2);
+nomem:
+ return -ENOMEM;
+}
+
+static void sky2_free_buffers(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+
+ sky2_rx_clean(sky2);
+
+ if (sky2->rx_le) {
+ pci_free_consistent(hw->pdev, RX_LE_BYTES,
+ sky2->rx_le, sky2->rx_le_map);
+ sky2->rx_le = NULL;
+ }
+ if (sky2->tx_le) {
+ pci_free_consistent(hw->pdev,
+ sky2->tx_ring_size * sizeof(struct sky2_tx_le),
+ sky2->tx_le, sky2->tx_le_map);
+ sky2->tx_le = NULL;
+ }
+ kfree(sky2->tx_ring);
+ kfree(sky2->rx_ring);
+
+ sky2->tx_ring = NULL;
+ sky2->rx_ring = NULL;
+}
+
+static void sky2_hw_up(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ u32 ramsize;
+ int cap;
+ struct net_device *otherdev = hw->dev[sky2->port^1];
+
+ tx_init(sky2);
+
+ /*
+	 * On dual-port PCI-X cards, there is a problem where status
+	 * can be received out of order due to split transactions
+ */
+ if (otherdev && netif_running(otherdev) &&
+ (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
+ u16 cmd;
+
+ cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
+ cmd &= ~PCI_X_CMD_MAX_SPLIT;
+ sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
+ }
+
+ sky2_mac_init(hw, port);
+
+ /* Register is number of 4K blocks on internal RAM buffer. */
+ ramsize = sky2_read8(hw, B2_E_0) * 4;
+ if (ramsize > 0) {
+ u32 rxspace;
+
+ netdev_dbg(sky2->netdev, "ram buffer %dK\n", ramsize);
+ if (ramsize < 16)
+ rxspace = ramsize / 2;
+ else
+ rxspace = 8 + (2*(ramsize - 16))/3;
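+		/* e.g. 48K of RAM: rxspace = 8 + (2*32)/3 = 29K rx, 19K tx */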
+
+ sky2_ramset(hw, rxqaddr[port], 0, rxspace);
+ sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
+
+ /* Make sure SyncQ is disabled */
+ sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
+ RB_RST_SET);
+ }
+
+ sky2_qset(hw, txqaddr[port]);
+
+	/* This is copied from sk98lin 10.0.5.3; no one tells me about errata */
+ if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0)
+ sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF);
+
+ /* Set almost empty threshold */
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
+ hw->chip_rev == CHIP_REV_YU_EC_U_A0)
+ sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV);
+
+ sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
+ sky2->tx_ring_size - 1);
+
+ sky2_vlan_mode(sky2->netdev, sky2->netdev->features);
+ netdev_update_features(sky2->netdev);
+
+ sky2_rx_start(sky2);
+}
+
+/* Setup device IRQ and enable napi to process */
+static int sky2_setup_irq(struct sky2_hw *hw, const char *name)
+{
+ struct pci_dev *pdev = hw->pdev;
+ int err;
+
+ err = request_irq(pdev->irq, sky2_intr,
+ (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
+ name, hw);
+ if (err)
+ dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
+ else {
+ napi_enable(&hw->napi);
+ sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
+ sky2_read32(hw, B0_IMSK);
+ }
+
+ return err;
+}
+
+
+/* Bring up network interface. */
+static int sky2_up(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ u32 imask;
+ int err;
+
+ netif_carrier_off(dev);
+
+ err = sky2_alloc_buffers(sky2);
+ if (err)
+ goto err_out;
+
+	/* With a single port, the IRQ is set up when the device is brought up */
+ if (hw->ports == 1 && (err = sky2_setup_irq(hw, dev->name)))
+ goto err_out;
+
+ sky2_hw_up(sky2);
+
+ /* Enable interrupts from phy/mac for port */
+ imask = sky2_read32(hw, B0_IMSK);
+ imask |= portirq_msk[port];
+ sky2_write32(hw, B0_IMSK, imask);
+ sky2_read32(hw, B0_IMSK);
+
+ netif_info(sky2, ifup, dev, "enabling interface\n");
+
+ return 0;
+
+err_out:
+ sky2_free_buffers(sky2);
+ return err;
+}
+
+/* Modular subtraction in ring */
+static inline int tx_inuse(const struct sky2_port *sky2)
+{
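+	/* e.g. ring size 512, prod = 5, cons = 510: (5 - 510) & 511 = 7 */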
+ return (sky2->tx_prod - sky2->tx_cons) & (sky2->tx_ring_size - 1);
+}
+
+/* Number of list elements available for next tx */
+static inline int tx_avail(const struct sky2_port *sky2)
+{
+ return sky2->tx_pending - tx_inuse(sky2);
+}
+
+/* Estimate of number of transmit list elements required */
+static unsigned tx_le_req(const struct sk_buff *skb)
+{
+ unsigned count;
+
+ count = (skb_shinfo(skb)->nr_frags + 1)
+ * (sizeof(dma_addr_t) / sizeof(u32));
+
+ if (skb_is_gso(skb))
+ ++count;
+ else if (sizeof(dma_addr_t) == sizeof(u32))
+ ++count; /* possible vlan */
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ ++count;
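+	/* e.g. a 3-fragment TSO packet with 64-bit DMA addresses:
+	 * (3 + 1) * 2 = 8, plus 1 (MSS) plus 1 (checksum) = 10 elements
+	 */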
+
+ return count;
+}
+
+static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
+{
+ if (re->flags & TX_MAP_SINGLE)
+ pci_unmap_single(pdev, dma_unmap_addr(re, mapaddr),
+ dma_unmap_len(re, maplen),
+ PCI_DMA_TODEVICE);
+ else if (re->flags & TX_MAP_PAGE)
+ pci_unmap_page(pdev, dma_unmap_addr(re, mapaddr),
+ dma_unmap_len(re, maplen),
+ PCI_DMA_TODEVICE);
+ re->flags = 0;
+}
+
+/*
+ * Put one packet in ring for transmit.
+ * A single packet can generate multiple list elements, and
+ * the number of ring elements will probably be less than the number
+ * of list elements used.
+ */
+static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ struct sky2_tx_le *le = NULL;
+ struct tx_ring_info *re;
+ unsigned i, len;
+ dma_addr_t mapping;
+ u32 upper;
+ u16 slot;
+ u16 mss;
+ u8 ctrl;
+
+ if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
+ return NETDEV_TX_BUSY;
+
+ len = skb_headlen(skb);
+ mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
+
+ if (pci_dma_mapping_error(hw->pdev, mapping))
+ goto mapping_error;
+
+ slot = sky2->tx_prod;
+ netif_printk(sky2, tx_queued, KERN_DEBUG, dev,
+ "tx queued, slot %u, len %d\n", slot, skb->len);
+
+ /* Send high bits if needed */
+ upper = upper_32_bits(mapping);
+ if (upper != sky2->tx_last_upper) {
+ le = get_tx_le(sky2, &slot);
+ le->addr = cpu_to_le32(upper);
+ sky2->tx_last_upper = upper;
+ le->opcode = OP_ADDR64 | HW_OWNER;
+ }
+
+ /* Check for TCP Segmentation Offload */
+ mss = skb_shinfo(skb)->gso_size;
+ if (mss != 0) {
+
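+		/* Older chips take the full segment length including headers
+		 * (OP_LRGLEN); newer ones take the bare MSS (OP_MSS).
+		 */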
+ if (!(hw->flags & SKY2_HW_NEW_LE))
+ mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+
+ if (mss != sky2->tx_last_mss) {
+ le = get_tx_le(sky2, &slot);
+ le->addr = cpu_to_le32(mss);
+
+ if (hw->flags & SKY2_HW_NEW_LE)
+ le->opcode = OP_MSS | HW_OWNER;
+ else
+ le->opcode = OP_LRGLEN | HW_OWNER;
+ sky2->tx_last_mss = mss;
+ }
+ }
+
+ ctrl = 0;
+
+ /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
+ if (vlan_tx_tag_present(skb)) {
+ if (!le) {
+ le = get_tx_le(sky2, &slot);
+ le->addr = 0;
+ le->opcode = OP_VLAN|HW_OWNER;
+ } else
+ le->opcode |= OP_VLAN;
+ le->length = cpu_to_be16(vlan_tx_tag_get(skb));
+ ctrl |= INS_VLAN;
+ }
+
+ /* Handle TCP checksum offload */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		/* On some Yukon EX versions the checksum encoding changed. */
+ if (hw->flags & SKY2_HW_AUTO_TX_SUM)
+ ctrl |= CALSUM; /* auto checksum */
+ else {
+ const unsigned offset = skb_transport_offset(skb);
+ u32 tcpsum;
+
+ tcpsum = offset << 16; /* sum start */
+ tcpsum |= offset + skb->csum_offset; /* sum write */
+
+ ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
+ if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+ ctrl |= UDPTCP;
+
+ if (tcpsum != sky2->tx_tcpsum) {
+ sky2->tx_tcpsum = tcpsum;
+
+ le = get_tx_le(sky2, &slot);
+ le->addr = cpu_to_le32(tcpsum);
+ le->length = 0; /* initial checksum value */
+ le->ctrl = 1; /* one packet */
+ le->opcode = OP_TCPLISW | HW_OWNER;
+ }
+ }
+ }
+
+ re = sky2->tx_ring + slot;
+ re->flags = TX_MAP_SINGLE;
+ dma_unmap_addr_set(re, mapaddr, mapping);
+ dma_unmap_len_set(re, maplen, len);
+
+ le = get_tx_le(sky2, &slot);
+ le->addr = cpu_to_le32(lower_32_bits(mapping));
+ le->length = cpu_to_le16(len);
+ le->ctrl = ctrl;
+ le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);
+
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
+ frag->size, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(&hw->pdev->dev, mapping))
+ goto mapping_unwind;
+
+ upper = upper_32_bits(mapping);
+ if (upper != sky2->tx_last_upper) {
+ le = get_tx_le(sky2, &slot);
+ le->addr = cpu_to_le32(upper);
+ sky2->tx_last_upper = upper;
+ le->opcode = OP_ADDR64 | HW_OWNER;
+ }
+
+ re = sky2->tx_ring + slot;
+ re->flags = TX_MAP_PAGE;
+ dma_unmap_addr_set(re, mapaddr, mapping);
+ dma_unmap_len_set(re, maplen, frag->size);
+
+ le = get_tx_le(sky2, &slot);
+ le->addr = cpu_to_le32(lower_32_bits(mapping));
+ le->length = cpu_to_le16(frag->size);
+ le->ctrl = ctrl;
+ le->opcode = OP_BUFFER | HW_OWNER;
+ }
+
+ re->skb = skb;
+ le->ctrl |= EOP;
+
+ sky2->tx_prod = slot;
+
+ if (tx_avail(sky2) <= MAX_SKB_TX_LE)
+ netif_stop_queue(dev);
+
+ sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
+
+ return NETDEV_TX_OK;
+
+mapping_unwind:
+ for (i = sky2->tx_prod; i != slot; i = RING_NEXT(i, sky2->tx_ring_size)) {
+ re = sky2->tx_ring + i;
+
+ sky2_tx_unmap(hw->pdev, re);
+ }
+
+mapping_error:
+ if (net_ratelimit())
+ dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Free ring elements starting at tx_cons until "done"
+ *
+ * NB:
+ * 1. The hardware will tell us about partial completion of multi-part
+ * buffers, so make sure not to free the skb too early.
+ * 2. This may run in parallel with start_xmit because it only
+ * looks at the tail of the queue (tx_cons), not
+ * the head (tx_prod)
+ */
+static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
+{
+ struct net_device *dev = sky2->netdev;
+ unsigned idx;
+
+ BUG_ON(done >= sky2->tx_ring_size);
+
+ for (idx = sky2->tx_cons; idx != done;
+ idx = RING_NEXT(idx, sky2->tx_ring_size)) {
+ struct tx_ring_info *re = sky2->tx_ring + idx;
+ struct sk_buff *skb = re->skb;
+
+ sky2_tx_unmap(sky2->hw->pdev, re);
+
+ if (skb) {
+ netif_printk(sky2, tx_done, KERN_DEBUG, dev,
+ "tx done %u\n", idx);
+
+ u64_stats_update_begin(&sky2->tx_stats.syncp);
+ ++sky2->tx_stats.packets;
+ sky2->tx_stats.bytes += skb->len;
+ u64_stats_update_end(&sky2->tx_stats.syncp);
+
+ re->skb = NULL;
+ dev_kfree_skb_any(skb);
+
+ sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
+ }
+ }
+
+ sky2->tx_cons = idx;
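+	/* Publish tx_cons before the transmit path re-checks ring space */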
+ smp_mb();
+}
+
+static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
+{
+ /* Disable Force Sync bit and Enable Alloc bit */
+ sky2_write8(hw, SK_REG(port, TXA_CTRL),
+ TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
+
+ /* Stop Interval Timer and Limit Counter of Tx Arbiter */
+ sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
+ sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
+
+ /* Reset the PCI FIFO of the async Tx queue */
+ sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
+ BMU_RST_SET | BMU_FIFO_RST);
+
+ /* Reset the Tx prefetch units */
+ sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
+ PREF_UNIT_RST_SET);
+
+ sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
+ sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
+}
+
+static void sky2_hw_down(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ u16 ctrl;
+
+ /* Force flow control off */
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
+
+ /* Stop transmitter */
+ sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
+ sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));
+
+ sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
+ RB_RST_SET | RB_DIS_OP_MD);
+
+ ctrl = gma_read16(hw, port, GM_GP_CTRL);
+ ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
+ gma_write16(hw, port, GM_GP_CTRL, ctrl);
+
+ sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
+
+ /* Workaround shared GMAC reset */
+ if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 &&
+ port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
+
+ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
+
+	/* Force any delayed status interrupt and NAPI poll */
+ sky2_write32(hw, STAT_LEV_TIMER_CNT, 0);
+ sky2_write32(hw, STAT_TX_TIMER_CNT, 0);
+ sky2_write32(hw, STAT_ISR_TIMER_CNT, 0);
+ sky2_read8(hw, STAT_ISR_TIMER_CTRL);
+
+ sky2_rx_stop(sky2);
+
+ spin_lock_bh(&sky2->phy_lock);
+ sky2_phy_power_down(hw, port);
+ spin_unlock_bh(&sky2->phy_lock);
+
+ sky2_tx_reset(hw, port);
+
+ /* Free any pending frames stuck in HW queue */
+ sky2_tx_complete(sky2, sky2->tx_prod);
+}
+
+/* Network shutdown */
+static int sky2_down(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+
+ /* Never really got started! */
+ if (!sky2->tx_le)
+ return 0;
+
+ netif_info(sky2, ifdown, dev, "disabling interface\n");
+
+ /* Disable port IRQ */
+ sky2_write32(hw, B0_IMSK,
+ sky2_read32(hw, B0_IMSK) & ~portirq_msk[sky2->port]);
+ sky2_read32(hw, B0_IMSK);
+
+ if (hw->ports == 1) {
+ napi_disable(&hw->napi);
+ free_irq(hw->pdev->irq, hw);
+ } else {
+ synchronize_irq(hw->pdev->irq);
+ napi_synchronize(&hw->napi);
+ }
+
+ sky2_hw_down(sky2);
+
+ sky2_free_buffers(sky2);
+
+ return 0;
+}
+
+static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
+{
+ if (hw->flags & SKY2_HW_FIBRE_PHY)
+ return SPEED_1000;
+
+ if (!(hw->flags & SKY2_HW_GIGABIT)) {
+ if (aux & PHY_M_PS_SPEED_100)
+ return SPEED_100;
+ else
+ return SPEED_10;
+ }
+
+ switch (aux & PHY_M_PS_SPEED_MSK) {
+ case PHY_M_PS_SPEED_1000:
+ return SPEED_1000;
+ case PHY_M_PS_SPEED_100:
+ return SPEED_100;
+ default:
+ return SPEED_10;
+ }
+}
+
+static void sky2_link_up(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ static const char *fc_name[] = {
+ [FC_NONE] = "none",
+ [FC_TX] = "tx",
+ [FC_RX] = "rx",
+ [FC_BOTH] = "both",
+ };
+
+ sky2_set_ipg(sky2);
+
+ sky2_enable_rx_tx(sky2);
+
+ gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
+
+ netif_carrier_on(sky2->netdev);
+
+ mod_timer(&hw->watchdog_timer, jiffies + 1);
+
+ /* Turn on link LED */
+ sky2_write8(hw, SK_REG(port, LNK_LED_REG),
+ LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
+
+ netif_info(sky2, link, sky2->netdev,
+ "Link is up at %d Mbps, %s duplex, flow control %s\n",
+ sky2->speed,
+ sky2->duplex == DUPLEX_FULL ? "full" : "half",
+ fc_name[sky2->flow_status]);
+}
+
+static void sky2_link_down(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ u16 reg;
+
+ gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
+
+ reg = gma_read16(hw, port, GM_GP_CTRL);
+ reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
+ gma_write16(hw, port, GM_GP_CTRL, reg);
+
+ netif_carrier_off(sky2->netdev);
+
+ /* Turn off link LED */
+ sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
+
+ netif_info(sky2, link, sky2->netdev, "Link is down\n");
+
+ sky2_phy_init(hw, port);
+}
+
+static enum flow_control sky2_flow(int rx, int tx)
+{
+ if (rx)
+ return tx ? FC_BOTH : FC_RX;
+ else
+ return tx ? FC_TX : FC_NONE;
+}
+
+static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ u16 advert, lpa;
+
+ advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
+ lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
+ if (lpa & PHY_M_AN_RF) {
+ netdev_err(sky2->netdev, "remote fault\n");
+ return -1;
+ }
+
+ if (!(aux & PHY_M_PS_SPDUP_RES)) {
+ netdev_err(sky2->netdev, "speed/duplex mismatch\n");
+ return -1;
+ }
+
+ sky2->speed = sky2_phy_speed(hw, aux);
+ sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
+
+	/* Since the pause result bits seem to be in different positions on
+	 * different chips, look at the registers.
+ */
+ if (hw->flags & SKY2_HW_FIBRE_PHY) {
+ /* Shift for bits in fiber PHY */
+ advert &= ~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM);
+ lpa &= ~(LPA_PAUSE_CAP|LPA_PAUSE_ASYM);
+
+ if (advert & ADVERTISE_1000XPAUSE)
+ advert |= ADVERTISE_PAUSE_CAP;
+ if (advert & ADVERTISE_1000XPSE_ASYM)
+ advert |= ADVERTISE_PAUSE_ASYM;
+ if (lpa & LPA_1000XPAUSE)
+ lpa |= LPA_PAUSE_CAP;
+ if (lpa & LPA_1000XPAUSE_ASYM)
+ lpa |= LPA_PAUSE_ASYM;
+ }
+
+ sky2->flow_status = FC_NONE;
+ if (advert & ADVERTISE_PAUSE_CAP) {
+ if (lpa & LPA_PAUSE_CAP)
+ sky2->flow_status = FC_BOTH;
+ else if (advert & ADVERTISE_PAUSE_ASYM)
+ sky2->flow_status = FC_RX;
+ } else if (advert & ADVERTISE_PAUSE_ASYM) {
+ if ((lpa & LPA_PAUSE_CAP) && (lpa & LPA_PAUSE_ASYM))
+ sky2->flow_status = FC_TX;
+ }
+
+ if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000 &&
+ !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX))
+ sky2->flow_status = FC_NONE;
+
+ if (sky2->flow_status & FC_TX)
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
+ else
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
+
+ return 0;
+}
+
+/* Interrupt from PHY */
+static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
+{
+ struct net_device *dev = hw->dev[port];
+ struct sky2_port *sky2 = netdev_priv(dev);
+ u16 istatus, phystat;
+
+ if (!netif_running(dev))
+ return;
+
+ spin_lock(&sky2->phy_lock);
+ istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
+ phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
+
+ netif_info(sky2, intr, sky2->netdev, "phy interrupt status 0x%x 0x%x\n",
+ istatus, phystat);
+
+ if (istatus & PHY_M_IS_AN_COMPL) {
+ if (sky2_autoneg_done(sky2, phystat) == 0 &&
+ !netif_carrier_ok(dev))
+ sky2_link_up(sky2);
+ goto out;
+ }
+
+ if (istatus & PHY_M_IS_LSP_CHANGE)
+ sky2->speed = sky2_phy_speed(hw, phystat);
+
+ if (istatus & PHY_M_IS_DUP_CHANGE)
+ sky2->duplex =
+ (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
+
+ if (istatus & PHY_M_IS_LST_CHANGE) {
+ if (phystat & PHY_M_PS_LINK_UP)
+ sky2_link_up(sky2);
+ else
+ sky2_link_down(sky2);
+ }
+out:
+ spin_unlock(&sky2->phy_lock);
+}
+
+/* Special quick link interrupt (Yukon-2 Optima only) */
+static void sky2_qlink_intr(struct sky2_hw *hw)
+{
+ struct sky2_port *sky2 = netdev_priv(hw->dev[0]);
+ u32 imask;
+ u16 phy;
+
+ /* disable irq */
+ imask = sky2_read32(hw, B0_IMSK);
+ imask &= ~Y2_IS_PHY_QLNK;
+ sky2_write32(hw, B0_IMSK, imask);
+
+ /* reset PHY Link Detect */
+ phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+ sky2_link_up(sky2);
+}
+
+/* Transmit timeout is only called if we are running, carrier is up
+ * and tx queue is full (stopped).
+ */
+static void sky2_tx_timeout(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+
+ netif_err(sky2, timer, dev, "tx timeout\n");
+
+ netdev_printk(KERN_DEBUG, dev, "transmit ring %u .. %u report=%u done=%u\n",
+ sky2->tx_cons, sky2->tx_prod,
+ sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
+ sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));
+
+ /* can't restart safely under softirq */
+ schedule_work(&hw->restart_work);
+}
+
+static int sky2_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ int err;
+ u16 ctl, mode;
+ u32 imask;
+
+ /* MTU size outside the spec */
+ if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
+ return -EINVAL;
+
+	/* MTU > 1500 not allowed on Yukon FE and FE+ */
+ if (new_mtu > ETH_DATA_LEN &&
+ (hw->chip_id == CHIP_ID_YUKON_FE ||
+ hw->chip_id == CHIP_ID_YUKON_FE_P))
+ return -EINVAL;
+
+ if (!netif_running(dev)) {
+ dev->mtu = new_mtu;
+ netdev_update_features(dev);
+ return 0;
+ }
+
+ imask = sky2_read32(hw, B0_IMSK);
+ sky2_write32(hw, B0_IMSK, 0);
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ napi_disable(&hw->napi);
+ netif_tx_disable(dev);
+
+ synchronize_irq(hw->pdev->irq);
+
+ if (!(hw->flags & SKY2_HW_RAM_BUFFER))
+ sky2_set_tx_stfwd(hw, port);
+
+ ctl = gma_read16(hw, port, GM_GP_CTRL);
+ gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
+ sky2_rx_stop(sky2);
+ sky2_rx_clean(sky2);
+
+ dev->mtu = new_mtu;
+ netdev_update_features(dev);
+
+ mode = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA;
+ if (sky2->speed > SPEED_100)
+ mode |= IPG_DATA_VAL(IPG_DATA_DEF_1000);
+ else
+ mode |= IPG_DATA_VAL(IPG_DATA_DEF_10_100);
+
+ if (dev->mtu > ETH_DATA_LEN)
+ mode |= GM_SMOD_JUMBO_ENA;
+
+ gma_write16(hw, port, GM_SERIAL_MODE, mode);
+
+ sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);
+
+ err = sky2_alloc_rx_skbs(sky2);
+ if (!err)
+ sky2_rx_start(sky2);
+ else
+ sky2_rx_clean(sky2);
+ sky2_write32(hw, B0_IMSK, imask);
+
+ sky2_read32(hw, B0_Y2_SP_LISR);
+ napi_enable(&hw->napi);
+
+ if (err)
+ dev_close(dev);
+ else {
+ gma_write16(hw, port, GM_GP_CTRL, ctl);
+
+ netif_wake_queue(dev);
+ }
+
+ return err;
+}
+
+/* For small packets, copy the data and reuse the existing skb for the next receive */
+static struct sk_buff *receive_copy(struct sky2_port *sky2,
+ const struct rx_ring_info *re,
+ unsigned length)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(sky2->netdev, length);
+ if (likely(skb)) {
+ pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
+ length, PCI_DMA_FROMDEVICE);
+ skb_copy_from_linear_data(re->skb, skb->data, length);
+ skb->ip_summed = re->skb->ip_summed;
+ skb->csum = re->skb->csum;
+ pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
+ length, PCI_DMA_FROMDEVICE);
+ re->skb->ip_summed = CHECKSUM_NONE;
+ skb_put(skb, length);
+ }
+ return skb;
+}
+
+/* Adjust length of skb with fragments to match received data */
+static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
+ unsigned int length)
+{
+ int i, num_frags;
+ unsigned int size;
+
+ /* put header into skb */
+ size = min(length, hdr_space);
+ skb->tail += size;
+ skb->len += size;
+ length -= size;
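+	/* (open-coded equivalent of skb_put() for the linear header) */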
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ for (i = 0; i < num_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ if (length == 0) {
+ /* don't need this page */
+ __skb_frag_unref(frag);
+ --skb_shinfo(skb)->nr_frags;
+ } else {
+ size = min(length, (unsigned) PAGE_SIZE);
+
+ frag->size = size;
+ skb->data_len += size;
+ skb->truesize += size;
+ skb->len += size;
+ length -= size;
+ }
+ }
+}
+
+/* Normal packet - take skb from ring element and put in a new one */
+static struct sk_buff *receive_new(struct sky2_port *sky2,
+ struct rx_ring_info *re,
+ unsigned int length)
+{
+ struct sk_buff *skb;
+ struct rx_ring_info nre;
+ unsigned hdr_space = sky2->rx_data_size;
+
+ nre.skb = sky2_rx_alloc(sky2, GFP_ATOMIC);
+ if (unlikely(!nre.skb))
+ goto nobuf;
+
+ if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space))
+ goto nomap;
+
+ skb = re->skb;
+ sky2_rx_unmap_skb(sky2->hw->pdev, re);
+ prefetch(skb->data);
+ *re = nre;
+
+ if (skb_shinfo(skb)->nr_frags)
+ skb_put_frags(skb, hdr_space, length);
+ else
+ skb_put(skb, length);
+ return skb;
+
+nomap:
+ dev_kfree_skb(nre.skb);
+nobuf:
+ return NULL;
+}
+
+/*
+ * Receive one packet.
+ * For larger packets, get new buffer.
+ */
+static struct sk_buff *sky2_receive(struct net_device *dev,
+ u16 length, u32 status)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
+ struct sk_buff *skb = NULL;
+ u16 count = (status & GMR_FS_LEN) >> 16;
+
+ if (status & GMR_FS_VLAN)
+ count -= VLAN_HLEN; /* Account for vlan tag */
+
+ netif_printk(sky2, rx_status, KERN_DEBUG, dev,
+ "rx slot %u status 0x%x len %d\n",
+ sky2->rx_next, status, length);
+
+ sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
+ prefetch(sky2->rx_ring + sky2->rx_next);
+
+	/* This chip has hardware problems that generate bogus status.
+ * So do only marginal checking and expect higher level protocols
+ * to handle crap frames.
+ */
+ if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
+ sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 &&
+ length != count)
+ goto okay;
+
+ if (status & GMR_FS_ANY_ERR)
+ goto error;
+
+ if (!(status & GMR_FS_RX_OK))
+ goto resubmit;
+
+ /* if length reported by DMA does not match PHY, packet was truncated */
+ if (length != count)
+ goto error;
+
+okay:
+ if (length < copybreak)
+ skb = receive_copy(sky2, re, length);
+ else
+ skb = receive_new(sky2, re, length);
+
+ dev->stats.rx_dropped += (skb == NULL);
+
+resubmit:
+ sky2_rx_submit(sky2, re);
+
+ return skb;
+
+error:
+ ++dev->stats.rx_errors;
+
+ if (net_ratelimit())
+ netif_info(sky2, rx_err, dev,
+ "rx error, status 0x%x length %d\n", status, length);
+
+ goto resubmit;
+}
+
+/* Transmit complete */
+static inline void sky2_tx_done(struct net_device *dev, u16 last)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (netif_running(dev)) {
+ sky2_tx_complete(sky2, last);
+
+ /* Wake unless it's detached, and called e.g. from sky2_down() */
+ if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
+ netif_wake_queue(dev);
+ }
+}
+
+static inline void sky2_skb_rx(const struct sky2_port *sky2,
+ u32 status, struct sk_buff *skb)
+{
+ if (status & GMR_FS_VLAN)
+ __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
+
+ if (skb->ip_summed == CHECKSUM_NONE)
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&sky2->hw->napi, skb);
+}
+
+static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
+ unsigned packets, unsigned bytes)
+{
+ struct net_device *dev = hw->dev[port];
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (packets == 0)
+ return;
+
+ u64_stats_update_begin(&sky2->rx_stats.syncp);
+ sky2->rx_stats.packets += packets;
+ sky2->rx_stats.bytes += bytes;
+ u64_stats_update_end(&sky2->rx_stats.syncp);
+
+ dev->last_rx = jiffies;
+ sky2_rx_update(netdev_priv(dev), rxqaddr[port]);
+}
+
+static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
+{
+	/* If this happens then the driver is assuming the wrong format for this chip type */
+ BUG_ON(sky2->hw->flags & SKY2_HW_NEW_LE);
+
+ /* Both checksum counters are programmed to start at
+ * the same offset, so unless there is a problem they
+ * should match. This failure is an early indication that
+ * hardware receive checksumming won't work.
+ */
+ if (likely((u16)(status >> 16) == (u16)status)) {
+ struct sk_buff *skb = sky2->rx_ring[sky2->rx_next].skb;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = le16_to_cpu(status);
+ } else {
+ dev_notice(&sky2->hw->pdev->dev,
+ "%s: receive checksum problem (status = %#x)\n",
+ sky2->netdev->name, status);
+
+ /* Disable checksum offload
+ * It will be reenabled on next ndo_set_features, but if it's
+ * really broken, will get disabled again
+ */
+ sky2->netdev->features &= ~NETIF_F_RXCSUM;
+ sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ BMU_DIS_RX_CHKSUM);
+ }
+}
+
+static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
+{
+ struct sk_buff *skb;
+
+ skb = sky2->rx_ring[sky2->rx_next].skb;
+ skb->rxhash = le32_to_cpu(status);
+}
+
+/* Process status response ring */
+static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
+{
+ int work_done = 0;
+ unsigned int total_bytes[2] = { 0 };
+ unsigned int total_packets[2] = { 0 };
+
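+	/* Make sure status entries written by the chip are visible
+	 * before we read them.
+	 */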
+ rmb();
+ do {
+ struct sky2_port *sky2;
+ struct sky2_status_le *le = hw->st_le + hw->st_idx;
+ unsigned port;
+ struct net_device *dev;
+ struct sk_buff *skb;
+ u32 status;
+ u16 length;
+ u8 opcode = le->opcode;
+
+ if (!(opcode & HW_OWNER))
+ break;
+
+ hw->st_idx = RING_NEXT(hw->st_idx, hw->st_size);
+
+ port = le->css & CSS_LINK_BIT;
+ dev = hw->dev[port];
+ sky2 = netdev_priv(dev);
+ length = le16_to_cpu(le->length);
+ status = le32_to_cpu(le->status);
+
+ le->opcode = 0;
+ switch (opcode & ~HW_OWNER) {
+ case OP_RXSTAT:
+ total_packets[port]++;
+ total_bytes[port] += length;
+
+ skb = sky2_receive(dev, length, status);
+ if (!skb)
+ break;
+
+ /* This chip reports checksum status differently */
+ if (hw->flags & SKY2_HW_NEW_LE) {
+ if ((dev->features & NETIF_F_RXCSUM) &&
+ (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) &&
+ (le->css & CSS_TCPUDPCSOK))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ sky2_skb_rx(sky2, status, skb);
+
+ /* Stop after net poll weight */
+ if (++work_done >= to_do)
+ goto exit_loop;
+ break;
+
+ case OP_RXVLAN:
+ sky2->rx_tag = length;
+ break;
+
+ case OP_RXCHKSVLAN:
+ sky2->rx_tag = length;
+ /* fall through */
+ case OP_RXCHKS:
+ if (likely(dev->features & NETIF_F_RXCSUM))
+ sky2_rx_checksum(sky2, status);
+ break;
+
+ case OP_RSS_HASH:
+ sky2_rx_hash(sky2, status);
+ break;
+
+ case OP_TXINDEXLE:
+ /* TX index reports status for both ports */
+ sky2_tx_done(hw->dev[0], status & 0xfff);
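+			/* port 1's 12-bit index is split: status[31:24] gives
+			 * the low 8 bits, length[3:0] the high 4 bits
+			 */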
+ if (hw->dev[1])
+ sky2_tx_done(hw->dev[1],
+ ((status >> 24) & 0xff)
+ | (u16)(length & 0xf) << 8);
+ break;
+
+ default:
+ if (net_ratelimit())
+ pr_warning("unknown status opcode 0x%x\n", opcode);
+ }
+ } while (hw->st_idx != idx);
+
+ /* Fully processed status ring so clear irq */
+ sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
+
+exit_loop:
+ sky2_rx_done(hw, 0, total_packets[0], total_bytes[0]);
+ sky2_rx_done(hw, 1, total_packets[1], total_bytes[1]);
+
+ return work_done;
+}
+
+static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
+{
+ struct net_device *dev = hw->dev[port];
+
+ if (net_ratelimit())
+ netdev_info(dev, "hw error interrupt status 0x%x\n", status);
+
+ if (status & Y2_IS_PAR_RD1) {
+ if (net_ratelimit())
+ netdev_err(dev, "ram data read parity error\n");
+ /* Clear IRQ */
+ sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
+ }
+
+ if (status & Y2_IS_PAR_WR1) {
+ if (net_ratelimit())
+ netdev_err(dev, "ram data write parity error\n");
+
+ sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
+ }
+
+ if (status & Y2_IS_PAR_MAC1) {
+ if (net_ratelimit())
+ netdev_err(dev, "MAC parity error\n");
+ sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
+ }
+
+ if (status & Y2_IS_PAR_RX1) {
+ if (net_ratelimit())
+ netdev_err(dev, "RX parity error\n");
+ sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
+ }
+
+ if (status & Y2_IS_TCP_TXA1) {
+ if (net_ratelimit())
+ netdev_err(dev, "TCP segmentation error\n");
+ sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
+ }
+}
+
+static void sky2_hw_intr(struct sky2_hw *hw)
+{
+ struct pci_dev *pdev = hw->pdev;
+ u32 status = sky2_read32(hw, B0_HWE_ISRC);
+ u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
+
+ status &= hwmsk;
+
+ if (status & Y2_IS_TIST_OV)
+ sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
+
+ if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
+ u16 pci_err;
+
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ pci_err = sky2_pci_read16(hw, PCI_STATUS);
+ if (net_ratelimit())
+ dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
+ pci_err);
+
+ sky2_pci_write16(hw, PCI_STATUS,
+ pci_err | PCI_STATUS_ERROR_BITS);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+ }
+
+ if (status & Y2_IS_PCI_EXP) {
+ /* PCI-Express uncorrectable Error occurred */
+ u32 err;
+
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
+ sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
+ 0xfffffffful);
+ if (net_ratelimit())
+ dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
+
+ sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+ }
+
+ if (status & Y2_HWE_L1_MASK)
+ sky2_hw_error(hw, 0, status);
+ status >>= 8;
+ if (status & Y2_HWE_L1_MASK)
+ sky2_hw_error(hw, 1, status);
+}
+
+static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
+{
+ struct net_device *dev = hw->dev[port];
+ struct sky2_port *sky2 = netdev_priv(dev);
+ u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
+
+ netif_info(sky2, intr, dev, "mac interrupt status 0x%x\n", status);
+
+ if (status & GM_IS_RX_CO_OV)
+ gma_read16(hw, port, GM_RX_IRQ_SRC);
+
+ if (status & GM_IS_TX_CO_OV)
+ gma_read16(hw, port, GM_TX_IRQ_SRC);
+
+ if (status & GM_IS_RX_FF_OR) {
+ ++dev->stats.rx_fifo_errors;
+ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
+ }
+
+ if (status & GM_IS_TX_FF_UR) {
+ ++dev->stats.tx_fifo_errors;
+ sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
+ }
+}
+
+/* This should never happen; it is a bug. */
+static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q)
+{
+ struct net_device *dev = hw->dev[port];
+ u16 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
+
+ dev_err(&hw->pdev->dev, "%s: descriptor error q=%#x get=%u put=%u\n",
+ dev->name, (unsigned) q, (unsigned) idx,
+ (unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));
+
+ sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
+}
+
+static int sky2_rx_hung(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ unsigned rxq = rxqaddr[port];
+ u32 mac_rp = sky2_read32(hw, SK_REG(port, RX_GMF_RP));
+ u8 mac_lev = sky2_read8(hw, SK_REG(port, RX_GMF_RLEV));
+ u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP));
+ u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL));
+
+ /* If idle and MAC or PCI is stuck */
+ if (sky2->check.last == dev->last_rx &&
+ ((mac_rp == sky2->check.mac_rp &&
+ mac_lev != 0 && mac_lev >= sky2->check.mac_lev) ||
+	     /* Check if the PCI RX path is hung */
+ (fifo_rp == sky2->check.fifo_rp &&
+ fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) {
+ netdev_printk(KERN_DEBUG, dev,
+ "hung mac %d:%d fifo %d (%d:%d)\n",
+ mac_lev, mac_rp, fifo_lev,
+ fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
+ return 1;
+ } else {
+ sky2->check.last = dev->last_rx;
+ sky2->check.mac_rp = mac_rp;
+ sky2->check.mac_lev = mac_lev;
+ sky2->check.fifo_rp = fifo_rp;
+ sky2->check.fifo_lev = fifo_lev;
+ return 0;
+ }
+}
+
+static void sky2_watchdog(unsigned long arg)
+{
+ struct sky2_hw *hw = (struct sky2_hw *) arg;
+
+ /* Check for lost IRQ once a second */
+ if (sky2_read32(hw, B0_ISRC)) {
+ napi_schedule(&hw->napi);
+ } else {
+ int i, active = 0;
+
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ if (!netif_running(dev))
+ continue;
+ ++active;
+
+ /* For chips with Rx FIFO, check if stuck */
+ if ((hw->flags & SKY2_HW_RAM_BUFFER) &&
+ sky2_rx_hung(dev)) {
+ netdev_info(dev, "receiver hang detected\n");
+ schedule_work(&hw->restart_work);
+ return;
+ }
+ }
+
+ if (active == 0)
+ return;
+ }
+
+ mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ));
+}
+
+/* Hardware/software error handling */
+static void sky2_err_intr(struct sky2_hw *hw, u32 status)
+{
+ if (net_ratelimit())
+ dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status);
+
+ if (status & Y2_IS_HW_ERR)
+ sky2_hw_intr(hw);
+
+ if (status & Y2_IS_IRQ_MAC1)
+ sky2_mac_intr(hw, 0);
+
+ if (status & Y2_IS_IRQ_MAC2)
+ sky2_mac_intr(hw, 1);
+
+ if (status & Y2_IS_CHK_RX1)
+ sky2_le_error(hw, 0, Q_R1);
+
+ if (status & Y2_IS_CHK_RX2)
+ sky2_le_error(hw, 1, Q_R2);
+
+ if (status & Y2_IS_CHK_TXA1)
+ sky2_le_error(hw, 0, Q_XA1);
+
+ if (status & Y2_IS_CHK_TXA2)
+ sky2_le_error(hw, 1, Q_XA2);
+}
+
+static int sky2_poll(struct napi_struct *napi, int work_limit)
+{
+ struct sky2_hw *hw = container_of(napi, struct sky2_hw, napi);
+ u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
+ int work_done = 0;
+ u16 idx;
+
+ if (unlikely(status & Y2_IS_ERROR))
+ sky2_err_intr(hw, status);
+
+ if (status & Y2_IS_IRQ_PHY1)
+ sky2_phy_intr(hw, 0);
+
+ if (status & Y2_IS_IRQ_PHY2)
+ sky2_phy_intr(hw, 1);
+
+ if (status & Y2_IS_PHY_QLNK)
+ sky2_qlink_intr(hw);
+
+ while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) {
+ work_done += sky2_status_intr(hw, work_limit - work_done, idx);
+
+ if (work_done >= work_limit)
+ goto done;
+ }
+
+ napi_complete(napi);
+ sky2_read32(hw, B0_Y2_SP_LISR);
+done:
+
+ return work_done;
+}
+
+static irqreturn_t sky2_intr(int irq, void *dev_id)
+{
+ struct sky2_hw *hw = dev_id;
+ u32 status;
+
+	/* Reading this register masks interrupts as a side effect */
+ status = sky2_read32(hw, B0_Y2_SP_ISRC2);
+ if (status == 0 || status == ~0)
+ return IRQ_NONE;
+
+ prefetch(&hw->st_le[hw->st_idx]);
+
+ napi_schedule(&hw->napi);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sky2_netpoll(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ napi_schedule(&sky2->hw->napi);
+}
+#endif
+
+/* Chip internal frequency for clock calculations */
+static u32 sky2_mhz(const struct sky2_hw *hw)
+{
+ switch (hw->chip_id) {
+ case CHIP_ID_YUKON_EC:
+ case CHIP_ID_YUKON_EC_U:
+ case CHIP_ID_YUKON_EX:
+ case CHIP_ID_YUKON_SUPR:
+ case CHIP_ID_YUKON_UL_2:
+ case CHIP_ID_YUKON_OPT:
+ case CHIP_ID_YUKON_PRM:
+ case CHIP_ID_YUKON_OP_2:
+ return 125;
+
+ case CHIP_ID_YUKON_FE:
+ return 100;
+
+ case CHIP_ID_YUKON_FE_P:
+ return 50;
+
+ case CHIP_ID_YUKON_XL:
+ return 156;
+
+ default:
+ BUG();
+ }
+}
+
+static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
+{
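+	/* e.g. at 125 MHz, 10 us = 1250 clock ticks */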
+ return sky2_mhz(hw) * us;
+}
+
+static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
+{
+ return clk / sky2_mhz(hw);
+}
+
+
+static int __devinit sky2_init(struct sky2_hw *hw)
+{
+ u8 t8;
+
+ /* Enable all clocks and check for bad PCI access */
+ sky2_pci_write32(hw, PCI_DEV_REG3, 0);
+
+ sky2_write8(hw, B0_CTST, CS_RST_CLR);
+
+ hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
+ hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
+
+ switch (hw->chip_id) {
+ case CHIP_ID_YUKON_XL:
+ hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
+ if (hw->chip_rev < CHIP_REV_YU_XL_A2)
+ hw->flags |= SKY2_HW_RSS_BROKEN;
+ break;
+
+ case CHIP_ID_YUKON_EC_U:
+ hw->flags = SKY2_HW_GIGABIT
+ | SKY2_HW_NEWER_PHY
+ | SKY2_HW_ADV_POWER_CTL;
+ break;
+
+ case CHIP_ID_YUKON_EX:
+ hw->flags = SKY2_HW_GIGABIT
+ | SKY2_HW_NEWER_PHY
+ | SKY2_HW_NEW_LE
+ | SKY2_HW_ADV_POWER_CTL
+ | SKY2_HW_RSS_CHKSUM;
+
+ /* New transmit checksum */
+ if (hw->chip_rev != CHIP_REV_YU_EX_B0)
+ hw->flags |= SKY2_HW_AUTO_TX_SUM;
+ break;
+
+ case CHIP_ID_YUKON_EC:
+ /* This rev is really old, and requires untested workarounds */
+ if (hw->chip_rev == CHIP_REV_YU_EC_A1) {
+ dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
+ return -EOPNOTSUPP;
+ }
+ hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RSS_BROKEN;
+ break;
+
+ case CHIP_ID_YUKON_FE:
+ hw->flags = SKY2_HW_RSS_BROKEN;
+ break;
+
+ case CHIP_ID_YUKON_FE_P:
+ hw->flags = SKY2_HW_NEWER_PHY
+ | SKY2_HW_NEW_LE
+ | SKY2_HW_AUTO_TX_SUM
+ | SKY2_HW_ADV_POWER_CTL;
+
+		/* The workaround for status conflicts with VLAN tag detection. */
+ if (hw->chip_rev == CHIP_REV_YU_FE2_A0)
+ hw->flags |= SKY2_HW_VLAN_BROKEN | SKY2_HW_RSS_CHKSUM;
+ break;
+
+ case CHIP_ID_YUKON_SUPR:
+ hw->flags = SKY2_HW_GIGABIT
+ | SKY2_HW_NEWER_PHY
+ | SKY2_HW_NEW_LE
+ | SKY2_HW_AUTO_TX_SUM
+ | SKY2_HW_ADV_POWER_CTL;
+
+ if (hw->chip_rev == CHIP_REV_YU_SU_A0)
+ hw->flags |= SKY2_HW_RSS_CHKSUM;
+ break;
+
+ case CHIP_ID_YUKON_UL_2:
+ hw->flags = SKY2_HW_GIGABIT
+ | SKY2_HW_ADV_POWER_CTL;
+ break;
+
+ case CHIP_ID_YUKON_OPT:
+ case CHIP_ID_YUKON_PRM:
+ case CHIP_ID_YUKON_OP_2:
+ hw->flags = SKY2_HW_GIGABIT
+ | SKY2_HW_NEW_LE
+ | SKY2_HW_ADV_POWER_CTL;
+ break;
+
+ default:
+ dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
+ hw->chip_id);
+ return -EOPNOTSUPP;
+ }
+
+ hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
+ if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
+ hw->flags |= SKY2_HW_FIBRE_PHY;
+
+ hw->ports = 1;
+ t8 = sky2_read8(hw, B2_Y2_HW_RES);
+ if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
+ if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
+ ++hw->ports;
+ }
+
+ if (sky2_read8(hw, B2_E_0))
+ hw->flags |= SKY2_HW_RAM_BUFFER;
+
+ return 0;
+}
+
+static void sky2_reset(struct sky2_hw *hw)
+{
+ struct pci_dev *pdev = hw->pdev;
+ u16 status;
+ int i;
+ u32 hwe_mask = Y2_HWE_ALL_MASK;
+
+ /* disable ASF */
+ if (hw->chip_id == CHIP_ID_YUKON_EX
+ || hw->chip_id == CHIP_ID_YUKON_SUPR) {
+ sky2_write32(hw, CPU_WDOG, 0);
+ status = sky2_read16(hw, HCU_CCSR);
+ status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
+ HCU_CCSR_UC_STATE_MSK);
+ /*
+ * CPU clock divider shouldn't be used because
+ * - ASF firmware may malfunction
+ * - Yukon-Supreme: Parallel FLASH doesn't support divided clocks
+ */
+ status &= ~HCU_CCSR_CPU_CLK_DIVIDE_MSK;
+ sky2_write16(hw, HCU_CCSR, status);
+ sky2_write32(hw, CPU_WDOG, 0);
+ } else
+ sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
+ sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
+
+ /* do a SW reset */
+ sky2_write8(hw, B0_CTST, CS_RST_SET);
+ sky2_write8(hw, B0_CTST, CS_RST_CLR);
+
+ /* allow writes to PCI config */
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+
+ /* clear PCI errors, if any */
+ status = sky2_pci_read16(hw, PCI_STATUS);
+ status |= PCI_STATUS_ERROR_BITS;
+ sky2_pci_write16(hw, PCI_STATUS, status);
+
+ sky2_write8(hw, B0_CTST, CS_MRST_CLR);
+
+ if (pci_is_pcie(pdev)) {
+ sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
+ 0xfffffffful);
+
+ /* If the error bit is stuck on, ignore it */
+ if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP)
+ dev_info(&pdev->dev, "ignoring stuck error report bit\n");
+ else
+ hwe_mask |= Y2_IS_PCI_EXP;
+ }
+
+ sky2_power_on(hw);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+ for (i = 0; i < hw->ports; i++) {
+ sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
+ sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
+
+ if (hw->chip_id == CHIP_ID_YUKON_EX ||
+ hw->chip_id == CHIP_ID_YUKON_SUPR)
+ sky2_write16(hw, SK_REG(i, GMAC_CTRL),
+ GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
+ | GMC_BYP_RETR_ON);
+
+ }
+
+ if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev > CHIP_REV_YU_SU_B0) {
+ /* enable MACSec clock gating */
+ sky2_pci_write32(hw, PCI_DEV_REG3, P_CLK_MACSEC_DIS);
+ }
+
+ if (hw->chip_id == CHIP_ID_YUKON_OPT ||
+ hw->chip_id == CHIP_ID_YUKON_PRM ||
+ hw->chip_id == CHIP_ID_YUKON_OP_2) {
+ u16 reg;
+ u32 msk;
+
+ if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
+ /* disable PCI-E PHY power down (set PHY reg 0x80, bit 7) */
+ sky2_write32(hw, Y2_PEX_PHY_DATA, (0x80UL << 16) | (1 << 7));
+
+ /* set PHY Link Detect Timer to 1.1 second (11x 100ms) */
+ reg = 10;
+
+ /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
+ sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
+ } else {
+ /* set PHY Link Detect Timer to 0.4 second (4x 100ms) */
+ reg = 3;
+ }
+
+ reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
+ reg |= PSM_CONFIG_REG4_RST_PHY_LINK_DETECT;
+
+ /* reset PHY Link Detect */
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
+
+ /* enable PHY Quick Link */
+ msk = sky2_read32(hw, B0_IMSK);
+ msk |= Y2_IS_PHY_QLNK;
+ sky2_write32(hw, B0_IMSK, msk);
+
+ /* check if PSMv2 was running before */
+ reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
+ if (reg & PCI_EXP_LNKCTL_ASPMC)
+ /* restore the PCIe Link Control register */
+ sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL,
+ reg);
+
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+ /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
+ sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
+ }
+
+ /* Clear I2C IRQ noise */
+ sky2_write32(hw, B2_I2C_IRQ, 1);
+
+ /* turn off hardware timer (unused) */
+ sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
+ sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
+
+ /* Turn off descriptor polling */
+ sky2_write32(hw, B28_DPT_CTRL, DPT_STOP);
+
+ /* Turn off receive timestamp */
+ sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
+ sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
+
+ /* enable the Tx Arbiters */
+ for (i = 0; i < hw->ports; i++)
+ sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
+
+ /* Initialize ram interface */
+ for (i = 0; i < hw->ports; i++) {
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
+
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
+ sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
+ }
+
+ sky2_write32(hw, B0_HWE_IMSK, hwe_mask);
+
+ for (i = 0; i < hw->ports; i++)
+ sky2_gmac_reset(hw, i);
+
+ memset(hw->st_le, 0, hw->st_size * sizeof(struct sky2_status_le));
+ hw->st_idx = 0;
+
+ sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
+ sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);
+
+ sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
+ sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
+
+ /* Set the list last index */
+ sky2_write16(hw, STAT_LAST_IDX, hw->st_size - 1);
+
+ sky2_write16(hw, STAT_TX_IDX_TH, 10);
+ sky2_write8(hw, STAT_FIFO_WM, 16);
+
+ /* set Status-FIFO ISR watermark */
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
+ sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
+ else
+ sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
+
+ sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
+ sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
+ sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
+
+ /* enable status unit */
+ sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);
+
+ sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
+ sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
+ sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
+}
+
+/* Take device down (offline).
+ * Equivalent to doing dev_stop() but this does not
+ * inform upper layers of the transition.
+ */
+static void sky2_detach(struct net_device *dev)
+{
+ if (netif_running(dev)) {
+ netif_tx_lock(dev);
+ netif_device_detach(dev); /* stop txq */
+ netif_tx_unlock(dev);
+ sky2_down(dev);
+ }
+}
+
+/* Bring device back after doing sky2_detach */
+static int sky2_reattach(struct net_device *dev)
+{
+ int err = 0;
+
+ if (netif_running(dev)) {
+ err = sky2_up(dev);
+ if (err) {
+ netdev_info(dev, "could not restart %d\n", err);
+ dev_close(dev);
+ } else {
+ netif_device_attach(dev);
+ sky2_set_multicast(dev);
+ }
+ }
+
+ return err;
+}
+
+static void sky2_all_down(struct sky2_hw *hw)
+{
+ int i;
+
+ sky2_read32(hw, B0_IMSK);
+ sky2_write32(hw, B0_IMSK, 0);
+ synchronize_irq(hw->pdev->irq);
+ napi_disable(&hw->napi);
+
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ continue;
+
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+ sky2_hw_down(sky2);
+ }
+}
+
+static void sky2_all_up(struct sky2_hw *hw)
+{
+ u32 imask = Y2_IS_BASE;
+ int i;
+
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ continue;
+
+ sky2_hw_up(sky2);
+ sky2_set_multicast(dev);
+ imask |= portirq_msk[i];
+ netif_wake_queue(dev);
+ }
+
+ sky2_write32(hw, B0_IMSK, imask);
+ sky2_read32(hw, B0_IMSK);
+
+ sky2_read32(hw, B0_Y2_SP_LISR);
+ napi_enable(&hw->napi);
+}
+
+static void sky2_restart(struct work_struct *work)
+{
+ struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
+
+ rtnl_lock();
+
+ sky2_all_down(hw);
+ sky2_reset(hw);
+ sky2_all_up(hw);
+
+ rtnl_unlock();
+}
+
+static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
+{
+ return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
+}
+
+static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ const struct sky2_port *sky2 = netdev_priv(dev);
+
+ wol->supported = sky2_wol_supported(sky2->hw);
+ wol->wolopts = sky2->wol;
+}
+
+static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ bool enable_wakeup = false;
+ int i;
+
+ if ((wol->wolopts & ~sky2_wol_supported(sky2->hw)) ||
+ !device_can_wakeup(&hw->pdev->dev))
+ return -EOPNOTSUPP;
+
+ sky2->wol = wol->wolopts;
+
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (sky2->wol)
+ enable_wakeup = true;
+ }
+ device_set_wakeup_enable(&hw->pdev->dev, enable_wakeup);
+
+ return 0;
+}
+
+static u32 sky2_supported_modes(const struct sky2_hw *hw)
+{
+ if (sky2_is_copper(hw)) {
+ u32 modes = SUPPORTED_10baseT_Half
+ | SUPPORTED_10baseT_Full
+ | SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full;
+
+ if (hw->flags & SKY2_HW_GIGABIT)
+ modes |= SUPPORTED_1000baseT_Half
+ | SUPPORTED_1000baseT_Full;
+ return modes;
+ } else
+ return SUPPORTED_1000baseT_Half
+ | SUPPORTED_1000baseT_Full;
+}
+
+static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+
+ ecmd->transceiver = XCVR_INTERNAL;
+ ecmd->supported = sky2_supported_modes(hw);
+ ecmd->phy_address = PHY_ADDR_MARV;
+ if (sky2_is_copper(hw)) {
+ ecmd->port = PORT_TP;
+ ethtool_cmd_speed_set(ecmd, sky2->speed);
+ ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP;
+ } else {
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ ecmd->port = PORT_FIBRE;
+ ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE;
+ }
+
+ ecmd->advertising = sky2->advertising;
+ ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED)
+ ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ ecmd->duplex = sky2->duplex;
+ return 0;
+}
+
+static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ const struct sky2_hw *hw = sky2->hw;
+ u32 supported = sky2_supported_modes(hw);
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (ecmd->advertising & ~supported)
+ return -EINVAL;
+
+ if (sky2_is_copper(hw))
+ sky2->advertising = ecmd->advertising |
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
+ else
+ sky2->advertising = ecmd->advertising |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg;
+
+ sky2->flags |= SKY2_FLAG_AUTO_SPEED;
+ sky2->duplex = -1;
+ sky2->speed = -1;
+ } else {
+ u32 setting;
+ u32 speed = ethtool_cmd_speed(ecmd);
+
+ switch (speed) {
+ case SPEED_1000:
+ if (ecmd->duplex == DUPLEX_FULL)
+ setting = SUPPORTED_1000baseT_Full;
+ else if (ecmd->duplex == DUPLEX_HALF)
+ setting = SUPPORTED_1000baseT_Half;
+ else
+ return -EINVAL;
+ break;
+ case SPEED_100:
+ if (ecmd->duplex == DUPLEX_FULL)
+ setting = SUPPORTED_100baseT_Full;
+ else if (ecmd->duplex == DUPLEX_HALF)
+ setting = SUPPORTED_100baseT_Half;
+ else
+ return -EINVAL;
+ break;
+
+ case SPEED_10:
+ if (ecmd->duplex == DUPLEX_FULL)
+ setting = SUPPORTED_10baseT_Full;
+ else if (ecmd->duplex == DUPLEX_HALF)
+ setting = SUPPORTED_10baseT_Half;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((setting & supported) == 0)
+ return -EINVAL;
+
+ sky2->speed = speed;
+ sky2->duplex = ecmd->duplex;
+ sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
+ }
+
+ if (netif_running(dev)) {
+ sky2_phy_reinit(sky2);
+ sky2_set_multicast(dev);
+ }
+
+ return 0;
+}
+
+static void sky2_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->fw_version, "N/A");
+ strcpy(info->bus_info, pci_name(sky2->hw->pdev));
+}
+
+static const struct sky2_stat {
+ char name[ETH_GSTRING_LEN];
+ u16 offset;
+} sky2_stats[] = {
+ { "tx_bytes", GM_TXO_OK_HI },
+ { "rx_bytes", GM_RXO_OK_HI },
+ { "tx_broadcast", GM_TXF_BC_OK },
+ { "rx_broadcast", GM_RXF_BC_OK },
+ { "tx_multicast", GM_TXF_MC_OK },
+ { "rx_multicast", GM_RXF_MC_OK },
+ { "tx_unicast", GM_TXF_UC_OK },
+ { "rx_unicast", GM_RXF_UC_OK },
+ { "tx_mac_pause", GM_TXF_MPAUSE },
+ { "rx_mac_pause", GM_RXF_MPAUSE },
+ { "collisions", GM_TXF_COL },
+ { "late_collision", GM_TXF_LAT_COL },
+ { "aborted", GM_TXF_ABO_COL },
+ { "single_collisions", GM_TXF_SNG_COL },
+ { "multi_collisions", GM_TXF_MUL_COL },
+
+ { "rx_short", GM_RXF_SHT },
+ { "rx_runt", GM_RXE_FRAG },
+ { "rx_64_byte_packets", GM_RXF_64B },
+ { "rx_65_to_127_byte_packets", GM_RXF_127B },
+ { "rx_128_to_255_byte_packets", GM_RXF_255B },
+ { "rx_256_to_511_byte_packets", GM_RXF_511B },
+ { "rx_512_to_1023_byte_packets", GM_RXF_1023B },
+ { "rx_1024_to_1518_byte_packets", GM_RXF_1518B },
+ { "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ },
+ { "rx_too_long", GM_RXF_LNG_ERR },
+ { "rx_fifo_overflow", GM_RXE_FIFO_OV },
+ { "rx_jabber", GM_RXF_JAB_PKT },
+ { "rx_fcs_error", GM_RXF_FCS_ERR },
+
+ { "tx_64_byte_packets", GM_TXF_64B },
+ { "tx_65_to_127_byte_packets", GM_TXF_127B },
+ { "tx_128_to_255_byte_packets", GM_TXF_255B },
+ { "tx_256_to_511_byte_packets", GM_TXF_511B },
+ { "tx_512_to_1023_byte_packets", GM_TXF_1023B },
+ { "tx_1024_to_1518_byte_packets", GM_TXF_1518B },
+ { "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ },
+ { "tx_fifo_underrun", GM_TXE_FIFO_UR },
+};
+
+static u32 sky2_get_msglevel(struct net_device *netdev)
+{
+ struct sky2_port *sky2 = netdev_priv(netdev);
+ return sky2->msg_enable;
+}
+
+static int sky2_nway_reset(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (!netif_running(dev) || !(sky2->flags & SKY2_FLAG_AUTO_SPEED))
+ return -EINVAL;
+
+ sky2_phy_reinit(sky2);
+ sky2_set_multicast(dev);
+
+ return 0;
+}
+
+static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ int i;
+
+ data[0] = get_stats64(hw, port, GM_TXO_OK_LO);
+ data[1] = get_stats64(hw, port, GM_RXO_OK_LO);
+
+ for (i = 2; i < count; i++)
+ data[i] = get_stats32(hw, port, sky2_stats[i].offset);
+}
+
+static void sky2_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct sky2_port *sky2 = netdev_priv(netdev);
+ sky2->msg_enable = value;
+}
+
+static int sky2_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(sky2_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void sky2_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 * data)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats));
+}
+
+static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(sky2_stats); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ sky2_stats[i].name, ETH_GSTRING_LEN);
+ break;
+ }
+}
+
+static int sky2_set_mac_address(struct net_device *dev, void *p)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ const struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ memcpy_toio(hw->regs + B2_MAC_1 + port * 8,
+ dev->dev_addr, ETH_ALEN);
+ memcpy_toio(hw->regs + B2_MAC_2 + port * 8,
+ dev->dev_addr, ETH_ALEN);
+
+ /* virtual address for data */
+ gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
+
+ /* physical address: used for pause frames */
+ gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
+
+ return 0;
+}
+
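+/*
+ * The GMA multicast hash filter is 64 bits wide: the low six bits of
+ * the Ethernet CRC of the address select one bit in the 8-byte filter.
+ */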
+static inline void sky2_add_filter(u8 filter[8], const u8 *addr)
+{
+ u32 bit;
+
+ bit = ether_crc(ETH_ALEN, addr) & 63;
+ filter[bit >> 3] |= 1 << (bit & 7);
+}
+
+static void sky2_set_multicast(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ struct netdev_hw_addr *ha;
+ u16 reg;
+ u8 filter[8];
+ int rx_pause;
+ static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
+
+ rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH);
+ memset(filter, 0, sizeof(filter));
+
+ reg = gma_read16(hw, port, GM_RX_CTRL);
+ reg |= GM_RXCR_UCF_ENA;
+
+ if (dev->flags & IFF_PROMISC) /* promiscuous */
+ reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
+ else if (dev->flags & IFF_ALLMULTI)
+ memset(filter, 0xff, sizeof(filter));
+ else if (netdev_mc_empty(dev) && !rx_pause)
+ reg &= ~GM_RXCR_MCF_ENA;
+ else {
+ reg |= GM_RXCR_MCF_ENA;
+
+ if (rx_pause)
+ sky2_add_filter(filter, pause_mc_addr);
+
+ netdev_for_each_mc_addr(ha, dev)
+ sky2_add_filter(filter, ha->addr);
+ }
+
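+ /* Load the 64-bit hash filter into the four 16-bit address registers */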
+ gma_write16(hw, port, GM_MC_ADDR_H1,
+ (u16) filter[0] | ((u16) filter[1] << 8));
+ gma_write16(hw, port, GM_MC_ADDR_H2,
+ (u16) filter[2] | ((u16) filter[3] << 8));
+ gma_write16(hw, port, GM_MC_ADDR_H3,
+ (u16) filter[4] | ((u16) filter[5] << 8));
+ gma_write16(hw, port, GM_MC_ADDR_H4,
+ (u16) filter[6] | ((u16) filter[7] << 8));
+
+ gma_write16(hw, port, GM_RX_CTRL, reg);
+}
+
+static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ unsigned int start;
+ u64 _bytes, _packets;
+
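+ /* Counters are updated from softirq context; retry via the seqcount
+ * so that 32-bit readers see a consistent 64-bit snapshot.
+ */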
+ do {
+ start = u64_stats_fetch_begin_bh(&sky2->rx_stats.syncp);
+ _bytes = sky2->rx_stats.bytes;
+ _packets = sky2->rx_stats.packets;
+ } while (u64_stats_fetch_retry_bh(&sky2->rx_stats.syncp, start));
+
+ stats->rx_packets = _packets;
+ stats->rx_bytes = _bytes;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&sky2->tx_stats.syncp);
+ _bytes = sky2->tx_stats.bytes;
+ _packets = sky2->tx_stats.packets;
+ } while (u64_stats_fetch_retry_bh(&sky2->tx_stats.syncp, start));
+
+ stats->tx_packets = _packets;
+ stats->tx_bytes = _bytes;
+
+ stats->multicast = get_stats32(hw, port, GM_RXF_MC_OK)
+ + get_stats32(hw, port, GM_RXF_BC_OK);
+
+ stats->collisions = get_stats32(hw, port, GM_TXF_COL);
+
+ stats->rx_length_errors = get_stats32(hw, port, GM_RXF_LNG_ERR);
+ stats->rx_crc_errors = get_stats32(hw, port, GM_RXF_FCS_ERR);
+ stats->rx_frame_errors = get_stats32(hw, port, GM_RXF_SHT)
+ + get_stats32(hw, port, GM_RXE_FRAG);
+ stats->rx_over_errors = get_stats32(hw, port, GM_RXE_FIFO_OV);
+
+ stats->rx_dropped = dev->stats.rx_dropped;
+ stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
+ stats->tx_fifo_errors = dev->stats.tx_fifo_errors;
+
+ return stats;
+}
+
+/* Can have one global because blinking is controlled by
+ * ethtool and that is always under RTNL mutex
+ */
+static void sky2_led(struct sky2_port *sky2, enum led_mode mode)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+
+ spin_lock_bh(&sky2->phy_lock);
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
+ hw->chip_id == CHIP_ID_YUKON_EX ||
+ hw->chip_id == CHIP_ID_YUKON_SUPR) {
+ u16 pg;
+ pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
+
+ switch (mode) {
+ case MO_LED_OFF:
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+ PHY_M_LEDC_LOS_CTRL(8) |
+ PHY_M_LEDC_INIT_CTRL(8) |
+ PHY_M_LEDC_STA1_CTRL(8) |
+ PHY_M_LEDC_STA0_CTRL(8));
+ break;
+ case MO_LED_ON:
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+ PHY_M_LEDC_LOS_CTRL(9) |
+ PHY_M_LEDC_INIT_CTRL(9) |
+ PHY_M_LEDC_STA1_CTRL(9) |
+ PHY_M_LEDC_STA0_CTRL(9));
+ break;
+ case MO_LED_BLINK:
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+ PHY_M_LEDC_LOS_CTRL(0xa) |
+ PHY_M_LEDC_INIT_CTRL(0xa) |
+ PHY_M_LEDC_STA1_CTRL(0xa) |
+ PHY_M_LEDC_STA0_CTRL(0xa));
+ break;
+ case MO_LED_NORM:
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+ PHY_M_LEDC_LOS_CTRL(1) |
+ PHY_M_LEDC_INIT_CTRL(8) |
+ PHY_M_LEDC_STA1_CTRL(7) |
+ PHY_M_LEDC_STA0_CTRL(7));
+ }
+
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
+ } else
+ gm_phy_write(hw, port, PHY_MARV_LED_OVER,
+ PHY_M_LED_MO_DUP(mode) |
+ PHY_M_LED_MO_10(mode) |
+ PHY_M_LED_MO_100(mode) |
+ PHY_M_LED_MO_1000(mode) |
+ PHY_M_LED_MO_RX(mode) |
+ PHY_M_LED_MO_TX(mode));
+
+ spin_unlock_bh(&sky2->phy_lock);
+}
+
+/* blink LEDs for finding board */
+static int sky2_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
+ case ETHTOOL_ID_INACTIVE:
+ sky2_led(sky2, MO_LED_NORM);
+ break;
+ case ETHTOOL_ID_ON:
+ sky2_led(sky2, MO_LED_ON);
+ break;
+ case ETHTOOL_ID_OFF:
+ sky2_led(sky2, MO_LED_OFF);
+ break;
+ }
+
+ return 0;
+}
+
+static void sky2_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *ecmd)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ switch (sky2->flow_mode) {
+ case FC_NONE:
+ ecmd->tx_pause = ecmd->rx_pause = 0;
+ break;
+ case FC_TX:
+ ecmd->tx_pause = 1, ecmd->rx_pause = 0;
+ break;
+ case FC_RX:
+ ecmd->tx_pause = 0, ecmd->rx_pause = 1;
+ break;
+ case FC_BOTH:
+ ecmd->tx_pause = ecmd->rx_pause = 1;
+ }
+
+ ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_PAUSE)
+ ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+}
+
+static int sky2_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *ecmd)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (ecmd->autoneg == AUTONEG_ENABLE)
+ sky2->flags |= SKY2_FLAG_AUTO_PAUSE;
+ else
+ sky2->flags &= ~SKY2_FLAG_AUTO_PAUSE;
+
+ sky2->flow_mode = sky2_flow(ecmd->rx_pause, ecmd->tx_pause);
+
+ if (netif_running(dev))
+ sky2_phy_reinit(sky2);
+
+ return 0;
+}
+
+static int sky2_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+
+ if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_STOP)
+ ecmd->tx_coalesce_usecs = 0;
+ else {
+ u32 clks = sky2_read32(hw, STAT_TX_TIMER_INI);
+ ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks);
+ }
+ ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH);
+
+ if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_STOP)
+ ecmd->rx_coalesce_usecs = 0;
+ else {
+ u32 clks = sky2_read32(hw, STAT_LEV_TIMER_INI);
+ ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks);
+ }
+ ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM);
+
+ if (sky2_read8(hw, STAT_ISR_TIMER_CTRL) == TIM_STOP)
+ ecmd->rx_coalesce_usecs_irq = 0;
+ else {
+ u32 clks = sky2_read32(hw, STAT_ISR_TIMER_INI);
+ ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks);
+ }
+
+ ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM);
+
+ return 0;
+}
+
+/* Note: this affects both ports */
+static int sky2_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
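+ /* Timer init values are capped at 0x0ffffff (24-bit) clock ticks */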
+ const u32 tmax = sky2_clk2us(hw, 0x0ffffff);
+
+ if (ecmd->tx_coalesce_usecs > tmax ||
+ ecmd->rx_coalesce_usecs > tmax ||
+ ecmd->rx_coalesce_usecs_irq > tmax)
+ return -EINVAL;
+
+ if (ecmd->tx_max_coalesced_frames >= sky2->tx_ring_size-1)
+ return -EINVAL;
+ if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
+ return -EINVAL;
+ if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING)
+ return -EINVAL;
+
+ if (ecmd->tx_coalesce_usecs == 0)
+ sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
+ else {
+ sky2_write32(hw, STAT_TX_TIMER_INI,
+ sky2_us2clk(hw, ecmd->tx_coalesce_usecs));
+ sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
+ }
+ sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames);
+
+ if (ecmd->rx_coalesce_usecs == 0)
+ sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
+ else {
+ sky2_write32(hw, STAT_LEV_TIMER_INI,
+ sky2_us2clk(hw, ecmd->rx_coalesce_usecs));
+ sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
+ }
+ sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames);
+
+ if (ecmd->rx_coalesce_usecs_irq == 0)
+ sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_STOP);
+ else {
+ sky2_write32(hw, STAT_ISR_TIMER_INI,
+ sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq));
+ sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
+ }
+ sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq);
+ return 0;
+}
+
+static void sky2_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ ering->rx_max_pending = RX_MAX_PENDING;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = 0;
+ ering->tx_max_pending = TX_MAX_PENDING;
+
+ ering->rx_pending = sky2->rx_pending;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = 0;
+ ering->tx_pending = sky2->tx_pending;
+}
+
+static int sky2_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (ering->rx_pending > RX_MAX_PENDING ||
+ ering->rx_pending < 8 ||
+ ering->tx_pending < TX_MIN_PENDING ||
+ ering->tx_pending > TX_MAX_PENDING)
+ return -EINVAL;
+
+ sky2_detach(dev);
+
+ sky2->rx_pending = ering->rx_pending;
+ sky2->tx_pending = ering->tx_pending;
+ sky2->tx_ring_size = roundup_pow_of_two(sky2->tx_pending+1);
+
+ return sky2_reattach(dev);
+}
+
+static int sky2_get_regs_len(struct net_device *dev)
+{
+ return 0x4000;
+}
+
+static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b)
+{
+ /* This complicated switch statement is to make sure we
+ * only access regions that are unreserved.
+ * Some blocks are only valid on dual port cards.
+ */
+ switch (b) {
+ /* second port */
+ case 5: /* Tx Arbiter 2 */
+ case 9: /* RX2 */
+ case 14 ... 15: /* TX2 */
+ case 17: case 19: /* Ram Buffer 2 */
+ case 22 ... 23: /* Tx Ram Buffer 2 */
+ case 25: /* Rx MAC Fifo 2 */
+ case 27: /* Tx MAC Fifo 2 */
+ case 31: /* GPHY 2 */
+ case 40 ... 47: /* Pattern Ram 2 */
+ case 52: case 54: /* TCP Segmentation 2 */
+ case 112 ... 116: /* GMAC 2 */
+ return hw->ports > 1;
+
+ case 0: /* Control */
+ case 2: /* Mac address */
+ case 4: /* Tx Arbiter 1 */
+ case 7: /* PCI express reg */
+ case 8: /* RX1 */
+ case 12 ... 13: /* TX1 */
+ case 16: case 18:/* Rx Ram Buffer 1 */
+ case 20 ... 21: /* Tx Ram Buffer 1 */
+ case 24: /* Rx MAC Fifo 1 */
+ case 26: /* Tx MAC Fifo 1 */
+ case 28 ... 29: /* Descriptor and status unit */
+ case 30: /* GPHY 1*/
+ case 32 ... 39: /* Pattern Ram 1 */
+ case 48: case 50: /* TCP Segmentation 1 */
+ case 56 ... 60: /* PCI space */
+ case 80 ... 84: /* GMAC 1 */
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+/*
+ * Returns copy of control register region
+ * Note: ethtool_get_regs always provides full size (16k) buffer
+ */
+static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ const struct sky2_port *sky2 = netdev_priv(dev);
+ const void __iomem *io = sky2->hw->regs;
+ unsigned int b;
+
+ regs->version = 1;
+
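+ /* Walk the 16 kB window as 128 blocks of 128 bytes, zero-filling
+ * blocks that sky2_reg_access_ok() does not allow.
+ */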
+ for (b = 0; b < 128; b++) {
+ /* skip poisonous diagnostic ram region in block 3 */
+ if (b == 3)
+ memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
+ else if (sky2_reg_access_ok(sky2->hw, b))
+ memcpy_fromio(p, io, 128);
+ else
+ memset(p, 0, 128);
+
+ p += 128;
+ io += 128;
+ }
+}
+
+static int sky2_get_eeprom_len(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ u16 reg2;
+
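+ /* PCI_DEV_REG2 bits 16..14 encode the VPD ROM size as a power of
+ * two: 1 << (val + 8), i.e. 256 bytes up to 32 kB.
+ */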
+ reg2 = sky2_pci_read16(hw, PCI_DEV_REG2);
+ return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
+}
+
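+/*
+ * Spin until the VPD address flag leaves the 'busy' state. Hardware
+ * sets PCI_VPD_ADDR_F when a read completes and clears it when a
+ * write completes, so callers pass whichever value means "still busy".
+ */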
+static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
+{
+ unsigned long start = jiffies;
+
+ while ( (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) {
+ /* Can take up to 10.6 ms for write */
+ if (time_after(jiffies, start + HZ/4)) {
+ dev_err(&hw->pdev->dev, "VPD cycle timed out\n");
+ return -ETIMEDOUT;
+ }
+ mdelay(1);
+ }
+
+ return 0;
+}
+
+static int sky2_vpd_read(struct sky2_hw *hw, int cap, void *data,
+ u16 offset, size_t length)
+{
+ int rc = 0;
+
+ while (length > 0) {
+ u32 val;
+
+ sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset);
+ rc = sky2_vpd_wait(hw, cap, 0);
+ if (rc)
+ break;
+
+ val = sky2_pci_read32(hw, cap + PCI_VPD_DATA);
+
+ memcpy(data, &val, min(sizeof(val), length));
+ offset += sizeof(u32);
+ data += sizeof(u32);
+ length -= sizeof(u32);
+ }
+
+ return rc;
+}
+
+static int sky2_vpd_write(struct sky2_hw *hw, int cap, const void *data,
+ u16 offset, unsigned int length)
+{
+ unsigned int i;
+ int rc = 0;
+
+ for (i = 0; i < length; i += sizeof(u32)) {
+ u32 val = *(u32 *)(data + i);
+
+ sky2_pci_write32(hw, cap + PCI_VPD_DATA, val);
+ sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F);
+
+ rc = sky2_vpd_wait(hw, cap, PCI_VPD_ADDR_F);
+ if (rc)
+ break;
+ }
+ return rc;
+}
+
+static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
+
+ if (!cap)
+ return -EINVAL;
+
+ eeprom->magic = SKY2_EEPROM_MAGIC;
+
+ return sky2_vpd_read(sky2->hw, cap, data, eeprom->offset, eeprom->len);
+}
+
+static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
+
+ if (!cap)
+ return -EINVAL;
+
+ if (eeprom->magic != SKY2_EEPROM_MAGIC)
+ return -EINVAL;
+
+ /* Partial writes not supported */
+ if ((eeprom->offset & 3) || (eeprom->len & 3))
+ return -EINVAL;
+
+ return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
+}
+
+static u32 sky2_fix_features(struct net_device *dev, u32 features)
+{
+ const struct sky2_port *sky2 = netdev_priv(dev);
+ const struct sky2_hw *hw = sky2->hw;
+
+ /* In order to do Jumbo packets on these chips, we need to turn off
+ * the transmit store/forward. Therefore checksum offload won't work.
+ */
+ if (dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U) {
+ netdev_info(dev, "checksum offload not possible with jumbo frames\n");
+ features &= ~(NETIF_F_TSO|NETIF_F_SG|NETIF_F_ALL_CSUM);
+ }
+
+ /* Some hardware requires receive checksum for RSS to work. */
+ if ( (features & NETIF_F_RXHASH) &&
+ !(features & NETIF_F_RXCSUM) &&
+ (sky2->hw->flags & SKY2_HW_RSS_CHKSUM)) {
+ netdev_info(dev, "receive hashing forces receive checksum\n");
+ features |= NETIF_F_RXCSUM;
+ }
+
+ return features;
+}
+
+static int sky2_set_features(struct net_device *dev, u32 features)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ u32 changed = dev->features ^ features;
+
+ if (changed & NETIF_F_RXCSUM) {
+ u32 on = features & NETIF_F_RXCSUM;
+ sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+ }
+
+ if (changed & NETIF_F_RXHASH)
+ rx_set_rss(dev, features);
+
+ if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
+ sky2_vlan_mode(dev, features);
+
+ return 0;
+}
+
+static const struct ethtool_ops sky2_ethtool_ops = {
+ .get_settings = sky2_get_settings,
+ .set_settings = sky2_set_settings,
+ .get_drvinfo = sky2_get_drvinfo,
+ .get_wol = sky2_get_wol,
+ .set_wol = sky2_set_wol,
+ .get_msglevel = sky2_get_msglevel,
+ .set_msglevel = sky2_set_msglevel,
+ .nway_reset = sky2_nway_reset,
+ .get_regs_len = sky2_get_regs_len,
+ .get_regs = sky2_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = sky2_get_eeprom_len,
+ .get_eeprom = sky2_get_eeprom,
+ .set_eeprom = sky2_set_eeprom,
+ .get_strings = sky2_get_strings,
+ .get_coalesce = sky2_get_coalesce,
+ .set_coalesce = sky2_set_coalesce,
+ .get_ringparam = sky2_get_ringparam,
+ .set_ringparam = sky2_set_ringparam,
+ .get_pauseparam = sky2_get_pauseparam,
+ .set_pauseparam = sky2_set_pauseparam,
+ .set_phys_id = sky2_set_phys_id,
+ .get_sset_count = sky2_get_sset_count,
+ .get_ethtool_stats = sky2_get_ethtool_stats,
+};
+
+#ifdef CONFIG_SKY2_DEBUG
+
+static struct dentry *sky2_debug;
+
+
+/*
+ * Read and parse the first part of Vital Product Data
+ */
+#define VPD_SIZE 128
+#define VPD_MAGIC 0x82
+
+static const struct vpd_tag {
+ char tag[2];
+ char *label;
+} vpd_tags[] = {
+ { "PN", "Part Number" },
+ { "EC", "Engineering Level" },
+ { "MN", "Manufacturer" },
+ { "SN", "Serial Number" },
+ { "YA", "Asset Tag" },
+ { "VL", "First Error Log Message" },
+ { "VF", "Second Error Log Message" },
+ { "VB", "Boot Agent ROM Configuration" },
+ { "VE", "EFI UNDI Configuration" },
+};
+
+static void sky2_show_vpd(struct seq_file *seq, struct sky2_hw *hw)
+{
+ size_t vpd_size;
+ loff_t offs;
+ u8 len;
+ unsigned char *buf;
+ u16 reg2;
+
+ reg2 = sky2_pci_read16(hw, PCI_DEV_REG2);
+ vpd_size = 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
+
+ seq_printf(seq, "%s Product Data\n", pci_name(hw->pdev));
+ buf = kmalloc(vpd_size, GFP_KERNEL);
+ if (!buf) {
+ seq_puts(seq, "no memory!\n");
+ return;
+ }
+
+ if (pci_read_vpd(hw->pdev, 0, vpd_size, buf) < 0) {
+ seq_puts(seq, "VPD read failed\n");
+ goto out;
+ }
+
+ if (buf[0] != VPD_MAGIC) {
+ seq_printf(seq, "VPD tag mismatch: %#x\n", buf[0]);
+ goto out;
+ }
+ len = buf[1];
+ if (len == 0 || len > vpd_size - 4) {
+ seq_printf(seq, "Invalid id length: %d\n", len);
+ goto out;
+ }
+
+ seq_printf(seq, "%.*s\n", len, buf + 3);
+ offs = len + 3;
+
+ while (offs < vpd_size - 4) {
+ int i;
+
+ if (!memcmp("RW", buf + offs, 2)) /* end marker */
+ break;
+ len = buf[offs + 2];
+ if (offs + len + 3 >= vpd_size)
+ break;
+
+ for (i = 0; i < ARRAY_SIZE(vpd_tags); i++) {
+ if (!memcmp(vpd_tags[i].tag, buf + offs, 2)) {
+ seq_printf(seq, " %s: %.*s\n",
+ vpd_tags[i].label, len, buf + offs + 3);
+ break;
+ }
+ }
+ offs += len + 3;
+ }
+out:
+ kfree(buf);
+}
+
+static int sky2_debug_show(struct seq_file *seq, void *v)
+{
+ struct net_device *dev = seq->private;
+ const struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ unsigned idx, last;
+ int sop;
+
+ sky2_show_vpd(seq, hw);
+
+ seq_printf(seq, "\nIRQ src=%x mask=%x control=%x\n",
+ sky2_read32(hw, B0_ISRC),
+ sky2_read32(hw, B0_IMSK),
+ sky2_read32(hw, B0_Y2_SP_ICR));
+
+ if (!netif_running(dev)) {
+ seq_printf(seq, "network not running\n");
+ return 0;
+ }
+
+ napi_disable(&hw->napi);
+ last = sky2_read16(hw, STAT_PUT_IDX);
+
+ seq_printf(seq, "Status ring %u\n", hw->st_size);
+ if (hw->st_idx == last)
+ seq_puts(seq, "Status ring (empty)\n");
+ else {
+ seq_puts(seq, "Status ring\n");
+ for (idx = hw->st_idx; idx != last && idx < hw->st_size;
+ idx = RING_NEXT(idx, hw->st_size)) {
+ const struct sky2_status_le *le = hw->st_le + idx;
+ seq_printf(seq, "[%d] %#x %d %#x\n",
+ idx, le->opcode, le->length, le->status);
+ }
+ seq_puts(seq, "\n");
+ }
+
+ seq_printf(seq, "Tx ring pending=%u...%u report=%d done=%d\n",
+ sky2->tx_cons, sky2->tx_prod,
+ sky2_read16(hw, port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
+ sky2_read16(hw, Q_ADDR(txqaddr[port], Q_DONE)));
+
+ /* Dump contents of tx ring */
+ sop = 1;
+ for (idx = sky2->tx_next; idx != sky2->tx_prod && idx < sky2->tx_ring_size;
+ idx = RING_NEXT(idx, sky2->tx_ring_size)) {
+ const struct sky2_tx_le *le = sky2->tx_le + idx;
+ u32 a = le32_to_cpu(le->addr);
+
+ if (sop)
+ seq_printf(seq, "%u:", idx);
+ sop = 0;
+
+ switch (le->opcode & ~HW_OWNER) {
+ case OP_ADDR64:
+ seq_printf(seq, " %#x:", a);
+ break;
+ case OP_LRGLEN:
+ seq_printf(seq, " mtu=%d", a);
+ break;
+ case OP_VLAN:
+ seq_printf(seq, " vlan=%d", be16_to_cpu(le->length));
+ break;
+ case OP_TCPLISW:
+ seq_printf(seq, " csum=%#x", a);
+ break;
+ case OP_LARGESEND:
+ seq_printf(seq, " tso=%#x(%d)", a, le16_to_cpu(le->length));
+ break;
+ case OP_PACKET:
+ seq_printf(seq, " %#x(%d)", a, le16_to_cpu(le->length));
+ break;
+ case OP_BUFFER:
+ seq_printf(seq, " frag=%#x(%d)", a, le16_to_cpu(le->length));
+ break;
+ default:
+ seq_printf(seq, " op=%#x,%#x(%d)", le->opcode,
+ a, le16_to_cpu(le->length));
+ }
+
+ if (le->ctrl & EOP) {
+ seq_putc(seq, '\n');
+ sop = 1;
+ }
+ }
+
+ seq_printf(seq, "\nRx ring hw get=%d put=%d last=%d\n",
+ sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_GET_IDX)),
+ sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX)),
+ sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_LAST_IDX)));
+
+ sky2_read32(hw, B0_Y2_SP_LISR);
+ napi_enable(&hw->napi);
+ return 0;
+}
+
+static int sky2_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sky2_debug_show, inode->i_private);
+}
+
+static const struct file_operations sky2_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = sky2_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * Use network device events to create/remove/rename
+ * debugfs file entries
+ */
+static int sky2_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = ptr;
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (dev->netdev_ops->ndo_open != sky2_up || !sky2_debug)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ if (sky2->debugfs) {
+ sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs,
+ sky2_debug, dev->name);
+ }
+ break;
+
+ case NETDEV_GOING_DOWN:
+ if (sky2->debugfs) {
+ netdev_printk(KERN_DEBUG, dev, "remove debugfs\n");
+ debugfs_remove(sky2->debugfs);
+ sky2->debugfs = NULL;
+ }
+ break;
+
+ case NETDEV_UP:
+ sky2->debugfs = debugfs_create_file(dev->name, S_IRUGO,
+ sky2_debug, dev,
+ &sky2_debug_fops);
+ if (IS_ERR(sky2->debugfs))
+ sky2->debugfs = NULL;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block sky2_notifier = {
+ .notifier_call = sky2_device_event,
+};
+
+
+static __init void sky2_debug_init(void)
+{
+ struct dentry *ent;
+
+ ent = debugfs_create_dir("sky2", NULL);
+ if (!ent || IS_ERR(ent))
+ return;
+
+ sky2_debug = ent;
+ register_netdevice_notifier(&sky2_notifier);
+}
+
+static __exit void sky2_debug_cleanup(void)
+{
+ if (sky2_debug) {
+ unregister_netdevice_notifier(&sky2_notifier);
+ debugfs_remove(sky2_debug);
+ sky2_debug = NULL;
+ }
+}
+
+#else
+#define sky2_debug_init()
+#define sky2_debug_cleanup()
+#endif
+
+/* Two copies of network device operations to handle the special case
+ * of not allowing netpoll on the second port.
+ */
+static const struct net_device_ops sky2_netdev_ops[2] = {
+ {
+ .ndo_open = sky2_up,
+ .ndo_stop = sky2_down,
+ .ndo_start_xmit = sky2_xmit_frame,
+ .ndo_do_ioctl = sky2_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = sky2_set_mac_address,
+ .ndo_set_rx_mode = sky2_set_multicast,
+ .ndo_change_mtu = sky2_change_mtu,
+ .ndo_fix_features = sky2_fix_features,
+ .ndo_set_features = sky2_set_features,
+ .ndo_tx_timeout = sky2_tx_timeout,
+ .ndo_get_stats64 = sky2_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = sky2_netpoll,
+#endif
+ },
+ {
+ .ndo_open = sky2_up,
+ .ndo_stop = sky2_down,
+ .ndo_start_xmit = sky2_xmit_frame,
+ .ndo_do_ioctl = sky2_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = sky2_set_mac_address,
+ .ndo_set_rx_mode = sky2_set_multicast,
+ .ndo_change_mtu = sky2_change_mtu,
+ .ndo_fix_features = sky2_fix_features,
+ .ndo_set_features = sky2_set_features,
+ .ndo_tx_timeout = sky2_tx_timeout,
+ .ndo_get_stats64 = sky2_get_stats,
+ },
+};
+
+/* Initialize network device */
+static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
+ unsigned port,
+ int highmem, int wol)
+{
+ struct sky2_port *sky2;
+ struct net_device *dev = alloc_etherdev(sizeof(*sky2));
+
+ if (!dev) {
+ dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
+ return NULL;
+ }
+
+ SET_NETDEV_DEV(dev, &hw->pdev->dev);
+ dev->irq = hw->pdev->irq;
+ SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
+ dev->watchdog_timeo = TX_WATCHDOG;
+ dev->netdev_ops = &sky2_netdev_ops[port];
+
+ sky2 = netdev_priv(dev);
+ sky2->netdev = dev;
+ sky2->hw = hw;
+ sky2->msg_enable = netif_msg_init(debug, default_msg);
+
+ /* Auto speed and flow control */
+ sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE;
+ if (hw->chip_id != CHIP_ID_YUKON_XL)
+ dev->hw_features |= NETIF_F_RXCSUM;
+
+ sky2->flow_mode = FC_BOTH;
+
+ sky2->duplex = -1;
+ sky2->speed = -1;
+ sky2->advertising = sky2_supported_modes(hw);
+ sky2->wol = wol;
+
+ spin_lock_init(&sky2->phy_lock);
+
+ sky2->tx_pending = TX_DEF_PENDING;
+ sky2->tx_ring_size = roundup_pow_of_two(TX_DEF_PENDING+1);
+ sky2->rx_pending = RX_DEF_PENDING;
+
+ hw->dev[port] = dev;
+
+ sky2->port = port;
+
+ dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
+
+ if (highmem)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ /* Enable receive hashing unless hardware is known broken */
+ if (!(hw->flags & SKY2_HW_RSS_BROKEN))
+ dev->hw_features |= NETIF_F_RXHASH;
+
+ if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) {
+ dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->vlan_features |= SKY2_VLAN_OFFLOADS;
+ }
+
+ dev->features |= dev->hw_features;
+
+ /* read the mac address */
+ memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ return dev;
+}
+
+static void __devinit sky2_show_addr(struct net_device *dev)
+{
+ const struct sky2_port *sky2 = netdev_priv(dev);
+
+ netif_info(sky2, probe, dev, "addr %pM\n", dev->dev_addr);
+}
+
+/* Handle software interrupt used during MSI test */
+static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id)
+{
+ struct sky2_hw *hw = dev_id;
+ u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
+
+ if (status == 0)
+ return IRQ_NONE;
+
+ if (status & Y2_IS_IRQ_SW) {
+ hw->flags |= SKY2_HW_USE_MSI;
+ wake_up(&hw->msi_wait);
+ sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
+ }
+ sky2_write32(hw, B0_Y2_SP_ICR, 2);
+
+ return IRQ_HANDLED;
+}
+
+/* Test interrupt path by forcing a software IRQ */
+static int __devinit sky2_test_msi(struct sky2_hw *hw)
+{
+ struct pci_dev *pdev = hw->pdev;
+ int err;
+
+ init_waitqueue_head(&hw->msi_wait);
+
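+ /* Unmask only the software IRQ source and trigger it; the handler
+ * sets SKY2_HW_USE_MSI if the interrupt was delivered via MSI.
+ */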
+ sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
+
+ err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
+ if (err) {
+ dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
+ return err;
+ }
+
+ sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
+ sky2_read8(hw, B0_CTST);
+
+ wait_event_timeout(hw->msi_wait, (hw->flags & SKY2_HW_USE_MSI), HZ/10);
+
+ if (!(hw->flags & SKY2_HW_USE_MSI)) {
+ /* MSI test failed, go back to INTx mode */
+ dev_info(&pdev->dev, "No interrupt generated using MSI, "
+ "switching to INTx mode.\n");
+
+ err = -EOPNOTSUPP;
+ sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
+ }
+
+ sky2_write32(hw, B0_IMSK, 0);
+ sky2_read32(hw, B0_IMSK);
+
+ free_irq(pdev->irq, hw);
+
+ return err;
+}
+
+/* This driver supports yukon2 chipset only */
+static const char *sky2_name(u8 chipid, char *buf, int sz)
+{
+ const char *name[] = {
+ "XL", /* 0xb3 */
+ "EC Ultra", /* 0xb4 */
+ "Extreme", /* 0xb5 */
+ "EC", /* 0xb6 */
+ "FE", /* 0xb7 */
+ "FE+", /* 0xb8 */
+ "Supreme", /* 0xb9 */
+ "UL 2", /* 0xba */
+ "Unknown", /* 0xbb */
+ "Optima", /* 0xbc */
+ "Optima Prime", /* 0xbd */
+ "Optima 2", /* 0xbe */
+ };
+
+ if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OP_2)
+ strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
+ else
+ snprintf(buf, sz, "(chip %#x)", chipid);
+ return buf;
+}
+
+static int __devinit sky2_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev, *dev1;
+ struct sky2_hw *hw;
+ int err, using_dac = 0, wol_default;
+ u32 reg;
+ char buf1[16];
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
+ goto err_out;
+ }
+
+ /* Get configuration information.
+ * Note: regular PCI config access is used only once, to test for HW
+ * issues; all other PCI access goes through shared memory, for speed
+ * and to avoid MMCONFIG problems.
+ */
+ err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
+ if (err) {
+ dev_err(&pdev->dev, "PCI read config failed\n");
+ goto err_out;
+ }
+
+ if (~reg == 0) {
+ dev_err(&pdev->dev, "PCI configuration read error\n");
+ err = -EIO;
+ goto err_out;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "cannot obtain PCI resources\n");
+ goto err_out_disable;
+ }
+
+ pci_set_master(pdev);
+
+ if (sizeof(dma_addr_t) > sizeof(u32) &&
+ !(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))) {
+ using_dac = 1;
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
+ "for consistent allocations\n");
+ goto err_out_free_regions;
+ }
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto err_out_free_regions;
+ }
+ }
+
+
+#ifdef __BIG_ENDIAN
+ /* The sk98lin vendor driver uses hardware byte swapping but
+ * this driver uses software swapping.
+ */
+ reg &= ~PCI_REV_DESC;
+ err = pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
+ if (err) {
+ dev_err(&pdev->dev, "PCI write config failed\n");
+ goto err_out_free_regions;
+ }
+#endif
+
+ wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0;
+
+ err = -ENOMEM;
+
+ hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
+ + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
+ if (!hw) {
+ dev_err(&pdev->dev, "cannot allocate hardware struct\n");
+ goto err_out_free_regions;
+ }
+
+ hw->pdev = pdev;
+ sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
+
+ hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
+ if (!hw->regs) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ goto err_out_free_hw;
+ }
+
+ err = sky2_init(hw);
+ if (err)
+ goto err_out_iounmap;
+
+ /* ring for status responses */
+ hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING);
+ hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
+ &hw->st_dma);
+ if (!hw->st_le)
+ goto err_out_reset;
+
+ dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
+ sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
+
+ sky2_reset(hw);
+
+ dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_out_free_pci;
+ }
+
+ if (!disable_msi && pci_enable_msi(pdev) == 0) {
+ err = sky2_test_msi(hw);
+ if (err == -EOPNOTSUPP)
+ pci_disable_msi(pdev);
+ else if (err)
+ goto err_out_free_netdev;
+ }
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register net device\n");
+ goto err_out_free_netdev;
+ }
+
+ netif_carrier_off(dev);
+
+ netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
+
+ sky2_show_addr(dev);
+
+ if (hw->ports > 1) {
+ dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default);
+ if (!dev1) {
+ err = -ENOMEM;
+ goto err_out_unregister;
+ }
+
+ err = register_netdev(dev1);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register second net device\n");
+ goto err_out_free_dev1;
+ }
+
+ err = sky2_setup_irq(hw, hw->irq_name);
+ if (err)
+ goto err_out_unregister_dev1;
+
+ sky2_show_addr(dev1);
+ }
+
+ setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw);
+ INIT_WORK(&hw->restart_work, sky2_restart);
+
+ pci_set_drvdata(pdev, hw);
+ pdev->d3_delay = 150;
+
+ return 0;
+
+err_out_unregister_dev1:
+ unregister_netdev(dev1);
+err_out_free_dev1:
+ free_netdev(dev1);
+err_out_unregister:
+ if (hw->flags & SKY2_HW_USE_MSI)
+ pci_disable_msi(pdev);
+ unregister_netdev(dev);
+err_out_free_netdev:
+ free_netdev(dev);
+err_out_free_pci:
+ pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
+ hw->st_le, hw->st_dma);
+err_out_reset:
+ sky2_write8(hw, B0_CTST, CS_RST_SET);
+err_out_iounmap:
+ iounmap(hw->regs);
+err_out_free_hw:
+ kfree(hw);
+err_out_free_regions:
+ pci_release_regions(pdev);
+err_out_disable:
+ pci_disable_device(pdev);
+err_out:
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void __devexit sky2_remove(struct pci_dev *pdev)
+{
+ struct sky2_hw *hw = pci_get_drvdata(pdev);
+ int i;
+
+ if (!hw)
+ return;
+
+ del_timer_sync(&hw->watchdog_timer);
+ cancel_work_sync(&hw->restart_work);
+
+ for (i = hw->ports-1; i >= 0; --i)
+ unregister_netdev(hw->dev[i]);
+
+ sky2_write32(hw, B0_IMSK, 0);
+ sky2_read32(hw, B0_IMSK);
+
+ sky2_power_aux(hw);
+
+ sky2_write8(hw, B0_CTST, CS_RST_SET);
+ sky2_read8(hw, B0_CTST);
+
+ if (hw->ports > 1) {
+ napi_disable(&hw->napi);
+ free_irq(pdev->irq, hw);
+ }
+
+ if (hw->flags & SKY2_HW_USE_MSI)
+ pci_disable_msi(pdev);
+ pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
+ hw->st_le, hw->st_dma);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ for (i = hw->ports-1; i >= 0; --i)
+ free_netdev(hw->dev[i]);
+
+ iounmap(hw->regs);
+ kfree(hw);
+
+ pci_set_drvdata(pdev, NULL);
+}
+
+static int sky2_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct sky2_hw *hw = pci_get_drvdata(pdev);
+ int i;
+
+ if (!hw)
+ return 0;
+
+ del_timer_sync(&hw->watchdog_timer);
+ cancel_work_sync(&hw->restart_work);
+
+ rtnl_lock();
+
+ sky2_all_down(hw);
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (sky2->wol)
+ sky2_wol_init(sky2);
+ }
+
+ sky2_power_aux(hw);
+ rtnl_unlock();
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sky2_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct sky2_hw *hw = pci_get_drvdata(pdev);
+ int err;
+
+ if (!hw)
+ return 0;
+
+ /* Re-enable all clocks */
+ err = pci_write_config_dword(pdev, PCI_DEV_REG3, 0);
+ if (err) {
+ dev_err(&pdev->dev, "PCI write config failed\n");
+ goto out;
+ }
+
+ rtnl_lock();
+ sky2_reset(hw);
+ sky2_all_up(hw);
+ rtnl_unlock();
+
+ return 0;
+out:
+
+ dev_err(&pdev->dev, "resume failed (%d)\n", err);
+ pci_disable_device(pdev);
+ return err;
+}
+
+static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume);
+#define SKY2_PM_OPS (&sky2_pm_ops)
+
+#else
+
+#define SKY2_PM_OPS NULL
+#endif
+
+static void sky2_shutdown(struct pci_dev *pdev)
+{
+ sky2_suspend(&pdev->dev);
+ pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static struct pci_driver sky2_driver = {
+ .name = DRV_NAME,
+ .id_table = sky2_id_table,
+ .probe = sky2_probe,
+ .remove = __devexit_p(sky2_remove),
+ .shutdown = sky2_shutdown,
+ .driver.pm = SKY2_PM_OPS,
+};
+
+static int __init sky2_init_module(void)
+{
+ pr_info("driver version " DRV_VERSION "\n");
+
+ sky2_debug_init();
+ return pci_register_driver(&sky2_driver);
+}
+
+static void __exit sky2_cleanup_module(void)
+{
+ pci_unregister_driver(&sky2_driver);
+ sky2_debug_cleanup();
+}
+
+module_init(sky2_init_module);
+module_exit(sky2_cleanup_module);
+
+MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver");
+MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
new file mode 100644
index 000000000000..0af31b8b5f10
--- /dev/null
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -0,0 +1,2427 @@
+/*
+ * Definitions for the new Marvell Yukon 2 driver.
+ */
+#ifndef _SKY2_H
+#define _SKY2_H
+
+#define ETH_JUMBO_MTU 9000 /* Maximum MTU supported */
+
+/* PCI config registers */
+enum {
+ PCI_DEV_REG1 = 0x40,
+ PCI_DEV_REG2 = 0x44,
+ PCI_DEV_STATUS = 0x7c,
+ PCI_DEV_REG3 = 0x80,
+ PCI_DEV_REG4 = 0x84,
+ PCI_DEV_REG5 = 0x88,
+ PCI_CFG_REG_0 = 0x90,
+ PCI_CFG_REG_1 = 0x94,
+
+ PSM_CONFIG_REG0 = 0x98,
+ PSM_CONFIG_REG1 = 0x9C,
+ PSM_CONFIG_REG2 = 0x160,
+ PSM_CONFIG_REG3 = 0x164,
+ PSM_CONFIG_REG4 = 0x168,
+
+};
+
+/* Yukon-2 */
+enum pci_dev_reg_1 {
+ PCI_Y2_PIG_ENA = 1<<31, /* Enable Plug-in-Go (YUKON-2) */
+ PCI_Y2_DLL_DIS = 1<<30, /* Disable PCI DLL (YUKON-2) */
+ PCI_SW_PWR_ON_RST= 1<<30, /* SW Power on Reset (Yukon-EX) */
+ PCI_Y2_PHY2_COMA = 1<<29, /* Set PHY 2 to Coma Mode (YUKON-2) */
+ PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */
+ PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */
+ PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */
+ PCI_Y2_PME_LEGACY= 1<<15, /* PCI Express legacy power management mode */
+
+ PCI_PHY_LNK_TIM_MSK= 3L<<8,/* Bit 9.. 8: GPHY Link Trigger Timer */
+ PCI_ENA_L1_EVENT = 1<<7, /* Enable PEX L1 Event */
+ PCI_ENA_GPHY_LNK = 1<<6, /* Enable PEX L1 on GPHY Link down */
+ PCI_FORCE_PEX_L1 = 1<<5, /* Force to PEX L1 */
+};
+
+enum pci_dev_reg_2 {
+ PCI_VPD_WR_THR = 0xffL<<24, /* Bit 31..24: VPD Write Threshold */
+ PCI_DEV_SEL = 0x7fL<<17, /* Bit 23..17: EEPROM Device Select */
+ PCI_VPD_ROM_SZ = 7L<<14, /* Bit 16..14: VPD ROM Size */
+
+ PCI_PATCH_DIR = 0xfL<<8, /* Bit 11.. 8: Ext Patches dir 3..0 */
+ PCI_EXT_PATCHS = 0xfL<<4, /* Bit 7.. 4: Extended Patches 3..0 */
+ PCI_EN_DUMMY_RD = 1<<3, /* Enable Dummy Read */
+ PCI_REV_DESC = 1<<2, /* Reverse Desc. Bytes */
+
+ PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */
+};
+
+/* PCI_OUR_REG_3 32 bit Our Register 3 (Yukon-ECU only) */
+enum pci_dev_reg_3 {
+ P_CLK_ASF_REGS_DIS = 1<<18,/* Disable Clock ASF (Yukon-Ext.) */
+ P_CLK_COR_REGS_D0_DIS = 1<<17,/* Disable Clock Core Regs D0 */
+ P_CLK_MACSEC_DIS = 1<<17,/* Disable Clock MACSec (Yukon-Ext.) */
+ P_CLK_PCI_REGS_D0_DIS = 1<<16,/* Disable Clock PCI Regs D0 */
+ P_CLK_COR_YTB_ARB_DIS = 1<<15,/* Disable Clock YTB Arbiter */
+ P_CLK_MAC_LNK1_D3_DIS = 1<<14,/* Disable Clock MAC Link1 D3 */
+ P_CLK_COR_LNK1_D0_DIS = 1<<13,/* Disable Clock Core Link1 D0 */
+ P_CLK_MAC_LNK1_D0_DIS = 1<<12,/* Disable Clock MAC Link1 D0 */
+ P_CLK_COR_LNK1_D3_DIS = 1<<11,/* Disable Clock Core Link1 D3 */
+ P_CLK_PCI_MST_ARB_DIS = 1<<10,/* Disable Clock PCI Master Arb. */
+ P_CLK_COR_REGS_D3_DIS = 1<<9, /* Disable Clock Core Regs D3 */
+ P_CLK_PCI_REGS_D3_DIS = 1<<8, /* Disable Clock PCI Regs D3 */
+ P_CLK_REF_LNK1_GM_DIS = 1<<7, /* Disable Clock Ref. Link1 GMAC */
+ P_CLK_COR_LNK1_GM_DIS = 1<<6, /* Disable Clock Core Link1 GMAC */
+ P_CLK_PCI_COMMON_DIS = 1<<5, /* Disable Clock PCI Common */
+ P_CLK_COR_COMMON_DIS = 1<<4, /* Disable Clock Core Common */
+ P_CLK_PCI_LNK1_BMU_DIS = 1<<3, /* Disable Clock PCI Link1 BMU */
+ P_CLK_COR_LNK1_BMU_DIS = 1<<2, /* Disable Clock Core Link1 BMU */
+ P_CLK_PCI_LNK1_BIU_DIS = 1<<1, /* Disable Clock PCI Link1 BIU */
+ P_CLK_COR_LNK1_BIU_DIS = 1<<0, /* Disable Clock Core Link1 BIU */
+ PCIE_OUR3_WOL_D3_COLD_SET = P_CLK_ASF_REGS_DIS |
+ P_CLK_COR_REGS_D0_DIS |
+ P_CLK_COR_LNK1_D0_DIS |
+ P_CLK_MAC_LNK1_D0_DIS |
+ P_CLK_PCI_MST_ARB_DIS |
+ P_CLK_COR_COMMON_DIS |
+ P_CLK_COR_LNK1_BMU_DIS,
+};
+
+/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */
+enum pci_dev_reg_4 {
+ /* (Link Training & Status State Machine) */
+ P_PEX_LTSSM_STAT_MSK = 0x7fL<<25, /* Bit 31..25: PEX LTSSM Mask */
+#define P_PEX_LTSSM_STAT(x) ((x << 25) & P_PEX_LTSSM_STAT_MSK)
+ P_PEX_LTSSM_L1_STAT = 0x34,
+ P_PEX_LTSSM_DET_STAT = 0x01,
+ P_TIMER_VALUE_MSK = 0xffL<<16, /* Bit 23..16: Timer Value Mask */
+ /* (Active State Power Management) */
+ P_FORCE_ASPM_REQUEST = 1<<15, /* Force ASPM Request (A1 only) */
+ P_ASPM_GPHY_LINK_DOWN = 1<<14, /* GPHY Link Down (A1 only) */
+ P_ASPM_INT_FIFO_EMPTY = 1<<13, /* Internal FIFO Empty (A1 only) */
+ P_ASPM_CLKRUN_REQUEST = 1<<12, /* CLKRUN Request (A1 only) */
+
+ P_ASPM_FORCE_CLKREQ_ENA = 1<<4, /* Force CLKREQ Enable (A1b only) */
+ P_ASPM_CLKREQ_PAD_CTL = 1<<3, /* CLKREQ PAD Control (A1 only) */
+ P_ASPM_A1_MODE_SELECT = 1<<2, /* A1 Mode Select (A1 only) */
+ P_CLK_GATE_PEX_UNIT_ENA = 1<<1, /* Enable Gate PEX Unit Clock */
+ P_CLK_GATE_ROOT_COR_ENA = 1<<0, /* Enable Gate Root Core Clock */
+ P_ASPM_CONTROL_MSK = P_FORCE_ASPM_REQUEST | P_ASPM_GPHY_LINK_DOWN
+ | P_ASPM_CLKRUN_REQUEST | P_ASPM_INT_FIFO_EMPTY,
+};
+
+/* PCI_OUR_REG_5 32 bit Our Register 5 (Yukon-ECU only) */
+enum pci_dev_reg_5 {
+ /* Bit 31..27: for A3 & later */
+ P_CTL_DIV_CORE_CLK_ENA = 1<<31, /* Divide Core Clock Enable */
+ P_CTL_SRESET_VMAIN_AV = 1<<30, /* Soft Reset for Vmain_av De-Glitch */
+ P_CTL_BYPASS_VMAIN_AV = 1<<29, /* Bypass En. for Vmain_av De-Glitch */
+ P_CTL_TIM_VMAIN_AV_MSK = 3<<27, /* Bit 28..27: Timer Vmain_av Mask */
+ /* Bit 26..16: Release Clock on Event */
+ P_REL_PCIE_RST_DE_ASS = 1<<26, /* PCIe Reset De-Asserted */
+ P_REL_GPHY_REC_PACKET = 1<<25, /* GPHY Received Packet */
+ P_REL_INT_FIFO_N_EMPTY = 1<<24, /* Internal FIFO Not Empty */
+ P_REL_MAIN_PWR_AVAIL = 1<<23, /* Main Power Available */
+ P_REL_CLKRUN_REQ_REL = 1<<22, /* CLKRUN Request Release */
+ P_REL_PCIE_RESET_ASS = 1<<21, /* PCIe Reset Asserted */
+ P_REL_PME_ASSERTED = 1<<20, /* PME Asserted */
+ P_REL_PCIE_EXIT_L1_ST = 1<<19, /* PCIe Exit L1 State */
+ P_REL_LOADER_NOT_FIN = 1<<18, /* EPROM Loader Not Finished */
+ P_REL_PCIE_RX_EX_IDLE = 1<<17, /* PCIe Rx Exit Electrical Idle State */
+ P_REL_GPHY_LINK_UP = 1<<16, /* GPHY Link Up */
+
+ /* Bit 10.. 0: Mask for Gate Clock */
+ P_GAT_PCIE_RST_ASSERTED = 1<<10,/* PCIe Reset Asserted */
+ P_GAT_GPHY_N_REC_PACKET = 1<<9, /* GPHY Not Received Packet */
+ P_GAT_INT_FIFO_EMPTY = 1<<8, /* Internal FIFO Empty */
+ P_GAT_MAIN_PWR_N_AVAIL = 1<<7, /* Main Power Not Available */
+ P_GAT_CLKRUN_REQ_REL = 1<<6, /* CLKRUN Not Requested */
+ P_GAT_PCIE_RESET_ASS = 1<<5, /* PCIe Reset Asserted */
+ P_GAT_PME_DE_ASSERTED = 1<<4, /* PME De-Asserted */
+ P_GAT_PCIE_ENTER_L1_ST = 1<<3, /* PCIe Enter L1 State */
+ P_GAT_LOADER_FINISHED = 1<<2, /* EPROM Loader Finished */
+ P_GAT_PCIE_RX_EL_IDLE = 1<<1, /* PCIe Rx Electrical Idle State */
+ P_GAT_GPHY_LINK_DOWN = 1<<0, /* GPHY Link Down */
+
+ PCIE_OUR5_EVENT_CLK_D3_SET = P_REL_GPHY_REC_PACKET |
+ P_REL_INT_FIFO_N_EMPTY |
+ P_REL_PCIE_EXIT_L1_ST |
+ P_REL_PCIE_RX_EX_IDLE |
+ P_GAT_GPHY_N_REC_PACKET |
+ P_GAT_INT_FIFO_EMPTY |
+ P_GAT_PCIE_ENTER_L1_ST |
+ P_GAT_PCIE_RX_EL_IDLE,
+};
+
+/* PCI_CFG_REG_1 32 bit Config Register 1 (Yukon-Ext only) */
+enum pci_cfg_reg1 {
+ P_CF1_DIS_REL_EVT_RST = 1<<24, /* Dis. Rel. Event during PCIE reset */
+ /* Bit 23..21: Release Clock on Event */
+ P_CF1_REL_LDR_NOT_FIN = 1<<23, /* EEPROM Loader Not Finished */
+ P_CF1_REL_VMAIN_AVLBL = 1<<22, /* Vmain available */
+ P_CF1_REL_PCIE_RESET = 1<<21, /* PCI-E reset */
+ /* Bit 20..18: Gate Clock on Event */
+ P_CF1_GAT_LDR_NOT_FIN = 1<<20, /* EEPROM Loader Finished */
+ P_CF1_GAT_PCIE_RX_IDLE = 1<<19, /* PCI-E Rx Electrical idle */
+ P_CF1_GAT_PCIE_RESET = 1<<18, /* PCI-E Reset */
+ P_CF1_PRST_PHY_CLKREQ = 1<<17, /* Enable PCI-E rst & PM2PHY gen. CLKREQ */
+ P_CF1_PCIE_RST_CLKREQ = 1<<16, /* Enable PCI-E rst generate CLKREQ */
+
+ P_CF1_ENA_CFG_LDR_DONE = 1<<8, /* Enable core level Config loader done */
+
+ P_CF1_ENA_TXBMU_RD_IDLE = 1<<1, /* Enable TX BMU Read IDLE for ASPM */
+ P_CF1_ENA_TXBMU_WR_IDLE = 1<<0, /* Enable TX BMU Write IDLE for ASPM */
+
+ PCIE_CFG1_EVENT_CLK_D3_SET = P_CF1_DIS_REL_EVT_RST |
+ P_CF1_REL_LDR_NOT_FIN |
+ P_CF1_REL_VMAIN_AVLBL |
+ P_CF1_REL_PCIE_RESET |
+ P_CF1_GAT_LDR_NOT_FIN |
+ P_CF1_GAT_PCIE_RESET |
+ P_CF1_PRST_PHY_CLKREQ |
+ P_CF1_ENA_CFG_LDR_DONE |
+ P_CF1_ENA_TXBMU_RD_IDLE |
+ P_CF1_ENA_TXBMU_WR_IDLE,
+};
+
+/* Yukon-Optima */
+enum {
+ PSM_CONFIG_REG1_AC_PRESENT_STATUS = 1<<31, /* AC Present Status */
+
+ PSM_CONFIG_REG1_PTP_CLK_SEL = 1<<29, /* PTP Clock Select */
+ PSM_CONFIG_REG1_PTP_MODE = 1<<28, /* PTP Mode */
+
+ PSM_CONFIG_REG1_MUX_PHY_LINK = 1<<27, /* PHY Energy Detect Event */
+
+ PSM_CONFIG_REG1_EN_PIN63_AC_PRESENT = 1<<26, /* Enable LED_DUPLEX for ac_present */
+ PSM_CONFIG_REG1_EN_PCIE_TIMER = 1<<25, /* Enable PCIe Timer */
+ PSM_CONFIG_REG1_EN_SPU_TIMER = 1<<24, /* Enable SPU Timer */
+ PSM_CONFIG_REG1_POLARITY_AC_PRESENT = 1<<23, /* AC Present Polarity */
+
+ PSM_CONFIG_REG1_EN_AC_PRESENT = 1<<21, /* Enable AC Present */
+
+ PSM_CONFIG_REG1_EN_GPHY_INT_PSM = 1<<20, /* Enable GPHY INT for PSM */
+ PSM_CONFIG_REG1_DIS_PSM_TIMER = 1<<19, /* Disable PSM Timer */
+};
+
+/* Yukon-Supreme */
+enum {
+ PSM_CONFIG_REG1_GPHY_ENERGY_STS = 1<<31, /* GPHY Energy Detect Status */
+
+ PSM_CONFIG_REG1_UART_MODE_MSK = 3<<29, /* UART_Mode */
+ PSM_CONFIG_REG1_CLK_RUN_ASF = 1<<28, /* Enable Clock Free Running for ASF Subsystem */
+ PSM_CONFIG_REG1_UART_CLK_DISABLE= 1<<27, /* Disable UART clock */
+ PSM_CONFIG_REG1_VAUX_ONE = 1<<26, /* Tie internal Vaux to 1'b1 */
+ PSM_CONFIG_REG1_UART_FC_RI_VAL = 1<<25, /* Default value for UART_RI_n */
+ PSM_CONFIG_REG1_UART_FC_DCD_VAL = 1<<24, /* Default value for UART_DCD_n */
+ PSM_CONFIG_REG1_UART_FC_DSR_VAL = 1<<23, /* Default value for UART_DSR_n */
+ PSM_CONFIG_REG1_UART_FC_CTS_VAL = 1<<22, /* Default value for UART_CTS_n */
+ PSM_CONFIG_REG1_LATCH_VAUX = 1<<21, /* Enable Latch current Vaux_avlbl */
+ PSM_CONFIG_REG1_FORCE_TESTMODE_INPUT= 1<<20, /* Force Testmode pin as input PAD */
+ PSM_CONFIG_REG1_UART_RST = 1<<19, /* UART_RST */
+ PSM_CONFIG_REG1_PSM_PCIE_L1_POL = 1<<18, /* PCIE L1 Event Polarity for PSM */
+ PSM_CONFIG_REG1_TIMER_STAT = 1<<17, /* PSM Timer Status */
+ PSM_CONFIG_REG1_GPHY_INT = 1<<16, /* GPHY INT Status */
+ PSM_CONFIG_REG1_FORCE_TESTMODE_ZERO= 1<<15, /* Force internal Testmode as 1'b0 */
+ PSM_CONFIG_REG1_EN_INT_ASPM_CLKREQ = 1<<14, /* ENABLE INT for CLKRUN on ASPM and CLKREQ */
+ PSM_CONFIG_REG1_EN_SND_TASK_ASPM_CLKREQ = 1<<13, /* ENABLE Snd_task for CLKRUN on ASPM and CLKREQ */
+ PSM_CONFIG_REG1_DIS_CLK_GATE_SND_TASK = 1<<12, /* Disable CLK_GATE control snd_task */
+ PSM_CONFIG_REG1_DIS_FF_CHIAN_SND_INTA = 1<<11, /* Disable flip-flop chain for sndmsg_inta */
+
+ PSM_CONFIG_REG1_DIS_LOADER = 1<<9, /* Disable Loader SM after PSM Goes back to IDLE */
+ PSM_CONFIG_REG1_DO_PWDN = 1<<8, /* Do Power Down, Start PSM Scheme */
+ PSM_CONFIG_REG1_DIS_PIG = 1<<7, /* Disable Plug-in-Go SM after PSM Goes back to IDLE */
+ PSM_CONFIG_REG1_DIS_PERST = 1<<6, /* Disable Internal PCIe Reset after PSM Goes back to IDLE */
+ PSM_CONFIG_REG1_EN_REG18_PD = 1<<5, /* Enable REG18 Power Down for PSM */
+ PSM_CONFIG_REG1_EN_PSM_LOAD = 1<<4, /* Disable EEPROM Loader after PSM Goes back to IDLE */
+ PSM_CONFIG_REG1_EN_PSM_HOT_RST = 1<<3, /* Enable PCIe Hot Reset for PSM */
+ PSM_CONFIG_REG1_EN_PSM_PERST = 1<<2, /* Enable PCIe Reset Event for PSM */
+ PSM_CONFIG_REG1_EN_PSM_PCIE_L1 = 1<<1, /* Enable PCIe L1 Event for PSM */
+ PSM_CONFIG_REG1_EN_PSM = 1<<0, /* Enable PSM Scheme */
+};
+
+/* PSM_CONFIG_REG4 0x0168 PSM Config Register 4 */
+enum {
+ /* PHY Link Detect Timer */
+ PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_MSK = 0xf<<4,
+ PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE = 4,
+
+ PSM_CONFIG_REG4_DEBUG_TIMER = 1<<1, /* Debug Timer */
+ PSM_CONFIG_REG4_RST_PHY_LINK_DETECT = 1<<0, /* Reset GPHY Link Detect */
+};
+
+
+#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
+ PCI_STATUS_SIG_SYSTEM_ERROR | \
+ PCI_STATUS_REC_MASTER_ABORT | \
+ PCI_STATUS_REC_TARGET_ABORT | \
+ PCI_STATUS_PARITY)
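+
+/*
+ * Usage sketch (illustrative, not part of the original register map): the
+ * PCI status error bits are write-1-to-clear, so a driver acknowledges any
+ * latched errors by writing the mask back. The config-space access helpers
+ * named here are assumptions for the example:
+ *
+ *	u16 status = my_pci_read16(hw, PCI_STATUS);
+ *	my_pci_write16(hw, PCI_STATUS, status | PCI_STATUS_ERROR_BITS);
+ */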
+
+enum csr_regs {
+ B0_RAP = 0x0000,
+ B0_CTST = 0x0004,
+
+ B0_POWER_CTRL = 0x0007,
+ B0_ISRC = 0x0008,
+ B0_IMSK = 0x000c,
+ B0_HWE_ISRC = 0x0010,
+ B0_HWE_IMSK = 0x0014,
+
+ /* Special ISR registers (Yukon-2 only) */
+ B0_Y2_SP_ISRC2 = 0x001c,
+ B0_Y2_SP_ISRC3 = 0x0020,
+ B0_Y2_SP_EISR = 0x0024,
+ B0_Y2_SP_LISR = 0x0028,
+ B0_Y2_SP_ICR = 0x002c,
+
+ B2_MAC_1 = 0x0100,
+ B2_MAC_2 = 0x0108,
+ B2_MAC_3 = 0x0110,
+ B2_CONN_TYP = 0x0118,
+ B2_PMD_TYP = 0x0119,
+ B2_MAC_CFG = 0x011a,
+ B2_CHIP_ID = 0x011b,
+ B2_E_0 = 0x011c,
+
+ B2_Y2_CLK_GATE = 0x011d,
+ B2_Y2_HW_RES = 0x011e,
+ B2_E_3 = 0x011f,
+ B2_Y2_CLK_CTRL = 0x0120,
+
+ B2_TI_INI = 0x0130,
+ B2_TI_VAL = 0x0134,
+ B2_TI_CTRL = 0x0138,
+ B2_TI_TEST = 0x0139,
+
+ B2_TST_CTRL1 = 0x0158,
+ B2_TST_CTRL2 = 0x0159,
+ B2_GP_IO = 0x015c,
+
+ B2_I2C_CTRL = 0x0160,
+ B2_I2C_DATA = 0x0164,
+ B2_I2C_IRQ = 0x0168,
+ B2_I2C_SW = 0x016c,
+
+ Y2_PEX_PHY_DATA = 0x0170,
+ Y2_PEX_PHY_ADDR = 0x0172,
+
+ B3_RAM_ADDR = 0x0180,
+ B3_RAM_DATA_LO = 0x0184,
+ B3_RAM_DATA_HI = 0x0188,
+
+/* RAM Interface Registers */
+/* Yukon-2: use RAM_BUFFER() to access the RAM buffer */
+/*
+ * The HW spec calls these registers Timeout Value 0..11, but those names
+ * are not usable in SW. Note that these are NOT real timeouts; they are
+ * the number of qWords transferred continuously.
+ */
+#define RAM_BUFFER(port, reg)	((reg) | ((port) << 6))
+
+ B3_RI_WTO_R1 = 0x0190,
+ B3_RI_WTO_XA1 = 0x0191,
+ B3_RI_WTO_XS1 = 0x0192,
+ B3_RI_RTO_R1 = 0x0193,
+ B3_RI_RTO_XA1 = 0x0194,
+ B3_RI_RTO_XS1 = 0x0195,
+ B3_RI_WTO_R2 = 0x0196,
+ B3_RI_WTO_XA2 = 0x0197,
+ B3_RI_WTO_XS2 = 0x0198,
+ B3_RI_RTO_R2 = 0x0199,
+ B3_RI_RTO_XA2 = 0x019a,
+ B3_RI_RTO_XS2 = 0x019b,
+ B3_RI_TO_VAL = 0x019c,
+ B3_RI_CTRL = 0x01a0,
+ B3_RI_TEST = 0x01a2,
+ B3_MA_TOINI_RX1 = 0x01b0,
+ B3_MA_TOINI_RX2 = 0x01b1,
+ B3_MA_TOINI_TX1 = 0x01b2,
+ B3_MA_TOINI_TX2 = 0x01b3,
+ B3_MA_TOVAL_RX1 = 0x01b4,
+ B3_MA_TOVAL_RX2 = 0x01b5,
+ B3_MA_TOVAL_TX1 = 0x01b6,
+ B3_MA_TOVAL_TX2 = 0x01b7,
+ B3_MA_TO_CTRL = 0x01b8,
+ B3_MA_TO_TEST = 0x01ba,
+ B3_MA_RCINI_RX1 = 0x01c0,
+ B3_MA_RCINI_RX2 = 0x01c1,
+ B3_MA_RCINI_TX1 = 0x01c2,
+ B3_MA_RCINI_TX2 = 0x01c3,
+ B3_MA_RCVAL_RX1 = 0x01c4,
+ B3_MA_RCVAL_RX2 = 0x01c5,
+ B3_MA_RCVAL_TX1 = 0x01c6,
+ B3_MA_RCVAL_TX2 = 0x01c7,
+ B3_MA_RC_CTRL = 0x01c8,
+ B3_MA_RC_TEST = 0x01ca,
+ B3_PA_TOINI_RX1 = 0x01d0,
+ B3_PA_TOINI_RX2 = 0x01d4,
+ B3_PA_TOINI_TX1 = 0x01d8,
+ B3_PA_TOINI_TX2 = 0x01dc,
+ B3_PA_TOVAL_RX1 = 0x01e0,
+ B3_PA_TOVAL_RX2 = 0x01e4,
+ B3_PA_TOVAL_TX1 = 0x01e8,
+ B3_PA_TOVAL_TX2 = 0x01ec,
+ B3_PA_CTRL = 0x01f0,
+ B3_PA_TEST = 0x01f2,
+
+ Y2_CFG_SPC = 0x1c00, /* PCI config space region */
+ Y2_CFG_AER = 0x1d00, /* PCI Advanced Error Report region */
+};
+
+/* B0_CTST 24 bit Control/Status register */
+enum {
+ Y2_VMAIN_AVAIL = 1<<17,/* VMAIN available (YUKON-2 only) */
+ Y2_VAUX_AVAIL = 1<<16,/* VAUX available (YUKON-2 only) */
+ Y2_HW_WOL_ON = 1<<15,/* HW WOL On (Yukon-EC Ultra A1 only) */
+	Y2_HW_WOL_OFF	= 1<<14,/* HW WOL Off (Yukon-EC Ultra A1 only) */
+ Y2_ASF_ENABLE = 1<<13,/* ASF Unit Enable (YUKON-2 only) */
+ Y2_ASF_DISABLE = 1<<12,/* ASF Unit Disable (YUKON-2 only) */
+ Y2_CLK_RUN_ENA = 1<<11,/* CLK_RUN Enable (YUKON-2 only) */
+ Y2_CLK_RUN_DIS = 1<<10,/* CLK_RUN Disable (YUKON-2 only) */
+ Y2_LED_STAT_ON = 1<<9, /* Status LED On (YUKON-2 only) */
+ Y2_LED_STAT_OFF = 1<<8, /* Status LED Off (YUKON-2 only) */
+
+ CS_ST_SW_IRQ = 1<<7, /* Set IRQ SW Request */
+ CS_CL_SW_IRQ = 1<<6, /* Clear IRQ SW Request */
+ CS_STOP_DONE = 1<<5, /* Stop Master is finished */
+ CS_STOP_MAST = 1<<4, /* Command Bit to stop the master */
+ CS_MRST_CLR = 1<<3, /* Clear Master reset */
+ CS_MRST_SET = 1<<2, /* Set Master reset */
+ CS_RST_CLR = 1<<1, /* Clear Software reset */
+ CS_RST_SET = 1, /* Set Software reset */
+};
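+
+/*
+ * The set/clear pairs above are self-clearing command bits: software pulses
+ * a reset by writing the SET bit and then the CLR bit. A minimal sketch,
+ * assuming an 8-bit MMIO write helper (the helper name is illustrative):
+ *
+ *	my_write8(hw, B0_CTST, CS_RST_SET);	// assert software reset
+ *	my_write8(hw, B0_CTST, CS_RST_CLR);	// release software reset
+ */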
+
+/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */
+enum {
+ PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */
+ PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */
+ PC_VCC_ENA = 1<<5, /* Switch VCC Enable */
+ PC_VCC_DIS = 1<<4, /* Switch VCC Disable */
+ PC_VAUX_ON = 1<<3, /* Switch VAUX On */
+ PC_VAUX_OFF = 1<<2, /* Switch VAUX Off */
+ PC_VCC_ON = 1<<1, /* Switch VCC On */
+ PC_VCC_OFF = 1<<0, /* Switch VCC Off */
+};
+
+/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */
+
+/* B0_Y2_SP_ISRC2 32 bit Special Interrupt Source Reg 2 */
+/* B0_Y2_SP_ISRC3 32 bit Special Interrupt Source Reg 3 */
+/* B0_Y2_SP_EISR 32 bit Enter ISR Reg */
+/* B0_Y2_SP_LISR 32 bit Leave ISR Reg */
+enum {
+ Y2_IS_HW_ERR = 1<<31, /* Interrupt HW Error */
+ Y2_IS_STAT_BMU = 1<<30, /* Status BMU Interrupt */
+ Y2_IS_ASF = 1<<29, /* ASF subsystem Interrupt */
+ Y2_IS_CPU_TO = 1<<28, /* CPU Timeout */
+ Y2_IS_POLL_CHK = 1<<27, /* Check IRQ from polling unit */
+ Y2_IS_TWSI_RDY = 1<<26, /* IRQ on end of TWSI Tx */
+ Y2_IS_IRQ_SW = 1<<25, /* SW forced IRQ */
+ Y2_IS_TIMINT = 1<<24, /* IRQ from Timer */
+
+ Y2_IS_IRQ_PHY2 = 1<<12, /* Interrupt from PHY 2 */
+ Y2_IS_IRQ_MAC2 = 1<<11, /* Interrupt from MAC 2 */
+ Y2_IS_CHK_RX2 = 1<<10, /* Descriptor error Rx 2 */
+ Y2_IS_CHK_TXS2 = 1<<9, /* Descriptor error TXS 2 */
+ Y2_IS_CHK_TXA2 = 1<<8, /* Descriptor error TXA 2 */
+
+ Y2_IS_PSM_ACK = 1<<7, /* PSM Acknowledge (Yukon-Optima only) */
+ Y2_IS_PTP_TIST = 1<<6, /* PTP Time Stamp (Yukon-Optima only) */
+ Y2_IS_PHY_QLNK = 1<<5, /* PHY Quick Link (Yukon-Optima only) */
+
+ Y2_IS_IRQ_PHY1 = 1<<4, /* Interrupt from PHY 1 */
+ Y2_IS_IRQ_MAC1 = 1<<3, /* Interrupt from MAC 1 */
+ Y2_IS_CHK_RX1 = 1<<2, /* Descriptor error Rx 1 */
+ Y2_IS_CHK_TXS1 = 1<<1, /* Descriptor error TXS 1 */
+ Y2_IS_CHK_TXA1 = 1<<0, /* Descriptor error TXA 1 */
+
+ Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU,
+ Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1
+ | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1,
+ Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2
+ | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2,
+ Y2_IS_ERROR = Y2_IS_HW_ERR |
+ Y2_IS_IRQ_MAC1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1 |
+ Y2_IS_IRQ_MAC2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2,
+};
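+
+/*
+ * Helper sketch (an assumption, not from the original header): composing
+ * the interrupt mask for one link from the per-port groups defined above.
+ */
+static inline u32 y2_is_port_mask(unsigned int port)
+{
+	/* port 0 is Link 1, port 1 is Link 2 */
+	return Y2_IS_BASE | (port == 0 ? Y2_IS_PORT_1 : Y2_IS_PORT_2);
+}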
+
+/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
+enum {
+ IS_ERR_MSK = 0x00003fff,/* All Error bits */
+
+ IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */
+ IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */
+ IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */
+ IS_IRQ_STAT = 1<<10, /* IRQ status exception */
+ IS_NO_STAT_M1 = 1<<9, /* No Rx Status from MAC 1 */
+ IS_NO_STAT_M2 = 1<<8, /* No Rx Status from MAC 2 */
+ IS_NO_TIST_M1 = 1<<7, /* No Time Stamp from MAC 1 */
+ IS_NO_TIST_M2 = 1<<6, /* No Time Stamp from MAC 2 */
+ IS_RAM_RD_PAR = 1<<5, /* RAM Read Parity Error */
+ IS_RAM_WR_PAR = 1<<4, /* RAM Write Parity Error */
+ IS_M1_PAR_ERR = 1<<3, /* MAC 1 Parity Error */
+ IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */
+ IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */
+ IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */
+};
+
+/* Hardware error interrupt mask for Yukon 2 */
+enum {
+ Y2_IS_TIST_OV = 1<<29,/* Time Stamp Timer overflow interrupt */
+ Y2_IS_SENSOR = 1<<28, /* Sensor interrupt */
+ Y2_IS_MST_ERR = 1<<27, /* Master error interrupt */
+ Y2_IS_IRQ_STAT = 1<<26, /* Status exception interrupt */
+ Y2_IS_PCI_EXP = 1<<25, /* PCI-Express interrupt */
+ Y2_IS_PCI_NEXP = 1<<24, /* PCI-Express error similar to PCI error */
+ /* Link 2 */
+ Y2_IS_PAR_RD2 = 1<<13, /* Read RAM parity error interrupt */
+ Y2_IS_PAR_WR2 = 1<<12, /* Write RAM parity error interrupt */
+ Y2_IS_PAR_MAC2 = 1<<11, /* MAC hardware fault interrupt */
+ Y2_IS_PAR_RX2 = 1<<10, /* Parity Error Rx Queue 2 */
+ Y2_IS_TCP_TXS2 = 1<<9, /* TCP length mismatch sync Tx queue IRQ */
+ Y2_IS_TCP_TXA2 = 1<<8, /* TCP length mismatch async Tx queue IRQ */
+ /* Link 1 */
+ Y2_IS_PAR_RD1 = 1<<5, /* Read RAM parity error interrupt */
+ Y2_IS_PAR_WR1 = 1<<4, /* Write RAM parity error interrupt */
+ Y2_IS_PAR_MAC1 = 1<<3, /* MAC hardware fault interrupt */
+ Y2_IS_PAR_RX1 = 1<<2, /* Parity Error Rx Queue 1 */
+ Y2_IS_TCP_TXS1 = 1<<1, /* TCP length mismatch sync Tx queue IRQ */
+ Y2_IS_TCP_TXA1 = 1<<0, /* TCP length mismatch async Tx queue IRQ */
+
+ Y2_HWE_L1_MASK = Y2_IS_PAR_RD1 | Y2_IS_PAR_WR1 | Y2_IS_PAR_MAC1 |
+ Y2_IS_PAR_RX1 | Y2_IS_TCP_TXS1| Y2_IS_TCP_TXA1,
+ Y2_HWE_L2_MASK = Y2_IS_PAR_RD2 | Y2_IS_PAR_WR2 | Y2_IS_PAR_MAC2 |
+ Y2_IS_PAR_RX2 | Y2_IS_TCP_TXS2| Y2_IS_TCP_TXA2,
+
+ Y2_HWE_ALL_MASK = Y2_IS_TIST_OV | Y2_IS_MST_ERR | Y2_IS_IRQ_STAT |
+ Y2_HWE_L1_MASK | Y2_HWE_L2_MASK,
+};
+
+/* B28_DPT_CTRL 8 bit Descriptor Poll Timer Ctrl Reg */
+enum {
+ DPT_START = 1<<1,
+ DPT_STOP = 1<<0,
+};
+
+/* B2_TST_CTRL1 8 bit Test Control Register 1 */
+enum {
+ TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */
+ TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */
+ TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */
+ TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */
+ TST_FRC_APERR_M = 1<<3, /* force ADDRPERR on MST */
+ TST_FRC_APERR_T = 1<<2, /* force ADDRPERR on TRG */
+ TST_CFG_WRITE_ON = 1<<1, /* Enable Config Reg WR */
+ TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */
+};
+
+/* B2_GPIO */
+enum {
+ GLB_GPIO_CLK_DEB_ENA = 1<<31, /* Clock Debug Enable */
+ GLB_GPIO_CLK_DBG_MSK = 0xf<<26, /* Clock Debug */
+
+ GLB_GPIO_INT_RST_D3_DIS = 1<<15, /* Disable Internal Reset After D3 to D0 */
+ GLB_GPIO_LED_PAD_SPEED_UP = 1<<14, /* LED PAD Speed Up */
+ GLB_GPIO_STAT_RACE_DIS = 1<<13, /* Status Race Disable */
+ GLB_GPIO_TEST_SEL_MSK = 3<<11, /* Testmode Select */
+ GLB_GPIO_TEST_SEL_BASE = 1<<11,
+ GLB_GPIO_RAND_ENA = 1<<10, /* Random Enable */
+ GLB_GPIO_RAND_BIT_1 = 1<<9, /* Random Bit 1 */
+};
+
+/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */
+enum {
+ CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */
+ /* Bit 3.. 2: reserved */
+ CFG_DIS_M2_CLK = 1<<1, /* Disable Clock for 2nd MAC */
+	CFG_SNG_MAC	= 1<<0,	/* MAC Config: 0=2 MACs / 1=1 MAC */
+};
+
+/* B2_CHIP_ID 8 bit Chip Identification Number */
+enum {
+ CHIP_ID_YUKON_XL = 0xb3, /* YUKON-2 XL */
+ CHIP_ID_YUKON_EC_U = 0xb4, /* YUKON-2 EC Ultra */
+ CHIP_ID_YUKON_EX = 0xb5, /* YUKON-2 Extreme */
+ CHIP_ID_YUKON_EC = 0xb6, /* YUKON-2 EC */
+ CHIP_ID_YUKON_FE = 0xb7, /* YUKON-2 FE */
+ CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */
+ CHIP_ID_YUKON_SUPR = 0xb9, /* YUKON-2 Supreme */
+ CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */
+ CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */
+ CHIP_ID_YUKON_PRM = 0xbd, /* YUKON-2 Optima Prime */
+ CHIP_ID_YUKON_OP_2 = 0xbe, /* YUKON-2 Optima 2 */
+};
+
+enum yukon_xl_rev {
+ CHIP_REV_YU_XL_A0 = 0,
+ CHIP_REV_YU_XL_A1 = 1,
+ CHIP_REV_YU_XL_A2 = 2,
+ CHIP_REV_YU_XL_A3 = 3,
+};
+
+enum yukon_ec_rev {
+ CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
+ CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */
+ CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */
+};
+enum yukon_ec_u_rev {
+ CHIP_REV_YU_EC_U_A0 = 1,
+ CHIP_REV_YU_EC_U_A1 = 2,
+ CHIP_REV_YU_EC_U_B0 = 3,
+ CHIP_REV_YU_EC_U_B1 = 5,
+};
+enum yukon_fe_rev {
+ CHIP_REV_YU_FE_A1 = 1,
+ CHIP_REV_YU_FE_A2 = 2,
+};
+enum yukon_fe_p_rev {
+ CHIP_REV_YU_FE2_A0 = 0,
+};
+enum yukon_ex_rev {
+ CHIP_REV_YU_EX_A0 = 1,
+ CHIP_REV_YU_EX_B0 = 2,
+};
+enum yukon_supr_rev {
+ CHIP_REV_YU_SU_A0 = 0,
+ CHIP_REV_YU_SU_B0 = 1,
+ CHIP_REV_YU_SU_B1 = 3,
+};
+
+
+/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
+enum {
+ Y2_STATUS_LNK2_INAC = 1<<7, /* Status Link 2 inactive (0 = active) */
+ Y2_CLK_GAT_LNK2_DIS = 1<<6, /* Disable clock gating Link 2 */
+ Y2_COR_CLK_LNK2_DIS = 1<<5, /* Disable Core clock Link 2 */
+ Y2_PCI_CLK_LNK2_DIS = 1<<4, /* Disable PCI clock Link 2 */
+ Y2_STATUS_LNK1_INAC = 1<<3, /* Status Link 1 inactive (0 = active) */
+ Y2_CLK_GAT_LNK1_DIS = 1<<2, /* Disable clock gating Link 1 */
+ Y2_COR_CLK_LNK1_DIS = 1<<1, /* Disable Core clock Link 1 */
+ Y2_PCI_CLK_LNK1_DIS = 1<<0, /* Disable PCI clock Link 1 */
+};
+
+/* B2_Y2_HW_RES 8 bit HW Resources (Yukon-2 only) */
+enum {
+ CFG_LED_MODE_MSK = 7<<2, /* Bit 4.. 2: LED Mode Mask */
+ CFG_LINK_2_AVAIL = 1<<1, /* Link 2 available */
+ CFG_LINK_1_AVAIL = 1<<0, /* Link 1 available */
+};
+#define CFG_LED_MODE(x) (((x) & CFG_LED_MODE_MSK) >> 2)
+#define CFG_DUAL_MAC_MSK (CFG_LINK_2_AVAIL | CFG_LINK_1_AVAIL)
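+
+/*
+ * Decoding sketch (illustrative helper, not in the original header): the
+ * B2_Y2_HW_RES value carries both the LED mode field and the link
+ * availability bits, so dual-MAC detection is a simple mask compare.
+ */
+static inline int y2_hw_res_is_dual(u8 hw_res)
+{
+	return (hw_res & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK;
+}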
+
+
+/* B2_Y2_CLK_CTRL 32 bit Clock Frequency Control Register (Yukon-2/EC) */
+enum {
+ Y2_CLK_DIV_VAL_MSK = 0xff<<16,/* Bit 23..16: Clock Divisor Value */
+#define Y2_CLK_DIV_VAL(x) (((x)<<16) & Y2_CLK_DIV_VAL_MSK)
+ Y2_CLK_DIV_VAL2_MSK = 7<<21, /* Bit 23..21: Clock Divisor Value */
+ Y2_CLK_SELECT2_MSK = 0x1f<<16,/* Bit 20..16: Clock Select */
+#define Y2_CLK_DIV_VAL_2(x) (((x)<<21) & Y2_CLK_DIV_VAL2_MSK)
+#define Y2_CLK_SEL_VAL_2(x) (((x)<<16) & Y2_CLK_SELECT2_MSK)
+ Y2_CLK_DIV_ENA = 1<<1, /* Enable Core Clock Division */
+ Y2_CLK_DIV_DIS = 1<<0, /* Disable Core Clock Division */
+};
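+
+/*
+ * Sketch (assumption, write helper illustrative): enabling core clock
+ * division means writing the divisor field together with the enable bit,
+ * e.g. for the Yukon-2/EC layout:
+ *
+ *	my_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_VAL(div) | Y2_CLK_DIV_ENA);
+ */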
+
+/* B2_TI_CTRL 8 bit Timer control */
+/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */
+enum {
+ TIM_START = 1<<2, /* Start Timer */
+ TIM_STOP = 1<<1, /* Stop Timer */
+ TIM_CLR_IRQ = 1<<0, /* Clear Timer IRQ (!IRQM) */
+};
+
+/* B2_TI_TEST 8 Bit Timer Test */
+/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */
+/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */
+enum {
+ TIM_T_ON = 1<<2, /* Test mode on */
+ TIM_T_OFF = 1<<1, /* Test mode off */
+ TIM_T_STEP = 1<<0, /* Test step */
+};
+
+/* Y2_PEX_PHY_ADDR/DATA PEX PHY address and data reg (Yukon-2 only) */
+enum {
+ PEX_RD_ACCESS = 1<<31, /* Access Mode Read = 1, Write = 0 */
+ PEX_DB_ACCESS = 1<<30, /* Access to debug register */
+};
+
+/* B3_RAM_ADDR 32 bit RAM Address, to read or write */
+ /* Bit 31..19: reserved */
+#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */
+/* RAM Interface Registers */
+
+/* B3_RI_CTRL 16 bit RAM Interface Control Register */
+enum {
+ RI_CLR_RD_PERR = 1<<9, /* Clear IRQ RAM Read Parity Err */
+ RI_CLR_WR_PERR = 1<<8, /* Clear IRQ RAM Write Parity Err*/
+
+ RI_RST_CLR = 1<<1, /* Clear RAM Interface Reset */
+ RI_RST_SET = 1<<0, /* Set RAM Interface Reset */
+};
+
+#define SK_RI_TO_53 36 /* RAM interface timeout */
+
+
+/* Port related registers FIFO, and Arbiter */
+#define SK_REG(port,reg) (((port)<<7)+(reg))
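+
+/*
+ * Example (illustrative): SK_REG() folds the port number into bit 7 of the
+ * register offset, so SK_REG(0, reg) addresses the Link 1 copy and
+ * SK_REG(1, reg) the Link 2 copy of the same per-port register, e.g.
+ *
+ *	my_write8(hw, SK_REG(port, TXA_CTRL), TXA_ENA_ARB);
+ *
+ * (TXA_CTRL is defined further below; the write helper is an assumption.)
+ */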
+
+/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
+/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */
+/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */
+/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */
+/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */
+
+#define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 0: Max TXA Timer/Cnt Val */
+
+/* TXA_CTRL 8 bit Tx Arbiter Control Register */
+enum {
+ TXA_ENA_FSYNC = 1<<7, /* Enable force of sync Tx queue */
+ TXA_DIS_FSYNC = 1<<6, /* Disable force of sync Tx queue */
+ TXA_ENA_ALLOC = 1<<5, /* Enable alloc of free bandwidth */
+ TXA_DIS_ALLOC = 1<<4, /* Disable alloc of free bandwidth */
+ TXA_START_RC = 1<<3, /* Start sync Rate Control */
+ TXA_STOP_RC = 1<<2, /* Stop sync Rate Control */
+ TXA_ENA_ARB = 1<<1, /* Enable Tx Arbiter */
+ TXA_DIS_ARB = 1<<0, /* Disable Tx Arbiter */
+};
+
+/*
+ * Bank 4 - 5
+ */
+/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
+enum {
+ TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/
+ TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */
+ TXA_LIM_INI = 0x0208,/* 32 bit Tx Arb Limit Counter Init Val */
+ TXA_LIM_VAL = 0x020c,/* 32 bit Tx Arb Limit Counter Value */
+ TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */
+ TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */
+ TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */
+
+ RSS_KEY = 0x0220, /* RSS Key setup */
+ RSS_CFG = 0x0248, /* RSS Configuration */
+};
+
+enum {
+ HASH_TCP_IPV6_EX_CTRL = 1<<5,
+ HASH_IPV6_EX_CTRL = 1<<4,
+ HASH_TCP_IPV6_CTRL = 1<<3,
+ HASH_IPV6_CTRL = 1<<2,
+ HASH_TCP_IPV4_CTRL = 1<<1,
+ HASH_IPV4_CTRL = 1<<0,
+
+ HASH_ALL = 0x3f,
+};
+
+enum {
+ B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */
+ B7_CFG_SPC = 0x0380,/* copy of the Configuration register */
+ B8_RQ1_REGS = 0x0400,/* Receive Queue 1 */
+ B8_RQ2_REGS = 0x0480,/* Receive Queue 2 */
+ B8_TS1_REGS = 0x0600,/* Transmit sync queue 1 */
+ B8_TA1_REGS = 0x0680,/* Transmit async queue 1 */
+ B8_TS2_REGS = 0x0700,/* Transmit sync queue 2 */
+	B8_TA2_REGS	= 0x0780,/* Transmit async queue 2 */
+ B16_RAM_REGS = 0x0800,/* RAM Buffer Registers */
+};
+
+/* Queue Register Offsets, use Q_ADDR() to access */
+enum {
+ B8_Q_REGS = 0x0400, /* base of Queue registers */
+ Q_D = 0x00, /* 8*32 bit Current Descriptor */
+ Q_VLAN = 0x20, /* 16 bit Current VLAN Tag */
+ Q_DONE = 0x24, /* 16 bit Done Index */
+ Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */
+ Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */
+ Q_BC = 0x30, /* 32 bit Current Byte Counter */
+ Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */
+ Q_TEST = 0x38, /* 32 bit Test/Control Register */
+
+/* Yukon-2 */
+ Q_WM = 0x40, /* 16 bit FIFO Watermark */
+ Q_AL = 0x42, /* 8 bit FIFO Alignment */
+ Q_RSP = 0x44, /* 16 bit FIFO Read Shadow Pointer */
+ Q_RSL = 0x46, /* 8 bit FIFO Read Shadow Level */
+ Q_RP = 0x48, /* 8 bit FIFO Read Pointer */
+ Q_RL = 0x4a, /* 8 bit FIFO Read Level */
+ Q_WP = 0x4c, /* 8 bit FIFO Write Pointer */
+ Q_WSP = 0x4d, /* 8 bit FIFO Write Shadow Pointer */
+ Q_WL = 0x4e, /* 8 bit FIFO Write Level */
+ Q_WSL = 0x4f, /* 8 bit FIFO Write Shadow Level */
+};
+#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
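+
+/*
+ * Example (illustrative): Q_ADDR() adds the queue base and the register
+ * offset inside the queue block, so the BMU control/status register of
+ * receive queue 1 is Q_ADDR(Q_R1, Q_CSR) (the queue offsets Q_R1, Q_XA1,
+ * ... are defined further below).
+ */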
+
+/* Q_TEST 32 bit Test Register */
+enum {
+ /* Transmit */
+ F_TX_CHK_AUTO_OFF = 1<<31, /* Tx checksum auto calc off (Yukon EX) */
+	F_TX_CHK_AUTO_ON	= 1<<30, /* Tx checksum auto calc on (Yukon EX) */
+
+ /* Receive */
+ F_M_RX_RAM_DIS = 1<<24, /* MAC Rx RAM Read Port disable */
+
+ /* Hardware testbits not used */
+};
+
+/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
+enum {
+ Y2_B8_PREF_REGS = 0x0450,
+
+ PREF_UNIT_CTRL = 0x00, /* 32 bit Control register */
+ PREF_UNIT_LAST_IDX = 0x04, /* 16 bit Last Index */
+ PREF_UNIT_ADDR_LO = 0x08, /* 32 bit List start addr, low part */
+ PREF_UNIT_ADDR_HI = 0x0c, /* 32 bit List start addr, high part*/
+ PREF_UNIT_GET_IDX = 0x10, /* 16 bit Get Index */
+ PREF_UNIT_PUT_IDX = 0x14, /* 16 bit Put Index */
+ PREF_UNIT_FIFO_WP = 0x20, /* 8 bit FIFO write pointer */
+ PREF_UNIT_FIFO_RP = 0x24, /* 8 bit FIFO read pointer */
+ PREF_UNIT_FIFO_WM = 0x28, /* 8 bit FIFO watermark */
+ PREF_UNIT_FIFO_LEV = 0x2c, /* 8 bit FIFO level */
+
+ PREF_UNIT_MASK_IDX = 0x0fff,
+};
+#define Y2_QADDR(q,reg) (Y2_B8_PREF_REGS + (q) + (reg))
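+
+/*
+ * Example (illustrative): the prefetch unit of a queue sits at the queue
+ * offset plus Y2_B8_PREF_REGS, so its control register is addressed as
+ * Y2_QADDR(Q_R1, PREF_UNIT_CTRL) for receive queue 1.
+ */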
+
+/* RAM Buffer Register Offsets */
+enum {
+
+ RB_START = 0x00,/* 32 bit RAM Buffer Start Address */
+ RB_END = 0x04,/* 32 bit RAM Buffer End Address */
+ RB_WP = 0x08,/* 32 bit RAM Buffer Write Pointer */
+ RB_RP = 0x0c,/* 32 bit RAM Buffer Read Pointer */
+ RB_RX_UTPP = 0x10,/* 32 bit Rx Upper Threshold, Pause Packet */
+ RB_RX_LTPP = 0x14,/* 32 bit Rx Lower Threshold, Pause Packet */
+ RB_RX_UTHP = 0x18,/* 32 bit Rx Upper Threshold, High Prio */
+ RB_RX_LTHP = 0x1c,/* 32 bit Rx Lower Threshold, High Prio */
+ /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */
+ RB_PC = 0x20,/* 32 bit RAM Buffer Packet Counter */
+ RB_LEV = 0x24,/* 32 bit RAM Buffer Level Register */
+ RB_CTRL = 0x28,/* 32 bit RAM Buffer Control Register */
+ RB_TST1 = 0x29,/* 8 bit RAM Buffer Test Register 1 */
+ RB_TST2 = 0x2a,/* 8 bit RAM Buffer Test Register 2 */
+};
+
+/* Receive and Transmit Queues */
+enum {
+ Q_R1 = 0x0000, /* Receive Queue 1 */
+ Q_R2 = 0x0080, /* Receive Queue 2 */
+ Q_XS1 = 0x0200, /* Synchronous Transmit Queue 1 */
+ Q_XA1 = 0x0280, /* Asynchronous Transmit Queue 1 */
+ Q_XS2 = 0x0300, /* Synchronous Transmit Queue 2 */
+ Q_XA2 = 0x0380, /* Asynchronous Transmit Queue 2 */
+};
+
+/* Different PHY Types */
+enum {
+ PHY_ADDR_MARV = 0,
+};
+
+#define RB_ADDR(offs, queue) ((u16) B16_RAM_REGS + (queue) + (offs))
+
+
+enum {
+ LNK_SYNC_INI = 0x0c30,/* 32 bit Link Sync Cnt Init Value */
+ LNK_SYNC_VAL = 0x0c34,/* 32 bit Link Sync Cnt Current Value */
+ LNK_SYNC_CTRL = 0x0c38,/* 8 bit Link Sync Cnt Control Register */
+ LNK_SYNC_TST = 0x0c39,/* 8 bit Link Sync Cnt Test Register */
+
+ LNK_LED_REG = 0x0c3c,/* 8 bit Link LED Register */
+
+/* Receive GMAC FIFO (YUKON and Yukon-2) */
+
+ RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */
+ RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */
+ RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */
+ RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */
+ RX_GMF_FL_THR = 0x0c50,/* 16 bit Rx GMAC FIFO Flush Threshold */
+ RX_GMF_FL_CTRL = 0x0c52,/* 16 bit Rx GMAC FIFO Flush Control */
+ RX_GMF_TR_THR = 0x0c54,/* 32 bit Rx Truncation Threshold (Yukon-2) */
+ RX_GMF_UP_THR = 0x0c58,/* 16 bit Rx Upper Pause Thr (Yukon-EC_U) */
+ RX_GMF_LP_THR = 0x0c5a,/* 16 bit Rx Lower Pause Thr (Yukon-EC_U) */
+ RX_GMF_VLAN = 0x0c5c,/* 32 bit Rx VLAN Type Register (Yukon-2) */
+ RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */
+
+ RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */
+
+ RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */
+
+ RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */
+};
+
+
+/* Q_BC 32 bit Current Byte Counter */
+
+/* BMU Control Status Registers */
+/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */
+/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */
+/* B0_XA1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */
+/* B0_XS1_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */
+/* B0_XA2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */
+/* B0_XS2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */
+/* Q_CSR 32 bit BMU Control/Status Register */
+
+/* Rx BMU Control / Status Registers (Yukon-2) */
+enum {
+ BMU_IDLE = 1<<31, /* BMU Idle State */
+ BMU_RX_TCP_PKT = 1<<30, /* Rx TCP Packet (when RSS Hash enabled) */
+ BMU_RX_IP_PKT = 1<<29, /* Rx IP Packet (when RSS Hash enabled) */
+
+ BMU_ENA_RX_RSS_HASH = 1<<15, /* Enable Rx RSS Hash */
+ BMU_DIS_RX_RSS_HASH = 1<<14, /* Disable Rx RSS Hash */
+ BMU_ENA_RX_CHKSUM = 1<<13, /* Enable Rx TCP/IP Checksum Check */
+ BMU_DIS_RX_CHKSUM = 1<<12, /* Disable Rx TCP/IP Checksum Check */
+ BMU_CLR_IRQ_PAR = 1<<11, /* Clear IRQ on Parity errors (Rx) */
+ BMU_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment. error (Tx) */
+ BMU_CLR_IRQ_CHK = 1<<10, /* Clear IRQ Check */
+ BMU_STOP = 1<<9, /* Stop Rx/Tx Queue */
+ BMU_START = 1<<8, /* Start Rx/Tx Queue */
+ BMU_FIFO_OP_ON = 1<<7, /* FIFO Operational On */
+ BMU_FIFO_OP_OFF = 1<<6, /* FIFO Operational Off */
+ BMU_FIFO_ENA = 1<<5, /* Enable FIFO */
+ BMU_FIFO_RST = 1<<4, /* Reset FIFO */
+ BMU_OP_ON = 1<<3, /* BMU Operational On */
+ BMU_OP_OFF = 1<<2, /* BMU Operational Off */
+ BMU_RST_CLR = 1<<1, /* Clear BMU Reset (Enable) */
+ BMU_RST_SET = 1<<0, /* Set BMU Reset */
+
+ BMU_CLR_RESET = BMU_FIFO_RST | BMU_OP_OFF | BMU_RST_CLR,
+ BMU_OPER_INIT = BMU_CLR_IRQ_PAR | BMU_CLR_IRQ_CHK | BMU_START |
+ BMU_FIFO_ENA | BMU_OP_ON,
+
+ BMU_WM_DEFAULT = 0x600,
+ BMU_WM_PEX = 0x80,
+};
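+
+/*
+ * Bring-up sketch (assumption, write helper illustrative): a BMU is taken
+ * out of reset and started with the two composite values defined above:
+ *
+ *	my_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
+ *	my_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
+ */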
+
+/* Tx BMU Control / Status Registers (Yukon-2) */
+ /* Bit 31: same as for Rx */
+enum {
+ BMU_TX_IPIDINCR_ON = 1<<13, /* Enable IP ID Increment */
+ BMU_TX_IPIDINCR_OFF = 1<<12, /* Disable IP ID Increment */
+ BMU_TX_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment length mismatch */
+};
+
+/* TBMU_TEST 0x06B8 Transmit BMU Test Register */
+enum {
+ TBMU_TEST_BMU_TX_CHK_AUTO_OFF = 1<<31, /* BMU Tx Checksum Auto Calculation Disable */
+ TBMU_TEST_BMU_TX_CHK_AUTO_ON = 1<<30, /* BMU Tx Checksum Auto Calculation Enable */
+	TBMU_TEST_HOME_ADD_PAD_FIX1_EN	= 1<<29, /* Home Address Padding FIX1 Enable */
+	TBMU_TEST_HOME_ADD_PAD_FIX1_DIS	= 1<<28, /* Home Address Padding FIX1 Disable */
+ TBMU_TEST_ROUTING_ADD_FIX_EN = 1<<27, /* Routing Address Fix Enable */
+ TBMU_TEST_ROUTING_ADD_FIX_DIS = 1<<26, /* Routing Address Fix Disable */
+ TBMU_TEST_HOME_ADD_FIX_EN = 1<<25, /* Home address checksum fix enable */
+ TBMU_TEST_HOME_ADD_FIX_DIS = 1<<24, /* Home address checksum fix disable */
+
+ TBMU_TEST_TEST_RSPTR_ON = 1<<22, /* Testmode Shadow Read Ptr On */
+ TBMU_TEST_TEST_RSPTR_OFF = 1<<21, /* Testmode Shadow Read Ptr Off */
+ TBMU_TEST_TESTSTEP_RSPTR = 1<<20, /* Teststep Shadow Read Ptr */
+
+ TBMU_TEST_TEST_RPTR_ON = 1<<18, /* Testmode Read Ptr On */
+ TBMU_TEST_TEST_RPTR_OFF = 1<<17, /* Testmode Read Ptr Off */
+ TBMU_TEST_TESTSTEP_RPTR = 1<<16, /* Teststep Read Ptr */
+
+ TBMU_TEST_TEST_WSPTR_ON = 1<<14, /* Testmode Shadow Write Ptr On */
+ TBMU_TEST_TEST_WSPTR_OFF = 1<<13, /* Testmode Shadow Write Ptr Off */
+ TBMU_TEST_TESTSTEP_WSPTR = 1<<12, /* Teststep Shadow Write Ptr */
+
+ TBMU_TEST_TEST_WPTR_ON = 1<<10, /* Testmode Write Ptr On */
+ TBMU_TEST_TEST_WPTR_OFF = 1<<9, /* Testmode Write Ptr Off */
+ TBMU_TEST_TESTSTEP_WPTR = 1<<8, /* Teststep Write Ptr */
+
+ TBMU_TEST_TEST_REQ_NB_ON = 1<<6, /* Testmode Req Nbytes/Addr On */
+ TBMU_TEST_TEST_REQ_NB_OFF = 1<<5, /* Testmode Req Nbytes/Addr Off */
+ TBMU_TEST_TESTSTEP_REQ_NB = 1<<4, /* Teststep Req Nbytes/Addr */
+
+ TBMU_TEST_TEST_DONE_IDX_ON = 1<<2, /* Testmode Done Index On */
+ TBMU_TEST_TEST_DONE_IDX_OFF = 1<<1, /* Testmode Done Index Off */
+ TBMU_TEST_TESTSTEP_DONE_IDX = 1<<0, /* Teststep Done Index */
+};
+
+/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
+/* PREF_UNIT_CTRL 32 bit Prefetch Control register */
+enum {
+ PREF_UNIT_OP_ON = 1<<3, /* prefetch unit operational */
+ PREF_UNIT_OP_OFF = 1<<2, /* prefetch unit not operational */
+ PREF_UNIT_RST_CLR = 1<<1, /* Clear Prefetch Unit Reset */
+ PREF_UNIT_RST_SET = 1<<0, /* Set Prefetch Unit Reset */
+};
+
+/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */
+/* RB_START 32 bit RAM Buffer Start Address */
+/* RB_END 32 bit RAM Buffer End Address */
+/* RB_WP 32 bit RAM Buffer Write Pointer */
+/* RB_RP 32 bit RAM Buffer Read Pointer */
+/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */
+/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */
+/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */
+/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */
+/* RB_PC 32 bit RAM Buffer Packet Counter */
+/* RB_LEV 32 bit RAM Buffer Level Register */
+
+#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */
+/* RB_TST2 8 bit RAM Buffer Test Register 2 */
+/* RB_TST1 8 bit RAM Buffer Test Register 1 */
+
+/* RB_CTRL 8 bit RAM Buffer Control Register */
+enum {
+ RB_ENA_STFWD = 1<<5, /* Enable Store & Forward */
+ RB_DIS_STFWD = 1<<4, /* Disable Store & Forward */
+ RB_ENA_OP_MD = 1<<3, /* Enable Operation Mode */
+ RB_DIS_OP_MD = 1<<2, /* Disable Operation Mode */
+ RB_RST_CLR = 1<<1, /* Clear RAM Buf STM Reset */
+ RB_RST_SET = 1<<0, /* Set RAM Buf STM Reset */
+};
+
+
+/* Transmit GMAC FIFO (YUKON only) */
+enum {
+ TX_GMF_EA = 0x0d40,/* 32 bit Tx GMAC FIFO End Address */
+ TX_GMF_AE_THR = 0x0d44,/* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/
+ TX_GMF_CTRL_T = 0x0d48,/* 32 bit Tx GMAC FIFO Control/Test */
+
+ TX_GMF_WP = 0x0d60,/* 32 bit Tx GMAC FIFO Write Pointer */
+ TX_GMF_WSP = 0x0d64,/* 32 bit Tx GMAC FIFO Write Shadow Ptr. */
+ TX_GMF_WLEV = 0x0d68,/* 32 bit Tx GMAC FIFO Write Level */
+
+ TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */
+ TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */
+ TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */
+
+ /* Threshold values for Yukon-EC Ultra and Extreme */
+ ECU_AE_THR = 0x0070, /* Almost Empty Threshold */
+ ECU_TXFF_LEV = 0x01a0, /* Tx BMU FIFO Level */
+ ECU_JUMBO_WM = 0x0080, /* Jumbo Mode Watermark */
+};
+
+/* Descriptor Poll Timer Registers */
+enum {
+ B28_DPT_INI = 0x0e00,/* 24 bit Descriptor Poll Timer Init Val */
+ B28_DPT_VAL = 0x0e04,/* 24 bit Descriptor Poll Timer Curr Val */
+ B28_DPT_CTRL = 0x0e08,/* 8 bit Descriptor Poll Timer Ctrl Reg */
+
+ B28_DPT_TST = 0x0e0a,/* 8 bit Descriptor Poll Timer Test Reg */
+};
+
+/* Time Stamp Timer Registers (YUKON only) */
+enum {
+ GMAC_TI_ST_VAL = 0x0e14,/* 32 bit Time Stamp Timer Curr Val */
+ GMAC_TI_ST_CTRL = 0x0e18,/* 8 bit Time Stamp Timer Ctrl Reg */
+ GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */
+};
+
+/* Polling Unit Registers (Yukon-2 only) */
+enum {
+ POLL_CTRL = 0x0e20, /* 32 bit Polling Unit Control Reg */
+ POLL_LAST_IDX = 0x0e24,/* 16 bit Polling Unit List Last Index */
+
+ POLL_LIST_ADDR_LO= 0x0e28,/* 32 bit Poll. List Start Addr (low) */
+ POLL_LIST_ADDR_HI= 0x0e2c,/* 32 bit Poll. List Start Addr (high) */
+};
+
+enum {
+ SMB_CFG = 0x0e40, /* 32 bit SMBus Config Register */
+ SMB_CSR = 0x0e44, /* 32 bit SMBus Control/Status Register */
+};
+
+enum {
+ CPU_WDOG = 0x0e48, /* 32 bit Watchdog Register */
+ CPU_CNTR = 0x0e4C, /* 32 bit Counter Register */
+ CPU_TIM = 0x0e50,/* 32 bit Timer Compare Register */
+ CPU_AHB_ADDR = 0x0e54, /* 32 bit CPU AHB Debug Register */
+ CPU_AHB_WDATA = 0x0e58, /* 32 bit CPU AHB Debug Register */
+ CPU_AHB_RDATA = 0x0e5C, /* 32 bit CPU AHB Debug Register */
+ HCU_MAP_BASE = 0x0e60, /* 32 bit Reset Mapping Base */
+ CPU_AHB_CTRL = 0x0e64, /* 32 bit CPU AHB Debug Register */
+ HCU_CCSR = 0x0e68, /* 32 bit CPU Control and Status Register */
+ HCU_HCSR = 0x0e6C, /* 32 bit Host Control and Status Register */
+};
+
+/* ASF Subsystem Registers (Yukon-2 only) */
+enum {
+ B28_Y2_SMB_CONFIG = 0x0e40,/* 32 bit ASF SMBus Config Register */
+ B28_Y2_SMB_CSD_REG = 0x0e44,/* 32 bit ASF SMB Control/Status/Data */
+ B28_Y2_ASF_IRQ_V_BASE=0x0e60,/* 32 bit ASF IRQ Vector Base */
+
+ B28_Y2_ASF_STAT_CMD= 0x0e68,/* 32 bit ASF Status and Command Reg */
+ B28_Y2_ASF_HOST_COM= 0x0e6c,/* 32 bit ASF Host Communication Reg */
+ B28_Y2_DATA_REG_1 = 0x0e70,/* 32 bit ASF/Host Data Register 1 */
+ B28_Y2_DATA_REG_2 = 0x0e74,/* 32 bit ASF/Host Data Register 2 */
+ B28_Y2_DATA_REG_3 = 0x0e78,/* 32 bit ASF/Host Data Register 3 */
+ B28_Y2_DATA_REG_4 = 0x0e7c,/* 32 bit ASF/Host Data Register 4 */
+};
+
+/* Status BMU Registers (Yukon-2 only)*/
+enum {
+ STAT_CTRL = 0x0e80,/* 32 bit Status BMU Control Reg */
+ STAT_LAST_IDX = 0x0e84,/* 16 bit Status BMU Last Index */
+
+ STAT_LIST_ADDR_LO= 0x0e88,/* 32 bit Status List Start Addr (low) */
+ STAT_LIST_ADDR_HI= 0x0e8c,/* 32 bit Status List Start Addr (high) */
+ STAT_TXA1_RIDX = 0x0e90,/* 16 bit Status TxA1 Report Index Reg */
+ STAT_TXS1_RIDX = 0x0e92,/* 16 bit Status TxS1 Report Index Reg */
+ STAT_TXA2_RIDX = 0x0e94,/* 16 bit Status TxA2 Report Index Reg */
+ STAT_TXS2_RIDX = 0x0e96,/* 16 bit Status TxS2 Report Index Reg */
+ STAT_TX_IDX_TH = 0x0e98,/* 16 bit Status Tx Index Threshold Reg */
+ STAT_PUT_IDX = 0x0e9c,/* 16 bit Status Put Index Reg */
+
+/* FIFO Control/Status Registers (Yukon-2 only)*/
+ STAT_FIFO_WP = 0x0ea0,/* 8 bit Status FIFO Write Pointer Reg */
+ STAT_FIFO_RP = 0x0ea4,/* 8 bit Status FIFO Read Pointer Reg */
+ STAT_FIFO_RSP = 0x0ea6,/* 8 bit Status FIFO Read Shadow Ptr */
+ STAT_FIFO_LEVEL = 0x0ea8,/* 8 bit Status FIFO Level Reg */
+ STAT_FIFO_SHLVL = 0x0eaa,/* 8 bit Status FIFO Shadow Level Reg */
+ STAT_FIFO_WM = 0x0eac,/* 8 bit Status FIFO Watermark Reg */
+ STAT_FIFO_ISR_WM= 0x0ead,/* 8 bit Status FIFO ISR Watermark Reg */
+
+/* Level and ISR Timer Registers (Yukon-2 only)*/
+ STAT_LEV_TIMER_INI= 0x0eb0,/* 32 bit Level Timer Init. Value Reg */
+ STAT_LEV_TIMER_CNT= 0x0eb4,/* 32 bit Level Timer Counter Reg */
+ STAT_LEV_TIMER_CTRL= 0x0eb8,/* 8 bit Level Timer Control Reg */
+ STAT_LEV_TIMER_TEST= 0x0eb9,/* 8 bit Level Timer Test Reg */
+ STAT_TX_TIMER_INI = 0x0ec0,/* 32 bit Tx Timer Init. Value Reg */
+ STAT_TX_TIMER_CNT = 0x0ec4,/* 32 bit Tx Timer Counter Reg */
+ STAT_TX_TIMER_CTRL = 0x0ec8,/* 8 bit Tx Timer Control Reg */
+ STAT_TX_TIMER_TEST = 0x0ec9,/* 8 bit Tx Timer Test Reg */
+ STAT_ISR_TIMER_INI = 0x0ed0,/* 32 bit ISR Timer Init. Value Reg */
+ STAT_ISR_TIMER_CNT = 0x0ed4,/* 32 bit ISR Timer Counter Reg */
+ STAT_ISR_TIMER_CTRL= 0x0ed8,/* 8 bit ISR Timer Control Reg */
+ STAT_ISR_TIMER_TEST= 0x0ed9,/* 8 bit ISR Timer Test Reg */
+};
+
+enum {
+ LINKLED_OFF = 0x01,
+ LINKLED_ON = 0x02,
+ LINKLED_LINKSYNC_OFF = 0x04,
+ LINKLED_LINKSYNC_ON = 0x08,
+ LINKLED_BLINK_OFF = 0x10,
+ LINKLED_BLINK_ON = 0x20,
+};
+
+/* GMAC and GPHY Control Registers (YUKON only) */
+enum {
+ GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */
+ GPHY_CTRL = 0x0f04,/* 32 bit GPHY Control Reg */
+ GMAC_IRQ_SRC = 0x0f08,/* 8 bit GMAC Interrupt Source Reg */
+ GMAC_IRQ_MSK = 0x0f0c,/* 8 bit GMAC Interrupt Mask Reg */
+ GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */
+
+/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
+ WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */
+ WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */
+ WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */
+ WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */
+ WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */
+
+/* WOL Pattern Length Registers (YUKON only) */
+ WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */
+ WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */
+
+/* WOL Pattern Counter Registers (YUKON only) */
+ WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */
+ WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */
+};
+#define WOL_REGS(port, x) (x + (port)*0x80)
+
+enum {
+ WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */
+ WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */
+};
+#define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400)
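+
+/*
+ * Example (illustrative): the WOL registers are banked per port, 0x80
+ * apart, and the pattern RAM blocks are 0x400 apart, so Link 2's WOL
+ * control register is WOL_REGS(1, WOL_CTRL_STAT) and its pattern RAM
+ * starts at WOL_PATT_RAM_BASE(1).
+ */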
+
+enum {
+ BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */
+ BASE_GMAC_2 = 0x3800,/* GMAC 2 registers */
+};
+
+/*
+ * Marvell PHY Registers, indirectly addressed over GMAC
+ */
+enum {
+ PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
+ PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */
+ PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
+ PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
+ PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
+	PHY_MARV_AUNE_LP	= 0x05,/* 16 bit r/o Link Partner Ability Reg */
+ PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
+ PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */
+ PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
+	/* Marvell-specific registers */
+ PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
+ PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
+ PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
+ PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */
+ PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */
+ PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */
+ PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */
+ PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */
+ PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */
+ PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */
+ PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */
+ PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */
+ PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */
+ PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */
+ PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. Stat Reg */
+ PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */
+ PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */
+ PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+ PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */
+ PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */
+ PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */
+ PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */
+ PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */
+};
+
+enum {
+ PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */
+ PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */
+ PHY_CT_SPS_LSB = 1<<13, /* Bit 13: Speed select, lower bit */
+ PHY_CT_ANE = 1<<12, /* Bit 12: Auto-Negotiation Enabled */
+ PHY_CT_PDOWN = 1<<11, /* Bit 11: Power Down Mode */
+ PHY_CT_ISOL = 1<<10, /* Bit 10: Isolate Mode */
+ PHY_CT_RE_CFG = 1<<9, /* Bit 9: (sc) Restart Auto-Negotiation */
+ PHY_CT_DUP_MD = 1<<8, /* Bit 8: Duplex Mode */
+ PHY_CT_COL_TST = 1<<7, /* Bit 7: Collision Test enabled */
+ PHY_CT_SPS_MSB = 1<<6, /* Bit 6: Speed select, upper bit */
+};
+
+enum {
+ PHY_CT_SP1000 = PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */
+ PHY_CT_SP100 = PHY_CT_SPS_LSB, /* enable speed of 100 Mbps */
+ PHY_CT_SP10 = 0, /* enable speed of 10 Mbps */
+};
+
+enum {
+ PHY_ST_EXT_ST = 1<<8, /* Bit 8: Extended Status Present */
+
+ PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */
+ PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */
+ PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occurred */
+ PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */
+ PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */
+ PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */
+ PHY_ST_EXT_REG = 1<<0, /* Bit 0: Extended Register available */
+};
+
+enum {
+ PHY_I1_OUI_MSK = 0x3f<<10, /* Bit 15..10: Organization Unique ID */
+ PHY_I1_MOD_NUM = 0x3f<<4, /* Bit 9.. 4: Model Number */
+ PHY_I1_REV_MSK = 0xf, /* Bit 3.. 0: Revision Number */
+};
+
+/* different Marvell PHY Ids */
+enum {
+ PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */
+
+ PHY_BCOM_ID1_A1 = 0x6041,
+ PHY_BCOM_ID1_B2 = 0x6043,
+ PHY_BCOM_ID1_C0 = 0x6044,
+ PHY_BCOM_ID1_C5 = 0x6047,
+
+ PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
+ PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */
+ PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
+ PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
+ PHY_MARV_ID1_FE = 0x0C83, /* Yukon-FE (PHY 88E3082 Rev.A1) */
+ PHY_MARV_ID1_ECU= 0x0CB0, /* Yukon-ECU (PHY 88E1149 Rev.B2?) */
+};
+
+/* Advertisement register bits */
+enum {
+ PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
+ PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
+ PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */
+
+ PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */
+ PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */
+	PHY_AN_100BASE4	= 1<<9, /* Bit 9: Try for 100Base-T4 */
+ PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */
+ PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */
+ PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */
+ PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */
+ PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */
+ PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
+ PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA,
+ PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL |
+ PHY_AN_100HALF | PHY_AN_100FULL,
+};
+
+/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
+/***** PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
+enum {
+ PHY_B_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
+ PHY_B_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
+ PHY_B_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
+ PHY_B_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */
+ PHY_B_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */
+ PHY_B_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */
+ /* Bit 9..8: reserved */
+ PHY_B_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */
+};
+
+/** Marvell-Specific */
+enum {
+ PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */
+ PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */
+ PHY_M_AN_RF = 1<<13, /* Remote Fault */
+
+ PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */
+ PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */
+ PHY_M_AN_100_T4 = 1<<9, /* Not cap. 100Base-T4 (always 0) */
+ PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */
+ PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */
+ PHY_M_AN_10_FD = 1<<6, /* Advertise 10Base-TX Full Duplex */
+ PHY_M_AN_10_HD = 1<<5, /* Advertise 10Base-TX Half Duplex */
+ PHY_M_AN_SEL_MSK =0x1f<<4, /* Bit 4.. 0: Selector Field Mask */
+};
+
+/* special defines for FIBER (88E1011S only) */
+enum {
+ PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */
+ PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */
+	PHY_M_AN_1000X_AHD	= 1<<6, /* Advertise 1000Base-X Half Duplex */
+	PHY_M_AN_1000X_AFD	= 1<<5, /* Advertise 1000Base-X Full Duplex */
+};
+
+/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */
+enum {
+ PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */
+ PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */
+ PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */
+ PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 7: both Pause Mode */
+};
+
+/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
+enum {
+ PHY_M_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */
+ PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */
+ PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */
+ PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */
+ PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */
+ PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */
+};
+
+/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/
+enum {
+ PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */
+ PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */
+ PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */
+ PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */
+ PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit 9.. 8: Energy Detect Mask */
+ PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */
+ PHY_M_PC_MDIX_MSK = 3<<5,/* Bit 6.. 5: MDI/MDIX Config. Mask */
+ PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */
+ PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */
+ PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */
+ PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */
+ PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */
+};
+
+enum {
+ PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */
+ PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */
+};
+
+#define PHY_M_PC_MDI_XMODE(x) (((u16)(x)<<5) & PHY_M_PC_MDIX_MSK)
+
+enum {
+ PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */
+ PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */
+ PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */
+};
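+
+/*
+ * Sketch (assumption, PHY access helpers illustrative): selecting automatic
+ * crossover is a read-modify-write of the two-bit MDI/MDIX field:
+ *
+ *	u16 ctrl = my_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+ *	ctrl &= ~PHY_M_PC_MDIX_MSK;
+ *	ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
+ *	my_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+ */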
+
+/* for Yukon-EC Ultra Gigabit Ethernet PHY (88E1149 only) */
+enum {
+ PHY_M_PC_COP_TX_DIS = 1<<3, /* Copper Transmitter Disable */
+ PHY_M_PC_POW_D_ENA = 1<<2, /* Power Down Enable */
+};
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+enum {
+ PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */
+ PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */
+	PHY_M_PC_DIS_NLP_CK	= 1<<13, /* Disable Normal Link Pulse (NLP) Check */
+	PHY_M_PC_ENA_LIP_NP	= 1<<12, /* Enable Link Partner Next Page Reg. */
+	PHY_M_PC_DIS_NLP_GN	= 1<<11, /* Disable Normal Link Pulse Generation */
+
+ PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */
+ PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */
+
+ PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */
+ PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */
+};
+
+/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/
+enum {
+ PHY_M_PS_SPEED_MSK = 3<<14, /* Bit 15..14: Speed Mask */
+ PHY_M_PS_SPEED_1000 = 1<<15, /* 10 = 1000 Mbps */
+ PHY_M_PS_SPEED_100 = 1<<14, /* 01 = 100 Mbps */
+ PHY_M_PS_SPEED_10 = 0, /* 00 = 10 Mbps */
+ PHY_M_PS_FULL_DUP = 1<<13, /* Full Duplex */
+ PHY_M_PS_PAGE_REC = 1<<12, /* Page Received */
+ PHY_M_PS_SPDUP_RES = 1<<11, /* Speed & Duplex Resolved */
+ PHY_M_PS_LINK_UP = 1<<10, /* Link Up */
+ PHY_M_PS_CABLE_MSK = 7<<7, /* Bit 9.. 7: Cable Length Mask */
+ PHY_M_PS_MDI_X_STAT = 1<<6, /* MDI Crossover Stat (1=MDIX) */
+ PHY_M_PS_DOWNS_STAT = 1<<5, /* Downshift Status (1=downsh.) */
+ PHY_M_PS_ENDET_STAT = 1<<4, /* Energy Detect Status (1=act) */
+ PHY_M_PS_TX_P_EN = 1<<3, /* Tx Pause Enabled */
+ PHY_M_PS_RX_P_EN = 1<<2, /* Rx Pause Enabled */
+ PHY_M_PS_POL_REV = 1<<1, /* Polarity Reversed */
+ PHY_M_PS_JABBER = 1<<0, /* Jabber */
+};
+
+#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN)
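+
+/*
+ * Helper sketch (an assumption, not from the original header): pause is
+ * fully resolved only when both the Tx and Rx pause bits are set in the
+ * PHY specific status value.
+ */
+static inline int phy_m_ps_pause_resolved(u16 phy_stat)
+{
+	return (phy_stat & PHY_M_PS_PAUSE_MSK) == PHY_M_PS_PAUSE_MSK;
+}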
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+enum {
+ PHY_M_PS_DTE_DETECT = 1<<15, /* Data Terminal Equipment (DTE) Detected */
+	PHY_M_PS_RES_SPEED	= 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps) */
+};
+
+enum {
+ PHY_M_IS_AN_ERROR = 1<<15, /* Auto-Negotiation Error */
+ PHY_M_IS_LSP_CHANGE = 1<<14, /* Link Speed Changed */
+ PHY_M_IS_DUP_CHANGE = 1<<13, /* Duplex Mode Changed */
+ PHY_M_IS_AN_PR = 1<<12, /* Page Received */
+ PHY_M_IS_AN_COMPL = 1<<11, /* Auto-Negotiation Completed */
+ PHY_M_IS_LST_CHANGE = 1<<10, /* Link Status Changed */
+ PHY_M_IS_SYMB_ERROR = 1<<9, /* Symbol Error */
+ PHY_M_IS_FALSE_CARR = 1<<8, /* False Carrier */
+ PHY_M_IS_FIFO_ERROR = 1<<7, /* FIFO Overflow/Underrun Error */
+ PHY_M_IS_MDI_CHANGE = 1<<6, /* MDI Crossover Changed */
+ PHY_M_IS_DOWNSH_DET = 1<<5, /* Downshift Detected */
+ PHY_M_IS_END_CHANGE = 1<<4, /* Energy Detect Changed */
+
+ PHY_M_IS_DTE_CHANGE = 1<<2, /* DTE Power Det. Status Changed */
+ PHY_M_IS_POL_CHANGE = 1<<1, /* Polarity Changed */
+ PHY_M_IS_JABBER = 1<<0, /* Jabber */
+
+ PHY_M_DEF_MSK = PHY_M_IS_LSP_CHANGE | PHY_M_IS_LST_CHANGE
+ | PHY_M_IS_DUP_CHANGE,
+ PHY_M_AN_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL,
+};
+
+
+/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/
+enum {
+ PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */
+ PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */
+
+ PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */
+ PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */
+ /* (88E1011 only) */
+ PHY_M_EC_S_DSC_MSK = 3<<8,/* Bit 9.. 8: Slave Downshift Counter */
+ /* (88E1011 only) */
+ PHY_M_EC_M_DSC_MSK2 = 7<<9,/* Bit 11.. 9: Master Downshift Counter */
+ /* (88E1111 only) */
+ PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */
+ /* !!! Errata in spec. (1 = disable) */
+	PHY_M_EC_RX_TIM_CT	= 1<<7, /* RGMII Rx Timing Control */
+ PHY_M_EC_MAC_S_MSK = 7<<4,/* Bit 6.. 4: Def. MAC interface speed */
+ PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */
+ PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */
+ PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */
+ PHY_M_EC_TRANS_DIS = 1<<0, /* Transmitter Disable (88E1111 only) */
+
+ PHY_M_10B_TE_ENABLE = 1<<7, /* 10Base-Te Enable (88E8079 and above) */
+};
+#define PHY_M_EC_M_DSC(x) ((u16)(x)<<10 & PHY_M_EC_M_DSC_MSK)
+ /* 00=1x; 01=2x; 10=3x; 11=4x */
+#define PHY_M_EC_S_DSC(x) ((u16)(x)<<8 & PHY_M_EC_S_DSC_MSK)
+ /* 00=dis; 01=1x; 10=2x; 11=3x */
+#define PHY_M_EC_DSC_2(x) ((u16)(x)<<9 & PHY_M_EC_M_DSC_MSK2)
+ /* 000=1x; 001=2x; 010=3x; 011=4x */
+#define PHY_M_EC_MAC_S(x) ((u16)(x)<<4 & PHY_M_EC_MAC_S_MSK)
+ /* 01X=0; 110=2.5; 111=25 (MHz) */
+
+/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
+enum {
+ PHY_M_PC_DIS_LINK_Pa = 1<<15,/* Disable Link Pulses */
+ PHY_M_PC_DSC_MSK = 7<<12,/* Bit 14..12: Downshift Counter */
+ PHY_M_PC_DOWN_S_ENA = 1<<11,/* Downshift Enable */
+};
+/* !!! Errata in spec. (1 = disable) */
+
+#define PHY_M_PC_DSC(x) (((u16)(x)<<12) & PHY_M_PC_DSC_MSK)
+ /* 100=5x; 101=6x; 110=7x; 111=8x */
+enum {
+ MAC_TX_CLK_0_MHZ = 2,
+ MAC_TX_CLK_2_5_MHZ = 6,
+ MAC_TX_CLK_25_MHZ = 7,
+};
+
+/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/
+enum {
+ PHY_M_LEDC_DIS_LED = 1<<15, /* Disable LED */
+ PHY_M_LEDC_PULS_MSK = 7<<12,/* Bit 14..12: Pulse Stretch Mask */
+ PHY_M_LEDC_F_INT = 1<<11, /* Force Interrupt */
+ PHY_M_LEDC_BL_R_MSK = 7<<8,/* Bit 10.. 8: Blink Rate Mask */
+ PHY_M_LEDC_DP_C_LSB = 1<<7, /* Duplex Control (LSB, 88E1111 only) */
+ PHY_M_LEDC_TX_C_LSB = 1<<6, /* Tx Control (LSB, 88E1111 only) */
+ PHY_M_LEDC_LK_C_MSK = 7<<3,/* Bit 5.. 3: Link Control Mask */
+ /* (88E1111 only) */
+};
+
+enum {
+ PHY_M_LEDC_LINK_MSK = 3<<3,/* Bit 4.. 3: Link Control Mask */
+ /* (88E1011 only) */
+ PHY_M_LEDC_DP_CTRL = 1<<2, /* Duplex Control */
+ PHY_M_LEDC_DP_C_MSB = 1<<2, /* Duplex Control (MSB, 88E1111 only) */
+ PHY_M_LEDC_RX_CTRL = 1<<1, /* Rx Activity / Link */
+ PHY_M_LEDC_TX_CTRL = 1<<0, /* Tx Activity / Link */
+ PHY_M_LEDC_TX_C_MSB = 1<<0, /* Tx Control (MSB, 88E1111 only) */
+};
+
+#define PHY_M_LED_PULS_DUR(x) (((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK)
+
+/***** PHY_MARV_PHY_STAT (page 3)16 bit r/w Polarity Control Reg. *****/
+enum {
+ PHY_M_POLC_LS1M_MSK = 0xf<<12, /* Bit 15..12: LOS,STAT1 Mix % Mask */
+ PHY_M_POLC_IS0M_MSK = 0xf<<8, /* Bit 11.. 8: INIT,STAT0 Mix % Mask */
+ PHY_M_POLC_LOS_MSK = 0x3<<6, /* Bit 7.. 6: LOS Pol. Ctrl. Mask */
+ PHY_M_POLC_INIT_MSK = 0x3<<4, /* Bit 5.. 4: INIT Pol. Ctrl. Mask */
+ PHY_M_POLC_STA1_MSK = 0x3<<2, /* Bit 3.. 2: STAT1 Pol. Ctrl. Mask */
+ PHY_M_POLC_STA0_MSK = 0x3, /* Bit 1.. 0: STAT0 Pol. Ctrl. Mask */
+};
+
+#define PHY_M_POLC_LS1_P_MIX(x) (((x)<<12) & PHY_M_POLC_LS1M_MSK)
+#define PHY_M_POLC_IS0_P_MIX(x) (((x)<<8) & PHY_M_POLC_IS0M_MSK)
+#define PHY_M_POLC_LOS_CTRL(x) (((x)<<6) & PHY_M_POLC_LOS_MSK)
+#define PHY_M_POLC_INIT_CTRL(x) (((x)<<4) & PHY_M_POLC_INIT_MSK)
+#define PHY_M_POLC_STA1_CTRL(x) (((x)<<2) & PHY_M_POLC_STA1_MSK)
+#define PHY_M_POLC_STA0_CTRL(x) (((x)<<0) & PHY_M_POLC_STA0_MSK)
+
+enum {
+ PULS_NO_STR = 0,/* no pulse stretching */
+ PULS_21MS = 1,/* 21 ms to 42 ms */
+ PULS_42MS = 2,/* 42 ms to 84 ms */
+ PULS_84MS = 3,/* 84 ms to 170 ms */
+ PULS_170MS = 4,/* 170 ms to 340 ms */
+ PULS_340MS = 5,/* 340 ms to 670 ms */
+ PULS_670MS = 6,/* 670 ms to 1.3 s */
+ PULS_1300MS = 7,/* 1.3 s to 2.7 s */
+};
+
+#define PHY_M_LED_BLINK_RT(x) (((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK)
+
+enum {
+ BLINK_42MS = 0,/* 42 ms */
+ BLINK_84MS = 1,/* 84 ms */
+ BLINK_170MS = 2,/* 170 ms */
+ BLINK_340MS = 3,/* 340 ms */
+ BLINK_670MS = 4,/* 670 ms */
+};
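+
+/*
+ * Composition sketch (assumption, PHY write helper illustrative): pulse
+ * stretching and blink rate are independent fields of PHY_MARV_LED_CTRL
+ * and are typically programmed together, e.g.
+ *
+ *	my_phy_write(hw, port, PHY_MARV_LED_CTRL,
+ *		     PHY_M_LED_PULS_DUR(PULS_170MS) |
+ *		     PHY_M_LED_BLINK_RT(BLINK_84MS));
+ */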
+
+/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/
+#define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */
+
+#define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */
+#define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */
+#define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */
+#define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */
+#define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */
+#define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */
+
+enum led_mode {
+ MO_LED_NORM = 0,
+ MO_LED_BLINK = 1,
+ MO_LED_OFF = 2,
+ MO_LED_ON = 3,
+};
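+
+/*
+ * Helper sketch (an assumption, not from the original header): forcing the
+ * same override mode onto every LED means replicating the 2-bit mode into
+ * each field of PHY_MARV_LED_OVER.
+ */
+static inline u16 phy_m_led_mo_all(enum led_mode mode)
+{
+	return PHY_M_LED_MO_DUP(mode) | PHY_M_LED_MO_10(mode) |
+	       PHY_M_LED_MO_100(mode) | PHY_M_LED_MO_1000(mode) |
+	       PHY_M_LED_MO_RX(mode) | PHY_M_LED_MO_TX(mode);
+}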
+
+/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/
+enum {
+ PHY_M_EC2_FI_IMPED = 1<<6, /* Fiber Input Impedance */
+ PHY_M_EC2_FO_IMPED = 1<<5, /* Fiber Output Impedance */
+ PHY_M_EC2_FO_M_CLK = 1<<4, /* Fiber Mode Clock Enable */
+ PHY_M_EC2_FO_BOOST = 1<<3, /* Fiber Output Boost */
+ PHY_M_EC2_FO_AM_MSK = 7,/* Bit 2.. 0: Fiber Output Amplitude */
+};
+
+/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/
+enum {
+ PHY_M_FC_AUTO_SEL = 1<<15, /* Fiber/Copper Auto Sel. Dis. */
+ PHY_M_FC_AN_REG_ACC = 1<<14, /* Fiber/Copper AN Reg. Access */
+ PHY_M_FC_RESOLUTION = 1<<13, /* Fiber/Copper Resolution */
+ PHY_M_SER_IF_AN_BP = 1<<12, /* Ser. IF AN Bypass Enable */
+ PHY_M_SER_IF_BP_ST = 1<<11, /* Ser. IF AN Bypass Status */
+ PHY_M_IRQ_POLARITY = 1<<10, /* IRQ polarity */
+ PHY_M_DIS_AUT_MED = 1<<9, /* Disable Aut. Medium Reg. Selection */
+ /* (88E1111 only) */
+
+ PHY_M_UNDOC1 = 1<<7, /* undocumented bit !! */
+ PHY_M_DTE_POW_STAT = 1<<4, /* DTE Power Status (88E1111 only) */
+ PHY_M_MODE_MASK = 0xf, /* Bit 3.. 0: copy of HWCFG MODE[3:0] */
+};
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+/***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/
+ /* Bit 15..12: reserved (used internally) */
+enum {
+ PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */
+ PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit 7.. 4: LED1 Mask (ACT) */
+ PHY_M_FELP_LED0_MSK = 0xf, /* Bit 3.. 0: LED0 Mask (SPEED) */
+};
+
+#define PHY_M_FELP_LED2_CTRL(x) (((u16)(x)<<8) & PHY_M_FELP_LED2_MSK)
+#define PHY_M_FELP_LED1_CTRL(x) (((u16)(x)<<4) & PHY_M_FELP_LED1_MSK)
+#define PHY_M_FELP_LED0_CTRL(x) (((u16)(x)<<0) & PHY_M_FELP_LED0_MSK)
+
+enum {
+ LED_PAR_CTRL_COLX = 0x00,
+ LED_PAR_CTRL_ERROR = 0x01,
+ LED_PAR_CTRL_DUPLEX = 0x02,
+ LED_PAR_CTRL_DP_COL = 0x03,
+ LED_PAR_CTRL_SPEED = 0x04,
+ LED_PAR_CTRL_LINK = 0x05,
+ LED_PAR_CTRL_TX = 0x06,
+ LED_PAR_CTRL_RX = 0x07,
+ LED_PAR_CTRL_ACT = 0x08,
+ LED_PAR_CTRL_LNK_RX = 0x09,
+ LED_PAR_CTRL_LNK_AC = 0x0a,
+ LED_PAR_CTRL_ACT_BL = 0x0b,
+ LED_PAR_CTRL_TX_BL = 0x0c,
+ LED_PAR_CTRL_RX_BL = 0x0d,
+ LED_PAR_CTRL_COL_BL = 0x0e,
+ LED_PAR_CTRL_INACT = 0x0f
+};
+
+/***** PHY_MARV_FE_SPEC_2 16 bit r/w Specific Control Reg. 2 *****/
+enum {
+ PHY_M_FESC_DIS_WAIT = 1<<2, /* Disable TDR Waiting Period */
+ PHY_M_FESC_ENA_MCLK = 1<<1, /* Enable MAC Rx Clock in sleep mode */
+ PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */
+};
+
+/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
+/***** PHY_MARV_PHY_CTRL (page 1) 16 bit r/w Fiber Specific Ctrl *****/
+enum {
+ PHY_M_FIB_FORCE_LNK = 1<<10,/* Force Link Good */
+ PHY_M_FIB_SIGD_POL = 1<<9, /* SIGDET Polarity */
+ PHY_M_FIB_TX_DIS = 1<<3, /* Transmitter Disable */
+};
+
+/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
+/***** PHY_MARV_PHY_CTRL (page 2) 16 bit r/w MAC Specific Ctrl *****/
+enum {
+ PHY_M_MAC_MD_MSK = 7<<7, /* Bit 9.. 7: Mode Select Mask */
+ PHY_M_MAC_GMIF_PUP = 1<<3, /* GMII Power Up (88E1149 only) */
+ PHY_M_MAC_MD_AUTO = 3,/* Auto Copper/1000Base-X */
+ PHY_M_MAC_MD_COPPER = 5,/* Copper only */
+ PHY_M_MAC_MD_1000BX = 7,/* 1000Base-X only */
+};
+#define PHY_M_MAC_MODE_SEL(x) (((x)<<7) & PHY_M_MAC_MD_MSK)
+
+/***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/
+enum {
+ PHY_M_LEDC_LOS_MSK = 0xf<<12,/* Bit 15..12: LOS LED Ctrl. Mask */
+ PHY_M_LEDC_INIT_MSK = 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */
+ PHY_M_LEDC_STA1_MSK = 0xf<<4,/* Bit 7.. 4: STAT1 LED Ctrl. Mask */
+ PHY_M_LEDC_STA0_MSK = 0xf, /* Bit 3.. 0: STAT0 LED Ctrl. Mask */
+};
+
+#define PHY_M_LEDC_LOS_CTRL(x) (((x)<<12) & PHY_M_LEDC_LOS_MSK)
+#define PHY_M_LEDC_INIT_CTRL(x) (((x)<<8) & PHY_M_LEDC_INIT_MSK)
+#define PHY_M_LEDC_STA1_CTRL(x) (((x)<<4) & PHY_M_LEDC_STA1_MSK)
+#define PHY_M_LEDC_STA0_CTRL(x) (((x)<<0) & PHY_M_LEDC_STA0_MSK)
+
+/* GMAC registers */
+/* Port Registers */
+enum {
+ GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */
+ GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */
+ GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */
+ GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */
+ GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */
+ GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */
+ GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */
+/* Source Address Registers */
+ GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */
+ GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */
+ GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */
+ GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */
+ GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */
+ GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */
+
+/* Multicast Address Hash Registers */
+ GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */
+ GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */
+ GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */
+ GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */
+
+/* Interrupt Source Registers */
+ GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */
+ GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */
+ GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */
+
+/* Interrupt Mask Registers */
+ GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */
+ GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */
+ GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. IRQ Mask */
+
+/* Serial Management Interface (SMI) Registers */
+ GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */
+ GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */
+ GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */
+/* MIB Counters */
+ GM_MIB_CNT_BASE = 0x0100, /* Base Address of MIB Counters */
+ GM_MIB_CNT_END = 0x025C, /* Last MIB counter */
+};
+
+
+/*
+ * MIB Counters base address definitions (low word) -
+ * use offset 4 for access to high word (32 bit r/o)
+ */
+enum {
+ GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */
+ GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */
+ GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */
+ GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */
+ GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. Error */
+
+ GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */
+ GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */
+ GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */
+ GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */
+ GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */
+ GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */
+ GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */
+ GM_RXF_127B = GM_MIB_CNT_BASE + 104,/* 65-127 Byte Rx Frame */
+ GM_RXF_255B = GM_MIB_CNT_BASE + 112,/* 128-255 Byte Rx Frame */
+ GM_RXF_511B = GM_MIB_CNT_BASE + 120,/* 256-511 Byte Rx Frame */
+ GM_RXF_1023B = GM_MIB_CNT_BASE + 128,/* 512-1023 Byte Rx Frame */
+ GM_RXF_1518B = GM_MIB_CNT_BASE + 136,/* 1024-1518 Byte Rx Frame */
+ GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144,/* 1519-MaxSize Byte Rx Frame */
+ GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152,/* Rx Frame too Long Error */
+ GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160,/* Rx Jabber Packet Frame */
+
+ GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176,/* Rx FIFO overflow Event */
+ GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192,/* Unicast Frames Xmitted OK */
+ GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200,/* Broadcast Frames Xmitted OK */
+ GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208,/* Pause MAC Ctrl Frames Xmitted */
+ GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216,/* Multicast Frames Xmitted OK */
+ GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224,/* Octets Transmitted OK Low */
+ GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232,/* Octets Transmitted OK High */
+ GM_TXF_64B = GM_MIB_CNT_BASE + 240,/* 64 Byte Tx Frame */
+ GM_TXF_127B = GM_MIB_CNT_BASE + 248,/* 65-127 Byte Tx Frame */
+ GM_TXF_255B = GM_MIB_CNT_BASE + 256,/* 128-255 Byte Tx Frame */
+ GM_TXF_511B = GM_MIB_CNT_BASE + 264,/* 256-511 Byte Tx Frame */
+ GM_TXF_1023B = GM_MIB_CNT_BASE + 272,/* 512-1023 Byte Tx Frame */
+ GM_TXF_1518B = GM_MIB_CNT_BASE + 280,/* 1024-1518 Byte Tx Frame */
+ GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288,/* 1519-MaxSize Byte Tx Frame */
+
+ GM_TXF_COL = GM_MIB_CNT_BASE + 304,/* Tx Collision */
+ GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312,/* Tx Late Collision */
+ GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320,/* Tx aborted due to Exces. Col. */
+ GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328,/* Tx Multiple Collision */
+ GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336,/* Tx Single Collision */
+ GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344,/* Tx FIFO Underrun Event */
+};
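
As the note above says, each MIB counter is 32 bits read-only, with the high word at the named offset plus 4; the 64-bit octet counters additionally split into _LO/_HI halves 8 bytes apart. A minimal sketch of the access pattern, assuming the sky2_read16() accessor and SK_GMAC_REG() macro defined further down in this file:

	u32 lo = sky2_read16(hw, SK_GMAC_REG(port, GM_RXF_UC_OK));	/* bits 15..0 */
	u32 hi = sky2_read16(hw, SK_GMAC_REG(port, GM_RXF_UC_OK + 4));	/* bits 31..16 */
	u32 ucast_ok = lo | (hi << 16);

gma_read32() and get_stats32() below wrap exactly this, plus a retry loop, since the two halves cannot be read in one atomic access.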
+
+/* GMAC Bit Definitions */
+/* GM_GP_STAT 16 bit r/o General Purpose Status Register */
+enum {
+ GM_GPSR_SPEED = 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */
+ GM_GPSR_DUPLEX = 1<<14, /* Bit 14: Duplex Mode (1 = Full) */
+ GM_GPSR_FC_TX_DIS = 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */
+ GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */
+ GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */
+ GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */
+ GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */
+ GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */
+
+ GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */
+ GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */
+ GM_GPSR_PART_MODE = 1<<3, /* Bit 3: Partition mode */
+ GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */
+ GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */
+};
+
+/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */
+enum {
+ GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */
+ GM_GPCR_FC_TX_DIS = 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */
+ GM_GPCR_TX_ENA = 1<<12, /* Bit 12: Enable Transmit */
+ GM_GPCR_RX_ENA = 1<<11, /* Bit 11: Enable Receive */
+ GM_GPCR_BURST_ENA = 1<<10, /* Bit 10: Enable Burst Mode */
+ GM_GPCR_LOOP_ENA = 1<<9, /* Bit 9: Enable MAC Loopback Mode */
+ GM_GPCR_PART_ENA = 1<<8, /* Bit 8: Enable Partition Mode */
+ GM_GPCR_GIGS_ENA = 1<<7, /* Bit 7: Gigabit Speed (1000 Mbps) */
+ GM_GPCR_FL_PASS = 1<<6, /* Bit 6: Force Link Pass */
+ GM_GPCR_DUP_FULL = 1<<5, /* Bit 5: Full Duplex Mode */
+ GM_GPCR_FC_RX_DIS = 1<<4, /* Bit 4: Disable Rx Flow-Control Mode */
+ GM_GPCR_SPEED_100 = 1<<3, /* Bit 3: Port Speed 100 Mbps */
+ GM_GPCR_AU_DUP_DIS = 1<<2, /* Bit 2: Disable Auto-Update Duplex */
+ GM_GPCR_AU_FCT_DIS = 1<<1, /* Bit 1: Disable Auto-Update Flow-C. */
+ GM_GPCR_AU_SPD_DIS = 1<<0, /* Bit 0: Disable Auto-Update Speed */
+};
+
+#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
+
+/* GM_TX_CTRL 16 bit r/w Transmit Control Register */
+enum {
+ GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */
+ GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */
+ GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */
+ GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */
+};
+
+#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK)
+#define TX_COL_DEF 0x04
+
+/* GM_RX_CTRL 16 bit r/w Receive Control Register */
+enum {
+ GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */
+ GM_RXCR_MCF_ENA = 1<<14, /* Bit 14: Enable Multicast filtering */
+ GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */
+ GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */
+};
+
+/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */
+enum {
+ GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */
+ GM_TXPA_JAMIPG_MSK = 0x1f<<9, /* Bit 13..9: Jam IPG */
+ GM_TXPA_JAMDAT_MSK = 0x1f<<4, /* Bit 8..4: IPG Jam to Data */
+ GM_TXPA_BO_LIM_MSK = 0x0f, /* Bit 3.. 0: Backoff Limit Mask */
+
+ TX_JAM_LEN_DEF = 0x03,
+ TX_JAM_IPG_DEF = 0x0b,
+ TX_IPG_JAM_DEF = 0x1c,
+ TX_BOF_LIM_DEF = 0x04,
+};
+
+#define TX_JAM_LEN_VAL(x) (((x)<<14) & GM_TXPA_JAMLEN_MSK)
+#define TX_JAM_IPG_VAL(x) (((x)<<9) & GM_TXPA_JAMIPG_MSK)
+#define TX_IPG_JAM_DATA(x) (((x)<<4) & GM_TXPA_JAMDAT_MSK)
+#define TX_BACK_OFF_LIM(x) ((x) & GM_TXPA_BO_LIM_MSK)
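
The four value macros together fill the whole 16-bit GM_TX_PARAM register. A hedged sketch of composing the default setting from the *_DEF constants above, using the gma_write16() helper defined near the end of this file:

	gma_write16(hw, port, GM_TX_PARAM,
		    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
		    TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
		    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
		    TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));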
+
+
+/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */
+enum {
+ GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */
+ GM_SMOD_LIMIT_4 = 1<<10, /* 4 consecutive Tx trials */
+ GM_SMOD_VLAN_ENA = 1<<9, /* Enable VLAN (Max. Frame Len) */
+ GM_SMOD_JUMBO_ENA = 1<<8, /* Enable Jumbo (Max. Frame Len) */
+
+ GM_NEW_FLOW_CTRL = 1<<6, /* Enable New Flow-Control */
+
+ GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
+};
+
+#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
+#define IPG_DATA_VAL(x) ((x) & GM_SMOD_IPG_MSK)
+
+#define DATA_BLIND_DEF 0x04
+#define IPG_DATA_DEF_1000 0x1e
+#define IPG_DATA_DEF_10_100 0x18
+
+/* GM_SMI_CTRL 16 bit r/w SMI Control Register */
+enum {
+ GM_SMI_CT_PHY_A_MSK = 0x1f<<11,/* Bit 15..11: PHY Device Address */
+ GM_SMI_CT_REG_A_MSK = 0x1f<<6,/* Bit 10.. 6: PHY Register Address */
+ GM_SMI_CT_OP_RD = 1<<5, /* Bit 5: OpCode Read (0=Write)*/
+ GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */
+ GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */
+};
+
+#define GM_SMI_CT_PHY_AD(x) (((u16)(x)<<11) & GM_SMI_CT_PHY_A_MSK)
+#define GM_SMI_CT_REG_AD(x) (((u16)(x)<<6) & GM_SMI_CT_REG_A_MSK)
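
A hedged sketch of a PHY register read through the SMI interface: write the PHY and register addresses plus the read opcode, then poll GM_SMI_CT_RD_VAL. PHY_ADDR_MARV stands for the Marvell PHY address constant defined elsewhere in sky2.h, and gma_read16()/gma_write16() are defined below:

	static int smi_read_sketch(struct sky2_hw *hw, unsigned port, u16 reg)
	{
		int i;

		gma_write16(hw, port, GM_SMI_CTRL,
			    GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) |
			    GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

		for (i = 0; i < GM_PHY_RETRIES; i++) {
			if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
				return gma_read16(hw, port, GM_SMI_DATA);
			udelay(10);		/* from <linux/delay.h> */
		}
		return -ETIMEDOUT;
	}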
+
+/* GM_PHY_ADDR 16 bit r/w GPHY Address Register */
+enum {
+ GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */
+ GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */
+};
+
+/* Receive Frame Status Encoding */
+enum {
+ GMR_FS_LEN = 0x7fff<<16, /* Bit 30..16: Rx Frame Length */
+ GMR_FS_VLAN = 1<<13, /* VLAN Packet */
+ GMR_FS_JABBER = 1<<12, /* Jabber Packet */
+ GMR_FS_UN_SIZE = 1<<11, /* Undersize Packet */
+ GMR_FS_MC = 1<<10, /* Multicast Packet */
+ GMR_FS_BC = 1<<9, /* Broadcast Packet */
+ GMR_FS_RX_OK = 1<<8, /* Receive OK (Good Packet) */
+ GMR_FS_GOOD_FC = 1<<7, /* Good Flow-Control Packet */
+ GMR_FS_BAD_FC = 1<<6, /* Bad Flow-Control Packet */
+ GMR_FS_MII_ERR = 1<<5, /* MII Error */
+ GMR_FS_LONG_ERR = 1<<4, /* Too Long Packet */
+ GMR_FS_FRAGMENT = 1<<3, /* Fragment */
+
+ GMR_FS_CRC_ERR = 1<<1, /* CRC Error */
+ GMR_FS_RX_FF_OV = 1<<0, /* Rx FIFO Overflow */
+
+ GMR_FS_ANY_ERR = GMR_FS_RX_FF_OV | GMR_FS_CRC_ERR |
+ GMR_FS_FRAGMENT | GMR_FS_LONG_ERR |
+ GMR_FS_MII_ERR | GMR_FS_BAD_FC |
+ GMR_FS_UN_SIZE | GMR_FS_JABBER,
+};
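
A hedged sketch of how a receive path vets one of these status words: the frame is usable only if GMR_FS_RX_OK is set, no bit collected in GMR_FS_ANY_ERR is, and the length field in bits 30..16 matches the byte count the descriptor reported:

	static inline int rx_status_ok(u32 status, unsigned length)
	{
		return (status & GMR_FS_RX_OK) &&
		       !(status & GMR_FS_ANY_ERR) &&
		       ((status & GMR_FS_LEN) >> 16) == length;
	}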
+
+/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
+enum {
+ RX_GCLKMAC_ENA = 1<<31, /* RX MAC Clock Gating Enable */
+ RX_GCLKMAC_OFF = 1<<30,
+
+ RX_STFW_DIS = 1<<29, /* RX Store and Forward Disable */
+ RX_STFW_ENA = 1<<28,
+
+ RX_TRUNC_ON = 1<<27, /* enable packet truncation */
+ RX_TRUNC_OFF = 1<<26, /* disable packet truncation */
+ RX_VLAN_STRIP_ON = 1<<25, /* enable VLAN stripping */
+ RX_VLAN_STRIP_OFF = 1<<24, /* disable VLAN stripping */
+
+ RX_MACSEC_FLUSH_ON = 1<<23,
+ RX_MACSEC_FLUSH_OFF = 1<<22,
+ RX_MACSEC_ASF_FLUSH_ON = 1<<21,
+ RX_MACSEC_ASF_FLUSH_OFF = 1<<20,
+
+ GMF_RX_OVER_ON = 1<<19, /* enable flushing on receive overrun */
+ GMF_RX_OVER_OFF = 1<<18, /* disable flushing on receive overrun */
+ GMF_ASF_RX_OVER_ON = 1<<17, /* enable flushing of ASF when overrun */
+ GMF_ASF_RX_OVER_OFF = 1<<16, /* disable flushing of ASF when overrun */
+
+ GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */
+ GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */
+ GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */
+
+ GMF_RP_TST_ON = 1<<10, /* Read Pointer Test On */
+ GMF_RP_TST_OFF = 1<<9, /* Read Pointer Test Off */
+ GMF_RP_STEP = 1<<8, /* Read Pointer Step/Increment */
+ GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */
+ GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */
+ GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */
+ GMF_CLI_RX_C = 1<<4, /* Clear IRQ Rx Frame Complete */
+
+ GMF_OPER_ON = 1<<3, /* Operational Mode On */
+ GMF_OPER_OFF = 1<<2, /* Operational Mode Off */
+ GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */
+ GMF_RST_SET = 1<<0, /* Set GMAC FIFO Reset */
+
+ RX_GMF_FL_THR_DEF = 0xa, /* flush threshold (default) */
+
+ GMF_RX_CTRL_DEF = GMF_OPER_ON | GMF_RX_F_FL_ON,
+};
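
The ON/OFF bits come in self-selecting pairs: writing a 1 to either member of a pair latches that state. A hedged init sketch, assuming the per-port SK_REG(port, reg) address macro defined elsewhere in sky2.h:

	/* take the Rx GMAC FIFO out of reset, then enable it with flush mode on */
	sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
	sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RX_CTRL_DEF);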
+
+/* RX_GMF_FL_CTRL 16 bit Rx GMAC FIFO Flush Control (Yukon-Supreme) */
+enum {
+ RX_IPV6_SA_MOB_ENA = 1<<9, /* IPv6 SA Mobility Support Enable */
+ RX_IPV6_SA_MOB_DIS = 1<<8, /* IPv6 SA Mobility Support Disable */
+ RX_IPV6_DA_MOB_ENA = 1<<7, /* IPv6 DA Mobility Support Enable */
+ RX_IPV6_DA_MOB_DIS = 1<<6, /* IPv6 DA Mobility Support Disable */
+ RX_PTR_SYNCDLY_ENA = 1<<5, /* Pointers Delay Synch Enable */
+ RX_PTR_SYNCDLY_DIS = 1<<4, /* Pointers Delay Synch Disable */
+ RX_ASF_NEWFLAG_ENA = 1<<3, /* RX ASF Flag New Logic Enable */
+ RX_ASF_NEWFLAG_DIS = 1<<2, /* RX ASF Flag New Logic Disable */
+ RX_FLSH_MISSPKT_ENA = 1<<1, /* RX Flush Miss-Packet Enable */
+ RX_FLSH_MISSPKT_DIS = 1<<0, /* RX Flush Miss-Packet Disable */
+};
+
+/* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */
+enum {
+ TX_DYN_WM_ENA = 3, /* Yukon-FE+ specific */
+};
+
+/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */
+enum {
+ TX_STFW_DIS = 1<<31,/* Disable Store & Forward */
+ TX_STFW_ENA = 1<<30,/* Enable Store & Forward */
+
+ TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */
+ TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */
+
+ TX_PCI_JUM_ENA = 1<<23,/* PCI Jumbo Mode enable */
+ TX_PCI_JUM_DIS = 1<<22,/* PCI Jumbo Mode disable */
+
+ GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */
+ GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */
+ GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */
+
+ GMF_CLI_TX_FU = 1<<6, /* Clear IRQ Tx FIFO Underrun */
+ GMF_CLI_TX_FC = 1<<5, /* Clear IRQ Tx Frame Complete */
+ GMF_CLI_TX_PE = 1<<4, /* Clear IRQ Tx Parity Error */
+};
+
+/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */
+enum {
+ GMT_ST_START = 1<<2, /* Start Time Stamp Timer */
+ GMT_ST_STOP = 1<<1, /* Stop Time Stamp Timer */
+ GMT_ST_CLR_IRQ = 1<<0, /* Clear Time Stamp Timer IRQ */
+};
+
+/* B28_Y2_ASF_STAT_CMD 32 bit ASF Status and Command Reg */
+enum {
+ Y2_ASF_OS_PRES = 1<<4, /* ASF operation system present */
+ Y2_ASF_RESET = 1<<3, /* ASF system in reset state */
+ Y2_ASF_RUNNING = 1<<2, /* ASF system operational */
+ Y2_ASF_CLR_HSTI = 1<<1, /* Clear ASF IRQ */
+ Y2_ASF_IRQ = 1<<0, /* Issue an IRQ to ASF system */
+
+ Y2_ASF_UC_STATE = 3<<2, /* ASF uC State */
+ Y2_ASF_CLK_HALT = 0, /* ASF system clock stopped */
+};
+
+/* B28_Y2_ASF_HOST_COM 32 bit ASF Host Communication Reg */
+enum {
+ Y2_ASF_CLR_ASFI = 1<<1, /* Clear host IRQ */
+ Y2_ASF_HOST_IRQ = 1<<0, /* Issue an IRQ to HOST system */
+};
+/* HCU_CCSR CPU Control and Status Register */
+enum {
+ HCU_CCSR_SMBALERT_MONITOR= 1<<27, /* SMBALERT pin monitor */
+ HCU_CCSR_CPU_SLEEP = 1<<26, /* CPU sleep status */
+ /* Clock Stretching Timeout */
+ HCU_CCSR_CS_TO = 1<<25,
+ HCU_CCSR_WDOG = 1<<24, /* Watchdog Reset */
+
+ HCU_CCSR_CLR_IRQ_HOST = 1<<17, /* Clear IRQ_HOST */
+ HCU_CCSR_SET_IRQ_HCU = 1<<16, /* Set IRQ_HCU */
+
+ HCU_CCSR_AHB_RST = 1<<9, /* Reset AHB bridge */
+ HCU_CCSR_CPU_RST_MODE = 1<<8, /* CPU Reset Mode */
+
+ HCU_CCSR_SET_SYNC_CPU = 1<<5,
+ HCU_CCSR_CPU_CLK_DIVIDE_MSK = 3<<3,/* CPU Clock Divide */
+ HCU_CCSR_CPU_CLK_DIVIDE_BASE= 1<<3,
+ HCU_CCSR_OS_PRSNT = 1<<2, /* ASF OS Present */
+/* Microcontroller State */
+ HCU_CCSR_UC_STATE_MSK = 3,
+ HCU_CCSR_UC_STATE_BASE = 1<<0,
+ HCU_CCSR_ASF_RESET = 0,
+ HCU_CCSR_ASF_HALTED = 1<<1,
+ HCU_CCSR_ASF_RUNNING = 1<<0,
+};
+
+/* HCU_HCSR Host Control and Status Register */
+enum {
+ HCU_HCSR_SET_IRQ_CPU = 1<<16, /* Set IRQ_CPU */
+
+ HCU_HCSR_CLR_IRQ_HCU = 1<<1, /* Clear IRQ_HCU */
+ HCU_HCSR_SET_IRQ_HOST = 1<<0, /* Set IRQ_HOST */
+};
+
+/* STAT_CTRL 32 bit Status BMU control register (Yukon-2 only) */
+enum {
+ SC_STAT_CLR_IRQ = 1<<4, /* Status Burst IRQ clear */
+ SC_STAT_OP_ON = 1<<3, /* Operational Mode On */
+ SC_STAT_OP_OFF = 1<<2, /* Operational Mode Off */
+ SC_STAT_RST_CLR = 1<<1, /* Clear Status Unit Reset (Enable) */
+ SC_STAT_RST_SET = 1<<0, /* Set Status Unit Reset */
+};
+
+/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */
+enum {
+ GMC_SET_RST = 1<<15,/* Set MACsec reset */
+ GMC_SEC_RST_OFF = 1<<14,/* Clear MACsec reset */
+ GMC_BYP_MACSECRX_ON = 1<<13,/* Bypass macsec RX */
+ GMC_BYP_MACSECRX_OFF= 1<<12,/* Bypass macsec RX off */
+ GMC_BYP_MACSECTX_ON = 1<<11,/* Bypass macsec TX */
+ GMC_BYP_MACSECTX_OFF= 1<<10,/* Bypass macsec TX off*/
+ GMC_BYP_RETR_ON = 1<<9, /* Bypass retransmit FIFO On */
+ GMC_BYP_RETR_OFF= 1<<8, /* Bypass retransmit FIFO Off */
+
+ GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */
+ GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */
+ GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */
+ GMC_F_LOOPB_OFF = 1<<4, /* FIFO Loopback Off */
+ GMC_PAUSE_ON = 1<<3, /* Pause On */
+ GMC_PAUSE_OFF = 1<<2, /* Pause Off */
+ GMC_RST_CLR = 1<<1, /* Clear GMAC Reset */
+ GMC_RST_SET = 1<<0, /* Set GMAC Reset */
+};
+
+/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */
+enum {
+ GPC_TX_PAUSE = 1<<30, /* Tx pause enabled (ro) */
+ GPC_RX_PAUSE = 1<<29, /* Rx pause enabled (ro) */
+ GPC_SPEED = 3<<27, /* PHY speed (ro) */
+ GPC_LINK = 1<<26, /* Link up (ro) */
+ GPC_DUPLEX = 1<<25, /* Duplex (ro) */
+ GPC_CLOCK = 1<<24, /* 125 MHz clock stable (ro) */
+
+ GPC_PDOWN = 1<<23, /* Internal 2.5V regulator power down */
+ GPC_TSTMODE = 1<<22, /* Test mode */
+ GPC_REG18 = 1<<21, /* Reg18 Power down */
+ GPC_REG12SEL = 3<<19, /* Reg12 power setting */
+ GPC_REG18SEL = 3<<17, /* Reg18 power setting */
+ GPC_SPILOCK = 1<<16, /* SPI lock (ASF) */
+
+ GPC_LEDMUX = 3<<14, /* LED Mux */
+ GPC_INTPOL = 1<<13, /* Interrupt polarity */
+ GPC_DETECT = 1<<12, /* Energy detect */
+ GPC_1000HD = 1<<11, /* Enable 1000Mbit HD */
+ GPC_SLAVE = 1<<10, /* Slave mode */
+ GPC_PAUSE = 1<<9, /* Pause enable */
+ GPC_LEDCTL = 3<<6, /* GPHY LEDs */
+
+ GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */
+ GPC_RST_SET = 1<<0, /* Set GPHY Reset */
+};
+
+/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */
+/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */
+enum {
+ GM_IS_TX_CO_OV = 1<<5, /* Transmit Counter Overflow IRQ */
+ GM_IS_RX_CO_OV = 1<<4, /* Receive Counter Overflow IRQ */
+ GM_IS_TX_FF_UR = 1<<3, /* Transmit FIFO Underrun */
+ GM_IS_TX_COMPL = 1<<2, /* Frame Transmission Complete */
+ GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
+ GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
+
+#define GMAC_DEF_MSK GM_IS_TX_FF_UR
+};
+
+/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
+enum { /* Bits 15.. 2: reserved */
+ GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */
+ GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */
+};
+
+
+/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */
+enum {
+ WOL_CTL_LINK_CHG_OCC = 1<<15,
+ WOL_CTL_MAGIC_PKT_OCC = 1<<14,
+ WOL_CTL_PATTERN_OCC = 1<<13,
+ WOL_CTL_CLEAR_RESULT = 1<<12,
+ WOL_CTL_ENA_PME_ON_LINK_CHG = 1<<11,
+ WOL_CTL_DIS_PME_ON_LINK_CHG = 1<<10,
+ WOL_CTL_ENA_PME_ON_MAGIC_PKT = 1<<9,
+ WOL_CTL_DIS_PME_ON_MAGIC_PKT = 1<<8,
+ WOL_CTL_ENA_PME_ON_PATTERN = 1<<7,
+ WOL_CTL_DIS_PME_ON_PATTERN = 1<<6,
+ WOL_CTL_ENA_LINK_CHG_UNIT = 1<<5,
+ WOL_CTL_DIS_LINK_CHG_UNIT = 1<<4,
+ WOL_CTL_ENA_MAGIC_PKT_UNIT = 1<<3,
+ WOL_CTL_DIS_MAGIC_PKT_UNIT = 1<<2,
+ WOL_CTL_ENA_PATTERN_UNIT = 1<<1,
+ WOL_CTL_DIS_PATTERN_UNIT = 1<<0,
+};
+
+
+/* Control flags */
+enum {
+ UDPTCP = 1<<0,
+ CALSUM = 1<<1,
+ WR_SUM = 1<<2,
+ INIT_SUM= 1<<3,
+ LOCK_SUM= 1<<4,
+ INS_VLAN= 1<<5,
+ EOP = 1<<7,
+};
+
+enum {
+ HW_OWNER = 1<<7,
+ OP_TCPWRITE = 0x11,
+ OP_TCPSTART = 0x12,
+ OP_TCPINIT = 0x14,
+ OP_TCPLCK = 0x18,
+ OP_TCPCHKSUM = OP_TCPSTART,
+ OP_TCPIS = OP_TCPINIT | OP_TCPSTART,
+ OP_TCPLW = OP_TCPLCK | OP_TCPWRITE,
+ OP_TCPLSW = OP_TCPLCK | OP_TCPSTART | OP_TCPWRITE,
+ OP_TCPLISW = OP_TCPLCK | OP_TCPINIT | OP_TCPSTART | OP_TCPWRITE,
+
+ OP_ADDR64 = 0x21,
+ OP_VLAN = 0x22,
+ OP_ADDR64VLAN = OP_ADDR64 | OP_VLAN,
+ OP_LRGLEN = 0x24,
+ OP_LRGLENVLAN = OP_LRGLEN | OP_VLAN,
+ OP_MSS = 0x28,
+ OP_MSSVLAN = OP_MSS | OP_VLAN,
+
+ OP_BUFFER = 0x40,
+ OP_PACKET = 0x41,
+ OP_LARGESEND = 0x43,
+ OP_LSOV2 = 0x45,
+
+/* YUKON-2 STATUS opcodes defines */
+ OP_RXSTAT = 0x60,
+ OP_RXTIMESTAMP = 0x61,
+ OP_RXVLAN = 0x62,
+ OP_RXCHKS = 0x64,
+ OP_RXCHKSVLAN = OP_RXCHKS | OP_RXVLAN,
+ OP_RXTIMEVLAN = OP_RXTIMESTAMP | OP_RXVLAN,
+ OP_RSS_HASH = 0x65,
+ OP_TXINDEXLE = 0x68,
+ OP_MACSEC = 0x6c,
+ OP_PUTIDX = 0x70,
+};
+
+enum status_css {
+ CSS_TCPUDPCSOK = 1<<7, /* TCP / UDP checksum is ok */
+ CSS_ISUDP = 1<<6, /* packet is a UDP packet */
+ CSS_ISTCP = 1<<5, /* packet is a TCP packet */
+ CSS_ISIPFRAG = 1<<4, /* packet is a TCP/UDP frag, CS calc not done */
+ CSS_ISIPV6 = 1<<3, /* packet is an IPv6 packet */
+ CSS_IPV4CSUMOK = 1<<2, /* IPv4 header checksum is ok */
+ CSS_ISIPV4 = 1<<1, /* packet is an IPv4 packet */
+ CSS_LINK_BIT = 1<<0, /* port number (legacy) */
+};
+
+/* Yukon 2 hardware interface */
+struct sky2_tx_le {
+ __le32 addr;
+ __le16 length; /* also vlan tag or checksum start */
+ u8 ctrl;
+ u8 opcode;
+} __packed;
+
+struct sky2_rx_le {
+ __le32 addr;
+ __le16 length;
+ u8 ctrl;
+ u8 opcode;
+} __packed;
+
+struct sky2_status_le {
+ __le32 status; /* also checksum */
+ __le16 length; /* also vlan tag */
+ u8 css;
+ u8 opcode;
+} __packed;
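
All three layouts end in the same ctrl/opcode byte pair, and ownership passes to the hardware once HW_OWNER is OR-ed into the opcode. A hedged sketch of filling one transmit LE for a buffer fragment; mapping and len stand in for a DMA-mapped address and length, and the first LE of a packet would use OP_PACKET instead:

	le->addr   = cpu_to_le32(lower_32_bits(mapping));
	le->length = cpu_to_le16(len);
	le->ctrl   = 0;
	le->opcode = OP_BUFFER | HW_OWNER;	/* hardware owns this LE now */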
+
+struct tx_ring_info {
+ struct sk_buff *skb;
+ unsigned long flags;
+#define TX_MAP_SINGLE 0x0001
+#define TX_MAP_PAGE 0x0002
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
+};
+
+struct rx_ring_info {
+ struct sk_buff *skb;
+ dma_addr_t data_addr;
+ DEFINE_DMA_UNMAP_LEN(data_size);
+ dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
+};
+
+enum flow_control {
+ FC_NONE = 0,
+ FC_TX = 1,
+ FC_RX = 2,
+ FC_BOTH = 3,
+};
+
+struct sky2_stats {
+ struct u64_stats_sync syncp;
+ u64 packets;
+ u64 bytes;
+};
+
+struct sky2_port {
+ struct sky2_hw *hw;
+ struct net_device *netdev;
+ unsigned port;
+ u32 msg_enable;
+ spinlock_t phy_lock;
+
+ struct tx_ring_info *tx_ring;
+ struct sky2_tx_le *tx_le;
+ struct sky2_stats tx_stats;
+
+ u16 tx_ring_size;
+ u16 tx_cons; /* next le to check */
+ u16 tx_prod; /* next le to use */
+ u16 tx_next; /* debug only */
+
+ u16 tx_pending;
+ u16 tx_last_mss;
+ u32 tx_last_upper;
+ u32 tx_tcpsum;
+
+ struct rx_ring_info *rx_ring ____cacheline_aligned_in_smp;
+ struct sky2_rx_le *rx_le;
+ struct sky2_stats rx_stats;
+
+ u16 rx_next; /* next re to check */
+ u16 rx_put; /* next le index to use */
+ u16 rx_pending;
+ u16 rx_data_size;
+ u16 rx_nfrags;
+ u16 rx_tag;
+
+ struct {
+ unsigned long last;
+ u32 mac_rp;
+ u8 mac_lev;
+ u8 fifo_rp;
+ u8 fifo_lev;
+ } check;
+
+ dma_addr_t rx_le_map;
+ dma_addr_t tx_le_map;
+
+ u16 advertising; /* ADVERTISED_ bits */
+ u16 speed; /* SPEED_1000, SPEED_100, ... */
+ u8 wol; /* WAKE_ bits */
+ u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
+ u16 flags;
+#define SKY2_FLAG_AUTO_SPEED 0x0002
+#define SKY2_FLAG_AUTO_PAUSE 0x0004
+
+ enum flow_control flow_mode;
+ enum flow_control flow_status;
+
+#ifdef CONFIG_SKY2_DEBUG
+ struct dentry *debugfs;
+#endif
+};
+
+struct sky2_hw {
+ void __iomem *regs;
+ struct pci_dev *pdev;
+ struct napi_struct napi;
+ struct net_device *dev[2];
+ unsigned long flags;
+#define SKY2_HW_USE_MSI 0x00000001
+#define SKY2_HW_FIBRE_PHY 0x00000002
+#define SKY2_HW_GIGABIT 0x00000004
+#define SKY2_HW_NEWER_PHY 0x00000008
+#define SKY2_HW_RAM_BUFFER 0x00000010
+#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
+#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
+#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
+#define SKY2_HW_RSS_BROKEN 0x00000100
+#define SKY2_HW_VLAN_BROKEN 0x00000200
+#define SKY2_HW_RSS_CHKSUM 0x00000400 /* RSS requires chksum */
+
+ u8 chip_id;
+ u8 chip_rev;
+ u8 pmd_type;
+ u8 ports;
+
+ struct sky2_status_le *st_le;
+ u32 st_size;
+ u32 st_idx;
+ dma_addr_t st_dma;
+
+ struct timer_list watchdog_timer;
+ struct work_struct restart_work;
+ wait_queue_head_t msi_wait;
+
+ char irq_name[];
+};
+
+static inline int sky2_is_copper(const struct sky2_hw *hw)
+{
+ return !(hw->flags & SKY2_HW_FIBRE_PHY);
+}
+
+/* Register accessor for memory mapped device */
+static inline u32 sky2_read32(const struct sky2_hw *hw, unsigned reg)
+{
+ return readl(hw->regs + reg);
+}
+
+static inline u16 sky2_read16(const struct sky2_hw *hw, unsigned reg)
+{
+ return readw(hw->regs + reg);
+}
+
+static inline u8 sky2_read8(const struct sky2_hw *hw, unsigned reg)
+{
+ return readb(hw->regs + reg);
+}
+
+static inline void sky2_write32(const struct sky2_hw *hw, unsigned reg, u32 val)
+{
+ writel(val, hw->regs + reg);
+}
+
+static inline void sky2_write16(const struct sky2_hw *hw, unsigned reg, u16 val)
+{
+ writew(val, hw->regs + reg);
+}
+
+static inline void sky2_write8(const struct sky2_hw *hw, unsigned reg, u8 val)
+{
+ writeb(val, hw->regs + reg);
+}
+
+/* Yukon PHY related registers */
+#define SK_GMAC_REG(port,reg) \
+ (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg))
+#define GM_PHY_RETRIES 100
+
+static inline u16 gma_read16(const struct sky2_hw *hw, unsigned port, unsigned reg)
+{
+ return sky2_read16(hw, SK_GMAC_REG(port,reg));
+}
+
+static inline u32 gma_read32(struct sky2_hw *hw, unsigned port, unsigned reg)
+{
+ unsigned base = SK_GMAC_REG(port, reg);
+ return (u32) sky2_read16(hw, base)
+ | (u32) sky2_read16(hw, base+4) << 16;
+}
+
+static inline u64 gma_read64(struct sky2_hw *hw, unsigned port, unsigned reg)
+{
+ unsigned base = SK_GMAC_REG(port, reg);
+
+ return (u64) sky2_read16(hw, base)
+ | (u64) sky2_read16(hw, base+4) << 16
+ | (u64) sky2_read16(hw, base+8) << 32
+ | (u64) sky2_read16(hw, base+12) << 48;
+}
+
+/* There is no way to atomically read 32-bit MIB values, so read until two consecutive reads agree */
+static inline u32 get_stats32(struct sky2_hw *hw, unsigned port, unsigned reg)
+{
+ u32 val;
+
+ do {
+ val = gma_read32(hw, port, reg);
+ } while (gma_read32(hw, port, reg) != val);
+
+ return val;
+}
+
+static inline u64 get_stats64(struct sky2_hw *hw, unsigned port, unsigned reg)
+{
+ u64 val;
+
+ do {
+ val = gma_read64(hw, port, reg);
+ } while (gma_read64(hw, port, reg) != val);
+
+ return val;
+}
+
+static inline void gma_write16(const struct sky2_hw *hw, unsigned port, int r, u16 v)
+{
+ sky2_write16(hw, SK_GMAC_REG(port,r), v);
+}
+
+static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg,
+ const u8 *addr)
+{
+ gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8));
+ gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
+ gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
+}
+
+/* PCI config space access */
+static inline u32 sky2_pci_read32(const struct sky2_hw *hw, unsigned reg)
+{
+ return sky2_read32(hw, Y2_CFG_SPC + reg);
+}
+
+static inline u16 sky2_pci_read16(const struct sky2_hw *hw, unsigned reg)
+{
+ return sky2_read16(hw, Y2_CFG_SPC + reg);
+}
+
+static inline void sky2_pci_write32(struct sky2_hw *hw, unsigned reg, u32 val)
+{
+ sky2_write32(hw, Y2_CFG_SPC + reg, val);
+}
+
+static inline void sky2_pci_write16(struct sky2_hw *hw, unsigned reg, u16 val)
+{
+ sky2_write16(hw, Y2_CFG_SPC + reg, val);
+}
+#endif
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
new file mode 100644
index 000000000000..d8099a7903d3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -0,0 +1,23 @@
+#
+# Mellanox driver configuration
+#
+
+config NET_VENDOR_MELLANOX
+ bool "Mellanox devices"
+ default y
+ depends on PCI && INET
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Mellanox cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_MELLANOX
+
+source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
+
+endif # NET_VENDOR_MELLANOX
diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile
new file mode 100644
index 000000000000..37afb9683372
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Mellanox device drivers.
+#
+
+obj-$(CONFIG_MLX4_CORE) += mlx4/
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
new file mode 100644
index 000000000000..1bb93531f1ba
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -0,0 +1,27 @@
+#
+# Mellanox driver configuration
+#
+
+config MLX4_EN
+ tristate "Mellanox Technologies 10Gbit Ethernet support"
+ depends on PCI && INET
+ select MLX4_CORE
+ select INET_LRO
+ ---help---
+ This driver supports Mellanox Technologies ConnectX Ethernet
+ devices.
+
+config MLX4_CORE
+ tristate
+ depends on PCI
+ default n
+
+config MLX4_DEBUG
+ bool "Verbose debugging output" if (MLX4_CORE && EXPERT)
+ depends on MLX4_CORE
+ default y
+ ---help---
+ This option causes debugging code to be compiled into the
+ mlx4_core driver. The output can be turned on via the
+ debug_level module parameter (which can also be set after
+ the driver is loaded through sysfs).
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
new file mode 100644
index 000000000000..d1aa45a15854
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
+
+mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
+ mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o
+
+obj-$(CONFIG_MLX4_EN) += mlx4_en.o
+
+mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
+ en_resources.o en_netdev.o en_selftest.o
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
new file mode 100644
index 000000000000..116cae334dad
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/bitmap.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+
+#include "mlx4.h"
+
+u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
+{
+ u32 obj;
+
+ spin_lock(&bitmap->lock);
+
+ obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
+ if (obj >= bitmap->max) {
+ bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+ & bitmap->mask;
+ obj = find_first_zero_bit(bitmap->table, bitmap->max);
+ }
+
+ if (obj < bitmap->max) {
+ set_bit(obj, bitmap->table);
+ bitmap->last = (obj + 1);
+ if (bitmap->last == bitmap->max)
+ bitmap->last = 0;
+ obj |= bitmap->top;
+ } else
+ obj = -1;
+
+ if (obj != -1)
+ --bitmap->avail;
+
+ spin_unlock(&bitmap->lock);
+
+ return obj;
+}
+
+void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
+{
+ mlx4_bitmap_free_range(bitmap, obj, 1);
+}
+
+u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
+{
+ u32 obj;
+
+ if (likely(cnt == 1 && align == 1))
+ return mlx4_bitmap_alloc(bitmap);
+
+ spin_lock(&bitmap->lock);
+
+ obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
+ bitmap->last, cnt, align - 1);
+ if (obj >= bitmap->max) {
+ bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+ & bitmap->mask;
+ obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
+ 0, cnt, align - 1);
+ }
+
+ if (obj < bitmap->max) {
+ bitmap_set(bitmap->table, obj, cnt);
+ if (obj == bitmap->last) {
+ bitmap->last = (obj + cnt);
+ if (bitmap->last >= bitmap->max)
+ bitmap->last = 0;
+ }
+ obj |= bitmap->top;
+ } else
+ obj = -1;
+
+ if (obj != -1)
+ bitmap->avail -= cnt;
+
+ spin_unlock(&bitmap->lock);
+
+ return obj;
+}
+
+u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
+{
+ return bitmap->avail;
+}
+
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
+{
+ obj &= bitmap->max + bitmap->reserved_top - 1;
+
+ spin_lock(&bitmap->lock);
+ bitmap_clear(bitmap->table, obj, cnt);
+ bitmap->last = min(bitmap->last, obj);
+ bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+ & bitmap->mask;
+ bitmap->avail += cnt;
+ spin_unlock(&bitmap->lock);
+}
+
+int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
+ u32 reserved_bot, u32 reserved_top)
+{
+ /* num must be a power of 2 */
+ if (num != roundup_pow_of_two(num))
+ return -EINVAL;
+
+ bitmap->last = 0;
+ bitmap->top = 0;
+ bitmap->max = num - reserved_top;
+ bitmap->mask = mask;
+ bitmap->reserved_top = reserved_top;
+ bitmap->avail = num - reserved_top - reserved_bot;
+ spin_lock_init(&bitmap->lock);
+ bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
+ sizeof (long), GFP_KERNEL);
+ if (!bitmap->table)
+ return -ENOMEM;
+
+ bitmap_set(bitmap->table, 0, reserved_bot);
+
+ return 0;
+}
+
+void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
+{
+ kfree(bitmap->table);
+}
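
A hedged round trip through the allocator above. The size must be a power of two (mlx4_bitmap_init() enforces this), and the mask is taken as num - 1 here purely for illustration; real callers derive it from device capabilities:

	struct mlx4_bitmap bitmap;
	u32 obj;

	if (mlx4_bitmap_init(&bitmap, 64, 64 - 1, 4, 0))	/* 4 reserved at bottom */
		return -ENOMEM;

	obj = mlx4_bitmap_alloc(&bitmap);	/* first free entry, i.e. 4 */
	if (obj != (u32) -1)
		mlx4_bitmap_free(&bitmap, obj);

	mlx4_bitmap_cleanup(&bitmap);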
+
+/*
+ * Handling for queue buffers -- we allocate a bunch of memory and
+ * register it in a memory region at HCA virtual address 0. If the
+ * requested size is > max_direct, we split the allocation into
+ * multiple pages, so we don't require too much contiguous memory.
+ */
+
+int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
+ struct mlx4_buf *buf)
+{
+ dma_addr_t t;
+
+ if (size <= max_direct) {
+ buf->nbufs = 1;
+ buf->npages = 1;
+ buf->page_shift = get_order(size) + PAGE_SHIFT;
+ buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
+ size, &t, GFP_KERNEL);
+ if (!buf->direct.buf)
+ return -ENOMEM;
+
+ buf->direct.map = t;
+
+ while (t & ((1 << buf->page_shift) - 1)) {
+ --buf->page_shift;
+ buf->npages *= 2;
+ }
+
+ memset(buf->direct.buf, 0, size);
+ } else {
+ int i;
+
+ buf->direct.buf = NULL;
+ buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ buf->npages = buf->nbufs;
+ buf->page_shift = PAGE_SHIFT;
+ buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
+ GFP_KERNEL);
+ if (!buf->page_list)
+ return -ENOMEM;
+
+ for (i = 0; i < buf->nbufs; ++i) {
+ buf->page_list[i].buf =
+ dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+ &t, GFP_KERNEL);
+ if (!buf->page_list[i].buf)
+ goto err_free;
+
+ buf->page_list[i].map = t;
+
+ memset(buf->page_list[i].buf, 0, PAGE_SIZE);
+ }
+
+ if (BITS_PER_LONG == 64) {
+ struct page **pages;
+ pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
+ if (!pages)
+ goto err_free;
+ for (i = 0; i < buf->nbufs; ++i)
+ pages[i] = virt_to_page(buf->page_list[i].buf);
+ buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
+ kfree(pages);
+ if (!buf->direct.buf)
+ goto err_free;
+ }
+ }
+
+ return 0;
+
+err_free:
+ mlx4_buf_free(dev, size, buf);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(mlx4_buf_alloc);
+
+void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
+{
+ int i;
+
+ if (buf->nbufs == 1)
+ dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
+ buf->direct.map);
+ else {
+ if (BITS_PER_LONG == 64 && buf->direct.buf)
+ vunmap(buf->direct.buf);
+
+ for (i = 0; i < buf->nbufs; ++i)
+ if (buf->page_list[i].buf)
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ buf->page_list[i].buf,
+ buf->page_list[i].map);
+ kfree(buf->page_list);
+ }
+}
+EXPORT_SYMBOL_GPL(mlx4_buf_free);
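
A hedged sketch of the alloc/free pairing: with size above max_direct the allocation takes the page-list branch, and on 64-bit the pages are also vmap()ed so buf.direct.buf remains usable as one contiguous view:

	struct mlx4_buf buf;

	if (mlx4_buf_alloc(dev, 4 * PAGE_SIZE, PAGE_SIZE, &buf))
		return -ENOMEM;

	/* buf.direct.buf (64-bit) or buf.page_list[] now backs the queue */

	mlx4_buf_free(dev, 4 * PAGE_SIZE, &buf);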
+
+static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
+{
+ struct mlx4_db_pgdir *pgdir;
+
+ pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
+ if (!pgdir)
+ return NULL;
+
+ bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
+ pgdir->bits[0] = pgdir->order0;
+ pgdir->bits[1] = pgdir->order1;
+ pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
+ &pgdir->db_dma, GFP_KERNEL);
+ if (!pgdir->db_page) {
+ kfree(pgdir);
+ return NULL;
+ }
+
+ return pgdir;
+}
+
+static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
+ struct mlx4_db *db, int order)
+{
+ int o;
+ int i;
+
+ for (o = order; o <= 1; ++o) {
+ i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
+ if (i < MLX4_DB_PER_PAGE >> o)
+ goto found;
+ }
+
+ return -ENOMEM;
+
+found:
+ clear_bit(i, pgdir->bits[o]);
+
+ i <<= o;
+
+ if (o > order)
+ set_bit(i ^ 1, pgdir->bits[order]);
+
+ db->u.pgdir = pgdir;
+ db->index = i;
+ db->db = pgdir->db_page + db->index;
+ db->dma = pgdir->db_dma + db->index * 4;
+ db->order = order;
+
+ return 0;
+}
+
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_db_pgdir *pgdir;
+ int ret = 0;
+
+ mutex_lock(&priv->pgdir_mutex);
+
+ list_for_each_entry(pgdir, &priv->pgdir_list, list)
+ if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
+ goto out;
+
+ pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
+ if (!pgdir) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ list_add(&pgdir->list, &priv->pgdir_list);
+
+ /* This should never fail -- we just allocated an empty page: */
+ WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));
+
+out:
+ mutex_unlock(&priv->pgdir_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_db_alloc);
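
The pgdir is a two-level buddy allocator over one 4 KB doorbell page: the order1 bits track free aligned pairs, the order0 bits track free single slots, and splitting a pair marks its buddy (index i ^ 1) free at order 0, which mlx4_db_free() later coalesces. A hedged usage sketch:

	struct mlx4_db db;

	if (mlx4_db_alloc(dev, &db, 0))		/* order 0: one doorbell slot */
		return -ENOMEM;

	*db.db = 0;	/* db.db is the CPU pointer, db.dma the bus address */

	mlx4_db_free(dev, &db);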
+
+void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int o;
+ int i;
+
+ mutex_lock(&priv->pgdir_mutex);
+
+ o = db->order;
+ i = db->index;
+
+ if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
+ clear_bit(i ^ 1, db->u.pgdir->order0);
+ ++o;
+ }
+ i >>= o;
+ set_bit(i, db->u.pgdir->bits[o]);
+
+ if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
+ dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ db->u.pgdir->db_page, db->u.pgdir->db_dma);
+ list_del(&db->u.pgdir->list);
+ kfree(db->u.pgdir);
+ }
+
+ mutex_unlock(&priv->pgdir_mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_db_free);
+
+int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
+ int size, int max_direct)
+{
+ int err;
+
+ err = mlx4_db_alloc(dev, &wqres->db, 1);
+ if (err)
+ return err;
+
+ *wqres->db.db = 0;
+
+ err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
+ if (err)
+ goto err_db;
+
+ err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
+ &wqres->mtt);
+ if (err)
+ goto err_buf;
+
+ err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
+ if (err)
+ goto err_mtt;
+
+ return 0;
+
+err_mtt:
+ mlx4_mtt_cleanup(dev, &wqres->mtt);
+err_buf:
+ mlx4_buf_free(dev, size, &wqres->buf);
+err_db:
+ mlx4_db_free(dev, &wqres->db);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);
+
+void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
+ int size)
+{
+ mlx4_mtt_cleanup(dev, &wqres->mtt);
+ mlx4_buf_free(dev, size, &wqres->buf);
+ mlx4_db_free(dev, &wqres->db);
+}
+EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
new file mode 100644
index 000000000000..32f947154c33
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/workqueue.h>
+
+#include "mlx4.h"
+
+enum {
+ MLX4_CATAS_POLL_INTERVAL = 5 * HZ,
+};
+
+static DEFINE_SPINLOCK(catas_lock);
+
+static LIST_HEAD(catas_list);
+static struct work_struct catas_work;
+
+static int internal_err_reset = 1;
+module_param(internal_err_reset, int, 0644);
+MODULE_PARM_DESC(internal_err_reset,
+ "Reset device on internal errors if non-zero (default 1)");
+
+static void dump_err_buf(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ int i;
+
+ mlx4_err(dev, "Internal error detected:\n");
+ for (i = 0; i < priv->fw.catas_size; ++i)
+ mlx4_err(dev, " buf[%02x]: %08x\n",
+ i, swab32(readl(priv->catas_err.map + i)));
+}
+
+static void poll_catas(unsigned long dev_ptr)
+{
+ struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ if (readl(priv->catas_err.map)) {
+ dump_err_buf(dev);
+
+ mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
+
+ if (internal_err_reset) {
+ spin_lock(&catas_lock);
+ list_add(&priv->catas_err.list, &catas_list);
+ spin_unlock(&catas_lock);
+
+ queue_work(mlx4_wq, &catas_work);
+ }
+ } else
+ mod_timer(&priv->catas_err.timer,
+ round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
+}
+
+static void catas_reset(struct work_struct *work)
+{
+ struct mlx4_priv *priv, *tmppriv;
+ struct mlx4_dev *dev;
+
+ LIST_HEAD(tlist);
+ int ret;
+
+ spin_lock_irq(&catas_lock);
+ list_splice_init(&catas_list, &tlist);
+ spin_unlock_irq(&catas_lock);
+
+ list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
+ struct pci_dev *pdev = priv->dev.pdev;
+
+ ret = mlx4_restart_one(priv->dev.pdev);
+ /* 'priv' now is not valid */
+ if (ret)
+ pr_err("mlx4 %s: Reset failed (%d)\n",
+ pci_name(pdev), ret);
+ else {
+ dev = pci_get_drvdata(pdev);
+ mlx4_dbg(dev, "Reset succeeded\n");
+ }
+ }
+}
+
+void mlx4_start_catas_poll(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ phys_addr_t addr;
+
+ INIT_LIST_HEAD(&priv->catas_err.list);
+ init_timer(&priv->catas_err.timer);
+ priv->catas_err.map = NULL;
+
+ addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) +
+ priv->fw.catas_offset;
+
+ priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
+ if (!priv->catas_err.map) {
+ mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
+ (unsigned long long) addr);
+ return;
+ }
+
+ priv->catas_err.timer.data = (unsigned long) dev;
+ priv->catas_err.timer.function = poll_catas;
+ priv->catas_err.timer.expires =
+ round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
+ add_timer(&priv->catas_err.timer);
+}
+
+void mlx4_stop_catas_poll(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ del_timer_sync(&priv->catas_err.timer);
+
+ if (priv->catas_err.map)
+ iounmap(priv->catas_err.map);
+
+ spin_lock_irq(&catas_lock);
+ list_del(&priv->catas_err.list);
+ spin_unlock_irq(&catas_lock);
+}
+
+void __init mlx4_catas_init(void)
+{
+ INIT_WORK(&catas_work, catas_reset);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
new file mode 100644
index 000000000000..23cee7b6af91
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include <asm/io.h>
+
+#include "mlx4.h"
+
+#define CMD_POLL_TOKEN 0xffff
+
+enum {
+ /* command completed successfully: */
+ CMD_STAT_OK = 0x00,
+ /* Internal error (such as a bus error) occurred while processing command: */
+ CMD_STAT_INTERNAL_ERR = 0x01,
+ /* Operation/command not supported or opcode modifier not supported: */
+ CMD_STAT_BAD_OP = 0x02,
+ /* Parameter not supported or parameter out of range: */
+ CMD_STAT_BAD_PARAM = 0x03,
+ /* System not enabled or bad system state: */
+ CMD_STAT_BAD_SYS_STATE = 0x04,
+ /* Attempt to access reserved or unallocated resource: */
+ CMD_STAT_BAD_RESOURCE = 0x05,
+ /* Requested resource is currently executing a command, or is otherwise busy: */
+ CMD_STAT_RESOURCE_BUSY = 0x06,
+ /* Required capability exceeds device limits: */
+ CMD_STAT_EXCEED_LIM = 0x08,
+ /* Resource is not in the appropriate state or ownership: */
+ CMD_STAT_BAD_RES_STATE = 0x09,
+ /* Index out of range: */
+ CMD_STAT_BAD_INDEX = 0x0a,
+ /* FW image corrupted: */
+ CMD_STAT_BAD_NVMEM = 0x0b,
+ /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
+ CMD_STAT_ICM_ERROR = 0x0c,
+ /* Attempt to modify a QP/EE which is not in the presumed state: */
+ CMD_STAT_BAD_QP_STATE = 0x10,
+ /* Bad segment parameters (Address/Size): */
+ CMD_STAT_BAD_SEG_PARAM = 0x20,
+ /* Memory Region has Memory Windows bound to: */
+ CMD_STAT_REG_BOUND = 0x21,
+ /* HCA local attached memory not present: */
+ CMD_STAT_LAM_NOT_PRE = 0x22,
+ /* Bad management packet (silently discarded): */
+ CMD_STAT_BAD_PKT = 0x30,
+ /* More outstanding CQEs in CQ than new CQ size: */
+ CMD_STAT_BAD_SIZE = 0x40,
+ /* Multi Function device support required: */
+ CMD_STAT_MULTI_FUNC_REQ = 0x50,
+};
+
+enum {
+ HCR_IN_PARAM_OFFSET = 0x00,
+ HCR_IN_MODIFIER_OFFSET = 0x08,
+ HCR_OUT_PARAM_OFFSET = 0x0c,
+ HCR_TOKEN_OFFSET = 0x14,
+ HCR_STATUS_OFFSET = 0x18,
+
+ HCR_OPMOD_SHIFT = 12,
+ HCR_T_BIT = 21,
+ HCR_E_BIT = 22,
+ HCR_GO_BIT = 23
+};
+
+enum {
+ GO_BIT_TIMEOUT_MSECS = 10000
+};
+
+struct mlx4_cmd_context {
+ struct completion done;
+ int result;
+ int next;
+ u64 out_param;
+ u16 token;
+};
+
+static int mlx4_status_to_errno(u8 status)
+{
+ static const int trans_table[] = {
+ [CMD_STAT_INTERNAL_ERR] = -EIO,
+ [CMD_STAT_BAD_OP] = -EPERM,
+ [CMD_STAT_BAD_PARAM] = -EINVAL,
+ [CMD_STAT_BAD_SYS_STATE] = -ENXIO,
+ [CMD_STAT_BAD_RESOURCE] = -EBADF,
+ [CMD_STAT_RESOURCE_BUSY] = -EBUSY,
+ [CMD_STAT_EXCEED_LIM] = -ENOMEM,
+ [CMD_STAT_BAD_RES_STATE] = -EBADF,
+ [CMD_STAT_BAD_INDEX] = -EBADF,
+ [CMD_STAT_BAD_NVMEM] = -EFAULT,
+ [CMD_STAT_ICM_ERROR] = -ENFILE,
+ [CMD_STAT_BAD_QP_STATE] = -EINVAL,
+ [CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
+ [CMD_STAT_REG_BOUND] = -EBUSY,
+ [CMD_STAT_LAM_NOT_PRE] = -EAGAIN,
+ [CMD_STAT_BAD_PKT] = -EINVAL,
+ [CMD_STAT_BAD_SIZE] = -ENOMEM,
+ [CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
+ };
+
+ if (status >= ARRAY_SIZE(trans_table) ||
+ (status != CMD_STAT_OK && trans_table[status] == 0))
+ return -EIO;
+
+ return trans_table[status];
+}
+
+static int cmd_pending(struct mlx4_dev *dev)
+{
+ u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
+
+ return (status & swab32(1 << HCR_GO_BIT)) ||
+ (mlx4_priv(dev)->cmd.toggle ==
+ !!(status & swab32(1 << HCR_T_BIT)));
+}
+
+static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
+ u32 in_modifier, u8 op_modifier, u16 op, u16 token,
+ int event)
+{
+ struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
+ u32 __iomem *hcr = cmd->hcr;
+ int ret = -EAGAIN;
+ unsigned long end;
+
+ mutex_lock(&cmd->hcr_mutex);
+
+ end = jiffies;
+ if (event)
+ end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
+
+ while (cmd_pending(dev)) {
+ if (time_after_eq(jiffies, end))
+ goto out;
+ cond_resched();
+ }
+
+ /*
+ * We use writel (instead of something like memcpy_toio)
+ * because writes of less than 32 bits to the HCR don't work
+ * (and some architectures such as ia64 implement memcpy_toio
+ * in terms of writeb).
+ */
+ __raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0);
+ __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1);
+ __raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2);
+ __raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3);
+ __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
+ __raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5);
+
+ /* __raw_writel may not order writes. */
+ wmb();
+
+ __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
+ (cmd->toggle << HCR_T_BIT) |
+ (event ? (1 << HCR_E_BIT) : 0) |
+ (op_modifier << HCR_OPMOD_SHIFT) |
+ op), hcr + 6);
+
+ /*
+ * Make sure that our HCR writes don't get mixed in with
+ * writes from another CPU starting a FW command.
+ */
+ mmiowb();
+
+ cmd->toggle = cmd->toggle ^ 1;
+
+ ret = 0;
+
+out:
+ mutex_unlock(&cmd->hcr_mutex);
+ return ret;
+}
+
+static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+ int out_is_imm, u32 in_modifier, u8 op_modifier,
+ u16 op, unsigned long timeout)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ void __iomem *hcr = priv->cmd.hcr;
+ int err = 0;
+ unsigned long end;
+
+ down(&priv->cmd.poll_sem);
+
+ err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
+ in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
+ if (err)
+ goto out;
+
+ end = msecs_to_jiffies(timeout) + jiffies;
+ while (cmd_pending(dev) && time_before(jiffies, end))
+ cond_resched();
+
+ if (cmd_pending(dev)) {
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (out_is_imm)
+ *out_param =
+ (u64) be32_to_cpu((__force __be32)
+ __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
+ (u64) be32_to_cpu((__force __be32)
+ __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
+
+ err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
+ __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);
+
+out:
+ up(&priv->cmd.poll_sem);
+ return err;
+}
+
+void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cmd_context *context =
+ &priv->cmd.context[token & priv->cmd.token_mask];
+
+ /* previously timed out command completing at long last */
+ if (token != context->token)
+ return;
+
+ context->result = mlx4_status_to_errno(status);
+ context->out_param = out_param;
+
+ complete(&context->done);
+}
+
+static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+ int out_is_imm, u32 in_modifier, u8 op_modifier,
+ u16 op, unsigned long timeout)
+{
+ struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
+ struct mlx4_cmd_context *context;
+ int err = 0;
+
+ down(&cmd->event_sem);
+
+ spin_lock(&cmd->context_lock);
+ BUG_ON(cmd->free_head < 0);
+ context = &cmd->context[cmd->free_head];
+ context->token += cmd->token_mask + 1;
+ cmd->free_head = context->next;
+ spin_unlock(&cmd->context_lock);
+
+ init_completion(&context->done);
+
+ mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
+ in_modifier, op_modifier, op, context->token, 1);
+
+ if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = context->result;
+ if (err)
+ goto out;
+
+ if (out_is_imm)
+ *out_param = context->out_param;
+
+out:
+ spin_lock(&cmd->context_lock);
+ context->next = cmd->free_head;
+ cmd->free_head = context - cmd->context;
+ spin_unlock(&cmd->context_lock);
+
+ up(&cmd->event_sem);
+ return err;
+}
+
+int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+ int out_is_imm, u32 in_modifier, u8 op_modifier,
+ u16 op, unsigned long timeout)
+{
+ if (mlx4_priv(dev)->cmd.use_events)
+ return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
+ in_modifier, op_modifier, op, timeout);
+ else
+ return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
+ in_modifier, op_modifier, op, timeout);
+}
+EXPORT_SYMBOL_GPL(__mlx4_cmd);
+
+int mlx4_cmd_init(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ mutex_init(&priv->cmd.hcr_mutex);
+ sema_init(&priv->cmd.poll_sem, 1);
+ priv->cmd.use_events = 0;
+ priv->cmd.toggle = 1;
+
+ priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
+ MLX4_HCR_SIZE);
+ if (!priv->cmd.hcr) {
+ mlx4_err(dev, "Couldn't map command register.");
+ return -ENOMEM;
+ }
+
+ priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
+ MLX4_MAILBOX_SIZE,
+ MLX4_MAILBOX_SIZE, 0);
+ if (!priv->cmd.pool) {
+ iounmap(priv->cmd.hcr);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void mlx4_cmd_cleanup(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ pci_pool_destroy(priv->cmd.pool);
+ iounmap(priv->cmd.hcr);
+}
+
+/*
+ * Switch to using events to issue FW commands (can only be called
+ * after event queue for command events has been initialized).
+ */
+int mlx4_cmd_use_events(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+
+ priv->cmd.context = kmalloc(priv->cmd.max_cmds *
+ sizeof (struct mlx4_cmd_context),
+ GFP_KERNEL);
+ if (!priv->cmd.context)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->cmd.max_cmds; ++i) {
+ priv->cmd.context[i].token = i;
+ priv->cmd.context[i].next = i + 1;
+ }
+
+ priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
+ priv->cmd.free_head = 0;
+
+ sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
+ spin_lock_init(&priv->cmd.context_lock);
+
+ for (priv->cmd.token_mask = 1;
+ priv->cmd.token_mask < priv->cmd.max_cmds;
+ priv->cmd.token_mask <<= 1)
+ ; /* nothing */
+ --priv->cmd.token_mask;
+
+ priv->cmd.use_events = 1;
+
+ down(&priv->cmd.poll_sem);
+
+ return 0;
+}
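The open-coded loop computes the smallest power of two not below max_cmds, minus one; max_cmds = 5 and max_cmds = 8 both yield a mask of 0x7. Assuming <linux/log2.h> is available here, the same value can be written as:

	priv->cmd.token_mask = roundup_pow_of_two(priv->cmd.max_cmds) - 1;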
+
+/*
+ * Switch back to polling (used when shutting down the device)
+ */
+void mlx4_cmd_use_polling(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+
+ priv->cmd.use_events = 0;
+
+ for (i = 0; i < priv->cmd.max_cmds; ++i)
+ down(&priv->cmd.event_sem);
+
+ kfree(priv->cmd.context);
+
+ up(&priv->cmd.poll_sem);
+}
+
+struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+
+ mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
+ if (!mailbox)
+ return ERR_PTR(-ENOMEM);
+
+ mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
+ &mailbox->dma);
+ if (!mailbox->buf) {
+ kfree(mailbox);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return mailbox;
+}
+EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
+
+void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
+{
+ if (!mailbox)
+ return;
+
+ pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
+ kfree(mailbox);
+}
+EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
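For reference, the caller pairing for these two helpers (used by cq.c below) looks like this:

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	/* ... fill mailbox->buf, pass mailbox->dma to a FW command ... */
	mlx4_free_cmd_mailbox(dev, mailbox);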
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
new file mode 100644
index 000000000000..bd8ef9f2fa71
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/hardirq.h>
+#include <linux/gfp.h>
+
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/cq.h>
+
+#include "mlx4.h"
+#include "icm.h"
+
+struct mlx4_cq_context {
+ __be32 flags;
+ u16 reserved1[3];
+ __be16 page_offset;
+ __be32 logsize_usrpage;
+ __be16 cq_period;
+ __be16 cq_max_count;
+ u8 reserved2[3];
+ u8 comp_eqn;
+ u8 log_page_size;
+ u8 reserved3[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ __be32 last_notified_index;
+ __be32 solicit_producer_index;
+ __be32 consumer_index;
+ __be32 producer_index;
+ u32 reserved4[2];
+ __be64 db_rec_addr;
+};
+
+#define MLX4_CQ_STATUS_OK ( 0 << 28)
+#define MLX4_CQ_STATUS_OVERFLOW ( 9 << 28)
+#define MLX4_CQ_STATUS_WRITE_FAIL (10 << 28)
+#define MLX4_CQ_FLAG_CC ( 1 << 18)
+#define MLX4_CQ_FLAG_OI ( 1 << 17)
+#define MLX4_CQ_STATE_ARMED ( 9 << 8)
+#define MLX4_CQ_STATE_ARMED_SOL ( 6 << 8)
+#define MLX4_EQ_STATE_FIRED (10 << 8)
+
+void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
+{
+ struct mlx4_cq *cq;
+
+ cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
+ cqn & (dev->caps.num_cqs - 1));
+ if (!cq) {
+ mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
+ return;
+ }
+
+ ++cq->arm_sn;
+
+ cq->comp(cq);
+}
+
+void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
+{
+ struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
+ struct mlx4_cq *cq;
+
+ spin_lock(&cq_table->lock);
+
+ cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
+ if (cq)
+ atomic_inc(&cq->refcount);
+
+ spin_unlock(&cq_table->lock);
+
+ if (!cq) {
+ mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+ return;
+ }
+
+ cq->event(cq, event_type);
+
+ if (atomic_dec_and_test(&cq->refcount))
+ complete(&cq->free);
+}
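This lookup-and-reference dance pairs with mlx4_cq_free() further down: each event-path user takes a reference under cq_table->lock, and the last put completes cq->free. The teardown side drops the initial reference (set to 1 at allocation) and then blocks until in-flight handlers drain:

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);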
+
+static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int cq_num)
+{
+ return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int cq_num, u32 opmod)
+{
+ return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int cq_num)
+{
+ return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
+ mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
+int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
+ u16 count, u16 period)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_cq_context *cq_context;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ cq_context = mailbox->buf;
+ memset(cq_context, 0, sizeof *cq_context);
+
+ cq_context->cq_max_count = cpu_to_be16(count);
+ cq_context->cq_period = cpu_to_be16(period);
+
+ err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_cq_modify);
+
+int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
+ int entries, struct mlx4_mtt *mtt)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_cq_context *cq_context;
+ u64 mtt_addr;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ cq_context = mailbox->buf;
+ memset(cq_context, 0, sizeof *cq_context);
+
+ cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
+ cq_context->log_page_size = mtt->page_shift - 12;
+ mtt_addr = mlx4_mtt_addr(dev, mtt);
+ cq_context->mtt_base_addr_h = mtt_addr >> 32;
+ cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
+
+ err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_cq_resize);
+
+int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
+ struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
+ unsigned vector, int collapsed)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cq_table *cq_table = &priv->cq_table;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_cq_context *cq_context;
+ u64 mtt_addr;
+ int err;
+
+ if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
+ return -EINVAL;
+
+ cq->vector = vector;
+
+ cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
+ if (cq->cqn == -1)
+ return -ENOMEM;
+
+ err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
+ if (err)
+ goto err_out;
+
+ err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
+ if (err)
+ goto err_put;
+
+ spin_lock_irq(&cq_table->lock);
+ err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
+ spin_unlock_irq(&cq_table->lock);
+ if (err)
+ goto err_cmpt_put;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ goto err_radix;
+ }
+
+ cq_context = mailbox->buf;
+ memset(cq_context, 0, sizeof *cq_context);
+
+ cq_context->flags = cpu_to_be32(!!collapsed << 18);
+ cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
+ cq_context->comp_eqn = priv->eq_table.eq[vector].eqn;
+ cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
+
+ mtt_addr = mlx4_mtt_addr(dev, mtt);
+ cq_context->mtt_base_addr_h = mtt_addr >> 32;
+ cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
+ cq_context->db_rec_addr = cpu_to_be64(db_rec);
+
+ err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ if (err)
+ goto err_radix;
+
+ cq->cons_index = 0;
+ cq->arm_sn = 1;
+ cq->uar = uar;
+ atomic_set(&cq->refcount, 1);
+ init_completion(&cq->free);
+
+ return 0;
+
+err_radix:
+ spin_lock_irq(&cq_table->lock);
+ radix_tree_delete(&cq_table->tree, cq->cqn);
+ spin_unlock_irq(&cq_table->lock);
+
+err_cmpt_put:
+ mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);
+
+err_put:
+ mlx4_table_put(dev, &cq_table->table, cq->cqn);
+
+err_out:
+ mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_cq_alloc);
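mlx4_cq_alloc() is a textbook example of goto-based error unwinding: each label undoes exactly the steps that succeeded before the failure, in reverse order. The shape, reduced to two resources (get_a/put_a are hypothetical):

	err = get_a();
	if (err)
		goto err_out;
	err = get_b();
	if (err)
		goto err_put_a;	/* undo only what already succeeded */
	return 0;

err_put_a:
	put_a();
err_out:
	return err;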
+
+void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cq_table *cq_table = &priv->cq_table;
+ int err;
+
+ err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
+ if (err)
+ mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
+
+ synchronize_irq(priv->eq_table.eq[cq->vector].irq);
+
+ spin_lock_irq(&cq_table->lock);
+ radix_tree_delete(&cq_table->tree, cq->cqn);
+ spin_unlock_irq(&cq_table->lock);
+
+ if (atomic_dec_and_test(&cq->refcount))
+ complete(&cq->free);
+ wait_for_completion(&cq->free);
+
+ mlx4_table_put(dev, &cq_table->table, cq->cqn);
+ mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+}
+EXPORT_SYMBOL_GPL(mlx4_cq_free);
+
+int mlx4_init_cq_table(struct mlx4_dev *dev)
+{
+ struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
+ int err;
+
+ spin_lock_init(&cq_table->lock);
+ INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+
+ err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
+ dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
+{
+ /* Nothing to do to clean up radix_tree */
+ mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
new file mode 100644
index 000000000000..ec4b6d047fe0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx4/cq.h>
+#include <linux/mlx4/qp.h>
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4_en.h"
+
+static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
+{
+ return;
+}
+
+
+int mlx4_en_create_cq(struct mlx4_en_priv *priv,
+ struct mlx4_en_cq *cq,
+ int entries, int ring, enum cq_type mode)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+
+ cq->size = entries;
+ if (mode == RX)
+ cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
+ else
+ cq->buf_size = sizeof(struct mlx4_cqe);
+
+ cq->ring = ring;
+ cq->is_tx = mode;
+ spin_lock_init(&cq->lock);
+
+ err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
+ cq->buf_size, 2 * PAGE_SIZE);
+ if (err)
+ return err;
+
+ err = mlx4_en_map_buffer(&cq->wqres.buf);
+ if (err)
+ mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+ else
+ cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
+
+ return err;
+}
+
+int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err = 0;
+ char name[25];
+
+ cq->dev = mdev->pndev[priv->port];
+ cq->mcq.set_ci_db = cq->wqres.db.db;
+ cq->mcq.arm_db = cq->wqres.db.db + 1;
+ *cq->mcq.set_ci_db = 0;
+ *cq->mcq.arm_db = 0;
+ memset(cq->buf, 0, cq->buf_size);
+
+ if (cq->is_tx == RX) {
+ if (mdev->dev->caps.comp_pool) {
+ if (!cq->vector) {
+ sprintf(name , "%s-rx-%d", priv->dev->name, cq->ring);
+ if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) {
+ cq->vector = (cq->ring + 1 + priv->port) %
+ mdev->dev->caps.num_comp_vectors;
+ mlx4_warn(mdev, "Failed Assigning an EQ to "
+ "%s_rx-%d ,Falling back to legacy EQ's\n",
+ priv->dev->name, cq->ring);
+ }
+ }
+ } else {
+ cq->vector = (cq->ring + 1 + priv->port) %
+ mdev->dev->caps.num_comp_vectors;
+ }
+ } else {
+ if (!cq->vector || !mdev->dev->caps.comp_pool) {
+ /* Fall back to the legacy pool in case of error */
+ cq->vector = 0;
+ }
+ }
+
+ if (!cq->is_tx)
+ cq->size = priv->rx_ring[cq->ring].actual_size;
+
+ err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
+ cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
+ if (err)
+ return err;
+
+ cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
+ cq->mcq.event = mlx4_en_cq_event;
+
+ if (cq->is_tx) {
+ init_timer(&cq->timer);
+ cq->timer.function = mlx4_en_poll_tx_cq;
+ cq->timer.data = (unsigned long) cq;
+ } else {
+ netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
+ napi_enable(&cq->napi);
+ }
+
+ return 0;
+}
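Note the split above: RX CQs are serviced by NAPI, TX CQs by a timer-driven cleanup. The poll callback registered here (mlx4_en_poll_rx_cq, defined elsewhere in the driver) conventionally follows this shape; process_rx() is a hypothetical stand-in for the per-driver work:

	static int poll(struct napi_struct *napi, int budget)
	{
		int done = process_rx(napi, budget);	/* completions handled */

		if (done < budget) {
			napi_complete(napi);	/* caught up: leave poll mode */
			/* ... re-arm the CQ interrupt here ... */
		}
		return done;
	}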
+
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+ bool reserve_vectors)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ mlx4_en_unmap_buffer(&cq->wqres.buf);
+ mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+ if (priv->mdev->dev->caps.comp_pool && cq->vector && !reserve_vectors)
+ mlx4_release_eq(priv->mdev->dev, cq->vector);
+ cq->buf_size = 0;
+ cq->buf = NULL;
+}
+
+void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ if (cq->is_tx)
+ del_timer(&cq->timer);
+ else {
+ napi_disable(&cq->napi);
+ netif_napi_del(&cq->napi);
+ }
+
+ mlx4_cq_free(mdev->dev, &cq->mcq);
+}
+
+/* Set rx cq moderation parameters */
+int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+ return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
+ cq->moder_cnt, cq->moder_time);
+}
+
+int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+ mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
+ &priv->mdev->uar_lock);
+
+ return 0;
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
new file mode 100644
index 000000000000..eb096253d781
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -0,0 +1,477 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "mlx4_en.h"
+#include "en_port.h"
+
+
+static void
+mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ strncpy(drvinfo->driver, DRV_NAME, 32);
+ strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
+ sprintf(drvinfo->fw_version, "%d.%d.%d",
+ (u16) (mdev->dev->caps.fw_ver >> 32),
+ (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
+ (u16) (mdev->dev->caps.fw_ver & 0xffff));
+ strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
+ drvinfo->n_stats = 0;
+ drvinfo->regdump_len = 0;
+ drvinfo->eedump_len = 0;
+}
+
+static const char main_strings[][ETH_GSTRING_LEN] = {
+ "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
+ "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
+ "rx_length_errors", "rx_over_errors", "rx_crc_errors",
+ "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
+ "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
+ "tx_heartbeat_errors", "tx_window_errors",
+
+ /* port statistics */
+ "tso_packets",
+ "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
+ "rx_csum_good", "rx_csum_none", "tx_chksum_offload",
+
+ /* packet statistics */
+ "broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
+ "rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
+ "tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
+ "tx_prio_6", "tx_prio_7",
+};
+#define NUM_MAIN_STATS 21
+#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
+
+static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
+ "Interupt Test",
+ "Link Test",
+ "Speed Test",
+ "Register Test",
+ "Loopback Test",
+};
+
+static u32 mlx4_en_get_msglevel(struct net_device *dev)
+{
+ return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
+}
+
+static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
+{
+ ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
+}
+
+static void mlx4_en_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+ int err = 0;
+ u64 config = 0;
+
+ if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ return;
+ }
+
+ err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
+ if (err) {
+ en_err(priv, "Failed to get WoL information\n");
+ return;
+ }
+
+ if (config & MLX4_EN_WOL_MAGIC)
+ wol->supported = WAKE_MAGIC;
+ else
+ wol->supported = 0;
+
+ if (config & MLX4_EN_WOL_ENABLED)
+ wol->wolopts = WAKE_MAGIC;
+ else
+ wol->wolopts = 0;
+}
+
+static int mlx4_en_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+ u64 config = 0;
+ int err = 0;
+
+ if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL))
+ return -EOPNOTSUPP;
+
+ if (wol->supported & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
+ if (err) {
+ en_err(priv, "Failed to get WoL info, unable to modify\n");
+ return err;
+ }
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
+ MLX4_EN_WOL_MAGIC;
+ } else {
+ config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
+ config |= MLX4_EN_WOL_DO_MODIFY;
+ }
+
+ err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
+ if (err)
+ en_err(priv, "Failed to set WoL information\n");
+
+ return err;
+}
+
+static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return NUM_ALL_STATS +
+ (priv->tx_ring_num + priv->rx_ring_num) * 2;
+ case ETH_SS_TEST:
+ return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
+ & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
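A subtlety in the ETH_SS_TEST arm: unary ! binds tighter than *, so the expression subtracts either 0 or 2, dropping the two loopback-dependent self-tests when the hardware lacks UC loopback support. Equivalently, with the precedence made explicit:

	return MLX4_EN_NUM_SELF_TEST -
		((priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) ?
		 0 : 2);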
+
+static void mlx4_en_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, uint64_t *data)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int index = 0;
+ int i;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ for (i = 0; i < NUM_MAIN_STATS; i++)
+ data[index++] = ((unsigned long *) &priv->stats)[i];
+ for (i = 0; i < NUM_PORT_STATS; i++)
+ data[index++] = ((unsigned long *) &priv->port_stats)[i];
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ data[index++] = priv->tx_ring[i].packets;
+ data[index++] = priv->tx_ring[i].bytes;
+ }
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ data[index++] = priv->rx_ring[i].packets;
+ data[index++] = priv->rx_ring[i].bytes;
+ }
+ for (i = 0; i < NUM_PKT_STATS; i++)
+ data[index++] = ((unsigned long *) &priv->pkstats)[i];
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void mlx4_en_self_test(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ mlx4_en_ex_selftest(dev, &etest->flags, buf);
+}
+
+static void mlx4_en_get_strings(struct net_device *dev,
+ uint32_t stringset, uint8_t *data)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int index = 0;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
+ strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
+ if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
+ for (; i < MLX4_EN_NUM_SELF_TEST; i++)
+ strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
+ break;
+
+ case ETH_SS_STATS:
+ /* Add main counters */
+ for (i = 0; i < NUM_MAIN_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
+ for (i = 0; i< NUM_PORT_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[i + NUM_MAIN_STATS]);
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "tx%d_packets", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "tx%d_bytes", i);
+ }
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_packets", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_bytes", i);
+ }
+ for (i = 0; i< NUM_PKT_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
+ break;
+ }
+}
+
+static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int trans_type;
+
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->supported = SUPPORTED_10000baseT_Full;
+ cmd->advertising = ADVERTISED_10000baseT_Full;
+
+ if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
+ return -ENOMEM;
+
+ trans_type = priv->port_state.transciver;
+ if (netif_carrier_ok(dev)) {
+ ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
+ cmd->duplex = DUPLEX_FULL;
+ } else {
+ ethtool_cmd_speed_set(cmd, -1);
+ cmd->duplex = -1;
+ }
+
+ if (trans_type > 0 && trans_type <= 0xC) {
+ cmd->port = PORT_FIBRE;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_FIBRE;
+ } else if (trans_type == 0x80 || trans_type == 0) {
+ cmd->port = PORT_TP;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->supported |= SUPPORTED_TP;
+ cmd->advertising |= ADVERTISED_TP;
+ } else {
+ cmd->port = -1;
+ cmd->transceiver = -1;
+ }
+ return 0;
+}
+
+static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ if ((cmd->autoneg == AUTONEG_ENABLE) ||
+ (ethtool_cmd_speed(cmd) != SPEED_10000) ||
+ (cmd->duplex != DUPLEX_FULL))
+ return -EINVAL;
+
+ /* Nothing to change */
+ return 0;
+}
+
+static int mlx4_en_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ coal->tx_coalesce_usecs = 0;
+ coal->tx_max_coalesced_frames = 0;
+ coal->rx_coalesce_usecs = priv->rx_usecs;
+ coal->rx_max_coalesced_frames = priv->rx_frames;
+
+ coal->pkt_rate_low = priv->pkt_rate_low;
+ coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
+ coal->pkt_rate_high = priv->pkt_rate_high;
+ coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
+ coal->rate_sample_interval = priv->sample_interval;
+ coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
+ return 0;
+}
+
+static int mlx4_en_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int err, i;
+
+ priv->rx_frames = (coal->rx_max_coalesced_frames ==
+ MLX4_EN_AUTO_CONF) ?
+ MLX4_EN_RX_COAL_TARGET :
+ coal->rx_max_coalesced_frames;
+ priv->rx_usecs = (coal->rx_coalesce_usecs ==
+ MLX4_EN_AUTO_CONF) ?
+ MLX4_EN_RX_COAL_TIME :
+ coal->rx_coalesce_usecs;
+
+ /* Set adaptive coalescing params */
+ priv->pkt_rate_low = coal->pkt_rate_low;
+ priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
+ priv->pkt_rate_high = coal->pkt_rate_high;
+ priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
+ priv->sample_interval = coal->rate_sample_interval;
+ priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
+ priv->last_moder_time = MLX4_EN_AUTO_CONF;
+ if (priv->adaptive_rx_coal)
+ return 0;
+
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ priv->rx_cq[i].moder_cnt = priv->rx_frames;
+ priv->rx_cq[i].moder_time = priv->rx_usecs;
+ err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int mlx4_en_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+
+ priv->prof->tx_pause = pause->tx_pause != 0;
+ priv->prof->rx_pause = pause->rx_pause != 0;
+ err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+ priv->rx_skb_size + ETH_FCS_LEN,
+ priv->prof->tx_pause,
+ priv->prof->tx_ppp,
+ priv->prof->rx_pause,
+ priv->prof->rx_ppp);
+ if (err)
+ en_err(priv, "Failed setting pause params\n");
+
+ return err;
+}
+
+static void mlx4_en_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ pause->tx_pause = priv->prof->tx_pause;
+ pause->rx_pause = priv->prof->rx_pause;
+}
+
+static int mlx4_en_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ u32 rx_size, tx_size;
+ int port_up = 0;
+ int err = 0;
+
+ if (param->rx_jumbo_pending || param->rx_mini_pending)
+ return -EINVAL;
+
+ rx_size = roundup_pow_of_two(param->rx_pending);
+ rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
+ rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
+ tx_size = roundup_pow_of_two(param->tx_pending);
+ tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
+ tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
+
+ if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size :
+ priv->rx_ring[0].size) &&
+ tx_size == priv->tx_ring[0].size)
+ return 0;
+
+ mutex_lock(&mdev->state_lock);
+ if (priv->port_up) {
+ port_up = 1;
+ mlx4_en_stop_port(dev);
+ }
+
+ mlx4_en_free_resources(priv, true);
+
+ priv->prof->tx_ring_size = tx_size;
+ priv->prof->rx_ring_size = rx_size;
+
+ err = mlx4_en_alloc_resources(priv);
+ if (err) {
+ en_err(priv, "Failed reallocating port resources\n");
+ goto out;
+ }
+ if (port_up) {
+ err = mlx4_en_start_port(dev);
+ if (err)
+ en_err(priv, "Failed starting port\n");
+ }
+
+out:
+ mutex_unlock(&mdev->state_lock);
+ return err;
+}
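The requested sizes are normalized by rounding up to a power of two and then bounding them into the supported range. The max_t/min_t pair could equally be written with clamp_t() from <linux/kernel.h> (a hedged alternative, same behavior):

	rx_size = clamp_t(u32, roundup_pow_of_two(param->rx_pending),
			  MLX4_EN_MIN_RX_SIZE, MLX4_EN_MAX_RX_SIZE);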
+
+static void mlx4_en_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ memset(param, 0, sizeof(*param));
+ param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
+ param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
+ param->rx_pending = priv->port_up ?
+ priv->rx_ring[0].actual_size : priv->rx_ring[0].size;
+ param->tx_pending = priv->tx_ring[0].size;
+}
+
+const struct ethtool_ops mlx4_en_ethtool_ops = {
+ .get_drvinfo = mlx4_en_get_drvinfo,
+ .get_settings = mlx4_en_get_settings,
+ .set_settings = mlx4_en_set_settings,
+ .get_link = ethtool_op_get_link,
+ .get_strings = mlx4_en_get_strings,
+ .get_sset_count = mlx4_en_get_sset_count,
+ .get_ethtool_stats = mlx4_en_get_ethtool_stats,
+ .self_test = mlx4_en_self_test,
+ .get_wol = mlx4_en_get_wol,
+ .set_wol = mlx4_en_set_wol,
+ .get_msglevel = mlx4_en_get_msglevel,
+ .set_msglevel = mlx4_en_set_msglevel,
+ .get_coalesce = mlx4_en_get_coalesce,
+ .set_coalesce = mlx4_en_set_coalesce,
+ .get_pauseparam = mlx4_en_get_pauseparam,
+ .set_pauseparam = mlx4_en_set_pauseparam,
+ .get_ringparam = mlx4_en_get_ringparam,
+ .set_ringparam = mlx4_en_set_ringparam,
+};
+
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
new file mode 100644
index 000000000000..6bfea233a9f2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -0,0 +1,315 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/cpumask.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+
+#include <linux/mlx4/driver.h>
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4_en.h"
+
+MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
+MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")");
+
+static const char mlx4_en_version[] =
+ DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
+ DRV_VERSION " (" DRV_RELDATE ")\n";
+
+#define MLX4_EN_PARM_INT(X, def_val, desc) \
+ static unsigned int X = def_val;\
+ module_param(X , uint, 0444); \
+ MODULE_PARM_DESC(X, desc);
+
+
+/*
+ * Device scope module parameters
+ */
+
+
+/* Enable RSS TCP traffic */
+MLX4_EN_PARM_INT(tcp_rss, 1,
+ "Enable RSS for incomming TCP traffic or disabled (0)");
+/* Enable RSS UDP traffic */
+MLX4_EN_PARM_INT(udp_rss, 1,
+ "Enable RSS for incomming UDP traffic or disabled (0)");
+
+/* Priority pausing */
+MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
+ " Per priority bit mask");
+MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
+ " Per priority bit mask");
+
+int en_print(const char *level, const struct mlx4_en_priv *priv,
+ const char *format, ...)
+{
+ va_list args;
+ struct va_format vaf;
+ int i;
+
+ va_start(args, format);
+
+ vaf.fmt = format;
+ vaf.va = &args;
+ if (priv->registered)
+ i = printk("%s%s: %s: %pV",
+ level, DRV_NAME, priv->dev->name, &vaf);
+ else
+ i = printk("%s%s: %s: Port %d: %pV",
+ level, DRV_NAME, dev_name(&priv->mdev->pdev->dev),
+ priv->port, &vaf);
+ va_end(args);
+
+ return i;
+}
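en_print() leans on the kernel's %pV extension and struct va_format, so one call site formats the driver/device prefix and forwards the caller's format string unchanged. The per-level helpers used throughout (en_err, en_dbg, en_warn, ...) are presumably thin macros over it, along these lines (sketch; the real definitions live in mlx4_en.h):

	#define en_info(priv, fmt, ...) \
		en_print(KERN_INFO, priv, fmt, ##__VA_ARGS__)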
+
+static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
+{
+ struct mlx4_en_profile *params = &mdev->profile;
+ int i;
+
+ params->tcp_rss = tcp_rss;
+ params->udp_rss = udp_rss;
+ if (params->udp_rss && !(mdev->dev->caps.flags
+ & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
+ mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
+ params->udp_rss = 0;
+ }
+ for (i = 1; i <= MLX4_MAX_PORTS; i++) {
+ params->prof[i].rx_pause = 1;
+ params->prof[i].rx_ppp = pfcrx;
+ params->prof[i].tx_pause = 1;
+ params->prof[i].tx_ppp = pfctx;
+ params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
+ params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
+ params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS +
+ (!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
+ }
+
+ return 0;
+}
+
+static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port)
+{
+ struct mlx4_en_dev *endev = ctx;
+
+ return endev->pndev[port];
+}
+
+static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
+ enum mlx4_dev_event event, int port)
+{
+ struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
+ struct mlx4_en_priv *priv;
+
+ if (!mdev->pndev[port])
+ return;
+
+ priv = netdev_priv(mdev->pndev[port]);
+ switch (event) {
+ case MLX4_DEV_EVENT_PORT_UP:
+ case MLX4_DEV_EVENT_PORT_DOWN:
+ /* To prevent races, we poll the link state in a separate
+ task rather than changing it here */
+ priv->link_state = event;
+ queue_work(mdev->workqueue, &priv->linkstate_task);
+ break;
+
+ case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
+ mlx4_err(mdev, "Internal error detected, restarting device\n");
+ break;
+
+ default:
+ mlx4_warn(mdev, "Unhandled event: %d\n", event);
+ }
+}
+
+static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
+{
+ struct mlx4_en_dev *mdev = endev_ptr;
+ int i;
+
+ mutex_lock(&mdev->state_lock);
+ mdev->device_up = false;
+ mutex_unlock(&mdev->state_lock);
+
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
+ if (mdev->pndev[i])
+ mlx4_en_destroy_netdev(mdev->pndev[i]);
+
+ flush_workqueue(mdev->workqueue);
+ destroy_workqueue(mdev->workqueue);
+ mlx4_mr_free(dev, &mdev->mr);
+ mlx4_uar_free(dev, &mdev->priv_uar);
+ mlx4_pd_free(dev, mdev->priv_pdn);
+ kfree(mdev);
+}
+
+static void *mlx4_en_add(struct mlx4_dev *dev)
+{
+ struct mlx4_en_dev *mdev;
+ int i;
+ int err;
+
+ printk_once(KERN_INFO "%s", mlx4_en_version);
+
+ mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
+ if (!mdev) {
+ dev_err(&dev->pdev->dev, "Device struct alloc failed, "
+ "aborting.\n");
+ err = -ENOMEM;
+ goto err_free_res;
+ }
+
+ if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
+ goto err_free_dev;
+
+ if (mlx4_uar_alloc(dev, &mdev->priv_uar))
+ goto err_pd;
+
+ mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
+ PAGE_SIZE);
+ if (!mdev->uar_map)
+ goto err_uar;
+ spin_lock_init(&mdev->uar_lock);
+
+ mdev->dev = dev;
+ mdev->dma_device = &(dev->pdev->dev);
+ mdev->pdev = dev->pdev;
+ mdev->device_up = false;
+
+ mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
+ if (!mdev->LSO_support)
+ mlx4_warn(mdev, "LSO not supported, please upgrade to later "
+ "FW version to enable LSO\n");
+
+ if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
+ MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
+ 0, 0, &mdev->mr)) {
+ mlx4_err(mdev, "Failed allocating memory region\n");
+ goto err_uar;
+ }
+ if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
+ mlx4_err(mdev, "Failed enabling memory region\n");
+ goto err_mr;
+ }
+
+ /* Build device profile according to supplied module parameters */
+ err = mlx4_en_get_profile(mdev);
+ if (err) {
+ mlx4_err(mdev, "Bad module parameters, aborting.\n");
+ goto err_mr;
+ }
+
+ /* Configure which ports to start according to module parameters */
+ mdev->port_cnt = 0;
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
+ mdev->port_cnt++;
+
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+ if (!dev->caps.comp_pool) {
+ mdev->profile.prof[i].rx_ring_num =
+ rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
+ min_t(int,
+ dev->caps.num_comp_vectors,
+ MAX_RX_RINGS)));
+ } else {
+ mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
+ min_t(int, dev->caps.comp_pool/
+ dev->caps.num_ports - 1 , MAX_MSIX_P_PORT - 1));
+ }
+ }
+
+ /* Create our own workqueue for reset/multicast tasks
+ * Note: we cannot use the shared workqueue because of deadlocks caused
+ * by the rtnl lock */
+ mdev->workqueue = create_singlethread_workqueue("mlx4_en");
+ if (!mdev->workqueue) {
+ err = -ENOMEM;
+ goto err_mr;
+ }
+
+ /* At this stage all non-port specific tasks are complete:
+ * mark the card state as up */
+ mutex_init(&mdev->state_lock);
+ mdev->device_up = true;
+
+ /* Setup ports */
+
+ /* Create a netdev for each port */
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+ mlx4_info(mdev, "Activating port:%d\n", i);
+ if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
+ mdev->pndev[i] = NULL;
+ }
+ return mdev;
+
+err_mr:
+ mlx4_mr_free(dev, &mdev->mr);
+err_uar:
+ mlx4_uar_free(dev, &mdev->priv_uar);
+err_pd:
+ mlx4_pd_free(dev, mdev->priv_pdn);
+err_free_dev:
+ kfree(mdev);
+err_free_res:
+ return NULL;
+}
+
+static struct mlx4_interface mlx4_en_interface = {
+ .add = mlx4_en_add,
+ .remove = mlx4_en_remove,
+ .event = mlx4_en_event,
+ .get_dev = mlx4_en_get_netdev,
+ .protocol = MLX4_PROT_ETH,
+};
+
+static int __init mlx4_en_init(void)
+{
+ return mlx4_register_interface(&mlx4_en_interface);
+}
+
+static void __exit mlx4_en_cleanup(void)
+{
+ mlx4_unregister_interface(&mlx4_en_interface);
+}
+
+module_init(mlx4_en_init);
+module_exit(mlx4_en_cleanup);
+
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
new file mode 100644
index 000000000000..27789be1e6ac
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -0,0 +1,1166 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include <linux/mlx4/driver.h>
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/cq.h>
+
+#include "mlx4_en.h"
+#include "en_port.h"
+
+static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+ int idx;
+
+ en_dbg(HW, priv, "adding VLAN:%d\n", vid);
+
+ set_bit(vid, priv->active_vlans);
+
+ /* Add VID to port VLAN filter */
+ mutex_lock(&mdev->state_lock);
+ if (mdev->device_up && priv->port_up) {
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
+ if (err)
+ en_err(priv, "Failed configuring VLAN filter\n");
+ }
+ if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
+ en_err(priv, "failed adding vlan %d\n", vid);
+ mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+ int idx;
+
+ en_dbg(HW, priv, "Killing VID:%d\n", vid);
+
+ clear_bit(vid, priv->active_vlans);
+
+ /* Remove VID from port VLAN filter */
+ mutex_lock(&mdev->state_lock);
+ if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
+ mlx4_unregister_vlan(mdev->dev, priv->port, idx);
+ else
+ en_err(priv, "could not find vid %d in cache\n", vid);
+
+ if (mdev->device_up && priv->port_up) {
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
+ if (err)
+ en_err(priv, "Failed configuring VLAN filter\n");
+ }
+ mutex_unlock(&mdev->state_lock);
+}
+
+u64 mlx4_en_mac_to_u64(u8 *addr)
+{
+ u64 mac = 0;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac <<= 8;
+ mac |= addr[i];
+ }
+ return mac;
+}
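The helper packs the six address bytes big-endian, highest byte first. For example (illustrative values):

	u8 addr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u64 mac = mlx4_en_mac_to_u64(addr);	/* == 0x001122334455ULL */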
+
+static int mlx4_en_set_mac(struct net_device *dev, void *addr)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
+ priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
+ queue_work(mdev->workqueue, &priv->mac_task);
+ return 0;
+}
+
+static void mlx4_en_do_set_mac(struct work_struct *work)
+{
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ mac_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err = 0;
+
+ mutex_lock(&mdev->state_lock);
+ if (priv->port_up) {
+ /* Remove old MAC and insert the new one */
+ err = mlx4_replace_mac(mdev->dev, priv->port,
+ priv->base_qpn, priv->mac, 0);
+ if (err)
+ en_err(priv, "Failed changing HW MAC address\n");
+ } else
+ en_dbg(HW, priv, "Port is down while "
+ "registering mac, exiting...\n");
+
+ mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_clear_list(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ kfree(priv->mc_addrs);
+ priv->mc_addrs_cnt = 0;
+}
+
+static void mlx4_en_cache_mclist(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ char *mc_addrs;
+ int mc_addrs_cnt = netdev_mc_count(dev);
+ int i;
+
+ mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
+ if (!mc_addrs) {
+ en_err(priv, "failed to allocate multicast list\n");
+ return;
+ }
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
+ priv->mc_addrs = mc_addrs;
+ priv->mc_addrs_cnt = mc_addrs_cnt;
+}
+
+
+static void mlx4_en_set_multicast(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (!priv->port_up)
+ return;
+
+ queue_work(priv->mdev->workqueue, &priv->mcast_task);
+}
+
+static void mlx4_en_do_set_multicast(struct work_struct *work)
+{
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ mcast_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->dev;
+ u64 mcast_addr = 0;
+ u8 mc_list[16] = {0};
+ int err;
+
+ mutex_lock(&mdev->state_lock);
+ if (!mdev->device_up) {
+ en_dbg(HW, priv, "Card is not up, "
+ "ignoring multicast change.\n");
+ goto out;
+ }
+ if (!priv->port_up) {
+ en_dbg(HW, priv, "Port is down, "
+ "ignoring multicast change.\n");
+ goto out;
+ }
+
+ /*
+ * Promiscuous mode: disable all filters
+ */
+
+ if (dev->flags & IFF_PROMISC) {
+ if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
+ if (netif_msg_rx_status(priv))
+ en_warn(priv, "Entering promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_PROMISC;
+
+ /* Enable promiscuous mode */
+ if (!(mdev->dev->caps.flags &
+ MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
+ priv->base_qpn, 1);
+ else
+ err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed enabling "
+ "promiscuous mode\n");
+
+ /* Disable port multicast filter (unconditionally) */
+ err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+ 0, MLX4_MCAST_DISABLE);
+ if (err)
+ en_err(priv, "Failed disabling "
+ "multicast filter\n");
+
+ /* Add the default qp number as multicast promisc */
+ if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+ err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed entering multicast promisc mode\n");
+ priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+ }
+
+ /* Disable port VLAN filter */
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
+ if (err)
+ en_err(priv, "Failed disabling VLAN filter\n");
+ }
+ goto out;
+ }
+
+ /*
+ * Not in promiscuous mode
+ */
+
+ if (priv->flags & MLX4_EN_FLAG_PROMISC) {
+ if (netif_msg_rx_status(priv))
+ en_warn(priv, "Leaving promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_PROMISC;
+
+ /* Disable promiscuous mode */
+ if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
+ priv->base_qpn, 0);
+ else
+ err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed disabling promiscuous mode\n");
+
+ /* Disable Multicast promisc */
+ if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+ err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed disabling multicast promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ }
+
+ /* Enable port VLAN filter */
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
+ if (err)
+ en_err(priv, "Failed enabling VLAN filter\n");
+ }
+
+ /* Enable/disable the multicast filter according to IFF_ALLMULTI */
+ if (dev->flags & IFF_ALLMULTI) {
+ err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+ 0, MLX4_MCAST_DISABLE);
+ if (err)
+ en_err(priv, "Failed disabling multicast filter\n");
+
+ /* Add the default qp number as multicast promisc */
+ if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+ err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed entering multicast promisc mode\n");
+ priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+ }
+ } else {
+ int i;
+ /* Disable Multicast promisc */
+ if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+ err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed disabling multicast promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ }
+
+ err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+ 0, MLX4_MCAST_DISABLE);
+ if (err)
+ en_err(priv, "Failed disabling multicast filter\n");
+
+ /* Detach our qp from all the multicast addresses */
+ for (i = 0; i < priv->mc_addrs_cnt; i++) {
+ memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+ mc_list[5] = priv->port;
+ mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
+ mc_list, MLX4_PROT_ETH);
+ }
+ /* Flush mcast filter and init it with broadcast address */
+ mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
+ 1, MLX4_MCAST_CONFIG);
+
+ /* Update multicast list - we cache all addresses so they won't
+ * change while HW is updated holding the command semaphore */
+ netif_tx_lock_bh(dev);
+ mlx4_en_cache_mclist(dev);
+ netif_tx_unlock_bh(dev);
+ for (i = 0; i < priv->mc_addrs_cnt; i++) {
+ mcast_addr =
+ mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
+ memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+ mc_list[5] = priv->port;
+ mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
+ mc_list, 0, MLX4_PROT_ETH);
+ mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
+ mcast_addr, 0, MLX4_MCAST_CONFIG);
+ }
+ err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+ 0, MLX4_MCAST_ENABLE);
+ if (err)
+ en_err(priv, "Failed enabling multicast filter\n");
+ }
+out:
+ mutex_unlock(&mdev->state_lock);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void mlx4_en_netpoll(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_cq *cq;
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ cq = &priv->rx_cq[i];
+ spin_lock_irqsave(&cq->lock, flags);
+ napi_synchronize(&cq->napi);
+ mlx4_en_process_rx_cq(dev, cq, 0);
+ spin_unlock_irqrestore(&cq->lock, flags);
+ }
+}
+#endif
+
+static void mlx4_en_tx_timeout(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ if (netif_msg_timer(priv))
+ en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
+
+ priv->port_stats.tx_timeout++;
+ en_dbg(DRV, priv, "Scheduling watchdog\n");
+ queue_work(mdev->workqueue, &priv->watchdog_task);
+}
+
+
+static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ spin_lock_bh(&priv->stats_lock);
+ memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
+ spin_unlock_bh(&priv->stats_lock);
+
+ return &priv->ret_stats;
+}
+
+static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_cq *cq;
+ int i;
+
+ /* If we haven't received a specific coalescing setting
+ * (module param), we set the moderation parameters as follows:
+ * - moder_cnt is set to the number of MTU-sized packets needed to
+ * satisfy our coalescing target.
+ * - moder_time is set to a fixed value.
+ */
+ priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
+ priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
+ en_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
+ "rx_frames:%d rx_usecs:%d\n",
+ priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
+
+ /* Setup cq moderation params */
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ cq = &priv->rx_cq[i];
+ cq->moder_cnt = priv->rx_frames;
+ cq->moder_time = priv->rx_usecs;
+ }
+
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ cq = &priv->tx_cq[i];
+ cq->moder_cnt = MLX4_EN_TX_COAL_PKTS;
+ cq->moder_time = MLX4_EN_TX_COAL_TIME;
+ }
+
+ /* Reset auto-moderation params */
+ priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
+ priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
+ priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
+ priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
+ priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
+ priv->adaptive_rx_coal = 1;
+ priv->last_moder_time = MLX4_EN_AUTO_CONF;
+ priv->last_moder_jiffies = 0;
+ priv->last_moder_packets = 0;
+ priv->last_moder_tx_packets = 0;
+ priv->last_moder_bytes = 0;
+}
+
+static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
+{
+ unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
+ struct mlx4_en_cq *cq;
+ unsigned long packets;
+ unsigned long rate;
+ unsigned long avg_pkt_size;
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_pkt_diff;
+ unsigned long rx_pkt_diff;
+ int moder_time;
+ int i, err;
+
+ if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
+ return;
+
+ spin_lock_bh(&priv->stats_lock);
+ rx_packets = priv->stats.rx_packets;
+ rx_bytes = priv->stats.rx_bytes;
+ tx_packets = priv->stats.tx_packets;
+ spin_unlock_bh(&priv->stats_lock);
+
+ if (!priv->last_moder_jiffies || !period)
+ goto out;
+
+ tx_pkt_diff = ((unsigned long) (tx_packets -
+ priv->last_moder_tx_packets));
+ rx_pkt_diff = ((unsigned long) (rx_packets -
+ priv->last_moder_packets));
+ packets = max(tx_pkt_diff, rx_pkt_diff);
+ rate = packets * HZ / period;
+ avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
+ priv->last_moder_bytes)) / packets : 0;
+
+ /* Apply auto-moderation only when the packet rate is high enough
+ * for it to matter */
+ if (rate > MLX4_EN_RX_RATE_THRESH && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
+ /* If tx and rx packet rates are not balanced, assume that
+ * traffic is mainly BW bound and apply maximum moderation.
+ * Otherwise, moderate according to packet rate */
+ if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
+ 2 * rx_pkt_diff > 3 * tx_pkt_diff) {
+ moder_time = priv->rx_usecs_high;
+ } else {
+ if (rate < priv->pkt_rate_low)
+ moder_time = priv->rx_usecs_low;
+ else if (rate > priv->pkt_rate_high)
+ moder_time = priv->rx_usecs_high;
+ else
+ moder_time = (rate - priv->pkt_rate_low) *
+ (priv->rx_usecs_high - priv->rx_usecs_low) /
+ (priv->pkt_rate_high - priv->pkt_rate_low) +
+ priv->rx_usecs_low;
+ }
+ } else {
+ moder_time = priv->rx_usecs_low;
+ }
+
+ en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
+ tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
+
+ en_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
+ "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
+ priv->last_moder_time, moder_time, period, packets,
+ avg_pkt_size, rate);
+
+ if (moder_time != priv->last_moder_time) {
+ priv->last_moder_time = moder_time;
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ cq = &priv->rx_cq[i];
+ cq->moder_time = moder_time;
+ err = mlx4_en_set_cq_moder(priv, cq);
+ if (err) {
+ en_err(priv, "Failed modifying moderation for cq:%d\n", i);
+ break;
+ }
+ }
+ }
+
+out:
+ priv->last_moder_packets = rx_packets;
+ priv->last_moder_tx_packets = tx_packets;
+ priv->last_moder_bytes = rx_bytes;
+ priv->last_moder_jiffies = jiffies;
+}
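The middle branch is a straight linear interpolation between the (pkt_rate_low, rx_usecs_low) and (pkt_rate_high, rx_usecs_high) endpoints. With illustrative numbers (not the driver's defaults): usecs_low = 16, usecs_high = 128, rate_low = 10000 and rate_high = 100000, a measured rate of 55000 pps gives

	moder_time = 16 + (55000 - 10000) * (128 - 16) / (100000 - 10000)
		   = 16 + 45000 * 112 / 90000 = 72 usecs;

rates below rate_low or above rate_high clamp to the endpoints, and strongly unbalanced tx/rx rates force the maximum.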
+
+static void mlx4_en_do_get_stats(struct work_struct *work)
+{
+ struct delayed_work *delay = to_delayed_work(work);
+ struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
+ stats_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+
+ err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
+ if (err)
+ en_dbg(HW, priv, "Could not update stats\n");
+
+ mutex_lock(&mdev->state_lock);
+ if (mdev->device_up) {
+ if (priv->port_up)
+ mlx4_en_auto_moderation(priv);
+
+ queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
+ }
+ if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
+ queue_work(mdev->workqueue, &priv->mac_task);
+ mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
+ }
+ mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_linkstate(struct work_struct *work)
+{
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ linkstate_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int linkstate = priv->link_state;
+
+ mutex_lock(&mdev->state_lock);
+ /* If observable port state changed set carrier state and
+ * report to system log */
+ if (priv->last_link_state != linkstate) {
+ if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
+ en_info(priv, "Link Down\n");
+ netif_carrier_off(priv->dev);
+ } else {
+ en_info(priv, "Link Up\n");
+ netif_carrier_on(priv->dev);
+ }
+ }
+ priv->last_link_state = linkstate;
+ mutex_unlock(&mdev->state_lock);
+}
+
+
+int mlx4_en_start_port(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_cq *cq;
+ struct mlx4_en_tx_ring *tx_ring;
+ int rx_index = 0;
+ int tx_index = 0;
+ int err = 0;
+ int i;
+ int j;
+ u8 mc_list[16] = {0};
+ char name[32];
+
+ if (priv->port_up) {
+ en_dbg(DRV, priv, "start port called while port already up\n");
+ return 0;
+ }
+
+ /* Calculate Rx buf size */
+ dev->mtu = min(dev->mtu, priv->max_mtu);
+ mlx4_en_calc_rx_buf(dev);
+ en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
+
+ /* Configure rx cq's and rings */
+ err = mlx4_en_activate_rx_rings(priv);
+ if (err) {
+ en_err(priv, "Failed to activate RX rings\n");
+ return err;
+ }
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ cq = &priv->rx_cq[i];
+
+ err = mlx4_en_activate_cq(priv, cq);
+ if (err) {
+ en_err(priv, "Failed activating Rx CQ\n");
+ goto cq_err;
+ }
+ for (j = 0; j < cq->size; j++)
+ cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
+ err = mlx4_en_set_cq_moder(priv, cq);
+ if (err) {
+ en_err(priv, "Failed setting cq moderation parameters");
+ mlx4_en_deactivate_cq(priv, cq);
+ goto cq_err;
+ }
+ mlx4_en_arm_cq(priv, cq);
+ priv->rx_ring[i].cqn = cq->mcq.cqn;
+ ++rx_index;
+ }
+
+ /* Set port MAC address */
+ en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
+ err = mlx4_register_mac(mdev->dev, priv->port,
+ priv->mac, &priv->base_qpn, 0);
+ if (err) {
+ en_err(priv, "Failed setting port mac\n");
+ goto cq_err;
+ }
+ mdev->mac_removed[priv->port] = 0;
+
+ err = mlx4_en_config_rss_steer(priv);
+ if (err) {
+ en_err(priv, "Failed configuring rss steering\n");
+ goto mac_err;
+ }
+
+ if (mdev->dev->caps.comp_pool && !priv->tx_vector) {
+ sprintf(name, "%s-tx", priv->dev->name);
+ if (mlx4_assign_eq(mdev->dev, name, &priv->tx_vector)) {
+ mlx4_warn(mdev, "Failed assigning an EQ to "
+ "%s-tx, falling back to legacy "
+ "EQs\n", priv->dev->name);
+ }
+ }
+ /* Configure tx cq's and rings */
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ /* Configure cq */
+ cq = &priv->tx_cq[i];
+ cq->vector = priv->tx_vector;
+ err = mlx4_en_activate_cq(priv, cq);
+ if (err) {
+ en_err(priv, "Failed allocating Tx CQ\n");
+ goto tx_err;
+ }
+ err = mlx4_en_set_cq_moder(priv, cq);
+ if (err) {
+ en_err(priv, "Failed setting cq moderation parameters");
+ mlx4_en_deactivate_cq(priv, cq);
+ goto tx_err;
+ }
+ en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
+ cq->buf->wqe_index = cpu_to_be16(0xffff);
+
+ /* Configure ring */
+ tx_ring = &priv->tx_ring[i];
+ err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn);
+ if (err) {
+ en_err(priv, "Failed allocating Tx ring\n");
+ mlx4_en_deactivate_cq(priv, cq);
+ goto tx_err;
+ }
+ /* Set initial ownership of all Tx TXBBs to SW (1) */
+ for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
+ *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
+ ++tx_index;
+ }
+
+ /* Configure port */
+ err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+ priv->rx_skb_size + ETH_FCS_LEN,
+ priv->prof->tx_pause,
+ priv->prof->tx_ppp,
+ priv->prof->rx_pause,
+ priv->prof->rx_ppp);
+ if (err) {
+ en_err(priv, "Failed setting port general configurations "
+ "for port %d, with error %d\n", priv->port, err);
+ goto tx_err;
+ }
+ /* Set default qp number */
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
+ if (err) {
+ en_err(priv, "Failed setting default qp numbers\n");
+ goto tx_err;
+ }
+
+ /* Init port */
+ en_dbg(HW, priv, "Initializing port\n");
+ err = mlx4_INIT_PORT(mdev->dev, priv->port);
+ if (err) {
+ en_err(priv, "Failed Initializing port\n");
+ goto tx_err;
+ }
+
+ /* Attach rx QP to broadcast address */
+ memset(&mc_list[10], 0xff, ETH_ALEN);
+ mc_list[5] = priv->port;
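+ /* mc_list is laid out like a GID: bytes 10-15 hold the Ethernet
+ * broadcast address and byte 5 carries the port number */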
+ if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
+ 0, MLX4_PROT_ETH))
+ mlx4_warn(mdev, "Failed Attaching Broadcast\n");
+
+ /* Must redo promiscuous mode setup. */
+ priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
+
+ /* Schedule multicast task to populate multicast list */
+ queue_work(mdev->workqueue, &priv->mcast_task);
+
+ priv->port_up = true;
+ netif_tx_start_all_queues(dev);
+ return 0;
+
+tx_err:
+ while (tx_index--) {
+ mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
+ mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
+ }
+
+ mlx4_en_release_rss_steer(priv);
+mac_err:
+ mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
+cq_err:
+ while (rx_index--)
+ mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
+ for (i = 0; i < priv->rx_ring_num; i++)
+ mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+
+ return err; /* need to close devices */
+}
+
+
+void mlx4_en_stop_port(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int i;
+ u8 mc_list[16] = {0};
+
+ if (!priv->port_up) {
+ en_dbg(DRV, priv, "stop port called while port already down\n");
+ return;
+ }
+
+ /* Synchronize with tx routine */
+ netif_tx_lock_bh(dev);
+ netif_tx_stop_all_queues(dev);
+ netif_tx_unlock_bh(dev);
+
+ /* Set port as not active */
+ priv->port_up = false;
+
+ /* Detach All multicasts */
+ memset(&mc_list[10], 0xff, ETH_ALEN);
+ mc_list[5] = priv->port;
+ mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
+ MLX4_PROT_ETH);
+ for (i = 0; i < priv->mc_addrs_cnt; i++) {
+ memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+ mc_list[5] = priv->port;
+ mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
+ mc_list, MLX4_PROT_ETH);
+ }
+ mlx4_en_clear_list(dev);
+ /* Flush multicast filter */
+ mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
+
+ /* Unregister MAC address for the port */
+ mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
+ mdev->mac_removed[priv->port] = 1;
+
+ /* Free TX Rings */
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
+ mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
+ }
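+ /* Give outstanding Tx completions a chance to drain before the
+ * buffers are freed below */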
+ msleep(10);
+
+ for (i = 0; i < priv->tx_ring_num; i++)
+ mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
+
+ /* Free RSS qps */
+ mlx4_en_release_rss_steer(priv);
+
+ /* Free RX Rings */
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+ while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
+ msleep(1);
+ mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
+ }
+
+ /* Close port */
+ mlx4_CLOSE_PORT(mdev->dev, priv->port);
+}
+
+static void mlx4_en_restart(struct work_struct *work)
+{
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ watchdog_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->dev;
+
+ en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
+
+ mutex_lock(&mdev->state_lock);
+ if (priv->port_up) {
+ mlx4_en_stop_port(dev);
+ if (mlx4_en_start_port(dev))
+ en_err(priv, "Failed restarting port %d\n", priv->port);
+ }
+ mutex_unlock(&mdev->state_lock);
+}
+
+
+static int mlx4_en_open(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int i;
+ int err = 0;
+
+ mutex_lock(&mdev->state_lock);
+
+ if (!mdev->device_up) {
+ en_err(priv, "Cannot open - device down/disabled\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ /* Reset HW statistics and performance counters */
+ if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
+ en_dbg(HW, priv, "Failed dumping statistics\n");
+
+ memset(&priv->stats, 0, sizeof(priv->stats));
+ memset(&priv->pstats, 0, sizeof(priv->pstats));
+
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ priv->tx_ring[i].bytes = 0;
+ priv->tx_ring[i].packets = 0;
+ }
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ priv->rx_ring[i].bytes = 0;
+ priv->rx_ring[i].packets = 0;
+ }
+
+ err = mlx4_en_start_port(dev);
+ if (err)
+ en_err(priv, "Failed starting port:%d\n", priv->port);
+
+out:
+ mutex_unlock(&mdev->state_lock);
+ return err;
+}
+
+
+static int mlx4_en_close(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ en_dbg(IFDOWN, priv, "Close port called\n");
+
+ mutex_lock(&mdev->state_lock);
+
+ mlx4_en_stop_port(dev);
+ netif_carrier_off(dev);
+
+ mutex_unlock(&mdev->state_lock);
+ return 0;
+}
+
+void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors)
+{
+ int i;
+
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ if (priv->tx_ring[i].tx_info)
+ mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
+ if (priv->tx_cq[i].buf)
+ mlx4_en_destroy_cq(priv, &priv->tx_cq[i], reserve_vectors);
+ }
+
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ if (priv->rx_ring[i].rx_info)
+ mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
+ if (priv->rx_cq[i].buf)
+ mlx4_en_destroy_cq(priv, &priv->rx_cq[i], reserve_vectors);
+ }
+}
+
+int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_port_profile *prof = priv->prof;
+ int i;
+ int base_tx_qpn, err;
+
+ err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
+ if (err) {
+ en_err(priv, "failed reserving range for TX rings\n");
+ return err;
+ }
+
+ /* Create tx Rings */
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
+ prof->tx_ring_size, i, TX))
+ goto err;
+
+ if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
+ prof->tx_ring_size, TXBB_SIZE))
+ goto err;
+ }
+
+ /* Create rx Rings */
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
+ prof->rx_ring_size, i, RX))
+ goto err;
+
+ if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
+ prof->rx_ring_size, priv->stride))
+ goto err;
+ }
+
+ return 0;
+
+err:
+ en_err(priv, "Failed to allocate NIC resources\n");
+ mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
+ return -ENOMEM;
+}
+
+
+void mlx4_en_destroy_netdev(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
+
+ /* Unregister device - this will close the port if it was up */
+ if (priv->registered)
+ unregister_netdev(dev);
+
+ if (priv->allocated)
+ mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
+
+ cancel_delayed_work(&priv->stats_task);
+ /* flush any pending task for this netdev */
+ flush_workqueue(mdev->workqueue);
+
+ /* Detach the netdev so tasks would not attempt to access it */
+ mutex_lock(&mdev->state_lock);
+ mdev->pndev[priv->port] = NULL;
+ mutex_unlock(&mdev->state_lock);
+
+ mlx4_en_free_resources(priv, false);
+ free_netdev(dev);
+}
+
+static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err = 0;
+
+ en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
+ dev->mtu, new_mtu);
+
+ if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
+ en_err(priv, "Bad MTU size:%d.\n", new_mtu);
+ return -EPERM;
+ }
+ dev->mtu = new_mtu;
+
+ if (netif_running(dev)) {
+ mutex_lock(&mdev->state_lock);
+ if (!mdev->device_up) {
+ /* NIC is probably restarting - let watchdog task reset
+ * the port */
+ en_dbg(DRV, priv, "Change MTU called with card down!?\n");
+ } else {
+ mlx4_en_stop_port(dev);
+ err = mlx4_en_start_port(dev);
+ if (err) {
+ en_err(priv, "Failed restarting port:%d\n",
+ priv->port);
+ queue_work(mdev->workqueue, &priv->watchdog_task);
+ }
+ }
+ mutex_unlock(&mdev->state_lock);
+ }
+ return 0;
+}
+
+static const struct net_device_ops mlx4_netdev_ops = {
+ .ndo_open = mlx4_en_open,
+ .ndo_stop = mlx4_en_close,
+ .ndo_start_xmit = mlx4_en_xmit,
+ .ndo_select_queue = mlx4_en_select_queue,
+ .ndo_get_stats = mlx4_en_get_stats,
+ .ndo_set_rx_mode = mlx4_en_set_multicast,
+ .ndo_set_mac_address = mlx4_en_set_mac,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = mlx4_en_change_mtu,
+ .ndo_tx_timeout = mlx4_en_tx_timeout,
+ .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = mlx4_en_netpoll,
+#endif
+};
+
+int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ struct mlx4_en_port_profile *prof)
+{
+ struct net_device *dev;
+ struct mlx4_en_priv *priv;
+ int i;
+ int err;
+
+ dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
+ prof->tx_ring_num, prof->rx_ring_num);
+ if (dev == NULL) {
+ mlx4_err(mdev, "Net device allocation failed\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
+ dev->dev_id = port - 1;
+
+ /*
+ * Initialize driver private data
+ */
+
+ priv = netdev_priv(dev);
+ memset(priv, 0, sizeof(struct mlx4_en_priv));
+ priv->dev = dev;
+ priv->mdev = mdev;
+ priv->prof = prof;
+ priv->port = port;
+ priv->port_up = false;
+ priv->flags = prof->flags;
+ priv->tx_ring_num = prof->tx_ring_num;
+ priv->rx_ring_num = prof->rx_ring_num;
+ priv->mac_index = -1;
+ priv->msg_enable = MLX4_EN_MSG_LEVEL;
+ spin_lock_init(&priv->stats_lock);
+ INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
+ INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
+ INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
+ INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
+ INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
+
+ /* Query for default mac and max mtu */
+ priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
+ priv->mac = mdev->dev->caps.def_mac[priv->port];
+ if (ILLEGAL_MAC(priv->mac)) {
+ en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
+ priv->port, priv->mac);
+ err = -EINVAL;
+ goto out;
+ }
+
+ priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+ DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
+ err = mlx4_en_alloc_resources(priv);
+ if (err)
+ goto out;
+
+ /* Allocate page for receive rings */
+ err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
+ MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
+ if (err) {
+ en_err(priv, "Failed to allocate page for rx qps\n");
+ goto out;
+ }
+ priv->allocated = 1;
+
+ /*
+ * Initialize netdev entry points
+ */
+ dev->netdev_ops = &mlx4_netdev_ops;
+ dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
+ netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
+
+ SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
+
+ /* Set default MAC */
+ dev->addr_len = ETH_ALEN;
+ for (i = 0; i < ETH_ALEN; i++) {
+ dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
+ dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
+ }
+
+ /*
+ * Set driver features
+ */
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ if (mdev->LSO_support)
+ dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+
+ dev->vlan_features = dev->hw_features;
+
+ dev->hw_features |= NETIF_F_RXCSUM;
+ dev->features = dev->hw_features | NETIF_F_HIGHDMA |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
+ mdev->pndev[port] = dev;
+
+ netif_carrier_off(dev);
+ err = register_netdev(dev);
+ if (err) {
+ en_err(priv, "Netdev registration failed for port %d\n", port);
+ goto out;
+ }
+
+ en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
+ en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
+
+ /* Configure port */
+ err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+ MLX4_EN_MIN_MTU,
+ 0, 0, 0, 0);
+ if (err) {
+ en_err(priv, "Failed setting port general configurations "
+ "for port %d, with error %d\n", priv->port, err);
+ goto out;
+ }
+
+ /* Init port */
+ en_warn(priv, "Initializing port\n");
+ err = mlx4_INIT_PORT(mdev->dev, priv->port);
+ if (err) {
+ en_err(priv, "Failed Initializing port\n");
+ goto out;
+ }
+ priv->registered = 1;
+ mlx4_en_set_default_moderation(priv);
+ queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
+ return 0;
+
+out:
+ mlx4_en_destroy_netdev(dev);
+ return err;
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
new file mode 100644
index 000000000000..5ada5b469112
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+
+#include <linux/if_vlan.h>
+
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/cmd.h>
+
+#include "en_port.h"
+#include "mlx4_en.h"
+
+
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
+ u64 mac, u64 clear, u8 mode)
+{
+ return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
+ MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
+}
+
+int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_vlan_fltr_mbox *filter;
+ int i;
+ int j;
+ int index = 0;
+ u32 entry;
+ int err = 0;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ filter = mailbox->buf;
+ memset(filter, 0, sizeof(*filter));
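+ /* Pack the 4096-bit active_vlans bitmap 32 VLANs per entry;
+ * entry[VLAN_FLTR_SIZE - 1] holds VLANs 0-31, with VLAN 0 in
+ * its lowest bit */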
+ for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
+ entry = 0;
+ for (j = 0; j < 32; j++)
+ if (test_bit(index++, priv->active_vlans))
+ entry |= 1 << j;
+ filter->entry[i] = cpu_to_be32(entry);
+ }
+ err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR,
+ MLX4_CMD_TIME_CLASS_B);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+
+int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
+ u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_general_context *context;
+ int err;
+ u32 in_mod;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ memset(context, 0, sizeof *context);
+
+ context->flags = SET_PORT_GEN_ALL_VALID;
+ context->mtu = cpu_to_be16(mtu);
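+ /* Global pause (bit 7 of pptx/pprx) is enabled only when the
+ * corresponding per-priority flow control mask is zero; the two
+ * modes are mutually exclusive */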
+ context->pptx = (pptx * (!pfctx)) << 7;
+ context->pfctx = pfctx;
+ context->pprx = (pprx * (!pfcrx)) << 7;
+ context->pfcrx = pfcrx;
+
+ in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
+ u8 promisc)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_rqp_calc_context *context;
+ int err;
+ u32 in_mod;
+ u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
+ MCAST_DIRECT : MCAST_DEFAULT;
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
+ return 0;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ memset(context, 0, sizeof *context);
+
+ context->base_qpn = cpu_to_be32(base_qpn);
+ context->n_mac = 0x2;
+ context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
+ base_qpn);
+ context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
+ base_qpn);
+ context->intra_no_vlan = 0;
+ context->no_vlan = MLX4_NO_VLAN_IDX;
+ context->intra_vlan_miss = 0;
+ context->vlan_miss = MLX4_VLAN_MISS_IDX;
+
+ in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
+{
+ struct mlx4_en_query_port_context *qport_context;
+ struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
+ struct mlx4_en_port_state *state = &priv->port_state;
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ memset(mailbox->buf, 0, sizeof(*qport_context));
+ err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
+ MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B);
+ if (err)
+ goto out;
+ qport_context = mailbox->buf;
+
+ /* This command is always accessed from ethtool context, which is
+ * already synchronized; no need for locking */
+ state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
+ if ((qport_context->link_speed & MLX4_EN_SPEED_MASK) ==
+ MLX4_EN_1G_SPEED)
+ state->link_speed = 1000;
+ else
+ state->link_speed = 10000;
+ state->transciver = qport_context->transceiver;
+
+out:
+ mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+ return err;
+}
+
+int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
+{
+ struct mlx4_en_stat_out_mbox *mlx4_en_stats;
+ struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
+ struct net_device_stats *stats = &priv->stats;
+ struct mlx4_cmd_mailbox *mailbox;
+ u64 in_mod = reset << 8 | port;
+ int err;
+ int i;
+
+ mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
+ err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
+ MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
+ if (err)
+ goto out;
+
+ mlx4_en_stats = mailbox->buf;
+
+ spin_lock_bh(&priv->stats_lock);
+
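+ /* Packet and byte totals come from the per-ring SW counters;
+ * the error counters below are taken from the HW mailbox */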
+ stats->rx_packets = 0;
+ stats->rx_bytes = 0;
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ stats->rx_packets += priv->rx_ring[i].packets;
+ stats->rx_bytes += priv->rx_ring[i].bytes;
+ }
+ stats->tx_packets = 0;
+ stats->tx_bytes = 0;
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ stats->tx_packets += priv->tx_ring[i].packets;
+ stats->tx_bytes += priv->tx_ring[i].bytes;
+ }
+
+ stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
+ be32_to_cpu(mlx4_en_stats->RdropLength) +
+ be32_to_cpu(mlx4_en_stats->RJBBR) +
+ be32_to_cpu(mlx4_en_stats->RCRC) +
+ be32_to_cpu(mlx4_en_stats->RRUNT);
+ stats->tx_errors = be32_to_cpu(mlx4_en_stats->TDROP);
+ stats->multicast = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
+ be64_to_cpu(mlx4_en_stats->MCAST_novlan);
+ stats->collisions = 0;
+ stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
+ stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+ stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
+ stats->rx_frame_errors = 0;
+ stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+ stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+ stats->tx_aborted_errors = 0;
+ stats->tx_carrier_errors = 0;
+ stats->tx_fifo_errors = 0;
+ stats->tx_heartbeat_errors = 0;
+ stats->tx_window_errors = 0;
+
+ priv->pkstats.broadcast =
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
+ priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
+ priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
+ priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
+ priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
+ priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
+ priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
+ priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
+ priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
+ priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
+ priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
+ priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
+ priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
+ priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
+ priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
+ priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
+ priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
+ spin_unlock_bh(&priv->stats_lock);
+
+out:
+ mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+ return err;
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h
new file mode 100644
index 000000000000..e3d73e41c567
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h
@@ -0,0 +1,594 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _MLX4_EN_PORT_H_
+#define _MLX4_EN_PORT_H_
+
+
+#define SET_PORT_GEN_ALL_VALID 0x7
+#define SET_PORT_PROMISC_SHIFT 31
+#define SET_PORT_MC_PROMISC_SHIFT 30
+
+enum {
+ MLX4_CMD_SET_VLAN_FLTR = 0x47,
+ MLX4_CMD_SET_MCAST_FLTR = 0x48,
+ MLX4_CMD_DUMP_ETH_STATS = 0x49,
+};
+
+enum {
+ MCAST_DIRECT_ONLY = 0,
+ MCAST_DIRECT = 1,
+ MCAST_DEFAULT = 2
+};
+
+struct mlx4_set_port_general_context {
+ u8 reserved[3];
+ u8 flags;
+ u16 reserved2;
+ __be16 mtu;
+ u8 pptx;
+ u8 pfctx;
+ u16 reserved3;
+ u8 pprx;
+ u8 pfcrx;
+ u16 reserved4;
+};
+
+struct mlx4_set_port_rqp_calc_context {
+ __be32 base_qpn;
+ u8 reserved;
+ u8 n_mac;
+ u8 n_vlan;
+ u8 n_prio;
+ u8 reserved2[3];
+ u8 mac_miss;
+ u8 intra_no_vlan;
+ u8 no_vlan;
+ u8 intra_vlan_miss;
+ u8 vlan_miss;
+ u8 reserved3[3];
+ u8 no_vlan_prio;
+ __be32 promisc;
+ __be32 mcast;
+};
+
+#define VLAN_FLTR_SIZE 128
+struct mlx4_set_vlan_fltr_mbox {
+ __be32 entry[VLAN_FLTR_SIZE];
+};
+
+
+enum {
+ MLX4_MCAST_CONFIG = 0,
+ MLX4_MCAST_DISABLE = 1,
+ MLX4_MCAST_ENABLE = 2,
+};
+
+struct mlx4_en_query_port_context {
+ u8 link_up;
+#define MLX4_EN_LINK_UP_MASK 0x80
+ u8 reserved;
+ __be16 mtu;
+ u8 reserved2;
+ u8 link_speed;
+#define MLX4_EN_SPEED_MASK 0x3
+#define MLX4_EN_1G_SPEED 0x2
+ u16 reserved3[5];
+ __be64 mac;
+ u8 transceiver;
+};
+
+
+struct mlx4_en_stat_out_mbox {
+ /* Received frames with a length of 64 octets */
+ __be64 R64_prio_0;
+ __be64 R64_prio_1;
+ __be64 R64_prio_2;
+ __be64 R64_prio_3;
+ __be64 R64_prio_4;
+ __be64 R64_prio_5;
+ __be64 R64_prio_6;
+ __be64 R64_prio_7;
+ __be64 R64_novlan;
+ /* Received frames with a length of 127 octets */
+ __be64 R127_prio_0;
+ __be64 R127_prio_1;
+ __be64 R127_prio_2;
+ __be64 R127_prio_3;
+ __be64 R127_prio_4;
+ __be64 R127_prio_5;
+ __be64 R127_prio_6;
+ __be64 R127_prio_7;
+ __be64 R127_novlan;
+ /* Received frames with a length of 255 octets */
+ __be64 R255_prio_0;
+ __be64 R255_prio_1;
+ __be64 R255_prio_2;
+ __be64 R255_prio_3;
+ __be64 R255_prio_4;
+ __be64 R255_prio_5;
+ __be64 R255_prio_6;
+ __be64 R255_prio_7;
+ __be64 R255_novlan;
+ /* Received frames with a length of 511 octets */
+ __be64 R511_prio_0;
+ __be64 R511_prio_1;
+ __be64 R511_prio_2;
+ __be64 R511_prio_3;
+ __be64 R511_prio_4;
+ __be64 R511_prio_5;
+ __be64 R511_prio_6;
+ __be64 R511_prio_7;
+ __be64 R511_novlan;
+ /* Received frames with a length of 1023 octets */
+ __be64 R1023_prio_0;
+ __be64 R1023_prio_1;
+ __be64 R1023_prio_2;
+ __be64 R1023_prio_3;
+ __be64 R1023_prio_4;
+ __be64 R1023_prio_5;
+ __be64 R1023_prio_6;
+ __be64 R1023_prio_7;
+ __be64 R1023_novlan;
+ /* Received frames with a length of 1518 octets */
+ __be64 R1518_prio_0;
+ __be64 R1518_prio_1;
+ __be64 R1518_prio_2;
+ __be64 R1518_prio_3;
+ __be64 R1518_prio_4;
+ __be64 R1518_prio_5;
+ __be64 R1518_prio_6;
+ __be64 R1518_prio_7;
+ __be64 R1518_novlan;
+ /* Received frames with a length of 1522 octets */
+ __be64 R1522_prio_0;
+ __be64 R1522_prio_1;
+ __be64 R1522_prio_2;
+ __be64 R1522_prio_3;
+ __be64 R1522_prio_4;
+ __be64 R1522_prio_5;
+ __be64 R1522_prio_6;
+ __be64 R1522_prio_7;
+ __be64 R1522_novlan;
+ /* Received frames with a length of 1548 octets */
+ __be64 R1548_prio_0;
+ __be64 R1548_prio_1;
+ __be64 R1548_prio_2;
+ __be64 R1548_prio_3;
+ __be64 R1548_prio_4;
+ __be64 R1548_prio_5;
+ __be64 R1548_prio_6;
+ __be64 R1548_prio_7;
+ __be64 R1548_novlan;
+ /* Received frames with a length of 1548 < octets < MTU */
+ __be64 R2MTU_prio_0;
+ __be64 R2MTU_prio_1;
+ __be64 R2MTU_prio_2;
+ __be64 R2MTU_prio_3;
+ __be64 R2MTU_prio_4;
+ __be64 R2MTU_prio_5;
+ __be64 R2MTU_prio_6;
+ __be64 R2MTU_prio_7;
+ __be64 R2MTU_novlan;
+ /* Received frames with a length greater than MTU octets and a good CRC */
+ __be64 RGIANT_prio_0;
+ __be64 RGIANT_prio_1;
+ __be64 RGIANT_prio_2;
+ __be64 RGIANT_prio_3;
+ __be64 RGIANT_prio_4;
+ __be64 RGIANT_prio_5;
+ __be64 RGIANT_prio_6;
+ __be64 RGIANT_prio_7;
+ __be64 RGIANT_novlan;
+ /* Received broadcast frames with good CRC */
+ __be64 RBCAST_prio_0;
+ __be64 RBCAST_prio_1;
+ __be64 RBCAST_prio_2;
+ __be64 RBCAST_prio_3;
+ __be64 RBCAST_prio_4;
+ __be64 RBCAST_prio_5;
+ __be64 RBCAST_prio_6;
+ __be64 RBCAST_prio_7;
+ __be64 RBCAST_novlan;
+ /* Received multicast frames with good CRC */
+ __be64 MCAST_prio_0;
+ __be64 MCAST_prio_1;
+ __be64 MCAST_prio_2;
+ __be64 MCAST_prio_3;
+ __be64 MCAST_prio_4;
+ __be64 MCAST_prio_5;
+ __be64 MCAST_prio_6;
+ __be64 MCAST_prio_7;
+ __be64 MCAST_novlan;
+ /* Received unicast not short or GIANT frames with good CRC */
+ __be64 RTOTG_prio_0;
+ __be64 RTOTG_prio_1;
+ __be64 RTOTG_prio_2;
+ __be64 RTOTG_prio_3;
+ __be64 RTOTG_prio_4;
+ __be64 RTOTG_prio_5;
+ __be64 RTOTG_prio_6;
+ __be64 RTOTG_prio_7;
+ __be64 RTOTG_novlan;
+
+ /* Count of total octets of received frames, includes framing characters */
+ __be64 RTTLOCT_prio_0;
+ /* Count of total octets of received frames, not including framing
+ characters */
+ __be64 RTTLOCT_NOFRM_prio_0;
+ /* Count of Total number of octets received
+ (only for frames without errors) */
+ __be64 ROCT_prio_0;
+
+ __be64 RTTLOCT_prio_1;
+ __be64 RTTLOCT_NOFRM_prio_1;
+ __be64 ROCT_prio_1;
+
+ __be64 RTTLOCT_prio_2;
+ __be64 RTTLOCT_NOFRM_prio_2;
+ __be64 ROCT_prio_2;
+
+ __be64 RTTLOCT_prio_3;
+ __be64 RTTLOCT_NOFRM_prio_3;
+ __be64 ROCT_prio_3;
+
+ __be64 RTTLOCT_prio_4;
+ __be64 RTTLOCT_NOFRM_prio_4;
+ __be64 ROCT_prio_4;
+
+ __be64 RTTLOCT_prio_5;
+ __be64 RTTLOCT_NOFRM_prio_5;
+ __be64 ROCT_prio_5;
+
+ __be64 RTTLOCT_prio_6;
+ __be64 RTTLOCT_NOFRM_prio_6;
+ __be64 ROCT_prio_6;
+
+ __be64 RTTLOCT_prio_7;
+ __be64 RTTLOCT_NOFRM_prio_7;
+ __be64 ROCT_prio_7;
+
+ __be64 RTTLOCT_novlan;
+ __be64 RTTLOCT_NOFRM_novlan;
+ __be64 ROCT_novlan;
+
+ /* Count of Total received frames including bad frames */
+ __be64 RTOT_prio_0;
+ /* Count of Total number of received frames with 802.1Q encapsulation */
+ __be64 R1Q_prio_0;
+ __be64 reserved1;
+
+ __be64 RTOT_prio_1;
+ __be64 R1Q_prio_1;
+ __be64 reserved2;
+
+ __be64 RTOT_prio_2;
+ __be64 R1Q_prio_2;
+ __be64 reserved3;
+
+ __be64 RTOT_prio_3;
+ __be64 R1Q_prio_3;
+ __be64 reserved4;
+
+ __be64 RTOT_prio_4;
+ __be64 R1Q_prio_4;
+ __be64 reserved5;
+
+ __be64 RTOT_prio_5;
+ __be64 R1Q_prio_5;
+ __be64 reserved6;
+
+ __be64 RTOT_prio_6;
+ __be64 R1Q_prio_6;
+ __be64 reserved7;
+
+ __be64 RTOT_prio_7;
+ __be64 R1Q_prio_7;
+ __be64 reserved8;
+
+ __be64 RTOT_novlan;
+ __be64 R1Q_novlan;
+ __be64 reserved9;
+
+ /* Total number of Successfully Received Control Frames */
+ __be64 RCNTL;
+ __be64 reserved10;
+ __be64 reserved11;
+ __be64 reserved12;
+ /* Count of received frames with a length/type field value between 46
+ (42 for VLAN-tagged frames) and 1500 (also 1500 for VLAN-tagged frames),
+ inclusive */
+ __be64 RInRangeLengthErr;
+ /* Count of received frames with length/type field between 1501 and 1535
+ decimal, inclusive */
+ __be64 ROutRangeLengthErr;
+ /* Count of received frames that are longer than max allowed size for
+ 802.3 frames (1518/1522) */
+ __be64 RFrmTooLong;
+ /* Count frames received with PCS error */
+ __be64 PCS;
+
+ /* Transmit frames with a length of 64 octets */
+ __be64 T64_prio_0;
+ __be64 T64_prio_1;
+ __be64 T64_prio_2;
+ __be64 T64_prio_3;
+ __be64 T64_prio_4;
+ __be64 T64_prio_5;
+ __be64 T64_prio_6;
+ __be64 T64_prio_7;
+ __be64 T64_novlan;
+ __be64 T64_loopbk;
+ /* Transmit frames with a length of 65 to 127 octets. */
+ __be64 T127_prio_0;
+ __be64 T127_prio_1;
+ __be64 T127_prio_2;
+ __be64 T127_prio_3;
+ __be64 T127_prio_4;
+ __be64 T127_prio_5;
+ __be64 T127_prio_6;
+ __be64 T127_prio_7;
+ __be64 T127_novlan;
+ __be64 T127_loopbk;
+ /* Transmit frames with a length of 128 to 255 octets */
+ __be64 T255_prio_0;
+ __be64 T255_prio_1;
+ __be64 T255_prio_2;
+ __be64 T255_prio_3;
+ __be64 T255_prio_4;
+ __be64 T255_prio_5;
+ __be64 T255_prio_6;
+ __be64 T255_prio_7;
+ __be64 T255_novlan;
+ __be64 T255_loopbk;
+ /* Transmit frames with a length of 256 to 511 octets */
+ __be64 T511_prio_0;
+ __be64 T511_prio_1;
+ __be64 T511_prio_2;
+ __be64 T511_prio_3;
+ __be64 T511_prio_4;
+ __be64 T511_prio_5;
+ __be64 T511_prio_6;
+ __be64 T511_prio_7;
+ __be64 T511_novlan;
+ __be64 T511_loopbk;
+ /* Transmit frames with a length of 512 to 1023 octets */
+ __be64 T1023_prio_0;
+ __be64 T1023_prio_1;
+ __be64 T1023_prio_2;
+ __be64 T1023_prio_3;
+ __be64 T1023_prio_4;
+ __be64 T1023_prio_5;
+ __be64 T1023_prio_6;
+ __be64 T1023_prio_7;
+ __be64 T1023_novlan;
+ __be64 T1023_loopbk;
+ /* Transmit frames with a length of 1024 to 1518 octets */
+ __be64 T1518_prio_0;
+ __be64 T1518_prio_1;
+ __be64 T1518_prio_2;
+ __be64 T1518_prio_3;
+ __be64 T1518_prio_4;
+ __be64 T1518_prio_5;
+ __be64 T1518_prio_6;
+ __be64 T1518_prio_7;
+ __be64 T1518_novlan;
+ __be64 T1518_loopbk;
+ /* Counts transmit frames with a length of 1519 to 1522 bytes */
+ __be64 T1522_prio_0;
+ __be64 T1522_prio_1;
+ __be64 T1522_prio_2;
+ __be64 T1522_prio_3;
+ __be64 T1522_prio_4;
+ __be64 T1522_prio_5;
+ __be64 T1522_prio_6;
+ __be64 T1522_prio_7;
+ __be64 T1522_novlan;
+ __be64 T1522_loopbk;
+ /* Transmit frames with a length of 1523 to 1548 octets */
+ __be64 T1548_prio_0;
+ __be64 T1548_prio_1;
+ __be64 T1548_prio_2;
+ __be64 T1548_prio_3;
+ __be64 T1548_prio_4;
+ __be64 T1548_prio_5;
+ __be64 T1548_prio_6;
+ __be64 T1548_prio_7;
+ __be64 T1548_novlan;
+ __be64 T1548_loopbk;
+ /* Counts transmit frames with a length of 1549 to MTU bytes */
+ __be64 T2MTU_prio_0;
+ __be64 T2MTU_prio_1;
+ __be64 T2MTU_prio_2;
+ __be64 T2MTU_prio_3;
+ __be64 T2MTU_prio_4;
+ __be64 T2MTU_prio_5;
+ __be64 T2MTU_prio_6;
+ __be64 T2MTU_prio_7;
+ __be64 T2MTU_novlan;
+ __be64 T2MTU_loopbk;
+ /* Transmit frames with a length greater than MTU octets and a good CRC. */
+ __be64 TGIANT_prio_0;
+ __be64 TGIANT_prio_1;
+ __be64 TGIANT_prio_2;
+ __be64 TGIANT_prio_3;
+ __be64 TGIANT_prio_4;
+ __be64 TGIANT_prio_5;
+ __be64 TGIANT_prio_6;
+ __be64 TGIANT_prio_7;
+ __be64 TGIANT_novlan;
+ __be64 TGIANT_loopbk;
+ /* Transmit broadcast frames with a good CRC */
+ __be64 TBCAST_prio_0;
+ __be64 TBCAST_prio_1;
+ __be64 TBCAST_prio_2;
+ __be64 TBCAST_prio_3;
+ __be64 TBCAST_prio_4;
+ __be64 TBCAST_prio_5;
+ __be64 TBCAST_prio_6;
+ __be64 TBCAST_prio_7;
+ __be64 TBCAST_novlan;
+ __be64 TBCAST_loopbk;
+ /* Transmit multicast frames with a good CRC */
+ __be64 TMCAST_prio_0;
+ __be64 TMCAST_prio_1;
+ __be64 TMCAST_prio_2;
+ __be64 TMCAST_prio_3;
+ __be64 TMCAST_prio_4;
+ __be64 TMCAST_prio_5;
+ __be64 TMCAST_prio_6;
+ __be64 TMCAST_prio_7;
+ __be64 TMCAST_novlan;
+ __be64 TMCAST_loopbk;
+ /* Transmit good frames that are neither broadcast nor multicast */
+ __be64 TTOTG_prio_0;
+ __be64 TTOTG_prio_1;
+ __be64 TTOTG_prio_2;
+ __be64 TTOTG_prio_3;
+ __be64 TTOTG_prio_4;
+ __be64 TTOTG_prio_5;
+ __be64 TTOTG_prio_6;
+ __be64 TTOTG_prio_7;
+ __be64 TTOTG_novlan;
+ __be64 TTOTG_loopbk;
+
+ /* total octets of transmitted frames, including framing characters */
+ __be64 TTTLOCT_prio_0;
+ /* total octets of transmitted frames, not including framing characters */
+ __be64 TTTLOCT_NOFRM_prio_0;
+ /* ifOutOctets */
+ __be64 TOCT_prio_0;
+
+ __be64 TTTLOCT_prio_1;
+ __be64 TTTLOCT_NOFRM_prio_1;
+ __be64 TOCT_prio_1;
+
+ __be64 TTTLOCT_prio_2;
+ __be64 TTTLOCT_NOFRM_prio_2;
+ __be64 TOCT_prio_2;
+
+ __be64 TTTLOCT_prio_3;
+ __be64 TTTLOCT_NOFRM_prio_3;
+ __be64 TOCT_prio_3;
+
+ __be64 TTTLOCT_prio_4;
+ __be64 TTTLOCT_NOFRM_prio_4;
+ __be64 TOCT_prio_4;
+
+ __be64 TTTLOCT_prio_5;
+ __be64 TTTLOCT_NOFRM_prio_5;
+ __be64 TOCT_prio_5;
+
+ __be64 TTTLOCT_prio_6;
+ __be64 TTTLOCT_NOFRM_prio_6;
+ __be64 TOCT_prio_6;
+
+ __be64 TTTLOCT_prio_7;
+ __be64 TTTLOCT_NOFRM_prio_7;
+ __be64 TOCT_prio_7;
+
+ __be64 TTTLOCT_novlan;
+ __be64 TTTLOCT_NOFRM_novlan;
+ __be64 TOCT_novlan;
+
+ __be64 TTTLOCT_loopbk;
+ __be64 TTTLOCT_NOFRM_loopbk;
+ __be64 TOCT_loopbk;
+
+ /* Total frames transmitted with a good CRC that are not aborted */
+ __be64 TTOT_prio_0;
+ /* Total number of frames transmitted with 802.1Q encapsulation */
+ __be64 T1Q_prio_0;
+ __be64 reserved13;
+
+ __be64 TTOT_prio_1;
+ __be64 T1Q_prio_1;
+ __be64 reserved14;
+
+ __be64 TTOT_prio_2;
+ __be64 T1Q_prio_2;
+ __be64 reserved15;
+
+ __be64 TTOT_prio_3;
+ __be64 T1Q_prio_3;
+ __be64 reserved16;
+
+ __be64 TTOT_prio_4;
+ __be64 T1Q_prio_4;
+ __be64 reserved17;
+
+ __be64 TTOT_prio_5;
+ __be64 T1Q_prio_5;
+ __be64 reserved18;
+
+ __be64 TTOT_prio_6;
+ __be64 T1Q_prio_6;
+ __be64 reserved19;
+
+ __be64 TTOT_prio_7;
+ __be64 T1Q_prio_7;
+ __be64 reserved20;
+
+ __be64 TTOT_novlan;
+ __be64 T1Q_novlan;
+ __be64 reserved21;
+
+ __be64 TTOT_loopbk;
+ __be64 T1Q_loopbk;
+ __be64 reserved22;
+
+ /* Received frames with a length greater than MTU octets and a bad CRC */
+ __be32 RJBBR;
+ /* Received frames with a bad CRC that are not runts, jabbers,
+ or alignment errors */
+ __be32 RCRC;
+ /* Received frames with SFD with a length of less than 64 octets and a
+ bad CRC */
+ __be32 RRUNT;
+ /* Received frames with a length less than 64 octets and a good CRC */
+ __be32 RSHORT;
+ /* Total Number of Received Packets Dropped */
+ __be32 RDROP;
+ /* Drop due to overflow */
+ __be32 RdropOvflw;
+ /* Drop due to length error */
+ __be32 RdropLength;
+ /* Total of good frames. Does not include frames received with
+ frame-too-long, FCS, or length errors */
+ __be32 RTOTFRMS;
+ /* Total dropped Xmited packets */
+ __be32 TDROP;
+};
+
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
new file mode 100644
index 000000000000..0dfb4ec8a9dd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mlx4/qp.h>
+
+#include "mlx4_en.h"
+
+void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
+ int is_tx, int rss, int qpn, int cqn,
+ struct mlx4_qp_context *context)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+
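+ /* The size/stride fields pack log2(ring size) in bits 3 and up
+ * and (log2(stride) - 4) in the low three bits of the QP context */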
+ memset(context, 0, sizeof *context);
+ context->flags = cpu_to_be32(7 << 16 | rss << 13);
+ context->pd = cpu_to_be32(mdev->priv_pdn);
+ context->mtu_msgmax = 0xff;
+ if (!is_tx && !rss)
+ context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
+ if (is_tx)
+ context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
+ else
+ context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
+ context->usr_page = cpu_to_be32(mdev->priv_uar.index);
+ context->local_qpn = cpu_to_be32(qpn);
+ context->pri_path.ackto = 1 & 0x07;
+ context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
+ context->pri_path.counter_index = 0xff;
+ context->cqn_send = cpu_to_be32(cqn);
+ context->cqn_recv = cpu_to_be32(cqn);
+ context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
+}
+
+
+int mlx4_en_map_buffer(struct mlx4_buf *buf)
+{
+ struct page **pages;
+ int i;
+
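+ /* On 64-bit systems (or when there is a single chunk) the
+ * buffer is already virtually contiguous; otherwise stitch the
+ * page chunks into one mapping with vmap() */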
+ if (BITS_PER_LONG == 64 || buf->nbufs == 1)
+ return 0;
+
+ pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ for (i = 0; i < buf->nbufs; ++i)
+ pages[i] = virt_to_page(buf->page_list[i].buf);
+
+ buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
+ kfree(pages);
+ if (!buf->direct.buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
+{
+ if (BITS_PER_LONG == 64 || buf->nbufs == 1)
+ return;
+
+ vunmap(buf->direct.buf);
+}
+
+void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
+{
+ return;
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
new file mode 100644
index 000000000000..37cc9e5c56be
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -0,0 +1,918 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx4/cq.h>
+#include <linux/slab.h>
+#include <linux/mlx4/qp.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+
+#include "mlx4_en.h"
+
+
+static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+ struct skb_frag_struct *skb_frags,
+ struct mlx4_en_rx_alloc *ring_alloc,
+ int i)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+ struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i];
+ struct page *page;
+ dma_addr_t dma;
+
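+ /* Fragments are carved from a shared high-order page at
+ * frag_stride intervals, taking a page reference per chunk.
+ * When the last chunk is handed out, the page's original
+ * reference goes with it and a fresh page is installed */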
+ if (page_alloc->offset == frag_info->last_offset) {
+ /* Allocate new page */
+ page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER);
+ if (!page)
+ return -ENOMEM;
+
+ skb_frags[i].page = page_alloc->page;
+ skb_frags[i].page_offset = page_alloc->offset;
+ page_alloc->page = page;
+ page_alloc->offset = frag_info->frag_align;
+ } else {
+ page = page_alloc->page;
+ get_page(page);
+
+ skb_frags[i].page = page;
+ skb_frags[i].page_offset = page_alloc->offset;
+ page_alloc->offset += frag_info->frag_stride;
+ }
+ dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
+ skb_frags[i].page_offset, frag_info->frag_size,
+ PCI_DMA_FROMDEVICE);
+ rx_desc->data[i].addr = cpu_to_be64(dma);
+ return 0;
+}
+
+static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring)
+{
+ struct mlx4_en_rx_alloc *page_alloc;
+ int i;
+
+ for (i = 0; i < priv->num_frags; i++) {
+ page_alloc = &ring->page_alloc[i];
+ page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
+ MLX4_EN_ALLOC_ORDER);
+ if (!page_alloc->page)
+ goto out;
+
+ page_alloc->offset = priv->frag_info[i].frag_align;
+ en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
+ i, page_alloc->page);
+ }
+ return 0;
+
+out:
+ while (i--) {
+ page_alloc = &ring->page_alloc[i];
+ put_page(page_alloc->page);
+ page_alloc->page = NULL;
+ }
+ return -ENOMEM;
+}
+
+static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring)
+{
+ struct mlx4_en_rx_alloc *page_alloc;
+ int i;
+
+ for (i = 0; i < priv->num_frags; i++) {
+ page_alloc = &ring->page_alloc[i];
+ en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
+ i, page_count(page_alloc->page));
+
+ put_page(page_alloc->page);
+ page_alloc->page = NULL;
+ }
+}
+
+
+static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring, int index)
+{
+ struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
+ struct skb_frag_struct *skb_frags = ring->rx_info +
+ (index << priv->log_rx_info);
+ int possible_frags;
+ int i;
+
+ /* Set size and memtype fields */
+ for (i = 0; i < priv->num_frags; i++) {
+ skb_frags[i].size = priv->frag_info[i].frag_size;
+ rx_desc->data[i].byte_count =
+ cpu_to_be32(priv->frag_info[i].frag_size);
+ rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
+ }
+
+ /* If the number of used fragments does not fill up the ring stride,
+ * remaining (unused) fragments must be padded with null address/size
+ * and a special memory key */
+ possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
+ for (i = priv->num_frags; i < possible_frags; i++) {
+ rx_desc->data[i].byte_count = 0;
+ rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
+ rx_desc->data[i].addr = 0;
+ }
+}
+
+
+static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring, int index)
+{
+ struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
+ struct skb_frag_struct *skb_frags = ring->rx_info +
+ (index << priv->log_rx_info);
+ int i;
+
+ for (i = 0; i < priv->num_frags; i++)
+ if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i))
+ goto err;
+
+ return 0;
+
+err:
+ while (i--)
+ put_page(skb_frags[i].page);
+ return -ENOMEM;
+}
+
+static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
+{
+ *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
+}
+
+static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring,
+ int index)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct skb_frag_struct *skb_frags;
+ struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
+ dma_addr_t dma;
+ int nr;
+
+ skb_frags = ring->rx_info + (index << priv->log_rx_info);
+ for (nr = 0; nr < priv->num_frags; nr++) {
+ en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
+ dma = be64_to_cpu(rx_desc->data[nr].addr);
+
+ en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
+ pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
+ PCI_DMA_FROMDEVICE);
+ put_page(skb_frags[nr].page);
+ }
+}
+
+static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_rx_ring *ring;
+ int ring_ind;
+ int buf_ind;
+ int new_size;
+
+ for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
+ for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
+ ring = &priv->rx_ring[ring_ind];
+
+ if (mlx4_en_prepare_rx_desc(priv, ring,
+ ring->actual_size)) {
+ if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
+ en_err(priv, "Failed to allocate "
+ "enough rx buffers\n");
+ return -ENOMEM;
+ } else {
+ new_size = rounddown_pow_of_two(ring->actual_size);
+ en_warn(priv, "Only %d buffers allocated "
+ "reducing ring size to %d",
+ ring->actual_size, new_size);
+ goto reduce_rings;
+ }
+ }
+ ring->actual_size++;
+ ring->prod++;
+ }
+ }
+ return 0;
+
+reduce_rings:
+ for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
+ ring = &priv->rx_ring[ring_ind];
+ while (ring->actual_size > new_size) {
+ ring->actual_size--;
+ ring->prod--;
+ mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
+ }
+ }
+
+ return 0;
+}
+
+static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring)
+{
+ int index;
+
+ en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
+ ring->cons, ring->prod);
+
+ /* Unmap and free Rx buffers */
+ BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
+ while (ring->cons != ring->prod) {
+ index = ring->cons & ring->size_mask;
+ en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
+ mlx4_en_free_rx_desc(priv, ring, index);
+ ++ring->cons;
+ }
+}
+
+int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+ int tmp;
+
+
+ ring->prod = 0;
+ ring->cons = 0;
+ ring->size = size;
+ ring->size_mask = size - 1;
+ ring->stride = stride;
+ ring->log_stride = ffs(ring->stride) - 1;
+ ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
+
+ tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
+ sizeof(struct skb_frag_struct));
+ ring->rx_info = vmalloc(tmp);
+ if (!ring->rx_info) {
+ en_err(priv, "Failed allocating rx_info ring\n");
+ return -ENOMEM;
+ }
+ en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
+ ring->rx_info, tmp);
+
+ err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
+ ring->buf_size, 2 * PAGE_SIZE);
+ if (err)
+ goto err_ring;
+
+ err = mlx4_en_map_buffer(&ring->wqres.buf);
+ if (err) {
+ en_err(priv, "Failed to map RX buffer\n");
+ goto err_hwq;
+ }
+ ring->buf = ring->wqres.buf.direct.buf;
+
+ return 0;
+
+err_hwq:
+ mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+err_ring:
+ vfree(ring->rx_info);
+ ring->rx_info = NULL;
+ return err;
+}
+
+int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_rx_ring *ring;
+ int i;
+ int ring_ind;
+ int err;
+ int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+ DS_SIZE * priv->num_frags);
+
+ for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
+ ring = &priv->rx_ring[ring_ind];
+
+ ring->prod = 0;
+ ring->cons = 0;
+ ring->actual_size = 0;
+ ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
+
+ ring->stride = stride;
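+ /* mlx4_en_create_rx_ring allocates one extra TXBB; when the
+ * stride fits inside a TXBB, skip that first TXBB (presumably
+ * reserved for HW use) before laying out descriptors */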
+ if (ring->stride <= TXBB_SIZE)
+ ring->buf += TXBB_SIZE;
+
+ ring->log_stride = ffs(ring->stride) - 1;
+ ring->buf_size = ring->size * ring->stride;
+
+ memset(ring->buf, 0, ring->buf_size);
+ mlx4_en_update_rx_prod_db(ring);
+
+ /* Initialize all descriptors */
+ for (i = 0; i < ring->size; i++)
+ mlx4_en_init_rx_desc(priv, ring, i);
+
+ /* Initialize page allocators */
+ err = mlx4_en_init_allocator(priv, ring);
+ if (err) {
+ en_err(priv, "Failed initializing ring allocator\n");
+ if (ring->stride <= TXBB_SIZE)
+ ring->buf -= TXBB_SIZE;
+ ring_ind--;
+ goto err_allocator;
+ }
+ }
+ err = mlx4_en_fill_rx_buffers(priv);
+ if (err)
+ goto err_buffers;
+
+ for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
+ ring = &priv->rx_ring[ring_ind];
+
+ ring->size_mask = ring->actual_size - 1;
+ mlx4_en_update_rx_prod_db(ring);
+ }
+
+ return 0;
+
+err_buffers:
+ for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
+ mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
+
+ ring_ind = priv->rx_ring_num - 1;
+err_allocator:
+ while (ring_ind >= 0) {
+ if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
+ priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
+ mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
+ ring_ind--;
+ }
+ return err;
+}
+
+void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ mlx4_en_unmap_buffer(&ring->wqres.buf);
+ mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE);
+ vfree(ring->rx_info);
+ ring->rx_info = NULL;
+}
+
+void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring)
+{
+ mlx4_en_free_rx_buf(priv, ring);
+ if (ring->stride <= TXBB_SIZE)
+ ring->buf -= TXBB_SIZE;
+ mlx4_en_destroy_allocator(priv, ring);
+}
+
+
+/* Unmap a completed descriptor and free unused pages */
+static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+ struct skb_frag_struct *skb_frags,
+ struct skb_frag_struct *skb_frags_rx,
+ struct mlx4_en_rx_alloc *page_alloc,
+ int length)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_frag_info *frag_info;
+ int nr;
+ dma_addr_t dma;
+
+ /* Collect used fragments while replacing them in the HW descriptors */
+ for (nr = 0; nr < priv->num_frags; nr++) {
+ frag_info = &priv->frag_info[nr];
+ if (length <= frag_info->frag_prefix_size)
+ break;
+
+ /* Save page reference in skb */
+ skb_frags_rx[nr].page = skb_frags[nr].page;
+ skb_frags_rx[nr].size = skb_frags[nr].size;
+ skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset;
+ dma = be64_to_cpu(rx_desc->data[nr].addr);
+
+ /* Allocate a replacement page */
+ if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr))
+ goto fail;
+
+ /* Unmap buffer */
+ pci_unmap_single(mdev->pdev, dma, skb_frags_rx[nr].size,
+ PCI_DMA_FROMDEVICE);
+ }
+ /* Adjust size of last fragment to match actual length */
+ if (nr > 0)
+ skb_frags_rx[nr - 1].size = length -
+ priv->frag_info[nr - 1].frag_prefix_size;
+ return nr;
+
+fail:
+ /* Drop all accumulated fragments (which have already been replaced in
+ * the descriptor) of this packet; remaining fragments are reused... */
+ while (nr > 0) {
+ nr--;
+ put_page(skb_frags_rx[nr].page);
+ }
+ return 0;
+}
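+
+/* Worked example for the completion loop above (illustrative, assuming a
+ * two-fragment layout of 512 + 1024 bytes, i.e. frag_prefix_size = {0, 512}):
+ * a 700 byte packet uses both fragments and the final adjustment trims the
+ * second one to 700 - 512 = 188 bytes; a 400 byte packet breaks out at
+ * nr == 1 (400 <= 512) and returns a single 400 byte fragment. */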
+
+
+static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+ struct skb_frag_struct *skb_frags,
+ struct mlx4_en_rx_alloc *page_alloc,
+ unsigned int length)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct sk_buff *skb;
+ void *va;
+ int used_frags;
+ dma_addr_t dma;
+
+ skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
+ if (!skb) {
+ en_dbg(RX_ERR, priv, "Failed allocating skb\n");
+ return NULL;
+ }
+ skb->dev = priv->dev;
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb->len = length;
+ skb->truesize = length + sizeof(struct sk_buff);
+
+ /* Get pointer to first fragment so we can copy the headers into the
+ * (linear part of the) skb */
+ va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
+
+ if (length <= SMALL_PACKET_SIZE) {
+ /* We are copying all relevant data to the skb - temporarily
+ * sync buffers for the copy */
+ dma = be64_to_cpu(rx_desc->data[0].addr);
+ dma_sync_single_for_cpu(&mdev->pdev->dev, dma, length,
+ DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb, va, length);
+ dma_sync_single_for_device(&mdev->pdev->dev, dma, length,
+ DMA_FROM_DEVICE);
+ skb->tail += length;
+ } else {
+
+ /* Move relevant fragments to skb */
+ used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
+ skb_shinfo(skb)->frags,
+ page_alloc, length);
+ if (unlikely(!used_frags)) {
+ kfree_skb(skb);
+ return NULL;
+ }
+ skb_shinfo(skb)->nr_frags = used_frags;
+
+ /* Copy headers into the skb linear buffer */
+ memcpy(skb->data, va, HEADER_COPY_SIZE);
+ skb->tail += HEADER_COPY_SIZE;
+
+ /* Skip headers in first fragment */
+ skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;
+
+ /* Adjust size of first fragment */
+ skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE;
+ skb->data_len = length - HEADER_COPY_SIZE;
+ }
+ return skb;
+}
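+
+/* Copy-break note (illustrative): packets up to SMALL_PACKET_SIZE are copied
+ * whole into the skb linear area and the RX pages stay in the ring, so no
+ * replacement allocation is needed; larger packets hand their pages to the
+ * skb as fragments and only HEADER_COPY_SIZE bytes are duplicated into the
+ * linear area, with the first fragment shifted and shrunk to compensate. */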
+
+static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
+{
+ int i;
+ int offset = ETH_HLEN;
+
+ for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
+ if (*(skb->data + offset) != (unsigned char) (i & 0xff))
+ goto out_loopback;
+ }
+ /* Loopback found */
+ priv->loopback_ok = 1;
+
+out_loopback:
+ dev_kfree_skb_any(skb);
+}
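+
+/* The byte check above mirrors the transmit side in en_selftest.c, which
+ * fills the test frame after the Ethernet header with the pattern
+ * 0x00, 0x01, ... (i & 0xff); any mismatch leaves loopback_ok at 0 and
+ * the self-test reports a failure. */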
+
+int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_cqe *cqe;
+ struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
+ struct skb_frag_struct *skb_frags;
+ struct mlx4_en_rx_desc *rx_desc;
+ struct sk_buff *skb;
+ int index;
+ int nr;
+ unsigned int length;
+ int polled = 0;
+ int ip_summed;
+
+ if (!priv->port_up)
+ return 0;
+
+ /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
+ * descriptor offset can be deduced from the CQE index instead of
+ * reading 'cqe->index' */
+ index = cq->mcq.cons_index & ring->size_mask;
+ cqe = &cq->buf[index];
+
+ /* Process all completed CQEs */
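+ /* Ownership check (illustrative): the CQE owner bit flips meaning on
+ * every wrap of the consumer index, so XNOR-ing it against the
+ * wrap-parity bit (cons_index & cq->size) accepts only CQEs written by
+ * HW during the current pass; stale entries from the previous pass
+ * fail the comparison and terminate the loop. */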
+ while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
+ cq->mcq.cons_index & cq->size)) {
+
+ skb_frags = ring->rx_info + (index << priv->log_rx_info);
+ rx_desc = ring->buf + (index << ring->log_stride);
+
+ /*
+ * make sure we read the CQE after we read the ownership bit
+ */
+ rmb();
+
+ /* Drop packet on bad receive or bad checksum */
+ if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
+ MLX4_CQE_OPCODE_ERROR)) {
+ en_err(priv, "CQE completed in error - vendor "
+ "syndrom:%d syndrom:%d\n",
+ ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
+ ((struct mlx4_err_cqe *) cqe)->syndrome);
+ goto next;
+ }
+ if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
+ en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
+ goto next;
+ }
+
+ /*
+ * Packet is OK - process it.
+ */
+ length = be32_to_cpu(cqe->byte_cnt);
+ ring->bytes += length;
+ ring->packets++;
+
+ if (likely(dev->features & NETIF_F_RXCSUM)) {
+ if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+ (cqe->checksum == cpu_to_be16(0xffff))) {
+ priv->port_stats.rx_chksum_good++;
+ /* This packet is eligible for GRO if it is:
+ * - DIX Ethernet (type interpretation)
+ * - TCP/IP (v4)
+ * - without IP options
+ * - not an IP fragment */
+ if (dev->features & NETIF_F_GRO) {
+ struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
+ if (!gro_skb)
+ goto next;
+
+ nr = mlx4_en_complete_rx_desc(
+ priv, rx_desc,
+ skb_frags, skb_shinfo(gro_skb)->frags,
+ ring->page_alloc, length);
+ if (!nr)
+ goto next;
+
+ skb_shinfo(gro_skb)->nr_frags = nr;
+ gro_skb->len = length;
+ gro_skb->data_len = length;
+ gro_skb->truesize += length;
+ gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (cqe->vlan_my_qpn &
+ cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) {
+ u16 vid = be16_to_cpu(cqe->sl_vid);
+
+ __vlan_hwaccel_put_tag(gro_skb, vid);
+ }
+
+ napi_gro_frags(&cq->napi);
+
+ goto next;
+ }
+
+ /* GRO not possible, complete processing here */
+ ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ ip_summed = CHECKSUM_NONE;
+ priv->port_stats.rx_chksum_none++;
+ }
+ } else {
+ ip_summed = CHECKSUM_NONE;
+ priv->port_stats.rx_chksum_none++;
+ }
+
+ skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
+ ring->page_alloc, length);
+ if (!skb) {
+ priv->stats.rx_dropped++;
+ goto next;
+ }
+
+ if (unlikely(priv->validate_loopback)) {
+ validate_loopback(priv, skb);
+ goto next;
+ }
+
+ skb->ip_summed = ip_summed;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb_record_rx_queue(skb, cq->ring);
+
+ if (be32_to_cpu(cqe->vlan_my_qpn) &
+ MLX4_CQE_VLAN_PRESENT_MASK)
+ __vlan_hwaccel_put_tag(skb, be16_to_cpu(cqe->sl_vid));
+
+ /* Push it up the stack */
+ netif_receive_skb(skb);
+
+next:
+ ++cq->mcq.cons_index;
+ index = (cq->mcq.cons_index) & ring->size_mask;
+ cqe = &cq->buf[index];
+ if (++polled == budget) {
+ /* We are here because we reached the NAPI budget -
+ * stop polling; NAPI will reschedule us if needed */
+ goto out;
+ }
+ }
+
+out:
+ AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
+ mlx4_cq_set_ci(&cq->mcq);
+ wmb(); /* ensure HW sees CQ consumer before we post new buffers */
+ ring->cons = cq->mcq.cons_index;
+ ring->prod += polled; /* Polled descriptors were reallocated in place */
+ mlx4_en_update_rx_prod_db(ring);
+ return polled;
+}
+
+
+void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+{
+ struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
+ struct mlx4_en_priv *priv = netdev_priv(cq->dev);
+
+ if (priv->port_up)
+ napi_schedule(&cq->napi);
+ else
+ mlx4_en_arm_cq(priv, cq);
+}
+
+/* Rx CQ polling - called by NAPI */
+int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
+{
+ struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
+ struct net_device *dev = cq->dev;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int done;
+
+ done = mlx4_en_process_rx_cq(dev, cq, budget);
+
+ /* If we used up all the quota - we're probably not done yet... */
+ if (done == budget)
+ INC_PERF_COUNTER(priv->pstats.napi_quota);
+ else {
+ /* Done for now */
+ napi_complete(napi);
+ mlx4_en_arm_cq(priv, cq);
+ }
+ return done;
+}
+
+
+/* Calculate the last offset position that accommodates a full fragment
+ * (assuming fragment size = stride - align) */
+static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align)
+{
+ u16 res = MLX4_EN_ALLOC_SIZE % stride;
+ u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
+
+ en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
+ "res:%d offset:%d\n", stride, align, res, offset);
+ return offset;
+}
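+
+/* Example (illustrative, assuming a 16384 byte allocation unit): for
+ * stride 1536 and align 0, res = 16384 % 1536 = 1024, so the last usable
+ * offset is 16384 - 1536 - 1024 = 13824 and the page holds ten full
+ * strides at offsets 0, 1536, ..., 13824. */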
+
+
+static int frag_sizes[] = {
+ FRAG_SZ0,
+ FRAG_SZ1,
+ FRAG_SZ2,
+ FRAG_SZ3
+};
+
+void mlx4_en_calc_rx_buf(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE;
+ int buf_size = 0;
+ int i = 0;
+
+ while (buf_size < eff_mtu) {
+ priv->frag_info[i].frag_size =
+ (eff_mtu > buf_size + frag_sizes[i]) ?
+ frag_sizes[i] : eff_mtu - buf_size;
+ priv->frag_info[i].frag_prefix_size = buf_size;
+ if (!i) {
+ priv->frag_info[i].frag_align = NET_IP_ALIGN;
+ priv->frag_info[i].frag_stride =
+ ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
+ } else {
+ priv->frag_info[i].frag_align = 0;
+ priv->frag_info[i].frag_stride =
+ ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
+ }
+ priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset(
+ priv, priv->frag_info[i].frag_stride,
+ priv->frag_info[i].frag_align);
+ buf_size += priv->frag_info[i].frag_size;
+ i++;
+ }
+
+ priv->num_frags = i;
+ priv->rx_skb_size = eff_mtu;
+ priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
+
+ en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
+ "num_frags:%d):\n", eff_mtu, priv->num_frags);
+ for (i = 0; i < priv->num_frags; i++) {
+ en_dbg(DRV, priv, " frag:%d - size:%d prefix:%d align:%d "
+ "stride:%d last_offset:%d\n", i,
+ priv->frag_info[i].frag_size,
+ priv->frag_info[i].frag_prefix_size,
+ priv->frag_info[i].frag_align,
+ priv->frag_info[i].frag_stride,
+ priv->frag_info[i].last_offset);
+ }
+}
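+
+/* Worked example (illustrative, assuming the typical FRAG_SZ0 = 512 and
+ * FRAG_SZ1 = 1024): a 1500 byte MTU gives eff_mtu = 1500 + 14 + 4 + 8 =
+ * 1526, so the scatter list becomes frag0 = 512 bytes (prefix 0) and
+ * frag1 = 1014 bytes (prefix 512), i.e. num_frags = 2. */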
+
+/* RSS related functions */
+
+static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
+ struct mlx4_en_rx_ring *ring,
+ enum mlx4_qp_state *state,
+ struct mlx4_qp *qp)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_qp_context *context;
+ int err = 0;
+
+ context = kmalloc(sizeof *context, GFP_KERNEL);
+ if (!context) {
+ en_err(priv, "Failed to allocate qp context\n");
+ return -ENOMEM;
+ }
+
+ err = mlx4_qp_alloc(mdev->dev, qpn, qp);
+ if (err) {
+ en_err(priv, "Failed to allocate qp #%x\n", qpn);
+ goto out;
+ }
+ qp->event = mlx4_en_sqp_event;
+
+ memset(context, 0, sizeof *context);
+ mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
+ qpn, ring->cqn, context);
+ context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
+
+ err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
+ if (err) {
+ mlx4_qp_remove(mdev->dev, qp);
+ mlx4_qp_free(mdev->dev, qp);
+ }
+ mlx4_en_update_rx_prod_db(ring);
+out:
+ kfree(context);
+ return err;
+}
+
+/* Allocate RX QPs and configure them according to the RSS map */
+int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_rss_map *rss_map = &priv->rss_map;
+ struct mlx4_qp_context context;
+ struct mlx4_en_rss_context *rss_context;
+ void *ptr;
+ u8 rss_mask = 0x3f;
+ int i, qpn;
+ int err = 0;
+ int good_qps = 0;
+
+ en_dbg(DRV, priv, "Configuring rss steering\n");
+ err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
+ priv->rx_ring_num,
+ &rss_map->base_qpn);
+ if (err) {
+ en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
+ return err;
+ }
+
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ qpn = rss_map->base_qpn + i;
+ err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i],
+ &rss_map->state[i],
+ &rss_map->qps[i]);
+ if (err)
+ goto rss_err;
+
+ ++good_qps;
+ }
+
+ /* Configure RSS indirection qp */
+ err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
+ if (err) {
+ en_err(priv, "Failed to allocate RSS indirection QP\n");
+ goto rss_err;
+ }
+ rss_map->indir_qp.event = mlx4_en_sqp_event;
+ mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
+ priv->rx_ring[0].cqn, &context);
+
+ ptr = ((void *) &context) + 0x3c;
+ rss_context = ptr;
+ rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
+ (rss_map->base_qpn));
+ rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
+ rss_context->flags = rss_mask;
+
+ if (priv->mdev->profile.udp_rss)
+ rss_context->base_qpn_udp = rss_context->default_qpn;
+ err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
+ &rss_map->indir_qp, &rss_map->indir_state);
+ if (err)
+ goto indir_err;
+
+ return 0;
+
+indir_err:
+ mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
+ MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
+ mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
+ mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
+rss_err:
+ for (i = 0; i < good_qps; i++) {
+ mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
+ MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
+ mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
+ mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
+ }
+ mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
+ return err;
+}
+
+void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_rss_map *rss_map = &priv->rss_map;
+ int i;
+
+ mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
+ MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
+ mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
+ mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
+
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
+ MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
+ mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
+ mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
+ }
+ mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
+}
+
+
+
+
+
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
new file mode 100644
index 000000000000..9fdbcecd499d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/mlx4/driver.h>
+
+#include "mlx4_en.h"
+
+
+static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
+{
+ return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
+{
+ struct sk_buff *skb;
+ struct ethhdr *ethh;
+ unsigned char *packet;
+ unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD;
+ unsigned int i;
+ int err;
+
+
+ /* build the pkt before xmit */
+ skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
+ if (!skb) {
+ en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n");
+ return -ENOMEM;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
+ packet = (unsigned char *)skb_put(skb, packet_size);
+ memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
+ memset(ethh->h_source, 0, ETH_ALEN);
+ ethh->h_proto = htons(ETH_P_ARP);
+ skb_set_mac_header(skb, 0);
+ for (i = 0; i < packet_size; ++i) /* fill our packet */
+ packet[i] = (unsigned char)(i & 0xff);
+
+ /* xmit the pkt */
+ err = mlx4_en_xmit(skb, priv->dev);
+ return err;
+}
+
+static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
+{
+ u32 loopback_ok = 0;
+ int i;
+
+
+ priv->loopback_ok = 0;
+ priv->validate_loopback = 1;
+
+ /* xmit */
+ if (mlx4_en_test_loopback_xmit(priv)) {
+ en_err(priv, "Transmitting loopback packet failed\n");
+ goto mlx4_en_test_loopback_exit;
+ }
+
+ /* polling for result */
+ for (i = 0; i < MLX4_EN_LOOPBACK_RETRIES; ++i) {
+ msleep(MLX4_EN_LOOPBACK_TIMEOUT);
+ if (priv->loopback_ok) {
+ loopback_ok = 1;
+ break;
+ }
+ }
+ if (!loopback_ok)
+ en_err(priv, "Loopback packet didn't arrive\n");
+
+mlx4_en_test_loopback_exit:
+
+ priv->validate_loopback = 0;
+ return !loopback_ok;
+}
+
+
+static int mlx4_en_test_link(struct mlx4_en_priv *priv)
+{
+ if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
+ return -ENOMEM;
+ if (priv->port_state.link_state == 1)
+ return 0;
+ else
+ return 1;
+}
+
+static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
+{
+
+ if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
+ return -ENOMEM;
+
+ /* The device currently only supports 10G speed */
+ if (priv->port_state.link_speed != SPEED_10000)
+ return priv->port_state.link_speed;
+ return 0;
+}
+
+
+void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_ring *tx_ring;
+ int i, carrier_ok;
+
+ memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
+
+ if (*flags & ETH_TEST_FL_OFFLINE) {
+ /* disable the interface */
+ carrier_ok = netif_carrier_ok(dev);
+
+ netif_carrier_off(dev);
+retry_tx:
+ /* Wait until all tx queues are empty.
+ * There should not be any additional incoming traffic
+ * since we turned the carrier off */
+ msleep(200);
+ for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
+ tx_ring = &priv->tx_ring[i];
+ if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
+ goto retry_tx;
+ }
+
+ if (priv->mdev->dev->caps.flags &
+ MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
+ buf[3] = mlx4_en_test_registers(priv);
+ buf[4] = mlx4_en_test_loopback(priv);
+ }
+
+ if (carrier_ok)
+ netif_carrier_on(dev);
+
+ }
+ buf[0] = mlx4_test_interrupts(mdev->dev);
+ buf[1] = mlx4_en_test_link(priv);
+ buf[2] = mlx4_en_test_speed(priv);
+
+ for (i = 0; i < MLX4_EN_NUM_SELF_TEST; i++) {
+ if (buf[i])
+ *flags |= ETH_TEST_FL_FAILED;
+ }
+}
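+
+/* Result layout consumed by "ethtool -t" (summary of the function above):
+ * buf[0] = interrupts, buf[1] = link, buf[2] = speed and, offline only,
+ * buf[3] = registers, buf[4] = loopback; any non-zero slot sets
+ * ETH_TEST_FL_FAILED. */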
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
new file mode 100644
index 000000000000..6e03de034ac7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -0,0 +1,828 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <asm/page.h>
+#include <linux/mlx4/cq.h>
+#include <linux/slab.h>
+#include <linux/mlx4/qp.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+#include <linux/tcp.h>
+
+#include "mlx4_en.h"
+
+enum {
+ MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
+ MAX_BF = 256,
+};
+
+static int inline_thold __read_mostly = MAX_INLINE;
+
+module_param_named(inline_thold, inline_thold, int, 0444);
+MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
+
+int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring, int qpn, u32 size,
+ u16 stride)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int tmp;
+ int err;
+
+ ring->size = size;
+ ring->size_mask = size - 1;
+ ring->stride = stride;
+
+ inline_thold = min(inline_thold, MAX_INLINE);
+
+ spin_lock_init(&ring->comp_lock);
+
+ tmp = size * sizeof(struct mlx4_en_tx_info);
+ ring->tx_info = vmalloc(tmp);
+ if (!ring->tx_info) {
+ en_err(priv, "Failed allocating tx_info ring\n");
+ return -ENOMEM;
+ }
+ en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
+ ring->tx_info, tmp);
+
+ ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+ if (!ring->bounce_buf) {
+ en_err(priv, "Failed allocating bounce buffer\n");
+ err = -ENOMEM;
+ goto err_tx;
+ }
+ ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
+
+ err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
+ 2 * PAGE_SIZE);
+ if (err) {
+ en_err(priv, "Failed allocating hwq resources\n");
+ goto err_bounce;
+ }
+
+ err = mlx4_en_map_buffer(&ring->wqres.buf);
+ if (err) {
+ en_err(priv, "Failed to map TX buffer\n");
+ goto err_hwq_res;
+ }
+
+ ring->buf = ring->wqres.buf.direct.buf;
+
+ en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
+ "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
+ ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
+
+ ring->qpn = qpn;
+ err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
+ if (err) {
+ en_err(priv, "Failed allocating qp %d\n", ring->qpn);
+ goto err_map;
+ }
+ ring->qp.event = mlx4_en_sqp_event;
+
+ err = mlx4_bf_alloc(mdev->dev, &ring->bf);
+ if (err) {
+ en_dbg(DRV, priv, "working without blueflame (%d)", err);
+ ring->bf.uar = &mdev->priv_uar;
+ ring->bf.uar->map = mdev->uar_map;
+ ring->bf_enabled = false;
+ } else
+ ring->bf_enabled = true;
+
+ return 0;
+
+err_map:
+ mlx4_en_unmap_buffer(&ring->wqres.buf);
+err_hwq_res:
+ mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+err_bounce:
+ kfree(ring->bounce_buf);
+ ring->bounce_buf = NULL;
+err_tx:
+ vfree(ring->tx_info);
+ ring->tx_info = NULL;
+ return err;
+}
+
+void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
+
+ if (ring->bf_enabled)
+ mlx4_bf_free(mdev->dev, &ring->bf);
+ mlx4_qp_remove(mdev->dev, &ring->qp);
+ mlx4_qp_free(mdev->dev, &ring->qp);
+ mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
+ mlx4_en_unmap_buffer(&ring->wqres.buf);
+ mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+ kfree(ring->bounce_buf);
+ ring->bounce_buf = NULL;
+ vfree(ring->tx_info);
+ ring->tx_info = NULL;
+}
+
+int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int cq)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+
+ ring->cqn = cq;
+ ring->prod = 0;
+ ring->cons = 0xffffffff;
+ ring->last_nr_txbb = 1;
+ ring->poll_cnt = 0;
+ ring->blocked = 0;
+ memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
+ memset(ring->buf, 0, ring->buf_size);
+
+ ring->qp_state = MLX4_QP_STATE_RST;
+ ring->doorbell_qpn = swab32(ring->qp.qpn << 8);
+
+ mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
+ ring->cqn, &ring->context);
+ if (ring->bf_enabled)
+ ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
+
+ err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
+ &ring->qp, &ring->qp_state);
+
+ return err;
+}
+
+void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
+ MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
+}
+
+
+static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u8 owner)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
+ struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
+ struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
+ struct sk_buff *skb = tx_info->skb;
+ struct skb_frag_struct *frag;
+ void *end = ring->buf + ring->buf_size;
+ int frags = skb_shinfo(skb)->nr_frags;
+ int i;
+ __be32 *ptr = (__be32 *)tx_desc;
+ __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
+
+ /* Optimize the common case when there are no wraparounds */
+ if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
+ if (!tx_info->inl) {
+ if (tx_info->linear) {
+ pci_unmap_single(mdev->pdev,
+ (dma_addr_t) be64_to_cpu(data->addr),
+ be32_to_cpu(data->byte_count),
+ PCI_DMA_TODEVICE);
+ ++data;
+ }
+
+ for (i = 0; i < frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ pci_unmap_page(mdev->pdev,
+ (dma_addr_t) be64_to_cpu(data[i].addr),
+ frag->size, PCI_DMA_TODEVICE);
+ }
+ }
+ /* Stamp the freed descriptor */
+ for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
+ *ptr = stamp;
+ ptr += STAMP_DWORDS;
+ }
+
+ } else {
+ if (!tx_info->inl) {
+ if ((void *) data >= end) {
+ data = ring->buf + ((void *)data - end);
+ }
+
+ if (tx_info->linear) {
+ pci_unmap_single(mdev->pdev,
+ (dma_addr_t) be64_to_cpu(data->addr),
+ be32_to_cpu(data->byte_count),
+ PCI_DMA_TODEVICE);
+ ++data;
+ }
+
+ for (i = 0; i < frags; i++) {
+ /* Check for wraparound before unmapping */
+ if ((void *) data >= end)
+ data = ring->buf;
+ frag = &skb_shinfo(skb)->frags[i];
+ pci_unmap_page(mdev->pdev,
+ (dma_addr_t) be64_to_cpu(data->addr),
+ frag->size, PCI_DMA_TODEVICE);
+ ++data;
+ }
+ }
+ /* Stamp the freed descriptor */
+ for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
+ *ptr = stamp;
+ ptr += STAMP_DWORDS;
+ if ((void *) ptr >= end) {
+ ptr = ring->buf;
+ stamp ^= cpu_to_be32(0x80000000);
+ }
+ }
+
+ }
+ dev_kfree_skb_any(skb);
+ return tx_info->nr_txbb;
+}
+
+
+int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int cnt = 0;
+
+ /* Skip last polled descriptor */
+ ring->cons += ring->last_nr_txbb;
+ en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
+ ring->cons, ring->prod);
+
+ if ((u32) (ring->prod - ring->cons) > ring->size) {
+ if (netif_msg_tx_err(priv))
+ en_warn(priv, "Tx consumer passed producer!\n");
+ return 0;
+ }
+
+ while (ring->cons != ring->prod) {
+ ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
+ ring->cons & ring->size_mask,
+ !!(ring->cons & ring->size));
+ ring->cons += ring->last_nr_txbb;
+ cnt++;
+ }
+
+ if (cnt)
+ en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
+
+ return cnt;
+}
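+
+/* Index arithmetic note (illustrative): prod and cons are free-running
+ * 32-bit counters and only their difference matters, so the in-flight
+ * count survives wraparound - e.g. prod = 0x00000005 and
+ * cons = 0xfffffffe give (u32)(prod - cons) = 7 outstanding TXBBs. */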
+
+
+static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_cq *mcq = &cq->mcq;
+ struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+ struct mlx4_cqe *cqe = cq->buf;
+ u16 index;
+ u16 new_index;
+ u32 txbbs_skipped = 0;
+ u32 cq_last_sav;
+
+ /* index always points to the first TXBB of the last polled descriptor */
+ index = ring->cons & ring->size_mask;
+ new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
+ if (index == new_index)
+ return;
+
+ if (!priv->port_up)
+ return;
+
+ /*
+ * We use a two-stage loop:
+ * - the first samples the HW-updated CQE
+ * - the second frees TXBBs until the last sample
+ * This lets us amortize CQE cache misses, while still polling the CQ
+ * until it is quiescent.
+ */
+ cq_last_sav = mcq->cons_index;
+ do {
+ do {
+ /* Skip over last polled CQE */
+ index = (index + ring->last_nr_txbb) & ring->size_mask;
+ txbbs_skipped += ring->last_nr_txbb;
+
+ /* Poll next CQE */
+ ring->last_nr_txbb = mlx4_en_free_tx_desc(
+ priv, ring, index,
+ !!((ring->cons + txbbs_skipped) &
+ ring->size));
+ ++mcq->cons_index;
+
+ } while (index != new_index);
+
+ new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
+ } while (index != new_index);
+ AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
+ (u32) (mcq->cons_index - cq_last_sav));
+
+ /*
+ * To prevent CQ overflow we first update CQ consumer and only then
+ * the ring consumer.
+ */
+ mlx4_cq_set_ci(mcq);
+ wmb();
+ ring->cons += txbbs_skipped;
+
+ /* Wakeup Tx queue if this ring stopped it */
+ if (unlikely(ring->blocked)) {
+ if ((u32) (ring->prod - ring->cons) <=
+ ring->size - HEADROOM - MAX_DESC_TXBBS) {
+ ring->blocked = 0;
+ netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
+ priv->port_stats.wake_queue++;
+ }
+ }
+}
+
+void mlx4_en_tx_irq(struct mlx4_cq *mcq)
+{
+ struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
+ struct mlx4_en_priv *priv = netdev_priv(cq->dev);
+ struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+
+ if (!spin_trylock(&ring->comp_lock))
+ return;
+ mlx4_en_process_tx_cq(cq->dev, cq);
+ mod_timer(&cq->timer, jiffies + 1);
+ spin_unlock(&ring->comp_lock);
+}
+
+
+void mlx4_en_poll_tx_cq(unsigned long data)
+{
+ struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
+ struct mlx4_en_priv *priv = netdev_priv(cq->dev);
+ struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+ u32 inflight;
+
+ INC_PERF_COUNTER(priv->pstats.tx_poll);
+
+ if (!spin_trylock_irq(&ring->comp_lock)) {
+ mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
+ return;
+ }
+ mlx4_en_process_tx_cq(cq->dev, cq);
+ inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
+
+ /* If there are still packets in flight and the timer has not already
+ * been scheduled by the Tx routine then schedule it here to guarantee
+ * completion processing of these packets */
+ if (inflight && priv->port_up)
+ mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
+
+ spin_unlock_irq(&ring->comp_lock);
+}
+
+static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ u32 index,
+ unsigned int desc_size)
+{
+ u32 copy = (ring->size - index) * TXBB_SIZE;
+ int i;
+
+ for (i = desc_size - copy - 4; i >= 0; i -= 4) {
+ if ((i & (TXBB_SIZE - 1)) == 0)
+ wmb();
+
+ *((u32 *) (ring->buf + i)) =
+ *((u32 *) (ring->bounce_buf + copy + i));
+ }
+
+ for (i = copy - 4; i >= 4 ; i -= 4) {
+ if ((i & (TXBB_SIZE - 1)) == 0)
+ wmb();
+
+ *((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
+ *((u32 *) (ring->bounce_buf + i));
+ }
+
+ /* Return real descriptor location */
+ return ring->buf + index * TXBB_SIZE;
+}
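+
+/* Example (illustrative): in a 256 TXBB ring, a 4 TXBB descriptor built at
+ * index 254 leaves copy = 2 * TXBB_SIZE at the ring end; the first loop
+ * above writes the wrapped tail to the start of the ring and the second
+ * writes the head back at index 254, stopping at offset 4 so that the
+ * caller can set the ownership dword (the first 4 bytes) last. */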
+
+static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
+{
+ struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
+ struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
+ unsigned long flags;
+
+ /* If we don't have a pending timer, set one up to catch our recent
+ post in case the interface becomes idle */
+ if (!timer_pending(&cq->timer))
+ mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
+
+ /* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */
+ if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
+ if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
+ mlx4_en_process_tx_cq(priv->dev, cq);
+ spin_unlock_irqrestore(&ring->comp_lock, flags);
+ }
+}
+
+static void *get_frag_ptr(struct sk_buff *skb)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ struct page *page = frag->page;
+ void *ptr;
+
+ ptr = page_address(page);
+ if (unlikely(!ptr))
+ return NULL;
+
+ return ptr + frag->page_offset;
+}
+
+static int is_inline(struct sk_buff *skb, void **pfrag)
+{
+ void *ptr;
+
+ if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
+ if (skb_shinfo(skb)->nr_frags == 1) {
+ ptr = get_frag_ptr(skb);
+ if (unlikely(!ptr))
+ return 0;
+
+ if (pfrag)
+ *pfrag = ptr;
+
+ return 1;
+ } else if (unlikely(skb_shinfo(skb)->nr_frags))
+ return 0;
+ else
+ return 1;
+ }
+
+ return 0;
+}
+
+static int inline_size(struct sk_buff *skb)
+{
+ if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
+ <= MLX4_INLINE_ALIGN)
+ return ALIGN(skb->len + CTRL_SIZE +
+ sizeof(struct mlx4_wqe_inline_seg), 16);
+ else
+ return ALIGN(skb->len + CTRL_SIZE + 2 *
+ sizeof(struct mlx4_wqe_inline_seg), 16);
+}
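+
+/* Worked example (illustrative, assuming MLX4_INLINE_ALIGN = 64, a 16 byte
+ * control segment and 4 byte inline headers, per the MAX_INLINE comment
+ * above): a 40 byte skb needs one inline chunk, 40 + 16 + 4 = 60, aligned
+ * to 64; at the 104 byte inline_thold maximum the payload crosses the
+ * 64 byte boundary and a second inline header is added, 104 + 16 + 8 = 128. */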
+
+static int get_real_size(struct sk_buff *skb, struct net_device *dev,
+ int *lso_header_size)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int real_size;
+
+ if (skb_is_gso(skb)) {
+ *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
+ ALIGN(*lso_header_size + 4, DS_SIZE);
+ if (unlikely(*lso_header_size != skb_headlen(skb))) {
+ /* We add a segment for the skb linear buffer only if
+ * it contains data */
+ if (*lso_header_size < skb_headlen(skb))
+ real_size += DS_SIZE;
+ else {
+ if (netif_msg_tx_err(priv))
+ en_warn(priv, "Non-linear headers\n");
+ return 0;
+ }
+ }
+ } else {
+ *lso_header_size = 0;
+ if (!is_inline(skb, NULL))
+ real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
+ else
+ real_size = inline_size(skb);
+ }
+
+ return real_size;
+}
+
+static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
+ int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
+{
+ struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
+ int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
+
+ if (skb->len <= spc) {
+ inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
+ skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
+ if (skb_shinfo(skb)->nr_frags)
+ memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
+ skb_shinfo(skb)->frags[0].size);
+
+ } else {
+ inl->byte_count = cpu_to_be32(1 << 31 | spc);
+ if (skb_headlen(skb) <= spc) {
+ skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
+ if (skb_headlen(skb) < spc) {
+ memcpy(((void *)(inl + 1)) + skb_headlen(skb),
+ fragptr, spc - skb_headlen(skb));
+ fragptr += spc - skb_headlen(skb);
+ }
+ inl = (void *) (inl + 1) + spc;
+ memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
+ } else {
+ skb_copy_from_linear_data(skb, inl + 1, spc);
+ inl = (void *) (inl + 1) + spc;
+ skb_copy_from_linear_data_offset(skb, spc, inl + 1,
+ skb_headlen(skb) - spc);
+ if (skb_shinfo(skb)->nr_frags)
+ memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
+ fragptr, skb_shinfo(skb)->frags[0].size);
+ }
+
+ wmb();
+ inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
+ }
+ tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
+ tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+}
+
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ u16 vlan_tag = 0;
+
+ /* If we support per priority flow control and the packet contains
+ * a vlan tag, send the packet to the TX ring assigned to that priority
+ */
+ if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
+ vlan_tag = vlan_tx_tag_get(skb);
+ return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
+ }
+
+ return skb_tx_hash(dev, skb);
+}
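+
+/* Example (illustrative): VLAN tag 0x6005 carries priority 3
+ * (0x6005 >> 13), so with per-priority flow control enabled the skb is
+ * steered to ring MLX4_EN_NUM_TX_RINGS + 3; all other traffic falls back
+ * to the generic skb_tx_hash() spreading. */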
+
+static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt)
+{
+ __iowrite64_copy(dst, src, bytecnt / 8);
+}
+
+netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_ring *ring;
+ struct mlx4_en_cq *cq;
+ struct mlx4_en_tx_desc *tx_desc;
+ struct mlx4_wqe_data_seg *data;
+ struct skb_frag_struct *frag;
+ struct mlx4_en_tx_info *tx_info;
+ struct ethhdr *ethh;
+ u64 mac;
+ u32 mac_l, mac_h;
+ int tx_ind = 0;
+ int nr_txbb;
+ int desc_size;
+ int real_size;
+ dma_addr_t dma;
+ u32 index, bf_index;
+ __be32 op_own;
+ u16 vlan_tag = 0;
+ int i;
+ int lso_header_size;
+ void *fragptr;
+ bool bounce = false;
+
+ if (!priv->port_up)
+ goto tx_drop;
+
+ real_size = get_real_size(skb, dev, &lso_header_size);
+ if (unlikely(!real_size))
+ goto tx_drop;
+
+ /* Align descriptor to TXBB size */
+ desc_size = ALIGN(real_size, TXBB_SIZE);
+ nr_txbb = desc_size / TXBB_SIZE;
+ if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
+ if (netif_msg_tx_err(priv))
+ en_warn(priv, "Oversized header or SG list\n");
+ goto tx_drop;
+ }
+
+ tx_ind = skb->queue_mapping;
+ ring = &priv->tx_ring[tx_ind];
+ if (vlan_tx_tag_present(skb))
+ vlan_tag = vlan_tx_tag_get(skb);
+
+ /* Check available TXBBs and 2K spare for prefetch */
+ if (unlikely(((int)(ring->prod - ring->cons)) >
+ ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ /* A full Tx ring stops the queue */
+ netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
+ ring->blocked = 1;
+ priv->port_stats.queue_stopped++;
+
+ /* Use interrupts to find out when queue opened */
+ cq = &priv->tx_cq[tx_ind];
+ mlx4_en_arm_cq(priv, cq);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Track current inflight packets for performance analysis */
+ AVG_PERF_COUNTER(priv->pstats.inflight_avg,
+ (u32) (ring->prod - ring->cons - 1));
+
+ /* Packet is good - grab an index and transmit it */
+ index = ring->prod & ring->size_mask;
+ bf_index = ring->prod;
+
+ /* See if we have enough space for a whole descriptor TXBB for setting
+ * SW ownership on the next descriptor; if not, use a bounce buffer. */
+ if (likely(index + nr_txbb <= ring->size))
+ tx_desc = ring->buf + index * TXBB_SIZE;
+ else {
+ tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
+ bounce = true;
+ }
+
+ /* Save skb in tx_info ring */
+ tx_info = &ring->tx_info[index];
+ tx_info->skb = skb;
+ tx_info->nr_txbb = nr_txbb;
+
+ /* Prepare ctrl segment apart from opcode+ownership, which depends on
+ * whether LSO is used */
+ tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
+ tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
+ MLX4_WQE_CTRL_SOLICITED);
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
+ MLX4_WQE_CTRL_TCP_UDP_CSUM);
+ priv->port_stats.tx_chksum_offload++;
+ }
+
+ if (unlikely(priv->validate_loopback)) {
+ /* Copy dst mac address to wqe */
+ skb_reset_mac_header(skb);
+ ethh = eth_hdr(skb);
+ if (ethh && ethh->h_dest) {
+ mac = mlx4_en_mac_to_u64(ethh->h_dest);
+ mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
+ mac_l = (u32) (mac & 0xffffffff);
+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
+ tx_desc->ctrl.imm = cpu_to_be32(mac_l);
+ }
+ }
+
+ /* Handle LSO (TSO) packets */
+ if (lso_header_size) {
+ /* Mark opcode as LSO */
+ op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
+ ((ring->prod & ring->size) ?
+ cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
+
+ /* Fill in the LSO prefix */
+ tx_desc->lso.mss_hdr_size = cpu_to_be32(
+ skb_shinfo(skb)->gso_size << 16 | lso_header_size);
+
+ /* Copy headers;
+ * note that we already verified that it is linear */
+ memcpy(tx_desc->lso.header, skb->data, lso_header_size);
+ data = ((void *) &tx_desc->lso +
+ ALIGN(lso_header_size + 4, DS_SIZE));
+
+ priv->port_stats.tso_packets++;
+ i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
+ !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
+ ring->bytes += skb->len + (i - 1) * lso_header_size;
+ ring->packets += i;
+ } else {
+ /* Normal (Non LSO) packet */
+ op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
+ ((ring->prod & ring->size) ?
+ cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
+ data = &tx_desc->data;
+ ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
+ ring->packets++;
+
+ }
+ AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
+
+
+ /* valid only for non-inline segments */
+ tx_info->data_offset = (void *) data - (void *) tx_desc;
+
+ tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
+ data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
+
+ if (!is_inline(skb, &fragptr)) {
+ /* Map fragments */
+ for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
+ frag = &skb_shinfo(skb)->frags[i];
+ dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
+ frag->size, PCI_DMA_TODEVICE);
+ data->addr = cpu_to_be64(dma);
+ data->lkey = cpu_to_be32(mdev->mr.key);
+ wmb();
+ data->byte_count = cpu_to_be32(frag->size);
+ --data;
+ }
+
+ /* Map linear part */
+ if (tx_info->linear) {
+ dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size,
+ skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
+ data->addr = cpu_to_be64(dma);
+ data->lkey = cpu_to_be32(mdev->mr.key);
+ wmb();
+ data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
+ }
+ tx_info->inl = 0;
+ } else {
+ build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
+ tx_info->inl = 1;
+ }
+
+ ring->prod += nr_txbb;
+
+ /* If we used a bounce buffer then copy descriptor back into place */
+ if (bounce)
+ tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
+
+ /* Run destructor before passing skb to HW */
+ if (likely(!skb_shared(skb)))
+ skb_orphan(skb);
+
+ if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
+ *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn;
+ op_own |= htonl((bf_index & 0xffff) << 8);
+ /* Ensure new descriptor hits memory
+ * before setting ownership of this descriptor to HW */
+ wmb();
+ tx_desc->ctrl.owner_opcode = op_own;
+
+ wmb();
+
+ mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl,
+ desc_size);
+
+ wmb();
+
+ ring->bf.offset ^= ring->bf.buf_size;
+ } else {
+ /* Ensure new descriptor hits memory
+ * before setting ownership of this descriptor to HW */
+ wmb();
+ tx_desc->ctrl.owner_opcode = op_own;
+ wmb();
+ writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
+ }
+
+ /* Poll CQ here */
+ mlx4_en_xmit_poll(priv, tx_ind);
+
+ return NETDEV_TX_OK;
+
+tx_drop:
+ dev_kfree_skb_any(skb);
+ priv->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
new file mode 100644
index 000000000000..1ad1f6029af8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -0,0 +1,842 @@
+/*
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+#include "fw.h"
+
+enum {
+ MLX4_IRQNAME_SIZE = 32
+};
+
+enum {
+ MLX4_NUM_ASYNC_EQE = 0x100,
+ MLX4_NUM_SPARE_EQE = 0x80,
+ MLX4_EQ_ENTRY_SIZE = 0x20
+};
+
+/*
+ * Must be packed because start is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_eq_context {
+ __be32 flags;
+ u16 reserved1[3];
+ __be16 page_offset;
+ u8 log_eq_size;
+ u8 reserved2[4];
+ u8 eq_period;
+ u8 reserved3;
+ u8 eq_max_count;
+ u8 reserved4[3];
+ u8 intr;
+ u8 log_page_size;
+ u8 reserved5[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ u32 reserved6[2];
+ __be32 consumer_index;
+ __be32 producer_index;
+ u32 reserved7[4];
+};
+
+#define MLX4_EQ_STATUS_OK ( 0 << 28)
+#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
+#define MLX4_EQ_OWNER_SW ( 0 << 24)
+#define MLX4_EQ_OWNER_HW ( 1 << 24)
+#define MLX4_EQ_FLAG_EC ( 1 << 18)
+#define MLX4_EQ_FLAG_OI ( 1 << 17)
+#define MLX4_EQ_STATE_ARMED ( 9 << 8)
+#define MLX4_EQ_STATE_FIRED (10 << 8)
+#define MLX4_EQ_STATE_ALWAYS_ARMED (11 << 8)
+
+#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG) | \
+ (1ull << MLX4_EVENT_TYPE_COMM_EST) | \
+ (1ull << MLX4_EVENT_TYPE_SQ_DRAINED) | \
+ (1ull << MLX4_EVENT_TYPE_CQ_ERROR) | \
+ (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR) | \
+ (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR) | \
+ (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \
+ (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
+ (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \
+ (1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \
+ (1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \
+ (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \
+ (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
+ (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
+ (1ull << MLX4_EVENT_TYPE_CMD))
+
+struct mlx4_eqe {
+ u8 reserved1;
+ u8 type;
+ u8 reserved2;
+ u8 subtype;
+ union {
+ u32 raw[6];
+ struct {
+ __be32 cqn;
+ } __packed comp;
+ struct {
+ u16 reserved1;
+ __be16 token;
+ u32 reserved2;
+ u8 reserved3[3];
+ u8 status;
+ __be64 out_param;
+ } __packed cmd;
+ struct {
+ __be32 qpn;
+ } __packed qp;
+ struct {
+ __be32 srqn;
+ } __packed srq;
+ struct {
+ __be32 cqn;
+ u32 reserved1;
+ u8 reserved2[3];
+ u8 syndrome;
+ } __packed cq_err;
+ struct {
+ u32 reserved1[2];
+ __be32 port;
+ } __packed port_change;
+ } event;
+ u8 reserved3[3];
+ u8 owner;
+} __packed;
+
+static void eq_set_ci(struct mlx4_eq *eq, int req_not)
+{
+ __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
+ req_not << 31),
+ eq->doorbell);
+ /* We still want ordering, just not swabbing, so add a barrier */
+ mb();
+}
+
+static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
+{
+ unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
+ return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
+}
+
+static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
+{
+ struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
+ return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
+}
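+
+/* Ownership note (illustrative): the top bit of eqe->owner flips meaning
+ * each time cons_index wraps past nent, so the XOR above yields NULL for
+ * entries the HW has not rewritten since the previous pass - the same
+ * parity scheme used for CQEs on the ethernet side. */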
+
+static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
+{
+ struct mlx4_eqe *eqe;
+ int cqn;
+ int eqes_found = 0;
+ int set_ci = 0;
+ int port;
+
+ while ((eqe = next_eqe_sw(eq))) {
+ /*
+ * Make sure we read EQ entry contents after we've
+ * checked the ownership bit.
+ */
+ rmb();
+
+ switch (eqe->type) {
+ case MLX4_EVENT_TYPE_COMP:
+ cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
+ mlx4_cq_completion(dev, cqn);
+ break;
+
+ case MLX4_EVENT_TYPE_PATH_MIG:
+ case MLX4_EVENT_TYPE_COMM_EST:
+ case MLX4_EVENT_TYPE_SQ_DRAINED:
+ case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
+ case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
+ case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
+ case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
+ mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
+ eqe->type);
+ break;
+
+ case MLX4_EVENT_TYPE_SRQ_LIMIT:
+ case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
+ mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
+ eqe->type);
+ break;
+
+ case MLX4_EVENT_TYPE_CMD:
+ mlx4_cmd_event(dev,
+ be16_to_cpu(eqe->event.cmd.token),
+ eqe->event.cmd.status,
+ be64_to_cpu(eqe->event.cmd.out_param));
+ break;
+
+ case MLX4_EVENT_TYPE_PORT_CHANGE:
+ port = be32_to_cpu(eqe->event.port_change.port) >> 28;
+ if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
+ mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
+ port);
+ mlx4_priv(dev)->sense.do_sense_port[port] = 1;
+ } else {
+ mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
+ port);
+ mlx4_priv(dev)->sense.do_sense_port[port] = 0;
+ }
+ break;
+
+ case MLX4_EVENT_TYPE_CQ_ERROR:
+ mlx4_warn(dev, "CQ %s on CQN %06x\n",
+ eqe->event.cq_err.syndrome == 1 ?
+ "overrun" : "access violation",
+ be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
+ mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
+ eqe->type);
+ break;
+
+ case MLX4_EVENT_TYPE_EQ_OVERFLOW:
+ mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
+ break;
+
+ case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
+ case MLX4_EVENT_TYPE_ECC_DETECT:
+ default:
+ mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
+ eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
+ break;
+ }
+
+ ++eq->cons_index;
+ eqes_found = 1;
+ ++set_ci;
+
+ /*
+ * The HCA will think the queue has overflowed if we
+ * don't tell it we've been processing events. We
+ * create our EQs with MLX4_NUM_SPARE_EQE extra
+ * entries, so we must update our consumer index at
+ * least that often.
+ */
+ if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
+ eq_set_ci(eq, 0);
+ set_ci = 0;
+ }
+ }
+
+ eq_set_ci(eq, 1);
+
+ return eqes_found;
+}
+
+static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
+{
+ struct mlx4_dev *dev = dev_ptr;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int work = 0;
+ int i;
+
+ writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
+
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
+ work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
+
+ return IRQ_RETVAL(work);
+}
+
+static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
+{
+ struct mlx4_eq *eq = eq_ptr;
+ struct mlx4_dev *dev = eq->dev;
+
+ mlx4_eq_int(dev, eq);
+
+ /* MSI-X vectors always belong to us */
+ return IRQ_HANDLED;
+}
+
+static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
+ int eq_num)
+{
+ return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
+ 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
+}
+
+static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int eq_num)
+{
+ return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int eq_num)
+{
+ return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_num_eq_uar(struct mlx4_dev *dev)
+{
+ /*
+ * Each UAR holds 4 EQ doorbells. To figure out how many UARs
+ * we need to map, take the difference of highest index and
+ * the lowest index we'll use and add 1.
+ */
+ return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
+ dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
+}
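+
+/* Example (illustrative): with 8 completion vectors, 16 reserved EQs and
+ * an empty completion pool this is (8 + 1 + 16)/4 - 16/4 + 1 =
+ * 6 - 4 + 1 = 3 UARs, spanning every doorbell between the lowest and
+ * highest EQ numbers in use. */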
+
+static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int index;
+
+ index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;
+
+ if (!priv->eq_table.uar_map[index]) {
+ priv->eq_table.uar_map[index] =
+ ioremap(pci_resource_start(dev->pdev, 2) +
+ ((eq->eqn / 4) << PAGE_SHIFT),
+ PAGE_SIZE);
+ if (!priv->eq_table.uar_map[index]) {
+ mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
+ eq->eqn);
+ return NULL;
+ }
+ }
+
+ return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
+}
+
+static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
+ u8 intr, struct mlx4_eq *eq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_eq_context *eq_context;
+ int npages;
+ u64 *dma_list = NULL;
+ dma_addr_t t;
+ u64 mtt_addr;
+ int err = -ENOMEM;
+ int i;
+
+ eq->dev = dev;
+ eq->nent = roundup_pow_of_two(max(nent, 2));
+ npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;
+
+ eq->page_list = kmalloc(npages * sizeof *eq->page_list,
+ GFP_KERNEL);
+ if (!eq->page_list)
+ goto err_out;
+
+ for (i = 0; i < npages; ++i)
+ eq->page_list[i].buf = NULL;
+
+ dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
+ if (!dma_list)
+ goto err_out_free;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ goto err_out_free;
+ eq_context = mailbox->buf;
+
+ for (i = 0; i < npages; ++i) {
+ eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
+ PAGE_SIZE, &t, GFP_KERNEL);
+ if (!eq->page_list[i].buf)
+ goto err_out_free_pages;
+
+ dma_list[i] = t;
+ eq->page_list[i].map = t;
+
+ memset(eq->page_list[i].buf, 0, PAGE_SIZE);
+ }
+
+ eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
+ if (eq->eqn == -1)
+ goto err_out_free_pages;
+
+ eq->doorbell = mlx4_get_eq_uar(dev, eq);
+ if (!eq->doorbell) {
+ err = -ENOMEM;
+ goto err_out_free_eq;
+ }
+
+ err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
+ if (err)
+ goto err_out_free_eq;
+
+ err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
+ if (err)
+ goto err_out_free_mtt;
+
+ memset(eq_context, 0, sizeof *eq_context);
+ eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK |
+ MLX4_EQ_STATE_ARMED);
+ eq_context->log_eq_size = ilog2(eq->nent);
+ eq_context->intr = intr;
+ eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;
+
+ mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
+ eq_context->mtt_base_addr_h = mtt_addr >> 32;
+ eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
+
+ err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
+ if (err) {
+ mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
+ goto err_out_free_mtt;
+ }
+
+ kfree(dma_list);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ eq->cons_index = 0;
+
+ return err;
+
+err_out_free_mtt:
+ mlx4_mtt_cleanup(dev, &eq->mtt);
+
+err_out_free_eq:
+ mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
+
+err_out_free_pages:
+ for (i = 0; i < npages; ++i)
+ if (eq->page_list[i].buf)
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ eq->page_list[i].buf,
+ eq->page_list[i].map);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+err_out_free:
+ kfree(eq->page_list);
+ kfree(dma_list);
+
+err_out:
+ return err;
+}
+
+static void mlx4_free_eq(struct mlx4_dev *dev,
+ struct mlx4_eq *eq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+ int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
+ int i;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return;
+
+ err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
+ if (err)
+ mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);
+
+ if (0) {
+ mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
+ for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
+ if (i % 4 == 0)
+ pr_cont("[%02x] ", i * 4);
+ pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
+ if ((i + 1) % 4 == 0)
+ pr_cont("\n");
+ }
+ }
+
+ mlx4_mtt_cleanup(dev, &eq->mtt);
+ for (i = 0; i < npages; ++i)
+ pci_free_consistent(dev->pdev, PAGE_SIZE,
+ eq->page_list[i].buf,
+ eq->page_list[i].map);
+
+ kfree(eq->page_list);
+ mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+}
+
+static void mlx4_free_irqs(struct mlx4_dev *dev)
+{
+ struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i, vec;
+
+ if (eq_table->have_irq)
+ free_irq(dev->pdev->irq, dev);
+
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
+ if (eq_table->eq[i].have_irq) {
+ free_irq(eq_table->eq[i].irq, eq_table->eq + i);
+ eq_table->eq[i].have_irq = 0;
+ }
+
+ for (i = 0; i < dev->caps.comp_pool; i++) {
+ /*
+ * Freeing the assigned irq's
+ * all bits should be 0, but we need to validate
+ */
+ if (priv->msix_ctl.pool_bm & 1ULL << i) {
+ /* NO need protecting*/
+ vec = dev->caps.num_comp_vectors + 1 + i;
+ free_irq(priv->eq_table.eq[vec].irq,
+ &priv->eq_table.eq[vec]);
+ }
+ }
+
+
+ kfree(eq_table->irq_names);
+}
+
+static int mlx4_map_clr_int(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
+ priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
+ if (!priv->clr_base) {
+ mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ iounmap(priv->clr_base);
+}
+
+int mlx4_alloc_eq_table(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
+ sizeof *priv->eq_table.eq, GFP_KERNEL);
+ if (!priv->eq_table.eq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx4_free_eq_table(struct mlx4_dev *dev)
+{
+ kfree(mlx4_priv(dev)->eq_table.eq);
+}
+
+int mlx4_init_eq_table(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err;
+ int i;
+
+	priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
+					 sizeof *priv->eq_table.uar_map,
+					 GFP_KERNEL);
+ if (!priv->eq_table.uar_map) {
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+
+ err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
+ dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
+ if (err)
+ goto err_out_free;
+
+ for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
+ priv->eq_table.uar_map[i] = NULL;
+
+ err = mlx4_map_clr_int(dev);
+ if (err)
+ goto err_out_bitmap;
+
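+	/* The 64-bit interrupt clear register is big-endian, so pick the
+	 * dword that covers the INTA pin and precompute the swapped mask. */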
+ priv->eq_table.clr_mask =
+ swab32(1 << (priv->eq_table.inta_pin & 31));
+ priv->eq_table.clr_int = priv->clr_base +
+ (priv->eq_table.inta_pin < 32 ? 4 : 0);
+
+ priv->eq_table.irq_names =
+ kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
+ dev->caps.comp_pool),
+ GFP_KERNEL);
+ if (!priv->eq_table.irq_names) {
+ err = -ENOMEM;
+ goto err_out_bitmap;
+ }
+
+ for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
+ err = mlx4_create_eq(dev, dev->caps.num_cqs -
+ dev->caps.reserved_cqs +
+ MLX4_NUM_SPARE_EQE,
+ (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
+ &priv->eq_table.eq[i]);
+ if (err) {
+ --i;
+ goto err_out_unmap;
+ }
+ }
+
+ err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
+ (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
+ &priv->eq_table.eq[dev->caps.num_comp_vectors]);
+ if (err)
+ goto err_out_comp;
+
+	/* if the additional completion vector pool size is 0, this loop does not run */
+ for (i = dev->caps.num_comp_vectors + 1;
+ i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
+
+ err = mlx4_create_eq(dev, dev->caps.num_cqs -
+ dev->caps.reserved_cqs +
+ MLX4_NUM_SPARE_EQE,
+ (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
+ &priv->eq_table.eq[i]);
+ if (err) {
+ --i;
+ goto err_out_unmap;
+ }
+ }
+
+ if (dev->flags & MLX4_FLAG_MSI_X) {
+ const char *eq_name;
+
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
+ if (i < dev->caps.num_comp_vectors) {
+ snprintf(priv->eq_table.irq_names +
+ i * MLX4_IRQNAME_SIZE,
+ MLX4_IRQNAME_SIZE,
+ "mlx4-comp-%d@pci:%s", i,
+ pci_name(dev->pdev));
+ } else {
+ snprintf(priv->eq_table.irq_names +
+ i * MLX4_IRQNAME_SIZE,
+ MLX4_IRQNAME_SIZE,
+ "mlx4-async@pci:%s",
+ pci_name(dev->pdev));
+ }
+
+ eq_name = priv->eq_table.irq_names +
+ i * MLX4_IRQNAME_SIZE;
+ err = request_irq(priv->eq_table.eq[i].irq,
+ mlx4_msi_x_interrupt, 0, eq_name,
+ priv->eq_table.eq + i);
+ if (err)
+ goto err_out_async;
+
+ priv->eq_table.eq[i].have_irq = 1;
+ }
+ } else {
+ snprintf(priv->eq_table.irq_names,
+ MLX4_IRQNAME_SIZE,
+ DRV_NAME "@pci:%s",
+ pci_name(dev->pdev));
+ err = request_irq(dev->pdev->irq, mlx4_interrupt,
+ IRQF_SHARED, priv->eq_table.irq_names, dev);
+ if (err)
+ goto err_out_async;
+
+ priv->eq_table.have_irq = 1;
+ }
+
+ err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+ priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+ if (err)
+ mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
+ priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);
+
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
+ eq_set_ci(&priv->eq_table.eq[i], 1);
+
+ return 0;
+
+err_out_async:
+ mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);
+
+err_out_comp:
+ i = dev->caps.num_comp_vectors - 1;
+
+err_out_unmap:
+ while (i >= 0) {
+ mlx4_free_eq(dev, &priv->eq_table.eq[i]);
+ --i;
+ }
+ mlx4_unmap_clr_int(dev);
+ mlx4_free_irqs(dev);
+
+err_out_bitmap:
+ mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
+
+err_out_free:
+ kfree(priv->eq_table.uar_map);
+
+ return err;
+}
+
+void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+
+ mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
+ priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+
+ mlx4_free_irqs(dev);
+
+ for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
+ mlx4_free_eq(dev, &priv->eq_table.eq[i]);
+
+ mlx4_unmap_clr_int(dev);
+
+ for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
+ if (priv->eq_table.uar_map[i])
+ iounmap(priv->eq_table.uar_map[i]);
+
+ mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
+
+ kfree(priv->eq_table.uar_map);
+}
+
+/* A test that verifies that we can accept interrupts on all
+ * the irq vectors of the device.
+ * Interrupts are checked using the NOP command.
+ */
+int mlx4_test_interrupts(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+ int err;
+
+ err = mlx4_NOP(dev);
+ /* When not in MSI_X, there is only one irq to check */
+ if (!(dev->flags & MLX4_FLAG_MSI_X))
+ return err;
+
+	/* Loop over all completion vectors; check that each one works by
+	 * temporarily mapping asynchronous events (including command
+	 * completions) to it and issuing a NOP command.
+	 */
+	for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
+ /* Temporary use polling for command completions */
+ mlx4_cmd_use_polling(dev);
+
+		/* Map this EQ to handle all asynchronous events */
+ err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+ priv->eq_table.eq[i].eqn);
+ if (err) {
+ mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
+ mlx4_cmd_use_events(dev);
+ break;
+ }
+
+ /* Go back to using events */
+ mlx4_cmd_use_events(dev);
+ err = mlx4_NOP(dev);
+ }
+
+ /* Return to default */
+ mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+ priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_test_interrupts);
+
+int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int vec = 0, err = 0, i;
+
+ spin_lock(&priv->msix_ctl.pool_lock);
+ for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
+ if (~priv->msix_ctl.pool_bm & 1ULL << i) {
+ priv->msix_ctl.pool_bm |= 1ULL << i;
+ vec = dev->caps.num_comp_vectors + 1 + i;
+ snprintf(priv->eq_table.irq_names +
+ vec * MLX4_IRQNAME_SIZE,
+ MLX4_IRQNAME_SIZE, "%s", name);
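+			/* vec << 5 below assumes MLX4_IRQNAME_SIZE == 32 */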
+ err = request_irq(priv->eq_table.eq[vec].irq,
+ mlx4_msi_x_interrupt, 0,
+ &priv->eq_table.irq_names[vec<<5],
+ priv->eq_table.eq + vec);
+			if (err) {
+				/* clear the bit again by flipping it */
+				priv->msix_ctl.pool_bm ^= 1ULL << i;
+				vec = 0;
+				/* keep scanning the rest of the pool */
+				continue;
+			}
+ eq_set_ci(&priv->eq_table.eq[vec], 1);
+ }
+ }
+ spin_unlock(&priv->msix_ctl.pool_lock);
+
+ if (vec) {
+ *vector = vec;
+ } else {
+ *vector = 0;
+ err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
+ }
+ return err;
+}
+EXPORT_SYMBOL(mlx4_assign_eq);
+
+void mlx4_release_eq(struct mlx4_dev *dev, int vec)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+	/* index into the MSI-X pool bitmap */
+	int i = vec - dev->caps.num_comp_vectors - 1;
+
+ if (likely(i >= 0)) {
+		/* sanity check: make sure we are not freeing an IRQ
+		 * that belongs to a legacy EQ */
+ spin_lock(&priv->msix_ctl.pool_lock);
+ if (priv->msix_ctl.pool_bm & 1ULL << i) {
+ free_irq(priv->eq_table.eq[vec].irq,
+ &priv->eq_table.eq[vec]);
+ priv->msix_ctl.pool_bm &= ~(1ULL << i);
+ }
+ spin_unlock(&priv->msix_ctl.pool_lock);
+ }
+}
+EXPORT_SYMBOL(mlx4_release_eq);
+
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
new file mode 100644
index 000000000000..7eb8ba822e97
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -0,0 +1,944 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx4/cmd.h>
+#include <linux/cache.h>
+
+#include "fw.h"
+#include "icm.h"
+
+enum {
+ MLX4_COMMAND_INTERFACE_MIN_REV = 2,
+ MLX4_COMMAND_INTERFACE_MAX_REV = 3,
+ MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS = 3,
+};
+
+extern void __buggy_use_of_MLX4_GET(void);
+extern void __buggy_use_of_MLX4_PUT(void);
+
+static int enable_qos;
+module_param(enable_qos, bool, 0444);
+MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
+
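+/*
+ * MLX4_GET and MLX4_PUT copy one field between a big-endian command
+ * mailbox and a host-order variable, picking the byte swap from the
+ * operand's size.  An unsupported size resolves to the undefined
+ * __buggy_use_of_MLX4_GET/PUT() helpers and fails at link time.
+ */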
+#define MLX4_GET(dest, source, offset) \
+ do { \
+ void *__p = (char *) (source) + (offset); \
+ switch (sizeof (dest)) { \
+ case 1: (dest) = *(u8 *) __p; break; \
+ case 2: (dest) = be16_to_cpup(__p); break; \
+ case 4: (dest) = be32_to_cpup(__p); break; \
+ case 8: (dest) = be64_to_cpup(__p); break; \
+ default: __buggy_use_of_MLX4_GET(); \
+ } \
+ } while (0)
+
+#define MLX4_PUT(dest, source, offset) \
+ do { \
+ void *__d = ((char *) (dest) + (offset)); \
+ switch (sizeof(source)) { \
+ case 1: *(u8 *) __d = (source); break; \
+ case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
+ case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
+ case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
+ default: __buggy_use_of_MLX4_PUT(); \
+ } \
+ } while (0)
+
+static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
+{
+ static const char *fname[] = {
+ [ 0] = "RC transport",
+ [ 1] = "UC transport",
+ [ 2] = "UD transport",
+ [ 3] = "XRC transport",
+ [ 4] = "reliable multicast",
+ [ 5] = "FCoIB support",
+ [ 6] = "SRQ support",
+ [ 7] = "IPoIB checksum offload",
+ [ 8] = "P_Key violation counter",
+ [ 9] = "Q_Key violation counter",
+ [10] = "VMM",
+ [12] = "DPDP",
+ [15] = "Big LSO headers",
+ [16] = "MW support",
+ [17] = "APM support",
+ [18] = "Atomic ops support",
+ [19] = "Raw multicast support",
+ [20] = "Address vector port checking support",
+ [21] = "UD multicast support",
+ [24] = "Demand paging support",
+ [25] = "Router support",
+ [30] = "IBoE support",
+ [32] = "Unicast loopback support",
+ [38] = "Wake On LAN support",
+ [40] = "UDP RSS support",
+ [41] = "Unicast VEP steering support",
+ [42] = "Multicast VEP steering support",
+ [48] = "Counters support",
+ };
+ int i;
+
+ mlx4_dbg(dev, "DEV_CAP flags:\n");
+ for (i = 0; i < ARRAY_SIZE(fname); ++i)
+ if (fname[i] && (flags & (1LL << i)))
+ mlx4_dbg(dev, " %s\n", fname[i]);
+}
+
+int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *inbox;
+ int err = 0;
+
+#define MOD_STAT_CFG_IN_SIZE 0x100
+
+#define MOD_STAT_CFG_PG_SZ_M_OFFSET 0x002
+#define MOD_STAT_CFG_PG_SZ_OFFSET 0x003
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ inbox = mailbox->buf;
+
+ memset(inbox, 0, MOD_STAT_CFG_IN_SIZE);
+
+ MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
+ MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
+
+ err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
+ MLX4_CMD_TIME_CLASS_A);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *outbox;
+ u8 field;
+ u32 field32, flags, ext_flags;
+ u16 size;
+ u16 stat_rate;
+ int err;
+ int i;
+
+#define QUERY_DEV_CAP_OUT_SIZE 0x100
+#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10
+#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11
+#define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12
+#define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13
+#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14
+#define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15
+#define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16
+#define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17
+#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19
+#define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a
+#define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b
+#define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d
+#define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e
+#define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f
+#define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20
+#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21
+#define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22
+#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23
+#define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27
+#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29
+#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b
+#define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d
+#define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f
+#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33
+#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35
+#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36
+#define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37
+#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38
+#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
+#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
+#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
+#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40
+#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
+#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
+#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
+#define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b
+#define QUERY_DEV_CAP_BF_OFFSET 0x4c
+#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d
+#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e
+#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f
+#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51
+#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52
+#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55
+#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56
+#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61
+#define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62
+#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
+#define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64
+#define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65
+#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
+#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
+#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
+#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
+#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86
+#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88
+#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a
+#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c
+#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e
+#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90
+#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
+#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
+#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
+#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
+
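+	/*
+	 * Most QUERY_DEV_CAP fields are log2 values packed into a few bits;
+	 * the masks below strip the neighbouring fields before "1 <<"
+	 * expands them to actual capacities.
+	 */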
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
+ MLX4_CMD_TIME_CLASS_A);
+ if (err)
+ goto out;
+
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
+ dev_cap->reserved_qps = 1 << (field & 0xf);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
+ dev_cap->max_qps = 1 << (field & 0x1f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
+ dev_cap->reserved_srqs = 1 << (field >> 4);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
+ dev_cap->max_srqs = 1 << (field & 0x1f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
+ dev_cap->max_cq_sz = 1 << field;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
+ dev_cap->reserved_cqs = 1 << (field & 0xf);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
+ dev_cap->max_cqs = 1 << (field & 0x1f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
+ dev_cap->max_mpts = 1 << (field & 0x3f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
+ dev_cap->reserved_eqs = field & 0xf;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
+ dev_cap->max_eqs = 1 << (field & 0xf);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
+ dev_cap->reserved_mtts = 1 << (field >> 4);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
+ dev_cap->max_mrw_sz = 1 << field;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
+ dev_cap->reserved_mrws = 1 << (field & 0xf);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
+ dev_cap->max_mtt_seg = 1 << (field & 0x3f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
+ dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
+ dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
+ field &= 0x1f;
+ if (!field)
+ dev_cap->max_gso_sz = 0;
+ else
+ dev_cap->max_gso_sz = 1 << field;
+
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
+ dev_cap->max_rdma_global = 1 << (field & 0x3f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
+ dev_cap->local_ca_ack_delay = field & 0x1f;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
+ dev_cap->num_ports = field & 0xf;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
+ dev_cap->max_msg_sz = 1 << (field & 0x1f);
+ MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
+ dev_cap->stat_rate_support = stat_rate;
+ MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
+ MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
+ dev_cap->flags = flags | (u64)ext_flags << 32;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
+ dev_cap->reserved_uars = field >> 4;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
+ dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
+ dev_cap->min_page_sz = 1 << field;
+
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
+ if (field & 0x80) {
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
+ dev_cap->bf_reg_size = 1 << (field & 0x1f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
+ if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
+ field = 3;
+ dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
+ mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
+ dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
+ } else {
+ dev_cap->bf_reg_size = 0;
+ mlx4_dbg(dev, "BlueFlame not available\n");
+ }
+
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
+ dev_cap->max_sq_sg = field;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
+ dev_cap->max_sq_desc_sz = size;
+
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
+ dev_cap->max_qp_per_mcg = 1 << field;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
+ dev_cap->reserved_mgms = field & 0xf;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
+ dev_cap->max_mcgs = 1 << field;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
+ dev_cap->reserved_pds = field >> 4;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
+ dev_cap->max_pds = 1 << (field & 0x3f);
+
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
+ dev_cap->rdmarc_entry_sz = size;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
+ dev_cap->qpc_entry_sz = size;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
+ dev_cap->aux_entry_sz = size;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
+ dev_cap->altc_entry_sz = size;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
+ dev_cap->eqc_entry_sz = size;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
+ dev_cap->cqc_entry_sz = size;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
+ dev_cap->srq_entry_sz = size;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
+ dev_cap->cmpt_entry_sz = size;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
+ dev_cap->mtt_entry_sz = size;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
+ dev_cap->dmpt_entry_sz = size;
+
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
+ dev_cap->max_srq_sz = 1 << field;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
+ dev_cap->max_qp_sz = 1 << field;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
+ dev_cap->resize_srq = field & 1;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
+ dev_cap->max_rq_sg = field;
+ MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
+ dev_cap->max_rq_desc_sz = size;
+
+ MLX4_GET(dev_cap->bmme_flags, outbox,
+ QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+ MLX4_GET(dev_cap->reserved_lkey, outbox,
+ QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
+ MLX4_GET(dev_cap->max_icm_sz, outbox,
+ QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
+ if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
+ MLX4_GET(dev_cap->max_counters, outbox,
+ QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);
+
+ if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+ for (i = 1; i <= dev_cap->num_ports; ++i) {
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
+ dev_cap->max_vl[i] = field >> 4;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
+ dev_cap->ib_mtu[i] = field >> 4;
+ dev_cap->max_port_width[i] = field & 0xf;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
+ dev_cap->max_gids[i] = 1 << (field & 0xf);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
+ dev_cap->max_pkeys[i] = 1 << (field & 0xf);
+ }
+ } else {
+#define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00
+#define QUERY_PORT_MTU_OFFSET 0x01
+#define QUERY_PORT_ETH_MTU_OFFSET 0x02
+#define QUERY_PORT_WIDTH_OFFSET 0x06
+#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07
+#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
+#define QUERY_PORT_MAX_VL_OFFSET 0x0b
+#define QUERY_PORT_MAC_OFFSET 0x10
+#define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18
+#define QUERY_PORT_WAVELENGTH_OFFSET 0x1c
+#define QUERY_PORT_TRANS_CODE_OFFSET 0x20
+
+ for (i = 1; i <= dev_cap->num_ports; ++i) {
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
+ MLX4_CMD_TIME_CLASS_B);
+ if (err)
+ goto out;
+
+ MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+ dev_cap->supported_port_types[i] = field & 3;
+ MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
+ dev_cap->ib_mtu[i] = field & 0xf;
+ MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
+ dev_cap->max_port_width[i] = field & 0xf;
+ MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
+ dev_cap->max_gids[i] = 1 << (field >> 4);
+ dev_cap->max_pkeys[i] = 1 << (field & 0xf);
+ MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
+ dev_cap->max_vl[i] = field & 0xf;
+ MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
+ dev_cap->log_max_macs[i] = field & 0xf;
+ dev_cap->log_max_vlans[i] = field >> 4;
+ MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
+ MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
+ MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
+ dev_cap->trans_type[i] = field32 >> 24;
+ dev_cap->vendor_oui[i] = field32 & 0xffffff;
+ MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
+ MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
+ }
+ }
+
+ mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
+ dev_cap->bmme_flags, dev_cap->reserved_lkey);
+
+ /*
+ * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
+ * we can't use any EQs whose doorbell falls on that page,
+ * even if the EQ itself isn't reserved.
+ */
+ dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
+ dev_cap->reserved_eqs);
+
+ mlx4_dbg(dev, "Max ICM size %lld MB\n",
+ (unsigned long long) dev_cap->max_icm_sz >> 20);
+ mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
+ dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
+ mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
+ dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
+ mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
+ dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
+ mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
+ dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
+ mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
+ dev_cap->reserved_mrws, dev_cap->reserved_mtts);
+ mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
+ dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
+ mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
+		 dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
+ mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
+ dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
+ mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
+ dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
+ dev_cap->max_port_width[1]);
+ mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
+ dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
+ mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
+ dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
+ mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
+ mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
+
+ dump_dev_cap_flags(dev, dev_cap->flags);
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_icm_iter iter;
+ __be64 *pages;
+ int lg;
+ int nent = 0;
+ int i;
+ int err = 0;
+ int ts = 0, tc = 0;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
+ pages = mailbox->buf;
+
+ for (mlx4_icm_first(icm, &iter);
+ !mlx4_icm_last(&iter);
+ mlx4_icm_next(&iter)) {
+ /*
+ * We have to pass pages that are aligned to their
+ * size, so find the least significant 1 in the
+ * address or size and use that as our log2 size.
+ */
+ lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
+ if (lg < MLX4_ICM_PAGE_SHIFT) {
+ mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
+ MLX4_ICM_PAGE_SIZE,
+ (unsigned long long) mlx4_icm_addr(&iter),
+ mlx4_icm_size(&iter));
+ err = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
+ if (virt != -1) {
+ pages[nent * 2] = cpu_to_be64(virt);
+ virt += 1 << lg;
+ }
+
+ pages[nent * 2 + 1] =
+ cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
+ (lg - MLX4_ICM_PAGE_SHIFT));
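+			/* ts accumulates KB mapped, tc counts chunks, for the log below */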
+ ts += 1 << (lg - 10);
+ ++tc;
+
+ if (++nent == MLX4_MAILBOX_SIZE / 16) {
+ err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
+ MLX4_CMD_TIME_CLASS_B);
+ if (err)
+ goto out;
+ nent = 0;
+ }
+ }
+ }
+
+ if (nent)
+ err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B);
+ if (err)
+ goto out;
+
+ switch (op) {
+ case MLX4_CMD_MAP_FA:
+ mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
+ break;
+ case MLX4_CMD_MAP_ICM_AUX:
+ mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
+ break;
+ case MLX4_CMD_MAP_ICM:
+ mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
+ tc, ts, (unsigned long long) virt - (ts << 10));
+ break;
+ }
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
+{
+ return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
+}
+
+int mlx4_UNMAP_FA(struct mlx4_dev *dev)
+{
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B);
+}
+
+int mlx4_RUN_FW(struct mlx4_dev *dev)
+{
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A);
+}
+
+int mlx4_QUERY_FW(struct mlx4_dev *dev)
+{
+ struct mlx4_fw *fw = &mlx4_priv(dev)->fw;
+ struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *outbox;
+ int err = 0;
+ u64 fw_ver;
+ u16 cmd_if_rev;
+ u8 lg;
+
+#define QUERY_FW_OUT_SIZE 0x100
+#define QUERY_FW_VER_OFFSET 0x00
+#define QUERY_FW_CMD_IF_REV_OFFSET 0x0a
+#define QUERY_FW_MAX_CMD_OFFSET 0x0f
+#define QUERY_FW_ERR_START_OFFSET 0x30
+#define QUERY_FW_ERR_SIZE_OFFSET 0x38
+#define QUERY_FW_ERR_BAR_OFFSET 0x3c
+
+#define QUERY_FW_SIZE_OFFSET 0x00
+#define QUERY_FW_CLR_INT_BASE_OFFSET 0x20
+#define QUERY_FW_CLR_INT_BAR_OFFSET 0x28
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
+ MLX4_CMD_TIME_CLASS_A);
+ if (err)
+ goto out;
+
+ MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
+ /*
+ * FW subminor version is at more significant bits than minor
+ * version, so swap here.
+ */
+ dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
+ ((fw_ver & 0xffff0000ull) >> 16) |
+ ((fw_ver & 0x0000ffffull) << 16);
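+	/* caps.fw_ver now holds major:minor:subminor from high bits to low,
+	 * matching the %d.%d.%03d prints below */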
+
+ MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
+ if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
+ cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
+ mlx4_err(dev, "Installed FW has unsupported "
+ "command interface revision %d.\n",
+ cmd_if_rev);
+ mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
+ (int) (dev->caps.fw_ver >> 32),
+ (int) (dev->caps.fw_ver >> 16) & 0xffff,
+ (int) dev->caps.fw_ver & 0xffff);
+ mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
+ MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
+ dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;
+
+ MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
+ cmd->max_cmds = 1 << lg;
+
+ mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
+ (int) (dev->caps.fw_ver >> 32),
+ (int) (dev->caps.fw_ver >> 16) & 0xffff,
+ (int) dev->caps.fw_ver & 0xffff,
+ cmd_if_rev, cmd->max_cmds);
+
+ MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
+ MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
+ MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET);
+ fw->catas_bar = (fw->catas_bar >> 6) * 2;
+
+ mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
+ (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);
+
+ MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
+ MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
+ MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
+ fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
+
+ mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
+
+ /*
+ * Round up number of system pages needed in case
+ * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
+ */
+ fw->fw_pages =
+ ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
+ (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
+
+ mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
+ (unsigned long long) fw->clr_int_base, fw->clr_int_bar);
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+static void get_board_id(void *vsd, char *board_id)
+{
+ int i;
+
+#define VSD_OFFSET_SIG1 0x00
+#define VSD_OFFSET_SIG2 0xde
+#define VSD_OFFSET_MLX_BOARD_ID 0xd0
+#define VSD_OFFSET_TS_BOARD_ID 0x20
+
+#define VSD_SIGNATURE_TOPSPIN 0x5ad
+
+ memset(board_id, 0, MLX4_BOARD_ID_LEN);
+
+ if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
+ be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
+ strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
+ } else {
+ /*
+ * The board ID is a string but the firmware byte
+ * swaps each 4-byte word before passing it back to
+ * us. Therefore we need to swab it before printing.
+ */
+ for (i = 0; i < 4; ++i)
+ ((u32 *) board_id)[i] =
+ swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
+ }
+}
+
+int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *outbox;
+ int err;
+
+#define QUERY_ADAPTER_OUT_SIZE 0x100
+#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
+#define QUERY_ADAPTER_VSD_OFFSET 0x20
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
+ MLX4_CMD_TIME_CLASS_A);
+ if (err)
+ goto out;
+
+ MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
+
+ get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
+ adapter->board_id);
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ __be32 *inbox;
+ int err;
+
+#define INIT_HCA_IN_SIZE 0x200
+#define INIT_HCA_VERSION_OFFSET 0x000
+#define INIT_HCA_VERSION 2
+#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
+#define INIT_HCA_FLAGS_OFFSET 0x014
+#define INIT_HCA_QPC_OFFSET 0x020
+#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
+#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17)
+#define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28)
+#define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f)
+#define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30)
+#define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37)
+#define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40)
+#define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
+#define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
+#define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67)
+#define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70)
+#define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77)
+#define INIT_HCA_MCAST_OFFSET 0x0c0
+#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
+#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
+#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
+#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
+#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
+#define INIT_HCA_TPT_OFFSET 0x0f0
+#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
+#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
+#define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10)
+#define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18)
+#define INIT_HCA_UAR_OFFSET 0x120
+#define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a)
+#define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b)
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ inbox = mailbox->buf;
+
+ memset(inbox, 0, INIT_HCA_IN_SIZE);
+
+ *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
+
+ *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
+ (ilog2(cache_line_size()) - 4) << 5;
+
+#if defined(__LITTLE_ENDIAN)
+ *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
+#elif defined(__BIG_ENDIAN)
+ *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
+#else
+#error Host endianness not defined
+#endif
+ /* Check port for UD address vector: */
+ *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
+
+ /* Enable IPoIB checksumming if we can: */
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
+ *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
+
+ /* Enable QoS support if module parameter set */
+ if (enable_qos)
+ *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
+
+ /* enable counters */
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
+ *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
+
+ /* QPC/EEC/CQC/EQC/RDMARC attributes */
+
+ MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET);
+ MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
+ MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET);
+ MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET);
+ MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET);
+ MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET);
+ MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
+
+ /* multicast attributes */
+
+ MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
+ MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+
+ /* TPT attributes */
+
+ MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
+ MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
+ MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET);
+
+ /* UAR attributes */
+
+ MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
+ MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
+
+ err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000);
+
+ if (err)
+ mlx4_err(dev, "INIT_HCA returns %d\n", err);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *inbox;
+ int err;
+ u32 flags;
+ u16 field;
+
+ if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+#define INIT_PORT_IN_SIZE 256
+#define INIT_PORT_FLAGS_OFFSET 0x00
+#define INIT_PORT_FLAG_SIG (1 << 18)
+#define INIT_PORT_FLAG_NG (1 << 17)
+#define INIT_PORT_FLAG_G0 (1 << 16)
+#define INIT_PORT_VL_SHIFT 4
+#define INIT_PORT_PORT_WIDTH_SHIFT 8
+#define INIT_PORT_MTU_OFFSET 0x04
+#define INIT_PORT_MAX_GID_OFFSET 0x06
+#define INIT_PORT_MAX_PKEY_OFFSET 0x0a
+#define INIT_PORT_GUID0_OFFSET 0x10
+#define INIT_PORT_NODE_GUID_OFFSET 0x18
+#define INIT_PORT_SI_GUID_OFFSET 0x20
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ inbox = mailbox->buf;
+
+ memset(inbox, 0, INIT_PORT_IN_SIZE);
+
+ flags = 0;
+ flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
+ flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
+ MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
+
+ field = 128 << dev->caps.ib_mtu_cap[port];
+ MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
+ field = dev->caps.gid_table_len[port];
+ MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
+ field = dev->caps.pkey_table_len[port];
+ MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
+
+ err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
+ MLX4_CMD_TIME_CLASS_A);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ } else
+ err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
+ MLX4_CMD_TIME_CLASS_A);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
+
+int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
+{
+ return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000);
+}
+EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
+
+int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
+{
+ return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000);
+}
+
+int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
+{
+ int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
+ MLX4_CMD_SET_ICM_SIZE,
+ MLX4_CMD_TIME_CLASS_A);
+ if (ret)
+ return ret;
+
+ /*
+ * Round up number of system pages needed in case
+ * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
+ */
+ *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
+ (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
+
+ return 0;
+}
+
+int mlx4_NOP(struct mlx4_dev *dev)
+{
+ /* Input modifier of 0x1f means "finish as soon as possible." */
+ return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
+}
+
+#define MLX4_WOL_SETUP_MODE (5 << 28)
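+/* op modifier 0x3 reads and 0x1 writes the WOL config via MOD_STAT_CFG */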
+int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
+{
+ u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
+
+ return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
+ MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A);
+}
+EXPORT_SYMBOL_GPL(mlx4_wol_read);
+
+int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
+{
+ u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
+
+ return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
+ MLX4_CMD_TIME_CLASS_A);
+}
+EXPORT_SYMBOL_GPL(mlx4_wol_write);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
new file mode 100644
index 000000000000..1e8ecc3708e2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006, 2007 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_FW_H
+#define MLX4_FW_H
+
+#include "mlx4.h"
+#include "icm.h"
+
+struct mlx4_mod_stat_cfg {
+ u8 log_pg_sz;
+ u8 log_pg_sz_m;
+};
+
+struct mlx4_dev_cap {
+ int max_srq_sz;
+ int max_qp_sz;
+ int reserved_qps;
+ int max_qps;
+ int reserved_srqs;
+ int max_srqs;
+ int max_cq_sz;
+ int reserved_cqs;
+ int max_cqs;
+ int max_mpts;
+ int reserved_eqs;
+ int max_eqs;
+ int reserved_mtts;
+ int max_mrw_sz;
+ int reserved_mrws;
+ int max_mtt_seg;
+ int max_requester_per_qp;
+ int max_responder_per_qp;
+ int max_rdma_global;
+ int local_ca_ack_delay;
+ int num_ports;
+ u32 max_msg_sz;
+ int ib_mtu[MLX4_MAX_PORTS + 1];
+ int max_port_width[MLX4_MAX_PORTS + 1];
+ int max_vl[MLX4_MAX_PORTS + 1];
+ int max_gids[MLX4_MAX_PORTS + 1];
+ int max_pkeys[MLX4_MAX_PORTS + 1];
+ u64 def_mac[MLX4_MAX_PORTS + 1];
+ u16 eth_mtu[MLX4_MAX_PORTS + 1];
+ int trans_type[MLX4_MAX_PORTS + 1];
+ int vendor_oui[MLX4_MAX_PORTS + 1];
+ u16 wavelength[MLX4_MAX_PORTS + 1];
+ u64 trans_code[MLX4_MAX_PORTS + 1];
+ u16 stat_rate_support;
+ u64 flags;
+ int reserved_uars;
+ int uar_size;
+ int min_page_sz;
+ int bf_reg_size;
+ int bf_regs_per_page;
+ int max_sq_sg;
+ int max_sq_desc_sz;
+ int max_rq_sg;
+ int max_rq_desc_sz;
+ int max_qp_per_mcg;
+ int reserved_mgms;
+ int max_mcgs;
+ int reserved_pds;
+ int max_pds;
+ int qpc_entry_sz;
+ int rdmarc_entry_sz;
+ int altc_entry_sz;
+ int aux_entry_sz;
+ int srq_entry_sz;
+ int cqc_entry_sz;
+ int eqc_entry_sz;
+ int dmpt_entry_sz;
+ int cmpt_entry_sz;
+ int mtt_entry_sz;
+ int resize_srq;
+ u32 bmme_flags;
+ u32 reserved_lkey;
+ u64 max_icm_sz;
+ int max_gso_sz;
+ u8 supported_port_types[MLX4_MAX_PORTS + 1];
+ u8 log_max_macs[MLX4_MAX_PORTS + 1];
+ u8 log_max_vlans[MLX4_MAX_PORTS + 1];
+ u32 max_counters;
+};
+
+struct mlx4_adapter {
+ char board_id[MLX4_BOARD_ID_LEN];
+ u8 inta_pin;
+};
+
+struct mlx4_init_hca_param {
+ u64 qpc_base;
+ u64 rdmarc_base;
+ u64 auxc_base;
+ u64 altc_base;
+ u64 srqc_base;
+ u64 cqc_base;
+ u64 eqc_base;
+ u64 mc_base;
+ u64 dmpt_base;
+ u64 cmpt_base;
+ u64 mtt_base;
+ u16 log_mc_entry_sz;
+ u16 log_mc_hash_sz;
+ u8 log_num_qps;
+ u8 log_num_srqs;
+ u8 log_num_cqs;
+ u8 log_num_eqs;
+ u8 log_rd_per_qp;
+ u8 log_mc_table_sz;
+ u8 log_mpt_sz;
+ u8 log_uar_sz;
+};
+
+struct mlx4_init_ib_param {
+ int port_width;
+ int vl_cap;
+ int mtu_cap;
+ u16 gid_cap;
+ u16 pkey_cap;
+ int set_guid0;
+ u64 guid0;
+ int set_node_guid;
+ u64 node_guid;
+ int set_si_guid;
+ u64 si_guid;
+};
+
+struct mlx4_set_ib_param {
+ int set_si_guid;
+ int reset_qkey_viol;
+ u64 si_guid;
+ u32 cap_mask;
+};
+
+int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
+int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
+int mlx4_UNMAP_FA(struct mlx4_dev *dev);
+int mlx4_RUN_FW(struct mlx4_dev *dev);
+int mlx4_QUERY_FW(struct mlx4_dev *dev);
+int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter);
+int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
+int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic);
+int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt);
+int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages);
+int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
+int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
+int mlx4_NOP(struct mlx4_dev *dev);
+int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg);
+
+#endif /* MLX4_FW_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
new file mode 100644
index 000000000000..02393fdf44c1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+#include "icm.h"
+#include "fw.h"
+
+/*
+ * We allocate in as big chunks as we can, up to a maximum of 256 KB
+ * per chunk.
+ */
+enum {
+ MLX4_ICM_ALLOC_SIZE = 1 << 18,
+ MLX4_TABLE_CHUNK_SIZE = 1 << 18
+};
+
+static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
+{
+ int i;
+
+ if (chunk->nsg > 0)
+ pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
+ PCI_DMA_BIDIRECTIONAL);
+
+ for (i = 0; i < chunk->npages; ++i)
+ __free_pages(sg_page(&chunk->mem[i]),
+ get_order(chunk->mem[i].length));
+}
+
+static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
+{
+ int i;
+
+ for (i = 0; i < chunk->npages; ++i)
+ dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
+ lowmem_page_address(sg_page(&chunk->mem[i])),
+ sg_dma_address(&chunk->mem[i]));
+}
+
+void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
+{
+ struct mlx4_icm_chunk *chunk, *tmp;
+
+ if (!icm)
+ return;
+
+ list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
+ if (coherent)
+ mlx4_free_icm_coherent(dev, chunk);
+ else
+ mlx4_free_icm_pages(dev, chunk);
+
+ kfree(chunk);
+ }
+
+ kfree(icm);
+}
+
+static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
+{
+ struct page *page;
+
+ page = alloc_pages(gfp_mask, order);
+ if (!page)
+ return -ENOMEM;
+
+ sg_set_page(mem, page, PAGE_SIZE << order, 0);
+ return 0;
+}
+
+static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
+ int order, gfp_t gfp_mask)
+{
+ void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
+ &sg_dma_address(mem), gfp_mask);
+ if (!buf)
+ return -ENOMEM;
+
+ sg_set_buf(mem, buf, PAGE_SIZE << order);
+ BUG_ON(mem->offset);
+ sg_dma_len(mem) = PAGE_SIZE << order;
+ return 0;
+}
+
+struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
+ gfp_t gfp_mask, int coherent)
+{
+ struct mlx4_icm *icm;
+ struct mlx4_icm_chunk *chunk = NULL;
+ int cur_order;
+ int ret;
+
+ /* We use sg_set_buf for coherent allocs, which assumes low memory */
+ BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
+
+ icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+ if (!icm)
+ return NULL;
+
+ icm->refcount = 0;
+ INIT_LIST_HEAD(&icm->chunk_list);
+
+ cur_order = get_order(MLX4_ICM_ALLOC_SIZE);
+
+ while (npages > 0) {
+ if (!chunk) {
+ chunk = kmalloc(sizeof *chunk,
+ gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+ if (!chunk)
+ goto fail;
+
+ sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
+ chunk->npages = 0;
+ chunk->nsg = 0;
+ list_add_tail(&chunk->list, &icm->chunk_list);
+ }
+
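+		/* shrink the allocation order until it no longer overshoots
+		 * the number of pages still needed */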
+ while (1 << cur_order > npages)
+ --cur_order;
+
+ if (coherent)
+ ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
+ &chunk->mem[chunk->npages],
+ cur_order, gfp_mask);
+ else
+ ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
+ cur_order, gfp_mask);
+
+ if (ret) {
+ if (--cur_order < 0)
+ goto fail;
+ else
+ continue;
+ }
+
+ ++chunk->npages;
+
+ if (coherent)
+ ++chunk->nsg;
+ else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
+ chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
+ chunk->npages,
+ PCI_DMA_BIDIRECTIONAL);
+
+ if (chunk->nsg <= 0)
+ goto fail;
+ }
+
+ if (chunk->npages == MLX4_ICM_CHUNK_LEN)
+ chunk = NULL;
+
+ npages -= 1 << cur_order;
+ }
+
+ if (!coherent && chunk) {
+ chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
+ chunk->npages,
+ PCI_DMA_BIDIRECTIONAL);
+
+ if (chunk->nsg <= 0)
+ goto fail;
+ }
+
+ return icm;
+
+fail:
+ mlx4_free_icm(dev, icm, coherent);
+ return NULL;
+}
+
+static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
+{
+ return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
+}
+
+static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
+{
+ return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
+ MLX4_CMD_TIME_CLASS_B);
+}
+
+int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
+{
+ return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
+}
+
+int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
+{
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
+}
+
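+/*
+ * Pin the ICM chunk backing object @obj: allocate and map it on first
+ * use, otherwise just take another reference.  The index mask assumes
+ * num_obj is a power of two.
+ */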
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
+{
+ int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
+ int ret = 0;
+
+ mutex_lock(&table->mutex);
+
+ if (table->icm[i]) {
+ ++table->icm[i]->refcount;
+ goto out;
+ }
+
+ table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
+ (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
+ __GFP_NOWARN, table->coherent);
+ if (!table->icm[i]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
+ (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
+ mlx4_free_icm(dev, table->icm[i], table->coherent);
+ table->icm[i] = NULL;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ++table->icm[i]->refcount;
+
+out:
+ mutex_unlock(&table->mutex);
+ return ret;
+}
+
+void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
+{
+ int i;
+
+ i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
+
+ mutex_lock(&table->mutex);
+
+ if (--table->icm[i]->refcount == 0) {
+ mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
+ MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
+ mlx4_free_icm(dev, table->icm[i], table->coherent);
+ table->icm[i] = NULL;
+ }
+
+ mutex_unlock(&table->mutex);
+}
+
+void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle)
+{
+ int idx, offset, dma_offset, i;
+ struct mlx4_icm_chunk *chunk;
+ struct mlx4_icm *icm;
+ struct page *page = NULL;
+
+ if (!table->lowmem)
+ return NULL;
+
+ mutex_lock(&table->mutex);
+
+ idx = (obj & (table->num_obj - 1)) * table->obj_size;
+ icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
+ dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;
+
+ if (!icm)
+ goto out;
+
+ list_for_each_entry(chunk, &icm->chunk_list, list) {
+ for (i = 0; i < chunk->npages; ++i) {
+ if (dma_handle && dma_offset >= 0) {
+ if (sg_dma_len(&chunk->mem[i]) > dma_offset)
+ *dma_handle = sg_dma_address(&chunk->mem[i]) +
+ dma_offset;
+ dma_offset -= sg_dma_len(&chunk->mem[i]);
+ }
+			/*
+			 * DMA mappings can merge pages but not split them,
+			 * so if we found the page, dma_handle has already
+			 * been assigned.
+			 */
+ if (chunk->mem[i].length > offset) {
+ page = sg_page(&chunk->mem[i]);
+ goto out;
+ }
+ offset -= chunk->mem[i].length;
+ }
+ }
+
+out:
+ mutex_unlock(&table->mutex);
+ return page ? lowmem_page_address(page) + offset : NULL;
+}
+
+int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
+ int start, int end)
+{
+ int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
+ int i, err;
+
+ for (i = start; i <= end; i += inc) {
+ err = mlx4_table_get(dev, table, i);
+ if (err)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ while (i > start) {
+ i -= inc;
+ mlx4_table_put(dev, table, i);
+ }
+
+ return err;
+}
+
+void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
+ int start, int end)
+{
+ int i;
+
+ for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
+ mlx4_table_put(dev, table, i);
+}
+
+int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
+ u64 virt, int obj_size, int nobj, int reserved,
+ int use_lowmem, int use_coherent)
+{
+ int obj_per_chunk;
+ int num_icm;
+ unsigned chunk_size;
+ int i;
+
+ obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
+ num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
+
+ table->icm = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
+ if (!table->icm)
+ return -ENOMEM;
+ table->virt = virt;
+ table->num_icm = num_icm;
+ table->num_obj = nobj;
+ table->obj_size = obj_size;
+ table->lowmem = use_lowmem;
+ table->coherent = use_coherent;
+ mutex_init(&table->mutex);
+
+ for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
+ chunk_size = MLX4_TABLE_CHUNK_SIZE;
+ if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
+ chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE);
+
+ table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
+ (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
+ __GFP_NOWARN, use_coherent);
+ if (!table->icm[i])
+ goto err;
+ if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
+ mlx4_free_icm(dev, table->icm[i], use_coherent);
+ table->icm[i] = NULL;
+ goto err;
+ }
+
+ /*
+ * Add a reference to this ICM chunk so that it never
+ * gets freed (since it contains reserved firmware objects).
+ */
+ ++table->icm[i]->refcount;
+ }
+
+ return 0;
+
+err:
+ for (i = 0; i < num_icm; ++i)
+ if (table->icm[i]) {
+ mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
+ MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
+ mlx4_free_icm(dev, table->icm[i], use_coherent);
+ }
+
+ return -ENOMEM;
+}
+
+void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
+{
+ int i;
+
+ for (i = 0; i < table->num_icm; ++i)
+ if (table->icm[i]) {
+ mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
+ MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
+ mlx4_free_icm(dev, table->icm[i], table->coherent);
+ }
+
+ kfree(table->icm);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
new file mode 100644
index 000000000000..b10c07a1dc1a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_ICM_H
+#define MLX4_ICM_H
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/mutex.h>
+
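+/*
+ * Scatterlist entries per chunk, sized so that struct mlx4_icm_chunk
+ * (a list head, two ints and the scatterlist array) fits in 256 bytes.
+ */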
+#define MLX4_ICM_CHUNK_LEN \
+ ((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \
+ (sizeof (struct scatterlist)))
+
+enum {
+ MLX4_ICM_PAGE_SHIFT = 12,
+ MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT,
+};
+
+struct mlx4_icm_chunk {
+ struct list_head list;
+ int npages;
+ int nsg;
+ struct scatterlist mem[MLX4_ICM_CHUNK_LEN];
+};
+
+struct mlx4_icm {
+ struct list_head chunk_list;
+ int refcount;
+};
+
+struct mlx4_icm_iter {
+ struct mlx4_icm *icm;
+ struct mlx4_icm_chunk *chunk;
+ int page_idx;
+};
+
+struct mlx4_dev;
+
+struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
+ gfp_t gfp_mask, int coherent);
+void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);
+
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
+void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
+int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
+ int start, int end);
+void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
+ int start, int end);
+int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
+ u64 virt, int obj_size, int nobj, int reserved,
+ int use_lowmem, int use_coherent);
+void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
+void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle);
+
+static inline void mlx4_icm_first(struct mlx4_icm *icm,
+ struct mlx4_icm_iter *iter)
+{
+ iter->icm = icm;
+ iter->chunk = list_empty(&icm->chunk_list) ?
+ NULL : list_entry(icm->chunk_list.next,
+ struct mlx4_icm_chunk, list);
+ iter->page_idx = 0;
+}
+
+static inline int mlx4_icm_last(struct mlx4_icm_iter *iter)
+{
+ return !iter->chunk;
+}
+
+static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
+{
+ if (++iter->page_idx >= iter->chunk->nsg) {
+ if (iter->chunk->list.next == &iter->icm->chunk_list) {
+ iter->chunk = NULL;
+ return;
+ }
+
+ iter->chunk = list_entry(iter->chunk->list.next,
+ struct mlx4_icm_chunk, list);
+ iter->page_idx = 0;
+ }
+}
+
+static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
+{
+ return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
+}
+
+static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
+{
+ return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
+}
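+
+/*
+ * Typical iteration over the DMA-mapped regions of an ICM area
+ * (a sketch; consume() stands in for caller code):
+ *
+ *	struct mlx4_icm_iter iter;
+ *
+ *	for (mlx4_icm_first(icm, &iter); !mlx4_icm_last(&iter);
+ *	     mlx4_icm_next(&iter))
+ *		consume(mlx4_icm_addr(&iter), mlx4_icm_size(&iter));
+ */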
+
+int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
+int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
+
+#endif /* MLX4_ICM_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
new file mode 100644
index 000000000000..73c94fcdfddf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/slab.h>
+
+#include "mlx4.h"
+
+struct mlx4_device_context {
+ struct list_head list;
+ struct mlx4_interface *intf;
+ void *context;
+};
+
+static LIST_HEAD(intf_list);
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(intf_mutex);
+
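+/*
+ * Bind one interface (e.g. mlx4_ib or mlx4_en) to one device: the
+ * interface's add() callback returns an opaque context, which is kept
+ * on the device's ctx_list so that events and removal can later be
+ * dispatched to it.
+ */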
+static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
+{
+ struct mlx4_device_context *dev_ctx;
+
+ dev_ctx = kmalloc(sizeof *dev_ctx, GFP_KERNEL);
+ if (!dev_ctx)
+ return;
+
+ dev_ctx->intf = intf;
+ dev_ctx->context = intf->add(&priv->dev);
+
+ if (dev_ctx->context) {
+ spin_lock_irq(&priv->ctx_lock);
+ list_add_tail(&dev_ctx->list, &priv->ctx_list);
+ spin_unlock_irq(&priv->ctx_lock);
+ } else {
+ kfree(dev_ctx);
+ }
+}
+
+static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
+{
+ struct mlx4_device_context *dev_ctx;
+
+ list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+ if (dev_ctx->intf == intf) {
+ spin_lock_irq(&priv->ctx_lock);
+ list_del(&dev_ctx->list);
+ spin_unlock_irq(&priv->ctx_lock);
+
+ intf->remove(&priv->dev, dev_ctx->context);
+ kfree(dev_ctx);
+ return;
+ }
+}
+
+int mlx4_register_interface(struct mlx4_interface *intf)
+{
+ struct mlx4_priv *priv;
+
+ if (!intf->add || !intf->remove)
+ return -EINVAL;
+
+ mutex_lock(&intf_mutex);
+
+ list_add_tail(&intf->list, &intf_list);
+ list_for_each_entry(priv, &dev_list, dev_list)
+ mlx4_add_device(intf, priv);
+
+ mutex_unlock(&intf_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_register_interface);
+
+void mlx4_unregister_interface(struct mlx4_interface *intf)
+{
+ struct mlx4_priv *priv;
+
+ mutex_lock(&intf_mutex);
+
+ list_for_each_entry(priv, &dev_list, dev_list)
+ mlx4_remove_device(intf, priv);
+
+ list_del(&intf->list);
+
+ mutex_unlock(&intf_mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
+
+void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_device_context *dev_ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->ctx_lock, flags);
+
+ list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+ if (dev_ctx->intf->event)
+ dev_ctx->intf->event(dev, dev_ctx->context, type, port);
+
+ spin_unlock_irqrestore(&priv->ctx_lock, flags);
+}
+
+int mlx4_register_device(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_interface *intf;
+
+ mutex_lock(&intf_mutex);
+
+ list_add_tail(&priv->dev_list, &dev_list);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx4_add_device(intf, priv);
+
+ mutex_unlock(&intf_mutex);
+ mlx4_start_catas_poll(dev);
+
+ return 0;
+}
+
+void mlx4_unregister_device(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_interface *intf;
+
+ mlx4_stop_catas_poll(dev);
+ mutex_lock(&intf_mutex);
+
+ list_for_each_entry(intf, &intf_list, list)
+ mlx4_remove_device(intf, priv);
+
+ list_del(&priv->dev_list);
+
+ mutex_unlock(&intf_mutex);
+}
+
+void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_device_context *dev_ctx;
+ unsigned long flags;
+ void *result = NULL;
+
+ spin_lock_irqsave(&priv->ctx_lock, flags);
+
+ list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+ if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) {
+ result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port);
+ break;
+ }
+
+ spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
new file mode 100644
index 000000000000..f0ee35df4dd7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -0,0 +1,1529 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/io-mapping.h>
+
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/doorbell.h>
+
+#include "mlx4.h"
+#include "fw.h"
+#include "icm.h"
+
+MODULE_AUTHOR("Roland Dreier");
+MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+
+struct workqueue_struct *mlx4_wq;
+
+#ifdef CONFIG_MLX4_DEBUG
+
+int mlx4_debug_level = 0;
+module_param_named(debug_level, mlx4_debug_level, int, 0644);
+MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
+
+#endif /* CONFIG_MLX4_DEBUG */
+
+#ifdef CONFIG_PCI_MSI
+
+static int msi_x = 1;
+module_param(msi_x, int, 0444);
+MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
+
+#else /* CONFIG_PCI_MSI */
+
+#define msi_x (0)
+
+#endif /* CONFIG_PCI_MSI */
+
+static char mlx4_version[] __devinitdata =
+ DRV_NAME ": Mellanox ConnectX core driver v"
+ DRV_VERSION " (" DRV_RELDATE ")\n";
+
+static struct mlx4_profile default_profile = {
+ .num_qp = 1 << 17,
+ .num_srq = 1 << 16,
+ .rdmarc_per_qp = 1 << 4,
+ .num_cq = 1 << 16,
+ .num_mcg = 1 << 13,
+ .num_mpt = 1 << 17,
+ .num_mtt = 1 << 20,
+};
+
+static int log_num_mac = 2;
+module_param_named(log_num_mac, log_num_mac, int, 0444);
+MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
+
+static int log_num_vlan;
+module_param_named(log_num_vlan, log_num_vlan, int, 0444);
+MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
+
+static bool use_prio;
+module_param_named(use_prio, use_prio, bool, 0444);
+MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
+ "(0/1, default 0)");
+
+static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
+
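+/*
+ * Validate a requested port-type configuration: mixing port types
+ * requires the DPDP capability (different port types on one HCA),
+ * an ETH port may not be followed by an IB port, and each requested
+ * type must be supported by that port.
+ */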
+int mlx4_check_port_params(struct mlx4_dev *dev,
+ enum mlx4_port_type *port_type)
+{
+ int i;
+
+ for (i = 0; i < dev->caps.num_ports - 1; i++) {
+ if (port_type[i] != port_type[i + 1]) {
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+ mlx4_err(dev, "Only same port types supported "
+ "on this HCA, aborting.\n");
+ return -EINVAL;
+ }
+ if (port_type[i] == MLX4_PORT_TYPE_ETH &&
+ port_type[i + 1] == MLX4_PORT_TYPE_IB)
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < dev->caps.num_ports; i++) {
+ if (!(port_type[i] & dev->caps.supported_type[i+1])) {
+ mlx4_err(dev, "Requested port type for port %d is not "
+ "supported on this HCA\n", i + 1);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static void mlx4_set_port_mask(struct mlx4_dev *dev)
+{
+ int i;
+
+ dev->caps.port_mask = 0;
+ for (i = 1; i <= dev->caps.num_ports; ++i)
+ if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
+ dev->caps.port_mask |= 1 << (i - 1);
+}
+
+static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+{
+ int err;
+ int i;
+
+ err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ return err;
+ }
+
+ if (dev_cap->min_page_sz > PAGE_SIZE) {
+ mlx4_err(dev, "HCA minimum page size of %d bigger than "
+ "kernel PAGE_SIZE of %ld, aborting.\n",
+ dev_cap->min_page_sz, PAGE_SIZE);
+ return -ENODEV;
+ }
+ if (dev_cap->num_ports > MLX4_MAX_PORTS) {
+ mlx4_err(dev, "HCA has %d ports, but we only support %d, "
+ "aborting.\n",
+ dev_cap->num_ports, MLX4_MAX_PORTS);
+ return -ENODEV;
+ }
+
+ if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
+ mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
+ "PCI resource 2 size of 0x%llx, aborting.\n",
+ dev_cap->uar_size,
+ (unsigned long long) pci_resource_len(dev->pdev, 2));
+ return -ENODEV;
+ }
+
+ dev->caps.num_ports = dev_cap->num_ports;
+ for (i = 1; i <= dev->caps.num_ports; ++i) {
+ dev->caps.vl_cap[i] = dev_cap->max_vl[i];
+ dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
+ dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
+ dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
+ dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
+ dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
+ dev->caps.def_mac[i] = dev_cap->def_mac[i];
+ dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
+ dev->caps.trans_type[i] = dev_cap->trans_type[i];
+ dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i];
+ dev->caps.wavelength[i] = dev_cap->wavelength[i];
+ dev->caps.trans_code[i] = dev_cap->trans_code[i];
+ }
+
+ dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
+ dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
+ dev->caps.bf_reg_size = dev_cap->bf_reg_size;
+ dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
+ dev->caps.max_sq_sg = dev_cap->max_sq_sg;
+ dev->caps.max_rq_sg = dev_cap->max_rq_sg;
+ dev->caps.max_wqes = dev_cap->max_qp_sz;
+ dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
+ dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
+ dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
+ dev->caps.reserved_srqs = dev_cap->reserved_srqs;
+ dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
+ dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
+ dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM;
+ /*
+ * Subtract 1 from the limit because we need to allocate a
+ * spare CQE so the HCA HW can tell the difference between an
+ * empty CQ and a full CQ.
+ */
+ dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
+ dev->caps.reserved_cqs = dev_cap->reserved_cqs;
+ dev->caps.reserved_eqs = dev_cap->reserved_eqs;
+ dev->caps.mtts_per_seg = 1 << log_mtts_per_seg;
+ dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
+ dev->caps.mtts_per_seg);
+ dev->caps.reserved_mrws = dev_cap->reserved_mrws;
+ dev->caps.reserved_uars = dev_cap->reserved_uars;
+ dev->caps.reserved_pds = dev_cap->reserved_pds;
+ dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
+ dev->caps.max_msg_sz = dev_cap->max_msg_sz;
+ dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
+ dev->caps.flags = dev_cap->flags;
+ dev->caps.bmme_flags = dev_cap->bmme_flags;
+ dev->caps.reserved_lkey = dev_cap->reserved_lkey;
+ dev->caps.stat_rate_support = dev_cap->stat_rate_support;
+ dev->caps.max_gso_sz = dev_cap->max_gso_sz;
+
+ dev->caps.log_num_macs = log_num_mac;
+ dev->caps.log_num_vlans = log_num_vlan;
+ dev->caps.log_num_prios = use_prio ? 3 : 0;
+
+ for (i = 1; i <= dev->caps.num_ports; ++i) {
+ if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
+ else
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+ dev->caps.possible_type[i] = dev->caps.port_type[i];
+ mlx4_priv(dev)->sense.sense_allowed[i] =
+ dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;
+
+ if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
+ dev->caps.log_num_macs = dev_cap->log_max_macs[i];
+ mlx4_warn(dev, "Requested number of MACs is too much "
+ "for port %d, reducing to %d.\n",
+ i, 1 << dev->caps.log_num_macs);
+ }
+ if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
+ dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
+ mlx4_warn(dev, "Requested number of VLANs is too much "
+ "for port %d, reducing to %d.\n",
+ i, 1 << dev->caps.log_num_vlans);
+ }
+ }
+
+ mlx4_set_port_mask(dev);
+
+ dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
+
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
+ (1 << dev->caps.log_num_macs) *
+ (1 << dev->caps.log_num_vlans) *
+ (1 << dev->caps.log_num_prios) *
+ dev->caps.num_ports;
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
+
+ dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
+
+ return 0;
+}
+
+/*
+ * Change the port configuration of the device.
+ * Every user of this function must hold the port mutex.
+ */
+int mlx4_change_port_types(struct mlx4_dev *dev,
+ enum mlx4_port_type *port_types)
+{
+ int err = 0;
+ int change = 0;
+ int port;
+
+ for (port = 0; port < dev->caps.num_ports; port++) {
+ /* Change the port type only if the new type is different
+ * from the current, and not set to Auto */
+ if (port_types[port] != dev->caps.port_type[port + 1]) {
+ change = 1;
+ dev->caps.port_type[port + 1] = port_types[port];
+ }
+ }
+ if (change) {
+ mlx4_unregister_device(dev);
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ mlx4_CLOSE_PORT(dev, port);
+ err = mlx4_SET_PORT(dev, port);
+ if (err) {
+ mlx4_err(dev, "Failed to set port %d, "
+ "aborting\n", port);
+ goto out;
+ }
+ }
+ mlx4_set_port_mask(dev);
+ err = mlx4_register_device(dev);
+ }
+
+out:
+ return err;
+}
+
+static ssize_t show_port_type(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
+ port_attr);
+ struct mlx4_dev *mdev = info->dev;
+ char type[8];
+
+ sprintf(type, "%s",
+ (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
+ "ib" : "eth");
+ if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
+ sprintf(buf, "auto (%s)\n", type);
+ else
+ sprintf(buf, "%s\n", type);
+
+ return strlen(buf);
+}
+
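+/*
+ * sysfs store handler for the per-port "mlx4_port<N>" attribute;
+ * accepts "ib", "eth" or "auto".  Usage sketch (with a placeholder
+ * PCI address):
+ *
+ *	echo eth > /sys/bus/pci/devices/0000:00:00.0/mlx4_port1
+ */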
+static ssize_t set_port_type(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
+ port_attr);
+ struct mlx4_dev *mdev = info->dev;
+ struct mlx4_priv *priv = mlx4_priv(mdev);
+ enum mlx4_port_type types[MLX4_MAX_PORTS];
+ enum mlx4_port_type new_types[MLX4_MAX_PORTS];
+ int i;
+ int err = 0;
+
+ if (!strcmp(buf, "ib\n"))
+ info->tmp_type = MLX4_PORT_TYPE_IB;
+ else if (!strcmp(buf, "eth\n"))
+ info->tmp_type = MLX4_PORT_TYPE_ETH;
+ else if (!strcmp(buf, "auto\n"))
+ info->tmp_type = MLX4_PORT_TYPE_AUTO;
+ else {
+ mlx4_err(mdev, "%s is not supported port type\n", buf);
+ return -EINVAL;
+ }
+
+ mlx4_stop_sense(mdev);
+ mutex_lock(&priv->port_mutex);
+ /* Possible type is always the one that was delivered */
+ mdev->caps.possible_type[info->port] = info->tmp_type;
+
+ for (i = 0; i < mdev->caps.num_ports; i++) {
+ types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
+ mdev->caps.possible_type[i+1];
+ if (types[i] == MLX4_PORT_TYPE_AUTO)
+ types[i] = mdev->caps.port_type[i+1];
+ }
+
+ if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+ for (i = 1; i <= mdev->caps.num_ports; i++) {
+ if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
+ mdev->caps.possible_type[i] = mdev->caps.port_type[i];
+ err = -EINVAL;
+ }
+ }
+ }
+ if (err) {
+ mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
+ "Set only 'eth' or 'ib' for both ports "
+ "(should be the same)\n");
+ goto out;
+ }
+
+ mlx4_do_sense_ports(mdev, new_types, types);
+
+ err = mlx4_check_port_params(mdev, new_types);
+ if (err)
+ goto out;
+
+ /* We are about to apply the changes after the configuration
+ * was verified, no need to remember the temporary types
+ * any more */
+ for (i = 0; i < mdev->caps.num_ports; i++)
+ priv->port[i + 1].tmp_type = 0;
+
+ err = mlx4_change_port_types(mdev, new_types);
+
+out:
+ mlx4_start_sense(mdev);
+ mutex_unlock(&priv->port_mutex);
+ return err ? err : count;
+}
+
+static int mlx4_load_fw(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err;
+
+ priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
+ GFP_HIGHUSER | __GFP_NOWARN, 0);
+ if (!priv->fw.fw_icm) {
+ mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
+ return -ENOMEM;
+ }
+
+ err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
+ if (err) {
+ mlx4_err(dev, "MAP_FA command failed, aborting.\n");
+ goto err_free;
+ }
+
+ err = mlx4_RUN_FW(dev);
+ if (err) {
+ mlx4_err(dev, "RUN_FW command failed, aborting.\n");
+ goto err_unmap_fa;
+ }
+
+ return 0;
+
+err_unmap_fa:
+ mlx4_UNMAP_FA(dev);
+
+err_free:
+ mlx4_free_icm(dev, priv->fw.fw_icm, 0);
+ return err;
+}
+
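+/*
+ * The cMPT (context memory protection table) area holds one region
+ * per context type (QP, SRQ, CQ, EQ); each region starts at cmpt_base
+ * plus (type * cmpt_entry_sz) << MLX4_CMPT_SHIFT.
+ */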
+static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
+ int cmpt_entry_sz)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err;
+
+ err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
+ cmpt_base +
+ ((u64) (MLX4_CMPT_TYPE_QP *
+ cmpt_entry_sz) << MLX4_CMPT_SHIFT),
+ cmpt_entry_sz, dev->caps.num_qps,
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ 0, 0);
+ if (err)
+ goto err;
+
+ err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
+ cmpt_base +
+ ((u64) (MLX4_CMPT_TYPE_SRQ *
+ cmpt_entry_sz) << MLX4_CMPT_SHIFT),
+ cmpt_entry_sz, dev->caps.num_srqs,
+ dev->caps.reserved_srqs, 0, 0);
+ if (err)
+ goto err_qp;
+
+ err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
+ cmpt_base +
+ ((u64) (MLX4_CMPT_TYPE_CQ *
+ cmpt_entry_sz) << MLX4_CMPT_SHIFT),
+ cmpt_entry_sz, dev->caps.num_cqs,
+ dev->caps.reserved_cqs, 0, 0);
+ if (err)
+ goto err_srq;
+
+ err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
+ cmpt_base +
+ ((u64) (MLX4_CMPT_TYPE_EQ *
+ cmpt_entry_sz) << MLX4_CMPT_SHIFT),
+ cmpt_entry_sz,
+ dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
+ if (err)
+ goto err_cq;
+
+ return 0;
+
+err_cq:
+ mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
+
+err_srq:
+ mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
+
+err_qp:
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
+
+err:
+ return err;
+}
+
+static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
+ struct mlx4_init_hca_param *init_hca, u64 icm_size)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ u64 aux_pages;
+ int err;
+
+ err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
+ if (err) {
+ mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
+ return err;
+ }
+
+ mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
+ (unsigned long long) icm_size >> 10,
+ (unsigned long long) aux_pages << 2);
+
+ priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
+ GFP_HIGHUSER | __GFP_NOWARN, 0);
+ if (!priv->fw.aux_icm) {
+ mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
+ return -ENOMEM;
+ }
+
+ err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
+ if (err) {
+ mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
+ goto err_free_aux;
+ }
+
+ err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
+ if (err) {
+ mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
+ goto err_unmap_aux;
+ }
+
+ err = mlx4_init_icm_table(dev, &priv->eq_table.table,
+ init_hca->eqc_base, dev_cap->eqc_entry_sz,
+ dev->caps.num_eqs, dev->caps.num_eqs,
+ 0, 0);
+ if (err) {
+ mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
+ goto err_unmap_cmpt;
+ }
+
+ /*
+ * Reserved MTT entries must be aligned up to a cacheline
+ * boundary, since the FW will write to them, while the driver
+ * writes to all other MTT entries. (The variable
+ * dev->caps.mtt_entry_sz below is really the MTT segment
+ * size, not the raw entry size)
+ */
+ dev->caps.reserved_mtts =
+ ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
+ dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
+
+ err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
+ init_hca->mtt_base,
+ dev->caps.mtt_entry_sz,
+ dev->caps.num_mtt_segs,
+ dev->caps.reserved_mtts, 1, 0);
+ if (err) {
+ mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
+ goto err_unmap_eq;
+ }
+
+ err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
+ init_hca->dmpt_base,
+ dev_cap->dmpt_entry_sz,
+ dev->caps.num_mpts,
+ dev->caps.reserved_mrws, 1, 1);
+ if (err) {
+ mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
+ goto err_unmap_mtt;
+ }
+
+ err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
+ init_hca->qpc_base,
+ dev_cap->qpc_entry_sz,
+ dev->caps.num_qps,
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ 0, 0);
+ if (err) {
+ mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
+ goto err_unmap_dmpt;
+ }
+
+ err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
+ init_hca->auxc_base,
+ dev_cap->aux_entry_sz,
+ dev->caps.num_qps,
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ 0, 0);
+ if (err) {
+ mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
+ goto err_unmap_qp;
+ }
+
+ err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
+ init_hca->altc_base,
+ dev_cap->altc_entry_sz,
+ dev->caps.num_qps,
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ 0, 0);
+ if (err) {
+ mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
+ goto err_unmap_auxc;
+ }
+
+ err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
+ init_hca->rdmarc_base,
+ dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
+ dev->caps.num_qps,
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ 0, 0);
+ if (err) {
+ mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
+ goto err_unmap_altc;
+ }
+
+ err = mlx4_init_icm_table(dev, &priv->cq_table.table,
+ init_hca->cqc_base,
+ dev_cap->cqc_entry_sz,
+ dev->caps.num_cqs,
+ dev->caps.reserved_cqs, 0, 0);
+ if (err) {
+ mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
+ goto err_unmap_rdmarc;
+ }
+
+ err = mlx4_init_icm_table(dev, &priv->srq_table.table,
+ init_hca->srqc_base,
+ dev_cap->srq_entry_sz,
+ dev->caps.num_srqs,
+ dev->caps.reserved_srqs, 0, 0);
+ if (err) {
+ mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
+ goto err_unmap_cq;
+ }
+
+ /*
+ * It's not strictly required, but for simplicity just map the
+ * whole multicast group table now. The table isn't very big
+ * and it's a lot easier than trying to track ref counts.
+ */
+ err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
+ init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
+ dev->caps.num_mgms + dev->caps.num_amgms,
+ dev->caps.num_mgms + dev->caps.num_amgms,
+ 0, 0);
+ if (err) {
+ mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
+ goto err_unmap_srq;
+ }
+
+ return 0;
+
+err_unmap_srq:
+ mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
+
+err_unmap_cq:
+ mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
+
+err_unmap_rdmarc:
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
+
+err_unmap_altc:
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
+
+err_unmap_auxc:
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
+
+err_unmap_qp:
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
+
+err_unmap_dmpt:
+ mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
+
+err_unmap_mtt:
+ mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
+
+err_unmap_eq:
+ mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
+
+err_unmap_cmpt:
+ mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
+
+err_unmap_aux:
+ mlx4_UNMAP_ICM_AUX(dev);
+
+err_free_aux:
+ mlx4_free_icm(dev, priv->fw.aux_icm, 0);
+
+ return err;
+}
+
+static void mlx4_free_icms(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
+ mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
+ mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
+ mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
+ mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
+ mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
+ mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
+
+ mlx4_UNMAP_ICM_AUX(dev);
+ mlx4_free_icm(dev, priv->fw.aux_icm, 0);
+}
+
+static int map_bf_area(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ resource_size_t bf_start;
+ resource_size_t bf_len;
+ int err = 0;
+
+ bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
+ bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
+ priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
+ if (!priv->bf_mapping)
+ err = -ENOMEM;
+
+ return err;
+}
+
+static void unmap_bf_area(struct mlx4_dev *dev)
+{
+ if (mlx4_priv(dev)->bf_mapping)
+ io_mapping_free(mlx4_priv(dev)->bf_mapping);
+}
+
+static void mlx4_close_hca(struct mlx4_dev *dev)
+{
+ unmap_bf_area(dev);
+ mlx4_CLOSE_HCA(dev, 0);
+ mlx4_free_icms(dev);
+ mlx4_UNMAP_FA(dev);
+ mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+}
+
+static int mlx4_init_hca(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_adapter adapter;
+ struct mlx4_dev_cap dev_cap;
+ struct mlx4_mod_stat_cfg mlx4_cfg;
+ struct mlx4_profile profile;
+ struct mlx4_init_hca_param init_hca;
+ u64 icm_size;
+ int err;
+
+ err = mlx4_QUERY_FW(dev);
+ if (err) {
+ if (err == -EACCES)
+ mlx4_info(dev, "non-primary physical function, skipping.\n");
+ else
+ mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
+ return err;
+ }
+
+ err = mlx4_load_fw(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to start FW, aborting.\n");
+ return err;
+ }
+
+ mlx4_cfg.log_pg_sz_m = 1;
+ mlx4_cfg.log_pg_sz = 0;
+ err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
+ if (err)
+ mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
+
+ err = mlx4_dev_cap(dev, &dev_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ goto err_stop_fw;
+ }
+
+ profile = default_profile;
+
+ icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
+ if ((long long) icm_size < 0) {
+ err = icm_size;
+ goto err_stop_fw;
+ }
+
+ if (map_bf_area(dev))
+ mlx4_dbg(dev, "Failed to map blue flame area\n");
+
+ init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+
+ err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
+ if (err)
+ goto err_stop_fw;
+
+ err = mlx4_INIT_HCA(dev, &init_hca);
+ if (err) {
+ mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
+ goto err_free_icm;
+ }
+
+ err = mlx4_QUERY_ADAPTER(dev, &adapter);
+ if (err) {
+ mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
+ goto err_close;
+ }
+
+ priv->eq_table.inta_pin = adapter.inta_pin;
+ memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);
+
+ return 0;
+
+err_close:
+ mlx4_CLOSE_HCA(dev, 0);
+
+err_free_icm:
+ mlx4_free_icms(dev);
+
+err_stop_fw:
+ unmap_bf_area(dev);
+ mlx4_UNMAP_FA(dev);
+ mlx4_free_icm(dev, priv->fw.fw_icm, 0);
+
+ return err;
+}
+
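+/*
+ * Flow counters are handed out from a simple bitmap; -ENOENT means
+ * the device does not support counters at all.
+ */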
+static int mlx4_init_counters_table(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int nent;
+
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
+ return -ENOENT;
+
+ nent = dev->caps.max_counters;
+ return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
+}
+
+static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
+{
+ mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
+}
+
+int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
+ return -ENOENT;
+
+ *idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
+ if (*idx == -1)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
+
+void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
+{
+ mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
+}
+EXPORT_SYMBOL_GPL(mlx4_counter_free);
+
+static int mlx4_setup_hca(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err;
+ int port;
+ __be32 ib_port_default_caps;
+
+ err = mlx4_init_uar_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "user access region table, aborting.\n");
+ return err;
+ }
+
+ err = mlx4_uar_alloc(dev, &priv->driver_uar);
+ if (err) {
+ mlx4_err(dev, "Failed to allocate driver access region, "
+ "aborting.\n");
+ goto err_uar_table_free;
+ }
+
+ priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!priv->kar) {
+ mlx4_err(dev, "Couldn't map kernel access region, "
+ "aborting.\n");
+ err = -ENOMEM;
+ goto err_uar_free;
+ }
+
+ err = mlx4_init_pd_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "protection domain table, aborting.\n");
+ goto err_kar_unmap;
+ }
+
+ err = mlx4_init_mr_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "memory region table, aborting.\n");
+ goto err_pd_table_free;
+ }
+
+ err = mlx4_init_eq_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "event queue table, aborting.\n");
+ goto err_mr_table_free;
+ }
+
+ err = mlx4_cmd_use_events(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to switch to event-driven "
+ "firmware commands, aborting.\n");
+ goto err_eq_table_free;
+ }
+
+ err = mlx4_NOP(dev);
+ if (err) {
+ if (dev->flags & MLX4_FLAG_MSI_X) {
+ mlx4_warn(dev, "NOP command failed to generate MSI-X "
+ "interrupt IRQ %d).\n",
+ priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+ mlx4_warn(dev, "Trying again without MSI-X.\n");
+ } else {
+ mlx4_err(dev, "NOP command failed to generate interrupt "
+ "(IRQ %d), aborting.\n",
+ priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+ mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
+ }
+
+ goto err_cmd_poll;
+ }
+
+ mlx4_dbg(dev, "NOP command IRQ test passed\n");
+
+ err = mlx4_init_cq_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "completion queue table, aborting.\n");
+ goto err_cmd_poll;
+ }
+
+ err = mlx4_init_srq_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "shared receive queue table, aborting.\n");
+ goto err_cq_table_free;
+ }
+
+ err = mlx4_init_qp_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "queue pair table, aborting.\n");
+ goto err_srq_table_free;
+ }
+
+ err = mlx4_init_mcg_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "multicast group table, aborting.\n");
+ goto err_qp_table_free;
+ }
+
+ err = mlx4_init_counters_table(dev);
+ if (err && err != -ENOENT) {
+ mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
+ goto err_mcg_table_free;
+ }
+
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ enum mlx4_port_type port_type = 0;
+ mlx4_SENSE_PORT(dev, port, &port_type);
+ if (port_type)
+ dev->caps.port_type[port] = port_type;
+ ib_port_default_caps = 0;
+ err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
+ if (err)
+ mlx4_warn(dev, "failed to get port %d default "
+ "ib capabilities (%d). Continuing with "
+ "caps = 0\n", port, err);
+ dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
+ err = mlx4_SET_PORT(dev, port);
+ if (err) {
+ mlx4_err(dev, "Failed to set port %d, aborting\n",
+ port);
+ goto err_mcg_table_free;
+ }
+ }
+ mlx4_set_port_mask(dev);
+
+ return 0;
+
+err_mcg_table_free:
+ mlx4_cleanup_mcg_table(dev);
+ mlx4_cleanup_counters_table(dev);
+
+err_qp_table_free:
+ mlx4_cleanup_qp_table(dev);
+
+err_srq_table_free:
+ mlx4_cleanup_srq_table(dev);
+
+err_cq_table_free:
+ mlx4_cleanup_cq_table(dev);
+
+err_cmd_poll:
+ mlx4_cmd_use_polling(dev);
+
+err_eq_table_free:
+ mlx4_cleanup_eq_table(dev);
+
+err_mr_table_free:
+ mlx4_cleanup_mr_table(dev);
+
+err_pd_table_free:
+ mlx4_cleanup_pd_table(dev);
+
+err_kar_unmap:
+ iounmap(priv->kar);
+
+err_uar_free:
+ mlx4_uar_free(dev, &priv->driver_uar);
+
+err_uar_table_free:
+ mlx4_cleanup_uar_table(dev);
+ return err;
+}
+
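+/*
+ * Ask for min(num_online_cpus() + 1, MAX_MSIX_P_PORT) completion
+ * vectors per port plus MSIX_LEGACY_SZ shared ones, bounded by
+ * MAX_MSIX and by the EQs the device can provide; fall back to a
+ * single shared legacy interrupt if MSI-X cannot be enabled.
+ */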
+static void mlx4_enable_msi_x(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct msix_entry *entries;
+ int nreq = min_t(int, dev->caps.num_ports *
+ min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
+ + MSIX_LEGACY_SZ, MAX_MSIX);
+ int err;
+ int i;
+
+ if (msi_x) {
+ nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
+ nreq);
+ entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
+ if (!entries)
+ goto no_msi;
+
+ for (i = 0; i < nreq; ++i)
+ entries[i].entry = i;
+
+ retry:
+ err = pci_enable_msix(dev->pdev, entries, nreq);
+ if (err) {
+ /* Try again if at least 2 vectors are available */
+ if (err > 1) {
+ mlx4_info(dev, "Requested %d vectors, "
+ "but only %d MSI-X vectors available, "
+ "trying again\n", nreq, err);
+ nreq = err;
+ goto retry;
+ }
+ kfree(entries);
+ goto no_msi;
+ }
+
+ if (nreq <
+ MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
+ /* Working in legacy mode, all EQs shared */
+ dev->caps.comp_pool = 0;
+ dev->caps.num_comp_vectors = nreq - 1;
+ } else {
+ dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
+ dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
+ }
+ for (i = 0; i < nreq; ++i)
+ priv->eq_table.eq[i].irq = entries[i].vector;
+
+ dev->flags |= MLX4_FLAG_MSI_X;
+
+ kfree(entries);
+ return;
+ }
+
+no_msi:
+ dev->caps.num_comp_vectors = 1;
+ dev->caps.comp_pool = 0;
+
+ for (i = 0; i < 2; ++i)
+ priv->eq_table.eq[i].irq = dev->pdev->irq;
+}
+
+static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ int err = 0;
+
+ info->dev = dev;
+ info->port = port;
+ mlx4_init_mac_table(dev, &info->mac_table);
+ mlx4_init_vlan_table(dev, &info->vlan_table);
+ info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
+ (port - 1) * (1 << log_num_mac);
+
+ sprintf(info->dev_name, "mlx4_port%d", port);
+ info->port_attr.attr.name = info->dev_name;
+ info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+ info->port_attr.show = show_port_type;
+ info->port_attr.store = set_port_type;
+ sysfs_attr_init(&info->port_attr.attr);
+
+ err = device_create_file(&dev->pdev->dev, &info->port_attr);
+ if (err) {
+ mlx4_err(dev, "Failed to create file for port %d\n", port);
+ info->port = -1;
+ }
+
+ return err;
+}
+
+static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
+{
+ if (info->port < 0)
+ return;
+
+ device_remove_file(&info->dev->pdev->dev, &info->port_attr);
+}
+
+static int mlx4_init_steering(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int num_entries = dev->caps.num_ports;
+ int i, j;
+
+ priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
+ if (!priv->steer)
+ return -ENOMEM;
+
+ for (i = 0; i < num_entries; i++) {
+ for (j = 0; j < MLX4_NUM_STEERS; j++) {
+ INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
+ INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
+ }
+ INIT_LIST_HEAD(&priv->steer[i].high_prios);
+ }
+ return 0;
+}
+
+static void mlx4_clear_steering(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_steer_index *entry, *tmp_entry;
+ struct mlx4_promisc_qp *pqp, *tmp_pqp;
+ int num_entries = dev->caps.num_ports;
+ int i, j;
+
+ for (i = 0; i < num_entries; i++) {
+ for (j = 0; j < MLX4_NUM_STEERS; j++) {
+ list_for_each_entry_safe(pqp, tmp_pqp,
+ &priv->steer[i].promisc_qps[j],
+ list) {
+ list_del(&pqp->list);
+ kfree(pqp);
+ }
+ list_for_each_entry_safe(entry, tmp_entry,
+ &priv->steer[i].steer_entries[j],
+ list) {
+ list_del(&entry->list);
+ list_for_each_entry_safe(pqp, tmp_pqp,
+ &entry->duplicates,
+ list) {
+ list_del(&pqp->list);
+ kfree(pqp);
+ }
+ kfree(entry);
+ }
+ }
+ }
+ kfree(priv->steer);
+}
+
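+/*
+ * Probe path: enable and map the PCI device, reset the HCA (a boot
+ * ROM may have left it in an undefined state), bring up the command
+ * interface and firmware, set up ICM and the resource tables, enable
+ * MSI-X when available, then register ports and consumer interfaces.
+ */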
+static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mlx4_priv *priv;
+ struct mlx4_dev *dev;
+ int err;
+ int port;
+
+ pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable PCI device, "
+ "aborting.\n");
+ return err;
+ }
+
+ /*
+ * Check for BARs. We expect 0: 1MB
+ */
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
+ pci_resource_len(pdev, 0) != 1 << 20) {
+ dev_err(&pdev->dev, "Missing DCS, aborting.\n");
+ err = -ENODEV;
+ goto err_disable_pdev;
+ }
+ if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev, "Missing UAR, aborting.\n");
+ err = -ENODEV;
+ goto err_disable_pdev;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
+ goto err_disable_pdev;
+ }
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
+ goto err_release_regions;
+ }
+ }
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
+ "consistent PCI DMA mask.\n");
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
+ "aborting.\n");
+ goto err_release_regions;
+ }
+ }
+
+ /* Allow large DMA segments, up to the firmware limit of 1 GB */
+ dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
+
+ priv = kzalloc(sizeof *priv, GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "Device struct alloc failed, "
+ "aborting.\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ dev = &priv->dev;
+ dev->pdev = pdev;
+ INIT_LIST_HEAD(&priv->ctx_list);
+ spin_lock_init(&priv->ctx_lock);
+
+ mutex_init(&priv->port_mutex);
+
+ INIT_LIST_HEAD(&priv->pgdir_list);
+ mutex_init(&priv->pgdir_mutex);
+
+ INIT_LIST_HEAD(&priv->bf_list);
+ mutex_init(&priv->bf_mutex);
+
+ dev->rev_id = pdev->revision;
+
+ /*
+ * Now reset the HCA before we touch the PCI capabilities or
+ * attempt a firmware command, since a boot ROM may have left
+ * the HCA in an undefined state.
+ */
+ err = mlx4_reset(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+ goto err_free_dev;
+ }
+
+ if (mlx4_cmd_init(dev)) {
+ mlx4_err(dev, "Failed to init command interface, aborting.\n");
+ goto err_free_dev;
+ }
+
+ err = mlx4_init_hca(dev);
+ if (err)
+ goto err_cmd;
+
+ err = mlx4_alloc_eq_table(dev);
+ if (err)
+ goto err_close;
+
+ priv->msix_ctl.pool_bm = 0;
+ spin_lock_init(&priv->msix_ctl.pool_lock);
+
+ mlx4_enable_msi_x(dev);
+
+ err = mlx4_init_steering(dev);
+ if (err)
+ goto err_free_eq;
+
+ err = mlx4_setup_hca(dev);
+ if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
+ dev->flags &= ~MLX4_FLAG_MSI_X;
+ pci_disable_msix(pdev);
+ err = mlx4_setup_hca(dev);
+ }
+
+ if (err)
+ goto err_steer;
+
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ err = mlx4_init_port_info(dev, port);
+ if (err)
+ goto err_port;
+ }
+
+ err = mlx4_register_device(dev);
+ if (err)
+ goto err_port;
+
+ mlx4_sense_init(dev);
+ mlx4_start_sense(dev);
+
+ pci_set_drvdata(pdev, dev);
+
+ return 0;
+
+err_port:
+ for (--port; port >= 1; --port)
+ mlx4_cleanup_port_info(&priv->port[port]);
+
+ mlx4_cleanup_counters_table(dev);
+ mlx4_cleanup_mcg_table(dev);
+ mlx4_cleanup_qp_table(dev);
+ mlx4_cleanup_srq_table(dev);
+ mlx4_cleanup_cq_table(dev);
+ mlx4_cmd_use_polling(dev);
+ mlx4_cleanup_eq_table(dev);
+ mlx4_cleanup_mr_table(dev);
+ mlx4_cleanup_pd_table(dev);
+ mlx4_cleanup_uar_table(dev);
+
+err_steer:
+ mlx4_clear_steering(dev);
+
+err_free_eq:
+ mlx4_free_eq_table(dev);
+
+err_close:
+ if (dev->flags & MLX4_FLAG_MSI_X)
+ pci_disable_msix(pdev);
+
+ mlx4_close_hca(dev);
+
+err_cmd:
+ mlx4_cmd_cleanup(dev);
+
+err_free_dev:
+ kfree(priv);
+
+err_release_regions:
+ pci_release_regions(pdev);
+
+err_disable_pdev:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static int __devinit mlx4_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ printk_once(KERN_INFO "%s", mlx4_version);
+
+ return __mlx4_init_one(pdev, id);
+}
+
+static void mlx4_remove_one(struct pci_dev *pdev)
+{
+ struct mlx4_dev *dev = pci_get_drvdata(pdev);
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int p;
+
+ if (dev) {
+ mlx4_stop_sense(dev);
+ mlx4_unregister_device(dev);
+
+ for (p = 1; p <= dev->caps.num_ports; p++) {
+ mlx4_cleanup_port_info(&priv->port[p]);
+ mlx4_CLOSE_PORT(dev, p);
+ }
+
+ mlx4_cleanup_counters_table(dev);
+ mlx4_cleanup_mcg_table(dev);
+ mlx4_cleanup_qp_table(dev);
+ mlx4_cleanup_srq_table(dev);
+ mlx4_cleanup_cq_table(dev);
+ mlx4_cmd_use_polling(dev);
+ mlx4_cleanup_eq_table(dev);
+ mlx4_cleanup_mr_table(dev);
+ mlx4_cleanup_pd_table(dev);
+
+ iounmap(priv->kar);
+ mlx4_uar_free(dev, &priv->driver_uar);
+ mlx4_cleanup_uar_table(dev);
+ mlx4_clear_steering(dev);
+ mlx4_free_eq_table(dev);
+ mlx4_close_hca(dev);
+ mlx4_cmd_cleanup(dev);
+
+ if (dev->flags & MLX4_FLAG_MSI_X)
+ pci_disable_msix(pdev);
+
+ kfree(priv);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+int mlx4_restart_one(struct pci_dev *pdev)
+{
+ mlx4_remove_one(pdev);
+ return __mlx4_init_one(pdev, NULL);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
+ { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
+ { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
+ { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
+ { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
+ { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
+ { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
+ { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
+ { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
+ { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
+ { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
+ { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
+ { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
+
+static struct pci_driver mlx4_driver = {
+ .name = DRV_NAME,
+ .id_table = mlx4_pci_table,
+ .probe = mlx4_init_one,
+ .remove = __devexit_p(mlx4_remove_one)
+};
+
+static int __init mlx4_verify_params(void)
+{
+ if ((log_num_mac < 0) || (log_num_mac > 7)) {
+ pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac);
+ return -1;
+ }
+
+ if ((log_num_vlan < 0) || (log_num_vlan > 7)) {
+ pr_warning("mlx4_core: bad num_vlan: %d\n", log_num_vlan);
+ return -1;
+ }
+
+ if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
+ pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int __init mlx4_init(void)
+{
+ int ret;
+
+ if (mlx4_verify_params())
+ return -EINVAL;
+
+ mlx4_catas_init();
+
+ mlx4_wq = create_singlethread_workqueue("mlx4");
+ if (!mlx4_wq)
+ return -ENOMEM;
+
+ ret = pci_register_driver(&mlx4_driver);
+ return ret < 0 ? ret : 0;
+}
+
+static void __exit mlx4_cleanup(void)
+{
+ pci_unregister_driver(&mlx4_driver);
+ destroy_workqueue(mlx4_wq);
+}
+
+module_init(mlx4_init);
+module_exit(mlx4_cleanup);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
new file mode 100644
index 000000000000..cd1784593a3c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -0,0 +1,928 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/string.h>
+#include <linux/etherdevice.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+
+#define MGM_QPN_MASK 0x00FFFFFF
+#define MGM_BLCK_LB_BIT 30
+
+static const u8 zero_gid[16]; /* automatically initialized to 0 */
+
+static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
+ struct mlx4_cmd_mailbox *mailbox)
+{
+ return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
+ struct mlx4_cmd_mailbox *mailbox)
+{
+ return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
+ struct mlx4_cmd_mailbox *mailbox)
+{
+ u32 in_mod;
+
+ in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
+ return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
+ MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ u16 *hash, u8 op_mod)
+{
+ u64 imm;
+ int err;
+
+ err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
+ MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A);
+
+ if (!err)
+ *hash = imm;
+
+ return err;
+}
+
+static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
+ enum mlx4_steer_type steer,
+ u32 qpn)
+{
+ struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num];
+ struct mlx4_promisc_qp *pqp;
+
+ list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
+ if (pqp->qpn == qpn)
+ return pqp;
+ }
+ /* not found */
+ return NULL;
+}
+
+/*
+ * Add new entry to steering data structure.
+ * All promisc QPs should be added as well
+ */
+static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+ enum mlx4_steer_type steer,
+ unsigned int index, u32 qpn)
+{
+ struct mlx4_steer *s_steer;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mgm *mgm;
+ u32 members_count;
+ struct mlx4_steer_index *new_entry;
+ struct mlx4_promisc_qp *pqp;
+ struct mlx4_promisc_qp *dqp = NULL;
+ u32 prot;
+ int err;
+ u8 pf_num;
+
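+ /*
+ * Steering state is kept per physical function: a one-port device
+ * is indexed by vep number alone, otherwise the port index is
+ * folded into the low bit.
+ */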
+ pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
+ s_steer = &mlx4_priv(dev)->steer[pf_num];
+ new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
+ if (!new_entry)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&new_entry->duplicates);
+ new_entry->index = index;
+ list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);
+
+ /* If the given qpn is also a promisc qp,
+ * it should be inserted into the duplicates list
+ */
+ pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ if (pqp) {
+ dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
+ if (!dqp) {
+ err = -ENOMEM;
+ goto out_alloc;
+ }
+ dqp->qpn = qpn;
+ list_add_tail(&dqp->list, &new_entry->duplicates);
+ }
+
+ /* if no promisc qps for this vep, we are done */
+ if (list_empty(&s_steer->promisc_qps[steer]))
+ return 0;
+
+ /* now need to add all the promisc qps to the new
+ * steering entry, as they should also receive the packets
+ * destined to this address */
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = -ENOMEM;
+ goto out_alloc;
+ }
+ mgm = mailbox->buf;
+
+ err = mlx4_READ_ENTRY(dev, index, mailbox);
+ if (err)
+ goto out_mailbox;
+
+ members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
+ prot = be32_to_cpu(mgm->members_count) >> 30;
+ list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
+ /* don't add already existing qpn */
+ if (pqp->qpn == qpn)
+ continue;
+ if (members_count == MLX4_QP_PER_MGM) {
+ /* out of space */
+ err = -ENOMEM;
+ goto out_mailbox;
+ }
+
+ /* add the qpn */
+ mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
+ }
+ /* update the qp count and write the entry back with all the promisc qps */
+ mgm->members_count = cpu_to_be32(members_count | (prot << 30));
+ err = mlx4_WRITE_ENTRY(dev, index, mailbox);
+
+out_mailbox:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ if (!err)
+ return 0;
+out_alloc:
+ if (dqp) {
+ list_del(&dqp->list);
+ kfree(dqp);
+ }
+ list_del(&new_entry->list);
+ kfree(new_entry);
+ return err;
+}
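+
+/*
+ * pf_num, used throughout this file as the index into
+ * mlx4_priv(dev)->steer[], folds the VEP number and the port together:
+ * on a single-port device it is just vep_num, otherwise it is
+ * (vep_num << 1) | (port - 1), so e.g. vep 1, port 2 selects steer[3].
+ */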
+
+/* update the data structures with existing steering entry */
+static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+ enum mlx4_steer_type steer,
+ unsigned int index, u32 qpn)
+{
+ struct mlx4_steer *s_steer;
+ struct mlx4_steer_index *tmp_entry, *entry = NULL;
+ struct mlx4_promisc_qp *pqp;
+ struct mlx4_promisc_qp *dqp;
+ u8 pf_num;
+
+ pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
+ s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+ pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ if (!pqp)
+ return 0; /* nothing to do */
+
+ list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
+ if (tmp_entry->index == index) {
+ entry = tmp_entry;
+ break;
+ }
+ }
+ if (unlikely(!entry)) {
+ mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
+ return -EINVAL;
+ }
+
+ /* the given qpn is listed as a promisc qpn;
+ * we need to add it as a duplicate to this entry
+ * for future reference */
+ list_for_each_entry(dqp, &entry->duplicates, list) {
+ if (qpn == dqp->qpn)
+ return 0; /* qp is already duplicated */
+ }
+
+ /* add the qp as a duplicate on this index */
+ dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
+ if (!dqp)
+ return -ENOMEM;
+ dqp->qpn = qpn;
+ list_add_tail(&dqp->list, &entry->duplicates);
+
+ return 0;
+}
+
+/* Check whether a qpn is a duplicate on a steering entry;
+ * if so, it should not be removed from the MGM */
+static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+ enum mlx4_steer_type steer,
+ unsigned int index, u32 qpn)
+{
+ struct mlx4_steer *s_steer;
+ struct mlx4_steer_index *tmp_entry, *entry = NULL;
+ struct mlx4_promisc_qp *dqp, *tmp_dqp;
+ u8 pf_num;
+
+ pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
+ s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+ /* if qp is not promisc, it cannot be duplicated */
+ if (!get_promisc_qp(dev, pf_num, steer, qpn))
+ return false;
+
+ /* The qp is a promisc qp, so it is a duplicate on this index.
+ * Find the index entry and remove the duplicate */
+ list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
+ if (tmp_entry->index == index) {
+ entry = tmp_entry;
+ break;
+ }
+ }
+ if (unlikely(!entry)) {
+ mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
+ return false;
+ }
+ list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
+ if (dqp->qpn == qpn) {
+ list_del(&dqp->list);
+ kfree(dqp);
+ }
+ }
+ return true;
+}
+
+/* If a steering entry contains only promisc QPs, it can be removed. */
+static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+ enum mlx4_steer_type steer,
+ unsigned int index, u32 tqpn)
+{
+ struct mlx4_steer *s_steer;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mgm *mgm;
+ struct mlx4_steer_index *entry = NULL, *tmp_entry;
+ u32 qpn;
+ u32 members_count;
+ bool ret = false;
+ int i;
+ u8 pf_num;
+
+ pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
+ s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return false;
+ mgm = mailbox->buf;
+
+ if (mlx4_READ_ENTRY(dev, index, mailbox))
+ goto out;
+ members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
+ for (i = 0; i < members_count; i++) {
+ qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
+ if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
+ /* the qp is not promisc, the entry can't be removed */
+ goto out;
+ }
+ }
+ /* All the qps currently registered for this entry are promiscuous;
+ * check for duplicates */
+ ret = true;
+ list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
+ if (entry->index == index) {
+ if (list_empty(&entry->duplicates)) {
+ list_del(&entry->list);
+ kfree(entry);
+ } else {
+ /* This entry contains duplicates so it shouldn't be removed */
+ ret = false;
+ goto out;
+ }
+ }
+ }
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return ret;
+}
+
+static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+ enum mlx4_steer_type steer, u32 qpn)
+{
+ struct mlx4_steer *s_steer;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mgm *mgm;
+ struct mlx4_steer_index *entry;
+ struct mlx4_promisc_qp *pqp;
+ struct mlx4_promisc_qp *dqp;
+ u32 members_count;
+ u32 prot;
+ int i;
+ bool found;
+ int last_index;
+ int err;
+ u8 pf_num;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
+ s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+ mutex_lock(&priv->mcg_table.mutex);
+
+ if (get_promisc_qp(dev, pf_num, steer, qpn)) {
+ err = 0; /* Nothing to do, already exists */
+ goto out_mutex;
+ }
+
+ pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
+ if (!pqp) {
+ err = -ENOMEM;
+ goto out_mutex;
+ }
+ pqp->qpn = qpn;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = -ENOMEM;
+ goto out_alloc;
+ }
+ mgm = mailbox->buf;
+
+ /* the promisc qp needs to be added to each one of the steering
+ * entries; if it already exists there, it needs to be added as a
+ * duplicate for this entry instead */
+ list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
+ err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
+ if (err)
+ goto out_mailbox;
+
+ members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
+ prot = be32_to_cpu(mgm->members_count) >> 30;
+ found = false;
+ for (i = 0; i < members_count; i++) {
+ if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
+ /* Entry already exists, add to duplicates */
+ dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
+ if (!dqp) {
+ err = -ENOMEM; /* err is still 0 from the successful READ_ENTRY */
+ goto out_mailbox;
+ }
+ dqp->qpn = qpn;
+ list_add_tail(&dqp->list, &entry->duplicates);
+ found = true;
+ }
+ }
+ if (!found) {
+ /* Need to add the qpn to mgm */
+ if (members_count == MLX4_QP_PER_MGM) {
+ /* entry is full */
+ err = -ENOMEM;
+ goto out_mailbox;
+ }
+ mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
+ mgm->members_count = cpu_to_be32(members_count | (prot << 30));
+ err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
+ if (err)
+ goto out_mailbox;
+ }
+ last_index = entry->index;
+ }
+
+ /* add the new qpn to list of promisc qps */
+ list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
+ /* now need to add all the promisc qps to default entry */
+ memset(mgm, 0, sizeof *mgm);
+ members_count = 0;
+ list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
+ mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
+ mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
+
+ err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+ if (err)
+ goto out_list;
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ mutex_unlock(&priv->mcg_table.mutex);
+ return 0;
+
+out_list:
+ list_del(&pqp->list);
+out_mailbox:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+out_alloc:
+ kfree(pqp);
+out_mutex:
+ mutex_unlock(&priv->mcg_table.mutex);
+ return err;
+}
+
+static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+ enum mlx4_steer_type steer, u32 qpn)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_steer *s_steer;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mgm *mgm;
+ struct mlx4_steer_index *entry;
+ struct mlx4_promisc_qp *pqp;
+ struct mlx4_promisc_qp *dqp;
+ u32 members_count;
+ bool found;
+ bool back_to_list = false;
+ int loc, i;
+ int err;
+ u8 pf_num;
+
+ pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
+ s_steer = &mlx4_priv(dev)->steer[pf_num];
+ mutex_lock(&priv->mcg_table.mutex);
+
+ pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ if (unlikely(!pqp)) {
+ mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
+ /* nothing to do */
+ err = 0;
+ goto out_mutex;
+ }
+
+ /* remove from list of promisc qps */
+ list_del(&pqp->list);
+
+ /* set the default entry not to include the removed one */
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = -ENOMEM;
+ back_to_list = true;
+ goto out_list;
+ }
+ mgm = mailbox->buf;
+ members_count = 0;
+ list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
+ mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
+ mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
+
+ err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+ if (err)
+ goto out_mailbox;
+
+ /* remove the qp from all the steering entries */
+ list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
+ found = false;
+ list_for_each_entry(dqp, &entry->duplicates, list) {
+ if (dqp->qpn == qpn) {
+ found = true;
+ break;
+ }
+ }
+ if (found) {
+ /* a duplicate, no need to change the mgm,
+ * only update the duplicates list */
+ list_del(&dqp->list);
+ kfree(dqp);
+ } else {
+ err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
+ if (err)
+ goto out_mailbox;
+ members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
+ for (loc = -1, i = 0; i < members_count; ++i)
+ if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
+ loc = i;
+
+ mgm->members_count = cpu_to_be32(--members_count |
+ (MLX4_PROT_ETH << 30));
+ mgm->qp[loc] = mgm->qp[i - 1];
+ mgm->qp[i - 1] = 0;
+
+ err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
+ if (err)
+ goto out_mailbox;
+ }
+
+ }
+
+out_mailbox:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+out_list:
+ if (back_to_list)
+ list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
+ else
+ kfree(pqp);
+out_mutex:
+ mutex_unlock(&priv->mcg_table.mutex);
+ return err;
+}
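+
+/*
+ * A note on the unwind above: the pqp is unlinked from the promisc
+ * list before any command is issued, and back_to_list re-links it only
+ * when the mailbox allocation fails, i.e. before any firmware state
+ * was touched; later failures propagate the error with the qp already
+ * gone from the driver's list.
+ */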
+
+/*
+ * Caller must hold the MCG table mutex. gid and mgm parameters must
+ * be properly aligned for command interface.
+ *
+ * Returns 0 unless a firmware command error occurs.
+ *
+ * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
+ * and *mgm holds MGM entry.
+ *
+ * If GID is found in AMGM, *index = index in AMGM, *prev = index of
+ * previous entry in hash chain and *mgm holds AMGM entry.
+ *
+ * If no AMGM exists for given gid, *index = -1, *prev = index of last
+ * entry in hash chain and *mgm holds end of hash chain.
+ */
+static int find_entry(struct mlx4_dev *dev, u8 port,
+ u8 *gid, enum mlx4_protocol prot,
+ enum mlx4_steer_type steer,
+ struct mlx4_cmd_mailbox *mgm_mailbox,
+ u16 *hash, int *prev, int *index)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mgm *mgm = mgm_mailbox->buf;
+ u8 *mgid;
+ int err;
+ u8 op_mod = (prot == MLX4_PROT_ETH) ?
+ !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return -ENOMEM;
+ mgid = mailbox->buf;
+
+ memcpy(mgid, gid, 16);
+
+ err = mlx4_GID_HASH(dev, mailbox, hash, op_mod);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ if (err)
+ return err;
+
+ if (0) /* hash dump, compiled out */
+ mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, *hash);
+
+ *index = *hash;
+ *prev = -1;
+
+ do {
+ err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
+ if (err)
+ return err;
+
+ if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
+ if (*index != *hash) {
+ mlx4_err(dev, "Found zero MGID in AMGM.\n");
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ if (!memcmp(mgm->gid, gid, 16) &&
+ be32_to_cpu(mgm->members_count) >> 30 == prot)
+ return err;
+
+ *prev = *index;
+ *index = be32_to_cpu(mgm->next_gid_index) >> 6;
+ } while (*index);
+
+ *index = -1;
+ return err;
+}
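+
+/*
+ * On the chain walk above: the next index in the AMGM hash chain is
+ * kept in bits 6-31 of mgm->next_gid_index, hence the ">> 6" when
+ * following a link here and the "<< 6" when a new entry is linked in
+ * mlx4_qp_attach_common() below; a next index of 0 terminates the
+ * chain.
+ */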
+
+int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ int block_mcast_loopback, enum mlx4_protocol prot,
+ enum mlx4_steer_type steer)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mgm *mgm;
+ u32 members_count;
+ u16 hash;
+ int index, prev;
+ int link = 0;
+ int i;
+ int err;
+ u8 port = gid[5];
+ u8 new_entry = 0;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ mgm = mailbox->buf;
+
+ mutex_lock(&priv->mcg_table.mutex);
+ err = find_entry(dev, port, gid, prot, steer,
+ mailbox, &hash, &prev, &index);
+ if (err)
+ goto out;
+
+ if (index != -1) {
+ if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
+ new_entry = 1;
+ memcpy(mgm->gid, gid, 16);
+ }
+ } else {
+ link = 1;
+
+ index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
+ if (index == -1) {
+ mlx4_err(dev, "No AMGM entries left\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ index += dev->caps.num_mgms;
+
+ memset(mgm, 0, sizeof *mgm);
+ memcpy(mgm->gid, gid, 16);
+ }
+
+ members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
+ if (members_count == MLX4_QP_PER_MGM) {
+ mlx4_err(dev, "MGM at index %x is full.\n", index);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < members_count; ++i)
+ if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
+ mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
+ err = 0;
+ goto out;
+ }
+
+ if (block_mcast_loopback)
+ mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
+ (1U << MGM_BLCK_LB_BIT));
+ else
+ mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
+
+ mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);
+
+ err = mlx4_WRITE_ENTRY(dev, index, mailbox);
+ if (err)
+ goto out;
+
+ if (!link)
+ goto out;
+
+ err = mlx4_READ_ENTRY(dev, prev, mailbox);
+ if (err)
+ goto out;
+
+ mgm->next_gid_index = cpu_to_be32(index << 6);
+
+ err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
+ if (err)
+ goto out;
+
+out:
+ if (prot == MLX4_PROT_ETH) {
+ /* manage the steering entry for promisc mode */
+ if (new_entry)
+ new_steering_entry(dev, 0, port, steer, index, qp->qpn);
+ else
+ existing_steering_entry(dev, 0, port, steer,
+ index, qp->qpn);
+ }
+ if (err && link && index != -1) {
+ if (index < dev->caps.num_mgms)
+ mlx4_warn(dev, "Got AMGM index %d < %d",
+ index, dev->caps.num_mgms);
+ else
+ mlx4_bitmap_free(&priv->mcg_table.bitmap,
+ index - dev->caps.num_mgms);
+ }
+ mutex_unlock(&priv->mcg_table.mutex);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
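+
+/*
+ * Error unwind above: AMGM indices handed out by the bitmap start at
+ * dev->caps.num_mgms (the MGM hash table proper occupies the indices
+ * below that), so on failure the bitmap offset is recovered with
+ * index - dev->caps.num_mgms; an index below num_mgms is unexpected
+ * state, hence the sanity warning.
+ */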
+
+int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ enum mlx4_protocol prot, enum mlx4_steer_type steer)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mgm *mgm;
+ u32 members_count;
+ u16 hash;
+ int prev, index;
+ int i, loc;
+ int err;
+ u8 port = gid[5];
+ bool removed_entry = false;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ mgm = mailbox->buf;
+
+ mutex_lock(&priv->mcg_table.mutex);
+
+ err = find_entry(dev, port, gid, prot, steer,
+ mailbox, &hash, &prev, &index);
+ if (err)
+ goto out;
+
+ if (index == -1) {
+ mlx4_err(dev, "MGID %pI6 not found\n", gid);
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* if this qp is also a promisc qp, it shouldn't be removed */
+ if (prot == MLX4_PROT_ETH &&
+ check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
+ goto out;
+
+ members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
+ for (loc = -1, i = 0; i < members_count; ++i)
+ if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
+ loc = i;
+
+ if (loc == -1) {
+ mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
+ err = -EINVAL;
+ goto out;
+ }
+
+ mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
+ mgm->qp[loc] = mgm->qp[i - 1];
+ mgm->qp[i - 1] = 0;
+
+ if (prot == MLX4_PROT_ETH)
+ removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
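+ /* members remain (i != 1) and the entry is staying: just write it back */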
+ if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
+ err = mlx4_WRITE_ENTRY(dev, index, mailbox);
+ goto out;
+ }
+
+ /* We are going to delete the entry, members count should be 0 */
+ mgm->members_count = cpu_to_be32((u32) prot << 30);
+
+ if (prev == -1) {
+ /* Remove entry from MGM */
+ int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
+ if (amgm_index) {
+ err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
+ if (err)
+ goto out;
+ } else
+ memset(mgm->gid, 0, 16);
+
+ err = mlx4_WRITE_ENTRY(dev, index, mailbox);
+ if (err)
+ goto out;
+
+ if (amgm_index) {
+ if (amgm_index < dev->caps.num_mgms)
+ mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
+ index, amgm_index, dev->caps.num_mgms);
+ else
+ mlx4_bitmap_free(&priv->mcg_table.bitmap,
+ amgm_index - dev->caps.num_mgms);
+ }
+ } else {
+ /* Remove entry from AMGM */
+ int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
+ err = mlx4_READ_ENTRY(dev, prev, mailbox);
+ if (err)
+ goto out;
+
+ mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);
+
+ err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
+ if (err)
+ goto out;
+
+ if (index < dev->caps.num_mgms)
+ mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
+ prev, index, dev->caps.num_mgms);
+ else
+ mlx4_bitmap_free(&priv->mcg_table.bitmap,
+ index - dev->caps.num_mgms);
+ }
+
+out:
+ mutex_unlock(&priv->mcg_table.mutex);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ int block_mcast_loopback, enum mlx4_protocol prot)
+{
+ enum mlx4_steer_type steer;
+
+ steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
+
+ if (prot == MLX4_PROT_ETH &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
+ return 0;
+
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (steer << 1);
+
+ return mlx4_qp_attach_common(dev, qp, gid,
+ block_mcast_loopback, prot,
+ steer);
+}
+EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
+
+int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ enum mlx4_protocol prot)
+{
+ enum mlx4_steer_type steer;
+
+ steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
+
+ if (prot == MLX4_PROT_ETH &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
+ return 0;
+
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (steer << 1);
+
+ return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
+}
+EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
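+
+/*
+ * For MLX4_PROT_ETH the gid[] buffer above is not an InfiniBand GID:
+ * byte 5 carries the port, bytes 10..15 carry the MAC address (hence
+ * the is_valid_ether_addr(&gid[10]) test that picks unicast vs.
+ * multicast steering), and byte 7 is tagged with the steering type
+ * before the common attach/detach path hashes the buffer.
+ */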
+
+int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
+{
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
+ return 0;
+
+ return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+}
+EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
+
+int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
+{
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
+ return 0;
+
+ return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+}
+EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
+
+int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
+{
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
+ return 0;
+
+ return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
+
+int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
+{
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
+ return 0;
+
+ return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
+
+int mlx4_init_mcg_table(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err;
+
+ err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
+ dev->caps.num_amgms - 1, 0, 0);
+ if (err)
+ return err;
+
+ mutex_init(&priv->mcg_table.mutex);
+
+ return 0;
+}
+
+void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
+{
+ mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
new file mode 100644
index 000000000000..a2fcd8402d37
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -0,0 +1,459 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_H
+#define MLX4_H
+
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/timer.h>
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
+
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/driver.h>
+#include <linux/mlx4/doorbell.h>
+
+#define DRV_NAME "mlx4_core"
+#define DRV_VERSION "1.0"
+#define DRV_RELDATE "July 14, 2011"
+
+enum {
+ MLX4_HCR_BASE = 0x80680,
+ MLX4_HCR_SIZE = 0x0001c,
+ MLX4_CLR_INT_SIZE = 0x00008
+};
+
+enum {
+ MLX4_MGM_ENTRY_SIZE = 0x100,
+ MLX4_QP_PER_MGM = 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2),
+ MLX4_MTT_ENTRY_PER_SEG = 8
+};
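+
+/*
+ * Worked out: with MLX4_MGM_ENTRY_SIZE = 0x100 (256 bytes),
+ * MLX4_QP_PER_MGM = 4 * (256 / 16 - 2) = 56, i.e. one entry holds the
+ * 32-byte header of struct mlx4_mgm (defined below) plus 56 four-byte
+ * QP words: 32 + 56 * 4 = 256.
+ */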
+
+enum {
+ MLX4_NUM_PDS = 1 << 15
+};
+
+enum {
+ MLX4_CMPT_TYPE_QP = 0,
+ MLX4_CMPT_TYPE_SRQ = 1,
+ MLX4_CMPT_TYPE_CQ = 2,
+ MLX4_CMPT_TYPE_EQ = 3,
+ MLX4_CMPT_NUM_TYPE
+};
+
+enum {
+ MLX4_CMPT_SHIFT = 24,
+ MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
+};
+
+#ifdef CONFIG_MLX4_DEBUG
+extern int mlx4_debug_level;
+#else /* CONFIG_MLX4_DEBUG */
+#define mlx4_debug_level (0)
+#endif /* CONFIG_MLX4_DEBUG */
+
+#define mlx4_dbg(mdev, format, arg...) \
+do { \
+ if (mlx4_debug_level) \
+ dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \
+} while (0)
+
+#define mlx4_err(mdev, format, arg...) \
+ dev_err(&mdev->pdev->dev, format, ##arg)
+#define mlx4_info(mdev, format, arg...) \
+ dev_info(&mdev->pdev->dev, format, ##arg)
+#define mlx4_warn(mdev, format, arg...) \
+ dev_warn(&mdev->pdev->dev, format, ##arg)
+
+struct mlx4_bitmap {
+ u32 last;
+ u32 top;
+ u32 max;
+ u32 reserved_top;
+ u32 mask;
+ u32 avail;
+ spinlock_t lock;
+ unsigned long *table;
+};
+
+struct mlx4_buddy {
+ unsigned long **bits;
+ unsigned int *num_free;
+ int max_order;
+ spinlock_t lock;
+};
+
+struct mlx4_icm;
+
+struct mlx4_icm_table {
+ u64 virt;
+ int num_icm;
+ int num_obj;
+ int obj_size;
+ int lowmem;
+ int coherent;
+ struct mutex mutex;
+ struct mlx4_icm **icm;
+};
+
+struct mlx4_eq {
+ struct mlx4_dev *dev;
+ void __iomem *doorbell;
+ int eqn;
+ u32 cons_index;
+ u16 irq;
+ u16 have_irq;
+ int nent;
+ struct mlx4_buf_list *page_list;
+ struct mlx4_mtt mtt;
+};
+
+struct mlx4_profile {
+ int num_qp;
+ int rdmarc_per_qp;
+ int num_srq;
+ int num_cq;
+ int num_mcg;
+ int num_mpt;
+ int num_mtt;
+};
+
+struct mlx4_fw {
+ u64 clr_int_base;
+ u64 catas_offset;
+ struct mlx4_icm *fw_icm;
+ struct mlx4_icm *aux_icm;
+ u32 catas_size;
+ u16 fw_pages;
+ u8 clr_int_bar;
+ u8 catas_bar;
+};
+
+#define MGM_QPN_MASK 0x00FFFFFF
+#define MGM_BLCK_LB_BIT 30
+
+struct mlx4_promisc_qp {
+ struct list_head list;
+ u32 qpn;
+};
+
+struct mlx4_steer_index {
+ struct list_head list;
+ unsigned int index;
+ struct list_head duplicates;
+};
+
+struct mlx4_mgm {
+ __be32 next_gid_index;
+ __be32 members_count;
+ u32 reserved[2];
+ u8 gid[16];
+ __be32 qp[MLX4_QP_PER_MGM];
+};
+struct mlx4_cmd {
+ struct pci_pool *pool;
+ void __iomem *hcr;
+ struct mutex hcr_mutex;
+ struct semaphore poll_sem;
+ struct semaphore event_sem;
+ int max_cmds;
+ spinlock_t context_lock;
+ int free_head;
+ struct mlx4_cmd_context *context;
+ u16 token_mask;
+ u8 use_events;
+ u8 toggle;
+};
+
+struct mlx4_uar_table {
+ struct mlx4_bitmap bitmap;
+};
+
+struct mlx4_mr_table {
+ struct mlx4_bitmap mpt_bitmap;
+ struct mlx4_buddy mtt_buddy;
+ u64 mtt_base;
+ u64 mpt_base;
+ struct mlx4_icm_table mtt_table;
+ struct mlx4_icm_table dmpt_table;
+};
+
+struct mlx4_cq_table {
+ struct mlx4_bitmap bitmap;
+ spinlock_t lock;
+ struct radix_tree_root tree;
+ struct mlx4_icm_table table;
+ struct mlx4_icm_table cmpt_table;
+};
+
+struct mlx4_eq_table {
+ struct mlx4_bitmap bitmap;
+ char *irq_names;
+ void __iomem *clr_int;
+ void __iomem **uar_map;
+ u32 clr_mask;
+ struct mlx4_eq *eq;
+ struct mlx4_icm_table table;
+ struct mlx4_icm_table cmpt_table;
+ int have_irq;
+ u8 inta_pin;
+};
+
+struct mlx4_srq_table {
+ struct mlx4_bitmap bitmap;
+ spinlock_t lock;
+ struct radix_tree_root tree;
+ struct mlx4_icm_table table;
+ struct mlx4_icm_table cmpt_table;
+};
+
+struct mlx4_qp_table {
+ struct mlx4_bitmap bitmap;
+ u32 rdmarc_base;
+ int rdmarc_shift;
+ spinlock_t lock;
+ struct mlx4_icm_table qp_table;
+ struct mlx4_icm_table auxc_table;
+ struct mlx4_icm_table altc_table;
+ struct mlx4_icm_table rdmarc_table;
+ struct mlx4_icm_table cmpt_table;
+};
+
+struct mlx4_mcg_table {
+ struct mutex mutex;
+ struct mlx4_bitmap bitmap;
+ struct mlx4_icm_table table;
+};
+
+struct mlx4_catas_err {
+ u32 __iomem *map;
+ struct timer_list timer;
+ struct list_head list;
+};
+
+#define MLX4_MAX_MAC_NUM 128
+#define MLX4_MAC_TABLE_SIZE (MLX4_MAX_MAC_NUM << 3)
+
+struct mlx4_mac_table {
+ __be64 entries[MLX4_MAX_MAC_NUM];
+ int refs[MLX4_MAX_MAC_NUM];
+ struct mutex mutex;
+ int total;
+ int max;
+};
+
+#define MLX4_MAX_VLAN_NUM 128
+#define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2)
+
+struct mlx4_vlan_table {
+ __be32 entries[MLX4_MAX_VLAN_NUM];
+ int refs[MLX4_MAX_VLAN_NUM];
+ struct mutex mutex;
+ int total;
+ int max;
+};
+
+struct mlx4_mac_entry {
+ u64 mac;
+};
+
+struct mlx4_port_info {
+ struct mlx4_dev *dev;
+ int port;
+ char dev_name[16];
+ struct device_attribute port_attr;
+ enum mlx4_port_type tmp_type;
+ struct mlx4_mac_table mac_table;
+ struct radix_tree_root mac_tree;
+ struct mlx4_vlan_table vlan_table;
+ int base_qpn;
+};
+
+struct mlx4_sense {
+ struct mlx4_dev *dev;
+ u8 do_sense_port[MLX4_MAX_PORTS + 1];
+ u8 sense_allowed[MLX4_MAX_PORTS + 1];
+ struct delayed_work sense_poll;
+};
+
+struct mlx4_msix_ctl {
+ u64 pool_bm;
+ spinlock_t pool_lock;
+};
+
+struct mlx4_steer {
+ struct list_head promisc_qps[MLX4_NUM_STEERS];
+ struct list_head steer_entries[MLX4_NUM_STEERS];
+ struct list_head high_prios;
+};
+
+struct mlx4_priv {
+ struct mlx4_dev dev;
+
+ struct list_head dev_list;
+ struct list_head ctx_list;
+ spinlock_t ctx_lock;
+
+ struct list_head pgdir_list;
+ struct mutex pgdir_mutex;
+
+ struct mlx4_fw fw;
+ struct mlx4_cmd cmd;
+
+ struct mlx4_bitmap pd_bitmap;
+ struct mlx4_uar_table uar_table;
+ struct mlx4_mr_table mr_table;
+ struct mlx4_cq_table cq_table;
+ struct mlx4_eq_table eq_table;
+ struct mlx4_srq_table srq_table;
+ struct mlx4_qp_table qp_table;
+ struct mlx4_mcg_table mcg_table;
+ struct mlx4_bitmap counters_bitmap;
+
+ struct mlx4_catas_err catas_err;
+
+ void __iomem *clr_base;
+
+ struct mlx4_uar driver_uar;
+ void __iomem *kar;
+ struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
+ struct mlx4_sense sense;
+ struct mutex port_mutex;
+ struct mlx4_msix_ctl msix_ctl;
+ struct mlx4_steer *steer;
+ struct list_head bf_list;
+ struct mutex bf_mutex;
+ struct io_mapping *bf_mapping;
+};
+
+static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
+{
+ return container_of(dev, struct mlx4_priv, dev);
+}
+
+#define MLX4_SENSE_RANGE (HZ * 3)
+
+extern struct workqueue_struct *mlx4_wq;
+
+u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
+void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
+u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
+u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
+int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
+ u32 reserved_bot, u32 reserved_top);
+void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
+
+int mlx4_reset(struct mlx4_dev *dev);
+
+int mlx4_alloc_eq_table(struct mlx4_dev *dev);
+void mlx4_free_eq_table(struct mlx4_dev *dev);
+
+int mlx4_init_pd_table(struct mlx4_dev *dev);
+int mlx4_init_uar_table(struct mlx4_dev *dev);
+int mlx4_init_mr_table(struct mlx4_dev *dev);
+int mlx4_init_eq_table(struct mlx4_dev *dev);
+int mlx4_init_cq_table(struct mlx4_dev *dev);
+int mlx4_init_qp_table(struct mlx4_dev *dev);
+int mlx4_init_srq_table(struct mlx4_dev *dev);
+int mlx4_init_mcg_table(struct mlx4_dev *dev);
+
+void mlx4_cleanup_pd_table(struct mlx4_dev *dev);
+void mlx4_cleanup_uar_table(struct mlx4_dev *dev);
+void mlx4_cleanup_mr_table(struct mlx4_dev *dev);
+void mlx4_cleanup_eq_table(struct mlx4_dev *dev);
+void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
+void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
+void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
+void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
+
+void mlx4_start_catas_poll(struct mlx4_dev *dev);
+void mlx4_stop_catas_poll(struct mlx4_dev *dev);
+void mlx4_catas_init(void);
+int mlx4_restart_one(struct pci_dev *pdev);
+int mlx4_register_device(struct mlx4_dev *dev);
+void mlx4_unregister_device(struct mlx4_dev *dev);
+void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port);
+
+struct mlx4_dev_cap;
+struct mlx4_init_hca_param;
+
+u64 mlx4_make_profile(struct mlx4_dev *dev,
+ struct mlx4_profile *request,
+ struct mlx4_dev_cap *dev_cap,
+ struct mlx4_init_hca_param *init_hca);
+
+int mlx4_cmd_init(struct mlx4_dev *dev);
+void mlx4_cmd_cleanup(struct mlx4_dev *dev);
+void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
+int mlx4_cmd_use_events(struct mlx4_dev *dev);
+void mlx4_cmd_use_polling(struct mlx4_dev *dev);
+
+void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
+void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
+
+void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
+
+void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
+
+void mlx4_handle_catas_err(struct mlx4_dev *dev);
+
+int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
+ enum mlx4_port_type *type);
+void mlx4_do_sense_ports(struct mlx4_dev *dev,
+ enum mlx4_port_type *stype,
+ enum mlx4_port_type *defaults);
+void mlx4_start_sense(struct mlx4_dev *dev);
+void mlx4_stop_sense(struct mlx4_dev *dev);
+void mlx4_sense_init(struct mlx4_dev *dev);
+int mlx4_check_port_params(struct mlx4_dev *dev,
+ enum mlx4_port_type *port_type);
+int mlx4_change_port_types(struct mlx4_dev *dev,
+ enum mlx4_port_type *port_types);
+
+void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
+void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
+
+int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
+int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
+
+int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ enum mlx4_protocol prot, enum mlx4_steer_type steer);
+int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ int block_mcast_loopback, enum mlx4_protocol prot,
+ enum mlx4_steer_type steer);
+#endif /* MLX4_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
new file mode 100644
index 000000000000..ed84811766e6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -0,0 +1,607 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _MLX4_EN_H_
+#define _MLX4_EN_H_
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/qp.h>
+#include <linux/mlx4/cq.h>
+#include <linux/mlx4/srq.h>
+#include <linux/mlx4/doorbell.h>
+#include <linux/mlx4/cmd.h>
+
+#include "en_port.h"
+
+#define DRV_NAME "mlx4_en"
+#define DRV_VERSION "1.5.4.1"
+#define DRV_RELDATE "March 2011"
+
+#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
+
+/*
+ * Device constants
+ */
+
+
+#define MLX4_EN_PAGE_SHIFT 12
+#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
+#define MAX_RX_RINGS 16
+#define MIN_RX_RINGS 4
+#define TXBB_SIZE 64
+#define HEADROOM (2048 / TXBB_SIZE + 1)
+#define STAMP_STRIDE 64
+#define STAMP_DWORDS (STAMP_STRIDE / 4)
+#define STAMP_SHIFT 31
+#define STAMP_VAL 0x7fffffff
+#define STATS_DELAY (HZ / 4)
+
+/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
+#define MAX_DESC_SIZE 512
+#define MAX_DESC_TXBBS (MAX_DESC_SIZE / TXBB_SIZE)
+
+/*
+ * OS related constants and tunables
+ */
+
+#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ)
+
+#define MLX4_EN_ALLOC_ORDER 2
+#define MLX4_EN_ALLOC_SIZE (PAGE_SIZE << MLX4_EN_ALLOC_ORDER)
+
+#define MLX4_EN_MAX_LRO_DESCRIPTORS 32
+
+/* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU
+ * and 4K allocations) */
+enum {
+ FRAG_SZ0 = 512 - NET_IP_ALIGN,
+ FRAG_SZ1 = 1024,
+ FRAG_SZ2 = 4096,
+ FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
+};
+#define MLX4_EN_MAX_RX_FRAGS 4
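+
+/* Illustrative split under the scheme above (assuming 4K pages and
+ * NET_IP_ALIGN = 2): a 9600-byte frame consumes 510 + 1024 + 4096
+ * bytes in the first three fragments and the remaining ~4KB lands in
+ * the FRAG_SZ3 (MLX4_EN_ALLOC_SIZE) fragment.
+ */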
+
+/* Maximum ring sizes */
+#define MLX4_EN_MAX_TX_SIZE 8192
+#define MLX4_EN_MAX_RX_SIZE 8192
+
+/* Minimum ring size for our page-allocation scheme to work */
+#define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
+#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE)
+
+#define MLX4_EN_SMALL_PKT_SIZE 64
+#define MLX4_EN_NUM_TX_RINGS 8
+#define MLX4_EN_NUM_PPP_RINGS 8
+#define MAX_TX_RINGS (MLX4_EN_NUM_TX_RINGS + MLX4_EN_NUM_PPP_RINGS)
+#define MLX4_EN_DEF_TX_RING_SIZE 512
+#define MLX4_EN_DEF_RX_RING_SIZE 1024
+
+/* Target number of packets to coalesce with interrupt moderation */
+#define MLX4_EN_RX_COAL_TARGET 44
+#define MLX4_EN_RX_COAL_TIME 0x10
+
+#define MLX4_EN_TX_COAL_PKTS 5
+#define MLX4_EN_TX_COAL_TIME 0x80
+
+#define MLX4_EN_RX_RATE_LOW 400000
+#define MLX4_EN_RX_COAL_TIME_LOW 0
+#define MLX4_EN_RX_RATE_HIGH 450000
+#define MLX4_EN_RX_COAL_TIME_HIGH 128
+#define MLX4_EN_RX_SIZE_THRESH 1024
+#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
+#define MLX4_EN_SAMPLE_INTERVAL 0
+#define MLX4_EN_AVG_PKT_SMALL 256
+
+#define MLX4_EN_AUTO_CONF 0xffff
+
+#define MLX4_EN_DEF_RX_PAUSE 1
+#define MLX4_EN_DEF_TX_PAUSE 1
+
+/* Interval between successive polls in the Tx routine when polling is used
+ * instead of interrupts (in per-core Tx rings) - should be a power of 2 */
+#define MLX4_EN_TX_POLL_MODER 16
+#define MLX4_EN_TX_POLL_TIMEOUT (HZ / 4)
+
+#define ETH_LLC_SNAP_SIZE 8
+
+#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
+#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
+#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
+
+#define MLX4_EN_MIN_MTU 46
+#define ETH_BCAST 0xffffffffffffULL
+
+#define MLX4_EN_LOOPBACK_RETRIES 5
+#define MLX4_EN_LOOPBACK_TIMEOUT 100
+
+#ifdef MLX4_EN_PERF_STAT
+/* Number of samples to 'average' */
+#define AVG_SIZE 128
+#define AVG_FACTOR 1024
+#define NUM_PERF_STATS NUM_PERF_COUNTERS
+
+#define INC_PERF_COUNTER(cnt) (++(cnt))
+#define ADD_PERF_COUNTER(cnt, add) ((cnt) += (add))
+#define AVG_PERF_COUNTER(cnt, sample) \
+ ((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
+#define GET_PERF_COUNTER(cnt) (cnt)
+#define GET_AVG_PERF_COUNTER(cnt) ((cnt) / AVG_FACTOR)
+
+#else
+
+#define NUM_PERF_STATS 0
+#define INC_PERF_COUNTER(cnt) do {} while (0)
+#define ADD_PERF_COUNTER(cnt, add) do {} while (0)
+#define AVG_PERF_COUNTER(cnt, sample) do {} while (0)
+#define GET_PERF_COUNTER(cnt) (0)
+#define GET_AVG_PERF_COUNTER(cnt) (0)
+#endif /* MLX4_EN_PERF_STAT */
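+
+/*
+ * AVG_PERF_COUNTER above keeps an exponential moving average in
+ * integer arithmetic, scaled by AVG_FACTOR to preserve precision:
+ * cnt' = (cnt * (AVG_SIZE - 1) + sample * AVG_FACTOR) / AVG_SIZE,
+ * which is why GET_AVG_PERF_COUNTER() divides the factor back out.
+ */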
+
+/*
+ * Configurables
+ */
+
+enum cq_type {
+ RX = 0,
+ TX = 1,
+};
+
+
+/*
+ * Useful macros
+ */
+#define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x))
+#define XNOR(x, y) (!(x) == !(y))
+#define ILLEGAL_MAC(addr) (addr == 0xffffffffffffULL || addr == 0x0)
+
+
+struct mlx4_en_tx_info {
+ struct sk_buff *skb;
+ u32 nr_txbb;
+ u8 linear;
+ u8 data_offset;
+ u8 inl;
+};
+
+
+#define MLX4_EN_BIT_DESC_OWN 0x80000000
+#define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg)
+#define MLX4_EN_MEMTYPE_PAD 0x100
+#define DS_SIZE sizeof(struct mlx4_wqe_data_seg)
+
+
+struct mlx4_en_tx_desc {
+ struct mlx4_wqe_ctrl_seg ctrl;
+ union {
+ struct mlx4_wqe_data_seg data; /* at least one data segment */
+ struct mlx4_wqe_lso_seg lso;
+ struct mlx4_wqe_inline_seg inl;
+ };
+};
+
+#define MLX4_EN_USE_SRQ 0x01000000
+
+#define MLX4_EN_CX3_LOW_ID 0x1000
+#define MLX4_EN_CX3_HIGH_ID 0x1005
+
+struct mlx4_en_rx_alloc {
+ struct page *page;
+ u16 offset;
+};
+
+struct mlx4_en_tx_ring {
+ struct mlx4_hwq_resources wqres;
+ u32 size; /* number of TXBBs */
+ u32 size_mask;
+ u16 stride;
+ u16 cqn; /* index of port CQ associated with this ring */
+ u32 prod;
+ u32 cons;
+ u32 buf_size;
+ u32 doorbell_qpn;
+ void *buf;
+ u16 poll_cnt;
+ int blocked;
+ struct mlx4_en_tx_info *tx_info;
+ u8 *bounce_buf;
+ u32 last_nr_txbb;
+ struct mlx4_qp qp;
+ struct mlx4_qp_context context;
+ int qpn;
+ enum mlx4_qp_state qp_state;
+ struct mlx4_srq dummy;
+ unsigned long bytes;
+ unsigned long packets;
+ spinlock_t comp_lock;
+ struct mlx4_bf bf;
+ bool bf_enabled;
+};
+
+struct mlx4_en_rx_desc {
+ /* actual number of entries depends on rx ring stride */
+ struct mlx4_wqe_data_seg data[0];
+};
+
+struct mlx4_en_rx_ring {
+ struct mlx4_hwq_resources wqres;
+ struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
+ u32 size; /* number of Rx descs */
+ u32 actual_size;
+ u32 size_mask;
+ u16 stride;
+ u16 log_stride;
+ u16 cqn; /* index of port CQ associated with this ring */
+ u32 prod;
+ u32 cons;
+ u32 buf_size;
+ void *buf;
+ void *rx_info;
+ unsigned long bytes;
+ unsigned long packets;
+};
+
+
+static inline int mlx4_en_can_lro(__be16 status)
+{
+ return (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+ MLX4_CQE_STATUS_IPV4F |
+ MLX4_CQE_STATUS_IPV6 |
+ MLX4_CQE_STATUS_IPV4OPT |
+ MLX4_CQE_STATUS_TCP |
+ MLX4_CQE_STATUS_UDP |
+ MLX4_CQE_STATUS_IPOK)) ==
+ cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+ MLX4_CQE_STATUS_IPOK |
+ MLX4_CQE_STATUS_TCP);
+}
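+
+/*
+ * The test above is the usual mask-and-compare idiom: out of all the
+ * status bits in the mask, exactly IPV4 | IPOK | TCP must be set, so
+ * LRO is attempted only for well-formed, non-fragmented IPv4 TCP
+ * packets without IP options.
+ */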
+
+struct mlx4_en_cq {
+ struct mlx4_cq mcq;
+ struct mlx4_hwq_resources wqres;
+ int ring;
+ spinlock_t lock;
+ struct net_device *dev;
+ struct napi_struct napi;
+ /* Per-core Tx cq processing support */
+ struct timer_list timer;
+ int size;
+ int buf_size;
+ unsigned vector;
+ enum cq_type is_tx;
+ u16 moder_time;
+ u16 moder_cnt;
+ struct mlx4_cqe *buf;
+#define MLX4_EN_OPCODE_ERROR 0x1e
+};
+
+struct mlx4_en_port_profile {
+ u32 flags;
+ u32 tx_ring_num;
+ u32 rx_ring_num;
+ u32 tx_ring_size;
+ u32 rx_ring_size;
+ u8 rx_pause;
+ u8 rx_ppp;
+ u8 tx_pause;
+ u8 tx_ppp;
+};
+
+struct mlx4_en_profile {
+ int rss_xor;
+ int tcp_rss;
+ int udp_rss;
+ u8 rss_mask;
+ u32 active_ports;
+ u32 small_pkt_int;
+ u8 no_reset;
+ struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
+};
+
+struct mlx4_en_dev {
+ struct mlx4_dev *dev;
+ struct pci_dev *pdev;
+ struct mutex state_lock;
+ struct net_device *pndev[MLX4_MAX_PORTS + 1];
+ u32 port_cnt;
+ bool device_up;
+ struct mlx4_en_profile profile;
+ u32 LSO_support;
+ struct workqueue_struct *workqueue;
+ struct device *dma_device;
+ void __iomem *uar_map;
+ struct mlx4_uar priv_uar;
+ struct mlx4_mr mr;
+ u32 priv_pdn;
+ spinlock_t uar_lock;
+ u8 mac_removed[MLX4_MAX_PORTS + 1];
+};
+
+
+struct mlx4_en_rss_map {
+ int base_qpn;
+ struct mlx4_qp qps[MAX_RX_RINGS];
+ enum mlx4_qp_state state[MAX_RX_RINGS];
+ struct mlx4_qp indir_qp;
+ enum mlx4_qp_state indir_state;
+};
+
+struct mlx4_en_rss_context {
+ __be32 base_qpn;
+ __be32 default_qpn;
+ u16 reserved;
+ u8 hash_fn;
+ u8 flags;
+ __be32 rss_key[10];
+ __be32 base_qpn_udp;
+};
+
+struct mlx4_en_port_state {
+ int link_state;
+ int link_speed;
+ int transciver;
+};
+
+struct mlx4_en_pkt_stats {
+ unsigned long broadcast;
+ unsigned long rx_prio[8];
+ unsigned long tx_prio[8];
+#define NUM_PKT_STATS 17
+};
+
+struct mlx4_en_port_stats {
+ unsigned long tso_packets;
+ unsigned long queue_stopped;
+ unsigned long wake_queue;
+ unsigned long tx_timeout;
+ unsigned long rx_alloc_failed;
+ unsigned long rx_chksum_good;
+ unsigned long rx_chksum_none;
+ unsigned long tx_chksum_offload;
+#define NUM_PORT_STATS 8
+};
+
+struct mlx4_en_perf_stats {
+ u32 tx_poll;
+ u64 tx_pktsz_avg;
+ u32 inflight_avg;
+ u16 tx_coal_avg;
+ u16 rx_coal_avg;
+ u32 napi_quota;
+#define NUM_PERF_COUNTERS 6
+};
+
+struct mlx4_en_frag_info {
+ u16 frag_size;
+ u16 frag_prefix_size;
+ u16 frag_stride;
+ u16 frag_align;
+ u16 last_offset;
+};
+
+struct mlx4_en_priv {
+ struct mlx4_en_dev *mdev;
+ struct mlx4_en_port_profile *prof;
+ struct net_device *dev;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ struct net_device_stats stats;
+ struct net_device_stats ret_stats;
+ struct mlx4_en_port_state port_state;
+ spinlock_t stats_lock;
+
+ unsigned long last_moder_packets;
+ unsigned long last_moder_tx_packets;
+ unsigned long last_moder_bytes;
+ unsigned long last_moder_jiffies;
+ int last_moder_time;
+ u16 rx_usecs;
+ u16 rx_frames;
+ u16 tx_usecs;
+ u16 tx_frames;
+ u32 pkt_rate_low;
+ u16 rx_usecs_low;
+ u32 pkt_rate_high;
+ u16 rx_usecs_high;
+ u16 sample_interval;
+ u16 adaptive_rx_coal;
+ u32 msg_enable;
+ u32 loopback_ok;
+ u32 validate_loopback;
+
+ struct mlx4_hwq_resources res;
+ int link_state;
+ int last_link_state;
+ bool port_up;
+ int port;
+ int registered;
+ int allocated;
+ int stride;
+ u64 mac;
+ int mac_index;
+ unsigned max_mtu;
+ int base_qpn;
+
+ struct mlx4_en_rss_map rss_map;
+ u32 flags;
+#define MLX4_EN_FLAG_PROMISC 0x1
+#define MLX4_EN_FLAG_MC_PROMISC 0x2
+ u32 tx_ring_num;
+ u32 rx_ring_num;
+ u32 rx_skb_size;
+ struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
+ u16 num_frags;
+ u16 log_rx_info;
+
+ struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS];
+ int tx_vector;
+ struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
+ struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
+ struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
+ struct work_struct mcast_task;
+ struct work_struct mac_task;
+ struct work_struct watchdog_task;
+ struct work_struct linkstate_task;
+ struct delayed_work stats_task;
+ struct mlx4_en_perf_stats pstats;
+ struct mlx4_en_pkt_stats pkstats;
+ struct mlx4_en_port_stats port_stats;
+ char *mc_addrs;
+ int mc_addrs_cnt;
+ struct mlx4_en_stat_out_mbox hw_stats;
+ int vids[128];
+ bool wol;
+};
+
+enum mlx4_en_wol {
+ MLX4_EN_WOL_MAGIC = (1ULL << 61),
+ MLX4_EN_WOL_ENABLED = (1ULL << 62),
+ MLX4_EN_WOL_DO_MODIFY = (1ULL << 63),
+};
+
+
+void mlx4_en_destroy_netdev(struct net_device *dev);
+int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ struct mlx4_en_port_profile *prof);
+
+int mlx4_en_start_port(struct net_device *dev);
+void mlx4_en_stop_port(struct net_device *dev);
+
+void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors);
+int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
+
+int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+ int entries, int ring, enum cq_type mode);
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+ bool reserve_vectors);
+int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+
+void mlx4_en_poll_tx_cq(unsigned long data);
+void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+
+int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ int qpn, u32 size, u16 stride);
+void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
+int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int cq);
+void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring);
+
+int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring,
+ u32 size, u16 stride);
+void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring);
+int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
+void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring);
+int mlx4_en_process_rx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq,
+ int budget);
+int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
+void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
+ int is_tx, int rss, int qpn, int cqn,
+ struct mlx4_qp_context *context);
+void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
+int mlx4_en_map_buffer(struct mlx4_buf *buf);
+void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
+
+void mlx4_en_calc_rx_buf(struct net_device *dev);
+int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
+void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
+int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
+void mlx4_en_rx_irq(struct mlx4_cq *mcq);
+
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
+int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);
+int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
+ u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
+int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
+ u8 promisc);
+
+int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
+int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
+
+#define MLX4_EN_NUM_SELF_TEST 5
+void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
+u64 mlx4_en_mac_to_u64(u8 *addr);
+
+/*
+ * Globals
+ */
+extern const struct ethtool_ops mlx4_en_ethtool_ops;
+
+/*
+ * printk / logging functions
+ */
+
+int en_print(const char *level, const struct mlx4_en_priv *priv,
+ const char *format, ...) __attribute__ ((format (printf, 3, 4)));
+
+#define en_dbg(mlevel, priv, format, arg...) \
+do { \
+ if (NETIF_MSG_##mlevel & priv->msg_enable) \
+ en_print(KERN_DEBUG, priv, format, ##arg); \
+} while (0)
+#define en_warn(priv, format, arg...) \
+ en_print(KERN_WARNING, priv, format, ##arg)
+#define en_err(priv, format, arg...) \
+ en_print(KERN_ERR, priv, format, ##arg)
+#define en_info(priv, format, arg...) \
+ en_print(KERN_INFO, priv, format, ## arg)
+
+#define mlx4_err(mdev, format, arg...) \
+ pr_err("%s %s: " format, DRV_NAME, \
+ dev_name(&mdev->pdev->dev), ##arg)
+#define mlx4_info(mdev, format, arg...) \
+ pr_info("%s %s: " format, DRV_NAME, \
+ dev_name(&mdev->pdev->dev), ##arg)
+#define mlx4_warn(mdev, format, arg...) \
+ pr_warning("%s %s: " format, DRV_NAME, \
+ dev_name(&mdev->pdev->dev), ##arg)
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
new file mode 100644
index 000000000000..9c188bdd7f4f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -0,0 +1,667 @@
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+#include "icm.h"
+
+/*
+ * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_mpt_entry {
+ __be32 flags;
+ __be32 qpn;
+ __be32 key;
+ __be32 pd_flags;
+ __be64 start;
+ __be64 length;
+ __be32 lkey;
+ __be32 win_cnt;
+ u8 reserved1[3];
+ u8 mtt_rep;
+ __be64 mtt_seg;
+ __be32 mtt_sz;
+ __be32 entity_size;
+ __be32 first_byte_offset;
+} __packed;
+
+#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
+#define MLX4_MPT_FLAG_FREE (0x3UL << 28)
+#define MLX4_MPT_FLAG_MIO (1 << 17)
+#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15)
+#define MLX4_MPT_FLAG_PHYSICAL (1 << 9)
+#define MLX4_MPT_FLAG_REGION (1 << 8)
+
+#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27)
+#define MLX4_MPT_PD_FLAG_RAE (1 << 28)
+#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24)
+
+#define MLX4_MPT_STATUS_SW 0xF0
+#define MLX4_MPT_STATUS_HW 0x00
+
+static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
+{
+ int o;
+ int m;
+ u32 seg;
+
+ spin_lock(&buddy->lock);
+
+ for (o = order; o <= buddy->max_order; ++o)
+ if (buddy->num_free[o]) {
+ m = 1 << (buddy->max_order - o);
+ seg = find_first_bit(buddy->bits[o], m);
+ if (seg < m)
+ goto found;
+ }
+
+ spin_unlock(&buddy->lock);
+ return -1;
+
+ found:
+ clear_bit(seg, buddy->bits[o]);
+ --buddy->num_free[o];
+
+ while (o > order) {
+ --o;
+ seg <<= 1;
+ set_bit(seg ^ 1, buddy->bits[o]);
+ ++buddy->num_free[o];
+ }
+
+ spin_unlock(&buddy->lock);
+
+ seg <<= order;
+
+ return seg;
+}
+
+static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
+{
+ seg >>= order;
+
+ spin_lock(&buddy->lock);
+
+ while (test_bit(seg ^ 1, buddy->bits[order])) {
+ clear_bit(seg ^ 1, buddy->bits[order]);
+ --buddy->num_free[order];
+ seg >>= 1;
+ ++order;
+ }
+
+ set_bit(seg, buddy->bits[order]);
+ ++buddy->num_free[order];
+
+ spin_unlock(&buddy->lock);
+}
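+
+/*
+ * A short walk-through of the buddy pair above: with max_order = 2 and
+ * a single free order-2 block, mlx4_buddy_alloc(buddy, 0) takes the
+ * order-2 block, splits it twice (marking segment 1 free at order 1
+ * and segment 1 free at order 0) and returns segment 0; freeing
+ * segment 0 later re-merges the buddies back into one order-2 block.
+ */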
+
+static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
+{
+ int i, s;
+
+ buddy->max_order = max_order;
+ spin_lock_init(&buddy->lock);
+
+ buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
+ GFP_KERNEL);
+ buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int *),
+ GFP_KERNEL);
+ if (!buddy->bits || !buddy->num_free)
+ goto err_out;
+
+ for (i = 0; i <= buddy->max_order; ++i) {
+ s = BITS_TO_LONGS(1 << (buddy->max_order - i));
+ buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
+ if (!buddy->bits[i])
+ goto err_out_free;
+ bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
+ }
+
+ set_bit(0, buddy->bits[buddy->max_order]);
+ buddy->num_free[buddy->max_order] = 1;
+
+ return 0;
+
+err_out_free:
+ for (i = 0; i <= buddy->max_order; ++i)
+ kfree(buddy->bits[i]);
+
+err_out:
+ kfree(buddy->bits);
+ kfree(buddy->num_free);
+
+ return -ENOMEM;
+}
+
+static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
+{
+ int i;
+
+ for (i = 0; i <= buddy->max_order; ++i)
+ kfree(buddy->bits[i]);
+
+ kfree(buddy->bits);
+ kfree(buddy->num_free);
+}
+
+static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+{
+ struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ u32 seg;
+
+ seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order);
+ if (seg == -1)
+ return -1;
+
+ if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg,
+ seg + (1 << order) - 1)) {
+ mlx4_buddy_free(&mr_table->mtt_buddy, seg, order);
+ return -1;
+ }
+
+ return seg;
+}
+
+int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
+ struct mlx4_mtt *mtt)
+{
+ int i;
+
+ if (!npages) {
+ mtt->order = -1;
+ mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
+ return 0;
+ } else
+ mtt->page_shift = page_shift;
+
+ for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
+ ++mtt->order;
+
+ mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
+ if (mtt->first_seg == -1)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_mtt_init);
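+
+/*
+ * The loop in mlx4_mtt_init() computes
+ * order = ceil(log2(npages / mtts_per_seg)); e.g. with
+ * dev->caps.mtts_per_seg = 8 and npages = 20 it yields order 2, so
+ * four segments (32 MTT entries) are reserved from the buddy.
+ */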
+
+void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+{
+ struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+ if (mtt->order < 0)
+ return;
+
+ mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
+ mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg,
+ mtt->first_seg + (1 << mtt->order) - 1);
+}
+EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
+
+u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+{
+ return (u64) mtt->first_seg * dev->caps.mtt_entry_sz;
+}
+EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
+
+static u32 hw_index_to_key(u32 ind)
+{
+ return (ind >> 24) | (ind << 8);
+}
+
+static u32 key_to_hw_index(u32 key)
+{
+ return (key << 24) | (key >> 8);
+}
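+
+/*
+ * hw_index_to_key() and key_to_hw_index() rotate the 32-bit value by
+ * one byte in opposite directions and are exact inverses; e.g. index
+ * 0x00000012 maps to key 0x00001200 and back.
+ */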
+
+static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int mpt_index)
+{
+ return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT,
+ MLX4_CMD_TIME_CLASS_B);
+}
+
+static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int mpt_index)
+{
+ return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
+ !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B);
+}
+
+int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
+ int npages, int page_shift, struct mlx4_mr *mr)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ u32 index;
+ int err;
+
+ index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
+ if (index == -1)
+ return -ENOMEM;
+
+ mr->iova = iova;
+ mr->size = size;
+ mr->pd = pd;
+ mr->access = access;
+ mr->enabled = 0;
+ mr->key = hw_index_to_key(index);
+
+ err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+ if (err)
+ mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
+
+void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err;
+
+ if (mr->enabled) {
+ err = mlx4_HW2SW_MPT(dev, NULL,
+ key_to_hw_index(mr->key) &
+ (dev->caps.num_mpts - 1));
+ if (err)
+ mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
+ }
+
+ mlx4_mtt_cleanup(dev, &mr->mtt);
+ mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key));
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_free);
+
+int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
+{
+ struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mpt_entry *mpt_entry;
+ int err;
+
+ err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+ if (err)
+ return err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ goto err_table;
+ }
+ mpt_entry = mailbox->buf;
+
+ memset(mpt_entry, 0, sizeof *mpt_entry);
+
+ mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO |
+ MLX4_MPT_FLAG_REGION |
+ mr->access);
+
+ mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key));
+ mpt_entry->pd_flags = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
+ mpt_entry->start = cpu_to_be64(mr->iova);
+ mpt_entry->length = cpu_to_be64(mr->size);
+ mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+
+ if (mr->mtt.order < 0) {
+ mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
+ mpt_entry->mtt_seg = 0;
+ } else {
+ mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
+ }
+
+ if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
+ /* fast register MR in free state */
+ mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
+ mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
+ MLX4_MPT_PD_FLAG_RAE);
+ mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) *
+ dev->caps.mtts_per_seg);
+ } else {
+ mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
+ }
+
+ err = mlx4_SW2HW_MPT(dev, mailbox,
+ key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
+ if (err) {
+ mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
+ goto err_cmd;
+ }
+
+ mr->enabled = 1;
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return 0;
+
+err_cmd:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+err_table:
+ mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_enable);
+
+static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ int start_index, int npages, u64 *page_list)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ __be64 *mtts;
+ dma_addr_t dma_handle;
+ int i;
+ int s = start_index * sizeof (u64);
+
+ /* All MTTs must fit in the same page */
+ if (start_index / (PAGE_SIZE / sizeof (u64)) !=
+ (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
+ return -EINVAL;
+
+ if (start_index & (dev->caps.mtts_per_seg - 1))
+ return -EINVAL;
+
+ mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
+ s / dev->caps.mtt_entry_sz, &dma_handle);
+ if (!mtts)
+ return -ENOMEM;
+
+ dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+ npages * sizeof (u64), DMA_TO_DEVICE);
+
+ for (i = 0; i < npages; ++i)
+ mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
+
+ dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
+ npages * sizeof (u64), DMA_TO_DEVICE);
+
+ return 0;
+}
+
+int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ int start_index, int npages, u64 *page_list)
+{
+ int chunk;
+ int err;
+
+ if (mtt->order < 0)
+ return -EINVAL;
+
+ while (npages > 0) {
+ chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
+ err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
+ if (err)
+ return err;
+
+ npages -= chunk;
+ start_index += chunk;
+ page_list += chunk;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_write_mtt);
+
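+/*
+ * Typical use, as a sketch (mydev, npages and the page-aligned DMA
+ * addresses in pages[] are placeholders): build the translation once,
+ * then hand the MTT to a CQ, QP or similar resource:
+ *
+ *	struct mlx4_mtt mtt;
+ *
+ *	err = mlx4_mtt_init(mydev, npages, PAGE_SHIFT, &mtt);
+ *	if (!err)
+ *		err = mlx4_write_mtt(mydev, &mtt, 0, npages, pages);
+ *
+ * The loop above slices the list into chunks of at most one page of
+ * MTT entries, so callers never trip the same-page check in
+ * mlx4_write_mtt_chunk().
+ */
+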
+int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ struct mlx4_buf *buf)
+{
+ u64 *page_list;
+ int err;
+ int i;
+
+ page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ for (i = 0; i < buf->npages; ++i)
+ if (buf->nbufs == 1)
+ page_list[i] = buf->direct.map + (i << buf->page_shift);
+ else
+ page_list[i] = buf->page_list[i].map;
+
+ err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);
+
+ kfree(page_list);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
+
+int mlx4_init_mr_table(struct mlx4_dev *dev)
+{
+ struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ int err;
+
+ err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
+ ~0, dev->caps.reserved_mrws, 0);
+ if (err)
+ return err;
+
+ err = mlx4_buddy_init(&mr_table->mtt_buddy,
+ ilog2(dev->caps.num_mtt_segs));
+ if (err)
+ goto err_buddy;
+
+ if (dev->caps.reserved_mtts) {
+ if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) {
+ mlx4_warn(dev, "MTT table of order %d is too small.\n",
+ mr_table->mtt_buddy.max_order);
+ err = -ENOMEM;
+ goto err_reserve_mtts;
+ }
+ }
+
+ return 0;
+
+err_reserve_mtts:
+ mlx4_buddy_cleanup(&mr_table->mtt_buddy);
+
+err_buddy:
+ mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
+
+ return err;
+}
+
+void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
+{
+ struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+ mlx4_buddy_cleanup(&mr_table->mtt_buddy);
+ mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
+}
+
+static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
+ int npages, u64 iova)
+{
+ int i, page_mask;
+
+ if (npages > fmr->max_pages)
+ return -EINVAL;
+
+ page_mask = (1 << fmr->page_shift) - 1;
+
+ /* We are getting page lists, so the iova must be page aligned. */
+ if (iova & page_mask)
+ return -EINVAL;
+
+ /* Trust the user not to pass misaligned data in page_list */
+ if (0)
+ for (i = 0; i < npages; ++i) {
+ if (page_list[i] & ~page_mask)
+ return -EINVAL;
+ }
+
+ if (fmr->maps >= fmr->max_maps)
+ return -EINVAL;
+
+ return 0;
+}
+
+int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
+ int npages, u64 iova, u32 *lkey, u32 *rkey)
+{
+ u32 key;
+ int i, err;
+
+ err = mlx4_check_fmr(fmr, page_list, npages, iova);
+ if (err)
+ return err;
+
+ ++fmr->maps;
+
+ key = key_to_hw_index(fmr->mr.key);
+ key += dev->caps.num_mpts;
+ *lkey = *rkey = fmr->mr.key = hw_index_to_key(key);
+
+ *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
+
+ /* Make sure MPT status is visible before writing MTT entries */
+ wmb();
+
+ dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
+ npages * sizeof(u64), DMA_TO_DEVICE);
+
+ for (i = 0; i < npages; ++i)
+ fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
+
+ dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
+ npages * sizeof(u64), DMA_TO_DEVICE);
+
+ fmr->mpt->key = cpu_to_be32(key);
+ fmr->mpt->lkey = cpu_to_be32(key);
+ fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
+ fmr->mpt->start = cpu_to_be64(iova);
+
+ /* Make sure MTT entries are visible before setting MPT status */
+ wmb();
+
+ *(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;
+
+ /* Make sure MPT status is visible before consumer can use FMR */
+ wmb();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);
+
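+/*
+ * The status-byte handshake above is the whole remap protocol: pull
+ * the MPT into software ownership, rewrite the MTT entries in place,
+ * then return it to hardware.  A remap cycle, sketched with
+ * placeholder arguments (mydev, fmr, pages, n, iova):
+ *
+ *	mlx4_fmr_unmap(mydev, fmr, &lkey, &rkey);
+ *	err = mlx4_map_phys_fmr(mydev, fmr, pages, n, iova,
+ *				&lkey, &rkey);
+ *
+ * The wmb() calls order the MPT status byte against the MTT writes so
+ * the device never sees a hardware-owned MPT with stale translations.
+ */
+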
+int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
+ int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err = -ENOMEM;
+
+ if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
+ return -EINVAL;
+
+ /* All MTTs must fit in the same page */
+ if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
+ return -EINVAL;
+
+ fmr->page_shift = page_shift;
+ fmr->max_pages = max_pages;
+ fmr->max_maps = max_maps;
+ fmr->maps = 0;
+
+ err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
+ page_shift, &fmr->mr);
+ if (err)
+ return err;
+
+ fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
+ fmr->mr.mtt.first_seg,
+ &fmr->dma_handle);
+ if (!fmr->mtts) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ return 0;
+
+err_free:
+ mlx4_mr_free(dev, &fmr->mr);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
+
+int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err;
+
+ err = mlx4_mr_enable(dev, &fmr->mr);
+ if (err)
+ return err;
+
+ fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
+ key_to_hw_index(fmr->mr.key), NULL);
+ if (!fmr->mpt)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
+
+void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
+ u32 *lkey, u32 *rkey)
+{
+ if (!fmr->maps)
+ return;
+
+ fmr->maps = 0;
+
+ *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
+
+int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
+{
+ if (fmr->maps)
+ return -EBUSY;
+
+ fmr->mr.enabled = 0;
+ mlx4_mr_free(dev, &fmr->mr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_free);
+
+int mlx4_SYNC_TPT(struct mlx4_dev *dev)
+{
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000);
+}
+EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
new file mode 100644
index 000000000000..1286b886dcea
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/io-mapping.h>
+
+#include <asm/page.h>
+
+#include "mlx4.h"
+#include "icm.h"
+
+enum {
+ MLX4_NUM_RESERVED_UARS = 8
+};
+
+int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap);
+ if (*pdn == -1)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
+
+void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn)
+{
+ mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn);
+}
+EXPORT_SYMBOL_GPL(mlx4_pd_free);
+
+int mlx4_init_pd_table(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
+ (1 << 24) - 1, dev->caps.reserved_pds, 0);
+}
+
+void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
+{
+ mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap);
+}
+
+int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
+{
+ uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap);
+ if (uar->index == -1)
+ return -ENOMEM;
+
+ uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
+ uar->map = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
+
+void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
+{
+ mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index);
+}
+EXPORT_SYMBOL_GPL(mlx4_uar_free);
+
+int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_uar *uar;
+ int err = 0;
+ int idx;
+
+ if (!priv->bf_mapping)
+ return -ENOMEM;
+
+ mutex_lock(&priv->bf_mutex);
+ if (!list_empty(&priv->bf_list))
+ uar = list_entry(priv->bf_list.next, struct mlx4_uar, bf_list);
+ else {
+ if (mlx4_bitmap_avail(&priv->uar_table.bitmap) < MLX4_NUM_RESERVED_UARS) {
+ err = -ENOMEM;
+ goto out;
+ }
+ uar = kmalloc(sizeof *uar, GFP_KERNEL);
+ if (!uar) {
+ err = -ENOMEM;
+ goto out;
+ }
+ err = mlx4_uar_alloc(dev, uar);
+ if (err)
+ goto free_kmalloc;
+
+ uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!uar->map) {
+ err = -ENOMEM;
+ goto free_uar;
+ }
+
+ uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT);
+ if (!uar->bf_map) {
+ err = -ENOMEM;
+ goto unmap_uar;
+ }
+ uar->free_bf_bmap = 0;
+ list_add(&uar->bf_list, &priv->bf_list);
+ }
+
+ bf->uar = uar;
+ idx = ffz(uar->free_bf_bmap);
+ uar->free_bf_bmap |= 1 << idx;
+ bf->offset = 0;
+ bf->buf_size = dev->caps.bf_reg_size / 2;
+ bf->reg = uar->bf_map + idx * dev->caps.bf_reg_size;
+ if (uar->free_bf_bmap == (1 << dev->caps.bf_regs_per_page) - 1)
+ list_del_init(&uar->bf_list);
+
+ goto out;
+
+unmap_uar:
+ bf->uar = NULL;
+ iounmap(uar->map);
+
+free_uar:
+ mlx4_uar_free(dev, uar);
+
+free_kmalloc:
+ kfree(uar);
+
+out:
+ mutex_unlock(&priv->bf_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_bf_alloc);
+
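+/*
+ * free_bf_bmap bookkeeping, by example (hypothetical capability
+ * bf_regs_per_page = 4): a fresh UAR page starts at 0000b; three
+ * allocations take the slots ffz() finds -- 0, 1, then 2 -- leaving
+ * 0111b, and a fourth fills the page (1111b), which removes the UAR
+ * from bf_list so the next caller maps a new page.
+ */
+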
+void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int idx;
+
+ if (!bf->uar || !bf->uar->bf_map)
+ return;
+
+ mutex_lock(&priv->bf_mutex);
+ idx = (bf->reg - bf->uar->bf_map) / dev->caps.bf_reg_size;
+ bf->uar->free_bf_bmap &= ~(1 << idx);
+ if (!bf->uar->free_bf_bmap) {
+ if (!list_empty(&bf->uar->bf_list))
+ list_del(&bf->uar->bf_list);
+
+ io_mapping_unmap(bf->uar->bf_map);
+ iounmap(bf->uar->map);
+ mlx4_uar_free(dev, bf->uar);
+ kfree(bf->uar);
+ } else if (list_empty(&bf->uar->bf_list))
+ list_add(&bf->uar->bf_list, &priv->bf_list);
+
+ mutex_unlock(&priv->bf_mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_bf_free);
+
+int mlx4_init_uar_table(struct mlx4_dev *dev)
+{
+ if (dev->caps.num_uars <= 128) {
+ mlx4_err(dev, "Only %d UAR pages (need more than 128)\n",
+ dev->caps.num_uars);
+ mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n");
+ return -ENODEV;
+ }
+
+ return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
+ dev->caps.num_uars, dev->caps.num_uars - 1,
+ max(128, dev->caps.reserved_uars), 0);
+}
+
+void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
+{
+ mlx4_bitmap_cleanup(&mlx4_priv(dev)->uar_table.bitmap);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
new file mode 100644
index 000000000000..609e0ec14cee
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -0,0 +1,487 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+
+#define MLX4_MAC_VALID (1ull << 63)
+#define MLX4_MAC_MASK 0xffffffffffffULL
+
+#define MLX4_VLAN_VALID (1u << 31)
+#define MLX4_VLAN_MASK 0xfff
+
+void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
+{
+ int i;
+
+ mutex_init(&table->mutex);
+ for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+ table->entries[i] = 0;
+ table->refs[i] = 0;
+ }
+ table->max = 1 << dev->caps.log_num_macs;
+ table->total = 0;
+}
+
+void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
+{
+ int i;
+
+ mutex_init(&table->mutex);
+ for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
+ table->entries[i] = 0;
+ table->refs[i] = 0;
+ }
+ table->max = 1 << dev->caps.log_num_vlans;
+ table->total = 0;
+}
+
+static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
+ __be64 *entries)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_mod;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
+
+ in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
+ u64 mac, int *qpn, u8 reserve)
+{
+ struct mlx4_qp qp;
+ u8 gid[16] = {0};
+ int err;
+
+ if (reserve) {
+ err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
+ if (err) {
+ mlx4_err(dev, "Failed to reserve qp for mac registration\n");
+ return err;
+ }
+ }
+ qp.qpn = *qpn;
+
+ mac &= 0xffffffffffffULL;
+ mac = cpu_to_be64(mac << 16);
+ memcpy(&gid[10], &mac, ETH_ALEN);
+ gid[5] = port;
+ gid[7] = MLX4_UC_STEER << 1;
+
+ err = mlx4_qp_attach_common(dev, &qp, gid, 0,
+ MLX4_PROT_ETH, MLX4_UC_STEER);
+ if (err && reserve)
+ mlx4_qp_release_range(dev, *qpn, 1);
+
+ return err;
+}
+
+static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
+ u64 mac, int qpn, u8 free)
+{
+ struct mlx4_qp qp;
+ u8 gid[16] = {0};
+
+ qp.qpn = qpn;
+ mac &= 0xffffffffffffULL;
+ mac = cpu_to_be64(mac << 16);
+ memcpy(&gid[10], &mac, ETH_ALEN);
+ gid[5] = port;
+ gid[7] = MLX4_UC_STEER << 1;
+
+ mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER);
+ if (free)
+ mlx4_qp_release_range(dev, qpn, 1);
+}
+
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ struct mlx4_mac_table *table = &info->mac_table;
+ struct mlx4_mac_entry *entry;
+ int i, err = 0;
+ int free = -1;
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+ err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
+ if (!err) {
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry) {
+ mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
+ return -ENOMEM;
+ }
+ entry->mac = mac;
+ err = radix_tree_insert(&info->mac_tree, *qpn, entry);
+ if (err) {
+ mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
+ return err;
+ }
+ } else
+ return err;
+ }
+ mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
+ mutex_lock(&table->mutex);
+ for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
+ if (free < 0 && !table->refs[i]) {
+ free = i;
+ continue;
+ }
+
+ if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
+ /* MAC already registered, increase reference count */
+ ++table->refs[i];
+ goto out;
+ }
+ }
+
+ if (free < 0) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ mlx4_dbg(dev, "Free MAC index is %d\n", free);
+
+ if (table->total == table->max) {
+ /* No free mac entries */
+ err = -ENOSPC;
+ goto out;
+ }
+
+ /* Register new MAC */
+ table->refs[free] = 1;
+ table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
+
+ err = mlx4_set_port_mac_table(dev, port, table->entries);
+ if (unlikely(err)) {
+ mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
+ table->refs[free] = 0;
+ table->entries[free] = 0;
+ goto out;
+ }
+
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+ *qpn = info->base_qpn + free;
+ ++table->total;
+out:
+ mutex_unlock(&table->mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_register_mac);
+
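+/*
+ * Sketch of the usual pairing (mydev, port and mac are placeholders):
+ *
+ *	int qpn;
+ *
+ *	err = mlx4_register_mac(mydev, port, mac, &qpn, 0);
+ *	...
+ *	mlx4_unregister_mac(mydev, port, qpn);
+ *
+ * The returned qpn doubles as the handle: without unicast steering it
+ * encodes the table slot (base_qpn + index), and with steering it is
+ * the key for the radix tree lookup in mlx4_unregister_mac().
+ */
+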
+static int validate_index(struct mlx4_dev *dev,
+ struct mlx4_mac_table *table, int index)
+{
+ int err = 0;
+
+ if (index < 0 || index >= table->max || !table->entries[index]) {
+ mlx4_warn(dev, "No valid Mac entry for the given index\n");
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int find_index(struct mlx4_dev *dev,
+ struct mlx4_mac_table *table, u64 mac)
+{
+ int i;
+ for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+ if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
+ return i;
+ }
+ /* MAC not found */
+ return -EINVAL;
+}
+
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ struct mlx4_mac_table *table = &info->mac_table;
+ int index = qpn - info->base_qpn;
+ struct mlx4_mac_entry *entry;
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+ entry = radix_tree_lookup(&info->mac_tree, qpn);
+ if (entry) {
+ mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
+ radix_tree_delete(&info->mac_tree, qpn);
+ index = find_index(dev, table, entry->mac);
+ kfree(entry);
+ }
+ }
+
+ mutex_lock(&table->mutex);
+
+ if (validate_index(dev, table, index))
+ goto out;
+
+ /* Drop one reference; free the entry once the count hits zero */
+ if (!(--table->refs[index])) {
+ table->entries[index] = 0;
+ mlx4_set_port_mac_table(dev, port, table->entries);
+ --table->total;
+ }
+out:
+ mutex_unlock(&table->mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
+
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ struct mlx4_mac_table *table = &info->mac_table;
+ int index = qpn - info->base_qpn;
+ struct mlx4_mac_entry *entry;
+ int err;
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+ entry = radix_tree_lookup(&info->mac_tree, qpn);
+ if (!entry)
+ return -EINVAL;
+ index = find_index(dev, table, entry->mac);
+ mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0);
+ entry->mac = new_mac;
+ err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0);
+ if (err || index < 0)
+ return err;
+ }
+
+ mutex_lock(&table->mutex);
+
+ err = validate_index(dev, table, index);
+ if (err)
+ goto out;
+
+ table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
+
+ err = mlx4_set_port_mac_table(dev, port, table->entries);
+ if (unlikely(err)) {
+ mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac);
+ table->entries[index] = 0;
+ }
+out:
+ mutex_unlock(&table->mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_replace_mac);
+
+static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
+ __be32 *entries)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_mod;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
+ in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+
+int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
+{
+ struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+ int i;
+
+ for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
+ if (table->refs[i] &&
+ (vid == (MLX4_VLAN_MASK &
+ be32_to_cpu(table->entries[i])))) {
+ /* VLAN already registered, increase reference count */
+ *idx = i;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
+
+int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+{
+ struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+ int i, err = 0;
+ int free = -1;
+
+ mutex_lock(&table->mutex);
+ for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
+ if (free < 0 && (table->refs[i] == 0)) {
+ free = i;
+ continue;
+ }
+
+ if (table->refs[i] &&
+ (vlan == (MLX4_VLAN_MASK &
+ be32_to_cpu(table->entries[i])))) {
+ /* VLAN already registered, increase reference count */
+ *index = i;
+ ++table->refs[i];
+ goto out;
+ }
+ }
+
+ if (free < 0) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (table->total == table->max) {
+ /* No free vlan entries */
+ err = -ENOSPC;
+ goto out;
+ }
+
+ /* Register new VLAN */
+ table->refs[free] = 1;
+ table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
+
+ err = mlx4_set_port_vlan_table(dev, port, table->entries);
+ if (unlikely(err)) {
+ mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
+ table->refs[free] = 0;
+ table->entries[free] = 0;
+ goto out;
+ }
+
+ *index = free;
+ ++table->total;
+out:
+ mutex_unlock(&table->mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_register_vlan);
+
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+{
+ struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+
+ if (index < MLX4_VLAN_REGULAR) {
+ mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
+ return;
+ }
+
+ mutex_lock(&table->mutex);
+ if (!table->refs[index]) {
+ mlx4_warn(dev, "No vlan entry for index %d\n", index);
+ goto out;
+ }
+ if (--table->refs[index]) {
+ mlx4_dbg(dev, "Have more references for index %d,"
+ "no need to modify vlan table\n", index);
+ goto out;
+ }
+ table->entries[index] = 0;
+ mlx4_set_port_vlan_table(dev, port, table->entries);
+ --table->total;
+out:
+ mutex_unlock(&table->mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
+
+int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
+{
+ struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
+ u8 *inbuf, *outbuf;
+ int err;
+
+ inmailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(inmailbox))
+ return PTR_ERR(inmailbox);
+
+ outmailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(outmailbox)) {
+ mlx4_free_cmd_mailbox(dev, inmailbox);
+ return PTR_ERR(outmailbox);
+ }
+
+ inbuf = inmailbox->buf;
+ outbuf = outmailbox->buf;
+ memset(inbuf, 0, 256);
+ memset(outbuf, 0, 256);
+ inbuf[0] = 1;
+ inbuf[1] = 1;
+ inbuf[2] = 1;
+ inbuf[3] = 1;
+ *(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
+ *(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
+
+ err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+ if (!err)
+ *caps = *(__be32 *) (outbuf + 84);
+ mlx4_free_cmd_mailbox(dev, inmailbox);
+ mlx4_free_cmd_mailbox(dev, outmailbox);
+ return err;
+}
+
+int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+
+ if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+ return 0;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memset(mailbox->buf, 0, 256);
+
+ ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
+ err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
new file mode 100644
index 000000000000..b967647d0c76
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/slab.h>
+
+#include "mlx4.h"
+#include "fw.h"
+
+enum {
+ MLX4_RES_QP,
+ MLX4_RES_RDMARC,
+ MLX4_RES_ALTC,
+ MLX4_RES_AUXC,
+ MLX4_RES_SRQ,
+ MLX4_RES_CQ,
+ MLX4_RES_EQ,
+ MLX4_RES_DMPT,
+ MLX4_RES_CMPT,
+ MLX4_RES_MTT,
+ MLX4_RES_MCG,
+ MLX4_RES_NUM
+};
+
+static const char *res_name[] = {
+ [MLX4_RES_QP] = "QP",
+ [MLX4_RES_RDMARC] = "RDMARC",
+ [MLX4_RES_ALTC] = "ALTC",
+ [MLX4_RES_AUXC] = "AUXC",
+ [MLX4_RES_SRQ] = "SRQ",
+ [MLX4_RES_CQ] = "CQ",
+ [MLX4_RES_EQ] = "EQ",
+ [MLX4_RES_DMPT] = "DMPT",
+ [MLX4_RES_CMPT] = "CMPT",
+ [MLX4_RES_MTT] = "MTT",
+ [MLX4_RES_MCG] = "MCG",
+};
+
+u64 mlx4_make_profile(struct mlx4_dev *dev,
+ struct mlx4_profile *request,
+ struct mlx4_dev_cap *dev_cap,
+ struct mlx4_init_hca_param *init_hca)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource {
+ u64 size;
+ u64 start;
+ int type;
+ int num;
+ int log_num;
+ };
+
+ u64 total_size = 0;
+ struct mlx4_resource *profile;
+ struct mlx4_resource tmp;
+ int i, j;
+
+ profile = kcalloc(MLX4_RES_NUM, sizeof(*profile), GFP_KERNEL);
+ if (!profile)
+ return -ENOMEM;
+
+ profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz;
+ profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz;
+ profile[MLX4_RES_ALTC].size = dev_cap->altc_entry_sz;
+ profile[MLX4_RES_AUXC].size = dev_cap->aux_entry_sz;
+ profile[MLX4_RES_SRQ].size = dev_cap->srq_entry_sz;
+ profile[MLX4_RES_CQ].size = dev_cap->cqc_entry_sz;
+ profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
+ profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
+ profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
+ profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
+ profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE;
+
+ profile[MLX4_RES_QP].num = request->num_qp;
+ profile[MLX4_RES_RDMARC].num = request->num_qp * request->rdmarc_per_qp;
+ profile[MLX4_RES_ALTC].num = request->num_qp;
+ profile[MLX4_RES_AUXC].num = request->num_qp;
+ profile[MLX4_RES_SRQ].num = request->num_srq;
+ profile[MLX4_RES_CQ].num = request->num_cq;
+ profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
+ profile[MLX4_RES_DMPT].num = request->num_mpt;
+ profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
+ profile[MLX4_RES_MTT].num = request->num_mtt;
+ profile[MLX4_RES_MCG].num = request->num_mcg;
+
+ for (i = 0; i < MLX4_RES_NUM; ++i) {
+ profile[i].type = i;
+ profile[i].num = roundup_pow_of_two(profile[i].num);
+ profile[i].log_num = ilog2(profile[i].num);
+ profile[i].size *= profile[i].num;
+ profile[i].size = max(profile[i].size, (u64) PAGE_SIZE);
+ }
+
+ /*
+ * Sort the resources in decreasing order of size. Since they
+ * all have sizes that are powers of 2, we'll be able to keep
+ * resources aligned to their size and pack them without gaps
+ * using the sorted order.
+ */
+ for (i = MLX4_RES_NUM; i > 0; --i)
+ for (j = 1; j < i; ++j) {
+ if (profile[j].size > profile[j - 1].size) {
+ tmp = profile[j];
+ profile[j] = profile[j - 1];
+ profile[j - 1] = tmp;
+ }
+ }
+
+ for (i = 0; i < MLX4_RES_NUM; ++i) {
+ if (profile[i].size) {
+ profile[i].start = total_size;
+ total_size += profile[i].size;
+ }
+
+ if (total_size > dev_cap->max_icm_sz) {
+ mlx4_err(dev, "Profile requires 0x%llx bytes; "
+ "won't fit in 0x%llx bytes of context memory.\n",
+ (unsigned long long) total_size,
+ (unsigned long long) dev_cap->max_icm_sz);
+ kfree(profile);
+ return -ENOMEM;
+ }
+
+ if (profile[i].size)
+ mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, "
+ "size 0x%10llx\n",
+ i, res_name[profile[i].type], profile[i].log_num,
+ (unsigned long long) profile[i].start,
+ (unsigned long long) profile[i].size);
+ }
+
+ mlx4_dbg(dev, "HCA context memory: reserving %d KB\n",
+ (int) (total_size >> 10));
+
+ for (i = 0; i < MLX4_RES_NUM; ++i) {
+ switch (profile[i].type) {
+ case MLX4_RES_QP:
+ dev->caps.num_qps = profile[i].num;
+ init_hca->qpc_base = profile[i].start;
+ init_hca->log_num_qps = profile[i].log_num;
+ break;
+ case MLX4_RES_RDMARC:
+ for (priv->qp_table.rdmarc_shift = 0;
+ request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num;
+ ++priv->qp_table.rdmarc_shift)
+ ; /* nothing */
+ dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift;
+ priv->qp_table.rdmarc_base = (u32) profile[i].start;
+ init_hca->rdmarc_base = profile[i].start;
+ init_hca->log_rd_per_qp = priv->qp_table.rdmarc_shift;
+ break;
+ case MLX4_RES_ALTC:
+ init_hca->altc_base = profile[i].start;
+ break;
+ case MLX4_RES_AUXC:
+ init_hca->auxc_base = profile[i].start;
+ break;
+ case MLX4_RES_SRQ:
+ dev->caps.num_srqs = profile[i].num;
+ init_hca->srqc_base = profile[i].start;
+ init_hca->log_num_srqs = profile[i].log_num;
+ break;
+ case MLX4_RES_CQ:
+ dev->caps.num_cqs = profile[i].num;
+ init_hca->cqc_base = profile[i].start;
+ init_hca->log_num_cqs = profile[i].log_num;
+ break;
+ case MLX4_RES_EQ:
+ dev->caps.num_eqs = profile[i].num;
+ init_hca->eqc_base = profile[i].start;
+ init_hca->log_num_eqs = profile[i].log_num;
+ break;
+ case MLX4_RES_DMPT:
+ dev->caps.num_mpts = profile[i].num;
+ priv->mr_table.mpt_base = profile[i].start;
+ init_hca->dmpt_base = profile[i].start;
+ init_hca->log_mpt_sz = profile[i].log_num;
+ break;
+ case MLX4_RES_CMPT:
+ init_hca->cmpt_base = profile[i].start;
+ break;
+ case MLX4_RES_MTT:
+ dev->caps.num_mtt_segs = profile[i].num;
+ priv->mr_table.mtt_base = profile[i].start;
+ init_hca->mtt_base = profile[i].start;
+ break;
+ case MLX4_RES_MCG:
+ dev->caps.num_mgms = profile[i].num >> 1;
+ dev->caps.num_amgms = profile[i].num >> 1;
+ init_hca->mc_base = profile[i].start;
+ init_hca->log_mc_entry_sz = ilog2(MLX4_MGM_ENTRY_SIZE);
+ init_hca->log_mc_table_sz = profile[i].log_num;
+ init_hca->log_mc_hash_sz = profile[i].log_num - 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /*
+ * PDs don't take any HCA memory, but we assign them as part
+ * of the HCA profile anyway.
+ */
+ dev->caps.num_pds = MLX4_NUM_PDS;
+
+ kfree(profile);
+ return total_size;
+}
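+
+/*
+ * Packing example (hypothetical sizes): resources of 4 MB, 1 MB and
+ * 64 KB laid out in the sorted, descending order start at offsets
+ * 0x000000, 0x400000 and 0x500000.  Every offset is a multiple of
+ * that resource's own power-of-two size, so natural alignment falls
+ * out of the ordering with no padding between regions.
+ */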
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
new file mode 100644
index 000000000000..ec9350e5f21a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -0,0 +1,380 @@
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/gfp.h>
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
+
+#include "mlx4.h"
+#include "icm.h"
+
+void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
+{
+ struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+ struct mlx4_qp *qp;
+
+ spin_lock(&qp_table->lock);
+
+ qp = __mlx4_qp_lookup(dev, qpn);
+ if (qp)
+ atomic_inc(&qp->refcount);
+
+ spin_unlock(&qp_table->lock);
+
+ if (!qp) {
+ mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
+ return;
+ }
+
+ qp->event(qp, event_type);
+
+ if (atomic_dec_and_test(&qp->refcount))
+ complete(&qp->free);
+}
+
+int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+ struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
+ int sqd_event, struct mlx4_qp *qp)
+{
+ static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
+ [MLX4_QP_STATE_RST] = {
+ [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
+ [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
+ [MLX4_QP_STATE_INIT] = MLX4_CMD_RST2INIT_QP,
+ },
+ [MLX4_QP_STATE_INIT] = {
+ [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
+ [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
+ [MLX4_QP_STATE_INIT] = MLX4_CMD_INIT2INIT_QP,
+ [MLX4_QP_STATE_RTR] = MLX4_CMD_INIT2RTR_QP,
+ },
+ [MLX4_QP_STATE_RTR] = {
+ [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
+ [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
+ [MLX4_QP_STATE_RTS] = MLX4_CMD_RTR2RTS_QP,
+ },
+ [MLX4_QP_STATE_RTS] = {
+ [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
+ [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
+ [MLX4_QP_STATE_RTS] = MLX4_CMD_RTS2RTS_QP,
+ [MLX4_QP_STATE_SQD] = MLX4_CMD_RTS2SQD_QP,
+ },
+ [MLX4_QP_STATE_SQD] = {
+ [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
+ [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
+ [MLX4_QP_STATE_RTS] = MLX4_CMD_SQD2RTS_QP,
+ [MLX4_QP_STATE_SQD] = MLX4_CMD_SQD2SQD_QP,
+ },
+ [MLX4_QP_STATE_SQER] = {
+ [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
+ [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
+ [MLX4_QP_STATE_RTS] = MLX4_CMD_SQERR2RTS_QP,
+ },
+ [MLX4_QP_STATE_ERR] = {
+ [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
+ [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
+ }
+ };
+
+ struct mlx4_cmd_mailbox *mailbox;
+ int ret = 0;
+
+ if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
+ !op[cur_state][new_state])
+ return -EINVAL;
+
+ if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
+ return mlx4_cmd(dev, 0, qp->qpn, 2,
+ MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
+ u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
+ context->mtt_base_addr_h = mtt_addr >> 32;
+ context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
+ context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
+ }
+
+ *(__be32 *) mailbox->buf = cpu_to_be32(optpar);
+ memcpy(mailbox->buf + 8, context, sizeof *context);
+
+ ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
+ cpu_to_be32(qp->qpn);
+
+ ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
+ new_state == MLX4_QP_STATE_RST ? 2 : 0,
+ op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_modify);
+
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_qp_table *qp_table = &priv->qp_table;
+ int qpn;
+
+ qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
+ if (qpn == -1)
+ return -ENOMEM;
+
+ *base = qpn;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
+
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_qp_table *qp_table = &priv->qp_table;
+ if (base_qpn < dev->caps.sqp_start + 8)
+ return;
+
+ mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
+
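+/*
+ * QP numbers and QP context are managed separately; a lifecycle
+ * sketch with a placeholder device (mydev):
+ *
+ *	int base;
+ *	struct mlx4_qp qp;
+ *
+ *	err = mlx4_qp_reserve_range(mydev, 1, 1, &base);
+ *	err = mlx4_qp_alloc(mydev, base, &qp);
+ *	...
+ *	mlx4_qp_remove(mydev, &qp);
+ *	mlx4_qp_free(mydev, &qp);
+ *	mlx4_qp_release_range(mydev, base, 1);
+ */
+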
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_qp_table *qp_table = &priv->qp_table;
+ int err;
+
+ if (!qpn)
+ return -EINVAL;
+
+ qp->qpn = qpn;
+
+ err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
+ if (err)
+ goto err_out;
+
+ err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
+ if (err)
+ goto err_put_qp;
+
+ err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
+ if (err)
+ goto err_put_auxc;
+
+ err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
+ if (err)
+ goto err_put_altc;
+
+ err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
+ if (err)
+ goto err_put_rdmarc;
+
+ spin_lock_irq(&qp_table->lock);
+ err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
+ spin_unlock_irq(&qp_table->lock);
+ if (err)
+ goto err_put_cmpt;
+
+ atomic_set(&qp->refcount, 1);
+ init_completion(&qp->free);
+
+ return 0;
+
+err_put_cmpt:
+ mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
+
+err_put_rdmarc:
+ mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
+
+err_put_altc:
+ mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
+
+err_put_auxc:
+ mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
+
+err_put_qp:
+ mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+
+err_out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
+
+void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
+{
+ struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp_table->lock, flags);
+ radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
+ spin_unlock_irqrestore(&qp_table->lock, flags);
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_remove);
+
+void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
+{
+ struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+
+ if (atomic_dec_and_test(&qp->refcount))
+ complete(&qp->free);
+ wait_for_completion(&qp->free);
+
+ mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_free);
+
+static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
+{
+ return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
+ MLX4_CMD_TIME_CLASS_B);
+}
+
+int mlx4_init_qp_table(struct mlx4_dev *dev)
+{
+ struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+ int err;
+ int reserved_from_top = 0;
+
+ spin_lock_init(&qp_table->lock);
+ INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
+
+ /*
+ * We reserve 2 extra QPs per port for the special QPs. The
+ * block of special QPs must be aligned to a multiple of 8, so
+ * round up.
+ */
+ dev->caps.sqp_start =
+ ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
+
+ {
+ int sort[MLX4_NUM_QP_REGION];
+ int i, j, tmp;
+ int last_base = dev->caps.num_qps;
+
+ for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
+ sort[i] = i;
+
+ for (i = MLX4_NUM_QP_REGION; i > 0; --i) {
+ for (j = 2; j < i; ++j) {
+ if (dev->caps.reserved_qps_cnt[sort[j]] >
+ dev->caps.reserved_qps_cnt[sort[j - 1]]) {
+ tmp = sort[j];
+ sort[j] = sort[j - 1];
+ sort[j - 1] = tmp;
+ }
+ }
+ }
+
+ for (i = 1; i < MLX4_NUM_QP_REGION; ++i) {
+ last_base -= dev->caps.reserved_qps_cnt[sort[i]];
+ dev->caps.reserved_qps_base[sort[i]] = last_base;
+ reserved_from_top +=
+ dev->caps.reserved_qps_cnt[sort[i]];
+ }
+ }
+
+ err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
+ (1 << 23) - 1, dev->caps.sqp_start + 8,
+ reserved_from_top);
+ if (err)
+ return err;
+
+ return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start);
+}
+
+void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
+{
+ mlx4_CONF_SPECIAL_QP(dev, 0);
+ mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
+}
+
+int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ struct mlx4_qp_context *context)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
+ MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A);
+ if (!err)
+ memcpy(context, mailbox->buf + 8, sizeof *context);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_query);
+
+int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ struct mlx4_qp_context *context,
+ struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
+{
+ int err;
+ int i;
+ enum mlx4_qp_state states[] = {
+ MLX4_QP_STATE_RST,
+ MLX4_QP_STATE_INIT,
+ MLX4_QP_STATE_RTR,
+ MLX4_QP_STATE_RTS
+ };
+
+ for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
+ context->flags &= cpu_to_be32(~(0xf << 28));
+ context->flags |= cpu_to_be32(states[i + 1] << 28);
+ err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
+ context, 0, 0, qp);
+ if (err) {
+ mlx4_err(dev, "Failed to bring QP to state: "
+ "%d with error: %d\n",
+ states[i + 1], err);
+ return err;
+ }
+
+ *qp_state = states[i + 1];
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);
diff --git a/drivers/net/ethernet/mellanox/mlx4/reset.c b/drivers/net/ethernet/mellanox/mlx4/reset.c
new file mode 100644
index 000000000000..11e7c1cb99bf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/reset.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+
+#include "mlx4.h"
+
+int mlx4_reset(struct mlx4_dev *dev)
+{
+ void __iomem *reset;
+ u32 *hca_header = NULL;
+ int pcie_cap;
+ u16 devctl;
+ u16 linkctl;
+ u16 vendor;
+ unsigned long end;
+ u32 sem;
+ int i;
+ int err = 0;
+
+#define MLX4_RESET_BASE 0xf0000
+#define MLX4_RESET_SIZE 0x400
+#define MLX4_SEM_OFFSET 0x3fc
+#define MLX4_RESET_OFFSET 0x10
+#define MLX4_RESET_VALUE swab32(1)
+
+#define MLX4_SEM_TIMEOUT_JIFFIES (10 * HZ)
+#define MLX4_RESET_TIMEOUT_JIFFIES (2 * HZ)
+
+ /*
+ * Reset the chip. This is somewhat ugly because we have to
+ * save off the PCI header before reset and then restore it
+ * after the chip reboots. We skip config space offsets 22
+ * and 23 since those have a special meaning.
+ */
+
+ /* Do we need to save off the full 4K PCI Express header?? */
+ hca_header = kmalloc(256, GFP_KERNEL);
+ if (!hca_header) {
+ err = -ENOMEM;
+ mlx4_err(dev, "Couldn't allocate memory to save HCA "
+ "PCI header, aborting.\n");
+ goto out;
+ }
+
+ pcie_cap = pci_pcie_cap(dev->pdev);
+
+ for (i = 0; i < 64; ++i) {
+ if (i == 22 || i == 23)
+ continue;
+ if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
+ err = -ENODEV;
+ mlx4_err(dev, "Couldn't save HCA "
+ "PCI header, aborting.\n");
+ goto out;
+ }
+ }
+
+ reset = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_RESET_BASE,
+ MLX4_RESET_SIZE);
+ if (!reset) {
+ err = -ENOMEM;
+ mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n");
+ goto out;
+ }
+
+ /* grab HW semaphore to lock out flash updates */
+ end = jiffies + MLX4_SEM_TIMEOUT_JIFFIES;
+ do {
+ sem = readl(reset + MLX4_SEM_OFFSET);
+ if (!sem)
+ break;
+
+ msleep(1);
+ } while (time_before(jiffies, end));
+
+ if (sem) {
+ mlx4_err(dev, "Failed to obtain HW semaphore, aborting\n");
+ err = -EAGAIN;
+ iounmap(reset);
+ goto out;
+ }
+
+ /* actually hit reset */
+ writel(MLX4_RESET_VALUE, reset + MLX4_RESET_OFFSET);
+ iounmap(reset);
+
+ /* Docs say to wait one second before accessing device */
+ msleep(1000);
+
+ end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES;
+ do {
+ if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) &&
+ vendor != 0xffff)
+ break;
+
+ msleep(1);
+ } while (time_before(jiffies, end));
+
+ if (vendor == 0xffff) {
+ err = -ENODEV;
+ mlx4_err(dev, "PCI device did not come back after reset, "
+ "aborting.\n");
+ goto out;
+ }
+
+ /* Now restore the PCI headers */
+ if (pcie_cap) {
+ devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4];
+ if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_DEVCTL,
+ devctl)) {
+ err = -ENODEV;
+ mlx4_err(dev, "Couldn't restore HCA PCI Express "
+ "Device Control register, aborting.\n");
+ goto out;
+ }
+ linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
+ if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_LNKCTL,
+ linkctl)) {
+ err = -ENODEV;
+ mlx4_err(dev, "Couldn't restore HCA PCI Express "
+ "Link control register, aborting.\n");
+ goto out;
+ }
+ }
+
+ for (i = 0; i < 16; ++i) {
+ if (i * 4 == PCI_COMMAND)
+ continue;
+
+ if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
+ err = -ENODEV;
+ mlx4_err(dev, "Couldn't restore HCA reg %x, "
+ "aborting.\n", i);
+ goto out;
+ }
+ }
+
+ if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
+ hca_header[PCI_COMMAND / 4])) {
+ err = -ENODEV;
+ mlx4_err(dev, "Couldn't restore HCA COMMAND, "
+ "aborting.\n");
+ goto out;
+ }
+
+out:
+ kfree(hca_header);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c
new file mode 100644
index 000000000000..e2337a7411d9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/sense.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+
+int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
+ enum mlx4_port_type *type)
+{
+ u64 out_param;
+ int err = 0;
+
+ err = mlx4_cmd_imm(dev, 0, &out_param, port, 0,
+ MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B);
+ if (err) {
+ mlx4_err(dev, "Sense command failed for port: %d\n", port);
+ return err;
+ }
+
+ if (out_param > 2) {
+ mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", out_param);
+ return -EINVAL;
+ }
+
+ *type = out_param;
+ return 0;
+}
+
+void mlx4_do_sense_ports(struct mlx4_dev *dev,
+ enum mlx4_port_type *stype,
+ enum mlx4_port_type *defaults)
+{
+ struct mlx4_sense *sense = &mlx4_priv(dev)->sense;
+ int err;
+ int i;
+
+ for (i = 1; i <= dev->caps.num_ports; i++) {
+ stype[i - 1] = 0;
+ if (sense->do_sense_port[i] && sense->sense_allowed[i] &&
+ dev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
+ err = mlx4_SENSE_PORT(dev, i, &stype[i - 1]);
+ if (err)
+ stype[i - 1] = defaults[i - 1];
+ } else
+ stype[i - 1] = defaults[i - 1];
+ }
+
+ /*
+ * Adjust port configuration:
+ * If port 1 sensed nothing and port 2 is IB, set both as IB
+ * If port 2 sensed nothing and port 1 is Eth, set both as Eth
+ */
+ if (stype[0] == MLX4_PORT_TYPE_ETH) {
+ for (i = 1; i < dev->caps.num_ports; i++)
+ stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH;
+ }
+ if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) {
+ for (i = 0; i < dev->caps.num_ports - 1; i++)
+ stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB;
+ }
+
+ /*
+ * If sensed nothing, remain in current configuration.
+ */
+ for (i = 0; i < dev->caps.num_ports; i++)
+ stype[i] = stype[i] ? stype[i] : defaults[i];
+
+}
+
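+/*
+ * Adjustment example: on a two-port device where port 1 sensed
+ * nothing and port 2 sensed IB, the second rule above makes port 1
+ * IB as well; had port 1 sensed Ethernet and port 2 nothing, the
+ * first rule would make both Ethernet.  Ports still unsensed after
+ * both passes fall back to the caller's defaults.
+ */
+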
+static void mlx4_sense_port(struct work_struct *work)
+{
+ struct delayed_work *delay = to_delayed_work(work);
+ struct mlx4_sense *sense = container_of(delay, struct mlx4_sense,
+ sense_poll);
+ struct mlx4_dev *dev = sense->dev;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ enum mlx4_port_type stype[MLX4_MAX_PORTS];
+
+ mutex_lock(&priv->port_mutex);
+ mlx4_do_sense_ports(dev, stype, &dev->caps.port_type[1]);
+
+ if (mlx4_check_port_params(dev, stype))
+ goto sense_again;
+
+ if (mlx4_change_port_types(dev, stype))
+ mlx4_err(dev, "Failed to change port_types\n");
+
+sense_again:
+ mutex_unlock(&priv->port_mutex);
+ queue_delayed_work(mlx4_wq, &sense->sense_poll,
+ round_jiffies_relative(MLX4_SENSE_RANGE));
+}
+
+void mlx4_start_sense(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_sense *sense = &priv->sense;
+
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP))
+ return;
+
+ queue_delayed_work(mlx4_wq, &sense->sense_poll,
+ round_jiffies_relative(MLX4_SENSE_RANGE));
+}
+
+void mlx4_stop_sense(struct mlx4_dev *dev)
+{
+ cancel_delayed_work_sync(&mlx4_priv(dev)->sense.sense_poll);
+}
+
+void mlx4_sense_init(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_sense *sense = &priv->sense;
+ int port;
+
+ sense->dev = dev;
+ for (port = 1; port <= dev->caps.num_ports; port++)
+ sense->do_sense_port[port] = 1;
+
+ INIT_DELAYED_WORK_DEFERRABLE(&sense->sense_poll, mlx4_sense_port);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
new file mode 100644
index 000000000000..3b07b80a0456
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx4/cmd.h>
+#include <linux/gfp.h>
+
+#include "mlx4.h"
+#include "icm.h"
+
+struct mlx4_srq_context {
+ __be32 state_logsize_srqn;
+ u8 logstride;
+ u8 reserved1[3];
+ u8 pg_offset;
+ u8 reserved2[3];
+ u32 reserved3;
+ u8 log_page_size;
+ u8 reserved4[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ __be32 pd;
+ __be16 limit_watermark;
+ __be16 wqe_cnt;
+ u16 reserved5;
+ __be16 wqe_counter;
+ u32 reserved6;
+ __be64 db_rec_addr;
+};
+
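+/*
+ * The reference taken under srq_table->lock keeps the SRQ alive while
+ * its event handler runs; mlx4_srq_free() waits on srq->free until
+ * the last reference is dropped.
+ */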
+void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+ struct mlx4_srq *srq;
+
+ spin_lock(&srq_table->lock);
+
+ srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
+ if (srq)
+ atomic_inc(&srq->refcount);
+
+ spin_unlock(&srq_table->lock);
+
+ if (!srq) {
+ mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
+ return;
+ }
+
+ srq->event(srq, event_type);
+
+ if (atomic_dec_and_test(&srq->refcount))
+ complete(&srq->free);
+}
+
+static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int srq_num)
+{
+ return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
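+/*
+ * HW2SW may be called with a NULL mailbox; the op modifier passed in
+ * that case asks the firmware not to return the SRQ context.
+ */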
+static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int srq_num)
+{
+ return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
+ mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
+{
+ return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
+ MLX4_CMD_TIME_CLASS_B);
+}
+
+static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int srq_num)
+{
+ return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
+int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
+ u64 db_rec, struct mlx4_srq *srq)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_srq_context *srq_context;
+ u64 mtt_addr;
+ int err;
+
+ srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
+ if (srq->srqn == -1)
+ return -ENOMEM;
+
+ err = mlx4_table_get(dev, &srq_table->table, srq->srqn);
+ if (err)
+ goto err_out;
+
+ err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn);
+ if (err)
+ goto err_put;
+
+ spin_lock_irq(&srq_table->lock);
+ err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
+ spin_unlock_irq(&srq_table->lock);
+ if (err)
+ goto err_cmpt_put;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ goto err_radix;
+ }
+
+ srq_context = mailbox->buf;
+ memset(srq_context, 0, sizeof *srq_context);
+
+ srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
+ srq->srqn);
+ srq_context->logstride = srq->wqe_shift - 4;
+ srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
+
+ mtt_addr = mlx4_mtt_addr(dev, mtt);
+ srq_context->mtt_base_addr_h = mtt_addr >> 32;
+ srq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
+ srq_context->pd = cpu_to_be32(pdn);
+ srq_context->db_rec_addr = cpu_to_be64(db_rec);
+
+ err = mlx4_SW2HW_SRQ(dev, mailbox, srq->srqn);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ if (err)
+ goto err_radix;
+
+ atomic_set(&srq->refcount, 1);
+ init_completion(&srq->free);
+
+ return 0;
+
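+/*
+ * Error unwinding: release each resource in the reverse order it was
+ * acquired above.
+ */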
+err_radix:
+ spin_lock_irq(&srq_table->lock);
+ radix_tree_delete(&srq_table->tree, srq->srqn);
+ spin_unlock_irq(&srq_table->lock);
+
+err_cmpt_put:
+ mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn);
+
+err_put:
+ mlx4_table_put(dev, &srq_table->table, srq->srqn);
+
+err_out:
+ mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
+
+void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+ int err;
+
+ err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn);
+ if (err)
+ mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err, srq->srqn);
+
+ spin_lock_irq(&srq_table->lock);
+ radix_tree_delete(&srq_table->tree, srq->srqn);
+ spin_unlock_irq(&srq_table->lock);
+
+ if (atomic_dec_and_test(&srq->refcount))
+ complete(&srq->free);
+ wait_for_completion(&srq->free);
+
+ mlx4_table_put(dev, &srq_table->table, srq->srqn);
+ mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
+}
+EXPORT_SYMBOL_GPL(mlx4_srq_free);
+
+int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark)
+{
+ return mlx4_ARM_SRQ(dev, srq->srqn, limit_watermark);
+}
+EXPORT_SYMBOL_GPL(mlx4_srq_arm);
+
+int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_srq_context *srq_context;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ srq_context = mailbox->buf;
+
+ err = mlx4_QUERY_SRQ(dev, mailbox, srq->srqn);
+ if (err)
+ goto err_out;
+ *limit_watermark = be16_to_cpu(srq_context->limit_watermark);
+
+err_out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_srq_query);
+
+int mlx4_init_srq_table(struct mlx4_dev *dev)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+ int err;
+
+ spin_lock_init(&srq_table->lock);
+ INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
+
+ err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
+ dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
+{
+ mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
+}
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
new file mode 100644
index 000000000000..d10c2e15f4ed
--- /dev/null
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -0,0 +1,69 @@
+#
+# Micrel device configuration
+#
+
+config NET_VENDOR_MICREL
+ bool "Micrel devices"
+ default y
+ depends on (HAS_IOMEM && DMA_ENGINE) || SPI || PCI || HAS_IOMEM || \
+ (ARM && ARCH_KS8695)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Micrel devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_MICREL
+
+config ARM_KS8695_ETHER
+ tristate "KS8695 Ethernet support"
+ depends on ARM && ARCH_KS8695
+ select NET_CORE
+ select MII
+ ---help---
+	  If you wish to compile a kernel for the KS8695 and want to
+	  use the internal Ethernet, then you should answer Y to this.
+
+config KS8842
+ tristate "Micrel KSZ8841/42 with generic bus interface"
+ depends on HAS_IOMEM && DMA_ENGINE
+ ---help---
+	  This platform driver is for the KSZ8841 (1-port) / KS8842 (2-port)
+	  Ethernet switch chips (managed, VLAN, QoS) from Micrel or
+	  Timberdale (FPGA).
+
+config KS8851
+ tristate "Micrel KS8851 SPI"
+ depends on SPI
+ select NET_CORE
+ select MII
+ select CRC32
+ ---help---
+	  SPI driver for the Micrel KS8851 SPI-attached network chip.
+
+config KS8851_MLL
+ tristate "Micrel KS8851 MLL"
+ depends on HAS_IOMEM
+ select NET_CORE
+ select MII
+ ---help---
+	  This platform driver is for the Micrel KS8851 address/data bus
+	  multiplexed network chip.
+
+config KSZ884X_PCI
+ tristate "Micrel KSZ8841/2 PCI"
+ depends on PCI
+ select NET_CORE
+ select MII
+ select CRC32
+ ---help---
+ This PCI driver is for Micrel KSZ8841/KSZ8842 PCI Ethernet chip.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ksz884x.
+
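+# For example, enabling this vendor guard and building the KS8851 SPI
+# driver as a module leaves the following in .config:
+#
+#   CONFIG_NET_VENDOR_MICREL=y
+#   CONFIG_KS8851=m
+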
+endif # NET_VENDOR_MICREL
diff --git a/drivers/net/ethernet/micrel/Makefile b/drivers/net/ethernet/micrel/Makefile
new file mode 100644
index 000000000000..c83e4bc50c73
--- /dev/null
+++ b/drivers/net/ethernet/micrel/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the Micrel network device drivers.
+#
+
+obj-$(CONFIG_ARM_KS8695_ETHER) += ks8695net.o
+obj-$(CONFIG_KS8842) += ks8842.o
+obj-$(CONFIG_KS8851) += ks8851.o
+obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
+obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
new file mode 100644
index 000000000000..70788401d699
--- /dev/null
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -0,0 +1,1656 @@
+/*
+ * Micrel KS8695 (Centaur) Ethernet.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Copyright 2008 Simtec Electronics
+ * Daniel Silverstone <dsilvers@simtec.co.uk>
+ * Vincent Sanders <vince@simtec.co.uk>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <asm/irq.h>
+
+#include <mach/regs-switch.h>
+#include <mach/regs-misc.h>
+#include <asm/mach/irq.h>
+#include <mach/regs-irq.h>
+
+#include "ks8695net.h"
+
+#define MODULENAME "ks8695_ether"
+#define MODULEVERSION "1.02"
+
+/*
+ * Transmit and device reset timeout, default 5 seconds.
+ */
+static int watchdog = 5000;
+
+/* Hardware structures */
+
+/**
+ * struct rx_ring_desc - Receive descriptor ring element
+ * @status: The status of the descriptor element (E.g. who owns it)
+ * @length: The number of bytes in the block pointed to by data_ptr
+ * @data_ptr: The physical address of the data block to receive into
+ * @next_desc: The physical address of the next descriptor element.
+ */
+struct rx_ring_desc {
+ __le32 status;
+ __le32 length;
+ __le32 data_ptr;
+ __le32 next_desc;
+};
+
+/**
+ * struct tx_ring_desc - Transmit descriptor ring element
+ * @owner: Who owns the descriptor
+ * @status: Transmit control flags and the length of the data block
+ * @data_ptr: The physical address of the data block to transmit
+ * @next_desc: The physical address of the next descriptor element.
+ */
+struct tx_ring_desc {
+ __le32 owner;
+ __le32 status;
+ __le32 data_ptr;
+ __le32 next_desc;
+};
+
+/**
+ * struct ks8695_skbuff - sk_buff wrapper for rx/tx rings.
+ * @skb: The buffer in the ring
+ * @dma_ptr: The mapped DMA pointer of the buffer
+ * @length: The number of bytes mapped to dma_ptr
+ */
+struct ks8695_skbuff {
+ struct sk_buff *skb;
+ dma_addr_t dma_ptr;
+ u32 length;
+};
+
+/* Private device structure */
+
+#define MAX_TX_DESC 8
+#define MAX_TX_DESC_MASK 0x7
+#define MAX_RX_DESC 16
+#define MAX_RX_DESC_MASK 0xf
+
+/* The NAPI weight should be larger than the number of RX DMA buffers */
+#define NAPI_WEIGHT 64
+
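+/* 0x700 = 1792 bytes, comfortably above a maximal 1518-byte frame */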
+#define MAX_RXBUF_SIZE 0x700
+
+#define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC)
+#define RX_RING_DMA_SIZE (sizeof(struct rx_ring_desc) * MAX_RX_DESC)
+#define RING_DMA_SIZE (TX_RING_DMA_SIZE + RX_RING_DMA_SIZE)
+
+/**
+ * enum ks8695_dtype - Device type
+ * @KS8695_DTYPE_WAN: This device is a WAN interface
+ * @KS8695_DTYPE_LAN: This device is a LAN interface
+ * @KS8695_DTYPE_HPNA: This device is an HPNA interface
+ */
+enum ks8695_dtype {
+ KS8695_DTYPE_WAN,
+ KS8695_DTYPE_LAN,
+ KS8695_DTYPE_HPNA,
+};
+
+/**
+ * struct ks8695_priv - Private data for the KS8695 Ethernet
+ * @in_suspend: Flag to indicate if we're suspending/resuming
+ * @ndev: The net_device for this interface
+ * @dev: The platform device object for this interface
+ * @dtype: The type of this device
+ * @io_regs: The ioremapped registers for this interface
+ * @napi: The NAPI structure used for RX polling
+ * @rx_irq_name: The textual name of the RX IRQ from the platform data
+ * @tx_irq_name: The textual name of the TX IRQ from the platform data
+ * @link_irq_name: The textual name of the link IRQ from the
+ * platform data if available
+ * @rx_irq: The IRQ number for the RX IRQ
+ * @tx_irq: The IRQ number for the TX IRQ
+ * @link_irq: The IRQ number for the link IRQ if available
+ * @regs_req: The resource request for the registers region
+ * @phyiface_req: The resource request for the phy/switch region
+ * if available
+ * @phyiface_regs: The ioremapped registers for the phy/switch if available
+ * @ring_base: The base pointer of the dma coherent memory for the rings
+ * @ring_base_dma: The DMA mapped equivalent of ring_base
+ * @tx_ring: The pointer in ring_base of the TX ring
+ * @tx_ring_used: The number of slots in the TX ring which are occupied
+ * @tx_ring_next_slot: The next slot to fill in the TX ring
+ * @tx_ring_dma: The DMA mapped equivalent of tx_ring
+ * @tx_buffers: The sk_buff mappings for the TX ring
+ * @txq_lock: A lock to protect tx_buffers, tx_ring_used, etc.
+ * @rx_ring: The pointer in ring_base of the RX ring
+ * @rx_ring_dma: The DMA mapped equivalent of rx_ring
+ * @rx_buffers: The sk_buff mappings for the RX ring
+ * @next_rx_desc_read: The next RX descriptor to read from on IRQ
+ * @rx_lock: A lock to protect the RX IRQ handler and NAPI poll paths
+ * @msg_enable: The flags for which messages to emit
+ */
+struct ks8695_priv {
+ int in_suspend;
+ struct net_device *ndev;
+ struct device *dev;
+ enum ks8695_dtype dtype;
+ void __iomem *io_regs;
+
+ struct napi_struct napi;
+
+ const char *rx_irq_name, *tx_irq_name, *link_irq_name;
+ int rx_irq, tx_irq, link_irq;
+
+ struct resource *regs_req, *phyiface_req;
+ void __iomem *phyiface_regs;
+
+ void *ring_base;
+ dma_addr_t ring_base_dma;
+
+ struct tx_ring_desc *tx_ring;
+ int tx_ring_used;
+ int tx_ring_next_slot;
+ dma_addr_t tx_ring_dma;
+ struct ks8695_skbuff tx_buffers[MAX_TX_DESC];
+ spinlock_t txq_lock;
+
+ struct rx_ring_desc *rx_ring;
+ dma_addr_t rx_ring_dma;
+ struct ks8695_skbuff rx_buffers[MAX_RX_DESC];
+ int next_rx_desc_read;
+ spinlock_t rx_lock;
+
+ int msg_enable;
+};
+
+/* Register access */
+
+/**
+ * ks8695_readreg - Read from a KS8695 ethernet register
+ * @ksp: The device to read from
+ * @reg: The register to read
+ */
+static inline u32
+ks8695_readreg(struct ks8695_priv *ksp, int reg)
+{
+ return readl(ksp->io_regs + reg);
+}
+
+/**
+ * ks8695_writereg - Write to a KS8695 ethernet register
+ * @ksp: The device to write to
+ * @reg: The register to write
+ * @value: The value to write to the register
+ */
+static inline void
+ks8695_writereg(struct ks8695_priv *ksp, int reg, u32 value)
+{
+ writel(value, ksp->io_regs + reg);
+}
+
+/* Utility functions */
+
+/**
+ * ks8695_port_type - Retrieve port-type as user-friendly string
+ * @ksp: The device to return the type for
+ *
+ * Returns a string indicating which of the WAN, LAN or HPNA
+ * ports this device is likely to represent.
+ */
+static const char *
+ks8695_port_type(struct ks8695_priv *ksp)
+{
+ switch (ksp->dtype) {
+ case KS8695_DTYPE_LAN:
+ return "LAN";
+ case KS8695_DTYPE_WAN:
+ return "WAN";
+ case KS8695_DTYPE_HPNA:
+ return "HPNA";
+ }
+
+ return "UNKNOWN";
+}
+
+/**
+ * ks8695_update_mac - Update the MAC registers in the device
+ * @ksp: The device to update
+ *
+ * Updates the MAC registers in the KS8695 device from the address in the
+ * net_device structure associated with this interface.
+ */
+static void
+ks8695_update_mac(struct ks8695_priv *ksp)
+{
+ /* Update the HW with the MAC from the net_device */
+ struct net_device *ndev = ksp->ndev;
+ u32 machigh, maclow;
+
+ maclow = ((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
+ (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5] << 0));
+ machigh = ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1] << 0));
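+	/* e.g. 00:11:22:33:44:55 packs as MAH = 0x0011, MAL = 0x22334455 */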
+
+ ks8695_writereg(ksp, KS8695_MAL, maclow);
+ ks8695_writereg(ksp, KS8695_MAH, machigh);
+
+}
+
+/**
+ * ks8695_refill_rxbuffers - Re-fill the RX buffer ring
+ * @ksp: The device to refill
+ *
+ * Iterates the RX ring of the device looking for empty slots.
+ * For each empty slot, we allocate and map a new SKB and give it
+ * to the hardware.
+ * This can be called from interrupt context safely.
+ */
+static void
+ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
+{
+ /* Run around the RX ring, filling in any missing sk_buff's */
+ int buff_n;
+
+ for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
+ if (!ksp->rx_buffers[buff_n].skb) {
+ struct sk_buff *skb = dev_alloc_skb(MAX_RXBUF_SIZE);
+ dma_addr_t mapping;
+
+ ksp->rx_buffers[buff_n].skb = skb;
+ if (skb == NULL) {
+ /* Failed to allocate one, perhaps
+ * we'll try again later.
+ */
+ break;
+ }
+
+ mapping = dma_map_single(ksp->dev, skb->data,
+ MAX_RXBUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ksp->dev, mapping))) {
+ /* Failed to DMA map this SKB, try later */
+ dev_kfree_skb_irq(skb);
+ ksp->rx_buffers[buff_n].skb = NULL;
+ break;
+ }
+ ksp->rx_buffers[buff_n].dma_ptr = mapping;
+ skb->dev = ksp->ndev;
+ ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE;
+
+ /* Record this into the DMA ring */
+ ksp->rx_ring[buff_n].data_ptr = cpu_to_le32(mapping);
+ ksp->rx_ring[buff_n].length =
+ cpu_to_le32(MAX_RXBUF_SIZE);
+
+ wmb();
+
+ /* And give ownership over to the hardware */
+ ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
+ }
+ }
+}
+
+/* Maximum number of multicast addresses which the KS8695 HW supports */
+#define KS8695_NR_ADDRESSES 16
+
+/**
+ * ks8695_init_partial_multicast - Init the mcast addr registers
+ * @ksp: The device to initialise
+ * @ndev: The network device whose multicast list is to be programmed
+ *
+ * This routine is a helper for ks8695_set_multicast - it writes
+ * the additional-address registers in the KS8695 ethernet device
+ * and cleans up any others left behind.
+ */
+static void
+ks8695_init_partial_multicast(struct ks8695_priv *ksp,
+ struct net_device *ndev)
+{
+ u32 low, high;
+ int i;
+ struct netdev_hw_addr *ha;
+
+ i = 0;
+ netdev_for_each_mc_addr(ha, ndev) {
+ /* Ran out of space in chip? */
+ BUG_ON(i == KS8695_NR_ADDRESSES);
+
+ low = (ha->addr[2] << 24) | (ha->addr[3] << 16) |
+ (ha->addr[4] << 8) | (ha->addr[5]);
+ high = (ha->addr[0] << 8) | (ha->addr[1]);
+
+ ks8695_writereg(ksp, KS8695_AAL_(i), low);
+ ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
+ i++;
+ }
+
+ /* Clear the remaining Additional Station Addresses */
+ for (; i < KS8695_NR_ADDRESSES; i++) {
+ ks8695_writereg(ksp, KS8695_AAL_(i), 0);
+ ks8695_writereg(ksp, KS8695_AAH_(i), 0);
+ }
+}
+
+/* Interrupt handling */
+
+/**
+ * ks8695_tx_irq - Transmit IRQ handler
+ * @irq: The IRQ which went off (ignored)
+ * @dev_id: The net_device for the interrupt
+ *
+ * Process the TX ring, clearing out any transmitted slots.
+ * Allows the net_device to pass us new packets once slots are
+ * freed.
+ */
+static irqreturn_t
+ks8695_tx_irq(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ int buff_n;
+
+ for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
+ if (ksp->tx_buffers[buff_n].skb &&
+ !(ksp->tx_ring[buff_n].owner & cpu_to_le32(TDES_OWN))) {
+ rmb();
+ /* An SKB which is not owned by HW is present */
+ /* Update the stats for the net_device */
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += ksp->tx_buffers[buff_n].length;
+
+ /* Free the packet from the ring */
+ ksp->tx_ring[buff_n].data_ptr = 0;
+
+ /* Free the sk_buff */
+ dma_unmap_single(ksp->dev,
+ ksp->tx_buffers[buff_n].dma_ptr,
+ ksp->tx_buffers[buff_n].length,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
+ ksp->tx_buffers[buff_n].skb = NULL;
+ ksp->tx_ring_used--;
+ }
+ }
+
+ netif_wake_queue(ndev);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ks8695_get_rx_enable_bit - Get rx interrupt enable/status bit
+ * @ksp: Private data for the KS8695 Ethernet
+ *
+ * Per the KS8695 documentation:
+ * Interrupt Enable Register (offset 0xE204)
+ *   Bit 29: WAN MAC Receive Interrupt Enable
+ *   Bit 16: LAN MAC Receive Interrupt Enable
+ * Interrupt Status Register (offset 0xF208)
+ *   Bit 29: WAN MAC Receive Status
+ *   Bit 16: LAN MAC Receive Status
+ * Thus the RX interrupt enable/status bit number equals the RX IRQ
+ * number.
+ */
+static inline u32 ks8695_get_rx_enable_bit(struct ks8695_priv *ksp)
+{
+ return ksp->rx_irq;
+}
+
+/**
+ * ks8695_rx_irq - Receive IRQ handler
+ * @irq: The IRQ which went off (ignored)
+ * @dev_id: The net_device for the interrupt
+ *
+ * Inform NAPI that packet reception needs to be scheduled
+ */
+
+static irqreturn_t
+ks8695_rx_irq(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+
+ spin_lock(&ksp->rx_lock);
+
+ if (napi_schedule_prep(&ksp->napi)) {
+ unsigned long status = readl(KS8695_IRQ_VA + KS8695_INTEN);
+ unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
+		/* Disable the RX interrupt */
+		status &= ~mask_bit;
+		writel(status, KS8695_IRQ_VA + KS8695_INTEN);
+ __napi_schedule(&ksp->napi);
+ }
+
+ spin_unlock(&ksp->rx_lock);
+ return IRQ_HANDLED;
+}
+
+/**
+ * ks8695_rx - Receive packets called by NAPI poll method
+ * @ksp: Private data for the KS8695 Ethernet
+ * @budget: Number of packets allowed to process
+ */
+static int ks8695_rx(struct ks8695_priv *ksp, int budget)
+{
+ struct net_device *ndev = ksp->ndev;
+ struct sk_buff *skb;
+ int buff_n;
+ u32 flags;
+ int pktlen;
+ int received = 0;
+
+ buff_n = ksp->next_rx_desc_read;
+ while (received < budget
+ && ksp->rx_buffers[buff_n].skb
+ && (!(ksp->rx_ring[buff_n].status &
+ cpu_to_le32(RDES_OWN)))) {
+ rmb();
+ flags = le32_to_cpu(ksp->rx_ring[buff_n].status);
+
+ /* Found an SKB which we own, this means we
+ * received a packet
+ */
+ if ((flags & (RDES_FS | RDES_LS)) !=
+ (RDES_FS | RDES_LS)) {
+ /* This packet is not the first and
+ * the last segment. Therefore it is
+ * a "spanning" packet and we can't
+ * handle it
+ */
+ goto rx_failure;
+ }
+
+ if (flags & (RDES_ES | RDES_RE)) {
+ /* It's an error packet */
+ ndev->stats.rx_errors++;
+ if (flags & RDES_TL)
+ ndev->stats.rx_length_errors++;
+ if (flags & RDES_RF)
+ ndev->stats.rx_length_errors++;
+ if (flags & RDES_CE)
+ ndev->stats.rx_crc_errors++;
+ if (flags & RDES_RE)
+ ndev->stats.rx_missed_errors++;
+
+ goto rx_failure;
+ }
+
+ pktlen = flags & RDES_FLEN;
+ pktlen -= 4; /* Drop the CRC */
+
+ /* Retrieve the sk_buff */
+ skb = ksp->rx_buffers[buff_n].skb;
+
+ /* Clear it from the ring */
+ ksp->rx_buffers[buff_n].skb = NULL;
+ ksp->rx_ring[buff_n].data_ptr = 0;
+
+ /* Unmap the SKB */
+ dma_unmap_single(ksp->dev,
+ ksp->rx_buffers[buff_n].dma_ptr,
+ ksp->rx_buffers[buff_n].length,
+ DMA_FROM_DEVICE);
+
+ /* Relinquish the SKB to the network layer */
+ skb_put(skb, pktlen);
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb(skb);
+
+ /* Record stats */
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += pktlen;
+ goto rx_finished;
+
+rx_failure:
+ /* This ring entry is an error, but we can
+ * re-use the skb
+ */
+ /* Give the ring entry back to the hardware */
+ ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
+rx_finished:
+ received++;
+ buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
+ }
+
+ /* And note which RX descriptor we last did */
+ ksp->next_rx_desc_read = buff_n;
+
+ /* And refill the buffers */
+ ks8695_refill_rxbuffers(ksp);
+
+ /* Kick the RX DMA engine, in case it became suspended */
+ ks8695_writereg(ksp, KS8695_DRSC, 0);
+
+ return received;
+}
+
+
+/**
+ * ks8695_poll - Receive packets by the NAPI poll method
+ * @napi: The NAPI instance being polled
+ * @budget: The maximum number of packets we may process in this call
+ *
+ * Invoked by the network core when it requests new
+ * packets from the driver
+ */
+static int ks8695_poll(struct napi_struct *napi, int budget)
+{
+ struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
+ unsigned long work_done;
+
+ unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
+ unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
+
+ work_done = ks8695_rx(ksp, budget);
+
+ if (work_done < budget) {
+ unsigned long flags;
+ spin_lock_irqsave(&ksp->rx_lock, flags);
+ __napi_complete(napi);
+		/* Re-enable the RX interrupt */
+ writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
+ spin_unlock_irqrestore(&ksp->rx_lock, flags);
+ }
+ return work_done;
+}
+
+/**
+ * ks8695_link_irq - Link change IRQ handler
+ * @irq: The IRQ which went off (ignored)
+ * @dev_id: The net_device for the interrupt
+ *
+ * The WAN interface can generate an IRQ when the link changes,
+ * report this to the net layer and the user.
+ */
+static irqreturn_t
+ks8695_link_irq(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ u32 ctrl;
+
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+ if (ctrl & WMC_WLS) {
+ netif_carrier_on(ndev);
+ if (netif_msg_link(ksp))
+ dev_info(ksp->dev,
+ "%s: Link is now up (10%sMbps/%s-duplex)\n",
+ ndev->name,
+ (ctrl & WMC_WSS) ? "0" : "",
+ (ctrl & WMC_WDS) ? "Full" : "Half");
+ } else {
+ netif_carrier_off(ndev);
+ if (netif_msg_link(ksp))
+ dev_info(ksp->dev, "%s: Link is now down.\n",
+ ndev->name);
+ }
+
+ return IRQ_HANDLED;
+}
+
+
+/* KS8695 Device functions */
+
+/**
+ * ks8695_reset - Reset a KS8695 ethernet interface
+ * @ksp: The interface to reset
+ *
+ * Perform an engine reset of the interface and re-program it
+ * with sensible defaults.
+ */
+static void
+ks8695_reset(struct ks8695_priv *ksp)
+{
+ int reset_timeout = watchdog;
+ /* Issue the reset via the TX DMA control register */
+ ks8695_writereg(ksp, KS8695_DTXC, DTXC_TRST);
+ while (reset_timeout--) {
+ if (!(ks8695_readreg(ksp, KS8695_DTXC) & DTXC_TRST))
+ break;
+ msleep(1);
+ }
+
+ if (reset_timeout < 0) {
+ dev_crit(ksp->dev,
+ "Timeout waiting for DMA engines to reset\n");
+ /* And blithely carry on */
+ }
+
+ /* Definitely wait long enough before attempting to program
+ * the engines
+ */
+ msleep(10);
+
+ /* RX: unicast and broadcast */
+ ks8695_writereg(ksp, KS8695_DRXC, DRXC_RU | DRXC_RB);
+ /* TX: pad and add CRC */
+ ks8695_writereg(ksp, KS8695_DTXC, DTXC_TEP | DTXC_TAC);
+}
+
+/**
+ * ks8695_shutdown - Shut down a KS8695 ethernet interface
+ * @ksp: The interface to shut down
+ *
+ * This disables packet RX/TX, cleans up IRQs, drains the rings,
+ * and basically places the interface into a clean shutdown
+ * state.
+ */
+static void
+ks8695_shutdown(struct ks8695_priv *ksp)
+{
+ u32 ctrl;
+ int buff_n;
+
+ /* Disable packet transmission */
+ ctrl = ks8695_readreg(ksp, KS8695_DTXC);
+ ks8695_writereg(ksp, KS8695_DTXC, ctrl & ~DTXC_TE);
+
+ /* Disable packet reception */
+ ctrl = ks8695_readreg(ksp, KS8695_DRXC);
+ ks8695_writereg(ksp, KS8695_DRXC, ctrl & ~DRXC_RE);
+
+ /* Release the IRQs */
+ free_irq(ksp->rx_irq, ksp->ndev);
+ free_irq(ksp->tx_irq, ksp->ndev);
+ if (ksp->link_irq != -1)
+ free_irq(ksp->link_irq, ksp->ndev);
+
+ /* Throw away any pending TX packets */
+ for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
+ if (ksp->tx_buffers[buff_n].skb) {
+ /* Remove this SKB from the TX ring */
+ ksp->tx_ring[buff_n].owner = 0;
+ ksp->tx_ring[buff_n].status = 0;
+ ksp->tx_ring[buff_n].data_ptr = 0;
+
+ /* Unmap and bin this SKB */
+ dma_unmap_single(ksp->dev,
+ ksp->tx_buffers[buff_n].dma_ptr,
+ ksp->tx_buffers[buff_n].length,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
+ ksp->tx_buffers[buff_n].skb = NULL;
+ }
+ }
+
+ /* Purge the RX buffers */
+ for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
+ if (ksp->rx_buffers[buff_n].skb) {
+ /* Remove the SKB from the RX ring */
+ ksp->rx_ring[buff_n].status = 0;
+ ksp->rx_ring[buff_n].data_ptr = 0;
+
+ /* Unmap and bin the SKB */
+ dma_unmap_single(ksp->dev,
+ ksp->rx_buffers[buff_n].dma_ptr,
+ ksp->rx_buffers[buff_n].length,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_irq(ksp->rx_buffers[buff_n].skb);
+ ksp->rx_buffers[buff_n].skb = NULL;
+ }
+ }
+}
+
+
+/**
+ * ks8695_setup_irq - IRQ setup helper function
+ * @irq: The IRQ number to claim
+ * @irq_name: The name to give the IRQ claimant
+ * @handler: The function to call to handle the IRQ
+ * @ndev: The net_device to pass in as the dev_id argument to the handler
+ *
+ * Return 0 on success.
+ */
+static int
+ks8695_setup_irq(int irq, const char *irq_name,
+ irq_handler_t handler, struct net_device *ndev)
+{
+ int ret;
+
+ ret = request_irq(irq, handler, IRQF_SHARED, irq_name, ndev);
+
+ if (ret) {
+ dev_err(&ndev->dev, "failure to request IRQ %d\n", irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ks8695_init_net - Initialise a KS8695 ethernet interface
+ * @ksp: The interface to initialise
+ *
+ * This routine fills the RX ring, initialises the DMA engines,
+ * allocates the IRQs and then starts the packet TX and RX
+ * engines.
+ */
+static int
+ks8695_init_net(struct ks8695_priv *ksp)
+{
+ int ret;
+ u32 ctrl;
+
+ ks8695_refill_rxbuffers(ksp);
+
+ /* Initialise the DMA engines */
+ ks8695_writereg(ksp, KS8695_RDLB, (u32) ksp->rx_ring_dma);
+ ks8695_writereg(ksp, KS8695_TDLB, (u32) ksp->tx_ring_dma);
+
+ /* Request the IRQs */
+ ret = ks8695_setup_irq(ksp->rx_irq, ksp->rx_irq_name,
+ ks8695_rx_irq, ksp->ndev);
+ if (ret)
+ return ret;
+ ret = ks8695_setup_irq(ksp->tx_irq, ksp->tx_irq_name,
+ ks8695_tx_irq, ksp->ndev);
+ if (ret)
+ return ret;
+ if (ksp->link_irq != -1) {
+ ret = ks8695_setup_irq(ksp->link_irq, ksp->link_irq_name,
+ ks8695_link_irq, ksp->ndev);
+ if (ret)
+ return ret;
+ }
+
+ /* Set up the ring indices */
+ ksp->next_rx_desc_read = 0;
+ ksp->tx_ring_next_slot = 0;
+ ksp->tx_ring_used = 0;
+
+ /* Bring up transmission */
+ ctrl = ks8695_readreg(ksp, KS8695_DTXC);
+ /* Enable packet transmission */
+ ks8695_writereg(ksp, KS8695_DTXC, ctrl | DTXC_TE);
+
+ /* Bring up the reception */
+ ctrl = ks8695_readreg(ksp, KS8695_DRXC);
+ /* Enable packet reception */
+ ks8695_writereg(ksp, KS8695_DRXC, ctrl | DRXC_RE);
+ /* And start the DMA engine */
+ ks8695_writereg(ksp, KS8695_DRSC, 0);
+
+ /* All done */
+ return 0;
+}
+
+/**
+ * ks8695_release_device - HW resource release for KS8695 e-net
+ * @ksp: The device to be freed
+ *
+ * This releases the I/O memory regions, DMA-coherent regions, etc.
+ * which were allocated in ks8695_probe.
+ */
+static void
+ks8695_release_device(struct ks8695_priv *ksp)
+{
+ /* Unmap the registers */
+ iounmap(ksp->io_regs);
+ if (ksp->phyiface_regs)
+ iounmap(ksp->phyiface_regs);
+
+ /* And release the request */
+ release_resource(ksp->regs_req);
+ kfree(ksp->regs_req);
+ if (ksp->phyiface_req) {
+ release_resource(ksp->phyiface_req);
+ kfree(ksp->phyiface_req);
+ }
+
+ /* Free the ring buffers */
+ dma_free_coherent(ksp->dev, RING_DMA_SIZE,
+ ksp->ring_base, ksp->ring_base_dma);
+}
+
+/* Ethtool support */
+
+/**
+ * ks8695_get_msglevel - Get the messages enabled for emission
+ * @ndev: The network device to read from
+ */
+static u32
+ks8695_get_msglevel(struct net_device *ndev)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+
+ return ksp->msg_enable;
+}
+
+/**
+ * ks8695_set_msglevel - Set the messages enabled for emission
+ * @ndev: The network device to configure
+ * @value: The messages to set for emission
+ */
+static void
+ks8695_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+
+ ksp->msg_enable = value;
+}
+
+/**
+ * ks8695_wan_get_settings - Get device-specific settings.
+ * @ndev: The network device to read settings from
+ * @cmd: The ethtool structure to read into
+ */
+static int
+ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ u32 ctrl;
+
+ /* All ports on the KS8695 support these... */
+ cmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_TP | SUPPORTED_MII);
+ cmd->transceiver = XCVR_INTERNAL;
+
+ cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
+ cmd->port = PORT_MII;
+ cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
+ cmd->phy_address = 0;
+
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+ if ((ctrl & WMC_WAND) == 0) {
+ /* auto-negotiation is enabled */
+ cmd->advertising |= ADVERTISED_Autoneg;
+ if (ctrl & WMC_WANA100F)
+ cmd->advertising |= ADVERTISED_100baseT_Full;
+ if (ctrl & WMC_WANA100H)
+ cmd->advertising |= ADVERTISED_100baseT_Half;
+ if (ctrl & WMC_WANA10F)
+ cmd->advertising |= ADVERTISED_10baseT_Full;
+ if (ctrl & WMC_WANA10H)
+ cmd->advertising |= ADVERTISED_10baseT_Half;
+ if (ctrl & WMC_WANAP)
+ cmd->advertising |= ADVERTISED_Pause;
+ cmd->autoneg = AUTONEG_ENABLE;
+
+ ethtool_cmd_speed_set(cmd,
+ (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10);
+ cmd->duplex = (ctrl & WMC_WDS) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ } else {
+ /* auto-negotiation is disabled */
+ cmd->autoneg = AUTONEG_DISABLE;
+
+ ethtool_cmd_speed_set(cmd, ((ctrl & WMC_WANF100) ?
+ SPEED_100 : SPEED_10));
+ cmd->duplex = (ctrl & WMC_WANFF) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ }
+
+ return 0;
+}
+
+/**
+ * ks8695_wan_set_settings - Set device-specific settings.
+ * @ndev: The network device to configure
+ * @cmd: The settings to configure
+ */
+static int
+ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ u32 ctrl;
+
+ if ((cmd->speed != SPEED_10) && (cmd->speed != SPEED_100))
+ return -EINVAL;
+ if ((cmd->duplex != DUPLEX_HALF) && (cmd->duplex != DUPLEX_FULL))
+ return -EINVAL;
+ if (cmd->port != PORT_MII)
+ return -EINVAL;
+ if (cmd->transceiver != XCVR_INTERNAL)
+ return -EINVAL;
+ if ((cmd->autoneg != AUTONEG_DISABLE) &&
+ (cmd->autoneg != AUTONEG_ENABLE))
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ if ((cmd->advertising & (ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full)) == 0)
+ return -EINVAL;
+
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+
+ ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
+ WMC_WANA10F | WMC_WANA10H);
+ if (cmd->advertising & ADVERTISED_100baseT_Full)
+ ctrl |= WMC_WANA100F;
+ if (cmd->advertising & ADVERTISED_100baseT_Half)
+ ctrl |= WMC_WANA100H;
+ if (cmd->advertising & ADVERTISED_10baseT_Full)
+ ctrl |= WMC_WANA10F;
+ if (cmd->advertising & ADVERTISED_10baseT_Half)
+ ctrl |= WMC_WANA10H;
+
+ /* force a re-negotiation */
+ ctrl |= WMC_WANR;
+ writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
+ } else {
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+
+ /* disable auto-negotiation */
+ ctrl |= WMC_WAND;
+ ctrl &= ~(WMC_WANF100 | WMC_WANFF);
+
+ if (cmd->speed == SPEED_100)
+ ctrl |= WMC_WANF100;
+ if (cmd->duplex == DUPLEX_FULL)
+ ctrl |= WMC_WANFF;
+
+ writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
+ }
+
+ return 0;
+}
+
+/**
+ * ks8695_wan_nwayreset - Restart the autonegotiation on the port.
+ * @ndev: The network device to restart autonegotiation on
+ */
+static int
+ks8695_wan_nwayreset(struct net_device *ndev)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ u32 ctrl;
+
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+
+ if ((ctrl & WMC_WAND) == 0)
+ writel(ctrl | WMC_WANR,
+ ksp->phyiface_regs + KS8695_WMC);
+ else
+ /* auto-negotiation not enabled */
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * ks8695_wan_get_pause - Retrieve network pause/flow-control advertising
+ * @ndev: The device to retrieve settings from
+ * @param: The structure to fill out with the information
+ */
+static void
+ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ u32 ctrl;
+
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+
+ /* advertise Pause */
+ param->autoneg = (ctrl & WMC_WANAP);
+
+ /* current Rx Flow-control */
+ ctrl = ks8695_readreg(ksp, KS8695_DRXC);
+ param->rx_pause = (ctrl & DRXC_RFCE);
+
+ /* current Tx Flow-control */
+ ctrl = ks8695_readreg(ksp, KS8695_DTXC);
+ param->tx_pause = (ctrl & DTXC_TFCE);
+}
+
+/**
+ * ks8695_get_drvinfo - Retrieve driver information
+ * @ndev: The network device to retrieve info about
+ * @info: The info structure to fill out.
+ */
+static void
+ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, MODULENAME, sizeof(info->driver));
+ strlcpy(info->version, MODULEVERSION, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(ndev->dev.parent),
+ sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops ks8695_ethtool_ops = {
+ .get_msglevel = ks8695_get_msglevel,
+ .set_msglevel = ks8695_set_msglevel,
+ .get_drvinfo = ks8695_get_drvinfo,
+};
+
+static const struct ethtool_ops ks8695_wan_ethtool_ops = {
+ .get_msglevel = ks8695_get_msglevel,
+ .set_msglevel = ks8695_set_msglevel,
+ .get_settings = ks8695_wan_get_settings,
+ .set_settings = ks8695_wan_set_settings,
+ .nway_reset = ks8695_wan_nwayreset,
+ .get_link = ethtool_op_get_link,
+ .get_pauseparam = ks8695_wan_get_pause,
+ .get_drvinfo = ks8695_get_drvinfo,
+};
+
+/* Network device interface functions */
+
+/**
+ * ks8695_set_mac - Update MAC in net dev and HW
+ * @ndev: The network device to update
+ * @addr: The new MAC address to set
+ */
+static int
+ks8695_set_mac(struct net_device *ndev, void *addr)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ struct sockaddr *address = addr;
+
+ if (!is_valid_ether_addr(address->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
+
+ ks8695_update_mac(ksp);
+
+ dev_dbg(ksp->dev, "%s: Updated MAC address to %pM\n",
+ ndev->name, ndev->dev_addr);
+
+ return 0;
+}
+
+/**
+ * ks8695_set_multicast - Set up the multicast behaviour of the interface
+ * @ndev: The net_device to configure
+ *
+ * This routine, called by the net layer, configures promiscuity
+ * and multicast reception behaviour for the interface.
+ */
+static void
+ks8695_set_multicast(struct net_device *ndev)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ u32 ctrl;
+
+ ctrl = ks8695_readreg(ksp, KS8695_DRXC);
+
+ if (ndev->flags & IFF_PROMISC) {
+ /* enable promiscuous mode */
+ ctrl |= DRXC_RA;
+ } else if (ndev->flags & ~IFF_PROMISC) {
+ /* disable promiscuous mode */
+ ctrl &= ~DRXC_RA;
+ }
+
+ if (ndev->flags & IFF_ALLMULTI) {
+ /* enable all multicast mode */
+ ctrl |= DRXC_RM;
+ } else if (netdev_mc_count(ndev) > KS8695_NR_ADDRESSES) {
+ /* more specific multicast addresses than can be
+ * handled in hardware
+ */
+ ctrl |= DRXC_RM;
+ } else {
+ /* enable specific multicasts */
+ ctrl &= ~DRXC_RM;
+ ks8695_init_partial_multicast(ksp, ndev);
+ }
+
+ ks8695_writereg(ksp, KS8695_DRXC, ctrl);
+}
+
+/**
+ * ks8695_timeout - Handle a network tx/rx timeout.
+ * @ndev: The net_device which timed out.
+ *
+ * A network transaction timed out, reset the device.
+ */
+static void
+ks8695_timeout(struct net_device *ndev)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ ks8695_shutdown(ksp);
+
+ ks8695_reset(ksp);
+
+ ks8695_update_mac(ksp);
+
+	/* We ignore the return value from this call: the interface
+	 * initialised successfully before, so it will probably be okay
+	 * to initialise it again.
+	 */
+ ks8695_init_net(ksp);
+
+ /* Reconfigure promiscuity etc */
+ ks8695_set_multicast(ndev);
+
+ /* And start the TX queue once more */
+ netif_start_queue(ndev);
+}
+
+/**
+ * ks8695_start_xmit - Start a packet transmission
+ * @skb: The packet to transmit
+ * @ndev: The network device to send the packet on
+ *
+ * This routine, called by the net layer, takes ownership of the
+ * sk_buff and adds it to the TX ring. It then kicks the TX DMA
+ * engine to ensure transmission begins.
+ */
+static int
+ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ int buff_n;
+ dma_addr_t dmap;
+
+ spin_lock_irq(&ksp->txq_lock);
+
+ if (ksp->tx_ring_used == MAX_TX_DESC) {
+ /* Somehow we got entered when we have no room */
+ spin_unlock_irq(&ksp->txq_lock);
+ return NETDEV_TX_BUSY;
+ }
+
+ buff_n = ksp->tx_ring_next_slot;
+
+ BUG_ON(ksp->tx_buffers[buff_n].skb);
+
+ dmap = dma_map_single(ksp->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(ksp->dev, dmap))) {
+ /* Failed to DMA map this SKB, give it back for now */
+ spin_unlock_irq(&ksp->txq_lock);
+		dev_dbg(ksp->dev, "%s: Could not map DMA memory for "
+			"transmission, trying later\n", ndev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ ksp->tx_buffers[buff_n].dma_ptr = dmap;
+ /* Mapped okay, store the buffer pointer and length for later */
+ ksp->tx_buffers[buff_n].skb = skb;
+ ksp->tx_buffers[buff_n].length = skb->len;
+
+ /* Fill out the TX descriptor */
+ ksp->tx_ring[buff_n].data_ptr =
+ cpu_to_le32(ksp->tx_buffers[buff_n].dma_ptr);
+ ksp->tx_ring[buff_n].status =
+ cpu_to_le32(TDES_IC | TDES_FS | TDES_LS |
+ (skb->len & TDES_TBS));
+
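+	/* Ensure the descriptor contents are visible before the ownership
+	 * bit below transfers the slot to the MAC.
+	 */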
+ wmb();
+
+ /* Hand it over to the hardware */
+ ksp->tx_ring[buff_n].owner = cpu_to_le32(TDES_OWN);
+
+ if (++ksp->tx_ring_used == MAX_TX_DESC)
+ netif_stop_queue(ndev);
+
+ /* Kick the TX DMA in case it decided to go IDLE */
+ ks8695_writereg(ksp, KS8695_DTSC, 0);
+
+ /* And update the next ring slot */
+ ksp->tx_ring_next_slot = (buff_n + 1) & MAX_TX_DESC_MASK;
+
+ spin_unlock_irq(&ksp->txq_lock);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ks8695_stop - Stop (shutdown) a KS8695 ethernet interface
+ * @ndev: The net_device to stop
+ *
+ * This disables the TX queue and cleans up a KS8695 ethernet
+ * device.
+ */
+static int
+ks8695_stop(struct net_device *ndev)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ napi_disable(&ksp->napi);
+
+ ks8695_shutdown(ksp);
+
+ return 0;
+}
+
+/**
+ * ks8695_open - Open (bring up) a KS8695 ethernet interface
+ * @ndev: The net_device to open
+ *
+ * This resets, configures the MAC, initialises the RX ring and
+ * DMA engines and starts the TX queue for a KS8695 ethernet
+ * device.
+ */
+static int
+ks8695_open(struct net_device *ndev)
+{
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+ int ret;
+
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ ks8695_reset(ksp);
+
+ ks8695_update_mac(ksp);
+
+ ret = ks8695_init_net(ksp);
+ if (ret) {
+ ks8695_shutdown(ksp);
+ return ret;
+ }
+
+ napi_enable(&ksp->napi);
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+/* Platform device driver */
+
+/**
+ * ks8695_init_switch - Init LAN switch to known good defaults.
+ * @ksp: The device to initialise
+ *
+ * This initialises the LAN switch in the KS8695 to a known-good
+ * set of defaults.
+ */
+static void __devinit
+ks8695_init_switch(struct ks8695_priv *ksp)
+{
+ u32 ctrl;
+
+ /* Default value for SEC0 according to datasheet */
+ ctrl = 0x40819e00;
+
+ /* LED0 = Speed LED1 = Link/Activity */
+ ctrl &= ~(SEC0_LLED1S | SEC0_LLED0S);
+ ctrl |= (LLED0S_LINK | LLED1S_LINK_ACTIVITY);
+
+ /* Enable Switch */
+ ctrl |= SEC0_ENABLE;
+
+ writel(ctrl, ksp->phyiface_regs + KS8695_SEC0);
+
+ /* Defaults for SEC1 */
+ writel(0x9400100, ksp->phyiface_regs + KS8695_SEC1);
+}
+
+/**
+ * ks8695_init_wan_phy - Initialise the WAN PHY to sensible defaults
+ * @ksp: The device to initialise
+ *
+ * This initialises a KS8695's WAN phy to sensible values for
+ * autonegotiation etc.
+ */
+static void __devinit
+ks8695_init_wan_phy(struct ks8695_priv *ksp)
+{
+ u32 ctrl;
+
+ /* Support auto-negotiation */
+ ctrl = (WMC_WANAP | WMC_WANA100F | WMC_WANA100H |
+ WMC_WANA10F | WMC_WANA10H);
+
+ /* LED0 = Activity , LED1 = Link */
+ ctrl |= (WLED0S_ACTIVITY | WLED1S_LINK);
+
+ /* Restart Auto-negotiation */
+ ctrl |= WMC_WANR;
+
+ writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
+
+ writel(0, ksp->phyiface_regs + KS8695_WPPM);
+ writel(0, ksp->phyiface_regs + KS8695_PPS);
+}
+
+static const struct net_device_ops ks8695_netdev_ops = {
+ .ndo_open = ks8695_open,
+ .ndo_stop = ks8695_stop,
+ .ndo_start_xmit = ks8695_start_xmit,
+ .ndo_tx_timeout = ks8695_timeout,
+ .ndo_set_mac_address = ks8695_set_mac,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = ks8695_set_multicast,
+};
+
+/**
+ * ks8695_probe - Probe and initialise a KS8695 ethernet interface
+ * @pdev: The platform device to probe
+ *
+ * Initialise a KS8695 ethernet device from platform data.
+ *
+ * This driver requires at least one IORESOURCE_MEM for the
+ * registers and two IORESOURCE_IRQ for the RX and TX IRQs
+ * respectively. It can optionally take an additional
+ * IORESOURCE_MEM for the switch or phy in the case of the lan or
+ * wan ports, and an IORESOURCE_IRQ for the link IRQ for the wan
+ * port.
+ */
+static int __devinit
+ks8695_probe(struct platform_device *pdev)
+{
+ struct ks8695_priv *ksp;
+ struct net_device *ndev;
+ struct resource *regs_res, *phyiface_res;
+ struct resource *rxirq_res, *txirq_res, *linkirq_res;
+ int ret = 0;
+ int buff_n;
+ u32 machigh, maclow;
+
+ /* Initialise a net_device */
+ ndev = alloc_etherdev(sizeof(struct ks8695_priv));
+ if (!ndev) {
+ dev_err(&pdev->dev, "could not allocate device.\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ dev_dbg(&pdev->dev, "ks8695_probe() called\n");
+
+ /* Configure our private structure a little */
+ ksp = netdev_priv(ndev);
+
+ ksp->dev = &pdev->dev;
+ ksp->ndev = ndev;
+ ksp->msg_enable = NETIF_MSG_LINK;
+
+ /* Retrieve resources */
+ regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ phyiface_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+
+ rxirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ txirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ linkirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
+
+ if (!(regs_res && rxirq_res && txirq_res)) {
+ dev_err(ksp->dev, "insufficient resources\n");
+ ret = -ENOENT;
+ goto failure;
+ }
+
+ ksp->regs_req = request_mem_region(regs_res->start,
+ resource_size(regs_res),
+ pdev->name);
+
+ if (!ksp->regs_req) {
+ dev_err(ksp->dev, "cannot claim register space\n");
+ ret = -EIO;
+ goto failure;
+ }
+
+ ksp->io_regs = ioremap(regs_res->start, resource_size(regs_res));
+
+ if (!ksp->io_regs) {
+ dev_err(ksp->dev, "failed to ioremap registers\n");
+ ret = -EINVAL;
+ goto failure;
+ }
+
+ if (phyiface_res) {
+ ksp->phyiface_req =
+ request_mem_region(phyiface_res->start,
+ resource_size(phyiface_res),
+ phyiface_res->name);
+
+ if (!ksp->phyiface_req) {
+ dev_err(ksp->dev,
+ "cannot claim switch register space\n");
+ ret = -EIO;
+ goto failure;
+ }
+
+ ksp->phyiface_regs = ioremap(phyiface_res->start,
+ resource_size(phyiface_res));
+
+ if (!ksp->phyiface_regs) {
+ dev_err(ksp->dev,
+ "failed to ioremap switch registers\n");
+ ret = -EINVAL;
+ goto failure;
+ }
+ }
+
+ ksp->rx_irq = rxirq_res->start;
+ ksp->rx_irq_name = rxirq_res->name ? rxirq_res->name : "Ethernet RX";
+ ksp->tx_irq = txirq_res->start;
+ ksp->tx_irq_name = txirq_res->name ? txirq_res->name : "Ethernet TX";
+ ksp->link_irq = (linkirq_res ? linkirq_res->start : -1);
+ ksp->link_irq_name = (linkirq_res && linkirq_res->name) ?
+ linkirq_res->name : "Ethernet Link";
+
+ /* driver system setup */
+ ndev->netdev_ops = &ks8695_netdev_ops;
+ ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
+
+ netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);
+
+ /* Retrieve the default MAC addr from the chip. */
+ /* The bootloader should have left it in there for us. */
+
+ machigh = ks8695_readreg(ksp, KS8695_MAH);
+ maclow = ks8695_readreg(ksp, KS8695_MAL);
+
+ ndev->dev_addr[0] = (machigh >> 8) & 0xFF;
+ ndev->dev_addr[1] = machigh & 0xFF;
+ ndev->dev_addr[2] = (maclow >> 24) & 0xFF;
+ ndev->dev_addr[3] = (maclow >> 16) & 0xFF;
+ ndev->dev_addr[4] = (maclow >> 8) & 0xFF;
+ ndev->dev_addr[5] = maclow & 0xFF;
+
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please "
+ "set using ifconfig\n", ndev->name);
+
+ /* In order to be efficient memory-wise, we allocate both
+ * rings in one go.
+ */
+ ksp->ring_base = dma_alloc_coherent(&pdev->dev, RING_DMA_SIZE,
+ &ksp->ring_base_dma, GFP_KERNEL);
+ if (!ksp->ring_base) {
+ ret = -ENOMEM;
+ goto failure;
+ }
+
+ /* Specify the TX DMA ring buffer */
+ ksp->tx_ring = ksp->ring_base;
+ ksp->tx_ring_dma = ksp->ring_base_dma;
+
+ /* And initialise the queue's lock */
+ spin_lock_init(&ksp->txq_lock);
+ spin_lock_init(&ksp->rx_lock);
+
+ /* Specify the RX DMA ring buffer */
+ ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE;
+ ksp->rx_ring_dma = ksp->ring_base_dma + TX_RING_DMA_SIZE;
+
+ /* Zero the descriptor rings */
+ memset(ksp->tx_ring, 0, TX_RING_DMA_SIZE);
+ memset(ksp->rx_ring, 0, RX_RING_DMA_SIZE);
+
+ /* Build the rings */
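+	/* Each next_desc points at the following slot; the index mask wraps
+	 * the last descriptor back to slot 0, forming a circular list.
+	 */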
+ for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
+ ksp->tx_ring[buff_n].next_desc =
+ cpu_to_le32(ksp->tx_ring_dma +
+ (sizeof(struct tx_ring_desc) *
+ ((buff_n + 1) & MAX_TX_DESC_MASK)));
+ }
+
+ for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
+ ksp->rx_ring[buff_n].next_desc =
+ cpu_to_le32(ksp->rx_ring_dma +
+ (sizeof(struct rx_ring_desc) *
+ ((buff_n + 1) & MAX_RX_DESC_MASK)));
+ }
+
+ /* Initialise the port (physically) */
+ if (ksp->phyiface_regs && ksp->link_irq == -1) {
+ ks8695_init_switch(ksp);
+ ksp->dtype = KS8695_DTYPE_LAN;
+ SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
+ } else if (ksp->phyiface_regs && ksp->link_irq != -1) {
+ ks8695_init_wan_phy(ksp);
+ ksp->dtype = KS8695_DTYPE_WAN;
+ SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);
+ } else {
+ /* No initialisation since HPNA does not have a PHY */
+ ksp->dtype = KS8695_DTYPE_HPNA;
+ SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
+ }
+
+ /* And bring up the net_device with the net core */
+ platform_set_drvdata(pdev, ndev);
+ ret = register_netdev(ndev);
+
+ if (ret == 0) {
+ dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n",
+ ks8695_port_type(ksp), ndev->dev_addr);
+ } else {
+ /* Report the failure to register the net_device */
+ dev_err(ksp->dev, "ks8695net: failed to register netdev.\n");
+ goto failure;
+ }
+
+ /* All is well */
+ return 0;
+
+ /* Error exit path */
+failure:
+ ks8695_release_device(ksp);
+ free_netdev(ndev);
+
+ return ret;
+}
+
+/**
+ * ks8695_drv_suspend - Suspend a KS8695 ethernet platform device.
+ * @pdev: The device to suspend
+ * @state: The suspend state
+ *
+ * This routine detaches and shuts down a KS8695 ethernet device.
+ */
+static int
+ks8695_drv_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+
+ ksp->in_suspend = 1;
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ ks8695_shutdown(ksp);
+ }
+
+ return 0;
+}
+
+/**
+ * ks8695_drv_resume - Resume a KS8695 ethernet platform device.
+ * @pdev: The device to resume
+ *
+ * This routine re-initialises and re-attaches a KS8695 ethernet
+ * device.
+ */
+static int
+ks8695_drv_resume(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ ks8695_reset(ksp);
+ ks8695_init_net(ksp);
+ ks8695_set_multicast(ndev);
+ netif_device_attach(ndev);
+ }
+
+ ksp->in_suspend = 0;
+
+ return 0;
+}
+
+/**
+ * ks8695_drv_remove - Remove a KS8695 net device on driver unload.
+ * @pdev: The platform device to remove
+ *
+ * This unregisters and releases a KS8695 ethernet device.
+ */
+static int __devexit
+ks8695_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ks8695_priv *ksp = netdev_priv(ndev);
+
+ platform_set_drvdata(pdev, NULL);
+ netif_napi_del(&ksp->napi);
+
+ unregister_netdev(ndev);
+ ks8695_release_device(ksp);
+ free_netdev(ndev);
+
+ dev_dbg(&pdev->dev, "released and freed device\n");
+ return 0;
+}
+
+static struct platform_driver ks8695_driver = {
+ .driver = {
+ .name = MODULENAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ks8695_probe,
+ .remove = __devexit_p(ks8695_drv_remove),
+ .suspend = ks8695_drv_suspend,
+ .resume = ks8695_drv_resume,
+};
+
+/* Module interface */
+
+static int __init
+ks8695_init(void)
+{
+ printk(KERN_INFO "%s Ethernet driver, V%s\n",
+ MODULENAME, MODULEVERSION);
+
+ return platform_driver_register(&ks8695_driver);
+}
+
+static void __exit
+ks8695_cleanup(void)
+{
+ platform_driver_unregister(&ks8695_driver);
+}
+
+module_init(ks8695_init);
+module_exit(ks8695_cleanup);
+
+MODULE_AUTHOR("Simtec Electronics");
+MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" MODULENAME);
+
+module_param(watchdog, int, 0400);
+MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
diff --git a/drivers/net/ethernet/micrel/ks8695net.h b/drivers/net/ethernet/micrel/ks8695net.h
new file mode 100644
index 000000000000..80eff6ea5163
--- /dev/null
+++ b/drivers/net/ethernet/micrel/ks8695net.h
@@ -0,0 +1,107 @@
+/*
+ * Micrel KS8695 (Centaur) Ethernet.
+ *
+ * Copyright 2008 Simtec Electronics
+ * Daniel Silverstone <dsilvers@simtec.co.uk>
+ * Vincent Sanders <vince@simtec.co.uk>
+ */
+
+#ifndef KS8695NET_H
+#define KS8695NET_H
+
+/* Receive descriptor flags */
+#define RDES_OWN (1 << 31) /* Ownership */
+#define RDES_FS (1 << 30) /* First Descriptor */
+#define RDES_LS (1 << 29) /* Last Descriptor */
+#define RDES_IPE (1 << 28) /* IP Checksum error */
+#define RDES_TCPE (1 << 27) /* TCP Checksum error */
+#define RDES_UDPE (1 << 26) /* UDP Checksum error */
+#define RDES_ES (1 << 25) /* Error summary */
+#define RDES_MF (1 << 24) /* Multicast Frame */
+#define RDES_RE (1 << 19) /* MII Error reported */
+#define RDES_TL (1 << 18) /* Frame too Long */
+#define RDES_RF (1 << 17) /* Runt Frame */
+#define RDES_CE (1 << 16) /* CRC error */
+#define RDES_FT (1 << 15) /* Frame Type */
+#define RDES_FLEN (0x7ff) /* Frame Length */
+
+#define RDES_RER (1 << 25) /* Receive End of Ring */
+#define RDES_RBS (0x7ff) /* Receive Buffer Size */
+
+/* Transmit descriptor flags */
+
+#define TDES_OWN (1 << 31) /* Ownership */
+
+#define TDES_IC (1 << 31) /* Interrupt on Completion */
+#define TDES_FS (1 << 30) /* First Segment */
+#define TDES_LS (1 << 29) /* Last Segment */
+#define TDES_IPCKG (1 << 28) /* IP Checksum generate */
+#define TDES_TCPCKG (1 << 27) /* TCP Checksum generate */
+#define TDES_UDPCKG (1 << 26) /* UDP Checksum generate */
+#define TDES_TER (1 << 25) /* Transmit End of Ring */
+#define TDES_TBS (0x7ff) /* Transmit Buffer Size */
+
+/*
+ * Network controller register offsets
+ */
+#define KS8695_DTXC (0x00) /* DMA Transmit Control */
+#define KS8695_DRXC (0x04) /* DMA Receive Control */
+#define KS8695_DTSC (0x08) /* DMA Transmit Start Command */
+#define KS8695_DRSC (0x0c) /* DMA Receive Start Command */
+#define KS8695_TDLB (0x10) /* Transmit Descriptor List
+ * Base Address
+ */
+#define KS8695_RDLB (0x14) /* Receive Descriptor List
+ * Base Address
+ */
+#define KS8695_MAL (0x18) /* MAC Station Address Low */
+#define KS8695_MAH (0x1c) /* MAC Station Address High */
+#define KS8695_AAL_(n) (0x80 + ((n)*8)) /* MAC Additional
+ * Station Address
+ * (0..15) Low
+ */
+#define KS8695_AAH_(n) (0x84 + ((n)*8)) /* MAC Additional
+ * Station Address
+ * (0..15) High
+ */
+
+
+/* DMA Transmit Control Register */
+#define DTXC_TRST (1 << 31) /* Soft Reset */
+#define DTXC_TBS (0x3f << 24) /* Transmit Burst Size */
+#define DTXC_TUCG (1 << 18) /* Transmit UDP
+ * Checksum Generate
+ */
+#define DTXC_TTCG (1 << 17) /* Transmit TCP
+ * Checksum Generate
+ */
+#define DTXC_TICG (1 << 16) /* Transmit IP
+ * Checksum Generate
+ */
+#define DTXC_TFCE (1 << 9) /* Transmit Flow
+ * Control Enable
+ */
+#define DTXC_TLB (1 << 8) /* Loopback mode */
+#define DTXC_TEP (1 << 2) /* Transmit Enable Padding */
+#define DTXC_TAC (1 << 1) /* Transmit Add CRC */
+#define DTXC_TE (1 << 0) /* TX Enable */
+
+/* DMA Receive Control Register */
+#define DRXC_RBS (0x3f << 24) /* Receive Burst Size */
+#define DRXC_RUCC (1 << 18) /* Receive UDP Checksum check */
+#define DRXC_RTCG (1 << 17) /* Receive TCP Checksum check */
+#define DRXC_RICG (1 << 16) /* Receive IP Checksum check */
+#define DRXC_RFCE (1 << 9) /* Receive Flow Control
+ * Enable
+ */
+#define DRXC_RB (1 << 6) /* Receive Broadcast */
+#define DRXC_RM (1 << 5) /* Receive Multicast */
+#define DRXC_RU (1 << 4) /* Receive Unicast */
+#define DRXC_RERR (1 << 3) /* Receive Error Frame */
+#define DRXC_RA (1 << 2) /* Receive All */
+#define DRXC_RE (1 << 0) /* RX Enable */
+
+/* Additional Station Address High */
+#define AAH_E (1 << 31) /* Address Enabled */
+
+#endif /* KS8695NET_H */
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
new file mode 100644
index 000000000000..4a6ae057e3b1
--- /dev/null
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -0,0 +1,1284 @@
+/*
+ * ks8842.c timberdale KS8842 ethernet driver
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * The Micrel KS8842 behind the timberdale FPGA
+ * The genuine Micrel KS8841/42 device with ISA 16/32bit bus interface
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/ks8842.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+
+#define DRV_NAME "ks8842"
+
+/* Timberdale specific Registers */
+#define REG_TIMB_RST 0x1c
+#define REG_TIMB_FIFO 0x20
+#define REG_TIMB_ISR 0x24
+#define REG_TIMB_IER 0x28
+#define REG_TIMB_IAR 0x2C
+#define REQ_TIMB_DMA_RESUME 0x30
+
+/* KS8842 registers */
+
+#define REG_SELECT_BANK 0x0e
+
+/* bank 0 registers */
+#define REG_QRFCR 0x04
+
+/* bank 2 registers */
+#define REG_MARL 0x00
+#define REG_MARM 0x02
+#define REG_MARH 0x04
+
+/* bank 3 registers */
+#define REG_GRR 0x06
+
+/* bank 16 registers */
+#define REG_TXCR 0x00
+#define REG_TXSR 0x02
+#define REG_RXCR 0x04
+#define REG_TXMIR 0x08
+#define REG_RXMIR 0x0A
+
+/* bank 17 registers */
+#define REG_TXQCR 0x00
+#define REG_RXQCR 0x02
+#define REG_TXFDPR 0x04
+#define REG_RXFDPR 0x06
+#define REG_QMU_DATA_LO 0x08
+#define REG_QMU_DATA_HI 0x0A
+
+/* bank 18 registers */
+#define REG_IER 0x00
+#define IRQ_LINK_CHANGE 0x8000
+#define IRQ_TX 0x4000
+#define IRQ_RX 0x2000
+#define IRQ_RX_OVERRUN 0x0800
+#define IRQ_TX_STOPPED 0x0200
+#define IRQ_RX_STOPPED 0x0100
+#define IRQ_RX_ERROR 0x0080
+#define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \
+ IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
+/* When running via timberdale in DMA mode, the RX interrupt should be
+ enabled in the KS8842, but not in the FPGA IP, since the IP handles
+ RX DMA internally.
+ TX interrupts are not needed: TX is handled by the FPGA and the driver
+ is notified via DMA callbacks.
+*/
+#define ENABLED_IRQS_DMA_IP (IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \
+ IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
+#define ENABLED_IRQS_DMA (ENABLED_IRQS_DMA_IP | IRQ_RX)
+#define REG_ISR 0x02
+#define REG_RXSR 0x04
+#define RXSR_VALID 0x8000
+#define RXSR_BROADCAST 0x80
+#define RXSR_MULTICAST 0x40
+#define RXSR_UNICAST 0x20
+#define RXSR_FRAMETYPE 0x08
+#define RXSR_TOO_LONG 0x04
+#define RXSR_RUNT 0x02
+#define RXSR_CRC_ERROR 0x01
+#define RXSR_ERROR (RXSR_TOO_LONG | RXSR_RUNT | RXSR_CRC_ERROR)
+
+/* bank 32 registers */
+#define REG_SW_ID_AND_ENABLE 0x00
+#define REG_SGCR1 0x02
+#define REG_SGCR2 0x04
+#define REG_SGCR3 0x06
+
+/* bank 39 registers */
+#define REG_MACAR1 0x00
+#define REG_MACAR2 0x02
+#define REG_MACAR3 0x04
+
+/* bank 45 registers */
+#define REG_P1MBCR 0x00
+#define REG_P1MBSR 0x02
+
+/* bank 46 registers */
+#define REG_P2MBCR 0x00
+#define REG_P2MBSR 0x02
+
+/* bank 48 registers */
+#define REG_P1CR2 0x02
+
+/* bank 49 registers */
+#define REG_P1CR4 0x02
+#define REG_P1SR 0x04
+
+/* flags passed by platform_device for configuration */
+#define MICREL_KS884X 0x01 /* 0=Timberdale (FPGA), 1=Micrel */
+#define KS884X_16BIT 0x02 /* 1=16bit, 0=32bit */
+
+#define DMA_BUFFER_SIZE 2048
+
+struct ks8842_tx_dma_ctl {
+ struct dma_chan *chan;
+ struct dma_async_tx_descriptor *adesc;
+ void *buf;
+ struct scatterlist sg;
+ int channel;
+};
+
+struct ks8842_rx_dma_ctl {
+ struct dma_chan *chan;
+ struct dma_async_tx_descriptor *adesc;
+ struct sk_buff *skb;
+ struct scatterlist sg;
+ struct tasklet_struct tasklet;
+ int channel;
+};
+
+#define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \
+ ((adapter)->dma_rx.channel != -1))
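+/* Note: ks8842_probe() stores -1 in dma_tx.channel and dma_rx.channel when
+ * the platform data does not provide DMA channels, so this macro doubles as
+ * a "DMA available" test throughout the driver.
+ */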
+
+struct ks8842_adapter {
+ void __iomem *hw_addr;
+ int irq;
+ unsigned long conf_flags; /* copy of platform_device config */
+ struct tasklet_struct tasklet;
+ spinlock_t lock; /* spinlock to be interrupt safe */
+ struct work_struct timeout_work;
+ struct net_device *netdev;
+ struct device *dev;
+ struct ks8842_tx_dma_ctl dma_tx;
+ struct ks8842_rx_dma_ctl dma_rx;
+};
+
+static void ks8842_dma_rx_cb(void *data);
+static void ks8842_dma_tx_cb(void *data);
+
+static inline void ks8842_resume_dma(struct ks8842_adapter *adapter)
+{
+ iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME);
+}
+
+static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank)
+{
+ iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK);
+}
+
+static inline void ks8842_write8(struct ks8842_adapter *adapter, u16 bank,
+ u8 value, int offset)
+{
+ ks8842_select_bank(adapter, bank);
+ iowrite8(value, adapter->hw_addr + offset);
+}
+
+static inline void ks8842_write16(struct ks8842_adapter *adapter, u16 bank,
+ u16 value, int offset)
+{
+ ks8842_select_bank(adapter, bank);
+ iowrite16(value, adapter->hw_addr + offset);
+}
+
+static inline void ks8842_enable_bits(struct ks8842_adapter *adapter, u16 bank,
+ u16 bits, int offset)
+{
+ u16 reg;
+ ks8842_select_bank(adapter, bank);
+ reg = ioread16(adapter->hw_addr + offset);
+ reg |= bits;
+ iowrite16(reg, adapter->hw_addr + offset);
+}
+
+static inline void ks8842_clear_bits(struct ks8842_adapter *adapter, u16 bank,
+ u16 bits, int offset)
+{
+ u16 reg;
+ ks8842_select_bank(adapter, bank);
+ reg = ioread16(adapter->hw_addr + offset);
+ reg &= ~bits;
+ iowrite16(reg, adapter->hw_addr + offset);
+}
+
+static inline void ks8842_write32(struct ks8842_adapter *adapter, u16 bank,
+ u32 value, int offset)
+{
+ ks8842_select_bank(adapter, bank);
+ iowrite32(value, adapter->hw_addr + offset);
+}
+
+static inline u8 ks8842_read8(struct ks8842_adapter *adapter, u16 bank,
+ int offset)
+{
+ ks8842_select_bank(adapter, bank);
+ return ioread8(adapter->hw_addr + offset);
+}
+
+static inline u16 ks8842_read16(struct ks8842_adapter *adapter, u16 bank,
+ int offset)
+{
+ ks8842_select_bank(adapter, bank);
+ return ioread16(adapter->hw_addr + offset);
+}
+
+static inline u32 ks8842_read32(struct ks8842_adapter *adapter, u16 bank,
+ int offset)
+{
+ ks8842_select_bank(adapter, bank);
+ return ioread32(adapter->hw_addr + offset);
+}
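+/* The bank-select write and the register access that follows it are not
+ * atomic; callers serialise them via adapter->lock, and the IRQ and tasklet
+ * paths additionally save and restore REG_SELECT_BANK around their accesses
+ * (see ks8842_tasklet() and ks8842_irq() below).
+ */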
+
+static void ks8842_reset(struct ks8842_adapter *adapter)
+{
+ if (adapter->conf_flags & MICREL_KS884X) {
+ ks8842_write16(adapter, 3, 1, REG_GRR);
+ msleep(10);
+ iowrite16(0, adapter->hw_addr + REG_GRR);
+ } else {
+ /* The KS8842 goes haywire when doing a software reset,
+ * so a workaround in the timberdale IP is implemented to
+ * do a hardware reset instead:
+ ks8842_write16(adapter, 3, 1, REG_GRR);
+ msleep(10);
+ iowrite16(0, adapter->hw_addr + REG_GRR);
+ */
+ iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST);
+ msleep(20);
+ }
+}
+
+static void ks8842_update_link_status(struct net_device *netdev,
+ struct ks8842_adapter *adapter)
+{
+ /* check the status of the link */
+ if (ks8842_read16(adapter, 45, REG_P1MBSR) & 0x4) {
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ } else {
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+ }
+}
+
+static void ks8842_enable_tx(struct ks8842_adapter *adapter)
+{
+ ks8842_enable_bits(adapter, 16, 0x01, REG_TXCR);
+}
+
+static void ks8842_disable_tx(struct ks8842_adapter *adapter)
+{
+ ks8842_clear_bits(adapter, 16, 0x01, REG_TXCR);
+}
+
+static void ks8842_enable_rx(struct ks8842_adapter *adapter)
+{
+ ks8842_enable_bits(adapter, 16, 0x01, REG_RXCR);
+}
+
+static void ks8842_disable_rx(struct ks8842_adapter *adapter)
+{
+ ks8842_clear_bits(adapter, 16, 0x01, REG_RXCR);
+}
+
+static void ks8842_reset_hw(struct ks8842_adapter *adapter)
+{
+ /* reset the HW */
+ ks8842_reset(adapter);
+
+ /* Enable QMU Transmit flow control / transmit padding / Transmit CRC */
+ ks8842_write16(adapter, 16, 0x000E, REG_TXCR);
+
+ /* enable the receiver, uni + multi + broadcast + flow ctrl
+ + crc strip */
+ ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80 | 0x400,
+ REG_RXCR);
+
+ /* TX frame pointer autoincrement */
+ ks8842_write16(adapter, 17, 0x4000, REG_TXFDPR);
+
+ /* RX frame pointer autoincrement */
+ ks8842_write16(adapter, 17, 0x4000, REG_RXFDPR);
+
+ /* RX 2 kb high watermark */
+ ks8842_write16(adapter, 0, 0x1000, REG_QRFCR);
+
+ /* aggressive back off in half duplex */
+ ks8842_enable_bits(adapter, 32, 1 << 8, REG_SGCR1);
+
+ /* enable no excessive collision drop */
+ ks8842_enable_bits(adapter, 32, 1 << 3, REG_SGCR2);
+
+ /* Enable port 1 force flow control / back pressure / transmit / recv */
+ ks8842_write16(adapter, 48, 0x1E07, REG_P1CR2);
+
+ /* restart port auto-negotiation */
+ ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4);
+
+ /* Enable the transmitter */
+ ks8842_enable_tx(adapter);
+
+ /* Enable the receiver */
+ ks8842_enable_rx(adapter);
+
+ /* clear all interrupts */
+ ks8842_write16(adapter, 18, 0xffff, REG_ISR);
+
+ /* enable interrupts */
+ if (KS8842_USE_DMA(adapter)) {
+ /* When running in DMA mode the RX interrupt is not enabled in
+ timberdale because RX data is received via DMA callbacks;
+ it must still be enabled in the KS8842 because it indicates
+ to timberdale when there is RX data for its DMA FIFOs */
+ iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER);
+ ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
+ } else {
+ if (!(adapter->conf_flags & MICREL_KS884X))
+ iowrite16(ENABLED_IRQS,
+ adapter->hw_addr + REG_TIMB_IER);
+ ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
+ }
+ /* enable the switch */
+ ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
+}
+
+static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
+{
+ int i;
+ u16 mac;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i);
+
+ if (adapter->conf_flags & MICREL_KS884X) {
+ /*
+ on the KS884X the MAC address is mirrored into the switch
+ registers in reverse order.
+ */
+
+ mac = ks8842_read16(adapter, 2, REG_MARL);
+ ks8842_write16(adapter, 39, mac, REG_MACAR3);
+ mac = ks8842_read16(adapter, 2, REG_MARM);
+ ks8842_write16(adapter, 39, mac, REG_MACAR2);
+ mac = ks8842_read16(adapter, 2, REG_MARH);
+ ks8842_write16(adapter, 39, mac, REG_MACAR1);
+ } else {
+
+ /* make sure the switch port uses the same MAC as the QMU */
+ mac = ks8842_read16(adapter, 2, REG_MARL);
+ ks8842_write16(adapter, 39, mac, REG_MACAR1);
+ mac = ks8842_read16(adapter, 2, REG_MARM);
+ ks8842_write16(adapter, 39, mac, REG_MACAR2);
+ mac = ks8842_read16(adapter, 2, REG_MARH);
+ ks8842_write16(adapter, 39, mac, REG_MACAR3);
+ }
+}
+
+static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
+{
+ unsigned long flags;
+ unsigned i;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ for (i = 0; i < ETH_ALEN; i++) {
+ ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
+ if (!(adapter->conf_flags & MICREL_KS884X))
+ ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
+ REG_MACAR1 + i);
+ }
+
+ if (adapter->conf_flags & MICREL_KS884X) {
+ /*
+ on the KS884X the MAC address is mirrored into the switch
+ registers in reverse order.
+ */
+
+ u16 mac;
+
+ mac = ks8842_read16(adapter, 2, REG_MARL);
+ ks8842_write16(adapter, 39, mac, REG_MACAR3);
+ mac = ks8842_read16(adapter, 2, REG_MARM);
+ ks8842_write16(adapter, 39, mac, REG_MACAR2);
+ mac = ks8842_read16(adapter, 2, REG_MARH);
+ ks8842_write16(adapter, 39, mac, REG_MACAR1);
+ }
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
+static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
+{
+ return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
+}
+
+static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;
+ u8 *buf = ctl->buf;
+
+ if (ctl->adesc) {
+ netdev_dbg(netdev, "%s: TX ongoing\n", __func__);
+ /* transfer ongoing */
+ return NETDEV_TX_BUSY;
+ }
+
+ sg_dma_len(&ctl->sg) = skb->len + sizeof(u32);
+
+ /* copy data to the TX buffer */
+ /* the control word: port 1 and the length; no IRQ on completion
+ (the DMA callback is used instead) */
+ *buf++ = 0x00;
+ *buf++ = 0x01; /* Port 1 */
+ *buf++ = skb->len & 0xff;
+ *buf++ = (skb->len >> 8) & 0xff;
+ skb_copy_from_linear_data(skb, buf, skb->len);
+
+ dma_sync_single_range_for_device(adapter->dev,
+ sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg),
+ DMA_TO_DEVICE);
+
+ /* make sure the length is a multiple of 4 */
+ if (sg_dma_len(&ctl->sg) % 4)
+ sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
+
+ ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
+ &ctl->sg, 1, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+ if (!ctl->adesc)
+ return NETDEV_TX_BUSY;
+
+ ctl->adesc->callback_param = netdev;
+ ctl->adesc->callback = ks8842_dma_tx_cb;
+ ctl->adesc->tx_submit(ctl->adesc);
+
+ netdev->stats.tx_bytes += skb->len;
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ int len = skb->len;
+
+ netdev_dbg(netdev, "%s: len %u head %p data %p tail %p end %p\n",
+ __func__, skb->len, skb->head, skb->data,
+ skb_tail_pointer(skb), skb_end_pointer(skb));
+
+ /* check FIFO buffer space, we need space for CRC and command bits */
+ if (ks8842_tx_fifo_space(adapter) < len + 8)
+ return NETDEV_TX_BUSY;
+
+ if (adapter->conf_flags & KS884X_16BIT) {
+ u16 *ptr16 = (u16 *)skb->data;
+ ks8842_write16(adapter, 17, 0x8000 | 0x100, REG_QMU_DATA_LO);
+ ks8842_write16(adapter, 17, (u16)len, REG_QMU_DATA_HI);
+ netdev->stats.tx_bytes += len;
+
+ /* copy buffer */
+ while (len > 0) {
+ iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_LO);
+ iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_HI);
+ len -= sizeof(u32);
+ }
+ } else {
+
+ u32 *ptr = (u32 *)skb->data;
+ u32 ctrl;
+ /* the control word, enable IRQ, port 1 and the length */
+ ctrl = 0x8000 | 0x100 | (len << 16);
+ ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO);
+
+ netdev->stats.tx_bytes += len;
+
+ /* copy buffer */
+ while (len > 0) {
+ iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO);
+ len -= sizeof(u32);
+ ptr++;
+ }
+ }
+
+ /* enqueue packet */
+ ks8842_write16(adapter, 17, 1, REG_TXQCR);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status)
+{
+ netdev_dbg(netdev, "RX error, status: %x\n", status);
+
+ netdev->stats.rx_errors++;
+ if (status & RXSR_TOO_LONG)
+ netdev->stats.rx_length_errors++;
+ if (status & RXSR_CRC_ERROR)
+ netdev->stats.rx_crc_errors++;
+ if (status & RXSR_RUNT)
+ netdev->stats.rx_frame_errors++;
+}
+
+static void ks8842_update_rx_counters(struct net_device *netdev, u32 status,
+ int len)
+{
+ netdev_dbg(netdev, "RX packet, len: %d\n", len);
+
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += len;
+ if (status & RXSR_MULTICAST)
+ netdev->stats.multicast++;
+}
+
+static int __ks8842_start_new_rx_dma(struct net_device *netdev)
+{
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
+ struct scatterlist *sg = &ctl->sg;
+ int err;
+
+ ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE);
+ if (ctl->skb) {
+ sg_init_table(sg, 1);
+ sg_dma_address(sg) = dma_map_single(adapter->dev,
+ ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+ err = dma_mapping_error(adapter->dev, sg_dma_address(sg));
+ if (unlikely(err)) {
+ sg_dma_address(sg) = 0;
+ goto out;
+ }
+
+ sg_dma_len(sg) = DMA_BUFFER_SIZE;
+
+ ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
+ sg, 1, DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+
+ if (!ctl->adesc)
+ goto out;
+
+ ctl->adesc->callback_param = netdev;
+ ctl->adesc->callback = ks8842_dma_rx_cb;
+ ctl->adesc->tx_submit(ctl->adesc);
+ } else {
+ err = -ENOMEM;
+ sg_dma_address(sg) = 0;
+ goto out;
+ }
+
+ return err;
+out:
+ if (sg_dma_address(sg))
+ dma_unmap_single(adapter->dev, sg_dma_address(sg),
+ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+ sg_dma_address(sg) = 0;
+ if (ctl->skb)
+ dev_kfree_skb(ctl->skb);
+
+ ctl->skb = NULL;
+
+ printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
+ return err;
+}
+
+static void ks8842_rx_frame_dma_tasklet(unsigned long arg)
+{
+ struct net_device *netdev = (struct net_device *)arg;
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
+ struct sk_buff *skb = ctl->skb;
+ dma_addr_t addr = sg_dma_address(&ctl->sg);
+ u32 status;
+
+ ctl->adesc = NULL;
+
+ /* kick next transfer going */
+ __ks8842_start_new_rx_dma(netdev);
+
+ /* now handle the data we got */
+ dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+
+ status = *((u32 *)skb->data);
+
+ netdev_dbg(netdev, "%s - rx_data: status: %x\n",
+ __func__, status & 0xffff);
+
+ /* check the status */
+ if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
+ int len = (status >> 16) & 0x7ff;
+
+ ks8842_update_rx_counters(netdev, status, len);
+
+ /* reserve 4 bytes which is the status word */
+ skb_reserve(skb, 4);
+ skb_put(skb, len);
+
+ skb->protocol = eth_type_trans(skb, netdev);
+ netif_rx(skb);
+ } else {
+ ks8842_update_rx_err_counters(netdev, status);
+ dev_kfree_skb(skb);
+ }
+}
+
+static void ks8842_rx_frame(struct net_device *netdev,
+ struct ks8842_adapter *adapter)
+{
+ u32 status;
+ int len;
+
+ if (adapter->conf_flags & KS884X_16BIT) {
+ status = ks8842_read16(adapter, 17, REG_QMU_DATA_LO);
+ len = ks8842_read16(adapter, 17, REG_QMU_DATA_HI);
+ netdev_dbg(netdev, "%s - rx_data: status: %x\n",
+ __func__, status);
+ } else {
+ status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO);
+ len = (status >> 16) & 0x7ff;
+ status &= 0xffff;
+ netdev_dbg(netdev, "%s - rx_data: status: %x\n",
+ __func__, status);
+ }
+
+ /* check the status */
+ if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
+ struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len + 3);
+
+ if (skb) {
+
+ ks8842_update_rx_counters(netdev, status, len);
+
+ if (adapter->conf_flags & KS884X_16BIT) {
+ u16 *data16 = (u16 *)skb_put(skb, len);
+ ks8842_select_bank(adapter, 17);
+ while (len > 0) {
+ *data16++ = ioread16(adapter->hw_addr +
+ REG_QMU_DATA_LO);
+ *data16++ = ioread16(adapter->hw_addr +
+ REG_QMU_DATA_HI);
+ len -= sizeof(u32);
+ }
+ } else {
+ u32 *data = (u32 *)skb_put(skb, len);
+
+ ks8842_select_bank(adapter, 17);
+ while (len > 0) {
+ *data++ = ioread32(adapter->hw_addr +
+ REG_QMU_DATA_LO);
+ len -= sizeof(u32);
+ }
+ }
+ skb->protocol = eth_type_trans(skb, netdev);
+ netif_rx(skb);
+ } else
+ netdev->stats.rx_dropped++;
+ } else
+ ks8842_update_rx_err_counters(netdev, status);
+
+ /* set high watermark to 3K */
+ ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR);
+
+ /* release the frame */
+ ks8842_write16(adapter, 17, 0x01, REG_RXQCR);
+
+ /* set high watermark to 2K */
+ ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR);
+}
+
+void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
+{
+ u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
+ netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data);
+ while (rx_data) {
+ ks8842_rx_frame(netdev, adapter);
+ rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
+ }
+}
+
+void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
+{
+ u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
+ netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr);
+ netdev->stats.tx_packets++;
+ if (netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
+}
+
+void ks8842_handle_rx_overrun(struct net_device *netdev,
+ struct ks8842_adapter *adapter)
+{
+ netdev_dbg(netdev, "%s: entry\n", __func__);
+ netdev->stats.rx_errors++;
+ netdev->stats.rx_fifo_errors++;
+}
+
+void ks8842_tasklet(unsigned long arg)
+{
+ struct net_device *netdev = (struct net_device *)arg;
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ u16 isr;
+ unsigned long flags;
+ u16 entry_bank;
+
+ /* read current bank to be able to set it back */
+ spin_lock_irqsave(&adapter->lock, flags);
+ entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+
+ isr = ks8842_read16(adapter, 18, REG_ISR);
+ netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);
+
+ /* when running in DMA mode, do not ack RX interrupts; they are handled
+ internally by timberdale, otherwise its DMA FIFOs would stop
+ */
+ if (KS8842_USE_DMA(adapter))
+ isr &= ~IRQ_RX;
+
+ /* Ack */
+ ks8842_write16(adapter, 18, isr, REG_ISR);
+
+ if (!(adapter->conf_flags & MICREL_KS884X))
+ /* Ack in the timberdale IP as well */
+ iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR);
+
+ if (!netif_running(netdev))
+ return;
+
+ if (isr & IRQ_LINK_CHANGE)
+ ks8842_update_link_status(netdev, adapter);
+
+ /* should not get IRQ_RX when running DMA mode */
+ if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter))
+ ks8842_handle_rx(netdev, adapter);
+
+ /* should only happen when in PIO mode */
+ if (isr & IRQ_TX)
+ ks8842_handle_tx(netdev, adapter);
+
+ if (isr & IRQ_RX_OVERRUN)
+ ks8842_handle_rx_overrun(netdev, adapter);
+
+ if (isr & IRQ_TX_STOPPED) {
+ ks8842_disable_tx(adapter);
+ ks8842_enable_tx(adapter);
+ }
+
+ if (isr & IRQ_RX_STOPPED) {
+ ks8842_disable_rx(adapter);
+ ks8842_enable_rx(adapter);
+ }
+
+ /* re-enable interrupts, put back the bank selection register */
+ spin_lock_irqsave(&adapter->lock, flags);
+ if (KS8842_USE_DMA(adapter))
+ ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
+ else
+ ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
+ iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
+
+ /* Make sure timberdale continues DMA operations, they are stopped while
+ we are handling the ks8842 because we might change bank */
+ if (KS8842_USE_DMA(adapter))
+ ks8842_resume_dma(adapter);
+
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
+static irqreturn_t ks8842_irq(int irq, void *devid)
+{
+ struct net_device *netdev = devid;
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ u16 isr;
+ u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
+ irqreturn_t ret = IRQ_NONE;
+
+ isr = ks8842_read16(adapter, 18, REG_ISR);
+ netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);
+
+ if (isr) {
+ if (KS8842_USE_DMA(adapter))
+ /* disable all but RX IRQ, since the FPGA relies on it */
+ ks8842_write16(adapter, 18, IRQ_RX, REG_IER);
+ else
+ /* disable IRQ */
+ ks8842_write16(adapter, 18, 0x00, REG_IER);
+
+ /* schedule tasklet */
+ tasklet_schedule(&adapter->tasklet);
+
+ ret = IRQ_HANDLED;
+ }
+
+ iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
+
+ /* After an interrupt, tell timberdale to continue DMA operations.
+ DMA is disabled while we are handling the ks8842 because we might
+ change bank */
+ ks8842_resume_dma(adapter);
+
+ return ret;
+}
+
+static void ks8842_dma_rx_cb(void *data)
+{
+ struct net_device *netdev = data;
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+
+ netdev_dbg(netdev, "RX DMA finished\n");
+ /* schedule tasklet */
+ if (adapter->dma_rx.adesc)
+ tasklet_schedule(&adapter->dma_rx.tasklet);
+}
+
+static void ks8842_dma_tx_cb(void *data)
+{
+ struct net_device *netdev = data;
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;
+
+ netdev_dbg(netdev, "TX DMA finished\n");
+
+ if (!ctl->adesc)
+ return;
+
+ netdev->stats.tx_packets++;
+ ctl->adesc = NULL;
+
+ if (netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
+}
+
+static void ks8842_stop_dma(struct ks8842_adapter *adapter)
+{
+ struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
+ struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
+
+ tx_ctl->adesc = NULL;
+ if (tx_ctl->chan)
+ tx_ctl->chan->device->device_control(tx_ctl->chan,
+ DMA_TERMINATE_ALL, 0);
+
+ rx_ctl->adesc = NULL;
+ if (rx_ctl->chan)
+ rx_ctl->chan->device->device_control(rx_ctl->chan,
+ DMA_TERMINATE_ALL, 0);
+
+ if (sg_dma_address(&rx_ctl->sg))
+ dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg),
+ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+ sg_dma_address(&rx_ctl->sg) = 0;
+
+ dev_kfree_skb(rx_ctl->skb);
+ rx_ctl->skb = NULL;
+}
+
+static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter)
+{
+ struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
+ struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
+
+ ks8842_stop_dma(adapter);
+
+ if (tx_ctl->chan)
+ dma_release_channel(tx_ctl->chan);
+ tx_ctl->chan = NULL;
+
+ if (rx_ctl->chan)
+ dma_release_channel(rx_ctl->chan);
+ rx_ctl->chan = NULL;
+
+ tasklet_kill(&rx_ctl->tasklet);
+
+ if (sg_dma_address(&tx_ctl->sg))
+ dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg),
+ DMA_BUFFER_SIZE, DMA_TO_DEVICE);
+ sg_dma_address(&tx_ctl->sg) = 0;
+
+ kfree(tx_ctl->buf);
+ tx_ctl->buf = NULL;
+}
+
+static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param)
+{
+ return chan->chan_id == (long)filter_param;
+}
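+/* The filter matches on chan_id, so the channel numbers handed over in the
+ * platform data map directly onto dmaengine channel ids.
+ */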
+
+static int ks8842_alloc_dma_bufs(struct net_device *netdev)
+{
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
+ struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
+ int err;
+
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_PRIVATE, mask);
+
+ sg_init_table(&tx_ctl->sg, 1);
+
+ tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
+ (void *)(long)tx_ctl->channel);
+ if (!tx_ctl->chan) {
+ err = -ENODEV;
+ goto err;
+ }
+
+ /* allocate DMA buffer */
+ tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
+ if (!tx_ctl->buf) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
+ tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
+ err = dma_mapping_error(adapter->dev,
+ sg_dma_address(&tx_ctl->sg));
+ if (err) {
+ sg_dma_address(&tx_ctl->sg) = 0;
+ goto err;
+ }
+
+ rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
+ (void *)(long)rx_ctl->channel);
+ if (!rx_ctl->chan) {
+ err = -ENODEV;
+ goto err;
+ }
+
+ tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet,
+ (unsigned long)netdev);
+
+ return 0;
+err:
+ ks8842_dealloc_dma_bufs(adapter);
+ return err;
+}
+
+/* Netdevice operations */
+
+static int ks8842_open(struct net_device *netdev)
+{
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ netdev_dbg(netdev, "%s - entry\n", __func__);
+
+ if (KS8842_USE_DMA(adapter)) {
+ err = ks8842_alloc_dma_bufs(netdev);
+
+ if (!err) {
+ /* start RX dma */
+ err = __ks8842_start_new_rx_dma(netdev);
+ if (err)
+ ks8842_dealloc_dma_bufs(adapter);
+ }
+
+ if (err) {
+ printk(KERN_WARNING DRV_NAME
+ ": Failed to initiate DMA, running PIO\n");
+ ks8842_dealloc_dma_bufs(adapter);
+ adapter->dma_rx.channel = -1;
+ adapter->dma_tx.channel = -1;
+ }
+ }
+
+ /* reset the HW */
+ ks8842_reset_hw(adapter);
+
+ ks8842_write_mac_addr(adapter, netdev->dev_addr);
+
+ ks8842_update_link_status(netdev, adapter);
+
+ err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
+ netdev);
+ if (err) {
+ pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int ks8842_close(struct net_device *netdev)
+{
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+
+ netdev_dbg(netdev, "%s - entry\n", __func__);
+
+ cancel_work_sync(&adapter->timeout_work);
+
+ if (KS8842_USE_DMA(adapter))
+ ks8842_dealloc_dma_bufs(adapter);
+
+ /* free the irq */
+ free_irq(adapter->irq, netdev);
+
+ /* disable the switch */
+ ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE);
+
+ return 0;
+}
+
+static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ int ret;
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+
+ netdev_dbg(netdev, "%s: entry\n", __func__);
+
+ if (KS8842_USE_DMA(adapter)) {
+ unsigned long flags;
+ ret = ks8842_tx_frame_dma(skb, netdev);
+ /* for now only allow one transfer at the time */
+ spin_lock_irqsave(&adapter->lock, flags);
+ if (adapter->dma_tx.adesc)
+ netif_stop_queue(netdev);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ return ret;
+ }
+
+ ret = ks8842_tx_frame(skb, netdev);
+
+ if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8)
+ netif_stop_queue(netdev);
+
+ return ret;
+}
+
+static int ks8842_set_mac(struct net_device *netdev, void *p)
+{
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+ char *mac = (u8 *)addr->sa_data;
+
+ netdev_dbg(netdev, "%s: entry\n", __func__);
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, mac, netdev->addr_len);
+
+ ks8842_write_mac_addr(adapter, mac);
+ return 0;
+}
+
+static void ks8842_tx_timeout_work(struct work_struct *work)
+{
+ struct ks8842_adapter *adapter =
+ container_of(work, struct ks8842_adapter, timeout_work);
+ struct net_device *netdev = adapter->netdev;
+ unsigned long flags;
+
+ netdev_dbg(netdev, "%s: entry\n", __func__);
+
+ spin_lock_irqsave(&adapter->lock, flags);
+
+ if (KS8842_USE_DMA(adapter))
+ ks8842_stop_dma(adapter);
+
+ /* disable interrupts */
+ ks8842_write16(adapter, 18, 0, REG_IER);
+ ks8842_write16(adapter, 18, 0xFFFF, REG_ISR);
+
+ netif_stop_queue(netdev);
+
+ spin_unlock_irqrestore(&adapter->lock, flags);
+
+ ks8842_reset_hw(adapter);
+
+ ks8842_write_mac_addr(adapter, netdev->dev_addr);
+
+ ks8842_update_link_status(netdev, adapter);
+
+ if (KS8842_USE_DMA(adapter))
+ __ks8842_start_new_rx_dma(netdev);
+}
+
+static void ks8842_tx_timeout(struct net_device *netdev)
+{
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+
+ netdev_dbg(netdev, "%s: entry\n", __func__);
+
+ schedule_work(&adapter->timeout_work);
+}
+
+static const struct net_device_ops ks8842_netdev_ops = {
+ .ndo_open = ks8842_open,
+ .ndo_stop = ks8842_close,
+ .ndo_start_xmit = ks8842_xmit_frame,
+ .ndo_set_mac_address = ks8842_set_mac,
+ .ndo_tx_timeout = ks8842_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr
+};
+
+static const struct ethtool_ops ks8842_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+};
+
+static int __devinit ks8842_probe(struct platform_device *pdev)
+{
+ int err = -ENOMEM;
+ struct resource *iomem;
+ struct net_device *netdev;
+ struct ks8842_adapter *adapter;
+ struct ks8842_platform_data *pdata = pdev->dev.platform_data;
+ u16 id;
+ unsigned i;
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
+ goto err_mem_region;
+
+ netdev = alloc_etherdev(sizeof(struct ks8842_adapter));
+ if (!netdev)
+ goto err_alloc_etherdev;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work);
+ adapter->hw_addr = ioremap(iomem->start, resource_size(iomem));
+ adapter->conf_flags = iomem->flags;
+
+ if (!adapter->hw_addr)
+ goto err_ioremap;
+
+ adapter->irq = platform_get_irq(pdev, 0);
+ if (adapter->irq < 0) {
+ err = adapter->irq;
+ goto err_get_irq;
+ }
+
+ adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev;
+
+ /* DMA is only supported when accessed via timberdale */
+ if (!(adapter->conf_flags & MICREL_KS884X) && pdata &&
+ (pdata->tx_dma_channel != -1) &&
+ (pdata->rx_dma_channel != -1)) {
+ adapter->dma_rx.channel = pdata->rx_dma_channel;
+ adapter->dma_tx.channel = pdata->tx_dma_channel;
+ } else {
+ adapter->dma_rx.channel = -1;
+ adapter->dma_tx.channel = -1;
+ }
+
+ tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
+ spin_lock_init(&adapter->lock);
+
+ netdev->netdev_ops = &ks8842_netdev_ops;
+ netdev->ethtool_ops = &ks8842_ethtool_ops;
+
+ /* Check if a mac address was given */
+ i = netdev->addr_len;
+ if (pdata) {
+ for (i = 0; i < netdev->addr_len; i++)
+ if (pdata->macaddr[i] != 0)
+ break;
+
+ if (i < netdev->addr_len)
+ /* an address was passed, use it */
+ memcpy(netdev->dev_addr, pdata->macaddr,
+ netdev->addr_len);
+ }
+
+ if (i == netdev->addr_len) {
+ ks8842_read_mac_addr(adapter, netdev->dev_addr);
+
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ random_ether_addr(netdev->dev_addr);
+ }
+
+ id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE);
+
+ strcpy(netdev->name, "eth%d");
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ platform_set_drvdata(pdev, netdev);
+
+ pr_info("Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
+ (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
+
+ return 0;
+
+err_register:
+err_get_irq:
+ iounmap(adapter->hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ release_mem_region(iomem->start, resource_size(iomem));
+err_mem_region:
+ return err;
+}
+
+static int __devexit ks8842_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ unregister_netdev(netdev);
+ tasklet_kill(&adapter->tasklet);
+ iounmap(adapter->hw_addr);
+ free_netdev(netdev);
+ release_mem_region(iomem->start, resource_size(iomem));
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+
+static struct platform_driver ks8842_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ks8842_probe,
+ .remove = ks8842_remove,
+};
+
+static int __init ks8842_init(void)
+{
+ return platform_driver_register(&ks8842_platform_driver);
+}
+
+static void __exit ks8842_exit(void)
+{
+ platform_driver_unregister(&ks8842_platform_driver);
+}
+
+module_init(ks8842_init);
+module_exit(ks8842_exit);
+
+MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver");
+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ks8842");
+
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
new file mode 100644
index 000000000000..f56743a28fc0
--- /dev/null
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -0,0 +1,1737 @@
+/* drivers/net/ks8851.c
+ *
+ * Copyright 2009 Simtec Electronics
+ * http://www.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DEBUG
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/cache.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+
+#include <linux/spi/spi.h>
+
+#include "ks8851.h"
+
+/**
+ * struct ks8851_rxctrl - KS8851 driver rx control
+ * @mchash: Multicast hash-table data.
+ * @rxcr1: KS_RXCR1 register setting
+ * @rxcr2: KS_RXCR2 register setting
+ *
+ * Representation of the settings needed to control the receive filtering,
+ * such as the multicast hash-filter and the receive register settings. This
+ * makes it easy to work out whether the receive settings have changed, and
+ * then to issue the new settings to the worker that will send the necessary
+ * commands.
+ */
+struct ks8851_rxctrl {
+ u16 mchash[4];
+ u16 rxcr1;
+ u16 rxcr2;
+};
+
+/**
+ * union ks8851_tx_hdr - tx header data
+ * @txb: The header as bytes
+ * @txw: The header as 16bit, little-endian words
+ *
+ * A dual representation of the tx header data to allow
+ * access to individual bytes, and to allow 16bit accesses
+ * with 16bit alignment.
+ */
+union ks8851_tx_hdr {
+ u8 txb[6];
+ __le16 txw[3];
+};
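+/* ks8851_wrpkt() below starts the header at txb[1] (the TXFIFO opcode byte)
+ * so that the frame id and length words it writes into txw[1] and txw[2]
+ * keep their 16-bit alignment.
+ */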
+
+/**
+ * struct ks8851_net - KS8851 driver private data
+ * @netdev: The network device we're bound to
+ * @spidev: The spi device we're bound to.
+ * @lock: Lock to ensure that the device is not accessed when busy.
+ * @statelock: Lock on this structure for tx list.
+ * @mii: The MII state information for the mii calls.
+ * @rxctrl: RX settings for @rxctrl_work.
+ * @tx_work: Work queue for tx packets
+ * @irq_work: Work queue for servicing interrupts
+ * @rxctrl_work: Work queue for updating RX mode and multicast lists
+ * @txq: Queue of packets for transmission.
+ * @spi_msg1: pre-setup SPI transfer with one message, @spi_xfer1.
+ * @spi_msg2: pre-setup SPI transfer with two messages, @spi_xfer2.
+ * @txh: Space for generating packet TX header in DMA-able data
+ * @rxd: Space for receiving SPI data, in DMA-able space.
+ * @txd: Space for transmitting SPI data, in DMA-able space.
+ * @msg_enable: The message flags controlling driver output (see ethtool).
+ * @fid: Incrementing frame id tag.
+ * @rc_ier: Cached copy of KS_IER.
+ * @rc_ccr: Cached copy of KS_CCR.
+ * @rc_rxqcr: Cached copy of KS_RXQCR.
+ * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
+ *
+ * The @lock ensures that the chip is protected when certain operations are
+ * in progress. When the read or write packet transfer is in progress, most
+ * of the chip registers are not accessible until the transfer is finished and
+ * the DMA has been de-asserted.
+ *
+ * The @statelock is used to protect information in the structure which may
+ * need to be accessed via several sources, such as the network driver layer
+ * or one of the work queues.
+ *
+ * We align the buffers we may use for rx/tx to ensure that if the SPI driver
+ * wants to DMA map them, it will not have any problems with data the driver
+ * modifies.
+ */
+struct ks8851_net {
+ struct net_device *netdev;
+ struct spi_device *spidev;
+ struct mutex lock;
+ spinlock_t statelock;
+
+ union ks8851_tx_hdr txh ____cacheline_aligned;
+ u8 rxd[8];
+ u8 txd[8];
+
+ u32 msg_enable ____cacheline_aligned;
+ u16 tx_space;
+ u8 fid;
+
+ u16 rc_ier;
+ u16 rc_rxqcr;
+ u16 rc_ccr;
+ u16 eeprom_size;
+
+ struct mii_if_info mii;
+ struct ks8851_rxctrl rxctrl;
+
+ struct work_struct tx_work;
+ struct work_struct irq_work;
+ struct work_struct rxctrl_work;
+
+ struct sk_buff_head txq;
+
+ struct spi_message spi_msg1;
+ struct spi_message spi_msg2;
+ struct spi_transfer spi_xfer1;
+ struct spi_transfer spi_xfer2[2];
+};
+
+static int msg_enable;
+
+/* shift for byte-enable data */
+#define BYTE_EN(_x) ((_x) << 2)
+
+/* turn register number and byte-enable mask into data for start of packet */
+#define MK_OP(_byteen, _reg) (BYTE_EN(_byteen) | (_reg) << (8+2) | (_reg) >> 6)
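+/* For example (illustrative): a 16-bit access to register 0x10 with both
+ * byte lanes of the word enabled encodes as
+ * MK_OP(0x3, 0x10) = (0x3 << 2) | (0x10 << 10) | (0x10 >> 6) = 0x400c,
+ * which is then OR'd with KS_SPIOP_RD or KS_SPIOP_WR to form the first
+ * 16-bit word sent on the wire.
+ */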
+
+/* SPI register read/write calls.
+ *
+ * All these calls issue SPI transactions to access the chip's registers. They
+ * all require that the necessary lock is held to prevent accesses when the
+ * chip is busy transferring packet data (RX/TX FIFO accesses).
+ */
+
+/**
+ * ks8851_wrreg16 - write 16bit register value to chip
+ * @ks: The chip state
+ * @reg: The register address
+ * @val: The value to write
+ *
+ * Issue a write to put the value @val into the register specified in @reg.
+ */
+static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
+{
+ struct spi_transfer *xfer = &ks->spi_xfer1;
+ struct spi_message *msg = &ks->spi_msg1;
+ __le16 txb[2];
+ int ret;
+
+ txb[0] = cpu_to_le16(MK_OP(reg & 2 ? 0xC : 0x03, reg) | KS_SPIOP_WR);
+ txb[1] = cpu_to_le16(val);
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = NULL;
+ xfer->len = 4;
+
+ ret = spi_sync(ks->spidev, msg);
+ if (ret < 0)
+ netdev_err(ks->netdev, "spi_sync() failed\n");
+}
+
+/**
+ * ks8851_wrreg8 - write 8bit register value to chip
+ * @ks: The chip state
+ * @reg: The register address
+ * @val: The value to write
+ *
+ * Issue a write to put the value @val into the register specified in @reg.
+ */
+static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
+{
+ struct spi_transfer *xfer = &ks->spi_xfer1;
+ struct spi_message *msg = &ks->spi_msg1;
+ __le16 txb[2];
+ int ret;
+ int bit;
+
+ bit = 1 << (reg & 3);
+
+ txb[0] = cpu_to_le16(MK_OP(bit, reg) | KS_SPIOP_WR);
+ txb[1] = val;
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = NULL;
+ xfer->len = 3;
+
+ ret = spi_sync(ks->spidev, msg);
+ if (ret < 0)
+ netdev_err(ks->netdev, "spi_sync() failed\n");
+}
+
+/**
+ * ks8851_rx_1msg - select whether to use one or two messages for spi read
+ * @ks: The device structure
+ *
+ * Return whether to generate a single message with a tx and rx buffer
+ * supplied to spi_sync(), or alternatively send the tx and rx buffers
+ * as separate messages.
+ *
+ * Depending on the hardware in use, a single message may be more efficient
+ * on interrupts or work done by the driver.
+ *
+ * This currently always returns true until we add some per-device data passed
+ * from the platform code to specify which mode is better.
+ */
+static inline bool ks8851_rx_1msg(struct ks8851_net *ks)
+{
+ return true;
+}
+
+/**
+ * ks8851_rdreg - issue read register command and return the data
+ * @ks: The device state
+ * @op: The register address and byte enables in message format.
+ * @rxb: The RX buffer to return the result into
+ * @rxl: The length of data expected.
+ *
+ * This is the low level read call that issues the necessary spi message(s)
+ * to read data from the register specified in @op.
+ */
+static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
+ u8 *rxb, unsigned rxl)
+{
+ struct spi_transfer *xfer;
+ struct spi_message *msg;
+ __le16 *txb = (__le16 *)ks->txd;
+ u8 *trx = ks->rxd;
+ int ret;
+
+ txb[0] = cpu_to_le16(op | KS_SPIOP_RD);
+
+ if (ks8851_rx_1msg(ks)) {
+ msg = &ks->spi_msg1;
+ xfer = &ks->spi_xfer1;
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = trx;
+ xfer->len = rxl + 2;
+ } else {
+ msg = &ks->spi_msg2;
+ xfer = ks->spi_xfer2;
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = NULL;
+ xfer->len = 2;
+
+ xfer++;
+ xfer->tx_buf = NULL;
+ xfer->rx_buf = trx;
+ xfer->len = rxl;
+ }
+
+ ret = spi_sync(ks->spidev, msg);
+ if (ret < 0)
+ netdev_err(ks->netdev, "read: spi_sync() failed\n");
+ else if (ks8851_rx_1msg(ks))
+ memcpy(rxb, trx + 2, rxl);
+ else
+ memcpy(rxb, trx, rxl);
+}
+
+/**
+ * ks8851_rdreg8 - read 8 bit register from device
+ * @ks: The chip information
+ * @reg: The register address
+ *
+ * Read an 8-bit register from the chip, returning the result
+*/
+static unsigned ks8851_rdreg8(struct ks8851_net *ks, unsigned reg)
+{
+ u8 rxb[1];
+
+ ks8851_rdreg(ks, MK_OP(1 << (reg & 3), reg), rxb, 1);
+ return rxb[0];
+}
+
+/**
+ * ks8851_rdreg16 - read 16 bit register from device
+ * @ks: The chip information
+ * @reg: The register address
+ *
+ * Read a 16bit register from the chip, returning the result
+*/
+static unsigned ks8851_rdreg16(struct ks8851_net *ks, unsigned reg)
+{
+ __le16 rx = 0;
+
+ ks8851_rdreg(ks, MK_OP(reg & 2 ? 0xC : 0x3, reg), (u8 *)&rx, 2);
+ return le16_to_cpu(rx);
+}
+
+/**
+ * ks8851_rdreg32 - read 32 bit register from device
+ * @ks: The chip information
+ * @reg: The register address
+ *
+ * Read a 32bit register from the chip.
+ *
+ * Note, this read requires the address be aligned to 4 bytes.
+*/
+static unsigned ks8851_rdreg32(struct ks8851_net *ks, unsigned reg)
+{
+ __le32 rx = 0;
+
+ WARN_ON(reg & 3);
+
+ ks8851_rdreg(ks, MK_OP(0xf, reg), (u8 *)&rx, 4);
+ return le32_to_cpu(rx);
+}
+
+/**
+ * ks8851_soft_reset - issue one of the soft resets to the device
+ * @ks: The device state.
+ * @op: The bit(s) to set in the GRR
+ *
+ * Issue the relevant soft-reset command to the device's GRR register
+ * specified by @op.
+ *
+ * Note, the delays are in there as a caution to ensure that the reset
+ * has time to take effect and then complete. Since the datasheet does
+ * not currently specify the exact sequence, we have chosen something
+ * that seems to work with our device.
+ */
+static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op)
+{
+ ks8851_wrreg16(ks, KS_GRR, op);
+ mdelay(1); /* wait a short time to effect reset */
+ ks8851_wrreg16(ks, KS_GRR, 0);
+ mdelay(1); /* wait for condition to clear */
+}
+
+/**
+ * ks8851_write_mac_addr - write mac address to device registers
+ * @dev: The network device
+ *
+ * Update the KS8851 MAC address registers from the address in @dev.
+ *
+ * This call assumes that the chip is not running, so there is no need to
+ * shutdown the RXQ process whilst setting this.
+*/
+static int ks8851_write_mac_addr(struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int i;
+
+ mutex_lock(&ks->lock);
+
+ for (i = 0; i < ETH_ALEN; i++)
+ ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);
+
+ mutex_unlock(&ks->lock);
+
+ return 0;
+}
+
+/**
+ * ks8851_init_mac - initialise the mac address
+ * @ks: The device structure
+ *
+ * Get or create the initial mac address for the device and then set that
+ * into the station address register. Currently we assume that the device
+ * does not have a valid mac address in it, and so we use random_ether_addr()
+ * to create a new one.
+ *
+ * In future, the driver should check to see if the device has an EEPROM
+ * attached and whether that has a valid ethernet address in it.
+ */
+static void ks8851_init_mac(struct ks8851_net *ks)
+{
+ struct net_device *dev = ks->netdev;
+
+ random_ether_addr(dev->dev_addr);
+ ks8851_write_mac_addr(dev);
+}
+
+/**
+ * ks8851_irq - device interrupt handler
+ * @irq: Interrupt number passed from the IRQ handler.
+ * @pw: The private word passed to request_irq(), our struct ks8851_net.
+ *
+ * Disable the interrupt from happening again until we've processed the
+ * current status by scheduling ks8851_irq_work().
+ */
+static irqreturn_t ks8851_irq(int irq, void *pw)
+{
+ struct ks8851_net *ks = pw;
+
+ disable_irq_nosync(irq);
+ schedule_work(&ks->irq_work);
+ return IRQ_HANDLED;
+}
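+/* Note: disable_irq_nosync() keeps the line from re-firing until
+ * ks8851_irq_work() has serviced the status and calls enable_irq() again;
+ * the deferral is needed because the SPI accesses may sleep.
+ */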
+
+/**
+ * ks8851_rdfifo - read data from the receive fifo
+ * @ks: The device state.
+ * @buff: The buffer address
+ * @len: The length of the data to read
+ *
+ * Issue an RXQ FIFO read command and read the @len amount of data from
+ * the FIFO into the buffer specified by @buff.
+ */
+static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
+{
+ struct spi_transfer *xfer = ks->spi_xfer2;
+ struct spi_message *msg = &ks->spi_msg2;
+ u8 txb[1];
+ int ret;
+
+ netif_dbg(ks, rx_status, ks->netdev,
+ "%s: %d@%p\n", __func__, len, buff);
+
+ /* set the operation we're issuing */
+ txb[0] = KS_SPIOP_RXFIFO;
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = NULL;
+ xfer->len = 1;
+
+ xfer++;
+ xfer->rx_buf = buff;
+ xfer->tx_buf = NULL;
+ xfer->len = len;
+
+ ret = spi_sync(ks->spidev, msg);
+ if (ret < 0)
+ netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
+}
+
+/**
+ * ks8851_dbg_dumpkkt - dump initial packet contents to debug
+ * @ks: The device state
+ * @rxpkt: The data for the received packet
+ *
+ * Dump the initial data from the packet to dev_dbg().
+*/
+static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
+{
+ netdev_dbg(ks->netdev,
+ "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
+ rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7],
+ rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11],
+ rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]);
+}
+
+/**
+ * ks8851_rx_pkts - receive packets from the host
+ * @ks: The device information.
+ *
+ * This is called from the IRQ work queue when the system detects that there
+ * are packets in the receive queue. Find out how many packets there are and
+ * read them from the FIFO.
+ */
+static void ks8851_rx_pkts(struct ks8851_net *ks)
+{
+ struct sk_buff *skb;
+ unsigned rxfc;
+ unsigned rxlen;
+ unsigned rxstat;
+ u32 rxh;
+ u8 *rxpkt;
+
+ rxfc = ks8851_rdreg8(ks, KS_RXFC);
+
+ netif_dbg(ks, rx_status, ks->netdev,
+ "%s: %d packets\n", __func__, rxfc);
+
+ /* Currently we're issuing a read per packet, but we could possibly
+ * improve the code by issuing a single read, getting the receive
+ * header, allocating the packet and then reading the packet data
+ * out in one go.
+ *
+ * This form of operation would require us to hold the SPI bus'
+ * chipselect low during the entire transaction to avoid any
+ * reset to the data stream coming from the chip.
+ */
+
+ for (; rxfc != 0; rxfc--) {
+ rxh = ks8851_rdreg32(ks, KS_RXFHSR);
+ rxstat = rxh & 0xffff;
+ rxlen = rxh >> 16;
+
+ netif_dbg(ks, rx_status, ks->netdev,
+ "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
+
+ /* the length of the packet includes the 32bit CRC */
+
+ /* set dma read address */
+ ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00);
+
+ /* start the packet dma process, and set auto-dequeue rx */
+ ks8851_wrreg16(ks, KS_RXQCR,
+ ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
+
+ if (rxlen > 4) {
+ unsigned int rxalign;
+
+ rxlen -= 4;
+ rxalign = ALIGN(rxlen, 4);
+ skb = netdev_alloc_skb_ip_align(ks->netdev, rxalign);
+ if (skb) {
+
+ /* 4 bytes of status header + 4 bytes of
+ * garbage: we put them before ethernet
+ * header, so that they are copied,
+ * but ignored.
+ */
+
+ rxpkt = skb_put(skb, rxlen) - 8;
+
+ ks8851_rdfifo(ks, rxpkt, rxalign + 8);
+
+ if (netif_msg_pktdata(ks))
+ ks8851_dbg_dumpkkt(ks, rxpkt);
+
+ skb->protocol = eth_type_trans(skb, ks->netdev);
+ netif_rx(skb);
+
+ ks->netdev->stats.rx_packets++;
+ ks->netdev->stats.rx_bytes += rxlen;
+ }
+ }
+
+ ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+ }
+}
+
+/**
+ * ks8851_irq_work - work queue handler for dealing with interrupt requests
+ * @work: The work structure that was scheduled by schedule_work()
+ *
+ * This is the work handler scheduled by ks8851_irq() to find out what
+ * happened, since we cannot sleep in interrupt context and another
+ * process may be holding the chip's lock.
+ *
+ * Read the interrupt status, work out what needs to be done and then clear
+ * any of the interrupts that are not needed.
+ */
+static void ks8851_irq_work(struct work_struct *work)
+{
+ struct ks8851_net *ks = container_of(work, struct ks8851_net, irq_work);
+ unsigned status;
+ unsigned handled = 0;
+
+ mutex_lock(&ks->lock);
+
+ status = ks8851_rdreg16(ks, KS_ISR);
+
+ netif_dbg(ks, intr, ks->netdev,
+ "%s: status 0x%04x\n", __func__, status);
+
+ if (status & IRQ_LCI) {
+ /* should do something about checking link status */
+ handled |= IRQ_LCI;
+ }
+
+ if (status & IRQ_LDI) {
+ u16 pmecr = ks8851_rdreg16(ks, KS_PMECR);
+ pmecr &= ~PMECR_WKEVT_MASK;
+ ks8851_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
+
+ handled |= IRQ_LDI;
+ }
+
+ if (status & IRQ_RXPSI)
+ handled |= IRQ_RXPSI;
+
+ if (status & IRQ_TXI) {
+ handled |= IRQ_TXI;
+
+ /* no lock here, tx queue should have been stopped */
+
+ /* update our idea of how much tx space is available to the
+ * system */
+ ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
+
+ netif_dbg(ks, intr, ks->netdev,
+ "%s: txspace %d\n", __func__, ks->tx_space);
+ }
+
+ if (status & IRQ_RXI)
+ handled |= IRQ_RXI;
+
+ if (status & IRQ_SPIBEI) {
+ dev_err(&ks->spidev->dev, "%s: spi bus error\n", __func__);
+ handled |= IRQ_SPIBEI;
+ }
+
+ ks8851_wrreg16(ks, KS_ISR, handled);
+
+ if (status & IRQ_RXI) {
+ /* the datasheet says to disable the rx interrupt during
+ * packet read-out, but as the whole interrupt line is
+ * already masked at the host we do not bother masking
+ * just RX at the device. */
+
+ ks8851_rx_pkts(ks);
+ }
+
+ /* if something stopped the rx process, probably because the
+ * rx settings were about to be changed, then restart it. */
+ if (status & IRQ_RXPSI) {
+ struct ks8851_rxctrl *rxc = &ks->rxctrl;
+
+ /* update the multicast hash table */
+ ks8851_wrreg16(ks, KS_MAHTR0, rxc->mchash[0]);
+ ks8851_wrreg16(ks, KS_MAHTR1, rxc->mchash[1]);
+ ks8851_wrreg16(ks, KS_MAHTR2, rxc->mchash[2]);
+ ks8851_wrreg16(ks, KS_MAHTR3, rxc->mchash[3]);
+
+ ks8851_wrreg16(ks, KS_RXCR2, rxc->rxcr2);
+ ks8851_wrreg16(ks, KS_RXCR1, rxc->rxcr1);
+ }
+
+ mutex_unlock(&ks->lock);
+
+ if (status & IRQ_TXI)
+ netif_wake_queue(ks->netdev);
+
+ enable_irq(ks->netdev->irq);
+}
+
+/**
+ * calc_txlen - calculate size of message to send packet
+ * @len: Length of data
+ *
+ * Returns the size of the TXFIFO message needed to send
+ * this packet.
+ */
+static inline unsigned calc_txlen(unsigned len)
+{
+ return ALIGN(len + 4, 4);
+}
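+/* For example, a 60 byte frame needs calc_txlen(60) = ALIGN(60 + 4, 4) = 64
+ * bytes of TXFIFO space: 4 bytes of frame header (frame id and length words)
+ * plus the packet data rounded up to a dword boundary.
+ */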
+
+/**
+ * ks8851_wrpkt - write packet to TX FIFO
+ * @ks: The device state.
+ * @txp: The sk_buff to transmit.
+ * @irq: IRQ on completion of the packet.
+ *
+ * Send the @txp to the chip. This means creating the relevant packet header
+ * specifying the length of the packet and the other information the chip
+ * needs, such as IRQ on completion. Send the header and the packet data to
+ * the device.
+ */
+static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq)
+{
+ struct spi_transfer *xfer = ks->spi_xfer2;
+ struct spi_message *msg = &ks->spi_msg2;
+ unsigned fid = 0;
+ int ret;
+
+ netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p, irq %d\n",
+ __func__, txp, txp->len, txp->data, irq);
+
+ fid = ks->fid++;
+ fid &= TXFR_TXFID_MASK;
+
+ if (irq)
+ fid |= TXFR_TXIC; /* irq on completion */
+
+ /* start header at txb[1] to align txw entries */
+ ks->txh.txb[1] = KS_SPIOP_TXFIFO;
+ ks->txh.txw[1] = cpu_to_le16(fid);
+ ks->txh.txw[2] = cpu_to_le16(txp->len);
+
+ xfer->tx_buf = &ks->txh.txb[1];
+ xfer->rx_buf = NULL;
+ xfer->len = 5;
+
+ xfer++;
+ xfer->tx_buf = txp->data;
+ xfer->rx_buf = NULL;
+ xfer->len = ALIGN(txp->len, 4);
+
+ ret = spi_sync(ks->spidev, msg);
+ if (ret < 0)
+ netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
+}
+
+/**
+ * ks8851_done_tx - update and then free skbuff after transmitting
+ * @ks: The device state
+ * @txb: The buffer transmitted
+ */
+static void ks8851_done_tx(struct ks8851_net *ks, struct sk_buff *txb)
+{
+ struct net_device *dev = ks->netdev;
+
+ dev->stats.tx_bytes += txb->len;
+ dev->stats.tx_packets++;
+
+ dev_kfree_skb(txb);
+}
+
+/**
+ * ks8851_tx_work - process tx packet(s)
+ * @work: The work structure that was scheduled.
+ *
+ * This is called when a number of packets have been scheduled for
+ * transmission and need to be sent to the device.
+ */
+static void ks8851_tx_work(struct work_struct *work)
+{
+ struct ks8851_net *ks = container_of(work, struct ks8851_net, tx_work);
+ struct sk_buff *txb;
+ bool last = skb_queue_empty(&ks->txq);
+
+ mutex_lock(&ks->lock);
+
+ while (!last) {
+ txb = skb_dequeue(&ks->txq);
+ last = skb_queue_empty(&ks->txq);
+
+ if (txb != NULL) {
+ ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
+ ks8851_wrpkt(ks, txb, last);
+ ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+ ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
+
+ ks8851_done_tx(ks, txb);
+ }
+ }
+
+ mutex_unlock(&ks->lock);
+}
+
+/**
+ * ks8851_set_powermode - set power mode of the device
+ * @ks: The device state
+ * @pwrmode: The power mode value to write to KS_PMECR.
+ *
+ * Change the power mode of the chip.
+ */
+static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
+{
+ unsigned pmecr;
+
+ netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
+
+ pmecr = ks8851_rdreg16(ks, KS_PMECR);
+ pmecr &= ~PMECR_PM_MASK;
+ pmecr |= pwrmode;
+
+ ks8851_wrreg16(ks, KS_PMECR, pmecr);
+}
+
+/**
+ * ks8851_net_open - open network device
+ * @dev: The network device being opened.
+ *
+ * Called when the network device is marked active, such as a user executing
+ * 'ifconfig up' on the device.
+ */
+static int ks8851_net_open(struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+
+ /* lock the card, even if we may not actually be doing anything
+ * else at the moment */
+ mutex_lock(&ks->lock);
+
+ netif_dbg(ks, ifup, ks->netdev, "opening\n");
+
+ /* bring chip out of any power saving mode it was in */
+ ks8851_set_powermode(ks, PMECR_PM_NORMAL);
+
+ /* issue a soft reset to the RX/TX QMU to put it into a known
+ * state. */
+ ks8851_soft_reset(ks, GRR_QMU);
+
+ /* setup transmission parameters */
+
+ ks8851_wrreg16(ks, KS_TXCR, (TXCR_TXE | /* enable transmit process */
+ TXCR_TXPE | /* pad to min length */
+ TXCR_TXCRC | /* add CRC */
+ TXCR_TXFCE)); /* enable flow control */
+
+ /* auto-increment tx data, reset tx pointer */
+ ks8851_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);
+
+ /* setup receiver control */
+
+ ks8851_wrreg16(ks, KS_RXCR1, (RXCR1_RXPAFMA | /* from mac filter */
+ RXCR1_RXFCE | /* enable flow control */
+ RXCR1_RXBE | /* broadcast enable */
+ RXCR1_RXUE | /* unicast enable */
+ RXCR1_RXE)); /* enable rx block */
+
+ /* transfer entire frames out in one go */
+ ks8851_wrreg16(ks, KS_RXCR2, RXCR2_SRDBL_FRAME);
+
+ /* set receive counter timeouts */
+ ks8851_wrreg16(ks, KS_RXDTTR, 1000); /* 1ms after first frame to IRQ */
+ ks8851_wrreg16(ks, KS_RXDBCTR, 4096); /* >4Kbytes in buffer to IRQ */
+ ks8851_wrreg16(ks, KS_RXFCTR, 10); /* 10 frames to IRQ */
+
+ ks->rc_rxqcr = (RXQCR_RXFCTE | /* IRQ on frame count exceeded */
+ RXQCR_RXDBCTE | /* IRQ on byte count exceeded */
+ RXQCR_RXDTTE); /* IRQ on time exceeded */
+
+ ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+
+ /* clear then enable interrupts */
+
+#define STD_IRQ (IRQ_LCI | /* Link Change */ \
+ IRQ_TXI | /* TX done */ \
+ IRQ_RXI | /* RX done */ \
+ IRQ_SPIBEI | /* SPI bus error */ \
+ IRQ_TXPSI | /* TX process stop */ \
+ IRQ_RXPSI) /* RX process stop */
+
+ ks->rc_ier = STD_IRQ;
+ ks8851_wrreg16(ks, KS_ISR, STD_IRQ);
+ ks8851_wrreg16(ks, KS_IER, STD_IRQ);
+
+ netif_start_queue(ks->netdev);
+
+ netif_dbg(ks, ifup, ks->netdev, "network device up\n");
+
+ mutex_unlock(&ks->lock);
+ return 0;
+}
+
+/**
+ * ks8851_net_stop - close network device
+ * @dev: The device being closed.
+ *
+ * Called to close down a network device which has been active. Cancel any
+ * pending work, shut down the RX and TX processes and then place the chip
+ * into a low power state whilst it is not being used.
+ */
+static int ks8851_net_stop(struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+
+ netif_info(ks, ifdown, dev, "shutting down\n");
+
+ netif_stop_queue(dev);
+
+ mutex_lock(&ks->lock);
+
+ /* stop any outstanding work */
+ flush_work(&ks->irq_work);
+ flush_work(&ks->tx_work);
+ flush_work(&ks->rxctrl_work);
+
+ /* turn off the IRQs and ack any outstanding */
+ ks8851_wrreg16(ks, KS_IER, 0x0000);
+ ks8851_wrreg16(ks, KS_ISR, 0xffff);
+
+ /* shutdown RX process */
+ ks8851_wrreg16(ks, KS_RXCR1, 0x0000);
+
+ /* shutdown TX process */
+ ks8851_wrreg16(ks, KS_TXCR, 0x0000);
+
+ /* set powermode to soft power down to save power */
+ ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
+
+ /* ensure any queued tx buffers are dumped */
+ while (!skb_queue_empty(&ks->txq)) {
+ struct sk_buff *txb = skb_dequeue(&ks->txq);
+
+ netif_dbg(ks, ifdown, ks->netdev,
+ "%s: freeing txb %p\n", __func__, txb);
+
+ dev_kfree_skb(txb);
+ }
+
+ mutex_unlock(&ks->lock);
+ return 0;
+}
+
+/**
+ * ks8851_start_xmit - transmit packet
+ * @skb: The buffer to transmit
+ * @dev: The device used to transmit the packet.
+ *
+ * Called by the network layer to transmit the @skb. Queue the packet for
+ * the device and schedule the necessary work to transmit the packet when
+ * it is free.
+ *
+ * We do this firstly to avoid sleeping with the network device locked,
+ * and secondly so that several packets can be batched into one run of the
+ * work handler, which helps avoid generating too many transmit-done
+ * interrupts.
+ */
+static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ unsigned needed = calc_txlen(skb->len);
+ netdev_tx_t ret = NETDEV_TX_OK;
+
+ netif_dbg(ks, tx_queued, ks->netdev,
+ "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
+
+ spin_lock(&ks->statelock);
+
+ if (needed > ks->tx_space) {
+ netif_stop_queue(dev);
+ ret = NETDEV_TX_BUSY;
+ } else {
+ ks->tx_space -= needed;
+ skb_queue_tail(&ks->txq, skb);
+ }
+
+ spin_unlock(&ks->statelock);
+ schedule_work(&ks->tx_work);
+
+ return ret;
+}
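
This reservation is the driver's only flow control against the chip's TX FIFO: each queued skb pre-charges its aligned FIFO cost against tx_space, and the queue is stopped once a frame would not fit. A hedged standalone model of that decision (in the driver, tx_space is replenished elsewhere from the chip's free-space register):

	#include <stdbool.h>

	static unsigned tx_space = 6144;	/* initial value set at probe time */

	/* mirrors the NETDEV_TX_OK / NETDEV_TX_BUSY split in ks8851_start_xmit() */
	static bool try_reserve(unsigned frame_len)
	{
		unsigned needed = (frame_len + 4 + 3) & ~3u;	/* calc_txlen() */

		if (needed > tx_space)
			return false;	/* stop queue, NETDEV_TX_BUSY */
		tx_space -= needed;
		return true;		/* queue the skb, schedule tx work */
	}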
+
+/**
+ * ks8851_rxctrl_work - work handler to change rx mode
+ * @work: The work structure this belongs to.
+ *
+ * Lock the device and issue the necessary changes to the receive mode from
+ * the network device layer. This is done so that we can do this without
+ * having to sleep whilst holding the network device lock.
+ *
+ * Since the recommendation from Micrel is that the RXQ is shutdown whilst the
+ * receive parameters are programmed, we issue a write to disable the RXQ and
+ * then wait for the interrupt handler to be triggered once the RXQ shutdown is
+ * complete. The interrupt handler then writes the new values into the chip.
+ */
+static void ks8851_rxctrl_work(struct work_struct *work)
+{
+ struct ks8851_net *ks = container_of(work, struct ks8851_net, rxctrl_work);
+
+ mutex_lock(&ks->lock);
+
+ /* need to shutdown RXQ before modifying filter parameters */
+ ks8851_wrreg16(ks, KS_RXCR1, 0x00);
+
+ mutex_unlock(&ks->lock);
+}
+
+static void ks8851_set_rx_mode(struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ struct ks8851_rxctrl rxctrl;
+
+ memset(&rxctrl, 0, sizeof(rxctrl));
+
+ if (dev->flags & IFF_PROMISC) {
+ /* interface to receive everything */
+
+ rxctrl.rxcr1 = RXCR1_RXAE | RXCR1_RXINVF;
+ } else if (dev->flags & IFF_ALLMULTI) {
+ /* accept all multicast packets */
+
+ rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE |
+ RXCR1_RXPAFMA | RXCR1_RXMAFMA);
+ } else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+ u32 crc;
+
+ /* accept some multicast */
+
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc(ETH_ALEN, ha->addr);
+ crc >>= (32 - 6); /* get top six bits */
+
+ rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf));
+ }
+
+ rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA;
+ } else {
+ /* just accept broadcast / unicast */
+ rxctrl.rxcr1 = RXCR1_RXPAFMA;
+ }
+
+ rxctrl.rxcr1 |= (RXCR1_RXUE | /* unicast enable */
+ RXCR1_RXBE | /* broadcast enable */
+ RXCR1_RXE | /* RX process enable */
+ RXCR1_RXFCE); /* enable flow control */
+
+ rxctrl.rxcr2 |= RXCR2_SRDBL_FRAME;
+
+ /* schedule work to do the actual set of the data if needed */
+
+ spin_lock(&ks->statelock);
+
+ if (memcmp(&rxctrl, &ks->rxctrl, sizeof(rxctrl)) != 0) {
+ memcpy(&ks->rxctrl, &rxctrl, sizeof(ks->rxctrl));
+ schedule_work(&ks->rxctrl_work);
+ }
+
+ spin_unlock(&ks->statelock);
+}
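
The hash bucket comes from the top six bits of the Ethernet CRC-32 of each address: the upper two bits select one of the four MAHTR registers, the lower four a bit within that 16-bit register. A minimal sketch of the mapping, assuming a CRC value as returned by the kernel's ether_crc():

	#include <stdint.h>

	/* map a 32-bit Ethernet CRC to its (register, bit) hash slot */
	static void mchash_slot(uint32_t crc, unsigned *reg, unsigned *bit)
	{
		unsigned top6 = crc >> (32 - 6);	/* top six bits */

		*reg = top6 >> 4;	/* 0..3, selects KS_MAHTR0..KS_MAHTR3 */
		*bit = top6 & 0xf;	/* bit within the 16-bit register */
	}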
+
+static int ks8851_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = addr;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ return ks8851_write_mac_addr(dev);
+}
+
+static int ks8851_net_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
+}
+
+static const struct net_device_ops ks8851_netdev_ops = {
+ .ndo_open = ks8851_net_open,
+ .ndo_stop = ks8851_net_stop,
+ .ndo_do_ioctl = ks8851_net_ioctl,
+ .ndo_start_xmit = ks8851_start_xmit,
+ .ndo_set_mac_address = ks8851_set_mac_address,
+ .ndo_set_rx_mode = ks8851_set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/* Companion eeprom access */
+
+enum { /* EEPROM programming states */
+ EEPROM_CONTROL,
+ EEPROM_ADDRESS,
+ EEPROM_DATA,
+ EEPROM_COMPLETE
+};
+
+/**
+ * ks8851_eeprom_read - read a 16-bit word from the ks8851 companion EEPROM
+ * @dev: The network device the PHY is on.
+ * @addr: EEPROM address to read
+ *
+ * The device's eeprom_size determines the address field length, and can
+ * be changed through debugfs.
+ *
+ * Programs a read on the EEPROM using ks8851 EEPROM SW access feature.
+ * Warning: The READ feature is not supported on ks8851 revision 0.
+ *
+ * Rough programming model:
+ * - first half of each period: clock low, drive the value onto the bus
+ * - second half of each period: clock high, sample the value from the bus
+ */
+unsigned int ks8851_eeprom_read(struct net_device *dev, unsigned int addr)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int eepcr;
+ int ctrl = EEPROM_OP_READ;
+ int state = EEPROM_CONTROL;
+ int bit_count = EEPROM_OP_LEN - 1;
+ unsigned int data = 0;
+ int dummy;
+ unsigned int addr_len;
+
+ addr_len = (ks->eeprom_size == 128) ? 6 : 8;
+
+ /* start transaction: chip select high, authorize write */
+ mutex_lock(&ks->lock);
+ eepcr = EEPCR_EESA | EEPCR_EESRWA;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ eepcr |= EEPCR_EECS;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ while (state != EEPROM_COMPLETE) {
+ /* falling clock period starts... */
+ /* set EED_IO pin for control and address */
+ eepcr &= ~EEPCR_EEDO;
+ switch (state) {
+ case EEPROM_CONTROL:
+ eepcr |= ((ctrl >> bit_count) & 1) << 2;
+ if (bit_count-- <= 0) {
+ bit_count = addr_len - 1;
+ state = EEPROM_ADDRESS;
+ }
+ break;
+ case EEPROM_ADDRESS:
+ eepcr |= ((addr >> bit_count) & 1) << 2;
+ bit_count--;
+ break;
+ case EEPROM_DATA:
+ /* Change to receive mode */
+ eepcr &= ~EEPCR_EESRWA;
+ break;
+ }
+
+ /* lower clock */
+ eepcr &= ~EEPCR_EESCK;
+
+ mutex_lock(&ks->lock);
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ /* wait period / 2 */
+ udelay(EEPROM_SK_PERIOD / 2);
+
+ /* rising clock period starts... */
+
+ /* raise clock */
+ mutex_lock(&ks->lock);
+ eepcr |= EEPCR_EESCK;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ /* Manage read */
+ switch (state) {
+ case EEPROM_ADDRESS:
+ if (bit_count < 0) {
+ bit_count = EEPROM_DATA_LEN - 1;
+ state = EEPROM_DATA;
+ }
+ break;
+ case EEPROM_DATA:
+ mutex_lock(&ks->lock);
+ dummy = ks8851_rdreg16(ks, KS_EEPCR);
+ mutex_unlock(&ks->lock);
+ data |= ((dummy >> EEPCR_EESB_OFFSET) & 1) << bit_count;
+ if (bit_count-- <= 0)
+ state = EEPROM_COMPLETE;
+ break;
+ }
+
+ /* wait period / 2 */
+ udelay(EEPROM_SK_PERIOD / 2);
+ }
+
+ /* close transaction */
+ mutex_lock(&ks->lock);
+ eepcr &= ~EEPCR_EECS;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ eepcr = 0;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ return data;
+}
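
For reference, the bit-banged frame assembled by the state machine above follows the usual 93Cx6/microwire layout, clocked out MSB first on the EED_IO pin; a sketch of the read frame (field widths follow the constants in ks8851.h):

	/*
	 *  | start+opcode (3 bits) | address (6 or 8 bits) | data in (16 bits) |
	 *  |     READ = 0b110      | A5..A0 or A7..A0      | D15..D0           |
	 *
	 * EEPROM_OP_READ (0x06) is shifted out from bit EEPROM_OP_LEN - 1
	 * down to 0, exactly as ((ctrl >> bit_count) & 1) does above; the
	 * address width depends on eeprom_size (6 bits for a 128-byte part).
	 */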
+
+/**
+ * ks8851_eeprom_write - write a 16-bit word to the ks8851 companion EEPROM
+ * @dev: The network device the PHY is on.
+ * @op: operand (can be WRITE, EWEN, EWDS)
+ * @addr: EEPROM address to write
+ * @data: data to write
+ *
+ * The device's eeprom_size determines the address field length, and can
+ * be changed through debugfs.
+ *
+ * Programs a write on the EEPROM using ks8851 EEPROM SW access feature.
+ *
+ * Note that a write enable is required before writing data.
+ *
+ * Rough programming model:
+ * - first half of each period: clock low, drive the value onto the bus
+ * - second half of each period: clock high
+ */
+void ks8851_eeprom_write(struct net_device *dev, unsigned int op,
+ unsigned int addr, unsigned int data)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int eepcr;
+ int state = EEPROM_CONTROL;
+ int bit_count = EEPROM_OP_LEN - 1;
+ unsigned int addr_len;
+
+ addr_len = (ks->eeprom_size == 128) ? 6 : 8;
+
+ switch (op) {
+ case EEPROM_OP_EWEN:
+ addr = 0x30;
+ break;
+ case EEPROM_OP_EWDS:
+ addr = 0;
+ break;
+ }
+
+ /* start transaction: chip select high, authorize write */
+ mutex_lock(&ks->lock);
+ eepcr = EEPCR_EESA | EEPCR_EESRWA;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ eepcr |= EEPCR_EECS;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ while (state != EEPROM_COMPLETE) {
+ /* falling clock period starts... */
+ /* set EED_IO pin for control and address */
+ eepcr &= ~EEPCR_EEDO;
+ switch (state) {
+ case EEPROM_CONTROL:
+ eepcr |= ((op >> bit_count) & 1) << 2;
+ if (bit_count-- <= 0) {
+ bit_count = addr_len - 1;
+ state = EEPROM_ADDRESS;
+ }
+ break;
+ case EEPROM_ADDRESS:
+ eepcr |= ((addr >> bit_count) & 1) << 2;
+ if (bit_count-- <= 0) {
+ if (op == EEPROM_OP_WRITE) {
+ bit_count = EEPROM_DATA_LEN - 1;
+ state = EEPROM_DATA;
+ } else {
+ state = EEPROM_COMPLETE;
+ }
+ }
+ break;
+ case EEPROM_DATA:
+ eepcr |= ((data >> bit_count) & 1) << 2;
+ if (bit_count-- <= 0)
+ state = EEPROM_COMPLETE;
+ break;
+ }
+
+ /* lower clock */
+ eepcr &= ~EEPCR_EESCK;
+
+ mutex_lock(&ks->lock);
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ /* wait period / 2 */
+ udelay(EEPROM_SK_PERIOD / 2);
+
+ /* rising clock period starts... */
+
+ /* raise clock */
+ eepcr |= EEPCR_EESCK;
+ mutex_lock(&ks->lock);
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ /* wait period / 2 */
+ udelay(EEPROM_SK_PERIOD / 2);
+ }
+
+ /* close transaction */
+ mutex_lock(&ks->lock);
+ eepcr &= ~EEPCR_EECS;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ eepcr = 0;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+}
+
+/* ethtool support */
+
+static void ks8851_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *di)
+{
+ strlcpy(di->driver, "KS8851", sizeof(di->driver));
+ strlcpy(di->version, "1.00", sizeof(di->version));
+ strlcpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info));
+}
+
+static u32 ks8851_get_msglevel(struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ return ks->msg_enable;
+}
+
+static void ks8851_set_msglevel(struct net_device *dev, u32 to)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ ks->msg_enable = to;
+}
+
+static int ks8851_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ return mii_ethtool_gset(&ks->mii, cmd);
+}
+
+static int ks8851_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ return mii_ethtool_sset(&ks->mii, cmd);
+}
+
+static u32 ks8851_get_link(struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ return mii_link_ok(&ks->mii);
+}
+
+static int ks8851_nway_reset(struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ return mii_nway_restart(&ks->mii);
+}
+
+static int ks8851_get_eeprom_len(struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ return ks->eeprom_size;
+}
+
+static int ks8851_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ u16 *eeprom_buff;
+ int first_word;
+ int last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (eeprom->len > ks->eeprom_size)
+ return -EINVAL;
+
+ eeprom->magic = ks8851_rdreg16(ks, KS_CIDER);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc(sizeof(u16) *
+ (last_word - first_word + 1), GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ eeprom_buff[i] = ks8851_eeprom_read(dev, first_word + i);
+
+ /* Device's eeprom is little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static int ks8851_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ u16 *eeprom_buff;
+ void *ptr;
+ int max_len;
+ int first_word;
+ int last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+ if (eeprom->len > ks->eeprom_size)
+ return -EINVAL;
+
+ if (eeprom->magic != ks8851_rdreg16(ks, KS_CIDER))
+ return -EFAULT;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ max_len = (last_word - first_word + 1) * 2;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = (void *)eeprom_buff;
+
+ if (eeprom->offset & 1) {
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ eeprom_buff[0] = ks8851_eeprom_read(dev, first_word);
+ ptr++;
+ }
+ if ((eeprom->offset + eeprom->len) & 1)
+ /* need read/modify/write of last changed EEPROM word */
+ /* only the first byte of the word is being modified */
+ eeprom_buff[last_word - first_word] =
+ ks8851_eeprom_read(dev, last_word);
+
+ /* Device's eeprom is little-endian, word addressable */
+ le16_to_cpus(&eeprom_buff[0]);
+ le16_to_cpus(&eeprom_buff[last_word - first_word]);
+
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+
+ ks8851_eeprom_write(dev, EEPROM_OP_EWEN, 0, 0);
+
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ ks8851_eeprom_write(dev, EEPROM_OP_WRITE, first_word + i,
+ eeprom_buff[i]);
+ mdelay(EEPROM_WRITE_TIME);
+ }
+
+ ks8851_eeprom_write(dev, EEPROM_OP_EWDS, 0, 0);
+
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
+static const struct ethtool_ops ks8851_ethtool_ops = {
+ .get_drvinfo = ks8851_get_drvinfo,
+ .get_msglevel = ks8851_get_msglevel,
+ .set_msglevel = ks8851_set_msglevel,
+ .get_settings = ks8851_get_settings,
+ .set_settings = ks8851_set_settings,
+ .get_link = ks8851_get_link,
+ .nway_reset = ks8851_nway_reset,
+ .get_eeprom_len = ks8851_get_eeprom_len,
+ .get_eeprom = ks8851_get_eeprom,
+ .set_eeprom = ks8851_set_eeprom,
+};
+
+/* MII interface controls */
+
+/**
+ * ks8851_phy_reg - convert MII register into a KS8851 register
+ * @reg: MII register number.
+ *
+ * Return the KS8851 register number for the corresponding MII PHY register
+ * if possible. Return zero if the MII register has no direct mapping to the
+ * KS8851 register set.
+ */
+static int ks8851_phy_reg(int reg)
+{
+ switch (reg) {
+ case MII_BMCR:
+ return KS_P1MBCR;
+ case MII_BMSR:
+ return KS_P1MBSR;
+ case MII_PHYSID1:
+ return KS_PHY1ILR;
+ case MII_PHYSID2:
+ return KS_PHY1IHR;
+ case MII_ADVERTISE:
+ return KS_P1ANAR;
+ case MII_LPA:
+ return KS_P1ANLPR;
+ }
+
+ return 0x0;
+}
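
Only the six generic MII registers have direct KS8851 equivalents; anything else falls through to the zero return below. For example, a link poll from the mii layer resolves as follows (illustrative call chain, not new code):

	/*
	 * mii_link_ok(&ks->mii)
	 *   -> ks8851_phy_read(dev, 1, MII_BMSR)
	 *      -> ks8851_phy_reg(MII_BMSR) == KS_P1MBSR
	 *         -> ks8851_rdreg16(ks, KS_P1MBSR)
	 */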
+
+/**
+ * ks8851_phy_read - MII interface PHY register read.
+ * @dev: The network device the PHY is on.
+ * @phy_addr: Address of PHY (ignored as we only have one)
+ * @reg: The register to read.
+ *
+ * This call reads data from the PHY register specified in @reg. Since the
+ * device does not support all the MII registers, the unsupported ones
+ * always read back as zero.
+ *
+ * We return zero for unsupported registers as the MII code does not check
+ * the value returned for any error status, and simply returns it to the
+ * caller. The mii-tool that the driver was tested with treats any negative
+ * error as real PHY capabilities, thus displaying incorrect data to the user.
+ */
+static int ks8851_phy_read(struct net_device *dev, int phy_addr, int reg)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int ksreg;
+ int result;
+
+ ksreg = ks8851_phy_reg(reg);
+ if (!ksreg)
+ return 0x0; /* no error return allowed, so use zero */
+
+ mutex_lock(&ks->lock);
+ result = ks8851_rdreg16(ks, ksreg);
+ mutex_unlock(&ks->lock);
+
+ return result;
+}
+
+static void ks8851_phy_write(struct net_device *dev,
+ int phy, int reg, int value)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int ksreg;
+
+ ksreg = ks8851_phy_reg(reg);
+ if (ksreg) {
+ mutex_lock(&ks->lock);
+ ks8851_wrreg16(ks, ksreg, value);
+ mutex_unlock(&ks->lock);
+ }
+}
+
+/**
+ * ks8851_read_selftest - read the selftest memory info.
+ * @ks: The device state
+ *
+ * Read and check the TX/RX memory selftest information.
+ */
+static int ks8851_read_selftest(struct ks8851_net *ks)
+{
+ unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
+ int ret = 0;
+ unsigned rd;
+
+ rd = ks8851_rdreg16(ks, KS_MBIR);
+
+ if ((rd & both_done) != both_done) {
+ netdev_warn(ks->netdev, "Memory selftest not finished\n");
+ return 0;
+ }
+
+ if (rd & MBIR_TXMBFA) {
+ netdev_err(ks->netdev, "TX memory selftest fail\n");
+ ret |= 1;
+ }
+
+ if (rd & MBIR_RXMBFA) {
+ netdev_err(ks->netdev, "RX memory selftest fail\n");
+ ret |= 2;
+ }
+
+ return ret;
+}
+
+/* driver bus management functions */
+
+#ifdef CONFIG_PM
+static int ks8851_suspend(struct spi_device *spi, pm_message_t state)
+{
+ struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
+ struct net_device *dev = ks->netdev;
+
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ ks8851_net_stop(dev);
+ }
+
+ return 0;
+}
+
+static int ks8851_resume(struct spi_device *spi)
+{
+ struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
+ struct net_device *dev = ks->netdev;
+
+ if (netif_running(dev)) {
+ ks8851_net_open(dev);
+ netif_device_attach(dev);
+ }
+
+ return 0;
+}
+#else
+#define ks8851_suspend NULL
+#define ks8851_resume NULL
+#endif
+
+static int __devinit ks8851_probe(struct spi_device *spi)
+{
+ struct net_device *ndev;
+ struct ks8851_net *ks;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(struct ks8851_net));
+ if (!ndev) {
+ dev_err(&spi->dev, "failed to alloc ethernet device\n");
+ return -ENOMEM;
+ }
+
+ spi->bits_per_word = 8;
+
+ ks = netdev_priv(ndev);
+
+ ks->netdev = ndev;
+ ks->spidev = spi;
+ ks->tx_space = 6144;
+
+ mutex_init(&ks->lock);
+ spin_lock_init(&ks->statelock);
+
+ INIT_WORK(&ks->tx_work, ks8851_tx_work);
+ INIT_WORK(&ks->irq_work, ks8851_irq_work);
+ INIT_WORK(&ks->rxctrl_work, ks8851_rxctrl_work);
+
+ /* initialise pre-made spi transfer messages */
+
+ spi_message_init(&ks->spi_msg1);
+ spi_message_add_tail(&ks->spi_xfer1, &ks->spi_msg1);
+
+ spi_message_init(&ks->spi_msg2);
+ spi_message_add_tail(&ks->spi_xfer2[0], &ks->spi_msg2);
+ spi_message_add_tail(&ks->spi_xfer2[1], &ks->spi_msg2);
+
+ /* setup mii state */
+ ks->mii.dev = ndev;
+ ks->mii.phy_id = 1;
+ ks->mii.phy_id_mask = 1;
+ ks->mii.reg_num_mask = 0xf;
+ ks->mii.mdio_read = ks8851_phy_read;
+ ks->mii.mdio_write = ks8851_phy_write;
+
+ dev_info(&spi->dev, "message enable is %d\n", msg_enable);
+
+ /* set the default message enable */
+ ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
+ NETIF_MSG_PROBE |
+ NETIF_MSG_LINK));
+
+ skb_queue_head_init(&ks->txq);
+
+ SET_ETHTOOL_OPS(ndev, &ks8851_ethtool_ops);
+ SET_NETDEV_DEV(ndev, &spi->dev);
+
+ dev_set_drvdata(&spi->dev, ks);
+
+ ndev->if_port = IF_PORT_100BASET;
+ ndev->netdev_ops = &ks8851_netdev_ops;
+ ndev->irq = spi->irq;
+
+ /* issue a global soft reset to reset the device. */
+ ks8851_soft_reset(ks, GRR_GSR);
+
+ /* simple check for a valid chip being connected to the bus */
+
+ if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
+ dev_err(&spi->dev, "failed to read device ID\n");
+ ret = -ENODEV;
+ goto err_id;
+ }
+
+ /* cache the contents of the CCR register for EEPROM, etc. */
+ ks->rc_ccr = ks8851_rdreg16(ks, KS_CCR);
+
+ if (ks->rc_ccr & CCR_EEPROM)
+ ks->eeprom_size = 128;
+ else
+ ks->eeprom_size = 0;
+
+ ks8851_read_selftest(ks);
+ ks8851_init_mac(ks);
+
+ ret = request_irq(spi->irq, ks8851_irq, IRQF_TRIGGER_LOW,
+ ndev->name, ks);
+ if (ret < 0) {
+ dev_err(&spi->dev, "failed to get irq\n");
+ goto err_irq;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(&spi->dev, "failed to register network device\n");
+ goto err_netdev;
+ }
+
+ netdev_info(ndev, "revision %d, MAC %pM, IRQ %d\n",
+ CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
+ ndev->dev_addr, ndev->irq);
+
+ return 0;
+
+err_netdev:
+ free_irq(ndev->irq, ndev);
+
+err_id:
+err_irq:
+ free_netdev(ndev);
+ return ret;
+}
+
+static int __devexit ks8851_remove(struct spi_device *spi)
+{
+ struct ks8851_net *priv = dev_get_drvdata(&spi->dev);
+
+ if (netif_msg_drv(priv))
+ dev_info(&spi->dev, "remove\n");
+
+ unregister_netdev(priv->netdev);
+ free_irq(spi->irq, priv);
+ free_netdev(priv->netdev);
+
+ return 0;
+}
+
+static struct spi_driver ks8851_driver = {
+ .driver = {
+ .name = "ks8851",
+ .owner = THIS_MODULE,
+ },
+ .probe = ks8851_probe,
+ .remove = __devexit_p(ks8851_remove),
+ .suspend = ks8851_suspend,
+ .resume = ks8851_resume,
+};
+
+static int __init ks8851_init(void)
+{
+ return spi_register_driver(&ks8851_driver);
+}
+
+static void __exit ks8851_exit(void)
+{
+ spi_unregister_driver(&ks8851_driver);
+}
+
+module_init(ks8851_init);
+module_exit(ks8851_exit);
+
+MODULE_DESCRIPTION("KS8851 Network driver");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL");
+
+module_param_named(message, msg_enable, int, 0);
+MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
+MODULE_ALIAS("spi:ks8851");
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
new file mode 100644
index 000000000000..537fb06e5932
--- /dev/null
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -0,0 +1,309 @@
+/* drivers/net/ks8851.h
+ *
+ * Copyright 2009 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * KS8851 register definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#define KS_CCR 0x08
+#define CCR_EEPROM (1 << 9)
+#define CCR_SPI (1 << 8)
+#define CCR_32PIN (1 << 0)
+
+/* MAC address registers */
+#define KS_MAR(_m) (0x15 - (_m))
+#define KS_MARL 0x10
+#define KS_MARM 0x12
+#define KS_MARH 0x14
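
Note the reversed mapping encoded by KS_MAR(): a quick worked example, assuming byte-at-a-time writes of dev_addr[] (which this hunk does not show):

	/*
	 * KS_MAR(0) == 0x15: high byte of MARH <- dev_addr[0]
	 * KS_MAR(5) == 0x10: low byte of MARL  <- dev_addr[5]
	 */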
+
+#define KS_OBCR 0x20
+#define OBCR_ODS_16mA (1 << 6)
+
+#define KS_EEPCR 0x22
+#define EEPCR_EESRWA (1 << 5)
+#define EEPCR_EESA (1 << 4)
+#define EEPCR_EESB_OFFSET 3
+#define EEPCR_EESB (1 << EEPCR_EESB_OFFSET)
+#define EEPCR_EEDO (1 << 2)
+#define EEPCR_EESCK (1 << 1)
+#define EEPCR_EECS (1 << 0)
+
+#define EEPROM_OP_LEN 3 /* bits */
+#define EEPROM_OP_READ 0x06
+#define EEPROM_OP_EWEN 0x04
+#define EEPROM_OP_WRITE 0x05
+#define EEPROM_OP_EWDS 0x14
+
+#define EEPROM_DATA_LEN 16 /* 16 bits EEPROM */
+#define EEPROM_WRITE_TIME 4 /* wrt ack time in ms */
+#define EEPROM_SK_PERIOD 400 /* in us */
+
+#define KS_MBIR 0x24
+#define MBIR_TXMBF (1 << 12)
+#define MBIR_TXMBFA (1 << 11)
+#define MBIR_RXMBF (1 << 4)
+#define MBIR_RXMBFA (1 << 3)
+
+#define KS_GRR 0x26
+#define GRR_QMU (1 << 1)
+#define GRR_GSR (1 << 0)
+
+#define KS_WFCR 0x2A
+#define WFCR_MPRXE (1 << 7)
+#define WFCR_WF3E (1 << 3)
+#define WFCR_WF2E (1 << 2)
+#define WFCR_WF1E (1 << 1)
+#define WFCR_WF0E (1 << 0)
+
+#define KS_WF0CRC0 0x30
+#define KS_WF0CRC1 0x32
+#define KS_WF0BM0 0x34
+#define KS_WF0BM1 0x36
+#define KS_WF0BM2 0x38
+#define KS_WF0BM3 0x3A
+
+#define KS_WF1CRC0 0x40
+#define KS_WF1CRC1 0x42
+#define KS_WF1BM0 0x44
+#define KS_WF1BM1 0x46
+#define KS_WF1BM2 0x48
+#define KS_WF1BM3 0x4A
+
+#define KS_WF2CRC0 0x50
+#define KS_WF2CRC1 0x52
+#define KS_WF2BM0 0x54
+#define KS_WF2BM1 0x56
+#define KS_WF2BM2 0x58
+#define KS_WF2BM3 0x5A
+
+#define KS_WF3CRC0 0x60
+#define KS_WF3CRC1 0x62
+#define KS_WF3BM0 0x64
+#define KS_WF3BM1 0x66
+#define KS_WF3BM2 0x68
+#define KS_WF3BM3 0x6A
+
+#define KS_TXCR 0x70
+#define TXCR_TCGICMP (1 << 8)
+#define TXCR_TCGUDP (1 << 7)
+#define TXCR_TCGTCP (1 << 6)
+#define TXCR_TCGIP (1 << 5)
+#define TXCR_FTXQ (1 << 4)
+#define TXCR_TXFCE (1 << 3)
+#define TXCR_TXPE (1 << 2)
+#define TXCR_TXCRC (1 << 1)
+#define TXCR_TXE (1 << 0)
+
+#define KS_TXSR 0x72
+#define TXSR_TXLC (1 << 13)
+#define TXSR_TXMC (1 << 12)
+#define TXSR_TXFID_MASK (0x3f << 0)
+#define TXSR_TXFID_SHIFT (0)
+#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)
+
+#define KS_RXCR1 0x74
+#define RXCR1_FRXQ (1 << 15)
+#define RXCR1_RXUDPFCC (1 << 14)
+#define RXCR1_RXTCPFCC (1 << 13)
+#define RXCR1_RXIPFCC (1 << 12)
+#define RXCR1_RXPAFMA (1 << 11)
+#define RXCR1_RXFCE (1 << 10)
+#define RXCR1_RXEFE (1 << 9)
+#define RXCR1_RXMAFMA (1 << 8)
+#define RXCR1_RXBE (1 << 7)
+#define RXCR1_RXME (1 << 6)
+#define RXCR1_RXUE (1 << 5)
+#define RXCR1_RXAE (1 << 4)
+#define RXCR1_RXINVF (1 << 1)
+#define RXCR1_RXE (1 << 0)
+
+#define KS_RXCR2 0x76
+#define RXCR2_SRDBL_MASK (0x7 << 5)
+#define RXCR2_SRDBL_SHIFT (5)
+#define RXCR2_SRDBL_4B (0x0 << 5)
+#define RXCR2_SRDBL_8B (0x1 << 5)
+#define RXCR2_SRDBL_16B (0x2 << 5)
+#define RXCR2_SRDBL_32B (0x3 << 5)
+#define RXCR2_SRDBL_FRAME (0x4 << 5)
+#define RXCR2_IUFFP (1 << 4)
+#define RXCR2_RXIUFCEZ (1 << 3)
+#define RXCR2_UDPLFE (1 << 2)
+#define RXCR2_RXICMPFCC (1 << 1)
+#define RXCR2_RXSAF (1 << 0)
+
+#define KS_TXMIR 0x78
+
+#define KS_RXFHSR 0x7C
+#define RXFSHR_RXFV (1 << 15)
+#define RXFSHR_RXICMPFCS (1 << 13)
+#define RXFSHR_RXIPFCS (1 << 12)
+#define RXFSHR_RXTCPFCS (1 << 11)
+#define RXFSHR_RXUDPFCS (1 << 10)
+#define RXFSHR_RXBF (1 << 7)
+#define RXFSHR_RXMF (1 << 6)
+#define RXFSHR_RXUF (1 << 5)
+#define RXFSHR_RXMR (1 << 4)
+#define RXFSHR_RXFT (1 << 3)
+#define RXFSHR_RXFTL (1 << 2)
+#define RXFSHR_RXRF (1 << 1)
+#define RXFSHR_RXCE (1 << 0)
+
+#define KS_RXFHBCR 0x7E
+#define KS_TXQCR 0x80
+#define TXQCR_AETFE (1 << 2)
+#define TXQCR_TXQMAM (1 << 1)
+#define TXQCR_METFE (1 << 0)
+
+#define KS_RXQCR 0x82
+#define RXQCR_RXDTTS (1 << 12)
+#define RXQCR_RXDBCTS (1 << 11)
+#define RXQCR_RXFCTS (1 << 10)
+#define RXQCR_RXIPHTOE (1 << 9)
+#define RXQCR_RXDTTE (1 << 7)
+#define RXQCR_RXDBCTE (1 << 6)
+#define RXQCR_RXFCTE (1 << 5)
+#define RXQCR_ADRFE (1 << 4)
+#define RXQCR_SDA (1 << 3)
+#define RXQCR_RRXEF (1 << 0)
+
+#define KS_TXFDPR 0x84
+#define TXFDPR_TXFPAI (1 << 14)
+#define TXFDPR_TXFP_MASK (0x7ff << 0)
+#define TXFDPR_TXFP_SHIFT (0)
+
+#define KS_RXFDPR 0x86
+#define RXFDPR_RXFPAI (1 << 14)
+
+#define KS_RXDTTR 0x8C
+#define KS_RXDBCTR 0x8E
+
+#define KS_IER 0x90
+#define KS_ISR 0x92
+#define IRQ_LCI (1 << 15)
+#define IRQ_TXI (1 << 14)
+#define IRQ_RXI (1 << 13)
+#define IRQ_RXOI (1 << 11)
+#define IRQ_TXPSI (1 << 9)
+#define IRQ_RXPSI (1 << 8)
+#define IRQ_TXSAI (1 << 6)
+#define IRQ_RXWFDI (1 << 5)
+#define IRQ_RXMPDI (1 << 4)
+#define IRQ_LDI (1 << 3)
+#define IRQ_EDI (1 << 2)
+#define IRQ_SPIBEI (1 << 1)
+#define IRQ_DEDI (1 << 0)
+
+#define KS_RXFCTR 0x9C
+#define KS_RXFC 0x9D
+#define RXFCTR_RXFC_MASK (0xff << 8)
+#define RXFCTR_RXFC_SHIFT (8)
+#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff)
+#define RXFCTR_RXFCT_MASK (0xff << 0)
+#define RXFCTR_RXFCT_SHIFT (0)
+
+#define KS_TXNTFSR 0x9E
+
+#define KS_MAHTR0 0xA0
+#define KS_MAHTR1 0xA2
+#define KS_MAHTR2 0xA4
+#define KS_MAHTR3 0xA6
+
+#define KS_FCLWR 0xB0
+#define KS_FCHWR 0xB2
+#define KS_FCOWR 0xB4
+
+#define KS_CIDER 0xC0
+#define CIDER_ID 0x8870
+#define CIDER_REV_MASK (0x7 << 1)
+#define CIDER_REV_SHIFT (1)
+#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7)
+
+#define KS_CGCR 0xC6
+
+#define KS_IACR 0xC8
+#define IACR_RDEN (1 << 12)
+#define IACR_TSEL_MASK (0x3 << 10)
+#define IACR_TSEL_SHIFT (10)
+#define IACR_TSEL_MIB (0x3 << 10)
+#define IACR_ADDR_MASK (0x1f << 0)
+#define IACR_ADDR_SHIFT (0)
+
+#define KS_IADLR 0xD0
+#define KS_IAHDR 0xD2
+
+#define KS_PMECR 0xD4
+#define PMECR_PME_DELAY (1 << 14)
+#define PMECR_PME_POL (1 << 12)
+#define PMECR_WOL_WAKEUP (1 << 11)
+#define PMECR_WOL_MAGICPKT (1 << 10)
+#define PMECR_WOL_LINKUP (1 << 9)
+#define PMECR_WOL_ENERGY (1 << 8)
+#define PMECR_AUTO_WAKE_EN (1 << 7)
+#define PMECR_WAKEUP_NORMAL (1 << 6)
+#define PMECR_WKEVT_MASK (0xf << 2)
+#define PMECR_WKEVT_SHIFT (2)
+#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf)
+#define PMECR_WKEVT_ENERGY (0x1 << 2)
+#define PMECR_WKEVT_LINK (0x2 << 2)
+#define PMECR_WKEVT_MAGICPKT (0x4 << 2)
+#define PMECR_WKEVT_FRAME (0x8 << 2)
+#define PMECR_PM_MASK (0x3 << 0)
+#define PMECR_PM_SHIFT (0)
+#define PMECR_PM_NORMAL (0x0 << 0)
+#define PMECR_PM_ENERGY (0x1 << 0)
+#define PMECR_PM_SOFTDOWN (0x2 << 0)
+#define PMECR_PM_POWERSAVE (0x3 << 0)
+
+/* Standard MII PHY data */
+#define KS_P1MBCR 0xE4
+#define KS_P1MBSR 0xE6
+#define KS_PHY1ILR 0xE8
+#define KS_PHY1IHR 0xEA
+#define KS_P1ANAR 0xEC
+#define KS_P1ANLPR 0xEE
+
+#define KS_P1SCLMD 0xF4
+#define P1SCLMD_LEDOFF (1 << 15)
+#define P1SCLMD_TXIDS (1 << 14)
+#define P1SCLMD_RESTARTAN (1 << 13)
+#define P1SCLMD_DISAUTOMDIX (1 << 10)
+#define P1SCLMD_FORCEMDIX (1 << 9)
+#define P1SCLMD_AUTONEGEN (1 << 7)
+#define P1SCLMD_FORCE100 (1 << 6)
+#define P1SCLMD_FORCEFDX (1 << 5)
+#define P1SCLMD_ADV_FLOW (1 << 4)
+#define P1SCLMD_ADV_100BT_FDX (1 << 3)
+#define P1SCLMD_ADV_100BT_HDX (1 << 2)
+#define P1SCLMD_ADV_10BT_FDX (1 << 1)
+#define P1SCLMD_ADV_10BT_HDX (1 << 0)
+
+#define KS_P1CR 0xF6
+#define P1CR_HP_MDIX (1 << 15)
+#define P1CR_REV_POL (1 << 13)
+#define P1CR_OP_100M (1 << 10)
+#define P1CR_OP_FDX (1 << 9)
+#define P1CR_OP_MDI (1 << 7)
+#define P1CR_AN_DONE (1 << 6)
+#define P1CR_LINK_GOOD (1 << 5)
+#define P1CR_PNTR_FLOW (1 << 4)
+#define P1CR_PNTR_100BT_FDX (1 << 3)
+#define P1CR_PNTR_100BT_HDX (1 << 2)
+#define P1CR_PNTR_10BT_FDX (1 << 1)
+#define P1CR_PNTR_10BT_HDX (1 << 0)
+
+/* TX Frame control */
+
+#define TXFR_TXIC (1 << 15)
+#define TXFR_TXFID_MASK (0x3f << 0)
+#define TXFR_TXFID_SHIFT (0)
+
+/* SPI frame opcodes */
+#define KS_SPIOP_RD (0x00)
+#define KS_SPIOP_WR (0x40)
+#define KS_SPIOP_RXFIFO (0x80)
+#define KS_SPIOP_TXFIFO (0xC0)
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
new file mode 100644
index 000000000000..d19c849059d8
--- /dev/null
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -0,0 +1,1680 @@
+/**
+ * drivers/net/ks8851_mll.c
+ * Copyright (c) 2009 Micrel Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/**
+ * Supports:
+ * KS8851 16bit MLL chip from Micrel Inc.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/cache.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+#define DRV_NAME "ks8851_mll"
+
+static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
+#define MAX_RECV_FRAMES 32
+#define MAX_BUF_SIZE 2048
+#define TX_BUF_SIZE 2000
+#define RX_BUF_SIZE 2000
+
+#define KS_CCR 0x08
+#define CCR_EEPROM (1 << 9)
+#define CCR_SPI (1 << 8)
+#define CCR_8BIT (1 << 7)
+#define CCR_16BIT (1 << 6)
+#define CCR_32BIT (1 << 5)
+#define CCR_SHARED (1 << 4)
+#define CCR_32PIN (1 << 0)
+
+/* MAC address registers */
+#define KS_MARL 0x10
+#define KS_MARM 0x12
+#define KS_MARH 0x14
+
+#define KS_OBCR 0x20
+#define OBCR_ODS_16MA (1 << 6)
+
+#define KS_EEPCR 0x22
+#define EEPCR_EESA (1 << 4)
+#define EEPCR_EESB (1 << 3)
+#define EEPCR_EEDO (1 << 2)
+#define EEPCR_EESCK (1 << 1)
+#define EEPCR_EECS (1 << 0)
+
+#define KS_MBIR 0x24
+#define MBIR_TXMBF (1 << 12)
+#define MBIR_TXMBFA (1 << 11)
+#define MBIR_RXMBF (1 << 4)
+#define MBIR_RXMBFA (1 << 3)
+
+#define KS_GRR 0x26
+#define GRR_QMU (1 << 1)
+#define GRR_GSR (1 << 0)
+
+#define KS_WFCR 0x2A
+#define WFCR_MPRXE (1 << 7)
+#define WFCR_WF3E (1 << 3)
+#define WFCR_WF2E (1 << 2)
+#define WFCR_WF1E (1 << 1)
+#define WFCR_WF0E (1 << 0)
+
+#define KS_WF0CRC0 0x30
+#define KS_WF0CRC1 0x32
+#define KS_WF0BM0 0x34
+#define KS_WF0BM1 0x36
+#define KS_WF0BM2 0x38
+#define KS_WF0BM3 0x3A
+
+#define KS_WF1CRC0 0x40
+#define KS_WF1CRC1 0x42
+#define KS_WF1BM0 0x44
+#define KS_WF1BM1 0x46
+#define KS_WF1BM2 0x48
+#define KS_WF1BM3 0x4A
+
+#define KS_WF2CRC0 0x50
+#define KS_WF2CRC1 0x52
+#define KS_WF2BM0 0x54
+#define KS_WF2BM1 0x56
+#define KS_WF2BM2 0x58
+#define KS_WF2BM3 0x5A
+
+#define KS_WF3CRC0 0x60
+#define KS_WF3CRC1 0x62
+#define KS_WF3BM0 0x64
+#define KS_WF3BM1 0x66
+#define KS_WF3BM2 0x68
+#define KS_WF3BM3 0x6A
+
+#define KS_TXCR 0x70
+#define TXCR_TCGICMP (1 << 8)
+#define TXCR_TCGUDP (1 << 7)
+#define TXCR_TCGTCP (1 << 6)
+#define TXCR_TCGIP (1 << 5)
+#define TXCR_FTXQ (1 << 4)
+#define TXCR_TXFCE (1 << 3)
+#define TXCR_TXPE (1 << 2)
+#define TXCR_TXCRC (1 << 1)
+#define TXCR_TXE (1 << 0)
+
+#define KS_TXSR 0x72
+#define TXSR_TXLC (1 << 13)
+#define TXSR_TXMC (1 << 12)
+#define TXSR_TXFID_MASK (0x3f << 0)
+#define TXSR_TXFID_SHIFT (0)
+#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)
+
+
+#define KS_RXCR1 0x74
+#define RXCR1_FRXQ (1 << 15)
+#define RXCR1_RXUDPFCC (1 << 14)
+#define RXCR1_RXTCPFCC (1 << 13)
+#define RXCR1_RXIPFCC (1 << 12)
+#define RXCR1_RXPAFMA (1 << 11)
+#define RXCR1_RXFCE (1 << 10)
+#define RXCR1_RXEFE (1 << 9)
+#define RXCR1_RXMAFMA (1 << 8)
+#define RXCR1_RXBE (1 << 7)
+#define RXCR1_RXME (1 << 6)
+#define RXCR1_RXUE (1 << 5)
+#define RXCR1_RXAE (1 << 4)
+#define RXCR1_RXINVF (1 << 1)
+#define RXCR1_RXE (1 << 0)
+#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
+ RXCR1_RXMAFMA | RXCR1_RXPAFMA)
+
+#define KS_RXCR2 0x76
+#define RXCR2_SRDBL_MASK (0x7 << 5)
+#define RXCR2_SRDBL_SHIFT (5)
+#define RXCR2_SRDBL_4B (0x0 << 5)
+#define RXCR2_SRDBL_8B (0x1 << 5)
+#define RXCR2_SRDBL_16B (0x2 << 5)
+#define RXCR2_SRDBL_32B (0x3 << 5)
+/* #define RXCR2_SRDBL_FRAME (0x4 << 5) */
+#define RXCR2_IUFFP (1 << 4)
+#define RXCR2_RXIUFCEZ (1 << 3)
+#define RXCR2_UDPLFE (1 << 2)
+#define RXCR2_RXICMPFCC (1 << 1)
+#define RXCR2_RXSAF (1 << 0)
+
+#define KS_TXMIR 0x78
+
+#define KS_RXFHSR 0x7C
+#define RXFSHR_RXFV (1 << 15)
+#define RXFSHR_RXICMPFCS (1 << 13)
+#define RXFSHR_RXIPFCS (1 << 12)
+#define RXFSHR_RXTCPFCS (1 << 11)
+#define RXFSHR_RXUDPFCS (1 << 10)
+#define RXFSHR_RXBF (1 << 7)
+#define RXFSHR_RXMF (1 << 6)
+#define RXFSHR_RXUF (1 << 5)
+#define RXFSHR_RXMR (1 << 4)
+#define RXFSHR_RXFT (1 << 3)
+#define RXFSHR_RXFTL (1 << 2)
+#define RXFSHR_RXRF (1 << 1)
+#define RXFSHR_RXCE (1 << 0)
+#define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\
+ RXFSHR_RXFTL | RXFSHR_RXMR |\
+ RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
+ RXFSHR_RXTCPFCS)
+#define KS_RXFHBCR 0x7E
+#define RXFHBCR_CNT_MASK 0x0FFF
+
+#define KS_TXQCR 0x80
+#define TXQCR_AETFE (1 << 2)
+#define TXQCR_TXQMAM (1 << 1)
+#define TXQCR_METFE (1 << 0)
+
+#define KS_RXQCR 0x82
+#define RXQCR_RXDTTS (1 << 12)
+#define RXQCR_RXDBCTS (1 << 11)
+#define RXQCR_RXFCTS (1 << 10)
+#define RXQCR_RXIPHTOE (1 << 9)
+#define RXQCR_RXDTTE (1 << 7)
+#define RXQCR_RXDBCTE (1 << 6)
+#define RXQCR_RXFCTE (1 << 5)
+#define RXQCR_ADRFE (1 << 4)
+#define RXQCR_SDA (1 << 3)
+#define RXQCR_RRXEF (1 << 0)
+#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)
+
+#define KS_TXFDPR 0x84
+#define TXFDPR_TXFPAI (1 << 14)
+#define TXFDPR_TXFP_MASK (0x7ff << 0)
+#define TXFDPR_TXFP_SHIFT (0)
+
+#define KS_RXFDPR 0x86
+#define RXFDPR_RXFPAI (1 << 14)
+
+#define KS_RXDTTR 0x8C
+#define KS_RXDBCTR 0x8E
+
+#define KS_IER 0x90
+#define KS_ISR 0x92
+#define IRQ_LCI (1 << 15)
+#define IRQ_TXI (1 << 14)
+#define IRQ_RXI (1 << 13)
+#define IRQ_RXOI (1 << 11)
+#define IRQ_TXPSI (1 << 9)
+#define IRQ_RXPSI (1 << 8)
+#define IRQ_TXSAI (1 << 6)
+#define IRQ_RXWFDI (1 << 5)
+#define IRQ_RXMPDI (1 << 4)
+#define IRQ_LDI (1 << 3)
+#define IRQ_EDI (1 << 2)
+#define IRQ_SPIBEI (1 << 1)
+#define IRQ_DEDI (1 << 0)
+
+#define KS_RXFCTR 0x9C
+#define RXFCTR_THRESHOLD_MASK 0x00FF
+
+#define KS_RXFC 0x9D
+#define RXFCTR_RXFC_MASK (0xff << 8)
+#define RXFCTR_RXFC_SHIFT (8)
+#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff)
+#define RXFCTR_RXFCT_MASK (0xff << 0)
+#define RXFCTR_RXFCT_SHIFT (0)
+
+#define KS_TXNTFSR 0x9E
+
+#define KS_MAHTR0 0xA0
+#define KS_MAHTR1 0xA2
+#define KS_MAHTR2 0xA4
+#define KS_MAHTR3 0xA6
+
+#define KS_FCLWR 0xB0
+#define KS_FCHWR 0xB2
+#define KS_FCOWR 0xB4
+
+#define KS_CIDER 0xC0
+#define CIDER_ID 0x8870
+#define CIDER_REV_MASK (0x7 << 1)
+#define CIDER_REV_SHIFT (1)
+#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7)
+
+#define KS_CGCR 0xC6
+#define KS_IACR 0xC8
+#define IACR_RDEN (1 << 12)
+#define IACR_TSEL_MASK (0x3 << 10)
+#define IACR_TSEL_SHIFT (10)
+#define IACR_TSEL_MIB (0x3 << 10)
+#define IACR_ADDR_MASK (0x1f << 0)
+#define IACR_ADDR_SHIFT (0)
+
+#define KS_IADLR 0xD0
+#define KS_IAHDR 0xD2
+
+#define KS_PMECR 0xD4
+#define PMECR_PME_DELAY (1 << 14)
+#define PMECR_PME_POL (1 << 12)
+#define PMECR_WOL_WAKEUP (1 << 11)
+#define PMECR_WOL_MAGICPKT (1 << 10)
+#define PMECR_WOL_LINKUP (1 << 9)
+#define PMECR_WOL_ENERGY (1 << 8)
+#define PMECR_AUTO_WAKE_EN (1 << 7)
+#define PMECR_WAKEUP_NORMAL (1 << 6)
+#define PMECR_WKEVT_MASK (0xf << 2)
+#define PMECR_WKEVT_SHIFT (2)
+#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf)
+#define PMECR_WKEVT_ENERGY (0x1 << 2)
+#define PMECR_WKEVT_LINK (0x2 << 2)
+#define PMECR_WKEVT_MAGICPKT (0x4 << 2)
+#define PMECR_WKEVT_FRAME (0x8 << 2)
+#define PMECR_PM_MASK (0x3 << 0)
+#define PMECR_PM_SHIFT (0)
+#define PMECR_PM_NORMAL (0x0 << 0)
+#define PMECR_PM_ENERGY (0x1 << 0)
+#define PMECR_PM_SOFTDOWN (0x2 << 0)
+#define PMECR_PM_POWERSAVE (0x3 << 0)
+
+/* Standard MII PHY data */
+#define KS_P1MBCR 0xE4
+#define P1MBCR_FORCE_FDX (1 << 8)
+
+#define KS_P1MBSR 0xE6
+#define P1MBSR_AN_COMPLETE (1 << 5)
+#define P1MBSR_AN_CAPABLE (1 << 3)
+#define P1MBSR_LINK_UP (1 << 2)
+
+#define KS_PHY1ILR 0xE8
+#define KS_PHY1IHR 0xEA
+#define KS_P1ANAR 0xEC
+#define KS_P1ANLPR 0xEE
+
+#define KS_P1SCLMD 0xF4
+#define P1SCLMD_LEDOFF (1 << 15)
+#define P1SCLMD_TXIDS (1 << 14)
+#define P1SCLMD_RESTARTAN (1 << 13)
+#define P1SCLMD_DISAUTOMDIX (1 << 10)
+#define P1SCLMD_FORCEMDIX (1 << 9)
+#define P1SCLMD_AUTONEGEN (1 << 7)
+#define P1SCLMD_FORCE100 (1 << 6)
+#define P1SCLMD_FORCEFDX (1 << 5)
+#define P1SCLMD_ADV_FLOW (1 << 4)
+#define P1SCLMD_ADV_100BT_FDX (1 << 3)
+#define P1SCLMD_ADV_100BT_HDX (1 << 2)
+#define P1SCLMD_ADV_10BT_FDX (1 << 1)
+#define P1SCLMD_ADV_10BT_HDX (1 << 0)
+
+#define KS_P1CR 0xF6
+#define P1CR_HP_MDIX (1 << 15)
+#define P1CR_REV_POL (1 << 13)
+#define P1CR_OP_100M (1 << 10)
+#define P1CR_OP_FDX (1 << 9)
+#define P1CR_OP_MDI (1 << 7)
+#define P1CR_AN_DONE (1 << 6)
+#define P1CR_LINK_GOOD (1 << 5)
+#define P1CR_PNTR_FLOW (1 << 4)
+#define P1CR_PNTR_100BT_FDX (1 << 3)
+#define P1CR_PNTR_100BT_HDX (1 << 2)
+#define P1CR_PNTR_10BT_FDX (1 << 1)
+#define P1CR_PNTR_10BT_HDX (1 << 0)
+
+/* TX Frame control */
+
+#define TXFR_TXIC (1 << 15)
+#define TXFR_TXFID_MASK (0x3f << 0)
+#define TXFR_TXFID_SHIFT (0)
+
+#define KS_P1SR 0xF8
+#define P1SR_HP_MDIX (1 << 15)
+#define P1SR_REV_POL (1 << 13)
+#define P1SR_OP_100M (1 << 10)
+#define P1SR_OP_FDX (1 << 9)
+#define P1SR_OP_MDI (1 << 7)
+#define P1SR_AN_DONE (1 << 6)
+#define P1SR_LINK_GOOD (1 << 5)
+#define P1SR_PNTR_FLOW (1 << 4)
+#define P1SR_PNTR_100BT_FDX (1 << 3)
+#define P1SR_PNTR_100BT_HDX (1 << 2)
+#define P1SR_PNTR_10BT_FDX (1 << 1)
+#define P1SR_PNTR_10BT_HDX (1 << 0)
+
+#define ENUM_BUS_NONE 0
+#define ENUM_BUS_8BIT 1
+#define ENUM_BUS_16BIT 2
+#define ENUM_BUS_32BIT 3
+
+#define MAX_MCAST_LST 32
+#define HW_MCAST_SIZE 8
+
+/**
+ * union ks_tx_hdr - tx header data
+ * @txb: The header as bytes
+ * @txw: The header as 16bit, little-endian words
+ *
+ * A dual representation of the tx header data to allow
+ * access to individual bytes, and to allow 16bit accesses
+ * with 16bit alignment.
+ */
+union ks_tx_hdr {
+ u8 txb[4];
+ __le16 txw[2];
+};
+
+/**
+ * struct ks_net - KS8851 driver private data
+ * @net_device : The network device we're bound to
+ * @hw_addr : start address of data register.
+ * @hw_addr_cmd : start address of command register.
+ * @txh : temporary buffer used to save the status/length header.
+ * @lock : Lock to ensure that the device is not accessed when busy.
+ * @pdev : Pointer to platform device.
+ * @mii : The MII state information for the mii calls.
+ * @frame_head_info : frame header information for multi-pkt rx.
+ * @statelock : Lock on this structure for tx list.
+ * @msg_enable : The message flags controlling driver output (see ethtool).
+ * @frame_cnt : number of frames received.
+ * @bus_width : i/o bus width.
+ * @irq : irq number assigned to this device.
+ * @rc_rxqcr : Cached copy of KS_RXQCR.
+ * @rc_txcr : Cached copy of KS_TXCR.
+ * @rc_ier : Cached copy of KS_IER.
+ * @sharedbus : Multiplexed (addr and data bus) mode indicator.
+ * @cmd_reg_cache : cached copy of the command register.
+ * @cmd_reg_cache_int : cached copy of the command register, used in the irq handler.
+ * @promiscuous : promiscuous mode indicator.
+ * @all_mcast : all-multicast mode indicator.
+ * @mcast_lst_size : size of multicast list.
+ * @mcast_lst : multicast list.
+ * @mcast_bits : multicast hash-table bits enabled.
+ * @mac_addr : MAC address assigned to this device.
+ * @fid : frame id.
+ * @extra_byte : number of extra bytes prepended to each rx pkt.
+ * @enabled : indicator that the device is running.
+ *
+ * The @lock ensures that the chip is protected when certain operations are
+ * in progress. When the read or write packet transfer is in progress, most
+ * of the chip registers are not accessible until the transfer is finished and
+ * the DMA has been de-asserted.
+ *
+ * The @statelock is used to protect information in the structure which may
+ * need to be accessed via several sources, such as the network driver layer
+ * or one of the work queues.
+ *
+ */
+
+/* Receive multiplex framer header info */
+struct type_frame_head {
+ u16 sts; /* Frame status */
+ u16 len; /* Byte count */
+};
+
+struct ks_net {
+ struct net_device *netdev;
+ void __iomem *hw_addr;
+ void __iomem *hw_addr_cmd;
+ union ks_tx_hdr txh ____cacheline_aligned;
+ struct mutex lock; /* lock to ensure the device is not accessed when busy */
+ struct platform_device *pdev;
+ struct mii_if_info mii;
+ struct type_frame_head *frame_head_info;
+ spinlock_t statelock;
+ u32 msg_enable;
+ u32 frame_cnt;
+ int bus_width;
+ int irq;
+
+ u16 rc_rxqcr;
+ u16 rc_txcr;
+ u16 rc_ier;
+ u16 sharedbus;
+ u16 cmd_reg_cache;
+ u16 cmd_reg_cache_int;
+ u16 promiscuous;
+ u16 all_mcast;
+ u16 mcast_lst_size;
+ u8 mcast_lst[MAX_MCAST_LST][ETH_ALEN];
+ u8 mcast_bits[HW_MCAST_SIZE];
+ u8 mac_addr[6];
+ u8 fid;
+ u8 extra_byte;
+ u8 enabled;
+};
+
+static int msg_enable;
+
+#define BE3 0x8000 /* Byte Enable 3 */
+#define BE2 0x4000 /* Byte Enable 2 */
+#define BE1 0x2000 /* Byte Enable 1 */
+#define BE0 0x1000 /* Byte Enable 0 */
+
+/**
+ * register read/write calls.
+ *
+ * All these calls issue transactions to access the chip's registers. They
+ * all require that the necessary lock is held to prevent accesses when the
+ * chip is busy transferring packet data (RX/TX FIFO accesses).
+ */
+
+/**
+ * ks_rdreg8 - read 8 bit register from device
+ * @ks : The chip information
+ * @offset: The register address
+ *
+ * Read an 8-bit register from the chip, returning the result
+ */
+static u8 ks_rdreg8(struct ks_net *ks, int offset)
+{
+ u16 data;
+ u8 shift_bit = offset & 0x03;
+ u8 shift_data = (offset & 1) << 3;
+ ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
+ iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+ data = ioread16(ks->hw_addr);
+ return (u8)(data >> shift_data);
+}
+
+/**
+ * ks_rdreg16 - read 16 bit register from device
+ * @ks : The chip information
+ * @offset: The register address
+ *
+ * Read a 16-bit register from the chip, returning the result
+ */
+
+static u16 ks_rdreg16(struct ks_net *ks, int offset)
+{
+ ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
+ iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+ return ioread16(ks->hw_addr);
+}
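
The command word written to the address latch packs the register offset together with byte-enable bits; for a 16-bit access, (BE1 | BE0) shifted by (offset & 0x02) lands on either the low or the high 16-bit lane of the chip's internal 32-bit register. A standalone sketch of the encoding:

	#include <stdint.h>

	#define DEMO_BE0	0x1000
	#define DEMO_BE1	0x2000

	/* command word for a 16-bit access at a given register offset */
	static uint16_t ks_cmd16_demo(int offset)
	{
		/* offset & 0x02 is 0 or 2: the enables land on BE1|BE0
		 * (0x3000, low word) or BE3|BE2 (0xC000, high word)
		 */
		return (uint16_t)offset | ((DEMO_BE1 | DEMO_BE0) << (offset & 0x02));
	}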
+
+/**
+ * ks_wrreg8 - write 8bit register value to chip
+ * @ks: The chip information
+ * @offset: The register address
+ * @value: The value to write
+ *
+ */
+static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
+{
+ u8 shift_bit = (offset & 0x03);
+ u16 value_write = (u16)(value << ((offset & 1) << 3));
+ ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
+ iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+ iowrite16(value_write, ks->hw_addr);
+}
+
+/**
+ * ks_wrreg16 - write 16bit register value to chip
+ * @ks: The chip information
+ * @offset: The register address
+ * @value: The value to write
+ *
+ */
+
+static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
+{
+ ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
+ iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+ iowrite16(value, ks->hw_addr);
+}
+
+/**
+ * ks_inblk - read a block of data from the QMU. This is called after pseudo-DMA mode is enabled.
+ * @ks: The chip state
+ * @wptr: buffer address to save data
+ * @len: length in byte to read
+ *
+ */
+static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
+{
+ len >>= 1;
+ while (len--)
+ *wptr++ = (u16)ioread16(ks->hw_addr);
+}
+
+/**
+ * ks_outblk - write data to the QMU. This is called after pseudo-DMA mode is enabled.
+ * @ks: The chip information
+ * @wptr: buffer address
+ * @len: length in byte to write
+ *
+ */
+static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
+{
+ len >>= 1;
+ while (len--)
+ iowrite16(*wptr++, ks->hw_addr);
+}
+
+static void ks_disable_int(struct ks_net *ks)
+{
+ ks_wrreg16(ks, KS_IER, 0x0000);
+} /* ks_disable_int */
+
+static void ks_enable_int(struct ks_net *ks)
+{
+ ks_wrreg16(ks, KS_IER, ks->rc_ier);
+} /* ks_enable_int */
+
+/**
+ * ks_tx_fifo_space - return the available hardware buffer size.
+ * @ks: The chip information
+ *
+ */
+static inline u16 ks_tx_fifo_space(struct ks_net *ks)
+{
+ return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
+}
+
+/**
+ * ks_save_cmd_reg - save the command register from the cache.
+ * @ks: The chip information
+ *
+ */
+static inline void ks_save_cmd_reg(struct ks_net *ks)
+{
+ /* The ks8851 MLL has a bug reading back the command register,
+ * so rely on software to cache its content instead.
+ */
+ ks->cmd_reg_cache_int = ks->cmd_reg_cache;
+}
+
+/**
+ * ks_restore_cmd_reg - restore the command register from the cache and
+ * write to hardware register.
+ * @ks: The chip information
+ *
+ */
+static inline void ks_restore_cmd_reg(struct ks_net *ks)
+{
+ ks->cmd_reg_cache = ks->cmd_reg_cache_int;
+ iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+}
+
+/**
+ * ks_set_powermode - set power mode of the device
+ * @ks: The chip information
+ * @pwrmode: The power mode value to write to KS_PMECR.
+ *
+ * Change the power mode of the chip.
+ */
+static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
+{
+ unsigned pmecr;
+
+ netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
+
+ ks_rdreg16(ks, KS_GRR);
+ pmecr = ks_rdreg16(ks, KS_PMECR);
+ pmecr &= ~PMECR_PM_MASK;
+ pmecr |= pwrmode;
+
+ ks_wrreg16(ks, KS_PMECR, pmecr);
+}
+
+/**
+ * ks_read_config - read chip configuration of bus width.
+ * @ks: The chip information
+ *
+ */
+static void ks_read_config(struct ks_net *ks)
+{
+ u16 reg_data = 0;
+
+ /* Regardless of bus width, 8 bit read should always work.*/
+ reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
+ reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
+
+ /* addr/data bus are multiplexed */
+ ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
+
+ /* There is garbage data when reading from the QMU,
+ * depending on the bus width.
+ */
+
+ if (reg_data & CCR_8BIT) {
+ ks->bus_width = ENUM_BUS_8BIT;
+ ks->extra_byte = 1;
+ } else if (reg_data & CCR_16BIT) {
+ ks->bus_width = ENUM_BUS_16BIT;
+ ks->extra_byte = 2;
+ } else {
+ ks->bus_width = ENUM_BUS_32BIT;
+ ks->extra_byte = 4;
+ }
+}
+
+/**
+ * ks_soft_reset - issue one of the soft resets to the device
+ * @ks: The device state.
+ * @op: The bit(s) to set in the GRR
+ *
+ * Issue the relevant soft-reset command to the device's GRR register
+ * specified by @op.
+ *
+ * Note, the delays are in there as a caution to ensure that the reset
+ * has time to take effect and then complete. Since the datasheet does
+ * not currently specify the exact sequence, we have chosen something
+ * that seems to work with our device.
+ */
+static void ks_soft_reset(struct ks_net *ks, unsigned op)
+{
+ /* Disable interrupt first */
+ ks_wrreg16(ks, KS_IER, 0x0000);
+ ks_wrreg16(ks, KS_GRR, op);
+ mdelay(10); /* wait a short time to effect reset */
+ ks_wrreg16(ks, KS_GRR, 0);
+ mdelay(1); /* wait for condition to clear */
+}
+
+void ks_enable_qmu(struct ks_net *ks)
+{
+ u16 w;
+
+ w = ks_rdreg16(ks, KS_TXCR);
+ /* Enables QMU Transmit (TXCR). */
+ ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
+
+ /*
+ * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
+ * Enable
+ */
+
+ w = ks_rdreg16(ks, KS_RXQCR);
+ ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
+
+ /* Enables QMU Receive (RXCR1). */
+ w = ks_rdreg16(ks, KS_RXCR1);
+ ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
+ ks->enabled = true;
+} /* ks_enable_qmu */
+
+static void ks_disable_qmu(struct ks_net *ks)
+{
+ u16 w;
+
+ w = ks_rdreg16(ks, KS_TXCR);
+
+ /* Disables QMU Transmit (TXCR). */
+ w &= ~TXCR_TXE;
+ ks_wrreg16(ks, KS_TXCR, w);
+
+ /* Disables QMU Receive (RXCR1). */
+ w = ks_rdreg16(ks, KS_RXCR1);
+ w &= ~RXCR1_RXE;
+ ks_wrreg16(ks, KS_RXCR1, w);
+
+ ks->enabled = false;
+} /* ks_disable_qmu */
+
+/**
+ * ks_read_qmu - read 1 pkt data from the QMU.
+ * @ks: The chip information
+ * @buf: buffer address to save 1 pkt
+ * @len: Pkt length
+ * Here is the sequence to read 1 pkt:
+ * 1. set pseudo-DMA mode
+ * 2. read prepend data
+ * 3. read pkt data
+ * 4. reset pseudo-DMA mode
+ */
+static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
+{
+ u32 r = ks->extra_byte & 0x1;
+ u32 w = ks->extra_byte - r;
+
+ /* 1. set pseudo-DMA mode */
+ ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
+ ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
+
+ /* 2. read prepend data */
+ /*
+ * read 4 + extra bytes and discard them:
+ * extra_byte dummy bytes, 2 for status, 2 for len
+ */
+
+ /* only an 8-bit bus has the odd dummy byte to discard */
+ if (unlikely(r))
+ ioread8(ks->hw_addr);
+ ks_inblk(ks, buf, w + 2 + 2);
+
+ /* 3. read pkt data */
+ ks_inblk(ks, buf, ALIGN(len, 4));
+
+ /* 4. reset pseudo-DMA mode */
+ ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
+}
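
The discard arithmetic follows directly from the bus width probed in ks_read_config(); an illustrative breakdown of the prefix consumed before the frame data (r = extra_byte & 1, w = extra_byte - r):

	/*
	 *  8-bit bus:  extra_byte = 1 -> ioread8() x1 + ks_inblk(0 + 2 + 2) = 5 bytes
	 *  16-bit bus: extra_byte = 2 -> ks_inblk(2 + 2 + 2)                = 6 bytes
	 *  32-bit bus: extra_byte = 4 -> ks_inblk(4 + 2 + 2)                = 8 bytes
	 *
	 * After the prefix, ALIGN(len, 4) bytes of frame data (the packet
	 * plus its 4-byte CRC) are read into the caller's buffer.
	 */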
+
+/**
+ * ks_rcv - read multiple pkts data from the QMU.
+ * @ks: The chip information
+ * @netdev: The network device being opened.
+ *
+ * Read all of the header information before reading the pkt content.
+ * It is not allowed to fetch only part of the pkts left in the QMU
+ * after the interrupt has been acknowledged.
+ */
+static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
+{
+ u32 i;
+ struct type_frame_head *frame_hdr = ks->frame_head_info;
+ struct sk_buff *skb;
+
+ ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;
+
+ /* read all header information */
+ for (i = 0; i < ks->frame_cnt; i++) {
+ /* Checking Received packet status */
+ frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
+ /* Get packet len from hardware */
+ frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
+ frame_hdr++;
+ }
+
+ frame_hdr = ks->frame_head_info;
+ while (ks->frame_cnt--) {
+ skb = dev_alloc_skb(frame_hdr->len + 16);
+ if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
+ (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
+ skb_reserve(skb, 2);
+ /* read data block including CRC 4 bytes */
+ ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
+ skb_put(skb, frame_hdr->len);
+ skb->protocol = eth_type_trans(skb, netdev);
+ netif_rx(skb);
+ } else {
+ pr_err("%s: err:skb alloc\n", __func__);
+ ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
+ if (skb)
+ dev_kfree_skb_irq(skb);
+ }
+ frame_hdr++;
+ }
+}
+
+/**
+ * ks_update_link_status - link status update.
+ * @netdev: The network device being opened.
+ * @ks: The chip information
+ *
+ */
+
+static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
+{
+ /* check the status of the link */
+ u32 link_up_status;
+ if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
+ netif_carrier_on(netdev);
+ link_up_status = true;
+ } else {
+ netif_carrier_off(netdev);
+ link_up_status = false;
+ }
+ netif_dbg(ks, link, ks->netdev,
+ "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
+}
+
+/**
+ * ks_irq - device interrupt handler
+ * @irq: The interrupt number passed from the IRQ core.
+ * @pw: The private word passed to request_irq(), our struct net_device.
+ *
+ * This is the handler invoked to find out what happened
+ *
+ * Read the interrupt status, work out what needs to be done and then clear
+ * any of the interrupts that are not needed.
+ */
+
+static irqreturn_t ks_irq(int irq, void *pw)
+{
+ struct net_device *netdev = pw;
+ struct ks_net *ks = netdev_priv(netdev);
+ u16 status;
+
+ /* this should be the first thing done in the IRQ handler */
+ ks_save_cmd_reg(ks);
+
+ status = ks_rdreg16(ks, KS_ISR);
+ if (unlikely(!status)) {
+ ks_restore_cmd_reg(ks);
+ return IRQ_NONE;
+ }
+
+ ks_wrreg16(ks, KS_ISR, status);
+
+ if (likely(status & IRQ_RXI))
+ ks_rcv(ks, netdev);
+
+ if (unlikely(status & IRQ_LCI))
+ ks_update_link_status(netdev, ks);
+
+ if (unlikely(status & IRQ_TXI))
+ netif_wake_queue(netdev);
+
+ if (unlikely(status & IRQ_LDI)) {
+ u16 pmecr = ks_rdreg16(ks, KS_PMECR);
+
+ pmecr &= ~PMECR_WKEVT_MASK;
+ ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
+ }
+
+ /* this should be the last thing done in the IRQ handler */
+ ks_restore_cmd_reg(ks);
+ return IRQ_HANDLED;
+}
+
+
+/**
+ * ks_net_open - open network device
+ * @netdev: The network device being opened.
+ *
+ * Called when the network device is marked active, such as a user executing
+ * 'ifconfig up' on the device.
+ */
+static int ks_net_open(struct net_device *netdev)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ int err;
+
+#define KS_INT_FLAGS (IRQF_DISABLED|IRQF_TRIGGER_LOW)
+
+ netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);
+
+ /* register the interrupt handler */
+ err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
+
+ if (err) {
+ pr_err("Failed to request IRQ: %d: %d\n", ks->irq, err);
+ return err;
+ }
+
+ /* wake up powermode to normal mode */
+ ks_set_powermode(ks, PMECR_PM_NORMAL);
+ mdelay(1); /* wait for normal mode to take effect */
+
+ ks_wrreg16(ks, KS_ISR, 0xffff);
+ ks_enable_int(ks);
+ ks_enable_qmu(ks);
+ netif_start_queue(ks->netdev);
+
+ netif_dbg(ks, ifup, ks->netdev, "network device up\n");
+
+ return 0;
+}
+
+/**
+ * ks_net_stop - close network device
+ * @netdev: The device being closed.
+ *
+ * Called to close down a network device which has been active. Cancel any
+ * pending work, shut down the RX and TX processes and then place the chip
+ * into a low power state while it is not being used.
+ */
+static int ks_net_stop(struct net_device *netdev)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+
+ netif_info(ks, ifdown, netdev, "shutting down\n");
+
+ netif_stop_queue(netdev);
+
+ mutex_lock(&ks->lock);
+
+ /* turn off the IRQs and ack any outstanding */
+ ks_wrreg16(ks, KS_IER, 0x0000);
+ ks_wrreg16(ks, KS_ISR, 0xffff);
+
+ /* shutdown RX/TX QMU */
+ ks_disable_qmu(ks);
+
+ /* set powermode to soft power down to save power */
+ ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
+ free_irq(ks->irq, netdev);
+ mutex_unlock(&ks->lock);
+ return 0;
+}
+
+
+/**
+ * ks_write_qmu - write 1 pkt data to the QMU.
+ * @ks: The chip information
+ * @pdata: buffer address of the pkt data to send
+ * @len: Pkt length in bytes
+ * Here is the sequence to write 1 pkt:
+ * 1. set pseudo DMA mode
+ * 2. write status/length
+ * 3. write pkt data
+ * 4. reset pseudo DMA mode
+ * 5. enqueue the pkt (TXQCR_METFE)
+ * 6. wait until the pkt is out
+ */
+static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
+{
+ /* start header at txb[0] to align txw entries */
+ ks->txh.txw[0] = 0;
+ ks->txh.txw[1] = cpu_to_le16(len);
+
+ /* 1. set pseudo-DMA mode */
+ ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
+ /* 2. write status/length info */
+ ks_outblk(ks, ks->txh.txw, 4);
+ /* 3. write pkt data */
+ ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
+ /* 4. reset pseudo-DMA mode */
+ ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
+ /* 5. Enqueue Tx (move the pkt from TX buffer into TXQ) */
+ ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
+ /* 6. wait until TXQCR_METFE is auto-cleared */
+ while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
+ cpu_relax();
+}
+
+/**
+ * ks_start_xmit - transmit packet
+ * @skb: The buffer to transmit
+ * @netdev: The device used to transmit the packet.
+ *
+ * Called by the network layer to transmit the @skb. TX and RX must be
+ * mutually exclusive, so the device interrupt is disabled and the state
+ * lock held while the transmit is in progress.
+ */
+static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ int retv = NETDEV_TX_OK;
+ struct ks_net *ks = netdev_priv(netdev);
+
+ disable_irq(netdev->irq);
+ ks_disable_int(ks);
+ spin_lock(&ks->statelock);
+
+ /* Extra space is required:
+ * 4 bytes for alignment, 4 for status/length, 4 for CRC
+ */
+
+ if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
+ ks_write_qmu(ks, skb->data, skb->len);
+ dev_kfree_skb(skb);
+ } else
+ retv = NETDEV_TX_BUSY;
+ spin_unlock(&ks->statelock);
+ ks_enable_int(ks);
+ enable_irq(netdev->irq);
+ return retv;
+}
+
+/**
+ * ks_start_rx - ready to serve pkts
+ * @ks: The chip information
+ */
+static void ks_start_rx(struct ks_net *ks)
+{
+ u16 cntl;
+
+ /* Enables QMU Receive (RXCR1). */
+ cntl = ks_rdreg16(ks, KS_RXCR1);
+ cntl |= RXCR1_RXE;
+ ks_wrreg16(ks, KS_RXCR1, cntl);
+} /* ks_start_rx */
+
+/**
+ * ks_stop_rx - stop serving pkts
+ * @ks: The chip information
+ */
+static void ks_stop_rx(struct ks_net *ks)
+{
+ u16 cntl;
+
+ /* Disables QMU Receive (RXCR1). */
+ cntl = ks_rdreg16(ks, KS_RXCR1);
+ cntl &= ~RXCR1_RXE;
+ ks_wrreg16(ks, KS_RXCR1, cntl);
+
+} /* ks_stop_rx */
+
+static unsigned long const ethernet_polynomial = 0x04c11db7U;
+
+static unsigned long ether_gen_crc(int length, u8 *data)
+{
+ long crc = -1;
+ while (--length >= 0) {
+ u8 current_octet = *data++;
+ int bit;
+
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
+ crc = (crc << 1) ^
+ ((crc < 0) ^ (current_octet & 1) ?
+ ethernet_polynomial : 0);
+ }
+ }
+ return (unsigned long)crc;
+} /* ether_gen_crc */
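+
+/*
+ * Illustrative sketch (hypothetical helper, not referenced by the
+ * driver): the kernel's ether_crc() from <linux/crc32.h>, assuming
+ * that header is included here, computes the same MSB-first Ethernet
+ * CRC-32 as ether_gen_crc() above, so the multicast hash position
+ * could equally be derived as follows.
+ */
+static inline u32 ks_sketch_mcast_position(const u8 *mac_addr)
+{
+ /* the top 6 CRC bits select one of the 64 hash-table bits */
+ return (ether_crc(6, mac_addr) >> 26) & 0x3f;
+}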
+
+/**
+ * ks_set_grpaddr - set multicast information
+ * @ks: The chip information
+ */
+static void ks_set_grpaddr(struct ks_net *ks)
+{
+ u8 i;
+ u32 index, position, value;
+
+ memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);
+
+ for (i = 0; i < ks->mcast_lst_size; i++) {
+ position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
+ index = position >> 3;
+ value = 1 << (position & 7);
+ ks->mcast_bits[index] |= (u8)value;
+ }
+
+ for (i = 0; i < HW_MCAST_SIZE; i++) {
+ if (i & 1) {
+ ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
+ (ks->mcast_bits[i] << 8) |
+ ks->mcast_bits[i - 1]);
+ }
+ }
+} /* ks_set_grpaddr */
+
+/*
+ * ks_clear_mcast - clear multicast information
+ * @ks: The chip information
+ *
+ * This routine removes all mcast addresses set in the hardware.
+ */
+static void ks_clear_mcast(struct ks_net *ks)
+{
+ u16 i, mcast_size;
+
+ for (i = 0; i < HW_MCAST_SIZE; i++)
+ ks->mcast_bits[i] = 0;
+
+ /* each 16-bit write clears two bytes of the 8-byte hash table */
+ mcast_size = HW_MCAST_SIZE >> 1;
+ for (i = 0; i < mcast_size; i++)
+ ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
+}
+
+static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
+{
+ u16 cntl;
+ ks->promiscuous = promiscuous_mode;
+ ks_stop_rx(ks); /* Stop receiving for reconfiguration */
+ cntl = ks_rdreg16(ks, KS_RXCR1);
+
+ cntl &= ~RXCR1_FILTER_MASK;
+ if (promiscuous_mode)
+ /* Enable Promiscuous mode */
+ cntl |= RXCR1_RXAE | RXCR1_RXINVF;
+ else
+ /* Disable Promiscuous mode (default normal mode) */
+ cntl |= RXCR1_RXPAFMA;
+
+ ks_wrreg16(ks, KS_RXCR1, cntl);
+
+ if (ks->enabled)
+ ks_start_rx(ks);
+
+} /* ks_set_promis */
+
+static void ks_set_mcast(struct ks_net *ks, u16 mcast)
+{
+ u16 cntl;
+
+ ks->all_mcast = mcast;
+ ks_stop_rx(ks); /* Stop receiving for reconfiguration */
+ cntl = ks_rdreg16(ks, KS_RXCR1);
+ cntl &= ~RXCR1_FILTER_MASK;
+ if (mcast)
+ /* Enable "Perfect with Multicast address passed mode" */
+ cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
+ else
+ /*
+ * Disable "Perfect with Multicast address passed
+ * mode" (normal mode).
+ */
+ cntl |= RXCR1_RXPAFMA;
+
+ ks_wrreg16(ks, KS_RXCR1, cntl);
+
+ if (ks->enabled)
+ ks_start_rx(ks);
+} /* ks_set_mcast */
+
+static void ks_set_rx_mode(struct net_device *netdev)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ struct netdev_hw_addr *ha;
+
+ /* Turn on/off promiscuous mode. */
+ if (netdev->flags & IFF_PROMISC)
+ ks_set_promis(ks, true);
+ /* Turn on/off all mcast mode. */
+ else if (netdev->flags & IFF_ALLMULTI)
+ ks_set_mcast(ks, true);
+ else
+ ks_set_promis(ks, false);
+
+ if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
+ if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
+ int i = 0;
+
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (i >= MAX_MCAST_LST)
+ break;
+ memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
+ }
+ ks->mcast_lst_size = (u8)i;
+ ks_set_grpaddr(ks);
+ } else {
+ /*
+ * List too big to support so
+ * turn on all mcast mode.
+ */
+ ks->mcast_lst_size = MAX_MCAST_LST;
+ ks_set_mcast(ks, true);
+ }
+ } else {
+ ks->mcast_lst_size = 0;
+ ks_clear_mcast(ks);
+ }
+} /* ks_set_rx_mode */
+
+static void ks_set_mac(struct ks_net *ks, u8 *data)
+{
+ u16 *pw = (u16 *)data;
+ u16 w, u;
+
+ ks_stop_rx(ks); /* Stop receiving for reconfiguration */
+
+ u = *pw++;
+ w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
+ ks_wrreg16(ks, KS_MARH, w);
+
+ u = *pw++;
+ w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
+ ks_wrreg16(ks, KS_MARM, w);
+
+ u = *pw;
+ w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
+ ks_wrreg16(ks, KS_MARL, w);
+
+ memcpy(ks->mac_addr, data, 6);
+
+ if (ks->enabled)
+ ks_start_rx(ks);
+}
+
+static int ks_set_mac_address(struct net_device *netdev, void *paddr)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ struct sockaddr *addr = paddr;
+ u8 *da;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+ da = (u8 *)netdev->dev_addr;
+
+ ks_set_mac(ks, da);
+ return 0;
+}
+
+static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
+}
+
+static const struct net_device_ops ks_netdev_ops = {
+ .ndo_open = ks_net_open,
+ .ndo_stop = ks_net_stop,
+ .ndo_do_ioctl = ks_net_ioctl,
+ .ndo_start_xmit = ks_start_xmit,
+ .ndo_set_mac_address = ks_set_mac_address,
+ .ndo_set_rx_mode = ks_set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/* ethtool support */
+
+static void ks_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *di)
+{
+ strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
+ strlcpy(di->version, "1.00", sizeof(di->version));
+ strlcpy(di->bus_info, dev_name(netdev->dev.parent),
+ sizeof(di->bus_info));
+}
+
+static u32 ks_get_msglevel(struct net_device *netdev)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ return ks->msg_enable;
+}
+
+static void ks_set_msglevel(struct net_device *netdev, u32 to)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ ks->msg_enable = to;
+}
+
+static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ return mii_ethtool_gset(&ks->mii, cmd);
+}
+
+static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ return mii_ethtool_sset(&ks->mii, cmd);
+}
+
+static u32 ks_get_link(struct net_device *netdev)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ return mii_link_ok(&ks->mii);
+}
+
+static int ks_nway_reset(struct net_device *netdev)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ return mii_nway_restart(&ks->mii);
+}
+
+static const struct ethtool_ops ks_ethtool_ops = {
+ .get_drvinfo = ks_get_drvinfo,
+ .get_msglevel = ks_get_msglevel,
+ .set_msglevel = ks_set_msglevel,
+ .get_settings = ks_get_settings,
+ .set_settings = ks_set_settings,
+ .get_link = ks_get_link,
+ .nway_reset = ks_nway_reset,
+};
+
+/* MII interface controls */
+
+/**
+ * ks_phy_reg - convert MII register into a KS8851 register
+ * @reg: MII register number.
+ *
+ * Return the KS8851 register number for the corresponding MII PHY register
+ * if possible. Return zero if the MII register has no direct mapping to the
+ * KS8851 register set.
+ */
+static int ks_phy_reg(int reg)
+{
+ switch (reg) {
+ case MII_BMCR:
+ return KS_P1MBCR;
+ case MII_BMSR:
+ return KS_P1MBSR;
+ case MII_PHYSID1:
+ return KS_PHY1ILR;
+ case MII_PHYSID2:
+ return KS_PHY1IHR;
+ case MII_ADVERTISE:
+ return KS_P1ANAR;
+ case MII_LPA:
+ return KS_P1ANLPR;
+ }
+
+ return 0x0;
+}
+
+/**
+ * ks_phy_read - MII interface PHY register read.
+ * @netdev: The network device the PHY is on.
+ * @phy_addr: Address of PHY (ignored as we only have one)
+ * @reg: The register to read.
+ *
+ * This call reads data from the PHY register specified in @reg. Since the
+ * device does not support all the MII registers, the non-existent values
+ * are always returned as zero.
+ *
+ * We return zero for unsupported registers as the MII code does not check
+ * the value returned for any error status, and simply returns it to the
+ * caller. The mii-tool that the driver was tested with takes any negative
+ * error as real PHY capabilities, thus displaying incorrect data to the user.
+ */
+static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ int ksreg;
+ int result;
+
+ ksreg = ks_phy_reg(reg);
+ if (!ksreg)
+ return 0x0; /* no error return allowed, so use zero */
+
+ mutex_lock(&ks->lock);
+ result = ks_rdreg16(ks, ksreg);
+ mutex_unlock(&ks->lock);
+
+ return result;
+}
+
+static void ks_phy_write(struct net_device *netdev,
+ int phy, int reg, int value)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ int ksreg;
+
+ ksreg = ks_phy_reg(reg);
+ if (ksreg) {
+ mutex_lock(&ks->lock);
+ ks_wrreg16(ks, ksreg, value);
+ mutex_unlock(&ks->lock);
+ }
+}
+
+/**
+ * ks_read_selftest - read the selftest memory info.
+ * @ks: The device state
+ *
+ * Read and check the TX/RX memory selftest information.
+ */
+static int ks_read_selftest(struct ks_net *ks)
+{
+ unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
+ int ret = 0;
+ unsigned rd;
+
+ rd = ks_rdreg16(ks, KS_MBIR);
+
+ if ((rd & both_done) != both_done) {
+ netdev_warn(ks->netdev, "Memory selftest not finished\n");
+ return 0;
+ }
+
+ if (rd & MBIR_TXMBFA) {
+ netdev_err(ks->netdev, "TX memory selftest failed\n");
+ ret |= 1;
+ }
+
+ if (rd & MBIR_RXMBFA) {
+ netdev_err(ks->netdev, "RX memory selftest failed\n");
+ ret |= 2;
+ }
+
+ if (!ret)
+ netdev_info(ks->netdev, "the selftest passed\n");
+
+ return ret;
+}
+
+static void ks_setup(struct ks_net *ks)
+{
+ u16 w;
+
+ /* Configure QMU Transmit */
+
+ /* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
+ ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);
+
+ /* Setup Receive Frame Data Pointer Auto-Increment */
+ ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
+
+ /* Setup Receive Frame Threshold - 1 frame (RXFCTR) */
+ ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);
+
+ /* Setup RxQ Command Control (RXQCR) */
+ ks->rc_rxqcr = RXQCR_CMD_CNTL;
+ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+
+ /*
+ * Force half duplex (the default is full duplex) because most
+ * switches fall back to half duplex when auto-negotiation fails.
+ */
+
+ w = ks_rdreg16(ks, KS_P1MBCR);
+ w &= ~P1MBCR_FORCE_FDX;
+ ks_wrreg16(ks, KS_P1MBCR, w);
+
+ w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
+ ks_wrreg16(ks, KS_TXCR, w);
+
+ w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;
+
+ if (ks->promiscuous) /* bPromiscuous */
+ w |= (RXCR1_RXAE | RXCR1_RXINVF);
+ else if (ks->all_mcast) /* Multicast address passed mode */
+ w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
+ else /* Normal mode */
+ w |= RXCR1_RXPAFMA;
+
+ ks_wrreg16(ks, KS_RXCR1, w);
+} /* ks_setup */
+
+
+static void ks_setup_int(struct ks_net *ks)
+{
+ ks->rc_ier = 0x00;
+ /* Clear the interrupts status of the hardware. */
+ ks_wrreg16(ks, KS_ISR, 0xffff);
+
+ /* Enables the interrupts of the hardware. */
+ ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
+} /* ks_setup_int */
+
+static int ks_hw_init(struct ks_net *ks)
+{
+#define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
+ ks->promiscuous = 0;
+ ks->all_mcast = 0;
+ ks->mcast_lst_size = 0;
+
+ ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL);
+ if (!ks->frame_head_info) {
+ pr_err("Error: Fail to allocate frame memory\n");
+ return false;
+ }
+
+ ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
+ return true;
+}
+
+
+static int __devinit ks8851_probe(struct platform_device *pdev)
+{
+ int err = -ENOMEM;
+ struct resource *io_d, *io_c;
+ struct net_device *netdev;
+ struct ks_net *ks;
+ u16 id, data;
+
+ io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+
+ if (!io_d || !io_c)
+ return -ENXIO;
+
+ if (!request_mem_region(io_d->start, resource_size(io_d), DRV_NAME))
+ goto err_mem_region;
+
+ if (!request_mem_region(io_c->start, resource_size(io_c), DRV_NAME))
+ goto err_mem_region1;
+
+ netdev = alloc_etherdev(sizeof(struct ks_net));
+ if (!netdev)
+ goto err_alloc_etherdev;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ ks = netdev_priv(netdev);
+ ks->netdev = netdev;
+ ks->hw_addr = ioremap(io_d->start, resource_size(io_d));
+
+ if (!ks->hw_addr)
+ goto err_ioremap;
+
+ ks->hw_addr_cmd = ioremap(io_c->start, resource_size(io_c));
+ if (!ks->hw_addr_cmd)
+ goto err_ioremap1;
+
+ ks->irq = platform_get_irq(pdev, 0);
+
+ if (ks->irq < 0) {
+ err = ks->irq;
+ goto err_get_irq;
+ }
+
+ ks->pdev = pdev;
+
+ mutex_init(&ks->lock);
+ spin_lock_init(&ks->statelock);
+
+ netdev->netdev_ops = &ks_netdev_ops;
+ netdev->ethtool_ops = &ks_ethtool_ops;
+
+ /* setup mii state */
+ ks->mii.dev = netdev;
+ ks->mii.phy_id = 1;
+ ks->mii.phy_id_mask = 1;
+ ks->mii.reg_num_mask = 0xf;
+ ks->mii.mdio_read = ks_phy_read;
+ ks->mii.mdio_write = ks_phy_write;
+
+ netdev_info(netdev, "message enable is %d\n", msg_enable);
+ /* set the default message enable */
+ ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
+ NETIF_MSG_PROBE |
+ NETIF_MSG_LINK));
+ ks_read_config(ks);
+
+ /* simple check for a valid chip being connected to the bus */
+ if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
+ netdev_err(netdev, "failed to read device ID\n");
+ err = -ENODEV;
+ goto err_register;
+ }
+
+ if (ks_read_selftest(ks)) {
+ netdev_err(netdev, "failed the memory selftest\n");
+ err = -ENODEV;
+ goto err_register;
+ }
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ platform_set_drvdata(pdev, netdev);
+
+ ks_soft_reset(ks, GRR_GSR);
+ ks_hw_init(ks);
+ ks_disable_qmu(ks);
+ ks_setup(ks);
+ ks_setup_int(ks);
+ memcpy(netdev->dev_addr, ks->mac_addr, 6);
+
+ data = ks_rdreg16(ks, KS_OBCR);
+ ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
+
+ /*
+ * If you want to use the default MAC addr,
+ * comment out the two function calls below.
+ */
+
+ random_ether_addr(netdev->dev_addr);
+ ks_set_mac(ks, netdev->dev_addr);
+
+ id = ks_rdreg16(ks, KS_CIDER);
+
+ netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
+ (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
+ return 0;
+
+err_register:
+err_get_irq:
+ iounmap(ks->hw_addr_cmd);
+err_ioremap1:
+ iounmap(ks->hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ release_mem_region(io_c->start, resource_size(io_c));
+err_mem_region1:
+ release_mem_region(io_d->start, resource_size(io_d));
+err_mem_region:
+ return err;
+}
+
+static int __devexit ks8851_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+ struct ks_net *ks = netdev_priv(netdev);
+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ /* unregister before tearing the rest of the device down */
+ unregister_netdev(netdev);
+ kfree(ks->frame_head_info);
+ iounmap(ks->hw_addr);
+ iounmap(ks->hw_addr_cmd);
+ free_netdev(netdev);
+ release_mem_region(iomem->start, resource_size(iomem));
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct platform_driver ks8851_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ks8851_probe,
+ .remove = __devexit_p(ks8851_remove),
+};
+
+static int __init ks8851_init(void)
+{
+ return platform_driver_register(&ks8851_platform_driver);
+}
+
+static void __exit ks8851_exit(void)
+{
+ platform_driver_unregister(&ks8851_platform_driver);
+}
+
+module_init(ks8851_init);
+module_exit(ks8851_exit);
+
+MODULE_DESCRIPTION("KS8851 MLL Network driver");
+MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
+MODULE_LICENSE("GPL");
+module_param_named(message, msg_enable, int, 0);
+MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
+
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
new file mode 100644
index 000000000000..710c4aead146
--- /dev/null
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -0,0 +1,7288 @@
+/*
+ * drivers/net/ethernet/micrel/ksz884x.c - Micrel KSZ8841/2 PCI Ethernet driver
+ *
+ * Copyright (c) 2009-2010 Micrel, Inc.
+ * Tristram Ha <Tristram.Ha@micrel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/mii.h>
+#include <linux/platform_device.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+
+/* DMA Registers */
+
+#define KS_DMA_TX_CTRL 0x0000
+#define DMA_TX_ENABLE 0x00000001
+#define DMA_TX_CRC_ENABLE 0x00000002
+#define DMA_TX_PAD_ENABLE 0x00000004
+#define DMA_TX_LOOPBACK 0x00000100
+#define DMA_TX_FLOW_ENABLE 0x00000200
+#define DMA_TX_CSUM_IP 0x00010000
+#define DMA_TX_CSUM_TCP 0x00020000
+#define DMA_TX_CSUM_UDP 0x00040000
+#define DMA_TX_BURST_SIZE 0x3F000000
+
+#define KS_DMA_RX_CTRL 0x0004
+#define DMA_RX_ENABLE 0x00000001
+#define KS884X_DMA_RX_MULTICAST 0x00000002
+#define DMA_RX_PROMISCUOUS 0x00000004
+#define DMA_RX_ERROR 0x00000008
+#define DMA_RX_UNICAST 0x00000010
+#define DMA_RX_ALL_MULTICAST 0x00000020
+#define DMA_RX_BROADCAST 0x00000040
+#define DMA_RX_FLOW_ENABLE 0x00000200
+#define DMA_RX_CSUM_IP 0x00010000
+#define DMA_RX_CSUM_TCP 0x00020000
+#define DMA_RX_CSUM_UDP 0x00040000
+#define DMA_RX_BURST_SIZE 0x3F000000
+
+#define DMA_BURST_SHIFT 24
+#define DMA_BURST_DEFAULT 8
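+
+/*
+ * Illustrative sketch (hypothetical helper, not referenced by the
+ * driver): a typical transmit DMA control word enables the transmitter
+ * with CRC and padding generation and programs the default burst size
+ * into the burst-size field.
+ */
+static inline u32 ks_sketch_dma_tx_ctrl(void)
+{
+ return DMA_TX_ENABLE | DMA_TX_CRC_ENABLE | DMA_TX_PAD_ENABLE |
+ (DMA_BURST_DEFAULT << DMA_BURST_SHIFT);
+}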
+
+#define KS_DMA_TX_START 0x0008
+#define KS_DMA_RX_START 0x000C
+#define DMA_START 0x00000001
+
+#define KS_DMA_TX_ADDR 0x0010
+#define KS_DMA_RX_ADDR 0x0014
+
+#define DMA_ADDR_LIST_MASK 0xFFFFFFFC
+#define DMA_ADDR_LIST_SHIFT 2
+
+/* MTR0 */
+#define KS884X_MULTICAST_0_OFFSET 0x0020
+#define KS884X_MULTICAST_1_OFFSET 0x0021
+#define KS884X_MULTICAST_2_OFFSET 0x0022
+#define KS884x_MULTICAST_3_OFFSET 0x0023
+/* MTR1 */
+#define KS884X_MULTICAST_4_OFFSET 0x0024
+#define KS884X_MULTICAST_5_OFFSET 0x0025
+#define KS884X_MULTICAST_6_OFFSET 0x0026
+#define KS884X_MULTICAST_7_OFFSET 0x0027
+
+/* Interrupt Registers */
+
+/* INTEN */
+#define KS884X_INTERRUPTS_ENABLE 0x0028
+/* INTST */
+#define KS884X_INTERRUPTS_STATUS 0x002C
+
+#define KS884X_INT_RX_STOPPED 0x02000000
+#define KS884X_INT_TX_STOPPED 0x04000000
+#define KS884X_INT_RX_OVERRUN 0x08000000
+#define KS884X_INT_TX_EMPTY 0x10000000
+#define KS884X_INT_RX 0x20000000
+#define KS884X_INT_TX 0x40000000
+#define KS884X_INT_PHY 0x80000000
+
+#define KS884X_INT_RX_MASK \
+ (KS884X_INT_RX | KS884X_INT_RX_OVERRUN)
+#define KS884X_INT_TX_MASK \
+ (KS884X_INT_TX | KS884X_INT_TX_EMPTY)
+#define KS884X_INT_MASK (KS884X_INT_RX | KS884X_INT_TX | KS884X_INT_PHY)
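+
+/*
+ * Illustrative sketch (hypothetical helpers, not referenced by the
+ * driver): an interrupt dispatcher would test the word read from
+ * KS884X_INTERRUPTS_STATUS against the composite masks above.
+ */
+static inline int ks_sketch_int_is_rx(u32 status)
+{
+ /* receive completion or receive overrun */
+ return !!(status & KS884X_INT_RX_MASK);
+}
+
+static inline int ks_sketch_int_is_tx(u32 status)
+{
+ /* transmit completion or transmit ring empty */
+ return !!(status & KS884X_INT_TX_MASK);
+}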
+
+/* MAC Additional Station Address */
+
+/* MAAL0 */
+#define KS_ADD_ADDR_0_LO 0x0080
+/* MAAH0 */
+#define KS_ADD_ADDR_0_HI 0x0084
+/* MAAL1 */
+#define KS_ADD_ADDR_1_LO 0x0088
+/* MAAH1 */
+#define KS_ADD_ADDR_1_HI 0x008C
+/* MAAL2 */
+#define KS_ADD_ADDR_2_LO 0x0090
+/* MAAH2 */
+#define KS_ADD_ADDR_2_HI 0x0094
+/* MAAL3 */
+#define KS_ADD_ADDR_3_LO 0x0098
+/* MAAH3 */
+#define KS_ADD_ADDR_3_HI 0x009C
+/* MAAL4 */
+#define KS_ADD_ADDR_4_LO 0x00A0
+/* MAAH4 */
+#define KS_ADD_ADDR_4_HI 0x00A4
+/* MAAL5 */
+#define KS_ADD_ADDR_5_LO 0x00A8
+/* MAAH5 */
+#define KS_ADD_ADDR_5_HI 0x00AC
+/* MAAL6 */
+#define KS_ADD_ADDR_6_LO 0x00B0
+/* MAAH6 */
+#define KS_ADD_ADDR_6_HI 0x00B4
+/* MAAL7 */
+#define KS_ADD_ADDR_7_LO 0x00B8
+/* MAAH7 */
+#define KS_ADD_ADDR_7_HI 0x00BC
+/* MAAL8 */
+#define KS_ADD_ADDR_8_LO 0x00C0
+/* MAAH8 */
+#define KS_ADD_ADDR_8_HI 0x00C4
+/* MAAL9 */
+#define KS_ADD_ADDR_9_LO 0x00C8
+/* MAAH9 */
+#define KS_ADD_ADDR_9_HI 0x00CC
+/* MAAL10 */
+#define KS_ADD_ADDR_A_LO 0x00D0
+/* MAAH10 */
+#define KS_ADD_ADDR_A_HI 0x00D4
+/* MAAL11 */
+#define KS_ADD_ADDR_B_LO 0x00D8
+/* MAAH11 */
+#define KS_ADD_ADDR_B_HI 0x00DC
+/* MAAL12 */
+#define KS_ADD_ADDR_C_LO 0x00E0
+/* MAAH12 */
+#define KS_ADD_ADDR_C_HI 0x00E4
+/* MAAL13 */
+#define KS_ADD_ADDR_D_LO 0x00E8
+/* MAAH13 */
+#define KS_ADD_ADDR_D_HI 0x00EC
+/* MAAL14 */
+#define KS_ADD_ADDR_E_LO 0x00F0
+/* MAAH14 */
+#define KS_ADD_ADDR_E_HI 0x00F4
+/* MAAL15 */
+#define KS_ADD_ADDR_F_LO 0x00F8
+/* MAAH15 */
+#define KS_ADD_ADDR_F_HI 0x00FC
+
+#define ADD_ADDR_HI_MASK 0x0000FFFF
+#define ADD_ADDR_ENABLE 0x80000000
+#define ADD_ADDR_INCR 8
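+
+/*
+ * Illustrative sketch (hypothetical helper, not referenced by the
+ * driver): each additional station address entry occupies
+ * ADD_ADDR_INCR bytes of register space. The byte packing below is an
+ * assumption for illustration; only the register offsets, the mask and
+ * the enable bit come from the definitions above.
+ */
+static inline void ks_sketch_set_add_addr(void __iomem *io, int index,
+ const u8 *mac)
+{
+ u32 lo = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+ u32 hi = mac[4] | (mac[5] << 8);
+
+ writel(lo, io + KS_ADD_ADDR_0_LO + index * ADD_ADDR_INCR);
+ writel((hi & ADD_ADDR_HI_MASK) | ADD_ADDR_ENABLE,
+ io + KS_ADD_ADDR_0_HI + index * ADD_ADDR_INCR);
+}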
+
+/* Miscellaneous Registers */
+
+/* MARL */
+#define KS884X_ADDR_0_OFFSET 0x0200
+#define KS884X_ADDR_1_OFFSET 0x0201
+/* MARM */
+#define KS884X_ADDR_2_OFFSET 0x0202
+#define KS884X_ADDR_3_OFFSET 0x0203
+/* MARH */
+#define KS884X_ADDR_4_OFFSET 0x0204
+#define KS884X_ADDR_5_OFFSET 0x0205
+
+/* OBCR */
+#define KS884X_BUS_CTRL_OFFSET 0x0210
+
+#define BUS_SPEED_125_MHZ 0x0000
+#define BUS_SPEED_62_5_MHZ 0x0001
+#define BUS_SPEED_41_66_MHZ 0x0002
+#define BUS_SPEED_25_MHZ 0x0003
+
+/* EEPCR */
+#define KS884X_EEPROM_CTRL_OFFSET 0x0212
+
+#define EEPROM_CHIP_SELECT 0x0001
+#define EEPROM_SERIAL_CLOCK 0x0002
+#define EEPROM_DATA_OUT 0x0004
+#define EEPROM_DATA_IN 0x0008
+#define EEPROM_ACCESS_ENABLE 0x0010
+
+/* MBIR */
+#define KS884X_MEM_INFO_OFFSET 0x0214
+
+#define RX_MEM_TEST_FAILED 0x0008
+#define RX_MEM_TEST_FINISHED 0x0010
+#define TX_MEM_TEST_FAILED 0x0800
+#define TX_MEM_TEST_FINISHED 0x1000
+
+/* GCR */
+#define KS884X_GLOBAL_CTRL_OFFSET 0x0216
+#define GLOBAL_SOFTWARE_RESET 0x0001
+
+#define KS8841_POWER_MANAGE_OFFSET 0x0218
+
+/* WFCR */
+#define KS8841_WOL_CTRL_OFFSET 0x021A
+#define KS8841_WOL_MAGIC_ENABLE 0x0080
+#define KS8841_WOL_FRAME3_ENABLE 0x0008
+#define KS8841_WOL_FRAME2_ENABLE 0x0004
+#define KS8841_WOL_FRAME1_ENABLE 0x0002
+#define KS8841_WOL_FRAME0_ENABLE 0x0001
+
+/* WF0 */
+#define KS8841_WOL_FRAME_CRC_OFFSET 0x0220
+#define KS8841_WOL_FRAME_BYTE0_OFFSET 0x0224
+#define KS8841_WOL_FRAME_BYTE2_OFFSET 0x0228
+
+/* IACR */
+#define KS884X_IACR_P 0x04A0
+#define KS884X_IACR_OFFSET KS884X_IACR_P
+
+/* IADR1 */
+#define KS884X_IADR1_P 0x04A2
+#define KS884X_IADR2_P 0x04A4
+#define KS884X_IADR3_P 0x04A6
+#define KS884X_IADR4_P 0x04A8
+#define KS884X_IADR5_P 0x04AA
+
+#define KS884X_ACC_CTRL_SEL_OFFSET KS884X_IACR_P
+#define KS884X_ACC_CTRL_INDEX_OFFSET (KS884X_ACC_CTRL_SEL_OFFSET + 1)
+
+#define KS884X_ACC_DATA_0_OFFSET KS884X_IADR4_P
+#define KS884X_ACC_DATA_1_OFFSET (KS884X_ACC_DATA_0_OFFSET + 1)
+#define KS884X_ACC_DATA_2_OFFSET KS884X_IADR5_P
+#define KS884X_ACC_DATA_3_OFFSET (KS884X_ACC_DATA_2_OFFSET + 1)
+#define KS884X_ACC_DATA_4_OFFSET KS884X_IADR2_P
+#define KS884X_ACC_DATA_5_OFFSET (KS884X_ACC_DATA_4_OFFSET + 1)
+#define KS884X_ACC_DATA_6_OFFSET KS884X_IADR3_P
+#define KS884X_ACC_DATA_7_OFFSET (KS884X_ACC_DATA_6_OFFSET + 1)
+#define KS884X_ACC_DATA_8_OFFSET KS884X_IADR1_P
+
+/* P1MBCR */
+#define KS884X_P1MBCR_P 0x04D0
+#define KS884X_P1MBSR_P 0x04D2
+#define KS884X_PHY1ILR_P 0x04D4
+#define KS884X_PHY1IHR_P 0x04D6
+#define KS884X_P1ANAR_P 0x04D8
+#define KS884X_P1ANLPR_P 0x04DA
+
+/* P2MBCR */
+#define KS884X_P2MBCR_P 0x04E0
+#define KS884X_P2MBSR_P 0x04E2
+#define KS884X_PHY2ILR_P 0x04E4
+#define KS884X_PHY2IHR_P 0x04E6
+#define KS884X_P2ANAR_P 0x04E8
+#define KS884X_P2ANLPR_P 0x04EA
+
+#define KS884X_PHY_1_CTRL_OFFSET KS884X_P1MBCR_P
+#define PHY_CTRL_INTERVAL (KS884X_P2MBCR_P - KS884X_P1MBCR_P)
+
+#define KS884X_PHY_CTRL_OFFSET 0x00
+
+/* Mode Control Register */
+#define PHY_REG_CTRL 0
+
+#define PHY_RESET 0x8000
+#define PHY_LOOPBACK 0x4000
+#define PHY_SPEED_100MBIT 0x2000
+#define PHY_AUTO_NEG_ENABLE 0x1000
+#define PHY_POWER_DOWN 0x0800
+#define PHY_MII_DISABLE 0x0400
+#define PHY_AUTO_NEG_RESTART 0x0200
+#define PHY_FULL_DUPLEX 0x0100
+#define PHY_COLLISION_TEST 0x0080
+#define PHY_HP_MDIX 0x0020
+#define PHY_FORCE_MDIX 0x0010
+#define PHY_AUTO_MDIX_DISABLE 0x0008
+#define PHY_REMOTE_FAULT_DISABLE 0x0004
+#define PHY_TRANSMIT_DISABLE 0x0002
+#define PHY_LED_DISABLE 0x0001
+
+#define KS884X_PHY_STATUS_OFFSET 0x02
+
+/* Mode Status Register */
+#define PHY_REG_STATUS 1
+
+#define PHY_100BT4_CAPABLE 0x8000
+#define PHY_100BTX_FD_CAPABLE 0x4000
+#define PHY_100BTX_CAPABLE 0x2000
+#define PHY_10BT_FD_CAPABLE 0x1000
+#define PHY_10BT_CAPABLE 0x0800
+#define PHY_MII_SUPPRESS_CAPABLE 0x0040
+#define PHY_AUTO_NEG_ACKNOWLEDGE 0x0020
+#define PHY_REMOTE_FAULT 0x0010
+#define PHY_AUTO_NEG_CAPABLE 0x0008
+#define PHY_LINK_STATUS 0x0004
+#define PHY_JABBER_DETECT 0x0002
+#define PHY_EXTENDED_CAPABILITY 0x0001
+
+#define KS884X_PHY_ID_1_OFFSET 0x04
+#define KS884X_PHY_ID_2_OFFSET 0x06
+
+/* PHY Identifier Registers */
+#define PHY_REG_ID_1 2
+#define PHY_REG_ID_2 3
+
+#define KS884X_PHY_AUTO_NEG_OFFSET 0x08
+
+/* Auto-Negotiation Advertisement Register */
+#define PHY_REG_AUTO_NEGOTIATION 4
+
+#define PHY_AUTO_NEG_NEXT_PAGE 0x8000
+#define PHY_AUTO_NEG_REMOTE_FAULT 0x2000
+/* Not supported. */
+#define PHY_AUTO_NEG_ASYM_PAUSE 0x0800
+#define PHY_AUTO_NEG_SYM_PAUSE 0x0400
+#define PHY_AUTO_NEG_100BT4 0x0200
+#define PHY_AUTO_NEG_100BTX_FD 0x0100
+#define PHY_AUTO_NEG_100BTX 0x0080
+#define PHY_AUTO_NEG_10BT_FD 0x0040
+#define PHY_AUTO_NEG_10BT 0x0020
+#define PHY_AUTO_NEG_SELECTOR 0x001F
+#define PHY_AUTO_NEG_802_3 0x0001
+
+#define PHY_AUTO_NEG_PAUSE (PHY_AUTO_NEG_SYM_PAUSE | PHY_AUTO_NEG_ASYM_PAUSE)
+
+#define KS884X_PHY_REMOTE_CAP_OFFSET 0x0A
+
+/* Auto-Negotiation Link Partner Ability Register */
+#define PHY_REG_REMOTE_CAPABILITY 5
+
+#define PHY_REMOTE_NEXT_PAGE 0x8000
+#define PHY_REMOTE_ACKNOWLEDGE 0x4000
+#define PHY_REMOTE_REMOTE_FAULT 0x2000
+#define PHY_REMOTE_SYM_PAUSE 0x0400
+#define PHY_REMOTE_100BTX_FD 0x0100
+#define PHY_REMOTE_100BTX 0x0080
+#define PHY_REMOTE_10BT_FD 0x0040
+#define PHY_REMOTE_10BT 0x0020
+
+/* P1VCT */
+#define KS884X_P1VCT_P 0x04F0
+#define KS884X_P1PHYCTRL_P 0x04F2
+
+/* P2VCT */
+#define KS884X_P2VCT_P 0x04F4
+#define KS884X_P2PHYCTRL_P 0x04F6
+
+#define KS884X_PHY_SPECIAL_OFFSET KS884X_P1VCT_P
+#define PHY_SPECIAL_INTERVAL (KS884X_P2VCT_P - KS884X_P1VCT_P)
+
+#define KS884X_PHY_LINK_MD_OFFSET 0x00
+
+#define PHY_START_CABLE_DIAG 0x8000
+#define PHY_CABLE_DIAG_RESULT 0x6000
+#define PHY_CABLE_STAT_NORMAL 0x0000
+#define PHY_CABLE_STAT_OPEN 0x2000
+#define PHY_CABLE_STAT_SHORT 0x4000
+#define PHY_CABLE_STAT_FAILED 0x6000
+#define PHY_CABLE_10M_SHORT 0x1000
+#define PHY_CABLE_FAULT_COUNTER 0x01FF
+
+#define KS884X_PHY_PHY_CTRL_OFFSET 0x02
+
+#define PHY_STAT_REVERSED_POLARITY 0x0020
+#define PHY_STAT_MDIX 0x0010
+#define PHY_FORCE_LINK 0x0008
+#define PHY_POWER_SAVING_DISABLE 0x0004
+#define PHY_REMOTE_LOOPBACK 0x0002
+
+/* SIDER */
+#define KS884X_SIDER_P 0x0400
+#define KS884X_CHIP_ID_OFFSET KS884X_SIDER_P
+#define KS884X_FAMILY_ID_OFFSET (KS884X_CHIP_ID_OFFSET + 1)
+
+#define REG_FAMILY_ID 0x88
+
+#define REG_CHIP_ID_41 0x8810
+#define REG_CHIP_ID_42 0x8800
+
+#define KS884X_CHIP_ID_MASK_41 0xFF10
+#define KS884X_CHIP_ID_MASK 0xFFF0
+#define KS884X_CHIP_ID_SHIFT 4
+#define KS884X_REVISION_MASK 0x000E
+#define KS884X_REVISION_SHIFT 1
+#define KS8842_START 0x0001
+
+#define CHIP_IP_41_M 0x8810
+#define CHIP_IP_42_M 0x8800
+#define CHIP_IP_61_M 0x8890
+#define CHIP_IP_62_M 0x8880
+
+#define CHIP_IP_41_P 0x8850
+#define CHIP_IP_42_P 0x8840
+#define CHIP_IP_61_P 0x88D0
+#define CHIP_IP_62_P 0x88C0
+
+/* SGCR1 */
+#define KS8842_SGCR1_P 0x0402
+#define KS8842_SWITCH_CTRL_1_OFFSET KS8842_SGCR1_P
+
+#define SWITCH_PASS_ALL 0x8000
+#define SWITCH_TX_FLOW_CTRL 0x2000
+#define SWITCH_RX_FLOW_CTRL 0x1000
+#define SWITCH_CHECK_LENGTH 0x0800
+#define SWITCH_AGING_ENABLE 0x0400
+#define SWITCH_FAST_AGING 0x0200
+#define SWITCH_AGGR_BACKOFF 0x0100
+#define SWITCH_PASS_PAUSE 0x0008
+#define SWITCH_LINK_AUTO_AGING 0x0001
+
+/* SGCR2 */
+#define KS8842_SGCR2_P 0x0404
+#define KS8842_SWITCH_CTRL_2_OFFSET KS8842_SGCR2_P
+
+#define SWITCH_VLAN_ENABLE 0x8000
+#define SWITCH_IGMP_SNOOP 0x4000
+#define IPV6_MLD_SNOOP_ENABLE 0x2000
+#define IPV6_MLD_SNOOP_OPTION 0x1000
+#define PRIORITY_SCHEME_SELECT 0x0800
+#define SWITCH_MIRROR_RX_TX 0x0100
+#define UNICAST_VLAN_BOUNDARY 0x0080
+#define MULTICAST_STORM_DISABLE 0x0040
+#define SWITCH_BACK_PRESSURE 0x0020
+#define FAIR_FLOW_CTRL 0x0010
+#define NO_EXC_COLLISION_DROP 0x0008
+#define SWITCH_HUGE_PACKET 0x0004
+#define SWITCH_LEGAL_PACKET 0x0002
+#define SWITCH_BUF_RESERVE 0x0001
+
+/* SGCR3 */
+#define KS8842_SGCR3_P 0x0406
+#define KS8842_SWITCH_CTRL_3_OFFSET KS8842_SGCR3_P
+
+#define BROADCAST_STORM_RATE_LO 0xFF00
+#define SWITCH_REPEATER 0x0080
+#define SWITCH_HALF_DUPLEX 0x0040
+#define SWITCH_FLOW_CTRL 0x0020
+#define SWITCH_10_MBIT 0x0010
+#define SWITCH_REPLACE_NULL_VID 0x0008
+#define BROADCAST_STORM_RATE_HI 0x0007
+
+#define BROADCAST_STORM_RATE 0x07FF
+
+/* SGCR4 */
+#define KS8842_SGCR4_P 0x0408
+
+/* SGCR5 */
+#define KS8842_SGCR5_P 0x040A
+#define KS8842_SWITCH_CTRL_5_OFFSET KS8842_SGCR5_P
+
+#define LED_MODE 0x8200
+#define LED_SPEED_DUPLEX_ACT 0x0000
+#define LED_SPEED_DUPLEX_LINK_ACT 0x8000
+#define LED_DUPLEX_10_100 0x0200
+
+/* SGCR6 */
+#define KS8842_SGCR6_P 0x0410
+#define KS8842_SWITCH_CTRL_6_OFFSET KS8842_SGCR6_P
+
+#define KS8842_PRIORITY_MASK 3
+#define KS8842_PRIORITY_SHIFT 2
+
+/* SGCR7 */
+#define KS8842_SGCR7_P 0x0412
+#define KS8842_SWITCH_CTRL_7_OFFSET KS8842_SGCR7_P
+
+#define SWITCH_UNK_DEF_PORT_ENABLE 0x0008
+#define SWITCH_UNK_DEF_PORT_3 0x0004
+#define SWITCH_UNK_DEF_PORT_2 0x0002
+#define SWITCH_UNK_DEF_PORT_1 0x0001
+
+/* MACAR1 */
+#define KS8842_MACAR1_P 0x0470
+#define KS8842_MACAR2_P 0x0472
+#define KS8842_MACAR3_P 0x0474
+#define KS8842_MAC_ADDR_1_OFFSET KS8842_MACAR1_P
+#define KS8842_MAC_ADDR_0_OFFSET (KS8842_MAC_ADDR_1_OFFSET + 1)
+#define KS8842_MAC_ADDR_3_OFFSET KS8842_MACAR2_P
+#define KS8842_MAC_ADDR_2_OFFSET (KS8842_MAC_ADDR_3_OFFSET + 1)
+#define KS8842_MAC_ADDR_5_OFFSET KS8842_MACAR3_P
+#define KS8842_MAC_ADDR_4_OFFSET (KS8842_MAC_ADDR_5_OFFSET + 1)
+
+/* TOSR1 */
+#define KS8842_TOSR1_P 0x0480
+#define KS8842_TOSR2_P 0x0482
+#define KS8842_TOSR3_P 0x0484
+#define KS8842_TOSR4_P 0x0486
+#define KS8842_TOSR5_P 0x0488
+#define KS8842_TOSR6_P 0x048A
+#define KS8842_TOSR7_P 0x0490
+#define KS8842_TOSR8_P 0x0492
+#define KS8842_TOS_1_OFFSET KS8842_TOSR1_P
+#define KS8842_TOS_2_OFFSET KS8842_TOSR2_P
+#define KS8842_TOS_3_OFFSET KS8842_TOSR3_P
+#define KS8842_TOS_4_OFFSET KS8842_TOSR4_P
+#define KS8842_TOS_5_OFFSET KS8842_TOSR5_P
+#define KS8842_TOS_6_OFFSET KS8842_TOSR6_P
+
+#define KS8842_TOS_7_OFFSET KS8842_TOSR7_P
+#define KS8842_TOS_8_OFFSET KS8842_TOSR8_P
+
+/* P1CR1 */
+#define KS8842_P1CR1_P 0x0500
+#define KS8842_P1CR2_P 0x0502
+#define KS8842_P1VIDR_P 0x0504
+#define KS8842_P1CR3_P 0x0506
+#define KS8842_P1IRCR_P 0x0508
+#define KS8842_P1ERCR_P 0x050A
+#define KS884X_P1SCSLMD_P 0x0510
+#define KS884X_P1CR4_P 0x0512
+#define KS884X_P1SR_P 0x0514
+
+/* P2CR1 */
+#define KS8842_P2CR1_P 0x0520
+#define KS8842_P2CR2_P 0x0522
+#define KS8842_P2VIDR_P 0x0524
+#define KS8842_P2CR3_P 0x0526
+#define KS8842_P2IRCR_P 0x0528
+#define KS8842_P2ERCR_P 0x052A
+#define KS884X_P2SCSLMD_P 0x0530
+#define KS884X_P2CR4_P 0x0532
+#define KS884X_P2SR_P 0x0534
+
+/* P3CR1 */
+#define KS8842_P3CR1_P 0x0540
+#define KS8842_P3CR2_P 0x0542
+#define KS8842_P3VIDR_P 0x0544
+#define KS8842_P3CR3_P 0x0546
+#define KS8842_P3IRCR_P 0x0548
+#define KS8842_P3ERCR_P 0x054A
+
+#define KS8842_PORT_1_CTRL_1 KS8842_P1CR1_P
+#define KS8842_PORT_2_CTRL_1 KS8842_P2CR1_P
+#define KS8842_PORT_3_CTRL_1 KS8842_P3CR1_P
+
+#define PORT_CTRL_ADDR(port, addr) \
+ (addr = KS8842_PORT_1_CTRL_1 + (port) * \
+ (KS8842_PORT_2_CTRL_1 - KS8842_PORT_1_CTRL_1))
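+
+/*
+ * Illustrative usage (hypothetical, not part of the driver): the
+ * per-port control blocks are evenly spaced, so for port index 1
+ *
+ * u32 addr;
+ *
+ * PORT_CTRL_ADDR(1, addr);
+ *
+ * leaves addr == KS8842_P2CR1_P (0x0520), the base of the port 2
+ * control block.
+ */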
+
+#define KS8842_PORT_CTRL_1_OFFSET 0x00
+
+#define PORT_BROADCAST_STORM 0x0080
+#define PORT_DIFFSERV_ENABLE 0x0040
+#define PORT_802_1P_ENABLE 0x0020
+#define PORT_BASED_PRIORITY_MASK 0x0018
+#define PORT_BASED_PRIORITY_BASE 0x0003
+#define PORT_BASED_PRIORITY_SHIFT 3
+#define PORT_BASED_PRIORITY_0 0x0000
+#define PORT_BASED_PRIORITY_1 0x0008
+#define PORT_BASED_PRIORITY_2 0x0010
+#define PORT_BASED_PRIORITY_3 0x0018
+#define PORT_INSERT_TAG 0x0004
+#define PORT_REMOVE_TAG 0x0002
+#define PORT_PRIO_QUEUE_ENABLE 0x0001
+
+#define KS8842_PORT_CTRL_2_OFFSET 0x02
+
+#define PORT_INGRESS_VLAN_FILTER 0x4000
+#define PORT_DISCARD_NON_VID 0x2000
+#define PORT_FORCE_FLOW_CTRL 0x1000
+#define PORT_BACK_PRESSURE 0x0800
+#define PORT_TX_ENABLE 0x0400
+#define PORT_RX_ENABLE 0x0200
+#define PORT_LEARN_DISABLE 0x0100
+#define PORT_MIRROR_SNIFFER 0x0080
+#define PORT_MIRROR_RX 0x0040
+#define PORT_MIRROR_TX 0x0020
+#define PORT_USER_PRIORITY_CEILING 0x0008
+#define PORT_VLAN_MEMBERSHIP 0x0007
+
+#define KS8842_PORT_CTRL_VID_OFFSET 0x04
+
+#define PORT_DEFAULT_VID 0x0001
+
+#define KS8842_PORT_CTRL_3_OFFSET 0x06
+
+#define PORT_INGRESS_LIMIT_MODE 0x000C
+#define PORT_INGRESS_ALL 0x0000
+#define PORT_INGRESS_UNICAST 0x0004
+#define PORT_INGRESS_MULTICAST 0x0008
+#define PORT_INGRESS_BROADCAST 0x000C
+#define PORT_COUNT_IFG 0x0002
+#define PORT_COUNT_PREAMBLE 0x0001
+
+#define KS8842_PORT_IN_RATE_OFFSET 0x08
+#define KS8842_PORT_OUT_RATE_OFFSET 0x0A
+
+#define PORT_PRIORITY_RATE 0x0F
+#define PORT_PRIORITY_RATE_SHIFT 4
+
+#define KS884X_PORT_LINK_MD 0x10
+
+#define PORT_CABLE_10M_SHORT 0x8000
+#define PORT_CABLE_DIAG_RESULT 0x6000
+#define PORT_CABLE_STAT_NORMAL 0x0000
+#define PORT_CABLE_STAT_OPEN 0x2000
+#define PORT_CABLE_STAT_SHORT 0x4000
+#define PORT_CABLE_STAT_FAILED 0x6000
+#define PORT_START_CABLE_DIAG 0x1000
+#define PORT_FORCE_LINK 0x0800
+#define PORT_POWER_SAVING_DISABLE 0x0400
+#define PORT_PHY_REMOTE_LOOPBACK 0x0200
+#define PORT_CABLE_FAULT_COUNTER 0x01FF
+
+#define KS884X_PORT_CTRL_4_OFFSET 0x12
+
+#define PORT_LED_OFF 0x8000
+#define PORT_TX_DISABLE 0x4000
+#define PORT_AUTO_NEG_RESTART 0x2000
+#define PORT_REMOTE_FAULT_DISABLE 0x1000
+#define PORT_POWER_DOWN 0x0800
+#define PORT_AUTO_MDIX_DISABLE 0x0400
+#define PORT_FORCE_MDIX 0x0200
+#define PORT_LOOPBACK 0x0100
+#define PORT_AUTO_NEG_ENABLE 0x0080
+#define PORT_FORCE_100_MBIT 0x0040
+#define PORT_FORCE_FULL_DUPLEX 0x0020
+#define PORT_AUTO_NEG_SYM_PAUSE 0x0010
+#define PORT_AUTO_NEG_100BTX_FD 0x0008
+#define PORT_AUTO_NEG_100BTX 0x0004
+#define PORT_AUTO_NEG_10BT_FD 0x0002
+#define PORT_AUTO_NEG_10BT 0x0001
+
+#define KS884X_PORT_STATUS_OFFSET 0x14
+
+#define PORT_HP_MDIX 0x8000
+#define PORT_REVERSED_POLARITY 0x2000
+#define PORT_RX_FLOW_CTRL 0x0800
+#define PORT_TX_FLOW_CTRL 0x1000
+#define PORT_STATUS_SPEED_100MBIT 0x0400
+#define PORT_STATUS_FULL_DUPLEX 0x0200
+#define PORT_REMOTE_FAULT 0x0100
+#define PORT_MDIX_STATUS 0x0080
+#define PORT_AUTO_NEG_COMPLETE 0x0040
+#define PORT_STATUS_LINK_GOOD 0x0020
+#define PORT_REMOTE_SYM_PAUSE 0x0010
+#define PORT_REMOTE_100BTX_FD 0x0008
+#define PORT_REMOTE_100BTX 0x0004
+#define PORT_REMOTE_10BT_FD 0x0002
+#define PORT_REMOTE_10BT 0x0001
+
+/*
+#define STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
+#define STATIC_MAC_TABLE_FWD_PORTS 00-00070000-00000000
+#define STATIC_MAC_TABLE_VALID 00-00080000-00000000
+#define STATIC_MAC_TABLE_OVERRIDE 00-00100000-00000000
+#define STATIC_MAC_TABLE_USE_FID 00-00200000-00000000
+#define STATIC_MAC_TABLE_FID 00-03C00000-00000000
+*/
+
+#define STATIC_MAC_TABLE_ADDR 0x0000FFFF
+#define STATIC_MAC_TABLE_FWD_PORTS 0x00070000
+#define STATIC_MAC_TABLE_VALID 0x00080000
+#define STATIC_MAC_TABLE_OVERRIDE 0x00100000
+#define STATIC_MAC_TABLE_USE_FID 0x00200000
+#define STATIC_MAC_TABLE_FID 0x03C00000
+
+#define STATIC_MAC_FWD_PORTS_SHIFT 16
+#define STATIC_MAC_FID_SHIFT 22
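+
+/*
+ * Illustrative sketch (hypothetical helper, not referenced by the
+ * driver): packing the upper word of a static MAC table entry from its
+ * fields; per the layout sketched above, the lower word carries the
+ * remaining four MAC address bytes.
+ */
+static inline u32 ks_sketch_static_mac_hi(u16 mac_hi, u8 fwd_ports,
+ u8 fid, int override, int use_fid)
+{
+ u32 data = mac_hi & STATIC_MAC_TABLE_ADDR;
+
+ data |= (fwd_ports << STATIC_MAC_FWD_PORTS_SHIFT) &
+ STATIC_MAC_TABLE_FWD_PORTS;
+ data |= STATIC_MAC_TABLE_VALID;
+ if (override)
+ data |= STATIC_MAC_TABLE_OVERRIDE;
+ if (use_fid)
+ data |= STATIC_MAC_TABLE_USE_FID |
+ ((fid << STATIC_MAC_FID_SHIFT) & STATIC_MAC_TABLE_FID);
+ return data;
+}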
+
+/*
+#define VLAN_TABLE_VID 00-00000000-00000FFF
+#define VLAN_TABLE_FID 00-00000000-0000F000
+#define VLAN_TABLE_MEMBERSHIP 00-00000000-00070000
+#define VLAN_TABLE_VALID 00-00000000-00080000
+*/
+
+#define VLAN_TABLE_VID 0x00000FFF
+#define VLAN_TABLE_FID 0x0000F000
+#define VLAN_TABLE_MEMBERSHIP 0x00070000
+#define VLAN_TABLE_VALID 0x00080000
+
+#define VLAN_TABLE_FID_SHIFT 12
+#define VLAN_TABLE_MEMBERSHIP_SHIFT 16
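+
+/*
+ * Illustrative sketch (hypothetical helper, not referenced by the
+ * driver): packing a VLAN table entry from its VID, FID and port
+ * membership fields using the masks and shifts above.
+ */
+static inline u32 ks_sketch_vlan_entry(u16 vid, u8 fid, u8 member)
+{
+ return (vid & VLAN_TABLE_VID) |
+ ((fid << VLAN_TABLE_FID_SHIFT) & VLAN_TABLE_FID) |
+ ((member << VLAN_TABLE_MEMBERSHIP_SHIFT) & VLAN_TABLE_MEMBERSHIP) |
+ VLAN_TABLE_VALID;
+}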
+
+/*
+#define DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
+#define DYNAMIC_MAC_TABLE_FID 00-000F0000-00000000
+#define DYNAMIC_MAC_TABLE_SRC_PORT 00-00300000-00000000
+#define DYNAMIC_MAC_TABLE_TIMESTAMP 00-00C00000-00000000
+#define DYNAMIC_MAC_TABLE_ENTRIES 03-FF000000-00000000
+#define DYNAMIC_MAC_TABLE_MAC_EMPTY 04-00000000-00000000
+#define DYNAMIC_MAC_TABLE_RESERVED 78-00000000-00000000
+#define DYNAMIC_MAC_TABLE_NOT_READY 80-00000000-00000000
+*/
+
+#define DYNAMIC_MAC_TABLE_ADDR 0x0000FFFF
+#define DYNAMIC_MAC_TABLE_FID 0x000F0000
+#define DYNAMIC_MAC_TABLE_SRC_PORT 0x00300000
+#define DYNAMIC_MAC_TABLE_TIMESTAMP 0x00C00000
+#define DYNAMIC_MAC_TABLE_ENTRIES 0xFF000000
+
+#define DYNAMIC_MAC_TABLE_ENTRIES_H 0x03
+#define DYNAMIC_MAC_TABLE_MAC_EMPTY 0x04
+#define DYNAMIC_MAC_TABLE_RESERVED 0x78
+#define DYNAMIC_MAC_TABLE_NOT_READY 0x80
+
+#define DYNAMIC_MAC_FID_SHIFT 16
+#define DYNAMIC_MAC_SRC_PORT_SHIFT 20
+#define DYNAMIC_MAC_TIMESTAMP_SHIFT 22
+#define DYNAMIC_MAC_ENTRIES_SHIFT 24
+#define DYNAMIC_MAC_ENTRIES_H_SHIFT 8
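+
+/*
+ * Illustrative sketch (hypothetical helper, not referenced by the
+ * driver): per the layout sketched above, the learned-entry count is
+ * split between the upper dword (8 bits) and the extra status byte
+ * (2 bits), giving a 10-bit count.
+ */
+static inline u32 ks_sketch_dyn_mac_entries(u32 status_byte, u32 data_hi)
+{
+ return ((status_byte & DYNAMIC_MAC_TABLE_ENTRIES_H) <<
+ DYNAMIC_MAC_ENTRIES_H_SHIFT) |
+ ((data_hi & DYNAMIC_MAC_TABLE_ENTRIES) >>
+ DYNAMIC_MAC_ENTRIES_SHIFT);
+}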
+
+/*
+#define MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
+#define MIB_COUNTER_VALID 00-00000000-40000000
+#define MIB_COUNTER_OVERFLOW 00-00000000-80000000
+*/
+
+#define MIB_COUNTER_VALUE 0x3FFFFFFF
+#define MIB_COUNTER_VALID 0x40000000
+#define MIB_COUNTER_OVERFLOW 0x80000000
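+
+/*
+ * Illustrative sketch (hypothetical helper, not referenced by the
+ * driver): folding a hardware MIB counter read into a 64-bit software
+ * mirror. Only the low 30 bits carry the value; the valid bit gates
+ * the read and the overflow bit accounts for one full wrap.
+ */
+static inline int ks_sketch_mib_accumulate(u32 data, u64 *counter)
+{
+ if (!(data & MIB_COUNTER_VALID))
+ return 0; /* hardware not ready; retry later */
+ if (data & MIB_COUNTER_OVERFLOW)
+ *counter += MIB_COUNTER_VALUE + 1;
+ *counter += data & MIB_COUNTER_VALUE;
+ return 1;
+}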
+
+#define MIB_PACKET_DROPPED 0x0000FFFF
+
+#define KS_MIB_PACKET_DROPPED_TX_0 0x100
+#define KS_MIB_PACKET_DROPPED_TX_1 0x101
+#define KS_MIB_PACKET_DROPPED_TX 0x102
+#define KS_MIB_PACKET_DROPPED_RX_0 0x103
+#define KS_MIB_PACKET_DROPPED_RX_1 0x104
+#define KS_MIB_PACKET_DROPPED_RX 0x105
+
+/* Change default LED mode. */
+#define SET_DEFAULT_LED LED_SPEED_DUPLEX_ACT
+
+#define MAC_ADDR_LEN 6
+#define MAC_ADDR_ORDER(i) (MAC_ADDR_LEN - 1 - (i))
+
+#define MAX_ETHERNET_BODY_SIZE 1500
+#define ETHERNET_HEADER_SIZE 14
+
+#define MAX_ETHERNET_PACKET_SIZE \
+ (MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE)
+
+#define REGULAR_RX_BUF_SIZE (MAX_ETHERNET_PACKET_SIZE + 4)
+#define MAX_RX_BUF_SIZE (1912 + 4)
+
+#define ADDITIONAL_ENTRIES 16
+#define MAX_MULTICAST_LIST 32
+
+#define HW_MULTICAST_SIZE 8
+
+#define HW_TO_DEV_PORT(port) (port - 1)
+
+enum {
+ media_connected,
+ media_disconnected
+};
+
+enum {
+ OID_COUNTER_UNKOWN,
+
+ OID_COUNTER_FIRST,
+
+ /* total transmit errors */
+ OID_COUNTER_XMIT_ERROR,
+
+ /* total receive errors */
+ OID_COUNTER_RCV_ERROR,
+
+ OID_COUNTER_LAST
+};
+
+/*
+ * Hardware descriptor definitions
+ */
+
+#define DESC_ALIGNMENT 16
+#define BUFFER_ALIGNMENT 8
+
+#define NUM_OF_RX_DESC 64
+#define NUM_OF_TX_DESC 64
+
+#define KS_DESC_RX_FRAME_LEN 0x000007FF
+#define KS_DESC_RX_FRAME_TYPE 0x00008000
+#define KS_DESC_RX_ERROR_CRC 0x00010000
+#define KS_DESC_RX_ERROR_RUNT 0x00020000
+#define KS_DESC_RX_ERROR_TOO_LONG 0x00040000
+#define KS_DESC_RX_ERROR_PHY 0x00080000
+#define KS884X_DESC_RX_PORT_MASK 0x00300000
+#define KS_DESC_RX_MULTICAST 0x01000000
+#define KS_DESC_RX_ERROR 0x02000000
+#define KS_DESC_RX_ERROR_CSUM_UDP 0x04000000
+#define KS_DESC_RX_ERROR_CSUM_TCP 0x08000000
+#define KS_DESC_RX_ERROR_CSUM_IP 0x10000000
+#define KS_DESC_RX_LAST 0x20000000
+#define KS_DESC_RX_FIRST 0x40000000
+#define KS_DESC_RX_ERROR_COND \
+ (KS_DESC_RX_ERROR_CRC | \
+ KS_DESC_RX_ERROR_RUNT | \
+ KS_DESC_RX_ERROR_PHY | \
+ KS_DESC_RX_ERROR_TOO_LONG)
+
+#define KS_DESC_HW_OWNED 0x80000000
+
+#define KS_DESC_BUF_SIZE 0x000007FF
+#define KS884X_DESC_TX_PORT_MASK 0x00300000
+#define KS_DESC_END_OF_RING 0x02000000
+#define KS_DESC_TX_CSUM_GEN_UDP 0x04000000
+#define KS_DESC_TX_CSUM_GEN_TCP 0x08000000
+#define KS_DESC_TX_CSUM_GEN_IP 0x10000000
+#define KS_DESC_TX_LAST 0x20000000
+#define KS_DESC_TX_FIRST 0x40000000
+#define KS_DESC_TX_INTERRUPT 0x80000000
+
+#define KS_DESC_PORT_SHIFT 20
+
+#define KS_DESC_RX_MASK (KS_DESC_BUF_SIZE)
+
+#define KS_DESC_TX_MASK \
+ (KS_DESC_TX_INTERRUPT | \
+ KS_DESC_TX_FIRST | \
+ KS_DESC_TX_LAST | \
+ KS_DESC_TX_CSUM_GEN_IP | \
+ KS_DESC_TX_CSUM_GEN_TCP | \
+ KS_DESC_TX_CSUM_GEN_UDP | \
+ KS_DESC_BUF_SIZE)
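+
+/*
+ * Illustrative sketch (hypothetical helper, not referenced by the
+ * driver): the control word of a single-fragment transmit descriptor
+ * marks the buffer as both first and last segment, requests a
+ * completion interrupt and hands ownership to the hardware.
+ */
+static inline u32 ks_sketch_tx_desc_ctrl(u32 buf_size)
+{
+ return KS_DESC_HW_OWNED | KS_DESC_TX_INTERRUPT |
+ KS_DESC_TX_FIRST | KS_DESC_TX_LAST |
+ (buf_size & KS_DESC_BUF_SIZE);
+}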
+
+struct ksz_desc_rx_stat {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 hw_owned:1;
+ u32 first_desc:1;
+ u32 last_desc:1;
+ u32 csum_err_ip:1;
+ u32 csum_err_tcp:1;
+ u32 csum_err_udp:1;
+ u32 error:1;
+ u32 multicast:1;
+ u32 src_port:4;
+ u32 err_phy:1;
+ u32 err_too_long:1;
+ u32 err_runt:1;
+ u32 err_crc:1;
+ u32 frame_type:1;
+ u32 reserved1:4;
+ u32 frame_len:11;
+#else
+ u32 frame_len:11;
+ u32 reserved1:4;
+ u32 frame_type:1;
+ u32 err_crc:1;
+ u32 err_runt:1;
+ u32 err_too_long:1;
+ u32 err_phy:1;
+ u32 src_port:4;
+ u32 multicast:1;
+ u32 error:1;
+ u32 csum_err_udp:1;
+ u32 csum_err_tcp:1;
+ u32 csum_err_ip:1;
+ u32 last_desc:1;
+ u32 first_desc:1;
+ u32 hw_owned:1;
+#endif
+};
+
+struct ksz_desc_tx_stat {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 hw_owned:1;
+ u32 reserved1:31;
+#else
+ u32 reserved1:31;
+ u32 hw_owned:1;
+#endif
+};
+
+struct ksz_desc_rx_buf {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 reserved4:6;
+ u32 end_of_ring:1;
+ u32 reserved3:14;
+ u32 buf_size:11;
+#else
+ u32 buf_size:11;
+ u32 reserved3:14;
+ u32 end_of_ring:1;
+ u32 reserved4:6;
+#endif
+};
+
+struct ksz_desc_tx_buf {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 intr:1;
+ u32 first_seg:1;
+ u32 last_seg:1;
+ u32 csum_gen_ip:1;
+ u32 csum_gen_tcp:1;
+ u32 csum_gen_udp:1;
+ u32 end_of_ring:1;
+ u32 reserved4:1;
+ u32 dest_port:4;
+ u32 reserved3:9;
+ u32 buf_size:11;
+#else
+ u32 buf_size:11;
+ u32 reserved3:9;
+ u32 dest_port:4;
+ u32 reserved4:1;
+ u32 end_of_ring:1;
+ u32 csum_gen_udp:1;
+ u32 csum_gen_tcp:1;
+ u32 csum_gen_ip:1;
+ u32 last_seg:1;
+ u32 first_seg:1;
+ u32 intr:1;
+#endif
+};
+
+union desc_stat {
+ struct ksz_desc_rx_stat rx;
+ struct ksz_desc_tx_stat tx;
+ u32 data;
+};
+
+union desc_buf {
+ struct ksz_desc_rx_buf rx;
+ struct ksz_desc_tx_buf tx;
+ u32 data;
+};
+
+/**
+ * struct ksz_hw_desc - Hardware descriptor data structure
+ * @ctrl: Descriptor control value.
+ * @buf: Descriptor buffer value.
+ * @addr: Physical address of memory buffer.
+ * @next: Pointer to next hardware descriptor.
+ */
+struct ksz_hw_desc {
+ union desc_stat ctrl;
+ union desc_buf buf;
+ u32 addr;
+ u32 next;
+};
+
+/**
+ * struct ksz_sw_desc - Software descriptor data structure
+ * @ctrl: Descriptor control value.
+ * @buf: Descriptor buffer value.
+ * @buf_size: Current buffers size value in hardware descriptor.
+ */
+struct ksz_sw_desc {
+ union desc_stat ctrl;
+ union desc_buf buf;
+ u32 buf_size;
+};
+
+/**
+ * struct ksz_dma_buf - OS dependent DMA buffer data structure
+ * @skb: Associated socket buffer.
+ * @dma: Associated physical DMA address.
+ * @len: Actual length used.
+ */
+struct ksz_dma_buf {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ int len;
+};
+
+/**
+ * struct ksz_desc - Descriptor structure
+ * @phw: Hardware descriptor pointer to uncached physical memory.
+ * @sw: Cached memory to hold hardware descriptor values for
+ * manipulation.
+ * @dma_buf: Operating system dependent data structure to hold physical
+ * memory buffer allocation information.
+ */
+struct ksz_desc {
+ struct ksz_hw_desc *phw;
+ struct ksz_sw_desc sw;
+ struct ksz_dma_buf dma_buf;
+};
+
+#define DMA_BUFFER(desc) ((struct ksz_dma_buf *)(&(desc)->dma_buf))
+
+/**
+ * struct ksz_desc_info - Descriptor information data structure
+ * @ring: First descriptor in the ring.
+ * @cur: Current descriptor being manipulated.
+ * @ring_virt: First hardware descriptor in the ring.
+ * @ring_phys: The physical address of the first descriptor of the ring.
+ * @size: Size of hardware descriptor.
+ * @alloc: Number of descriptors allocated.
+ * @avail: Number of descriptors available for use.
+ * @last: Index for last descriptor released to hardware.
+ * @next: Index for next descriptor available for use.
+ * @mask: Mask for index wrapping.
+ */
+struct ksz_desc_info {
+ struct ksz_desc *ring;
+ struct ksz_desc *cur;
+ struct ksz_hw_desc *ring_virt;
+ u32 ring_phys;
+ int size;
+ int alloc;
+ int avail;
+ int last;
+ int next;
+ int mask;
+};
+
+/*
+ * KSZ8842 switch definitions
+ */
+
+enum {
+ TABLE_STATIC_MAC = 0,
+ TABLE_VLAN,
+ TABLE_DYNAMIC_MAC,
+ TABLE_MIB
+};
+
+#define LEARNED_MAC_TABLE_ENTRIES 1024
+#define STATIC_MAC_TABLE_ENTRIES 8
+
+/**
+ * struct ksz_mac_table - Static MAC table data structure
+ * @mac_addr: MAC address to filter.
+ * @vid: VID value.
+ * @fid: FID value.
+ * @ports: Port membership.
+ * @override: Override setting.
+ * @use_fid: FID use setting.
+ * @valid: Valid setting indicating the entry is being used.
+ */
+struct ksz_mac_table {
+ u8 mac_addr[MAC_ADDR_LEN];
+ u16 vid;
+ u8 fid;
+ u8 ports;
+ u8 override:1;
+ u8 use_fid:1;
+ u8 valid:1;
+};
+
+#define VLAN_TABLE_ENTRIES 16
+
+/**
+ * struct ksz_vlan_table - VLAN table data structure
+ * @vid: VID value.
+ * @fid: FID value.
+ * @member: Port membership.
+ */
+struct ksz_vlan_table {
+ u16 vid;
+ u8 fid;
+ u8 member;
+};
+
+#define DIFFSERV_ENTRIES 64
+#define PRIO_802_1P_ENTRIES 8
+#define PRIO_QUEUES 4
+
+#define SWITCH_PORT_NUM 2
+#define TOTAL_PORT_NUM (SWITCH_PORT_NUM + 1)
+#define HOST_MASK (1 << SWITCH_PORT_NUM)
+#define PORT_MASK 7
+
+#define MAIN_PORT 0
+#define OTHER_PORT 1
+#define HOST_PORT SWITCH_PORT_NUM
+
+#define PORT_COUNTER_NUM 0x20
+#define TOTAL_PORT_COUNTER_NUM (PORT_COUNTER_NUM + 2)
+
+#define MIB_COUNTER_RX_LO_PRIORITY 0x00
+#define MIB_COUNTER_RX_HI_PRIORITY 0x01
+#define MIB_COUNTER_RX_UNDERSIZE 0x02
+#define MIB_COUNTER_RX_FRAGMENT 0x03
+#define MIB_COUNTER_RX_OVERSIZE 0x04
+#define MIB_COUNTER_RX_JABBER 0x05
+#define MIB_COUNTER_RX_SYMBOL_ERR 0x06
+#define MIB_COUNTER_RX_CRC_ERR 0x07
+#define MIB_COUNTER_RX_ALIGNMENT_ERR 0x08
+#define MIB_COUNTER_RX_CTRL_8808 0x09
+#define MIB_COUNTER_RX_PAUSE 0x0A
+#define MIB_COUNTER_RX_BROADCAST 0x0B
+#define MIB_COUNTER_RX_MULTICAST 0x0C
+#define MIB_COUNTER_RX_UNICAST 0x0D
+#define MIB_COUNTER_RX_OCTET_64 0x0E
+#define MIB_COUNTER_RX_OCTET_65_127 0x0F
+#define MIB_COUNTER_RX_OCTET_128_255 0x10
+#define MIB_COUNTER_RX_OCTET_256_511 0x11
+#define MIB_COUNTER_RX_OCTET_512_1023 0x12
+#define MIB_COUNTER_RX_OCTET_1024_1522 0x13
+#define MIB_COUNTER_TX_LO_PRIORITY 0x14
+#define MIB_COUNTER_TX_HI_PRIORITY 0x15
+#define MIB_COUNTER_TX_LATE_COLLISION 0x16
+#define MIB_COUNTER_TX_PAUSE 0x17
+#define MIB_COUNTER_TX_BROADCAST 0x18
+#define MIB_COUNTER_TX_MULTICAST 0x19
+#define MIB_COUNTER_TX_UNICAST 0x1A
+#define MIB_COUNTER_TX_DEFERRED 0x1B
+#define MIB_COUNTER_TX_TOTAL_COLLISION 0x1C
+#define MIB_COUNTER_TX_EXCESS_COLLISION 0x1D
+#define MIB_COUNTER_TX_SINGLE_COLLISION 0x1E
+#define MIB_COUNTER_TX_MULTI_COLLISION 0x1F
+
+#define MIB_COUNTER_RX_DROPPED_PACKET 0x20
+#define MIB_COUNTER_TX_DROPPED_PACKET 0x21
+
+/**
+ * struct ksz_port_mib - Port MIB data structure
+ * @cnt_ptr: Current pointer to MIB counter index.
+ * @link_down: Indication the link has just gone down.
+ * @state: Connection status of the port.
+ * @mib_start: The starting counter index. Some ports do not start at 0.
+ * @counter: 64-bit MIB counter value.
+ * @dropped: Temporary buffer to remember last read packet dropped values.
+ *
+ * MIB counters need to be read periodically so that the counters do not
+ * overflow and report incorrect values. A right balance is needed to
+ * satisfy this condition and not waste too much CPU time.
+ *
+ * It is pointless to read MIB counters when the port is disconnected. The
+ * @state provides the connection status so that MIB counters are read only
+ * when the port is connected. The @link_down indicates the port is just
+ * disconnected so that all MIB counters are read one last time to update the
+ * information.
+ */
+struct ksz_port_mib {
+ u8 cnt_ptr;
+ u8 link_down;
+ u8 state;
+ u8 mib_start;
+
+ u64 counter[TOTAL_PORT_COUNTER_NUM];
+ u32 dropped[2];
+};
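+
+/*
+ * Illustrative sketch (hypothetical helper, not referenced by the
+ * driver): the reading policy described above polls the counters only
+ * while the port is connected, plus one final pass right after a
+ * disconnect is noticed.
+ */
+static inline int ks_sketch_mib_read_needed(const struct ksz_port_mib *mib)
+{
+ return mib->state == media_connected || mib->link_down;
+}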
+
+/**
+ * struct ksz_port_cfg - Port configuration data structure
+ * @vid: VID value.
+ * @member: Port membership.
+ * @port_prio: Port priority.
+ * @rx_rate: Receive priority rate.
+ * @tx_rate: Transmit priority rate.
+ * @stp_state: Current Spanning Tree Protocol state.
+ */
+struct ksz_port_cfg {
+ u16 vid;
+ u8 member;
+ u8 port_prio;
+ u32 rx_rate[PRIO_QUEUES];
+ u32 tx_rate[PRIO_QUEUES];
+ int stp_state;
+};
+
+/**
+ * struct ksz_switch - KSZ8842 switch data structure
+ * @mac_table: MAC table entries information.
+ * @vlan_table: VLAN table entries information.
+ * @port_cfg: Port configuration information.
+ * @diffserv: DiffServ priority settings. Possible values from 6-bit of ToS
+ * (bit7 ~ bit2) field.
+ * @p_802_1p: 802.1P priority settings. Possible values from 3-bit of 802.1p
+ * Tag priority field.
+ * @br_addr: Bridge address. Used for STP.
+ * @other_addr: Other MAC address. Used for multiple network device mode.
+ * @broad_per: Broadcast storm percentage.
+ * @member: Current port membership. Used for STP.
+ */
+struct ksz_switch {
+ struct ksz_mac_table mac_table[STATIC_MAC_TABLE_ENTRIES];
+ struct ksz_vlan_table vlan_table[VLAN_TABLE_ENTRIES];
+ struct ksz_port_cfg port_cfg[TOTAL_PORT_NUM];
+
+ u8 diffserv[DIFFSERV_ENTRIES];
+ u8 p_802_1p[PRIO_802_1P_ENTRIES];
+
+ u8 br_addr[MAC_ADDR_LEN];
+ u8 other_addr[MAC_ADDR_LEN];
+
+ u8 broad_per;
+ u8 member;
+};
+
+#define TX_RATE_UNIT 10000
+
+/**
+ * struct ksz_port_info - Port information data structure
+ * @state: Connection status of the port.
+ * @tx_rate: Transmit rate divided by 10000 to get Mbit.
+ * @duplex: Duplex mode.
+ * @advertised: Advertised auto-negotiation setting. Used to determine link.
+ * @partner: Auto-negotiation partner setting. Used to determine link.
+ * @port_id: Port index to access actual hardware register.
+ * @pdev: Pointer to OS dependent network device.
+ */
+struct ksz_port_info {
+ uint state;
+ uint tx_rate;
+ u8 duplex;
+ u8 advertised;
+ u8 partner;
+ u8 port_id;
+ void *pdev;
+};
+
+#define MAX_TX_HELD_SIZE 52000
+
+/* Hardware features and bug fixes. */
+#define LINK_INT_WORKING (1 << 0)
+#define SMALL_PACKET_TX_BUG (1 << 1)
+#define HALF_DUPLEX_SIGNAL_BUG (1 << 2)
+#define RX_HUGE_FRAME (1 << 4)
+#define STP_SUPPORT (1 << 8)
+
+/* Software overrides. */
+#define PAUSE_FLOW_CTRL (1 << 0)
+#define FAST_AGING (1 << 1)
+
+/**
+ * struct ksz_hw - KSZ884X hardware data structure
+ * @io: Virtual address assigned.
+ * @ksz_switch: Pointer to KSZ8842 switch.
+ * @port_info: Port information.
+ * @port_mib: Port MIB information.
+ * @dev_count: Number of network devices this hardware supports.
+ * @dst_ports: Destination ports in switch for transmission.
+ * @id: Hardware ID. Used for display only.
+ * @mib_cnt: Number of MIB counters this hardware has.
+ * @mib_port_cnt: Number of ports with MIB counters.
+ * @tx_cfg: Cached transmit control settings.
+ * @rx_cfg: Cached receive control settings.
+ * @intr_mask: Current interrupt mask.
+ * @intr_set: Current interrupt set.
+ * @intr_blocked: Interrupt blocked.
+ * @rx_desc_info: Receive descriptor information.
+ * @tx_desc_info: Transmit descriptor information.
+ * @tx_int_cnt: Transmit interrupt count. Used for TX optimization.
+ * @tx_int_mask: Transmit interrupt mask. Used for TX optimization.
+ * @tx_size: Transmit data size. Used for TX optimization.
+ * The maximum is defined by MAX_TX_HELD_SIZE.
+ * @perm_addr: Permanent MAC address.
+ * @override_addr: Overridden MAC address.
+ * @address: Additional MAC address entries.
+ * @addr_list_size: Additional MAC address list size.
+ * @mac_override: Indication of MAC address overridden.
+ * @promiscuous: Counter to keep track of promiscuous mode set.
+ * @all_multi: Counter to keep track of all multicast mode set.
+ * @multi_list: Multicast address entries.
+ * @multi_bits: Cached multicast hash table settings.
+ * @multi_list_size: Multicast address list size.
+ * @enabled: Indication of hardware enabled.
+ * @rx_stop: Indication of receive process stop.
+ * @features: Hardware features to enable.
+ * @overrides: Hardware features to override.
+ * @parent: Pointer to parent, network device private structure.
+ */
+struct ksz_hw {
+ void __iomem *io;
+
+ struct ksz_switch *ksz_switch;
+ struct ksz_port_info port_info[SWITCH_PORT_NUM];
+ struct ksz_port_mib port_mib[TOTAL_PORT_NUM];
+ int dev_count;
+ int dst_ports;
+ int id;
+ int mib_cnt;
+ int mib_port_cnt;
+
+ u32 tx_cfg;
+ u32 rx_cfg;
+ u32 intr_mask;
+ u32 intr_set;
+ uint intr_blocked;
+
+ struct ksz_desc_info rx_desc_info;
+ struct ksz_desc_info tx_desc_info;
+
+ int tx_int_cnt;
+ int tx_int_mask;
+ int tx_size;
+
+ u8 perm_addr[MAC_ADDR_LEN];
+ u8 override_addr[MAC_ADDR_LEN];
+ u8 address[ADDITIONAL_ENTRIES][MAC_ADDR_LEN];
+ u8 addr_list_size;
+ u8 mac_override;
+ u8 promiscuous;
+ u8 all_multi;
+ u8 multi_list[MAX_MULTICAST_LIST][MAC_ADDR_LEN];
+ u8 multi_bits[HW_MULTICAST_SIZE];
+ u8 multi_list_size;
+
+ u8 enabled;
+ u8 rx_stop;
+ u8 reserved2[1];
+
+ uint features;
+ uint overrides;
+
+ void *parent;
+};
+
+enum {
+ PHY_NO_FLOW_CTRL,
+ PHY_FLOW_CTRL,
+ PHY_TX_ONLY,
+ PHY_RX_ONLY
+};
+
+/**
+ * struct ksz_port - Virtual port data structure
+ * @duplex: Duplex mode setting. 1 for half duplex, 2 for full
+ * duplex, and 0 for auto, which normally results in full
+ * duplex.
+ * @speed: Speed setting. 10 for 10 Mbit, 100 for 100 Mbit, and
+ * 0 for auto, which normally results in 100 Mbit.
+ * @force_link: Force link setting. 0 for auto-negotiation, and 1 for
+ * force.
+ * @flow_ctrl: Flow control setting. PHY_NO_FLOW_CTRL for no flow
+ * control, and PHY_FLOW_CTRL for flow control.
+ * PHY_TX_ONLY and PHY_RX_ONLY are not supported for 100
+ * Mbit PHY.
+ * @first_port: Index of first port this port supports.
+ * @mib_port_cnt: Number of ports with MIB counters.
+ * @port_cnt: Number of ports this port supports.
+ * @counter: Port statistics counter.
+ * @hw: Pointer to hardware structure.
+ * @linked: Pointer to port information linked to this port.
+ */
+struct ksz_port {
+ u8 duplex;
+ u8 speed;
+ u8 force_link;
+ u8 flow_ctrl;
+
+ int first_port;
+ int mib_port_cnt;
+ int port_cnt;
+ u64 counter[OID_COUNTER_LAST];
+
+ struct ksz_hw *hw;
+ struct ksz_port_info *linked;
+};
+
+/**
+ * struct ksz_timer_info - Timer information data structure
+ * @timer: Kernel timer.
+ * @cnt: Running timer counter.
+ * @max: Number of times to run timer; -1 for infinity.
+ * @period: Timer period in jiffies.
+ */
+struct ksz_timer_info {
+ struct timer_list timer;
+ int cnt;
+ int max;
+ int period;
+};
+
+/**
+ * struct ksz_shared_mem - OS dependent shared memory data structure
+ * @dma_addr: Physical DMA address allocated.
+ * @alloc_size: Allocation size.
+ * @phys: Actual physical address used.
+ * @alloc_virt: Virtual address allocated.
+ * @virt: Actual virtual address used.
+ */
+struct ksz_shared_mem {
+ dma_addr_t dma_addr;
+ uint alloc_size;
+ uint phys;
+ u8 *alloc_virt;
+ u8 *virt;
+};
+
+/**
+ * struct ksz_counter_info - OS dependent counter information data structure
+ * @counter: Wait queue to wakeup after counters are read.
+ * @time: Next time in jiffies to read counter.
+ * @read: Indication of counters read in full or not.
+ */
+struct ksz_counter_info {
+ wait_queue_head_t counter;
+ unsigned long time;
+ int read;
+};
+
+/**
+ * struct dev_info - Network device information data structure
+ * @dev: Pointer to network device.
+ * @pdev: Pointer to PCI device.
+ * @hw: Hardware structure.
+ * @desc_pool: Physical memory used for descriptor pool.
+ * @hwlock: Spinlock protecting hardware access.
+ * @lock: Mutex protecting device access.
+ * @dev_rcv: Receive process function used.
+ * @last_skb: Socket buffer allocated for descriptor rx fragments.
+ * @skb_index: Buffer index for receiving fragments.
+ * @skb_len: Buffer length for receiving fragments.
+ * @mib_read: Workqueue to read MIB counters.
+ * @mib_timer_info: Timer to read MIB counters.
+ * @counter: Used for MIB reading.
+ * @mtu: Current MTU used. The default is REGULAR_RX_BUF_SIZE;
+ * the maximum is MAX_RX_BUF_SIZE.
+ * @opened: Counter to keep track of device open.
+ * @rx_tasklet: Receive processing tasklet.
+ * @tx_tasklet: Transmit processing tasklet.
+ * @wol_enable: Wake-on-LAN enable set by ethtool.
+ * @wol_support: Wake-on-LAN support used by ethtool.
+ * @pme_wait: Used for KSZ8841 power management.
+ */
+struct dev_info {
+ struct net_device *dev;
+ struct pci_dev *pdev;
+
+ struct ksz_hw hw;
+ struct ksz_shared_mem desc_pool;
+
+ spinlock_t hwlock;
+ struct mutex lock;
+
+ int (*dev_rcv)(struct dev_info *);
+
+ struct sk_buff *last_skb;
+ int skb_index;
+ int skb_len;
+
+ struct work_struct mib_read;
+ struct ksz_timer_info mib_timer_info;
+ struct ksz_counter_info counter[TOTAL_PORT_NUM];
+
+ int mtu;
+ int opened;
+
+ struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tx_tasklet;
+
+ int wol_enable;
+ int wol_support;
+ unsigned long pme_wait;
+};
+
+/**
+ * struct dev_priv - Network device private data structure
+ * @adapter: Adapter device information.
+ * @port: Port information.
+ * @monitor_timer_info: Timer to monitor ports.
+ * @proc_sem: Semaphore for proc access.
+ * @id: Device ID.
+ * @mii_if: MII interface information.
+ * @advertising: Temporary variable to store advertised settings.
+ * @msg_enable: The message flags controlling driver output.
+ * @media_state: The connection status of the device.
+ * @multicast: The all multicast state of the device.
+ * @promiscuous: The promiscuous state of the device.
+ */
+struct dev_priv {
+ struct dev_info *adapter;
+ struct ksz_port port;
+ struct ksz_timer_info monitor_timer_info;
+
+ struct semaphore proc_sem;
+ int id;
+
+ struct mii_if_info mii_if;
+ u32 advertising;
+
+ u32 msg_enable;
+ int media_state;
+ int multicast;
+ int promiscuous;
+};
+
+#define DRV_NAME "KSZ884X PCI"
+#define DEVICE_NAME "KSZ884x PCI"
+#define DRV_VERSION "1.0.0"
+#define DRV_RELDATE "Feb 8, 2010"
+
+static char version[] __devinitdata =
+ "Micrel " DEVICE_NAME " " DRV_VERSION " (" DRV_RELDATE ")";
+
+static u8 DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x88, 0x42, 0x01 };
+
+/*
+ * Interrupt processing primary routines
+ */
+
+static inline void hw_ack_intr(struct ksz_hw *hw, uint interrupt)
+{
+ writel(interrupt, hw->io + KS884X_INTERRUPTS_STATUS);
+}
+
+static inline void hw_dis_intr(struct ksz_hw *hw)
+{
+ hw->intr_blocked = hw->intr_mask;
+ writel(0, hw->io + KS884X_INTERRUPTS_ENABLE);
+ hw->intr_set = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
+}
+
+static inline void hw_set_intr(struct ksz_hw *hw, uint interrupt)
+{
+ hw->intr_set = interrupt;
+ writel(interrupt, hw->io + KS884X_INTERRUPTS_ENABLE);
+}
+
+static inline void hw_ena_intr(struct ksz_hw *hw)
+{
+ hw->intr_blocked = 0;
+ hw_set_intr(hw, hw->intr_mask);
+}
+
+static inline void hw_dis_intr_bit(struct ksz_hw *hw, uint bit)
+{
+ hw->intr_mask &= ~(bit);
+}
+
+static inline void hw_turn_off_intr(struct ksz_hw *hw, uint interrupt)
+{
+ u32 read_intr;
+
+ read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
+ hw->intr_set = read_intr & ~interrupt;
+ writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
+ hw_dis_intr_bit(hw, interrupt);
+}
+
+/**
+ * hw_turn_on_intr - turn on specified interrupts
+ * @hw: The hardware instance.
+ * @bit: The interrupt bits to be turned on.
+ *
+ * This routine turns on the specified interrupts in the interrupt mask so that
+ * those interrupts will be enabled.
+ */
+static void hw_turn_on_intr(struct ksz_hw *hw, u32 bit)
+{
+ hw->intr_mask |= bit;
+
+ if (!hw->intr_blocked)
+ hw_set_intr(hw, hw->intr_mask);
+}
+
+static inline void hw_ena_intr_bit(struct ksz_hw *hw, uint interrupt)
+{
+ u32 read_intr;
+
+ read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
+ hw->intr_set = read_intr | interrupt;
+ writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
+}
+
+static inline void hw_read_intr(struct ksz_hw *hw, uint *status)
+{
+ *status = readl(hw->io + KS884X_INTERRUPTS_STATUS);
+ *status = *status & hw->intr_set;
+}
+
+static inline void hw_restore_intr(struct ksz_hw *hw, uint interrupt)
+{
+ if (interrupt)
+ hw_ena_intr(hw);
+}
+
+/**
+ * hw_block_intr - block hardware interrupts
+ * @hw: The hardware instance.
+ *
+ * This function blocks all interrupts of the hardware and returns the current
+ * interrupt enable mask so that interrupts can be restored later.
+ *
+ * Return the current interrupt enable mask.
+ */
+static uint hw_block_intr(struct ksz_hw *hw)
+{
+ uint interrupt = 0;
+
+ if (!hw->intr_blocked) {
+ hw_dis_intr(hw);
+ interrupt = hw->intr_blocked;
+ }
+ return interrupt;
+}
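+
+/*
+ * Example (illustrative only): hw_block_intr() and hw_restore_intr()
+ * form a save/restore pair around indirect register accesses, as in
+ * sw_r_table() below:
+ *
+ *	uint interrupt = hw_block_intr(hw);
+ *
+ *	... access the indirect registers ...
+ *
+ *	hw_restore_intr(hw, interrupt);
+ */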
+
+/*
+ * Hardware descriptor routines
+ */
+
+static inline void reset_desc(struct ksz_desc *desc, union desc_stat status)
+{
+ status.rx.hw_owned = 0;
+ desc->phw->ctrl.data = cpu_to_le32(status.data);
+}
+
+static inline void release_desc(struct ksz_desc *desc)
+{
+ desc->sw.ctrl.tx.hw_owned = 1;
+ if (desc->sw.buf_size != desc->sw.buf.data) {
+ desc->sw.buf_size = desc->sw.buf.data;
+ desc->phw->buf.data = cpu_to_le32(desc->sw.buf.data);
+ }
+ desc->phw->ctrl.data = cpu_to_le32(desc->sw.ctrl.data);
+}
+
+static void get_rx_pkt(struct ksz_desc_info *info, struct ksz_desc **desc)
+{
+ *desc = &info->ring[info->last];
+ info->last++;
+ info->last &= info->mask;
+ info->avail--;
+ (*desc)->sw.buf.data &= ~KS_DESC_RX_MASK;
+}
+
+static inline void set_rx_buf(struct ksz_desc *desc, u32 addr)
+{
+ desc->phw->addr = cpu_to_le32(addr);
+}
+
+static inline void set_rx_len(struct ksz_desc *desc, u32 len)
+{
+ desc->sw.buf.rx.buf_size = len;
+}
+
+static inline void get_tx_pkt(struct ksz_desc_info *info,
+ struct ksz_desc **desc)
+{
+ *desc = &info->ring[info->next];
+ info->next++;
+ info->next &= info->mask;
+ info->avail--;
+ (*desc)->sw.buf.data &= ~KS_DESC_TX_MASK;
+}
+
+static inline void set_tx_buf(struct ksz_desc *desc, u32 addr)
+{
+ desc->phw->addr = cpu_to_le32(addr);
+}
+
+static inline void set_tx_len(struct ksz_desc *desc, u32 len)
+{
+ desc->sw.buf.tx.buf_size = len;
+}
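+
+/*
+ * Example (illustrative only): a transmit path typically pairs these
+ * descriptor helpers as follows (dma_addr and len are placeholders):
+ *
+ *	struct ksz_desc *desc;
+ *
+ *	get_tx_pkt(&hw->tx_desc_info, &desc);
+ *	set_tx_buf(desc, dma_addr);
+ *	set_tx_len(desc, len);
+ *	release_desc(desc);
+ */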
+
+/* Switch functions */
+
+#define TABLE_READ 0x10
+#define TABLE_SEL_SHIFT 2
+
+#define HW_DELAY(hw, reg) \
+ do { \
+ u16 dummy; \
+ dummy = readw(hw->io + reg); \
+ } while (0)
+
+/**
+ * sw_r_table - read 4 bytes of data from switch table
+ * @hw: The hardware instance.
+ * @table: The table selector.
+ * @addr: The address of the table entry.
+ * @data: Buffer to store the read data.
+ *
+ * This routine reads 4 bytes of data from the table of the switch.
+ * Hardware interrupts are disabled to minimize corruption of read data.
+ */
+static void sw_r_table(struct ksz_hw *hw, int table, u16 addr, u32 *data)
+{
+ u16 ctrl_addr;
+ uint interrupt;
+
+ ctrl_addr = (((table << TABLE_SEL_SHIFT) | TABLE_READ) << 8) | addr;
+
+ interrupt = hw_block_intr(hw);
+
+ writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
+ HW_DELAY(hw, KS884X_IACR_OFFSET);
+ *data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
+
+ hw_restore_intr(hw, interrupt);
+}
+
+/**
+ * sw_w_table_64 - write 8 bytes of data to the switch table
+ * @hw: The hardware instance.
+ * @table: The table selector.
+ * @addr: The address of the table entry.
+ * @data_hi: The high part of data to be written (bit63 ~ bit32).
+ * @data_lo: The low part of data to be written (bit31 ~ bit0).
+ *
+ * This routine writes 8 bytes of data to the table of the switch.
+ * Hardware interrupts are disabled to minimize corruption of written data.
+ */
+static void sw_w_table_64(struct ksz_hw *hw, int table, u16 addr, u32 data_hi,
+ u32 data_lo)
+{
+ u16 ctrl_addr;
+ uint interrupt;
+
+ ctrl_addr = ((table << TABLE_SEL_SHIFT) << 8) | addr;
+
+ interrupt = hw_block_intr(hw);
+
+ writel(data_hi, hw->io + KS884X_ACC_DATA_4_OFFSET);
+ writel(data_lo, hw->io + KS884X_ACC_DATA_0_OFFSET);
+
+ writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
+ HW_DELAY(hw, KS884X_IACR_OFFSET);
+
+ hw_restore_intr(hw, interrupt);
+}
+
+/**
+ * sw_w_sta_mac_table - write to the static MAC table
+ * @hw: The hardware instance.
+ * @addr: The address of the table entry.
+ * @mac_addr: The MAC address.
+ * @ports: The port members.
+ * @override: The flag to override the port receive/transmit settings.
+ * @valid: The flag to indicate entry is valid.
+ * @use_fid: The flag to indicate the FID is valid.
+ * @fid: The FID value.
+ *
+ * This routine writes an entry of the static MAC table of the switch. It
+ * calls sw_w_table_64() to write the data.
+ */
+static void sw_w_sta_mac_table(struct ksz_hw *hw, u16 addr, u8 *mac_addr,
+ u8 ports, int override, int valid, int use_fid, u8 fid)
+{
+ u32 data_hi;
+ u32 data_lo;
+
+ data_lo = ((u32) mac_addr[2] << 24) |
+ ((u32) mac_addr[3] << 16) |
+ ((u32) mac_addr[4] << 8) | mac_addr[5];
+ data_hi = ((u32) mac_addr[0] << 8) | mac_addr[1];
+ data_hi |= (u32) ports << STATIC_MAC_FWD_PORTS_SHIFT;
+
+ if (override)
+ data_hi |= STATIC_MAC_TABLE_OVERRIDE;
+ if (use_fid) {
+ data_hi |= STATIC_MAC_TABLE_USE_FID;
+ data_hi |= (u32) fid << STATIC_MAC_FID_SHIFT;
+ }
+ if (valid)
+ data_hi |= STATIC_MAC_TABLE_VALID;
+
+ sw_w_table_64(hw, TABLE_STATIC_MAC, addr, data_hi, data_lo);
+}
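+
+/*
+ * Example (illustrative only): sw_init_stp() below uses this helper to
+ * install the 01:80:C2:00:00:00 BPDU address with host port override:
+ *
+ *	sw_w_sta_mac_table(hw, STP_ENTRY, entry->mac_addr, HOST_MASK,
+ *		1, 1, 0, 0);
+ */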
+
+/**
+ * sw_r_vlan_table - read from the VLAN table
+ * @hw: The hardware instance.
+ * @addr: The address of the table entry.
+ * @vid: Buffer to store the VID.
+ * @fid: Buffer to store the FID.
+ * @member: Buffer to store the port membership.
+ *
+ * This function reads an entry of the VLAN table of the switch. It calls
+ * sw_r_table() to get the data.
+ *
+ * Return 0 if the entry is valid; otherwise -1.
+ */
+static int sw_r_vlan_table(struct ksz_hw *hw, u16 addr, u16 *vid, u8 *fid,
+ u8 *member)
+{
+ u32 data;
+
+ sw_r_table(hw, TABLE_VLAN, addr, &data);
+ if (data & VLAN_TABLE_VALID) {
+ *vid = (u16)(data & VLAN_TABLE_VID);
+ *fid = (u8)((data & VLAN_TABLE_FID) >> VLAN_TABLE_FID_SHIFT);
+ *member = (u8)((data & VLAN_TABLE_MEMBERSHIP) >>
+ VLAN_TABLE_MEMBERSHIP_SHIFT);
+ return 0;
+ }
+ return -1;
+}
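+
+/*
+ * Example (illustrative only): reading the first VLAN table entry, as
+ * done for every entry in sw_init_vlan() below:
+ *
+ *	u16 vid;
+ *	u8 fid;
+ *	u8 member;
+ *
+ *	if (!sw_r_vlan_table(hw, 0, &vid, &fid, &member))
+ *		... the entry is valid ...
+ */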
+
+/**
+ * port_r_mib_cnt - read MIB counter
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @addr: The address of the counter.
+ * @cnt: Buffer to store the counter.
+ *
+ * This routine reads a MIB counter of the port.
+ * Hardware interrupts are disabled to minimize corruption of read data.
+ */
+static void port_r_mib_cnt(struct ksz_hw *hw, int port, u16 addr, u64 *cnt)
+{
+ u32 data;
+ u16 ctrl_addr;
+ uint interrupt;
+ int timeout;
+
+ ctrl_addr = addr + PORT_COUNTER_NUM * port;
+
+ interrupt = hw_block_intr(hw);
+
+ ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ) << 8);
+ writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
+ HW_DELAY(hw, KS884X_IACR_OFFSET);
+
+ for (timeout = 100; timeout > 0; timeout--) {
+ data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
+
+ if (data & MIB_COUNTER_VALID) {
+ if (data & MIB_COUNTER_OVERFLOW)
+ *cnt += MIB_COUNTER_VALUE + 1;
+ *cnt += data & MIB_COUNTER_VALUE;
+ break;
+ }
+ }
+
+ hw_restore_intr(hw, interrupt);
+}
+
+/**
+ * port_r_mib_pkt - read dropped packet counts
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @last: Buffer to store the last read dropped packet counts.
+ * @cnt: Buffer to store the receive and transmit dropped packet counts.
+ *
+ * This routine reads the dropped packet counts of the port.
+ * Hardware interrupts are disabled to minimize corruption of read data.
+ */
+static void port_r_mib_pkt(struct ksz_hw *hw, int port, u32 *last, u64 *cnt)
+{
+ u32 cur;
+ u32 data;
+ u16 ctrl_addr;
+ uint interrupt;
+ int index;
+
+ index = KS_MIB_PACKET_DROPPED_RX_0 + port;
+ do {
+ interrupt = hw_block_intr(hw);
+
+ ctrl_addr = (u16) index;
+ ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ)
+ << 8);
+ writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
+ HW_DELAY(hw, KS884X_IACR_OFFSET);
+ data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
+
+ hw_restore_intr(hw, interrupt);
+
+ data &= MIB_PACKET_DROPPED;
+ cur = *last;
+ if (data != cur) {
+ *last = data;
+ if (data < cur)
+ data += MIB_PACKET_DROPPED + 1;
+ data -= cur;
+ *cnt += data;
+ }
+ ++last;
+ ++cnt;
+ index -= KS_MIB_PACKET_DROPPED_TX -
+ KS_MIB_PACKET_DROPPED_TX_0 + 1;
+ } while (index >= KS_MIB_PACKET_DROPPED_TX_0 + port);
+}
+
+/**
+ * port_r_cnt - read MIB counters periodically
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine is used to read the counters of the port periodically to avoid
+ * counter overflow. The hardware should be acquired before calling this
+ * routine.
+ *
+ * Return non-zero when not all counters are read.
+ */
+static int port_r_cnt(struct ksz_hw *hw, int port)
+{
+ struct ksz_port_mib *mib = &hw->port_mib[port];
+
+ if (mib->mib_start < PORT_COUNTER_NUM)
+ while (mib->cnt_ptr < PORT_COUNTER_NUM) {
+ port_r_mib_cnt(hw, port, mib->cnt_ptr,
+ &mib->counter[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ }
+ if (hw->mib_cnt > PORT_COUNTER_NUM)
+ port_r_mib_pkt(hw, port, mib->dropped,
+ &mib->counter[PORT_COUNTER_NUM]);
+ mib->cnt_ptr = 0;
+ return 0;
+}
+
+/**
+ * port_init_cnt - initialize MIB counter values
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine is used to initialize all counters to zero if the hardware
+ * cannot do it after reset.
+ */
+static void port_init_cnt(struct ksz_hw *hw, int port)
+{
+ struct ksz_port_mib *mib = &hw->port_mib[port];
+
+ mib->cnt_ptr = 0;
+ if (mib->mib_start < PORT_COUNTER_NUM)
+ do {
+ port_r_mib_cnt(hw, port, mib->cnt_ptr,
+ &mib->counter[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ } while (mib->cnt_ptr < PORT_COUNTER_NUM);
+ if (hw->mib_cnt > PORT_COUNTER_NUM)
+ port_r_mib_pkt(hw, port, mib->dropped,
+ &mib->counter[PORT_COUNTER_NUM]);
+ memset((void *) mib->counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
+ mib->cnt_ptr = 0;
+}
+
+/*
+ * Port functions
+ */
+
+/**
+ * port_chk - check port register bits
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @bits: The data bits to check.
+ *
+ * This function checks whether the specified bits of the port register are set
+ * or not.
+ *
+ * Return 0 if the bits are not set.
+ */
+static int port_chk(struct ksz_hw *hw, int port, int offset, u16 bits)
+{
+ u32 addr;
+ u16 data;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ data = readw(hw->io + addr);
+ return (data & bits) == bits;
+}
+
+/**
+ * port_cfg - set port register bits
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @bits: The data bits to set.
+ * @set: The flag indicating whether the bits are to be set or not.
+ *
+ * This routine sets or resets the specified bits of the port register.
+ */
+static void port_cfg(struct ksz_hw *hw, int port, int offset, u16 bits,
+ int set)
+{
+ u32 addr;
+ u16 data;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ data = readw(hw->io + addr);
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ writew(data, hw->io + addr);
+}
+
+/**
+ * port_chk_shift - check port bit
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @addr: The address of the register.
+ * @shift: Number of bits to shift.
+ *
+ * This function checks whether the specified port is set in the register or
+ * not.
+ *
+ * Return 0 if the port is not set.
+ */
+static int port_chk_shift(struct ksz_hw *hw, int port, u32 addr, int shift)
+{
+ u16 data;
+ u16 bit = 1 << port;
+
+ data = readw(hw->io + addr);
+ data >>= shift;
+ return (data & bit) == bit;
+}
+
+/**
+ * port_cfg_shift - set port bit
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @addr: The address of the register.
+ * @shift: Number of bits to shift.
+ * @set: The flag indicating whether the port is to be set or not.
+ *
+ * This routine sets or resets the specified port in the register.
+ */
+static void port_cfg_shift(struct ksz_hw *hw, int port, u32 addr, int shift,
+ int set)
+{
+ u16 data;
+ u16 bits = 1 << port;
+
+ data = readw(hw->io + addr);
+ bits <<= shift;
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ writew(data, hw->io + addr);
+}
+
+/**
+ * port_r8 - read byte from port register
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @data: Buffer to store the data.
+ *
+ * This routine reads a byte from the port register.
+ */
+static void port_r8(struct ksz_hw *hw, int port, int offset, u8 *data)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ *data = readb(hw->io + addr);
+}
+
+/**
+ * port_r16 - read word from port register.
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @data: Buffer to store the data.
+ *
+ * This routine reads a word from the port register.
+ */
+static void port_r16(struct ksz_hw *hw, int port, int offset, u16 *data)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ *data = readw(hw->io + addr);
+}
+
+/**
+ * port_w16 - write word to port register.
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @data: Data to write.
+ *
+ * This routine writes a word to the port register.
+ */
+static void port_w16(struct ksz_hw *hw, int port, int offset, u16 data)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ writew(data, hw->io + addr);
+}
+
+/**
+ * sw_chk - check switch register bits
+ * @hw: The hardware instance.
+ * @addr: The address of the switch register.
+ * @bits: The data bits to check.
+ *
+ * This function checks whether the specified bits of the switch register are
+ * set or not.
+ *
+ * Return 0 if the bits are not set.
+ */
+static int sw_chk(struct ksz_hw *hw, u32 addr, u16 bits)
+{
+ u16 data;
+
+ data = readw(hw->io + addr);
+ return (data & bits) == bits;
+}
+
+/**
+ * sw_cfg - set switch register bits
+ * @hw: The hardware instance.
+ * @addr: The address of the switch register.
+ * @bits: The data bits to set.
+ * @set: The flag indicating whether the bits are to be set or not.
+ *
+ * This function sets or resets the specified bits of the switch register.
+ */
+static void sw_cfg(struct ksz_hw *hw, u32 addr, u16 bits, int set)
+{
+ u16 data;
+
+ data = readw(hw->io + addr);
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ writew(data, hw->io + addr);
+}
+
+/* Bandwidth */
+
+static inline void port_cfg_broad_storm(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM, set);
+}
+
+static inline int port_chk_broad_storm(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM);
+}
+
+/* The driver sets switch broadcast storm protection at a 10% rate. */
+#define BROADCAST_STORM_PROTECTION_RATE 10
+
+/* 148,800 frames * 67 ms / 100 */
+#define BROADCAST_STORM_VALUE 9969
+
+/**
+ * sw_cfg_broad_storm - configure broadcast storm threshold
+ * @hw: The hardware instance.
+ * @percent: Broadcast storm threshold in percent of transmit rate.
+ *
+ * This routine configures the broadcast storm threshold of the switch.
+ */
+static void sw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
+{
+ u16 data;
+ u32 value = ((u32) BROADCAST_STORM_VALUE * (u32) percent / 100);
+
+ if (value > BROADCAST_STORM_RATE)
+ value = BROADCAST_STORM_RATE;
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+ data &= ~(BROADCAST_STORM_RATE_LO | BROADCAST_STORM_RATE_HI);
+ data |= ((value & 0x00FF) << 8) | ((value & 0xFF00) >> 8);
+ writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+}
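+
+/*
+ * Worked example: with the default 10 percent threshold,
+ * value = 9969 * 10 / 100 = 996 (integer math), which is then swapped
+ * into the low/high rate fields of switch control 3.
+ */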
+
+/**
+ * sw_get_broad_storm - get broadcast storm threshold
+ * @hw: The hardware instance.
+ * @percent: Buffer to store the broadcast storm threshold percentage.
+ *
+ * This routine retrieves the broadcast storm threshold of the switch.
+ */
+static void sw_get_broad_storm(struct ksz_hw *hw, u8 *percent)
+{
+ int num;
+ u16 data;
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+ num = (data & BROADCAST_STORM_RATE_HI);
+ num <<= 8;
+ num |= (data & BROADCAST_STORM_RATE_LO) >> 8;
+ num = (num * 100 + BROADCAST_STORM_VALUE / 2) / BROADCAST_STORM_VALUE;
+ *percent = (u8) num;
+}
+
+/**
+ * sw_dis_broad_storm - disable broadcast storm
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the broadcast storm limit function of the switch.
+ */
+static void sw_dis_broad_storm(struct ksz_hw *hw, int port)
+{
+ port_cfg_broad_storm(hw, port, 0);
+}
+
+/**
+ * sw_ena_broad_storm - enable broadcast storm
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine enables the broadcast storm limit function of the switch.
+ */
+static void sw_ena_broad_storm(struct ksz_hw *hw, int port)
+{
+ sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
+ port_cfg_broad_storm(hw, port, 1);
+}
+
+/**
+ * sw_init_broad_storm - initialize broadcast storm
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the broadcast storm limit function of the switch.
+ */
+static void sw_init_broad_storm(struct ksz_hw *hw)
+{
+ int port;
+
+ hw->ksz_switch->broad_per = 1;
+ sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
+ for (port = 0; port < TOTAL_PORT_NUM; port++)
+ sw_dis_broad_storm(hw, port);
+ sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, MULTICAST_STORM_DISABLE, 1);
+}
+
+/**
+ * hw_cfg_broad_storm - configure broadcast storm
+ * @hw: The hardware instance.
+ * @percent: Broadcast storm threshold in percent of transmit rate.
+ *
+ * This routine configures the broadcast storm threshold of the switch.
+ * It is called by user functions. The hardware should be acquired first.
+ */
+static void hw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
+{
+ if (percent > 100)
+ percent = 100;
+
+ sw_cfg_broad_storm(hw, percent);
+ sw_get_broad_storm(hw, &percent);
+ hw->ksz_switch->broad_per = percent;
+}
+
+/**
+ * sw_dis_prio_rate - disable switch priority rate
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the priority rate function of the switch.
+ */
+static void sw_dis_prio_rate(struct ksz_hw *hw, int port)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += KS8842_PORT_IN_RATE_OFFSET;
+ writel(0, hw->io + addr);
+}
+
+/**
+ * sw_init_prio_rate - initialize switch priority rate
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the priority rate function of the switch.
+ */
+static void sw_init_prio_rate(struct ksz_hw *hw)
+{
+ int port;
+ int prio;
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ for (port = 0; port < TOTAL_PORT_NUM; port++) {
+ for (prio = 0; prio < PRIO_QUEUES; prio++) {
+ sw->port_cfg[port].rx_rate[prio] =
+ sw->port_cfg[port].tx_rate[prio] = 0;
+ }
+ sw_dis_prio_rate(hw, port);
+ }
+}
+
+/* Communication */
+
+static inline void port_cfg_back_pressure(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE, set);
+}
+
+static inline void port_cfg_force_flow_ctrl(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL, set);
+}
+
+static inline int port_chk_back_pressure(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE);
+}
+
+static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL);
+}
+
+/* Spanning Tree */
+
+static inline void port_cfg_dis_learn(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_LEARN_DISABLE, set);
+}
+
+static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_RX_ENABLE, set);
+}
+
+static inline void port_cfg_tx(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_TX_ENABLE, set);
+}
+
+static inline void sw_cfg_fast_aging(struct ksz_hw *hw, int set)
+{
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET, SWITCH_FAST_AGING, set);
+}
+
+static inline void sw_flush_dyn_mac_table(struct ksz_hw *hw)
+{
+ if (!(hw->overrides & FAST_AGING)) {
+ sw_cfg_fast_aging(hw, 1);
+ mdelay(1);
+ sw_cfg_fast_aging(hw, 0);
+ }
+}
+
+/* VLAN */
+
+static inline void port_cfg_ins_tag(struct ksz_hw *hw, int p, int insert)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG, insert);
+}
+
+static inline void port_cfg_rmv_tag(struct ksz_hw *hw, int p, int remove)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG, remove);
+}
+
+static inline int port_chk_ins_tag(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG);
+}
+
+static inline int port_chk_rmv_tag(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG);
+}
+
+static inline void port_cfg_dis_non_vid(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID, set);
+}
+
+static inline void port_cfg_in_filter(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER, set);
+}
+
+static inline int port_chk_dis_non_vid(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID);
+}
+
+static inline int port_chk_in_filter(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER);
+}
+
+/* Mirroring */
+
+static inline void port_cfg_mirror_sniffer(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_SNIFFER, set);
+}
+
+static inline void port_cfg_mirror_rx(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_RX, set);
+}
+
+static inline void port_cfg_mirror_tx(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_TX, set);
+}
+
+static inline void sw_cfg_mirror_rx_tx(struct ksz_hw *hw, int set)
+{
+ sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, SWITCH_MIRROR_RX_TX, set);
+}
+
+static void sw_init_mirror(struct ksz_hw *hw)
+{
+ int port;
+
+ for (port = 0; port < TOTAL_PORT_NUM; port++) {
+ port_cfg_mirror_sniffer(hw, port, 0);
+ port_cfg_mirror_rx(hw, port, 0);
+ port_cfg_mirror_tx(hw, port, 0);
+ }
+ sw_cfg_mirror_rx_tx(hw, 0);
+}
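+
+/*
+ * Example (illustrative only): to mirror frames received on port 0 to
+ * a sniffer attached to port 1:
+ *
+ *	port_cfg_mirror_rx(hw, 0, 1);
+ *	port_cfg_mirror_sniffer(hw, 1, 1);
+ */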
+
+static inline void sw_cfg_unk_def_deliver(struct ksz_hw *hw, int set)
+{
+ sw_cfg(hw, KS8842_SWITCH_CTRL_7_OFFSET,
+ SWITCH_UNK_DEF_PORT_ENABLE, set);
+}
+
+static inline int sw_cfg_chk_unk_def_deliver(struct ksz_hw *hw)
+{
+ return sw_chk(hw, KS8842_SWITCH_CTRL_7_OFFSET,
+ SWITCH_UNK_DEF_PORT_ENABLE);
+}
+
+static inline void sw_cfg_unk_def_port(struct ksz_hw *hw, int port, int set)
+{
+ port_cfg_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0, set);
+}
+
+static inline int sw_chk_unk_def_port(struct ksz_hw *hw, int port)
+{
+ return port_chk_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0);
+}
+
+/* Priority */
+
+static inline void port_cfg_diffserv(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE, set);
+}
+
+static inline void port_cfg_802_1p(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE, set);
+}
+
+static inline void port_cfg_replace_vid(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING, set);
+}
+
+static inline void port_cfg_prio(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE, set);
+}
+
+static inline int port_chk_diffserv(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE);
+}
+
+static inline int port_chk_802_1p(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE);
+}
+
+static inline int port_chk_replace_vid(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING);
+}
+
+static inline int port_chk_prio(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE);
+}
+
+/**
+ * sw_dis_diffserv - disable switch DiffServ priority
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the DiffServ priority function of the switch.
+ */
+static void sw_dis_diffserv(struct ksz_hw *hw, int port)
+{
+ port_cfg_diffserv(hw, port, 0);
+}
+
+/**
+ * sw_dis_802_1p - disable switch 802.1p priority
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the 802.1p priority function of the switch.
+ */
+static void sw_dis_802_1p(struct ksz_hw *hw, int port)
+{
+ port_cfg_802_1p(hw, port, 0);
+}
+
+/**
+ * sw_cfg_replace_null_vid - enable NULL VID replacement
+ * @hw: The hardware instance.
+ * @set: The flag to disable or enable.
+ *
+ * This routine configures the switch to replace a NULL VID with the port
+ * default VID.
+ */
+static void sw_cfg_replace_null_vid(struct ksz_hw *hw, int set)
+{
+ sw_cfg(hw, KS8842_SWITCH_CTRL_3_OFFSET, SWITCH_REPLACE_NULL_VID, set);
+}
+
+/**
+ * sw_cfg_replace_vid - enable switch 802.1p priority re-mapping
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @set: The flag to disable or enable.
+ *
+ * This routine enables the 802.1p priority re-mapping function of the switch.
+ * It allows the 802.1p priority field to be replaced with the port default
+ * tag's priority value when the ingress packet's 802.1p priority is higher
+ * than the port default tag's priority.
+ */
+static void sw_cfg_replace_vid(struct ksz_hw *hw, int port, int set)
+{
+ port_cfg_replace_vid(hw, port, set);
+}
+
+/**
+ * sw_cfg_port_based - configure switch port based priority
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @prio: The priority to set.
+ *
+ * This routine configures the port based priority of the switch.
+ */
+static void sw_cfg_port_based(struct ksz_hw *hw, int port, u8 prio)
+{
+ u16 data;
+
+ if (prio > PORT_BASED_PRIORITY_BASE)
+ prio = PORT_BASED_PRIORITY_BASE;
+
+ hw->ksz_switch->port_cfg[port].port_prio = prio;
+
+ port_r16(hw, port, KS8842_PORT_CTRL_1_OFFSET, &data);
+ data &= ~PORT_BASED_PRIORITY_MASK;
+ data |= prio << PORT_BASED_PRIORITY_SHIFT;
+ port_w16(hw, port, KS8842_PORT_CTRL_1_OFFSET, data);
+}
+
+/**
+ * sw_dis_multi_queue - disable transmit multiple queues
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the transmit multiple queues selection of the switch
+ * port, leaving only a single transmit queue on the port.
+ */
+static void sw_dis_multi_queue(struct ksz_hw *hw, int port)
+{
+ port_cfg_prio(hw, port, 0);
+}
+
+/**
+ * sw_init_prio - initialize switch priority
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the switch QoS priority functions.
+ */
+static void sw_init_prio(struct ksz_hw *hw)
+{
+ int port;
+ int tos;
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ /*
+ * Initialize all the 802.1p tag priority values assigned to the
+ * different priority queues.
+ */
+ sw->p_802_1p[0] = 0;
+ sw->p_802_1p[1] = 0;
+ sw->p_802_1p[2] = 1;
+ sw->p_802_1p[3] = 1;
+ sw->p_802_1p[4] = 2;
+ sw->p_802_1p[5] = 2;
+ sw->p_802_1p[6] = 3;
+ sw->p_802_1p[7] = 3;
+
+ /*
+ * Initialize all the DiffServ priority values assigned to priority
+ * queue 0.
+ */
+ for (tos = 0; tos < DIFFSERV_ENTRIES; tos++)
+ sw->diffserv[tos] = 0;
+
+ /* All QoS functions disabled. */
+ for (port = 0; port < TOTAL_PORT_NUM; port++) {
+ sw_dis_multi_queue(hw, port);
+ sw_dis_diffserv(hw, port);
+ sw_dis_802_1p(hw, port);
+ sw_cfg_replace_vid(hw, port, 0);
+
+ sw->port_cfg[port].port_prio = 0;
+ sw_cfg_port_based(hw, port, sw->port_cfg[port].port_prio);
+ }
+ sw_cfg_replace_null_vid(hw, 0);
+}
+
+/**
+ * port_get_def_vid - get port default VID.
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @vid: Buffer to store the VID.
+ *
+ * This routine retrieves the default VID of the port.
+ */
+static void port_get_def_vid(struct ksz_hw *hw, int port, u16 *vid)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += KS8842_PORT_CTRL_VID_OFFSET;
+ *vid = readw(hw->io + addr);
+}
+
+/**
+ * sw_init_vlan - initialize switch VLAN
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the VLAN function of the switch.
+ */
+static void sw_init_vlan(struct ksz_hw *hw)
+{
+ int port;
+ int entry;
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ /* Read 16 VLAN entries from device's VLAN table. */
+ for (entry = 0; entry < VLAN_TABLE_ENTRIES; entry++) {
+ sw_r_vlan_table(hw, entry,
+ &sw->vlan_table[entry].vid,
+ &sw->vlan_table[entry].fid,
+ &sw->vlan_table[entry].member);
+ }
+
+ for (port = 0; port < TOTAL_PORT_NUM; port++) {
+ port_get_def_vid(hw, port, &sw->port_cfg[port].vid);
+ sw->port_cfg[port].member = PORT_MASK;
+ }
+}
+
+/**
+ * sw_cfg_port_base_vlan - configure port-based VLAN membership
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @member: The port-based VLAN membership.
+ *
+ * This routine configures the port-based VLAN membership of the port.
+ */
+static void sw_cfg_port_base_vlan(struct ksz_hw *hw, int port, u8 member)
+{
+ u32 addr;
+ u8 data;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += KS8842_PORT_CTRL_2_OFFSET;
+
+ data = readb(hw->io + addr);
+ data &= ~PORT_VLAN_MEMBERSHIP;
+ data |= (member & PORT_MASK);
+ writeb(data, hw->io + addr);
+
+ hw->ksz_switch->port_cfg[port].member = member;
+}
+
+/**
+ * sw_get_addr - get the switch MAC address.
+ * @hw: The hardware instance.
+ * @mac_addr: Buffer to store the MAC address.
+ *
+ * This function retrieves the MAC address of the switch.
+ */
+static inline void sw_get_addr(struct ksz_hw *hw, u8 *mac_addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i += 2) {
+ mac_addr[i] = readb(hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
+ mac_addr[1 + i] = readb(hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
+ }
+}
+
+/**
+ * sw_set_addr - configure switch MAC address
+ * @hw: The hardware instance.
+ * @mac_addr: The MAC address.
+ *
+ * This function configures the MAC address of the switch.
+ */
+static void sw_set_addr(struct ksz_hw *hw, u8 *mac_addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i += 2) {
+ writeb(mac_addr[i], hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
+ writeb(mac_addr[1 + i], hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
+ }
+}
+
+/**
+ * sw_set_global_ctrl - set switch global control
+ * @hw: The hardware instance.
+ *
+ * This routine sets the global control of the switch function.
+ */
+static void sw_set_global_ctrl(struct ksz_hw *hw)
+{
+ u16 data;
+
+ /* Enable switch MII flow control. */
+ data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+ data |= SWITCH_FLOW_CTRL;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
+
+ /* Enable aggressive back off algorithm in half duplex mode. */
+ data |= SWITCH_AGGR_BACKOFF;
+
+ /* Enable automatic fast aging when a link change is detected. */
+ data |= SWITCH_AGING_ENABLE;
+ data |= SWITCH_LINK_AUTO_AGING;
+
+ if (hw->overrides & FAST_AGING)
+ data |= SWITCH_FAST_AGING;
+ else
+ data &= ~SWITCH_FAST_AGING;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
+
+ /* Enable no excessive collision drop. */
+ data |= NO_EXC_COLLISION_DROP;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
+}
+
+enum {
+ STP_STATE_DISABLED = 0,
+ STP_STATE_LISTENING,
+ STP_STATE_LEARNING,
+ STP_STATE_FORWARDING,
+ STP_STATE_BLOCKED,
+ STP_STATE_SIMPLE
+};
+
+/**
+ * port_set_stp_state - configure port spanning tree state
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @state: The spanning tree state.
+ *
+ * This routine configures the spanning tree state of the port.
+ */
+static void port_set_stp_state(struct ksz_hw *hw, int port, int state)
+{
+ u16 data;
+
+ port_r16(hw, port, KS8842_PORT_CTRL_2_OFFSET, &data);
+ switch (state) {
+ case STP_STATE_DISABLED:
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_LISTENING:
+/*
+ * No need to turn on transmit because of port direct mode.
+ * Turning on receive is required if the static MAC table is not set up.
+ */
+ data &= ~PORT_TX_ENABLE;
+ data |= PORT_RX_ENABLE;
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_LEARNING:
+ data &= ~PORT_TX_ENABLE;
+ data |= PORT_RX_ENABLE;
+ data &= ~PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_FORWARDING:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ data &= ~PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_BLOCKED:
+/*
+ * Need to set up the static MAC table with override to keep receiving BPDU
+ * messages. See sw_init_stp routine.
+ */
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_SIMPLE:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ data |= PORT_LEARN_DISABLE;
+ break;
+ }
+ port_w16(hw, port, KS8842_PORT_CTRL_2_OFFSET, data);
+ hw->ksz_switch->port_cfg[port].stp_state = state;
+}
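+
+/*
+ * Example (illustrative only): a bridge implementation walks a port
+ * through the spanning tree states with calls such as:
+ *
+ *	port_set_stp_state(hw, port, STP_STATE_LEARNING);
+ *	...
+ *	port_set_stp_state(hw, port, STP_STATE_FORWARDING);
+ */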
+
+#define STP_ENTRY 0
+#define BROADCAST_ENTRY 1
+#define BRIDGE_ADDR_ENTRY 2
+#define IPV6_ADDR_ENTRY 3
+
+/**
+ * sw_clr_sta_mac_table - clear static MAC table
+ * @hw: The hardware instance.
+ *
+ * This routine clears the static MAC table.
+ */
+static void sw_clr_sta_mac_table(struct ksz_hw *hw)
+{
+ struct ksz_mac_table *entry;
+ int i;
+
+ for (i = 0; i < STATIC_MAC_TABLE_ENTRIES; i++) {
+ entry = &hw->ksz_switch->mac_table[i];
+ sw_w_sta_mac_table(hw, i,
+ entry->mac_addr, entry->ports,
+ entry->override, 0,
+ entry->use_fid, entry->fid);
+ }
+}
+
+/**
+ * sw_init_stp - initialize switch spanning tree support
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the spanning tree support of the switch.
+ */
+static void sw_init_stp(struct ksz_hw *hw)
+{
+ struct ksz_mac_table *entry;
+
+ entry = &hw->ksz_switch->mac_table[STP_ENTRY];
+ entry->mac_addr[0] = 0x01;
+ entry->mac_addr[1] = 0x80;
+ entry->mac_addr[2] = 0xC2;
+ entry->mac_addr[3] = 0x00;
+ entry->mac_addr[4] = 0x00;
+ entry->mac_addr[5] = 0x00;
+ entry->ports = HOST_MASK;
+ entry->override = 1;
+ entry->valid = 1;
+ sw_w_sta_mac_table(hw, STP_ENTRY,
+ entry->mac_addr, entry->ports,
+ entry->override, entry->valid,
+ entry->use_fid, entry->fid);
+}
+
+/**
+ * sw_block_addr - block certain packets from the host port
+ * @hw: The hardware instance.
+ *
+ * This routine blocks certain packets from reaching the host port.
+ */
+static void sw_block_addr(struct ksz_hw *hw)
+{
+ struct ksz_mac_table *entry;
+ int i;
+
+ for (i = BROADCAST_ENTRY; i <= IPV6_ADDR_ENTRY; i++) {
+ entry = &hw->ksz_switch->mac_table[i];
+ entry->valid = 0;
+ sw_w_sta_mac_table(hw, i,
+ entry->mac_addr, entry->ports,
+ entry->override, entry->valid,
+ entry->use_fid, entry->fid);
+ }
+}
+
+#define PHY_LINK_SUPPORT \
+ (PHY_AUTO_NEG_ASYM_PAUSE | \
+ PHY_AUTO_NEG_SYM_PAUSE | \
+ PHY_AUTO_NEG_100BT4 | \
+ PHY_AUTO_NEG_100BTX_FD | \
+ PHY_AUTO_NEG_100BTX | \
+ PHY_AUTO_NEG_10BT_FD | \
+ PHY_AUTO_NEG_10BT)
+
+static inline void hw_r_phy_ctrl(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_w_phy_ctrl(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_r_phy_link_stat(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_STATUS_OFFSET);
+}
+
+static inline void hw_r_phy_auto_neg(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
+}
+
+static inline void hw_w_phy_auto_neg(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
+}
+
+static inline void hw_r_phy_rem_cap(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_REMOTE_CAP_OFFSET);
+}
+
+static inline void hw_r_phy_crossover(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_w_phy_crossover(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_r_phy_polarity(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_w_phy_polarity(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_r_phy_link_md(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
+}
+
+static inline void hw_w_phy_link_md(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
+}
+
+/**
+ * hw_r_phy - read data from PHY register
+ * @hw: The hardware instance.
+ * @port: Port to read.
+ * @reg: PHY register to read.
+ * @val: Buffer to store the read data.
+ *
+ * This routine reads data from the PHY register.
+ */
+static void hw_r_phy(struct ksz_hw *hw, int port, u16 reg, u16 *val)
+{
+ int phy;
+
+ phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
+ *val = readw(hw->io + phy);
+}
+
+/**
+ * hw_w_phy - write data to PHY register
+ * @hw: The hardware instance.
+ * @port: Port to write.
+ * @reg: PHY register to write.
+ * @val: Word data to write.
+ *
+ * This routine writes data to the PHY register.
+ */
+static void hw_w_phy(struct ksz_hw *hw, int port, u16 reg, u16 val)
+{
+ int phy;
+
+ phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
+ writew(val, hw->io + phy);
+}
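+
+/*
+ * Example (illustrative only): reading the auto-negotiation
+ * advertisement of port p, as done in port_get_link_speed() below:
+ *
+ *	u16 data;
+ *
+ *	hw_r_phy(hw, p, KS884X_PHY_AUTO_NEG_OFFSET, &data);
+ */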
+
+/*
+ * EEPROM access functions
+ */
+
+#define AT93C_CODE 0
+#define AT93C_WR_OFF 0x00
+#define AT93C_WR_ALL 0x10
+#define AT93C_ER_ALL 0x20
+#define AT93C_WR_ON 0x30
+
+#define AT93C_WRITE 1
+#define AT93C_READ 2
+#define AT93C_ERASE 3
+
+#define EEPROM_DELAY 4
+
+static inline void drop_gpio(struct ksz_hw *hw, u8 gpio)
+{
+ u16 data;
+
+ data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
+ data &= ~gpio;
+ writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
+}
+
+static inline void raise_gpio(struct ksz_hw *hw, u8 gpio)
+{
+ u16 data;
+
+ data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
+ data |= gpio;
+ writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
+}
+
+static inline u8 state_gpio(struct ksz_hw *hw, u8 gpio)
+{
+ u16 data;
+
+ data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
+ return (u8)(data & gpio);
+}
+
+static void eeprom_clk(struct ksz_hw *hw)
+{
+ raise_gpio(hw, EEPROM_SERIAL_CLOCK);
+ udelay(EEPROM_DELAY);
+ drop_gpio(hw, EEPROM_SERIAL_CLOCK);
+ udelay(EEPROM_DELAY);
+}
+
+static u16 spi_r(struct ksz_hw *hw)
+{
+ int i;
+ u16 temp = 0;
+
+ for (i = 15; i >= 0; i--) {
+ raise_gpio(hw, EEPROM_SERIAL_CLOCK);
+ udelay(EEPROM_DELAY);
+
+ temp |= (state_gpio(hw, EEPROM_DATA_IN)) ? 1 << i : 0;
+
+ drop_gpio(hw, EEPROM_SERIAL_CLOCK);
+ udelay(EEPROM_DELAY);
+ }
+ return temp;
+}
+
+static void spi_w(struct ksz_hw *hw, u16 data)
+{
+ int i;
+
+ for (i = 15; i >= 0; i--) {
+ (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
+ drop_gpio(hw, EEPROM_DATA_OUT);
+ eeprom_clk(hw);
+ }
+}
+
+static void spi_reg(struct ksz_hw *hw, u8 data, u8 reg)
+{
+ int i;
+
+ /* Initial start bit */
+ raise_gpio(hw, EEPROM_DATA_OUT);
+ eeprom_clk(hw);
+
+ /* AT93C operation */
+ for (i = 1; i >= 0; i--) {
+ (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
+ drop_gpio(hw, EEPROM_DATA_OUT);
+ eeprom_clk(hw);
+ }
+
+ /* Address location */
+ for (i = 5; i >= 0; i--) {
+ (reg & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
+ drop_gpio(hw, EEPROM_DATA_OUT);
+ eeprom_clk(hw);
+ }
+}
+
+#define EEPROM_DATA_RESERVED 0
+#define EEPROM_DATA_MAC_ADDR_0 1
+#define EEPROM_DATA_MAC_ADDR_1 2
+#define EEPROM_DATA_MAC_ADDR_2 3
+#define EEPROM_DATA_SUBSYS_ID 4
+#define EEPROM_DATA_SUBSYS_VEN_ID 5
+#define EEPROM_DATA_PM_CAP 6
+
+/* User defined EEPROM data */
+#define EEPROM_DATA_OTHER_MAC_ADDR 9
+
+/**
+ * eeprom_read - read from AT93C46 EEPROM
+ * @hw: The hardware instance.
+ * @reg: The register offset.
+ *
+ * This function reads a word from the AT93C46 EEPROM.
+ *
+ * Return the data value.
+ */
+static u16 eeprom_read(struct ksz_hw *hw, u8 reg)
+{
+ u16 data;
+
+ raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
+
+ spi_reg(hw, AT93C_READ, reg);
+ data = spi_r(hw);
+
+ drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
+
+ return data;
+}
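+
+/*
+ * Example (illustrative only): the MAC address occupies the first three
+ * EEPROM data words, so it can be fetched with:
+ *
+ *	u16 w0 = eeprom_read(hw, EEPROM_DATA_MAC_ADDR_0);
+ *	u16 w1 = eeprom_read(hw, EEPROM_DATA_MAC_ADDR_1);
+ *	u16 w2 = eeprom_read(hw, EEPROM_DATA_MAC_ADDR_2);
+ */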
+
+/**
+ * eeprom_write - write to AT93C46 EEPROM
+ * @hw: The hardware instance.
+ * @reg: The register offset.
+ * @data: The data value.
+ *
+ * This procedure writes a word to the AT93C46 EEPROM.
+ */
+static void eeprom_write(struct ksz_hw *hw, u8 reg, u16 data)
+{
+ int timeout;
+
+ raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
+
+ /* Enable write. */
+ spi_reg(hw, AT93C_CODE, AT93C_WR_ON);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Erase the register. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ spi_reg(hw, AT93C_ERASE, reg);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Check operation complete. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ timeout = 8;
+ mdelay(2);
+ do {
+ mdelay(1);
+ } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Write the register. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ spi_reg(hw, AT93C_WRITE, reg);
+ spi_w(hw, data);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Check operation complete. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ timeout = 8;
+ mdelay(2);
+ do {
+ mdelay(1);
+ } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Disable write. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ spi_reg(hw, AT93C_CODE, AT93C_WR_OFF);
+
+ drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
+}
+
+/*
+ * Link detection routines
+ */
+
+static u16 advertised_flow_ctrl(struct ksz_port *port, u16 ctrl)
+{
+ ctrl &= ~PORT_AUTO_NEG_SYM_PAUSE;
+ switch (port->flow_ctrl) {
+ case PHY_FLOW_CTRL:
+ ctrl |= PORT_AUTO_NEG_SYM_PAUSE;
+ break;
+ /* Not supported. */
+ case PHY_TX_ONLY:
+ case PHY_RX_ONLY:
+ default:
+ break;
+ }
+ return ctrl;
+}
+
+static void set_flow_ctrl(struct ksz_hw *hw, int rx, int tx)
+{
+ u32 rx_cfg;
+ u32 tx_cfg;
+
+ rx_cfg = hw->rx_cfg;
+ tx_cfg = hw->tx_cfg;
+ if (rx)
+ hw->rx_cfg |= DMA_RX_FLOW_ENABLE;
+ else
+ hw->rx_cfg &= ~DMA_RX_FLOW_ENABLE;
+ if (tx)
+ hw->tx_cfg |= DMA_TX_FLOW_ENABLE;
+ else
+ hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
+ if (hw->enabled) {
+ if (rx_cfg != hw->rx_cfg)
+ writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
+ if (tx_cfg != hw->tx_cfg)
+ writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
+ }
+}
+
+static void determine_flow_ctrl(struct ksz_hw *hw, struct ksz_port *port,
+ u16 local, u16 remote)
+{
+ int rx;
+ int tx;
+
+ if (hw->overrides & PAUSE_FLOW_CTRL)
+ return;
+
+ rx = tx = 0;
+ if (port->force_link)
+ rx = tx = 1;
+ if (remote & PHY_AUTO_NEG_SYM_PAUSE) {
+ if (local & PHY_AUTO_NEG_SYM_PAUSE) {
+ rx = tx = 1;
+ } else if ((remote & PHY_AUTO_NEG_ASYM_PAUSE) &&
+ (local & PHY_AUTO_NEG_PAUSE) ==
+ PHY_AUTO_NEG_ASYM_PAUSE) {
+ tx = 1;
+ }
+ } else if (remote & PHY_AUTO_NEG_ASYM_PAUSE) {
+ if ((local & PHY_AUTO_NEG_PAUSE) == PHY_AUTO_NEG_PAUSE)
+ rx = 1;
+ }
+ if (!hw->ksz_switch)
+ set_flow_ctrl(hw, rx, tx);
+}
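+
+/*
+ * Summary of the pause resolution above (cf. IEEE 802.3 annex 28B):
+ *
+ *	local sym,       remote sym        -> rx = tx = 1
+ *	local asym only, remote sym + asym -> tx = 1
+ *	local sym+asym,  remote asym only  -> rx = 1
+ */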
+
+static inline void port_cfg_change(struct ksz_hw *hw, struct ksz_port *port,
+ struct ksz_port_info *info, u16 link_status)
+{
+ if ((hw->features & HALF_DUPLEX_SIGNAL_BUG) &&
+ !(hw->overrides & PAUSE_FLOW_CTRL)) {
+ u32 cfg = hw->tx_cfg;
+
+ /* Disable flow control in the half duplex mode. */
+ if (1 == info->duplex)
+ hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
+ if (hw->enabled && cfg != hw->tx_cfg)
+ writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
+ }
+}
+
+/**
+ * port_get_link_speed - get current link status
+ * @port: The port instance.
+ *
+ * This routine reads PHY registers to determine the current link status of the
+ * switch ports.
+ */
+static void port_get_link_speed(struct ksz_port *port)
+{
+ uint interrupt;
+ struct ksz_port_info *info;
+ struct ksz_port_info *linked = NULL;
+ struct ksz_hw *hw = port->hw;
+ u16 data;
+ u16 status;
+ u8 local;
+ u8 remote;
+ int i;
+ int p;
+ int change = 0;
+
+ interrupt = hw_block_intr(hw);
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
+ info = &hw->port_info[p];
+ port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
+ port_r16(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
+
+ /*
+ * Link status is changing all the time even when there is no
+ * cable connection!
+ */
+ remote = status & (PORT_AUTO_NEG_COMPLETE |
+ PORT_STATUS_LINK_GOOD);
+ local = (u8) data;
+
+ /* No change to status. */
+ if (local == info->advertised && remote == info->partner)
+ continue;
+
+ info->advertised = local;
+ info->partner = remote;
+ if (status & PORT_STATUS_LINK_GOOD) {
+
+ /* Remember the first linked port. */
+ if (!linked)
+ linked = info;
+
+ info->tx_rate = 10 * TX_RATE_UNIT;
+ if (status & PORT_STATUS_SPEED_100MBIT)
+ info->tx_rate = 100 * TX_RATE_UNIT;
+
+ info->duplex = 1;
+ if (status & PORT_STATUS_FULL_DUPLEX)
+ info->duplex = 2;
+
+ if (media_connected != info->state) {
+ hw_r_phy(hw, p, KS884X_PHY_AUTO_NEG_OFFSET,
+ &data);
+ hw_r_phy(hw, p, KS884X_PHY_REMOTE_CAP_OFFSET,
+ &status);
+ determine_flow_ctrl(hw, port, data, status);
+ if (hw->ksz_switch) {
+ port_cfg_back_pressure(hw, p,
+ (1 == info->duplex));
+ }
+ change |= 1 << i;
+ port_cfg_change(hw, port, info, status);
+ }
+ info->state = media_connected;
+ } else {
+ if (media_disconnected != info->state) {
+ change |= 1 << i;
+
+ /* Indicate the link just goes down. */
+ hw->port_mib[p].link_down = 1;
+ }
+ info->state = media_disconnected;
+ }
+ hw->port_mib[p].state = (u8) info->state;
+ }
+
+ if (linked && media_disconnected == port->linked->state)
+ port->linked = linked;
+
+ hw_restore_intr(hw, interrupt);
+}
+
+#define PHY_RESET_TIMEOUT 10
+
+/**
+ * port_set_link_speed - set port speed
+ * @port: The port instance.
+ *
+ * This routine sets the link speed of the switch ports.
+ */
+static void port_set_link_speed(struct ksz_port *port)
+{
+ struct ksz_port_info *info;
+ struct ksz_hw *hw = port->hw;
+ u16 data;
+ u16 cfg;
+ u8 status;
+ int i;
+ int p;
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
+ info = &hw->port_info[p];
+
+ port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
+ port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
+
+ cfg = 0;
+ if (status & PORT_STATUS_LINK_GOOD)
+ cfg = data;
+
+ data |= PORT_AUTO_NEG_ENABLE;
+ data = advertised_flow_ctrl(port, data);
+
+ data |= PORT_AUTO_NEG_100BTX_FD | PORT_AUTO_NEG_100BTX |
+ PORT_AUTO_NEG_10BT_FD | PORT_AUTO_NEG_10BT;
+
+ /* Check if manual configuration is specified by the user. */
+ if (port->speed || port->duplex) {
+ if (10 == port->speed)
+ data &= ~(PORT_AUTO_NEG_100BTX_FD |
+ PORT_AUTO_NEG_100BTX);
+ else if (100 == port->speed)
+ data &= ~(PORT_AUTO_NEG_10BT_FD |
+ PORT_AUTO_NEG_10BT);
+ if (1 == port->duplex)
+ data &= ~(PORT_AUTO_NEG_100BTX_FD |
+ PORT_AUTO_NEG_10BT_FD);
+ else if (2 == port->duplex)
+ data &= ~(PORT_AUTO_NEG_100BTX |
+ PORT_AUTO_NEG_10BT);
+ }
+ if (data != cfg) {
+ data |= PORT_AUTO_NEG_RESTART;
+ port_w16(hw, p, KS884X_PORT_CTRL_4_OFFSET, data);
+ }
+ }
+}
+
+/**
+ * port_force_link_speed - force port speed
+ * @port: The port instance.
+ *
+ * This routine forces the link speed of the switch ports.
+ */
+static void port_force_link_speed(struct ksz_port *port)
+{
+ struct ksz_hw *hw = port->hw;
+ u16 data;
+ int i;
+ int phy;
+ int p;
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
+ phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL;
+ hw_r_phy_ctrl(hw, phy, &data);
+
+ data &= ~PHY_AUTO_NEG_ENABLE;
+
+ if (10 == port->speed)
+ data &= ~PHY_SPEED_100MBIT;
+ else if (100 == port->speed)
+ data |= PHY_SPEED_100MBIT;
+ if (1 == port->duplex)
+ data &= ~PHY_FULL_DUPLEX;
+ else if (2 == port->duplex)
+ data |= PHY_FULL_DUPLEX;
+ hw_w_phy_ctrl(hw, phy, data);
+ }
+}
+
+static void port_set_power_saving(struct ksz_port *port, int enable)
+{
+ struct ksz_hw *hw = port->hw;
+ int i;
+ int p;
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++)
+ port_cfg(hw, p,
+ KS884X_PORT_CTRL_4_OFFSET, PORT_POWER_DOWN, enable);
+}
+
+/*
+ * KSZ8841 power management functions
+ */
+
+/**
+ * hw_chk_wol_pme_status - check PMEN pin
+ * @hw: The hardware instance.
+ *
+ * This function is used to check whether the PMEN pin is asserted.
+ *
+ * Return 1 if PMEN pin is asserted; otherwise, 0.
+ */
+static int hw_chk_wol_pme_status(struct ksz_hw *hw)
+{
+ struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
+ struct pci_dev *pdev = hw_priv->pdev;
+ u16 data;
+
+ if (!pdev->pm_cap)
+ return 0;
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
+ return (data & PCI_PM_CTRL_PME_STATUS) == PCI_PM_CTRL_PME_STATUS;
+}
+
+/**
+ * hw_clr_wol_pme_status - clear PMEN pin
+ * @hw: The hardware instance.
+ *
+ * This routine is used to clear PME_Status to deassert the PMEN pin.
+ */
+static void hw_clr_wol_pme_status(struct ksz_hw *hw)
+{
+ struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
+ struct pci_dev *pdev = hw_priv->pdev;
+ u16 data;
+
+ if (!pdev->pm_cap)
+ return;
+
+ /* Clear PME_Status to deassert PMEN pin. */
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
+ data |= PCI_PM_CTRL_PME_STATUS;
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
+}
+
+/**
+ * hw_cfg_wol_pme - enable or disable Wake-on-LAN
+ * @hw: The hardware instance.
+ * @set: The flag indicating whether to enable or disable.
+ *
+ * This routine is used to enable or disable Wake-on-LAN.
+ */
+static void hw_cfg_wol_pme(struct ksz_hw *hw, int set)
+{
+ struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
+ struct pci_dev *pdev = hw_priv->pdev;
+ u16 data;
+
+ if (!pdev->pm_cap)
+ return;
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
+ data &= ~PCI_PM_CTRL_STATE_MASK;
+ if (set)
+ data |= PCI_PM_CTRL_PME_ENABLE | PCI_D3hot;
+ else
+ data &= ~PCI_PM_CTRL_PME_ENABLE;
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
+}
+
+/**
+ * hw_cfg_wol - configure Wake-on-LAN features
+ * @hw: The hardware instance.
+ * @frame: The pattern frame bit.
+ * @set: The flag indicating whether to enable or disable.
+ *
+ * This routine is used to enable or disable certain Wake-on-LAN features.
+ */
+static void hw_cfg_wol(struct ksz_hw *hw, u16 frame, int set)
+{
+ u16 data;
+
+ data = readw(hw->io + KS8841_WOL_CTRL_OFFSET);
+ if (set)
+ data |= frame;
+ else
+ data &= ~frame;
+ writew(data, hw->io + KS8841_WOL_CTRL_OFFSET);
+}
+
+/**
+ * hw_set_wol_frame - program Wake-on-LAN pattern
+ * @hw: The hardware instance.
+ * @i: The frame index.
+ * @mask_size: The size of the mask.
+ * @mask: Byte mask selecting which pattern bytes to compare; a zero bit
+ * means the corresponding byte is ignored.
+ * @frame_size: The size of the frame.
+ * @pattern: The frame data.
+ *
+ * This routine is used to program a Wake-on-LAN pattern.
+ */
+static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
+ const u8 *mask, uint frame_size, const u8 *pattern)
+{
+ int bits;
+ int from;
+ int len;
+ int to;
+ u32 crc;
+ u8 data[64];
+ u8 val = 0;
+
+ if (frame_size > mask_size * 8)
+ frame_size = mask_size * 8;
+ if (frame_size > 64)
+ frame_size = 64;
+
+ i *= 0x10;
+ writel(0, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i);
+ writel(0, hw->io + KS8841_WOL_FRAME_BYTE2_OFFSET + i);
+
+ bits = len = from = to = 0;
+ do {
+ if (bits) {
+ if ((val & 1))
+ data[to++] = pattern[from];
+ val >>= 1;
+ ++from;
+ --bits;
+ } else {
+ val = mask[len];
+ writeb(val, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i
+ + len);
+ ++len;
+ if (val)
+ bits = 8;
+ else
+ from += 8;
+ }
+ } while (from < (int) frame_size);
+ if (val) {
+ bits = mask[len - 1];
+ val <<= (from % 8);
+ bits &= ~val;
+ writeb(bits, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i + len -
+ 1);
+ }
+ crc = ether_crc(to, data);
+ writel(crc, hw->io + KS8841_WOL_FRAME_CRC_OFFSET + i);
+}
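+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the driver): the
+ * mask above selects pattern bytes for comparison, with bit n of mask
+ * byte b covering pattern byte b * 8 + n. This expands a mask into the
+ * byte offsets the hardware will actually match.
+ */
+static void wol_mask_offsets_example(const u8 *mask, uint mask_size)
+{
+ uint b;
+ uint n;
+
+ for (b = 0; b < mask_size; b++)
+ for (n = 0; n < 8; n++)
+ if (mask[b] & (1 << n))
+ pr_info("compare pattern byte %u\n", b * 8 + n);
+}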
+
+/**
+ * hw_add_wol_arp - add ARP pattern
+ * @hw: The hardware instance.
+ * @ip_addr: The IPv4 address assigned to the device.
+ *
+ * This routine is used to add an ARP pattern for waking up the host.
+ */
+static void hw_add_wol_arp(struct ksz_hw *hw, const u8 *ip_addr)
+{
+ static const u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };
+ u8 pattern[42] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x06,
+ 0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00 };
+
+ memcpy(&pattern[38], ip_addr, 4);
+ hw_set_wol_frame(hw, 3, 6, mask, 42, pattern);
+}
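+
+/*
+ * For the ARP pattern above, the mask selects bytes 0-5 (the broadcast
+ * destination MAC), 12-15 (EtherType 0x0806 and hardware type), 16-21
+ * (protocol type, address sizes and the request opcode) and 38-41 (the
+ * target IP address patched in by the memcpy), so any ARP request for
+ * the device's IP address wakes up the host.
+ */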
+
+/**
+ * hw_add_wol_bcast - add broadcast pattern
+ * @hw: The hardware instance.
+ *
+ * This routine is used to add a broadcast pattern for waking up the host.
+ */
+static void hw_add_wol_bcast(struct ksz_hw *hw)
+{
+ static const u8 mask[] = { 0x3F };
+ static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
+ hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern);
+}
+
+/**
+ * hw_add_wol_mcast - add multicast pattern
+ * @hw: The hardware instance.
+ *
+ * This routine is used to add a multicast pattern for waking up the host.
+ *
+ * It is assumed the multicast packet is the ICMPv6 neighbor solicitation used
+ * by the IPv6 ping command. Note that multicast packets are filtered through
+ * the multicast hash table, so not all multicast packets can wake up the host.
+ */
+static void hw_add_wol_mcast(struct ksz_hw *hw)
+{
+ static const u8 mask[] = { 0x3F };
+ u8 pattern[] = { 0x33, 0x33, 0xFF, 0x00, 0x00, 0x00 };
+
+ memcpy(&pattern[3], &hw->override_addr[3], 3);
+ hw_set_wol_frame(hw, 1, 1, mask, 6, pattern);
+}
+
+/**
+ * hw_add_wol_ucast - add unicast pattern
+ * @hw: The hardware instance.
+ *
+ * This routine is used to add a unicast pattern to wake up the host.
+ *
+ * It is assumed the unicast packet is directed to the device, as the hardware
+ * can only receive such packets in the normal case.
+ */
+static void hw_add_wol_ucast(struct ksz_hw *hw)
+{
+ static const u8 mask[] = { 0x3F };
+
+ hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr);
+}
+
+/**
+ * hw_enable_wol - enable Wake-on-LAN
+ * @hw: The hardware instance.
+ * @wol_enable: The Wake-on-LAN settings.
+ * @net_addr: The IPv4 address assigned to the device.
+ *
+ * This routine is used to enable Wake-on-LAN depending on driver settings.
+ */
+static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, const u8 *net_addr)
+{
+ hw_cfg_wol(hw, KS8841_WOL_MAGIC_ENABLE, (wol_enable & WAKE_MAGIC));
+ hw_cfg_wol(hw, KS8841_WOL_FRAME0_ENABLE, (wol_enable & WAKE_UCAST));
+ hw_add_wol_ucast(hw);
+ hw_cfg_wol(hw, KS8841_WOL_FRAME1_ENABLE, (wol_enable & WAKE_MCAST));
+ hw_add_wol_mcast(hw);
+ hw_cfg_wol(hw, KS8841_WOL_FRAME2_ENABLE, (wol_enable & WAKE_BCAST));
+ hw_cfg_wol(hw, KS8841_WOL_FRAME3_ENABLE, (wol_enable & WAKE_ARP));
+ hw_add_wol_arp(hw, net_addr);
+}
+
+/**
+ * hw_init - check whether the driver matches the hardware
+ * @hw: The hardware instance.
+ *
+ * This function checks that the hardware is supported by this driver and sets
+ * the hardware up for proper initialization.
+ *
+ * Return the number of ports, or 0 if the chip is not recognized.
+ */
+static int hw_init(struct ksz_hw *hw)
+{
+ int rc = 0;
+ u16 data;
+ u16 revision;
+
+ /* Set bus speed to 125 MHz. */
+ writew(BUS_SPEED_125_MHZ, hw->io + KS884X_BUS_CTRL_OFFSET);
+
+ /* Check KSZ884x chip ID. */
+ data = readw(hw->io + KS884X_CHIP_ID_OFFSET);
+
+ revision = (data & KS884X_REVISION_MASK) >> KS884X_REVISION_SHIFT;
+ data &= KS884X_CHIP_ID_MASK_41;
+ if (REG_CHIP_ID_41 == data)
+ rc = 1;
+ else if (REG_CHIP_ID_42 == data)
+ rc = 2;
+ else
+ return 0;
+
+ /* Setup hardware features or bug workarounds. */
+ if (revision <= 1) {
+ hw->features |= SMALL_PACKET_TX_BUG;
+ if (1 == rc)
+ hw->features |= HALF_DUPLEX_SIGNAL_BUG;
+ }
+ return rc;
+}
+
+/**
+ * hw_reset - reset the hardware
+ * @hw: The hardware instance.
+ *
+ * This routine resets the hardware.
+ */
+static void hw_reset(struct ksz_hw *hw)
+{
+ writew(GLOBAL_SOFTWARE_RESET, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
+
+ /* Wait for device to reset. */
+ mdelay(10);
+
+ /* Write 0 to clear device reset. */
+ writew(0, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
+}
+
+/**
+ * hw_setup - setup the hardware
+ * @hw: The hardware instance.
+ *
+ * This routine sets up the hardware for proper operation.
+ */
+static void hw_setup(struct ksz_hw *hw)
+{
+#if SET_DEFAULT_LED
+ u16 data;
+
+ /* Change default LED mode. */
+ data = readw(hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
+ data &= ~LED_MODE;
+ data |= SET_DEFAULT_LED;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
+#endif
+
+ /* Setup transmit control. */
+ hw->tx_cfg = (DMA_TX_PAD_ENABLE | DMA_TX_CRC_ENABLE |
+ (DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_TX_ENABLE);
+
+ /* Setup receive control. */
+ hw->rx_cfg = (DMA_RX_BROADCAST | DMA_RX_UNICAST |
+ (DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_RX_ENABLE);
+ hw->rx_cfg |= KS884X_DMA_RX_MULTICAST;
+
+ /*
+ * Hardware cannot handle UDP packets in IP fragments, so UDP receive
+ * checksum offload is left disabled.
+ */
+ hw->rx_cfg |= (DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);
+
+ if (hw->all_multi)
+ hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
+ if (hw->promiscuous)
+ hw->rx_cfg |= DMA_RX_PROMISCUOUS;
+}
+
+/**
+ * hw_setup_intr - setup interrupt mask
+ * @hw: The hardware instance.
+ *
+ * This routine sets up the interrupt mask for proper operation.
+ */
+static void hw_setup_intr(struct ksz_hw *hw)
+{
+ hw->intr_mask = KS884X_INT_MASK | KS884X_INT_RX_OVERRUN;
+}
+
+static void ksz_check_desc_num(struct ksz_desc_info *info)
+{
+#define MIN_DESC_SHIFT 2
+
+ int alloc = info->alloc;
+ int shift;
+
+ shift = 0;
+ while (!(alloc & 1)) {
+ shift++;
+ alloc >>= 1;
+ }
+ if (alloc != 1 || shift < MIN_DESC_SHIFT) {
+ pr_alert("Hardware descriptor numbers not right!\n");
+ while (alloc) {
+ shift++;
+ alloc >>= 1;
+ }
+ if (shift < MIN_DESC_SHIFT)
+ shift = MIN_DESC_SHIFT;
+ alloc = 1 << shift;
+ info->alloc = alloc;
+ }
+ info->mask = info->alloc - 1;
+}
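+
+/*
+ * Worked example for the check above: alloc = 96 (0b1100000) is not a
+ * power of two, so the routine warns and rounds up to 128, leaving a
+ * ring index mask of 127. A hypothetical equivalent written with the
+ * <linux/log2.h> helpers:
+ */
+static int ksz_round_desc_num_example(int alloc)
+{
+ if (!is_power_of_2(alloc))
+ alloc = roundup_pow_of_two(alloc);
+ if (alloc < (1 << MIN_DESC_SHIFT))
+ alloc = 1 << MIN_DESC_SHIFT;
+ return alloc;
+}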
+
+static void hw_init_desc(struct ksz_desc_info *desc_info, int transmit)
+{
+ int i;
+ u32 phys = desc_info->ring_phys;
+ struct ksz_hw_desc *desc = desc_info->ring_virt;
+ struct ksz_desc *cur = desc_info->ring;
+ struct ksz_desc *previous = NULL;
+
+ for (i = 0; i < desc_info->alloc; i++) {
+ cur->phw = desc++;
+ phys += desc_info->size;
+ previous = cur++;
+ previous->phw->next = cpu_to_le32(phys);
+ }
+ previous->phw->next = cpu_to_le32(desc_info->ring_phys);
+ previous->sw.buf.rx.end_of_ring = 1;
+ previous->phw->buf.data = cpu_to_le32(previous->sw.buf.data);
+
+ desc_info->avail = desc_info->alloc;
+ desc_info->last = desc_info->next = 0;
+
+ desc_info->cur = desc_info->ring;
+}
+
+/**
+ * hw_set_desc_base - set descriptor base addresses
+ * @hw: The hardware instance.
+ * @tx_addr: The transmit descriptor base.
+ * @rx_addr: The receive descriptor base.
+ *
+ * This routine programs the descriptor base addresses after reset.
+ */
+static void hw_set_desc_base(struct ksz_hw *hw, u32 tx_addr, u32 rx_addr)
+{
+ /* Set base address of Tx/Rx descriptors. */
+ writel(tx_addr, hw->io + KS_DMA_TX_ADDR);
+ writel(rx_addr, hw->io + KS_DMA_RX_ADDR);
+}
+
+static void hw_reset_pkts(struct ksz_desc_info *info)
+{
+ info->cur = info->ring;
+ info->avail = info->alloc;
+ info->last = info->next = 0;
+}
+
+static inline void hw_resume_rx(struct ksz_hw *hw)
+{
+ writel(DMA_START, hw->io + KS_DMA_RX_START);
+}
+
+/**
+ * hw_start_rx - start receiving
+ * @hw: The hardware instance.
+ *
+ * This routine starts the receive function of the hardware.
+ */
+static void hw_start_rx(struct ksz_hw *hw)
+{
+ writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
+
+ /* Notify when the receive stops. */
+ hw->intr_mask |= KS884X_INT_RX_STOPPED;
+
+ writel(DMA_START, hw->io + KS_DMA_RX_START);
+ hw_ack_intr(hw, KS884X_INT_RX_STOPPED);
+ hw->rx_stop++;
+
+ /* If the counter overflows to zero, keep it in the running state. */
+ if (0 == hw->rx_stop)
+ hw->rx_stop = 2;
+}
+
+/**
+ * hw_stop_rx - stop receiving
+ * @hw: The hardware instance.
+ *
+ * This routine stops the receive function of the hardware.
+ */
+static void hw_stop_rx(struct ksz_hw *hw)
+{
+ hw->rx_stop = 0;
+ hw_turn_off_intr(hw, KS884X_INT_RX_STOPPED);
+ writel((hw->rx_cfg & ~DMA_RX_ENABLE), hw->io + KS_DMA_RX_CTRL);
+}
+
+/**
+ * hw_start_tx - start transmitting
+ * @hw: The hardware instance.
+ *
+ * This routine starts the transmit function of the hardware.
+ */
+static void hw_start_tx(struct ksz_hw *hw)
+{
+ writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
+}
+
+/**
+ * hw_stop_tx - stop transmitting
+ * @hw: The hardware instance.
+ *
+ * This routine stops the transmit function of the hardware.
+ */
+static void hw_stop_tx(struct ksz_hw *hw)
+{
+ writel((hw->tx_cfg & ~DMA_TX_ENABLE), hw->io + KS_DMA_TX_CTRL);
+}
+
+/**
+ * hw_disable - disable hardware
+ * @hw: The hardware instance.
+ *
+ * This routine disables the hardware.
+ */
+static void hw_disable(struct ksz_hw *hw)
+{
+ hw_stop_rx(hw);
+ hw_stop_tx(hw);
+ hw->enabled = 0;
+}
+
+/**
+ * hw_enable - enable hardware
+ * @hw: The hardware instance.
+ *
+ * This routine enables the hardware.
+ */
+static void hw_enable(struct ksz_hw *hw)
+{
+ hw_start_tx(hw);
+ hw_start_rx(hw);
+ hw->enabled = 1;
+}
+
+/**
+ * hw_alloc_pkt - allocate enough descriptors for transmission
+ * @hw: The hardware instance.
+ * @length: The length of the packet.
+ * @physical: Number of descriptors required.
+ *
+ * This function allocates descriptors for transmission.
+ *
+ * Return 0 if not successful; 1 if the caller should copy the packet into a
+ * single buffer; otherwise the number of available descriptors.
+ */
+static int hw_alloc_pkt(struct ksz_hw *hw, int length, int physical)
+{
+ /* Always leave one descriptor free. */
+ if (hw->tx_desc_info.avail <= 1)
+ return 0;
+
+ /* Allocate a descriptor for transmission and mark it current. */
+ get_tx_pkt(&hw->tx_desc_info, &hw->tx_desc_info.cur);
+ hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1;
+
+ /* Keep track of number of transmit descriptors used so far. */
+ ++hw->tx_int_cnt;
+ hw->tx_size += length;
+
+ /* Cannot hold on to too much data; force a transmit interrupt. */
+ if (hw->tx_size >= MAX_TX_HELD_SIZE)
+ hw->tx_int_cnt = hw->tx_int_mask + 1;
+
+ if (physical > hw->tx_desc_info.avail)
+ return 1;
+
+ return hw->tx_desc_info.avail;
+}
+
+/**
+ * hw_send_pkt - mark packet for transmission
+ * @hw: The hardware instance.
+ *
+ * This routine hands the current packet descriptors to the hardware for
+ * transmission.
+ */
+static void hw_send_pkt(struct ksz_hw *hw)
+{
+ struct ksz_desc *cur = hw->tx_desc_info.cur;
+
+ cur->sw.buf.tx.last_seg = 1;
+
+ /* Interrupt only after specified number of descriptors used. */
+ if (hw->tx_int_cnt > hw->tx_int_mask) {
+ cur->sw.buf.tx.intr = 1;
+ hw->tx_int_cnt = 0;
+ hw->tx_size = 0;
+ }
+
+ /* KSZ8842 supports port directed transmission. */
+ cur->sw.buf.tx.dest_port = hw->dst_ports;
+
+ release_desc(cur);
+
+ writel(0, hw->io + KS_DMA_TX_START);
+}
+
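+/*
+ * empty_addr() below reads the 6-byte MAC address as a u32 followed by a
+ * u16, which relies on the address buffers being suitably aligned for
+ * such accesses.
+ */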
+static int empty_addr(u8 *addr)
+{
+ u32 *addr1 = (u32 *) addr;
+ u16 *addr2 = (u16 *) &addr[4];
+
+ return 0 == *addr1 && 0 == *addr2;
+}
+
+/**
+ * hw_set_addr - set MAC address
+ * @hw: The hardware instance.
+ *
+ * This routine programs the MAC address of the hardware when the address is
+ * overridden.
+ */
+static void hw_set_addr(struct ksz_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < MAC_ADDR_LEN; i++)
+ writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
+ hw->io + KS884X_ADDR_0_OFFSET + i);
+
+ sw_set_addr(hw, hw->override_addr);
+}
+
+/**
+ * hw_read_addr - read MAC address
+ * @hw: The hardware instance.
+ *
+ * This routine retrieves the MAC address of the hardware.
+ */
+static void hw_read_addr(struct ksz_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < MAC_ADDR_LEN; i++)
+ hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
+ KS884X_ADDR_0_OFFSET + i);
+
+ if (!hw->mac_override) {
+ memcpy(hw->override_addr, hw->perm_addr, MAC_ADDR_LEN);
+ if (empty_addr(hw->override_addr)) {
+ memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS,
+ MAC_ADDR_LEN);
+ memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
+ MAC_ADDR_LEN);
+ hw->override_addr[5] += hw->id;
+ hw_set_addr(hw);
+ }
+ }
+}
+
+static void hw_ena_add_addr(struct ksz_hw *hw, int index, u8 *mac_addr)
+{
+ int i;
+ u32 mac_addr_lo;
+ u32 mac_addr_hi;
+
+ mac_addr_hi = 0;
+ for (i = 0; i < 2; i++) {
+ mac_addr_hi <<= 8;
+ mac_addr_hi |= mac_addr[i];
+ }
+ mac_addr_hi |= ADD_ADDR_ENABLE;
+ mac_addr_lo = 0;
+ for (i = 2; i < 6; i++) {
+ mac_addr_lo <<= 8;
+ mac_addr_lo |= mac_addr[i];
+ }
+ index *= ADD_ADDR_INCR;
+
+ writel(mac_addr_lo, hw->io + index + KS_ADD_ADDR_0_LO);
+ writel(mac_addr_hi, hw->io + index + KS_ADD_ADDR_0_HI);
+}
+
+static void hw_set_add_addr(struct ksz_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < ADDITIONAL_ENTRIES; i++) {
+ if (empty_addr(hw->address[i]))
+ writel(0, hw->io + ADD_ADDR_INCR * i +
+ KS_ADD_ADDR_0_HI);
+ else
+ hw_ena_add_addr(hw, i, hw->address[i]);
+ }
+}
+
+static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
+{
+ int i;
+ int j = ADDITIONAL_ENTRIES;
+
+ if (!memcmp(hw->override_addr, mac_addr, MAC_ADDR_LEN))
+ return 0;
+ for (i = 0; i < hw->addr_list_size; i++) {
+ if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN))
+ return 0;
+ if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
+ j = i;
+ }
+ if (j < ADDITIONAL_ENTRIES) {
+ memcpy(hw->address[j], mac_addr, MAC_ADDR_LEN);
+ hw_ena_add_addr(hw, j, hw->address[j]);
+ return 0;
+ }
+ return -1;
+}
+
+static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
+{
+ int i;
+
+ for (i = 0; i < hw->addr_list_size; i++) {
+ if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN)) {
+ memset(hw->address[i], 0, MAC_ADDR_LEN);
+ writel(0, hw->io + ADD_ADDR_INCR * i +
+ KS_ADD_ADDR_0_HI);
+ return 0;
+ }
+ }
+ return -1;
+}
+
+/**
+ * hw_clr_multicast - clear multicast addresses
+ * @hw: The hardware instance.
+ *
+ * This routine removes all multicast addresses set in the hardware.
+ */
+static void hw_clr_multicast(struct ksz_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < HW_MULTICAST_SIZE; i++) {
+ hw->multi_bits[i] = 0;
+
+ writeb(0, hw->io + KS884X_MULTICAST_0_OFFSET + i);
+ }
+}
+
+/**
+ * hw_set_grp_addr - set multicast addresses
+ * @hw: The hardware instance.
+ *
+ * This routine programs the multicast hash table so that the hardware
+ * accepts the configured multicast addresses.
+ */
+static void hw_set_grp_addr(struct ksz_hw *hw)
+{
+ int i;
+ int index;
+ int position;
+ int value;
+
+ memset(hw->multi_bits, 0, sizeof(u8) * HW_MULTICAST_SIZE);
+
+ for (i = 0; i < hw->multi_list_size; i++) {
+ position = (ether_crc(6, hw->multi_list[i]) >> 26) & 0x3f;
+ index = position >> 3;
+ value = 1 << (position & 7);
+ hw->multi_bits[index] |= (u8) value;
+ }
+
+ for (i = 0; i < HW_MULTICAST_SIZE; i++)
+ writeb(hw->multi_bits[i], hw->io + KS884X_MULTICAST_0_OFFSET +
+ i);
+}
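+
+/*
+ * Illustrative sketch (hypothetical helper, mirroring the hashing above):
+ * the top six bits of the Ethernet CRC pick one of 64 filter bits, stored
+ * eight per hash-table byte. A position of 0x2B, for example, sets bit 3
+ * of byte 5.
+ */
+static void mcast_hash_bit_example(const u8 *addr, u8 *bits)
+{
+ int position = (ether_crc(6, addr) >> 26) & 0x3f;
+
+ bits[position >> 3] |= 1 << (position & 7);
+}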
+
+/**
+ * hw_set_multicast - enable or disable all multicast receiving
+ * @hw: The hardware instance.
+ * @multicast: To turn on or off the all multicast feature.
+ *
+ * This routine enables/disables the hardware to accept all multicast packets.
+ */
+static void hw_set_multicast(struct ksz_hw *hw, u8 multicast)
+{
+ /* Stop receiving for reconfiguration. */
+ hw_stop_rx(hw);
+
+ if (multicast)
+ hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
+ else
+ hw->rx_cfg &= ~DMA_RX_ALL_MULTICAST;
+
+ if (hw->enabled)
+ hw_start_rx(hw);
+}
+
+/**
+ * hw_set_promiscuous - enable or disable promiscuous receiving
+ * @hw: The hardware instance.
+ * @prom: To turn on or off the promiscuous feature.
+ *
+ * This routine enables/disables the hardware to accept all packets.
+ */
+static void hw_set_promiscuous(struct ksz_hw *hw, u8 prom)
+{
+ /* Stop receiving for reconfiguration. */
+ hw_stop_rx(hw);
+
+ if (prom)
+ hw->rx_cfg |= DMA_RX_PROMISCUOUS;
+ else
+ hw->rx_cfg &= ~DMA_RX_PROMISCUOUS;
+
+ if (hw->enabled)
+ hw_start_rx(hw);
+}
+
+/**
+ * sw_enable - enable the switch
+ * @hw: The hardware instance.
+ * @enable: The flag to enable or disable the switch.
+ *
+ * This routine is used to enable/disable the switch in KSZ8842.
+ */
+static void sw_enable(struct ksz_hw *hw, int enable)
+{
+ int port;
+
+ for (port = 0; port < SWITCH_PORT_NUM; port++) {
+ if (hw->dev_count > 1) {
+ /* Set port-base vlan membership with host port. */
+ sw_cfg_port_base_vlan(hw, port,
+ HOST_MASK | (1 << port));
+ port_set_stp_state(hw, port, STP_STATE_DISABLED);
+ } else {
+ sw_cfg_port_base_vlan(hw, port, PORT_MASK);
+ port_set_stp_state(hw, port, STP_STATE_FORWARDING);
+ }
+ }
+ if (hw->dev_count > 1)
+ port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
+ else
+ port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_FORWARDING);
+
+ if (enable)
+ enable = KS8842_START;
+ writew(enable, hw->io + KS884X_CHIP_ID_OFFSET);
+}
+
+/**
+ * sw_setup - setup the switch
+ * @hw: The hardware instance.
+ *
+ * This routine sets up the hardware switch engine for default operation.
+ */
+static void sw_setup(struct ksz_hw *hw)
+{
+ int port;
+
+ sw_set_global_ctrl(hw);
+
+ /* Enable switch broadcast storm protection at a 10% rate. */
+ sw_init_broad_storm(hw);
+ hw_cfg_broad_storm(hw, BROADCAST_STORM_PROTECTION_RATE);
+ for (port = 0; port < SWITCH_PORT_NUM; port++)
+ sw_ena_broad_storm(hw, port);
+
+ sw_init_prio(hw);
+
+ sw_init_mirror(hw);
+
+ sw_init_prio_rate(hw);
+
+ sw_init_vlan(hw);
+
+ if (hw->features & STP_SUPPORT)
+ sw_init_stp(hw);
+ if (!sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_TX_FLOW_CTRL | SWITCH_RX_FLOW_CTRL))
+ hw->overrides |= PAUSE_FLOW_CTRL;
+ sw_enable(hw, 1);
+}
+
+/**
+ * ksz_start_timer - start kernel timer
+ * @info: Kernel timer information.
+ * @time: The time tick.
+ *
+ * This routine starts the kernel timer after the specified time tick.
+ */
+static void ksz_start_timer(struct ksz_timer_info *info, int time)
+{
+ info->cnt = 0;
+ info->timer.expires = jiffies + time;
+ add_timer(&info->timer);
+
+ /* infinity */
+ info->max = -1;
+}
+
+/**
+ * ksz_stop_timer - stop kernel timer
+ * @info: Kernel timer information.
+ *
+ * This routine stops the kernel timer.
+ */
+static void ksz_stop_timer(struct ksz_timer_info *info)
+{
+ if (info->max) {
+ info->max = 0;
+ del_timer_sync(&info->timer);
+ }
+}
+
+static void ksz_init_timer(struct ksz_timer_info *info, int period,
+ void (*function)(unsigned long), void *data)
+{
+ info->max = 0;
+ info->period = period;
+ init_timer(&info->timer);
+ info->timer.function = function;
+ info->timer.data = (unsigned long) data;
+}
+
+static void ksz_update_timer(struct ksz_timer_info *info)
+{
+ ++info->cnt;
+ if (info->max > 0) {
+ if (info->cnt < info->max) {
+ info->timer.expires = jiffies + info->period;
+ add_timer(&info->timer);
+ } else
+ info->max = 0;
+ } else if (info->max < 0) {
+ info->timer.expires = jiffies + info->period;
+ add_timer(&info->timer);
+ }
+}
+
+/**
+ * ksz_alloc_soft_desc - allocate software descriptors
+ * @desc_info: Descriptor information structure.
+ * @transmit: Indication that descriptors are for transmit.
+ *
+ * This local function allocates software descriptors for manipulation in
+ * memory.
+ *
+ * Return 0 if successful; 1 if the allocation fails.
+ */
+static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
+{
+ desc_info->ring = kzalloc(sizeof(struct ksz_desc) * desc_info->alloc,
+ GFP_KERNEL);
+ if (!desc_info->ring)
+ return 1;
+ hw_init_desc(desc_info, transmit);
+ return 0;
+}
+
+/**
+ * ksz_alloc_desc - allocate hardware descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This local function allocates hardware descriptors for receiving and
+ * transmitting.
+ *
+ * Return 0 if successful; 1 if the allocation fails.
+ */
+static int ksz_alloc_desc(struct dev_info *adapter)
+{
+ struct ksz_hw *hw = &adapter->hw;
+ int offset;
+
+ /* Allocate memory for RX & TX descriptors. */
+ adapter->desc_pool.alloc_size =
+ hw->rx_desc_info.size * hw->rx_desc_info.alloc +
+ hw->tx_desc_info.size * hw->tx_desc_info.alloc +
+ DESC_ALIGNMENT;
+
+ adapter->desc_pool.alloc_virt =
+ pci_alloc_consistent(
+ adapter->pdev, adapter->desc_pool.alloc_size,
+ &adapter->desc_pool.dma_addr);
+ if (adapter->desc_pool.alloc_virt == NULL) {
+ adapter->desc_pool.alloc_size = 0;
+ return 1;
+ }
+ memset(adapter->desc_pool.alloc_virt, 0, adapter->desc_pool.alloc_size);
+
+ /* Align to the next cache line boundary. */
+ offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ?
+ (DESC_ALIGNMENT -
+ ((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT)) : 0);
+ adapter->desc_pool.virt = adapter->desc_pool.alloc_virt + offset;
+ adapter->desc_pool.phys = adapter->desc_pool.dma_addr + offset;
+
+ /* Allocate receive/transmit descriptors. */
+ hw->rx_desc_info.ring_virt = (struct ksz_hw_desc *)
+ adapter->desc_pool.virt;
+ hw->rx_desc_info.ring_phys = adapter->desc_pool.phys;
+ offset = hw->rx_desc_info.alloc * hw->rx_desc_info.size;
+ hw->tx_desc_info.ring_virt = (struct ksz_hw_desc *)
+ (adapter->desc_pool.virt + offset);
+ hw->tx_desc_info.ring_phys = adapter->desc_pool.phys + offset;
+
+ if (ksz_alloc_soft_desc(&hw->rx_desc_info, 0))
+ return 1;
+ if (ksz_alloc_soft_desc(&hw->tx_desc_info, 1))
+ return 1;
+
+ return 0;
+}
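+
+/*
+ * Worked example for the alignment above: if alloc_virt ends in 0x4 and
+ * DESC_ALIGNMENT is 16, offset = 16 - 4 = 12, advancing virt/phys to the
+ * next 16-byte boundary. A hypothetical equivalent using the kernel
+ * ALIGN() macro, assuming DESC_ALIGNMENT is a power of two:
+ */
+static ulong desc_align_offset_example(ulong addr)
+{
+ return ALIGN(addr, DESC_ALIGNMENT) - addr; /* e.g. 0x1004 -> 12 */
+}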
+
+/**
+ * free_dma_buf - release DMA buffer resources
+ * @adapter: Adapter information structure.
+ * @dma_buf: The DMA buffer to free.
+ * @direction: The DMA direction of the mapping.
+ *
+ * This routine is just a helper function to release the DMA buffer resources.
+ */
+static void free_dma_buf(struct dev_info *adapter, struct ksz_dma_buf *dma_buf,
+ int direction)
+{
+ pci_unmap_single(adapter->pdev, dma_buf->dma, dma_buf->len, direction);
+ dev_kfree_skb(dma_buf->skb);
+ dma_buf->skb = NULL;
+ dma_buf->dma = 0;
+}
+
+/**
+ * ksz_init_rx_buffers - initialize receive descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This routine initializes DMA buffers for receiving.
+ */
+static void ksz_init_rx_buffers(struct dev_info *adapter)
+{
+ int i;
+ struct ksz_desc *desc;
+ struct ksz_dma_buf *dma_buf;
+ struct ksz_hw *hw = &adapter->hw;
+ struct ksz_desc_info *info = &hw->rx_desc_info;
+
+ for (i = 0; i < hw->rx_desc_info.alloc; i++) {
+ get_rx_pkt(info, &desc);
+
+ dma_buf = DMA_BUFFER(desc);
+ if (dma_buf->skb && dma_buf->len != adapter->mtu)
+ free_dma_buf(adapter, dma_buf, PCI_DMA_FROMDEVICE);
+ dma_buf->len = adapter->mtu;
+ if (!dma_buf->skb)
+ dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
+ if (dma_buf->skb && !dma_buf->dma) {
+ dma_buf->skb->dev = adapter->dev;
+ dma_buf->dma = pci_map_single(
+ adapter->pdev,
+ skb_tail_pointer(dma_buf->skb),
+ dma_buf->len,
+ PCI_DMA_FROMDEVICE);
+ }
+
+ /* Set descriptor. */
+ set_rx_buf(desc, dma_buf->dma);
+ set_rx_len(desc, dma_buf->len);
+ release_desc(desc);
+ }
+}
+
+/**
+ * ksz_alloc_mem - allocate memory for hardware descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This function allocates memory for use by hardware descriptors for receiving
+ * and transmitting.
+ *
+ * Return 0 if successful.
+ */
+static int ksz_alloc_mem(struct dev_info *adapter)
+{
+ struct ksz_hw *hw = &adapter->hw;
+
+ /* Determine the number of receive and transmit descriptors. */
+ hw->rx_desc_info.alloc = NUM_OF_RX_DESC;
+ hw->tx_desc_info.alloc = NUM_OF_TX_DESC;
+
+ /* Determine how many descriptors to use between transmit interrupts. */
+ hw->tx_int_cnt = 0;
+ hw->tx_int_mask = NUM_OF_TX_DESC / 4;
+ if (hw->tx_int_mask > 8)
+ hw->tx_int_mask = 8;
+ while (hw->tx_int_mask) {
+ hw->tx_int_cnt++;
+ hw->tx_int_mask >>= 1;
+ }
+ if (hw->tx_int_cnt) {
+ hw->tx_int_mask = (1 << (hw->tx_int_cnt - 1)) - 1;
+ hw->tx_int_cnt = 0;
+ }
+
+ /* Determine the descriptor size. */
+ hw->rx_desc_info.size =
+ (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
+ DESC_ALIGNMENT) * DESC_ALIGNMENT);
+ hw->tx_desc_info.size =
+ (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
+ DESC_ALIGNMENT) * DESC_ALIGNMENT);
+ if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
+ pr_alert("Hardware descriptor size not right!\n");
+ ksz_check_desc_num(&hw->rx_desc_info);
+ ksz_check_desc_num(&hw->tx_desc_info);
+
+ /* Allocate descriptors. */
+ if (ksz_alloc_desc(adapter))
+ return 1;
+
+ return 0;
+}
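+
+/*
+ * Worked example for the interrupt spacing above: with, say, 64 transmit
+ * descriptors, tx_int_mask starts at 16 and is capped to 8; the shift
+ * loop counts 4 bits, so the final mask is (1 << 3) - 1 = 7 and a
+ * transmit interrupt is requested about every 8 descriptors.
+ */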
+
+/**
+ * ksz_free_desc - free software and hardware descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This local routine frees the software and hardware descriptors allocated by
+ * ksz_alloc_desc().
+ */
+static void ksz_free_desc(struct dev_info *adapter)
+{
+ struct ksz_hw *hw = &adapter->hw;
+
+ /* Reset descriptor. */
+ hw->rx_desc_info.ring_virt = NULL;
+ hw->tx_desc_info.ring_virt = NULL;
+ hw->rx_desc_info.ring_phys = 0;
+ hw->tx_desc_info.ring_phys = 0;
+
+ /* Free memory. */
+ if (adapter->desc_pool.alloc_virt)
+ pci_free_consistent(
+ adapter->pdev,
+ adapter->desc_pool.alloc_size,
+ adapter->desc_pool.alloc_virt,
+ adapter->desc_pool.dma_addr);
+
+ /* Reset resource pool. */
+ adapter->desc_pool.alloc_size = 0;
+ adapter->desc_pool.alloc_virt = NULL;
+
+ kfree(hw->rx_desc_info.ring);
+ hw->rx_desc_info.ring = NULL;
+ kfree(hw->tx_desc_info.ring);
+ hw->tx_desc_info.ring = NULL;
+}
+
+/**
+ * ksz_free_buffers - free buffers used in the descriptors
+ * @adapter: Adapter information structure.
+ * @desc_info: Descriptor information structure.
+ * @direction: The DMA direction of the buffers.
+ *
+ * This local routine frees the socket buffers attached to the descriptors.
+ */
+static void ksz_free_buffers(struct dev_info *adapter,
+ struct ksz_desc_info *desc_info, int direction)
+{
+ int i;
+ struct ksz_dma_buf *dma_buf;
+ struct ksz_desc *desc = desc_info->ring;
+
+ for (i = 0; i < desc_info->alloc; i++) {
+ dma_buf = DMA_BUFFER(desc);
+ if (dma_buf->skb)
+ free_dma_buf(adapter, dma_buf, direction);
+ desc++;
+ }
+}
+
+/**
+ * ksz_free_mem - free all resources used by descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This local routine frees all the resources allocated by ksz_alloc_mem().
+ */
+static void ksz_free_mem(struct dev_info *adapter)
+{
+ /* Free transmit buffers. */
+ ksz_free_buffers(adapter, &adapter->hw.tx_desc_info,
+ PCI_DMA_TODEVICE);
+
+ /* Free receive buffers. */
+ ksz_free_buffers(adapter, &adapter->hw.rx_desc_info,
+ PCI_DMA_FROMDEVICE);
+
+ /* Free descriptors. */
+ ksz_free_desc(adapter);
+}
+
+static void get_mib_counters(struct ksz_hw *hw, int first, int cnt,
+ u64 *counter)
+{
+ int i;
+ int mib;
+ int port;
+ struct ksz_port_mib *port_mib;
+
+ memset(counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
+ for (i = 0, port = first; i < cnt; i++, port++) {
+ port_mib = &hw->port_mib[port];
+ for (mib = port_mib->mib_start; mib < hw->mib_cnt; mib++)
+ counter[mib] += port_mib->counter[mib];
+ }
+}
+
+/**
+ * send_packet - send packet
+ * @skb: Socket buffer.
+ * @dev: Network device.
+ *
+ * This routine is used to send a packet out to the network.
+ */
+static void send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ksz_desc *desc;
+ struct ksz_desc *first;
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_desc_info *info = &hw->tx_desc_info;
+ struct ksz_dma_buf *dma_buf;
+ int len;
+ int last_frag = skb_shinfo(skb)->nr_frags;
+
+ /*
+ * KSZ8842 with multiple device interfaces needs to be told which port
+ * to send.
+ */
+ if (hw->dev_count > 1)
+ hw->dst_ports = 1 << priv->port.first_port;
+
+ /* Hardware will pad the length to 60. */
+ len = skb->len;
+
+ /* Remember the very first descriptor. */
+ first = info->cur;
+ desc = first;
+
+ dma_buf = DMA_BUFFER(desc);
+ if (last_frag) {
+ int frag;
+ skb_frag_t *this_frag;
+
+ dma_buf->len = skb_headlen(skb);
+
+ dma_buf->dma = pci_map_single(
+ hw_priv->pdev, skb->data, dma_buf->len,
+ PCI_DMA_TODEVICE);
+ set_tx_buf(desc, dma_buf->dma);
+ set_tx_len(desc, dma_buf->len);
+
+ frag = 0;
+ do {
+ this_frag = &skb_shinfo(skb)->frags[frag];
+
+ /* Get a new descriptor. */
+ get_tx_pkt(info, &desc);
+
+ /* Keep track of descriptors used so far. */
+ ++hw->tx_int_cnt;
+
+ dma_buf = DMA_BUFFER(desc);
+ dma_buf->len = this_frag->size;
+
+ dma_buf->dma = pci_map_single(
+ hw_priv->pdev,
+ skb_frag_address(this_frag),
+ dma_buf->len,
+ PCI_DMA_TODEVICE);
+ set_tx_buf(desc, dma_buf->dma);
+ set_tx_len(desc, dma_buf->len);
+
+ frag++;
+ if (frag == last_frag)
+ break;
+
+ /* Do not release the last descriptor here. */
+ release_desc(desc);
+ } while (1);
+
+ /* current points to the last descriptor. */
+ info->cur = desc;
+
+ /* Release the first descriptor. */
+ release_desc(first);
+ } else {
+ dma_buf->len = len;
+
+ dma_buf->dma = pci_map_single(
+ hw_priv->pdev, skb->data, dma_buf->len,
+ PCI_DMA_TODEVICE);
+ set_tx_buf(desc, dma_buf->dma);
+ set_tx_len(desc, dma_buf->len);
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ (desc)->sw.buf.tx.csum_gen_tcp = 1;
+ (desc)->sw.buf.tx.csum_gen_udp = 1;
+ }
+
+ /*
+ * The last descriptor holds the packet so that it can be returned to
+ * network subsystem after all descriptors are transmitted.
+ */
+ dma_buf->skb = skb;
+
+ hw_send_pkt(hw);
+
+ /* Update transmit statistics. */
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += len;
+}
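+
+/*
+ * Release ordering note for the scatter-gather path above: an skb with a
+ * linear head and three page fragments uses four descriptors; the
+ * intermediate ones are released inside the loop, the first only after
+ * the whole chain is built, and the last in hw_send_pkt(), so the
+ * hardware never sees a partially constructed chain.
+ */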
+
+/**
+ * transmit_cleanup - clean up transmit descriptors
+ * @hw_priv: Network device information.
+ * @normal: Stop at the first descriptor still owned by the hardware instead
+ * of reclaiming it.
+ *
+ * This routine is called to clean up the transmitted buffers.
+ */
+static void transmit_cleanup(struct dev_info *hw_priv, int normal)
+{
+ int last;
+ union desc_stat status;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_desc_info *info = &hw->tx_desc_info;
+ struct ksz_desc *desc;
+ struct ksz_dma_buf *dma_buf;
+ struct net_device *dev = NULL;
+
+ spin_lock(&hw_priv->hwlock);
+ last = info->last;
+
+ while (info->avail < info->alloc) {
+ /* Get next descriptor which is not hardware owned. */
+ desc = &info->ring[last];
+ status.data = le32_to_cpu(desc->phw->ctrl.data);
+ if (status.tx.hw_owned) {
+ if (normal)
+ break;
+ else
+ reset_desc(desc, status);
+ }
+
+ dma_buf = DMA_BUFFER(desc);
+ pci_unmap_single(
+ hw_priv->pdev, dma_buf->dma, dma_buf->len,
+ PCI_DMA_TODEVICE);
+
+ /* This descriptor contains the last buffer in the packet. */
+ if (dma_buf->skb) {
+ dev = dma_buf->skb->dev;
+
+ /* Release the packet back to network subsystem. */
+ dev_kfree_skb_irq(dma_buf->skb);
+ dma_buf->skb = NULL;
+ }
+
+ /* Free the transmitted descriptor. */
+ last++;
+ last &= info->mask;
+ info->avail++;
+ }
+ info->last = last;
+ spin_unlock(&hw_priv->hwlock);
+
+ /* Notify the network subsystem that the packet has been sent. */
+ if (dev)
+ dev->trans_start = jiffies;
+}
+
+/**
+ * tx_done - transmit done processing
+ * @hw_priv: Network device information.
+ *
+ * This routine is called when the transmit interrupt is triggered, indicating
+ * either a packet is sent successfully or there are transmit errors.
+ */
+static void tx_done(struct dev_info *hw_priv)
+{
+ struct ksz_hw *hw = &hw_priv->hw;
+ int port;
+
+ transmit_cleanup(hw_priv, 1);
+
+ for (port = 0; port < hw->dev_count; port++) {
+ struct net_device *dev = hw->port_info[port].pdev;
+
+ if (netif_running(dev) && netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ }
+}
+
+static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
+{
+ skb->dev = old->dev;
+ skb->protocol = old->protocol;
+ skb->ip_summed = old->ip_summed;
+ skb->csum = old->csum;
+ skb_set_network_header(skb, ETH_HLEN);
+
+ dev_kfree_skb(old);
+}
+
+/**
+ * netdev_tx - send out packet
+ * @skb: Socket buffer.
+ * @dev: Network device.
+ *
+ * This function is used by the upper network layer to send out a packet.
+ *
+ * Return NETDEV_TX_OK if successful; NETDEV_TX_BUSY if the packet cannot be
+ * sent at this time.
+ */
+static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int left;
+ int num = 1;
+ int rc = 0;
+
+ if (hw->features & SMALL_PACKET_TX_BUG) {
+ struct sk_buff *org_skb = skb;
+
+ if (skb->len <= 48) {
+ if (skb_end_pointer(skb) - skb->data >= 50) {
+ memset(&skb->data[skb->len], 0, 50 - skb->len);
+ skb->len = 50;
+ } else {
+ skb = dev_alloc_skb(50);
+ if (!skb)
+ return NETDEV_TX_BUSY;
+ memcpy(skb->data, org_skb->data, org_skb->len);
+ memset(&skb->data[org_skb->len], 0,
+ 50 - org_skb->len);
+ skb->len = 50;
+ copy_old_skb(org_skb, skb);
+ }
+ }
+ }
+
+ spin_lock_irq(&hw_priv->hwlock);
+
+ num = skb_shinfo(skb)->nr_frags + 1;
+ left = hw_alloc_pkt(hw, skb->len, num);
+ if (left) {
+ if (left < num ||
+ ((CHECKSUM_PARTIAL == skb->ip_summed) &&
+ (htons(ETH_P_IPV6) == skb->protocol))) {
+ struct sk_buff *org_skb = skb;
+
+ skb = dev_alloc_skb(org_skb->len);
+ if (!skb) {
+ rc = NETDEV_TX_BUSY;
+ goto unlock;
+ }
+ skb_copy_and_csum_dev(org_skb, skb->data);
+ org_skb->ip_summed = CHECKSUM_NONE;
+ skb->len = org_skb->len;
+ copy_old_skb(org_skb, skb);
+ }
+ send_packet(skb, dev);
+ if (left <= num)
+ netif_stop_queue(dev);
+ } else {
+ /* Stop the transmit queue until packet is allocated. */
+ netif_stop_queue(dev);
+ rc = NETDEV_TX_BUSY;
+ }
+unlock:
+ spin_unlock_irq(&hw_priv->hwlock);
+
+ return rc;
+}
+
+/**
+ * netdev_tx_timeout - transmit timeout processing
+ * @dev: Network device.
+ *
+ * This routine is called when the transmit timer expires. That indicates the
+ * hardware is not running correctly because transmit interrupts are not
+ * triggered to free up resources so that the transmit routine can continue
+ * sending out packets. The hardware is reset to correct the problem.
+ */
+static void netdev_tx_timeout(struct net_device *dev)
+{
+ static unsigned long last_reset;
+
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int port;
+
+ if (hw->dev_count > 1) {
+ /*
+ * Only reset the hardware if time between calls is long
+ * enough.
+ */
+ if (jiffies - last_reset <= dev->watchdog_timeo)
+ hw_priv = NULL;
+ }
+
+ last_reset = jiffies;
+ if (hw_priv) {
+ hw_dis_intr(hw);
+ hw_disable(hw);
+
+ transmit_cleanup(hw_priv, 0);
+ hw_reset_pkts(&hw->rx_desc_info);
+ hw_reset_pkts(&hw->tx_desc_info);
+ ksz_init_rx_buffers(hw_priv);
+
+ hw_reset(hw);
+
+ hw_set_desc_base(hw,
+ hw->tx_desc_info.ring_phys,
+ hw->rx_desc_info.ring_phys);
+ hw_set_addr(hw);
+ if (hw->all_multi)
+ hw_set_multicast(hw, hw->all_multi);
+ else if (hw->multi_list_size)
+ hw_set_grp_addr(hw);
+
+ if (hw->dev_count > 1) {
+ hw_set_add_addr(hw);
+ for (port = 0; port < SWITCH_PORT_NUM; port++) {
+ struct net_device *port_dev;
+
+ port_set_stp_state(hw, port,
+ STP_STATE_DISABLED);
+
+ port_dev = hw->port_info[port].pdev;
+ if (netif_running(port_dev))
+ port_set_stp_state(hw, port,
+ STP_STATE_SIMPLE);
+ }
+ }
+
+ hw_enable(hw);
+ hw_ena_intr(hw);
+ }
+
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+static inline void csum_verified(struct sk_buff *skb)
+{
+ unsigned short protocol;
+ struct iphdr *iph;
+
+ protocol = skb->protocol;
+ skb_reset_network_header(skb);
+ iph = (struct iphdr *) skb_network_header(skb);
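+ /*
+ * For an 802.1Q frame the network header still points at the
+ * VLAN tag here, so iph->tot_len (at offset 2) actually overlays
+ * the tag's encapsulated protocol field.
+ */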
+ if (protocol == htons(ETH_P_8021Q)) {
+ protocol = iph->tot_len;
+ skb_set_network_header(skb, VLAN_HLEN);
+ iph = (struct iphdr *) skb_network_header(skb);
+ }
+ if (protocol == htons(ETH_P_IP)) {
+ if (iph->protocol == IPPROTO_TCP)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+}
+
+static inline int rx_proc(struct net_device *dev, struct ksz_hw *hw,
+ struct ksz_desc *desc, union desc_stat status)
+{
+ int packet_len;
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_dma_buf *dma_buf;
+ struct sk_buff *skb;
+ int rx_status;
+
+ /* Received length includes 4-byte CRC. */
+ packet_len = status.rx.frame_len - 4;
+
+ dma_buf = DMA_BUFFER(desc);
+ pci_dma_sync_single_for_cpu(
+ hw_priv->pdev, dma_buf->dma, packet_len + 4,
+ PCI_DMA_FROMDEVICE);
+
+ do {
+ /* dev_alloc_skb() reserves headroom, so skb->data != skb->head. */
+ skb = dev_alloc_skb(packet_len + 2);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ /*
+ * Reserve 2 bytes so the IP header is aligned on a 4-byte
+ * boundary for better performance.
+ */
+ skb_reserve(skb, 2);
+
+ memcpy(skb_put(skb, packet_len),
+ dma_buf->skb->data, packet_len);
+ } while (0);
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
+ csum_verified(skb);
+
+ /* Update receive statistics. */
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += packet_len;
+
+ /* Notify upper layer for received packet. */
+ rx_status = netif_rx(skb);
+
+ return 0;
+}
+
+static int dev_rcv_packets(struct dev_info *hw_priv)
+{
+ int next;
+ union desc_stat status;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct net_device *dev = hw->port_info[0].pdev;
+ struct ksz_desc_info *info = &hw->rx_desc_info;
+ int left = info->alloc;
+ struct ksz_desc *desc;
+ int received = 0;
+
+ next = info->next;
+ while (left--) {
+ /* Get next descriptor which is not hardware owned. */
+ desc = &info->ring[next];
+ status.data = le32_to_cpu(desc->phw->ctrl.data);
+ if (status.rx.hw_owned)
+ break;
+
+ /* Status valid only when last descriptor bit is set. */
+ if (status.rx.last_desc && status.rx.first_desc) {
+ if (rx_proc(dev, hw, desc, status))
+ goto release_packet;
+ received++;
+ }
+
+release_packet:
+ release_desc(desc);
+ next++;
+ next &= info->mask;
+ }
+ info->next = next;
+
+ return received;
+}
+
+static int port_rcv_packets(struct dev_info *hw_priv)
+{
+ int next;
+ union desc_stat status;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct net_device *dev = hw->port_info[0].pdev;
+ struct ksz_desc_info *info = &hw->rx_desc_info;
+ int left = info->alloc;
+ struct ksz_desc *desc;
+ int received = 0;
+
+ next = info->next;
+ while (left--) {
+ /* Get next descriptor which is not hardware owned. */
+ desc = &info->ring[next];
+ status.data = le32_to_cpu(desc->phw->ctrl.data);
+ if (status.rx.hw_owned)
+ break;
+
+ if (hw->dev_count > 1) {
+ /* Get received port number. */
+ int p = HW_TO_DEV_PORT(status.rx.src_port);
+
+ dev = hw->port_info[p].pdev;
+ if (!netif_running(dev))
+ goto release_packet;
+ }
+
+ /* Status valid only when last descriptor bit is set. */
+ if (status.rx.last_desc && status.rx.first_desc) {
+ if (rx_proc(dev, hw, desc, status))
+ goto release_packet;
+ received++;
+ }
+
+release_packet:
+ release_desc(desc);
+ next++;
+ next &= info->mask;
+ }
+ info->next = next;
+
+ return received;
+}
+
+static int dev_rcv_special(struct dev_info *hw_priv)
+{
+ int next;
+ union desc_stat status;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct net_device *dev = hw->port_info[0].pdev;
+ struct ksz_desc_info *info = &hw->rx_desc_info;
+ int left = info->alloc;
+ struct ksz_desc *desc;
+ int received = 0;
+
+ next = info->next;
+ while (left--) {
+ /* Get next descriptor which is not hardware owned. */
+ desc = &info->ring[next];
+ status.data = le32_to_cpu(desc->phw->ctrl.data);
+ if (status.rx.hw_owned)
+ break;
+
+ if (hw->dev_count > 1) {
+ /* Get received port number. */
+ int p = HW_TO_DEV_PORT(status.rx.src_port);
+
+ dev = hw->port_info[p].pdev;
+ if (!netif_running(dev))
+ goto release_packet;
+ }
+
+ /* Status valid only when last descriptor bit is set. */
+ if (status.rx.last_desc && status.rx.first_desc) {
+ /*
+ * Receive without error. With receive errors
+ * disabled, packets with receive errors will be
+ * dropped, so no need to check the error bit.
+ */
+ if (!status.rx.error || (status.data &
+ KS_DESC_RX_ERROR_COND) ==
+ KS_DESC_RX_ERROR_TOO_LONG) {
+ if (rx_proc(dev, hw, desc, status))
+ goto release_packet;
+ received++;
+ } else {
+ struct dev_priv *priv = netdev_priv(dev);
+
+ /* Update receive error statistics. */
+ priv->port.counter[OID_COUNTER_RCV_ERROR]++;
+ }
+ }
+
+release_packet:
+ release_desc(desc);
+ next++;
+ next &= info->mask;
+ }
+ info->next = next;
+
+ return received;
+}
+
+static void rx_proc_task(unsigned long data)
+{
+ struct dev_info *hw_priv = (struct dev_info *) data;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ if (!hw->enabled)
+ return;
+ if (unlikely(!hw_priv->dev_rcv(hw_priv))) {
+
+ /* In case receive process is suspended because of overrun. */
+ hw_resume_rx(hw);
+
+ /* tasklets are interruptible. */
+ spin_lock_irq(&hw_priv->hwlock);
+ hw_turn_on_intr(hw, KS884X_INT_RX_MASK);
+ spin_unlock_irq(&hw_priv->hwlock);
+ } else {
+ hw_ack_intr(hw, KS884X_INT_RX);
+ tasklet_schedule(&hw_priv->rx_tasklet);
+ }
+}
+
+static void tx_proc_task(unsigned long data)
+{
+ struct dev_info *hw_priv = (struct dev_info *) data;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ hw_ack_intr(hw, KS884X_INT_TX_MASK);
+
+ tx_done(hw_priv);
+
+ /* tasklets are interruptible. */
+ spin_lock_irq(&hw_priv->hwlock);
+ hw_turn_on_intr(hw, KS884X_INT_TX);
+ spin_unlock_irq(&hw_priv->hwlock);
+}
+
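+/*
+ * hw->rx_stop below acts as a small state counter: 0 means receive was
+ * stopped on purpose and the stop interrupt is unexpected, 1 means
+ * receive has just been started, and 2 or more means receive was running
+ * and should be restarted if it is still enabled.
+ */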
+static inline void handle_rx_stop(struct ksz_hw *hw)
+{
+ /* Receive has just been stopped. */
+ if (0 == hw->rx_stop)
+ hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
+ else if (hw->rx_stop > 1) {
+ if (hw->enabled && (hw->rx_cfg & DMA_RX_ENABLE)) {
+ hw_start_rx(hw);
+ } else {
+ hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
+ hw->rx_stop = 0;
+ }
+ } else
+ /* Receive has just been started. */
+ hw->rx_stop++;
+}
+
+/**
+ * netdev_intr - interrupt handling
+ * @irq: Interrupt number.
+ * @dev_id: Network device.
+ *
+ * This function is called when the hardware triggers an interrupt.
+ *
+ * Return IRQ_HANDLED if interrupt is handled.
+ */
+static irqreturn_t netdev_intr(int irq, void *dev_id)
+{
+ uint int_enable = 0;
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ hw_read_intr(hw, &int_enable);
+
+ /* Not our interrupt! */
+ if (!int_enable)
+ return IRQ_NONE;
+
+ do {
+ hw_ack_intr(hw, int_enable);
+ int_enable &= hw->intr_mask;
+
+ if (unlikely(int_enable & KS884X_INT_TX_MASK)) {
+ hw_dis_intr_bit(hw, KS884X_INT_TX_MASK);
+ tasklet_schedule(&hw_priv->tx_tasklet);
+ }
+
+ if (likely(int_enable & KS884X_INT_RX)) {
+ hw_dis_intr_bit(hw, KS884X_INT_RX);
+ tasklet_schedule(&hw_priv->rx_tasklet);
+ }
+
+ if (unlikely(int_enable & KS884X_INT_RX_OVERRUN)) {
+ dev->stats.rx_fifo_errors++;
+ hw_resume_rx(hw);
+ }
+
+ if (unlikely(int_enable & KS884X_INT_PHY)) {
+ struct ksz_port *port = &priv->port;
+
+ hw->features |= LINK_INT_WORKING;
+ port_get_link_speed(port);
+ }
+
+ if (unlikely(int_enable & KS884X_INT_RX_STOPPED)) {
+ handle_rx_stop(hw);
+ break;
+ }
+
+ if (unlikely(int_enable & KS884X_INT_TX_STOPPED)) {
+ u32 data;
+
+ hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
+ pr_info("Tx stopped\n");
+ data = readl(hw->io + KS_DMA_TX_CTRL);
+ if (!(data & DMA_TX_ENABLE))
+ pr_info("Tx disabled\n");
+ break;
+ }
+ } while (0);
+
+ hw_ena_intr(hw);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Linux network device functions
+ */
+
+static unsigned long next_jiffies;
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void netdev_netpoll(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ hw_dis_intr(&hw_priv->hw);
+ netdev_intr(dev->irq, dev);
+}
+#endif
+
+static void bridge_change(struct ksz_hw *hw)
+{
+ int port;
+ u8 member;
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ /* No ports in forwarding state. */
+ if (!sw->member) {
+ port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
+ sw_block_addr(hw);
+ }
+ for (port = 0; port < SWITCH_PORT_NUM; port++) {
+ if (STP_STATE_FORWARDING == sw->port_cfg[port].stp_state)
+ member = HOST_MASK | sw->member;
+ else
+ member = HOST_MASK | (1 << port);
+ if (member != sw->port_cfg[port].member)
+ sw_cfg_port_base_vlan(hw, port, member);
+ }
+}
+
+/**
+ * netdev_close - close network device
+ * @dev: Network device.
+ *
+ * This function processes the close operation of the network device. It is
+ * caused by the user command "ifconfig ethX down."
+ *
+ * Return 0 if successful; otherwise an error code indicating failure.
+ */
+static int netdev_close(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_port *port = &priv->port;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int pi;
+
+ netif_stop_queue(dev);
+
+ ksz_stop_timer(&priv->monitor_timer_info);
+
+ /* Need to shut down the port manually in multiple device interfaces mode. */
+ if (hw->dev_count > 1) {
+ port_set_stp_state(hw, port->first_port, STP_STATE_DISABLED);
+
+ /* Port is closed. Need to change bridge setting. */
+ if (hw->features & STP_SUPPORT) {
+ pi = 1 << port->first_port;
+ if (hw->ksz_switch->member & pi) {
+ hw->ksz_switch->member &= ~pi;
+ bridge_change(hw);
+ }
+ }
+ }
+ if (port->first_port > 0)
+ hw_del_addr(hw, dev->dev_addr);
+ if (!hw_priv->wol_enable)
+ port_set_power_saving(port, true);
+
+ if (priv->multicast)
+ --hw->all_multi;
+ if (priv->promiscuous)
+ --hw->promiscuous;
+
+ hw_priv->opened--;
+ if (!(hw_priv->opened)) {
+ ksz_stop_timer(&hw_priv->mib_timer_info);
+ flush_work(&hw_priv->mib_read);
+
+ hw_dis_intr(hw);
+ hw_disable(hw);
+ hw_clr_multicast(hw);
+
+ /* Delay for receive task to stop scheduling itself. */
+ msleep(2000 / HZ);
+
+ tasklet_disable(&hw_priv->rx_tasklet);
+ tasklet_disable(&hw_priv->tx_tasklet);
+ free_irq(dev->irq, hw_priv->dev);
+
+ transmit_cleanup(hw_priv, 0);
+ hw_reset_pkts(&hw->rx_desc_info);
+ hw_reset_pkts(&hw->tx_desc_info);
+
+ /* Clean out static MAC table when the switch is shutdown. */
+ if (hw->features & STP_SUPPORT)
+ sw_clr_sta_mac_table(hw);
+ }
+
+ return 0;
+}
+
+static void hw_cfg_huge_frame(struct dev_info *hw_priv, struct ksz_hw *hw)
+{
+ if (hw->ksz_switch) {
+ u32 data;
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
+ if (hw->features & RX_HUGE_FRAME)
+ data |= SWITCH_HUGE_PACKET;
+ else
+ data &= ~SWITCH_HUGE_PACKET;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
+ }
+ if (hw->features & RX_HUGE_FRAME) {
+ hw->rx_cfg |= DMA_RX_ERROR;
+ hw_priv->dev_rcv = dev_rcv_special;
+ } else {
+ hw->rx_cfg &= ~DMA_RX_ERROR;
+ if (hw->dev_count > 1)
+ hw_priv->dev_rcv = port_rcv_packets;
+ else
+ hw_priv->dev_rcv = dev_rcv_packets;
+ }
+}
+
+static int prepare_hardware(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int rc = 0;
+
+ /* Remember the network device that requests interrupts. */
+ hw_priv->dev = dev;
+ rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
+ if (rc)
+ return rc;
+ tasklet_enable(&hw_priv->rx_tasklet);
+ tasklet_enable(&hw_priv->tx_tasklet);
+
+ hw->promiscuous = 0;
+ hw->all_multi = 0;
+ hw->multi_list_size = 0;
+
+ hw_reset(hw);
+
+ hw_set_desc_base(hw,
+ hw->tx_desc_info.ring_phys, hw->rx_desc_info.ring_phys);
+ hw_set_addr(hw);
+ hw_cfg_huge_frame(hw_priv, hw);
+ ksz_init_rx_buffers(hw_priv);
+ return 0;
+}
+
+static void set_media_state(struct net_device *dev, int media_state)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ if (media_state == priv->media_state)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ netif_info(priv, link, dev, "link %s\n",
+ media_state == priv->media_state ? "on" : "off");
+}
+
+/**
+ * netdev_open - open network device
+ * @dev: Network device.
+ *
+ * This function processes the open operation of the network device. It is
+ * caused by the user command "ifconfig ethX up."
+ *
+ * Return 0 if successful; otherwise an error code indicating failure.
+ */
+static int netdev_open(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+ int i;
+ int p;
+ int rc = 0;
+
+ priv->multicast = 0;
+ priv->promiscuous = 0;
+
+ /* Reset device statistics. */
+ memset(&dev->stats, 0, sizeof(struct net_device_stats));
+ memset((void *) port->counter, 0,
+ (sizeof(u64) * OID_COUNTER_LAST));
+
+ if (!(hw_priv->opened)) {
+ rc = prepare_hardware(dev);
+ if (rc)
+ return rc;
+ for (i = 0; i < hw->mib_port_cnt; i++) {
+ if (next_jiffies < jiffies)
+ next_jiffies = jiffies + HZ * 2;
+ else
+ next_jiffies += HZ * 1;
+ hw_priv->counter[i].time = next_jiffies;
+ hw->port_mib[i].state = media_disconnected;
+ port_init_cnt(hw, i);
+ }
+ if (hw->ksz_switch)
+ hw->port_mib[HOST_PORT].state = media_connected;
+ else {
+ hw_add_wol_bcast(hw);
+ hw_cfg_wol_pme(hw, 0);
+ hw_clr_wol_pme_status(&hw_priv->hw);
+ }
+ }
+ port_set_power_saving(port, false);
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
+ /*
+ * Initialize to invalid value so that link detection
+ * is done.
+ */
+ hw->port_info[p].partner = 0xFF;
+ hw->port_info[p].state = media_disconnected;
+ }
+
+ /* Need to open the port in multiple device interfaces mode. */
+ if (hw->dev_count > 1) {
+ port_set_stp_state(hw, port->first_port, STP_STATE_SIMPLE);
+ if (port->first_port > 0)
+ hw_add_addr(hw, dev->dev_addr);
+ }
+
+ port_get_link_speed(port);
+ if (port->force_link)
+ port_force_link_speed(port);
+ else
+ port_set_link_speed(port);
+
+ if (!(hw_priv->opened)) {
+ hw_setup_intr(hw);
+ hw_enable(hw);
+ hw_ena_intr(hw);
+
+ if (hw->mib_port_cnt)
+ ksz_start_timer(&hw_priv->mib_timer_info,
+ hw_priv->mib_timer_info.period);
+ }
+
+ hw_priv->opened++;
+
+ ksz_start_timer(&priv->monitor_timer_info,
+ priv->monitor_timer_info.period);
+
+ priv->media_state = port->linked->state;
+
+ set_media_state(dev, media_connected);
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/* RX errors = rx_errors */
+/* RX dropped = rx_dropped */
+/* RX overruns = rx_fifo_errors */
+/* RX frame = rx_crc_errors + rx_frame_errors + rx_length_errors */
+/* TX errors = tx_errors */
+/* TX dropped = tx_dropped */
+/* TX overruns = tx_fifo_errors */
+/* TX carrier = tx_aborted_errors + tx_carrier_errors + tx_window_errors */
+/* collisions = collisions */
+
+/**
+ * netdev_query_statistics - query network device statistics
+ * @dev: Network device.
+ *
+ * This function returns the statistics of the network device. The device
+ * need not be opened.
+ *
+ * Return network device statistics.
+ */
+static struct net_device_stats *netdev_query_statistics(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct ksz_port *port = &priv->port;
+ struct ksz_hw *hw = &priv->adapter->hw;
+ struct ksz_port_mib *mib;
+ int i;
+ int p;
+
+ dev->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR];
+ dev->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR];
+
+ /* Reset to zero to add count later. */
+ dev->stats.multicast = 0;
+ dev->stats.collisions = 0;
+ dev->stats.rx_length_errors = 0;
+ dev->stats.rx_crc_errors = 0;
+ dev->stats.rx_frame_errors = 0;
+ dev->stats.tx_window_errors = 0;
+
+ for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
+ mib = &hw->port_mib[p];
+
+ dev->stats.multicast += (unsigned long)
+ mib->counter[MIB_COUNTER_RX_MULTICAST];
+
+ dev->stats.collisions += (unsigned long)
+ mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION];
+
+ dev->stats.rx_length_errors += (unsigned long)(
+ mib->counter[MIB_COUNTER_RX_UNDERSIZE] +
+ mib->counter[MIB_COUNTER_RX_FRAGMENT] +
+ mib->counter[MIB_COUNTER_RX_OVERSIZE] +
+ mib->counter[MIB_COUNTER_RX_JABBER]);
+ dev->stats.rx_crc_errors += (unsigned long)
+ mib->counter[MIB_COUNTER_RX_CRC_ERR];
+ dev->stats.rx_frame_errors += (unsigned long)(
+ mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] +
+ mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]);
+
+ dev->stats.tx_window_errors += (unsigned long)
+ mib->counter[MIB_COUNTER_TX_LATE_COLLISION];
+ }
+
+ return &dev->stats;
+}
+
+/**
+ * netdev_set_mac_address - set network device MAC address
+ * @dev: Network device.
+ * @addr: Buffer of MAC address.
+ *
+ * This function is used to set the MAC address of the network device.
+ *
+ * Return 0 to indicate success.
+ */
+static int netdev_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct sockaddr *mac = addr;
+ uint interrupt;
+
+ if (priv->port.first_port > 0)
+ hw_del_addr(hw, dev->dev_addr);
+ else {
+ hw->mac_override = 1;
+ memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN);
+ }
+
+ memcpy(dev->dev_addr, mac->sa_data, MAC_ADDR_LEN);
+
+ interrupt = hw_block_intr(hw);
+
+ if (priv->port.first_port > 0)
+ hw_add_addr(hw, dev->dev_addr);
+ else
+ hw_set_addr(hw);
+ hw_restore_intr(hw, interrupt);
+
+ return 0;
+}
+
+static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
+ struct ksz_hw *hw, int promiscuous)
+{
+ if (promiscuous != priv->promiscuous) {
+ u8 prev_state = hw->promiscuous;
+
+ if (promiscuous)
+ ++hw->promiscuous;
+ else
+ --hw->promiscuous;
+ priv->promiscuous = promiscuous;
+
+ /* Turn on/off promiscuous mode. */
+ if (hw->promiscuous <= 1 && prev_state <= 1)
+ hw_set_promiscuous(hw, hw->promiscuous);
+
+ /*
+ * Port is not in promiscuous mode, meaning it is released
+ * from the bridge.
+ */
+ if ((hw->features & STP_SUPPORT) && !promiscuous &&
+ (dev->priv_flags & IFF_BRIDGE_PORT)) {
+ struct ksz_switch *sw = hw->ksz_switch;
+ int port = priv->port.first_port;
+
+ port_set_stp_state(hw, port, STP_STATE_DISABLED);
+ port = 1 << port;
+ if (sw->member & port) {
+ sw->member &= ~port;
+ bridge_change(hw);
+ }
+ }
+ }
+}
+
+static void dev_set_multicast(struct dev_priv *priv, struct ksz_hw *hw,
+ int multicast)
+{
+ if (multicast != priv->multicast) {
+ u8 all_multi = hw->all_multi;
+
+ if (multicast)
+ ++hw->all_multi;
+ else
+ --hw->all_multi;
+ priv->multicast = multicast;
+
+ /* Turn on/off all multicast mode. */
+ if (hw->all_multi <= 1 && all_multi <= 1)
+ hw_set_multicast(hw, hw->all_multi);
+ }
+}
+
+/**
+ * netdev_set_rx_mode - set receive mode
+ * @dev: Network device.
+ *
+ * This routine is used to set multicast addresses or put the network device
+ * into promiscuous mode.
+ */
+static void netdev_set_rx_mode(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct netdev_hw_addr *ha;
+ int multicast = (dev->flags & IFF_ALLMULTI);
+
+ dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));
+
+ if (hw_priv->hw.dev_count > 1)
+ multicast |= (dev->flags & IFF_MULTICAST);
+ dev_set_multicast(priv, hw, multicast);
+
+ /* Cannot use different hashes in multiple device interfaces mode. */
+ if (hw_priv->hw.dev_count > 1)
+ return;
+
+ if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
+ int i = 0;
+
+ /* List too big to support, so turn on all multicast mode. */
+ if (netdev_mc_count(dev) > MAX_MULTICAST_LIST) {
+ if (MAX_MULTICAST_LIST != hw->multi_list_size) {
+ hw->multi_list_size = MAX_MULTICAST_LIST;
+ ++hw->all_multi;
+ hw_set_multicast(hw, hw->all_multi);
+ }
+ return;
+ }
+
+ netdev_for_each_mc_addr(ha, dev) {
+ if (i >= MAX_MULTICAST_LIST)
+ break;
+ memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN);
+ }
+ hw->multi_list_size = (u8) i;
+ hw_set_grp_addr(hw);
+ } else {
+ if (MAX_MULTICAST_LIST == hw->multi_list_size) {
+ --hw->all_multi;
+ hw_set_multicast(hw, hw->all_multi);
+ }
+ hw->multi_list_size = 0;
+ hw_clr_multicast(hw);
+ }
+}
+
+static int netdev_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int hw_mtu;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* Cannot use different MTU in multiple device interfaces mode. */
+ if (hw->dev_count > 1)
+ if (dev != hw_priv->dev)
+ return 0;
+ if (new_mtu < 60)
+ return -EINVAL;
+
+ if (dev->mtu != new_mtu) {
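+ /* The hardware buffer must also hold the Ethernet header and,
+ * presumably, the 4-byte FCS.
+ */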
+ hw_mtu = new_mtu + ETHERNET_HEADER_SIZE + 4;
+ if (hw_mtu > MAX_RX_BUF_SIZE)
+ return -EINVAL;
+ if (hw_mtu > REGULAR_RX_BUF_SIZE) {
+ hw->features |= RX_HUGE_FRAME;
+ hw_mtu = MAX_RX_BUF_SIZE;
+ } else {
+ hw->features &= ~RX_HUGE_FRAME;
+ hw_mtu = REGULAR_RX_BUF_SIZE;
+ }
+ hw_mtu = (hw_mtu + 3) & ~3;
+ hw_priv->mtu = hw_mtu;
+ dev->mtu = new_mtu;
+ }
+ return 0;
+}
+
+/**
+ * netdev_ioctl - I/O control processing
+ * @dev: Network device.
+ * @ifr: Interface request structure.
+ * @cmd: I/O control code.
+ *
+ * This function is used to process I/O control calls.
+ *
+ * Return 0 to indicate success.
+ */
+static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+ int rc;
+ int result = 0;
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ if (down_interruptible(&priv->proc_sem))
+ return -ERESTARTSYS;
+
+ /* assume success */
+ rc = 0;
+ switch (cmd) {
+ /* Get address of MII PHY in use. */
+ case SIOCGMIIPHY:
+ data->phy_id = priv->id;
+
+ /* Fallthrough... */
+
+ /* Read MII PHY register. */
+ case SIOCGMIIREG:
+ if (data->phy_id != priv->id || data->reg_num >= 6)
+ result = -EIO;
+ else
+ hw_r_phy(hw, port->linked->port_id, data->reg_num,
+ &data->val_out);
+ break;
+
+ /* Write MII PHY register. */
+ case SIOCSMIIREG:
+ if (!capable(CAP_NET_ADMIN))
+ result = -EPERM;
+ else if (data->phy_id != priv->id || data->reg_num >= 6)
+ result = -EIO;
+ else
+ hw_w_phy(hw, port->linked->port_id, data->reg_num,
+ data->val_in);
+ break;
+
+ default:
+ result = -EOPNOTSUPP;
+ }
+
+ up(&priv->proc_sem);
+
+ return result;
+}
+
+/*
+ * MII support
+ */
+
+/**
+ * mdio_read - read PHY register
+ * @dev: Network device.
+ * @phy_id: The PHY id.
+ * @reg_num: The register number.
+ *
+ * This function returns the PHY register value.
+ *
+ * Return the register value.
+ */
+static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct ksz_port *port = &priv->port;
+ struct ksz_hw *hw = port->hw;
+ u16 val_out;
+
+ hw_r_phy(hw, port->linked->port_id, reg_num << 1, &val_out);
+ return val_out;
+}
+
+/**
+ * mdio_write - set PHY register
+ * @dev: Network device.
+ * @phy_id: The PHY id.
+ * @reg_num: The register number.
+ * @val: The register value.
+ *
+ * This procedure sets the PHY register value.
+ */
+static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct ksz_port *port = &priv->port;
+ struct ksz_hw *hw = port->hw;
+ int i;
+ int pi;
+
+ for (i = 0, pi = port->first_port; i < port->port_cnt; i++, pi++)
+ hw_w_phy(hw, pi, reg_num << 1, val);
+}
+
+/*
+ * ethtool support
+ */
+
+#define EEPROM_SIZE 0x40
+
+static u16 eeprom_data[EEPROM_SIZE] = { 0 };
+
+#define ADVERTISED_ALL \
+ (ADVERTISED_10baseT_Half | \
+ ADVERTISED_10baseT_Full | \
+ ADVERTISED_100baseT_Half | \
+ ADVERTISED_100baseT_Full)
+
+/* These functions use the MII functions in mii.c. */
+
+/**
+ * netdev_get_settings - get network device settings
+ * @dev: Network device.
+ * @cmd: Ethtool command.
+ *
+ * This function queries the PHY and returns its state in the ethtool command.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ mutex_lock(&hw_priv->lock);
+ mii_ethtool_gset(&priv->mii_if, cmd);
+ cmd->advertising |= ADVERTISED_TP;
+ mutex_unlock(&hw_priv->lock);
+
+ /* Save advertised settings for workaround in next function. */
+ priv->advertising = cmd->advertising;
+ return 0;
+}
+
+/**
+ * netdev_set_settings - set network device settings
+ * @dev: Network device.
+ * @cmd: Ethtool command.
+ *
+ * This function sets the PHY according to the ethtool command.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_port *port = &priv->port;
+ u32 speed = ethtool_cmd_speed(cmd);
+ int rc;
+
+ /*
+ * ethtool utility does not change advertised setting if auto
+ * negotiation is not specified explicitly.
+ */
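+ /*
+ * For example, "ethtool -s eth0 speed 10 duplex half" without an
+ * explicit "autoneg on" arrives here with the old advertising mask,
+ * so it is rebuilt below from the requested speed and duplex.
+ */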
+ if (cmd->autoneg && priv->advertising == cmd->advertising) {
+ cmd->advertising |= ADVERTISED_ALL;
+ if (10 == speed)
+ cmd->advertising &=
+ ~(ADVERTISED_100baseT_Full |
+ ADVERTISED_100baseT_Half);
+ else if (100 == speed)
+ cmd->advertising &=
+ ~(ADVERTISED_10baseT_Full |
+ ADVERTISED_10baseT_Half);
+ if (0 == cmd->duplex)
+ cmd->advertising &=
+ ~(ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Full);
+ else if (1 == cmd->duplex)
+ cmd->advertising &=
+ ~(ADVERTISED_100baseT_Half |
+ ADVERTISED_10baseT_Half);
+ }
+ mutex_lock(&hw_priv->lock);
+ if (cmd->autoneg &&
+ (cmd->advertising & ADVERTISED_ALL) ==
+ ADVERTISED_ALL) {
+ port->duplex = 0;
+ port->speed = 0;
+ port->force_link = 0;
+ } else {
+ port->duplex = cmd->duplex + 1;
+ if (1000 != speed)
+ port->speed = speed;
+ if (cmd->autoneg)
+ port->force_link = 0;
+ else
+ port->force_link = 1;
+ }
+ rc = mii_ethtool_sset(&priv->mii_if, cmd);
+ mutex_unlock(&hw_priv->lock);
+ return rc;
+}
+
+/**
+ * netdev_nway_reset - restart auto-negotiation
+ * @dev: Network device.
+ *
+ * This function restarts the PHY for auto-negotiation.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_nway_reset(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ int rc;
+
+ mutex_lock(&hw_priv->lock);
+ rc = mii_nway_restart(&priv->mii_if);
+ mutex_unlock(&hw_priv->lock);
+ return rc;
+}
+
+/**
+ * netdev_get_link - get network device link status
+ * @dev: Network device.
+ *
+ * This function gets the link status from the PHY.
+ *
+ * Return true if PHY is linked and false otherwise.
+ */
+static u32 netdev_get_link(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ int rc;
+
+ rc = mii_link_ok(&priv->mii_if);
+ return rc;
+}
+
+/**
+ * netdev_get_drvinfo - get network driver information
+ * @dev: Network device.
+ * @info: Ethtool driver info data structure.
+ *
+ * This procedure returns the driver information.
+ */
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(hw_priv->pdev));
+}
+
+static struct hw_regs {
+ int start;
+ int end;
+} hw_regs_range[] = {
+ { KS_DMA_TX_CTRL, KS884X_INTERRUPTS_STATUS },
+ { KS_ADD_ADDR_0_LO, KS_ADD_ADDR_F_HI },
+ { KS884X_ADDR_0_OFFSET, KS8841_WOL_FRAME_BYTE2_OFFSET },
+ { KS884X_SIDER_P, KS8842_SGCR7_P },
+ { KS8842_MACAR1_P, KS8842_TOSR8_P },
+ { KS884X_P1MBCR_P, KS8842_P3ERCR_P },
+ { 0, 0 }
+};
+
+/**
+ * netdev_get_regs_len - get length of register dump
+ * @dev: Network device.
+ *
+ * This function returns the length of the register dump.
+ *
+ * Return length of the register dump.
+ */
+static int netdev_get_regs_len(struct net_device *dev)
+{
+ struct hw_regs *range = hw_regs_range;
+ int regs_len = 0x10 * sizeof(u32);
+
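+ /* 0x10 dwords cover the 0x40-byte PCI config dump done in
+ * netdev_get_regs(); each register range is rounded up to a whole
+ * number of dwords.
+ */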
+ while (range->end > range->start) {
+ regs_len += (range->end - range->start + 3) / 4 * 4;
+ range++;
+ }
+ return regs_len;
+}
+
+/**
+ * netdev_get_regs - get register dump
+ * @dev: Network device.
+ * @regs: Ethtool registers data structure.
+ * @ptr: Buffer to store the register values.
+ *
+ * This procedure dumps the register values in the provided buffer.
+ */
+static void netdev_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *ptr)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int *buf = (int *) ptr;
+ struct hw_regs *range = hw_regs_range;
+ int len;
+
+ mutex_lock(&hw_priv->lock);
+ regs->version = 0;
+ for (len = 0; len < 0x40; len += 4) {
+ pci_read_config_dword(hw_priv->pdev, len, buf);
+ buf++;
+ }
+ while (range->end > range->start) {
+ for (len = range->start; len < range->end; len += 4) {
+ *buf = readl(hw->io + len);
+ buf++;
+ }
+ range++;
+ }
+ mutex_unlock(&hw_priv->lock);
+}
+
+#define WOL_SUPPORT \
+ (WAKE_PHY | WAKE_MAGIC | \
+ WAKE_UCAST | WAKE_MCAST | \
+ WAKE_BCAST | WAKE_ARP)
+
+/**
+ * netdev_get_wol - get Wake-on-LAN support
+ * @dev: Network device.
+ * @wol: Ethtool Wake-on-LAN data structure.
+ *
+ * This procedure returns Wake-on-LAN support.
+ */
+static void netdev_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ wol->supported = hw_priv->wol_support;
+ wol->wolopts = hw_priv->wol_enable;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+/**
+ * netdev_set_wol - set Wake-on-LAN support
+ * @dev: Network device.
+ * @wol: Ethtool Wake-on-LAN data structure.
+ *
+ * This function sets Wake-on-LAN support.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ /* Need to find a way to retrieve the device IP address. */
+ static const u8 net_addr[] = { 192, 168, 1, 1 };
+
+ if (wol->wolopts & ~hw_priv->wol_support)
+ return -EINVAL;
+
+ hw_priv->wol_enable = wol->wolopts;
+
+ /* Link wakeup cannot really be disabled. */
+ if (wol->wolopts)
+ hw_priv->wol_enable |= WAKE_PHY;
+ hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr);
+ return 0;
+}
+
+/**
+ * netdev_get_msglevel - get debug message level
+ * @dev: Network device.
+ *
+ * This function returns current debug message level.
+ *
+ * Return current debug message flags.
+ */
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+/**
+ * netdev_set_msglevel - set debug message level
+ * @dev: Network device.
+ * @value: Debug message flags.
+ *
+ * This procedure sets debug message level.
+ */
+static void netdev_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = value;
+}
+
+/**
+ * netdev_get_eeprom_len - get EEPROM length
+ * @dev: Network device.
+ *
+ * This function returns the length of the EEPROM.
+ *
+ * Return length of the EEPROM.
+ */
+static int netdev_get_eeprom_len(struct net_device *dev)
+{
+ return EEPROM_SIZE * 2;
+}
+
+/**
+ * netdev_get_eeprom - get EEPROM data
+ * @dev: Network device.
+ * @eeprom: Ethtool EEPROM data structure.
+ * @data: Buffer to store the EEPROM data.
+ *
+ * This function dumps the EEPROM data in the provided buffer.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+#define EEPROM_MAGIC 0x10A18842
+
+static int netdev_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ u8 *eeprom_byte = (u8 *) eeprom_data;
+ int i;
+ int len;
+
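+ /* EEPROM words are 16 bits; round the requested byte range up to
+ * whole words before reading.
+ */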
+ len = (eeprom->offset + eeprom->len + 1) / 2;
+ for (i = eeprom->offset / 2; i < len; i++)
+ eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
+ eeprom->magic = EEPROM_MAGIC;
+ memcpy(data, &eeprom_byte[eeprom->offset], eeprom->len);
+
+ return 0;
+}
+
+/**
+ * netdev_set_eeprom - write EEPROM data
+ * @dev: Network device.
+ * @eeprom: Ethtool EEPROM data structure.
+ * @data: Data buffer.
+ *
+ * This function modifies the EEPROM data one byte at a time.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ u16 eeprom_word[EEPROM_SIZE];
+ u8 *eeprom_byte = (u8 *) eeprom_word;
+ int i;
+ int len;
+
+ if (eeprom->magic != EEPROM_MAGIC)
+ return -EINVAL;
+
+ len = (eeprom->offset + eeprom->len + 1) / 2;
+ for (i = eeprom->offset / 2; i < len; i++)
+ eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
+ memcpy(eeprom_word, eeprom_data, EEPROM_SIZE * 2);
+ memcpy(&eeprom_byte[eeprom->offset], data, eeprom->len);
+ for (i = 0; i < EEPROM_SIZE; i++)
+ if (eeprom_word[i] != eeprom_data[i]) {
+ eeprom_data[i] = eeprom_word[i];
+ eeprom_write(&hw_priv->hw, i, eeprom_data[i]);
+ }
+
+ return 0;
+}
+
+/**
+ * netdev_get_pauseparam - get flow control parameters
+ * @dev: Network device.
+ * @pause: Ethtool PAUSE settings data structure.
+ *
+ * This procedure returns the PAUSE flow control settings.
+ */
+static void netdev_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ pause->autoneg = (hw->overrides & PAUSE_FLOW_CTRL) ? 0 : 1;
+ if (!hw->ksz_switch) {
+ pause->rx_pause =
+ (hw->rx_cfg & DMA_RX_FLOW_ENABLE) ? 1 : 0;
+ pause->tx_pause =
+ (hw->tx_cfg & DMA_TX_FLOW_ENABLE) ? 1 : 0;
+ } else {
+ pause->rx_pause =
+ (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_RX_FLOW_CTRL)) ? 1 : 0;
+ pause->tx_pause =
+ (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_TX_FLOW_CTRL)) ? 1 : 0;
+ }
+}
+
+/**
+ * netdev_set_pauseparam - set flow control parameters
+ * @dev: Network device.
+ * @pause: Ethtool PAUSE settings data structure.
+ *
+ * This function sets the PAUSE flow control settings.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+
+ mutex_lock(&hw_priv->lock);
+ if (pause->autoneg) {
+ if (!pause->rx_pause && !pause->tx_pause)
+ port->flow_ctrl = PHY_NO_FLOW_CTRL;
+ else
+ port->flow_ctrl = PHY_FLOW_CTRL;
+ hw->overrides &= ~PAUSE_FLOW_CTRL;
+ port->force_link = 0;
+ if (hw->ksz_switch) {
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_RX_FLOW_CTRL, 1);
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_TX_FLOW_CTRL, 1);
+ }
+ port_set_link_speed(port);
+ } else {
+ hw->overrides |= PAUSE_FLOW_CTRL;
+ if (hw->ksz_switch) {
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_RX_FLOW_CTRL, pause->rx_pause);
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_TX_FLOW_CTRL, pause->tx_pause);
+ } else
+ set_flow_ctrl(hw, pause->rx_pause, pause->tx_pause);
+ }
+ mutex_unlock(&hw_priv->lock);
+
+ return 0;
+}
+
+/**
+ * netdev_get_ringparam - get tx/rx ring parameters
+ * @dev: Network device.
+ * @ring: Ethtool RING settings data structure.
+ *
+ * This procedure returns the TX/RX ring settings.
+ */
+static void netdev_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ ring->tx_max_pending = (1 << 9);
+ ring->tx_pending = hw->tx_desc_info.alloc;
+ ring->rx_max_pending = (1 << 9);
+ ring->rx_pending = hw->rx_desc_info.alloc;
+}
+
+#define STATS_LEN (TOTAL_PORT_COUNTER_NUM)
+
+static struct {
+ char string[ETH_GSTRING_LEN];
+} ethtool_stats_keys[STATS_LEN] = {
+ { "rx_lo_priority_octets" },
+ { "rx_hi_priority_octets" },
+ { "rx_undersize_packets" },
+ { "rx_fragments" },
+ { "rx_oversize_packets" },
+ { "rx_jabbers" },
+ { "rx_symbol_errors" },
+ { "rx_crc_errors" },
+ { "rx_align_errors" },
+ { "rx_mac_ctrl_packets" },
+ { "rx_pause_packets" },
+ { "rx_bcast_packets" },
+ { "rx_mcast_packets" },
+ { "rx_ucast_packets" },
+ { "rx_64_or_less_octet_packets" },
+ { "rx_65_to_127_octet_packets" },
+ { "rx_128_to_255_octet_packets" },
+ { "rx_256_to_511_octet_packets" },
+ { "rx_512_to_1023_octet_packets" },
+ { "rx_1024_to_1522_octet_packets" },
+
+ { "tx_lo_priority_octets" },
+ { "tx_hi_priority_octets" },
+ { "tx_late_collisions" },
+ { "tx_pause_packets" },
+ { "tx_bcast_packets" },
+ { "tx_mcast_packets" },
+ { "tx_ucast_packets" },
+ { "tx_deferred" },
+ { "tx_total_collisions" },
+ { "tx_excessive_collisions" },
+ { "tx_single_collisions" },
+ { "tx_mult_collisions" },
+
+ { "rx_discards" },
+ { "tx_discards" },
+};
+
+/**
+ * netdev_get_strings - get statistics identity strings
+ * @dev: Network device.
+ * @stringset: String set identifier.
+ * @buf: Buffer to store the strings.
+ *
+ * This procedure returns the strings used to identify the statistics.
+ */
+static void netdev_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ if (ETH_SS_STATS == stringset)
+ memcpy(buf, &ethtool_stats_keys,
+ ETH_GSTRING_LEN * hw->mib_cnt);
+}
+
+/**
+ * netdev_get_sset_count - get statistics size
+ * @dev: Network device.
+ * @sset: The statistics set number.
+ *
+ * This function returns the size of the statistics to be reported.
+ *
+ * Return size of the statistics to be reported.
+ */
+static int netdev_get_sset_count(struct net_device *dev, int sset)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return hw->mib_cnt;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * netdev_get_ethtool_stats - get network device statistics
+ * @dev: Network device.
+ * @stats: Ethtool statistics data structure.
+ * @data: Buffer to store the statistics.
+ *
+ * This procedure returns the statistics.
+ */
+static void netdev_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+ int n_stats = stats->n_stats;
+ int i;
+ int n;
+ int p;
+ int rc;
+ u64 counter[TOTAL_PORT_COUNTER_NUM];
+
+ mutex_lock(&hw_priv->lock);
+ n = SWITCH_PORT_NUM;
+ for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
+ if (media_connected == hw->port_mib[p].state) {
+ hw_priv->counter[p].read = 1;
+
+ /* Remember first port that requests read. */
+ if (n == SWITCH_PORT_NUM)
+ n = p;
+ }
+ }
+ mutex_unlock(&hw_priv->lock);
+
+ if (n < SWITCH_PORT_NUM)
+ schedule_work(&hw_priv->mib_read);
+
+ if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) {
+ p = n;
+ rc = wait_event_interruptible_timeout(
+ hw_priv->counter[p].counter,
+ 2 == hw_priv->counter[p].read,
+ HZ * 1);
+ } else
+ for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) {
+ if (0 == i) {
+ rc = wait_event_interruptible_timeout(
+ hw_priv->counter[p].counter,
+ 2 == hw_priv->counter[p].read,
+ HZ * 2);
+ } else if (hw->port_mib[p].cnt_ptr) {
+ rc = wait_event_interruptible_timeout(
+ hw_priv->counter[p].counter,
+ 2 == hw_priv->counter[p].read,
+ HZ * 1);
+ }
+ }
+
+ get_mib_counters(hw, port->first_port, port->mib_port_cnt, counter);
+ n = hw->mib_cnt;
+ if (n > n_stats)
+ n = n_stats;
+ n_stats -= n;
+ for (i = 0; i < n; i++)
+ *data++ = counter[i];
+}
+
+/**
+ * netdev_set_features - set receive checksum support
+ * @dev: Network device.
+ * @features: New device features (offloads).
+ *
+ * This function sets receive checksum support setting.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_features(struct net_device *dev, u32 features)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ mutex_lock(&hw_priv->lock);
+
+ /* see note in hw_setup() */
+ if (features & NETIF_F_RXCSUM)
+ hw->rx_cfg |= DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP;
+ else
+ hw->rx_cfg &= ~(DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);
+
+ if (hw->enabled)
+ writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
+
+ mutex_unlock(&hw_priv->lock);
+
+ return 0;
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_settings = netdev_get_settings,
+ .set_settings = netdev_set_settings,
+ .nway_reset = netdev_nway_reset,
+ .get_link = netdev_get_link,
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_regs_len = netdev_get_regs_len,
+ .get_regs = netdev_get_regs,
+ .get_wol = netdev_get_wol,
+ .set_wol = netdev_set_wol,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+ .get_eeprom_len = netdev_get_eeprom_len,
+ .get_eeprom = netdev_get_eeprom,
+ .set_eeprom = netdev_set_eeprom,
+ .get_pauseparam = netdev_get_pauseparam,
+ .set_pauseparam = netdev_set_pauseparam,
+ .get_ringparam = netdev_get_ringparam,
+ .get_strings = netdev_get_strings,
+ .get_sset_count = netdev_get_sset_count,
+ .get_ethtool_stats = netdev_get_ethtool_stats,
+};
+
+/*
+ * Hardware monitoring
+ */
+
+static void update_link(struct net_device *dev, struct dev_priv *priv,
+ struct ksz_port *port)
+{
+ if (priv->media_state != port->linked->state) {
+ priv->media_state = port->linked->state;
+ if (netif_running(dev))
+ set_media_state(dev, media_connected);
+ }
+}
+
+static void mib_read_work(struct work_struct *work)
+{
+ struct dev_info *hw_priv =
+ container_of(work, struct dev_info, mib_read);
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port_mib *mib;
+ int i;
+
+ next_jiffies = jiffies;
+ for (i = 0; i < hw->mib_port_cnt; i++) {
+ mib = &hw->port_mib[i];
+
+ /* Reading MIB counters or requested to read. */
+ if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) {
+
+ /* Need to process receive interrupt. */
+ if (port_r_cnt(hw, i))
+ break;
+ hw_priv->counter[i].read = 0;
+
+ /* Finish reading counters. */
+ if (0 == mib->cnt_ptr) {
+ hw_priv->counter[i].read = 2;
+ wake_up_interruptible(
+ &hw_priv->counter[i].counter);
+ }
+ } else if (jiffies >= hw_priv->counter[i].time) {
+ /* Only read MIB counters when the port is connected. */
+ if (media_connected == mib->state)
+ hw_priv->counter[i].read = 1;
+ next_jiffies += HZ * 1 * hw->mib_port_cnt;
+ hw_priv->counter[i].time = next_jiffies;
+
+ /* Port is just disconnected. */
+ } else if (mib->link_down) {
+ mib->link_down = 0;
+
+ /* Read counters one last time after link is lost. */
+ hw_priv->counter[i].read = 1;
+ }
+ }
+}
+
+static void mib_monitor(unsigned long ptr)
+{
+ struct dev_info *hw_priv = (struct dev_info *) ptr;
+
+ mib_read_work(&hw_priv->mib_read);
+
+ /* This is used to verify Wake-on-LAN is working. */
+ if (hw_priv->pme_wait) {
+ if (hw_priv->pme_wait <= jiffies) {
+ hw_clr_wol_pme_status(&hw_priv->hw);
+ hw_priv->pme_wait = 0;
+ }
+ } else if (hw_chk_wol_pme_status(&hw_priv->hw)) {
+
+ /* PME is asserted. Wait 2 seconds to clear it. */
+ hw_priv->pme_wait = jiffies + HZ * 2;
+ }
+
+ ksz_update_timer(&hw_priv->mib_timer_info);
+}
+
+/**
+ * dev_monitor - periodic monitoring
+ * @ptr: Network device pointer.
+ *
+ * This routine is run in a kernel timer to monitor the network device.
+ */
+static void dev_monitor(unsigned long ptr)
+{
+ struct net_device *dev = (struct net_device *) ptr;
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+
+ if (!(hw->features & LINK_INT_WORKING))
+ port_get_link_speed(port);
+ update_link(dev, priv, port);
+
+ ksz_update_timer(&priv->monitor_timer_info);
+}
+
+/*
+ * Linux network device interface functions
+ */
+
+/* Driver exported variables */
+
+static int msg_enable;
+
+static char *macaddr = ":";
+static char *mac1addr = ":";
+
+/*
+ * This enables multiple network device mode for KSZ8842, which contains a
+ * switch with two physical ports. Some users like to take control of the
+ * ports for running Spanning Tree Protocol. The driver will create an
+ * additional eth? device for the other port.
+ *
+ * One limitation is that the network devices cannot have different MTU or
+ * multicast hash table settings.
+ */
+static int multi_dev;
+
+/*
+ * As most users select multiple network device mode to use Spanning Tree
+ * Protocol, this enables a feature in which most unicast and multicast packets
+ * are forwarded inside the switch and not passed to the host. Only packets
+ * that need the host's attention are passed to it. This prevents the host
+ * from wasting CPU time examining each and every incoming packet and doing
+ * the forwarding itself.
+ *
+ * As the hack requires the private bridge header, the driver cannot compile
+ * with just the kernel headers.
+ *
+ * Enabling STP support also turns on multiple network device mode.
+ */
+static int stp;
+
+/*
+ * This enables fast aging in the KSZ8842 switch. It is not clear which
+ * situations need it; however, fast aging is used to flush the dynamic MAC
+ * table when STP support is enabled.
+ */
+static int fast_aging;
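+
+/*
+ * Illustrative usage (not from the original source): loading the driver
+ * with "modprobe ksz884x multi_dev=1 stp=1" creates one network device
+ * per switch port and lets the switch forward traffic between the ports
+ * in hardware.
+ */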
+
+/**
+ * netdev_init - initialize network device.
+ * @dev: Network device.
+ *
+ * This function initializes the network device.
+ *
+ * Return 0 if successful; otherwise an error code indicating failure.
+ */
+static int __init netdev_init(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ /* 500 ms timeout */
+ ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000,
+ dev_monitor, dev);
+
+ /* 500 ms timeout */
+ dev->watchdog_timeo = HZ / 2;
+
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
+
+ /*
+ * Hardware does not really support IPv6 checksum generation, but
+ * the driver actually runs faster with this on.
+ */
+ dev->hw_features |= NETIF_F_IPV6_CSUM;
+
+ dev->features |= dev->hw_features;
+
+ sema_init(&priv->proc_sem, 1);
+
+ priv->mii_if.phy_id_mask = 0x1;
+ priv->mii_if.reg_num_mask = 0x7;
+ priv->mii_if.dev = dev;
+ priv->mii_if.mdio_read = mdio_read;
+ priv->mii_if.mdio_write = mdio_write;
+ priv->mii_if.phy_id = priv->port.first_port + 1;
+
+ priv->msg_enable = netif_msg_init(msg_enable,
+ (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK));
+
+ return 0;
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_init = netdev_init,
+ .ndo_open = netdev_open,
+ .ndo_stop = netdev_close,
+ .ndo_get_stats = netdev_query_statistics,
+ .ndo_start_xmit = netdev_tx,
+ .ndo_tx_timeout = netdev_tx_timeout,
+ .ndo_change_mtu = netdev_change_mtu,
+ .ndo_set_features = netdev_set_features,
+ .ndo_set_mac_address = netdev_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = netdev_ioctl,
+ .ndo_set_rx_mode = netdev_set_rx_mode,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = netdev_netpoll,
+#endif
+};
+
+static void netdev_free(struct net_device *dev)
+{
+ if (dev->watchdog_timeo)
+ unregister_netdev(dev);
+
+ free_netdev(dev);
+}
+
+struct platform_info {
+ struct dev_info dev_info;
+ struct net_device *netdev[SWITCH_PORT_NUM];
+};
+
+static int net_device_present;
+
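+/*
+ * Parse a MAC address module parameter given in loose "xx:xx:xx:xx:xx:xx"
+ * hex form (the default ":" means unset) and store it as the override
+ * address for the given port; the adapter id is folded into the last byte
+ * so multiple adapters get distinct addresses.
+ */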
+static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
+{
+ int i;
+ int j;
+ int got_num;
+ int num;
+
+ i = j = num = got_num = 0;
+ while (j < MAC_ADDR_LEN) {
+ if (macaddr[i]) {
+ int digit;
+
+ got_num = 1;
+ digit = hex_to_bin(macaddr[i]);
+ if (digit >= 0)
+ num = num * 16 + digit;
+ else if (':' == macaddr[i])
+ got_num = 2;
+ else
+ break;
+ } else if (got_num)
+ got_num = 2;
+ else
+ break;
+ if (2 == got_num) {
+ if (MAIN_PORT == port) {
+ hw_priv->hw.override_addr[j++] = (u8) num;
+ hw_priv->hw.override_addr[5] +=
+ hw_priv->hw.id;
+ } else {
+ hw_priv->hw.ksz_switch->other_addr[j++] =
+ (u8) num;
+ hw_priv->hw.ksz_switch->other_addr[5] +=
+ hw_priv->hw.id;
+ }
+ num = got_num = 0;
+ }
+ i++;
+ }
+ if (MAC_ADDR_LEN == j) {
+ if (MAIN_PORT == port)
+ hw_priv->hw.mac_override = 1;
+ }
+}
+
+#define KS884X_DMA_MASK (~0x0UL)
+
+static void read_other_addr(struct ksz_hw *hw)
+{
+ int i;
+ u16 data[3];
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ for (i = 0; i < 3; i++)
+ data[i] = eeprom_read(hw, i + EEPROM_DATA_OTHER_MAC_ADDR);
+ if ((data[0] || data[1] || data[2]) && data[0] != 0xffff) {
+ sw->other_addr[5] = (u8) data[0];
+ sw->other_addr[4] = (u8)(data[0] >> 8);
+ sw->other_addr[3] = (u8) data[1];
+ sw->other_addr[2] = (u8)(data[1] >> 8);
+ sw->other_addr[1] = (u8) data[2];
+ sw->other_addr[0] = (u8)(data[2] >> 8);
+ }
+}
+
+#ifndef PCI_VENDOR_ID_MICREL_KS
+#define PCI_VENDOR_ID_MICREL_KS 0x16c6
+#endif
+
+static int __devinit pcidev_init(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct net_device *dev;
+ struct dev_priv *priv;
+ struct dev_info *hw_priv;
+ struct ksz_hw *hw;
+ struct platform_info *info;
+ struct ksz_port *port;
+ unsigned long reg_base;
+ unsigned long reg_len;
+ int cnt;
+ int i;
+ int mib_port_count;
+ int pi;
+ int port_count;
+ int result;
+ char banner[sizeof(version)];
+ struct ksz_switch *sw = NULL;
+
+ result = pci_enable_device(pdev);
+ if (result)
+ return result;
+
+ result = -ENODEV;
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+ return result;
+
+ reg_base = pci_resource_start(pdev, 0);
+ reg_len = pci_resource_len(pdev, 0);
+ if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0)
+ return result;
+
+ if (!request_mem_region(reg_base, reg_len, DRV_NAME))
+ return result;
+ pci_set_master(pdev);
+
+ result = -ENOMEM;
+
+ info = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
+ if (!info)
+ goto pcidev_init_dev_err;
+
+ hw_priv = &info->dev_info;
+ hw_priv->pdev = pdev;
+
+ hw = &hw_priv->hw;
+
+ hw->io = ioremap(reg_base, reg_len);
+ if (!hw->io)
+ goto pcidev_init_io_err;
+
+ cnt = hw_init(hw);
+ if (!cnt) {
+ if (msg_enable & NETIF_MSG_PROBE)
+ pr_alert("chip not detected\n");
+ result = -ENODEV;
+ goto pcidev_init_alloc_err;
+ }
+
+ snprintf(banner, sizeof(banner), "%s", version);
+ banner[13] = cnt + '0'; /* Replace x in "Micrel KSZ884x" */
+ dev_info(&hw_priv->pdev->dev, "%s\n", banner);
+ dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
+
+ /* Assume device is KSZ8841. */
+ hw->dev_count = 1;
+ port_count = 1;
+ mib_port_count = 1;
+ hw->addr_list_size = 0;
+ hw->mib_cnt = PORT_COUNTER_NUM;
+ hw->mib_port_cnt = 1;
+
+ /* KSZ8842 has a switch with multiple ports. */
+ if (2 == cnt) {
+ if (fast_aging)
+ hw->overrides |= FAST_AGING;
+
+ hw->mib_cnt = TOTAL_PORT_COUNTER_NUM;
+
+ /* Multiple network device interfaces are required. */
+ if (multi_dev) {
+ hw->dev_count = SWITCH_PORT_NUM;
+ hw->addr_list_size = SWITCH_PORT_NUM - 1;
+ }
+
+ /* Single network device has multiple ports. */
+ if (1 == hw->dev_count) {
+ port_count = SWITCH_PORT_NUM;
+ mib_port_count = SWITCH_PORT_NUM;
+ }
+ hw->mib_port_cnt = TOTAL_PORT_NUM;
+ hw->ksz_switch = kzalloc(sizeof(struct ksz_switch), GFP_KERNEL);
+ if (!hw->ksz_switch)
+ goto pcidev_init_alloc_err;
+
+ sw = hw->ksz_switch;
+ }
+ for (i = 0; i < hw->mib_port_cnt; i++)
+ hw->port_mib[i].mib_start = 0;
+
+ hw->parent = hw_priv;
+
+ /* Default MTU is 1500. */
+ hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3;
+
+ if (ksz_alloc_mem(hw_priv))
+ goto pcidev_init_mem_err;
+
+ hw_priv->hw.id = net_device_present;
+
+ spin_lock_init(&hw_priv->hwlock);
+ mutex_init(&hw_priv->lock);
+
+ /* tasklet is enabled. */
+ tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
+ (unsigned long) hw_priv);
+ tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
+ (unsigned long) hw_priv);
+
+ /* tasklet_enable will decrement the atomic counter. */
+ tasklet_disable(&hw_priv->rx_tasklet);
+ tasklet_disable(&hw_priv->tx_tasklet);
+
+ for (i = 0; i < TOTAL_PORT_NUM; i++)
+ init_waitqueue_head(&hw_priv->counter[i].counter);
+
+ if (macaddr[0] != ':')
+ get_mac_addr(hw_priv, macaddr, MAIN_PORT);
+
+ /* Read MAC address and initialize override address if not overridden. */
+ hw_read_addr(hw);
+
+ /* Multiple device interfaces mode requires a second MAC address. */
+ if (hw->dev_count > 1) {
+ memcpy(sw->other_addr, hw->override_addr, MAC_ADDR_LEN);
+ read_other_addr(hw);
+ if (mac1addr[0] != ':')
+ get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
+ }
+
+ hw_setup(hw);
+ if (hw->ksz_switch)
+ sw_setup(hw);
+ else {
+ hw_priv->wol_support = WOL_SUPPORT;
+ hw_priv->wol_enable = 0;
+ }
+
+ INIT_WORK(&hw_priv->mib_read, mib_read_work);
+
+ /* 500 ms timeout */
+ ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000,
+ mib_monitor, hw_priv);
+
+ for (i = 0; i < hw->dev_count; i++) {
+ dev = alloc_etherdev(sizeof(struct dev_priv));
+ if (!dev)
+ goto pcidev_init_reg_err;
+ info->netdev[i] = dev;
+
+ priv = netdev_priv(dev);
+ priv->adapter = hw_priv;
+ priv->id = net_device_present++;
+
+ port = &priv->port;
+ port->port_cnt = port_count;
+ port->mib_port_cnt = mib_port_count;
+ port->first_port = i;
+ port->flow_ctrl = PHY_FLOW_CTRL;
+
+ port->hw = hw;
+ port->linked = &hw->port_info[port->first_port];
+
+ for (cnt = 0, pi = i; cnt < port_count; cnt++, pi++) {
+ hw->port_info[pi].port_id = pi;
+ hw->port_info[pi].pdev = dev;
+ hw->port_info[pi].state = media_disconnected;
+ }
+
+ dev->mem_start = (unsigned long) hw->io;
+ dev->mem_end = dev->mem_start + reg_len - 1;
+ dev->irq = pdev->irq;
+ if (MAIN_PORT == i)
+ memcpy(dev->dev_addr, hw_priv->hw.override_addr,
+ MAC_ADDR_LEN);
+ else {
+ memcpy(dev->dev_addr, sw->other_addr,
+ MAC_ADDR_LEN);
+ if (!memcmp(sw->other_addr, hw->override_addr,
+ MAC_ADDR_LEN))
+ dev->dev_addr[5] += port->first_port;
+ }
+
+ dev->netdev_ops = &netdev_ops;
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ if (register_netdev(dev))
+ goto pcidev_init_reg_err;
+ port_set_power_saving(port, true);
+ }
+
+ pci_dev_get(hw_priv->pdev);
+ pci_set_drvdata(pdev, info);
+ return 0;
+
+pcidev_init_reg_err:
+ for (i = 0; i < hw->dev_count; i++) {
+ if (info->netdev[i]) {
+ netdev_free(info->netdev[i]);
+ info->netdev[i] = NULL;
+ }
+ }
+
+pcidev_init_mem_err:
+ ksz_free_mem(hw_priv);
+ kfree(hw->ksz_switch);
+
+pcidev_init_alloc_err:
+ iounmap(hw->io);
+
+pcidev_init_io_err:
+ kfree(info);
+
+pcidev_init_dev_err:
+ release_mem_region(reg_base, reg_len);
+
+ return result;
+}
+
+static void pcidev_exit(struct pci_dev *pdev)
+{
+ int i;
+ struct platform_info *info = pci_get_drvdata(pdev);
+ struct dev_info *hw_priv = &info->dev_info;
+
+ pci_set_drvdata(pdev, NULL);
+
+ release_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ for (i = 0; i < hw_priv->hw.dev_count; i++) {
+ if (info->netdev[i])
+ netdev_free(info->netdev[i]);
+ }
+ if (hw_priv->hw.io)
+ iounmap(hw_priv->hw.io);
+ ksz_free_mem(hw_priv);
+ kfree(hw_priv->hw.ksz_switch);
+ pci_dev_put(hw_priv->pdev);
+ kfree(info);
+}
+
+#ifdef CONFIG_PM
+static int pcidev_resume(struct pci_dev *pdev)
+{
+ int i;
+ struct platform_info *info = pci_get_drvdata(pdev);
+ struct dev_info *hw_priv = &info->dev_info;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_wake(pdev, PCI_D0, 0);
+
+ if (hw_priv->wol_enable)
+ hw_cfg_wol_pme(hw, 0);
+ for (i = 0; i < hw->dev_count; i++) {
+ if (info->netdev[i]) {
+ struct net_device *dev = info->netdev[i];
+
+ if (netif_running(dev)) {
+ netdev_open(dev);
+ netif_device_attach(dev);
+ }
+ }
+ }
+ return 0;
+}
+
+static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int i;
+ struct platform_info *info = pci_get_drvdata(pdev);
+ struct dev_info *hw_priv = &info->dev_info;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ /* Need to find a way to retrieve the device IP address. */
+ static const u8 net_addr[] = { 192, 168, 1, 1 };
+
+ for (i = 0; i < hw->dev_count; i++) {
+ if (info->netdev[i]) {
+ struct net_device *dev = info->netdev[i];
+
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ netdev_close(dev);
+ }
+ }
+ }
+ if (hw_priv->wol_enable) {
+ hw_enable_wol(hw, hw_priv->wol_enable, net_addr);
+ hw_cfg_wol_pme(hw, 1);
+ }
+
+ pci_save_state(pdev);
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+#endif
+
+static char pcidev_name[] = "ksz884xp";
+
+static struct pci_device_id pcidev_table[] = {
+ { PCI_VENDOR_ID_MICREL_KS, 0x8841,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_MICREL_KS, 0x8842,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, pcidev_table);
+
+static struct pci_driver pci_device_driver = {
+#ifdef CONFIG_PM
+ .suspend = pcidev_suspend,
+ .resume = pcidev_resume,
+#endif
+ .name = pcidev_name,
+ .id_table = pcidev_table,
+ .probe = pcidev_init,
+ .remove = pcidev_exit
+};
+
+static int __init ksz884x_init_module(void)
+{
+ return pci_register_driver(&pci_device_driver);
+}
+
+static void __exit ksz884x_cleanup_module(void)
+{
+ pci_unregister_driver(&pci_device_driver);
+}
+
+module_init(ksz884x_init_module);
+module_exit(ksz884x_cleanup_module);
+
+MODULE_DESCRIPTION("KSZ8841/2 PCI network driver");
+MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>");
+MODULE_LICENSE("GPL");
+
+module_param_named(message, msg_enable, int, 0);
+MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
+
+module_param(macaddr, charp, 0);
+module_param(mac1addr, charp, 0);
+module_param(fast_aging, int, 0);
+module_param(multi_dev, int, 0);
+module_param(stp, int, 0);
+MODULE_PARM_DESC(macaddr, "MAC address");
+MODULE_PARM_DESC(mac1addr, "Second MAC address");
+MODULE_PARM_DESC(fast_aging, "Fast aging");
+MODULE_PARM_DESC(multi_dev, "Multiple device interfaces");
+MODULE_PARM_DESC(stp, "STP support");
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
new file mode 100644
index 000000000000..8163fd0f453f
--- /dev/null
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -0,0 +1,38 @@
+#
+# Microchip network device configuration
+#
+
+config NET_VENDOR_MICROCHIP
+ bool "Microchip devices"
+ default y
+ depends on SPI && EXPERIMENTAL
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Microchip cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_MICROCHIP
+
+config ENC28J60
+ tristate "ENC28J60 support"
+ depends on SPI && EXPERIMENTAL
+ select CRC32
+ ---help---
+ Support for the Microchip ENC28J60 Ethernet chip.
+
+ To compile this driver as a module, choose M here. The module will be
+ called enc28j60.
+
+config ENC28J60_WRITEVERIFY
+ bool "Enable write verify"
+ depends on ENC28J60
+ ---help---
+ Enable the verify step after the buffer write; useful for debugging purposes.
+ If unsure, say N.
+
+endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
new file mode 100644
index 000000000000..573d4292b9ea
--- /dev/null
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Microchip network device drivers.
+#
+
+obj-$(CONFIG_ENC28J60) += enc28j60.o
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
new file mode 100644
index 000000000000..50055e0282ed
--- /dev/null
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -0,0 +1,1667 @@
+/*
+ * Microchip ENC28J60 ethernet driver (MAC + PHY)
+ *
+ * Copyright (C) 2007 Eurek srl
+ * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com>
+ * based on enc28j60.c written by David Anders for 2.4 kernel version
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * $Id: enc28j60.c,v 1.22 2007/12/20 10:47:01 claudio Exp $
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+
+#include "enc28j60_hw.h"
+
+#define DRV_NAME "enc28j60"
+#define DRV_VERSION "1.01"
+
+#define SPI_OPLEN 1
+
+#define ENC28J60_MSG_DEFAULT \
+ (NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK)
+
+/* Buffer size required for the largest SPI transfer (i.e., reading a
+ * frame). */
+#define SPI_TRANSFER_BUF_LEN (4 + MAX_FRAMELEN)
+
+#define TX_TIMEOUT (4 * HZ)
+
+/* Max TX retries in case of collision, as suggested by the errata datasheet */
+#define MAX_TX_RETRYCOUNT 16
+
+enum {
+ RXFILTER_NORMAL,
+ RXFILTER_MULTI,
+ RXFILTER_PROMISC
+};
+
+/* Driver local data */
+struct enc28j60_net {
+ struct net_device *netdev;
+ struct spi_device *spi;
+ struct mutex lock;
+ struct sk_buff *tx_skb;
+ struct work_struct tx_work;
+ struct work_struct irq_work;
+ struct work_struct setrx_work;
+ struct work_struct restart_work;
+ u8 bank; /* current register bank selected */
+ u16 next_pk_ptr; /* next packet pointer within FIFO */
+ u16 max_pk_counter; /* statistics: max packet counter */
+ u16 tx_retry_count;
+ bool hw_enable;
+ bool full_duplex;
+ int rxfilter;
+ u32 msg_enable;
+ u8 spi_transfer_buf[SPI_TRANSFER_BUF_LEN];
+};
+
+/* use ethtool to change the level for any given device */
+static struct {
+ u32 msg_enable;
+} debug = { -1 };
+
+/*
+ * SPI read buffer
+ * wait for the SPI transfer and copy received data to destination
+ */
+static int
+spi_read_buf(struct enc28j60_net *priv, int len, u8 *data)
+{
+ u8 *rx_buf = priv->spi_transfer_buf + 4;
+ u8 *tx_buf = priv->spi_transfer_buf;
+ struct spi_transfer t = {
+ .tx_buf = tx_buf,
+ .rx_buf = rx_buf,
+ .len = SPI_OPLEN + len,
+ };
+ struct spi_message msg;
+ int ret;
+
+ tx_buf[0] = ENC28J60_READ_BUF_MEM;
+ tx_buf[1] = tx_buf[2] = tx_buf[3] = 0; /* don't care */
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&t, &msg);
+ ret = spi_sync(priv->spi, &msg);
+ if (ret == 0) {
+ memcpy(data, &rx_buf[SPI_OPLEN], len);
+ ret = msg.status;
+ }
+ if (ret && netif_msg_drv(priv))
+ printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+/*
+ * SPI write buffer
+ */
+static int spi_write_buf(struct enc28j60_net *priv, int len,
+ const u8 *data)
+{
+ int ret;
+
+ if (len > SPI_TRANSFER_BUF_LEN - 1 || len <= 0)
+ ret = -EINVAL;
+ else {
+ priv->spi_transfer_buf[0] = ENC28J60_WRITE_BUF_MEM;
+ memcpy(&priv->spi_transfer_buf[1], data, len);
+ ret = spi_write(priv->spi, priv->spi_transfer_buf, len + 1);
+ if (ret && netif_msg_drv(priv))
+ printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
+ __func__, ret);
+ }
+ return ret;
+}
+
+/*
+ * basic SPI read operation
+ */
+static u8 spi_read_op(struct enc28j60_net *priv, u8 op,
+ u8 addr)
+{
+ u8 tx_buf[2];
+ u8 rx_buf[4];
+ u8 val = 0;
+ int ret;
+ int slen = SPI_OPLEN;
+
+ /* MAC and MII registers (flagged by SPRD_MASK) return a dummy byte first. */
+ if (addr & SPRD_MASK)
+ slen++;
+
+ tx_buf[0] = op | (addr & ADDR_MASK);
+ ret = spi_write_then_read(priv->spi, tx_buf, 1, rx_buf, slen);
+ if (ret)
+ printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
+ __func__, ret);
+ else
+ val = rx_buf[slen - 1];
+
+ return val;
+}
+
+/*
+ * basic SPI write operation
+ */
+static int spi_write_op(struct enc28j60_net *priv, u8 op,
+ u8 addr, u8 val)
+{
+ int ret;
+
+ priv->spi_transfer_buf[0] = op | (addr & ADDR_MASK);
+ priv->spi_transfer_buf[1] = val;
+ ret = spi_write(priv->spi, priv->spi_transfer_buf, 2);
+ if (ret && netif_msg_drv(priv))
+ printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
+ __func__, ret);
+ return ret;
+}
+
+static void enc28j60_soft_reset(struct enc28j60_net *priv)
+{
+ if (netif_msg_hw(priv))
+ printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
+
+ spi_write_op(priv, ENC28J60_SOFT_RESET, 0, ENC28J60_SOFT_RESET);
+ /* Errata workaround #1: the CLKRDY check is unreliable,
+ * so delay at least 1 ms instead */
+ udelay(2000);
+}
+
+/*
+ * select the current register bank if necessary
+ */
+static void enc28j60_set_bank(struct enc28j60_net *priv, u8 addr)
+{
+ u8 b = (addr & BANK_MASK) >> 5;
+
+ /* These registers (EIE, EIR, ESTAT, ECON2, ECON1)
+ * are present in all banks, no need to switch bank
+ */
+ if (addr >= EIE && addr <= ECON1)
+ return;
+
+ /* Clear or set each bank selection bit as needed */
+ if ((b & ECON1_BSEL0) != (priv->bank & ECON1_BSEL0)) {
+ if (b & ECON1_BSEL0)
+ spi_write_op(priv, ENC28J60_BIT_FIELD_SET, ECON1,
+ ECON1_BSEL0);
+ else
+ spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, ECON1,
+ ECON1_BSEL0);
+ }
+ if ((b & ECON1_BSEL1) != (priv->bank & ECON1_BSEL1)) {
+ if (b & ECON1_BSEL1)
+ spi_write_op(priv, ENC28J60_BIT_FIELD_SET, ECON1,
+ ECON1_BSEL1);
+ else
+ spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, ECON1,
+ ECON1_BSEL1);
+ }
+ priv->bank = b;
+}
+
+/*
+ * Register access routines through the SPI bus.
+ * Every register access comes in two flavours:
+ * - nolock_xxx: caller needs to invoke mutex_lock, usually to access
+ * atomically more than one register
+ * - locked_xxx: caller doesn't need to invoke mutex_lock, single access
+ *
+ * Some registers can be accessed through the bit field clear and
+ * bit field set to avoid a read modify write cycle.
+ */
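+
+/*
+ * For example, enc28j60_phy_read() below takes priv->lock once and then
+ * uses only the nolock_ variants for its whole read sequence.
+ */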
+
+/*
+ * Register bit field Set
+ */
+static void nolock_reg_bfset(struct enc28j60_net *priv,
+ u8 addr, u8 mask)
+{
+ enc28j60_set_bank(priv, addr);
+ spi_write_op(priv, ENC28J60_BIT_FIELD_SET, addr, mask);
+}
+
+static void locked_reg_bfset(struct enc28j60_net *priv,
+ u8 addr, u8 mask)
+{
+ mutex_lock(&priv->lock);
+ nolock_reg_bfset(priv, addr, mask);
+ mutex_unlock(&priv->lock);
+}
+
+/*
+ * Register bit field Clear
+ */
+static void nolock_reg_bfclr(struct enc28j60_net *priv,
+ u8 addr, u8 mask)
+{
+ enc28j60_set_bank(priv, addr);
+ spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, addr, mask);
+}
+
+static void locked_reg_bfclr(struct enc28j60_net *priv,
+ u8 addr, u8 mask)
+{
+ mutex_lock(&priv->lock);
+ nolock_reg_bfclr(priv, addr, mask);
+ mutex_unlock(&priv->lock);
+}
+
+/*
+ * Register byte read
+ */
+static int nolock_regb_read(struct enc28j60_net *priv,
+ u8 address)
+{
+ enc28j60_set_bank(priv, address);
+ return spi_read_op(priv, ENC28J60_READ_CTRL_REG, address);
+}
+
+static int locked_regb_read(struct enc28j60_net *priv,
+ u8 address)
+{
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = nolock_regb_read(priv, address);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+/*
+ * Register word read
+ */
+static int nolock_regw_read(struct enc28j60_net *priv,
+ u8 address)
+{
+ int rl, rh;
+
+ enc28j60_set_bank(priv, address);
+ rl = spi_read_op(priv, ENC28J60_READ_CTRL_REG, address);
+ rh = spi_read_op(priv, ENC28J60_READ_CTRL_REG, address + 1);
+
+ return (rh << 8) | rl;
+}
+
+static int locked_regw_read(struct enc28j60_net *priv,
+ u8 address)
+{
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = nolock_regw_read(priv, address);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+/*
+ * Register byte write
+ */
+static void nolock_regb_write(struct enc28j60_net *priv,
+ u8 address, u8 data)
+{
+ enc28j60_set_bank(priv, address);
+ spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, data);
+}
+
+static void locked_regb_write(struct enc28j60_net *priv,
+ u8 address, u8 data)
+{
+ mutex_lock(&priv->lock);
+ nolock_regb_write(priv, address, data);
+ mutex_unlock(&priv->lock);
+}
+
+/*
+ * Register word write
+ */
+static void nolock_regw_write(struct enc28j60_net *priv,
+ u8 address, u16 data)
+{
+ enc28j60_set_bank(priv, address);
+ spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, (u8) data);
+ spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address + 1,
+ (u8) (data >> 8));
+}
+
+static void locked_regw_write(struct enc28j60_net *priv,
+ u8 address, u16 data)
+{
+ mutex_lock(&priv->lock);
+ nolock_regw_write(priv, address, data);
+ mutex_unlock(&priv->lock);
+}
+
+/*
+ * Buffer memory read
+ * Select the starting address and execute a SPI buffer read
+ */
+static void enc28j60_mem_read(struct enc28j60_net *priv,
+ u16 addr, int len, u8 *data)
+{
+ mutex_lock(&priv->lock);
+ nolock_regw_write(priv, ERDPTL, addr);
+#ifdef CONFIG_ENC28J60_WRITEVERIFY
+ if (netif_msg_drv(priv)) {
+ u16 reg;
+ reg = nolock_regw_read(priv, ERDPTL);
+ if (reg != addr)
+ printk(KERN_DEBUG DRV_NAME ": %s() error writing ERDPT "
+ "(0x%04x - 0x%04x)\n", __func__, reg, addr);
+ }
+#endif
+ spi_read_buf(priv, len, data);
+ mutex_unlock(&priv->lock);
+}
+
+/*
+ * Write packet to enc28j60 TX buffer memory
+ */
+static void
+enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data)
+{
+ mutex_lock(&priv->lock);
+ /* Set the write pointer to start of transmit buffer area */
+ nolock_regw_write(priv, EWRPTL, TXSTART_INIT);
+#ifdef CONFIG_ENC28J60_WRITEVERIFY
+ if (netif_msg_drv(priv)) {
+ u16 reg;
+ reg = nolock_regw_read(priv, EWRPTL);
+ if (reg != TXSTART_INIT)
+ printk(KERN_DEBUG DRV_NAME
+ ": %s() ERWPT:0x%04x != 0x%04x\n",
+ __func__, reg, TXSTART_INIT);
+ }
+#endif
+ /* Set the TXND pointer to correspond to the packet size given */
+ nolock_regw_write(priv, ETXNDL, TXSTART_INIT + len);
+ /* write per-packet control byte */
+ spi_write_op(priv, ENC28J60_WRITE_BUF_MEM, 0, 0x00);
+ if (netif_msg_hw(priv))
+ printk(KERN_DEBUG DRV_NAME
+ ": %s() after control byte ERWPT:0x%04x\n",
+ __func__, nolock_regw_read(priv, EWRPTL));
+ /* copy the packet into the transmit buffer */
+ spi_write_buf(priv, len, data);
+ if (netif_msg_hw(priv))
+ printk(KERN_DEBUG DRV_NAME
+ ": %s() after write packet ERWPT:0x%04x, len=%d\n",
+ __func__, nolock_regw_read(priv, EWRPTL), len);
+ mutex_unlock(&priv->lock);
+}
+
+static unsigned long msec20_to_jiffies;
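+/* Presumably initialized to msecs_to_jiffies(20) during probe (not shown in this hunk). */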
+
+static int poll_ready(struct enc28j60_net *priv, u8 reg, u8 mask, u8 val)
+{
+ unsigned long timeout = jiffies + msec20_to_jiffies;
+
+ /* 20 msec timeout read */
+ while ((nolock_regb_read(priv, reg) & mask) != val) {
+ if (time_after(jiffies, timeout)) {
+ if (netif_msg_drv(priv))
+ dev_dbg(&priv->spi->dev,
+ "reg %02x ready timeout!\n", reg);
+ return -ETIMEDOUT;
+ }
+ cpu_relax();
+ }
+ return 0;
+}
+
+/*
+ * Wait until the PHY operation is complete.
+ * Returns 1 when the PHY is ready, 0 on timeout.
+ */
+static int wait_phy_ready(struct enc28j60_net *priv)
+{
+ return poll_ready(priv, MISTAT, MISTAT_BUSY, 0) ? 0 : 1;
+}
+
+/*
+ * PHY register read
+ * PHY registers are not accessed directly, but through the MII
+ */
+static u16 enc28j60_phy_read(struct enc28j60_net *priv, u8 address)
+{
+ u16 ret;
+
+ mutex_lock(&priv->lock);
+ /* set the PHY register address */
+ nolock_regb_write(priv, MIREGADR, address);
+ /* start the register read operation */
+ nolock_regb_write(priv, MICMD, MICMD_MIIRD);
+ /* wait until the PHY read completes */
+ wait_phy_ready(priv);
+ /* quit reading */
+ nolock_regb_write(priv, MICMD, 0x00);
+ /* return the data */
+ ret = nolock_regw_read(priv, MIRDL);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int enc28j60_phy_write(struct enc28j60_net *priv, u8 address, u16 data)
+{
+ int ret;
+
+ mutex_lock(&priv->lock);
+ /* set the PHY register address */
+ nolock_regb_write(priv, MIREGADR, address);
+ /* write the PHY data */
+ nolock_regw_write(priv, MIWRL, data);
+ /* wait until the PHY write completes and return */
+ ret = wait_phy_ready(priv);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+/*
+ * Program the hardware MAC address from dev->dev_addr.
+ */
+static int enc28j60_set_hw_macaddr(struct net_device *ndev)
+{
+ int ret;
+ struct enc28j60_net *priv = netdev_priv(ndev);
+
+ mutex_lock(&priv->lock);
+ if (!priv->hw_enable) {
+ if (netif_msg_drv(priv))
+ printk(KERN_INFO DRV_NAME
+ ": %s: Setting MAC address to %pM\n",
+ ndev->name, ndev->dev_addr);
+ /* NOTE: MAC address in ENC28J60 is byte-backward */
+ nolock_regb_write(priv, MAADR5, ndev->dev_addr[0]);
+ nolock_regb_write(priv, MAADR4, ndev->dev_addr[1]);
+ nolock_regb_write(priv, MAADR3, ndev->dev_addr[2]);
+ nolock_regb_write(priv, MAADR2, ndev->dev_addr[3]);
+ nolock_regb_write(priv, MAADR1, ndev->dev_addr[4]);
+ nolock_regb_write(priv, MAADR0, ndev->dev_addr[5]);
+ ret = 0;
+ } else {
+ if (netif_msg_drv(priv))
+ printk(KERN_DEBUG DRV_NAME
+ ": %s() Hardware must be disabled to set "
+ "Mac address\n", __func__);
+ ret = -EBUSY;
+ }
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+/*
+ * Store the new hardware address in dev->dev_addr, and update the MAC.
+ */
+static int enc28j60_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *address = addr;
+
+ if (netif_running(dev))
+ return -EBUSY;
+ if (!is_valid_ether_addr(address->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
+ return enc28j60_set_hw_macaddr(dev);
+}
+
+/*
+ * Debug routine to dump useful register contents
+ */
+static void enc28j60_dump_regs(struct enc28j60_net *priv, const char *msg)
+{
+ mutex_lock(&priv->lock);
+ printk(KERN_DEBUG DRV_NAME " %s\n"
+ "HwRevID: 0x%02x\n"
+ "Cntrl: ECON1 ECON2 ESTAT EIR EIE\n"
+ " 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n"
+ "MAC : MACON1 MACON3 MACON4\n"
+ " 0x%02x 0x%02x 0x%02x\n"
+ "Rx : ERXST ERXND ERXWRPT ERXRDPT ERXFCON EPKTCNT MAMXFL\n"
+ " 0x%04x 0x%04x 0x%04x 0x%04x "
+ "0x%02x 0x%02x 0x%04x\n"
+ "Tx : ETXST ETXND MACLCON1 MACLCON2 MAPHSUP\n"
+ " 0x%04x 0x%04x 0x%02x 0x%02x 0x%02x\n",
+ msg, nolock_regb_read(priv, EREVID),
+ nolock_regb_read(priv, ECON1), nolock_regb_read(priv, ECON2),
+ nolock_regb_read(priv, ESTAT), nolock_regb_read(priv, EIR),
+ nolock_regb_read(priv, EIE), nolock_regb_read(priv, MACON1),
+ nolock_regb_read(priv, MACON3), nolock_regb_read(priv, MACON4),
+ nolock_regw_read(priv, ERXSTL), nolock_regw_read(priv, ERXNDL),
+ nolock_regw_read(priv, ERXWRPTL),
+ nolock_regw_read(priv, ERXRDPTL),
+ nolock_regb_read(priv, ERXFCON),
+ nolock_regb_read(priv, EPKTCNT),
+ nolock_regw_read(priv, MAMXFLL), nolock_regw_read(priv, ETXSTL),
+ nolock_regw_read(priv, ETXNDL),
+ nolock_regb_read(priv, MACLCON1),
+ nolock_regb_read(priv, MACLCON2),
+ nolock_regb_read(priv, MAPHSUP));
+ mutex_unlock(&priv->lock);
+}
+
+/*
+ * ERXRDPT must always be set to an odd address; refer to the errata datasheet.
+ */
+static u16 erxrdpt_workaround(u16 next_packet_ptr, u16 start, u16 end)
+{
+ u16 erxrdpt;
+
+ if ((next_packet_ptr - 1 < start) || (next_packet_ptr - 1 > end))
+ erxrdpt = end;
+ else
+ erxrdpt = next_packet_ptr - 1;
+
+ return erxrdpt;
+}
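+
+/*
+ * Example: right after nolock_rxfifo_init() below sets next_pk_ptr to
+ * start, next_packet_ptr - 1 falls outside [start, end], so end is
+ * returned instead; the RX buffer end is typically chosen odd so that
+ * ERXRDPT stays odd.
+ */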
+
+/*
+ * Calculate wrap around when reading beyond the end of the RX buffer
+ */
+static u16 rx_packet_start(u16 ptr)
+{
+ if (ptr + RSV_SIZE > RXEND_INIT)
+ return (ptr + RSV_SIZE) - (RXEND_INIT - RXSTART_INIT + 1);
+ else
+ return ptr + RSV_SIZE;
+}
+
+static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
+{
+ u16 erxrdpt;
+
+ if (start > 0x1FFF || end > 0x1FFF || start > end) {
+ if (netif_msg_drv(priv))
+ printk(KERN_ERR DRV_NAME ": %s(%d, %d) RXFIFO "
+ "bad parameters!\n", __func__, start, end);
+ return;
+ }
+ /* set receive buffer start + end */
+ priv->next_pk_ptr = start;
+ nolock_regw_write(priv, ERXSTL, start);
+ erxrdpt = erxrdpt_workaround(priv->next_pk_ptr, start, end);
+ nolock_regw_write(priv, ERXRDPTL, erxrdpt);
+ nolock_regw_write(priv, ERXNDL, end);
+}
+
+static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
+{
+ if (start > 0x1FFF || end > 0x1FFF || start > end) {
+ if (netif_msg_drv(priv))
+ printk(KERN_ERR DRV_NAME ": %s(%d, %d) TXFIFO "
+ "bad parameters!\n", __func__, start, end);
+ return;
+ }
+ /* set transmit buffer start + end */
+ nolock_regw_write(priv, ETXSTL, start);
+ nolock_regw_write(priv, ETXNDL, end);
+}
+
+/*
+ * Low power mode shrinks power consumption by about 100x, so we'd like
+ * the chip to be in that mode whenever it's inactive. (However, we
+ * can't stay in low power mode during suspend with WOL active.)
+ */
+static void enc28j60_lowpower(struct enc28j60_net *priv, bool is_low)
+{
+ if (netif_msg_drv(priv))
+ dev_dbg(&priv->spi->dev, "%s power...\n",
+ is_low ? "low" : "high");
+
+ mutex_lock(&priv->lock);
+ if (is_low) {
+ nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
+ poll_ready(priv, ESTAT, ESTAT_RXBUSY, 0);
+ poll_ready(priv, ECON1, ECON1_TXRTS, 0);
+ /* ECON2_VRPS was set during initialization */
+ nolock_reg_bfset(priv, ECON2, ECON2_PWRSV);
+ } else {
+ nolock_reg_bfclr(priv, ECON2, ECON2_PWRSV);
+ poll_ready(priv, ESTAT, ESTAT_CLKRDY, ESTAT_CLKRDY);
+ /* caller sets ECON1_RXEN */
+ }
+ mutex_unlock(&priv->lock);
+}
+
+static int enc28j60_hw_init(struct enc28j60_net *priv)
+{
+ u8 reg;
+
+ if (netif_msg_drv(priv))
+ printk(KERN_DEBUG DRV_NAME ": %s() - %s\n", __func__,
+ priv->full_duplex ? "FullDuplex" : "HalfDuplex");
+
+ mutex_lock(&priv->lock);
+ /* first reset the chip */
+ enc28j60_soft_reset(priv);
+ /* Clear ECON1 */
+ spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, ECON1, 0x00);
+ priv->bank = 0;
+ priv->hw_enable = false;
+ priv->tx_retry_count = 0;
+ priv->max_pk_counter = 0;
+ priv->rxfilter = RXFILTER_NORMAL;
+ /* enable address auto increment and voltage regulator powersave */
+ nolock_regb_write(priv, ECON2, ECON2_AUTOINC | ECON2_VRPS);
+
+ nolock_rxfifo_init(priv, RXSTART_INIT, RXEND_INIT);
+ nolock_txfifo_init(priv, TXSTART_INIT, TXEND_INIT);
+ mutex_unlock(&priv->lock);
+
+ /*
+ * Check the RevID.
+ * If it's 0x00 or 0xFF, the enc28j60 is probably not mounted or is
+ * damaged.
+ */
+ reg = locked_regb_read(priv, EREVID);
+ if (netif_msg_drv(priv))
+ printk(KERN_INFO DRV_NAME ": chip RevID: 0x%02x\n", reg);
+ if (reg == 0x00 || reg == 0xff) {
+ if (netif_msg_drv(priv))
+ printk(KERN_DEBUG DRV_NAME ": %s() Invalid RevId %d\n",
+ __func__, reg);
+ return 0;
+ }
+
+ /* default filter mode: (unicast OR broadcast) AND crc valid */
+ locked_regb_write(priv, ERXFCON,
+ ERXFCON_UCEN | ERXFCON_CRCEN | ERXFCON_BCEN);
+
+ /* enable MAC receive */
+ locked_regb_write(priv, MACON1,
+ MACON1_MARXEN | MACON1_TXPAUS | MACON1_RXPAUS);
+ /* enable automatic padding and CRC operations */
+ if (priv->full_duplex) {
+ locked_regb_write(priv, MACON3,
+ MACON3_PADCFG0 | MACON3_TXCRCEN |
+ MACON3_FRMLNEN | MACON3_FULDPX);
+ /* set inter-frame gap (non-back-to-back) */
+ locked_regb_write(priv, MAIPGL, 0x12);
+ /* set inter-frame gap (back-to-back) */
+ locked_regb_write(priv, MABBIPG, 0x15);
+ } else {
+ locked_regb_write(priv, MACON3,
+ MACON3_PADCFG0 | MACON3_TXCRCEN |
+ MACON3_FRMLNEN);
+ locked_regb_write(priv, MACON4, 1 << 6); /* DEFER bit */
+ /* set inter-frame gap (non-back-to-back) */
+ locked_regw_write(priv, MAIPGL, 0x0C12);
+ /* set inter-frame gap (back-to-back) */
+ locked_regb_write(priv, MABBIPG, 0x12);
+ }
+ /*
+ * MACLCON1 (default)
+ * MACLCON2 (default)
+ * Set the maximum packet size which the controller will accept
+ */
+ locked_regw_write(priv, MAMXFLL, MAX_FRAMELEN);
+
+ /* Configure LEDs */
+ if (!enc28j60_phy_write(priv, PHLCON, ENC28J60_LAMPS_MODE))
+ return 0;
+
+ if (priv->full_duplex) {
+ if (!enc28j60_phy_write(priv, PHCON1, PHCON1_PDPXMD))
+ return 0;
+ if (!enc28j60_phy_write(priv, PHCON2, 0x00))
+ return 0;
+ } else {
+ if (!enc28j60_phy_write(priv, PHCON1, 0x00))
+ return 0;
+ if (!enc28j60_phy_write(priv, PHCON2, PHCON2_HDLDIS))
+ return 0;
+ }
+ if (netif_msg_hw(priv))
+ enc28j60_dump_regs(priv, "Hw initialized.");
+
+ return 1;
+}
+
+static void enc28j60_hw_enable(struct enc28j60_net *priv)
+{
+ /* enable interrupts */
+ if (netif_msg_hw(priv))
+ printk(KERN_DEBUG DRV_NAME ": %s() enabling interrupts.\n",
+ __func__);
+
+ enc28j60_phy_write(priv, PHIE, PHIE_PGEIE | PHIE_PLNKIE);
+
+ mutex_lock(&priv->lock);
+ nolock_reg_bfclr(priv, EIR, EIR_DMAIF | EIR_LINKIF |
+ EIR_TXIF | EIR_TXERIF | EIR_RXERIF | EIR_PKTIF);
+ nolock_regb_write(priv, EIE, EIE_INTIE | EIE_PKTIE | EIE_LINKIE |
+ EIE_TXIE | EIE_TXERIE | EIE_RXERIE);
+
+ /* enable receive logic */
+ nolock_reg_bfset(priv, ECON1, ECON1_RXEN);
+ priv->hw_enable = true;
+ mutex_unlock(&priv->lock);
+}
+
+static void enc28j60_hw_disable(struct enc28j60_net *priv)
+{
+ mutex_lock(&priv->lock);
+ /* disable interrupts and packet reception */
+ nolock_regb_write(priv, EIE, 0x00);
+ nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
+ priv->hw_enable = false;
+ mutex_unlock(&priv->lock);
+}
+
+static int
+enc28j60_setlink(struct net_device *ndev, u8 autoneg, u16 speed, u8 duplex)
+{
+ struct enc28j60_net *priv = netdev_priv(ndev);
+ int ret = 0;
+
+ if (!priv->hw_enable) {
+ /* link is in low power mode now; duplex setting
+ * will take effect on next enc28j60_hw_init().
+ */
+ if (autoneg == AUTONEG_DISABLE && speed == SPEED_10)
+ priv->full_duplex = (duplex == DUPLEX_FULL);
+ else {
+ if (netif_msg_link(priv))
+ dev_warn(&ndev->dev,
+ "unsupported link setting\n");
+ ret = -EOPNOTSUPP;
+ }
+ } else {
+ if (netif_msg_link(priv))
+ dev_warn(&ndev->dev, "Warning: hw must be disabled "
+ "to set link mode\n");
+ ret = -EBUSY;
+ }
+ return ret;
+}
+
+/*
+ * Read the Transmit Status Vector
+ */
+static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE])
+{
+ int endptr;
+
+ endptr = locked_regw_read(priv, ETXNDL);
+ if (netif_msg_hw(priv))
+ printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n",
+ endptr + 1);
+ enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv);
+}
+
+static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
+ u8 tsv[TSV_SIZE])
+{
+ u16 tmp1, tmp2;
+
+ printk(KERN_DEBUG DRV_NAME ": %s - TSV:\n", msg);
+ tmp1 = tsv[1];
+ tmp1 <<= 8;
+ tmp1 |= tsv[0];
+
+ tmp2 = tsv[5];
+ tmp2 <<= 8;
+ tmp2 |= tsv[4];
+
+ printk(KERN_DEBUG DRV_NAME ": ByteCount: %d, CollisionCount: %d,"
+ " TotByteOnWire: %d\n", tmp1, tsv[2] & 0x0f, tmp2);
+ printk(KERN_DEBUG DRV_NAME ": TxDone: %d, CRCErr:%d, LenChkErr: %d,"
+ " LenOutOfRange: %d\n", TSV_GETBIT(tsv, TSV_TXDONE),
+ TSV_GETBIT(tsv, TSV_TXCRCERROR),
+ TSV_GETBIT(tsv, TSV_TXLENCHKERROR),
+ TSV_GETBIT(tsv, TSV_TXLENOUTOFRANGE));
+ printk(KERN_DEBUG DRV_NAME ": Multicast: %d, Broadcast: %d, "
+ "PacketDefer: %d, ExDefer: %d\n",
+ TSV_GETBIT(tsv, TSV_TXMULTICAST),
+ TSV_GETBIT(tsv, TSV_TXBROADCAST),
+ TSV_GETBIT(tsv, TSV_TXPACKETDEFER),
+ TSV_GETBIT(tsv, TSV_TXEXDEFER));
+ printk(KERN_DEBUG DRV_NAME ": ExCollision: %d, LateCollision: %d, "
+ "Giant: %d, Underrun: %d\n",
+ TSV_GETBIT(tsv, TSV_TXEXCOLLISION),
+ TSV_GETBIT(tsv, TSV_TXLATECOLLISION),
+ TSV_GETBIT(tsv, TSV_TXGIANT), TSV_GETBIT(tsv, TSV_TXUNDERRUN));
+ printk(KERN_DEBUG DRV_NAME ": ControlFrame: %d, PauseFrame: %d, "
+ "BackPressApp: %d, VLanTagFrame: %d\n",
+ TSV_GETBIT(tsv, TSV_TXCONTROLFRAME),
+ TSV_GETBIT(tsv, TSV_TXPAUSEFRAME),
+ TSV_GETBIT(tsv, TSV_BACKPRESSUREAPP),
+ TSV_GETBIT(tsv, TSV_TXVLANTAGFRAME));
+}
+
+/*
+ * Receive Status vector
+ */
+static void enc28j60_dump_rsv(struct enc28j60_net *priv, const char *msg,
+ u16 pk_ptr, int len, u16 sts)
+{
+ printk(KERN_DEBUG DRV_NAME ": %s - NextPk: 0x%04x - RSV:\n",
+ msg, pk_ptr);
+ printk(KERN_DEBUG DRV_NAME ": ByteCount: %d, DribbleNibble: %d\n", len,
+ RSV_GETBIT(sts, RSV_DRIBBLENIBBLE));
+ printk(KERN_DEBUG DRV_NAME ": RxOK: %d, CRCErr:%d, LenChkErr: %d,"
+ " LenOutOfRange: %d\n", RSV_GETBIT(sts, RSV_RXOK),
+ RSV_GETBIT(sts, RSV_CRCERROR),
+ RSV_GETBIT(sts, RSV_LENCHECKERR),
+ RSV_GETBIT(sts, RSV_LENOUTOFRANGE));
+ printk(KERN_DEBUG DRV_NAME ": Multicast: %d, Broadcast: %d, "
+ "LongDropEvent: %d, CarrierEvent: %d\n",
+ RSV_GETBIT(sts, RSV_RXMULTICAST),
+ RSV_GETBIT(sts, RSV_RXBROADCAST),
+ RSV_GETBIT(sts, RSV_RXLONGEVDROPEV),
+ RSV_GETBIT(sts, RSV_CARRIEREV));
+ printk(KERN_DEBUG DRV_NAME ": ControlFrame: %d, PauseFrame: %d,"
+ " UnknownOp: %d, VLanTagFrame: %d\n",
+ RSV_GETBIT(sts, RSV_RXCONTROLFRAME),
+ RSV_GETBIT(sts, RSV_RXPAUSEFRAME),
+ RSV_GETBIT(sts, RSV_RXUNKNOWNOPCODE),
+ RSV_GETBIT(sts, RSV_RXTYPEVLAN));
+}
+
+static void dump_packet(const char *msg, int len, const char *data)
+{
+ printk(KERN_DEBUG DRV_NAME ": %s - packet len:%d\n", msg, len);
+ print_hex_dump(KERN_DEBUG, "pk data: ", DUMP_PREFIX_OFFSET, 16, 1,
+ data, len, true);
+}
+
+/*
+ * Hardware receive function.
+ * Read the buffer memory, update the FIFO pointer to free the buffer,
+ * check the status vector and decrement the packet counter.
+ */
+static void enc28j60_hw_rx(struct net_device *ndev)
+{
+ struct enc28j60_net *priv = netdev_priv(ndev);
+ struct sk_buff *skb = NULL;
+ u16 erxrdpt, next_packet, rxstat;
+ u8 rsv[RSV_SIZE];
+ int len;
+
+ if (netif_msg_rx_status(priv))
+ printk(KERN_DEBUG DRV_NAME ": RX pk_addr:0x%04x\n",
+ priv->next_pk_ptr);
+
+ if (unlikely(priv->next_pk_ptr > RXEND_INIT)) {
+ if (netif_msg_rx_err(priv))
+ dev_err(&ndev->dev,
+ "%s() Invalid packet address!! 0x%04x\n",
+ __func__, priv->next_pk_ptr);
+ /* packet address corrupted: reset RX logic */
+ mutex_lock(&priv->lock);
+ nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
+ nolock_reg_bfset(priv, ECON1, ECON1_RXRST);
+ nolock_reg_bfclr(priv, ECON1, ECON1_RXRST);
+ nolock_rxfifo_init(priv, RXSTART_INIT, RXEND_INIT);
+ nolock_reg_bfclr(priv, EIR, EIR_RXERIF);
+ nolock_reg_bfset(priv, ECON1, ECON1_RXEN);
+ mutex_unlock(&priv->lock);
+ ndev->stats.rx_errors++;
+ return;
+ }
+ /* Read next packet pointer and rx status vector */
+ enc28j60_mem_read(priv, priv->next_pk_ptr, sizeof(rsv), rsv);
+
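+ /*
+ * The 6-byte vector just read is little-endian: bytes 0-1 hold
+ * the next packet pointer, bytes 2-3 the received byte count and
+ * bytes 4-5 the status word (RSV bits 16-31, matching the RSV_*
+ * definitions in enc28j60_hw.h), as decoded below.
+ */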
+ next_packet = rsv[1];
+ next_packet <<= 8;
+ next_packet |= rsv[0];
+
+ len = rsv[3];
+ len <<= 8;
+ len |= rsv[2];
+
+ rxstat = rsv[5];
+ rxstat <<= 8;
+ rxstat |= rsv[4];
+
+ if (netif_msg_rx_status(priv))
+ enc28j60_dump_rsv(priv, __func__, next_packet, len, rxstat);
+
+ if (!RSV_GETBIT(rxstat, RSV_RXOK) || len > MAX_FRAMELEN) {
+ if (netif_msg_rx_err(priv))
+ dev_err(&ndev->dev, "Rx Error (%04x)\n", rxstat);
+ ndev->stats.rx_errors++;
+ if (RSV_GETBIT(rxstat, RSV_CRCERROR))
+ ndev->stats.rx_crc_errors++;
+ if (RSV_GETBIT(rxstat, RSV_LENCHECKERR))
+ ndev->stats.rx_frame_errors++;
+ if (len > MAX_FRAMELEN)
+ ndev->stats.rx_over_errors++;
+ } else {
+ skb = dev_alloc_skb(len + NET_IP_ALIGN);
+ if (!skb) {
+ if (netif_msg_rx_err(priv))
+ dev_err(&ndev->dev,
+ "out of memory for Rx'd frame\n");
+ ndev->stats.rx_dropped++;
+ } else {
+ skb->dev = ndev;
+ skb_reserve(skb, NET_IP_ALIGN);
+ /* copy the packet from the receive buffer */
+ enc28j60_mem_read(priv,
+ rx_packet_start(priv->next_pk_ptr),
+ len, skb_put(skb, len));
+ if (netif_msg_pktdata(priv))
+ dump_packet(__func__, skb->len, skb->data);
+ skb->protocol = eth_type_trans(skb, ndev);
+ /* update statistics */
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+ netif_rx_ni(skb);
+ }
+ }
+ /*
+ * Move the RX read pointer to the start of the next
+ * received packet.
+ * This frees the memory we just read out
+ */
+ erxrdpt = erxrdpt_workaround(next_packet, RXSTART_INIT, RXEND_INIT);
+ if (netif_msg_hw(priv))
+ printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT:0x%04x\n",
+ __func__, erxrdpt);
+
+ mutex_lock(&priv->lock);
+ nolock_regw_write(priv, ERXRDPTL, erxrdpt);
+#ifdef CONFIG_ENC28J60_WRITEVERIFY
+ if (netif_msg_drv(priv)) {
+ u16 reg;
+ reg = nolock_regw_read(priv, ERXRDPTL);
+ if (reg != erxrdpt)
+ printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT verify "
+ "error (0x%04x - 0x%04x)\n", __func__,
+ reg, erxrdpt);
+ }
+#endif
+ priv->next_pk_ptr = next_packet;
+ /* we are done with this packet, decrement the packet counter */
+ nolock_reg_bfset(priv, ECON2, ECON2_PKTDEC);
+ mutex_unlock(&priv->lock);
+}
+
+/*
+ * Calculate free space in RxFIFO
+ */
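+/*
+ * The RX FIFO is circular between ERXST and ERXND: ERXWRPT is where
+ * the hardware writes next and ERXRDPT is how far the host has read.
+ * Illustration only: with erxst = 0x0000, erxnd = 0x19FF,
+ * erxwr = 0x1000 and erxrd = 0x0400, erxwr > erxrd, so free space is
+ * (0x19FF - 0x0000) - (0x1000 - 0x0400) = 0x0DFF bytes. An EPKTCNT of
+ * 255 means the 8-bit packet counter has saturated; that case is
+ * reported as -1 below.
+ */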
+static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv)
+{
+ int epkcnt, erxst, erxnd, erxwr, erxrd;
+ int free_space;
+
+ mutex_lock(&priv->lock);
+ epkcnt = nolock_regb_read(priv, EPKTCNT);
+ if (epkcnt >= 255)
+ free_space = -1;
+ else {
+ erxst = nolock_regw_read(priv, ERXSTL);
+ erxnd = nolock_regw_read(priv, ERXNDL);
+ erxwr = nolock_regw_read(priv, ERXWRPTL);
+ erxrd = nolock_regw_read(priv, ERXRDPTL);
+
+ if (erxwr > erxrd)
+ free_space = (erxnd - erxst) - (erxwr - erxrd);
+ else if (erxwr == erxrd)
+ free_space = (erxnd - erxst);
+ else
+ free_space = erxrd - erxwr - 1;
+ }
+ mutex_unlock(&priv->lock);
+ if (netif_msg_rx_status(priv))
+ printk(KERN_DEBUG DRV_NAME ": %s() free_space = %d\n",
+ __func__, free_space);
+ return free_space;
+}
+
+/*
+ * Access the PHY to determine link status
+ */
+static void enc28j60_check_link_status(struct net_device *ndev)
+{
+ struct enc28j60_net *priv = netdev_priv(ndev);
+ u16 reg;
+ int duplex;
+
+ reg = enc28j60_phy_read(priv, PHSTAT2);
+ if (netif_msg_hw(priv))
+ printk(KERN_DEBUG DRV_NAME ": %s() PHSTAT1: %04x, "
+ "PHSTAT2: %04x\n", __func__,
+ enc28j60_phy_read(priv, PHSTAT1), reg);
+ duplex = reg & PHSTAT2_DPXSTAT;
+
+ if (reg & PHSTAT2_LSTAT) {
+ netif_carrier_on(ndev);
+ if (netif_msg_ifup(priv))
+ dev_info(&ndev->dev, "link up - %s\n",
+ duplex ? "Full duplex" : "Half duplex");
+ } else {
+ if (netif_msg_ifdown(priv))
+ dev_info(&ndev->dev, "link down\n");
+ netif_carrier_off(ndev);
+ }
+}
+
+static void enc28j60_tx_clear(struct net_device *ndev, bool err)
+{
+ struct enc28j60_net *priv = netdev_priv(ndev);
+
+ if (err)
+ ndev->stats.tx_errors++;
+ else
+ ndev->stats.tx_packets++;
+
+ if (priv->tx_skb) {
+ if (!err)
+ ndev->stats.tx_bytes += priv->tx_skb->len;
+ dev_kfree_skb(priv->tx_skb);
+ priv->tx_skb = NULL;
+ }
+ locked_reg_bfclr(priv, ECON1, ECON1_TXRTS);
+ netif_wake_queue(ndev);
+}
+
+/*
+ * RX handler
+ * Ignore PKTIF because it is unreliable (see the errata datasheet);
+ * checking EPKTCNT is the suggested workaround.
+ * We don't need to clear the interrupt flag: that happens
+ * automatically when enc28j60_hw_rx() decrements the packet counter.
+ * Returns how many packets were processed.
+ */
+static int enc28j60_rx_interrupt(struct net_device *ndev)
+{
+ struct enc28j60_net *priv = netdev_priv(ndev);
+ int pk_counter, ret;
+
+ pk_counter = locked_regb_read(priv, EPKTCNT);
+ if (pk_counter && netif_msg_intr(priv))
+ printk(KERN_DEBUG DRV_NAME ": intRX, pk_cnt: %d\n", pk_counter);
+ if (pk_counter > priv->max_pk_counter) {
+ /* update statistics */
+ priv->max_pk_counter = pk_counter;
+ if (netif_msg_rx_status(priv) && priv->max_pk_counter > 1)
+ printk(KERN_DEBUG DRV_NAME ": RX max_pk_cnt: %d\n",
+ priv->max_pk_counter);
+ }
+ ret = pk_counter;
+ while (pk_counter-- > 0)
+ enc28j60_hw_rx(ndev);
+
+ return ret;
+}
+
+static void enc28j60_irq_work_handler(struct work_struct *work)
+{
+ struct enc28j60_net *priv =
+ container_of(work, struct enc28j60_net, irq_work);
+ struct net_device *ndev = priv->netdev;
+ int intflags, loop;
+
+ if (netif_msg_intr(priv))
+ printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
+ /* disable further interrupts */
+ locked_reg_bfclr(priv, EIE, EIE_INTIE);
+
+ do {
+ loop = 0;
+ intflags = locked_regb_read(priv, EIR);
+ /* DMA interrupt handler (not currently used) */
+ if ((intflags & EIR_DMAIF) != 0) {
+ loop++;
+ if (netif_msg_intr(priv))
+ printk(KERN_DEBUG DRV_NAME
+ ": intDMA(%d)\n", loop);
+ locked_reg_bfclr(priv, EIR, EIR_DMAIF);
+ }
+ /* LINK changed handler */
+ if ((intflags & EIR_LINKIF) != 0) {
+ loop++;
+ if (netif_msg_intr(priv))
+ printk(KERN_DEBUG DRV_NAME
+ ": intLINK(%d)\n", loop);
+ enc28j60_check_link_status(ndev);
+ /* read PHIR to clear the flag */
+ enc28j60_phy_read(priv, PHIR);
+ }
+ /* TX complete handler */
+ if ((intflags & EIR_TXIF) != 0) {
+ bool err = false;
+ loop++;
+ if (netif_msg_intr(priv))
+ printk(KERN_DEBUG DRV_NAME
+ ": intTX(%d)\n", loop);
+ priv->tx_retry_count = 0;
+ if (locked_regb_read(priv, ESTAT) & ESTAT_TXABRT) {
+ if (netif_msg_tx_err(priv))
+ dev_err(&ndev->dev,
+ "Tx Error (aborted)\n");
+ err = true;
+ }
+ if (netif_msg_tx_done(priv)) {
+ u8 tsv[TSV_SIZE];
+ enc28j60_read_tsv(priv, tsv);
+ enc28j60_dump_tsv(priv, "Tx Done", tsv);
+ }
+ enc28j60_tx_clear(ndev, err);
+ locked_reg_bfclr(priv, EIR, EIR_TXIF);
+ }
+ /* TX Error handler */
+ if ((intflags & EIR_TXERIF) != 0) {
+ u8 tsv[TSV_SIZE];
+
+ loop++;
+ if (netif_msg_intr(priv))
+ printk(KERN_DEBUG DRV_NAME
+ ": intTXErr(%d)\n", loop);
+ locked_reg_bfclr(priv, ECON1, ECON1_TXRTS);
+ enc28j60_read_tsv(priv, tsv);
+ if (netif_msg_tx_err(priv))
+ enc28j60_dump_tsv(priv, "Tx Error", tsv);
+ /* Reset TX logic */
+ mutex_lock(&priv->lock);
+ nolock_reg_bfset(priv, ECON1, ECON1_TXRST);
+ nolock_reg_bfclr(priv, ECON1, ECON1_TXRST);
+ nolock_txfifo_init(priv, TXSTART_INIT, TXEND_INIT);
+ mutex_unlock(&priv->lock);
+ /* Transmit Late collision check for retransmit */
+ if (TSV_GETBIT(tsv, TSV_TXLATECOLLISION)) {
+ if (netif_msg_tx_err(priv))
+ printk(KERN_DEBUG DRV_NAME
+ ": LateCollision TXErr (%d)\n",
+ priv->tx_retry_count);
+ if (priv->tx_retry_count++ < MAX_TX_RETRYCOUNT)
+ locked_reg_bfset(priv, ECON1,
+ ECON1_TXRTS);
+ else
+ enc28j60_tx_clear(ndev, true);
+ } else
+ enc28j60_tx_clear(ndev, true);
+ locked_reg_bfclr(priv, EIR, EIR_TXERIF);
+ }
+ /* RX Error handler */
+ if ((intflags & EIR_RXERIF) != 0) {
+ loop++;
+ if (netif_msg_intr(priv))
+ printk(KERN_DEBUG DRV_NAME
+ ": intRXErr(%d)\n", loop);
+ /* Check free FIFO space to flag RX overrun */
+ if (enc28j60_get_free_rxfifo(priv) <= 0) {
+ if (netif_msg_rx_err(priv))
+ printk(KERN_DEBUG DRV_NAME
+ ": RX Overrun\n");
+ ndev->stats.rx_dropped++;
+ }
+ locked_reg_bfclr(priv, EIR, EIR_RXERIF);
+ }
+ /* RX handler */
+ if (enc28j60_rx_interrupt(ndev))
+ loop++;
+ } while (loop);
+
+ /* re-enable interrupts */
+ locked_reg_bfset(priv, EIE, EIE_INTIE);
+ if (netif_msg_intr(priv))
+ printk(KERN_DEBUG DRV_NAME ": %s() exit\n", __func__);
+}
+
+/*
+ * Hardware transmit function.
+ * Fill the buffer memory and send the contents of the transmit buffer
+ * onto the network
+ */
+static void enc28j60_hw_tx(struct enc28j60_net *priv)
+{
+ if (netif_msg_tx_queued(priv))
+ printk(KERN_DEBUG DRV_NAME
+ ": Tx Packet Len:%d\n", priv->tx_skb->len);
+
+ if (netif_msg_pktdata(priv))
+ dump_packet(__func__,
+ priv->tx_skb->len, priv->tx_skb->data);
+ enc28j60_packet_write(priv, priv->tx_skb->len, priv->tx_skb->data);
+
+#ifdef CONFIG_ENC28J60_WRITEVERIFY
+ /* readback and verify written data */
+ if (netif_msg_drv(priv)) {
+ int test_len, k;
+ u8 test_buf[64]; /* limit the test to the first 64 bytes */
+ int okflag;
+
+ test_len = priv->tx_skb->len;
+ if (test_len > sizeof(test_buf))
+ test_len = sizeof(test_buf);
+
+ /* + 1 to skip control byte */
+ enc28j60_mem_read(priv, TXSTART_INIT + 1, test_len, test_buf);
+ okflag = 1;
+ for (k = 0; k < test_len; k++) {
+ if (priv->tx_skb->data[k] != test_buf[k]) {
+ printk(KERN_DEBUG DRV_NAME
+ ": Error, %d location differ: "
+ "0x%02x-0x%02x\n", k,
+ priv->tx_skb->data[k], test_buf[k]);
+ okflag = 0;
+ }
+ }
+ if (!okflag)
+ printk(KERN_DEBUG DRV_NAME ": Tx write buffer, "
+ "verify ERROR!\n");
+ }
+#endif
+ /* set TX request flag */
+ locked_reg_bfset(priv, ECON1, ECON1_TXRTS);
+}
+
+static netdev_tx_t enc28j60_send_packet(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct enc28j60_net *priv = netdev_priv(dev);
+
+ if (netif_msg_tx_queued(priv))
+ printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
+
+ /* If some error occurs while trying to transmit this
+ * packet, you should return '1' from this function.
+ * In such a case you _may not_ do anything to the
+ * SKB: it is still owned by the network queueing
+ * layer when an error is returned, which means you
+ * may not modify any SKB fields, free the SKB, etc.
+ */
+ netif_stop_queue(dev);
+
+ /* Remember the skb for deferred processing */
+ priv->tx_skb = skb;
+ schedule_work(&priv->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
+static void enc28j60_tx_work_handler(struct work_struct *work)
+{
+ struct enc28j60_net *priv =
+ container_of(work, struct enc28j60_net, tx_work);
+
+ /* actual delivery of data */
+ enc28j60_hw_tx(priv);
+}
+
+static irqreturn_t enc28j60_irq(int irq, void *dev_id)
+{
+ struct enc28j60_net *priv = dev_id;
+
+ /*
+ * Can't do anything in interrupt context because we need to
+ * block (spi_sync() is blocking), so fire off the interrupt
+ * handling workqueue.
+ * Remember that we access enc28j60 registers through the SPI bus
+ * via spi_sync() calls.
+ */
+ schedule_work(&priv->irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static void enc28j60_tx_timeout(struct net_device *ndev)
+{
+ struct enc28j60_net *priv = netdev_priv(ndev);
+
+ if (netif_msg_timer(priv))
+ dev_err(&ndev->dev, DRV_NAME " tx timeout\n");
+
+ ndev->stats.tx_errors++;
+ /* can't restart safely under softirq */
+ schedule_work(&priv->restart_work);
+}
+
+/*
+ * Open/initialize the board. This is called (in the current kernel)
+ * sometime after booting when the 'ifconfig' program is run.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+static int enc28j60_net_open(struct net_device *dev)
+{
+ struct enc28j60_net *priv = netdev_priv(dev);
+
+ if (netif_msg_drv(priv))
+ printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ if (netif_msg_ifup(priv))
+ dev_err(&dev->dev, "invalid MAC address %pM\n",
+ dev->dev_addr);
+ return -EADDRNOTAVAIL;
+ }
+ /* Reset the hardware here (and take it out of low power mode) */
+ enc28j60_lowpower(priv, false);
+ enc28j60_hw_disable(priv);
+ if (!enc28j60_hw_init(priv)) {
+ if (netif_msg_ifup(priv))
+ dev_err(&dev->dev, "hw_reset() failed\n");
+ return -EINVAL;
+ }
+ /* Update the MAC address (in case user has changed it) */
+ enc28j60_set_hw_macaddr(dev);
+ /* Enable interrupts */
+ enc28j60_hw_enable(priv);
+ /* check link status */
+ enc28j60_check_link_status(dev);
+ /* We are now ready to accept transmit requests from
+ * the queueing layer of the networking.
+ */
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/* The inverse routine to net_open(). */
+static int enc28j60_net_close(struct net_device *dev)
+{
+ struct enc28j60_net *priv = netdev_priv(dev);
+
+ if (netif_msg_drv(priv))
+ printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
+
+ enc28j60_hw_disable(priv);
+ enc28j60_lowpower(priv, true);
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+/*
+ * Set or clear the multicast filter for this adapter
+ * num_addrs == -1 Promiscuous mode, receive all packets
+ * num_addrs == 0 Normal mode, filter out multicast packets
+ * num_addrs > 0 Multicast mode, receive normal and MC packets
+ */
+static void enc28j60_set_multicast_list(struct net_device *dev)
+{
+ struct enc28j60_net *priv = netdev_priv(dev);
+ int oldfilter = priv->rxfilter;
+
+ if (dev->flags & IFF_PROMISC) {
+ if (netif_msg_link(priv))
+ dev_info(&dev->dev, "promiscuous mode\n");
+ priv->rxfilter = RXFILTER_PROMISC;
+ } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
+ if (netif_msg_link(priv))
+ dev_info(&dev->dev, "%smulticast mode\n",
+ (dev->flags & IFF_ALLMULTI) ? "all-" : "");
+ priv->rxfilter = RXFILTER_MULTI;
+ } else {
+ if (netif_msg_link(priv))
+ dev_info(&dev->dev, "normal mode\n");
+ priv->rxfilter = RXFILTER_NORMAL;
+ }
+
+ if (oldfilter != priv->rxfilter)
+ schedule_work(&priv->setrx_work);
+}
+
+static void enc28j60_setrx_work_handler(struct work_struct *work)
+{
+ struct enc28j60_net *priv =
+ container_of(work, struct enc28j60_net, setrx_work);
+
+ if (priv->rxfilter == RXFILTER_PROMISC) {
+ if (netif_msg_drv(priv))
+ printk(KERN_DEBUG DRV_NAME ": promiscuous mode\n");
+ locked_regb_write(priv, ERXFCON, 0x00);
+ } else if (priv->rxfilter == RXFILTER_MULTI) {
+ if (netif_msg_drv(priv))
+ printk(KERN_DEBUG DRV_NAME ": multicast mode\n");
+ locked_regb_write(priv, ERXFCON,
+ ERXFCON_UCEN | ERXFCON_CRCEN |
+ ERXFCON_BCEN | ERXFCON_MCEN);
+ } else {
+ if (netif_msg_drv(priv))
+ printk(KERN_DEBUG DRV_NAME ": normal mode\n");
+ locked_regb_write(priv, ERXFCON,
+ ERXFCON_UCEN | ERXFCON_CRCEN |
+ ERXFCON_BCEN);
+ }
+}
+
+static void enc28j60_restart_work_handler(struct work_struct *work)
+{
+ struct enc28j60_net *priv =
+ container_of(work, struct enc28j60_net, restart_work);
+ struct net_device *ndev = priv->netdev;
+ int ret;
+
+ rtnl_lock();
+ if (netif_running(ndev)) {
+ enc28j60_net_close(ndev);
+ ret = enc28j60_net_open(ndev);
+ if (unlikely(ret)) {
+ dev_info(&ndev->dev, "could not restart %d\n", ret);
+ dev_close(ndev);
+ }
+ }
+ rtnl_unlock();
+}
+
+/* ......................... ETHTOOL SUPPORT ........................... */
+
+static void
+enc28j60_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info,
+ dev_name(dev->dev.parent), sizeof(info->bus_info));
+}
+
+static int
+enc28j60_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct enc28j60_net *priv = netdev_priv(dev);
+
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->supported = SUPPORTED_10baseT_Half
+ | SUPPORTED_10baseT_Full
+ | SUPPORTED_TP;
+ ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ cmd->port = PORT_TP;
+ cmd->autoneg = AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static int
+enc28j60_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ return enc28j60_setlink(dev, cmd->autoneg,
+ ethtool_cmd_speed(cmd), cmd->duplex);
+}
+
+static u32 enc28j60_get_msglevel(struct net_device *dev)
+{
+ struct enc28j60_net *priv = netdev_priv(dev);
+ return priv->msg_enable;
+}
+
+static void enc28j60_set_msglevel(struct net_device *dev, u32 val)
+{
+ struct enc28j60_net *priv = netdev_priv(dev);
+ priv->msg_enable = val;
+}
+
+static const struct ethtool_ops enc28j60_ethtool_ops = {
+ .get_settings = enc28j60_get_settings,
+ .set_settings = enc28j60_set_settings,
+ .get_drvinfo = enc28j60_get_drvinfo,
+ .get_msglevel = enc28j60_get_msglevel,
+ .set_msglevel = enc28j60_set_msglevel,
+};
+
+static int enc28j60_chipset_init(struct net_device *dev)
+{
+ struct enc28j60_net *priv = netdev_priv(dev);
+
+ return enc28j60_hw_init(priv);
+}
+
+static const struct net_device_ops enc28j60_netdev_ops = {
+ .ndo_open = enc28j60_net_open,
+ .ndo_stop = enc28j60_net_close,
+ .ndo_start_xmit = enc28j60_send_packet,
+ .ndo_set_rx_mode = enc28j60_set_multicast_list,
+ .ndo_set_mac_address = enc28j60_set_mac_address,
+ .ndo_tx_timeout = enc28j60_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __devinit enc28j60_probe(struct spi_device *spi)
+{
+ struct net_device *dev;
+ struct enc28j60_net *priv;
+ int ret = 0;
+
+ if (netif_msg_drv(&debug))
+ dev_info(&spi->dev, DRV_NAME " Ethernet driver %s loaded\n",
+ DRV_VERSION);
+
+ dev = alloc_etherdev(sizeof(struct enc28j60_net));
+ if (!dev) {
+ if (netif_msg_drv(&debug))
+ dev_err(&spi->dev, DRV_NAME
+ ": unable to alloc new ethernet\n");
+ ret = -ENOMEM;
+ goto error_alloc;
+ }
+ priv = netdev_priv(dev);
+
+ priv->netdev = dev; /* priv to netdev reference */
+ priv->spi = spi; /* priv to spi reference */
+ priv->msg_enable = netif_msg_init(debug.msg_enable,
+ ENC28J60_MSG_DEFAULT);
+ mutex_init(&priv->lock);
+ INIT_WORK(&priv->tx_work, enc28j60_tx_work_handler);
+ INIT_WORK(&priv->setrx_work, enc28j60_setrx_work_handler);
+ INIT_WORK(&priv->irq_work, enc28j60_irq_work_handler);
+ INIT_WORK(&priv->restart_work, enc28j60_restart_work_handler);
+ dev_set_drvdata(&spi->dev, priv); /* spi to priv reference */
+ SET_NETDEV_DEV(dev, &spi->dev);
+
+ if (!enc28j60_chipset_init(dev)) {
+ if (netif_msg_probe(priv))
+ dev_info(&spi->dev, DRV_NAME " chip not found\n");
+ ret = -EIO;
+ goto error_irq;
+ }
+ random_ether_addr(dev->dev_addr);
+ enc28j60_set_hw_macaddr(dev);
+
+ /* Board setup must set the relevant edge trigger type;
+ * level triggers won't currently work.
+ */
+ ret = request_irq(spi->irq, enc28j60_irq, 0, DRV_NAME, priv);
+ if (ret < 0) {
+ if (netif_msg_probe(priv))
+ dev_err(&spi->dev, DRV_NAME ": request irq %d failed "
+ "(ret = %d)\n", spi->irq, ret);
+ goto error_irq;
+ }
+
+ dev->if_port = IF_PORT_10BASET;
+ dev->irq = spi->irq;
+ dev->netdev_ops = &enc28j60_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ SET_ETHTOOL_OPS(dev, &enc28j60_ethtool_ops);
+
+ enc28j60_lowpower(priv, true);
+
+ ret = register_netdev(dev);
+ if (ret) {
+ if (netif_msg_probe(priv))
+ dev_err(&spi->dev, "register netdev " DRV_NAME
+ " failed (ret = %d)\n", ret);
+ goto error_register;
+ }
+ dev_info(&dev->dev, DRV_NAME " driver registered\n");
+
+ return 0;
+
+error_register:
+ free_irq(spi->irq, priv);
+error_irq:
+ free_netdev(dev);
+error_alloc:
+ return ret;
+}
+
+static int __devexit enc28j60_remove(struct spi_device *spi)
+{
+ struct enc28j60_net *priv = dev_get_drvdata(&spi->dev);
+
+ if (netif_msg_drv(priv))
+ printk(KERN_DEBUG DRV_NAME ": remove\n");
+
+ unregister_netdev(priv->netdev);
+ free_irq(spi->irq, priv);
+ free_netdev(priv->netdev);
+
+ return 0;
+}
+
+static struct spi_driver enc28j60_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = enc28j60_probe,
+ .remove = __devexit_p(enc28j60_remove),
+};
+
+static int __init enc28j60_init(void)
+{
+ msec20_to_jiffies = msecs_to_jiffies(20);
+
+ return spi_register_driver(&enc28j60_driver);
+}
+
+module_init(enc28j60_init);
+
+static void __exit enc28j60_exit(void)
+{
+ spi_unregister_driver(&enc28j60_driver);
+}
+
+module_exit(enc28j60_exit);
+
+MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
+MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>");
+MODULE_LICENSE("GPL");
+module_param_named(debug, debug.msg_enable, int, 0);
+MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., ffff=all)");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/net/ethernet/microchip/enc28j60_hw.h b/drivers/net/ethernet/microchip/enc28j60_hw.h
new file mode 100644
index 000000000000..25b41de49f0e
--- /dev/null
+++ b/drivers/net/ethernet/microchip/enc28j60_hw.h
@@ -0,0 +1,309 @@
+/*
+ * enc28j60_hw.h: EDTP FrameThrower style enc28j60 registers
+ *
+ * $Id: enc28j60_hw.h,v 1.9 2007/12/14 11:59:16 claudio Exp $
+ */
+
+#ifndef _ENC28J60_HW_H
+#define _ENC28J60_HW_H
+
+/*
+ * ENC28J60 Control Registers
+ * Control register definitions are a combination of address,
+ * bank number, and Ethernet/MAC/PHY indicator bits.
+ * - Register address (bits 0-4)
+ * - Bank number (bits 5-6)
+ * - MAC/MII indicator (bit 7)
+ */
+#define ADDR_MASK 0x1F
+#define BANK_MASK 0x60
+#define SPRD_MASK 0x80
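+/*
+ * Example encoding (illustration only): MACON1 below is
+ * (0x00 | 0x40 | SPRD_MASK), i.e. register address 0x00 in bank 2
+ * with the MAC/MII indicator set; per the datasheet, SPI reads of
+ * MAC/MII registers return a dummy byte first, which is why they
+ * are flagged.
+ */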
+/* All-bank registers */
+#define EIE 0x1B
+#define EIR 0x1C
+#define ESTAT 0x1D
+#define ECON2 0x1E
+#define ECON1 0x1F
+/* Bank 0 registers */
+#define ERDPTL (0x00|0x00)
+#define ERDPTH (0x01|0x00)
+#define EWRPTL (0x02|0x00)
+#define EWRPTH (0x03|0x00)
+#define ETXSTL (0x04|0x00)
+#define ETXSTH (0x05|0x00)
+#define ETXNDL (0x06|0x00)
+#define ETXNDH (0x07|0x00)
+#define ERXSTL (0x08|0x00)
+#define ERXSTH (0x09|0x00)
+#define ERXNDL (0x0A|0x00)
+#define ERXNDH (0x0B|0x00)
+#define ERXRDPTL (0x0C|0x00)
+#define ERXRDPTH (0x0D|0x00)
+#define ERXWRPTL (0x0E|0x00)
+#define ERXWRPTH (0x0F|0x00)
+#define EDMASTL (0x10|0x00)
+#define EDMASTH (0x11|0x00)
+#define EDMANDL (0x12|0x00)
+#define EDMANDH (0x13|0x00)
+#define EDMADSTL (0x14|0x00)
+#define EDMADSTH (0x15|0x00)
+#define EDMACSL (0x16|0x00)
+#define EDMACSH (0x17|0x00)
+/* Bank 1 registers */
+#define EHT0 (0x00|0x20)
+#define EHT1 (0x01|0x20)
+#define EHT2 (0x02|0x20)
+#define EHT3 (0x03|0x20)
+#define EHT4 (0x04|0x20)
+#define EHT5 (0x05|0x20)
+#define EHT6 (0x06|0x20)
+#define EHT7 (0x07|0x20)
+#define EPMM0 (0x08|0x20)
+#define EPMM1 (0x09|0x20)
+#define EPMM2 (0x0A|0x20)
+#define EPMM3 (0x0B|0x20)
+#define EPMM4 (0x0C|0x20)
+#define EPMM5 (0x0D|0x20)
+#define EPMM6 (0x0E|0x20)
+#define EPMM7 (0x0F|0x20)
+#define EPMCSL (0x10|0x20)
+#define EPMCSH (0x11|0x20)
+#define EPMOL (0x14|0x20)
+#define EPMOH (0x15|0x20)
+#define EWOLIE (0x16|0x20)
+#define EWOLIR (0x17|0x20)
+#define ERXFCON (0x18|0x20)
+#define EPKTCNT (0x19|0x20)
+/* Bank 2 registers */
+#define MACON1 (0x00|0x40|SPRD_MASK)
+/* #define MACON2 (0x01|0x40|SPRD_MASK) */
+#define MACON3 (0x02|0x40|SPRD_MASK)
+#define MACON4 (0x03|0x40|SPRD_MASK)
+#define MABBIPG (0x04|0x40|SPRD_MASK)
+#define MAIPGL (0x06|0x40|SPRD_MASK)
+#define MAIPGH (0x07|0x40|SPRD_MASK)
+#define MACLCON1 (0x08|0x40|SPRD_MASK)
+#define MACLCON2 (0x09|0x40|SPRD_MASK)
+#define MAMXFLL (0x0A|0x40|SPRD_MASK)
+#define MAMXFLH (0x0B|0x40|SPRD_MASK)
+#define MAPHSUP (0x0D|0x40|SPRD_MASK)
+#define MICON (0x11|0x40|SPRD_MASK)
+#define MICMD (0x12|0x40|SPRD_MASK)
+#define MIREGADR (0x14|0x40|SPRD_MASK)
+#define MIWRL (0x16|0x40|SPRD_MASK)
+#define MIWRH (0x17|0x40|SPRD_MASK)
+#define MIRDL (0x18|0x40|SPRD_MASK)
+#define MIRDH (0x19|0x40|SPRD_MASK)
+/* Bank 3 registers */
+#define MAADR1 (0x00|0x60|SPRD_MASK)
+#define MAADR0 (0x01|0x60|SPRD_MASK)
+#define MAADR3 (0x02|0x60|SPRD_MASK)
+#define MAADR2 (0x03|0x60|SPRD_MASK)
+#define MAADR5 (0x04|0x60|SPRD_MASK)
+#define MAADR4 (0x05|0x60|SPRD_MASK)
+#define EBSTSD (0x06|0x60)
+#define EBSTCON (0x07|0x60)
+#define EBSTCSL (0x08|0x60)
+#define EBSTCSH (0x09|0x60)
+#define MISTAT (0x0A|0x60|SPRD_MASK)
+#define EREVID (0x12|0x60)
+#define ECOCON (0x15|0x60)
+#define EFLOCON (0x17|0x60)
+#define EPAUSL (0x18|0x60)
+#define EPAUSH (0x19|0x60)
+/* PHY registers */
+#define PHCON1 0x00
+#define PHSTAT1 0x01
+#define PHHID1 0x02
+#define PHHID2 0x03
+#define PHCON2 0x10
+#define PHSTAT2 0x11
+#define PHIE 0x12
+#define PHIR 0x13
+#define PHLCON 0x14
+
+/* ENC28J60 EIE Register Bit Definitions */
+#define EIE_INTIE 0x80
+#define EIE_PKTIE 0x40
+#define EIE_DMAIE 0x20
+#define EIE_LINKIE 0x10
+#define EIE_TXIE 0x08
+/* #define EIE_WOLIE 0x04 (reserved) */
+#define EIE_TXERIE 0x02
+#define EIE_RXERIE 0x01
+/* ENC28J60 EIR Register Bit Definitions */
+#define EIR_PKTIF 0x40
+#define EIR_DMAIF 0x20
+#define EIR_LINKIF 0x10
+#define EIR_TXIF 0x08
+/* #define EIR_WOLIF 0x04 (reserved) */
+#define EIR_TXERIF 0x02
+#define EIR_RXERIF 0x01
+/* ENC28J60 ESTAT Register Bit Definitions */
+#define ESTAT_INT 0x80
+#define ESTAT_LATECOL 0x10
+#define ESTAT_RXBUSY 0x04
+#define ESTAT_TXABRT 0x02
+#define ESTAT_CLKRDY 0x01
+/* ENC28J60 ECON2 Register Bit Definitions */
+#define ECON2_AUTOINC 0x80
+#define ECON2_PKTDEC 0x40
+#define ECON2_PWRSV 0x20
+#define ECON2_VRPS 0x08
+/* ENC28J60 ECON1 Register Bit Definitions */
+#define ECON1_TXRST 0x80
+#define ECON1_RXRST 0x40
+#define ECON1_DMAST 0x20
+#define ECON1_CSUMEN 0x10
+#define ECON1_TXRTS 0x08
+#define ECON1_RXEN 0x04
+#define ECON1_BSEL1 0x02
+#define ECON1_BSEL0 0x01
+/* ENC28J60 MACON1 Register Bit Definitions */
+#define MACON1_LOOPBK 0x10
+#define MACON1_TXPAUS 0x08
+#define MACON1_RXPAUS 0x04
+#define MACON1_PASSALL 0x02
+#define MACON1_MARXEN 0x01
+/* ENC28J60 MACON2 Register Bit Definitions */
+#define MACON2_MARST 0x80
+#define MACON2_RNDRST 0x40
+#define MACON2_MARXRST 0x08
+#define MACON2_RFUNRST 0x04
+#define MACON2_MATXRST 0x02
+#define MACON2_TFUNRST 0x01
+/* ENC28J60 MACON3 Register Bit Definitions */
+#define MACON3_PADCFG2 0x80
+#define MACON3_PADCFG1 0x40
+#define MACON3_PADCFG0 0x20
+#define MACON3_TXCRCEN 0x10
+#define MACON3_PHDRLEN 0x08
+#define MACON3_HFRMLEN 0x04
+#define MACON3_FRMLNEN 0x02
+#define MACON3_FULDPX 0x01
+/* ENC28J60 MICMD Register Bit Definitions */
+#define MICMD_MIISCAN 0x02
+#define MICMD_MIIRD 0x01
+/* ENC28J60 MISTAT Register Bit Definitions */
+#define MISTAT_NVALID 0x04
+#define MISTAT_SCAN 0x02
+#define MISTAT_BUSY 0x01
+/* ENC28J60 ERXFCON Register Bit Definitions */
+#define ERXFCON_UCEN 0x80
+#define ERXFCON_ANDOR 0x40
+#define ERXFCON_CRCEN 0x20
+#define ERXFCON_PMEN 0x10
+#define ERXFCON_MPEN 0x08
+#define ERXFCON_HTEN 0x04
+#define ERXFCON_MCEN 0x02
+#define ERXFCON_BCEN 0x01
+
+/* ENC28J60 PHY PHCON1 Register Bit Definitions */
+#define PHCON1_PRST 0x8000
+#define PHCON1_PLOOPBK 0x4000
+#define PHCON1_PPWRSV 0x0800
+#define PHCON1_PDPXMD 0x0100
+/* ENC28J60 PHY PHSTAT1 Register Bit Definitions */
+#define PHSTAT1_PFDPX 0x1000
+#define PHSTAT1_PHDPX 0x0800
+#define PHSTAT1_LLSTAT 0x0004
+#define PHSTAT1_JBSTAT 0x0002
+/* ENC28J60 PHY PHSTAT2 Register Bit Definitions */
+#define PHSTAT2_TXSTAT (1 << 13)
+#define PHSTAT2_RXSTAT (1 << 12)
+#define PHSTAT2_COLSTAT (1 << 11)
+#define PHSTAT2_LSTAT (1 << 10)
+#define PHSTAT2_DPXSTAT (1 << 9)
+#define PHSTAT2_PLRITY (1 << 5)
+/* ENC28J60 PHY PHCON2 Register Bit Definitions */
+#define PHCON2_FRCLINK 0x4000
+#define PHCON2_TXDIS 0x2000
+#define PHCON2_JABBER 0x0400
+#define PHCON2_HDLDIS 0x0100
+/* ENC28J60 PHY PHIE Register Bit Definitions */
+#define PHIE_PLNKIE (1 << 4)
+#define PHIE_PGEIE (1 << 1)
+/* ENC28J60 PHY PHIR Register Bit Definitions */
+#define PHIR_PLNKIF (1 << 4)
+#define PHIR_PGEIF (1 << 1)
+
+/* ENC28J60 Packet Control Byte Bit Definitions */
+#define PKTCTRL_PHUGEEN 0x08
+#define PKTCTRL_PPADEN 0x04
+#define PKTCTRL_PCRCEN 0x02
+#define PKTCTRL_POVERRIDE 0x01
+
+/* ENC28J60 Transmit Status Vector */
+#define TSV_TXBYTECNT 0
+#define TSV_TXCOLLISIONCNT 16
+#define TSV_TXCRCERROR 20
+#define TSV_TXLENCHKERROR 21
+#define TSV_TXLENOUTOFRANGE 22
+#define TSV_TXDONE 23
+#define TSV_TXMULTICAST 24
+#define TSV_TXBROADCAST 25
+#define TSV_TXPACKETDEFER 26
+#define TSV_TXEXDEFER 27
+#define TSV_TXEXCOLLISION 28
+#define TSV_TXLATECOLLISION 29
+#define TSV_TXGIANT 30
+#define TSV_TXUNDERRUN 31
+#define TSV_TOTBYTETXONWIRE 32
+#define TSV_TXCONTROLFRAME 48
+#define TSV_TXPAUSEFRAME 49
+#define TSV_BACKPRESSUREAPP 50
+#define TSV_TXVLANTAGFRAME 51
+
+#define TSV_SIZE 7
+#define TSV_BYTEOF(x) ((x) / 8)
+#define TSV_BITMASK(x) (1 << ((x) % 8))
+#define TSV_GETBIT(x, y) (((x)[TSV_BYTEOF(y)] & TSV_BITMASK(y)) ? 1 : 0)
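+/*
+ * Example (illustration only): TSV_GETBIT(tsv, TSV_TXDONE) with
+ * TSV_TXDONE == 23 tests byte 23 / 8 == 2 of the vector against
+ * mask 1 << (23 % 8) == 0x80.
+ */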
+
+/* ENC28J60 Receive Status Vector */
+#define RSV_RXLONGEVDROPEV 16
+#define RSV_CARRIEREV 18
+#define RSV_CRCERROR 20
+#define RSV_LENCHECKERR 21
+#define RSV_LENOUTOFRANGE 22
+#define RSV_RXOK 23
+#define RSV_RXMULTICAST 24
+#define RSV_RXBROADCAST 25
+#define RSV_DRIBBLENIBBLE 26
+#define RSV_RXCONTROLFRAME 27
+#define RSV_RXPAUSEFRAME 28
+#define RSV_RXUNKNOWNOPCODE 29
+#define RSV_RXTYPEVLAN 30
+
+#define RSV_SIZE 6
+#define RSV_BITMASK(x) (1 << ((x) - 16))
+#define RSV_GETBIT(x, y) (((x) & RSV_BITMASK(y)) ? 1 : 0)
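+/*
+ * Example (illustration only): the driver keeps RSV bits 16-31 in a
+ * 16-bit word, so RSV_GETBIT(sts, RSV_RXOK) with RSV_RXOK == 23
+ * tests mask 1 << (23 - 16) == 0x80.
+ */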
+
+
+/* SPI operation codes */
+#define ENC28J60_READ_CTRL_REG 0x00
+#define ENC28J60_READ_BUF_MEM 0x3A
+#define ENC28J60_WRITE_CTRL_REG 0x40
+#define ENC28J60_WRITE_BUF_MEM 0x7A
+#define ENC28J60_BIT_FIELD_SET 0x80
+#define ENC28J60_BIT_FIELD_CLR 0xA0
+#define ENC28J60_SOFT_RESET 0xFF
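+/*
+ * Per the datasheet, a command byte is a 3-bit opcode in the top bits
+ * plus a 5-bit argument: the register opcodes above are OR'ed with a
+ * register address (masked by ADDR_MASK), while the buffer-memory and
+ * soft-reset opcodes carry fixed arguments (0x1A and 0x1F).
+ */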
+
+
+/* Buffer boundaries applied to the internal 8K RAM:
+ * the entire available packet buffer space is allocated.
+ * The TX buffer gets space for one full ethernet frame (~1500 bytes);
+ * the receive buffer gets the rest. */
+#define TXSTART_INIT 0x1A00
+#define TXEND_INIT 0x1FFF
+
+/* Put RX buffer at 0 as suggested by the Errata datasheet */
+#define RXSTART_INIT 0x0000
+#define RXEND_INIT 0x19FF
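+
+/* Sanity check on the split (illustration only): the TX area
+ * 0x1A00..0x1FFF is 0x600 (1536) bytes, enough for MAX_FRAMELEN plus
+ * the per-packet control byte and 7-byte TSV; the RX area
+ * 0x0000..0x19FF is 0x1A00 (6656) bytes; together they cover the
+ * full 8K (0x2000) of packet memory. */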
+
+/* maximum ethernet frame length */
+#define MAX_FRAMELEN 1518
+
+/* Preferred half duplex: LEDA: Link status LEDB: Rx/Tx activity */
+#define ENC28J60_LAMPS_MODE 0x3476
+
+#endif
diff --git a/drivers/net/ethernet/mipsnet.c b/drivers/net/ethernet/mipsnet.c
new file mode 100644
index 000000000000..d05b0c9e1e9c
--- /dev/null
+++ b/drivers/net/ethernet/mipsnet.c
@@ -0,0 +1,345 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <asm/mips-boards/simint.h>
+
+#define MIPSNET_VERSION "2007-11-17"
+
+/*
+ * Net status/control block as seen by sw in the core.
+ */
+struct mipsnet_regs {
+ /*
+ * Device info for probing, reads as MIPSNET%d where %d is some
+ * form of version.
+ */
+ u64 devId; /*0x00 */
+
+ /*
+ * Read-only busy flag.
+ * Set and cleared by the Net Device to indicate that an rx or a tx
+ * is in progress.
+ */
+ u32 busy; /*0x08 */
+
+ /*
+ * Set by the Net Device once data has been received.
+ * The value is the number of bytes that should be read from
+ * rxDataBuffer; it decreases toward 0 as the data is read,
+ * reaching 0 once all of it has been consumed.
+ */
+ u32 rxDataCount; /*0x0c */
+#define MIPSNET_MAX_RXTX_DATACOUNT (1 << 16)
+
+ /*
+ * Settable from the MIPS core, cleared by the Net Device.
+ * The core should set the number of bytes it wants to send,
+ * then it should write those bytes of data to txDataBuffer.
+ * The device will clear txDataCount once the data has been
+ * processed (not necessarily sent).
+ */
+ u32 txDataCount; /*0x10 */
+
+ /*
+ * Interrupt control
+ *
+ * Used to clear the interrupt generated by this device.
+ * Write a 1 to clear the interrupt (except bit31).
+ *
+ * Bit0 is set if it was a tx-done interrupt.
+ * Bit1 is set when new rx-data is available.
+ * Until this bit is cleared there will be no other RXs.
+ *
+ * Bit31 is used for testing; it clears after a read.
+ * Writing 1 to this bit will cause an interrupt to be generated.
+ * To clear the test interrupt, write 0 to this register.
+ */
+ u32 interruptControl; /*0x14 */
+#define MIPSNET_INTCTL_TXDONE (1u << 0)
+#define MIPSNET_INTCTL_RXDONE (1u << 1)
+#define MIPSNET_INTCTL_TESTBIT (1u << 31)
+
+ /*
+ * Readonly core-specific interrupt info for the device to signal
+ * the core. The meaning of the contents of this field might change.
+ */
+ /* XXX: the whole memIntf interrupt scheme is messy: the device
+ * should have no control whatsoever over which VPE/register set is
+ * being used.
+ * The MemIntf should only expose interrupt lines, and something in
+ * the config should be responsible for the line<->core/vpe bindings.
+ */
+ u32 interruptInfo; /*0x18 */
+
+ /*
+ * This is where the received data is read out.
+ * There is more data to read while rxDataCount is non-zero.
+ * Only 1 byte at this register offset is used.
+ */
+ u32 rxDataBuffer; /*0x1c */
+
+ /*
+ * This is where the data to transmit is written.
+ * Data should be written for the amount specified in the
+ * txDataCount register.
+ * Only 1 byte at this register offset is used.
+ */
+ u32 txDataBuffer; /*0x20 */
+};
+
+#define regaddr(dev, field) \
+ (dev->base_addr + offsetof(struct mipsnet_regs, field))
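+
+/*
+ * Example (illustration only): regaddr(dev, rxDataCount) resolves to
+ * dev->base_addr + 0x0c, matching the offset noted in the struct
+ * comment above.
+ */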
+
+static char mipsnet_string[] = "mipsnet";
+
+/*
+ * Copy data from the MIPSNET rx data port
+ */
+static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata,
+ int len)
+{
+ for (; len > 0; len--, kdata++)
+ *kdata = inb(regaddr(dev, rxDataBuffer));
+
+ return inl(regaddr(dev, rxDataCount));
+}
+
+static inline void mipsnet_put_todevice(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ int count_to_go = skb->len;
+ char *buf_ptr = skb->data;
+
+ outl(skb->len, regaddr(dev, txDataCount));
+
+ for (; count_to_go; buf_ptr++, count_to_go--)
+ outb(*buf_ptr, regaddr(dev, txDataBuffer));
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ dev_kfree_skb(skb);
+}
+
+static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ /*
+ * Only one packet at a time. Once the TXDONE interrupt is serviced, the
+ * queue will be restarted.
+ */
+ netif_stop_queue(dev);
+ mipsnet_put_todevice(dev, skb);
+
+ return NETDEV_TX_OK;
+}
+
+static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t len)
+{
+ struct sk_buff *skb;
+
+ if (!len)
+ return len;
+
+ skb = dev_alloc_skb(len + NET_IP_ALIGN);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len))
+ return -EFAULT;
+
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ netif_rx(skb);
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+
+ return len;
+}
+
+static irqreturn_t mipsnet_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ u32 int_flags;
+ irqreturn_t ret = IRQ_NONE;
+
+ if (irq != dev->irq)
+ goto out_badirq;
+
+ /* TESTBIT is cleared on read. */
+ int_flags = inl(regaddr(dev, interruptControl));
+ if (int_flags & MIPSNET_INTCTL_TESTBIT) {
+ /* TESTBIT takes effect after a write with 0. */
+ outl(0, regaddr(dev, interruptControl));
+ ret = IRQ_HANDLED;
+ } else if (int_flags & MIPSNET_INTCTL_TXDONE) {
+ /* Only one packet at a time, we are done. */
+ dev->stats.tx_packets++;
+ netif_wake_queue(dev);
+ outl(MIPSNET_INTCTL_TXDONE,
+ regaddr(dev, interruptControl));
+ ret = IRQ_HANDLED;
+ } else if (int_flags & MIPSNET_INTCTL_RXDONE) {
+ mipsnet_get_fromdev(dev, inl(regaddr(dev, rxDataCount)));
+ outl(MIPSNET_INTCTL_RXDONE, regaddr(dev, interruptControl));
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+
+out_badirq:
+ printk(KERN_INFO "%s: %s(): irq %d for unknown device\n",
+ dev->name, __func__, irq);
+ return ret;
+}
+
+static int mipsnet_open(struct net_device *dev)
+{
+ int err;
+
+ err = request_irq(dev->irq, mipsnet_interrupt,
+ IRQF_SHARED, dev->name, (void *) dev);
+ if (err) {
+ release_region(dev->base_addr, sizeof(struct mipsnet_regs));
+ return err;
+ }
+
+ netif_start_queue(dev);
+
+ /* test interrupt handler */
+ outl(MIPSNET_INTCTL_TESTBIT, regaddr(dev, interruptControl));
+
+ return 0;
+}
+
+static int mipsnet_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ free_irq(dev->irq, dev);
+ return 0;
+}
+
+static void mipsnet_set_mclist(struct net_device *dev)
+{
+}
+
+static const struct net_device_ops mipsnet_netdev_ops = {
+ .ndo_open = mipsnet_open,
+ .ndo_stop = mipsnet_close,
+ .ndo_start_xmit = mipsnet_xmit,
+ .ndo_set_rx_mode = mipsnet_set_mclist,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int __devinit mipsnet_probe(struct platform_device *dev)
+{
+ struct net_device *netdev;
+ int err;
+
+ netdev = alloc_etherdev(0);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ platform_set_drvdata(dev, netdev);
+
+ netdev->netdev_ops = &mipsnet_netdev_ops;
+
+ /*
+ * TODO: probe for these or load them from PARAM
+ */
+ netdev->base_addr = 0x4200;
+ netdev->irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB0 +
+ inl(regaddr(netdev, interruptInfo));
+
+ /* Get the io region now, get irq on open() */
+ if (!request_region(netdev->base_addr, sizeof(struct mipsnet_regs),
+ "mipsnet")) {
+ err = -EBUSY;
+ goto out_free_netdev;
+ }
+
+ /*
+ * Lacking any better mechanism to allocate a MAC address we use a
+ * random one ...
+ */
+ random_ether_addr(netdev->dev_addr);
+
+ err = register_netdev(netdev);
+ if (err) {
+ printk(KERN_ERR "MIPSNet: failed to register netdev.\n");
+ goto out_free_region;
+ }
+
+ return 0;
+
+out_free_region:
+ release_region(netdev->base_addr, sizeof(struct mipsnet_regs));
+
+out_free_netdev:
+ free_netdev(netdev);
+
+out:
+ return err;
+}
+
+static int __devexit mipsnet_device_remove(struct platform_device *device)
+{
+ struct net_device *dev = platform_get_drvdata(device);
+
+ unregister_netdev(dev);
+ release_region(dev->base_addr, sizeof(struct mipsnet_regs));
+ free_netdev(dev);
+ platform_set_drvdata(device, NULL);
+
+ return 0;
+}
+
+static struct platform_driver mipsnet_driver = {
+ .driver = {
+ .name = mipsnet_string,
+ .owner = THIS_MODULE,
+ },
+ .probe = mipsnet_probe,
+ .remove = __devexit_p(mipsnet_device_remove),
+};
+
+static int __init mipsnet_init_module(void)
+{
+ int err;
+
+ printk(KERN_INFO "MIPSNet Ethernet driver. Version: %s. "
+ "(c)2005 MIPS Technologies, Inc.\n", MIPSNET_VERSION);
+
+ err = platform_driver_register(&mipsnet_driver);
+ if (err)
+ printk(KERN_ERR "Driver registration failed\n");
+
+ return err;
+}
+
+static void __exit mipsnet_exit_module(void)
+{
+ platform_driver_unregister(&mipsnet_driver);
+}
+
+module_init(mipsnet_init_module);
+module_exit(mipsnet_exit_module);
diff --git a/drivers/net/ethernet/myricom/Kconfig b/drivers/net/ethernet/myricom/Kconfig
new file mode 100644
index 000000000000..540f0c6fc160
--- /dev/null
+++ b/drivers/net/ethernet/myricom/Kconfig
@@ -0,0 +1,47 @@
+#
+# Myricom device configuration
+#
+
+config NET_VENDOR_MYRI
+ bool "Myricom devices"
+ default y
+ depends on PCI && INET
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say
+ Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Myricom cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_MYRI
+
+config MYRI10GE
+ tristate "Myricom Myri-10G Ethernet support"
+ depends on PCI && INET
+ select FW_LOADER
+ select CRC32
+ select INET_LRO
+ ---help---
+ This driver supports the Myricom Myri-10G Dual Protocol interface
+ in Ethernet mode. If the eeprom on your board is not recent
+ enough, you will need a newer firmware image.
+ You may get this image, and more information, at:
+
+ <http://www.myri.com/scs/download-Myri10GE.html>
+
+ To compile this driver as a module, choose M here. The module
+ will be called myri10ge.
+
+config MYRI10GE_DCA
+ bool "Direct Cache Access (DCA) Support"
+ default y
+ depends on MYRI10GE && DCA && !(MYRI10GE=y && DCA=m)
+ ---help---
+ Say Y here if you want to use Direct Cache Access (DCA) in the
+ driver. DCA is a method for warming the CPU cache before data
+ is used, with the intent of lessening the impact of cache misses.
+
+endif # NET_VENDOR_MYRI
diff --git a/drivers/net/ethernet/myricom/Makefile b/drivers/net/ethernet/myricom/Makefile
new file mode 100644
index 000000000000..296c0a10056b
--- /dev/null
+++ b/drivers/net/ethernet/myricom/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Myricom network device drivers.
+#
+
+obj-$(CONFIG_MYRI10GE) += myri10ge/
diff --git a/drivers/net/ethernet/myricom/myri10ge/Makefile b/drivers/net/ethernet/myricom/myri10ge/Makefile
new file mode 100644
index 000000000000..5df891647aee
--- /dev/null
+++ b/drivers/net/ethernet/myricom/myri10ge/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Myricom Myri-10G ethernet driver
+#
+
+obj-$(CONFIG_MYRI10GE) += myri10ge.o
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
new file mode 100644
index 000000000000..26637279cd67
--- /dev/null
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -0,0 +1,4236 @@
+/*************************************************************************
+ * myri10ge.c: Myricom Myri-10G Ethernet driver.
+ *
+ * Copyright (C) 2005 - 2011 Myricom, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Myricom, Inc. nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * If the eeprom on your board is not recent enough, you will need to get a
+ * newer firmware image at:
+ * http://www.myri.com/scs/download-Myri10GE.html
+ *
+ * Contact Information:
+ * <help@myri.com>
+ * Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006
+ *************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/tcp.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/inet_lro.h>
+#include <linux/dca.h>
+#include <linux/ip.h>
+#include <linux/inet.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/vmalloc.h>
+#include <linux/crc32.h>
+#include <linux/moduleparam.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+#include <net/checksum.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#ifdef CONFIG_MTRR
+#include <asm/mtrr.h>
+#endif
+
+#include "myri10ge_mcp.h"
+#include "myri10ge_mcp_gen_header.h"
+
+#define MYRI10GE_VERSION_STR "1.5.3-1.534"
+
+MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
+MODULE_AUTHOR("Maintainer: help@myri.com");
+MODULE_VERSION(MYRI10GE_VERSION_STR);
+MODULE_LICENSE("Dual BSD/GPL");
+
+#define MYRI10GE_MAX_ETHER_MTU 9014
+
+#define MYRI10GE_ETH_STOPPED 0
+#define MYRI10GE_ETH_STOPPING 1
+#define MYRI10GE_ETH_STARTING 2
+#define MYRI10GE_ETH_RUNNING 3
+#define MYRI10GE_ETH_OPEN_FAILED 4
+
+#define MYRI10GE_EEPROM_STRINGS_SIZE 256
+#define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)
+#define MYRI10GE_MAX_LRO_DESCRIPTORS 8
+#define MYRI10GE_LRO_MAX_PKTS 64
+
+#define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
+#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff
+
+#define MYRI10GE_ALLOC_ORDER 0
+#define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE)
+#define MYRI10GE_MAX_FRAGS_PER_FRAME (MYRI10GE_MAX_ETHER_MTU/MYRI10GE_ALLOC_SIZE + 1)
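+/*
+ * Illustration only: with MYRI10GE_ALLOC_ORDER 0 on a 4 KiB-page
+ * system, MYRI10GE_ALLOC_SIZE is 4096, so a maximal 9014-byte frame
+ * needs 9014 / 4096 + 1 = 3 receive fragments.
+ */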
+
+#define MYRI10GE_MAX_SLICES 32
+
+struct myri10ge_rx_buffer_state {
+ struct page *page;
+ int page_offset;
+ DEFINE_DMA_UNMAP_ADDR(bus);
+ DEFINE_DMA_UNMAP_LEN(len);
+};
+
+struct myri10ge_tx_buffer_state {
+ struct sk_buff *skb;
+ int last;
+ DEFINE_DMA_UNMAP_ADDR(bus);
+ DEFINE_DMA_UNMAP_LEN(len);
+};
+
+struct myri10ge_cmd {
+ u32 data0;
+ u32 data1;
+ u32 data2;
+};
+
+struct myri10ge_rx_buf {
+ struct mcp_kreq_ether_recv __iomem *lanai; /* lanai ptr for recv ring */
+ struct mcp_kreq_ether_recv *shadow; /* host shadow of recv ring */
+ struct myri10ge_rx_buffer_state *info;
+ struct page *page;
+ dma_addr_t bus;
+ int page_offset;
+ int cnt;
+ int fill_cnt;
+ int alloc_fail;
+ int mask; /* number of rx slots -1 */
+ int watchdog_needed;
+};
+
+struct myri10ge_tx_buf {
+ struct mcp_kreq_ether_send __iomem *lanai; /* lanai ptr for sendq */
+ __be32 __iomem *send_go; /* "go" doorbell ptr */
+ __be32 __iomem *send_stop; /* "stop" doorbell ptr */
+ struct mcp_kreq_ether_send *req_list; /* host shadow of sendq */
+ char *req_bytes;
+ struct myri10ge_tx_buffer_state *info;
+ int mask; /* number of transmit slots -1 */
+ int req ____cacheline_aligned; /* transmit slots submitted */
+ int pkt_start; /* packets started */
+ int stop_queue;
+ int linearized;
+ int done ____cacheline_aligned; /* transmit slots completed */
+ int pkt_done; /* packets completed */
+ int wake_queue;
+ int queue_active;
+};
+
+struct myri10ge_rx_done {
+ struct mcp_slot *entry;
+ dma_addr_t bus;
+ int cnt;
+ int idx;
+ struct net_lro_mgr lro_mgr;
+ struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS];
+};
+
+struct myri10ge_slice_netstats {
+ unsigned long rx_packets;
+ unsigned long tx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_bytes;
+ unsigned long rx_dropped;
+ unsigned long tx_dropped;
+};
+
+struct myri10ge_slice_state {
+ struct myri10ge_tx_buf tx; /* transmit ring */
+ struct myri10ge_rx_buf rx_small;
+ struct myri10ge_rx_buf rx_big;
+ struct myri10ge_rx_done rx_done;
+ struct net_device *dev;
+ struct napi_struct napi;
+ struct myri10ge_priv *mgp;
+ struct myri10ge_slice_netstats stats;
+ __be32 __iomem *irq_claim;
+ struct mcp_irq_data *fw_stats;
+ dma_addr_t fw_stats_bus;
+ int watchdog_tx_done;
+ int watchdog_tx_req;
+ int watchdog_rx_done;
+ int stuck;
+#ifdef CONFIG_MYRI10GE_DCA
+ int cached_dca_tag;
+ int cpu;
+ __be32 __iomem *dca_tag;
+#endif
+ char irq_desc[32];
+};
+
+struct myri10ge_priv {
+ struct myri10ge_slice_state *ss;
+ int tx_boundary; /* boundary transmits cannot cross */
+ int num_slices;
+ int running; /* running? */
+ int small_bytes;
+ int big_bytes;
+ int max_intr_slots;
+ struct net_device *dev;
+ u8 __iomem *sram;
+ int sram_size;
+ unsigned long board_span;
+ unsigned long iomem_base;
+ __be32 __iomem *irq_deassert;
+ char *mac_addr_string;
+ struct mcp_cmd_response *cmd;
+ dma_addr_t cmd_bus;
+ struct pci_dev *pdev;
+ int msi_enabled;
+ int msix_enabled;
+ struct msix_entry *msix_vectors;
+#ifdef CONFIG_MYRI10GE_DCA
+ int dca_enabled;
+ int relaxed_order;
+#endif
+ u32 link_state;
+ unsigned int rdma_tags_available;
+ int intr_coal_delay;
+ __be32 __iomem *intr_coal_delay_ptr;
+ int mtrr;
+ int wc_enabled;
+ int down_cnt;
+ wait_queue_head_t down_wq;
+ struct work_struct watchdog_work;
+ struct timer_list watchdog_timer;
+ int watchdog_resets;
+ int watchdog_pause;
+ int pause;
+ bool fw_name_allocated;
+ char *fw_name;
+ char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
+ char *product_code_string;
+ char fw_version[128];
+ int fw_ver_major;
+ int fw_ver_minor;
+ int fw_ver_tiny;
+ int adopted_rx_filter_bug;
+ u8 mac_addr[6]; /* eeprom mac address */
+ unsigned long serial_number;
+ int vendor_specific_offset;
+ int fw_multicast_support;
+ u32 features;
+ u32 max_tso6;
+ u32 read_dma;
+ u32 write_dma;
+ u32 read_write_dma;
+ u32 link_changes;
+ u32 msg_enable;
+ unsigned int board_number;
+ int rebooted;
+};
+
+static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
+static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
+static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat";
+static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat";
+MODULE_FIRMWARE("myri10ge_ethp_z8e.dat");
+MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
+MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
+MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");
+
+/* Careful: must be accessed under kparam_block_sysfs_write */
+static char *myri10ge_fw_name = NULL;
+module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");
+
+#define MYRI10GE_MAX_BOARDS 8
+static char *myri10ge_fw_names[MYRI10GE_MAX_BOARDS] =
+ {[0 ... (MYRI10GE_MAX_BOARDS - 1)] = NULL };
+module_param_array_named(myri10ge_fw_names, myri10ge_fw_names, charp, NULL,
+ 0444);
+MODULE_PARM_DESC(myri10ge_fw_names, "Firmware image names per board");
+
+static int myri10ge_ecrc_enable = 1;
+module_param(myri10ge_ecrc_enable, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E");
+
+static int myri10ge_small_bytes = -1; /* -1 == auto */
+module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets");
+
+static int myri10ge_msi = 1; /* enable msi by default */
+module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts");
+
+static int myri10ge_intr_coal_delay = 75;
+module_param(myri10ge_intr_coal_delay, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay");
+
+static int myri10ge_flow_control = 1;
+module_param(myri10ge_flow_control, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter");
+
+static int myri10ge_deassert_wait = 1;
+module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(myri10ge_deassert_wait,
+ "Wait when deasserting legacy interrupts");
+
+static int myri10ge_force_firmware = 0;
+module_param(myri10ge_force_firmware, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_force_firmware,
+ "Force firmware to assume aligned completions");
+
+static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
+module_param(myri10ge_initial_mtu, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU");
+
+static int myri10ge_napi_weight = 64;
+module_param(myri10ge_napi_weight, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight");
+
+static int myri10ge_watchdog_timeout = 1;
+module_param(myri10ge_watchdog_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout");
+
+static int myri10ge_max_irq_loops = 1048576;
+module_param(myri10ge_max_irq_loops, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_max_irq_loops,
+ "Set stuck legacy IRQ detection threshold");
+
+#define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK
+
+static int myri10ge_debug = -1; /* defaults above */
+module_param(myri10ge_debug, int, 0);
+MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
+
+static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS;
+module_param(myri10ge_lro_max_pkts, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_lro_max_pkts,
+ "Number of LRO packets to be aggregated");
+
+static int myri10ge_fill_thresh = 256;
+module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");
+
+static int myri10ge_reset_recover = 1;
+
+static int myri10ge_max_slices = 1;
+module_param(myri10ge_max_slices, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");
+
+static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
+module_param(myri10ge_rss_hash, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do");
+
+static int myri10ge_dca = 1;
+module_param(myri10ge_dca, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_dca, "Enable DCA if possible");
+
+#define MYRI10GE_FW_OFFSET (1024 * 1024)
+#define MYRI10GE_HIGHPART_TO_U32(X) \
+((sizeof(X) == 8) ? ((u32)((u64)(X) >> 32)) : (0))
+#define MYRI10GE_LOWPART_TO_U32(X) ((u32)(X))
+
+#define myri10ge_pio_copy(to, from, size) __iowrite64_copy(to, from, (size) / 8)
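+
+/*
+ * Example (illustrative only): for a 64-bit dma_addr_t X equal to
+ * 0x000000123456789aULL, MYRI10GE_HIGHPART_TO_U32(X) is 0x00000012 and
+ * MYRI10GE_LOWPART_TO_U32(X) is 0x3456789a; when dma_addr_t is 32 bits
+ * wide, the high part is simply 0.
+ */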
+
+static void myri10ge_set_multicast_list(struct net_device *dev);
+static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
+ struct net_device *dev);
+
+static inline void put_be32(__be32 val, __be32 __iomem * p)
+{
+ __raw_writel((__force __u32) val, (__force void __iomem *)p);
+}
+
+static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
+
+static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
+{
+ if (mgp->fw_name_allocated)
+ kfree(mgp->fw_name);
+ mgp->fw_name = name;
+ mgp->fw_name_allocated = allocated;
+}
+
+static int
+myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
+ struct myri10ge_cmd *data, int atomic)
+{
+ struct mcp_cmd *buf;
+ char buf_bytes[sizeof(*buf) + 8];
+ struct mcp_cmd_response *response = mgp->cmd;
+ char __iomem *cmd_addr = mgp->sram + MXGEFW_ETH_CMD;
+ u32 dma_low, dma_high, result, value;
+ int sleep_total = 0;
+
+ /* ensure buf is aligned to 8 bytes */
+ buf = (struct mcp_cmd *)ALIGN((unsigned long)buf_bytes, 8);
+
+ buf->data0 = htonl(data->data0);
+ buf->data1 = htonl(data->data1);
+ buf->data2 = htonl(data->data2);
+ buf->cmd = htonl(cmd);
+ dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
+ dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
+
+ buf->response_addr.low = htonl(dma_low);
+ buf->response_addr.high = htonl(dma_high);
+ response->result = htonl(MYRI10GE_NO_RESPONSE_RESULT);
+ mb();
+ myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf));
+
+ /* wait up to 15ms. Longest command is the DMA benchmark,
+ * which is capped at 5ms, but runs from a timeout handler
+ * that runs every 7.8ms. So a 15ms timeout leaves us with
+ * a 2.2ms margin
+ */
+ if (atomic) {
+ /* if atomic is set, do not sleep,
+ * and try to get the completion quickly
+ * (1ms will be enough for those commands) */
+ for (sleep_total = 0;
+ sleep_total < 1000 &&
+ response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
+ sleep_total += 10) {
+ udelay(10);
+ mb();
+ }
+ } else {
+ /* use msleep for most commands */
+ for (sleep_total = 0;
+ sleep_total < 15 &&
+ response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
+ sleep_total++)
+ msleep(1);
+ }
+
+ result = ntohl(response->result);
+ value = ntohl(response->data);
+ if (result != MYRI10GE_NO_RESPONSE_RESULT) {
+ if (result == 0) {
+ data->data0 = value;
+ return 0;
+ } else if (result == MXGEFW_CMD_UNKNOWN) {
+ return -ENOSYS;
+ } else if (result == MXGEFW_CMD_ERROR_UNALIGNED) {
+ return -E2BIG;
+ } else if (result == MXGEFW_CMD_ERROR_RANGE &&
+ cmd == MXGEFW_CMD_ENABLE_RSS_QUEUES &&
+ (data->
+ data1 & MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES) !=
+ 0) {
+ return -ERANGE;
+ } else {
+ dev_err(&mgp->pdev->dev,
+ "command %d failed, result = %d\n",
+ cmd, result);
+ return -ENXIO;
+ }
+ }
+
+ dev_err(&mgp->pdev->dev, "command %d timed out, result = %d\n",
+ cmd, result);
+ return -EAGAIN;
+}
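+
+/*
+ * Minimal caller sketch (illustration only; mirrors the real callers
+ * below). On success any result is returned in cmd.data0:
+ *
+ *    struct myri10ge_cmd cmd;
+ *    int status;
+ *
+ *    memset(&cmd, 0, sizeof(cmd));
+ *    status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
+ *    if (status == 0)
+ *            rx_ring_size = cmd.data0;
+ */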
+
+/*
+ * The eeprom strings on the lanaiX have the format
+ * SN=x\0
+ * MAC=x:x:x:x:x:x\0
+ * PT:ddd mmm xx xx:xx:xx xx\0
+ * PV:ddd mmm xx xx:xx:xx xx\0
+ */
+static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp)
+{
+ char *ptr, *limit;
+ int i;
+
+ ptr = mgp->eeprom_strings;
+ limit = mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE;
+
+ /* check the limit before dereferencing ptr */
+ while (ptr < limit && *ptr != '\0') {
+ if (memcmp(ptr, "MAC=", 4) == 0) {
+ ptr += 4;
+ mgp->mac_addr_string = ptr;
+ for (i = 0; i < 6; i++) {
+ if ((ptr + 2) > limit)
+ goto abort;
+ mgp->mac_addr[i] =
+ simple_strtoul(ptr, &ptr, 16);
+ ptr += 1;
+ }
+ }
+ if (memcmp(ptr, "PC=", 3) == 0) {
+ ptr += 3;
+ mgp->product_code_string = ptr;
+ }
+ if (memcmp(ptr, "SN=", 3) == 0) {
+ ptr += 3;
+ mgp->serial_number = simple_strtoul(ptr, &ptr, 10);
+ }
+ while (ptr < limit && *ptr++) ;
+ }
+
+ return 0;
+
+abort:
+ dev_err(&mgp->pdev->dev, "failed to parse eeprom_strings\n");
+ return -ENXIO;
+}
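+
+/*
+ * Illustrative example (hypothetical values): for eeprom_strings of
+ * "SN=123456\0MAC=00:60:dd:48:8e:70\0PC=10G-PCIE-8A-C\0\0" the parser
+ * above leaves mac_addr = {0x00, 0x60, 0xdd, 0x48, 0x8e, 0x70},
+ * serial_number = 123456, and product_code_string pointing at
+ * "10G-PCIE-8A-C".
+ */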
+
+/*
+ * Enable or disable periodic RDMAs from the host to make certain
+ * chipsets resend dropped PCIe messages
+ */
+
+static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
+{
+ char __iomem *submit;
+ __be32 buf[16] __attribute__ ((__aligned__(8)));
+ u32 dma_low, dma_high;
+ int i;
+
+ /* clear confirmation addr */
+ mgp->cmd->data = 0;
+ mb();
+
+ /* send a rdma command to the PCIe engine, and wait for the
+ * response in the confirmation address. The firmware should
+ * write a -1 there to indicate it is alive and well
+ */
+ dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
+ dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
+
+ buf[0] = htonl(dma_high); /* confirm addr MSW */
+ buf[1] = htonl(dma_low); /* confirm addr LSW */
+ buf[2] = MYRI10GE_NO_CONFIRM_DATA; /* confirm data */
+ buf[3] = htonl(dma_high); /* dummy addr MSW */
+ buf[4] = htonl(dma_low); /* dummy addr LSW */
+ buf[5] = htonl(enable); /* enable? */
+
+ submit = mgp->sram + MXGEFW_BOOT_DUMMY_RDMA;
+
+ myri10ge_pio_copy(submit, &buf, sizeof(buf));
+ for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
+ msleep(1);
+ if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA)
+ dev_err(&mgp->pdev->dev, "dummy rdma %s failed\n",
+ (enable ? "enable" : "disable"));
+}
+
+static int
+myri10ge_validate_firmware(struct myri10ge_priv *mgp,
+ struct mcp_gen_header *hdr)
+{
+ struct device *dev = &mgp->pdev->dev;
+
+ /* check firmware type */
+ if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
+ dev_err(dev, "Bad firmware type: 0x%x\n", ntohl(hdr->mcp_type));
+ return -EINVAL;
+ }
+
+ /* save firmware version for ethtool */
+ strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));
+
+ sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major,
+ &mgp->fw_ver_minor, &mgp->fw_ver_tiny);
+
+ if (!(mgp->fw_ver_major == MXGEFW_VERSION_MAJOR &&
+ mgp->fw_ver_minor == MXGEFW_VERSION_MINOR)) {
+ dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
+ dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
+ MXGEFW_VERSION_MINOR);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
+{
+ unsigned crc, reread_crc;
+ const struct firmware *fw;
+ struct device *dev = &mgp->pdev->dev;
+ unsigned char *fw_readback;
+ struct mcp_gen_header *hdr;
+ size_t hdr_offset;
+ int status;
+ unsigned i;
+
+ status = request_firmware(&fw, mgp->fw_name, dev);
+ if (status < 0) {
+ dev_err(dev, "Unable to load %s firmware image via hotplug\n",
+ mgp->fw_name);
+ status = -EINVAL;
+ goto abort_with_nothing;
+ }
+
+ /* check size */
+
+ if (fw->size >= mgp->sram_size - MYRI10GE_FW_OFFSET ||
+ fw->size < MCP_HEADER_PTR_OFFSET + 4) {
+ dev_err(dev, "Firmware size invalid:%d\n", (int)fw->size);
+ status = -EINVAL;
+ goto abort_with_fw;
+ }
+
+ /* check id */
+ hdr_offset = ntohl(*(__be32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
+ if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) {
+ dev_err(dev, "Bad firmware file\n");
+ status = -EINVAL;
+ goto abort_with_fw;
+ }
+ hdr = (void *)(fw->data + hdr_offset);
+
+ status = myri10ge_validate_firmware(mgp, hdr);
+ if (status != 0)
+ goto abort_with_fw;
+
+ crc = crc32(~0, fw->data, fw->size);
+ for (i = 0; i < fw->size; i += 256) {
+ myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i,
+ fw->data + i,
+ min(256U, (unsigned)(fw->size - i)));
+ mb();
+ readb(mgp->sram);
+ }
+ fw_readback = vmalloc(fw->size);
+ if (!fw_readback) {
+ status = -ENOMEM;
+ goto abort_with_fw;
+ }
+ /* corruption checking is good for parity recovery and buggy chipsets */
+ memcpy_fromio(fw_readback, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
+ reread_crc = crc32(~0, fw_readback, fw->size);
+ vfree(fw_readback);
+ if (crc != reread_crc) {
+ dev_err(dev, "CRC failed(fw-len=%u), got 0x%x (expect 0x%x)\n",
+ (unsigned)fw->size, reread_crc, crc);
+ status = -EIO;
+ goto abort_with_fw;
+ }
+ *size = (u32) fw->size;
+
+abort_with_fw:
+ release_firmware(fw);
+
+abort_with_nothing:
+ return status;
+}
+
+static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
+{
+ struct mcp_gen_header *hdr;
+ struct device *dev = &mgp->pdev->dev;
+ const size_t bytes = sizeof(struct mcp_gen_header);
+ size_t hdr_offset;
+ int status;
+
+ /* find running firmware header */
+ hdr_offset = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));
+
+ if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > mgp->sram_size) {
+ dev_err(dev, "Running firmware has bad header offset (%d)\n",
+ (int)hdr_offset);
+ return -EIO;
+ }
+
+ /* copy header of running firmware from SRAM to host memory to
+ * validate firmware */
+ hdr = kmalloc(bytes, GFP_KERNEL);
+ if (hdr == NULL) {
+ dev_err(dev, "could not malloc firmware hdr\n");
+ return -ENOMEM;
+ }
+ memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
+ status = myri10ge_validate_firmware(mgp, hdr);
+ kfree(hdr);
+
+ /* check to see if adopted firmware has bug where adopting
+ * it will cause broadcasts to be filtered unless the NIC
+ * is kept in ALLMULTI mode */
+ if (mgp->fw_ver_major == 1 && mgp->fw_ver_minor == 4 &&
+ mgp->fw_ver_tiny >= 4 && mgp->fw_ver_tiny <= 11) {
+ mgp->adopted_rx_filter_bug = 1;
+ dev_warn(dev, "Adopting fw %d.%d.%d: "
+ "working around rx filter bug\n",
+ mgp->fw_ver_major, mgp->fw_ver_minor,
+ mgp->fw_ver_tiny);
+ }
+ return status;
+}
+
+static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
+{
+ struct myri10ge_cmd cmd;
+ int status;
+
+ /* probe for IPv6 TSO support */
+ mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
+ &cmd, 0);
+ if (status == 0) {
+ mgp->max_tso6 = cmd.data0;
+ mgp->features |= NETIF_F_TSO6;
+ }
+
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
+ if (status != 0) {
+ dev_err(&mgp->pdev->dev,
+ "failed MXGEFW_CMD_GET_RX_RING_SIZE\n");
+ return -ENXIO;
+ }
+
+ mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr));
+
+ return 0;
+}
+
+static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
+{
+ char __iomem *submit;
+ __be32 buf[16] __attribute__ ((__aligned__(8)));
+ u32 dma_low, dma_high, size;
+ int status, i;
+
+ size = 0;
+ status = myri10ge_load_hotplug_firmware(mgp, &size);
+ if (status) {
+ if (!adopt)
+ return status;
+ dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n");
+
+ /* Do not attempt to adopt firmware if there
+ * was a bad crc */
+ if (status == -EIO)
+ return status;
+
+ status = myri10ge_adopt_running_firmware(mgp);
+ if (status != 0) {
+ dev_err(&mgp->pdev->dev,
+ "failed to adopt running firmware\n");
+ return status;
+ }
+ dev_info(&mgp->pdev->dev,
+ "Successfully adopted running firmware\n");
+ if (mgp->tx_boundary == 4096) {
+ dev_warn(&mgp->pdev->dev,
+ "Using firmware currently running on NIC"
+ ". For optimal\n");
+ dev_warn(&mgp->pdev->dev,
+ "performance consider loading optimized "
+ "firmware\n");
+ dev_warn(&mgp->pdev->dev, "via hotplug\n");
+ }
+
+ set_fw_name(mgp, "adopted", false);
+ mgp->tx_boundary = 2048;
+ myri10ge_dummy_rdma(mgp, 1);
+ status = myri10ge_get_firmware_capabilities(mgp);
+ return status;
+ }
+
+ /* clear confirmation addr */
+ mgp->cmd->data = 0;
+ mb();
+
+ /* send a reload command to the bootstrap MCP, and wait for the
+ * response in the confirmation address. The firmware should
+ * write a -1 there to indicate it is alive and well
+ */
+ dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
+ dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
+
+ buf[0] = htonl(dma_high); /* confirm addr MSW */
+ buf[1] = htonl(dma_low); /* confirm addr LSW */
+ buf[2] = MYRI10GE_NO_CONFIRM_DATA; /* confirm data */
+
+ /* FIX: All newer firmware should un-protect the bottom of
+ * the sram before handoff. However, the very first interfaces
+ * do not. Therefore the handoff copy must skip the first 8 bytes.
+ */
+ buf[3] = htonl(MYRI10GE_FW_OFFSET + 8); /* where the code starts */
+ buf[4] = htonl(size - 8); /* length of code */
+ buf[5] = htonl(8); /* where to copy to */
+ buf[6] = htonl(0); /* where to jump to */
+
+ submit = mgp->sram + MXGEFW_BOOT_HANDOFF;
+
+ myri10ge_pio_copy(submit, &buf, sizeof(buf));
+ mb();
+ msleep(1);
+ mb();
+ i = 0;
+ while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) {
+ msleep(1 << i);
+ i++;
+ }
+ if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) {
+ dev_err(&mgp->pdev->dev, "handoff failed\n");
+ return -ENXIO;
+ }
+ myri10ge_dummy_rdma(mgp, 1);
+ status = myri10ge_get_firmware_capabilities(mgp);
+
+ return status;
+}
+
+static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
+{
+ struct myri10ge_cmd cmd;
+ int status;
+
+ cmd.data0 = ((addr[0] << 24) | (addr[1] << 16)
+ | (addr[2] << 8) | addr[3]);
+
+ cmd.data1 = ((addr[4] << 8) | (addr[5]));
+
+ status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0);
+ return status;
+}
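+
+/*
+ * For example (illustrative): a MAC address of 00:60:dd:48:8e:70 is
+ * handed to the firmware as data0 = 0x0060dd48 and data1 = 0x00008e70.
+ */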
+
+static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
+{
+ struct myri10ge_cmd cmd;
+ int status, ctl;
+
+ ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL;
+ status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);
+
+ if (status) {
+ netdev_err(mgp->dev, "Failed to set flow control mode\n");
+ return status;
+ }
+ mgp->pause = pause;
+ return 0;
+}
+
+static void
+myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
+{
+ struct myri10ge_cmd cmd;
+ int status, ctl;
+
+ ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
+ status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
+ if (status)
+ netdev_err(mgp->dev, "Failed to set promisc mode\n");
+}
+
+static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
+{
+ struct myri10ge_cmd cmd;
+ int status;
+ u32 len;
+ struct page *dmatest_page;
+ dma_addr_t dmatest_bus;
+ char *test = " ";
+
+ dmatest_page = alloc_page(GFP_KERNEL);
+ if (!dmatest_page)
+ return -ENOMEM;
+ dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ /* Run a small DMA test.
+ * The magic multipliers to the length tell the firmware
+ * to do DMA read, write, or read+write tests. The
+ * results are returned in cmd.data0. The upper 16
+ * bits of the return value are the number of transfers
+ * completed. The lower 16 bits are the time in 0.5us
+ * ticks that the transfers took to complete.
+ */
+
+ len = mgp->tx_boundary;
+
+ cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
+ cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
+ cmd.data2 = len * 0x10000;
+ status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
+ if (status != 0) {
+ test = "read";
+ goto abort;
+ }
+ mgp->read_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);
+ cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
+ cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
+ cmd.data2 = len * 0x1;
+ status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
+ if (status != 0) {
+ test = "write";
+ goto abort;
+ }
+ mgp->write_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);
+
+ cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
+ cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
+ cmd.data2 = len * 0x10001;
+ status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
+ if (status != 0) {
+ test = "read/write";
+ goto abort;
+ }
+ mgp->read_write_dma = ((cmd.data0 >> 16) * len * 2 * 2) /
+ (cmd.data0 & 0xffff);
+
+abort:
+ pci_unmap_page(mgp->pdev, dmatest_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ put_page(dmatest_page);
+
+ if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST)
+ dev_warn(&mgp->pdev->dev, "DMA %s benchmark failed: %d\n",
+ test, status);
+
+ return status;
+}
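+
+/*
+ * Worked example (illustrative numbers): with len == 2048 and a
+ * returned cmd.data0 of (1000 << 16) | 2000, i.e. 1000 transfers in
+ * 2000 half-microsecond ticks (1ms), the read bandwidth evaluates to
+ * (1000 * 2048 * 2) / 2000 = 2048 MB/s.
+ */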
+
+static int myri10ge_reset(struct myri10ge_priv *mgp)
+{
+ struct myri10ge_cmd cmd;
+ struct myri10ge_slice_state *ss;
+ int i, status;
+ size_t bytes;
+#ifdef CONFIG_MYRI10GE_DCA
+ unsigned long dca_tag_off;
+#endif
+
+ /* try to send a reset command to the card to see if it
+ * is alive */
+ memset(&cmd, 0, sizeof(cmd));
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
+ if (status != 0) {
+ dev_err(&mgp->pdev->dev, "failed reset\n");
+ return -ENXIO;
+ }
+
+ (void)myri10ge_dma_test(mgp, MXGEFW_DMA_TEST);
+ /*
+ * Use the non-ndis mcp_slot (e.g., 4 bytes total,
+ * no Toeplitz hash value returned). Older firmware will
+ * not understand this command, but will use the correctly
+ * sized mcp_slot, so we ignore error returns.
+ */
+ cmd.data0 = MXGEFW_RSS_MCP_SLOT_TYPE_MIN;
+ (void)myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, &cmd, 0);
+
+ /* Now exchange information about interrupts */
+
+ bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry);
+ cmd.data0 = (u32) bytes;
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
+
+ /*
+ * Even though we already know how many slices are supported
+ * via myri10ge_probe_slices(), MXGEFW_CMD_GET_MAX_RSS_QUEUES
+ * has magic side effects, and must be called after a reset.
+ * It must be called prior to calling any RSS related cmds,
+ * including assigning an interrupt queue for anything but
+ * slice 0. It must also be called *after*
+ * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by
+ * the firmware to compute offsets.
+ */
+
+ if (mgp->num_slices > 1) {
+
+ /* ask the firmware for the maximum number of slices it supports */
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES,
+ &cmd, 0);
+ if (status != 0) {
+ dev_err(&mgp->pdev->dev,
+ "failed to get number of slices\n");
+ }
+
+ /*
+ * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior
+ * to setting up the interrupt queue DMA
+ */
+
+ cmd.data0 = mgp->num_slices;
+ cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
+ if (mgp->dev->real_num_tx_queues > 1)
+ cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
+ &cmd, 0);
+
+ /* Firmware older than 1.4.32 only supports multiple
+ * RX queues, so if we get an error, first retry using a
+ * single TX queue before giving up */
+ if (status != 0 && mgp->dev->real_num_tx_queues > 1) {
+ netif_set_real_num_tx_queues(mgp->dev, 1);
+ cmd.data0 = mgp->num_slices;
+ cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
+ status = myri10ge_send_cmd(mgp,
+ MXGEFW_CMD_ENABLE_RSS_QUEUES,
+ &cmd, 0);
+ }
+
+ if (status != 0) {
+ dev_err(&mgp->pdev->dev,
+ "failed to set number of slices\n");
+
+ return status;
+ }
+ }
+ for (i = 0; i < mgp->num_slices; i++) {
+ ss = &mgp->ss[i];
+ cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus);
+ cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus);
+ cmd.data2 = i;
+ status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA,
+ &cmd, 0);
+ }
+
+ status |=
+ myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
+ for (i = 0; i < mgp->num_slices; i++) {
+ ss = &mgp->ss[i];
+ ss->irq_claim =
+ (__iomem __be32 *) (mgp->sram + cmd.data0 + 8 * i);
+ }
+ status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
+ &cmd, 0);
+ mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);
+
+ status |= myri10ge_send_cmd
+ (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0);
+ mgp->intr_coal_delay_ptr = (__iomem __be32 *) (mgp->sram + cmd.data0);
+ if (status != 0) {
+ dev_err(&mgp->pdev->dev, "failed set interrupt parameters\n");
+ return status;
+ }
+ put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
+
+#ifdef CONFIG_MYRI10GE_DCA
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0);
+ dca_tag_off = cmd.data0;
+ for (i = 0; i < mgp->num_slices; i++) {
+ ss = &mgp->ss[i];
+ if (status == 0) {
+ ss->dca_tag = (__iomem __be32 *)
+ (mgp->sram + dca_tag_off + 4 * i);
+ } else {
+ ss->dca_tag = NULL;
+ }
+ }
+#endif /* CONFIG_MYRI10GE_DCA */
+
+ /* reset mcp/driver shared state back to 0 */
+
+ mgp->link_changes = 0;
+ for (i = 0; i < mgp->num_slices; i++) {
+ ss = &mgp->ss[i];
+
+ memset(ss->rx_done.entry, 0, bytes);
+ ss->tx.req = 0;
+ ss->tx.done = 0;
+ ss->tx.pkt_start = 0;
+ ss->tx.pkt_done = 0;
+ ss->rx_big.cnt = 0;
+ ss->rx_small.cnt = 0;
+ ss->rx_done.idx = 0;
+ ss->rx_done.cnt = 0;
+ ss->tx.wake_queue = 0;
+ ss->tx.stop_queue = 0;
+ }
+
+ status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
+ myri10ge_change_pause(mgp, mgp->pause);
+ myri10ge_set_multicast_list(mgp->dev);
+ return status;
+}
+
+#ifdef CONFIG_MYRI10GE_DCA
+static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on)
+{
+ int ret, cap, err;
+ u16 ctl;
+
+ cap = pci_pcie_cap(pdev);
+ if (!cap)
+ return 0;
+
+ err = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
+ if (err)
+ return 0;
+
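+ /* PCI_EXP_DEVCTL_RELAX_EN is bit 4 of the device control
+ * register, so the shift below yields 0 or 1 */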
+ ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4;
+ if (ret != on) {
+ ctl &= ~PCI_EXP_DEVCTL_RELAX_EN;
+ ctl |= (on << 4);
+ pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
+ }
+ return ret;
+}
+
+static void
+myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
+{
+ ss->cached_dca_tag = tag;
+ put_be32(htonl(tag), ss->dca_tag);
+}
+
+static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
+{
+ int cpu = get_cpu();
+ int tag;
+
+ if (cpu != ss->cpu) {
+ tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
+ if (ss->cached_dca_tag != tag)
+ myri10ge_write_dca(ss, cpu, tag);
+ ss->cpu = cpu;
+ }
+ put_cpu();
+}
+
+static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
+{
+ int err, i;
+ struct pci_dev *pdev = mgp->pdev;
+
+ if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled)
+ return;
+ if (!myri10ge_dca) {
+ dev_err(&pdev->dev, "dca disabled by administrator\n");
+ return;
+ }
+ err = dca_add_requester(&pdev->dev);
+ if (err) {
+ if (err != -ENODEV)
+ dev_err(&pdev->dev,
+ "dca_add_requester() failed, err=%d\n", err);
+ return;
+ }
+ mgp->relaxed_order = myri10ge_toggle_relaxed(pdev, 0);
+ mgp->dca_enabled = 1;
+ for (i = 0; i < mgp->num_slices; i++) {
+ mgp->ss[i].cpu = -1;
+ mgp->ss[i].cached_dca_tag = -1;
+ myri10ge_update_dca(&mgp->ss[i]);
+ }
+}
+
+static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
+{
+ struct pci_dev *pdev = mgp->pdev;
+
+ if (!mgp->dca_enabled)
+ return;
+ mgp->dca_enabled = 0;
+ if (mgp->relaxed_order)
+ myri10ge_toggle_relaxed(pdev, 1);
+ dca_remove_requester(&pdev->dev);
+}
+
+static int myri10ge_notify_dca_device(struct device *dev, void *data)
+{
+ struct myri10ge_priv *mgp;
+ unsigned long event;
+
+ mgp = dev_get_drvdata(dev);
+ event = *(unsigned long *)data;
+
+ if (event == DCA_PROVIDER_ADD)
+ myri10ge_setup_dca(mgp);
+ else if (event == DCA_PROVIDER_REMOVE)
+ myri10ge_teardown_dca(mgp);
+ return 0;
+}
+#endif /* CONFIG_MYRI10GE_DCA */
+
+static inline void
+myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
+ struct mcp_kreq_ether_recv *src)
+{
+ __be32 low;
+
+ low = src->addr_low;
+ src->addr_low = htonl(DMA_BIT_MASK(32));
+ myri10ge_pio_copy(dst, src, 4 * sizeof(*src));
+ mb();
+ myri10ge_pio_copy(dst + 4, src + 4, 4 * sizeof(*src));
+ mb();
+ src->addr_low = low;
+ put_be32(low, &dst->addr_low);
+ mb();
+}
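+
+/*
+ * Note (explanatory): the firmware treats an addr_low of 0xffffffff
+ * as "descriptor not yet valid", so the first descriptor's real
+ * addr_low is withheld while the two 4-descriptor bursts are copied,
+ * then written last to hand all 8 receive slots to the NIC at once.
+ */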
+
+static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
+{
+ struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);
+
+ if ((skb->protocol == htons(ETH_P_8021Q)) &&
+ (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
+ vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
+ skb->csum = hw_csum;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+}
+
+static inline void
+myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
+ struct skb_frag_struct *rx_frags, int len, int hlen)
+{
+ struct skb_frag_struct *skb_frags;
+
+ skb->len = skb->data_len = len;
+ skb->truesize = len + sizeof(struct sk_buff);
+ /* attach the page(s) */
+
+ skb_frags = skb_shinfo(skb)->frags;
+ while (len > 0) {
+ memcpy(skb_frags, rx_frags, sizeof(*skb_frags));
+ len -= rx_frags->size;
+ skb_frags++;
+ rx_frags++;
+ skb_shinfo(skb)->nr_frags++;
+ }
+
+ /* pskb_may_pull() is not available in irq context, but
+ * skb_pull() (for ether_pad and eth_type_trans()) requires
+ * the beginning of the packet in skb_headlen(); move it
+ * there manually */
+ skb_copy_to_linear_data(skb, va, hlen);
+ skb_shinfo(skb)->frags[0].page_offset += hlen;
+ skb_shinfo(skb)->frags[0].size -= hlen;
+ skb->data_len -= hlen;
+ skb->tail += hlen;
+ skb_pull(skb, MXGEFW_PAD);
+}
+
+static void
+myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
+ int bytes, int watchdog)
+{
+ struct page *page;
+ int idx;
+#if MYRI10GE_ALLOC_SIZE > 4096
+ int end_offset;
+#endif
+
+ if (unlikely(rx->watchdog_needed && !watchdog))
+ return;
+
+ /* try to refill entire ring */
+ while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) {
+ idx = rx->fill_cnt & rx->mask;
+ if (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE) {
+ /* we can use part of previous page */
+ get_page(rx->page);
+ } else {
+ /* we need a new page */
+ page =
+ alloc_pages(GFP_ATOMIC | __GFP_COMP,
+ MYRI10GE_ALLOC_ORDER);
+ if (unlikely(page == NULL)) {
+ if (rx->fill_cnt - rx->cnt < 16)
+ rx->watchdog_needed = 1;
+ return;
+ }
+ rx->page = page;
+ rx->page_offset = 0;
+ rx->bus = pci_map_page(mgp->pdev, page, 0,
+ MYRI10GE_ALLOC_SIZE,
+ PCI_DMA_FROMDEVICE);
+ }
+ rx->info[idx].page = rx->page;
+ rx->info[idx].page_offset = rx->page_offset;
+ /* note that this is the address of the start of the
+ * page */
+ dma_unmap_addr_set(&rx->info[idx], bus, rx->bus);
+ rx->shadow[idx].addr_low =
+ htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
+ rx->shadow[idx].addr_high =
+ htonl(MYRI10GE_HIGHPART_TO_U32(rx->bus));
+
+ /* start next packet on a cacheline boundary */
+ rx->page_offset += SKB_DATA_ALIGN(bytes);
+
+#if MYRI10GE_ALLOC_SIZE > 4096
+ /* don't cross a 4KB boundary */
+ end_offset = rx->page_offset + bytes - 1;
+ if ((unsigned)(rx->page_offset ^ end_offset) > 4095)
+ rx->page_offset = end_offset & ~4095;
+#endif
+ rx->fill_cnt++;
+
+ /* copy 8 descriptors to the firmware at a time */
+ if ((idx & 7) == 7) {
+ myri10ge_submit_8rx(&rx->lanai[idx - 7],
+ &rx->shadow[idx - 7]);
+ }
+ }
+}
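+
+/*
+ * Illustrative example: with MYRI10GE_ALLOC_SIZE == 4096 and
+ * bytes == 1536 (already a cacheline multiple), two buffers are
+ * carved from each page at offsets 0 and 1536; a third would end
+ * past the page, so a fresh page is mapped. get_page() above takes
+ * an extra reference for every buffer that shares a page.
+ */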
+
+static inline void
+myri10ge_unmap_rx_page(struct pci_dev *pdev,
+ struct myri10ge_rx_buffer_state *info, int bytes)
+{
+ /* unmap the received page if we're the only or last user of it */
+ if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
+ (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
+ pci_unmap_page(pdev, (dma_unmap_addr(info, bus)
+ & ~(MYRI10GE_ALLOC_SIZE - 1)),
+ MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ }
+}
+
+#define MYRI10GE_HLEN 64 /* The number of bytes to copy from a
+ * page into an skb */
+
+static inline int
+myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
+ bool lro_enabled)
+{
+ struct myri10ge_priv *mgp = ss->mgp;
+ struct sk_buff *skb;
+ struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
+ struct myri10ge_rx_buf *rx;
+ int i, idx, hlen, remainder, bytes;
+ struct pci_dev *pdev = mgp->pdev;
+ struct net_device *dev = mgp->dev;
+ u8 *va;
+
+ if (len <= mgp->small_bytes) {
+ rx = &ss->rx_small;
+ bytes = mgp->small_bytes;
+ } else {
+ rx = &ss->rx_big;
+ bytes = mgp->big_bytes;
+ }
+
+ len += MXGEFW_PAD;
+ idx = rx->cnt & rx->mask;
+ va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
+ prefetch(va);
+ /* Fill skb_frag_struct(s) with data from our receive */
+ for (i = 0, remainder = len; remainder > 0; i++) {
+ myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
+ __skb_frag_set_page(&rx_frags[i], rx->info[idx].page);
+ rx_frags[i].page_offset = rx->info[idx].page_offset;
+ if (remainder < MYRI10GE_ALLOC_SIZE)
+ rx_frags[i].size = remainder;
+ else
+ rx_frags[i].size = MYRI10GE_ALLOC_SIZE;
+ rx->cnt++;
+ idx = rx->cnt & rx->mask;
+ remainder -= MYRI10GE_ALLOC_SIZE;
+ }
+
+ if (lro_enabled) {
+ rx_frags[0].page_offset += MXGEFW_PAD;
+ rx_frags[0].size -= MXGEFW_PAD;
+ len -= MXGEFW_PAD;
+ lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags,
+ /* opaque, will come back in get_frag_header */
+ len, len,
+ (void *)(__force unsigned long)csum, csum);
+
+ return 1;
+ }
+
+ hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN;
+
+ /* allocate an skb to attach the page(s) to. This is done
+ * after trying LRO, so as to avoid skb allocation overheads */
+
+ skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
+ if (unlikely(skb == NULL)) {
+ ss->stats.rx_dropped++;
+ do {
+ i--;
+ __skb_frag_unref(&rx_frags[i]);
+ } while (i != 0);
+ return 0;
+ }
+
+ /* Attach the pages to the skb, and trim off any padding */
+ myri10ge_rx_skb_build(skb, va, rx_frags, len, hlen);
+ if (skb_shinfo(skb)->frags[0].size <= 0) {
+ skb_frag_unref(skb, 0);
+ skb_shinfo(skb)->nr_frags = 0;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ skb_record_rx_queue(skb, ss - &mgp->ss[0]);
+
+ if (dev->features & NETIF_F_RXCSUM) {
+ if ((skb->protocol == htons(ETH_P_IP)) ||
+ (skb->protocol == htons(ETH_P_IPV6))) {
+ skb->csum = csum;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ } else
+ myri10ge_vlan_ip_csum(skb, csum);
+ }
+ netif_receive_skb(skb);
+ return 1;
+}
+
+static inline void
+myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
+{
+ struct pci_dev *pdev = ss->mgp->pdev;
+ struct myri10ge_tx_buf *tx = &ss->tx;
+ struct netdev_queue *dev_queue;
+ struct sk_buff *skb;
+ int idx, len;
+
+ while (tx->pkt_done != mcp_index) {
+ idx = tx->done & tx->mask;
+ skb = tx->info[idx].skb;
+
+ /* Mark as free */
+ tx->info[idx].skb = NULL;
+ if (tx->info[idx].last) {
+ tx->pkt_done++;
+ tx->info[idx].last = 0;
+ }
+ tx->done++;
+ len = dma_unmap_len(&tx->info[idx], len);
+ dma_unmap_len_set(&tx->info[idx], len, 0);
+ if (skb) {
+ ss->stats.tx_bytes += skb->len;
+ ss->stats.tx_packets++;
+ dev_kfree_skb_irq(skb);
+ if (len)
+ pci_unmap_single(pdev,
+ dma_unmap_addr(&tx->info[idx],
+ bus), len,
+ PCI_DMA_TODEVICE);
+ } else {
+ if (len)
+ pci_unmap_page(pdev,
+ dma_unmap_addr(&tx->info[idx],
+ bus), len,
+ PCI_DMA_TODEVICE);
+ }
+ }
+
+ dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss);
+ /*
+ * Make a minimal effort to prevent the NIC from polling an
+ * idle tx queue. If we can't get the lock we leave the queue
+ * active. In this case, either a thread was about to start
+ * using the queue anyway, or we lost a race and the NIC will
+ * waste some of its resources polling an inactive queue for a
+ * while.
+ */
+
+ if ((ss->mgp->dev->real_num_tx_queues > 1) &&
+ __netif_tx_trylock(dev_queue)) {
+ if (tx->req == tx->done) {
+ tx->queue_active = 0;
+ put_be32(htonl(1), tx->send_stop);
+ mb();
+ mmiowb();
+ }
+ __netif_tx_unlock(dev_queue);
+ }
+
+ /* start the queue if we've stopped it */
+ if (netif_tx_queue_stopped(dev_queue) &&
+ tx->req - tx->done < (tx->mask >> 1) &&
+ ss->mgp->running == MYRI10GE_ETH_RUNNING) {
+ tx->wake_queue++;
+ netif_tx_wake_queue(dev_queue);
+ }
+}
+
+static inline int
+myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
+{
+ struct myri10ge_rx_done *rx_done = &ss->rx_done;
+ struct myri10ge_priv *mgp = ss->mgp;
+ unsigned long rx_bytes = 0;
+ unsigned long rx_packets = 0;
+ unsigned long rx_ok;
+ int idx = rx_done->idx;
+ int cnt = rx_done->cnt;
+ int work_done = 0;
+ u16 length;
+ __wsum checksum;
+
+ /*
+ * Prevent compiler from generating more than one ->features memory
+ * access to avoid theoretical race condition with functions that
+ * change NETIF_F_LRO flag at runtime.
+ */
+ bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO;
+
+ while (rx_done->entry[idx].length != 0 && work_done < budget) {
+ length = ntohs(rx_done->entry[idx].length);
+ rx_done->entry[idx].length = 0;
+ checksum = csum_unfold(rx_done->entry[idx].checksum);
+ rx_ok = myri10ge_rx_done(ss, length, checksum, lro_enabled);
+ rx_packets += rx_ok;
+ rx_bytes += rx_ok * (unsigned long)length;
+ cnt++;
+ idx = cnt & (mgp->max_intr_slots - 1);
+ work_done++;
+ }
+ rx_done->idx = idx;
+ rx_done->cnt = cnt;
+ ss->stats.rx_packets += rx_packets;
+ ss->stats.rx_bytes += rx_bytes;
+
+ if (lro_enabled)
+ lro_flush_all(&rx_done->lro_mgr);
+
+ /* restock receive rings if needed */
+ if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
+ myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
+ mgp->small_bytes + MXGEFW_PAD, 0);
+ if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh)
+ myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
+
+ return work_done;
+}
+
+static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
+{
+ struct mcp_irq_data *stats = mgp->ss[0].fw_stats;
+
+ if (unlikely(stats->stats_updated)) {
+ unsigned link_up = ntohl(stats->link_up);
+ if (mgp->link_state != link_up) {
+ mgp->link_state = link_up;
+
+ if (mgp->link_state == MXGEFW_LINK_UP) {
+ netif_info(mgp, link, mgp->dev, "link up\n");
+ netif_carrier_on(mgp->dev);
+ mgp->link_changes++;
+ } else {
+ netif_info(mgp, link, mgp->dev, "link %s\n",
+ (link_up == MXGEFW_LINK_MYRINET ?
+ "mismatch (Myrinet detected)" :
+ "down"));
+ netif_carrier_off(mgp->dev);
+ mgp->link_changes++;
+ }
+ }
+ if (mgp->rdma_tags_available !=
+ ntohl(stats->rdma_tags_available)) {
+ mgp->rdma_tags_available =
+ ntohl(stats->rdma_tags_available);
+ netdev_warn(mgp->dev, "RDMA timed out! %d tags left\n",
+ mgp->rdma_tags_available);
+ }
+ mgp->down_cnt += stats->link_down;
+ if (stats->link_down)
+ wake_up(&mgp->down_wq);
+ }
+}
+
+static int myri10ge_poll(struct napi_struct *napi, int budget)
+{
+ struct myri10ge_slice_state *ss =
+ container_of(napi, struct myri10ge_slice_state, napi);
+ int work_done;
+
+#ifdef CONFIG_MYRI10GE_DCA
+ if (ss->mgp->dca_enabled)
+ myri10ge_update_dca(ss);
+#endif
+
+ /* process as many rx events as NAPI will allow */
+ work_done = myri10ge_clean_rx_done(ss, budget);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ put_be32(htonl(3), ss->irq_claim);
+ }
+ return work_done;
+}
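+
+/*
+ * NAPI contract recap (explanatory): returning work_done == budget
+ * keeps this slice on the poll list; only when fewer events than the
+ * budget were consumed does the handler complete NAPI and write
+ * irq_claim so the firmware may raise interrupts again.
+ */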
+
+static irqreturn_t myri10ge_intr(int irq, void *arg)
+{
+ struct myri10ge_slice_state *ss = arg;
+ struct myri10ge_priv *mgp = ss->mgp;
+ struct mcp_irq_data *stats = ss->fw_stats;
+ struct myri10ge_tx_buf *tx = &ss->tx;
+ u32 send_done_count;
+ int i;
+
+ /* an interrupt on a non-zero receive-only slice is implicitly
+ * valid since MSI-X irqs are not shared */
+ if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
+ napi_schedule(&ss->napi);
+ return IRQ_HANDLED;
+ }
+
+ /* make sure it is our IRQ, and that the DMA has finished */
+ if (unlikely(!stats->valid))
+ return IRQ_NONE;
+
+ /* low bit indicates receives are present, so schedule
+ * napi poll handler */
+ if (stats->valid & 1)
+ napi_schedule(&ss->napi);
+
+ if (!mgp->msi_enabled && !mgp->msix_enabled) {
+ put_be32(0, mgp->irq_deassert);
+ if (!myri10ge_deassert_wait)
+ stats->valid = 0;
+ mb();
+ } else
+ stats->valid = 0;
+
+ /* Wait for IRQ line to go low, if using INTx */
+ i = 0;
+ while (1) {
+ i++;
+ /* check for transmit completes and receives */
+ send_done_count = ntohl(stats->send_done_count);
+ if (send_done_count != tx->pkt_done)
+ myri10ge_tx_done(ss, (int)send_done_count);
+ if (unlikely(i > myri10ge_max_irq_loops)) {
+ netdev_warn(mgp->dev, "irq stuck?\n");
+ stats->valid = 0;
+ schedule_work(&mgp->watchdog_work);
+ }
+ if (likely(stats->valid == 0))
+ break;
+ cpu_relax();
+ barrier();
+ }
+
+ /* Only slice 0 updates stats */
+ if (ss == mgp->ss)
+ myri10ge_check_statblock(mgp);
+
+ put_be32(htonl(3), ss->irq_claim + 1);
+ return IRQ_HANDLED;
+}
+
+static int
+myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ char *ptr;
+ int i;
+
+ cmd->autoneg = AUTONEG_DISABLE;
+ ethtool_cmd_speed_set(cmd, SPEED_10000);
+ cmd->duplex = DUPLEX_FULL;
+
+ /*
+ * parse the product code to determine the interface type
+ * (CX4, XFP, Quad Ribbon Fiber) by looking at the character
+ * after the 3rd dash in the driver's cached copy of the
+ * EEPROM's product code string.
+ */
+ ptr = mgp->product_code_string;
+ if (ptr == NULL) {
+ netdev_err(netdev, "Missing product code\n");
+ return 0;
+ }
+ for (i = 0; i < 3; i++, ptr++) {
+ ptr = strchr(ptr, '-');
+ if (ptr == NULL) {
+ netdev_err(netdev, "Invalid product code %s\n",
+ mgp->product_code_string);
+ return 0;
+ }
+ }
+ if (*ptr == '2')
+ ptr++;
+ if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
+ /* We've found either an XFP, quad ribbon fiber, or SFP+ */
+ cmd->port = PORT_FIBRE;
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_FIBRE;
+ } else {
+ cmd->port = PORT_OTHER;
+ }
+ if (*ptr == 'R' || *ptr == 'S')
+ cmd->transceiver = XCVR_EXTERNAL;
+ else
+ cmd->transceiver = XCVR_INTERNAL;
+
+ return 0;
+}
+
+static void
+myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+ strlcpy(info->driver, "myri10ge", sizeof(info->driver));
+ strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
+ strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
+ strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
+}
+
+static int
+myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+ coal->rx_coalesce_usecs = mgp->intr_coal_delay;
+ return 0;
+}
+
+static int
+myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+ mgp->intr_coal_delay = coal->rx_coalesce_usecs;
+ put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
+ return 0;
+}
+
+static void
+myri10ge_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+ pause->autoneg = 0;
+ pause->rx_pause = mgp->pause;
+ pause->tx_pause = mgp->pause;
+}
+
+static int
+myri10ge_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+ if (pause->tx_pause != mgp->pause)
+ return myri10ge_change_pause(mgp, pause->tx_pause);
+ if (pause->rx_pause != mgp->pause)
+ return myri10ge_change_pause(mgp, pause->rx_pause);
+ if (pause->autoneg != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static void
+myri10ge_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+ ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1;
+ ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1;
+ ring->rx_jumbo_max_pending = 0;
+ ring->tx_max_pending = mgp->ss[0].tx.mask + 1;
+ ring->rx_mini_pending = ring->rx_mini_max_pending;
+ ring->rx_pending = ring->rx_max_pending;
+ ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
+ ring->tx_pending = ring->tx_max_pending;
+}
+
+static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
+ "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
+ "rx_length_errors", "rx_over_errors", "rx_crc_errors",
+ "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
+ "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
+ "tx_heartbeat_errors", "tx_window_errors",
+ /* device-specific stats */
+ "tx_boundary", "WC", "irq", "MSI", "MSIX",
+ "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
+ "serial_number", "watchdog_resets",
+#ifdef CONFIG_MYRI10GE_DCA
+ "dca_capable_firmware", "dca_device_present",
+#endif
+ "link_changes", "link_up", "dropped_link_overflow",
+ "dropped_link_error_or_filtered",
+ "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32",
+ "dropped_unicast_filtered", "dropped_multicast_filtered",
+ "dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
+ "dropped_no_big_buffer"
+};
+
+static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
+ "----------- slice ---------",
+ "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
+ "rx_small_cnt", "rx_big_cnt",
+ "wake_queue", "stop_queue", "tx_linearized",
+ "LRO aggregated", "LRO flushed", "LRO avg aggr", "LRO no_desc",
+};
+
+#define MYRI10GE_NET_STATS_LEN 21
+#define MYRI10GE_MAIN_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_main_stats)
+#define MYRI10GE_SLICE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_slice_stats)
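+
+/*
+ * MYRI10GE_NET_STATS_LEN counts the leading strings above, which map
+ * 1:1 onto the first 21 u64 fields of struct rtnl_link_stats64
+ * (rx_packets through tx_window_errors) copied out in
+ * myri10ge_get_ethtool_stats() below.
+ */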
+
+static void
+myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, *myri10ge_gstrings_main_stats,
+ sizeof(myri10ge_gstrings_main_stats));
+ data += sizeof(myri10ge_gstrings_main_stats);
+ for (i = 0; i < mgp->num_slices; i++) {
+ memcpy(data, *myri10ge_gstrings_slice_stats,
+ sizeof(myri10ge_gstrings_slice_stats));
+ data += sizeof(myri10ge_gstrings_slice_stats);
+ }
+ break;
+ }
+}
+
+static int myri10ge_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return MYRI10GE_MAIN_STATS_LEN +
+ mgp->num_slices * MYRI10GE_SLICE_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+myri10ge_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 * data)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ struct myri10ge_slice_state *ss;
+ struct rtnl_link_stats64 link_stats;
+ int slice;
+ int i;
+
+ /* force stats update */
+ memset(&link_stats, 0, sizeof(link_stats));
+ (void)myri10ge_get_stats(netdev, &link_stats);
+ for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
+ data[i] = ((u64 *)&link_stats)[i];
+
+ data[i++] = (unsigned int)mgp->tx_boundary;
+ data[i++] = (unsigned int)mgp->wc_enabled;
+ data[i++] = (unsigned int)mgp->pdev->irq;
+ data[i++] = (unsigned int)mgp->msi_enabled;
+ data[i++] = (unsigned int)mgp->msix_enabled;
+ data[i++] = (unsigned int)mgp->read_dma;
+ data[i++] = (unsigned int)mgp->write_dma;
+ data[i++] = (unsigned int)mgp->read_write_dma;
+ data[i++] = (unsigned int)mgp->serial_number;
+ data[i++] = (unsigned int)mgp->watchdog_resets;
+#ifdef CONFIG_MYRI10GE_DCA
+ data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
+ data[i++] = (unsigned int)(mgp->dca_enabled);
+#endif
+ data[i++] = (unsigned int)mgp->link_changes;
+
+ /* firmware stats are useful only in the first slice */
+ ss = &mgp->ss[0];
+ data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up);
+ data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow);
+ data[i++] =
+ (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered);
+ data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause);
+ data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy);
+ data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32);
+ data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered);
+ data[i++] =
+ (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered);
+ data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt);
+ data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun);
+ data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer);
+ data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer);
+
+ for (slice = 0; slice < mgp->num_slices; slice++) {
+ ss = &mgp->ss[slice];
+ data[i++] = slice;
+ data[i++] = (unsigned int)ss->tx.pkt_start;
+ data[i++] = (unsigned int)ss->tx.pkt_done;
+ data[i++] = (unsigned int)ss->tx.req;
+ data[i++] = (unsigned int)ss->tx.done;
+ data[i++] = (unsigned int)ss->rx_small.cnt;
+ data[i++] = (unsigned int)ss->rx_big.cnt;
+ data[i++] = (unsigned int)ss->tx.wake_queue;
+ data[i++] = (unsigned int)ss->tx.stop_queue;
+ data[i++] = (unsigned int)ss->tx.linearized;
+ data[i++] = ss->rx_done.lro_mgr.stats.aggregated;
+ data[i++] = ss->rx_done.lro_mgr.stats.flushed;
+ if (ss->rx_done.lro_mgr.stats.flushed)
+ data[i++] = ss->rx_done.lro_mgr.stats.aggregated /
+ ss->rx_done.lro_mgr.stats.flushed;
+ else
+ data[i++] = 0;
+ data[i++] = ss->rx_done.lro_mgr.stats.no_desc;
+ }
+}
+
+static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ mgp->msg_enable = value;
+}
+
+static u32 myri10ge_get_msglevel(struct net_device *netdev)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ return mgp->msg_enable;
+}
+
+/*
+ * Use a low-level command to change the LED behavior. Rather than
+ * blinking (which is the normal case), when identify is used, the
+ * yellow LED turns solid.
+ */
+static int myri10ge_led(struct myri10ge_priv *mgp, int on)
+{
+ struct mcp_gen_header *hdr;
+ struct device *dev = &mgp->pdev->dev;
+ size_t hdr_off, pattern_off, hdr_len;
+ u32 pattern = 0xfffffffe;
+
+ /* find running firmware header */
+ hdr_off = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));
+ if ((hdr_off & 3) || hdr_off + sizeof(*hdr) > mgp->sram_size) {
+ dev_err(dev, "Running firmware has bad header offset (%d)\n",
+ (int)hdr_off);
+ return -EIO;
+ }
+ hdr_len = swab32(readl(mgp->sram + hdr_off +
+ offsetof(struct mcp_gen_header, header_length)));
+ pattern_off = hdr_off + offsetof(struct mcp_gen_header, led_pattern);
+ if (pattern_off >= (hdr_len + hdr_off)) {
+ dev_info(dev, "Firmware does not support LED identification\n");
+ return -EINVAL;
+ }
+ if (!on)
+ pattern = swab32(readl(mgp->sram + pattern_off + 4));
+ writel(htonl(pattern), mgp->sram + pattern_off);
+ return 0;
+}
+
+static int
+myri10ge_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ int rc;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ rc = myri10ge_led(mgp, 1);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ rc = myri10ge_led(mgp, 0);
+ break;
+
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static const struct ethtool_ops myri10ge_ethtool_ops = {
+ .get_settings = myri10ge_get_settings,
+ .get_drvinfo = myri10ge_get_drvinfo,
+ .get_coalesce = myri10ge_get_coalesce,
+ .set_coalesce = myri10ge_set_coalesce,
+ .get_pauseparam = myri10ge_get_pauseparam,
+ .set_pauseparam = myri10ge_set_pauseparam,
+ .get_ringparam = myri10ge_get_ringparam,
+ .get_link = ethtool_op_get_link,
+ .get_strings = myri10ge_get_strings,
+ .get_sset_count = myri10ge_get_sset_count,
+ .get_ethtool_stats = myri10ge_get_ethtool_stats,
+ .set_msglevel = myri10ge_set_msglevel,
+ .get_msglevel = myri10ge_get_msglevel,
+ .set_phys_id = myri10ge_phys_id,
+};
+
+static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
+{
+ struct myri10ge_priv *mgp = ss->mgp;
+ struct myri10ge_cmd cmd;
+ struct net_device *dev = mgp->dev;
+ int tx_ring_size, rx_ring_size;
+ int tx_ring_entries, rx_ring_entries;
+ int i, slice, status;
+ size_t bytes;
+
+ /* get ring sizes */
+ slice = ss - mgp->ss;
+ cmd.data0 = slice;
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
+ tx_ring_size = cmd.data0;
+ cmd.data0 = slice;
+ status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
+ if (status != 0)
+ return status;
+ rx_ring_size = cmd.data0;
+
+ tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send);
+ rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr);
+ ss->tx.mask = tx_ring_entries - 1;
+ ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;
+
+ status = -ENOMEM;
+
+ /* allocate the host shadow rings */
+
+ bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
+ * sizeof(*ss->tx.req_list);
+ ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
+ if (ss->tx.req_bytes == NULL)
+ goto abort_with_nothing;
+
+ /* ensure req_list entries are aligned to 8 bytes */
+ ss->tx.req_list = (struct mcp_kreq_ether_send *)
+ ALIGN((unsigned long)ss->tx.req_bytes, 8);
+ ss->tx.queue_active = 0;
+
+ bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
+ ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
+ if (ss->rx_small.shadow == NULL)
+ goto abort_with_tx_req_bytes;
+
+ bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow);
+ ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
+ if (ss->rx_big.shadow == NULL)
+ goto abort_with_rx_small_shadow;
+
+ /* allocate the host info rings */
+
+ bytes = tx_ring_entries * sizeof(*ss->tx.info);
+ ss->tx.info = kzalloc(bytes, GFP_KERNEL);
+ if (ss->tx.info == NULL)
+ goto abort_with_rx_big_shadow;
+
+ bytes = rx_ring_entries * sizeof(*ss->rx_small.info);
+ ss->rx_small.info = kzalloc(bytes, GFP_KERNEL);
+ if (ss->rx_small.info == NULL)
+ goto abort_with_tx_info;
+
+ bytes = rx_ring_entries * sizeof(*ss->rx_big.info);
+ ss->rx_big.info = kzalloc(bytes, GFP_KERNEL);
+ if (ss->rx_big.info == NULL)
+ goto abort_with_rx_small_info;
+
+ /* Fill the receive rings */
+ ss->rx_big.cnt = 0;
+ ss->rx_small.cnt = 0;
+ ss->rx_big.fill_cnt = 0;
+ ss->rx_small.fill_cnt = 0;
+ ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
+ ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
+ ss->rx_small.watchdog_needed = 0;
+ ss->rx_big.watchdog_needed = 0;
+ if (mgp->small_bytes == 0) {
+ ss->rx_small.fill_cnt = ss->rx_small.mask + 1;
+ } else {
+ myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
+ mgp->small_bytes + MXGEFW_PAD, 0);
+ }
+
+ if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
+ netdev_err(dev, "slice-%d: alloced only %d small bufs\n",
+ slice, ss->rx_small.fill_cnt);
+ goto abort_with_rx_small_ring;
+ }
+
+ myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
+ if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
+ netdev_err(dev, "slice-%d: alloced only %d big bufs\n",
+ slice, ss->rx_big.fill_cnt);
+ goto abort_with_rx_big_ring;
+ }
+
+ return 0;
+
+abort_with_rx_big_ring:
+ for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
+ int idx = i & ss->rx_big.mask;
+ myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
+ mgp->big_bytes);
+ put_page(ss->rx_big.info[idx].page);
+ }
+
+abort_with_rx_small_ring:
+ if (mgp->small_bytes == 0)
+ ss->rx_small.fill_cnt = ss->rx_small.cnt;
+ for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
+ int idx = i & ss->rx_small.mask;
+ myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
+ mgp->small_bytes + MXGEFW_PAD);
+ put_page(ss->rx_small.info[idx].page);
+ }
+
+ kfree(ss->rx_big.info);
+
+abort_with_rx_small_info:
+ kfree(ss->rx_small.info);
+
+abort_with_tx_info:
+ kfree(ss->tx.info);
+
+abort_with_rx_big_shadow:
+ kfree(ss->rx_big.shadow);
+
+abort_with_rx_small_shadow:
+ kfree(ss->rx_small.shadow);
+
+abort_with_tx_req_bytes:
+ kfree(ss->tx.req_bytes);
+ ss->tx.req_bytes = NULL;
+ ss->tx.req_list = NULL;
+
+abort_with_nothing:
+ return status;
+}
+
+static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
+{
+ struct myri10ge_priv *mgp = ss->mgp;
+ struct sk_buff *skb;
+ struct myri10ge_tx_buf *tx;
+ int i, len, idx;
+
+ /* If not allocated, skip it */
+ if (ss->tx.req_list == NULL)
+ return;
+
+ for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
+ idx = i & ss->rx_big.mask;
+ if (i == ss->rx_big.fill_cnt - 1)
+ ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
+ myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
+ mgp->big_bytes);
+ put_page(ss->rx_big.info[idx].page);
+ }
+
+ if (mgp->small_bytes == 0)
+ ss->rx_small.fill_cnt = ss->rx_small.cnt;
+ for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
+ idx = i & ss->rx_small.mask;
+ if (i == ss->rx_small.fill_cnt - 1)
+ ss->rx_small.info[idx].page_offset =
+ MYRI10GE_ALLOC_SIZE;
+ myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
+ mgp->small_bytes + MXGEFW_PAD);
+ put_page(ss->rx_small.info[idx].page);
+ }
+ tx = &ss->tx;
+ while (tx->done != tx->req) {
+ idx = tx->done & tx->mask;
+ skb = tx->info[idx].skb;
+
+ /* Mark as free */
+ tx->info[idx].skb = NULL;
+ tx->done++;
+ len = dma_unmap_len(&tx->info[idx], len);
+ dma_unmap_len_set(&tx->info[idx], len, 0);
+ if (skb) {
+ ss->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ if (len)
+ pci_unmap_single(mgp->pdev,
+ dma_unmap_addr(&tx->info[idx],
+ bus), len,
+ PCI_DMA_TODEVICE);
+ } else {
+ if (len)
+ pci_unmap_page(mgp->pdev,
+ dma_unmap_addr(&tx->info[idx],
+ bus), len,
+ PCI_DMA_TODEVICE);
+ }
+ }
+ kfree(ss->rx_big.info);
+
+ kfree(ss->rx_small.info);
+
+ kfree(ss->tx.info);
+
+ kfree(ss->rx_big.shadow);
+
+ kfree(ss->rx_small.shadow);
+
+ kfree(ss->tx.req_bytes);
+ ss->tx.req_bytes = NULL;
+ ss->tx.req_list = NULL;
+}
+
+static int myri10ge_request_irq(struct myri10ge_priv *mgp)
+{
+ struct pci_dev *pdev = mgp->pdev;
+ struct myri10ge_slice_state *ss;
+ struct net_device *netdev = mgp->dev;
+ int i;
+ int status;
+
+ mgp->msi_enabled = 0;
+ mgp->msix_enabled = 0;
+ status = 0;
+ if (myri10ge_msi) {
+ if (mgp->num_slices > 1) {
+ status =
+ pci_enable_msix(pdev, mgp->msix_vectors,
+ mgp->num_slices);
+ if (status == 0) {
+ mgp->msix_enabled = 1;
+ } else {
+ dev_err(&pdev->dev,
+ "Error %d setting up MSI-X\n", status);
+ return status;
+ }
+ }
+ if (mgp->msix_enabled == 0) {
+ status = pci_enable_msi(pdev);
+ if (status != 0) {
+ dev_err(&pdev->dev,
+ "Error %d setting up MSI; falling back to xPIC\n",
+ status);
+ } else {
+ mgp->msi_enabled = 1;
+ }
+ }
+ }
+ if (mgp->msix_enabled) {
+ for (i = 0; i < mgp->num_slices; i++) {
+ ss = &mgp->ss[i];
+ snprintf(ss->irq_desc, sizeof(ss->irq_desc),
+ "%s:slice-%d", netdev->name, i);
+ status = request_irq(mgp->msix_vectors[i].vector,
+ myri10ge_intr, 0, ss->irq_desc,
+ ss);
+ if (status != 0) {
+ dev_err(&pdev->dev,
+ "slice %d failed to allocate IRQ\n", i);
+ i--;
+ while (i >= 0) {
+ free_irq(mgp->msix_vectors[i].vector,
+ &mgp->ss[i]);
+ i--;
+ }
+ pci_disable_msix(pdev);
+ return status;
+ }
+ }
+ } else {
+ status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
+ mgp->dev->name, &mgp->ss[0]);
+ if (status != 0) {
+ dev_err(&pdev->dev, "failed to allocate IRQ\n");
+ if (mgp->msi_enabled)
+ pci_disable_msi(pdev);
+ }
+ }
+ return status;
+}
+
+static void myri10ge_free_irq(struct myri10ge_priv *mgp)
+{
+ struct pci_dev *pdev = mgp->pdev;
+ int i;
+
+ if (mgp->msix_enabled) {
+ for (i = 0; i < mgp->num_slices; i++)
+ free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]);
+ } else {
+ free_irq(pdev->irq, &mgp->ss[0]);
+ }
+ if (mgp->msi_enabled)
+ pci_disable_msi(pdev);
+ if (mgp->msix_enabled)
+ pci_disable_msix(pdev);
+}
+
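+ /* inet_lro callback: locate the MAC, IP and TCP headers in the
+ * first fragment and verify the checksums; a non-zero return
+ * rejects the frame for LRO aggregation and it is received
+ * normally instead */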
+static int
+myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
+ void **ip_hdr, void **tcpudp_hdr,
+ u64 *hdr_flags, void *priv)
+{
+ struct ethhdr *eh;
+ struct vlan_ethhdr *veh;
+ struct iphdr *iph;
+ u8 *va = skb_frag_address(frag);
+ unsigned long ll_hlen;
+ /* passed opaque through lro_receive_frags() */
+ __wsum csum = (__force __wsum) (unsigned long)priv;
+
+ /* find the mac header, aborting if not IPv4 */
+
+ eh = (struct ethhdr *)va;
+ *mac_hdr = eh;
+ ll_hlen = ETH_HLEN;
+ if (eh->h_proto != htons(ETH_P_IP)) {
+ if (eh->h_proto == htons(ETH_P_8021Q)) {
+ veh = (struct vlan_ethhdr *)va;
+ if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
+ return -1;
+
+ ll_hlen += VLAN_HLEN;
+
+ /*
+ * HW checksum starts ETH_HLEN bytes into
+ * frame, so we must subtract off the VLAN
+ * header's checksum before csum can be used
+ */
+ csum = csum_sub(csum, csum_partial(va + ETH_HLEN,
+ VLAN_HLEN, 0));
+ } else {
+ return -1;
+ }
+ }
+ *hdr_flags = LRO_IPV4;
+
+ iph = (struct iphdr *)(va + ll_hlen);
+ *ip_hdr = iph;
+ if (iph->protocol != IPPROTO_TCP)
+ return -1;
+ if (ip_is_fragment(iph))
+ return -1;
+ *hdr_flags |= LRO_TCP;
+ *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
+
+ /* verify the IP checksum */
+ if (unlikely(ip_fast_csum((u8 *) iph, iph->ihl)))
+ return -1;
+
+ /* verify the checksum */
+ if (unlikely(csum_tcpudp_magic(iph->saddr, iph->daddr,
+ ntohs(iph->tot_len) - (iph->ihl << 2),
+ IPPROTO_TCP, csum)))
+ return -1;
+
+ return 0;
+}
+
+static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
+{
+ struct myri10ge_cmd cmd;
+ struct myri10ge_slice_state *ss;
+ int status;
+
+ ss = &mgp->ss[slice];
+ status = 0;
+ if (slice == 0 || (mgp->dev->real_num_tx_queues > 1)) {
+ cmd.data0 = slice;
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET,
+ &cmd, 0);
+ ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *)
+ (mgp->sram + cmd.data0);
+ }
+ cmd.data0 = slice;
+ status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET,
+ &cmd, 0);
+ ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *)
+ (mgp->sram + cmd.data0);
+
+ cmd.data0 = slice;
+ status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
+ ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)
+ (mgp->sram + cmd.data0);
+
+ ss->tx.send_go = (__iomem __be32 *)
+ (mgp->sram + MXGEFW_ETH_SEND_GO + 64 * slice);
+ ss->tx.send_stop = (__iomem __be32 *)
+ (mgp->sram + MXGEFW_ETH_SEND_STOP + 64 * slice);
+ return status;
+}
+
+static int myri10ge_set_stats(struct myri10ge_priv *mgp, int slice)
+{
+ struct myri10ge_cmd cmd;
+ struct myri10ge_slice_state *ss;
+ int status;
+
+ ss = &mgp->ss[slice];
+ cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus);
+ cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus);
+ cmd.data2 = sizeof(struct mcp_irq_data) | (slice << 16);
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
+ if (status == -ENOSYS) {
+ dma_addr_t bus = ss->fw_stats_bus;
+ if (slice != 0)
+ return -EINVAL;
+ bus += offsetof(struct mcp_irq_data, send_done_count);
+ cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
+ cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
+ status = myri10ge_send_cmd(mgp,
+ MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
+ &cmd, 0);
+ /* Firmware cannot support multicast without STATS_DMA_V2 */
+ mgp->fw_multicast_support = 0;
+ } else {
+ mgp->fw_multicast_support = 1;
+ }
+ return 0;
+}
+
+static int myri10ge_open(struct net_device *dev)
+{
+ struct myri10ge_slice_state *ss;
+ struct myri10ge_priv *mgp = netdev_priv(dev);
+ struct myri10ge_cmd cmd;
+ int i, status, big_pow2, slice;
+ u8 *itable;
+ struct net_lro_mgr *lro_mgr;
+
+ if (mgp->running != MYRI10GE_ETH_STOPPED)
+ return -EBUSY;
+
+ mgp->running = MYRI10GE_ETH_STARTING;
+ status = myri10ge_reset(mgp);
+ if (status != 0) {
+ netdev_err(dev, "failed reset\n");
+ goto abort_with_nothing;
+ }
+
+ if (mgp->num_slices > 1) {
+ cmd.data0 = mgp->num_slices;
+ cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
+ if (mgp->dev->real_num_tx_queues > 1)
+ cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
+ &cmd, 0);
+ if (status != 0) {
+ netdev_err(dev, "failed to set number of slices\n");
+ goto abort_with_nothing;
+ }
+ /* setup the indirection table */
+ cmd.data0 = mgp->num_slices;
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_TABLE_SIZE,
+ &cmd, 0);
+
+ status |= myri10ge_send_cmd(mgp,
+ MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
+ &cmd, 0);
+ if (status != 0) {
+ netdev_err(dev, "failed to setup rss tables\n");
+ goto abort_with_nothing;
+ }
+
+ /* just enable an identity mapping */
+ itable = mgp->sram + cmd.data0;
+ for (i = 0; i < mgp->num_slices; i++)
+ __raw_writeb(i, &itable[i]);
+
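+ /* enable RSS, using the hash type chosen at module load */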
+ cmd.data0 = 1;
+ cmd.data1 = myri10ge_rss_hash;
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE,
+ &cmd, 0);
+ if (status != 0) {
+ netdev_err(dev, "failed to enable slices\n");
+ goto abort_with_nothing;
+ }
+ }
+
+ status = myri10ge_request_irq(mgp);
+ if (status != 0)
+ goto abort_with_nothing;
+
+ /* decide what small buffer size to use. For good TCP rx
+ * performance, it is important to not receive 1514 byte
+ * frames into jumbo buffers, as it confuses the socket buffer
+ * accounting code, leading to drops and erratic performance.
+ */
+
+ if (dev->mtu <= ETH_DATA_LEN)
+ /* enough for a TCP header */
+ mgp->small_bytes = (128 > SMP_CACHE_BYTES)
+ ? (128 - MXGEFW_PAD)
+ : (SMP_CACHE_BYTES - MXGEFW_PAD);
+ else
+ /* enough for a vlan encapsulated ETH_DATA_LEN frame */
+ mgp->small_bytes = VLAN_ETH_FRAME_LEN;
+
+ /* Override the small buffer size? */
+ if (myri10ge_small_bytes >= 0)
+ mgp->small_bytes = myri10ge_small_bytes;
+
+ /* Firmware needs the big buffer size as a power of 2. Lie and
+ * tell it the buffer is larger, because we only use 1
+ * buffer/pkt, and the mtu will prevent overruns.
+ */
+ big_pow2 = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
+ if (big_pow2 < MYRI10GE_ALLOC_SIZE / 2) {
+ while (!is_power_of_2(big_pow2))
+ big_pow2++;
+ mgp->big_bytes = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
+ } else {
+ big_pow2 = MYRI10GE_ALLOC_SIZE;
+ mgp->big_bytes = big_pow2;
+ }
+
+ /* setup the per-slice data structures */
+ for (slice = 0; slice < mgp->num_slices; slice++) {
+ ss = &mgp->ss[slice];
+
+ status = myri10ge_get_txrx(mgp, slice);
+ if (status != 0) {
+ netdev_err(dev, "failed to get ring sizes or locations\n");
+ goto abort_with_rings;
+ }
+ status = myri10ge_allocate_rings(ss);
+ if (status != 0)
+ goto abort_with_rings;
+
+ /* only firmware which supports multiple TX queues
+ * supports setting up the tx stats on non-zero
+ * slices */
+ if (slice == 0 || mgp->dev->real_num_tx_queues > 1)
+ status = myri10ge_set_stats(mgp, slice);
+ if (status) {
+ netdev_err(dev, "Couldn't set stats DMA\n");
+ goto abort_with_rings;
+ }
+
+ lro_mgr = &ss->rx_done.lro_mgr;
+ lro_mgr->dev = dev;
+ lro_mgr->features = LRO_F_NAPI;
+ lro_mgr->ip_summed = CHECKSUM_COMPLETE;
+ lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
+ lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS;
+ lro_mgr->lro_arr = ss->rx_done.lro_desc;
+ lro_mgr->get_frag_header = myri10ge_get_frag_header;
+ lro_mgr->max_aggr = myri10ge_lro_max_pkts;
+ lro_mgr->frag_align_pad = 2;
+ if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
+ lro_mgr->max_aggr = MAX_SKB_FRAGS;
+
+ /* must happen prior to any irq */
+ napi_enable(&ss->napi);
+ }
+
+ /* now give firmware buffers sizes, and MTU */
+ cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0);
+ cmd.data0 = mgp->small_bytes;
+ status |=
+ myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd, 0);
+ cmd.data0 = big_pow2;
+ status |=
+ myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0);
+ if (status) {
+ netdev_err(dev, "Couldn't set buffer sizes\n");
+ goto abort_with_rings;
+ }
+
+ /*
+ * Set Linux style TSO mode; this is needed only on newer
+ * firmware versions. Older versions default to Linux
+ * style TSO
+ */
+ cmd.data0 = 0;
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0);
+ if (status && status != -ENOSYS) {
+ netdev_err(dev, "Couldn't set TSO mode\n");
+ goto abort_with_rings;
+ }
+
+ mgp->link_state = ~0U;
+ mgp->rdma_tags_available = 15;
+
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
+ if (status) {
+ netdev_err(dev, "Couldn't bring up link\n");
+ goto abort_with_rings;
+ }
+
+ mgp->running = MYRI10GE_ETH_RUNNING;
+ mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
+ add_timer(&mgp->watchdog_timer);
+ netif_tx_wake_all_queues(dev);
+
+ return 0;
+
+abort_with_rings:
+ while (slice) {
+ slice--;
+ napi_disable(&mgp->ss[slice].napi);
+ }
+ for (i = 0; i < mgp->num_slices; i++)
+ myri10ge_free_rings(&mgp->ss[i]);
+
+ myri10ge_free_irq(mgp);
+
+abort_with_nothing:
+ mgp->running = MYRI10GE_ETH_STOPPED;
+ return -ENOMEM;
+}
+
+static int myri10ge_close(struct net_device *dev)
+{
+ struct myri10ge_priv *mgp = netdev_priv(dev);
+ struct myri10ge_cmd cmd;
+ int status, old_down_cnt;
+ int i;
+
+ if (mgp->running != MYRI10GE_ETH_RUNNING)
+ return 0;
+
+ if (mgp->ss[0].tx.req_bytes == NULL)
+ return 0;
+
+ del_timer_sync(&mgp->watchdog_timer);
+ mgp->running = MYRI10GE_ETH_STOPPING;
+ for (i = 0; i < mgp->num_slices; i++) {
+ napi_disable(&mgp->ss[i].napi);
+ }
+ netif_carrier_off(dev);
+
+ netif_tx_stop_all_queues(dev);
+ if (mgp->rebooted == 0) {
+ old_down_cnt = mgp->down_cnt;
+ mb();
+ status =
+ myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
+ if (status)
+ netdev_err(dev, "Couldn't bring down link\n");
+
+ wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt,
+ HZ);
+ if (old_down_cnt == mgp->down_cnt)
+ netdev_err(dev, "never got down irq\n");
+ }
+ netif_tx_disable(dev);
+ myri10ge_free_irq(mgp);
+ for (i = 0; i < mgp->num_slices; i++)
+ myri10ge_free_rings(&mgp->ss[i]);
+
+ mgp->running = MYRI10GE_ETH_STOPPED;
+ return 0;
+}
+
+/* copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
+ * backwards one at a time and handle ring wraps */
+
+static inline void
+myri10ge_submit_req_backwards(struct myri10ge_tx_buf *tx,
+ struct mcp_kreq_ether_send *src, int cnt)
+{
+ int idx, starting_slot;
+ starting_slot = tx->req;
+ while (cnt > 1) {
+ cnt--;
+ idx = (starting_slot + cnt) & tx->mask;
+ myri10ge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src));
+ mb();
+ }
+}
+
+/*
+ * copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
+ * at most 32 bytes at a time, so as to avoid involving the software
+ * pio handler in the nic. We re-write the first segment's flags
+ * to mark them valid only after writing the entire chain.
+ */
+
+static inline void
+myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
+ int cnt)
+{
+ int idx, i;
+ struct mcp_kreq_ether_send __iomem *dstp, *dst;
+ struct mcp_kreq_ether_send *srcp;
+ u8 last_flags;
+
+ idx = tx->req & tx->mask;
+
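+ /* stash the first descriptor's flags and write it with the
+ * flags cleared; the real flags are rewritten last, below, so
+ * the NIC never sees a partially written request chain */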
+ last_flags = src->flags;
+ src->flags = 0;
+ mb();
+ dst = dstp = &tx->lanai[idx];
+ srcp = src;
+
+ if ((idx + cnt) < tx->mask) {
+ for (i = 0; i < (cnt - 1); i += 2) {
+ myri10ge_pio_copy(dstp, srcp, 2 * sizeof(*src));
+ mb(); /* force write every 32 bytes */
+ srcp += 2;
+ dstp += 2;
+ }
+ } else {
+ /* submit all but the first request, and ensure
+ * that it is submitted below */
+ myri10ge_submit_req_backwards(tx, src, cnt);
+ i = 0;
+ }
+ if (i < cnt) {
+ /* submit the first request */
+ myri10ge_pio_copy(dstp, srcp, sizeof(*src));
+ mb(); /* barrier before setting valid flag */
+ }
+
+ /* re-write the last 32-bits with the valid flags */
+ src->flags = last_flags;
+ put_be32(*((__be32 *) src + 3), (__be32 __iomem *) dst + 3);
+ tx->req += cnt;
+ mb();
+}
+
+/*
+ * Transmit a packet. We need to split the packet so that a single
+ * segment does not cross myri10ge->tx_boundary, so this makes segment
+ * counting tricky. So rather than try to count segments up front, we
+ * just give up if there are too few segments to hold a reasonably
+ * fragmented packet currently available. If we run
+ * out of segments while preparing a packet for DMA, we just linearize
+ * it and try again.
+ */
+
+static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct myri10ge_priv *mgp = netdev_priv(dev);
+ struct myri10ge_slice_state *ss;
+ struct mcp_kreq_ether_send *req;
+ struct myri10ge_tx_buf *tx;
+ struct skb_frag_struct *frag;
+ struct netdev_queue *netdev_queue;
+ dma_addr_t bus;
+ u32 low;
+ __be32 high_swapped;
+ unsigned int len;
+ int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
+ u16 pseudo_hdr_offset, cksum_offset, queue;
+ int cum_len, seglen, boundary, rdma_count;
+ u8 flags, odd_flag;
+
+ queue = skb_get_queue_mapping(skb);
+ ss = &mgp->ss[queue];
+ netdev_queue = netdev_get_tx_queue(mgp->dev, queue);
+ tx = &ss->tx;
+
+again:
+ req = tx->req_list;
+ avail = tx->mask - 1 - (tx->req - tx->done);
+
+ mss = 0;
+ max_segments = MXGEFW_MAX_SEND_DESC;
+
+ if (skb_is_gso(skb)) {
+ mss = skb_shinfo(skb)->gso_size;
+ max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
+ }
+
+ if (unlikely(avail < max_segments)) {
+ /* we are out of transmit resources */
+ tx->stop_queue++;
+ netif_tx_stop_queue(netdev_queue);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Setup checksum offloading, if needed */
+ cksum_offset = 0;
+ pseudo_hdr_offset = 0;
+ odd_flag = 0;
+ flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ cksum_offset = skb_checksum_start_offset(skb);
+ pseudo_hdr_offset = cksum_offset + skb->csum_offset;
+ /* If the headers are excessively large, then we must
+ * fall back to a software checksum */
+ if (unlikely(!mss && (cksum_offset > 255 ||
+ pseudo_hdr_offset > 127))) {
+ if (skb_checksum_help(skb))
+ goto drop;
+ cksum_offset = 0;
+ pseudo_hdr_offset = 0;
+ } else {
+ odd_flag = MXGEFW_FLAGS_ALIGN_ODD;
+ flags |= MXGEFW_FLAGS_CKSUM;
+ }
+ }
+
+ cum_len = 0;
+
+ if (mss) { /* TSO */
+ /* this removes any CKSUM flag from before */
+ flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);
+
+ /* negative cum_len signifies to the
+ * send loop that we are still in the
+ * header portion of the TSO packet.
+ * TSO header can be at most 1KB long */
+ cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));
+
+ /* for IPv6 TSO, the checksum offset stores the
+ * TCP header length, to save the firmware from
+ * the need to parse the headers */
+ if (skb_is_gso_v6(skb)) {
+ cksum_offset = tcp_hdrlen(skb);
+ /* Can only handle headers <= max_tso6 long */
+ if (unlikely(-cum_len > mgp->max_tso6))
+ return myri10ge_sw_tso(skb, dev);
+ }
+ /* for TSO, pseudo_hdr_offset holds mss.
+ * The firmware figures out where to put
+ * the checksum by parsing the header. */
+ pseudo_hdr_offset = mss;
+ } else
+ /* Mark small packets, and pad out tiny packets */
+ if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
+ flags |= MXGEFW_FLAGS_SMALL;
+
+ /* pad frames to at least ETH_ZLEN bytes */
+ if (unlikely(skb->len < ETH_ZLEN)) {
+ if (skb_padto(skb, ETH_ZLEN)) {
+ /* The packet is gone, so we must
+ * return 0 */
+ ss->stats.tx_dropped += 1;
+ return NETDEV_TX_OK;
+ }
+ /* adjust the len to account for the zero pad
+ * so that the nic can know how long it is */
+ skb->len = ETH_ZLEN;
+ }
+ }
+
+ /* map the skb for DMA */
+ len = skb_headlen(skb);
+ idx = tx->req & tx->mask;
+ tx->info[idx].skb = skb;
+ bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ dma_unmap_addr_set(&tx->info[idx], bus, bus);
+ dma_unmap_len_set(&tx->info[idx], len, len);
+
+ frag_cnt = skb_shinfo(skb)->nr_frags;
+ frag_idx = 0;
+ count = 0;
+ rdma_count = 0;
+
+ /* "rdma_count" is the number of RDMAs belonging to the
+ * current packet BEFORE the current send request. For
+ * non-TSO packets, this is equal to "count".
+ * For TSO packets, rdma_count needs to be reset
+ * to 0 after a segment cut.
+ *
+ * The rdma_count field of the send request is
+ * the number of RDMAs of the packet starting at
+ * that request. For TSO send requests with one or more cuts
+ * in the middle, this is the number of RDMAs starting
+ * after the last cut in the request. All previous
+ * segments before the last cut implicitly have 1 RDMA.
+ *
+ * Since the number of RDMAs is not known beforehand,
+ * it must be filled-in retroactively - after each
+ * segmentation cut or at the end of the entire packet.
+ */
+
+ while (1) {
+ /* Break the SKB or Fragment up into pieces which
+ * do not cross mgp->tx_boundary */
+ low = MYRI10GE_LOWPART_TO_U32(bus);
+ high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
+ while (len) {
+ u8 flags_next;
+ int cum_len_next;
+
+ if (unlikely(count == max_segments))
+ goto abort_linearize;
+
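+ /* round "low" up to the next tx_boundary multiple;
+ * tx_boundary is a power of 2 (2KB or 4KB), so the
+ * mask arithmetic below is exact */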
+ boundary =
+ (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1);
+ seglen = boundary - low;
+ if (seglen > len)
+ seglen = len;
+ flags_next = flags & ~MXGEFW_FLAGS_FIRST;
+ cum_len_next = cum_len + seglen;
+ if (mss) { /* TSO */
+ (req - rdma_count)->rdma_count = rdma_count + 1;
+
+ if (likely(cum_len >= 0)) { /* payload */
+ int next_is_first, chop;
+
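+ /* branchless bookkeeping for the hot path: "chop"
+ * means this descriptor crosses an mss boundary, and
+ * "next_is_first" means the next descriptor starts a
+ * fresh TSO segment */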
+ chop = (cum_len_next > mss);
+ cum_len_next = cum_len_next % mss;
+ next_is_first = (cum_len_next == 0);
+ flags |= chop * MXGEFW_FLAGS_TSO_CHOP;
+ flags_next |= next_is_first *
+ MXGEFW_FLAGS_FIRST;
+ rdma_count |= -(chop | next_is_first);
+ rdma_count += chop & !next_is_first;
+ } else if (likely(cum_len_next >= 0)) { /* header ends */
+ int small;
+
+ rdma_count = -1;
+ cum_len_next = 0;
+ seglen = -cum_len;
+ small = (mss <= MXGEFW_SEND_SMALL_SIZE);
+ flags_next = MXGEFW_FLAGS_TSO_PLD |
+ MXGEFW_FLAGS_FIRST |
+ (small * MXGEFW_FLAGS_SMALL);
+ }
+ }
+ req->addr_high = high_swapped;
+ req->addr_low = htonl(low);
+ req->pseudo_hdr_offset = htons(pseudo_hdr_offset);
+ req->pad = 0; /* complete solid 16-byte block; does this matter? */
+ req->rdma_count = 1;
+ req->length = htons(seglen);
+ req->cksum_offset = cksum_offset;
+ req->flags = flags | ((cum_len & 1) * odd_flag);
+
+ low += seglen;
+ len -= seglen;
+ cum_len = cum_len_next;
+ flags = flags_next;
+ req++;
+ count++;
+ rdma_count++;
+ if (cksum_offset != 0 && !(mss && skb_is_gso_v6(skb))) {
+ if (unlikely(cksum_offset > seglen))
+ cksum_offset -= seglen;
+ else
+ cksum_offset = 0;
+ }
+ }
+ if (frag_idx == frag_cnt)
+ break;
+
+ /* map next fragment for DMA */
+ idx = (count + tx->req) & tx->mask;
+ frag = &skb_shinfo(skb)->frags[frag_idx];
+ frag_idx++;
+ len = frag->size;
+ bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len,
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(&tx->info[idx], bus, bus);
+ dma_unmap_len_set(&tx->info[idx], len, len);
+ }
+
+ (req - rdma_count)->rdma_count = rdma_count;
+ if (mss)
+ do {
+ req--;
+ req->flags |= MXGEFW_FLAGS_TSO_LAST;
+ } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
+ MXGEFW_FLAGS_FIRST)));
+ idx = ((count - 1) + tx->req) & tx->mask;
+ tx->info[idx].last = 1;
+ myri10ge_submit_req(tx, tx->req_list, count);
+ /* if using multiple tx queues, make sure NIC polls the
+ * current slice */
+ if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) {
+ tx->queue_active = 1;
+ put_be32(htonl(1), tx->send_go);
+ mb();
+ mmiowb();
+ }
+ tx->pkt_start++;
+ if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
+ tx->stop_queue++;
+ netif_tx_stop_queue(netdev_queue);
+ }
+ return NETDEV_TX_OK;
+
+abort_linearize:
+ /* Free any DMA resources we've alloced and clear out the skb
+ * slot so as to not trip up assertions, and to avoid a
+ * double-free if linearizing fails */
+
+ last_idx = (idx + 1) & tx->mask;
+ idx = tx->req & tx->mask;
+ tx->info[idx].skb = NULL;
+ do {
+ len = dma_unmap_len(&tx->info[idx], len);
+ if (len) {
+ if (tx->info[idx].skb != NULL)
+ pci_unmap_single(mgp->pdev,
+ dma_unmap_addr(&tx->info[idx],
+ bus), len,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(mgp->pdev,
+ dma_unmap_addr(&tx->info[idx],
+ bus), len,
+ PCI_DMA_TODEVICE);
+ dma_unmap_len_set(&tx->info[idx], len, 0);
+ tx->info[idx].skb = NULL;
+ }
+ idx = (idx + 1) & tx->mask;
+ } while (idx != last_idx);
+ if (skb_is_gso(skb)) {
+ netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
+ goto drop;
+ }
+
+ if (skb_linearize(skb))
+ goto drop;
+
+ tx->linearized++;
+ goto again;
+
+drop:
+ dev_kfree_skb_any(skb);
+ ss->stats.tx_dropped += 1;
+ return NETDEV_TX_OK;
+}
+
+static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct sk_buff *segs, *curr;
+ struct myri10ge_priv *mgp = netdev_priv(dev);
+ struct myri10ge_slice_state *ss;
+ netdev_tx_t status;
+
+ segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
+ if (IS_ERR(segs))
+ goto drop;
+
+ while (segs) {
+ curr = segs;
+ segs = segs->next;
+ curr->next = NULL;
+ status = myri10ge_xmit(curr, dev);
+ if (status != 0) {
+ dev_kfree_skb_any(curr);
+ /* free all remaining segments to avoid leaking them */
+ while (segs != NULL) {
+ curr = segs;
+ segs = segs->next;
+ curr->next = NULL;
+ dev_kfree_skb_any(curr);
+ }
+ goto drop;
+ }
+ }
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+
+drop:
+ ss = &mgp->ss[skb_get_queue_mapping(skb)];
+ dev_kfree_skb_any(skb);
+ ss->stats.tx_dropped += 1;
+ return NETDEV_TX_OK;
+}
+
+static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ const struct myri10ge_priv *mgp = netdev_priv(dev);
+ const struct myri10ge_slice_netstats *slice_stats;
+ int i;
+
+ for (i = 0; i < mgp->num_slices; i++) {
+ slice_stats = &mgp->ss[i].stats;
+ stats->rx_packets += slice_stats->rx_packets;
+ stats->tx_packets += slice_stats->tx_packets;
+ stats->rx_bytes += slice_stats->rx_bytes;
+ stats->tx_bytes += slice_stats->tx_bytes;
+ stats->rx_dropped += slice_stats->rx_dropped;
+ stats->tx_dropped += slice_stats->tx_dropped;
+ }
+ return stats;
+}
+
+static void myri10ge_set_multicast_list(struct net_device *dev)
+{
+ struct myri10ge_priv *mgp = netdev_priv(dev);
+ struct myri10ge_cmd cmd;
+ struct netdev_hw_addr *ha;
+ __be32 data[2] = { 0, 0 };
+ int err;
+
+ /* can be called from atomic contexts,
+ * pass 1 to force atomicity in myri10ge_send_cmd() */
+ myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);
+
+ /* This firmware is known to not support multicast */
+ if (!mgp->fw_multicast_support)
+ return;
+
+ /* Disable multicast filtering */
+
+ err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
+ if (err != 0) {
+ netdev_err(dev, "Failed MXGEFW_ENABLE_ALLMULTI, error status: %d\n",
+ err);
+ goto abort;
+ }
+
+ if ((dev->flags & IFF_ALLMULTI) || mgp->adopted_rx_filter_bug) {
+ /* request to disable multicast filtering, so quit here */
+ return;
+ }
+
+ /* Flush the filters */
+
+ err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
+ &cmd, 1);
+ if (err != 0) {
+ netdev_err(dev, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, error status: %d\n",
+ err);
+ goto abort;
+ }
+
+ /* Walk the multicast list, and add each address */
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(data, &ha->addr, 6);
+ cmd.data0 = ntohl(data[0]);
+ cmd.data1 = ntohl(data[1]);
+ err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
+ &cmd, 1);
+
+ if (err != 0) {
+ netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
+ err, ha->addr);
+ goto abort;
+ }
+ }
+ /* Enable multicast filtering */
+ err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1);
+ if (err != 0) {
+ netdev_err(dev, "Failed MXGEFW_DISABLE_ALLMULTI, error status: %d\n",
+ err);
+ goto abort;
+ }
+
+ return;
+
+abort:
+ return;
+}
+
+static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = addr;
+ struct myri10ge_priv *mgp = netdev_priv(dev);
+ int status;
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EADDRNOTAVAIL;
+
+ status = myri10ge_update_mac_address(mgp, sa->sa_data);
+ if (status != 0) {
+ netdev_err(dev, "changing mac address failed with %d\n",
+ status);
+ return status;
+ }
+
+ /* change the dev structure */
+ memcpy(dev->dev_addr, sa->sa_data, 6);
+ return 0;
+}
+
+static u32 myri10ge_fix_features(struct net_device *dev, u32 features)
+{
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ return features;
+}
+
+static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct myri10ge_priv *mgp = netdev_priv(dev);
+ int error = 0;
+
+ if ((new_mtu < 68) || (ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU)) {
+ netdev_err(dev, "new mtu (%d) is not valid\n", new_mtu);
+ return -EINVAL;
+ }
+ netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu);
+ if (mgp->running) {
+ /* if we change the mtu on an active device, we must
+ * reset the device so the firmware sees the change */
+ myri10ge_close(dev);
+ dev->mtu = new_mtu;
+ myri10ge_open(dev);
+ } else
+ dev->mtu = new_mtu;
+
+ return error;
+}
+
+/*
+ * Enable ECRC to align PCI-E Completion packets on an 8-byte boundary.
+ * Only do it if the bridge is a root port since we don't want to disturb
+ * any other device, except if forced with myri10ge_ecrc_enable > 1.
+ */
+
+static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
+{
+ struct pci_dev *bridge = mgp->pdev->bus->self;
+ struct device *dev = &mgp->pdev->dev;
+ int cap;
+ unsigned err_cap;
+ u16 val;
+ u8 ext_type;
+ int ret;
+
+ if (!myri10ge_ecrc_enable || !bridge)
+ return;
+
+ /* check that the bridge is a root port */
+ cap = pci_pcie_cap(bridge);
+ pci_read_config_word(bridge, cap + PCI_CAP_FLAGS, &val);
+ ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
+ if (ext_type != PCI_EXP_TYPE_ROOT_PORT) {
+ if (myri10ge_ecrc_enable > 1) {
+ struct pci_dev *prev_bridge, *old_bridge = bridge;
+
+ /* Walk the hierarchy up to the root port
+ * where ECRC has to be enabled */
+ do {
+ prev_bridge = bridge;
+ bridge = bridge->bus->self;
+ if (!bridge || prev_bridge == bridge) {
+ dev_err(dev,
+ "Failed to find root port"
+ " to force ECRC\n");
+ return;
+ }
+ cap = pci_pcie_cap(bridge);
+ pci_read_config_word(bridge,
+ cap + PCI_CAP_FLAGS, &val);
+ ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
+ } while (ext_type != PCI_EXP_TYPE_ROOT_PORT);
+
+ dev_info(dev,
+ "Forcing ECRC on non-root port %s"
+ " (enabling on root port %s)\n",
+ pci_name(old_bridge), pci_name(bridge));
+ } else {
+ dev_err(dev,
+ "Not enabling ECRC on non-root port %s\n",
+ pci_name(bridge));
+ return;
+ }
+ }
+
+ cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
+ if (!cap)
+ return;
+
+ ret = pci_read_config_dword(bridge, cap + PCI_ERR_CAP, &err_cap);
+ if (ret) {
+ dev_err(dev, "failed reading ext-conf-space of %s\n",
+ pci_name(bridge));
+ dev_err(dev, "\t pci=nommconf in use? "
+ "or buggy/incomplete/absent ACPI MCFG attr?\n");
+ return;
+ }
+ if (!(err_cap & PCI_ERR_CAP_ECRC_GENC))
+ return;
+
+ err_cap |= PCI_ERR_CAP_ECRC_GENE;
+ pci_write_config_dword(bridge, cap + PCI_ERR_CAP, err_cap);
+ dev_info(dev, "Enabled ECRC on upstream bridge %s\n", pci_name(bridge));
+}
+
+/*
+ * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput
+ * when the PCI-E Completion packets are aligned on an 8-byte
+ * boundary. Some PCI-E chip sets always align Completion packets; on
+ * the ones that do not, the alignment can be enforced by enabling
+ * ECRC generation (if supported).
+ *
+ * When PCI-E Completion packets are not aligned, it is actually more
+ * efficient to limit Read-DMA transactions to 2KB, rather than 4KB.
+ *
+ * If the driver can neither enable ECRC nor verify that it has
+ * already been enabled, then it must use a firmware image which works
+ * around unaligned completion packets (myri10ge_rss_ethp_z8e.dat), and it
+ * should also ensure that it never gives the device a Read-DMA which is
+ * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is
+ * enabled, then the driver should use the aligned (myri10ge_rss_eth_z8e.dat)
+ * firmware image, and set tx_boundary to 4KB.
+ */
+
+static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
+{
+ struct pci_dev *pdev = mgp->pdev;
+ struct device *dev = &pdev->dev;
+ int status;
+
+ mgp->tx_boundary = 4096;
+ /*
+ * Verify the max read request size was set to 4KB
+ * before trying the test with 4KB.
+ */
+ status = pcie_get_readrq(pdev);
+ if (status < 0) {
+ dev_err(dev, "Couldn't read max read req size: %d\n", status);
+ goto abort;
+ }
+ if (status != 4096) {
+ dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status);
+ mgp->tx_boundary = 2048;
+ }
+ /*
+ * load the optimized firmware (which assumes aligned PCIe
+ * completions) in order to see if it works on this host.
+ */
+ set_fw_name(mgp, myri10ge_fw_aligned, false);
+ status = myri10ge_load_firmware(mgp, 1);
+ if (status != 0)
+ goto abort;
+
+ /*
+ * Enable ECRC if possible
+ */
+ myri10ge_enable_ecrc(mgp);
+
+ /*
+ * Run a DMA test which watches for unaligned completions and
+ * aborts on the first one seen.
+ */
+
+ status = myri10ge_dma_test(mgp, MXGEFW_CMD_UNALIGNED_TEST);
+ if (status == 0)
+ return; /* keep the aligned firmware */
+
+ if (status != -E2BIG)
+ dev_warn(dev, "DMA test failed: %d\n", status);
+ if (status == -ENOSYS)
+ dev_warn(dev, "Falling back to ethp! "
+ "Please install up to date fw\n");
+abort:
+ /* fall back to using the unaligned firmware */
+ mgp->tx_boundary = 2048;
+ set_fw_name(mgp, myri10ge_fw_unaligned, false);
+}
+
+static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
+{
+ int overridden = 0;
+
+ if (myri10ge_force_firmware == 0) {
+ int link_width, exp_cap;
+ u16 lnk;
+
+ exp_cap = pci_pcie_cap(mgp->pdev);
+ pci_read_config_word(mgp->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
+ link_width = (lnk >> 4) & 0x3f;
+
+ /* Check to see if the link width is less than 8 lanes, or
+ * if the upstream bridge is known to provide aligned
+ * completions */
+ if (link_width < 8) {
+ dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
+ link_width);
+ mgp->tx_boundary = 4096;
+ set_fw_name(mgp, myri10ge_fw_aligned, false);
+ } else {
+ myri10ge_firmware_probe(mgp);
+ }
+ } else {
+ if (myri10ge_force_firmware == 1) {
+ dev_info(&mgp->pdev->dev,
+ "Assuming aligned completions (forced)\n");
+ mgp->tx_boundary = 4096;
+ set_fw_name(mgp, myri10ge_fw_aligned, false);
+ } else {
+ dev_info(&mgp->pdev->dev,
+ "Assuming unaligned completions (forced)\n");
+ mgp->tx_boundary = 2048;
+ set_fw_name(mgp, myri10ge_fw_unaligned, false);
+ }
+ }
+
+ kparam_block_sysfs_write(myri10ge_fw_name);
+ if (myri10ge_fw_name != NULL) {
+ char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL);
+ if (fw_name) {
+ overridden = 1;
+ set_fw_name(mgp, fw_name, true);
+ }
+ }
+ kparam_unblock_sysfs_write(myri10ge_fw_name);
+
+ if (mgp->board_number < MYRI10GE_MAX_BOARDS &&
+ myri10ge_fw_names[mgp->board_number] != NULL &&
+ strlen(myri10ge_fw_names[mgp->board_number])) {
+ set_fw_name(mgp, myri10ge_fw_names[mgp->board_number], false);
+ overridden = 1;
+ }
+ if (overridden)
+ dev_info(&mgp->pdev->dev, "overriding firmware to %s\n",
+ mgp->fw_name);
+}
+
+static void myri10ge_mask_surprise_down(struct pci_dev *pdev)
+{
+ struct pci_dev *bridge = pdev->bus->self;
+ int cap;
+ u32 mask;
+
+ if (bridge == NULL)
+ return;
+
+ cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
+ if (cap) {
+ /* a sram parity error can cause a surprise link
+ * down; since we expect and can recover from sram
+ * parity errors, mask surprise link down events */
+ pci_read_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, &mask);
+ mask |= 0x20;
+ pci_write_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, mask);
+ }
+}
+
+#ifdef CONFIG_PM
+static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct myri10ge_priv *mgp;
+ struct net_device *netdev;
+
+ mgp = pci_get_drvdata(pdev);
+ if (mgp == NULL)
+ return -EINVAL;
+ netdev = mgp->dev;
+
+ netif_device_detach(netdev);
+ if (netif_running(netdev)) {
+ netdev_info(netdev, "closing\n");
+ rtnl_lock();
+ myri10ge_close(netdev);
+ rtnl_unlock();
+ }
+ myri10ge_dummy_rdma(mgp, 0);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ return pci_set_power_state(pdev, pci_choose_state(pdev, state));
+}
+
+static int myri10ge_resume(struct pci_dev *pdev)
+{
+ struct myri10ge_priv *mgp;
+ struct net_device *netdev;
+ int status;
+ u16 vendor;
+
+ mgp = pci_get_drvdata(pdev);
+ if (mgp == NULL)
+ return -EINVAL;
+ netdev = mgp->dev;
+ pci_set_power_state(pdev, 0); /* zeros conf space as a side effect */
+ msleep(5); /* give card time to respond */
+ pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
+ if (vendor == 0xffff) {
+ netdev_err(mgp->dev, "device disappeared!\n");
+ return -EIO;
+ }
+
+ pci_restore_state(pdev);
+
+ status = pci_enable_device(pdev);
+ if (status) {
+ dev_err(&pdev->dev, "failed to enable device\n");
+ return status;
+ }
+
+ pci_set_master(pdev);
+
+ myri10ge_reset(mgp);
+ myri10ge_dummy_rdma(mgp, 1);
+
+ /* Save configuration space to be restored if the
+ * nic resets due to a parity error */
+ pci_save_state(pdev);
+
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ status = myri10ge_open(netdev);
+ rtnl_unlock();
+ if (status != 0)
+ goto abort_with_enabled;
+
+ }
+ netif_device_attach(netdev);
+
+ return 0;
+
+abort_with_enabled:
+ pci_disable_device(pdev);
+ return -EIO;
+}
+#endif /* CONFIG_PM */
+
+static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
+{
+ struct pci_dev *pdev = mgp->pdev;
+ int vs = mgp->vendor_specific_offset;
+ u32 reboot;
+
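+ /* the reboot status register is reached through the vendor-
+ * specific config window: write the target address to vs + 0x18,
+ * then read the data back from vs + 0x14 */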
+ /* enter read32 mode */
+ pci_write_config_byte(pdev, vs + 0x10, 0x3);
+
+ /* read REBOOT_STATUS (0xfffffff0) */
+ pci_write_config_dword(pdev, vs + 0x18, 0xfffffff0);
+ pci_read_config_dword(pdev, vs + 0x14, &reboot);
+ return reboot;
+}
+
+static void
+myri10ge_check_slice(struct myri10ge_slice_state *ss, int *reset_needed,
+ int *busy_slice_cnt, u32 rx_pause_cnt)
+{
+ struct myri10ge_priv *mgp = ss->mgp;
+ int slice = ss - mgp->ss;
+
+ if (ss->tx.req != ss->tx.done &&
+ ss->tx.done == ss->watchdog_tx_done &&
+ ss->watchdog_tx_req != ss->watchdog_tx_done) {
+ /* nic seems like it might be stuck.. */
+ if (rx_pause_cnt != mgp->watchdog_pause) {
+ if (net_ratelimit())
+ netdev_warn(mgp->dev, "slice %d: TX paused, "
+ "check link partner\n", slice);
+ } else {
+ netdev_warn(mgp->dev,
+ "slice %d: TX stuck %d %d %d %d %d %d\n",
+ slice, ss->tx.queue_active, ss->tx.req,
+ ss->tx.done, ss->tx.pkt_start,
+ ss->tx.pkt_done,
+ (int)ntohl(mgp->ss[slice].fw_stats->
+ send_done_count));
+ *reset_needed = 1;
+ ss->stuck = 1;
+ }
+ }
+ if (ss->watchdog_tx_done != ss->tx.done ||
+ ss->watchdog_rx_done != ss->rx_done.cnt) {
+ *busy_slice_cnt += 1;
+ }
+ ss->watchdog_tx_done = ss->tx.done;
+ ss->watchdog_tx_req = ss->tx.req;
+ ss->watchdog_rx_done = ss->rx_done.cnt;
+}
+
+/*
+ * This watchdog is used to check whether the board has suffered
+ * from a parity error and needs to be recovered.
+ */
+static void myri10ge_watchdog(struct work_struct *work)
+{
+ struct myri10ge_priv *mgp =
+ container_of(work, struct myri10ge_priv, watchdog_work);
+ struct myri10ge_slice_state *ss;
+ u32 reboot, rx_pause_cnt;
+ int status, rebooted;
+ int i;
+ int reset_needed = 0;
+ int busy_slice_cnt = 0;
+ u16 cmd, vendor;
+
+ mgp->watchdog_resets++;
+ pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
+ rebooted = 0;
+ if ((cmd & PCI_COMMAND_MASTER) == 0) {
+ /* Bus master DMA disabled? Check to see
+ * if the card rebooted due to a parity error.
+ * For now, just report it */
+ reboot = myri10ge_read_reboot(mgp);
+ netdev_err(mgp->dev, "NIC rebooted (0x%x),%s resetting\n",
+ reboot, myri10ge_reset_recover ? "" : " not");
+ if (myri10ge_reset_recover == 0)
+ return;
+ rtnl_lock();
+ mgp->rebooted = 1;
+ rebooted = 1;
+ myri10ge_close(mgp->dev);
+ myri10ge_reset_recover--;
+ mgp->rebooted = 0;
+ /*
+ * A rebooted nic will come back with config space as
+ * it was after power was applied to PCIe bus.
+ * Attempt to restore config space which was saved
+ * when the driver was loaded, or the last time the
+ * nic was resumed from power saving mode.
+ */
+ pci_restore_state(mgp->pdev);
+
+ /* save state again for accounting reasons */
+ pci_save_state(mgp->pdev);
+
+ } else {
+ /* if we get back -1's from our slot, perhaps somebody
+ * powered off our card. Don't try to reset it in
+ * this case */
+ if (cmd == 0xffff) {
+ pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
+ if (vendor == 0xffff) {
+ netdev_err(mgp->dev, "device disappeared!\n");
+ return;
+ }
+ }
+ /* Perhaps it is a software error. See if stuck slice
+ * has recovered, reset if not */
+ rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
+ for (i = 0; i < mgp->num_slices; i++) {
+ ss = &mgp->ss[i];
+ if (ss->stuck) {
+ myri10ge_check_slice(ss, &reset_needed,
+ &busy_slice_cnt,
+ rx_pause_cnt);
+ ss->stuck = 0;
+ }
+ }
+ if (!reset_needed) {
+ netdev_dbg(mgp->dev, "not resetting\n");
+ return;
+ }
+
+ netdev_err(mgp->dev, "device timeout, resetting\n");
+ }
+
+ if (!rebooted) {
+ rtnl_lock();
+ myri10ge_close(mgp->dev);
+ }
+ status = myri10ge_load_firmware(mgp, 1);
+ if (status != 0)
+ netdev_err(mgp->dev, "failed to load firmware\n");
+ else
+ myri10ge_open(mgp->dev);
+ rtnl_unlock();
+}
+
+/*
+ * We use our own timer routine rather than relying upon
+ * netdev->tx_timeout because we have a very large hardware transmit
+ * queue. Due to the large queue, the netdev->tx_timeout function
+ * cannot detect a NIC with a parity error in a timely fashion if the
+ * NIC is lightly loaded.
+ */
+static void myri10ge_watchdog_timer(unsigned long arg)
+{
+ struct myri10ge_priv *mgp;
+ struct myri10ge_slice_state *ss;
+ int i, reset_needed, busy_slice_cnt;
+ u32 rx_pause_cnt;
+ u16 cmd;
+
+ mgp = (struct myri10ge_priv *)arg;
+
+ rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
+ busy_slice_cnt = 0;
+ for (i = 0, reset_needed = 0;
+ i < mgp->num_slices && reset_needed == 0; ++i) {
+
+ ss = &mgp->ss[i];
+ if (ss->rx_small.watchdog_needed) {
+ myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
+ mgp->small_bytes + MXGEFW_PAD,
+ 1);
+ if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
+ myri10ge_fill_thresh)
+ ss->rx_small.watchdog_needed = 0;
+ }
+ if (ss->rx_big.watchdog_needed) {
+ myri10ge_alloc_rx_pages(mgp, &ss->rx_big,
+ mgp->big_bytes, 1);
+ if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
+ myri10ge_fill_thresh)
+ ss->rx_big.watchdog_needed = 0;
+ }
+ myri10ge_check_slice(ss, &reset_needed, &busy_slice_cnt,
+ rx_pause_cnt);
+ }
+ /* if we've sent or received no traffic, poll the NIC to
+ * ensure it is still there. Otherwise, we risk not noticing
+ * an error in a timely fashion */
+ if (busy_slice_cnt == 0) {
+ pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
+ if ((cmd & PCI_COMMAND_MASTER) == 0) {
+ reset_needed = 1;
+ }
+ }
+ mgp->watchdog_pause = rx_pause_cnt;
+
+ if (reset_needed) {
+ schedule_work(&mgp->watchdog_work);
+ } else {
+ /* rearm timer */
+ mod_timer(&mgp->watchdog_timer,
+ jiffies + myri10ge_watchdog_timeout * HZ);
+ }
+}
+
+static void myri10ge_free_slices(struct myri10ge_priv *mgp)
+{
+ struct myri10ge_slice_state *ss;
+ struct pci_dev *pdev = mgp->pdev;
+ size_t bytes;
+ int i;
+
+ if (mgp->ss == NULL)
+ return;
+
+ for (i = 0; i < mgp->num_slices; i++) {
+ ss = &mgp->ss[i];
+ if (ss->rx_done.entry != NULL) {
+ bytes = mgp->max_intr_slots *
+ sizeof(*ss->rx_done.entry);
+ dma_free_coherent(&pdev->dev, bytes,
+ ss->rx_done.entry, ss->rx_done.bus);
+ ss->rx_done.entry = NULL;
+ }
+ if (ss->fw_stats != NULL) {
+ bytes = sizeof(*ss->fw_stats);
+ dma_free_coherent(&pdev->dev, bytes,
+ ss->fw_stats, ss->fw_stats_bus);
+ ss->fw_stats = NULL;
+ }
+ netif_napi_del(&ss->napi);
+ }
+ kfree(mgp->ss);
+ mgp->ss = NULL;
+}
+
+static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
+{
+ struct myri10ge_slice_state *ss;
+ struct pci_dev *pdev = mgp->pdev;
+ size_t bytes;
+ int i;
+
+ bytes = sizeof(*mgp->ss) * mgp->num_slices;
+ mgp->ss = kzalloc(bytes, GFP_KERNEL);
+ if (mgp->ss == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < mgp->num_slices; i++) {
+ ss = &mgp->ss[i];
+ bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
+ ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
+ &ss->rx_done.bus,
+ GFP_KERNEL);
+ if (ss->rx_done.entry == NULL)
+ goto abort;
+ memset(ss->rx_done.entry, 0, bytes);
+ bytes = sizeof(*ss->fw_stats);
+ ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
+ &ss->fw_stats_bus,
+ GFP_KERNEL);
+ if (ss->fw_stats == NULL)
+ goto abort;
+ ss->mgp = mgp;
+ ss->dev = mgp->dev;
+ netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
+ myri10ge_napi_weight);
+ }
+ return 0;
+abort:
+ myri10ge_free_slices(mgp);
+ return -ENOMEM;
+}
+
+/*
+ * This function determines the number of slices supported.
+ * The number of slices is the minimum of the number of CPUs,
+ * the number of MSI-X irqs supported, and the number of slices
+ * supported by the firmware.
+ */
+static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
+{
+ struct myri10ge_cmd cmd;
+ struct pci_dev *pdev = mgp->pdev;
+ char *old_fw;
+ bool old_allocated;
+ int i, status, ncpus, msix_cap;
+
+ mgp->num_slices = 1;
+ msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ ncpus = num_online_cpus();
+
+ if (myri10ge_max_slices == 1 || msix_cap == 0 ||
+ (myri10ge_max_slices == -1 && ncpus < 2))
+ return;
+
+ /* try to load the slice aware rss firmware */
+ old_fw = mgp->fw_name;
+ old_allocated = mgp->fw_name_allocated;
+ /* don't free old_fw if we override it. */
+ mgp->fw_name_allocated = false;
+
+ if (myri10ge_fw_name != NULL) {
+ dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n",
+ myri10ge_fw_name);
+ set_fw_name(mgp, myri10ge_fw_name, false);
+ } else if (old_fw == myri10ge_fw_aligned)
+ set_fw_name(mgp, myri10ge_fw_rss_aligned, false);
+ else
+ set_fw_name(mgp, myri10ge_fw_rss_unaligned, false);
+ status = myri10ge_load_firmware(mgp, 0);
+ if (status != 0) {
+ dev_info(&pdev->dev, "Rss firmware not found\n");
+ if (old_allocated)
+ kfree(old_fw);
+ return;
+ }
+
+ /* hit the board with a reset to ensure it is alive */
+ memset(&cmd, 0, sizeof(cmd));
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
+ if (status != 0) {
+ dev_err(&mgp->pdev->dev, "failed reset\n");
+ goto abort_with_fw;
+ }
+
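+ /* the reset command is expected to report the interrupt
+ * queue size, in bytes, in data0 */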
+ mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot);
+
+ /* tell it the size of the interrupt queues */
+ cmd.data0 = mgp->max_intr_slots * sizeof(struct mcp_slot);
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
+ if (status != 0) {
+ dev_err(&mgp->pdev->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n");
+ goto abort_with_fw;
+ }
+
+ /* ask for the maximum number of slices it supports */
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd, 0);
+ if (status != 0)
+ goto abort_with_fw;
+ else
+ mgp->num_slices = cmd.data0;
+
+ /* Only allow multiple slices if MSI-X is usable */
+ if (!myri10ge_msi)
+ goto abort_with_fw;
+
+ /* if the admin did not specify a limit to how many
+ * slices we should use, cap it automatically to the
+ * number of CPUs currently online */
+ if (myri10ge_max_slices == -1)
+ myri10ge_max_slices = ncpus;
+
+ if (mgp->num_slices > myri10ge_max_slices)
+ mgp->num_slices = myri10ge_max_slices;
+
+ /* Now try to allocate as many MSI-X vectors as we have
+ * slices. We give up on MSI-X if we can only get a single
+ * vector. */
+
+ mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
+ GFP_KERNEL);
+ if (mgp->msix_vectors == NULL)
+ goto disable_msix;
+ for (i = 0; i < mgp->num_slices; i++) {
+ mgp->msix_vectors[i].entry = i;
+ }
+
+ while (mgp->num_slices > 1) {
+ /* make sure it is a power of two */
+ while (!is_power_of_2(mgp->num_slices))
+ mgp->num_slices--;
+ if (mgp->num_slices == 1)
+ goto disable_msix;
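+ /* this call only probes whether the vectors can be
+ * allocated; they are re-enabled for actual use in
+ * myri10ge_request_irq() */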
+ status = pci_enable_msix(pdev, mgp->msix_vectors,
+ mgp->num_slices);
+ if (status == 0) {
+ pci_disable_msix(pdev);
+ if (old_allocated)
+ kfree(old_fw);
+ return;
+ }
+ if (status > 0)
+ mgp->num_slices = status;
+ else
+ goto disable_msix;
+ }
+
+disable_msix:
+ if (mgp->msix_vectors != NULL) {
+ kfree(mgp->msix_vectors);
+ mgp->msix_vectors = NULL;
+ }
+
+abort_with_fw:
+ mgp->num_slices = 1;
+ set_fw_name(mgp, old_fw, old_allocated);
+ myri10ge_load_firmware(mgp, 0);
+}
+
+static const struct net_device_ops myri10ge_netdev_ops = {
+ .ndo_open = myri10ge_open,
+ .ndo_stop = myri10ge_close,
+ .ndo_start_xmit = myri10ge_xmit,
+ .ndo_get_stats64 = myri10ge_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = myri10ge_change_mtu,
+ .ndo_fix_features = myri10ge_fix_features,
+ .ndo_set_rx_mode = myri10ge_set_multicast_list,
+ .ndo_set_mac_address = myri10ge_set_mac_address,
+};
+
+static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct myri10ge_priv *mgp;
+ struct device *dev = &pdev->dev;
+ int i;
+ int status = -ENXIO;
+ int dac_enabled;
+ unsigned hdr_offset, ss_offset;
+ static int board_number;
+
+ netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES);
+ if (netdev == NULL) {
+ dev_err(dev, "Could not allocate ethernet device\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ mgp = netdev_priv(netdev);
+ mgp->dev = netdev;
+ mgp->pdev = pdev;
+ mgp->pause = myri10ge_flow_control;
+ mgp->intr_coal_delay = myri10ge_intr_coal_delay;
+ mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
+ mgp->board_number = board_number;
+ init_waitqueue_head(&mgp->down_wq);
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "pci_enable_device call failed\n");
+ status = -ENODEV;
+ goto abort_with_netdev;
+ }
+
+ /* Find the vendor-specific cap so we can check
+ * the reboot register later on */
+ mgp->vendor_specific_offset
+ = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+
+ /* Set our max read request to 4KB */
+ status = pcie_set_readrq(pdev, 4096);
+ if (status != 0) {
+ dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n",
+ status);
+ goto abort_with_enabled;
+ }
+
+ myri10ge_mask_surprise_down(pdev);
+ pci_set_master(pdev);
+ dac_enabled = 1;
+ status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (status != 0) {
+ dac_enabled = 0;
+ dev_err(&pdev->dev,
+ "64-bit pci address mask was refused, "
+ "trying 32-bit\n");
+ status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ }
+ if (status != 0) {
+ dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
+ goto abort_with_enabled;
+ }
+ (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
+ &mgp->cmd_bus, GFP_KERNEL);
+ if (mgp->cmd == NULL)
+ goto abort_with_enabled;
+
+ mgp->board_span = pci_resource_len(pdev, 0);
+ mgp->iomem_base = pci_resource_start(pdev, 0);
+ mgp->mtrr = -1;
+ mgp->wc_enabled = 0;
+#ifdef CONFIG_MTRR
+ mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span,
+ MTRR_TYPE_WRCOMB, 1);
+ if (mgp->mtrr >= 0)
+ mgp->wc_enabled = 1;
+#endif
+ mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
+ if (mgp->sram == NULL) {
+ dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
+ mgp->board_span, mgp->iomem_base);
+ status = -ENXIO;
+ goto abort_with_mtrr;
+ }
+ hdr_offset =
+ ntohl(__raw_readl(mgp->sram + MCP_HEADER_PTR_OFFSET)) & 0xffffc;
+ ss_offset = hdr_offset + offsetof(struct mcp_gen_header, string_specs);
+ mgp->sram_size = ntohl(__raw_readl(mgp->sram + ss_offset));
+ if (mgp->sram_size > mgp->board_span ||
+ mgp->sram_size <= MYRI10GE_FW_OFFSET) {
+ dev_err(&pdev->dev,
+ "invalid sram_size %dB or board span %ldB\n",
+ mgp->sram_size, mgp->board_span);
+ goto abort_with_ioremap;
+ }
+ memcpy_fromio(mgp->eeprom_strings,
+ mgp->sram + mgp->sram_size, MYRI10GE_EEPROM_STRINGS_SIZE);
+ memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2);
+ status = myri10ge_read_mac_addr(mgp);
+ if (status)
+ goto abort_with_ioremap;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ netdev->dev_addr[i] = mgp->mac_addr[i];
+
+ myri10ge_select_firmware(mgp);
+
+ status = myri10ge_load_firmware(mgp, 1);
+ if (status != 0) {
+ dev_err(&pdev->dev, "failed to load firmware\n");
+ goto abort_with_ioremap;
+ }
+ myri10ge_probe_slices(mgp);
+ status = myri10ge_alloc_slices(mgp);
+ if (status != 0) {
+ dev_err(&pdev->dev, "failed to alloc slice state\n");
+ goto abort_with_firmware;
+ }
+ netif_set_real_num_tx_queues(netdev, mgp->num_slices);
+ netif_set_real_num_rx_queues(netdev, mgp->num_slices);
+ status = myri10ge_reset(mgp);
+ if (status != 0) {
+ dev_err(&pdev->dev, "failed reset\n");
+ goto abort_with_slices;
+ }
+#ifdef CONFIG_MYRI10GE_DCA
+ myri10ge_setup_dca(mgp);
+#endif
+ pci_set_drvdata(pdev, mgp);
+ if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU)
+ myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
+ if ((myri10ge_initial_mtu + ETH_HLEN) < 68)
+ myri10ge_initial_mtu = 68;
+
+ netdev->netdev_ops = &myri10ge_netdev_ops;
+ netdev->mtu = myri10ge_initial_mtu;
+ netdev->base_addr = mgp->iomem_base;
+ netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM;
+ netdev->features = netdev->hw_features;
+
+ if (dac_enabled)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->vlan_features |= mgp->features;
+ if (mgp->fw_ver_tiny < 37)
+ netdev->vlan_features &= ~NETIF_F_TSO6;
+ if (mgp->fw_ver_tiny < 32)
+ netdev->vlan_features &= ~NETIF_F_TSO;
+
+ /* make sure we can get an irq, and that MSI can be
+ * setup (if available). Also ensure netdev->irq
+ * is set to correct value if MSI is enabled */
+ status = myri10ge_request_irq(mgp);
+ if (status != 0)
+ goto abort_with_firmware;
+ netdev->irq = pdev->irq;
+ myri10ge_free_irq(mgp);
+
+ /* Save configuration space to be restored if the
+ * nic resets due to a parity error */
+ pci_save_state(pdev);
+
+ /* Setup the watchdog timer */
+ setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
+ (unsigned long)mgp);
+
+ SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
+ INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
+ status = register_netdev(netdev);
+ if (status != 0) {
+ dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
+ goto abort_with_state;
+ }
+ if (mgp->msix_enabled)
+ dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, WC %s\n",
+ mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
+ (mgp->wc_enabled ? "Enabled" : "Disabled"));
+ else
+ dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
+ mgp->msi_enabled ? "MSI" : "xPIC",
+ netdev->irq, mgp->tx_boundary, mgp->fw_name,
+ (mgp->wc_enabled ? "Enabled" : "Disabled"));
+
+ board_number++;
+ return 0;
+
+abort_with_state:
+ pci_restore_state(pdev);
+
+abort_with_slices:
+ myri10ge_free_slices(mgp);
+
+abort_with_firmware:
+ myri10ge_dummy_rdma(mgp, 0);
+
+abort_with_ioremap:
+ if (mgp->mac_addr_string != NULL)
+ dev_err(&pdev->dev,
+ "myri10ge_probe() failed: MAC=%s, SN=%ld\n",
+ mgp->mac_addr_string, mgp->serial_number);
+ iounmap(mgp->sram);
+
+abort_with_mtrr:
+#ifdef CONFIG_MTRR
+ if (mgp->mtrr >= 0)
+ mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
+#endif
+ dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
+ mgp->cmd, mgp->cmd_bus);
+
+abort_with_enabled:
+ pci_disable_device(pdev);
+
+abort_with_netdev:
+ set_fw_name(mgp, NULL, false);
+ free_netdev(netdev);
+ return status;
+}
+
+/*
+ * myri10ge_remove
+ *
+ * Does what is necessary to shutdown one Myrinet device. Called
+ * once for each Myrinet card by the kernel when a module is
+ * unloaded.
+ */
+static void myri10ge_remove(struct pci_dev *pdev)
+{
+ struct myri10ge_priv *mgp;
+ struct net_device *netdev;
+
+ mgp = pci_get_drvdata(pdev);
+ if (mgp == NULL)
+ return;
+
+ cancel_work_sync(&mgp->watchdog_work);
+ netdev = mgp->dev;
+ unregister_netdev(netdev);
+
+#ifdef CONFIG_MYRI10GE_DCA
+ myri10ge_teardown_dca(mgp);
+#endif
+ myri10ge_dummy_rdma(mgp, 0);
+
+ /* avoid a memory leak */
+ pci_restore_state(pdev);
+
+ iounmap(mgp->sram);
+
+#ifdef CONFIG_MTRR
+ if (mgp->mtrr >= 0)
+ mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
+#endif
+ myri10ge_free_slices(mgp);
+ kfree(mgp->msix_vectors);
+ dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
+ mgp->cmd, mgp->cmd_bus);
+
+ set_fw_name(mgp, NULL, false);
+ free_netdev(netdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
+#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009
+
+static DEFINE_PCI_DEVICE_TABLE(myri10ge_pci_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
+ {PCI_DEVICE
+ (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
+ {0},
+};
+
+MODULE_DEVICE_TABLE(pci, myri10ge_pci_tbl);
+
+static struct pci_driver myri10ge_driver = {
+ .name = "myri10ge",
+ .probe = myri10ge_probe,
+ .remove = myri10ge_remove,
+ .id_table = myri10ge_pci_tbl,
+#ifdef CONFIG_PM
+ .suspend = myri10ge_suspend,
+ .resume = myri10ge_resume,
+#endif
+};
+
+#ifdef CONFIG_MYRI10GE_DCA
+static int
+myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
+{
+ int err = driver_for_each_device(&myri10ge_driver.driver,
+ NULL, &event,
+ myri10ge_notify_dca_device);
+
+ if (err)
+ return NOTIFY_BAD;
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block myri10ge_dca_notifier = {
+ .notifier_call = myri10ge_notify_dca,
+ .next = NULL,
+ .priority = 0,
+};
+#endif /* CONFIG_MYRI10GE_DCA */
+
+static __init int myri10ge_init_module(void)
+{
+ pr_info("Version %s\n", MYRI10GE_VERSION_STR);
+
+ if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX) {
+ pr_err("Illegal rssh hash type %d, defaulting to source port\n",
+ myri10ge_rss_hash);
+ myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
+ }
+#ifdef CONFIG_MYRI10GE_DCA
+ dca_register_notify(&myri10ge_dca_notifier);
+#endif
+ if (myri10ge_max_slices > MYRI10GE_MAX_SLICES)
+ myri10ge_max_slices = MYRI10GE_MAX_SLICES;
+
+ return pci_register_driver(&myri10ge_driver);
+}
+
+module_init(myri10ge_init_module);
+
+static __exit void myri10ge_cleanup_module(void)
+{
+#ifdef CONFIG_MYRI10GE_DCA
+ dca_unregister_notify(&myri10ge_dca_notifier);
+#endif
+ pci_unregister_driver(&myri10ge_driver);
+}
+
+module_exit(myri10ge_cleanup_module);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
new file mode 100644
index 000000000000..11be150e4d67
--- /dev/null
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
@@ -0,0 +1,435 @@
+#ifndef __MYRI10GE_MCP_H__
+#define __MYRI10GE_MCP_H__
+
+#define MXGEFW_VERSION_MAJOR 1
+#define MXGEFW_VERSION_MINOR 4
+
+/* 8 Bytes */
+struct mcp_dma_addr {
+ __be32 high;
+ __be32 low;
+};
+
+/* 4 Bytes */
+struct mcp_slot {
+ __sum16 checksum;
+ __be16 length;
+};
+
+/* 64 Bytes */
+struct mcp_cmd {
+ __be32 cmd;
+ __be32 data0; /* will be low portion if data > 32 bits */
+ /* 8 */
+ __be32 data1; /* will be high portion if data > 32 bits */
+ __be32 data2; /* currently unused.. */
+ /* 16 */
+ struct mcp_dma_addr response_addr;
+ /* 24 */
+ u8 pad[40];
+};
+
+/* 8 Bytes */
+struct mcp_cmd_response {
+ __be32 data;
+ __be32 result;
+};
+
+/*
+ * flags used in mcp_kreq_ether_send_t:
+ *
+ * The SMALL flag is only needed in the first segment. It is
+ * raised for packets whose total length is less than or equal
+ * to 512 bytes.
+ *
+ * The CKSUM flag must be set in all segments.
+ *
+ * The PADDED flag is set if the packet needs to be padded, and it
+ * must be set for all segments.
+ *
+ * The MXGEFW_FLAGS_ALIGN_ODD flag must be set if the cumulative
+ * length of all previous segments was odd.
+ */
+
+#define MXGEFW_FLAGS_SMALL 0x1
+#define MXGEFW_FLAGS_TSO_HDR 0x1
+#define MXGEFW_FLAGS_FIRST 0x2
+#define MXGEFW_FLAGS_ALIGN_ODD 0x4
+#define MXGEFW_FLAGS_CKSUM 0x8
+#define MXGEFW_FLAGS_TSO_LAST 0x8
+#define MXGEFW_FLAGS_NO_TSO 0x10
+#define MXGEFW_FLAGS_TSO_CHOP 0x10
+#define MXGEFW_FLAGS_TSO_PLD 0x20
+
+#define MXGEFW_SEND_SMALL_SIZE 1520
+#define MXGEFW_MAX_MTU 9400
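+
+/* Illustrative sketch (editor's addition): flags for the first send segment
+ * of a non-TSO frame with hardware checksumming, per the rules above.
+ * The helper name is hypothetical. */
+static inline u8 mcp_first_seg_flags(unsigned int pkt_len)
+{
+	u8 flags = MXGEFW_FLAGS_FIRST | MXGEFW_FLAGS_CKSUM | MXGEFW_FLAGS_NO_TSO;
+
+	if (pkt_len <= 512)	/* SMALL is only needed in the first segment */
+		flags |= MXGEFW_FLAGS_SMALL;
+	return flags;
+}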
+
+union mcp_pso_or_cumlen {
+ u16 pseudo_hdr_offset;
+ u16 cum_len;
+};
+
+#define MXGEFW_MAX_SEND_DESC 12
+#define MXGEFW_PAD 2
+
+/* 16 Bytes */
+struct mcp_kreq_ether_send {
+ __be32 addr_high;
+ __be32 addr_low;
+ __be16 pseudo_hdr_offset;
+ __be16 length;
+ u8 pad;
+ u8 rdma_count;
+ u8 cksum_offset; /* where to start computing cksum */
+ u8 flags; /* as defined above */
+};
+
+/* 8 Bytes */
+struct mcp_kreq_ether_recv {
+ __be32 addr_high;
+ __be32 addr_low;
+};
+
+/* Commands */
+
+#define MXGEFW_BOOT_HANDOFF 0xfc0000
+#define MXGEFW_BOOT_DUMMY_RDMA 0xfc01c0
+
+#define MXGEFW_ETH_CMD 0xf80000
+#define MXGEFW_ETH_SEND_4 0x200000
+#define MXGEFW_ETH_SEND_1 0x240000
+#define MXGEFW_ETH_SEND_2 0x280000
+#define MXGEFW_ETH_SEND_3 0x2c0000
+#define MXGEFW_ETH_RECV_SMALL 0x300000
+#define MXGEFW_ETH_RECV_BIG 0x340000
+#define MXGEFW_ETH_SEND_GO 0x380000
+#define MXGEFW_ETH_SEND_STOP 0x3C0000
+
+#define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000))
+#define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4)
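+
+/* Example (editor's note): MXGEFW_ETH_SEND(n) picks one of the four send
+ * command windows by n mod 4, e.g. MXGEFW_ETH_SEND(1) == 0x240000 ==
+ * MXGEFW_ETH_SEND_1, and MXGEFW_ETH_SEND(5) selects that same window. */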
+
+enum myri10ge_mcp_cmd_type {
+ MXGEFW_CMD_NONE = 0,
+ /* Reset the mcp, it is left in a safe state, waiting
+ * for the driver to set all its parameters */
+ MXGEFW_CMD_RESET = 1,
+
+	/* get the version number of the current firmware
+	 * (may also be available in the eeprom strings?) */
+ MXGEFW_GET_MCP_VERSION = 2,
+
+ /* Parameters which must be set by the driver before it can
+ * issue MXGEFW_CMD_ETHERNET_UP. They persist until the next
+ * MXGEFW_CMD_RESET is issued */
+
+ MXGEFW_CMD_SET_INTRQ_DMA = 3,
+ /* data0 = LSW of the host address
+ * data1 = MSW of the host address
+ * data2 = slice number if multiple slices are used
+ */
+
+ MXGEFW_CMD_SET_BIG_BUFFER_SIZE = 4, /* in bytes, power of 2 */
+ MXGEFW_CMD_SET_SMALL_BUFFER_SIZE = 5, /* in bytes */
+
+ /* Parameters which refer to lanai SRAM addresses where the
+ * driver must issue PIO writes for various things */
+
+ MXGEFW_CMD_GET_SEND_OFFSET = 6,
+ MXGEFW_CMD_GET_SMALL_RX_OFFSET = 7,
+ MXGEFW_CMD_GET_BIG_RX_OFFSET = 8,
+ /* data0 = slice number if multiple slices are used */
+
+ MXGEFW_CMD_GET_IRQ_ACK_OFFSET = 9,
+ MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET = 10,
+
+ /* Parameters which refer to rings stored on the MCP,
+ * and whose size is controlled by the mcp */
+
+ MXGEFW_CMD_GET_SEND_RING_SIZE = 11, /* in bytes */
+ MXGEFW_CMD_GET_RX_RING_SIZE = 12, /* in bytes */
+
+ /* Parameters which refer to rings stored in the host,
+ * and whose size is controlled by the host. Note that
+ * all must be physically contiguous and must contain
+ * a power of 2 number of entries. */
+
+ MXGEFW_CMD_SET_INTRQ_SIZE = 13, /* in bytes */
+#define MXGEFW_CMD_SET_INTRQ_SIZE_FLAG_NO_STRICT_SIZE_CHECK (1 << 31)
+
+ /* command to bring ethernet interface up. Above parameters
+ * (plus mtu & mac address) must have been exchanged prior
+ * to issuing this command */
+ MXGEFW_CMD_ETHERNET_UP = 14,
+
+ /* command to bring ethernet interface down. No further sends
+ * or receives may be processed until an MXGEFW_CMD_ETHERNET_UP
+ * is issued, and all interrupt queues must be flushed prior
+ * to ack'ing this command */
+
+ MXGEFW_CMD_ETHERNET_DOWN = 15,
+
+ /* commands the driver may issue live, without resetting
+ * the nic. Note that increasing the mtu "live" should
+ * only be done if the driver has already supplied buffers
+ * sufficiently large to handle the new mtu. Decreasing
+ * the mtu live is safe */
+
+ MXGEFW_CMD_SET_MTU = 16,
+ MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET = 17, /* in microseconds */
+ MXGEFW_CMD_SET_STATS_INTERVAL = 18, /* in microseconds */
+ MXGEFW_CMD_SET_STATS_DMA_OBSOLETE = 19, /* replaced by SET_STATS_DMA_V2 */
+
+ MXGEFW_ENABLE_PROMISC = 20,
+ MXGEFW_DISABLE_PROMISC = 21,
+ MXGEFW_SET_MAC_ADDRESS = 22,
+
+ MXGEFW_ENABLE_FLOW_CONTROL = 23,
+ MXGEFW_DISABLE_FLOW_CONTROL = 24,
+
+ /* do a DMA test
+ * data0,data1 = DMA address
+ * data2 = RDMA length (MSH), WDMA length (LSH)
+ * command return data = repetitions (MSH), 0.5-ms ticks (LSH)
+ */
+ MXGEFW_DMA_TEST = 25,
+
+ MXGEFW_ENABLE_ALLMULTI = 26,
+ MXGEFW_DISABLE_ALLMULTI = 27,
+
+ /* returns MXGEFW_CMD_ERROR_MULTICAST
+ * if there is no room in the cache
+ * data0,MSH(data1) = multicast group address */
+ MXGEFW_JOIN_MULTICAST_GROUP = 28,
+ /* returns MXGEFW_CMD_ERROR_MULTICAST
+ * if the address is not in the cache,
+ * or is equal to FF-FF-FF-FF-FF-FF
+ * data0,MSH(data1) = multicast group address */
+ MXGEFW_LEAVE_MULTICAST_GROUP = 29,
+ MXGEFW_LEAVE_ALL_MULTICAST_GROUPS = 30,
+
+ MXGEFW_CMD_SET_STATS_DMA_V2 = 31,
+ /* data0, data1 = bus addr,
+ * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows
+ * adding new stuff to mcp_irq_data without changing the ABI
+ *
+ * If multiple slices are used, data2 contains both the size of the
+ * structure (in the lower 16 bits) and the slice number
+ * (in the upper 16 bits).
+ */
+
+ MXGEFW_CMD_UNALIGNED_TEST = 32,
+	/* same as DMA_TEST (same args) but aborts with UNALIGNED on an
+	 * unaligned chipset */
+
+ MXGEFW_CMD_UNALIGNED_STATUS = 33,
+ /* return data = boolean, true if the chipset is known to be unaligned */
+
+ MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS = 34,
+ /* data0 = number of big buffers to use. It must be 0 or a power of 2.
+	 * 0 indicates that the NIC consumes as many buffers as are required
+	 * for each packet. This is the default behavior.
+ * A power of 2 number indicates that the NIC always uses the specified
+ * number of buffers for each big receive packet.
+ * It is up to the driver to ensure that this value is big enough for
+ * the NIC to be able to receive maximum-sized packets.
+ */
+
+ MXGEFW_CMD_GET_MAX_RSS_QUEUES = 35,
+ MXGEFW_CMD_ENABLE_RSS_QUEUES = 36,
+ /* data0 = number of slices n (0, 1, ..., n-1) to enable
+ * data1 = interrupt mode | use of multiple transmit queues.
+ * 0=share one INTx/MSI.
+ * 1=use one MSI-X per queue.
+ * If all queues share one interrupt, the driver must have set
+ * RSS_SHARED_INTERRUPT_DMA before enabling queues.
+ * 2=enable both receive and send queues.
+ * Without this bit set, only one send queue (slice 0's send queue)
+ * is enabled. The receive queues are always enabled.
+ */
+#define MXGEFW_SLICE_INTR_MODE_SHARED 0x0
+#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 0x1
+#define MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES 0x2
+
+ MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET = 37,
+ MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA = 38,
+ /* data0, data1 = bus address lsw, msw */
+ MXGEFW_CMD_GET_RSS_TABLE_OFFSET = 39,
+ /* get the offset of the indirection table */
+ MXGEFW_CMD_SET_RSS_TABLE_SIZE = 40,
+ /* set the size of the indirection table */
+ MXGEFW_CMD_GET_RSS_KEY_OFFSET = 41,
+ /* get the offset of the secret key */
+ MXGEFW_CMD_RSS_KEY_UPDATED = 42,
+ /* tell nic that the secret key's been updated */
+ MXGEFW_CMD_SET_RSS_ENABLE = 43,
+ /* data0 = enable/disable rss
+ * 0: disable rss. nic does not distribute receive packets.
+ * 1: enable rss. nic distributes receive packets among queues.
+ * data1 = hash type
+ * 1: IPV4 (required by RSS)
+ * 2: TCP_IPV4 (required by RSS)
+ * 3: IPV4 | TCP_IPV4 (required by RSS)
+ * 4: source port
+ * 5: source port + destination port
+ */
+#define MXGEFW_RSS_HASH_TYPE_IPV4 0x1
+#define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2
+#define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4
+#define MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT 0x5
+#define MXGEFW_RSS_HASH_TYPE_MAX 0x5
+
+ MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE = 44,
+	/* Return data = the max. size of the entire headers of an IPv6 TSO packet.
+	 * If the header size of an IPv6 TSO packet is larger than the specified
+ * value, then the driver must not use TSO.
+ * This size restriction only applies to IPv6 TSO.
+ * For IPv4 TSO, the maximum size of the headers is fixed, and the NIC
+ * always has enough header buffer to store maximum-sized headers.
+ */
+
+ MXGEFW_CMD_SET_TSO_MODE = 45,
+ /* data0 = TSO mode.
+ * 0: Linux/FreeBSD style (NIC default)
+ * 1: NDIS/NetBSD style
+ */
+#define MXGEFW_TSO_MODE_LINUX 0
+#define MXGEFW_TSO_MODE_NDIS 1
+
+ MXGEFW_CMD_MDIO_READ = 46,
+ /* data0 = dev_addr (PMA/PMD or PCS ...), data1 = register/addr */
+ MXGEFW_CMD_MDIO_WRITE = 47,
+ /* data0 = dev_addr, data1 = register/addr, data2 = value */
+
+ MXGEFW_CMD_I2C_READ = 48,
+	/* Starts fetching a fresh copy of one byte or of the whole module i2c
+	 * table; the obtained data is cached inside the xaui-xfi chip:
+ * data0 : 0 => get one byte, 1=> get 256 bytes
+ * data1 : If data0 == 0: location to refresh
+ * bit 7:0 register location
+	 * bit 15:8 is the i2c slave addr (0 is interpreted as 0xA1)
+ * bit 23:16 is the i2c bus number (for multi-port NICs)
+ * If data0 == 1: unused
+ * The operation might take ~1ms for a single byte or ~65ms when refreshing all 256 bytes
+ * During the i2c operation, MXGEFW_CMD_I2C_READ or MXGEFW_CMD_I2C_BYTE attempts
+ * will return MXGEFW_CMD_ERROR_BUSY
+ */
+ MXGEFW_CMD_I2C_BYTE = 49,
+ /* Return the last obtained copy of a given byte in the xfp i2c table
+ * (copy cached during the last relevant MXGEFW_CMD_I2C_READ)
+ * data0 : index of the desired table entry
+ * Return data = the byte stored at the requested index in the table
+ */
+
+ MXGEFW_CMD_GET_VPUMP_OFFSET = 50,
+ /* Return data = NIC memory offset of mcp_vpump_public_global */
+ MXGEFW_CMD_RESET_VPUMP = 51,
+ /* Resets the VPUMP state */
+
+ MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE = 52,
+ /* data0 = mcp_slot type to use.
+ * 0 = the default 4B mcp_slot
+ * 1 = 8B mcp_slot_8
+ */
+#define MXGEFW_RSS_MCP_SLOT_TYPE_MIN 0
+#define MXGEFW_RSS_MCP_SLOT_TYPE_WITH_HASH 1
+
+ MXGEFW_CMD_SET_THROTTLE_FACTOR = 53,
+ /* set the throttle factor for ethp_z8e
+ * data0 = throttle_factor
+ * throttle_factor = 256 * pcie-raw-speed / tx_speed
+ * tx_speed = 256 * pcie-raw-speed / throttle_factor
+ *
+ * For PCI-E x8: pcie-raw-speed == 16Gb/s
+ * For PCI-E x4: pcie-raw-speed == 8Gb/s
+ *
+ * ex1: throttle_factor == 0x1a0 (416), tx_speed == 1.23GB/s == 9.846 Gb/s
+ * ex2: throttle_factor == 0x200 (512), tx_speed == 1.0GB/s == 8 Gb/s
+ *
+ * with tx_boundary == 2048, max-throttle-factor == 8191 => min-speed == 500Mb/s
+ * with tx_boundary == 4096, max-throttle-factor == 4095 => min-speed == 1Gb/s
+ */
+
+ MXGEFW_CMD_VPUMP_UP = 54,
+ /* Allocates VPump Connection, Send Request and Zero copy buffer address tables */
+ MXGEFW_CMD_GET_VPUMP_CLK = 55,
+ /* Get the lanai clock */
+
+ MXGEFW_CMD_GET_DCA_OFFSET = 56,
+ /* offset of dca control for WDMAs */
+
+ /* VMWare NetQueue commands */
+ MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE = 57,
+ MXGEFW_CMD_NETQ_ADD_FILTER = 58,
+ /* data0 = filter_id << 16 | queue << 8 | type */
+ /* data1 = MS4 of MAC Addr */
+ /* data2 = LS2_MAC << 16 | VLAN_tag */
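+	/* e.g. (editor's note) filter_id 3, queue 1, FILTERTYPE_MACADDR:
+	 * data0 = 3 << 16 | 1 << 8 | 1 = 0x00030101 */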
+ MXGEFW_CMD_NETQ_DEL_FILTER = 59,
+ /* data0 = filter_id */
+ MXGEFW_CMD_NETQ_QUERY1 = 60,
+ MXGEFW_CMD_NETQ_QUERY2 = 61,
+ MXGEFW_CMD_NETQ_QUERY3 = 62,
+ MXGEFW_CMD_NETQ_QUERY4 = 63,
+
+ MXGEFW_CMD_RELAX_RXBUFFER_ALIGNMENT = 64,
+ /* When set, small receive buffers can cross page boundaries.
+ * Both small and big receive buffers may start at any address.
+ * This option has performance implications, so use with caution.
+ */
+};
+
+enum myri10ge_mcp_cmd_status {
+ MXGEFW_CMD_OK = 0,
+ MXGEFW_CMD_UNKNOWN = 1,
+ MXGEFW_CMD_ERROR_RANGE = 2,
+ MXGEFW_CMD_ERROR_BUSY = 3,
+ MXGEFW_CMD_ERROR_EMPTY = 4,
+ MXGEFW_CMD_ERROR_CLOSED = 5,
+ MXGEFW_CMD_ERROR_HASH_ERROR = 6,
+ MXGEFW_CMD_ERROR_BAD_PORT = 7,
+ MXGEFW_CMD_ERROR_RESOURCES = 8,
+ MXGEFW_CMD_ERROR_MULTICAST = 9,
+ MXGEFW_CMD_ERROR_UNALIGNED = 10,
+ MXGEFW_CMD_ERROR_NO_MDIO = 11,
+ MXGEFW_CMD_ERROR_I2C_FAILURE = 12,
+ MXGEFW_CMD_ERROR_I2C_ABSENT = 13,
+ MXGEFW_CMD_ERROR_BAD_PCIE_LINK = 14
+};
+
+#define MXGEFW_OLD_IRQ_DATA_LEN 40
+
+struct mcp_irq_data {
+ /* add new counters at the beginning */
+ __be32 future_use[1];
+ __be32 dropped_pause;
+ __be32 dropped_unicast_filtered;
+ __be32 dropped_bad_crc32;
+ __be32 dropped_bad_phy;
+ __be32 dropped_multicast_filtered;
+ /* 40 Bytes */
+ __be32 send_done_count;
+
+#define MXGEFW_LINK_DOWN 0
+#define MXGEFW_LINK_UP 1
+#define MXGEFW_LINK_MYRINET 2
+#define MXGEFW_LINK_UNKNOWN 3
+ __be32 link_up;
+ __be32 dropped_link_overflow;
+ __be32 dropped_link_error_or_filtered;
+ __be32 dropped_runt;
+ __be32 dropped_overrun;
+ __be32 dropped_no_small_buffer;
+ __be32 dropped_no_big_buffer;
+ __be32 rdma_tags_available;
+
+ u8 tx_stopped;
+ u8 link_down;
+ u8 stats_updated;
+ u8 valid;
+};
+
+/* definitions for NETQ filter type */
+#define MXGEFW_NETQ_FILTERTYPE_NONE 0
+#define MXGEFW_NETQ_FILTERTYPE_MACADDR 1
+#define MXGEFW_NETQ_FILTERTYPE_VLAN 2
+#define MXGEFW_NETQ_FILTERTYPE_VLANMACADDR 3
+
+#endif /* __MYRI10GE_MCP_H__ */
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h
new file mode 100644
index 000000000000..7ec4b864a550
--- /dev/null
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h
@@ -0,0 +1,60 @@
+#ifndef __MYRI10GE_MCP_GEN_HEADER_H__
+#define __MYRI10GE_MCP_GEN_HEADER_H__
+
+#define MCP_HEADER_PTR_OFFSET 0x3c
+
+#define MCP_TYPE_MX 0x4d582020 /* "MX " */
+#define MCP_TYPE_PCIE 0x70636965 /* "PCIE" pcie-only MCP */
+#define MCP_TYPE_ETH 0x45544820 /* "ETH " */
+#define MCP_TYPE_MCP0 0x4d435030 /* "MCP0" */
+#define MCP_TYPE_DFLT 0x20202020 /* " " */
+#define MCP_TYPE_ETHZ 0x4554485a /* "ETHZ" */
+
+struct mcp_gen_header {
+ /* the first 4 fields are filled at compile time */
+ unsigned header_length;
+ __be32 mcp_type;
+ char version[128];
+ unsigned mcp_private; /* pointer to mcp-type specific structure */
+
+ /* filled by the MCP at run-time */
+ unsigned sram_size;
+ unsigned string_specs; /* either the original STRING_SPECS or a superset */
+ unsigned string_specs_len;
+
+ /* Fields above this comment are guaranteed to be present.
+ *
+ * Fields below this comment are extensions added in later versions
+ * of this struct, drivers should compare the header_length against
+ * offsetof(field) to check whether a given MCP implements them.
+ *
+ * Never remove any field. Keep everything naturally aligned.
+ */
+
+ /* Specifies if the running mcp is mcp0, 1, or 2. */
+ unsigned char mcp_index;
+ unsigned char disable_rabbit;
+ unsigned char unaligned_tlp;
+ unsigned char pcie_link_algo;
+ unsigned counters_addr;
+ unsigned copy_block_info; /* for small mcps loaded with "lload -d" */
+ unsigned short handoff_id_major; /* must be equal */
+ unsigned short handoff_id_caps; /* bitfield: new mcp must have superset */
+ unsigned msix_table_addr; /* start address of msix table in firmware */
+ unsigned bss_addr; /* start of bss */
+ unsigned features;
+ unsigned ee_hdr_addr;
+ unsigned led_pattern;
+ unsigned led_pattern_dflt;
+ /* 8 */
+};
+
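+/* Illustrative sketch (editor's addition): testing whether the running MCP
+ * is new enough to publish one of the extension fields, per the
+ * header_length rule above; the helper name is hypothetical, e.g.
+ * mcp_has_field(hdr, offsetof(struct mcp_gen_header, msix_table_addr),
+ *               sizeof(unsigned)). */
+static inline int mcp_has_field(const struct mcp_gen_header *hdr,
+				unsigned int off, unsigned int size)
+{
+	return hdr->header_length >= off + size;
+}
+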
+struct zmcp_info {
+ unsigned info_len;
+ unsigned zmcp_addr;
+ unsigned zmcp_len;
+ unsigned mcp_edata;
+};
+
+#endif /* __MYRI10GE_MCP_GEN_HEADER_H__ */
diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig
new file mode 100644
index 000000000000..4a6b9fd073b6
--- /dev/null
+++ b/drivers/net/ethernet/natsemi/Kconfig
@@ -0,0 +1,83 @@
+#
+# National Semiconductor device configuration
+#
+
+config NET_VENDOR_NATSEMI
+	bool "National Semiconductor devices"
+ default y
+ depends on MCA || MAC || MACH_JAZZ || PCI || XTENSA_PLATFORM_XT2000
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+	  the questions about National Semiconductor devices. If you say Y,
+ you will be asked for your specific card in the following questions.
+
+if NET_VENDOR_NATSEMI
+
+config IBMLANA
+ tristate "IBM LAN Adapter/A support"
+ depends on MCA
+ ---help---
+ This is a Micro Channel Ethernet adapter. You need to set
+	  This is a Micro Channel Ethernet adapter. You need to set
+	  CONFIG_MCA to use this driver. It is available both as an in-kernel
+	  driver and as a module.
+
+	  To compile this driver as a module, choose M here. The only
+	  currently supported card is the IBM LAN Adapter/A for Ethernet. It
+	  supports both 16K and 32K memory windows; a 32K window gives better
+	  protection against packet loss. Using multiple boards with this
+	  driver should be possible, but has not been tested so far due to
+	  lack of hardware.
+config MACSONIC
+ tristate "Macintosh SONIC based ethernet (onboard, NuBus, LC, CS)"
+ depends on MAC
+ ---help---
+ Support for NatSemi SONIC based Ethernet devices. This includes
+ the onboard Ethernet in many Quadras as well as some LC-PDS,
+	  a few NuBus and all known Comm Slot Ethernet cards. If you have
+ one of these say Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. This module will
+ be called macsonic.
+
+config MIPS_JAZZ_SONIC
+ tristate "MIPS JAZZ onboard SONIC Ethernet support"
+ depends on MACH_JAZZ
+ ---help---
+ This is the driver for the onboard card of MIPS Magnum 4000,
+ Acer PICA, Olivetti M700-10 and a few other identical OEM systems.
+
+config NATSEMI
+ tristate "National Semiconductor DP8381x series PCI Ethernet support"
+ depends on PCI
+ select CRC32
+ ---help---
+	  This driver is for the National Semiconductor DP83810 series
+	  (including the 83815 chip), which is used in cards from PureData,
+	  NetGear, Linksys and others.
+ More specific information and updates are available from
+ <http://www.scyld.com/network/natsemi.html>.
+
+config NS83820
+ tristate "National Semiconductor DP83820 support"
+ depends on PCI
+ ---help---
+ This is a driver for the National Semiconductor DP83820 series
+ of gigabit ethernet MACs. Cards using this chipset include
+ the D-Link DGE-500T, PureData's PDP8023Z-TG, SMC's SMC9462TX,
+ SOHO-GA2000T, SOHO-GA2500T. The driver supports the use of
+ zero copy.
+
+config XTENSA_XT2000_SONIC
+ tristate "Xtensa XT2000 onboard SONIC Ethernet support"
+ depends on XTENSA_PLATFORM_XT2000
+ ---help---
+ This is the driver for the onboard card of the Xtensa XT2000 board.
+
+endif # NET_VENDOR_NATSEMI
diff --git a/drivers/net/ethernet/natsemi/Makefile b/drivers/net/ethernet/natsemi/Makefile
new file mode 100644
index 000000000000..9aa5dea52b3e
--- /dev/null
+++ b/drivers/net/ethernet/natsemi/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the National Semiconductor network devices.
+#
+
+obj-$(CONFIG_IBMLANA) += ibmlana.o
+obj-$(CONFIG_MACSONIC) += macsonic.o
+obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o
+obj-$(CONFIG_NATSEMI) += natsemi.o
+obj-$(CONFIG_NS83820) += ns83820.o
+obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
diff --git a/drivers/net/ethernet/natsemi/ibmlana.c b/drivers/net/ethernet/natsemi/ibmlana.c
new file mode 100644
index 000000000000..999407f7ebdf
--- /dev/null
+++ b/drivers/net/ethernet/natsemi/ibmlana.c
@@ -0,0 +1,1075 @@
+/*
+net-3-driver for the IBM LAN Adapter/A
+
+This is an extension to the Linux operating system, and is covered by the
+same GNU General Public License that covers that work.
+
+Copyright 1999 by Alfred Arnold (alfred@ccac.rwth-aachen.de,
+ alfred.arnold@lancom.de)
+
+This driver is based both on the SK_MCA driver, which is itself based on the
+SK_G16 and 3C523 driver.
+
+paper sources:
+ 'PC Hardware: Aufbau, Funktionsweise, Programmierung' by
+ Hans-Peter Messmer for the basic Microchannel stuff
+
+  'Linux Geraetetreiber' by Alessandro Rubini, Kalle Dalheimer
+ for help on Ethernet driver programming
+
+ 'DP83934CVUL-20/25 MHz SONIC-T Ethernet Controller Datasheet' by National
+ Semiconductor for info on the MAC chip
+
+ 'LAN Technical Reference Ethernet Adapter Interface Version 1 Release 1.0
+ Document Number SC30-3661-00' by IBM for info on the adapter itself
+
+ Also see http://www.national.com/analog
+
+special acknowledgements to:
+ - Bob Eager for helping me out with documentation from IBM
+ - Jim Shorney for his endless patience with me while I was using
+ him as a beta tester to trace down the address filter bug ;-)
+
+ Missing things:
+
+ -> set debug level via ioctl instead of compile-time switches
+ -> I didn't follow the development of the 2.1.x kernels, so my
+ assumptions about which things changed with which kernel version
+ are probably nonsense
+
+History:
+ Nov 6th, 1999
+ startup from SK_MCA driver
+ Dec 6th, 1999
+ finally got docs about the card. A big thank you to Bob Eager!
+ Dec 12th, 1999
+ first packet received
+ Dec 13th, 1999
+ recv queue done, tcpdump works
+ Dec 15th, 1999
+ transmission part works
+ Dec 28th, 1999
+	added usage of the isa_functions for Linux 2.3. Things should
+ still work with 2.0.x....
+ Jan 28th, 2000
+ in Linux 2.2.13, the version.h file mysteriously didn't get
+ included. Added a workaround for this. Furthermore, it now
+	also compiles as a module ;-)
+ Jan 30th, 2000
+ newer kernels automatically probe more than one board, so the
+ 'startslot' as a variable is also needed here
+ Apr 12th, 2000
+	the interrupt mask register is now set 'hard' in a single write
+	instead of by setting individual bits, since the latter seems to
+	set bits that shouldn't be set
+ May 21st, 2000
+ reset interrupt status immediately after CAM load
+ add a recovery delay after releasing the chip's reset line
+ May 24th, 2000
+ finally found the bug in the address filter setup - damned signed
+ chars!
+ June 1st, 2000
+ corrected version codes, added support for the latest 2.3 changes
+ Oct 28th, 2002
+ cleaned up for the 2.5 tree <alan@lxorguk.ukuu.org.uk>
+
+ *************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+#include <linux/mca.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/processor.h>
+#include <asm/io.h>
+
+#define _IBM_LANA_DRIVER_
+#include "ibmlana.h"
+
+#undef DEBUG
+
+#define DRV_NAME "ibmlana"
+
+/* ------------------------------------------------------------------------
+ * global static data - not more since we can handle multiple boards and
+ * have to pack all state info into the device struct!
+ * ------------------------------------------------------------------------ */
+
+static char *MediaNames[Media_Count] = {
+ "10BaseT", "10Base5", "Unknown", "10Base2"
+};
+
+/* ------------------------------------------------------------------------
+ * private subfunctions
+ * ------------------------------------------------------------------------ */
+
+#ifdef DEBUG
+ /* dump all registers */
+
+static void dumpregs(struct net_device *dev)
+{
+ int z;
+
+ for (z = 0; z < 160; z += 2) {
+ if (!(z & 15))
+ printk("REGS: %04x:", z);
+ printk(" %04x", inw(dev->base_addr + z));
+ if ((z & 15) == 14)
+ printk("\n");
+ }
+}
+
+/* dump parts of shared memory - only needed during debugging */
+
+static void dumpmem(struct net_device *dev, u32 start, u32 len)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+ int z;
+
+ printk("Address %04x:\n", start);
+ for (z = 0; z < len; z++) {
+ if ((z & 15) == 0)
+ printk("%04x:", z);
+ printk(" %02x", readb(priv->base + start + z));
+ if ((z & 15) == 15)
+ printk("\n");
+ }
+ if ((z & 15) != 0)
+ printk("\n");
+}
+
+/* print exact time - ditto */
+
+static void PrTime(void)
+{
+ struct timeval tv;
+
+ do_gettimeofday(&tv);
+ printk("%9d:%06d: ", (int) tv.tv_sec, (int) tv.tv_usec);
+}
+#endif /* DEBUG */
+
+/* deduce resources out of POS registers */
+
+static void getaddrs(struct mca_device *mdev, int *base, int *memlen,
+ int *iobase, int *irq, ibmlana_medium *medium)
+{
+ u_char pos0, pos1;
+
+ pos0 = mca_device_read_stored_pos(mdev, 2);
+ pos1 = mca_device_read_stored_pos(mdev, 3);
+
+ *base = 0xc0000 + ((pos1 & 0xf0) << 9);
+ *memlen = (pos1 & 0x01) ? 0x8000 : 0x4000;
+ *iobase = (pos0 & 0xe0) << 7;
+ switch (pos0 & 0x06) {
+ case 0:
+ *irq = 5;
+ break;
+ case 2:
+ *irq = 15;
+ break;
+ case 4:
+ *irq = 10;
+ break;
+ case 6:
+ *irq = 11;
+ break;
+ }
+ *medium = (pos0 & 0x18) >> 3;
+}
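+
+/* Worked example (editor's note): pos0 = 0xe6, pos1 = 0xf1 decode to
+ * base 0xde000, memlen 0x8000, iobase 0x7000, irq 11, medium 10BaseT. */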
+
+/* wait on register value with mask and timeout */
+
+static int wait_timeout(struct net_device *dev, int regoffs, u16 mask,
+ u16 value, int timeout)
+{
+ unsigned long fin = jiffies + timeout;
+
+ while (time_before(jiffies,fin))
+ if ((inw(dev->base_addr + regoffs) & mask) == value)
+ return 1;
+
+ return 0;
+}
+
+/* reset the whole board */
+
+static void ResetBoard(struct net_device *dev)
+{
+ unsigned char bcmval;
+
+ /* read original board control value */
+
+ bcmval = inb(dev->base_addr + BCMREG);
+
+ /* set reset bit for a while */
+
+ bcmval |= BCMREG_RESET;
+ outb(bcmval, dev->base_addr + BCMREG);
+ udelay(10);
+ bcmval &= ~BCMREG_RESET;
+ outb(bcmval, dev->base_addr + BCMREG);
+
+ /* switch over to RAM again */
+
+ bcmval |= BCMREG_RAMEN | BCMREG_RAMWIN;
+ outb(bcmval, dev->base_addr + BCMREG);
+}
+
+/* calculate RAM layout & set up descriptors in RAM */
+
+static void InitDscrs(struct net_device *dev)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+ u32 addr, baddr, raddr;
+ int z;
+ tda_t tda;
+ rda_t rda;
+ rra_t rra;
+
+ /* initialize RAM */
+
+	memset_io(priv->base, 0xaa,
+		  dev->mem_end - dev->mem_start);	/* fill the whole RAM */
+
+ /* setup n TX descriptors - independent of RAM size */
+
+ priv->tdastart = addr = 0;
+ priv->txbufstart = baddr = sizeof(tda_t) * TXBUFCNT;
+ for (z = 0; z < TXBUFCNT; z++) {
+ tda.status = 0;
+ tda.config = 0;
+ tda.length = 0;
+ tda.fragcount = 1;
+ tda.startlo = baddr;
+ tda.starthi = 0;
+ tda.fraglength = 0;
+ if (z == TXBUFCNT - 1)
+ tda.link = priv->tdastart;
+ else
+ tda.link = addr + sizeof(tda_t);
+ tda.link |= 1;
+ memcpy_toio(priv->base + addr, &tda, sizeof(tda_t));
+ addr += sizeof(tda_t);
+ baddr += PKTSIZE;
+ }
+
+ /* calculate how many receive buffers fit into remaining memory */
+
+ priv->rxbufcnt = (dev->mem_end - dev->mem_start - baddr) / (sizeof(rra_t) + sizeof(rda_t) + PKTSIZE);
+
+ /* calculate receive addresses */
+
+ priv->rrastart = raddr = priv->txbufstart + (TXBUFCNT * PKTSIZE);
+ priv->rdastart = addr = priv->rrastart + (priv->rxbufcnt * sizeof(rra_t));
+ priv->rxbufstart = baddr = priv->rdastart + (priv->rxbufcnt * sizeof(rda_t));
+
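+	/* Resulting shared-RAM layout (editor's note):
+	 *   TDA descriptors | tx buffers | RRA | RDA | rx buffers
+	 * i.e. tdastart < txbufstart < rrastart < rdastart < rxbufstart. */
+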
+ for (z = 0; z < priv->rxbufcnt; z++) {
+ rra.startlo = baddr;
+ rra.starthi = 0;
+ rra.cntlo = PKTSIZE >> 1;
+ rra.cnthi = 0;
+ memcpy_toio(priv->base + raddr, &rra, sizeof(rra_t));
+
+ rda.status = 0;
+ rda.length = 0;
+ rda.startlo = 0;
+ rda.starthi = 0;
+ rda.seqno = 0;
+ if (z < priv->rxbufcnt - 1)
+ rda.link = addr + sizeof(rda_t);
+ else
+ rda.link = 1;
+ rda.inuse = 1;
+ memcpy_toio(priv->base + addr, &rda, sizeof(rda_t));
+
+ baddr += PKTSIZE;
+ raddr += sizeof(rra_t);
+ addr += sizeof(rda_t);
+ }
+
+ /* initialize current pointers */
+
+ priv->nextrxdescr = 0;
+ priv->lastrxdescr = priv->rxbufcnt - 1;
+ priv->nexttxdescr = 0;
+ priv->currtxdescr = 0;
+ priv->txusedcnt = 0;
+ memset(priv->txused, 0, sizeof(priv->txused));
+}
+
+/* set up Rx + Tx descriptors in SONIC */
+
+static int InitSONIC(struct net_device *dev)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+
+ /* set up start & end of resource area */
+
+	outw(0, dev->base_addr + SONIC_URRA);
+ outw(priv->rrastart, dev->base_addr + SONIC_RSA);
+ outw(priv->rrastart + (priv->rxbufcnt * sizeof(rra_t)), dev->base_addr + SONIC_REA);
+ outw(priv->rrastart, dev->base_addr + SONIC_RRP);
+ outw(priv->rrastart, dev->base_addr + SONIC_RWP);
+
+ /* set EOBC so that only one packet goes into one buffer */
+
+ outw((PKTSIZE - 4) >> 1, dev->base_addr + SONIC_EOBC);
+
+ /* let SONIC read the first RRA descriptor */
+
+ outw(CMDREG_RRRA, dev->base_addr + SONIC_CMDREG);
+ if (!wait_timeout(dev, SONIC_CMDREG, CMDREG_RRRA, 0, 2)) {
+		printk(KERN_ERR "%s: SONIC did not respond on RRRA command - giving up.\n", dev->name);
+ return 0;
+ }
+
+ /* point SONIC to the first RDA */
+
+ outw(0, dev->base_addr + SONIC_URDA);
+ outw(priv->rdastart, dev->base_addr + SONIC_CRDA);
+
+ /* set upper half of TDA address */
+
+ outw(0, dev->base_addr + SONIC_UTDA);
+
+ return 1;
+}
+
+/* stop SONIC so we can reinitialize it */
+
+static void StopSONIC(struct net_device *dev)
+{
+ /* disable interrupts */
+
+ outb(inb(dev->base_addr + BCMREG) & (~BCMREG_IEN), dev->base_addr + BCMREG);
+ outb(0, dev->base_addr + SONIC_IMREG);
+
+ /* reset the SONIC */
+
+ outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG);
+ udelay(10);
+ outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG);
+}
+
+/* initialize card and SONIC for proper operation */
+
+static void putcam(camentry_t * cams, int *camcnt, char *addr)
+{
+ camentry_t *pcam = cams + (*camcnt);
+ u8 *uaddr = (u8 *) addr;
+
+ pcam->index = *camcnt;
+ pcam->addr0 = (((u16) uaddr[1]) << 8) | uaddr[0];
+ pcam->addr1 = (((u16) uaddr[3]) << 8) | uaddr[2];
+ pcam->addr2 = (((u16) uaddr[5]) << 8) | uaddr[4];
+ (*camcnt)++;
+}
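+
+/* Example (editor's note): MAC 02:60:8c:12:34:56 is packed little-endian
+ * per 16-bit word: addr0 = 0x6002, addr1 = 0x128c, addr2 = 0x5634. */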
+
+static void InitBoard(struct net_device *dev)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+ int camcnt;
+ camentry_t cams[16];
+ u32 cammask;
+ struct netdev_hw_addr *ha;
+ u16 rcrval;
+
+ /* reset the SONIC */
+
+ outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG);
+ udelay(10);
+
+ /* clear all spurious interrupts */
+
+ outw(inw(dev->base_addr + SONIC_ISREG), dev->base_addr + SONIC_ISREG);
+
+ /* set up the SONIC's bus interface - constant for this adapter -
+ must be done while the SONIC is in reset */
+
+ outw(DCREG_USR1 | DCREG_USR0 | DCREG_WC1 | DCREG_DW32, dev->base_addr + SONIC_DCREG);
+ outw(0, dev->base_addr + SONIC_DCREG2);
+
+	/* remove reset from the SONIC */
+
+ outw(0, dev->base_addr + SONIC_CMDREG);
+ udelay(10);
+
+ /* data sheet requires URRA to be programmed before setting up the CAM contents */
+
+ outw(0, dev->base_addr + SONIC_URRA);
+
+ /* program the CAM entry 0 to the device address */
+
+ camcnt = 0;
+ putcam(cams, &camcnt, dev->dev_addr);
+
+ /* start putting the multicast addresses into the CAM list. Stop if
+ it is full. */
+
+ netdev_for_each_mc_addr(ha, dev) {
+ putcam(cams, &camcnt, ha->addr);
+ if (camcnt == 16)
+ break;
+ }
+
+ /* calculate CAM mask */
+
+ cammask = (1 << camcnt) - 1;
+
+ /* feed CDA into SONIC, initialize RCR value (always get broadcasts) */
+
+ memcpy_toio(priv->base, cams, sizeof(camentry_t) * camcnt);
+ memcpy_toio(priv->base + (sizeof(camentry_t) * camcnt), &cammask, sizeof(cammask));
+
+#ifdef DEBUG
+ printk("CAM setup:\n");
+ dumpmem(dev, 0, sizeof(camentry_t) * camcnt + sizeof(cammask));
+#endif
+
+ outw(0, dev->base_addr + SONIC_CAMPTR);
+ outw(camcnt, dev->base_addr + SONIC_CAMCNT);
+ outw(CMDREG_LCAM, dev->base_addr + SONIC_CMDREG);
+ if (!wait_timeout(dev, SONIC_CMDREG, CMDREG_LCAM, 0, 2)) {
+		printk(KERN_ERR "%s: SONIC did not respond on LCAM command - giving up.\n", dev->name);
+ return;
+ } else {
+ /* clear interrupt condition */
+
+ outw(ISREG_LCD, dev->base_addr + SONIC_ISREG);
+
+#ifdef DEBUG
+ printk("Loading CAM done, address pointers %04x:%04x\n",
+ inw(dev->base_addr + SONIC_URRA),
+ inw(dev->base_addr + SONIC_CAMPTR));
+ {
+ int z;
+
+ printk("\n-->CAM: PTR %04x CNT %04x\n",
+ inw(dev->base_addr + SONIC_CAMPTR),
+ inw(dev->base_addr + SONIC_CAMCNT));
+ outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG);
+ for (z = 0; z < camcnt; z++) {
+ outw(z, dev->base_addr + SONIC_CAMEPTR);
+ printk("Entry %d: %04x %04x %04x\n", z,
+ inw(dev->base_addr + SONIC_CAMADDR0),
+ inw(dev->base_addr + SONIC_CAMADDR1),
+ inw(dev->base_addr + SONIC_CAMADDR2));
+ }
+ outw(0, dev->base_addr + SONIC_CMDREG);
+ }
+#endif
+ }
+
+ rcrval = RCREG_BRD | RCREG_LB_NONE;
+
+ /* if still multicast addresses left or ALLMULTI is set, set the multicast
+ enable bit */
+
+ if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > camcnt)
+ rcrval |= RCREG_AMC;
+
+ /* promiscuous mode ? */
+
+ if (dev->flags & IFF_PROMISC)
+ rcrval |= RCREG_PRO;
+
+ /* program receive mode */
+
+ outw(rcrval, dev->base_addr + SONIC_RCREG);
+#ifdef DEBUG
+ printk("\nRCRVAL: %04x\n", rcrval);
+#endif
+
+ /* set up descriptors in shared memory + feed them into SONIC registers */
+
+ InitDscrs(dev);
+ if (!InitSONIC(dev))
+ return;
+
+ /* reset all pending interrupts */
+
+ outw(0xffff, dev->base_addr + SONIC_ISREG);
+
+ /* enable transmitter + receiver interrupts */
+
+ outw(CMDREG_RXEN, dev->base_addr + SONIC_CMDREG);
+ outw(IMREG_PRXEN | IMREG_RBEEN | IMREG_PTXEN | IMREG_TXEREN, dev->base_addr + SONIC_IMREG);
+
+ /* turn on card interrupts */
+
+ outb(inb(dev->base_addr + BCMREG) | BCMREG_IEN, dev->base_addr + BCMREG);
+
+#ifdef DEBUG
+ printk("Register dump after initialization:\n");
+ dumpregs(dev);
+#endif
+}
+
+/* start transmission of a descriptor */
+
+static void StartTx(struct net_device *dev, int descr)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+ int addr;
+
+ addr = priv->tdastart + (descr * sizeof(tda_t));
+
+ /* put descriptor address into SONIC */
+
+ outw(addr, dev->base_addr + SONIC_CTDA);
+
+ /* trigger transmitter */
+
+ priv->currtxdescr = descr;
+ outw(CMDREG_TXP, dev->base_addr + SONIC_CMDREG);
+}
+
+/* ------------------------------------------------------------------------
+ * interrupt handler(s)
+ * ------------------------------------------------------------------------ */
+
+/* receive buffer area exhausted */
+
+static void irqrbe_handler(struct net_device *dev)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+
+ /* point the SONIC back to the RRA start */
+
+ outw(priv->rrastart, dev->base_addr + SONIC_RRP);
+ outw(priv->rrastart, dev->base_addr + SONIC_RWP);
+}
+
+/* receive interrupt */
+
+static void irqrx_handler(struct net_device *dev)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+ rda_t rda;
+ u32 rdaaddr, lrdaaddr;
+
+ /* loop until ... */
+
+ while (1) {
+ /* read descriptor that was next to be filled by SONIC */
+
+ rdaaddr = priv->rdastart + (priv->nextrxdescr * sizeof(rda_t));
+ lrdaaddr = priv->rdastart + (priv->lastrxdescr * sizeof(rda_t));
+ memcpy_fromio(&rda, priv->base + rdaaddr, sizeof(rda_t));
+
+ /* iron out upper word halves of fields we use - SONIC will duplicate
+ bits 0..15 to 16..31 */
+
+ rda.status &= 0xffff;
+ rda.length &= 0xffff;
+ rda.startlo &= 0xffff;
+
+ /* stop if the SONIC still owns it, i.e. there is no data for us */
+
+ if (rda.inuse)
+ break;
+
+ /* good packet? */
+
+ else if (rda.status & RCREG_PRX) {
+ struct sk_buff *skb;
+
+ /* fetch buffer */
+
+ skb = dev_alloc_skb(rda.length + 2);
+ if (skb == NULL)
+ dev->stats.rx_dropped++;
+			else {
+				skb_reserve(skb, 2);	/* align IP header; matches the +2 in the alloc */
+
+				/* copy out data */
+
+ memcpy_fromio(skb_put(skb, rda.length),
+ priv->base +
+ rda.startlo, rda.length);
+
+ /* set up skb fields */
+
+ skb->protocol = eth_type_trans(skb, dev);
+ skb_checksum_none_assert(skb);
+
+ /* bookkeeping */
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += rda.length;
+
+ /* pass to the upper layers */
+ netif_rx(skb);
+ }
+ }
+
+ /* otherwise check error status bits and increase statistics */
+
+ else {
+ dev->stats.rx_errors++;
+ if (rda.status & RCREG_FAER)
+ dev->stats.rx_frame_errors++;
+ if (rda.status & RCREG_CRCR)
+ dev->stats.rx_crc_errors++;
+ }
+
+ /* descriptor processed, will become new last descriptor in queue */
+
+ rda.link = 1;
+ rda.inuse = 1;
+ memcpy_toio(priv->base + rdaaddr, &rda,
+ sizeof(rda_t));
+
+ /* set up link and EOL = 0 in currently last descriptor. Only write
+ the link field since the SONIC may currently already access the
+ other fields. */
+
+ memcpy_toio(priv->base + lrdaaddr + 20, &rdaaddr, 4);
+
+ /* advance indices */
+
+ priv->lastrxdescr = priv->nextrxdescr;
+ if ((++priv->nextrxdescr) >= priv->rxbufcnt)
+ priv->nextrxdescr = 0;
+ }
+}
+
+/* transmit interrupt */
+
+static void irqtx_handler(struct net_device *dev)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+ tda_t tda;
+
+ /* fetch descriptor (we forgot the size ;-) */
+ memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t));
+
+ /* update statistics */
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += tda.length;
+
+ /* update our pointers */
+ priv->txused[priv->currtxdescr] = 0;
+ priv->txusedcnt--;
+
+ /* if there are more descriptors present in RAM, start them */
+ if (priv->txusedcnt > 0)
+ StartTx(dev, (priv->currtxdescr + 1) % TXBUFCNT);
+
+ /* tell the upper layer we can go on transmitting */
+ netif_wake_queue(dev);
+}
+
+static void irqtxerr_handler(struct net_device *dev)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+ tda_t tda;
+
+ /* fetch descriptor to check status */
+ memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t));
+
+ /* update statistics */
+ dev->stats.tx_errors++;
+ if (tda.status & (TCREG_NCRS | TCREG_CRSL))
+ dev->stats.tx_carrier_errors++;
+ if (tda.status & TCREG_EXC)
+ dev->stats.tx_aborted_errors++;
+ if (tda.status & TCREG_OWC)
+ dev->stats.tx_window_errors++;
+ if (tda.status & TCREG_FU)
+ dev->stats.tx_fifo_errors++;
+
+ /* update our pointers */
+ priv->txused[priv->currtxdescr] = 0;
+ priv->txusedcnt--;
+
+ /* if there are more descriptors present in RAM, start them */
+ if (priv->txusedcnt > 0)
+ StartTx(dev, (priv->currtxdescr + 1) % TXBUFCNT);
+
+ /* tell the upper layer we can go on transmitting */
+ netif_wake_queue(dev);
+}
+
+/* general interrupt entry */
+
+static irqreturn_t irq_handler(int dummy, void *device)
+{
+ struct net_device *dev = device;
+ u16 ival;
+
+ /* in case we're not meant... */
+ if (!(inb(dev->base_addr + BCMREG) & BCMREG_IPEND))
+ return IRQ_NONE;
+
+ /* loop through the interrupt bits until everything is clear */
+ while (1) {
+ ival = inw(dev->base_addr + SONIC_ISREG);
+
+ if (ival & ISREG_RBE) {
+ irqrbe_handler(dev);
+ outw(ISREG_RBE, dev->base_addr + SONIC_ISREG);
+ }
+ if (ival & ISREG_PKTRX) {
+ irqrx_handler(dev);
+ outw(ISREG_PKTRX, dev->base_addr + SONIC_ISREG);
+ }
+ if (ival & ISREG_TXDN) {
+ irqtx_handler(dev);
+ outw(ISREG_TXDN, dev->base_addr + SONIC_ISREG);
+ }
+ if (ival & ISREG_TXER) {
+ irqtxerr_handler(dev);
+ outw(ISREG_TXER, dev->base_addr + SONIC_ISREG);
+ }
+		/* done once no handled interrupt bit is pending any more */
+		if (!(ival & (ISREG_RBE | ISREG_PKTRX | ISREG_TXDN | ISREG_TXER)))
+			break;
+ }
+ return IRQ_HANDLED;
+}
+
+/* ------------------------------------------------------------------------
+ * driver methods
+ * ------------------------------------------------------------------------ */
+
+/* MCA info */
+
+#if 0 /* info available elsewhere, but this is kept for reference */
+static int ibmlana_getinfo(char *buf, int slot, void *d)
+{
+ int len = 0, i;
+ struct net_device *dev = (struct net_device *) d;
+ ibmlana_priv *priv;
+
+ /* can't say anything about an uninitialized device... */
+
+ if (dev == NULL)
+ return len;
+ priv = netdev_priv(dev);
+
+ /* print info */
+
+ len += sprintf(buf + len, "IRQ: %d\n", priv->realirq);
+ len += sprintf(buf + len, "I/O: %#lx\n", dev->base_addr);
+ len += sprintf(buf + len, "Memory: %#lx-%#lx\n", dev->mem_start, dev->mem_end - 1);
+ len += sprintf(buf + len, "Transceiver: %s\n", MediaNames[priv->medium]);
+ len += sprintf(buf + len, "Device: %s\n", dev->name);
+ len += sprintf(buf + len, "MAC address:");
+ for (i = 0; i < 6; i++)
+ len += sprintf(buf + len, " %02x", dev->dev_addr[i]);
+ buf[len++] = '\n';
+ buf[len] = 0;
+
+ return len;
+}
+#endif
+
+/* open driver. Means also initialization and start of the SONIC */
+
+static int ibmlana_open(struct net_device *dev)
+{
+ int result;
+ ibmlana_priv *priv = netdev_priv(dev);
+
+ /* register resources - only necessary for IRQ */
+
+ result = request_irq(priv->realirq, irq_handler, IRQF_SHARED,
+ dev->name, dev);
+ if (result != 0) {
+		printk(KERN_ERR "%s: failed to register irq %d\n", dev->name, priv->realirq);
+ return result;
+ }
+ dev->irq = priv->realirq;
+
+ /* set up the card and SONIC */
+ InitBoard(dev);
+
+ /* initialize operational flags */
+ netif_start_queue(dev);
+ return 0;
+}
+
+/* close driver. Shut down board and free allocated resources */
+
+static int ibmlana_close(struct net_device *dev)
+{
+ /* turn off board */
+
+ /* release resources */
+ if (dev->irq != 0)
+ free_irq(dev->irq, dev);
+ dev->irq = 0;
+ return 0;
+}
+
+/* transmit a block. */
+
+static netdev_tx_t ibmlana_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ ibmlana_priv *priv = netdev_priv(dev);
+ int tmplen, addr;
+ unsigned long flags;
+ tda_t tda;
+ int baddr;
+
+ /* find out if there are free slots for a frame to transmit. If not,
+ the upper layer is in deep desperation and we simply ignore the frame. */
+
+ if (priv->txusedcnt >= TXBUFCNT) {
+ dev->stats.tx_dropped++;
+ goto tx_done;
+ }
+
+	/* copy the frame data into the next free transmit buffer - pad short frames */
+ tmplen = skb->len;
+ if (tmplen < 60)
+ tmplen = 60;
+ baddr = priv->txbufstart + (priv->nexttxdescr * PKTSIZE);
+ memcpy_toio(priv->base + baddr, skb->data, skb->len);
+
+ /* copy filler into RAM - in case we're filling up...
+ we're filling a bit more than necessary, but that doesn't harm
+ since the buffer is far larger...
+ Sorry Linus for the filler string but I couldn't resist ;-) */
+
+ if (tmplen > skb->len) {
+ char *fill = "NetBSD is a nice OS too! ";
+ unsigned int destoffs = skb->len, l = strlen(fill);
+
+ while (destoffs < tmplen) {
+ memcpy_toio(priv->base + baddr + destoffs, fill, l);
+ destoffs += l;
+ }
+ }
+
+ /* set up the new frame descriptor */
+ addr = priv->tdastart + (priv->nexttxdescr * sizeof(tda_t));
+ memcpy_fromio(&tda, priv->base + addr, sizeof(tda_t));
+ tda.length = tda.fraglength = tmplen;
+ memcpy_toio(priv->base + addr, &tda, sizeof(tda_t));
+
+ /* if there were no active descriptors, trigger the SONIC */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->txusedcnt++;
+ priv->txused[priv->nexttxdescr] = 1;
+
+ /* are all transmission slots used up ? */
+ if (priv->txusedcnt >= TXBUFCNT)
+ netif_stop_queue(dev);
+
+ if (priv->txusedcnt == 1)
+ StartTx(dev, priv->nexttxdescr);
+ priv->nexttxdescr = (priv->nexttxdescr + 1) % TXBUFCNT;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+tx_done:
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+/* switch receiver mode. */
+
+static void ibmlana_set_multicast_list(struct net_device *dev)
+{
+ /* first stop the SONIC... */
+ StopSONIC(dev);
+ /* ...then reinit it with the new flags */
+ InitBoard(dev);
+}
+
+/* ------------------------------------------------------------------------
+ * hardware check
+ * ------------------------------------------------------------------------ */
+
+static int ibmlana_irq;
+static int ibmlana_io;
+static int startslot; /* counts through slots when probing multiple devices */
+
+static short ibmlana_adapter_ids[] __initdata = {
+ IBM_LANA_ID,
+ 0x0000
+};
+
+static char *ibmlana_adapter_names[] __devinitdata = {
+ "IBM LAN Adapter/A",
+ NULL
+};
+
+static const struct net_device_ops ibmlana_netdev_ops = {
+ .ndo_open = ibmlana_open,
+ .ndo_stop = ibmlana_close,
+ .ndo_start_xmit = ibmlana_tx,
+ .ndo_set_rx_mode = ibmlana_set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __devinit ibmlana_init_one(struct device *kdev)
+{
+ struct mca_device *mdev = to_mca_device(kdev);
+ struct net_device *dev;
+ int slot = mdev->slot, z, rc;
+ int base = 0, irq = 0, iobase = 0, memlen = 0;
+ ibmlana_priv *priv;
+ ibmlana_medium medium;
+
+ dev = alloc_etherdev(sizeof(ibmlana_priv));
+ if (!dev)
+ return -ENOMEM;
+
+ dev->irq = ibmlana_irq;
+ dev->base_addr = ibmlana_io;
+
+ base = dev->mem_start;
+ irq = dev->irq;
+
+ /* deduce card addresses */
+ getaddrs(mdev, &base, &memlen, &iobase, &irq, &medium);
+
+ /* were we looking for something different ? */
+ if (dev->irq && dev->irq != irq) {
+ rc = -ENODEV;
+ goto err_out;
+ }
+ if (dev->mem_start && dev->mem_start != base) {
+ rc = -ENODEV;
+ goto err_out;
+ }
+
+ /* announce success */
+ printk(KERN_INFO "%s: IBM LAN Adapter/A found in slot %d\n", dev->name, slot + 1);
+
+ /* try to obtain I/O range */
+ if (!request_region(iobase, IBM_LANA_IORANGE, DRV_NAME)) {
+ printk(KERN_ERR "%s: cannot allocate I/O range at %#x!\n", DRV_NAME, iobase);
+ startslot = slot + 1;
+ rc = -EBUSY;
+ goto err_out;
+ }
+
+ priv = netdev_priv(dev);
+ priv->slot = slot;
+ priv->realirq = mca_device_transform_irq(mdev, irq);
+ priv->medium = medium;
+ spin_lock_init(&priv->lock);
+
+ /* set base + irq for this device (irq not allocated so far) */
+
+ dev->irq = 0;
+ dev->mem_start = base;
+ dev->mem_end = base + memlen;
+ dev->base_addr = iobase;
+
+ priv->base = ioremap(base, memlen);
+ if (!priv->base) {
+ printk(KERN_ERR "%s: cannot remap memory!\n", DRV_NAME);
+ startslot = slot + 1;
+ rc = -EBUSY;
+ goto err_out_reg;
+ }
+
+ mca_device_set_name(mdev, ibmlana_adapter_names[mdev->index]);
+ mca_device_set_claim(mdev, 1);
+
+ /* set methods */
+ dev->netdev_ops = &ibmlana_netdev_ops;
+ dev->flags |= IFF_MULTICAST;
+
+ /* copy out MAC address */
+
+ for (z = 0; z < ETH_ALEN; z++)
+ dev->dev_addr[z] = inb(dev->base_addr + MACADDRPROM + z);
+
+ /* print config */
+
+ printk(KERN_INFO "%s: IRQ %d, I/O %#lx, memory %#lx-%#lx, "
+ "MAC address %pM.\n",
+ dev->name, priv->realirq, dev->base_addr,
+ dev->mem_start, dev->mem_end - 1,
+ dev->dev_addr);
+ printk(KERN_INFO "%s: %s medium\n", dev->name, MediaNames[priv->medium]);
+
+ /* reset board */
+
+ ResetBoard(dev);
+
+ /* next probe will start at next slot */
+
+ startslot = slot + 1;
+
+ rc = register_netdev(dev);
+ if (rc)
+ goto err_out_claimed;
+
+ dev_set_drvdata(kdev, dev);
+ return 0;
+
+err_out_claimed:
+ mca_device_set_claim(mdev, 0);
+ iounmap(priv->base);
+err_out_reg:
+ release_region(iobase, IBM_LANA_IORANGE);
+err_out:
+ free_netdev(dev);
+ return rc;
+}
+
+static int ibmlana_remove_one(struct device *kdev)
+{
+ struct mca_device *mdev = to_mca_device(kdev);
+ struct net_device *dev = dev_get_drvdata(kdev);
+ ibmlana_priv *priv = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ /*DeinitBoard(dev); */
+ release_region(dev->base_addr, IBM_LANA_IORANGE);
+ mca_device_set_claim(mdev, 0);
+ iounmap(priv->base);
+ free_netdev(dev);
+ return 0;
+}
+
+/* ------------------------------------------------------------------------
+ * modularization support
+ * ------------------------------------------------------------------------ */
+
+module_param_named(irq, ibmlana_irq, int, 0);
+module_param_named(io, ibmlana_io, int, 0);
+MODULE_PARM_DESC(irq, "IBM LAN/A IRQ number");
+MODULE_PARM_DESC(io, "IBM LAN/A I/O base address");
+MODULE_LICENSE("GPL");
+
+static struct mca_driver ibmlana_driver = {
+ .id_table = ibmlana_adapter_ids,
+ .driver = {
+ .name = "ibmlana",
+ .bus = &mca_bus_type,
+ .probe = ibmlana_init_one,
+ .remove = ibmlana_remove_one,
+ },
+};
+
+static int __init ibmlana_init_module(void)
+{
+ return mca_register_driver(&ibmlana_driver);
+}
+
+static void __exit ibmlana_cleanup_module(void)
+{
+ mca_unregister_driver(&ibmlana_driver);
+}
+
+module_init(ibmlana_init_module);
+module_exit(ibmlana_cleanup_module);
diff --git a/drivers/net/ethernet/natsemi/ibmlana.h b/drivers/net/ethernet/natsemi/ibmlana.h
new file mode 100644
index 000000000000..accd5efc9c8a
--- /dev/null
+++ b/drivers/net/ethernet/natsemi/ibmlana.h
@@ -0,0 +1,278 @@
+#ifndef _IBM_LANA_INCLUDE_
+#define _IBM_LANA_INCLUDE_
+
+#ifdef _IBM_LANA_DRIVER_
+
+/* maximum packet size */
+
+#define PKTSIZE 1524
+
+/* number of transmit buffers */
+
+#define TXBUFCNT 4
+
+/* Adapter ID's */
+#define IBM_LANA_ID 0xffe0
+
+/* media enumeration - defined in a way that it fits onto the LAN/A's
+ POS registers... */
+
+typedef enum {
+ Media_10BaseT, Media_10Base5,
+ Media_Unknown, Media_10Base2, Media_Count
+} ibmlana_medium;
+
+/* private structure */
+
+typedef struct {
+ unsigned int slot; /* MCA-Slot-# */
+ int realirq; /* memorizes actual IRQ, even when
+ currently not allocated */
+	ibmlana_medium medium; /* physical connector */
+ u32 tdastart, txbufstart, /* addresses */
+ rrastart, rxbufstart, rdastart, rxbufcnt, txusedcnt;
+ int nextrxdescr, /* next rx descriptor to be used */
+ lastrxdescr, /* last free rx descriptor */
+ nexttxdescr, /* last tx descriptor to be used */
+ currtxdescr, /* tx descriptor currently tx'ed */
+ txused[TXBUFCNT]; /* busy flags */
+ void __iomem *base;
+ spinlock_t lock;
+} ibmlana_priv;
+
+/* this card uses quite a lot of I/O ports...luckily the MCA bus decodes
+ a full 64K I/O range... */
+
+#define IBM_LANA_IORANGE 0xa0
+
+/* Command Register: */
+
+#define SONIC_CMDREG 0x00
+#define CMDREG_HTX 0x0001 /* halt transmission */
+#define CMDREG_TXP 0x0002 /* start transmission */
+#define CMDREG_RXDIS 0x0004 /* disable receiver */
+#define CMDREG_RXEN 0x0008 /* enable receiver */
+#define CMDREG_STP 0x0010 /* stop timer */
+#define CMDREG_ST 0x0020 /* start timer */
+#define CMDREG_RST 0x0080 /* software reset */
+#define CMDREG_RRRA 0x0100 /* force SONIC to read first RRA */
+#define CMDREG_LCAM 0x0200 /* force SONIC to read CAM descrs */
+
+/* Data Configuration Register */
+
+#define SONIC_DCREG 0x02
+#define DCREG_EXBUS 0x8000 /* Extended Bus Mode */
+#define DCREG_LBR 0x2000 /* Latched Bus Retry */
+#define DCREG_PO1 0x1000 /* Programmable Outputs */
+#define DCREG_PO0 0x0800
+#define DCREG_SBUS 0x0400 /* Synchronous Bus Mode */
+#define DCREG_USR1 0x0200 /* User Definable Pins */
+#define DCREG_USR0 0x0100
+#define DCREG_WC0 0x0000 /* 0..3 Wait States */
+#define DCREG_WC1 0x0040
+#define DCREG_WC2 0x0080
+#define DCREG_WC3 0x00c0
+#define DCREG_DW16 0x0000 /* 16 bit Bus Mode */
+#define DCREG_DW32 0x0020 /* 32 bit Bus Mode */
+#define DCREG_BMS 0x0010 /* Block Mode Select */
+#define DCREG_RFT4 0x0000 /* 4/8/16/24 bytes RX Threshold */
+#define DCREG_RFT8 0x0004
+#define DCREG_RFT16 0x0008
+#define DCREG_RFT24 0x000c
+#define DCREG_TFT8 0x0000 /* 8/16/24/28 bytes TX Threshold */
+#define DCREG_TFT16 0x0001
+#define DCREG_TFT24 0x0002
+#define DCREG_TFT28 0x0003
+
+/* Receive Control Register */
+
+#define SONIC_RCREG 0x04
+#define RCREG_ERR 0x8000 /* accept damaged and collided pkts */
+#define RCREG_RNT 0x4000 /* accept packets that are < 64 */
+#define RCREG_BRD 0x2000 /* accept broadcasts */
+#define RCREG_PRO 0x1000 /* promiscuous mode */
+#define RCREG_AMC 0x0800 /* accept all multicasts */
+#define RCREG_LB_NONE 0x0000 /* no loopback */
+#define RCREG_LB_MAC 0x0200 /* MAC loopback */
+#define RCREG_LB_ENDEC 0x0400 /* ENDEC loopback */
+#define RCREG_LB_XVR 0x0600 /* Transceiver loopback */
+#define RCREG_MC 0x0100 /* Multicast received */
+#define RCREG_BC 0x0080 /* Broadcast received */
+#define RCREG_LPKT 0x0040 /* last packet in RBA */
+#define RCREG_CRS 0x0020 /* carrier sense present */
+#define RCREG_COL 0x0010 /* recv'd packet with collision */
+#define RCREG_CRCR 0x0008 /* recv'd packet with CRC error */
+#define RCREG_FAER 0x0004 /* recv'd packet with inv. framing */
+#define RCREG_LBK 0x0002 /* recv'd loopback packet */
+#define RCREG_PRX 0x0001 /* recv'd packet is OK */
+
+/* Transmit Control Register */
+
+#define SONIC_TCREG 0x06
+#define TCREG_PINT 0x8000 /* generate interrupt after TDA read */
+#define TCREG_POWC 0x4000 /* timer start out of window detect */
+#define TCREG_CRCI 0x2000 /* inhibit CRC generation */
+#define TCREG_EXDIS 0x1000 /* disable excessive deferral timer */
+#define TCREG_EXD 0x0400 /* excessive deferral occurred */
+#define TCREG_DEF 0x0200 /* single deferral occurred */
+#define TCREG_NCRS 0x0100 /* no carrier detected */
+#define TCREG_CRSL 0x0080 /* carrier lost */
+#define TCREG_EXC 0x0040 /* excessive collisions occurred */
+#define TCREG_OWC 0x0020 /* out of window collision occurred */
+#define TCREG_PMB 0x0008 /* packet monitored bad */
+#define TCREG_FU 0x0004 /* FIFO underrun */
+#define TCREG_BCM 0x0002 /* byte count mismatch of fragments */
+#define TCREG_PTX 0x0001 /* packet transmitted OK */
+
+/* Interrupt Mask Register */
+
+#define SONIC_IMREG 0x08
+#define IMREG_BREN 0x4000 /* interrupt when bus retry occurred */
+#define IMREG_HBLEN 0x2000 /* interrupt when heartbeat lost */
+#define IMREG_LCDEN 0x1000 /* interrupt when CAM loaded */
+#define IMREG_PINTEN 0x0800 /* interrupt when PINT in TDA set */
+#define IMREG_PRXEN 0x0400 /* interrupt when packet received */
+#define IMREG_PTXEN 0x0200 /* interrupt when packet was sent */
+#define IMREG_TXEREN 0x0100 /* interrupt when send failed */
+#define IMREG_TCEN 0x0080 /* interrupt when timer completed */
+#define IMREG_RDEEN 0x0040 /* interrupt when RDA exhausted */
+#define IMREG_RBEEN 0x0020 /* interrupt when RBA exhausted */
+#define IMREG_RBAEEN 0x0010 /* interrupt when RBA too short */
+#define IMREG_CRCEN 0x0008 /* interrupt when CRC counter rolls */
+#define IMREG_FAEEN 0x0004 /* interrupt when FAE counter rolls */
+#define IMREG_MPEN 0x0002 /* interrupt when MP counter rolls */
+#define IMREG_RFOEN 0x0001 /* interrupt when Rx FIFO overflows */
+
+/* Interrupt Status Register */
+
+#define SONIC_ISREG 0x0a
+#define ISREG_BR 0x4000 /* bus retry occurred */
+#define ISREG_HBL 0x2000 /* heartbeat lost */
+#define ISREG_LCD 0x1000 /* CAM loaded */
+#define ISREG_PINT 0x0800 /* PINT in TDA set */
+#define ISREG_PKTRX 0x0400 /* packet received */
+#define ISREG_TXDN 0x0200 /* packet was sent */
+#define ISREG_TXER 0x0100 /* send failed */
+#define ISREG_TC 0x0080 /* timer completed */
+#define ISREG_RDE 0x0040 /* RDA exhausted */
+#define ISREG_RBE 0x0020 /* RBA exhausted */
+#define ISREG_RBAE 0x0010 /* RBA too short for received frame */
+#define ISREG_CRC 0x0008 /* CRC counter rolls over */
+#define ISREG_FAE 0x0004 /* FAE counter rolls over */
+#define ISREG_MP 0x0002 /* MP counter rolls over */
+#define ISREG_RFO 0x0001 /* Rx FIFO overflows */
+
+#define SONIC_UTDA 0x0c /* current transmit descr address */
+#define SONIC_CTDA 0x0e
+
+#define SONIC_URDA 0x1a /* current receive descr address */
+#define SONIC_CRDA 0x1c
+
+#define SONIC_CRBA0 0x1e /* current receive buffer address */
+#define SONIC_CRBA1 0x20
+
+#define SONIC_RBWC0 0x22 /* word count in receive buffer */
+#define SONIC_RBWC1 0x24
+
+#define SONIC_EOBC 0x26 /* minimum space to be free in RBA */
+
+#define SONIC_URRA 0x28 /* upper address of CDA & Recv Area */
+
+#define SONIC_RSA 0x2a /* start of receive resource area */
+
+#define SONIC_REA 0x2c /* end of receive resource area */
+
+#define SONIC_RRP 0x2e /* resource read pointer */
+
+#define SONIC_RWP 0x30 /* resource write pointer */
+
+#define SONIC_CAMEPTR 0x42 /* CAM entry pointer */
+
+#define SONIC_CAMADDR2 0x44 /* CAM address ports */
+#define SONIC_CAMADDR1 0x46
+#define SONIC_CAMADDR0 0x48
+
+#define SONIC_CAMPTR 0x4c /* lower address of CDA */
+
+#define SONIC_CAMCNT 0x4e /* # of CAM descriptors to load */
+
+/* Data Configuration Register 2 */
+
+#define SONIC_DCREG2 0x7e
+#define DCREG2_EXPO3 0x8000 /* extended programmable outputs */
+#define DCREG2_EXPO2 0x4000
+#define DCREG2_EXPO1 0x2000
+#define DCREG2_EXPO0 0x1000
+#define DCREG2_HD 0x0800 /* heartbeat disable */
+#define DCREG2_JD 0x0200 /* jabber timer disable */
+#define DCREG2_AUTO 0x0100 /* enable AUI/TP auto selection */
+#define DCREG2_XWRAP 0x0040 /* TP transceiver loopback */
+#define DCREG2_PH 0x0010 /* HOLD request timing */
+#define DCREG2_PCM 0x0004 /* packet compress when matched */
+#define DCREG2_PCNM 0x0002 /* packet compress when not matched */
+#define DCREG2_RJCM 0x0001 /* inverse packet match via CAM */
+
+/* Board Control Register: Enable RAM, Interrupts... */
+
+#define BCMREG 0x80
+#define BCMREG_RAMEN 0x80 /* switch over to RAM */
+#define BCMREG_IPEND 0x40 /* interrupt pending ? */
+#define BCMREG_RESET 0x08 /* reset board */
+#define BCMREG_16BIT 0x04 /* adapter in 16-bit slot */
+#define BCMREG_RAMWIN 0x02 /* enable RAM window */
+#define BCMREG_IEN 0x01 /* interrupt enable */
+
+/* MAC Address PROM */
+
+#define MACADDRPROM 0x92
+
+/* structure of a CAM entry */
+
+typedef struct {
+ u32 index; /* pointer into CAM area */
+ u32 addr0; /* address part (bits 0..15 used) */
+ u32 addr1;
+ u32 addr2;
+} camentry_t;
+
+/* structure of a receive resource */
+
+typedef struct {
+ u32 startlo; /* start address (bits 0..15 used) */
+ u32 starthi;
+ u32 cntlo; /* size in 16-bit quantities */
+ u32 cnthi;
+} rra_t;
+
+/* structure of a receive descriptor */
+
+typedef struct {
+ u32 status; /* packet status */
+ u32 length; /* length in bytes */
+ u32 startlo; /* start address */
+ u32 starthi;
+ u32 seqno; /* frame sequence */
+ u32 link; /* pointer to next descriptor */
+ /* bit 0 = EOL */
+ u32 inuse; /* !=0 --> free for SONIC to write */
+} rda_t;
+
+/* structure of a transmit descriptor */
+
+typedef struct {
+ u32 status; /* transmit status */
+ u32 config; /* value for TCR */
+ u32 length; /* total length */
+ u32 fragcount; /* number of fragments */
+ u32 startlo; /* start address of fragment */
+ u32 starthi;
+ u32 fraglength; /* length of this fragment */
+ /* more address/length triplets may */
+ /* follow here */
+ u32 link; /* pointer to next descriptor */
+ /* bit 0 = EOL */
+} tda_t;
+
+#endif /* _IBM_LANA_DRIVER_ */
+
+#endif /* _IBM_LANA_INCLUDE_ */
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
new file mode 100644
index 000000000000..fc7c6a932ad9
--- /dev/null
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -0,0 +1,308 @@
+/*
+ * jazzsonic.c
+ *
+ * (C) 2005 Finn Thain
+ *
+ * Converted to DMA API, and (from the mac68k project) introduced
+ * dhd's support for 16-bit cards.
+ *
+ * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ *
+ * This driver is based on work from Andreas Busse, but most of
+ * the code is rewritten.
+ *
+ * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
+ *
+ * A driver for the onboard Sonic ethernet controller on Mips Jazz
+ * systems (Acer Pica-61, Mips Magnum 4000, Olivetti M700 and
+ * perhaps others, too)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include <asm/bootinfo.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/jazz.h>
+#include <asm/jazzdma.h>
+
+static char jazz_sonic_string[] = "jazzsonic";
+
+#define SONIC_MEM_SIZE 0x100
+
+#include "sonic.h"
+
+/*
+ * Macros to access SONIC registers
+ */
+#define SONIC_READ(reg) (*((volatile unsigned int *)dev->base_addr+reg))
+
+#define SONIC_WRITE(reg,val) \
+do { \
+ *((volatile unsigned int *)dev->base_addr+(reg)) = (val); \
+} while (0)
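+
+/* Note that "reg" above is a register index, not a byte offset: the
+ * pointer arithmetic on "volatile unsigned int *" spaces consecutive
+ * registers 4 bytes apart. */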
+
+
+/* use 0 for production, 1 for verification, >1 for debug */
+#ifdef SONIC_DEBUG
+static unsigned int sonic_debug = SONIC_DEBUG;
+#else
+static unsigned int sonic_debug = 1;
+#endif
+
+/*
+ * We cannot use station (ethernet) address prefixes to detect the
+ * sonic controller since these are board manufacturer dependent.
+ * So we check for known Silicon Revision IDs instead.
+ */
+static unsigned short known_revisions[] =
+{
+ 0x04, /* Mips Magnum 4000 */
+ 0xffff /* end of list */
+};
+
+static int jazzsonic_open(struct net_device* dev)
+{
+ int retval;
+
+ retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED,
+ "sonic", dev);
+ if (retval) {
+ printk(KERN_ERR "%s: unable to get IRQ %d.\n",
+ dev->name, dev->irq);
+ return retval;
+ }
+
+ retval = sonic_open(dev);
+ if (retval)
+ free_irq(dev->irq, dev);
+ return retval;
+}
+
+static int jazzsonic_close(struct net_device* dev)
+{
+ int err;
+ err = sonic_close(dev);
+ free_irq(dev->irq, dev);
+ return err;
+}
+
+static const struct net_device_ops sonic_netdev_ops = {
+ .ndo_open = jazzsonic_open,
+ .ndo_stop = jazzsonic_close,
+ .ndo_start_xmit = sonic_send_packet,
+ .ndo_get_stats = sonic_get_stats,
+ .ndo_set_rx_mode = sonic_multicast_list,
+ .ndo_tx_timeout = sonic_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int __devinit sonic_probe1(struct net_device *dev)
+{
+ static unsigned version_printed;
+ unsigned int silicon_revision;
+ unsigned int val;
+ struct sonic_local *lp = netdev_priv(dev);
+ int err = -ENODEV;
+ int i;
+
+ if (!request_mem_region(dev->base_addr, SONIC_MEM_SIZE, jazz_sonic_string))
+ return -EBUSY;
+
+ /*
+	 * get the Silicon Revision ID. If it is one of the known
+	 * ones, assume that we have found a SONIC ethernet controller at
+ * the expected location.
+ */
+ silicon_revision = SONIC_READ(SONIC_SR);
+ if (sonic_debug > 1)
+ printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision);
+
+ i = 0;
+ while (known_revisions[i] != 0xffff &&
+ known_revisions[i] != silicon_revision)
+ i++;
+
+ if (known_revisions[i] == 0xffff) {
+		printk("SONIC ethernet controller not found (0x%04x)\n",
+ silicon_revision);
+ goto out;
+ }
+
+ if (sonic_debug && version_printed++ == 0)
+ printk(version);
+
+ printk(KERN_INFO "%s: Sonic ethernet found at 0x%08lx, ",
+ dev_name(lp->device), dev->base_addr);
+
+ /*
+ * Put the sonic into software reset, then
+ * retrieve and print the ethernet address.
+ */
+ SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
+ SONIC_WRITE(SONIC_CEP,0);
+ for (i=0; i<3; i++) {
+ val = SONIC_READ(SONIC_CAP0-i);
+ dev->dev_addr[i*2] = val;
+ dev->dev_addr[i*2+1] = val >> 8;
+ }
+
+ err = -ENOMEM;
+
+ /* Initialize the device structure. */
+
+ lp->dma_bitmode = SONIC_BITMODE32;
+
+ /* Allocate the entire chunk of memory for the descriptors.
+ Note that this cannot cross a 64K boundary. */
+ if ((lp->descriptors = dma_alloc_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ &lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
+ printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n",
+ dev_name(lp->device));
+ goto out;
+ }
+
+ /* Now set up the pointers to point to the appropriate places */
+ lp->cda = lp->descriptors;
+ lp->tda = lp->cda + (SIZEOF_SONIC_CDA
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+
+ lp->cda_laddr = lp->descriptors_laddr;
+ lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
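+
+	/* The single allocation is thus laid out, in order: CAM descriptor
+	 * area, transmit descriptors, receive descriptors, receive resource
+	 * area -- with the *_laddr fields mirroring the same offsets in bus
+	 * address space. */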
+
+ dev->netdev_ops = &sonic_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ /*
+ * clear tally counter
+ */
+ SONIC_WRITE(SONIC_CRCT,0xffff);
+ SONIC_WRITE(SONIC_FAET,0xffff);
+ SONIC_WRITE(SONIC_MPT,0xffff);
+
+ return 0;
+out:
+ release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
+ return err;
+}
+
+/*
+ * Probe for a SONIC ethernet controller on a Mips Jazz board.
+ * Actually probing is superfluous but we're paranoid.
+ */
+static int __devinit jazz_sonic_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct sonic_local *lp;
+ struct resource *res;
+ int err = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ dev = alloc_etherdev(sizeof(struct sonic_local));
+ if (!dev)
+ return -ENOMEM;
+
+ lp = netdev_priv(dev);
+ lp->device = &pdev->dev;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ platform_set_drvdata(pdev, dev);
+
+ netdev_boot_setup_check(dev);
+
+ dev->base_addr = res->start;
+ dev->irq = platform_get_irq(pdev, 0);
+ err = sonic_probe1(dev);
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+
+ printk("%s: MAC %pM IRQ %d\n", dev->name, dev->dev_addr, dev->irq);
+
+ return 0;
+
+out1:
+ release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
+out:
+ free_netdev(dev);
+
+ return err;
+}
+
+MODULE_DESCRIPTION("Jazz SONIC ethernet driver");
+module_param(sonic_debug, int, 0);
+MODULE_PARM_DESC(sonic_debug, "jazzsonic debug level (1-4)");
+MODULE_ALIAS("platform:jazzsonic");
+
+#include "sonic.c"
+
+static int __devexit jazz_sonic_device_remove (struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct sonic_local* lp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
+ release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct platform_driver jazz_sonic_driver = {
+ .probe = jazz_sonic_probe,
+ .remove = __devexit_p(jazz_sonic_device_remove),
+ .driver = {
+ .name = jazz_sonic_string,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init jazz_sonic_init_module(void)
+{
+ return platform_driver_register(&jazz_sonic_driver);
+}
+
+static void __exit jazz_sonic_cleanup_module(void)
+{
+ platform_driver_unregister(&jazz_sonic_driver);
+}
+
+module_init(jazz_sonic_init_module);
+module_exit(jazz_sonic_cleanup_module);
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
new file mode 100644
index 000000000000..a2eacbfb4252
--- /dev/null
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -0,0 +1,657 @@
+/*
+ * macsonic.c
+ *
+ * (C) 2005 Finn Thain
+ *
+ * Converted to DMA API, converted to unified driver model, made it work as
+ * a module again, and from the mac68k project, introduced more 32-bit cards
+ * and dhd's support for 16-bit cards.
+ *
+ * (C) 1998 Alan Cox
+ *
+ * Debugging Andreas Ehliar, Michael Schmitz
+ *
+ * Based on code
+ * (C) 1996 by Thomas Bogendoerfer (tsbogend@bigbug.franken.de)
+ *
+ * This driver is based on work from Andreas Busse, but most of
+ * the code is rewritten.
+ *
+ * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
+ *
+ * A driver for the Mac onboard Sonic ethernet chip.
+ *
+ * 98/12/21 MSch: judged from tests on Q800, it's basically working,
+ * but eating up both receive and transmit resources
+ * and duplicating packets. Needs more testing.
+ *
+ * 99/01/03 MSch: upgraded to version 0.92 of the core driver, fixed.
+ *
+ * 00/10/31 sammy@oh.verio.com: Updated driver for 2.4 kernels, fixed problems
+ * on centris.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/nubus.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitrev.h>
+#include <linux/slab.h>
+
+#include <asm/bootinfo.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/hwtest.h>
+#include <asm/dma.h>
+#include <asm/macintosh.h>
+#include <asm/macints.h>
+#include <asm/mac_via.h>
+
+static char mac_sonic_string[] = "macsonic";
+
+#include "sonic.h"
+
+/* These should basically be bus-size and endian independent (since
+ the SONIC is at least smart enough that it uses the same endianness
+ as the host, unlike certain less enlightened Macintosh NICs) */
+#define SONIC_READ(reg) (nubus_readw(dev->base_addr + (reg * 4) \
+ + lp->reg_offset))
+#define SONIC_WRITE(reg,val) (nubus_writew(val, dev->base_addr + (reg * 4) \
+ + lp->reg_offset))
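+
+/* As in jazzsonic, "reg" is an index: registers sit 4 bytes apart, and
+ * lp->reg_offset (0 or 2, chosen while probing below) selects between the
+ * layouts used by 16-bit and 32-bit cards. */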
+
+/* use 0 for production, 1 for verification, >1 for debug */
+#ifdef SONIC_DEBUG
+static unsigned int sonic_debug = SONIC_DEBUG;
+#else
+static unsigned int sonic_debug = 1;
+#endif
+
+static int sonic_version_printed;
+
+/* For onboard SONIC */
+#define ONBOARD_SONIC_REGISTERS 0x50F0A000
+#define ONBOARD_SONIC_PROM_BASE 0x50f08000
+
+enum macsonic_type {
+ MACSONIC_DUODOCK,
+ MACSONIC_APPLE,
+ MACSONIC_APPLE16,
+ MACSONIC_DAYNA,
+ MACSONIC_DAYNALINK
+};
+
+/* For the built-in SONIC in the Duo Dock */
+#define DUODOCK_SONIC_REGISTERS 0xe10000
+#define DUODOCK_SONIC_PROM_BASE 0xe12000
+
+/* For Apple-style NuBus SONIC */
+#define APPLE_SONIC_REGISTERS 0
+#define APPLE_SONIC_PROM_BASE 0x40000
+
+/* Daynalink LC SONIC */
+#define DAYNALINK_PROM_BASE 0x400000
+
+/* For Dayna-style NuBus SONIC (haven't seen one yet) */
+#define DAYNA_SONIC_REGISTERS 0x180000
+/* This is what OpenBSD says. However, this is definitely in NuBus
+ ROM space so we should be able to get it by walking the NuBus
+   resource directories. */
+#define DAYNA_SONIC_MAC_ADDR 0xffe004
+
+#define SONIC_READ_PROM(addr) nubus_readb(prom_addr+addr)
+
+/*
+ * For reversing the PROM address
+ */
+
+static inline void bit_reverse_addr(unsigned char addr[6])
+{
+ int i;
+
+ for(i = 0; i < 6; i++)
+ addr[i] = bitrev8(addr[i]);
+}
+
+static irqreturn_t macsonic_interrupt(int irq, void *dev_id)
+{
+ irqreturn_t result;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ result = sonic_interrupt(irq, dev_id);
+ local_irq_restore(flags);
+ return result;
+}
+
+static int macsonic_open(struct net_device* dev)
+{
+ int retval;
+
+ retval = request_irq(dev->irq, sonic_interrupt, IRQ_FLG_FAST,
+ "sonic", dev);
+ if (retval) {
+ printk(KERN_ERR "%s: unable to get IRQ %d.\n",
+ dev->name, dev->irq);
+ goto err;
+ }
+ /* Under the A/UX interrupt scheme, the onboard SONIC interrupt comes
+	 * in at priority level 3. However, we sometimes get the level 2
+	 * interrupt as well, so we must prevent re-entrance of the sonic
+	 * handler.
+ */
+ if (dev->irq == IRQ_AUTO_3) {
+ retval = request_irq(IRQ_NUBUS_9, macsonic_interrupt,
+ IRQ_FLG_FAST, "sonic", dev);
+ if (retval) {
+ printk(KERN_ERR "%s: unable to get IRQ %d.\n",
+ dev->name, IRQ_NUBUS_9);
+ goto err_irq;
+ }
+ }
+ retval = sonic_open(dev);
+ if (retval)
+ goto err_irq_nubus;
+ return 0;
+
+err_irq_nubus:
+ if (dev->irq == IRQ_AUTO_3)
+ free_irq(IRQ_NUBUS_9, dev);
+err_irq:
+ free_irq(dev->irq, dev);
+err:
+ return retval;
+}
+
+static int macsonic_close(struct net_device* dev)
+{
+ int err;
+ err = sonic_close(dev);
+ free_irq(dev->irq, dev);
+ if (dev->irq == IRQ_AUTO_3)
+ free_irq(IRQ_NUBUS_9, dev);
+ return err;
+}
+
+static const struct net_device_ops macsonic_netdev_ops = {
+ .ndo_open = macsonic_open,
+ .ndo_stop = macsonic_close,
+ .ndo_start_xmit = sonic_send_packet,
+ .ndo_set_rx_mode = sonic_multicast_list,
+ .ndo_tx_timeout = sonic_tx_timeout,
+ .ndo_get_stats = sonic_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int __devinit macsonic_init(struct net_device *dev)
+{
+ struct sonic_local* lp = netdev_priv(dev);
+
+ /* Allocate the entire chunk of memory for the descriptors.
+ Note that this cannot cross a 64K boundary. */
+ if ((lp->descriptors = dma_alloc_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ &lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
+ printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n",
+ dev_name(lp->device));
+ return -ENOMEM;
+ }
+
+ /* Now set up the pointers to point to the appropriate places */
+ lp->cda = lp->descriptors;
+ lp->tda = lp->cda + (SIZEOF_SONIC_CDA
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+
+ lp->cda_laddr = lp->descriptors_laddr;
+ lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+
+ dev->netdev_ops = &macsonic_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ /*
+ * clear tally counter
+ */
+ SONIC_WRITE(SONIC_CRCT, 0xffff);
+ SONIC_WRITE(SONIC_FAET, 0xffff);
+ SONIC_WRITE(SONIC_MPT, 0xffff);
+
+ return 0;
+}
+
+#define INVALID_MAC(mac) (memcmp(mac, "\x08\x00\x07", 3) && \
+ memcmp(mac, "\x00\xA0\x40", 3) && \
+ memcmp(mac, "\x00\x80\x19", 3) && \
+ memcmp(mac, "\x00\x05\x02", 3))
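+
+/* The prefixes accepted above are the OUIs found on these machines:
+ * 08:00:07, 00:A0:40 and 00:05:02 are Apple prefixes, and 00:80:19 is
+ * believed to be Dayna's. Anything else is treated as a bogus address. */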
+
+static void __devinit mac_onboard_sonic_ethernet_addr(struct net_device *dev)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+ const int prom_addr = ONBOARD_SONIC_PROM_BASE;
+ unsigned short val;
+
+ /*
+ * On NuBus boards we can sometimes look in the ROM resources.
+ * No such luck for comm-slot/onboard.
+ * On the PowerBook 520, the PROM base address is a mystery.
+ */
+ if (hwreg_present((void *)prom_addr)) {
+ int i;
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = SONIC_READ_PROM(i);
+ if (!INVALID_MAC(dev->dev_addr))
+ return;
+
+ /*
+ * Most of the time, the address is bit-reversed. The NetBSD
+ * source has a rather long and detailed historical account of
+ * why this is so.
+ */
+ bit_reverse_addr(dev->dev_addr);
+ if (!INVALID_MAC(dev->dev_addr))
+ return;
+
+ /*
+ * If we still have what seems to be a bogus address, we'll
+ * look in the CAM. The top entry should be ours.
+ */
+ printk(KERN_WARNING "macsonic: MAC address in PROM seems "
+ "to be invalid, trying CAM\n");
+ } else {
+ printk(KERN_WARNING "macsonic: cannot read MAC address from "
+ "PROM, trying CAM\n");
+ }
+
+ /* This only works if MacOS has already initialized the card. */
+
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
+ SONIC_WRITE(SONIC_CEP, 15);
+
+ val = SONIC_READ(SONIC_CAP2);
+ dev->dev_addr[5] = val >> 8;
+ dev->dev_addr[4] = val & 0xff;
+ val = SONIC_READ(SONIC_CAP1);
+ dev->dev_addr[3] = val >> 8;
+ dev->dev_addr[2] = val & 0xff;
+ val = SONIC_READ(SONIC_CAP0);
+ dev->dev_addr[1] = val >> 8;
+ dev->dev_addr[0] = val & 0xff;
+
+ if (!INVALID_MAC(dev->dev_addr))
+ return;
+
+ /* Still nonsense ... messed up someplace! */
+
+ printk(KERN_WARNING "macsonic: MAC address in CAM entry 15 "
+ "seems invalid, will use a random MAC\n");
+ random_ether_addr(dev->dev_addr);
+}
+
+static int __devinit mac_onboard_sonic_probe(struct net_device *dev)
+{
+ struct sonic_local* lp = netdev_priv(dev);
+ int sr;
+ int commslot = 0;
+
+ if (!MACH_IS_MAC)
+ return -ENODEV;
+
+ printk(KERN_INFO "Checking for internal Macintosh ethernet (SONIC).. ");
+
+	/* Bogus probing on the models which may or may not have
+ Ethernet (BTW, the Ethernet *is* always at the same
+ address, and nothing else lives there, at least if Apple's
+ documentation is to be believed) */
+ if (macintosh_config->ident == MAC_MODEL_Q630 ||
+ macintosh_config->ident == MAC_MODEL_P588 ||
+ macintosh_config->ident == MAC_MODEL_P575 ||
+ macintosh_config->ident == MAC_MODEL_C610) {
+ unsigned long flags;
+ int card_present;
+
+ local_irq_save(flags);
+ card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS);
+ local_irq_restore(flags);
+
+ if (!card_present) {
+ printk("none.\n");
+ return -ENODEV;
+ }
+ commslot = 1;
+ }
+
+ printk("yes\n");
+
+ /* Danger! My arms are flailing wildly! You *must* set lp->reg_offset
+ * and dev->base_addr before using SONIC_READ() or SONIC_WRITE() */
+ dev->base_addr = ONBOARD_SONIC_REGISTERS;
+ if (via_alt_mapping)
+ dev->irq = IRQ_AUTO_3;
+ else
+ dev->irq = IRQ_NUBUS_9;
+
+ if (!sonic_version_printed) {
+ printk(KERN_INFO "%s", version);
+ sonic_version_printed = 1;
+ }
+ printk(KERN_INFO "%s: onboard / comm-slot SONIC at 0x%08lx\n",
+ dev_name(lp->device), dev->base_addr);
+
+ /* The PowerBook's SONIC is 16 bit always. */
+ if (macintosh_config->ident == MAC_MODEL_PB520) {
+ lp->reg_offset = 0;
+ lp->dma_bitmode = SONIC_BITMODE16;
+ sr = SONIC_READ(SONIC_SR);
+ } else if (commslot) {
+		/* Some of the comm-slot cards are 16 bit, but some
+		   of them are not. The 32-bit cards use offset 2 and
+		   have known revisions, so we try reading the revision
+		   register at offset 2; if we don't get a known revision,
+		   we assume 16 bit at offset 0. */
+ lp->reg_offset = 2;
+ lp->dma_bitmode = SONIC_BITMODE16;
+
+ sr = SONIC_READ(SONIC_SR);
+ if (sr == 0x0004 || sr == 0x0006 || sr == 0x0100 || sr == 0x0101)
+ /* 83932 is 0x0004 or 0x0006, 83934 is 0x0100 or 0x0101 */
+ lp->dma_bitmode = SONIC_BITMODE32;
+ else {
+ lp->dma_bitmode = SONIC_BITMODE16;
+ lp->reg_offset = 0;
+ sr = SONIC_READ(SONIC_SR);
+ }
+ } else {
+ /* All onboard cards are at offset 2 with 32 bit DMA. */
+ lp->reg_offset = 2;
+ lp->dma_bitmode = SONIC_BITMODE32;
+ sr = SONIC_READ(SONIC_SR);
+ }
+ printk(KERN_INFO
+ "%s: revision 0x%04x, using %d bit DMA and register offset %d\n",
+ dev_name(lp->device), sr, lp->dma_bitmode?32:16, lp->reg_offset);
+
+#if 0 /* This is sometimes useful to find out how MacOS configured the card. */
+ printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", dev_name(lp->device),
+ SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff);
+#endif
+
+ /* Software reset, then initialize control registers. */
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
+
+ SONIC_WRITE(SONIC_DCR, SONIC_DCR_EXBUS | SONIC_DCR_BMS |
+ SONIC_DCR_RFT1 | SONIC_DCR_TFT0 |
+ (lp->dma_bitmode ? SONIC_DCR_DW : 0));
+
+ /* This *must* be written back to in order to restore the
+ * extended programmable output bits, as it may not have been
+ * initialised since the hardware reset. */
+ SONIC_WRITE(SONIC_DCR2, 0);
+
+ /* Clear *and* disable interrupts to be on the safe side */
+ SONIC_WRITE(SONIC_IMR, 0);
+ SONIC_WRITE(SONIC_ISR, 0x7fff);
+
+ /* Now look for the MAC address. */
+ mac_onboard_sonic_ethernet_addr(dev);
+
+ /* Shared init code */
+ return macsonic_init(dev);
+}
+
+static int __devinit mac_nubus_sonic_ethernet_addr(struct net_device *dev,
+ unsigned long prom_addr,
+ int id)
+{
+ int i;
+ for(i = 0; i < 6; i++)
+ dev->dev_addr[i] = SONIC_READ_PROM(i);
+
+ /* Some of the addresses are bit-reversed */
+ if (id != MACSONIC_DAYNA)
+ bit_reverse_addr(dev->dev_addr);
+
+ return 0;
+}
+
+static int __devinit macsonic_ident(struct nubus_dev *ndev)
+{
+ if (ndev->dr_hw == NUBUS_DRHW_ASANTE_LC &&
+ ndev->dr_sw == NUBUS_DRSW_SONIC_LC)
+ return MACSONIC_DAYNALINK;
+ if (ndev->dr_hw == NUBUS_DRHW_SONIC &&
+ ndev->dr_sw == NUBUS_DRSW_APPLE) {
+ /* There has to be a better way to do this... */
+ if (strstr(ndev->board->name, "DuoDock"))
+ return MACSONIC_DUODOCK;
+ else
+ return MACSONIC_APPLE;
+ }
+
+ if (ndev->dr_hw == NUBUS_DRHW_SMC9194 &&
+ ndev->dr_sw == NUBUS_DRSW_DAYNA)
+ return MACSONIC_DAYNA;
+
+ if (ndev->dr_hw == NUBUS_DRHW_APPLE_SONIC_LC &&
+ ndev->dr_sw == 0) { /* huh? */
+ return MACSONIC_APPLE16;
+ }
+ return -1;
+}
+
+static int __devinit mac_nubus_sonic_probe(struct net_device *dev)
+{
+ static int slots;
+ struct nubus_dev* ndev = NULL;
+ struct sonic_local* lp = netdev_priv(dev);
+ unsigned long base_addr, prom_addr;
+ u16 sonic_dcr;
+ int id = -1;
+ int reg_offset, dma_bitmode;
+
+ /* Find the first SONIC that hasn't been initialized already */
+ while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK,
+ NUBUS_TYPE_ETHERNET, ndev)) != NULL)
+ {
+ /* Have we seen it already? */
+ if (slots & (1<<ndev->board->slot))
+ continue;
+ slots |= 1<<ndev->board->slot;
+
+ /* Is it one of ours? */
+ if ((id = macsonic_ident(ndev)) != -1)
+ break;
+ }
+
+ if (ndev == NULL)
+ return -ENODEV;
+
+ switch (id) {
+ case MACSONIC_DUODOCK:
+ base_addr = ndev->board->slot_addr + DUODOCK_SONIC_REGISTERS;
+ prom_addr = ndev->board->slot_addr + DUODOCK_SONIC_PROM_BASE;
+ sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT0 | SONIC_DCR_RFT1 |
+ SONIC_DCR_TFT0;
+ reg_offset = 2;
+ dma_bitmode = SONIC_BITMODE32;
+ break;
+ case MACSONIC_APPLE:
+ base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
+ prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE;
+ sonic_dcr = SONIC_DCR_BMS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0;
+ reg_offset = 0;
+ dma_bitmode = SONIC_BITMODE32;
+ break;
+ case MACSONIC_APPLE16:
+ base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
+ prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE;
+ sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 |
+ SONIC_DCR_PO1 | SONIC_DCR_BMS;
+ reg_offset = 0;
+ dma_bitmode = SONIC_BITMODE16;
+ break;
+ case MACSONIC_DAYNALINK:
+ base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
+ prom_addr = ndev->board->slot_addr + DAYNALINK_PROM_BASE;
+ sonic_dcr = SONIC_DCR_RFT1 | SONIC_DCR_TFT0 |
+ SONIC_DCR_PO1 | SONIC_DCR_BMS;
+ reg_offset = 0;
+ dma_bitmode = SONIC_BITMODE16;
+ break;
+ case MACSONIC_DAYNA:
+ base_addr = ndev->board->slot_addr + DAYNA_SONIC_REGISTERS;
+ prom_addr = ndev->board->slot_addr + DAYNA_SONIC_MAC_ADDR;
+ sonic_dcr = SONIC_DCR_BMS |
+ SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1;
+ reg_offset = 0;
+ dma_bitmode = SONIC_BITMODE16;
+ break;
+ default:
+ printk(KERN_ERR "macsonic: WTF, id is %d\n", id);
+ return -ENODEV;
+ }
+
+ /* Danger! My arms are flailing wildly! You *must* set lp->reg_offset
+ * and dev->base_addr before using SONIC_READ() or SONIC_WRITE() */
+ dev->base_addr = base_addr;
+ lp->reg_offset = reg_offset;
+ lp->dma_bitmode = dma_bitmode;
+ dev->irq = SLOT2IRQ(ndev->board->slot);
+
+ if (!sonic_version_printed) {
+ printk(KERN_INFO "%s", version);
+ sonic_version_printed = 1;
+ }
+ printk(KERN_INFO "%s: %s in slot %X\n",
+ dev_name(lp->device), ndev->board->name, ndev->board->slot);
+ printk(KERN_INFO "%s: revision 0x%04x, using %d bit DMA and register offset %d\n",
+ dev_name(lp->device), SONIC_READ(SONIC_SR), dma_bitmode?32:16, reg_offset);
+
+#if 0 /* This is sometimes useful to find out how MacOS configured the card. */
+ printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", dev_name(lp->device),
+ SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff);
+#endif
+
+ /* Software reset, then initialize control registers. */
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
+ SONIC_WRITE(SONIC_DCR, sonic_dcr | (dma_bitmode ? SONIC_DCR_DW : 0));
+ /* This *must* be written back to in order to restore the
+ * extended programmable output bits, since it may not have been
+ * initialised since the hardware reset. */
+ SONIC_WRITE(SONIC_DCR2, 0);
+
+ /* Clear *and* disable interrupts to be on the safe side */
+ SONIC_WRITE(SONIC_IMR, 0);
+ SONIC_WRITE(SONIC_ISR, 0x7fff);
+
+ /* Now look for the MAC address. */
+ if (mac_nubus_sonic_ethernet_addr(dev, prom_addr, id) != 0)
+ return -ENODEV;
+
+ /* Shared init code */
+ return macsonic_init(dev);
+}
+
+static int __devinit mac_sonic_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct sonic_local *lp;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct sonic_local));
+ if (!dev)
+ return -ENOMEM;
+
+ lp = netdev_priv(dev);
+ lp->device = &pdev->dev;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ platform_set_drvdata(pdev, dev);
+
+ /* This will catch fatal stuff like -ENOMEM as well as success */
+ err = mac_onboard_sonic_probe(dev);
+ if (err == 0)
+ goto found;
+ if (err != -ENODEV)
+ goto out;
+ err = mac_nubus_sonic_probe(dev);
+ if (err)
+ goto out;
+found:
+ err = register_netdev(dev);
+ if (err)
+ goto out;
+
+ printk("%s: MAC %pM IRQ %d\n", dev->name, dev->dev_addr, dev->irq);
+
+ return 0;
+
+out:
+ free_netdev(dev);
+
+ return err;
+}
+
+MODULE_DESCRIPTION("Macintosh SONIC ethernet driver");
+module_param(sonic_debug, int, 0);
+MODULE_PARM_DESC(sonic_debug, "macsonic debug level (1-4)");
+MODULE_ALIAS("platform:macsonic");
+
+#include "sonic.c"
+
+static int __devexit mac_sonic_device_remove (struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct sonic_local* lp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct platform_driver mac_sonic_driver = {
+ .probe = mac_sonic_probe,
+ .remove = __devexit_p(mac_sonic_device_remove),
+ .driver = {
+ .name = mac_sonic_string,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mac_sonic_init_module(void)
+{
+ return platform_driver_register(&mac_sonic_driver);
+}
+
+static void __exit mac_sonic_cleanup_module(void)
+{
+ platform_driver_unregister(&mac_sonic_driver);
+}
+
+module_init(mac_sonic_init_module);
+module_exit(mac_sonic_cleanup_module);
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
new file mode 100644
index 000000000000..6ca047aab793
--- /dev/null
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -0,0 +1,3370 @@
+/* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */
+/*
+ Written/copyright 1999-2001 by Donald Becker.
+ Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
+ Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com)
+ Portions copyright 2004 Harald Welte <laforge@gnumonks.org>
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL. License for under other terms may be
+ available. Contact the original author for details.
+
+ The original author may be reached as becker@scyld.com, or at
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/netsemi.html
+ [link no longer provides useful info -jgarzik]
+
+
+ TODO:
+ * big endian support with CFG:BEM instead of cpu_to_le32
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+#include <linux/rtnetlink.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+#include <linux/prefetch.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#define DRV_NAME "natsemi"
+#define DRV_VERSION "2.1"
+#define DRV_RELDATE "Sept 11, 2006"
+
+#define RX_OFFSET 2
+
+/* Updated to recommendations in pci-skeleton v2.03. */
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+#define NATSEMI_DEF_MSG (NETIF_MSG_DRV | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_WOL | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+static int debug = -1;
+
+static int mtu;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ This chip uses a 512 element hash table based on the Ethernet CRC. */
+static const int multicast_filter_limit = 100;
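+
+/* A rough sketch of the hashing scheme described above. This is
+ * illustrative only -- the authoritative mapping is in __set_rx_mode()
+ * below, and the shift shown here is an assumption, not a verified
+ * constant:
+ *
+ *	int bit = (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;	// 0..511
+ *	mc_filter[bit >> 3] |= 1 << (bit & 7);
+ */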
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak;
+
+static int dspcfg_workaround = 1;
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability.
+ The media type is usually passed in 'options[]'.
+*/
+#define MAX_UNITS 8	/* More are supported; the limit applies only to options[] */
+static int options[MAX_UNITS];
+static int full_duplex[MAX_UNITS];
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */
+#define RX_RING_SIZE 32
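+
+/* Example of the claim above: with TX_RING_SIZE == 16, an unsigned index
+ * expression such as "cur_tx % TX_RING_SIZE" compiles down to
+ * "cur_tx & 15", avoiding a divide. */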
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2*HZ)
+
+#define NATSEMI_HW_TIMEOUT 400
+#define NATSEMI_TIMER_FREQ 5*HZ
+#define NATSEMI_PG0_NREGS 64
+#define NATSEMI_RFDR_NREGS 8
+#define NATSEMI_PG1_NREGS 4
+#define NATSEMI_NREGS (NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \
+ NATSEMI_PG1_NREGS)
+#define NATSEMI_REGS_VER 1 /* v1 added RFDR registers */
+#define NATSEMI_REGS_SIZE (NATSEMI_NREGS * sizeof(u32))
+
+/* Buffer sizes:
+ * The nic writes 32-bit values, even if the upper bytes of
+ * a 32-bit value are beyond the end of the buffer.
+ */
+#define NATSEMI_HEADERS 22 /* 2*mac,type,vlan,crc */
+#define NATSEMI_PADDING 16 /* 2 bytes should be sufficient */
+#define NATSEMI_LONGPKT 1518 /* limit for normal packets */
+#define NATSEMI_RX_LIMIT 2046 /* maximum supported by hardware */
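+
+/* Worked arithmetic for NATSEMI_HEADERS: two MAC addresses (12) + type (2)
+ * + VLAN tag (4) + CRC (4) = 22 bytes, matching the comment above. */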
+
+/* These identify the driver base version and may not be removed. */
+static const char version[] __devinitconst =
+ KERN_INFO DRV_NAME " dp8381x driver, version "
+ DRV_VERSION ", " DRV_RELDATE "\n"
+ " originally by Donald Becker <becker@scyld.com>\n"
+ " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(mtu, int, 0);
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param(dspcfg_workaround, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
+MODULE_PARM_DESC(debug, "DP8381x default debug level");
+MODULE_PARM_DESC(rx_copybreak,
+ "DP8381x copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(dspcfg_workaround, "DP8381x: control DspCfg workaround");
+MODULE_PARM_DESC(options,
+ "DP8381x: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
+It also works with other chips in the DP83810 series.
+
+II. Board-specific settings
+
+This driver requires the PCI interrupt line to be valid.
+It honors the EEPROM-set values.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+The NatSemi design uses a 'next descriptor' pointer that the driver forms
+into a list.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
+
+A subtle aspect of the operation is that unaligned buffers are not permitted
+by the hardware. Thus the IP header at offset 14 in an ethernet frame isn't
+longword aligned for further processing. When copying, frames are put into
+the skbuff at an offset of "+2", 16-byte aligning the IP header.
+
+IIId. Synchronization
+
+Most operations are synchronized on the np->lock irq spinlock, except the
+receive and transmit paths, which are synchronized using a combination of
+hardware descriptor ownership, disabling interrupts and NAPI poll scheduling.
+
+IVb. References
+
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+Datasheet is available from:
+http://www.national.com/pf/DP/DP83815.html
+
+IVc. Errata
+
+None characterised.
+*/
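+
+/*
+ * A minimal sketch of the copybreak decision from IIIb/c above
+ * (illustrative only -- the real receive path is netdev_rx() later in
+ * this file, and "entry", "pkt_len" and "np" are assumed local names):
+ *
+ *	if (pkt_len < rx_copybreak) {
+ *		struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET);
+ *		skb_reserve(skb, RX_OFFSET);	// "+2" aligns the IP header
+ *		skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
+ *	} else {
+ *		// hand the full-sized ring skbuff up and refill it later
+ *	}
+ */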
+
+
+
+/*
+ * Support for fibre connections on Am79C874:
+ * This phy needs a special setup when connected to a fibre cable.
+ * http://www.amd.com/files/connectivitysolutions/networking/archivednetworking/22235.pdf
+ */
+#define PHYID_AM79C874 0x0022561b
+
+enum {
+ MII_MCTRL = 0x15, /* mode control register */
+ MII_FX_SEL = 0x0001, /* 100BASE-FX (fiber) */
+ MII_EN_SCRM = 0x0004, /* enable scrambler (tp) */
+};
+
+enum {
+ NATSEMI_FLAG_IGNORE_PHY = 0x1,
+};
+
+/* array of board data directly indexed by pci_tbl[x].driver_data */
+static struct {
+ const char *name;
+ unsigned long flags;
+ unsigned int eeprom_size;
+} natsemi_pci_info[] __devinitdata = {
+ { "Aculab E1/T1 PMXc cPCI carrier card", NATSEMI_FLAG_IGNORE_PHY, 128 },
+ { "NatSemi DP8381[56]", 0, 24 },
+};
+
+static DEFINE_PCI_DEVICE_TABLE(natsemi_pci_tbl) = {
+ { PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 },
+ { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { } /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
+
+/* Offsets to the device registers.
+ Unlike software-only systems, device drivers interact with complex hardware.
+ It's not useful to define symbolic names for every register bit in the
+ device.
+*/
+enum register_offsets {
+ ChipCmd = 0x00,
+ ChipConfig = 0x04,
+ EECtrl = 0x08,
+ PCIBusCfg = 0x0C,
+ IntrStatus = 0x10,
+ IntrMask = 0x14,
+ IntrEnable = 0x18,
+ IntrHoldoff = 0x1C, /* DP83816 only */
+ TxRingPtr = 0x20,
+ TxConfig = 0x24,
+ RxRingPtr = 0x30,
+ RxConfig = 0x34,
+ ClkRun = 0x3C,
+ WOLCmd = 0x40,
+ PauseCmd = 0x44,
+ RxFilterAddr = 0x48,
+ RxFilterData = 0x4C,
+ BootRomAddr = 0x50,
+ BootRomData = 0x54,
+ SiliconRev = 0x58,
+ StatsCtrl = 0x5C,
+ StatsData = 0x60,
+ RxPktErrs = 0x60,
+ RxMissed = 0x68,
+ RxCRCErrs = 0x64,
+ BasicControl = 0x80,
+ BasicStatus = 0x84,
+ AnegAdv = 0x90,
+ AnegPeer = 0x94,
+ PhyStatus = 0xC0,
+ MIntrCtrl = 0xC4,
+ MIntrStatus = 0xC8,
+ PhyCtrl = 0xE4,
+
+	/* These are from the spec, around page 78... in a separate table.
+	 * The meaning of these registers depends on the value of PGSEL. */
+ PGSEL = 0xCC,
+ PMDCSR = 0xE4,
+ TSTDAT = 0xFC,
+ DSPCFG = 0xF4,
+ SDCFG = 0xF8
+};
+/* the values for the 'magic' registers above (PGSEL=1) */
+#define PMDCSR_VAL 0x189c /* enable preferred adaptation circuitry */
+#define TSTDAT_VAL 0x0
+#define DSPCFG_VAL 0x5040
+#define SDCFG_VAL 0x008c /* set voltage thresholds for Signal Detect */
+#define DSPCFG_LOCK 0x20 /* coefficient lock bit in DSPCFG */
+#define DSPCFG_COEF 0x1000 /* see coefficient (in TSTDAT) bit in DSPCFG */
+#define TSTDAT_FIXED 0xe8 /* magic number for bad coefficients */
+
+/* misc PCI space registers */
+enum pci_register_offsets {
+ PCIPM = 0x44,
+};
+
+enum ChipCmd_bits {
+ ChipReset = 0x100,
+ RxReset = 0x20,
+ TxReset = 0x10,
+ RxOff = 0x08,
+ RxOn = 0x04,
+ TxOff = 0x02,
+ TxOn = 0x01,
+};
+
+enum ChipConfig_bits {
+ CfgPhyDis = 0x200,
+ CfgPhyRst = 0x400,
+ CfgExtPhy = 0x1000,
+ CfgAnegEnable = 0x2000,
+ CfgAneg100 = 0x4000,
+ CfgAnegFull = 0x8000,
+ CfgAnegDone = 0x8000000,
+ CfgFullDuplex = 0x20000000,
+ CfgSpeed100 = 0x40000000,
+ CfgLink = 0x80000000,
+};
+
+enum EECtrl_bits {
+ EE_ShiftClk = 0x04,
+ EE_DataIn = 0x01,
+ EE_ChipSelect = 0x08,
+ EE_DataOut = 0x02,
+ MII_Data = 0x10,
+ MII_Write = 0x20,
+ MII_ShiftClk = 0x40,
+};
+
+enum PCIBusCfg_bits {
+ EepromReload = 0x4,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum IntrStatus_bits {
+ IntrRxDone = 0x0001,
+ IntrRxIntr = 0x0002,
+ IntrRxErr = 0x0004,
+ IntrRxEarly = 0x0008,
+ IntrRxIdle = 0x0010,
+ IntrRxOverrun = 0x0020,
+ IntrTxDone = 0x0040,
+ IntrTxIntr = 0x0080,
+ IntrTxErr = 0x0100,
+ IntrTxIdle = 0x0200,
+ IntrTxUnderrun = 0x0400,
+ StatsMax = 0x0800,
+ SWInt = 0x1000,
+ WOLPkt = 0x2000,
+ LinkChange = 0x4000,
+ IntrHighBits = 0x8000,
+ RxStatusFIFOOver = 0x10000,
+ IntrPCIErr = 0xf00000,
+ RxResetDone = 0x1000000,
+ TxResetDone = 0x2000000,
+ IntrAbnormalSummary = 0xCD20,
+};
+
+/*
+ * Default Interrupts:
+ * Rx OK, Rx Packet Error, Rx Overrun,
+ * Tx OK, Tx Packet Error, Tx Underrun,
+ * MIB Service, Phy Interrupt, High Bits,
+ * Rx Status FIFO overrun,
+ * Received Target Abort, Received Master Abort,
+ * Signalled System Error, Received Parity Error
+ */
+#define DEFAULT_INTR 0x00f1cd65
+
+enum TxConfig_bits {
+ TxDrthMask = 0x3f,
+ TxFlthMask = 0x3f00,
+ TxMxdmaMask = 0x700000,
+ TxMxdma_512 = 0x0,
+ TxMxdma_4 = 0x100000,
+ TxMxdma_8 = 0x200000,
+ TxMxdma_16 = 0x300000,
+ TxMxdma_32 = 0x400000,
+ TxMxdma_64 = 0x500000,
+ TxMxdma_128 = 0x600000,
+ TxMxdma_256 = 0x700000,
+ TxCollRetry = 0x800000,
+ TxAutoPad = 0x10000000,
+ TxMacLoop = 0x20000000,
+ TxHeartIgn = 0x40000000,
+ TxCarrierIgn = 0x80000000
+};
+
+/*
+ * Tx Configuration:
+ * - 256 byte DMA burst length
+ * - fill threshold 512 bytes (i.e. restart DMA when 512 bytes are free)
+ * - 64 bytes initial drain threshold (i.e. begin actual transmission
+ *   when 64 bytes are in the FIFO)
+ * - on tx underruns, increase drain threshold by 64.
+ * - at most use a drain threshold of 1472 bytes: The sum of the fill
+ * threshold and the drain threshold must be less than 2016 bytes.
+ *
+ */
+#define TX_FLTH_VAL ((512/32) << 8)
+#define TX_DRTH_VAL_START (64/32)
+#define TX_DRTH_VAL_INC 2
+#define TX_DRTH_VAL_LIMIT (1472/32)
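+
+/* Worked example of the encoding above: both thresholds are programmed in
+ * 32-byte units, so TX_FLTH_VAL = (512/32) << 8 = 0x1000 and
+ * TX_DRTH_VAL_START = 64/32 = 2. At the limit, 1472 + 512 = 1984 bytes,
+ * which stays below the 2016-byte bound stated above. */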
+
+enum RxConfig_bits {
+ RxDrthMask = 0x3e,
+ RxMxdmaMask = 0x700000,
+ RxMxdma_512 = 0x0,
+ RxMxdma_4 = 0x100000,
+ RxMxdma_8 = 0x200000,
+ RxMxdma_16 = 0x300000,
+ RxMxdma_32 = 0x400000,
+ RxMxdma_64 = 0x500000,
+ RxMxdma_128 = 0x600000,
+ RxMxdma_256 = 0x700000,
+ RxAcceptLong = 0x8000000,
+ RxAcceptTx = 0x10000000,
+ RxAcceptRunt = 0x40000000,
+ RxAcceptErr = 0x80000000
+};
+#define RX_DRTH_VAL (128/8)
+
+enum ClkRun_bits {
+ PMEEnable = 0x100,
+ PMEStatus = 0x8000,
+};
+
+enum WolCmd_bits {
+ WakePhy = 0x1,
+ WakeUnicast = 0x2,
+ WakeMulticast = 0x4,
+ WakeBroadcast = 0x8,
+ WakeArp = 0x10,
+ WakePMatch0 = 0x20,
+ WakePMatch1 = 0x40,
+ WakePMatch2 = 0x80,
+ WakePMatch3 = 0x100,
+ WakeMagic = 0x200,
+ WakeMagicSecure = 0x400,
+ SecureHack = 0x100000,
+ WokePhy = 0x400000,
+ WokeUnicast = 0x800000,
+ WokeMulticast = 0x1000000,
+ WokeBroadcast = 0x2000000,
+ WokeArp = 0x4000000,
+ WokePMatch0 = 0x8000000,
+ WokePMatch1 = 0x10000000,
+ WokePMatch2 = 0x20000000,
+ WokePMatch3 = 0x40000000,
+ WokeMagic = 0x80000000,
+ WakeOptsSummary = 0x7ff
+};
+
+enum RxFilterAddr_bits {
+ RFCRAddressMask = 0x3ff,
+ AcceptMulticast = 0x00200000,
+ AcceptMyPhys = 0x08000000,
+ AcceptAllPhys = 0x10000000,
+ AcceptAllMulticast = 0x20000000,
+ AcceptBroadcast = 0x40000000,
+ RxFilterEnable = 0x80000000
+};
+
+enum StatsCtrl_bits {
+ StatsWarn = 0x1,
+ StatsFreeze = 0x2,
+ StatsClear = 0x4,
+ StatsStrobe = 0x8,
+};
+
+enum MIntrCtrl_bits {
+ MICRIntEn = 0x2,
+};
+
+enum PhyCtrl_bits {
+ PhyAddrMask = 0x1f,
+};
+
+#define PHY_ADDR_NONE 32
+#define PHY_ADDR_INTERNAL 1
+
+/* values we might find in the silicon revision register */
+#define SRR_DP83815_C 0x0302
+#define SRR_DP83815_D 0x0403
+#define SRR_DP83816_A4 0x0504
+#define SRR_DP83816_A5 0x0505
+
+/* The Rx and Tx buffer descriptors. */
+/* Note that using only 32 bit fields simplifies conversion to big-endian
+ architectures. */
+struct netdev_desc {
+ __le32 next_desc;
+ __le32 cmd_status;
+ __le32 addr;
+ __le32 software_use;
+};
+
+/* Bits in network_desc.status */
+enum desc_status_bits {
+ DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
+ DescNoCRC=0x10000000, DescPktOK=0x08000000,
+ DescSizeMask=0xfff,
+
+ DescTxAbort=0x04000000, DescTxFIFO=0x02000000,
+ DescTxCarrier=0x01000000, DescTxDefer=0x00800000,
+ DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000,
+ DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000,
+
+ DescRxAbort=0x04000000, DescRxOver=0x02000000,
+ DescRxDest=0x01800000, DescRxLong=0x00400000,
+ DescRxRunt=0x00200000, DescRxInvalid=0x00100000,
+ DescRxCRC=0x00080000, DescRxAlign=0x00040000,
+ DescRxLoop=0x00020000, DesRxColl=0x00010000,
+};
+
+struct netdev_private {
+ /* Descriptor rings first for alignment */
+ dma_addr_t ring_dma;
+ struct netdev_desc *rx_ring;
+ struct netdev_desc *tx_ring;
+ /* The addresses of receive-in-place skbuffs */
+ struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ dma_addr_t rx_dma[RX_RING_SIZE];
+ /* address of a sent-in-place packet/buffer, for later free() */
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+ dma_addr_t tx_dma[TX_RING_SIZE];
+ struct net_device *dev;
+ struct napi_struct napi;
+ /* Media monitoring timer */
+ struct timer_list timer;
+ /* Frequently used values: keep some adjacent for cache effect */
+ struct pci_dev *pci_dev;
+ struct netdev_desc *rx_head_desc;
+ /* Producer/consumer ring indices */
+ unsigned int cur_rx, dirty_rx;
+ unsigned int cur_tx, dirty_tx;
+ /* Based on MTU+slack. */
+ unsigned int rx_buf_sz;
+ int oom;
+ /* Interrupt status */
+ u32 intr_status;
+ /* Do not touch the nic registers */
+ int hands_off;
+ /* Don't pay attention to the reported link state. */
+ int ignore_phy;
+ /* external phy that is used: only valid if dev->if_port != PORT_TP */
+ int mii;
+ int phy_addr_external;
+ unsigned int full_duplex;
+ /* Rx filter */
+ u32 cur_rx_mode;
+ u32 rx_filter[16];
+ /* FIFO and PCI burst thresholds */
+ u32 tx_config, rx_config;
+ /* original contents of ClkRun register */
+ u32 SavedClkRun;
+ /* silicon revision */
+ u32 srr;
+ /* expected DSPCFG value */
+ u16 dspcfg;
+ int dspcfg_workaround;
+ /* parms saved in ethtool format */
+ u16 speed; /* The forced speed, 10Mb, 100Mb, gigabit */
+ u8 duplex; /* Duplex, half or full */
+ u8 autoneg; /* Autonegotiation enabled */
+ /* MII transceiver section */
+ u16 advertising;
+ unsigned int iosize;
+ spinlock_t lock;
+ u32 msg_enable;
+ /* EEPROM data */
+ int eeprom_size;
+};
+
+static void move_int_phy(struct net_device *dev, int addr);
+static int eeprom_read(void __iomem *ioaddr, int location);
+static int mdio_read(struct net_device *dev, int reg);
+static void mdio_write(struct net_device *dev, int reg, u16 data);
+static void init_phy_fixup(struct net_device *dev);
+static int miiport_read(struct net_device *dev, int phy_id, int reg);
+static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data);
+static int find_mii(struct net_device *dev);
+static void natsemi_reset(struct net_device *dev);
+static void natsemi_reload_eeprom(struct net_device *dev);
+static void natsemi_stop_rxtx(struct net_device *dev);
+static int netdev_open(struct net_device *dev);
+static void do_cable_magic(struct net_device *dev);
+static void undo_cable_magic(struct net_device *dev);
+static void check_link(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void dump_ring(struct net_device *dev);
+static void ns_tx_timeout(struct net_device *dev);
+static int alloc_ring(struct net_device *dev);
+static void refill_rx(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static void drain_tx(struct net_device *dev);
+static void drain_ring(struct net_device *dev);
+static void free_ring(struct net_device *dev);
+static void reinit_ring(struct net_device *dev);
+static void init_registers(struct net_device *dev);
+static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t intr_handler(int irq, void *dev_instance);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int natsemi_poll(struct napi_struct *napi, int budget);
+static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do);
+static void netdev_tx_done(struct net_device *dev);
+static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void natsemi_poll_controller(struct net_device *dev);
+#endif
+static void __set_rx_mode(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static void __get_stats(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_set_wol(struct net_device *dev, u32 newval);
+static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
+static int netdev_set_sopass(struct net_device *dev, u8 *newval);
+static int netdev_get_sopass(struct net_device *dev, u8 *data);
+static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
+static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
+static void enable_wol_mode(struct net_device *dev, int enable_intr);
+static int netdev_close(struct net_device *dev);
+static int netdev_get_regs(struct net_device *dev, u8 *buf);
+static int netdev_get_eeprom(struct net_device *dev, u8 *buf);
+static const struct ethtool_ops ethtool_ops;
+
+#define NATSEMI_ATTR(_name) \
+static ssize_t natsemi_show_##_name(struct device *dev, \
+ struct device_attribute *attr, char *buf); \
+ static ssize_t natsemi_set_##_name(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count); \
+ static DEVICE_ATTR(_name, 0644, natsemi_show_##_name, natsemi_set_##_name)
+
+#define NATSEMI_CREATE_FILE(_dev, _name) \
+ device_create_file(&_dev->dev, &dev_attr_##_name)
+#define NATSEMI_REMOVE_FILE(_dev, _name) \
+ device_remove_file(&_dev->dev, &dev_attr_##_name)
+
+NATSEMI_ATTR(dspcfg_workaround);
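+
+/* NATSEMI_ATTR(dspcfg_workaround) above expands to forward declarations of
+ * natsemi_show_dspcfg_workaround() and natsemi_set_dspcfg_workaround()
+ * (defined next) plus a 0644 DEVICE_ATTR named dev_attr_dspcfg_workaround,
+ * which NATSEMI_CREATE_FILE() registers against the PCI device at probe
+ * time. */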
+
+static ssize_t natsemi_show_dspcfg_workaround(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct netdev_private *np = netdev_priv(to_net_dev(dev));
+
+ return sprintf(buf, "%s\n", np->dspcfg_workaround ? "on" : "off");
+}
+
+static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct netdev_private *np = netdev_priv(to_net_dev(dev));
+ int new_setting;
+ unsigned long flags;
+
+ /* Find out the new setting */
+ if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
+ new_setting = 1;
+ else if (!strncmp("off", buf, count - 1) ||
+ !strncmp("0", buf, count - 1))
+ new_setting = 0;
+ else
+ return count;
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ np->dspcfg_workaround = new_setting;
+
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ return count;
+}
+
+static inline void __iomem *ns_ioaddr(struct net_device *dev)
+{
+ return (void __iomem *) dev->base_addr;
+}
+
+static inline void natsemi_irq_enable(struct net_device *dev)
+{
+ writel(1, ns_ioaddr(dev) + IntrEnable);
+ readl(ns_ioaddr(dev) + IntrEnable);
+}
+
+static inline void natsemi_irq_disable(struct net_device *dev)
+{
+ writel(0, ns_ioaddr(dev) + IntrEnable);
+ readl(ns_ioaddr(dev) + IntrEnable);
+}
+
+static void move_int_phy(struct net_device *dev, int addr)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = ns_ioaddr(dev);
+ int target = 31;
+
+ /*
+ * The internal phy is visible on the external mii bus. Therefore we must
+ * move it away before we can send commands to an external phy.
+ * There are two addresses we must avoid:
+ * - the address on the external phy that is used for transmission.
+ * - the address that we want to access. User space can access phys
+ * on the mii bus with SIOCGMIIREG/SIOCSMIIREG, independent from the
+ * phy that is used for transmission.
+ */
+
+ if (target == addr)
+ target--;
+ if (target == np->phy_addr_external)
+ target--;
+ writew(target, ioaddr + PhyCtrl);
+ readw(ioaddr + PhyCtrl);
+ udelay(1);
+}
+
+static void __devinit natsemi_init_media (struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ u32 tmp;
+
+ if (np->ignore_phy)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ /* get the initial settings from hardware */
+ tmp = mdio_read(dev, MII_BMCR);
+ np->speed = (tmp & BMCR_SPEED100)? SPEED_100 : SPEED_10;
+ np->duplex = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL : DUPLEX_HALF;
+ np->autoneg = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE;
+ np->advertising= mdio_read(dev, MII_ADVERTISE);
+
+ if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL &&
+ netif_msg_probe(np)) {
+ printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
+ "10%s %s duplex.\n",
+ pci_name(np->pci_dev),
+ (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)?
+ "enabled, advertise" : "disabled, force",
+ (np->advertising &
+ (ADVERTISE_100FULL|ADVERTISE_100HALF))?
+ "0" : "",
+ (np->advertising &
+ (ADVERTISE_100FULL|ADVERTISE_10FULL))?
+ "full" : "half");
+ }
+ if (netif_msg_probe(np))
+ printk(KERN_INFO
+ "natsemi %s: Transceiver status %#04x advertising %#04x.\n",
+ pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
+ np->advertising);
+
+}
+
+static const struct net_device_ops natsemi_netdev_ops = {
+ .ndo_open = netdev_open,
+ .ndo_stop = netdev_close,
+ .ndo_start_xmit = start_tx,
+ .ndo_get_stats = get_stats,
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_change_mtu = natsemi_change_mtu,
+ .ndo_do_ioctl = netdev_ioctl,
+ .ndo_tx_timeout = ns_tx_timeout,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = natsemi_poll_controller,
+#endif
+};
+
+static int __devinit natsemi_probe1 (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ int i, option, irq, chip_idx = ent->driver_data;
+ static int find_cnt = -1;
+ resource_size_t iostart;
+ unsigned long iosize;
+ void __iomem *ioaddr;
+ const int pcibar = 1; /* PCI base address register */
+ int prev_eedata;
+ u32 tmp;
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(version);
+#endif
+
+ i = pci_enable_device(pdev);
+ if (i) return i;
+
+ /* natsemi has a non-standard PM control register
+ * in PCI config space. Some boards apparently need
+ * to be brought to D0 in this manner.
+ */
+ pci_read_config_dword(pdev, PCIPM, &tmp);
+ if (tmp & PCI_PM_CTRL_STATE_MASK) {
+ /* D0 state, disable PME assertion */
+ u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
+ pci_write_config_dword(pdev, PCIPM, newtmp);
+ }
+
+ find_cnt++;
+ iostart = pci_resource_start(pdev, pcibar);
+ iosize = pci_resource_len(pdev, pcibar);
+ irq = pdev->irq;
+
+ pci_set_master(pdev);
+
+ dev = alloc_etherdev(sizeof (struct netdev_private));
+ if (!dev)
+ return -ENOMEM;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ i = pci_request_regions(pdev, DRV_NAME);
+ if (i)
+ goto err_pci_request_regions;
+
+ ioaddr = ioremap(iostart, iosize);
+ if (!ioaddr) {
+ i = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ /* Work around the dropped serial bit. */
+ prev_eedata = eeprom_read(ioaddr, 6);
+ for (i = 0; i < 3; i++) {
+ int eedata = eeprom_read(ioaddr, i + 7);
+ dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
+ dev->dev_addr[i*2+1] = eedata >> 7;
+ prev_eedata = eedata;
+ }
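+	/* In other words, the EEPROM holds the station address shifted
+	 * right by one bit, so each low byte is rebuilt from seven bits
+	 * of the current word plus bit 15 of the preceding one. */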
+
+ /* Store MAC Address in perm_addr */
+ memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
+
+ dev->base_addr = (unsigned long __force) ioaddr;
+ dev->irq = irq;
+
+ np = netdev_priv(dev);
+ netif_napi_add(dev, &np->napi, natsemi_poll, 64);
+ np->dev = dev;
+
+ np->pci_dev = pdev;
+ pci_set_drvdata(pdev, dev);
+ np->iosize = iosize;
+ spin_lock_init(&np->lock);
+ np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
+ np->hands_off = 0;
+ np->intr_status = 0;
+ np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size;
+ if (natsemi_pci_info[chip_idx].flags & NATSEMI_FLAG_IGNORE_PHY)
+ np->ignore_phy = 1;
+ else
+ np->ignore_phy = 0;
+ np->dspcfg_workaround = dspcfg_workaround;
+
+ /* Initial port:
+ * - If configured to ignore the PHY set up for external.
+ * - If the nic was configured to use an external phy and if find_mii
+ * finds a phy: use external port, first phy that replies.
+ * - Otherwise: internal port.
+ * Note that the phy address for the internal phy doesn't matter:
+ * The address would be used to access a phy over the mii bus, but
+ * the internal phy is accessed through mapped registers.
+ */
+ if (np->ignore_phy || readl(ioaddr + ChipConfig) & CfgExtPhy)
+ dev->if_port = PORT_MII;
+ else
+ dev->if_port = PORT_TP;
+ /* Reset the chip to erase previous misconfiguration. */
+ natsemi_reload_eeprom(dev);
+ natsemi_reset(dev);
+
+ if (dev->if_port != PORT_TP) {
+ np->phy_addr_external = find_mii(dev);
+ /* If we're ignoring the PHY it doesn't matter if we can't
+ * find one. */
+ if (!np->ignore_phy && np->phy_addr_external == PHY_ADDR_NONE) {
+ dev->if_port = PORT_TP;
+ np->phy_addr_external = PHY_ADDR_INTERNAL;
+ }
+ } else {
+ np->phy_addr_external = PHY_ADDR_INTERNAL;
+ }
+
+ option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option) {
+ if (option & 0x200)
+ np->full_duplex = 1;
+ if (option & 15)
+ printk(KERN_INFO
+			       "natsemi %s: ignoring user supplied media type %d\n",
+ pci_name(np->pci_dev), option & 15);
+ }
+ if (find_cnt < MAX_UNITS && full_duplex[find_cnt])
+ np->full_duplex = 1;
+
+ dev->netdev_ops = &natsemi_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ SET_ETHTOOL_OPS(dev, &ethtool_ops);
+
+ if (mtu)
+ dev->mtu = mtu;
+
+ natsemi_init_media(dev);
+
+ /* save the silicon revision for later querying */
+ np->srr = readl(ioaddr + SiliconRev);
+ if (netif_msg_hw(np))
+ printk(KERN_INFO "natsemi %s: silicon revision %#04x.\n",
+ pci_name(np->pci_dev), np->srr);
+
+ i = register_netdev(dev);
+ if (i)
+ goto err_register_netdev;
+
+ if (NATSEMI_CREATE_FILE(pdev, dspcfg_workaround))
+ goto err_create_file;
+
+ if (netif_msg_drv(np)) {
+ printk(KERN_INFO "natsemi %s: %s at %#08llx "
+ "(%s), %pM, IRQ %d",
+ dev->name, natsemi_pci_info[chip_idx].name,
+ (unsigned long long)iostart, pci_name(np->pci_dev),
+ dev->dev_addr, irq);
+ if (dev->if_port == PORT_TP)
+ printk(", port TP.\n");
+ else if (np->ignore_phy)
+ printk(", port MII, ignoring PHY.\n");
+ else
+ printk(", port MII, phy addr %d.\n", np->phy_addr_external);
+ }
+ return 0;
+
+ err_create_file:
+ unregister_netdev(dev);
+
+ err_register_netdev:
+ iounmap(ioaddr);
+
+ err_ioremap:
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ err_pci_request_regions:
+ free_netdev(dev);
+ return i;
+}
+
+
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
+ The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */
+
+/* Delay between EEPROM clock transitions.
+ No extra delay is needed with 33MHz PCI, but future 66MHz access may need
+ a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
+ made udelay() unreliable.
+ The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
+ deprecated.
+*/
+#define eeprom_delay(ee_addr) readl(ee_addr)
+
+#define EE_Write0 (EE_ChipSelect)
+#define EE_Write1 (EE_ChipSelect | EE_DataIn)
+
+/* The EEPROM commands include the always-set leading bit. */
+enum EEPROM_Cmds {
+ EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
+};
+
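+/* A full read command is the always-set start bit, the opcode and the
+ * 6-bit address: EE_ReadCmd | location gives the 9-bit pattern
+ * 110aaaaaa. eeprom_read() clocks out 11 bits, so the two leading
+ * zeros are harmless padding ahead of the start bit.
+ */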
+static int eeprom_read(void __iomem *addr, int location)
+{
+ int i;
+ int retval = 0;
+ void __iomem *ee_addr = addr + EECtrl;
+ int read_cmd = location | EE_ReadCmd;
+
+ writel(EE_Write0, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 10; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
+ writel(dataval, ee_addr);
+ eeprom_delay(ee_addr);
+ writel(dataval | EE_ShiftClk, ee_addr);
+ eeprom_delay(ee_addr);
+ }
+ writel(EE_ChipSelect, ee_addr);
+ eeprom_delay(ee_addr);
+
+ for (i = 0; i < 16; i++) {
+ writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
+ eeprom_delay(ee_addr);
+ retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
+ writel(EE_ChipSelect, ee_addr);
+ eeprom_delay(ee_addr);
+ }
+
+ /* Terminate the EEPROM access. */
+ writel(EE_Write0, ee_addr);
+ writel(0, ee_addr);
+ return retval;
+}
+
+/* MII transceiver control section.
+ * The 83815 series has an internal transceiver, and we present the
+ * internal management registers as if they were MII connected.
+ * External Phy registers are referenced through the MII interface.
+ */
+
+/* clock transitions must be >= 20ns apart (25MHz max).
+ * One readl should be a sufficient delay even for PCI @ 100MHz.
+ */
+#define mii_delay(ioaddr) readl(ioaddr + EECtrl)
+
+static int mii_getbit (struct net_device *dev)
+{
+ int data;
+ void __iomem *ioaddr = ns_ioaddr(dev);
+
+ writel(MII_ShiftClk, ioaddr + EECtrl);
+ data = readl(ioaddr + EECtrl);
+ writel(0, ioaddr + EECtrl);
+ mii_delay(ioaddr);
+ return (data & MII_Data)? 1 : 0;
+}
+
+static void mii_send_bits (struct net_device *dev, u32 data, int len)
+{
+ u32 i;
+ void __iomem *ioaddr = ns_ioaddr(dev);
+
+ for (i = (1 << (len-1)); i; i >>= 1)
+ {
+ u32 mdio_val = MII_Write | ((data & i)? MII_Data : 0);
+ writel(mdio_val, ioaddr + EECtrl);
+ mii_delay(ioaddr);
+ writel(mdio_val | MII_ShiftClk, ioaddr + EECtrl);
+ mii_delay(ioaddr);
+ }
+ writel(0, ioaddr + EECtrl);
+ mii_delay(ioaddr);
+}
+
+static int miiport_read(struct net_device *dev, int phy_id, int reg)
+{
+ u32 cmd;
+ int i;
+ u32 retval = 0;
+
+ /* Ensure sync */
+ mii_send_bits (dev, 0xffffffff, 32);
+ /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
+ /* ST,OP = 0110'b for read operation */
+ cmd = (0x06 << 10) | (phy_id << 5) | reg;
+ mii_send_bits (dev, cmd, 14);
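+ /* Only ST+OP+ADDR+REG# (14 bits) are driven by the host; the PHY
+ * drives the line for the turnaround and the 16 data bits, which
+ * are sampled one at a time via mii_getbit() below.
+ */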
+ /* Turnaround */
+ if (mii_getbit (dev))
+ return 0;
+ /* Read data */
+ for (i = 0; i < 16; i++) {
+ retval <<= 1;
+ retval |= mii_getbit (dev);
+ }
+ /* End cycle */
+ mii_getbit (dev);
+ return retval;
+}
+
+static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data)
+{
+ u32 cmd;
+
+ /* Ensure sync */
+ mii_send_bits (dev, 0xffffffff, 32);
+ /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
+ /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
+ cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data;
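+ /* 0x5002 << 16 puts ST=01/OP=01 in bits 31:28 and TA=10 in bits
+ * 17:16; phy_id and reg are OR'd into bits 27:23 and 22:18, with
+ * the 16 data bits in 15:0. All 32 bits go out MSB first.
+ */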
+ mii_send_bits (dev, cmd, 32);
+ /* End cycle */
+ mii_getbit (dev);
+}
+
+static int mdio_read(struct net_device *dev, int reg)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = ns_ioaddr(dev);
+
+ /* The 83815 series has two ports:
+ * - an internal transceiver
+ * - an external mii bus
+ */
+ if (dev->if_port == PORT_TP)
+ return readw(ioaddr+BasicControl+(reg<<2));
+ else
+ return miiport_read(dev, np->phy_addr_external, reg);
+}
+
+static void mdio_write(struct net_device *dev, int reg, u16 data)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = ns_ioaddr(dev);
+
+ /* The 83815 series has an internal transceiver; handle separately */
+ if (dev->if_port == PORT_TP)
+ writew(data, ioaddr+BasicControl+(reg<<2));
+ else
+ miiport_write(dev, np->phy_addr_external, reg, data);
+}
+
+static void init_phy_fixup(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = ns_ioaddr(dev);
+ int i;
+ u32 cfg;
+ u16 tmp;
+
+ /* restore stuff lost when power was out */
+ tmp = mdio_read(dev, MII_BMCR);
+ if (np->autoneg == AUTONEG_ENABLE) {
+ /* renegotiate if something changed */
+ if ((tmp & BMCR_ANENABLE) == 0 ||
+ np->advertising != mdio_read(dev, MII_ADVERTISE))
+ {
+ /* turn on autonegotiation and force negotiation */
+ tmp |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mdio_write(dev, MII_ADVERTISE, np->advertising);
+ }
+ } else {
+ /* turn off autonegotiation, set speed and duplex */
+ tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
+ if (np->speed == SPEED_100)
+ tmp |= BMCR_SPEED100;
+ if (np->duplex == DUPLEX_FULL)
+ tmp |= BMCR_FULLDPLX;
+ /*
+ * Note: there is no good way to inform the link partner
+ * that our capabilities changed. The user has to unplug
+ * and replug the network cable after some changes, e.g.
+ * after switching from 10HD, autoneg off, to 100HD,
+ * autoneg off.
+ */
+ }
+ mdio_write(dev, MII_BMCR, tmp);
+ readl(ioaddr + ChipConfig);
+ udelay(1);
+
+ /* find out what phy this is */
+ np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
+ + mdio_read(dev, MII_PHYSID2);
+
+ /* handle external phys here */
+ switch (np->mii) {
+ case PHYID_AM79C874:
+ /* phy specific configuration for fibre/tp operation */
+ tmp = mdio_read(dev, MII_MCTRL);
+ tmp &= ~(MII_FX_SEL | MII_EN_SCRM);
+ if (dev->if_port == PORT_FIBRE)
+ tmp |= MII_FX_SEL;
+ else
+ tmp |= MII_EN_SCRM;
+ mdio_write(dev, MII_MCTRL, tmp);
+ break;
+ default:
+ break;
+ }
+ cfg = readl(ioaddr + ChipConfig);
+ if (cfg & CfgExtPhy)
+ return;
+
+ /* On page 78 of the spec, they recommend some settings for "optimum
+ performance" to be done in sequence. These settings optimize some
+ of the 100Mbit autodetection circuitry. They say we only want to
+ do this for rev C of the chip, but engineers at NSC (Bradley
+ Kennedy) recommend always setting them. If you don't, you get
+ errors on some autonegotiations that make the device unusable.
+
+ It seems that the DSP needs a few usec to reinitialize after
+ the start of the phy. Just retry writing these values until they
+ stick.
+ */
+ for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+
+ int dspcfg;
+ writew(1, ioaddr + PGSEL);
+ writew(PMDCSR_VAL, ioaddr + PMDCSR);
+ writew(TSTDAT_VAL, ioaddr + TSTDAT);
+ np->dspcfg = (np->srr <= SRR_DP83815_C)?
+ DSPCFG_VAL : (DSPCFG_COEF | readw(ioaddr + DSPCFG));
+ writew(np->dspcfg, ioaddr + DSPCFG);
+ writew(SDCFG_VAL, ioaddr + SDCFG);
+ writew(0, ioaddr + PGSEL);
+ readl(ioaddr + ChipConfig);
+ udelay(10);
+
+ writew(1, ioaddr + PGSEL);
+ dspcfg = readw(ioaddr + DSPCFG);
+ writew(0, ioaddr + PGSEL);
+ if (np->dspcfg == dspcfg)
+ break;
+ }
+
+ if (netif_msg_link(np)) {
+ if (i==NATSEMI_HW_TIMEOUT) {
+ printk(KERN_INFO
+ "%s: DSPCFG mismatch after retrying for %d usec.\n",
+ dev->name, i*10);
+ } else {
+ printk(KERN_INFO
+ "%s: DSPCFG accepted after %d usec.\n",
+ dev->name, i*10);
+ }
+ }
+ /*
+ * Enable PHY Specific event based interrupts. Link state change
+ * and Auto-Negotiation Completion are among the affected.
+ * Read the intr status to clear it (needed for wake events).
+ */
+ readw(ioaddr + MIntrStatus);
+ writew(MICRIntEn, ioaddr + MIntrCtrl);
+}
+
+static int switch_port_external(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = ns_ioaddr(dev);
+ u32 cfg;
+
+ cfg = readl(ioaddr + ChipConfig);
+ if (cfg & CfgExtPhy)
+ return 0;
+
+ if (netif_msg_link(np)) {
+ printk(KERN_INFO "%s: switching to external transceiver.\n",
+ dev->name);
+ }
+
+ /* 1) switch back to external phy */
+ writel(cfg | (CfgExtPhy | CfgPhyDis), ioaddr + ChipConfig);
+ readl(ioaddr + ChipConfig);
+ udelay(1);
+
+ /* 2) reset the external phy: */
+ /* resetting the external PHY has been known to cause a hub supplying
+ * power over Ethernet to kill the power. We don't want to kill
+ * power to this computer, so we avoid resetting the phy.
+ */
+
+ /* 3) reinit the phy fixup, it got lost during power down. */
+ move_int_phy(dev, np->phy_addr_external);
+ init_phy_fixup(dev);
+
+ return 1;
+}
+
+static int switch_port_internal(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = ns_ioaddr(dev);
+ int i;
+ u32 cfg;
+ u16 bmcr;
+
+ cfg = readl(ioaddr + ChipConfig);
+ if (!(cfg & CfgExtPhy))
+ return 0;
+
+ if (netif_msg_link(np)) {
+ printk(KERN_INFO "%s: switching to internal transceiver.\n",
+ dev->name);
+ }
+ /* 1) switch back to internal phy: */
+ cfg = cfg & ~(CfgExtPhy | CfgPhyDis);
+ writel(cfg, ioaddr + ChipConfig);
+ readl(ioaddr + ChipConfig);
+ udelay(1);
+
+ /* 2) reset the internal phy: */
+ bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
+ writel(bmcr | BMCR_RESET, ioaddr+BasicControl+(MII_BMCR<<2));
+ readl(ioaddr + ChipConfig);
+ udelay(10);
+ for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+ bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
+ if (!(bmcr & BMCR_RESET))
+ break;
+ udelay(10);
+ }
+ if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
+ printk(KERN_INFO
+ "%s: phy reset did not complete in %d usec.\n",
+ dev->name, i*10);
+ }
+ /* 3) reinit the phy fixup, it got lost during power down. */
+ init_phy_fixup(dev);
+
+ return 1;
+}
+
+/* Scan for a PHY on the external mii bus.
+ * There are two tricky points:
+ * - Do not scan while the internal phy is enabled. The internal phy will
+ * crash: e.g. reads from the DSPCFG register will return odd values and
+ * the nasty random phy reset code will reset the nic every few seconds.
+ * - The internal phy must be moved around, since an external phy could
+ * have the same address as the internal phy.
+ */
+static int find_mii(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int tmp;
+ int i;
+ int did_switch;
+
+ /* Switch to external phy */
+ did_switch = switch_port_external(dev);
+
+ /* Scan the possible phy addresses:
+ *
+ * PHY address 0 means that the phy is in isolate mode. Not yet
+ * supported due to lack of test hardware. User space should
+ * handle it through ethtool.
+ */
+ for (i = 1; i <= 31; i++) {
+ move_int_phy(dev, i);
+ tmp = miiport_read(dev, i, MII_BMSR);
+ if (tmp != 0xffff && tmp != 0x0000) {
+ /* found something! */
+ np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
+ + mdio_read(dev, MII_PHYSID2);
+ if (netif_msg_probe(np)) {
+ printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n",
+ pci_name(np->pci_dev), np->mii, i);
+ }
+ break;
+ }
+ }
+ /* And switch back to internal phy: */
+ if (did_switch)
+ switch_port_internal(dev);
+ return i;
+}
+
+/* CFG bits [13:16] [18:23] */
+#define CFG_RESET_SAVE 0xfde000
+/* WCSR bits [0:4] [9:10] */
+#define WCSR_RESET_SAVE 0x61f
+/* RFCR bits [20] [22] [27:31] */
+#define RFCR_RESET_SAVE 0xf8500000
+
+static void natsemi_reset(struct net_device *dev)
+{
+ int i;
+ u32 cfg;
+ u32 wcsr;
+ u32 rfcr;
+ u16 pmatch[3];
+ u16 sopass[3];
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = ns_ioaddr(dev);
+
+ /*
+ * Resetting the chip causes some registers to be lost.
+ * Natsemi suggests NOT reloading the EEPROM while live, so instead
+ * we save the state that would have been loaded from EEPROM
+ * on a normal power-up (see the spec EEPROM map). This assumes
+ * whoever calls this will follow up with init_registers() eventually.
+ */
+
+ /* CFG */
+ cfg = readl(ioaddr + ChipConfig) & CFG_RESET_SAVE;
+ /* WCSR */
+ wcsr = readl(ioaddr + WOLCmd) & WCSR_RESET_SAVE;
+ /* RFCR */
+ rfcr = readl(ioaddr + RxFilterAddr) & RFCR_RESET_SAVE;
+ /* PMATCH */
+ for (i = 0; i < 3; i++) {
+ writel(i*2, ioaddr + RxFilterAddr);
+ pmatch[i] = readw(ioaddr + RxFilterData);
+ }
+ /* SOPASS */
+ for (i = 0; i < 3; i++) {
+ writel(0xa+(i*2), ioaddr + RxFilterAddr);
+ sopass[i] = readw(ioaddr + RxFilterData);
+ }
+
+ /* now whack the chip */
+ writel(ChipReset, ioaddr + ChipCmd);
+ for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+ if (!(readl(ioaddr + ChipCmd) & ChipReset))
+ break;
+ udelay(5);
+ }
+ if (i==NATSEMI_HW_TIMEOUT) {
+ printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
+ dev->name, i*5);
+ } else if (netif_msg_hw(np)) {
+ printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
+ dev->name, i*5);
+ }
+
+ /* restore CFG */
+ cfg |= readl(ioaddr + ChipConfig) & ~CFG_RESET_SAVE;
+ /* turn on external phy if it was selected */
+ if (dev->if_port == PORT_TP)
+ cfg &= ~(CfgExtPhy | CfgPhyDis);
+ else
+ cfg |= (CfgExtPhy | CfgPhyDis);
+ writel(cfg, ioaddr + ChipConfig);
+ /* restore WCSR */
+ wcsr |= readl(ioaddr + WOLCmd) & ~WCSR_RESET_SAVE;
+ writel(wcsr, ioaddr + WOLCmd);
+ /* read RFCR */
+ rfcr |= readl(ioaddr + RxFilterAddr) & ~RFCR_RESET_SAVE;
+ /* restore PMATCH */
+ for (i = 0; i < 3; i++) {
+ writel(i*2, ioaddr + RxFilterAddr);
+ writew(pmatch[i], ioaddr + RxFilterData);
+ }
+ for (i = 0; i < 3; i++) {
+ writel(0xa+(i*2), ioaddr + RxFilterAddr);
+ writew(sopass[i], ioaddr + RxFilterData);
+ }
+ /* restore RFCR */
+ writel(rfcr, ioaddr + RxFilterAddr);
+}
+
+static void reset_rx(struct net_device *dev)
+{
+ int i;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = ns_ioaddr(dev);
+
+ np->intr_status &= ~RxResetDone;
+
+ writel(RxReset, ioaddr + ChipCmd);
+
+ for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+ np->intr_status |= readl(ioaddr + IntrStatus);
+ if (np->intr_status & RxResetDone)
+ break;
+ udelay(15);
+ }
+ if (i==NATSEMI_HW_TIMEOUT) {
+ printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n",
+ dev->name, i*15);
+ } else if (netif_msg_hw(np)) {
+ printk(KERN_WARNING "%s: RX reset took %d usec.\n",
+ dev->name, i*15);
+ }
+}
+
+static void natsemi_reload_eeprom(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = ns_ioaddr(dev);
+ int i;
+
+ writel(EepromReload, ioaddr + PCIBusCfg);
+ for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+ udelay(50);
+ if (!(readl(ioaddr + PCIBusCfg) & EepromReload))
+ break;
+ }
+ if (i==NATSEMI_HW_TIMEOUT) {
+ printk(KERN_WARNING "natsemi %s: EEPROM did not reload in %d usec.\n",
+ pci_name(np->pci_dev), i*50);
+ } else if (netif_msg_hw(np)) {
+ printk(KERN_DEBUG "natsemi %s: EEPROM reloaded in %d usec.\n",
+ pci_name(np->pci_dev), i*50);
+ }
+}
+
+static void natsemi_stop_rxtx(struct net_device *dev)
+{
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ writel(RxOff | TxOff, ioaddr + ChipCmd);
+ for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+ if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0)
+ break;
+ udelay(5);
+ }
+ if (i==NATSEMI_HW_TIMEOUT) {
+ printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n",
+ dev->name, i*5);
+ } else if (netif_msg_hw(np)) {
+ printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
+ dev->name, i*5);
+ }
+}
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ int i;
+
+ /* Reset the chip, just in case. */
+ natsemi_reset(dev);
+
+ i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
+ if (i) return i;
+
+ if (netif_msg_ifup(np))
+ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+ dev->name, dev->irq);
+ i = alloc_ring(dev);
+ if (i < 0) {
+ free_irq(dev->irq, dev);
+ return i;
+ }
+ napi_enable(&np->napi);
+
+ init_ring(dev);
+ spin_lock_irq(&np->lock);
+ init_registers(dev);
+ /* now set the MAC address according to dev->dev_addr */
+ for (i = 0; i < 3; i++) {
+ u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i];
+
+ writel(i*2, ioaddr + RxFilterAddr);
+ writew(mac, ioaddr + RxFilterData);
+ }
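+ /* RFCR offsets 0, 2 and 4 select the three 16-bit perfect-match
+ * (PMATCH) words, so the loop above stores the station address
+ * little-endian, two bytes per write.
+ */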
+ writel(np->cur_rx_mode, ioaddr + RxFilterAddr);
+ spin_unlock_irq(&np->lock);
+
+ netif_start_queue(dev);
+
+ if (netif_msg_ifup(np))
+ printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n",
+ dev->name, (int)readl(ioaddr + ChipCmd));
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ);
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = netdev_timer; /* timer handler */
+ add_timer(&np->timer);
+
+ return 0;
+}
+
+static void do_cable_magic(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = ns_ioaddr(dev);
+
+ if (dev->if_port != PORT_TP)
+ return;
+
+ if (np->srr >= SRR_DP83816_A5)
+ return;
+
+ /*
+ * 100 Mbit links with short cables can trip an issue with the chip.
+ * The problem manifests as lots of CRC errors and/or flickering
+ * activity LED while idle. This process is based on instructions
+ * from engineers at National.
+ */
+ if (readl(ioaddr + ChipConfig) & CfgSpeed100) {
+ u16 data;
+
+ writew(1, ioaddr + PGSEL);
+ /*
+ * coefficient visibility should already be enabled via
+ * DSPCFG | 0x1000
+ */
+ data = readw(ioaddr + TSTDAT) & 0xff;
+ /*
+ * the value must be negative, and within certain values
+ * (these values all come from National)
+ */
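+ /* data is an 8-bit two's-complement coefficient, so the test
+ * below fires when it is >= 0 or in 0xd8..0xff (i.e. -40..-1).
+ */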
+ if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
+ /* the bug has been triggered - fix the coefficient */
+ writew(TSTDAT_FIXED, ioaddr + TSTDAT);
+ /* lock the value */
+ data = readw(ioaddr + DSPCFG);
+ np->dspcfg = data | DSPCFG_LOCK;
+ writew(np->dspcfg, ioaddr + DSPCFG);
+ }
+ writew(0, ioaddr + PGSEL);
+ }
+}
+
+static void undo_cable_magic(struct net_device *dev)
+{
+ u16 data;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ if (dev->if_port != PORT_TP)
+ return;
+
+ if (np->srr >= SRR_DP83816_A5)
+ return;
+
+ writew(1, ioaddr + PGSEL);
+ /* make sure the lock bit is clear */
+ data = readw(ioaddr + DSPCFG);
+ np->dspcfg = data & ~DSPCFG_LOCK;
+ writew(np->dspcfg, ioaddr + DSPCFG);
+ writew(0, ioaddr + PGSEL);
+}
+
+static void check_link(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ int duplex = np->duplex;
+ u16 bmsr;
+
+ /* If we are ignoring the PHY then don't try reading it. */
+ if (np->ignore_phy)
+ goto propagate_state;
+
+ /* The link status field is latched: it remains low after a temporary
+ * link failure until it's read. We need the current link status,
+ * thus read twice.
+ */
+ mdio_read(dev, MII_BMSR);
+ bmsr = mdio_read(dev, MII_BMSR);
+
+ if (!(bmsr & BMSR_LSTATUS)) {
+ if (netif_carrier_ok(dev)) {
+ if (netif_msg_link(np))
+ printk(KERN_NOTICE "%s: link down.\n",
+ dev->name);
+ netif_carrier_off(dev);
+ undo_cable_magic(dev);
+ }
+ return;
+ }
+ if (!netif_carrier_ok(dev)) {
+ if (netif_msg_link(np))
+ printk(KERN_NOTICE "%s: link up.\n", dev->name);
+ netif_carrier_on(dev);
+ do_cable_magic(dev);
+ }
+
+ duplex = np->full_duplex;
+ if (!duplex) {
+ if (bmsr & BMSR_ANEGCOMPLETE) {
+ int tmp = mii_nway_result(
+ np->advertising & mdio_read(dev, MII_LPA));
+ if (tmp == LPA_100FULL || tmp == LPA_10FULL)
+ duplex = 1;
+ } else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX)
+ duplex = 1;
+ }
+
+propagate_state:
+ /* if duplex is set then RxAcceptTx (bit 28 of RxConfig) must be set, too */
+ if (duplex ^ !!(np->rx_config & RxAcceptTx)) {
+ if (netif_msg_link(np))
+ printk(KERN_INFO
+ "%s: Setting %s-duplex based on negotiated "
+ "link capability.\n", dev->name,
+ duplex ? "full" : "half");
+ if (duplex) {
+ np->rx_config |= RxAcceptTx;
+ np->tx_config |= TxCarrierIgn | TxHeartIgn;
+ } else {
+ np->rx_config &= ~RxAcceptTx;
+ np->tx_config &= ~(TxCarrierIgn | TxHeartIgn);
+ }
+ writel(np->tx_config, ioaddr + TxConfig);
+ writel(np->rx_config, ioaddr + RxConfig);
+ }
+}
+
+static void init_registers(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ init_phy_fixup(dev);
+
+ /* clear any interrupts that are pending, such as wake events */
+ readl(ioaddr + IntrStatus);
+
+ writel(np->ring_dma, ioaddr + RxRingPtr);
+ writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc),
+ ioaddr + TxRingPtr);
+
+ /* Initialize other registers.
+ * Configure the PCI bus bursts and FIFO thresholds.
+ * Configure for standard, in-spec Ethernet.
+ * Start with half-duplex. check_link will update
+ * to the correct settings.
+ */
+
+ /* DRTH: 2: start tx if 64 bytes are in the fifo
+ * FLTH: 0x10: refill with next packet if 512 bytes are free
+ * MXDMA: 0: up to 256 byte bursts.
+ * MXDMA must be <= FLTH
+ * ECRETRY=1
+ * ATP=1
+ */
+ np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 |
+ TX_FLTH_VAL | TX_DRTH_VAL_START;
+ writel(np->tx_config, ioaddr + TxConfig);
+
+ /* DRTH 0x10: start copying to memory if 128 bytes are in the fifo
+ * MXDMA 0: up to 256 byte bursts
+ */
+ np->rx_config = RxMxdma_256 | RX_DRTH_VAL;
+ /* if receive ring now has bigger buffers than normal, enable jumbo */
+ if (np->rx_buf_sz > NATSEMI_LONGPKT)
+ np->rx_config |= RxAcceptLong;
+
+ writel(np->rx_config, ioaddr + RxConfig);
+
+ /* Disable PME:
+ * The PME bit is initialized from the EEPROM contents.
+ * PCI cards probably have PME disabled, but motherboard
+ * implementations may have PME set to enable WakeOnLan.
+ * With PME set the chip will scan incoming packets but
+ * nothing will be written to memory. */
+ np->SavedClkRun = readl(ioaddr + ClkRun);
+ writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun);
+ if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) {
+ printk(KERN_NOTICE "%s: Wake-up event %#08x\n",
+ dev->name, readl(ioaddr + WOLCmd));
+ }
+
+ check_link(dev);
+ __set_rx_mode(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ writel(DEFAULT_INTR, ioaddr + IntrMask);
+ natsemi_irq_enable(dev);
+
+ writel(RxOn | TxOn, ioaddr + ChipCmd);
+ writel(StatsClear, ioaddr + StatsCtrl); /* Clear Stats */
+}
+
+/*
+ * netdev_timer:
+ * Purpose:
+ * 1) check for link changes. Usually they are handled by the MII interrupt
+ * but it doesn't hurt to check twice.
+ * 2) check for sudden death of the NIC:
+ * It seems that a reference set for this chip went out with incorrect info,
+ * and there exist boards that aren't quite right. An unexpected voltage
+ * drop can cause the PHY to get itself in a weird state (basically reset).
+ * NOTE: this only seems to affect revC chips. The user can disable
+ * this check via the dspcfg_workaround sysfs option.
+ * 3) check for death of the RX path due to OOM
+ */
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ int next_tick = NATSEMI_TIMER_FREQ;
+
+ if (netif_msg_timer(np)) {
+ /* DO NOT read the IntrStatus register,
+ * a read clears any pending interrupts.
+ */
+ printk(KERN_DEBUG "%s: Media selection timer tick.\n",
+ dev->name);
+ }
+
+ if (dev->if_port == PORT_TP) {
+ u16 dspcfg;
+
+ spin_lock_irq(&np->lock);
+ /* check for a nasty random phy-reset - use dspcfg as a flag */
+ writew(1, ioaddr+PGSEL);
+ dspcfg = readw(ioaddr+DSPCFG);
+ writew(0, ioaddr+PGSEL);
+ if (np->dspcfg_workaround && dspcfg != np->dspcfg) {
+ if (!netif_queue_stopped(dev)) {
+ spin_unlock_irq(&np->lock);
+ if (netif_msg_drv(np))
+ printk(KERN_NOTICE "%s: possible phy reset: "
+ "re-initializing\n", dev->name);
+ disable_irq(dev->irq);
+ spin_lock_irq(&np->lock);
+ natsemi_stop_rxtx(dev);
+ dump_ring(dev);
+ reinit_ring(dev);
+ init_registers(dev);
+ spin_unlock_irq(&np->lock);
+ enable_irq(dev->irq);
+ } else {
+ /* hurry back */
+ next_tick = HZ;
+ spin_unlock_irq(&np->lock);
+ }
+ } else {
+ /* init_registers() calls check_link() for the above case */
+ check_link(dev);
+ spin_unlock_irq(&np->lock);
+ }
+ } else {
+ spin_lock_irq(&np->lock);
+ check_link(dev);
+ spin_unlock_irq(&np->lock);
+ }
+ if (np->oom) {
+ disable_irq(dev->irq);
+ np->oom = 0;
+ refill_rx(dev);
+ enable_irq(dev->irq);
+ if (!np->oom) {
+ writel(RxOn, ioaddr + ChipCmd);
+ } else {
+ next_tick = 1;
+ }
+ }
+
+ if (next_tick > 1)
+ mod_timer(&np->timer, round_jiffies(jiffies + next_tick));
+ else
+ mod_timer(&np->timer, jiffies + next_tick);
+}
+
+static void dump_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ if (netif_msg_pktdata(np)) {
+ int i;
+ printk(KERN_DEBUG " Tx ring at %p:\n", np->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
+ i, np->tx_ring[i].next_desc,
+ np->tx_ring[i].cmd_status,
+ np->tx_ring[i].addr);
+ }
+ printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
+ i, np->rx_ring[i].next_desc,
+ np->rx_ring[i].cmd_status,
+ np->rx_ring[i].addr);
+ }
+ }
+}
+
+static void ns_tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ disable_irq(dev->irq);
+ spin_lock_irq(&np->lock);
+ if (!np->hands_off) {
+ if (netif_msg_tx_err(np))
+ printk(KERN_WARNING
+ "%s: Transmit timed out, status %#08x,"
+ " resetting...\n",
+ dev->name, readl(ioaddr + IntrStatus));
+ dump_ring(dev);
+
+ natsemi_reset(dev);
+ reinit_ring(dev);
+ init_registers(dev);
+ } else {
+ printk(KERN_WARNING
+ "%s: tx_timeout while in hands_off state?\n",
+ dev->name);
+ }
+ spin_unlock_irq(&np->lock);
+ enable_irq(dev->irq);
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ dev->stats.tx_errors++;
+ netif_wake_queue(dev);
+}
+
+static int alloc_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ np->rx_ring = pci_alloc_consistent(np->pci_dev,
+ sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
+ &np->ring_dma);
+ if (!np->rx_ring)
+ return -ENOMEM;
+ np->tx_ring = &np->rx_ring[RX_RING_SIZE];
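+ /* Both rings live in one DMA-coherent block: RX descriptors first,
+ * then TX, so a TX descriptor's bus address is
+ * ring_dma + (RX_RING_SIZE + n) * sizeof(struct netdev_desc).
+ */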
+ return 0;
+}
+
+static void refill_rx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ /* Refill the Rx ring buffers. */
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ struct sk_buff *skb;
+ int entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING;
+ skb = dev_alloc_skb(buflen);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_dma[entry] = pci_map_single(np->pci_dev,
+ skb->data, buflen, PCI_DMA_FROMDEVICE);
+ np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
+ }
+ np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
+ }
+ if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
+ if (netif_msg_rx_err(np))
+ printk(KERN_WARNING "%s: going OOM.\n", dev->name);
+ np->oom = 1;
+ }
+}
+
+static void set_bufsize(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ if (dev->mtu <= ETH_DATA_LEN)
+ np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS;
+ else
+ np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS;
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ /* 1) TX ring */
+ np->dirty_tx = np->cur_tx = 0;
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = NULL;
+ np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
+ +sizeof(struct netdev_desc)
+ *((i+1)%TX_RING_SIZE+RX_RING_SIZE));
+ np->tx_ring[i].cmd_status = 0;
+ }
+
+ /* 2) RX ring */
+ np->dirty_rx = 0;
+ np->cur_rx = RX_RING_SIZE;
+ np->oom = 0;
+ set_bufsize(dev);
+
+ np->rx_head_desc = &np->rx_ring[0];
+
+ /* Please be careful before changing this loop - at least gcc-2.95.1
+ * miscompiles it otherwise.
+ */
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
+ +sizeof(struct netdev_desc)
+ *((i+1)%RX_RING_SIZE));
+ np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
+ np->rx_skbuff[i] = NULL;
+ }
+ refill_rx(dev);
+ dump_ring(dev);
+}
+
+static void drain_tx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (np->tx_skbuff[i]) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_dma[i], np->tx_skbuff[i]->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(np->tx_skbuff[i]);
+ dev->stats.tx_dropped++;
+ }
+ np->tx_skbuff[i] = NULL;
+ }
+}
+
+static void drain_rx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ unsigned int buflen = np->rx_buf_sz;
+ int i;
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].cmd_status = 0;
+ np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
+ if (np->rx_skbuff[i]) {
+ pci_unmap_single(np->pci_dev, np->rx_dma[i],
+ buflen + NATSEMI_PADDING,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = NULL;
+ }
+}
+
+static void drain_ring(struct net_device *dev)
+{
+ drain_rx(dev);
+ drain_tx(dev);
+}
+
+static void free_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ pci_free_consistent(np->pci_dev,
+ sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
+ np->rx_ring, np->ring_dma);
+}
+
+static void reinit_rx(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ /* RX Ring */
+ np->dirty_rx = 0;
+ np->cur_rx = RX_RING_SIZE;
+ np->rx_head_desc = &np->rx_ring[0];
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++)
+ np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
+
+ refill_rx(dev);
+}
+
+static void reinit_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ /* drain TX ring */
+ drain_tx(dev);
+ np->dirty_tx = np->cur_tx = 0;
+ for (i=0;i<TX_RING_SIZE;i++)
+ np->tx_ring[i].cmd_status = 0;
+
+ reinit_rx(dev);
+}
+
+static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ unsigned entry;
+ unsigned long flags;
+
+ /* Note: Ordering is important here, set the field with the
+ "ownership" bit last, and only then increment cur_tx. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+
+ np->tx_skbuff[entry] = skb;
+ np->tx_dma[entry] = pci_map_single(np->pci_dev,
+ skb->data,skb->len, PCI_DMA_TODEVICE);
+
+ np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ if (!np->hands_off) {
+ np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
+ /* StrongARM: Explicitly cache flush np->tx_ring and
+ * skb->data,skb->len. */
+ wmb();
+ np->cur_tx++;
+ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+ netdev_tx_done(dev);
+ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
+ netif_stop_queue(dev);
+ }
+ /* Wake the potentially-idle transmit channel. */
+ writel(TxOn, ioaddr + ChipCmd);
+ } else {
+ dev_kfree_skb_irq(skb);
+ dev->stats.tx_dropped++;
+ }
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ if (netif_msg_tx_queued(np)) {
+ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
+ dev->name, np->cur_tx, entry);
+ }
+ return NETDEV_TX_OK;
+}
+
+static void netdev_tx_done(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
+ break;
+ if (netif_msg_tx_done(np))
+ printk(KERN_DEBUG
+ "%s: tx frame #%d finished, status %#08x.\n",
+ dev->name, np->dirty_tx,
+ le32_to_cpu(np->tx_ring[entry].cmd_status));
+ if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += np->tx_skbuff[entry]->len;
+ } else { /* Various Tx errors */
+ int tx_status =
+ le32_to_cpu(np->tx_ring[entry].cmd_status);
+ if (tx_status & (DescTxAbort|DescTxExcColl))
+ dev->stats.tx_aborted_errors++;
+ if (tx_status & DescTxFIFO)
+ dev->stats.tx_fifo_errors++;
+ if (tx_status & DescTxCarrier)
+ dev->stats.tx_carrier_errors++;
+ if (tx_status & DescTxOOWCol)
+ dev->stats.tx_window_errors++;
+ dev->stats.tx_errors++;
+ }
+ pci_unmap_single(np->pci_dev,np->tx_dma[entry],
+ np->tx_skbuff[entry]->len,
+ PCI_DMA_TODEVICE);
+ /* Free the original skb. */
+ dev_kfree_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = NULL;
+ }
+ if (netif_queue_stopped(dev) &&
+ np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, wake queue. */
+ netif_wake_queue(dev);
+ }
+}
+
+/* The interrupt handler doesn't actually handle interrupts itself, it
+ * schedules a NAPI poll if there is anything to do. */
+static irqreturn_t intr_handler(int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ /* Reading IntrStatus automatically acknowledges, so don't do
+ * that while interrupts are disabled (for example, while a
+ * poll is scheduled). */
+ if (np->hands_off || !readl(ioaddr + IntrEnable))
+ return IRQ_NONE;
+
+ np->intr_status = readl(ioaddr + IntrStatus);
+
+ if (!np->intr_status)
+ return IRQ_NONE;
+
+ if (netif_msg_intr(np))
+ printk(KERN_DEBUG
+ "%s: Interrupt, status %#08x, mask %#08x.\n",
+ dev->name, np->intr_status,
+ readl(ioaddr + IntrMask));
+
+ prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);
+
+ if (napi_schedule_prep(&np->napi)) {
+ /* Disable interrupts and register for poll */
+ natsemi_irq_disable(dev);
+ __napi_schedule(&np->napi);
+ } else
+ printk(KERN_WARNING
+ "%s: Ignoring interrupt, status %#08x, mask %#08x.\n",
+ dev->name, np->intr_status,
+ readl(ioaddr + IntrMask));
+
+ return IRQ_HANDLED;
+}
+
+/* This is the NAPI poll routine. As well as the standard RX handling
+ * it also handles all other interrupts that the chip might raise.
+ */
+static int natsemi_poll(struct napi_struct *napi, int budget)
+{
+ struct netdev_private *np = container_of(napi, struct netdev_private, napi);
+ struct net_device *dev = np->dev;
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ int work_done = 0;
+
+ do {
+ if (netif_msg_intr(np))
+ printk(KERN_DEBUG
+ "%s: Poll, status %#08x, mask %#08x.\n",
+ dev->name, np->intr_status,
+ readl(ioaddr + IntrMask));
+
+ /* netdev_rx() may read IntrStatus again if the RX state
+ * machine falls over so do it first. */
+ if (np->intr_status &
+ (IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
+ IntrRxErr | IntrRxOverrun)) {
+ netdev_rx(dev, &work_done, budget);
+ }
+
+ if (np->intr_status &
+ (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
+ spin_lock(&np->lock);
+ netdev_tx_done(dev);
+ spin_unlock(&np->lock);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (np->intr_status & IntrAbnormalSummary)
+ netdev_error(dev, np->intr_status);
+
+ if (work_done >= budget)
+ return work_done;
+
+ np->intr_status = readl(ioaddr + IntrStatus);
+ } while (np->intr_status);
+
+ napi_complete(napi);
+
+ /* Reenable interrupts, provided nothing is trying to shut
+ * the chip down. */
+ spin_lock(&np->lock);
+ if (!np->hands_off)
+ natsemi_irq_enable(dev);
+ spin_unlock(&np->lock);
+
+ return work_done;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+ s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
+ unsigned int buflen = np->rx_buf_sz;
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ /* If the driver owns the next entry it's a new packet. Send it up. */
+ while (desc_status < 0) { /* DescOwn is bit 31, so "< 0" tests ownership */
+ int pkt_len;
+ if (netif_msg_rx_status(np))
+ printk(KERN_DEBUG
+ " netdev_rx() entry %d status was %#08x.\n",
+ entry, desc_status);
+ if (--boguscnt < 0)
+ break;
+
+ if (*work_done >= work_to_do)
+ break;
+
+ (*work_done)++;
+
+ pkt_len = (desc_status & DescSizeMask) - 4;
+ if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
+ if (desc_status & DescMore) {
+ unsigned long flags;
+
+ if (netif_msg_rx_err(np))
+ printk(KERN_WARNING
+ "%s: Oversized(?) Ethernet "
+ "frame spanned multiple "
+ "buffers, entry %#08x "
+ "status %#08x.\n", dev->name,
+ np->cur_rx, desc_status);
+ dev->stats.rx_length_errors++;
+
+ /* The RX state machine has probably
+ * locked up beneath us. Follow the
+ * reset procedure documented in
+ * AN-1287. */
+
+ spin_lock_irqsave(&np->lock, flags);
+ reset_rx(dev);
+ reinit_rx(dev);
+ writel(np->ring_dma, ioaddr + RxRingPtr);
+ check_link(dev);
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ /* We'll enable RX on exit from this
+ * function. */
+ break;
+
+ } else {
+ /* There was an error. */
+ dev->stats.rx_errors++;
+ if (desc_status & (DescRxAbort|DescRxOver))
+ dev->stats.rx_over_errors++;
+ if (desc_status & (DescRxLong|DescRxRunt))
+ dev->stats.rx_length_errors++;
+ if (desc_status & (DescRxInvalid|DescRxAlign))
+ dev->stats.rx_frame_errors++;
+ if (desc_status & DescRxCRC)
+ dev->stats.rx_crc_errors++;
+ }
+ } else if (pkt_len > np->rx_buf_sz) {
+ /* if this is the tail of a double buffer
+ * packet, we've already counted the error
+ * on the first part. Ignore the second half.
+ */
+ } else {
+ struct sk_buff *skb;
+ /* pkt_len already omits the 4-byte CRC (subtracted above). */
+ /* Check if the packet is long enough to accept
+ * without copying to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak &&
+ (skb = dev_alloc_skb(pkt_len + RX_OFFSET)) != NULL) {
+ /* 16 byte align the IP header */
+ skb_reserve(skb, RX_OFFSET);
+ pci_dma_sync_single_for_cpu(np->pci_dev,
+ np->rx_dma[entry],
+ buflen,
+ PCI_DMA_FROMDEVICE);
+ skb_copy_to_linear_data(skb,
+ np->rx_skbuff[entry]->data, pkt_len);
+ skb_put(skb, pkt_len);
+ pci_dma_sync_single_for_device(np->pci_dev,
+ np->rx_dma[entry],
+ buflen,
+ PCI_DMA_FROMDEVICE);
+ } else {
+ pci_unmap_single(np->pci_dev, np->rx_dma[entry],
+ buflen + NATSEMI_PADDING,
+ PCI_DMA_FROMDEVICE);
+ skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ entry = (++np->cur_rx) % RX_RING_SIZE;
+ np->rx_head_desc = &np->rx_ring[entry];
+ desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
+ }
+ refill_rx(dev);
+
+ /* Restart Rx engine if stopped. */
+ if (np->oom)
+ mod_timer(&np->timer, jiffies + 1);
+ else
+ writel(RxOn, ioaddr + ChipCmd);
+}
+
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ spin_lock(&np->lock);
+ if (intr_status & LinkChange) {
+ u16 lpa = mdio_read(dev, MII_LPA);
+ if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE &&
+ netif_msg_link(np)) {
+ printk(KERN_INFO
+ "%s: Autonegotiation advertising"
+ " %#04x partner %#04x.\n", dev->name,
+ np->advertising, lpa);
+ }
+
+ /* read MII int status to clear the flag */
+ readw(ioaddr + MIntrStatus);
+ check_link(dev);
+ }
+ if (intr_status & StatsMax) {
+ __get_stats(dev);
+ }
+ if (intr_status & IntrTxUnderrun) {
+ if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) {
+ np->tx_config += TX_DRTH_VAL_INC;
+ if (netif_msg_tx_err(np))
+ printk(KERN_NOTICE
+ "%s: increased tx threshold, txcfg %#08x.\n",
+ dev->name, np->tx_config);
+ } else {
+ if (netif_msg_tx_err(np))
+ printk(KERN_NOTICE
+ "%s: tx underrun with maximum tx threshold, txcfg %#08x.\n",
+ dev->name, np->tx_config);
+ }
+ writel(np->tx_config, ioaddr + TxConfig);
+ }
+ if (intr_status & WOLPkt && netif_msg_wol(np)) {
+ int wol_status = readl(ioaddr + WOLCmd);
+ printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
+ dev->name, wol_status);
+ }
+ if (intr_status & RxStatusFIFOOver) {
+ if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
+ printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
+ dev->name);
+ }
+ dev->stats.rx_fifo_errors++;
+ dev->stats.rx_errors++;
+ }
+ /* Hmmmmm, it's not clear how to recover from PCI faults. */
+ if (intr_status & IntrPCIErr) {
+ printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
+ intr_status & IntrPCIErr);
+ dev->stats.tx_fifo_errors++;
+ dev->stats.tx_errors++;
+ dev->stats.rx_fifo_errors++;
+ dev->stats.rx_errors++;
+ }
+ spin_unlock(&np->lock);
+}
+
+static void __get_stats(struct net_device *dev)
+{
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ /* The chip only needs to report frames it silently dropped. */
+ dev->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
+ dev->stats.rx_missed_errors += readl(ioaddr + RxMissed);
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ /* The chip only needs to report frames it silently dropped. */
+ spin_lock_irq(&np->lock);
+ if (netif_running(dev) && !np->hands_off)
+ __get_stats(dev);
+ spin_unlock_irq(&np->lock);
+
+ return &dev->stats;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void natsemi_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ intr_handler(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+#define HASH_TABLE 0x200
+static void __set_rx_mode(struct net_device *dev)
+{
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ struct netdev_private *np = netdev_priv(dev);
+ u8 mc_filter[64]; /* Multicast hash filter */
+ u32 rx_mode;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ rx_mode = RxFilterEnable | AcceptBroadcast
+ | AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ rx_mode = RxFilterEnable | AcceptBroadcast
+ | AcceptAllMulticast | AcceptMyPhys;
+ } else {
+ struct netdev_hw_addr *ha;
+ int i;
+
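+ /* The chip hashes each multicast address into a 512-bit table;
+ * the bit index is the top 9 bits of the Ethernet CRC of the
+ * address. mc_filter[] mirrors that table and is copied into
+ * filter RAM 16 bits per write below.
+ */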
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff;
+ mc_filter[b/8] |= (1 << (b & 0x07));
+ }
+ rx_mode = RxFilterEnable | AcceptBroadcast
+ | AcceptMulticast | AcceptMyPhys;
+ for (i = 0; i < 64; i += 2) {
+ writel(HASH_TABLE + i, ioaddr + RxFilterAddr);
+ writel((mc_filter[i + 1] << 8) + mc_filter[i],
+ ioaddr + RxFilterData);
+ }
+ }
+ writel(rx_mode, ioaddr + RxFilterAddr);
+ np->cur_rx_mode = rx_mode;
+}
+
+static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (new_mtu < 64 || new_mtu > NATSEMI_RX_LIMIT-NATSEMI_HEADERS)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+
+ /* synchronized against open : rtnl_lock() held by caller */
+ if (netif_running(dev)) {
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ disable_irq(dev->irq);
+ spin_lock(&np->lock);
+ /* stop engines */
+ natsemi_stop_rxtx(dev);
+ /* drain rx queue */
+ drain_rx(dev);
+ /* change buffers */
+ set_bufsize(dev);
+ reinit_rx(dev);
+ writel(np->ring_dma, ioaddr + RxRingPtr);
+ /* restart engines */
+ writel(RxOn | TxOn, ioaddr + ChipCmd);
+ spin_unlock(&np->lock);
+ enable_irq(dev->irq);
+ }
+ return 0;
+}
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ spin_lock_irq(&np->lock);
+ if (!np->hands_off)
+ __set_rx_mode(dev);
+ spin_unlock_irq(&np->lock);
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+}
+
+static int get_regs_len(struct net_device *dev)
+{
+ return NATSEMI_REGS_SIZE;
+}
+
+static int get_eeprom_len(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return np->eeprom_size;
+}
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ spin_lock_irq(&np->lock);
+ netdev_get_ecmd(dev, ecmd);
+ spin_unlock_irq(&np->lock);
+ return 0;
+}
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int res;
+ spin_lock_irq(&np->lock);
+ res = netdev_set_ecmd(dev, ecmd);
+ spin_unlock_irq(&np->lock);
+ return res;
+}
+
+static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ spin_lock_irq(&np->lock);
+ netdev_get_wol(dev, &wol->supported, &wol->wolopts);
+ netdev_get_sopass(dev, wol->sopass);
+ spin_unlock_irq(&np->lock);
+}
+
+static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int res;
+ spin_lock_irq(&np->lock);
+ netdev_set_wol(dev, wol->wolopts);
+ res = netdev_set_sopass(dev, wol->sopass);
+ spin_unlock_irq(&np->lock);
+ return res;
+}
+
+static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ regs->version = NATSEMI_REGS_VER;
+ spin_lock_irq(&np->lock);
+ netdev_get_regs(dev, buf);
+ spin_unlock_irq(&np->lock);
+}
+
+static u32 get_msglevel(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return np->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ np->msg_enable = val;
+}
+
+static int nway_reset(struct net_device *dev)
+{
+ int tmp;
+ int r = -EINVAL;
+ /* if autoneg is off, it's an error */
+ tmp = mdio_read(dev, MII_BMCR);
+ if (tmp & BMCR_ANENABLE) {
+ tmp |= (BMCR_ANRESTART);
+ mdio_write(dev, MII_BMCR, tmp);
+ r = 0;
+ }
+ return r;
+}
+
+static u32 get_link(struct net_device *dev)
+{
+ /* LSTATUS is latched low until a read - so read twice */
+ mdio_read(dev, MII_BMSR);
+ return (mdio_read(dev, MII_BMSR)&BMSR_LSTATUS) ? 1:0;
+}
+
+static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ u8 *eebuf;
+ int res;
+
+ eebuf = kmalloc(np->eeprom_size, GFP_KERNEL);
+ if (!eebuf)
+ return -ENOMEM;
+
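+ /* ethtool eeprom magic: NS vendor ID in the low 16 bits, DP83815
+ * device ID in the high 16.
+ */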
+ eeprom->magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815<<16);
+ spin_lock_irq(&np->lock);
+ res = netdev_get_eeprom(dev, eebuf);
+ spin_unlock_irq(&np->lock);
+ if (!res)
+ memcpy(data, eebuf+eeprom->offset, eeprom->len);
+ kfree(eebuf);
+ return res;
+}
+
+static const struct ethtool_ops ethtool_ops = {
+ .get_drvinfo = get_drvinfo,
+ .get_regs_len = get_regs_len,
+ .get_eeprom_len = get_eeprom_len,
+ .get_settings = get_settings,
+ .set_settings = set_settings,
+ .get_wol = get_wol,
+ .set_wol = set_wol,
+ .get_regs = get_regs,
+ .get_msglevel = get_msglevel,
+ .set_msglevel = set_msglevel,
+ .nway_reset = nway_reset,
+ .get_link = get_link,
+ .get_eeprom = get_eeprom,
+};
+
+static int netdev_set_wol(struct net_device *dev, u32 newval)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ u32 data = readl(ioaddr + WOLCmd) & ~WakeOptsSummary;
+
+ /* translate to bitmasks this chip understands */
+ if (newval & WAKE_PHY)
+ data |= WakePhy;
+ if (newval & WAKE_UCAST)
+ data |= WakeUnicast;
+ if (newval & WAKE_MCAST)
+ data |= WakeMulticast;
+ if (newval & WAKE_BCAST)
+ data |= WakeBroadcast;
+ if (newval & WAKE_ARP)
+ data |= WakeArp;
+ if (newval & WAKE_MAGIC)
+ data |= WakeMagic;
+ if (np->srr >= SRR_DP83815_D) {
+ if (newval & WAKE_MAGICSECURE) {
+ data |= WakeMagicSecure;
+ }
+ }
+
+ writel(data, ioaddr + WOLCmd);
+
+ return 0;
+}
+
+static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ u32 regval = readl(ioaddr + WOLCmd);
+
+ *supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST
+ | WAKE_ARP | WAKE_MAGIC);
+
+ if (np->srr >= SRR_DP83815_D) {
+ /* SOPASS works on revD and higher */
+ *supported |= WAKE_MAGICSECURE;
+ }
+ *cur = 0;
+
+ /* translate from chip bitmasks */
+ if (regval & WakePhy)
+ *cur |= WAKE_PHY;
+ if (regval & WakeUnicast)
+ *cur |= WAKE_UCAST;
+ if (regval & WakeMulticast)
+ *cur |= WAKE_MCAST;
+ if (regval & WakeBroadcast)
+ *cur |= WAKE_BCAST;
+ if (regval & WakeArp)
+ *cur |= WAKE_ARP;
+ if (regval & WakeMagic)
+ *cur |= WAKE_MAGIC;
+ if (regval & WakeMagicSecure) {
+ /* this can be on in revC, but it's broken */
+ *cur |= WAKE_MAGICSECURE;
+ }
+
+ return 0;
+}
+
+static int netdev_set_sopass(struct net_device *dev, u8 *newval)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ u16 *sval = (u16 *)newval;
+ u32 addr;
+
+ if (np->srr < SRR_DP83815_D) {
+ return 0;
+ }
+
+ /* enable writing to these registers by disabling the RX filter */
+ addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
+ addr &= ~RxFilterEnable;
+ writel(addr, ioaddr + RxFilterAddr);
+
+ /* write the three words to (undocumented) RFCR vals 0xa, 0xc, 0xe */
+ writel(addr | 0xa, ioaddr + RxFilterAddr);
+ writew(sval[0], ioaddr + RxFilterData);
+
+ writel(addr | 0xc, ioaddr + RxFilterAddr);
+ writew(sval[1], ioaddr + RxFilterData);
+
+ writel(addr | 0xe, ioaddr + RxFilterAddr);
+ writew(sval[2], ioaddr + RxFilterData);
+
+ /* re-enable the RX filter */
+ writel(addr | RxFilterEnable, ioaddr + RxFilterAddr);
+
+ return 0;
+}
+
+static int netdev_get_sopass(struct net_device *dev, u8 *data)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ u16 *sval = (u16 *)data;
+ u32 addr;
+
+ if (np->srr < SRR_DP83815_D) {
+ sval[0] = sval[1] = sval[2] = 0;
+ return 0;
+ }
+
+ /* read the three words from (undocumented) RFCR vals 0xa, 0xc, 0xe */
+ addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
+
+ writel(addr | 0xa, ioaddr + RxFilterAddr);
+ sval[0] = readw(ioaddr + RxFilterData);
+
+ writel(addr | 0xc, ioaddr + RxFilterAddr);
+ sval[1] = readw(ioaddr + RxFilterData);
+
+ writel(addr | 0xe, ioaddr + RxFilterAddr);
+ sval[2] = readw(ioaddr + RxFilterData);
+
+ writel(addr, ioaddr + RxFilterAddr);
+
+ return 0;
+}
+
+static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ u32 tmp;
+
+ ecmd->port = dev->if_port;
+ ethtool_cmd_speed_set(ecmd, np->speed);
+ ecmd->duplex = np->duplex;
+ ecmd->autoneg = np->autoneg;
+ ecmd->advertising = 0;
+ if (np->advertising & ADVERTISE_10HALF)
+ ecmd->advertising |= ADVERTISED_10baseT_Half;
+ if (np->advertising & ADVERTISE_10FULL)
+ ecmd->advertising |= ADVERTISED_10baseT_Full;
+ if (np->advertising & ADVERTISE_100HALF)
+ ecmd->advertising |= ADVERTISED_100baseT_Half;
+ if (np->advertising & ADVERTISE_100FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ ecmd->supported = (SUPPORTED_Autoneg |
+ SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE);
+ ecmd->phy_address = np->phy_addr_external;
+ /*
+ * We intentionally report the phy address of the external
+ * phy, even if the internal phy is used. This is necessary
+ * to work around a deficiency of the ethtool interface:
+ * It's only possible to query the settings of the active
+ * port. Therefore
+ * # ethtool -s ethX port mii
+ * actually sends an ioctl to switch to port mii with the
+ * settings that are used for the current active port.
+ * If we reported a different phy address in this
+ * command, then
+ * # ethtool -s ethX port tp;ethtool -s ethX port mii
+ * would unintentionally change the phy address.
+ *
+ * Fortunately the phy address doesn't matter with the
+ * internal phy...
+ */
+
+ /* set information based on active port type */
+ switch (ecmd->port) {
+ default:
+ case PORT_TP:
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->transceiver = XCVR_INTERNAL;
+ break;
+ case PORT_MII:
+ ecmd->advertising |= ADVERTISED_MII;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ case PORT_FIBRE:
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ }
+
+ /* if autonegotiation is on, try to return the active speed/duplex */
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ tmp = mii_nway_result(
+ np->advertising & mdio_read(dev, MII_LPA));
+ if (tmp == LPA_100FULL || tmp == LPA_100HALF)
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
+ else
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
+ if (tmp == LPA_100FULL || tmp == LPA_10FULL)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ }
+
+ /* ignore maxtxpkt, maxrxpkt for now */
+
+ return 0;
+}
+
+static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ if (ecmd->port != PORT_TP && ecmd->port != PORT_MII && ecmd->port != PORT_FIBRE)
+ return -EINVAL;
+ if (ecmd->transceiver != XCVR_INTERNAL && ecmd->transceiver != XCVR_EXTERNAL)
+ return -EINVAL;
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full)) == 0) {
+ return -EINVAL;
+ }
+ } else if (ecmd->autoneg == AUTONEG_DISABLE) {
+ u32 speed = ethtool_cmd_speed(ecmd);
+ if (speed != SPEED_10 && speed != SPEED_100)
+ return -EINVAL;
+ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ /*
+ * If we're ignoring the PHY then autoneg and the internal
+ * transceiver are really not going to work so don't let the
+ * user select them.
+ */
+ if (np->ignore_phy && (ecmd->autoneg == AUTONEG_ENABLE ||
+ ecmd->port == PORT_TP))
+ return -EINVAL;
+
+ /*
+ * maxtxpkt, maxrxpkt: ignored for now.
+ *
+ * transceiver:
+ * PORT_TP is always XCVR_INTERNAL, PORT_MII and PORT_FIBRE are always
+ * XCVR_EXTERNAL. The implementation thus ignores ecmd->transceiver and
+ * selects based on ecmd->port.
+ *
+ * Actually PORT_FIBRE is nearly identical to PORT_MII: it's for fibre
+ * phys that are connected to the mii bus. It's used to apply fibre
+ * specific updates.
+ */
+
+ /* WHEW! now let's bang some bits */
+
+ /* save the parms */
+ dev->if_port = ecmd->port;
+ np->autoneg = ecmd->autoneg;
+ np->phy_addr_external = ecmd->phy_address & PhyAddrMask;
+ if (np->autoneg == AUTONEG_ENABLE) {
+ /* advertise only what has been requested */
+ np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ if (ecmd->advertising & ADVERTISED_10baseT_Half)
+ np->advertising |= ADVERTISE_10HALF;
+ if (ecmd->advertising & ADVERTISED_10baseT_Full)
+ np->advertising |= ADVERTISE_10FULL;
+ if (ecmd->advertising & ADVERTISED_100baseT_Half)
+ np->advertising |= ADVERTISE_100HALF;
+ if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ np->advertising |= ADVERTISE_100FULL;
+ } else {
+ np->speed = ethtool_cmd_speed(ecmd);
+ np->duplex = ecmd->duplex;
+ /* user overriding the initial full duplex parm? */
+ if (np->duplex == DUPLEX_HALF)
+ np->full_duplex = 0;
+ }
+
+ /* get the right phy enabled */
+ if (ecmd->port == PORT_TP)
+ switch_port_internal(dev);
+ else
+ switch_port_external(dev);
+
+ /* set parms and see how this affected our link status */
+ init_phy_fixup(dev);
+ check_link(dev);
+ return 0;
+}
+
+static int netdev_get_regs(struct net_device *dev, u8 *buf)
+{
+ int i;
+ int j;
+ u32 rfcr;
+ u32 *rbuf = (u32 *)buf;
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ /* read non-mii page 0 of registers */
+ for (i = 0; i < NATSEMI_PG0_NREGS/2; i++) {
+ rbuf[i] = readl(ioaddr + i*4);
+ }
+
+ /* read current mii registers */
+ for (i = NATSEMI_PG0_NREGS/2; i < NATSEMI_PG0_NREGS; i++)
+ rbuf[i] = mdio_read(dev, i & 0x1f);
+
+ /* read only the 'magic' registers from page 1 */
+ writew(1, ioaddr + PGSEL);
+ rbuf[i++] = readw(ioaddr + PMDCSR);
+ rbuf[i++] = readw(ioaddr + TSTDAT);
+ rbuf[i++] = readw(ioaddr + DSPCFG);
+ rbuf[i++] = readw(ioaddr + SDCFG);
+ writew(0, ioaddr + PGSEL);
+
+ /* read RFCR indexed registers */
+ rfcr = readl(ioaddr + RxFilterAddr);
+ for (j = 0; j < NATSEMI_RFDR_NREGS; j++) {
+ writel(j*2, ioaddr + RxFilterAddr);
+ rbuf[i++] = readw(ioaddr + RxFilterData);
+ }
+ writel(rfcr, ioaddr + RxFilterAddr);
+
+ /* the interrupt status is clear-on-read - see if we missed any */
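+ /* rbuf[4] and rbuf[5] are the IntrStatus and IntrMask copies read
+ * above (register offsets 0x10 and 0x14), so a nonzero AND means an
+ * enabled interrupt was pending while we dumped the registers. */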
+ if (rbuf[4] & rbuf[5]) {
+ printk(KERN_WARNING
+ "%s: shoot, we dropped an interrupt (%#08x)\n",
+ dev->name, rbuf[4] & rbuf[5]);
+ }
+
+ return 0;
+}
+
+#define SWAP_BITS(x) ( (((x) & 0x0001) << 15) | (((x) & 0x0002) << 13) \
+ | (((x) & 0x0004) << 11) | (((x) & 0x0008) << 9) \
+ | (((x) & 0x0010) << 7) | (((x) & 0x0020) << 5) \
+ | (((x) & 0x0040) << 3) | (((x) & 0x0080) << 1) \
+ | (((x) & 0x0100) >> 1) | (((x) & 0x0200) >> 3) \
+ | (((x) & 0x0400) >> 5) | (((x) & 0x0800) >> 7) \
+ | (((x) & 0x1000) >> 9) | (((x) & 0x2000) >> 11) \
+ | (((x) & 0x4000) >> 13) | (((x) & 0x8000) >> 15) )
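+
+/* SWAP_BITS reverses the bit order of a 16 bit word: bit n moves to bit
+ * 15-n, e.g. SWAP_BITS(0x8000) == 0x0001 and SWAP_BITS(0x1234) == 0x2c48. */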
+
+static int netdev_get_eeprom(struct net_device *dev, u8 *buf)
+{
+ int i;
+ u16 *ebuf = (u16 *)buf;
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ struct netdev_private *np = netdev_priv(dev);
+
+ /* eeprom_read reads 16 bits, and indexes by 16 bits */
+ for (i = 0; i < np->eeprom_size/2; i++) {
+ ebuf[i] = eeprom_read(ioaddr, i);
+ /* The EEPROM itself stores data bit-swapped, but eeprom_read
+ * reads it back "sanely". So we swap it back here in order to
+ * present it to userland as it is stored. */
+ ebuf[i] = SWAP_BITS(ebuf[i]);
+ }
+ return 0;
+}
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(rq);
+ struct netdev_private *np = netdev_priv(dev);
+
+ switch(cmd) {
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ data->phy_id = np->phy_addr_external;
+ /* Fall Through */
+
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ /* The phy_id is not enough to uniquely identify
+ * the intended target. Therefore the command is sent to
+ * the given mii on the current port.
+ */
+ if (dev->if_port == PORT_TP) {
+ if ((data->phy_id & 0x1f) == np->phy_addr_external)
+ data->val_out = mdio_read(dev,
+ data->reg_num & 0x1f);
+ else
+ data->val_out = 0;
+ } else {
+ move_int_phy(dev, data->phy_id & 0x1f);
+ data->val_out = miiport_read(dev, data->phy_id & 0x1f,
+ data->reg_num & 0x1f);
+ }
+ return 0;
+
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ if (dev->if_port == PORT_TP) {
+ if ((data->phy_id & 0x1f) == np->phy_addr_external) {
+ if ((data->reg_num & 0x1f) == MII_ADVERTISE)
+ np->advertising = data->val_in;
+ mdio_write(dev, data->reg_num & 0x1f,
+ data->val_in);
+ }
+ } else {
+ if ((data->phy_id & 0x1f) == np->phy_addr_external) {
+ if ((data->reg_num & 0x1f) == MII_ADVERTISE)
+ np->advertising = data->val_in;
+ }
+ move_int_phy(dev, data->phy_id & 0x1f);
+ miiport_write(dev, data->phy_id & 0x1f,
+ data->reg_num & 0x1f,
+ data->val_in);
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void enable_wol_mode(struct net_device *dev, int enable_intr)
+{
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ struct netdev_private *np = netdev_priv(dev);
+
+ if (netif_msg_wol(np))
+ printk(KERN_INFO "%s: remaining active for wake-on-lan\n",
+ dev->name);
+
+ /* For WOL we must restart the rx process in silent mode.
+ * Write NULL to the RxRingPtr. This is only possible if
+ * the rx process is stopped.
+ */
+ writel(0, ioaddr + RxRingPtr);
+
+ /* read WoL status to clear */
+ readl(ioaddr + WOLCmd);
+
+ /* PME on, clear status */
+ writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun);
+
+ /* and restart the rx process */
+ writel(RxOn, ioaddr + ChipCmd);
+
+ if (enable_intr) {
+ /* enable the WOL interrupt.
+ * Could be used to send a netlink message.
+ */
+ writel(WOLPkt | LinkChange, ioaddr + IntrMask);
+ natsemi_irq_enable(dev);
+ }
+}
+
+static int netdev_close(struct net_device *dev)
+{
+ void __iomem * ioaddr = ns_ioaddr(dev);
+ struct netdev_private *np = netdev_priv(dev);
+
+ if (netif_msg_ifdown(np))
+ printk(KERN_DEBUG
+ "%s: Shutting down ethercard, status was %#04x.\n",
+ dev->name, (int)readl(ioaddr + ChipCmd));
+ if (netif_msg_pktdata(np))
+ printk(KERN_DEBUG
+ "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx,
+ np->cur_rx, np->dirty_rx);
+
+ napi_disable(&np->napi);
+
+ /*
+ * FIXME: what if someone tries to close a device
+ * that is suspended?
+ * Should we reenable the nic to switch to
+ * the final WOL settings?
+ */
+
+ del_timer_sync(&np->timer);
+ disable_irq(dev->irq);
+ spin_lock_irq(&np->lock);
+ natsemi_irq_disable(dev);
+ np->hands_off = 1;
+ spin_unlock_irq(&np->lock);
+ enable_irq(dev->irq);
+
+ free_irq(dev->irq, dev);
+
+ /* Interrupt disabled, interrupt handler released,
+ * queue stopped, timer deleted, rtnl_lock held
+ * All async codepaths that access the driver are disabled.
+ */
+ spin_lock_irq(&np->lock);
+ np->hands_off = 0;
+ readl(ioaddr + IntrMask);
+ readw(ioaddr + MIntrStatus);
+
+ /* Freeze Stats */
+ writel(StatsFreeze, ioaddr + StatsCtrl);
+
+ /* Stop the chip's Tx and Rx processes. */
+ natsemi_stop_rxtx(dev);
+
+ __get_stats(dev);
+ spin_unlock_irq(&np->lock);
+
+ /* clear the carrier last - an interrupt could reenable it otherwise */
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+
+ dump_ring(dev);
+ drain_ring(dev);
+ free_ring(dev);
+
+ {
+ u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
+ if (wol) {
+ /* restart the NIC in WOL mode.
+ * The nic must be stopped for this.
+ */
+ enable_wol_mode(dev, 0);
+ } else {
+ /* Restore PME enable bit unmolested */
+ writel(np->SavedClkRun, ioaddr + ClkRun);
+ }
+ }
+ return 0;
+}
+
+
+static void __devexit natsemi_remove1 (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround);
+ unregister_netdev (dev);
+ pci_release_regions (pdev);
+ iounmap(ioaddr);
+ free_netdev (dev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM
+
+/*
+ * The ns83815 chip doesn't have explicit RxStop bits.
+ * Kicking the Rx or Tx process for a new packet reenables the Rx process
+ * of the nic, thus this function must be very careful:
+ *
+ * suspend/resume synchronization:
+ * entry points:
+ * netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler,
+ * start_tx, ns_tx_timeout
+ *
+ * No function accesses the hardware without checking np->hands_off.
+ * the check occurs under spin_lock_irq(&np->lock);
+ * exceptions:
+ * * netdev_ioctl: noncritical access.
+ * * netdev_open: cannot happen due to the device_detach
+ * * netdev_close: doesn't hurt.
+ * * netdev_timer: timer stopped by natsemi_suspend.
+ * * intr_handler: doesn't acquire the spinlock. suspend calls
+ * disable_irq() to enforce synchronization.
+ * * natsemi_poll: checks before reenabling interrupts. suspend
+ * sets hands_off, disables interrupts and then waits with
+ * napi_disable().
+ *
+ * Interrupts must be disabled, otherwise hands_off can cause irq storms.
+ */
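+
+/*
+ * The resulting pattern in each hardware-touching entry point is roughly
+ * (a sketch of the convention, not literal code from any one function):
+ *
+ *	spin_lock_irq(&np->lock);
+ *	if (!np->hands_off)
+ *		... access registers ...
+ *	spin_unlock_irq(&np->lock);
+ */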
+
+static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem * ioaddr = ns_ioaddr(dev);
+
+ rtnl_lock();
+ if (netif_running (dev)) {
+ del_timer_sync(&np->timer);
+
+ disable_irq(dev->irq);
+ spin_lock_irq(&np->lock);
+
+ natsemi_irq_disable(dev);
+ np->hands_off = 1;
+ natsemi_stop_rxtx(dev);
+ netif_stop_queue(dev);
+
+ spin_unlock_irq(&np->lock);
+ enable_irq(dev->irq);
+
+ napi_disable(&np->napi);
+
+ /* Update the error counts. */
+ __get_stats(dev);
+
+ /* pci_power_off(pdev, -1); */
+ drain_ring(dev);
+ {
+ u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
+ /* Restore PME enable bit */
+ if (wol) {
+ /* restart the NIC in WOL mode.
+ * The nic must be stopped for this.
+ * FIXME: use the WOL interrupt
+ */
+ enable_wol_mode(dev, 0);
+ } else {
+ /* Restore PME enable bit unmolested */
+ writel(np->SavedClkRun, ioaddr + ClkRun);
+ }
+ }
+ }
+ netif_device_detach(dev);
+ rtnl_unlock();
+ return 0;
+}
+
+
+static int natsemi_resume (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct netdev_private *np = netdev_priv(dev);
+ int ret = 0;
+
+ rtnl_lock();
+ if (netif_device_present(dev))
+ goto out;
+ if (netif_running(dev)) {
+ BUG_ON(!np->hands_off);
+ ret = pci_enable_device(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "pci_enable_device() failed: %d\n", ret);
+ goto out;
+ }
+ /* pci_power_on(pdev); */
+
+ napi_enable(&np->napi);
+
+ natsemi_reset(dev);
+ init_ring(dev);
+ disable_irq(dev->irq);
+ spin_lock_irq(&np->lock);
+ np->hands_off = 0;
+ init_registers(dev);
+ netif_device_attach(dev);
+ spin_unlock_irq(&np->lock);
+ enable_irq(dev->irq);
+
+ mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ));
+ }
+ netif_device_attach(dev);
+out:
+ rtnl_unlock();
+ return ret;
+}
+
+#endif /* CONFIG_PM */
+
+static struct pci_driver natsemi_driver = {
+ .name = DRV_NAME,
+ .id_table = natsemi_pci_tbl,
+ .probe = natsemi_probe1,
+ .remove = __devexit_p(natsemi_remove1),
+#ifdef CONFIG_PM
+ .suspend = natsemi_suspend,
+ .resume = natsemi_resume,
+#endif
+};
+
+static int __init natsemi_init_mod (void)
+{
+/* when built as a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk(version);
+#endif
+
+ return pci_register_driver(&natsemi_driver);
+}
+
+static void __exit natsemi_exit_mod (void)
+{
+ pci_unregister_driver (&natsemi_driver);
+}
+
+module_init(natsemi_init_mod);
+module_exit(natsemi_exit_mod);
+
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
new file mode 100644
index 000000000000..73616b911327
--- /dev/null
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -0,0 +1,2311 @@
+#define VERSION "0.23"
+/* ns83820.c by Benjamin LaHaise with contributions.
+ *
+ * Questions/comments/discussion to linux-ns83820@kvack.org.
+ *
+ * $Revision: 1.34.2.23 $
+ *
+ * Copyright 2001 Benjamin LaHaise.
+ * Copyright 2001, 2002 Red Hat.
+ *
+ * Mmmm, chocolate vanilla mocha...
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * ChangeLog
+ * =========
+ * 20010414 0.1 - created
+ * 20010622 0.2 - basic rx and tx.
+ * 20010711 0.3 - added duplex and link state detection support.
+ * 20010713 0.4 - zero copy, no hangs.
+ * 0.5 - 64 bit dma support (davem will hate me for this)
+ * - disable jumbo frames to avoid tx hangs
+ * - work around tx deadlocks on my 1.02 card via
+ * fiddling with TXCFG
+ * 20010810 0.6 - use pci dma api for ringbuffers, work on ia64
+ * 20010816 0.7 - misc cleanups
+ * 20010826 0.8 - fix critical zero copy bugs
+ * 0.9 - internal experiment
+ * 20010827 0.10 - fix ia64 unaligned access.
+ * 20010906 0.11 - accept all packets with checksum errors as
+ * otherwise fragments get lost
+ * - fix >> 32 bugs
+ * 0.12 - add statistics counters
+ * - add allmulti/promisc support
+ * 20011009 0.13 - hotplug support, other smaller pci api cleanups
+ * 20011204 0.13a - optical transceiver support added
+ * by Michael Clark <michael@metaparadigm.com>
+ * 20011205 0.13b - call register_netdev earlier in initialization
+ * suppress duplicate link status messages
+ * 20011117 0.14 - ethtool GDRVINFO, GLINK support from jgarzik
+ * 20011204 0.15 get ppc (big endian) working
+ * 20011218 0.16 various cleanups
+ * 20020310 0.17 speedups
+ * 20020610 0.18 - actually use the pci dma api for highmem
+ * - remove pci latency register fiddling
+ * 0.19 - better bist support
+ * - add ihr and reset_phy parameters
+ * - gmii bus probing
+ * - fix missed txok introduced during performance
+ * tuning
+ * 0.20 - fix stupid RFEN thinko. i am such a smurf.
+ * 20040828 0.21 - add hardware vlan acceleration
+ * by Neil Horman <nhorman@redhat.com>
+ * 20050406 0.22 - improved DAC ifdefs from Andi Kleen
+ * - removal of dead code from Adrian Bunk
+ * - fix half duplex collision behaviour
+ * Driver Overview
+ * ===============
+ *
+ * This driver was originally written for the National Semiconductor
+ * 83820 chip, a 10/100/1000 Mbps 64 bit PCI ethernet NIC. Hopefully
+ * this code will turn out to be a) clean, b) correct, and c) fast.
+ * With that in mind, I'm aiming to split the code up as much as
+ * reasonably possible. At present there are four major sections that
+ * break down into a) packet receive, b) packet transmit, c) link
+ * management, d) initialization and configuration. Where possible,
+ * these code paths are designed to run in parallel.
+ *
+ * This driver has been tested and found to work with the following
+ * cards (in no particular order):
+ *
+ * Cameo SOHO-GA2000T SOHO-GA2500T
+ * D-Link DGE-500T
+ * PureData PDP8023Z-TG
+ * SMC SMC9452TX SMC9462TX
+ * Netgear GA621
+ *
+ * Special thanks to SMC for providing hardware to test this driver on.
+ *
+ * Reports of success or failure would be greatly appreciated.
+ */
+//#define dprintk printk
+#define dprintk(x...) do { } while (0)
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h> /* for iph */
+#include <linux/in.h> /* for IPPROTO_... */
+#include <linux/compiler.h>
+#include <linux/prefetch.h>
+#include <linux/ethtool.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/if_vlan.h>
+#include <linux/rtnetlink.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#define DRV_NAME "ns83820"
+
+/* Global parameters. See module_param near the bottom. */
+static int ihr = 2;
+static int reset_phy = 0;
+static int lnksts = 0; /* CFG_LNKSTS bit polarity */
+
+/* Dprintk is used for more interesting debug events */
+#undef Dprintk
+#define Dprintk dprintk
+
+/* tunables */
+#define RX_BUF_SIZE 1500 /* 8192 */
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define NS83820_VLAN_ACCEL_SUPPORT
+#endif
+
+/* Must not exceed ~65000. */
+#define NR_RX_DESC 64
+#define NR_TX_DESC 128
+
+/* not tunable */
+#define REAL_RX_BUF_SIZE (RX_BUF_SIZE + 14) /* dst/src mac addrs + type */
+
+#define MIN_TX_DESC_FREE 8
+
+/* register defines */
+#define CFGCS 0x04
+
+#define CR_TXE 0x00000001
+#define CR_TXD 0x00000002
+/* Ramit : Here's a tip, don't do a RXD immediately followed by an RXE
+ * The Receive engine skips one descriptor and moves
+ * onto the next one!! */
+#define CR_RXE 0x00000004
+#define CR_RXD 0x00000008
+#define CR_TXR 0x00000010
+#define CR_RXR 0x00000020
+#define CR_SWI 0x00000080
+#define CR_RST 0x00000100
+
+#define PTSCR_EEBIST_FAIL 0x00000001
+#define PTSCR_EEBIST_EN 0x00000002
+#define PTSCR_EELOAD_EN 0x00000004
+#define PTSCR_RBIST_FAIL 0x000001b8
+#define PTSCR_RBIST_DONE 0x00000200
+#define PTSCR_RBIST_EN 0x00000400
+#define PTSCR_RBIST_RST 0x00002000
+
+#define MEAR_EEDI 0x00000001
+#define MEAR_EEDO 0x00000002
+#define MEAR_EECLK 0x00000004
+#define MEAR_EESEL 0x00000008
+#define MEAR_MDIO 0x00000010
+#define MEAR_MDDIR 0x00000020
+#define MEAR_MDC 0x00000040
+
+#define ISR_TXDESC3 0x40000000
+#define ISR_TXDESC2 0x20000000
+#define ISR_TXDESC1 0x10000000
+#define ISR_TXDESC0 0x08000000
+#define ISR_RXDESC3 0x04000000
+#define ISR_RXDESC2 0x02000000
+#define ISR_RXDESC1 0x01000000
+#define ISR_RXDESC0 0x00800000
+#define ISR_TXRCMP 0x00400000
+#define ISR_RXRCMP 0x00200000
+#define ISR_DPERR 0x00100000
+#define ISR_SSERR 0x00080000
+#define ISR_RMABT 0x00040000
+#define ISR_RTABT 0x00020000
+#define ISR_RXSOVR 0x00010000
+#define ISR_HIBINT 0x00008000
+#define ISR_PHY 0x00004000
+#define ISR_PME 0x00002000
+#define ISR_SWI 0x00001000
+#define ISR_MIB 0x00000800
+#define ISR_TXURN 0x00000400
+#define ISR_TXIDLE 0x00000200
+#define ISR_TXERR 0x00000100
+#define ISR_TXDESC 0x00000080
+#define ISR_TXOK 0x00000040
+#define ISR_RXORN 0x00000020
+#define ISR_RXIDLE 0x00000010
+#define ISR_RXEARLY 0x00000008
+#define ISR_RXERR 0x00000004
+#define ISR_RXDESC 0x00000002
+#define ISR_RXOK 0x00000001
+
+#define TXCFG_CSI 0x80000000
+#define TXCFG_HBI 0x40000000
+#define TXCFG_MLB 0x20000000
+#define TXCFG_ATP 0x10000000
+#define TXCFG_ECRETRY 0x00800000
+#define TXCFG_BRST_DIS 0x00080000
+#define TXCFG_MXDMA1024 0x00000000
+#define TXCFG_MXDMA512 0x00700000
+#define TXCFG_MXDMA256 0x00600000
+#define TXCFG_MXDMA128 0x00500000
+#define TXCFG_MXDMA64 0x00400000
+#define TXCFG_MXDMA32 0x00300000
+#define TXCFG_MXDMA16 0x00200000
+#define TXCFG_MXDMA8 0x00100000
+
+#define CFG_LNKSTS 0x80000000
+#define CFG_SPDSTS 0x60000000
+#define CFG_SPDSTS1 0x40000000
+#define CFG_SPDSTS0 0x20000000
+#define CFG_DUPSTS 0x10000000
+#define CFG_TBI_EN 0x01000000
+#define CFG_MODE_1000 0x00400000
+/* Ramit : Don't ever use AUTO_1000, it never works and is buggy.
+ * Read the Phy response and then configure the MAC accordingly */
+#define CFG_AUTO_1000 0x00200000
+#define CFG_PINT_CTL 0x001c0000
+#define CFG_PINT_DUPSTS 0x00100000
+#define CFG_PINT_LNKSTS 0x00080000
+#define CFG_PINT_SPDSTS 0x00040000
+#define CFG_TMRTEST 0x00020000
+#define CFG_MRM_DIS 0x00010000
+#define CFG_MWI_DIS 0x00008000
+#define CFG_T64ADDR 0x00004000
+#define CFG_PCI64_DET 0x00002000
+#define CFG_DATA64_EN 0x00001000
+#define CFG_M64ADDR 0x00000800
+#define CFG_PHY_RST 0x00000400
+#define CFG_PHY_DIS 0x00000200
+#define CFG_EXTSTS_EN 0x00000100
+#define CFG_REQALG 0x00000080
+#define CFG_SB 0x00000040
+#define CFG_POW 0x00000020
+#define CFG_EXD 0x00000010
+#define CFG_PESEL 0x00000008
+#define CFG_BROM_DIS 0x00000004
+#define CFG_EXT_125 0x00000002
+#define CFG_BEM 0x00000001
+
+#define EXTSTS_UDPPKT 0x00200000
+#define EXTSTS_TCPPKT 0x00080000
+#define EXTSTS_IPPKT 0x00020000
+#define EXTSTS_VPKT 0x00010000
+#define EXTSTS_VTG_MASK 0x0000ffff
+
+#define SPDSTS_POLARITY (CFG_SPDSTS1 | CFG_SPDSTS0 | CFG_DUPSTS | (lnksts ? CFG_LNKSTS : 0))
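+/* readl(base + CFG) is XORed with SPDSTS_POLARITY so that the speed, duplex
+ * and (depending on the lnksts module parameter) link status bits all read
+ * back as active-high, whatever the chip's native polarity. */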
+
+#define MIBC_MIBS 0x00000008
+#define MIBC_ACLR 0x00000004
+#define MIBC_FRZ 0x00000002
+#define MIBC_WRN 0x00000001
+
+#define PCR_PSEN (1 << 31)
+#define PCR_PS_MCAST (1 << 30)
+#define PCR_PS_DA (1 << 29)
+#define PCR_STHI_8 (3 << 23)
+#define PCR_STLO_4 (1 << 23)
+#define PCR_FFHI_8K (3 << 21)
+#define PCR_FFLO_4K (1 << 21)
+#define PCR_PAUSE_CNT 0xFFFE
+
+#define RXCFG_AEP 0x80000000
+#define RXCFG_ARP 0x40000000
+#define RXCFG_STRIPCRC 0x20000000
+#define RXCFG_RX_FD 0x10000000
+#define RXCFG_ALP 0x08000000
+#define RXCFG_AIRL 0x04000000
+#define RXCFG_MXDMA512 0x00700000
+#define RXCFG_DRTH 0x0000003e
+#define RXCFG_DRTH0 0x00000002
+
+#define RFCR_RFEN 0x80000000
+#define RFCR_AAB 0x40000000
+#define RFCR_AAM 0x20000000
+#define RFCR_AAU 0x10000000
+#define RFCR_APM 0x08000000
+#define RFCR_APAT 0x07800000
+#define RFCR_APAT3 0x04000000
+#define RFCR_APAT2 0x02000000
+#define RFCR_APAT1 0x01000000
+#define RFCR_APAT0 0x00800000
+#define RFCR_AARP 0x00400000
+#define RFCR_MHEN 0x00200000
+#define RFCR_UHEN 0x00100000
+#define RFCR_ULM 0x00080000
+
+#define VRCR_RUDPE 0x00000080
+#define VRCR_RTCPE 0x00000040
+#define VRCR_RIPE 0x00000020
+#define VRCR_IPEN 0x00000010
+#define VRCR_DUTF 0x00000008
+#define VRCR_DVTF 0x00000004
+#define VRCR_VTREN 0x00000002
+#define VRCR_VTDEN 0x00000001
+
+#define VTCR_PPCHK 0x00000008
+#define VTCR_GCHK 0x00000004
+#define VTCR_VPPTI 0x00000002
+#define VTCR_VGTI 0x00000001
+
+#define CR 0x00
+#define CFG 0x04
+#define MEAR 0x08
+#define PTSCR 0x0c
+#define ISR 0x10
+#define IMR 0x14
+#define IER 0x18
+#define IHR 0x1c
+#define TXDP 0x20
+#define TXDP_HI 0x24
+#define TXCFG 0x28
+#define GPIOR 0x2c
+#define RXDP 0x30
+#define RXDP_HI 0x34
+#define RXCFG 0x38
+#define PQCR 0x3c
+#define WCSR 0x40
+#define PCR 0x44
+#define RFCR 0x48
+#define RFDR 0x4c
+
+#define SRR 0x58
+
+#define VRCR 0xbc
+#define VTCR 0xc0
+#define VDR 0xc4
+#define CCSR 0xcc
+
+#define TBICR 0xe0
+#define TBISR 0xe4
+#define TANAR 0xe8
+#define TANLPAR 0xec
+#define TANER 0xf0
+#define TESR 0xf4
+
+#define TBICR_MR_AN_ENABLE 0x00001000
+#define TBICR_MR_RESTART_AN 0x00000200
+
+#define TBISR_MR_LINK_STATUS 0x00000020
+#define TBISR_MR_AN_COMPLETE 0x00000004
+
+#define TANAR_PS2 0x00000100
+#define TANAR_PS1 0x00000080
+#define TANAR_HALF_DUP 0x00000040
+#define TANAR_FULL_DUP 0x00000020
+
+#define GPIOR_GP5_OE 0x00000200
+#define GPIOR_GP4_OE 0x00000100
+#define GPIOR_GP3_OE 0x00000080
+#define GPIOR_GP2_OE 0x00000040
+#define GPIOR_GP1_OE 0x00000020
+#define GPIOR_GP3_OUT 0x00000004
+#define GPIOR_GP1_OUT 0x00000001
+
+#define LINK_AUTONEGOTIATE 0x01
+#define LINK_DOWN 0x02
+#define LINK_UP 0x04
+
+#define HW_ADDR_LEN sizeof(dma_addr_t)
+#define desc_addr_set(desc, addr) \
+ do { \
+ ((desc)[0] = cpu_to_le32(addr)); \
+ if (HW_ADDR_LEN == 8) \
+ (desc)[1] = cpu_to_le32(((u64)addr) >> 32); \
+ } while(0)
+#define desc_addr_get(desc) \
+ (le32_to_cpu((desc)[0]) | \
+ (HW_ADDR_LEN == 8 ? ((dma_addr_t)le32_to_cpu((desc)[1]))<<32 : 0))
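+
+/* e.g. desc_addr_set(desc + DESC_BUFPTR, buf) stores buf as one little
+ * endian word (32 bit dma_addr_t) or two, low word first (64 bit
+ * dma_addr_t); desc_addr_get(desc + DESC_BUFPTR) recovers it either way. */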
+
+#define DESC_LINK 0
+#define DESC_BUFPTR (DESC_LINK + HW_ADDR_LEN/4)
+#define DESC_CMDSTS (DESC_BUFPTR + HW_ADDR_LEN/4)
+#define DESC_EXTSTS (DESC_CMDSTS + 4/4)
+
+#define CMDSTS_OWN 0x80000000
+#define CMDSTS_MORE 0x40000000
+#define CMDSTS_INTR 0x20000000
+#define CMDSTS_ERR 0x10000000
+#define CMDSTS_OK 0x08000000
+#define CMDSTS_RUNT 0x00200000
+#define CMDSTS_LEN_MASK 0x0000ffff
+
+#define CMDSTS_DEST_MASK 0x01800000
+#define CMDSTS_DEST_SELF 0x00800000
+#define CMDSTS_DEST_MULTI 0x01000000
+
+#define DESC_SIZE 8 /* Should be cache line sized */
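+/* Word offsets within a descriptor for 32 bit vs 64 bit dma_addr_t:
+ * link = 0 (or 0-1), bufptr = 1 (or 2-3), cmdsts = 2 (or 4), extsts = 3
+ * (or 5); the remaining DESC_SIZE words are alignment padding. */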
+
+struct rx_info {
+ spinlock_t lock;
+ int up;
+ unsigned long idle;
+
+ struct sk_buff *skbs[NR_RX_DESC];
+
+ __le32 *next_rx_desc;
+ u16 next_rx, next_empty;
+
+ __le32 *descs;
+ dma_addr_t phy_descs;
+};
+
+
+struct ns83820 {
+ u8 __iomem *base;
+
+ struct pci_dev *pci_dev;
+ struct net_device *ndev;
+
+ struct rx_info rx_info;
+ struct tasklet_struct rx_tasklet;
+
+ unsigned ihr;
+ struct work_struct tq_refill;
+
+ /* protects everything below. irqsave when using. */
+ spinlock_t misc_lock;
+
+ u32 CFG_cache;
+
+ u32 MEAR_cache;
+ u32 IMR_cache;
+
+ unsigned linkstate;
+
+ spinlock_t tx_lock;
+
+ u16 tx_done_idx;
+ u16 tx_idx;
+ volatile u16 tx_free_idx; /* idx of free desc chain */
+ u16 tx_intr_idx;
+
+ atomic_t nr_tx_skbs;
+ struct sk_buff *tx_skbs[NR_TX_DESC];
+
+ char pad[16] __attribute__((aligned(16)));
+ __le32 *tx_descs;
+ dma_addr_t tx_phy_descs;
+
+ struct timer_list tx_watchdog;
+};
+
+static inline struct ns83820 *PRIV(struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+#define __kick_rx(dev) writel(CR_RXE, dev->base + CR)
+
+static inline void kick_rx(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ dprintk("kick_rx: maybe kicking\n");
+ if (test_and_clear_bit(0, &dev->rx_info.idle)) {
+ dprintk("actually kicking\n");
+ writel(dev->rx_info.phy_descs +
+ (4 * DESC_SIZE * dev->rx_info.next_rx),
+ dev->base + RXDP);
+ if (dev->rx_info.next_rx == dev->rx_info.next_empty)
+ printk(KERN_DEBUG "%s: uh-oh: next_rx == next_empty???\n",
+ ndev->name);
+ __kick_rx(dev);
+ }
+}
+
+//free = (tx_done_idx + NR_TX_DESC-2 - free_idx) % NR_TX_DESC
+#define start_tx_okay(dev) \
+ (((NR_TX_DESC-2 + dev->tx_done_idx - dev->tx_free_idx) % NR_TX_DESC) > MIN_TX_DESC_FREE)
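+/* e.g. with NR_TX_DESC == 128, tx_done_idx == 10 and tx_free_idx == 120:
+ * (126 + 10 - 120) % 128 == 16 free descriptors > MIN_TX_DESC_FREE, ok. */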
+
+/* Packet Receiver
+ *
+ * The hardware supports linked lists of receive descriptors for
+ * which ownership is transferred back and forth by means of an
+ * ownership bit. While the hardware does support the use of a
+ * ring for receive descriptors, we only make use of a chain in
+ * an attempt to reduce bus traffic under heavy load scenarios.
+ * This will also make bugs a bit more obvious. The current code
+ * only makes use of a single rx chain; I hope to implement
+ * priority based rx for version 1.0. Goal: even under overload
+ * conditions, still route realtime traffic with as low jitter as
+ * possible.
+ */
+static inline void build_rx_desc(struct ns83820 *dev, __le32 *desc, dma_addr_t link, dma_addr_t buf, u32 cmdsts, u32 extsts)
+{
+ desc_addr_set(desc + DESC_LINK, link);
+ desc_addr_set(desc + DESC_BUFPTR, buf);
+ desc[DESC_EXTSTS] = cpu_to_le32(extsts);
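+ /* full barrier: the link/buf/extsts stores must be visible before
+ * the cmdsts write below makes the descriptor valid to the chip */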
+ mb();
+ desc[DESC_CMDSTS] = cpu_to_le32(cmdsts);
+}
+
+#define nr_rx_empty(dev) ((NR_RX_DESC-2 + dev->rx_info.next_rx - dev->rx_info.next_empty) % NR_RX_DESC)
+static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
+{
+ unsigned next_empty;
+ u32 cmdsts;
+ __le32 *sg;
+ dma_addr_t buf;
+
+ next_empty = dev->rx_info.next_empty;
+
+ /* don't overrun last rx marker */
+ if (unlikely(nr_rx_empty(dev) <= 2)) {
+ kfree_skb(skb);
+ return 1;
+ }
+
+#if 0
+ dprintk("next_empty[%d] nr_used[%d] next_rx[%d]\n",
+ dev->rx_info.next_empty,
+ dev->rx_info.nr_used,
+ dev->rx_info.next_rx
+ );
+#endif
+
+ sg = dev->rx_info.descs + (next_empty * DESC_SIZE);
+ BUG_ON(NULL != dev->rx_info.skbs[next_empty]);
+ dev->rx_info.skbs[next_empty] = skb;
+
+ dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC;
+ cmdsts = REAL_RX_BUF_SIZE | CMDSTS_INTR;
+ buf = pci_map_single(dev->pci_dev, skb->data,
+ REAL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ build_rx_desc(dev, sg, 0, buf, cmdsts, 0);
+ /* update link of previous rx */
+ if (likely(next_empty != dev->rx_info.next_rx))
+ dev->rx_info.descs[((NR_RX_DESC + next_empty - 1) % NR_RX_DESC) * DESC_SIZE] = cpu_to_le32(dev->rx_info.phy_descs + (next_empty * DESC_SIZE * 4));
+
+ return 0;
+}
+
+static inline int rx_refill(struct net_device *ndev, gfp_t gfp)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ unsigned i;
+ unsigned long flags = 0;
+
+ if (unlikely(nr_rx_empty(dev) <= 2))
+ return 0;
+
+ dprintk("rx_refill(%p)\n", ndev);
+ if (gfp == GFP_ATOMIC)
+ spin_lock_irqsave(&dev->rx_info.lock, flags);
+ for (i=0; i<NR_RX_DESC; i++) {
+ struct sk_buff *skb;
+ long res;
+
+ /* extra 16 bytes for alignment */
+ skb = __netdev_alloc_skb(ndev, REAL_RX_BUF_SIZE+16, gfp);
+ if (unlikely(!skb))
+ break;
+
+ skb_reserve(skb, PTR_ALIGN(skb->data, 16) - skb->data);
+ if (gfp != GFP_ATOMIC)
+ spin_lock_irqsave(&dev->rx_info.lock, flags);
+ res = ns83820_add_rx_skb(dev, skb);
+ if (gfp != GFP_ATOMIC)
+ spin_unlock_irqrestore(&dev->rx_info.lock, flags);
+ if (res) {
+ i = 1;
+ break;
+ }
+ }
+ if (gfp == GFP_ATOMIC)
+ spin_unlock_irqrestore(&dev->rx_info.lock, flags);
+
+ return i ? 0 : -ENOMEM;
+}
+
+static void rx_refill_atomic(struct net_device *ndev)
+{
+ rx_refill(ndev, GFP_ATOMIC);
+}
+
+/* REFILL */
+static inline void queue_refill(struct work_struct *work)
+{
+ struct ns83820 *dev = container_of(work, struct ns83820, tq_refill);
+ struct net_device *ndev = dev->ndev;
+
+ rx_refill(ndev, GFP_KERNEL);
+ if (dev->rx_info.up)
+ kick_rx(ndev);
+}
+
+static inline void clear_rx_desc(struct ns83820 *dev, unsigned i)
+{
+ build_rx_desc(dev, dev->rx_info.descs + (DESC_SIZE * i), 0, 0, CMDSTS_OWN, 0);
+}
+
+static void phy_intr(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ static const char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" };
+ u32 cfg, new_cfg;
+ u32 tbisr, tanar, tanlpar;
+ int speed, fullduplex, newlinkstate;
+
+ cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
+
+ if (dev->CFG_cache & CFG_TBI_EN) {
+ /* we have an optical transceiver */
+ tbisr = readl(dev->base + TBISR);
+ tanar = readl(dev->base + TANAR);
+ tanlpar = readl(dev->base + TANLPAR);
+ dprintk("phy_intr: tbisr=%08x, tanar=%08x, tanlpar=%08x\n",
+ tbisr, tanar, tanlpar);
+
+ if ( (fullduplex = (tanlpar & TANAR_FULL_DUP) &&
+ (tanar & TANAR_FULL_DUP)) ) {
+
+ /* both of us are full duplex */
+ writel(readl(dev->base + TXCFG)
+ | TXCFG_CSI | TXCFG_HBI | TXCFG_ATP,
+ dev->base + TXCFG);
+ writel(readl(dev->base + RXCFG) | RXCFG_RX_FD,
+ dev->base + RXCFG);
+ /* Light up full duplex LED */
+ writel(readl(dev->base + GPIOR) | GPIOR_GP1_OUT,
+ dev->base + GPIOR);
+
+ } else if (((tanlpar & TANAR_HALF_DUP) &&
+ (tanar & TANAR_HALF_DUP)) ||
+ ((tanlpar & TANAR_FULL_DUP) &&
+ (tanar & TANAR_HALF_DUP)) ||
+ ((tanlpar & TANAR_HALF_DUP) &&
+ (tanar & TANAR_FULL_DUP))) {
+
+ /* one or both of us are half duplex */
+ writel((readl(dev->base + TXCFG)
+ & ~(TXCFG_CSI | TXCFG_HBI)) | TXCFG_ATP,
+ dev->base + TXCFG);
+ writel(readl(dev->base + RXCFG) & ~RXCFG_RX_FD,
+ dev->base + RXCFG);
+ /* Turn off full duplex LED */
+ writel(readl(dev->base + GPIOR) & ~GPIOR_GP1_OUT,
+ dev->base + GPIOR);
+ }
+
+ speed = 4; /* 1000F */
+
+ } else {
+ /* we have a copper transceiver */
+ new_cfg = dev->CFG_cache & ~(CFG_SB | CFG_MODE_1000 | CFG_SPDSTS);
+
+ if (cfg & CFG_SPDSTS1)
+ new_cfg |= CFG_MODE_1000;
+ else
+ new_cfg &= ~CFG_MODE_1000;
+
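+ /* CFG_SPDSTS0 is bit 29, so the divide below is just a shift
+ * that leaves the two speed status bits as a value 0..3 */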
+ speed = ((cfg / CFG_SPDSTS0) & 3);
+ fullduplex = (cfg & CFG_DUPSTS);
+
+ if (fullduplex) {
+ new_cfg |= CFG_SB;
+ writel(readl(dev->base + TXCFG)
+ | TXCFG_CSI | TXCFG_HBI,
+ dev->base + TXCFG);
+ writel(readl(dev->base + RXCFG) | RXCFG_RX_FD,
+ dev->base + RXCFG);
+ } else {
+ writel(readl(dev->base + TXCFG)
+ & ~(TXCFG_CSI | TXCFG_HBI),
+ dev->base + TXCFG);
+ writel(readl(dev->base + RXCFG) & ~(RXCFG_RX_FD),
+ dev->base + RXCFG);
+ }
+
+ if ((cfg & CFG_LNKSTS) &&
+ ((new_cfg ^ dev->CFG_cache) != 0)) {
+ writel(new_cfg, dev->base + CFG);
+ dev->CFG_cache = new_cfg;
+ }
+
+ dev->CFG_cache &= ~CFG_SPDSTS;
+ dev->CFG_cache |= cfg & CFG_SPDSTS;
+ }
+
+ newlinkstate = (cfg & CFG_LNKSTS) ? LINK_UP : LINK_DOWN;
+
+ if (newlinkstate & LINK_UP &&
+ dev->linkstate != newlinkstate) {
+ netif_start_queue(ndev);
+ netif_wake_queue(ndev);
+ printk(KERN_INFO "%s: link now %s mbps, %s duplex and up.\n",
+ ndev->name,
+ speeds[speed],
+ fullduplex ? "full" : "half");
+ } else if (newlinkstate & LINK_DOWN &&
+ dev->linkstate != newlinkstate) {
+ netif_stop_queue(ndev);
+ printk(KERN_INFO "%s: link now down.\n", ndev->name);
+ }
+
+ dev->linkstate = newlinkstate;
+}
+
+static int ns83820_setup_rx(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ unsigned i;
+ int ret;
+
+ dprintk("ns83820_setup_rx(%p)\n", ndev);
+
+ dev->rx_info.idle = 1;
+ dev->rx_info.next_rx = 0;
+ dev->rx_info.next_rx_desc = dev->rx_info.descs;
+ dev->rx_info.next_empty = 0;
+
+ for (i=0; i<NR_RX_DESC; i++)
+ clear_rx_desc(dev, i);
+
+ writel(0, dev->base + RXDP_HI);
+ writel(dev->rx_info.phy_descs, dev->base + RXDP);
+
+ ret = rx_refill(ndev, GFP_KERNEL);
+ if (!ret) {
+ dprintk("starting receiver\n");
+ /* prevent the interrupt handler from stomping on us */
+ spin_lock_irq(&dev->rx_info.lock);
+
+ writel(0x0001, dev->base + CCSR);
+ writel(0, dev->base + RFCR);
+ writel(0x7fc00000, dev->base + RFCR);
+ writel(0xffc00000, dev->base + RFCR);
+
+ dev->rx_info.up = 1;
+
+ phy_intr(ndev);
+
+ /* Okay, let it rip */
+ spin_lock(&dev->misc_lock);
+ dev->IMR_cache |= ISR_PHY;
+ dev->IMR_cache |= ISR_RXRCMP;
+ //dev->IMR_cache |= ISR_RXERR;
+ //dev->IMR_cache |= ISR_RXOK;
+ dev->IMR_cache |= ISR_RXORN;
+ dev->IMR_cache |= ISR_RXSOVR;
+ dev->IMR_cache |= ISR_RXDESC;
+ dev->IMR_cache |= ISR_RXIDLE;
+ dev->IMR_cache |= ISR_TXDESC;
+ dev->IMR_cache |= ISR_TXIDLE;
+
+ writel(dev->IMR_cache, dev->base + IMR);
+ writel(1, dev->base + IER);
+ spin_unlock(&dev->misc_lock);
+
+ kick_rx(ndev);
+
+ spin_unlock_irq(&dev->rx_info.lock);
+ }
+ return ret;
+}
+
+static void ns83820_cleanup_rx(struct ns83820 *dev)
+{
+ unsigned i;
+ unsigned long flags;
+
+ dprintk("ns83820_cleanup_rx(%p)\n", dev);
+
+ /* disable receive interrupts */
+ spin_lock_irqsave(&dev->misc_lock, flags);
+ dev->IMR_cache &= ~(ISR_RXOK | ISR_RXDESC | ISR_RXERR | ISR_RXEARLY | ISR_RXIDLE);
+ writel(dev->IMR_cache, dev->base + IMR);
+ spin_unlock_irqrestore(&dev->misc_lock, flags);
+
+ /* synchronize with the interrupt handler and kill it */
+ dev->rx_info.up = 0;
+ synchronize_irq(dev->pci_dev->irq);
+
+ /* touch the pci bus... */
+ readl(dev->base + IMR);
+
+ /* assumes the transmitter is already disabled and reset */
+ writel(0, dev->base + RXDP_HI);
+ writel(0, dev->base + RXDP);
+
+ for (i=0; i<NR_RX_DESC; i++) {
+ struct sk_buff *skb = dev->rx_info.skbs[i];
+ dev->rx_info.skbs[i] = NULL;
+ clear_rx_desc(dev, i);
+ kfree_skb(skb);
+ }
+}
+
+static void ns83820_rx_kick(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ /*if (nr_rx_empty(dev) >= NR_RX_DESC/4)*/ {
+ if (dev->rx_info.up) {
+ rx_refill_atomic(ndev);
+ kick_rx(ndev);
+ }
+ }
+
+ if (dev->rx_info.up && nr_rx_empty(dev) > NR_RX_DESC*3/4)
+ schedule_work(&dev->tq_refill);
+ else
+ kick_rx(ndev);
+ if (dev->rx_info.idle)
+ printk(KERN_DEBUG "%s: BAD\n", ndev->name);
+}
+
+/* rx_irq
+ *
+ */
+static void rx_irq(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ struct rx_info *info = &dev->rx_info;
+ unsigned next_rx;
+ int rx_rc, len;
+ u32 cmdsts;
+ __le32 *desc;
+ unsigned long flags;
+ int nr = 0;
+
+ dprintk("rx_irq(%p)\n", ndev);
+ dprintk("rxdp: %08x, descs: %08lx next_rx[%d]: %p next_empty[%d]: %p\n",
+ readl(dev->base + RXDP),
+ (long)(dev->rx_info.phy_descs),
+ (int)dev->rx_info.next_rx,
+ (dev->rx_info.descs + (DESC_SIZE * dev->rx_info.next_rx)),
+ (int)dev->rx_info.next_empty,
+ (dev->rx_info.descs + (DESC_SIZE * dev->rx_info.next_empty))
+ );
+
+ spin_lock_irqsave(&info->lock, flags);
+ if (!info->up)
+ goto out;
+
+ dprintk("walking descs\n");
+ next_rx = info->next_rx;
+ desc = info->next_rx_desc;
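+ /* a cmdsts of exactly CMDSTS_OWN (no status, no length) is the
+ * empty marker written by clear_rx_desc(), so the walk stops
+ * there even though the own bit is set */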
+ while ((CMDSTS_OWN & (cmdsts = le32_to_cpu(desc[DESC_CMDSTS]))) &&
+ (cmdsts != CMDSTS_OWN)) {
+ struct sk_buff *skb;
+ u32 extsts = le32_to_cpu(desc[DESC_EXTSTS]);
+ dma_addr_t bufptr = desc_addr_get(desc + DESC_BUFPTR);
+
+ dprintk("cmdsts: %08x\n", cmdsts);
+ dprintk("link: %08x\n", cpu_to_le32(desc[DESC_LINK]));
+ dprintk("extsts: %08x\n", extsts);
+
+ skb = info->skbs[next_rx];
+ info->skbs[next_rx] = NULL;
+ info->next_rx = (next_rx + 1) % NR_RX_DESC;
+
+ mb();
+ clear_rx_desc(dev, next_rx);
+
+ pci_unmap_single(dev->pci_dev, bufptr,
+ RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ len = cmdsts & CMDSTS_LEN_MASK;
+#ifdef NS83820_VLAN_ACCEL_SUPPORT
+ /* NH: As was mentioned below, this chip is kinda
+ * brain dead about vlan tag stripping. Frames
+ * that are 64 bytes with a vlan header appended
+ * like arp frames, or pings, are flagged as Runts
+ * when the tag is stripped by the hardware. This
+ * also means that the OK bit in the descriptor
+ * is cleared when the frame comes in so we have
+ * to do a specific length check here to make sure
+ * the frame would have been ok, had we not stripped
+ * the tag.
+ */
+ if (likely((CMDSTS_OK & cmdsts) ||
+ ((cmdsts & CMDSTS_RUNT) && len >= 56))) {
+#else
+ if (likely(CMDSTS_OK & cmdsts)) {
+#endif
+ if (unlikely(!skb))
+ goto netdev_mangle_me_harder_failed;
+ skb_put(skb, len);
+ if (cmdsts & CMDSTS_DEST_MULTI)
+ ndev->stats.multicast++;
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+ if ((extsts & 0x002a0000) && !(extsts & 0x00540000)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb_checksum_none_assert(skb);
+ }
+ skb->protocol = eth_type_trans(skb, ndev);
+#ifdef NS83820_VLAN_ACCEL_SUPPORT
+ if(extsts & EXTSTS_VPKT) {
+ unsigned short tag;
+
+ tag = ntohs(extsts & EXTSTS_VTG_MASK);
+ __vlan_hwaccel_put_tag(skb, tag);
+ }
+#endif
+ rx_rc = netif_rx(skb);
+ if (NET_RX_DROP == rx_rc) {
+netdev_mangle_me_harder_failed:
+ ndev->stats.rx_dropped++;
+ }
+ } else {
+ kfree_skb(skb);
+ }
+
+ nr++;
+ next_rx = info->next_rx;
+ desc = info->descs + (DESC_SIZE * next_rx);
+ }
+ info->next_rx = next_rx;
+ info->next_rx_desc = info->descs + (DESC_SIZE * next_rx);
+
+out:
+ if (0 && !nr) {
+ Dprintk("dazed: cmdsts_f: %08x\n", cmdsts);
+ }
+
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
+static void rx_action(unsigned long _dev)
+{
+ struct net_device *ndev = (void *)_dev;
+ struct ns83820 *dev = PRIV(ndev);
+ rx_irq(ndev);
+ writel(ihr, dev->base + IHR);
+
+ spin_lock_irq(&dev->misc_lock);
+ dev->IMR_cache |= ISR_RXDESC;
+ writel(dev->IMR_cache, dev->base + IMR);
+ spin_unlock_irq(&dev->misc_lock);
+
+ rx_irq(ndev);
+ ns83820_rx_kick(ndev);
+}
+
+/* Packet Transmit code
+ */
+static inline void kick_tx(struct ns83820 *dev)
+{
+ dprintk("kick_tx(%p): tx_idx=%d free_idx=%d\n",
+ dev, dev->tx_idx, dev->tx_free_idx);
+ writel(CR_TXE, dev->base + CR);
+}
+
+/* No spinlock needed on the transmit irq path as the interrupt handler is
+ * serialized.
+ */
+static void do_tx_done(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ u32 cmdsts, tx_done_idx;
+ __le32 *desc;
+
+ dprintk("do_tx_done(%p)\n", ndev);
+ tx_done_idx = dev->tx_done_idx;
+ desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
+
+ dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n",
+ tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS]));
+ while ((tx_done_idx != dev->tx_free_idx) &&
+ !(CMDSTS_OWN & (cmdsts = le32_to_cpu(desc[DESC_CMDSTS]))) ) {
+ struct sk_buff *skb;
+ unsigned len;
+ dma_addr_t addr;
+
+ if (cmdsts & CMDSTS_ERR)
+ ndev->stats.tx_errors++;
+ if (cmdsts & CMDSTS_OK)
+ ndev->stats.tx_packets++;
+ if (cmdsts & CMDSTS_OK)
+ ndev->stats.tx_bytes += cmdsts & 0xffff;
+
+ dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n",
+ tx_done_idx, dev->tx_free_idx, cmdsts);
+ skb = dev->tx_skbs[tx_done_idx];
+ dev->tx_skbs[tx_done_idx] = NULL;
+ dprintk("done(%p)\n", skb);
+
+ len = cmdsts & CMDSTS_LEN_MASK;
+ addr = desc_addr_get(desc + DESC_BUFPTR);
+ if (skb) {
+ pci_unmap_single(dev->pci_dev,
+ addr,
+ len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ atomic_dec(&dev->nr_tx_skbs);
+ } else
+ pci_unmap_page(dev->pci_dev,
+ addr,
+ len,
+ PCI_DMA_TODEVICE);
+
+ tx_done_idx = (tx_done_idx + 1) % NR_TX_DESC;
+ dev->tx_done_idx = tx_done_idx;
+ desc[DESC_CMDSTS] = cpu_to_le32(0);
+ mb();
+ desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
+ }
+
+ /* Allow network stack to resume queueing packets after we've
+ * finished transmitting at least 1/4 of the packets in the queue.
+ */
+ if (netif_queue_stopped(ndev) && start_tx_okay(dev)) {
+ dprintk("start_queue(%p)\n", ndev);
+ netif_start_queue(ndev);
+ netif_wake_queue(ndev);
+ }
+}
+
+static void ns83820_cleanup_tx(struct ns83820 *dev)
+{
+ unsigned i;
+
+ for (i=0; i<NR_TX_DESC; i++) {
+ struct sk_buff *skb = dev->tx_skbs[i];
+ dev->tx_skbs[i] = NULL;
+ if (skb) {
+ __le32 *desc = dev->tx_descs + (i * DESC_SIZE);
+ pci_unmap_single(dev->pci_dev,
+ desc_addr_get(desc + DESC_BUFPTR),
+ le32_to_cpu(desc[DESC_CMDSTS]) & CMDSTS_LEN_MASK,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ atomic_dec(&dev->nr_tx_skbs);
+ }
+ }
+
+ memset(dev->tx_descs, 0, NR_TX_DESC * DESC_SIZE * 4);
+}
+
+/* transmit routine. This code relies on the network layer serializing
+ * its calls in, but will run happily in parallel with the interrupt
+ * handler. This code currently has provisions for fragmenting tx buffers
+ * while trying to track down a bug in either the zero copy code or
+ * the tx fifo (hence the MAX_FRAG_LEN).
+ */
+static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ u32 free_idx, cmdsts, extsts;
+ int nr_free, nr_frags;
+ unsigned tx_done_idx, last_idx;
+ dma_addr_t buf;
+ unsigned len;
+ skb_frag_t *frag;
+ int stopped = 0;
+ int do_intr = 0;
+ volatile __le32 *first_desc;
+
+ dprintk("ns83820_hard_start_xmit\n");
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+again:
+ if (unlikely(dev->CFG_cache & CFG_LNKSTS)) {
+ netif_stop_queue(ndev);
+ if (unlikely(dev->CFG_cache & CFG_LNKSTS))
+ return NETDEV_TX_BUSY;
+ netif_start_queue(ndev);
+ }
+
+ last_idx = free_idx = dev->tx_free_idx;
+ tx_done_idx = dev->tx_done_idx;
+ nr_free = (tx_done_idx + NR_TX_DESC-2 - free_idx) % NR_TX_DESC;
+ nr_free -= 1;
+ if (nr_free <= nr_frags) {
+ dprintk("stop_queue - not enough(%p)\n", ndev);
+ netif_stop_queue(ndev);
+
+ /* Check again: we may have raced with a tx done irq */
+ if (dev->tx_done_idx != tx_done_idx) {
+ dprintk("restart queue(%p)\n", ndev);
+ netif_start_queue(ndev);
+ goto again;
+ }
+ return NETDEV_TX_BUSY;
+ }
+
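+ /* request a tx completion interrupt only every NR_TX_DESC/4
+ * descriptors, to mitigate interrupt load */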
+ if (free_idx == dev->tx_intr_idx) {
+ do_intr = 1;
+ dev->tx_intr_idx = (dev->tx_intr_idx + NR_TX_DESC/4) % NR_TX_DESC;
+ }
+
+ nr_free -= nr_frags;
+ if (nr_free < MIN_TX_DESC_FREE) {
+ dprintk("stop_queue - last entry(%p)\n", ndev);
+ netif_stop_queue(ndev);
+ stopped = 1;
+ }
+
+ frag = skb_shinfo(skb)->frags;
+ if (!nr_frags)
+ frag = NULL;
+ extsts = 0;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ extsts |= EXTSTS_IPPKT;
+ if (IPPROTO_TCP == ip_hdr(skb)->protocol)
+ extsts |= EXTSTS_TCPPKT;
+ else if (IPPROTO_UDP == ip_hdr(skb)->protocol)
+ extsts |= EXTSTS_UDPPKT;
+ }
+
+#ifdef NS83820_VLAN_ACCEL_SUPPORT
+ if(vlan_tx_tag_present(skb)) {
+ /* fetch the vlan tag info out of the
+ * ancillary data if the vlan code
+ * is using hw vlan acceleration
+ */
+ short tag = vlan_tx_tag_get(skb);
+ extsts |= (EXTSTS_VPKT | htons(tag));
+ }
+#endif
+
+ len = skb->len;
+ if (nr_frags)
+ len -= skb->data_len;
+ buf = pci_map_single(dev->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+
+ first_desc = dev->tx_descs + (free_idx * DESC_SIZE);
+
+ for (;;) {
+ volatile __le32 *desc = dev->tx_descs + (free_idx * DESC_SIZE);
+
+ dprintk("frag[%3u]: %4u @ 0x%08Lx\n", free_idx, len,
+ (unsigned long long)buf);
+ last_idx = free_idx;
+ free_idx = (free_idx + 1) % NR_TX_DESC;
+ desc[DESC_LINK] = cpu_to_le32(dev->tx_phy_descs + (free_idx * DESC_SIZE * 4));
+ desc_addr_set(desc + DESC_BUFPTR, buf);
+ desc[DESC_EXTSTS] = cpu_to_le32(extsts);
+
+ cmdsts = ((nr_frags) ? CMDSTS_MORE : do_intr ? CMDSTS_INTR : 0);
+ cmdsts |= (desc == first_desc) ? 0 : CMDSTS_OWN;
+ cmdsts |= len;
+ desc[DESC_CMDSTS] = cpu_to_le32(cmdsts);
+
+ if (!nr_frags)
+ break;
+
+ buf = skb_frag_dma_map(&dev->pci_dev->dev, frag, 0,
+ frag->size, DMA_TO_DEVICE);
+ dprintk("frag: buf=%08Lx page=%08lx offset=%08lx\n",
+ (long long)buf, (long) page_to_pfn(frag->page),
+ frag->page_offset);
+ len = frag->size;
+ frag++;
+ nr_frags--;
+ }
+ dprintk("done pkt\n");
+
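+ /* all but the first descriptor were given CMDSTS_OWN above;
+ * setting OWN on the first one last, under the lock, publishes
+ * the whole chain to the chip in one step */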
+ spin_lock_irq(&dev->tx_lock);
+ dev->tx_skbs[last_idx] = skb;
+ first_desc[DESC_CMDSTS] |= cpu_to_le32(CMDSTS_OWN);
+ dev->tx_free_idx = free_idx;
+ atomic_inc(&dev->nr_tx_skbs);
+ spin_unlock_irq(&dev->tx_lock);
+
+ kick_tx(dev);
+
+ /* Check again: we may have raced with a tx done irq */
+ if (stopped && (dev->tx_done_idx != tx_done_idx) && start_tx_okay(dev))
+ netif_start_queue(ndev);
+
+ return NETDEV_TX_OK;
+}
+
+static void ns83820_update_stats(struct ns83820 *dev)
+{
+ struct net_device *ndev = dev->ndev;
+ u8 __iomem *base = dev->base;
+
+ /* the DP83820 will freeze counters, so we need to read all of them */
+ ndev->stats.rx_errors += readl(base + 0x60) & 0xffff;
+ ndev->stats.rx_crc_errors += readl(base + 0x64) & 0xffff;
+ ndev->stats.rx_missed_errors += readl(base + 0x68) & 0xffff;
+ ndev->stats.rx_frame_errors += readl(base + 0x6c) & 0xffff;
+ /*ndev->stats.rx_symbol_errors +=*/ readl(base + 0x70);
+ ndev->stats.rx_length_errors += readl(base + 0x74) & 0xffff;
+ ndev->stats.rx_length_errors += readl(base + 0x78) & 0xffff;
+ /*ndev->stats.rx_badopcode_errors += */ readl(base + 0x7c);
+ /*ndev->stats.rx_pause_count += */ readl(base + 0x80);
+ /*ndev->stats.tx_pause_count += */ readl(base + 0x84);
+ ndev->stats.tx_carrier_errors += readl(base + 0x88) & 0xff;
+}
+
+static struct net_device_stats *ns83820_get_stats(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+
+ /* somewhat overkill */
+ spin_lock_irq(&dev->misc_lock);
+ ns83820_update_stats(dev);
+ spin_unlock_irq(&dev->misc_lock);
+
+ return &ndev->stats;
+}
+
+/* Let ethtool retrieve info */
+static int ns83820_get_settings(struct net_device *ndev,
+ struct ethtool_cmd *cmd)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ u32 cfg, tanar, tbicr;
+ int fullduplex = 0;
+
+ /*
+ * Here's the list of ethtool_cmd fields that other drivers fill in:
+ * cmd->advertising =
+ * ethtool_cmd_speed_set(cmd, ...)
+ * cmd->duplex =
+ * cmd->port = 0;
+ * cmd->phy_address =
+ * cmd->transceiver = 0;
+ * cmd->autoneg =
+ * cmd->maxtxpkt = 0;
+ * cmd->maxrxpkt = 0;
+ */
+
+ /* read current configuration */
+ cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
+ tanar = readl(dev->base + TANAR);
+ tbicr = readl(dev->base + TBICR);
+
+ fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
+
+ cmd->supported = SUPPORTED_Autoneg;
+
+ if (dev->CFG_cache & CFG_TBI_EN) {
+ /* we have optical interface */
+ cmd->supported |= SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE;
+ cmd->port = PORT_FIBRE;
+ } else {
+ /* we have copper */
+ cmd->supported |= SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_MII;
+ cmd->port = PORT_MII;
+ }
+
+ cmd->duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF;
+ switch (cfg / CFG_SPDSTS0 & 3) {
+ case 2:
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
+ break;
+ case 1:
+ ethtool_cmd_speed_set(cmd, SPEED_100);
+ break;
+ default:
+ ethtool_cmd_speed_set(cmd, SPEED_10);
+ break;
+ }
+ cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE)
+ ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ return 0;
+}
+
+/* Let ethtool change settings */
+static int ns83820_set_settings(struct net_device *ndev,
+ struct ethtool_cmd *cmd)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ u32 cfg, tanar;
+ int have_optical = 0;
+ int fullduplex = 0;
+
+ /* read current configuration */
+ cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
+ tanar = readl(dev->base + TANAR);
+
+ if (dev->CFG_cache & CFG_TBI_EN) {
+ /* we have optical */
+ have_optical = 1;
+ fullduplex = (tanar & TANAR_FULL_DUP);
+
+ } else {
+ /* we have copper */
+ fullduplex = cfg & CFG_DUPSTS;
+ }
+
+ spin_lock_irq(&dev->misc_lock);
+ spin_lock(&dev->tx_lock);
+
+ /* Set duplex */
+ if (cmd->duplex != fullduplex) {
+ if (have_optical) {
+ /*set full duplex*/
+ if (cmd->duplex == DUPLEX_FULL) {
+ /* force full duplex */
+ writel(readl(dev->base + TXCFG)
+ | TXCFG_CSI | TXCFG_HBI | TXCFG_ATP,
+ dev->base + TXCFG);
+ writel(readl(dev->base + RXCFG) | RXCFG_RX_FD,
+ dev->base + RXCFG);
+ /* Light up full duplex LED */
+ writel(readl(dev->base + GPIOR) | GPIOR_GP1_OUT,
+ dev->base + GPIOR);
+ } else {
+ /*TODO: set half duplex */
+ }
+
+ } else {
+ /*we have copper*/
+ /* TODO: Set duplex for copper cards */
+ }
+ printk(KERN_INFO "%s: Duplex set via ethtool\n",
+ ndev->name);
+ }
+
+ /* Set autonegotiation */
+ if (1) {
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ /* restart auto negotiation */
+ writel(TBICR_MR_AN_ENABLE | TBICR_MR_RESTART_AN,
+ dev->base + TBICR);
+ writel(TBICR_MR_AN_ENABLE, dev->base + TBICR);
+ dev->linkstate = LINK_AUTONEGOTIATE;
+
+ printk(KERN_INFO "%s: autoneg enabled via ethtool\n",
+ ndev->name);
+ } else {
+ /* disable auto negotiation */
+ writel(0x00000000, dev->base + TBICR);
+ }
+
+ printk(KERN_INFO "%s: autoneg %s via ethtool\n", ndev->name,
+ cmd->autoneg ? "ENABLED" : "DISABLED");
+ }
+
+ phy_intr(ndev);
+ spin_unlock(&dev->tx_lock);
+ spin_unlock_irq(&dev->misc_lock);
+
+ return 0;
+}
+/* end ethtool get/set support -df */
+
+static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ strcpy(info->driver, "ns83820");
+ strcpy(info->version, VERSION);
+ strcpy(info->bus_info, pci_name(dev->pci_dev));
+}
+
+static u32 ns83820_get_link(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ u32 cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
+ return cfg & CFG_LNKSTS ? 1 : 0;
+}
+
+static const struct ethtool_ops ops = {
+ .get_settings = ns83820_get_settings,
+ .set_settings = ns83820_set_settings,
+ .get_drvinfo = ns83820_get_drvinfo,
+ .get_link = ns83820_get_link
+};
+
+static inline void ns83820_disable_interrupts(struct ns83820 *dev)
+{
+ writel(0, dev->base + IMR);
+ writel(0, dev->base + IER);
+ readl(dev->base + IER);
+}
+
+/* this function is called in irq context from the ISR */
+static void ns83820_mib_isr(struct ns83820 *dev)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&dev->misc_lock, flags);
+ ns83820_update_stats(dev);
+ spin_unlock_irqrestore(&dev->misc_lock, flags);
+}
+
+static void ns83820_do_isr(struct net_device *ndev, u32 isr);
+static irqreturn_t ns83820_irq(int foo, void *data)
+{
+ struct net_device *ndev = data;
+ struct ns83820 *dev = PRIV(ndev);
+ u32 isr;
+ dprintk("ns83820_irq(%p)\n", ndev);
+
+ dev->ihr = 0;
+
+ isr = readl(dev->base + ISR);
+ dprintk("irq: %08x\n", isr);
+ ns83820_do_isr(ndev, isr);
+ return IRQ_HANDLED;
+}
+
+static void ns83820_do_isr(struct net_device *ndev, u32 isr)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ unsigned long flags;
+
+#ifdef DEBUG
+ if (isr & ~(ISR_PHY | ISR_RXDESC | ISR_RXEARLY | ISR_RXOK | ISR_RXERR | ISR_TXIDLE | ISR_TXOK | ISR_TXDESC))
+ Dprintk("odd isr? 0x%08x\n", isr);
+#endif
+
+ if (ISR_RXIDLE & isr) {
+ dev->rx_info.idle = 1;
+ Dprintk("oh dear, we are idle\n");
+ ns83820_rx_kick(ndev);
+ }
+
+ if ((ISR_RXDESC | ISR_RXOK) & isr) {
+ prefetch(dev->rx_info.next_rx_desc);
+
+ spin_lock_irqsave(&dev->misc_lock, flags);
+ dev->IMR_cache &= ~(ISR_RXDESC | ISR_RXOK);
+ writel(dev->IMR_cache, dev->base + IMR);
+ spin_unlock_irqrestore(&dev->misc_lock, flags);
+
+ tasklet_schedule(&dev->rx_tasklet);
+ //rx_irq(ndev);
+ //writel(4, dev->base + IHR);
+ }
+
+ if ((ISR_RXIDLE | ISR_RXORN | ISR_RXDESC | ISR_RXOK | ISR_RXERR) & isr)
+ ns83820_rx_kick(ndev);
+
+ if (unlikely(ISR_RXSOVR & isr)) {
+ //printk("overrun: rxsovr\n");
+ ndev->stats.rx_fifo_errors++;
+ }
+
+ if (unlikely(ISR_RXORN & isr)) {
+ //printk("overrun: rxorn\n");
+ ndev->stats.rx_fifo_errors++;
+ }
+
+ if ((ISR_RXRCMP & isr) && dev->rx_info.up)
+ writel(CR_RXE, dev->base + CR);
+
+ if (ISR_TXIDLE & isr) {
+ u32 txdp;
+ txdp = readl(dev->base + TXDP);
+ dprintk("txdp: %08x\n", txdp);
+ txdp -= dev->tx_phy_descs;
+ dev->tx_idx = txdp / (DESC_SIZE * 4);
+ if (dev->tx_idx >= NR_TX_DESC) {
+ printk(KERN_ALERT "%s: BUG -- txdp out of range\n", ndev->name);
+ dev->tx_idx = 0;
+ }
+ /* There may have been a race between a pci originated read
+ * and the descriptor update from the cpu. Just in case,
+ * kick the transmitter if the hardware thinks it is on a
+ * different descriptor than we are.
+ */
+ if (dev->tx_idx != dev->tx_free_idx)
+ kick_tx(dev);
+ }
+
+ /* Defer tx ring processing until more than a minimum amount of
+ * work has accumulated
+ */
+ if ((ISR_TXDESC | ISR_TXIDLE | ISR_TXOK | ISR_TXERR) & isr) {
+ spin_lock_irqsave(&dev->tx_lock, flags);
+ do_tx_done(ndev);
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+ /* Disable TxOk if there are no outstanding tx packets.
+ */
+ if ((dev->tx_done_idx == dev->tx_free_idx) &&
+ (dev->IMR_cache & ISR_TXOK)) {
+ spin_lock_irqsave(&dev->misc_lock, flags);
+ dev->IMR_cache &= ~ISR_TXOK;
+ writel(dev->IMR_cache, dev->base + IMR);
+ spin_unlock_irqrestore(&dev->misc_lock, flags);
+ }
+ }
+
+ /* The TxIdle interrupt can come in before the transmit has
+ * completed. Normally we reap packets off of the combination
+ * of TxDesc and TxIdle and leave TxOk disabled (since it
+ * occurs on every packet), but when no further irqs of this
+ * nature are expected, we must enable TxOk.
+ */
+ if ((ISR_TXIDLE & isr) && (dev->tx_done_idx != dev->tx_free_idx)) {
+ spin_lock_irqsave(&dev->misc_lock, flags);
+ dev->IMR_cache |= ISR_TXOK;
+ writel(dev->IMR_cache, dev->base + IMR);
+ spin_unlock_irqrestore(&dev->misc_lock, flags);
+ }
+
+ /* MIB interrupt: one of the statistics counters is about to overflow */
+ if (unlikely(ISR_MIB & isr))
+ ns83820_mib_isr(dev);
+
+ /* PHY: Link up/down/negotiation state change */
+ if (unlikely(ISR_PHY & isr))
+ phy_intr(ndev);
+
+#if 0 /* Still working on the interrupt mitigation strategy */
+ if (dev->ihr)
+ writel(dev->ihr, dev->base + IHR);
+#endif
+}
+
+static void ns83820_do_reset(struct ns83820 *dev, u32 which)
+{
+ Dprintk("resetting chip...\n");
+ writel(which, dev->base + CR);
+ do {
+ schedule();
+ } while (readl(dev->base + CR) & which);
+ Dprintk("okay!\n");
+}
+
+static int ns83820_stop(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+
+ /* FIXME: protect against interrupt handler? */
+ del_timer_sync(&dev->tx_watchdog);
+
+ ns83820_disable_interrupts(dev);
+
+ dev->rx_info.up = 0;
+ synchronize_irq(dev->pci_dev->irq);
+
+ ns83820_do_reset(dev, CR_RST);
+
+ synchronize_irq(dev->pci_dev->irq);
+
+ spin_lock_irq(&dev->misc_lock);
+ dev->IMR_cache &= ~(ISR_TXURN | ISR_TXIDLE | ISR_TXERR | ISR_TXDESC | ISR_TXOK);
+ spin_unlock_irq(&dev->misc_lock);
+
+ ns83820_cleanup_rx(dev);
+ ns83820_cleanup_tx(dev);
+
+ return 0;
+}
+
+static void ns83820_tx_timeout(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ u32 tx_done_idx;
+ __le32 *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ tx_done_idx = dev->tx_done_idx;
+ desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
+
+ printk(KERN_INFO "%s: tx_timeout: tx_done_idx=%d free_idx=%d cmdsts=%08x\n",
+ ndev->name,
+ tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS]));
+
+#if defined(DEBUG)
+ {
+ u32 isr;
+ isr = readl(dev->base + ISR);
+ printk("irq: %08x imr: %08x\n", isr, dev->IMR_cache);
+ ns83820_do_isr(ndev, isr);
+ }
+#endif
+
+ do_tx_done(ndev);
+
+ tx_done_idx = dev->tx_done_idx;
+ desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
+
+ printk(KERN_INFO "%s: after: tx_done_idx=%d free_idx=%d cmdsts=%08x\n",
+ ndev->name,
+ tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS]));
+
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+static void ns83820_tx_watch(unsigned long data)
+{
+ struct net_device *ndev = (void *)data;
+ struct ns83820 *dev = PRIV(ndev);
+
+#if defined(DEBUG)
+ printk("ns83820_tx_watch: %u %u %d\n",
+ dev->tx_done_idx, dev->tx_free_idx, atomic_read(&dev->nr_tx_skbs)
+ );
+#endif
+
+ if (time_after(jiffies, dev_trans_start(ndev) + 1*HZ) &&
+ dev->tx_done_idx != dev->tx_free_idx) {
+ printk(KERN_DEBUG "%s: ns83820_tx_watch: %u %u %d\n",
+ ndev->name,
+ dev->tx_done_idx, dev->tx_free_idx,
+ atomic_read(&dev->nr_tx_skbs));
+ ns83820_tx_timeout(ndev);
+ }
+
+ mod_timer(&dev->tx_watchdog, jiffies + 2*HZ);
+}
+
+static int ns83820_open(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ unsigned i;
+ u32 desc;
+ int ret;
+
+ dprintk("ns83820_open\n");
+
+ writel(0, dev->base + PQCR);
+
+ ret = ns83820_setup_rx(ndev);
+ if (ret)
+ goto failed;
+
+ memset(dev->tx_descs, 0, 4 * NR_TX_DESC * DESC_SIZE);
+ for (i=0; i<NR_TX_DESC; i++) {
+ dev->tx_descs[(i * DESC_SIZE) + DESC_LINK]
+ = cpu_to_le32(
+ dev->tx_phy_descs
+ + ((i+1) % NR_TX_DESC) * DESC_SIZE * 4);
+ }
+
+ dev->tx_idx = 0;
+ dev->tx_done_idx = 0;
+ desc = dev->tx_phy_descs;
+ writel(0, dev->base + TXDP_HI);
+ writel(desc, dev->base + TXDP);
+
+ init_timer(&dev->tx_watchdog);
+ dev->tx_watchdog.data = (unsigned long)ndev;
+ dev->tx_watchdog.function = ns83820_tx_watch;
+ mod_timer(&dev->tx_watchdog, jiffies + 2*HZ);
+
+ netif_start_queue(ndev); /* FIXME: wait for phy to come up */
+
+ return 0;
+
+failed:
+ ns83820_stop(ndev);
+ return ret;
+}
+
+static void ns83820_getmac(struct ns83820 *dev, u8 *mac)
+{
+ unsigned i;
+ for (i=0; i<3; i++) {
+ u32 data;
+
+ /* Read from the perfect match memory: this is loaded by
+ * the chip from the EEPROM via the EELOAD self test.
+ */
+ writel(i*2, dev->base + RFCR);
+ data = readl(dev->base + RFDR);
+
+ *mac++ = data;
+ *mac++ = data >> 8;
+ }
+}
+
+static int ns83820_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ if (new_mtu > RX_BUF_SIZE)
+ return -EINVAL;
+ ndev->mtu = new_mtu;
+ return 0;
+}
+
+static void ns83820_set_multicast(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ u8 __iomem *rfcr = dev->base + RFCR;
+ u32 and_mask = 0xffffffff;
+ u32 or_mask = 0;
+ u32 val;
+
+ if (ndev->flags & IFF_PROMISC)
+ or_mask |= RFCR_AAU | RFCR_AAM;
+ else
+ and_mask &= ~(RFCR_AAU | RFCR_AAM);
+
+ if (ndev->flags & IFF_ALLMULTI || netdev_mc_count(ndev))
+ or_mask |= RFCR_AAM;
+ else
+ and_mask &= ~RFCR_AAM;
+
+ spin_lock_irq(&dev->misc_lock);
+ val = (readl(rfcr) & and_mask) | or_mask;
+ /* Ramit : RFCR Write Fix doc says RFEN must be 0 to modify the other bits */
+ writel(val & ~RFCR_RFEN, rfcr);
+ writel(val, rfcr);
+ spin_unlock_irq(&dev->misc_lock);
+}
+
+static void ns83820_run_bist(struct net_device *ndev, const char *name, u32 enable, u32 done, u32 fail)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ int timed_out = 0;
+ unsigned long start;
+ u32 status;
+ int loops = 0;
+
+ dprintk("%s: start %s\n", ndev->name, name);
+
+ start = jiffies;
+
+ writel(enable, dev->base + PTSCR);
+ for (;;) {
+ loops++;
+ status = readl(dev->base + PTSCR);
+ if (!(status & enable))
+ break;
+ if (status & done)
+ break;
+ if (status & fail)
+ break;
+ if (time_after_eq(jiffies, start + HZ)) {
+ timed_out = 1;
+ break;
+ }
+ schedule_timeout_uninterruptible(1);
+ }
+
+ if (status & fail)
+ printk(KERN_INFO "%s: %s failed! (0x%08x & 0x%08x)\n",
+ ndev->name, name, status, fail);
+ else if (timed_out)
+ printk(KERN_INFO "%s: run_bist %s timed out! (%08x)\n",
+ ndev->name, name, status);
+
+ dprintk("%s: done %s in %d loops\n", ndev->name, name, loops);
+}
+
+#ifdef PHY_CODE_IS_FINISHED
+static void ns83820_mii_write_bit(struct ns83820 *dev, int bit)
+{
+ /* drive MDC low */
+ dev->MEAR_cache &= ~MEAR_MDC;
+ writel(dev->MEAR_cache, dev->base + MEAR);
+ readl(dev->base + MEAR);
+
+ /* enable output, set bit */
+ dev->MEAR_cache |= MEAR_MDDIR;
+ if (bit)
+ dev->MEAR_cache |= MEAR_MDIO;
+ else
+ dev->MEAR_cache &= ~MEAR_MDIO;
+
+ /* set the output bit */
+ writel(dev->MEAR_cache, dev->base + MEAR);
+ readl(dev->base + MEAR);
+
+ /* Wait. Max clock rate is 2.5MHz; this way we come in under 1MHz */
+ udelay(1);
+
+ /* drive MDC high causing the data bit to be latched */
+ dev->MEAR_cache |= MEAR_MDC;
+ writel(dev->MEAR_cache, dev->base + MEAR);
+ readl(dev->base + MEAR);
+
+ /* Wait again... */
+ udelay(1);
+}
+
+static int ns83820_mii_read_bit(struct ns83820 *dev)
+{
+ int bit;
+
+ /* drive MDC low, disable output */
+ dev->MEAR_cache &= ~MEAR_MDC;
+ dev->MEAR_cache &= ~MEAR_MDDIR;
+ writel(dev->MEAR_cache, dev->base + MEAR);
+ readl(dev->base + MEAR);
+
+ /* Wait. Max clock rate is 2.5MHz; this way we come in under 1MHz */
+ udelay(1);
+
+ /* drive MDC high causing the data bit to be latched */
+ bit = (readl(dev->base + MEAR) & MEAR_MDIO) ? 1 : 0;
+ dev->MEAR_cache |= MEAR_MDC;
+ writel(dev->MEAR_cache, dev->base + MEAR);
+
+ /* Wait again... */
+ udelay(1);
+
+ return bit;
+}
+
+static unsigned ns83820_mii_read_reg(struct ns83820 *dev, unsigned phy, unsigned reg)
+{
+ unsigned data = 0;
+ int i;
+
+ /* read some garbage so that we eventually sync up */
+ for (i=0; i<64; i++)
+ ns83820_mii_read_bit(dev);
+
+ ns83820_mii_write_bit(dev, 0); /* start */
+ ns83820_mii_write_bit(dev, 1);
+ ns83820_mii_write_bit(dev, 1); /* opcode read */
+ ns83820_mii_write_bit(dev, 0);
+
+ /* write out the phy address: 5 bits, msb first */
+ for (i=0; i<5; i++)
+ ns83820_mii_write_bit(dev, phy & (0x10 >> i));
+
+ /* write out the register address, 5 bits, msb first */
+ for (i=0; i<5; i++)
+ ns83820_mii_write_bit(dev, reg & (0x10 >> i));
+
+ ns83820_mii_read_bit(dev); /* turn around cycles */
+ ns83820_mii_read_bit(dev);
+
+ /* read in the register data, 16 bits msb first */
+ for (i=0; i<16; i++) {
+ data <<= 1;
+ data |= ns83820_mii_read_bit(dev);
+ }
+
+ return data;
+}
+
+static unsigned ns83820_mii_write_reg(struct ns83820 *dev, unsigned phy, unsigned reg, unsigned data)
+{
+ int i;
+
+ /* read some garbage so that we eventually sync up */
+ for (i=0; i<64; i++)
+ ns83820_mii_read_bit(dev);
+
+ ns83820_mii_write_bit(dev, 0); /* start */
+ ns83820_mii_write_bit(dev, 1);
+ ns83820_mii_write_bit(dev, 0); /* opcode write */
+ ns83820_mii_write_bit(dev, 1);
+
+ /* write out the phy address: 5 bits, msb first */
+ for (i=0; i<5; i++)
+ ns83820_mii_write_bit(dev, phy & (0x10 >> i));
+
+ /* write out the register address, 5 bits, msb first */
+ for (i=0; i<5; i++)
+ ns83820_mii_write_bit(dev, reg & (0x10 >> i));
+
+ ns83820_mii_read_bit(dev); /* turn around cycles */
+ ns83820_mii_read_bit(dev);
+
+ /* write out the register data, 16 bits msb first */
+ for (i=0; i<16; i++)
+ ns83820_mii_write_bit(dev, (data >> (15 - i)) & 1);
+
+ return data;
+}
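
Both MII helpers above bit-bang a standard IEEE 802.3 clause 22 management frame: start bits 01, a 2-bit opcode (10 = read, 01 = write), 5-bit PHY and register addresses sent msb first, a turnaround, then 16 data bits. A sketch that packs the equivalent write frame into one 32-bit word, purely to make the field layout visible (the helper name is ours, not the driver's):

    #include <stdio.h>
    #include <stdint.h>

    /* clause 22 MDIO write frame:
     * ST=01 OP=01 PHYAD[4:0] REGAD[4:0] TA=10 DATA[15:0] */
    static uint32_t mdio_write_frame(unsigned phy, unsigned reg, unsigned data)
    {
        return (0x1u << 30)              /* start: 01 */
             | (0x1u << 28)              /* opcode: 01 = write */
             | ((phy & 0x1f) << 23)      /* PHY address, msb first on the wire */
             | ((reg & 0x1f) << 18)      /* register address */
             | (0x2u << 16)              /* turnaround: 10 */
             | (data & 0xffff);          /* register data */
    }

    int main(void)
    {
        /* hypothetical PHY 1, register 0 (BMCR), data 0x8000 (reset) */
        printf("frame = 0x%08x\n", mdio_write_frame(1, 0, 0x8000));
        return 0;
    }
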
+
+static void ns83820_probe_phy(struct net_device *ndev)
+{
+ struct ns83820 *dev = PRIV(ndev);
+ static int first;
+ int i;
+#define MII_PHYIDR1 0x02
+#define MII_PHYIDR2 0x03
+
+#if 0
+ if (!first) {
+ unsigned tmp;
+ ns83820_mii_read_reg(dev, 1, 0x09);
+ ns83820_mii_write_reg(dev, 1, 0x10, 0x0d3e);
+
+ tmp = ns83820_mii_read_reg(dev, 1, 0x00);
+ ns83820_mii_write_reg(dev, 1, 0x00, tmp | 0x8000);
+ udelay(1300);
+ ns83820_mii_read_reg(dev, 1, 0x09);
+ }
+#endif
+ first = 1;
+
+ for (i=1; i<2; i++) {
+ int j;
+ unsigned a, b;
+ a = ns83820_mii_read_reg(dev, i, MII_PHYIDR1);
+ b = ns83820_mii_read_reg(dev, i, MII_PHYIDR2);
+
+ //printk("%s: phy %d: 0x%04x 0x%04x\n",
+ // ndev->name, i, a, b);
+
+ for (j=0; j<0x16; j+=4) {
+ dprintk("%s: [0x%02x] %04x %04x %04x %04x\n",
+ ndev->name, j,
+ ns83820_mii_read_reg(dev, i, 0 + j),
+ ns83820_mii_read_reg(dev, i, 1 + j),
+ ns83820_mii_read_reg(dev, i, 2 + j),
+ ns83820_mii_read_reg(dev, i, 3 + j)
+ );
+ }
+ }
+ {
+ unsigned a, b;
+ /* read firmware version: memory addr is 0x8402 and 0x8403 */
+ ns83820_mii_write_reg(dev, 1, 0x16, 0x000d);
+ ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e);
+ a = ns83820_mii_read_reg(dev, 1, 0x1d);
+
+ ns83820_mii_write_reg(dev, 1, 0x16, 0x000d);
+ ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e);
+ b = ns83820_mii_read_reg(dev, 1, 0x1d);
+ dprintk("version: 0x%04x 0x%04x\n", a, b);
+ }
+}
+#endif
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = ns83820_open,
+ .ndo_stop = ns83820_stop,
+ .ndo_start_xmit = ns83820_hard_start_xmit,
+ .ndo_get_stats = ns83820_get_stats,
+ .ndo_change_mtu = ns83820_change_mtu,
+ .ndo_set_rx_mode = ns83820_set_multicast,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_tx_timeout = ns83820_tx_timeout,
+};
+
+static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
+{
+ struct net_device *ndev;
+ struct ns83820 *dev;
+ long addr;
+ int err;
+ int using_dac = 0;
+
+ /* See if we can set the dma mask early on; failure is fatal. */
+ if (sizeof(dma_addr_t) == 8 &&
+ !pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
+ using_dac = 1;
+ } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+ using_dac = 0;
+ } else {
+ dev_warn(&pci_dev->dev, "pci_set_dma_mask failed!\n");
+ return -ENODEV;
+ }
+
+ ndev = alloc_etherdev(sizeof(struct ns83820));
+ err = -ENOMEM;
+ if (!ndev)
+ goto out;
+
+ dev = PRIV(ndev);
+ dev->ndev = ndev;
+
+ spin_lock_init(&dev->rx_info.lock);
+ spin_lock_init(&dev->tx_lock);
+ spin_lock_init(&dev->misc_lock);
+ dev->pci_dev = pci_dev;
+
+ SET_NETDEV_DEV(ndev, &pci_dev->dev);
+
+ INIT_WORK(&dev->tq_refill, queue_refill);
+ tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev);
+
+ err = pci_enable_device(pci_dev);
+ if (err) {
+ dev_info(&pci_dev->dev, "pci_enable_device failed: %d\n", err);
+ goto out_free;
+ }
+
+ pci_set_master(pci_dev);
+ addr = pci_resource_start(pci_dev, 1);
+ dev->base = ioremap_nocache(addr, PAGE_SIZE);
+ dev->tx_descs = pci_alloc_consistent(pci_dev,
+ 4 * DESC_SIZE * NR_TX_DESC, &dev->tx_phy_descs);
+ dev->rx_info.descs = pci_alloc_consistent(pci_dev,
+ 4 * DESC_SIZE * NR_RX_DESC, &dev->rx_info.phy_descs);
+ err = -ENOMEM;
+ if (!dev->base || !dev->tx_descs || !dev->rx_info.descs)
+ goto out_disable;
+
+ dprintk("%p: %08lx %p: %08lx\n",
+ dev->tx_descs, (long)dev->tx_phy_descs,
+ dev->rx_info.descs, (long)dev->rx_info.phy_descs);
+
+ ns83820_disable_interrupts(dev);
+
+ dev->IMR_cache = 0;
+
+ err = request_irq(pci_dev->irq, ns83820_irq, IRQF_SHARED,
+ DRV_NAME, ndev);
+ if (err) {
+ dev_info(&pci_dev->dev, "unable to register irq %d, err %d\n",
+ pci_dev->irq, err);
+ goto out_disable;
+ }
+
+ /*
+ * FIXME: we are holding rtnl_lock() over an obscenely long area only
+ * because some of the setup code uses dev->name. It's Wrong(tm) -
+ * we should be using driver-specific names for all that stuff.
+ * For now that will do, but we really need to come back and kill
+ * most of the dev_alloc_name() users later.
+ */
+ rtnl_lock();
+ err = dev_alloc_name(ndev, ndev->name);
+ if (err < 0) {
+ dev_info(&pci_dev->dev, "unable to get netdev name: %d\n", err);
+ goto out_free_irq;
+ }
+
+ printk("%s: ns83820.c: 0x22c: %08x, subsystem: %04x:%04x\n",
+ ndev->name, le32_to_cpu(readl(dev->base + 0x22c)),
+ pci_dev->subsystem_vendor, pci_dev->subsystem_device);
+
+ ndev->netdev_ops = &netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &ops);
+ ndev->watchdog_timeo = 5 * HZ;
+ pci_set_drvdata(pci_dev, ndev);
+
+ ns83820_do_reset(dev, CR_RST);
+
+ /* Must reset the ram bist before running it */
+ writel(PTSCR_RBIST_RST, dev->base + PTSCR);
+ ns83820_run_bist(ndev, "sram bist", PTSCR_RBIST_EN,
+ PTSCR_RBIST_DONE, PTSCR_RBIST_FAIL);
+ ns83820_run_bist(ndev, "eeprom bist", PTSCR_EEBIST_EN, 0,
+ PTSCR_EEBIST_FAIL);
+ ns83820_run_bist(ndev, "eeprom load", PTSCR_EELOAD_EN, 0, 0);
+
+ /* I love config registers */
+ dev->CFG_cache = readl(dev->base + CFG);
+
+ if ((dev->CFG_cache & CFG_PCI64_DET)) {
+ printk(KERN_INFO "%s: detected 64 bit PCI data bus.\n",
+ ndev->name);
+ /*dev->CFG_cache |= CFG_DATA64_EN;*/
+ if (!(dev->CFG_cache & CFG_DATA64_EN))
+ printk(KERN_INFO "%s: EEPROM did not enable 64 bit bus. Disabled.\n",
+ ndev->name);
+ } else
+ dev->CFG_cache &= ~(CFG_DATA64_EN);
+
+ dev->CFG_cache &= (CFG_TBI_EN | CFG_MRM_DIS | CFG_MWI_DIS |
+ CFG_T64ADDR | CFG_DATA64_EN | CFG_EXT_125 |
+ CFG_M64ADDR);
+ dev->CFG_cache |= CFG_PINT_DUPSTS | CFG_PINT_LNKSTS | CFG_PINT_SPDSTS |
+ CFG_EXTSTS_EN | CFG_EXD | CFG_PESEL;
+ dev->CFG_cache |= CFG_REQALG;
+ dev->CFG_cache |= CFG_POW;
+ dev->CFG_cache |= CFG_TMRTEST;
+
+ /* When compiled with 64 bit addressing, we must always enable
+ * the 64 bit descriptor format.
+ */
+ if (sizeof(dma_addr_t) == 8)
+ dev->CFG_cache |= CFG_M64ADDR;
+ if (using_dac)
+ dev->CFG_cache |= CFG_T64ADDR;
+
+ /* Big endian mode does not seem to do what the docs suggest */
+ dev->CFG_cache &= ~CFG_BEM;
+
+ /* setup optical transceiver if we have one */
+ if (dev->CFG_cache & CFG_TBI_EN) {
+ printk(KERN_INFO "%s: enabling optical transceiver\n",
+ ndev->name);
+ writel(readl(dev->base + GPIOR) | 0x3e8, dev->base + GPIOR);
+
+ /* setup auto negotiation feature advertisement */
+ writel(readl(dev->base + TANAR)
+ | TANAR_HALF_DUP | TANAR_FULL_DUP,
+ dev->base + TANAR);
+
+ /* start auto negotiation */
+ writel(TBICR_MR_AN_ENABLE | TBICR_MR_RESTART_AN,
+ dev->base + TBICR);
+ writel(TBICR_MR_AN_ENABLE, dev->base + TBICR);
+ dev->linkstate = LINK_AUTONEGOTIATE;
+
+ dev->CFG_cache |= CFG_MODE_1000;
+ }
+
+ writel(dev->CFG_cache, dev->base + CFG);
+ dprintk("CFG: %08x\n", dev->CFG_cache);
+
+ if (reset_phy) {
+ printk(KERN_INFO "%s: resetting phy\n", ndev->name);
+ writel(dev->CFG_cache | CFG_PHY_RST, dev->base + CFG);
+ msleep(10);
+ writel(dev->CFG_cache, dev->base + CFG);
+ }
+
+#if 0 /* Huh? This sets the PCI latency register. Should be done via
+ * the PCI layer. FIXME.
+ */
+ if (readl(dev->base + SRR))
+ writel(readl(dev->base+0x20c) | 0xfe00, dev->base + 0x20c);
+#endif
+
+ /* Note! The DMA burst size interacts with packet
+ * transmission, such that the largest packet that
+ * can be transmitted is 8192 - FLTH - burst size.
+ * If only the transmit fifo was larger...
+ */
+ /* Ramit : 1024 DMA is not a good idea, it ends up banging
+ * some DELL and COMPAQ SMP systems */
+ writel(TXCFG_CSI | TXCFG_HBI | TXCFG_ATP | TXCFG_MXDMA512
+ | ((1600 / 32) * 0x100),
+ dev->base + TXCFG);
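
The `(1600 / 32) * 0x100` term above encodes a transmit fill threshold, counted in 32-byte units, into the byte-wide field at bits 15:8 of TXCFG (field layout per our reading of the DP83820 datasheet; the sketch below only illustrates the arithmetic):

    #include <stdio.h>

    int main(void)
    {
        unsigned bytes = 1600;           /* desired threshold in bytes */
        unsigned units = bytes / 32;     /* hardware counts 32-byte units */
        unsigned field = units * 0x100;  /* place the count in bits 15:8 */

        printf("threshold = %u units -> TXCFG field 0x%04x\n", units, field);
        return 0;
    }
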
+
+ /* Flush the interrupt holdoff timer */
+ writel(0x000, dev->base + IHR);
+ writel(0x100, dev->base + IHR);
+ writel(0x000, dev->base + IHR);
+
+ /* Set Rx to full duplex, don't accept runt, errored, long or length
+ * range errored packets. Use 512 byte DMA.
+ */
+ /* Ramit : 1024 DMA is not a good idea, it ends up banging
+ * some DELL and COMPAQ SMP systems
+ * Turn on ALP only if we are accepting Jumbo Packets */
+ writel(RXCFG_AEP | RXCFG_ARP | RXCFG_AIRL | RXCFG_RX_FD
+ | RXCFG_STRIPCRC
+ //| RXCFG_ALP
+ | (RXCFG_MXDMA512) | 0, dev->base + RXCFG);
+
+ /* Disable priority queueing */
+ writel(0, dev->base + PQCR);
+
+ /* Enable IP checksum validation and detection of VLAN headers.
+ * Note: do not set the reject options as at least the 0x102
+ * revision of the chip does not properly accept IP fragments
+ * at least for UDP.
+ */
+ /* Ramit : Be sure to turn on RXCFG_ARP if VLANs are enabled, since
+ * the MAC calculates the packet size AFTER stripping the VLAN
+ * header, and if a VLAN tagged packet of 64 bytes is received (like
+ * a ping with a VLAN header) then the card strips the 4 byte VLAN
+ * tag and then checks the packet size, so if RXCFG_ARP is not enabled,
+ * it discards it! These guys......
+ * Also turn on tag stripping if hardware acceleration is enabled.
+ */
+#ifdef NS83820_VLAN_ACCEL_SUPPORT
+#define VRCR_INIT_VALUE (VRCR_IPEN|VRCR_VTDEN|VRCR_VTREN)
+#else
+#define VRCR_INIT_VALUE (VRCR_IPEN|VRCR_VTDEN)
+#endif
+ writel(VRCR_INIT_VALUE, dev->base + VRCR);
+
+ /* Enable per-packet TCP/UDP/IP checksumming
+ * and per packet vlan tag insertion if
+ * vlan hardware acceleration is enabled
+ */
+#ifdef NS83820_VLAN_ACCEL_SUPPORT
+#define VTCR_INIT_VALUE (VTCR_PPCHK|VTCR_VPPTI)
+#else
+#define VTCR_INIT_VALUE VTCR_PPCHK
+#endif
+ writel(VTCR_INIT_VALUE, dev->base + VTCR);
+
+ /* Ramit : Enable async and sync pause frames */
+ /* writel(0, dev->base + PCR); */
+ writel((PCR_PS_MCAST | PCR_PS_DA | PCR_PSEN | PCR_FFLO_4K |
+ PCR_FFHI_8K | PCR_STLO_4 | PCR_STHI_8 | PCR_PAUSE_CNT),
+ dev->base + PCR);
+
+ /* Disable Wake On Lan */
+ writel(0, dev->base + WCSR);
+
+ ns83820_getmac(dev, ndev->dev_addr);
+
+ /* Yes, we support dumb IP checksum on transmit */
+ ndev->features |= NETIF_F_SG;
+ ndev->features |= NETIF_F_IP_CSUM;
+
+#ifdef NS83820_VLAN_ACCEL_SUPPORT
+ /* We also support hardware vlan acceleration */
+ ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+#endif
+
+ if (using_dac) {
+ printk(KERN_INFO "%s: using 64 bit addressing.\n",
+ ndev->name);
+ ndev->features |= NETIF_F_HIGHDMA;
+ }
+
+ printk(KERN_INFO "%s: ns83820 v" VERSION ": DP83820 v%u.%u: %pM io=0x%08lx irq=%d f=%s\n",
+ ndev->name,
+ (unsigned)readl(dev->base + SRR) >> 8,
+ (unsigned)readl(dev->base + SRR) & 0xff,
+ ndev->dev_addr, addr, pci_dev->irq,
+ (ndev->features & NETIF_F_HIGHDMA) ? "h,sg" : "sg"
+ );
+
+#ifdef PHY_CODE_IS_FINISHED
+ ns83820_probe_phy(ndev);
+#endif
+
+ err = register_netdevice(ndev);
+ if (err) {
+ printk(KERN_INFO "ns83820: unable to register netdev: %d\n", err);
+ goto out_cleanup;
+ }
+ rtnl_unlock();
+
+ return 0;
+
+out_cleanup:
+ ns83820_disable_interrupts(dev); /* paranoia */
+out_free_irq:
+ rtnl_unlock();
+ free_irq(pci_dev->irq, ndev);
+out_disable:
+ if (dev->base)
+ iounmap(dev->base);
+ pci_free_consistent(pci_dev, 4 * DESC_SIZE * NR_TX_DESC, dev->tx_descs, dev->tx_phy_descs);
+ pci_free_consistent(pci_dev, 4 * DESC_SIZE * NR_RX_DESC, dev->rx_info.descs, dev->rx_info.phy_descs);
+ pci_disable_device(pci_dev);
+out_free:
+ free_netdev(ndev);
+ pci_set_drvdata(pci_dev, NULL);
+out:
+ return err;
+}
+
+static void __devexit ns83820_remove_one(struct pci_dev *pci_dev)
+{
+ struct net_device *ndev = pci_get_drvdata(pci_dev);
+ struct ns83820 *dev = PRIV(ndev); /* ok even if NULL */
+
+ if (!ndev) /* paranoia */
+ return;
+
+ ns83820_disable_interrupts(dev); /* paranoia */
+
+ unregister_netdev(ndev);
+ free_irq(dev->pci_dev->irq, ndev);
+ iounmap(dev->base);
+ pci_free_consistent(dev->pci_dev, 4 * DESC_SIZE * NR_TX_DESC,
+ dev->tx_descs, dev->tx_phy_descs);
+ pci_free_consistent(dev->pci_dev, 4 * DESC_SIZE * NR_RX_DESC,
+ dev->rx_info.descs, dev->rx_info.phy_descs);
+ pci_disable_device(dev->pci_dev);
+ free_netdev(ndev);
+ pci_set_drvdata(pci_dev, NULL);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(ns83820_pci_tbl) = {
+ { 0x100b, 0x0022, PCI_ANY_ID, PCI_ANY_ID, 0, .driver_data = 0, },
+ { 0, },
+};
+
+static struct pci_driver driver = {
+ .name = "ns83820",
+ .id_table = ns83820_pci_tbl,
+ .probe = ns83820_init_one,
+ .remove = __devexit_p(ns83820_remove_one),
+#if 0 /* FIXME: implement */
+ .suspend = ,
+ .resume = ,
+#endif
+};
+
+
+static int __init ns83820_init(void)
+{
+ printk(KERN_INFO "ns83820.c: National Semiconductor DP83820 10/100/1000 driver.\n");
+ return pci_register_driver(&driver);
+}
+
+static void __exit ns83820_exit(void)
+{
+ pci_unregister_driver(&driver);
+}
+
+MODULE_AUTHOR("Benjamin LaHaise <bcrl@kvack.org>");
+MODULE_DESCRIPTION("National Semiconductor DP83820 10/100/1000 driver");
+MODULE_LICENSE("GPL");
+
+MODULE_DEVICE_TABLE(pci, ns83820_pci_tbl);
+
+module_param(lnksts, int, 0);
+MODULE_PARM_DESC(lnksts, "Polarity of LNKSTS bit");
+
+module_param(ihr, int, 0);
+MODULE_PARM_DESC(ihr, "Time in 100 us increments to delay interrupts (range 0-127)");
+
+module_param(reset_phy, int, 0);
+MODULE_PARM_DESC(reset_phy, "Set to 1 to reset the PHY on startup");
+
+module_init(ns83820_init);
+module_exit(ns83820_exit);
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
new file mode 100644
index 000000000000..26e25d7f5829
--- /dev/null
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -0,0 +1,742 @@
+/*
+ * sonic.c
+ *
+ * (C) 2005 Finn Thain
+ *
+ * Converted to DMA API, added zero-copy buffer handling, and
+ * (from the mac68k project) introduced dhd's support for 16-bit cards.
+ *
+ * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ *
+ * This driver is based on work from Andreas Busse, but most of
+ * the code is rewritten.
+ *
+ * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
+ *
+ * Core code included by system sonic drivers
+ *
+ * And... partially rewritten again by David Huggins-Daines in order
+ * to cope with screwed up Macintosh NICs that may or may not use
+ * 16-bit DMA.
+ *
+ * (C) 1999 David Huggins-Daines <dhd@debian.org>
+ *
+ */
+
+/*
+ * Sources: Olivetti M700-10 Risc Personal Computer hardware handbook,
+ * National Semiconductors data sheet for the DP83932B Sonic Ethernet
+ * controller, and the files "8390.c" and "skeleton.c" in this directory.
+ *
+ * Additional sources: Nat Semi data sheet for the DP83932C and Nat Semi
+ * Application Note AN-746, the files "lance.c" and "ibmlana.c". See also
+ * the NetBSD file "sys/arch/mac68k/dev/if_sn.c".
+ */
+
+
+
+/*
+ * Open/initialize the SONIC controller.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+static int sonic_open(struct net_device *dev)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+ int i;
+
+ if (sonic_debug > 2)
+ printk("sonic_open: initializing sonic driver.\n");
+
+ for (i = 0; i < SONIC_NUM_RRS; i++) {
+ struct sk_buff *skb = dev_alloc_skb(SONIC_RBSIZE + 2);
+ if (skb == NULL) {
+ while(i > 0) { /* free any that were allocated successfully */
+ i--;
+ dev_kfree_skb(lp->rx_skb[i]);
+ lp->rx_skb[i] = NULL;
+ }
+ printk(KERN_ERR "%s: couldn't allocate receive buffers\n",
+ dev->name);
+ return -ENOMEM;
+ }
+ /* align IP header unless DMA requires otherwise */
+ if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
+ skb_reserve(skb, 2);
+ lp->rx_skb[i] = skb;
+ }
+
+ for (i = 0; i < SONIC_NUM_RRS; i++) {
+ dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
+ SONIC_RBSIZE, DMA_FROM_DEVICE);
+ if (!laddr) {
+ while(i > 0) { /* free any that were mapped successfully */
+ i--;
+ dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
+ lp->rx_laddr[i] = (dma_addr_t)0;
+ }
+ for (i = 0; i < SONIC_NUM_RRS; i++) {
+ dev_kfree_skb(lp->rx_skb[i]);
+ lp->rx_skb[i] = NULL;
+ }
+ printk(KERN_ERR "%s: couldn't map rx DMA buffers\n",
+ dev->name);
+ return -ENOMEM;
+ }
+ lp->rx_laddr[i] = laddr;
+ }
+
+ /*
+ * Initialize the SONIC
+ */
+ sonic_init(dev);
+
+ netif_start_queue(dev);
+
+ if (sonic_debug > 2)
+ printk("sonic_open: Initialization done.\n");
+
+ return 0;
+}
+
+
+/*
+ * Close the SONIC device
+ */
+static int sonic_close(struct net_device *dev)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+ int i;
+
+ if (sonic_debug > 2)
+ printk("sonic_close\n");
+
+ netif_stop_queue(dev);
+
+ /*
+ * stop the SONIC, disable interrupts
+ */
+ SONIC_WRITE(SONIC_IMR, 0);
+ SONIC_WRITE(SONIC_ISR, 0x7fff);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
+
+ /* unmap and free skbs that haven't been transmitted */
+ for (i = 0; i < SONIC_NUM_TDS; i++) {
+ if(lp->tx_laddr[i]) {
+ dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
+ lp->tx_laddr[i] = (dma_addr_t)0;
+ }
+ if(lp->tx_skb[i]) {
+ dev_kfree_skb(lp->tx_skb[i]);
+ lp->tx_skb[i] = NULL;
+ }
+ }
+
+ /* unmap and free the receive buffers */
+ for (i = 0; i < SONIC_NUM_RRS; i++) {
+ if(lp->rx_laddr[i]) {
+ dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
+ lp->rx_laddr[i] = (dma_addr_t)0;
+ }
+ if(lp->rx_skb[i]) {
+ dev_kfree_skb(lp->rx_skb[i]);
+ lp->rx_skb[i] = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static void sonic_tx_timeout(struct net_device *dev)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+ int i;
+ /*
+ * put the Sonic into software-reset mode and
+ * disable all interrupts before releasing DMA buffers
+ */
+ SONIC_WRITE(SONIC_IMR, 0);
+ SONIC_WRITE(SONIC_ISR, 0x7fff);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
+ /* We could resend the original skbs. Easier to re-initialise. */
+ for (i = 0; i < SONIC_NUM_TDS; i++) {
+ if(lp->tx_laddr[i]) {
+ dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
+ lp->tx_laddr[i] = (dma_addr_t)0;
+ }
+ if(lp->tx_skb[i]) {
+ dev_kfree_skb(lp->tx_skb[i]);
+ lp->tx_skb[i] = NULL;
+ }
+ }
+ /* Try to restart the adaptor. */
+ sonic_init(dev);
+ lp->stats.tx_errors++;
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+/*
+ * transmit packet
+ *
+ * Appends new TDs during transmission, thus avoiding any TX interrupts
+ * until we run out of TDs.
+ * This routine interacts closely with the ISR in that it may,
+ * set tx_skb[i]
+ * reset the status flags of the new TD
+ * set and reset EOL flags
+ * stop the tx queue
+ * The ISR interacts with this routine in various ways. It may,
+ * reset tx_skb[i]
+ * test the EOL and status flags of the TDs
+ * wake the tx queue
+ * Concurrently with all of this, the SONIC is potentially writing to
+ * the status flags of the TDs.
+ * Until some mutual exclusion is added, this code will not work with SMP. However,
+ * MIPS Jazz machines and m68k Macs were all uni-processor machines.
+ */
+
+static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+ dma_addr_t laddr;
+ int length;
+ int entry = lp->next_tx;
+
+ if (sonic_debug > 2)
+ printk("sonic_send_packet: skb=%p, dev=%p\n", skb, dev);
+
+ length = skb->len;
+ if (length < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ length = ETH_ZLEN;
+ }
+
+ /*
+ * Map the packet data into the logical DMA address space
+ */
+
+ laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
+ if (!laddr) {
+ printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_BUSY;
+ }
+
+ sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
+ sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
+ sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
+ sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
+ sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
+ sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
+ sonic_tda_put(dev, entry, SONIC_TD_LINK,
+ sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
+
+ /*
+ * Must set tx_skb[entry] only after clearing status, and
+ * before clearing EOL and before stopping queue
+ */
+ wmb();
+ lp->tx_len[entry] = length;
+ lp->tx_laddr[entry] = laddr;
+ lp->tx_skb[entry] = skb;
+
+ wmb();
+ sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK,
+ sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL);
+ lp->eol_tx = entry;
+
+ lp->next_tx = (entry + 1) & SONIC_TDS_MASK;
+ if (lp->tx_skb[lp->next_tx] != NULL) {
+ /* The ring is full, the ISR has yet to process the next TD. */
+ if (sonic_debug > 3)
+ printk("%s: stopping queue\n", dev->name);
+ netif_stop_queue(dev);
+ /* after this packet, wait for ISR to free up some TDAs */
+ } else netif_start_queue(dev);
+
+ if (sonic_debug > 2)
+ printk("sonic_send_packet: issuing Tx command\n");
+
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+
+ return NETDEV_TX_OK;
+}
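
The `(entry + 1) & SONIC_TDS_MASK` step relies on SONIC_NUM_TDS being a power of two, so the AND behaves like a modulo and wraps the index around the descriptor ring. A quick standalone check:

    #include <stdio.h>

    #define SONIC_NUM_TDS 16
    #define SONIC_TDS_MASK (SONIC_NUM_TDS - 1)

    int main(void)
    {
        int entry;

        /* walk a few steps past the end of the ring to show the wrap */
        for (entry = 14; entry < 18; entry++)
            printf("entry %2d -> next %2d\n",
                   entry & SONIC_TDS_MASK, (entry + 1) & SONIC_TDS_MASK);
        return 0;
    }
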
+
+/*
+ * The typical workload of the driver:
+ * Handle the network interface interrupts.
+ */
+static irqreturn_t sonic_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct sonic_local *lp = netdev_priv(dev);
+ int status;
+
+ if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
+ return IRQ_NONE;
+
+ do {
+ if (status & SONIC_INT_PKTRX) {
+ if (sonic_debug > 2)
+ printk("%s: packet rx\n", dev->name);
+ sonic_rx(dev); /* got packet(s) */
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
+ }
+
+ if (status & SONIC_INT_TXDN) {
+ int entry = lp->cur_tx;
+ int td_status;
+ int freed_some = 0;
+
+ /* At this point, cur_tx is the index of a TD that is one of:
+ * unallocated/freed (status set & tx_skb[entry] clear)
+ * allocated and sent (status set & tx_skb[entry] set )
+ * allocated and not yet sent (status clear & tx_skb[entry] set )
+ * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
+ */
+
+ if (sonic_debug > 2)
+ printk("%s: tx done\n", dev->name);
+
+ while (lp->tx_skb[entry] != NULL) {
+ if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
+ break;
+
+ if (td_status & 0x0001) {
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
+ } else {
+ lp->stats.tx_errors++;
+ if (td_status & 0x0642)
+ lp->stats.tx_aborted_errors++;
+ if (td_status & 0x0180)
+ lp->stats.tx_carrier_errors++;
+ if (td_status & 0x0020)
+ lp->stats.tx_window_errors++;
+ if (td_status & 0x0004)
+ lp->stats.tx_fifo_errors++;
+ }
+
+ /* We must free the original skb */
+ dev_kfree_skb_irq(lp->tx_skb[entry]);
+ lp->tx_skb[entry] = NULL;
+ /* and unmap DMA buffer */
+ dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE);
+ lp->tx_laddr[entry] = (dma_addr_t)0;
+ freed_some = 1;
+
+ if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) {
+ entry = (entry + 1) & SONIC_TDS_MASK;
+ break;
+ }
+ entry = (entry + 1) & SONIC_TDS_MASK;
+ }
+
+ if (freed_some || lp->tx_skb[entry] == NULL)
+ netif_wake_queue(dev); /* The ring is no longer full */
+ lp->cur_tx = entry;
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
+ }
+
+ /*
+ * check error conditions
+ */
+ if (status & SONIC_INT_RFO) {
+ if (sonic_debug > 1)
+ printk("%s: rx fifo overrun\n", dev->name);
+ lp->stats.rx_fifo_errors++;
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
+ }
+ if (status & SONIC_INT_RDE) {
+ if (sonic_debug > 1)
+ printk("%s: rx descriptors exhausted\n", dev->name);
+ lp->stats.rx_dropped++;
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
+ }
+ if (status & SONIC_INT_RBAE) {
+ if (sonic_debug > 1)
+ printk("%s: rx buffer area exceeded\n", dev->name);
+ lp->stats.rx_dropped++;
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
+ }
+
+ /* counter overruns; all counters are 16bit wide */
+ if (status & SONIC_INT_FAE) {
+ lp->stats.rx_frame_errors += 65536;
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
+ }
+ if (status & SONIC_INT_CRC) {
+ lp->stats.rx_crc_errors += 65536;
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
+ }
+ if (status & SONIC_INT_MP) {
+ lp->stats.rx_missed_errors += 65536;
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
+ }
+
+ /* transmit error */
+ if (status & SONIC_INT_TXER) {
+ if ((SONIC_READ(SONIC_TCR) & SONIC_TCR_FU) && (sonic_debug > 2))
+ printk(KERN_ERR "%s: tx fifo underrun\n", dev->name);
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
+ }
+
+ /* bus retry */
+ if (status & SONIC_INT_BR) {
+ printk(KERN_ERR "%s: Bus retry occurred! Device interrupt disabled.\n",
+ dev->name);
+ /* ... to help debug DMA problems causing endless interrupts. */
+ /* Bounce the eth interface to turn on the interrupt again. */
+ SONIC_WRITE(SONIC_IMR, 0);
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
+ }
+
+ /* load CAM done */
+ if (status & SONIC_INT_LCD)
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
+ } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
+ return IRQ_HANDLED;
+}
+
+/*
+ * We have a good packet(s), pass it/them up the network stack.
+ */
+static void sonic_rx(struct net_device *dev)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+ int status;
+ int entry = lp->cur_rx;
+
+ while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
+ struct sk_buff *used_skb;
+ struct sk_buff *new_skb;
+ dma_addr_t new_laddr;
+ u16 bufadr_l;
+ u16 bufadr_h;
+ int pkt_len;
+
+ status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
+ if (status & SONIC_RCR_PRX) {
+ /* Malloc up new buffer. */
+ new_skb = dev_alloc_skb(SONIC_RBSIZE + 2);
+ if (new_skb == NULL) {
+ printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ /* provide 16 byte IP header alignment unless DMA requires otherwise */
+ if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
+ skb_reserve(new_skb, 2);
+
+ new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
+ SONIC_RBSIZE, DMA_FROM_DEVICE);
+ if (!new_laddr) {
+ dev_kfree_skb(new_skb);
+ printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+
+ /* now we have a new skb to replace it, pass the used one up the stack */
+ dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
+ used_skb = lp->rx_skb[entry];
+ pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
+ skb_trim(used_skb, pkt_len);
+ used_skb->protocol = eth_type_trans(used_skb, dev);
+ netif_rx(used_skb);
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+
+ /* and insert the new skb */
+ lp->rx_laddr[entry] = new_laddr;
+ lp->rx_skb[entry] = new_skb;
+
+ bufadr_l = (unsigned long)new_laddr & 0xffff;
+ bufadr_h = (unsigned long)new_laddr >> 16;
+ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
+ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
+ } else {
+ /* This should only happen if we enable accepting broken packets. */
+ lp->stats.rx_errors++;
+ if (status & SONIC_RCR_FAER)
+ lp->stats.rx_frame_errors++;
+ if (status & SONIC_RCR_CRCR)
+ lp->stats.rx_crc_errors++;
+ }
+ if (status & SONIC_RCR_LPKT) {
+ /*
+ * this was the last packet out of the current receive buffer
+ * give the buffer back to the SONIC
+ */
+ lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
+ if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
+ SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
+ if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
+ if (sonic_debug > 2)
+ printk("%s: rx buffer exhausted\n", dev->name);
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
+ }
+ } else
+ printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
+ dev->name);
+ /*
+ * give back the descriptor
+ */
+ sonic_rda_put(dev, entry, SONIC_RD_LINK,
+ sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
+ sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
+ sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
+ sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
+ lp->eol_rx = entry;
+ lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;
+ }
+ /*
+ * If any worth-while packets have been received, netif_rx()
+ * has done a mark_bh(NET_BH) for us and will work on them
+ * when we get to the bottom-half routine.
+ */
+}
+
+
+/*
+ * Get the current statistics.
+ * This may be called with the device open or closed.
+ */
+static struct net_device_stats *sonic_get_stats(struct net_device *dev)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+
+ /* read the tally counter from the SONIC and reset them */
+ lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
+ SONIC_WRITE(SONIC_CRCT, 0xffff);
+ lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET);
+ SONIC_WRITE(SONIC_FAET, 0xffff);
+ lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT);
+ SONIC_WRITE(SONIC_MPT, 0xffff);
+
+ return &lp->stats;
+}
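
The tally counters are only 16 bits wide: the interrupt handler adds 65536 each time one rolls over, and this routine folds in the residual hardware count before resetting the register to 0xffff. A sketch of how the two contributions combine (the sample values are invented):

    #include <stdio.h>

    int main(void)
    {
        unsigned long total = 0;
        unsigned overflows = 3;       /* hypothetical: ISR saw 3 rollovers */
        unsigned residue = 0x01a4;    /* hypothetical: current counter value */

        total += overflows * 65536UL; /* added incrementally by the ISR */
        total += residue;             /* folded in by sonic_get_stats() */

        printf("rx_crc_errors = %lu\n", total);
        return 0;
    }
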
+
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+static void sonic_multicast_list(struct net_device *dev)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+ unsigned int rcr;
+ struct netdev_hw_addr *ha;
+ unsigned char *addr;
+ int i;
+
+ rcr = SONIC_READ(SONIC_RCR) & ~(SONIC_RCR_PRO | SONIC_RCR_AMC);
+ rcr |= SONIC_RCR_BRD; /* accept broadcast packets */
+
+ if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
+ rcr |= SONIC_RCR_PRO;
+ } else {
+ if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > 15)) {
+ rcr |= SONIC_RCR_AMC;
+ } else {
+ if (sonic_debug > 2)
+ printk("sonic_multicast_list: mc_count %d\n",
+ netdev_mc_count(dev));
+ sonic_set_cam_enable(dev, 1); /* always enable our own address */
+ i = 1;
+ netdev_for_each_mc_addr(ha, dev) {
+ addr = ha->addr;
+ sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
+ sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
+ sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
+ sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i));
+ i++;
+ }
+ SONIC_WRITE(SONIC_CDC, 16);
+ /* issue Load CAM command */
+ SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
+ }
+ }
+
+ if (sonic_debug > 2)
+ printk("sonic_multicast_list: setting RCR=%x\n", rcr);
+
+ SONIC_WRITE(SONIC_RCR, rcr);
+}
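
Each CAM entry holds the six MAC address bytes as three little-endian 16-bit words (CAP0..CAP2), which is why the loop packs `addr[1] << 8 | addr[0]` and so on. A minimal sketch of that packing (the sample MAC is made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t addr[6] = { 0x00, 0x40, 0x95, 0x12, 0x34, 0x56 };
        uint16_t cap[3];
        int i;

        for (i = 0; i < 3; i++)    /* low byte in the low half of each word */
            cap[i] = addr[i * 2 + 1] << 8 | addr[i * 2];

        printf("CAP0=%04x CAP1=%04x CAP2=%04x\n", cap[0], cap[1], cap[2]);
        return 0;
    }
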
+
+
+/*
+ * Initialize the SONIC ethernet controller.
+ */
+static int sonic_init(struct net_device *dev)
+{
+ unsigned int cmd;
+ struct sonic_local *lp = netdev_priv(dev);
+ int i;
+
+ /*
+ * put the Sonic into software-reset mode and
+ * disable all interrupts
+ */
+ SONIC_WRITE(SONIC_IMR, 0);
+ SONIC_WRITE(SONIC_ISR, 0x7fff);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
+
+ /*
+ * clear software reset flag, disable receiver, clear and
+ * enable interrupts, then completely initialize the SONIC
+ */
+ SONIC_WRITE(SONIC_CMD, 0);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+
+ /*
+ * initialize the receive resource area
+ */
+ if (sonic_debug > 2)
+ printk("sonic_init: initialize receive resource area\n");
+
+ for (i = 0; i < SONIC_NUM_RRS; i++) {
+ u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff;
+ u16 bufadr_h = (unsigned long)lp->rx_laddr[i] >> 16;
+ sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l);
+ sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h);
+ sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_L, SONIC_RBSIZE >> 1);
+ sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_H, 0);
+ }
+
+ /* initialize all RRA registers */
+ lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
+ SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
+ lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
+ SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
+
+ SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
+ SONIC_WRITE(SONIC_REA, lp->rra_end);
+ SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
+ SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
+ SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
+ SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
+
+ /* load the resource pointers */
+ if (sonic_debug > 3)
+ printk("sonic_init: issuing RRRA command\n");
+
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
+ i = 0;
+ while (i++ < 100) {
+ if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
+ break;
+ }
+
+ if (sonic_debug > 2)
+ printk("sonic_init: status=%x i=%d\n", SONIC_READ(SONIC_CMD), i);
+
+ /*
+ * Initialize the receive descriptors so that they
+ * become a circular linked list, ie. let the last
+ * descriptor point to the first again.
+ */
+ if (sonic_debug > 2)
+ printk("sonic_init: initialize receive descriptors\n");
+ for (i=0; i<SONIC_NUM_RDS; i++) {
+ sonic_rda_put(dev, i, SONIC_RD_STATUS, 0);
+ sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0);
+ sonic_rda_put(dev, i, SONIC_RD_PKTPTR_L, 0);
+ sonic_rda_put(dev, i, SONIC_RD_PKTPTR_H, 0);
+ sonic_rda_put(dev, i, SONIC_RD_SEQNO, 0);
+ sonic_rda_put(dev, i, SONIC_RD_IN_USE, 1);
+ sonic_rda_put(dev, i, SONIC_RD_LINK,
+ lp->rda_laddr +
+ ((i+1) * SIZEOF_SONIC_RD * SONIC_BUS_SCALE(lp->dma_bitmode)));
+ }
+ /* fix last descriptor */
+ sonic_rda_put(dev, SONIC_NUM_RDS - 1, SONIC_RD_LINK,
+ (lp->rda_laddr & 0xffff) | SONIC_EOL);
+ lp->eol_rx = SONIC_NUM_RDS - 1;
+ lp->cur_rx = 0;
+ SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16);
+ SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff);
+
+ /*
+ * initialize transmit descriptors
+ */
+ if (sonic_debug > 2)
+ printk("sonic_init: initialize transmit descriptors\n");
+ for (i = 0; i < SONIC_NUM_TDS; i++) {
+ sonic_tda_put(dev, i, SONIC_TD_STATUS, 0);
+ sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0);
+ sonic_tda_put(dev, i, SONIC_TD_PKTSIZE, 0);
+ sonic_tda_put(dev, i, SONIC_TD_FRAG_COUNT, 0);
+ sonic_tda_put(dev, i, SONIC_TD_LINK,
+ (lp->tda_laddr & 0xffff) +
+ (i + 1) * SIZEOF_SONIC_TD * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->tx_skb[i] = NULL;
+ }
+ /* fix last descriptor */
+ sonic_tda_put(dev, SONIC_NUM_TDS - 1, SONIC_TD_LINK,
+ (lp->tda_laddr & 0xffff));
+
+ SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
+ SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
+ lp->cur_tx = lp->next_tx = 0;
+ lp->eol_tx = SONIC_NUM_TDS - 1;
+
+ /*
+ * put our own address to CAM desc[0]
+ */
+ sonic_cda_put(dev, 0, SONIC_CD_CAP0, dev->dev_addr[1] << 8 | dev->dev_addr[0]);
+ sonic_cda_put(dev, 0, SONIC_CD_CAP1, dev->dev_addr[3] << 8 | dev->dev_addr[2]);
+ sonic_cda_put(dev, 0, SONIC_CD_CAP2, dev->dev_addr[5] << 8 | dev->dev_addr[4]);
+ sonic_set_cam_enable(dev, 1);
+
+ for (i = 0; i < 16; i++)
+ sonic_cda_put(dev, i, SONIC_CD_ENTRY_POINTER, i);
+
+ /*
+ * initialize CAM registers
+ */
+ SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
+ SONIC_WRITE(SONIC_CDC, 16);
+
+ /*
+ * load the CAM
+ */
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
+
+ i = 0;
+ while (i++ < 100) {
+ if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
+ break;
+ }
+ if (sonic_debug > 2) {
+ printk("sonic_init: CMD=%x, ISR=%x\n, i=%d",
+ SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
+ }
+
+ /*
+ * enable receiver, disable loopback
+ * and enable all interrupts
+ */
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
+ SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
+ SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
+ SONIC_WRITE(SONIC_ISR, 0x7fff);
+ SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
+
+ cmd = SONIC_READ(SONIC_CMD);
+ if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
+ printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
+
+ if (sonic_debug > 2)
+ printk("sonic_init: new status=%x\n",
+ SONIC_READ(SONIC_CMD));
+
+ return 0;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h
new file mode 100644
index 000000000000..07091dd27e5d
--- /dev/null
+++ b/drivers/net/ethernet/natsemi/sonic.h
@@ -0,0 +1,450 @@
+/*
+ * Header file for sonic.c
+ *
+ * (C) Waldorf Electronics, Germany
+ * Written by Andreas Busse
+ *
+ * NOTE: most of the structure definitions here are endian dependent.
+ * If you want to use this driver on big endian machines, the data
+ * and pad structure members must be exchanged. Also, the structures
+ * need to be changed according to the bus size.
+ *
+ * 981229 MSch: did just that for the 68k Mac port (32 bit, big endian)
+ *
+ * 990611 David Huggins-Daines <dhd@debian.org>: This machine abstraction
+ * does not cope with 16-bit bus sizes very well. Therefore I have
+ * rewritten it with ugly macros and evil inlines.
+ *
+ * 050625 Finn Thain: introduced more 32-bit cards and dhd's support
+ * for 16-bit cards (from the mac68k project).
+ */
+
+#ifndef SONIC_H
+#define SONIC_H
+
+
+/*
+ * SONIC register offsets
+ */
+
+#define SONIC_CMD 0x00
+#define SONIC_DCR 0x01
+#define SONIC_RCR 0x02
+#define SONIC_TCR 0x03
+#define SONIC_IMR 0x04
+#define SONIC_ISR 0x05
+
+#define SONIC_UTDA 0x06
+#define SONIC_CTDA 0x07
+
+#define SONIC_URDA 0x0d
+#define SONIC_CRDA 0x0e
+#define SONIC_EOBC 0x13
+#define SONIC_URRA 0x14
+#define SONIC_RSA 0x15
+#define SONIC_REA 0x16
+#define SONIC_RRP 0x17
+#define SONIC_RWP 0x18
+#define SONIC_RSC 0x2b
+
+#define SONIC_CEP 0x21
+#define SONIC_CAP2 0x22
+#define SONIC_CAP1 0x23
+#define SONIC_CAP0 0x24
+#define SONIC_CE 0x25
+#define SONIC_CDP 0x26
+#define SONIC_CDC 0x27
+
+#define SONIC_WT0 0x29
+#define SONIC_WT1 0x2a
+
+#define SONIC_SR 0x28
+
+
+/* test-only registers */
+
+#define SONIC_TPS 0x08
+#define SONIC_TFC 0x09
+#define SONIC_TSA0 0x0a
+#define SONIC_TSA1 0x0b
+#define SONIC_TFS 0x0c
+
+#define SONIC_CRBA0 0x0f
+#define SONIC_CRBA1 0x10
+#define SONIC_RBWC0 0x11
+#define SONIC_RBWC1 0x12
+#define SONIC_TTDA 0x20
+#define SONIC_MDT 0x2f
+
+#define SONIC_TRBA0 0x19
+#define SONIC_TRBA1 0x1a
+#define SONIC_TBWC0 0x1b
+#define SONIC_TBWC1 0x1c
+#define SONIC_LLFA 0x1f
+
+#define SONIC_ADDR0 0x1d
+#define SONIC_ADDR1 0x1e
+
+/*
+ * Error counters
+ */
+
+#define SONIC_CRCT 0x2c
+#define SONIC_FAET 0x2d
+#define SONIC_MPT 0x2e
+
+#define SONIC_DCR2 0x3f
+
+/*
+ * SONIC command bits
+ */
+
+#define SONIC_CR_LCAM 0x0200
+#define SONIC_CR_RRRA 0x0100
+#define SONIC_CR_RST 0x0080
+#define SONIC_CR_ST 0x0020
+#define SONIC_CR_STP 0x0010
+#define SONIC_CR_RXEN 0x0008
+#define SONIC_CR_RXDIS 0x0004
+#define SONIC_CR_TXP 0x0002
+#define SONIC_CR_HTX 0x0001
+
+/*
+ * SONIC data configuration bits
+ */
+
+#define SONIC_DCR_EXBUS 0x8000
+#define SONIC_DCR_LBR 0x2000
+#define SONIC_DCR_PO1 0x1000
+#define SONIC_DCR_PO0 0x0800
+#define SONIC_DCR_SBUS 0x0400
+#define SONIC_DCR_USR1 0x0200
+#define SONIC_DCR_USR0 0x0100
+#define SONIC_DCR_WC1 0x0080
+#define SONIC_DCR_WC0 0x0040
+#define SONIC_DCR_DW 0x0020
+#define SONIC_DCR_BMS 0x0010
+#define SONIC_DCR_RFT1 0x0008
+#define SONIC_DCR_RFT0 0x0004
+#define SONIC_DCR_TFT1 0x0002
+#define SONIC_DCR_TFT0 0x0001
+
+/*
+ * Constants for the SONIC receive control register.
+ */
+
+#define SONIC_RCR_ERR 0x8000
+#define SONIC_RCR_RNT 0x4000
+#define SONIC_RCR_BRD 0x2000
+#define SONIC_RCR_PRO 0x1000
+#define SONIC_RCR_AMC 0x0800
+#define SONIC_RCR_LB1 0x0400
+#define SONIC_RCR_LB0 0x0200
+
+#define SONIC_RCR_MC 0x0100
+#define SONIC_RCR_BC 0x0080
+#define SONIC_RCR_LPKT 0x0040
+#define SONIC_RCR_CRS 0x0020
+#define SONIC_RCR_COL 0x0010
+#define SONIC_RCR_CRCR 0x0008
+#define SONIC_RCR_FAER 0x0004
+#define SONIC_RCR_LBK 0x0002
+#define SONIC_RCR_PRX 0x0001
+
+#define SONIC_RCR_LB_OFF 0
+#define SONIC_RCR_LB_MAC SONIC_RCR_LB0
+#define SONIC_RCR_LB_ENDEC SONIC_RCR_LB1
+#define SONIC_RCR_LB_TRANS (SONIC_RCR_LB0 | SONIC_RCR_LB1)
+
+/* default RCR setup */
+
+#define SONIC_RCR_DEFAULT (SONIC_RCR_BRD)
+
+
+/*
+ * SONIC Transmit Control register bits
+ */
+
+#define SONIC_TCR_PINTR 0x8000
+#define SONIC_TCR_POWC 0x4000
+#define SONIC_TCR_CRCI 0x2000
+#define SONIC_TCR_EXDIS 0x1000
+#define SONIC_TCR_EXD 0x0400
+#define SONIC_TCR_DEF 0x0200
+#define SONIC_TCR_NCRS 0x0100
+#define SONIC_TCR_CRLS 0x0080
+#define SONIC_TCR_EXC 0x0040
+#define SONIC_TCR_PMB 0x0008
+#define SONIC_TCR_FU 0x0004
+#define SONIC_TCR_BCM 0x0002
+#define SONIC_TCR_PTX 0x0001
+
+#define SONIC_TCR_DEFAULT 0x0000
+
+/*
+ * Constants for the SONIC_INTERRUPT_MASK and
+ * SONIC_INTERRUPT_STATUS registers.
+ */
+
+#define SONIC_INT_BR 0x4000
+#define SONIC_INT_HBL 0x2000
+#define SONIC_INT_LCD 0x1000
+#define SONIC_INT_PINT 0x0800
+#define SONIC_INT_PKTRX 0x0400
+#define SONIC_INT_TXDN 0x0200
+#define SONIC_INT_TXER 0x0100
+#define SONIC_INT_TC 0x0080
+#define SONIC_INT_RDE 0x0040
+#define SONIC_INT_RBE 0x0020
+#define SONIC_INT_RBAE 0x0010
+#define SONIC_INT_CRC 0x0008
+#define SONIC_INT_FAE 0x0004
+#define SONIC_INT_MP 0x0002
+#define SONIC_INT_RFO 0x0001
+
+
+/*
+ * The interrupts we allow.
+ */
+
+#define SONIC_IMR_DEFAULT ( SONIC_INT_BR | \
+ SONIC_INT_LCD | \
+ SONIC_INT_RFO | \
+ SONIC_INT_PKTRX | \
+ SONIC_INT_TXDN | \
+ SONIC_INT_TXER | \
+ SONIC_INT_RDE | \
+ SONIC_INT_RBAE | \
+ SONIC_INT_CRC | \
+ SONIC_INT_FAE | \
+ SONIC_INT_MP)
+
+
+#define SONIC_EOL 0x0001
+#define CAM_DESCRIPTORS 16
+
+/* Offsets in the various DMA buffers accessed by the SONIC */
+
+#define SONIC_BITMODE16 0
+#define SONIC_BITMODE32 1
+#define SONIC_BUS_SCALE(bitmode) ((bitmode) ? 4 : 2)
+/* Note! These are all measured in bus-size units, so use SONIC_BUS_SCALE */
+#define SIZEOF_SONIC_RR 4
+#define SONIC_RR_BUFADR_L 0
+#define SONIC_RR_BUFADR_H 1
+#define SONIC_RR_BUFSIZE_L 2
+#define SONIC_RR_BUFSIZE_H 3
+
+#define SIZEOF_SONIC_RD 7
+#define SONIC_RD_STATUS 0
+#define SONIC_RD_PKTLEN 1
+#define SONIC_RD_PKTPTR_L 2
+#define SONIC_RD_PKTPTR_H 3
+#define SONIC_RD_SEQNO 4
+#define SONIC_RD_LINK 5
+#define SONIC_RD_IN_USE 6
+
+#define SIZEOF_SONIC_TD 8
+#define SONIC_TD_STATUS 0
+#define SONIC_TD_CONFIG 1
+#define SONIC_TD_PKTSIZE 2
+#define SONIC_TD_FRAG_COUNT 3
+#define SONIC_TD_FRAG_PTR_L 4
+#define SONIC_TD_FRAG_PTR_H 5
+#define SONIC_TD_FRAG_SIZE 6
+#define SONIC_TD_LINK 7
+
+#define SIZEOF_SONIC_CD 4
+#define SONIC_CD_ENTRY_POINTER 0
+#define SONIC_CD_CAP0 1
+#define SONIC_CD_CAP1 2
+#define SONIC_CD_CAP2 3
+
+#define SIZEOF_SONIC_CDA ((CAM_DESCRIPTORS * SIZEOF_SONIC_CD) + 1)
+#define SONIC_CDA_CAM_ENABLE (CAM_DESCRIPTORS * SIZEOF_SONIC_CD)
+
+/*
+ * Some tunables for the buffer areas. A power of 2 is required;
+ * the current driver uses one receive buffer for each descriptor.
+ *
+ * MSch: use more buffer space for the slow m68k Macs!
+ */
+#define SONIC_NUM_RRS 16 /* number of receive resources */
+#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
+#define SONIC_NUM_TDS 16 /* number of transmit descriptors */
+
+#define SONIC_RDS_MASK (SONIC_NUM_RDS-1)
+#define SONIC_TDS_MASK (SONIC_NUM_TDS-1)
+
+#define SONIC_RBSIZE 1520 /* size of one resource buffer */
+
+/* Again, measured in bus size units! */
+#define SIZEOF_SONIC_DESC (SIZEOF_SONIC_CDA \
+ + (SIZEOF_SONIC_TD * SONIC_NUM_TDS) \
+ + (SIZEOF_SONIC_RD * SONIC_NUM_RDS) \
+ + (SIZEOF_SONIC_RR * SONIC_NUM_RRS))
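
Since every constant above is measured in bus-size units, the byte footprint of the whole descriptor area depends on the bitmode. A standalone computation using the same macros (it also confirms the area is far smaller than the 64 kB segment limit discussed elsewhere):

    #include <stdio.h>

    #define CAM_DESCRIPTORS 16
    #define SIZEOF_SONIC_CD 4
    #define SIZEOF_SONIC_CDA ((CAM_DESCRIPTORS * SIZEOF_SONIC_CD) + 1)
    #define SIZEOF_SONIC_TD 8
    #define SIZEOF_SONIC_RD 7
    #define SIZEOF_SONIC_RR 4
    #define SONIC_NUM_RRS 16
    #define SONIC_NUM_RDS SONIC_NUM_RRS
    #define SONIC_NUM_TDS 16
    #define SONIC_BUS_SCALE(bitmode) ((bitmode) ? 4 : 2)

    #define SIZEOF_SONIC_DESC (SIZEOF_SONIC_CDA \
            + (SIZEOF_SONIC_TD * SONIC_NUM_TDS) \
            + (SIZEOF_SONIC_RD * SONIC_NUM_RDS) \
            + (SIZEOF_SONIC_RR * SONIC_NUM_RRS))

    int main(void)
    {
        /* 369 bus-size units -> 738 or 1476 bytes */
        printf("16-bit mode: %d bytes\n", SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(0));
        printf("32-bit mode: %d bytes\n", SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(1));
        return 0;
    }
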
+
+/* Information that needs to be kept for each board. */
+struct sonic_local {
+ /* Bus size. 0 == 16 bits, 1 == 32 bits. */
+ int dma_bitmode;
+ /* Register offset within the longword (independent of endianness,
+ and varies from one type of Macintosh SONIC to another
+ (Aarrgh)) */
+ int reg_offset;
+ void *descriptors;
+ /* Crud. These areas have to be within the same 64K. Therefore
+ we allocate a descriptors page, and point these to places within it. */
+ void *cda; /* CAM descriptor area */
+ void *tda; /* Transmit descriptor area */
+ void *rra; /* Receive resource area */
+ void *rda; /* Receive descriptor area */
+ struct sk_buff* volatile rx_skb[SONIC_NUM_RRS]; /* packets to be received */
+ struct sk_buff* volatile tx_skb[SONIC_NUM_TDS]; /* packets to be transmitted */
+ unsigned int tx_len[SONIC_NUM_TDS]; /* lengths of tx DMA mappings */
+ /* Logical DMA addresses on MIPS, bus addresses on m68k
+ * (so "laddr" is a bit misleading) */
+ dma_addr_t descriptors_laddr;
+ u32 cda_laddr; /* logical DMA address of CDA */
+ u32 tda_laddr; /* logical DMA address of TDA */
+ u32 rra_laddr; /* logical DMA address of RRA */
+ u32 rda_laddr; /* logical DMA address of RDA */
+ dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */
+ dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */
+ unsigned int rra_end;
+ unsigned int cur_rwp;
+ unsigned int cur_rx;
+ unsigned int cur_tx; /* first unacked transmit packet */
+ unsigned int eol_rx;
+ unsigned int eol_tx; /* last unacked transmit packet */
+ unsigned int next_tx; /* next free TD */
+ struct device *device; /* generic device */
+ struct net_device_stats stats;
+};
+
+#define TX_TIMEOUT (3 * HZ)
+
+/* Index to functions, as function prototypes. */
+
+static int sonic_open(struct net_device *dev);
+static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t sonic_interrupt(int irq, void *dev_id);
+static void sonic_rx(struct net_device *dev);
+static int sonic_close(struct net_device *dev);
+static struct net_device_stats *sonic_get_stats(struct net_device *dev);
+static void sonic_multicast_list(struct net_device *dev);
+static int sonic_init(struct net_device *dev);
+static void sonic_tx_timeout(struct net_device *dev);
+
+/* Internal inlines for reading/writing DMA buffers. Note that bus
+ size and endianness matter here, whereas they don't for registers,
+ as far as we can tell. */
+/* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put()
+ is a much better name. */
+static inline void sonic_buf_put(void* base, int bitmode,
+ int offset, __u16 val)
+{
+ if (bitmode)
+#ifdef __BIG_ENDIAN
+ ((__u16 *) base + (offset*2))[1] = val;
+#else
+ ((__u16 *) base + (offset*2))[0] = val;
+#endif
+ else
+ ((__u16 *) base)[offset] = val;
+}
+
+static inline __u16 sonic_buf_get(void* base, int bitmode,
+ int offset)
+{
+ if (bitmode)
+#ifdef __BIG_ENDIAN
+ return ((volatile __u16 *) base + (offset*2))[1];
+#else
+ return ((volatile __u16 *) base + (offset*2))[0];
+#endif
+ else
+ return ((volatile __u16 *) base)[offset];
+}
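
These accessors hide whether a 16-bit value occupies every halfword (16-bit mode) or the low half of each 32-bit slot (32-bit mode, with the half chosen by endianness). A little-endian userspace round-trip test with local copies of the two helpers:

    #include <stdio.h>
    #include <stdint.h>

    /* copies of sonic_buf_put()/sonic_buf_get() for a little-endian host */
    static void buf_put(void *base, int bitmode, int offset, uint16_t val)
    {
        if (bitmode)
            ((uint16_t *)base + offset * 2)[0] = val; /* low half of 32-bit slot */
        else
            ((uint16_t *)base)[offset] = val;
    }

    static uint16_t buf_get(void *base, int bitmode, int offset)
    {
        if (bitmode)
            return ((uint16_t *)base + offset * 2)[0];
        else
            return ((uint16_t *)base)[offset];
    }

    int main(void)
    {
        uint16_t area[8] = { 0 };

        buf_put(area, 1, 1, 0xbeef); /* 32-bit mode: offset 1 -> halfword 2 */
        buf_put(area, 0, 1, 0xcafe); /* 16-bit mode: offset 1 -> halfword 1 */

        printf("32-bit mode readback: %04x\n", buf_get(area, 1, 1));
        printf("16-bit mode readback: %04x\n", buf_get(area, 0, 1));
        return 0;
    }
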
+
+/* Inlines that you should actually use for reading/writing DMA buffers */
+static inline void sonic_cda_put(struct net_device* dev, int entry,
+ int offset, __u16 val)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+ sonic_buf_put(lp->cda, lp->dma_bitmode,
+ (entry * SIZEOF_SONIC_CD) + offset, val);
+}
+
+static inline __u16 sonic_cda_get(struct net_device* dev, int entry,
+ int offset)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+ return sonic_buf_get(lp->cda, lp->dma_bitmode,
+ (entry * SIZEOF_SONIC_CD) + offset);
+}
+
+static inline void sonic_set_cam_enable(struct net_device* dev, __u16 val)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+ sonic_buf_put(lp->cda, lp->dma_bitmode, SONIC_CDA_CAM_ENABLE, val);
+}
+
+static inline __u16 sonic_get_cam_enable(struct net_device* dev)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+ return sonic_buf_get(lp->cda, lp->dma_bitmode, SONIC_CDA_CAM_ENABLE);
+}
+
+static inline void sonic_tda_put(struct net_device* dev, int entry,
+ int offset, __u16 val)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+ sonic_buf_put(lp->tda, lp->dma_bitmode,
+ (entry * SIZEOF_SONIC_TD) + offset, val);
+}
+
+static inline __u16 sonic_tda_get(struct net_device* dev, int entry,
+ int offset)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+ return sonic_buf_get(lp->tda, lp->dma_bitmode,
+ (entry * SIZEOF_SONIC_TD) + offset);
+}
+
+static inline void sonic_rda_put(struct net_device* dev, int entry,
+ int offset, __u16 val)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+ sonic_buf_put(lp->rda, lp->dma_bitmode,
+ (entry * SIZEOF_SONIC_RD) + offset, val);
+}
+
+static inline __u16 sonic_rda_get(struct net_device* dev, int entry,
+ int offset)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+ return sonic_buf_get(lp->rda, lp->dma_bitmode,
+ (entry * SIZEOF_SONIC_RD) + offset);
+}
+
+static inline void sonic_rra_put(struct net_device* dev, int entry,
+ int offset, __u16 val)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+ sonic_buf_put(lp->rra, lp->dma_bitmode,
+ (entry * SIZEOF_SONIC_RR) + offset, val);
+}
+
+static inline __u16 sonic_rra_get(struct net_device* dev, int entry,
+ int offset)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+ return sonic_buf_get(lp->rra, lp->dma_bitmode,
+ (entry * SIZEOF_SONIC_RR) + offset);
+}
+
+static const char *version =
+ "sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n";
+
+#endif /* SONIC_H */
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
new file mode 100644
index 000000000000..ccf61b9da8d1
--- /dev/null
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -0,0 +1,333 @@
+/*
+ * xtsonic.c
+ *
+ * (C) 2001 - 2007 Tensilica Inc.
+ * Kevin Chea <kchea@yahoo.com>
+ * Marc Gauthier <marc@linux-xtensa.org>
+ * Chris Zankel <chris@zankel.net>
+ *
+ * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ *
+ * This driver is based on work from Andreas Busse, but most of
+ * the code is rewritten.
+ *
+ * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
+ *
+ * A driver for the onboard Sonic ethernet controller on the XT2000.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/dma.h>
+
+static char xtsonic_string[] = "xtsonic";
+
+extern unsigned xtboard_nvram_valid(void);
+extern void xtboard_get_ether_addr(unsigned char *buf);
+
+#include "sonic.h"
+
+/*
+ * According to the documentation for the Sonic ethernet controller,
+ * EOBC should be 760 words (1520 bytes) for 32-bit applications, and,
+ * as such, 2 words less than the buffer size. The value for RBSIZE
+ * defined in sonic.h, however, is only 1520.
+ *
+ * (Note that in 16-bit configurations, EOBC is 759 words (1518 bytes) and
+ * RBSIZE 1520 bytes)
+ */
+#undef SONIC_RBSIZE
+#define SONIC_RBSIZE 1524
+
+/*
+ * The chip provides 256 byte register space.
+ */
+#define SONIC_MEM_SIZE 0x100
+
+/*
+ * Macros to access SONIC registers
+ */
+#define SONIC_READ(reg) \
+ (0xffff & *((volatile unsigned int *)dev->base_addr+reg))
+
+#define SONIC_WRITE(reg,val) \
+ *((volatile unsigned int *)dev->base_addr+reg) = val
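
Note the pointer arithmetic in these macros: casting base_addr to `volatile unsigned int *` before adding `reg` scales the offset by sizeof(unsigned int), so register N lives at byte offset 4*N and only the low 16 bits carry data. A host-side sketch with an array standing in for the register window (the macro names here are hypothetical):

    #include <stdio.h>

    static volatile unsigned int regs[0x40]; /* stand-in for the register window */

    #define REG_READ(base, reg)     (0xffff & *((volatile unsigned int *)(base) + (reg)))
    #define REG_WRITE(base, reg, v) (*((volatile unsigned int *)(base) + (reg)) = (v))

    int main(void)
    {
        REG_WRITE(regs, 0x05, 0x12345678); /* e.g. the ISR slot */

        /* each register occupies one 32-bit word; reads mask to 16 bits */
        printf("reg 0x05 = 0x%04x (byte offset %u)\n",
               REG_READ(regs, 0x05), 0x05 * (unsigned)sizeof(unsigned int));
        return 0;
    }
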
+
+
+/* Use 0 for production, 1 for verification, and >2 for debug */
+#ifdef SONIC_DEBUG
+static unsigned int sonic_debug = SONIC_DEBUG;
+#else
+static unsigned int sonic_debug = 1;
+#endif
+
+/*
+ * We cannot use station (ethernet) address prefixes to detect the
+ * sonic controller since these are board manufacturer dependent.
+ * So we check for known Silicon Revision IDs instead.
+ */
+static unsigned short known_revisions[] =
+{
+ 0x101, /* SONIC 83934 */
+ 0xffff /* end of list */
+};
+
+static int xtsonic_open(struct net_device *dev)
+{
+ int retval;
+
+ retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED,
+ "sonic", dev);
+ if (retval) {
+ printk(KERN_ERR "%s: unable to get IRQ %d.\n",
+ dev->name, dev->irq);
+ return -EAGAIN;
+ }
+
+ retval = sonic_open(dev);
+ if (retval)
+ free_irq(dev->irq, dev);
+ return retval;
+}
+
+static int xtsonic_close(struct net_device *dev)
+{
+ int err;
+ err = sonic_close(dev);
+ free_irq(dev->irq, dev);
+ return err;
+}
+
+static const struct net_device_ops xtsonic_netdev_ops = {
+ .ndo_open = xtsonic_open,
+ .ndo_stop = xtsonic_close,
+ .ndo_start_xmit = sonic_send_packet,
+ .ndo_get_stats = sonic_get_stats,
+ .ndo_set_rx_mode = sonic_multicast_list,
+ .ndo_tx_timeout = sonic_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int __init sonic_probe1(struct net_device *dev)
+{
+ static unsigned version_printed = 0;
+ unsigned int silicon_revision;
+ struct sonic_local *lp = netdev_priv(dev);
+ unsigned int base_addr = dev->base_addr;
+ int i;
+ int err = 0;
+
+ if (!request_mem_region(base_addr, 0x100, xtsonic_string))
+ return -EBUSY;
+
+ /*
+ * get the Silicon Revision ID. If this is one of the known
+ * ones, assume that we found a SONIC ethernet controller at
+ * the expected location.
+ */
+ silicon_revision = SONIC_READ(SONIC_SR);
+ if (sonic_debug > 1)
+ printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision);
+
+ i = 0;
+ while ((known_revisions[i] != 0xffff) &&
+ (known_revisions[i] != silicon_revision))
+ i++;
+
+ if (known_revisions[i] == 0xffff) {
+ printk("SONIC ethernet controller not found (0x%4x)\n",
+ silicon_revision);
+ return -ENODEV;
+ }
+
+ if (sonic_debug && version_printed++ == 0)
+ printk(version);
+
+ /*
+ * Put the sonic into software reset, then retrieve ethernet address.
+ * Note: we are assuming that the boot-loader has initialized the cam.
+ */
+ SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
+ SONIC_WRITE(SONIC_DCR,
+ SONIC_DCR_WC0|SONIC_DCR_DW|SONIC_DCR_LBR|SONIC_DCR_SBUS);
+ SONIC_WRITE(SONIC_CEP,0);
+ SONIC_WRITE(SONIC_IMR,0);
+
+ SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
+ SONIC_WRITE(SONIC_CEP,0);
+
+ for (i=0; i<3; i++) {
+ unsigned int val = SONIC_READ(SONIC_CAP0-i);
+ dev->dev_addr[i*2] = val;
+ dev->dev_addr[i*2+1] = val >> 8;
+ }
+
+ /* Initialize the device structure. */
+
+ lp->dma_bitmode = SONIC_BITMODE32;
+
+ /*
+ * Allocate local private descriptor areas in uncached space.
+ * The entire structure must be located within the same 64 kB segment.
+ * dma_alloc_coherent() returns a naturally aligned buffer, and the
+ * descriptor area is far smaller than 64 kB, so the constraint is
+ * met without the old trick of allocating twice the required size
+ * and using whichever half fits.
+ */
+ lp->descriptors =
+ dma_alloc_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ &lp->descriptors_laddr, GFP_KERNEL);
+
+ if (lp->descriptors == NULL) {
+ printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n",
+ dev_name(lp->device));
+ err = -ENOMEM;
+ goto out;
+ }
+
+ lp->cda = lp->descriptors;
+ lp->tda = lp->cda + (SIZEOF_SONIC_CDA
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+
+ /* get the virtual dma address */
+
+ lp->cda_laddr = lp->descriptors_laddr;
+ lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+
+ dev->netdev_ops = &xtsonic_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ /*
+ * clear tally counter
+ */
+ SONIC_WRITE(SONIC_CRCT,0xffff);
+ SONIC_WRITE(SONIC_FAET,0xffff);
+ SONIC_WRITE(SONIC_MPT,0xffff);
+
+ return 0;
+out:
+ release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
+ return err;
+}
+
+
+/*
+ * Probe for a SONIC ethernet controller on an XT2000 board.
+ * Actually probing is superfluous but we're paranoid.
+ */
+
+int __devinit xtsonic_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct sonic_local *lp;
+ struct resource *resmem, *resirq;
+ int err = 0;
+
+ if ((resmem = platform_get_resource(pdev, IORESOURCE_MEM, 0)) == NULL)
+ return -ENODEV;
+
+ if ((resirq = platform_get_resource(pdev, IORESOURCE_IRQ, 0)) == NULL)
+ return -ENODEV;
+
+ if ((dev = alloc_etherdev(sizeof(struct sonic_local))) == NULL)
+ return -ENOMEM;
+
+ lp = netdev_priv(dev);
+ lp->device = &pdev->dev;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ netdev_boot_setup_check(dev);
+
+ dev->base_addr = resmem->start;
+ dev->irq = resirq->start;
+
+ if ((err = sonic_probe1(dev)))
+ goto out;
+ if ((err = register_netdev(dev)))
+ goto out1;
+
+ printk("%s: SONIC ethernet @%08lx, MAC %pM, IRQ %d\n", dev->name,
+ dev->base_addr, dev->dev_addr, dev->irq);
+
+ return 0;
+
+out1:
+ release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
+out:
+ free_netdev(dev);
+
+ return err;
+}
+
+MODULE_DESCRIPTION("Xtensa XT2000 SONIC ethernet driver");
+module_param(sonic_debug, int, 0);
+MODULE_PARM_DESC(sonic_debug, "xtsonic debug level (1-4)");
+
+#include "sonic.c"
+
+static int __devexit xtsonic_device_remove (struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct sonic_local *lp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
+ release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct platform_driver xtsonic_driver = {
+ .probe = xtsonic_probe,
+ .remove = __devexit_p(xtsonic_device_remove),
+ .driver = {
+ .name = xtsonic_string,
+ },
+};
+
+static int __init xtsonic_init(void)
+{
+ return platform_driver_register(&xtsonic_driver);
+}
+
+static void __exit xtsonic_cleanup(void)
+{
+ platform_driver_unregister(&xtsonic_driver);
+}
+
+module_init(xtsonic_init);
+module_exit(xtsonic_cleanup);
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
new file mode 100644
index 000000000000..ff26b54bd3fb
--- /dev/null
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -0,0 +1,55 @@
+#
+# Exar device configuration
+#
+
+config NET_VENDOR_EXAR
+ bool "Exar devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say
+ Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Exar cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_EXAR
+
+config S2IO
+ tristate "Exar Xframe 10Gb Ethernet Adapter"
+ depends on PCI
+ ---help---
+ This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/s2io.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called s2io.
+
+config VXGE
+ tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
+ depends on PCI && INET
+ ---help---
+ This driver supports Exar Corp's X3100 Series 10 GbE PCIe
+ I/O Virtualized Server Adapter.
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/vxge.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called vxge.
+
+config VXGE_DEBUG_TRACE_ALL
+ bool "Enabling All Debug trace statements in driver"
+ default n
+ depends on VXGE
+ ---help---
+ Say Y here if you want to enable all the debug trace statements in
+ the vxge driver. By default only a few debug trace statements are
+ enabled.
+
+endif # NET_VENDOR_EXAR
diff --git a/drivers/net/ethernet/neterion/Makefile b/drivers/net/ethernet/neterion/Makefile
new file mode 100644
index 000000000000..70c8058a601a
--- /dev/null
+++ b/drivers/net/ethernet/neterion/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Exar network device drivers.
+#
+
+obj-$(CONFIG_S2IO) += s2io.o
+obj-$(CONFIG_VXGE) += vxge/
diff --git a/drivers/net/ethernet/neterion/s2io-regs.h b/drivers/net/ethernet/neterion/s2io-regs.h
new file mode 100644
index 000000000000..3688325c11f5
--- /dev/null
+++ b/drivers/net/ethernet/neterion/s2io-regs.h
@@ -0,0 +1,958 @@
+/************************************************************************
+ * regs.h: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
+ * Copyright(c) 2002-2010 Exar Corp.
+
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ ************************************************************************/
+#ifndef _REGS_H
+#define _REGS_H
+
+#define TBD 0
+
+struct XENA_dev_config {
+/* Convention: mHAL_XXX is mask, vHAL_XXX is value */
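+/*
+ * s2BIT() and vBIT() come from s2io.h and use big-endian bit numbering
+ * (bit 0 is the most significant bit of the 64-bit word). Assuming the
+ * usual definitions there,
+ *   s2BIT(loc)         == 0x8000000000000000ULL >> (loc)
+ *   vBIT(val, loc, sz) == (u64)(val) << (64 - (loc) - (sz))
+ * so, for example, vBIT(0xA5,0,8) places 0xA5 in the top byte of the
+ * register word.
+ */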
+
+/* General Control-Status Registers */
+ u64 general_int_status;
+#define GEN_INTR_TXPIC s2BIT(0)
+#define GEN_INTR_TXDMA s2BIT(1)
+#define GEN_INTR_TXMAC s2BIT(2)
+#define GEN_INTR_TXXGXS s2BIT(3)
+#define GEN_INTR_TXTRAFFIC s2BIT(8)
+#define GEN_INTR_RXPIC s2BIT(32)
+#define GEN_INTR_RXDMA s2BIT(33)
+#define GEN_INTR_RXMAC s2BIT(34)
+#define GEN_INTR_MC s2BIT(35)
+#define GEN_INTR_RXXGXS s2BIT(36)
+#define GEN_INTR_RXTRAFFIC s2BIT(40)
+#define GEN_ERROR_INTR (GEN_INTR_TXPIC | GEN_INTR_RXPIC | \
+ GEN_INTR_TXDMA | GEN_INTR_RXDMA | \
+ GEN_INTR_TXMAC | GEN_INTR_RXMAC | \
+ GEN_INTR_TXXGXS | GEN_INTR_RXXGXS | \
+ GEN_INTR_MC)
+
+ u64 general_int_mask;
+
+ u8 unused0[0x100 - 0x10];
+
+ u64 sw_reset;
+/* XGXS must be removed from reset only once. */
+#define SW_RESET_XENA vBIT(0xA5,0,8)
+#define SW_RESET_FLASH vBIT(0xA5,8,8)
+#define SW_RESET_EOI vBIT(0xA5,16,8)
+#define SW_RESET_ALL (SW_RESET_XENA | \
+ SW_RESET_FLASH | \
+ SW_RESET_EOI)
+/* The SW_RESET register must read this value after a successful reset. */
+#define SW_RESET_RAW_VAL 0xA5000000
+
+
+ u64 adapter_status;
+#define ADAPTER_STATUS_TDMA_READY s2BIT(0)
+#define ADAPTER_STATUS_RDMA_READY s2BIT(1)
+#define ADAPTER_STATUS_PFC_READY s2BIT(2)
+#define ADAPTER_STATUS_TMAC_BUF_EMPTY s2BIT(3)
+#define ADAPTER_STATUS_PIC_QUIESCENT s2BIT(5)
+#define ADAPTER_STATUS_RMAC_REMOTE_FAULT s2BIT(6)
+#define ADAPTER_STATUS_RMAC_LOCAL_FAULT s2BIT(7)
+#define ADAPTER_STATUS_RMAC_PCC_IDLE vBIT(0xFF,8,8)
+#define ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE vBIT(0x0F,8,8)
+#define ADAPTER_STATUS_RC_PRC_QUIESCENT vBIT(0xFF,16,8)
+#define ADAPTER_STATUS_MC_DRAM_READY s2BIT(24)
+#define ADAPTER_STATUS_MC_QUEUES_READY s2BIT(25)
+#define ADAPTER_STATUS_RIC_RUNNING s2BIT(26)
+#define ADAPTER_STATUS_M_PLL_LOCK s2BIT(30)
+#define ADAPTER_STATUS_P_PLL_LOCK s2BIT(31)
+
+ u64 adapter_control;
+#define ADAPTER_CNTL_EN s2BIT(7)
+#define ADAPTER_EOI_TX_ON s2BIT(15)
+#define ADAPTER_LED_ON s2BIT(23)
+#define ADAPTER_UDPI(val) vBIT(val,36,4)
+#define ADAPTER_WAIT_INT s2BIT(48)
+#define ADAPTER_ECC_EN s2BIT(55)
+
+ u64 serr_source;
+#define SERR_SOURCE_PIC s2BIT(0)
+#define SERR_SOURCE_TXDMA s2BIT(1)
+#define SERR_SOURCE_RXDMA s2BIT(2)
+#define SERR_SOURCE_MAC s2BIT(3)
+#define SERR_SOURCE_MC s2BIT(4)
+#define SERR_SOURCE_XGXS s2BIT(5)
+#define SERR_SOURCE_ANY (SERR_SOURCE_PIC | \
+ SERR_SOURCE_TXDMA | \
+ SERR_SOURCE_RXDMA | \
+ SERR_SOURCE_MAC | \
+ SERR_SOURCE_MC | \
+ SERR_SOURCE_XGXS)
+
+ u64 pci_mode;
+#define GET_PCI_MODE(val) ((val & vBIT(0xF, 0, 4)) >> 60)
+#define PCI_MODE_PCI_33 0
+#define PCI_MODE_PCI_66 0x1
+#define PCI_MODE_PCIX_M1_66 0x2
+#define PCI_MODE_PCIX_M1_100 0x3
+#define PCI_MODE_PCIX_M1_133 0x4
+#define PCI_MODE_PCIX_M2_66 0x5
+#define PCI_MODE_PCIX_M2_100 0x6
+#define PCI_MODE_PCIX_M2_133 0x7
+#define PCI_MODE_UNSUPPORTED s2BIT(0)
+#define PCI_MODE_32_BITS s2BIT(8)
+#define PCI_MODE_UNKNOWN_MODE s2BIT(9)
+
+ u8 unused_0[0x800 - 0x128];
+
+/* PCI-X Controller registers */
+ u64 pic_int_status;
+ u64 pic_int_mask;
+#define PIC_INT_TX s2BIT(0)
+#define PIC_INT_FLSH s2BIT(1)
+#define PIC_INT_MDIO s2BIT(2)
+#define PIC_INT_IIC s2BIT(3)
+#define PIC_INT_GPIO s2BIT(4)
+#define PIC_INT_RX s2BIT(32)
+
+ u64 txpic_int_reg;
+ u64 txpic_int_mask;
+#define PCIX_INT_REG_ECC_SG_ERR s2BIT(0)
+#define PCIX_INT_REG_ECC_DB_ERR s2BIT(1)
+#define PCIX_INT_REG_FLASHR_R_FSM_ERR s2BIT(8)
+#define PCIX_INT_REG_FLASHR_W_FSM_ERR s2BIT(9)
+#define PCIX_INT_REG_INI_TX_FSM_SERR s2BIT(10)
+#define PCIX_INT_REG_INI_TXO_FSM_ERR s2BIT(11)
+#define PCIX_INT_REG_TRT_FSM_SERR s2BIT(13)
+#define PCIX_INT_REG_SRT_FSM_SERR s2BIT(14)
+#define PCIX_INT_REG_PIFR_FSM_SERR s2BIT(15)
+#define PCIX_INT_REG_WRC_TX_SEND_FSM_SERR s2BIT(21)
+#define PCIX_INT_REG_RRC_TX_REQ_FSM_SERR s2BIT(23)
+#define PCIX_INT_REG_INI_RX_FSM_SERR s2BIT(48)
+#define PCIX_INT_REG_RA_RX_FSM_SERR s2BIT(50)
+/*
+#define PCIX_INT_REG_WRC_RX_SEND_FSM_SERR s2BIT(52)
+#define PCIX_INT_REG_RRC_RX_REQ_FSM_SERR s2BIT(54)
+#define PCIX_INT_REG_RRC_RX_SPLIT_FSM_SERR s2BIT(58)
+*/
+ u64 txpic_alarms;
+ u64 rxpic_int_reg;
+ u64 rxpic_int_mask;
+ u64 rxpic_alarms;
+
+ u64 flsh_int_reg;
+ u64 flsh_int_mask;
+#define PIC_FLSH_INT_REG_CYCLE_FSM_ERR s2BIT(63)
+#define PIC_FLSH_INT_REG_ERR s2BIT(62)
+ u64 flash_alarms;
+
+ u64 mdio_int_reg;
+ u64 mdio_int_mask;
+#define MDIO_INT_REG_MDIO_BUS_ERR s2BIT(0)
+#define MDIO_INT_REG_DTX_BUS_ERR s2BIT(8)
+#define MDIO_INT_REG_LASI s2BIT(39)
+ u64 mdio_alarms;
+
+ u64 iic_int_reg;
+ u64 iic_int_mask;
+#define IIC_INT_REG_BUS_FSM_ERR s2BIT(4)
+#define IIC_INT_REG_BIT_FSM_ERR s2BIT(5)
+#define IIC_INT_REG_CYCLE_FSM_ERR s2BIT(6)
+#define IIC_INT_REG_REQ_FSM_ERR s2BIT(7)
+#define IIC_INT_REG_ACK_ERR s2BIT(8)
+ u64 iic_alarms;
+
+ u8 unused4[0x08];
+
+ u64 gpio_int_reg;
+#define GPIO_INT_REG_DP_ERR_INT s2BIT(0)
+#define GPIO_INT_REG_LINK_DOWN s2BIT(1)
+#define GPIO_INT_REG_LINK_UP s2BIT(2)
+ u64 gpio_int_mask;
+#define GPIO_INT_MASK_LINK_DOWN s2BIT(1)
+#define GPIO_INT_MASK_LINK_UP s2BIT(2)
+ u64 gpio_alarms;
+
+ u8 unused5[0x38];
+
+ u64 tx_traffic_int;
+#define TX_TRAFFIC_INT_n(n) s2BIT(n)
+ u64 tx_traffic_mask;
+
+ u64 rx_traffic_int;
+#define RX_TRAFFIC_INT_n(n) s2BIT(n)
+ u64 rx_traffic_mask;
+
+/* PIC Control registers */
+ u64 pic_control;
+#define PIC_CNTL_RX_ALARM_MAP_1 s2BIT(0)
+#define PIC_CNTL_SHARED_SPLITS(n) vBIT(n,11,5)
+
+ u64 swapper_ctrl;
+#define SWAPPER_CTRL_PIF_R_FE s2BIT(0)
+#define SWAPPER_CTRL_PIF_R_SE s2BIT(1)
+#define SWAPPER_CTRL_PIF_W_FE s2BIT(8)
+#define SWAPPER_CTRL_PIF_W_SE s2BIT(9)
+#define SWAPPER_CTRL_TXP_FE s2BIT(16)
+#define SWAPPER_CTRL_TXP_SE s2BIT(17)
+#define SWAPPER_CTRL_TXD_R_FE s2BIT(18)
+#define SWAPPER_CTRL_TXD_R_SE s2BIT(19)
+#define SWAPPER_CTRL_TXD_W_FE s2BIT(20)
+#define SWAPPER_CTRL_TXD_W_SE s2BIT(21)
+#define SWAPPER_CTRL_TXF_R_FE s2BIT(22)
+#define SWAPPER_CTRL_TXF_R_SE s2BIT(23)
+#define SWAPPER_CTRL_RXD_R_FE s2BIT(32)
+#define SWAPPER_CTRL_RXD_R_SE s2BIT(33)
+#define SWAPPER_CTRL_RXD_W_FE s2BIT(34)
+#define SWAPPER_CTRL_RXD_W_SE s2BIT(35)
+#define SWAPPER_CTRL_RXF_W_FE s2BIT(36)
+#define SWAPPER_CTRL_RXF_W_SE s2BIT(37)
+#define SWAPPER_CTRL_XMSI_FE s2BIT(40)
+#define SWAPPER_CTRL_XMSI_SE s2BIT(41)
+#define SWAPPER_CTRL_STATS_FE s2BIT(48)
+#define SWAPPER_CTRL_STATS_SE s2BIT(49)
+
+ u64 pif_rd_swapper_fb;
+#define IF_RD_SWAPPER_FB 0x0123456789ABCDEF
+
+ u64 scheduled_int_ctrl;
+#define SCHED_INT_CTRL_TIMER_EN s2BIT(0)
+#define SCHED_INT_CTRL_ONE_SHOT s2BIT(1)
+#define SCHED_INT_CTRL_INT2MSI(val) vBIT(val,10,6)
+#define SCHED_INT_PERIOD TBD
+
+ u64 txreqtimeout;
+#define TXREQTO_VAL(val) vBIT(val,0,32)
+#define TXREQTO_EN s2BIT(63)
+
+ u64 statsreqtimeout;
+#define STATREQTO_VAL(n) TBD
+#define STATREQTO_EN s2BIT(63)
+
+ u64 read_retry_delay;
+ u64 read_retry_acceleration;
+ u64 write_retry_delay;
+ u64 write_retry_acceleration;
+
+ u64 xmsi_control;
+ u64 xmsi_access;
+ u64 xmsi_address;
+ u64 xmsi_data;
+
+ u64 rx_mat;
+#define RX_MAT_SET(ring, msi) vBIT(msi, (8 * ring), 8)
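+/* e.g. RX_MAT_SET(1, 5) routes ring 1 to MSI number 5 by placing 5 in
+ * bits 8-15 (big-endian numbering) of rx_mat, assuming the usual vBIT()
+ * definition from s2io.h. */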
+
+ u8 unused6[0x8];
+
+ u64 tx_mat0_n[0x8];
+#define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8)
+
+ u64 xmsi_mask_reg;
+ u64 stat_byte_cnt;
+#define STAT_BC(n) vBIT(n,4,12)
+
+ /* Automated statistics collection */
+ u64 stat_cfg;
+#define STAT_CFG_STAT_EN s2BIT(0)
+#define STAT_CFG_ONE_SHOT_EN s2BIT(1)
+#define STAT_CFG_STAT_NS_EN s2BIT(8)
+#define STAT_CFG_STAT_RO s2BIT(9)
+#define STAT_TRSF_PER(n) TBD
+#define PER_SEC 0x208d5
+#define SET_UPDT_PERIOD(n) vBIT((PER_SEC*n),32,32)
+#define SET_UPDT_CLICKS(val) vBIT(val, 32, 32)
+
+ u64 stat_addr;
+
+ /* General Configuration */
+ u64 mdio_control;
+#define MDIO_MMD_INDX_ADDR(val) vBIT(val, 0, 16)
+#define MDIO_MMD_DEV_ADDR(val) vBIT(val, 19, 5)
+#define MDIO_MMS_PRT_ADDR(val) vBIT(val, 27, 5)
+#define MDIO_CTRL_START_TRANS(val) vBIT(val, 56, 4)
+#define MDIO_OP(val) vBIT(val, 60, 2)
+#define MDIO_OP_ADDR_TRANS 0x0
+#define MDIO_OP_WRITE_TRANS 0x1
+#define MDIO_OP_READ_POST_INC_TRANS 0x2
+#define MDIO_OP_READ_TRANS 0x3
+#define MDIO_MDIO_DATA(val) vBIT(val, 32, 16)
+
+ u64 dtx_control;
+
+ u64 i2c_control;
+#define I2C_CONTROL_DEV_ID(id) vBIT(id,1,3)
+#define I2C_CONTROL_ADDR(addr) vBIT(addr,5,11)
+#define I2C_CONTROL_BYTE_CNT(cnt) vBIT(cnt,22,2)
+#define I2C_CONTROL_READ s2BIT(24)
+#define I2C_CONTROL_NACK s2BIT(25)
+#define I2C_CONTROL_CNTL_START vBIT(0xE,28,4)
+#define I2C_CONTROL_CNTL_END(val) (val & vBIT(0x1,28,4))
+#define I2C_CONTROL_GET_DATA(val) (u32)(val & 0xFFFFFFFF)
+#define I2C_CONTROL_SET_DATA(val) vBIT(val,32,32)
+
+ u64 gpio_control;
+#define GPIO_CTRL_GPIO_0 s2BIT(8)
+ u64 misc_control;
+#define FAULT_BEHAVIOUR s2BIT(0)
+#define EXT_REQ_EN s2BIT(1)
+#define MISC_LINK_STABILITY_PRD(val) vBIT(val,29,3)
+
+ u8 unused7_1[0x230 - 0x208];
+
+ u64 pic_control2;
+ u64 ini_dperr_ctrl;
+
+ u64 wreq_split_mask;
+#define WREQ_SPLIT_MASK_SET_MASK(val) vBIT(val, 52, 12)
+
+ u8 unused7_2[0x800 - 0x248];
+
+/* TxDMA registers */
+ u64 txdma_int_status;
+ u64 txdma_int_mask;
+#define TXDMA_PFC_INT s2BIT(0)
+#define TXDMA_TDA_INT s2BIT(1)
+#define TXDMA_PCC_INT s2BIT(2)
+#define TXDMA_TTI_INT s2BIT(3)
+#define TXDMA_LSO_INT s2BIT(4)
+#define TXDMA_TPA_INT s2BIT(5)
+#define TXDMA_SM_INT s2BIT(6)
+ u64 pfc_err_reg;
+#define PFC_ECC_SG_ERR s2BIT(7)
+#define PFC_ECC_DB_ERR s2BIT(15)
+#define PFC_SM_ERR_ALARM s2BIT(23)
+#define PFC_MISC_0_ERR s2BIT(31)
+#define PFC_MISC_1_ERR s2BIT(32)
+#define PFC_PCIX_ERR s2BIT(39)
+ u64 pfc_err_mask;
+ u64 pfc_err_alarm;
+
+ u64 tda_err_reg;
+#define TDA_Fn_ECC_SG_ERR vBIT(0xff,0,8)
+#define TDA_Fn_ECC_DB_ERR vBIT(0xff,8,8)
+#define TDA_SM0_ERR_ALARM s2BIT(22)
+#define TDA_SM1_ERR_ALARM s2BIT(23)
+#define TDA_PCIX_ERR s2BIT(39)
+ u64 tda_err_mask;
+ u64 tda_err_alarm;
+
+ u64 pcc_err_reg;
+#define PCC_FB_ECC_SG_ERR vBIT(0xFF,0,8)
+#define PCC_TXB_ECC_SG_ERR vBIT(0xFF,8,8)
+#define PCC_FB_ECC_DB_ERR vBIT(0xFF,16, 8)
+#define PCC_TXB_ECC_DB_ERR vBIT(0xff,24,8)
+#define PCC_SM_ERR_ALARM vBIT(0xff,32,8)
+#define PCC_WR_ERR_ALARM vBIT(0xff,40,8)
+#define PCC_N_SERR vBIT(0xff,48,8)
+#define PCC_6_COF_OV_ERR s2BIT(56)
+#define PCC_7_COF_OV_ERR s2BIT(57)
+#define PCC_6_LSO_OV_ERR s2BIT(58)
+#define PCC_7_LSO_OV_ERR s2BIT(59)
+#define PCC_ENABLE_FOUR vBIT(0x0F,0,8)
+ u64 pcc_err_mask;
+ u64 pcc_err_alarm;
+
+ u64 tti_err_reg;
+#define TTI_ECC_SG_ERR s2BIT(7)
+#define TTI_ECC_DB_ERR s2BIT(15)
+#define TTI_SM_ERR_ALARM s2BIT(23)
+ u64 tti_err_mask;
+ u64 tti_err_alarm;
+
+ u64 lso_err_reg;
+#define LSO6_SEND_OFLOW s2BIT(12)
+#define LSO7_SEND_OFLOW s2BIT(13)
+#define LSO6_ABORT s2BIT(14)
+#define LSO7_ABORT s2BIT(15)
+#define LSO6_SM_ERR_ALARM s2BIT(22)
+#define LSO7_SM_ERR_ALARM s2BIT(23)
+ u64 lso_err_mask;
+ u64 lso_err_alarm;
+
+ u64 tpa_err_reg;
+#define TPA_TX_FRM_DROP s2BIT(7)
+#define TPA_SM_ERR_ALARM s2BIT(23)
+
+ u64 tpa_err_mask;
+ u64 tpa_err_alarm;
+
+ u64 sm_err_reg;
+#define SM_SM_ERR_ALARM s2BIT(15)
+ u64 sm_err_mask;
+ u64 sm_err_alarm;
+
+ u8 unused8[0x100 - 0xB8];
+
+/* TxDMA arbiter */
+ u64 tx_dma_wrap_stat;
+
+/* Tx FIFO controller */
+#define X_MAX_FIFOS 8
+#define X_FIFO_MAX_LEN 0x1FFF /*8191 */
+ u64 tx_fifo_partition_0;
+#define TX_FIFO_PARTITION_EN s2BIT(0)
+#define TX_FIFO_PARTITION_0_PRI(val) vBIT(val,5,3)
+#define TX_FIFO_PARTITION_0_LEN(val) vBIT(val,19,13)
+#define TX_FIFO_PARTITION_1_PRI(val) vBIT(val,37,3)
+#define TX_FIFO_PARTITION_1_LEN(val) vBIT(val,51,13)
+
+ u64 tx_fifo_partition_1;
+#define TX_FIFO_PARTITION_2_PRI(val) vBIT(val,5,3)
+#define TX_FIFO_PARTITION_2_LEN(val) vBIT(val,19,13)
+#define TX_FIFO_PARTITION_3_PRI(val) vBIT(val,37,3)
+#define TX_FIFO_PARTITION_3_LEN(val) vBIT(val,51,13)
+
+ u64 tx_fifo_partition_2;
+#define TX_FIFO_PARTITION_4_PRI(val) vBIT(val,5,3)
+#define TX_FIFO_PARTITION_4_LEN(val) vBIT(val,19,13)
+#define TX_FIFO_PARTITION_5_PRI(val) vBIT(val,37,3)
+#define TX_FIFO_PARTITION_5_LEN(val) vBIT(val,51,13)
+
+ u64 tx_fifo_partition_3;
+#define TX_FIFO_PARTITION_6_PRI(val) vBIT(val,5,3)
+#define TX_FIFO_PARTITION_6_LEN(val) vBIT(val,19,13)
+#define TX_FIFO_PARTITION_7_PRI(val) vBIT(val,37,3)
+#define TX_FIFO_PARTITION_7_LEN(val) vBIT(val,51,13)
+
+#define TX_FIFO_PARTITION_PRI_0 0 /* highest */
+#define TX_FIFO_PARTITION_PRI_1 1
+#define TX_FIFO_PARTITION_PRI_2 2
+#define TX_FIFO_PARTITION_PRI_3 3
+#define TX_FIFO_PARTITION_PRI_4 4
+#define TX_FIFO_PARTITION_PRI_5 5
+#define TX_FIFO_PARTITION_PRI_6 6
+#define TX_FIFO_PARTITION_PRI_7 7 /* lowest */
+
+ u64 tx_w_round_robin_0;
+ u64 tx_w_round_robin_1;
+ u64 tx_w_round_robin_2;
+ u64 tx_w_round_robin_3;
+ u64 tx_w_round_robin_4;
+
+ u64 tti_command_mem;
+#define TTI_CMD_MEM_WE s2BIT(7)
+#define TTI_CMD_MEM_STROBE_NEW_CMD s2BIT(15)
+#define TTI_CMD_MEM_STROBE_BEING_EXECUTED s2BIT(15)
+#define TTI_CMD_MEM_OFFSET(n) vBIT(n,26,6)
+
+ u64 tti_data1_mem;
+#define TTI_DATA1_MEM_TX_TIMER_VAL(n) vBIT(n,6,26)
+#define TTI_DATA1_MEM_TX_TIMER_AC_CI(n) vBIT(n,38,2)
+#define TTI_DATA1_MEM_TX_TIMER_AC_EN s2BIT(38)
+#define TTI_DATA1_MEM_TX_TIMER_CI_EN s2BIT(39)
+#define TTI_DATA1_MEM_TX_URNG_A(n) vBIT(n,41,7)
+#define TTI_DATA1_MEM_TX_URNG_B(n) vBIT(n,49,7)
+#define TTI_DATA1_MEM_TX_URNG_C(n) vBIT(n,57,7)
+
+ u64 tti_data2_mem;
+#define TTI_DATA2_MEM_TX_UFC_A(n) vBIT(n,0,16)
+#define TTI_DATA2_MEM_TX_UFC_B(n) vBIT(n,16,16)
+#define TTI_DATA2_MEM_TX_UFC_C(n) vBIT(n,32,16)
+#define TTI_DATA2_MEM_TX_UFC_D(n) vBIT(n,48,16)
+
+/* Tx Protocol assist */
+ u64 tx_pa_cfg;
+#define TX_PA_CFG_IGNORE_FRM_ERR s2BIT(1)
+#define TX_PA_CFG_IGNORE_SNAP_OUI s2BIT(2)
+#define TX_PA_CFG_IGNORE_LLC_CTRL s2BIT(3)
+#define TX_PA_CFG_IGNORE_L2_ERR s2BIT(6)
+#define RX_PA_CFG_STRIP_VLAN_TAG s2BIT(15)
+
+/* Recent addition, used for debug purposes only. */
+ u64 pcc_enable;
+
+ u8 unused9[0x700 - 0x178];
+
+ u64 txdma_debug_ctrl;
+
+ u8 unused10[0x1800 - 0x1708];
+
+/* RxDMA Registers */
+ u64 rxdma_int_status;
+ u64 rxdma_int_mask;
+#define RXDMA_INT_RC_INT_M s2BIT(0)
+#define RXDMA_INT_RPA_INT_M s2BIT(1)
+#define RXDMA_INT_RDA_INT_M s2BIT(2)
+#define RXDMA_INT_RTI_INT_M s2BIT(3)
+
+ u64 rda_err_reg;
+#define RDA_RXDn_ECC_SG_ERR vBIT(0xFF,0,8)
+#define RDA_RXDn_ECC_DB_ERR vBIT(0xFF,8,8)
+#define RDA_FRM_ECC_SG_ERR s2BIT(23)
+#define RDA_FRM_ECC_DB_N_AERR s2BIT(31)
+#define RDA_SM1_ERR_ALARM s2BIT(38)
+#define RDA_SM0_ERR_ALARM s2BIT(39)
+#define RDA_MISC_ERR s2BIT(47)
+#define RDA_PCIX_ERR s2BIT(55)
+#define RDA_RXD_ECC_DB_SERR s2BIT(63)
+ u64 rda_err_mask;
+ u64 rda_err_alarm;
+
+ u64 rc_err_reg;
+#define RC_PRCn_ECC_SG_ERR vBIT(0xFF,0,8)
+#define RC_PRCn_ECC_DB_ERR vBIT(0xFF,8,8)
+#define RC_FTC_ECC_SG_ERR s2BIT(23)
+#define RC_FTC_ECC_DB_ERR s2BIT(31)
+#define RC_PRCn_SM_ERR_ALARM vBIT(0xFF,32,8)
+#define RC_FTC_SM_ERR_ALARM s2BIT(47)
+#define RC_RDA_FAIL_WR_Rn vBIT(0xFF,48,8)
+ u64 rc_err_mask;
+ u64 rc_err_alarm;
+
+ u64 prc_pcix_err_reg;
+#define PRC_PCI_AB_RD_Rn vBIT(0xFF,0,8)
+#define PRC_PCI_DP_RD_Rn vBIT(0xFF,8,8)
+#define PRC_PCI_AB_WR_Rn vBIT(0xFF,16,8)
+#define PRC_PCI_DP_WR_Rn vBIT(0xFF,24,8)
+#define PRC_PCI_AB_F_WR_Rn vBIT(0xFF,32,8)
+#define PRC_PCI_DP_F_WR_Rn vBIT(0xFF,40,8)
+ u64 prc_pcix_err_mask;
+ u64 prc_pcix_err_alarm;
+
+ u64 rpa_err_reg;
+#define RPA_ECC_SG_ERR s2BIT(7)
+#define RPA_ECC_DB_ERR s2BIT(15)
+#define RPA_FLUSH_REQUEST s2BIT(22)
+#define RPA_SM_ERR_ALARM s2BIT(23)
+#define RPA_CREDIT_ERR s2BIT(31)
+ u64 rpa_err_mask;
+ u64 rpa_err_alarm;
+
+ u64 rti_err_reg;
+#define RTI_ECC_SG_ERR s2BIT(7)
+#define RTI_ECC_DB_ERR s2BIT(15)
+#define RTI_SM_ERR_ALARM s2BIT(23)
+ u64 rti_err_mask;
+ u64 rti_err_alarm;
+
+ u8 unused11[0x100 - 0x88];
+
+/* DMA arbiter */
+ u64 rx_queue_priority;
+#define RX_QUEUE_0_PRIORITY(val) vBIT(val,5,3)
+#define RX_QUEUE_1_PRIORITY(val) vBIT(val,13,3)
+#define RX_QUEUE_2_PRIORITY(val) vBIT(val,21,3)
+#define RX_QUEUE_3_PRIORITY(val) vBIT(val,29,3)
+#define RX_QUEUE_4_PRIORITY(val) vBIT(val,37,3)
+#define RX_QUEUE_5_PRIORITY(val) vBIT(val,45,3)
+#define RX_QUEUE_6_PRIORITY(val) vBIT(val,53,3)
+#define RX_QUEUE_7_PRIORITY(val) vBIT(val,61,3)
+
+#define RX_QUEUE_PRI_0 0 /* highest */
+#define RX_QUEUE_PRI_1 1
+#define RX_QUEUE_PRI_2 2
+#define RX_QUEUE_PRI_3 3
+#define RX_QUEUE_PRI_4 4
+#define RX_QUEUE_PRI_5 5
+#define RX_QUEUE_PRI_6 6
+#define RX_QUEUE_PRI_7 7 /* lowest */
+
+ u64 rx_w_round_robin_0;
+ u64 rx_w_round_robin_1;
+ u64 rx_w_round_robin_2;
+ u64 rx_w_round_robin_3;
+ u64 rx_w_round_robin_4;
+
+ /* Per-ring controller regs */
+#define RX_MAX_RINGS 8
+#if 0
+#define RX_MAX_RINGS_SZ 0xFFFF /* 65536 */
+#define RX_MIN_RINGS_SZ 0x3F /* 63 */
+#endif
+ u64 prc_rxd0_n[RX_MAX_RINGS];
+ u64 prc_ctrl_n[RX_MAX_RINGS];
+#define PRC_CTRL_RC_ENABLED s2BIT(7)
+#define PRC_CTRL_RING_MODE (s2BIT(14)|s2BIT(15))
+#define PRC_CTRL_RING_MODE_1 vBIT(0,14,2)
+#define PRC_CTRL_RING_MODE_3 vBIT(1,14,2)
+#define PRC_CTRL_RING_MODE_5 vBIT(2,14,2)
+#define PRC_CTRL_RING_MODE_x vBIT(3,14,2)
+#define PRC_CTRL_NO_SNOOP (s2BIT(22)|s2BIT(23))
+#define PRC_CTRL_NO_SNOOP_DESC s2BIT(22)
+#define PRC_CTRL_NO_SNOOP_BUFF s2BIT(23)
+#define PRC_CTRL_BIMODAL_INTERRUPT s2BIT(37)
+#define PRC_CTRL_GROUP_READS s2BIT(38)
+#define PRC_CTRL_RXD_BACKOFF_INTERVAL(val) vBIT(val,40,24)
+
+ u64 prc_alarm_action;
+#define PRC_ALARM_ACTION_RR_R0_STOP s2BIT(3)
+#define PRC_ALARM_ACTION_RW_R0_STOP s2BIT(7)
+#define PRC_ALARM_ACTION_RR_R1_STOP s2BIT(11)
+#define PRC_ALARM_ACTION_RW_R1_STOP s2BIT(15)
+#define PRC_ALARM_ACTION_RR_R2_STOP s2BIT(19)
+#define PRC_ALARM_ACTION_RW_R2_STOP s2BIT(23)
+#define PRC_ALARM_ACTION_RR_R3_STOP s2BIT(27)
+#define PRC_ALARM_ACTION_RW_R3_STOP s2BIT(31)
+#define PRC_ALARM_ACTION_RR_R4_STOP s2BIT(35)
+#define PRC_ALARM_ACTION_RW_R4_STOP s2BIT(39)
+#define PRC_ALARM_ACTION_RR_R5_STOP s2BIT(43)
+#define PRC_ALARM_ACTION_RW_R5_STOP s2BIT(47)
+#define PRC_ALARM_ACTION_RR_R6_STOP s2BIT(51)
+#define PRC_ALARM_ACTION_RW_R6_STOP s2BIT(55)
+#define PRC_ALARM_ACTION_RR_R7_STOP s2BIT(59)
+#define PRC_ALARM_ACTION_RW_R7_STOP s2BIT(63)
+
+/* Receive traffic interrupts */
+ u64 rti_command_mem;
+#define RTI_CMD_MEM_WE s2BIT(7)
+#define RTI_CMD_MEM_STROBE s2BIT(15)
+#define RTI_CMD_MEM_STROBE_NEW_CMD s2BIT(15)
+#define RTI_CMD_MEM_STROBE_CMD_BEING_EXECUTED s2BIT(15)
+#define RTI_CMD_MEM_OFFSET(n) vBIT(n,29,3)
+
+ u64 rti_data1_mem;
+#define RTI_DATA1_MEM_RX_TIMER_VAL(n) vBIT(n,3,29)
+#define RTI_DATA1_MEM_RX_TIMER_AC_EN s2BIT(38)
+#define RTI_DATA1_MEM_RX_TIMER_CI_EN s2BIT(39)
+#define RTI_DATA1_MEM_RX_URNG_A(n) vBIT(n,41,7)
+#define RTI_DATA1_MEM_RX_URNG_B(n) vBIT(n,49,7)
+#define RTI_DATA1_MEM_RX_URNG_C(n) vBIT(n,57,7)
+
+ u64 rti_data2_mem;
+#define RTI_DATA2_MEM_RX_UFC_A(n) vBIT(n,0,16)
+#define RTI_DATA2_MEM_RX_UFC_B(n) vBIT(n,16,16)
+#define RTI_DATA2_MEM_RX_UFC_C(n) vBIT(n,32,16)
+#define RTI_DATA2_MEM_RX_UFC_D(n) vBIT(n,48,16)
+
+ u64 rx_pa_cfg;
+#define RX_PA_CFG_IGNORE_FRM_ERR s2BIT(1)
+#define RX_PA_CFG_IGNORE_SNAP_OUI s2BIT(2)
+#define RX_PA_CFG_IGNORE_LLC_CTRL s2BIT(3)
+#define RX_PA_CFG_IGNORE_L2_ERR s2BIT(6)
+
+ u64 unused_11_1;
+
+ u64 ring_bump_counter1;
+ u64 ring_bump_counter2;
+
+ u8 unused12[0x700 - 0x1F0];
+
+ u64 rxdma_debug_ctrl;
+
+ u8 unused13[0x2000 - 0x1f08];
+
+/* Media Access Controller Register */
+ u64 mac_int_status;
+ u64 mac_int_mask;
+#define MAC_INT_STATUS_TMAC_INT s2BIT(0)
+#define MAC_INT_STATUS_RMAC_INT s2BIT(1)
+
+ u64 mac_tmac_err_reg;
+#define TMAC_ECC_SG_ERR s2BIT(7)
+#define TMAC_ECC_DB_ERR s2BIT(15)
+#define TMAC_TX_BUF_OVRN s2BIT(23)
+#define TMAC_TX_CRI_ERR s2BIT(31)
+#define TMAC_TX_SM_ERR s2BIT(39)
+#define TMAC_DESC_ECC_SG_ERR s2BIT(47)
+#define TMAC_DESC_ECC_DB_ERR s2BIT(55)
+
+ u64 mac_tmac_err_mask;
+ u64 mac_tmac_err_alarm;
+
+ u64 mac_rmac_err_reg;
+#define RMAC_RX_BUFF_OVRN s2BIT(0)
+#define RMAC_FRM_RCVD_INT s2BIT(1)
+#define RMAC_UNUSED_INT s2BIT(2)
+#define RMAC_RTS_PNUM_ECC_SG_ERR s2BIT(5)
+#define RMAC_RTS_DS_ECC_SG_ERR s2BIT(6)
+#define RMAC_RD_BUF_ECC_SG_ERR s2BIT(7)
+#define RMAC_RTH_MAP_ECC_SG_ERR s2BIT(8)
+#define RMAC_RTH_SPDM_ECC_SG_ERR s2BIT(9)
+#define RMAC_RTS_VID_ECC_SG_ERR s2BIT(10)
+#define RMAC_DA_SHADOW_ECC_SG_ERR s2BIT(11)
+#define RMAC_RTS_PNUM_ECC_DB_ERR s2BIT(13)
+#define RMAC_RTS_DS_ECC_DB_ERR s2BIT(14)
+#define RMAC_RD_BUF_ECC_DB_ERR s2BIT(15)
+#define RMAC_RTH_MAP_ECC_DB_ERR s2BIT(16)
+#define RMAC_RTH_SPDM_ECC_DB_ERR s2BIT(17)
+#define RMAC_RTS_VID_ECC_DB_ERR s2BIT(18)
+#define RMAC_DA_SHADOW_ECC_DB_ERR s2BIT(19)
+#define RMAC_LINK_STATE_CHANGE_INT s2BIT(31)
+#define RMAC_RX_SM_ERR s2BIT(39)
+#define RMAC_SINGLE_ECC_ERR (s2BIT(5) | s2BIT(6) | s2BIT(7) |\
+ s2BIT(8) | s2BIT(9) | s2BIT(10)|\
+ s2BIT(11))
+#define RMAC_DOUBLE_ECC_ERR (s2BIT(13) | s2BIT(14) | s2BIT(15) |\
+ s2BIT(16) | s2BIT(17) | s2BIT(18)|\
+ s2BIT(19))
+ u64 mac_rmac_err_mask;
+ u64 mac_rmac_err_alarm;
+
+ u8 unused14[0x100 - 0x40];
+
+ u64 mac_cfg;
+#define MAC_CFG_TMAC_ENABLE s2BIT(0)
+#define MAC_CFG_RMAC_ENABLE s2BIT(1)
+#define MAC_CFG_LAN_NOT_WAN s2BIT(2)
+#define MAC_CFG_TMAC_LOOPBACK s2BIT(3)
+#define MAC_CFG_TMAC_APPEND_PAD s2BIT(4)
+#define MAC_CFG_RMAC_STRIP_FCS s2BIT(5)
+#define MAC_CFG_RMAC_STRIP_PAD s2BIT(6)
+#define MAC_CFG_RMAC_PROM_ENABLE s2BIT(7)
+#define MAC_RMAC_DISCARD_PFRM s2BIT(8)
+#define MAC_RMAC_BCAST_ENABLE s2BIT(9)
+#define MAC_RMAC_ALL_ADDR_ENABLE s2BIT(10)
+#define MAC_RMAC_INVLD_IPG_THR(val) vBIT(val,16,8)
+
+ u64 tmac_avg_ipg;
+#define TMAC_AVG_IPG(val) vBIT(val,0,8)
+
+ u64 rmac_max_pyld_len;
+#define RMAC_MAX_PYLD_LEN(val) vBIT(val,2,14)
+#define RMAC_MAX_PYLD_LEN_DEF vBIT(1500,2,14)
+#define RMAC_MAX_PYLD_LEN_JUMBO_DEF vBIT(9600,2,14)
+
+ u64 rmac_err_cfg;
+#define RMAC_ERR_FCS s2BIT(0)
+#define RMAC_ERR_FCS_ACCEPT s2BIT(1)
+#define RMAC_ERR_TOO_LONG s2BIT(1)
+#define RMAC_ERR_TOO_LONG_ACCEPT s2BIT(1)
+#define RMAC_ERR_RUNT s2BIT(2)
+#define RMAC_ERR_RUNT_ACCEPT s2BIT(2)
+#define RMAC_ERR_LEN_MISMATCH s2BIT(3)
+#define RMAC_ERR_LEN_MISMATCH_ACCEPT s2BIT(3)
+
+ u64 rmac_cfg_key;
+#define RMAC_CFG_KEY(val) vBIT(val,0,16)
+
+#define S2IO_MAC_ADDR_START_OFFSET 0
+
+#define S2IO_XENA_MAX_MC_ADDRESSES 64 /* multicast addresses */
+#define S2IO_HERC_MAX_MC_ADDRESSES 256
+
+#define S2IO_XENA_MAX_MAC_ADDRESSES 16
+#define S2IO_HERC_MAX_MAC_ADDRESSES 64
+
+#define S2IO_XENA_MC_ADDR_START_OFFSET 16
+#define S2IO_HERC_MC_ADDR_START_OFFSET 64
+
+ u64 rmac_addr_cmd_mem;
+#define RMAC_ADDR_CMD_MEM_WE s2BIT(7)
+#define RMAC_ADDR_CMD_MEM_RD 0
+#define RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD s2BIT(15)
+#define RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING s2BIT(15)
+#define RMAC_ADDR_CMD_MEM_OFFSET(n) vBIT(n,26,6)
+
+ u64 rmac_addr_data0_mem;
+#define RMAC_ADDR_DATA0_MEM_ADDR(n) vBIT(n,0,48)
+#define RMAC_ADDR_DATA0_MEM_USER s2BIT(48)
+
+ u64 rmac_addr_data1_mem;
+#define RMAC_ADDR_DATA1_MEM_MASK(n) vBIT(n,0,48)
+
+ u8 unused15[0x8];
+
+/*
+ u64 rmac_addr_cfg;
+#define RMAC_ADDR_UCASTn_EN(n) mBIT(0)_n(n)
+#define RMAC_ADDR_MCASTn_EN(n) mBIT(0)_n(n)
+#define RMAC_ADDR_BCAST_EN vBIT(0)_48
+#define RMAC_ADDR_ALL_ADDR_EN vBIT(0)_49
+*/
+ u64 tmac_ipg_cfg;
+
+ u64 rmac_pause_cfg;
+#define RMAC_PAUSE_GEN s2BIT(0)
+#define RMAC_PAUSE_GEN_ENABLE s2BIT(0)
+#define RMAC_PAUSE_RX s2BIT(1)
+#define RMAC_PAUSE_RX_ENABLE s2BIT(1)
+#define RMAC_PAUSE_HG_PTIME_DEF vBIT(0xFFFF,16,16)
+#define RMAC_PAUSE_HG_PTIME(val) vBIT(val,16,16)
+
+ u64 rmac_red_cfg;
+
+ u64 rmac_red_rate_q0q3;
+ u64 rmac_red_rate_q4q7;
+
+ u64 mac_link_util;
+#define MAC_TX_LINK_UTIL vBIT(0xFE,1,7)
+#define MAC_TX_LINK_UTIL_DISABLE vBIT(0xF,8,4)
+#define MAC_TX_LINK_UTIL_VAL(n) vBIT(n,8,4)
+#define MAC_RX_LINK_UTIL vBIT(0xFE,33,7)
+#define MAC_RX_LINK_UTIL_DISABLE vBIT(0xF,40,4)
+#define MAC_RX_LINK_UTIL_VAL(n) vBIT(n,40,4)
+
+#define MAC_LINK_UTIL_DISABLE (MAC_TX_LINK_UTIL_DISABLE | \
+ MAC_RX_LINK_UTIL_DISABLE)
+
+ u64 rmac_invalid_ipg;
+
+/* rx traffic steering */
+#define MAC_RTS_FRM_LEN_SET(len) vBIT(len,2,14)
+ u64 rts_frm_len_n[8];
+
+ u64 rts_qos_steering;
+
+#define MAX_DIX_MAP 4
+ u64 rts_dix_map_n[MAX_DIX_MAP];
+#define RTS_DIX_MAP_ETYPE(val) vBIT(val,0,16)
+#define RTS_DIX_MAP_SCW(val) s2BIT(val,21)
+
+ u64 rts_q_alternates;
+ u64 rts_default_q;
+
+ u64 rts_ctrl;
+#define RTS_CTRL_IGNORE_SNAP_OUI s2BIT(2)
+#define RTS_CTRL_IGNORE_LLC_CTRL s2BIT(3)
+
+ u64 rts_pn_cam_ctrl;
+#define RTS_PN_CAM_CTRL_WE s2BIT(7)
+#define RTS_PN_CAM_CTRL_STROBE_NEW_CMD s2BIT(15)
+#define RTS_PN_CAM_CTRL_STROBE_BEING_EXECUTED s2BIT(15)
+#define RTS_PN_CAM_CTRL_OFFSET(n) vBIT(n,24,8)
+ u64 rts_pn_cam_data;
+#define RTS_PN_CAM_DATA_TCP_SELECT s2BIT(7)
+#define RTS_PN_CAM_DATA_PORT(val) vBIT(val,8,16)
+#define RTS_PN_CAM_DATA_SCW(val) vBIT(val,24,8)
+
+ u64 rts_ds_mem_ctrl;
+#define RTS_DS_MEM_CTRL_WE s2BIT(7)
+#define RTS_DS_MEM_CTRL_STROBE_NEW_CMD s2BIT(15)
+#define RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED s2BIT(15)
+#define RTS_DS_MEM_CTRL_OFFSET(n) vBIT(n,26,6)
+ u64 rts_ds_mem_data;
+#define RTS_DS_MEM_DATA(n) vBIT(n,0,8)
+
+ u8 unused16[0x700 - 0x220];
+
+ u64 mac_debug_ctrl;
+#define MAC_DBG_ACTIVITY_VALUE 0x411040400000000ULL
+
+ u8 unused17[0x2800 - 0x2708];
+
+/* memory controller registers */
+ u64 mc_int_status;
+#define MC_INT_STATUS_MC_INT s2BIT(0)
+ u64 mc_int_mask;
+#define MC_INT_MASK_MC_INT s2BIT(0)
+
+ u64 mc_err_reg;
+#define MC_ERR_REG_ECC_DB_ERR_L s2BIT(14)
+#define MC_ERR_REG_ECC_DB_ERR_U s2BIT(15)
+#define MC_ERR_REG_MIRI_ECC_DB_ERR_0 s2BIT(18)
+#define MC_ERR_REG_MIRI_ECC_DB_ERR_1 s2BIT(20)
+#define MC_ERR_REG_MIRI_CRI_ERR_0 s2BIT(22)
+#define MC_ERR_REG_MIRI_CRI_ERR_1 s2BIT(23)
+#define MC_ERR_REG_SM_ERR s2BIT(31)
+#define MC_ERR_REG_ECC_ALL_SNG (s2BIT(2) | s2BIT(3) | s2BIT(4) | s2BIT(5) |\
+ s2BIT(17) | s2BIT(19))
+#define MC_ERR_REG_ECC_ALL_DBL (s2BIT(10) | s2BIT(11) | s2BIT(12) |\
+ s2BIT(13) | s2BIT(18) | s2BIT(20))
+#define PLL_LOCK_N s2BIT(39)
+ u64 mc_err_mask;
+ u64 mc_err_alarm;
+
+ u8 unused18[0x100 - 0x28];
+
+/* MC configuration */
+ u64 rx_queue_cfg;
+#define RX_QUEUE_CFG_Q0_SZ(n) vBIT(n,0,8)
+#define RX_QUEUE_CFG_Q1_SZ(n) vBIT(n,8,8)
+#define RX_QUEUE_CFG_Q2_SZ(n) vBIT(n,16,8)
+#define RX_QUEUE_CFG_Q3_SZ(n) vBIT(n,24,8)
+#define RX_QUEUE_CFG_Q4_SZ(n) vBIT(n,32,8)
+#define RX_QUEUE_CFG_Q5_SZ(n) vBIT(n,40,8)
+#define RX_QUEUE_CFG_Q6_SZ(n) vBIT(n,48,8)
+#define RX_QUEUE_CFG_Q7_SZ(n) vBIT(n,56,8)
+
+ u64 mc_rldram_mrs;
+#define MC_RLDRAM_QUEUE_SIZE_ENABLE s2BIT(39)
+#define MC_RLDRAM_MRS_ENABLE s2BIT(47)
+
+ u64 mc_rldram_interleave;
+
+ u64 mc_pause_thresh_q0q3;
+ u64 mc_pause_thresh_q4q7;
+
+ u64 mc_red_thresh_q[8];
+
+ u8 unused19[0x200 - 0x168];
+ u64 mc_rldram_ref_per;
+ u8 unused20[0x220 - 0x208];
+ u64 mc_rldram_test_ctrl;
+#define MC_RLDRAM_TEST_MODE s2BIT(47)
+#define MC_RLDRAM_TEST_WRITE s2BIT(7)
+#define MC_RLDRAM_TEST_GO s2BIT(15)
+#define MC_RLDRAM_TEST_DONE s2BIT(23)
+#define MC_RLDRAM_TEST_PASS s2BIT(31)
+
+ u8 unused21[0x240 - 0x228];
+ u64 mc_rldram_test_add;
+ u8 unused22[0x260 - 0x248];
+ u64 mc_rldram_test_d0;
+ u8 unused23[0x280 - 0x268];
+ u64 mc_rldram_test_d1;
+ u8 unused24[0x300 - 0x288];
+ u64 mc_rldram_test_d2;
+
+ u8 unused24_1[0x360 - 0x308];
+ u64 mc_rldram_ctrl;
+#define MC_RLDRAM_ENABLE_ODT s2BIT(7)
+
+ u8 unused24_2[0x640 - 0x368];
+ u64 mc_rldram_ref_per_herc;
+#define MC_RLDRAM_SET_REF_PERIOD(val) vBIT(val, 0, 16)
+
+ u8 unused24_3[0x660 - 0x648];
+ u64 mc_rldram_mrs_herc;
+
+ u8 unused25[0x700 - 0x668];
+ u64 mc_debug_ctrl;
+
+ u8 unused26[0x3000 - 0x2f08];
+
+/* XGXS */
+ /* XGXS control registers */
+
+ u64 xgxs_int_status;
+#define XGXS_INT_STATUS_TXGXS s2BIT(0)
+#define XGXS_INT_STATUS_RXGXS s2BIT(1)
+ u64 xgxs_int_mask;
+#define XGXS_INT_MASK_TXGXS s2BIT(0)
+#define XGXS_INT_MASK_RXGXS s2BIT(1)
+
+ u64 xgxs_txgxs_err_reg;
+#define TXGXS_ECC_SG_ERR s2BIT(7)
+#define TXGXS_ECC_DB_ERR s2BIT(15)
+#define TXGXS_ESTORE_UFLOW s2BIT(31)
+#define TXGXS_TX_SM_ERR s2BIT(39)
+
+ u64 xgxs_txgxs_err_mask;
+ u64 xgxs_txgxs_err_alarm;
+
+ u64 xgxs_rxgxs_err_reg;
+#define RXGXS_ESTORE_OFLOW s2BIT(7)
+#define RXGXS_RX_SM_ERR s2BIT(39)
+ u64 xgxs_rxgxs_err_mask;
+ u64 xgxs_rxgxs_err_alarm;
+
+ u8 unused27[0x100 - 0x40];
+
+ u64 xgxs_cfg;
+ u64 xgxs_status;
+
+ u64 xgxs_cfg_key;
+ u64 xgxs_efifo_cfg; /* CHANGED */
+ u64 rxgxs_ber_0; /* CHANGED */
+ u64 rxgxs_ber_1; /* CHANGED */
+
+ u64 spi_control;
+#define SPI_CONTROL_KEY(key) vBIT(key,0,4)
+#define SPI_CONTROL_BYTECNT(cnt) vBIT(cnt,29,3)
+#define SPI_CONTROL_CMD(cmd) vBIT(cmd,32,8)
+#define SPI_CONTROL_ADDR(addr) vBIT(addr,40,24)
+#define SPI_CONTROL_SEL1 s2BIT(4)
+#define SPI_CONTROL_REQ s2BIT(7)
+#define SPI_CONTROL_NACK s2BIT(5)
+#define SPI_CONTROL_DONE s2BIT(6)
+ u64 spi_data;
+#define SPI_DATA_WRITE(data,len) vBIT(data,0,len)
+};
+
+#define XENA_REG_SPACE sizeof(struct XENA_dev_config)
+#define XENA_EEPROM_SPACE (0x01 << 11)
+
+#endif /* _REGS_H */
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
new file mode 100644
index 000000000000..4ec7e3f46cc6
--- /dev/null
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -0,0 +1,8674 @@
+/************************************************************************
+ * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
+ * Copyright(c) 2002-2010 Exar Corp.
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ *
+ * Credits:
+ * Jeff Garzik : For pointing out the improper error condition
+ * check in the s2io_xmit routine and also some
+ * issues in the Tx watch dog function. Also for
+ * patiently answering all those innumerable
+ * questions regarding the 2.6 porting issues.
+ * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
+ * macros available only in 2.6 Kernel.
+ * Francois Romieu : For pointing out all code part that were
+ * deprecated and also styling related comments.
+ * Grant Grundler : For helping me get rid of some Architecture
+ * dependent code.
+ * Christopher Hellwig : Some more 2.6 specific issues in the driver.
+ *
+ * The following module loadable parameters are supported by the driver,
+ * with a brief explanation of each:
+ *
+ * rx_ring_num : This can be used to program the number of receive rings used
+ * in the driver.
+ * rx_ring_sz: This defines the number of receive blocks each ring can have.
+ * This is also an array of size 8.
+ * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
+ * values are 1, 2.
+ * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
+ * tx_fifo_len: This too is an array of 8. Each element defines the number of
+ * Tx descriptors that can be associated with each corresponding FIFO.
+ * intr_type: This defines the type of interrupt. The values can be 0(INTA),
+ * 2(MSI_X). Default value is '2(MSI_X)'
+ * lro_max_pkts: This parameter defines the maximum number of packets that
+ * can be aggregated as a single large packet
+ * napi: This parameter is used to enable/disable NAPI (polling Rx).
+ * Possible values '1' for enable and '0' for disable. Default is '1'
+ * ufo: This parameter is used to enable/disable UDP Fragmentation Offload (UFO).
+ * Possible values '1' for enable and '0' for disable. Default is '0'
+ * vlan_tag_strip: This can be used to enable or disable vlan stripping.
+ * Possible values '1' for enable, '0' for disable.
+ * Default is '2' - which means disable in promiscuous mode
+ * and enable in non-promiscuous mode.
+ * multiq: This parameter is used to enable/disable MULTIQUEUE support.
+ * Possible values '1' for enable and '0' for disable. Default is '0'
+ ************************************************************************/
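+
+/*
+ * A hypothetical example invocation using the parameters documented
+ * above (values are illustrative only):
+ *
+ *   modprobe s2io rx_ring_num=2 tx_fifo_num=4 intr_type=2 napi=1
+ */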
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mdio.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/stddef.h>
+#include <linux/ioctl.h>
+#include <linux/timex.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+#include <net/tcp.h>
+
+#include <asm/system.h>
+#include <asm/div64.h>
+#include <asm/irq.h>
+
+/* local include */
+#include "s2io.h"
+#include "s2io-regs.h"
+
+#define DRV_VERSION "2.0.26.28"
+
+/* S2io Driver name & version. */
+static const char s2io_driver_name[] = "Neterion";
+static const char s2io_driver_version[] = DRV_VERSION;
+
+static const int rxd_size[2] = {32, 48};
+static const int rxd_count[2] = {127, 85};
+
+static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
+{
+ int ret;
+
+ ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
+ (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
+
+ return ret;
+}
+
+/*
+ * Cards with the following subsystem_ids have a link state indication
+ * problem: 600B, 600C, 600D, 640B, 640C and 640D.
+ * The macro below identifies these cards given the subsystem_id.
+ */
+#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
+ (dev_type == XFRAME_I_DEVICE) ? \
+ ((((subid >= 0x600B) && (subid <= 0x600D)) || \
+ ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
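+/*
+ * Illustrative example: CARDS_WITH_FAULTY_LINK_INDICATORS(XFRAME_I_DEVICE,
+ * 0x600C) evaluates to 1; any subsystem id outside the two faulty ranges,
+ * or any non-Xframe-I device type, yields 0.
+ */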
+
+#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
+ ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
+
+static inline int is_s2io_card_up(const struct s2io_nic *sp)
+{
+ return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
+}
+
+/* Ethtool related variables and Macros. */
+static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
+ "Register test\t(offline)",
+ "Eeprom test\t(offline)",
+ "Link test\t(online)",
+ "RLDRAM test\t(offline)",
+ "BIST Test\t(offline)"
+};
+
+static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
+ {"tmac_frms"},
+ {"tmac_data_octets"},
+ {"tmac_drop_frms"},
+ {"tmac_mcst_frms"},
+ {"tmac_bcst_frms"},
+ {"tmac_pause_ctrl_frms"},
+ {"tmac_ttl_octets"},
+ {"tmac_ucst_frms"},
+ {"tmac_nucst_frms"},
+ {"tmac_any_err_frms"},
+ {"tmac_ttl_less_fb_octets"},
+ {"tmac_vld_ip_octets"},
+ {"tmac_vld_ip"},
+ {"tmac_drop_ip"},
+ {"tmac_icmp"},
+ {"tmac_rst_tcp"},
+ {"tmac_tcp"},
+ {"tmac_udp"},
+ {"rmac_vld_frms"},
+ {"rmac_data_octets"},
+ {"rmac_fcs_err_frms"},
+ {"rmac_drop_frms"},
+ {"rmac_vld_mcst_frms"},
+ {"rmac_vld_bcst_frms"},
+ {"rmac_in_rng_len_err_frms"},
+ {"rmac_out_rng_len_err_frms"},
+ {"rmac_long_frms"},
+ {"rmac_pause_ctrl_frms"},
+ {"rmac_unsup_ctrl_frms"},
+ {"rmac_ttl_octets"},
+ {"rmac_accepted_ucst_frms"},
+ {"rmac_accepted_nucst_frms"},
+ {"rmac_discarded_frms"},
+ {"rmac_drop_events"},
+ {"rmac_ttl_less_fb_octets"},
+ {"rmac_ttl_frms"},
+ {"rmac_usized_frms"},
+ {"rmac_osized_frms"},
+ {"rmac_frag_frms"},
+ {"rmac_jabber_frms"},
+ {"rmac_ttl_64_frms"},
+ {"rmac_ttl_65_127_frms"},
+ {"rmac_ttl_128_255_frms"},
+ {"rmac_ttl_256_511_frms"},
+ {"rmac_ttl_512_1023_frms"},
+ {"rmac_ttl_1024_1518_frms"},
+ {"rmac_ip"},
+ {"rmac_ip_octets"},
+ {"rmac_hdr_err_ip"},
+ {"rmac_drop_ip"},
+ {"rmac_icmp"},
+ {"rmac_tcp"},
+ {"rmac_udp"},
+ {"rmac_err_drp_udp"},
+ {"rmac_xgmii_err_sym"},
+ {"rmac_frms_q0"},
+ {"rmac_frms_q1"},
+ {"rmac_frms_q2"},
+ {"rmac_frms_q3"},
+ {"rmac_frms_q4"},
+ {"rmac_frms_q5"},
+ {"rmac_frms_q6"},
+ {"rmac_frms_q7"},
+ {"rmac_full_q0"},
+ {"rmac_full_q1"},
+ {"rmac_full_q2"},
+ {"rmac_full_q3"},
+ {"rmac_full_q4"},
+ {"rmac_full_q5"},
+ {"rmac_full_q6"},
+ {"rmac_full_q7"},
+ {"rmac_pause_cnt"},
+ {"rmac_xgmii_data_err_cnt"},
+ {"rmac_xgmii_ctrl_err_cnt"},
+ {"rmac_accepted_ip"},
+ {"rmac_err_tcp"},
+ {"rd_req_cnt"},
+ {"new_rd_req_cnt"},
+ {"new_rd_req_rtry_cnt"},
+ {"rd_rtry_cnt"},
+ {"wr_rtry_rd_ack_cnt"},
+ {"wr_req_cnt"},
+ {"new_wr_req_cnt"},
+ {"new_wr_req_rtry_cnt"},
+ {"wr_rtry_cnt"},
+ {"wr_disc_cnt"},
+ {"rd_rtry_wr_ack_cnt"},
+ {"txp_wr_cnt"},
+ {"txd_rd_cnt"},
+ {"txd_wr_cnt"},
+ {"rxd_rd_cnt"},
+ {"rxd_wr_cnt"},
+ {"txf_rd_cnt"},
+ {"rxf_wr_cnt"}
+};
+
+static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
+ {"rmac_ttl_1519_4095_frms"},
+ {"rmac_ttl_4096_8191_frms"},
+ {"rmac_ttl_8192_max_frms"},
+ {"rmac_ttl_gt_max_frms"},
+ {"rmac_osized_alt_frms"},
+ {"rmac_jabber_alt_frms"},
+ {"rmac_gt_max_alt_frms"},
+ {"rmac_vlan_frms"},
+ {"rmac_len_discard"},
+ {"rmac_fcs_discard"},
+ {"rmac_pf_discard"},
+ {"rmac_da_discard"},
+ {"rmac_red_discard"},
+ {"rmac_rts_discard"},
+ {"rmac_ingm_full_discard"},
+ {"link_fault_cnt"}
+};
+
+static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
+ {"\n DRIVER STATISTICS"},
+ {"single_bit_ecc_errs"},
+ {"double_bit_ecc_errs"},
+ {"parity_err_cnt"},
+ {"serious_err_cnt"},
+ {"soft_reset_cnt"},
+ {"fifo_full_cnt"},
+ {"ring_0_full_cnt"},
+ {"ring_1_full_cnt"},
+ {"ring_2_full_cnt"},
+ {"ring_3_full_cnt"},
+ {"ring_4_full_cnt"},
+ {"ring_5_full_cnt"},
+ {"ring_6_full_cnt"},
+ {"ring_7_full_cnt"},
+ {"alarm_transceiver_temp_high"},
+ {"alarm_transceiver_temp_low"},
+ {"alarm_laser_bias_current_high"},
+ {"alarm_laser_bias_current_low"},
+ {"alarm_laser_output_power_high"},
+ {"alarm_laser_output_power_low"},
+ {"warn_transceiver_temp_high"},
+ {"warn_transceiver_temp_low"},
+ {"warn_laser_bias_current_high"},
+ {"warn_laser_bias_current_low"},
+ {"warn_laser_output_power_high"},
+ {"warn_laser_output_power_low"},
+ {"lro_aggregated_pkts"},
+ {"lro_flush_both_count"},
+ {"lro_out_of_sequence_pkts"},
+ {"lro_flush_due_to_max_pkts"},
+ {"lro_avg_aggr_pkts"},
+ {"mem_alloc_fail_cnt"},
+ {"pci_map_fail_cnt"},
+ {"watchdog_timer_cnt"},
+ {"mem_allocated"},
+ {"mem_freed"},
+ {"link_up_cnt"},
+ {"link_down_cnt"},
+ {"link_up_time"},
+ {"link_down_time"},
+ {"tx_tcode_buf_abort_cnt"},
+ {"tx_tcode_desc_abort_cnt"},
+ {"tx_tcode_parity_err_cnt"},
+ {"tx_tcode_link_loss_cnt"},
+ {"tx_tcode_list_proc_err_cnt"},
+ {"rx_tcode_parity_err_cnt"},
+ {"rx_tcode_abort_cnt"},
+ {"rx_tcode_parity_abort_cnt"},
+ {"rx_tcode_rda_fail_cnt"},
+ {"rx_tcode_unkn_prot_cnt"},
+ {"rx_tcode_fcs_err_cnt"},
+ {"rx_tcode_buf_size_err_cnt"},
+ {"rx_tcode_rxd_corrupt_cnt"},
+ {"rx_tcode_unkn_err_cnt"},
+ {"tda_err_cnt"},
+ {"pfc_err_cnt"},
+ {"pcc_err_cnt"},
+ {"tti_err_cnt"},
+ {"tpa_err_cnt"},
+ {"sm_err_cnt"},
+ {"lso_err_cnt"},
+ {"mac_tmac_err_cnt"},
+ {"mac_rmac_err_cnt"},
+ {"xgxs_txgxs_err_cnt"},
+ {"xgxs_rxgxs_err_cnt"},
+ {"rc_err_cnt"},
+ {"prc_pcix_err_cnt"},
+ {"rpa_err_cnt"},
+ {"rda_err_cnt"},
+ {"rti_err_cnt"},
+ {"mc_err_cnt"}
+};
+
+#define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
+#define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
+#define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
+
+#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
+#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
+
+#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
+#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
+
+#define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
+#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
+
+#define S2IO_TIMER_CONF(timer, handle, arg, exp) \
+ do { \
+ init_timer(&timer); \
+ timer.function = handle; \
+ timer.data = (unsigned long)arg; \
+ mod_timer(&timer, (jiffies + exp)); \
+ } while (0)
+
+/* copy mac addr to def_mac_addr array */
+static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
+{
+ sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
+ sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
+ sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
+ sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
+ sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
+ sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
+}
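+
+/*
+ * Illustrative example (hypothetical address): for
+ * mac_addr == 0x0050569A0001ULL the bytes land in network order as
+ * def_mac_addr[offset].mac_addr[] = { 0x00, 0x50, 0x56, 0x9A, 0x00, 0x01 },
+ * i.e. 00:50:56:9a:00:01.
+ */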
+
+/*
+ * Constants to be programmed into the Xena's registers, to configure
+ * the XAUI.
+ */
+
+#define END_SIGN 0x0
+static const u64 herc_act_dtx_cfg[] = {
+ /* Set address */
+ 0x8000051536750000ULL, 0x80000515367500E0ULL,
+ /* Write data */
+ 0x8000051536750004ULL, 0x80000515367500E4ULL,
+ /* Set address */
+ 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
+ /* Write data */
+ 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
+ /* Set address */
+ 0x801205150D440000ULL, 0x801205150D4400E0ULL,
+ /* Write data */
+ 0x801205150D440004ULL, 0x801205150D4400E4ULL,
+ /* Set address */
+ 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
+ /* Write data */
+ 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
+ /* Done */
+ END_SIGN
+};
+
+static const u64 xena_dtx_cfg[] = {
+ /* Set address */
+ 0x8000051500000000ULL, 0x80000515000000E0ULL,
+ /* Write data */
+ 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
+ /* Set address */
+ 0x8001051500000000ULL, 0x80010515000000E0ULL,
+ /* Write data */
+ 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
+ /* Set address */
+ 0x8002051500000000ULL, 0x80020515000000E0ULL,
+ /* Write data */
+ 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
+ END_SIGN
+};
+
+/*
+ * Constants for fixing the MAC address problem seen mostly on
+ * Alpha machines.
+ */
+static const u64 fix_mac[] = {
+ 0x0060000000000000ULL, 0x0060600000000000ULL,
+ 0x0040600000000000ULL, 0x0000600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0000600000000000ULL,
+ 0x0040600000000000ULL, 0x0060600000000000ULL,
+ END_SIGN
+};
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+
+/* Module Loadable parameters. */
+S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
+S2IO_PARM_INT(rx_ring_num, 1);
+S2IO_PARM_INT(multiq, 0);
+S2IO_PARM_INT(rx_ring_mode, 1);
+S2IO_PARM_INT(use_continuous_tx_intrs, 1);
+S2IO_PARM_INT(rmac_pause_time, 0x100);
+S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
+S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
+S2IO_PARM_INT(shared_splits, 0);
+S2IO_PARM_INT(tmac_util_period, 5);
+S2IO_PARM_INT(rmac_util_period, 5);
+S2IO_PARM_INT(l3l4hdr_size, 128);
+/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
+S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
+/* Frequency of Rx desc syncs expressed as power of 2 */
+S2IO_PARM_INT(rxsync_frequency, 3);
+/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
+S2IO_PARM_INT(intr_type, 2);
+/* Large receive offload feature */
+
+/* Max pkts to be aggregated by LRO at one time. If not specified,
+ * aggregation happens until we hit max IP pkt size(64K)
+ */
+S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
+S2IO_PARM_INT(indicate_max_pkts, 0);
+
+S2IO_PARM_INT(napi, 1);
+S2IO_PARM_INT(ufo, 0);
+S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
+
+static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
+{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
+static unsigned int rx_ring_sz[MAX_RX_RINGS] =
+{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
+static unsigned int rts_frm_len[MAX_RX_RINGS] =
+{[0 ...(MAX_RX_RINGS - 1)] = 0 };
+
+module_param_array(tx_fifo_len, uint, NULL, 0);
+module_param_array(rx_ring_sz, uint, NULL, 0);
+module_param_array(rts_frm_len, uint, NULL, 0);
+
+/*
+ * S2IO device table.
+ * This table lists all the devices that this driver supports.
+ */
+static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
+ {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
+ PCI_ANY_ID, PCI_ANY_ID},
+ {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
+ PCI_ANY_ID, PCI_ANY_ID},
+ {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
+ PCI_ANY_ID, PCI_ANY_ID},
+ {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
+ PCI_ANY_ID, PCI_ANY_ID},
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, s2io_tbl);
+
+static struct pci_error_handlers s2io_err_handler = {
+ .error_detected = s2io_io_error_detected,
+ .slot_reset = s2io_io_slot_reset,
+ .resume = s2io_io_resume,
+};
+
+static struct pci_driver s2io_driver = {
+ .name = "S2IO",
+ .id_table = s2io_tbl,
+ .probe = s2io_init_nic,
+ .remove = __devexit_p(s2io_rem_nic),
+ .err_handler = &s2io_err_handler,
+};
+
+/* A helper macro used by both the init and free shared_mem functions. */
+#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
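+/* e.g. TXD_MEM_PAGE_CNT(100, 32) == (100 + 32 - 1) / 32 == 4 pages */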
+
+/* netqueue manipulation helper functions */
+static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
+{
+ if (!sp->config.multiq) {
+ int i;
+
+ for (i = 0; i < sp->config.tx_fifo_num; i++)
+ sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
+ }
+ netif_tx_stop_all_queues(sp->dev);
+}
+
+static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
+{
+ if (!sp->config.multiq)
+ sp->mac_control.fifos[fifo_no].queue_state =
+ FIFO_QUEUE_STOP;
+
+ netif_tx_stop_all_queues(sp->dev);
+}
+
+static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
+{
+ if (!sp->config.multiq) {
+ int i;
+
+ for (i = 0; i < sp->config.tx_fifo_num; i++)
+ sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
+ }
+ netif_tx_start_all_queues(sp->dev);
+}
+
+static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
+{
+ if (!sp->config.multiq)
+ sp->mac_control.fifos[fifo_no].queue_state =
+ FIFO_QUEUE_START;
+
+ netif_tx_start_all_queues(sp->dev);
+}
+
+static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
+{
+ if (!sp->config.multiq) {
+ int i;
+
+ for (i = 0; i < sp->config.tx_fifo_num; i++)
+ sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
+ }
+ netif_tx_wake_all_queues(sp->dev);
+}
+
+static inline void s2io_wake_tx_queue(
+ struct fifo_info *fifo, int cnt, u8 multiq)
+{
+
+ if (multiq) {
+ if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
+ netif_wake_subqueue(fifo->dev, fifo->fifo_no);
+ } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
+ if (netif_queue_stopped(fifo->dev)) {
+ fifo->queue_state = FIFO_QUEUE_START;
+ netif_wake_queue(fifo->dev);
+ }
+ }
+}
+
+/**
+ * init_shared_mem - Allocation and Initialization of Memory
+ * @nic: Device private variable.
+ * Description: The function allocates all the memory areas shared
+ * between the NIC and the driver. This includes Tx descriptors,
+ * Rx descriptors and the statistics block.
+ */
+
+static int init_shared_mem(struct s2io_nic *nic)
+{
+ u32 size;
+ void *tmp_v_addr, *tmp_v_addr_next;
+ dma_addr_t tmp_p_addr, tmp_p_addr_next;
+ struct RxD_block *pre_rxd_blk = NULL;
+ int i, j, blk_cnt;
+ int lst_size, lst_per_page;
+ struct net_device *dev = nic->dev;
+ unsigned long tmp;
+ struct buffAdd *ba;
+ struct config_param *config = &nic->config;
+ struct mac_info *mac_control = &nic->mac_control;
+ unsigned long long mem_allocated = 0;
+
+ /* Allocation and initialization of TXDLs in FIFOs */
+ size = 0;
+ for (i = 0; i < config->tx_fifo_num; i++) {
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+ size += tx_cfg->fifo_len;
+ }
+ if (size > MAX_AVAILABLE_TXDS) {
+ DBG_PRINT(ERR_DBG,
+ "Too many TxDs requested: %d, max supported: %d\n",
+ size, MAX_AVAILABLE_TXDS);
+ return -EINVAL;
+ }
+
+ size = 0;
+ for (i = 0; i < config->tx_fifo_num; i++) {
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+ size = tx_cfg->fifo_len;
+ /*
+ * Legal values are from 2 to 8192
+ */
+ if (size < 2) {
+ DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
+ "Valid lengths are 2 through 8192\n",
+ i, size);
+ return -EINVAL;
+ }
+ }
+
+ lst_size = (sizeof(struct TxD) * config->max_txds);
+ lst_per_page = PAGE_SIZE / lst_size;
+
+ for (i = 0; i < config->tx_fifo_num; i++) {
+ struct fifo_info *fifo = &mac_control->fifos[i];
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+ int fifo_len = tx_cfg->fifo_len;
+ int list_holder_size = fifo_len * sizeof(struct list_info_hold);
+
+ fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
+ if (!fifo->list_info) {
+ DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
+ return -ENOMEM;
+ }
+ mem_allocated += list_holder_size;
+ }
+ for (i = 0; i < config->tx_fifo_num; i++) {
+ int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
+ lst_per_page);
+ struct fifo_info *fifo = &mac_control->fifos[i];
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+ fifo->tx_curr_put_info.offset = 0;
+ fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
+ fifo->tx_curr_get_info.offset = 0;
+ fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
+ fifo->fifo_no = i;
+ fifo->nic = nic;
+ fifo->max_txds = MAX_SKB_FRAGS + 2;
+ fifo->dev = dev;
+
+ for (j = 0; j < page_num; j++) {
+ int k = 0;
+ dma_addr_t tmp_p;
+ void *tmp_v;
+ tmp_v = pci_alloc_consistent(nic->pdev,
+ PAGE_SIZE, &tmp_p);
+ if (!tmp_v) {
+ DBG_PRINT(INFO_DBG,
+ "pci_alloc_consistent failed for TxDL\n");
+ return -ENOMEM;
+ }
+ /* If we got a zero DMA address (this can happen on
+ * certain platforms like PPC), reallocate.
+ * Store the virtual address of the unwanted page,
+ * so it can be freed later.
+ */
+ if (!tmp_p) {
+ mac_control->zerodma_virt_addr = tmp_v;
+ DBG_PRINT(INIT_DBG,
+ "%s: Zero DMA address for TxDL. "
+ "Virtual address %p\n",
+ dev->name, tmp_v);
+ tmp_v = pci_alloc_consistent(nic->pdev,
+ PAGE_SIZE, &tmp_p);
+ if (!tmp_v) {
+ DBG_PRINT(INFO_DBG,
+ "pci_alloc_consistent failed for TxDL\n");
+ return -ENOMEM;
+ }
+ mem_allocated += PAGE_SIZE;
+ }
+ while (k < lst_per_page) {
+ int l = (j * lst_per_page) + k;
+ if (l == tx_cfg->fifo_len)
+ break;
+ fifo->list_info[l].list_virt_addr =
+ tmp_v + (k * lst_size);
+ fifo->list_info[l].list_phy_addr =
+ tmp_p + (k * lst_size);
+ k++;
+ }
+ }
+ }
+
+ for (i = 0; i < config->tx_fifo_num; i++) {
+ struct fifo_info *fifo = &mac_control->fifos[i];
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+ size = tx_cfg->fifo_len;
+ fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
+ if (!fifo->ufo_in_band_v)
+ return -ENOMEM;
+ mem_allocated += (size * sizeof(u64));
+ }
+
+ /* Allocation and initialization of RXDs in Rings */
+ size = 0;
+ for (i = 0; i < config->rx_ring_num; i++) {
+ struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+ struct ring_info *ring = &mac_control->rings[i];
+
+ if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
+ DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
+ "multiple of RxDs per Block\n",
+ dev->name, i);
+ return FAILURE;
+ }
+ size += rx_cfg->num_rxd;
+ ring->block_count = rx_cfg->num_rxd /
+ (rxd_count[nic->rxd_mode] + 1);
+ ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
+ }
+ if (nic->rxd_mode == RXD_MODE_1)
+ size = (size * (sizeof(struct RxD1)));
+ else
+ size = (size * (sizeof(struct RxD3)));
+
+ for (i = 0; i < config->rx_ring_num; i++) {
+ struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+ struct ring_info *ring = &mac_control->rings[i];
+
+ ring->rx_curr_get_info.block_index = 0;
+ ring->rx_curr_get_info.offset = 0;
+ ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
+ ring->rx_curr_put_info.block_index = 0;
+ ring->rx_curr_put_info.offset = 0;
+ ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
+ ring->nic = nic;
+ ring->ring_no = i;
+
+ blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
+ /* Allocating all the Rx blocks */
+ for (j = 0; j < blk_cnt; j++) {
+ struct rx_block_info *rx_blocks;
+ int l;
+
+ rx_blocks = &ring->rx_blocks[j];
+ size = SIZE_OF_BLOCK; /* size is always page size */
+ tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
+ &tmp_p_addr);
+ if (tmp_v_addr == NULL) {
+				/*
+				 * In case of failure, free_shared_mem()
+				 * is called, which should free any
+				 * memory that was allocated up to the
+				 * point of failure.
+				 */
+ rx_blocks->block_virt_addr = tmp_v_addr;
+ return -ENOMEM;
+ }
+ mem_allocated += size;
+ memset(tmp_v_addr, 0, size);
+
+ size = sizeof(struct rxd_info) *
+ rxd_count[nic->rxd_mode];
+ rx_blocks->block_virt_addr = tmp_v_addr;
+ rx_blocks->block_dma_addr = tmp_p_addr;
+ rx_blocks->rxds = kmalloc(size, GFP_KERNEL);
+ if (!rx_blocks->rxds)
+ return -ENOMEM;
+ mem_allocated += size;
+ for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
+ rx_blocks->rxds[l].virt_addr =
+ rx_blocks->block_virt_addr +
+ (rxd_size[nic->rxd_mode] * l);
+ rx_blocks->rxds[l].dma_addr =
+ rx_blocks->block_dma_addr +
+ (rxd_size[nic->rxd_mode] * l);
+ }
+ }
+ /* Interlinking all Rx Blocks */
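+		/*
+		 * The (j + 1) % blk_cnt wrap makes the last block point
+		 * back to block 0, so the RxD blocks form a ring the NIC
+		 * can walk indefinitely.
+		 */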
+ for (j = 0; j < blk_cnt; j++) {
+ int next = (j + 1) % blk_cnt;
+ tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
+ tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
+ tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
+ tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
+
+ pre_rxd_blk = tmp_v_addr;
+ pre_rxd_blk->reserved_2_pNext_RxD_block =
+ (unsigned long)tmp_v_addr_next;
+ pre_rxd_blk->pNext_RxD_Blk_physical =
+ (u64)tmp_p_addr_next;
+ }
+ }
+ if (nic->rxd_mode == RXD_MODE_3B) {
+ /*
+ * Allocation of Storages for buffer addresses in 2BUFF mode
+ * and the buffers as well.
+ */
+ for (i = 0; i < config->rx_ring_num; i++) {
+ struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+ struct ring_info *ring = &mac_control->rings[i];
+
+ blk_cnt = rx_cfg->num_rxd /
+ (rxd_count[nic->rxd_mode] + 1);
+ size = sizeof(struct buffAdd *) * blk_cnt;
+ ring->ba = kmalloc(size, GFP_KERNEL);
+ if (!ring->ba)
+ return -ENOMEM;
+ mem_allocated += size;
+ for (j = 0; j < blk_cnt; j++) {
+ int k = 0;
+
+ size = sizeof(struct buffAdd) *
+ (rxd_count[nic->rxd_mode] + 1);
+ ring->ba[j] = kmalloc(size, GFP_KERNEL);
+ if (!ring->ba[j])
+ return -ENOMEM;
+ mem_allocated += size;
+ while (k != rxd_count[nic->rxd_mode]) {
+ ba = &ring->ba[j][k];
+ size = BUF0_LEN + ALIGN_SIZE;
+ ba->ba_0_org = kmalloc(size, GFP_KERNEL);
+ if (!ba->ba_0_org)
+ return -ENOMEM;
+ mem_allocated += size;
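+					/*
+					 * Round ba_0 up to the next
+					 * (ALIGN_SIZE + 1) byte boundary;
+					 * ALIGN_SIZE is expected to be of
+					 * the form 2^n - 1, so the
+					 * add-and-mask below picks an
+					 * aligned address inside the
+					 * over-sized allocation.
+					 */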
+ tmp = (unsigned long)ba->ba_0_org;
+ tmp += ALIGN_SIZE;
+ tmp &= ~((unsigned long)ALIGN_SIZE);
+ ba->ba_0 = (void *)tmp;
+
+ size = BUF1_LEN + ALIGN_SIZE;
+ ba->ba_1_org = kmalloc(size, GFP_KERNEL);
+ if (!ba->ba_1_org)
+ return -ENOMEM;
+ mem_allocated += size;
+ tmp = (unsigned long)ba->ba_1_org;
+ tmp += ALIGN_SIZE;
+ tmp &= ~((unsigned long)ALIGN_SIZE);
+ ba->ba_1 = (void *)tmp;
+ k++;
+ }
+ }
+ }
+ }
+
+ /* Allocation and initialization of Statistics block */
+ size = sizeof(struct stat_block);
+ mac_control->stats_mem =
+ pci_alloc_consistent(nic->pdev, size,
+ &mac_control->stats_mem_phy);
+
+ if (!mac_control->stats_mem) {
+		/*
+		 * In case of failure, free_shared_mem() is called, which
+		 * should free any memory that was allocated up to the
+		 * point of failure.
+		 */
+ return -ENOMEM;
+ }
+ mem_allocated += size;
+ mac_control->stats_mem_sz = size;
+
+ tmp_v_addr = mac_control->stats_mem;
+ mac_control->stats_info = tmp_v_addr;
+ memset(tmp_v_addr, 0, size);
+ DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
+ dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
+ mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
+ return SUCCESS;
+}
+
+/**
+ * free_shared_mem - Free the allocated Memory
+ * @nic: Device private variable.
+ * Description: Frees all memory locations allocated by the
+ * init_shared_mem() function and returns it to the kernel.
+ */
+
+static void free_shared_mem(struct s2io_nic *nic)
+{
+ int i, j, blk_cnt, size;
+ void *tmp_v_addr;
+ dma_addr_t tmp_p_addr;
+ int lst_size, lst_per_page;
+ struct net_device *dev;
+ int page_num = 0;
+ struct config_param *config;
+ struct mac_info *mac_control;
+ struct stat_block *stats;
+ struct swStat *swstats;
+
+ if (!nic)
+ return;
+
+ dev = nic->dev;
+
+ config = &nic->config;
+ mac_control = &nic->mac_control;
+ stats = mac_control->stats_info;
+ swstats = &stats->sw_stat;
+
+ lst_size = sizeof(struct TxD) * config->max_txds;
+ lst_per_page = PAGE_SIZE / lst_size;
+
+ for (i = 0; i < config->tx_fifo_num; i++) {
+ struct fifo_info *fifo = &mac_control->fifos[i];
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+ page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
+ for (j = 0; j < page_num; j++) {
+ int mem_blks = (j * lst_per_page);
+ struct list_info_hold *fli;
+
+ if (!fifo->list_info)
+ return;
+
+ fli = &fifo->list_info[mem_blks];
+ if (!fli->list_virt_addr)
+ break;
+ pci_free_consistent(nic->pdev, PAGE_SIZE,
+ fli->list_virt_addr,
+ fli->list_phy_addr);
+ swstats->mem_freed += PAGE_SIZE;
+ }
+ /* If we got a zero DMA address during allocation,
+ * free the page now
+ */
+ if (mac_control->zerodma_virt_addr) {
+ pci_free_consistent(nic->pdev, PAGE_SIZE,
+ mac_control->zerodma_virt_addr,
+ (dma_addr_t)0);
+ DBG_PRINT(INIT_DBG,
+ "%s: Freeing TxDL with zero DMA address. "
+ "Virtual address %p\n",
+ dev->name, mac_control->zerodma_virt_addr);
+ swstats->mem_freed += PAGE_SIZE;
+ }
+ kfree(fifo->list_info);
+ swstats->mem_freed += tx_cfg->fifo_len *
+ sizeof(struct list_info_hold);
+ }
+
+ size = SIZE_OF_BLOCK;
+ for (i = 0; i < config->rx_ring_num; i++) {
+ struct ring_info *ring = &mac_control->rings[i];
+
+ blk_cnt = ring->block_count;
+ for (j = 0; j < blk_cnt; j++) {
+ tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
+ tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
+ if (tmp_v_addr == NULL)
+ break;
+ pci_free_consistent(nic->pdev, size,
+ tmp_v_addr, tmp_p_addr);
+ swstats->mem_freed += size;
+ kfree(ring->rx_blocks[j].rxds);
+ swstats->mem_freed += sizeof(struct rxd_info) *
+ rxd_count[nic->rxd_mode];
+ }
+ }
+
+ if (nic->rxd_mode == RXD_MODE_3B) {
+ /* Freeing buffer storage addresses in 2BUFF mode. */
+ for (i = 0; i < config->rx_ring_num; i++) {
+ struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+ struct ring_info *ring = &mac_control->rings[i];
+
+ blk_cnt = rx_cfg->num_rxd /
+ (rxd_count[nic->rxd_mode] + 1);
+ for (j = 0; j < blk_cnt; j++) {
+ int k = 0;
+ if (!ring->ba[j])
+ continue;
+ while (k != rxd_count[nic->rxd_mode]) {
+ struct buffAdd *ba = &ring->ba[j][k];
+ kfree(ba->ba_0_org);
+ swstats->mem_freed +=
+ BUF0_LEN + ALIGN_SIZE;
+ kfree(ba->ba_1_org);
+ swstats->mem_freed +=
+ BUF1_LEN + ALIGN_SIZE;
+ k++;
+ }
+ kfree(ring->ba[j]);
+ swstats->mem_freed += sizeof(struct buffAdd) *
+ (rxd_count[nic->rxd_mode] + 1);
+ }
+ kfree(ring->ba);
+ swstats->mem_freed += sizeof(struct buffAdd *) *
+ blk_cnt;
+ }
+ }
+
+ for (i = 0; i < nic->config.tx_fifo_num; i++) {
+ struct fifo_info *fifo = &mac_control->fifos[i];
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+ if (fifo->ufo_in_band_v) {
+ swstats->mem_freed += tx_cfg->fifo_len *
+ sizeof(u64);
+ kfree(fifo->ufo_in_band_v);
+ }
+ }
+
+ if (mac_control->stats_mem) {
+ swstats->mem_freed += mac_control->stats_mem_sz;
+ pci_free_consistent(nic->pdev,
+ mac_control->stats_mem_sz,
+ mac_control->stats_mem,
+ mac_control->stats_mem_phy);
+ }
+}
+
+/**
+ * s2io_verify_pci_mode - Read back the bus mode the adapter negotiated
+ * @nic: device private variable.
+ * Return Value: the GET_PCI_MODE() value on success, -1 if the PCI
+ * mode is unknown.
+ */
+
+static int s2io_verify_pci_mode(struct s2io_nic *nic)
+{
+ struct XENA_dev_config __iomem *bar0 = nic->bar0;
+ register u64 val64 = 0;
+ int mode;
+
+ val64 = readq(&bar0->pci_mode);
+ mode = (u8)GET_PCI_MODE(val64);
+
+ if (val64 & PCI_MODE_UNKNOWN_MODE)
+ return -1; /* Unknown PCI mode */
+ return mode;
+}
+
+#define NEC_VENID 0x1033
+#define NEC_DEVID 0x0125
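+/*
+ * A device with this NEC vendor/device ID on the adapter's parent bus is
+ * taken to be a PCI-E to PCI-X bridge, in which case the adapter is
+ * treated as sitting on a PCI-E bus (see s2io_print_pci_mode()).
+ */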
+static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
+{
+ struct pci_dev *tdev = NULL;
+ while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
+ if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
+ if (tdev->bus == s2io_pdev->bus->parent) {
+ pci_dev_put(tdev);
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
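+/*
+ * Bus speed figures (MHz) indexed by GET_PCI_MODE(); cached in
+ * config->bus_speed and used by init_tti() and init_nic() to scale
+ * interrupt timers and PCI retry settings.
+ */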
+static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
+/**
+ * s2io_print_pci_mode - Print the negotiated bus width and mode
+ * @nic: device private variable.
+ * Description: Prints the bus the adapter sits on and caches its
+ * speed in config->bus_speed.
+ * Return Value: the PCI mode on success, -1 if the mode is unknown
+ * or unsupported.
+ */
+static int s2io_print_pci_mode(struct s2io_nic *nic)
+{
+ struct XENA_dev_config __iomem *bar0 = nic->bar0;
+ register u64 val64 = 0;
+ int mode;
+ struct config_param *config = &nic->config;
+ const char *pcimode;
+
+ val64 = readq(&bar0->pci_mode);
+ mode = (u8)GET_PCI_MODE(val64);
+
+ if (val64 & PCI_MODE_UNKNOWN_MODE)
+ return -1; /* Unknown PCI mode */
+
+ config->bus_speed = bus_speed[mode];
+
+ if (s2io_on_nec_bridge(nic->pdev)) {
+ DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
+ nic->dev->name);
+ return mode;
+ }
+
+ switch (mode) {
+ case PCI_MODE_PCI_33:
+ pcimode = "33MHz PCI bus";
+ break;
+ case PCI_MODE_PCI_66:
+ pcimode = "66MHz PCI bus";
+ break;
+ case PCI_MODE_PCIX_M1_66:
+ pcimode = "66MHz PCIX(M1) bus";
+ break;
+ case PCI_MODE_PCIX_M1_100:
+ pcimode = "100MHz PCIX(M1) bus";
+ break;
+ case PCI_MODE_PCIX_M1_133:
+ pcimode = "133MHz PCIX(M1) bus";
+ break;
+ case PCI_MODE_PCIX_M2_66:
+ pcimode = "133MHz PCIX(M2) bus";
+ break;
+ case PCI_MODE_PCIX_M2_100:
+ pcimode = "200MHz PCIX(M2) bus";
+ break;
+ case PCI_MODE_PCIX_M2_133:
+ pcimode = "266MHz PCIX(M2) bus";
+ break;
+ default:
+ pcimode = "unsupported bus!";
+ mode = -1;
+ }
+
+ DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
+ nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
+
+ return mode;
+}
+
+/**
+ * init_tti - Initialize the transmit traffic interrupt scheme
+ * @nic: device private variable
+ * @link: link status (UP/DOWN) used to enable/disable continuous
+ * transmit interrupts
+ * Description: The function configures transmit traffic interrupts
+ * Return Value: SUCCESS on success and
+ * '-1' on failure
+ */
+
+static int init_tti(struct s2io_nic *nic, int link)
+{
+ struct XENA_dev_config __iomem *bar0 = nic->bar0;
+ register u64 val64 = 0;
+ int i;
+ struct config_param *config = &nic->config;
+
+ for (i = 0; i < config->tx_fifo_num; i++) {
+ /*
+ * TTI Initialization. Default Tx timer gets us about
+ * 250 interrupts per sec. Continuous interrupts are enabled
+ * by default.
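+		 * On Xframe II the timer count below is scaled by the
+		 * sampled bus speed, presumably so that rate holds in any
+		 * PCI/PCI-X mode; 0x2078 is the fixed Xframe I value.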
+ */
+ if (nic->device_type == XFRAME_II_DEVICE) {
+ int count = (nic->config.bus_speed * 125)/2;
+ val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
+ } else
+ val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
+
+ val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
+ TTI_DATA1_MEM_TX_URNG_B(0x10) |
+ TTI_DATA1_MEM_TX_URNG_C(0x30) |
+ TTI_DATA1_MEM_TX_TIMER_AC_EN;
+ if (i == 0)
+ if (use_continuous_tx_intrs && (link == LINK_UP))
+ val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
+ writeq(val64, &bar0->tti_data1_mem);
+
+ if (nic->config.intr_type == MSI_X) {
+ val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
+ TTI_DATA2_MEM_TX_UFC_B(0x100) |
+ TTI_DATA2_MEM_TX_UFC_C(0x200) |
+ TTI_DATA2_MEM_TX_UFC_D(0x300);
+ } else {
+ if ((nic->config.tx_steering_type ==
+ TX_DEFAULT_STEERING) &&
+ (config->tx_fifo_num > 1) &&
+ (i >= nic->udp_fifo_idx) &&
+ (i < (nic->udp_fifo_idx +
+ nic->total_udp_fifos)))
+ val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
+ TTI_DATA2_MEM_TX_UFC_B(0x80) |
+ TTI_DATA2_MEM_TX_UFC_C(0x100) |
+ TTI_DATA2_MEM_TX_UFC_D(0x120);
+ else
+ val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
+ TTI_DATA2_MEM_TX_UFC_B(0x20) |
+ TTI_DATA2_MEM_TX_UFC_C(0x40) |
+ TTI_DATA2_MEM_TX_UFC_D(0x80);
+ }
+
+ writeq(val64, &bar0->tti_data2_mem);
+
+ val64 = TTI_CMD_MEM_WE |
+ TTI_CMD_MEM_STROBE_NEW_CMD |
+ TTI_CMD_MEM_OFFSET(i);
+ writeq(val64, &bar0->tti_command_mem);
+
+ if (wait_for_cmd_complete(&bar0->tti_command_mem,
+ TTI_CMD_MEM_STROBE_NEW_CMD,
+ S2IO_BIT_RESET) != SUCCESS)
+ return FAILURE;
+ }
+
+ return SUCCESS;
+}
+
+/**
+ * init_nic - Initialization of hardware
+ * @nic: device private variable
+ * Description: The function sequentially configures every block
+ * of the H/W from their reset values.
+ * Return Value: SUCCESS on success and
+ * '-1' on failure (endian settings incorrect).
+ */
+
+static int init_nic(struct s2io_nic *nic)
+{
+ struct XENA_dev_config __iomem *bar0 = nic->bar0;
+ struct net_device *dev = nic->dev;
+ register u64 val64 = 0;
+ void __iomem *add;
+ u32 time;
+ int i, j;
+ int dtx_cnt = 0;
+ unsigned long long mem_share;
+ int mem_size;
+ struct config_param *config = &nic->config;
+ struct mac_info *mac_control = &nic->mac_control;
+
+	/* Set the swapper control on the card */
+ if (s2io_set_swapper(nic)) {
+ DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
+ return -EIO;
+ }
+
+	/*
+	 * Herc requires EOI to be removed from reset before XGXS,
+	 * so handle that first.
+	 */
+ if (nic->device_type & XFRAME_II_DEVICE) {
+ val64 = 0xA500000000ULL;
+ writeq(val64, &bar0->sw_reset);
+ msleep(500);
+ val64 = readq(&bar0->sw_reset);
+ }
+
+ /* Remove XGXS from reset state */
+ val64 = 0;
+ writeq(val64, &bar0->sw_reset);
+ msleep(500);
+ val64 = readq(&bar0->sw_reset);
+
+	/* Ensure that it's safe to access registers by checking that
+	 * the RIC_RUNNING bit is cleared. The check is valid only for
+	 * Xframe II.
+	 */
+ if (nic->device_type == XFRAME_II_DEVICE) {
+ for (i = 0; i < 50; i++) {
+ val64 = readq(&bar0->adapter_status);
+ if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
+ break;
+ msleep(10);
+ }
+ if (i == 50)
+ return -ENODEV;
+ }
+
+ /* Enable Receiving broadcasts */
+ add = &bar0->mac_cfg;
+ val64 = readq(&bar0->mac_cfg);
+ val64 |= MAC_RMAC_BCAST_ENABLE;
+ writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+ writel((u32)val64, add);
+ writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+ writel((u32) (val64 >> 32), (add + 4));
+
+ /* Read registers in all blocks */
+ val64 = readq(&bar0->mac_int_mask);
+ val64 = readq(&bar0->mc_int_mask);
+ val64 = readq(&bar0->xgxs_int_mask);
+
+ /* Set MTU */
+ val64 = dev->mtu;
+ writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
+
+ if (nic->device_type & XFRAME_II_DEVICE) {
+ while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
+ SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
+ &bar0->dtx_control, UF);
+ if (dtx_cnt & 0x1)
+ msleep(1); /* Necessary!! */
+ dtx_cnt++;
+ }
+ } else {
+ while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
+ SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
+ &bar0->dtx_control, UF);
+ val64 = readq(&bar0->dtx_control);
+ dtx_cnt++;
+ }
+ }
+
+ /* Tx DMA Initialization */
+ val64 = 0;
+ writeq(val64, &bar0->tx_fifo_partition_0);
+ writeq(val64, &bar0->tx_fifo_partition_1);
+ writeq(val64, &bar0->tx_fifo_partition_2);
+ writeq(val64, &bar0->tx_fifo_partition_3);
+
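+	/*
+	 * Each 64-bit partition register describes two FIFOs: a 13-bit
+	 * length field at bit (j * 32) + 19 and a 3-bit priority field
+	 * at bit (j * 32) + 5 of each 32-bit half, with j selecting the
+	 * half.  The accumulated value is flushed when i reaches 1, 3,
+	 * 5 or 7; the i++ bump below makes sure a configuration ending
+	 * on an even FIFO index is flushed as well.
+	 */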
+ for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+ val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
+ vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
+
+ if (i == (config->tx_fifo_num - 1)) {
+ if (i % 2 == 0)
+ i++;
+ }
+
+ switch (i) {
+ case 1:
+ writeq(val64, &bar0->tx_fifo_partition_0);
+ val64 = 0;
+ j = 0;
+ break;
+ case 3:
+ writeq(val64, &bar0->tx_fifo_partition_1);
+ val64 = 0;
+ j = 0;
+ break;
+ case 5:
+ writeq(val64, &bar0->tx_fifo_partition_2);
+ val64 = 0;
+ j = 0;
+ break;
+ case 7:
+ writeq(val64, &bar0->tx_fifo_partition_3);
+ val64 = 0;
+ j = 0;
+ break;
+ default:
+ j++;
+ break;
+ }
+ }
+
+ /*
+ * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
+ * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
+ */
+ if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
+ writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
+
+ val64 = readq(&bar0->tx_fifo_partition_0);
+ DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
+ &bar0->tx_fifo_partition_0, (unsigned long long)val64);
+
+ /*
+ * Initialization of Tx_PA_CONFIG register to ignore packet
+ * integrity checking.
+ */
+ val64 = readq(&bar0->tx_pa_cfg);
+ val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
+ TX_PA_CFG_IGNORE_SNAP_OUI |
+ TX_PA_CFG_IGNORE_LLC_CTRL |
+ TX_PA_CFG_IGNORE_L2_ERR;
+ writeq(val64, &bar0->tx_pa_cfg);
+
+	/* Rx DMA initialization. */
+ val64 = 0;
+ for (i = 0; i < config->rx_ring_num; i++) {
+ struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+
+ val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
+ }
+ writeq(val64, &bar0->rx_queue_priority);
+
+	/*
+	 * Allocate an equal share of the receive memory to all the
+	 * configured rings.
+	 */
+ val64 = 0;
+ if (nic->device_type & XFRAME_II_DEVICE)
+ mem_size = 32;
+ else
+ mem_size = 64;
+
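+	/* Ring 0 also absorbs the division remainder, so the shares
+	 * always add up to the full mem_size.
+	 */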
+ for (i = 0; i < config->rx_ring_num; i++) {
+ switch (i) {
+ case 0:
+ mem_share = (mem_size / config->rx_ring_num +
+ mem_size % config->rx_ring_num);
+ val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
+ continue;
+ case 1:
+ mem_share = (mem_size / config->rx_ring_num);
+ val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
+ continue;
+ case 2:
+ mem_share = (mem_size / config->rx_ring_num);
+ val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
+ continue;
+ case 3:
+ mem_share = (mem_size / config->rx_ring_num);
+ val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
+ continue;
+ case 4:
+ mem_share = (mem_size / config->rx_ring_num);
+ val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
+ continue;
+ case 5:
+ mem_share = (mem_size / config->rx_ring_num);
+ val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
+ continue;
+ case 6:
+ mem_share = (mem_size / config->rx_ring_num);
+ val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
+ continue;
+ case 7:
+ mem_share = (mem_size / config->rx_ring_num);
+ val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
+ continue;
+ }
+ }
+ writeq(val64, &bar0->rx_queue_cfg);
+
+	/*
+	 * Fill the Tx round robin registers so that all configured
+	 * FIFOs get equal scheduling priority.
+	 */
+ switch (config->tx_fifo_num) {
+ case 1:
+ val64 = 0x0;
+ writeq(val64, &bar0->tx_w_round_robin_0);
+ writeq(val64, &bar0->tx_w_round_robin_1);
+ writeq(val64, &bar0->tx_w_round_robin_2);
+ writeq(val64, &bar0->tx_w_round_robin_3);
+ writeq(val64, &bar0->tx_w_round_robin_4);
+ break;
+ case 2:
+ val64 = 0x0001000100010001ULL;
+ writeq(val64, &bar0->tx_w_round_robin_0);
+ writeq(val64, &bar0->tx_w_round_robin_1);
+ writeq(val64, &bar0->tx_w_round_robin_2);
+ writeq(val64, &bar0->tx_w_round_robin_3);
+ val64 = 0x0001000100000000ULL;
+ writeq(val64, &bar0->tx_w_round_robin_4);
+ break;
+ case 3:
+ val64 = 0x0001020001020001ULL;
+ writeq(val64, &bar0->tx_w_round_robin_0);
+ val64 = 0x0200010200010200ULL;
+ writeq(val64, &bar0->tx_w_round_robin_1);
+ val64 = 0x0102000102000102ULL;
+ writeq(val64, &bar0->tx_w_round_robin_2);
+ val64 = 0x0001020001020001ULL;
+ writeq(val64, &bar0->tx_w_round_robin_3);
+ val64 = 0x0200010200000000ULL;
+ writeq(val64, &bar0->tx_w_round_robin_4);
+ break;
+ case 4:
+ val64 = 0x0001020300010203ULL;
+ writeq(val64, &bar0->tx_w_round_robin_0);
+ writeq(val64, &bar0->tx_w_round_robin_1);
+ writeq(val64, &bar0->tx_w_round_robin_2);
+ writeq(val64, &bar0->tx_w_round_robin_3);
+ val64 = 0x0001020300000000ULL;
+ writeq(val64, &bar0->tx_w_round_robin_4);
+ break;
+ case 5:
+ val64 = 0x0001020304000102ULL;
+ writeq(val64, &bar0->tx_w_round_robin_0);
+ val64 = 0x0304000102030400ULL;
+ writeq(val64, &bar0->tx_w_round_robin_1);
+ val64 = 0x0102030400010203ULL;
+ writeq(val64, &bar0->tx_w_round_robin_2);
+ val64 = 0x0400010203040001ULL;
+ writeq(val64, &bar0->tx_w_round_robin_3);
+ val64 = 0x0203040000000000ULL;
+ writeq(val64, &bar0->tx_w_round_robin_4);
+ break;
+ case 6:
+ val64 = 0x0001020304050001ULL;
+ writeq(val64, &bar0->tx_w_round_robin_0);
+ val64 = 0x0203040500010203ULL;
+ writeq(val64, &bar0->tx_w_round_robin_1);
+ val64 = 0x0405000102030405ULL;
+ writeq(val64, &bar0->tx_w_round_robin_2);
+ val64 = 0x0001020304050001ULL;
+ writeq(val64, &bar0->tx_w_round_robin_3);
+ val64 = 0x0203040500000000ULL;
+ writeq(val64, &bar0->tx_w_round_robin_4);
+ break;
+ case 7:
+ val64 = 0x0001020304050600ULL;
+ writeq(val64, &bar0->tx_w_round_robin_0);
+ val64 = 0x0102030405060001ULL;
+ writeq(val64, &bar0->tx_w_round_robin_1);
+ val64 = 0x0203040506000102ULL;
+ writeq(val64, &bar0->tx_w_round_robin_2);
+ val64 = 0x0304050600010203ULL;
+ writeq(val64, &bar0->tx_w_round_robin_3);
+ val64 = 0x0405060000000000ULL;
+ writeq(val64, &bar0->tx_w_round_robin_4);
+ break;
+ case 8:
+ val64 = 0x0001020304050607ULL;
+ writeq(val64, &bar0->tx_w_round_robin_0);
+ writeq(val64, &bar0->tx_w_round_robin_1);
+ writeq(val64, &bar0->tx_w_round_robin_2);
+ writeq(val64, &bar0->tx_w_round_robin_3);
+ val64 = 0x0001020300000000ULL;
+ writeq(val64, &bar0->tx_w_round_robin_4);
+ break;
+ }
+
+ /* Enable all configured Tx FIFO partitions */
+ val64 = readq(&bar0->tx_fifo_partition_0);
+ val64 |= (TX_FIFO_PARTITION_EN);
+ writeq(val64, &bar0->tx_fifo_partition_0);
+
+	/* Fill the Rx round robin registers according to the number
+	 * of rings, and program QoS-based steering with equal
+	 * priority.
+	 */
+ switch (config->rx_ring_num) {
+ case 1:
+ val64 = 0x0;
+ writeq(val64, &bar0->rx_w_round_robin_0);
+ writeq(val64, &bar0->rx_w_round_robin_1);
+ writeq(val64, &bar0->rx_w_round_robin_2);
+ writeq(val64, &bar0->rx_w_round_robin_3);
+ writeq(val64, &bar0->rx_w_round_robin_4);
+
+ val64 = 0x8080808080808080ULL;
+ writeq(val64, &bar0->rts_qos_steering);
+ break;
+ case 2:
+ val64 = 0x0001000100010001ULL;
+ writeq(val64, &bar0->rx_w_round_robin_0);
+ writeq(val64, &bar0->rx_w_round_robin_1);
+ writeq(val64, &bar0->rx_w_round_robin_2);
+ writeq(val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0001000100000000ULL;
+ writeq(val64, &bar0->rx_w_round_robin_4);
+
+ val64 = 0x8080808040404040ULL;
+ writeq(val64, &bar0->rts_qos_steering);
+ break;
+ case 3:
+ val64 = 0x0001020001020001ULL;
+ writeq(val64, &bar0->rx_w_round_robin_0);
+ val64 = 0x0200010200010200ULL;
+ writeq(val64, &bar0->rx_w_round_robin_1);
+ val64 = 0x0102000102000102ULL;
+ writeq(val64, &bar0->rx_w_round_robin_2);
+ val64 = 0x0001020001020001ULL;
+ writeq(val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0200010200000000ULL;
+ writeq(val64, &bar0->rx_w_round_robin_4);
+
+ val64 = 0x8080804040402020ULL;
+ writeq(val64, &bar0->rts_qos_steering);
+ break;
+ case 4:
+ val64 = 0x0001020300010203ULL;
+ writeq(val64, &bar0->rx_w_round_robin_0);
+ writeq(val64, &bar0->rx_w_round_robin_1);
+ writeq(val64, &bar0->rx_w_round_robin_2);
+ writeq(val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0001020300000000ULL;
+ writeq(val64, &bar0->rx_w_round_robin_4);
+
+ val64 = 0x8080404020201010ULL;
+ writeq(val64, &bar0->rts_qos_steering);
+ break;
+ case 5:
+ val64 = 0x0001020304000102ULL;
+ writeq(val64, &bar0->rx_w_round_robin_0);
+ val64 = 0x0304000102030400ULL;
+ writeq(val64, &bar0->rx_w_round_robin_1);
+ val64 = 0x0102030400010203ULL;
+ writeq(val64, &bar0->rx_w_round_robin_2);
+ val64 = 0x0400010203040001ULL;
+ writeq(val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0203040000000000ULL;
+ writeq(val64, &bar0->rx_w_round_robin_4);
+
+ val64 = 0x8080404020201008ULL;
+ writeq(val64, &bar0->rts_qos_steering);
+ break;
+ case 6:
+ val64 = 0x0001020304050001ULL;
+ writeq(val64, &bar0->rx_w_round_robin_0);
+ val64 = 0x0203040500010203ULL;
+ writeq(val64, &bar0->rx_w_round_robin_1);
+ val64 = 0x0405000102030405ULL;
+ writeq(val64, &bar0->rx_w_round_robin_2);
+ val64 = 0x0001020304050001ULL;
+ writeq(val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0203040500000000ULL;
+ writeq(val64, &bar0->rx_w_round_robin_4);
+
+ val64 = 0x8080404020100804ULL;
+ writeq(val64, &bar0->rts_qos_steering);
+ break;
+ case 7:
+ val64 = 0x0001020304050600ULL;
+ writeq(val64, &bar0->rx_w_round_robin_0);
+ val64 = 0x0102030405060001ULL;
+ writeq(val64, &bar0->rx_w_round_robin_1);
+ val64 = 0x0203040506000102ULL;
+ writeq(val64, &bar0->rx_w_round_robin_2);
+ val64 = 0x0304050600010203ULL;
+ writeq(val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0405060000000000ULL;
+ writeq(val64, &bar0->rx_w_round_robin_4);
+
+ val64 = 0x8080402010080402ULL;
+ writeq(val64, &bar0->rts_qos_steering);
+ break;
+ case 8:
+ val64 = 0x0001020304050607ULL;
+ writeq(val64, &bar0->rx_w_round_robin_0);
+ writeq(val64, &bar0->rx_w_round_robin_1);
+ writeq(val64, &bar0->rx_w_round_robin_2);
+ writeq(val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0001020300000000ULL;
+ writeq(val64, &bar0->rx_w_round_robin_4);
+
+ val64 = 0x8040201008040201ULL;
+ writeq(val64, &bar0->rts_qos_steering);
+ break;
+ }
+
+ /* UDP Fix */
+ val64 = 0;
+ for (i = 0; i < 8; i++)
+ writeq(val64, &bar0->rts_frm_len_n[i]);
+
+ /* Set the default rts frame length for the rings configured */
+ val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
+ for (i = 0 ; i < config->rx_ring_num ; i++)
+ writeq(val64, &bar0->rts_frm_len_n[i]);
+
+ /* Set the frame length for the configured rings
+ * desired by the user
+ */
+ for (i = 0; i < config->rx_ring_num; i++) {
+		/* If rts_frm_len[i] == 0, the user has not requested
+		 * frame length steering for this ring, so the default
+		 * programmed above is left as it is. Otherwise program
+		 * the rts_frm_len register with the provided value.
+		 */
+ if (rts_frm_len[i] != 0) {
+ writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
+ &bar0->rts_frm_len_n[i]);
+ }
+ }
+
+ /* Disable differentiated services steering logic */
+ for (i = 0; i < 64; i++) {
+ if (rts_ds_steer(nic, i, 0) == FAILURE) {
+ DBG_PRINT(ERR_DBG,
+ "%s: rts_ds_steer failed on codepoint %d\n",
+ dev->name, i);
+ return -ENODEV;
+ }
+ }
+
+ /* Program statistics memory */
+ writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
+
+ if (nic->device_type == XFRAME_II_DEVICE) {
+ val64 = STAT_BC(0x320);
+ writeq(val64, &bar0->stat_byte_cnt);
+ }
+
+ /*
+ * Initializing the sampling rate for the device to calculate the
+ * bandwidth utilization.
+ */
+ val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
+ MAC_RX_LINK_UTIL_VAL(rmac_util_period);
+ writeq(val64, &bar0->mac_link_util);
+
+ /*
+ * Initializing the Transmit and Receive Traffic Interrupt
+ * Scheme.
+ */
+
+ /* Initialize TTI */
+ if (SUCCESS != init_tti(nic, nic->last_link_state))
+ return -ENODEV;
+
+ /* RTI Initialization */
+ if (nic->device_type == XFRAME_II_DEVICE) {
+		/*
+		 * Programmed to generate approximately 500 interrupts
+		 * per second.
+		 */
+ int count = (nic->config.bus_speed * 125)/4;
+ val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
+ } else
+ val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
+ val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
+ RTI_DATA1_MEM_RX_URNG_B(0x10) |
+ RTI_DATA1_MEM_RX_URNG_C(0x30) |
+ RTI_DATA1_MEM_RX_TIMER_AC_EN;
+
+ writeq(val64, &bar0->rti_data1_mem);
+
+ val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
+ RTI_DATA2_MEM_RX_UFC_B(0x2) ;
+ if (nic->config.intr_type == MSI_X)
+ val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
+ RTI_DATA2_MEM_RX_UFC_D(0x40));
+ else
+ val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
+ RTI_DATA2_MEM_RX_UFC_D(0x80));
+ writeq(val64, &bar0->rti_data2_mem);
+
+ for (i = 0; i < config->rx_ring_num; i++) {
+ val64 = RTI_CMD_MEM_WE |
+ RTI_CMD_MEM_STROBE_NEW_CMD |
+ RTI_CMD_MEM_OFFSET(i);
+ writeq(val64, &bar0->rti_command_mem);
+
+ /*
+ * Once the operation completes, the Strobe bit of the
+ * command register will be reset. We poll for this
+ * particular condition. We wait for a maximum of 500ms
+ * for the operation to complete, if it's not complete
+ * by then we return error.
+ */
+ time = 0;
+ while (true) {
+ val64 = readq(&bar0->rti_command_mem);
+ if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
+ break;
+
+ if (time > 10) {
+ DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
+ dev->name);
+ return -ENODEV;
+ }
+ time++;
+ msleep(50);
+ }
+ }
+
+	/*
+	 * Initialize proper pause threshold values for all
+	 * the 8 Queues on the Rx side.
+	 */
+ writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
+ writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
+
+ /* Disable RMAC PAD STRIPPING */
+ add = &bar0->mac_cfg;
+ val64 = readq(&bar0->mac_cfg);
+ val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
+ writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+ writel((u32) (val64), add);
+ writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+ writel((u32) (val64 >> 32), (add + 4));
+ val64 = readq(&bar0->mac_cfg);
+
+ /* Enable FCS stripping by adapter */
+ add = &bar0->mac_cfg;
+ val64 = readq(&bar0->mac_cfg);
+ val64 |= MAC_CFG_RMAC_STRIP_FCS;
+ if (nic->device_type == XFRAME_II_DEVICE)
+ writeq(val64, &bar0->mac_cfg);
+ else {
+ writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+ writel((u32) (val64), add);
+ writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+ writel((u32) (val64 >> 32), (add + 4));
+ }
+
+ /*
+ * Set the time value to be inserted in the pause frame
+ * generated by xena.
+ */
+ val64 = readq(&bar0->rmac_pause_cfg);
+ val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
+ val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
+ writeq(val64, &bar0->rmac_pause_cfg);
+
+	/*
+	 * Set the threshold limit for generating pause frames: if the
+	 * amount of data in any queue exceeds the ratio
+	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256, a pause
+	 * frame is generated.
+	 */
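+	/* Each queue gets a 16-bit field: 0xFF in the high byte and the
+	 * configured threshold in the low byte.
+	 */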
+ val64 = 0;
+ for (i = 0; i < 4; i++) {
+ val64 |= (((u64)0xFF00 |
+ nic->mac_control.mc_pause_threshold_q0q3)
+ << (i * 2 * 8));
+ }
+ writeq(val64, &bar0->mc_pause_thresh_q0q3);
+
+ val64 = 0;
+ for (i = 0; i < 4; i++) {
+ val64 |= (((u64)0xFF00 |
+ nic->mac_control.mc_pause_threshold_q4q7)
+ << (i * 2 * 8));
+ }
+ writeq(val64, &bar0->mc_pause_thresh_q4q7);
+
+	/*
+	 * TxDMA will stop issuing read requests if the number of read
+	 * splits exceeds the limit set by shared_splits.
+	 */
+ val64 = readq(&bar0->pic_control);
+ val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
+ writeq(val64, &bar0->pic_control);
+
+ if (nic->config.bus_speed == 266) {
+ writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
+ writeq(0x0, &bar0->read_retry_delay);
+ writeq(0x0, &bar0->write_retry_delay);
+ }
+
+ /*
+ * Programming the Herc to split every write transaction
+ * that does not start on an ADB to reduce disconnects.
+ */
+ if (nic->device_type == XFRAME_II_DEVICE) {
+ val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
+ MISC_LINK_STABILITY_PRD(3);
+ writeq(val64, &bar0->misc_control);
+ val64 = readq(&bar0->pic_control2);
+ val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
+ writeq(val64, &bar0->pic_control2);
+ }
+ if (strstr(nic->product_name, "CX4")) {
+ val64 = TMAC_AVG_IPG(0x17);
+ writeq(val64, &bar0->tmac_avg_ipg);
+ }
+
+ return SUCCESS;
+}
+#define LINK_UP_DOWN_INTERRUPT 1
+#define MAC_RMAC_ERR_TIMER 2
+
+static int s2io_link_fault_indication(struct s2io_nic *nic)
+{
+ if (nic->device_type == XFRAME_II_DEVICE)
+ return LINK_UP_DOWN_INTERRUPT;
+ else
+ return MAC_RMAC_ERR_TIMER;
+}
+
+/**
+ * do_s2io_write_bits - update alarm bits in an alarm mask register
+ * @value: alarm bits
+ * @flag: ENABLE_INTRS or DISABLE_INTRS
+ * @addr: address of the mask register
+ * Description: Clears @value in the register at @addr to enable the
+ * corresponding alarms when @flag is ENABLE_INTRS, or sets it to mask
+ * them otherwise.
+ * Return Value:
+ * NONE.
+ */
+static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
+{
+ u64 temp64;
+
+ temp64 = readq(addr);
+
+ if (flag == ENABLE_INTRS)
+ temp64 &= ~((u64)value);
+ else
+ temp64 |= ((u64)value);
+ writeq(temp64, addr);
+}
+
+static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
+{
+ struct XENA_dev_config __iomem *bar0 = nic->bar0;
+ register u64 gen_int_mask = 0;
+ u64 interruptible;
+
+ writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
+ if (mask & TX_DMA_INTR) {
+ gen_int_mask |= TXDMA_INT_M;
+
+ do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
+ TXDMA_PCC_INT | TXDMA_TTI_INT |
+ TXDMA_LSO_INT | TXDMA_TPA_INT |
+ TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
+
+ do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
+ PFC_MISC_0_ERR | PFC_MISC_1_ERR |
+ PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
+ &bar0->pfc_err_mask);
+
+ do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
+ TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
+ TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
+
+ do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
+ PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
+ PCC_N_SERR | PCC_6_COF_OV_ERR |
+ PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
+ PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
+ PCC_TXB_ECC_SG_ERR,
+ flag, &bar0->pcc_err_mask);
+
+ do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
+ TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
+
+ do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
+ LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
+ LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
+ flag, &bar0->lso_err_mask);
+
+ do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
+ flag, &bar0->tpa_err_mask);
+
+ do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
+ }
+
+ if (mask & TX_MAC_INTR) {
+ gen_int_mask |= TXMAC_INT_M;
+ do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
+ &bar0->mac_int_mask);
+ do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
+ TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
+ TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
+ flag, &bar0->mac_tmac_err_mask);
+ }
+
+ if (mask & TX_XGXS_INTR) {
+ gen_int_mask |= TXXGXS_INT_M;
+ do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
+ &bar0->xgxs_int_mask);
+ do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
+ TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
+ flag, &bar0->xgxs_txgxs_err_mask);
+ }
+
+ if (mask & RX_DMA_INTR) {
+ gen_int_mask |= RXDMA_INT_M;
+ do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
+ RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
+ flag, &bar0->rxdma_int_mask);
+ do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
+ RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
+ RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
+ RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
+ do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
+ PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
+ PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
+ &bar0->prc_pcix_err_mask);
+ do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
+ RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
+ &bar0->rpa_err_mask);
+ do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
+ RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
+ RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
+ RDA_FRM_ECC_SG_ERR |
+ RDA_MISC_ERR|RDA_PCIX_ERR,
+ flag, &bar0->rda_err_mask);
+ do_s2io_write_bits(RTI_SM_ERR_ALARM |
+ RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
+ flag, &bar0->rti_err_mask);
+ }
+
+ if (mask & RX_MAC_INTR) {
+ gen_int_mask |= RXMAC_INT_M;
+ do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
+ &bar0->mac_int_mask);
+ interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
+ RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
+ RMAC_DOUBLE_ECC_ERR);
+ if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
+ interruptible |= RMAC_LINK_STATE_CHANGE_INT;
+ do_s2io_write_bits(interruptible,
+ flag, &bar0->mac_rmac_err_mask);
+ }
+
+ if (mask & RX_XGXS_INTR) {
+ gen_int_mask |= RXXGXS_INT_M;
+ do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
+ &bar0->xgxs_int_mask);
+ do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
+ &bar0->xgxs_rxgxs_err_mask);
+ }
+
+ if (mask & MC_INTR) {
+ gen_int_mask |= MC_INT_M;
+ do_s2io_write_bits(MC_INT_MASK_MC_INT,
+ flag, &bar0->mc_int_mask);
+ do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
+ MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
+ &bar0->mc_err_mask);
+ }
+ nic->general_int_mask = gen_int_mask;
+
+ /* Remove this line when alarm interrupts are enabled */
+ nic->general_int_mask = 0;
+}
+
+/**
+ * en_dis_able_nic_intrs - Enable or Disable the interrupts
+ * @nic: device private variable.
+ * @mask: A mask indicating which Intr blocks must be modified.
+ * @flag: A flag indicating whether to enable or disable the Intrs.
+ * Description: This function will either disable or enable the interrupts
+ * depending on the flag argument. The mask argument can be used to
+ * enable/disable any Intr block.
+ * Return Value: NONE.
+ */
+
+static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
+{
+ struct XENA_dev_config __iomem *bar0 = nic->bar0;
+ register u64 temp64 = 0, intr_mask = 0;
+
+ intr_mask = nic->general_int_mask;
+
+ /* Top level interrupt classification */
+ /* PIC Interrupts */
+ if (mask & TX_PIC_INTR) {
+ /* Enable PIC Intrs in the general intr mask register */
+ intr_mask |= TXPIC_INT_M;
+ if (flag == ENABLE_INTRS) {
+			/*
+			 * On a Hercules adapter enable the GPIO (link)
+			 * interrupt; otherwise disable all PCIX, Flash,
+			 * MDIO, IIC and GPIO interrupts for now.
+			 * TODO
+			 */
+ if (s2io_link_fault_indication(nic) ==
+ LINK_UP_DOWN_INTERRUPT) {
+ do_s2io_write_bits(PIC_INT_GPIO, flag,
+ &bar0->pic_int_mask);
+ do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
+ &bar0->gpio_int_mask);
+ } else
+ writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
+ } else if (flag == DISABLE_INTRS) {
+ /*
+ * Disable PIC Intrs in the general
+ * intr mask register
+ */
+ writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
+ }
+ }
+
+ /* Tx traffic interrupts */
+ if (mask & TX_TRAFFIC_INTR) {
+ intr_mask |= TXTRAFFIC_INT_M;
+ if (flag == ENABLE_INTRS) {
+			/*
+			 * Enable all the Tx side interrupts:
+			 * writing 0 enables all 64 Tx interrupt levels.
+			 */
+ writeq(0x0, &bar0->tx_traffic_mask);
+ } else if (flag == DISABLE_INTRS) {
+ /*
+ * Disable Tx Traffic Intrs in the general intr mask
+ * register.
+ */
+ writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
+ }
+ }
+
+ /* Rx traffic interrupts */
+ if (mask & RX_TRAFFIC_INTR) {
+ intr_mask |= RXTRAFFIC_INT_M;
+ if (flag == ENABLE_INTRS) {
+ /* writing 0 Enables all 8 RX interrupt levels */
+ writeq(0x0, &bar0->rx_traffic_mask);
+ } else if (flag == DISABLE_INTRS) {
+ /*
+ * Disable Rx Traffic Intrs in the general intr mask
+ * register.
+ */
+ writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
+ }
+ }
+
+ temp64 = readq(&bar0->general_int_mask);
+ if (flag == ENABLE_INTRS)
+ temp64 &= ~((u64)intr_mask);
+ else
+ temp64 = DISABLE_ALL_INTRS;
+ writeq(temp64, &bar0->general_int_mask);
+
+ nic->general_int_mask = readq(&bar0->general_int_mask);
+}
+
+/**
+ * verify_pcc_quiescent - Checks for PCC quiescent state
+ * @sp: device private variable.
+ * @flag: the expected idle state.
+ * Return: 1 if the PCC state matches the state requested via @flag,
+ * 0 if it does not.
+ */
+static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
+{
+ int ret = 0, herc;
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ u64 val64 = readq(&bar0->adapter_status);
+
+ herc = (sp->device_type == XFRAME_II_DEVICE);
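+	/*
+	 * Xena boards below revision 4 run with only four PCCs enabled
+	 * (the SXE-008 workaround in init_nic()), so they are checked
+	 * against the PCC_FOUR_IDLE mask instead.
+	 */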
+
+ if (flag == false) {
+ if ((!herc && (sp->pdev->revision >= 4)) || herc) {
+ if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
+ ret = 1;
+ } else {
+ if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
+ ret = 1;
+ }
+ } else {
+ if ((!herc && (sp->pdev->revision >= 4)) || herc) {
+ if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
+ ADAPTER_STATUS_RMAC_PCC_IDLE))
+ ret = 1;
+ } else {
+ if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
+ ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
+ ret = 1;
+ }
+ }
+
+ return ret;
+}
+/**
+ * verify_xena_quiescence - Checks whether the H/W is ready
+ * @sp: device private variable.
+ * Description: Returns whether the H/W is ready to go or not. Depending
+ * on whether the adapter enable bit was written or not, the comparison
+ * differs and the calling function passes the input argument flag to
+ * indicate this.
+ * Return: 1 if Xena is quiescent,
+ * 0 if Xena is not quiescent.
+ */
+
+static int verify_xena_quiescence(struct s2io_nic *sp)
+{
+ int mode;
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ u64 val64 = readq(&bar0->adapter_status);
+ mode = s2io_verify_pci_mode(sp);
+
+ if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
+ DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
+ return 0;
+ }
+ if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
+ DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
+ return 0;
+ }
+ if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
+ DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
+ return 0;
+ }
+ if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
+ DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
+ return 0;
+ }
+ if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
+ DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
+ return 0;
+ }
+ if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
+ DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
+ return 0;
+ }
+ if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
+ DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
+ return 0;
+ }
+ if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
+ DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
+ return 0;
+ }
+
+	/*
+	 * In PCI 33 mode, the P_PLL is not used, and therefore
+	 * the P_PLL_LOCK bit in the adapter_status register will
+	 * not be asserted.
+	 */
+ if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
+ sp->device_type == XFRAME_II_DEVICE &&
+ mode != PCI_MODE_PCI_33) {
+ DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
+ return 0;
+ }
+ if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
+ ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
+ DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * fix_mac_address - Fix for Mac addr problem on Alpha platforms
+ * @sp: Pointer to device specific structure
+ * Description :
+ * New procedure to work around MAC address reading problems on Alpha
+ * platforms.
+ *
+ */
+
+static void fix_mac_address(struct s2io_nic *sp)
+{
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ int i = 0;
+
+ while (fix_mac[i] != END_SIGN) {
+ writeq(fix_mac[i++], &bar0->gpio_control);
+ udelay(10);
+ (void) readq(&bar0->gpio_control);
+ }
+}
+
+/**
+ * start_nic - Turns the device on
+ * @nic : device private variable.
+ * Description:
+ * This function actually turns the device on. Before this function is
+ * called, all registers are configured from their reset states
+ * and shared memory is allocated but the NIC is still quiescent. On
+ * calling this function, the device interrupts are cleared and the NIC is
+ * literally switched on by writing into the adapter control register.
+ * Return Value:
+ * SUCCESS on success and -1 on failure.
+ */
+
+static int start_nic(struct s2io_nic *nic)
+{
+ struct XENA_dev_config __iomem *bar0 = nic->bar0;
+ struct net_device *dev = nic->dev;
+ register u64 val64 = 0;
+ u16 subid, i;
+ struct config_param *config = &nic->config;
+ struct mac_info *mac_control = &nic->mac_control;
+
+ /* PRC Initialization and configuration */
+ for (i = 0; i < config->rx_ring_num; i++) {
+ struct ring_info *ring = &mac_control->rings[i];
+
+ writeq((u64)ring->rx_blocks[0].block_dma_addr,
+ &bar0->prc_rxd0_n[i]);
+
+ val64 = readq(&bar0->prc_ctrl_n[i]);
+ if (nic->rxd_mode == RXD_MODE_1)
+ val64 |= PRC_CTRL_RC_ENABLED;
+ else
+ val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
+ if (nic->device_type == XFRAME_II_DEVICE)
+ val64 |= PRC_CTRL_GROUP_READS;
+ val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
+ val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
+ writeq(val64, &bar0->prc_ctrl_n[i]);
+ }
+
+ if (nic->rxd_mode == RXD_MODE_3B) {
+ /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
+ val64 = readq(&bar0->rx_pa_cfg);
+ val64 |= RX_PA_CFG_IGNORE_L2_ERR;
+ writeq(val64, &bar0->rx_pa_cfg);
+ }
+
+ if (vlan_tag_strip == 0) {
+ val64 = readq(&bar0->rx_pa_cfg);
+ val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
+ writeq(val64, &bar0->rx_pa_cfg);
+ nic->vlan_strip_flag = 0;
+ }
+
+	/*
+	 * Enable MC-RLDRAM. After enabling it, we wait around 100ms,
+	 * which is approximately the time required for the device to
+	 * be ready for operation.
+	 */
+ val64 = readq(&bar0->mc_rldram_mrs);
+ val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
+ SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
+ val64 = readq(&bar0->mc_rldram_mrs);
+
+ msleep(100); /* Delay by around 100 ms. */
+
+ /* Enabling ECC Protection. */
+ val64 = readq(&bar0->adapter_control);
+ val64 &= ~ADAPTER_ECC_EN;
+ writeq(val64, &bar0->adapter_control);
+
+ /*
+ * Verify if the device is ready to be enabled, if so enable
+ * it.
+ */
+ val64 = readq(&bar0->adapter_status);
+ if (!verify_xena_quiescence(nic)) {
+ DBG_PRINT(ERR_DBG, "%s: device is not ready, "
+ "Adapter status reads: 0x%llx\n",
+ dev->name, (unsigned long long)val64);
+ return FAILURE;
+ }
+
+ /*
+ * With some switches, link might be already up at this point.
+ * Because of this weird behavior, when we enable laser,
+ * we may not get link. We need to handle this. We cannot
+ * figure out which switch is misbehaving. So we are forced to
+ * make a global change.
+ */
+
+ /* Enabling Laser. */
+ val64 = readq(&bar0->adapter_control);
+ val64 |= ADAPTER_EOI_TX_ON;
+ writeq(val64, &bar0->adapter_control);
+
+ if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
+		/*
+		 * We don't see link state interrupts initially on some
+		 * switches, so directly schedule the link state task here.
+		 */
+ schedule_work(&nic->set_link_task);
+ }
+ /* SXE-002: Initialize link and activity LED */
+ subid = nic->pdev->subsystem_device;
+ if (((subid & 0xFF) >= 0x07) &&
+ (nic->device_type == XFRAME_I_DEVICE)) {
+ val64 = readq(&bar0->gpio_control);
+ val64 |= 0x0000800000000000ULL;
+ writeq(val64, &bar0->gpio_control);
+ val64 = 0x0411040400000000ULL;
+ writeq(val64, (void __iomem *)bar0 + 0x2700);
+ }
+
+ return SUCCESS;
+}
+/**
+ * s2io_txdl_getskb - Get the skb from the TxDL, unmap its buffers and return it
+ */
+static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
+ struct TxD *txdlp, int get_off)
+{
+ struct s2io_nic *nic = fifo_data->nic;
+ struct sk_buff *skb;
+ struct TxD *txds;
+ u16 j, frg_cnt;
+
+ txds = txdlp;
+ if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
+ pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
+ sizeof(u64), PCI_DMA_TODEVICE);
+ txds++;
+ }
+
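+	/*
+	 * Host_Control of the first data TxD holds the skb pointer that
+	 * was stashed at transmit time; NULL here means this TxDL is
+	 * already clean.
+	 */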
+ skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
+ if (!skb) {
+ memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
+ return NULL;
+ }
+ pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ frg_cnt = skb_shinfo(skb)->nr_frags;
+ if (frg_cnt) {
+ txds++;
+ for (j = 0; j < frg_cnt; j++, txds++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
+ if (!txds->Buffer_Pointer)
+ break;
+ pci_unmap_page(nic->pdev,
+ (dma_addr_t)txds->Buffer_Pointer,
+ frag->size, PCI_DMA_TODEVICE);
+ }
+ }
+ memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
+ return skb;
+}
+
+/**
+ * free_tx_buffers - Free all queued Tx buffers
+ * @nic : device private variable.
+ * Description:
+ * Free all queued Tx buffers.
+ * Return Value: void
+ */
+
+static void free_tx_buffers(struct s2io_nic *nic)
+{
+ struct net_device *dev = nic->dev;
+ struct sk_buff *skb;
+ struct TxD *txdp;
+ int i, j;
+ int cnt = 0;
+ struct config_param *config = &nic->config;
+ struct mac_info *mac_control = &nic->mac_control;
+ struct stat_block *stats = mac_control->stats_info;
+ struct swStat *swstats = &stats->sw_stat;
+
+ for (i = 0; i < config->tx_fifo_num; i++) {
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+ struct fifo_info *fifo = &mac_control->fifos[i];
+ unsigned long flags;
+
+ spin_lock_irqsave(&fifo->tx_lock, flags);
+ for (j = 0; j < tx_cfg->fifo_len; j++) {
+ txdp = fifo->list_info[j].list_virt_addr;
+ skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
+ if (skb) {
+ swstats->mem_freed += skb->truesize;
+ dev_kfree_skb(skb);
+ cnt++;
+ }
+ }
+ DBG_PRINT(INTR_DBG,
+ "%s: forcibly freeing %d skbs on FIFO%d\n",
+ dev->name, cnt, i);
+ fifo->tx_curr_get_info.offset = 0;
+ fifo->tx_curr_put_info.offset = 0;
+ spin_unlock_irqrestore(&fifo->tx_lock, flags);
+ }
+}
+
+/**
+ * stop_nic - To stop the nic
+ * @nic : device private variable.
+ * Description:
+ * This function does exactly the opposite of what the start_nic()
+ * function does. This function is called to stop the device.
+ * Return Value:
+ * void.
+ */
+
+static void stop_nic(struct s2io_nic *nic)
+{
+ struct XENA_dev_config __iomem *bar0 = nic->bar0;
+ register u64 val64 = 0;
+ u16 interruptible;
+
+ /* Disable all interrupts */
+ en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
+ interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
+ interruptible |= TX_PIC_INTR;
+ en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
+
+ /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
+ val64 = readq(&bar0->adapter_control);
+ val64 &= ~(ADAPTER_CNTL_EN);
+ writeq(val64, &bar0->adapter_control);
+}
+
+/**
+ * fill_rx_buffers - Allocates the Rx side skbs
+ * @nic: device private variable.
+ * @ring: per ring structure
+ * @from_card_up: If this is true, we will map the buffer to get
+ * the dma address for buf0 and buf1 to give it to the card.
+ * Else we will sync the already mapped buffer to give it to the card.
+ * Description:
+ * The function allocates Rx side skbs and puts the physical
+ * address of these buffers into the RxD buffer pointers, so that the NIC
+ * can DMA the received frame into these locations.
+ * The NIC supports 3 receive modes, viz
+ * 1. single buffer,
+ * 2. three buffer and
+ * 3. five buffer modes.
+ * Each mode defines how many fragments the received frame will be split
+ * up into by the NIC. The frame is split into L3 header, L4 header and
+ * L4 payload in three buffer mode, and in five buffer mode the L4
+ * payload itself is split into 3 fragments. As of now the single buffer
+ * and two buffer (RXD_MODE_3B) modes are supported.
+ * Return Value:
+ * SUCCESS on success or an appropriate -ve value on failure.
+ */
+static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
+ int from_card_up)
+{
+ struct sk_buff *skb;
+ struct RxD_t *rxdp;
+ int off, size, block_no, block_no1;
+ u32 alloc_tab = 0;
+ u32 alloc_cnt;
+ u64 tmp;
+ struct buffAdd *ba;
+ struct RxD_t *first_rxdp = NULL;
+ u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
+ int rxd_index = 0;
+ struct RxD1 *rxdp1;
+ struct RxD3 *rxdp3;
+ struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
+
+ alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
+
+ block_no1 = ring->rx_curr_get_info.block_index;
+ while (alloc_tab < alloc_cnt) {
+ block_no = ring->rx_curr_put_info.block_index;
+
+ off = ring->rx_curr_put_info.offset;
+
+ rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
+
+ rxd_index = off + 1;
+ if (block_no)
+ rxd_index += (block_no * ring->rxd_count);
+
+ if ((block_no == block_no1) &&
+ (off == ring->rx_curr_get_info.offset) &&
+ (rxdp->Host_Control)) {
+ DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
+ ring->dev->name);
+ goto end;
+ }
+ if (off && (off == ring->rxd_count)) {
+ ring->rx_curr_put_info.block_index++;
+ if (ring->rx_curr_put_info.block_index ==
+ ring->block_count)
+ ring->rx_curr_put_info.block_index = 0;
+ block_no = ring->rx_curr_put_info.block_index;
+ off = 0;
+ ring->rx_curr_put_info.offset = off;
+ rxdp = ring->rx_blocks[block_no].block_virt_addr;
+ DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
+ ring->dev->name, rxdp);
+
+ }
+
+ if ((rxdp->Control_1 & RXD_OWN_XENA) &&
+ ((ring->rxd_mode == RXD_MODE_3B) &&
+ (rxdp->Control_2 & s2BIT(0)))) {
+ ring->rx_curr_put_info.offset = off;
+ goto end;
+ }
+ /* calculate size of skb based on ring mode */
+ size = ring->mtu +
+ HEADER_ETHERNET_II_802_3_SIZE +
+ HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
+ if (ring->rxd_mode == RXD_MODE_1)
+ size += NET_IP_ALIGN;
+ else
+ size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
+
+ /* allocate skb */
+ skb = dev_alloc_skb(size);
+ if (!skb) {
+ DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
+ ring->dev->name);
+ if (first_rxdp) {
+ wmb();
+ first_rxdp->Control_1 |= RXD_OWN_XENA;
+ }
+ swstats->mem_alloc_fail_cnt++;
+
+ return -ENOMEM ;
+ }
+ swstats->mem_allocated += skb->truesize;
+
+ if (ring->rxd_mode == RXD_MODE_1) {
+ /* 1 buffer mode - normal operation mode */
+ rxdp1 = (struct RxD1 *)rxdp;
+ memset(rxdp, 0, sizeof(struct RxD1));
+ skb_reserve(skb, NET_IP_ALIGN);
+ rxdp1->Buffer0_ptr =
+ pci_map_single(ring->pdev, skb->data,
+ size - NET_IP_ALIGN,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(nic->pdev,
+ rxdp1->Buffer0_ptr))
+ goto pci_map_failed;
+
+ rxdp->Control_2 =
+ SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
+ rxdp->Host_Control = (unsigned long)skb;
+ } else if (ring->rxd_mode == RXD_MODE_3B) {
+			/*
+			 * 2 buffer mode provides 128 byte
+			 * aligned receive buffers.
+			 */
+
+ rxdp3 = (struct RxD3 *)rxdp;
+ /* save buffer pointers to avoid frequent dma mapping */
+ Buffer0_ptr = rxdp3->Buffer0_ptr;
+ Buffer1_ptr = rxdp3->Buffer1_ptr;
+ memset(rxdp, 0, sizeof(struct RxD3));
+ /* restore the buffer pointers for dma sync*/
+ rxdp3->Buffer0_ptr = Buffer0_ptr;
+ rxdp3->Buffer1_ptr = Buffer1_ptr;
+
+ ba = &ring->ba[block_no][off];
+ skb_reserve(skb, BUF0_LEN);
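+			/*
+			 * Round skb->data up to the next (ALIGN_SIZE + 1)
+			 * byte boundary (128 bytes, assuming ALIGN_SIZE is
+			 * 127) to meet the alignment promised above.
+			 */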
+ tmp = (u64)(unsigned long)skb->data;
+ tmp += ALIGN_SIZE;
+ tmp &= ~ALIGN_SIZE;
+ skb->data = (void *) (unsigned long)tmp;
+ skb_reset_tail_pointer(skb);
+
+ if (from_card_up) {
+ rxdp3->Buffer0_ptr =
+ pci_map_single(ring->pdev, ba->ba_0,
+ BUF0_LEN,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(nic->pdev,
+ rxdp3->Buffer0_ptr))
+ goto pci_map_failed;
+ } else
+ pci_dma_sync_single_for_device(ring->pdev,
+ (dma_addr_t)rxdp3->Buffer0_ptr,
+ BUF0_LEN,
+ PCI_DMA_FROMDEVICE);
+
+ rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
+ if (ring->rxd_mode == RXD_MODE_3B) {
+ /* Two buffer mode */
+
+ /*
+ * Buffer2 will have L3/L4 header plus
+ * L4 payload
+ */
+ rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
+ skb->data,
+ ring->mtu + 4,
+ PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(nic->pdev,
+ rxdp3->Buffer2_ptr))
+ goto pci_map_failed;
+
+ if (from_card_up) {
+ rxdp3->Buffer1_ptr =
+ pci_map_single(ring->pdev,
+ ba->ba_1,
+ BUF1_LEN,
+ PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(nic->pdev,
+ rxdp3->Buffer1_ptr)) {
+ pci_unmap_single(ring->pdev,
+ (dma_addr_t)(unsigned long)
+ skb->data,
+ ring->mtu + 4,
+ PCI_DMA_FROMDEVICE);
+ goto pci_map_failed;
+ }
+ }
+ rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
+ rxdp->Control_2 |= SET_BUFFER2_SIZE_3
+ (ring->mtu + 4);
+ }
+ rxdp->Control_2 |= s2BIT(0);
+ rxdp->Host_Control = (unsigned long) (skb);
+ }
+ if (alloc_tab & ((1 << rxsync_frequency) - 1))
+ rxdp->Control_1 |= RXD_OWN_XENA;
+ off++;
+ if (off == (ring->rxd_count + 1))
+ off = 0;
+ ring->rx_curr_put_info.offset = off;
+
+ rxdp->Control_2 |= SET_RXD_MARKER;
+ if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
+ if (first_rxdp) {
+ wmb();
+ first_rxdp->Control_1 |= RXD_OWN_XENA;
+ }
+ first_rxdp = rxdp;
+ }
+ ring->rx_bufs_left += 1;
+ alloc_tab++;
+ }
+
+end:
+ /* Transfer ownership of first descriptor to adapter just before
+ * exiting. Before that, use memory barrier so that ownership
+ * and other fields are seen by adapter correctly.
+ */
+ if (first_rxdp) {
+ wmb();
+ first_rxdp->Control_1 |= RXD_OWN_XENA;
+ }
+
+ return SUCCESS;
+
+pci_map_failed:
+ swstats->pci_map_fail_cnt++;
+ swstats->mem_freed += skb->truesize;
+ dev_kfree_skb_irq(skb);
+ return -ENOMEM;
+}
+
+static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
+{
+ struct net_device *dev = sp->dev;
+ int j;
+ struct sk_buff *skb;
+ struct RxD_t *rxdp;
+ struct RxD1 *rxdp1;
+ struct RxD3 *rxdp3;
+ struct mac_info *mac_control = &sp->mac_control;
+ struct stat_block *stats = mac_control->stats_info;
+ struct swStat *swstats = &stats->sw_stat;
+
+ for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
+ rxdp = mac_control->rings[ring_no].
+ rx_blocks[blk].rxds[j].virt_addr;
+ skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
+ if (!skb)
+ continue;
+ if (sp->rxd_mode == RXD_MODE_1) {
+ rxdp1 = (struct RxD1 *)rxdp;
+ pci_unmap_single(sp->pdev,
+ (dma_addr_t)rxdp1->Buffer0_ptr,
+ dev->mtu +
+ HEADER_ETHERNET_II_802_3_SIZE +
+ HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
+ PCI_DMA_FROMDEVICE);
+ memset(rxdp, 0, sizeof(struct RxD1));
+ } else if (sp->rxd_mode == RXD_MODE_3B) {
+ rxdp3 = (struct RxD3 *)rxdp;
+ pci_unmap_single(sp->pdev,
+ (dma_addr_t)rxdp3->Buffer0_ptr,
+ BUF0_LEN,
+ PCI_DMA_FROMDEVICE);
+ pci_unmap_single(sp->pdev,
+ (dma_addr_t)rxdp3->Buffer1_ptr,
+ BUF1_LEN,
+ PCI_DMA_FROMDEVICE);
+ pci_unmap_single(sp->pdev,
+ (dma_addr_t)rxdp3->Buffer2_ptr,
+ dev->mtu + 4,
+ PCI_DMA_FROMDEVICE);
+ memset(rxdp, 0, sizeof(struct RxD3));
+ }
+ swstats->mem_freed += skb->truesize;
+ dev_kfree_skb(skb);
+ mac_control->rings[ring_no].rx_bufs_left -= 1;
+ }
+}
+
+/**
+ * free_rx_buffers - Frees all Rx buffers
+ * @sp: device private variable.
+ * Description:
+ * This function will free all Rx buffers allocated by host.
+ * Return Value:
+ * NONE.
+ */
+
+static void free_rx_buffers(struct s2io_nic *sp)
+{
+ struct net_device *dev = sp->dev;
+ int i, blk = 0, buf_cnt = 0;
+ struct config_param *config = &sp->config;
+ struct mac_info *mac_control = &sp->mac_control;
+
+ for (i = 0; i < config->rx_ring_num; i++) {
+ struct ring_info *ring = &mac_control->rings[i];
+
+ /* free_rxd_blk() drains rx_bufs_left; snapshot it first so the
+  * debug print below reports the real count freed. */
+ buf_cnt = ring->rx_bufs_left;
+ for (blk = 0; blk < rx_ring_sz[i]; blk++)
+ free_rxd_blk(sp, i, blk);
+
+ ring->rx_curr_put_info.block_index = 0;
+ ring->rx_curr_get_info.block_index = 0;
+ ring->rx_curr_put_info.offset = 0;
+ ring->rx_curr_get_info.offset = 0;
+ ring->rx_bufs_left = 0;
+ DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
+ dev->name, buf_cnt, i);
+ }
+}
+
+static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
+{
+ if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
+ DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
+ ring->dev->name);
+ }
+ return 0;
+}
+
+/**
+ * s2io_poll_msix - Rx poll handler for NAPI support (MSI-X)
+ * @napi : pointer to the napi structure.
+ * @budget : The number of packets that were budgeted to be processed
+ * during one pass through the 'Poll' function.
+ * Description:
+ * Comes into picture only if NAPI support has been incorporated. It does
+ * the same thing that rx_intr_handler does, but not in an interrupt
+ * context. It will also process only a given number of packets.
+ * Return value:
+ * No. of packets processed; 0 if there were no Rx packets to process.
+ */
+
+static int s2io_poll_msix(struct napi_struct *napi, int budget)
+{
+ struct ring_info *ring = container_of(napi, struct ring_info, napi);
+ struct net_device *dev = ring->dev;
+ int pkts_processed = 0;
+ u8 __iomem *addr = NULL;
+ u8 val8 = 0;
+ struct s2io_nic *nic = netdev_priv(dev);
+ struct XENA_dev_config __iomem *bar0 = nic->bar0;
+ int budget_org = budget;
+
+ if (unlikely(!is_s2io_card_up(nic)))
+ return 0;
+
+ pkts_processed = rx_intr_handler(ring, budget);
+ s2io_chk_rx_buffers(nic, ring);
+
+ if (pkts_processed < budget_org) {
+ napi_complete(napi);
+ /* Re-enable the Rx MSI-X vector */
+ addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
+ addr += 7 - ring->ring_no;
+ val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
+ writeb(val8, addr);
+ val8 = readb(addr);
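+ /*
+  * Note: xmsi_mask_reg holds one mask byte per vector, and ring N's
+  * byte sits at offset (7 - ring_no); the readb() above flushes the
+  * posted writeb() so the unmask takes effect before we return.
+  */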
+ }
+ return pkts_processed;
+}
+
+static int s2io_poll_inta(struct napi_struct *napi, int budget)
+{
+ struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
+ int pkts_processed = 0;
+ int ring_pkts_processed, i;
+ struct XENA_dev_config __iomem *bar0 = nic->bar0;
+ int budget_org = budget;
+ struct config_param *config = &nic->config;
+ struct mac_info *mac_control = &nic->mac_control;
+
+ if (unlikely(!is_s2io_card_up(nic)))
+ return 0;
+
+ for (i = 0; i < config->rx_ring_num; i++) {
+ struct ring_info *ring = &mac_control->rings[i];
+ ring_pkts_processed = rx_intr_handler(ring, budget);
+ s2io_chk_rx_buffers(nic, ring);
+ pkts_processed += ring_pkts_processed;
+ budget -= ring_pkts_processed;
+ if (budget <= 0)
+ break;
+ }
+ if (pkts_processed < budget_org) {
+ napi_complete(napi);
+ /* Re enable the Rx interrupts for the ring */
+ writeq(0, &bar0->rx_traffic_mask);
+ readl(&bar0->rx_traffic_mask);
+ }
+ return pkts_processed;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * s2io_netpoll - netpoll event handler entry point
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function will be called by upper layer to check for events on the
+ * interface in situations where interrupts are disabled. It is used for
+ * specific in-kernel networking tasks, such as remote consoles and kernel
+ * debugging over the network (for example, netdump in Red Hat).
+ */
+static void s2io_netpoll(struct net_device *dev)
+{
+ struct s2io_nic *nic = netdev_priv(dev);
+ struct XENA_dev_config __iomem *bar0 = nic->bar0;
+ u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
+ int i;
+ struct config_param *config = &nic->config;
+ struct mac_info *mac_control = &nic->mac_control;
+
+ if (pci_channel_offline(nic->pdev))
+ return;
+
+ disable_irq(dev->irq);
+
+ writeq(val64, &bar0->rx_traffic_int);
+ writeq(val64, &bar0->tx_traffic_int);
+
+ /* We need to free up the transmitted skbs, or else netpoll will run
+  * out of skbs and fail, and eventually a netpoll application such as
+  * netdump will fail too.
+  */
+ for (i = 0; i < config->tx_fifo_num; i++)
+ tx_intr_handler(&mac_control->fifos[i]);
+
+ /* check for received packet and indicate up to network */
+ for (i = 0; i < config->rx_ring_num; i++) {
+ struct ring_info *ring = &mac_control->rings[i];
+
+ rx_intr_handler(ring, 0);
+ }
+
+ for (i = 0; i < config->rx_ring_num; i++) {
+ struct ring_info *ring = &mac_control->rings[i];
+
+ if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
+ DBG_PRINT(INFO_DBG,
+ "%s: Out of memory in Rx Netpoll!!\n",
+ dev->name);
+ break;
+ }
+ }
+ enable_irq(dev->irq);
+}
+#endif
+
+/**
+ * rx_intr_handler - Rx interrupt handler
+ * @ring_info: per ring structure.
+ * @budget: budget for napi processing.
+ * Description:
+ * If the interrupt is because of a received frame or if the
+ * receive ring contains fresh, as yet unprocessed frames, this function is
+ * called. It picks up the RxD at which the last Rx processing had
+ * stopped, sends the skb up to the OSM's Rx handler and then increments
+ * the offset.
+ * Return Value:
+ * No. of napi packets processed.
+ */
+static int rx_intr_handler(struct ring_info *ring_data, int budget)
+{
+ int get_block, put_block;
+ struct rx_curr_get_info get_info, put_info;
+ struct RxD_t *rxdp;
+ struct sk_buff *skb;
+ int pkt_cnt = 0, napi_pkts = 0;
+ int i;
+ struct RxD1 *rxdp1;
+ struct RxD3 *rxdp3;
+
+ get_info = ring_data->rx_curr_get_info;
+ get_block = get_info.block_index;
+ memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
+ put_block = put_info.block_index;
+ rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
+
+ while (RXD_IS_UP2DT(rxdp)) {
+ /*
+  * If we are next to the put index, the
+  * ring is in a full condition
+  */
+ if ((get_block == put_block) &&
+ (get_info.offset + 1) == put_info.offset) {
+ DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
+ ring_data->dev->name);
+ break;
+ }
+ skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
+ if (skb == NULL) {
+ DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
+ ring_data->dev->name);
+ return 0;
+ }
+ if (ring_data->rxd_mode == RXD_MODE_1) {
+ rxdp1 = (struct RxD1 *)rxdp;
+ pci_unmap_single(ring_data->pdev, (dma_addr_t)
+ rxdp1->Buffer0_ptr,
+ ring_data->mtu +
+ HEADER_ETHERNET_II_802_3_SIZE +
+ HEADER_802_2_SIZE +
+ HEADER_SNAP_SIZE,
+ PCI_DMA_FROMDEVICE);
+ } else if (ring_data->rxd_mode == RXD_MODE_3B) {
+ rxdp3 = (struct RxD3 *)rxdp;
+ pci_dma_sync_single_for_cpu(ring_data->pdev,
+ (dma_addr_t)rxdp3->Buffer0_ptr,
+ BUF0_LEN,
+ PCI_DMA_FROMDEVICE);
+ pci_unmap_single(ring_data->pdev,
+ (dma_addr_t)rxdp3->Buffer2_ptr,
+ ring_data->mtu + 4,
+ PCI_DMA_FROMDEVICE);
+ }
+ prefetch(skb->data);
+ rx_osm_handler(ring_data, rxdp);
+ get_info.offset++;
+ ring_data->rx_curr_get_info.offset = get_info.offset;
+ rxdp = ring_data->rx_blocks[get_block].
+ rxds[get_info.offset].virt_addr;
+ if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
+ get_info.offset = 0;
+ ring_data->rx_curr_get_info.offset = get_info.offset;
+ get_block++;
+ if (get_block == ring_data->block_count)
+ get_block = 0;
+ ring_data->rx_curr_get_info.block_index = get_block;
+ rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
+ }
+
+ if (ring_data->nic->config.napi) {
+ budget--;
+ napi_pkts++;
+ if (!budget)
+ break;
+ }
+ pkt_cnt++;
+ if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
+ break;
+ }
+ if (ring_data->lro) {
+ /* Clear all LRO sessions before exiting */
+ for (i = 0; i < MAX_LRO_SESSIONS; i++) {
+ struct lro *lro = &ring_data->lro0_n[i];
+ if (lro->in_use) {
+ update_L3L4_header(ring_data->nic, lro);
+ queue_rx_frame(lro->parent, lro->vlan_tag);
+ clear_lro_session(lro);
+ }
+ }
+ }
+ return napi_pkts;
+}
+
+/**
+ * tx_intr_handler - Transmit interrupt handler
+ * @fifo_data : fifo data structure
+ * Description:
+ * If an interrupt was raised to indicate DMA complete of the
+ * Tx packet, this function is called. It identifies the last TxD
+ * whose buffer was freed and frees all skbs whose data have already
+ * been DMA'ed into the NIC's internal memory.
+ * Return Value:
+ * NONE
+ */
+
+static void tx_intr_handler(struct fifo_info *fifo_data)
+{
+ struct s2io_nic *nic = fifo_data->nic;
+ struct tx_curr_get_info get_info, put_info;
+ struct sk_buff *skb = NULL;
+ struct TxD *txdlp;
+ int pkt_cnt = 0;
+ unsigned long flags = 0;
+ u8 err_mask;
+ struct stat_block *stats = nic->mac_control.stats_info;
+ struct swStat *swstats = &stats->sw_stat;
+
+ if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
+ return;
+
+ get_info = fifo_data->tx_curr_get_info;
+ memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
+ txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
+ while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
+ (get_info.offset != put_info.offset) &&
+ (txdlp->Host_Control)) {
+ /* Check for TxD errors */
+ if (txdlp->Control_1 & TXD_T_CODE) {
+ unsigned long long err;
+ err = txdlp->Control_1 & TXD_T_CODE;
+ if (err & 0x1) {
+ swstats->parity_err_cnt++;
+ }
+
+ /* update t_code statistics */
+ err_mask = err >> 48;
+ switch (err_mask) {
+ case 2:
+ swstats->tx_buf_abort_cnt++;
+ break;
+
+ case 3:
+ swstats->tx_desc_abort_cnt++;
+ break;
+
+ case 7:
+ swstats->tx_parity_err_cnt++;
+ break;
+
+ case 10:
+ swstats->tx_link_loss_cnt++;
+ break;
+
+ case 15:
+ swstats->tx_list_proc_err_cnt++;
+ break;
+ }
+ }
+
+ skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
+ if (skb == NULL) {
+ spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
+ DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
+ __func__);
+ return;
+ }
+ pkt_cnt++;
+
+ /* Updating the statistics block */
+ swstats->mem_freed += skb->truesize;
+ dev_kfree_skb_irq(skb);
+
+ get_info.offset++;
+ if (get_info.offset == get_info.fifo_len + 1)
+ get_info.offset = 0;
+ txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
+ fifo_data->tx_curr_get_info.offset = get_info.offset;
+ }
+
+ s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
+
+ spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
+}
+
+/**
+ * s2io_mdio_write - Function to write into the MDIO registers
+ * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
+ * @addr : address value
+ * @value : data value
+ * @dev : pointer to net_device structure
+ * Description:
+ * This function is used to write values into the MDIO registers.
+ * Return value: NONE
+ */
+static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
+ struct net_device *dev)
+{
+ u64 val64;
+ struct s2io_nic *sp = netdev_priv(dev);
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+
+ /* address transaction */
+ val64 = MDIO_MMD_INDX_ADDR(addr) |
+ MDIO_MMD_DEV_ADDR(mmd_type) |
+ MDIO_MMS_PRT_ADDR(0x0);
+ writeq(val64, &bar0->mdio_control);
+ val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
+ writeq(val64, &bar0->mdio_control);
+ udelay(100);
+
+ /* Data transaction */
+ val64 = MDIO_MMD_INDX_ADDR(addr) |
+ MDIO_MMD_DEV_ADDR(mmd_type) |
+ MDIO_MMS_PRT_ADDR(0x0) |
+ MDIO_MDIO_DATA(value) |
+ MDIO_OP(MDIO_OP_WRITE_TRANS);
+ writeq(val64, &bar0->mdio_control);
+ val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
+ writeq(val64, &bar0->mdio_control);
+ udelay(100);
+
+ val64 = MDIO_MMD_INDX_ADDR(addr) |
+ MDIO_MMD_DEV_ADDR(mmd_type) |
+ MDIO_MMS_PRT_ADDR(0x0) |
+ MDIO_OP(MDIO_OP_READ_TRANS);
+ writeq(val64, &bar0->mdio_control);
+ val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
+ writeq(val64, &bar0->mdio_control);
+ udelay(100);
+}
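+/*
+ * Example: s2io_updt_xpak_counter() below uses this to load the DOM
+ * register in the PMA/PMD MMD:
+ *
+ *     s2io_mdio_write(MDIO_MMD_PMAPMD, 0xA100, 0x0, dev);
+ */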
+
+/**
+ * s2io_mdio_read - Function to read from the MDIO registers
+ * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
+ * @addr : address value
+ * @dev : pointer to net_device structure
+ * Description:
+ * This function is used to read values from the MDIO registers.
+ * Return value: the value read
+ */
+static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
+{
+ u64 val64 = 0x0;
+ u64 rval64 = 0x0;
+ struct s2io_nic *sp = netdev_priv(dev);
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+
+ /* address transaction */
+ val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
+ | MDIO_MMD_DEV_ADDR(mmd_type)
+ | MDIO_MMS_PRT_ADDR(0x0));
+ writeq(val64, &bar0->mdio_control);
+ val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
+ writeq(val64, &bar0->mdio_control);
+ udelay(100);
+
+ /* Data transaction */
+ val64 = MDIO_MMD_INDX_ADDR(addr) |
+ MDIO_MMD_DEV_ADDR(mmd_type) |
+ MDIO_MMS_PRT_ADDR(0x0) |
+ MDIO_OP(MDIO_OP_READ_TRANS);
+ writeq(val64, &bar0->mdio_control);
+ val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
+ writeq(val64, &bar0->mdio_control);
+ udelay(100);
+
+ /* Read the result back; the read data occupies bits 31:16 of mdio_control */
+ rval64 = readq(&bar0->mdio_control);
+ rval64 = rval64 & 0xFFFF0000;
+ rval64 = rval64 >> 16;
+ return rval64;
+}
+
+/**
+ * s2io_chk_xpak_counter - Function to check the status of the xpak counters
+ * @counter : counter value to be updated
+ * @regs_stat : 2-bit per-alarm event counters packed into one u64
+ * @index : index of this alarm's 2-bit field within @regs_stat
+ * @flag : flag to indicate the status
+ * @type : counter type
+ * Description:
+ * This function checks the status of the xpak counters value.
+ * Return value: NONE
+ */
+
+static void s2io_chk_xpak_counter(u64 *counter, u64 *regs_stat, u32 index,
+ u16 flag, u16 type)
+{
+ u64 mask = 0x3;
+ u64 val64;
+ int i;
+ for (i = 0; i < index; i++)
+ mask = mask << 0x2;
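+ /*
+  * Each alarm owns a 2-bit field in *regs_stat; e.g. for index = 2
+  * the mask is 0x3 << 4 = 0x30.
+  */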
+
+ if (flag > 0) {
+ *counter = *counter + 1;
+ val64 = *regs_stat & mask;
+ val64 = val64 >> (index * 0x2);
+ val64 = val64 + 1;
+ if (val64 == 3) {
+ switch (type) {
+ case 1:
+ DBG_PRINT(ERR_DBG,
+ "Take Xframe NIC out of service.\n");
+ DBG_PRINT(ERR_DBG,
+"Excessive temperatures may result in premature transceiver failure.\n");
+ break;
+ case 2:
+ DBG_PRINT(ERR_DBG,
+ "Take Xframe NIC out of service.\n");
+ DBG_PRINT(ERR_DBG,
+"Excessive bias currents may indicate imminent laser diode failure.\n");
+ break;
+ case 3:
+ DBG_PRINT(ERR_DBG,
+ "Take Xframe NIC out of service.\n");
+ DBG_PRINT(ERR_DBG,
+"Excessive laser output power may saturate far-end receiver.\n");
+ break;
+ default:
+ DBG_PRINT(ERR_DBG,
+ "Incorrect XPAK Alarm type\n");
+ }
+ val64 = 0x0;
+ }
+ val64 = val64 << (index * 0x2);
+ *regs_stat = (*regs_stat & (~mask)) | (val64);
+
+ } else {
+ *regs_stat = *regs_stat & (~mask);
+ }
+}
+
+/**
+ * s2io_updt_xpak_counter - Function to update the xpak counters
+ * @dev : pointer to net_device struct
+ * Description:
+ * This function updates the status of the xpak counters.
+ * Return value: NONE
+ */
+static void s2io_updt_xpak_counter(struct net_device *dev)
+{
+ u16 flag = 0x0;
+ u16 type = 0x0;
+ u16 val16 = 0x0;
+ u64 val64 = 0x0;
+ u64 addr = 0x0;
+
+ struct s2io_nic *sp = netdev_priv(dev);
+ struct stat_block *stats = sp->mac_control.stats_info;
+ struct xpakStat *xstats = &stats->xpak_stat;
+
+ /* Check the communication with the MDIO slave */
+ addr = MDIO_CTRL1;
+ val64 = 0x0;
+ val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
+ if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
+ DBG_PRINT(ERR_DBG,
+ "ERR: MDIO slave access failed - Returned %llx\n",
+ (unsigned long long)val64);
+ return;
+ }
+
+ /* Check for the expected value of control reg 1 */
+ if (val64 != MDIO_CTRL1_SPEED10G) {
+ DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
+ "Returned: %llx- Expected: 0x%x\n",
+ (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
+ return;
+ }
+
+ /* Loading the DOM register to MDIO register */
+ addr = 0xA100;
+ s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
+ val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
+
+ /* Reading the Alarm flags */
+ addr = 0xA070;
+ val64 = 0x0;
+ val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
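+ /*
+  * Alarm flag layout (mirrored by the CHECKBIT() tests below):
+  * bits 7/6 - transceiver temp high/low, bits 3/2 - laser bias
+  * current high/low, bits 1/0 - laser output power high/low.
+  */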
+
+ flag = CHECKBIT(val64, 0x7);
+ type = 1;
+ s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
+ &xstats->xpak_regs_stat,
+ 0x0, flag, type);
+
+ if (CHECKBIT(val64, 0x6))
+ xstats->alarm_transceiver_temp_low++;
+
+ flag = CHECKBIT(val64, 0x3);
+ type = 2;
+ s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
+ &xstats->xpak_regs_stat,
+ 0x2, flag, type);
+
+ if (CHECKBIT(val64, 0x2))
+ xstats->alarm_laser_bias_current_low++;
+
+ flag = CHECKBIT(val64, 0x1);
+ type = 3;
+ s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
+ &xstats->xpak_regs_stat,
+ 0x4, flag, type);
+
+ if (CHECKBIT(val64, 0x0))
+ xstats->alarm_laser_output_power_low++;
+
+ /* Reading the Warning flags */
+ addr = 0xA074;
+ val64 = 0x0;
+ val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
+
+ if (CHECKBIT(val64, 0x7))
+ xstats->warn_transceiver_temp_high++;
+
+ if (CHECKBIT(val64, 0x6))
+ xstats->warn_transceiver_temp_low++;
+
+ if (CHECKBIT(val64, 0x3))
+ xstats->warn_laser_bias_current_high++;
+
+ if (CHECKBIT(val64, 0x2))
+ xstats->warn_laser_bias_current_low++;
+
+ if (CHECKBIT(val64, 0x1))
+ xstats->warn_laser_output_power_high++;
+
+ if (CHECKBIT(val64, 0x0))
+ xstats->warn_laser_output_power_low++;
+}
+
+/**
+ * wait_for_cmd_complete - waits for a command to complete.
+ * @addr : address of the register to poll
+ * @busy_bit : bit in the register that indicates the command is in progress
+ * @bit_state : S2IO_BIT_RESET or S2IO_BIT_SET, the state to wait for
+ * Description: Function that waits for a command written to the RMAC
+ * ADDR/DATA registers to be completed and returns either success or
+ * error depending on whether the command completed or not.
+ * Return value:
+ * SUCCESS on success and FAILURE on failure.
+ */
+
+static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
+ int bit_state)
+{
+ int ret = FAILURE, cnt = 0, delay = 1;
+ u64 val64;
+
+ if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
+ return FAILURE;
+
+ do {
+ val64 = readq(addr);
+ if (bit_state == S2IO_BIT_RESET) {
+ if (!(val64 & busy_bit)) {
+ ret = SUCCESS;
+ break;
+ }
+ } else {
+ if (val64 & busy_bit) {
+ ret = SUCCESS;
+ break;
+ }
+ }
+
+ if (in_interrupt())
+ mdelay(delay);
+ else
+ msleep(delay);
+
+ if (++cnt >= 10)
+ delay = 50;
+ } while (cnt < 20);
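+ /*
+  * Worst case we poll 20 times: the first ten waits are 1 ms each,
+  * the remaining ten 50 ms each - roughly 510 ms before giving up.
+  */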
+ return ret;
+}
+
+/*
+ * check_pci_device_id - Checks if the device id is supported
+ * @id : device id
+ * Description: Function to check if the pci device id is supported by driver.
+ * Return value: Actual device id if supported else PCI_ANY_ID
+ */
+static u16 check_pci_device_id(u16 id)
+{
+ switch (id) {
+ case PCI_DEVICE_ID_HERC_WIN:
+ case PCI_DEVICE_ID_HERC_UNI:
+ return XFRAME_II_DEVICE;
+ case PCI_DEVICE_ID_S2IO_UNI:
+ case PCI_DEVICE_ID_S2IO_WIN:
+ return XFRAME_I_DEVICE;
+ default:
+ return PCI_ANY_ID;
+ }
+}
+
+/**
+ * s2io_reset - Resets the card.
+ * @sp : private member of the device structure.
+ * Description: Function to reset the card. It also restores the
+ * previously saved PCI configuration space registers, as the card
+ * reset also resets the configuration space.
+ * Return value:
+ * void.
+ */
+
+static void s2io_reset(struct s2io_nic *sp)
+{
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ u64 val64;
+ u16 subid, pci_cmd;
+ int i;
+ u16 val16;
+ unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
+ unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
+ struct stat_block *stats;
+ struct swStat *swstats;
+
+ DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
+ __func__, pci_name(sp->pdev));
+
+ /* Back up the PCI-X CMD reg; we don't want to lose MMRBC, OST settings */
+ pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
+
+ val64 = SW_RESET_ALL;
+ writeq(val64, &bar0->sw_reset);
+ if (strstr(sp->product_name, "CX4"))
+ msleep(750);
+ msleep(250);
+ for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
+
+ /* Restore the PCI state saved during initialization. */
+ pci_restore_state(sp->pdev);
+ pci_save_state(sp->pdev);
+ pci_read_config_word(sp->pdev, 0x2, &val16);
+ if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
+ break;
+ msleep(200);
+ }
+
+ if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
+ DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
+
+ pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
+
+ s2io_init_pci(sp);
+
+ /* Set swapper to enable I/O register access */
+ s2io_set_swapper(sp);
+
+ /* restore mac_addr entries */
+ do_s2io_restore_unicast_mc(sp);
+
+ /* Restore the MSIX table entries from local variables */
+ restore_xmsi_data(sp);
+
+ /* Clear certain PCI/PCI-X fields after reset */
+ if (sp->device_type == XFRAME_II_DEVICE) {
+ /* Clear "detected parity error" bit */
+ pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
+
+ /* Clearing the PCI-X ECC status register */
+ pci_write_config_dword(sp->pdev, 0x68, 0x7C);
+
+ /* Clearing PCI_STATUS error reflected here */
+ writeq(s2BIT(62), &bar0->txpic_int_reg);
+ }
+
+ /* Reset device statistics maintained by OS */
+ memset(&sp->stats, 0, sizeof(struct net_device_stats));
+
+ stats = sp->mac_control.stats_info;
+ swstats = &stats->sw_stat;
+
+ /* save link up/down time/cnt, reset/memory/watchdog cnt */
+ up_cnt = swstats->link_up_cnt;
+ down_cnt = swstats->link_down_cnt;
+ up_time = swstats->link_up_time;
+ down_time = swstats->link_down_time;
+ reset_cnt = swstats->soft_reset_cnt;
+ mem_alloc_cnt = swstats->mem_allocated;
+ mem_free_cnt = swstats->mem_freed;
+ watchdog_cnt = swstats->watchdog_timer_cnt;
+
+ memset(stats, 0, sizeof(struct stat_block));
+
+ /* restore link up/down time/cnt, reset/memory/watchdog cnt */
+ swstats->link_up_cnt = up_cnt;
+ swstats->link_down_cnt = down_cnt;
+ swstats->link_up_time = up_time;
+ swstats->link_down_time = down_time;
+ swstats->soft_reset_cnt = reset_cnt;
+ swstats->mem_allocated = mem_alloc_cnt;
+ swstats->mem_freed = mem_free_cnt;
+ swstats->watchdog_timer_cnt = watchdog_cnt;
+
+ /* SXE-002: Configure link and activity LED to turn it off */
+ subid = sp->pdev->subsystem_device;
+ if (((subid & 0xFF) >= 0x07) &&
+ (sp->device_type == XFRAME_I_DEVICE)) {
+ val64 = readq(&bar0->gpio_control);
+ val64 |= 0x0000800000000000ULL;
+ writeq(val64, &bar0->gpio_control);
+ val64 = 0x0411040400000000ULL;
+ writeq(val64, (void __iomem *)bar0 + 0x2700);
+ }
+
+ /*
+ * Clear spurious ECC interrupts that would have occurred on
+ * XFRAME II cards after reset.
+ */
+ if (sp->device_type == XFRAME_II_DEVICE) {
+ val64 = readq(&bar0->pcc_err_reg);
+ writeq(val64, &bar0->pcc_err_reg);
+ }
+
+ sp->device_enabled_once = false;
+}
+
+/**
+ * s2io_set_swapper - to set the swapper control on the card
+ * @sp : private member of the device structure,
+ * pointer to the s2io_nic structure.
+ * Description: Function to set the swapper control on the card
+ * correctly depending on the 'endianness' of the system.
+ * Return value:
+ * SUCCESS on success and FAILURE on failure.
+ */
+
+static int s2io_set_swapper(struct s2io_nic *sp)
+{
+ struct net_device *dev = sp->dev;
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ u64 val64, valt, valr;
+
+ /*
+ * Set proper endian settings and verify the same by reading
+ * the PIF Feed-back register.
+ */
+
+ val64 = readq(&bar0->pif_rd_swapper_fb);
+ if (val64 != 0x0123456789ABCDEFULL) {
+ int i = 0;
+ static const u64 value[] = {
+ 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
+ 0x8100008181000081ULL, /* FE=1, SE=0 */
+ 0x4200004242000042ULL, /* FE=0, SE=1 */
+ 0 /* FE=0, SE=0 */
+ };
+
+ while (i < 4) {
+ writeq(value[i], &bar0->swapper_ctrl);
+ val64 = readq(&bar0->pif_rd_swapper_fb);
+ if (val64 == 0x0123456789ABCDEFULL)
+ break;
+ i++;
+ }
+ if (i == 4) {
+ DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
+ "feedback read %llx\n",
+ dev->name, (unsigned long long)val64);
+ return FAILURE;
+ }
+ valr = value[i];
+ } else {
+ valr = readq(&bar0->swapper_ctrl);
+ }
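+ /*
+  * Read path verified: valr now holds swapper bits under which the
+  * PIF feed-back register read back correctly. Next verify the write
+  * path by writing a known pattern through xmsi_address.
+  */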
+
+ valt = 0x0123456789ABCDEFULL;
+ writeq(valt, &bar0->xmsi_address);
+ val64 = readq(&bar0->xmsi_address);
+
+ if (val64 != valt) {
+ int i = 0;
+ static const u64 value[] = {
+ 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
+ 0x0081810000818100ULL, /* FE=1, SE=0 */
+ 0x0042420000424200ULL, /* FE=0, SE=1 */
+ 0 /* FE=0, SE=0 */
+ };
+
+ while (i < 4) {
+ writeq((value[i] | valr), &bar0->swapper_ctrl);
+ writeq(valt, &bar0->xmsi_address);
+ val64 = readq(&bar0->xmsi_address);
+ if (val64 == valt)
+ break;
+ i++;
+ }
+ if (i == 4) {
+ unsigned long long x = val64;
+ DBG_PRINT(ERR_DBG,
+ "Write failed, Xmsi_addr reads:0x%llx\n", x);
+ return FAILURE;
+ }
+ }
+ val64 = readq(&bar0->swapper_ctrl);
+ val64 &= 0xFFFF000000000000ULL;
+
+#ifdef __BIG_ENDIAN
+ /*
+ * The device is set to a big endian format by default, so a
+ * big endian driver need not set anything.
+ */
+ val64 |= (SWAPPER_CTRL_TXP_FE |
+ SWAPPER_CTRL_TXP_SE |
+ SWAPPER_CTRL_TXD_R_FE |
+ SWAPPER_CTRL_TXD_W_FE |
+ SWAPPER_CTRL_TXF_R_FE |
+ SWAPPER_CTRL_RXD_R_FE |
+ SWAPPER_CTRL_RXD_W_FE |
+ SWAPPER_CTRL_RXF_W_FE |
+ SWAPPER_CTRL_XMSI_FE |
+ SWAPPER_CTRL_STATS_FE |
+ SWAPPER_CTRL_STATS_SE);
+ if (sp->config.intr_type == INTA)
+ val64 |= SWAPPER_CTRL_XMSI_SE;
+ writeq(val64, &bar0->swapper_ctrl);
+#else
+ /*
+ * Initially we enable all bits to make it accessible by the
+ * driver, then we selectively enable only those bits that
+ * we want to set.
+ */
+ val64 |= (SWAPPER_CTRL_TXP_FE |
+ SWAPPER_CTRL_TXP_SE |
+ SWAPPER_CTRL_TXD_R_FE |
+ SWAPPER_CTRL_TXD_R_SE |
+ SWAPPER_CTRL_TXD_W_FE |
+ SWAPPER_CTRL_TXD_W_SE |
+ SWAPPER_CTRL_TXF_R_FE |
+ SWAPPER_CTRL_RXD_R_FE |
+ SWAPPER_CTRL_RXD_R_SE |
+ SWAPPER_CTRL_RXD_W_FE |
+ SWAPPER_CTRL_RXD_W_SE |
+ SWAPPER_CTRL_RXF_W_FE |
+ SWAPPER_CTRL_XMSI_FE |
+ SWAPPER_CTRL_STATS_FE |
+ SWAPPER_CTRL_STATS_SE);
+ if (sp->config.intr_type == INTA)
+ val64 |= SWAPPER_CTRL_XMSI_SE;
+ writeq(val64, &bar0->swapper_ctrl);
+#endif
+ val64 = readq(&bar0->swapper_ctrl);
+
+ /*
+ * Verifying if endian settings are accurate by reading a
+ * feedback register.
+ */
+ val64 = readq(&bar0->pif_rd_swapper_fb);
+ if (val64 != 0x0123456789ABCDEFULL) {
+ /* Endian settings are incorrect; take another look. */
+ DBG_PRINT(ERR_DBG,
+ "%s: Endian settings are wrong, feedback read %llx\n",
+ dev->name, (unsigned long long)val64);
+ return FAILURE;
+ }
+
+ return SUCCESS;
+}
+
+static int wait_for_msix_trans(struct s2io_nic *nic, int i)
+{
+ struct XENA_dev_config __iomem *bar0 = nic->bar0;
+ u64 val64;
+ int ret = 0, cnt = 0;
+
+ do {
+ val64 = readq(&bar0->xmsi_access);
+ if (!(val64 & s2BIT(15)))
+ break;
+ mdelay(1);
+ cnt++;
+ } while (cnt < 5);
+ if (cnt == 5) {
+ DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static void restore_xmsi_data(struct s2io_nic *nic)
+{
+ struct XENA_dev_config __iomem *bar0 = nic->bar0;
+ u64 val64;
+ int i, msix_index;
+
+ if (nic->device_type == XFRAME_I_DEVICE)
+ return;
+
+ for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
+ msix_index = (i) ? ((i-1) * 8 + 1) : 0;
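+ /*
+  * Entry 0 is the alarm vector; entry i maps to MSI-X table slot
+  * (i - 1) * 8 + 1, matching the layout set up in s2io_enable_msi_x().
+  */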
+ writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
+ writeq(nic->msix_info[i].data, &bar0->xmsi_data);
+ val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
+ writeq(val64, &bar0->xmsi_access);
+ if (wait_for_msix_trans(nic, msix_index)) {
+ DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
+ __func__, msix_index);
+ continue;
+ }
+ }
+}
+
+static void store_xmsi_data(struct s2io_nic *nic)
+{
+ struct XENA_dev_config __iomem *bar0 = nic->bar0;
+ u64 val64, addr, data;
+ int i, msix_index;
+
+ if (nic->device_type == XFRAME_I_DEVICE)
+ return;
+
+ /* Store and display */
+ for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
+ msix_index = (i) ? ((i-1) * 8 + 1) : 0;
+ val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
+ writeq(val64, &bar0->xmsi_access);
+ if (wait_for_msix_trans(nic, msix_index)) {
+ DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
+ __func__, msix_index);
+ continue;
+ }
+ addr = readq(&bar0->xmsi_address);
+ data = readq(&bar0->xmsi_data);
+ if (addr && data) {
+ nic->msix_info[i].addr = addr;
+ nic->msix_info[i].data = data;
+ }
+ }
+}
+
+static int s2io_enable_msi_x(struct s2io_nic *nic)
+{
+ struct XENA_dev_config __iomem *bar0 = nic->bar0;
+ u64 rx_mat;
+ u16 msi_control; /* Temp variable */
+ int ret, i, j, msix_indx = 1;
+ int size;
+ struct stat_block *stats = nic->mac_control.stats_info;
+ struct swStat *swstats = &stats->sw_stat;
+
+ size = nic->num_entries * sizeof(struct msix_entry);
+ nic->entries = kzalloc(size, GFP_KERNEL);
+ if (!nic->entries) {
+ DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
+ __func__);
+ swstats->mem_alloc_fail_cnt++;
+ return -ENOMEM;
+ }
+ swstats->mem_allocated += size;
+
+ size = nic->num_entries * sizeof(struct s2io_msix_entry);
+ nic->s2io_entries = kzalloc(size, GFP_KERNEL);
+ if (!nic->s2io_entries) {
+ DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
+ __func__);
+ swstats->mem_alloc_fail_cnt++;
+ kfree(nic->entries);
+ swstats->mem_freed
+ += (nic->num_entries * sizeof(struct msix_entry));
+ return -ENOMEM;
+ }
+ swstats->mem_allocated += size;
+
+ nic->entries[0].entry = 0;
+ nic->s2io_entries[0].entry = 0;
+ nic->s2io_entries[0].in_use = MSIX_FLG;
+ nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
+ nic->s2io_entries[0].arg = &nic->mac_control.fifos;
+
+ for (i = 1; i < nic->num_entries; i++) {
+ nic->entries[i].entry = ((i - 1) * 8) + 1;
+ nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
+ nic->s2io_entries[i].arg = NULL;
+ nic->s2io_entries[i].in_use = 0;
+ }
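+ /*
+  * Resulting layout (a sketch, with n rx rings): entry 0 is the alarm
+  * vector, entries 1..n use MSI-X table slots 1, 9, 17, ... - the same
+  * (i - 1) * 8 + 1 spacing consumed by store_xmsi_data() and
+  * restore_xmsi_data().
+  */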
+
+ rx_mat = readq(&bar0->rx_mat);
+ for (j = 0; j < nic->config.rx_ring_num; j++) {
+ rx_mat |= RX_MAT_SET(j, msix_indx);
+ nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
+ nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
+ nic->s2io_entries[j+1].in_use = MSIX_FLG;
+ msix_indx += 8;
+ }
+ writeq(rx_mat, &bar0->rx_mat);
+ readq(&bar0->rx_mat);
+
+ ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
+ /* We fail init on error, or if we get fewer vectors than the minimum required */
+ if (ret) {
+ DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
+ kfree(nic->entries);
+ swstats->mem_freed += nic->num_entries *
+ sizeof(struct msix_entry);
+ kfree(nic->s2io_entries);
+ swstats->mem_freed += nic->num_entries *
+ sizeof(struct s2io_msix_entry);
+ nic->entries = NULL;
+ nic->s2io_entries = NULL;
+ return -ENOMEM;
+ }
+
+ /*
+ * To enable MSI-X, MSI also needs to be enabled, due to a bug
+ * in the herc NIC. (Temp change, needs to be removed later)
+ */
+ pci_read_config_word(nic->pdev, 0x42, &msi_control);
+ msi_control |= 0x1; /* Enable MSI */
+ pci_write_config_word(nic->pdev, 0x42, msi_control);
+
+ return 0;
+}
+
+/* Handle software interrupt used during MSI(X) test */
+static irqreturn_t s2io_test_intr(int irq, void *dev_id)
+{
+ struct s2io_nic *sp = dev_id;
+
+ sp->msi_detected = 1;
+ wake_up(&sp->msi_wait);
+
+ return IRQ_HANDLED;
+}
+
+/* Test the interrupt path by forcing a software IRQ */
+static int s2io_test_msi(struct s2io_nic *sp)
+{
+ struct pci_dev *pdev = sp->pdev;
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ int err;
+ u64 val64, saved64;
+
+ err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
+ sp->name, sp);
+ if (err) {
+ DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
+ sp->dev->name, pci_name(pdev), pdev->irq);
+ return err;
+ }
+
+ init_waitqueue_head(&sp->msi_wait);
+ sp->msi_detected = 0;
+
+ saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
+ val64 |= SCHED_INT_CTRL_ONE_SHOT;
+ val64 |= SCHED_INT_CTRL_TIMER_EN;
+ val64 |= SCHED_INT_CTRL_INT2MSI(1);
+ writeq(val64, &bar0->scheduled_int_ctrl);
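+ /*
+  * The scheduled interrupt timer is armed one-shot and steered to
+  * MSI-X vector 1; if the vector works, s2io_test_intr() fires and
+  * sets msi_detected before the 100 ms (HZ/10) timeout below.
+  */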
+
+ wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
+
+ if (!sp->msi_detected) {
+ /* MSI(X) test failed, go back to INTx mode */
+ DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
+ "using MSI(X) during test\n",
+ sp->dev->name, pci_name(pdev));
+
+ err = -EOPNOTSUPP;
+ }
+
+ free_irq(sp->entries[1].vector, sp);
+
+ writeq(saved64, &bar0->scheduled_int_ctrl);
+
+ return err;
+}
+
+static void remove_msix_isr(struct s2io_nic *sp)
+{
+ int i;
+ u16 msi_control;
+
+ for (i = 0; i < sp->num_entries; i++) {
+ if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
+ int vector = sp->entries[i].vector;
+ void *arg = sp->s2io_entries[i].arg;
+ free_irq(vector, arg);
+ }
+ }
+
+ kfree(sp->entries);
+ kfree(sp->s2io_entries);
+ sp->entries = NULL;
+ sp->s2io_entries = NULL;
+
+ pci_read_config_word(sp->pdev, 0x42, &msi_control);
+ msi_control &= 0xFFFE; /* Disable MSI */
+ pci_write_config_word(sp->pdev, 0x42, msi_control);
+
+ pci_disable_msix(sp->pdev);
+}
+
+static void remove_inta_isr(struct s2io_nic *sp)
+{
+ struct net_device *dev = sp->dev;
+
+ free_irq(sp->pdev->irq, dev);
+}
+
+/* ********************************************************* *
+ * Functions defined below concern the OS part of the driver *
+ * ********************************************************* */
+
+/**
+ * s2io_open - open entry point of the driver
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function is the open entry point of the driver. It mainly calls a
+ * function to allocate Rx buffers and inserts them into the buffer
+ * descriptors and then enables the Rx part of the NIC.
+ * Return value:
+ * 0 on success and an appropriate negative integer as defined in
+ * errno.h on failure.
+ */
+
+static int s2io_open(struct net_device *dev)
+{
+ struct s2io_nic *sp = netdev_priv(dev);
+ struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
+ int err = 0;
+
+ /*
+ * Make sure the link is off by default every time
+ * the NIC is initialized
+ */
+ netif_carrier_off(dev);
+ sp->last_link_state = 0;
+
+ /* Initialize H/W and enable interrupts */
+ err = s2io_card_up(sp);
+ if (err) {
+ DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
+ dev->name);
+ goto hw_init_failed;
+ }
+
+ if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
+ DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
+ s2io_card_down(sp);
+ err = -ENODEV;
+ goto hw_init_failed;
+ }
+ s2io_start_all_tx_queue(sp);
+ return 0;
+
+hw_init_failed:
+ if (sp->config.intr_type == MSI_X) {
+ if (sp->entries) {
+ kfree(sp->entries);
+ swstats->mem_freed += sp->num_entries *
+ sizeof(struct msix_entry);
+ }
+ if (sp->s2io_entries) {
+ kfree(sp->s2io_entries);
+ swstats->mem_freed += sp->num_entries *
+ sizeof(struct s2io_msix_entry);
+ }
+ }
+ return err;
+}
+
+/**
+ * s2io_close -close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver. It needs to undo exactly
+ * whatever was done by the open entry point, thus it's usually referred to
+ * as the close function. Among other things this function mainly stops the
+ * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
+ * Return value:
+ * 0 on success and an appropriate negative integer as defined in
+ * errno.h on failure.
+ */
+
+static int s2io_close(struct net_device *dev)
+{
+ struct s2io_nic *sp = netdev_priv(dev);
+ struct config_param *config = &sp->config;
+ u64 tmp64;
+ int offset;
+
+ /*
+  * Return if the device is already closed.
+  * Can happen when s2io_card_up failed in change_mtu.
+  */
+ if (!is_s2io_card_up(sp))
+ return 0;
+
+ s2io_stop_all_tx_queue(sp);
+ /* delete all populated mac entries */
+ for (offset = 1; offset < config->max_mc_addr; offset++) {
+ tmp64 = do_s2io_read_unicast_mc(sp, offset);
+ if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
+ do_s2io_delete_unicast_mc(sp, tmp64);
+ }
+
+ s2io_card_down(sp);
+
+ return 0;
+}
+
+/**
+ * s2io_xmit - Tx entry point of the driver
+ * @skb : the socket buffer containing the Tx data.
+ * @dev : device pointer.
+ * Description :
+ * This function is the Tx entry point of the driver. The S2IO NIC supports
+ * certain protocol assist features on the Tx side, namely CSO, S/G, LSO.
+ * NOTE: when the device can't queue the packet, just the trans_start
+ * variable will not be updated.
+ * Return value:
+ * NETDEV_TX_OK on success; NETDEV_TX_BUSY or NETDEV_TX_LOCKED otherwise.
+ */
+
+static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct s2io_nic *sp = netdev_priv(dev);
+ u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
+ register u64 val64;
+ struct TxD *txdp;
+ struct TxFIFO_element __iomem *tx_fifo;
+ unsigned long flags = 0;
+ u16 vlan_tag = 0;
+ struct fifo_info *fifo = NULL;
+ int do_spin_lock = 1;
+ int offload_type;
+ int enable_per_list_interrupt = 0;
+ struct config_param *config = &sp->config;
+ struct mac_info *mac_control = &sp->mac_control;
+ struct stat_block *stats = mac_control->stats_info;
+ struct swStat *swstats = &stats->sw_stat;
+
+ DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
+
+ if (unlikely(skb->len <= 0)) {
+ DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (!is_s2io_card_up(sp)) {
+ DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
+ dev->name);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ queue = 0;
+ if (vlan_tx_tag_present(skb))
+ vlan_tag = vlan_tx_tag_get(skb);
+ if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *ip;
+ struct tcphdr *th;
+ ip = ip_hdr(skb);
+
+ if (!ip_is_fragment(ip)) {
+ th = (struct tcphdr *)(((unsigned char *)ip) +
+ ip->ihl*4);
+
+ if (ip->protocol == IPPROTO_TCP) {
+ queue_len = sp->total_tcp_fifos;
+ queue = (ntohs(th->source) +
+ ntohs(th->dest)) &
+ sp->fifo_selector[queue_len - 1];
+ if (queue >= queue_len)
+ queue = queue_len - 1;
+ } else if (ip->protocol == IPPROTO_UDP) {
+ queue_len = sp->total_udp_fifos;
+ queue = (ntohs(th->source) +
+ ntohs(th->dest)) &
+ sp->fifo_selector[queue_len - 1];
+ if (queue >= queue_len)
+ queue = queue_len - 1;
+ queue += sp->udp_fifo_idx;
+ if (skb->len > 1024)
+ enable_per_list_interrupt = 1;
+ do_spin_lock = 0;
+ }
+ }
+ }
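+ /*
+  * Example: a TCP flow with ports 1000 and 2000 is steered to fifo
+  * (1000 + 2000) & sp->fifo_selector[queue_len - 1], clamped to
+  * queue_len - 1 if the masked value overshoots.
+  */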
+ } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
+ /* get fifo number based on skb->priority value */
+ queue = config->fifo_mapping
+ [skb->priority & (MAX_TX_FIFOS - 1)];
+ fifo = &mac_control->fifos[queue];
+
+ if (do_spin_lock)
+ spin_lock_irqsave(&fifo->tx_lock, flags);
+ else {
+ if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
+ return NETDEV_TX_LOCKED;
+ }
+
+ if (sp->config.multiq) {
+ if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
+ spin_unlock_irqrestore(&fifo->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+ } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
+ if (netif_queue_stopped(dev)) {
+ spin_unlock_irqrestore(&fifo->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ put_off = (u16)fifo->tx_curr_put_info.offset;
+ get_off = (u16)fifo->tx_curr_get_info.offset;
+ txdp = fifo->list_info[put_off].list_virt_addr;
+
+ queue_len = fifo->tx_curr_put_info.fifo_len + 1;
+ /* Avoid "put" pointer going beyond "get" pointer */
+ if (txdp->Host_Control ||
+ ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
+ DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
+ s2io_stop_tx_queue(sp, fifo->fifo_no);
+ dev_kfree_skb(skb);
+ spin_unlock_irqrestore(&fifo->tx_lock, flags);
+ return NETDEV_TX_OK;
+ }
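+ /*
+  * Invariant: with queue_len slots, the ring is treated as full once
+  * advancing "put" would land on "get", so at most queue_len - 1
+  * TxDLs are ever outstanding.
+  */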
+
+ offload_type = s2io_offload_type(skb);
+ if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ txdp->Control_1 |= TXD_TCP_LSO_EN;
+ txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
+ }
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
+ TXD_TX_CKO_TCP_EN |
+ TXD_TX_CKO_UDP_EN);
+ }
+ txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
+ txdp->Control_1 |= TXD_LIST_OWN_XENA;
+ txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
+ if (enable_per_list_interrupt)
+ if (put_off & (queue_len >> 5))
+ txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
+ if (vlan_tag) {
+ txdp->Control_2 |= TXD_VLAN_ENABLE;
+ txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
+ }
+
+ frg_len = skb_headlen(skb);
+ if (offload_type == SKB_GSO_UDP) {
+ int ufo_size;
+
+ ufo_size = s2io_udp_mss(skb);
+ ufo_size &= ~7;
+ txdp->Control_1 |= TXD_UFO_EN;
+ txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
+ txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
+#ifdef __BIG_ENDIAN
+ /* both variants do cpu_to_be64(be32_to_cpu(...)) */
+ fifo->ufo_in_band_v[put_off] =
+ (__force u64)skb_shinfo(skb)->ip6_frag_id;
+#else
+ fifo->ufo_in_band_v[put_off] =
+ (__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
+#endif
+ txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
+ txdp->Buffer_Pointer = pci_map_single(sp->pdev,
+ fifo->ufo_in_band_v,
+ sizeof(u64),
+ PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
+ goto pci_map_failed;
+ txdp++;
+ }
+
+ txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
+ frg_len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
+ goto pci_map_failed;
+
+ txdp->Host_Control = (unsigned long)skb;
+ txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
+ if (offload_type == SKB_GSO_UDP)
+ txdp->Control_1 |= TXD_UFO_EN;
+
+ frg_cnt = skb_shinfo(skb)->nr_frags;
+ /* For fragmented SKB. */
+ for (i = 0; i < frg_cnt; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ /* A '0' length fragment will be ignored */
+ if (!frag->size)
+ continue;
+ txdp++;
+ txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
+ frag, 0,
+ frag->size,
+ DMA_TO_DEVICE);
+ txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
+ if (offload_type == SKB_GSO_UDP)
+ txdp->Control_1 |= TXD_UFO_EN;
+ }
+ txdp->Control_1 |= TXD_GATHER_CODE_LAST;
+
+ if (offload_type == SKB_GSO_UDP)
+ frg_cnt++; /* as Txd0 was used for inband header */
+
+ tx_fifo = mac_control->tx_FIFO_start[queue];
+ val64 = fifo->list_info[put_off].list_phy_addr;
+ writeq(val64, &tx_fifo->TxDL_Pointer);
+
+ val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
+ TX_FIFO_LAST_LIST);
+ if (offload_type)
+ val64 |= TX_FIFO_SPECIAL_FUNC;
+
+ writeq(val64, &tx_fifo->List_Control);
+
+ mmiowb();
+
+ put_off++;
+ if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
+ put_off = 0;
+ fifo->tx_curr_put_info.offset = put_off;
+
+ /* Avoid "put" pointer going beyond "get" pointer */
+ if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
+ swstats->fifo_full_cnt++;
+ DBG_PRINT(TX_DBG,
+ "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
+ put_off, get_off);
+ s2io_stop_tx_queue(sp, fifo->fifo_no);
+ }
+ swstats->mem_allocated += skb->truesize;
+ spin_unlock_irqrestore(&fifo->tx_lock, flags);
+
+ if (sp->config.intr_type == MSI_X)
+ tx_intr_handler(fifo);
+
+ return NETDEV_TX_OK;
+
+pci_map_failed:
+ swstats->pci_map_fail_cnt++;
+ s2io_stop_tx_queue(sp, fifo->fifo_no);
+ swstats->mem_freed += skb->truesize;
+ dev_kfree_skb(skb);
+ spin_unlock_irqrestore(&fifo->tx_lock, flags);
+ return NETDEV_TX_OK;
+}
+
+static void
+s2io_alarm_handle(unsigned long data)
+{
+ struct s2io_nic *sp = (struct s2io_nic *)data;
+ struct net_device *dev = sp->dev;
+
+ s2io_handle_errors(dev);
+ mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
+}
+
+static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
+{
+ struct ring_info *ring = (struct ring_info *)dev_id;
+ struct s2io_nic *sp = ring->nic;
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+
+ if (unlikely(!is_s2io_card_up(sp)))
+ return IRQ_HANDLED;
+
+ if (sp->config.napi) {
+ u8 __iomem *addr = NULL;
+ u8 val8 = 0;
+
+ addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
+ addr += (7 - ring->ring_no);
+ val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
+ writeb(val8, addr);
+ val8 = readb(addr);
+ napi_schedule(&ring->napi);
+ } else {
+ rx_intr_handler(ring, 0);
+ s2io_chk_rx_buffers(sp, ring);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
+{
+ int i;
+ struct fifo_info *fifos = (struct fifo_info *)dev_id;
+ struct s2io_nic *sp = fifos->nic;
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ struct config_param *config = &sp->config;
+ u64 reason;
+
+ if (unlikely(!is_s2io_card_up(sp)))
+ return IRQ_NONE;
+
+ reason = readq(&bar0->general_int_status);
+ if (unlikely(reason == S2IO_MINUS_ONE))
+ /* Nothing much can be done. Get out */
+ return IRQ_HANDLED;
+
+ if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
+ writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
+
+ if (reason & GEN_INTR_TXPIC)
+ s2io_txpic_intr_handle(sp);
+
+ if (reason & GEN_INTR_TXTRAFFIC)
+ writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
+
+ for (i = 0; i < config->tx_fifo_num; i++)
+ tx_intr_handler(&fifos[i]);
+
+ writeq(sp->general_int_mask, &bar0->general_int_mask);
+ readl(&bar0->general_int_status);
+ return IRQ_HANDLED;
+ }
+ /* The interrupt was not raised by us */
+ return IRQ_NONE;
+}
+
+static void s2io_txpic_intr_handle(struct s2io_nic *sp)
+{
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ u64 val64;
+
+ val64 = readq(&bar0->pic_int_status);
+ if (val64 & PIC_INT_GPIO) {
+ val64 = readq(&bar0->gpio_int_reg);
+ if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
+ (val64 & GPIO_INT_REG_LINK_UP)) {
+ /*
+ * This is an unstable state, so clear both up/down
+ * interrupts and let the adapter re-evaluate the link state.
+ */
+ val64 |= GPIO_INT_REG_LINK_DOWN;
+ val64 |= GPIO_INT_REG_LINK_UP;
+ writeq(val64, &bar0->gpio_int_reg);
+ val64 = readq(&bar0->gpio_int_mask);
+ val64 &= ~(GPIO_INT_MASK_LINK_UP |
+ GPIO_INT_MASK_LINK_DOWN);
+ writeq(val64, &bar0->gpio_int_mask);
+ } else if (val64 & GPIO_INT_REG_LINK_UP) {
+ val64 = readq(&bar0->adapter_status);
+ /* Enable Adapter */
+ val64 = readq(&bar0->adapter_control);
+ val64 |= ADAPTER_CNTL_EN;
+ writeq(val64, &bar0->adapter_control);
+ val64 |= ADAPTER_LED_ON;
+ writeq(val64, &bar0->adapter_control);
+ if (!sp->device_enabled_once)
+ sp->device_enabled_once = 1;
+
+ s2io_link(sp, LINK_UP);
+ /*
+ * unmask link down interrupt and mask link-up
+ * intr
+ */
+ val64 = readq(&bar0->gpio_int_mask);
+ val64 &= ~GPIO_INT_MASK_LINK_DOWN;
+ val64 |= GPIO_INT_MASK_LINK_UP;
+ writeq(val64, &bar0->gpio_int_mask);
+
+ } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
+ val64 = readq(&bar0->adapter_status);
+ s2io_link(sp, LINK_DOWN);
+ /* Link is down so unmask the link up interrupt */
+ val64 = readq(&bar0->gpio_int_mask);
+ val64 &= ~GPIO_INT_MASK_LINK_UP;
+ val64 |= GPIO_INT_MASK_LINK_DOWN;
+ writeq(val64, &bar0->gpio_int_mask);
+
+ /* turn off LED */
+ val64 = readq(&bar0->adapter_control);
+ val64 = val64 & (~ADAPTER_LED_ON);
+ writeq(val64, &bar0->adapter_control);
+ }
+ }
+ val64 = readq(&bar0->gpio_int_mask);
+}
+
+/**
+ * do_s2io_chk_alarm_bit - Check for alarm and increment the counter
+ * @value: alarm bits
+ * @addr: address value
+ * @cnt: counter variable
+ * Description: Check for alarm and increment the counter
+ * Return Value:
+ * 1 - if alarm bit set
+ * 0 - if alarm bit is not set
+ */
+static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
+ unsigned long long *cnt)
+{
+ u64 val64;
+ val64 = readq(addr);
+ if (val64 & value) {
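+ /*
+  * The alarm registers are write-1-to-clear, so writing back the
+  * value just read acknowledges exactly the bits we observed.
+  */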
+ writeq(val64, addr);
+ (*cnt)++;
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * s2io_handle_errors - Xframe error indication handler
+ * @dev_id: opaque pointer, actually the net_device structure
+ * Description: Handle alarms such as loss of link, single or
+ * double ECC errors, critical and serious errors.
+ * Return Value:
+ * NONE
+ */
+static void s2io_handle_errors(void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct s2io_nic *sp = netdev_priv(dev);
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ u64 temp64 = 0, val64 = 0;
+ int i = 0;
+
+ struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
+ struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
+
+ if (!is_s2io_card_up(sp))
+ return;
+
+ if (pci_channel_offline(sp->pdev))
+ return;
+
+ memset(&sw_stat->ring_full_cnt, 0,
+ sizeof(sw_stat->ring_full_cnt));
+
+ /* Handling the XPAK counters update */
+ if (stats->xpak_timer_count < 72000) {
+ /* waiting for an hour */
+ stats->xpak_timer_count++;
+ } else {
+ s2io_updt_xpak_counter(dev);
+ /* reset the count to zero */
+ stats->xpak_timer_count = 0;
+ }
+
+ /* Handling link status change error Intr */
+ if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
+ val64 = readq(&bar0->mac_rmac_err_reg);
+ writeq(val64, &bar0->mac_rmac_err_reg);
+ if (val64 & RMAC_LINK_STATE_CHANGE_INT)
+ schedule_work(&sp->set_link_task);
+ }
+
+ /* In case of a serious error, the device will be Reset. */
+ if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
+ &sw_stat->serious_err_cnt))
+ goto reset;
+
+ /* Check for data parity error */
+ if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
+ &sw_stat->parity_err_cnt))
+ goto reset;
+
+ /* Check for ring full counter */
+ if (sp->device_type == XFRAME_II_DEVICE) {
+ val64 = readq(&bar0->ring_bump_counter1);
+ for (i = 0; i < 4; i++) {
+ temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
+ temp64 >>= 64 - ((i+1)*16);
+ sw_stat->ring_full_cnt[i] += temp64;
+ }
+
+ val64 = readq(&bar0->ring_bump_counter2);
+ for (i = 0; i < 4; i++) {
+ temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
+ temp64 >>= 64 - ((i+1)*16);
+ sw_stat->ring_full_cnt[i+4] += temp64;
+ }
+ }
+
+ val64 = readq(&bar0->txdma_int_status);
+ /*check for pfc_err*/
+ if (val64 & TXDMA_PFC_INT) {
+ if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
+ PFC_MISC_0_ERR | PFC_MISC_1_ERR |
+ PFC_PCIX_ERR,
+ &bar0->pfc_err_reg,
+ &sw_stat->pfc_err_cnt))
+ goto reset;
+ do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
+ &bar0->pfc_err_reg,
+ &sw_stat->pfc_err_cnt);
+ }
+
+ /*check for tda_err*/
+ if (val64 & TXDMA_TDA_INT) {
+ if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
+ TDA_SM0_ERR_ALARM |
+ TDA_SM1_ERR_ALARM,
+ &bar0->tda_err_reg,
+ &sw_stat->tda_err_cnt))
+ goto reset;
+ do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
+ &bar0->tda_err_reg,
+ &sw_stat->tda_err_cnt);
+ }
+ /*check for pcc_err*/
+ if (val64 & TXDMA_PCC_INT) {
+ if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
+ PCC_N_SERR | PCC_6_COF_OV_ERR |
+ PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
+ PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
+ PCC_TXB_ECC_DB_ERR,
+ &bar0->pcc_err_reg,
+ &sw_stat->pcc_err_cnt))
+ goto reset;
+ do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
+ &bar0->pcc_err_reg,
+ &sw_stat->pcc_err_cnt);
+ }
+
+ /*check for tti_err*/
+ if (val64 & TXDMA_TTI_INT) {
+ if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
+ &bar0->tti_err_reg,
+ &sw_stat->tti_err_cnt))
+ goto reset;
+ do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
+ &bar0->tti_err_reg,
+ &sw_stat->tti_err_cnt);
+ }
+
+ /*check for lso_err*/
+ if (val64 & TXDMA_LSO_INT) {
+ if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
+ LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
+ &bar0->lso_err_reg,
+ &sw_stat->lso_err_cnt))
+ goto reset;
+ do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
+ &bar0->lso_err_reg,
+ &sw_stat->lso_err_cnt);
+ }
+
+ /*check for tpa_err*/
+ if (val64 & TXDMA_TPA_INT) {
+ if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
+ &bar0->tpa_err_reg,
+ &sw_stat->tpa_err_cnt))
+ goto reset;
+ do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
+ &bar0->tpa_err_reg,
+ &sw_stat->tpa_err_cnt);
+ }
+
+ /*check for sm_err*/
+ if (val64 & TXDMA_SM_INT) {
+ if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
+ &bar0->sm_err_reg,
+ &sw_stat->sm_err_cnt))
+ goto reset;
+ }
+
+ val64 = readq(&bar0->mac_int_status);
+ if (val64 & MAC_INT_STATUS_TMAC_INT) {
+ if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
+ &bar0->mac_tmac_err_reg,
+ &sw_stat->mac_tmac_err_cnt))
+ goto reset;
+ do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
+ TMAC_DESC_ECC_SG_ERR |
+ TMAC_DESC_ECC_DB_ERR,
+ &bar0->mac_tmac_err_reg,
+ &sw_stat->mac_tmac_err_cnt);
+ }
+
+ val64 = readq(&bar0->xgxs_int_status);
+ if (val64 & XGXS_INT_STATUS_TXGXS) {
+ if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
+ &bar0->xgxs_txgxs_err_reg,
+ &sw_stat->xgxs_txgxs_err_cnt))
+ goto reset;
+ do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
+ &bar0->xgxs_txgxs_err_reg,
+ &sw_stat->xgxs_txgxs_err_cnt);
+ }
+
+ val64 = readq(&bar0->rxdma_int_status);
+ if (val64 & RXDMA_INT_RC_INT_M) {
+ if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
+ RC_FTC_ECC_DB_ERR |
+ RC_PRCn_SM_ERR_ALARM |
+ RC_FTC_SM_ERR_ALARM,
+ &bar0->rc_err_reg,
+ &sw_stat->rc_err_cnt))
+ goto reset;
+ do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
+ RC_FTC_ECC_SG_ERR |
+ RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
+ &sw_stat->rc_err_cnt);
+ if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
+ PRC_PCI_AB_WR_Rn |
+ PRC_PCI_AB_F_WR_Rn,
+ &bar0->prc_pcix_err_reg,
+ &sw_stat->prc_pcix_err_cnt))
+ goto reset;
+ do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
+ PRC_PCI_DP_WR_Rn |
+ PRC_PCI_DP_F_WR_Rn,
+ &bar0->prc_pcix_err_reg,
+ &sw_stat->prc_pcix_err_cnt);
+ }
+
+ if (val64 & RXDMA_INT_RPA_INT_M) {
+ if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
+ &bar0->rpa_err_reg,
+ &sw_stat->rpa_err_cnt))
+ goto reset;
+ do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
+ &bar0->rpa_err_reg,
+ &sw_stat->rpa_err_cnt);
+ }
+
+ if (val64 & RXDMA_INT_RDA_INT_M) {
+ if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
+ RDA_FRM_ECC_DB_N_AERR |
+ RDA_SM1_ERR_ALARM |
+ RDA_SM0_ERR_ALARM |
+ RDA_RXD_ECC_DB_SERR,
+ &bar0->rda_err_reg,
+ &sw_stat->rda_err_cnt))
+ goto reset;
+ do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
+ RDA_FRM_ECC_SG_ERR |
+ RDA_MISC_ERR |
+ RDA_PCIX_ERR,
+ &bar0->rda_err_reg,
+ &sw_stat->rda_err_cnt);
+ }
+
+ if (val64 & RXDMA_INT_RTI_INT_M) {
+ if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
+ &bar0->rti_err_reg,
+ &sw_stat->rti_err_cnt))
+ goto reset;
+ do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
+ &bar0->rti_err_reg,
+ &sw_stat->rti_err_cnt);
+ }
+
+ val64 = readq(&bar0->mac_int_status);
+ if (val64 & MAC_INT_STATUS_RMAC_INT) {
+ if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
+ &bar0->mac_rmac_err_reg,
+ &sw_stat->mac_rmac_err_cnt))
+ goto reset;
+ do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
+ RMAC_SINGLE_ECC_ERR |
+ RMAC_DOUBLE_ECC_ERR,
+ &bar0->mac_rmac_err_reg,
+ &sw_stat->mac_rmac_err_cnt);
+ }
+
+ val64 = readq(&bar0->xgxs_int_status);
+ if (val64 & XGXS_INT_STATUS_RXGXS) {
+ if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
+ &bar0->xgxs_rxgxs_err_reg,
+ &sw_stat->xgxs_rxgxs_err_cnt))
+ goto reset;
+ }
+
+ val64 = readq(&bar0->mc_int_status);
+ if (val64 & MC_INT_STATUS_MC_INT) {
+ if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
+ &bar0->mc_err_reg,
+ &sw_stat->mc_err_cnt))
+ goto reset;
+
+ /* Handling Ecc errors */
+ if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
+ writeq(val64, &bar0->mc_err_reg);
+ if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
+ sw_stat->double_ecc_errs++;
+ if (sp->device_type != XFRAME_II_DEVICE) {
+ /*
+ * Reset XframeI only if critical error
+ */
+ if (val64 &
+ (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
+ MC_ERR_REG_MIRI_ECC_DB_ERR_1))
+ goto reset;
+ }
+ } else
+ sw_stat->single_ecc_errs++;
+ }
+ }
+ return;
+
+reset:
+ s2io_stop_all_tx_queue(sp);
+ schedule_work(&sp->rst_timer_task);
+ sw_stat->soft_reset_cnt++;
+}
+
+/**
+ * s2io_isr - ISR handler of the device .
+ * @irq: the irq of the device.
+ * @dev_id: a void pointer to the dev structure of the NIC.
+ * Description: This function is the ISR handler of the device. It
+ * identifies the reason for the interrupt and calls the relevant
+ * service routines. As a contingency measure, this ISR allocates the
+ * recv buffers, if their numbers are below the panic value which is
+ * presently set to 25% of the original number of rcv buffers allocated.
+ * Return value:
+ * IRQ_HANDLED: will be returned if IRQ was handled by this routine
+ * IRQ_NONE: will be returned if interrupt is not from our device
+ */
+static irqreturn_t s2io_isr(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct s2io_nic *sp = netdev_priv(dev);
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ int i;
+ u64 reason = 0;
+ struct mac_info *mac_control;
+ struct config_param *config;
+
+ /* Pretend we handled any irq's from a disconnected card */
+ if (pci_channel_offline(sp->pdev))
+ return IRQ_NONE;
+
+ if (!is_s2io_card_up(sp))
+ return IRQ_NONE;
+
+ config = &sp->config;
+ mac_control = &sp->mac_control;
+
+ /*
+ * Identify the cause for interrupt and call the appropriate
+ * interrupt handler. Causes for the interrupt could be:
+ * 1. Rx of packet.
+ * 2. Tx complete.
+ * 3. Link down.
+ */
+ reason = readq(&bar0->general_int_status);
+
+ if (unlikely(reason == S2IO_MINUS_ONE))
+ return IRQ_HANDLED; /* Nothing much can be done. Get out */
+
+ if (reason &
+ (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
+ writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
+
+ if (config->napi) {
+ if (reason & GEN_INTR_RXTRAFFIC) {
+ napi_schedule(&sp->napi);
+ writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
+ writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
+ readl(&bar0->rx_traffic_int);
+ }
+ } else {
+ /*
+ * rx_traffic_int reg is an R1 register, writing all 1's
+ * will ensure that the actual interrupt causing bit
+ * gets cleared and hence a read can be avoided.
+ */
+ if (reason & GEN_INTR_RXTRAFFIC)
+ writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
+
+ for (i = 0; i < config->rx_ring_num; i++) {
+ struct ring_info *ring = &mac_control->rings[i];
+
+ rx_intr_handler(ring, 0);
+ }
+ }
+
+ /*
+ * tx_traffic_int reg is an R1 register, writing all 1's
+ * will ensure that the actual interrupt-causing bit gets
+ * cleared and hence a read can be avoided.
+ */
+ if (reason & GEN_INTR_TXTRAFFIC)
+ writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
+
+ for (i = 0; i < config->tx_fifo_num; i++)
+ tx_intr_handler(&mac_control->fifos[i]);
+
+ if (reason & GEN_INTR_TXPIC)
+ s2io_txpic_intr_handle(sp);
+
+ /*
+ * Reallocate the buffers from the interrupt handler itself.
+ */
+ if (!config->napi) {
+ for (i = 0; i < config->rx_ring_num; i++) {
+ struct ring_info *ring = &mac_control->rings[i];
+
+ s2io_chk_rx_buffers(sp, ring);
+ }
+ }
+ writeq(sp->general_int_mask, &bar0->general_int_mask);
+ readl(&bar0->general_int_status);
+
+ return IRQ_HANDLED;
+
+ } else if (!reason) {
+ /* The interrupt was not raised by us */
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
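+
+/*
+ * Illustrative note (not part of the driver logic itself): a handler like
+ * s2io_isr is registered against the PCI device's interrupt line as a
+ * shared handler, along the lines of:
+ *
+ *	err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
+ *			  sp->dev->name, dev);
+ *
+ * which is why dev_id arrives here as the struct net_device pointer.
+ */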
+
+/**
+ * s2io_updt_stats - requests an immediate update of the statistics block.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * Description:
+ * Kicks off a one-shot update of the on-adapter statistics block and
+ * polls briefly for its completion.
+ */
+static void s2io_updt_stats(struct s2io_nic *sp)
+{
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ u64 val64;
+ int cnt = 0;
+
+ if (is_s2io_card_up(sp)) {
+ /* Approx. 30us on a 133 MHz bus */
+ val64 = SET_UPDT_CLICKS(10) |
+ STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
+ writeq(val64, &bar0->stat_cfg);
+ do {
+ udelay(100);
+ val64 = readq(&bar0->stat_cfg);
+ if (!(val64 & s2BIT(0)))
+ break;
+ cnt++;
+ if (cnt == 5)
+ break; /* Update failed */
+ } while (1);
+ }
+}
+
+/**
+ * s2io_get_stats - Updates the device statistics structure.
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function updates the device statistics structure in the s2io_nic
+ * structure and returns a pointer to the same.
+ * Return value:
+ * pointer to the updated net_device_stats structure.
+ */
+static struct net_device_stats *s2io_get_stats(struct net_device *dev)
+{
+ struct s2io_nic *sp = netdev_priv(dev);
+ struct mac_info *mac_control = &sp->mac_control;
+ struct stat_block *stats = mac_control->stats_info;
+ u64 delta;
+
+ /* Configure Stats for immediate updt */
+ s2io_updt_stats(sp);
+
+ /* A device reset will cause the on-adapter statistics to be zeroed.
+ * This can be done while running by changing the MTU. To prevent the
+ * system from having the stats zeroed, the driver keeps a copy of the
+ * last update to the system (which is also zeroed on reset). This
+ * enables the driver to accurately know the delta between the last
+ * update and the current update.
+ */
+ delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
+ le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
+ sp->stats.rx_packets += delta;
+ dev->stats.rx_packets += delta;
+
+ delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
+ le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
+ sp->stats.tx_packets += delta;
+ dev->stats.tx_packets += delta;
+
+ delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
+ le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
+ sp->stats.rx_bytes += delta;
+ dev->stats.rx_bytes += delta;
+
+ delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
+ le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
+ sp->stats.tx_bytes += delta;
+ dev->stats.tx_bytes += delta;
+
+ delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
+ sp->stats.rx_errors += delta;
+ dev->stats.rx_errors += delta;
+
+ delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
+ le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
+ sp->stats.tx_errors += delta;
+ dev->stats.tx_errors += delta;
+
+ delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
+ sp->stats.rx_dropped += delta;
+ dev->stats.rx_dropped += delta;
+
+ delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
+ sp->stats.tx_dropped += delta;
+ dev->stats.tx_dropped += delta;
+
+ /* The adapter MAC interprets pause frames as multicast packets, but
+ * does not pass them up. This erroneously increases the multicast
+ * packet count and needs to be deducted when the multicast frame count
+ * is queried.
+ */
+ delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
+ le32_to_cpu(stats->rmac_vld_mcst_frms);
+ delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
+ delta -= sp->stats.multicast;
+ sp->stats.multicast += delta;
+ dev->stats.multicast += delta;
+
+ delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
+ le32_to_cpu(stats->rmac_usized_frms)) +
+ le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
+ sp->stats.rx_length_errors += delta;
+ dev->stats.rx_length_errors += delta;
+
+ delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
+ sp->stats.rx_crc_errors += delta;
+ dev->stats.rx_crc_errors += delta;
+
+ return &dev->stats;
+}
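+
+/*
+ * Sketch of the counter layout used above: the adapter keeps each large
+ * counter as a 32-bit low word plus a 32-bit overflow word, so a full
+ * 64-bit value is assembled as, e.g.:
+ *
+ *	u64 frms = (u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
+ *		   le32_to_cpu(stats->rmac_vld_frms);
+ *
+ * and only the delta against the driver's last snapshot is folded into
+ * dev->stats.
+ */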
+
+/**
+ * s2io_set_multicast - entry point for multicast address enable/disable.
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever multicast addresses must be enabled/disabled. This also gets
+ * called to set/reset promiscuous mode. Depending on the device flags, we
+ * determine whether multicast addresses must be enabled or promiscuous mode
+ * is to be disabled, etc.
+ * Return value:
+ * void.
+ */
+
+static void s2io_set_multicast(struct net_device *dev)
+{
+ int i, j, prev_cnt;
+ struct netdev_hw_addr *ha;
+ struct s2io_nic *sp = netdev_priv(dev);
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
+ 0xfeffffffffffULL;
+ u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
+ void __iomem *add;
+ struct config_param *config = &sp->config;
+
+ if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
+ /* Enable all Multicast addresses */
+ writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
+ &bar0->rmac_addr_data0_mem);
+ writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
+ &bar0->rmac_addr_data1_mem);
+ val64 = RMAC_ADDR_CMD_MEM_WE |
+ RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
+ RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
+ writeq(val64, &bar0->rmac_addr_cmd_mem);
+ /* Wait till command completes */
+ wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
+ S2IO_BIT_RESET);
+
+ sp->m_cast_flg = 1;
+ sp->all_multi_pos = config->max_mc_addr - 1;
+ } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
+ /* Disable all Multicast addresses */
+ writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
+ &bar0->rmac_addr_data0_mem);
+ writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
+ &bar0->rmac_addr_data1_mem);
+ val64 = RMAC_ADDR_CMD_MEM_WE |
+ RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
+ RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
+ writeq(val64, &bar0->rmac_addr_cmd_mem);
+ /* Wait till command completes */
+ wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
+ S2IO_BIT_RESET);
+
+ sp->m_cast_flg = 0;
+ sp->all_multi_pos = 0;
+ }
+
+ if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
+ /* Put the NIC into promiscuous mode */
+ add = &bar0->mac_cfg;
+ val64 = readq(&bar0->mac_cfg);
+ val64 |= MAC_CFG_RMAC_PROM_ENABLE;
+
+ writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+ writel((u32)val64, add);
+ writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+ writel((u32) (val64 >> 32), (add + 4));
+
+ if (vlan_tag_strip != 1) {
+ val64 = readq(&bar0->rx_pa_cfg);
+ val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
+ writeq(val64, &bar0->rx_pa_cfg);
+ sp->vlan_strip_flag = 0;
+ }
+
+ val64 = readq(&bar0->mac_cfg);
+ sp->promisc_flg = 1;
+ DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
+ dev->name);
+ } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
+ /* Remove the NIC from promiscuous mode */
+ add = &bar0->mac_cfg;
+ val64 = readq(&bar0->mac_cfg);
+ val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
+
+ writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+ writel((u32)val64, add);
+ writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
+ writel((u32) (val64 >> 32), (add + 4));
+
+ if (vlan_tag_strip != 0) {
+ val64 = readq(&bar0->rx_pa_cfg);
+ val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
+ writeq(val64, &bar0->rx_pa_cfg);
+ sp->vlan_strip_flag = 1;
+ }
+
+ val64 = readq(&bar0->mac_cfg);
+ sp->promisc_flg = 0;
+ DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
+ }
+
+ /* Update individual M_CAST address list */
+ if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
+ if (netdev_mc_count(dev) >
+ (config->max_mc_addr - config->max_mac_addr)) {
+ DBG_PRINT(ERR_DBG,
+ "%s: No more Rx filters can be added - "
+ "please enable ALL_MULTI instead\n",
+ dev->name);
+ return;
+ }
+
+ prev_cnt = sp->mc_addr_count;
+ sp->mc_addr_count = netdev_mc_count(dev);
+
+ /* Clear out the previous list of multicast addresses in the H/W. */
+ for (i = 0; i < prev_cnt; i++) {
+ writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
+ &bar0->rmac_addr_data0_mem);
+ writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
+ &bar0->rmac_addr_data1_mem);
+ val64 = RMAC_ADDR_CMD_MEM_WE |
+ RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
+ RMAC_ADDR_CMD_MEM_OFFSET
+ (config->mc_start_offset + i);
+ writeq(val64, &bar0->rmac_addr_cmd_mem);
+
+ /* Wait till command completes */
+ if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
+ S2IO_BIT_RESET)) {
+ DBG_PRINT(ERR_DBG,
+ "%s: Adding Multicasts failed\n",
+ dev->name);
+ return;
+ }
+ }
+
+ /* Create the new Rx filter list and update the same in H/W. */
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ mac_addr = 0;
+ for (j = 0; j < ETH_ALEN; j++) {
+ mac_addr |= ha->addr[j];
+ mac_addr <<= 8;
+ }
+ mac_addr >>= 8;
+ writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
+ &bar0->rmac_addr_data0_mem);
+ writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
+ &bar0->rmac_addr_data1_mem);
+ val64 = RMAC_ADDR_CMD_MEM_WE |
+ RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
+ RMAC_ADDR_CMD_MEM_OFFSET
+ (i + config->mc_start_offset);
+ writeq(val64, &bar0->rmac_addr_cmd_mem);
+
+ /* Wait till command completes */
+ if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
+ S2IO_BIT_RESET)) {
+ DBG_PRINT(ERR_DBG,
+ "%s: Adding Multicasts failed\n",
+ dev->name);
+ return;
+ }
+ i++;
+ }
+ }
+}
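+
+/*
+ * Usage note (illustrative): the kernel calls the routine above through
+ * the netdev ops whenever the flags or address list change, e.g. when
+ * userspace runs:
+ *
+ *	ip link set eth0 promisc on		# sets IFF_PROMISC
+ *	ip link set eth0 allmulticast on	# sets IFF_ALLMULTI
+ */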
+
+/* Read unicast & multicast addresses from the CAM and store them in the
+ * def_mac_addr structure
+ */
+static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
+{
+ int offset;
+ u64 mac_addr = 0x0;
+ struct config_param *config = &sp->config;
+
+ /* store unicast & multicast mac addresses */
+ for (offset = 0; offset < config->max_mc_addr; offset++) {
+ mac_addr = do_s2io_read_unicast_mc(sp, offset);
+ /* if read fails disable the entry */
+ if (mac_addr == FAILURE)
+ mac_addr = S2IO_DISABLE_MAC_ENTRY;
+ do_s2io_copy_mac_addr(sp, offset, mac_addr);
+ }
+}
+
+/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
+static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
+{
+ int offset;
+ struct config_param *config = &sp->config;
+ /* restore unicast mac addresses */
+ for (offset = 0; offset < config->max_mac_addr; offset++)
+ do_s2io_prog_unicast(sp->dev,
+ sp->def_mac_addr[offset].mac_addr);
+
+ /* restore multicast mac address */
+ for (offset = config->mc_start_offset;
+ offset < config->max_mc_addr; offset++)
+ do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
+}
+
+/* add a multicast MAC address to CAM */
+static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
+{
+ int i;
+ u64 mac_addr = 0;
+ struct config_param *config = &sp->config;
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac_addr <<= 8;
+ mac_addr |= addr[i];
+ }
+ if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
+ return SUCCESS;
+
+ /* check if the multicast mac is already present in the CAM */
+ for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
+ u64 tmp64;
+ tmp64 = do_s2io_read_unicast_mc(sp, i);
+ if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
+ break;
+
+ if (tmp64 == mac_addr)
+ return SUCCESS;
+ }
+ if (i == config->max_mc_addr) {
+ DBG_PRINT(ERR_DBG,
+ "CAM full no space left for multicast MAC\n");
+ return FAILURE;
+ }
+ /* Update the internal structure with this new mac address */
+ do_s2io_copy_mac_addr(sp, i, mac_addr);
+
+ return do_s2io_add_mac(sp, mac_addr, i);
+}
+
+/* add MAC address to CAM */
+static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
+{
+ u64 val64;
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+
+ writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
+ &bar0->rmac_addr_data0_mem);
+
+ val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
+ RMAC_ADDR_CMD_MEM_OFFSET(off);
+ writeq(val64, &bar0->rmac_addr_cmd_mem);
+
+ /* Wait till command completes */
+ if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
+ S2IO_BIT_RESET)) {
+ DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
+ return FAILURE;
+ }
+ return SUCCESS;
+}
+
+/* deletes a specified unicast/multicast mac entry from CAM */
+static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
+{
+ int offset;
+ u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
+ struct config_param *config = &sp->config;
+
+ for (offset = 1;
+ offset < config->max_mc_addr; offset++) {
+ tmp64 = do_s2io_read_unicast_mc(sp, offset);
+ if (tmp64 == addr) {
+ /* disable the entry by writing 0xffffffffffffULL */
+ if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
+ return FAILURE;
+ /* store the new mac list from CAM */
+ do_s2io_store_unicast_mc(sp);
+ return SUCCESS;
+ }
+ }
+ DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
+ (unsigned long long)addr);
+ return FAILURE;
+}
+
+/* read mac entries from CAM */
+static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
+{
+ u64 tmp64 = 0xffffffffffff0000ULL, val64;
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+
+ /* read mac addr */
+ val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
+ RMAC_ADDR_CMD_MEM_OFFSET(offset);
+ writeq(val64, &bar0->rmac_addr_cmd_mem);
+
+ /* Wait till command completes */
+ if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
+ S2IO_BIT_RESET)) {
+ DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
+ return FAILURE;
+ }
+ tmp64 = readq(&bar0->rmac_addr_data0_mem);
+
+ return tmp64 >> 16;
+}
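+
+/*
+ * Note: rmac_addr_data0_mem holds the 48-bit MAC in the upper bits of
+ * the 64-bit word; that is why the read above shifts the result right
+ * by 16, while the write helpers wrap the address in
+ * RMAC_ADDR_DATA0_MEM_ADDR() to shift it back up.
+ */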
+
+/**
+ * s2io_set_mac_addr - driver entry point to change the MAC address.
+ * @dev : pointer to the device structure.
+ * @p : a sockaddr structure holding the new MAC address.
+ */
+
+static int s2io_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ /* store the MAC address in CAM */
+ return do_s2io_prog_unicast(dev, dev->dev_addr);
+}
+/**
+ * do_s2io_prog_unicast - Programs the Xframe mac address
+ * @dev : pointer to the device structure.
+ * @addr: a uchar pointer to the new mac address which is to be set.
+ * Description : This procedure will program the Xframe to receive
+ * frames with the new MAC address
+ * Return value: SUCCESS on success and an appropriate (-)ve integer
+ * as defined in errno.h file on failure.
+ */
+
+static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
+{
+ struct s2io_nic *sp = netdev_priv(dev);
+ register u64 mac_addr = 0, perm_addr = 0;
+ int i;
+ u64 tmp64;
+ struct config_param *config = &sp->config;
+
+ /*
+ * Set the new MAC address as the new unicast filter and reflect this
+ * change on the device address registered with the OS. It will be
+ * at offset 0.
+ */
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac_addr <<= 8;
+ mac_addr |= addr[i];
+ perm_addr <<= 8;
+ perm_addr |= sp->def_mac_addr[0].mac_addr[i];
+ }
+
+ /* check if the dev_addr is different from perm_addr */
+ if (mac_addr == perm_addr)
+ return SUCCESS;
+
+ /* check if the mac is already present in the CAM */
+ for (i = 1; i < config->max_mac_addr; i++) {
+ tmp64 = do_s2io_read_unicast_mc(sp, i);
+ if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
+ break;
+
+ if (tmp64 == mac_addr) {
+ DBG_PRINT(INFO_DBG,
+ "MAC addr:0x%llx already present in CAM\n",
+ (unsigned long long)mac_addr);
+ return SUCCESS;
+ }
+ }
+ if (i == config->max_mac_addr) {
+ DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
+ return FAILURE;
+ }
+ /* Update the internal structure with this new mac address */
+ do_s2io_copy_mac_addr(sp, i, mac_addr);
+
+ return do_s2io_add_mac(sp, mac_addr, i);
+}
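+
+/*
+ * Sketch of the packing used above: the MAC bytes are folded into a u64
+ * most-significant-byte first, so 00:11:22:33:44:55 becomes
+ * 0x001122334455ULL:
+ *
+ *	u64 v = 0;
+ *
+ *	for (i = 0; i < ETH_ALEN; i++)
+ *		v = (v << 8) | addr[i];
+ */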
+
+/**
+ * s2io_ethtool_sset - Sets different link parameters.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @info: pointer to the structure with parameters given by ethtool to set
+ * link information.
+ * Description:
+ * The function sets different link parameters provided by the user onto
+ * the NIC.
+ * Return value:
+ * 0 on success.
+ */
+
+static int s2io_ethtool_sset(struct net_device *dev,
+ struct ethtool_cmd *info)
+{
+ struct s2io_nic *sp = netdev_priv(dev);
+
+ if ((info->autoneg == AUTONEG_ENABLE) ||
+ (ethtool_cmd_speed(info) != SPEED_10000) ||
+ (info->duplex != DUPLEX_FULL))
+ return -EINVAL;
+
+ s2io_close(sp->dev);
+ s2io_open(sp->dev);
+
+ return 0;
+}
+
+/**
+ * s2io_ethtool_gset - Return link specific information.
+ * @sp : private member of the device structure, pointer to the
+ * s2io_nic structure.
+ * @info : pointer to the structure with parameters given by ethtool
+ * to return link information.
+ * Description:
+ * Returns link specific information like speed, duplex etc. to ethtool.
+ * Return value :
+ * return 0 on success.
+ */
+
+static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
+{
+ struct s2io_nic *sp = netdev_priv(dev);
+ info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
+ info->port = PORT_FIBRE;
+
+ /* info->transceiver */
+ info->transceiver = XCVR_EXTERNAL;
+
+ if (netif_carrier_ok(sp->dev)) {
+ ethtool_cmd_speed_set(info, SPEED_10000);
+ info->duplex = DUPLEX_FULL;
+ } else {
+ ethtool_cmd_speed_set(info, -1);
+ info->duplex = -1;
+ }
+
+ info->autoneg = AUTONEG_DISABLE;
+ return 0;
+}
+
+/**
+ * s2io_ethtool_gdrvinfo - Returns driver specific information.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @info : pointer to the structure with parameters given by ethtool to
+ * return driver information.
+ * Description:
+ * Returns driver specific information like name, version etc. to ethtool.
+ * Return value:
+ * void
+ */
+
+static void s2io_ethtool_gdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct s2io_nic *sp = netdev_priv(dev);
+
+ strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
+ strncpy(info->version, s2io_driver_version, sizeof(info->version));
+ strncpy(info->fw_version, "", sizeof(info->fw_version));
+ strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
+ info->regdump_len = XENA_REG_SPACE;
+ info->eedump_len = XENA_EEPROM_SPACE;
+}
+
+/**
+ * s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
+ * @sp: private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @regs : pointer to the structure with parameters given by ethtool for
+ * dumping the registers.
+ * @reg_space: The buffer into which all the registers are dumped.
+ * Description:
+ * Dumps the entire register space of the Xframe NIC into the user given
+ * buffer area.
+ * Return value :
+ * void.
+ */
+
+static void s2io_ethtool_gregs(struct net_device *dev,
+ struct ethtool_regs *regs, void *space)
+{
+ int i;
+ u64 reg;
+ u8 *reg_space = (u8 *)space;
+ struct s2io_nic *sp = netdev_priv(dev);
+
+ regs->len = XENA_REG_SPACE;
+ regs->version = sp->pdev->subsystem_device;
+
+ for (i = 0; i < regs->len; i += 8) {
+ reg = readq(sp->bar0 + i);
+ memcpy((reg_space + i), &reg, 8);
+ }
+}
+
+/*
+ * s2io_set_led - control NIC led
+ */
+static void s2io_set_led(struct s2io_nic *sp, bool on)
+{
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ u16 subid = sp->pdev->subsystem_device;
+ u64 val64;
+
+ if ((sp->device_type == XFRAME_II_DEVICE) ||
+ ((subid & 0xFF) >= 0x07)) {
+ val64 = readq(&bar0->gpio_control);
+ if (on)
+ val64 |= GPIO_CTRL_GPIO_0;
+ else
+ val64 &= ~GPIO_CTRL_GPIO_0;
+
+ writeq(val64, &bar0->gpio_control);
+ } else {
+ val64 = readq(&bar0->adapter_control);
+ if (on)
+ val64 |= ADAPTER_LED_ON;
+ else
+ val64 &= ~ADAPTER_LED_ON;
+
+ writeq(val64, &bar0->adapter_control);
+ }
+}
+
+/**
+ * s2io_ethtool_set_led - To physically identify the nic on the system.
+ * @dev : network device
+ * @state: led setting
+ *
+ * Description: Used to physically identify the NIC on the system.
+ * The Link LED will blink for a time specified by the user for
+ * identification.
+ * NOTE: The Link has to be Up to be able to blink the LED. Hence
+ * identification is possible only if its link is up.
+ */
+
+static int s2io_ethtool_set_led(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct s2io_nic *sp = netdev_priv(dev);
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ u16 subid = sp->pdev->subsystem_device;
+
+ if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
+ u64 val64 = readq(&bar0->adapter_control);
+ if (!(val64 & ADAPTER_CNTL_EN)) {
+ pr_err("Adapter Link down, cannot blink LED\n");
+ return -EAGAIN;
+ }
+ }
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ sp->adapt_ctrl_org = readq(&bar0->gpio_control);
+ return 1; /* cycle on/off once per second */
+
+ case ETHTOOL_ID_ON:
+ s2io_set_led(sp, true);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ s2io_set_led(sp, false);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
+ writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
+ }
+
+ return 0;
+}
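+
+/*
+ * Usage note (illustrative): the handler above backs the standard
+ * identify operation, e.g.:
+ *
+ *	ethtool -p eth0 10	# blink the link LED for 10 seconds
+ */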
+
+static void s2io_ethtool_gringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct s2io_nic *sp = netdev_priv(dev);
+ int i, tx_desc_count = 0, rx_desc_count = 0;
+
+ if (sp->rxd_mode == RXD_MODE_1) {
+ ering->rx_max_pending = MAX_RX_DESC_1;
+ ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
+ } else {
+ ering->rx_max_pending = MAX_RX_DESC_2;
+ ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
+ }
+
+ ering->rx_mini_max_pending = 0;
+ ering->tx_max_pending = MAX_TX_DESC;
+
+ for (i = 0; i < sp->config.rx_ring_num; i++)
+ rx_desc_count += sp->config.rx_cfg[i].num_rxd;
+ ering->rx_pending = rx_desc_count;
+ ering->rx_jumbo_pending = rx_desc_count;
+ ering->rx_mini_pending = 0;
+
+ for (i = 0; i < sp->config.tx_fifo_num; i++)
+ tx_desc_count += sp->config.tx_cfg[i].fifo_len;
+ ering->tx_pending = tx_desc_count;
+ DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
+}
+
+/**
+ * s2io_ethtool_getpause_data - Pause frame generation and reception.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @ep : pointer to the structure with pause parameters given by ethtool.
+ * Description:
+ * Returns the Pause frame generation and reception capability of the NIC.
+ * Return value:
+ * void
+ */
+static void s2io_ethtool_getpause_data(struct net_device *dev,
+ struct ethtool_pauseparam *ep)
+{
+ u64 val64;
+ struct s2io_nic *sp = netdev_priv(dev);
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+
+ val64 = readq(&bar0->rmac_pause_cfg);
+ if (val64 & RMAC_PAUSE_GEN_ENABLE)
+ ep->tx_pause = true;
+ if (val64 & RMAC_PAUSE_RX_ENABLE)
+ ep->rx_pause = true;
+ ep->autoneg = false;
+}
+
+/**
+ * s2io_ethtool_setpause_data - set/reset pause frame generation.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @ep : pointer to the structure with pause parameters given by ethtool.
+ * Description:
+ * It can be used to set or reset Pause frame generation or reception
+ * support of the NIC.
+ * Return value:
+ * int, returns 0 on Success
+ */
+
+static int s2io_ethtool_setpause_data(struct net_device *dev,
+ struct ethtool_pauseparam *ep)
+{
+ u64 val64;
+ struct s2io_nic *sp = netdev_priv(dev);
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+
+ val64 = readq(&bar0->rmac_pause_cfg);
+ if (ep->tx_pause)
+ val64 |= RMAC_PAUSE_GEN_ENABLE;
+ else
+ val64 &= ~RMAC_PAUSE_GEN_ENABLE;
+ if (ep->rx_pause)
+ val64 |= RMAC_PAUSE_RX_ENABLE;
+ else
+ val64 &= ~RMAC_PAUSE_RX_ENABLE;
+ writeq(val64, &bar0->rmac_pause_cfg);
+ return 0;
+}
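+
+/*
+ * Usage note (illustrative): pause parameters are normally driven from
+ * userspace, e.g.:
+ *
+ *	ethtool -A eth0 rx on tx on	# sets the RMAC_PAUSE_* bits above
+ */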
+
+/**
+ * read_eeprom - reads 4 bytes of data from user given offset.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @off : offset from which the data is to be read
+ * @data : It's an output parameter where the data read at the given
+ * offset is stored.
+ * Description:
+ * Will read 4 bytes of data from the user given offset and return the
+ * read data.
+ * NOTE: Only the part of the EEPROM visible through the I2C bus can
+ * be read.
+ * Return value:
+ * -1 on failure and 0 on success.
+ */
+
+#define S2IO_DEV_ID 5
+static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
+{
+ int ret = -1;
+ u32 exit_cnt = 0;
+ u64 val64;
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+
+ if (sp->device_type == XFRAME_I_DEVICE) {
+ val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
+ I2C_CONTROL_ADDR(off) |
+ I2C_CONTROL_BYTE_CNT(0x3) |
+ I2C_CONTROL_READ |
+ I2C_CONTROL_CNTL_START;
+ SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
+
+ while (exit_cnt < 5) {
+ val64 = readq(&bar0->i2c_control);
+ if (I2C_CONTROL_CNTL_END(val64)) {
+ *data = I2C_CONTROL_GET_DATA(val64);
+ ret = 0;
+ break;
+ }
+ msleep(50);
+ exit_cnt++;
+ }
+ }
+
+ if (sp->device_type == XFRAME_II_DEVICE) {
+ val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
+ SPI_CONTROL_BYTECNT(0x3) |
+ SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
+ SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
+ val64 |= SPI_CONTROL_REQ;
+ SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
+ while (exit_cnt < 5) {
+ val64 = readq(&bar0->spi_control);
+ if (val64 & SPI_CONTROL_NACK) {
+ ret = 1;
+ break;
+ } else if (val64 & SPI_CONTROL_DONE) {
+ *data = readq(&bar0->spi_data);
+ *data &= 0xffffff;
+ ret = 0;
+ break;
+ }
+ msleep(50);
+ exit_cnt++;
+ }
+ }
+ return ret;
+}
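+
+/*
+ * Illustrative sketch: callers such as s2io_ethtool_geeprom() fetch one
+ * word at a time, e.g.:
+ *
+ *	u64 data;
+ *
+ *	if (read_eeprom(sp, 0x4F0, &data))
+ *		DBG_PRINT(ERR_DBG, "EEPROM read failed\n");
+ */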
+
+/**
+ * write_eeprom - actually writes the relevant part of the data value.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @off : offset at which the data must be written
+ * @data : The data that is to be written
+ * @cnt : Number of bytes of the data that are actually to be written into
+ * the Eeprom. (max of 3)
+ * Description:
+ * Actually writes the relevant part of the data value into the Eeprom
+ * through the I2C bus.
+ * Return value:
+ * 0 on success, -1 on failure.
+ */
+
+static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
+{
+ int exit_cnt = 0, ret = -1;
+ u64 val64;
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+
+ if (sp->device_type == XFRAME_I_DEVICE) {
+ val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
+ I2C_CONTROL_ADDR(off) |
+ I2C_CONTROL_BYTE_CNT(cnt) |
+ I2C_CONTROL_SET_DATA((u32)data) |
+ I2C_CONTROL_CNTL_START;
+ SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
+
+ while (exit_cnt < 5) {
+ val64 = readq(&bar0->i2c_control);
+ if (I2C_CONTROL_CNTL_END(val64)) {
+ if (!(val64 & I2C_CONTROL_NACK))
+ ret = 0;
+ break;
+ }
+ msleep(50);
+ exit_cnt++;
+ }
+ }
+
+ if (sp->device_type == XFRAME_II_DEVICE) {
+ int write_cnt = (cnt == 8) ? 0 : cnt;
+ writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
+
+ val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
+ SPI_CONTROL_BYTECNT(write_cnt) |
+ SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
+ SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
+ val64 |= SPI_CONTROL_REQ;
+ SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
+ while (exit_cnt < 5) {
+ val64 = readq(&bar0->spi_control);
+ if (val64 & SPI_CONTROL_NACK) {
+ ret = 1;
+ break;
+ } else if (val64 & SPI_CONTROL_DONE) {
+ ret = 0;
+ break;
+ }
+ msleep(50);
+ exit_cnt++;
+ }
+ }
+ return ret;
+}
+
+static void s2io_vpd_read(struct s2io_nic *nic)
+{
+ u8 *vpd_data;
+ u8 data;
+ int i = 0, cnt, len, fail = 0;
+ int vpd_addr = 0x80;
+ struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
+
+ if (nic->device_type == XFRAME_II_DEVICE) {
+ strcpy(nic->product_name, "Xframe II 10GbE network adapter");
+ vpd_addr = 0x80;
+ } else {
+ strcpy(nic->product_name, "Xframe I 10GbE network adapter");
+ vpd_addr = 0x50;
+ }
+ strcpy(nic->serial_num, "NOT AVAILABLE");
+
+ vpd_data = kmalloc(256, GFP_KERNEL);
+ if (!vpd_data) {
+ swstats->mem_alloc_fail_cnt++;
+ return;
+ }
+ swstats->mem_allocated += 256;
+
+ for (i = 0; i < 256; i += 4) {
+ pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
+ pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
+ pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
+ for (cnt = 0; cnt < 5; cnt++) {
+ msleep(2);
+ pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
+ if (data == 0x80)
+ break;
+ }
+ if (cnt >= 5) {
+ DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
+ fail = 1;
+ break;
+ }
+ pci_read_config_dword(nic->pdev, (vpd_addr + 4),
+ (u32 *)&vpd_data[i]);
+ }
+
+ if (!fail) {
+ /* read serial number of adapter */
+ for (cnt = 0; cnt < 252; cnt++) {
+ if ((vpd_data[cnt] == 'S') &&
+ (vpd_data[cnt+1] == 'N')) {
+ len = vpd_data[cnt+2];
+ if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
+ memcpy(nic->serial_num,
+ &vpd_data[cnt + 3],
+ len);
+ memset(nic->serial_num+len,
+ 0,
+ VPD_STRING_LEN-len);
+ break;
+ }
+ }
+ }
+ }
+
+ if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
+ len = vpd_data[1];
+ memcpy(nic->product_name, &vpd_data[3], len);
+ nic->product_name[len] = 0;
+ }
+ kfree(vpd_data);
+ swstats->mem_freed += 256;
+}
+
+/**
+ * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @eeprom : pointer to the user level structure provided by ethtool,
+ * containing all relevant information.
+ * @data_buf : user provided buffer into which the EEPROM data is read.
+ * Description: Reads the values stored in the Eeprom at given offset
+ * for a given length. Stores these values in the input argument data
+ * buffer 'data_buf' and returns these to the caller (ethtool.)
+ * Return value:
+ * int 0 on success
+ */
+
+static int s2io_ethtool_geeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 * data_buf)
+{
+ u32 i, valid;
+ u64 data;
+ struct s2io_nic *sp = netdev_priv(dev);
+
+ eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
+
+ if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
+ eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
+
+ for (i = 0; i < eeprom->len; i += 4) {
+ if (read_eeprom(sp, (eeprom->offset + i), &data)) {
+ DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
+ return -EFAULT;
+ }
+ valid = INV(data);
+ memcpy((data_buf + i), &valid, 4);
+ }
+ return 0;
+}
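+
+/*
+ * Usage note (illustrative):
+ *
+ *	ethtool -e eth0 offset 0 length 16	# dumps via the handler above
+ */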
+
+/**
+ * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @eeprom : pointer to the user level structure provided by ethtool,
+ * containing all relevant information.
+ * @data_buf : user defined value to be written into Eeprom.
+ * Description:
+ * Tries to write the user provided value in the Eeprom, at the offset
+ * given by the user.
+ * Return value:
+ * 0 on success, -EFAULT on failure.
+ */
+
+static int s2io_ethtool_seeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom,
+ u8 *data_buf)
+{
+ int len = eeprom->len, cnt = 0;
+ u64 valid = 0, data;
+ struct s2io_nic *sp = netdev_priv(dev);
+
+ if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
+ DBG_PRINT(ERR_DBG,
+ "ETHTOOL_WRITE_EEPROM Err: "
+ "Magic value is wrong, it is 0x%x should be 0x%x\n",
+ eeprom->magic,
+ (sp->pdev->vendor | (sp->pdev->device << 16)));
+ return -EFAULT;
+ }
+
+ while (len) {
+ data = (u32)data_buf[cnt] & 0x000000FF;
+ if (data)
+ valid = (u32)(data << 24);
+ else
+ valid = data;
+
+ if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
+ DBG_PRINT(ERR_DBG,
+ "ETHTOOL_WRITE_EEPROM Err: "
+ "Cannot write into the specified offset\n");
+ return -EFAULT;
+ }
+ cnt++;
+ len--;
+ }
+
+ return 0;
+}
+
+/**
+ * s2io_register_test - reads and writes into all clock domains.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @data : variable that returns the result of each of the tests conducted
+ * by the driver.
+ * Description:
+ * Read and write into all clock domains. The NIC has 3 clock domains;
+ * the test verifies that registers in all three regions are accessible.
+ * Return value:
+ * 0 on success.
+ */
+
+static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
+{
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ u64 val64 = 0, exp_val;
+ int fail = 0;
+
+ val64 = readq(&bar0->pif_rd_swapper_fb);
+ if (val64 != 0x123456789abcdefULL) {
+ fail = 1;
+ DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
+ }
+
+ val64 = readq(&bar0->rmac_pause_cfg);
+ if (val64 != 0xc000ffff00000000ULL) {
+ fail = 1;
+ DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
+ }
+
+ val64 = readq(&bar0->rx_queue_cfg);
+ if (sp->device_type == XFRAME_II_DEVICE)
+ exp_val = 0x0404040404040404ULL;
+ else
+ exp_val = 0x0808080808080808ULL;
+ if (val64 != exp_val) {
+ fail = 1;
+ DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
+ }
+
+ val64 = readq(&bar0->xgxs_efifo_cfg);
+ if (val64 != 0x000000001923141EULL) {
+ fail = 1;
+ DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
+ }
+
+ val64 = 0x5A5A5A5A5A5A5A5AULL;
+ writeq(val64, &bar0->xmsi_data);
+ val64 = readq(&bar0->xmsi_data);
+ if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
+ fail = 1;
+ DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
+ }
+
+ val64 = 0xA5A5A5A5A5A5A5A5ULL;
+ writeq(val64, &bar0->xmsi_data);
+ val64 = readq(&bar0->xmsi_data);
+ if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
+ fail = 1;
+ DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
+ }
+
+ *data = fail;
+ return fail;
+}
+
+/**
+ * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @data: variable that returns the result of each of the tests conducted by
+ * the driver.
+ * Description:
+ * Verify that the EEPROM in the Xena can be programmed using the I2C_CONTROL
+ * register.
+ * Return value:
+ * 0 on success.
+ */
+
+static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
+{
+ int fail = 0;
+ u64 ret_data, org_4F0, org_7F0;
+ u8 saved_4F0 = 0, saved_7F0 = 0;
+ struct net_device *dev = sp->dev;
+
+ /* Test Write Error at offset 0 */
+ /* Note that SPI interface allows write access to all areas
+ * of EEPROM. Hence doing all negative testing only for Xframe I.
+ */
+ if (sp->device_type == XFRAME_I_DEVICE)
+ if (!write_eeprom(sp, 0, 0, 3))
+ fail = 1;
+
+ /* Save current values at offsets 0x4F0 and 0x7F0 */
+ if (!read_eeprom(sp, 0x4F0, &org_4F0))
+ saved_4F0 = 1;
+ if (!read_eeprom(sp, 0x7F0, &org_7F0))
+ saved_7F0 = 1;
+
+ /* Test Write at offset 4f0 */
+ if (write_eeprom(sp, 0x4F0, 0x012345, 3))
+ fail = 1;
+ if (read_eeprom(sp, 0x4F0, &ret_data))
+ fail = 1;
+
+ if (ret_data != 0x012345) {
+ DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
+ "Data written %llx Data read %llx\n",
+ dev->name, (unsigned long long)0x12345,
+ (unsigned long long)ret_data);
+ fail = 1;
+ }
+
+ /* Reset the EEPROM data to FFFF */
+ write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
+
+ /* Test Write Request Error at offset 0x7c */
+ if (sp->device_type == XFRAME_I_DEVICE)
+ if (!write_eeprom(sp, 0x07C, 0, 3))
+ fail = 1;
+
+ /* Test Write Request at offset 0x7f0 */
+ if (write_eeprom(sp, 0x7F0, 0x012345, 3))
+ fail = 1;
+ if (read_eeprom(sp, 0x7F0, &ret_data))
+ fail = 1;
+
+ if (ret_data != 0x012345) {
+ DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
+ "Data written %llx Data read %llx\n",
+ dev->name, (unsigned long long)0x12345,
+ (unsigned long long)ret_data);
+ fail = 1;
+ }
+
+ /* Reset the EEPROM data to FFFF */
+ write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
+
+ if (sp->device_type == XFRAME_I_DEVICE) {
+ /* Test Write Error at offset 0x80 */
+ if (!write_eeprom(sp, 0x080, 0, 3))
+ fail = 1;
+
+ /* Test Write Error at offset 0xfc */
+ if (!write_eeprom(sp, 0x0FC, 0, 3))
+ fail = 1;
+
+ /* Test Write Error at offset 0x100 */
+ if (!write_eeprom(sp, 0x100, 0, 3))
+ fail = 1;
+
+ /* Test Write Error at offset 4ec */
+ if (!write_eeprom(sp, 0x4EC, 0, 3))
+ fail = 1;
+ }
+
+ /* Restore values at offsets 0x4F0 and 0x7F0 */
+ if (saved_4F0)
+ write_eeprom(sp, 0x4F0, org_4F0, 3);
+ if (saved_7F0)
+ write_eeprom(sp, 0x7F0, org_7F0, 3);
+
+ *data = fail;
+ return fail;
+}
+
+/**
+ * s2io_bist_test - invokes the MemBist test of the card.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @data: variable that returns the result of each of the tests conducted by
+ * the driver.
+ * Description:
+ * This invokes the MemBist test of the card. We give around
+ * 2 secs time for the Test to complete. If it's still not complete
+ * within this period, we consider that the test failed.
+ * Return value:
+ * 0 on success and -1 on failure.
+ */
+
+static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
+{
+ u8 bist = 0;
+ int cnt = 0, ret = -1;
+
+ pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
+ bist |= PCI_BIST_START;
+ pci_write_config_byte(sp->pdev, PCI_BIST, bist);
+
+ while (cnt < 20) {
+ pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
+ if (!(bist & PCI_BIST_START)) {
+ *data = (bist & PCI_BIST_CODE_MASK);
+ ret = 0;
+ break;
+ }
+ msleep(100);
+ cnt++;
+ }
+
+ return ret;
+}
+
+/**
+ * s2io_link_test - verifies the link state of the nic
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @data: variable that returns the result of each of the test conducted by
+ * the driver.
+ * Description:
+ * The function verifies the link state of the NIC and updates the input
+ * argument 'data' appropriately.
+ * Return value:
+ * 0 on success.
+ */
+
+static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
+{
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ u64 val64;
+
+ val64 = readq(&bar0->adapter_status);
+ if (!(LINK_IS_UP(val64)))
+ *data = 1;
+ else
+ *data = 0;
+
+ return *data;
+}
+
+/**
+ * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @data : variable that returns the result of each of the tests
+ * conducted by the driver.
+ * Description:
+ * This is one of the offline test that tests the read and write
+ * access to the RldRam chip on the NIC.
+ * Return value:
+ * 0 on success.
+ */
+
+static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
+{
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ u64 val64;
+ int cnt, iteration = 0, test_fail = 0;
+
+ val64 = readq(&bar0->adapter_control);
+ val64 &= ~ADAPTER_ECC_EN;
+ writeq(val64, &bar0->adapter_control);
+
+ val64 = readq(&bar0->mc_rldram_test_ctrl);
+ val64 |= MC_RLDRAM_TEST_MODE;
+ SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
+
+ val64 = readq(&bar0->mc_rldram_mrs);
+ val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
+ SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
+
+ val64 |= MC_RLDRAM_MRS_ENABLE;
+ SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
+
+ while (iteration < 2) {
+ val64 = 0x55555555aaaa0000ULL;
+ if (iteration == 1)
+ val64 ^= 0xFFFFFFFFFFFF0000ULL;
+ writeq(val64, &bar0->mc_rldram_test_d0);
+
+ val64 = 0xaaaa5a5555550000ULL;
+ if (iteration == 1)
+ val64 ^= 0xFFFFFFFFFFFF0000ULL;
+ writeq(val64, &bar0->mc_rldram_test_d1);
+
+ val64 = 0x55aaaaaaaa5a0000ULL;
+ if (iteration == 1)
+ val64 ^= 0xFFFFFFFFFFFF0000ULL;
+ writeq(val64, &bar0->mc_rldram_test_d2);
+
+ val64 = 0x0000003ffffe0100ULL;
+ writeq(val64, &bar0->mc_rldram_test_add);
+
+ val64 = MC_RLDRAM_TEST_MODE |
+ MC_RLDRAM_TEST_WRITE |
+ MC_RLDRAM_TEST_GO;
+ SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
+
+ for (cnt = 0; cnt < 5; cnt++) {
+ val64 = readq(&bar0->mc_rldram_test_ctrl);
+ if (val64 & MC_RLDRAM_TEST_DONE)
+ break;
+ msleep(200);
+ }
+
+ if (cnt == 5)
+ break;
+
+ val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
+ SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
+
+ for (cnt = 0; cnt < 5; cnt++) {
+ val64 = readq(&bar0->mc_rldram_test_ctrl);
+ if (val64 & MC_RLDRAM_TEST_DONE)
+ break;
+ msleep(500);
+ }
+
+ if (cnt == 5)
+ break;
+
+ val64 = readq(&bar0->mc_rldram_test_ctrl);
+ if (!(val64 & MC_RLDRAM_TEST_PASS))
+ test_fail = 1;
+
+ iteration++;
+ }
+
+ *data = test_fail;
+
+ /* Bring the adapter out of test mode */
+ SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
+
+ return test_fail;
+}
+
+/**
+ * s2io_ethtool_test - conducts 5 tests to determine the health of the card.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @ethtest : pointer to a ethtool command specific structure that will be
+ * returned to the user.
+ * @data : variable that returns the result of each of the test
+ * conducted by the driver.
+ * Description:
+ * This function conducts 5 tests (4 offline and 1 online) to determine
+ * the health of the card.
+ * Return value:
+ * void
+ */
+
+static void s2io_ethtool_test(struct net_device *dev,
+ struct ethtool_test *ethtest,
+ uint64_t *data)
+{
+ struct s2io_nic *sp = netdev_priv(dev);
+ int orig_state = netif_running(sp->dev);
+
+ if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline Tests. */
+ if (orig_state)
+ s2io_close(sp->dev);
+
+ if (s2io_register_test(sp, &data[0]))
+ ethtest->flags |= ETH_TEST_FL_FAILED;
+
+ s2io_reset(sp);
+
+ if (s2io_rldram_test(sp, &data[3]))
+ ethtest->flags |= ETH_TEST_FL_FAILED;
+
+ s2io_reset(sp);
+
+ if (s2io_eeprom_test(sp, &data[1]))
+ ethtest->flags |= ETH_TEST_FL_FAILED;
+
+ if (s2io_bist_test(sp, &data[4]))
+ ethtest->flags |= ETH_TEST_FL_FAILED;
+
+ if (orig_state)
+ s2io_open(sp->dev);
+
+ data[2] = 0;
+ } else {
+ /* Online Tests. */
+ if (!orig_state) {
+ DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
+ dev->name);
+ data[0] = -1;
+ data[1] = -1;
+ data[2] = -1;
+ data[3] = -1;
+ data[4] = -1;
+ }
+
+ if (s2io_link_test(sp, &data[2]))
+ ethtest->flags |= ETH_TEST_FL_FAILED;
+
+ data[0] = 0;
+ data[1] = 0;
+ data[3] = 0;
+ data[4] = 0;
+ }
+}
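+
+/*
+ * Usage note (illustrative): the self-test above is driven by:
+ *
+ *	ethtool -t eth0 offline	# register/eeprom/rldram/bist + link test
+ *	ethtool -t eth0 online	# link test only
+ */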
+
+static void s2io_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *estats,
+ u64 *tmp_stats)
+{
+ int i = 0, k;
+ struct s2io_nic *sp = netdev_priv(dev);
+ struct stat_block *stats = sp->mac_control.stats_info;
+ struct swStat *swstats = &stats->sw_stat;
+ struct xpakStat *xstats = &stats->xpak_stat;
+
+ s2io_updt_stats(sp);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 |
+ le32_to_cpu(stats->tmac_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
+ le32_to_cpu(stats->tmac_data_octets);
+ tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
+ le32_to_cpu(stats->tmac_mcst_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
+ le32_to_cpu(stats->tmac_bcst_frms);
+ tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
+ le32_to_cpu(stats->tmac_ttl_octets);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
+ le32_to_cpu(stats->tmac_ucst_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
+ le32_to_cpu(stats->tmac_nucst_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
+ le32_to_cpu(stats->tmac_any_err_frms);
+ tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
+ tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
+ le32_to_cpu(stats->tmac_vld_ip);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
+ le32_to_cpu(stats->tmac_drop_ip);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
+ le32_to_cpu(stats->tmac_icmp);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
+ le32_to_cpu(stats->tmac_rst_tcp);
+ tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
+ tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
+ le32_to_cpu(stats->tmac_udp);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
+ le32_to_cpu(stats->rmac_vld_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
+ le32_to_cpu(stats->rmac_data_octets);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
+ le32_to_cpu(stats->rmac_vld_mcst_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
+ le32_to_cpu(stats->rmac_vld_bcst_frms);
+ tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
+ tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
+ le32_to_cpu(stats->rmac_ttl_octets);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
+ | le32_to_cpu(stats->rmac_accepted_ucst_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
+ << 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
+ le32_to_cpu(stats->rmac_discarded_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->rmac_drop_events_oflow)
+ << 32 | le32_to_cpu(stats->rmac_drop_events);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
+ le32_to_cpu(stats->rmac_usized_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
+ le32_to_cpu(stats->rmac_osized_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
+ le32_to_cpu(stats->rmac_frag_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
+ le32_to_cpu(stats->rmac_jabber_frms);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
+ le32_to_cpu(stats->rmac_ip);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
+ tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
+ le32_to_cpu(stats->rmac_drop_ip);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
+ le32_to_cpu(stats->rmac_icmp);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
+ le32_to_cpu(stats->rmac_udp);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
+ le32_to_cpu(stats->rmac_err_drp_udp);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
+ tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
+ tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
+ tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
+ tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
+ tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
+ tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
+ tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
+ tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
+ le32_to_cpu(stats->rmac_pause_cnt);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
+ le32_to_cpu(stats->rmac_accepted_ip);
+ tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
+ tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
+ tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
+ tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
+ tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
+ tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
+ tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
+ tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
+ tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
+ tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
+ tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
+ tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
+ tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
+ tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
+ tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
+ tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
+ tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
+ tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
+ tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
+
+ /* Enhanced statistics exist only for Hercules */
+ if (sp->device_type == XFRAME_II_DEVICE) {
+ tmp_stats[i++] =
+ le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
+ tmp_stats[i++] =
+ le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
+ tmp_stats[i++] =
+ le64_to_cpu(stats->rmac_ttl_8192_max_frms);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
+ tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
+ tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
+ tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
+ tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
+ tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
+ tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
+ tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
+ tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
+ tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
+ }
+
+ tmp_stats[i++] = 0;
+ tmp_stats[i++] = swstats->single_ecc_errs;
+ tmp_stats[i++] = swstats->double_ecc_errs;
+ tmp_stats[i++] = swstats->parity_err_cnt;
+ tmp_stats[i++] = swstats->serious_err_cnt;
+ tmp_stats[i++] = swstats->soft_reset_cnt;
+ tmp_stats[i++] = swstats->fifo_full_cnt;
+ for (k = 0; k < MAX_RX_RINGS; k++)
+ tmp_stats[i++] = swstats->ring_full_cnt[k];
+ tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
+ tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
+ tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
+ tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
+ tmp_stats[i++] = xstats->alarm_laser_output_power_high;
+ tmp_stats[i++] = xstats->alarm_laser_output_power_low;
+ tmp_stats[i++] = xstats->warn_transceiver_temp_high;
+ tmp_stats[i++] = xstats->warn_transceiver_temp_low;
+ tmp_stats[i++] = xstats->warn_laser_bias_current_high;
+ tmp_stats[i++] = xstats->warn_laser_bias_current_low;
+ tmp_stats[i++] = xstats->warn_laser_output_power_high;
+ tmp_stats[i++] = xstats->warn_laser_output_power_low;
+ tmp_stats[i++] = swstats->clubbed_frms_cnt;
+ tmp_stats[i++] = swstats->sending_both;
+ tmp_stats[i++] = swstats->outof_sequence_pkts;
+ tmp_stats[i++] = swstats->flush_max_pkts;
+ if (swstats->num_aggregations) {
+ u64 tmp = swstats->sum_avg_pkts_aggregated;
+ int count = 0;
+ /*
+ * Since 64-bit divide does not work on all platforms,
+ * do repeated subtraction.
+ */
+ while (tmp >= swstats->num_aggregations) {
+ tmp -= swstats->num_aggregations;
+ count++;
+ }
+ tmp_stats[i++] = count;
+ } else
+ tmp_stats[i++] = 0;
+ tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
+ tmp_stats[i++] = swstats->pci_map_fail_cnt;
+ tmp_stats[i++] = swstats->watchdog_timer_cnt;
+ tmp_stats[i++] = swstats->mem_allocated;
+ tmp_stats[i++] = swstats->mem_freed;
+ tmp_stats[i++] = swstats->link_up_cnt;
+ tmp_stats[i++] = swstats->link_down_cnt;
+ tmp_stats[i++] = swstats->link_up_time;
+ tmp_stats[i++] = swstats->link_down_time;
+
+ tmp_stats[i++] = swstats->tx_buf_abort_cnt;
+ tmp_stats[i++] = swstats->tx_desc_abort_cnt;
+ tmp_stats[i++] = swstats->tx_parity_err_cnt;
+ tmp_stats[i++] = swstats->tx_link_loss_cnt;
+ tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
+
+ tmp_stats[i++] = swstats->rx_parity_err_cnt;
+ tmp_stats[i++] = swstats->rx_abort_cnt;
+ tmp_stats[i++] = swstats->rx_parity_abort_cnt;
+ tmp_stats[i++] = swstats->rx_rda_fail_cnt;
+ tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
+ tmp_stats[i++] = swstats->rx_fcs_err_cnt;
+ tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
+ tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
+ tmp_stats[i++] = swstats->rx_unkn_err_cnt;
+ tmp_stats[i++] = swstats->tda_err_cnt;
+ tmp_stats[i++] = swstats->pfc_err_cnt;
+ tmp_stats[i++] = swstats->pcc_err_cnt;
+ tmp_stats[i++] = swstats->tti_err_cnt;
+ tmp_stats[i++] = swstats->tpa_err_cnt;
+ tmp_stats[i++] = swstats->sm_err_cnt;
+ tmp_stats[i++] = swstats->lso_err_cnt;
+ tmp_stats[i++] = swstats->mac_tmac_err_cnt;
+ tmp_stats[i++] = swstats->mac_rmac_err_cnt;
+ tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
+ tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
+ tmp_stats[i++] = swstats->rc_err_cnt;
+ tmp_stats[i++] = swstats->prc_pcix_err_cnt;
+ tmp_stats[i++] = swstats->rpa_err_cnt;
+ tmp_stats[i++] = swstats->rda_err_cnt;
+ tmp_stats[i++] = swstats->rti_err_cnt;
+ tmp_stats[i++] = swstats->mc_err_cnt;
+}
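+
+/*
+ * Usage note (illustrative): the array filled above is reported by
+ * "ethtool -S eth0"; the index order must stay in lockstep with the
+ * string tables returned by s2io_ethtool_get_strings().
+ */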
+
+static int s2io_ethtool_get_regs_len(struct net_device *dev)
+{
+ return XENA_REG_SPACE;
+}
+
+static int s2io_get_eeprom_len(struct net_device *dev)
+{
+ return XENA_EEPROM_SPACE;
+}
+
+static int s2io_get_sset_count(struct net_device *dev, int sset)
+{
+ struct s2io_nic *sp = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_TEST:
+ return S2IO_TEST_LEN;
+ case ETH_SS_STATS:
+ switch (sp->device_type) {
+ case XFRAME_I_DEVICE:
+ return XFRAME_I_STAT_LEN;
+ case XFRAME_II_DEVICE:
+ return XFRAME_II_STAT_LEN;
+ default:
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void s2io_ethtool_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ int stat_size = 0;
+ struct s2io_nic *sp = netdev_priv(dev);
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
+ break;
+ case ETH_SS_STATS:
+ stat_size = sizeof(ethtool_xena_stats_keys);
+ memcpy(data, &ethtool_xena_stats_keys, stat_size);
+ if (sp->device_type == XFRAME_II_DEVICE) {
+ memcpy(data + stat_size,
+ &ethtool_enhanced_stats_keys,
+ sizeof(ethtool_enhanced_stats_keys));
+ stat_size += sizeof(ethtool_enhanced_stats_keys);
+ }
+
+ memcpy(data + stat_size, &ethtool_driver_stats_keys,
+ sizeof(ethtool_driver_stats_keys));
+ }
+}
+
+static int s2io_set_features(struct net_device *dev, u32 features)
+{
+ struct s2io_nic *sp = netdev_priv(dev);
+ u32 changed = (features ^ dev->features) & NETIF_F_LRO;
+
+ if (changed && netif_running(dev)) {
+ int rc;
+
+ s2io_stop_all_tx_queue(sp);
+ s2io_card_down(sp);
+ dev->features = features;
+ rc = s2io_card_up(sp);
+ if (rc)
+ s2io_reset(sp);
+ else
+ s2io_start_all_tx_queue(sp);
+
+ return rc ? rc : 1;
+ }
+
+ return 0;
+}
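+
+/*
+ * Usage note (illustrative): only a change to the LRO bit takes the
+ * card down and back up in the handler above, e.g.:
+ *
+ *	ethtool -K eth0 lro off
+ */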
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_settings = s2io_ethtool_gset,
+ .set_settings = s2io_ethtool_sset,
+ .get_drvinfo = s2io_ethtool_gdrvinfo,
+ .get_regs_len = s2io_ethtool_get_regs_len,
+ .get_regs = s2io_ethtool_gregs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = s2io_get_eeprom_len,
+ .get_eeprom = s2io_ethtool_geeprom,
+ .set_eeprom = s2io_ethtool_seeprom,
+ .get_ringparam = s2io_ethtool_gringparam,
+ .get_pauseparam = s2io_ethtool_getpause_data,
+ .set_pauseparam = s2io_ethtool_setpause_data,
+ .self_test = s2io_ethtool_test,
+ .get_strings = s2io_ethtool_get_strings,
+ .set_phys_id = s2io_ethtool_set_led,
+ .get_ethtool_stats = s2io_get_ethtool_stats,
+ .get_sset_count = s2io_get_sset_count,
+};
+
+/**
+ * s2io_ioctl - Entry point for the Ioctl
+ * @dev : Device pointer.
+ * @rq : An IOCTL specific structure, that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * @cmd : This is used to distinguish between the different commands that
+ * can be passed to the IOCTL functions.
+ * Description:
+ * Currently no special functionality is supported in IOCTL, hence
+ * the function always returns -EOPNOTSUPP.
+ */
+
+static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * s2io_change_mtu - entry point to change MTU size for the device.
+ * @dev : device pointer.
+ * @new_mtu : the new MTU size for the device.
+ * Description: A driver entry point to change MTU size for the device.
+ * Before changing the MTU the device must be stopped.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+
+static int s2io_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct s2io_nic *sp = netdev_priv(dev);
+ int ret = 0;
+
+ if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
+ DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", dev->name);
+ return -EPERM;
+ }
+
+ dev->mtu = new_mtu;
+ if (netif_running(dev)) {
+ s2io_stop_all_tx_queue(sp);
+ s2io_card_down(sp);
+ ret = s2io_card_up(sp);
+ if (ret) {
+ DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
+ __func__);
+ return ret;
+ }
+ s2io_wake_all_tx_queue(sp);
+ } else { /* Device is down */
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ u64 val64 = new_mtu;
+
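+		/*
+		 * vBIT(v, loc, sz) (see s2io.h) expands to
+		 * ((u64)v) << (64 - loc - sz), i.e. it places an sz-bit
+		 * field at big-endian bit offset loc (bit 0 being the MSB).
+		 * Here loc=2, sz=14, so the MTU value is shifted left by 48
+		 * into big-endian bits 2..15 of rmac_max_pyld_len.
+		 */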
+ writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
+ }
+
+ return ret;
+}
+
+/**
+ * s2io_set_link - Set the Link status
+ * @work: work queue entry containing the device private structure
+ * Description: Sets the link status for the adapter
+ */
+
+static void s2io_set_link(struct work_struct *work)
+{
+ struct s2io_nic *nic = container_of(work, struct s2io_nic,
+ set_link_task);
+ struct net_device *dev = nic->dev;
+ struct XENA_dev_config __iomem *bar0 = nic->bar0;
+ register u64 val64;
+ u16 subid;
+
+ rtnl_lock();
+
+ if (!netif_running(dev))
+ goto out_unlock;
+
+ if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
+ /* The card is being reset, no point doing anything */
+ goto out_unlock;
+ }
+
+ subid = nic->pdev->subsystem_device;
+ if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
+ /*
+ * Allow a small delay for the NIC's self-initiated
+ * cleanup to complete.
+ */
+ msleep(100);
+ }
+
+ val64 = readq(&bar0->adapter_status);
+ if (LINK_IS_UP(val64)) {
+ if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
+ if (verify_xena_quiescence(nic)) {
+ val64 = readq(&bar0->adapter_control);
+ val64 |= ADAPTER_CNTL_EN;
+ writeq(val64, &bar0->adapter_control);
+ if (CARDS_WITH_FAULTY_LINK_INDICATORS(
+ nic->device_type, subid)) {
+ val64 = readq(&bar0->gpio_control);
+ val64 |= GPIO_CTRL_GPIO_0;
+ writeq(val64, &bar0->gpio_control);
+ val64 = readq(&bar0->gpio_control);
+ } else {
+ val64 |= ADAPTER_LED_ON;
+ writeq(val64, &bar0->adapter_control);
+ }
+ nic->device_enabled_once = true;
+ } else {
+ DBG_PRINT(ERR_DBG,
+ "%s: Error: device is not Quiescent\n",
+ dev->name);
+ s2io_stop_all_tx_queue(nic);
+ }
+ }
+ val64 = readq(&bar0->adapter_control);
+ val64 |= ADAPTER_LED_ON;
+ writeq(val64, &bar0->adapter_control);
+ s2io_link(nic, LINK_UP);
+ } else {
+ if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
+ subid)) {
+ val64 = readq(&bar0->gpio_control);
+ val64 &= ~GPIO_CTRL_GPIO_0;
+ writeq(val64, &bar0->gpio_control);
+ val64 = readq(&bar0->gpio_control);
+ }
+ /* turn off LED */
+ val64 = readq(&bar0->adapter_control);
+ val64 = val64 & (~ADAPTER_LED_ON);
+ writeq(val64, &bar0->adapter_control);
+ s2io_link(nic, LINK_DOWN);
+ }
+ clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
+
+out_unlock:
+ rtnl_unlock();
+}
+
+static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
+ struct buffAdd *ba,
+ struct sk_buff **skb, u64 *temp0, u64 *temp1,
+ u64 *temp2, int size)
+{
+ struct net_device *dev = sp->dev;
+ struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
+
+ if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
+ struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
+ /* allocate skb */
+ if (*skb) {
+ DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
+ /*
+ * As Rx frames are not going to be processed,
+ * reuse the same mapped address for the RxD
+ * buffer pointer.
+ */
+ rxdp1->Buffer0_ptr = *temp0;
+ } else {
+ *skb = dev_alloc_skb(size);
+ if (!(*skb)) {
+ DBG_PRINT(INFO_DBG,
+ "%s: Out of memory to allocate %s\n",
+ dev->name, "1 buf mode SKBs");
+ stats->mem_alloc_fail_cnt++;
+ return -ENOMEM ;
+ }
+ stats->mem_allocated += (*skb)->truesize;
+ /* store the mapped addr in a temp variable
+ * such that it can be reused for the next RxD whose
+ * Host_Control is NULL
+ */
+ rxdp1->Buffer0_ptr = *temp0 =
+ pci_map_single(sp->pdev, (*skb)->data,
+ size - NET_IP_ALIGN,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
+ goto memalloc_failed;
+ rxdp->Host_Control = (unsigned long) (*skb);
+ }
+ } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
+ struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
+ /* Two buffer Mode */
+ if (*skb) {
+ rxdp3->Buffer2_ptr = *temp2;
+ rxdp3->Buffer0_ptr = *temp0;
+ rxdp3->Buffer1_ptr = *temp1;
+ } else {
+ *skb = dev_alloc_skb(size);
+ if (!(*skb)) {
+ DBG_PRINT(INFO_DBG,
+ "%s: Out of memory to allocate %s\n",
+ dev->name,
+ "2 buf mode SKBs");
+ stats->mem_alloc_fail_cnt++;
+ return -ENOMEM;
+ }
+ stats->mem_allocated += (*skb)->truesize;
+ rxdp3->Buffer2_ptr = *temp2 =
+ pci_map_single(sp->pdev, (*skb)->data,
+ dev->mtu + 4,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
+ goto memalloc_failed;
+ rxdp3->Buffer0_ptr = *temp0 =
+ pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(sp->pdev,
+ rxdp3->Buffer0_ptr)) {
+ pci_unmap_single(sp->pdev,
+ (dma_addr_t)rxdp3->Buffer2_ptr,
+ dev->mtu + 4,
+ PCI_DMA_FROMDEVICE);
+ goto memalloc_failed;
+ }
+ rxdp->Host_Control = (unsigned long) (*skb);
+
+ /* Buffer-1 will be a dummy buffer, not used */
+ rxdp3->Buffer1_ptr = *temp1 =
+ pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(sp->pdev,
+ rxdp3->Buffer1_ptr)) {
+ pci_unmap_single(sp->pdev,
+ (dma_addr_t)rxdp3->Buffer0_ptr,
+ BUF0_LEN, PCI_DMA_FROMDEVICE);
+ pci_unmap_single(sp->pdev,
+ (dma_addr_t)rxdp3->Buffer2_ptr,
+ dev->mtu + 4,
+ PCI_DMA_FROMDEVICE);
+ goto memalloc_failed;
+ }
+ }
+ }
+ return 0;
+
+memalloc_failed:
+ stats->pci_map_fail_cnt++;
+ stats->mem_freed += (*skb)->truesize;
+ dev_kfree_skb(*skb);
+ return -ENOMEM;
+}
+
+static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
+ int size)
+{
+ struct net_device *dev = sp->dev;
+ if (sp->rxd_mode == RXD_MODE_1) {
+ rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
+ } else if (sp->rxd_mode == RXD_MODE_3B) {
+ rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
+ rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
+ rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
+ }
+}
+
+static int rxd_owner_bit_reset(struct s2io_nic *sp)
+{
+ int i, j, k, blk_cnt = 0, size;
+ struct config_param *config = &sp->config;
+ struct mac_info *mac_control = &sp->mac_control;
+ struct net_device *dev = sp->dev;
+ struct RxD_t *rxdp = NULL;
+ struct sk_buff *skb = NULL;
+ struct buffAdd *ba = NULL;
+ u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
+
+ /* Calculate the size based on ring mode */
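+	/*
+	 * 1-buffer mode receives the entire frame, L2 headers included,
+	 * into a single buffer, hence the header sizes added below;
+	 * 2-buffer mode splits the headers (BUF0) from the payload, so
+	 * its size is derived from the MTU plus alignment slack instead.
+	 */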
+ size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
+ HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
+ if (sp->rxd_mode == RXD_MODE_1)
+ size += NET_IP_ALIGN;
+ else if (sp->rxd_mode == RXD_MODE_3B)
+ size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
+
+ for (i = 0; i < config->rx_ring_num; i++) {
+ struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+ struct ring_info *ring = &mac_control->rings[i];
+
+ blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
+
+ for (j = 0; j < blk_cnt; j++) {
+ for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
+ rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
+ if (sp->rxd_mode == RXD_MODE_3B)
+ ba = &ring->ba[j][k];
+ if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
+ &temp0_64,
+ &temp1_64,
+ &temp2_64,
+ size) == -ENOMEM) {
+ return 0;
+ }
+
+ set_rxd_buffer_size(sp, rxdp, size);
+ wmb();
+ /* flip the Ownership bit to Hardware */
+ rxdp->Control_1 |= RXD_OWN_XENA;
+ }
+ }
+ }
+ return 0;
+}
+
+static int s2io_add_isr(struct s2io_nic *sp)
+{
+ int ret = 0;
+ struct net_device *dev = sp->dev;
+ int err = 0;
+
+ if (sp->config.intr_type == MSI_X)
+ ret = s2io_enable_msi_x(sp);
+ if (ret) {
+ DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
+ sp->config.intr_type = INTA;
+ }
+
+ /*
+ * Store the values of the MSIX table in
+ * the struct s2io_nic structure
+ */
+ store_xmsi_data(sp);
+
+ /* After proper initialization of H/W, register ISR */
+ if (sp->config.intr_type == MSI_X) {
+ int i, msix_rx_cnt = 0;
+
+ for (i = 0; i < sp->num_entries; i++) {
+ if (sp->s2io_entries[i].in_use == MSIX_FLG) {
+ if (sp->s2io_entries[i].type ==
+ MSIX_RING_TYPE) {
+ sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
+ dev->name, i);
+ err = request_irq(sp->entries[i].vector,
+ s2io_msix_ring_handle,
+ 0,
+ sp->desc[i],
+ sp->s2io_entries[i].arg);
+ } else if (sp->s2io_entries[i].type ==
+ MSIX_ALARM_TYPE) {
+ sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
+ dev->name, i);
+ err = request_irq(sp->entries[i].vector,
+ s2io_msix_fifo_handle,
+ 0,
+ sp->desc[i],
+ sp->s2io_entries[i].arg);
+
+ }
+ /* if either data or addr is zero print it. */
+ if (!(sp->msix_info[i].addr &&
+ sp->msix_info[i].data)) {
+ DBG_PRINT(ERR_DBG,
+ "%s @Addr:0x%llx Data:0x%llx\n",
+ sp->desc[i],
+ (unsigned long long)
+ sp->msix_info[i].addr,
+ (unsigned long long)
+ ntohl(sp->msix_info[i].data));
+ } else
+ msix_rx_cnt++;
+ if (err) {
+ remove_msix_isr(sp);
+
+ DBG_PRINT(ERR_DBG,
+ "%s:MSI-X-%d registration "
+ "failed\n", dev->name, i);
+
+ DBG_PRINT(ERR_DBG,
+ "%s: Defaulting to INTA\n",
+ dev->name);
+ sp->config.intr_type = INTA;
+ break;
+ }
+ sp->s2io_entries[i].in_use =
+ MSIX_REGISTERED_SUCCESS;
+ }
+ }
+ if (!err) {
+ pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
+ DBG_PRINT(INFO_DBG,
+ "MSI-X-TX entries enabled through alarm vector\n");
+ }
+ }
+ if (sp->config.intr_type == INTA) {
+ err = request_irq((int)sp->pdev->irq, s2io_isr, IRQF_SHARED,
+ sp->name, dev);
+ if (err) {
+ DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
+ dev->name);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static void s2io_rem_isr(struct s2io_nic *sp)
+{
+ if (sp->config.intr_type == MSI_X)
+ remove_msix_isr(sp);
+ else
+ remove_inta_isr(sp);
+}
+
+static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
+{
+ int cnt = 0;
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ register u64 val64 = 0;
+ struct config_param *config;
+ config = &sp->config;
+
+ if (!is_s2io_card_up(sp))
+ return;
+
+ del_timer_sync(&sp->alarm_timer);
+ /* If s2io_set_link task is executing, wait till it completes. */
+ while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
+ msleep(50);
+ clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
+
+ /* Disable napi */
+ if (sp->config.napi) {
+ int off;
+
+ if (config->intr_type == MSI_X) {
+ for (off = 0; off < sp->config.rx_ring_num; off++)
+ napi_disable(&sp->mac_control.rings[off].napi);
+ } else
+ napi_disable(&sp->napi);
+ }
+
+ /* disable Tx and Rx traffic on the NIC */
+ if (do_io)
+ stop_nic(sp);
+
+ s2io_rem_isr(sp);
+
+ /* stop the tx queue, indicate link down */
+ s2io_link(sp, LINK_DOWN);
+
+ /* Check if the device is Quiescent and then Reset the NIC */
+ while (do_io) {
+ /* As per the HW requirement we need to replenish the
+ * receive buffers to avoid a ring bump. Since there is
+ * no intention of processing the Rx frames at this point, we
+ * just set the ownership bit of the RxDs in each Rx
+ * ring to HW and set the appropriate buffer size
+ * based on the ring mode
+ */
+ rxd_owner_bit_reset(sp);
+
+ val64 = readq(&bar0->adapter_status);
+ if (verify_xena_quiescence(sp)) {
+ if (verify_pcc_quiescent(sp, sp->device_enabled_once))
+ break;
+ }
+
+ msleep(50);
+ cnt++;
+ if (cnt == 10) {
+ DBG_PRINT(ERR_DBG, "Device not Quiescent - "
+ "adapter status reads 0x%llx\n",
+ (unsigned long long)val64);
+ break;
+ }
+ }
+ if (do_io)
+ s2io_reset(sp);
+
+ /* Free all Tx buffers */
+ free_tx_buffers(sp);
+
+ /* Free all Rx buffers */
+ free_rx_buffers(sp);
+
+ clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
+}
+
+static void s2io_card_down(struct s2io_nic *sp)
+{
+ do_s2io_card_down(sp, 1);
+}
+
+static int s2io_card_up(struct s2io_nic *sp)
+{
+ int i, ret = 0;
+ struct config_param *config;
+ struct mac_info *mac_control;
+ struct net_device *dev = (struct net_device *)sp->dev;
+ u16 interruptible;
+
+ /* Initialize the H/W I/O registers */
+ ret = init_nic(sp);
+ if (ret != 0) {
+ DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
+ dev->name);
+ if (ret != -EIO)
+ s2io_reset(sp);
+ return ret;
+ }
+
+ /*
+ * Initializing the Rx buffers. For now we are considering only 1
+ * Rx ring and initializing buffers into 30 Rx blocks
+ */
+ config = &sp->config;
+ mac_control = &sp->mac_control;
+
+ for (i = 0; i < config->rx_ring_num; i++) {
+ struct ring_info *ring = &mac_control->rings[i];
+
+ ring->mtu = dev->mtu;
+ ring->lro = !!(dev->features & NETIF_F_LRO);
+ ret = fill_rx_buffers(sp, ring, 1);
+ if (ret) {
+ DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
+ dev->name);
+ s2io_reset(sp);
+ free_rx_buffers(sp);
+ return -ENOMEM;
+ }
+ DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
+ ring->rx_bufs_left);
+ }
+
+ /* Initialise napi */
+ if (config->napi) {
+ if (config->intr_type == MSI_X) {
+ for (i = 0; i < sp->config.rx_ring_num; i++)
+ napi_enable(&sp->mac_control.rings[i].napi);
+ } else {
+ napi_enable(&sp->napi);
+ }
+ }
+
+ /* Maintain the state prior to the open */
+ if (sp->promisc_flg)
+ sp->promisc_flg = 0;
+ if (sp->m_cast_flg) {
+ sp->m_cast_flg = 0;
+ sp->all_multi_pos = 0;
+ }
+
+ /* Setting its receive mode */
+ s2io_set_multicast(dev);
+
+ if (dev->features & NETIF_F_LRO) {
+ /* Initialize max aggregatable pkts per session based on MTU */
+ sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
+ /* Check if we can use (if specified) user provided value */
+ if (lro_max_pkts < sp->lro_max_aggr_per_sess)
+ sp->lro_max_aggr_per_sess = lro_max_pkts;
+ }
+
+ /* Enable Rx Traffic and interrupts on the NIC */
+ if (start_nic(sp)) {
+ DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
+ s2io_reset(sp);
+ free_rx_buffers(sp);
+ return -ENODEV;
+ }
+
+ /* Add interrupt service routine */
+ if (s2io_add_isr(sp) != 0) {
+ if (sp->config.intr_type == MSI_X)
+ s2io_rem_isr(sp);
+ s2io_reset(sp);
+ free_rx_buffers(sp);
+ return -ENODEV;
+ }
+
+ S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
+
+ set_bit(__S2IO_STATE_CARD_UP, &sp->state);
+
+ /* Enable select interrupts */
+ en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
+ if (sp->config.intr_type != INTA) {
+ interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
+ en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
+ } else {
+ interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
+ interruptible |= TX_PIC_INTR;
+ en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
+ }
+
+ return 0;
+}
+
+/**
+ * s2io_restart_nic - Resets the NIC.
+ * @work : work queue entry containing the device private structure
+ * Description:
+ * This function is scheduled to be run by the s2io_tx_watchdog
+ * function after 0.5 secs to reset the NIC. The idea is to reduce
+ * the run time of the watchdog routine which is run holding a
+ * spin lock.
+ */
+
+static void s2io_restart_nic(struct work_struct *work)
+{
+ struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
+ struct net_device *dev = sp->dev;
+
+ rtnl_lock();
+
+ if (!netif_running(dev))
+ goto out_unlock;
+
+ s2io_card_down(sp);
+ if (s2io_card_up(sp)) {
+ DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
+ }
+ s2io_wake_all_tx_queue(sp);
+ DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
+out_unlock:
+ rtnl_unlock();
+}
+
+/**
+ * s2io_tx_watchdog - Watchdog for transmit side.
+ * @dev : Pointer to net device structure
+ * Description:
+ * This function is triggered if the Tx Queue is stopped
+ * for a pre-defined amount of time when the Interface is still up.
+ * If the Interface is jammed in such a situation, the hardware is
+ * reset (by s2io_close) and restarted again (by s2io_open) to
+ * overcome any problem that might have been caused in the hardware.
+ * Return value:
+ * void
+ */
+
+static void s2io_tx_watchdog(struct net_device *dev)
+{
+ struct s2io_nic *sp = netdev_priv(dev);
+ struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
+
+ if (netif_carrier_ok(dev)) {
+ swstats->watchdog_timer_cnt++;
+ schedule_work(&sp->rst_timer_task);
+ swstats->soft_reset_cnt++;
+ }
+}
+
+/**
+ * rx_osm_handler - To perform some OS related operations on SKB.
+ * @ring_data : the ring from which this RxD was extracted.
+ * @rxdp : the receive descriptor of the frame being processed.
+ * Description:
+ * This function is called by the Rx interrupt service routine to perform
+ * some OS related operations on the SKB before passing it to the upper
+ * layers. It mainly checks if the checksum is OK, if so marks the
+ * SKB's checksum as already verified, increments the Rx packet count
+ * and passes the SKB to the upper layer. If the checksum is wrong, it
+ * increments the Rx packet error count, frees the SKB and returns error.
+ * Return value:
+ * SUCCESS on success and -1 on failure.
+ */
+static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
+{
+ struct s2io_nic *sp = ring_data->nic;
+ struct net_device *dev = (struct net_device *)ring_data->dev;
+ struct sk_buff *skb = (struct sk_buff *)
+ ((unsigned long)rxdp->Host_Control);
+ int ring_no = ring_data->ring_no;
+ u16 l3_csum, l4_csum;
+ unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
+ struct lro *uninitialized_var(lro);
+ u8 err_mask;
+ struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
+
+ skb->dev = dev;
+
+ if (err) {
+ /* Check for parity error */
+ if (err & 0x1)
+ swstats->parity_err_cnt++;
+
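+		/*
+		 * RXD_T_CODE masks the 4-bit transfer code out of Control_1;
+		 * shifting the masked value down by 48 yields the t-code
+		 * that is decoded case by case below (code 0x5, unsupported
+		 * IPv6 extension headers, is tolerated further down).
+		 */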
+ err_mask = err >> 48;
+ switch (err_mask) {
+ case 1:
+ swstats->rx_parity_err_cnt++;
+ break;
+
+ case 2:
+ swstats->rx_abort_cnt++;
+ break;
+
+ case 3:
+ swstats->rx_parity_abort_cnt++;
+ break;
+
+ case 4:
+ swstats->rx_rda_fail_cnt++;
+ break;
+
+ case 5:
+ swstats->rx_unkn_prot_cnt++;
+ break;
+
+ case 6:
+ swstats->rx_fcs_err_cnt++;
+ break;
+
+ case 7:
+ swstats->rx_buf_size_err_cnt++;
+ break;
+
+ case 8:
+ swstats->rx_rxd_corrupt_cnt++;
+ break;
+
+ case 15:
+ swstats->rx_unkn_err_cnt++;
+ break;
+ }
+ /*
+ * Drop the packet if bad transfer code. Exception being
+ * 0x5, which could be due to unsupported IPv6 extension header.
+ * In this case, we let stack handle the packet.
+ * Note that in this case, since checksum will be incorrect,
+ * stack will validate the same.
+ */
+ if (err_mask != 0x5) {
+ DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
+ dev->name, err_mask);
+ dev->stats.rx_crc_errors++;
+ swstats->mem_freed
+ += skb->truesize;
+ dev_kfree_skb(skb);
+ ring_data->rx_bufs_left -= 1;
+ rxdp->Host_Control = 0;
+ return 0;
+ }
+ }
+
+ rxdp->Host_Control = 0;
+ if (sp->rxd_mode == RXD_MODE_1) {
+ int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
+
+ skb_put(skb, len);
+ } else if (sp->rxd_mode == RXD_MODE_3B) {
+ int get_block = ring_data->rx_curr_get_info.block_index;
+ int get_off = ring_data->rx_curr_get_info.offset;
+ int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
+ int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
+ unsigned char *buff = skb_push(skb, buf0_len);
+
+ struct buffAdd *ba = &ring_data->ba[get_block][get_off];
+ memcpy(buff, ba->ba_0, buf0_len);
+ skb_put(skb, buf2_len);
+ }
+
+ if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
+ ((!ring_data->lro) ||
+ (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
+ (dev->features & NETIF_F_RXCSUM)) {
+ l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
+ l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
+ if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
+ /*
+ * NIC verifies if the Checksum of the received
+ * frame is Ok or not and accordingly returns
+ * a flag in the RxD.
+ */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (ring_data->lro) {
+ u32 tcp_len = 0;
+ u8 *tcp;
+ int ret = 0;
+
+ ret = s2io_club_tcp_session(ring_data,
+ skb->data, &tcp,
+ &tcp_len, &lro,
+ rxdp, sp);
+ switch (ret) {
+ case 3: /* Begin anew */
+ lro->parent = skb;
+ goto aggregate;
+ case 1: /* Aggregate */
+ lro_append_pkt(sp, lro, skb, tcp_len);
+ goto aggregate;
+ case 4: /* Flush session */
+ lro_append_pkt(sp, lro, skb, tcp_len);
+ queue_rx_frame(lro->parent,
+ lro->vlan_tag);
+ clear_lro_session(lro);
+ swstats->flush_max_pkts++;
+ goto aggregate;
+ case 2: /* Flush both */
+ lro->parent->data_len = lro->frags_len;
+ swstats->sending_both++;
+ queue_rx_frame(lro->parent,
+ lro->vlan_tag);
+ clear_lro_session(lro);
+ goto send_up;
+ case 0: /* sessions exceeded */
+ case -1: /* non-TCP or not L2 aggregatable */
+ case 5: /*
+ * First pkt in session not
+ * L3/L4 aggregatable
+ */
+ break;
+ default:
+ DBG_PRINT(ERR_DBG,
+ "%s: Samadhana!!\n",
+ __func__);
+ BUG();
+ }
+ }
+ } else {
+ /*
+ * Packet with erroneous checksum, let the
+ * upper layers deal with it.
+ */
+ skb_checksum_none_assert(skb);
+ }
+ } else
+ skb_checksum_none_assert(skb);
+
+ swstats->mem_freed += skb->truesize;
+send_up:
+ skb_record_rx_queue(skb, ring_no);
+ queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
+aggregate:
+ sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
+ return SUCCESS;
+}
+
+/**
+ * s2io_link - stops/starts the Tx queue.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @link : indicates whether link is UP/DOWN.
+ * Description:
+ * This function stops/starts the Tx queue depending on whether the link
+ * status of the NIC is down or up. This is called by the Alarm
+ * interrupt handler whenever a link change interrupt comes up.
+ * Return value:
+ * void.
+ */
+
+static void s2io_link(struct s2io_nic *sp, int link)
+{
+ struct net_device *dev = (struct net_device *)sp->dev;
+ struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
+
+ if (link != sp->last_link_state) {
+ init_tti(sp, link);
+ if (link == LINK_DOWN) {
+ DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
+ s2io_stop_all_tx_queue(sp);
+ netif_carrier_off(dev);
+ if (swstats->link_up_cnt)
+ swstats->link_up_time =
+ jiffies - sp->start_time;
+ swstats->link_down_cnt++;
+ } else {
+ DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
+ if (swstats->link_down_cnt)
+ swstats->link_down_time =
+ jiffies - sp->start_time;
+ swstats->link_up_cnt++;
+ netif_carrier_on(dev);
+ s2io_wake_all_tx_queue(sp);
+ }
+ }
+ sp->last_link_state = link;
+ sp->start_time = jiffies;
+}
+
+/**
+ * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * Description:
+ * This function initializes a few of the PCI and PCI-X configuration registers
+ * with recommended values.
+ * Return value:
+ * void
+ */
+
+static void s2io_init_pci(struct s2io_nic *sp)
+{
+ u16 pci_cmd = 0, pcix_cmd = 0;
+
+ /* Enable Data Parity Error Recovery in PCI-X command register. */
+ pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
+ &(pcix_cmd));
+ pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
+ (pcix_cmd | 1));
+ pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
+ &(pcix_cmd));
+
+ /* Set the PErr Response bit in PCI command register. */
+ pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
+ pci_write_config_word(sp->pdev, PCI_COMMAND,
+ (pci_cmd | PCI_COMMAND_PARITY));
+ pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
+}
+
+static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
+ u8 *dev_multiq)
+{
+ int i;
+
+ if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
+ DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
+ "(%d) not supported\n", tx_fifo_num);
+
+ if (tx_fifo_num < 1)
+ tx_fifo_num = 1;
+ else
+ tx_fifo_num = MAX_TX_FIFOS;
+
+ DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
+ }
+
+ if (multiq)
+ *dev_multiq = multiq;
+
+ if (tx_steering_type && (1 == tx_fifo_num)) {
+ if (tx_steering_type != TX_DEFAULT_STEERING)
+ DBG_PRINT(ERR_DBG,
+ "Tx steering is not supported with "
+ "one fifo. Disabling Tx steering.\n");
+ tx_steering_type = NO_STEERING;
+ }
+
+ if ((tx_steering_type < NO_STEERING) ||
+ (tx_steering_type > TX_DEFAULT_STEERING)) {
+ DBG_PRINT(ERR_DBG,
+ "Requested transmit steering not supported\n");
+ DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
+ tx_steering_type = NO_STEERING;
+ }
+
+ if (rx_ring_num > MAX_RX_RINGS) {
+ DBG_PRINT(ERR_DBG,
+ "Requested number of rx rings not supported\n");
+ DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
+ MAX_RX_RINGS);
+ rx_ring_num = MAX_RX_RINGS;
+ }
+
+ if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
+ DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
+ "Defaulting to INTA\n");
+ *dev_intr_type = INTA;
+ }
+
+ if ((*dev_intr_type == MSI_X) &&
+ ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
+ (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
+ DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
+ "Defaulting to INTA\n");
+ *dev_intr_type = INTA;
+ }
+
+ if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
+ DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
+ DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
+ rx_ring_mode = 1;
+ }
+
+ for (i = 0; i < MAX_RX_RINGS; i++)
+ if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
+ DBG_PRINT(ERR_DBG, "Requested rx ring size not "
+ "supported\nDefaulting to %d\n",
+ MAX_RX_BLOCKS_PER_RING);
+ rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
+ }
+
+ return SUCCESS;
+}
+
+/**
+ * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
+ * or Traffic class respectively.
+ * @nic: device private variable
+ * @ds_codepoint: DS codepoint (0-63) whose traffic is to be steered
+ * @ring: receive ring to steer the matching traffic to
+ * Description: The function configures the receive steering to the
+ * desired receive ring.
+ * Return Value: SUCCESS on success and
+ * FAILURE if the codepoint is out of range or the command does not
+ * complete.
+ */
+static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
+{
+ struct XENA_dev_config __iomem *bar0 = nic->bar0;
+ register u64 val64 = 0;
+
+ if (ds_codepoint > 63)
+ return FAILURE;
+
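+	/*
+	 * Steering-table update sequence: program the data register with
+	 * the target ring, then issue a strobed write with the DS
+	 * codepoint as the table offset, and finally poll until the
+	 * strobe command completes.
+	 */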
+ val64 = RTS_DS_MEM_DATA(ring);
+ writeq(val64, &bar0->rts_ds_mem_data);
+
+ val64 = RTS_DS_MEM_CTRL_WE |
+ RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
+ RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
+
+ writeq(val64, &bar0->rts_ds_mem_ctrl);
+
+ return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
+ RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
+ S2IO_BIT_RESET);
+}
+
+static const struct net_device_ops s2io_netdev_ops = {
+ .ndo_open = s2io_open,
+ .ndo_stop = s2io_close,
+ .ndo_get_stats = s2io_get_stats,
+ .ndo_start_xmit = s2io_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = s2io_set_multicast,
+ .ndo_do_ioctl = s2io_ioctl,
+ .ndo_set_mac_address = s2io_set_mac_addr,
+ .ndo_change_mtu = s2io_change_mtu,
+ .ndo_set_features = s2io_set_features,
+ .ndo_tx_timeout = s2io_tx_watchdog,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = s2io_netpoll,
+#endif
+};
+
+/**
+ * s2io_init_nic - Initialization of the adapter.
+ * @pdev : structure containing the PCI related information of the device.
+ * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
+ * Description:
+ * The function initializes an adapter identified by the pci_dev structure.
+ * All OS related initialization including memory and device structure and
+ * initialization of the device private variable is done. Also the swapper
+ * control register is initialized to enable read and write into the I/O
+ * registers of the device.
+ * Return value:
+ * returns 0 on success and negative on failure.
+ */
+
+static int __devinit
+s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
+{
+ struct s2io_nic *sp;
+ struct net_device *dev;
+ int i, j, ret;
+ int dma_flag = false;
+ u32 mac_up, mac_down;
+ u64 val64 = 0, tmp64 = 0;
+ struct XENA_dev_config __iomem *bar0 = NULL;
+ u16 subid;
+ struct config_param *config;
+ struct mac_info *mac_control;
+ int mode;
+ u8 dev_intr_type = intr_type;
+ u8 dev_multiq = 0;
+
+ ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
+ if (ret)
+ return ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ DBG_PRINT(ERR_DBG,
+ "%s: pci_enable_device failed\n", __func__);
+ return ret;
+ }
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
+ dma_flag = true;
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ DBG_PRINT(ERR_DBG,
+ "Unable to obtain 64bit DMA "
+ "for consistent allocations\n");
+ pci_disable_device(pdev);
+ return -ENOMEM;
+ }
+ } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
+ } else {
+ pci_disable_device(pdev);
+ return -ENOMEM;
+ }
+ ret = pci_request_regions(pdev, s2io_driver_name);
+ if (ret) {
+ DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
+ __func__, ret);
+ pci_disable_device(pdev);
+ return -ENODEV;
+ }
+ if (dev_multiq)
+ dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
+ else
+ dev = alloc_etherdev(sizeof(struct s2io_nic));
+ if (dev == NULL) {
+ DBG_PRINT(ERR_DBG, "Device allocation failed\n");
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ return -ENODEV;
+ }
+
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* Private member variable initialized to s2io NIC structure */
+ sp = netdev_priv(dev);
+ sp->dev = dev;
+ sp->pdev = pdev;
+ sp->high_dma_flag = dma_flag;
+ sp->device_enabled_once = false;
+ if (rx_ring_mode == 1)
+ sp->rxd_mode = RXD_MODE_1;
+ if (rx_ring_mode == 2)
+ sp->rxd_mode = RXD_MODE_3B;
+
+ sp->config.intr_type = dev_intr_type;
+
+ if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
+ (pdev->device == PCI_DEVICE_ID_HERC_UNI))
+ sp->device_type = XFRAME_II_DEVICE;
+ else
+ sp->device_type = XFRAME_I_DEVICE;
+
+ /* Initialize some PCI/PCI-X fields of the NIC. */
+ s2io_init_pci(sp);
+
+ /*
+ * Setting the device configuration parameters.
+ * Most of these parameters can be specified by the user during
+ * module insertion as they are module loadable parameters. If
+ * these parameters are not specified during load time, they
+ * are initialized with default values.
+ */
+ config = &sp->config;
+ mac_control = &sp->mac_control;
+
+ config->napi = napi;
+ config->tx_steering_type = tx_steering_type;
+
+ /* Tx side parameters. */
+ if (config->tx_steering_type == TX_PRIORITY_STEERING)
+ config->tx_fifo_num = MAX_TX_FIFOS;
+ else
+ config->tx_fifo_num = tx_fifo_num;
+
+ /* Initialize the fifos used for tx steering */
+ if (config->tx_fifo_num < 5) {
+ if (config->tx_fifo_num == 1)
+ sp->total_tcp_fifos = 1;
+ else
+ sp->total_tcp_fifos = config->tx_fifo_num - 1;
+ sp->udp_fifo_idx = config->tx_fifo_num - 1;
+ sp->total_udp_fifos = 1;
+ sp->other_fifo_idx = sp->total_tcp_fifos - 1;
+ } else {
+ sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
+ FIFO_OTHER_MAX_NUM);
+ sp->udp_fifo_idx = sp->total_tcp_fifos;
+ sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
+ sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
+ }
+
+ config->multiq = dev_multiq;
+ for (i = 0; i < config->tx_fifo_num; i++) {
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+ tx_cfg->fifo_len = tx_fifo_len[i];
+ tx_cfg->fifo_priority = i;
+ }
+
+ /* mapping the QoS priority to the configured fifos */
+ for (i = 0; i < MAX_TX_FIFOS; i++)
+ config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
+
+ /* map the hashing selector table to the configured fifos */
+ for (i = 0; i < config->tx_fifo_num; i++)
+ sp->fifo_selector[i] = fifo_selector[i];
+
+ config->tx_intr_type = TXD_INT_TYPE_UTILZ;
+ for (i = 0; i < config->tx_fifo_num; i++) {
+ struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
+
+ tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
+ if (tx_cfg->fifo_len < 65) {
+ config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
+ break;
+ }
+ }
+ /* + 2 because one Txd for skb->data and one Txd for UFO */
+ config->max_txds = MAX_SKB_FRAGS + 2;
+
+ /* Rx side parameters. */
+ config->rx_ring_num = rx_ring_num;
+ for (i = 0; i < config->rx_ring_num; i++) {
+ struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+ struct ring_info *ring = &mac_control->rings[i];
+
+ rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
+ rx_cfg->ring_priority = i;
+ ring->rx_bufs_left = 0;
+ ring->rxd_mode = sp->rxd_mode;
+ ring->rxd_count = rxd_count[sp->rxd_mode];
+ ring->pdev = sp->pdev;
+ ring->dev = sp->dev;
+ }
+
+ for (i = 0; i < rx_ring_num; i++) {
+ struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
+
+ rx_cfg->ring_org = RING_ORG_BUFF1;
+ rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
+ }
+
+ /* Setting Mac Control parameters */
+ mac_control->rmac_pause_time = rmac_pause_time;
+ mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
+ mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
+
+ /* initialize the shared memory used by the NIC and the host */
+ if (init_shared_mem(sp)) {
+ DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
+ ret = -ENOMEM;
+ goto mem_alloc_failed;
+ }
+
+ sp->bar0 = pci_ioremap_bar(pdev, 0);
+ if (!sp->bar0) {
+ DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
+ dev->name);
+ ret = -ENOMEM;
+ goto bar0_remap_failed;
+ }
+
+ sp->bar1 = pci_ioremap_bar(pdev, 2);
+ if (!sp->bar1) {
+ DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
+ dev->name);
+ ret = -ENOMEM;
+ goto bar1_remap_failed;
+ }
+
+ dev->irq = pdev->irq;
+ dev->base_addr = (unsigned long)sp->bar0;
+
+ /* Initializing the BAR1 address as the start of the FIFO pointer. */
+ for (j = 0; j < MAX_TX_FIFOS; j++) {
+ mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
+ }
+
+ /* Driver entry points */
+ dev->netdev_ops = &s2io_netdev_ops;
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_RXCSUM | NETIF_F_LRO;
+ dev->features |= dev->hw_features |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ if (sp->device_type & XFRAME_II_DEVICE) {
+ dev->hw_features |= NETIF_F_UFO;
+ if (ufo)
+ dev->features |= NETIF_F_UFO;
+ }
+ if (sp->high_dma_flag == true)
+ dev->features |= NETIF_F_HIGHDMA;
+ dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
+ INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
+ INIT_WORK(&sp->set_link_task, s2io_set_link);
+
+ pci_save_state(sp->pdev);
+
+ /* Setting swapper control on the NIC, for proper reset operation */
+ if (s2io_set_swapper(sp)) {
+ DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
+ dev->name);
+ ret = -EAGAIN;
+ goto set_swap_failed;
+ }
+
+ /* Verify if the Herc works on the slot its placed into */
+ if (sp->device_type & XFRAME_II_DEVICE) {
+ mode = s2io_verify_pci_mode(sp);
+ if (mode < 0) {
+ DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
+ __func__);
+ ret = -EBADSLT;
+ goto set_swap_failed;
+ }
+ }
+
+ if (sp->config.intr_type == MSI_X) {
+ sp->num_entries = config->rx_ring_num + 1;
+ ret = s2io_enable_msi_x(sp);
+
+ if (!ret) {
+ ret = s2io_test_msi(sp);
+ /* rollback MSI-X, will re-enable during add_isr() */
+ remove_msix_isr(sp);
+ }
+ if (ret) {
+ DBG_PRINT(ERR_DBG,
+ "MSI-X requested but failed to enable\n");
+ sp->config.intr_type = INTA;
+ }
+ }
+
+ if (config->intr_type == MSI_X) {
+ for (i = 0; i < config->rx_ring_num ; i++) {
+ struct ring_info *ring = &mac_control->rings[i];
+
+ netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
+ }
+ } else {
+ netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
+ }
+
+ /* Not needed for Herc */
+ if (sp->device_type & XFRAME_I_DEVICE) {
+ /*
+ * Fix for all "FFs" MAC address problems observed on
+ * Alpha platforms
+ */
+ fix_mac_address(sp);
+ s2io_reset(sp);
+ }
+
+ /*
+ * MAC address initialization.
+ * For now only one mac address will be read and used.
+ */
+ bar0 = sp->bar0;
+ val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
+ RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
+ writeq(val64, &bar0->rmac_addr_cmd_mem);
+ wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
+ S2IO_BIT_RESET);
+ tmp64 = readq(&bar0->rmac_addr_data0_mem);
+ mac_down = (u32)tmp64;
+ mac_up = (u32) (tmp64 >> 32);
+
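+	/*
+	 * The station address occupies the six most significant bytes of
+	 * the 64-bit data register; e.g. a register value of
+	 * 0x0011223344550000ULL unpacks to the MAC 00:11:22:33:44:55.
+	 */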
+ sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
+ sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
+ sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
+ sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
+ sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
+ sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
+
+ /* Set the factory defined MAC address initially */
+ dev->addr_len = ETH_ALEN;
+ memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
+ memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
+
+ /* initialize number of multicast & unicast MAC entries variables */
+ if (sp->device_type == XFRAME_I_DEVICE) {
+ config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
+ config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
+ config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
+ } else if (sp->device_type == XFRAME_II_DEVICE) {
+ config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
+ config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
+ config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
+ }
+
+ /* store mac addresses from CAM to s2io_nic structure */
+ do_s2io_store_unicast_mc(sp);
+
+ /* Configure MSIX vector for number of rings configured plus one */
+ if ((sp->device_type == XFRAME_II_DEVICE) &&
+ (config->intr_type == MSI_X))
+ sp->num_entries = config->rx_ring_num + 1;
+
+ /* Store the values of the MSIX table in the s2io_nic structure */
+ store_xmsi_data(sp);
+ /* reset Nic and bring it to known state */
+ s2io_reset(sp);
+
+ /*
+ * Initialize link state flags
+ * and the card state parameter
+ */
+ sp->state = 0;
+
+ /* Initialize spinlocks */
+ for (i = 0; i < sp->config.tx_fifo_num; i++) {
+ struct fifo_info *fifo = &mac_control->fifos[i];
+
+ spin_lock_init(&fifo->tx_lock);
+ }
+
+ /*
+ * SXE-002: Configure link and activity LED to init state
+ * on driver load.
+ */
+ subid = sp->pdev->subsystem_device;
+ if ((subid & 0xFF) >= 0x07) {
+ val64 = readq(&bar0->gpio_control);
+ val64 |= 0x0000800000000000ULL;
+ writeq(val64, &bar0->gpio_control);
+ val64 = 0x0411040400000000ULL;
+ writeq(val64, (void __iomem *)bar0 + 0x2700);
+ val64 = readq(&bar0->gpio_control);
+ }
+
+ sp->rx_csum = 1; /* Rx chksum verify enabled by default */
+
+ if (register_netdev(dev)) {
+ DBG_PRINT(ERR_DBG, "Device registration failed\n");
+ ret = -ENODEV;
+ goto register_failed;
+ }
+ s2io_vpd_read(sp);
+ DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
+ DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
+ sp->product_name, pdev->revision);
+ DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
+ s2io_driver_version);
+ DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
+ DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
+ if (sp->device_type & XFRAME_II_DEVICE) {
+ mode = s2io_print_pci_mode(sp);
+ if (mode < 0) {
+ ret = -EBADSLT;
+ unregister_netdev(dev);
+ goto set_swap_failed;
+ }
+ }
+ switch (sp->rxd_mode) {
+ case RXD_MODE_1:
+ DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
+ dev->name);
+ break;
+ case RXD_MODE_3B:
+ DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
+ dev->name);
+ break;
+ }
+
+ switch (sp->config.napi) {
+ case 0:
+ DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
+ break;
+ case 1:
+ DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
+ break;
+ }
+
+ DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
+ sp->config.tx_fifo_num);
+
+ DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
+ sp->config.rx_ring_num);
+
+ switch (sp->config.intr_type) {
+ case INTA:
+ DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
+ break;
+ case MSI_X:
+ DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
+ break;
+ }
+ if (sp->config.multiq) {
+ for (i = 0; i < sp->config.tx_fifo_num; i++) {
+ struct fifo_info *fifo = &mac_control->fifos[i];
+
+ fifo->multiq = config->multiq;
+ }
+ DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
+ dev->name);
+ } else
+ DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
+ dev->name);
+
+ switch (sp->config.tx_steering_type) {
+ case NO_STEERING:
+ DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
+ dev->name);
+ break;
+ case TX_PRIORITY_STEERING:
+ DBG_PRINT(ERR_DBG,
+ "%s: Priority steering enabled for transmit\n",
+ dev->name);
+ break;
+ case TX_DEFAULT_STEERING:
+ DBG_PRINT(ERR_DBG,
+ "%s: Default steering enabled for transmit\n",
+ dev->name);
+ }
+
+ DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
+ dev->name);
+ if (ufo)
+ DBG_PRINT(ERR_DBG,
+ "%s: UDP Fragmentation Offload(UFO) enabled\n",
+ dev->name);
+ /* Initialize device name */
+ sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
+
+ if (vlan_tag_strip)
+ sp->vlan_strip_flag = 1;
+ else
+ sp->vlan_strip_flag = 0;
+
+ /*
+ * Make Link state as off at this point, when the Link change
+ * interrupt comes the state will be automatically changed to
+ * the right state.
+ */
+ netif_carrier_off(dev);
+
+ return 0;
+
+register_failed:
+set_swap_failed:
+ iounmap(sp->bar1);
+bar1_remap_failed:
+ iounmap(sp->bar0);
+bar0_remap_failed:
+mem_alloc_failed:
+ free_shared_mem(sp);
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(dev);
+
+ return ret;
+}
+
+/**
+ * s2io_rem_nic - Free the PCI device
+ * @pdev: structure containing the PCI related information of the device.
+ * Description: This function is called by the Pci subsystem to release a
+ * PCI device and free up all resource held up by the device. This could
+ * be in response to a Hot plug event or when the driver is to be removed
+ * from memory.
+ */
+
+static void __devexit s2io_rem_nic(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct s2io_nic *sp;
+
+ if (dev == NULL) {
+ DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
+ return;
+ }
+
+ sp = netdev_priv(dev);
+
+ cancel_work_sync(&sp->rst_timer_task);
+ cancel_work_sync(&sp->set_link_task);
+
+ unregister_netdev(dev);
+
+ free_shared_mem(sp);
+ iounmap(sp->bar0);
+ iounmap(sp->bar1);
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(dev);
+ pci_disable_device(pdev);
+}
+
+/**
+ * s2io_starter - Entry point for the driver
+ * Description: This function is the entry point for the driver. It verifies
+ * the module loadable parameters and initializes PCI configuration space.
+ */
+
+static int __init s2io_starter(void)
+{
+ return pci_register_driver(&s2io_driver);
+}
+
+/**
+ * s2io_closer - Cleanup routine for the driver
+ * Description: This function is the cleanup routine for the driver. It
+ * unregisters the driver.
+ */
+
+static __exit void s2io_closer(void)
+{
+ pci_unregister_driver(&s2io_driver);
+ DBG_PRINT(INIT_DBG, "cleanup done\n");
+}
+
+module_init(s2io_starter);
+module_exit(s2io_closer);
+
+static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
+ struct tcphdr **tcp, struct RxD_t *rxdp,
+ struct s2io_nic *sp)
+{
+ int ip_off;
+ u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
+
+ if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
+ DBG_PRINT(INIT_DBG,
+ "%s: Non-TCP frames not supported for LRO\n",
+ __func__);
+ return -1;
+ }
+
+ /* Checking for DIX type or DIX type with VLAN */
+ if ((l2_type == 0) || (l2_type == 4)) {
+ ip_off = HEADER_ETHERNET_II_802_3_SIZE;
+ /*
+ * If vlan stripping is disabled and the frame is VLAN tagged,
+ * shift the offset by the VLAN header size bytes.
+ */
+ if ((!sp->vlan_strip_flag) &&
+ (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
+ ip_off += HEADER_VLAN_SIZE;
+ } else {
+ /* LLC, SNAP etc are considered non-mergeable */
+ return -1;
+ }
+
+ *ip = (struct iphdr *)((u8 *)buffer + ip_off);
+ ip_len = (u8)((*ip)->ihl);
+ ip_len <<= 2;
+ *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
+
+ return 0;
+}
+
+static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
+ struct tcphdr *tcp)
+{
+ DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
+ if ((lro->iph->saddr != ip->saddr) ||
+ (lro->iph->daddr != ip->daddr) ||
+ (lro->tcph->source != tcp->source) ||
+ (lro->tcph->dest != tcp->dest))
+ return -1;
+ return 0;
+}
+
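+/*
+ * TCP payload length = IP total length minus the IP and TCP header
+ * lengths; ihl and doff are counted in 32-bit words, hence the << 2.
+ */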
+static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
+{
+ return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
+}
+
+static void initiate_new_session(struct lro *lro, u8 *l2h,
+ struct iphdr *ip, struct tcphdr *tcp,
+ u32 tcp_pyld_len, u16 vlan_tag)
+{
+ DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
+ lro->l2h = l2h;
+ lro->iph = ip;
+ lro->tcph = tcp;
+ lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
+ lro->tcp_ack = tcp->ack_seq;
+ lro->sg_num = 1;
+ lro->total_len = ntohs(ip->tot_len);
+ lro->frags_len = 0;
+ lro->vlan_tag = vlan_tag;
+ /*
+ * Check if we saw TCP timestamp.
+ * Other consistency checks have already been done.
+ */
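+	/*
+	 * doff == 8 means a 32-byte TCP header: 20 fixed bytes plus 12
+	 * option bytes, which verify_l3_l4_lro_capable() has already
+	 * confirmed hold the timestamp option (possibly NOP-padded).
+	 * With the canonical NOP,NOP,TIMESTAMP layout assumed here,
+	 * ptr+1 is tsval and ptr+2 is tsecr.
+	 */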
+ if (tcp->doff == 8) {
+ __be32 *ptr;
+ ptr = (__be32 *)(tcp+1);
+ lro->saw_ts = 1;
+ lro->cur_tsval = ntohl(*(ptr+1));
+ lro->cur_tsecr = *(ptr+2);
+ }
+ lro->in_use = 1;
+}
+
+static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
+{
+ struct iphdr *ip = lro->iph;
+ struct tcphdr *tcp = lro->tcph;
+ __sum16 nchk;
+ struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
+
+ DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
+
+ /* Update L3 header */
+ ip->tot_len = htons(lro->total_len);
+ ip->check = 0;
+ nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
+ ip->check = nchk;
+
+ /* Update L4 header */
+ tcp->ack_seq = lro->tcp_ack;
+ tcp->window = lro->window;
+
+ /* Update tsecr field if this session has timestamps enabled */
+ if (lro->saw_ts) {
+ __be32 *ptr = (__be32 *)(tcp + 1);
+ *(ptr+2) = lro->cur_tsecr;
+ }
+
+ /* Update counters required for calculation of
+ * average no. of packets aggregated.
+ */
+ swstats->sum_avg_pkts_aggregated += lro->sg_num;
+ swstats->num_aggregations++;
+}
+
+static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
+ struct tcphdr *tcp, u32 l4_pyld)
+{
+ DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
+ lro->total_len += l4_pyld;
+ lro->frags_len += l4_pyld;
+ lro->tcp_next_seq += l4_pyld;
+ lro->sg_num++;
+
+ /* Update ack seq no. and advertised window (from this pkt) in LRO object */
+ lro->tcp_ack = tcp->ack_seq;
+ lro->window = tcp->window;
+
+ if (lro->saw_ts) {
+ __be32 *ptr;
+ /* Update tsecr and tsval from this packet */
+ ptr = (__be32 *)(tcp+1);
+ lro->cur_tsval = ntohl(*(ptr+1));
+ lro->cur_tsecr = *(ptr + 2);
+ }
+}
+
+static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
+ struct tcphdr *tcp, u32 tcp_pyld_len)
+{
+ u8 *ptr;
+
+ DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
+
+ if (!tcp_pyld_len) {
+ /* Runt frame or a pure ack */
+ return -1;
+ }
+
+ if (ip->ihl != 5) /* IP has options */
+ return -1;
+
+ /* If we see CE codepoint in IP header, packet is not mergeable */
+ if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
+ return -1;
+
+ /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
+ if (tcp->urg || tcp->psh || tcp->rst ||
+ tcp->syn || tcp->fin ||
+ tcp->ece || tcp->cwr || !tcp->ack) {
+ /*
+ * Currently recognize only the ack control word and
+ * any other control field being set would result in
+ * flushing the LRO session
+ */
+ return -1;
+ }
+
+ /*
+ * Allow only one TCP timestamp option. Don't aggregate if
+ * any other options are detected.
+ */
+ if (tcp->doff != 5 && tcp->doff != 8)
+ return -1;
+
+ if (tcp->doff == 8) {
+ ptr = (u8 *)(tcp + 1);
+ while (*ptr == TCPOPT_NOP)
+ ptr++;
+ if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
+ return -1;
+
+ /* Ensure timestamp value increases monotonically */
+ if (l_lro)
+ if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
+ return -1;
+
+ /* timestamp echo reply should be non-zero */
+ if (*((__be32 *)(ptr+6)) == 0)
+ return -1;
+ }
+
+ return 0;
+}
+
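+/*
+ * Classify an incoming TCP segment for LRO.  Return codes, as
+ * consumed by rx_osm_handler():
+ *  -1 - frame is not L2/L3/L4 aggregatable
+ *   0 - all LRO sessions are already in use
+ *   1 - segment aggregated into a matching session
+ *   2 - flush the matching session (out of sequence or unmergeable)
+ *   3 - a new session was begun for this segment
+ *   4 - segment aggregated and the session must now be flushed
+ *       (max aggregation reached)
+ *   5 - first packet of a would-be session is not aggregatable
+ */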
+static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
+ u8 **tcp, u32 *tcp_len, struct lro **lro,
+ struct RxD_t *rxdp, struct s2io_nic *sp)
+{
+ struct iphdr *ip;
+ struct tcphdr *tcph;
+ int ret = 0, i;
+ u16 vlan_tag = 0;
+ struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
+
+ ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
+ rxdp, sp);
+ if (ret)
+ return ret;
+
+ DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);
+
+ vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
+ tcph = (struct tcphdr *)*tcp;
+ *tcp_len = get_l4_pyld_length(ip, tcph);
+ for (i = 0; i < MAX_LRO_SESSIONS; i++) {
+ struct lro *l_lro = &ring_data->lro0_n[i];
+ if (l_lro->in_use) {
+ if (check_for_socket_match(l_lro, ip, tcph))
+ continue;
+ /* Sock pair matched */
+ *lro = l_lro;
+
+ if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
+ DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
+ "expected 0x%x, actual 0x%x\n",
+ __func__,
+ (*lro)->tcp_next_seq,
+ ntohl(tcph->seq));
+
+ swstats->outof_sequence_pkts++;
+ ret = 2;
+ break;
+ }
+
+ if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
+ *tcp_len))
+ ret = 1; /* Aggregate */
+ else
+ ret = 2; /* Flush both */
+ break;
+ }
+ }
+
+ if (ret == 0) {
+ /* Before searching for available LRO objects,
+ * check if the pkt is L3/L4 aggregatable. If not
+ * don't create new LRO session. Just send this
+ * packet up.
+ */
+ if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
+ return 5;
+
+ for (i = 0; i < MAX_LRO_SESSIONS; i++) {
+ struct lro *l_lro = &ring_data->lro0_n[i];
+ if (!(l_lro->in_use)) {
+ *lro = l_lro;
+ ret = 3; /* Begin anew */
+ break;
+ }
+ }
+ }
+
+ if (ret == 0) { /* sessions exceeded */
+ DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
+ __func__);
+ *lro = NULL;
+ return ret;
+ }
+
+ switch (ret) {
+ case 3:
+ initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
+ vlan_tag);
+ break;
+ case 2:
+ update_L3L4_header(sp, *lro);
+ break;
+ case 1:
+ aggregate_new_rx(*lro, ip, tcph, *tcp_len);
+ if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
+ update_L3L4_header(sp, *lro);
+ ret = 4; /* Flush the LRO */
+ }
+ break;
+ default:
+ DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
+ break;
+ }
+
+ return ret;
+}
+
+static void clear_lro_session(struct lro *lro)
+{
+ memset(lro, 0, sizeof(struct lro));
+}
+
+static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
+{
+ struct net_device *dev = skb->dev;
+ struct s2io_nic *sp = netdev_priv(dev);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ if (vlan_tag && sp->vlan_strip_flag)
+ __vlan_hwaccel_put_tag(skb, vlan_tag);
+ if (sp->config.napi)
+ netif_receive_skb(skb);
+ else
+ netif_rx(skb);
+}
+
+static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
+ struct sk_buff *skb, u32 tcp_len)
+{
+ struct sk_buff *first = lro->parent;
+ struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
+
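+	/*
+	 * Aggregation chains segments on the parent skb's frag_list:
+	 * each new skb is trimmed down to its TCP payload (skb_pull)
+	 * and linked after the previous fragment, so the parent is
+	 * handed up as one large TCP segment.
+	 */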
+ first->len += tcp_len;
+ first->data_len = lro->frags_len;
+ skb_pull(skb, (skb->len - tcp_len));
+ if (skb_shinfo(first)->frag_list)
+ lro->last_frag->next = skb;
+ else
+ skb_shinfo(first)->frag_list = skb;
+ first->truesize += skb->truesize;
+ lro->last_frag = skb;
+ swstats->clubbed_frms_cnt++;
+}
+
+/**
+ * s2io_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct s2io_nic *sp = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev)) {
+ /* Bring down the card, while avoiding PCI I/O */
+ do_s2io_card_down(sp, 0);
+ }
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * s2io_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ * At this point, the card has experienced a hard reset,
+ * followed by fixups by BIOS, and has its config space
+ * set up identically to what it was at cold boot.
+ */
+static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct s2io_nic *sp = netdev_priv(netdev);
+
+ if (pci_enable_device(pdev)) {
+ pr_err("Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ s2io_reset(sp);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * s2io_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells
+ * us that it is OK to resume normal operation.
+ */
+static void s2io_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct s2io_nic *sp = netdev_priv(netdev);
+
+ if (netif_running(netdev)) {
+ if (s2io_card_up(sp)) {
+ pr_err("Can't bring device back up after reset.\n");
+ return;
+ }
+
+ if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
+ s2io_card_down(sp);
+ pr_err("Can't restore mac addr after reset.\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+ netif_tx_wake_all_queues(netdev);
+}
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
new file mode 100644
index 000000000000..d5596926a1ef
--- /dev/null
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -0,0 +1,1148 @@
+/************************************************************************
+ * s2io.h: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
+ * Copyright(c) 2002-2010 Exar Corp.
+
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ ************************************************************************/
+#ifndef _S2IO_H
+#define _S2IO_H
+
+#define TBD 0
+#define s2BIT(loc) (0x8000000000000000ULL >> (loc))
+#define vBIT(val, loc, sz) (((u64)val) << (64-loc-sz))
+#define INV(d) (((d&0xff)<<24) | (((d>>8)&0xff)<<16) | (((d>>16)&0xff)<<8) | ((d>>24)&0xff))
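+/*
+ * Note the big-endian bit numbering used throughout the register
+ * definitions: s2BIT(0) is the most significant bit of a 64-bit
+ * register (so s2BIT(63) == 1ULL), and vBIT() places a multi-bit
+ * field using the same convention.  INV() byte-swaps a 32-bit value.
+ */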
+
+#undef SUCCESS
+#define SUCCESS 0
+#define FAILURE -1
+#define S2IO_MINUS_ONE 0xFFFFFFFFFFFFFFFFULL
+#define S2IO_DISABLE_MAC_ENTRY 0xFFFFFFFFFFFFULL
+#define S2IO_MAX_PCI_CONFIG_SPACE_REINIT 100
+#define S2IO_BIT_RESET 1
+#define S2IO_BIT_SET 2
+#define CHECKBIT(value, nbit) (value & (1 << nbit))
+
+/* Maximum time to flicker LED when asked to identify NIC using ethtool */
+#define MAX_FLICKER_TIME 60000 /* 60 Secs */
+
+/* Maximum outstanding splits to be configured into xena. */
+enum {
+ XENA_ONE_SPLIT_TRANSACTION = 0,
+ XENA_TWO_SPLIT_TRANSACTION = 1,
+ XENA_THREE_SPLIT_TRANSACTION = 2,
+ XENA_FOUR_SPLIT_TRANSACTION = 3,
+ XENA_EIGHT_SPLIT_TRANSACTION = 4,
+ XENA_TWELVE_SPLIT_TRANSACTION = 5,
+ XENA_SIXTEEN_SPLIT_TRANSACTION = 6,
+ XENA_THIRTYTWO_SPLIT_TRANSACTION = 7
+};
+#define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4)
+
+/* OS concerned variables and constants */
+#define WATCH_DOG_TIMEOUT (15 * HZ)
+#define EFILL 0x1234
+#define ALIGN_SIZE 127
+#define PCIX_COMMAND_REGISTER 0x62
+
+/*
+ * Debug related variables.
+ */
+/* different debug levels. */
+#define ERR_DBG 0
+#define INIT_DBG 1
+#define INFO_DBG 2
+#define TX_DBG 3
+#define INTR_DBG 4
+
+/* Global variable that defines the present debug level of the driver. */
+static int debug_level = ERR_DBG;
+
+/* DEBUG message print. */
+#define DBG_PRINT(dbg_level, fmt, args...) do { \
+ if (dbg_level <= debug_level) \
+ pr_info(fmt, ##args); \
+ } while (0)
+
+/* Protocol assist features of the NIC */
+#define L3_CKSUM_OK 0xFFFF
+#define L4_CKSUM_OK 0xFFFF
+#define S2IO_JUMBO_SIZE 9600
+
+/* Driver statistics maintained by driver */
+struct swStat {
+ unsigned long long single_ecc_errs;
+ unsigned long long double_ecc_errs;
+ unsigned long long parity_err_cnt;
+ unsigned long long serious_err_cnt;
+ unsigned long long soft_reset_cnt;
+ unsigned long long fifo_full_cnt;
+ unsigned long long ring_full_cnt[8];
+ /* LRO statistics */
+ unsigned long long clubbed_frms_cnt;
+ unsigned long long sending_both;
+ unsigned long long outof_sequence_pkts;
+ unsigned long long flush_max_pkts;
+ unsigned long long sum_avg_pkts_aggregated;
+ unsigned long long num_aggregations;
+ /* Other statistics */
+ unsigned long long mem_alloc_fail_cnt;
+ unsigned long long pci_map_fail_cnt;
+ unsigned long long watchdog_timer_cnt;
+ unsigned long long mem_allocated;
+ unsigned long long mem_freed;
+ unsigned long long link_up_cnt;
+ unsigned long long link_down_cnt;
+ unsigned long long link_up_time;
+ unsigned long long link_down_time;
+
+ /* Transfer Code statistics */
+ unsigned long long tx_buf_abort_cnt;
+ unsigned long long tx_desc_abort_cnt;
+ unsigned long long tx_parity_err_cnt;
+ unsigned long long tx_link_loss_cnt;
+ unsigned long long tx_list_proc_err_cnt;
+
+ unsigned long long rx_parity_err_cnt;
+ unsigned long long rx_abort_cnt;
+ unsigned long long rx_parity_abort_cnt;
+ unsigned long long rx_rda_fail_cnt;
+ unsigned long long rx_unkn_prot_cnt;
+ unsigned long long rx_fcs_err_cnt;
+ unsigned long long rx_buf_size_err_cnt;
+ unsigned long long rx_rxd_corrupt_cnt;
+ unsigned long long rx_unkn_err_cnt;
+
+ /* Error/alarm statistics*/
+ unsigned long long tda_err_cnt;
+ unsigned long long pfc_err_cnt;
+ unsigned long long pcc_err_cnt;
+ unsigned long long tti_err_cnt;
+ unsigned long long lso_err_cnt;
+ unsigned long long tpa_err_cnt;
+ unsigned long long sm_err_cnt;
+ unsigned long long mac_tmac_err_cnt;
+ unsigned long long mac_rmac_err_cnt;
+ unsigned long long xgxs_txgxs_err_cnt;
+ unsigned long long xgxs_rxgxs_err_cnt;
+ unsigned long long rc_err_cnt;
+ unsigned long long prc_pcix_err_cnt;
+ unsigned long long rpa_err_cnt;
+ unsigned long long rda_err_cnt;
+ unsigned long long rti_err_cnt;
+ unsigned long long mc_err_cnt;
+
+};
+
+/* Xpak related alarms and warnings */
+struct xpakStat {
+ u64 alarm_transceiver_temp_high;
+ u64 alarm_transceiver_temp_low;
+ u64 alarm_laser_bias_current_high;
+ u64 alarm_laser_bias_current_low;
+ u64 alarm_laser_output_power_high;
+ u64 alarm_laser_output_power_low;
+ u64 warn_transceiver_temp_high;
+ u64 warn_transceiver_temp_low;
+ u64 warn_laser_bias_current_high;
+ u64 warn_laser_bias_current_low;
+ u64 warn_laser_output_power_high;
+ u64 warn_laser_output_power_low;
+ u64 xpak_regs_stat;
+ u32 xpak_timer_count;
+};
+
+
+/* The statistics block of Xena */
+struct stat_block {
+/* Tx MAC statistics counters. */
+ __le32 tmac_data_octets;
+ __le32 tmac_frms;
+ __le64 tmac_drop_frms;
+ __le32 tmac_bcst_frms;
+ __le32 tmac_mcst_frms;
+ __le64 tmac_pause_ctrl_frms;
+ __le32 tmac_ucst_frms;
+ __le32 tmac_ttl_octets;
+ __le32 tmac_any_err_frms;
+ __le32 tmac_nucst_frms;
+ __le64 tmac_ttl_less_fb_octets;
+ __le64 tmac_vld_ip_octets;
+ __le32 tmac_drop_ip;
+ __le32 tmac_vld_ip;
+ __le32 tmac_rst_tcp;
+ __le32 tmac_icmp;
+ __le64 tmac_tcp;
+ __le32 reserved_0;
+ __le32 tmac_udp;
+
+/* Rx MAC Statistics counters. */
+ __le32 rmac_data_octets;
+ __le32 rmac_vld_frms;
+ __le64 rmac_fcs_err_frms;
+ __le64 rmac_drop_frms;
+ __le32 rmac_vld_bcst_frms;
+ __le32 rmac_vld_mcst_frms;
+ __le32 rmac_out_rng_len_err_frms;
+ __le32 rmac_in_rng_len_err_frms;
+ __le64 rmac_long_frms;
+ __le64 rmac_pause_ctrl_frms;
+ __le64 rmac_unsup_ctrl_frms;
+ __le32 rmac_accepted_ucst_frms;
+ __le32 rmac_ttl_octets;
+ __le32 rmac_discarded_frms;
+ __le32 rmac_accepted_nucst_frms;
+ __le32 reserved_1;
+ __le32 rmac_drop_events;
+ __le64 rmac_ttl_less_fb_octets;
+ __le64 rmac_ttl_frms;
+ __le64 reserved_2;
+ __le32 rmac_usized_frms;
+ __le32 reserved_3;
+ __le32 rmac_frag_frms;
+ __le32 rmac_osized_frms;
+ __le32 reserved_4;
+ __le32 rmac_jabber_frms;
+ __le64 rmac_ttl_64_frms;
+ __le64 rmac_ttl_65_127_frms;
+ __le64 reserved_5;
+ __le64 rmac_ttl_128_255_frms;
+ __le64 rmac_ttl_256_511_frms;
+ __le64 reserved_6;
+ __le64 rmac_ttl_512_1023_frms;
+ __le64 rmac_ttl_1024_1518_frms;
+ __le32 rmac_ip;
+ __le32 reserved_7;
+ __le64 rmac_ip_octets;
+ __le32 rmac_drop_ip;
+ __le32 rmac_hdr_err_ip;
+ __le32 reserved_8;
+ __le32 rmac_icmp;
+ __le64 rmac_tcp;
+ __le32 rmac_err_drp_udp;
+ __le32 rmac_udp;
+ __le64 rmac_xgmii_err_sym;
+ __le64 rmac_frms_q0;
+ __le64 rmac_frms_q1;
+ __le64 rmac_frms_q2;
+ __le64 rmac_frms_q3;
+ __le64 rmac_frms_q4;
+ __le64 rmac_frms_q5;
+ __le64 rmac_frms_q6;
+ __le64 rmac_frms_q7;
+ __le16 rmac_full_q3;
+ __le16 rmac_full_q2;
+ __le16 rmac_full_q1;
+ __le16 rmac_full_q0;
+ __le16 rmac_full_q7;
+ __le16 rmac_full_q6;
+ __le16 rmac_full_q5;
+ __le16 rmac_full_q4;
+ __le32 reserved_9;
+ __le32 rmac_pause_cnt;
+ __le64 rmac_xgmii_data_err_cnt;
+ __le64 rmac_xgmii_ctrl_err_cnt;
+ __le32 rmac_err_tcp;
+ __le32 rmac_accepted_ip;
+
+/* PCI/PCI-X Read transaction statistics. */
+ __le32 new_rd_req_cnt;
+ __le32 rd_req_cnt;
+ __le32 rd_rtry_cnt;
+ __le32 new_rd_req_rtry_cnt;
+
+/* PCI/PCI-X Write/Read transaction statistics. */
+ __le32 wr_req_cnt;
+ __le32 wr_rtry_rd_ack_cnt;
+ __le32 new_wr_req_rtry_cnt;
+ __le32 new_wr_req_cnt;
+ __le32 wr_disc_cnt;
+ __le32 wr_rtry_cnt;
+
+/* PCI/PCI-X Write / DMA Transaction statistics. */
+ __le32 txp_wr_cnt;
+ __le32 rd_rtry_wr_ack_cnt;
+ __le32 txd_wr_cnt;
+ __le32 txd_rd_cnt;
+ __le32 rxd_wr_cnt;
+ __le32 rxd_rd_cnt;
+ __le32 rxf_wr_cnt;
+ __le32 txf_rd_cnt;
+
+/* Tx MAC statistics overflow counters. */
+ __le32 tmac_data_octets_oflow;
+ __le32 tmac_frms_oflow;
+ __le32 tmac_bcst_frms_oflow;
+ __le32 tmac_mcst_frms_oflow;
+ __le32 tmac_ucst_frms_oflow;
+ __le32 tmac_ttl_octets_oflow;
+ __le32 tmac_any_err_frms_oflow;
+ __le32 tmac_nucst_frms_oflow;
+ __le64 tmac_vlan_frms;
+ __le32 tmac_drop_ip_oflow;
+ __le32 tmac_vld_ip_oflow;
+ __le32 tmac_rst_tcp_oflow;
+ __le32 tmac_icmp_oflow;
+ __le32 tpa_unknown_protocol;
+ __le32 tmac_udp_oflow;
+ __le32 reserved_10;
+ __le32 tpa_parse_failure;
+
+/* Rx MAC Statistics overflow counters. */
+ __le32 rmac_data_octets_oflow;
+ __le32 rmac_vld_frms_oflow;
+ __le32 rmac_vld_bcst_frms_oflow;
+ __le32 rmac_vld_mcst_frms_oflow;
+ __le32 rmac_accepted_ucst_frms_oflow;
+ __le32 rmac_ttl_octets_oflow;
+ __le32 rmac_discarded_frms_oflow;
+ __le32 rmac_accepted_nucst_frms_oflow;
+ __le32 rmac_usized_frms_oflow;
+ __le32 rmac_drop_events_oflow;
+ __le32 rmac_frag_frms_oflow;
+ __le32 rmac_osized_frms_oflow;
+ __le32 rmac_ip_oflow;
+ __le32 rmac_jabber_frms_oflow;
+ __le32 rmac_icmp_oflow;
+ __le32 rmac_drop_ip_oflow;
+ __le32 rmac_err_drp_udp_oflow;
+ __le32 rmac_udp_oflow;
+ __le32 reserved_11;
+ __le32 rmac_pause_cnt_oflow;
+ __le64 rmac_ttl_1519_4095_frms;
+ __le64 rmac_ttl_4096_8191_frms;
+ __le64 rmac_ttl_8192_max_frms;
+ __le64 rmac_ttl_gt_max_frms;
+ __le64 rmac_osized_alt_frms;
+ __le64 rmac_jabber_alt_frms;
+ __le64 rmac_gt_max_alt_frms;
+ __le64 rmac_vlan_frms;
+ __le32 rmac_len_discard;
+ __le32 rmac_fcs_discard;
+ __le32 rmac_pf_discard;
+ __le32 rmac_da_discard;
+ __le32 rmac_red_discard;
+ __le32 rmac_rts_discard;
+ __le32 reserved_12;
+ __le32 rmac_ingm_full_discard;
+ __le32 reserved_13;
+ __le32 rmac_accepted_ip_oflow;
+ __le32 reserved_14;
+ __le32 link_fault_cnt;
+ u8 buffer[20];
+ struct swStat sw_stat;
+ struct xpakStat xpak_stat;
+};
+
+/* Default value for 'vlan_strip_tag' configuration parameter */
+#define NO_STRIP_IN_PROMISC 2
+
+/*
+ * Structures representing different init time configuration
+ * parameters of the NIC.
+ */
+
+#define MAX_TX_FIFOS 8
+#define MAX_RX_RINGS 8
+
+#define FIFO_DEFAULT_NUM 5
+#define FIFO_UDP_MAX_NUM			 2 /* 0 - even, 1 - odd ports */
+#define FIFO_OTHER_MAX_NUM 1
+
+
+#define MAX_RX_DESC_1 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 128)
+#define MAX_RX_DESC_2 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 86)
+#define MAX_TX_DESC (MAX_AVAILABLE_TXDS)
+
+/* FIFO mappings for all possible number of fifos configured */
+static const int fifo_map[][MAX_TX_FIFOS] = {
+ {0, 0, 0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 1, 1, 1, 1},
+ {0, 0, 0, 1, 1, 1, 2, 2},
+ {0, 0, 1, 1, 2, 2, 3, 3},
+ {0, 0, 1, 1, 2, 2, 3, 4},
+ {0, 0, 1, 1, 2, 3, 4, 5},
+ {0, 0, 1, 2, 3, 4, 5, 6},
+ {0, 1, 2, 3, 4, 5, 6, 7},
+};
+
+static const u16 fifo_selector[MAX_TX_FIFOS] = {0, 1, 3, 3, 7, 7, 7, 7};
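+
+/*
+ * Worked example (editor's note; a hedged reading of the tables above):
+ * row (n - 1) of fifo_map appears to serve a configuration with n Tx
+ * FIFOs, so with three FIFOs the row {0, 0, 0, 1, 1, 1, 2, 2} maps the
+ * eight possible priority values onto FIFOs 0..2. fifo_selector[n - 1]
+ * is a power-of-two mask (e.g. 3 for n == 3) that reduces a port-based
+ * hash to a small range before it is clamped to a valid FIFO number.
+ */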
+
+/* Maintains Per FIFO related information. */
+struct tx_fifo_config {
+#define MAX_AVAILABLE_TXDS 8192
+	u32 fifo_len;		/* specifies length of the FIFO (up to 8192), i.e. the number of TxDLs */
+/* Priority definition */
+#define TX_FIFO_PRI_0 0 /*Highest */
+#define TX_FIFO_PRI_1 1
+#define TX_FIFO_PRI_2 2
+#define TX_FIFO_PRI_3 3
+#define TX_FIFO_PRI_4 4
+#define TX_FIFO_PRI_5 5
+#define TX_FIFO_PRI_6 6
+#define TX_FIFO_PRI_7 7 /*lowest */
+	u8 fifo_priority;	/* specifies priority level for FIFO */
+	/* user should not set two FIFOs with the same priority */
+ u8 f_no_snoop;
+#define NO_SNOOP_TXD 0x01
+#define NO_SNOOP_TXD_BUFFER 0x02
+};
+
+
+/* Maintains per Ring related information */
+struct rx_ring_config {
+ u32 num_rxd; /*No of RxDs per Rx Ring */
+#define RX_RING_PRI_0 0 /* highest */
+#define RX_RING_PRI_1 1
+#define RX_RING_PRI_2 2
+#define RX_RING_PRI_3 3
+#define RX_RING_PRI_4 4
+#define RX_RING_PRI_5 5
+#define RX_RING_PRI_6 6
+#define RX_RING_PRI_7 7 /* lowest */
+
+ u8 ring_priority; /*Specifies service priority of ring */
+ /* OSM should not set any two rings with same priority */
+ u8 ring_org; /*Organization of ring */
+#define RING_ORG_BUFF1 0x01
+#define RX_RING_ORG_BUFF3 0x03
+#define RX_RING_ORG_BUFF5 0x05
+
+ u8 f_no_snoop;
+#define NO_SNOOP_RXD 0x01
+#define NO_SNOOP_RXD_BUFFER 0x02
+};
+
+/* This structure contains the values of the tunable parameters
+ * of the H/W.
+ */
+struct config_param {
+/* Tx Side */
+ u32 tx_fifo_num; /*Number of Tx FIFOs */
+
+ /* 0-No steering, 1-Priority steering, 2-Default fifo map */
+#define NO_STEERING 0
+#define TX_PRIORITY_STEERING 0x1
+#define TX_DEFAULT_STEERING 0x2
+ u8 tx_steering_type;
+
+ u8 fifo_mapping[MAX_TX_FIFOS];
+ struct tx_fifo_config tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */
+ u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */
+ u64 tx_intr_type;
+#define INTA 0
+#define MSI_X 2
+ u8 intr_type;
+ u8 napi;
+
+ /* Specifies if Tx Intr is UTILZ or PER_LIST type. */
+
+/* Rx Side */
+ u32 rx_ring_num; /*Number of receive rings */
+#define MAX_RX_BLOCKS_PER_RING 150
+
+ struct rx_ring_config rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */
+
+#define HEADER_ETHERNET_II_802_3_SIZE 14
+#define HEADER_802_2_SIZE 3
+#define HEADER_SNAP_SIZE 5
+#define HEADER_VLAN_SIZE 4
+
+#define MIN_MTU 46
+#define MAX_PYLD 1500
+#define MAX_MTU (MAX_PYLD+18)
+#define MAX_MTU_VLAN (MAX_PYLD+22)
+#define MAX_PYLD_JUMBO 9600
+#define MAX_MTU_JUMBO (MAX_PYLD_JUMBO+18)
+#define MAX_MTU_JUMBO_VLAN (MAX_PYLD_JUMBO+22)
+ u16 bus_speed;
+ int max_mc_addr; /* xena=64 herc=256 */
+ int max_mac_addr; /* xena=16 herc=64 */
+ int mc_start_offset; /* xena=16 herc=64 */
+ u8 multiq;
+};
+
+/* Structure representing MAC Addrs */
+struct mac_addr {
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Structure that represents every FIFO element in the BAR1
+ * address space.
+ */
+struct TxFIFO_element {
+ u64 TxDL_Pointer;
+
+ u64 List_Control;
+#define TX_FIFO_LAST_TXD_NUM(val)	vBIT(val,0,8)
+#define TX_FIFO_FIRST_LIST s2BIT(14)
+#define TX_FIFO_LAST_LIST s2BIT(15)
+#define TX_FIFO_FIRSTNLAST_LIST vBIT(3,14,2)
+#define TX_FIFO_SPECIAL_FUNC s2BIT(23)
+#define TX_FIFO_DS_NO_SNOOP s2BIT(31)
+#define TX_FIFO_BUFF_NO_SNOOP s2BIT(30)
+};
+
+/* Tx descriptor structure */
+struct TxD {
+ u64 Control_1;
+/* bit mask */
+#define TXD_LIST_OWN_XENA s2BIT(7)
+#define TXD_T_CODE (s2BIT(12)|s2BIT(13)|s2BIT(14)|s2BIT(15))
+#define TXD_T_CODE_OK(val)	(!(val & TXD_T_CODE))
+#define GET_TXD_T_CODE(val) ((val & TXD_T_CODE)<<12)
+#define TXD_GATHER_CODE (s2BIT(22) | s2BIT(23))
+#define TXD_GATHER_CODE_FIRST s2BIT(22)
+#define TXD_GATHER_CODE_LAST s2BIT(23)
+#define TXD_TCP_LSO_EN s2BIT(30)
+#define TXD_UDP_COF_EN s2BIT(31)
+#define TXD_UFO_EN		(s2BIT(31) | s2BIT(30))
+#define TXD_TCP_LSO_MSS(val) vBIT(val,34,14)
+#define TXD_UFO_MSS(val) vBIT(val,34,14)
+#define TXD_BUFFER0_SIZE(val) vBIT(val,48,16)
+
+ u64 Control_2;
+#define TXD_TX_CKO_CONTROL (s2BIT(5)|s2BIT(6)|s2BIT(7))
+#define TXD_TX_CKO_IPV4_EN s2BIT(5)
+#define TXD_TX_CKO_TCP_EN s2BIT(6)
+#define TXD_TX_CKO_UDP_EN s2BIT(7)
+#define TXD_VLAN_ENABLE s2BIT(15)
+#define TXD_VLAN_TAG(val) vBIT(val,16,16)
+#define TXD_INT_NUMBER(val) vBIT(val,34,6)
+#define TXD_INT_TYPE_PER_LIST s2BIT(47)
+#define TXD_INT_TYPE_UTILZ s2BIT(46)
+#define TXD_SET_MARKER vBIT(0x6,0,4)
+
+ u64 Buffer_Pointer;
+ u64 Host_Control; /* reserved for host */
+};
+
+/* Structure to hold the phy and virt addr of every TxDL. */
+struct list_info_hold {
+ dma_addr_t list_phy_addr;
+ void *list_virt_addr;
+};
+
+/* Rx descriptor fields common to all buffer modes */
+struct RxD_t {
+ u64 Host_Control; /* reserved for host */
+ u64 Control_1;
+#define RXD_OWN_XENA s2BIT(7)
+#define RXD_T_CODE (s2BIT(12)|s2BIT(13)|s2BIT(14)|s2BIT(15))
+#define RXD_FRAME_PROTO vBIT(0xFFFF,24,8)
+#define RXD_FRAME_VLAN_TAG s2BIT(24)
+#define RXD_FRAME_PROTO_IPV4 s2BIT(27)
+#define RXD_FRAME_PROTO_IPV6 s2BIT(28)
+#define RXD_FRAME_IP_FRAG s2BIT(29)
+#define RXD_FRAME_PROTO_TCP s2BIT(30)
+#define RXD_FRAME_PROTO_UDP s2BIT(31)
+#define TCP_OR_UDP_FRAME (RXD_FRAME_PROTO_TCP | RXD_FRAME_PROTO_UDP)
+#define RXD_GET_L3_CKSUM(val) ((u16)(val>> 16) & 0xFFFF)
+#define RXD_GET_L4_CKSUM(val) ((u16)(val) & 0xFFFF)
+
+ u64 Control_2;
+#define THE_RXD_MARK 0x3
+#define SET_RXD_MARKER vBIT(THE_RXD_MARK, 0, 2)
+#define GET_RXD_MARKER(ctrl) ((ctrl & SET_RXD_MARKER) >> 62)
+
+#define MASK_VLAN_TAG vBIT(0xFFFF,48,16)
+#define SET_VLAN_TAG(val) vBIT(val,48,16)
+#define SET_NUM_TAG(val) vBIT(val,16,32)
+};
+/* Rx descriptor structure for 1 buffer mode */
+struct RxD1 {
+ struct RxD_t h;
+
+#define MASK_BUFFER0_SIZE_1 vBIT(0x3FFF,2,14)
+#define SET_BUFFER0_SIZE_1(val) vBIT(val,2,14)
+#define RXD_GET_BUFFER0_SIZE_1(_Control_2) \
+ (u16)((_Control_2 & MASK_BUFFER0_SIZE_1) >> 48)
+ u64 Buffer0_ptr;
+};
+/* Rx descriptor structure for 3 or 2 buffer mode */
+
+struct RxD3 {
+ struct RxD_t h;
+
+#define MASK_BUFFER0_SIZE_3 vBIT(0xFF,2,14)
+#define MASK_BUFFER1_SIZE_3 vBIT(0xFFFF,16,16)
+#define MASK_BUFFER2_SIZE_3 vBIT(0xFFFF,32,16)
+#define SET_BUFFER0_SIZE_3(val) vBIT(val,8,8)
+#define SET_BUFFER1_SIZE_3(val) vBIT(val,16,16)
+#define SET_BUFFER2_SIZE_3(val) vBIT(val,32,16)
+#define RXD_GET_BUFFER0_SIZE_3(Control_2) \
+ (u8)((Control_2 & MASK_BUFFER0_SIZE_3) >> 48)
+#define RXD_GET_BUFFER1_SIZE_3(Control_2) \
+ (u16)((Control_2 & MASK_BUFFER1_SIZE_3) >> 32)
+#define RXD_GET_BUFFER2_SIZE_3(Control_2) \
+ (u16)((Control_2 & MASK_BUFFER2_SIZE_3) >> 16)
+#define BUF0_LEN 40
+#define BUF1_LEN 1
+
+ u64 Buffer0_ptr;
+ u64 Buffer1_ptr;
+ u64 Buffer2_ptr;
+};
+
+
+/* Structure that represents the Rx descriptor block which contains
+ * 128 Rx descriptors.
+ */
+struct RxD_block {
+#define MAX_RXDS_PER_BLOCK_1 127
+ struct RxD1 rxd[MAX_RXDS_PER_BLOCK_1];
+
+ u64 reserved_0;
+#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
+ u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last
+ * Rxd in this blk */
+ u64 reserved_2_pNext_RxD_block; /* Logical ptr to next */
+ u64 pNext_RxD_Blk_physical; /* Buff0_ptr.In a 32 bit arch
+ * the upper 32 bits should
+ * be 0 */
+};
+
+#define SIZE_OF_BLOCK 4096
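+
+/*
+ * Layout arithmetic (editor's note): in 1-buffer mode each RxD1 is four
+ * u64s (32 bytes), so a 4096-byte block holds 4096 / 32 = 128 slots:
+ * MAX_RXDS_PER_BLOCK_1 == 127 usable descriptors plus one 32-byte
+ * trailer (reserved_0 .. pNext_RxD_Blk_physical) linking to the next
+ * block.
+ */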
+
+#define RXD_MODE_1 0 /* One Buffer mode */
+#define RXD_MODE_3B 1 /* Two Buffer mode */
+
+/* Structure to hold virtual addresses of Buf0 and Buf1 in
+ * 2buf mode. */
+struct buffAdd {
+ void *ba_0_org;
+ void *ba_1_org;
+ void *ba_0;
+ void *ba_1;
+};
+
+/* Structure which stores all the MAC control parameters */
+
+/* This structure stores the offset of the RxD in the ring
+ * from which the Rx Interrupt processor can start picking
+ * up the RxDs for processing.
+ */
+struct rx_curr_get_info {
+ u32 block_index;
+ u32 offset;
+ u32 ring_len;
+};
+
+struct rx_curr_put_info {
+ u32 block_index;
+ u32 offset;
+ u32 ring_len;
+};
+
+/* This structure stores the offset of the TxDl in the FIFO
+ * from which the Tx Interrupt processor can start picking
+ * up the TxDLs for send complete interrupt processing.
+ */
+struct tx_curr_get_info {
+ u32 offset;
+ u32 fifo_len;
+};
+
+struct tx_curr_put_info {
+ u32 offset;
+ u32 fifo_len;
+};
+
+struct rxd_info {
+ void *virt_addr;
+ dma_addr_t dma_addr;
+};
+
+/* Structure that holds the Phy and virt addresses of the Blocks */
+struct rx_block_info {
+ void *block_virt_addr;
+ dma_addr_t block_dma_addr;
+ struct rxd_info *rxds;
+};
+
+/* Data structure to represent a LRO session */
+struct lro {
+ struct sk_buff *parent;
+ struct sk_buff *last_frag;
+ u8 *l2h;
+ struct iphdr *iph;
+ struct tcphdr *tcph;
+ u32 tcp_next_seq;
+ __be32 tcp_ack;
+ int total_len;
+ int frags_len;
+ int sg_num;
+ int in_use;
+ __be16 window;
+ u16 vlan_tag;
+ u32 cur_tsval;
+ __be32 cur_tsecr;
+ u8 saw_ts;
+} ____cacheline_aligned;
+
+/* Ring specific structure */
+struct ring_info {
+ /* The ring number */
+ int ring_no;
+
+ /* per-ring buffer counter */
+ u32 rx_bufs_left;
+
+#define MAX_LRO_SESSIONS 32
+ struct lro lro0_n[MAX_LRO_SESSIONS];
+ u8 lro;
+
+ /* copy of sp->rxd_mode flag */
+ int rxd_mode;
+
+ /* Number of rxds per block for the rxd_mode */
+ int rxd_count;
+
+ /* copy of sp pointer */
+ struct s2io_nic *nic;
+
+ /* copy of sp->dev pointer */
+ struct net_device *dev;
+
+ /* copy of sp->pdev pointer */
+ struct pci_dev *pdev;
+
+ /* Per ring napi struct */
+ struct napi_struct napi;
+
+ unsigned long interrupt_count;
+
+ /*
+ * Place holders for the virtual and physical addresses of
+ * all the Rx Blocks
+ */
+ struct rx_block_info rx_blocks[MAX_RX_BLOCKS_PER_RING];
+ int block_count;
+ int pkt_cnt;
+
+ /*
+	 * Put pointer info which indicates which RxD has to be replenished
+ * with a new buffer.
+ */
+ struct rx_curr_put_info rx_curr_put_info;
+
+ /*
+	 * Get pointer info which indicates which is the last RxD that was
+ * processed by the driver.
+ */
+ struct rx_curr_get_info rx_curr_get_info;
+
+ /* interface MTU value */
+ unsigned mtu;
+
+ /* Buffer Address store. */
+ struct buffAdd **ba;
+} ____cacheline_aligned;
+
+/* Fifo specific structure */
+struct fifo_info {
+ /* FIFO number */
+ int fifo_no;
+
+ /* Maximum TxDs per TxDL */
+ int max_txds;
+
+ /* Place holder of all the TX List's Phy and Virt addresses. */
+ struct list_info_hold *list_info;
+
+ /*
+ * Current offset within the tx FIFO where driver would write
+ * new Tx frame
+ */
+ struct tx_curr_put_info tx_curr_put_info;
+
+ /*
+ * Current offset within tx FIFO from where the driver would start freeing
+ * the buffers
+ */
+ struct tx_curr_get_info tx_curr_get_info;
+#define FIFO_QUEUE_START 0
+#define FIFO_QUEUE_STOP 1
+ int queue_state;
+
+ /* copy of sp->dev pointer */
+ struct net_device *dev;
+
+ /* copy of multiq status */
+ u8 multiq;
+
+ /* Per fifo lock */
+ spinlock_t tx_lock;
+
+ /* Per fifo UFO in band structure */
+ u64 *ufo_in_band_v;
+
+ struct s2io_nic *nic;
+} ____cacheline_aligned;
+
+/* Information related to the Tx and Rx FIFOs and Rings of Xena
+ * is maintained in this structure.
+ */
+struct mac_info {
+/* tx side stuff */
+ /* logical pointer of start of each Tx FIFO */
+ struct TxFIFO_element __iomem *tx_FIFO_start[MAX_TX_FIFOS];
+
+ /* Fifo specific structure */
+ struct fifo_info fifos[MAX_TX_FIFOS];
+
+ /* Save virtual address of TxD page with zero DMA addr(if any) */
+ void *zerodma_virt_addr;
+
+/* rx side stuff */
+ /* Ring specific structure */
+ struct ring_info rings[MAX_RX_RINGS];
+
+ u16 rmac_pause_time;
+ u16 mc_pause_threshold_q0q3;
+ u16 mc_pause_threshold_q4q7;
+
+	void *stats_mem;	/* original pointer to allocated mem */
+ dma_addr_t stats_mem_phy; /* Physical address of the stat block */
+ u32 stats_mem_sz;
+ struct stat_block *stats_info; /* Logical address of the stat block */
+};
+
+/* Default Tunable parameters of the NIC. */
+#define DEFAULT_FIFO_0_LEN 4096
+#define DEFAULT_FIFO_1_7_LEN 512
+#define SMALL_BLK_CNT 30
+#define LARGE_BLK_CNT 100
+
+/*
+ * Structure to keep track of the MSI-X vectors and the corresponding
+ * argument registered against each vector
+ */
+#define MAX_REQUESTED_MSI_X 9
+struct s2io_msix_entry
+{
+ u16 vector;
+ u16 entry;
+ void *arg;
+
+ u8 type;
+#define MSIX_ALARM_TYPE 1
+#define MSIX_RING_TYPE 2
+
+ u8 in_use;
+#define MSIX_REGISTERED_SUCCESS 0xAA
+};
+
+struct msix_info_st {
+ u64 addr;
+ u64 data;
+};
+
+/* These flags represent the device's temporary state */
+enum s2io_device_state_t
+{
+ __S2IO_STATE_LINK_TASK=0,
+ __S2IO_STATE_CARD_UP
+};
+
+/* Structure representing one instance of the NIC */
+struct s2io_nic {
+ int rxd_mode;
+ /*
+	 * Count of packets to be processed in a given iteration; when NAPI
+	 * is enabled it is indicated by the quota field of the device structure.
+ */
+ int pkts_to_process;
+ struct net_device *dev;
+ struct mac_info mac_control;
+ struct config_param config;
+ struct pci_dev *pdev;
+ void __iomem *bar0;
+ void __iomem *bar1;
+#define MAX_MAC_SUPPORTED 16
+#define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED
+
+ struct mac_addr def_mac_addr[256];
+
+ struct net_device_stats stats;
+ int high_dma_flag;
+ int device_enabled_once;
+
+ char name[60];
+
+ /* Timer that handles I/O errors/exceptions */
+ struct timer_list alarm_timer;
+
+ /* Space to back up the PCI config space */
+ u32 config_space[256 / sizeof(u32)];
+
+#define PROMISC 1
+#define ALL_MULTI 2
+
+#define MAX_ADDRS_SUPPORTED 64
+ u16 mc_addr_count;
+
+ u16 m_cast_flg;
+ u16 all_multi_pos;
+ u16 promisc_flg;
+
+	/* Two work items: one that restarts the NIC if the device gets
+	 * stuck, and a scheduled task that sets the correct link state once
+	 * the NIC's PHY has stabilized after a state change.
+ */
+ struct work_struct rst_timer_task;
+ struct work_struct set_link_task;
+
+ /* Flag that can be used to turn on or turn off the Rx checksum
+ * offload feature.
+ */
+ int rx_csum;
+
+ /* Below variables are used for fifo selection to transmit a packet */
+ u16 fifo_selector[MAX_TX_FIFOS];
+
+ /* Total fifos for tcp packets */
+ u8 total_tcp_fifos;
+
+ /*
+ * Beginning index of udp for udp packets
+ * Value will be equal to
+ * (tx_fifo_num - FIFO_UDP_MAX_NUM - FIFO_OTHER_MAX_NUM)
+ */
+ u8 udp_fifo_idx;
+
+ u8 total_udp_fifos;
+
+ /*
+ * Beginning index of fifo for all other packets
+ * Value will be equal to (tx_fifo_num - FIFO_OTHER_MAX_NUM)
+ */
+ u8 other_fifo_idx;
+
+ struct napi_struct napi;
+ /* after blink, the adapter must be restored with original
+ * values.
+ */
+ u64 adapt_ctrl_org;
+
+ /* Last known link state. */
+ u16 last_link_state;
+#define LINK_DOWN 1
+#define LINK_UP 2
+
+ int task_flag;
+ unsigned long long start_time;
+ int vlan_strip_flag;
+#define MSIX_FLG 0xA5
+ int num_entries;
+ struct msix_entry *entries;
+ int msi_detected;
+ wait_queue_head_t msi_wait;
+ struct s2io_msix_entry *s2io_entries;
+ char desc[MAX_REQUESTED_MSI_X][25];
+
+ int avail_msix_vectors; /* No. of MSI-X vectors granted by system */
+
+ struct msix_info_st msix_info[0x3f];
+
+#define XFRAME_I_DEVICE 1
+#define XFRAME_II_DEVICE 2
+ u8 device_type;
+
+ unsigned long clubbed_frms_cnt;
+ unsigned long sending_both;
+ u16 lro_max_aggr_per_sess;
+ volatile unsigned long state;
+ u64 general_int_mask;
+
+#define VPD_STRING_LEN 80
+ u8 product_name[VPD_STRING_LEN];
+ u8 serial_num[VPD_STRING_LEN];
+};
+
+#define RESET_ERROR 1
+#define CMD_ERROR 2
+
+/* OS related system calls */
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+ u64 ret = 0;
+ ret = readl(addr + 4);
+ ret <<= 32;
+ ret |= readl(addr);
+
+ return ret;
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel((u32) (val), addr);
+ writel((u32) (val >> 32), (addr + 4));
+}
+#endif
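+
+/*
+ * Editor's note: both fallbacks above split the 64-bit access into two
+ * 32-bit MMIO operations, so they are not atomic with respect to the
+ * device; registers that demand a specific half-word write order go
+ * through SPECIAL_REG_WRITE() below instead.
+ */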
+
+/*
+ * Some registers have to be written in a particular order to
+ * ensure correct hardware operation. The macro SPECIAL_REG_WRITE
+ * is used to perform such ordered writes. The defines UF (Upper First)
+ * and LF (Lower First) specify the required write order.
+ */
+#define UF 1
+#define LF 2
+static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order)
+{
+ if (order == LF) {
+ writel((u32) (val), addr);
+ (void) readl(addr);
+ writel((u32) (val >> 32), (addr + 4));
+ (void) readl(addr + 4);
+ } else {
+ writel((u32) (val >> 32), (addr + 4));
+ (void) readl(addr + 4);
+ writel((u32) (val), addr);
+ (void) readl(addr);
+ }
+}
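+
+/*
+ * Usage sketch (editor's illustration; some_reg64 is a hypothetical
+ * 64-bit register): a register documented as "lower half first" would
+ * be written as
+ *
+ *	SPECIAL_REG_WRITE(val64, &bar0->some_reg64, LF);
+ *
+ * The readl() after each half flushes the posted write before the
+ * other half is issued.
+ */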
+
+/* Interrupt related values of Xena */
+
+#define ENABLE_INTRS 1
+#define DISABLE_INTRS 2
+
+/* Highest level interrupt blocks */
+#define TX_PIC_INTR (0x0001<<0)
+#define TX_DMA_INTR (0x0001<<1)
+#define TX_MAC_INTR (0x0001<<2)
+#define TX_XGXS_INTR (0x0001<<3)
+#define TX_TRAFFIC_INTR (0x0001<<4)
+#define RX_PIC_INTR (0x0001<<5)
+#define RX_DMA_INTR (0x0001<<6)
+#define RX_MAC_INTR (0x0001<<7)
+#define RX_XGXS_INTR (0x0001<<8)
+#define RX_TRAFFIC_INTR (0x0001<<9)
+#define MC_INTR (0x0001<<10)
+#define ENA_ALL_INTRS ( TX_PIC_INTR | \
+ TX_DMA_INTR | \
+ TX_MAC_INTR | \
+ TX_XGXS_INTR | \
+ TX_TRAFFIC_INTR | \
+ RX_PIC_INTR | \
+ RX_DMA_INTR | \
+ RX_MAC_INTR | \
+ RX_XGXS_INTR | \
+ RX_TRAFFIC_INTR | \
+ MC_INTR )
+
+/* Interrupt masks for the general interrupt mask register */
+#define DISABLE_ALL_INTRS 0xFFFFFFFFFFFFFFFFULL
+
+#define TXPIC_INT_M s2BIT(0)
+#define TXDMA_INT_M s2BIT(1)
+#define TXMAC_INT_M s2BIT(2)
+#define TXXGXS_INT_M s2BIT(3)
+#define TXTRAFFIC_INT_M s2BIT(8)
+#define PIC_RX_INT_M s2BIT(32)
+#define RXDMA_INT_M s2BIT(33)
+#define RXMAC_INT_M s2BIT(34)
+#define MC_INT_M s2BIT(35)
+#define RXXGXS_INT_M s2BIT(36)
+#define RXTRAFFIC_INT_M s2BIT(40)
+
+/* PIC level interrupts: TODO */
+
+/* DMA level Interrupts */
+#define TXDMA_PFC_INT_M s2BIT(0)
+#define TXDMA_PCC_INT_M s2BIT(2)
+
+/* PFC block interrupts */
+#define PFC_MISC_ERR_1 s2BIT(0) /* Interrupt to indicate FIFO full */
+
+/* PCC block interrupts. */
+#define PCC_FB_ECC_ERR vBIT(0xff, 16, 8) /* Interrupt to indicate
+ PCC_FB_ECC Error. */
+
+#define RXD_GET_VLAN_TAG(Control_2) (u16)(Control_2 & MASK_VLAN_TAG)
+/*
+ * Prototype declarations.
+ */
+static int __devinit s2io_init_nic(struct pci_dev *pdev,
+ const struct pci_device_id *pre);
+static void __devexit s2io_rem_nic(struct pci_dev *pdev);
+static int init_shared_mem(struct s2io_nic *sp);
+static void free_shared_mem(struct s2io_nic *sp);
+static int init_nic(struct s2io_nic *nic);
+static int rx_intr_handler(struct ring_info *ring_data, int budget);
+static void s2io_txpic_intr_handle(struct s2io_nic *sp);
+static void tx_intr_handler(struct fifo_info *fifo_data);
+static void s2io_handle_errors(void * dev_id);
+
+static int s2io_starter(void);
+static void s2io_closer(void);
+static void s2io_tx_watchdog(struct net_device *dev);
+static void s2io_set_multicast(struct net_device *dev);
+static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
+static void s2io_link(struct s2io_nic * sp, int link);
+static void s2io_reset(struct s2io_nic * sp);
+static int s2io_poll_msix(struct napi_struct *napi, int budget);
+static int s2io_poll_inta(struct napi_struct *napi, int budget);
+static void s2io_init_pci(struct s2io_nic * sp);
+static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr);
+static void s2io_alarm_handle(unsigned long data);
+static irqreturn_t
+s2io_msix_ring_handle(int irq, void *dev_id);
+static irqreturn_t
+s2io_msix_fifo_handle(int irq, void *dev_id);
+static irqreturn_t s2io_isr(int irq, void *dev_id);
+static int verify_xena_quiescence(struct s2io_nic *sp);
+static const struct ethtool_ops netdev_ethtool_ops;
+static void s2io_set_link(struct work_struct *work);
+static int s2io_set_swapper(struct s2io_nic * sp);
+static void s2io_card_down(struct s2io_nic *nic);
+static int s2io_card_up(struct s2io_nic *nic);
+static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
+ int bit_state);
+static int s2io_add_isr(struct s2io_nic * sp);
+static void s2io_rem_isr(struct s2io_nic * sp);
+
+static void restore_xmsi_data(struct s2io_nic *nic);
+static void do_s2io_store_unicast_mc(struct s2io_nic *sp);
+static void do_s2io_restore_unicast_mc(struct s2io_nic *sp);
+static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset);
+static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr);
+static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int offset);
+static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr);
+
+static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
+ u8 **tcp, u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
+ struct s2io_nic *sp);
+static void clear_lro_session(struct lro *lro);
+static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag);
+static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro);
+static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
+ struct sk_buff *skb, u32 tcp_len);
+static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring);
+
+static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev);
+static void s2io_io_resume(struct pci_dev *pdev);
+
+#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size
+#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size
+#define s2io_offload_type(skb) skb_shinfo(skb)->gso_type
+
+#define S2IO_PARM_INT(X, def_val) \
+ static unsigned int X = def_val;\
+ module_param(X , uint, 0);
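+
+/*
+ * Expansion sketch (editor's illustration): S2IO_PARM_INT(lro, 0) would
+ * declare "static unsigned int lro = 0;" and register it as an unsigned
+ * integer module parameter with permissions 0 (not visible in sysfs).
+ */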
+
+#endif /* _S2IO_H */
diff --git a/drivers/net/ethernet/neterion/vxge/Makefile b/drivers/net/ethernet/neterion/vxge/Makefile
new file mode 100644
index 000000000000..b625e2c503f5
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for Exar Corp's X3100 Series 10 GbE PCIe I/O
+# Virtualized Server Adapter linux driver
+
+obj-$(CONFIG_VXGE) += vxge.o
+
+vxge-objs := vxge-config.o vxge-traffic.o vxge-ethtool.o vxge-main.o
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
new file mode 100644
index 000000000000..98e2c10ae08b
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -0,0 +1,5122 @@
+/******************************************************************************
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ *
+ * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
+ * Virtualized Server Adapter.
+ * Copyright(c) 2002-2010 Exar Corp.
+ ******************************************************************************/
+#include <linux/vmalloc.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/slab.h>
+
+#include "vxge-traffic.h"
+#include "vxge-config.h"
+#include "vxge-main.h"
+
+#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
+ status = __vxge_hw_vpath_stats_access(vpath, \
+ VXGE_HW_STATS_OP_READ, \
+ offset, \
+ &val64); \
+ if (status != VXGE_HW_OK) \
+ return status; \
+}
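+
+/*
+ * Usage note (editor's comment): this macro expands to a statement that
+ * can return from the *enclosing* function, so it may only be used in a
+ * function returning enum vxge_hw_status, with locals named vpath,
+ * status and val64 in scope, e.g. (offset value hypothetical):
+ *
+ *	VXGE_HW_VPATH_STATS_PIO_READ(some_stat_offset);
+ */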
+
+static void
+vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
+{
+ u64 val64;
+
+ val64 = readq(&vp_reg->rxmac_vcfg0);
+ val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
+ writeq(val64, &vp_reg->rxmac_vcfg0);
+ val64 = readq(&vp_reg->rxmac_vcfg0);
+}
+
+/*
+ * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
+ */
+int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct __vxge_hw_virtualpath *vpath;
+ u64 val64, rxd_count, rxd_spat;
+ int count = 0, total_count = 0;
+
+ vpath = &hldev->virtual_paths[vp_id];
+ vp_reg = vpath->vp_reg;
+
+ vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
+
+ /* Check that the ring controller for this vpath has enough free RxDs
+ * to send frames to the host. This is done by reading the
+ * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
+ * RXD_SPAT value for the vpath.
+ */
+ val64 = readq(&vp_reg->prc_cfg6);
+ rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
+	/* Use a factor of 2 when comparing rxd_count against rxd_spat for
+	 * some headroom.
+ */
+ rxd_spat *= 2;
+
+ do {
+ mdelay(1);
+
+ rxd_count = readq(&vp_reg->prc_rxd_doorbell);
+
+ /* Check that the ring controller for this vpath does
+ * not have any frame in its pipeline.
+ */
+ val64 = readq(&vp_reg->frm_in_progress_cnt);
+ if ((rxd_count <= rxd_spat) || (val64 > 0))
+ count = 0;
+ else
+ count++;
+ total_count++;
+ } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
+ (total_count < VXGE_HW_MAX_POLLING_COUNT));
+
+ if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
+ printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
+ __func__);
+
+ return total_count;
+}
+
+/* vxge_hw_device_wait_receive_idle - This function waits until all frames
+ * stored in the frame buffer for each vpath assigned to the given
+ * function (hldev) have been sent to the host.
+ */
+void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
+{
+ int i, total_count = 0;
+
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
+ continue;
+
+ total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
+ if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
+ break;
+ }
+}
+
+/*
+ * __vxge_hw_device_register_poll
+ * Will poll certain register for specified amount of time.
+ * Will poll until masked bit is not cleared.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
+{
+ u64 val64;
+ u32 i = 0;
+ enum vxge_hw_status ret = VXGE_HW_FAIL;
+
+ udelay(10);
+
+ do {
+ val64 = readq(reg);
+ if (!(val64 & mask))
+ return VXGE_HW_OK;
+ udelay(100);
+ } while (++i <= 9);
+
+ i = 0;
+ do {
+ val64 = readq(reg);
+ if (!(val64 & mask))
+ return VXGE_HW_OK;
+ mdelay(1);
+ } while (++i <= max_millis);
+
+ return ret;
+}
+
+static inline enum vxge_hw_status
+__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
+ u64 mask, u32 max_millis)
+{
+ __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
+ wmb();
+ __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
+ wmb();
+
+ return __vxge_hw_device_register_poll(addr, mask, max_millis);
+}
+
+static enum vxge_hw_status
+vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
+ u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
+ u64 *steer_ctrl)
+{
+ struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
+ enum vxge_hw_status status;
+ u64 val64;
+ u32 retry = 0, max_retry = 3;
+
+ spin_lock(&vpath->lock);
+ if (!vpath->vp_open) {
+ spin_unlock(&vpath->lock);
+ max_retry = 100;
+ }
+
+ writeq(*data0, &vp_reg->rts_access_steer_data0);
+ writeq(*data1, &vp_reg->rts_access_steer_data1);
+ wmb();
+
+ val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
+ *steer_ctrl;
+
+ status = __vxge_hw_pio_mem_write64(val64,
+ &vp_reg->rts_access_steer_ctrl,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
+ VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+
+	/* __vxge_hw_device_register_poll() can udelay for a significant
+	 * amount of time, blocking other processes from the CPU. If it delays
+	 * for ~5 secs, an NMI error can occur. A way around this is to give
+	 * up the processor via msleep, but that is not allowed while a lock
+	 * is held. So retry only briefly while the vpath is open (lock held);
+	 * otherwise drop the lock and sleep between retries until the
+	 * firmware operation has completed or timed out.
+ */
+ while ((status != VXGE_HW_OK) && retry++ < max_retry) {
+ if (!vpath->vp_open)
+ msleep(20);
+ status = __vxge_hw_device_register_poll(
+ &vp_reg->rts_access_steer_ctrl,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
+ VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+ }
+
+ if (status != VXGE_HW_OK)
+ goto out;
+
+ val64 = readq(&vp_reg->rts_access_steer_ctrl);
+ if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
+ *data0 = readq(&vp_reg->rts_access_steer_data0);
+ *data1 = readq(&vp_reg->rts_access_steer_data1);
+ *steer_ctrl = val64;
+ } else
+ status = VXGE_HW_FAIL;
+
+out:
+ if (vpath->vp_open)
+ spin_unlock(&vpath->lock);
+ return status;
+}
+
+enum vxge_hw_status
+vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
+ u32 *minor, u32 *build)
+{
+ u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status;
+
+ vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_UPGRADE_ACTION,
+ VXGE_HW_FW_UPGRADE_MEMO,
+ VXGE_HW_FW_UPGRADE_OFFSET_READ,
+ &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ return status;
+
+ *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
+ *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
+ *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
+
+ return status;
+}
+
+enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
+{
+ u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status;
+ u32 ret;
+
+ vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_UPGRADE_ACTION,
+ VXGE_HW_FW_UPGRADE_MEMO,
+ VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
+ &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
+ goto exit;
+ }
+
+ ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
+ if (ret != 1) {
+ vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
+ __func__, ret);
+ status = VXGE_HW_FAIL;
+ }
+
+exit:
+ return status;
+}
+
+enum vxge_hw_status
+vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
+{
+ u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status;
+ int ret_code, sec_code;
+
+ vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+ /* send upgrade start command */
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_UPGRADE_ACTION,
+ VXGE_HW_FW_UPGRADE_MEMO,
+ VXGE_HW_FW_UPGRADE_OFFSET_START,
+ &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
+ __func__);
+ return status;
+ }
+
+ /* Transfer fw image to adapter 16 bytes at a time */
+ for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
+ steer_ctrl = 0;
+
+ /* The next 128bits of fwdata to be loaded onto the adapter */
+ data0 = *((u64 *)fwdata);
+ data1 = *((u64 *)fwdata + 1);
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_UPGRADE_ACTION,
+ VXGE_HW_FW_UPGRADE_MEMO,
+ VXGE_HW_FW_UPGRADE_OFFSET_SEND,
+ &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
+ __func__);
+ goto out;
+ }
+
+ ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
+ switch (ret_code) {
+ case VXGE_HW_FW_UPGRADE_OK:
+ /* All OK, send next 16 bytes. */
+ break;
+ case VXGE_FW_UPGRADE_BYTES2SKIP:
+ /* skip bytes in the stream */
+ fwdata += (data0 >> 8) & 0xFFFFFFFF;
+ break;
+ case VXGE_HW_FW_UPGRADE_DONE:
+ goto out;
+ case VXGE_HW_FW_UPGRADE_ERR:
+ sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
+ switch (sec_code) {
+ case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
+ case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
+ printk(KERN_ERR
+ "corrupted data from .ncf file\n");
+ break;
+ case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
+ case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
+ case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
+ case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
+ case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
+ printk(KERN_ERR "invalid .ncf file\n");
+ break;
+ case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
+ printk(KERN_ERR "buffer overflow\n");
+ break;
+ case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
+ printk(KERN_ERR "failed to flash the image\n");
+ break;
+ case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
+ printk(KERN_ERR
+ "generic error. Unknown error type\n");
+ break;
+ default:
+ printk(KERN_ERR "Unknown error of type %d\n",
+ sec_code);
+ break;
+ }
+ status = VXGE_HW_FAIL;
+ goto out;
+ default:
+ printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
+ status = VXGE_HW_FAIL;
+ goto out;
+ }
+ /* point to next 16 bytes */
+ fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
+ }
+out:
+ return status;
+}
+
+enum vxge_hw_status
+vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
+ struct eprom_image *img)
+{
+ u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status;
+ int i;
+
+ vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+ for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
+ data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
+ data1 = steer_ctrl = 0;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_API_GET_EPROM_REV,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ break;
+
+ img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
+ img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
+ img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
+ img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
+ }
+
+ return status;
+}
+
+/*
+ * __vxge_hw_channel_free - Free memory allocated for channel
+ * This function deallocates memory from the channel and various arrays
+ * in the channel
+ */
+static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
+{
+ kfree(channel->work_arr);
+ kfree(channel->free_arr);
+ kfree(channel->reserve_arr);
+ kfree(channel->orig_arr);
+ kfree(channel);
+}
+
+/*
+ * __vxge_hw_channel_initialize - Initialize a channel
+ * This function initializes a channel by properly setting the
+ * various references
+ */
+static enum vxge_hw_status
+__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
+{
+ u32 i;
+ struct __vxge_hw_virtualpath *vpath;
+
+ vpath = channel->vph->vpath;
+
+ if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
+ for (i = 0; i < channel->length; i++)
+ channel->orig_arr[i] = channel->reserve_arr[i];
+ }
+
+ switch (channel->type) {
+ case VXGE_HW_CHANNEL_TYPE_FIFO:
+ vpath->fifoh = (struct __vxge_hw_fifo *)channel;
+ channel->stats = &((struct __vxge_hw_fifo *)
+ channel)->stats->common_stats;
+ break;
+ case VXGE_HW_CHANNEL_TYPE_RING:
+ vpath->ringh = (struct __vxge_hw_ring *)channel;
+ channel->stats = &((struct __vxge_hw_ring *)
+ channel)->stats->common_stats;
+ break;
+ default:
+ break;
+ }
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_channel_reset - Resets a channel
+ * This function resets a channel by properly setting the various references
+ */
+static enum vxge_hw_status
+__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
+{
+ u32 i;
+
+ for (i = 0; i < channel->length; i++) {
+ if (channel->reserve_arr != NULL)
+ channel->reserve_arr[i] = channel->orig_arr[i];
+ if (channel->free_arr != NULL)
+ channel->free_arr[i] = NULL;
+ if (channel->work_arr != NULL)
+ channel->work_arr[i] = NULL;
+ }
+ channel->free_ptr = channel->length;
+ channel->reserve_ptr = channel->length;
+ channel->reserve_top = 0;
+ channel->post_index = 0;
+ channel->compl_index = 0;
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_pci_e_init
+ * Initialize certain PCI/PCI-X configuration registers
+ * with recommended values. Save config space for future hw resets.
+ */
+static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
+{
+ u16 cmd = 0;
+
+	/* Set the Parity Error Response bit and SERR in the PCI command register. */
+ pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
+ cmd |= 0x140;
+ pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
+
+ pci_save_state(hldev->pdev);
+}
+
+/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
+ * in progress
+ * This routine polls until the vpath reset-in-progress register reads zero.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
+{
+ enum vxge_hw_status status;
+ status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
+ VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
+ VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+ return status;
+}
+
+/*
+ * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
+ * Sets the swapper bits appropriately for the legacy section.
+ */
+static enum vxge_hw_status
+__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
+{
+ u64 val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ val64 = readq(&legacy_reg->toc_swapper_fb);
+
+ wmb();
+
+ switch (val64) {
+ case VXGE_HW_SWAPPER_INITIAL_VALUE:
+ return status;
+
+ case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
+ writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
+ &legacy_reg->pifm_rd_swap_en);
+ writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
+ &legacy_reg->pifm_rd_flip_en);
+ writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
+ &legacy_reg->pifm_wr_swap_en);
+ writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
+ &legacy_reg->pifm_wr_flip_en);
+ break;
+
+ case VXGE_HW_SWAPPER_BYTE_SWAPPED:
+ writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
+ &legacy_reg->pifm_rd_swap_en);
+ writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
+ &legacy_reg->pifm_wr_swap_en);
+ break;
+
+ case VXGE_HW_SWAPPER_BIT_FLIPPED:
+ writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
+ &legacy_reg->pifm_rd_flip_en);
+ writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
+ &legacy_reg->pifm_wr_flip_en);
+ break;
+ }
+
+ wmb();
+
+ val64 = readq(&legacy_reg->toc_swapper_fb);
+
+ if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
+ status = VXGE_HW_ERR_SWAPPER_CTRL;
+
+ return status;
+}
+
+/*
+ * __vxge_hw_device_toc_get
+ * This routine sets the swapper and reads the toc pointer and returns the
+ * memory mapped address of the toc
+ */
+static struct vxge_hw_toc_reg __iomem *
+__vxge_hw_device_toc_get(void __iomem *bar0)
+{
+ u64 val64;
+ struct vxge_hw_toc_reg __iomem *toc = NULL;
+ enum vxge_hw_status status;
+
+ struct vxge_hw_legacy_reg __iomem *legacy_reg =
+ (struct vxge_hw_legacy_reg __iomem *)bar0;
+
+ status = __vxge_hw_legacy_swapper_set(legacy_reg);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ val64 = readq(&legacy_reg->toc_first_pointer);
+ toc = bar0 + val64;
+exit:
+ return toc;
+}
+
+/*
+ * __vxge_hw_device_reg_addr_get
+ * This routine sets the swapper and reads the toc pointer and initializes the
+ * register location pointers in the device object. It waits until the ric
+ * has finished initializing registers.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
+{
+ u64 val64;
+ u32 i;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ hldev->legacy_reg = hldev->bar0;
+
+ hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
+ if (hldev->toc_reg == NULL) {
+ status = VXGE_HW_FAIL;
+ goto exit;
+ }
+
+ val64 = readq(&hldev->toc_reg->toc_common_pointer);
+ hldev->common_reg = hldev->bar0 + val64;
+
+ val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
+ hldev->mrpcim_reg = hldev->bar0 + val64;
+
+ for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
+ val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
+ hldev->srpcim_reg[i] = hldev->bar0 + val64;
+ }
+
+ for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
+ val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
+ hldev->vpmgmt_reg[i] = hldev->bar0 + val64;
+ }
+
+ for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
+ val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
+ hldev->vpath_reg[i] = hldev->bar0 + val64;
+ }
+
+ val64 = readq(&hldev->toc_reg->toc_kdfc);
+
+ switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
+ case 0:
+		hldev->kdfc = hldev->bar0 + VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64);
+ break;
+ default:
+ break;
+ }
+
+ status = __vxge_hw_device_vpath_reset_in_prog_check(
+ (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
+ * This routine returns the Access Rights of the driver
+ */
+static u32
+__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
+{
+ u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;
+
+ switch (host_type) {
+ case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
+ if (func_id == 0) {
+ access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
+ VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
+ }
+ break;
+ case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
+ access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
+ VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
+ break;
+ case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
+ access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
+ VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
+ break;
+ case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
+ case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
+ case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
+ break;
+ case VXGE_HW_SR_VH_FUNCTION0:
+ case VXGE_HW_VH_NORMAL_FUNCTION:
+ access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
+ break;
+ }
+
+ return access_rights;
+}
+/*
+ * __vxge_hw_device_is_privilaged
+ * This routine checks whether the device function is privileged
+ */
+
+enum vxge_hw_status
+__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
+{
+ if (__vxge_hw_device_access_rights_get(host_type,
+ func_id) &
+ VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
+ return VXGE_HW_OK;
+ else
+ return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
+}
+
+/*
+ * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
+ * Returns the function number of the vpath.
+ */
+static u32
+__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
+{
+ u64 val64;
+
+ val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
+
+ return
+ (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
+}
+
+/*
+ * __vxge_hw_device_host_info_get
+ * This routine returns the host type assignments
+ */
+static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
+{
+ u64 val64;
+ u32 i;
+
+ val64 = readq(&hldev->common_reg->host_type_assignments);
+
+ hldev->host_type =
+ (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
+
+ hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
+
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ if (!(hldev->vpath_assignments & vxge_mBIT(i)))
+ continue;
+
+ hldev->func_id =
+ __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
+
+ hldev->access_rights = __vxge_hw_device_access_rights_get(
+ hldev->host_type, hldev->func_id);
+
+ hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
+ hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
+
+ hldev->first_vp_id = i;
+ break;
+ }
+}
+
+/*
+ * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
+ * link width and signalling rate.
+ */
+static enum vxge_hw_status
+__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
+{
+ struct pci_dev *dev = hldev->pdev;
+ u16 lnk;
+
+ /* Get the negotiated link width and speed from PCI config space */
+ pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk);
+
+ if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
+ return VXGE_HW_ERR_INVALID_PCI_INFO;
+
+ switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
+ case PCIE_LNK_WIDTH_RESRV:
+ case PCIE_LNK_X1:
+ case PCIE_LNK_X2:
+ case PCIE_LNK_X4:
+ case PCIE_LNK_X8:
+ break;
+ default:
+ return VXGE_HW_ERR_INVALID_PCI_INFO;
+ }
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_initialize
+ * Initialize Titan-V hardware.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
+ hldev->func_id)) {
+ /* Validate the pci-e link width and speed */
+ status = __vxge_hw_verify_pci_e_info(hldev);
+ if (status != VXGE_HW_OK)
+ goto exit;
+ }
+
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_fw_ver_get - Get the fw version
+ * Returns FW Version
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_device_hw_info *hw_info)
+{
+ struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
+ struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
+ struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
+ struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
+ u64 data0, data1 = 0, steer_ctrl = 0;
+ enum vxge_hw_status status;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ fw_date->day =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
+ fw_date->month =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
+ fw_date->year =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
+
+ snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
+ fw_date->month, fw_date->day, fw_date->year);
+
+ fw_version->major =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
+ fw_version->minor =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
+ fw_version->build =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
+
+ snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
+ fw_version->major, fw_version->minor, fw_version->build);
+
+ flash_date->day =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
+ flash_date->month =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
+ flash_date->year =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
+
+ snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
+ flash_date->month, flash_date->day, flash_date->year);
+
+ flash_version->major =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
+ flash_version->minor =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
+ flash_version->build =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
+
+ snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
+ flash_version->major, flash_version->minor,
+ flash_version->build);
+
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_card_info_get - Get the serial numbers,
+ * part number and product description.
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_device_hw_info *hw_info)
+{
+ enum vxge_hw_status status;
+ u64 data0, data1 = 0, steer_ctrl = 0;
+ u8 *serial_number = hw_info->serial_number;
+ u8 *part_number = hw_info->part_number;
+ u8 *product_desc = hw_info->product_desc;
+ u32 i, j = 0;
+
+ data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ return status;
+
+ ((u64 *)serial_number)[0] = be64_to_cpu(data0);
+ ((u64 *)serial_number)[1] = be64_to_cpu(data1);
+
+ data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
+ data1 = steer_ctrl = 0;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ return status;
+
+ ((u64 *)part_number)[0] = be64_to_cpu(data0);
+ ((u64 *)part_number)[1] = be64_to_cpu(data1);
+
+ for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
+ i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
+ data0 = i;
+ data1 = steer_ctrl = 0;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ return status;
+
+ ((u64 *)product_desc)[j++] = be64_to_cpu(data0);
+ ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
+ }
+
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
+ * Returns pci function mode
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_device_hw_info *hw_info)
+{
+	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+	enum vxge_hw_status status;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_API_GET_FUNC_MODE,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ return status;
+
+ hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
+ * from MAC address table.
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
+ u8 *macaddr, u8 *macaddr_mask)
+{
+ u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
+ data0 = 0, data1 = 0, steer_ctrl = 0;
+ enum vxge_hw_status status;
+ int i;
+
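+	/* Walk the DA (destination address) table from its first entry
+	 * until a valid unicast MAC address is found. */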
+ do {
+ status = vxge_hw_vpath_fw_api(vpath, action,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
+ data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
+ data1);
+
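+		/* data0 holds the 6-byte MAC address in its low 48 bits,
+		 * first octet most significant; data1 holds the mask the
+		 * same way. Unpack both least-significant byte first. */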
+ for (i = ETH_ALEN; i > 0; i--) {
+ macaddr[i - 1] = (u8) (data0 & 0xFF);
+ data0 >>= 8;
+
+ macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
+ data1 >>= 8;
+ }
+
+ action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
+		data0 = 0;
+		data1 = 0;
+		steer_ctrl = 0;
+
+ } while (!is_valid_ether_addr(macaddr));
+exit:
+ return status;
+}
+
+/**
+ * vxge_hw_device_hw_info_get - Get the hw information
+ * @bar0: Mapped PCI BAR0 of the device
+ * @hw_info: Buffer to return the hardware information in
+ *
+ * Returns the vpath mask that has the bits set for each vpath allocated
+ * for the driver, FW version information, and the first MAC address for
+ * each vpath
+ */
+enum vxge_hw_status __devinit
+vxge_hw_device_hw_info_get(void __iomem *bar0,
+ struct vxge_hw_device_hw_info *hw_info)
+{
+ u32 i;
+ u64 val64;
+ struct vxge_hw_toc_reg __iomem *toc;
+ struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
+ struct vxge_hw_common_reg __iomem *common_reg;
+ struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
+ enum vxge_hw_status status;
+ struct __vxge_hw_virtualpath vpath;
+
+ memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
+
+ toc = __vxge_hw_device_toc_get(bar0);
+ if (toc == NULL) {
+ status = VXGE_HW_ERR_CRITICAL;
+ goto exit;
+ }
+
+ val64 = readq(&toc->toc_common_pointer);
+ common_reg = bar0 + val64;
+
+ status = __vxge_hw_device_vpath_reset_in_prog_check(
+ (u64 __iomem *)&common_reg->vpath_rst_in_prog);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ hw_info->vpath_mask = readq(&common_reg->vpath_assignments);
+
+ val64 = readq(&common_reg->host_type_assignments);
+
+ hw_info->host_type =
+ (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
+
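+	/* Read the per-device information (PCI function mode, firmware
+	 * and flash versions, card info) once, from the first vpath
+	 * assigned to this function. */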
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
+ continue;
+
+ val64 = readq(&toc->toc_vpmgmt_pointer[i]);
+
+ vpmgmt_reg = bar0 + val64;
+
+ hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
+ if (__vxge_hw_device_access_rights_get(hw_info->host_type,
+ hw_info->func_id) &
+ VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
+
+ val64 = readq(&toc->toc_mrpcim_pointer);
+
+ mrpcim_reg = bar0 + val64;
+
+ writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
+ wmb();
+ }
+
+ val64 = readq(&toc->toc_vpath_pointer[i]);
+
+ spin_lock_init(&vpath.lock);
+ vpath.vp_reg = bar0 + val64;
+ vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
+
+ status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ break;
+ }
+
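+	/* Collect the first MAC address configured for each assigned vpath. */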
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
+ continue;
+
+ val64 = readq(&toc->toc_vpath_pointer[i]);
+ vpath.vp_reg = bar0 + val64;
+ vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
+
+ status = __vxge_hw_vpath_addr_get(&vpath,
+ hw_info->mac_addrs[i],
+ hw_info->mac_addr_masks[i]);
+ if (status != VXGE_HW_OK)
+ goto exit;
+ }
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_blockpool_destroy - Deallocates the block pool
+ */
+static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
+{
+ struct __vxge_hw_device *hldev;
+ struct list_head *p, *n;
+
+	if (blockpool == NULL)
+		return;
+
+ hldev = blockpool->hldev;
+
+ list_for_each_safe(p, n, &blockpool->free_block_list) {
+ pci_unmap_single(hldev->pdev,
+ ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
+ ((struct __vxge_hw_blockpool_entry *)p)->length,
+ PCI_DMA_BIDIRECTIONAL);
+
+ vxge_os_dma_free(hldev->pdev,
+ ((struct __vxge_hw_blockpool_entry *)p)->memblock,
+ &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
+
+ list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
+ kfree(p);
+ blockpool->pool_size--;
+ }
+
+ list_for_each_safe(p, n, &blockpool->free_entry_list) {
+ list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
+ kfree((void *)p);
+ }
+}
+
+/*
+ * __vxge_hw_blockpool_create - Create block pool
+ */
+static enum vxge_hw_status
+__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
+ struct __vxge_hw_blockpool *blockpool,
+ u32 pool_size,
+ u32 pool_max)
+{
+ u32 i;
+ struct __vxge_hw_blockpool_entry *entry = NULL;
+ void *memblock;
+ dma_addr_t dma_addr;
+ struct pci_dev *dma_handle;
+ struct pci_dev *acc_handle;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if (blockpool == NULL) {
+ status = VXGE_HW_FAIL;
+ goto blockpool_create_exit;
+ }
+
+ blockpool->hldev = hldev;
+ blockpool->block_size = VXGE_HW_BLOCK_SIZE;
+ blockpool->pool_size = 0;
+ blockpool->pool_max = pool_max;
+ blockpool->req_out = 0;
+
+ INIT_LIST_HEAD(&blockpool->free_block_list);
+ INIT_LIST_HEAD(&blockpool->free_entry_list);
+
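+	/* Pre-allocate pool_size + pool_max entry descriptors up front;
+	 * blocks added to the pool later reuse them from free_entry_list. */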
+ for (i = 0; i < pool_size + pool_max; i++) {
+ entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
+ GFP_KERNEL);
+ if (entry == NULL) {
+ __vxge_hw_blockpool_destroy(blockpool);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto blockpool_create_exit;
+ }
+ list_add(&entry->item, &blockpool->free_entry_list);
+ }
+
+ for (i = 0; i < pool_size; i++) {
+ memblock = vxge_os_dma_malloc(
+ hldev->pdev,
+ VXGE_HW_BLOCK_SIZE,
+ &dma_handle,
+ &acc_handle);
+ if (memblock == NULL) {
+ __vxge_hw_blockpool_destroy(blockpool);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto blockpool_create_exit;
+ }
+
+ dma_addr = pci_map_single(hldev->pdev, memblock,
+ VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(pci_dma_mapping_error(hldev->pdev,
+ dma_addr))) {
+ vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
+ __vxge_hw_blockpool_destroy(blockpool);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto blockpool_create_exit;
+ }
+
+ if (!list_empty(&blockpool->free_entry_list))
+ entry = (struct __vxge_hw_blockpool_entry *)
+ list_first_entry(&blockpool->free_entry_list,
+ struct __vxge_hw_blockpool_entry,
+ item);
+
+ if (entry == NULL)
+ entry =
+ kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
+ GFP_KERNEL);
+ if (entry != NULL) {
+ list_del(&entry->item);
+ entry->length = VXGE_HW_BLOCK_SIZE;
+ entry->memblock = memblock;
+ entry->dma_addr = dma_addr;
+ entry->acc_handle = acc_handle;
+ entry->dma_handle = dma_handle;
+ list_add(&entry->item,
+ &blockpool->free_block_list);
+ blockpool->pool_size++;
+ } else {
+ __vxge_hw_blockpool_destroy(blockpool);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto blockpool_create_exit;
+ }
+ }
+
+blockpool_create_exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_device_fifo_config_check - Check fifo configuration.
+ * Check the fifo configuration
+ */
+static enum vxge_hw_status
+__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
+{
+ if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
+ (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
+ return VXGE_HW_BADCFG_FIFO_BLOCKS;
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_vpath_config_check - Check vpath configuration.
+ * Check the vpath configuration
+ */
+static enum vxge_hw_status
+__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
+{
+ enum vxge_hw_status status;
+
+ if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
+ (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
+ return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
+
+ status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
+ if (status != VXGE_HW_OK)
+ return status;
+
+ if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
+ ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
+ (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
+ return VXGE_HW_BADCFG_VPATH_MTU;
+
+ if ((vp_config->rpa_strip_vlan_tag !=
+ VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
+ (vp_config->rpa_strip_vlan_tag !=
+ VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
+ (vp_config->rpa_strip_vlan_tag !=
+ VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
+ return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_config_check - Check device configuration.
+ * Check the device configuration
+ */
+static enum vxge_hw_status
+__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
+{
+ u32 i;
+ enum vxge_hw_status status;
+
+ if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
+ (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
+ (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
+ (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
+ return VXGE_HW_BADCFG_INTR_MODE;
+
+ if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
+ (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
+ return VXGE_HW_BADCFG_RTS_MAC_EN;
+
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ status = __vxge_hw_device_vpath_config_check(
+ &new_config->vp_config[i]);
+ if (status != VXGE_HW_OK)
+ return status;
+ }
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * vxge_hw_device_initialize - Initialize Titan device.
+ * Initialize Titan device. The driver cooperates with the OS to find a new
+ * Titan device and to locate its PCI and memory spaces.
+ *
+ * The HW layer allocates sizeof(struct __vxge_hw_device) bytes, performs
+ * the Titan hardware initialization, and returns the handle in @devh.
+ */
+enum vxge_hw_status __devinit
+vxge_hw_device_initialize(
+ struct __vxge_hw_device **devh,
+ struct vxge_hw_device_attr *attr,
+ struct vxge_hw_device_config *device_config)
+{
+ u32 i;
+ u32 nblocks = 0;
+ struct __vxge_hw_device *hldev = NULL;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ status = __vxge_hw_device_config_check(device_config);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ hldev = vzalloc(sizeof(struct __vxge_hw_device));
+ if (hldev == NULL) {
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ hldev->magic = VXGE_HW_DEVICE_MAGIC;
+
+ vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
+
+ /* apply config */
+ memcpy(&hldev->config, device_config,
+ sizeof(struct vxge_hw_device_config));
+
+ hldev->bar0 = attr->bar0;
+ hldev->pdev = attr->pdev;
+
+ hldev->uld_callbacks = attr->uld_callbacks;
+
+ __vxge_hw_device_pci_e_init(hldev);
+
+ status = __vxge_hw_device_reg_addr_get(hldev);
+ if (status != VXGE_HW_OK) {
+ vfree(hldev);
+ goto exit;
+ }
+
+ __vxge_hw_device_host_info_get(hldev);
+
+ /* Incrementing for stats blocks */
+ nblocks++;
+
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ if (!(hldev->vpath_assignments & vxge_mBIT(i)))
+ continue;
+
+ if (device_config->vp_config[i].ring.enable ==
+ VXGE_HW_RING_ENABLE)
+ nblocks += device_config->vp_config[i].ring.ring_blocks;
+
+ if (device_config->vp_config[i].fifo.enable ==
+ VXGE_HW_FIFO_ENABLE)
+ nblocks += device_config->vp_config[i].fifo.fifo_blocks;
+ nblocks++;
+ }
+
+ if (__vxge_hw_blockpool_create(hldev,
+ &hldev->block_pool,
+ device_config->dma_blockpool_initial + nblocks,
+ device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {
+
+ vxge_hw_device_terminate(hldev);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ status = __vxge_hw_device_initialize(hldev);
+ if (status != VXGE_HW_OK) {
+ vxge_hw_device_terminate(hldev);
+ goto exit;
+ }
+
+ *devh = hldev;
+exit:
+ return status;
+}
+
+/*
+ * vxge_hw_device_terminate - Terminate Titan device.
+ * Terminate HW device.
+ */
+void
+vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
+{
+ vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);
+
+ hldev->magic = VXGE_HW_DEVICE_DEAD;
+ __vxge_hw_blockpool_destroy(&hldev->block_pool);
+ vfree(hldev);
+}
+
+/*
+ * __vxge_hw_vpath_stats_access - Read or write a vpath statistic at the
+ * given offset
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
+ u32 operation, u32 offset, u64 *stat)
+{
+ u64 val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+ status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+ goto vpath_stats_access_exit;
+ }
+
+ vp_reg = vpath->vp_reg;
+
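+	/* Issue the command with the strobe bit set and poll until the
+	 * hardware clears it; for reads, the result is then latched in
+	 * the data register. */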
+ val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
+ VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
+ VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
+
+ status = __vxge_hw_pio_mem_write64(val64,
+ &vp_reg->xmac_stats_access_cmd,
+ VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
+ vpath->hldev->config.device_poll_millis);
+ if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
+ *stat = readq(&vp_reg->xmac_stats_access_data);
+ else
+ *stat = 0;
+
+vpath_stats_access_exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
+{
+ u64 *val64;
+ int i;
+ u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ val64 = (u64 *)vpath_tx_stats;
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+ status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+ goto exit;
+ }
+
+ for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
+ status = __vxge_hw_vpath_stats_access(vpath,
+ VXGE_HW_STATS_OP_READ,
+ offset, val64);
+ if (status != VXGE_HW_OK)
+ goto exit;
+ offset++;
+ val64++;
+ }
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
+{
+ u64 *val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ int i;
+	u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
+
+	val64 = (u64 *)vpath_rx_stats;
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+ status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+ goto exit;
+ }
+ for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
+ status = __vxge_hw_vpath_stats_access(vpath,
+ VXGE_HW_STATS_OP_READ,
+ offset >> 3, val64);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ offset += 8;
+ val64++;
+ }
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_vpath_stats_hw_info *hw_stats)
+{
+ u64 val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+ status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+ goto exit;
+ }
+ vp_reg = vpath->vp_reg;
+
+ val64 = readq(&vp_reg->vpath_debug_stats0);
+ hw_stats->ini_num_mwr_sent =
+ (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
+
+ val64 = readq(&vp_reg->vpath_debug_stats1);
+ hw_stats->ini_num_mrd_sent =
+ (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
+
+ val64 = readq(&vp_reg->vpath_debug_stats2);
+ hw_stats->ini_num_cpl_rcvd =
+ (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
+
+ val64 = readq(&vp_reg->vpath_debug_stats3);
+ hw_stats->ini_num_mwr_byte_sent =
+ VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
+
+ val64 = readq(&vp_reg->vpath_debug_stats4);
+ hw_stats->ini_num_cpl_byte_rcvd =
+ VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
+
+ val64 = readq(&vp_reg->vpath_debug_stats5);
+ hw_stats->wrcrdtarb_xoff =
+ (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
+
+ val64 = readq(&vp_reg->vpath_debug_stats6);
+ hw_stats->rdcrdtarb_xoff =
+ (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
+
+ val64 = readq(&vp_reg->vpath_genstats_count01);
+ hw_stats->vpath_genstats_count0 =
+ (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
+ val64);
+
+ val64 = readq(&vp_reg->vpath_genstats_count01);
+ hw_stats->vpath_genstats_count1 =
+ (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
+ val64);
+
+ val64 = readq(&vp_reg->vpath_genstats_count23);
+ hw_stats->vpath_genstats_count2 =
+ (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
+ val64);
+
+	val64 = readq(&vp_reg->vpath_genstats_count23);
+ hw_stats->vpath_genstats_count3 =
+ (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
+ val64);
+
+ val64 = readq(&vp_reg->vpath_genstats_count4);
+ hw_stats->vpath_genstats_count4 =
+ (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
+ val64);
+
+ val64 = readq(&vp_reg->vpath_genstats_count5);
+ hw_stats->vpath_genstats_count5 =
+ (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
+ val64);
+
+ status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ VXGE_HW_VPATH_STATS_PIO_READ(
+ VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
+
+ hw_stats->prog_event_vnum0 =
+ (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
+
+ hw_stats->prog_event_vnum1 =
+ (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
+
+ VXGE_HW_VPATH_STATS_PIO_READ(
+ VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
+
+ hw_stats->prog_event_vnum2 =
+ (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
+
+ hw_stats->prog_event_vnum3 =
+ (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
+
+ val64 = readq(&vp_reg->rx_multi_cast_stats);
+ hw_stats->rx_multi_cast_frame_discard =
+ (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
+
+ val64 = readq(&vp_reg->rx_frm_transferred);
+ hw_stats->rx_frm_transferred =
+ (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
+
+ val64 = readq(&vp_reg->rxd_returned);
+ hw_stats->rxd_returned =
+ (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
+
+ val64 = readq(&vp_reg->dbg_stats_rx_mpa);
+ hw_stats->rx_mpa_len_fail_frms =
+ (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
+ hw_stats->rx_mpa_mrk_fail_frms =
+ (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
+ hw_stats->rx_mpa_crc_fail_frms =
+ (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
+
+ val64 = readq(&vp_reg->dbg_stats_rx_fau);
+ hw_stats->rx_permitted_frms =
+ (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
+ hw_stats->rx_vp_reset_discarded_frms =
+ (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
+ hw_stats->rx_wol_frms =
+ (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
+
+ val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
+ hw_stats->tx_vp_reset_discarded_frms =
+ (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
+ val64);
+exit:
+ return status;
+}
+
+/*
+ * vxge_hw_device_stats_get - Get the device hw statistics.
+ * Returns the vpath h/w stats for the device.
+ */
+enum vxge_hw_status
+vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
+ struct vxge_hw_device_stats_hw_info *hw_stats)
+{
+ u32 i;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
+ (hldev->virtual_paths[i].vp_open ==
+ VXGE_HW_VP_NOT_OPEN))
+ continue;
+
+ memcpy(hldev->virtual_paths[i].hw_stats_sav,
+ hldev->virtual_paths[i].hw_stats,
+ sizeof(struct vxge_hw_vpath_stats_hw_info));
+
+ status = __vxge_hw_vpath_stats_get(
+ &hldev->virtual_paths[i],
+ hldev->virtual_paths[i].hw_stats);
+ }
+
+ memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
+ sizeof(struct vxge_hw_device_stats_hw_info));
+
+ return status;
+}
+
+/*
+ * vxge_hw_driver_stats_get - Get the device sw statistics.
+ * Returns the vpath s/w stats for the device.
+ */
+enum vxge_hw_status vxge_hw_driver_stats_get(
+ struct __vxge_hw_device *hldev,
+ struct vxge_hw_device_stats_sw_info *sw_stats)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
+ sizeof(struct vxge_hw_device_stats_sw_info));
+
+ return status;
+}
+
+/*
+ * vxge_hw_mrpcim_stats_access - Read or write a statistic at the given
+ * location and offset
+ */
+enum vxge_hw_status
+vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
+ u32 operation, u32 location, u32 offset, u64 *stat)
+{
+ u64 val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ status = __vxge_hw_device_is_privilaged(hldev->host_type,
+ hldev->func_id);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
+ VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
+ VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
+ VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);
+
+ status = __vxge_hw_pio_mem_write64(val64,
+ &hldev->mrpcim_reg->xmac_stats_sys_cmd,
+ VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
+ hldev->config.device_poll_millis);
+
+ if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
+ *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
+ else
+ *stat = 0;
+exit:
+ return status;
+}
+
+/*
+ * vxge_hw_device_xmac_aggr_stats_get - Get the statistics of an
+ * aggregate port
+ */
+static enum vxge_hw_status
+vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
+ struct vxge_hw_xmac_aggr_stats *aggr_stats)
+{
+ u64 *val64;
+ int i;
+ u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ val64 = (u64 *)aggr_stats;
+
+ status = __vxge_hw_device_is_privilaged(hldev->host_type,
+ hldev->func_id);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
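+	/* Aggregate-port stat blocks are 104 bytes apart; the command
+	 * takes a 64-bit word offset, hence the shift by 3. */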
+ for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
+ status = vxge_hw_mrpcim_stats_access(hldev,
+ VXGE_HW_STATS_OP_READ,
+ VXGE_HW_STATS_LOC_AGGR,
+ ((offset + (104 * port)) >> 3), val64);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ offset += 8;
+ val64++;
+ }
+exit:
+ return status;
+}
+
+/*
+ * vxge_hw_device_xmac_port_stats_get - Get the statistics of a port
+ */
+static enum vxge_hw_status
+vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
+ struct vxge_hw_xmac_port_stats *port_stats)
+{
+	u64 *val64;
+	enum vxge_hw_status status = VXGE_HW_OK;
+	int i;
+	u32 offset = 0x0;
+
+	val64 = (u64 *)port_stats;
+
+ status = __vxge_hw_device_is_privilaged(hldev->host_type,
+ hldev->func_id);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
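+	/* Port stat blocks are 608 bytes apart, again passed to the
+	 * command as a 64-bit word offset. */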
+ for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
+ status = vxge_hw_mrpcim_stats_access(hldev,
+ VXGE_HW_STATS_OP_READ,
+ VXGE_HW_STATS_LOC_AGGR,
+ ((offset + (608 * port)) >> 3), val64);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ offset += 8;
+ val64++;
+ }
+
+exit:
+ return status;
+}
+
+/*
+ * vxge_hw_device_xmac_stats_get - Get the XMAC statistics for both
+ * aggregate ports, every MAC port and every deployed vpath
+ */
+enum vxge_hw_status
+vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
+ struct vxge_hw_xmac_stats *xmac_stats)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ u32 i;
+
+ status = vxge_hw_device_xmac_aggr_stats_get(hldev,
+ 0, &xmac_stats->aggr_stats[0]);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ status = vxge_hw_device_xmac_aggr_stats_get(hldev,
+ 1, &xmac_stats->aggr_stats[1]);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
+
+ status = vxge_hw_device_xmac_port_stats_get(hldev,
+ i, &xmac_stats->port_stats[i]);
+ if (status != VXGE_HW_OK)
+ goto exit;
+ }
+
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+
+ if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
+ continue;
+
+ status = __vxge_hw_vpath_xmac_tx_stats_get(
+ &hldev->virtual_paths[i],
+ &xmac_stats->vpath_tx_stats[i]);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ status = __vxge_hw_vpath_xmac_rx_stats_get(
+ &hldev->virtual_paths[i],
+ &xmac_stats->vpath_rx_stats[i]);
+ if (status != VXGE_HW_OK)
+ goto exit;
+ }
+exit:
+ return status;
+}
+
+/*
+ * vxge_hw_device_debug_set - Set the debug module, level and timestamp
+ * This routine is used to dynamically change the debug output
+ */
+void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
+ enum vxge_debug_level level, u32 mask)
+{
+ if (hldev == NULL)
+ return;
+
+#if defined(VXGE_DEBUG_TRACE_MASK) || \
+ defined(VXGE_DEBUG_ERR_MASK)
+ hldev->debug_module_mask = mask;
+ hldev->debug_level = level;
+#endif
+
+#if defined(VXGE_DEBUG_ERR_MASK)
+ hldev->level_err = level & VXGE_ERR;
+#endif
+
+#if defined(VXGE_DEBUG_TRACE_MASK)
+ hldev->level_trace = level & VXGE_TRACE;
+#endif
+}
+
+/*
+ * vxge_hw_device_error_level_get - Get the error level
+ * This routine returns the current error level set
+ */
+u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
+{
+#if defined(VXGE_DEBUG_ERR_MASK)
+ if (hldev == NULL)
+ return VXGE_ERR;
+ else
+ return hldev->level_err;
+#else
+ return 0;
+#endif
+}
+
+/*
+ * vxge_hw_device_trace_level_get - Get the trace level
+ * This routine returns the current trace level set
+ */
+u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
+{
+#if defined(VXGE_DEBUG_TRACE_MASK)
+ if (hldev == NULL)
+ return VXGE_TRACE;
+ else
+ return hldev->level_trace;
+#else
+ return 0;
+#endif
+}
+
+/*
+ * vxge_hw_device_getpause_data - Pause frame generation and reception.
+ * Returns the Pause frame generation and reception capability of the NIC.
+ */
+enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
+ u32 port, u32 *tx, u32 *rx)
+{
+ u64 val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
+ status = VXGE_HW_ERR_INVALID_DEVICE;
+ goto exit;
+ }
+
+ if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
+ status = VXGE_HW_ERR_INVALID_PORT;
+ goto exit;
+ }
+
+ if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
+ status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
+ goto exit;
+ }
+
+ val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
+ if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
+ *tx = 1;
+ if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
+ *rx = 1;
+exit:
+ return status;
+}
+
+/*
+ * vxge_hw_device_setpause_data - set/reset pause frame generation.
+ * It can be used to set or reset Pause frame generation or reception
+ * support of the NIC.
+ */
+enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
+ u32 port, u32 tx, u32 rx)
+{
+ u64 val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
+ status = VXGE_HW_ERR_INVALID_DEVICE;
+ goto exit;
+ }
+
+ if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
+ status = VXGE_HW_ERR_INVALID_PORT;
+ goto exit;
+ }
+
+ status = __vxge_hw_device_is_privilaged(hldev->host_type,
+ hldev->func_id);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
+ if (tx)
+ val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
+ else
+ val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
+ if (rx)
+ val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
+ else
+ val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
+
+ writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
+exit:
+ return status;
+}
+
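+/*
+ * vxge_hw_device_link_width_get - Return the negotiated PCIe link width
+ * The negotiated link width lives in bits 9:4 of the PCIe Link Status
+ * register, hence the mask and the shift by 4.
+ */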
+u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
+{
+ struct pci_dev *dev = hldev->pdev;
+ u16 lnk;
+
+ pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk);
+ return (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
+}
+
+/*
+ * __vxge_hw_ring_block_memblock_idx - Return the memblock index
+ * This function returns the index of memory block
+ */
+static inline u32
+__vxge_hw_ring_block_memblock_idx(u8 *block)
+{
+ return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
+}
+
+/*
+ * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
+ * This function sets index to a memory block
+ */
+static inline void
+__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
+{
+ *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
+}
+
+/*
+ * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
+ * in RxD block
+ * Sets the next block pointer in RxD block
+ */
+static inline void
+__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
+{
+ *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
+}
+
+/*
+ * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
+ * first block
+ * Returns the dma address of the first RxD block
+ */
+static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
+{
+ struct vxge_hw_mempool_dma *dma_object;
+
+ dma_object = ring->mempool->memblocks_dma_arr;
+ vxge_assert(dma_object != NULL);
+
+ return dma_object->addr;
+}
+
+/*
+ * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
+ * This function returns the dma address of a given item
+ */
+static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
+ void *item)
+{
+ u32 memblock_idx;
+ void *memblock;
+ struct vxge_hw_mempool_dma *memblock_dma_object;
+ ptrdiff_t dma_item_offset;
+
+ /* get owner memblock index */
+ memblock_idx = __vxge_hw_ring_block_memblock_idx(item);
+
+ /* get owner memblock by memblock index */
+ memblock = mempoolh->memblocks_arr[memblock_idx];
+
+ /* get memblock DMA object by memblock index */
+ memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;
+
+ /* calculate offset in the memblock of this item */
+ dma_item_offset = (u8 *)item - (u8 *)memblock;
+
+ return memblock_dma_object->addr + dma_item_offset;
+}
+
+/*
+ * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
+ * This function points the next-block pointer of RxD block @from at
+ * RxD block @to
+ */
+static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
+ struct __vxge_hw_ring *ring, u32 from,
+ u32 to)
+{
+	u8 *to_item, *from_item;
+ dma_addr_t to_dma;
+
+ /* get "from" RxD block */
+ from_item = mempoolh->items_arr[from];
+ vxge_assert(from_item);
+
+ /* get "to" RxD block */
+ to_item = mempoolh->items_arr[to];
+ vxge_assert(to_item);
+
+	/* get the DMA address of the beginning of the "to" RxD block */
+	to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);
+
+	/* point the next-block pointer of the "from" block at it */
+	__vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
+}
+
+/*
+ * __vxge_hw_ring_mempool_item_alloc - RxD block allocation callback
+ * This callback is passed to __vxge_hw_mempool_create() and formats each
+ * RxD block allocated into the ring's memory pool
+ */
+static void
+__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
+ u32 memblock_index,
+ struct vxge_hw_mempool_dma *dma_object,
+ u32 index, u32 is_last)
+{
+ u32 i;
+ void *item = mempoolh->items_arr[index];
+ struct __vxge_hw_ring *ring =
+ (struct __vxge_hw_ring *)mempoolh->userdata;
+
+ /* format rxds array */
+ for (i = 0; i < ring->rxds_per_block; i++) {
+ void *rxdblock_priv;
+ void *uld_priv;
+ struct vxge_hw_ring_rxd_1 *rxdp;
+
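+		/* RxDs are handed out from the top of the reserve array
+		 * downwards: the n-th RxD overall (0-based) lands at
+		 * reserve_ptr - n - 1. */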
+ u32 reserve_index = ring->channel.reserve_ptr -
+ (index * ring->rxds_per_block + i + 1);
+ u32 memblock_item_idx;
+
+ ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
+ i * ring->rxd_size;
+
+ /* Note: memblock_item_idx is index of the item within
+ * the memblock. For instance, in case of three RxD-blocks
+ * per memblock this value can be 0, 1 or 2. */
+ rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
+ memblock_index, item,
+ &memblock_item_idx);
+
+ rxdp = ring->channel.reserve_arr[reserve_index];
+
+ uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
+
+ /* pre-format Host_Control */
+ rxdp->host_control = (u64)(size_t)uld_priv;
+ }
+
+ __vxge_hw_ring_block_memblock_idx_set(item, memblock_index);
+
+ if (is_last) {
+ /* link last one with first one */
+ __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
+ }
+
+ if (index > 0) {
+ /* link this RxD block with previous one */
+ __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
+ }
+}
+
+/*
+ * vxge_hw_ring_replenish - Initial replenish of RxDs
+ * This function replenishes the RxDs from reserve array to work array
+ */
+enum vxge_hw_status
+vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
+{
+ void *rxd;
+ struct __vxge_hw_channel *channel;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ channel = &ring->channel;
+
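+	/* Post every RxD the reserve array can supply; rxd_init (if set)
+	 * lets the client attach a buffer before each post. */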
+ while (vxge_hw_channel_dtr_count(channel) > 0) {
+
+ status = vxge_hw_ring_rxd_reserve(ring, &rxd);
+
+ vxge_assert(status == VXGE_HW_OK);
+
+ if (ring->rxd_init) {
+ status = ring->rxd_init(rxd, channel->userdata);
+ if (status != VXGE_HW_OK) {
+ vxge_hw_ring_rxd_free(ring, rxd);
+ goto exit;
+ }
+ }
+
+ vxge_hw_ring_rxd_post(ring, rxd);
+ }
+ status = VXGE_HW_OK;
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_channel_allocate - Allocate memory for channel
+ * This function allocates required memory for the channel and various arrays
+ * in the channel
+ */
+static struct __vxge_hw_channel *
+__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
+ enum __vxge_hw_channel_type type,
+ u32 length, u32 per_dtr_space,
+ void *userdata)
+{
+ struct __vxge_hw_channel *channel;
+ struct __vxge_hw_device *hldev;
+ int size = 0;
+ u32 vp_id;
+
+ hldev = vph->vpath->hldev;
+ vp_id = vph->vpath->vp_id;
+
+ switch (type) {
+ case VXGE_HW_CHANNEL_TYPE_FIFO:
+ size = sizeof(struct __vxge_hw_fifo);
+ break;
+ case VXGE_HW_CHANNEL_TYPE_RING:
+ size = sizeof(struct __vxge_hw_ring);
+ break;
+ default:
+ break;
+ }
+
+ channel = kzalloc(size, GFP_KERNEL);
+ if (channel == NULL)
+ goto exit0;
+ INIT_LIST_HEAD(&channel->item);
+
+ channel->common_reg = hldev->common_reg;
+ channel->first_vp_id = hldev->first_vp_id;
+ channel->type = type;
+ channel->devh = hldev;
+ channel->vph = vph;
+ channel->userdata = userdata;
+ channel->per_dtr_space = per_dtr_space;
+ channel->length = length;
+ channel->vp_id = vp_id;
+
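+	/* the work/free/reserve/orig arrays each hold one pointer per
+	 * descriptor */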
+	channel->work_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
+	if (channel->work_arr == NULL)
+		goto exit1;
+
+	channel->free_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
+	if (channel->free_arr == NULL)
+		goto exit1;
+	channel->free_ptr = length;
+
+	channel->reserve_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
+	if (channel->reserve_arr == NULL)
+		goto exit1;
+	channel->reserve_ptr = length;
+	channel->reserve_top = 0;
+
+	channel->orig_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
+	if (channel->orig_arr == NULL)
+		goto exit1;
+
+ return channel;
+exit1:
+ __vxge_hw_channel_free(channel);
+
+exit0:
+ return NULL;
+}
+
+/*
+ * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
+ * Adds a block to block pool
+ */
+static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
+ void *block_addr,
+ u32 length,
+ struct pci_dev *dma_h,
+ struct pci_dev *acc_handle)
+{
+ struct __vxge_hw_blockpool *blockpool;
+ struct __vxge_hw_blockpool_entry *entry = NULL;
+ dma_addr_t dma_addr;
+
+ blockpool = &devh->block_pool;
+
+ if (block_addr == NULL) {
+ blockpool->req_out--;
+ goto exit;
+ }
+
+ dma_addr = pci_map_single(devh->pdev, block_addr, length,
+ PCI_DMA_BIDIRECTIONAL);
+
+ if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
+ vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
+ blockpool->req_out--;
+ goto exit;
+ }
+
+ if (!list_empty(&blockpool->free_entry_list))
+ entry = (struct __vxge_hw_blockpool_entry *)
+ list_first_entry(&blockpool->free_entry_list,
+ struct __vxge_hw_blockpool_entry,
+ item);
+
+ if (entry == NULL)
+ entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
+ else
+ list_del(&entry->item);
+
+ if (entry != NULL) {
+ entry->length = length;
+ entry->memblock = block_addr;
+ entry->dma_addr = dma_addr;
+ entry->acc_handle = acc_handle;
+ entry->dma_handle = dma_h;
+ list_add(&entry->item, &blockpool->free_block_list);
+ blockpool->pool_size++;
+	}
+
+ blockpool->req_out--;
+exit:
+ return;
+}
+
+static inline void
+vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
+{
+ gfp_t flags;
+ void *vaddr;
+
+ if (in_interrupt())
+ flags = GFP_ATOMIC | GFP_DMA;
+ else
+ flags = GFP_KERNEL | GFP_DMA;
+
+	vaddr = kmalloc(size, flags);
+
+ vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
+}
+
+/*
+ * __vxge_hw_blockpool_blocks_add - Request additional blocks
+ */
+static
+void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
+{
+ u32 nreq = 0, i;
+
+ if ((blockpool->pool_size + blockpool->req_out) <
+ VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
+ nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
+ blockpool->req_out += nreq;
+ }
+
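+	/* Each request completes in vxge_hw_blockpool_block_add(), which
+	 * drops req_out again. */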
+ for (i = 0; i < nreq; i++)
+ vxge_os_dma_malloc_async(
+ ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ blockpool->hldev, VXGE_HW_BLOCK_SIZE);
+}
+
+/*
+ * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
+ * Allocates a block of memory of given size, either from block pool
+ * or by calling vxge_os_dma_malloc()
+ */
+static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
+ struct vxge_hw_mempool_dma *dma_object)
+{
+ struct __vxge_hw_blockpool_entry *entry = NULL;
+ struct __vxge_hw_blockpool *blockpool;
+ void *memblock = NULL;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ blockpool = &devh->block_pool;
+
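+	/* The pool only holds blocks of block_size bytes; odd-sized
+	 * requests bypass it and are allocated and mapped directly. */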
+ if (size != blockpool->block_size) {
+
+ memblock = vxge_os_dma_malloc(devh->pdev, size,
+ &dma_object->handle,
+ &dma_object->acc_handle);
+
+ if (memblock == NULL) {
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ dma_object->addr = pci_map_single(devh->pdev, memblock, size,
+ PCI_DMA_BIDIRECTIONAL);
+
+ if (unlikely(pci_dma_mapping_error(devh->pdev,
+ dma_object->addr))) {
+ vxge_os_dma_free(devh->pdev, memblock,
+ &dma_object->acc_handle);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ } else {
+
+ if (!list_empty(&blockpool->free_block_list))
+ entry = (struct __vxge_hw_blockpool_entry *)
+ list_first_entry(&blockpool->free_block_list,
+ struct __vxge_hw_blockpool_entry,
+ item);
+
+ if (entry != NULL) {
+ list_del(&entry->item);
+ dma_object->addr = entry->dma_addr;
+ dma_object->handle = entry->dma_handle;
+ dma_object->acc_handle = entry->acc_handle;
+ memblock = entry->memblock;
+
+ list_add(&entry->item,
+ &blockpool->free_entry_list);
+ blockpool->pool_size--;
+ }
+
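+		/* Taking a block shrinks the pool; request fresh blocks
+		 * if it has fallen below its minimum size. */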
+ if (memblock != NULL)
+ __vxge_hw_blockpool_blocks_add(blockpool);
+ }
+exit:
+ return memblock;
+}
+
+/*
+ * __vxge_hw_blockpool_blocks_remove - Free additional blocks
+ */
+static void
+__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
+{
+ struct list_head *p, *n;
+
+ list_for_each_safe(p, n, &blockpool->free_block_list) {
+
+ if (blockpool->pool_size < blockpool->pool_max)
+ break;
+
+ pci_unmap_single(
+ ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
+ ((struct __vxge_hw_blockpool_entry *)p)->length,
+ PCI_DMA_BIDIRECTIONAL);
+
+ vxge_os_dma_free(
+ ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ ((struct __vxge_hw_blockpool_entry *)p)->memblock,
+ &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
+
+ list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
+
+ list_add(p, &blockpool->free_entry_list);
+
+ blockpool->pool_size--;
+
+ }
+}
+
+/*
+ * __vxge_hw_blockpool_free - Frees the memory allocated with
+ * __vxge_hw_blockpool_malloc
+ */
+static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
+ void *memblock, u32 size,
+ struct vxge_hw_mempool_dma *dma_object)
+{
+ struct __vxge_hw_blockpool_entry *entry = NULL;
+ struct __vxge_hw_blockpool *blockpool;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ blockpool = &devh->block_pool;
+
+ if (size != blockpool->block_size) {
+ pci_unmap_single(devh->pdev, dma_object->addr, size,
+ PCI_DMA_BIDIRECTIONAL);
+ vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
+ } else {
+
+ if (!list_empty(&blockpool->free_entry_list))
+ entry = (struct __vxge_hw_blockpool_entry *)
+ list_first_entry(&blockpool->free_entry_list,
+ struct __vxge_hw_blockpool_entry,
+ item);
+
+ if (entry == NULL)
+ entry = vmalloc(sizeof(
+ struct __vxge_hw_blockpool_entry));
+ else
+ list_del(&entry->item);
+
+ if (entry != NULL) {
+ entry->length = size;
+ entry->memblock = memblock;
+ entry->dma_addr = dma_object->addr;
+ entry->acc_handle = dma_object->acc_handle;
+ entry->dma_handle = dma_object->handle;
+ list_add(&entry->item,
+ &blockpool->free_block_list);
+ blockpool->pool_size++;
+ status = VXGE_HW_OK;
+ } else
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+
+ if (status == VXGE_HW_OK)
+ __vxge_hw_blockpool_blocks_remove(blockpool);
+ }
+}
+
+/*
+ * __vxge_hw_mempool_destroy
+ */
+static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
+{
+	u32 i;
+ struct __vxge_hw_device *devh = mempool->devh;
+
+ for (i = 0; i < mempool->memblocks_allocated; i++) {
+ struct vxge_hw_mempool_dma *dma_object;
+
+ vxge_assert(mempool->memblocks_arr[i]);
+ vxge_assert(mempool->memblocks_dma_arr + i);
+
+ dma_object = mempool->memblocks_dma_arr + i;
+
+ vfree(mempool->memblocks_priv_arr[i]);
+
+ __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
+ mempool->memblock_size, dma_object);
+ }
+
+ vfree(mempool->items_arr);
+ vfree(mempool->memblocks_dma_arr);
+ vfree(mempool->memblocks_priv_arr);
+ vfree(mempool->memblocks_arr);
+ vfree(mempool);
+}
+
+/*
+ * __vxge_hw_mempool_grow
+ * Grow the mempool by up to @num_allocate memory blocks; the number of
+ * blocks actually added is returned in @num_allocated.
+ */
+static enum vxge_hw_status
+__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
+ u32 *num_allocated)
+{
+ u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
+ u32 n_items = mempool->items_per_memblock;
+ u32 start_block_idx = mempool->memblocks_allocated;
+ u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ *num_allocated = 0;
+
+ if (end_block_idx > mempool->memblocks_max) {
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ for (i = start_block_idx; i < end_block_idx; i++) {
+ u32 j;
+ u32 is_last = ((end_block_idx - 1) == i);
+ struct vxge_hw_mempool_dma *dma_object =
+ mempool->memblocks_dma_arr + i;
+ void *the_memblock;
+
+		/* Allocate the memblock's private part. Each DMA memblock
+		 * carries space for its items' private use, as requested by
+		 * the mempool's user. Whenever the mempool grows, the new
+		 * memblock and its private part are allocated together,
+		 * which helps to minimize memory usage. */
+ mempool->memblocks_priv_arr[i] =
+ vzalloc(mempool->items_priv_size * n_items);
+ if (mempool->memblocks_priv_arr[i] == NULL) {
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ /* allocate DMA-capable memblock */
+ mempool->memblocks_arr[i] =
+ __vxge_hw_blockpool_malloc(mempool->devh,
+ mempool->memblock_size, dma_object);
+ if (mempool->memblocks_arr[i] == NULL) {
+ vfree(mempool->memblocks_priv_arr[i]);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ (*num_allocated)++;
+ mempool->memblocks_allocated++;
+
+ memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
+
+ the_memblock = mempool->memblocks_arr[i];
+
+ /* fill the items hash array */
+ for (j = 0; j < n_items; j++) {
+ u32 index = i * n_items + j;
+
+ if (first_time && index >= mempool->items_initial)
+ break;
+
+ mempool->items_arr[index] =
+ ((char *)the_memblock + j*mempool->item_size);
+
+			/* let the caller do further per-item setup */
+ if (mempool->item_func_alloc != NULL)
+ mempool->item_func_alloc(mempool, i,
+ dma_object, index, is_last);
+
+ mempool->items_current = index + 1;
+ }
+
+ if (first_time && mempool->items_current ==
+ mempool->items_initial)
+ break;
+ }
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_mempool_create
+ * This function creates a memory pool object. The pool may grow but will
+ * never shrink. It consists of a number of dynamically allocated blocks
+ * with enough total capacity for @items_initial items. Memory is
+ * DMA-able but the client must map/unmap it before interoperating with
+ * the device.
+ */
+static struct vxge_hw_mempool *
+__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
+ u32 memblock_size,
+ u32 item_size,
+ u32 items_priv_size,
+ u32 items_initial,
+ u32 items_max,
+ const struct vxge_hw_mempool_cbs *mp_callback,
+ void *userdata)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ u32 memblocks_to_allocate;
+ struct vxge_hw_mempool *mempool = NULL;
+ u32 allocated;
+
+ if (memblock_size < item_size) {
+ status = VXGE_HW_FAIL;
+ goto exit;
+ }
+
+ mempool = vzalloc(sizeof(struct vxge_hw_mempool));
+ if (mempool == NULL) {
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ mempool->devh = devh;
+ mempool->memblock_size = memblock_size;
+ mempool->items_max = items_max;
+ mempool->items_initial = items_initial;
+ mempool->item_size = item_size;
+ mempool->items_priv_size = items_priv_size;
+ mempool->item_func_alloc = mp_callback->item_func_alloc;
+ mempool->userdata = userdata;
+
+ mempool->memblocks_allocated = 0;
+
+ mempool->items_per_memblock = memblock_size / item_size;
+
+ mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
+ mempool->items_per_memblock;
+
+ /* allocate array of memblocks */
+ mempool->memblocks_arr =
+ vzalloc(sizeof(void *) * mempool->memblocks_max);
+ if (mempool->memblocks_arr == NULL) {
+ __vxge_hw_mempool_destroy(mempool);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ mempool = NULL;
+ goto exit;
+ }
+
+ /* allocate array of private parts of items per memblocks */
+ mempool->memblocks_priv_arr =
+ vzalloc(sizeof(void *) * mempool->memblocks_max);
+ if (mempool->memblocks_priv_arr == NULL) {
+ __vxge_hw_mempool_destroy(mempool);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ mempool = NULL;
+ goto exit;
+ }
+
+ /* allocate array of memblocks DMA objects */
+ mempool->memblocks_dma_arr =
+ vzalloc(sizeof(struct vxge_hw_mempool_dma) *
+ mempool->memblocks_max);
+ if (mempool->memblocks_dma_arr == NULL) {
+ __vxge_hw_mempool_destroy(mempool);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ mempool = NULL;
+ goto exit;
+ }
+
+ /* allocate hash array of items */
+ mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max);
+ if (mempool->items_arr == NULL) {
+ __vxge_hw_mempool_destroy(mempool);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ mempool = NULL;
+ goto exit;
+ }
+
+ /* calculate initial number of memblocks */
+ memblocks_to_allocate = (mempool->items_initial +
+ mempool->items_per_memblock - 1) /
+ mempool->items_per_memblock;
+
+ /* pre-allocate the mempool */
+ status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
+ &allocated);
+ if (status != VXGE_HW_OK) {
+ __vxge_hw_mempool_destroy(mempool);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ mempool = NULL;
+ goto exit;
+ }
+
+exit:
+ return mempool;
+}
+
+/*
+ * __vxge_hw_ring_abort - Terminate all posted RxDs
+ * This function completes every outstanding RxD of the ring, invoking the
+ * rxd_term callback for each, and frees them back to the channel
+ */
+static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
+{
+ void *rxdh;
+ struct __vxge_hw_channel *channel;
+
+ channel = &ring->channel;
+
+ for (;;) {
+ vxge_hw_channel_dtr_try_complete(channel, &rxdh);
+
+ if (rxdh == NULL)
+ break;
+
+ vxge_hw_channel_dtr_complete(channel);
+
+ if (ring->rxd_term)
+ ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
+ channel->userdata);
+
+ vxge_hw_channel_dtr_free(channel, rxdh);
+ }
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_ring_reset - Resets the ring
+ * This function resets the ring during vpath reset operation
+ */
+static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct __vxge_hw_channel *channel;
+
+ channel = &ring->channel;
+
+ __vxge_hw_ring_abort(ring);
+
+ status = __vxge_hw_channel_reset(channel);
+
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ if (ring->rxd_init) {
+ status = vxge_hw_ring_replenish(ring);
+ if (status != VXGE_HW_OK)
+ goto exit;
+ }
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_ring_delete - Removes the ring
+ * This function frees up the memory pool and removes the ring
+ */
+static enum vxge_hw_status
+__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
+{
+ struct __vxge_hw_ring *ring = vp->vpath->ringh;
+
+ __vxge_hw_ring_abort(ring);
+
+ if (ring->mempool)
+ __vxge_hw_mempool_destroy(ring->mempool);
+
+ vp->vpath->ringh = NULL;
+ __vxge_hw_channel_free(&ring->channel);
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_ring_create - Create a Ring
+ * This function creates Ring and initializes it.
+ */
+static enum vxge_hw_status
+__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
+ struct vxge_hw_ring_attr *attr)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct __vxge_hw_ring *ring;
+ u32 ring_length;
+ struct vxge_hw_ring_config *config;
+ struct __vxge_hw_device *hldev;
+ u32 vp_id;
+ static const struct vxge_hw_mempool_cbs ring_mp_callback = {
+ .item_func_alloc = __vxge_hw_ring_mempool_item_alloc,
+ };
+
+ if ((vp == NULL) || (attr == NULL)) {
+ status = VXGE_HW_FAIL;
+ goto exit;
+ }
+
+ hldev = vp->vpath->hldev;
+ vp_id = vp->vpath->vp_id;
+
+ config = &hldev->config.vp_config[vp_id].ring;
+
+ ring_length = config->ring_blocks *
+ vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
+
+ ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
+ VXGE_HW_CHANNEL_TYPE_RING,
+ ring_length,
+ attr->per_rxd_space,
+ attr->userdata);
+ if (ring == NULL) {
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ vp->vpath->ringh = ring;
+ ring->vp_id = vp_id;
+ ring->vp_reg = vp->vpath->vp_reg;
+ ring->common_reg = hldev->common_reg;
+ ring->stats = &vp->vpath->sw_stats->ring_stats;
+ ring->config = config;
+ ring->callback = attr->callback;
+ ring->rxd_init = attr->rxd_init;
+ ring->rxd_term = attr->rxd_term;
+ ring->buffer_mode = config->buffer_mode;
+ ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
+ ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
+ ring->rxds_limit = config->rxds_limit;
+
+ ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
+ ring->rxd_priv_size =
+ sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
+ ring->per_rxd_space = attr->per_rxd_space;
+
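+	/* round the per-RxD private area up to a whole cache line */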
+ ring->rxd_priv_size =
+ ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
+ VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
+
+ /* how many RxDs can fit into one block. Depends on configured
+ * buffer_mode. */
+ ring->rxds_per_block =
+ vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
+
+ /* calculate actual RxD block private size */
+ ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
+ ring->mempool = __vxge_hw_mempool_create(hldev,
+ VXGE_HW_BLOCK_SIZE,
+ VXGE_HW_BLOCK_SIZE,
+ ring->rxdblock_priv_size,
+ ring->config->ring_blocks,
+ ring->config->ring_blocks,
+ &ring_mp_callback,
+ ring);
+ if (ring->mempool == NULL) {
+ __vxge_hw_ring_delete(vp);
+ return VXGE_HW_ERR_OUT_OF_MEMORY;
+ }
+
+ status = __vxge_hw_channel_initialize(&ring->channel);
+ if (status != VXGE_HW_OK) {
+ __vxge_hw_ring_delete(vp);
+ goto exit;
+ }
+
+ /* Note:
+ * Specifying rxd_init callback means two things:
+ * 1) rxds need to be initialized by driver at channel-open time;
+ * 2) rxds need to be posted at channel-open time
+ * (that's what the initial_replenish() below does)
+ * Currently we don't have a case when the 1) is done without the 2).
+ */
+ if (ring->rxd_init) {
+ status = vxge_hw_ring_replenish(ring);
+ if (status != VXGE_HW_OK) {
+ __vxge_hw_ring_delete(vp);
+ goto exit;
+ }
+ }
+
+	/* the initial replenish increments the usage counter in its post()
+	 * routine, so reset it here */
+ ring->stats->common_stats.usage_cnt = 0;
+exit:
+ return status;
+}
+
+/*
+ * vxge_hw_device_config_default_get - Initialize device config with defaults.
+ * Initialize Titan device config with default values.
+ */
+enum vxge_hw_status __devinit
+vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
+{
+ u32 i;
+
+ device_config->dma_blockpool_initial =
+ VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
+ device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
+ device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
+ device_config->rth_en = VXGE_HW_RTH_DEFAULT;
+ device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
+ device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
+ device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;
+
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ device_config->vp_config[i].vp_id = i;
+
+ device_config->vp_config[i].min_bandwidth =
+ VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
+
+ device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
+
+ device_config->vp_config[i].ring.ring_blocks =
+ VXGE_HW_DEF_RING_BLOCKS;
+
+ device_config->vp_config[i].ring.buffer_mode =
+ VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
+
+ device_config->vp_config[i].ring.scatter_mode =
+ VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].ring.rxds_limit =
+ VXGE_HW_DEF_RING_RXDS_LIMIT;
+
+ device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
+
+ device_config->vp_config[i].fifo.fifo_blocks =
+ VXGE_HW_MIN_FIFO_BLOCKS;
+
+ device_config->vp_config[i].fifo.max_frags =
+ VXGE_HW_MAX_FIFO_FRAGS;
+
+ device_config->vp_config[i].fifo.memblock_size =
+ VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
+
+ device_config->vp_config[i].fifo.alignment_size =
+ VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
+
+ device_config->vp_config[i].fifo.intr =
+ VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
+
+ device_config->vp_config[i].fifo.no_snoop_bits =
+ VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
+ device_config->vp_config[i].tti.intr_enable =
+ VXGE_HW_TIM_INTR_DEFAULT;
+
+ device_config->vp_config[i].tti.btimer_val =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].tti.timer_ac_en =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].tti.timer_ci_en =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].tti.timer_ri_en =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].tti.rtimer_val =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].tti.util_sel =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].tti.ltimer_val =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].tti.urange_a =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].tti.uec_a =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].tti.urange_b =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].tti.uec_b =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].tti.urange_c =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].tti.uec_c =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].tti.uec_d =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].rti.intr_enable =
+ VXGE_HW_TIM_INTR_DEFAULT;
+
+ device_config->vp_config[i].rti.btimer_val =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].rti.timer_ac_en =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].rti.timer_ci_en =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].rti.timer_ri_en =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].rti.rtimer_val =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].rti.util_sel =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].rti.ltimer_val =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].rti.urange_a =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].rti.uec_a =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].rti.urange_b =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].rti.uec_b =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].rti.urange_c =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].rti.uec_c =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].rti.uec_d =
+ VXGE_HW_USE_FLASH_DEFAULT;
+
+ device_config->vp_config[i].mtu =
+ VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
+
+ device_config->vp_config[i].rpa_strip_vlan_tag =
+ VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
+ }
+
+ return VXGE_HW_OK;
+}
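+
+/*
+ * Usage sketch (illustrative, not part of the driver): the defaults filled
+ * in above are meant to be fetched into a caller-owned config, which then
+ * overrides only the fields it cares about. The getter name and the
+ * override values below are assumptions for illustration.
+ *
+ *	struct vxge_hw_device_config config;
+ *
+ *	if (vxge_hw_device_config_default_get(&config) != VXGE_HW_OK)
+ *		return;
+ *	config.vp_config[0].mtu = 1500;
+ *	config.vp_config[0].rpa_strip_vlan_tag = 1;
+ */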
+
+/*
+ * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
+ * On little-endian hosts, enable byte swapping for vpath register access.
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
+{
+#ifndef __BIG_ENDIAN
+ u64 val64;
+
+ val64 = readq(&vpath_reg->vpath_general_cfg1);
+ wmb();
+ val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
+ writeq(val64, &vpath_reg->vpath_general_cfg1);
+ wmb();
+#endif
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
+ * Set the swapper bits appropriately for the KDFC doorbell FIFOs.
+ */
+static enum vxge_hw_status
+__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
+ struct vxge_hw_vpath_reg __iomem *vpath_reg)
+{
+ u64 val64;
+
+ val64 = readq(&legacy_reg->pifm_wr_swap_en);
+
+ if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
+ val64 = readq(&vpath_reg->kdfcctl_cfg0);
+ wmb();
+
+ val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
+ VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 |
+ VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
+
+ writeq(val64, &vpath_reg->kdfcctl_cfg0);
+ wmb();
+ }
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * vxge_hw_mgmt_reg_read - Read Titan register.
+ */
+enum vxge_hw_status
+vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
+ enum vxge_hw_mgmt_reg_type type,
+ u32 index, u32 offset, u64 *value)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
+ status = VXGE_HW_ERR_INVALID_DEVICE;
+ goto exit;
+ }
+
+ switch (type) {
+ case vxge_hw_mgmt_reg_type_legacy:
+ if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ *value = readq((void __iomem *)hldev->legacy_reg + offset);
+ break;
+ case vxge_hw_mgmt_reg_type_toc:
+ if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ *value = readq((void __iomem *)hldev->toc_reg + offset);
+ break;
+ case vxge_hw_mgmt_reg_type_common:
+ if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ *value = readq((void __iomem *)hldev->common_reg + offset);
+ break;
+ case vxge_hw_mgmt_reg_type_mrpcim:
+ if (!(hldev->access_rights &
+ VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
+ status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
+ break;
+ }
+ if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
+ break;
+ case vxge_hw_mgmt_reg_type_srpcim:
+ if (!(hldev->access_rights &
+ VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
+ status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
+ break;
+ }
+ if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
+ status = VXGE_HW_ERR_INVALID_INDEX;
+ break;
+ }
+ if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ *value = readq((void __iomem *)hldev->srpcim_reg[index] +
+ offset);
+ break;
+ case vxge_hw_mgmt_reg_type_vpmgmt:
+ if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
+ (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
+ status = VXGE_HW_ERR_INVALID_INDEX;
+ break;
+ }
+ if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
+ offset);
+ break;
+ case vxge_hw_mgmt_reg_type_vpath:
+ if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
+ (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
+ status = VXGE_HW_ERR_INVALID_INDEX;
+ break;
+ }
+ if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ *value = readq((void __iomem *)hldev->vpath_reg[index] +
+ offset);
+ break;
+ default:
+ status = VXGE_HW_ERR_INVALID_TYPE;
+ break;
+ }
+
+exit:
+ return status;
+}
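+
+/*
+ * Usage sketch (illustrative): reading a 64-bit Titan register through the
+ * management API above. The type, index and offset are example values;
+ * offset is a byte offset into the selected register block.
+ *
+ *	u64 val;
+ *	enum vxge_hw_status status;
+ *
+ *	status = vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_vpath,
+ *				       0, 0, &val);
+ *	if (status != VXGE_HW_OK)
+ *		return status;
+ */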
+
+/*
+ * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
+ */
+enum vxge_hw_status
+vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
+{
+ struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ int i = 0, j = 0;
+
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ if (!((vpath_mask) & vxge_mBIT(i)))
+ continue;
+ vpmgmt_reg = hldev->vpmgmt_reg[i];
+ for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
+ if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
+ & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
+ return VXGE_HW_FAIL;
+ }
+ }
+ return status;
+}
+
+/*
+ * vxge_hw_mgmt_reg_write - Write Titan register.
+ */
+enum vxge_hw_status
+vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
+ enum vxge_hw_mgmt_reg_type type,
+ u32 index, u32 offset, u64 value)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
+ status = VXGE_HW_ERR_INVALID_DEVICE;
+ goto exit;
+ }
+
+ switch (type) {
+ case vxge_hw_mgmt_reg_type_legacy:
+ if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ writeq(value, (void __iomem *)hldev->legacy_reg + offset);
+ break;
+ case vxge_hw_mgmt_reg_type_toc:
+ if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ writeq(value, (void __iomem *)hldev->toc_reg + offset);
+ break;
+ case vxge_hw_mgmt_reg_type_common:
+ if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ writeq(value, (void __iomem *)hldev->common_reg + offset);
+ break;
+ case vxge_hw_mgmt_reg_type_mrpcim:
+ if (!(hldev->access_rights &
+ VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
+ status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
+ break;
+ }
+ if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
+ break;
+ case vxge_hw_mgmt_reg_type_srpcim:
+ if (!(hldev->access_rights &
+ VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
+ status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
+ break;
+ }
+ if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
+ status = VXGE_HW_ERR_INVALID_INDEX;
+ break;
+ }
+ if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
+ offset);
+
+ break;
+ case vxge_hw_mgmt_reg_type_vpmgmt:
+ if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
+ (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
+ status = VXGE_HW_ERR_INVALID_INDEX;
+ break;
+ }
+ if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
+ offset);
+ break;
+ case vxge_hw_mgmt_reg_type_vpath:
+ if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
+ (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
+ status = VXGE_HW_ERR_INVALID_INDEX;
+ break;
+ }
+ if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
+ status = VXGE_HW_ERR_INVALID_OFFSET;
+ break;
+ }
+ writeq(value, (void __iomem *)hldev->vpath_reg[index] +
+ offset);
+ break;
+ default:
+ status = VXGE_HW_ERR_INVALID_TYPE;
+ break;
+ }
+exit:
+ return status;
+}
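+
+/*
+ * Usage sketch (illustrative): a read-modify-write of a common register
+ * through the two management calls above. The offset and the bit being set
+ * are example assumptions.
+ *
+ *	u64 val;
+ *
+ *	status = vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_common,
+ *				       0, 0x130, &val);
+ *	if (status == VXGE_HW_OK)
+ *		status = vxge_hw_mgmt_reg_write(hldev,
+ *				vxge_hw_mgmt_reg_type_common, 0, 0x130,
+ *				val | 0x1);
+ */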
+
+/*
+ * __vxge_hw_fifo_abort - Terminate outstanding TxDs
+ * This function terminates all TxDs still posted to the fifo and returns
+ * them to the free pool
+ */
+static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
+{
+ void *txdlh;
+
+ for (;;) {
+ vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
+
+ if (txdlh == NULL)
+ break;
+
+ vxge_hw_channel_dtr_complete(&fifo->channel);
+
+ if (fifo->txdl_term) {
+ fifo->txdl_term(txdlh,
+ VXGE_HW_TXDL_STATE_POSTED,
+ fifo->channel.userdata);
+ }
+
+ vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
+ }
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_fifo_reset - Resets the fifo
+ * This function resets the fifo during vpath reset operation
+ */
+static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ __vxge_hw_fifo_abort(fifo);
+ status = __vxge_hw_channel_reset(&fifo->channel);
+
+ return status;
+}
+
+/*
+ * __vxge_hw_fifo_delete - Removes the FIFO
+ * This function frees up the memory pool and removes the FIFO
+ */
+static enum vxge_hw_status
+__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
+{
+ struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
+
+ __vxge_hw_fifo_abort(fifo);
+
+ if (fifo->mempool)
+ __vxge_hw_mempool_destroy(fifo->mempool);
+
+ vp->vpath->fifoh = NULL;
+
+ __vxge_hw_channel_free(&fifo->channel);
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_fifo_mempool_item_alloc - TxD list block allocation callback
+ * This function is the callback passed to __vxge_hw_mempool_create to
+ * initialize each item of the TxD list memory pool
+ */
+static void
+__vxge_hw_fifo_mempool_item_alloc(
+ struct vxge_hw_mempool *mempoolh,
+ u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
+ u32 index, u32 is_last)
+{
+ u32 memblock_item_idx;
+ struct __vxge_hw_fifo_txdl_priv *txdl_priv;
+ struct vxge_hw_fifo_txd *txdp =
+ (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
+ struct __vxge_hw_fifo *fifo =
+ (struct __vxge_hw_fifo *)mempoolh->userdata;
+ void *memblock = mempoolh->memblocks_arr[memblock_index];
+
+ vxge_assert(txdp);
+
+ txdp->host_control = (u64) (size_t)
+ __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
+ &memblock_item_idx);
+
+ txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
+
+ vxge_assert(txdl_priv);
+
+ fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
+
+ /* pre-format HW's TxDL's private */
+ txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
+ txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
+ txdl_priv->dma_handle = dma_object->handle;
+ txdl_priv->memblock = memblock;
+ txdl_priv->first_txdp = txdp;
+ txdl_priv->next_txdl_priv = NULL;
+ txdl_priv->alloc_frags = 0;
+}
+
+/*
+ * __vxge_hw_fifo_create - Create a FIFO
+ * This function creates FIFO and initializes it.
+ */
+static enum vxge_hw_status
+__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
+ struct vxge_hw_fifo_attr *attr)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct __vxge_hw_fifo *fifo;
+ struct vxge_hw_fifo_config *config;
+ u32 txdl_size, txdl_per_memblock;
+ struct vxge_hw_mempool_cbs fifo_mp_callback;
+ struct __vxge_hw_virtualpath *vpath;
+
+ if ((vp == NULL) || (attr == NULL)) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+ vpath = vp->vpath;
+ config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
+
+ txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
+
+ txdl_per_memblock = config->memblock_size / txdl_size;
+
+ fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
+ VXGE_HW_CHANNEL_TYPE_FIFO,
+ config->fifo_blocks * txdl_per_memblock,
+ attr->per_txdl_space, attr->userdata);
+
+ if (fifo == NULL) {
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ vpath->fifoh = fifo;
+ fifo->nofl_db = vpath->nofl_db;
+
+ fifo->vp_id = vpath->vp_id;
+ fifo->vp_reg = vpath->vp_reg;
+ fifo->stats = &vpath->sw_stats->fifo_stats;
+
+ fifo->config = config;
+
+ /* apply "interrupts per txdl" attribute */
+ fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
+ fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
+ fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
+
+ if (fifo->config->intr)
+ fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
+
+ fifo->no_snoop_bits = config->no_snoop_bits;
+
+ /*
+ * FIFO memory management strategy:
+ *
+ * TxDL split into three independent parts:
+ * - set of TxD's
+ * - TxD HW private part
+ * - driver private part
+ *
+ * Adaptive memory allocation is used, i.e. memory is allocated on
+ * demand with a size that fits into one memory block.
+ * One memory block may contain more than one TxDL.
+ *
+ * During "reserve" operations more memory can be allocated on demand,
+ * for example due to a FIFO full condition.
+ *
+ * The pool of memory blocks never shrinks except in the
+ * __vxge_hw_fifo_close routine, which essentially stops the channel
+ * and frees its resources.
+ */
+
+ /* TxDL common private size == TxDL private + driver private */
+ fifo->priv_size =
+ sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
+ fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
+ VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
+
+ fifo->per_txdl_space = attr->per_txdl_space;
+
+ /* store the TxDL size and the TxDL count per memory block */
+ fifo->txdl_size = txdl_size;
+ fifo->txdl_per_memblock = txdl_per_memblock;
+
+ fifo->txdl_term = attr->txdl_term;
+ fifo->callback = attr->callback;
+
+ if (fifo->txdl_per_memblock == 0) {
+ __vxge_hw_fifo_delete(vp);
+ status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
+ goto exit;
+ }
+
+ fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
+
+ fifo->mempool =
+ __vxge_hw_mempool_create(vpath->hldev,
+ fifo->config->memblock_size,
+ fifo->txdl_size,
+ fifo->priv_size,
+ (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
+ (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
+ &fifo_mp_callback,
+ fifo);
+
+ if (fifo->mempool == NULL) {
+ __vxge_hw_fifo_delete(vp);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ status = __vxge_hw_channel_initialize(&fifo->channel);
+ if (status != VXGE_HW_OK) {
+ __vxge_hw_fifo_delete(vp);
+ goto exit;
+ }
+
+ vxge_assert(fifo->channel.reserve_ptr);
+exit:
+ return status;
+}
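+
+/*
+ * Worked example for the cache-line rounding above (all values are
+ * illustrative): with sizeof(struct __vxge_hw_fifo_txdl_priv) == 96,
+ * per_txdl_space == 64 and VXGE_CACHE_LINE_SIZE == 128, priv_size is
+ * first 96 + 64 = 160 and then rounds up to
+ * ((160 + 127) / 128) * 128 = 256 bytes per TxDL.
+ */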
+
+/*
+ * __vxge_hw_vpath_pci_read - Read the content of a given address
+ * in the vpath's PCI config space.
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
+ u32 phy_func_0, u32 offset, u32 *val)
+{
+ u64 val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
+
+ val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
+
+ if (phy_func_0)
+ val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
+
+ writeq(val64, &vp_reg->pci_config_access_cfg1);
+ wmb();
+ writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
+ &vp_reg->pci_config_access_cfg2);
+ wmb();
+
+ status = __vxge_hw_device_register_poll(
+ &vp_reg->pci_config_access_cfg2,
+ VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ val64 = readq(&vp_reg->pci_config_access_status);
+
+ if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
+ status = VXGE_HW_FAIL;
+ *val = 0;
+ } else
+ *val = (u32)vxge_bVALn(val64, 32, 32);
+exit:
+ return status;
+}
+
+/**
+ * vxge_hw_device_flick_link_led - Flick (blink) link LED.
+ * @hldev: HW device.
+ * @on_off: TRUE to turn LED flickering on, FALSE to turn it off
+ *
+ * Flicker the link LED.
+ */
+enum vxge_hw_status
+vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
+{
+ struct __vxge_hw_virtualpath *vpath;
+ u64 data0, data1 = 0, steer_ctrl = 0;
+ enum vxge_hw_status status;
+
+ if (hldev == NULL) {
+ status = VXGE_HW_ERR_INVALID_DEVICE;
+ goto exit;
+ }
+
+ vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+ data0 = on_off;
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
+ */
+enum vxge_hw_status
+__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
+ u32 action, u32 rts_table, u32 offset,
+ u64 *data0, u64 *data1)
+{
+ enum vxge_hw_status status;
+ u64 steer_ctrl = 0;
+
+ if (vp == NULL) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+
+ if ((rts_table ==
+ VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
+ (rts_table ==
+ VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
+ (rts_table ==
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
+ (rts_table ==
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
+ steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
+ }
+
+ status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
+ data0, data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
+ (rts_table !=
+ VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
+ *data1 = 0;
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
+ */
+enum vxge_hw_status
+__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
+ u32 rts_table, u32 offset, u64 steer_data0,
+ u64 steer_data1)
+{
+ u64 data0, data1 = 0, steer_ctrl = 0;
+ enum vxge_hw_status status;
+
+ if (vp == NULL) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+
+ data0 = steer_data0;
+
+ if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
+ (rts_table ==
+ VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
+ data1 = steer_data1;
+
+ status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
+ &data0, &data1, &steer_ctrl);
+exit:
+ return status;
+}
+
+/*
+ * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
+ */
+enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
+ struct __vxge_hw_vpath_handle *vp,
+ enum vxge_hw_rth_algoritms algorithm,
+ struct vxge_hw_rth_hash_types *hash_type,
+ u16 bucket_size)
+{
+ u64 data0, data1;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if (vp == NULL) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+
+ status = __vxge_hw_vpath_rts_table_get(vp,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
+ 0, &data0, &data1);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
+
+ data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
+
+ if (hash_type->hash_type_tcpipv4_en)
+ data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
+
+ if (hash_type->hash_type_ipv4_en)
+ data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
+
+ if (hash_type->hash_type_tcpipv6_en)
+ data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
+
+ if (hash_type->hash_type_ipv6_en)
+ data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
+
+ if (hash_type->hash_type_tcpipv6ex_en)
+ data0 |=
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
+
+ if (hash_type->hash_type_ipv6ex_en)
+ data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
+
+ if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
+ data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
+ else
+ data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
+
+ status = __vxge_hw_vpath_rts_table_set(vp,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
+ 0, data0, 0);
+exit:
+ return status;
+}
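+
+/*
+ * Usage sketch (illustrative): enabling RTH with TCP/IPv4 and IPv4 hashing.
+ * The algorithm enumerator and the bucket size of 8 (i.e. a 2^8-entry
+ * indirection table) are example assumptions.
+ *
+ *	struct vxge_hw_rth_hash_types hash_types = {0};
+ *
+ *	hash_types.hash_type_tcpipv4_en = 1;
+ *	hash_types.hash_type_ipv4_en = 1;
+ *	status = vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS,
+ *					   &hash_types, 8);
+ */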
+
+static void
+vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
+ u16 flag, u8 *itable)
+{
+ switch (flag) {
+ case 1:
+ *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
+ itable[j]);
+ break;
+ case 2:
+ *data0 |=
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
+ itable[j]);
+ break;
+ case 3:
+ *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
+ VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
+ VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
+ itable[j]);
+ break;
+ case 4:
+ *data1 |=
+ VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
+ VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
+ VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
+ itable[j]);
+ break;
+ default:
+ return;
+ }
+}
+
+/*
+ * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
+ */
+enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
+ struct __vxge_hw_vpath_handle **vpath_handles,
+ u32 vpath_count,
+ u8 *mtable,
+ u8 *itable,
+ u32 itable_size)
+{
+ u32 i, j, action, rts_table;
+ u64 data0;
+ u64 data1;
+ u32 max_entries;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
+
+ if (vp == NULL) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+
+ max_entries = (((u32)1) << itable_size);
+
+ if (vp->vpath->hldev->config.rth_it_type
+ == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
+ action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
+ rts_table =
+ VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
+
+ for (j = 0; j < max_entries; j++) {
+
+ data1 = 0;
+
+ data0 =
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
+ itable[j]);
+
+ status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
+ action, rts_table, j, data0, data1);
+
+ if (status != VXGE_HW_OK)
+ goto exit;
+ }
+
+ for (j = 0; j < max_entries; j++) {
+
+ data1 = 0;
+
+ data0 =
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
+ itable[j]);
+
+ status = __vxge_hw_vpath_rts_table_set(
+ vpath_handles[mtable[itable[j]]], action,
+ rts_table, j, data0, data1);
+
+ if (status != VXGE_HW_OK)
+ goto exit;
+ }
+ } else {
+ action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
+ rts_table =
+ VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
+ for (i = 0; i < vpath_count; i++) {
+
+ for (j = 0; j < max_entries;) {
+
+ data0 = 0;
+ data1 = 0;
+
+ while (j < max_entries) {
+ if (mtable[itable[j]] != i) {
+ j++;
+ continue;
+ }
+ vxge_hw_rts_rth_data0_data1_get(j,
+ &data0, &data1, 1, itable);
+ j++;
+ break;
+ }
+
+ while (j < max_entries) {
+ if (mtable[itable[j]] != i) {
+ j++;
+ continue;
+ }
+ vxge_hw_rts_rth_data0_data1_get(j,
+ &data0, &data1, 2, itable);
+ j++;
+ break;
+ }
+
+ while (j < max_entries) {
+ if (mtable[itable[j]] != i) {
+ j++;
+ continue;
+ }
+ vxge_hw_rts_rth_data0_data1_get(j,
+ &data0, &data1, 3, itable);
+ j++;
+ break;
+ }
+
+ while (j < max_entries) {
+ if (mtable[itable[j]] != i) {
+ j++;
+ continue;
+ }
+ vxge_hw_rts_rth_data0_data1_get(j,
+ &data0, &data1, 4, itable);
+ j++;
+ break;
+ }
+
+ if (data0 != 0) {
+ status = __vxge_hw_vpath_rts_table_set(
+ vpath_handles[i],
+ action, rts_table,
+ 0, data0, data1);
+
+ if (status != VXGE_HW_OK)
+ goto exit;
+ }
+ }
+ }
+ }
+exit:
+ return status;
+}
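+
+/*
+ * Usage sketch (illustrative): building a round-robin indirection table for
+ * vpath_count opened vpaths before calling the function above. The table
+ * size of 256 entries (itable_size 8) is an example assumption.
+ *
+ *	u8 mtable[VXGE_HW_MAX_VIRTUAL_PATHS] = {0};
+ *	u8 itable[256];
+ *	u32 j;
+ *
+ *	for (j = 0; j < 256; j++) {
+ *		itable[j] = j % vpath_count;
+ *		mtable[itable[j]] = itable[j];
+ *	}
+ *	status = vxge_hw_vpath_rts_rth_itable_set(vpath_handles,
+ *				vpath_count, mtable, itable, 8);
+ */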
+
+/**
+ * vxge_hw_vpath_check_leak - Check for an RxD leak
+ * @ring: Handle to the ring object used for receive
+ *
+ * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger than or equal to
+ * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
+ * Returns: VXGE_HW_FAIL if a leak has occurred, VXGE_HW_OK otherwise.
+ */
+enum vxge_hw_status
+vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ u64 rxd_new_count, rxd_spat;
+
+ if (ring == NULL)
+ return status;
+
+ rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
+ rxd_spat = readq(&ring->vp_reg->prc_cfg6);
+ rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
+
+ if (rxd_new_count >= rxd_spat)
+ status = VXGE_HW_FAIL;
+
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_mgmt_read
+ * This routine reads the vpath_mgmt registers
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_mgmt_read(
+ struct __vxge_hw_device *hldev,
+ struct __vxge_hw_virtualpath *vpath)
+{
+ u32 i, mtu = 0, max_pyld = 0;
+ u64 val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
+
+ val64 = readq(&vpath->vpmgmt_reg->
+ rxmac_cfg0_port_vpmgmt_clone[i]);
+ max_pyld =
+ (u32)
+ VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
+ (val64);
+ if (mtu < max_pyld)
+ mtu = max_pyld;
+ }
+
+ vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
+
+ val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
+
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ if (val64 & vxge_mBIT(i))
+ vpath->vsport_number = i;
+ }
+
+ val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
+
+ if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
+ VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
+ else
+ VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
+
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
+ * This routine checks the vpath_rst_in_prog register to see if
+ * adapter completed the reset process for the vpath
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
+{
+ enum vxge_hw_status status;
+
+ status = __vxge_hw_device_register_poll(
+ &vpath->hldev->common_reg->vpath_rst_in_prog,
+ VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
+ 1 << (16 - vpath->vp_id)),
+ vpath->hldev->config.device_poll_millis);
+
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_reset
+ * This routine resets the vpath on the device
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+ u64 val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
+
+ __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
+ &hldev->common_reg->cmn_rsthdlr_cfg0);
+
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_sw_reset
+ * This routine resets the vpath structures
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct __vxge_hw_virtualpath *vpath;
+
+ vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id];
+
+ if (vpath->ringh) {
+ status = __vxge_hw_ring_reset(vpath->ringh);
+ if (status != VXGE_HW_OK)
+ goto exit;
+ }
+
+ if (vpath->fifoh)
+ status = __vxge_hw_fifo_reset(vpath->fifoh);
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_prc_configure
+ * This routine configures the prc registers of virtual path using the config
+ * passed
+ */
+static void
+__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+ u64 val64;
+ struct __vxge_hw_virtualpath *vpath;
+ struct vxge_hw_vp_config *vp_config;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+ vpath = &hldev->virtual_paths[vp_id];
+ vp_reg = vpath->vp_reg;
+ vp_config = vpath->vp_config;
+
+ if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
+ return;
+
+ val64 = readq(&vp_reg->prc_cfg1);
+ val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
+ writeq(val64, &vp_reg->prc_cfg1);
+
+ val64 = readq(&vpath->vp_reg->prc_cfg6);
+ val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
+ writeq(val64, &vpath->vp_reg->prc_cfg6);
+
+ val64 = readq(&vp_reg->prc_cfg7);
+
+ if (vpath->vp_config->ring.scatter_mode !=
+ VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
+
+ val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
+
+ switch (vpath->vp_config->ring.scatter_mode) {
+ case VXGE_HW_RING_SCATTER_MODE_A:
+ val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
+ VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
+ break;
+ case VXGE_HW_RING_SCATTER_MODE_B:
+ val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
+ VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
+ break;
+ case VXGE_HW_RING_SCATTER_MODE_C:
+ val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
+ VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
+ break;
+ }
+ }
+
+ writeq(val64, &vp_reg->prc_cfg7);
+
+ writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
+ __vxge_hw_ring_first_block_address_get(
+ vpath->ringh) >> 3), &vp_reg->prc_cfg5);
+
+ val64 = readq(&vp_reg->prc_cfg4);
+ val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
+ val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
+
+ val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
+ VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
+
+ if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
+ val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
+ else
+ val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
+
+ writeq(val64, &vp_reg->prc_cfg4);
+}
+
+/*
+ * __vxge_hw_vpath_kdfc_configure
+ * This routine configures the kdfc registers of virtual path using the
+ * config passed
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+ u64 val64;
+ u64 vpath_stride;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct __vxge_hw_virtualpath *vpath;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+ vpath = &hldev->virtual_paths[vp_id];
+ vp_reg = vpath->vp_reg;
+ status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
+
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
+
+ vpath->max_kdfc_db =
+ (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
+ val64+1)/2;
+
+ if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
+
+ vpath->max_nofl_db = vpath->max_kdfc_db;
+
+ if (vpath->max_nofl_db <
+ ((vpath->vp_config->fifo.memblock_size /
+ (vpath->vp_config->fifo.max_frags *
+ sizeof(struct vxge_hw_fifo_txd))) *
+ vpath->vp_config->fifo.fifo_blocks)) {
+
+ return VXGE_HW_BADCFG_FIFO_BLOCKS;
+ }
+ val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
+ (vpath->max_nofl_db*2)-1);
+ }
+
+ writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
+
+ writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
+ &vp_reg->kdfc_fifo_trpl_ctrl);
+
+ val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
+
+ val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
+ VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
+
+ val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
+ VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
+#ifndef __BIG_ENDIAN
+ VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
+#endif
+ VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
+
+ writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
+ writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
+ wmb();
+ vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
+
+ vpath->nofl_db =
+ (struct __vxge_hw_non_offload_db_wrapper __iomem *)
+ (hldev->kdfc + (vp_id *
+ VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
+ vpath_stride)));
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_mac_configure
+ * This routine configures the mac of virtual path using the config passed
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+ u64 val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct __vxge_hw_virtualpath *vpath;
+ struct vxge_hw_vp_config *vp_config;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+ vpath = &hldev->virtual_paths[vp_id];
+ vp_reg = vpath->vp_reg;
+ vp_config = vpath->vp_config;
+
+ writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
+ vpath->vsport_number), &vp_reg->xmac_vsport_choice);
+
+ if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
+
+ val64 = readq(&vp_reg->xmac_rpa_vcfg);
+
+ if (vp_config->rpa_strip_vlan_tag !=
+ VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
+ if (vp_config->rpa_strip_vlan_tag)
+ val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
+ else
+ val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
+ }
+
+ writeq(val64, &vp_reg->xmac_rpa_vcfg);
+ val64 = readq(&vp_reg->rxmac_vcfg0);
+
+ if (vp_config->mtu !=
+ VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
+ val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
+ if ((vp_config->mtu +
+ VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
+ val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
+ vp_config->mtu +
+ VXGE_HW_MAC_HEADER_MAX_SIZE);
+ else
+ val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
+ vpath->max_mtu);
+ }
+
+ writeq(val64, &vp_reg->rxmac_vcfg0);
+
+ val64 = readq(&vp_reg->rxmac_vcfg1);
+
+ val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
+ VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
+
+ if (hldev->config.rth_it_type ==
+ VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
+ val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
+ 0x2) |
+ VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
+ }
+
+ writeq(val64, &vp_reg->rxmac_vcfg1);
+ }
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_tim_configure
+ * This routine configures the tim registers of virtual path using the config
+ * passed
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+ u64 val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct __vxge_hw_virtualpath *vpath;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct vxge_hw_vp_config *config;
+
+ vpath = &hldev->virtual_paths[vp_id];
+ vp_reg = vpath->vp_reg;
+ config = vpath->vp_config;
+
+ writeq(0, &vp_reg->tim_dest_addr);
+ writeq(0, &vp_reg->tim_vpath_map);
+ writeq(0, &vp_reg->tim_bitmap);
+ writeq(0, &vp_reg->tim_remap);
+
+ if (config->ring.enable == VXGE_HW_RING_ENABLE)
+ writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
+ (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
+ VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
+
+ val64 = readq(&vp_reg->tim_pci_cfg);
+ val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
+ writeq(val64, &vp_reg->tim_pci_cfg);
+
+ if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
+
+ val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+
+ if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
+ 0x3ffffff);
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
+ config->tti.btimer_val);
+ }
+
+ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
+
+ if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
+ if (config->tti.timer_ac_en)
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
+ else
+ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
+ }
+
+ if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
+ if (config->tti.timer_ci_en)
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+ else
+ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+ }
+
+ if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
+ config->tti.urange_a);
+ }
+
+ if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
+ config->tti.urange_b);
+ }
+
+ if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
+ config->tti.urange_c);
+ }
+
+ writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+ vpath->tim_tti_cfg1_saved = val64;
+
+ val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
+
+ if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
+ val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
+ config->tti.uec_a);
+ }
+
+ if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
+ val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
+ config->tti.uec_b);
+ }
+
+ if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
+ val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
+ config->tti.uec_c);
+ }
+
+ if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
+ val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
+ config->tti.uec_d);
+ }
+
+ writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
+ val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+
+ if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
+ if (config->tti.timer_ri_en)
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
+ else
+ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
+ }
+
+ if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
+ 0x3ffffff);
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
+ config->tti.rtimer_val);
+ }
+
+ if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
+ }
+
+ if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
+ 0x3ffffff);
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
+ config->tti.ltimer_val);
+ }
+
+ writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+ vpath->tim_tti_cfg3_saved = val64;
+ }
+
+ if (config->ring.enable == VXGE_HW_RING_ENABLE) {
+
+ val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+
+ if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
+ 0x3ffffff);
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
+ config->rti.btimer_val);
+ }
+
+ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
+
+ if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
+ if (config->rti.timer_ac_en)
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
+ else
+ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
+ }
+
+ if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
+ if (config->rti.timer_ci_en)
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+ else
+ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+ }
+
+ if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
+ config->rti.urange_a);
+ }
+
+ if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
+ config->rti.urange_b);
+ }
+
+ if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
+ config->rti.urange_c);
+ }
+
+ writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+ vpath->tim_rti_cfg1_saved = val64;
+
+ val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
+
+ if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
+ val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
+ config->rti.uec_a);
+ }
+
+ if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
+ val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
+ config->rti.uec_b);
+ }
+
+ if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
+ val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
+ config->rti.uec_c);
+ }
+
+ if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
+ val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
+ config->rti.uec_d);
+ }
+
+ writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
+ val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
+
+ if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
+ if (config->rti.timer_ri_en)
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
+ else
+ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
+ }
+
+ if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
+ 0x3ffffff);
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
+ config->rti.rtimer_val);
+ }
+
+ if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
+ }
+
+ if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
+ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
+ 0x3ffffff);
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
+ config->rti.ltimer_val);
+ }
+
+ writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
+ vpath->tim_rti_cfg3_saved = val64;
+ }
+
+ val64 = 0;
+ writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
+ writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
+ writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
+ writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
+ writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
+ writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
+
+ val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150);
+ val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0);
+ val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
+ writeq(val64, &vp_reg->tim_wrkld_clc);
+
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_initialize
+ * This routine is the final phase of init which initializes the
+ * registers of the vpath using the configuration passed.
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+ u64 val64;
+ u32 val32;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct __vxge_hw_virtualpath *vpath;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+ vpath = &hldev->virtual_paths[vp_id];
+
+ if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
+ status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
+ goto exit;
+ }
+ vp_reg = vpath->vp_reg;
+
+ status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
+
+ /* Get MRRS value from device control */
+ status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
+ if (status == VXGE_HW_OK) {
+ val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
+ val64 &=
+ ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
+ val64 |=
+ VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
+
+ val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
+ }
+
+ val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
+ val64 |=
+ VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
+ VXGE_HW_MAX_PAYLOAD_SIZE_512);
+
+ val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
+ writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
+
+exit:
+ return status;
+}
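+
+/*
+ * Note on the MRRS extraction above (background, hedged): offset 0x78 is
+ * assumed to be this device's PCIe Device Control register (capability
+ * base 0x70 + 8). Bits 14:12 encode Max_Read_Request_Size as
+ * 128 << value, so val32 lands in the 0-5 range after the shift and is
+ * used directly as the fill threshold.
+ */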
+
+/*
+ * __vxge_hw_vp_terminate - Terminate Virtual Path structure
+ * This routine closes all channels it opened and frees up the memory
+ */
+static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+ struct __vxge_hw_virtualpath *vpath;
+
+ vpath = &hldev->virtual_paths[vp_id];
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
+ goto exit;
+
+ VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
+ vpath->hldev->tim_int_mask1, vpath->vp_id);
+ hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
+
+ /* If the whole struct __vxge_hw_virtualpath is zeroed, nothing will
+ * work after the interface is brought down.
+ */
+ spin_lock(&vpath->lock);
+ vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
+ spin_unlock(&vpath->lock);
+
+ vpath->vpmgmt_reg = NULL;
+ vpath->nofl_db = NULL;
+ vpath->max_mtu = 0;
+ vpath->vsport_number = 0;
+ vpath->max_kdfc_db = 0;
+ vpath->max_nofl_db = 0;
+ vpath->ringh = NULL;
+ vpath->fifoh = NULL;
+ memset(&vpath->vpath_handles, 0, sizeof(struct list_head));
+ vpath->stats_block = 0;
+ vpath->hw_stats = NULL;
+ vpath->hw_stats_sav = NULL;
+ vpath->sw_stats = NULL;
+
+exit:
+ return;
+}
+
+/*
+ * __vxge_hw_vp_initialize - Initialize Virtual Path structure
+ * This routine is the initial phase of init which resets the vpath and
+ * initializes the software support structures.
+ */
+static enum vxge_hw_status
+__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
+ struct vxge_hw_vp_config *config)
+{
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
+ status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
+ goto exit;
+ }
+
+ vpath = &hldev->virtual_paths[vp_id];
+
+ spin_lock_init(&vpath->lock);
+ vpath->vp_id = vp_id;
+ vpath->vp_open = VXGE_HW_VP_OPEN;
+ vpath->hldev = hldev;
+ vpath->vp_config = config;
+ vpath->vp_reg = hldev->vpath_reg[vp_id];
+ vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
+
+ __vxge_hw_vpath_reset(hldev, vp_id);
+
+ status = __vxge_hw_vpath_reset_check(vpath);
+ if (status != VXGE_HW_OK) {
+ memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
+ goto exit;
+ }
+
+ status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
+ if (status != VXGE_HW_OK) {
+ memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
+ goto exit;
+ }
+
+ INIT_LIST_HEAD(&vpath->vpath_handles);
+
+ vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
+
+ VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
+ hldev->tim_int_mask1, vp_id);
+
+ status = __vxge_hw_vpath_initialize(hldev, vp_id);
+ if (status != VXGE_HW_OK)
+ __vxge_hw_vp_terminate(hldev, vp_id);
+exit:
+ return status;
+}
+
+/*
+ * vxge_hw_vpath_mtu_set - Set MTU.
+ * Set new MTU value. Example, to use jumbo frames:
+ * vxge_hw_vpath_mtu_set(my_device, 9600);
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
+{
+ u64 val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct __vxge_hw_virtualpath *vpath;
+
+ if (vp == NULL) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+ vpath = vp->vpath;
+
+ new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
+
+ if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
+ status = VXGE_HW_ERR_INVALID_MTU_SIZE;
+ goto exit;
+ }
+
+ val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+
+ val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
+ val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
+
+ writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+
+ vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
+
+exit:
+ return status;
+}
+
+/*
+ * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
+ * Enable the DMA vpath statistics. The function is called to re-enable
+ * the adapter to update stats into host memory
+ */
+static enum vxge_hw_status
+vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct __vxge_hw_virtualpath *vpath;
+
+ vpath = vp->vpath;
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+ status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+ goto exit;
+ }
+
+ memcpy(vpath->hw_stats_sav, vpath->hw_stats,
+ sizeof(struct vxge_hw_vpath_stats_hw_info));
+
+ status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_blockpool_block_allocate - Allocates a block from the block pool
+ * This function takes a block from the pool's free list and then triggers
+ * replenishment of the pool
+ */
+static struct __vxge_hw_blockpool_entry *
+__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
+{
+ struct __vxge_hw_blockpool_entry *entry = NULL;
+ struct __vxge_hw_blockpool *blockpool;
+
+ blockpool = &devh->block_pool;
+
+ if (size == blockpool->block_size) {
+
+ if (!list_empty(&blockpool->free_block_list))
+ entry = (struct __vxge_hw_blockpool_entry *)
+ list_first_entry(&blockpool->free_block_list,
+ struct __vxge_hw_blockpool_entry,
+ item);
+
+ if (entry != NULL) {
+ list_del(&entry->item);
+ blockpool->pool_size--;
+ }
+ }
+
+ if (entry != NULL)
+ __vxge_hw_blockpool_blocks_add(blockpool);
+
+ return entry;
+}
+
+/*
+ * vxge_hw_vpath_open - Open a virtual path on a given adapter
+ * This function is used to open access to a virtual path of an
+ * adapter for offload and GRO operations. This function returns
+ * synchronously.
+ */
+enum vxge_hw_status
+vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
+ struct vxge_hw_vpath_attr *attr,
+ struct __vxge_hw_vpath_handle **vpath_handle)
+{
+ struct __vxge_hw_virtualpath *vpath;
+ struct __vxge_hw_vpath_handle *vp;
+ enum vxge_hw_status status;
+
+ vpath = &hldev->virtual_paths[attr->vp_id];
+
+ if (vpath->vp_open == VXGE_HW_VP_OPEN) {
+ status = VXGE_HW_ERR_INVALID_STATE;
+ goto vpath_open_exit1;
+ }
+
+ status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
+ &hldev->config.vp_config[attr->vp_id]);
+ if (status != VXGE_HW_OK)
+ goto vpath_open_exit1;
+
+ vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
+ if (vp == NULL) {
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto vpath_open_exit2;
+ }
+
+ vp->vpath = vpath;
+
+ if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
+ status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
+ if (status != VXGE_HW_OK)
+ goto vpath_open_exit6;
+ }
+
+ if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
+ status = __vxge_hw_ring_create(vp, &attr->ring_attr);
+ if (status != VXGE_HW_OK)
+ goto vpath_open_exit7;
+
+ __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
+ }
+
+ if (vpath->fifoh != NULL)
+ vpath->fifoh->tx_intr_num =
+ (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
+ VXGE_HW_VPATH_INTR_TX;
+
+ vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
+ VXGE_HW_BLOCK_SIZE);
+ if (vpath->stats_block == NULL) {
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto vpath_open_exit8;
+ }
+
+ vpath->hw_stats = vpath->stats_block->memblock;
+ memset(vpath->hw_stats, 0,
+ sizeof(struct vxge_hw_vpath_stats_hw_info));
+
+ hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
+ vpath->hw_stats;
+
+ vpath->hw_stats_sav =
+ &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
+ memset(vpath->hw_stats_sav, 0,
+ sizeof(struct vxge_hw_vpath_stats_hw_info));
+
+ writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
+
+ status = vxge_hw_vpath_stats_enable(vp);
+ if (status != VXGE_HW_OK)
+ goto vpath_open_exit8;
+
+ list_add(&vp->item, &vpath->vpath_handles);
+
+ hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
+
+ *vpath_handle = vp;
+
+ attr->fifo_attr.userdata = vpath->fifoh;
+ attr->ring_attr.userdata = vpath->ringh;
+
+ return VXGE_HW_OK;
+
+vpath_open_exit8:
+ if (vpath->ringh != NULL)
+ __vxge_hw_ring_delete(vp);
+vpath_open_exit7:
+ if (vpath->fifoh != NULL)
+ __vxge_hw_fifo_delete(vp);
+vpath_open_exit6:
+ vfree(vp);
+vpath_open_exit2:
+ __vxge_hw_vp_terminate(hldev, attr->vp_id);
+vpath_open_exit1:
+
+ return status;
+}
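+
+/*
+ * Usage sketch (illustrative): opening a vpath. The attribute values and
+ * the callback names are assumptions; the fifo and ring attributes must be
+ * filled in by the caller beforehand.
+ *
+ *	struct vxge_hw_vpath_attr attr = {0};
+ *	struct __vxge_hw_vpath_handle *vp;
+ *
+ *	attr.vp_id = 0;
+ *	attr.fifo_attr.callback = my_tx_done;	(hypothetical callback)
+ *	attr.ring_attr.callback = my_rx_done;	(hypothetical callback)
+ *	status = vxge_hw_vpath_open(hldev, &attr, &vp);
+ *	if (status != VXGE_HW_OK)
+ *		return status;
+ */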
+
+/**
+ * vxge_hw_vpath_rx_doorbell_init - Post the initial RxD count to the
+ * receive doorbell
+ * @vp: Handle got from previous vpath open
+ *
+ * This function posts the count of initially available RxDs to the vpath's
+ * receive doorbell and clamps the ring's rxds_limit accordingly.
+ */
+void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
+{
+ struct __vxge_hw_virtualpath *vpath = vp->vpath;
+ struct __vxge_hw_ring *ring = vpath->ringh;
+ struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
+ u64 new_count, val64, val164;
+
+ if (vdev->titan1) {
+ new_count = readq(&vpath->vp_reg->rxdmem_size);
+ new_count &= 0x1fff;
+ } else
+ new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
+
+ val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
+
+ writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
+ &vpath->vp_reg->prc_rxd_doorbell);
+ readl(&vpath->vp_reg->prc_rxd_doorbell);
+
+ val164 /= 2;
+ val64 = readq(&vpath->vp_reg->prc_cfg6);
+ val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
+ val64 &= 0x1ff;
+
+ /*
+ * Each RxD is of 4 qwords
+ */
+ new_count -= (val64 + 1);
+ val64 = min(val164, new_count) / 4;
+
+ ring->rxds_limit = min(ring->rxds_limit, val64);
+ if (ring->rxds_limit < 4)
+ ring->rxds_limit = 4;
+}
+
+/*
+ * __vxge_hw_blockpool_block_free - Frees a block from block pool
+ * @devh: Hal device
+ * @entry: Entry of block to be freed
+ *
+ * This function frees a block from block pool
+ */
+static void
+__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
+ struct __vxge_hw_blockpool_entry *entry)
+{
+ struct __vxge_hw_blockpool *blockpool;
+
+ blockpool = &devh->block_pool;
+
+ if (entry->length == blockpool->block_size) {
+ list_add(&entry->item, &blockpool->free_block_list);
+ blockpool->pool_size++;
+ }
+
+ __vxge_hw_blockpool_blocks_remove(blockpool);
+}
+
+/*
+ * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
+ * This function is used to close access to a virtual path opened
+ * earlier.
+ */
+enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
+{
+ struct __vxge_hw_virtualpath *vpath = NULL;
+ struct __vxge_hw_device *devh = NULL;
+ u32 vp_id = vp->vpath->vp_id;
+ u32 is_empty = TRUE;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ vpath = vp->vpath;
+ devh = vpath->hldev;
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+ status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+ goto vpath_close_exit;
+ }
+
+ list_del(&vp->item);
+
+ if (!list_empty(&vpath->vpath_handles)) {
+ list_add(&vp->item, &vpath->vpath_handles);
+ is_empty = FALSE;
+ }
+
+ if (!is_empty) {
+ status = VXGE_HW_FAIL;
+ goto vpath_close_exit;
+ }
+
+ devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
+
+ if (vpath->ringh != NULL)
+ __vxge_hw_ring_delete(vp);
+
+ if (vpath->fifoh != NULL)
+ __vxge_hw_fifo_delete(vp);
+
+ if (vpath->stats_block != NULL)
+ __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
+
+ vfree(vp);
+
+ __vxge_hw_vp_terminate(devh, vp_id);
+
+vpath_close_exit:
+ return status;
+}
+
+/*
+ * vxge_hw_vpath_reset - Resets vpath
+ * This function is used to request a reset of vpath
+ */
+enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
+{
+ enum vxge_hw_status status;
+ u32 vp_id;
+ struct __vxge_hw_virtualpath *vpath = vp->vpath;
+
+ vp_id = vpath->vp_id;
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+ status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+ goto exit;
+ }
+
+ status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
+ if (status == VXGE_HW_OK)
+ vpath->sw_stats->soft_reset_cnt++;
+exit:
+ return status;
+}
+
+/*
+ * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
+ * This function polls for the vpath reset completion and re-initializes
+ * the vpath.
+ */
+enum vxge_hw_status
+vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
+{
+ struct __vxge_hw_virtualpath *vpath = NULL;
+ enum vxge_hw_status status;
+ struct __vxge_hw_device *hldev;
+ u32 vp_id;
+
+ vp_id = vp->vpath->vp_id;
+ vpath = vp->vpath;
+ hldev = vpath->hldev;
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+ status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+ goto exit;
+ }
+
+ status = __vxge_hw_vpath_reset_check(vpath);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ status = __vxge_hw_vpath_initialize(hldev, vp_id);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ if (vpath->ringh != NULL)
+ __vxge_hw_vpath_prc_configure(hldev, vp_id);
+
+ memset(vpath->hw_stats, 0,
+ sizeof(struct vxge_hw_vpath_stats_hw_info));
+
+ memset(vpath->hw_stats_sav, 0,
+ sizeof(struct vxge_hw_vpath_stats_hw_info));
+
+ writeq(vpath->stats_block->dma_addr,
+ &vpath->vp_reg->stats_cfg);
+
+ status = vxge_hw_vpath_stats_enable(vp);
+
+exit:
+ return status;
+}
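+
+/*
+ * Usage sketch (illustrative): the typical reset flow a caller would follow
+ * with the entry points defined here, finishing with vxge_hw_vpath_enable()
+ * below.
+ *
+ *	status = vxge_hw_vpath_reset(vp);
+ *	if (status != VXGE_HW_OK)
+ *		return status;
+ *	status = vxge_hw_vpath_recover_from_reset(vp);
+ *	if (status != VXGE_HW_OK)
+ *		return status;
+ *	vxge_hw_vpath_enable(vp);
+ */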
+
+/*
+ * vxge_hw_vpath_enable - Enable vpath.
+ * This routine clears the vpath reset thereby enabling a vpath
+ * to start forwarding frames and generating interrupts.
+ */
+void
+vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
+{
+ struct __vxge_hw_device *hldev;
+ u64 val64;
+
+ hldev = vp->vpath->hldev;
+
+ val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
+ 1 << (16 - vp->vpath->vp_id));
+
+ __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
+ &hldev->common_reg->cmn_rsthdlr_cfg1);
+}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
new file mode 100644
index 000000000000..5046a64f0fe8
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
@@ -0,0 +1,2111 @@
+/******************************************************************************
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ *
+ * vxge-config.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
+ * Virtualized Server Adapter.
+ * Copyright(c) 2002-2010 Exar Corp.
+ ******************************************************************************/
+#ifndef VXGE_CONFIG_H
+#define VXGE_CONFIG_H
+#include <linux/hardirq.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+#ifndef VXGE_CACHE_LINE_SIZE
+#define VXGE_CACHE_LINE_SIZE 128
+#endif
+
+#ifndef VXGE_ALIGN
+#define VXGE_ALIGN(adrs, size) \
+ (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
+#endif
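+
+/*
+ * VXGE_ALIGN yields the number of pad bytes needed to round "adrs" up to
+ * the next "size" boundary (0 when already aligned). Worked examples for a
+ * power-of-two size:
+ *	VXGE_ALIGN(0x1003, 8) == (8 - (0x1003 & 7)) & 7 == 5
+ *	VXGE_ALIGN(0x1000, 8) == (8 - (0x1000 & 7)) & 7 == 0
+ */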
+
+#define VXGE_HW_MIN_MTU 68
+#define VXGE_HW_MAX_MTU 9600
+#define VXGE_HW_DEFAULT_MTU 1500
+
+#define VXGE_HW_MAX_ROM_IMAGES 8
+
+struct eprom_image {
+ u8 is_valid:1;
+ u8 index;
+ u8 type;
+ u16 version;
+};
+
+#ifdef VXGE_DEBUG_ASSERT
+/**
+ * vxge_assert
+ * @test: C-condition to check
+ *
+ * This macro implements a traditional assert. By default assertions
+ * are enabled. They can be disabled by undefining the VXGE_DEBUG_ASSERT
+ * macro at compile time.
+ */
+#define vxge_assert(test) BUG_ON(!(test))
+#else
+#define vxge_assert(test)
+#endif /* end of VXGE_DEBUG_ASSERT */
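+
+/*
+ * Example (illustrative): callers assert on invariants that must hold
+ * in a debug build, e.g.
+ *     vxge_assert(vpath != NULL);
+ * With VXGE_DEBUG_ASSERT undefined, the check compiles away entirely.
+ */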
+
+/**
+ * enum vxge_debug_level
+ * @VXGE_NONE: debug disabled
+ * @VXGE_ERR: all errors are logged
+ * @VXGE_TRACE: all errors plus all kinds of verbose tracing printouts
+ * are logged. Very noisy.
+ *
+ * This enumeration is used to switch between different debug levels
+ * at runtime if the DEBUG macro is defined at compile time. If the
+ * DEBUG macro is not defined, the debug code is compiled out.
+ */
+enum vxge_debug_level {
+ VXGE_NONE = 0,
+ VXGE_TRACE = 1,
+ VXGE_ERR = 2
+};
+
+#define NULL_VPID 0xFFFFFFFF
+#ifdef CONFIG_VXGE_DEBUG_TRACE_ALL
+#define VXGE_DEBUG_MODULE_MASK 0xffffffff
+#define VXGE_DEBUG_TRACE_MASK 0xffffffff
+#define VXGE_DEBUG_ERR_MASK 0xffffffff
+#define VXGE_DEBUG_MASK 0x000001ff
+#else
+#define VXGE_DEBUG_MODULE_MASK 0x20000000
+#define VXGE_DEBUG_TRACE_MASK 0x20000000
+#define VXGE_DEBUG_ERR_MASK 0x20000000
+#define VXGE_DEBUG_MASK 0x00000001
+#endif
+
+/*
+ * @VXGE_COMPONENT_LL: do debug for vxge link layer module
+ * @VXGE_COMPONENT_ALL: activate debug for all modules with no exceptions
+ *
+ * These values are used to distinguish modules or libraries at
+ * compile time and runtime. The Makefile must declare the
+ * VXGE_DEBUG_MODULE_MASK macro and set it to the proper value.
+ */
+#define VXGE_COMPONENT_LL 0x20000000
+#define VXGE_COMPONENT_ALL 0xffffffff
+
+#define VXGE_HW_BASE_INF 100
+#define VXGE_HW_BASE_ERR 200
+#define VXGE_HW_BASE_BADCFG 300
+
+enum vxge_hw_status {
+ VXGE_HW_OK = 0,
+ VXGE_HW_FAIL = 1,
+ VXGE_HW_PENDING = 2,
+ VXGE_HW_COMPLETIONS_REMAIN = 3,
+
+ VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
+ VXGE_HW_INF_OUT_OF_DESCRIPTORS = VXGE_HW_BASE_INF + 2,
+
+ VXGE_HW_ERR_INVALID_HANDLE = VXGE_HW_BASE_ERR + 1,
+ VXGE_HW_ERR_OUT_OF_MEMORY = VXGE_HW_BASE_ERR + 2,
+ VXGE_HW_ERR_VPATH_NOT_AVAILABLE = VXGE_HW_BASE_ERR + 3,
+ VXGE_HW_ERR_VPATH_NOT_OPEN = VXGE_HW_BASE_ERR + 4,
+ VXGE_HW_ERR_WRONG_IRQ = VXGE_HW_BASE_ERR + 5,
+ VXGE_HW_ERR_SWAPPER_CTRL = VXGE_HW_BASE_ERR + 6,
+ VXGE_HW_ERR_INVALID_MTU_SIZE = VXGE_HW_BASE_ERR + 7,
+ VXGE_HW_ERR_INVALID_INDEX = VXGE_HW_BASE_ERR + 8,
+ VXGE_HW_ERR_INVALID_TYPE = VXGE_HW_BASE_ERR + 9,
+ VXGE_HW_ERR_INVALID_OFFSET = VXGE_HW_BASE_ERR + 10,
+ VXGE_HW_ERR_INVALID_DEVICE = VXGE_HW_BASE_ERR + 11,
+ VXGE_HW_ERR_VERSION_CONFLICT = VXGE_HW_BASE_ERR + 12,
+ VXGE_HW_ERR_INVALID_PCI_INFO = VXGE_HW_BASE_ERR + 13,
+ VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14,
+ VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15,
+ VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16,
+ VXGE_HW_ERR_PRIVILAGED_OPEARATION = VXGE_HW_BASE_ERR + 17,
+ VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18,
+ VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19,
+ VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20,
+ VXGE_HW_ERR_CRITICAL = VXGE_HW_BASE_ERR + 21,
+ VXGE_HW_ERR_SLOT_FREEZE = VXGE_HW_BASE_ERR + 22,
+
+ VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS = VXGE_HW_BASE_BADCFG + 1,
+ VXGE_HW_BADCFG_FIFO_BLOCKS = VXGE_HW_BASE_BADCFG + 2,
+ VXGE_HW_BADCFG_VPATH_MTU = VXGE_HW_BASE_BADCFG + 3,
+ VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG = VXGE_HW_BASE_BADCFG + 4,
+ VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH = VXGE_HW_BASE_BADCFG + 5,
+ VXGE_HW_BADCFG_INTR_MODE = VXGE_HW_BASE_BADCFG + 6,
+ VXGE_HW_BADCFG_RTS_MAC_EN = VXGE_HW_BASE_BADCFG + 7,
+
+ VXGE_HW_EOF_TRACE_BUF = -1
+};
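+
+/*
+ * Example (illustrative): because the codes are grouped by base value,
+ * a non-VXGE_HW_OK status can be classified without matching every
+ * enumerator:
+ *     if (status >= VXGE_HW_BASE_BADCFG)
+ *             handle_bad_config(status);
+ *     else if (status >= VXGE_HW_BASE_ERR)
+ *             handle_error(status);
+ * where handle_bad_config() and handle_error() are hypothetical
+ * helpers, not part of this API.
+ */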
+
+/**
+ * enum vxge_hw_device_link_state - Link state enumeration.
+ * @VXGE_HW_LINK_NONE: Invalid link state.
+ * @VXGE_HW_LINK_DOWN: Link is down.
+ * @VXGE_HW_LINK_UP: Link is up.
+ *
+ */
+enum vxge_hw_device_link_state {
+ VXGE_HW_LINK_NONE,
+ VXGE_HW_LINK_DOWN,
+ VXGE_HW_LINK_UP
+};
+
+/**
+ * enum vxge_hw_fw_upgrade_code - FW upgrade return codes.
+ * @VXGE_HW_FW_UPGRADE_OK: All OK, send next 16 bytes
+ * @VXGE_HW_FW_UPGRADE_DONE: upload completed
+ * @VXGE_HW_FW_UPGRADE_ERR: upload error
+ * @VXGE_FW_UPGRADE_BYTES2SKIP: skip bytes in the stream
+ *
+ */
+enum vxge_hw_fw_upgrade_code {
+ VXGE_HW_FW_UPGRADE_OK = 0,
+ VXGE_HW_FW_UPGRADE_DONE = 1,
+ VXGE_HW_FW_UPGRADE_ERR = 2,
+ VXGE_FW_UPGRADE_BYTES2SKIP = 3
+};
+
+/**
+ * enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes.
+ * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data
+ * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error unknown type
+ * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash the image / check failed
+ */
+enum vxge_hw_fw_upgrade_err_code {
+ VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1 = 1,
+ VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW = 2,
+ VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3 = 3,
+ VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4 = 4,
+ VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5 = 5,
+ VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6 = 6,
+ VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7 = 7,
+ VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8 = 8,
+ VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN = 9,
+ VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH = 10
+};
+
+/**
+ * struct vxge_hw_device_date - Date Format
+ * @day: Day
+ * @month: Month
+ * @year: Year
+ * @date: Date in string format
+ *
+ * Structure for returning date
+ */
+
+#define VXGE_HW_FW_STRLEN 32
+struct vxge_hw_device_date {
+ u32 day;
+ u32 month;
+ u32 year;
+ char date[VXGE_HW_FW_STRLEN];
+};
+
+struct vxge_hw_device_version {
+ u32 major;
+ u32 minor;
+ u32 build;
+ char version[VXGE_HW_FW_STRLEN];
+};
+
+/**
+ * struct vxge_hw_fifo_config - Configuration of fifo.
+ * @enable: Is this fifo to be commissioned
+ * @fifo_blocks: Number of TxDL (that is, lists of Tx descriptors)
+ * blocks per queue.
+ * @max_frags: Max number of Tx buffers per TxDL (that is, per single
+ * transmit operation).
+ * No more than 256 transmit buffers can be specified.
+ * @memblock_size: Fifo descriptors are allocated in blocks of @memblock_size
+ * bytes. Setting @memblock_size to page size ensures
+ * by-page allocation of descriptors. 128K bytes is the
+ * maximum supported block size.
+ * @alignment_size: per Tx fragment DMA-able memory used to align transmit data
+ * (e.g., to align on a cache line).
+ * @intr: Boolean. Use 1 to generate interrupt for each completed TxDL.
+ * Use 0 otherwise.
+ * @no_snoop_bits: If non-zero, specifies no-snoop PCI operation,
+ * which generally improves latency of the host bridge operation
+ * (see PCI specification). For valid values please refer
+ * to struct vxge_hw_fifo_config{} in the driver sources.
+ *
+ * Configuration of all Titan fifos.
+ * Note: Valid (min, max) range for each attribute is specified in the body of
+ * the struct vxge_hw_fifo_config{} structure.
+ */
+struct vxge_hw_fifo_config {
+ u32 enable;
+#define VXGE_HW_FIFO_ENABLE 1
+#define VXGE_HW_FIFO_DISABLE 0
+
+ u32 fifo_blocks;
+#define VXGE_HW_MIN_FIFO_BLOCKS 2
+#define VXGE_HW_MAX_FIFO_BLOCKS 128
+
+ u32 max_frags;
+#define VXGE_HW_MIN_FIFO_FRAGS 1
+#define VXGE_HW_MAX_FIFO_FRAGS 256
+
+ u32 memblock_size;
+#define VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE VXGE_HW_BLOCK_SIZE
+#define VXGE_HW_MAX_FIFO_MEMBLOCK_SIZE 131072
+#define VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE 8096
+
+ u32 alignment_size;
+#define VXGE_HW_MIN_FIFO_ALIGNMENT_SIZE 0
+#define VXGE_HW_MAX_FIFO_ALIGNMENT_SIZE 65536
+#define VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE VXGE_CACHE_LINE_SIZE
+
+ u32 intr;
+#define VXGE_HW_FIFO_QUEUE_INTR_ENABLE 1
+#define VXGE_HW_FIFO_QUEUE_INTR_DISABLE 0
+#define VXGE_HW_FIFO_QUEUE_INTR_DEFAULT 0
+
+ u32 no_snoop_bits;
+#define VXGE_HW_FIFO_NO_SNOOP_DISABLED 0
+#define VXGE_HW_FIFO_NO_SNOOP_TXD 1
+#define VXGE_HW_FIFO_NO_SNOOP_FRM 2
+#define VXGE_HW_FIFO_NO_SNOOP_ALL 3
+#define VXGE_HW_FIFO_NO_SNOOP_DEFAULT 0
+
+};
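+
+/*
+ * Example (illustrative sketch; these particular values are arbitrary,
+ * the driver's actual values come from elsewhere): populating a fifo
+ * configuration from the bounds and defaults defined above:
+ *     struct vxge_hw_fifo_config fifo_cfg = {
+ *             .enable         = VXGE_HW_FIFO_ENABLE,
+ *             .fifo_blocks    = VXGE_HW_MIN_FIFO_BLOCKS,
+ *             .max_frags      = VXGE_HW_MAX_FIFO_FRAGS,
+ *             .memblock_size  = VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE,
+ *             .alignment_size = VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE,
+ *             .intr           = VXGE_HW_FIFO_QUEUE_INTR_DEFAULT,
+ *             .no_snoop_bits  = VXGE_HW_FIFO_NO_SNOOP_DEFAULT,
+ *     };
+ */
+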
+/**
+ * struct vxge_hw_ring_config - Ring configurations.
+ * @enable: Is this ring to be commissioned
+ * @ring_blocks: Number of RxD blocks in the ring
+ * @buffer_mode: Receive buffer mode (1, 3, or 5); for details please refer
+ * to Titan User Guide.
+ * @scatter_mode: Titan supports two receive scatter modes: A and B.
+ * For details please refer to Titan User Guide.
+ * @rx_timer_val: The number of 32ns periods that would be counted between two
+ * timer interrupts.
+ * @greedy_return: If set, forces the device to return absolutely all RxDs
+ * that are consumed and still on board when a timer interrupt
+ * triggers. If clear, and the device has already returned
+ * RxDs after the previous timer interrupt triggered and before
+ * the current one, the device is not forced to return the rest
+ * of the consumed RxDs that it has on board, which account for
+ * a byte count less than the one programmed into the
+ * PRC_CFG6.RXD_CRXDT field.
+ * @rx_timer_ci: TBD
+ * @backoff_interval_us: Time (in microseconds), after which Titan
+ * tries to download RxDs posted by the host.
+ * Note that the "backoff" does not happen if the host posts receive
+ * descriptors in a timely fashion.
+ * Ring configuration.
+ */
+struct vxge_hw_ring_config {
+ u32 enable;
+#define VXGE_HW_RING_ENABLE 1
+#define VXGE_HW_RING_DISABLE 0
+#define VXGE_HW_RING_DEFAULT 1
+
+ u32 ring_blocks;
+#define VXGE_HW_MIN_RING_BLOCKS 1
+#define VXGE_HW_MAX_RING_BLOCKS 128
+#define VXGE_HW_DEF_RING_BLOCKS 2
+
+ u32 buffer_mode;
+#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1
+#define VXGE_HW_RING_RXD_BUFFER_MODE_3 3
+#define VXGE_HW_RING_RXD_BUFFER_MODE_5 5
+#define VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT 1
+
+ u32 scatter_mode;
+#define VXGE_HW_RING_SCATTER_MODE_A 0
+#define VXGE_HW_RING_SCATTER_MODE_B 1
+#define VXGE_HW_RING_SCATTER_MODE_C 2
+#define VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT 0xffffffff
+
+ u64 rxds_limit;
+#define VXGE_HW_DEF_RING_RXDS_LIMIT 44
+};
+
+/**
+ * struct vxge_hw_vp_config - Configuration of virtual path
+ * @vp_id: Virtual Path Id
+ * @min_bandwidth: Minimum Guaranteed bandwidth
+ * @ring: See struct vxge_hw_ring_config{}.
+ * @fifo: See struct vxge_hw_fifo_config{}.
+ * @tti: Configuration of interrupt associated with Transmit.
+ * see struct vxge_hw_tim_intr_config();
+ * @rti: Configuration of interrupt associated with Receive.
+ * see struct vxge_hw_tim_intr_config();
+ * @mtu: MTU size used on this port.
+ * @rpa_strip_vlan_tag: Strip VLAN Tag enable/disable. Instructs the device to
+ * remove the VLAN tag from all received tagged frames that are not
+ * replicated at the internal L2 switch.
+ * 0 - Do not strip the VLAN tag.
+ * 1 - Strip the VLAN tag. Regardless of this setting, VLAN tags are
+ * always placed into the RxDMA descriptor.
+ *
+ * This structure is used by the driver to pass the configuration parameters to
+ * configure Virtual Path.
+ */
+struct vxge_hw_vp_config {
+ u32 vp_id;
+
+#define VXGE_HW_VPATH_PRIORITY_MIN 0
+#define VXGE_HW_VPATH_PRIORITY_MAX 16
+#define VXGE_HW_VPATH_PRIORITY_DEFAULT 0
+
+ u32 min_bandwidth;
+#define VXGE_HW_VPATH_BANDWIDTH_MIN 0
+#define VXGE_HW_VPATH_BANDWIDTH_MAX 100
+#define VXGE_HW_VPATH_BANDWIDTH_DEFAULT 0
+
+ struct vxge_hw_ring_config ring;
+ struct vxge_hw_fifo_config fifo;
+ struct vxge_hw_tim_intr_config tti;
+ struct vxge_hw_tim_intr_config rti;
+
+ u32 mtu;
+#define VXGE_HW_VPATH_MIN_INITIAL_MTU VXGE_HW_MIN_MTU
+#define VXGE_HW_VPATH_MAX_INITIAL_MTU VXGE_HW_MAX_MTU
+#define VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU 0xffffffff
+
+ u32 rpa_strip_vlan_tag;
+#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE 1
+#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE 0
+#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT 0xffffffff
+
+};
+/**
+ * struct vxge_hw_device_config - Device configuration.
+ * @dma_blockpool_initial: Initial size of DMA Pool
+ * @dma_blockpool_max: Maximum blocks in DMA pool
+ * @intr_mode: Line, or MSI-X interrupt.
+ *
+ * @rth_en: Enable Receive Traffic Hashing (RTH) using the IT (Indirection
+ * Table).
+ * @rth_it_type: RTH IT table programming type
+ * @rts_mac_en: Enable Receive Traffic Steering using MAC destination address
+ * @vp_config: Configuration for virtual paths
+ * @device_poll_millis: Interval (in milliseconds) to wait for
+ * register reads
+ *
+ * Titan configuration.
+ * Contains per-device configuration parameters, including:
+ * - stats sampling interval, etc.
+ *
+ * In addition, struct vxge_hw_device_config{} includes "subordinate"
+ * configurations, including:
+ * - fifos and rings;
+ * - MAC (done at firmware level).
+ *
+ * See Titan User Guide for more details.
+ * Note: Valid (min, max) range for each attribute is specified in the body of
+ * the struct vxge_hw_device_config{} structure. Please refer to the
+ * corresponding include file.
+ * See also: struct vxge_hw_tim_intr_config{}.
+ */
+struct vxge_hw_device_config {
+ u32 device_poll_millis;
+#define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1
+#define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000
+#define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000
+
+ u32 dma_blockpool_initial;
+ u32 dma_blockpool_max;
+#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0
+#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0
+#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4
+#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096
+
+#define VXGE_HW_MAX_PAYLOAD_SIZE_512 2
+
+ u32 intr_mode:2,
+#define VXGE_HW_INTR_MODE_IRQLINE 0
+#define VXGE_HW_INTR_MODE_MSIX 1
+#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2
+
+#define VXGE_HW_INTR_MODE_DEF 0
+
+ rth_en:1,
+#define VXGE_HW_RTH_DISABLE 0
+#define VXGE_HW_RTH_ENABLE 1
+#define VXGE_HW_RTH_DEFAULT 0
+
+ rth_it_type:1,
+#define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0
+#define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1
+#define VXGE_HW_RTH_IT_TYPE_DEFAULT 0
+
+ rts_mac_en:1,
+#define VXGE_HW_RTS_MAC_DISABLE 0
+#define VXGE_HW_RTS_MAC_ENABLE 1
+#define VXGE_HW_RTS_MAC_DEFAULT 0
+
+ hwts_en:1;
+#define VXGE_HW_HWTS_DISABLE 0
+#define VXGE_HW_HWTS_ENABLE 1
+#define VXGE_HW_HWTS_DEFAULT 1
+
+ struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS];
+};
+
+/**
+ * function vxge_uld_link_up_f - Link-Up callback provided by driver.
+ * @devh: HW device handle.
+ * Link-up notification callback provided by the driver.
+ * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
+ *
+ * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_down_f{},
+ * vxge_hw_driver_initialize().
+ */
+
+/**
+ * function vxge_uld_link_down_f - Link-Down callback provided by
+ * driver.
+ * @devh: HW device handle.
+ *
+ * Link-Down notification callback provided by the driver.
+ * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
+ *
+ * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
+ * vxge_hw_driver_initialize().
+ */
+
+/**
+ * function vxge_uld_crit_err_f - Critical Error notification callback.
+ * @devh: HW device handle (typically obtained at HW device
+ * initialization time).
+ * @type: Enumerated hw error, e.g.: double ECC.
+ * @ext_data: Extended data. The contents depend on the @type.
+ *
+ * Critical-error notification callback provided by the driver.
+ * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
+ *
+ * See also: struct vxge_hw_uld_cbs{}, enum vxge_hw_event{},
+ * vxge_hw_driver_initialize().
+ */
+
+/**
+ * struct vxge_hw_uld_cbs - driver "slow-path" callbacks.
+ * @link_up: See vxge_uld_link_up_f{}.
+ * @link_down: See vxge_uld_link_down_f{}.
+ * @crit_err: See vxge_uld_crit_err_f{}.
+ *
+ * Driver slow-path (per-driver) callbacks.
+ * Implemented by driver and provided to HW via
+ * vxge_hw_driver_initialize().
+ * Note that these callbacks are not mandatory: HW will not invoke
+ * a callback if NULL is specified.
+ *
+ * See also: vxge_hw_driver_initialize().
+ */
+struct vxge_hw_uld_cbs {
+ void (*link_up)(struct __vxge_hw_device *devh);
+ void (*link_down)(struct __vxge_hw_device *devh);
+ void (*crit_err)(struct __vxge_hw_device *devh,
+ enum vxge_hw_event type, u64 ext_data);
+};
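+
+/*
+ * Example (illustrative sketch; the handler names are hypothetical):
+ * a driver provides its slow-path handlers through a constant callback
+ * table handed to the HW layer at initialization:
+ *     static const struct vxge_hw_uld_cbs my_uld_cbs = {
+ *             .link_up   = my_link_up_handler,
+ *             .link_down = my_link_down_handler,
+ *             .crit_err  = my_crit_err_handler,
+ *     };
+ * Any member may be left NULL, in which case HW simply skips that
+ * callback.
+ */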
+
+/*
+ * struct __vxge_hw_blockpool_entry - Block private data structure
+ * @item: List header used to link.
+ * @length: Length of the block
+ * @memblock: Virtual address block
+ * @dma_addr: DMA Address of the block.
+ * @dma_handle: DMA handle of the block.
+ * @acc_handle: DMA acc handle
+ *
+ * Each block is allocated with a header used to link the blocks into a list.
+ *
+ */
+struct __vxge_hw_blockpool_entry {
+ struct list_head item;
+ u32 length;
+ void *memblock;
+ dma_addr_t dma_addr;
+ struct pci_dev *dma_handle;
+ struct pci_dev *acc_handle;
+};
+
+/*
+ * struct __vxge_hw_blockpool - Block Pool
+ * @hldev: HW device
+ * @block_size: size of each block.
+ * @pool_size: Number of blocks in the pool
+ * @pool_max: Maximum number of blocks above which additional blocks are freed
+ * @req_out: Number of block requests outstanding with the OS
+ * @free_block_list: List of free blocks
+ * @free_entry_list: List of free block-pool entry structures
+ *
+ * Block pool contains the DMA blocks preallocated.
+ *
+ */
+struct __vxge_hw_blockpool {
+ struct __vxge_hw_device *hldev;
+ u32 block_size;
+ u32 pool_size;
+ u32 pool_max;
+ u32 req_out;
+ struct list_head free_block_list;
+ struct list_head free_entry_list;
+};
+
+/*
+ * enum __vxge_hw_channel_type - Enumerated channel types.
+ * @VXGE_HW_CHANNEL_TYPE_UNKNOWN: Unknown channel.
+ * @VXGE_HW_CHANNEL_TYPE_FIFO: fifo.
+ * @VXGE_HW_CHANNEL_TYPE_RING: ring.
+ * @VXGE_HW_CHANNEL_TYPE_MAX: Maximum number of HW-supported
+ * (and recognized) channel types. Currently: 2.
+ *
+ * Enumerated channel types. Currently there are only two link-layer
+ * channels - Titan fifo and Titan ring. In the future the list will grow.
+ */
+enum __vxge_hw_channel_type {
+ VXGE_HW_CHANNEL_TYPE_UNKNOWN = 0,
+ VXGE_HW_CHANNEL_TYPE_FIFO = 1,
+ VXGE_HW_CHANNEL_TYPE_RING = 2,
+ VXGE_HW_CHANNEL_TYPE_MAX = 3
+};
+
+/*
+ * struct __vxge_hw_channel
+ * @item: List item; used to maintain a list of open channels.
+ * @type: Channel type. See enum vxge_hw_channel_type{}.
+ * @devh: Device handle. HW device object that contains _this_ channel.
+ * @vph: Virtual path handle. Virtual Path Object that contains _this_ channel.
+ * @length: Channel length. Currently allocated number of descriptors.
+ * The channel length "grows" when more descriptors get allocated.
+ * See _hw_mempool_grow.
+ * @reserve_arr: Reserve array. Contains descriptors that can be reserved
+ * by driver for the subsequent send or receive operation.
+ * See vxge_hw_fifo_txdl_reserve(),
+ * vxge_hw_ring_rxd_reserve().
+ * @reserve_ptr: Current pointer in the reserve array
+ * @reserve_top: Reserve top gives the maximum number of dtrs available in
+ * reserve array.
+ * @work_arr: Work array. Contains descriptors posted to the channel.
+ * Note that at any point in time @work_arr contains 3 types of
+ * descriptors:
+ * 1) posted but not yet consumed by Titan device;
+ * 2) consumed but not yet completed;
+ * 3) completed but not yet freed
+ * (via vxge_hw_fifo_txdl_free() or vxge_hw_ring_rxd_free())
+ * @post_index: Post index. At any point in time points to the
+ * position in the channel that will contain the next
+ * to-be-posted descriptor.
+ * @compl_index: Completion index. At any point in time points to the
+ * position in the channel that will contain the next
+ * to-be-completed descriptor.
+ * @free_arr: Free array. Contains completed descriptors that were freed
+ * (i.e., handed over back to HW) by driver.
+ * See vxge_hw_fifo_txdl_free(), vxge_hw_ring_rxd_free().
+ * @free_ptr: current pointer in free array
+ * @per_dtr_space: Per-descriptor space (in bytes) that channel user can utilize
+ * to store per-operation control information.
+ * @stats: Pointer to common statistics
+ * @userdata: Per-channel opaque (void*) user-defined context, which may be
+ * driver object, ULP connection, etc.
+ * Once channel is open, @userdata is passed back to user via
+ * vxge_hw_channel_callback_f.
+ *
+ * HW channel object.
+ *
+ * See also: enum vxge_hw_channel_type{}, enum vxge_hw_channel_flag
+ */
+struct __vxge_hw_channel {
+ struct list_head item;
+ enum __vxge_hw_channel_type type;
+ struct __vxge_hw_device *devh;
+ struct __vxge_hw_vpath_handle *vph;
+ u32 length;
+ u32 vp_id;
+ void **reserve_arr;
+ u32 reserve_ptr;
+ u32 reserve_top;
+ void **work_arr;
+ u32 post_index ____cacheline_aligned;
+ u32 compl_index ____cacheline_aligned;
+ void **free_arr;
+ u32 free_ptr;
+ void **orig_arr;
+ u32 per_dtr_space;
+ void *userdata;
+ struct vxge_hw_common_reg __iomem *common_reg;
+ u32 first_vp_id;
+ struct vxge_hw_vpath_stats_sw_common_info *stats;
+
+} ____cacheline_aligned;
+
+/*
+ * struct __vxge_hw_virtualpath - Virtual Path
+ *
+ * @vp_id: Virtual path id
+ * @vp_open: This flag specifies if vxge_hw_vp_open is called from LL Driver
+ * @hldev: Hal device
+ * @vp_config: Virtual Path Config
+ * @vp_reg: VPATH Register map address in BAR0
+ * @vpmgmt_reg: VPATH_MGMT register map address
+ * @max_mtu: Max MTU that can be supported
+ * @vsport_number: vsport attached to this vpath
+ * @max_kdfc_db: Maximum kernel mode doorbells
+ * @max_nofl_db: Maximum non offload doorbells
+ * @tx_intr_num: Interrupt Number associated with the TX
+ *
+ * @ringh: Ring Queue
+ * @fifoh: FIFO Queue
+ * @vpath_handles: Virtual Path handles list
+ * @stats_block: Memory for DMAing stats
+ * @stats: Vpath statistics
+ *
+ * Virtual path structure to encapsulate the data related to a virtual path.
+ * Virtual paths are allocated by the HW upon getting configuration from the
+ * driver and inserted into the list of virtual paths.
+ */
+struct __vxge_hw_virtualpath {
+ u32 vp_id;
+
+ u32 vp_open;
+#define VXGE_HW_VP_NOT_OPEN 0
+#define VXGE_HW_VP_OPEN 1
+
+ struct __vxge_hw_device *hldev;
+ struct vxge_hw_vp_config *vp_config;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
+ struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
+
+ u32 max_mtu;
+ u32 vsport_number;
+ u32 max_kdfc_db;
+ u32 max_nofl_db;
+ u64 tim_tti_cfg1_saved;
+ u64 tim_tti_cfg3_saved;
+ u64 tim_rti_cfg1_saved;
+ u64 tim_rti_cfg3_saved;
+
+ struct __vxge_hw_ring *____cacheline_aligned ringh;
+ struct __vxge_hw_fifo *____cacheline_aligned fifoh;
+ struct list_head vpath_handles;
+ struct __vxge_hw_blockpool_entry *stats_block;
+ struct vxge_hw_vpath_stats_hw_info *hw_stats;
+ struct vxge_hw_vpath_stats_hw_info *hw_stats_sav;
+ struct vxge_hw_vpath_stats_sw_info *sw_stats;
+ spinlock_t lock;
+};
+
+/*
+ * struct __vxge_hw_vpath_handle - List item to store callback information
+ * @item: List head to keep the item in linked list
+ * @vpath: Virtual path to which this item belongs
+ *
+ * This structure is used to store the callback information.
+ */
+struct __vxge_hw_vpath_handle {
+ struct list_head item;
+ struct __vxge_hw_virtualpath *vpath;
+};
+
+/*
+ * struct __vxge_hw_device
+ *
+ * HW device object.
+ */
+/**
+ * struct __vxge_hw_device - Hal device object
+ * @magic: Magic Number
+ * @bar0: BAR0 virtual address.
+ * @pdev: Physical device handle
+ * @config: Configuration passed by the LL driver at initialization
+ * @link_state: Link state
+ *
+ * HW device object. Represents Titan adapter
+ */
+struct __vxge_hw_device {
+ u32 magic;
+#define VXGE_HW_DEVICE_MAGIC 0x12345678
+#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
+ void __iomem *bar0;
+ struct pci_dev *pdev;
+ struct net_device *ndev;
+ struct vxge_hw_device_config config;
+ enum vxge_hw_device_link_state link_state;
+
+ const struct vxge_hw_uld_cbs *uld_callbacks;
+
+ u32 host_type;
+ u32 func_id;
+ u32 access_rights;
+#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH 0x1
+#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM 0x2
+#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM 0x4
+ struct vxge_hw_legacy_reg __iomem *legacy_reg;
+ struct vxge_hw_toc_reg __iomem *toc_reg;
+ struct vxge_hw_common_reg __iomem *common_reg;
+ struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
+ struct vxge_hw_srpcim_reg __iomem *srpcim_reg \
+ [VXGE_HW_TITAN_SRPCIM_REG_SPACES];
+ struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg \
+ [VXGE_HW_TITAN_VPMGMT_REG_SPACES];
+ struct vxge_hw_vpath_reg __iomem *vpath_reg \
+ [VXGE_HW_TITAN_VPATH_REG_SPACES];
+ u8 __iomem *kdfc;
+ u8 __iomem *usdc;
+ struct __vxge_hw_virtualpath virtual_paths \
+ [VXGE_HW_MAX_VIRTUAL_PATHS];
+ u64 vpath_assignments;
+ u64 vpaths_deployed;
+ u32 first_vp_id;
+ u64 tim_int_mask0[4];
+ u32 tim_int_mask1[4];
+
+ struct __vxge_hw_blockpool block_pool;
+ struct vxge_hw_device_stats stats;
+ u32 debug_module_mask;
+ u32 debug_level;
+ u32 level_err;
+ u32 level_trace;
+ u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES];
+};
+
+#define VXGE_HW_INFO_LEN 64
+/**
+ * struct vxge_hw_device_hw_info - Device information
+ * @host_type: Host Type
+ * @func_id: Function Id
+ * @vpath_mask: vpath bit mask
+ * @fw_version: Firmware version
+ * @fw_date: Firmware Date
+ * @flash_version: Flash version
+ * @flash_date: Flash date
+ * @mac_addrs: Mac addresses for each vpath
+ * @mac_addr_masks: Mac address masks for each vpath
+ *
+ * Returns the vpath mask that has the bits set for each vpath allocated
+ * for the driver and the first mac address for each vpath
+ */
+struct vxge_hw_device_hw_info {
+ u32 host_type;
+#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION 0
+#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION 1
+#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0 2
+#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION 3
+#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG 4
+#define VXGE_HW_SR_VH_FUNCTION0 5
+#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
+#define VXGE_HW_VH_NORMAL_FUNCTION 7
+ u64 function_mode;
+#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1
+#define VXGE_HW_FUNCTION_MODE_SRIOV 2
+#define VXGE_HW_FUNCTION_MODE_MRIOV 3
+#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5
+#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6
+#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9
+#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10
+
+ u32 func_id;
+ u64 vpath_mask;
+ struct vxge_hw_device_version fw_version;
+ struct vxge_hw_device_date fw_date;
+ struct vxge_hw_device_version flash_version;
+ struct vxge_hw_device_date flash_date;
+ u8 serial_number[VXGE_HW_INFO_LEN];
+ u8 part_number[VXGE_HW_INFO_LEN];
+ u8 product_desc[VXGE_HW_INFO_LEN];
+ u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
+ u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
+};
+
+/**
+ * struct vxge_hw_device_attr - Device memory spaces.
+ * @bar0: BAR0 virtual address.
+ * @pdev: PCI device object.
+ *
+ * Device memory spaces. Includes configuration, BAR0 etc. per device
+ * mapped memories. Also, includes a pointer to OS-specific PCI device object.
+ */
+struct vxge_hw_device_attr {
+ void __iomem *bar0;
+ struct pci_dev *pdev;
+ const struct vxge_hw_uld_cbs *uld_callbacks;
+};
+
+#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls)
+
+#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) { \
+ if (i < 16) { \
+ m0[0] |= vxge_vBIT(0x8, (i*4), 4); \
+ m0[1] |= vxge_vBIT(0x4, (i*4), 4); \
+ } \
+ else { \
+ m1[0] = 0x80000000; \
+ m1[1] = 0x40000000; \
+ } \
+}
+
+#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) { \
+ if (i < 16) { \
+ m0[0] &= ~vxge_vBIT(0x8, (i*4), 4); \
+ m0[1] &= ~vxge_vBIT(0x4, (i*4), 4); \
+ } \
+ else { \
+ m1[0] = 0; \
+ m1[1] = 0; \
+ } \
+}
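+
+/*
+ * Example (illustrative; assumes the per-device mask arrays declared
+ * in struct __vxge_hw_device below): masking, then unmasking, the
+ * timer interrupt of vpath i:
+ *     VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
+ *                                     hldev->tim_int_mask1, i);
+ *     VXGE_HW_DEVICE_TIM_INT_MASK_RESET(hldev->tim_int_mask0,
+ *                                       hldev->tim_int_mask1, i);
+ */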
+
+#define VXGE_HW_DEVICE_STATS_PIO_READ(loc, offset) { \
+ status = vxge_hw_mrpcim_stats_access(hldev, \
+ VXGE_HW_STATS_OP_READ, \
+ loc, \
+ offset, \
+ &val64); \
+ if (status != VXGE_HW_OK) \
+ return status; \
+}
+
+/*
+ * struct __vxge_hw_ring - Ring channel.
+ * @channel: Channel "base" of this ring, the common part of all HW
+ * channels.
+ * @mempool: Memory pool, the pool from which descriptors get allocated.
+ * (See vxge_hw_mm.h).
+ * @config: Ring configuration, part of device configuration
+ * (see struct vxge_hw_device_config{}).
+ * @ring_length: Length of the ring
+ * @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode,
+ * as per Titan User Guide.
+ * @rxd_size: RxD sizes for 1-, 3- or 5- buffer modes. As per Titan spec,
+ * a 1-buffer mode descriptor is 32 bytes long, etc.
+ * @rxd_priv_size: Per RxD size reserved (by HW) for driver to keep
+ * per-descriptor data (e.g., DMA handle for Solaris)
+ * @per_rxd_space: Per rxd space requested by driver
+ * @rxds_per_block: Number of descriptors per hardware-defined RxD
+ * block. Depends on the (1-, 3-, 5-) buffer mode.
+ * @rxdblock_priv_size: Reserved at the end of each RxD block. HW internal
+ * usage. Not to be confused with @rxd_priv_size.
+ * @cmpl_cnt: Completion counter. Is reset to zero upon entering the ISR.
+ * @callback: Channel completion callback. HW invokes the callback when there
+ * are new completions on that channel. In many implementations
+ * the @callback executes in the hw interrupt context.
+ * @rxd_init: Channel's descriptor-initialize callback.
+ * See vxge_hw_ring_rxd_init_f{}.
+ * If not NULL, HW invokes the callback when opening
+ * the ring.
+ * @rxd_term: Channel's descriptor-terminate callback. If not NULL,
+ * HW invokes the callback when closing the corresponding channel.
+ * See also vxge_hw_channel_rxd_term_f{}.
+ * @stats: Statistics for ring
+ * Ring channel.
+ *
+ * Note: The structure is cache line aligned to better utilize
+ * CPU cache performance.
+ */
+struct __vxge_hw_ring {
+ struct __vxge_hw_channel channel;
+ struct vxge_hw_mempool *mempool;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct vxge_hw_common_reg __iomem *common_reg;
+ u32 ring_length;
+ u32 buffer_mode;
+ u32 rxd_size;
+ u32 rxd_priv_size;
+ u32 per_rxd_space;
+ u32 rxds_per_block;
+ u32 rxdblock_priv_size;
+ u32 cmpl_cnt;
+ u32 vp_id;
+ u32 doorbell_cnt;
+ u32 total_db_cnt;
+ u64 rxds_limit;
+ u32 rtimer;
+ u64 tim_rti_cfg1_saved;
+ u64 tim_rti_cfg3_saved;
+
+ enum vxge_hw_status (*callback)(
+ struct __vxge_hw_ring *ringh,
+ void *rxdh,
+ u8 t_code,
+ void *userdata);
+
+ enum vxge_hw_status (*rxd_init)(
+ void *rxdh,
+ void *userdata);
+
+ void (*rxd_term)(
+ void *rxdh,
+ enum vxge_hw_rxd_state state,
+ void *userdata);
+
+ struct vxge_hw_vpath_stats_sw_ring_info *stats ____cacheline_aligned;
+ struct vxge_hw_ring_config *config;
+} ____cacheline_aligned;
+
+/**
+ * enum enum vxge_hw_txdl_state - Descriptor (TXDL) state.
+ * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
+ * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
+ * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
+ * device.
+ * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
+ * filling-in and posting later.
+ *
+ * Titan/HW descriptor states.
+ *
+ */
+enum vxge_hw_txdl_state {
+ VXGE_HW_TXDL_STATE_NONE = 0,
+ VXGE_HW_TXDL_STATE_AVAIL = 1,
+ VXGE_HW_TXDL_STATE_POSTED = 2,
+ VXGE_HW_TXDL_STATE_FREED = 3
+};
+/*
+ * struct __vxge_hw_fifo - Fifo.
+ * @channel: Channel "base" of this fifo, the common part of all HW
+ * channels.
+ * @mempool: Memory pool, from which descriptors get allocated.
+ * @config: Fifo configuration, part of device configuration
+ * (see struct vxge_hw_device_config{}).
+ * @interrupt_type: Interrupt type to be used
+ * @no_snoop_bits: See struct vxge_hw_fifo_config{}.
+ * @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock.
+ * For more details on TxDLs please refer to the Titan UG.
+ * @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus
+ * per-TxDL HW private space (struct __vxge_hw_fifo_txdl_priv).
+ * @priv_size: Per-Tx descriptor space reserved for driver
+ * usage.
+ * @per_txdl_space: Per txdl private space for the driver
+ * @callback: Fifo completion callback. HW invokes the callback when there
+ * are new completions on that fifo. In many implementations
+ * the @callback executes in the hw interrupt context.
+ * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
+ * HW invokes the callback when closing the corresponding fifo.
+ * See also vxge_hw_fifo_txdl_term_f{}.
+ * @stats: Statistics of this fifo
+ *
+ * Fifo channel.
+ * Note: The structure is cache line aligned.
+ */
+struct __vxge_hw_fifo {
+ struct __vxge_hw_channel channel;
+ struct vxge_hw_mempool *mempool;
+ struct vxge_hw_fifo_config *config;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;
+ u64 interrupt_type;
+ u32 no_snoop_bits;
+ u32 txdl_per_memblock;
+ u32 txdl_size;
+ u32 priv_size;
+ u32 per_txdl_space;
+ u32 vp_id;
+ u32 tx_intr_num;
+ u32 rtimer;
+ u64 tim_tti_cfg1_saved;
+ u64 tim_tti_cfg3_saved;
+
+ enum vxge_hw_status (*callback)(
+ struct __vxge_hw_fifo *fifo_handle,
+ void *txdlh,
+ enum vxge_hw_fifo_tcode t_code,
+ void *userdata,
+ struct sk_buff ***skb_ptr,
+ int nr_skb,
+ int *more);
+
+ void (*txdl_term)(
+ void *txdlh,
+ enum vxge_hw_txdl_state state,
+ void *userdata);
+
+ struct vxge_hw_vpath_stats_sw_fifo_info *stats ____cacheline_aligned;
+} ____cacheline_aligned;
+
+/*
+ * struct __vxge_hw_fifo_txdl_priv - Transmit descriptor HW-private data.
+ * @dma_addr: DMA (mapped) address of _this_ descriptor.
+ * @dma_handle: DMA handle used to map the descriptor onto device.
+ * @dma_offset: Descriptor's offset in the memory block. HW allocates
+ * descriptors in memory blocks (see struct vxge_hw_fifo_config{})
+ * Each memblock is a contiguous block of DMA-able memory.
+ * @frags: Total number of fragments (that is, contiguous data buffers)
+ * carried by this TxDL.
+ * @align_vaddr_start: Aligned virtual address start
+ * @align_vaddr: Virtual address of the per-TxDL area in memory used for
+ * alignment. Used to place one or more mis-aligned fragments
+ * @align_dma_addr: DMA address translated from the @align_vaddr.
+ * @align_dma_handle: DMA handle that corresponds to @align_dma_addr.
+ * @align_dma_acch: DMA access handle corresponds to @align_dma_addr.
+ * @align_dma_offset: The current offset into the @align_vaddr area.
+ * Grows while filling the descriptor, gets reset.
+ * @align_used_frags: Number of fragments used.
+ * @alloc_frags: Total number of fragments allocated.
+ * @unused: TODO
+ * @next_txdl_priv: (TODO).
+ * @first_txdp: (TODO).
+ * @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous
+ * TxDL list.
+ * @txdlh: Corresponding txdlh to this TxDL.
+ * @memblock: Pointer to the TxDL memory block or memory page.
+ * on the next send operation.
+ * @dma_object: DMA address and handle of the memory block that contains
+ * the descriptor. This member is used only in the "checked"
+ * version of the HW (to enforce certain assertions);
+ * otherwise it gets compiled out.
+ * @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage.
+ *
+ * Per-transmit descriptor HW-private data. HW uses the space to keep DMA
+ * information associated with the descriptor. Note that driver can ask HW
+ * to allocate additional per-descriptor space for its own (driver-specific)
+ * purposes.
+ *
+ * See also: struct vxge_hw_ring_rxd_priv{}.
+ */
+struct __vxge_hw_fifo_txdl_priv {
+ dma_addr_t dma_addr;
+ struct pci_dev *dma_handle;
+ ptrdiff_t dma_offset;
+ u32 frags;
+ u8 *align_vaddr_start;
+ u8 *align_vaddr;
+ dma_addr_t align_dma_addr;
+ struct pci_dev *align_dma_handle;
+ struct pci_dev *align_dma_acch;
+ ptrdiff_t align_dma_offset;
+ u32 align_used_frags;
+ u32 alloc_frags;
+ u32 unused;
+ struct __vxge_hw_fifo_txdl_priv *next_txdl_priv;
+ struct vxge_hw_fifo_txd *first_txdp;
+ void *memblock;
+};
+
+/*
+ * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
+ * @control_0: Bits 0 to 7 - Doorbell type.
+ * Bits 8 to 31 - Reserved.
+ * Bits 32 to 39 - The highest TxD in this TxDL.
+ * Bits 40 to 47 - Reserved.
+ * Bits 48 to 55 - Reserved.
+ * Bits 56 to 63 - No snoop flags.
+ * @txdl_ptr: The starting location of the TxDL in host memory.
+ *
+ * Created by the host and written to the adapter via PIO to a Kernel Doorbell
+ * FIFO. All non-offload doorbell wrapper fields must be written by the host as
+ * part of a doorbell write. Consumed by the adapter but is not written by the
+ * adapter.
+ */
+struct __vxge_hw_non_offload_db_wrapper {
+ u64 control_0;
+#define VXGE_HW_NODBW_GET_TYPE(ctrl0) vxge_bVALn(ctrl0, 0, 8)
+#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8)
+#define VXGE_HW_NODBW_TYPE_NODBW 0
+
+#define VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0) vxge_bVALn(ctrl0, 32, 8)
+#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8)
+
+#define VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0) vxge_bVALn(ctrl0, 56, 8)
+#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8)
+#define VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE 0x2
+#define VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ 0x1
+
+ u64 txdl_ptr;
+};
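+
+/*
+ * Example (illustrative): composing the first word of a non-offload
+ * doorbell from the field macros above, for a TxDL whose highest TxD
+ * index is last_txd (a hypothetical variable):
+ *     control_0 = VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
+ *             VXGE_HW_NODBW_LAST_TXD_NUMBER(last_txd) |
+ *             VXGE_HW_NODBW_LIST_NO_SNOOP(
+ *                     VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE |
+ *                     VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ);
+ */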
+
+/*
+ * TX Descriptor
+ */
+
+/**
+ * struct vxge_hw_fifo_txd - Transmit Descriptor
+ * @control_0: Bits 0 to 6 - Reserved.
+ * Bit 7 - List Ownership. This field should be initialized
+ * to '1' by the driver before the transmit list pointer is
+ * written to the adapter. This field will be set to '0' by the
+ * adapter once it has completed transmitting the frame or frames in
+ * the list. Note - This field is only valid in TxD0. Additionally,
+ * for multi-list sequences, the driver should not release any
+ * buffers until the ownership of the last list in the multi-list
+ * sequence has been returned to the host.
+ * Bits 8 to 11 - Reserved
+ * Bits 12 to 15 - Transfer_Code. This field is only valid in
+ * TxD0. It is used to describe the status of the transmit data
+ * buffer transfer. This field is always overwritten by the
+ * adapter, so this field may be initialized to any value.
+ * Bits 16 to 17 - Host steering. This field allows the host to
+ * override the selection of the physical transmit port.
+ * Attention:
+ * Normal sounds as if learned from the switch rather than from
+ * the aggregation algorithms.
+ * 00: Normal. Use Destination/MAC Address
+ * lookup to determine the transmit port.
+ * 01: Send on physical Port1.
+ * 10: Send on physical Port0.
+ * 11: Send on both ports.
+ * Bits 18 to 21 - Reserved
+ * Bits 22 to 23 - Gather_Code. This field is set by the host and
+ * is used to describe how individual buffers comprise a frame.
+ * 10: First descriptor of a frame.
+ * 00: Middle of a multi-descriptor frame.
+ * 01: Last descriptor of a frame.
+ * 11: First and last descriptor of a frame (the entire frame
+ * resides in a single buffer).
+ * For multi-descriptor frames, the only valid gather code sequence
+ * is {10, [00], 01}. In other words, the descriptors must be placed
+ * in the list in the correct order.
+ * Bits 24 to 27 - Reserved
+ * Bits 28 to 29 - LSO_Frm_Encap. LSO Frame Encapsulation
+ * definition. Only valid in TxD0. This field allows the host to
+ * indicate the Ethernet encapsulation of an outbound LSO packet.
+ * 00 - classic mode (best guess)
+ * 01 - LLC
+ * 10 - SNAP
+ * 11 - DIX
+ * If "classic mode" is selected, the adapter will attempt to
+ * decode the frame's Ethernet encapsulation by examining the L/T
+ * field as follows:
+ * <= 0x05DC LLC/SNAP encoding; must examine DSAP/SSAP to determine
+ * if packet is IPv4 or IPv6.
+ * 0x8870 Jumbo-SNAP encoding.
+ * 0x0800 IPv4 DIX encoding
+ * 0x86DD IPv6 DIX encoding
+ * others illegal encapsulation
+ * Bits 30 - LSO_ Flag. Large Send Offload (LSO) flag.
+ * Set to 1 to perform segmentation offload for TCP/UDP.
+ * This field is valid only in TxD0.
+ * Bits 31 to 33 - Reserved.
+ * Bits 34 to 47 - LSO_MSS. TCP/UDP LSO Maximum Segment Size
+ * This field is meaningful only when LSO_Control is non-zero.
+ * When LSO_Control is set to TCP_LSO, the single (possibly large)
+ * TCP segment described by this TxDL will be sent as a series of
+ * TCP segments each of which contains no more than LSO_MSS
+ * payload bytes.
+ * When LSO_Control is set to UDP_LSO, the single (possibly large)
+ * UDP datagram described by this TxDL will be sent as a series of
+ * UDP datagrams each of which contains no more than LSO_MSS
+ * payload bytes.
+ * All outgoing frames from this TxDL will have LSO_MSS bytes of UDP
+ * or TCP payload, with the exception of the last, which will have
+ * <= LSO_MSS bytes of payload.
+ * Bits 48 to 63 - Buffer_Size. Number of valid bytes in the
+ * buffer to be read by the adapter. This field is written by the
+ * host. A value of 0 is illegal.
+ * Bits 32 to 63 - This value is written by the adapter upon
+ * completion of a UDP or TCP LSO operation and indicates the number
+ * of UDP or TCP payload bytes that were transmitted. 0x0000 will be
+ * returned for any non-LSO operation.
+ * @control_1: Bits 0 to 4 - Reserved.
+ * Bit 5 - Tx_CKO_IPv4 Set to a '1' to enable IPv4 header checksum
+ * offload. This field is only valid in the first TxD of a frame.
+ * Bit 6 - Tx_CKO_TCP Set to a '1' to enable TCP checksum offload.
+ * This field is only valid in the first TxD of a frame (the TxD's
+ * gather code must be 10 or 11). The driver should only set this
+ * bit if it can guarantee that TCP is present.
+ * Bit 7 - Tx_CKO_UDP Set to a '1' to enable UDP checksum offload.
+ * This field is only valid in the first TxD of a frame (the TxD's
+ * gather code must be 10 or 11). The driver should only set this
+ * bit if it can guarantee that UDP is present.
+ * Bits 8 to 14 - Reserved.
+ * Bit 15 - Tx_VLAN_Enable VLAN tag insertion flag. Set to a '1' to
+ * instruct the adapter to insert the VLAN tag specified by the
+ * Tx_VLAN_Tag field. This field is only valid in the first TxD of
+ * a frame.
+ * Bits 16 to 31 - Tx_VLAN_Tag. Variable portion of the VLAN tag
+ * to be inserted into the frame by the adapter (the first two bytes
+ * of a VLAN tag are always 0x8100). This field is only valid if the
+ * Tx_VLAN_Enable field is set to '1'.
+ * Bits 32 to 33 - Reserved.
+ * Bits 34 to 39 - Tx_Int_Number. Indicates which Tx interrupt
+ * number the frame is associated with. This field is written by the
+ * host. It is only valid in the first TxD of a frame.
+ * Bits 40 to 42 - Reserved.
+ * Bit 43 - Set to 1 to exclude the frame from bandwidth metering
+ * functions. This field is valid only in the first TxD
+ * of a frame.
+ * Bits 44 to 45 - Reserved.
+ * Bit 46 - Tx_Int_Per_List Set to a '1' to instruct the adapter to
+ * generate an interrupt as soon as all of the frames in the list
+ * have been transmitted. In order to have per-frame interrupts,
+ * the driver should place a maximum of one frame per list. This
+ * field is only valid in the first TxD of a frame.
+ * Bit 47 - Tx_Int_Utilization Set to a '1' to instruct the adapter
+ * to count the frame toward the utilization interrupt specified in
+ * the Tx_Int_Number field. This field is only valid in the first
+ * TxD of a frame.
+ * Bits 48 to 63 - Reserved.
+ * @buffer_pointer: Buffer start address.
+ * @host_control: Host_Control. Opaque 64-bit data stored by the driver
+ * inside the Titan descriptor prior to posting the latter on the fifo
+ * via vxge_hw_fifo_txdl_post(). The %host_control is returned as is
+ * to the driver with each completed descriptor.
+ *
+ * Transmit descriptor (TxD). A fifo descriptor contains a configured
+ * number (list) of TxDs. For more details please refer to Titan User
+ * Guide, Section 5.4.2 "Transmit Descriptor (TxD) Format".
+ */
+struct vxge_hw_fifo_txd {
+ u64 control_0;
+#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER vxge_mBIT(7)
+
+#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
+#define VXGE_HW_FIFO_TXD_T_CODE(val) vxge_vBIT(val, 12, 4)
+#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED VXGE_HW_FIFO_T_CODE_UNUSED
+
+
+#define VXGE_HW_FIFO_TXD_GATHER_CODE(val) vxge_vBIT(val, 22, 2)
+#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST VXGE_HW_FIFO_GATHER_CODE_FIRST
+#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST VXGE_HW_FIFO_GATHER_CODE_LAST
+
+
+#define VXGE_HW_FIFO_TXD_LSO_EN vxge_mBIT(30)
+
+#define VXGE_HW_FIFO_TXD_LSO_MSS(val) vxge_vBIT(val, 34, 14)
+
+#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val) vxge_vBIT(val, 48, 16)
+
+ u64 control_1;
+#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN vxge_mBIT(5)
+#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN vxge_mBIT(6)
+#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN vxge_mBIT(7)
+#define VXGE_HW_FIFO_TXD_VLAN_ENABLE vxge_mBIT(15)
+
+#define VXGE_HW_FIFO_TXD_VLAN_TAG(val) vxge_vBIT(val, 16, 16)
+
+#define VXGE_HW_FIFO_TXD_INT_NUMBER(val) vxge_vBIT(val, 34, 6)
+
+#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST vxge_mBIT(46)
+#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ vxge_mBIT(47)
+
+ u64 buffer_pointer;
+
+ u64 host_control;
+};
+
+/**
+ * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
+ * @host_control: This field is exclusively for host use and is "readonly"
+ * from the adapter's perspective.
+ * @control_0: Bits 0 to 6 - RTH_Bucket get
+ * Bit 7 - Own Descriptor ownership bit. This bit is set to 1
+ * by the host, and is set to 0 by the adapter.
+ * 0 - Host owns RxD and buffer.
+ * 1 - The adapter owns RxD and buffer.
+ * Bit 8 - Fast_Path_Eligible When set, indicates that the
+ * received frame meets all of the criteria for fast path processing.
+ * The required criteria are as follows:
+ * !SYN &
+ * (Transfer_Code == "Transfer OK") &
+ * (!Is_IP_Fragment) &
+ * ((Is_IPv4 & computed_L3_checksum == 0xFFFF) |
+ * (Is_IPv6)) &
+ * ((Is_TCP & computed_L4_checksum == 0xFFFF) |
+ * (Is_UDP & (computed_L4_checksum == 0xFFFF |
+ * computed_L4_checksum == 0x0000)))
+ * (same meaning for all RxD buffer modes)
+ * Bit 9 - L3 Checksum Correct
+ * Bit 10 - L4 Checksum Correct
+ * Bit 11 - Reserved
+ * Bit 12 to 15 - This field is written by the adapter. It is
+ * used to report the status of the frame transfer to the host.
+ * 0x0 - Transfer OK
+ * 0x4 - RDA Failure During Transfer
+ * 0x5 - Unparseable Packet, such as unknown IPv6 header.
+ * 0x6 - Frame integrity error (FCS or ECC).
+ * 0x7 - Buffer Size Error. The provided buffer(s) were not
+ * appropriately sized and data loss occurred.
+ * 0x8 - Internal ECC Error. RxD corrupted.
+ * 0x9 - IPv4 Checksum error
+ * 0xA - TCP/UDP Checksum error
+ * 0xF - Unknown Error or Multiple Error. Indicates an
+ * unknown problem or that more than one of the transfer codes is set.
+ * Bit 16 - SYN The adapter sets this field to indicate that
+ * the incoming frame contained a TCP segment with its SYN bit
+ * set and its ACK bit NOT set. (same meaning for all RxD buffer
+ * modes)
+ * Bit 17 - Is ICMP
+ * Bit 18 - RTH_SPDM_HIT Set to 1 if there was a match in the
+ * Socket Pair Direct Match Table and the frame was steered based
+ * on SPDM.
+ * Bit 19 - RTH_IT_HIT Set to 1 if there was a match in the
+ * Indirection Table and the frame was steered based on hash
+ * indirection.
+ * Bit 20 to 23 - RTH_HASH_TYPE Indicates the function (hash
+ * type) that was used to calculate the hash.
+ * Bit 24 - IS_VLAN Set to '1' if the frame was/is VLAN
+ * tagged.
+ * Bit 25 to 26 - ETHER_ENCAP Reflects the Ethernet encapsulation
+ * of the received frame.
+ * 0x0 - Ethernet DIX
+ * 0x1 - LLC
+ * 0x2 - SNAP (includes Jumbo-SNAP)
+ * 0x3 - IPX
+ * Bit 27 - IS_IPV4 Set to '1' if the frame contains an IPv4 packet.
+ * Bit 28 - IS_IPV6 Set to '1' if the frame contains an IPv6 packet.
+ * Bit 29 - IS_IP_FRAG Set to '1' if the frame contains a fragmented
+ * IP packet.
+ * Bit 30 - IS_TCP Set to '1' if the frame contains a TCP segment.
+ * Bit 31 - IS_UDP Set to '1' if the frame contains a UDP message.
+ * Bit 32 to 47 - L3_Checksum[0:15] The IPv4 checksum value that
+ * arrived with the frame. If the resulting computed IPv4 header
+ * checksum for the frame did not produce the expected 0xFFFF value,
+ * then the transfer code would be set to 0x9.
+ * Bit 48 to 63 - L4_Checksum[0:15] The TCP/UDP checksum value that
+ * arrived with the frame. If the resulting computed TCP/UDP checksum
+ * for the frame did not produce the expected 0xFFFF value, then the
+ * transfer code would be set to 0xA.
+ * @control_1: Bits 0 to 1 - Reserved
+ * Bits 2 to 15 - Buffer0_Size.This field is set by the host and
+ * eventually overwritten by the adapter. The host writes the
+ * available buffer size in bytes when it passes the descriptor to
+ * the adapter. When a frame is delivered to the host, the adapter
+ * populates this field with the number of bytes written into the
+ * buffer. The largest supported buffer is 16,383 bytes.
+ * Bit 16 to 47 - RTH Hash Value 32-bit RTH hash value. Only valid if
+ * RTH_HASH_TYPE (Control_0, bits 20:23) is nonzero.
+ * Bit 48 to 63 - VLAN_Tag[0:15] The contents of the variable portion
+ * of the VLAN tag, if one was detected by the adapter. This field is
+ * populated even if VLAN-tag stripping is enabled.
+ * @buffer0_ptr: Pointer to buffer. This field is populated by the driver.
+ *
+ * One buffer mode RxD for ring structure
+ */
+struct vxge_hw_ring_rxd_1 {
+ u64 host_control;
+ u64 control_0;
+#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0) vxge_bVALn(ctrl0, 0, 7)
+
+#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER vxge_mBIT(7)
+
+#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0) vxge_bVALn(ctrl0, 8, 1)
+
+#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 9, 1)
+
+#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 10, 1)
+
+#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
+#define VXGE_HW_RING_RXD_T_CODE(val) vxge_vBIT(val, 12, 4)
+
+#define VXGE_HW_RING_RXD_T_CODE_UNUSED VXGE_HW_RING_T_CODE_UNUSED
+
+#define VXGE_HW_RING_RXD_SYN_GET(ctrl0) vxge_bVALn(ctrl0, 16, 1)
+
+#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0) vxge_bVALn(ctrl0, 17, 1)
+
+#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 18, 1)
+
+#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 19, 1)
+
+#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0) vxge_bVALn(ctrl0, 20, 4)
+
+#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0) vxge_bVALn(ctrl0, 24, 1)
+
+#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0) vxge_bVALn(ctrl0, 25, 2)
+
+#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0) vxge_bVALn(ctrl0, 27, 5)
+
+#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 32, 16)
+
+#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 48, 16)
+
+ u64 control_1;
+
+#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1) vxge_bVALn(ctrl1, 2, 14)
+#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val) vxge_vBIT(val, 2, 14)
+#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK vxge_vBIT(0x3FFF, 2, 14)
+
+#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1) vxge_bVALn(ctrl1, 16, 32)
+
+#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1) vxge_bVALn(ctrl1, 48, 16)
+
+ u64 buffer0_ptr;
+};
+
+enum vxge_hw_rth_algoritms {
+ RTH_ALG_JENKINS = 0,
+ RTH_ALG_MS_RSS = 1,
+ RTH_ALG_CRC32C = 2
+};
+
+/**
+ * struct vxge_hw_rth_hash_types - RTH hash types.
+ * @hash_type_tcpipv4_en: Enables RTH field type HashTypeTcpIPv4
+ * @hash_type_ipv4_en: Enables RTH field type HashTypeIPv4
+ * @hash_type_tcpipv6_en: Enables RTH field type HashTypeTcpIPv6
+ * @hash_type_ipv6_en: Enables RTH field type HashTypeIPv6
+ * @hash_type_tcpipv6ex_en: Enables RTH field type HashTypeTcpIPv6Ex
+ * @hash_type_ipv6ex_en: Enables RTH field type HashTypeIPv6Ex
+ *
+ * Used to pass RTH hash types to rts_rts_set.
+ *
+ * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
+ */
+struct vxge_hw_rth_hash_types {
+ u8 hash_type_tcpipv4_en:1,
+ hash_type_ipv4_en:1,
+ hash_type_tcpipv6_en:1,
+ hash_type_ipv6_en:1,
+ hash_type_tcpipv6ex_en:1,
+ hash_type_ipv6ex_en:1;
+};
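+
+/*
+ * Example (illustrative): enabling hashing on IPv4 addresses and on
+ * TCP-over-IPv4 socket pairs only:
+ *     struct vxge_hw_rth_hash_types hash_types = {
+ *             .hash_type_tcpipv4_en = 1,
+ *             .hash_type_ipv4_en    = 1,
+ *     };
+ * The structure is then passed to vxge_hw_vpath_rts_rth_set().
+ */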
+
+void vxge_hw_device_debug_set(
+ struct __vxge_hw_device *devh,
+ enum vxge_debug_level level,
+ u32 mask);
+
+u32
+vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);
+
+u32
+vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);
+
+/**
+ * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
+ * @buf_mode: Buffer mode (1, 3 or 5)
+ *
+ * This function returns the size of RxD for given buffer mode
+ */
+static inline u32 vxge_hw_ring_rxd_size_get(u32 buf_mode)
+{
+ return sizeof(struct vxge_hw_ring_rxd_1);
+}
+
+/**
+ * vxge_hw_ring_rxds_per_block_get - Get the number of rxds per block.
+ * @buf_mode: Buffer mode (1 buffer mode only)
+ *
+ * This function returns the number of RxD for RxD block for given buffer mode
+ */
+static inline u32 vxge_hw_ring_rxds_per_block_get(u32 buf_mode)
+{
+ return (u32)((VXGE_HW_BLOCK_SIZE-16) /
+ sizeof(struct vxge_hw_ring_rxd_1));
+}
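+
+/*
+ * Example (illustrative arithmetic, assuming VXGE_HW_BLOCK_SIZE is a
+ * 4096-byte page): with the 32-byte one-buffer RxD defined above,
+ * (4096 - 16) / 32 = 127 RxDs fit in each block (integer division);
+ * the 16 bytes excluded from the computation are not available for
+ * descriptors.
+ */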
+
+/**
+ * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
+ * @rxdh: Descriptor handle.
+ * @dma_pointer: DMA address of a single receive buffer this descriptor
+ * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
+ * the receive buffer should be already mapped to the device
+ * @size: Size of the receive @dma_pointer buffer.
+ *
+ * Prepare 1-buffer-mode Rx descriptor for posting
+ * (via vxge_hw_ring_rxd_post()).
+ *
+ * This inline helper-function does not return a value and always
+ * succeeds.
+ *
+ */
+static inline
+void vxge_hw_ring_rxd_1b_set(
+ void *rxdh,
+ dma_addr_t dma_pointer,
+ u32 size)
+{
+ struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
+ rxdp->buffer0_ptr = dma_pointer;
+ rxdp->control_1 &= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
+ rxdp->control_1 |= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
+}
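+
+/*
+ * Example (illustrative sketch; the mapping call is the caller's
+ * responsibility, not part of this helper): the receive buffer must be
+ * DMA-mapped before the descriptor is prepared, e.g.
+ *     dma_addr = dma_map_single(&pdev->dev, buf, size, DMA_FROM_DEVICE);
+ *     vxge_hw_ring_rxd_1b_set(rxdh, dma_addr, size);
+ */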
+
+/**
+ * vxge_hw_ring_rxd_1b_get - Get data from the completed 1-buf
+ * descriptor.
+ * @ring_handle: Ring handle.
+ * @rxdh: Descriptor handle.
+ * @dma_pointer: DMA address of a single receive buffer this descriptor
+ * carries. Returned by HW.
+ * @pkt_length: Length (in bytes) of the data in the buffer pointed to
+ * by @dma_pointer.
+ *
+ * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
+ * This inline helper-function uses completed descriptor to populate receive
+ * buffer pointer and other "out" parameters. The function always succeeds.
+ *
+ */
+static inline
+void vxge_hw_ring_rxd_1b_get(
+ struct __vxge_hw_ring *ring_handle,
+ void *rxdh,
+ u32 *pkt_length)
+{
+ struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
+
+ *pkt_length =
+ (u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
+}
+
+/**
+ * vxge_hw_ring_rxd_1b_info_get - Get extended information associated with
+ * a completed receive descriptor for 1b mode.
+ * @ring_handle: Ring handle.
+ * @rxdh: Descriptor handle.
+ * @rxd_info: Descriptor information
+ *
+ * Retrieve extended information associated with a completed receive descriptor.
+ *
+ */
+static inline
+void vxge_hw_ring_rxd_1b_info_get(
+ struct __vxge_hw_ring *ring_handle,
+ void *rxdh,
+ struct vxge_hw_ring_rxd_info *rxd_info)
+{
+
+ struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
+ rxd_info->syn_flag =
+ (u32)VXGE_HW_RING_RXD_SYN_GET(rxdp->control_0);
+ rxd_info->is_icmp =
+ (u32)VXGE_HW_RING_RXD_IS_ICMP_GET(rxdp->control_0);
+ rxd_info->fast_path_eligible =
+ (u32)VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(rxdp->control_0);
+ rxd_info->l3_cksum_valid =
+ (u32)VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(rxdp->control_0);
+ rxd_info->l3_cksum =
+ (u32)VXGE_HW_RING_RXD_L3_CKSUM_GET(rxdp->control_0);
+ rxd_info->l4_cksum_valid =
+ (u32)VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(rxdp->control_0);
+ rxd_info->l4_cksum =
+ (u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0);
+ rxd_info->frame =
+ (u32)VXGE_HW_RING_RXD_ETHER_ENCAP_GET(rxdp->control_0);
+ rxd_info->proto =
+ (u32)VXGE_HW_RING_RXD_FRAME_PROTO_GET(rxdp->control_0);
+ rxd_info->is_vlan =
+ (u32)VXGE_HW_RING_RXD_IS_VLAN_GET(rxdp->control_0);
+ rxd_info->vlan =
+ (u32)VXGE_HW_RING_RXD_VLAN_TAG_GET(rxdp->control_1);
+ rxd_info->rth_bucket =
+ (u32)VXGE_HW_RING_RXD_RTH_BUCKET_GET(rxdp->control_0);
+ rxd_info->rth_it_hit =
+ (u32)VXGE_HW_RING_RXD_RTH_IT_HIT_GET(rxdp->control_0);
+ rxd_info->rth_spdm_hit =
+ (u32)VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(rxdp->control_0);
+ rxd_info->rth_hash_type =
+ (u32)VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(rxdp->control_0);
+ rxd_info->rth_value =
+ (u32)VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(rxdp->control_1);
+}
+
+/**
+ * vxge_hw_ring_rxd_private_get - Get driver private per-descriptor data
+ * of a 1b-mode or 3b-mode ring.
+ * @rxdh: Descriptor handle.
+ *
+ * Returns: private driver info associated with the descriptor.
+ * The driver requests per-descriptor space via struct vxge_hw_ring_attr.
+ *
+ */
+static inline void *vxge_hw_ring_rxd_private_get(void *rxdh)
+{
+ struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
+ return (void *)(size_t)rxdp->host_control;
+}
+
+/**
+ * vxge_hw_fifo_txdl_cksum_set_bits - Offload checksum.
+ * @txdlh: Descriptor handle.
+ * @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
+ * and/or TCP and/or UDP.
+ *
+ * Ask Titan to calculate IPv4 & transport checksums for _this_ transmit
+ * descriptor.
+ * This API is part of the preparation of the transmit descriptor for posting
+ * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
+ * vxge_hw_fifo_txdl_mss_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
+ * and vxge_hw_fifo_txdl_buffer_set().
+ * All these APIs fill in the fields of the fifo descriptor,
+ * in accordance with the Titan specification.
+ *
+ */
+static inline void vxge_hw_fifo_txdl_cksum_set_bits(void *txdlh, u64 cksum_bits)
+{
+ struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
+ txdp->control_1 |= cksum_bits;
+}
+
+/**
+ * vxge_hw_fifo_txdl_mss_set - Set MSS.
+ * @txdlh: Descriptor handle.
+ * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the
+ * driver, which in turn inserts the MSS into the @txdlh.
+ *
+ * This API is part of the preparation of the transmit descriptor for posting
+ * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
+ * vxge_hw_fifo_txdl_buffer_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
+ * and vxge_hw_fifo_txdl_cksum_set_bits().
+ * All these APIs fill in the fields of the fifo descriptor,
+ * in accordance with the Titan specification.
+ *
+ */
+static inline void vxge_hw_fifo_txdl_mss_set(void *txdlh, int mss)
+{
+ struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
+
+ txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_EN;
+ txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_MSS(mss);
+}
+
+/**
+ * vxge_hw_fifo_txdl_vlan_set - Set VLAN tag.
+ * @txdlh: Descriptor handle.
+ * @vlan_tag: 16bit VLAN tag.
+ *
+ * Insert VLAN tag into specified transmit descriptor.
+ * The actual insertion of the tag into outgoing frame is done by the hardware.
+ */
+static inline void vxge_hw_fifo_txdl_vlan_set(void *txdlh, u16 vlan_tag)
+{
+ struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
+
+ txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_ENABLE;
+ txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_TAG(vlan_tag);
+}
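+
+/*
+ * Illustrative sketch (not part of this driver): the "preparation"
+ * helpers above are applied between reserving a descriptor and
+ * vxge_hw_fifo_txdl_post(); the control flow below is hypothetical and
+ * the CKO flag names are assumed from the accompanying header.
+ *
+ *	vxge_hw_fifo_txdl_cksum_set_bits(txdlh,
+ *			VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
+ *			VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN);
+ *	if (skb_is_gso(skb))
+ *		vxge_hw_fifo_txdl_mss_set(txdlh, skb_shinfo(skb)->gso_size);
+ *	if (vlan_tag)
+ *		vxge_hw_fifo_txdl_vlan_set(txdlh, vlan_tag);
+ *	vxge_hw_fifo_txdl_post(fifo, txdlh);
+ */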
+
+/**
+ * vxge_hw_fifo_txdl_private_get - Retrieve per-descriptor private data.
+ * @txdlh: Descriptor handle.
+ *
+ * Retrieve per-descriptor private data.
+ * Note that driver requests per-descriptor space via
+ * struct vxge_hw_fifo_attr passed to
+ * vxge_hw_vpath_open().
+ *
+ * Returns: private driver data associated with the descriptor.
+ */
+static inline void *vxge_hw_fifo_txdl_private_get(void *txdlh)
+{
+ struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
+
+ return (void *)(size_t)txdp->host_control;
+}
+
+/**
+ * struct vxge_hw_ring_attr - Ring open "template".
+ * @callback: Ring completion callback. HW invokes the callback when there
+ * are new completions on that ring. In many implementations
+ * the @callback executes in the hw interrupt context.
+ * @rxd_init: Ring's descriptor-initialize callback.
+ * See vxge_hw_ring_rxd_init_f{}.
+ * If not NULL, HW invokes the callback when opening
+ * the ring.
+ * @rxd_term: Ring's descriptor-terminate callback. If not NULL,
+ * HW invokes the callback when closing the corresponding ring.
+ * See also vxge_hw_ring_rxd_term_f{}.
+ * @userdata: User-defined "context" of _that_ ring. Passed back to the
+ * user as one of the @callback, @rxd_init, and @rxd_term arguments.
+ * @per_rxd_space: If specified (i.e., greater than zero): extra space
+ * reserved by HW per each receive descriptor.
+ * Can be used to store
+ * and retrieve on completion, information specific
+ * to the driver.
+ *
+ * Ring open "template". User fills the structure with ring
+ * attributes and passes it to vxge_hw_vpath_open().
+ */
+struct vxge_hw_ring_attr {
+ enum vxge_hw_status (*callback)(
+ struct __vxge_hw_ring *ringh,
+ void *rxdh,
+ u8 t_code,
+ void *userdata);
+
+ enum vxge_hw_status (*rxd_init)(
+ void *rxdh,
+ void *userdata);
+
+ void (*rxd_term)(
+ void *rxdh,
+ enum vxge_hw_rxd_state state,
+ void *userdata);
+
+ void *userdata;
+ u32 per_rxd_space;
+};
+
+/**
+ * function vxge_hw_fifo_callback_f - FIFO callback.
+ * @vpath_handle: Virtual path whose fifo contains one or more completed
+ * descriptors.
+ * @txdlh: First completed descriptor.
+ * @txdl_priv: Pointer to per txdl space allocated
+ * @t_code: Transfer code, as per Titan User Guide.
+ * Returned by HW.
+ * @host_control: Opaque 64bit data stored by driver inside the Titan
+ * descriptor prior to posting the latter on the fifo
+ * via vxge_hw_fifo_txdl_post(). The @host_control is returned
+ * as is to the driver with each completed descriptor.
+ * @userdata: Opaque per-fifo data specified at fifo open
+ * time, via vxge_hw_vpath_open().
+ *
+ * Fifo completion callback (type declaration). A single per-fifo
+ * callback is specified at fifo open time, via
+ * vxge_hw_vpath_open(). Typically gets called as part of the processing
+ * of the Interrupt Service Routine.
+ *
+ * Fifo callback gets called by HW if, and only if, there is at least
+ * one new completion on a given fifo. Upon processing the first @txdlh the
+ * driver is _supposed_ to continue consuming completions using:
+ * - vxge_hw_fifo_txdl_next_completed()
+ *
+ * Note that failure to process new completions in a timely fashion
+ * leads to VXGE_HW_INF_OUT_OF_DESCRIPTORS condition.
+ *
+ * Non-zero @t_code means failure to process transmit descriptor.
+ *
+ * In the "transmit" case the failure could happen, for instance, when the
+ * link is down, in which case Titan completes the descriptor because it
+ * is not able to send the data out.
+ *
+ * For details please refer to Titan User Guide.
+ *
+ * See also: vxge_hw_fifo_txdl_next_completed(), vxge_hw_fifo_txdl_term_f{}.
+ */
+/**
+ * function vxge_hw_fifo_txdl_term_f - Terminate descriptor callback.
+ * @txdlh: First completed descriptor.
+ * @txdl_priv: Pointer to per txdl space allocated
+ * @state: One of the enum vxge_hw_txdl_state{} enumerated states.
+ * @userdata: Per-fifo user data (a.k.a. context) specified at
+ * fifo open time, via vxge_hw_vpath_open().
+ *
+ * Terminate descriptor callback. Unless NULL is specified in the
+ * struct vxge_hw_fifo_attr{} structure passed to vxge_hw_vpath_open(),
+ * HW invokes the callback as part of closing the fifo, prior to
+ * de-allocating the fifo and associated data structures
+ * (including descriptors).
+ * The driver should use the callback to (for instance) unmap
+ * and free DMA data buffers associated with the posted (state =
+ * VXGE_HW_TXDL_STATE_POSTED) descriptors,
+ * as well as to perform other relevant cleanup.
+ *
+ * See also: struct vxge_hw_fifo_attr{}
+ */
+/**
+ * struct vxge_hw_fifo_attr - Fifo open "template".
+ * @callback: Fifo completion callback. HW invokes the callback when there
+ * are new completions on that fifo. In many implementations
+ * the @callback executes in the hw interrupt context.
+ * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
+ * HW invokes the callback when closing the corresponding fifo.
+ * See also vxge_hw_fifo_txdl_term_f{}.
+ * @userdata: User-defined "context" of _that_ fifo. Passed back to the
+ * user as one of the @callback, and @txdl_term arguments.
+ * @per_txdl_space: If specified (i.e., greater than zero): extra space
+ * reserved by HW per each transmit descriptor. Can be used to
+ * store, and retrieve on completion, information specific
+ * to the driver.
+ *
+ * Fifo open "template". User fills the structure with fifo
+ * attributes and passes it to vxge_hw_vpath_open().
+ */
+struct vxge_hw_fifo_attr {
+
+ enum vxge_hw_status (*callback)(
+ struct __vxge_hw_fifo *fifo_handle,
+ void *txdlh,
+ enum vxge_hw_fifo_tcode t_code,
+ void *userdata,
+ struct sk_buff ***skb_ptr,
+ int nr_skb, int *more);
+
+ void (*txdl_term)(
+ void *txdlh,
+ enum vxge_hw_txdl_state state,
+ void *userdata);
+
+ void *userdata;
+ u32 per_txdl_space;
+};
+
+/**
+ * struct vxge_hw_vpath_attr - Attributes of virtual path
+ * @vp_id: Identifier of Virtual Path
+ * @ring_attr: Attributes of ring for non-offload receive
+ * @fifo_attr: Attributes of fifo for non-offload transmit
+ *
+ * Attributes of virtual path. This structure is passed as parameter
+ * to the vxge_hw_vpath_open() routine to set the attributes of ring and fifo.
+ */
+struct vxge_hw_vpath_attr {
+ u32 vp_id;
+ struct vxge_hw_ring_attr ring_attr;
+ struct vxge_hw_fifo_attr fifo_attr;
+};
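+
+/*
+ * Illustrative sketch (not part of this driver): a driver typically
+ * zeroes a struct vxge_hw_vpath_attr, fills in the ring and fifo
+ * callbacks, and opens the virtual path; the callback and context names
+ * are hypothetical.
+ *
+ *	struct vxge_hw_vpath_attr attr;
+ *	struct __vxge_hw_vpath_handle *vp;
+ *
+ *	memset(&attr, 0, sizeof(attr));
+ *	attr.vp_id = vp_id;
+ *	attr.ring_attr.callback = my_rx_complete;
+ *	attr.ring_attr.userdata = ring_ctx;
+ *	attr.fifo_attr.callback = my_tx_complete;
+ *	attr.fifo_attr.userdata = fifo_ctx;
+ *
+ *	if (vxge_hw_vpath_open(devh, &attr, &vp) != VXGE_HW_OK)
+ *		goto fail;
+ */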
+
+enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(
+ void __iomem *bar0,
+ struct vxge_hw_device_hw_info *hw_info);
+
+enum vxge_hw_status __devinit vxge_hw_device_config_default_get(
+ struct vxge_hw_device_config *device_config);
+
+/**
+ * vxge_hw_device_link_state_get - Get link state.
+ * @devh: HW device handle.
+ *
+ * Get link state.
+ * Returns: link state.
+ */
+static inline
+enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
+ struct __vxge_hw_device *devh)
+{
+ return devh->link_state;
+}
+
+void vxge_hw_device_terminate(struct __vxge_hw_device *devh);
+
+const u8 *
+vxge_hw_device_serial_number_get(struct __vxge_hw_device *devh);
+
+u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *devh);
+
+const u8 *
+vxge_hw_device_product_name_get(struct __vxge_hw_device *devh);
+
+enum vxge_hw_status __devinit vxge_hw_device_initialize(
+ struct __vxge_hw_device **devh,
+ struct vxge_hw_device_attr *attr,
+ struct vxge_hw_device_config *device_config);
+
+enum vxge_hw_status vxge_hw_device_getpause_data(
+ struct __vxge_hw_device *devh,
+ u32 port,
+ u32 *tx,
+ u32 *rx);
+
+enum vxge_hw_status vxge_hw_device_setpause_data(
+ struct __vxge_hw_device *devh,
+ u32 port,
+ u32 tx,
+ u32 rx);
+
+static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
+ unsigned long size,
+ struct pci_dev **p_dmah,
+ struct pci_dev **p_dma_acch)
+{
+ gfp_t flags;
+ void *vaddr;
+ unsigned long misaligned = 0;
+ int realloc_flag = 0;
+ *p_dma_acch = *p_dmah = NULL;
+
+ if (in_interrupt())
+ flags = GFP_ATOMIC | GFP_DMA;
+ else
+ flags = GFP_KERNEL | GFP_DMA;
+realloc:
+ vaddr = kmalloc(size, flags);
+ if (vaddr == NULL)
+ return vaddr;
+ misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
+ VXGE_CACHE_LINE_SIZE);
+ if (realloc_flag)
+ goto out;
+
+ if (misaligned) {
+ /* misaligned, free current one and try allocating
+ * size + VXGE_CACHE_LINE_SIZE memory
+ */
+ kfree(vaddr);
+ size += VXGE_CACHE_LINE_SIZE;
+ realloc_flag = 1;
+ goto realloc;
+ }
+out:
+ *(unsigned long *)p_dma_acch = misaligned;
+ vaddr = (void *)((u8 *)vaddr + misaligned);
+ return vaddr;
+}
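+
+/*
+ * Illustrative sketch (not part of this driver): the alignment offset that
+ * vxge_os_dma_malloc() stashes in *p_dma_acch must be handed back to
+ * vxge_os_dma_free() so the original kmalloc() pointer can be recovered.
+ *
+ *	struct pci_dev *dmah, *dma_acch;
+ *	void *block;
+ *
+ *	block = vxge_os_dma_malloc(pdev, size, &dmah, &dma_acch);
+ *	if (block != NULL) {
+ *		...
+ *		vxge_os_dma_free(pdev, block, &dma_acch);
+ *	}
+ */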
+
+static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
+ struct pci_dev **p_dma_acch)
+{
+ unsigned long misaligned = *(unsigned long *)p_dma_acch;
+ u8 *tmp = (u8 *)vaddr;
+ tmp -= misaligned;
+ kfree((void *)tmp);
+}
+
+/*
+ * __vxge_hw_mempool_item_priv - Return a pointer to the per-item private
+ * space.
+ */
+static inline void*
+__vxge_hw_mempool_item_priv(
+ struct vxge_hw_mempool *mempool,
+ u32 memblock_idx,
+ void *item,
+ u32 *memblock_item_idx)
+{
+ ptrdiff_t offset;
+ void *memblock = mempool->memblocks_arr[memblock_idx];
+
+ offset = (u32)((u8 *)item - (u8 *)memblock);
+ vxge_assert(offset >= 0 && (u32)offset < mempool->memblock_size);
+
+ (*memblock_item_idx) = (u32) offset / mempool->item_size;
+ vxge_assert((*memblock_item_idx) < mempool->items_per_memblock);
+
+ return (u8 *)mempool->memblocks_priv_arr[memblock_idx] +
+ (*memblock_item_idx) * mempool->items_priv_size;
+}
+
+/*
+ * __vxge_hw_fifo_txdl_priv - Return the per-TxDL private data
+ * associated with a descriptor.
+ * @fifo: Fifo
+ * @txdp: Pointer to a TxD
+ */
+static inline struct __vxge_hw_fifo_txdl_priv *
+__vxge_hw_fifo_txdl_priv(
+ struct __vxge_hw_fifo *fifo,
+ struct vxge_hw_fifo_txd *txdp)
+{
+ return (struct __vxge_hw_fifo_txdl_priv *)
+ (((char *)((ulong)txdp->host_control)) +
+ fifo->per_txdl_space);
+}
+
+enum vxge_hw_status vxge_hw_vpath_open(
+ struct __vxge_hw_device *devh,
+ struct vxge_hw_vpath_attr *attr,
+ struct __vxge_hw_vpath_handle **vpath_handle);
+
+enum vxge_hw_status vxge_hw_vpath_close(
+ struct __vxge_hw_vpath_handle *vpath_handle);
+
+enum vxge_hw_status
+vxge_hw_vpath_reset(
+ struct __vxge_hw_vpath_handle *vpath_handle);
+
+enum vxge_hw_status
+vxge_hw_vpath_recover_from_reset(
+ struct __vxge_hw_vpath_handle *vpath_handle);
+
+void
+vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp);
+
+enum vxge_hw_status
+vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ringh);
+
+enum vxge_hw_status vxge_hw_vpath_mtu_set(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ u32 new_mtu);
+
+void
+vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
+
+#ifndef readq
+/* Non-atomic fallback: reads the upper 32 bits first, then the lower. */
+static inline u64 readq(void __iomem *addr)
+{
+ u64 ret;
+
+ ret = (u64)readl(addr + 4) << 32;
+ ret |= readl(addr);
+
+ return ret;
+}
+#endif
+
+#ifndef writeq
+/* Non-atomic fallback: writes the lower 32 bits first, then the upper. */
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel((u32) (val), addr);
+ writel((u32) (val >> 32), (addr + 4));
+}
+#endif
+
+static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
+{
+ writel(val, addr + 4);
+}
+
+static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
+{
+ writel(val, addr);
+}
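+
+/*
+ * Illustrative sketch (not part of this driver): the two helpers above
+ * allow an explicit lower-then-upper ordering when a 64-bit register must
+ * be written as two 32-bit halves; "reg" and the barrier placement are
+ * hypothetical.
+ *
+ *	__vxge_hw_pio_mem_write32_lower((u32)(val & 0xFFFFFFFF), reg);
+ *	wmb();
+ *	__vxge_hw_pio_mem_write32_upper((u32)(val >> 32), reg);
+ */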
+
+enum vxge_hw_status
+vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
+
+enum vxge_hw_status
+vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
+
+/**
+ * vxge_debug_ll
+ * @level: level of debug verbosity.
+ * @mask: debug mask identifying the module.
+ * @fmt: printf-like format string
+ *
+ * Provides logging facilities. Can be customized on a per-module
+ * basis and/or with debug levels. Input parameters, except
+ * level and mask, are the same as posix printf. This macro
+ * may be compiled out if the DEBUG macro was never defined.
+ * See also: enum vxge_debug_level{}.
+ */
+#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
+#define vxge_debug_ll(level, mask, fmt, ...) do { \
+ if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \
+ (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
+ if ((mask & VXGE_DEBUG_MASK) == mask) \
+ printk(fmt "\n", __VA_ARGS__); \
+} while (0)
+#else
+#define vxge_debug_ll(level, mask, fmt, ...)
+#endif
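+
+/*
+ * Illustrative sketch (not part of this driver): a typical call site;
+ * the message text is hypothetical.
+ *
+ *	vxge_debug_ll(VXGE_TRACE, VXGE_DEBUG_MASK,
+ *		"%s: vpath %d opened", __func__, vp_id);
+ */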
+
+enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
+ struct __vxge_hw_vpath_handle **vpath_handles,
+ u32 vpath_count,
+ u8 *mtable,
+ u8 *itable,
+ u32 itable_size);
+
+enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ enum vxge_hw_rth_algoritms algorithm,
+ struct vxge_hw_rth_hash_types *hash_type,
+ u16 bucket_size);
+
+enum vxge_hw_status
+__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
+
+#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
+#define VXGE_HW_MAX_POLLING_COUNT 100
+
+void
+vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
+ u32 *minor, u32 *build);
+
+enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
+ int size);
+
+enum vxge_hw_status
+vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
+ struct eprom_image *eprom_image_data);
+
+int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
+#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
new file mode 100644
index 000000000000..92dd72d3f9de
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -0,0 +1,1132 @@
+/******************************************************************************
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ *
+ * vxge-ethtool.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
+ * Virtualized Server Adapter.
+ * Copyright(c) 2002-2010 Exar Corp.
+ ******************************************************************************/
+#include <linux/ethtool.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+
+#include "vxge-ethtool.h"
+
+/**
+ * vxge_ethtool_sset - Sets different link parameters.
+ * @dev: device pointer.
+ * @info: pointer to the structure with parameters given by ethtool to set
+ * link information.
+ *
+ * The function sets different link parameters provided by the user onto
+ * the NIC.
+ * Return value:
+ * 0 on success, -EINVAL if an unsupported configuration is requested
+ * (only 10Gb full duplex without autonegotiation is supported).
+ */
+static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info)
+{
+ /* We currently only support 10Gb/FULL */
+ if ((info->autoneg == AUTONEG_ENABLE) ||
+ (ethtool_cmd_speed(info) != SPEED_10000) ||
+ (info->duplex != DUPLEX_FULL))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * vxge_ethtool_gset - Return link specific information.
+ * @dev: device pointer.
+ * @info: pointer to the structure with parameters given by ethtool
+ * to return link information.
+ *
+ * Returns link-specific information such as speed and duplex to ethtool.
+ * Return value:
+ * 0 on success.
+ */
+static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
+{
+ info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
+ info->port = PORT_FIBRE;
+
+ info->transceiver = XCVR_EXTERNAL;
+
+ if (netif_carrier_ok(dev)) {
+ ethtool_cmd_speed_set(info, SPEED_10000);
+ info->duplex = DUPLEX_FULL;
+ } else {
+ ethtool_cmd_speed_set(info, -1);
+ info->duplex = -1;
+ }
+
+ info->autoneg = AUTONEG_DISABLE;
+ return 0;
+}
+
+/**
+ * vxge_ethtool_gdrvinfo - Returns driver specific information.
+ * @dev: device pointer.
+ * @info: pointer to the structure with parameters given by ethtool to
+ * return driver information.
+ *
+ * Returns driver-specific information such as name and version to ethtool.
+ */
+static void vxge_ethtool_gdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+ strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, vdev->fw_version, sizeof(info->fw_version));
+ strlcpy(info->bus_info, pci_name(vdev->pdev), sizeof(info->bus_info));
+ info->regdump_len = sizeof(struct vxge_hw_vpath_reg)
+ * vdev->no_of_vpath;
+
+ info->n_stats = STAT_LEN;
+}
+
+/**
+ * vxge_ethtool_gregs - dumps the vpath register space of Titan into the buffer.
+ * @dev: device pointer.
+ * @regs: pointer to the structure with parameters given by ethtool for
+ * dumping the registers.
+ * @space: The output buffer into which all the registers are dumped.
+ *
+ * Dumps the vpath register space of Titan NIC into the user given
+ * buffer area.
+ */
+static void vxge_ethtool_gregs(struct net_device *dev,
+ struct ethtool_regs *regs, void *space)
+{
+ int index, offset;
+ enum vxge_hw_status status;
+ u64 reg;
+ u64 *reg_space = (u64 *)space;
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct __vxge_hw_device *hldev = vdev->devh;
+
+ regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
+ regs->version = vdev->pdev->subsystem_device;
+ for (index = 0; index < vdev->no_of_vpath; index++) {
+ for (offset = 0; offset < sizeof(struct vxge_hw_vpath_reg);
+ offset += 8) {
+ status = vxge_hw_mgmt_reg_read(hldev,
+ vxge_hw_mgmt_reg_type_vpath,
+ vdev->vpaths[index].device_id,
+ offset, &reg);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "%s:%d Getting reg dump Failed",
+ __func__, __LINE__);
+ return;
+ }
+ *reg_space++ = reg;
+ }
+ }
+}
+
+/**
+ * vxge_ethtool_idnic - To physically identify the nic on the system.
+ * @dev : device pointer.
+ * @state : requested LED state
+ *
+ * Used to physically identify the NIC on the system.
+ * Returns 0 on success, -EINVAL for unsupported LED states.
+ */
+static int vxge_ethtool_idnic(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct __vxge_hw_device *hldev = vdev->devh;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_OFF);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vxge_ethtool_getpause_data - Pause frame generation and reception.
+ * @dev : device pointer.
+ * @ep : pointer to the structure with pause parameters given by ethtool.
+ * Description:
+ * Returns the Pause frame generation and reception capability of the NIC.
+ * Return value:
+ * void
+ */
+static void vxge_ethtool_getpause_data(struct net_device *dev,
+ struct ethtool_pauseparam *ep)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct __vxge_hw_device *hldev = vdev->devh;
+
+ vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause);
+}
+
+/**
+ * vxge_ethtool_setpause_data - set/reset pause frame generation.
+ * @dev : device pointer.
+ * @ep : pointer to the structure with pause parameters given by ethtool.
+ * Description:
+ * It can be used to set or reset Pause frame generation or reception
+ * support of the NIC.
+ * Return value:
+ * 0 on success.
+ */
+static int vxge_ethtool_setpause_data(struct net_device *dev,
+ struct ethtool_pauseparam *ep)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct __vxge_hw_device *hldev = vdev->devh;
+
+ vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause);
+
+ vdev->config.tx_pause_enable = ep->tx_pause;
+ vdev->config.rx_pause_enable = ep->rx_pause;
+
+ return 0;
+}
+
+static void vxge_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *estats, u64 *tmp_stats)
+{
+ int j, k;
+ enum vxge_hw_status status;
+ enum vxge_hw_status swstatus;
+ struct vxge_vpath *vpath = NULL;
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct __vxge_hw_device *hldev = vdev->devh;
+ struct vxge_hw_xmac_stats *xmac_stats;
+ struct vxge_hw_device_stats_sw_info *sw_stats;
+ struct vxge_hw_device_stats_hw_info *hw_stats;
+
+ u64 *ptr = tmp_stats;
+
+ memset(tmp_stats, 0,
+ vxge_ethtool_get_sset_count(dev, ETH_SS_STATS) * sizeof(u64));
+
+ xmac_stats = kzalloc(sizeof(struct vxge_hw_xmac_stats), GFP_KERNEL);
+ if (xmac_stats == NULL) {
+ vxge_debug_init(VXGE_ERR,
+ "%s : %d Memory Allocation failed for xmac_stats",
+ __func__, __LINE__);
+ return;
+ }
+
+ sw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_sw_info),
+ GFP_KERNEL);
+ if (sw_stats == NULL) {
+ kfree(xmac_stats);
+ vxge_debug_init(VXGE_ERR,
+ "%s : %d Memory Allocation failed for sw_stats",
+ __func__, __LINE__);
+ return;
+ }
+
+ hw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_hw_info),
+ GFP_KERNEL);
+ if (hw_stats == NULL) {
+ kfree(xmac_stats);
+ kfree(sw_stats);
+ vxge_debug_init(VXGE_ERR,
+ "%s : %d Memory Allocation failed for hw_stats",
+ __func__, __LINE__);
+ return;
+ }
+
+ /* Each "*ptr++ = 0" below reserves the slot matching a section title
+ * string emitted by vxge_ethtool_get_strings(). */
+ *ptr++ = 0;
+ status = vxge_hw_device_xmac_stats_get(hldev, xmac_stats);
+ if (status != VXGE_HW_OK) {
+ if (status != VXGE_HW_ERR_PRIVILAGED_OPEARATION) {
+ vxge_debug_init(VXGE_ERR,
+ "%s : %d Failure in getting xmac stats",
+ __func__, __LINE__);
+ }
+ }
+ swstatus = vxge_hw_driver_stats_get(hldev, sw_stats);
+ if (swstatus != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "%s : %d Failure in getting sw stats",
+ __func__, __LINE__);
+ }
+
+ status = vxge_hw_device_stats_get(hldev, hw_stats);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "%s : %d hw_stats_get error", __func__, __LINE__);
+ }
+
+ for (k = 0; k < vdev->no_of_vpath; k++) {
+ struct vxge_hw_vpath_stats_hw_info *vpath_info;
+
+ vpath = &vdev->vpaths[k];
+ j = vpath->device_id;
+ vpath_info = hw_stats->vpath_info[j];
+ if (!vpath_info) {
+ memset(ptr, 0, (VXGE_HW_VPATH_TX_STATS_LEN +
+ VXGE_HW_VPATH_RX_STATS_LEN) * sizeof(u64));
+ ptr += (VXGE_HW_VPATH_TX_STATS_LEN +
+ VXGE_HW_VPATH_RX_STATS_LEN);
+ continue;
+ }
+
+ *ptr++ = vpath_info->tx_stats.tx_ttl_eth_frms;
+ *ptr++ = vpath_info->tx_stats.tx_ttl_eth_octets;
+ *ptr++ = vpath_info->tx_stats.tx_data_octets;
+ *ptr++ = vpath_info->tx_stats.tx_mcast_frms;
+ *ptr++ = vpath_info->tx_stats.tx_bcast_frms;
+ *ptr++ = vpath_info->tx_stats.tx_ucast_frms;
+ *ptr++ = vpath_info->tx_stats.tx_tagged_frms;
+ *ptr++ = vpath_info->tx_stats.tx_vld_ip;
+ *ptr++ = vpath_info->tx_stats.tx_vld_ip_octets;
+ *ptr++ = vpath_info->tx_stats.tx_icmp;
+ *ptr++ = vpath_info->tx_stats.tx_tcp;
+ *ptr++ = vpath_info->tx_stats.tx_rst_tcp;
+ *ptr++ = vpath_info->tx_stats.tx_udp;
+ *ptr++ = vpath_info->tx_stats.tx_unknown_protocol;
+ *ptr++ = vpath_info->tx_stats.tx_lost_ip;
+ *ptr++ = vpath_info->tx_stats.tx_parse_error;
+ *ptr++ = vpath_info->tx_stats.tx_tcp_offload;
+ *ptr++ = vpath_info->tx_stats.tx_retx_tcp_offload;
+ *ptr++ = vpath_info->tx_stats.tx_lost_ip_offload;
+ *ptr++ = vpath_info->rx_stats.rx_ttl_eth_frms;
+ *ptr++ = vpath_info->rx_stats.rx_vld_frms;
+ *ptr++ = vpath_info->rx_stats.rx_offload_frms;
+ *ptr++ = vpath_info->rx_stats.rx_ttl_eth_octets;
+ *ptr++ = vpath_info->rx_stats.rx_data_octets;
+ *ptr++ = vpath_info->rx_stats.rx_offload_octets;
+ *ptr++ = vpath_info->rx_stats.rx_vld_mcast_frms;
+ *ptr++ = vpath_info->rx_stats.rx_vld_bcast_frms;
+ *ptr++ = vpath_info->rx_stats.rx_accepted_ucast_frms;
+ *ptr++ = vpath_info->rx_stats.rx_accepted_nucast_frms;
+ *ptr++ = vpath_info->rx_stats.rx_tagged_frms;
+ *ptr++ = vpath_info->rx_stats.rx_long_frms;
+ *ptr++ = vpath_info->rx_stats.rx_usized_frms;
+ *ptr++ = vpath_info->rx_stats.rx_osized_frms;
+ *ptr++ = vpath_info->rx_stats.rx_frag_frms;
+ *ptr++ = vpath_info->rx_stats.rx_jabber_frms;
+ *ptr++ = vpath_info->rx_stats.rx_ttl_64_frms;
+ *ptr++ = vpath_info->rx_stats.rx_ttl_65_127_frms;
+ *ptr++ = vpath_info->rx_stats.rx_ttl_128_255_frms;
+ *ptr++ = vpath_info->rx_stats.rx_ttl_256_511_frms;
+ *ptr++ = vpath_info->rx_stats.rx_ttl_512_1023_frms;
+ *ptr++ = vpath_info->rx_stats.rx_ttl_1024_1518_frms;
+ *ptr++ = vpath_info->rx_stats.rx_ttl_1519_4095_frms;
+ *ptr++ = vpath_info->rx_stats.rx_ttl_4096_8191_frms;
+ *ptr++ = vpath_info->rx_stats.rx_ttl_8192_max_frms;
+ *ptr++ = vpath_info->rx_stats.rx_ttl_gt_max_frms;
+ *ptr++ = vpath_info->rx_stats.rx_ip;
+ *ptr++ = vpath_info->rx_stats.rx_accepted_ip;
+ *ptr++ = vpath_info->rx_stats.rx_ip_octets;
+ *ptr++ = vpath_info->rx_stats.rx_err_ip;
+ *ptr++ = vpath_info->rx_stats.rx_icmp;
+ *ptr++ = vpath_info->rx_stats.rx_tcp;
+ *ptr++ = vpath_info->rx_stats.rx_udp;
+ *ptr++ = vpath_info->rx_stats.rx_err_tcp;
+ *ptr++ = vpath_info->rx_stats.rx_lost_frms;
+ *ptr++ = vpath_info->rx_stats.rx_lost_ip;
+ *ptr++ = vpath_info->rx_stats.rx_lost_ip_offload;
+ *ptr++ = vpath_info->rx_stats.rx_various_discard;
+ *ptr++ = vpath_info->rx_stats.rx_sleep_discard;
+ *ptr++ = vpath_info->rx_stats.rx_red_discard;
+ *ptr++ = vpath_info->rx_stats.rx_queue_full_discard;
+ *ptr++ = vpath_info->rx_stats.rx_mpa_ok_frms;
+ }
+ *ptr++ = 0;
+ for (k = 0; k < vdev->max_config_port; k++) {
+ *ptr++ = xmac_stats->aggr_stats[k].tx_frms;
+ *ptr++ = xmac_stats->aggr_stats[k].tx_data_octets;
+ *ptr++ = xmac_stats->aggr_stats[k].tx_mcast_frms;
+ *ptr++ = xmac_stats->aggr_stats[k].tx_bcast_frms;
+ *ptr++ = xmac_stats->aggr_stats[k].tx_discarded_frms;
+ *ptr++ = xmac_stats->aggr_stats[k].tx_errored_frms;
+ *ptr++ = xmac_stats->aggr_stats[k].rx_frms;
+ *ptr++ = xmac_stats->aggr_stats[k].rx_data_octets;
+ *ptr++ = xmac_stats->aggr_stats[k].rx_mcast_frms;
+ *ptr++ = xmac_stats->aggr_stats[k].rx_bcast_frms;
+ *ptr++ = xmac_stats->aggr_stats[k].rx_discarded_frms;
+ *ptr++ = xmac_stats->aggr_stats[k].rx_errored_frms;
+ *ptr++ = xmac_stats->aggr_stats[k].rx_unknown_slow_proto_frms;
+ }
+ *ptr++ = 0;
+ for (k = 0; k < vdev->max_config_port; k++) {
+ *ptr++ = xmac_stats->port_stats[k].tx_ttl_frms;
+ *ptr++ = xmac_stats->port_stats[k].tx_ttl_octets;
+ *ptr++ = xmac_stats->port_stats[k].tx_data_octets;
+ *ptr++ = xmac_stats->port_stats[k].tx_mcast_frms;
+ *ptr++ = xmac_stats->port_stats[k].tx_bcast_frms;
+ *ptr++ = xmac_stats->port_stats[k].tx_ucast_frms;
+ *ptr++ = xmac_stats->port_stats[k].tx_tagged_frms;
+ *ptr++ = xmac_stats->port_stats[k].tx_vld_ip;
+ *ptr++ = xmac_stats->port_stats[k].tx_vld_ip_octets;
+ *ptr++ = xmac_stats->port_stats[k].tx_icmp;
+ *ptr++ = xmac_stats->port_stats[k].tx_tcp;
+ *ptr++ = xmac_stats->port_stats[k].tx_rst_tcp;
+ *ptr++ = xmac_stats->port_stats[k].tx_udp;
+ *ptr++ = xmac_stats->port_stats[k].tx_parse_error;
+ *ptr++ = xmac_stats->port_stats[k].tx_unknown_protocol;
+ *ptr++ = xmac_stats->port_stats[k].tx_pause_ctrl_frms;
+ *ptr++ = xmac_stats->port_stats[k].tx_marker_pdu_frms;
+ *ptr++ = xmac_stats->port_stats[k].tx_lacpdu_frms;
+ *ptr++ = xmac_stats->port_stats[k].tx_drop_ip;
+ *ptr++ = xmac_stats->port_stats[k].tx_marker_resp_pdu_frms;
+ *ptr++ = xmac_stats->port_stats[k].tx_xgmii_char2_match;
+ *ptr++ = xmac_stats->port_stats[k].tx_xgmii_char1_match;
+ *ptr++ = xmac_stats->port_stats[k].tx_xgmii_column2_match;
+ *ptr++ = xmac_stats->port_stats[k].tx_xgmii_column1_match;
+ *ptr++ = xmac_stats->port_stats[k].tx_any_err_frms;
+ *ptr++ = xmac_stats->port_stats[k].tx_drop_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_ttl_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_vld_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_offload_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_ttl_octets;
+ *ptr++ = xmac_stats->port_stats[k].rx_data_octets;
+ *ptr++ = xmac_stats->port_stats[k].rx_offload_octets;
+ *ptr++ = xmac_stats->port_stats[k].rx_vld_mcast_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_vld_bcast_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_accepted_ucast_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_accepted_nucast_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_tagged_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_long_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_usized_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_osized_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_frag_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_jabber_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_ttl_64_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_ttl_65_127_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_ttl_128_255_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_ttl_256_511_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_ttl_512_1023_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_ttl_1024_1518_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_ttl_1519_4095_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_ttl_4096_8191_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_ttl_8192_max_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_ttl_gt_max_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_ip;
+ *ptr++ = xmac_stats->port_stats[k].rx_accepted_ip;
+ *ptr++ = xmac_stats->port_stats[k].rx_ip_octets;
+ *ptr++ = xmac_stats->port_stats[k].rx_err_ip;
+ *ptr++ = xmac_stats->port_stats[k].rx_icmp;
+ *ptr++ = xmac_stats->port_stats[k].rx_tcp;
+ *ptr++ = xmac_stats->port_stats[k].rx_udp;
+ *ptr++ = xmac_stats->port_stats[k].rx_err_tcp;
+ *ptr++ = xmac_stats->port_stats[k].rx_pause_count;
+ *ptr++ = xmac_stats->port_stats[k].rx_pause_ctrl_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_unsup_ctrl_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_fcs_err_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_in_rng_len_err_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_out_rng_len_err_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_drop_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_discarded_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_drop_ip;
+ *ptr++ = xmac_stats->port_stats[k].rx_drop_udp;
+ *ptr++ = xmac_stats->port_stats[k].rx_marker_pdu_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_lacpdu_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_unknown_pdu_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_marker_resp_pdu_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_fcs_discard;
+ *ptr++ = xmac_stats->port_stats[k].rx_illegal_pdu_frms;
+ *ptr++ = xmac_stats->port_stats[k].rx_switch_discard;
+ *ptr++ = xmac_stats->port_stats[k].rx_len_discard;
+ *ptr++ = xmac_stats->port_stats[k].rx_rpa_discard;
+ *ptr++ = xmac_stats->port_stats[k].rx_l2_mgmt_discard;
+ *ptr++ = xmac_stats->port_stats[k].rx_rts_discard;
+ *ptr++ = xmac_stats->port_stats[k].rx_trash_discard;
+ *ptr++ = xmac_stats->port_stats[k].rx_buff_full_discard;
+ *ptr++ = xmac_stats->port_stats[k].rx_red_discard;
+ *ptr++ = xmac_stats->port_stats[k].rx_xgmii_ctrl_err_cnt;
+ *ptr++ = xmac_stats->port_stats[k].rx_xgmii_data_err_cnt;
+ *ptr++ = xmac_stats->port_stats[k].rx_xgmii_char1_match;
+ *ptr++ = xmac_stats->port_stats[k].rx_xgmii_err_sym;
+ *ptr++ = xmac_stats->port_stats[k].rx_xgmii_column1_match;
+ *ptr++ = xmac_stats->port_stats[k].rx_xgmii_char2_match;
+ *ptr++ = xmac_stats->port_stats[k].rx_local_fault;
+ *ptr++ = xmac_stats->port_stats[k].rx_xgmii_column2_match;
+ *ptr++ = xmac_stats->port_stats[k].rx_jettison;
+ *ptr++ = xmac_stats->port_stats[k].rx_remote_fault;
+ }
+
+ *ptr++ = 0;
+ for (k = 0; k < vdev->no_of_vpath; k++) {
+ struct vxge_hw_vpath_stats_sw_info *vpath_info;
+
+ vpath = &vdev->vpaths[k];
+ j = vpath->device_id;
+ vpath_info = (struct vxge_hw_vpath_stats_sw_info *)
+ &sw_stats->vpath_info[j];
+ *ptr++ = vpath_info->soft_reset_cnt;
+ *ptr++ = vpath_info->error_stats.unknown_alarms;
+ *ptr++ = vpath_info->error_stats.network_sustained_fault;
+ *ptr++ = vpath_info->error_stats.network_sustained_ok;
+ *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_overwrite;
+ *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_poison;
+ *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_dma_error;
+ *ptr++ = vpath_info->error_stats.dblgen_fifo0_overflow;
+ *ptr++ = vpath_info->error_stats.statsb_pif_chain_error;
+ *ptr++ = vpath_info->error_stats.statsb_drop_timeout;
+ *ptr++ = vpath_info->error_stats.target_illegal_access;
+ *ptr++ = vpath_info->error_stats.ini_serr_det;
+ *ptr++ = vpath_info->error_stats.prc_ring_bumps;
+ *ptr++ = vpath_info->error_stats.prc_rxdcm_sc_err;
+ *ptr++ = vpath_info->error_stats.prc_rxdcm_sc_abort;
+ *ptr++ = vpath_info->error_stats.prc_quanta_size_err;
+ *ptr++ = vpath_info->ring_stats.common_stats.full_cnt;
+ *ptr++ = vpath_info->ring_stats.common_stats.usage_cnt;
+ *ptr++ = vpath_info->ring_stats.common_stats.usage_max;
+ *ptr++ = vpath_info->ring_stats.common_stats.
+ reserve_free_swaps_cnt;
+ *ptr++ = vpath_info->ring_stats.common_stats.total_compl_cnt;
+ for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
+ *ptr++ = vpath_info->ring_stats.rxd_t_code_err_cnt[j];
+ *ptr++ = vpath_info->fifo_stats.common_stats.full_cnt;
+ *ptr++ = vpath_info->fifo_stats.common_stats.usage_cnt;
+ *ptr++ = vpath_info->fifo_stats.common_stats.usage_max;
+ *ptr++ = vpath_info->fifo_stats.common_stats.
+ reserve_free_swaps_cnt;
+ *ptr++ = vpath_info->fifo_stats.common_stats.total_compl_cnt;
+ *ptr++ = vpath_info->fifo_stats.total_posts;
+ *ptr++ = vpath_info->fifo_stats.total_buffers;
+ for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
+ *ptr++ = vpath_info->fifo_stats.txd_t_code_err_cnt[j];
+ }
+
+ *ptr++ = 0;
+ for (k = 0; k < vdev->no_of_vpath; k++) {
+ struct vxge_hw_vpath_stats_hw_info *vpath_info;
+ vpath = &vdev->vpaths[k];
+ j = vpath->device_id;
+ vpath_info = hw_stats->vpath_info[j];
+ if (!vpath_info) {
+ memset(ptr, 0, VXGE_HW_VPATH_STATS_LEN * sizeof(u64));
+ ptr += VXGE_HW_VPATH_STATS_LEN;
+ continue;
+ }
+ *ptr++ = vpath_info->ini_num_mwr_sent;
+ *ptr++ = vpath_info->ini_num_mrd_sent;
+ *ptr++ = vpath_info->ini_num_cpl_rcvd;
+ *ptr++ = vpath_info->ini_num_mwr_byte_sent;
+ *ptr++ = vpath_info->ini_num_cpl_byte_rcvd;
+ *ptr++ = vpath_info->wrcrdtarb_xoff;
+ *ptr++ = vpath_info->rdcrdtarb_xoff;
+ *ptr++ = vpath_info->vpath_genstats_count0;
+ *ptr++ = vpath_info->vpath_genstats_count1;
+ *ptr++ = vpath_info->vpath_genstats_count2;
+ *ptr++ = vpath_info->vpath_genstats_count3;
+ *ptr++ = vpath_info->vpath_genstats_count4;
+ *ptr++ = vpath_info->vpath_genstats_count5;
+ *ptr++ = vpath_info->prog_event_vnum0;
+ *ptr++ = vpath_info->prog_event_vnum1;
+ *ptr++ = vpath_info->prog_event_vnum2;
+ *ptr++ = vpath_info->prog_event_vnum3;
+ *ptr++ = vpath_info->rx_multi_cast_frame_discard;
+ *ptr++ = vpath_info->rx_frm_transferred;
+ *ptr++ = vpath_info->rxd_returned;
+ *ptr++ = vpath_info->rx_mpa_len_fail_frms;
+ *ptr++ = vpath_info->rx_mpa_mrk_fail_frms;
+ *ptr++ = vpath_info->rx_mpa_crc_fail_frms;
+ *ptr++ = vpath_info->rx_permitted_frms;
+ *ptr++ = vpath_info->rx_vp_reset_discarded_frms;
+ *ptr++ = vpath_info->rx_wol_frms;
+ *ptr++ = vpath_info->tx_vp_reset_discarded_frms;
+ }
+
+ *ptr++ = 0;
+ *ptr++ = vdev->stats.vpaths_open;
+ *ptr++ = vdev->stats.vpath_open_fail;
+ *ptr++ = vdev->stats.link_up;
+ *ptr++ = vdev->stats.link_down;
+
+ for (k = 0; k < vdev->no_of_vpath; k++) {
+ *ptr += vdev->vpaths[k].fifo.stats.tx_frms;
+ *(ptr + 1) += vdev->vpaths[k].fifo.stats.tx_errors;
+ *(ptr + 2) += vdev->vpaths[k].fifo.stats.tx_bytes;
+ *(ptr + 3) += vdev->vpaths[k].fifo.stats.txd_not_free;
+ *(ptr + 4) += vdev->vpaths[k].fifo.stats.txd_out_of_desc;
+ *(ptr + 5) += vdev->vpaths[k].ring.stats.rx_frms;
+ *(ptr + 6) += vdev->vpaths[k].ring.stats.rx_errors;
+ *(ptr + 7) += vdev->vpaths[k].ring.stats.rx_bytes;
+ *(ptr + 8) += vdev->vpaths[k].ring.stats.rx_mcast;
+ *(ptr + 9) += vdev->vpaths[k].fifo.stats.pci_map_fail +
+ vdev->vpaths[k].ring.stats.pci_map_fail;
+ *(ptr + 10) += vdev->vpaths[k].ring.stats.skb_alloc_fail;
+ }
+
+ ptr += 12;
+
+ kfree(xmac_stats);
+ kfree(sw_stats);
+ kfree(hw_stats);
+}
+
+static void vxge_ethtool_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
+{
+ int stat_size = 0;
+ int i, j;
+ struct vxgedev *vdev = netdev_priv(dev);
+ switch (stringset) {
+ case ETH_SS_STATS:
+ vxge_add_string("VPATH STATISTICS%s\t\t\t",
+ &stat_size, data, "");
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ vxge_add_string("tx_ttl_eth_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_ttl_eth_octects_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_data_octects_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_mcast_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_bcast_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_ucast_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_tagged_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_vld_ip_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_vld_ip_octects_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_icmp_%d\t\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_tcp_%d\t\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_rst_tcp_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_udp_%d\t\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_unknown_proto_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_lost_ip_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_parse_error_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_tcp_offload_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_retx_tcp_offload_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_lost_ip_offload_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_eth_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_vld_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_offload_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_eth_octects_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_data_octects_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_offload_octects_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_vld_mcast_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_vld_bcast_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_accepted_ucast_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_accepted_nucast_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_tagged_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_long_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_usized_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_osized_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_frag_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_jabber_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_64_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_65_127_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_128_255_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_256_511_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_512_1023_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_8192_max_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_gt_max_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ip%d\t\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_accepted_ip_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ip_octects_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_err_ip_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_icmp_%d\t\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_tcp_%d\t\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_udp_%d\t\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_err_tcp_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_lost_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_lost_ip_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_lost_ip_offload_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_various_discard_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_sleep_discard_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_red_discard_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_queue_full_discard_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_mpa_ok_frms_%d\t\t\t",
+ &stat_size, data, i);
+ }
+
+ vxge_add_string("\nAGGR STATISTICS%s\t\t\t\t",
+ &stat_size, data, "");
+ for (i = 0; i < vdev->max_config_port; i++) {
+ vxge_add_string("tx_frms_%d\t\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_data_octects_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_mcast_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_bcast_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_discarded_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_errored_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_frms_%d\t\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_data_octects_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_mcast_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_bcast_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_discarded_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_errored_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_unknown_slow_proto_frms_%d\t",
+ &stat_size, data, i);
+ }
+
+ vxge_add_string("\nPORT STATISTICS%s\t\t\t\t",
+ &stat_size, data, "");
+ for (i = 0; i < vdev->max_config_port; i++) {
+ vxge_add_string("tx_ttl_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_ttl_octects_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_data_octects_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_mcast_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_bcast_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_ucast_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_tagged_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_vld_ip_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_vld_ip_octects_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_icmp_%d\t\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_tcp_%d\t\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_rst_tcp_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_udp_%d\t\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_parse_error_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_unknown_protocol_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_pause_ctrl_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_marker_pdu_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_lacpdu_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_drop_ip_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_marker_resp_pdu_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_xgmii_char2_match_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_xgmii_char1_match_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_xgmii_column2_match_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_xgmii_column1_match_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_any_err_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_drop_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_vld_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_offload_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_octects_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_data_octects_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_offload_octects_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_vld_mcast_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_vld_bcast_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_accepted_ucast_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_accepted_nucast_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_tagged_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_long_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_usized_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_osized_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_frag_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_jabber_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_64_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_65_127_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_128_255_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_256_511_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_512_1023_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_8192_max_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ttl_gt_max_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ip_%d\t\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_accepted_ip_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_ip_octets_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_err_ip_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_icmp_%d\t\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_tcp_%d\t\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_udp_%d\t\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_err_tcp_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_pause_count_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_pause_ctrl_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_unsup_ctrl_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_fcs_err_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_in_rng_len_err_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_out_rng_len_err_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_drop_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_discard_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_drop_ip_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_drop_udp_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_marker_pdu_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_lacpdu_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_unknown_pdu_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_marker_resp_pdu_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_fcs_discard_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_illegal_pdu_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_switch_discard_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_len_discard_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_rpa_discard_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_l2_mgmt_discard_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_rts_discard_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_trash_discard_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_buff_full_discard_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_red_discard_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_xgmii_ctrl_err_cnt_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_xgmii_data_err_cnt_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_xgmii_char1_match_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_xgmii_err_sym_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_xgmii_column1_match_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_xgmii_char2_match_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_local_fault_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_xgmii_column2_match_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_jettison_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_remote_fault_%d\t\t\t",
+ &stat_size, data, i);
+ }
+
+ vxge_add_string("\n SOFTWARE STATISTICS%s\t\t\t",
+ &stat_size, data, "");
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ vxge_add_string("soft_reset_cnt_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("unknown_alarms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("network_sustained_fault_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("network_sustained_ok_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("kdfcctl_fifo0_overwrite_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("kdfcctl_fifo0_poison_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("kdfcctl_fifo0_dma_error_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("dblgen_fifo0_overflow_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("statsb_pif_chain_error_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("statsb_drop_timeout_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("target_illegal_access_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("ini_serr_det_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("prc_ring_bumps_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("prc_rxdcm_sc_err_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("prc_rxdcm_sc_abort_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("prc_quanta_size_err_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("ring_full_cnt_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("ring_usage_cnt_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("ring_usage_max_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("ring_reserve_free_swaps_cnt_%d\t",
+ &stat_size, data, i);
+ vxge_add_string("ring_total_compl_cnt_%d\t\t",
+ &stat_size, data, i);
+ for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
+ vxge_add_string("rxd_t_code_err_cnt%d_%d\t\t",
+ &stat_size, data, j, i);
+ vxge_add_string("fifo_full_cnt_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("fifo_usage_cnt_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("fifo_usage_max_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("fifo_reserve_free_swaps_cnt_%d\t",
+ &stat_size, data, i);
+ vxge_add_string("fifo_total_compl_cnt_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("fifo_total_posts_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("fifo_total_buffers_%d\t\t",
+ &stat_size, data, i);
+ for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
+ vxge_add_string("txd_t_code_err_cnt%d_%d\t\t",
+ &stat_size, data, j, i);
+ }
+
+ vxge_add_string("\n HARDWARE STATISTICS%s\t\t\t",
+ &stat_size, data, "");
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ vxge_add_string("ini_num_mwr_sent_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("ini_num_mrd_sent_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("ini_num_cpl_rcvd_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("ini_num_mwr_byte_sent_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("ini_num_cpl_byte_rcvd_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("wrcrdtarb_xoff_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rdcrdtarb_xoff_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("vpath_genstats_count0_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("vpath_genstats_count1_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("vpath_genstats_count2_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("vpath_genstats_count3_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("vpath_genstats_count4_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("vpath_genstats_count5_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("prog_event_vnum0_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("prog_event_vnum1_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("prog_event_vnum2_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("prog_event_vnum3_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_multi_cast_frame_discard_%d\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_frm_transferred_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rxd_returned_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_mpa_len_fail_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_mpa_mrk_fail_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_mpa_crc_fail_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_permitted_frms_%d\t\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_vp_reset_discarded_frms_%d\t",
+ &stat_size, data, i);
+ vxge_add_string("rx_wol_frms_%d\t\t\t",
+ &stat_size, data, i);
+ vxge_add_string("tx_vp_reset_discarded_frms_%d\t",
+ &stat_size, data, i);
+ }
+
+ memcpy(data + stat_size, &ethtool_driver_stats_keys,
+ sizeof(ethtool_driver_stats_keys));
+ }
+}
+
+static int vxge_ethtool_get_regs_len(struct net_device *dev)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+
+ return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
+}
+
+static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return VXGE_TITLE_LEN +
+ (vdev->no_of_vpath * VXGE_HW_VPATH_STATS_LEN) +
+ (vdev->max_config_port * VXGE_HW_AGGR_STATS_LEN) +
+ (vdev->max_config_port * VXGE_HW_PORT_STATS_LEN) +
+ (vdev->no_of_vpath * VXGE_HW_VPATH_TX_STATS_LEN) +
+ (vdev->no_of_vpath * VXGE_HW_VPATH_RX_STATS_LEN) +
+ (vdev->no_of_vpath * VXGE_SW_STATS_LEN) +
+ DRIVER_STAT_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+
+ if (vdev->max_vpath_supported != VXGE_HW_MAX_VIRTUAL_PATHS) {
+ printk(KERN_INFO "Single Function Mode is required to flash the"
+ " firmware\n");
+ return -EINVAL;
+ }
+
+ if (netif_running(dev)) {
+ printk(KERN_INFO "Interface %s must be down to flash the "
+ "firmware\n", dev->name);
+ return -EBUSY;
+ }
+
+ return vxge_fw_upgrade(vdev, parms->data, 1);
+}
+
+static const struct ethtool_ops vxge_ethtool_ops = {
+ .get_settings = vxge_ethtool_gset,
+ .set_settings = vxge_ethtool_sset,
+ .get_drvinfo = vxge_ethtool_gdrvinfo,
+ .get_regs_len = vxge_ethtool_get_regs_len,
+ .get_regs = vxge_ethtool_gregs,
+ .get_link = ethtool_op_get_link,
+ .get_pauseparam = vxge_ethtool_getpause_data,
+ .set_pauseparam = vxge_ethtool_setpause_data,
+ .get_strings = vxge_ethtool_get_strings,
+ .set_phys_id = vxge_ethtool_idnic,
+ .get_sset_count = vxge_ethtool_get_sset_count,
+ .get_ethtool_stats = vxge_get_ethtool_stats,
+ .flash_device = vxge_fw_flash,
+};
+
+void vxge_initialize_ethtool_ops(struct net_device *ndev)
+{
+ SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
new file mode 100644
index 000000000000..6cf3044d7f43
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
@@ -0,0 +1,67 @@
+/******************************************************************************
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ *
+ * vxge-ethtool.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
+ * Virtualized Server Adapter.
+ * Copyright(c) 2002-2010 Exar Corp.
+ ******************************************************************************/
+#ifndef _VXGE_ETHTOOL_H
+#define _VXGE_ETHTOOL_H
+
+#include "vxge-main.h"
+
+/* Ethtool related variables and Macros. */
+static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset);
+
+static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
+ {"\n DRIVER STATISTICS"},
+ {"vpaths_opened"},
+ {"vpath_open_fail_cnt"},
+ {"link_up_cnt"},
+ {"link_down_cnt"},
+ {"tx_frms"},
+ {"tx_errors"},
+ {"tx_bytes"},
+ {"txd_not_free"},
+ {"txd_out_of_desc"},
+ {"rx_frms"},
+ {"rx_errors"},
+ {"rx_bytes"},
+ {"rx_mcast"},
+ {"pci_map_fail_cnt"},
+ {"skb_alloc_fail_cnt"}
+};
+
+#define VXGE_TITLE_LEN 5
+#define VXGE_HW_VPATH_STATS_LEN 27
+#define VXGE_HW_AGGR_STATS_LEN 13
+#define VXGE_HW_PORT_STATS_LEN 94
+#define VXGE_HW_VPATH_TX_STATS_LEN 19
+#define VXGE_HW_VPATH_RX_STATS_LEN 42
+#define VXGE_SW_STATS_LEN 60
+#define VXGE_HW_STATS_LEN (VXGE_HW_VPATH_STATS_LEN +\
+ VXGE_HW_AGGR_STATS_LEN +\
+ VXGE_HW_PORT_STATS_LEN +\
+ VXGE_HW_VPATH_TX_STATS_LEN +\
+ VXGE_HW_VPATH_RX_STATS_LEN)
+
+#define DRIVER_STAT_LEN (sizeof(ethtool_driver_stats_keys)/ETH_GSTRING_LEN)
+#define STAT_LEN (VXGE_HW_STATS_LEN + DRIVER_STAT_LEN + VXGE_SW_STATS_LEN)
+
+/* Maximum flicker time of adapter LED */
+#define VXGE_MAX_FLICKER_TIME (60 * HZ) /* 60 seconds */
+#define VXGE_FLICKER_ON 1
+#define VXGE_FLICKER_OFF 0
+
+#define vxge_add_string(fmt, size, buf, ...) {\
+ snprintf(buf + *size, ETH_GSTRING_LEN, fmt, __VA_ARGS__); \
+ *size += ETH_GSTRING_LEN; \
+}
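+
+/*
+ * Example: vxge_add_string("tx_frms_%d\t\t\t", &stat_size, data, i)
+ * snprintf()s the formatted key into the next ETH_GSTRING_LEN-sized slot
+ * of the ethtool strings buffer and advances stat_size by one slot.
+ */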
+
+#endif /* _VXGE_ETHTOOL_H */
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
new file mode 100644
index 000000000000..a66f8fc0401e
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -0,0 +1,4858 @@
+/******************************************************************************
+* This software may be used and distributed according to the terms of
+* the GNU General Public License (GPL), incorporated herein by reference.
+* Drivers based on or derived from this code fall under the GPL and must
+* retain the authorship, copyright and license notice. This file is not
+* a complete program and may only be used when the entire operating
+* system is licensed under the GPL.
+* See the file COPYING in this distribution for more information.
+*
+* vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
+* Virtualized Server Adapter.
+* Copyright(c) 2002-2010 Exar Corp.
+*
+* The module loadable parameters that are supported by the driver and a brief
+* explanation of all the variables:
+* vlan_tag_strip:
+* Strip VLAN Tag enable/disable. Instructs the device to remove
+* the VLAN tag from all received tagged frames that are not
+* replicated at the internal L2 switch.
+* 0 - Do not strip the VLAN tag.
+* 1 - Strip the VLAN tag.
+*
+* addr_learn_en:
+* Enable learning the mac address of the guest OS interface in
+* a virtualization environment.
+* 0 - DISABLE
+* 1 - ENABLE
+*
+* max_config_port:
+* Maximum number of ports to be supported.
+* MIN - 1 and MAX - 2
+*
+* max_config_vpath:
+* This configures the maximum number of VPATHs for each
+* device function.
+* MIN - 1 and MAX - 17
+*
+* max_config_dev:
+* This configures the maximum number of device functions to be enabled.
+* MIN - 1 and MAX - 17
+*
+******************************************************************************/
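+
+/*
+ * For example (illustrative invocation only; defaults come from the
+ * VXGE_MODULE_PARAM_INT() definitions below), loading the driver with
+ * VLAN stripping disabled and both ports enabled:
+ * modprobe vxge vlan_tag_strip=0 max_config_port=2
+ */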
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <net/ip.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/net_tstamp.h>
+#include <linux/prefetch.h>
+#include "vxge-main.h"
+#include "vxge-reg.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
+ "Virtualized Server Adapter");
+
+static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
+ {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
+ PCI_ANY_ID},
+ {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
+ PCI_ANY_ID},
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, vxge_id_table);
+
+VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
+VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
+VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
+VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
+VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
+VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
+
+static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
+ {0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
+static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
+ {[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
+module_param_array(bw_percentage, uint, NULL, 0);
+
+static struct vxge_drv_config *driver_config;
+
+static inline int is_vxge_card_up(struct vxgedev *vdev)
+{
+ return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+}
+
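+/*
+ * Tx completions are drained in batches of up to NR_SKB_COMPLETED
+ * descriptors while holding the tx queue lock; the completed skbs are
+ * then freed after the lock is dropped to keep the hold time short.
+ */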
+static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
+{
+ struct sk_buff **skb_ptr = NULL;
+ struct sk_buff **temp;
+#define NR_SKB_COMPLETED 128
+ struct sk_buff *completed[NR_SKB_COMPLETED];
+ int more;
+
+ do {
+ more = 0;
+ skb_ptr = completed;
+
+ if (__netif_tx_trylock(fifo->txq)) {
+ vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
+ NR_SKB_COMPLETED, &more);
+ __netif_tx_unlock(fifo->txq);
+ }
+
+ /* free SKBs */
+ for (temp = completed; temp != skb_ptr; temp++)
+ dev_kfree_skb_irq(*temp);
+ } while (more);
+}
+
+static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
+{
+ int i;
+
+ /* Complete all transmits */
+ for (i = 0; i < vdev->no_of_vpath; i++)
+ VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
+}
+
+static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
+{
+ int i;
+ struct vxge_ring *ring;
+
+ /* Complete all receives*/
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ ring = &vdev->vpaths[i].ring;
+ vxge_hw_vpath_poll_rx(ring->handle);
+ }
+}
+
+/*
+ * vxge_callback_link_up
+ *
+ * This function is called during interrupt context to notify link up state
+ * change.
+ */
+static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
+{
+ struct net_device *dev = hldev->ndev;
+ struct vxgedev *vdev = netdev_priv(dev);
+
+ vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
+ vdev->ndev->name, __func__, __LINE__);
+ netdev_notice(vdev->ndev, "Link Up\n");
+ vdev->stats.link_up++;
+
+ netif_carrier_on(vdev->ndev);
+ netif_tx_wake_all_queues(vdev->ndev);
+
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
+}
+
+/*
+ * vxge_callback_link_down
+ *
+ * This function is called during interrupt context to notify link down state
+ * change.
+ */
+static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
+{
+ struct net_device *dev = hldev->ndev;
+ struct vxgedev *vdev = netdev_priv(dev);
+
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
+ netdev_notice(vdev->ndev, "Link Down\n");
+
+ vdev->stats.link_down++;
+ netif_carrier_off(vdev->ndev);
+ netif_tx_stop_all_queues(vdev->ndev);
+
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
+}
+
+/*
+ * vxge_rx_alloc
+ *
+ * Allocate SKB.
+ */
+static struct sk_buff *
+vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
+{
+ struct net_device *dev;
+ struct sk_buff *skb;
+ struct vxge_rx_priv *rx_priv;
+
+ dev = ring->ndev;
+ vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
+ ring->ndev->name, __func__, __LINE__);
+
+ rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
+
+ /* try to allocate skb first. this one may fail */
+ skb = netdev_alloc_skb(dev, skb_size +
+ VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
+ if (skb == NULL) {
+ vxge_debug_mem(VXGE_ERR,
+ "%s: out of memory to allocate SKB", dev->name);
+ ring->stats.skb_alloc_fail++;
+ return NULL;
+ }
+
+ vxge_debug_mem(VXGE_TRACE,
+ "%s: %s:%d Skb : 0x%p", ring->ndev->name,
+ __func__, __LINE__, skb);
+
+ skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
+
+ rx_priv->skb = skb;
+ rx_priv->skb_data = NULL;
+ rx_priv->data_size = skb_size;
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
+
+ return skb;
+}
+
+/*
+ * vxge_rx_map
+ */
+static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
+{
+ struct vxge_rx_priv *rx_priv;
+ dma_addr_t dma_addr;
+
+ vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
+ ring->ndev->name, __func__, __LINE__);
+ rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
+
+ rx_priv->skb_data = rx_priv->skb->data;
+ dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
+ rx_priv->data_size, PCI_DMA_FROMDEVICE);
+
+ if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
+ ring->stats.pci_map_fail++;
+ return -EIO;
+ }
+ vxge_debug_mem(VXGE_TRACE,
+ "%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
+ ring->ndev->name, __func__, __LINE__,
+ (unsigned long long)dma_addr);
+ vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);
+
+ rx_priv->data_dma = dma_addr;
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
+
+ return 0;
+}
+
+/*
+ * vxge_rx_initial_replenish
+ * Allocation of RxD as an initial replenish procedure.
+ */
+static enum vxge_hw_status
+vxge_rx_initial_replenish(void *dtrh, void *userdata)
+{
+ struct vxge_ring *ring = (struct vxge_ring *)userdata;
+ struct vxge_rx_priv *rx_priv;
+
+ vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
+ ring->ndev->name, __func__, __LINE__);
+ if (vxge_rx_alloc(dtrh, ring,
+ VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
+ return VXGE_HW_FAIL;
+
+ if (vxge_rx_map(dtrh, ring)) {
+ rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
+ dev_kfree_skb(rx_priv->skb);
+
+ return VXGE_HW_FAIL;
+ }
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
+
+ return VXGE_HW_OK;
+}
+
+static inline void
+vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
+ int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
+{
+
+ vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
+ ring->ndev->name, __func__, __LINE__);
+ skb_record_rx_queue(skb, ring->driver_id);
+ skb->protocol = eth_type_trans(skb, ring->ndev);
+
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.rx_frms++;
+ ring->stats.rx_bytes += pkt_length;
+
+ if (skb->pkt_type == PACKET_MULTICAST)
+ ring->stats.rx_mcast++;
+ u64_stats_update_end(&ring->stats.syncp);
+
+ vxge_debug_rx(VXGE_TRACE,
+ "%s: %s:%d skb protocol = %d",
+ ring->ndev->name, __func__, __LINE__, skb->protocol);
+
+ if (ext_info->vlan &&
+ ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
+ __vlan_hwaccel_put_tag(skb, ext_info->vlan);
+ napi_gro_receive(ring->napi_p, skb);
+
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
+}
+
+static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
+ struct vxge_rx_priv *rx_priv)
+{
+ pci_dma_sync_single_for_device(ring->pdev,
+ rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);
+
+ vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
+ vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
+}
+
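+/*
+ * Batch RxD posting: descriptors are posted without a write barrier and
+ * only every VXGE_HW_RXSYNC_FREQ_CNT-th descriptor flushes the previous
+ * batch with vxge_hw_ring_rxd_post_post_wmb(), amortizing the barrier
+ * cost across the batch (the caller flushes any tail batch).
+ */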
+static inline void vxge_post(int *dtr_cnt, void **first_dtr,
+ void *post_dtr, struct __vxge_hw_ring *ringh)
+{
+ int dtr_count = *dtr_cnt;
+ if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
+ if (*first_dtr)
+ vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
+ *first_dtr = post_dtr;
+ } else
+ vxge_hw_ring_rxd_post_post(ringh, post_dtr);
+ dtr_count++;
+ *dtr_cnt = dtr_count;
+}
+
+/*
+ * vxge_rx_1b_compl
+ *
+ * If the interrupt is because of a received frame or if the receive ring
+ * contains fresh, as yet unprocessed frames, this function is called.
+ */
+static enum vxge_hw_status
+vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
+ u8 t_code, void *userdata)
+{
+ struct vxge_ring *ring = (struct vxge_ring *)userdata;
+ struct net_device *dev = ring->ndev;
+ unsigned int dma_sizes;
+ void *first_dtr = NULL;
+ int dtr_cnt = 0;
+ int data_size;
+ dma_addr_t data_dma;
+ int pkt_length;
+ struct sk_buff *skb;
+ struct vxge_rx_priv *rx_priv;
+ struct vxge_hw_ring_rxd_info ext_info;
+ vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
+ ring->ndev->name, __func__, __LINE__);
+
+ do {
+ prefetch((char *)dtr + L1_CACHE_BYTES);
+ rx_priv = vxge_hw_ring_rxd_private_get(dtr);
+ skb = rx_priv->skb;
+ data_size = rx_priv->data_size;
+ data_dma = rx_priv->data_dma;
+ prefetch(rx_priv->skb_data);
+
+ vxge_debug_rx(VXGE_TRACE,
+ "%s: %s:%d skb = 0x%p",
+ ring->ndev->name, __func__, __LINE__, skb);
+
+ vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
+ pkt_length = dma_sizes;
+
+ pkt_length -= ETH_FCS_LEN;
+
+ vxge_debug_rx(VXGE_TRACE,
+ "%s: %s:%d Packet Length = %d",
+ ring->ndev->name, __func__, __LINE__, pkt_length);
+
+ vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);
+
+ /* check skb validity */
+ vxge_assert(skb);
+
+ prefetch((char *)skb + L1_CACHE_BYTES);
+ if (unlikely(t_code)) {
+ if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
+ VXGE_HW_OK) {
+
+ ring->stats.rx_errors++;
+ vxge_debug_rx(VXGE_TRACE,
+ "%s: %s :%d Rx T_code is %d",
+ ring->ndev->name, __func__,
+ __LINE__, t_code);
+
+ /* If the t_code is not supported and is other
+ * than 0x5 (an unparseable packet, such as an
+ * unknown IPv6 header), drop it.
+ */
+ vxge_re_pre_post(dtr, ring, rx_priv);
+
+ vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
+ ring->stats.rx_dropped++;
+ continue;
+ }
+ }
+
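+ /*
+ * Frames above VXGE_LL_RX_COPY_THRESHOLD: hand the filled skb up
+ * and replace it in the ring with a freshly allocated buffer.
+ * Smaller frames: copy into a new small skb and recycle the
+ * original ring buffer.
+ */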
+ if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
+ if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
+ if (!vxge_rx_map(dtr, ring)) {
+ skb_put(skb, pkt_length);
+
+ pci_unmap_single(ring->pdev, data_dma,
+ data_size, PCI_DMA_FROMDEVICE);
+
+ vxge_hw_ring_rxd_pre_post(ringh, dtr);
+ vxge_post(&dtr_cnt, &first_dtr, dtr,
+ ringh);
+ } else {
+ dev_kfree_skb(rx_priv->skb);
+ rx_priv->skb = skb;
+ rx_priv->data_size = data_size;
+ vxge_re_pre_post(dtr, ring, rx_priv);
+
+ vxge_post(&dtr_cnt, &first_dtr, dtr,
+ ringh);
+ ring->stats.rx_dropped++;
+ break;
+ }
+ } else {
+ vxge_re_pre_post(dtr, ring, rx_priv);
+
+ vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
+ ring->stats.rx_dropped++;
+ break;
+ }
+ } else {
+ struct sk_buff *skb_up;
+
+ skb_up = netdev_alloc_skb(dev, pkt_length +
+ VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
+ if (skb_up != NULL) {
+ skb_reserve(skb_up,
+ VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
+
+ pci_dma_sync_single_for_cpu(ring->pdev,
+ data_dma, data_size,
+ PCI_DMA_FROMDEVICE);
+
+ vxge_debug_mem(VXGE_TRACE,
+ "%s: %s:%d skb_up = %p",
+ ring->ndev->name, __func__,
+ __LINE__, skb_up);
+ memcpy(skb_up->data, skb->data, pkt_length);
+
+ vxge_re_pre_post(dtr, ring, rx_priv);
+
+ vxge_post(&dtr_cnt, &first_dtr, dtr,
+ ringh);
+ /* hand up the small copy instead of the ring buffer */
+ skb = skb_up;
+ skb_put(skb, pkt_length);
+ } else {
+ vxge_re_pre_post(dtr, ring, rx_priv);
+
+ vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
+ vxge_debug_rx(VXGE_ERR,
+ "%s: vxge_rx_1b_compl: out of "
+ "memory", dev->name);
+ ring->stats.skb_alloc_fail++;
+ break;
+ }
+ }
+
+ if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
+ !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
+ (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
+ ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
+ ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+
+
+ if (ring->rx_hwts) {
+ struct skb_shared_hwtstamps *skb_hwts;
+ u32 ns = *(u32 *)(skb->head + pkt_length);
+
+ skb_hwts = skb_hwtstamps(skb);
+ skb_hwts->hwtstamp = ns_to_ktime(ns);
+ skb_hwts->syststamp.tv64 = 0;
+ }
+
+ /* rth_hash_type and rth_it_hit are non-zero regardless of
+ * whether rss is enabled. Only the rth_value is zero/non-zero
+ * if rss is disabled/enabled, so key off of that.
+ */
+ if (ext_info.rth_value)
+ skb->rxhash = ext_info.rth_value;
+
+ vxge_rx_complete(ring, skb, ext_info.vlan,
+ pkt_length, &ext_info);
+
+ ring->budget--;
+ ring->pkts_processed++;
+ if (!ring->budget)
+ break;
+
+ } while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
+ &t_code) == VXGE_HW_OK);
+
+ if (first_dtr)
+ vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
+
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s:%d Exiting...",
+ __func__, __LINE__);
+ return VXGE_HW_OK;
+}
+
+/*
+ * vxge_xmit_compl
+ *
+ * If an interrupt was raised to indicate DMA completion of the Tx packet,
+ * this function is called. It identifies the last TxD whose buffer was
+ * freed and frees all skbs whose data has already been DMA'ed into the
+ * NIC's internal memory.
+ */
+static enum vxge_hw_status
+vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
+ enum vxge_hw_fifo_tcode t_code, void *userdata,
+ struct sk_buff ***skb_ptr, int nr_skb, int *more)
+{
+ struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
+ struct sk_buff *skb, **done_skb = *skb_ptr;
+ int pkt_cnt = 0;
+
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s:%d Entered....", __func__, __LINE__);
+
+ do {
+ int frg_cnt;
+ skb_frag_t *frag;
+ int i = 0, j;
+ struct vxge_tx_priv *txd_priv =
+ vxge_hw_fifo_txdl_private_get(dtr);
+
+ skb = txd_priv->skb;
+ frg_cnt = skb_shinfo(skb)->nr_frags;
+ frag = &skb_shinfo(skb)->frags[0];
+
+ vxge_debug_tx(VXGE_TRACE,
+ "%s: %s:%d fifo_hw = %p dtr = %p "
+ "tcode = 0x%x", fifo->ndev->name, __func__,
+ __LINE__, fifo_hw, dtr, t_code);
+ /* check skb validity */
+ vxge_assert(skb);
+ vxge_debug_tx(VXGE_TRACE,
+ "%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
+ fifo->ndev->name, __func__, __LINE__,
+ skb, txd_priv, frg_cnt);
+ if (unlikely(t_code)) {
+ fifo->stats.tx_errors++;
+ vxge_debug_tx(VXGE_ERR,
+ "%s: tx: dtr %p completed due to "
+ "error t_code %01x", fifo->ndev->name,
+ dtr, t_code);
+ vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
+ }
+
+ /* for unfragmented skb */
+ pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+
+ for (j = 0; j < frg_cnt; j++) {
+ pci_unmap_page(fifo->pdev,
+ txd_priv->dma_buffers[i++],
+ frag->size, PCI_DMA_TODEVICE);
+ frag += 1;
+ }
+
+ vxge_hw_fifo_txdl_free(fifo_hw, dtr);
+
+ /* Updating the statistics block */
+ u64_stats_update_begin(&fifo->stats.syncp);
+ fifo->stats.tx_frms++;
+ fifo->stats.tx_bytes += skb->len;
+ u64_stats_update_end(&fifo->stats.syncp);
+
+ *done_skb++ = skb;
+
+ if (--nr_skb <= 0) {
+ *more = 1;
+ break;
+ }
+
+ pkt_cnt++;
+ if (pkt_cnt > fifo->indicate_max_pkts)
+ break;
+
+ } while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
+ &dtr, &t_code) == VXGE_HW_OK);
+
+ *skb_ptr = done_skb;
+ if (netif_tx_queue_stopped(fifo->txq))
+ netif_tx_wake_queue(fifo->txq);
+
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s: %s:%d Exiting...",
+ fifo->ndev->name, __func__, __LINE__);
+ return VXGE_HW_OK;
+}
+
+/* select a vpath to transmit the packet */
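+/*
+ * Illustrative example: with four open vpaths, vpath_selector[3] == 3,
+ * so counter = (sport + dport) & 3, which spreads TCP flows across
+ * vpaths 0..3 while keeping each flow on a single vpath.
+ */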
+static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
+{
+ u16 queue_len, counter = 0;
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *ip;
+ struct tcphdr *th;
+
+ ip = ip_hdr(skb);
+
+ if (!ip_is_fragment(ip)) {
+ th = (struct tcphdr *)(((unsigned char *)ip) +
+ ip->ihl*4);
+
+ queue_len = vdev->no_of_vpath;
+ counter = (ntohs(th->source) +
+ ntohs(th->dest)) &
+ vdev->vpath_selector[queue_len - 1];
+ if (counter >= queue_len)
+ counter = queue_len - 1;
+ }
+ }
+ return counter;
+}
+
+static enum vxge_hw_status vxge_search_mac_addr_in_list(
+ struct vxge_vpath *vpath, u64 del_mac)
+{
+ struct list_head *entry, *next;
+ list_for_each_safe(entry, next, &vpath->mac_addr_list) {
+ if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
+ return TRUE;
+ }
+ return FALSE;
+}
+
+static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
+{
+ struct vxge_mac_addrs *new_mac_entry;
+ u8 *mac_address = NULL;
+
+ if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
+ return TRUE;
+
+ new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
+ if (!new_mac_entry) {
+ vxge_debug_mem(VXGE_ERR,
+ "%s: memory allocation failed",
+ VXGE_DRIVER_NAME);
+ return FALSE;
+ }
+
+ list_add(&new_mac_entry->item, &vpath->mac_addr_list);
+
+ /* Copy the new mac address to the list */
+ mac_address = (u8 *)&new_mac_entry->macaddr;
+ memcpy(mac_address, mac->macaddr, ETH_ALEN);
+
+ new_mac_entry->state = mac->state;
+ vpath->mac_addr_cnt++;
+
+ if (is_multicast_ether_addr(mac->macaddr))
+ vpath->mcast_addr_cnt++;
+
+ return TRUE;
+}
+
+/* Add a mac address to DA table */
+static enum vxge_hw_status
+vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxge_vpath *vpath;
+ enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
+
+ if (is_multicast_ether_addr(mac->macaddr))
+ duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
+ else
+ duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
+
+ vpath = &vdev->vpaths[mac->vpath_no];
+ status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
+ mac->macmask, duplicate_mode);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "DA config add entry failed for vpath:%d",
+ vpath->device_id);
+ } else
+ if (FALSE == vxge_mac_list_add(vpath, mac))
+ status = -EPERM;
+
+ return status;
+}
+
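+/*
+ * vxge_learn_mac
+ * Find (or learn) the vpath that owns a given source MAC. If no vpath
+ * has a free DA-table slot, vpath 0 is programmed as a "catch-basin"
+ * via rts_mgr_cbasin_cfg and the address is kept only in the software
+ * list.
+ */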
+static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
+{
+ struct macInfo mac_info;
+ u8 *mac_address = NULL;
+ u64 mac_addr = 0, vpath_vector = 0;
+ int vpath_idx = 0;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxge_vpath *vpath = NULL;
+ struct __vxge_hw_device *hldev;
+
+ hldev = pci_get_drvdata(vdev->pdev);
+
+ mac_address = (u8 *)&mac_addr;
+ memcpy(mac_address, mac_header, ETH_ALEN);
+
+ /* Is this mac address already in the list? */
+ for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
+ vpath = &vdev->vpaths[vpath_idx];
+ if (vxge_search_mac_addr_in_list(vpath, mac_addr))
+ return vpath_idx;
+ }
+
+ memset(&mac_info, 0, sizeof(struct macInfo));
+ memcpy(mac_info.macaddr, mac_header, ETH_ALEN);
+
+ /* Any vpath has room to add mac address to its da table? */
+ for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
+ vpath = &vdev->vpaths[vpath_idx];
+ if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
+ /* Add this mac address to this vpath */
+ mac_info.vpath_no = vpath_idx;
+ mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
+ status = vxge_add_mac_addr(vdev, &mac_info);
+ if (status != VXGE_HW_OK)
+ return -EPERM;
+ return vpath_idx;
+ }
+ }
+
+ mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
+ vpath_idx = 0;
+ mac_info.vpath_no = vpath_idx;
+ /* Is the first vpath already selected as catch-basin ? */
+ vpath = &vdev->vpaths[vpath_idx];
+ if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
+ /* Add this mac address to this vpath */
+ if (FALSE == vxge_mac_list_add(vpath, &mac_info))
+ return -EPERM;
+ return vpath_idx;
+ }
+
+ /* Select first vpath as catch-basin */
+ vpath_vector = vxge_mBIT(vpath->device_id);
+ status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
+ vxge_hw_mgmt_reg_type_mrpcim,
+ 0,
+ (ulong)offsetof(
+ struct vxge_hw_mrpcim_reg,
+ rts_mgr_cbasin_cfg),
+ vpath_vector);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_tx(VXGE_ERR,
+ "%s: Unable to set the vpath-%d in catch-basin mode",
+ VXGE_DRIVER_NAME, vpath->device_id);
+ return -EPERM;
+ }
+
+ if (FALSE == vxge_mac_list_add(vpath, &mac_info))
+ return -EPERM;
+
+ return vpath_idx;
+}
+
+/**
+ * vxge_xmit
+ * @skb : the socket buffer containing the Tx data.
+ * @dev : device pointer.
+ *
+ * This function is the Tx entry point of the driver. The Neterion NIC
+ * supports certain protocol-assist features on the Tx side, namely CSO,
+ * S/G and LSO.
+*/
+static netdev_tx_t
+vxge_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct vxge_fifo *fifo = NULL;
+ void *dtr_priv;
+ void *dtr = NULL;
+ struct vxgedev *vdev = NULL;
+ enum vxge_hw_status status;
+ int frg_cnt, first_frg_len;
+ skb_frag_t *frag;
+ int i = 0, j = 0, avail;
+ u64 dma_pointer;
+ struct vxge_tx_priv *txdl_priv = NULL;
+ struct __vxge_hw_fifo *fifo_hw;
+ int offload_type;
+ int vpath_no = 0;
+
+ vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
+ dev->name, __func__, __LINE__);
+
+ /* A buffer with no data will be dropped */
+ if (unlikely(skb->len <= 0)) {
+ vxge_debug_tx(VXGE_ERR,
+ "%s: Buffer has no data..", dev->name);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ vdev = netdev_priv(dev);
+
+ if (unlikely(!is_vxge_card_up(vdev))) {
+ vxge_debug_tx(VXGE_ERR,
+ "%s: vdev not initialized", dev->name);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (vdev->config.addr_learn_en) {
+ vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
+ if (vpath_no == -EPERM) {
+ vxge_debug_tx(VXGE_ERR,
+ "%s: Failed to store the mac address",
+ dev->name);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
+ vpath_no = skb_get_queue_mapping(skb);
+ else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
+ vpath_no = vxge_get_vpath_no(vdev, skb);
+
+ vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);
+
+ if (vpath_no >= vdev->no_of_vpath)
+ vpath_no = 0;
+
+ fifo = &vdev->vpaths[vpath_no].fifo;
+ fifo_hw = fifo->handle;
+
+ if (netif_tx_queue_stopped(fifo->txq))
+ return NETDEV_TX_BUSY;
+
+ avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
+ if (avail == 0) {
+ vxge_debug_tx(VXGE_ERR,
+ "%s: No free TXDs available", dev->name);
+ fifo->stats.txd_not_free++;
+ goto _exit0;
+ }
+
+ /* Last TXD? Stop tx queue to avoid dropping packets. TX
+ * completion will resume the queue.
+ */
+ if (avail == 1)
+ netif_tx_stop_queue(fifo->txq);
+
+ status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
+ if (unlikely(status != VXGE_HW_OK)) {
+ vxge_debug_tx(VXGE_ERR,
+ "%s: Out of descriptors.", dev->name);
+ fifo->stats.txd_out_of_desc++;
+ goto _exit0;
+ }
+
+ vxge_debug_tx(VXGE_TRACE,
+ "%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
+ dev->name, __func__, __LINE__,
+ fifo_hw, dtr, dtr_priv);
+
+ if (vlan_tx_tag_present(skb)) {
+ u16 vlan_tag = vlan_tx_tag_get(skb);
+ vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
+ }
+
+ first_frg_len = skb_headlen(skb);
+
+ dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
+ PCI_DMA_TODEVICE);
+
+ if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
+ vxge_hw_fifo_txdl_free(fifo_hw, dtr);
+ fifo->stats.pci_map_fail++;
+ goto _exit0;
+ }
+
+ txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
+ txdl_priv->skb = skb;
+ txdl_priv->dma_buffers[j] = dma_pointer;
+
+ frg_cnt = skb_shinfo(skb)->nr_frags;
+ vxge_debug_tx(VXGE_TRACE,
+ "%s: %s:%d skb = %p txdl_priv = %p "
+ "frag_cnt = %d dma_pointer = 0x%llx", dev->name,
+ __func__, __LINE__, skb, txdl_priv,
+ frg_cnt, (unsigned long long)dma_pointer);
+
+ vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
+ first_frg_len);
+
+ frag = &skb_shinfo(skb)->frags[0];
+ for (i = 0; i < frg_cnt; i++) {
+ /* ignore 0 length fragment */
+ if (!frag->size)
+ continue;
+
+ dma_pointer = (u64)skb_frag_dma_map(&fifo->pdev->dev, frag,
+ 0, frag->size,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer)))
+ goto _exit2;
+ vxge_debug_tx(VXGE_TRACE,
+ "%s: %s:%d frag = %d dma_pointer = 0x%llx",
+ dev->name, __func__, __LINE__, i,
+ (unsigned long long)dma_pointer);
+
+ txdl_priv->dma_buffers[j] = dma_pointer;
+ vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
+ frag->size);
+ frag += 1;
+ }
+
+ offload_type = vxge_offload_type(skb);
+
+ if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ int mss = vxge_tcp_mss(skb);
+ if (mss) {
+ vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
+ dev->name, __func__, __LINE__, mss);
+ vxge_hw_fifo_txdl_mss_set(dtr, mss);
+ } else {
+ vxge_assert(skb->len <=
+ dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
+ vxge_assert(0);
+ goto _exit1;
+ }
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ vxge_hw_fifo_txdl_cksum_set_bits(dtr,
+ VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
+ VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
+ VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
+
+ vxge_hw_fifo_txdl_post(fifo_hw, dtr);
+
+ vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
+ dev->name, __func__, __LINE__);
+ return NETDEV_TX_OK;
+
+_exit2:
+ vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
+_exit1:
+ j = 0;
+ frag = &skb_shinfo(skb)->frags[0];
+
+ pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+
+ for (; j < i; j++) {
+ pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
+ frag->size, PCI_DMA_TODEVICE);
+ frag += 1;
+ }
+
+ vxge_hw_fifo_txdl_free(fifo_hw, dtr);
+_exit0:
+ netif_tx_stop_queue(fifo->txq);
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * vxge_rx_term
+ *
+ * Function will be called by hw function to abort all outstanding receive
+ * descriptors.
+ */
+static void
+vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
+{
+ struct vxge_ring *ring = (struct vxge_ring *)userdata;
+ struct vxge_rx_priv *rx_priv =
+ vxge_hw_ring_rxd_private_get(dtrh);
+
+ vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
+ ring->ndev->name, __func__, __LINE__);
+ if (state != VXGE_HW_RXD_STATE_POSTED)
+ return;
+
+ pci_unmap_single(ring->pdev, rx_priv->data_dma,
+ rx_priv->data_size, PCI_DMA_FROMDEVICE);
+
+ dev_kfree_skb(rx_priv->skb);
+ rx_priv->skb_data = NULL;
+
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s: %s:%d Exiting...",
+ ring->ndev->name, __func__, __LINE__);
+}
+
+/*
+ * vxge_tx_term
+ *
+ * Function will be called to abort all outstanding tx descriptors
+ */
+static void
+vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
+{
+ struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
+ skb_frag_t *frag;
+ int i = 0, j, frg_cnt;
+ struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
+ struct sk_buff *skb = txd_priv->skb;
+
+ vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
+
+ if (state != VXGE_HW_TXDL_STATE_POSTED)
+ return;
+
+ /* check skb validity */
+ vxge_assert(skb);
+ frg_cnt = skb_shinfo(skb)->nr_frags;
+ frag = &skb_shinfo(skb)->frags[0];
+
+ /* for unfragmented skb */
+ pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+
+ for (j = 0; j < frg_cnt; j++) {
+ pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
+ frag->size, PCI_DMA_TODEVICE);
+ frag += 1;
+ }
+
+ dev_kfree_skb(skb);
+
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s:%d Exiting...", __func__, __LINE__);
+}
+
+static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
+{
+ struct list_head *entry, *next;
+ u64 del_mac = 0;
+ u8 *mac_address = (u8 *) (&del_mac);
+
+ /* Copy the mac address to delete from the list */
+ memcpy(mac_address, mac->macaddr, ETH_ALEN);
+
+ list_for_each_safe(entry, next, &vpath->mac_addr_list) {
+ if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
+ list_del(entry);
+ kfree((struct vxge_mac_addrs *)entry);
+ vpath->mac_addr_cnt--;
+
+ if (is_multicast_ether_addr(mac->macaddr))
+ vpath->mcast_addr_cnt--;
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+/* delete a mac address from DA table */
+static enum vxge_hw_status
+vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxge_vpath *vpath;
+
+ vpath = &vdev->vpaths[mac->vpath_no];
+ status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
+ mac->macmask);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "DA config delete entry failed for vpath:%d",
+ vpath->device_id);
+ } else
+ vxge_mac_list_del(vpath, mac);
+ return status;
+}
+
+/**
+ * vxge_set_multicast
+ * @dev: pointer to the device structure
+ *
+ * Entry point for multicast address enable/disable
+ * This function is a driver entry point which gets called by the kernel
+ * whenever multicast addresses must be enabled/disabled. This also gets
+ * called to set/reset promiscuous mode. Depending on the device flags, we
+ * determine whether multicast addresses must be enabled or promiscuous
+ * mode is to be disabled, etc.
+ */
+static void vxge_set_multicast(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ struct vxgedev *vdev;
+ int i, mcast_cnt = 0;
+ struct __vxge_hw_device *hldev;
+ struct vxge_vpath *vpath;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct macInfo mac_info;
+ int vpath_idx = 0;
+ struct vxge_mac_addrs *mac_entry;
+ struct list_head *list_head;
+ struct list_head *entry, *next;
+ u8 *mac_address = NULL;
+
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s:%d", __func__, __LINE__);
+
+ vdev = netdev_priv(dev);
+ hldev = (struct __vxge_hw_device *)vdev->devh;
+
+ if (unlikely(!is_vxge_card_up(vdev)))
+ return;
+
+ if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ vpath = &vdev->vpaths[i];
+ vxge_assert(vpath->is_open);
+ status = vxge_hw_vpath_mcast_enable(vpath->handle);
+ if (status != VXGE_HW_OK)
+ vxge_debug_init(VXGE_ERR, "failed to enable "
+ "multicast, status %d", status);
+ vdev->all_multi_flg = 1;
+ }
+ } else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ vpath = &vdev->vpaths[i];
+ vxge_assert(vpath->is_open);
+ status = vxge_hw_vpath_mcast_disable(vpath->handle);
+ if (status != VXGE_HW_OK)
+ vxge_debug_init(VXGE_ERR, "failed to disable "
+ "multicast, status %d", status);
+ vdev->all_multi_flg = 0;
+ }
+ }
+
+
+ if (!vdev->config.addr_learn_en) {
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ vpath = &vdev->vpaths[i];
+ vxge_assert(vpath->is_open);
+
+ if (dev->flags & IFF_PROMISC)
+ status = vxge_hw_vpath_promisc_enable(
+ vpath->handle);
+ else
+ status = vxge_hw_vpath_promisc_disable(
+ vpath->handle);
+ if (status != VXGE_HW_OK)
+ vxge_debug_init(VXGE_ERR, "failed to %s promisc"
+ ", status %d", dev->flags & IFF_PROMISC ?
+ "enable" : "disable", status);
+ }
+ }
+
+ memset(&mac_info, 0, sizeof(struct macInfo));
+ /* Update individual M_CAST address list */
+ if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
+ mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
+ list_head = &vdev->vpaths[0].mac_addr_list;
+ if ((netdev_mc_count(dev) +
+ (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
+ vdev->vpaths[0].max_mac_addr_cnt)
+ goto _set_all_mcast;
+
+ /* Delete previous MC's */
+ for (i = 0; i < mcast_cnt; i++) {
+ list_for_each_safe(entry, next, list_head) {
+ mac_entry = (struct vxge_mac_addrs *)entry;
+ /* Copy the mac address to delete */
+ mac_address = (u8 *)&mac_entry->macaddr;
+ memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
+
+ if (is_multicast_ether_addr(mac_info.macaddr)) {
+ for (vpath_idx = 0; vpath_idx <
+ vdev->no_of_vpath;
+ vpath_idx++) {
+ mac_info.vpath_no = vpath_idx;
+ status = vxge_del_mac_addr(
+ vdev,
+ &mac_info);
+ }
+ }
+ }
+ }
+
+ /* Add new ones */
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
+ for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
+ vpath_idx++) {
+ mac_info.vpath_no = vpath_idx;
+ mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
+ status = vxge_add_mac_addr(vdev, &mac_info);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "%s:%d Setting individual "
+ "multicast address failed",
+ __func__, __LINE__);
+ goto _set_all_mcast;
+ }
+ }
+ }
+
+ return;
+_set_all_mcast:
+ mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
+ /* Delete previous MC's */
+ for (i = 0; i < mcast_cnt; i++) {
+ list_for_each_safe(entry, next, list_head) {
+ mac_entry = (struct vxge_mac_addrs *)entry;
+ /* Copy the mac address to delete */
+ mac_address = (u8 *)&mac_entry->macaddr;
+ memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
+
+ if (is_multicast_ether_addr(mac_info.macaddr))
+ break;
+ }
+
+ for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
+ vpath_idx++) {
+ mac_info.vpath_no = vpath_idx;
+ status = vxge_del_mac_addr(vdev, &mac_info);
+ }
+ }
+
+ /* Enable all multicast */
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ vpath = &vdev->vpaths[i];
+ vxge_assert(vpath->is_open);
+
+ status = vxge_hw_vpath_mcast_enable(vpath->handle);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "%s:%d Enabling all multicasts failed",
+ __func__, __LINE__);
+ }
+ vdev->all_multi_flg = 1;
+ }
+ dev->flags |= IFF_ALLMULTI;
+ }
+
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s:%d Exiting...", __func__, __LINE__);
+}
+
+/**
+ * vxge_set_mac_addr
+ * @dev: pointer to the device structure
+ *
+ * Update entry "0" (default MAC addr)
+ */
+static int vxge_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct vxgedev *vdev;
+ struct __vxge_hw_device *hldev;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct macInfo mac_info_new, mac_info_old;
+ int vpath_idx = 0;
+
+ vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
+
+ vdev = netdev_priv(dev);
+ hldev = vdev->devh;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memset(&mac_info_new, 0, sizeof(struct macInfo));
+ memset(&mac_info_old, 0, sizeof(struct macInfo));
+
+ vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
+ __func__, __LINE__);
+
+ /* Get the old address */
+ memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);
+
+ /* Copy the new address */
+ memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);
+
+ /* First delete the old mac address from all the vpaths,
+ * as we can't specify an index while adding a new mac address.
+ */
+ for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
+ struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
+ if (!vpath->is_open) {
+ /* This can happen when this interface is added to or
+ * removed from a bonding interface. Delete this station
+ * address from the linked list.
+ */
+ vxge_mac_list_del(vpath, &mac_info_old);
+
+ /* Add this new address to the linked list
+ for later restoring */
+ vxge_mac_list_add(vpath, &mac_info_new);
+
+ continue;
+ }
+ /* Delete the station address */
+ mac_info_old.vpath_no = vpath_idx;
+ status = vxge_del_mac_addr(vdev, &mac_info_old);
+ }
+
+ if (unlikely(!is_vxge_card_up(vdev))) {
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ return VXGE_HW_OK;
+ }
+
+ /* Set this mac address to all the vpaths */
+ for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
+ mac_info_new.vpath_no = vpath_idx;
+ mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
+ status = vxge_add_mac_addr(vdev, &mac_info_new);
+ if (status != VXGE_HW_OK)
+ return -EINVAL;
+ }
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ return status;
+}
+
+/*
+ * vxge_vpath_intr_enable
+ * @vdev: pointer to vdev
+ * @vp_id: vpath for which to enable the interrupts
+ *
+ * Enables the interrupts for the vpath
+*/
+static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
+{
+ struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
+ int msix_id = 0;
+ int tim_msix_id[4] = {0, 1, 0, 0};
+ int alarm_msix_id = VXGE_ALARM_MSIX_ID;
+
+ vxge_hw_vpath_intr_enable(vpath->handle);
+
+ if (vdev->config.intr_type == INTA)
+ vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
+ else {
+ vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
+ alarm_msix_id);
+
+ msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
+ vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
+ vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
+
+ /* enable the alarm vector */
+ msix_id = (vpath->handle->vpath->hldev->first_vp_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
+ vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
+ }
+}
+
+/*
+ * vxge_vpath_intr_disable
+ * @vdev: pointer to vdev
+ * @vp_id: vpath for which to disable the interrupts
+ *
+ * Disables the interrupts for the vpath
+*/
+static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
+{
+ struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
+ struct __vxge_hw_device *hldev;
+ int msix_id;
+
+ hldev = pci_get_drvdata(vdev->pdev);
+
+ vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
+
+ vxge_hw_vpath_intr_disable(vpath->handle);
+
+ if (vdev->config.intr_type == INTA)
+ vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
+ else {
+ msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
+ vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
+ vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
+
+ /* disable the alarm vector */
+ msix_id = (vpath->handle->vpath->hldev->first_vp_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
+ vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
+ }
+}
+
+/* list all mac addresses from DA table */
+static enum vxge_hw_status
+vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ unsigned char macmask[ETH_ALEN];
+ unsigned char macaddr[ETH_ALEN];
+
+ status = vxge_hw_vpath_mac_addr_get(vpath->handle,
+ macaddr, macmask);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "DA config list entry failed for vpath:%d",
+ vpath->device_id);
+ return status;
+ }
+
+ while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
+ status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
+ macaddr, macmask);
+ if (status != VXGE_HW_OK)
+ break;
+ }
+
+ return status;
+}
+
+/* Store all mac addresses from the list to the DA table */
+static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct macInfo mac_info;
+ u8 *mac_address = NULL;
+ struct list_head *entry, *next;
+
+ memset(&mac_info, 0, sizeof(struct macInfo));
+
+ if (vpath->is_open) {
+ list_for_each_safe(entry, next, &vpath->mac_addr_list) {
+ mac_address = (u8 *)
+ &((struct vxge_mac_addrs *)entry)->macaddr;
+ memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
+ ((struct vxge_mac_addrs *)entry)->state =
+ VXGE_LL_MAC_ADDR_IN_DA_TABLE;
+ /* does this mac address already exist in da table? */
+ status = vxge_search_mac_addr_in_da_table(vpath,
+ &mac_info);
+ if (status != VXGE_HW_OK) {
+ /* Add this mac address to the DA table */
+ status = vxge_hw_vpath_mac_addr_add(
+ vpath->handle, mac_info.macaddr,
+ mac_info.macmask,
+ VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "DA add entry failed for vpath:%d",
+ vpath->device_id);
+ ((struct vxge_mac_addrs *)entry)->state
+ = VXGE_LL_MAC_ADDR_IN_LIST;
+ }
+ }
+ }
+ }
+
+ return status;
+}
+
+/* Store all vlan ids from the list to the vid table */
+static enum vxge_hw_status
+vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxgedev *vdev = vpath->vdev;
+ u16 vid;
+
+ if (!vpath->is_open)
+ return status;
+
+ for_each_set_bit(vid, vdev->active_vlans, VLAN_N_VID)
+ status = vxge_hw_vpath_vid_add(vpath->handle, vid);
+
+ return status;
+}
+
+/*
+ * vxge_reset_vpath
+ * @vdev: pointer to vdev
+ * @vp_id: vpath to reset
+ *
+ * Resets the vpath
+*/
+static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
+ int ret = 0;
+
+ /* check if device is down already */
+ if (unlikely(!is_vxge_card_up(vdev)))
+ return 0;
+
+ /* is device reset already scheduled */
+ if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
+ return 0;
+
+ if (vpath->handle) {
+ if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
+ if (is_vxge_card_up(vdev) &&
+ vxge_hw_vpath_recover_from_reset(vpath->handle)
+ != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "vxge_hw_vpath_recover_from_reset "
+ "failed for vpath:%d", vp_id);
+ return status;
+ }
+ } else {
+ vxge_debug_init(VXGE_ERR,
+ "vxge_hw_vpath_reset failed for "
+ "vpath:%d", vp_id);
+ return status;
+ }
+ } else
+ return VXGE_HW_FAIL;
+
+ vxge_restore_vpath_mac_addr(vpath);
+ vxge_restore_vpath_vid_table(vpath);
+
+ /* Enable all broadcast */
+ vxge_hw_vpath_bcast_enable(vpath->handle);
+
+ /* Enable all multicast */
+ if (vdev->all_multi_flg) {
+ status = vxge_hw_vpath_mcast_enable(vpath->handle);
+ if (status != VXGE_HW_OK)
+ vxge_debug_init(VXGE_ERR,
+ "%s:%d Enabling multicast failed",
+ __func__, __LINE__);
+ }
+
+ /* Enable the interrupts */
+ vxge_vpath_intr_enable(vdev, vp_id);
+
+ smp_wmb();
+
+ /* Enable the flow of traffic through the vpath */
+ vxge_hw_vpath_enable(vpath->handle);
+
+ smp_wmb();
+ vxge_hw_vpath_rx_doorbell_init(vpath->handle);
+ vpath->ring.last_status = VXGE_HW_OK;
+
+ /* Vpath reset done */
+ clear_bit(vp_id, &vdev->vp_reset);
+
+ /* Start the vpath queue */
+ if (netif_tx_queue_stopped(vpath->fifo.txq))
+ netif_tx_wake_queue(vpath->fifo.txq);
+
+ return ret;
+}
+
+/* Configure CI */
+static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
+{
+ int i = 0;
+
+ /* Enable CI for RTI */
+ if (vdev->config.intr_type == MSI_X) {
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ struct __vxge_hw_ring *hw_ring;
+
+ hw_ring = vdev->vpaths[i].ring.handle;
+ vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
+ }
+ }
+
+ /* Enable CI for TTI */
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
+ vxge_hw_vpath_tti_ci_set(hw_fifo);
+ /*
+ * For INTA (with or without NAPI), set CI on for only one
+ * vpath, since there is only one free-running timer.
+ */
+ if ((vdev->config.intr_type == INTA) && (i == 0))
+ break;
+ }
+}
+
+static int do_vxge_reset(struct vxgedev *vdev, int event)
+{
+ enum vxge_hw_status status;
+ int ret = 0, vp_id, i;
+
+ vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
+
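+ /*
+ * VXGE_LL_FULL_RESET runs the entire sequence below,
+ * VXGE_LL_START_RESET only quiesces the queues, and
+ * VXGE_LL_COMPL_RESET finishes a previously started reset by
+ * recovering the vpaths and re-enabling traffic.
+ */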
+ if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
+ /* check if device is down already */
+ if (unlikely(!is_vxge_card_up(vdev)))
+ return 0;
+
+ /* is reset already scheduled */
+ if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
+ return 0;
+ }
+
+ if (event == VXGE_LL_FULL_RESET) {
+ netif_carrier_off(vdev->ndev);
+
+ /* wait for all the vpath reset to complete */
+ for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
+ while (test_bit(vp_id, &vdev->vp_reset))
+ msleep(50);
+ }
+
+ netif_carrier_on(vdev->ndev);
+
+ /* if execution mode is set to debug, don't reset the adapter */
+ if (unlikely(vdev->exec_mode)) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: execution mode is debug, returning..",
+ vdev->ndev->name);
+ clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+ netif_tx_stop_all_queues(vdev->ndev);
+ return 0;
+ }
+ }
+
+ if (event == VXGE_LL_FULL_RESET) {
+ vxge_hw_device_wait_receive_idle(vdev->devh);
+ vxge_hw_device_intr_disable(vdev->devh);
+
+ switch (vdev->cric_err_event) {
+ case VXGE_HW_EVENT_UNKNOWN:
+ netif_tx_stop_all_queues(vdev->ndev);
+ vxge_debug_init(VXGE_ERR,
+ "fatal: %s: Disabling device due to"
+ "fatal: %s: Disabling device due to "
+ vdev->ndev->name);
+ ret = -EPERM;
+ goto out;
+ case VXGE_HW_EVENT_RESET_START:
+ break;
+ case VXGE_HW_EVENT_RESET_COMPLETE:
+ case VXGE_HW_EVENT_LINK_DOWN:
+ case VXGE_HW_EVENT_LINK_UP:
+ case VXGE_HW_EVENT_ALARM_CLEARED:
+ case VXGE_HW_EVENT_ECCERR:
+ case VXGE_HW_EVENT_MRPCIM_ECCERR:
+ ret = -EPERM;
+ goto out;
+ case VXGE_HW_EVENT_FIFO_ERR:
+ case VXGE_HW_EVENT_VPATH_ERR:
+ break;
+ case VXGE_HW_EVENT_CRITICAL_ERR:
+ netif_tx_stop_all_queues(vdev->ndev);
+ vxge_debug_init(VXGE_ERR,
+ "fatal: %s: Disabling device due to "
+ "serious error",
+ vdev->ndev->name);
+ /* SOP or device reset required */
+ /* This event is not currently used */
+ ret = -EPERM;
+ goto out;
+ case VXGE_HW_EVENT_SERR:
+ netif_tx_stop_all_queues(vdev->ndev);
+ vxge_debug_init(VXGE_ERR,
+ "fatal: %s: Disabling device due to "
+ "serious error",
+ vdev->ndev->name);
+ ret = -EPERM;
+ goto out;
+ case VXGE_HW_EVENT_SRPCIM_SERR:
+ case VXGE_HW_EVENT_MRPCIM_SERR:
+ ret = -EPERM;
+ goto out;
+ case VXGE_HW_EVENT_SLOT_FREEZE:
+ netif_tx_stop_all_queues(vdev->ndev);
+ vxge_debug_init(VXGE_ERR,
+ "fatal: %s: Disabling device due to "
+ "slot freeze",
+ vdev->ndev->name);
+ ret = -EPERM;
+ goto out;
+ default:
+ break;
+
+ }
+ }
+
+ if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
+ netif_tx_stop_all_queues(vdev->ndev);
+
+ if (event == VXGE_LL_FULL_RESET) {
+ status = vxge_reset_all_vpaths(vdev);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "fatal: %s: can not reset vpaths",
+ vdev->ndev->name);
+ ret = -EPERM;
+ goto out;
+ }
+ }
+
+ if (event == VXGE_LL_COMPL_RESET) {
+ for (i = 0; i < vdev->no_of_vpath; i++)
+ if (vdev->vpaths[i].handle) {
+ if (vxge_hw_vpath_recover_from_reset(
+ vdev->vpaths[i].handle)
+ != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "vxge_hw_vpath_recover_"
+ "from_reset failed for vpath: "
+ "%d", i);
+ ret = -EPERM;
+ goto out;
+ }
+ } else {
+ vxge_debug_init(VXGE_ERR,
+ "vxge_hw_vpath_reset failed for "
+ "vpath:%d", i);
+ ret = -EPERM;
+ goto out;
+ }
+ }
+
+ if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
+ /* Reprogram the DA table with populated mac addresses */
+ for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
+ vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
+ vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
+ }
+
+ /* enable vpath interrupts */
+ for (i = 0; i < vdev->no_of_vpath; i++)
+ vxge_vpath_intr_enable(vdev, i);
+
+ vxge_hw_device_intr_enable(vdev->devh);
+
+ smp_wmb();
+
+ /* Indicate card up */
+ set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+
+ /* Get the traffic to flow through the vpaths */
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ vxge_hw_vpath_enable(vdev->vpaths[i].handle);
+ smp_wmb();
+ vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
+ }
+
+ netif_tx_wake_all_queues(vdev->ndev);
+ }
+
+ /* configure CI */
+ vxge_config_ci_for_tti_rti(vdev);
+
+out:
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s:%d Exiting...", __func__, __LINE__);
+
+ /* Indicate reset done */
+ if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
+ clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
+ return ret;
+}
+
+/*
+ * vxge_reset
+ * @vdev: pointer to ll device
+ *
+ * driver may reset the chip on events of serr, eccerr, etc
+ */
+static void vxge_reset(struct work_struct *work)
+{
+ struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);
+
+ if (!netif_running(vdev->ndev))
+ return;
+
+ do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
+}
+
+/**
+ * vxge_poll - Receive handler when Receive Polling is used.
+ * @dev: pointer to the device structure.
+ * @budget: Number of packets budgeted to be processed in this iteration.
+ *
+ * This function comes into the picture only if the receive side is being
+ * handled through polling (called NAPI in Linux). It mostly does what the
+ * normal Rx interrupt handler does in terms of descriptor and packet
+ * processing, but not in an interrupt context. It also processes at most
+ * a specified number of packets in one iteration; this value is passed
+ * down by the kernel as the function argument 'budget'.
+ */
+static int vxge_poll_msix(struct napi_struct *napi, int budget)
+{
+ struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
+ int pkts_processed;
+ int budget_org = budget;
+
+ ring->budget = budget;
+ ring->pkts_processed = 0;
+ vxge_hw_vpath_poll_rx(ring->handle);
+ pkts_processed = ring->pkts_processed;
+
+ if (ring->pkts_processed < budget_org) {
+ napi_complete(napi);
+
+ /* Re enable the Rx interrupts for the vpath */
+ vxge_hw_channel_msix_unmask(
+ (struct __vxge_hw_channel *)ring->handle,
+ ring->rx_vector_no);
+ mmiowb();
+ }
+
+ /* We copy and return the local variable in case the interrupt fires
+ * right after the Rx MSI-X vector is unmasked above, which could
+ * preempt this NAPI thread */
+ return pkts_processed;
+}
+
+static int vxge_poll_inta(struct napi_struct *napi, int budget)
+{
+ struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
+ int pkts_processed = 0;
+ int i;
+ int budget_org = budget;
+ struct vxge_ring *ring;
+
+ struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
+
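+ /*
+ * A single INTA NAPI context polls every vpath ring, charging
+ * each ring's processed packets against the shared budget.
+ */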
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ ring = &vdev->vpaths[i].ring;
+ ring->budget = budget;
+ ring->pkts_processed = 0;
+ vxge_hw_vpath_poll_rx(ring->handle);
+ pkts_processed += ring->pkts_processed;
+ budget -= ring->pkts_processed;
+ if (budget <= 0)
+ break;
+ }
+
+ VXGE_COMPLETE_ALL_TX(vdev);
+
+ if (pkts_processed < budget_org) {
+ napi_complete(napi);
+ /* Re enable the Rx interrupts for the ring */
+ vxge_hw_device_unmask_all(hldev);
+ vxge_hw_device_flush_io(hldev);
+ }
+
+ return pkts_processed;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * vxge_netpoll - netpoll event handler entry point
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function will be called by upper layer to check for events on the
+ * interface in situations where interrupts are disabled. It is used for
+ * specific in-kernel networking tasks, such as remote consoles and kernel
+ * debugging over the network (e.g. netdump in Red Hat).
+ */
+static void vxge_netpoll(struct net_device *dev)
+{
+ struct __vxge_hw_device *hldev;
+ struct vxgedev *vdev;
+
+ vdev = netdev_priv(dev);
+ hldev = pci_get_drvdata(vdev->pdev);
+
+ vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
+
+ if (pci_channel_offline(vdev->pdev))
+ return;
+
+ disable_irq(dev->irq);
+ vxge_hw_device_clear_tx_rx(hldev);
+
+ VXGE_COMPLETE_ALL_RX(vdev);
+ VXGE_COMPLETE_ALL_TX(vdev);
+
+ enable_irq(dev->irq);
+
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s:%d Exiting...", __func__, __LINE__);
+}
+#endif
+
+/* RTH configuration */
+static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxge_hw_rth_hash_types hash_types;
+ u8 itable[256] = {0}; /* indirection table */
+ u8 mtable[256] = {0}; /* CPU to vpath mapping */
+ int index;
+
+ /*
+ * Filling
+ * - itable with bucket numbers
+ * - mtable with bucket-to-vpath mapping
+ */
+ for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
+ itable[index] = index;
+ mtable[index] = index % vdev->no_of_vpath;
+ }
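+ /*
+ * For example, with rth_bkt_sz == 2 (four buckets) and two open
+ * vpaths, this yields itable = {0, 1, 2, 3} and mtable = {0, 1, 0, 1}.
+ */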
+
+ /* set indirection table, bucket-to-vpath mapping */
+ status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
+ vdev->no_of_vpath,
+ mtable, itable,
+ vdev->config.rth_bkt_sz);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "RTH indirection table configuration failed "
+ "for vpath:%d", vdev->vpaths[0].device_id);
+ return status;
+ }
+
+ /* Fill RTH hash types */
+ hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
+ hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
+ hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
+ hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
+ hash_types.hash_type_tcpipv6ex_en =
+ vdev->config.rth_hash_type_tcpipv6ex;
+ hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
+
+ /*
+ * Because the itable_set() method uses the active_table field
+ * for the target virtual path the RTH config should be updated
+ * for all VPATHs. The h/w only uses the lowest numbered VPATH
+ * when steering frames.
+ */
+ for (index = 0; index < vdev->no_of_vpath; index++) {
+ status = vxge_hw_vpath_rts_rth_set(
+ vdev->vpaths[index].handle,
+ vdev->config.rth_algorithm,
+ &hash_types,
+ vdev->config.rth_bkt_sz);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "RTH configuration failed for vpath:%d",
+ vdev->vpaths[index].device_id);
+ return status;
+ }
+ }
+
+ return status;
+}
+
+/* reset vpaths */
+enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxge_vpath *vpath;
+ int i;
+
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ vpath = &vdev->vpaths[i];
+ if (vpath->handle) {
+ if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
+ if (is_vxge_card_up(vdev) &&
+ vxge_hw_vpath_recover_from_reset(
+ vpath->handle) != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "vxge_hw_vpath_recover_"
+ "from_reset failed for vpath: "
+ "%d", i);
+ return status;
+ }
+ } else {
+ vxge_debug_init(VXGE_ERR,
+ "vxge_hw_vpath_reset failed for "
+ "vpath:%d", i);
+ return status;
+ }
+ }
+ }
+
+ return status;
+}
+
+/* close vpaths */
+static void vxge_close_vpaths(struct vxgedev *vdev, int index)
+{
+ struct vxge_vpath *vpath;
+ int i;
+
+ for (i = index; i < vdev->no_of_vpath; i++) {
+ vpath = &vdev->vpaths[i];
+
+ if (vpath->handle && vpath->is_open) {
+ vxge_hw_vpath_close(vpath->handle);
+ vdev->stats.vpaths_open--;
+ }
+ vpath->is_open = 0;
+ vpath->handle = NULL;
+ }
+}
+
+/* open vpaths */
+static int vxge_open_vpaths(struct vxgedev *vdev)
+{
+ struct vxge_hw_vpath_attr attr;
+ enum vxge_hw_status status;
+ struct vxge_vpath *vpath;
+ u32 vp_id = 0;
+ int i;
+
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ vpath = &vdev->vpaths[i];
+ vxge_assert(vpath->is_configured);
+
+ if (!vdev->titan1) {
+ struct vxge_hw_vp_config *vcfg;
+ vcfg = &vdev->devh->config.vp_config[vpath->device_id];
+
+ vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
+ vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
+ vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
+ vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
+ vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
+ vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
+ vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
+ vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
+ vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
+ }
+
+ attr.vp_id = vpath->device_id;
+ attr.fifo_attr.callback = vxge_xmit_compl;
+ attr.fifo_attr.txdl_term = vxge_tx_term;
+ attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
+ attr.fifo_attr.userdata = &vpath->fifo;
+
+ attr.ring_attr.callback = vxge_rx_1b_compl;
+ attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
+ attr.ring_attr.rxd_term = vxge_rx_term;
+ attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
+ attr.ring_attr.userdata = &vpath->ring;
+
+ vpath->ring.ndev = vdev->ndev;
+ vpath->ring.pdev = vdev->pdev;
+
+ status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
+ if (status == VXGE_HW_OK) {
+ vpath->fifo.handle =
+ (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
+ vpath->ring.handle =
+ (struct __vxge_hw_ring *)attr.ring_attr.userdata;
+ vpath->fifo.tx_steering_type =
+ vdev->config.tx_steering_type;
+ vpath->fifo.ndev = vdev->ndev;
+ vpath->fifo.pdev = vdev->pdev;
+ if (vdev->config.tx_steering_type)
+ vpath->fifo.txq =
+ netdev_get_tx_queue(vdev->ndev, i);
+ else
+ vpath->fifo.txq =
+ netdev_get_tx_queue(vdev->ndev, 0);
+ vpath->fifo.indicate_max_pkts =
+ vdev->config.fifo_indicate_max_pkts;
+ vpath->fifo.tx_vector_no = 0;
+ vpath->ring.rx_vector_no = 0;
+ vpath->ring.rx_hwts = vdev->rx_hwts;
+ vpath->is_open = 1;
+ vdev->vp_handles[i] = vpath->handle;
+ vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
+ vdev->stats.vpaths_open++;
+ } else {
+ vdev->stats.vpath_open_fail++;
+ vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
+ "open with status: %d",
+ vdev->ndev->name, vpath->device_id,
+ status);
+ vxge_close_vpaths(vdev, 0);
+ return -EPERM;
+ }
+
+ vp_id = vpath->handle->vpath->vp_id;
+ vdev->vpaths_deployed |= vxge_mBIT(vp_id);
+ }
+
+ return VXGE_HW_OK;
+}
+
+/**
+ * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
+ * if the interrupts are not within a range
+ * @fifo: pointer to transmit fifo structure
+ * Description: The function changes the boundary timer and restriction
+ * timer values depending on the traffic.
+ * Return Value: None
+ */
+static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
+{
+ fifo->interrupt_count++;
+ if (jiffies > fifo->jiffies + HZ / 100) {
+ struct __vxge_hw_fifo *hw_fifo = fifo->handle;
+
+ fifo->jiffies = jiffies;
+ if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
+ hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
+ hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
+ vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+ } else if (hw_fifo->rtimer != 0) {
+ hw_fifo->rtimer = 0;
+ vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+ }
+ fifo->interrupt_count = 0;
+ }
+}
+
+/**
+ * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
+ * if the interrupts are not within a range
+ * @ring: pointer to receive ring structure
+ * Description: The function increases or decreases the packet counts within
+ * the ranges of traffic utilization if the interrupts due to this ring are
+ * not within a fixed range.
+ * Return Value: Nothing
+ */
+static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
+{
+ ring->interrupt_count++;
+ if (jiffies > ring->jiffies + HZ / 100) {
+ struct __vxge_hw_ring *hw_ring = ring->handle;
+
+ ring->jiffies = jiffies;
+ if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
+ hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
+ hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
+ vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+ } else if (hw_ring->rtimer != 0) {
+ hw_ring->rtimer = 0;
+ vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+ }
+ ring->interrupt_count = 0;
+ }
+}
+
+/*
+ * vxge_isr_napi
+ * @irq: the irq of the device.
+ * @dev_id: a void pointer to the vxgedev structure of the Titan device
+ *
+ * This function is the ISR handler of the device when napi is enabled. It
+ * identifies the reason for the interrupt and calls the relevant service
+ * routines.
+ */
+static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
+{
+ struct net_device *dev;
+ struct __vxge_hw_device *hldev;
+ u64 reason;
+ enum vxge_hw_status status;
+ struct vxgedev *vdev = (struct vxgedev *)dev_id;
+
+ vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
+
+ dev = vdev->ndev;
+ hldev = pci_get_drvdata(vdev->pdev);
+
+ if (pci_channel_offline(vdev->pdev))
+ return IRQ_NONE;
+
+ if (unlikely(!is_vxge_card_up(vdev)))
+ return IRQ_HANDLED;
+
+ status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
+ if (status == VXGE_HW_OK) {
+ vxge_hw_device_mask_all(hldev);
+
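+ /* vpaths_deployed holds per-vpath bits MSB-first (vxge_mBIT);
+ * shift them down to the low-order positions expected by the
+ * TRAFFIC_INT mask.
+ */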
+ if (reason &
+ VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
+ vdev->vpaths_deployed >>
+ (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {
+
+ vxge_hw_device_clear_tx_rx(hldev);
+ napi_schedule(&vdev->napi);
+ vxge_debug_intr(VXGE_TRACE,
+ "%s:%d Exiting...", __func__, __LINE__);
+ return IRQ_HANDLED;
+ } else
+ vxge_hw_device_unmask_all(hldev);
+ } else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
+ (status == VXGE_HW_ERR_CRITICAL) ||
+ (status == VXGE_HW_ERR_FIFO))) {
+ vxge_hw_device_mask_all(hldev);
+ vxge_hw_device_flush_io(hldev);
+ return IRQ_HANDLED;
+ } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
+ return IRQ_HANDLED;
+
+ vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
+ return IRQ_NONE;
+}
+
+#ifdef CONFIG_PCI_MSI
+
+static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
+{
+ struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
+
+ adaptive_coalesce_tx_interrupts(fifo);
+
+ vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
+ fifo->tx_vector_no);
+
+ vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
+ fifo->tx_vector_no);
+
+ VXGE_COMPLETE_VPATH_TX(fifo);
+
+ vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
+ fifo->tx_vector_no);
+
+ mmiowb();
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
+{
+ struct vxge_ring *ring = (struct vxge_ring *)dev_id;
+
+ adaptive_coalesce_rx_interrupts(ring);
+
+ vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
+ ring->rx_vector_no);
+
+ vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
+ ring->rx_vector_no);
+
+ napi_schedule(&ring->napi);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+vxge_alarm_msix_handle(int irq, void *dev_id)
+{
+ int i;
+ enum vxge_hw_status status;
+ struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
+ struct vxgedev *vdev = vpath->vdev;
+ int msix_id = (vpath->handle->vpath->vp_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
+
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ /* Reduce the chance of losing alarm interrupts by masking
+ * the vector. A pending bit will be set if an alarm is
+ * generated and the interrupt will fire on unmask.
+ */
+ vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
+ vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
+ mmiowb();
+
+ status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
+ vdev->exec_mode);
+ if (status == VXGE_HW_OK) {
+ vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
+ msix_id);
+ mmiowb();
+ continue;
+ }
+ vxge_debug_intr(VXGE_ERR,
+ "%s: vxge_hw_vpath_alarm_process failed %x ",
+ VXGE_DRIVER_NAME, status);
+ }
+ return IRQ_HANDLED;
+}
+
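+/*
+ * Allocate MSI-X vectors: two per vpath (one for the Tx fifo, one for
+ * the Rx ring) plus a single alarm vector. If the host cannot grant
+ * that many, retry with a reduced number of vpaths.
+ */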
+static int vxge_alloc_msix(struct vxgedev *vdev)
+{
+ int j, i, ret = 0;
+ int msix_intr_vect = 0, temp;
+ vdev->intr_cnt = 0;
+
+start:
+ /* Tx/Rx MSIX Vectors count */
+ vdev->intr_cnt = vdev->no_of_vpath * 2;
+
+ /* Alarm MSIX Vectors count */
+ vdev->intr_cnt++;
+
+ vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!vdev->entries) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: memory allocation failed",
+ VXGE_DRIVER_NAME);
+ ret = -ENOMEM;
+ goto alloc_entries_failed;
+ }
+
+ vdev->vxge_entries = kcalloc(vdev->intr_cnt,
+ sizeof(struct vxge_msix_entry),
+ GFP_KERNEL);
+ if (!vdev->vxge_entries) {
+ vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
+ VXGE_DRIVER_NAME);
+ ret = -ENOMEM;
+ goto alloc_vxge_entries_failed;
+ }
+
+ for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
+
+ msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
+
+ /* Initialize the fifo vector */
+ vdev->entries[j].entry = msix_intr_vect;
+ vdev->vxge_entries[j].entry = msix_intr_vect;
+ vdev->vxge_entries[j].in_use = 0;
+ j++;
+
+ /* Initialize the ring vector */
+ vdev->entries[j].entry = msix_intr_vect + 1;
+ vdev->vxge_entries[j].entry = msix_intr_vect + 1;
+ vdev->vxge_entries[j].in_use = 0;
+ j++;
+ }
+
+ /* Initialize the alarm vector */
+ vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
+ vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
+ vdev->vxge_entries[j].in_use = 0;
+
+ ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
+ if (ret > 0) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: MSI-X enable failed for %d vectors, ret: %d",
+ VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
+ if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) {
+ ret = -ENODEV;
+ goto enable_msix_failed;
+ }
+
+ kfree(vdev->entries);
+ kfree(vdev->vxge_entries);
+ vdev->entries = NULL;
+ vdev->vxge_entries = NULL;
+ /* Retry with fewer vectors by reducing the vpath count */
+ temp = (ret - 1)/2;
+ vxge_close_vpaths(vdev, temp);
+ vdev->no_of_vpath = temp;
+ goto start;
+ } else if (ret < 0) {
+ ret = -ENODEV;
+ goto enable_msix_failed;
+ }
+ return 0;
+
+enable_msix_failed:
+ kfree(vdev->vxge_entries);
+alloc_vxge_entries_failed:
+ kfree(vdev->entries);
+alloc_entries_failed:
+ return ret;
+}
+
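+/*
+ * Enable MSI-X and record each vpath's Tx and Rx vector numbers
+ * (device_id * VXGE_HW_VPATH_MSIX_ACTIVE for Tx, one higher for Rx).
+ */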
+static int vxge_enable_msix(struct vxgedev *vdev)
+{
+
+ int i, ret = 0;
+ /* 0 - Tx, 1 - Rx */
+ int tim_msix_id[4] = {0, 1, 0, 0};
+
+ vdev->intr_cnt = 0;
+
+ /* allocate msix vectors */
+ ret = vxge_alloc_msix(vdev);
+ if (!ret) {
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ struct vxge_vpath *vpath = &vdev->vpaths[i];
+
+ /* If a fifo or ring is not enabled, the MSIX vector for
+ * it should be set to 0.
+ */
+ vpath->ring.rx_vector_no = (vpath->device_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
+
+ vpath->fifo.tx_vector_no = (vpath->device_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE);
+
+ vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
+ VXGE_ALARM_MSIX_ID);
+ }
+ }
+
+ return ret;
+}
+
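+/* Free every requested MSI-X vector and disable MSI-X on the device. */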
+static void vxge_rem_msix_isr(struct vxgedev *vdev)
+{
+ int intr_cnt;
+
+ for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
+ intr_cnt++) {
+ if (vdev->vxge_entries[intr_cnt].in_use) {
+ synchronize_irq(vdev->entries[intr_cnt].vector);
+ free_irq(vdev->entries[intr_cnt].vector,
+ vdev->vxge_entries[intr_cnt].arg);
+ vdev->vxge_entries[intr_cnt].in_use = 0;
+ }
+ }
+
+ kfree(vdev->entries);
+ kfree(vdev->vxge_entries);
+ vdev->entries = NULL;
+ vdev->vxge_entries = NULL;
+
+ if (vdev->config.intr_type == MSI_X)
+ pci_disable_msix(vdev->pdev);
+}
+#endif
+
+static void vxge_rem_isr(struct vxgedev *vdev)
+{
+ struct __vxge_hw_device *hldev;
+ hldev = pci_get_drvdata(vdev->pdev);
+
+#ifdef CONFIG_PCI_MSI
+ if (vdev->config.intr_type == MSI_X) {
+ vxge_rem_msix_isr(vdev);
+ } else
+#endif
+ if (vdev->config.intr_type == INTA) {
+ synchronize_irq(vdev->pdev->irq);
+ free_irq(vdev->pdev->irq, vdev);
+ }
+}
+
+static int vxge_add_isr(struct vxgedev *vdev)
+{
+ int ret = 0;
+#ifdef CONFIG_PCI_MSI
+ int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
+ int pci_fun = PCI_FUNC(vdev->pdev->devfn);
+
+ if (vdev->config.intr_type == MSI_X)
+ ret = vxge_enable_msix(vdev);
+
+ if (ret) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
+ vxge_debug_init(VXGE_ERR,
+ "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
+ vdev->config.intr_type = INTA;
+ }
+
+ if (vdev->config.intr_type == MSI_X) {
+ for (intr_idx = 0;
+ intr_idx < (vdev->no_of_vpath *
+ VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
+
+ msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
+ irq_req = 0;
+
+ switch (msix_idx) {
+ case 0:
+ snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
+ "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
+ vdev->ndev->name,
+ vdev->entries[intr_cnt].entry,
+ pci_fun, vp_idx);
+ ret = request_irq(
+ vdev->entries[intr_cnt].vector,
+ vxge_tx_msix_handle, 0,
+ vdev->desc[intr_cnt],
+ &vdev->vpaths[vp_idx].fifo);
+ vdev->vxge_entries[intr_cnt].arg =
+ &vdev->vpaths[vp_idx].fifo;
+ irq_req = 1;
+ break;
+ case 1:
+ snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
+ "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
+ vdev->ndev->name,
+ vdev->entries[intr_cnt].entry,
+ pci_fun, vp_idx);
+ ret = request_irq(
+ vdev->entries[intr_cnt].vector,
+ vxge_rx_msix_napi_handle,
+ 0,
+ vdev->desc[intr_cnt],
+ &vdev->vpaths[vp_idx].ring);
+ vdev->vxge_entries[intr_cnt].arg =
+ &vdev->vpaths[vp_idx].ring;
+ irq_req = 1;
+ break;
+ }
+
+ if (ret) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: MSIX - %d Registration failed",
+ vdev->ndev->name, intr_cnt);
+ vxge_rem_msix_isr(vdev);
+ vdev->config.intr_type = INTA;
+ vxge_debug_init(VXGE_ERR,
+ "%s: Defaulting to INTA"
+ , vdev->ndev->name);
+ goto INTA_MODE;
+ }
+
+ if (irq_req) {
+ /* We requested this msix interrupt */
+ vdev->vxge_entries[intr_cnt].in_use = 1;
+ msix_idx += vdev->vpaths[vp_idx].device_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE;
+ vxge_hw_vpath_msix_unmask(
+ vdev->vpaths[vp_idx].handle,
+ msix_idx);
+ intr_cnt++;
+ }
+
+ /* Point to next vpath handler */
+ if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
+ (vp_idx < (vdev->no_of_vpath - 1)))
+ vp_idx++;
+ }
+
+ intr_cnt = vdev->no_of_vpath * 2;
+ snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
+ "%s:vxge:MSI-X %d - Alarm - fn:%d",
+ vdev->ndev->name,
+ vdev->entries[intr_cnt].entry,
+ pci_fun);
+ /* For Alarm interrupts */
+ ret = request_irq(vdev->entries[intr_cnt].vector,
+ vxge_alarm_msix_handle, 0,
+ vdev->desc[intr_cnt],
+ &vdev->vpaths[0]);
+ if (ret) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: MSIX - %d Registration failed",
+ vdev->ndev->name, intr_cnt);
+ vxge_rem_msix_isr(vdev);
+ vdev->config.intr_type = INTA;
+ vxge_debug_init(VXGE_ERR,
+ "%s: Defaulting to INTA",
+ vdev->ndev->name);
+ goto INTA_MODE;
+ }
+
+ msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
+ vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
+ msix_idx);
+ vdev->vxge_entries[intr_cnt].in_use = 1;
+ vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
+ }
+INTA_MODE:
+#endif
+
+ if (vdev->config.intr_type == INTA) {
+ snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
+ "%s:vxge:INTA", vdev->ndev->name);
+ vxge_hw_device_set_intr_type(vdev->devh,
+ VXGE_HW_INTR_MODE_IRQLINE);
+
+ vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
+
+ ret = request_irq((int) vdev->pdev->irq,
+ vxge_isr_napi,
+ IRQF_SHARED, vdev->desc[0], vdev);
+ if (ret) {
+ vxge_debug_init(VXGE_ERR,
+ "%s %s-%d: ISR registration failed",
+ VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
+ return -ENODEV;
+ }
+ vxge_debug_init(VXGE_TRACE,
+ "new %s-%d line allocated",
+ "IRQ", vdev->pdev->irq);
+ }
+
+ return VXGE_HW_OK;
+}
+
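+/*
+ * Timer callback: reset any vpath flagged in vp_reset, then re-arm
+ * itself to run again in half a second.
+ */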
+static void vxge_poll_vp_reset(unsigned long data)
+{
+ struct vxgedev *vdev = (struct vxgedev *)data;
+ int i, j = 0;
+
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ if (test_bit(i, &vdev->vp_reset)) {
+ vxge_reset_vpath(vdev, i);
+ j++;
+ }
+ }
+ if (j && (vdev->config.intr_type != MSI_X)) {
+ vxge_hw_device_unmask_all(vdev->devh);
+ vxge_hw_device_flush_io(vdev->devh);
+ }
+
+ mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
+}
+
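+/*
+ * Timer callback: watch for stalled receive vpaths. A vpath that has
+ * received no frames since the last poll and fails the leak check two
+ * polls in a row is scheduled for reset.
+ */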
+static void vxge_poll_vp_lockup(unsigned long data)
+{
+ struct vxgedev *vdev = (struct vxgedev *)data;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxge_vpath *vpath;
+ struct vxge_ring *ring;
+ int i;
+ unsigned long rx_frms;
+
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ ring = &vdev->vpaths[i].ring;
+
+ /* Frame count, truncated to the machine word size */
+ rx_frms = ACCESS_ONCE(ring->stats.rx_frms);
+
+ /* Did this vpath receive any packets? */
+ if (ring->stats.prev_rx_frms == rx_frms) {
+ status = vxge_hw_vpath_check_leak(ring->handle);
+
+ /* Did it receive any packets last time? */
+ if ((VXGE_HW_FAIL == status) &&
+ (VXGE_HW_FAIL == ring->last_status)) {
+
+ /* schedule vpath reset */
+ if (!test_and_set_bit(i, &vdev->vp_reset)) {
+ vpath = &vdev->vpaths[i];
+
+ /* disable interrupts for this vpath */
+ vxge_vpath_intr_disable(vdev, i);
+
+ /* stop the queue for this vpath */
+ netif_tx_stop_queue(vpath->fifo.txq);
+ continue;
+ }
+ }
+ }
+ ring->stats.prev_rx_frms = rx_frms;
+ ring->last_status = status;
+ }
+
+ /* Check every millisecond */
+ mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
+}
+
+static u32 vxge_fix_features(struct net_device *dev, u32 features)
+{
+ u32 changed = dev->features ^ features;
+
+ /* Enabling RTH requires some of the logic in vxge_device_register and a
+ * vpath reset. Due to these restrictions, only allow modification
+ * while the interface is down.
+ */
+ if ((changed & NETIF_F_RXHASH) && netif_running(dev))
+ features ^= NETIF_F_RXHASH;
+
+ return features;
+}
+
+static int vxge_set_features(struct net_device *dev, u32 features)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+ u32 changed = dev->features ^ features;
+
+ if (!(changed & NETIF_F_RXHASH))
+ return 0;
+
+ /* !netif_running() ensured by vxge_fix_features() */
+
+ vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
+ if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) {
+ dev->features = features ^ NETIF_F_RXHASH;
+ vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * vxge_open
+ * @dev: pointer to the device structure.
+ *
+ * This function is the open entry point of the driver. It mainly calls a
+ * function to allocate Rx buffers and inserts them into the buffer
+ * descriptors and then enables the Rx part of the NIC.
+ * Return value: '0' on success and an appropriate negative errno value
+ * on failure.
+ */
+static int vxge_open(struct net_device *dev)
+{
+ enum vxge_hw_status status;
+ struct vxgedev *vdev;
+ struct __vxge_hw_device *hldev;
+ struct vxge_vpath *vpath;
+ int ret = 0;
+ int i;
+ u64 val64, function_mode;
+
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s: %s:%d", dev->name, __func__, __LINE__);
+
+ vdev = netdev_priv(dev);
+ hldev = pci_get_drvdata(vdev->pdev);
+ function_mode = vdev->config.device_hw_info.function_mode;
+
+ /* make sure the link is off by default every time the NIC is
+ * initialized */
+ netif_carrier_off(dev);
+
+ /* Open VPATHs */
+ status = vxge_open_vpaths(vdev);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: fatal: Vpath open failed", vdev->ndev->name);
+ ret = -EPERM;
+ goto out0;
+ }
+
+ vdev->mtu = dev->mtu;
+
+ status = vxge_add_isr(vdev);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: fatal: ISR add failed", dev->name);
+ ret = -EPERM;
+ goto out1;
+ }
+
+ if (vdev->config.intr_type != MSI_X) {
+ netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
+ vdev->config.napi_weight);
+ napi_enable(&vdev->napi);
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ vpath = &vdev->vpaths[i];
+ vpath->ring.napi_p = &vdev->napi;
+ }
+ } else {
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ vpath = &vdev->vpaths[i];
+ netif_napi_add(dev, &vpath->ring.napi,
+ vxge_poll_msix, vdev->config.napi_weight);
+ napi_enable(&vpath->ring.napi);
+ vpath->ring.napi_p = &vpath->ring.napi;
+ }
+ }
+
+ /* configure RTH */
+ if (vdev->config.rth_steering) {
+ status = vxge_rth_configure(vdev);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: fatal: RTH configuration failed",
+ dev->name);
+ ret = -EPERM;
+ goto out2;
+ }
+ }
+ printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
+ hldev->config.rth_en ? "enabled" : "disabled");
+
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ vpath = &vdev->vpaths[i];
+
+ /* set initial mtu before enabling the device */
+ status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: fatal: can not set new MTU", dev->name);
+ ret = -EPERM;
+ goto out2;
+ }
+ }
+
+ VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
+ vxge_debug_init(vdev->level_trace,
+ "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
+ VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
+
+ /* Restore the DA, VID table and also multicast and promiscuous mode
+ * states
+ */
+ if (vdev->all_multi_flg) {
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ vpath = &vdev->vpaths[i];
+ vxge_restore_vpath_mac_addr(vpath);
+ vxge_restore_vpath_vid_table(vpath);
+
+ status = vxge_hw_vpath_mcast_enable(vpath->handle);
+ if (status != VXGE_HW_OK)
+ vxge_debug_init(VXGE_ERR,
+ "%s:%d Enabling multicast failed",
+ __func__, __LINE__);
+ }
+ }
+
+ /* Enable vpaths to sniff all unicast/multicast traffic that is not
+ * addressed to them. Promiscuous mode is allowed for the PF only.
+ */
+
+ val64 = 0;
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
+ val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);
+
+ vxge_hw_mgmt_reg_write(vdev->devh,
+ vxge_hw_mgmt_reg_type_mrpcim,
+ 0,
+ (ulong)offsetof(struct vxge_hw_mrpcim_reg,
+ rxmac_authorize_all_addr),
+ val64);
+
+ vxge_hw_mgmt_reg_write(vdev->devh,
+ vxge_hw_mgmt_reg_type_mrpcim,
+ 0,
+ (ulong)offsetof(struct vxge_hw_mrpcim_reg,
+ rxmac_authorize_all_vid),
+ val64);
+
+ vxge_set_multicast(dev);
+
+ /* Enable bcast and mcast for all vpaths */
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ vpath = &vdev->vpaths[i];
+ status = vxge_hw_vpath_bcast_enable(vpath->handle);
+ if (status != VXGE_HW_OK)
+ vxge_debug_init(VXGE_ERR,
+ "%s : Can not enable bcast for vpath "
+ "id %d", dev->name, i);
+ if (vdev->config.addr_learn_en) {
+ status = vxge_hw_vpath_mcast_enable(vpath->handle);
+ if (status != VXGE_HW_OK)
+ vxge_debug_init(VXGE_ERR,
+ "%s : Can not enable mcast for vpath "
+ "id %d", dev->name, i);
+ }
+ }
+
+ vxge_hw_device_setpause_data(vdev->devh, 0,
+ vdev->config.tx_pause_enable,
+ vdev->config.rx_pause_enable);
+
+ if (vdev->vp_reset_timer.function == NULL)
+ vxge_os_timer(vdev->vp_reset_timer,
+ vxge_poll_vp_reset, vdev, (HZ/2));
+
+ /* There is no need to check for RxD leak and RxD lookup on Titan1A */
+ if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
+ vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
+ HZ / 2);
+
+ set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+
+ smp_wmb();
+
+ if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
+ netif_carrier_on(vdev->ndev);
+ netdev_notice(vdev->ndev, "Link Up\n");
+ vdev->stats.link_up++;
+ }
+
+ vxge_hw_device_intr_enable(vdev->devh);
+
+ smp_wmb();
+
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ vpath = &vdev->vpaths[i];
+
+ vxge_hw_vpath_enable(vpath->handle);
+ smp_wmb();
+ vxge_hw_vpath_rx_doorbell_init(vpath->handle);
+ }
+
+ netif_tx_start_all_queues(vdev->ndev);
+
+ /* configure CI */
+ vxge_config_ci_for_tti_rti(vdev);
+
+ goto out0;
+
+out2:
+ vxge_rem_isr(vdev);
+
+ /* Disable napi */
+ if (vdev->config.intr_type != MSI_X)
+ napi_disable(&vdev->napi);
+ else {
+ for (i = 0; i < vdev->no_of_vpath; i++)
+ napi_disable(&vdev->vpaths[i].ring.napi);
+ }
+
+out1:
+ vxge_close_vpaths(vdev, 0);
+out0:
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s: %s:%d Exiting...",
+ dev->name, __func__, __LINE__);
+ return ret;
+}
+
+/* Loop through the mac address list and delete all the entries */
+static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
+{
+
+ struct list_head *entry, *next;
+ if (list_empty(&vpath->mac_addr_list))
+ return;
+
+ list_for_each_safe(entry, next, &vpath->mac_addr_list) {
+ list_del(entry);
+ kfree((struct vxge_mac_addrs *)entry);
+ }
+}
+
+static void vxge_napi_del_all(struct vxgedev *vdev)
+{
+ int i;
+ if (vdev->config.intr_type != MSI_X)
+ netif_napi_del(&vdev->napi);
+ else {
+ for (i = 0; i < vdev->no_of_vpath; i++)
+ netif_napi_del(&vdev->vpaths[i].ring.napi);
+ }
+}
+
+static int do_vxge_close(struct net_device *dev, int do_io)
+{
+ enum vxge_hw_status status;
+ struct vxgedev *vdev;
+ struct __vxge_hw_device *hldev;
+ int i;
+ u64 val64, vpath_vector;
+ vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
+ dev->name, __func__, __LINE__);
+
+ vdev = netdev_priv(dev);
+ hldev = pci_get_drvdata(vdev->pdev);
+
+ if (unlikely(!is_vxge_card_up(vdev)))
+ return 0;
+
+ /* If vxge_handle_crit_err task is executing,
+ * wait till it completes. */
+ while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
+ msleep(50);
+
+ if (do_io) {
+ /* Put the vpath back in normal mode */
+ vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
+ status = vxge_hw_mgmt_reg_read(vdev->devh,
+ vxge_hw_mgmt_reg_type_mrpcim,
+ 0,
+ (ulong)offsetof(
+ struct vxge_hw_mrpcim_reg,
+ rts_mgr_cbasin_cfg),
+ &val64);
+ if (status == VXGE_HW_OK) {
+ val64 &= ~vpath_vector;
+ status = vxge_hw_mgmt_reg_write(vdev->devh,
+ vxge_hw_mgmt_reg_type_mrpcim,
+ 0,
+ (ulong)offsetof(
+ struct vxge_hw_mrpcim_reg,
+ rts_mgr_cbasin_cfg),
+ val64);
+ }
+
+ /* Remove the function 0 from promiscuous mode */
+ vxge_hw_mgmt_reg_write(vdev->devh,
+ vxge_hw_mgmt_reg_type_mrpcim,
+ 0,
+ (ulong)offsetof(struct vxge_hw_mrpcim_reg,
+ rxmac_authorize_all_addr),
+ 0);
+
+ vxge_hw_mgmt_reg_write(vdev->devh,
+ vxge_hw_mgmt_reg_type_mrpcim,
+ 0,
+ (ulong)offsetof(struct vxge_hw_mrpcim_reg,
+ rxmac_authorize_all_vid),
+ 0);
+
+ smp_wmb();
+ }
+
+ if (vdev->titan1)
+ del_timer_sync(&vdev->vp_lockup_timer);
+
+ del_timer_sync(&vdev->vp_reset_timer);
+
+ if (do_io)
+ vxge_hw_device_wait_receive_idle(hldev);
+
+ clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+
+ /* Disable napi */
+ if (vdev->config.intr_type != MSI_X)
+ napi_disable(&vdev->napi);
+ else {
+ for (i = 0; i < vdev->no_of_vpath; i++)
+ napi_disable(&vdev->vpaths[i].ring.napi);
+ }
+
+ netif_carrier_off(vdev->ndev);
+ netdev_notice(vdev->ndev, "Link Down\n");
+ netif_tx_stop_all_queues(vdev->ndev);
+
+ /* Note that at this point xmit() is stopped by upper layer */
+ if (do_io)
+ vxge_hw_device_intr_disable(vdev->devh);
+
+ vxge_rem_isr(vdev);
+
+ vxge_napi_del_all(vdev);
+
+ if (do_io)
+ vxge_reset_all_vpaths(vdev);
+
+ vxge_close_vpaths(vdev, 0);
+
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
+
+ clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
+
+ return 0;
+}
+
+/**
+ * vxge_close
+ * @dev: device pointer.
+ *
+ * This is the stop entry point of the driver. It needs to undo exactly
+ * whatever was done by the open entry point, thus it's usually referred to
+ * as the close function. Among other things this function mainly stops the
+ * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
+ * Return value: '0' on success and an appropriate negative errno value
+ * on failure.
+ */
+static int vxge_close(struct net_device *dev)
+{
+ do_vxge_close(dev, 1);
+ return 0;
+}
+
+/**
+ * vxge_change_mtu
+ * @dev: net device pointer.
+ * @new_mtu: the new MTU size for the device.
+ *
+ * A driver entry point to change MTU size for the device. Before changing
+ * the MTU the device must be stopped.
+ */
+static int vxge_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+
+ vxge_debug_entryexit(vdev->level_trace,
+ "%s:%d", __func__, __LINE__);
+ if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
+ vxge_debug_init(vdev->level_err,
+ "%s: mtu size is invalid", dev->name);
+ return -EPERM;
+ }
+
+ /* check if device is down already */
+ if (unlikely(!is_vxge_card_up(vdev))) {
+ /* just store the new value; it will be used later in open() */
+ dev->mtu = new_mtu;
+ vxge_debug_init(vdev->level_err,
+ "%s", "device is down on MTU change");
+ return 0;
+ }
+
+ vxge_debug_init(vdev->level_trace,
+ "trying to apply new MTU %d", new_mtu);
+
+ if (vxge_close(dev))
+ return -EIO;
+
+ dev->mtu = new_mtu;
+ vdev->mtu = new_mtu;
+
+ if (vxge_open(dev))
+ return -EIO;
+
+ vxge_debug_init(vdev->level_trace,
+ "%s: MTU changed to %d", vdev->ndev->name, new_mtu);
+
+ vxge_debug_entryexit(vdev->level_trace,
+ "%s:%d Exiting...", __func__, __LINE__);
+
+ return 0;
+}
+
+/**
+ * vxge_get_stats64
+ * @dev: pointer to the device structure
+ * @stats: pointer to struct rtnl_link_stats64
+ *
+ */
+static struct rtnl_link_stats64 *
+vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+ int k;
+
+ /* net_stats already zeroed by caller */
+ for (k = 0; k < vdev->no_of_vpath; k++) {
+ struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats;
+ struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats;
+ unsigned int start;
+ u64 packets, bytes, multicast;
+
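+ /* Snapshot the Rx counters under the seqcount so the 64-bit
+ * values stay consistent on 32-bit hosts.
+ */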
+ do {
+ start = u64_stats_fetch_begin(&rxstats->syncp);
+
+ packets = rxstats->rx_frms;
+ multicast = rxstats->rx_mcast;
+ bytes = rxstats->rx_bytes;
+ } while (u64_stats_fetch_retry(&rxstats->syncp, start));
+
+ net_stats->rx_packets += packets;
+ net_stats->rx_bytes += bytes;
+ net_stats->multicast += multicast;
+
+ net_stats->rx_errors += rxstats->rx_errors;
+ net_stats->rx_dropped += rxstats->rx_dropped;
+
+ do {
+ start = u64_stats_fetch_begin(&txstats->syncp);
+
+ packets = txstats->tx_frms;
+ bytes = txstats->tx_bytes;
+ } while (u64_stats_fetch_retry(&txstats->syncp, start));
+
+ net_stats->tx_packets += packets;
+ net_stats->tx_bytes += bytes;
+ net_stats->tx_errors += txstats->tx_errors;
+ }
+
+ return net_stats;
+}
+
+static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
+{
+ enum vxge_hw_status status;
+ u64 val64;
+
+ /* Timestamp is passed to the driver via the FCS, therefore we
+ * must disable the FCS stripping by the adapter. Since this is
+ * required for the driver to load (due to a hardware bug),
+ * there is no need to do anything special here.
+ */
+ val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
+ VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
+ VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
+
+ status = vxge_hw_mgmt_reg_write(devh,
+ vxge_hw_mgmt_reg_type_mrpcim,
+ 0,
+ offsetof(struct vxge_hw_mrpcim_reg,
+ xmac_timestamp),
+ val64);
+ vxge_hw_device_flush_io(devh);
+ devh->config.hwts_en = VXGE_HW_HWTS_ENABLE;
+ return status;
+}
+
+static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
+{
+ struct hwtstamp_config config;
+ int i;
+
+ if (copy_from_user(&config, data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ /* Transmit HW Timestamp not supported */
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+ case HWTSTAMP_TX_ON:
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ vdev->rx_hwts = 0;
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
+ return -EFAULT;
+
+ vdev->rx_hwts = 1;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ for (i = 0; i < vdev->no_of_vpath; i++)
+ vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
+
+ if (copy_to_user(data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * vxge_ioctl
+ * @dev: Device pointer.
+ * @rq: An IOCTL specific structure that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * @cmd: This is used to distinguish between the different commands that
+ * can be passed to the IOCTL functions.
+ *
+ * Entry point for the Ioctl.
+ */
+static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+ int ret;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * vxge_tx_watchdog
+ * @dev: pointer to net device structure
+ *
+ * Watchdog for transmit side.
+ * This function is triggered if the Tx Queue is stopped
+ * for a pre-defined amount of time when the Interface is still up.
+ */
+static void vxge_tx_watchdog(struct net_device *dev)
+{
+ struct vxgedev *vdev;
+
+ vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
+
+ vdev = netdev_priv(dev);
+
+ vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
+
+ schedule_work(&vdev->reset_task);
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s:%d Exiting...", __func__, __LINE__);
+}
+
+/**
+ * vxge_vlan_rx_add_vid
+ * @dev: net device pointer.
+ * @vid: vid
+ *
+ * Add the vlan id to the device's vlan id table
+ */
+static void
+vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct vxge_vpath *vpath;
+ int vp_id;
+
+ /* Add this vlan id to each open vpath's vid table */
+ for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
+ vpath = &vdev->vpaths[vp_id];
+ if (!vpath->is_open)
+ continue;
+ vxge_hw_vpath_vid_add(vpath->handle, vid);
+ }
+ set_bit(vid, vdev->active_vlans);
+}
+
+/**
+ * vxge_vlan_rx_kill_vid
+ * @dev: net device pointer.
+ * @vid: vid
+ *
+ * Remove the vlan id from the device's vlan id table
+ */
+static void
+vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct vxge_vpath *vpath;
+ int vp_id;
+
+ vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
+
+ /* Delete this vlan from the vid table */
+ for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
+ vpath = &vdev->vpaths[vp_id];
+ if (!vpath->is_open)
+ continue;
+ vxge_hw_vpath_vid_delete(vpath->handle, vid);
+ }
+ vxge_debug_entryexit(VXGE_TRACE,
+ "%s:%d Exiting...", __func__, __LINE__);
+ clear_bit(vid, vdev->active_vlans);
+}
+
+static const struct net_device_ops vxge_netdev_ops = {
+ .ndo_open = vxge_open,
+ .ndo_stop = vxge_close,
+ .ndo_get_stats64 = vxge_get_stats64,
+ .ndo_start_xmit = vxge_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = vxge_set_multicast,
+ .ndo_do_ioctl = vxge_ioctl,
+ .ndo_set_mac_address = vxge_set_mac_addr,
+ .ndo_change_mtu = vxge_change_mtu,
+ .ndo_fix_features = vxge_fix_features,
+ .ndo_set_features = vxge_set_features,
+ .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
+ .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
+ .ndo_tx_timeout = vxge_tx_watchdog,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = vxge_netpoll,
+#endif
+};
+
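+/*
+ * Allocate the net_device (multiqueue when Tx steering is enabled),
+ * wire up its ops and feature flags, and register it with the stack.
+ */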
+static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
+ struct vxge_config *config,
+ int high_dma, int no_of_vpath,
+ struct vxgedev **vdev_out)
+{
+ struct net_device *ndev;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxgedev *vdev;
+ int ret = 0, no_of_queue = 1;
+ u64 stat;
+
+ *vdev_out = NULL;
+ if (config->tx_steering_type)
+ no_of_queue = no_of_vpath;
+
+ ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
+ no_of_queue);
+ if (ndev == NULL) {
+ vxge_debug_init(
+ vxge_hw_device_trace_level_get(hldev),
+ "%s : device allocation failed", __func__);
+ ret = -ENODEV;
+ goto _out0;
+ }
+
+ vxge_debug_entryexit(
+ vxge_hw_device_trace_level_get(hldev),
+ "%s: %s:%d Entering...",
+ ndev->name, __func__, __LINE__);
+
+ vdev = netdev_priv(ndev);
+ memset(vdev, 0, sizeof(struct vxgedev));
+
+ vdev->ndev = ndev;
+ vdev->devh = hldev;
+ vdev->pdev = hldev->pdev;
+ memcpy(&vdev->config, config, sizeof(struct vxge_config));
+ vdev->rx_hwts = 0;
+ vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
+
+ SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
+
+ ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_HW_VLAN_TX;
+ if (vdev->config.rth_steering != NO_STEERING)
+ ndev->hw_features |= NETIF_F_RXHASH;
+
+ ndev->features |= ndev->hw_features |
+ NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+
+ /* Driver entry points */
+ ndev->irq = vdev->pdev->irq;
+ ndev->base_addr = (unsigned long) hldev->bar0;
+
+ ndev->netdev_ops = &vxge_netdev_ops;
+
+ ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
+ INIT_WORK(&vdev->reset_task, vxge_reset);
+
+ vxge_initialize_ethtool_ops(ndev);
+
+ /* Allocate memory for vpath */
+ vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
+ no_of_vpath, GFP_KERNEL);
+ if (!vdev->vpaths) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: vpath memory allocation failed",
+ vdev->ndev->name);
+ ret = -ENOMEM;
+ goto _out1;
+ }
+
+ vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
+ "%s : checksuming enabled", __func__);
+
+ if (high_dma) {
+ ndev->features |= NETIF_F_HIGHDMA;
+ vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
+ "%s : using High DMA", __func__);
+ }
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
+ "%s: %s : device registration failed!",
+ ndev->name, __func__);
+ goto _out2;
+ }
+
+ /* Set the factory defined MAC address initially */
+ ndev->addr_len = ETH_ALEN;
+
+ /* Leave the link state off at this point; when the link change
+ * interrupt arrives, the state will be updated automatically to
+ * the right state.
+ */
+ netif_carrier_off(ndev);
+
+ vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
+ "%s: Ethernet device registered",
+ ndev->name);
+
+ hldev->ndev = ndev;
+ *vdev_out = vdev;
+
+ /* Resetting the Device stats */
+ status = vxge_hw_mrpcim_stats_access(
+ hldev,
+ VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
+ 0,
+ 0,
+ &stat);
+
+ if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
+ vxge_debug_init(
+ vxge_hw_device_trace_level_get(hldev),
+ "%s: device stats clear returns"
+ "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);
+
+ vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
+ "%s: %s:%d Exiting...",
+ ndev->name, __func__, __LINE__);
+
+ return ret;
+_out2:
+ kfree(vdev->vpaths);
+_out1:
+ free_netdev(ndev);
+_out0:
+ return ret;
+}
+
+/*
+ * vxge_device_unregister
+ *
+ * This function will unregister and free network device
+ */
+static void vxge_device_unregister(struct __vxge_hw_device *hldev)
+{
+ struct vxgedev *vdev;
+ struct net_device *dev;
+ char buf[IFNAMSIZ];
+
+ dev = hldev->ndev;
+ vdev = netdev_priv(dev);
+
+ vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
+ __func__, __LINE__);
+
+ strncpy(buf, dev->name, IFNAMSIZ);
+
+ flush_work_sync(&vdev->reset_task);
+
+ /* unregister_netdev() will call stop() if the device is up */
+ unregister_netdev(dev);
+
+ kfree(vdev->vpaths);
+
+ /* we are safe to free it now */
+ free_netdev(dev);
+
+ vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
+ buf);
+ vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
+ __func__, __LINE__);
+}
+
+/*
+ * vxge_callback_crit_err
+ *
+ * This function is called by the alarm handler in interrupt context.
+ * Driver must analyze it based on the event type.
+ */
+static void
+vxge_callback_crit_err(struct __vxge_hw_device *hldev,
+ enum vxge_hw_event type, u64 vp_id)
+{
+ struct net_device *dev = hldev->ndev;
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct vxge_vpath *vpath = NULL;
+ int vpath_idx;
+
+ vxge_debug_entryexit(vdev->level_trace,
+ "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
+
+ /* Note: This event type should be used for device wide
+ * indications only - Serious errors, Slot freeze and critical errors
+ */
+ vdev->cric_err_event = type;
+
+ for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
+ vpath = &vdev->vpaths[vpath_idx];
+ if (vpath->device_id == vp_id)
+ break;
+ }
+
+ if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
+ if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: Slot is frozen", vdev->ndev->name);
+ } else if (type == VXGE_HW_EVENT_SERR) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: Encountered Serious Error",
+ vdev->ndev->name);
+ } else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
+ vxge_debug_init(VXGE_ERR,
+ "%s: Encountered Critical Error",
+ vdev->ndev->name);
+ }
+
+ if ((type == VXGE_HW_EVENT_SERR) ||
+ (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
+ if (unlikely(vdev->exec_mode))
+ clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+ } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
+ vxge_hw_device_mask_all(hldev);
+ if (unlikely(vdev->exec_mode))
+ clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+ } else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
+ (type == VXGE_HW_EVENT_VPATH_ERR)) {
+
+ if (unlikely(vdev->exec_mode))
+ clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+ else {
+ /* check if this vpath is already set for reset */
+ if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
+
+ /* disable interrupts for this vpath */
+ vxge_vpath_intr_disable(vdev, vpath_idx);
+
+ /* stop the queue for this vpath */
+ netif_tx_stop_queue(vpath->fifo.txq);
+ }
+ }
+ }
+
+ vxge_debug_entryexit(vdev->level_trace,
+ "%s: %s:%d Exiting...",
+ vdev->ndev->name, __func__, __LINE__);
+}
+
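+/*
+ * Sanitize the user-supplied bw_percentage[] array: fall back to equal
+ * shares when any entry is zero or the sum exceeds 100%, otherwise
+ * spread the leftover bandwidth across the remaining vpaths, subject
+ * to a 2% minimum per vpath.
+ */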
+static void verify_bandwidth(void)
+{
+ int i, band_width, total = 0, equal_priority = 0;
+
+ /* 1. If user enters 0 for some fifo, give equal priority to all */
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ if (bw_percentage[i] == 0) {
+ equal_priority = 1;
+ break;
+ }
+ }
+
+ if (!equal_priority) {
+ /* 2. If sum exceeds 100, give equal priority to all */
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ if (bw_percentage[i] == 0xFF)
+ break;
+
+ total += bw_percentage[i];
+ if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
+ equal_priority = 1;
+ break;
+ }
+ }
+ }
+
+ if (!equal_priority) {
+ /* Is all the bandwidth consumed? */
+ if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
+ if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
+ /* Split the rest of the bandwidth equally among the remaining VPs */
+ band_width =
+ (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
+ (VXGE_HW_MAX_VIRTUAL_PATHS - i);
+ if (band_width < 2) /* min of 2% */
+ equal_priority = 1;
+ else {
+ for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
+ i++)
+ bw_percentage[i] =
+ band_width;
+ }
+ }
+ } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
+ equal_priority = 1;
+ }
+
+ if (equal_priority) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: Assigning equal bandwidth to all the vpaths",
+ VXGE_DRIVER_NAME);
+ bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
+ VXGE_HW_MAX_VIRTUAL_PATHS;
+ for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
+ bw_percentage[i] = bw_percentage[0];
+ }
+}
+
+/*
+ * Vpath configuration
+ */
+static int __devinit vxge_config_vpaths(
+ struct vxge_hw_device_config *device_config,
+ u64 vpath_mask, struct vxge_config *config_param)
+{
+ int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
+ u32 txdl_size, txdl_per_memblock;
+
+ temp = driver_config->vpath_per_dev;
+ if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
+ (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
+ /* No more CPUs. Return zero vpaths. */
+ if (driver_config->g_no_cpus == -1)
+ return 0;
+
+ if (!driver_config->g_no_cpus)
+ driver_config->g_no_cpus = num_online_cpus();
+
+ driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
+ if (!driver_config->vpath_per_dev)
+ driver_config->vpath_per_dev = 1;
+
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
+ if (vxge_bVALn(vpath_mask, i, 1))
+ default_no_vpath++;
+ if (default_no_vpath < driver_config->vpath_per_dev)
+ driver_config->vpath_per_dev = default_no_vpath;
+
+ driver_config->g_no_cpus = driver_config->g_no_cpus -
+ (driver_config->vpath_per_dev * 2);
+ if (driver_config->g_no_cpus <= 0)
+ driver_config->g_no_cpus = -1;
+ }
+
+ if (driver_config->vpath_per_dev == 1) {
+ vxge_debug_ll_config(VXGE_TRACE,
+ "%s: Disable tx and rx steering, "
+ "as single vpath is configured", VXGE_DRIVER_NAME);
+ config_param->rth_steering = NO_STEERING;
+ config_param->tx_steering_type = NO_STEERING;
+ device_config->rth_en = 0;
+ }
+
+ /* configure bandwidth */
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
+ device_config->vp_config[i].min_bandwidth = bw_percentage[i];
+
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ device_config->vp_config[i].vp_id = i;
+ device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
+ if (no_of_vpaths < driver_config->vpath_per_dev) {
+ if (!vxge_bVALn(vpath_mask, i, 1)) {
+ vxge_debug_ll_config(VXGE_TRACE,
+ "%s: vpath: %d is not available",
+ VXGE_DRIVER_NAME, i);
+ continue;
+ } else {
+ vxge_debug_ll_config(VXGE_TRACE,
+ "%s: vpath: %d available",
+ VXGE_DRIVER_NAME, i);
+ no_of_vpaths++;
+ }
+ } else {
+ vxge_debug_ll_config(VXGE_TRACE,
+ "%s: vpath: %d is not configured, "
+ "max_config_vpath exceeded",
+ VXGE_DRIVER_NAME, i);
+ break;
+ }
+
+ /* Configure Tx fifo's */
+ device_config->vp_config[i].fifo.enable =
+ VXGE_HW_FIFO_ENABLE;
+ device_config->vp_config[i].fifo.max_frags =
+ MAX_SKB_FRAGS + 1;
+ device_config->vp_config[i].fifo.memblock_size =
+ VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
+
+ txdl_size = device_config->vp_config[i].fifo.max_frags *
+ sizeof(struct vxge_hw_fifo_txd);
+ txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
+
+ device_config->vp_config[i].fifo.fifo_blocks =
+ ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;
+
+ device_config->vp_config[i].fifo.intr =
+ VXGE_HW_FIFO_QUEUE_INTR_DISABLE;
+
+ /* Configure tti properties */
+ device_config->vp_config[i].tti.intr_enable =
+ VXGE_HW_TIM_INTR_ENABLE;
+
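+ /* Timer values below are given in usec; the *1000/272 arithmetic
+ * scales them to the hardware timer's 272 ns granularity.
+ */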
+ device_config->vp_config[i].tti.btimer_val =
+ (VXGE_TTI_BTIMER_VAL * 1000) / 272;
+
+ device_config->vp_config[i].tti.timer_ac_en =
+ VXGE_HW_TIM_TIMER_AC_ENABLE;
+
+ /* For msi-x with napi (each vector has a handler of its own) -
+ * Set CI to OFF for all vpaths
+ */
+ device_config->vp_config[i].tti.timer_ci_en =
+ VXGE_HW_TIM_TIMER_CI_DISABLE;
+
+ device_config->vp_config[i].tti.timer_ri_en =
+ VXGE_HW_TIM_TIMER_RI_DISABLE;
+
+ device_config->vp_config[i].tti.util_sel =
+ VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
+
+ device_config->vp_config[i].tti.ltimer_val =
+ (VXGE_TTI_LTIMER_VAL * 1000) / 272;
+
+ device_config->vp_config[i].tti.rtimer_val =
+ (VXGE_TTI_RTIMER_VAL * 1000) / 272;
+
+ device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
+ device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
+ device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
+ device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
+ device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
+ device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
+ device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;
+
+ /* Configure Rx rings */
+ device_config->vp_config[i].ring.enable =
+ VXGE_HW_RING_ENABLE;
+
+ device_config->vp_config[i].ring.ring_blocks =
+ VXGE_HW_DEF_RING_BLOCKS;
+
+ device_config->vp_config[i].ring.buffer_mode =
+ VXGE_HW_RING_RXD_BUFFER_MODE_1;
+
+ device_config->vp_config[i].ring.rxds_limit =
+ VXGE_HW_DEF_RING_RXDS_LIMIT;
+
+ device_config->vp_config[i].ring.scatter_mode =
+ VXGE_HW_RING_SCATTER_MODE_A;
+
+ /* Configure rti properties */
+ device_config->vp_config[i].rti.intr_enable =
+ VXGE_HW_TIM_INTR_ENABLE;
+
+ device_config->vp_config[i].rti.btimer_val =
+ (VXGE_RTI_BTIMER_VAL * 1000)/272;
+
+ device_config->vp_config[i].rti.timer_ac_en =
+ VXGE_HW_TIM_TIMER_AC_ENABLE;
+
+ device_config->vp_config[i].rti.timer_ci_en =
+ VXGE_HW_TIM_TIMER_CI_DISABLE;
+
+ device_config->vp_config[i].rti.timer_ri_en =
+ VXGE_HW_TIM_TIMER_RI_DISABLE;
+
+ device_config->vp_config[i].rti.util_sel =
+ VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
+
+ device_config->vp_config[i].rti.urange_a =
+ RTI_RX_URANGE_A;
+ device_config->vp_config[i].rti.urange_b =
+ RTI_RX_URANGE_B;
+ device_config->vp_config[i].rti.urange_c =
+ RTI_RX_URANGE_C;
+ device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
+ device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
+ device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
+ device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;
+
+ device_config->vp_config[i].rti.rtimer_val =
+ (VXGE_RTI_RTIMER_VAL * 1000) / 272;
+
+ device_config->vp_config[i].rti.ltimer_val =
+ (VXGE_RTI_LTIMER_VAL * 1000) / 272;
+
+ device_config->vp_config[i].rpa_strip_vlan_tag =
+ vlan_tag_strip;
+ }
+
+ driver_config->vpath_per_dev = temp;
+ return no_of_vpaths;
+}
+
+/* initialize device configuration */
+static void __devinit vxge_device_config_init(
+ struct vxge_hw_device_config *device_config,
+ int *intr_type)
+{
+ /* Used for CQRQ/SRQ. */
+ device_config->dma_blockpool_initial =
+ VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
+
+ device_config->dma_blockpool_max =
+ VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
+
+ if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
+ max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
+
+#ifndef CONFIG_PCI_MSI
+ vxge_debug_init(VXGE_ERR,
+ "%s: This Kernel does not support "
+ "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
+ *intr_type = INTA;
+#endif
+
+ /* Configure whether to use MSI-X or the INTA IRQ line. */
+ switch (*intr_type) {
+ case INTA:
+ device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
+ break;
+
+ case MSI_X:
+ device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
+ break;
+ }
+
+ /* Timer period between device poll */
+ device_config->device_poll_millis = VXGE_TIMER_DELAY;
+
+ /* Configure mac based steering. */
+ device_config->rts_mac_en = addr_learn_en;
+
+ /* Configure Vpaths */
+ device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;
+
+ vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
+ __func__);
+ vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
+ device_config->intr_mode);
+ vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
+ device_config->device_poll_millis);
+ vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
+ device_config->rth_en);
+ vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
+ device_config->rth_it_type);
+}
+
+static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
+{
+ int i;
+
+ vxge_debug_init(VXGE_TRACE,
+ "%s: %d Vpath(s) opened",
+ vdev->ndev->name, vdev->no_of_vpath);
+
+ switch (vdev->config.intr_type) {
+ case INTA:
+ vxge_debug_init(VXGE_TRACE,
+ "%s: Interrupt type INTA", vdev->ndev->name);
+ break;
+
+ case MSI_X:
+ vxge_debug_init(VXGE_TRACE,
+ "%s: Interrupt type MSI-X", vdev->ndev->name);
+ break;
+ }
+
+ if (vdev->config.rth_steering) {
+ vxge_debug_init(VXGE_TRACE,
+ "%s: RTH steering enabled for TCP_IPV4",
+ vdev->ndev->name);
+ } else {
+ vxge_debug_init(VXGE_TRACE,
+ "%s: RTH steering disabled", vdev->ndev->name);
+ }
+
+ switch (vdev->config.tx_steering_type) {
+ case NO_STEERING:
+ vxge_debug_init(VXGE_TRACE,
+ "%s: Tx steering disabled", vdev->ndev->name);
+ break;
+ case TX_PRIORITY_STEERING:
+ vxge_debug_init(VXGE_TRACE,
+ "%s: Unsupported tx steering option",
+ vdev->ndev->name);
+ vxge_debug_init(VXGE_TRACE,
+ "%s: Tx steering disabled", vdev->ndev->name);
+ vdev->config.tx_steering_type = 0;
+ break;
+ case TX_VLAN_STEERING:
+ vxge_debug_init(VXGE_TRACE,
+ "%s: Unsupported tx steering option",
+ vdev->ndev->name);
+ vxge_debug_init(VXGE_TRACE,
+ "%s: Tx steering disabled", vdev->ndev->name);
+ vdev->config.tx_steering_type = 0;
+ break;
+ case TX_MULTIQ_STEERING:
+ vxge_debug_init(VXGE_TRACE,
+ "%s: Tx multiqueue steering enabled",
+ vdev->ndev->name);
+ break;
+ case TX_PORT_STEERING:
+ vxge_debug_init(VXGE_TRACE,
+ "%s: Tx port steering enabled",
+ vdev->ndev->name);
+ break;
+ default:
+ vxge_debug_init(VXGE_ERR,
+ "%s: Unsupported tx steering type",
+ vdev->ndev->name);
+ vxge_debug_init(VXGE_TRACE,
+ "%s: Tx steering disabled", vdev->ndev->name);
+ vdev->config.tx_steering_type = 0;
+ }
+
+ if (vdev->config.addr_learn_en)
+ vxge_debug_init(VXGE_TRACE,
+ "%s: MAC Address learning enabled", vdev->ndev->name);
+
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ if (!vxge_bVALn(vpath_mask, i, 1))
+ continue;
+ vxge_debug_ll_config(VXGE_TRACE,
+ "%s: MTU size - %d", vdev->ndev->name,
+ ((struct __vxge_hw_device *)(vdev->devh))->
+ config.vp_config[i].mtu);
+ vxge_debug_init(VXGE_TRACE,
+ "%s: VLAN tag stripping %s", vdev->ndev->name,
+ ((struct __vxge_hw_device *)(vdev->devh))->
+ config.vp_config[i].rpa_strip_vlan_tag
+ ? "Enabled" : "Disabled");
+ vxge_debug_ll_config(VXGE_TRACE,
+ "%s: Max frags : %d", vdev->ndev->name,
+ ((struct __vxge_hw_device *)(vdev->devh))->
+ config.vp_config[i].fifo.max_frags);
+ break;
+ }
+}
+
+#ifdef CONFIG_PM
+/**
+ * vxge_pm_suspend - vxge power management suspend entry point
+ *
+ */
+static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ return -ENOSYS;
+}
+/**
+ * vxge_pm_resume - vxge power management resume entry point
+ *
+ */
+static int vxge_pm_resume(struct pci_dev *pdev)
+{
+ return -ENOSYS;
+}
+
+#endif
+
+/**
+ * vxge_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
+ struct net_device *netdev = hldev->ndev;
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev)) {
+ /* Bring down the card, while avoiding PCI I/O */
+ do_vxge_close(netdev, 0);
+ }
+
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * vxge_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ * At this point, the card has experienced a hard reset,
+ * followed by fixups by BIOS, and has its config space
+ * set up identically to what it was at cold boot.
+ */
+static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
+{
+ struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
+ struct net_device *netdev = hldev->ndev;
+
+ struct vxgedev *vdev = netdev_priv(netdev);
+
+ if (pci_enable_device(pdev)) {
+ netdev_err(netdev, "Cannot re-enable device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * vxge_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells
+ * us that it's OK to resume normal operation.
+ */
+static void vxge_io_resume(struct pci_dev *pdev)
+{
+ struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
+ struct net_device *netdev = hldev->ndev;
+
+ if (netif_running(netdev)) {
+ if (vxge_open(netdev)) {
+ netdev_err(netdev,
+ "Can't bring device back up after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+}
+
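+/* Map the adapter function mode to the number of functions it exposes. */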
+static inline u32 vxge_get_num_vfs(u64 function_mode)
+{
+ u32 num_functions = 0;
+
+ switch (function_mode) {
+ case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
+ case VXGE_HW_FUNCTION_MODE_SRIOV_8:
+ num_functions = 8;
+ break;
+ case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
+ num_functions = 1;
+ break;
+ case VXGE_HW_FUNCTION_MODE_SRIOV:
+ case VXGE_HW_FUNCTION_MODE_MRIOV:
+ case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
+ num_functions = 17;
+ break;
+ case VXGE_HW_FUNCTION_MODE_SRIOV_4:
+ num_functions = 4;
+ break;
+ case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
+ num_functions = 2;
+ break;
+ case VXGE_HW_FUNCTION_MODE_MRIOV_8:
+ num_functions = 8; /* TODO */
+ break;
+ }
+ return num_functions;
+}
+
+int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
+{
+ struct __vxge_hw_device *hldev = vdev->devh;
+ u32 maj, min, bld, cmaj, cmin, cbld;
+ enum vxge_hw_status status;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
+ if (ret) {
+ vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
+ VXGE_DRIVER_NAME, fw_name);
+ goto out;
+ }
+
+ /* Load the new firmware onto the adapter */
+ status = vxge_update_fw_image(hldev, fw->data, fw->size);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: FW image download to adapter failed '%s'.",
+ VXGE_DRIVER_NAME, fw_name);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Read the version of the new firmware */
+ status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: Upgrade read version failed '%s'.",
+ VXGE_DRIVER_NAME, fw_name);
+ ret = -EIO;
+ goto out;
+ }
+
+ cmaj = vdev->config.device_hw_info.fw_version.major;
+ cmin = vdev->config.device_hw_info.fw_version.minor;
+ cbld = vdev->config.device_hw_info.fw_version.build;
+ /* It's possible the version in /lib/firmware is not the latest version.
+ * If so, we could get into a loop of trying to upgrade to the latest
+ * and flashing the older version.
+ */
+ if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
+ !override) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
+ maj, min, bld);
+
+ /* Flash the adapter with the new firmware */
+ status = vxge_hw_flash_fw(hldev);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
+ VXGE_DRIVER_NAME, fw_name);
+ ret = -EIO;
+ goto out;
+ }
+
+ printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
+ "hard reset before using, thus requiring a system reboot or a "
+ "hotplug event.\n");
+
+out:
+ release_firmware(fw);
+ return ret;
+}
+
+static int vxge_probe_fw_update(struct vxgedev *vdev)
+{
+ u32 maj, min, bld;
+ int ret, gpxe = 0;
+ char *fw_name;
+
+ maj = vdev->config.device_hw_info.fw_version.major;
+ min = vdev->config.device_hw_info.fw_version.minor;
+ bld = vdev->config.device_hw_info.fw_version.build;
+
+ if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
+ return 0;
+
+ /* Ignore the build number when determining if the current firmware is
+ * "too new" to load the driver
+ */
+ if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
+ vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
+ "version, unable to load driver\n",
+ VXGE_DRIVER_NAME);
+ return -EINVAL;
+ }
+
+ /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
+ * work with this driver.
+ */
+ if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
+ vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
+ "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
+ return -EINVAL;
+ }
+
+	/* Determine whether the gPXE or the regular firmware image is needed */
+ if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
+ int i;
+ for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
+ if (vdev->devh->eprom_versions[i]) {
+ gpxe = 1;
+ break;
+ }
+ }
+ if (gpxe)
+ fw_name = "vxge/X3fw-pxe.ncf";
+ else
+ fw_name = "vxge/X3fw.ncf";
+
+ ret = vxge_fw_upgrade(vdev, fw_name, 0);
+	/* -EINVAL (firmware already at this version) and -ENOENT (firmware
+	 * image not found) are non-fatal when flashing on probe, so ignore
+	 * them and continue. Anything else, including a successful flash
+	 * (ret == 0, after which the adapter needs a hard reset before it
+	 * can be used), fails the probe.
+	 */
+	if (ret != -EINVAL && ret != -ENOENT)
+		return -EIO;
+	else
+		ret = 0;
+
+ if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
+ VXGE_FW_VER(maj, min, 0)) {
+ vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
+ " be used with this driver.\n"
+ "Please get the latest version from "
+ "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
+ VXGE_DRIVER_NAME, maj, min, bld);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
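+/* Check whether SR-IOV VFs are already enabled by testing the VF Enable
+ * bit in the device's SR-IOV extended capability, presumably set earlier
+ * by firmware or a previous driver load; probe uses this to avoid calling
+ * pci_enable_sriov() on an already-initialized device.
+ */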
+static int __devinit is_sriov_initialized(struct pci_dev *pdev)
+{
+ int pos;
+ u16 ctrl;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
+ if (ctrl & PCI_SRIOV_CTRL_VFE)
+ return 1;
+ }
+ return 0;
+}
+
+static const struct vxge_hw_uld_cbs vxge_callbacks = {
+ .link_up = vxge_callback_link_up,
+ .link_down = vxge_callback_link_down,
+ .crit_err = vxge_callback_crit_err,
+};
+
+/**
+ * vxge_probe - initialize a newly detected adapter
+ * @pdev : structure containing the PCI related information of the device.
+ * @pre: the entry in vxge_id_table that matched this device.
+ * Description:
+ * This function is called when a new PCI device gets detected and initializes
+ * it.
+ * Return value:
+ * returns 0 on success and negative on failure.
+ *
+ */
+static int __devinit
+vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
+{
+ struct __vxge_hw_device *hldev;
+ enum vxge_hw_status status;
+ int ret;
+ int high_dma = 0;
+ u64 vpath_mask = 0;
+ struct vxgedev *vdev;
+ struct vxge_config *ll_config = NULL;
+ struct vxge_hw_device_config *device_config = NULL;
+ struct vxge_hw_device_attr attr;
+ int i, j, no_of_vpath = 0, max_vpath_supported = 0;
+ u8 *macaddr;
+ struct vxge_mac_addrs *entry;
+ static int bus = -1, device = -1;
+ u32 host_type;
+ u8 new_device = 0;
+ enum vxge_hw_status is_privileged;
+ u32 function_mode;
+ u32 num_vfs = 0;
+
+ vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
+ attr.pdev = pdev;
+
+ /* In SRIOV-17 mode, functions of the same adapter
+ * can be deployed on different buses
+ */
+ if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
+ !pdev->is_virtfn)
+ new_device = 1;
+
+ bus = pdev->bus->number;
+ device = PCI_SLOT(pdev->devfn);
+
+ if (new_device) {
+ if (driver_config->config_dev_cnt &&
+ (driver_config->config_dev_cnt !=
+ driver_config->total_dev_cnt))
+ vxge_debug_init(VXGE_ERR,
+ "%s: Configured %d of %d devices",
+ VXGE_DRIVER_NAME,
+ driver_config->config_dev_cnt,
+ driver_config->total_dev_cnt);
+ driver_config->config_dev_cnt = 0;
+ driver_config->total_dev_cnt = 0;
+ }
+
+	/* Make the CPU-based vpath count calculation applicable to
+	 * individual functions as well.
+	 */
+ driver_config->g_no_cpus = 0;
+ driver_config->vpath_per_dev = max_config_vpath;
+
+ driver_config->total_dev_cnt++;
+ if (++driver_config->config_dev_cnt > max_config_dev) {
+ ret = 0;
+ goto _exit0;
+ }
+
+ device_config = kzalloc(sizeof(struct vxge_hw_device_config),
+ GFP_KERNEL);
+ if (!device_config) {
+ ret = -ENOMEM;
+ vxge_debug_init(VXGE_ERR,
+ "device_config : malloc failed %s %d",
+ __FILE__, __LINE__);
+ goto _exit0;
+ }
+
+ ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
+ if (!ll_config) {
+ ret = -ENOMEM;
+ vxge_debug_init(VXGE_ERR,
+			"ll_config : malloc failed %s %d",
+ __FILE__, __LINE__);
+ goto _exit0;
+ }
+ ll_config->tx_steering_type = TX_MULTIQ_STEERING;
+ ll_config->intr_type = MSI_X;
+ ll_config->napi_weight = NEW_NAPI_WEIGHT;
+ ll_config->rth_steering = RTH_STEERING;
+
+ /* get the default configuration parameters */
+ vxge_hw_device_config_default_get(device_config);
+
+ /* initialize configuration parameters */
+ vxge_device_config_init(device_config, &ll_config->intr_type);
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ vxge_debug_init(VXGE_ERR,
+ "%s : can not enable PCI device", __func__);
+ goto _exit0;
+ }
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ vxge_debug_ll_config(VXGE_TRACE,
+ "%s : using 64bit DMA", __func__);
+
+ high_dma = 1;
+
+ if (pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(64))) {
+ vxge_debug_init(VXGE_ERR,
+ "%s : unable to obtain 64bit DMA for "
+ "consistent allocations", __func__);
+ ret = -ENOMEM;
+ goto _exit1;
+ }
+ } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ vxge_debug_ll_config(VXGE_TRACE,
+ "%s : using 32bit DMA", __func__);
+ } else {
+ ret = -ENOMEM;
+ goto _exit1;
+ }
+
+ ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
+ if (ret) {
+ vxge_debug_init(VXGE_ERR,
+ "%s : request regions failed", __func__);
+ goto _exit1;
+ }
+
+ pci_set_master(pdev);
+
+ attr.bar0 = pci_ioremap_bar(pdev, 0);
+ if (!attr.bar0) {
+ vxge_debug_init(VXGE_ERR,
+ "%s : cannot remap io memory bar0", __func__);
+ ret = -ENODEV;
+ goto _exit2;
+ }
+ vxge_debug_ll_config(VXGE_TRACE,
+ "pci ioremap bar0: %p:0x%llx",
+ attr.bar0,
+ (unsigned long long)pci_resource_start(pdev, 0));
+
+ status = vxge_hw_device_hw_info_get(attr.bar0,
+ &ll_config->device_hw_info);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+			"%s: Reading of hardware info failed. "
+ "Please try upgrading the firmware.", VXGE_DRIVER_NAME);
+ ret = -EINVAL;
+ goto _exit3;
+ }
+
+ vpath_mask = ll_config->device_hw_info.vpath_mask;
+ if (vpath_mask == 0) {
+ vxge_debug_ll_config(VXGE_TRACE,
+ "%s: No vpaths available in device", VXGE_DRIVER_NAME);
+ ret = -EINVAL;
+ goto _exit3;
+ }
+
+ vxge_debug_ll_config(VXGE_TRACE,
+ "%s:%d Vpath mask = %llx", __func__, __LINE__,
+ (unsigned long long)vpath_mask);
+
+ function_mode = ll_config->device_hw_info.function_mode;
+ host_type = ll_config->device_hw_info.host_type;
+ is_privileged = __vxge_hw_device_is_privilaged(host_type,
+ ll_config->device_hw_info.func_id);
+
+ /* Check how many vpaths are available */
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ if (!((vpath_mask) & vxge_mBIT(i)))
+ continue;
+ max_vpath_supported++;
+ }
+
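+	/* vxge_get_num_vfs() counts all functions including the PF, so
+	 * subtract one below to get the number of VFs to enable.
+	 */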
+ if (new_device)
+ num_vfs = vxge_get_num_vfs(function_mode) - 1;
+
+ /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
+ if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
+ (ll_config->intr_type != INTA)) {
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret)
+			vxge_debug_ll_config(VXGE_ERR,
+				"Failed to enable SRIOV mode: %d\n", ret);
+ /* No need to fail out, as an error here is non-fatal */
+ }
+
+ /*
+ * Configure vpaths and get driver configured number of vpaths
+ * which is less than or equal to the maximum vpaths per function.
+ */
+ no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
+ if (!no_of_vpath) {
+ vxge_debug_ll_config(VXGE_ERR,
+ "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
+ ret = 0;
+ goto _exit3;
+ }
+
+ /* Setting driver callbacks */
+ attr.uld_callbacks = &vxge_callbacks;
+
+ status = vxge_hw_device_initialize(&hldev, &attr, device_config);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "Failed to initialize device (%d)", status);
+ ret = -EINVAL;
+ goto _exit3;
+ }
+
+ if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
+ ll_config->device_hw_info.fw_version.minor,
+ ll_config->device_hw_info.fw_version.build) >=
+ VXGE_EPROM_FW_VER) {
+ struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
+
+ status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
+ VXGE_DRIVER_NAME);
+ /* This is a non-fatal error, continue */
+ }
+
+ for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
+ hldev->eprom_versions[i] = img[i].version;
+ if (!img[i].is_valid)
+ break;
+ vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
+ "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
+ VXGE_EPROM_IMG_MAJOR(img[i].version),
+ VXGE_EPROM_IMG_MINOR(img[i].version),
+ VXGE_EPROM_IMG_FIX(img[i].version),
+ VXGE_EPROM_IMG_BUILD(img[i].version));
+ }
+ }
+
+	/* If FCS stripping is not disabled in the MAC, fail the driver load */
+	status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
+	if (status != VXGE_HW_OK) {
+		vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC,"
+			" failing driver load", VXGE_DRIVER_NAME);
+ ret = -EINVAL;
+ goto _exit4;
+ }
+
+	/* Always enable HWTS. This will always cause the FCS to be invalid,
+	 * because HWTS uses the FCS as the location of the timestamp. The HW
+	 * FCS checking will still correctly determine if there is a valid
+	 * checksum, and the FCS is being removed by the driver anyway. So no
+	 * functionality is being lost. Since it is always enabled, we now
+	 * simply use the ioctl call to set whether or not the driver should
+	 * be paying attention to the HWTS.
+	 */
+ if (is_privileged == VXGE_HW_OK) {
+ status = vxge_timestamp_config(hldev);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed",
+ VXGE_DRIVER_NAME);
+ ret = -EFAULT;
+ goto _exit4;
+ }
+ }
+
+ vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
+
+ /* set private device info */
+ pci_set_drvdata(pdev, hldev);
+
+ ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
+ ll_config->addr_learn_en = addr_learn_en;
+ ll_config->rth_algorithm = RTH_ALG_JENKINS;
+ ll_config->rth_hash_type_tcpipv4 = 1;
+ ll_config->rth_hash_type_ipv4 = 0;
+ ll_config->rth_hash_type_tcpipv6 = 0;
+ ll_config->rth_hash_type_ipv6 = 0;
+ ll_config->rth_hash_type_tcpipv6ex = 0;
+ ll_config->rth_hash_type_ipv6ex = 0;
+ ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
+ ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
+ ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
+
+ ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
+ &vdev);
+ if (ret) {
+ ret = -EINVAL;
+ goto _exit4;
+ }
+
+ ret = vxge_probe_fw_update(vdev);
+ if (ret)
+ goto _exit5;
+
+ vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
+ VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
+ vxge_hw_device_trace_level_get(hldev));
+
+ /* set private HW device info */
+ vdev->mtu = VXGE_HW_DEFAULT_MTU;
+ vdev->bar0 = attr.bar0;
+ vdev->max_vpath_supported = max_vpath_supported;
+ vdev->no_of_vpath = no_of_vpath;
+
+ /* Virtual Path count */
+ for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ if (!vxge_bVALn(vpath_mask, i, 1))
+ continue;
+ if (j >= vdev->no_of_vpath)
+ break;
+
+ vdev->vpaths[j].is_configured = 1;
+ vdev->vpaths[j].device_id = i;
+ vdev->vpaths[j].ring.driver_id = j;
+ vdev->vpaths[j].vdev = vdev;
+ vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
+ memcpy((u8 *)vdev->vpaths[j].macaddr,
+ ll_config->device_hw_info.mac_addrs[i],
+ ETH_ALEN);
+
+ /* Initialize the mac address list header */
+ INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);
+
+ vdev->vpaths[j].mac_addr_cnt = 0;
+ vdev->vpaths[j].mcast_addr_cnt = 0;
+ j++;
+ }
+ vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
+ vdev->max_config_port = max_config_port;
+
+ vdev->vlan_tag_strip = vlan_tag_strip;
+
+ /* map the hashing selector table to the configured vpaths */
+ for (i = 0; i < vdev->no_of_vpath; i++)
+ vdev->vpath_selector[i] = vpath_selector[i];
+
+ macaddr = (u8 *)vdev->vpaths[0].macaddr;
+
+ ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
+ ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
+ ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
+
+ vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
+ vdev->ndev->name, ll_config->device_hw_info.serial_number);
+
+ vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
+ vdev->ndev->name, ll_config->device_hw_info.part_number);
+
+ vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
+ vdev->ndev->name, ll_config->device_hw_info.product_desc);
+
+ vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
+ vdev->ndev->name, macaddr);
+
+ vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
+ vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
+
+ vxge_debug_init(VXGE_TRACE,
+ "%s: Firmware version : %s Date : %s", vdev->ndev->name,
+ ll_config->device_hw_info.fw_version.version,
+ ll_config->device_hw_info.fw_date.date);
+
+ if (new_device) {
+ switch (ll_config->device_hw_info.function_mode) {
+ case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
+ vxge_debug_init(VXGE_TRACE,
+ "%s: Single Function Mode Enabled", vdev->ndev->name);
+ break;
+ case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
+ vxge_debug_init(VXGE_TRACE,
+ "%s: Multi Function Mode Enabled", vdev->ndev->name);
+ break;
+ case VXGE_HW_FUNCTION_MODE_SRIOV:
+ vxge_debug_init(VXGE_TRACE,
+ "%s: Single Root IOV Mode Enabled", vdev->ndev->name);
+ break;
+ case VXGE_HW_FUNCTION_MODE_MRIOV:
+ vxge_debug_init(VXGE_TRACE,
+ "%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
+ break;
+ }
+ }
+
+ vxge_print_parm(vdev, vpath_mask);
+
+	/* Store the fw version for the ethtool option */
+ strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
+ memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
+ memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
+
+ /* Copy the station mac address to the list */
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
+		if (!entry) {
+			vxge_debug_init(VXGE_ERR,
+				"%s: mac_addr_list : memory allocation failed",
+				vdev->ndev->name);
+			ret = -ENOMEM;
+ goto _exit6;
+ }
+ macaddr = (u8 *)&entry->macaddr;
+ memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
+ list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
+ vdev->vpaths[i].mac_addr_cnt = 1;
+ }
+
+ kfree(device_config);
+
+ /*
+ * INTA is shared in multi-function mode. This is unlike the INTA
+ * implementation in MR mode, where each VH has its own INTA message.
+ * - INTA is masked (disabled) as long as at least one function sets
+ * its TITAN_MASK_ALL_INT.ALARM bit.
+ * - INTA is unmasked (enabled) when all enabled functions have cleared
+ * their own TITAN_MASK_ALL_INT.ALARM bit.
+ * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
+	 * Though this driver leaves the top-level interrupts unmasked while
+	 * leaving the required module interrupt bits masked on exit, there
+	 * could be a rogue driver around that does not follow this procedure,
+	 * resulting in a failure to generate interrupts. The following code is
+	 * present to prevent such a failure.
+ */
+
+ if (ll_config->device_hw_info.function_mode ==
+ VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
+ if (vdev->config.intr_type == INTA)
+ vxge_hw_device_unmask_all(hldev);
+
+ vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
+ vdev->ndev->name, __func__, __LINE__);
+
+ vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
+ VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
+ vxge_hw_device_trace_level_get(hldev));
+
+ kfree(ll_config);
+ return 0;
+
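+/* Error unwind: each label below undoes the setup performed before the
+ * corresponding goto, in reverse order of acquisition.
+ */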
+_exit6:
+ for (i = 0; i < vdev->no_of_vpath; i++)
+ vxge_free_mac_add_list(&vdev->vpaths[i]);
+_exit5:
+ vxge_device_unregister(hldev);
+_exit4:
+ pci_set_drvdata(pdev, NULL);
+ vxge_hw_device_terminate(hldev);
+ pci_disable_sriov(pdev);
+_exit3:
+ iounmap(attr.bar0);
+_exit2:
+ pci_release_region(pdev, 0);
+_exit1:
+ pci_disable_device(pdev);
+_exit0:
+ kfree(ll_config);
+ kfree(device_config);
+ driver_config->config_dev_cnt--;
+ driver_config->total_dev_cnt--;
+ return ret;
+}
+
+/**
+ * vxge_remove - Free the PCI device
+ * @pdev: structure containing the PCI related information of the device.
+ * Description: This function is called by the PCI subsystem to release a
+ * PCI device and free up all resources held by the device.
+ */
+static void __devexit vxge_remove(struct pci_dev *pdev)
+{
+ struct __vxge_hw_device *hldev;
+ struct vxgedev *vdev;
+ int i;
+
+ hldev = pci_get_drvdata(pdev);
+ if (hldev == NULL)
+ return;
+
+ vdev = netdev_priv(hldev->ndev);
+
+ vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
+ vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
+ __func__);
+
+ for (i = 0; i < vdev->no_of_vpath; i++)
+ vxge_free_mac_add_list(&vdev->vpaths[i]);
+
+ vxge_device_unregister(hldev);
+ pci_set_drvdata(pdev, NULL);
+ /* Do not call pci_disable_sriov here, as it will break child devices */
+ vxge_hw_device_terminate(hldev);
+ iounmap(vdev->bar0);
+ pci_release_region(pdev, 0);
+ pci_disable_device(pdev);
+ driver_config->config_dev_cnt--;
+ driver_config->total_dev_cnt--;
+
+ vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
+ __func__, __LINE__);
+ vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
+ __LINE__);
+}
+
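+/* On an uncorrectable PCI error the AER core invokes these callbacks in
+ * order: .error_detected, then .slot_reset once the bus has been reset,
+ * then .resume when traffic may flow again.
+ */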
+static struct pci_error_handlers vxge_err_handler = {
+ .error_detected = vxge_io_error_detected,
+ .slot_reset = vxge_io_slot_reset,
+ .resume = vxge_io_resume,
+};
+
+static struct pci_driver vxge_driver = {
+ .name = VXGE_DRIVER_NAME,
+ .id_table = vxge_id_table,
+ .probe = vxge_probe,
+ .remove = __devexit_p(vxge_remove),
+#ifdef CONFIG_PM
+ .suspend = vxge_pm_suspend,
+ .resume = vxge_pm_resume,
+#endif
+ .err_handler = &vxge_err_handler,
+};
+
+static int __init
+vxge_starter(void)
+{
+ int ret = 0;
+
+ pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
+ pr_info("Driver version: %s\n", DRV_VERSION);
+
+ verify_bandwidth();
+
+ driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
+ if (!driver_config)
+ return -ENOMEM;
+
+ ret = pci_register_driver(&vxge_driver);
+ if (ret) {
+ kfree(driver_config);
+ goto err;
+ }
+
+ if (driver_config->config_dev_cnt &&
+ (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
+ vxge_debug_init(VXGE_ERR,
+ "%s: Configured %d of %d devices",
+ VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
+ driver_config->total_dev_cnt);
+err:
+ return ret;
+}
+
+static void __exit
+vxge_closer(void)
+{
+ pci_unregister_driver(&vxge_driver);
+ kfree(driver_config);
+}
+module_init(vxge_starter);
+module_exit(vxge_closer);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
new file mode 100644
index 000000000000..f52a42d1dbb7
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -0,0 +1,519 @@
+/******************************************************************************
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ *
+ * vxge-main.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
+ * Virtualized Server Adapter.
+ * Copyright(c) 2002-2010 Exar Corp.
+ ******************************************************************************/
+#ifndef VXGE_MAIN_H
+#define VXGE_MAIN_H
+
+#include "vxge-traffic.h"
+#include "vxge-config.h"
+#include "vxge-version.h"
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+#define VXGE_DRIVER_NAME "vxge"
+#define VXGE_DRIVER_VENDOR "Neterion, Inc"
+#define VXGE_DRIVER_FW_VERSION_MAJOR 1
+
+#define DRV_VERSION VXGE_VERSION_MAJOR"."VXGE_VERSION_MINOR"."\
+ VXGE_VERSION_FIX"."VXGE_VERSION_BUILD"-"\
+ VXGE_VERSION_FOR
+
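+/* DRV_VERSION is built by adjacent string-literal concatenation, so each
+ * VXGE_VERSION_* macro (defined in vxge-version.h) must expand to a string
+ * literal; e.g. hypothetical values "2", "5", "3", "1", "k" would yield
+ * "2.5.3.1-k".
+ */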
+#define PCI_DEVICE_ID_TITAN_WIN 0x5733
+#define PCI_DEVICE_ID_TITAN_UNI 0x5833
+#define VXGE_HW_TITAN1_PCI_REVISION 1
+#define VXGE_HW_TITAN1A_PCI_REVISION 2
+
+#define VXGE_USE_DEFAULT 0xffffffff
+#define VXGE_HW_VPATH_MSIX_ACTIVE 4
+#define VXGE_ALARM_MSIX_ID 2
+#define VXGE_HW_RXSYNC_FREQ_CNT 4
+#define VXGE_LL_WATCH_DOG_TIMEOUT (15 * HZ)
+#define VXGE_LL_RX_COPY_THRESHOLD 256
+#define VXGE_DEF_FIFO_LENGTH 84
+
+#define NO_STEERING 0
+#define PORT_STEERING 0x1
+#define RTH_STEERING 0x2
+#define RX_TOS_STEERING 0x3
+#define RX_VLAN_STEERING 0x4
+#define RTH_BUCKET_SIZE 4
+
+#define TX_PRIORITY_STEERING 1
+#define TX_VLAN_STEERING 2
+#define TX_PORT_STEERING 3
+#define TX_MULTIQ_STEERING 4
+
+#define VXGE_HW_MAC_ADDR_LEARN_DEFAULT VXGE_HW_RTS_MAC_DISABLE
+
+#define VXGE_TTI_BTIMER_VAL 250000
+
+#define VXGE_TTI_LTIMER_VAL 1000
+#define VXGE_T1A_TTI_LTIMER_VAL 80
+#define VXGE_TTI_RTIMER_VAL 0
+#define VXGE_TTI_RTIMER_ADAPT_VAL 10
+#define VXGE_T1A_TTI_RTIMER_VAL 400
+#define VXGE_RTI_BTIMER_VAL 250
+#define VXGE_RTI_LTIMER_VAL 100
+#define VXGE_RTI_RTIMER_VAL 0
+#define VXGE_RTI_RTIMER_ADAPT_VAL 15
+#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
+#define VXGE_ISR_POLLING_CNT 8
+#define VXGE_MAX_CONFIG_DEV 0xFF
+#define VXGE_EXEC_MODE_DISABLE 0
+#define VXGE_EXEC_MODE_ENABLE 1
+#define VXGE_MAX_CONFIG_PORT 1
+#define VXGE_ALL_VID_DISABLE 0
+#define VXGE_ALL_VID_ENABLE 1
+#define VXGE_PAUSE_CTRL_DISABLE 0
+#define VXGE_PAUSE_CTRL_ENABLE 1
+
+#define TTI_TX_URANGE_A 5
+#define TTI_TX_URANGE_B 15
+#define TTI_TX_URANGE_C 40
+#define TTI_TX_UFC_A 5
+#define TTI_TX_UFC_B 40
+#define TTI_TX_UFC_C 60
+#define TTI_TX_UFC_D 100
+#define TTI_T1A_TX_UFC_A 30
+#define TTI_T1A_TX_UFC_B 80
+/* Slope = (max_mtu - min_mtu) / (max_mtu_ufc - min_mtu_ufc) ~= 93:
+ * UFC_C is 60 at 9k MTU and 140 at 1.5k MTU.
+ */
+#define TTI_T1A_TX_UFC_C(mtu) (60 + ((VXGE_HW_MAX_MTU - mtu) / 93))
+
+/* Slope ~= 37: UFC_D is 100 at 9k MTU and 300 at 1.5k MTU. */
+#define TTI_T1A_TX_UFC_D(mtu) (100 + ((VXGE_HW_MAX_MTU - mtu) / 37))
+
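+/* Worked example, assuming VXGE_HW_MAX_MTU is 9000 as the endpoint
+ * comments above imply: TTI_T1A_TX_UFC_C(1500) = 60 + 7500/93 = 140,
+ * and TTI_T1A_TX_UFC_D(1500) = 100 + 7500/37 = 302 (~300).
+ */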
+
+#define RTI_RX_URANGE_A 5
+#define RTI_RX_URANGE_B 15
+#define RTI_RX_URANGE_C 40
+#define RTI_T1A_RX_URANGE_A 1
+#define RTI_T1A_RX_URANGE_B 20
+#define RTI_T1A_RX_URANGE_C 50
+#define RTI_RX_UFC_A 1
+#define RTI_RX_UFC_B 5
+#define RTI_RX_UFC_C 10
+#define RTI_RX_UFC_D 15
+#define RTI_T1A_RX_UFC_B 20
+#define RTI_T1A_RX_UFC_C 50
+#define RTI_T1A_RX_UFC_D 60
+
+/*
+ * The moderation parameters keep the interrupt rate at roughly 3k per
+ * second for most, but not all, traffic. These are the maximum interrupt
+ * counts allowed per function (INTA) or per vector (MSI-X) in a 10
+ * millisecond period. Enabled only for Titan 1A.
+ */
+#define VXGE_T1A_MAX_INTERRUPT_COUNT 100
+#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT 200
+
+/* Timer period in milliseconds */
+#define VXGE_TIMER_DELAY 10000
+
+#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE)
+
+#define is_sriov(function_mode) \
+ ((function_mode == VXGE_HW_FUNCTION_MODE_SRIOV) || \
+ (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_8) || \
+ (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_4))
+
+enum vxge_reset_event {
+ /* reset events */
+ VXGE_LL_VPATH_RESET = 0,
+ VXGE_LL_DEVICE_RESET = 1,
+ VXGE_LL_FULL_RESET = 2,
+ VXGE_LL_START_RESET = 3,
+ VXGE_LL_COMPL_RESET = 4
+};
+/* These flags represent the device's temporary state */
+enum vxge_device_state_t {
+	__VXGE_STATE_RESET_CARD = 0,
+	__VXGE_STATE_CARD_UP
+};
+
+enum vxge_mac_addr_state {
+ /* mac address states */
+ VXGE_LL_MAC_ADDR_IN_LIST = 0,
+ VXGE_LL_MAC_ADDR_IN_DA_TABLE = 1
+};
+
+struct vxge_drv_config {
+ int config_dev_cnt;
+ int total_dev_cnt;
+ int g_no_cpus;
+ unsigned int vpath_per_dev;
+};
+
+struct macInfo {
+ unsigned char macaddr[ETH_ALEN];
+ unsigned char macmask[ETH_ALEN];
+ unsigned int vpath_no;
+ enum vxge_mac_addr_state state;
+};
+
+struct vxge_config {
+ int tx_pause_enable;
+ int rx_pause_enable;
+
+#define NEW_NAPI_WEIGHT 64
+ int napi_weight;
+ int intr_type;
+#define INTA 0
+#define MSI 1
+#define MSI_X 2
+
+ int addr_learn_en;
+
+ u32 rth_steering:2,
+ rth_algorithm:2,
+ rth_hash_type_tcpipv4:1,
+ rth_hash_type_ipv4:1,
+ rth_hash_type_tcpipv6:1,
+ rth_hash_type_ipv6:1,
+ rth_hash_type_tcpipv6ex:1,
+ rth_hash_type_ipv6ex:1,
+ rth_bkt_sz:8;
+ int rth_jhash_golden_ratio;
+ int tx_steering_type;
+ int fifo_indicate_max_pkts;
+ struct vxge_hw_device_hw_info device_hw_info;
+};
+
+struct vxge_msix_entry {
+	/* Mimics the kernel's struct msix_entry. */
+ u16 vector;
+ u16 entry;
+ u16 in_use;
+ void *arg;
+};
+
+/* Software Statistics */
+
+struct vxge_sw_stats {
+
+ /* Virtual Path */
+ unsigned long vpaths_open;
+ unsigned long vpath_open_fail;
+
+ /* Misc. */
+ unsigned long link_up;
+ unsigned long link_down;
+};
+
+struct vxge_mac_addrs {
+ struct list_head item;
+ u64 macaddr;
+ u64 macmask;
+ enum vxge_mac_addr_state state;
+};
+
+struct vxgedev;
+
+struct vxge_fifo_stats {
+ struct u64_stats_sync syncp;
+ u64 tx_frms;
+ u64 tx_bytes;
+
+ unsigned long tx_errors;
+ unsigned long txd_not_free;
+ unsigned long txd_out_of_desc;
+ unsigned long pci_map_fail;
+};
+
+struct vxge_fifo {
+ struct net_device *ndev;
+ struct pci_dev *pdev;
+ struct __vxge_hw_fifo *handle;
+ struct netdev_queue *txq;
+
+ int tx_steering_type;
+ int indicate_max_pkts;
+
+ /* Adaptive interrupt moderation parameters used in T1A */
+ unsigned long interrupt_count;
+ unsigned long jiffies;
+
+ u32 tx_vector_no;
+ /* Tx stats */
+ struct vxge_fifo_stats stats;
+} ____cacheline_aligned;
+
+struct vxge_ring_stats {
+ struct u64_stats_sync syncp;
+ u64 rx_frms;
+ u64 rx_mcast;
+ u64 rx_bytes;
+
+ unsigned long rx_errors;
+ unsigned long rx_dropped;
+ unsigned long prev_rx_frms;
+ unsigned long pci_map_fail;
+ unsigned long skb_alloc_fail;
+};
+
+struct vxge_ring {
+ struct net_device *ndev;
+ struct pci_dev *pdev;
+ struct __vxge_hw_ring *handle;
+ /* The vpath id maintained in the driver -
+ * 0 to 'maximum_vpaths_in_function - 1'
+ */
+ int driver_id;
+
+ /* Adaptive interrupt moderation parameters used in T1A */
+ unsigned long interrupt_count;
+ unsigned long jiffies;
+
+ /* copy of the flag indicating whether rx_hwts is to be used */
+ u32 rx_hwts:1;
+
+ int pkts_processed;
+ int budget;
+
+ struct napi_struct napi;
+ struct napi_struct *napi_p;
+
+#define VXGE_MAX_MAC_ADDR_COUNT 30
+
+ int vlan_tag_strip;
+ u32 rx_vector_no;
+ enum vxge_hw_status last_status;
+
+ /* Rx stats */
+ struct vxge_ring_stats stats;
+} ____cacheline_aligned;
+
+struct vxge_vpath {
+ struct vxge_fifo fifo;
+ struct vxge_ring ring;
+
+ struct __vxge_hw_vpath_handle *handle;
+
+ /* Actual vpath id for this vpath in the device - 0 to 16 */
+ int device_id;
+ int max_mac_addr_cnt;
+ int is_configured;
+ int is_open;
+ struct vxgedev *vdev;
+ u8 macaddr[ETH_ALEN];
+ u8 macmask[ETH_ALEN];
+
+#define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048
+ /* mac addresses currently programmed into NIC */
+ u16 mac_addr_cnt;
+ u16 mcast_addr_cnt;
+ struct list_head mac_addr_list;
+
+ u32 level_err;
+ u32 level_trace;
+};
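+/* Note: this macro uses a loop variable "i" without declaring it; every
+ * caller must have an int i in scope.
+ */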
+#define VXGE_COPY_DEBUG_INFO_TO_LL(vdev, err, trace) { \
+ for (i = 0; i < vdev->no_of_vpath; i++) { \
+ vdev->vpaths[i].level_err = err; \
+ vdev->vpaths[i].level_trace = trace; \
+ } \
+ vdev->level_err = err; \
+ vdev->level_trace = trace; \
+}
+
+struct vxgedev {
+ struct net_device *ndev;
+ struct pci_dev *pdev;
+ struct __vxge_hw_device *devh;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ int vlan_tag_strip;
+ struct vxge_config config;
+ unsigned long state;
+
+ /* Indicates which vpath to reset */
+ unsigned long vp_reset;
+
+ /* Timer used for polling vpath resets */
+ struct timer_list vp_reset_timer;
+
+ /* Timer used for polling vpath lockup */
+ struct timer_list vp_lockup_timer;
+
+	/*
+	 * Flags to track whether the device is in All Multicast
+	 * or in promiscuous mode.
+	 */
+ u16 all_multi_flg;
+
+ /* A flag indicating whether rx_hwts is to be used or not. */
+ u32 rx_hwts:1,
+ titan1:1;
+
+ struct vxge_msix_entry *vxge_entries;
+ struct msix_entry *entries;
+ /*
+ * 4 for each vpath * 17;
+ * total is 68
+ */
+#define VXGE_MAX_REQUESTED_MSIX 68
+#define VXGE_INTR_STRLEN 80
+ char desc[VXGE_MAX_REQUESTED_MSIX][VXGE_INTR_STRLEN];
+
+ enum vxge_hw_event cric_err_event;
+
+ int max_vpath_supported;
+ int no_of_vpath;
+
+ struct napi_struct napi;
+	/* A debug option: when enabled, if an error condition occurs the
+	 * driver will do the following steps:
+	 * - mask all interrupts
+	 * - not clear the source of the alarm
+	 * - gracefully stop all I/O
+	 * A diagnostic dump of registers and stats at this point
+	 * reveals very useful information.
+	 */
+ int exec_mode;
+ int max_config_port;
+ struct vxge_vpath *vpaths;
+
+ struct __vxge_hw_vpath_handle *vp_handles[VXGE_HW_MAX_VIRTUAL_PATHS];
+ void __iomem *bar0;
+ struct vxge_sw_stats stats;
+ int mtu;
+ /* Below variables are used for vpath selection to transmit a packet */
+ u8 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS];
+ u64 vpaths_deployed;
+
+ u32 intr_cnt;
+ u32 level_err;
+ u32 level_trace;
+ char fw_version[VXGE_HW_FW_STRLEN];
+ struct work_struct reset_task;
+};
+
+struct vxge_rx_priv {
+ struct sk_buff *skb;
+ unsigned char *skb_data;
+ dma_addr_t data_dma;
+ dma_addr_t data_size;
+};
+
+struct vxge_tx_priv {
+ struct sk_buff *skb;
+ dma_addr_t dma_buffers[MAX_SKB_FRAGS+1];
+};
+
+#define VXGE_MODULE_PARAM_INT(p, val) \
+ static int p = val; \
+ module_param(p, int, 0)
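+/* Illustrative use (parameter name hypothetical):
+ * VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
+ * declares a static int module parameter with sysfs permissions 0,
+ * i.e. settable only at load time.
+ */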
+
+#define vxge_os_timer(timer, handle, arg, exp) do { \
+ init_timer(&timer); \
+ timer.function = handle; \
+ timer.data = (unsigned long) arg; \
+ mod_timer(&timer, (jiffies + exp)); \
+	} while (0)
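+/* Illustrative use (handler name hypothetical):
+ * vxge_os_timer(vdev->vp_reset_timer, vxge_poll_vp_reset, vdev, HZ / 2);
+ * initializes the timer and arms it to fire half a second from now.
+ */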
+
+void vxge_initialize_ethtool_ops(struct net_device *ndev);
+enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
+int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
+
+/**
+ * #define VXGE_DEBUG_INIT: debug for initialization functions
+ * #define VXGE_DEBUG_TX : debug transmit related functions
+ * #define VXGE_DEBUG_RX : debug receive related functions
+ * #define VXGE_DEBUG_MEM : debug memory module
+ * #define VXGE_DEBUG_LOCK: debug locks
+ * #define VXGE_DEBUG_SEM : debug semaphore
+ * #define VXGE_DEBUG_ENTRYEXIT: debug functions by adding entry exit statements
+*/
+#define VXGE_DEBUG_INIT 0x00000001
+#define VXGE_DEBUG_TX 0x00000002
+#define VXGE_DEBUG_RX 0x00000004
+#define VXGE_DEBUG_MEM 0x00000008
+#define VXGE_DEBUG_LOCK 0x00000010
+#define VXGE_DEBUG_SEM 0x00000020
+#define VXGE_DEBUG_ENTRYEXIT 0x00000040
+#define VXGE_DEBUG_INTR 0x00000080
+#define VXGE_DEBUG_LL_CONFIG 0x00000100
+
+/* Debug tracing for VXGE driver */
+#ifndef VXGE_DEBUG_MASK
+#define VXGE_DEBUG_MASK 0x0
+#endif
+
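+/* When a category below is compiled out, the macro expands to nothing and
+ * its arguments are never evaluated, so call sites must not rely on side
+ * effects in the arguments.
+ */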
+#if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK)
+#define vxge_debug_ll_config(level, fmt, ...) \
+ vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, __VA_ARGS__)
+#else
+#define vxge_debug_ll_config(level, fmt, ...)
+#endif
+
+#if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK)
+#define vxge_debug_init(level, fmt, ...) \
+ vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, __VA_ARGS__)
+#else
+#define vxge_debug_init(level, fmt, ...)
+#endif
+
+#if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK)
+#define vxge_debug_tx(level, fmt, ...) \
+ vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, __VA_ARGS__)
+#else
+#define vxge_debug_tx(level, fmt, ...)
+#endif
+
+#if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK)
+#define vxge_debug_rx(level, fmt, ...) \
+ vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, __VA_ARGS__)
+#else
+#define vxge_debug_rx(level, fmt, ...)
+#endif
+
+#if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK)
+#define vxge_debug_mem(level, fmt, ...) \
+ vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, __VA_ARGS__)
+#else
+#define vxge_debug_mem(level, fmt, ...)
+#endif
+
+#if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)
+#define vxge_debug_entryexit(level, fmt, ...) \
+ vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, __VA_ARGS__)
+#else
+#define vxge_debug_entryexit(level, fmt, ...)
+#endif
+
+#if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK)
+#define vxge_debug_intr(level, fmt, ...) \
+ vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, __VA_ARGS__)
+#else
+#define vxge_debug_intr(level, fmt, ...)
+#endif
+
+#define VXGE_DEVICE_DEBUG_LEVEL_SET(level, mask, vdev) {\
+ vxge_hw_device_debug_set((struct __vxge_hw_device *)vdev->devh, \
+ level, mask);\
+ VXGE_COPY_DEBUG_INFO_TO_LL(vdev, \
+ vxge_hw_device_error_level_get((struct __vxge_hw_device *) \
+ vdev->devh), \
+ vxge_hw_device_trace_level_get((struct __vxge_hw_device *) \
+ vdev->devh));\
+}
+
+#ifdef NETIF_F_GSO
+#define vxge_tcp_mss(skb) (skb_shinfo(skb)->gso_size)
+#define vxge_udp_mss(skb) (skb_shinfo(skb)->gso_size)
+#define vxge_offload_type(skb) (skb_shinfo(skb)->gso_type)
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-reg.h b/drivers/net/ethernet/neterion/vxge/vxge-reg.h
new file mode 100644
index 000000000000..3e658b175947
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-reg.h
@@ -0,0 +1,4636 @@
+/******************************************************************************
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ *
+ * vxge-reg.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O Virtualized
+ * Server Adapter.
+ * Copyright(c) 2002-2010 Exar Corp.
+ ******************************************************************************/
+#ifndef VXGE_REG_H
+#define VXGE_REG_H
+
+/*
+ * vxge_mBIT(loc) - set bit at offset
+ */
+#define vxge_mBIT(loc) (0x8000000000000000ULL >> (loc))
+
+/*
+ * vxge_vBIT(val, loc, sz) - set bits at offset
+ */
+#define vxge_vBIT(val, loc, sz) (((u64)(val)) << (64-(loc)-(sz)))
+#define vxge_vBIT32(val, loc, sz) (((u32)(val)) << (32-(loc)-(sz)))
+
+/*
+ * vxge_bVALn(bits, loc, n) - Get the value of n bits at location
+ */
+#define vxge_bVALn(bits, loc, n) \
+ ((((u64)bits) >> (64-(loc+n))) & ((0x1ULL << n) - 1))
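+/*
+ * These helpers use big-endian bit numbering: bit 0 is the MSB of the
+ * 64-bit word. For example:
+ *	vxge_mBIT(3)		== 0x1000000000000000ULL
+ *	vxge_vBIT(0x5, 0, 4)	== 0x5000000000000000ULL
+ *	vxge_bVALn(0x5000000000000000ULL, 0, 4) == 0x5
+ */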
+
+#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(bits) \
+ vxge_bVALn(bits, 0, 16)
+#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(bits) \
+ vxge_bVALn(bits, 48, 8)
+#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(bits) \
+ vxge_bVALn(bits, 56, 8)
+
+#define VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(bits) \
+ vxge_bVALn(bits, 3, 5)
+#define VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(bits) \
+ vxge_bVALn(bits, 5, 3)
+#define VXGE_HW_PF_SW_RESET_COMMAND 0xA5
+
+#define VXGE_HW_TITAN_PCICFGMGMT_REG_SPACES 17
+#define VXGE_HW_TITAN_SRPCIM_REG_SPACES 17
+#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17
+#define VXGE_HW_TITAN_VPATH_REG_SPACES 17
+
+#define VXGE_HW_FW_API_GET_EPROM_REV 31
+
+#define VXGE_EPROM_IMG_MAJOR(val) (u32) vxge_bVALn(val, 48, 4)
+#define VXGE_EPROM_IMG_MINOR(val) (u32) vxge_bVALn(val, 52, 4)
+#define VXGE_EPROM_IMG_FIX(val) (u32) vxge_bVALn(val, 56, 4)
+#define VXGE_EPROM_IMG_BUILD(val) (u32) vxge_bVALn(val, 60, 4)
+
+#define VXGE_HW_GET_EPROM_IMAGE_INDEX(val) vxge_bVALn(val, 16, 8)
+#define VXGE_HW_GET_EPROM_IMAGE_VALID(val) vxge_bVALn(val, 31, 1)
+#define VXGE_HW_GET_EPROM_IMAGE_TYPE(val) vxge_bVALn(val, 40, 8)
+#define VXGE_HW_GET_EPROM_IMAGE_REV(val) vxge_bVALn(val, 48, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(val) vxge_vBIT(val, 16, 8)
+
+#define VXGE_HW_FW_API_GET_FUNC_MODE 29
+#define VXGE_HW_GET_FUNC_MODE_VAL(val) (val & 0xFF)
+
+#define VXGE_HW_FW_UPGRADE_MEMO 13
+#define VXGE_HW_FW_UPGRADE_ACTION 16
+#define VXGE_HW_FW_UPGRADE_OFFSET_START 2
+#define VXGE_HW_FW_UPGRADE_OFFSET_SEND 3
+#define VXGE_HW_FW_UPGRADE_OFFSET_COMMIT 4
+#define VXGE_HW_FW_UPGRADE_OFFSET_READ 5
+
+#define VXGE_HW_FW_UPGRADE_BLK_SIZE 16
+#define VXGE_HW_UPGRADE_GET_RET_ERR_CODE(val) (val & 0xff)
+#define VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(val) ((val >> 8) & 0xff)
+
+#define VXGE_HW_ASIC_MODE_RESERVED 0
+#define VXGE_HW_ASIC_MODE_NO_IOV 1
+#define VXGE_HW_ASIC_MODE_SR_IOV 2
+#define VXGE_HW_ASIC_MODE_MR_IOV 3
+
+#define VXGE_HW_TXMAC_GEN_CFG1_TMAC_PERMA_STOP_EN vxge_mBIT(3)
+#define VXGE_HW_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_WIRE vxge_mBIT(19)
+#define VXGE_HW_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_SWITCH vxge_mBIT(23)
+#define VXGE_HW_TXMAC_GEN_CFG1_HOST_APPEND_FCS vxge_mBIT(31)
+
+#define VXGE_HW_VPATH_IS_FIRST_GET_VPATH_IS_FIRST(bits) vxge_bVALn(bits, 3, 1)
+
+#define VXGE_HW_TIM_VPATH_ASSIGNMENT_GET_BMAP_ROOT(bits) \
+ vxge_bVALn(bits, 0, 32)
+
+#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN(bits) \
+ vxge_bVALn(bits, 50, 14)
+
+#define VXGE_HW_XMAC_VSPORT_CHOICES_VP_GET_VSPORT_VECTOR(bits) \
+ vxge_bVALn(bits, 0, 17)
+
+#define VXGE_HW_XMAC_VPATH_TO_VSPORT_VPMGMT_CLONE_GET_VSPORT_NUMBER(bits) \
+ vxge_bVALn(bits, 3, 5)
+
+#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(bits) \
+ vxge_bVALn(bits, 17, 15)
+
+#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_LEGACY_MODE 0
+#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY 1
+#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_MULTI_OP_MODE 2
+
+#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE_MESSAGES_ONLY 0
+#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE_MULTI_OP_MODE 1
+
+#define VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val) \
+ (val&~VXGE_HW_TOC_KDFC_INITIAL_BIR(7))
+#define VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val) \
+ vxge_bVALn(val, 61, 3)
+#define VXGE_HW_TOC_GET_USDC_INITIAL_OFFSET(val) \
+ (val&~VXGE_HW_TOC_USDC_INITIAL_BIR(7))
+#define VXGE_HW_TOC_GET_USDC_INITIAL_BIR(val) \
+ vxge_bVALn(val, 61, 3)
+
+#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(bits) bits
+#define VXGE_HW_TOC_KDFC_FIFO_STRIDE_GET_TOC_KDFC_FIFO_STRIDE(bits) bits
+
+#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR0(bits) \
+ vxge_bVALn(bits, 1, 15)
+#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR1(bits) \
+ vxge_bVALn(bits, 17, 15)
+#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR2(bits) \
+ vxge_bVALn(bits, 33, 15)
+
+#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_VAPTH_NUM(val) vxge_vBIT(val, 42, 5)
+#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_FIFO_NUM(val) vxge_vBIT(val, 47, 2)
+#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_FIFO_OFFSET(val) \
+ vxge_vBIT(val, 49, 15)
+
+#define VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER 0
+#define VXGE_HW_PRC_CFG4_RING_MODE_THREE_BUFFER 1
+#define VXGE_HW_PRC_CFG4_RING_MODE_FIVE_BUFFER 2
+
+#define VXGE_HW_PRC_CFG7_SCATTER_MODE_A 0
+#define VXGE_HW_PRC_CFG7_SCATTER_MODE_B 2
+#define VXGE_HW_PRC_CFG7_SCATTER_MODE_C 1
+
+#define VXGE_HW_RTS_MGR_STEER_CTRL_WE_READ 0
+#define VXGE_HW_RTS_MGR_STEER_CTRL_WE_WRITE 1
+
+#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DA 0
+#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_VID 1
+#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
+#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_PN 3
+#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RANGE_PN 4
+#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
+#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
+#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
+#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
+#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
+#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
+#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DS 11
+#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
+#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_FW_VERSION 13
+
+#define VXGE_HW_RTS_MGR_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
+ vxge_bVALn(bits, 0, 48)
+#define VXGE_HW_RTS_MGR_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
+
+#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_MASK(bits) \
+ vxge_bVALn(bits, 0, 48)
+#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MASK(val) vxge_vBIT(val, 0, 48)
+#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_ADD_PRIVILEGED_MODE \
+ vxge_mBIT(54)
+#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_ADD_VPATH(bits) \
+ vxge_bVALn(bits, 55, 5)
+#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_ADD_VPATH(val) \
+ vxge_vBIT(val, 55, 5)
+#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_ADD_MODE(bits) \
+ vxge_bVALn(bits, 62, 2)
+#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MODE(val) vxge_vBIT(val, 62, 2)
+
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY 0
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY 1
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY 2
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY 3
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY 0
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY 1
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY 3
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL 4
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ALL_CLEAR 172
+
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA 0
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID 1
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
+#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11
+#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
+ vxge_bVALn(bits, 0, 48)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(bits) vxge_bVALn(bits, 0, 12)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(val) vxge_vBIT(val, 0, 12)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_ETYPE(bits) vxge_bVALn(bits, 0, 11)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_ETYPE(val) vxge_vBIT(val, 0, 16)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_SRC_DEST_SEL(bits) \
+ vxge_bVALn(bits, 3, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_SRC_DEST_SEL vxge_mBIT(3)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_TCP_UDP_SEL(bits) \
+ vxge_bVALn(bits, 7, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_TCP_UDP_SEL vxge_mBIT(7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_PORT_NUM(bits) \
+ vxge_bVALn(bits, 8, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_PORT_NUM(val) vxge_vBIT(val, 8, 16)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_EN(bits) \
+ vxge_bVALn(bits, 3, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN vxge_mBIT(3)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_BUCKET_SIZE(bits) \
+ vxge_bVALn(bits, 4, 4)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(val) \
+ vxge_vBIT(val, 4, 4)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ALG_SEL(bits) \
+ vxge_bVALn(bits, 10, 2)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(val) \
+ vxge_vBIT(val, 10, 2)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_JENKINS 0
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_MS_RSS 1
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_CRC32C 2
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV4_EN(bits) \
+ vxge_bVALn(bits, 15, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN vxge_mBIT(15)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV4_EN(bits) \
+ vxge_bVALn(bits, 19, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN vxge_mBIT(19)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV6_EN(bits) \
+ vxge_bVALn(bits, 23, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN vxge_mBIT(23)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV6_EN(bits) \
+ vxge_bVALn(bits, 27, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN vxge_mBIT(27)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV6_EX_EN(bits) \
+ vxge_bVALn(bits, 31, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN vxge_mBIT(31)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV6_EX_EN(bits) \
+ vxge_bVALn(bits, 35, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN vxge_mBIT(35)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(bits) \
+ vxge_bVALn(bits, 39, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE vxge_mBIT(39)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_REPL_ENTRY_EN(bits) \
+ vxge_bVALn(bits, 43, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_REPL_ENTRY_EN vxge_mBIT(43)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_SOLO_IT_ENTRY_EN(bits) \
+ vxge_bVALn(bits, 3, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN vxge_mBIT(3)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_SOLO_IT_BUCKET_DATA(bits) \
+ vxge_bVALn(bits, 9, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(val) \
+ vxge_vBIT(val, 9, 7)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_BUCKET_NUM(bits) \
+ vxge_bVALn(bits, 0, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(val) \
+ vxge_vBIT(val, 0, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_ENTRY_EN(bits) \
+ vxge_bVALn(bits, 8, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN vxge_mBIT(8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_BUCKET_DATA(bits) \
+ vxge_bVALn(bits, 9, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(val) \
+ vxge_vBIT(val, 9, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_BUCKET_NUM(bits) \
+ vxge_bVALn(bits, 16, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(val) \
+ vxge_vBIT(val, 16, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_ENTRY_EN(bits) \
+ vxge_bVALn(bits, 24, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN vxge_mBIT(24)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_BUCKET_DATA(bits) \
+ vxge_bVALn(bits, 25, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(val) \
+ vxge_vBIT(val, 25, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_BUCKET_NUM(bits) \
+ vxge_bVALn(bits, 0, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(val) \
+ vxge_vBIT(val, 0, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_ENTRY_EN(bits) \
+ vxge_bVALn(bits, 8, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN vxge_mBIT(8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_BUCKET_DATA(bits) \
+ vxge_bVALn(bits, 9, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(val) \
+ vxge_vBIT(val, 9, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_BUCKET_NUM(bits) \
+ vxge_bVALn(bits, 16, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(val) \
+ vxge_vBIT(val, 16, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_ENTRY_EN(bits) \
+ vxge_bVALn(bits, 24, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN vxge_mBIT(24)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_BUCKET_DATA(bits) \
+ vxge_bVALn(bits, 25, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(val) \
+ vxge_vBIT(val, 25, 7)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_JHASH_CFG_GOLDEN_RATIO(bits) \
+ vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_GOLDEN_RATIO(val) \
+ vxge_vBIT(val, 0, 32)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_JHASH_CFG_INIT_VALUE(bits) \
+ vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_INIT_VALUE(val) \
+ vxge_vBIT(val, 32, 32)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV6_SA_MASK(bits) \
+ vxge_bVALn(bits, 0, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV6_SA_MASK(val) \
+ vxge_vBIT(val, 0, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV6_DA_MASK(bits) \
+ vxge_bVALn(bits, 16, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV6_DA_MASK(val) \
+ vxge_vBIT(val, 16, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV4_SA_MASK(bits) \
+ vxge_bVALn(bits, 32, 4)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV4_SA_MASK(val) \
+ vxge_vBIT(val, 32, 4)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV4_DA_MASK(bits) \
+ vxge_bVALn(bits, 36, 4)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV4_DA_MASK(val) \
+ vxge_vBIT(val, 36, 4)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_L4SP_MASK(bits) \
+ vxge_bVALn(bits, 40, 2)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_L4SP_MASK(val) \
+ vxge_vBIT(val, 40, 2)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_L4DP_MASK(bits) \
+ vxge_bVALn(bits, 42, 2)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_L4DP_MASK(val) \
+ vxge_vBIT(val, 42, 2)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_KEY_KEY(bits) \
+ vxge_bVALn(bits, 0, 64)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_KEY_KEY vxge_vBIT(val, 0, 64)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_QOS_ENTRY_EN(bits) \
+ vxge_bVALn(bits, 3, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_QOS_ENTRY_EN vxge_mBIT(3)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DS_ENTRY_EN(bits) \
+ vxge_bVALn(bits, 3, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DS_ENTRY_EN vxge_mBIT(3)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(bits) \
+ vxge_bVALn(bits, 0, 48)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(val) \
+ vxge_vBIT(val, 0, 48)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(val) \
+ vxge_vBIT(val, 62, 2)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_BUCKET_NUM(bits) \
+ vxge_bVALn(bits, 0, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_BUCKET_NUM(val) \
+ vxge_vBIT(val, 0, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_ENTRY_EN(bits) \
+ vxge_bVALn(bits, 8, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_ENTRY_EN vxge_mBIT(8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_BUCKET_DATA(bits) \
+ vxge_bVALn(bits, 9, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_BUCKET_DATA(val) \
+ vxge_vBIT(val, 9, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_BUCKET_NUM(bits) \
+ vxge_bVALn(bits, 16, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_BUCKET_NUM(val) \
+ vxge_vBIT(val, 16, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_ENTRY_EN(bits) \
+ vxge_bVALn(bits, 24, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_ENTRY_EN vxge_mBIT(24)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_BUCKET_DATA(bits) \
+ vxge_bVALn(bits, 25, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_BUCKET_DATA(val) \
+ vxge_vBIT(val, 25, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_BUCKET_NUM(bits) \
+ vxge_bVALn(bits, 32, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_BUCKET_NUM(val) \
+ vxge_vBIT(val, 32, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_ENTRY_EN(bits) \
+ vxge_bVALn(bits, 40, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_ENTRY_EN vxge_mBIT(40)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_BUCKET_DATA(bits) \
+ vxge_bVALn(bits, 41, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_BUCKET_DATA(val) \
+ vxge_vBIT(val, 41, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_BUCKET_NUM(bits) \
+ vxge_bVALn(bits, 48, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_BUCKET_NUM(val) \
+ vxge_vBIT(val, 48, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_ENTRY_EN(bits) \
+ vxge_bVALn(bits, 56, 1)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_ENTRY_EN vxge_mBIT(56)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_BUCKET_DATA(bits) \
+ vxge_bVALn(bits, 57, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_BUCKET_DATA(val) \
+ vxge_vBIT(val, 57, 7)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER 0
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER 1
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_VERSION 2
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE 3
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0 4
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_1 5
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_2 6
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3 7
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_LED_CONTROL_ON 1
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_LED_CONTROL_OFF 0
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(bits) \
+ vxge_bVALn(bits, 0, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_DAY(val) vxge_vBIT(val, 0, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(bits) \
+ vxge_bVALn(bits, 8, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MONTH(val) vxge_vBIT(val, 8, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(bits) \
+ vxge_bVALn(bits, 16, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_YEAR(val) \
+ vxge_vBIT(val, 16, 16)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(bits) \
+ vxge_bVALn(bits, 32, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MAJOR vxge_vBIT(val, 32, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(bits) \
+ vxge_bVALn(bits, 40, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MINOR vxge_vBIT(val, 40, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(bits) \
+ vxge_bVALn(bits, 48, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_BUILD vxge_vBIT(val, 48, 16)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(bits) \
+ vxge_bVALn(bits, 0, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_DAY(val) vxge_vBIT(val, 0, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(bits) \
+ vxge_bVALn(bits, 8, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MONTH(val) vxge_vBIT(val, 8, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(bits) \
+ vxge_bVALn(bits, 16, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_YEAR(val) \
+ vxge_vBIT(val, 16, 16)
+
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(bits) \
+ vxge_bVALn(bits, 32, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MAJOR vxge_vBIT(val, 32, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(bits) \
+ vxge_bVALn(bits, 40, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MINOR vxge_vBIT(val, 40, 8)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \
+ vxge_bVALn(bits, 48, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD vxge_vBIT(val, 48, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(bits) vxge_bVALn(bits, 0, 8)
+
+#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\
+ vxge_bVALn(bits, 0, 18)
+
+#define VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(bits) \
+ vxge_bVALn(bits, 48, 16)
+#define VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(bits) \
+ vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(bits) vxge_bVALn(bits, 48, 16)
+#define VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(bits) \
+ vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(bits) \
+ vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(bits) \
+ vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(bits) (bits)
+#define VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(bits) (bits)
+#define VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(bits) \
+ vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(bits) \
+ vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(bits) \
+ vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(bits) \
+ vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(bits) \
+ vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(bits) \
+ vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(bits) \
+ vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(bits) \
+ vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(bits)\
+ vxge_bVALn(bits, 48, 16)
+#define VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(bits) vxge_bVALn(bits, 0, 16)
+#define VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(bits) \
+ vxge_bVALn(bits, 16, 16)
+#define VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(bits) \
+ vxge_bVALn(bits, 32, 16)
+#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(bits) vxge_bVALn(bits, 0, 16)
+#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(bits) \
+ vxge_bVALn(bits, 16, 16)
+#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(bits) \
+ vxge_bVALn(bits, 32, 16)
+
+#define VXGE_HW_MRPCIM_DEBUG_STATS0_GET_INI_WR_DROP(bits) \
+ vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_MRPCIM_DEBUG_STATS0_GET_INI_RD_DROP(bits) \
+ vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_MRPCIM_DEBUG_STATS1_GET_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(bits)\
+ vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_MRPCIM_DEBUG_STATS2_GET_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(bits)\
+ vxge_bVALn(bits, 32, 32)
+#define \
+VXGE_HW_MRPCIM_DEBUG_STATS3_GET_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(bits) \
+ vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_MRPCIM_DEBUG_STATS4_GET_INI_WR_VPIN_DROP(bits) \
+ vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_MRPCIM_DEBUG_STATS4_GET_INI_RD_VPIN_DROP(bits) \
+ vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_GENSTATS_COUNT01_GET_GENSTATS_COUNT1(bits) \
+ vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_GENSTATS_COUNT01_GET_GENSTATS_COUNT0(bits) \
+ vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_GENSTATS_COUNT23_GET_GENSTATS_COUNT3(bits) \
+ vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_GENSTATS_COUNT23_GET_GENSTATS_COUNT2(bits) \
+ vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_GENSTATS_COUNT4_GET_GENSTATS_COUNT4(bits) \
+ vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_GENSTATS_COUNT5_GET_GENSTATS_COUNT5(bits) \
+ vxge_bVALn(bits, 32, 32)
+
+#define VXGE_HW_DEBUG_STATS0_GET_RSTDROP_MSG(bits) vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_DEBUG_STATS0_GET_RSTDROP_CPL(bits) vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_DEBUG_STATS1_GET_RSTDROP_CLIENT0(bits) vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_DEBUG_STATS1_GET_RSTDROP_CLIENT1(bits) vxge_bVALn(bits, 32, 32)
+#define VXGE_HW_DEBUG_STATS2_GET_RSTDROP_CLIENT2(bits) vxge_bVALn(bits, 0, 32)
+#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_PH(bits) vxge_bVALn(bits, 0, 16)
+#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_NPH(bits) vxge_bVALn(bits, 16, 16)
+#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_CPLH(bits) vxge_bVALn(bits, 32, 16)
+#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_PD(bits) vxge_bVALn(bits, 0, 16)
+#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_NPD(bits) vxge_bVALn(bits, 16, 16)
+#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_CPLD(bits) vxge_bVALn(bits, 32, 16)
+
+#define VXGE_HW_DBG_STATS_TPA_TX_PATH_GET_TX_PERMITTED_FRMS(bits) \
+ vxge_bVALn(bits, 32, 32)
+
+#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT0_TX_ANY_FRMS(bits) \
+ vxge_bVALn(bits, 0, 8)
+#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT1_TX_ANY_FRMS(bits) \
+ vxge_bVALn(bits, 8, 8)
+#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT2_TX_ANY_FRMS(bits) \
+ vxge_bVALn(bits, 16, 8)
+
+#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT0_RX_ANY_FRMS(bits) \
+ vxge_bVALn(bits, 0, 8)
+#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT1_RX_ANY_FRMS(bits) \
+ vxge_bVALn(bits, 8, 8)
+#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT2_RX_ANY_FRMS(bits) \
+ vxge_bVALn(bits, 16, 8)
+
+#define VXGE_HW_CONFIG_PRIV_H
+
+#define VXGE_HW_SWAPPER_INITIAL_VALUE 0x0123456789abcdefULL
+#define VXGE_HW_SWAPPER_BYTE_SWAPPED 0xefcdab8967452301ULL
+#define VXGE_HW_SWAPPER_BIT_FLIPPED 0x80c4a2e691d5b3f7ULL
+#define VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED 0xf7b3d591e6a2c480ULL
+
+#define VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
+#define VXGE_HW_SWAPPER_READ_BYTE_SWAP_DISABLE 0x0000000000000000ULL
+
+#define VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
+#define VXGE_HW_SWAPPER_READ_BIT_FLAP_DISABLE 0x0000000000000000ULL
+
+#define VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
+#define VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_DISABLE 0x0000000000000000ULL
+
+#define VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE 0xFFFFFFFFFFFFFFFFULL
+#define VXGE_HW_SWAPPER_WRITE_BIT_FLAP_DISABLE 0x0000000000000000ULL
+
+/*
+ * The registers are memory mapped and are native big-endian byte order. The
+ * little-endian hosts are handled by enabling hardware byte-swapping for
+ * register and dma operations.
+ */
+struct vxge_hw_legacy_reg {
+
+ u8 unused00010[0x00010];
+
+/*0x00010*/ u64 toc_swapper_fb;
+#define VXGE_HW_TOC_SWAPPER_FB_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
+/*0x00018*/ u64 pifm_rd_swap_en;
+#define VXGE_HW_PIFM_RD_SWAP_EN_PIFM_RD_SWAP_EN(val) vxge_vBIT(val, 0, 64)
+/*0x00020*/ u64 pifm_rd_flip_en;
+#define VXGE_HW_PIFM_RD_FLIP_EN_PIFM_RD_FLIP_EN(val) vxge_vBIT(val, 0, 64)
+/*0x00028*/ u64 pifm_wr_swap_en;
+#define VXGE_HW_PIFM_WR_SWAP_EN_PIFM_WR_SWAP_EN(val) vxge_vBIT(val, 0, 64)
+/*0x00030*/ u64 pifm_wr_flip_en;
+#define VXGE_HW_PIFM_WR_FLIP_EN_PIFM_WR_FLIP_EN(val) vxge_vBIT(val, 0, 64)
+/*0x00038*/ u64 toc_first_pointer;
+#define VXGE_HW_TOC_FIRST_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
+/*0x00040*/ u64 host_access_en;
+#define VXGE_HW_HOST_ACCESS_EN_HOST_ACCESS_EN(val) vxge_vBIT(val, 0, 64)
+
+} __packed;
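+
+/*
+ * Minimal sketch of how a little-endian host might program the swapper
+ * through this block (assuming readq/writeq accessors; not a definitive
+ * sequence): read back toc_swapper_fb and, if the initial value arrives
+ * byte-swapped, enable byte swapping for both directions before
+ * touching any other register:
+ *
+ *	u64 val64 = readq(&legacy_reg->toc_swapper_fb);
+ *
+ *	if (val64 == VXGE_HW_SWAPPER_BYTE_SWAPPED) {
+ *		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
+ *		       &legacy_reg->pifm_rd_swap_en);
+ *		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
+ *		       &legacy_reg->pifm_wr_swap_en);
+ *	}
+ */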
+
+struct vxge_hw_toc_reg {
+
+ u8 unused00050[0x00050];
+
+/*0x00050*/ u64 toc_common_pointer;
+#define VXGE_HW_TOC_COMMON_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
+/*0x00058*/ u64 toc_memrepair_pointer;
+#define VXGE_HW_TOC_MEMREPAIR_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
+/*0x00060*/ u64 toc_pcicfgmgmt_pointer[17];
+#define VXGE_HW_TOC_PCICFGMGMT_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
+ u8 unused001e0[0x001e0-0x000e8];
+
+/*0x001e0*/ u64 toc_mrpcim_pointer;
+#define VXGE_HW_TOC_MRPCIM_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
+/*0x001e8*/ u64 toc_srpcim_pointer[17];
+#define VXGE_HW_TOC_SRPCIM_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
+ u8 unused00278[0x00278-0x00270];
+
+/*0x00278*/ u64 toc_vpmgmt_pointer[17];
+#define VXGE_HW_TOC_VPMGMT_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
+ u8 unused00390[0x00390-0x00300];
+
+/*0x00390*/ u64 toc_vpath_pointer[17];
+#define VXGE_HW_TOC_VPATH_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64)
+ u8 unused004a0[0x004a0-0x00418];
+
+/*0x004a0*/ u64 toc_kdfc;
+#define VXGE_HW_TOC_KDFC_INITIAL_OFFSET(val) vxge_vBIT(val, 0, 61)
+#define VXGE_HW_TOC_KDFC_INITIAL_BIR(val) vxge_vBIT(val, 61, 3)
+/*0x004a8*/ u64 toc_usdc;
+#define VXGE_HW_TOC_USDC_INITIAL_OFFSET(val) vxge_vBIT(val, 0, 61)
+#define VXGE_HW_TOC_USDC_INITIAL_BIR(val) vxge_vBIT(val, 61, 3)
+/*0x004b0*/ u64 toc_kdfc_vpath_stride;
+#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_INITIAL_TOC_KDFC_VPATH_STRIDE(val) \
+ vxge_vBIT(val, 0, 64)
+/*0x004b8*/ u64 toc_kdfc_fifo_stride;
+#define VXGE_HW_TOC_KDFC_FIFO_STRIDE_INITIAL_TOC_KDFC_FIFO_STRIDE(val) \
+ vxge_vBIT(val, 0, 64)
+
+} __packed;
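+
+/*
+ * Illustrative sketch ("bar0" and "vpath_reg" are assumed names): the
+ * TOC itself is found via legacy_reg->toc_first_pointer, and each TOC
+ * entry is in turn a byte offset from the start of BAR0, e.g.:
+ *
+ *	toc = bar0 + readq(&legacy_reg->toc_first_pointer);
+ *	vpath_reg[i] = bar0 + readq(&toc->toc_vpath_pointer[i]);
+ */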
+
+struct vxge_hw_common_reg {
+
+ u8 unused00a00[0x00a00];
+
+/*0x00a00*/ u64 prc_status1;
+#define VXGE_HW_PRC_STATUS1_PRC_VP_QUIESCENT(n) vxge_mBIT(n)
+/*0x00a08*/ u64 rxdcm_reset_in_progress;
+#define VXGE_HW_RXDCM_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n)
+/*0x00a10*/ u64 replicq_flush_in_progress;
+#define VXGE_HW_REPLICQ_FLUSH_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
+/*0x00a18*/ u64 rxpe_cmds_reset_in_progress;
+#define VXGE_HW_RXPE_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
+/*0x00a20*/ u64 mxp_cmds_reset_in_progress;
+#define VXGE_HW_MXP_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
+/*0x00a28*/ u64 noffload_reset_in_progress;
+#define VXGE_HW_NOFFLOAD_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n)
+/*0x00a30*/ u64 rd_req_in_progress;
+#define VXGE_HW_RD_REQ_IN_PROGRESS_VP(n) vxge_mBIT(n)
+/*0x00a38*/ u64 rd_req_outstanding;
+#define VXGE_HW_RD_REQ_OUTSTANDING_VP(n) vxge_mBIT(n)
+/*0x00a40*/ u64 kdfc_reset_in_progress;
+#define VXGE_HW_KDFC_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n)
+ u8 unused00b00[0x00b00-0x00a48];
+
+/*0x00b00*/ u64 one_cfg_vp;
+#define VXGE_HW_ONE_CFG_VP_RDY(n) vxge_mBIT(n)
+/*0x00b08*/ u64 one_common;
+#define VXGE_HW_ONE_COMMON_PET_VPATH_RESET_IN_PROGRESS(n) vxge_mBIT(n)
+ u8 unused00b80[0x00b80-0x00b10];
+
+/*0x00b80*/ u64 tim_int_en;
+#define VXGE_HW_TIM_INT_EN_TIM_VP(n) vxge_mBIT(n)
+/*0x00b88*/ u64 tim_set_int_en;
+#define VXGE_HW_TIM_SET_INT_EN_VP(n) vxge_mBIT(n)
+/*0x00b90*/ u64 tim_clr_int_en;
+#define VXGE_HW_TIM_CLR_INT_EN_VP(n) vxge_mBIT(n)
+/*0x00b98*/ u64 tim_mask_int_during_reset;
+#define VXGE_HW_TIM_MASK_INT_DURING_RESET_VPATH(n) vxge_mBIT(n)
+/*0x00ba0*/ u64 tim_reset_in_progress;
+#define VXGE_HW_TIM_RESET_IN_PROGRESS_TIM_VPATH(n) vxge_mBIT(n)
+/*0x00ba8*/ u64 tim_outstanding_bmap;
+#define VXGE_HW_TIM_OUTSTANDING_BMAP_TIM_VPATH(n) vxge_mBIT(n)
+ u8 unused00c00[0x00c00-0x00bb0];
+
+/*0x00c00*/ u64 msg_reset_in_progress;
+#define VXGE_HW_MSG_RESET_IN_PROGRESS_MSG_COMPOSITE(val) vxge_vBIT(val, 0, 17)
+/*0x00c08*/ u64 msg_mxp_mr_ready;
+#define VXGE_HW_MSG_MXP_MR_READY_MP_BOOTED(n) vxge_mBIT(n)
+/*0x00c10*/ u64 msg_uxp_mr_ready;
+#define VXGE_HW_MSG_UXP_MR_READY_UP_BOOTED(n) vxge_mBIT(n)
+/*0x00c18*/ u64 msg_dmq_noni_rtl_prefetch;
+#define VXGE_HW_MSG_DMQ_NONI_RTL_PREFETCH_BYPASS_ENABLE(n) vxge_mBIT(n)
+/*0x00c20*/ u64 msg_umq_rtl_bwr;
+#define VXGE_HW_MSG_UMQ_RTL_BWR_PREFETCH_DISABLE(n) vxge_mBIT(n)
+ u8 unused00d00[0x00d00-0x00c28];
+
+/*0x00d00*/ u64 cmn_rsthdlr_cfg0;
+#define VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(val) vxge_vBIT(val, 0, 17)
+/*0x00d08*/ u64 cmn_rsthdlr_cfg1;
+#define VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(val) vxge_vBIT(val, 0, 17)
+/*0x00d10*/ u64 cmn_rsthdlr_cfg2;
+#define VXGE_HW_CMN_RSTHDLR_CFG2_SW_RESET_FIFO0(val) vxge_vBIT(val, 0, 17)
+/*0x00d18*/ u64 cmn_rsthdlr_cfg3;
+#define VXGE_HW_CMN_RSTHDLR_CFG3_SW_RESET_FIFO1(val) vxge_vBIT(val, 0, 17)
+/*0x00d20*/ u64 cmn_rsthdlr_cfg4;
+#define VXGE_HW_CMN_RSTHDLR_CFG4_SW_RESET_FIFO2(val) vxge_vBIT(val, 0, 17)
+ u8 unused00d40[0x00d40-0x00d28];
+
+/*0x00d40*/ u64 cmn_rsthdlr_cfg8;
+#define VXGE_HW_CMN_RSTHDLR_CFG8_INCR_VPATH_INST_NUM(val) vxge_vBIT(val, 0, 17)
+/*0x00d48*/ u64 stats_cfg0;
+#define VXGE_HW_STATS_CFG0_STATS_ENABLE(val) vxge_vBIT(val, 0, 17)
+ u8 unused00da8[0x00da8-0x00d50];
+
+/*0x00da8*/ u64 clear_msix_mask_vect[4];
+#define VXGE_HW_CLEAR_MSIX_MASK_VECT_CLEAR_MSIX_MASK_VECT(val) \
+ vxge_vBIT(val, 0, 17)
+/*0x00dc8*/ u64 set_msix_mask_vect[4];
+#define VXGE_HW_SET_MSIX_MASK_VECT_SET_MSIX_MASK_VECT(val) vxge_vBIT(val, 0, 17)
+/*0x00de8*/ u64 clear_msix_mask_all_vect;
+#define VXGE_HW_CLEAR_MSIX_MASK_ALL_VECT_CLEAR_MSIX_MASK_ALL_VECT(val) \
+ vxge_vBIT(val, 0, 17)
+/*0x00df0*/ u64 set_msix_mask_all_vect;
+#define VXGE_HW_SET_MSIX_MASK_ALL_VECT_SET_MSIX_MASK_ALL_VECT(val) \
+ vxge_vBIT(val, 0, 17)
+/*0x00df8*/ u64 mask_vector[4];
+#define VXGE_HW_MASK_VECTOR_MASK_VECTOR(val) vxge_vBIT(val, 0, 17)
+/*0x00e18*/ u64 msix_pending_vector[4];
+#define VXGE_HW_MSIX_PENDING_VECTOR_MSIX_PENDING_VECTOR(val) \
+ vxge_vBIT(val, 0, 17)
+/*0x00e38*/ u64 clr_msix_one_shot_vec[4];
+#define VXGE_HW_CLR_MSIX_ONE_SHOT_VEC_CLR_MSIX_ONE_SHOT_VEC(val) \
+ vxge_vBIT(val, 0, 17)
+/*0x00e58*/ u64 titan_asic_id;
+#define VXGE_HW_TITAN_ASIC_ID_INITIAL_DEVICE_ID(val) vxge_vBIT(val, 0, 16)
+#define VXGE_HW_TITAN_ASIC_ID_INITIAL_MAJOR_REVISION(val) vxge_vBIT(val, 48, 8)
+#define VXGE_HW_TITAN_ASIC_ID_INITIAL_MINOR_REVISION(val) vxge_vBIT(val, 56, 8)
+/*0x00e60*/ u64 titan_general_int_status;
+#define VXGE_HW_TITAN_GENERAL_INT_STATUS_MRPCIM_ALARM_INT vxge_mBIT(0)
+#define VXGE_HW_TITAN_GENERAL_INT_STATUS_SRPCIM_ALARM_INT vxge_mBIT(1)
+#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT vxge_mBIT(2)
+#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(val) \
+ vxge_vBIT(val, 3, 17)
+ u8 unused00e70[0x00e70-0x00e68];
+
+/*0x00e70*/ u64 titan_mask_all_int;
+#define VXGE_HW_TITAN_MASK_ALL_INT_ALARM vxge_mBIT(7)
+#define VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC vxge_mBIT(15)
+ u8 unused00e80[0x00e80-0x00e78];
+
+/*0x00e80*/ u64 tim_int_status0;
+#define VXGE_HW_TIM_INT_STATUS0_TIM_INT_STATUS0(val) vxge_vBIT(val, 0, 64)
+/*0x00e88*/ u64 tim_int_mask0;
+#define VXGE_HW_TIM_INT_MASK0_TIM_INT_MASK0(val) vxge_vBIT(val, 0, 64)
+/*0x00e90*/ u64 tim_int_status1;
+#define VXGE_HW_TIM_INT_STATUS1_TIM_INT_STATUS1(val) vxge_vBIT(val, 0, 4)
+/*0x00e98*/ u64 tim_int_mask1;
+#define VXGE_HW_TIM_INT_MASK1_TIM_INT_MASK1(val) vxge_vBIT(val, 0, 4)
+/*0x00ea0*/ u64 rti_int_status;
+#define VXGE_HW_RTI_INT_STATUS_RTI_INT_STATUS(val) vxge_vBIT(val, 0, 17)
+/*0x00ea8*/ u64 rti_int_mask;
+#define VXGE_HW_RTI_INT_MASK_RTI_INT_MASK(val) vxge_vBIT(val, 0, 17)
+/*0x00eb0*/ u64 adapter_status;
+#define VXGE_HW_ADAPTER_STATUS_RTDMA_RTDMA_READY vxge_mBIT(0)
+#define VXGE_HW_ADAPTER_STATUS_WRDMA_WRDMA_READY vxge_mBIT(1)
+#define VXGE_HW_ADAPTER_STATUS_KDFC_KDFC_READY vxge_mBIT(2)
+#define VXGE_HW_ADAPTER_STATUS_TPA_TMAC_BUF_EMPTY vxge_mBIT(3)
+#define VXGE_HW_ADAPTER_STATUS_RDCTL_PIC_QUIESCENT vxge_mBIT(4)
+#define VXGE_HW_ADAPTER_STATUS_XGMAC_NETWORK_FAULT vxge_mBIT(5)
+#define VXGE_HW_ADAPTER_STATUS_ROCRC_OFFLOAD_QUIESCENT vxge_mBIT(6)
+#define VXGE_HW_ADAPTER_STATUS_G3IF_FB_G3IF_FB_GDDR3_READY vxge_mBIT(7)
+#define VXGE_HW_ADAPTER_STATUS_G3IF_CM_G3IF_CM_GDDR3_READY vxge_mBIT(8)
+#define VXGE_HW_ADAPTER_STATUS_RIC_RIC_RUNNING vxge_mBIT(9)
+#define VXGE_HW_ADAPTER_STATUS_CMG_C_PLL_IN_LOCK vxge_mBIT(10)
+#define VXGE_HW_ADAPTER_STATUS_XGMAC_X_PLL_IN_LOCK vxge_mBIT(11)
+#define VXGE_HW_ADAPTER_STATUS_FBIF_M_PLL_IN_LOCK vxge_mBIT(12)
+#define VXGE_HW_ADAPTER_STATUS_PCC_PCC_IDLE(val) vxge_vBIT(val, 24, 8)
+#define VXGE_HW_ADAPTER_STATUS_ROCRC_RC_PRC_QUIESCENT(val) vxge_vBIT(val, 44, 8)
+/*0x00eb8*/ u64 gen_ctrl;
+#define VXGE_HW_GEN_CTRL_SPI_MRPCIM_WR_DIS vxge_mBIT(0)
+#define VXGE_HW_GEN_CTRL_SPI_MRPCIM_RD_DIS vxge_mBIT(1)
+#define VXGE_HW_GEN_CTRL_SPI_SRPCIM_WR_DIS vxge_mBIT(2)
+#define VXGE_HW_GEN_CTRL_SPI_SRPCIM_RD_DIS vxge_mBIT(3)
+#define VXGE_HW_GEN_CTRL_SPI_DEBUG_DIS vxge_mBIT(4)
+#define VXGE_HW_GEN_CTRL_SPI_APP_LTSSM_TIMER_DIS vxge_mBIT(5)
+#define VXGE_HW_GEN_CTRL_SPI_NOT_USED(val) vxge_vBIT(val, 6, 4)
+ u8 unused00ed0[0x00ed0-0x00ec0];
+
+/*0x00ed0*/ u64 adapter_ready;
+#define VXGE_HW_ADAPTER_READY_ADAPTER_READY vxge_mBIT(63)
+/*0x00ed8*/ u64 outstanding_read;
+#define VXGE_HW_OUTSTANDING_READ_OUTSTANDING_READ(val) vxge_vBIT(val, 0, 17)
+/*0x00ee0*/ u64 vpath_rst_in_prog;
+#define VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(val) vxge_vBIT(val, 0, 17)
+/*0x00ee8*/ u64 vpath_reg_modified;
+#define VXGE_HW_VPATH_REG_MODIFIED_VPATH_REG_MODIFIED(val) vxge_vBIT(val, 0, 17)
+ u8 unused00fc0[0x00fc0-0x00ef0];
+
+/*0x00fc0*/ u64 cp_reset_in_progress;
+#define VXGE_HW_CP_RESET_IN_PROGRESS_CP_VPATH(n) vxge_mBIT(n)
+ u8 unused01080[0x01080-0x00fc8];
+
+/*0x01080*/ u64 xgmac_ready;
+#define VXGE_HW_XGMAC_READY_XMACJ_READY(val) vxge_vBIT(val, 0, 17)
+ u8 unused010c0[0x010c0-0x01088];
+
+/*0x010c0*/ u64 fbif_ready;
+#define VXGE_HW_FBIF_READY_FAU_READY(val) vxge_vBIT(val, 0, 17)
+ u8 unused01100[0x01100-0x010c8];
+
+/*0x01100*/ u64 vplane_assignments;
+#define VXGE_HW_VPLANE_ASSIGNMENTS_VPLANE_ASSIGNMENTS(val) vxge_vBIT(val, 3, 5)
+/*0x01108*/ u64 vpath_assignments;
+#define VXGE_HW_VPATH_ASSIGNMENTS_VPATH_ASSIGNMENTS(val) vxge_vBIT(val, 0, 17)
+/*0x01110*/ u64 resource_assignments;
+#define VXGE_HW_RESOURCE_ASSIGNMENTS_RESOURCE_ASSIGNMENTS(val) \
+ vxge_vBIT(val, 0, 17)
+/*0x01118*/ u64 host_type_assignments;
+#define VXGE_HW_HOST_TYPE_ASSIGNMENTS_HOST_TYPE_ASSIGNMENTS(val) \
+ vxge_vBIT(val, 5, 3)
+ u8 unused01128[0x01128-0x01120];
+
+/*0x01128*/ u64 max_resource_assignments;
+#define VXGE_HW_MAX_RESOURCE_ASSIGNMENTS_PCI_MAX_VPLANE(val) \
+ vxge_vBIT(val, 3, 5)
+#define VXGE_HW_MAX_RESOURCE_ASSIGNMENTS_PCI_MAX_VPATHS(val) \
+ vxge_vBIT(val, 11, 5)
+/*0x01130*/ u64 pf_vpath_assignments;
+#define VXGE_HW_PF_VPATH_ASSIGNMENTS_PF_VPATH_ASSIGNMENTS(val) \
+ vxge_vBIT(val, 0, 17)
+ u8 unused01200[0x01200-0x01138];
+
+/*0x01200*/ u64 rts_access_icmp;
+#define VXGE_HW_RTS_ACCESS_ICMP_EN(val) vxge_vBIT(val, 0, 17)
+/*0x01208*/ u64 rts_access_tcpsyn;
+#define VXGE_HW_RTS_ACCESS_TCPSYN_EN(val) vxge_vBIT(val, 0, 17)
+/*0x01210*/ u64 rts_access_zl4pyld;
+#define VXGE_HW_RTS_ACCESS_ZL4PYLD_EN(val) vxge_vBIT(val, 0, 17)
+/*0x01218*/ u64 rts_access_l4prtcl_tcp;
+#define VXGE_HW_RTS_ACCESS_L4PRTCL_TCP_EN(val) vxge_vBIT(val, 0, 17)
+/*0x01220*/ u64 rts_access_l4prtcl_udp;
+#define VXGE_HW_RTS_ACCESS_L4PRTCL_UDP_EN(val) vxge_vBIT(val, 0, 17)
+/*0x01228*/ u64 rts_access_l4prtcl_flex;
+#define VXGE_HW_RTS_ACCESS_L4PRTCL_FLEX_EN(val) vxge_vBIT(val, 0, 17)
+/*0x01230*/ u64 rts_access_ipfrag;
+#define VXGE_HW_RTS_ACCESS_IPFRAG_EN(val) vxge_vBIT(val, 0, 17)
+
+} __packed;
+
+struct vxge_hw_memrepair_reg {
+ u64 unused1;
+ u64 unused2;
+} __packed;
+
+struct vxge_hw_pcicfgmgmt_reg {
+
+/*0x00000*/ u64 resource_no;
+#define VXGE_HW_RESOURCE_NO_PFN_OR_VF BIT(3)
+/*0x00008*/ u64 bargrp_pf_or_vf_bar0_mask;
+#define VXGE_HW_BARGRP_PF_OR_VF_BAR0_MASK_BARGRP_PF_OR_VF_BAR0_MASK(val) \
+ vxge_vBIT(val, 2, 6)
+/*0x00010*/ u64 bargrp_pf_or_vf_bar1_mask;
+#define VXGE_HW_BARGRP_PF_OR_VF_BAR1_MASK_BARGRP_PF_OR_VF_BAR1_MASK(val) \
+ vxge_vBIT(val, 2, 6)
+/*0x00018*/ u64 bargrp_pf_or_vf_bar2_mask;
+#define VXGE_HW_BARGRP_PF_OR_VF_BAR2_MASK_BARGRP_PF_OR_VF_BAR2_MASK(val) \
+ vxge_vBIT(val, 2, 6)
+/*0x00020*/ u64 msixgrp_no;
+#define VXGE_HW_MSIXGRP_NO_TABLE_SIZE(val) vxge_vBIT(val, 5, 11)
+
+} __packed;
+
+struct vxge_hw_mrpcim_reg {
+/*0x00000*/ u64 g3fbct_int_status;
+#define VXGE_HW_G3FBCT_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
+/*0x00008*/ u64 g3fbct_int_mask;
+/*0x00010*/ u64 g3fbct_err_reg;
+#define VXGE_HW_G3FBCT_ERR_REG_G3IF_SM_ERR vxge_mBIT(4)
+#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_DECC vxge_mBIT(5)
+#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_U_DECC vxge_mBIT(6)
+#define VXGE_HW_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_DECC vxge_mBIT(7)
+#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_SECC vxge_mBIT(29)
+#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_U_SECC vxge_mBIT(30)
+#define VXGE_HW_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_SECC vxge_mBIT(31)
+/*0x00018*/ u64 g3fbct_err_mask;
+/*0x00020*/ u64 g3fbct_err_alarm;
+
+ u8 unused00a00[0x00a00-0x00028];
+
+/*0x00a00*/ u64 wrdma_int_status;
+#define VXGE_HW_WRDMA_INT_STATUS_RC_ALARM_RC_INT vxge_mBIT(0)
+#define VXGE_HW_WRDMA_INT_STATUS_RXDRM_SM_ERR_RXDRM_INT vxge_mBIT(1)
+#define VXGE_HW_WRDMA_INT_STATUS_RXDCM_SM_ERR_RXDCM_SM_INT vxge_mBIT(2)
+#define VXGE_HW_WRDMA_INT_STATUS_RXDWM_SM_ERR_RXDWM_INT vxge_mBIT(3)
+#define VXGE_HW_WRDMA_INT_STATUS_RDA_ERR_RDA_INT vxge_mBIT(6)
+#define VXGE_HW_WRDMA_INT_STATUS_RDA_ECC_DB_RDA_ECC_DB_INT vxge_mBIT(8)
+#define VXGE_HW_WRDMA_INT_STATUS_RDA_ECC_SG_RDA_ECC_SG_INT vxge_mBIT(9)
+#define VXGE_HW_WRDMA_INT_STATUS_FRF_ALARM_FRF_INT vxge_mBIT(12)
+#define VXGE_HW_WRDMA_INT_STATUS_ROCRC_ALARM_ROCRC_INT vxge_mBIT(13)
+#define VXGE_HW_WRDMA_INT_STATUS_WDE0_ALARM_WDE0_INT vxge_mBIT(14)
+#define VXGE_HW_WRDMA_INT_STATUS_WDE1_ALARM_WDE1_INT vxge_mBIT(15)
+#define VXGE_HW_WRDMA_INT_STATUS_WDE2_ALARM_WDE2_INT vxge_mBIT(16)
+#define VXGE_HW_WRDMA_INT_STATUS_WDE3_ALARM_WDE3_INT vxge_mBIT(17)
+/*0x00a08*/ u64 wrdma_int_mask;
+/*0x00a10*/ u64 rc_alarm_reg;
+#define VXGE_HW_RC_ALARM_REG_FTC_SM_ERR vxge_mBIT(0)
+#define VXGE_HW_RC_ALARM_REG_FTC_SM_PHASE_ERR vxge_mBIT(1)
+#define VXGE_HW_RC_ALARM_REG_BTDWM_SM_ERR vxge_mBIT(2)
+#define VXGE_HW_RC_ALARM_REG_BTC_SM_ERR vxge_mBIT(3)
+#define VXGE_HW_RC_ALARM_REG_BTDCM_SM_ERR vxge_mBIT(4)
+#define VXGE_HW_RC_ALARM_REG_BTDRM_SM_ERR vxge_mBIT(5)
+#define VXGE_HW_RC_ALARM_REG_RMM_RXD_RC_ECC_DB_ERR vxge_mBIT(6)
+#define VXGE_HW_RC_ALARM_REG_RMM_RXD_RC_ECC_SG_ERR vxge_mBIT(7)
+#define VXGE_HW_RC_ALARM_REG_RHS_RXD_RHS_ECC_DB_ERR vxge_mBIT(8)
+#define VXGE_HW_RC_ALARM_REG_RHS_RXD_RHS_ECC_SG_ERR vxge_mBIT(9)
+#define VXGE_HW_RC_ALARM_REG_RMM_SM_ERR vxge_mBIT(10)
+#define VXGE_HW_RC_ALARM_REG_BTC_VPATH_MISMATCH_ERR vxge_mBIT(12)
+/*0x00a18*/ u64 rc_alarm_mask;
+/*0x00a20*/ u64 rc_alarm_alarm;
+/*0x00a28*/ u64 rxdrm_sm_err_reg;
+#define VXGE_HW_RXDRM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
+/*0x00a30*/ u64 rxdrm_sm_err_mask;
+/*0x00a38*/ u64 rxdrm_sm_err_alarm;
+/*0x00a40*/ u64 rxdcm_sm_err_reg;
+#define VXGE_HW_RXDCM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
+/*0x00a48*/ u64 rxdcm_sm_err_mask;
+/*0x00a50*/ u64 rxdcm_sm_err_alarm;
+/*0x00a58*/ u64 rxdwm_sm_err_reg;
+#define VXGE_HW_RXDWM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n)
+/*0x00a60*/ u64 rxdwm_sm_err_mask;
+/*0x00a68*/ u64 rxdwm_sm_err_alarm;
+/*0x00a70*/ u64 rda_err_reg;
+#define VXGE_HW_RDA_ERR_REG_RDA_SM0_ERR_ALARM vxge_mBIT(0)
+#define VXGE_HW_RDA_ERR_REG_RDA_MISC_ERR vxge_mBIT(1)
+#define VXGE_HW_RDA_ERR_REG_RDA_PCIX_ERR vxge_mBIT(2)
+#define VXGE_HW_RDA_ERR_REG_RDA_RXD_ECC_DB_ERR vxge_mBIT(3)
+#define VXGE_HW_RDA_ERR_REG_RDA_FRM_ECC_DB_ERR vxge_mBIT(4)
+#define VXGE_HW_RDA_ERR_REG_RDA_UQM_ECC_DB_ERR vxge_mBIT(5)
+#define VXGE_HW_RDA_ERR_REG_RDA_IMM_ECC_DB_ERR vxge_mBIT(6)
+#define VXGE_HW_RDA_ERR_REG_RDA_TIM_ECC_DB_ERR vxge_mBIT(7)
+/*0x00a78*/ u64 rda_err_mask;
+/*0x00a80*/ u64 rda_err_alarm;
+/*0x00a88*/ u64 rda_ecc_db_reg;
+#define VXGE_HW_RDA_ECC_DB_REG_RDA_RXD_ERR(n) vxge_mBIT(n)
+/*0x00a90*/ u64 rda_ecc_db_mask;
+/*0x00a98*/ u64 rda_ecc_db_alarm;
+/*0x00aa0*/ u64 rda_ecc_sg_reg;
+#define VXGE_HW_RDA_ECC_SG_REG_RDA_RXD_ERR(n) vxge_mBIT(n)
+/*0x00aa8*/ u64 rda_ecc_sg_mask;
+/*0x00ab0*/ u64 rda_ecc_sg_alarm;
+/*0x00ab8*/ u64 rqa_err_reg;
+#define VXGE_HW_RQA_ERR_REG_RQA_SM_ERR_ALARM vxge_mBIT(0)
+/*0x00ac0*/ u64 rqa_err_mask;
+/*0x00ac8*/ u64 rqa_err_alarm;
+/*0x00ad0*/ u64 frf_alarm_reg;
+#define VXGE_HW_FRF_ALARM_REG_PRC_VP_FRF_SM_ERR(n) vxge_mBIT(n)
+/*0x00ad8*/ u64 frf_alarm_mask;
+/*0x00ae0*/ u64 frf_alarm_alarm;
+/*0x00ae8*/ u64 rocrc_alarm_reg;
+#define VXGE_HW_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_DB vxge_mBIT(0)
+#define VXGE_HW_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_SG vxge_mBIT(1)
+#define VXGE_HW_ROCRC_ALARM_REG_NOA_NMA_SM_ERR vxge_mBIT(2)
+#define VXGE_HW_ROCRC_ALARM_REG_NOA_IMMM_ECC_DB vxge_mBIT(3)
+#define VXGE_HW_ROCRC_ALARM_REG_NOA_IMMM_ECC_SG vxge_mBIT(4)
+#define VXGE_HW_ROCRC_ALARM_REG_UDQ_UMQM_ECC_DB vxge_mBIT(5)
+#define VXGE_HW_ROCRC_ALARM_REG_UDQ_UMQM_ECC_SG vxge_mBIT(6)
+#define VXGE_HW_ROCRC_ALARM_REG_NOA_RCBM_ECC_DB vxge_mBIT(11)
+#define VXGE_HW_ROCRC_ALARM_REG_NOA_RCBM_ECC_SG vxge_mBIT(12)
+#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_EGB_RSVD_ERR vxge_mBIT(13)
+#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_EGB_OWN_ERR vxge_mBIT(14)
+#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_BYP_OWN_ERR vxge_mBIT(15)
+#define VXGE_HW_ROCRC_ALARM_REG_QCQ_OWN_NOT_ASSIGNED_ERR vxge_mBIT(16)
+#define VXGE_HW_ROCRC_ALARM_REG_QCQ_OWN_RSVD_SYNC_ERR vxge_mBIT(17)
+#define VXGE_HW_ROCRC_ALARM_REG_QCQ_LOST_EGB_ERR vxge_mBIT(18)
+#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ0_OVERFLOW vxge_mBIT(19)
+#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ1_OVERFLOW vxge_mBIT(20)
+#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ2_OVERFLOW vxge_mBIT(21)
+#define VXGE_HW_ROCRC_ALARM_REG_NOA_WCT_CMD_FIFO_ERR vxge_mBIT(22)
+/*0x00af0*/ u64 rocrc_alarm_mask;
+/*0x00af8*/ u64 rocrc_alarm_alarm;
+/*0x00b00*/ u64 wde0_alarm_reg;
+#define VXGE_HW_WDE0_ALARM_REG_WDE0_DCC_SM_ERR vxge_mBIT(0)
+#define VXGE_HW_WDE0_ALARM_REG_WDE0_PRM_SM_ERR vxge_mBIT(1)
+#define VXGE_HW_WDE0_ALARM_REG_WDE0_CP_SM_ERR vxge_mBIT(2)
+#define VXGE_HW_WDE0_ALARM_REG_WDE0_CP_CMD_ERR vxge_mBIT(3)
+#define VXGE_HW_WDE0_ALARM_REG_WDE0_PCR_SM_ERR vxge_mBIT(4)
+/*0x00b08*/ u64 wde0_alarm_mask;
+/*0x00b10*/ u64 wde0_alarm_alarm;
+/*0x00b18*/ u64 wde1_alarm_reg;
+#define VXGE_HW_WDE1_ALARM_REG_WDE1_DCC_SM_ERR vxge_mBIT(0)
+#define VXGE_HW_WDE1_ALARM_REG_WDE1_PRM_SM_ERR vxge_mBIT(1)
+#define VXGE_HW_WDE1_ALARM_REG_WDE1_CP_SM_ERR vxge_mBIT(2)
+#define VXGE_HW_WDE1_ALARM_REG_WDE1_CP_CMD_ERR vxge_mBIT(3)
+#define VXGE_HW_WDE1_ALARM_REG_WDE1_PCR_SM_ERR vxge_mBIT(4)
+/*0x00b20*/ u64 wde1_alarm_mask;
+/*0x00b28*/ u64 wde1_alarm_alarm;
+/*0x00b30*/ u64 wde2_alarm_reg;
+#define VXGE_HW_WDE2_ALARM_REG_WDE2_DCC_SM_ERR vxge_mBIT(0)
+#define VXGE_HW_WDE2_ALARM_REG_WDE2_PRM_SM_ERR vxge_mBIT(1)
+#define VXGE_HW_WDE2_ALARM_REG_WDE2_CP_SM_ERR vxge_mBIT(2)
+#define VXGE_HW_WDE2_ALARM_REG_WDE2_CP_CMD_ERR vxge_mBIT(3)
+#define VXGE_HW_WDE2_ALARM_REG_WDE2_PCR_SM_ERR vxge_mBIT(4)
+/*0x00b38*/ u64 wde2_alarm_mask;
+/*0x00b40*/ u64 wde2_alarm_alarm;
+/*0x00b48*/ u64 wde3_alarm_reg;
+#define VXGE_HW_WDE3_ALARM_REG_WDE3_DCC_SM_ERR vxge_mBIT(0)
+#define VXGE_HW_WDE3_ALARM_REG_WDE3_PRM_SM_ERR vxge_mBIT(1)
+#define VXGE_HW_WDE3_ALARM_REG_WDE3_CP_SM_ERR vxge_mBIT(2)
+#define VXGE_HW_WDE3_ALARM_REG_WDE3_CP_CMD_ERR vxge_mBIT(3)
+#define VXGE_HW_WDE3_ALARM_REG_WDE3_PCR_SM_ERR vxge_mBIT(4)
+/*0x00b50*/ u64 wde3_alarm_mask;
+/*0x00b58*/ u64 wde3_alarm_alarm;
+
+ u8 unused00be8[0x00be8-0x00b60];
+
+/*0x00be8*/ u64 rx_w_round_robin_0;
+#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0(val) vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1(val) vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_2(val) vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_3(val) vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_4(val) vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_5(val) vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_6(val) vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_7(val) vxge_vBIT(val, 59, 5)
+/*0x00bf0*/ u64 rx_w_round_robin_1;
+#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_8(val) vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_9(val) vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_10(val) \
+ vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_11(val) \
+ vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_12(val) \
+ vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_13(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_14(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_15(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00bf8*/ u64 rx_w_round_robin_2;
+#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_16(val) vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_17(val) \
+ vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_18(val) \
+ vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_19(val) \
+ vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_20(val) \
+ vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_21(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_22(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_23(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00c00*/ u64 rx_w_round_robin_3;
+#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_24(val) vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_25(val) \
+ vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_26(val) \
+ vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_27(val) \
+ vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_28(val) \
+ vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_29(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_30(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_31(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00c08*/ u64 rx_w_round_robin_4;
+#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_32(val) vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_33(val) \
+ vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_34(val) \
+ vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_35(val) \
+ vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_36(val) \
+ vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_37(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_38(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_39(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00c10*/ u64 rx_w_round_robin_5;
+#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_40(val) vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_41(val) \
+ vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_42(val) \
+ vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_43(val) \
+ vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_44(val) \
+ vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_45(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_46(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_47(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00c18*/ u64 rx_w_round_robin_6;
+#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_48(val) vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_49(val) \
+ vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_50(val) \
+ vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_51(val) \
+ vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_52(val) \
+ vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_53(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_54(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_55(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00c20*/ u64 rx_w_round_robin_7;
+#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_56(val) vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_57(val) \
+ vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_58(val) \
+ vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_59(val) \
+ vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_60(val) \
+ vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_61(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_62(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_63(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00c28*/ u64 rx_w_round_robin_8;
+#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_64(val) vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_65(val) \
+ vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_66(val) \
+ vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_67(val) \
+ vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_68(val) \
+ vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_69(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_70(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_71(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00c30*/ u64 rx_w_round_robin_9;
+#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_72(val) vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_73(val) \
+ vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_74(val) \
+ vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_75(val) \
+ vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_76(val) \
+ vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_77(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_78(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_79(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00c38*/ u64 rx_w_round_robin_10;
+#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_80(val) \
+ vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_81(val) \
+ vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_82(val) \
+ vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_83(val) \
+ vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_84(val) \
+ vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_85(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_86(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_87(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00c40*/ u64 rx_w_round_robin_11;
+#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_88(val) \
+ vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_89(val) \
+ vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_90(val) \
+ vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_91(val) \
+ vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_92(val) \
+ vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_93(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_94(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_95(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00c48*/ u64 rx_w_round_robin_12;
+#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_96(val) \
+ vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_97(val) \
+ vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_98(val) \
+ vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_99(val) \
+ vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_100(val) \
+ vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_101(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_102(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_103(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00c50*/ u64 rx_w_round_robin_13;
+#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_104(val) \
+ vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_105(val) \
+ vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_106(val) \
+ vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_107(val) \
+ vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_108(val) \
+ vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_109(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_110(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_111(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00c58*/ u64 rx_w_round_robin_14;
+#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_112(val) \
+ vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_113(val) \
+ vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_114(val) \
+ vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_115(val) \
+ vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_116(val) \
+ vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_117(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_118(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_119(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00c60*/ u64 rx_w_round_robin_15;
+#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_120(val) \
+ vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_121(val) \
+ vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_122(val) \
+ vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_123(val) \
+ vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_124(val) \
+ vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_125(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_126(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_127(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00c68*/ u64 rx_w_round_robin_16;
+#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_128(val) \
+ vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_129(val) \
+ vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_130(val) \
+ vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_131(val) \
+ vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_132(val) \
+ vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_133(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_134(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_135(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00c70*/ u64 rx_w_round_robin_17;
+#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_136(val) \
+ vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_137(val) \
+ vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_138(val) \
+ vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_139(val) \
+ vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_140(val) \
+ vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_141(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_142(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_143(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00c78*/ u64 rx_w_round_robin_18;
+#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_144(val) \
+ vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_145(val) \
+ vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_146(val) \
+ vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_147(val) \
+ vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_148(val) \
+ vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_149(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_150(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_151(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00c80*/ u64 rx_w_round_robin_19;
+#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_152(val) \
+ vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_153(val) \
+ vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_154(val) \
+ vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_155(val) \
+ vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_156(val) \
+ vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_157(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_158(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_159(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00c88*/ u64 rx_w_round_robin_20;
+#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_160(val) \
+ vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_161(val) \
+ vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_162(val) \
+ vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_163(val) \
+ vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_164(val) \
+ vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_165(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_166(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_167(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00c90*/ u64 rx_w_round_robin_21;
+#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_168(val) \
+ vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_169(val) \
+ vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_170(val) \
+ vxge_vBIT(val, 19, 5)
+
+#define VXGE_HW_WRR_RING_SERVICE_STATES 171
+#define VXGE_HW_WRR_RING_COUNT 22
+
+/*0x00c98*/ u64 rx_queue_priority_0;
+#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_0(val) vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_1(val) vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_2(val) vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_3(val) vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_4(val) vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_5(val) vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_6(val) vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_7(val) vxge_vBIT(val, 59, 5)
+/*0x00ca0*/ u64 rx_queue_priority_1;
+#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_8(val) vxge_vBIT(val, 3, 5)
+#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_9(val) vxge_vBIT(val, 11, 5)
+#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_10(val) vxge_vBIT(val, 19, 5)
+#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_11(val) vxge_vBIT(val, 27, 5)
+#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_12(val) vxge_vBIT(val, 35, 5)
+#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_13(val) vxge_vBIT(val, 43, 5)
+#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_14(val) vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_15(val) vxge_vBIT(val, 59, 5)
+/*0x00ca8*/ u64 rx_queue_priority_2;
+#define VXGE_HW_RX_QUEUE_PRIORITY_2_RX_Q_NUMBER_16(val) vxge_vBIT(val, 3, 5)
+ u8 unused00cc8[0x00cc8-0x00cb0];
+
+/*0x00cc8*/ u64 replication_queue_priority;
+#define VXGE_HW_REPLICATION_QUEUE_PRIORITY_REPLICATION_QUEUE_PRIORITY(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00cd0*/ u64 rx_queue_select;
+#define VXGE_HW_RX_QUEUE_SELECT_NUMBER(n) vxge_mBIT(n)
+#define VXGE_HW_RX_QUEUE_SELECT_ENABLE_CODE vxge_mBIT(15)
+#define VXGE_HW_RX_QUEUE_SELECT_ENABLE_HIERARCHICAL_PRTY vxge_mBIT(23)
+/*0x00cd8*/ u64 rqa_vpbp_ctrl;
+#define VXGE_HW_RQA_VPBP_CTRL_WR_XON_DIS vxge_mBIT(15)
+#define VXGE_HW_RQA_VPBP_CTRL_ROCRC_DIS vxge_mBIT(23)
+#define VXGE_HW_RQA_VPBP_CTRL_TXPE_DIS vxge_mBIT(31)
+/*0x00ce0*/ u64 rx_multi_cast_ctrl;
+#define VXGE_HW_RX_MULTI_CAST_CTRL_TIME_OUT_DIS vxge_mBIT(0)
+#define VXGE_HW_RX_MULTI_CAST_CTRL_FRM_DROP_DIS vxge_mBIT(1)
+#define VXGE_HW_RX_MULTI_CAST_CTRL_NO_RXD_TIME_OUT_CNT(val) \
+ vxge_vBIT(val, 2, 30)
+#define VXGE_HW_RX_MULTI_CAST_CTRL_TIME_OUT_CNT(val) vxge_vBIT(val, 32, 32)
+/*0x00ce8*/ u64 wde_prm_ctrl;
+#define VXGE_HW_WDE_PRM_CTRL_SPAV_THRESHOLD(val) vxge_vBIT(val, 2, 10)
+#define VXGE_HW_WDE_PRM_CTRL_SPLIT_THRESHOLD(val) vxge_vBIT(val, 18, 14)
+#define VXGE_HW_WDE_PRM_CTRL_SPLIT_ON_1ST_ROW vxge_mBIT(32)
+#define VXGE_HW_WDE_PRM_CTRL_SPLIT_ON_ROW_BNDRY vxge_mBIT(33)
+#define VXGE_HW_WDE_PRM_CTRL_FB_ROW_SIZE(val) vxge_vBIT(val, 46, 2)
+/*0x00cf0*/ u64 noa_ctrl;
+#define VXGE_HW_NOA_CTRL_FRM_PRTY_QUOTA(val) vxge_vBIT(val, 3, 5)
+#define VXGE_HW_NOA_CTRL_NON_FRM_PRTY_QUOTA(val) vxge_vBIT(val, 11, 5)
+#define VXGE_HW_NOA_CTRL_IGNORE_KDFC_IF_STATUS vxge_mBIT(16)
+#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE0(val) vxge_vBIT(val, 37, 4)
+#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE1(val) vxge_vBIT(val, 45, 4)
+#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE2(val) vxge_vBIT(val, 53, 4)
+#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE3(val) vxge_vBIT(val, 60, 4)
+/*0x00cf8*/ u64 phase_cfg;
+#define VXGE_HW_PHASE_CFG_QCC_WR_PHASE_EN vxge_mBIT(0)
+#define VXGE_HW_PHASE_CFG_QCC_RD_PHASE_EN vxge_mBIT(3)
+#define VXGE_HW_PHASE_CFG_IMMM_WR_PHASE_EN vxge_mBIT(7)
+#define VXGE_HW_PHASE_CFG_IMMM_RD_PHASE_EN vxge_mBIT(11)
+#define VXGE_HW_PHASE_CFG_UMQM_WR_PHASE_EN vxge_mBIT(15)
+#define VXGE_HW_PHASE_CFG_UMQM_RD_PHASE_EN vxge_mBIT(19)
+#define VXGE_HW_PHASE_CFG_RCBM_WR_PHASE_EN vxge_mBIT(23)
+#define VXGE_HW_PHASE_CFG_RCBM_RD_PHASE_EN vxge_mBIT(27)
+#define VXGE_HW_PHASE_CFG_RXD_RC_WR_PHASE_EN vxge_mBIT(31)
+#define VXGE_HW_PHASE_CFG_RXD_RC_RD_PHASE_EN vxge_mBIT(35)
+#define VXGE_HW_PHASE_CFG_RXD_RHS_WR_PHASE_EN vxge_mBIT(39)
+#define VXGE_HW_PHASE_CFG_RXD_RHS_RD_PHASE_EN vxge_mBIT(43)
+/*0x00d00*/ u64 rcq_bypq_cfg;
+#define VXGE_HW_RCQ_BYPQ_CFG_OVERFLOW_THRESHOLD(val) vxge_vBIT(val, 10, 22)
+#define VXGE_HW_RCQ_BYPQ_CFG_BYP_ON_THRESHOLD(val) vxge_vBIT(val, 39, 9)
+#define VXGE_HW_RCQ_BYPQ_CFG_BYP_OFF_THRESHOLD(val) vxge_vBIT(val, 55, 9)
+ u8 unused00e00[0x00e00-0x00d08];
+
+/*0x00e00*/ u64 doorbell_int_status;
+#define VXGE_HW_DOORBELL_INT_STATUS_KDFC_ERR_REG_TXDMA_KDFC_INT vxge_mBIT(7)
+#define VXGE_HW_DOORBELL_INT_STATUS_USDC_ERR_REG_TXDMA_USDC_INT vxge_mBIT(15)
+/*0x00e08*/ u64 doorbell_int_mask;
+/*0x00e10*/ u64 kdfc_err_reg;
+#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_ECC_SG_ERR vxge_mBIT(7)
+#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_ECC_DB_ERR vxge_mBIT(15)
+#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_SM_ERR_ALARM vxge_mBIT(23)
+#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_MISC_ERR_1 vxge_mBIT(32)
+#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_PCIX_ERR vxge_mBIT(39)
+/*0x00e18*/ u64 kdfc_err_mask;
+/*0x00e20*/ u64 kdfc_err_reg_alarm;
+#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_ECC_SG_ERR vxge_mBIT(7)
+#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_ECC_DB_ERR vxge_mBIT(15)
+#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_SM_ERR_ALARM vxge_mBIT(23)
+#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_MISC_ERR_1 vxge_mBIT(32)
+#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_PCIX_ERR vxge_mBIT(39)
+ u8 unused00e40[0x00e40-0x00e28];
+/*0x00e40*/ u64 kdfc_vp_partition_0;
+#define VXGE_HW_KDFC_VP_PARTITION_0_ENABLE vxge_mBIT(0)
+#define VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_0(val) vxge_vBIT(val, 5, 3)
+#define VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_0(val) vxge_vBIT(val, 17, 15)
+#define VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_1(val) vxge_vBIT(val, 37, 3)
+#define VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_1(val) vxge_vBIT(val, 49, 15)
+/*0x00e48*/ u64 kdfc_vp_partition_1;
+#define VXGE_HW_KDFC_VP_PARTITION_1_NUMBER_2(val) vxge_vBIT(val, 5, 3)
+#define VXGE_HW_KDFC_VP_PARTITION_1_LENGTH_2(val) vxge_vBIT(val, 17, 15)
+#define VXGE_HW_KDFC_VP_PARTITION_1_NUMBER_3(val) vxge_vBIT(val, 37, 3)
+#define VXGE_HW_KDFC_VP_PARTITION_1_LENGTH_3(val) vxge_vBIT(val, 49, 15)
+/*0x00e50*/ u64 kdfc_vp_partition_2;
+#define VXGE_HW_KDFC_VP_PARTITION_2_NUMBER_4(val) vxge_vBIT(val, 5, 3)
+#define VXGE_HW_KDFC_VP_PARTITION_2_LENGTH_4(val) vxge_vBIT(val, 17, 15)
+#define VXGE_HW_KDFC_VP_PARTITION_2_NUMBER_5(val) vxge_vBIT(val, 37, 3)
+#define VXGE_HW_KDFC_VP_PARTITION_2_LENGTH_5(val) vxge_vBIT(val, 49, 15)
+/*0x00e58*/ u64 kdfc_vp_partition_3;
+#define VXGE_HW_KDFC_VP_PARTITION_3_NUMBER_6(val) vxge_vBIT(val, 5, 3)
+#define VXGE_HW_KDFC_VP_PARTITION_3_LENGTH_6(val) vxge_vBIT(val, 17, 15)
+#define VXGE_HW_KDFC_VP_PARTITION_3_NUMBER_7(val) vxge_vBIT(val, 37, 3)
+#define VXGE_HW_KDFC_VP_PARTITION_3_LENGTH_7(val) vxge_vBIT(val, 49, 15)
+/*0x00e60*/ u64 kdfc_vp_partition_4;
+#define VXGE_HW_KDFC_VP_PARTITION_4_LENGTH_8(val) vxge_vBIT(val, 17, 15)
+#define VXGE_HW_KDFC_VP_PARTITION_4_LENGTH_9(val) vxge_vBIT(val, 49, 15)
+/*0x00e68*/ u64 kdfc_vp_partition_5;
+#define VXGE_HW_KDFC_VP_PARTITION_5_LENGTH_10(val) vxge_vBIT(val, 17, 15)
+#define VXGE_HW_KDFC_VP_PARTITION_5_LENGTH_11(val) vxge_vBIT(val, 49, 15)
+/*0x00e70*/ u64 kdfc_vp_partition_6;
+#define VXGE_HW_KDFC_VP_PARTITION_6_LENGTH_12(val) vxge_vBIT(val, 17, 15)
+#define VXGE_HW_KDFC_VP_PARTITION_6_LENGTH_13(val) vxge_vBIT(val, 49, 15)
+/*0x00e78*/ u64 kdfc_vp_partition_7;
+#define VXGE_HW_KDFC_VP_PARTITION_7_LENGTH_14(val) vxge_vBIT(val, 17, 15)
+#define VXGE_HW_KDFC_VP_PARTITION_7_LENGTH_15(val) vxge_vBIT(val, 49, 15)
+/*0x00e80*/ u64 kdfc_vp_partition_8;
+#define VXGE_HW_KDFC_VP_PARTITION_8_LENGTH_16(val) vxge_vBIT(val, 17, 15)
+/*0x00e88*/ u64 kdfc_w_round_robin_0;
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(val) vxge_vBIT(val, 3, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(val) vxge_vBIT(val, 11, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(val) vxge_vBIT(val, 19, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(val) vxge_vBIT(val, 27, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(val) vxge_vBIT(val, 35, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(val) vxge_vBIT(val, 43, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(val) vxge_vBIT(val, 51, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(val) vxge_vBIT(val, 59, 5)
+
+ u8 unused0f28[0x0f28-0x0e90];
+
+/*0x00f28*/ u64 kdfc_w_round_robin_20;
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_0(val) vxge_vBIT(val, 3, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_1(val) vxge_vBIT(val, 11, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_2(val) vxge_vBIT(val, 19, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_3(val) vxge_vBIT(val, 27, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_4(val) vxge_vBIT(val, 35, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_5(val) vxge_vBIT(val, 43, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_6(val) vxge_vBIT(val, 51, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_7(val) vxge_vBIT(val, 59, 5)
+
+#define VXGE_HW_WRR_FIFO_COUNT 20
+
+ u8 unused0fc8[0x0fc8-0x0f30];
+
+/*0x00fc8*/ u64 kdfc_w_round_robin_40;
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_0(val) vxge_vBIT(val, 3, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_1(val) vxge_vBIT(val, 11, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_2(val) vxge_vBIT(val, 19, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_3(val) vxge_vBIT(val, 27, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_4(val) vxge_vBIT(val, 35, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_5(val) vxge_vBIT(val, 43, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_6(val) vxge_vBIT(val, 51, 5)
+#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_7(val) vxge_vBIT(val, 59, 5)
+
+ u8 unused1068[0x01068-0x0fd0];
+
+/*0x01068*/ u64 kdfc_entry_type_sel_0;
+#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_0(val) vxge_vBIT(val, 6, 2)
+#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_1(val) vxge_vBIT(val, 14, 2)
+#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_2(val) vxge_vBIT(val, 22, 2)
+#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_3(val) vxge_vBIT(val, 30, 2)
+#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_4(val) vxge_vBIT(val, 38, 2)
+#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_5(val) vxge_vBIT(val, 46, 2)
+#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_6(val) vxge_vBIT(val, 54, 2)
+#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_7(val) vxge_vBIT(val, 62, 2)
+/*0x01070*/ u64 kdfc_entry_type_sel_1;
+#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_1_NUMBER_8(val) vxge_vBIT(val, 6, 2)
+/*0x01078*/ u64 kdfc_fifo_0_ctrl;
+#define VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(val) vxge_vBIT(val, 3, 5)
+#define VXGE_HW_WEIGHTED_RR_SERVICE_STATES 176
+#define VXGE_HW_WRR_FIFO_SERVICE_STATES 153
+
+ u8 unused1100[0x01100-0x1080];
+
+/*0x01100*/ u64 kdfc_fifo_17_ctrl;
+#define VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(val) vxge_vBIT(val, 3, 5)
+
+ u8 unused1600[0x01600-0x1108];
+
+/*0x01600*/ u64 rxmac_int_status;
+#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_GEN_ERR_RXMAC_GEN_INT vxge_mBIT(3)
+#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_ECC_ERR_RXMAC_ECC_INT vxge_mBIT(7)
+#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_VARIOUS_ERR_RXMAC_VARIOUS_INT \
+ vxge_mBIT(11)
+/*0x01608*/ u64 rxmac_int_mask;
+ u8 unused01618[0x01618-0x01610];
+
+/*0x01618*/ u64 rxmac_gen_err_reg;
+/*0x01620*/ u64 rxmac_gen_err_mask;
+/*0x01628*/ u64 rxmac_gen_err_alarm;
+/*0x01630*/ u64 rxmac_ecc_err_reg;
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_SG_ERR(val) \
+ vxge_vBIT(val, 0, 4)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_DB_ERR(val) \
+ vxge_vBIT(val, 4, 4)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_SG_ERR(val) \
+ vxge_vBIT(val, 8, 4)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_DB_ERR(val) \
+ vxge_vBIT(val, 12, 4)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_SG_ERR(val) \
+ vxge_vBIT(val, 16, 4)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_DB_ERR(val) \
+ vxge_vBIT(val, 20, 4)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_SG_ERR(val) \
+ vxge_vBIT(val, 24, 2)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_DB_ERR(val) \
+ vxge_vBIT(val, 26, 2)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_SG_ERR(val) \
+ vxge_vBIT(val, 28, 2)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_DB_ERR(val) \
+ vxge_vBIT(val, 30, 2)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_SG_ERR vxge_mBIT(32)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_DB_ERR vxge_mBIT(33)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_SG_ERR vxge_mBIT(34)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_DB_ERR vxge_mBIT(35)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_SG_ERR vxge_mBIT(36)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_DB_ERR vxge_mBIT(37)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_SG_ERR vxge_mBIT(38)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_DB_ERR vxge_mBIT(39)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_SG_ERR(val) \
+ vxge_vBIT(val, 40, 7)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_DB_ERR(val) \
+ vxge_vBIT(val, 47, 7)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_SG_ERR(val) \
+ vxge_vBIT(val, 54, 3)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_DB_ERR(val) \
+ vxge_vBIT(val, 57, 3)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_SG_ERR \
+ vxge_mBIT(60)
+#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_DB_ERR \
+ vxge_mBIT(61)
+/*0x01638*/ u64 rxmac_ecc_err_mask;
+/*0x01640*/ u64 rxmac_ecc_err_alarm;
+/*0x01648*/ u64 rxmac_various_err_reg;
+#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT0_FSM_ERR vxge_mBIT(0)
+#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT1_FSM_ERR vxge_mBIT(1)
+#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT2_FSM_ERR vxge_mBIT(2)
+#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMACJ_RMACJ_FSM_ERR vxge_mBIT(3)
+/*0x01650*/ u64 rxmac_various_err_mask;
+/*0x01658*/ u64 rxmac_various_err_alarm;
+/*0x01660*/ u64 rxmac_gen_cfg;
+#define VXGE_HW_RXMAC_GEN_CFG_SCALE_RMAC_UTIL vxge_mBIT(11)
+/*0x01668*/ u64 rxmac_authorize_all_addr;
+#define VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(n) vxge_mBIT(n)
+/*0x01670*/ u64 rxmac_authorize_all_vid;
+#define VXGE_HW_RXMAC_AUTHORIZE_ALL_VID_VP(n) vxge_mBIT(n)
+ u8 unused016c0[0x016c0-0x01678];
+
+/*0x016c0*/ u64 rxmac_red_rate_repl_queue;
+#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR0(val) vxge_vBIT(val, 0, 4)
+#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR1(val) vxge_vBIT(val, 4, 4)
+#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR2(val) vxge_vBIT(val, 8, 4)
+#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR3(val) vxge_vBIT(val, 12, 4)
+#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR0(val) vxge_vBIT(val, 16, 4)
+#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR1(val) vxge_vBIT(val, 20, 4)
+#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR2(val) vxge_vBIT(val, 24, 4)
+#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR3(val) vxge_vBIT(val, 28, 4)
+#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_TRICKLE_EN vxge_mBIT(35)
+ u8 unused016e0[0x016e0-0x016c8];
+
+/*0x016e0*/ u64 rxmac_cfg0_port[3];
+#define VXGE_HW_RXMAC_CFG0_PORT_RMAC_EN vxge_mBIT(3)
+#define VXGE_HW_RXMAC_CFG0_PORT_STRIP_FCS vxge_mBIT(7)
+#define VXGE_HW_RXMAC_CFG0_PORT_DISCARD_PFRM vxge_mBIT(11)
+#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_FCS_ERR vxge_mBIT(15)
+#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_LONG_ERR vxge_mBIT(19)
+#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_USIZED_ERR vxge_mBIT(23)
+#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_LEN_MISMATCH vxge_mBIT(27)
+#define VXGE_HW_RXMAC_CFG0_PORT_MAX_PYLD_LEN(val) vxge_vBIT(val, 50, 14)
+ u8 unused01710[0x01710-0x016f8];
+
+/*0x01710*/ u64 rxmac_cfg2_port[3];
+#define VXGE_HW_RXMAC_CFG2_PORT_PROM_EN vxge_mBIT(3)
+/*0x01728*/ u64 rxmac_pause_cfg_port[3];
+#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN vxge_mBIT(3)
+#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN vxge_mBIT(7)
+#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_ACCEL_SEND(val) vxge_vBIT(val, 9, 3)
+#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_DUAL_THR vxge_mBIT(15)
+#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_HIGH_PTIME(val) vxge_vBIT(val, 20, 16)
+#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_IGNORE_PF_FCS_ERR vxge_mBIT(39)
+#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_IGNORE_PF_LEN_ERR vxge_mBIT(43)
+#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_LIMITER_EN vxge_mBIT(47)
+#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_MAX_LIMIT(val) vxge_vBIT(val, 48, 8)
+#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_PERMIT_RATEMGMT_CTRL vxge_mBIT(59)
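+/* Illustrative composition (a sketch, not part of this patch):
+ * enabling PAUSE generation and reception with a maximum high-water
+ * pause time would OR the field macros into one 64-bit value, e.g.
+ *
+ *	val = VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN |
+ *	      VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN |
+ *	      VXGE_HW_RXMAC_PAUSE_CFG_PORT_HIGH_PTIME(0xffff);
+ *
+ * before writing it to rxmac_pause_cfg_port[port].
+ */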
+ u8 unused01758[0x01758-0x01740];
+
+/*0x01758*/ u64 rxmac_red_cfg0_port[3];
+#define VXGE_HW_RXMAC_RED_CFG0_PORT_RED_EN_VP(n) vxge_mBIT(n)
+/*0x01770*/ u64 rxmac_red_cfg1_port[3];
+#define VXGE_HW_RXMAC_RED_CFG1_PORT_FINE_EN vxge_mBIT(3)
+#define VXGE_HW_RXMAC_RED_CFG1_PORT_RED_EN_REPL_QUEUE vxge_mBIT(11)
+/*0x01788*/ u64 rxmac_red_cfg2_port[3];
+#define VXGE_HW_RXMAC_RED_CFG2_PORT_TRICKLE_EN_VP(n) vxge_mBIT(n)
+/*0x017a0*/ u64 rxmac_link_util_port[3];
+#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_UTILIZATION(val) \
+ vxge_vBIT(val, 1, 7)
+#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_UTIL_CFG(val) vxge_vBIT(val, 8, 4)
+#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_FRAC_UTIL(val) \
+ vxge_vBIT(val, 12, 4)
+#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_PKT_WEIGHT(val) vxge_vBIT(val, 16, 4)
+#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_SCALE_FACTOR vxge_mBIT(23)
+ u8 unused017d0[0x017d0-0x017b8];
+
+/*0x017d0*/ u64 rxmac_status_port[3];
+#define VXGE_HW_RXMAC_STATUS_PORT_RMAC_RX_FRM_RCVD vxge_mBIT(3)
+ u8 unused01800[0x01800-0x017e8];
+
+/*0x01800*/ u64 rxmac_rx_pa_cfg0;
+#define VXGE_HW_RXMAC_RX_PA_CFG0_IGNORE_FRAME_ERR vxge_mBIT(3)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_SUPPORT_SNAP_AB_N vxge_mBIT(7)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_SEARCH_FOR_HAO vxge_mBIT(18)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_SUPPORT_MOBILE_IPV6_HDRS vxge_mBIT(19)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_IPV6_STOP_SEARCHING vxge_mBIT(23)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_NO_PS_IF_UNKNOWN vxge_mBIT(27)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_SEARCH_FOR_ETYPE vxge_mBIT(35)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L3_CSUM_ERR vxge_mBIT(39)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_L3_CSUM_ERR vxge_mBIT(43)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L4_CSUM_ERR vxge_mBIT(47)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_L4_CSUM_ERR vxge_mBIT(51)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_RPA_ERR vxge_mBIT(55)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_RPA_ERR vxge_mBIT(59)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_JUMBO_SNAP_EN vxge_mBIT(63)
+/*0x01808*/ u64 rxmac_rx_pa_cfg1;
+#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV4_TCP_INCL_PH vxge_mBIT(3)
+#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV6_TCP_INCL_PH vxge_mBIT(7)
+#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV4_UDP_INCL_PH vxge_mBIT(11)
+#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV6_UDP_INCL_PH vxge_mBIT(15)
+#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_L4_INCL_CF vxge_mBIT(19)
+#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_STRIP_VLAN_TAG vxge_mBIT(23)
+ u8 unused01828[0x01828-0x01810];
+
+/*0x01828*/ u64 rts_mgr_cfg0;
+#define VXGE_HW_RTS_MGR_CFG0_RTS_DP_SP_PRIORITY vxge_mBIT(3)
+#define VXGE_HW_RTS_MGR_CFG0_FLEX_L4PRTCL_VALUE(val) vxge_vBIT(val, 24, 8)
+#define VXGE_HW_RTS_MGR_CFG0_ICMP_TRASH vxge_mBIT(35)
+#define VXGE_HW_RTS_MGR_CFG0_TCPSYN_TRASH vxge_mBIT(39)
+#define VXGE_HW_RTS_MGR_CFG0_ZL4PYLD_TRASH vxge_mBIT(43)
+#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_TCP_TRASH vxge_mBIT(47)
+#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_UDP_TRASH vxge_mBIT(51)
+#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_FLEX_TRASH vxge_mBIT(55)
+#define VXGE_HW_RTS_MGR_CFG0_IPFRAG_TRASH vxge_mBIT(59)
+/*0x01830*/ u64 rts_mgr_cfg1;
+#define VXGE_HW_RTS_MGR_CFG1_DA_ACTIVE_TABLE vxge_mBIT(3)
+#define VXGE_HW_RTS_MGR_CFG1_PN_ACTIVE_TABLE vxge_mBIT(7)
+/*0x01838*/ u64 rts_mgr_criteria_priority;
+#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ETYPE(val) vxge_vBIT(val, 5, 3)
+#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ICMP_TCPSYN(val) vxge_vBIT(val, 9, 3)
+#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PN(val) vxge_vBIT(val, 13, 3)
+#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RANGE_L4PN(val) vxge_vBIT(val, 17, 3)
+#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RTH_IT(val) vxge_vBIT(val, 21, 3)
+#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_DS(val) vxge_vBIT(val, 25, 3)
+#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_QOS(val) vxge_vBIT(val, 29, 3)
+#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ZL4PYLD(val) vxge_vBIT(val, 33, 3)
+#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PRTCL(val) vxge_vBIT(val, 37, 3)
+/*0x01840*/ u64 rts_mgr_da_pause_cfg;
+#define VXGE_HW_RTS_MGR_DA_PAUSE_CFG_VPATH_VECTOR(val) vxge_vBIT(val, 0, 17)
+/*0x01848*/ u64 rts_mgr_da_slow_proto_cfg;
+#define VXGE_HW_RTS_MGR_DA_SLOW_PROTO_CFG_VPATH_VECTOR(val) \
+ vxge_vBIT(val, 0, 17)
+ u8 unused01890[0x01890-0x01850];
+/*0x01890*/ u64 rts_mgr_cbasin_cfg;
+ u8 unused01968[0x01968-0x01898];
+
+/*0x01968*/ u64 dbg_stat_rx_any_frms;
+#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT0_RX_ANY_FRMS(val) vxge_vBIT(val, 0, 8)
+#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT1_RX_ANY_FRMS(val) vxge_vBIT(val, 8, 8)
+#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT2_RX_ANY_FRMS(val) \
+ vxge_vBIT(val, 16, 8)
+ u8 unused01a00[0x01a00-0x01970];
+
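+/* Per-vpath RED (random early discard) thresholds; the 17 entries
+ * presumably map one-to-one onto the device's 17 virtual paths,
+ * mirroring the per-replication-queue layout at 0x016c0 above.
+ */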
+/*0x01a00*/ u64 rxmac_red_rate_vp[17];
+#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR0(val) vxge_vBIT(val, 0, 4)
+#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR1(val) vxge_vBIT(val, 4, 4)
+#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR2(val) vxge_vBIT(val, 8, 4)
+#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR3(val) vxge_vBIT(val, 12, 4)
+#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR0(val) vxge_vBIT(val, 16, 4)
+#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR1(val) vxge_vBIT(val, 20, 4)
+#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR2(val) vxge_vBIT(val, 24, 4)
+#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR3(val) vxge_vBIT(val, 28, 4)
+ u8 unused01e00[0x01e00-0x01a88];
+
+/*0x01e00*/ u64 xgmac_int_status;
+#define VXGE_HW_XGMAC_INT_STATUS_XMAC_GEN_ERR_XMAC_GEN_INT vxge_mBIT(3)
+#define VXGE_HW_XGMAC_INT_STATUS_XMAC_LINK_ERR_PORT0_XMAC_LINK_INT_PORT0 \
+ vxge_mBIT(7)
+#define VXGE_HW_XGMAC_INT_STATUS_XMAC_LINK_ERR_PORT1_XMAC_LINK_INT_PORT1 \
+ vxge_mBIT(11)
+#define VXGE_HW_XGMAC_INT_STATUS_XGXS_GEN_ERR_XGXS_GEN_INT vxge_mBIT(15)
+#define VXGE_HW_XGMAC_INT_STATUS_ASIC_NTWK_ERR_ASIC_NTWK_INT vxge_mBIT(19)
+#define VXGE_HW_XGMAC_INT_STATUS_ASIC_GPIO_ERR_ASIC_GPIO_INT vxge_mBIT(23)
+/*0x01e08*/ u64 xgmac_int_mask;
+/*0x01e10*/ u64 xmac_gen_err_reg;
+#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_ACTOR_CHURN_DETECTED \
+ vxge_mBIT(7)
+#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_PARTNER_CHURN_DETECTED \
+ vxge_mBIT(11)
+#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_RECEIVED_LACPDU vxge_mBIT(15)
+#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_ACTOR_CHURN_DETECTED \
+ vxge_mBIT(19)
+#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_PARTNER_CHURN_DETECTED \
+ vxge_mBIT(23)
+#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_RECEIVED_LACPDU vxge_mBIT(27)
+#define VXGE_HW_XMAC_GEN_ERR_REG_XLCM_LAG_FAILOVER_DETECTED vxge_mBIT(31)
+#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_SG_ERR(val) \
+ vxge_vBIT(val, 40, 2)
+#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_DB_ERR(val) \
+ vxge_vBIT(val, 42, 2)
+#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_SG_ERR(val) \
+ vxge_vBIT(val, 44, 2)
+#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_DB_ERR(val) \
+ vxge_vBIT(val, 46, 2)
+#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_SG_ERR(val) \
+ vxge_vBIT(val, 48, 2)
+#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_DB_ERR(val) \
+ vxge_vBIT(val, 50, 2)
+#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_SG_ERR(val) \
+ vxge_vBIT(val, 52, 2)
+#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_DB_ERR(val) \
+ vxge_vBIT(val, 54, 2)
+#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_SG_ERR(val) \
+ vxge_vBIT(val, 56, 2)
+#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_DB_ERR(val) \
+ vxge_vBIT(val, 58, 2)
+#define VXGE_HW_XMAC_GEN_ERR_REG_XMACJ_XMAC_FSM_ERR vxge_mBIT(63)
+/*0x01e18*/ u64 xmac_gen_err_mask;
+/*0x01e20*/ u64 xmac_gen_err_alarm;
+/*0x01e28*/ u64 xmac_link_err_port0_reg;
+#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_DOWN vxge_mBIT(3)
+#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_UP vxge_mBIT(7)
+#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_DOWN vxge_mBIT(11)
+#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_UP vxge_mBIT(15)
+#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_REAFFIRMED_FAULT \
+ vxge_mBIT(19)
+#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_REAFFIRMED_OK vxge_mBIT(23)
+#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_LINK_DOWN vxge_mBIT(27)
+#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_LINK_UP vxge_mBIT(31)
+#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_RATE_CHANGE vxge_mBIT(35)
+#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_LASI_INV vxge_mBIT(39)
+#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMDIO_MDIO_MGR_ACCESS_COMPLETE \
+ vxge_mBIT(47)
+/*0x01e30*/ u64 xmac_link_err_port0_mask;
+/*0x01e38*/ u64 xmac_link_err_port0_alarm;
+/*0x01e40*/ u64 xmac_link_err_port1_reg;
+/*0x01e48*/ u64 xmac_link_err_port1_mask;
+/*0x01e50*/ u64 xmac_link_err_port1_alarm;
+/*0x01e58*/ u64 xgxs_gen_err_reg;
+#define VXGE_HW_XGXS_GEN_ERR_REG_XGXS_XGXS_FSM_ERR vxge_mBIT(63)
+/*0x01e60*/ u64 xgxs_gen_err_mask;
+/*0x01e68*/ u64 xgxs_gen_err_alarm;
+/*0x01e70*/ u64 asic_ntwk_err_reg;
+#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_DOWN vxge_mBIT(3)
+#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_UP vxge_mBIT(7)
+#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_DOWN vxge_mBIT(11)
+#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_UP vxge_mBIT(15)
+#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT vxge_mBIT(19)
+#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK vxge_mBIT(23)
+/*0x01e78*/ u64 asic_ntwk_err_mask;
+/*0x01e80*/ u64 asic_ntwk_err_alarm;
+/*0x01e88*/ u64 asic_gpio_err_reg;
+#define VXGE_HW_ASIC_GPIO_ERR_REG_XMACJ_GPIO_INT(n) vxge_mBIT(n)
+/*0x01e90*/ u64 asic_gpio_err_mask;
+/*0x01e98*/ u64 asic_gpio_err_alarm;
+/*0x01ea0*/ u64 xgmac_gen_status;
+#define VXGE_HW_XGMAC_GEN_STATUS_XMACJ_NTWK_OK vxge_mBIT(3)
+#define VXGE_HW_XGMAC_GEN_STATUS_XMACJ_NTWK_DATA_RATE vxge_mBIT(11)
+/*0x01ea8*/ u64 xgmac_gen_fw_memo_status;
+#define VXGE_HW_XGMAC_GEN_FW_MEMO_STATUS_XMACJ_EVENTS_PENDING(val) \
+ vxge_vBIT(val, 0, 17)
+/*0x01eb0*/ u64 xgmac_gen_fw_memo_mask;
+#define VXGE_HW_XGMAC_GEN_FW_MEMO_MASK_MASK(val) vxge_vBIT(val, 0, 64)
+/*0x01eb8*/ u64 xgmac_gen_fw_vpath_to_vsport_status;
+#define VXGE_HW_XGMAC_GEN_FW_VPATH_TO_VSPORT_STATUS_XMACJ_EVENTS_PENDING(val) \
+ vxge_vBIT(val, 0, 17)
+/*0x01ec0*/ u64 xgmac_main_cfg_port[2];
+#define VXGE_HW_XGMAC_MAIN_CFG_PORT_PORT_EN vxge_mBIT(3)
+ u8 unused01f40[0x01f40-0x01ed0];
+
+/*0x01f40*/ u64 xmac_gen_cfg;
+#define VXGE_HW_XMAC_GEN_CFG_RATEMGMT_MAC_RATE_SEL(val) vxge_vBIT(val, 2, 2)
+#define VXGE_HW_XMAC_GEN_CFG_TX_HEAD_DROP_WHEN_FAULT vxge_mBIT(7)
+#define VXGE_HW_XMAC_GEN_CFG_FAULT_BEHAVIOUR vxge_mBIT(27)
+#define VXGE_HW_XMAC_GEN_CFG_PERIOD_NTWK_UP(val) vxge_vBIT(val, 28, 4)
+#define VXGE_HW_XMAC_GEN_CFG_PERIOD_NTWK_DOWN(val) vxge_vBIT(val, 32, 4)
+/*0x01f48*/ u64 xmac_timestamp;
+#define VXGE_HW_XMAC_TIMESTAMP_EN vxge_mBIT(3)
+#define VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(val) vxge_vBIT(val, 6, 2)
+#define VXGE_HW_XMAC_TIMESTAMP_INTERVAL(val) vxge_vBIT(val, 12, 4)
+#define VXGE_HW_XMAC_TIMESTAMP_TIMER_RESTART vxge_mBIT(19)
+#define VXGE_HW_XMAC_TIMESTAMP_XMACJ_ROLLOVER_CNT(val) vxge_vBIT(val, 32, 16)
+/*0x01f50*/ u64 xmac_stats_gen_cfg;
+#define VXGE_HW_XMAC_STATS_GEN_CFG_PRTAGGR_CUM_TIMER(val) vxge_vBIT(val, 4, 4)
+#define VXGE_HW_XMAC_STATS_GEN_CFG_VPATH_CUM_TIMER(val) vxge_vBIT(val, 8, 4)
+#define VXGE_HW_XMAC_STATS_GEN_CFG_VLAN_HANDLING vxge_mBIT(15)
+/*0x01f58*/ u64 xmac_stats_sys_cmd;
+#define VXGE_HW_XMAC_STATS_SYS_CMD_OP(val) vxge_vBIT(val, 5, 3)
+#define VXGE_HW_XMAC_STATS_SYS_CMD_STROBE vxge_mBIT(15)
+#define VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(val) vxge_vBIT(val, 27, 5)
+#define VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(val) vxge_vBIT(val, 32, 8)
+/*0x01f60*/ u64 xmac_stats_sys_data;
+#define VXGE_HW_XMAC_STATS_SYS_DATA_XSMGR_DATA(val) vxge_vBIT(val, 0, 64)
+ u8 unused01f80[0x01f80-0x01f68];
+
+/*0x01f80*/ u64 asic_ntwk_ctrl;
+#define VXGE_HW_ASIC_NTWK_CTRL_REQ_TEST_NTWK vxge_mBIT(3)
+#define VXGE_HW_ASIC_NTWK_CTRL_PORT0_REQ_TEST_PORT vxge_mBIT(11)
+#define VXGE_HW_ASIC_NTWK_CTRL_PORT1_REQ_TEST_PORT vxge_mBIT(15)
+/*0x01f88*/ u64 asic_ntwk_cfg_show_port_info;
+#define VXGE_HW_ASIC_NTWK_CFG_SHOW_PORT_INFO_VP(n) vxge_mBIT(n)
+/*0x01f90*/ u64 asic_ntwk_cfg_port_num;
+#define VXGE_HW_ASIC_NTWK_CFG_PORT_NUM_VP(n) vxge_mBIT(n)
+/*0x01f98*/ u64 xmac_cfg_port[3];
+#define VXGE_HW_XMAC_CFG_PORT_XGMII_LOOPBACK vxge_mBIT(3)
+#define VXGE_HW_XMAC_CFG_PORT_XGMII_REVERSE_LOOPBACK vxge_mBIT(7)
+#define VXGE_HW_XMAC_CFG_PORT_XGMII_TX_BEHAV vxge_mBIT(11)
+#define VXGE_HW_XMAC_CFG_PORT_XGMII_RX_BEHAV vxge_mBIT(15)
+/*0x01fb0*/ u64 xmac_station_addr_port[2];
+#define VXGE_HW_XMAC_STATION_ADDR_PORT_MAC_ADDR(val) vxge_vBIT(val, 0, 48)
+ u8 unused02020[0x02020-0x01fc0];
+
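+/* Link aggregation (LAG) block.  The lag_* registers below hold IEEE
+ * 802.3ad (LACP) state for the two aggregatable ports: aggregator
+ * identity and keys, actor/partner admin and oper parameters, churn
+ * detection, and LACPDU timers.
+ */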
+/*0x02020*/ u64 lag_cfg;
+#define VXGE_HW_LAG_CFG_EN vxge_mBIT(3)
+#define VXGE_HW_LAG_CFG_MODE(val) vxge_vBIT(val, 6, 2)
+#define VXGE_HW_LAG_CFG_TX_DISCARD_BEHAV vxge_mBIT(11)
+#define VXGE_HW_LAG_CFG_RX_DISCARD_BEHAV vxge_mBIT(15)
+#define VXGE_HW_LAG_CFG_PREF_INDIV_PORT_NUM vxge_mBIT(19)
+/*0x02028*/ u64 lag_status;
+#define VXGE_HW_LAG_STATUS_XLCM_WAITING_TO_FAILBACK vxge_mBIT(3)
+#define VXGE_HW_LAG_STATUS_XLCM_TIMER_VAL_COLD_FAILOVER(val) \
+ vxge_vBIT(val, 8, 8)
+/*0x02030*/ u64 lag_active_passive_cfg;
+#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_HOT_STANDBY vxge_mBIT(3)
+#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_LACP_DECIDES vxge_mBIT(7)
+#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_PREF_ACTIVE_PORT_NUM vxge_mBIT(11)
+#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_AUTO_FAILBACK vxge_mBIT(15)
+#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_FAILBACK_EN vxge_mBIT(19)
+#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_COLD_FAILOVER_TIMEOUT(val) \
+ vxge_vBIT(val, 32, 16)
+ u8 unused02040[0x02040-0x02038];
+
+/*0x02040*/ u64 lag_lacp_cfg;
+#define VXGE_HW_LAG_LACP_CFG_EN vxge_mBIT(3)
+#define VXGE_HW_LAG_LACP_CFG_LACP_BEGIN vxge_mBIT(7)
+#define VXGE_HW_LAG_LACP_CFG_DISCARD_LACP vxge_mBIT(11)
+#define VXGE_HW_LAG_LACP_CFG_LIBERAL_LEN_CHK vxge_mBIT(15)
+/*0x02048*/ u64 lag_timer_cfg_1;
+#define VXGE_HW_LAG_TIMER_CFG_1_FAST_PER(val) vxge_vBIT(val, 0, 16)
+#define VXGE_HW_LAG_TIMER_CFG_1_SLOW_PER(val) vxge_vBIT(val, 16, 16)
+#define VXGE_HW_LAG_TIMER_CFG_1_SHORT_TIMEOUT(val) vxge_vBIT(val, 32, 16)
+#define VXGE_HW_LAG_TIMER_CFG_1_LONG_TIMEOUT(val) vxge_vBIT(val, 48, 16)
+/*0x02050*/ u64 lag_timer_cfg_2;
+#define VXGE_HW_LAG_TIMER_CFG_2_CHURN_DET(val) vxge_vBIT(val, 0, 16)
+#define VXGE_HW_LAG_TIMER_CFG_2_AGGR_WAIT(val) vxge_vBIT(val, 16, 16)
+#define VXGE_HW_LAG_TIMER_CFG_2_SHORT_TIMER_SCALE(val) vxge_vBIT(val, 32, 16)
+#define VXGE_HW_LAG_TIMER_CFG_2_LONG_TIMER_SCALE(val) vxge_vBIT(val, 48, 16)
+/*0x02058*/ u64 lag_sys_id;
+#define VXGE_HW_LAG_SYS_ID_ADDR(val) vxge_vBIT(val, 0, 48)
+#define VXGE_HW_LAG_SYS_ID_USE_PORT_ADDR vxge_mBIT(51)
+#define VXGE_HW_LAG_SYS_ID_ADDR_SEL vxge_mBIT(55)
+/*0x02060*/ u64 lag_sys_cfg;
+#define VXGE_HW_LAG_SYS_CFG_SYS_PRI(val) vxge_vBIT(val, 0, 16)
+ u8 unused02070[0x02070-0x02068];
+
+/*0x02070*/ u64 lag_aggr_addr_cfg[2];
+#define VXGE_HW_LAG_AGGR_ADDR_CFG_ADDR(val) vxge_vBIT(val, 0, 48)
+#define VXGE_HW_LAG_AGGR_ADDR_CFG_USE_PORT_ADDR vxge_mBIT(51)
+#define VXGE_HW_LAG_AGGR_ADDR_CFG_ADDR_SEL vxge_mBIT(55)
+/*0x02080*/ u64 lag_aggr_id_cfg[2];
+#define VXGE_HW_LAG_AGGR_ID_CFG_ID(val) vxge_vBIT(val, 0, 16)
+/*0x02090*/ u64 lag_aggr_admin_key[2];
+#define VXGE_HW_LAG_AGGR_ADMIN_KEY_KEY(val) vxge_vBIT(val, 0, 16)
+/*0x020a0*/ u64 lag_aggr_alt_admin_key;
+#define VXGE_HW_LAG_AGGR_ALT_ADMIN_KEY_KEY(val) vxge_vBIT(val, 0, 16)
+#define VXGE_HW_LAG_AGGR_ALT_ADMIN_KEY_ALT_AGGR vxge_mBIT(19)
+/*0x020a8*/ u64 lag_aggr_oper_key[2];
+#define VXGE_HW_LAG_AGGR_OPER_KEY_LAGC_KEY(val) vxge_vBIT(val, 0, 16)
+/*0x020b8*/ u64 lag_aggr_partner_sys_id[2];
+#define VXGE_HW_LAG_AGGR_PARTNER_SYS_ID_LAGC_ADDR(val) vxge_vBIT(val, 0, 48)
+/*0x020c8*/ u64 lag_aggr_partner_info[2];
+#define VXGE_HW_LAG_AGGR_PARTNER_INFO_LAGC_SYS_PRI(val) vxge_vBIT(val, 0, 16)
+#define VXGE_HW_LAG_AGGR_PARTNER_INFO_LAGC_OPER_KEY(val) \
+ vxge_vBIT(val, 16, 16)
+/*0x020d8*/ u64 lag_aggr_state[2];
+#define VXGE_HW_LAG_AGGR_STATE_LAGC_TX vxge_mBIT(3)
+#define VXGE_HW_LAG_AGGR_STATE_LAGC_RX vxge_mBIT(7)
+#define VXGE_HW_LAG_AGGR_STATE_LAGC_READY vxge_mBIT(11)
+#define VXGE_HW_LAG_AGGR_STATE_LAGC_INDIVIDUAL vxge_mBIT(15)
+ u8 unused020f0[0x020f0-0x020e8];
+
+/*0x020f0*/ u64 lag_port_cfg[2];
+#define VXGE_HW_LAG_PORT_CFG_EN vxge_mBIT(3)
+#define VXGE_HW_LAG_PORT_CFG_DISCARD_SLOW_PROTO vxge_mBIT(7)
+#define VXGE_HW_LAG_PORT_CFG_HOST_CHOSEN_AGGR vxge_mBIT(11)
+#define VXGE_HW_LAG_PORT_CFG_DISCARD_UNKNOWN_SLOW_PROTO vxge_mBIT(15)
+/*0x02100*/ u64 lag_port_actor_admin_cfg[2];
+#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_PORT_NUM(val) vxge_vBIT(val, 0, 16)
+#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_PORT_PRI(val) vxge_vBIT(val, 16, 16)
+#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_KEY_10G(val) vxge_vBIT(val, 32, 16)
+#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_KEY_1G(val) vxge_vBIT(val, 48, 16)
+/*0x02110*/ u64 lag_port_actor_admin_state[2];
+#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_LACP_ACTIVITY vxge_mBIT(3)
+#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_LACP_TIMEOUT vxge_mBIT(7)
+#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_AGGREGATION vxge_mBIT(11)
+#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_SYNCHRONIZATION vxge_mBIT(15)
+#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_COLLECTING vxge_mBIT(19)
+#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_DISTRIBUTING vxge_mBIT(23)
+#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_DEFAULTED vxge_mBIT(27)
+#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_EXPIRED vxge_mBIT(31)
+/*0x02120*/ u64 lag_port_partner_admin_sys_id[2];
+#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_SYS_ID_ADDR(val) vxge_vBIT(val, 0, 48)
+/*0x02130*/ u64 lag_port_partner_admin_cfg[2];
+#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_SYS_PRI(val) vxge_vBIT(val, 0, 16)
+#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_KEY(val) vxge_vBIT(val, 16, 16)
+#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_PORT_NUM(val) \
+ vxge_vBIT(val, 32, 16)
+#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_PORT_PRI(val) \
+ vxge_vBIT(val, 48, 16)
+/*0x02140*/ u64 lag_port_partner_admin_state[2];
+#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_LACP_ACTIVITY vxge_mBIT(3)
+#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_LACP_TIMEOUT vxge_mBIT(7)
+#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_AGGREGATION vxge_mBIT(11)
+#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_SYNCHRONIZATION vxge_mBIT(15)
+#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_COLLECTING vxge_mBIT(19)
+#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_DISTRIBUTING vxge_mBIT(23)
+#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_DEFAULTED vxge_mBIT(27)
+#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_EXPIRED vxge_mBIT(31)
+/*0x02150*/ u64 lag_port_to_aggr[2];
+#define VXGE_HW_LAG_PORT_TO_AGGR_LAGC_AGGR_ID(val) vxge_vBIT(val, 0, 16)
+#define VXGE_HW_LAG_PORT_TO_AGGR_LAGC_AGGR_VLD_ID vxge_mBIT(19)
+/*0x02160*/ u64 lag_port_actor_oper_key[2];
+#define VXGE_HW_LAG_PORT_ACTOR_OPER_KEY_LAGC_KEY(val) vxge_vBIT(val, 0, 16)
+/*0x02170*/ u64 lag_port_actor_oper_state[2];
+#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_LACP_ACTIVITY vxge_mBIT(3)
+#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_LACP_TIMEOUT vxge_mBIT(7)
+#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_AGGREGATION vxge_mBIT(11)
+#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_SYNCHRONIZATION vxge_mBIT(15)
+#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_COLLECTING vxge_mBIT(19)
+#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_DISTRIBUTING vxge_mBIT(23)
+#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_DEFAULTED vxge_mBIT(27)
+#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_EXPIRED vxge_mBIT(31)
+/*0x02180*/ u64 lag_port_partner_oper_sys_id[2];
+#define VXGE_HW_LAG_PORT_PARTNER_OPER_SYS_ID_LAGC_ADDR(val) \
+ vxge_vBIT(val, 0, 48)
+/*0x02190*/ u64 lag_port_partner_oper_info[2];
+#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_SYS_PRI(val) \
+ vxge_vBIT(val, 0, 16)
+#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_KEY(val) \
+ vxge_vBIT(val, 16, 16)
+#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_PORT_NUM(val) \
+ vxge_vBIT(val, 32, 16)
+#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_PORT_PRI(val) \
+ vxge_vBIT(val, 48, 16)
+/*0x021a0*/ u64 lag_port_partner_oper_state[2];
+#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_LACP_ACTIVITY vxge_mBIT(3)
+#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_LACP_TIMEOUT vxge_mBIT(7)
+#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_AGGREGATION vxge_mBIT(11)
+#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_SYNCHRONIZATION \
+ vxge_mBIT(15)
+#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_COLLECTING vxge_mBIT(19)
+#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_DISTRIBUTING vxge_mBIT(23)
+#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_DEFAULTED vxge_mBIT(27)
+#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_EXPIRED vxge_mBIT(31)
+/*0x021b0*/ u64 lag_port_state_vars[2];
+#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_READY vxge_mBIT(3)
+#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_SELECTED(val) vxge_vBIT(val, 6, 2)
+#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_AGGR_NUM vxge_mBIT(11)
+#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_MOVED vxge_mBIT(15)
+#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_ENABLED vxge_mBIT(18)
+#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_DISABLED vxge_mBIT(19)
+#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_NTT vxge_mBIT(23)
+#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN vxge_mBIT(27)
+#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN vxge_mBIT(31)
+#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_INFO_LEN_MISMATCH \
+ vxge_mBIT(32)
+#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_INFO_LEN_MISMATCH \
+ vxge_mBIT(33)
+#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_COLL_INFO_LEN_MISMATCH vxge_mBIT(34)
+#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_TERM_INFO_LEN_MISMATCH vxge_mBIT(35)
+#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_RX_FSM_STATE(val) vxge_vBIT(val, 37, 3)
+#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_MUX_FSM_STATE(val) \
+ vxge_vBIT(val, 41, 3)
+#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_MUX_REASON(val) vxge_vBIT(val, 44, 4)
+#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN_STATE vxge_mBIT(54)
+#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN_STATE vxge_mBIT(55)
+#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN_COUNT(val) \
+ vxge_vBIT(val, 56, 4)
+#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN_COUNT(val) \
+ vxge_vBIT(val, 60, 4)
+/*0x021c0*/ u64 lag_port_timer_cntr[2];
+#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_CURRENT_WHILE(val) vxge_vBIT(val, 0, 8)
+#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PERIODIC_WHILE(val) \
+ vxge_vBIT(val, 8, 8)
+#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_WAIT_WHILE(val) vxge_vBIT(val, 16, 8)
+#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_TX_LACP(val) vxge_vBIT(val, 24, 8)
+#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_ACTOR_SYNC_TRANSITION_COUNT(val) \
+ vxge_vBIT(val, 32, 8)
+#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PARTNER_SYNC_TRANSITION_COUNT(val) \
+ vxge_vBIT(val, 40, 8)
+#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_ACTOR_CHANGE_COUNT(val) \
+ vxge_vBIT(val, 48, 8)
+#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PARTNER_CHANGE_COUNT(val) \
+ vxge_vBIT(val, 56, 8)
+ u8 unused02700[0x02700-0x021d0];
+
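+/* RTDMA fault block.  The TXDO and LSO (large send offload) sources
+ * below suggest this covers the transmit DMA path: PDA, PCC and LSO
+ * error latches, each with the usual mask/alarm pair.
+ */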
+/*0x02700*/ u64 rtdma_int_status;
+#define VXGE_HW_RTDMA_INT_STATUS_PDA_ALARM_PDA_INT vxge_mBIT(1)
+#define VXGE_HW_RTDMA_INT_STATUS_PCC_ERROR_PCC_INT vxge_mBIT(2)
+#define VXGE_HW_RTDMA_INT_STATUS_LSO_ERROR_LSO_INT vxge_mBIT(4)
+#define VXGE_HW_RTDMA_INT_STATUS_SM_ERROR_SM_INT vxge_mBIT(5)
+/*0x02708*/ u64 rtdma_int_mask;
+/*0x02710*/ u64 pda_alarm_reg;
+#define VXGE_HW_PDA_ALARM_REG_PDA_HSC_FIFO_ERR vxge_mBIT(0)
+#define VXGE_HW_PDA_ALARM_REG_PDA_SM_ERR vxge_mBIT(1)
+/*0x02718*/ u64 pda_alarm_mask;
+/*0x02720*/ u64 pda_alarm_alarm;
+/*0x02728*/ u64 pcc_error_reg;
+#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_SBE(n) vxge_mBIT(n)
+#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_SBE(n) vxge_mBIT(n)
+#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_DBE(n) vxge_mBIT(n)
+#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_DBE(n) vxge_mBIT(n)
+#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FSM_ERR_ALARM(n) vxge_mBIT(n)
+#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_SERR(n) vxge_mBIT(n)
+/*0x02730*/ u64 pcc_error_mask;
+/*0x02738*/ u64 pcc_error_alarm;
+/*0x02740*/ u64 lso_error_reg;
+#define VXGE_HW_LSO_ERROR_REG_PCC_LSO_ABORT(n) vxge_mBIT(n)
+#define VXGE_HW_LSO_ERROR_REG_PCC_LSO_FSM_ERR_ALARM(n) vxge_mBIT(n)
+/*0x02748*/ u64 lso_error_mask;
+/*0x02750*/ u64 lso_error_alarm;
+/*0x02758*/ u64 sm_error_reg;
+#define VXGE_HW_SM_ERROR_REG_SM_FSM_ERR_ALARM vxge_mBIT(15)
+/*0x02760*/ u64 sm_error_mask;
+/*0x02768*/ u64 sm_error_alarm;
+ u8 unused027a8[0x027a8-0x02770];
+
+/*0x027a8*/ u64 txd_ownership_ctrl;
+#define VXGE_HW_TXD_OWNERSHIP_CTRL_KEEP_OWNERSHIP vxge_mBIT(7)
+/*0x027b0*/ u64 pcc_cfg;
+#define VXGE_HW_PCC_CFG_PCC_ENABLE(n) vxge_mBIT(n)
+#define VXGE_HW_PCC_CFG_PCC_ECC_ENABLE_N(n) vxge_mBIT(n)
+/*0x027b8*/ u64 pcc_control;
+#define VXGE_HW_PCC_CONTROL_FE_ENABLE(val) vxge_vBIT(val, 6, 2)
+#define VXGE_HW_PCC_CONTROL_EARLY_ASSIGN_EN vxge_mBIT(15)
+#define VXGE_HW_PCC_CONTROL_UNBLOCK_DB_ERR vxge_mBIT(31)
+/*0x027c0*/ u64 pda_status1;
+#define VXGE_HW_PDA_STATUS1_PDA_WRAP_0_CTR(val) vxge_vBIT(val, 4, 4)
+#define VXGE_HW_PDA_STATUS1_PDA_WRAP_1_CTR(val) vxge_vBIT(val, 12, 4)
+#define VXGE_HW_PDA_STATUS1_PDA_WRAP_2_CTR(val) vxge_vBIT(val, 20, 4)
+#define VXGE_HW_PDA_STATUS1_PDA_WRAP_3_CTR(val) vxge_vBIT(val, 28, 4)
+#define VXGE_HW_PDA_STATUS1_PDA_WRAP_4_CTR(val) vxge_vBIT(val, 36, 4)
+#define VXGE_HW_PDA_STATUS1_PDA_WRAP_5_CTR(val) vxge_vBIT(val, 44, 4)
+#define VXGE_HW_PDA_STATUS1_PDA_WRAP_6_CTR(val) vxge_vBIT(val, 52, 4)
+#define VXGE_HW_PDA_STATUS1_PDA_WRAP_7_CTR(val) vxge_vBIT(val, 60, 4)
+/*0x027c8*/ u64 rtdma_bw_timer;
+#define VXGE_HW_RTDMA_BW_TIMER_TIMER_CTRL(val) vxge_vBIT(val, 12, 4)
+ u8 unused02900[0x02900-0x027d0];
+
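+/* G3CMCT: GDDR3 memory-controller fault block.  SECC and DECC appear
+ * to latch single- and double-bit ECC errors respectively in the
+ * external GDDR3 and its control FIFO.
+ */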
+/*0x02900*/ u64 g3cmct_int_status;
+#define VXGE_HW_G3CMCT_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
+/*0x02908*/ u64 g3cmct_int_mask;
+/*0x02910*/ u64 g3cmct_err_reg;
+#define VXGE_HW_G3CMCT_ERR_REG_G3IF_SM_ERR vxge_mBIT(4)
+#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_DECC vxge_mBIT(5)
+#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_U_DECC vxge_mBIT(6)
+#define VXGE_HW_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_DECC vxge_mBIT(7)
+#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_SECC vxge_mBIT(29)
+#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_U_SECC vxge_mBIT(30)
+#define VXGE_HW_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_SECC vxge_mBIT(31)
+/*0x02918*/ u64 g3cmct_err_mask;
+/*0x02920*/ u64 g3cmct_err_alarm;
+ u8 unused03000[0x03000-0x02928];
+
+/*0x03000*/ u64 mc_int_status;
+#define VXGE_HW_MC_INT_STATUS_MC_ERR_MC_INT vxge_mBIT(3)
+#define VXGE_HW_MC_INT_STATUS_GROCRC_ALARM_ROCRC_INT vxge_mBIT(7)
+#define VXGE_HW_MC_INT_STATUS_FAU_GEN_ERR_FAU_GEN_INT vxge_mBIT(11)
+#define VXGE_HW_MC_INT_STATUS_FAU_ECC_ERR_FAU_ECC_INT vxge_mBIT(15)
+/*0x03008*/ u64 mc_int_mask;
+/*0x03010*/ u64 mc_err_reg;
+#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_SG_ERR_A vxge_mBIT(3)
+#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_SG_ERR_B vxge_mBIT(4)
+#define VXGE_HW_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_SG_ERR vxge_mBIT(5)
+#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_SG_ERR_0 vxge_mBIT(6)
+#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_SG_ERR_1 vxge_mBIT(7)
+#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_A vxge_mBIT(10)
+#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_B vxge_mBIT(11)
+#define VXGE_HW_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_DB_ERR vxge_mBIT(12)
+#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_0 vxge_mBIT(13)
+#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_1 vxge_mBIT(14)
+#define VXGE_HW_MC_ERR_REG_MC_SM_ERR vxge_mBIT(15)
+/*0x03018*/ u64 mc_err_mask;
+/*0x03020*/ u64 mc_err_alarm;
+/*0x03028*/ u64 grocrc_alarm_reg;
+#define VXGE_HW_GROCRC_ALARM_REG_XFMD_WR_FIFO_ERR vxge_mBIT(3)
+#define VXGE_HW_GROCRC_ALARM_REG_WDE2MSR_RD_FIFO_ERR vxge_mBIT(7)
+/*0x03030*/ u64 grocrc_alarm_mask;
+/*0x03038*/ u64 grocrc_alarm_alarm;
+ u8 unused03100[0x03100-0x03040];
+
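+/* Replication-queue receive thresholds: PAUSE low/high water marks
+ * and four RED drop levels, plus global wake-on-LAN (WOL) gating.
+ */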
+/*0x03100*/ u64 rx_thresh_cfg_repl;
+#define VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_LOW_THR(val) vxge_vBIT(val, 0, 8)
+#define VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_HIGH_THR(val) vxge_vBIT(val, 8, 8)
+#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_0(val) vxge_vBIT(val, 16, 8)
+#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_1(val) vxge_vBIT(val, 24, 8)
+#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_2(val) vxge_vBIT(val, 32, 8)
+#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_3(val) vxge_vBIT(val, 40, 8)
+#define VXGE_HW_RX_THRESH_CFG_REPL_GLOBAL_WOL_EN vxge_mBIT(62)
+#define VXGE_HW_RX_THRESH_CFG_REPL_EXACT_VP_MATCH_REQ vxge_mBIT(63)
+ u8 unused033b8[0x033b8-0x03108];
+
+/*0x033b8*/ u64 fbmc_ecc_cfg;
+#define VXGE_HW_FBMC_ECC_CFG_ENABLE(val) vxge_vBIT(val, 3, 5)
+ u8 unused03400[0x03400-0x033c0];
+
+/*0x03400*/ u64 pcipif_int_status;
+#define VXGE_HW_PCIPIF_INT_STATUS_DBECC_ERR_DBECC_ERR_INT vxge_mBIT(3)
+#define VXGE_HW_PCIPIF_INT_STATUS_SBECC_ERR_SBECC_ERR_INT vxge_mBIT(7)
+#define VXGE_HW_PCIPIF_INT_STATUS_GENERAL_ERR_GENERAL_ERR_INT vxge_mBIT(11)
+#define VXGE_HW_PCIPIF_INT_STATUS_SRPCIM_MSG_SRPCIM_MSG_INT vxge_mBIT(15)
+#define VXGE_HW_PCIPIF_INT_STATUS_MRPCIM_SPARE_R1_MRPCIM_SPARE_R1_INT \
+ vxge_mBIT(19)
+/*0x03408*/ u64 pcipif_int_mask;
+/*0x03410*/ u64 dbecc_err_reg;
+#define VXGE_HW_DBECC_ERR_REG_PCI_RETRY_BUF_DB_ERR vxge_mBIT(3)
+#define VXGE_HW_DBECC_ERR_REG_PCI_RETRY_SOT_DB_ERR vxge_mBIT(7)
+#define VXGE_HW_DBECC_ERR_REG_PCI_P_HDR_DB_ERR vxge_mBIT(11)
+#define VXGE_HW_DBECC_ERR_REG_PCI_P_DATA_DB_ERR vxge_mBIT(15)
+#define VXGE_HW_DBECC_ERR_REG_PCI_NP_HDR_DB_ERR vxge_mBIT(19)
+#define VXGE_HW_DBECC_ERR_REG_PCI_NP_DATA_DB_ERR vxge_mBIT(23)
+/*0x03418*/ u64 dbecc_err_mask;
+/*0x03420*/ u64 dbecc_err_alarm;
+/*0x03428*/ u64 sbecc_err_reg;
+#define VXGE_HW_SBECC_ERR_REG_PCI_RETRY_BUF_SG_ERR vxge_mBIT(3)
+#define VXGE_HW_SBECC_ERR_REG_PCI_RETRY_SOT_SG_ERR vxge_mBIT(7)
+#define VXGE_HW_SBECC_ERR_REG_PCI_P_HDR_SG_ERR vxge_mBIT(11)
+#define VXGE_HW_SBECC_ERR_REG_PCI_P_DATA_SG_ERR vxge_mBIT(15)
+#define VXGE_HW_SBECC_ERR_REG_PCI_NP_HDR_SG_ERR vxge_mBIT(19)
+#define VXGE_HW_SBECC_ERR_REG_PCI_NP_DATA_SG_ERR vxge_mBIT(23)
+/*0x03430*/ u64 sbecc_err_mask;
+/*0x03438*/ u64 sbecc_err_alarm;
+/*0x03440*/ u64 general_err_reg;
+#define VXGE_HW_GENERAL_ERR_REG_PCI_DROPPED_ILLEGAL_CFG vxge_mBIT(3)
+#define VXGE_HW_GENERAL_ERR_REG_PCI_ILLEGAL_MEM_MAP_PROG vxge_mBIT(7)
+#define VXGE_HW_GENERAL_ERR_REG_PCI_LINK_RST_FSM_ERR vxge_mBIT(11)
+#define VXGE_HW_GENERAL_ERR_REG_PCI_RX_ILLEGAL_TLP_VPLANE vxge_mBIT(15)
+#define VXGE_HW_GENERAL_ERR_REG_PCI_TRAINING_RESET_DET vxge_mBIT(19)
+#define VXGE_HW_GENERAL_ERR_REG_PCI_PCI_LINK_DOWN_DET vxge_mBIT(23)
+#define VXGE_HW_GENERAL_ERR_REG_PCI_RESET_ACK_DLLP vxge_mBIT(27)
+/*0x03448*/ u64 general_err_mask;
+/*0x03450*/ u64 general_err_alarm;
+/*0x03458*/ u64 srpcim_msg_reg;
+#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE0_RMSG_INT \
+ vxge_mBIT(0)
+#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE1_RMSG_INT \
+ vxge_mBIT(1)
+#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE2_RMSG_INT \
+ vxge_mBIT(2)
+#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE3_RMSG_INT \
+ vxge_mBIT(3)
+#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE4_RMSG_INT \
+ vxge_mBIT(4)
+#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE5_RMSG_INT \
+ vxge_mBIT(5)
+#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE6_RMSG_INT \
+ vxge_mBIT(6)
+#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE7_RMSG_INT \
+ vxge_mBIT(7)
+#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE8_RMSG_INT \
+ vxge_mBIT(8)
+#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE9_RMSG_INT \
+ vxge_mBIT(9)
+#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE10_RMSG_INT \
+ vxge_mBIT(10)
+#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE11_RMSG_INT \
+ vxge_mBIT(11)
+#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE12_RMSG_INT \
+ vxge_mBIT(12)
+#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE13_RMSG_INT \
+ vxge_mBIT(13)
+#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE14_RMSG_INT \
+ vxge_mBIT(14)
+#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE15_RMSG_INT \
+ vxge_mBIT(15)
+#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE16_RMSG_INT \
+ vxge_mBIT(16)
+/*0x03460*/ u64 srpcim_msg_mask;
+/*0x03468*/ u64 srpcim_msg_alarm;
+ u8 unused03600[0x03600-0x03470];
+
+/*0x03600*/ u64 gcmg1_int_status;
+#define VXGE_HW_GCMG1_INT_STATUS_GSSCC_ERR_GSSCC_INT vxge_mBIT(0)
+#define VXGE_HW_GCMG1_INT_STATUS_GSSC0_ERR0_GSSC0_0_INT vxge_mBIT(1)
+#define VXGE_HW_GCMG1_INT_STATUS_GSSC0_ERR1_GSSC0_1_INT vxge_mBIT(2)
+#define VXGE_HW_GCMG1_INT_STATUS_GSSC1_ERR0_GSSC1_0_INT vxge_mBIT(3)
+#define VXGE_HW_GCMG1_INT_STATUS_GSSC1_ERR1_GSSC1_1_INT vxge_mBIT(4)
+#define VXGE_HW_GCMG1_INT_STATUS_GSSC2_ERR0_GSSC2_0_INT vxge_mBIT(5)
+#define VXGE_HW_GCMG1_INT_STATUS_GSSC2_ERR1_GSSC2_1_INT vxge_mBIT(6)
+#define VXGE_HW_GCMG1_INT_STATUS_UQM_ERR_UQM_INT vxge_mBIT(7)
+#define VXGE_HW_GCMG1_INT_STATUS_GQCC_ERR_GQCC_INT vxge_mBIT(8)
+/*0x03608*/ u64 gcmg1_int_mask;
+ u8 unused03a00[0x03a00-0x03610];
+
+/*0x03a00*/ u64 pcmg1_int_status;
+#define VXGE_HW_PCMG1_INT_STATUS_PSSCC_ERR_PSSCC_INT vxge_mBIT(0)
+#define VXGE_HW_PCMG1_INT_STATUS_PQCC_ERR_PQCC_INT vxge_mBIT(1)
+#define VXGE_HW_PCMG1_INT_STATUS_PQCC_CQM_ERR_PQCC_CQM_INT vxge_mBIT(2)
+#define VXGE_HW_PCMG1_INT_STATUS_PQCC_SQM_ERR_PQCC_SQM_INT vxge_mBIT(3)
+/*0x03a08*/ u64 pcmg1_int_mask;
+ u8 unused04000[0x04000-0x03a10];
+
+/*0x04000*/ u64 one_int_status;
+#define VXGE_HW_ONE_INT_STATUS_RXPE_ERR_RXPE_INT vxge_mBIT(7)
+#define VXGE_HW_ONE_INT_STATUS_TXPE_BCC_MEM_SG_ECC_ERR_TXPE_BCC_MEM_SG_ECC_INT \
+ vxge_mBIT(13)
+#define VXGE_HW_ONE_INT_STATUS_TXPE_BCC_MEM_DB_ECC_ERR_TXPE_BCC_MEM_DB_ECC_INT \
+ vxge_mBIT(14)
+#define VXGE_HW_ONE_INT_STATUS_TXPE_ERR_TXPE_INT vxge_mBIT(15)
+#define VXGE_HW_ONE_INT_STATUS_DLM_ERR_DLM_INT vxge_mBIT(23)
+#define VXGE_HW_ONE_INT_STATUS_PE_ERR_PE_INT vxge_mBIT(31)
+#define VXGE_HW_ONE_INT_STATUS_RPE_ERR_RPE_INT vxge_mBIT(39)
+#define VXGE_HW_ONE_INT_STATUS_RPE_FSM_ERR_RPE_FSM_INT vxge_mBIT(47)
+#define VXGE_HW_ONE_INT_STATUS_OES_ERR_OES_INT vxge_mBIT(55)
+/*0x04008*/ u64 one_int_mask;
+ u8 unused04818[0x04818-0x04010];
+
+/*0x04818*/ u64 noa_wct_ctrl;
+#define VXGE_HW_NOA_WCT_CTRL_VP_INT_NUM vxge_mBIT(0)
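+/* BUFF1..BUFF5 below size the receive buffers; five distinct sizes
+ * presumably cover the driver's 1-, 3- and 5-buffer receive modes.
+ */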
+/*0x04820*/ u64 rc_cfg2;
+#define VXGE_HW_RC_CFG2_BUFF1_SIZE(val) vxge_vBIT(val, 0, 16)
+#define VXGE_HW_RC_CFG2_BUFF2_SIZE(val) vxge_vBIT(val, 16, 16)
+#define VXGE_HW_RC_CFG2_BUFF3_SIZE(val) vxge_vBIT(val, 32, 16)
+#define VXGE_HW_RC_CFG2_BUFF4_SIZE(val) vxge_vBIT(val, 48, 16)
+/*0x04828*/ u64 rc_cfg3;
+#define VXGE_HW_RC_CFG3_BUFF5_SIZE(val) vxge_vBIT(val, 0, 16)
+/*0x04830*/ u64 rx_multi_cast_ctrl1;
+#define VXGE_HW_RX_MULTI_CAST_CTRL1_ENABLE vxge_mBIT(7)
+#define VXGE_HW_RX_MULTI_CAST_CTRL1_DELAY_COUNT(val) vxge_vBIT(val, 11, 5)
+/*0x04838*/ u64 rxdm_dbg_rd;
+#define VXGE_HW_RXDM_DBG_RD_ADDR(val) vxge_vBIT(val, 0, 12)
+#define VXGE_HW_RXDM_DBG_RD_ENABLE vxge_mBIT(31)
+/*0x04840*/ u64 rxdm_dbg_rd_data;
+#define VXGE_HW_RXDM_DBG_RD_DATA_RMC_RXDM_DBG_RD_DATA(val) vxge_vBIT(val, 0, 64)
+/*0x04848*/ u64 rqa_top_prty_for_vh[17];
+#define VXGE_HW_RQA_TOP_PRTY_FOR_VH_RQA_TOP_PRTY_FOR_VH(val) \
+ vxge_vBIT(val, 59, 5)
+ u8 unused04900[0x04900-0x048d0];
+
+/*0x04900*/ u64 tim_status;
+#define VXGE_HW_TIM_STATUS_TIM_RESET_IN_PROGRESS vxge_mBIT(0)
+/*0x04908*/ u64 tim_ecc_enable;
+#define VXGE_HW_TIM_ECC_ENABLE_VBLS_N vxge_mBIT(7)
+#define VXGE_HW_TIM_ECC_ENABLE_BMAP_N vxge_mBIT(15)
+#define VXGE_HW_TIM_ECC_ENABLE_BMAP_MSG_N vxge_mBIT(23)
+/*0x04910*/ u64 tim_bp_ctrl;
+#define VXGE_HW_TIM_BP_CTRL_RD_XON vxge_mBIT(7)
+#define VXGE_HW_TIM_BP_CTRL_WR_XON vxge_mBIT(15)
+#define VXGE_HW_TIM_BP_CTRL_ROCRC_BYP vxge_mBIT(23)
+/*0x04918*/ u64 tim_resource_assignment_vh[17];
+#define VXGE_HW_TIM_RESOURCE_ASSIGNMENT_VH_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
+/*0x049a0*/ u64 tim_bmap_mapping_vp_err[17];
+#define VXGE_HW_TIM_BMAP_MAPPING_VP_ERR_TIM_DEST_VPATH(val) vxge_vBIT(val, 3, 5)
+ u8 unused04b00[0x04b00-0x04a28];
+
+/*0x04b00*/ u64 gcmg2_int_status;
+#define VXGE_HW_GCMG2_INT_STATUS_GXTMC_ERR_GXTMC_INT vxge_mBIT(7)
+#define VXGE_HW_GCMG2_INT_STATUS_GCP_ERR_GCP_INT vxge_mBIT(15)
+#define VXGE_HW_GCMG2_INT_STATUS_CMC_ERR_CMC_INT vxge_mBIT(23)
+/*0x04b08*/ u64 gcmg2_int_mask;
+/*0x04b10*/ u64 gxtmc_err_reg;
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_MEM_DB_ERR(val) vxge_vBIT(val, 0, 4)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_MEM_SG_ERR(val) vxge_vBIT(val, 4, 4)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMC_RD_DATA_DB_ERR vxge_mBIT(8)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_REQ_FIFO_ERR vxge_mBIT(9)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR vxge_mBIT(10)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR vxge_mBIT(11)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR vxge_mBIT(12)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_WRP_FIFO_ERR vxge_mBIT(13)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_WRP_ERR vxge_mBIT(14)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_RRP_FIFO_ERR vxge_mBIT(15)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_RRP_ERR vxge_mBIT(16)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_DATA_SM_ERR vxge_mBIT(17)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_CMC0_IF_ERR vxge_mBIT(18)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_ARB_SM_ERR vxge_mBIT(19)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_CFC_SM_ERR vxge_mBIT(20)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_OVERFLOW \
+ vxge_mBIT(21)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_UNDERFLOW \
+ vxge_mBIT(22)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_SM_ERR vxge_mBIT(23)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_OVERFLOW \
+ vxge_mBIT(24)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_UNDERFLOW \
+ vxge_mBIT(25)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_SM_ERR vxge_mBIT(26)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_SM_ERR vxge_mBIT(27)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_TAG_ERR vxge_mBIT(28)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_SM_ERR vxge_mBIT(29)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_FIFO_ERR vxge_mBIT(30)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_POP_ERR vxge_mBIT(31)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_CMI_OP_ERR vxge_mBIT(32)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFETCH_OP_ERR vxge_mBIT(33)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFIFO_ERR vxge_mBIT(34)
+#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_ARB_SM_ERR vxge_mBIT(35)
+/*0x04b18*/ u64 gxtmc_err_mask;
+/*0x04b20*/ u64 gxtmc_err_alarm;
+/*0x04b28*/ u64 cmc_err_reg;
+#define VXGE_HW_CMC_ERR_REG_CMC_CMC_SM_ERR vxge_mBIT(0)
+/*0x04b30*/ u64 cmc_err_mask;
+/*0x04b38*/ u64 cmc_err_alarm;
+/*0x04b40*/ u64 gcp_err_reg;
+#define VXGE_HW_GCP_ERR_REG_CP_H2L2CP_FIFO_ERR vxge_mBIT(0)
+#define VXGE_HW_GCP_ERR_REG_CP_STC2CP_FIFO_ERR vxge_mBIT(1)
+#define VXGE_HW_GCP_ERR_REG_CP_STE2CP_FIFO_ERR vxge_mBIT(2)
+#define VXGE_HW_GCP_ERR_REG_CP_TTE2CP_FIFO_ERR vxge_mBIT(3)
+/*0x04b48*/ u64 gcp_err_mask;
+/*0x04b50*/ u64 gcp_err_alarm;
+ u8 unused04f00[0x04f00-0x04b58];
+
+/*0x04f00*/ u64 pcmg2_int_status;
+#define VXGE_HW_PCMG2_INT_STATUS_PXTMC_ERR_PXTMC_INT vxge_mBIT(7)
+#define VXGE_HW_PCMG2_INT_STATUS_CP_EXC_CP_XT_EXC_INT vxge_mBIT(15)
+#define VXGE_HW_PCMG2_INT_STATUS_CP_ERR_CP_ERR_INT vxge_mBIT(23)
+/*0x04f08*/ u64 pcmg2_int_mask;
+/*0x04f10*/ u64 pxtmc_err_reg;
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_DB_ERR(val) vxge_vBIT(val, 0, 2)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_FIFO_ERR vxge_mBIT(2)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_PRSP_FIFO_ERR vxge_mBIT(3)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_WRSP_FIFO_ERR vxge_mBIT(4)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_FIFO_ERR vxge_mBIT(5)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_PRSP_FIFO_ERR vxge_mBIT(6)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_WRSP_FIFO_ERR vxge_mBIT(7)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_FIFO_ERR vxge_mBIT(8)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_PRSP_FIFO_ERR vxge_mBIT(9)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_WRSP_FIFO_ERR vxge_mBIT(10)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_REQ_FIFO_ERR vxge_mBIT(11)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR vxge_mBIT(12)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR vxge_mBIT(13)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR vxge_mBIT(14)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_SHADOW_ERR vxge_mBIT(15)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_RSP_SHADOW_ERR vxge_mBIT(16)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_SHADOW_ERR vxge_mBIT(17)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_RSP_SHADOW_ERR vxge_mBIT(18)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_SHADOW_ERR vxge_mBIT(19)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_RSP_SHADOW_ERR vxge_mBIT(20)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_XIL_SHADOW_ERR vxge_mBIT(21)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_ARB_SHADOW_ERR vxge_mBIT(22)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_RAM_SHADOW_ERR vxge_mBIT(23)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMW_SHADOW_ERR vxge_mBIT(24)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMR_SHADOW_ERR vxge_mBIT(25)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_FSM_ERR vxge_mBIT(26)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_RSP_FSM_ERR vxge_mBIT(27)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_FSM_ERR vxge_mBIT(28)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_RSP_FSM_ERR vxge_mBIT(29)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_FSM_ERR vxge_mBIT(30)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_RSP_FSM_ERR vxge_mBIT(31)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_XIL_FSM_ERR vxge_mBIT(32)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_ARB_FSM_ERR vxge_mBIT(33)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMW_FSM_ERR vxge_mBIT(34)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMR_FSM_ERR vxge_mBIT(35)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_ERR vxge_mBIT(36)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_ERR vxge_mBIT(37)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_ERR vxge_mBIT(38)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_ERR vxge_mBIT(39)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_ERR vxge_mBIT(40)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_ERR vxge_mBIT(41)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_ERR vxge_mBIT(42)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_ERR vxge_mBIT(43)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_ERR vxge_mBIT(44)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_INFO_ERR vxge_mBIT(45)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_INFO_ERR vxge_mBIT(46)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_INFO_ERR vxge_mBIT(47)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_INFO_ERR vxge_mBIT(48)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_INFO_ERR vxge_mBIT(49)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_INFO_ERR vxge_mBIT(50)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_INFO_ERR vxge_mBIT(51)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_INFO_ERR vxge_mBIT(52)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_INFO_ERR vxge_mBIT(53)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_SG_ERR(val) vxge_vBIT(val, 54, 2)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_CP2BDT_DFIFO_PUSH_ERR vxge_mBIT(56)
+#define VXGE_HW_PXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_PUSH_ERR vxge_mBIT(57)
+/*0x04f18*/ u64 pxtmc_err_mask;
+/*0x04f20*/ u64 pxtmc_err_alarm;
+/*0x04f28*/ u64 cp_err_reg;
+#define VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_SG_ERR(val) vxge_vBIT(val, 0, 8)
+#define VXGE_HW_CP_ERR_REG_CP_CP_ICACHE_SG_ERR(val) vxge_vBIT(val, 8, 2)
+#define VXGE_HW_CP_ERR_REG_CP_CP_DTAG_SG_ERR vxge_mBIT(10)
+#define VXGE_HW_CP_ERR_REG_CP_CP_ITAG_SG_ERR vxge_mBIT(11)
+#define VXGE_HW_CP_ERR_REG_CP_CP_TRACE_SG_ERR vxge_mBIT(12)
+#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_SG_ERR vxge_mBIT(13)
+#define VXGE_HW_CP_ERR_REG_CP_MP2CP_SG_ERR vxge_mBIT(14)
+#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_SG_ERR vxge_mBIT(15)
+#define VXGE_HW_CP_ERR_REG_CP_STC2CP_SG_ERR(val) vxge_vBIT(val, 16, 2)
+#define VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_DB_ERR(val) vxge_vBIT(val, 24, 8)
+#define VXGE_HW_CP_ERR_REG_CP_CP_ICACHE_DB_ERR(val) vxge_vBIT(val, 32, 2)
+#define VXGE_HW_CP_ERR_REG_CP_CP_DTAG_DB_ERR vxge_mBIT(34)
+#define VXGE_HW_CP_ERR_REG_CP_CP_ITAG_DB_ERR vxge_mBIT(35)
+#define VXGE_HW_CP_ERR_REG_CP_CP_TRACE_DB_ERR vxge_mBIT(36)
+#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_DB_ERR vxge_mBIT(37)
+#define VXGE_HW_CP_ERR_REG_CP_MP2CP_DB_ERR vxge_mBIT(38)
+#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_DB_ERR vxge_mBIT(39)
+#define VXGE_HW_CP_ERR_REG_CP_STC2CP_DB_ERR(val) vxge_vBIT(val, 40, 2)
+#define VXGE_HW_CP_ERR_REG_CP_H2L2CP_FIFO_ERR vxge_mBIT(48)
+#define VXGE_HW_CP_ERR_REG_CP_STC2CP_FIFO_ERR vxge_mBIT(49)
+#define VXGE_HW_CP_ERR_REG_CP_STE2CP_FIFO_ERR vxge_mBIT(50)
+#define VXGE_HW_CP_ERR_REG_CP_TTE2CP_FIFO_ERR vxge_mBIT(51)
+#define VXGE_HW_CP_ERR_REG_CP_SWIF2CP_FIFO_ERR vxge_mBIT(52)
+#define VXGE_HW_CP_ERR_REG_CP_CP2DMA_FIFO_ERR vxge_mBIT(53)
+#define VXGE_HW_CP_ERR_REG_CP_DAM2CP_FIFO_ERR vxge_mBIT(54)
+#define VXGE_HW_CP_ERR_REG_CP_MP2CP_FIFO_ERR vxge_mBIT(55)
+#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_FIFO_ERR vxge_mBIT(56)
+#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_FIFO_ERR vxge_mBIT(57)
+#define VXGE_HW_CP_ERR_REG_CP_CP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(60)
+#define VXGE_HW_CP_ERR_REG_CP_CP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(61)
+#define VXGE_HW_CP_ERR_REG_CP_DMA_RD_SHADOW_ERR vxge_mBIT(62)
+#define VXGE_HW_CP_ERR_REG_CP_PIFT_CREDIT_ERR vxge_mBIT(63)
+/*0x04f30*/ u64 cp_err_mask;
+/*0x04f38*/ u64 cp_err_alarm;
+ u8 unused04f50[0x04f50-0x04f40];
+
+/*0x04f50*/ u64 cp_exc_reg;
+#define VXGE_HW_CP_EXC_REG_CP_CP_CAUSE_INFO_INT vxge_mBIT(47)
+#define VXGE_HW_CP_EXC_REG_CP_CP_CAUSE_CRIT_INT vxge_mBIT(55)
+#define VXGE_HW_CP_EXC_REG_CP_CP_SERR vxge_mBIT(63)
+/*0x04f58*/ u64 cp_exc_mask;
+/*0x04f60*/ u64 cp_exc_alarm;
+/*0x04f68*/ u64 cp_exc_cause;
+#define VXGE_HW_CP_EXC_CAUSE_CP_CP_CAUSE(val) vxge_vBIT(val, 32, 32)
+ u8 unused05200[0x05200-0x04f70];
+
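+/* Message-engine fault blocks.  The msg_err* registers latch cache
+ * ECC (SG/DB) errors and queue/FSM integrity failures for the MXP and
+ * UXP message processors, alongside the TIM block's own ECC and FIFO
+ * errors.
+ */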
+/*0x05200*/ u64 msg_int_status;
+#define VXGE_HW_MSG_INT_STATUS_TIM_ERR_TIM_INT vxge_mBIT(7)
+#define VXGE_HW_MSG_INT_STATUS_MSG_EXC_MSG_XT_EXC_INT vxge_mBIT(60)
+#define VXGE_HW_MSG_INT_STATUS_MSG_ERR3_MSG_ERR3_INT vxge_mBIT(61)
+#define VXGE_HW_MSG_INT_STATUS_MSG_ERR2_MSG_ERR2_INT vxge_mBIT(62)
+#define VXGE_HW_MSG_INT_STATUS_MSG_ERR_MSG_ERR_INT vxge_mBIT(63)
+/*0x05208*/ u64 msg_int_mask;
+/*0x05210*/ u64 tim_err_reg;
+#define VXGE_HW_TIM_ERR_REG_TIM_VBLS_SG_ERR vxge_mBIT(4)
+#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PA_SG_ERR vxge_mBIT(5)
+#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PB_SG_ERR vxge_mBIT(6)
+#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_SG_ERR vxge_mBIT(7)
+#define VXGE_HW_TIM_ERR_REG_TIM_VBLS_DB_ERR vxge_mBIT(12)
+#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PA_DB_ERR vxge_mBIT(13)
+#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PB_DB_ERR vxge_mBIT(14)
+#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_DB_ERR vxge_mBIT(15)
+#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MEM_CNTRL_SM_ERR vxge_mBIT(18)
+#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_MEM_CNTRL_SM_ERR vxge_mBIT(19)
+#define VXGE_HW_TIM_ERR_REG_TIM_MPIF_PCIWR_ERR vxge_mBIT(20)
+#define VXGE_HW_TIM_ERR_REG_TIM_ROCRC_BMAP_UPDT_FIFO_ERR vxge_mBIT(22)
+#define VXGE_HW_TIM_ERR_REG_TIM_CREATE_BMAPMSG_FIFO_ERR vxge_mBIT(23)
+#define VXGE_HW_TIM_ERR_REG_TIM_ROCRCIF_MISMATCH vxge_mBIT(46)
+#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MAPPING_VP_ERR(n) vxge_mBIT(n)
+/*0x05218*/ u64 tim_err_mask;
+/*0x05220*/ u64 tim_err_alarm;
+/*0x05228*/ u64 msg_err_reg;
+#define VXGE_HW_MSG_ERR_REG_UP_UXP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(0)
+#define VXGE_HW_MSG_ERR_REG_MP_MXP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(1)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_DMA_READ_CMD_FSM_INTEGRITY_ERR \
+ vxge_mBIT(2)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_DMA_RESP_FSM_INTEGRITY_ERR \
+ vxge_mBIT(3)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_OWN_FSM_INTEGRITY_ERR vxge_mBIT(4)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_PDA_ACC_FSM_INTEGRITY_ERR vxge_mBIT(5)
+#define VXGE_HW_MSG_ERR_REG_MP_MXP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(6)
+#define VXGE_HW_MSG_ERR_REG_UP_UXP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(7)
+#define VXGE_HW_MSG_ERR_REG_UP_UXP_DTAG_SG_ERR vxge_mBIT(8)
+#define VXGE_HW_MSG_ERR_REG_UP_UXP_ITAG_SG_ERR vxge_mBIT(10)
+#define VXGE_HW_MSG_ERR_REG_MP_MXP_DTAG_SG_ERR vxge_mBIT(12)
+#define VXGE_HW_MSG_ERR_REG_MP_MXP_ITAG_SG_ERR vxge_mBIT(14)
+#define VXGE_HW_MSG_ERR_REG_UP_UXP_TRACE_SG_ERR vxge_mBIT(16)
+#define VXGE_HW_MSG_ERR_REG_MP_MXP_TRACE_SG_ERR vxge_mBIT(17)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CMG2MSG_SG_ERR vxge_mBIT(18)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_TXPE2MSG_SG_ERR vxge_mBIT(19)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RXPE2MSG_SG_ERR vxge_mBIT(20)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RPE2MSG_SG_ERR vxge_mBIT(21)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_SG_ERR vxge_mBIT(26)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_PF_SG_ERR vxge_mBIT(27)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_ECC_SG_ERR vxge_mBIT(29)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_SG_ERR vxge_mBIT(31)
+#define VXGE_HW_MSG_ERR_REG_MSG_XFMDQRY_FSM_INTEGRITY_ERR vxge_mBIT(33)
+#define VXGE_HW_MSG_ERR_REG_MSG_FRMQRY_FSM_INTEGRITY_ERR vxge_mBIT(34)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_WRITE_FSM_INTEGRITY_ERR vxge_mBIT(35)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_BWR_PF_FSM_INTEGRITY_ERR \
+ vxge_mBIT(36)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_REG_RESP_FIFO_ERR vxge_mBIT(38)
+#define VXGE_HW_MSG_ERR_REG_UP_UXP_DTAG_DB_ERR vxge_mBIT(39)
+#define VXGE_HW_MSG_ERR_REG_UP_UXP_ITAG_DB_ERR vxge_mBIT(41)
+#define VXGE_HW_MSG_ERR_REG_MP_MXP_DTAG_DB_ERR vxge_mBIT(43)
+#define VXGE_HW_MSG_ERR_REG_MP_MXP_ITAG_DB_ERR vxge_mBIT(45)
+#define VXGE_HW_MSG_ERR_REG_UP_UXP_TRACE_DB_ERR vxge_mBIT(47)
+#define VXGE_HW_MSG_ERR_REG_MP_MXP_TRACE_DB_ERR vxge_mBIT(48)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CMG2MSG_DB_ERR vxge_mBIT(49)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_TXPE2MSG_DB_ERR vxge_mBIT(50)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RXPE2MSG_DB_ERR vxge_mBIT(51)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RPE2MSG_DB_ERR vxge_mBIT(52)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_REG_READ_FIFO_ERR vxge_mBIT(53)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_MXP2UXP_FIFO_ERR vxge_mBIT(54)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_KDFC_SIF_FIFO_ERR vxge_mBIT(55)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CXP2SWIF_FIFO_ERR vxge_mBIT(56)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_DB_ERR vxge_mBIT(57)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_PF_DB_ERR vxge_mBIT(58)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_SIF_FIFO_ERR vxge_mBIT(59)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_ECC_DB_ERR vxge_mBIT(60)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_READ_FIFO_ERR vxge_mBIT(61)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_DB_ERR vxge_mBIT(62)
+#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UXP2MXP_FIFO_ERR vxge_mBIT(63)
+/*0x05230*/ u64 msg_err_mask;
+/*0x05238*/ u64 msg_err_alarm;
+ u8 unused05340[0x05340-0x05240];
+
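+/* Exception (as distinct from error) reporting: msg_exc_cause below
+ * captures a 32-bit cause word per processor (MP_MXP and UP_UXP) when
+ * the corresponding CAUSE_INFO or CAUSE_CRIT interrupt fires.
+ */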
+/*0x05340*/ u64 msg_exc_reg;
+#define VXGE_HW_MSG_EXC_REG_MP_MXP_CAUSE_INFO_INT vxge_mBIT(50)
+#define VXGE_HW_MSG_EXC_REG_MP_MXP_CAUSE_CRIT_INT vxge_mBIT(51)
+#define VXGE_HW_MSG_EXC_REG_UP_UXP_CAUSE_INFO_INT vxge_mBIT(54)
+#define VXGE_HW_MSG_EXC_REG_UP_UXP_CAUSE_CRIT_INT vxge_mBIT(55)
+#define VXGE_HW_MSG_EXC_REG_MP_MXP_SERR vxge_mBIT(62)
+#define VXGE_HW_MSG_EXC_REG_UP_UXP_SERR vxge_mBIT(63)
+/*0x05348*/ u64 msg_exc_mask;
+/*0x05350*/ u64 msg_exc_alarm;
+/*0x05358*/ u64 msg_exc_cause;
+#define VXGE_HW_MSG_EXC_CAUSE_MP_MXP(val) vxge_vBIT(val, 0, 32)
+#define VXGE_HW_MSG_EXC_CAUSE_UP_UXP(val) vxge_vBIT(val, 32, 32)
+ u8 unused05380[0x05380-0x05360];
+
+/*0x05380*/ u64 msg_err2_reg;
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_CMG2MSG_DISPATCH_FSM_INTEGRITY_ERR \
+ vxge_mBIT(0)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_DMQ_DISPATCH_FSM_INTEGRITY_ERR \
+ vxge_mBIT(1)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIF_DISPATCH_FSM_INTEGRITY_ERR \
+ vxge_mBIT(2)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_PIC_WRITE_FSM_INTEGRITY_ERR \
+ vxge_mBIT(3)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIFREG_FSM_INTEGRITY_ERR vxge_mBIT(4)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TIM_WRITE_FSM_INTEGRITY_ERR \
+ vxge_mBIT(5)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_UMQ_TA_FSM_INTEGRITY_ERR vxge_mBIT(6)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TXPE_TA_FSM_INTEGRITY_ERR vxge_mBIT(7)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RXPE_TA_FSM_INTEGRITY_ERR vxge_mBIT(8)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIF_TA_FSM_INTEGRITY_ERR vxge_mBIT(9)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_DMA_TA_FSM_INTEGRITY_ERR vxge_mBIT(10)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_CP_TA_FSM_INTEGRITY_ERR vxge_mBIT(11)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA16_FSM_INTEGRITY_ERR \
+ vxge_mBIT(12)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA15_FSM_INTEGRITY_ERR \
+ vxge_mBIT(13)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA14_FSM_INTEGRITY_ERR \
+ vxge_mBIT(14)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA13_FSM_INTEGRITY_ERR \
+ vxge_mBIT(15)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA12_FSM_INTEGRITY_ERR \
+ vxge_mBIT(16)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA11_FSM_INTEGRITY_ERR \
+ vxge_mBIT(17)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA10_FSM_INTEGRITY_ERR \
+ vxge_mBIT(18)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA9_FSM_INTEGRITY_ERR \
+ vxge_mBIT(19)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA8_FSM_INTEGRITY_ERR \
+ vxge_mBIT(20)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA7_FSM_INTEGRITY_ERR \
+ vxge_mBIT(21)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA6_FSM_INTEGRITY_ERR \
+ vxge_mBIT(22)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA5_FSM_INTEGRITY_ERR \
+ vxge_mBIT(23)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA4_FSM_INTEGRITY_ERR \
+ vxge_mBIT(24)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA3_FSM_INTEGRITY_ERR \
+ vxge_mBIT(25)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA2_FSM_INTEGRITY_ERR \
+ vxge_mBIT(26)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA1_FSM_INTEGRITY_ERR \
+ vxge_mBIT(27)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA0_FSM_INTEGRITY_ERR \
+ vxge_mBIT(28)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_FBMC_OWN_FSM_INTEGRITY_ERR vxge_mBIT(29)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
+ vxge_mBIT(30)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
+ vxge_mBIT(31)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \
+ vxge_mBIT(32)
+#define VXGE_HW_MSG_ERR2_REG_MP_MP_PIFT_IF_CREDIT_CNT_ERR vxge_mBIT(33)
+#define VXGE_HW_MSG_ERR2_REG_UP_UP_PIFT_IF_CREDIT_CNT_ERR vxge_mBIT(34)
+#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_UMQ2PIC_CMD_FIFO_ERR vxge_mBIT(62)
+#define VXGE_HW_MSG_ERR2_REG_TIM_TIM2MSG_CMD_FIFO_ERR vxge_mBIT(63)
+/*0x05388*/ u64 msg_err2_mask;
+/*0x05390*/ u64 msg_err2_alarm;
+/*0x05398*/ u64 msg_err3_reg;
+#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR0 vxge_mBIT(0)
+#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR1 vxge_mBIT(1)
+#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR2 vxge_mBIT(2)
+#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR3 vxge_mBIT(3)
+#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR4 vxge_mBIT(4)
+#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR5 vxge_mBIT(5)
+#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR6 vxge_mBIT(6)
+#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR7 vxge_mBIT(7)
+#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_SG_ERR0 vxge_mBIT(8)
+#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_SG_ERR1 vxge_mBIT(9)
+#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR0 vxge_mBIT(16)
+#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR1 vxge_mBIT(17)
+#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR2 vxge_mBIT(18)
+#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR3 vxge_mBIT(19)
+#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR4 vxge_mBIT(20)
+#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR5 vxge_mBIT(21)
+#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR6 vxge_mBIT(22)
+#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR7 vxge_mBIT(23)
+#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_SG_ERR0 vxge_mBIT(24)
+#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_SG_ERR1 vxge_mBIT(25)
+#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR0 vxge_mBIT(32)
+#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR1 vxge_mBIT(33)
+#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR2 vxge_mBIT(34)
+#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR3 vxge_mBIT(35)
+#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR4 vxge_mBIT(36)
+#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR5 vxge_mBIT(37)
+#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR6 vxge_mBIT(38)
+#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR7 vxge_mBIT(39)
+#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR0 vxge_mBIT(40)
+#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR1 vxge_mBIT(41)
+#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR0 vxge_mBIT(48)
+#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR1 vxge_mBIT(49)
+#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR2 vxge_mBIT(50)
+#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR3 vxge_mBIT(51)
+#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR4 vxge_mBIT(52)
+#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR5 vxge_mBIT(53)
+#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR6 vxge_mBIT(54)
+#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR7 vxge_mBIT(55)
+#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR0 vxge_mBIT(56)
+#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR1 vxge_mBIT(57)
+/*0x053a0*/ u64 msg_err3_mask;
+/*0x053a8*/ u64 msg_err3_alarm;
+ u8 unused05600[0x05600-0x053b0];
+
+/*0x05600*/ u64 fau_gen_err_reg;
+#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT0_PERMANENT_STOP vxge_mBIT(3)
+#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT1_PERMANENT_STOP vxge_mBIT(7)
+#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT2_PERMANENT_STOP vxge_mBIT(11)
+#define VXGE_HW_FAU_GEN_ERR_REG_FALR_AUTO_LRO_NOTIFICATION vxge_mBIT(15)
+/*0x05608*/ u64 fau_gen_err_mask;
+/*0x05610*/ u64 fau_gen_err_alarm;
+/*0x05618*/ u64 fau_ecc_err_reg;
+#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_SG_ERR vxge_mBIT(0)
+#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_DB_ERR vxge_mBIT(1)
+#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_SG_ERR(val) \
+ vxge_vBIT(val, 2, 2)
+#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_DB_ERR(val) \
+ vxge_vBIT(val, 4, 2)
+#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_SG_ERR vxge_mBIT(6)
+#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_DB_ERR vxge_mBIT(7)
+#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_SG_ERR(val) \
+ vxge_vBIT(val, 8, 2)
+#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_DB_ERR(val) \
+ vxge_vBIT(val, 10, 2)
+#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_SG_ERR vxge_mBIT(12)
+#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_DB_ERR vxge_mBIT(13)
+#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_SG_ERR(val) \
+ vxge_vBIT(val, 14, 2)
+#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_DB_ERR(val) \
+ vxge_vBIT(val, 16, 2)
+#define VXGE_HW_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_SG_ERR(val) \
+ vxge_vBIT(val, 18, 2)
+#define VXGE_HW_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_DB_ERR(val) \
+ vxge_vBIT(val, 20, 2)
+#define VXGE_HW_FAU_ECC_ERR_REG_FAUJ_FAU_FSM_ERR vxge_mBIT(31)
+/*0x05620*/ u64 fau_ecc_err_mask;
+/*0x05628*/ u64 fau_ecc_err_alarm;
+ u8 unused05658[0x05658-0x05630];
+/*0x05658*/ u64 fau_pa_cfg;
+#define VXGE_HW_FAU_PA_CFG_REPL_L4_COMP_CSUM vxge_mBIT(3)
+#define VXGE_HW_FAU_PA_CFG_REPL_L3_INCL_CF vxge_mBIT(7)
+#define VXGE_HW_FAU_PA_CFG_REPL_L3_COMP_CSUM vxge_mBIT(11)
+ u8 unused05668[0x05668-0x05660];
+
+/*0x05668*/ u64 dbg_stats_fau_rx_path;
+#define VXGE_HW_DBG_STATS_FAU_RX_PATH_RX_PERMITTED_FRMS(val) \
+ vxge_vBIT(val, 32, 32)
+ u8 unused056c0[0x056c0-0x05670];
+
+/*0x056c0*/ u64 fau_lag_cfg;
+#define VXGE_HW_FAU_LAG_CFG_COLL_ALG(val) vxge_vBIT(val, 2, 2)
+#define VXGE_HW_FAU_LAG_CFG_INCR_RX_AGGR_STATS vxge_mBIT(7)
+ u8 unused05800[0x05800-0x056c8];
+
+/*0x05800*/ u64 tpa_int_status;
+#define VXGE_HW_TPA_INT_STATUS_ORP_ERR_ORP_INT vxge_mBIT(15)
+#define VXGE_HW_TPA_INT_STATUS_PTM_ALARM_PTM_INT vxge_mBIT(23)
+#define VXGE_HW_TPA_INT_STATUS_TPA_ERROR_TPA_INT vxge_mBIT(31)
+/*0x05808*/ u64 tpa_int_mask;
+/*0x05810*/ u64 orp_err_reg;
+#define VXGE_HW_ORP_ERR_REG_ORP_FIFO_SG_ERR vxge_mBIT(3)
+#define VXGE_HW_ORP_ERR_REG_ORP_FIFO_DB_ERR vxge_mBIT(7)
+#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_FIFO_UFLOW_ERR vxge_mBIT(11)
+#define VXGE_HW_ORP_ERR_REG_ORP_FRM_FIFO_UFLOW_ERR vxge_mBIT(15)
+#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_RCV_FSM_ERR vxge_mBIT(19)
+#define VXGE_HW_ORP_ERR_REG_ORP_OUTREAD_FSM_ERR vxge_mBIT(23)
+#define VXGE_HW_ORP_ERR_REG_ORP_OUTQEM_FSM_ERR vxge_mBIT(27)
+#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_RCV_SHADOW_ERR vxge_mBIT(31)
+#define VXGE_HW_ORP_ERR_REG_ORP_OUTREAD_SHADOW_ERR vxge_mBIT(35)
+#define VXGE_HW_ORP_ERR_REG_ORP_OUTQEM_SHADOW_ERR vxge_mBIT(39)
+#define VXGE_HW_ORP_ERR_REG_ORP_OUTFRM_SHADOW_ERR vxge_mBIT(43)
+#define VXGE_HW_ORP_ERR_REG_ORP_OPTPRS_SHADOW_ERR vxge_mBIT(47)
+/*0x05818*/ u64 orp_err_mask;
+/*0x05820*/ u64 orp_err_alarm;
+/*0x05828*/ u64 ptm_alarm_reg;
+#define VXGE_HW_PTM_ALARM_REG_PTM_RDCTRL_SYNC_ERR vxge_mBIT(3)
+#define VXGE_HW_PTM_ALARM_REG_PTM_RDCTRL_FIFO_ERR vxge_mBIT(7)
+#define VXGE_HW_PTM_ALARM_REG_XFMD_RD_FIFO_ERR vxge_mBIT(11)
+#define VXGE_HW_PTM_ALARM_REG_WDE2MSR_WR_FIFO_ERR vxge_mBIT(15)
+#define VXGE_HW_PTM_ALARM_REG_PTM_FRMM_ECC_DB_ERR(val) vxge_vBIT(val, 18, 2)
+#define VXGE_HW_PTM_ALARM_REG_PTM_FRMM_ECC_SG_ERR(val) vxge_vBIT(val, 22, 2)
+/*0x05830*/ u64 ptm_alarm_mask;
+/*0x05838*/ u64 ptm_alarm_alarm;
+/*0x05840*/ u64 tpa_error_reg;
+#define VXGE_HW_TPA_ERROR_REG_TPA_FSM_ERR_ALARM vxge_mBIT(3)
+#define VXGE_HW_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_DB_ERR vxge_mBIT(7)
+#define VXGE_HW_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_SG_ERR vxge_mBIT(11)
+/*0x05848*/ u64 tpa_error_mask;
+/*0x05850*/ u64 tpa_error_alarm;
+/*0x05858*/ u64 tpa_global_cfg;
+#define VXGE_HW_TPA_GLOBAL_CFG_SUPPORT_SNAP_AB_N vxge_mBIT(7)
+#define VXGE_HW_TPA_GLOBAL_CFG_ECC_ENABLE_N vxge_mBIT(35)
+ u8 unused05870[0x05870-0x05860];
+
+/*0x05870*/ u64 ptm_ecc_cfg;
+#define VXGE_HW_PTM_ECC_CFG_PTM_FRMM_ECC_EN_N vxge_mBIT(3)
+/*0x05878*/ u64 ptm_phase_cfg;
+#define VXGE_HW_PTM_PHASE_CFG_FRMM_WR_PHASE_EN vxge_mBIT(3)
+#define VXGE_HW_PTM_PHASE_CFG_FRMM_RD_PHASE_EN vxge_mBIT(7)
+ u8 unused05898[0x05898-0x05880];
+
+/*0x05898*/ u64 dbg_stats_tpa_tx_path;
+#define VXGE_HW_DBG_STATS_TPA_TX_PATH_TX_PERMITTED_FRMS(val) \
+ vxge_vBIT(val, 32, 32)
+ u8 unused05900[0x05900-0x058a0];
+
+/*0x05900*/ u64 tmac_int_status;
+#define VXGE_HW_TMAC_INT_STATUS_TXMAC_GEN_ERR_TXMAC_GEN_INT vxge_mBIT(3)
+#define VXGE_HW_TMAC_INT_STATUS_TXMAC_ECC_ERR_TXMAC_ECC_INT vxge_mBIT(7)
+/*0x05908*/ u64 tmac_int_mask;
+/*0x05910*/ u64 txmac_gen_err_reg;
+#define VXGE_HW_TXMAC_GEN_ERR_REG_TMACJ_PERMANENT_STOP vxge_mBIT(3)
+#define VXGE_HW_TXMAC_GEN_ERR_REG_TMACJ_NO_VALID_VSPORT vxge_mBIT(7)
+/*0x05918*/ u64 txmac_gen_err_mask;
+/*0x05920*/ u64 txmac_gen_err_alarm;
+/*0x05928*/ u64 txmac_ecc_err_reg;
+#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_SG_ERR vxge_mBIT(3)
+#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_DB_ERR vxge_mBIT(7)
+#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_SG_ERR vxge_mBIT(11)
+#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_DB_ERR vxge_mBIT(15)
+#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_SG_ERR vxge_mBIT(19)
+#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_DB_ERR vxge_mBIT(23)
+#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT0_FSM_ERR vxge_mBIT(27)
+#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT1_FSM_ERR vxge_mBIT(31)
+#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT2_FSM_ERR vxge_mBIT(35)
+#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMACJ_FSM_ERR vxge_mBIT(39)
+/*0x05930*/ u64 txmac_ecc_err_mask;
+/*0x05938*/ u64 txmac_ecc_err_alarm;
+ u8 unused05978[0x05978-0x05940];
+
+/*0x05978*/ u64 dbg_stat_tx_any_frms;
+#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT0_TX_ANY_FRMS(val) vxge_vBIT(val, 0, 8)
+#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT1_TX_ANY_FRMS(val) vxge_vBIT(val, 8, 8)
+#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT2_TX_ANY_FRMS(val) \
+ vxge_vBIT(val, 16, 8)
+ u8 unused059a0[0x059a0-0x05980];
+
+/*0x059a0*/ u64 txmac_link_util_port[3];
+#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_UTILIZATION(val) \
+ vxge_vBIT(val, 1, 7)
+#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_UTIL_CFG(val) vxge_vBIT(val, 8, 4)
+#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_FRAC_UTIL(val) \
+ vxge_vBIT(val, 12, 4)
+#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_PKT_WEIGHT(val) vxge_vBIT(val, 16, 4)
+#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_SCALE_FACTOR vxge_mBIT(23)
+/*0x059b8*/ u64 txmac_cfg0_port[3];
+#define VXGE_HW_TXMAC_CFG0_PORT_TMAC_EN vxge_mBIT(3)
+#define VXGE_HW_TXMAC_CFG0_PORT_APPEND_PAD vxge_mBIT(7)
+#define VXGE_HW_TXMAC_CFG0_PORT_PAD_BYTE(val) vxge_vBIT(val, 8, 8)
+/*0x059d0*/ u64 txmac_cfg1_port[3];
+#define VXGE_HW_TXMAC_CFG1_PORT_AVG_IPG(val) vxge_vBIT(val, 40, 8)
+/*0x059e8*/ u64 txmac_status_port[3];
+#define VXGE_HW_TXMAC_STATUS_PORT_TMAC_TX_FRM_SENT vxge_mBIT(3)
+ u8 unused05a20[0x05a20-0x05a00];
+
+/*0x05a20*/ u64 lag_distrib_dest;
+#define VXGE_HW_LAG_DISTRIB_DEST_MAP_VPATH(n) vxge_mBIT(n)
+/*0x05a28*/ u64 lag_marker_cfg;
+#define VXGE_HW_LAG_MARKER_CFG_GEN_RCVR_EN vxge_mBIT(3)
+#define VXGE_HW_LAG_MARKER_CFG_RESP_EN vxge_mBIT(7)
+#define VXGE_HW_LAG_MARKER_CFG_RESP_TIMEOUT(val) vxge_vBIT(val, 16, 16)
+#define VXGE_HW_LAG_MARKER_CFG_SLOW_PROTO_MRKR_MIN_INTERVAL(val) \
+ vxge_vBIT(val, 32, 16)
+#define VXGE_HW_LAG_MARKER_CFG_THROTTLE_MRKR_RESP vxge_mBIT(51)
+/*0x05a30*/ u64 lag_tx_cfg;
+#define VXGE_HW_LAG_TX_CFG_INCR_TX_AGGR_STATS vxge_mBIT(3)
+#define VXGE_HW_LAG_TX_CFG_DISTRIB_ALG_SEL(val) vxge_vBIT(val, 6, 2)
+#define VXGE_HW_LAG_TX_CFG_DISTRIB_REMAP_IF_FAIL vxge_mBIT(11)
+#define VXGE_HW_LAG_TX_CFG_COLL_MAX_DELAY(val) vxge_vBIT(val, 16, 16)
+/*0x05a38*/ u64 lag_tx_status;
+#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_EMPTIED_LINK(val) \
+ vxge_vBIT(val, 0, 8)
+#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_SLOW_PROTO_MRKR(val) \
+ vxge_vBIT(val, 8, 8)
+#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_SLOW_PROTO_MRKRRESP(val) \
+ vxge_vBIT(val, 16, 8)
+ u8 unused05d48[0x05d48-0x05a40];
+
+/*0x05d48*/ u64 srpcim_to_mrpcim_vplane_rmsg[17];
+#define \
+VXGE_HW_SRPCIM_TO_MRPCIM_VPLANE_RMSG_SWIF_SRPCIM_TO_MRPCIM_VPLANE_RMSG(val)\
+ vxge_vBIT(val, 0, 64)
+ u8 unused06420[0x06420-0x05dd0];
+
+/*0x06420*/ u64 mrpcim_to_srpcim_vplane_wmsg[17];
+#define VXGE_HW_MRPCIM_TO_SRPCIM_VPLANE_WMSG_MRPCIM_TO_SRPCIM_VPLANE_WMSG(val) \
+ vxge_vBIT(val, 0, 64)
+/*0x064a8*/ u64 mrpcim_to_srpcim_vplane_wmsg_trig[17];
+
+/*0x06530*/ u64 debug_stats0;
+#define VXGE_HW_DEBUG_STATS0_RSTDROP_MSG(val) vxge_vBIT(val, 0, 32)
+#define VXGE_HW_DEBUG_STATS0_RSTDROP_CPL(val) vxge_vBIT(val, 32, 32)
+/*0x06538*/ u64 debug_stats1;
+#define VXGE_HW_DEBUG_STATS1_RSTDROP_CLIENT0(val) vxge_vBIT(val, 0, 32)
+#define VXGE_HW_DEBUG_STATS1_RSTDROP_CLIENT1(val) vxge_vBIT(val, 32, 32)
+/*0x06540*/ u64 debug_stats2;
+#define VXGE_HW_DEBUG_STATS2_RSTDROP_CLIENT2(val) vxge_vBIT(val, 0, 32)
+/*0x06548*/ u64 debug_stats3_vplane[17];
+#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_PH(val) vxge_vBIT(val, 0, 16)
+#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_NPH(val) vxge_vBIT(val, 16, 16)
+#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_CPLH(val) vxge_vBIT(val, 32, 16)
+/*0x065d0*/ u64 debug_stats4_vplane[17];
+#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_PD(val) vxge_vBIT(val, 0, 16)
+#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_NPD(val) vxge_vBIT(val, 16, 16)
+#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_CPLD(val) vxge_vBIT(val, 32, 16)
+
+ u8 unused07000[0x07000-0x06658];
+
+/*0x07000*/ u64 mrpcim_general_int_status;
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PIC_INT vxge_mBIT(0)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCI_INT vxge_mBIT(1)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_RTDMA_INT vxge_mBIT(2)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_WRDMA_INT vxge_mBIT(3)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMCT_INT vxge_mBIT(4)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG1_INT vxge_mBIT(5)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG2_INT vxge_mBIT(6)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG3_INT vxge_mBIT(7)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMIFL_INT vxge_mBIT(8)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMIFU_INT vxge_mBIT(9)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG1_INT vxge_mBIT(10)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG2_INT vxge_mBIT(11)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG3_INT vxge_mBIT(12)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_XMAC_INT vxge_mBIT(13)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_RXMAC_INT vxge_mBIT(14)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_TMAC_INT vxge_mBIT(15)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3FBIF_INT vxge_mBIT(16)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_FBMC_INT vxge_mBIT(17)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3FBCT_INT vxge_mBIT(18)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_TPA_INT vxge_mBIT(19)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_DRBELL_INT vxge_mBIT(20)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_ONE_INT vxge_mBIT(21)
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_MSG_INT vxge_mBIT(22)
+/*0x07008*/ u64 mrpcim_general_int_mask;
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PIC_INT vxge_mBIT(0)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCI_INT vxge_mBIT(1)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_RTDMA_INT vxge_mBIT(2)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_WRDMA_INT vxge_mBIT(3)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMCT_INT vxge_mBIT(4)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG1_INT vxge_mBIT(5)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG2_INT vxge_mBIT(6)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG3_INT vxge_mBIT(7)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMIFL_INT vxge_mBIT(8)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMIFU_INT vxge_mBIT(9)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG1_INT vxge_mBIT(10)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG2_INT vxge_mBIT(11)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG3_INT vxge_mBIT(12)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_XMAC_INT vxge_mBIT(13)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_RXMAC_INT vxge_mBIT(14)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_TMAC_INT vxge_mBIT(15)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3FBIF_INT vxge_mBIT(16)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_FBMC_INT vxge_mBIT(17)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3FBCT_INT vxge_mBIT(18)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_TPA_INT vxge_mBIT(19)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_DRBELL_INT vxge_mBIT(20)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_ONE_INT vxge_mBIT(21)
+#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_MSG_INT vxge_mBIT(22)
+/*0x07010*/ u64 mrpcim_ppif_int_status;
+#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_INI_ERRORS_INI_INT vxge_mBIT(3)
+#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_DMA_ERRORS_DMA_INT vxge_mBIT(7)
+#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_TGT_ERRORS_TGT_INT vxge_mBIT(11)
+#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CONFIG_ERRORS_CONFIG_INT vxge_mBIT(15)
+#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_CRDT_INT vxge_mBIT(19)
+#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_PLL_ERRORS_PLL_INT vxge_mBIT(27)
+#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE0_CRD_INT_VPLANE0_INT\
+ vxge_mBIT(31)
+#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE1_CRD_INT_VPLANE1_INT\
+ vxge_mBIT(32)
+#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE2_CRD_INT_VPLANE2_INT\
+ vxge_mBIT(33)
+#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE3_CRD_INT_VPLANE3_INT\
+ vxge_mBIT(34)
+#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE4_CRD_INT_VPLANE4_INT\
+ vxge_mBIT(35)
+#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE5_CRD_INT_VPLANE5_INT\
+ vxge_mBIT(36)
+#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE6_CRD_INT_VPLANE6_INT\
+ vxge_mBIT(37)
+#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE7_CRD_INT_VPLANE7_INT\
+ vxge_mBIT(38)
+#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE8_CRD_INT_VPLANE8_INT\
+ vxge_mBIT(39)
+#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE9_CRD_INT_VPLANE9_INT\
+ vxge_mBIT(40)
+#define \
+VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE10_CRD_INT_VPLANE10_INT \
+ vxge_mBIT(41)
+#define \
+VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE11_CRD_INT_VPLANE11_INT \
+ vxge_mBIT(42)
+#define \
+VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE12_CRD_INT_VPLANE12_INT \
+ vxge_mBIT(43)
+#define \
+VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE13_CRD_INT_VPLANE13_INT \
+ vxge_mBIT(44)
+#define \
+VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE14_CRD_INT_VPLANE14_INT \
+ vxge_mBIT(45)
+#define \
+VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE15_CRD_INT_VPLANE15_INT \
+ vxge_mBIT(46)
+#define \
+VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE16_CRD_INT_VPLANE16_INT \
+ vxge_mBIT(47)
+#define \
+VXGE_HW_MRPCIM_PPIF_INT_STATUS_VPATH_TO_MRPCIM_ALARM_VPATH_TO_MRPCIM_ALARM_INT \
+ vxge_mBIT(55)
+/*0x07018*/ u64 mrpcim_ppif_int_mask;
+ u8 unused07028[0x07028-0x07020];
+
+/*0x07028*/ u64 ini_errors_reg;
+#define VXGE_HW_INI_ERRORS_REG_SCPL_CPL_TIMEOUT_UNUSED_TAG vxge_mBIT(3)
+#define VXGE_HW_INI_ERRORS_REG_SCPL_CPL_TIMEOUT vxge_mBIT(7)
+#define VXGE_HW_INI_ERRORS_REG_DCPL_FSM_ERR vxge_mBIT(11)
+#define VXGE_HW_INI_ERRORS_REG_DCPL_POISON vxge_mBIT(12)
+#define VXGE_HW_INI_ERRORS_REG_DCPL_UNSUPPORTED vxge_mBIT(15)
+#define VXGE_HW_INI_ERRORS_REG_DCPL_ABORT vxge_mBIT(19)
+#define VXGE_HW_INI_ERRORS_REG_INI_TLP_ABORT vxge_mBIT(23)
+#define VXGE_HW_INI_ERRORS_REG_INI_DLLP_ABORT vxge_mBIT(27)
+#define VXGE_HW_INI_ERRORS_REG_INI_ECRC_ERR vxge_mBIT(31)
+#define VXGE_HW_INI_ERRORS_REG_INI_BUF_DB_ERR vxge_mBIT(35)
+#define VXGE_HW_INI_ERRORS_REG_INI_BUF_SG_ERR vxge_mBIT(39)
+#define VXGE_HW_INI_ERRORS_REG_INI_DATA_OVERFLOW vxge_mBIT(43)
+#define VXGE_HW_INI_ERRORS_REG_INI_HDR_OVERFLOW vxge_mBIT(47)
+#define VXGE_HW_INI_ERRORS_REG_INI_MRD_SYS_DROP vxge_mBIT(51)
+#define VXGE_HW_INI_ERRORS_REG_INI_MWR_SYS_DROP vxge_mBIT(55)
+#define VXGE_HW_INI_ERRORS_REG_INI_MRD_CLIENT_DROP vxge_mBIT(59)
+#define VXGE_HW_INI_ERRORS_REG_INI_MWR_CLIENT_DROP vxge_mBIT(63)
+/*0x07030*/ u64 ini_errors_mask;
+/*0x07038*/ u64 ini_errors_alarm;
+/*0x07040*/ u64 dma_errors_reg;
+#define VXGE_HW_DMA_ERRORS_REG_RDARB_FSM_ERR vxge_mBIT(3)
+#define VXGE_HW_DMA_ERRORS_REG_WRARB_FSM_ERR vxge_mBIT(7)
+#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_OVERFLOW vxge_mBIT(8)
+#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_UNDERFLOW vxge_mBIT(9)
+#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_OVERFLOW vxge_mBIT(10)
+#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_UNDERFLOW vxge_mBIT(11)
+#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_HDR_OVERFLOW vxge_mBIT(12)
+#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_HDR_UNDERFLOW vxge_mBIT(13)
+#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_DATA_OVERFLOW vxge_mBIT(14)
+#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_DATA_UNDERFLOW vxge_mBIT(15)
+#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_HDR_OVERFLOW vxge_mBIT(16)
+#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_HDR_UNDERFLOW vxge_mBIT(17)
+#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_DATA_OVERFLOW vxge_mBIT(18)
+#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_DATA_UNDERFLOW vxge_mBIT(19)
+#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_OVERFLOW vxge_mBIT(20)
+#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_UNDERFLOW vxge_mBIT(21)
+#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_OVERFLOW vxge_mBIT(22)
+#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_UNDERFLOW vxge_mBIT(23)
+#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_OVERFLOW vxge_mBIT(24)
+#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_UNDERFLOW vxge_mBIT(25)
+#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_OVERFLOW vxge_mBIT(28)
+#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_UNDERFLOW vxge_mBIT(29)
+#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_FSM_ERR vxge_mBIT(32)
+#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_CREDIT_FSM_ERR vxge_mBIT(33)
+#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_DMA_WRR_SM_ERR vxge_mBIT(34)
+/*0x07048*/ u64 dma_errors_mask;
+/*0x07050*/ u64 dma_errors_alarm;
+/*0x07058*/ u64 tgt_errors_reg;
+#define VXGE_HW_TGT_ERRORS_REG_TGT_VENDOR_MSG vxge_mBIT(0)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_MSG_UNLOCK vxge_mBIT(1)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_ILLEGAL_TLP_BE vxge_mBIT(2)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_BOOT_WRITE vxge_mBIT(3)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_PIF_WR_CROSS_QWRANGE vxge_mBIT(4)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_PIF_READ_CROSS_QWRANGE vxge_mBIT(5)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_KDFC_READ vxge_mBIT(6)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_USDC_READ vxge_mBIT(7)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_USDC_WR_CROSS_QWRANGE vxge_mBIT(8)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_MSIX_BEYOND_RANGE vxge_mBIT(9)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_KDFC_POISON vxge_mBIT(10)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_USDC_POISON vxge_mBIT(11)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_PIF_POISON vxge_mBIT(12)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_MSIX_POISON vxge_mBIT(13)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_MRIOV_POISON vxge_mBIT(14)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_NOT_MEM_TLP vxge_mBIT(15)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_UNKNOWN_MEM_TLP vxge_mBIT(16)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_REQ_FSM_ERR vxge_mBIT(17)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_CPL_FSM_ERR vxge_mBIT(18)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_KDFC_PROT_ERR vxge_mBIT(19)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_SWIF_PROT_ERR vxge_mBIT(20)
+#define VXGE_HW_TGT_ERRORS_REG_TGT_MRIOV_MEM_MAP_CFG_ERR vxge_mBIT(21)
+/*0x07060*/ u64 tgt_errors_mask;
+/*0x07068*/ u64 tgt_errors_alarm;
+/*0x07070*/ u64 config_errors_reg;
+#define VXGE_HW_CONFIG_ERRORS_REG_I2C_ILLEGAL_STOP_COND vxge_mBIT(3)
+#define VXGE_HW_CONFIG_ERRORS_REG_I2C_ILLEGAL_START_COND vxge_mBIT(7)
+#define VXGE_HW_CONFIG_ERRORS_REG_I2C_EXP_RD_CNT vxge_mBIT(11)
+#define VXGE_HW_CONFIG_ERRORS_REG_I2C_EXTRA_CYCLE vxge_mBIT(15)
+#define VXGE_HW_CONFIG_ERRORS_REG_I2C_MAIN_FSM_ERR vxge_mBIT(19)
+#define VXGE_HW_CONFIG_ERRORS_REG_I2C_REQ_COLLISION vxge_mBIT(23)
+#define VXGE_HW_CONFIG_ERRORS_REG_I2C_REG_FSM_ERR vxge_mBIT(27)
+#define VXGE_HW_CONFIG_ERRORS_REG_CFGM_I2C_TIMEOUT vxge_mBIT(31)
+#define VXGE_HW_CONFIG_ERRORS_REG_RIC_I2C_TIMEOUT vxge_mBIT(35)
+#define VXGE_HW_CONFIG_ERRORS_REG_CFGM_FSM_ERR vxge_mBIT(39)
+#define VXGE_HW_CONFIG_ERRORS_REG_RIC_FSM_ERR vxge_mBIT(43)
+#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_ILLEGAL_ACCESS vxge_mBIT(47)
+#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_TIMEOUT vxge_mBIT(51)
+#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_FSM_ERR vxge_mBIT(55)
+#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_TO_FSM_ERR vxge_mBIT(59)
+#define VXGE_HW_CONFIG_ERRORS_REG_RIC_RIC_RD_TIMEOUT vxge_mBIT(63)
+/*0x07078*/ u64 config_errors_mask;
+/*0x07080*/ u64 config_errors_alarm;
+ u8 unused07090[0x07090-0x07088];
+
+/*0x07090*/ u64 crdt_errors_reg;
+#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_FSM_ERR vxge_mBIT(11)
+#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_INTCTL_ILLEGAL_CRD_DEAL \
+ vxge_mBIT(15)
+#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_PDA_ILLEGAL_CRD_DEAL vxge_mBIT(19)
+#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_PCI_MSG_ILLEGAL_CRD_DEAL \
+ vxge_mBIT(23)
+#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_FSM_ERR vxge_mBIT(35)
+#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_RDA_ILLEGAL_CRD_DEAL vxge_mBIT(39)
+#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_PDA_ILLEGAL_CRD_DEAL vxge_mBIT(43)
+#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_DBLGEN_ILLEGAL_CRD_DEAL \
+ vxge_mBIT(47)
+/*0x07098*/ u64 crdt_errors_mask;
+/*0x070a0*/ u64 crdt_errors_alarm;
+ u8 unused070b0[0x070b0-0x070a8];
+
+/*0x070b0*/ u64 mrpcim_general_errors_reg;
+#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_STATSB_FSM_ERR vxge_mBIT(3)
+#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_XGEN_FSM_ERR vxge_mBIT(7)
+#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_XMEM_FSM_ERR vxge_mBIT(11)
+#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_KDFCCTL_FSM_ERR vxge_mBIT(15)
+#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_MRIOVCTL_FSM_ERR vxge_mBIT(19)
+#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_FLSH_ERR vxge_mBIT(23)
+#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_ACK_ERR vxge_mBIT(27)
+#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_CHKSUM_ERR vxge_mBIT(31)
+#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INI_SERR_DET vxge_mBIT(35)
+#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSIX_FSM_ERR vxge_mBIT(39)
+#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSI_OVERFLOW vxge_mBIT(43)
+#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_PPIF_PCI_NOT_FLUSH_DURING_SW_RESET \
+ vxge_mBIT(47)
+#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_PPIF_SW_RESET_FSM_ERR vxge_mBIT(51)
+/*0x070b8*/ u64 mrpcim_general_errors_mask;
+/*0x070c0*/ u64 mrpcim_general_errors_alarm;
+ u8 unused070d0[0x070d0-0x070c8];
+
+/*0x070d0*/ u64 pll_errors_reg;
+#define VXGE_HW_PLL_ERRORS_REG_CORE_CMG_PLL_OOL vxge_mBIT(3)
+#define VXGE_HW_PLL_ERRORS_REG_CORE_FB_PLL_OOL vxge_mBIT(7)
+#define VXGE_HW_PLL_ERRORS_REG_CORE_X_PLL_OOL vxge_mBIT(11)
+/*0x070d8*/ u64 pll_errors_mask;
+/*0x070e0*/ u64 pll_errors_alarm;
+/*0x070e8*/ u64 srpcim_to_mrpcim_alarm_reg;
+#define VXGE_HW_SRPCIM_TO_MRPCIM_ALARM_REG_PPIF_SRPCIM_TO_MRPCIM_ALARM(val) \
+ vxge_vBIT(val, 0, 17)
+/*0x070f0*/ u64 srpcim_to_mrpcim_alarm_mask;
+/*0x070f8*/ u64 srpcim_to_mrpcim_alarm_alarm;
+/*0x07100*/ u64 vpath_to_mrpcim_alarm_reg;
+#define VXGE_HW_VPATH_TO_MRPCIM_ALARM_REG_PPIF_VPATH_TO_MRPCIM_ALARM(val) \
+ vxge_vBIT(val, 0, 17)
+/*0x07108*/ u64 vpath_to_mrpcim_alarm_mask;
+/*0x07110*/ u64 vpath_to_mrpcim_alarm_alarm;
+ u8 unused07128[0x07128-0x07118];
+
+/*0x07128*/ u64 crdt_errors_vplane_reg[17];
+#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_H_CONSUME_CRDT_ERR \
+ vxge_mBIT(3)
+#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_D_CONSUME_CRDT_ERR \
+ vxge_mBIT(7)
+#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_H_RETURN_CRDT_ERR \
+ vxge_mBIT(11)
+#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_D_RETURN_CRDT_ERR \
+ vxge_mBIT(15)
+#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_NP_H_CONSUME_CRDT_ERR \
+ vxge_mBIT(19)
+#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_NP_H_RETURN_CRDT_ERR \
+ vxge_mBIT(23)
+#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_TAG_CONSUME_TAG_ERR \
+ vxge_mBIT(27)
+#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_TAG_RETURN_TAG_ERR \
+ vxge_mBIT(31)
+/*0x071b0*/ u64 crdt_errors_vplane_mask[17];
+/*0x07238*/ u64 crdt_errors_vplane_alarm[17];
+ u8 unused072f0[0x072f0-0x072c0];
+
+/*0x072f0*/ u64 mrpcim_rst_in_prog;
+#define VXGE_HW_MRPCIM_RST_IN_PROG_MRPCIM_RST_IN_PROG vxge_mBIT(7)
+/*0x072f8*/ u64 mrpcim_reg_modified;
+#define VXGE_HW_MRPCIM_REG_MODIFIED_MRPCIM_REG_MODIFIED vxge_mBIT(7)
+
+ u8 unused07378[0x07378-0x07300];
+
+/*0x07378*/ u64 write_arb_pending;
+#define VXGE_HW_WRITE_ARB_PENDING_WRARB_WRDMA vxge_mBIT(3)
+#define VXGE_HW_WRITE_ARB_PENDING_WRARB_RTDMA vxge_mBIT(7)
+#define VXGE_HW_WRITE_ARB_PENDING_WRARB_MSG vxge_mBIT(11)
+#define VXGE_HW_WRITE_ARB_PENDING_WRARB_STATSB vxge_mBIT(15)
+#define VXGE_HW_WRITE_ARB_PENDING_WRARB_INTCTL vxge_mBIT(19)
+/*0x07380*/ u64 read_arb_pending;
+#define VXGE_HW_READ_ARB_PENDING_RDARB_WRDMA vxge_mBIT(3)
+#define VXGE_HW_READ_ARB_PENDING_RDARB_RTDMA vxge_mBIT(7)
+#define VXGE_HW_READ_ARB_PENDING_RDARB_DBLGEN vxge_mBIT(11)
+/*0x07388*/ u64 dmaif_dmadbl_pending;
+#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_WRDMA_WR vxge_mBIT(0)
+#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_WRDMA_RD vxge_mBIT(1)
+#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_RTDMA_WR vxge_mBIT(2)
+#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_RTDMA_RD vxge_mBIT(3)
+#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_MSG_WR vxge_mBIT(4)
+#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_STATS_WR vxge_mBIT(5)
+#define VXGE_HW_DMAIF_DMADBL_PENDING_DBLGEN_IN_PROG(val) \
+ vxge_vBIT(val, 13, 51)
+/*0x07390*/ u64 wrcrdtarb_status0_vplane[17];
+#define VXGE_HW_WRCRDTARB_STATUS0_VPLANE_WRCRDTARB_ABS_AVAIL_P_H(val) \
+ vxge_vBIT(val, 0, 8)
+/*0x07418*/ u64 wrcrdtarb_status1_vplane[17];
+#define VXGE_HW_WRCRDTARB_STATUS1_VPLANE_WRCRDTARB_ABS_AVAIL_P_D(val) \
+ vxge_vBIT(val, 4, 12)
+ u8 unused07500[0x07500-0x074a0];
+
+/*0x07500*/ u64 mrpcim_general_cfg1;
+#define VXGE_HW_MRPCIM_GENERAL_CFG1_CLEAR_SERR vxge_mBIT(7)
+/*0x07508*/ u64 mrpcim_general_cfg2;
+#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_WR_TD vxge_mBIT(3)
+#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_RD_TD vxge_mBIT(7)
+#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_CPL_TD vxge_mBIT(11)
+#define VXGE_HW_MRPCIM_GENERAL_CFG2_INI_TIMEOUT_EN_MWR vxge_mBIT(15)
+#define VXGE_HW_MRPCIM_GENERAL_CFG2_INI_TIMEOUT_EN_MRD vxge_mBIT(19)
+#define VXGE_HW_MRPCIM_GENERAL_CFG2_IGNORE_VPATH_RST_FOR_MSIX vxge_mBIT(23)
+#define VXGE_HW_MRPCIM_GENERAL_CFG2_FLASH_READ_MSB vxge_mBIT(27)
+#define VXGE_HW_MRPCIM_GENERAL_CFG2_DIS_HOST_PIPELINE_WR vxge_mBIT(31)
+#define VXGE_HW_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_ENABLE vxge_mBIT(43)
+#define VXGE_HW_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_MAP_TO_VPATH(val) \
+ vxge_vBIT(val, 47, 5)
+#define VXGE_HW_MRPCIM_GENERAL_CFG2_EN_BLOCK_MSIX_DUE_TO_SERR vxge_mBIT(55)
+#define VXGE_HW_MRPCIM_GENERAL_CFG2_FORCE_SENDING_INTA vxge_mBIT(59)
+#define VXGE_HW_MRPCIM_GENERAL_CFG2_DIS_SWIF_PROT_ON_RDS vxge_mBIT(63)
+/*0x07510*/ u64 mrpcim_general_cfg3;
+#define VXGE_HW_MRPCIM_GENERAL_CFG3_PROTECTION_CA_OR_UNSUPN vxge_mBIT(0)
+#define VXGE_HW_MRPCIM_GENERAL_CFG3_ILLEGAL_RD_CA_OR_UNSUPN vxge_mBIT(3)
+#define VXGE_HW_MRPCIM_GENERAL_CFG3_RD_BYTE_SWAPEN vxge_mBIT(7)
+#define VXGE_HW_MRPCIM_GENERAL_CFG3_RD_BIT_FLIPEN vxge_mBIT(11)
+#define VXGE_HW_MRPCIM_GENERAL_CFG3_WR_BYTE_SWAPEN vxge_mBIT(15)
+#define VXGE_HW_MRPCIM_GENERAL_CFG3_WR_BIT_FLIPEN vxge_mBIT(19)
+#define VXGE_HW_MRPCIM_GENERAL_CFG3_MR_MAX_MVFS(val) vxge_vBIT(val, 20, 16)
+#define VXGE_HW_MRPCIM_GENERAL_CFG3_MR_MVF_TBL_SIZE(val) \
+ vxge_vBIT(val, 36, 16)
+#define VXGE_HW_MRPCIM_GENERAL_CFG3_PF0_SW_RESET_EN vxge_mBIT(55)
+#define VXGE_HW_MRPCIM_GENERAL_CFG3_REG_MODIFIED_CFG(val) vxge_vBIT(val, 56, 2)
+#define VXGE_HW_MRPCIM_GENERAL_CFG3_CPL_ECC_ENABLE_N vxge_mBIT(59)
+#define VXGE_HW_MRPCIM_GENERAL_CFG3_BYPASS_DAISY_CHAIN vxge_mBIT(63)
+/*0x07518*/ u64 mrpcim_stats_start_host_addr;
+#define VXGE_HW_MRPCIM_STATS_START_HOST_ADDR_MRPCIM_STATS_START_HOST_ADDR(val)\
+ vxge_vBIT(val, 0, 57)
+
+ u8 unused07950[0x07950-0x07520];
+
+/*0x07950*/ u64 rdcrdtarb_cfg0;
+#define VXGE_HW_RDCRDTARB_CFG0_RDA_MAX_OUTSTANDING_RDS(val) \
+ vxge_vBIT(val, 18, 6)
+#define VXGE_HW_RDCRDTARB_CFG0_PDA_MAX_OUTSTANDING_RDS(val) \
+ vxge_vBIT(val, 26, 6)
+#define VXGE_HW_RDCRDTARB_CFG0_DBLGEN_MAX_OUTSTANDING_RDS(val) \
+ vxge_vBIT(val, 34, 6)
+#define VXGE_HW_RDCRDTARB_CFG0_WAIT_CNT(val) vxge_vBIT(val, 48, 4)
+#define VXGE_HW_RDCRDTARB_CFG0_MAX_OUTSTANDING_RDS(val) vxge_vBIT(val, 54, 6)
+#define VXGE_HW_RDCRDTARB_CFG0_EN_XON vxge_mBIT(63)
+ u8 unused07be8[0x07be8-0x07958];
+
+/*0x07be8*/ u64 bf_sw_reset;
+#define VXGE_HW_BF_SW_RESET_BF_SW_RESET(val) vxge_vBIT(val, 0, 8)
+/*0x07bf0*/ u64 sw_reset_status;
+#define VXGE_HW_SW_RESET_STATUS_RESET_CMPLT vxge_mBIT(7)
+#define VXGE_HW_SW_RESET_STATUS_INIT_CMPLT vxge_mBIT(15)
+ u8 unused07d30[0x07d30-0x07bf8];
+
+/*0x07d30*/ u64 mrpcim_debug_stats0;
+#define VXGE_HW_MRPCIM_DEBUG_STATS0_INI_WR_DROP(val) vxge_vBIT(val, 0, 32)
+#define VXGE_HW_MRPCIM_DEBUG_STATS0_INI_RD_DROP(val) vxge_vBIT(val, 32, 32)
+/*0x07d38*/ u64 mrpcim_debug_stats1_vplane[17];
+#define VXGE_HW_MRPCIM_DEBUG_STATS1_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(val) \
+ vxge_vBIT(val, 32, 32)
+/*0x07dc0*/ u64 mrpcim_debug_stats2_vplane[17];
+#define VXGE_HW_MRPCIM_DEBUG_STATS2_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(val) \
+ vxge_vBIT(val, 32, 32)
+/*0x07e48*/ u64 mrpcim_debug_stats3_vplane[17];
+#define VXGE_HW_MRPCIM_DEBUG_STATS3_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(val) \
+ vxge_vBIT(val, 32, 32)
+/*0x07ed0*/ u64 mrpcim_debug_stats4;
+#define VXGE_HW_MRPCIM_DEBUG_STATS4_INI_WR_VPIN_DROP(val) vxge_vBIT(val, 0, 32)
+#define VXGE_HW_MRPCIM_DEBUG_STATS4_INI_RD_VPIN_DROP(val) \
+ vxge_vBIT(val, 32, 32)
+/*0x07ed8*/ u64 genstats_count01;
+#define VXGE_HW_GENSTATS_COUNT01_GENSTATS_COUNT1(val) vxge_vBIT(val, 0, 32)
+#define VXGE_HW_GENSTATS_COUNT01_GENSTATS_COUNT0(val) vxge_vBIT(val, 32, 32)
+/*0x07ee0*/ u64 genstats_count23;
+#define VXGE_HW_GENSTATS_COUNT23_GENSTATS_COUNT3(val) vxge_vBIT(val, 0, 32)
+#define VXGE_HW_GENSTATS_COUNT23_GENSTATS_COUNT2(val) vxge_vBIT(val, 32, 32)
+/*0x07ee8*/ u64 genstats_count4;
+#define VXGE_HW_GENSTATS_COUNT4_GENSTATS_COUNT4(val) vxge_vBIT(val, 32, 32)
+/*0x07ef0*/ u64 genstats_count5;
+#define VXGE_HW_GENSTATS_COUNT5_GENSTATS_COUNT5(val) vxge_vBIT(val, 32, 32)
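+/*
+ * Extraction sketch (illustrative, not from the original sources): the
+ * field macros above only *build* values; reading a counter back inverts
+ * vxge_vBIT(val, loc, sz), which its uses here imply places val at the
+ * MSB-first location loc (vxge_vBIT(val, 0, 64) spans the whole word).
+ * A hypothetical helper, valid for sz < 64:
+ *
+ *	static inline u64 vxge_get_field(u64 bits, int loc, int sz)
+ *	{
+ *		return (bits >> (64 - loc - sz)) & ((1ULL << sz) - 1);
+ *	}
+ *
+ * so GENSTATS_COUNT0 would be vxge_get_field(genstats_count01, 32, 32)
+ * and GENSTATS_COUNT1 would be vxge_get_field(genstats_count01, 0, 32).
+ */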
+
+ u8 unused07f08[0x07f08-0x07ef8];
+
+/*0x07f08*/ u64 genstats_cfg[6];
+#define VXGE_HW_GENSTATS_CFG_DTYPE_SEL(val) vxge_vBIT(val, 3, 5)
+#define VXGE_HW_GENSTATS_CFG_CLIENT_NO_SEL(val) vxge_vBIT(val, 9, 3)
+#define VXGE_HW_GENSTATS_CFG_WR_RD_CPL_SEL(val) vxge_vBIT(val, 14, 2)
+#define VXGE_HW_GENSTATS_CFG_VPATH_SEL(val) vxge_vBIT(val, 31, 17)
+/*0x07f38*/ u64 genstat_64bit_cfg;
+#define VXGE_HW_GENSTAT_64BIT_CFG_EN_FOR_GENSTATS0 vxge_mBIT(3)
+#define VXGE_HW_GENSTAT_64BIT_CFG_EN_FOR_GENSTATS2 vxge_mBIT(7)
+ u8 unused08000[0x08000-0x07f40];
+/*0x08000*/ u64 gcmg3_int_status;
+#define VXGE_HW_GCMG3_INT_STATUS_GSTC_ERR0_GSTC0_INT vxge_mBIT(0)
+#define VXGE_HW_GCMG3_INT_STATUS_GSTC_ERR1_GSTC1_INT vxge_mBIT(1)
+#define VXGE_HW_GCMG3_INT_STATUS_GH2L_ERR0_GH2L0_INT vxge_mBIT(2)
+#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR_GH2L1_INT vxge_mBIT(3)
+#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR2_GH2L2_INT vxge_mBIT(4)
+#define VXGE_HW_GCMG3_INT_STATUS_GH2L_SMERR0_GH2L3_INT vxge_mBIT(5)
+#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR3_GH2L4_INT vxge_mBIT(6)
+/*0x08008*/ u64 gcmg3_int_mask;
+ u8 unused09000[0x09000-0x08010];
+
+/*0x09000*/ u64 g3ifcmd_fb_int_status;
+#define VXGE_HW_G3IFCMD_FB_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
+/*0x09008*/ u64 g3ifcmd_fb_int_mask;
+/*0x09010*/ u64 g3ifcmd_fb_err_reg;
+#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
+#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
+#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
+ vxge_vBIT(val, 24, 8)
+#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
+/*0x09018*/ u64 g3ifcmd_fb_err_mask;
+/*0x09020*/ u64 g3ifcmd_fb_err_alarm;
+
+ u8 unused09400[0x09400-0x09028];
+
+/*0x09400*/ u64 g3ifcmd_cmu_int_status;
+#define VXGE_HW_G3IFCMD_CMU_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
+/*0x09408*/ u64 g3ifcmd_cmu_int_mask;
+/*0x09410*/ u64 g3ifcmd_cmu_err_reg;
+#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
+#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
+#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
+ vxge_vBIT(val, 24, 8)
+#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
+/*0x09418*/ u64 g3ifcmd_cmu_err_mask;
+/*0x09420*/ u64 g3ifcmd_cmu_err_alarm;
+
+ u8 unused09800[0x09800-0x09428];
+
+/*0x09800*/ u64 g3ifcmd_cml_int_status;
+#define VXGE_HW_G3IFCMD_CML_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0)
+/*0x09808*/ u64 g3ifcmd_cml_int_mask;
+/*0x09810*/ u64 g3ifcmd_cml_err_reg;
+#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6)
+#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_SM_ERR vxge_mBIT(7)
+#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \
+ vxge_vBIT(val, 24, 8)
+#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55)
+/*0x09818*/ u64 g3ifcmd_cml_err_mask;
+/*0x09820*/ u64 g3ifcmd_cml_err_alarm;
+ u8 unused09b00[0x09b00-0x09828];
+
+/*0x09b00*/ u64 vpath_to_vplane_map[17];
+#define VXGE_HW_VPATH_TO_VPLANE_MAP_VPATH_TO_VPLANE_MAP(val) \
+ vxge_vBIT(val, 3, 5)
+ u8 unused09c30[0x09c30-0x09b88];
+
+/*0x09c30*/ u64 xgxs_cfg_port[2];
+#define VXGE_HW_XGXS_CFG_PORT_SIG_DETECT_FORCE_LOS(val) vxge_vBIT(val, 16, 4)
+#define VXGE_HW_XGXS_CFG_PORT_SIG_DETECT_FORCE_VALID(val) vxge_vBIT(val, 20, 4)
+#define VXGE_HW_XGXS_CFG_PORT_SEL_INFO_0 vxge_mBIT(27)
+#define VXGE_HW_XGXS_CFG_PORT_SEL_INFO_1(val) vxge_vBIT(val, 29, 3)
+#define VXGE_HW_XGXS_CFG_PORT_TX_LANE0_SKEW(val) vxge_vBIT(val, 32, 4)
+#define VXGE_HW_XGXS_CFG_PORT_TX_LANE1_SKEW(val) vxge_vBIT(val, 36, 4)
+#define VXGE_HW_XGXS_CFG_PORT_TX_LANE2_SKEW(val) vxge_vBIT(val, 40, 4)
+#define VXGE_HW_XGXS_CFG_PORT_TX_LANE3_SKEW(val) vxge_vBIT(val, 44, 4)
+/*0x09c40*/ u64 xgxs_rxber_cfg_port[2];
+#define VXGE_HW_XGXS_RXBER_CFG_PORT_INTERVAL_DUR(val) vxge_vBIT(val, 0, 4)
+#define VXGE_HW_XGXS_RXBER_CFG_PORT_RXGXS_INTERVAL_CNT(val) \
+ vxge_vBIT(val, 16, 48)
+/*0x09c50*/ u64 xgxs_rxber_status_port[2];
+#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_A_ERR_CNT(val) \
+ vxge_vBIT(val, 0, 16)
+#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_B_ERR_CNT(val) \
+ vxge_vBIT(val, 16, 16)
+#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_C_ERR_CNT(val) \
+ vxge_vBIT(val, 32, 16)
+#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_D_ERR_CNT(val) \
+ vxge_vBIT(val, 48, 16)
+/*0x09c60*/ u64 xgxs_status_port[2];
+#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_TX_ACTIVITY(val) vxge_vBIT(val, 0, 4)
+#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_RX_ACTIVITY(val) vxge_vBIT(val, 4, 4)
+#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_CTC_FIFO_ERR vxge_mBIT(11)
+#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_BYTE_SYNC_LOST(val) \
+ vxge_vBIT(val, 12, 4)
+#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_CTC_ERR(val) vxge_vBIT(val, 16, 4)
+#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_ALIGNMENT_ERR vxge_mBIT(23)
+#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_DEC_ERR(val) vxge_vBIT(val, 24, 8)
+#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_SKIP_INS_REQ(val) \
+ vxge_vBIT(val, 32, 4)
+#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_SKIP_DEL_REQ(val) \
+ vxge_vBIT(val, 36, 4)
+/*0x09c70*/ u64 xgxs_pma_reset_port[2];
+#define VXGE_HW_XGXS_PMA_RESET_PORT_SERDES_RESET(val) vxge_vBIT(val, 0, 8)
+ u8 unused09c90[0x09c90-0x09c80];
+
+/*0x09c90*/ u64 xgxs_static_cfg_port[2];
+#define VXGE_HW_XGXS_STATIC_CFG_PORT_FW_CTRL_SERDES vxge_mBIT(3)
+ u8 unused09d40[0x09d40-0x09ca0];
+
+/*0x09d40*/ u64 xgxs_info_port[2];
+#define VXGE_HW_XGXS_INFO_PORT_XMACJ_INFO_0(val) vxge_vBIT(val, 0, 32)
+#define VXGE_HW_XGXS_INFO_PORT_XMACJ_INFO_1(val) vxge_vBIT(val, 32, 32)
+/*0x09d50*/ u64 ratemgmt_cfg_port[2];
+#define VXGE_HW_RATEMGMT_CFG_PORT_MODE(val) vxge_vBIT(val, 2, 2)
+#define VXGE_HW_RATEMGMT_CFG_PORT_RATE vxge_mBIT(7)
+#define VXGE_HW_RATEMGMT_CFG_PORT_FIXED_USE_FSM vxge_mBIT(11)
+#define VXGE_HW_RATEMGMT_CFG_PORT_ANTP_USE_FSM vxge_mBIT(15)
+#define VXGE_HW_RATEMGMT_CFG_PORT_ANBE_USE_FSM vxge_mBIT(19)
+/*0x09d60*/ u64 ratemgmt_status_port[2];
+#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_COMPLETE vxge_mBIT(3)
+#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_RATE vxge_mBIT(7)
+#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_MAC_MATCHES_PHY vxge_mBIT(11)
+ u8 unused09d80[0x09d80-0x09d70];
+
+/*0x09d80*/ u64 ratemgmt_fixed_cfg_port[2];
+#define VXGE_HW_RATEMGMT_FIXED_CFG_PORT_RESTART vxge_mBIT(7)
+/*0x09d90*/ u64 ratemgmt_antp_cfg_port[2];
+#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_RESTART vxge_mBIT(7)
+#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_USE_PREAMBLE_EXT_PHY vxge_mBIT(11)
+#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_USE_ACT_SEL vxge_mBIT(15)
+#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_RETRY_PHY_QUERY(val) \
+ vxge_vBIT(val, 16, 4)
+#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_WAIT_MDIO_RESPONSE(val) \
+ vxge_vBIT(val, 20, 4)
+#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_LDOWN_REAUTO_RESPONSE(val) \
+ vxge_vBIT(val, 24, 4)
+#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_ADVERTISE_10G vxge_mBIT(31)
+#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_ADVERTISE_1G vxge_mBIT(35)
+/*0x09da0*/ u64 ratemgmt_anbe_cfg_port[2];
+#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_RESTART vxge_mBIT(7)
+#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_PARALLEL_DETECT_10G_KX4_ENABLE \
+ vxge_mBIT(11)
+#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_PARALLEL_DETECT_1G_KX_ENABLE \
+ vxge_mBIT(15)
+#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_SYNC_10G_KX4(val) vxge_vBIT(val, 16, 4)
+#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_SYNC_1G_KX(val) vxge_vBIT(val, 20, 4)
+#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_DME_EXCHANGE(val) vxge_vBIT(val, 24, 4)
+#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_ADVERTISE_10G_KX4 vxge_mBIT(31)
+#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_ADVERTISE_1G_KX vxge_mBIT(35)
+/*0x09db0*/ u64 anbe_cfg_port[2];
+#define VXGE_HW_ANBE_CFG_PORT_RESET_CFG_REGS(val) vxge_vBIT(val, 0, 8)
+#define VXGE_HW_ANBE_CFG_PORT_ALIGN_10G_KX4_OVERRIDE(val) vxge_vBIT(val, 10, 2)
+#define VXGE_HW_ANBE_CFG_PORT_SYNC_1G_KX_OVERRIDE(val) vxge_vBIT(val, 14, 2)
+/*0x09dc0*/ u64 anbe_mgr_ctrl_port[2];
+#define VXGE_HW_ANBE_MGR_CTRL_PORT_WE vxge_mBIT(3)
+#define VXGE_HW_ANBE_MGR_CTRL_PORT_STROBE vxge_mBIT(7)
+#define VXGE_HW_ANBE_MGR_CTRL_PORT_ADDR(val) vxge_vBIT(val, 15, 9)
+#define VXGE_HW_ANBE_MGR_CTRL_PORT_DATA(val) vxge_vBIT(val, 32, 32)
+ u8 unused09de0[0x09de0-0x09dd0];
+
+/*0x09de0*/ u64 anbe_fw_mstr_port[2];
+#define VXGE_HW_ANBE_FW_MSTR_PORT_CONNECT_BEAN_TO_SERDES vxge_mBIT(3)
+#define VXGE_HW_ANBE_FW_MSTR_PORT_TX_ZEROES_TO_SERDES vxge_mBIT(7)
+/*0x09df0*/ u64 anbe_hwfsm_gen_status_port[2];
+#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G_KX4_USING_PD \
+ vxge_mBIT(3)
+#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G_KX4_USING_DME \
+ vxge_mBIT(7)
+#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G_KX_USING_PD \
+ vxge_mBIT(11)
+#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G_KX_USING_DME \
+ vxge_mBIT(15)
+#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_ANBEFSM_STATE(val) \
+ vxge_vBIT(val, 18, 6)
+#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_NEXT_PAGE_RECEIVED \
+ vxge_mBIT(27)
+#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_BASE_PAGE_RECEIVED \
+ vxge_mBIT(35)
+#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_AUTONEG_COMPLETE \
+ vxge_mBIT(39)
+#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NP_BEFORE_BP \
+ vxge_mBIT(43)
+#define \
+VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_AN_COMPLETE_BEFORE_BP \
+ vxge_mBIT(47)
+#define \
+VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_AN_COMPLETE_BEFORE_NP \
+ vxge_mBIT(51)
+#define \
+VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_MODE_WHEN_AN_COMPLETE \
+ vxge_mBIT(55)
+#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_COUNT_BP(val) \
+ vxge_vBIT(val, 56, 4)
+#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_COUNT_NP(val) \
+ vxge_vBIT(val, 60, 4)
+/*0x09e00*/ u64 anbe_hwfsm_bp_status_port[2];
+#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_FEC_ENABLE \
+ vxge_mBIT(32)
+#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_FEC_ABILITY \
+ vxge_mBIT(33)
+#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_10G_KR_CAPABLE \
+ vxge_mBIT(40)
+#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_10G_KX4_CAPABLE \
+ vxge_mBIT(41)
+#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_1G_KX_CAPABLE \
+ vxge_mBIT(42)
+#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_TX_NONCE(val) \
+ vxge_vBIT(val, 43, 5)
+#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_NP vxge_mBIT(48)
+#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ACK vxge_mBIT(49)
+#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_REMOTE_FAULT \
+ vxge_mBIT(50)
+#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ASM_DIR vxge_mBIT(51)
+#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_PAUSE vxge_mBIT(53)
+#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ECHOED_NONCE(val) \
+ vxge_vBIT(val, 54, 5)
+#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_SELECTOR_FIELD(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x09e10*/ u64 anbe_hwfsm_np_status_port[2];
+#define VXGE_HW_ANBE_HWFSM_NP_STATUS_PORT_RATEMGMT_NP_BITS_47_TO_32(val) \
+ vxge_vBIT(val, 16, 16)
+#define VXGE_HW_ANBE_HWFSM_NP_STATUS_PORT_RATEMGMT_NP_BITS_31_TO_0(val) \
+ vxge_vBIT(val, 32, 32)
+ u8 unused09e30[0x09e30-0x09e20];
+
+/*0x09e30*/ u64 antp_gen_cfg_port[2];
+/*0x09e40*/ u64 antp_hwfsm_gen_status_port[2];
+#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G vxge_mBIT(3)
+#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G vxge_mBIT(7)
+#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_ANTPFSM_STATE(val) \
+ vxge_vBIT(val, 10, 6)
+#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_AUTONEG_COMPLETE \
+ vxge_mBIT(23)
+#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NO_LP_XNP \
+ vxge_mBIT(27)
+#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_GOT_LP_XNP vxge_mBIT(31)
+#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_MESSAGE_CODE \
+ vxge_mBIT(35)
+#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NO_HCD \
+ vxge_mBIT(43)
+#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_FOUND_HCD vxge_mBIT(47)
+#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_INVALID_RATE \
+ vxge_mBIT(51)
+#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_VALID_RATE vxge_mBIT(55)
+#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_PERSISTENT_LDOWN \
+ vxge_mBIT(59)
+/*0x09e50*/ u64 antp_hwfsm_bp_status_port[2];
+#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_NP vxge_mBIT(0)
+#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ACK vxge_mBIT(1)
+#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_RF vxge_mBIT(2)
+#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_XNP vxge_mBIT(3)
+#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ABILITY_FIELD(val) \
+ vxge_vBIT(val, 4, 7)
+#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_SELECTOR_FIELD(val) \
+ vxge_vBIT(val, 11, 5)
+/*0x09e60*/ u64 antp_hwfsm_xnp_status_port[2];
+#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_NP vxge_mBIT(0)
+#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_ACK vxge_mBIT(1)
+#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_MP vxge_mBIT(2)
+#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_ACK2 vxge_mBIT(3)
+#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_TOGGLE vxge_mBIT(4)
+#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_MESSAGE_CODE(val) \
+ vxge_vBIT(val, 5, 11)
+#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_UNF_CODE_FIELD1(val) \
+ vxge_vBIT(val, 16, 16)
+#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_UNF_CODE_FIELD2(val) \
+ vxge_vBIT(val, 32, 16)
+/*0x09e70*/ u64 mdio_mgr_access_port[2];
+#define VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_ONE vxge_mBIT(3)
+#define VXGE_HW_MDIO_MGR_ACCESS_PORT_OP_TYPE(val) vxge_vBIT(val, 5, 3)
+#define VXGE_HW_MDIO_MGR_ACCESS_PORT_DEVAD(val) vxge_vBIT(val, 11, 5)
+#define VXGE_HW_MDIO_MGR_ACCESS_PORT_ADDR(val) vxge_vBIT(val, 16, 16)
+#define VXGE_HW_MDIO_MGR_ACCESS_PORT_DATA(val) vxge_vBIT(val, 32, 16)
+#define VXGE_HW_MDIO_MGR_ACCESS_PORT_ST_PATTERN(val) vxge_vBIT(val, 49, 2)
+#define VXGE_HW_MDIO_MGR_ACCESS_PORT_PREAMBLE vxge_mBIT(51)
+#define VXGE_HW_MDIO_MGR_ACCESS_PORT_PRTAD(val) vxge_vBIT(val, 55, 5)
+#define VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_TWO vxge_mBIT(63)
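+/*
+ * Usage sketch (illustrative only): an MDIO transaction would typically
+ * be composed by OR-ing the field macros above into one 64-bit value and
+ * writing it to mdio_mgr_access_port[port]. writeq() and the
+ * register-block pointer name below are assumptions, not code from this
+ * driver:
+ *
+ *	u64 v = VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_ONE |
+ *		VXGE_HW_MDIO_MGR_ACCESS_PORT_OP_TYPE(op) |
+ *		VXGE_HW_MDIO_MGR_ACCESS_PORT_DEVAD(devad) |
+ *		VXGE_HW_MDIO_MGR_ACCESS_PORT_ADDR(addr) |
+ *		VXGE_HW_MDIO_MGR_ACCESS_PORT_PRTAD(prtad);
+ *	writeq(v, &mrpcim_reg->mdio_mgr_access_port[port]);
+ */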
+ u8 unused0a200[0x0a200-0x09e80];
+/*0x0a200*/ u64 xmac_vsport_choices_vh[17];
+#define VXGE_HW_XMAC_VSPORT_CHOICES_VH_VSPORT_VECTOR(val) vxge_vBIT(val, 0, 17)
+ u8 unused0a400[0x0a400-0x0a288];
+
+/*0x0a400*/ u64 rx_thresh_cfg_vp[17];
+#define VXGE_HW_RX_THRESH_CFG_VP_PAUSE_LOW_THR(val) vxge_vBIT(val, 0, 8)
+#define VXGE_HW_RX_THRESH_CFG_VP_PAUSE_HIGH_THR(val) vxge_vBIT(val, 8, 8)
+#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_0(val) vxge_vBIT(val, 16, 8)
+#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_1(val) vxge_vBIT(val, 24, 8)
+#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_2(val) vxge_vBIT(val, 32, 8)
+#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_3(val) vxge_vBIT(val, 40, 8)
+ u8 unused0ac90[0x0ac90-0x0a488];
+} __packed;
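+
+/*
+ * Reading sketch (illustrative only): the int_status/mask/alarm triples
+ * in this block are 64-bit words tested against the vxge_mBIT() masks
+ * defined above, with MSB-first numbering (location 0 is the most
+ * significant bit). readq() and the pointer name are assumptions:
+ *
+ *	u64 status = readq(&mrpcim_reg->mrpcim_general_int_status);
+ *	if (status & VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PIC_INT)
+ *		(service the PIC interrupt, then clear its alarm register)
+ */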
+
+/*VXGE_HW_SRPCIM_REGS_H*/
+struct vxge_hw_srpcim_reg {
+
+/*0x00000*/ u64 tim_mr2sr_resource_assignment_vh;
+#define VXGE_HW_TIM_MR2SR_RESOURCE_ASSIGNMENT_VH_BMAP_ROOT(val) \
+ vxge_vBIT(val, 0, 32)
+ u8 unused00100[0x00100-0x00008];
+
+/*0x00100*/ u64 srpcim_pcipif_int_status;
+#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_MRPCIM_MSG_MRPCIM_MSG_INT BIT(3)
+#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_VPATH_MSG_VPATH_MSG_INT BIT(7)
+#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_SRPCIM_SPARE_R1_SRPCIM_SPARE_R1_INT \
+ BIT(11)
+/*0x00108*/ u64 srpcim_pcipif_int_mask;
+/*0x00110*/ u64 mrpcim_msg_reg;
+#define VXGE_HW_MRPCIM_MSG_REG_SWIF_MRPCIM_TO_SRPCIM_RMSG_INT BIT(3)
+/*0x00118*/ u64 mrpcim_msg_mask;
+/*0x00120*/ u64 mrpcim_msg_alarm;
+/*0x00128*/ u64 vpath_msg_reg;
+#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH0_TO_SRPCIM_RMSG_INT BIT(0)
+#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH1_TO_SRPCIM_RMSG_INT BIT(1)
+#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH2_TO_SRPCIM_RMSG_INT BIT(2)
+#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH3_TO_SRPCIM_RMSG_INT BIT(3)
+#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH4_TO_SRPCIM_RMSG_INT BIT(4)
+#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH5_TO_SRPCIM_RMSG_INT BIT(5)
+#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH6_TO_SRPCIM_RMSG_INT BIT(6)
+#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH7_TO_SRPCIM_RMSG_INT BIT(7)
+#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH8_TO_SRPCIM_RMSG_INT BIT(8)
+#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH9_TO_SRPCIM_RMSG_INT BIT(9)
+#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH10_TO_SRPCIM_RMSG_INT BIT(10)
+#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH11_TO_SRPCIM_RMSG_INT BIT(11)
+#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH12_TO_SRPCIM_RMSG_INT BIT(12)
+#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH13_TO_SRPCIM_RMSG_INT BIT(13)
+#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH14_TO_SRPCIM_RMSG_INT BIT(14)
+#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH15_TO_SRPCIM_RMSG_INT BIT(15)
+#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH16_TO_SRPCIM_RMSG_INT BIT(16)
+/*0x00130*/ u64 vpath_msg_mask;
+/*0x00138*/ u64 vpath_msg_alarm;
+ u8 unused00160[0x00160-0x00140];
+
+/*0x00160*/ u64 srpcim_to_mrpcim_wmsg;
+#define VXGE_HW_SRPCIM_TO_MRPCIM_WMSG_SRPCIM_TO_MRPCIM_WMSG(val) \
+ vxge_vBIT(val, 0, 64)
+/*0x00168*/ u64 srpcim_to_mrpcim_wmsg_trig;
+#define VXGE_HW_SRPCIM_TO_MRPCIM_WMSG_TRIG_SRPCIM_TO_MRPCIM_WMSG_TRIG BIT(0)
+/*0x00170*/ u64 mrpcim_to_srpcim_rmsg;
+#define VXGE_HW_MRPCIM_TO_SRPCIM_RMSG_SWIF_MRPCIM_TO_SRPCIM_RMSG(val) \
+ vxge_vBIT(val, 0, 64)
+/*0x00178*/ u64 vpath_to_srpcim_rmsg_sel;
+#define VXGE_HW_VPATH_TO_SRPCIM_RMSG_SEL_VPATH_TO_SRPCIM_RMSG_SEL(val) \
+ vxge_vBIT(val, 0, 5)
+/*0x00180*/ u64 vpath_to_srpcim_rmsg;
+#define VXGE_HW_VPATH_TO_SRPCIM_RMSG_SWIF_VPATH_TO_SRPCIM_RMSG(val) \
+ vxge_vBIT(val, 0, 64)
+ u8 unused00200[0x00200-0x00188];
+
+/*0x00200*/ u64 srpcim_general_int_status;
+#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PIC_INT BIT(0)
+#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PCI_INT BIT(3)
+#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_XMAC_INT BIT(7)
+ u8 unused00210[0x00210-0x00208];
+
+/*0x00210*/ u64 srpcim_general_int_mask;
+#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_PIC_INT BIT(0)
+#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_PCI_INT BIT(3)
+#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_XMAC_INT BIT(7)
+ u8 unused00220[0x00220-0x00218];
+
+/*0x00220*/ u64 srpcim_ppif_int_status;
+
+/*0x00228*/ u64 srpcim_ppif_int_mask;
+/*0x00230*/ u64 srpcim_gen_errors_reg;
+#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_STATUS_ERR BIT(3)
+#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_UNCOR_ERR BIT(7)
+#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_COR_ERR BIT(11)
+#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_INTCTRL_SCHED_INT BIT(15)
+#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_INI_SERR_DET BIT(19)
+#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_TGT_PF_ILLEGAL_ACCESS BIT(23)
+/*0x00238*/ u64 srpcim_gen_errors_mask;
+/*0x00240*/ u64 srpcim_gen_errors_alarm;
+/*0x00248*/ u64 mrpcim_to_srpcim_alarm_reg;
+#define VXGE_HW_MRPCIM_TO_SRPCIM_ALARM_REG_PPIF_MRPCIM_TO_SRPCIM_ALARM BIT(3)
+/*0x00250*/ u64 mrpcim_to_srpcim_alarm_mask;
+/*0x00258*/ u64 mrpcim_to_srpcim_alarm_alarm;
+/*0x00260*/ u64 vpath_to_srpcim_alarm_reg;
+
+/*0x00268*/ u64 vpath_to_srpcim_alarm_mask;
+/*0x00270*/ u64 vpath_to_srpcim_alarm_alarm;
+ u8 unused00280[0x00280-0x00278];
+
+/*0x00280*/ u64 pf_sw_reset;
+#define VXGE_HW_PF_SW_RESET_PF_SW_RESET(val) vxge_vBIT(val, 0, 8)
+/*0x00288*/ u64 srpcim_general_cfg1;
+#define VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BYTE_SWAPEN BIT(19)
+#define VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BIT_FLIPEN BIT(23)
+#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_ADDR_SWAPEN BIT(27)
+#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_ADDR_FLIPEN BIT(31)
+#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_DATA_SWAPEN BIT(35)
+#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_DATA_FLIPEN BIT(39)
+/*0x00290*/ u64 srpcim_interrupt_cfg1;
+#define VXGE_HW_SRPCIM_INTERRUPT_CFG1_ALARM_MAP_TO_MSG(val) vxge_vBIT(val, 1, 7)
+#define VXGE_HW_SRPCIM_INTERRUPT_CFG1_TRAFFIC_CLASS(val) vxge_vBIT(val, 9, 3)
+ u8 unused002a8[0x002a8-0x00298];
+
+/*0x002a8*/ u64 srpcim_clear_msix_mask;
+#define VXGE_HW_SRPCIM_CLEAR_MSIX_MASK_SRPCIM_CLEAR_MSIX_MASK BIT(0)
+/*0x002b0*/ u64 srpcim_set_msix_mask;
+#define VXGE_HW_SRPCIM_SET_MSIX_MASK_SRPCIM_SET_MSIX_MASK BIT(0)
+/*0x002b8*/ u64 srpcim_clr_msix_one_shot;
+#define VXGE_HW_SRPCIM_CLR_MSIX_ONE_SHOT_SRPCIM_CLR_MSIX_ONE_SHOT BIT(0)
+/*0x002c0*/ u64 srpcim_rst_in_prog;
+#define VXGE_HW_SRPCIM_RST_IN_PROG_SRPCIM_RST_IN_PROG BIT(7)
+/*0x002c8*/ u64 srpcim_reg_modified;
+#define VXGE_HW_SRPCIM_REG_MODIFIED_SRPCIM_REG_MODIFIED BIT(7)
+/*0x002d0*/ u64 tgt_pf_illegal_access;
+#define VXGE_HW_TGT_PF_ILLEGAL_ACCESS_SWIF_REGION(val) vxge_vBIT(val, 1, 7)
+/*0x002d8*/ u64 srpcim_msix_status;
+#define VXGE_HW_SRPCIM_MSIX_STATUS_INTCTL_SRPCIM_MSIX_MASK BIT(3)
+#define VXGE_HW_SRPCIM_MSIX_STATUS_INTCTL_SRPCIM_MSIX_PENDING_VECTOR BIT(7)
+ u8 unused00880[0x00880-0x002e0];
+
+/*0x00880*/ u64 xgmac_sr_int_status;
+#define VXGE_HW_XGMAC_SR_INT_STATUS_ASIC_NTWK_SR_ERR_ASIC_NTWK_SR_INT BIT(3)
+/*0x00888*/ u64 xgmac_sr_int_mask;
+/*0x00890*/ u64 asic_ntwk_sr_err_reg;
+#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_FAULT BIT(3)
+#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_OK BIT(7)
+#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_FAULT_OCCURRED \
+ BIT(11)
+#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_OK_OCCURRED BIT(15)
+/*0x00898*/ u64 asic_ntwk_sr_err_mask;
+/*0x008a0*/ u64 asic_ntwk_sr_err_alarm;
+ u8 unused008c0[0x008c0-0x008a8];
+
+/*0x008c0*/ u64 xmac_vsport_choices_sr_clone;
+#define VXGE_HW_XMAC_VSPORT_CHOICES_SR_CLONE_VSPORT_VECTOR(val) \
+ vxge_vBIT(val, 0, 17)
+ u8 unused00900[0x00900-0x008c8];
+
+/*0x00900*/ u64 mr_rqa_top_prty_for_vh;
+#define VXGE_HW_MR_RQA_TOP_PRTY_FOR_VH_RQA_TOP_PRTY_FOR_VH(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00908*/ u64 umq_vh_data_list_empty;
+#define VXGE_HW_UMQ_VH_DATA_LIST_EMPTY_ROCRC_UMQ_VH_DATA_LIST_EMPTY \
+ BIT(0)
+/*0x00910*/ u64 wde_cfg;
+#define VXGE_HW_WDE_CFG_NS0_FORCE_MWB_START BIT(0)
+#define VXGE_HW_WDE_CFG_NS0_FORCE_MWB_END BIT(1)
+#define VXGE_HW_WDE_CFG_NS0_FORCE_QB_START BIT(2)
+#define VXGE_HW_WDE_CFG_NS0_FORCE_QB_END BIT(3)
+#define VXGE_HW_WDE_CFG_NS0_FORCE_MPSB_START BIT(4)
+#define VXGE_HW_WDE_CFG_NS0_FORCE_MPSB_END BIT(5)
+#define VXGE_HW_WDE_CFG_NS0_MWB_OPT_EN BIT(6)
+#define VXGE_HW_WDE_CFG_NS0_QB_OPT_EN BIT(7)
+#define VXGE_HW_WDE_CFG_NS0_MPSB_OPT_EN BIT(8)
+#define VXGE_HW_WDE_CFG_NS1_FORCE_MWB_START BIT(9)
+#define VXGE_HW_WDE_CFG_NS1_FORCE_MWB_END BIT(10)
+#define VXGE_HW_WDE_CFG_NS1_FORCE_QB_START BIT(11)
+#define VXGE_HW_WDE_CFG_NS1_FORCE_QB_END BIT(12)
+#define VXGE_HW_WDE_CFG_NS1_FORCE_MPSB_START BIT(13)
+#define VXGE_HW_WDE_CFG_NS1_FORCE_MPSB_END BIT(14)
+#define VXGE_HW_WDE_CFG_NS1_MWB_OPT_EN BIT(15)
+#define VXGE_HW_WDE_CFG_NS1_QB_OPT_EN BIT(16)
+#define VXGE_HW_WDE_CFG_NS1_MPSB_OPT_EN BIT(17)
+#define VXGE_HW_WDE_CFG_DISABLE_QPAD_FOR_UNALIGNED_ADDR BIT(19)
+#define VXGE_HW_WDE_CFG_ALIGNMENT_PREFERENCE(val) vxge_vBIT(val, 30, 2)
+#define VXGE_HW_WDE_CFG_MEM_WORD_SIZE(val) vxge_vBIT(val, 46, 2)
+
+} __packed;
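+
+/*
+ * Write sketch (illustrative only): configuration registers in this
+ * block would normally be updated read-modify-write so unrelated bits
+ * are preserved. readq()/writeq() and the pointer name are assumptions:
+ *
+ *	u64 cfg = readq(&srpcim_reg->srpcim_general_cfg1);
+ *	cfg |= VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BYTE_SWAPEN;
+ *	writeq(cfg, &srpcim_reg->srpcim_general_cfg1);
+ */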
+
+/*VXGE_HW_VPMGMT_REGS_H*/
+struct vxge_hw_vpmgmt_reg {
+
+ u8 unused00040[0x00040-0x00000];
+
+/*0x00040*/ u64 vpath_to_func_map_cfg1;
+#define VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_VPATH_TO_FUNC_MAP_CFG1(val) \
+ vxge_vBIT(val, 3, 5)
+/*0x00048*/ u64 vpath_is_first;
+#define VXGE_HW_VPATH_IS_FIRST_VPATH_IS_FIRST vxge_mBIT(3)
+/*0x00050*/ u64 srpcim_to_vpath_wmsg;
+#define VXGE_HW_SRPCIM_TO_VPATH_WMSG_SRPCIM_TO_VPATH_WMSG(val) \
+ vxge_vBIT(val, 0, 64)
+/*0x00058*/ u64 srpcim_to_vpath_wmsg_trig;
+#define VXGE_HW_SRPCIM_TO_VPATH_WMSG_TRIG_SRPCIM_TO_VPATH_WMSG_TRIG \
+ vxge_mBIT(0)
+ u8 unused00100[0x00100-0x00060];
+
+/*0x00100*/ u64 tim_vpath_assignment;
+#define VXGE_HW_TIM_VPATH_ASSIGNMENT_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
+ u8 unused00140[0x00140-0x00108];
+
+/*0x00140*/ u64 rqa_top_prty_for_vp;
+#define VXGE_HW_RQA_TOP_PRTY_FOR_VP_RQA_TOP_PRTY_FOR_VP(val) \
+ vxge_vBIT(val, 59, 5)
+ u8 unused001c0[0x001c0-0x00148];
+
+/*0x001c0*/ u64 rxmac_rx_pa_cfg0_vpmgmt_clone;
+#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_IGNORE_FRAME_ERR vxge_mBIT(3)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SUPPORT_SNAP_AB_N vxge_mBIT(7)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SEARCH_FOR_HAO vxge_mBIT(18)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SUPPORT_MOBILE_IPV6_HDRS \
+ vxge_mBIT(19)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_IPV6_STOP_SEARCHING \
+ vxge_mBIT(23)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_NO_PS_IF_UNKNOWN vxge_mBIT(27)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SEARCH_FOR_ETYPE vxge_mBIT(35)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_L3_CSUM_ERR \
+ vxge_mBIT(39)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_L3_CSUM_ERR \
+ vxge_mBIT(43)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_L4_CSUM_ERR \
+ vxge_mBIT(47)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_L4_CSUM_ERR \
+ vxge_mBIT(51)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_RPA_ERR \
+ vxge_mBIT(55)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_RPA_ERR \
+ vxge_mBIT(59)
+#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_JUMBO_SNAP_EN vxge_mBIT(63)
+/*0x001c8*/ u64 rts_mgr_cfg0_vpmgmt_clone;
+#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_RTS_DP_SP_PRIORITY vxge_mBIT(3)
+#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_FLEX_L4PRTCL_VALUE(val) \
+ vxge_vBIT(val, 24, 8)
+#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_ICMP_TRASH vxge_mBIT(35)
+#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_TCPSYN_TRASH vxge_mBIT(39)
+#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_ZL4PYLD_TRASH vxge_mBIT(43)
+#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_TCP_TRASH vxge_mBIT(47)
+#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_UDP_TRASH vxge_mBIT(51)
+#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_FLEX_TRASH vxge_mBIT(55)
+#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_IPFRAG_TRASH vxge_mBIT(59)
+/*0x001d0*/ u64 rts_mgr_criteria_priority_vpmgmt_clone;
+#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ETYPE(val) \
+ vxge_vBIT(val, 5, 3)
+#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ICMP_TCPSYN(val) \
+ vxge_vBIT(val, 9, 3)
+#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_L4PN(val) \
+ vxge_vBIT(val, 13, 3)
+#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_RANGE_L4PN(val) \
+ vxge_vBIT(val, 17, 3)
+#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_RTH_IT(val) \
+ vxge_vBIT(val, 21, 3)
+#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_DS(val) \
+ vxge_vBIT(val, 25, 3)
+#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_QOS(val) \
+ vxge_vBIT(val, 29, 3)
+#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ZL4PYLD(val) \
+ vxge_vBIT(val, 33, 3)
+#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_L4PRTCL(val) \
+ vxge_vBIT(val, 37, 3)
+/*0x001d8*/ u64 rxmac_cfg0_port_vpmgmt_clone[3];
+#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_RMAC_EN vxge_mBIT(3)
+#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS vxge_mBIT(7)
+#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_DISCARD_PFRM vxge_mBIT(11)
+#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_FCS_ERR vxge_mBIT(15)
+#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_LONG_ERR vxge_mBIT(19)
+#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_USIZED_ERR vxge_mBIT(23)
+#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_LEN_MISMATCH \
+ vxge_mBIT(27)
+#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_MAX_PYLD_LEN(val) \
+ vxge_vBIT(val, 50, 14)
+/*0x001f0*/ u64 rxmac_pause_cfg_port_vpmgmt_clone[3];
+#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_GEN_EN vxge_mBIT(3)
+#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_RCV_EN vxge_mBIT(7)
+#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_ACCEL_SEND(val) \
+ vxge_vBIT(val, 9, 3)
+#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_DUAL_THR vxge_mBIT(15)
+#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_HIGH_PTIME(val) \
+ vxge_vBIT(val, 20, 16)
+#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_IGNORE_PF_FCS_ERR \
+ vxge_mBIT(39)
+#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_IGNORE_PF_LEN_ERR \
+ vxge_mBIT(43)
+#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_LIMITER_EN vxge_mBIT(47)
+#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_MAX_LIMIT(val) \
+ vxge_vBIT(val, 48, 8)
+#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_PERMIT_RATEMGMT_CTRL \
+ vxge_mBIT(59)
+ u8 unused00240[0x00240-0x00208];
+
+/*0x00240*/ u64 xmac_vsport_choices_vp;
+#define VXGE_HW_XMAC_VSPORT_CHOICES_VP_VSPORT_VECTOR(val) vxge_vBIT(val, 0, 17)
+ u8 unused00260[0x00260-0x00248];
+
+/*0x00260*/ u64 xgmac_gen_status_vpmgmt_clone;
+#define VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK vxge_mBIT(3)
+#define VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_DATA_RATE \
+ vxge_mBIT(11)
+/*0x00268*/ u64 xgmac_status_port_vpmgmt_clone[2];
+#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_RMAC_REMOTE_FAULT \
+ vxge_mBIT(3)
+#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_RMAC_LOCAL_FAULT vxge_mBIT(7)
+#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_XMACJ_MAC_PHY_LAYER_AVAIL \
+ vxge_mBIT(11)
+#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_XMACJ_PORT_OK vxge_mBIT(15)
+/*0x00278*/ u64 xmac_gen_cfg_vpmgmt_clone;
+#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_RATEMGMT_MAC_RATE_SEL(val) \
+ vxge_vBIT(val, 2, 2)
+#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_TX_HEAD_DROP_WHEN_FAULT \
+ vxge_mBIT(7)
+#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_FAULT_BEHAVIOUR vxge_mBIT(27)
+#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_PERIOD_NTWK_UP(val) \
+ vxge_vBIT(val, 28, 4)
+#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_PERIOD_NTWK_DOWN(val) \
+ vxge_vBIT(val, 32, 4)
+/*0x00280*/ u64 xmac_timestamp_vpmgmt_clone;
+#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_EN vxge_mBIT(3)
+#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_USE_LINK_ID(val) \
+ vxge_vBIT(val, 6, 2)
+#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_INTERVAL(val) vxge_vBIT(val, 12, 4)
+#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_TIMER_RESTART vxge_mBIT(19)
+#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_XMACJ_ROLLOVER_CNT(val) \
+ vxge_vBIT(val, 32, 16)
+/*0x00288*/ u64 xmac_stats_gen_cfg_vpmgmt_clone;
+#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_PRTAGGR_CUM_TIMER(val) \
+ vxge_vBIT(val, 4, 4)
+#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_VPATH_CUM_TIMER(val) \
+ vxge_vBIT(val, 8, 4)
+#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_VLAN_HANDLING vxge_mBIT(15)
+/*0x00290*/ u64 xmac_cfg_port_vpmgmt_clone[3];
+#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_LOOPBACK vxge_mBIT(3)
+#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_REVERSE_LOOPBACK \
+ vxge_mBIT(7)
+#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_TX_BEHAV vxge_mBIT(11)
+#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_RX_BEHAV vxge_mBIT(15)
+ u8 unused002c0[0x002c0-0x002a8];
+
+/*0x002c0*/ u64 txmac_gen_cfg0_vpmgmt_clone;
+#define VXGE_HW_TXMAC_GEN_CFG0_VPMGMT_CLONE_CHOSEN_TX_PORT vxge_mBIT(7)
+/*0x002c8*/ u64 txmac_cfg0_port_vpmgmt_clone[3];
+#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_TMAC_EN vxge_mBIT(3)
+#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_APPEND_PAD vxge_mBIT(7)
+#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_PAD_BYTE(val) vxge_vBIT(val, 8, 8)
+ u8 unused00300[0x00300-0x002e0];
+
+/*0x00300*/ u64 wol_mp_crc;
+#define VXGE_HW_WOL_MP_CRC_CRC(val) vxge_vBIT(val, 0, 32)
+#define VXGE_HW_WOL_MP_CRC_RC_EN vxge_mBIT(63)
+/*0x00308*/ u64 wol_mp_mask_a;
+#define VXGE_HW_WOL_MP_MASK_A_MASK(val) vxge_vBIT(val, 0, 64)
+/*0x00310*/ u64 wol_mp_mask_b;
+#define VXGE_HW_WOL_MP_MASK_B_MASK(val) vxge_vBIT(val, 0, 64)
+ u8 unused00360[0x00360-0x00318];
+
+/*0x00360*/ u64 fau_pa_cfg_vpmgmt_clone;
+#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L4_COMP_CSUM vxge_mBIT(3)
+#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L3_INCL_CF vxge_mBIT(7)
+#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L3_COMP_CSUM vxge_mBIT(11)
+/*0x00368*/ u64 rx_datapath_util_vp_clone;
+#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_FAU_RX_UTILIZATION(val) \
+ vxge_vBIT(val, 7, 9)
+#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_RX_UTIL_CFG(val) \
+ vxge_vBIT(val, 16, 4)
+#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_FAU_RX_FRAC_UTIL(val) \
+ vxge_vBIT(val, 20, 4)
+#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_RX_PKT_WEIGHT(val) \
+ vxge_vBIT(val, 24, 4)
+ u8 unused00380[0x00380-0x00370];
+
+/*0x00380*/ u64 tx_datapath_util_vp_clone;
+#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TPA_TX_UTILIZATION(val) \
+ vxge_vBIT(val, 7, 9)
+#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TX_UTIL_CFG(val) \
+ vxge_vBIT(val, 16, 4)
+#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TPA_TX_FRAC_UTIL(val) \
+ vxge_vBIT(val, 20, 4)
+#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TX_PKT_WEIGHT(val) \
+ vxge_vBIT(val, 24, 4)
+
+} __packed;
+
+struct vxge_hw_vpath_reg {
+
+ u8 unused00300[0x00300];
+
+/*0x00300*/ u64 usdc_vpath;
+#define VXGE_HW_USDC_VPATH_SGRP_ASSIGN(val) vxge_vBIT(val, 0, 32)
+ u8 unused00a00[0x00a00-0x00308];
+
+/*0x00a00*/ u64 wrdma_alarm_status;
+#define VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT vxge_mBIT(1)
+/*0x00a08*/ u64 wrdma_alarm_mask;
+ u8 unused00a30[0x00a30-0x00a10];
+
+/*0x00a30*/ u64 prc_alarm_reg;
+#define VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP vxge_mBIT(0)
+#define VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR vxge_mBIT(1)
+#define VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT vxge_mBIT(2)
+#define VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR vxge_mBIT(3)
+/*0x00a38*/ u64 prc_alarm_mask;
+/*0x00a40*/ u64 prc_alarm_alarm;
+/*0x00a48*/ u64 prc_cfg1;
+#define VXGE_HW_PRC_CFG1_RX_TIMER_VAL(val) vxge_vBIT(val, 3, 29)
+#define VXGE_HW_PRC_CFG1_TIM_RING_BUMP_INT_ENABLE vxge_mBIT(34)
+#define VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE vxge_mBIT(35)
+#define VXGE_HW_PRC_CFG1_GREEDY_RETURN vxge_mBIT(36)
+#define VXGE_HW_PRC_CFG1_QUICK_SHOT vxge_mBIT(37)
+#define VXGE_HW_PRC_CFG1_RX_TIMER_CI vxge_mBIT(39)
+#define VXGE_HW_PRC_CFG1_RESET_TIMER_ON_RXD_RET(val) vxge_vBIT(val, 40, 2)
+ u8 unused00a60[0x00a60-0x00a50];
+
+/*0x00a60*/ u64 prc_cfg4;
+#define VXGE_HW_PRC_CFG4_IN_SVC vxge_mBIT(7)
+#define VXGE_HW_PRC_CFG4_RING_MODE(val) vxge_vBIT(val, 14, 2)
+#define VXGE_HW_PRC_CFG4_RXD_NO_SNOOP vxge_mBIT(22)
+#define VXGE_HW_PRC_CFG4_FRM_NO_SNOOP vxge_mBIT(23)
+#define VXGE_HW_PRC_CFG4_RTH_DISABLE vxge_mBIT(31)
+#define VXGE_HW_PRC_CFG4_IGNORE_OWNERSHIP vxge_mBIT(32)
+#define VXGE_HW_PRC_CFG4_SIGNAL_BENIGN_OVFLW vxge_mBIT(36)
+#define VXGE_HW_PRC_CFG4_BIMODAL_INTERRUPT vxge_mBIT(37)
+#define VXGE_HW_PRC_CFG4_BACKOFF_INTERVAL(val) vxge_vBIT(val, 40, 24)
+/*0x00a68*/ u64 prc_cfg5;
+#define VXGE_HW_PRC_CFG5_RXD0_ADD(val) vxge_vBIT(val, 0, 61)
+/*0x00a70*/ u64 prc_cfg6;
+#define VXGE_HW_PRC_CFG6_FRM_PAD_EN vxge_mBIT(0)
+#define VXGE_HW_PRC_CFG6_QSIZE_ALIGNED_RXD vxge_mBIT(2)
+#define VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN vxge_mBIT(5)
+#define VXGE_HW_PRC_CFG6_L3_CPC_TRSFR_CODE_EN vxge_mBIT(8)
+#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9)
+#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9)
+#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9)
+#define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val) vxge_bVALn(val, 36, 9)
+/*0x00a78*/ u64 prc_cfg7;
+#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2)
+#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11)
+#define VXGE_HW_PRC_CFG7_RXD_NS_CHG_EN vxge_mBIT(12)
+#define VXGE_HW_PRC_CFG7_NO_HDR_SEPARATION vxge_mBIT(14)
+#define VXGE_HW_PRC_CFG7_RXD_BUFF_SIZE_MASK(val) vxge_vBIT(val, 20, 4)
+#define VXGE_HW_PRC_CFG7_BUFF_SIZE0_MASK(val) vxge_vBIT(val, 27, 5)
+/*0x00a80*/ u64 tim_dest_addr;
+#define VXGE_HW_TIM_DEST_ADDR_TIM_DEST_ADDR(val) vxge_vBIT(val, 0, 64)
+/*0x00a88*/ u64 prc_rxd_doorbell;
+#define VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val) vxge_vBIT(val, 48, 16)
+/*0x00a90*/ u64 rqa_prty_for_vp;
+#define VXGE_HW_RQA_PRTY_FOR_VP_RQA_PRTY_FOR_VP(val) vxge_vBIT(val, 59, 5)
+/*0x00a98*/ u64 rxdmem_size;
+#define VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(val) vxge_vBIT(val, 51, 13)
+/*0x00aa0*/ u64 frm_in_progress_cnt;
+#define VXGE_HW_FRM_IN_PROGRESS_CNT_PRC_FRM_IN_PROGRESS_CNT(val) \
+ vxge_vBIT(val, 59, 5)
+/*0x00aa8*/ u64 rx_multi_cast_stats;
+#define VXGE_HW_RX_MULTI_CAST_STATS_FRAME_DISCARD(val) vxge_vBIT(val, 48, 16)
+/*0x00ab0*/ u64 rx_frm_transferred;
+#define VXGE_HW_RX_FRM_TRANSFERRED_RX_FRM_TRANSFERRED(val) \
+ vxge_vBIT(val, 32, 32)
+/*0x00ab8*/ u64 rxd_returned;
+#define VXGE_HW_RXD_RETURNED_RXD_RETURNED(val) vxge_vBIT(val, 48, 16)
+ u8 unused00c00[0x00c00-0x00ac0];
+
+/*0x00c00*/ u64 kdfc_fifo_trpl_partition;
+#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(val) vxge_vBIT(val, 17, 15)
+#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_1(val) vxge_vBIT(val, 33, 15)
+#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_2(val) vxge_vBIT(val, 49, 15)
+/*0x00c08*/ u64 kdfc_fifo_trpl_ctrl;
+#define VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE vxge_mBIT(7)
+/*0x00c10*/ u64 kdfc_trpl_fifo_0_ctrl;
+#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(val) vxge_vBIT(val, 14, 2)
+#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_FLIP_EN vxge_mBIT(22)
+#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN vxge_mBIT(23)
+#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
+#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_CTRL_STRUC vxge_mBIT(28)
+#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_ADD_PAD vxge_mBIT(29)
+#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_NO_SNOOP vxge_mBIT(30)
+#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_RLX_ORD vxge_mBIT(31)
+#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
+#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
+#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
+/*0x00c18*/ u64 kdfc_trpl_fifo_1_ctrl;
+#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE(val) vxge_vBIT(val, 14, 2)
+#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_FLIP_EN vxge_mBIT(22)
+#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_SWAP_EN vxge_mBIT(23)
+#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
+#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_CTRL_STRUC vxge_mBIT(28)
+#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_ADD_PAD vxge_mBIT(29)
+#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_NO_SNOOP vxge_mBIT(30)
+#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_RLX_ORD vxge_mBIT(31)
+#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
+#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
+#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
+/*0x00c20*/ u64 kdfc_trpl_fifo_2_ctrl;
+#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_FLIP_EN vxge_mBIT(22)
+#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_SWAP_EN vxge_mBIT(23)
+#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2)
+#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_CTRL_STRUC vxge_mBIT(28)
+#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_ADD_PAD vxge_mBIT(29)
+#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_NO_SNOOP vxge_mBIT(30)
+#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_RLX_ORD vxge_mBIT(31)
+#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_SELECT(val) vxge_vBIT(val, 32, 8)
+#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7)
+#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16)
+/*0x00c28*/ u64 kdfc_trpl_fifo_0_wb_address;
+#define VXGE_HW_KDFC_TRPL_FIFO_0_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
+/*0x00c30*/ u64 kdfc_trpl_fifo_1_wb_address;
+#define VXGE_HW_KDFC_TRPL_FIFO_1_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
+/*0x00c38*/ u64 kdfc_trpl_fifo_2_wb_address;
+#define VXGE_HW_KDFC_TRPL_FIFO_2_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64)
+/*0x00c40*/ u64 kdfc_trpl_fifo_offset;
+#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR0(val) vxge_vBIT(val, 1, 15)
+#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR1(val) vxge_vBIT(val, 17, 15)
+#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR2(val) vxge_vBIT(val, 33, 15)
+/*0x00c48*/ u64 kdfc_drbl_triplet_total;
+#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_KDFC_MAX_SIZE(val) \
+ vxge_vBIT(val, 17, 15)
+ u8 unused00c60[0x00c60-0x00c50];
+
+/*0x00c60*/ u64 usdc_drbl_ctrl;
+#define VXGE_HW_USDC_DRBL_CTRL_FLIP_EN vxge_mBIT(22)
+#define VXGE_HW_USDC_DRBL_CTRL_SWAP_EN vxge_mBIT(23)
+/*0x00c68*/ u64 usdc_vp_ready;
+#define VXGE_HW_USDC_VP_READY_USDC_HTN_READY vxge_mBIT(7)
+#define VXGE_HW_USDC_VP_READY_USDC_SRQ_READY vxge_mBIT(15)
+#define VXGE_HW_USDC_VP_READY_USDC_CQRQ_READY vxge_mBIT(23)
+/*0x00c70*/ u64 kdfc_status;
+#define VXGE_HW_KDFC_STATUS_KDFC_WRR_0_READY vxge_mBIT(0)
+#define VXGE_HW_KDFC_STATUS_KDFC_WRR_1_READY vxge_mBIT(1)
+#define VXGE_HW_KDFC_STATUS_KDFC_WRR_2_READY vxge_mBIT(2)
+ u8 unused00c80[0x00c80-0x00c78];
+
+/*0x00c80*/ u64 xmac_rpa_vcfg;
+#define VXGE_HW_XMAC_RPA_VCFG_IPV4_TCP_INCL_PH vxge_mBIT(3)
+#define VXGE_HW_XMAC_RPA_VCFG_IPV6_TCP_INCL_PH vxge_mBIT(7)
+#define VXGE_HW_XMAC_RPA_VCFG_IPV4_UDP_INCL_PH vxge_mBIT(11)
+#define VXGE_HW_XMAC_RPA_VCFG_IPV6_UDP_INCL_PH vxge_mBIT(15)
+#define VXGE_HW_XMAC_RPA_VCFG_L4_INCL_CF vxge_mBIT(19)
+#define VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG vxge_mBIT(23)
+/*0x00c88*/ u64 rxmac_vcfg0;
+#define VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(val) vxge_vBIT(val, 2, 14)
+#define VXGE_HW_RXMAC_VCFG0_RTS_USE_MIN_LEN vxge_mBIT(19)
+#define VXGE_HW_RXMAC_VCFG0_RTS_MIN_FRM_LEN(val) vxge_vBIT(val, 26, 14)
+#define VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN vxge_mBIT(43)
+#define VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN vxge_mBIT(47)
+#define VXGE_HW_RXMAC_VCFG0_BCAST_EN vxge_mBIT(51)
+#define VXGE_HW_RXMAC_VCFG0_ALL_VID_EN vxge_mBIT(55)
+/*0x00c90*/ u64 rxmac_vcfg1;
+#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(val) vxge_vBIT(val, 42, 2)
+#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE vxge_mBIT(47)
+#define VXGE_HW_RXMAC_VCFG1_CONTRIB_L2_FLOW vxge_mBIT(51)
+/*0x00c98*/ u64 rts_access_steer_ctrl;
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(val) vxge_vBIT(val, 1, 7)
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(val) vxge_vBIT(val, 8, 4)
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE vxge_mBIT(15)
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_BEHAV_TBL_SEL vxge_mBIT(23)
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL vxge_mBIT(27)
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS vxge_mBIT(0)
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(val) vxge_vBIT(val, 40, 8)
+/*0x00ca0*/ u64 rts_access_steer_data0;
+#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DATA(val) vxge_vBIT(val, 0, 64)
+/*0x00ca8*/ u64 rts_access_steer_data1;
+#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DATA(val) vxge_vBIT(val, 0, 64)
+ u8 unused00d00[0x00d00-0x00cb0];
+
+/*0x00d00*/ u64 xmac_vsport_choice;
+#define VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(val) vxge_vBIT(val, 3, 5)
+/*0x00d08*/ u64 xmac_stats_cfg;
+/*0x00d10*/ u64 xmac_stats_access_cmd;
+#define VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(val) vxge_vBIT(val, 6, 2)
+#define VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE vxge_mBIT(15)
+#define VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(val) vxge_vBIT(val, 32, 8)
+/*0x00d18*/ u64 xmac_stats_access_data;
+#define VXGE_HW_XMAC_STATS_ACCESS_DATA_XSMGR_DATA(val) vxge_vBIT(val, 0, 64)
+/*0x00d20*/ u64 asic_ntwk_vp_ctrl;
+#define VXGE_HW_ASIC_NTWK_VP_CTRL_REQ_TEST_NTWK vxge_mBIT(3)
+#define VXGE_HW_ASIC_NTWK_VP_CTRL_XMACJ_SHOW_PORT_INFO vxge_mBIT(55)
+#define VXGE_HW_ASIC_NTWK_VP_CTRL_XMACJ_PORT_NUM vxge_mBIT(63)
+ u8 unused00d30[0x00d30-0x00d28];
+
+/*0x00d30*/ u64 xgmac_vp_int_status;
+#define VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT \
+ vxge_mBIT(3)
+/*0x00d38*/ u64 xgmac_vp_int_mask;
+/*0x00d40*/ u64 asic_ntwk_vp_err_reg;
+#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT vxge_mBIT(3)
+#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK vxge_mBIT(7)
+#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR \
+ vxge_mBIT(11)
+#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR \
+ vxge_mBIT(15)
+#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT \
+ vxge_mBIT(19)
+#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK vxge_mBIT(23)
+/*0x00d48*/ u64 asic_ntwk_vp_err_mask;
+/*0x00d50*/ u64 asic_ntwk_vp_err_alarm;
+ u8 unused00d80[0x00d80-0x00d58];
+
+/*0x00d80*/ u64 rtdma_bw_ctrl;
+#define VXGE_HW_RTDMA_BW_CTRL_BW_CTRL_EN vxge_mBIT(39)
+#define VXGE_HW_RTDMA_BW_CTRL_DESIRED_BW(val) vxge_vBIT(val, 46, 18)
+/*0x00d88*/ u64 rtdma_rd_optimization_ctrl;
+#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_GEN_INT_AFTER_ABORT vxge_mBIT(3)
+#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_PAD_MODE(val) vxge_vBIT(val, 6, 2)
+#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_PAD_PATTERN(val) vxge_vBIT(val, 8, 8)
+#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE vxge_mBIT(19)
+#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val) \
+ vxge_vBIT(val, 21, 3)
+#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_PYLD_WMARK_EN vxge_mBIT(28)
+#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_PYLD_WMARK(val) \
+ vxge_vBIT(val, 29, 3)
+#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN vxge_mBIT(35)
+#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(val) \
+ vxge_vBIT(val, 37, 3)
+#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_WAIT_FOR_SPACE vxge_mBIT(43)
+#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_FILL_THRESH(val) \
+ vxge_vBIT(val, 51, 5)
+#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_ADDR_BDRY_EN vxge_mBIT(59)
+#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_ADDR_BDRY(val) \
+	vxge_vBIT(val, 61, 3)
+#define VXGE_HW_PCI_EXP_DEVCTL_READRQ	0x7000	/* Max_Read_Request_Size */
+/*0x00d90*/ u64 pda_pcc_job_monitor;
+#define VXGE_HW_PDA_PCC_JOB_MONITOR_PDA_PCC_JOB_STATUS vxge_mBIT(7)
+/*0x00d98*/ u64 tx_protocol_assist_cfg;
+#define VXGE_HW_TX_PROTOCOL_ASSIST_CFG_LSOV2_EN vxge_mBIT(6)
+#define VXGE_HW_TX_PROTOCOL_ASSIST_CFG_IPV6_KEEP_SEARCHING vxge_mBIT(7)
+ u8 unused01000[0x01000-0x00da0];
+
+/*0x01000*/ u64 tim_cfg1_int_num[4];
+#define VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(val) vxge_vBIT(val, 6, 26)
+#define VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN vxge_mBIT(35)
+#define VXGE_HW_TIM_CFG1_INT_NUM_TXFRM_CNT_EN vxge_mBIT(36)
+#define VXGE_HW_TIM_CFG1_INT_NUM_TXD_CNT_EN vxge_mBIT(37)
+#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC vxge_mBIT(38)
+#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI vxge_mBIT(39)
+#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(val) vxge_vBIT(val, 41, 7)
+#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(val) vxge_vBIT(val, 49, 7)
+#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(val) vxge_vBIT(val, 57, 7)
+/*0x01020*/ u64 tim_cfg2_int_num[4];
+#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(val) vxge_vBIT(val, 0, 16)
+#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(val) vxge_vBIT(val, 16, 16)
+#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(val) vxge_vBIT(val, 32, 16)
+#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(val) vxge_vBIT(val, 48, 16)
+/*0x01040*/ u64 tim_cfg3_int_num[4];
+#define VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI vxge_mBIT(0)
+#define VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(val) vxge_vBIT(val, 1, 4)
+#define VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(val) vxge_vBIT(val, 6, 26)
+#define VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(val) vxge_vBIT(val, 32, 6)
+#define VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(val) vxge_vBIT(val, 38, 26)
+/*0x01060*/ u64 tim_wrkld_clc;
+#define VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(val) vxge_vBIT(val, 0, 32)
+#define VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(val) vxge_vBIT(val, 35, 5)
+#define VXGE_HW_TIM_WRKLD_CLC_CNT_FRM_BYTE vxge_mBIT(40)
+#define VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(val) vxge_vBIT(val, 41, 2)
+#define VXGE_HW_TIM_WRKLD_CLC_CNT_LNK_EN vxge_mBIT(43)
+#define VXGE_HW_TIM_WRKLD_CLC_HOST_UTIL(val) vxge_vBIT(val, 57, 7)
+/*0x01068*/ u64 tim_bitmap;
+#define VXGE_HW_TIM_BITMAP_MASK(val) vxge_vBIT(val, 0, 32)
+#define VXGE_HW_TIM_BITMAP_LLROOT_RXD_EN vxge_mBIT(32)
+#define VXGE_HW_TIM_BITMAP_LLROOT_TXD_EN vxge_mBIT(33)
+/*0x01070*/ u64 tim_ring_assn;
+#define VXGE_HW_TIM_RING_ASSN_INT_NUM(val) vxge_vBIT(val, 6, 2)
+/*0x01078*/ u64 tim_remap;
+#define VXGE_HW_TIM_REMAP_TX_EN vxge_mBIT(5)
+#define VXGE_HW_TIM_REMAP_RX_EN vxge_mBIT(6)
+#define VXGE_HW_TIM_REMAP_OFFLOAD_EN vxge_mBIT(7)
+#define VXGE_HW_TIM_REMAP_TO_VPATH_NUM(val) vxge_vBIT(val, 11, 5)
+/*0x01080*/ u64 tim_vpath_map;
+#define VXGE_HW_TIM_VPATH_MAP_BMAP_ROOT(val) vxge_vBIT(val, 0, 32)
+/*0x01088*/ u64 tim_pci_cfg;
+#define VXGE_HW_TIM_PCI_CFG_ADD_PAD vxge_mBIT(7)
+#define VXGE_HW_TIM_PCI_CFG_NO_SNOOP vxge_mBIT(15)
+#define VXGE_HW_TIM_PCI_CFG_RELAXED vxge_mBIT(23)
+#define VXGE_HW_TIM_PCI_CFG_CTL_STR vxge_mBIT(31)
+ u8 unused01100[0x01100-0x01090];
+
+/*0x01100*/ u64 sgrp_assign;
+#define VXGE_HW_SGRP_ASSIGN_SGRP_ASSIGN(val) vxge_vBIT(val, 0, 64)
+/*0x01108*/ u64 sgrp_aoa_and_result;
+#define VXGE_HW_SGRP_AOA_AND_RESULT_PET_SGRP_AOA_AND_RESULT(val) \
+ vxge_vBIT(val, 0, 64)
+/*0x01110*/ u64 rpe_pci_cfg;
+#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_DATA_ENABLE vxge_mBIT(7)
+#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_HDR_ENABLE vxge_mBIT(8)
+#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_CQE_ENABLE vxge_mBIT(9)
+#define VXGE_HW_RPE_PCI_CFG_PAD_NONLL_CQE_ENABLE vxge_mBIT(10)
+#define VXGE_HW_RPE_PCI_CFG_PAD_BASE_LL_CQE_ENABLE vxge_mBIT(11)
+#define VXGE_HW_RPE_PCI_CFG_PAD_LL_CQE_IDATA_ENABLE vxge_mBIT(12)
+#define VXGE_HW_RPE_PCI_CFG_PAD_CQRQ_IR_ENABLE vxge_mBIT(13)
+#define VXGE_HW_RPE_PCI_CFG_PAD_CQSQ_IR_ENABLE vxge_mBIT(14)
+#define VXGE_HW_RPE_PCI_CFG_PAD_CQRR_IR_ENABLE vxge_mBIT(15)
+#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_DATA vxge_mBIT(18)
+#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_NONLL_CQE vxge_mBIT(19)
+#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_LL_CQE vxge_mBIT(20)
+#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQRQ_IR vxge_mBIT(21)
+#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQSQ_IR vxge_mBIT(22)
+#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQRR_IR vxge_mBIT(23)
+#define VXGE_HW_RPE_PCI_CFG_RELAXED_DATA vxge_mBIT(26)
+#define VXGE_HW_RPE_PCI_CFG_RELAXED_NONLL_CQE vxge_mBIT(27)
+#define VXGE_HW_RPE_PCI_CFG_RELAXED_LL_CQE vxge_mBIT(28)
+#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQRQ_IR vxge_mBIT(29)
+#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQSQ_IR vxge_mBIT(30)
+#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQRR_IR vxge_mBIT(31)
+/*0x01118*/ u64 rpe_lro_cfg;
+#define VXGE_HW_RPE_LRO_CFG_SUPPRESS_LRO_ETH_TRLR vxge_mBIT(7)
+#define VXGE_HW_RPE_LRO_CFG_ALLOW_LRO_SNAP_SNAPJUMBO_MRG vxge_mBIT(11)
+#define VXGE_HW_RPE_LRO_CFG_ALLOW_LRO_LLC_LLCJUMBO_MRG vxge_mBIT(15)
+#define VXGE_HW_RPE_LRO_CFG_INCL_ACK_CNT_IN_CQE vxge_mBIT(23)
+/*0x01120*/ u64 pe_mr2vp_ack_blk_limit;
+#define VXGE_HW_PE_MR2VP_ACK_BLK_LIMIT_BLK_LIMIT(val) vxge_vBIT(val, 32, 32)
+/*0x01128*/ u64 pe_mr2vp_rirr_lirr_blk_limit;
+#define VXGE_HW_PE_MR2VP_RIRR_LIRR_BLK_LIMIT_RIRR_BLK_LIMIT(val) \
+ vxge_vBIT(val, 0, 32)
+#define VXGE_HW_PE_MR2VP_RIRR_LIRR_BLK_LIMIT_LIRR_BLK_LIMIT(val) \
+ vxge_vBIT(val, 32, 32)
+/*0x01130*/ u64 txpe_pci_nce_cfg;
+#define VXGE_HW_TXPE_PCI_NCE_CFG_NCE_THRESH(val) vxge_vBIT(val, 0, 32)
+#define VXGE_HW_TXPE_PCI_NCE_CFG_PAD_TOWI_ENABLE vxge_mBIT(55)
+#define VXGE_HW_TXPE_PCI_NCE_CFG_NOSNOOP_TOWI vxge_mBIT(63)
+ u8 unused01180[0x01180-0x01138];
+
+/*0x01180*/ u64 msg_qpad_en_cfg;
+#define VXGE_HW_MSG_QPAD_EN_CFG_UMQ_BWR_READ vxge_mBIT(3)
+#define VXGE_HW_MSG_QPAD_EN_CFG_DMQ_BWR_READ vxge_mBIT(7)
+#define VXGE_HW_MSG_QPAD_EN_CFG_MXP_GENDMA_READ vxge_mBIT(11)
+#define VXGE_HW_MSG_QPAD_EN_CFG_UXP_GENDMA_READ vxge_mBIT(15)
+#define VXGE_HW_MSG_QPAD_EN_CFG_UMQ_MSG_WRITE vxge_mBIT(19)
+#define VXGE_HW_MSG_QPAD_EN_CFG_UMQDMQ_IR_WRITE vxge_mBIT(23)
+#define VXGE_HW_MSG_QPAD_EN_CFG_MXP_GENDMA_WRITE vxge_mBIT(27)
+#define VXGE_HW_MSG_QPAD_EN_CFG_UXP_GENDMA_WRITE vxge_mBIT(31)
+/*0x01188*/ u64 msg_pci_cfg;
+#define VXGE_HW_MSG_PCI_CFG_GENDMA_NO_SNOOP vxge_mBIT(3)
+#define VXGE_HW_MSG_PCI_CFG_UMQDMQ_IR_NO_SNOOP vxge_mBIT(7)
+#define VXGE_HW_MSG_PCI_CFG_UMQ_NO_SNOOP vxge_mBIT(11)
+#define VXGE_HW_MSG_PCI_CFG_DMQ_NO_SNOOP vxge_mBIT(15)
+/*0x01190*/ u64 umqdmq_ir_init;
+#define VXGE_HW_UMQDMQ_IR_INIT_HOST_WRITE_ADD(val) vxge_vBIT(val, 0, 64)
+/*0x01198*/ u64 dmq_ir_int;
+#define VXGE_HW_DMQ_IR_INT_IMMED_ENABLE vxge_mBIT(6)
+#define VXGE_HW_DMQ_IR_INT_EVENT_ENABLE vxge_mBIT(7)
+#define VXGE_HW_DMQ_IR_INT_NUMBER(val) vxge_vBIT(val, 9, 7)
+#define VXGE_HW_DMQ_IR_INT_BITMAP(val) vxge_vBIT(val, 16, 16)
+/*0x011a0*/ u64 dmq_bwr_init_add;
+#define VXGE_HW_DMQ_BWR_INIT_ADD_HOST(val) vxge_vBIT(val, 0, 64)
+/*0x011a8*/ u64 dmq_bwr_init_byte;
+#define VXGE_HW_DMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32)
+/*0x011b0*/ u64 dmq_ir;
+#define VXGE_HW_DMQ_IR_POLICY(val) vxge_vBIT(val, 0, 8)
+/*0x011b8*/ u64 umq_int;
+#define VXGE_HW_UMQ_INT_IMMED_ENABLE vxge_mBIT(6)
+#define VXGE_HW_UMQ_INT_EVENT_ENABLE vxge_mBIT(7)
+#define VXGE_HW_UMQ_INT_NUMBER(val) vxge_vBIT(val, 9, 7)
+#define VXGE_HW_UMQ_INT_BITMAP(val) vxge_vBIT(val, 16, 16)
+/*0x011c0*/ u64 umq_mr2vp_bwr_pfch_init;
+#define VXGE_HW_UMQ_MR2VP_BWR_PFCH_INIT_NUMBER(val) vxge_vBIT(val, 0, 8)
+/*0x011c8*/ u64 umq_bwr_pfch_ctrl;
+#define VXGE_HW_UMQ_BWR_PFCH_CTRL_POLL_EN vxge_mBIT(3)
+/*0x011d0*/ u64 umq_mr2vp_bwr_eol;
+#define VXGE_HW_UMQ_MR2VP_BWR_EOL_POLL_LATENCY(val) vxge_vBIT(val, 32, 32)
+/*0x011d8*/ u64 umq_bwr_init_add;
+#define VXGE_HW_UMQ_BWR_INIT_ADD_HOST(val) vxge_vBIT(val, 0, 64)
+/*0x011e0*/ u64 umq_bwr_init_byte;
+#define VXGE_HW_UMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32)
+/*0x011e8*/ u64 gendma_int;
+/*0x011f0*/ u64 umqdmq_ir_init_notify;
+#define VXGE_HW_UMQDMQ_IR_INIT_NOTIFY_PULSE vxge_mBIT(3)
+/*0x011f8*/ u64 dmq_init_notify;
+#define VXGE_HW_DMQ_INIT_NOTIFY_PULSE vxge_mBIT(3)
+/*0x01200*/ u64 umq_init_notify;
+#define VXGE_HW_UMQ_INIT_NOTIFY_PULSE vxge_mBIT(3)
+ u8 unused01380[0x01380-0x01208];
+
+/*0x01380*/ u64 tpa_cfg;
+#define VXGE_HW_TPA_CFG_IGNORE_FRAME_ERR vxge_mBIT(3)
+#define VXGE_HW_TPA_CFG_IPV6_STOP_SEARCHING vxge_mBIT(7)
+#define VXGE_HW_TPA_CFG_L4_PSHDR_PRESENT vxge_mBIT(11)
+#define VXGE_HW_TPA_CFG_SUPPORT_MOBILE_IPV6_HDRS vxge_mBIT(15)
+ u8 unused01400[0x01400-0x01388];
+
+/*0x01400*/ u64 tx_vp_reset_discarded_frms;
+#define VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_TX_VP_RESET_DISCARDED_FRMS(val) \
+ vxge_vBIT(val, 48, 16)
+ u8 unused01480[0x01480-0x01408];
+
+/*0x01480*/ u64 fau_rpa_vcfg;
+#define VXGE_HW_FAU_RPA_VCFG_L4_COMP_CSUM vxge_mBIT(7)
+#define VXGE_HW_FAU_RPA_VCFG_L3_INCL_CF vxge_mBIT(11)
+#define VXGE_HW_FAU_RPA_VCFG_L3_COMP_CSUM vxge_mBIT(15)
+ u8 unused014d0[0x014d0-0x01488];
+
+/*0x014d0*/ u64 dbg_stats_rx_mpa;
+#define VXGE_HW_DBG_STATS_RX_MPA_CRC_FAIL_FRMS(val) vxge_vBIT(val, 0, 16)
+#define VXGE_HW_DBG_STATS_RX_MPA_MRK_FAIL_FRMS(val) vxge_vBIT(val, 16, 16)
+#define VXGE_HW_DBG_STATS_RX_MPA_LEN_FAIL_FRMS(val) vxge_vBIT(val, 32, 16)
+/*0x014d8*/ u64 dbg_stats_rx_fau;
+#define VXGE_HW_DBG_STATS_RX_FAU_RX_WOL_FRMS(val) vxge_vBIT(val, 0, 16)
+#define VXGE_HW_DBG_STATS_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val) \
+ vxge_vBIT(val, 16, 16)
+#define VXGE_HW_DBG_STATS_RX_FAU_RX_PERMITTED_FRMS(val) \
+ vxge_vBIT(val, 32, 32)
+ u8 unused014f0[0x014f0-0x014e0];
+
+/*0x014f0*/ u64 fbmc_vp_rdy;
+#define VXGE_HW_FBMC_VP_RDY_QUEUE_SPAV_FM vxge_mBIT(0)
+ u8 unused01e00[0x01e00-0x014f8];
+
+/*0x01e00*/ u64 vpath_pcipif_int_status;
+#define \
+VXGE_HW_VPATH_PCIPIF_INT_STATUS_SRPCIM_MSG_TO_VPATH_SRPCIM_MSG_TO_VPATH_INT \
+ vxge_mBIT(3)
+#define VXGE_HW_VPATH_PCIPIF_INT_STATUS_VPATH_SPARE_R1_VPATH_SPARE_R1_INT \
+ vxge_mBIT(7)
+/*0x01e08*/ u64 vpath_pcipif_int_mask;
+ u8 unused01e20[0x01e20-0x01e10];
+
+/*0x01e20*/ u64 srpcim_msg_to_vpath_reg;
+#define VXGE_HW_SRPCIM_MSG_TO_VPATH_REG_SWIF_SRPCIM_TO_VPATH_RMSG_INT \
+ vxge_mBIT(3)
+/*0x01e28*/ u64 srpcim_msg_to_vpath_mask;
+/*0x01e30*/ u64 srpcim_msg_to_vpath_alarm;
+ u8 unused01ea0[0x01ea0-0x01e38];
+
+/*0x01ea0*/ u64 vpath_to_srpcim_wmsg;
+#define VXGE_HW_VPATH_TO_SRPCIM_WMSG_VPATH_TO_SRPCIM_WMSG(val) \
+ vxge_vBIT(val, 0, 64)
+/*0x01ea8*/ u64 vpath_to_srpcim_wmsg_trig;
+#define VXGE_HW_VPATH_TO_SRPCIM_WMSG_TRIG_VPATH_TO_SRPCIM_WMSG_TRIG \
+ vxge_mBIT(0)
+ u8 unused02000[0x02000-0x01eb0];
+
+/*0x02000*/ u64 vpath_general_int_status;
+#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT vxge_mBIT(3)
+#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT vxge_mBIT(7)
+#define VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT vxge_mBIT(15)
+#define VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT vxge_mBIT(19)
+/*0x02008*/ u64 vpath_general_int_mask;
+#define VXGE_HW_VPATH_GENERAL_INT_MASK_PIC_INT vxge_mBIT(3)
+#define VXGE_HW_VPATH_GENERAL_INT_MASK_PCI_INT vxge_mBIT(7)
+#define VXGE_HW_VPATH_GENERAL_INT_MASK_WRDMA_INT vxge_mBIT(15)
+#define VXGE_HW_VPATH_GENERAL_INT_MASK_XMAC_INT vxge_mBIT(19)
+/*0x02010*/ u64 vpath_ppif_int_status;
+#define VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT \
+ vxge_mBIT(3)
+#define VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT \
+ vxge_mBIT(7)
+#define VXGE_HW_VPATH_PPIF_INT_STATUS_PCI_CONFIG_ERRORS_PCI_CONFIG_INT \
+ vxge_mBIT(11)
+#define \
+VXGE_HW_VPATH_PPIF_INT_STATUS_MRPCIM_TO_VPATH_ALARM_MRPCIM_TO_VPATH_ALARM_INT \
+ vxge_mBIT(15)
+#define \
+VXGE_HW_VPATH_PPIF_INT_STATUS_SRPCIM_TO_VPATH_ALARM_SRPCIM_TO_VPATH_ALARM_INT \
+ vxge_mBIT(19)
+/*0x02018*/ u64 vpath_ppif_int_mask;
+/*0x02020*/ u64 kdfcctl_errors_reg;
+#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR vxge_mBIT(3)
+#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR vxge_mBIT(7)
+#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR vxge_mBIT(11)
+#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON vxge_mBIT(15)
+#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON vxge_mBIT(19)
+#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON vxge_mBIT(23)
+#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR vxge_mBIT(31)
+#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR vxge_mBIT(35)
+#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR vxge_mBIT(39)
+/*0x02028*/ u64 kdfcctl_errors_mask;
+/*0x02030*/ u64 kdfcctl_errors_alarm;
+ u8 unused02040[0x02040-0x02038];
+
+/*0x02040*/ u64 general_errors_reg;
+#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW vxge_mBIT(3)
+#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW vxge_mBIT(7)
+#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW vxge_mBIT(11)
+#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR vxge_mBIT(15)
+#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ vxge_mBIT(19)
+#define VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS vxge_mBIT(27)
+#define VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET vxge_mBIT(31)
+/*0x02048*/ u64 general_errors_mask;
+/*0x02050*/ u64 general_errors_alarm;
+/*0x02058*/ u64 pci_config_errors_reg;
+#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_STATUS_ERR vxge_mBIT(3)
+#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_UNCOR_ERR vxge_mBIT(7)
+#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_COR_ERR vxge_mBIT(11)
+/*0x02060*/ u64 pci_config_errors_mask;
+/*0x02068*/ u64 pci_config_errors_alarm;
+/*0x02070*/ u64 mrpcim_to_vpath_alarm_reg;
+#define VXGE_HW_MRPCIM_TO_VPATH_ALARM_REG_PPIF_MRPCIM_TO_VPATH_ALARM \
+ vxge_mBIT(3)
+/*0x02078*/ u64 mrpcim_to_vpath_alarm_mask;
+/*0x02080*/ u64 mrpcim_to_vpath_alarm_alarm;
+/*0x02088*/ u64 srpcim_to_vpath_alarm_reg;
+#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_PPIF_SRPCIM_TO_VPATH_ALARM(val) \
+ vxge_vBIT(val, 0, 17)
+/*0x02090*/ u64 srpcim_to_vpath_alarm_mask;
+/*0x02098*/ u64 srpcim_to_vpath_alarm_alarm;
+ u8 unused02108[0x02108-0x020a0];
+
+/*0x02108*/ u64 kdfcctl_status;
+#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO0_PRES(val) vxge_vBIT(val, 0, 8)
+#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO1_PRES(val) vxge_vBIT(val, 8, 8)
+#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO2_PRES(val) vxge_vBIT(val, 16, 8)
+#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO0_OVRWR(val) vxge_vBIT(val, 24, 8)
+#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO1_OVRWR(val) vxge_vBIT(val, 32, 8)
+#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO2_OVRWR(val) vxge_vBIT(val, 40, 8)
+/*0x02110*/ u64 rsthdlr_status;
+#define VXGE_HW_RSTHDLR_STATUS_RSTHDLR_CURRENT_RESET vxge_mBIT(3)
+#define VXGE_HW_RSTHDLR_STATUS_RSTHDLR_CURRENT_VPIN(val) vxge_vBIT(val, 6, 2)
+/*0x02118*/ u64 fifo0_status;
+#define VXGE_HW_FIFO0_STATUS_DBLGEN_FIFO0_RDIDX(val) vxge_vBIT(val, 0, 12)
+/*0x02120*/ u64 fifo1_status;
+#define VXGE_HW_FIFO1_STATUS_DBLGEN_FIFO1_RDIDX(val) vxge_vBIT(val, 0, 12)
+/*0x02128*/ u64 fifo2_status;
+#define VXGE_HW_FIFO2_STATUS_DBLGEN_FIFO2_RDIDX(val) vxge_vBIT(val, 0, 12)
+ u8 unused02158[0x02158-0x02130];
+
+/*0x02158*/ u64 tgt_illegal_access;
+#define VXGE_HW_TGT_ILLEGAL_ACCESS_SWIF_REGION(val) vxge_vBIT(val, 1, 7)
+ u8 unused02200[0x02200-0x02160];
+
+/*0x02200*/ u64 vpath_general_cfg1;
+#define VXGE_HW_VPATH_GENERAL_CFG1_TC_VALUE(val) vxge_vBIT(val, 1, 3)
+#define VXGE_HW_VPATH_GENERAL_CFG1_DATA_BYTE_SWAPEN vxge_mBIT(7)
+#define VXGE_HW_VPATH_GENERAL_CFG1_DATA_FLIPEN vxge_mBIT(11)
+#define VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN vxge_mBIT(15)
+#define VXGE_HW_VPATH_GENERAL_CFG1_CTL_FLIPEN vxge_mBIT(23)
+#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_ADDR_SWAPEN vxge_mBIT(51)
+#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_ADDR_FLIPEN vxge_mBIT(55)
+#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_DATA_SWAPEN vxge_mBIT(59)
+#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_DATA_FLIPEN vxge_mBIT(63)
+/*0x02208*/ u64 vpath_general_cfg2;
+#define VXGE_HW_VPATH_GENERAL_CFG2_SIZE_QUANTUM(val) vxge_vBIT(val, 1, 3)
+/*0x02210*/ u64 vpath_general_cfg3;
+#define VXGE_HW_VPATH_GENERAL_CFG3_IGNORE_VPATH_RST_FOR_INTA vxge_mBIT(3)
+ u8 unused02220[0x02220-0x02218];
+
+/*0x02220*/ u64 kdfcctl_cfg0;
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 vxge_mBIT(1)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 vxge_mBIT(2)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2 vxge_mBIT(3)
+#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO0 vxge_mBIT(5)
+#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO1 vxge_mBIT(6)
+#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO2 vxge_mBIT(7)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO0 vxge_mBIT(9)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO1 vxge_mBIT(10)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO2 vxge_mBIT(11)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO0 vxge_mBIT(13)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO1 vxge_mBIT(14)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO2 vxge_mBIT(15)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO0 vxge_mBIT(17)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO1 vxge_mBIT(18)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO2 vxge_mBIT(19)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO0 vxge_mBIT(21)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO1 vxge_mBIT(22)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO2 vxge_mBIT(23)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO0 vxge_mBIT(25)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO1 vxge_mBIT(26)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO2 vxge_mBIT(27)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO0 vxge_mBIT(29)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO1 vxge_mBIT(30)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO2 vxge_mBIT(31)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO0 vxge_mBIT(33)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO1 vxge_mBIT(34)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO2 vxge_mBIT(35)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO0 vxge_mBIT(37)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO1 vxge_mBIT(38)
+#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO2 vxge_mBIT(39)
+
+ u8 unused02268[0x02268-0x02228];
+
+/*0x02268*/ u64 stats_cfg;
+#define VXGE_HW_STATS_CFG_START_HOST_ADDR(val) vxge_vBIT(val, 0, 57)
+/*0x02270*/ u64 interrupt_cfg0;
+#define VXGE_HW_INTERRUPT_CFG0_MSIX_FOR_RXTI(val) vxge_vBIT(val, 1, 7)
+#define VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(val) vxge_vBIT(val, 9, 7)
+#define VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(val) vxge_vBIT(val, 17, 7)
+#define VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(val) vxge_vBIT(val, 25, 7)
+#define VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(val) vxge_vBIT(val, 33, 7)
+ u8 unused02280[0x02280-0x02278];
+
+/*0x02280*/ u64 interrupt_cfg2;
+#define VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(val) vxge_vBIT(val, 1, 7)
+/*0x02288*/ u64 one_shot_vect0_en;
+#define VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN vxge_mBIT(3)
+/*0x02290*/ u64 one_shot_vect1_en;
+#define VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN vxge_mBIT(3)
+/*0x02298*/ u64 one_shot_vect2_en;
+#define VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN vxge_mBIT(3)
+/*0x022a0*/ u64 one_shot_vect3_en;
+#define VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN vxge_mBIT(3)
+ u8 unused022b0[0x022b0-0x022a8];
+
+/*0x022b0*/ u64 pci_config_access_cfg1;
+#define VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(val) vxge_vBIT(val, 0, 12)
+#define VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0 vxge_mBIT(15)
+/*0x022b8*/ u64 pci_config_access_cfg2;
+#define VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ vxge_mBIT(0)
+/*0x022c0*/ u64 pci_config_access_status;
+#define VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR vxge_mBIT(0)
+#define VXGE_HW_PCI_CONFIG_ACCESS_STATUS_DATA(val) vxge_vBIT(val, 32, 32)
+ u8 unused02300[0x02300-0x022c8];
+
+/*0x02300*/ u64 vpath_debug_stats0;
+#define VXGE_HW_VPATH_DEBUG_STATS0_INI_NUM_MWR_SENT(val) vxge_vBIT(val, 0, 32)
+/*0x02308*/ u64 vpath_debug_stats1;
+#define VXGE_HW_VPATH_DEBUG_STATS1_INI_NUM_MRD_SENT(val) vxge_vBIT(val, 0, 32)
+/*0x02310*/ u64 vpath_debug_stats2;
+#define VXGE_HW_VPATH_DEBUG_STATS2_INI_NUM_CPL_RCVD(val) vxge_vBIT(val, 0, 32)
+/*0x02318*/ u64 vpath_debug_stats3;
+#define VXGE_HW_VPATH_DEBUG_STATS3_INI_NUM_MWR_BYTE_SENT(val) \
+ vxge_vBIT(val, 0, 64)
+/*0x02320*/ u64 vpath_debug_stats4;
+#define VXGE_HW_VPATH_DEBUG_STATS4_INI_NUM_CPL_BYTE_RCVD(val) \
+ vxge_vBIT(val, 0, 64)
+/*0x02328*/ u64 vpath_debug_stats5;
+#define VXGE_HW_VPATH_DEBUG_STATS5_WRCRDTARB_XOFF(val) vxge_vBIT(val, 32, 32)
+/*0x02330*/ u64 vpath_debug_stats6;
+#define VXGE_HW_VPATH_DEBUG_STATS6_RDCRDTARB_XOFF(val) vxge_vBIT(val, 32, 32)
+/*0x02338*/ u64 vpath_genstats_count01;
+#define VXGE_HW_VPATH_GENSTATS_COUNT01_PPIF_VPATH_GENSTATS_COUNT1(val) \
+ vxge_vBIT(val, 0, 32)
+#define VXGE_HW_VPATH_GENSTATS_COUNT01_PPIF_VPATH_GENSTATS_COUNT0(val) \
+ vxge_vBIT(val, 32, 32)
+/*0x02340*/ u64 vpath_genstats_count23;
+#define VXGE_HW_VPATH_GENSTATS_COUNT23_PPIF_VPATH_GENSTATS_COUNT3(val) \
+ vxge_vBIT(val, 0, 32)
+#define VXGE_HW_VPATH_GENSTATS_COUNT23_PPIF_VPATH_GENSTATS_COUNT2(val) \
+ vxge_vBIT(val, 32, 32)
+/*0x02348*/ u64 vpath_genstats_count4;
+#define VXGE_HW_VPATH_GENSTATS_COUNT4_PPIF_VPATH_GENSTATS_COUNT4(val) \
+ vxge_vBIT(val, 32, 32)
+/*0x02350*/ u64 vpath_genstats_count5;
+#define VXGE_HW_VPATH_GENSTATS_COUNT5_PPIF_VPATH_GENSTATS_COUNT5(val) \
+ vxge_vBIT(val, 32, 32)
+ u8 unused02648[0x02648-0x02358];
+} __packed;
+
+#define VXGE_HW_EEPROM_SIZE (0x01 << 11)
+
+/* Capability lists */
+#define VXGE_HW_PCI_EXP_LNKCAP_LNK_SPEED 0xf /* Supported Link speeds */
+#define VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH 0x3f0 /* Supported Link widths. */
+#define VXGE_HW_PCI_EXP_LNKCAP_LW_RES 0x0 /* Reserved. */
+
+#endif
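
The register macros in this header compose 64-bit register words using big-endian bit numbering: bit 0 is the most significant bit of the u64. A minimal standalone sketch, assuming helper definitions equivalent to the vxge_mBIT/vxge_vBIT/vxge_bVALn macros defined earlier in vxge-reg.h (illustrative stand-ins, not a copy of the driver's exact text):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed-equivalent helpers: bit 0 is the MSB of the 64-bit word;
     * vBIT places an sz-bit field whose first (most significant) bit
     * sits at position loc; bVALn reads such a field back out.
     */
    #define vxge_mBIT(loc)          (0x8000000000000000ULL >> (loc))
    #define vxge_vBIT(val, loc, sz) (((uint64_t)(val)) << (64 - (loc) - (sz)))
    #define vxge_bVALn(bits, loc, n) \
            ((((uint64_t)(bits)) >> (64 - ((loc) + (n)))) & ((0x1ULL << (n)) - 1))

    int main(void)
    {
            /* A 5-bit vport number at bit offset 3, as used by
             * VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(val).
             */
            uint64_t reg = vxge_vBIT(17, 3, 5);

            printf("reg      = 0x%016llx\n", (unsigned long long)reg);
            printf("readback = %llu\n",
                   (unsigned long long)vxge_bVALn(reg, 3, 5));
            printf("mBIT(3)  = 0x%016llx\n",
                   (unsigned long long)vxge_mBIT(3));
            return 0;
    }

So a field described as vxge_vBIT(val, 59, 5) occupies the five least significant bits of the register (positions 59..63 counted from the MSB), while vxge_mBIT(0) is the top bit.
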
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
new file mode 100644
index 000000000000..5954fa264da1
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -0,0 +1,2514 @@
+/******************************************************************************
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ *
+ * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
+ * Virtualized Server Adapter.
+ * Copyright(c) 2002-2010 Exar Corp.
+ ******************************************************************************/
+#include <linux/etherdevice.h>
+#include <linux/prefetch.h>
+
+#include "vxge-traffic.h"
+#include "vxge-config.h"
+#include "vxge-main.h"
+
+/*
+ * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
+ * @vp: Virtual Path handle.
+ *
+ * Enable vpath interrupts. The function is to be executed last in the
+ * vpath initialization sequence.
+ *
+ * See also: vxge_hw_vpath_intr_disable()
+ */
+enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
+{
+ u64 val64;
+ struct __vxge_hw_virtualpath *vpath;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+	if (vp == NULL) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+
+ vpath = vp->vpath;
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+ status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+ goto exit;
+ }
+
+ vp_reg = vpath->vp_reg;
+
+ writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->general_errors_reg);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->pci_config_errors_reg);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->mrpcim_to_vpath_alarm_reg);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->srpcim_to_vpath_alarm_reg);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->vpath_ppif_int_status);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->srpcim_msg_to_vpath_reg);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->vpath_pcipif_int_status);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->prc_alarm_reg);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->wrdma_alarm_status);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->asic_ntwk_vp_err_reg);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->xgmac_vp_int_status);
+
+ val64 = readq(&vp_reg->vpath_general_int_status);
+
+ /* Mask unwanted interrupts */
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->vpath_pcipif_int_mask);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->srpcim_msg_to_vpath_mask);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->srpcim_to_vpath_alarm_mask);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->mrpcim_to_vpath_alarm_mask);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->pci_config_errors_mask);
+
+ /* Unmask the individual interrupts */
+
+ writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
+ VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
+ VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
+ VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
+ &vp_reg->general_errors_mask);
+
+ __vxge_hw_pio_mem_write32_upper(
+ (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
+ VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
+ VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
+ VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
+ VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
+ VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
+ &vp_reg->kdfcctl_errors_mask);
+
+ __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
+
+ __vxge_hw_pio_mem_write32_upper(
+ (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
+ &vp_reg->prc_alarm_mask);
+
+ __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
+ __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
+
+ if (vpath->hldev->first_vp_id != vpath->vp_id)
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->asic_ntwk_vp_err_mask);
+ else
+ __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
+ VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
+ VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
+ &vp_reg->asic_ntwk_vp_err_mask);
+
+ __vxge_hw_pio_mem_write32_upper(0,
+ &vp_reg->vpath_general_int_mask);
+exit:
+ return status;
+}
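
As the comment above notes, interrupt enabling is the final step of vpath bring-up. A hypothetical ordering sketch (vxge_hw_vpath_open() and its attr argument are assumed from vxge-config, and error handling is abbreviated):

    /* Sketch only, not driver code: open the vpath, then enable
     * its interrupts as the last step of bring-up.
     */
    struct __vxge_hw_vpath_handle *vp = NULL;
    enum vxge_hw_status status;

    status = vxge_hw_vpath_open(hldev, &attr, &vp);   /* assumed API */
    if (status == VXGE_HW_OK)
            status = vxge_hw_vpath_intr_enable(vp);   /* final step */
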
+
+/*
+ * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
+ * @vp: Virtual Path handle.
+ *
+ * Disable vpath interrupts.
+ *
+ * See also: vxge_hw_vpath_intr_enable()
+ */
+enum vxge_hw_status vxge_hw_vpath_intr_disable(
+ struct __vxge_hw_vpath_handle *vp)
+{
+ u64 val64;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+	if (vp == NULL) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+
+ vpath = vp->vpath;
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+ status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+ goto exit;
+ }
+ vp_reg = vpath->vp_reg;
+
+ __vxge_hw_pio_mem_write32_upper(
+ (u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->vpath_general_int_mask);
+
+ val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
+
+ writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->general_errors_mask);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->pci_config_errors_mask);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->mrpcim_to_vpath_alarm_mask);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->srpcim_to_vpath_alarm_mask);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->vpath_ppif_int_mask);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->srpcim_msg_to_vpath_mask);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->vpath_pcipif_int_mask);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->wrdma_alarm_mask);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->prc_alarm_mask);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->xgmac_vp_int_mask);
+
+ __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->asic_ntwk_vp_err_mask);
+
+exit:
+ return status;
+}
+
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
+{
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct vxge_hw_vp_config *config;
+ u64 val64;
+
+ if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
+ return;
+
+ vp_reg = fifo->vp_reg;
+ config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
+
+ if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
+ config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
+ val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+ fifo->tim_tti_cfg1_saved = val64;
+ writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+ }
+}
+
+void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
+{
+ u64 val64 = ring->tim_rti_cfg1_saved;
+
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+ ring->tim_rti_cfg1_saved = val64;
+ writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+}
+
+void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
+{
+ u64 val64 = fifo->tim_tti_cfg3_saved;
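+	/* Convert rtimer (assumed to be in microseconds) into the
+	 * hardware timer's 272 ns tick units.
+	 */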
+ u64 timer = (fifo->rtimer * 1000) / 272;
+
+ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
+ if (timer)
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
+ VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
+
+ writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+ /* tti_cfg3_saved is not updated again because it is
+ * initialized at one place only - init time.
+ */
+}
+
+void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
+{
+ u64 val64 = ring->tim_rti_cfg3_saved;
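+	/* Convert rtimer (assumed to be in microseconds) into the
+	 * hardware timer's 272 ns tick units.
+	 */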
+ u64 timer = (ring->rtimer * 1000) / 272;
+
+ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
+ if (timer)
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
+ VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
+
+ writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
+ /* rti_cfg3_saved is not updated again because it is
+ * initialized at one place only - init time.
+ */
+}
+
+/**
+ * vxge_hw_channel_msix_mask - Mask MSIX Vector.
+ * @channel: Rx or tx channel handle
+ * @msix_id: MSIX ID
+ *
+ * The function masks the MSIX interrupt for the given msix_id.
+ */
+void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
+{
+ __vxge_hw_pio_mem_write32_upper(
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
+ &channel->common_reg->set_msix_mask_vect[msix_id%4]);
+}
+
+/**
+ * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
+ * @channel: Rx or tx channel handle
+ * @msix_id: MSIX ID
+ *
+ * The function unmasks the MSIX interrupt for the given msix_id.
+ */
+void
+vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
+{
+ __vxge_hw_pio_mem_write32_upper(
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
+ &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
+}
+
+/**
+ * vxge_hw_channel_msix_clear - Clear the MSIX Vector.
+ * @channel: Rx or tx channel handle
+ * @msix_id: MSIX ID
+ *
+ * The function clears (re-arms) the MSIX interrupt for the given msix_id
+ * when configured in MSIX one-shot mode.
+ */
+void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
+{
+ __vxge_hw_pio_mem_write32_upper(
+ (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
+ &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+}
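
All three MSIX helpers above share one vector-to-register mapping: vector msix_id is controlled through mask-vector register msix_id % 4, at big-endian bit msix_id >> 2, written via the upper 32 bits of the 64-bit register. A small standalone sketch of that arithmetic (plain C, not driver code):

    #include <stdio.h>

    /* Mirrors the index math used by vxge_hw_channel_msix_mask() and
     * friends: which register of the 4-entry vector array is touched,
     * and which big-endian bit inside it.
     */
    static void msix_slot(int msix_id, int *reg_idx, int *bit)
    {
            *reg_idx = msix_id % 4;
            *bit     = msix_id >> 2;
    }

    int main(void)
    {
            int reg, bit;

            msix_slot(9, &reg, &bit);
            printf("msix_id 9 -> register %d, bit %d\n", reg, bit);
            return 0;
    }

For msix_id = 9 this selects set/clear register 1 and bit 2, i.e. vxge_mBIT(2) in the upper half of the 64-bit register.
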
+
+/**
+ * vxge_hw_device_set_intr_type - Update the configured interrupt type.
+ * @hldev: HW device handle.
+ * @intr_mode: New interrupt type
+ */
+u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
+{
+ if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
+ (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
+ (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
+ (intr_mode != VXGE_HW_INTR_MODE_DEF))
+ intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
+
+ hldev->config.intr_mode = intr_mode;
+ return intr_mode;
+}
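
Any mode outside the four recognized values silently falls back to legacy line interrupts, and the function returns the mode actually recorded, so a caller can detect the fallback. A hedged usage sketch:

    /* Usage sketch: an unrecognized mode is coerced to IRQLINE. */
    u32 mode = vxge_hw_device_set_intr_type(hldev, 0xff);

    if (mode == VXGE_HW_INTR_MODE_IRQLINE)
            ; /* request was rejected; driver runs with line interrupts */
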
+
+/**
+ * vxge_hw_device_intr_enable - Enable interrupts.
+ * @hldev: HW device handle.
+ *
+ * Enable Titan interrupts. The function is to be executed last in the
+ * Titan initialization sequence.
+ *
+ * See also: vxge_hw_device_intr_disable()
+ */
+void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
+{
+ u32 i;
+ u64 val64;
+ u32 val32;
+
+ vxge_hw_device_mask_all(hldev);
+
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
+ continue;
+
+ vxge_hw_vpath_intr_enable(
+ VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
+ }
+
+ if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
+ val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
+ hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
+
+ if (val64 != 0) {
+ writeq(val64, &hldev->common_reg->tim_int_status0);
+
+ writeq(~val64, &hldev->common_reg->tim_int_mask0);
+ }
+
+ val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
+ hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
+
+ if (val32 != 0) {
+ __vxge_hw_pio_mem_write32_upper(val32,
+ &hldev->common_reg->tim_int_status1);
+
+ __vxge_hw_pio_mem_write32_upper(~val32,
+ &hldev->common_reg->tim_int_mask1);
+ }
+ }
+
+ val64 = readq(&hldev->common_reg->titan_general_int_status);
+
+ vxge_hw_device_unmask_all(hldev);
+}
+
+/**
+ * vxge_hw_device_intr_disable - Disable Titan interrupts.
+ * @hldev: HW device handle.
+ *
+ * Disable Titan interrupts.
+ *
+ * See also: vxge_hw_device_intr_enable()
+ */
+void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
+{
+ u32 i;
+
+ vxge_hw_device_mask_all(hldev);
+
+ /* mask all the tim interrupts */
+ writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
+ __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
+ &hldev->common_reg->tim_int_mask1);
+
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
+ continue;
+
+ vxge_hw_vpath_intr_disable(
+ VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
+ }
+}
+
+/**
+ * vxge_hw_device_mask_all - Mask all device interrupts.
+ * @hldev: HW device handle.
+ *
+ * Mask all device interrupts.
+ *
+ * See also: vxge_hw_device_unmask_all()
+ */
+void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
+{
+ u64 val64;
+
+ val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
+ VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
+
+ __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
+ &hldev->common_reg->titan_mask_all_int);
+}
+
+/**
+ * vxge_hw_device_unmask_all - Unmask all device interrupts.
+ * @hldev: HW device handle.
+ *
+ * Unmask all device interrupts.
+ *
+ * See also: vxge_hw_device_mask_all()
+ */
+void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
+{
+ u64 val64 = 0;
+
+ if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
+ val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
+
+ __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
+ &hldev->common_reg->titan_mask_all_int);
+}
+
+/**
+ * vxge_hw_device_flush_io - Flush io writes.
+ * @hldev: HW device handle.
+ *
+ * The function performs a read operation to flush io writes.
+ *
+ * Returns: void
+ */
+void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
+{
+ u32 val32;
+
+ val32 = readl(&hldev->common_reg->titan_general_int_status);
+}
+
+/**
+ * __vxge_hw_device_handle_error - Handle error
+ * @hldev: HW device
+ * @vp_id: Vpath Id
+ * @type: Error type. Please see enum vxge_hw_event{}
+ *
+ * Handle error.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
+ enum vxge_hw_event type)
+{
+ switch (type) {
+ case VXGE_HW_EVENT_UNKNOWN:
+ break;
+ case VXGE_HW_EVENT_RESET_START:
+ case VXGE_HW_EVENT_RESET_COMPLETE:
+ case VXGE_HW_EVENT_LINK_DOWN:
+ case VXGE_HW_EVENT_LINK_UP:
+ goto out;
+ case VXGE_HW_EVENT_ALARM_CLEARED:
+ goto out;
+ case VXGE_HW_EVENT_ECCERR:
+ case VXGE_HW_EVENT_MRPCIM_ECCERR:
+ goto out;
+ case VXGE_HW_EVENT_FIFO_ERR:
+ case VXGE_HW_EVENT_VPATH_ERR:
+ case VXGE_HW_EVENT_CRITICAL_ERR:
+ case VXGE_HW_EVENT_SERR:
+ break;
+ case VXGE_HW_EVENT_SRPCIM_SERR:
+ case VXGE_HW_EVENT_MRPCIM_SERR:
+ goto out;
+ case VXGE_HW_EVENT_SLOT_FREEZE:
+ break;
+ default:
+ vxge_assert(0);
+ goto out;
+ }
+
+ /* notify driver */
+	if (hldev->uld_callbacks->crit_err)
+		hldev->uld_callbacks->crit_err(hldev, type, vp_id);
+out:
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_handle_link_down_ind
+ * @hldev: HW device handle.
+ *
+ * Link down indication handler. The function is invoked by HW when
+ * Titan indicates that the link is down.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
+{
+	/*
+	 * If the link state is already down, there is nothing to do.
+	 */
+ if (hldev->link_state == VXGE_HW_LINK_DOWN)
+ goto exit;
+
+ hldev->link_state = VXGE_HW_LINK_DOWN;
+
+ /* notify driver */
+ if (hldev->uld_callbacks->link_down)
+ hldev->uld_callbacks->link_down(hldev);
+exit:
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_handle_link_up_ind
+ * @hldev: HW device handle.
+ *
+ * Link up indication handler. The function is invoked by HW when
+ * Titan indicates that the link has been up for a programmable amount of time.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
+{
+	/*
+	 * If the link state is already up, there is nothing to do.
+	 */
+ if (hldev->link_state == VXGE_HW_LINK_UP)
+ goto exit;
+
+ hldev->link_state = VXGE_HW_LINK_UP;
+
+ /* notify driver */
+ if (hldev->uld_callbacks->link_up)
+ hldev->uld_callbacks->link_up(hldev);
+exit:
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_vpath_alarm_process - Process Alarms.
+ * @vpath: Virtual Path.
+ * @skip_alarms: Do not clear the alarms
+ *
+ * Process vpath alarms.
+ *
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+ u32 skip_alarms)
+{
+ u64 val64;
+ u64 alarm_status;
+ u64 pic_status;
+ struct __vxge_hw_device *hldev = NULL;
+ enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
+ u64 mask64;
+ struct vxge_hw_vpath_stats_sw_info *sw_stats;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+ if (vpath == NULL) {
+ alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+ alarm_event);
+ goto out2;
+ }
+
+ hldev = vpath->hldev;
+ vp_reg = vpath->vp_reg;
+ alarm_status = readq(&vp_reg->vpath_general_int_status);
+
+ if (alarm_status == VXGE_HW_ALL_FOXES) {
+ alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
+ alarm_event);
+ goto out;
+ }
+
+ sw_stats = vpath->sw_stats;
+
+ if (alarm_status & ~(
+ VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
+ VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
+ VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
+ VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
+ sw_stats->error_stats.unknown_alarms++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+ alarm_event);
+ goto out;
+ }
+
+ if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
+
+ val64 = readq(&vp_reg->xgmac_vp_int_status);
+
+ if (val64 &
+ VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
+
+ val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
+
+ if (((val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
+ (!(val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
+ ((val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
+ (!(val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
+ ))) {
+ sw_stats->error_stats.network_sustained_fault++;
+
+ writeq(
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
+ &vp_reg->asic_ntwk_vp_err_mask);
+
+ __vxge_hw_device_handle_link_down_ind(hldev);
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_LINK_DOWN, alarm_event);
+ }
+
+ if (((val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
+ (!(val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
+ ((val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
+ (!(val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
+ ))) {
+
+ sw_stats->error_stats.network_sustained_ok++;
+
+ writeq(
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
+ &vp_reg->asic_ntwk_vp_err_mask);
+
+ __vxge_hw_device_handle_link_up_ind(hldev);
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_LINK_UP, alarm_event);
+ }
+
+ writeq(VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->asic_ntwk_vp_err_reg);
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
+
+ if (skip_alarms)
+ return VXGE_HW_OK;
+ }
+ }
+
+ if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
+
+ pic_status = readq(&vp_reg->vpath_ppif_int_status);
+
+ if (pic_status &
+ VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
+
+ val64 = readq(&vp_reg->general_errors_reg);
+ mask64 = readq(&vp_reg->general_errors_mask);
+
+ if ((val64 &
+ VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
+ ~mask64) {
+ sw_stats->error_stats.ini_serr_det++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_SERR, alarm_event);
+ }
+
+ if ((val64 &
+ VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
+ ~mask64) {
+ sw_stats->error_stats.dblgen_fifo0_overflow++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_FIFO_ERR, alarm_event);
+ }
+
+ if ((val64 &
+ VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
+ ~mask64)
+ sw_stats->error_stats.statsb_pif_chain_error++;
+
+ if ((val64 &
+ VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
+ ~mask64)
+ sw_stats->error_stats.statsb_drop_timeout++;
+
+ if ((val64 &
+ VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
+ ~mask64)
+ sw_stats->error_stats.target_illegal_access++;
+
+ if (!skip_alarms) {
+ writeq(VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->general_errors_reg);
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_ALARM_CLEARED,
+ alarm_event);
+ }
+ }
+
+ if (pic_status &
+ VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
+
+ val64 = readq(&vp_reg->kdfcctl_errors_reg);
+ mask64 = readq(&vp_reg->kdfcctl_errors_mask);
+
+ if ((val64 &
+ VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
+ ~mask64) {
+ sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_FIFO_ERR,
+ alarm_event);
+ }
+
+ if ((val64 &
+ VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
+ ~mask64) {
+ sw_stats->error_stats.kdfcctl_fifo0_poison++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_FIFO_ERR,
+ alarm_event);
+ }
+
+ if ((val64 &
+ VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
+ ~mask64) {
+ sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_FIFO_ERR,
+ alarm_event);
+ }
+
+ if (!skip_alarms) {
+ writeq(VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->kdfcctl_errors_reg);
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_ALARM_CLEARED,
+ alarm_event);
+ }
+ }
+
+ }
+
+ if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
+
+ val64 = readq(&vp_reg->wrdma_alarm_status);
+
+ if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
+
+ val64 = readq(&vp_reg->prc_alarm_reg);
+ mask64 = readq(&vp_reg->prc_alarm_mask);
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
+ ~mask64)
+ sw_stats->error_stats.prc_ring_bumps++;
+
+ if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
+ ~mask64) {
+ sw_stats->error_stats.prc_rxdcm_sc_err++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_VPATH_ERR,
+ alarm_event);
+ }
+
+ if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
+ & ~mask64) {
+ sw_stats->error_stats.prc_rxdcm_sc_abort++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_VPATH_ERR,
+ alarm_event);
+ }
+
+ if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
+ & ~mask64) {
+ sw_stats->error_stats.prc_quanta_size_err++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_VPATH_ERR,
+ alarm_event);
+ }
+
+ if (!skip_alarms) {
+ writeq(VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->prc_alarm_reg);
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_ALARM_CLEARED,
+ alarm_event);
+ }
+ }
+ }
+out:
+ hldev->stats.sw_dev_err_stats.vpath_alarms++;
+out2:
+ if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
+ (alarm_event == VXGE_HW_EVENT_UNKNOWN))
+ return VXGE_HW_OK;
+
+ __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
+
+ if (alarm_event == VXGE_HW_EVENT_SERR)
+ return VXGE_HW_ERR_CRITICAL;
+
+ return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
+ VXGE_HW_ERR_SLOT_FREEZE :
+ (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
+ VXGE_HW_ERR_VPATH;
+}
+
+/**
+ * vxge_hw_device_begin_irq - Begin IRQ processing.
+ * @hldev: HW device handle.
+ * @skip_alarms: Do not clear the alarms
+ * @reason: "Reason" for the interrupt, the value of Titan's
+ * general_int_status register.
+ *
+ * The function performs two actions. First, it checks whether the interrupt
+ * was raised by the device (relevant on shared IRQ lines). Next, it masks
+ * the device interrupts.
+ *
+ * Note:
+ * vxge_hw_device_begin_irq() does not flush MMIO writes through the
+ * bridge. Therefore, two back-to-back interrupts are potentially possible.
+ *
+ * Returns: VXGE_HW_ERR_WRONG_IRQ, if the interrupt is not "ours" (note that
+ * in this case the device remains enabled and @reason is set to 0).
+ * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter
+ * status via @reason.
+ */
+enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
+ u32 skip_alarms, u64 *reason)
+{
+ u32 i;
+ u64 val64;
+ u64 adapter_status;
+ u64 vpath_mask;
+ enum vxge_hw_status ret = VXGE_HW_OK;
+
+ val64 = readq(&hldev->common_reg->titan_general_int_status);
+
+ if (unlikely(!val64)) {
+ /* not Titan interrupt */
+ *reason = 0;
+ ret = VXGE_HW_ERR_WRONG_IRQ;
+ goto exit;
+ }
+
+ if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
+
+ adapter_status = readq(&hldev->common_reg->adapter_status);
+
+ if (adapter_status == VXGE_HW_ALL_FOXES) {
+
+ __vxge_hw_device_handle_error(hldev,
+ NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
+ *reason = 0;
+ ret = VXGE_HW_ERR_SLOT_FREEZE;
+ goto exit;
+ }
+ }
+
+ hldev->stats.sw_dev_info_stats.total_intr_cnt++;
+
+ *reason = val64;
+
+ vpath_mask = hldev->vpaths_deployed >>
+ (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
+
+ if (val64 &
+ VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
+ hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
+
+ return VXGE_HW_OK;
+ }
+
+ hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
+
+ if (unlikely(val64 &
+ VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
+
+ enum vxge_hw_status error_level = VXGE_HW_OK;
+
+ hldev->stats.sw_dev_err_stats.vpath_alarms++;
+
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+
+ if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
+ continue;
+
+ ret = __vxge_hw_vpath_alarm_process(
+ &hldev->virtual_paths[i], skip_alarms);
+
+ error_level = VXGE_HW_SET_LEVEL(ret, error_level);
+
+ if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
+ (ret == VXGE_HW_ERR_SLOT_FREEZE)))
+ break;
+ }
+
+ ret = error_level;
+ }
+exit:
+ return ret;
+}
+
+/**
+ * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
+ * condition that has caused the Tx and Rx interrupt.
+ * @hldev: HW device.
+ *
+ * Acknowledge (that is, clear) the condition that has caused
+ * the Tx and Rx interrupt.
+ * See also: vxge_hw_device_begin_irq(),
+ * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
+ */
+void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
+{
+
+ if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
+ (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
+ writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
+ hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
+ &hldev->common_reg->tim_int_status0);
+ }
+
+ if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
+ (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
+ __vxge_hw_pio_mem_write32_upper(
+ (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
+ hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
+ &hldev->common_reg->tim_int_status1);
+ }
+}
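+
+/*
+ * Minimal INTA service routine sketch (illustrative only; my_isr() and the
+ * polling step are hypothetical, not part of this driver) showing how
+ * vxge_hw_device_begin_irq(), vxge_hw_device_mask_all(),
+ * vxge_hw_device_clear_tx_rx() and vxge_hw_device_unmask_all() are
+ * typically sequenced:
+ *
+ *	static irqreturn_t my_isr(int irq, void *dev_id)
+ *	{
+ *		struct __vxge_hw_device *hldev = dev_id;
+ *		u64 reason;
+ *
+ *		if (vxge_hw_device_begin_irq(hldev, 0, &reason) ==
+ *				VXGE_HW_ERR_WRONG_IRQ)
+ *			return IRQ_NONE;	// shared line, not our device
+ *
+ *		vxge_hw_device_mask_all(hldev);
+ *		vxge_hw_device_clear_tx_rx(hldev);
+ *		// ... poll the rings/fifos, e.g. from NAPI context ...
+ *		vxge_hw_device_unmask_all(hldev);
+ *		return IRQ_HANDLED;
+ *	}
+ */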
+
+/*
+ * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
+ * @channel: Channel
+ * @dtrh: Buffer to return the DTR pointer
+ *
+ * Allocates a dtr from the reserve array. If the reserve array is empty,
+ * it swaps the reserve and free arrays.
+ *
+ */
+static enum vxge_hw_status
+vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
+{
+ void **tmp_arr;
+
+ if (channel->reserve_ptr - channel->reserve_top > 0) {
+_alloc_after_swap:
+ *dtrh = channel->reserve_arr[--channel->reserve_ptr];
+
+ return VXGE_HW_OK;
+ }
+
+ /* switch between empty and full arrays */
+
+	/* The idea behind this design is that keeping the free and reserve
+	 * arrays separate also separates the irq and non-irq parts, i.e.
+	 * no additional locking is needed when a resource is freed. */
+
+ if (channel->length - channel->free_ptr > 0) {
+
+ tmp_arr = channel->reserve_arr;
+ channel->reserve_arr = channel->free_arr;
+ channel->free_arr = tmp_arr;
+ channel->reserve_ptr = channel->length;
+ channel->reserve_top = channel->free_ptr;
+ channel->free_ptr = channel->length;
+
+ channel->stats->reserve_free_swaps_cnt++;
+
+ goto _alloc_after_swap;
+ }
+
+ channel->stats->full_cnt++;
+
+ *dtrh = NULL;
+ return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
+}
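+
+/*
+ * Illustrative trace of the swap above (made-up numbers; length = 4 and
+ * reserve_top initially 0): once all four DTRs have been allocated and
+ * later freed, reserve_ptr == reserve_top == 0 and free_ptr == 0. The next
+ * allocation swaps the arrays, leaving reserve_ptr = 4, reserve_top = 0 and
+ * free_ptr = 4, and is then satisfied from the former free array.
+ */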
+
+/*
+ * vxge_hw_channel_dtr_post - Post a dtr to the channel
+ * @channel: Channel
+ * @dtrh: DTR pointer
+ *
+ * Posts a dtr to the work array.
+ *
+ */
+static void
+vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
+{
+ vxge_assert(channel->work_arr[channel->post_index] == NULL);
+
+ channel->work_arr[channel->post_index++] = dtrh;
+
+ /* wrap-around */
+ if (channel->post_index == channel->length)
+ channel->post_index = 0;
+}
+
+/*
+ * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
+ * @channel: Channel
+ * @dtrh: Buffer to return the next completed DTR pointer
+ *
+ * Returns the next completed dtr without removing it from the work array.
+ *
+ */
+void
+vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
+{
+ vxge_assert(channel->compl_index < channel->length);
+
+ *dtrh = channel->work_arr[channel->compl_index];
+ prefetch(*dtrh);
+}
+
+/*
+ * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
+ * @channel: Channel handle
+ *
+ * Removes the next completed dtr from the work array.
+ *
+ */
+void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
+{
+ channel->work_arr[channel->compl_index] = NULL;
+
+ /* wrap-around */
+ if (++channel->compl_index == channel->length)
+ channel->compl_index = 0;
+
+ channel->stats->total_compl_cnt++;
+}
+
+/*
+ * vxge_hw_channel_dtr_free - Frees a dtr
+ * @channel: Channel handle
+ * @dtrh: DTR pointer
+ *
+ * Returns the dtr to the free array.
+ *
+ */
+void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
+{
+ channel->free_arr[--channel->free_ptr] = dtrh;
+}
+
+/*
+ * vxge_hw_channel_dtr_count
+ * @channel: Channel handle. Obtained via vxge_hw_channel_open().
+ *
+ * Retrieve the number of DTRs available. This function cannot be called
+ * from the data path; ring_initial_replenish() is the only user.
+ */
+int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
+{
+ return (channel->reserve_ptr - channel->reserve_top) +
+ (channel->length - channel->free_ptr);
+}
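+
+/*
+ * Example (illustrative numbers): with length = 8, reserve_ptr = 5,
+ * reserve_top = 2 and free_ptr = 6, vxge_hw_channel_dtr_count() returns
+ * (5 - 2) + (8 - 6) = 5 descriptors still available for reservation.
+ */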
+
+/**
+ * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
+ * with a valid handle.
+ *
+ * Reserve Rx descriptor for subsequent filling-in by the driver
+ * and posting on the corresponding channel
+ * via vxge_hw_ring_rxd_post().
+ *
+ * Returns: VXGE_HW_OK - success.
+ * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
+ *
+ */
+enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
+ void **rxdh)
+{
+ enum vxge_hw_status status;
+ struct __vxge_hw_channel *channel;
+
+ channel = &ring->channel;
+
+ status = vxge_hw_channel_dtr_alloc(channel, rxdh);
+
+ if (status == VXGE_HW_OK) {
+ struct vxge_hw_ring_rxd_1 *rxdp =
+ (struct vxge_hw_ring_rxd_1 *)*rxdh;
+
+ rxdp->control_0 = rxdp->control_1 = 0;
+ }
+
+ return status;
+}
+
+/**
+ * vxge_hw_ring_rxd_free - Free descriptor.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor handle.
+ *
+ * Free the reserved descriptor. This operation is "symmetrical" to
+ * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
+ * lifecycle.
+ *
+ * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor can again
+ * be:
+ *
+ * - reserved (vxge_hw_ring_rxd_reserve);
+ *
+ * - posted (vxge_hw_ring_rxd_post);
+ *
+ * - completed (vxge_hw_ring_rxd_next_completed);
+ *
+ * - and recycled again (vxge_hw_ring_rxd_free).
+ *
+ * For alternative state transitions and more details please refer to
+ * the design doc.
+ *
+ */
+void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
+{
+ struct __vxge_hw_channel *channel;
+
+ channel = &ring->channel;
+
+ vxge_hw_channel_dtr_free(channel, rxdh);
+
+}
+
+/**
+ * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor handle.
+ *
+ * This routine prepares an rxd and posts it.
+ */
+void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
+{
+ struct __vxge_hw_channel *channel;
+
+ channel = &ring->channel;
+
+ vxge_hw_channel_dtr_post(channel, rxdh);
+}
+
+/**
+ * vxge_hw_ring_rxd_post_post - Process rxd after post.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor handle.
+ *
+ * Processes rxd after post
+ */
+void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
+{
+ struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
+ struct __vxge_hw_channel *channel;
+
+ channel = &ring->channel;
+
+ rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+
+ if (ring->stats->common_stats.usage_cnt > 0)
+ ring->stats->common_stats.usage_cnt--;
+}
+
+/**
+ * vxge_hw_ring_rxd_post - Post descriptor on the ring.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
+ *
+ * Post descriptor on the ring.
+ * Prior to posting, the descriptor should be filled in accordance with the
+ * Host/Titan interface specification for a given service (LL, etc.).
+ *
+ */
+void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
+{
+ struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
+ struct __vxge_hw_channel *channel;
+
+ channel = &ring->channel;
+
+ wmb();
+ rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+
+ vxge_hw_channel_dtr_post(channel, rxdh);
+
+ if (ring->stats->common_stats.usage_cnt > 0)
+ ring->stats->common_stats.usage_cnt--;
+}
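+
+/*
+ * Minimal Rx replenish sketch (illustrative; my_map_skb() is a hypothetical
+ * driver helper that allocates an skb, DMA-maps it and fills the RxD's
+ * buffer pointer):
+ *
+ *	void *rxdh;
+ *
+ *	while (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
+ *		if (my_map_skb(ring, rxdh)) {
+ *			vxge_hw_ring_rxd_free(ring, rxdh);
+ *			break;
+ *		}
+ *		vxge_hw_ring_rxd_post(ring, rxdh);
+ *	}
+ */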
+
+/**
+ * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor handle.
+ *
+ * Processes rxd after post with memory barrier.
+ */
+void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
+{
+ wmb();
+ vxge_hw_ring_rxd_post_post(ring, rxdh);
+}
+
+/**
+ * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor handle. Returned by HW.
+ * @t_code: Transfer code, as per Titan User Guide,
+ * Receive Descriptor Format. Returned by HW.
+ *
+ * Retrieve the _next_ completed descriptor.
+ * HW uses ring callback (*vxge_hw_ring_callback_f) to notify the
+ * driver of new completed descriptors. After that
+ * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest of
+ * the completions (the very first completion is passed by HW via
+ * vxge_hw_ring_callback_f).
+ *
+ * Implementation-wise, the driver is free to call
+ * vxge_hw_ring_rxd_next_completed either immediately from inside the
+ * ring callback, or in a deferred fashion and separate (from HW)
+ * context.
+ *
+ * Non-zero @t_code means failure to fill-in receive buffer(s)
+ * of the descriptor.
+ * For instance, parity error detected during the data transfer.
+ * In this case Titan will complete the descriptor and indicate
+ * for the host that the received data is not to be used.
+ * For details please refer to Titan User Guide.
+ *
+ * Returns: VXGE_HW_OK - success.
+ * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
+ * are currently available for processing.
+ *
+ * See also: vxge_hw_ring_callback_f{},
+ * vxge_hw_fifo_txdl_next_completed(), enum vxge_hw_status{}.
+ */
+enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
+ struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
+{
+ struct __vxge_hw_channel *channel;
+ struct vxge_hw_ring_rxd_1 *rxdp;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ u64 control_0, own;
+
+ channel = &ring->channel;
+
+ vxge_hw_channel_dtr_try_complete(channel, rxdh);
+
+ rxdp = *rxdh;
+ if (rxdp == NULL) {
+ status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+ goto exit;
+ }
+
+ control_0 = rxdp->control_0;
+ own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+ *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
+
+	/* check whether it is not the end, i.e. the descriptor has completed */
+ if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
+
+ vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
+ 0);
+
+ ++ring->cmpl_cnt;
+ vxge_hw_channel_dtr_complete(channel);
+
+ vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
+
+ ring->stats->common_stats.usage_cnt++;
+ if (ring->stats->common_stats.usage_max <
+ ring->stats->common_stats.usage_cnt)
+ ring->stats->common_stats.usage_max =
+ ring->stats->common_stats.usage_cnt;
+
+ status = VXGE_HW_OK;
+ goto exit;
+ }
+
+	/* reset it, since we don't want to return
+ * garbage to the driver */
+ *rxdh = NULL;
+ status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+exit:
+ return status;
+}
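+
+/*
+ * Typical completion loop (a sketch; given a struct __vxge_hw_ring *ring,
+ * with the actual frame handling elided):
+ *
+ *	void *rxdh;
+ *	u8 t_code;
+ *
+ *	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
+ *			VXGE_HW_OK) {
+ *		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) !=
+ *				VXGE_HW_OK)
+ *			continue;	// invalid t_code, skip the frame
+ *		// ... hand the buffer to the stack, then replenish ...
+ *	}
+ */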
+
+/**
+ * vxge_hw_ring_handle_tcode - Handle transfer code.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor handle.
+ * @t_code: One of the enumerated (and documented in the Titan user guide)
+ * "transfer codes".
+ *
+ * Handle descriptor's transfer code. The latter comes with each completed
+ * descriptor.
+ *
+ * Returns: one of the enum vxge_hw_status{} enumerated types.
+ * VXGE_HW_OK - for success.
+ * VXGE_HW_ERR_CRITICAL - when encounters critical error.
+ */
+enum vxge_hw_status vxge_hw_ring_handle_tcode(
+ struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
+{
+ struct __vxge_hw_channel *channel;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ channel = &ring->channel;
+
+	/* If the t_code is not supported and if the
+	 * t_code is other than 0x5 (unparseable packet
+	 * such as an unknown IPv6 header), drop it.
+ */
+
+ if (t_code == VXGE_HW_RING_T_CODE_OK ||
+ t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
+ status = VXGE_HW_OK;
+ goto exit;
+ }
+
+ if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
+ status = VXGE_HW_ERR_INVALID_TCODE;
+ goto exit;
+ }
+
+ ring->stats->rxd_t_code_err_cnt[t_code]++;
+exit:
+ return status;
+}
+
+/**
+ * __vxge_hw_non_offload_db_post - Post non offload doorbell
+ *
+ * @fifo: Fifo handle
+ * @txdl_ptr: The starting location of the TxDL in host memory
+ * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
+ * @no_snoop: No snoop flags
+ *
+ * This function posts a non-offload doorbell to the doorbell FIFO.
+ *
+ */
+static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
+ u64 txdl_ptr, u32 num_txds, u32 no_snoop)
+{
+ struct __vxge_hw_channel *channel;
+
+ channel = &fifo->channel;
+
+ writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
+ VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
+ VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
+ &fifo->nofl_db->control_0);
+
+ mmiowb();
+
+ writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
+
+ mmiowb();
+}
+
+/**
+ * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
+ * the fifo
+ * @fifoh: Handle to the fifo object used for non offload send
+ */
+u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
+{
+ return vxge_hw_channel_dtr_count(&fifoh->channel);
+}
+
+/**
+ * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
+ * with a valid handle.
+ * @txdl_priv: Buffer to return the pointer to per txdl space
+ *
+ * Reserve a single TxDL (that is, fifo descriptor)
+ * for the subsequent filling-in by the driver
+ * and posting on the corresponding channel
+ * via vxge_hw_fifo_txdl_post().
+ *
+ * Note: it is the responsibility of the driver to reserve multiple descriptors
+ * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
+ * carries up to the configured number (fifo.max_frags) of contiguous buffers.
+ *
+ * Returns: VXGE_HW_OK - success;
+ * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
+ *
+ */
+enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
+ struct __vxge_hw_fifo *fifo,
+ void **txdlh, void **txdl_priv)
+{
+ struct __vxge_hw_channel *channel;
+ enum vxge_hw_status status;
+ int i;
+
+ channel = &fifo->channel;
+
+ status = vxge_hw_channel_dtr_alloc(channel, txdlh);
+
+ if (status == VXGE_HW_OK) {
+ struct vxge_hw_fifo_txd *txdp =
+ (struct vxge_hw_fifo_txd *)*txdlh;
+ struct __vxge_hw_fifo_txdl_priv *priv;
+
+ priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
+
+ /* reset the TxDL's private */
+ priv->align_dma_offset = 0;
+ priv->align_vaddr_start = priv->align_vaddr;
+ priv->align_used_frags = 0;
+ priv->frags = 0;
+ priv->alloc_frags = fifo->config->max_frags;
+ priv->next_txdl_priv = NULL;
+
+ *txdl_priv = (void *)(size_t)txdp->host_control;
+
+ for (i = 0; i < fifo->config->max_frags; i++) {
+ txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
+ txdp->control_0 = txdp->control_1 = 0;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
+ * descriptor.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @txdlh: Descriptor handle.
+ * @frag_idx: Index of the data buffer in the caller's scatter-gather list
+ * (of buffers).
+ * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
+ * @size: Size of the data buffer (in bytes).
+ *
+ * This API is part of the preparation of the transmit descriptor for posting
+ * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
+ * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
+ * All three APIs fill in the fields of the fifo descriptor,
+ * in accordance with the Titan specification.
+ *
+ */
+void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
+ void *txdlh, u32 frag_idx,
+ dma_addr_t dma_pointer, u32 size)
+{
+ struct __vxge_hw_fifo_txdl_priv *txdl_priv;
+ struct vxge_hw_fifo_txd *txdp, *txdp_last;
+ struct __vxge_hw_channel *channel;
+
+ channel = &fifo->channel;
+
+ txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
+ txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
+
+ if (frag_idx != 0)
+ txdp->control_0 = txdp->control_1 = 0;
+ else {
+ txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
+ VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
+ txdp->control_1 |= fifo->interrupt_type;
+ txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
+ fifo->tx_intr_num);
+ if (txdl_priv->frags) {
+ txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
+ (txdl_priv->frags - 1);
+ txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
+ VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
+ }
+ }
+
+ vxge_assert(frag_idx < txdl_priv->alloc_frags);
+
+ txdp->buffer_pointer = (u64)dma_pointer;
+ txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
+ fifo->stats->total_buffers++;
+ txdl_priv->frags++;
+}
+
+/**
+ * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
+ *
+ * Post descriptor on the 'fifo' type channel for transmission.
+ * Prior to posting, the descriptor should be filled in accordance with the
+ * Host/Titan interface specification for a given service (LL, etc.).
+ *
+ */
+void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
+{
+ struct __vxge_hw_fifo_txdl_priv *txdl_priv;
+ struct vxge_hw_fifo_txd *txdp_last;
+ struct vxge_hw_fifo_txd *txdp_first;
+ struct __vxge_hw_channel *channel;
+
+ channel = &fifo->channel;
+
+ txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
+ txdp_first = txdlh;
+
+ txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
+ txdp_last->control_0 |=
+ VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
+ txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
+
+ vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
+
+ __vxge_hw_non_offload_db_post(fifo,
+ (u64)txdl_priv->dma_addr,
+ txdl_priv->frags - 1,
+ fifo->no_snoop_bits);
+
+ fifo->stats->total_posts++;
+ fifo->stats->common_stats.usage_cnt++;
+ if (fifo->stats->common_stats.usage_max <
+ fifo->stats->common_stats.usage_cnt)
+ fifo->stats->common_stats.usage_max =
+ fifo->stats->common_stats.usage_cnt;
+}
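+
+/*
+ * Minimal transmit sketch (illustrative; nfrags, addr[] and len[] stand in
+ * for a driver's DMA-mapped scatter-gather list):
+ *
+ *	void *txdlh, *txdl_priv;
+ *	u32 i;
+ *
+ *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) != VXGE_HW_OK)
+ *		return;	// out of descriptors, stop the queue
+ *
+ *	for (i = 0; i < nfrags; i++)
+ *		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, i, addr[i], len[i]);
+ *
+ *	vxge_hw_fifo_txdl_post(fifo, txdlh);
+ */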
+
+/**
+ * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @txdlh: Descriptor handle. Returned by HW.
+ * @t_code: Transfer code, as per Titan User Guide,
+ * Transmit Descriptor Format.
+ * Returned by HW.
+ *
+ * Retrieve the _next_ completed descriptor.
+ * HW uses channel callback (*vxge_hw_channel_callback_f) to notify the
+ * driver of new completed descriptors. After that
+ * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest of
+ * the completions (the very first completion is passed by HW via
+ * vxge_hw_channel_callback_f).
+ *
+ * Implementation-wise, the driver is free to call
+ * vxge_hw_fifo_txdl_next_completed either immediately from inside the
+ * channel callback, or in a deferred fashion and separate (from HW)
+ * context.
+ *
+ * Non-zero @t_code means failure to process the descriptor.
+ * The failure could happen, for instance, when the link is
+ * down, in which case Titan completes the descriptor because it
+ * is not able to send the data out.
+ *
+ * For details please refer to Titan User Guide.
+ *
+ * Returns: VXGE_HW_OK - success.
+ * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
+ * are currently available for processing.
+ *
+ */
+enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
+ struct __vxge_hw_fifo *fifo, void **txdlh,
+ enum vxge_hw_fifo_tcode *t_code)
+{
+ struct __vxge_hw_channel *channel;
+ struct vxge_hw_fifo_txd *txdp;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ channel = &fifo->channel;
+
+ vxge_hw_channel_dtr_try_complete(channel, txdlh);
+
+ txdp = *txdlh;
+ if (txdp == NULL) {
+ status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+ goto exit;
+ }
+
+ /* check whether host owns it */
+ if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
+
+ vxge_assert(txdp->host_control != 0);
+
+ vxge_hw_channel_dtr_complete(channel);
+
+ *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
+
+ if (fifo->stats->common_stats.usage_cnt > 0)
+ fifo->stats->common_stats.usage_cnt--;
+
+ status = VXGE_HW_OK;
+ goto exit;
+ }
+
+ /* no more completions */
+ *txdlh = NULL;
+ status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+exit:
+ return status;
+}
+
+/**
+ * vxge_hw_fifo_handle_tcode - Handle transfer code.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @txdlh: Descriptor handle.
+ * @t_code: One of the enumerated (and documented in the Titan user guide)
+ * "transfer codes".
+ *
+ * Handle descriptor's transfer code. The latter comes with each completed
+ * descriptor.
+ *
+ * Returns: one of the enum vxge_hw_status{} enumerated types.
+ * VXGE_HW_OK - for success.
+ * VXGE_HW_ERR_CRITICAL - when encounters critical error.
+ */
+enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
+ void *txdlh,
+ enum vxge_hw_fifo_tcode t_code)
+{
+ struct __vxge_hw_channel *channel;
+
+ enum vxge_hw_status status = VXGE_HW_OK;
+ channel = &fifo->channel;
+
+	if ((t_code & 0x7) > 0x4) {
+ status = VXGE_HW_ERR_INVALID_TCODE;
+ goto exit;
+ }
+
+ fifo->stats->txd_t_code_err_cnt[t_code]++;
+exit:
+ return status;
+}
+
+/**
+ * vxge_hw_fifo_txdl_free - Free descriptor.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @txdlh: Descriptor handle.
+ *
+ * Free the reserved descriptor. This operation is "symmetrical" to
+ * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
+ * lifecycle.
+ *
+ * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor can again
+ * be:
+ *
+ * - reserved (vxge_hw_fifo_txdl_reserve);
+ *
+ * - posted (vxge_hw_fifo_txdl_post);
+ *
+ * - completed (vxge_hw_fifo_txdl_next_completed);
+ *
+ * - and recycled again (vxge_hw_fifo_txdl_free).
+ *
+ * For alternative state transitions and more details please refer to
+ * the design doc.
+ *
+ */
+void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
+{
+ struct __vxge_hw_fifo_txdl_priv *txdl_priv;
+ u32 max_frags;
+ struct __vxge_hw_channel *channel;
+
+ channel = &fifo->channel;
+
+ txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
+ (struct vxge_hw_fifo_txd *)txdlh);
+
+ max_frags = fifo->config->max_frags;
+
+ vxge_hw_channel_dtr_free(channel, txdlh);
+}
+
+/**
+ * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
+ * to MAC address table.
+ * @vp: Vpath handle.
+ * @macaddr: MAC address to be added for this vpath into the list
+ * @macaddr_mask: MAC address mask for macaddr
+ * @duplicate_mode: Duplicate MAC address add mode. Please see
+ * enum vxge_hw_vpath_mac_addr_add_mode{}
+ *
+ * Adds the given mac address and mac address mask into the list for this
+ * vpath.
+ * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
+ * vxge_hw_vpath_mac_addr_get_next
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_add(
+ struct __vxge_hw_vpath_handle *vp,
+ u8 (macaddr)[ETH_ALEN],
+ u8 (macaddr_mask)[ETH_ALEN],
+ enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
+{
+ u32 i;
+ u64 data1 = 0ULL;
+ u64 data2 = 0ULL;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if (vp == NULL) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ data1 <<= 8;
+ data1 |= (u8)macaddr[i];
+
+ data2 <<= 8;
+ data2 |= (u8)macaddr_mask[i];
+ }
+
+ switch (duplicate_mode) {
+ case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
+ i = 0;
+ break;
+ case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
+ i = 1;
+ break;
+ case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
+ i = 2;
+ break;
+ default:
+ i = 0;
+ break;
+ }
+
+ status = __vxge_hw_vpath_rts_table_set(vp,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+ 0,
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
+ VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
+ VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
+exit:
+ return status;
+}
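+
+/*
+ * Packing example (illustrative): for macaddr 00:11:22:33:44:55 the loop
+ * above yields data1 = 0x001122334455ULL, and a mask of ff:ff:ff:ff:ff:ff
+ * yields data2 = 0xffffffffffffULL, i.e. an exact-match entry.
+ */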
+
+/**
+ * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
+ * from MAC address table.
+ * @vp: Vpath handle.
+ * @macaddr: First MAC address entry for this vpath in the list
+ * @macaddr_mask: MAC address mask for macaddr
+ *
+ * Returns the first mac address and mac address mask in the list for this
+ * vpath.
+ * see also: vxge_hw_vpath_mac_addr_get_next
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_get(
+ struct __vxge_hw_vpath_handle *vp,
+ u8 (macaddr)[ETH_ALEN],
+ u8 (macaddr_mask)[ETH_ALEN])
+{
+ u32 i;
+ u64 data1 = 0ULL;
+ u64 data2 = 0ULL;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if (vp == NULL) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+
+ status = __vxge_hw_vpath_rts_table_get(vp,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+ 0, &data1, &data2);
+
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
+
+ data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
+
+ for (i = ETH_ALEN; i > 0; i--) {
+ macaddr[i-1] = (u8)(data1 & 0xFF);
+ data1 >>= 8;
+
+ macaddr_mask[i-1] = (u8)(data2 & 0xFF);
+ data2 >>= 8;
+ }
+exit:
+ return status;
+}
+
+/**
+ * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
+ * vpath from the MAC address table.
+ * @vp: Vpath handle.
+ * @macaddr: Next MAC address entry for this vpath in the list
+ * @macaddr_mask: MAC address mask for macaddr
+ *
+ * Returns the next mac address and mac address mask in the list for this
+ * vpath.
+ * see also: vxge_hw_vpath_mac_addr_get
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_get_next(
+ struct __vxge_hw_vpath_handle *vp,
+ u8 (macaddr)[ETH_ALEN],
+ u8 (macaddr_mask)[ETH_ALEN])
+{
+ u32 i;
+ u64 data1 = 0ULL;
+ u64 data2 = 0ULL;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if (vp == NULL) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+
+ status = __vxge_hw_vpath_rts_table_get(vp,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+ 0, &data1, &data2);
+
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
+
+ data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
+
+ for (i = ETH_ALEN; i > 0; i--) {
+ macaddr[i-1] = (u8)(data1 & 0xFF);
+ data1 >>= 8;
+
+ macaddr_mask[i-1] = (u8)(data2 & 0xFF);
+ data2 >>= 8;
+ }
+
+exit:
+ return status;
+}
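+
+/*
+ * Table walk sketch (illustrative; vp is an open vpath handle):
+ *
+ *	u8 mac[ETH_ALEN], mask[ETH_ALEN];
+ *	enum vxge_hw_status status;
+ *
+ *	status = vxge_hw_vpath_mac_addr_get(vp, mac, mask);
+ *	while (status == VXGE_HW_OK) {
+ *		// ... consume (mac, mask) ...
+ *		status = vxge_hw_vpath_mac_addr_get_next(vp, mac, mask);
+ *	}
+ */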
+
+/**
+ * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
+ * from the MAC address table.
+ * @vp: Vpath handle.
+ * @macaddr: MAC address to be deleted for this vpath from the list
+ * @macaddr_mask: MAC address mask for macaddr
+ *
+ * Deletes the given mac address and mac address mask from the list for this
+ * vpath.
+ * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
+ * vxge_hw_vpath_mac_addr_get_next
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_delete(
+ struct __vxge_hw_vpath_handle *vp,
+ u8 (macaddr)[ETH_ALEN],
+ u8 (macaddr_mask)[ETH_ALEN])
+{
+ u32 i;
+ u64 data1 = 0ULL;
+ u64 data2 = 0ULL;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if (vp == NULL) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ data1 <<= 8;
+ data1 |= (u8)macaddr[i];
+
+ data2 <<= 8;
+ data2 |= (u8)macaddr_mask[i];
+ }
+
+ status = __vxge_hw_vpath_rts_table_set(vp,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+ 0,
+ VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
+ VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
+exit:
+ return status;
+}
+
+/**
+ * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
+ * to vlan id table.
+ * @vp: Vpath handle.
+ * @vid: vlan id to be added for this vpath into the list
+ *
+ * Adds the given vlan id into the list for this vpath.
+ * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
+ * vxge_hw_vpath_vid_get_next
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if (vp == NULL) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+
+ status = __vxge_hw_vpath_rts_table_set(vp,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
+ 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
+exit:
+ return status;
+}
+
+/**
+ * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
+ * from vlan id table.
+ * @vp: Vpath handle.
+ * @vid: Buffer to return vlan id
+ *
+ * Returns the first vlan id in the list for this vpath.
+ * see also: vxge_hw_vpath_vid_get_next
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
+{
+ u64 data;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if (vp == NULL) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+
+ status = __vxge_hw_vpath_rts_table_get(vp,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
+ 0, vid, &data);
+
+ *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
+exit:
+ return status;
+}
+
+/**
+ * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
+ * from the vlan id table.
+ * @vp: Vpath handle.
+ * @vid: vlan id to be deleted for this vpath from the list
+ *
+ * Deletes the given vlan id from the list for this vpath.
+ * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
+ * vxge_hw_vpath_vid_get_next
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if (vp == NULL) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+
+ status = __vxge_hw_vpath_rts_table_set(vp,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
+ 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
+exit:
+ return status;
+}
+
+/**
+ * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
+ * @vp: Vpath handle.
+ *
+ * Enable promiscuous mode of Titan-e operation.
+ *
+ * See also: vxge_hw_vpath_promisc_disable().
+ */
+enum vxge_hw_status vxge_hw_vpath_promisc_enable(
+ struct __vxge_hw_vpath_handle *vp)
+{
+ u64 val64;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+
+ vpath = vp->vpath;
+
+ /* Enable promiscuous mode for function 0 only */
+ if (!(vpath->hldev->access_rights &
+ VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
+ return VXGE_HW_OK;
+
+ val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+
+ if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
+
+ val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
+ VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
+ VXGE_HW_RXMAC_VCFG0_BCAST_EN |
+ VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
+
+ writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+ }
+exit:
+ return status;
+}
+
+/**
+ * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
+ * @vp: Vpath handle.
+ *
+ * Disable promiscuous mode of Titan-e operation.
+ *
+ * See also: vxge_hw_vpath_promisc_enable().
+ */
+enum vxge_hw_status vxge_hw_vpath_promisc_disable(
+ struct __vxge_hw_vpath_handle *vp)
+{
+ u64 val64;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+
+ vpath = vp->vpath;
+
+ val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+
+ if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
+
+ val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
+ VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
+ VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
+
+ writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+ }
+exit:
+ return status;
+}
+
+/*
+ * vxge_hw_vpath_bcast_enable - Enable broadcast
+ * @vp: Vpath handle.
+ *
+ * Enable receiving broadcasts.
+ */
+enum vxge_hw_status vxge_hw_vpath_bcast_enable(
+ struct __vxge_hw_vpath_handle *vp)
+{
+ u64 val64;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+
+ vpath = vp->vpath;
+
+ val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+
+ if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
+ val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
+ writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+ }
+exit:
+ return status;
+}
+
+/**
+ * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
+ * @vp: Vpath handle.
+ *
+ * Enable Titan-e multicast addresses.
+ * Returns: VXGE_HW_OK on success.
+ *
+ */
+enum vxge_hw_status vxge_hw_vpath_mcast_enable(
+ struct __vxge_hw_vpath_handle *vp)
+{
+ u64 val64;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+
+ vpath = vp->vpath;
+
+ val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+
+ if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
+ val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
+ writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+ }
+exit:
+ return status;
+}
+
+/**
+ * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
+ * @vp: Vpath handle.
+ *
+ * Disable Titan-e multicast addresses.
+ * Returns: VXGE_HW_OK - success.
+ * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
+{
+ u64 val64;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+
+ vpath = vp->vpath;
+
+ val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+
+ if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
+ val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
+ writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+ }
+exit:
+ return status;
+}
+
+/*
+ * vxge_hw_vpath_alarm_process - Process Alarms.
+ * @vpath: Virtual Path.
+ * @skip_alarms: Do not clear the alarms
+ *
+ * Process vpath alarms.
+ *
+ */
+enum vxge_hw_status vxge_hw_vpath_alarm_process(
+ struct __vxge_hw_vpath_handle *vp,
+ u32 skip_alarms)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if (vp == NULL) {
+ status = VXGE_HW_ERR_INVALID_HANDLE;
+ goto exit;
+ }
+
+ status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
+exit:
+ return status;
+}
+
+/**
+ * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
+ * alarms
+ * @vp: Virtual Path handle.
+ * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
+ *             interrupts (can be repeated). If the fifo or ring is not
+ *             enabled, the MSIX vector for it should be set to 0
+ * @alarm_msix_id: MSIX vector for alarm.
+ *
+ * This API associates the given MSIX vector numbers with the four TIM
+ * interrupts and the alarm interrupt.
+ */
+void
+vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
+ int alarm_msix_id)
+{
+ u64 val64;
+ struct __vxge_hw_virtualpath *vpath = vp->vpath;
+ struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
+ u32 vp_id = vp->vpath->vp_id;
+
+ val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
+ (vp_id * 4) + tim_msix_id[0]) |
+ VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
+ (vp_id * 4) + tim_msix_id[1]);
+
+ writeq(val64, &vp_reg->interrupt_cfg0);
+
+ writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
+ (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
+ &vp_reg->interrupt_cfg2);
+
+ if (vpath->hldev->config.intr_mode ==
+ VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
+ __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
+ VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
+ 0, 32), &vp_reg->one_shot_vect0_en);
+ __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
+ VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
+ 0, 32), &vp_reg->one_shot_vect1_en);
+ __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
+ VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
+ 0, 32), &vp_reg->one_shot_vect2_en);
+ }
+}
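+
+/*
+ * Vector numbering example (illustrative): with four MSIX slots per vpath,
+ * vpath 2 called with tim_msix_id = {0, 1, 0, 0} maps its Tx TIM interrupt
+ * to vector 2 * 4 + 0 = 8 and its Rx TIM interrupt to vector 2 * 4 + 1 = 9;
+ * the alarm vector is computed the same way, relative to first_vp_id.
+ */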
+
+/**
+ * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
+ * @vp: Virtual Path handle.
+ * @msix_id: MSIX ID
+ *
+ * The function masks the msix interrupt for the given msix_id
+ *
+ * Returns: void
+ */
+void
+vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
+{
+ struct __vxge_hw_device *hldev = vp->vpath->hldev;
+ __vxge_hw_pio_mem_write32_upper(
+ (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
+ &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
+}
+
+/**
+ * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
+ * @vp: Virtual Path handle.
+ * @msix_id: MSIX ID
+ *
+ * The function clears the msix interrupt for the given msix_id
+ *
+ * Returns: void
+ */
+void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
+{
+ struct __vxge_hw_device *hldev = vp->vpath->hldev;
+
+ if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT))
+ __vxge_hw_pio_mem_write32_upper(
+ (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
+ &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+ else
+ __vxge_hw_pio_mem_write32_upper(
+ (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
+ &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
+}
+
+/**
+ * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
+ * @vp: Virtual Path handle.
+ * @msix_id: MSIX ID
+ *
+ * The function unmasks the msix interrupt for the given msix_id
+ *
+ * Returns: void
+ */
+void
+vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
+{
+ struct __vxge_hw_device *hldev = vp->vpath->hldev;
+ __vxge_hw_pio_mem_write32_upper(
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
+ &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
+}
+
+/**
+ * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
+ * @vp: Virtual Path handle.
+ *
+ * Mask Tx and Rx vpath interrupts.
+ *
+ * See also: vxge_hw_vpath_inta_unmask_tx_rx()
+ */
+void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
+{
+	u64 tim_int_mask0[4] = {[0 ... 3] = 0};
+	u32 tim_int_mask1[4] = {[0 ... 3] = 0};
+ u64 val64;
+ struct __vxge_hw_device *hldev = vp->vpath->hldev;
+
+ VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
+ tim_int_mask1, vp->vpath->vp_id);
+
+ val64 = readq(&hldev->common_reg->tim_int_mask0);
+
+ if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
+ (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
+ writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
+ tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
+ &hldev->common_reg->tim_int_mask0);
+ }
+
+ val64 = readl(&hldev->common_reg->tim_int_mask1);
+
+ if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
+ (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
+ __vxge_hw_pio_mem_write32_upper(
+ (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
+ tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
+ &hldev->common_reg->tim_int_mask1);
+ }
+}
+
+/**
+ * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
+ * @vp: Virtual Path handle.
+ *
+ * Unmask Tx and Rx vpath interrupts.
+ *
+ * See also: vxge_hw_vpath_inta_mask_tx_rx()
+ */
+void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
+{
+	u64 tim_int_mask0[4] = {[0 ... 3] = 0};
+	u32 tim_int_mask1[4] = {[0 ... 3] = 0};
+ u64 val64;
+ struct __vxge_hw_device *hldev = vp->vpath->hldev;
+
+ VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
+ tim_int_mask1, vp->vpath->vp_id);
+
+ val64 = readq(&hldev->common_reg->tim_int_mask0);
+
+ if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
+ (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
+ writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
+ tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
+ &hldev->common_reg->tim_int_mask0);
+ }
+
+ if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
+ (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
+ __vxge_hw_pio_mem_write32_upper(
+ (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
+ tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
+ &hldev->common_reg->tim_int_mask1);
+ }
+}
+
+/**
+ * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
+ * descriptors and process the same.
+ * @ring: Handle to the ring object used for receive
+ *
+ * The function polls the Rx for the completed descriptors and calls
+ * the driver via supplied completion callback.
+ *
+ * Returns: VXGE_HW_OK, if the polling is completed successfully.
+ * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
+ * descriptors available which are yet to be processed.
+ *
+ * See also: vxge_hw_vpath_poll_tx()
+ */
+enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
+{
+ u8 t_code;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ void *first_rxdh;
+ u64 val64 = 0;
+ int new_count = 0;
+
+ ring->cmpl_cnt = 0;
+
+ status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
+ if (status == VXGE_HW_OK)
+ ring->callback(ring, first_rxdh,
+ t_code, ring->channel.userdata);
+
+ if (ring->cmpl_cnt != 0) {
+ ring->doorbell_cnt += ring->cmpl_cnt;
+ if (ring->doorbell_cnt >= ring->rxds_limit) {
+ /*
+ * Each RxD is of 4 qwords, update the number of
+ * qwords replenished
+ */
+ new_count = (ring->doorbell_cnt * 4);
+
+ /* For each block add 4 more qwords */
+ ring->total_db_cnt += ring->doorbell_cnt;
+ if (ring->total_db_cnt >= ring->rxds_per_block) {
+ new_count += 4;
+ /* Reset total count */
+ ring->total_db_cnt %= ring->rxds_per_block;
+ }
+ writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
+ &ring->vp_reg->prc_rxd_doorbell);
+ val64 =
+ readl(&ring->common_reg->titan_general_int_status);
+ ring->doorbell_cnt = 0;
+ }
+ }
+
+ return status;
+}
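+
+/*
+ * Doorbell arithmetic example (illustrative): if 16 RxDs were replenished,
+ * new_count starts at 16 * 4 = 64 qwords; each time total_db_cnt crosses
+ * rxds_per_block another 4 qwords are added for the per-block header, and
+ * the running count then wraps modulo rxds_per_block.
+ */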
+
+/**
+ * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
+ * the same.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @skb_ptr: Buffer used by the completion callback to return the pointers
+ *           of completed skbs, to be freed by the caller
+ * @nr_skb: Number of skb pointers that fit in the @skb_ptr buffer
+ * @more: Set by the completion callback when further completions remain
+ *
+ * The function polls the Tx for the completed descriptors and calls
+ * the driver via supplied completion callback.
+ *
+ * Returns: VXGE_HW_OK, if the polling is completed successfully.
+ * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
+ * descriptors available which are yet to be processed.
+ */
+enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
+ struct sk_buff ***skb_ptr, int nr_skb,
+ int *more)
+{
+ enum vxge_hw_fifo_tcode t_code;
+ void *first_txdlh;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct __vxge_hw_channel *channel;
+
+ channel = &fifo->channel;
+
+ status = vxge_hw_fifo_txdl_next_completed(fifo,
+ &first_txdlh, &t_code);
+ if (status == VXGE_HW_OK)
+ if (fifo->callback(fifo, first_txdlh, t_code,
+ channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
+ status = VXGE_HW_COMPLETIONS_REMAIN;
+
+ return status;
+}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
new file mode 100644
index 000000000000..4a518a3b131c
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
@@ -0,0 +1,2298 @@
+/******************************************************************************
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ *
+ * vxge-traffic.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
+ * Virtualized Server Adapter.
+ * Copyright(c) 2002-2010 Exar Corp.
+ ******************************************************************************/
+#ifndef VXGE_TRAFFIC_H
+#define VXGE_TRAFFIC_H
+
+#include "vxge-reg.h"
+#include "vxge-version.h"
+
+#define VXGE_HW_DTR_MAX_T_CODE 16
+#define VXGE_HW_ALL_FOXES 0xFFFFFFFFFFFFFFFFULL
+#define VXGE_HW_INTR_MASK_ALL 0xFFFFFFFFFFFFFFFFULL
+#define VXGE_HW_MAX_VIRTUAL_PATHS 17
+
+#define VXGE_HW_MAC_MAX_MAC_PORT_ID 2
+
+#define VXGE_HW_DEFAULT_32 0xffffffff
+/* frame sizes */
+#define VXGE_HW_HEADER_802_2_SIZE 3
+#define VXGE_HW_HEADER_SNAP_SIZE 5
+#define VXGE_HW_HEADER_VLAN_SIZE 4
+#define VXGE_HW_MAC_HEADER_MAX_SIZE \
+ (ETH_HLEN + \
+ VXGE_HW_HEADER_802_2_SIZE + \
+ VXGE_HW_HEADER_VLAN_SIZE + \
+ VXGE_HW_HEADER_SNAP_SIZE)
+
+/* 32bit alignments */
+#define VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN 2
+#define VXGE_HW_HEADER_802_2_SNAP_ALIGN 2
+#define VXGE_HW_HEADER_802_2_ALIGN 3
+#define VXGE_HW_HEADER_SNAP_ALIGN 1
+
+#define VXGE_HW_L3_CKSUM_OK 0xFFFF
+#define VXGE_HW_L4_CKSUM_OK 0xFFFF
+
+/* Forward declarations */
+struct __vxge_hw_device;
+struct __vxge_hw_vpath_handle;
+struct vxge_hw_vp_config;
+struct __vxge_hw_virtualpath;
+struct __vxge_hw_channel;
+struct __vxge_hw_fifo;
+struct __vxge_hw_ring;
+struct vxge_hw_ring_attr;
+struct vxge_hw_mempool;
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+/*VXGE_HW_STATUS_H*/
+
+#define VXGE_HW_EVENT_BASE 0
+#define VXGE_LL_EVENT_BASE 100
+
+/**
+ * enum vxge_hw_event- Enumerates slow-path HW events.
+ * @VXGE_HW_EVENT_UNKNOWN: Unknown (and invalid) event.
+ * @VXGE_HW_EVENT_SERR: Serious vpath hardware error event.
+ * @VXGE_HW_EVENT_ECCERR: vpath ECC error event.
+ * @VXGE_HW_EVENT_VPATH_ERR: Error local to the respective vpath
+ * @VXGE_HW_EVENT_FIFO_ERR: FIFO Doorbell fifo error.
+ * @VXGE_HW_EVENT_SRPCIM_SERR: srpcim hardware error event.
+ * @VXGE_HW_EVENT_MRPCIM_SERR: mrpcim hardware error event.
+ * @VXGE_HW_EVENT_MRPCIM_ECCERR: mrpcim ecc error event.
+ * @VXGE_HW_EVENT_RESET_START: Privileged entity is starting device reset
+ * @VXGE_HW_EVENT_RESET_COMPLETE: Device reset has been completed
+ * @VXGE_HW_EVENT_SLOT_FREEZE: Slot-freeze event. Driver tries to distinguish
+ * slot-freeze from the rest critical events (e.g. ECC) when it is
+ * impossible to PIO read "through" the bus, i.e. when getting all-foxes.
+ *
+ * enum vxge_hw_event enumerates slow-path HW events.
+ *
+ * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
+ * vxge_uld_link_down_f{}.
+ */
+enum vxge_hw_event {
+ VXGE_HW_EVENT_UNKNOWN = 0,
+ /* HW events */
+ VXGE_HW_EVENT_RESET_START = VXGE_HW_EVENT_BASE + 1,
+ VXGE_HW_EVENT_RESET_COMPLETE = VXGE_HW_EVENT_BASE + 2,
+ VXGE_HW_EVENT_LINK_DOWN = VXGE_HW_EVENT_BASE + 3,
+ VXGE_HW_EVENT_LINK_UP = VXGE_HW_EVENT_BASE + 4,
+ VXGE_HW_EVENT_ALARM_CLEARED = VXGE_HW_EVENT_BASE + 5,
+ VXGE_HW_EVENT_ECCERR = VXGE_HW_EVENT_BASE + 6,
+ VXGE_HW_EVENT_MRPCIM_ECCERR = VXGE_HW_EVENT_BASE + 7,
+ VXGE_HW_EVENT_FIFO_ERR = VXGE_HW_EVENT_BASE + 8,
+ VXGE_HW_EVENT_VPATH_ERR = VXGE_HW_EVENT_BASE + 9,
+ VXGE_HW_EVENT_CRITICAL_ERR = VXGE_HW_EVENT_BASE + 10,
+ VXGE_HW_EVENT_SERR = VXGE_HW_EVENT_BASE + 11,
+ VXGE_HW_EVENT_SRPCIM_SERR = VXGE_HW_EVENT_BASE + 12,
+ VXGE_HW_EVENT_MRPCIM_SERR = VXGE_HW_EVENT_BASE + 13,
+ VXGE_HW_EVENT_SLOT_FREEZE = VXGE_HW_EVENT_BASE + 14,
+};
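+
+/*
+ * The slot-freeze heuristic above keys off PIO reads returning all-foxes
+ * when the bus is dead. A minimal detection sketch (the register and
+ * helper names are hypothetical):
+ *
+ *	u64 val = readq(&vp_reg->some_status_reg);
+ *	if (val == VXGE_HW_ALL_FOXES)
+ *		handle_event(VXGE_HW_EVENT_SLOT_FREEZE);
+ */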
+
+#define VXGE_HW_SET_LEVEL(a, b) (((a) > (b)) ? (a) : (b))
+
+/*
+ * struct vxge_hw_mempool_dma - Represents DMA objects passed to the
+ * caller.
+ */
+struct vxge_hw_mempool_dma {
+ dma_addr_t addr;
+ struct pci_dev *handle;
+ struct pci_dev *acc_handle;
+};
+
+/*
+ * vxge_hw_mempool_item_f - Mempool item alloc/free callback
+ * @mempoolh: Memory pool handle.
+ * @memblock: Address of memory block
+ * @memblock_index: Index of memory block
+ * @item: Item that gets allocated or freed.
+ * @index: Item's index in the memory pool.
+ * @is_last: True, if this item is the last one in the pool; false - otherwise.
+ * @userdata: Per-pool user context.
+ *
+ * Memory pool allocation/deallocation callback.
+ */
+
+/*
+ * struct vxge_hw_mempool - Memory pool.
+ */
+struct vxge_hw_mempool {
+
+ void (*item_func_alloc)(
+ struct vxge_hw_mempool *mempoolh,
+ u32 memblock_index,
+ struct vxge_hw_mempool_dma *dma_object,
+ u32 index,
+ u32 is_last);
+
+ void *userdata;
+ void **memblocks_arr;
+ void **memblocks_priv_arr;
+ struct vxge_hw_mempool_dma *memblocks_dma_arr;
+ struct __vxge_hw_device *devh;
+ u32 memblock_size;
+ u32 memblocks_max;
+ u32 memblocks_allocated;
+ u32 item_size;
+ u32 items_max;
+ u32 items_initial;
+ u32 items_current;
+ u32 items_per_memblock;
+ void **items_arr;
+ u32 items_priv_size;
+};
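+
+/*
+ * A minimal item_func_alloc sketch matching the prototype above (the
+ * function name is hypothetical; what gets stored per item is up to
+ * the pool owner):
+ *
+ *	static void my_item_alloc(struct vxge_hw_mempool *mempoolh,
+ *				  u32 memblock_index,
+ *				  struct vxge_hw_mempool_dma *dma_object,
+ *				  u32 index, u32 is_last)
+ *	{
+ *		void *item = mempoolh->items_arr[index];
+ *
+ *		record dma_object->addr against item here
+ *	}
+ */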
+
+#define VXGE_HW_MAX_INTR_PER_VP 4
+#define VXGE_HW_VPATH_INTR_TX 0
+#define VXGE_HW_VPATH_INTR_RX 1
+#define VXGE_HW_VPATH_INTR_EINTA 2
+#define VXGE_HW_VPATH_INTR_BMAP 3
+
+#define VXGE_HW_BLOCK_SIZE 4096
+
+/**
+ * struct vxge_hw_tim_intr_config - Titan Tim interrupt configuration.
+ * @intr_enable: Set to 1, if interrupt is enabled.
+ * @btimer_val: Boundary Timer Initialization value in units of 272 ns.
+ * @timer_ac_en: Timer Automatic Cancel. 1 : Automatic Canceling Enable: when
+ * asserted, other interrupt-generating entities will cancel the
+ * scheduled timer interrupt.
+ * @timer_ci_en: Timer Continuous Interrupt. 1 : Continuous Interrupting Enable:
+ * When asserted, an interrupt will be generated every time the
+ * boundary timer expires, even if no traffic has been transmitted
+ * on this interrupt.
+ * @timer_ri_en: Timer Consecutive (Re-) Interrupt. 1 : Consecutive
+ * (Re-) Interrupt Enable: When asserted, an interrupt will be
+ * generated the next time the timer expires, even if no traffic has
+ * been transmitted on this interrupt. (This will only happen once
+ * each time that this value is written to the TIM.) This bit is
+ * cleared by H/W at the end of the current-timer-interval when
+ * the interrupt is triggered.
+ * @rtimer_val: Restriction Timer Initialization value in units of 272 ns.
+ * @util_sel: Utilization Selector. Selects which of the workload approximations
+ * to use (e.g. legacy Tx utilization, Tx/Rx utilization, host
+ * specified utilization etc.), selects one of
+ * the 17 host configured values.
+ * 0-Virtual Path 0
+ * 1-Virtual Path 1
+ * ...
+ * 16-Virtual Path 16
+ * 17-Legacy Tx network utilization, provided by TPA
+ * 18-Legacy Rx network utilization, provided by FAU
+ * 19-Average of legacy Rx and Tx utilization calculated from link
+ * utilization values.
+ * 20-31-Invalid configurations
+ * 32-Host utilization for Virtual Path 0
+ * 33-Host utilization for Virtual Path 1
+ * ...
+ * 48-Host utilization for Virtual Path 16
+ * 49-Legacy Tx network utilization, provided by TPA
+ * 50-Legacy Rx network utilization, provided by FAU
+ * 51-Average of legacy Rx and Tx utilization calculated from
+ * link utilization values.
+ * 52-63-Invalid configurations
+ * @ltimer_val: Latency Timer Initialization Value in units of 272 ns.
+ * @txd_cnt_en: TxD Return Event Count Enable. This configuration bit when set
+ * to 1 enables counting of TxD0 returns (signalled by PCCs)
+ * towards utilization event count values.
+ * @urange_a: Defines the upper limit (in percent) for this utilization range
+ * to be active. This range is considered active
+ * if 0 <= UTIL <= URNG_A
+ * and the UEC_A field (below) is non-zero.
+ * @uec_a: Utilization Event Count A. If this range is active, the adapter will
+ * wait until UEC_A events have occurred on the interrupt before
+ * generating an interrupt.
+ * @urange_b: Link utilization range B.
+ * @uec_b: Utilization Event Count B.
+ * @urange_c: Link utilization range C.
+ * @uec_c: Utilization Event Count C.
+ * @urange_d: Link utilization range D.
+ * @uec_d: Utilization Event Count D.
+ *
+ * Traffic Interrupt Controller Module interrupt configuration.
+ */
+struct vxge_hw_tim_intr_config {
+
+ u32 intr_enable;
+#define VXGE_HW_TIM_INTR_ENABLE 1
+#define VXGE_HW_TIM_INTR_DISABLE 0
+#define VXGE_HW_TIM_INTR_DEFAULT 0
+
+ u32 btimer_val;
+#define VXGE_HW_MIN_TIM_BTIMER_VAL 0
+#define VXGE_HW_MAX_TIM_BTIMER_VAL 67108864
+#define VXGE_HW_USE_FLASH_DEFAULT (~0)
+
+ u32 timer_ac_en;
+#define VXGE_HW_TIM_TIMER_AC_ENABLE 1
+#define VXGE_HW_TIM_TIMER_AC_DISABLE 0
+
+ u32 timer_ci_en;
+#define VXGE_HW_TIM_TIMER_CI_ENABLE 1
+#define VXGE_HW_TIM_TIMER_CI_DISABLE 0
+
+ u32 timer_ri_en;
+#define VXGE_HW_TIM_TIMER_RI_ENABLE 1
+#define VXGE_HW_TIM_TIMER_RI_DISABLE 0
+
+ u32 rtimer_val;
+#define VXGE_HW_MIN_TIM_RTIMER_VAL 0
+#define VXGE_HW_MAX_TIM_RTIMER_VAL 67108864
+
+ u32 util_sel;
+#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL 17
+#define VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL 18
+#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_RX_AVE_NET_UTIL 19
+#define VXGE_HW_TIM_UTIL_SEL_PER_VPATH 63
+
+ u32 ltimer_val;
+#define VXGE_HW_MIN_TIM_LTIMER_VAL 0
+#define VXGE_HW_MAX_TIM_LTIMER_VAL 67108864
+
+ /* Line utilization interrupts */
+ u32 urange_a;
+#define VXGE_HW_MIN_TIM_URANGE_A 0
+#define VXGE_HW_MAX_TIM_URANGE_A 100
+
+ u32 uec_a;
+#define VXGE_HW_MIN_TIM_UEC_A 0
+#define VXGE_HW_MAX_TIM_UEC_A 65535
+
+ u32 urange_b;
+#define VXGE_HW_MIN_TIM_URANGE_B 0
+#define VXGE_HW_MAX_TIM_URANGE_B 100
+
+ u32 uec_b;
+#define VXGE_HW_MIN_TIM_UEC_B 0
+#define VXGE_HW_MAX_TIM_UEC_B 65535
+
+ u32 urange_c;
+#define VXGE_HW_MIN_TIM_URANGE_C 0
+#define VXGE_HW_MAX_TIM_URANGE_C 100
+
+ u32 uec_c;
+#define VXGE_HW_MIN_TIM_UEC_C 0
+#define VXGE_HW_MAX_TIM_UEC_C 65535
+
+ u32 uec_d;
+#define VXGE_HW_MIN_TIM_UEC_D 0
+#define VXGE_HW_MAX_TIM_UEC_D 65535
+};
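+
+/*
+ * A configuration sketch (values are illustrative only; btimer_val and
+ * ltimer_val count 272 ns units per the field descriptions above):
+ *
+ *	struct vxge_hw_tim_intr_config tti = {
+ *		.intr_enable	= VXGE_HW_TIM_INTR_ENABLE,
+ *		.btimer_val	= 250,
+ *		.timer_ac_en	= VXGE_HW_TIM_TIMER_AC_ENABLE,
+ *		.timer_ci_en	= VXGE_HW_TIM_TIMER_CI_DISABLE,
+ *		.timer_ri_en	= VXGE_HW_TIM_TIMER_RI_DISABLE,
+ *		.rtimer_val	= 0,
+ *		.util_sel	= VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL,
+ *		.ltimer_val	= 1000,
+ *		.urange_a	= 10, .uec_a = 1,
+ *		.urange_b	= 40, .uec_b = 20,
+ *		.urange_c	= 70, .uec_c = 40,
+ *		.uec_d		= 80,
+ *	};
+ */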
+
+#define VXGE_HW_STATS_OP_READ 0
+#define VXGE_HW_STATS_OP_CLEAR_STAT 1
+#define VXGE_HW_STATS_OP_CLEAR_ALL_VPATH_STATS 2
+#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS_OF_LOC 2
+#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS 3
+
+#define VXGE_HW_STATS_LOC_AGGR 17
+#define VXGE_HW_STATS_AGGRn_OFFSET 0x00720
+
+#define VXGE_HW_STATS_VPATH_TX_OFFSET 0x0
+#define VXGE_HW_STATS_VPATH_RX_OFFSET 0x00090
+
+#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET (0x001d0 >> 3)
+#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(bits) \
+ vxge_bVALn(bits, 0, 32)
+
+#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(bits) \
+ vxge_bVALn(bits, 32, 32)
+
+#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET (0x001d8 >> 3)
+#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(bits) \
+ vxge_bVALn(bits, 0, 32)
+
+#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(bits) \
+ vxge_bVALn(bits, 32, 32)
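+
+/*
+ * Each 64-bit programmable-event word packs two 32-bit counters; the
+ * GET macros above split it. Sketch (the stats-read helper name is
+ * hypothetical):
+ *
+ *	u64 bits = read_vpath_stat(vp,
+ *			VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
+ *	u32 vnum0 = VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(bits);
+ *	u32 vnum1 = VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(bits);
+ */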
+
+/**
+ * struct vxge_hw_xmac_aggr_stats - Per-Aggregator XMAC Statistics
+ *
+ * @tx_frms: Count of data frames transmitted on this Aggregator on all
+ * its Aggregation ports. Does not include LACPDUs or Marker PDUs.
+ * However, does include frames discarded by the Distribution
+ * function.
+ * @tx_data_octets: Count of data and padding octets of frames transmitted
+ * on this Aggregator on all its Aggregation ports. Does not include
+ * octets of LACPDUs or Marker PDUs. However, does include octets of
+ * frames discarded by the Distribution function.
+ * @tx_mcast_frms: Count of data frames transmitted (to a group destination
+ * address other than the broadcast address) on this Aggregator on
+ * all its Aggregation ports. Does not include LACPDUs or Marker
+ * PDUs. However, does include frames discarded by the Distribution
+ * function.
+ * @tx_bcast_frms: Count of broadcast data frames transmitted on this Aggregator
+ * on all its Aggregation ports. Does not include LACPDUs or Marker
+ * PDUs. However, does include frames discarded by the Distribution
+ * function.
+ * @tx_discarded_frms: Count of data frames to be transmitted on this Aggregator
+ * that are discarded by the Distribution function. This occurs when
+ * conversations are allocated to different ports and have to be
+ * flushed on old ports.
+ * @tx_errored_frms: Count of data frames transmitted on this Aggregator that
+ * experience transmission errors on its Aggregation ports.
+ * @rx_frms: Count of data frames received on this Aggregator on all its
+ * Aggregation ports. Does not include LACPDUs or Marker PDUs.
+ * Also, does not include frames discarded by the Collection
+ * function.
+ * @rx_data_octets: Count of data and padding octets of frames received on this
+ * Aggregator on all its Aggregation ports. Does not include octets
+ * of LACPDUs or Marker PDUs. Also, does not include
+ * octets of frames
+ * discarded by the Collection function.
+ * @rx_mcast_frms: Count of data frames received (from a group destination
+ * address other than the broadcast address) on this Aggregator on
+ * all its Aggregation ports. Does not include LACPDUs or Marker
+ * PDUs. Also, does not include frames discarded by the Collection
+ * function.
+ * @rx_bcast_frms: Count of broadcast data frames received on this Aggregator on
+ * all its Aggregation ports. Does not include LACPDUs or Marker
+ * PDUs. Also, does not include frames discarded by the Collection
+ * function.
+ * @rx_discarded_frms: Count of data frames received on this Aggregator that are
+ * discarded by the Collection function because the Collection
+ * function was disabled on the port on which the frames were received.
+ * @rx_errored_frms: Count of data frames received on this Aggregator that are
+ * discarded by its Aggregation ports, or are discarded by the
+ * Collection function of the Aggregator, or that are discarded by
+ * the Aggregator due to detection of an illegal Slow Protocols PDU.
+ * @rx_unknown_slow_proto_frms: Count of data frames received on this Aggregator
+ * that are discarded by its Aggregation ports due to detection of
+ * an unknown Slow Protocols PDU.
+ *
+ * Per aggregator XMAC RX statistics.
+ */
+struct vxge_hw_xmac_aggr_stats {
+/*0x000*/ u64 tx_frms;
+/*0x008*/ u64 tx_data_octets;
+/*0x010*/ u64 tx_mcast_frms;
+/*0x018*/ u64 tx_bcast_frms;
+/*0x020*/ u64 tx_discarded_frms;
+/*0x028*/ u64 tx_errored_frms;
+/*0x030*/ u64 rx_frms;
+/*0x038*/ u64 rx_data_octets;
+/*0x040*/ u64 rx_mcast_frms;
+/*0x048*/ u64 rx_bcast_frms;
+/*0x050*/ u64 rx_discarded_frms;
+/*0x058*/ u64 rx_errored_frms;
+/*0x060*/ u64 rx_unknown_slow_proto_frms;
+} __packed;
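+
+/*
+ * The offset annotations imply a fixed 13 x u64 = 0x68-byte layout,
+ * which __packed preserves; a compile-time guard (sketch, to be placed
+ * in function context) could assert it:
+ *
+ *	BUILD_BUG_ON(sizeof(struct vxge_hw_xmac_aggr_stats) != 0x68);
+ */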
+
+/**
+ * struct vxge_hw_xmac_port_stats - XMAC Port Statistics
+ *
+ * @tx_ttl_frms: Count of successfully transmitted MAC frames
+ * @tx_ttl_octets: Count of total octets of transmitted frames, not including
+ * framing characters (i.e. less framing bits). To determine the
+ * total octets of transmitted frames, including framing characters,
+ * multiply PORTn_TX_TTL_FRMS by 8 and add it to this stat (unless
+ * otherwise configured, this stat only counts frames that have
+ * 8 bytes of preamble for each frame). This stat can be configured
+ * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything
+ * including the preamble octets.
+ * @tx_data_octets: Count of data and padding octets of successfully transmitted
+ * frames.
+ * @tx_mcast_frms: Count of successfully transmitted frames to a group address
+ * other than the broadcast address.
+ * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
+ * group address.
+ * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
+ * Includes discarded frames that are not sent to the network.
+ * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
+ * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
+ * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
+ * are passed to the network.
+ * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent
+ * due to problems within ICMP.
+ * @tx_tcp: Count of transmitted TCP segments. Does not include segments
+ * containing retransmitted octets.
+ * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
+ * @tx_udp: Count of transmitted UDP datagrams.
+ * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
+ * generally occurs when a packet is corrupt somehow, including
+ * packets that have IP version mismatches, invalid Layer 2 control
+ * fields, etc. L3/L4 checksums are not offloaded, but the packet
+ * is still transmitted.
+ * @tx_unknown_protocol: Increments when the TPA encounters an unknown
+ * protocol, such as a new IPv6 extension header, or an unsupported
+ * Routing Type. The packet still has a checksum calculated but it
+ * may be incorrect.
+ * @tx_pause_ctrl_frms: Count of MAC PAUSE control frames that are transmitted.
+ * Since the only control frames supported by this device are
+ * PAUSE frames, this register is a count of all transmitted MAC
+ * control frames.
+ * @tx_marker_pdu_frms: Count of Marker PDUs transmitted
+ * on this Aggregation port.
+ * @tx_lacpdu_frms: Count of LACPDUs transmitted on this Aggregation port.
+ * @tx_drop_ip: Count of transmitted IP datagrams that could not be passed to
+ * the network. Increments because of:
+ * 1) An internal processing error
+ * (such as an uncorrectable ECC error). 2) A frame parsing error
+ * during IP checksum calculation.
+ * @tx_marker_resp_pdu_frms: Count of Marker Response PDUs transmitted on this
+ * Aggregation port.
+ * @tx_xgmii_char2_match: Maintains a count of the number of transmitted XGMII
+ * characters that match a pattern that is programmable through
+ * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
+ * is set to /T/ (i.e. the terminate character), thus the statistic
+ * tracks the number of transmitted Terminate characters.
+ * @tx_xgmii_char1_match: Maintains a count of the number of transmitted XGMII
+ * characters that match a pattern that is programmable through
+ * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
+ * is set to /S/ (i.e. the start character),
+ * thus the statistic tracks
+ * the number of transmitted Start characters.
+ * @tx_xgmii_column2_match: Maintains a count of the number of transmitted XGMII
+ * columns that match a pattern that is programmable through register
+ * XMAC_STATS_TX_XGMII_COLUMN2_PORTn. By default, the pattern is set
+ * to 4 x /E/ (i.e. a column containing all error characters), thus
+ * the statistic tracks the number of Error columns transmitted at
+ * any time. If XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is
+ * set to 1, then this stat increments when COLUMN2 is found within
+ * 'n' clocks after COLUMN1. Here, 'n' is defined by
+ * XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set
+ * to 0, then it means to search anywhere for COLUMN2).
+ * @tx_xgmii_column1_match: Maintains a count of the number of transmitted XGMII
+ * columns that match a pattern that is programmable through register
+ * XMAC_STATS_TX_XGMII_COLUMN1_PORTn. By default, the pattern is set
+ * to 4 x /I/ (i.e. a column containing all idle characters),
+ * thus the statistic tracks the number of transmitted Idle columns.
+ * @tx_any_err_frms: Count of transmitted frames containing any error that
+ * prevents them from being passed to the network. Increments if
+ * there is an ECC while reading the frame out of the transmit
+ * buffer. Also increments if the transmit protocol assist (TPA)
+ * block determines that the frame should not be sent.
+ * @tx_drop_frms: Count of frames that could not be sent for no other reason
+ * than internal MAC processing. Increments once whenever the
+ * transmit buffer is flushed (due to an ECC error on a memory
+ * descriptor).
+ * @rx_ttl_frms: Count of total received MAC frames, including frames received
+ * with frame-too-long, FCS, or length errors. This stat can be
+ * configured (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count
+ * everything, even "frames" as small one byte of preamble.
+ * @rx_vld_frms: Count of successfully received MAC frames. Does not include
+ * frames received with frame-too-long, FCS, or length errors.
+ * @rx_offload_frms: Count of offloaded received frames that are passed to
+ * the host.
+ * @rx_ttl_octets: Count of total octets of received frames, not including
+ * framing characters (i.e. less framing bits). To determine the
+ * total octets of received frames, including framing characters,
+ * multiply PORTn_RX_TTL_FRMS by 8 and add it to this stat (unless
+ * otherwise configured, this stat only counts frames that have 8
+ * bytes of preamble for each frame). This stat can be configured
+ * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything,
+ * even the preamble octets of "frames" as small one byte of preamble
+ * @rx_data_octets: Count of data and padding octets of successfully received
+ * frames. Does not include frames received with frame-too-long,
+ * FCS, or length errors.
+ * @rx_offload_octets: Count of total octets, not including framing
+ * characters, of offloaded received frames that are passed
+ * to the host.
+ * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
+ * nonbroadcast group address. Does not include frames received
+ * with frame-too-long, FCS, or length errors.
+ * @rx_vld_bcast_frms: Count of successfully received MAC frames containing
+ * the broadcast group address. Does not include frames received
+ * with frame-too-long, FCS, or length errors.
+ * @rx_accepted_ucast_frms: Count of successfully received frames containing
+ * a unicast address. Only includes frames that are passed to
+ * the system.
+ * @rx_accepted_nucast_frms: Count of successfully received frames containing
+ * a non-unicast (broadcast or multicast) address. Only includes
+ * frames that are passed to the system. Could include, for instance,
+ * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
+ * register is set to pass FCS-errored frames to the host.
+ * @rx_tagged_frms: Count of received frames containing a VLAN tag.
+ * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
+ * + 18 bytes (+ 22 bytes if VLAN-tagged).
+ * @rx_usized_frms: Count of received frames of length (including FCS, but not
+ * framing bits) less than 64 octets, that are otherwise well-formed.
+ * In other words, counts runts.
+ * @rx_osized_frms: Count of received frames of length (including FCS, but not
+ * framing bits) more than 1518 octets, that are otherwise
+ * well-formed. Note: If register XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING
+ * is set to 1, then "more than 1518 octets" becomes "more than 1518
+ * (1522 if VLAN-tagged) octets".
+ * @rx_frag_frms: Count of received frames of length (including FCS, but not
+ * framing bits) less than 64 octets that had bad FCS. In other
+ * words, counts fragments.
+ * @rx_jabber_frms: Count of received frames of length (including FCS, but not
+ * framing bits) more than 1518 octets that had bad FCS. In other
+ * words, counts jabbers. Note: If register
+ * XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING is set to 1, then "more than
+ * 1518 octets" becomes "more than 1518 (1522 if VLAN-tagged)
+ * octets".
+ * @rx_ttl_64_frms: Count of total received MAC frames with length (including
+ * FCS, but not framing bits) of exactly 64 octets. Includes frames
+ * received with frame-too-long, FCS, or length errors.
+ * @rx_ttl_65_127_frms: Count of total received MAC frames with length
+ * (including FCS, but not framing bits) of between 65 and 127
+ * octets inclusive. Includes frames received with frame-too-long,
+ * FCS, or length errors.
+ * @rx_ttl_128_255_frms: Count of total received MAC frames with length
+ * (including FCS, but not framing bits) of between 128 and 255
+ * octets inclusive. Includes frames received with frame-too-long,
+ * FCS, or length errors.
+ * @rx_ttl_256_511_frms: Count of total received MAC frames with length
+ * (including FCS, but not framing bits) of between 256 and 511
+ * octets inclusive. Includes frames received with frame-too-long,
+ * FCS, or length errors.
+ * @rx_ttl_512_1023_frms: Count of total received MAC frames with length
+ * (including FCS, but not framing bits) of between 512 and 1023
+ * octets inclusive. Includes frames received with frame-too-long,
+ * FCS, or length errors.
+ * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length
+ * (including FCS, but not framing bits) of between 1024 and 1518
+ * octets inclusive. Includes frames received with frame-too-long,
+ * FCS, or length errors.
+ * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length
+ * (including FCS, but not framing bits) of between 1519 and 4095
+ * octets inclusive. Includes frames received with frame-too-long,
+ * FCS, or length errors.
+ * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length
+ * (including FCS, but not framing bits) of between 4096 and 8191
+ * octets inclusive. Includes frames received with frame-too-long,
+ * FCS, or length errors.
+ * @rx_ttl_8192_max_frms: Count of total received MAC frames with length
+ * (including FCS, but not framing bits) of between 8192 and
+ * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received
+ * with frame-too-long, FCS, or length errors.
+ * @rx_ttl_gt_max_frms: Count of total received MAC frames with length
+ * (including FCS, but not framing bits) exceeding
+ * RX_MAX_PYLD_LEN+18 (+22 bytes if VLAN-tagged) octets inclusive.
+ * Includes frames received with frame-too-long,
+ * FCS, or length errors.
+ * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams.
+ * @rx_accepted_ip: Count of received IP datagrams that
+ * are passed to the system.
+ * @rx_ip_octets: Count of number of octets in received IP datagrams. Includes
+ * errored IP datagrams.
+ * @rx_err_ip: Count of received IP datagrams containing errors. For example,
+ * bad IP checksum.
+ * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages.
+ * @rx_tcp: Count of received TCP segments. Includes errored TCP segments.
+ * Note: This stat contains a count of all received TCP segments,
+ * regardless of whether or not they pertain to an established
+ * connection.
+ * @rx_udp: Count of received UDP datagrams.
+ * @rx_err_tcp: Count of received TCP segments containing errors. For example,
+ * bad TCP checksum.
+ * @rx_pause_count: Count of number of pause quanta that the MAC has been in
+ * the paused state. Recall, one pause quantum equates to 512
+ * bit times.
+ * @rx_pause_ctrl_frms: Count of received MAC PAUSE control frames.
+ * @rx_unsup_ctrl_frms: Count of received MAC control frames that do not
+ * contain the PAUSE opcode. The sum of RX_PAUSE_CTRL_FRMS and
+ * this register is a count of all received MAC control frames.
+ * Note: This stat may be configured to count all layer 2 errors
+ * (i.e. length errors and FCS errors).
+ * @rx_fcs_err_frms: Count of received MAC frames that do not pass FCS. Does
+ * not include frames received with frame-too-long or
+ * frame-too-short error.
+ * @rx_in_rng_len_err_frms: Count of received frames with a length/type field
+ * value between 46 (42 for VLAN-tagged frames) and 1500 (also 1500
+ * for VLAN-tagged frames), inclusive, that does not match the
+ * number of data octets (including pad) received. Also contains
+ * a count of received frames with a length/type field less than
+ * 46 (42 for VLAN-tagged frames) and the number of data octets
+ * (including pad) received is greater than 46 (42 for VLAN-tagged
+ * frames).
+ * @rx_out_rng_len_err_frms: Count of received frames with length/type field
+ * between 1501 and 1535 decimal, inclusive.
+ * @rx_drop_frms: Count of received frames that could not be passed to the host.
+ * See PORTn_RX_L2_MGMT_DISCARD, PORTn_RX_RPA_DISCARD,
+ * PORTn_RX_TRASH_DISCARD, PORTn_RX_RTS_DISCARD, PORTn_RX_RED_DISCARD
+ * for a list of reasons. Because the RMAC drops one frame at a time,
+ * this stat also indicates the number of drop events.
+ * @rx_discarded_frms: Count of received frames containing
+ * any error that prevents
+ * them from being passed to the system. See PORTn_RX_FCS_DISCARD,
+ * PORTn_RX_LEN_DISCARD, and PORTn_RX_SWITCH_DISCARD for a list of
+ * reasons.
+ * @rx_drop_ip: Count of received IP datagrams that could not be passed to the
+ * host. See PORTn_RX_DROP_FRMS for a list of reasons.
+ * @rx_drop_udp: Count of received UDP datagrams that are not delivered to the
+ * host. See PORTn_RX_DROP_FRMS for a list of reasons.
+ * @rx_marker_pdu_frms: Count of valid Marker PDUs received on this Aggregation
+ * port.
+ * @rx_lacpdu_frms: Count of valid LACPDUs received on this Aggregation port.
+ * @rx_unknown_pdu_frms: Count of received frames (on this Aggregation port)
+ * that carry the Slow Protocols EtherType, but contain an unknown
+ * PDU. Or frames that contain the Slow Protocols group MAC address,
+ * but do not carry the Slow Protocols EtherType.
+ * @rx_marker_resp_pdu_frms: Count of valid Marker Response PDUs received on
+ * this Aggregation port.
+ * @rx_fcs_discard: Count of received frames that are discarded because the
+ * FCS check failed.
+ * @rx_illegal_pdu_frms: Count of received frames (on this Aggregation port)
+ * that carry the Slow Protocols EtherType, but contain a badly
+ * formed PDU. Or frames that carry the Slow Protocols EtherType,
+ * but contain an illegal value of Protocol Subtype.
+ * @rx_switch_discard: Count of received frames that are discarded by the
+ * internal switch because they did not have an entry in the
+ * Filtering Database. This includes frames that had an invalid
+ * destination MAC address or VLAN ID. It also includes frames that are
+ * discarded because they did not satisfy the length requirements
+ * of the target VPATH.
+ * @rx_len_discard: Count of received frames that are discarded because of an
+ * invalid frame length (includes fragments, oversized frames and
+ * mismatch between frame length and length/type field). This stat
+ * can be configured
+ * (see XMAC_STATS_GLOBAL_CFG.LEN_DISCARD_HANDLING).
+ * @rx_rpa_discard: Count of received frames that were discarded because the
+ * receive protocol assist (RPA) discovered an error in the frame
+ * or was unable to parse the frame.
+ * @rx_l2_mgmt_discard: Count of Layer 2 management frames (e.g. pause frames,
+ * Link Aggregation Control Protocol (LACP) frames, etc.) that are
+ * discarded.
+ * @rx_rts_discard: Count of received frames that are discarded by the receive
+ * traffic steering (RTS) logic. Includes those frames discarded
+ * because the SSC response contradicted the switch table, because
+ * the SSC timed out, or because the target queue could not fit the
+ * frame.
+ * @rx_trash_discard: Count of received frames that are discarded because
+ * receive traffic steering (RTS) steered the frame to the trash
+ * queue.
+ * @rx_buff_full_discard: Count of received frames that are discarded because
+ * internal buffers are full. Includes frames discarded because the
+ * RTS logic is waiting for an SSC lookup that has no timeout bound.
+ * Also, includes frames that are dropped because the MAC2FAU buffer
+ * is nearly full -- this can happen if the external receive buffer
+ * is full and the receive path is backing up.
+ * @rx_red_discard: Count of received frames that are discarded because of RED
+ * (Random Early Discard).
+ * @rx_xgmii_ctrl_err_cnt: Maintains a count of unexpected or misplaced control
+ * characters occurring between times of normal data transmission
+ * (i.e. not included in RX_XGMII_DATA_ERR_CNT). This counter is
+ * incremented when either -
+ * 1) The Reconciliation Sublayer (RS) is expecting one control
+ * character and gets another (i.e. is expecting a Start
+ * character, but gets another control character).
+ * 2) Start control character is not in lane 0
+ * Only increments the count by one for each XGMII column.
+ * @rx_xgmii_data_err_cnt: Maintains a count of unexpected control characters
+ * during normal data transmission. If the Reconciliation Sublayer
+ * (RS) receives a control character, other than a terminate control
+ * character, during receipt of data octets then this register is
+ * incremented. Also increments if the start frame delimiter is not
+ * found in the correct location. Only increments the count by one
+ * for each XGMII column.
+ * @rx_xgmii_char1_match: Maintains a count of the number of XGMII characters
+ * that match a pattern that is programmable through register
+ * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
+ * to /E/ (i.e. the error character), thus the statistic tracks the
+ * number of Error characters received at any time.
+ * @rx_xgmii_err_sym: Count of the number of symbol errors in the received
+ * XGMII data (i.e. PHY indicates "Receive Error" on the XGMII).
+ * Only includes symbol errors that are observed between the XGMII
+ * Start Frame Delimiter and End Frame Delimiter, inclusive. And
+ * only increments the count by one for each frame.
+ * @rx_xgmii_column1_match: Maintains a count of the number of XGMII columns
+ * that match a pattern that is programmable through register
+ * XMAC_STATS_RX_XGMII_COLUMN1_PORTn. By default, the pattern is set
+ * to 4 x /E/ (i.e. a column containing all error characters), thus
+ * the statistic tracks the number of Error columns received at any
+ * time.
+ * @rx_xgmii_char2_match: Maintains a count of the number of XGMII characters
+ * that match a pattern that is programmable through register
+ * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
+ * to /E/ (i.e. the error character), thus the statistic tracks the
+ * number of Error characters received at any time.
+ * @rx_local_fault: Maintains a count of the number of times that link
+ * transitioned from "up" to "down" due to a local fault.
+ * @rx_xgmii_column2_match: Maintains a count of the number of XGMII columns
+ * that match a pattern that is programmable through register
+ * XMAC_STATS_RX_XGMII_COLUMN2_PORTn. By default, the pattern is set
+ * to 4 x /E/ (i.e. a column containing all error characters), thus
+ * the statistic tracks the number of Error columns received at any
+ * time. If XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is set
+ * to 1, then this stat increments when COLUMN2 is found within 'n'
+ * clocks after COLUMN1. Here, 'n' is defined by
+ * XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set to
+ * 0, then it means to search anywhere for COLUMN2).
+ * @rx_jettison: Count of received frames that are jettisoned because internal
+ * buffers are full.
+ * @rx_remote_fault: Maintains a count of the number of times that link
+ * transitioned from "up" to "down" due to a remote fault.
+ *
+ * XMAC Port Statistics.
+ */
+struct vxge_hw_xmac_port_stats {
+/*0x000*/ u64 tx_ttl_frms;
+/*0x008*/ u64 tx_ttl_octets;
+/*0x010*/ u64 tx_data_octets;
+/*0x018*/ u64 tx_mcast_frms;
+/*0x020*/ u64 tx_bcast_frms;
+/*0x028*/ u64 tx_ucast_frms;
+/*0x030*/ u64 tx_tagged_frms;
+/*0x038*/ u64 tx_vld_ip;
+/*0x040*/ u64 tx_vld_ip_octets;
+/*0x048*/ u64 tx_icmp;
+/*0x050*/ u64 tx_tcp;
+/*0x058*/ u64 tx_rst_tcp;
+/*0x060*/ u64 tx_udp;
+/*0x068*/ u32 tx_parse_error;
+/*0x06c*/ u32 tx_unknown_protocol;
+/*0x070*/ u64 tx_pause_ctrl_frms;
+/*0x078*/ u32 tx_marker_pdu_frms;
+/*0x07c*/ u32 tx_lacpdu_frms;
+/*0x080*/ u32 tx_drop_ip;
+/*0x084*/ u32 tx_marker_resp_pdu_frms;
+/*0x088*/ u32 tx_xgmii_char2_match;
+/*0x08c*/ u32 tx_xgmii_char1_match;
+/*0x090*/ u32 tx_xgmii_column2_match;
+/*0x094*/ u32 tx_xgmii_column1_match;
+/*0x098*/ u32 unused1;
+/*0x09c*/ u16 tx_any_err_frms;
+/*0x09e*/ u16 tx_drop_frms;
+/*0x0a0*/ u64 rx_ttl_frms;
+/*0x0a8*/ u64 rx_vld_frms;
+/*0x0b0*/ u64 rx_offload_frms;
+/*0x0b8*/ u64 rx_ttl_octets;
+/*0x0c0*/ u64 rx_data_octets;
+/*0x0c8*/ u64 rx_offload_octets;
+/*0x0d0*/ u64 rx_vld_mcast_frms;
+/*0x0d8*/ u64 rx_vld_bcast_frms;
+/*0x0e0*/ u64 rx_accepted_ucast_frms;
+/*0x0e8*/ u64 rx_accepted_nucast_frms;
+/*0x0f0*/ u64 rx_tagged_frms;
+/*0x0f8*/ u64 rx_long_frms;
+/*0x100*/ u64 rx_usized_frms;
+/*0x108*/ u64 rx_osized_frms;
+/*0x110*/ u64 rx_frag_frms;
+/*0x118*/ u64 rx_jabber_frms;
+/*0x120*/ u64 rx_ttl_64_frms;
+/*0x128*/ u64 rx_ttl_65_127_frms;
+/*0x130*/ u64 rx_ttl_128_255_frms;
+/*0x138*/ u64 rx_ttl_256_511_frms;
+/*0x140*/ u64 rx_ttl_512_1023_frms;
+/*0x148*/ u64 rx_ttl_1024_1518_frms;
+/*0x150*/ u64 rx_ttl_1519_4095_frms;
+/*0x158*/ u64 rx_ttl_4096_8191_frms;
+/*0x160*/ u64 rx_ttl_8192_max_frms;
+/*0x168*/ u64 rx_ttl_gt_max_frms;
+/*0x170*/ u64 rx_ip;
+/*0x178*/ u64 rx_accepted_ip;
+/*0x180*/ u64 rx_ip_octets;
+/*0x188*/ u64 rx_err_ip;
+/*0x190*/ u64 rx_icmp;
+/*0x198*/ u64 rx_tcp;
+/*0x1a0*/ u64 rx_udp;
+/*0x1a8*/ u64 rx_err_tcp;
+/*0x1b0*/ u64 rx_pause_count;
+/*0x1b8*/ u64 rx_pause_ctrl_frms;
+/*0x1c0*/ u64 rx_unsup_ctrl_frms;
+/*0x1c8*/ u64 rx_fcs_err_frms;
+/*0x1d0*/ u64 rx_in_rng_len_err_frms;
+/*0x1d8*/ u64 rx_out_rng_len_err_frms;
+/*0x1e0*/ u64 rx_drop_frms;
+/*0x1e8*/ u64 rx_discarded_frms;
+/*0x1f0*/ u64 rx_drop_ip;
+/*0x1f8*/ u64 rx_drop_udp;
+/*0x200*/ u32 rx_marker_pdu_frms;
+/*0x204*/ u32 rx_lacpdu_frms;
+/*0x208*/ u32 rx_unknown_pdu_frms;
+/*0x20c*/ u32 rx_marker_resp_pdu_frms;
+/*0x210*/ u32 rx_fcs_discard;
+/*0x214*/ u32 rx_illegal_pdu_frms;
+/*0x218*/ u32 rx_switch_discard;
+/*0x21c*/ u32 rx_len_discard;
+/*0x220*/ u32 rx_rpa_discard;
+/*0x224*/ u32 rx_l2_mgmt_discard;
+/*0x228*/ u32 rx_rts_discard;
+/*0x22c*/ u32 rx_trash_discard;
+/*0x230*/ u32 rx_buff_full_discard;
+/*0x234*/ u32 rx_red_discard;
+/*0x238*/ u32 rx_xgmii_ctrl_err_cnt;
+/*0x23c*/ u32 rx_xgmii_data_err_cnt;
+/*0x240*/ u32 rx_xgmii_char1_match;
+/*0x244*/ u32 rx_xgmii_err_sym;
+/*0x248*/ u32 rx_xgmii_column1_match;
+/*0x24c*/ u32 rx_xgmii_char2_match;
+/*0x250*/ u32 rx_local_fault;
+/*0x254*/ u32 rx_xgmii_column2_match;
+/*0x258*/ u32 rx_jettison;
+/*0x25c*/ u32 rx_remote_fault;
+} __packed;
+
+/**
+ * struct vxge_hw_xmac_vpath_tx_stats - XMAC Vpath Tx Statistics
+ *
+ * @tx_ttl_eth_frms: Count of successfully transmitted MAC frames.
+ * @tx_ttl_eth_octets: Count of total octets of transmitted frames,
+ * not including framing characters (i.e. less framing bits).
+ * To determine the total octets of transmitted frames, including
+ * framing characters, multiply TX_TTL_ETH_FRMS by 8 and add it to
+ * this stat (the device always prepends 8 bytes of preamble for
+ * each frame)
+ * @tx_data_octets: Count of data and padding octets of successfully transmitted
+ * frames.
+ * @tx_mcast_frms: Count of successfully transmitted frames to a group address
+ * other than the broadcast address.
+ * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
+ * group address.
+ * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
+ * Includes discarded frames that are not sent to the network.
+ * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
+ * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
+ * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
+ * are passed to the network.
+ * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent due
+ * to problems within ICMP.
+ * @tx_tcp: Count of transmitted TCP segments. Does not include segments
+ * containing retransmitted octets.
+ * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
+ * @tx_udp: Count of transmitted UDP datagrams.
+ * @tx_unknown_protocol: Increments when the TPA encounters an unknown protocol,
+ * such as a new IPv6 extension header, or an unsupported Routing
+ * Type. The packet still has a checksum calculated but it may be
+ * incorrect.
+ * @tx_lost_ip: Count of transmitted IP datagrams that could not be passed
+ * to the network. Increments because of: 1) An internal processing
+ * error (such as an uncorrectable ECC error). 2) A frame parsing
+ * error during IP checksum calculation.
+ * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
+ * generally occurs when a packet is corrupt somehow, including
+ * packets that have IP version mismatches, invalid Layer 2 control
+ * fields, etc. L3/L4 checksums are not offloaded, but the packet
+ * is still transmitted.
+ * @tx_tcp_offload: For frames belonging to offloaded sessions only, a count
+ * of transmitted TCP segments. Does not include segments containing
+ * retransmitted octets.
+ * @tx_retx_tcp_offload: For frames belonging to offloaded sessions only, the
+ * total number of segments retransmitted. Retransmitted segments
+ * that are sourced by the host are counted by the host.
+ * @tx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
+ * of transmitted IP datagrams that could not be passed to the
+ * network.
+ *
+ * XMAC Vpath TX Statistics.
+ */
+struct vxge_hw_xmac_vpath_tx_stats {
+ u64 tx_ttl_eth_frms;
+ u64 tx_ttl_eth_octets;
+ u64 tx_data_octets;
+ u64 tx_mcast_frms;
+ u64 tx_bcast_frms;
+ u64 tx_ucast_frms;
+ u64 tx_tagged_frms;
+ u64 tx_vld_ip;
+ u64 tx_vld_ip_octets;
+ u64 tx_icmp;
+ u64 tx_tcp;
+ u64 tx_rst_tcp;
+ u64 tx_udp;
+ u32 tx_unknown_protocol;
+ u32 tx_lost_ip;
+ u32 unused1;
+ u32 tx_parse_error;
+ u64 tx_tcp_offload;
+ u64 tx_retx_tcp_offload;
+ u64 tx_lost_ip_offload;
+} __packed;
+
+/**
+ * struct vxge_hw_xmac_vpath_rx_stats - XMAC Vpath RX Statistics
+ *
+ * @rx_ttl_eth_frms: Count of successfully received MAC frames.
+ * @rx_vld_frms: Count of successfully received MAC frames. Does not include
+ * frames received with frame-too-long, FCS, or length errors.
+ * @rx_offload_frms: Count of offloaded received frames that are passed to
+ * the host.
+ * @rx_ttl_eth_octets: Count of total octets of received frames, not including
+ * framing characters (i.e. less framing bits). Only counts octets
+ * of frames that are at least 14 bytes (18 bytes for VLAN-tagged)
+ * before FCS. To determine the total octets of received frames,
+ * including framing characters, multiply RX_TTL_ETH_FRMS by 8 and
+ * add it to this stat (the stat RX_TTL_ETH_FRMS only counts frames
+ * that have the required 8 bytes of preamble).
+ * @rx_data_octets: Count of data and padding octets of successfully received
+ * frames. Does not include frames received with frame-too-long,
+ * FCS, or length errors.
+ * @rx_offload_octets: Count of total octets, not including framing characters,
+ * of offloaded received frames that are passed to the host.
+ * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
+ * nonbroadcast group address. Does not include frames received with
+ * frame-too-long, FCS, or length errors.
+ * @rx_vld_bcast_frms: Count of successfully received MAC frames containing the
+ * broadcast group address. Does not include frames received with
+ * frame-too-long, FCS, or length errors.
+ * @rx_accepted_ucast_frms: Count of successfully received frames containing
+ * a unicast address. Only includes frames that are passed to the
+ * system.
+ * @rx_accepted_nucast_frms: Count of successfully received frames containing
+ * a non-unicast (broadcast or multicast) address. Only includes
+ * frames that are passed to the system. Could include, for instance,
+ * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
+ * register is set to pass FCS-errored frames to the host.
+ * @rx_tagged_frms: Count of received frames containing a VLAN tag.
+ * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
+ * + 18 bytes (+ 22 bytes if VLAN-tagged).
+ * @rx_usized_frms: Count of received frames of length (including FCS, but not
+ * framing bits) less than 64 octets, that are otherwise well-formed.
+ * In other words, counts runts.
+ * @rx_osized_frms: Count of received frames of length (including FCS, but not
+ * framing bits) more than 1518 octets, that are otherwise
+ * well-formed.
+ * @rx_frag_frms: Count of received frames of length (including FCS, but not
+ * framing bits) less than 64 octets that had bad FCS.
+ * In other words, counts fragments.
+ * @rx_jabber_frms: Count of received frames of length (including FCS, but not
+ * framing bits) more than 1518 octets that had bad FCS. In other
+ * words, counts jabbers.
+ * @rx_ttl_64_frms: Count of total received MAC frames with length (including
+ * FCS, but not framing bits) of exactly 64 octets. Includes frames
+ * received with frame-too-long, FCS, or length errors.
+ * @rx_ttl_65_127_frms: Count of total received MAC frames
+ * with length (including
+ * FCS, but not framing bits) of between 65 and 127 octets inclusive.
+ * Includes frames received with frame-too-long, FCS,
+ * or length errors.
+ * @rx_ttl_128_255_frms: Count of total received MAC frames with length
+ * (including FCS, but not framing bits)
+ * of between 128 and 255 octets
+ * inclusive. Includes frames received with frame-too-long, FCS,
+ * or length errors.
+ * @rx_ttl_256_511_frms: Count of total received MAC frames with length
+ * (including FCS, but not framing bits)
+ * of between 256 and 511 octets
+ * inclusive. Includes frames received with frame-too-long, FCS, or
+ * length errors.
+ * @rx_ttl_512_1023_frms: Count of total received MAC frames with length
+ * (including FCS, but not framing bits) of between 512 and 1023
+ * octets inclusive. Includes frames received with frame-too-long,
+ * FCS, or length errors.
+ * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length
+ * (including FCS, but not framing bits) of between 1024 and 1518
+ * octets inclusive. Includes frames received with frame-too-long,
+ * FCS, or length errors.
+ * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length
+ * (including FCS, but not framing bits) of between 1519 and 4095
+ * octets inclusive. Includes frames received with frame-too-long,
+ * FCS, or length errors.
+ * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length
+ * (including FCS, but not framing bits) of between 4096 and 8191
+ * octets inclusive. Includes frames received with frame-too-long,
+ * FCS, or length errors.
+ * @rx_ttl_8192_max_frms: Count of total received MAC frames with length
+ * (including FCS, but not framing bits) of between 8192 and
+ * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received
+ * with frame-too-long, FCS, or length errors.
+ * @rx_ttl_gt_max_frms: Count of total received MAC frames with length
+ * (including FCS, but not framing bits) exceeding RX_MAX_PYLD_LEN+18
+ * (+22 bytes if VLAN-tagged) octets inclusive. Includes frames
+ * received with frame-too-long, FCS, or length errors.
+ * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams.
+ * @rx_accepted_ip: Count of received IP datagrams that
+ * are passed to the system.
+ * @rx_ip_octets: Count of number of octets in received IP datagrams.
+ * Includes errored IP datagrams.
+ * @rx_err_ip: Count of received IP datagrams containing errors. For example,
+ * bad IP checksum.
+ * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages.
+ * @rx_tcp: Count of received TCP segments. Includes errored TCP segments.
+ * Note: This stat contains a count of all received TCP segments,
+ * regardless of whether or not they pertain to an established
+ * connection.
+ * @rx_udp: Count of received UDP datagrams.
+ * @rx_err_tcp: Count of received TCP segments containing errors. For example,
+ * bad TCP checksum.
+ * @rx_lost_frms: Count of received frames that could not be passed to the host.
+ * See RX_QUEUE_FULL_DISCARD and RX_RED_DISCARD
+ * for a list of reasons.
+ * @rx_lost_ip: Count of received IP datagrams that could not be passed to
+ * the host. See RX_LOST_FRMS for a list of reasons.
+ * @rx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
+ * of received IP datagrams that could not be passed to the host.
+ * See RX_LOST_FRMS for a list of reasons.
+ * @rx_various_discard: Count of received frames that are discarded because
+ * the target receive queue is full.
+ * @rx_sleep_discard: Count of received frames that are discarded because the
+ * target VPATH is asleep (a Wake-on-LAN magic packet can be used
+ * to awaken the VPATH).
+ * @rx_red_discard: Count of received frames that are discarded because of RED
+ * (Random Early Discard).
+ * @rx_queue_full_discard: Count of received frames that are discarded because
+ * the target receive queue is full.
+ * @rx_mpa_ok_frms: Count of received frames that pass the MPA checks.
+ *
+ * XMAC Vpath RX Statistics.
+ */
+struct vxge_hw_xmac_vpath_rx_stats {
+ u64 rx_ttl_eth_frms;
+ u64 rx_vld_frms;
+ u64 rx_offload_frms;
+ u64 rx_ttl_eth_octets;
+ u64 rx_data_octets;
+ u64 rx_offload_octets;
+ u64 rx_vld_mcast_frms;
+ u64 rx_vld_bcast_frms;
+ u64 rx_accepted_ucast_frms;
+ u64 rx_accepted_nucast_frms;
+ u64 rx_tagged_frms;
+ u64 rx_long_frms;
+ u64 rx_usized_frms;
+ u64 rx_osized_frms;
+ u64 rx_frag_frms;
+ u64 rx_jabber_frms;
+ u64 rx_ttl_64_frms;
+ u64 rx_ttl_65_127_frms;
+ u64 rx_ttl_128_255_frms;
+ u64 rx_ttl_256_511_frms;
+ u64 rx_ttl_512_1023_frms;
+ u64 rx_ttl_1024_1518_frms;
+ u64 rx_ttl_1519_4095_frms;
+ u64 rx_ttl_4096_8191_frms;
+ u64 rx_ttl_8192_max_frms;
+ u64 rx_ttl_gt_max_frms;
+ u64 rx_ip;
+ u64 rx_accepted_ip;
+ u64 rx_ip_octets;
+ u64 rx_err_ip;
+ u64 rx_icmp;
+ u64 rx_tcp;
+ u64 rx_udp;
+ u64 rx_err_tcp;
+ u64 rx_lost_frms;
+ u64 rx_lost_ip;
+ u64 rx_lost_ip_offload;
+ u16 rx_various_discard;
+ u16 rx_sleep_discard;
+ u16 rx_red_discard;
+ u16 rx_queue_full_discard;
+ u64 rx_mpa_ok_frms;
+} __packed;
+
+/**
+ * struct vxge_hw_xmac_stats - XMAC Statistics
+ *
+ * @aggr_stats: Statistics on aggregate ports (port 0, port 1)
+ * @port_stats: Statistics on ports (wire 0, wire 1, LAG)
+ * @vpath_tx_stats: Per vpath XMAC TX stats
+ * @vpath_rx_stats: Per vpath XMAC RX stats
+ *
+ * XMAC Statistics.
+ */
+struct vxge_hw_xmac_stats {
+ struct vxge_hw_xmac_aggr_stats
+ aggr_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID];
+ struct vxge_hw_xmac_port_stats
+ port_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID+1];
+ struct vxge_hw_xmac_vpath_tx_stats
+ vpath_tx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
+ struct vxge_hw_xmac_vpath_rx_stats
+ vpath_rx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
+};
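+
+/*
+ * Aggregating a per-vpath counter from the container above (sketch;
+ * the xmac_stats pointer is hypothetical):
+ *
+ *	u64 total_rx = 0;
+ *	u32 i;
+ *
+ *	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
+ *		total_rx += xmac_stats->vpath_rx_stats[i].rx_ttl_eth_frms;
+ */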
+
+/**
+ * struct vxge_hw_vpath_stats_hw_info - Titan vpath hardware statistics.
+ * @ini_num_mwr_sent: The number of PCI memory writes initiated by the PIC block
+ * for the given VPATH
+ * @ini_num_mrd_sent: The number of PCI memory reads initiated by the PIC block
+ * @ini_num_cpl_rcvd: The number of PCI read completions received by the
+ * PIC block
+ * @ini_num_mwr_byte_sent: The number of PCI memory write bytes sent by the PIC
+ * block to the host
+ * @ini_num_cpl_byte_rcvd: The number of PCI read completion bytes received by
+ * the PIC block
+ * @wrcrdtarb_xoff: TBD
+ * @rdcrdtarb_xoff: TBD
+ * @vpath_genstats_count0: TBD
+ * @vpath_genstats_count1: TBD
+ * @vpath_genstats_count2: TBD
+ * @vpath_genstats_count3: TBD
+ * @vpath_genstats_count4: TBD
+ * @vpath_genstats_count5: TBD
+ * @tx_stats: Transmit stats
+ * @rx_stats: Receive stats
+ * @prog_event_vnum1: Programmable statistic. Increments when internal logic
+ * detects a certain event. See register
+ * XMAC_STATS_CFG.EVENT_VNUM1_CFG for more information.
+ * @prog_event_vnum0: Programmable statistic. Increments when internal logic
+ * detects a certain event. See register
+ * XMAC_STATS_CFG.EVENT_VNUM0_CFG for more information.
+ * @prog_event_vnum3: Programmable statistic. Increments when internal logic
+ * detects a certain event. See register
+ * XMAC_STATS_CFG.EVENT_VNUM3_CFG for more information.
+ * @prog_event_vnum2: Programmable statistic. Increments when internal logic
+ * detects a certain event. See register
+ * XMAC_STATS_CFG.EVENT_VNUM2_CFG for more information.
+ * @rx_multi_cast_frame_discard: TBD
+ * @rx_frm_transferred: TBD
+ * @rxd_returned: TBD
+ * @rx_mpa_len_fail_frms: Count of received frames
+ * that fail the MPA length check
+ * @rx_mpa_mrk_fail_frms: Count of received frames
+ * that fail the MPA marker check
+ * @rx_mpa_crc_fail_frms: Count of received frames that fail the MPA CRC check
+ * @rx_permitted_frms: Count of frames that pass through the FAU and on to the
+ * frame buffer (and subsequently to the host).
+ * @rx_vp_reset_discarded_frms: Count of receive frames that are discarded
+ * because the VPATH is in reset
+ * @rx_wol_frms: Count of received "magic packet" frames. Stat increments
+ * whenever the received frame matches the VPATH's Wake-on-LAN
+ * signature(s) CRC.
+ * @tx_vp_reset_discarded_frms: Count of transmit frames that are discarded
+ * because the VPATH is in reset. Includes frames that are discarded
+ * because the current VPIN does not match the VPIN of the frame.
+ *
+ * Titan vpath hardware statistics.
+ */
+struct vxge_hw_vpath_stats_hw_info {
+/*0x000*/ u32 ini_num_mwr_sent;
+/*0x004*/ u32 unused1;
+/*0x008*/ u32 ini_num_mrd_sent;
+/*0x00c*/ u32 unused2;
+/*0x010*/ u32 ini_num_cpl_rcvd;
+/*0x014*/ u32 unused3;
+/*0x018*/ u64 ini_num_mwr_byte_sent;
+/*0x020*/ u64 ini_num_cpl_byte_rcvd;
+/*0x028*/ u32 wrcrdtarb_xoff;
+/*0x02c*/ u32 unused4;
+/*0x030*/ u32 rdcrdtarb_xoff;
+/*0x034*/ u32 unused5;
+/*0x038*/ u32 vpath_genstats_count0;
+/*0x03c*/ u32 vpath_genstats_count1;
+/*0x040*/ u32 vpath_genstats_count2;
+/*0x044*/ u32 vpath_genstats_count3;
+/*0x048*/ u32 vpath_genstats_count4;
+/*0x04c*/ u32 unused6;
+/*0x050*/ u32 vpath_genstats_count5;
+/*0x054*/ u32 unused7;
+/*0x058*/ struct vxge_hw_xmac_vpath_tx_stats tx_stats;
+/*0x0e8*/ struct vxge_hw_xmac_vpath_rx_stats rx_stats;
+/*0x220*/ u64 unused9;
+/*0x228*/ u32 prog_event_vnum1;
+/*0x22c*/ u32 prog_event_vnum0;
+/*0x230*/ u32 prog_event_vnum3;
+/*0x234*/ u32 prog_event_vnum2;
+/*0x238*/ u16 rx_multi_cast_frame_discard;
+/*0x23a*/ u8 unused10[6];
+/*0x240*/ u32 rx_frm_transferred;
+/*0x244*/ u32 unused11;
+/*0x248*/ u16 rxd_returned;
+/*0x24a*/ u8 unused12[6];
+/*0x252*/ u16 rx_mpa_len_fail_frms;
+/*0x254*/ u16 rx_mpa_mrk_fail_frms;
+/*0x256*/ u16 rx_mpa_crc_fail_frms;
+/*0x258*/ u16 rx_permitted_frms;
+/*0x25c*/ u64 rx_vp_reset_discarded_frms;
+/*0x25e*/ u64 rx_wol_frms;
+/*0x260*/ u64 tx_vp_reset_discarded_frms;
+} __packed;
+
+
+/**
+ * struct vxge_hw_device_stats_mrpcim_info - Titan mrpcim hardware statistics.
+ * @pic.ini_rd_drop 0x0000 4 Number of DMA reads initiated
+ * by the adapter that were discarded because the VPATH is out of service
+ * @pic.ini_wr_drop 0x0004 4 Number of DMA writes initiated by the
+ * adapter that were discarded because the VPATH is out of service
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane0] 0x0008 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane1] 0x0010 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane2] 0x0018 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane3] 0x0020 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane4] 0x0028 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane5] 0x0030 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane6] 0x0038 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane7] 0x0040 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane8] 0x0048 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane9] 0x0050 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane10] 0x0058 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane11] 0x0060 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane12] 0x0068 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane13] 0x0070 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane14] 0x0078 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane15] 0x0080 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane16] 0x0088 4 Number of times
+ * the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane0] 0x0090 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane1] 0x0098 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane2] 0x00a0 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane3] 0x00a8 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane4] 0x00b0 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane5] 0x00b8 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane6] 0x00c0 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane7] 0x00c8 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane8] 0x00d0 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane9] 0x00d8 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane10] 0x00e0 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane11] 0x00e8 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane12] 0x00f0 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane13] 0x00f8 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane14] 0x0100 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane15] 0x0108 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane16] 0x0110 4 Number of times
+ * the posted data credits for upstream PCI writes were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane0] 0x0118 4 Number of times
+ * the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane1] 0x0120 4 Number of times
+ * the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane2] 0x0128 4 Number of times
+ * the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane3] 0x0130 4 Number of times
+ * the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane4] 0x0138 4 Number of times
+ * the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane5] 0x0140 4 Number of times
+ * the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane6] 0x0148 4 Number of times
+ * the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane7] 0x0150 4 Number of times
+ * the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane8] 0x0158 4 Number of times
+ * the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane9] 0x0160 4 Number of times
+ * the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane10] 0x0168 4 Number of times
+ * the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane11] 0x0170 4 Number of times
+ * the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane12] 0x0178 4 Number of times
+ * the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane13] 0x0180 4 Number of times
+ * the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane14] 0x0188 4 Number of times
+ * the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane15] 0x0190 4 Number of times
+ * the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane16] 0x0198 4 Number of times
+ * the non-posted header credits for upstream PCI reads were depleted
+ * @pic.ini_rd_vpin_drop 0x01a0 4 Number of DMA reads initiated by
+ * the adapter that were discarded because the VPATH instance number does
+ * not match
+ * @pic.ini_wr_vpin_drop 0x01a4 4 Number of DMA writes initiated
+ * by the adapter that were discarded because the VPATH instance number
+ * does not match
+ * @pic.genstats_count0 0x01a8 4 Configurable statistic #1. Refer
+ * to the GENSTATS0_CFG for information on configuring this statistic
+ * @pic.genstats_count1 0x01ac 4 Configurable statistic #2. Refer
+ * to the GENSTATS1_CFG for information on configuring this statistic
+ * @pic.genstats_count2 0x01b0 4 Configurable statistic #3. Refer
+ * to the GENSTATS2_CFG for information on configuring this statistic
+ * @pic.genstats_count3 0x01b4 4 Configurable statistic #4. Refer
+ * to the GENSTATS3_CFG for information on configuring this statistic
+ * @pic.genstats_count4 0x01b8 4 Configurable statistic #5. Refer
+ * to the GENSTATS4_CFG for information on configuring this statistic
+ * @pic.genstats_count5 0x01c0 4 Configurable statistic #6. Refer
+ * to the GENSTATS5_CFG for information on configuring this statistic
+ * @pci.rstdrop_cpl 0x01c8 4
+ * @pci.rstdrop_msg 0x01cc 4
+ * @pci.rstdrop_client1 0x01d0 4
+ * @pci.rstdrop_client0 0x01d4 4
+ * @pci.rstdrop_client2 0x01d8 4
+ * @pci.depl_cplh[vplane0] 0x01e2 2 Number of times completion
+ * header credits were depleted
+ * @pci.depl_nph[vplane0] 0x01e4 2 Number of times non posted
+ * header credits were depleted
+ * @pci.depl_ph[vplane0] 0x01e6 2 Number of times the posted
+ * header credits were depleted
+ * @pci.depl_cplh[vplane1] 0x01ea 2
+ * @pci.depl_nph[vplane1] 0x01ec 2
+ * @pci.depl_ph[vplane1] 0x01ee 2
+ * @pci.depl_cplh[vplane2] 0x01f2 2
+ * @pci.depl_nph[vplane2] 0x01f4 2
+ * @pci.depl_ph[vplane2] 0x01f6 2
+ * @pci.depl_cplh[vplane3] 0x01fa 2
+ * @pci.depl_nph[vplane3] 0x01fc 2
+ * @pci.depl_ph[vplane3] 0x01fe 2
+ * @pci.depl_cplh[vplane4] 0x0202 2
+ * @pci.depl_nph[vplane4] 0x0204 2
+ * @pci.depl_ph[vplane4] 0x0206 2
+ * @pci.depl_cplh[vplane5] 0x020a 2
+ * @pci.depl_nph[vplane5] 0x020c 2
+ * @pci.depl_ph[vplane5] 0x020e 2
+ * @pci.depl_cplh[vplane6] 0x0212 2
+ * @pci.depl_nph[vplane6] 0x0214 2
+ * @pci.depl_ph[vplane6] 0x0216 2
+ * @pci.depl_cplh[vplane7] 0x021a 2
+ * @pci.depl_nph[vplane7] 0x021c 2
+ * @pci.depl_ph[vplane7] 0x021e 2
+ * @pci.depl_cplh[vplane8] 0x0222 2
+ * @pci.depl_nph[vplane8] 0x0224 2
+ * @pci.depl_ph[vplane8] 0x0226 2
+ * @pci.depl_cplh[vplane9] 0x022a 2
+ * @pci.depl_nph[vplane9] 0x022c 2
+ * @pci.depl_ph[vplane9] 0x022e 2
+ * @pci.depl_cplh[vplane10] 0x0232 2
+ * @pci.depl_nph[vplane10] 0x0234 2
+ * @pci.depl_ph[vplane10] 0x0236 2
+ * @pci.depl_cplh[vplane11] 0x023a 2
+ * @pci.depl_nph[vplane11] 0x023c 2
+ * @pci.depl_ph[vplane11] 0x023e 2
+ * @pci.depl_cplh[vplane12] 0x0242 2
+ * @pci.depl_nph[vplane12] 0x0244 2
+ * @pci.depl_ph[vplane12] 0x0246 2
+ * @pci.depl_cplh[vplane13] 0x024a 2
+ * @pci.depl_nph[vplane13] 0x024c 2
+ * @pci.depl_ph[vplane13] 0x024e 2
+ * @pci.depl_cplh[vplane14] 0x0252 2
+ * @pci.depl_nph[vplane14] 0x0254 2
+ * @pci.depl_ph[vplane14] 0x0256 2
+ * @pci.depl_cplh[vplane15] 0x025a 2
+ * @pci.depl_nph[vplane15] 0x025c 2
+ * @pci.depl_ph[vplane15] 0x025e 2
+ * @pci.depl_cplh[vplane16] 0x0262 2
+ * @pci.depl_nph[vplane16] 0x0264 2
+ * @pci.depl_ph[vplane16] 0x0266 2
+ * @pci.depl_cpld[vplane0] 0x026a 2 Number of times completion data
+ * credits were depleted
+ * @pci.depl_npd[vplane0] 0x026c 2 Number of times non posted data
+ * credits were depleted
+ * @pci.depl_pd[vplane0] 0x026e 2 Number of times the posted data
+ * credits were depleted
+ * @pci.depl_cpld[vplane1] 0x0272 2
+ * @pci.depl_npd[vplane1] 0x0274 2
+ * @pci.depl_pd[vplane1] 0x0276 2
+ * @pci.depl_cpld[vplane2] 0x027a 2
+ * @pci.depl_npd[vplane2] 0x027c 2
+ * @pci.depl_pd[vplane2] 0x027e 2
+ * @pci.depl_cpld[vplane3] 0x0282 2
+ * @pci.depl_npd[vplane3] 0x0284 2
+ * @pci.depl_pd[vplane3] 0x0286 2
+ * @pci.depl_cpld[vplane4] 0x028a 2
+ * @pci.depl_npd[vplane4] 0x028c 2
+ * @pci.depl_pd[vplane4] 0x028e 2
+ * @pci.depl_cpld[vplane5] 0x0292 2
+ * @pci.depl_npd[vplane5] 0x0294 2
+ * @pci.depl_pd[vplane5] 0x0296 2
+ * @pci.depl_cpld[vplane6] 0x029a 2
+ * @pci.depl_npd[vplane6] 0x029c 2
+ * @pci.depl_pd[vplane6] 0x029e 2
+ * @pci.depl_cpld[vplane7] 0x02a2 2
+ * @pci.depl_npd[vplane7] 0x02a4 2
+ * @pci.depl_pd[vplane7] 0x02a6 2
+ * @pci.depl_cpld[vplane8] 0x02aa 2
+ * @pci.depl_npd[vplane8] 0x02ac 2
+ * @pci.depl_pd[vplane8] 0x02ae 2
+ * @pci.depl_cpld[vplane9] 0x02b2 2
+ * @pci.depl_npd[vplane9] 0x02b4 2
+ * @pci.depl_pd[vplane9] 0x02b6 2
+ * @pci.depl_cpld[vplane10] 0x02ba 2
+ * @pci.depl_npd[vplane10] 0x02bc 2
+ * @pci.depl_pd[vplane10] 0x02be 2
+ * @pci.depl_cpld[vplane11] 0x02c2 2
+ * @pci.depl_npd[vplane11] 0x02c4 2
+ * @pci.depl_pd[vplane11] 0x02c6 2
+ * @pci.depl_cpld[vplane12] 0x02ca 2
+ * @pci.depl_npd[vplane12] 0x02cc 2
+ * @pci.depl_pd[vplane12] 0x02ce 2
+ * @pci.depl_cpld[vplane13] 0x02d2 2
+ * @pci.depl_npd[vplane13] 0x02d4 2
+ * @pci.depl_pd[vplane13] 0x02d6 2
+ * @pci.depl_cpld[vplane14] 0x02da 2
+ * @pci.depl_npd[vplane14] 0x02dc 2
+ * @pci.depl_pd[vplane14] 0x02de 2
+ * @pci.depl_cpld[vplane15] 0x02e2 2
+ * @pci.depl_npd[vplane15] 0x02e4 2
+ * @pci.depl_pd[vplane15] 0x02e6 2
+ * @pci.depl_cpld[vplane16] 0x02ea 2
+ * @pci.depl_npd[vplane16] 0x02ec 2
+ * @pci.depl_pd[vplane16] 0x02ee 2
+ * @xgmac_port: Per-port XMAC statistics, one entry per port (3 ports).
+ * @xgmac_aggr: Per-aggregator XMAC statistics (2 aggregators).
+ * @xgmac.global_prog_event_gnum0 0x0ae0 8 Programmable statistic.
+ * Increments when internal logic detects a certain event. See register
+ * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM0_CFG for more information.
+ * @xgmac.global_prog_event_gnum1 0x0ae8 8 Programmable statistic.
+ * Increments when internal logic detects a certain event. See register
+ * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM1_CFG for more information.
+ * @xgmac.orp_lro_events 0x0af8 8
+ * @xgmac.orp_bs_events 0x0b00 8
+ * @xgmac.orp_iwarp_events 0x0b08 8
+ * @xgmac.tx_permitted_frms 0x0b14 4
+ * @xgmac.port2_tx_any_frms 0x0b1d 1
+ * @xgmac.port1_tx_any_frms 0x0b1e 1
+ * @xgmac.port0_tx_any_frms 0x0b1f 1
+ * @xgmac.port2_rx_any_frms 0x0b25 1
+ * @xgmac.port1_rx_any_frms 0x0b26 1
+ * @xgmac.port0_rx_any_frms 0x0b27 1
+ *
+ * Titan mrpcim hardware statistics.
+ */
+struct vxge_hw_device_stats_mrpcim_info {
+/*0x0000*/ u32 pic_ini_rd_drop;
+/*0x0004*/ u32 pic_ini_wr_drop;
+/*0x0008*/ struct {
+ /*0x0000*/ u32 pic_wrcrdtarb_ph_crdt_depleted;
+ /*0x0004*/ u32 unused1;
+ } pic_wrcrdtarb_ph_crdt_depleted_vplane[17];
+/*0x0090*/ struct {
+ /*0x0000*/ u32 pic_wrcrdtarb_pd_crdt_depleted;
+ /*0x0004*/ u32 unused2;
+ } pic_wrcrdtarb_pd_crdt_depleted_vplane[17];
+/*0x0118*/ struct {
+ /*0x0000*/ u32 pic_rdcrdtarb_nph_crdt_depleted;
+ /*0x0004*/ u32 unused3;
+ } pic_rdcrdtarb_nph_crdt_depleted_vplane[17];
+/*0x01a0*/ u32 pic_ini_rd_vpin_drop;
+/*0x01a4*/ u32 pic_ini_wr_vpin_drop;
+/*0x01a8*/ u32 pic_genstats_count0;
+/*0x01ac*/ u32 pic_genstats_count1;
+/*0x01b0*/ u32 pic_genstats_count2;
+/*0x01b4*/ u32 pic_genstats_count3;
+/*0x01b8*/ u32 pic_genstats_count4;
+/*0x01bc*/ u32 unused4;
+/*0x01c0*/ u32 pic_genstats_count5;
+/*0x01c4*/ u32 unused5;
+/*0x01c8*/ u32 pci_rstdrop_cpl;
+/*0x01cc*/ u32 pci_rstdrop_msg;
+/*0x01d0*/ u32 pci_rstdrop_client1;
+/*0x01d4*/ u32 pci_rstdrop_client0;
+/*0x01d8*/ u32 pci_rstdrop_client2;
+/*0x01dc*/ u32 unused6;
+/*0x01e0*/ struct {
+ /*0x0000*/ u16 unused7;
+ /*0x0002*/ u16 pci_depl_cplh;
+ /*0x0004*/ u16 pci_depl_nph;
+ /*0x0006*/ u16 pci_depl_ph;
+ } pci_depl_h_vplane[17];
+/*0x0268*/ struct {
+ /*0x0000*/ u16 unused8;
+ /*0x0002*/ u16 pci_depl_cpld;
+ /*0x0004*/ u16 pci_depl_npd;
+ /*0x0006*/ u16 pci_depl_pd;
+ } pci_depl_d_vplane[17];
+/*0x02f0*/ struct vxge_hw_xmac_port_stats xgmac_port[3];
+/*0x0a10*/ struct vxge_hw_xmac_aggr_stats xgmac_aggr[2];
+/*0x0ae0*/ u64 xgmac_global_prog_event_gnum0;
+/*0x0ae8*/ u64 xgmac_global_prog_event_gnum1;
+/*0x0af0*/ u64 unused7;
+/*0x0af8*/ u64 unused8;
+/*0x0b00*/ u64 unused9;
+/*0x0b08*/ u64 unused10;
+/*0x0b10*/ u32 unused11;
+/*0x0b14*/ u32 xgmac_tx_permitted_frms;
+/*0x0b18*/ u32 unused12;
+/*0x0b1c*/ u8 unused13;
+/*0x0b1d*/ u8 xgmac_port2_tx_any_frms;
+/*0x0b1e*/ u8 xgmac_port1_tx_any_frms;
+/*0x0b1f*/ u8 xgmac_port0_tx_any_frms;
+/*0x0b20*/ u32 unused14;
+/*0x0b24*/ u8 unused15;
+/*0x0b25*/ u8 xgmac_port2_rx_any_frms;
+/*0x0b26*/ u8 xgmac_port1_rx_any_frms;
+/*0x0b27*/ u8 xgmac_port0_rx_any_frms;
+} __packed;
+
+/**
+ * struct vxge_hw_device_stats_hw_info - Titan hardware statistics.
+ * @vpath_info: VPath statistics
+ * @vpath_info_sav: Vpath statistics saved
+ *
+ * Titan hardware statistics.
+ */
+struct vxge_hw_device_stats_hw_info {
+ struct vxge_hw_vpath_stats_hw_info
+ *vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
+ struct vxge_hw_vpath_stats_hw_info
+ vpath_info_sav[VXGE_HW_MAX_VIRTUAL_PATHS];
+};
+
+/**
+ * struct vxge_hw_vpath_stats_sw_common_info - HW common
+ * statistics for queues.
+ * @full_cnt: Number of times the queue was full
+ * @usage_cnt: Current descriptor usage count for the queue.
+ * @usage_max: Maximum descriptor usage observed on the queue.
+ * @reserve_free_swaps_cnt: Reserve/free swap counter. Internal usage.
+ * @total_compl_cnt: Total descriptor completion count.
+ *
+ * Hw queue counters
+ * See also: struct vxge_hw_vpath_stats_sw_fifo_info{},
+ * struct vxge_hw_vpath_stats_sw_ring_info{}.
+ */
+struct vxge_hw_vpath_stats_sw_common_info {
+ u32 full_cnt;
+ u32 usage_cnt;
+ u32 usage_max;
+ u32 reserve_free_swaps_cnt;
+ u32 total_compl_cnt;
+};
+
+/**
+ * struct vxge_hw_vpath_stats_sw_fifo_info - HW fifo statistics
+ * @common_stats: Common counters for all queues
+ * @total_posts: Total number of postings on the queue.
+ * @total_buffers: Total number of buffers posted.
+ * @txd_t_code_err_cnt: Array of transmit transfer codes. The position
+ * (index) in this array reflects the transfer code type, for instance
+ * 0xA - "loss of link".
+ * Value txd_t_code_err_cnt[i] reflects the
+ * number of times the corresponding transfer code was encountered.
+ *
+ * HW fifo counters
+ * See also: struct vxge_hw_vpath_stats_sw_common_info{},
+ * struct vxge_hw_vpath_stats_sw_ring_info{}.
+ */
+struct vxge_hw_vpath_stats_sw_fifo_info {
+ struct vxge_hw_vpath_stats_sw_common_info common_stats;
+ u32 total_posts;
+ u32 total_buffers;
+ u32 txd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
+};
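+
+/*
+ * Illustrative sketch (not part of the original patch): the index into
+ * txd_t_code_err_cnt[] is the transfer code itself, so summing the array
+ * yields the total count across all transfer codes.
+ */
+static inline u32 vxge_example_fifo_tcode_total(
+	const struct vxge_hw_vpath_stats_sw_fifo_info *stats)
+{
+	u32 i, total = 0;
+
+	for (i = 0; i < VXGE_HW_DTR_MAX_T_CODE; i++)
+		total += stats->txd_t_code_err_cnt[i];
+	return total;
+}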
+
+/**
+ * struct vxge_hw_vpath_stats_sw_ring_info - HW ring statistics
+ * @common_stats: Common counters for all queues
+ * @rxd_t_code_err_cnt: Array of receive transfer codes. The position
+ * (index) in this array reflects the transfer code type,
+ * for instance
+ * 0x7 - for "invalid receive buffer size", or 0x8 - for ECC.
+ * Value rxd_t_code_err_cnt[i] reflects the
+ * number of times the corresponding transfer code was encountered.
+ *
+ * HW ring counters
+ * See also: struct vxge_hw_vpath_stats_sw_common_info{},
+ * struct vxge_hw_vpath_stats_sw_fifo_info{}.
+ */
+struct vxge_hw_vpath_stats_sw_ring_info {
+ struct vxge_hw_vpath_stats_sw_common_info common_stats;
+ u32 rxd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
+
+};
+
+/**
+ * struct vxge_hw_vpath_stats_sw_err - HW vpath error statistics
+ * @unknown_alarms: Number of alarm events of unknown origin.
+ * @network_sustained_fault: Number of sustained network fault events.
+ * @network_sustained_ok: Number of times the network recovered from a
+ * sustained fault.
+ * @kdfcctl_fifo0_overwrite: Number of KDFC control FIFO 0 overwrite errors.
+ * @kdfcctl_fifo0_poison: Number of KDFC control FIFO 0 poison errors.
+ * @kdfcctl_fifo0_dma_error: Number of KDFC control FIFO 0 DMA errors.
+ * @dblgen_fifo0_overflow: Number of doorbell generator FIFO 0 overflows.
+ * @statsb_pif_chain_error: Number of statistics block PIF chain errors.
+ * @statsb_drop_timeout: Number of statistics block drop timeouts.
+ * @target_illegal_access: Number of illegal target accesses detected.
+ * @ini_serr_det: Number of system errors (SERR) detected by the initiator.
+ * @prc_ring_bumps: Number of PRC ring bump events.
+ * @prc_rxdcm_sc_err: Number of PRC RXDCM SC error events.
+ * @prc_rxdcm_sc_abort: Number of PRC RXDCM SC abort events.
+ * @prc_quanta_size_err: Number of PRC quanta size errors.
+ *
+ * HW vpath error statistics
+ */
+struct vxge_hw_vpath_stats_sw_err {
+ u32 unknown_alarms;
+ u32 network_sustained_fault;
+ u32 network_sustained_ok;
+ u32 kdfcctl_fifo0_overwrite;
+ u32 kdfcctl_fifo0_poison;
+ u32 kdfcctl_fifo0_dma_error;
+ u32 dblgen_fifo0_overflow;
+ u32 statsb_pif_chain_error;
+ u32 statsb_drop_timeout;
+ u32 target_illegal_access;
+ u32 ini_serr_det;
+ u32 prc_ring_bumps;
+ u32 prc_rxdcm_sc_err;
+ u32 prc_rxdcm_sc_abort;
+ u32 prc_quanta_size_err;
+};
+
+/**
+ * struct vxge_hw_vpath_stats_sw_info - HW vpath sw statistics
+ * @soft_reset_cnt: Number of times soft reset is done on this vpath.
+ * @error_stats: error counters for the vpath
+ * @ring_stats: counters for ring belonging to the vpath
+ * @fifo_stats: counters for fifo belonging to the vpath
+ *
+ * HW vpath sw statistics
+ * See also: struct vxge_hw_device_info{}.
+ */
+struct vxge_hw_vpath_stats_sw_info {
+ u32 soft_reset_cnt;
+ struct vxge_hw_vpath_stats_sw_err error_stats;
+ struct vxge_hw_vpath_stats_sw_ring_info ring_stats;
+ struct vxge_hw_vpath_stats_sw_fifo_info fifo_stats;
+};
+
+/**
+ * struct vxge_hw_device_stats_sw_info - HW own per-device statistics.
+ *
+ * @not_traffic_intr_cnt: Number of times the host was interrupted
+ * without new completions.
+ * "Non-traffic interrupt counter".
+ * @traffic_intr_cnt: Number of traffic interrupts for the device.
+ * @total_intr_cnt: Total number of interrupts for the device:
+ * @total_intr_cnt == @traffic_intr_cnt +
+ * @not_traffic_intr_cnt
+ * @soft_reset_cnt: Number of times soft reset is done on this device.
+ * @vpath_info: please see struct vxge_hw_vpath_stats_sw_info{}
+ * HW per-device statistics.
+ */
+struct vxge_hw_device_stats_sw_info {
+ u32 not_traffic_intr_cnt;
+ u32 traffic_intr_cnt;
+ u32 total_intr_cnt;
+ u32 soft_reset_cnt;
+ struct vxge_hw_vpath_stats_sw_info
+ vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
+};
+
+/**
+ * struct vxge_hw_device_stats_sw_err - HW device error statistics.
+ * @vpath_alarms: Number of vpath alarms
+ *
+ * HW Device error stats
+ */
+struct vxge_hw_device_stats_sw_err {
+ u32 vpath_alarms;
+};
+
+/**
+ * struct vxge_hw_device_stats - HW per-device statistics, both
+ * hardware-maintained and software-maintained.
+ * @devh: HW device handle.
+ *
+ * @hw_dev_info_stats: Titan statistics maintained by the hardware.
+ * @sw_dev_info_stats: HW's "soft" device informational statistics, e.g. number
+ * of completions per interrupt.
+ * @sw_dev_err_stats: HW's "soft" device error statistics.
+ *
+ * Structure-container of HW per-device statistics. Note that per-channel
+ * statistics are kept in separate structures under HW's fifo and ring
+ * channels.
+ */
+struct vxge_hw_device_stats {
+ /* handles */
+ struct __vxge_hw_device *devh;
+
+ /* HW device hardware statistics */
+ struct vxge_hw_device_stats_hw_info hw_dev_info_stats;
+
+ /* HW device "soft" stats */
+ struct vxge_hw_device_stats_sw_err sw_dev_err_stats;
+ struct vxge_hw_device_stats_sw_info sw_dev_info_stats;
+
+};
+
+enum vxge_hw_status vxge_hw_device_hw_stats_enable(
+ struct __vxge_hw_device *devh);
+
+enum vxge_hw_status vxge_hw_device_stats_get(
+ struct __vxge_hw_device *devh,
+ struct vxge_hw_device_stats_hw_info *hw_stats);
+
+enum vxge_hw_status vxge_hw_driver_stats_get(
+ struct __vxge_hw_device *devh,
+ struct vxge_hw_device_stats_sw_info *sw_stats);
+
+enum vxge_hw_status vxge_hw_mrpcim_stats_enable(struct __vxge_hw_device *devh);
+
+enum vxge_hw_status vxge_hw_mrpcim_stats_disable(struct __vxge_hw_device *devh);
+
+enum vxge_hw_status
+vxge_hw_mrpcim_stats_access(
+ struct __vxge_hw_device *devh,
+ u32 operation,
+ u32 location,
+ u32 offset,
+ u64 *stat);
+
+enum vxge_hw_status
+vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
+ struct vxge_hw_xmac_stats *xmac_stats);
+
+/**
+ * enum vxge_hw_mgmt_reg_type - Register types.
+ *
+ * @vxge_hw_mgmt_reg_type_legacy: Legacy registers
+ * @vxge_hw_mgmt_reg_type_toc: TOC Registers
+ * @vxge_hw_mgmt_reg_type_common: Common Registers
+ * @vxge_hw_mgmt_reg_type_mrpcim: mrpcim registers
+ * @vxge_hw_mgmt_reg_type_srpcim: srpcim registers
+ * @vxge_hw_mgmt_reg_type_vpmgmt: vpath management registers
+ * @vxge_hw_mgmt_reg_type_vpath: vpath registers
+ *
+ * Register type enumeration
+ */
+enum vxge_hw_mgmt_reg_type {
+ vxge_hw_mgmt_reg_type_legacy = 0,
+ vxge_hw_mgmt_reg_type_toc = 1,
+ vxge_hw_mgmt_reg_type_common = 2,
+ vxge_hw_mgmt_reg_type_mrpcim = 3,
+ vxge_hw_mgmt_reg_type_srpcim = 4,
+ vxge_hw_mgmt_reg_type_vpmgmt = 5,
+ vxge_hw_mgmt_reg_type_vpath = 6
+};
+
+enum vxge_hw_status
+vxge_hw_mgmt_reg_read(struct __vxge_hw_device *devh,
+ enum vxge_hw_mgmt_reg_type type,
+ u32 index,
+ u32 offset,
+ u64 *value);
+
+enum vxge_hw_status
+vxge_hw_mgmt_reg_write(struct __vxge_hw_device *devh,
+ enum vxge_hw_mgmt_reg_type type,
+ u32 index,
+ u32 offset,
+ u64 value);
+
+/**
+ * enum vxge_hw_rxd_state - Descriptor (RXD) state.
+ * @VXGE_HW_RXD_STATE_NONE: Invalid state.
+ * @VXGE_HW_RXD_STATE_AVAIL: Descriptor is available for reservation.
+ * @VXGE_HW_RXD_STATE_POSTED: Descriptor is posted for processing by the
+ * device.
+ * @VXGE_HW_RXD_STATE_FREED: Descriptor is free and can be reused for
+ * filling-in and posting later.
+ *
+ * Titan/HW descriptor states.
+ *
+ */
+enum vxge_hw_rxd_state {
+ VXGE_HW_RXD_STATE_NONE = 0,
+ VXGE_HW_RXD_STATE_AVAIL = 1,
+ VXGE_HW_RXD_STATE_POSTED = 2,
+ VXGE_HW_RXD_STATE_FREED = 3
+};
+
+/**
+ * struct vxge_hw_ring_rxd_info - Extended information associated with a
+ * completed ring descriptor.
+ * @syn_flag: SYN flag
+ * @is_icmp: Is ICMP
+ * @fast_path_eligible: Fast Path Eligible flag
+ * @l3_cksum_valid: Set if the L3 checksum is valid.
+ * @l3_cksum: Result of the IP checksum check (by Titan hardware).
+ * This field containing VXGE_HW_L3_CKSUM_OK means that
+ * the checksum is correct; otherwise the datagram is
+ * corrupted.
+ * @l4_cksum_valid: Set if the L4 checksum is valid.
+ * @l4_cksum: Result of the TCP/UDP checksum check (by Titan hardware).
+ * This field containing VXGE_HW_L4_CKSUM_OK means that
+ * the checksum is correct; otherwise the packet is
+ * corrupted.
+ * @frame: Zero or more of enum vxge_hw_frame_type flags.
+ * See enum vxge_hw_frame_type{}.
+ * @proto: Zero or more of enum vxge_hw_frame_proto flags. Reporting bits for
+ * various higher-layer protocols, including (but not restricted to)
+ * TCP and UDP. See enum vxge_hw_frame_proto{}.
+ * @is_vlan: Set if the VLAN tag is valid.
+ * @vlan: VLAN tag extracted from the received frame.
+ * @rth_bucket: RTH bucket.
+ * @rth_it_hit: Set if the RTH hash value calculated by the Titan hardware
+ * has a matching entry in the Indirection Table.
+ * @rth_spdm_hit: Set if the RTH hash value calculated by the Titan hardware
+ * has a matching entry in the Socket Pair Direct Match table.
+ * @rth_hash_type: RTH hash code of the function used to calculate the hash.
+ * @rth_value: Receive Traffic Hashing (RTH) hash value. Produced by Titan
+ * hardware if RTH is enabled.
+ */
+struct vxge_hw_ring_rxd_info {
+ u32 syn_flag;
+ u32 is_icmp;
+ u32 fast_path_eligible;
+ u32 l3_cksum_valid;
+ u32 l3_cksum;
+ u32 l4_cksum_valid;
+ u32 l4_cksum;
+ u32 frame;
+ u32 proto;
+ u32 is_vlan;
+ u32 vlan;
+ u32 rth_bucket;
+ u32 rth_it_hit;
+ u32 rth_spdm_hit;
+ u32 rth_hash_type;
+ u32 rth_value;
+};
+/**
+ * enum vxge_hw_ring_tcode - Transfer codes returned by adapter
+ * @VXGE_HW_RING_T_CODE_OK: Transfer ok.
+ * @VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH: Layer 3 checksum presentation
+ * configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH: Layer 4 checksum presentation
+ * configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH: Layer 3 and Layer 4 checksum
+ * presentation configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L3_PKT_ERR: Layer 3 error - unparseable packet,
+ * such as an unknown IPv6 header.
+ * @VXGE_HW_RING_T_CODE_L2_FRM_ERR: Layer 2 error - frame integrity
+ * error (such as FCS or ECC).
+ * @VXGE_HW_RING_T_CODE_BUF_SIZE_ERR: Buffer size error - the RxD
+ * buffer(s) were not appropriately sized and data loss occurred.
+ * @VXGE_HW_RING_T_CODE_INT_ECC_ERR: Internal ECC error - RxD corrupted.
+ * @VXGE_HW_RING_T_CODE_BENIGN_OVFLOW: Benign overflow - the contents of
+ * Segment1 exceeded the capacity of Buffer1 and the remainder
+ * was placed in Buffer2. Segment2 now starts in Buffer3.
+ * No data loss or errors occurred.
+ * @VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF: Buffer size 0 - one of the RxD-
+ * assigned buffers has a size of 0 bytes.
+ * @VXGE_HW_RING_T_CODE_FRM_DROP: Frame dropped, either due to a
+ * VPath reset or because of a VPIN mismatch.
+ * @VXGE_HW_RING_T_CODE_UNUSED: Unused.
+ * @VXGE_HW_RING_T_CODE_MULTI_ERR: Multiple errors - more than one
+ * transfer code condition occurred.
+ *
+ * Transfer codes returned by adapter.
+ */
+enum vxge_hw_ring_tcode {
+ VXGE_HW_RING_T_CODE_OK = 0x0,
+ VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH = 0x1,
+ VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH = 0x2,
+ VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH = 0x3,
+ VXGE_HW_RING_T_CODE_L3_PKT_ERR = 0x5,
+ VXGE_HW_RING_T_CODE_L2_FRM_ERR = 0x6,
+ VXGE_HW_RING_T_CODE_BUF_SIZE_ERR = 0x7,
+ VXGE_HW_RING_T_CODE_INT_ECC_ERR = 0x8,
+ VXGE_HW_RING_T_CODE_BENIGN_OVFLOW = 0x9,
+ VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF = 0xA,
+ VXGE_HW_RING_T_CODE_FRM_DROP = 0xC,
+ VXGE_HW_RING_T_CODE_UNUSED = 0xE,
+ VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
+};
+
+enum vxge_hw_status vxge_hw_ring_rxd_reserve(
+ struct __vxge_hw_ring *ring_handle,
+ void **rxdh);
+
+void
+vxge_hw_ring_rxd_pre_post(
+ struct __vxge_hw_ring *ring_handle,
+ void *rxdh);
+
+void
+vxge_hw_ring_rxd_post_post(
+ struct __vxge_hw_ring *ring_handle,
+ void *rxdh);
+
+enum vxge_hw_status
+vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle);
+
+void
+vxge_hw_ring_rxd_post_post_wmb(
+ struct __vxge_hw_ring *ring_handle,
+ void *rxdh);
+
+void vxge_hw_ring_rxd_post(
+ struct __vxge_hw_ring *ring_handle,
+ void *rxdh);
+
+enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
+ struct __vxge_hw_ring *ring_handle,
+ void **rxdh,
+ u8 *t_code);
+
+enum vxge_hw_status vxge_hw_ring_handle_tcode(
+ struct __vxge_hw_ring *ring_handle,
+ void *rxdh,
+ u8 t_code);
+
+void vxge_hw_ring_rxd_free(
+ struct __vxge_hw_ring *ring_handle,
+ void *rxdh);
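+
+/*
+ * Illustrative usage sketch (not part of the original patch): a minimal
+ * receive completion loop over the ring API above. Buffer handling and
+ * the hand-off to the network stack are elided; only calls declared in
+ * this header are used.
+ */
+static inline void vxge_example_rx_poll(struct __vxge_hw_ring *ring)
+{
+	void *rxdh;
+	u8 t_code;
+
+	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
+	       VXGE_HW_OK) {
+		if (t_code != VXGE_HW_RING_T_CODE_OK)
+			vxge_hw_ring_handle_tcode(ring, rxdh, t_code);
+		/* pass the buffer up the stack here, then recycle the RxD */
+		vxge_hw_ring_rxd_free(ring, rxdh);
+	}
+	/* re-post fresh descriptors for the ones just consumed */
+	vxge_hw_ring_replenish(ring);
+}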
+
+/**
+ * enum vxge_hw_frame_proto - Higher-layer ethernet protocols.
+ * @VXGE_HW_FRAME_PROTO_VLAN_TAGGED: VLAN.
+ * @VXGE_HW_FRAME_PROTO_IPV4: IPv4.
+ * @VXGE_HW_FRAME_PROTO_IPV6: IPv6.
+ * @VXGE_HW_FRAME_PROTO_IP_FRAG: IP fragmented.
+ * @VXGE_HW_FRAME_PROTO_TCP: TCP.
+ * @VXGE_HW_FRAME_PROTO_UDP: UDP.
+ * @VXGE_HW_FRAME_PROTO_TCP_OR_UDP: TCP or UDP.
+ *
+ * Higher layer ethernet protocols and options.
+ */
+enum vxge_hw_frame_proto {
+ VXGE_HW_FRAME_PROTO_VLAN_TAGGED = 0x80,
+ VXGE_HW_FRAME_PROTO_IPV4 = 0x10,
+ VXGE_HW_FRAME_PROTO_IPV6 = 0x08,
+ VXGE_HW_FRAME_PROTO_IP_FRAG = 0x04,
+ VXGE_HW_FRAME_PROTO_TCP = 0x02,
+ VXGE_HW_FRAME_PROTO_UDP = 0x01,
+ VXGE_HW_FRAME_PROTO_TCP_OR_UDP = (VXGE_HW_FRAME_PROTO_TCP | \
+ VXGE_HW_FRAME_PROTO_UDP)
+};
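+
+/*
+ * Illustrative sketch (not part of the original patch): testing the
+ * @proto bits that the adapter reports in struct vxge_hw_ring_rxd_info
+ * for a completed receive descriptor.
+ */
+static inline int
+vxge_example_is_tcp_ipv4(const struct vxge_hw_ring_rxd_info *ext_info)
+{
+	/* IP fragments carry no L4 header, so exclude them */
+	return (ext_info->proto & VXGE_HW_FRAME_PROTO_IPV4) &&
+	       (ext_info->proto & VXGE_HW_FRAME_PROTO_TCP) &&
+	       !(ext_info->proto & VXGE_HW_FRAME_PROTO_IP_FRAG);
+}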
+
+/**
+ * enum vxge_hw_fifo_gather_code - Gather codes used in fifo TxD
+ * @VXGE_HW_FIFO_GATHER_CODE_FIRST: First TxDL
+ * @VXGE_HW_FIFO_GATHER_CODE_MIDDLE: Middle TxDL
+ * @VXGE_HW_FIFO_GATHER_CODE_LAST: Last TxDL
+ * @VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST: First and Last TxDL.
+ *
+ * These gather codes are used to indicate the position of a TxD in a TxD list.
+ */
+enum vxge_hw_fifo_gather_code {
+ VXGE_HW_FIFO_GATHER_CODE_FIRST = 0x2,
+ VXGE_HW_FIFO_GATHER_CODE_MIDDLE = 0x0,
+ VXGE_HW_FIFO_GATHER_CODE_LAST = 0x1,
+ VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST = 0x3
+};
+
+/**
+ * enum vxge_hw_fifo_tcode - Transfer codes used in fifo
+ * @VXGE_HW_FIFO_T_CODE_OK: Transfer OK
+ * @VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT: PCI read transaction (either TxD or
+ * frame data) returned with corrupt data.
+ * @VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL: PCI read transaction was returned
+ * with no data.
+ * @VXGE_HW_FIFO_T_CODE_INVALID_MSS: The host attempted to send either a
+ * frame or LSO MSS that was too long (>9800B).
+ * @VXGE_HW_FIFO_T_CODE_LSO_ERROR: Error detected during TCP/UDP Large Send
+ * Offload operation, due to improper header template,
+ * unsupported protocol, etc.
+ * @VXGE_HW_FIFO_T_CODE_UNUSED: Unused
+ * @VXGE_HW_FIFO_T_CODE_MULTI_ERROR: Set to 1 by the adapter if multiple
+ * data buffer transfer errors are encountered.
+ * Otherwise it is set to 0.
+ *
+ * These tcodes are returned by various APIs that report TxD status.
+ */
+enum vxge_hw_fifo_tcode {
+ VXGE_HW_FIFO_T_CODE_OK = 0x0,
+ VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT = 0x1,
+ VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL = 0x2,
+ VXGE_HW_FIFO_T_CODE_INVALID_MSS = 0x3,
+ VXGE_HW_FIFO_T_CODE_LSO_ERROR = 0x4,
+ VXGE_HW_FIFO_T_CODE_UNUSED = 0x7,
+ VXGE_HW_FIFO_T_CODE_MULTI_ERROR = 0x8
+};
+
+enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
+ struct __vxge_hw_fifo *fifoh,
+ void **txdlh,
+ void **txdl_priv);
+
+void vxge_hw_fifo_txdl_buffer_set(
+ struct __vxge_hw_fifo *fifo_handle,
+ void *txdlh,
+ u32 frag_idx,
+ dma_addr_t dma_pointer,
+ u32 size);
+
+void vxge_hw_fifo_txdl_post(
+ struct __vxge_hw_fifo *fifo_handle,
+ void *txdlh);
+
+u32 vxge_hw_fifo_free_txdl_count_get(
+ struct __vxge_hw_fifo *fifo_handle);
+
+enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
+ struct __vxge_hw_fifo *fifoh,
+ void **txdlh,
+ enum vxge_hw_fifo_tcode *t_code);
+
+enum vxge_hw_status vxge_hw_fifo_handle_tcode(
+ struct __vxge_hw_fifo *fifoh,
+ void *txdlh,
+ enum vxge_hw_fifo_tcode t_code);
+
+void vxge_hw_fifo_txdl_free(
+ struct __vxge_hw_fifo *fifoh,
+ void *txdlh);
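+
+/*
+ * Illustrative usage sketch (not part of the original patch): posting a
+ * single-fragment frame and reaping completions with the fifo API above.
+ * The DMA mapping of the buffer and all error recovery are assumed to be
+ * handled by the caller.
+ */
+static inline enum vxge_hw_status
+vxge_example_xmit_one(struct __vxge_hw_fifo *fifo, dma_addr_t dma_ptr, u32 len)
+{
+	void *txdlh, *txdl_priv;
+	enum vxge_hw_status status;
+
+	status = vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv);
+	if (status != VXGE_HW_OK)
+		return status;	/* out of TxDLs; caller should stop the queue */
+
+	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma_ptr, len);
+	vxge_hw_fifo_txdl_post(fifo, txdlh);
+	return VXGE_HW_OK;
+}
+
+static inline void vxge_example_tx_reap(struct __vxge_hw_fifo *fifo)
+{
+	void *txdlh;
+	enum vxge_hw_fifo_tcode t_code;
+
+	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
+	       VXGE_HW_OK) {
+		if (t_code != VXGE_HW_FIFO_T_CODE_OK)
+			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
+		vxge_hw_fifo_txdl_free(fifo, txdlh);
+	}
+}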
+
+/*
+ * Device
+ */
+
+#define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8)
+#define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16)
+
+/*
+ * struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data.
+ * @dma_addr: DMA (mapped) address of _this_ descriptor.
+ * @dma_handle: DMA handle used to map the descriptor onto device.
+ * @dma_offset: Descriptor's offset in the memory block. HW allocates
+ * descriptors in memory blocks of %VXGE_HW_BLOCK_SIZE
+ * bytes. Each memblock is contiguous DMA-able memory. Each
+ * memblock contains 1 or more 4KB RxD blocks visible to the
+ * Titan hardware.
+ * @dma_object: DMA address and handle of the memory block that contains
+ * the descriptor. This member is used only in the "checked"
+ * version of the HW (to enforce certain assertions);
+ * otherwise it gets compiled out.
+ *
+ * Per-receive descriptor HW-private data. HW uses the space to keep DMA
+ * information associated with the descriptor. Note that the driver can ask
+ * HW to allocate additional per-descriptor space for its own
+ * (driver-specific) purposes.
+ */
+struct __vxge_hw_ring_rxd_priv {
+ dma_addr_t dma_addr;
+ struct pci_dev *dma_handle;
+ ptrdiff_t dma_offset;
+#ifdef VXGE_DEBUG_ASSERT
+ struct vxge_hw_mempool_dma *dma_object;
+#endif
+};
+
+struct vxge_hw_mempool_cbs {
+ void (*item_func_alloc)(
+ struct vxge_hw_mempool *mempoolh,
+ u32 memblock_index,
+ struct vxge_hw_mempool_dma *dma_object,
+ u32 index,
+ u32 is_last);
+};
+
+#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
+ ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
+
+enum vxge_hw_status
+__vxge_hw_vpath_rts_table_get(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ u32 action,
+ u32 rts_table,
+ u32 offset,
+ u64 *data1,
+ u64 *data2);
+
+enum vxge_hw_status
+__vxge_hw_vpath_rts_table_set(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ u32 action,
+ u32 rts_table,
+ u32 offset,
+ u64 data1,
+ u64 data2);
+
+enum vxge_hw_status
+__vxge_hw_vpath_enable(
+ struct __vxge_hw_device *devh,
+ u32 vp_id);
+
+void vxge_hw_device_intr_enable(
+ struct __vxge_hw_device *devh);
+
+u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *devh, u32 intr_mode);
+
+void vxge_hw_device_intr_disable(
+ struct __vxge_hw_device *devh);
+
+void vxge_hw_device_mask_all(
+ struct __vxge_hw_device *devh);
+
+void vxge_hw_device_unmask_all(
+ struct __vxge_hw_device *devh);
+
+enum vxge_hw_status vxge_hw_device_begin_irq(
+ struct __vxge_hw_device *devh,
+ u32 skip_alarms,
+ u64 *reason);
+
+void vxge_hw_device_clear_tx_rx(
+ struct __vxge_hw_device *devh);
+
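+/*
+ * Illustrative sketch (not part of the original patch): top half of an
+ * interrupt handler built on the helpers above. Bottom-half scheduling
+ * is elided; the return value says whether the device raised the
+ * interrupt.
+ */
+static inline int vxge_example_isr_top_half(struct __vxge_hw_device *devh)
+{
+	u64 reason;
+
+	if (vxge_hw_device_begin_irq(devh, 0, &reason) != VXGE_HW_OK)
+		return 0;	/* not our interrupt */
+
+	vxge_hw_device_mask_all(devh);
+	/* schedule rx/tx polling here, then unmask from the bottom half */
+	return 1;
+}
+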
+/*
+ * Virtual Paths
+ */
+
+void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring);
+
+void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo);
+
+u32 vxge_hw_vpath_id(
+ struct __vxge_hw_vpath_handle *vpath_handle);
+
+enum vxge_hw_vpath_mac_addr_add_mode {
+ VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE = 0,
+ VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE = 1,
+ VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE = 2
+};
+
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_add(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ u8 *macaddr,
+ u8 *macaddr_mask,
+ enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode);
+
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_get(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ u8 *macaddr,
+ u8 *macaddr_mask);
+
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_get_next(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ u8 *macaddr,
+ u8 *macaddr_mask);
+
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_delete(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ u8 *macaddr,
+ u8 *macaddr_mask);
+
+enum vxge_hw_status
+vxge_hw_vpath_vid_add(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ u64 vid);
+
+enum vxge_hw_status
+vxge_hw_vpath_vid_get(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ u64 *vid);
+
+enum vxge_hw_status
+vxge_hw_vpath_vid_delete(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ u64 vid);
+
+enum vxge_hw_status
+vxge_hw_vpath_etype_add(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ u64 etype);
+
+enum vxge_hw_status
+vxge_hw_vpath_etype_get(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ u64 *etype);
+
+enum vxge_hw_status
+vxge_hw_vpath_etype_get_next(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ u64 *etype);
+
+enum vxge_hw_status
+vxge_hw_vpath_etype_delete(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ u64 etype);
+
+enum vxge_hw_status vxge_hw_vpath_promisc_enable(
+ struct __vxge_hw_vpath_handle *vpath_handle);
+
+enum vxge_hw_status vxge_hw_vpath_promisc_disable(
+ struct __vxge_hw_vpath_handle *vpath_handle);
+
+enum vxge_hw_status vxge_hw_vpath_bcast_enable(
+ struct __vxge_hw_vpath_handle *vpath_handle);
+
+enum vxge_hw_status vxge_hw_vpath_mcast_enable(
+ struct __vxge_hw_vpath_handle *vpath_handle);
+
+enum vxge_hw_status vxge_hw_vpath_mcast_disable(
+ struct __vxge_hw_vpath_handle *vpath_handle);
+
+enum vxge_hw_status vxge_hw_vpath_poll_rx(
+ struct __vxge_hw_ring *ringh);
+
+enum vxge_hw_status vxge_hw_vpath_poll_tx(
+ struct __vxge_hw_fifo *fifoh,
+ struct sk_buff ***skb_ptr, int nr_skb, int *more);
+
+enum vxge_hw_status vxge_hw_vpath_alarm_process(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ u32 skip_alarms);
+
+void
+vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle,
+ int *tim_msix_id, int alarm_msix_id);
+
+void
+vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
+ int msix_id);
+
+void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id);
+
+void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
+
+void
+vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle,
+ int msix_id);
+
+enum vxge_hw_status vxge_hw_vpath_intr_enable(
+ struct __vxge_hw_vpath_handle *vpath_handle);
+
+enum vxge_hw_status vxge_hw_vpath_intr_disable(
+ struct __vxge_hw_vpath_handle *vpath_handle);
+
+void vxge_hw_vpath_inta_mask_tx_rx(
+ struct __vxge_hw_vpath_handle *vpath_handle);
+
+void vxge_hw_vpath_inta_unmask_tx_rx(
+ struct __vxge_hw_vpath_handle *vpath_handle);
+
+void
+vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
+
+void
+vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
+
+void
+vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id);
+
+void
+vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
+ void **dtrh);
+
+void
+vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel);
+
+void
+vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
+
+int
+vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
+
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo);
+
+void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring);
+
+#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-version.h b/drivers/net/ethernet/neterion/vxge/vxge-version.h
new file mode 100644
index 000000000000..b9efa28bab3e
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-version.h
@@ -0,0 +1,49 @@
+/******************************************************************************
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ *
+ * vxge-version.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
+ * Virtualized Server Adapter.
+ * Copyright(c) 2002-2010 Exar Corp.
+ ******************************************************************************/
+#ifndef VXGE_VERSION_H
+#define VXGE_VERSION_H
+
+#define VXGE_VERSION_MAJOR "2"
+#define VXGE_VERSION_MINOR "5"
+#define VXGE_VERSION_FIX "3"
+#define VXGE_VERSION_BUILD "22640"
+#define VXGE_VERSION_FOR "k"
+
+#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
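+/*
+ * Worked example: VXGE_FW_VER(1, 8, 1) evaluates to
+ * (1 << 16) + (8 << 8) + 1 = 0x010801.
+ */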
+
+#define VXGE_DEAD_FW_VER_MAJOR 1
+#define VXGE_DEAD_FW_VER_MINOR 4
+#define VXGE_DEAD_FW_VER_BUILD 4
+
+#define VXGE_FW_DEAD_VER VXGE_FW_VER(VXGE_DEAD_FW_VER_MAJOR, \
+ VXGE_DEAD_FW_VER_MINOR, \
+ VXGE_DEAD_FW_VER_BUILD)
+
+#define VXGE_EPROM_FW_VER_MAJOR 1
+#define VXGE_EPROM_FW_VER_MINOR 6
+#define VXGE_EPROM_FW_VER_BUILD 1
+
+#define VXGE_EPROM_FW_VER VXGE_FW_VER(VXGE_EPROM_FW_VER_MAJOR, \
+ VXGE_EPROM_FW_VER_MINOR, \
+ VXGE_EPROM_FW_VER_BUILD)
+
+#define VXGE_CERT_FW_VER_MAJOR 1
+#define VXGE_CERT_FW_VER_MINOR 8
+#define VXGE_CERT_FW_VER_BUILD 1
+
+#define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \
+ VXGE_CERT_FW_VER_MINOR, \
+ VXGE_CERT_FW_VER_BUILD)
+
+#endif
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
new file mode 100644
index 000000000000..8d288af16fc9
--- /dev/null
+++ b/drivers/net/ethernet/netx-eth.c
@@ -0,0 +1,515 @@
+/*
+ * drivers/net/netx-eth.c
+ *
+ * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/mii.h>
+
+#include <asm/io.h>
+#include <mach/hardware.h>
+#include <mach/netx-regs.h>
+#include <mach/pfifo.h>
+#include <mach/xc.h>
+#include <mach/eth.h>
+
+/* XC Fifo Offsets */
+#define EMPTY_PTR_FIFO(xcno) (0 + ((xcno) << 3)) /* Index of the empty pointer FIFO */
+#define IND_FIFO_PORT_HI(xcno) (1 + ((xcno) << 3)) /* Index of the FIFO where the XC */
+ /* indicates received data packets */
+#define IND_FIFO_PORT_LO(xcno) (2 + ((xcno) << 3)) /* Index of the FIFO where the XC */
+ /* indicates received data packets */
+#define REQ_FIFO_PORT_HI(xcno) (3 + ((xcno) << 3)) /* Index of the FIFO where the ARM */
+ /* posts data packets that shall be sent */
+#define REQ_FIFO_PORT_LO(xcno) (4 + ((xcno) << 3)) /* Index of the FIFO where the ARM */
+ /* posts data packets that shall be sent */
+#define CON_FIFO_PORT_HI(xcno) (5 + ((xcno) << 3)) /* Index of the FIFO where sent data */
+ /* packets are confirmed */
+#define CON_FIFO_PORT_LO(xcno) (6 + ((xcno) << 3)) /* Index of the FIFO where sent data */
+ /* packets are confirmed */
+#define PFIFO_MASK(xcno) (0x7f << (xcno*8))
+
+#define FIFO_PTR_FRAMELEN_SHIFT 0
+#define FIFO_PTR_FRAMELEN_MASK (0x7ff << 0)
+#define FIFO_PTR_FRAMELEN(len) (((len) << 0) & FIFO_PTR_FRAMELEN_MASK)
+#define FIFO_PTR_TIMETRIG (1<<11)
+#define FIFO_PTR_MULTI_REQ
+#define FIFO_PTR_ORIGIN (1<<14)
+#define FIFO_PTR_VLAN (1<<15)
+#define FIFO_PTR_FRAMENO_SHIFT 16
+#define FIFO_PTR_FRAMENO_MASK (0x3f << 16)
+#define FIFO_PTR_FRAMENO(no) (((no) << 16) & FIFO_PTR_FRAMENO_MASK)
+#define FIFO_PTR_SEGMENT_SHIFT 22
+#define FIFO_PTR_SEGMENT_MASK (0xf << 22)
+#define FIFO_PTR_SEGMENT(seg) (((seg) & 0xf) << 22)
+#define FIFO_PTR_ERROR_SHIFT 28
+#define FIFO_PTR_ERROR_MASK (0xf << 28)
+
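+/*
+ * Illustrative sketch (not part of the original driver): a FIFO pointer
+ * word packs frame length, frame number and segment with the masks
+ * above; netx_eth_receive() below performs the matching decode.
+ */
+static inline unsigned int netx_fifo_ptr_pack(unsigned int seg,
+					      unsigned int frameno,
+					      unsigned int len)
+{
+	return FIFO_PTR_SEGMENT(seg) | FIFO_PTR_FRAMENO(frameno) |
+	       FIFO_PTR_FRAMELEN(len);
+}
+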
+#define ISR_LINK_STATUS_CHANGE (1<<4)
+#define ISR_IND_LO (1<<3)
+#define ISR_CON_LO (1<<2)
+#define ISR_IND_HI (1<<1)
+#define ISR_CON_HI (1<<0)
+
+#define ETH_MAC_LOCAL_CONFIG 0x1560
+#define ETH_MAC_4321 0x1564
+#define ETH_MAC_65 0x1568
+
+#define MAC_TRAFFIC_CLASS_ARRANGEMENT_SHIFT 16
+#define MAC_TRAFFIC_CLASS_ARRANGEMENT_MASK (0xf<<MAC_TRAFFIC_CLASS_ARRANGEMENT_SHIFT)
+#define MAC_TRAFFIC_CLASS_ARRANGEMENT(x) (((x)<<MAC_TRAFFIC_CLASS_ARRANGEMENT_SHIFT) & MAC_TRAFFIC_CLASS_ARRANGEMENT_MASK)
+#define LOCAL_CONFIG_LINK_STATUS_IRQ_EN (1<<24)
+#define LOCAL_CONFIG_CON_LO_IRQ_EN (1<<23)
+#define LOCAL_CONFIG_CON_HI_IRQ_EN (1<<22)
+#define LOCAL_CONFIG_IND_LO_IRQ_EN (1<<21)
+#define LOCAL_CONFIG_IND_HI_IRQ_EN (1<<20)
+
+#define CARDNAME "netx-eth"
+
+/* LSB must be zero */
+#define INTERNAL_PHY_ADR 0x1c
+
+struct netx_eth_priv {
+ void __iomem *sram_base, *xpec_base, *xmac_base;
+ int id;
+ struct mii_if_info mii;
+ u32 msg_enable;
+ struct xc *xc;
+ spinlock_t lock;
+};
+
+static void netx_eth_set_multicast_list(struct net_device *ndev)
+{
+ /* implement me */
+}
+
+static int
+netx_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct netx_eth_priv *priv = netdev_priv(ndev);
+ unsigned char *buf = skb->data;
+ unsigned int len = skb->len;
+
+ spin_lock_irq(&priv->lock);
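+	/* SRAM frame buffers are laid out at 1560-byte strides; the tx
+	 * path uses frame number 1, i.e. offset 1 * 1560 */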
+ memcpy_toio(priv->sram_base + 1560, (void *)buf, len);
+ if (len < 60) {
+ memset_io(priv->sram_base + 1560 + len, 0, 60 - len);
+ len = 60;
+ }
+
+ pfifo_push(REQ_FIFO_PORT_LO(priv->id),
+ FIFO_PTR_SEGMENT(priv->id) |
+ FIFO_PTR_FRAMENO(1) |
+ FIFO_PTR_FRAMELEN(len));
+
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+
+ netif_stop_queue(ndev);
+ spin_unlock_irq(&priv->lock);
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static void netx_eth_receive(struct net_device *ndev)
+{
+ struct netx_eth_priv *priv = netdev_priv(ndev);
+ unsigned int val, frameno, seg, len;
+ unsigned char *data;
+ struct sk_buff *skb;
+
+ val = pfifo_pop(IND_FIFO_PORT_LO(priv->id));
+
+ frameno = (val & FIFO_PTR_FRAMENO_MASK) >> FIFO_PTR_FRAMENO_SHIFT;
+ seg = (val & FIFO_PTR_SEGMENT_MASK) >> FIFO_PTR_SEGMENT_SHIFT;
+ len = (val & FIFO_PTR_FRAMELEN_MASK) >> FIFO_PTR_FRAMELEN_SHIFT;
+
+ skb = dev_alloc_skb(len);
+ if (unlikely(skb == NULL)) {
+ printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
+ ndev->name);
+ ndev->stats.rx_dropped++;
+ return;
+ }
+
+ data = skb_put(skb, len);
+
+ memcpy_fromio(data, priv->sram_base + frameno * 1560, len);
+
+ pfifo_push(EMPTY_PTR_FIFO(priv->id),
+ FIFO_PTR_SEGMENT(seg) | FIFO_PTR_FRAMENO(frameno));
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_rx(skb);
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+}
+
+static irqreturn_t
+netx_eth_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct netx_eth_priv *priv = netdev_priv(ndev);
+ int status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ status = readl(NETX_PFIFO_XPEC_ISR(priv->id));
+ while (status) {
+ int fill_level;
+ writel(status, NETX_PFIFO_XPEC_ISR(priv->id));
+
+		if ((status & ISR_CON_HI) || (status & ISR_IND_HI))
+			printk(KERN_WARNING "%s: unexpected status: 0x%08x\n",
+			       __func__, status);
+
+ fill_level =
+ readl(NETX_PFIFO_FILL_LEVEL(IND_FIFO_PORT_LO(priv->id)));
+ while (fill_level--)
+ netx_eth_receive(ndev);
+
+ if (status & ISR_CON_LO)
+ netif_wake_queue(ndev);
+
+ if (status & ISR_LINK_STATUS_CHANGE)
+ mii_check_media(&priv->mii, netif_msg_link(priv), 1);
+
+ status = readl(NETX_PFIFO_XPEC_ISR(priv->id));
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static int netx_eth_open(struct net_device *ndev)
+{
+ struct netx_eth_priv *priv = netdev_priv(ndev);
+
+ if (request_irq
+ (ndev->irq, netx_eth_interrupt, IRQF_SHARED, ndev->name, ndev))
+ return -EAGAIN;
+
+ writel(ndev->dev_addr[0] |
+ ndev->dev_addr[1]<<8 |
+ ndev->dev_addr[2]<<16 |
+ ndev->dev_addr[3]<<24,
+ priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_4321);
+ writel(ndev->dev_addr[4] |
+ ndev->dev_addr[5]<<8,
+ priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_65);
+
+ writel(LOCAL_CONFIG_LINK_STATUS_IRQ_EN |
+ LOCAL_CONFIG_CON_LO_IRQ_EN |
+ LOCAL_CONFIG_CON_HI_IRQ_EN |
+ LOCAL_CONFIG_IND_LO_IRQ_EN |
+ LOCAL_CONFIG_IND_HI_IRQ_EN,
+ priv->xpec_base + NETX_XPEC_RAM_START_OFS +
+ ETH_MAC_LOCAL_CONFIG);
+
+ mii_check_media(&priv->mii, netif_msg_link(priv), 1);
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int netx_eth_close(struct net_device *ndev)
+{
+ struct netx_eth_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+
+ writel(0,
+ priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_LOCAL_CONFIG);
+
+ free_irq(ndev->irq, ndev);
+
+ return 0;
+}
+
+static void netx_eth_timeout(struct net_device *ndev)
+{
+ struct netx_eth_priv *priv = netdev_priv(ndev);
+ int i;
+
+ printk(KERN_ERR "%s: transmit timed out, resetting\n", ndev->name);
+
+ spin_lock_irq(&priv->lock);
+
+ xc_reset(priv->xc);
+ xc_start(priv->xc);
+
+	for (i = 2; i <= 18; i++)
+ pfifo_push(EMPTY_PTR_FIFO(priv->id),
+ FIFO_PTR_FRAMENO(i) | FIFO_PTR_SEGMENT(priv->id));
+
+ spin_unlock_irq(&priv->lock);
+
+ netif_wake_queue(ndev);
+}
+
+static int
+netx_eth_phy_read(struct net_device *ndev, int phy_id, int reg)
+{
+ unsigned int val;
+
+ val = MIIMU_SNRDY | MIIMU_PREAMBLE | MIIMU_PHYADDR(phy_id) |
+ MIIMU_REGADDR(reg) | MIIMU_PHY_NRES;
+
+ writel(val, NETX_MIIMU);
+ while (readl(NETX_MIIMU) & MIIMU_SNRDY);
+
+	return readl(NETX_MIIMU) >> 16;
+}
+
+static void
+netx_eth_phy_write(struct net_device *ndev, int phy_id, int reg, int value)
+{
+ unsigned int val;
+
+ val = MIIMU_SNRDY | MIIMU_PREAMBLE | MIIMU_PHYADDR(phy_id) |
+ MIIMU_REGADDR(reg) | MIIMU_PHY_NRES | MIIMU_OPMODE_WRITE |
+ MIIMU_DATA(value);
+
+ writel(val, NETX_MIIMU);
+ while (readl(NETX_MIIMU) & MIIMU_SNRDY);
+}
+
+static const struct net_device_ops netx_eth_netdev_ops = {
+ .ndo_open = netx_eth_open,
+ .ndo_stop = netx_eth_close,
+ .ndo_start_xmit = netx_eth_hard_start_xmit,
+ .ndo_tx_timeout = netx_eth_timeout,
+ .ndo_set_rx_mode = netx_eth_set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int netx_eth_enable(struct net_device *ndev)
+{
+ struct netx_eth_priv *priv = netdev_priv(ndev);
+ unsigned int mac4321, mac65;
+ int running, i;
+
+ ether_setup(ndev);
+
+ ndev->netdev_ops = &netx_eth_netdev_ops;
+ ndev->watchdog_timeo = msecs_to_jiffies(5000);
+
+ priv->msg_enable = NETIF_MSG_LINK;
+ priv->mii.phy_id_mask = 0x1f;
+ priv->mii.reg_num_mask = 0x1f;
+ priv->mii.force_media = 0;
+ priv->mii.full_duplex = 0;
+ priv->mii.dev = ndev;
+ priv->mii.mdio_read = netx_eth_phy_read;
+ priv->mii.mdio_write = netx_eth_phy_write;
+ priv->mii.phy_id = INTERNAL_PHY_ADR + priv->id;
+
+ running = xc_running(priv->xc);
+ xc_stop(priv->xc);
+
+ /* if the xc engine is already running, assume the bootloader has
+ * loaded the firmware for us
+ */
+ if (running) {
+ /* get Node Address from hardware */
+ mac4321 = readl(priv->xpec_base +
+ NETX_XPEC_RAM_START_OFS + ETH_MAC_4321);
+ mac65 = readl(priv->xpec_base +
+ NETX_XPEC_RAM_START_OFS + ETH_MAC_65);
+
+ ndev->dev_addr[0] = mac4321 & 0xff;
+ ndev->dev_addr[1] = (mac4321 >> 8) & 0xff;
+ ndev->dev_addr[2] = (mac4321 >> 16) & 0xff;
+ ndev->dev_addr[3] = (mac4321 >> 24) & 0xff;
+ ndev->dev_addr[4] = mac65 & 0xff;
+ ndev->dev_addr[5] = (mac65 >> 8) & 0xff;
+ } else {
+ if (xc_request_firmware(priv->xc)) {
+ printk(CARDNAME ": requesting firmware failed\n");
+ return -ENODEV;
+ }
+ }
+
+ xc_reset(priv->xc);
+ xc_start(priv->xc);
+
+	if (!is_valid_ether_addr(ndev->dev_addr))
+		printk(KERN_WARNING "%s: Invalid ethernet MAC address. Please "
+		       "set using ifconfig\n", ndev->name);
+
+	for (i = 2; i <= 18; i++)
+ pfifo_push(EMPTY_PTR_FIFO(priv->id),
+ FIFO_PTR_FRAMENO(i) | FIFO_PTR_SEGMENT(priv->id));
+
+	return register_netdev(ndev);
+}
+
+static int netx_eth_drv_probe(struct platform_device *pdev)
+{
+ struct netx_eth_priv *priv;
+ struct net_device *ndev;
+ struct netxeth_platform_data *pdata;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof (struct netx_eth_priv));
+ if (!ndev) {
+		printk(KERN_ERR "%s: could not allocate device.\n", CARDNAME);
+ ret = -ENOMEM;
+ goto exit;
+ }
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ platform_set_drvdata(pdev, ndev);
+
+ priv = netdev_priv(ndev);
+
+ pdata = (struct netxeth_platform_data *)pdev->dev.platform_data;
+ priv->xc = request_xc(pdata->xcno, &pdev->dev);
+ if (!priv->xc) {
+ dev_err(&pdev->dev, "unable to request xc engine\n");
+ ret = -ENODEV;
+ goto exit_free_netdev;
+ }
+
+ ndev->irq = priv->xc->irq;
+ priv->id = pdev->id;
+ priv->xpec_base = priv->xc->xpec_base;
+ priv->xmac_base = priv->xc->xmac_base;
+ priv->sram_base = priv->xc->sram_base;
+
+ spin_lock_init(&priv->lock);
+
+ ret = pfifo_request(PFIFO_MASK(priv->id));
+ if (ret) {
+		printk(KERN_ERR "unable to request PFIFO\n");
+ goto exit_free_xc;
+ }
+
+ ret = netx_eth_enable(ndev);
+ if (ret)
+ goto exit_free_pfifo;
+
+ return 0;
+exit_free_pfifo:
+ pfifo_free(PFIFO_MASK(priv->id));
+exit_free_xc:
+ free_xc(priv->xc);
+exit_free_netdev:
+ platform_set_drvdata(pdev, NULL);
+ free_netdev(ndev);
+exit:
+ return ret;
+}
+
+static int netx_eth_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = dev_get_drvdata(&pdev->dev);
+ struct netx_eth_priv *priv = netdev_priv(ndev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ unregister_netdev(ndev);
+ xc_stop(priv->xc);
+ free_xc(priv->xc);
+ free_netdev(ndev);
+ pfifo_free(PFIFO_MASK(priv->id));
+
+ return 0;
+}
+
+static int netx_eth_drv_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ dev_err(&pdev->dev, "suspend not implemented\n");
+ return 0;
+}
+
+static int netx_eth_drv_resume(struct platform_device *pdev)
+{
+ dev_err(&pdev->dev, "resume not implemented\n");
+ return 0;
+}
+
+static struct platform_driver netx_eth_driver = {
+ .probe = netx_eth_drv_probe,
+ .remove = netx_eth_drv_remove,
+ .suspend = netx_eth_drv_suspend,
+ .resume = netx_eth_drv_resume,
+ .driver = {
+ .name = CARDNAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init netx_eth_init(void)
+{
+ unsigned int phy_control, val;
+
+	printk(KERN_INFO "NetX Ethernet driver\n");
+
+ phy_control = PHY_CONTROL_PHY_ADDRESS(INTERNAL_PHY_ADR>>1) |
+ PHY_CONTROL_PHY1_MODE(PHY_MODE_ALL) |
+ PHY_CONTROL_PHY1_AUTOMDIX |
+ PHY_CONTROL_PHY1_EN |
+ PHY_CONTROL_PHY0_MODE(PHY_MODE_ALL) |
+ PHY_CONTROL_PHY0_AUTOMDIX |
+ PHY_CONTROL_PHY0_EN |
+ PHY_CONTROL_CLK_XLATIN;
+
+ val = readl(NETX_SYSTEM_IOC_ACCESS_KEY);
+ writel(val, NETX_SYSTEM_IOC_ACCESS_KEY);
+
+ writel(phy_control | PHY_CONTROL_RESET, NETX_SYSTEM_PHY_CONTROL);
+ udelay(100);
+
+ val = readl(NETX_SYSTEM_IOC_ACCESS_KEY);
+ writel(val, NETX_SYSTEM_IOC_ACCESS_KEY);
+
+ writel(phy_control, NETX_SYSTEM_PHY_CONTROL);
+
+ return platform_driver_register(&netx_eth_driver);
+}
+
+static void __exit netx_eth_cleanup(void)
+{
+ platform_driver_unregister(&netx_eth_driver);
+}
+
+module_init(netx_eth_init);
+module_exit(netx_eth_cleanup);
+
+MODULE_AUTHOR("Sascha Hauer, Pengutronix");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" CARDNAME);
+MODULE_FIRMWARE("xc0.bin");
+MODULE_FIRMWARE("xc1.bin");
+MODULE_FIRMWARE("xc2.bin");
diff --git a/drivers/net/ethernet/nuvoton/Kconfig b/drivers/net/ethernet/nuvoton/Kconfig
new file mode 100644
index 000000000000..334c17183095
--- /dev/null
+++ b/drivers/net/ethernet/nuvoton/Kconfig
@@ -0,0 +1,31 @@
+#
+# Nuvoton network device configuration
+#
+
+config NET_VENDOR_NUVOTON
+ bool "Nuvoton devices"
+ default y
+ depends on ARM && ARCH_W90X900
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Nuvoton cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_NUVOTON
+
+config W90P910_ETH
+ tristate "Nuvoton w90p910 Ethernet support"
+ depends on ARM && ARCH_W90X900
+ select PHYLIB
+ select NET_CORE
+ select MII
+ ---help---
+ Say Y here if you want to use built-in Ethernet ports
+ on w90p910 processor.
+
+endif # NET_VENDOR_NUVOTON
diff --git a/drivers/net/ethernet/nuvoton/Makefile b/drivers/net/ethernet/nuvoton/Makefile
new file mode 100644
index 000000000000..171aa044bd3b
--- /dev/null
+++ b/drivers/net/ethernet/nuvoton/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Nuvoton network device drivers.
+#
+
+obj-$(CONFIG_W90P910_ETH) += w90p910_ether.o
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
new file mode 100644
index 000000000000..f1bfb8f8fcf0
--- /dev/null
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -0,0 +1,1123 @@
+/*
+ * Copyright (c) 2008-2009 Nuvoton technology corporation.
+ *
+ * Wan ZongShun <mcuos.com@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/gfp.h>
+
+#define DRV_MODULE_NAME "w90p910-emc"
+#define DRV_MODULE_VERSION "0.1"
+
+/* Ethernet MAC Registers */
+#define REG_CAMCMR 0x00
+#define REG_CAMEN 0x04
+#define REG_CAMM_BASE 0x08
+#define REG_CAML_BASE 0x0c
+#define REG_TXDLSA 0x88
+#define REG_RXDLSA 0x8C
+#define REG_MCMDR 0x90
+#define REG_MIID 0x94
+#define REG_MIIDA 0x98
+#define REG_FFTCR 0x9C
+#define REG_TSDR 0xa0
+#define REG_RSDR 0xa4
+#define REG_DMARFC 0xa8
+#define REG_MIEN 0xac
+#define REG_MISTA 0xb0
+#define REG_CTXDSA 0xcc
+#define REG_CTXBSA 0xd0
+#define REG_CRXDSA 0xd4
+#define REG_CRXBSA 0xd8
+
+/* mac controller bit */
+#define MCMDR_RXON 0x01
+#define MCMDR_ACP (0x01 << 3)
+#define MCMDR_SPCRC (0x01 << 5)
+#define MCMDR_TXON (0x01 << 8)
+#define MCMDR_FDUP (0x01 << 18)
+#define MCMDR_ENMDC (0x01 << 19)
+#define MCMDR_OPMOD (0x01 << 20)
+#define SWR (0x01 << 24)
+
+/* cam command register */
+#define CAMCMR_AUP 0x01
+#define CAMCMR_AMP (0x01 << 1)
+#define CAMCMR_ABP (0x01 << 2)
+#define CAMCMR_CCAM (0x01 << 3)
+#define CAMCMR_ECMP (0x01 << 4)
+#define CAM0EN 0x01
+
+/* mac mii controller bit */
+#define MDCCR (0x0a << 20)
+#define PHYAD (0x01 << 8)
+#define PHYWR (0x01 << 16)
+#define PHYBUSY (0x01 << 17)
+#define PHYPRESP (0x01 << 18)
+#define CAM_ENTRY_SIZE 0x08
+
+/* rx and tx status */
+#define TXDS_TXCP (0x01 << 19)
+#define RXDS_CRCE (0x01 << 17)
+#define RXDS_PTLE (0x01 << 19)
+#define RXDS_RXGD (0x01 << 20)
+#define RXDS_ALIE (0x01 << 21)
+#define RXDS_RP (0x01 << 22)
+
+/* mac interrupt status */
+#define MISTA_EXDEF (0x01 << 19)
+#define MISTA_TXBERR (0x01 << 24)
+#define MISTA_TDU (0x01 << 23)
+#define MISTA_RDU (0x01 << 10)
+#define MISTA_RXBERR (0x01 << 11)
+
+#define ENSTART 0x01
+#define ENRXINTR 0x01
+#define ENRXGD (0x01 << 4)
+#define ENRXBERR (0x01 << 11)
+#define ENTXINTR (0x01 << 16)
+#define ENTXCP (0x01 << 18)
+#define ENTXABT (0x01 << 21)
+#define ENTXBERR (0x01 << 24)
+#define ENMDC (0x01 << 19)
+#define MDCCR_VAL 0xa00000
+
+/* rx and tx owner bit */
+#define RX_OWEN_DMA (0x01 << 31)
+#define RX_OWEN_CPU (~(0x03 << 30))
+#define TX_OWEN_DMA (0x01 << 31)
+#define TX_OWEN_CPU (~(0x01 << 31))
+
+/* tx frame desc controller bit */
+#define MACTXINTEN 0x04
+#define CRCMODE 0x02
+#define PADDINGMODE 0x01
+
+/* fftcr controller bit */
+#define TXTHD (0x03 << 8)
+#define BLENGTH (0x01 << 20)
+
+/* global setting for driver */
+#define RX_DESC_SIZE 50
+#define TX_DESC_SIZE 10
+#define MAX_RBUFF_SZ 0x600
+#define MAX_TBUFF_SZ 0x600
+#define TX_TIMEOUT (HZ/2)
+#define DELAY 1000
+#define CAM0 0x0
+
+static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg);
+
+struct w90p910_rxbd {
+ unsigned int sl;
+ unsigned int buffer;
+ unsigned int reserved;
+ unsigned int next;
+};
+
+struct w90p910_txbd {
+ unsigned int mode;
+ unsigned int buffer;
+ unsigned int sl;
+ unsigned int next;
+};
+
+struct recv_pdesc {
+ struct w90p910_rxbd desclist[RX_DESC_SIZE];
+ char recv_buf[RX_DESC_SIZE][MAX_RBUFF_SZ];
+};
+
+struct tran_pdesc {
+ struct w90p910_txbd desclist[TX_DESC_SIZE];
+ char tran_buf[TX_DESC_SIZE][MAX_TBUFF_SZ];
+};
+
+struct w90p910_ether {
+ struct recv_pdesc *rdesc;
+ struct tran_pdesc *tdesc;
+ dma_addr_t rdesc_phys;
+ dma_addr_t tdesc_phys;
+ struct net_device_stats stats;
+ struct platform_device *pdev;
+ struct resource *res;
+ struct sk_buff *skb;
+ struct clk *clk;
+ struct clk *rmiiclk;
+ struct mii_if_info mii;
+ struct timer_list check_timer;
+ void __iomem *reg;
+ int rxirq;
+ int txirq;
+ unsigned int cur_tx;
+ unsigned int cur_rx;
+ unsigned int finish_tx;
+ unsigned int rx_packets;
+ unsigned int rx_bytes;
+ unsigned int start_tx_ptr;
+ unsigned int start_rx_ptr;
+ unsigned int linkflag;
+};
+
+static void update_linkspeed_register(struct net_device *dev,
+ unsigned int speed, unsigned int duplex)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+ unsigned int val;
+
+ val = __raw_readl(ether->reg + REG_MCMDR);
+
+ if (speed == SPEED_100) {
+ /* 100 full/half duplex */
+ if (duplex == DUPLEX_FULL) {
+ val |= (MCMDR_OPMOD | MCMDR_FDUP);
+ } else {
+ val |= MCMDR_OPMOD;
+ val &= ~MCMDR_FDUP;
+ }
+ } else {
+ /* 10 full/half duplex */
+ if (duplex == DUPLEX_FULL) {
+ val |= MCMDR_FDUP;
+ val &= ~MCMDR_OPMOD;
+ } else {
+ val &= ~(MCMDR_FDUP | MCMDR_OPMOD);
+ }
+ }
+
+ __raw_writel(val, ether->reg + REG_MCMDR);
+}
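+
+/*
+ * Illustrative summary (editorial) of the bits driven above:
+ * MCMDR_OPMOD selects 100 Mbit operation (clear = 10 Mbit) and
+ * MCMDR_FDUP selects full duplex (clear = half), giving the four
+ * speed/duplex combinations.
+ */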
+
+static void update_linkspeed(struct net_device *dev)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+ struct platform_device *pdev;
+ unsigned int bmsr, bmcr, lpa, speed, duplex;
+
+ pdev = ether->pdev;
+
+ if (!mii_link_ok(&ether->mii)) {
+ ether->linkflag = 0x0;
+ netif_carrier_off(dev);
+ dev_warn(&pdev->dev, "%s: Link down.\n", dev->name);
+ return;
+ }
+
+ if (ether->linkflag == 1)
+ return;
+
+ bmsr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMSR);
+ bmcr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMCR);
+
+ if (bmcr & BMCR_ANENABLE) {
+ if (!(bmsr & BMSR_ANEGCOMPLETE))
+ return;
+
+ lpa = w90p910_mdio_read(dev, ether->mii.phy_id, MII_LPA);
+
+ if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF))
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
+ if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL))
+ duplex = DUPLEX_FULL;
+ else
+ duplex = DUPLEX_HALF;
+
+ } else {
+ speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
+ duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+ }
+
+ update_linkspeed_register(dev, speed, duplex);
+
+ dev_info(&pdev->dev, "%s: Link now %i-%s\n", dev->name, speed,
+ (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
+ ether->linkflag = 0x01;
+
+ netif_carrier_on(dev);
+}
+
+static void w90p910_check_link(unsigned long dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct w90p910_ether *ether = netdev_priv(dev);
+
+ update_linkspeed(dev);
+ mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000));
+}
+
+static void w90p910_write_cam(struct net_device *dev,
+ unsigned int x, unsigned char *pval)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+ unsigned int msw, lsw;
+
+ msw = (pval[0] << 24) | (pval[1] << 16) | (pval[2] << 8) | pval[3];
+
+ lsw = (pval[4] << 24) | (pval[5] << 16);
+
+ __raw_writel(lsw, ether->reg + REG_CAML_BASE + x * CAM_ENTRY_SIZE);
+ __raw_writel(msw, ether->reg + REG_CAMM_BASE + x * CAM_ENTRY_SIZE);
+}
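+
+/*
+ * Worked example (editorial): for the default MAC 00:02:ac:55:88:a8
+ * set in get_mac_address() below, the CAM words come out as
+ * msw = 0x0002ac55 and lsw = 0x88a80000.
+ */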
+
+static int w90p910_init_desc(struct net_device *dev)
+{
+ struct w90p910_ether *ether;
+ struct w90p910_txbd *tdesc;
+ struct w90p910_rxbd *rdesc;
+ struct platform_device *pdev;
+ unsigned int i;
+
+ ether = netdev_priv(dev);
+ pdev = ether->pdev;
+
+ ether->tdesc = (struct tran_pdesc *)
+ dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc),
+ &ether->tdesc_phys, GFP_KERNEL);
+
+ if (!ether->tdesc) {
+ dev_err(&pdev->dev, "Failed to allocate memory for tx desc\n");
+ return -ENOMEM;
+ }
+
+ ether->rdesc = (struct recv_pdesc *)
+ dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc),
+ &ether->rdesc_phys, GFP_KERNEL);
+
+ if (!ether->rdesc) {
+ dev_err(&pdev->dev, "Failed to allocate memory for rx desc\n");
+ dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc),
+ ether->tdesc, ether->tdesc_phys);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < TX_DESC_SIZE; i++) {
+ unsigned int offset;
+
+ tdesc = &(ether->tdesc->desclist[i]);
+
+ if (i == TX_DESC_SIZE - 1)
+ offset = offsetof(struct tran_pdesc, desclist[0]);
+ else
+ offset = offsetof(struct tran_pdesc, desclist[i + 1]);
+
+ tdesc->next = ether->tdesc_phys + offset;
+ tdesc->buffer = ether->tdesc_phys +
+ offsetof(struct tran_pdesc, tran_buf[i]);
+ tdesc->sl = 0;
+ tdesc->mode = 0;
+ }
+
+ ether->start_tx_ptr = ether->tdesc_phys;
+
+ for (i = 0; i < RX_DESC_SIZE; i++) {
+ unsigned int offset;
+
+ rdesc = &(ether->rdesc->desclist[i]);
+
+ if (i == RX_DESC_SIZE - 1)
+ offset = offsetof(struct recv_pdesc, desclist[0]);
+ else
+ offset = offsetof(struct recv_pdesc, desclist[i + 1]);
+
+ rdesc->next = ether->rdesc_phys + offset;
+ rdesc->sl = RX_OWEN_DMA;
+ rdesc->buffer = ether->rdesc_phys +
+ offsetof(struct recv_pdesc, recv_buf[i]);
+ }
+
+ ether->start_rx_ptr = ether->rdesc_phys;
+
+ return 0;
+}
+
+static void w90p910_set_fifo_threshold(struct net_device *dev)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+ unsigned int val;
+
+ val = TXTHD | BLENGTH;
+ __raw_writel(val, ether->reg + REG_FFTCR);
+}
+
+static void w90p910_return_default_idle(struct net_device *dev)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+ unsigned int val;
+
+ val = __raw_readl(ether->reg + REG_MCMDR);
+ val |= SWR;
+ __raw_writel(val, ether->reg + REG_MCMDR);
+}
+
+static void w90p910_trigger_rx(struct net_device *dev)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+
+ __raw_writel(ENSTART, ether->reg + REG_RSDR);
+}
+
+static void w90p910_trigger_tx(struct net_device *dev)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+
+ __raw_writel(ENSTART, ether->reg + REG_TSDR);
+}
+
+static void w90p910_enable_mac_interrupt(struct net_device *dev)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+ unsigned int val;
+
+ val = ENTXINTR | ENRXINTR | ENRXGD | ENTXCP;
+ val |= ENTXBERR | ENRXBERR | ENTXABT;
+
+ __raw_writel(val, ether->reg + REG_MIEN);
+}
+
+static void w90p910_get_and_clear_int(struct net_device *dev,
+ unsigned int *val)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+
+ *val = __raw_readl(ether->reg + REG_MISTA);
+ __raw_writel(*val, ether->reg + REG_MISTA);
+}
+
+static void w90p910_set_global_maccmd(struct net_device *dev)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+ unsigned int val;
+
+ val = __raw_readl(ether->reg + REG_MCMDR);
+ val |= MCMDR_SPCRC | MCMDR_ENMDC | MCMDR_ACP;
+ __raw_writel(val, ether->reg + REG_MCMDR);
+}
+
+static void w90p910_enable_cam(struct net_device *dev)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+ unsigned int val;
+
+ w90p910_write_cam(dev, CAM0, dev->dev_addr);
+
+ val = __raw_readl(ether->reg + REG_CAMEN);
+ val |= CAM0EN;
+ __raw_writel(val, ether->reg + REG_CAMEN);
+}
+
+static void w90p910_enable_cam_command(struct net_device *dev)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+ unsigned int val;
+
+ val = CAMCMR_ECMP | CAMCMR_ABP | CAMCMR_AMP;
+ __raw_writel(val, ether->reg + REG_CAMCMR);
+}
+
+static void w90p910_enable_tx(struct net_device *dev, unsigned int enable)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+ unsigned int val;
+
+ val = __raw_readl(ether->reg + REG_MCMDR);
+
+ if (enable)
+ val |= MCMDR_TXON;
+ else
+ val &= ~MCMDR_TXON;
+
+ __raw_writel(val, ether->reg + REG_MCMDR);
+}
+
+static void w90p910_enable_rx(struct net_device *dev, unsigned int enable)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+ unsigned int val;
+
+ val = __raw_readl(ether->reg + REG_MCMDR);
+
+ if (enable)
+ val |= MCMDR_RXON;
+ else
+ val &= ~MCMDR_RXON;
+
+ __raw_writel(val, ether->reg + REG_MCMDR);
+}
+
+static void w90p910_set_curdest(struct net_device *dev)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+
+ __raw_writel(ether->start_rx_ptr, ether->reg + REG_RXDLSA);
+ __raw_writel(ether->start_tx_ptr, ether->reg + REG_TXDLSA);
+}
+
+static void w90p910_reset_mac(struct net_device *dev)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+
+ w90p910_enable_tx(dev, 0);
+ w90p910_enable_rx(dev, 0);
+ w90p910_set_fifo_threshold(dev);
+ w90p910_return_default_idle(dev);
+
+ if (!netif_queue_stopped(dev))
+ netif_stop_queue(dev);
+
+ w90p910_init_desc(dev);
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ ether->cur_tx = 0x0;
+ ether->finish_tx = 0x0;
+ ether->cur_rx = 0x0;
+
+ w90p910_set_curdest(dev);
+ w90p910_enable_cam(dev);
+ w90p910_enable_cam_command(dev);
+ w90p910_enable_mac_interrupt(dev);
+ w90p910_enable_tx(dev, 1);
+ w90p910_enable_rx(dev, 1);
+ w90p910_trigger_tx(dev);
+ w90p910_trigger_rx(dev);
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+}
+
+static void w90p910_mdio_write(struct net_device *dev,
+ int phy_id, int reg, int data)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+ struct platform_device *pdev;
+ unsigned int val, i;
+
+ pdev = ether->pdev;
+
+ __raw_writel(data, ether->reg + REG_MIID);
+
+ val = (phy_id << 0x08) | reg;
+ val |= PHYBUSY | PHYWR | MDCCR_VAL;
+ __raw_writel(val, ether->reg + REG_MIIDA);
+
+ for (i = 0; i < DELAY; i++) {
+ if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0)
+ break;
+ }
+
+ if (i == DELAY)
+ dev_warn(&pdev->dev, "mdio write timed out\n");
+}
+
+static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+ struct platform_device *pdev;
+ unsigned int val, i, data;
+
+ pdev = ether->pdev;
+
+ val = (phy_id << 0x08) | reg;
+ val |= PHYBUSY | MDCCR_VAL;
+ __raw_writel(val, ether->reg + REG_MIIDA);
+
+ for (i = 0; i < DELAY; i++) {
+ if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0)
+ break;
+ }
+
+ if (i == DELAY) {
+ dev_warn(&pdev->dev, "mdio read timed out\n");
+ data = 0xffff;
+ } else {
+ data = __raw_readl(ether->reg + REG_MIID);
+ }
+
+ return data;
+}
+
+static int w90p910_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *address = addr;
+
+ if (!is_valid_ether_addr(address->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
+ w90p910_write_cam(dev, CAM0, dev->dev_addr);
+
+ return 0;
+}
+
+static int w90p910_ether_close(struct net_device *dev)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+ struct platform_device *pdev;
+
+ pdev = ether->pdev;
+
+ dma_free_coherent(&pdev->dev, sizeof(struct recv_pdesc),
+ ether->rdesc, ether->rdesc_phys);
+ dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc),
+ ether->tdesc, ether->tdesc_phys);
+
+ netif_stop_queue(dev);
+
+ del_timer_sync(&ether->check_timer);
+ clk_disable(ether->rmiiclk);
+ clk_disable(ether->clk);
+
+ free_irq(ether->txirq, dev);
+ free_irq(ether->rxirq, dev);
+
+ return 0;
+}
+
+static struct net_device_stats *w90p910_ether_stats(struct net_device *dev)
+{
+ struct w90p910_ether *ether;
+
+ ether = netdev_priv(dev);
+
+ return &ether->stats;
+}
+
+static int w90p910_send_frame(struct net_device *dev,
+ unsigned char *data, int length)
+{
+ struct w90p910_ether *ether;
+ struct w90p910_txbd *txbd;
+ struct platform_device *pdev;
+ unsigned char *buffer;
+
+ ether = netdev_priv(dev);
+ pdev = ether->pdev;
+
+ txbd = &ether->tdesc->desclist[ether->cur_tx];
+ buffer = ether->tdesc->tran_buf[ether->cur_tx];
+
+ if (length > 1514) {
+ dev_err(&pdev->dev, "send data %d bytes, check it\n", length);
+ length = 1514;
+ }
+
+ txbd->sl = length & 0xFFFF;
+
+ memcpy(buffer, data, length);
+
+ txbd->mode = TX_OWEN_DMA | PADDINGMODE | CRCMODE | MACTXINTEN;
+
+ w90p910_enable_tx(dev, 1);
+
+ w90p910_trigger_tx(dev);
+
+ if (++ether->cur_tx >= TX_DESC_SIZE)
+ ether->cur_tx = 0;
+
+ txbd = &ether->tdesc->desclist[ether->cur_tx];
+
+ if (txbd->mode & TX_OWEN_DMA)
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+
+ if (!(w90p910_send_frame(dev, skb->data, skb->len))) {
+ ether->skb = skb;
+ dev_kfree_skb_irq(skb);
+ return 0;
+ }
+ return -EAGAIN;
+}
+
+static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id)
+{
+ struct w90p910_ether *ether;
+ struct w90p910_txbd *txbd;
+ struct platform_device *pdev;
+ struct net_device *dev;
+ unsigned int cur_entry, entry, status;
+
+ dev = dev_id;
+ ether = netdev_priv(dev);
+ pdev = ether->pdev;
+
+ w90p910_get_and_clear_int(dev, &status);
+
+ cur_entry = __raw_readl(ether->reg + REG_CTXDSA);
+
+ entry = ether->tdesc_phys +
+ offsetof(struct tran_pdesc, desclist[ether->finish_tx]);
+
+ while (entry != cur_entry) {
+ txbd = &ether->tdesc->desclist[ether->finish_tx];
+
+ if (++ether->finish_tx >= TX_DESC_SIZE)
+ ether->finish_tx = 0;
+
+ if (txbd->sl & TXDS_TXCP) {
+ ether->stats.tx_packets++;
+ ether->stats.tx_bytes += txbd->sl & 0xFFFF;
+ } else {
+ ether->stats.tx_errors++;
+ }
+
+ txbd->sl = 0x0;
+ txbd->mode = 0x0;
+
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+
+ entry = ether->tdesc_phys +
+ offsetof(struct tran_pdesc, desclist[ether->finish_tx]);
+ }
+
+ if (status & MISTA_EXDEF) {
+ dev_err(&pdev->dev, "emc defer exceed interrupt\n");
+ } else if (status & MISTA_TXBERR) {
+ dev_err(&pdev->dev, "emc bus error interrupt\n");
+ w90p910_reset_mac(dev);
+ } else if (status & MISTA_TDU) {
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void netdev_rx(struct net_device *dev)
+{
+ struct w90p910_ether *ether;
+ struct w90p910_rxbd *rxbd;
+ struct platform_device *pdev;
+ struct sk_buff *skb;
+ unsigned char *data;
+ unsigned int length, status, val, entry;
+
+ ether = netdev_priv(dev);
+ pdev = ether->pdev;
+
+ rxbd = &ether->rdesc->desclist[ether->cur_rx];
+
+ do {
+ val = __raw_readl(ether->reg + REG_CRXDSA);
+
+ entry = ether->rdesc_phys +
+ offsetof(struct recv_pdesc, desclist[ether->cur_rx]);
+
+ if (val == entry)
+ break;
+
+ status = rxbd->sl;
+ length = status & 0xFFFF;
+
+ if (status & RXDS_RXGD) {
+ data = ether->rdesc->recv_buf[ether->cur_rx];
+ skb = dev_alloc_skb(length+2);
+ if (!skb) {
+ dev_err(&pdev->dev, "get skb buffer error\n");
+ ether->stats.rx_dropped++;
+ return;
+ }
+
+ skb_reserve(skb, 2);
+ skb_put(skb, length);
+ skb_copy_to_linear_data(skb, data, length);
+ skb->protocol = eth_type_trans(skb, dev);
+ ether->stats.rx_packets++;
+ ether->stats.rx_bytes += length;
+ netif_rx(skb);
+ } else {
+ ether->stats.rx_errors++;
+
+ if (status & RXDS_RP) {
+ dev_err(&pdev->dev, "rx runt err\n");
+ ether->stats.rx_length_errors++;
+ } else if (status & RXDS_CRCE) {
+ dev_err(&pdev->dev, "rx crc err\n");
+ ether->stats.rx_crc_errors++;
+ } else if (status & RXDS_ALIE) {
+ dev_err(&pdev->dev, "rx aligment err\n");
+ ether->stats.rx_frame_errors++;
+ } else if (status & RXDS_PTLE) {
+ dev_err(&pdev->dev, "rx longer err\n");
+ ether->stats.rx_over_errors++;
+ }
+ }
+
+ rxbd->sl = RX_OWEN_DMA;
+ rxbd->reserved = 0x0;
+
+ if (++ether->cur_rx >= RX_DESC_SIZE)
+ ether->cur_rx = 0;
+
+ rxbd = &ether->rdesc->desclist[ether->cur_rx];
+
+ } while (1);
+}
+
+static irqreturn_t w90p910_rx_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev;
+ struct w90p910_ether *ether;
+ struct platform_device *pdev;
+ unsigned int status;
+
+ dev = dev_id;
+ ether = netdev_priv(dev);
+ pdev = ether->pdev;
+
+ w90p910_get_and_clear_int(dev, &status);
+
+ if (status & MISTA_RDU) {
+ netdev_rx(dev);
+ w90p910_trigger_rx(dev);
+
+ return IRQ_HANDLED;
+ } else if (status & MISTA_RXBERR) {
+ dev_err(&pdev->dev, "emc rx bus error\n");
+ w90p910_reset_mac(dev);
+ }
+
+ netdev_rx(dev);
+ return IRQ_HANDLED;
+}
+
+static int w90p910_ether_open(struct net_device *dev)
+{
+ struct w90p910_ether *ether;
+ struct platform_device *pdev;
+
+ ether = netdev_priv(dev);
+ pdev = ether->pdev;
+
+ w90p910_reset_mac(dev);
+ w90p910_set_fifo_threshold(dev);
+ w90p910_set_curdest(dev);
+ w90p910_enable_cam(dev);
+ w90p910_enable_cam_command(dev);
+ w90p910_enable_mac_interrupt(dev);
+ w90p910_set_global_maccmd(dev);
+ w90p910_enable_rx(dev, 1);
+
+ clk_enable(ether->rmiiclk);
+ clk_enable(ether->clk);
+
+ ether->rx_packets = 0x0;
+ ether->rx_bytes = 0x0;
+
+ if (request_irq(ether->txirq, w90p910_tx_interrupt,
+ 0x0, pdev->name, dev)) {
+ dev_err(&pdev->dev, "register irq tx failed\n");
+ return -EAGAIN;
+ }
+
+ if (request_irq(ether->rxirq, w90p910_rx_interrupt,
+ 0x0, pdev->name, dev)) {
+ dev_err(&pdev->dev, "register irq rx failed\n");
+ free_irq(ether->txirq, dev);
+ return -EAGAIN;
+ }
+
+ mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000));
+ netif_start_queue(dev);
+ w90p910_trigger_rx(dev);
+
+ dev_info(&pdev->dev, "%s is OPENED\n", dev->name);
+
+ return 0;
+}
+
+static void w90p910_ether_set_multicast_list(struct net_device *dev)
+{
+ struct w90p910_ether *ether;
+ unsigned int rx_mode;
+
+ ether = netdev_priv(dev);
+
+ if (dev->flags & IFF_PROMISC)
+ rx_mode = CAMCMR_AUP | CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
+ else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
+ rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
+ else
+ rx_mode = CAMCMR_ECMP | CAMCMR_ABP;
+ __raw_writel(rx_mode, ether->reg + REG_CAMCMR);
+}
+
+static int w90p910_ether_ioctl(struct net_device *dev,
+ struct ifreq *ifr, int cmd)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ return generic_mii_ioctl(&ether->mii, data, cmd, NULL);
+}
+
+static void w90p910_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_MODULE_NAME);
+ strcpy(info->version, DRV_MODULE_VERSION);
+}
+
+static int w90p910_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+ return mii_ethtool_gset(&ether->mii, cmd);
+}
+
+static int w90p910_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+ return mii_ethtool_sset(&ether->mii, cmd);
+}
+
+static int w90p910_nway_reset(struct net_device *dev)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+ return mii_nway_restart(&ether->mii);
+}
+
+static u32 w90p910_get_link(struct net_device *dev)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+ return mii_link_ok(&ether->mii);
+}
+
+static const struct ethtool_ops w90p910_ether_ethtool_ops = {
+ .get_settings = w90p910_get_settings,
+ .set_settings = w90p910_set_settings,
+ .get_drvinfo = w90p910_get_drvinfo,
+ .nway_reset = w90p910_nway_reset,
+ .get_link = w90p910_get_link,
+};
+
+static const struct net_device_ops w90p910_ether_netdev_ops = {
+ .ndo_open = w90p910_ether_open,
+ .ndo_stop = w90p910_ether_close,
+ .ndo_start_xmit = w90p910_ether_start_xmit,
+ .ndo_get_stats = w90p910_ether_stats,
+ .ndo_set_rx_mode = w90p910_ether_set_multicast_list,
+ .ndo_set_mac_address = w90p910_set_mac_address,
+ .ndo_do_ioctl = w90p910_ether_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static void __init get_mac_address(struct net_device *dev)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+ struct platform_device *pdev;
+ char addr[6];
+
+ pdev = ether->pdev;
+
+ addr[0] = 0x00;
+ addr[1] = 0x02;
+ addr[2] = 0xac;
+ addr[3] = 0x55;
+ addr[4] = 0x88;
+ addr[5] = 0xa8;
+
+ if (is_valid_ether_addr(addr))
+ memcpy(dev->dev_addr, &addr, 0x06);
+ else
+ dev_err(&pdev->dev, "invalid mac address\n");
+}
+
+static int w90p910_ether_setup(struct net_device *dev)
+{
+ struct w90p910_ether *ether = netdev_priv(dev);
+
+ ether_setup(dev);
+ dev->netdev_ops = &w90p910_ether_netdev_ops;
+ dev->ethtool_ops = &w90p910_ether_ethtool_ops;
+
+ dev->tx_queue_len = 16;
+ dev->dma = 0x0;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ get_mac_address(dev);
+
+ ether->cur_tx = 0x0;
+ ether->cur_rx = 0x0;
+ ether->finish_tx = 0x0;
+ ether->linkflag = 0x0;
+ ether->mii.phy_id = 0x01;
+ ether->mii.phy_id_mask = 0x1f;
+ ether->mii.reg_num_mask = 0x1f;
+ ether->mii.dev = dev;
+ ether->mii.mdio_read = w90p910_mdio_read;
+ ether->mii.mdio_write = w90p910_mdio_write;
+
+ setup_timer(&ether->check_timer, w90p910_check_link,
+ (unsigned long)dev);
+
+ return 0;
+}
+
+static int __devinit w90p910_ether_probe(struct platform_device *pdev)
+{
+ struct w90p910_ether *ether;
+ struct net_device *dev;
+ int error;
+
+ dev = alloc_etherdev(sizeof(struct w90p910_ether));
+ if (!dev)
+ return -ENOMEM;
+
+ ether = netdev_priv(dev);
+
+ ether->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (ether->res == NULL) {
+ dev_err(&pdev->dev, "failed to get I/O memory\n");
+ error = -ENXIO;
+ goto failed_free;
+ }
+
+ if (!request_mem_region(ether->res->start,
+ resource_size(ether->res), pdev->name)) {
+ dev_err(&pdev->dev, "failed to request I/O memory\n");
+ error = -EBUSY;
+ goto failed_free;
+ }
+
+ ether->reg = ioremap(ether->res->start, resource_size(ether->res));
+ if (ether->reg == NULL) {
+ dev_err(&pdev->dev, "failed to remap I/O memory\n");
+ error = -ENXIO;
+ goto failed_free_mem;
+ }
+
+ ether->txirq = platform_get_irq(pdev, 0);
+ if (ether->txirq < 0) {
+ dev_err(&pdev->dev, "failed to get ether tx irq\n");
+ error = -ENXIO;
+ goto failed_free_io;
+ }
+
+ ether->rxirq = platform_get_irq(pdev, 1);
+ if (ether->rxirq < 0) {
+ dev_err(&pdev->dev, "failed to get ether rx irq\n");
+ error = -ENXIO;
+ goto failed_free_txirq;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ ether->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ether->clk)) {
+ dev_err(&pdev->dev, "failed to get ether clock\n");
+ error = PTR_ERR(ether->clk);
+ goto failed_free_rxirq;
+ }
+
+ ether->rmiiclk = clk_get(&pdev->dev, "RMII");
+ if (IS_ERR(ether->rmiiclk)) {
+ dev_err(&pdev->dev, "failed to get ether clock\n");
+ error = PTR_ERR(ether->rmiiclk);
+ goto failed_put_clk;
+ }
+
+ ether->pdev = pdev;
+
+ w90p910_ether_setup(dev);
+
+ error = register_netdev(dev);
+ if (error != 0) {
+ dev_err(&pdev->dev, "Regiter EMC w90p910 FAILED\n");
+ error = -ENODEV;
+ goto failed_put_rmiiclk;
+ }
+
+ return 0;
+failed_put_rmiiclk:
+ clk_put(ether->rmiiclk);
+failed_put_clk:
+ clk_put(ether->clk);
+failed_free_rxirq:
+ /* the irqs are requested in w90p910_ether_open(), not in probe,
+  * so there is nothing to free on these paths
+  */
+ platform_set_drvdata(pdev, NULL);
+failed_free_txirq:
+failed_free_io:
+ iounmap(ether->reg);
+failed_free_mem:
+ release_mem_region(ether->res->start, resource_size(ether->res));
+failed_free:
+ free_netdev(dev);
+ return error;
+}
+
+static int __devexit w90p910_ether_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct w90p910_ether *ether = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ clk_put(ether->rmiiclk);
+ clk_put(ether->clk);
+
+ iounmap(ether->reg);
+ release_mem_region(ether->res->start, resource_size(ether->res));
+
+ free_irq(ether->txirq, dev);
+ free_irq(ether->rxirq, dev);
+
+ del_timer_sync(&ether->check_timer);
+ platform_set_drvdata(pdev, NULL);
+
+ free_netdev(dev);
+ return 0;
+}
+
+static struct platform_driver w90p910_ether_driver = {
+ .probe = w90p910_ether_probe,
+ .remove = __devexit_p(w90p910_ether_remove),
+ .driver = {
+ .name = "nuc900-emc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init w90p910_ether_init(void)
+{
+ return platform_driver_register(&w90p910_ether_driver);
+}
+
+static void __exit w90p910_ether_exit(void)
+{
+ platform_driver_unregister(&w90p910_ether_driver);
+}
+
+module_init(w90p910_ether_init);
+module_exit(w90p910_ether_exit);
+
+MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
+MODULE_DESCRIPTION("w90p910 MAC driver!");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nuc900-emc");
+
diff --git a/drivers/net/ethernet/nvidia/Kconfig b/drivers/net/ethernet/nvidia/Kconfig
new file mode 100644
index 000000000000..ace19e7f6d13
--- /dev/null
+++ b/drivers/net/ethernet/nvidia/Kconfig
@@ -0,0 +1,32 @@
+#
+# NVIDIA network device configuration
+#
+
+config NET_VENDOR_NVIDIA
+ bool "NVIDIA devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about NVIDIA cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_NVIDIA
+
+config FORCEDETH
+ tristate "nForce Ethernet support"
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) controller of this type, say Y and
+ read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called forcedeth.
+
+endif # NET_VENDOR_NVIDIA
diff --git a/drivers/net/ethernet/nvidia/Makefile b/drivers/net/ethernet/nvidia/Makefile
new file mode 100644
index 000000000000..e079ae5771d5
--- /dev/null
+++ b/drivers/net/ethernet/nvidia/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the NVIDIA network device drivers.
+#
+
+obj-$(CONFIG_FORCEDETH) += forcedeth.o
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
new file mode 100644
index 000000000000..84baa59430bb
--- /dev/null
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -0,0 +1,6019 @@
+/*
+ * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
+ *
+ * Note: This driver is a cleanroom reimplementation based on reverse
+ * engineered documentation written by Carl-Daniel Hailfinger
+ * and Andrew de Quincey.
+ *
+ * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
+ * trademarks of NVIDIA Corporation in the United States and other
+ * countries.
+ *
+ * Copyright (C) 2003,4,5 Manfred Spraul
+ * Copyright (C) 2004 Andrew de Quincey (wol support)
+ * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
+ * IRQ rate fixes, bigendian fixes, cleanups, verification)
+ * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Known bugs:
+ * We suspect that on some hardware no TX done interrupts are generated.
+ * This means recovery from netif_stop_queue only happens if the hw timer
+ * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
+ * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
+ * If your hardware reliably generates tx done interrupts, then you can remove
+ * DEV_NEED_TIMERIRQ from the driver_data flags.
+ * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
+ * superfluous timer interrupts from the nic.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define FORCEDETH_VERSION "0.64"
+#define DRV_NAME "forcedeth"
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+#include <linux/mii.h>
+#include <linux/random.h>
+#include <linux/init.h>
+#include <linux/if_vlan.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/prefetch.h>
+#include <linux/io.h>
+
+#include <asm/irq.h>
+#include <asm/system.h>
+
+#define TX_WORK_PER_LOOP 64
+#define RX_WORK_PER_LOOP 64
+
+/*
+ * Hardware access:
+ */
+
+#define DEV_NEED_TIMERIRQ 0x0000001 /* set the timer irq flag in the irq mask */
+#define DEV_NEED_LINKTIMER 0x0000002 /* poll link settings. Relies on the timer irq */
+#define DEV_HAS_LARGEDESC 0x0000004 /* device supports jumbo frames and needs packet format 2 */
+#define DEV_HAS_HIGH_DMA 0x0000008 /* device supports 64bit dma */
+#define DEV_HAS_CHECKSUM 0x0000010 /* device supports tx and rx checksum offloads */
+#define DEV_HAS_VLAN 0x0000020 /* device supports vlan tagging and striping */
+#define DEV_HAS_MSI 0x0000040 /* device supports MSI */
+#define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */
+#define DEV_HAS_POWER_CNTRL 0x0000100 /* device supports power savings */
+#define DEV_HAS_STATISTICS_V1 0x0000200 /* device supports hw statistics version 1 */
+#define DEV_HAS_STATISTICS_V2 0x0000400 /* device supports hw statistics version 2 */
+#define DEV_HAS_STATISTICS_V3 0x0000800 /* device supports hw statistics version 3 */
+#define DEV_HAS_STATISTICS_V12 0x0000600 /* device supports hw statistics version 1 and 2 */
+#define DEV_HAS_STATISTICS_V123 0x0000e00 /* device supports hw statistics version 1, 2, and 3 */
+#define DEV_HAS_TEST_EXTENDED 0x0001000 /* device supports extended diagnostic test */
+#define DEV_HAS_MGMT_UNIT 0x0002000 /* device supports management unit */
+#define DEV_HAS_CORRECT_MACADDR 0x0004000 /* device supports correct mac address order */
+#define DEV_HAS_COLLISION_FIX 0x0008000 /* device supports tx collision fix */
+#define DEV_HAS_PAUSEFRAME_TX_V1 0x0010000 /* device supports tx pause frames version 1 */
+#define DEV_HAS_PAUSEFRAME_TX_V2 0x0020000 /* device supports tx pause frames version 2 */
+#define DEV_HAS_PAUSEFRAME_TX_V3 0x0040000 /* device supports tx pause frames version 3 */
+#define DEV_NEED_TX_LIMIT 0x0080000 /* device needs to limit tx */
+#define DEV_NEED_TX_LIMIT2 0x0180000 /* device needs to limit tx, except for some revs */
+#define DEV_HAS_GEAR_MODE 0x0200000 /* device supports gear mode */
+#define DEV_NEED_PHY_INIT_FIX 0x0400000 /* device needs specific phy workaround */
+#define DEV_NEED_LOW_POWER_FIX 0x0800000 /* device needs special power up workaround */
+#define DEV_NEED_MSI_FIX 0x1000000 /* device needs msi workaround */
+
+enum {
+ NvRegIrqStatus = 0x000,
+#define NVREG_IRQSTAT_MIIEVENT 0x040
+#define NVREG_IRQSTAT_MASK 0x83ff
+ NvRegIrqMask = 0x004,
+#define NVREG_IRQ_RX_ERROR 0x0001
+#define NVREG_IRQ_RX 0x0002
+#define NVREG_IRQ_RX_NOBUF 0x0004
+#define NVREG_IRQ_TX_ERR 0x0008
+#define NVREG_IRQ_TX_OK 0x0010
+#define NVREG_IRQ_TIMER 0x0020
+#define NVREG_IRQ_LINK 0x0040
+#define NVREG_IRQ_RX_FORCED 0x0080
+#define NVREG_IRQ_TX_FORCED 0x0100
+#define NVREG_IRQ_RECOVER_ERROR 0x8200
+#define NVREG_IRQMASK_THROUGHPUT 0x00df
+#define NVREG_IRQMASK_CPU 0x0060
+#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
+#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
+#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
+
+ NvRegUnknownSetupReg6 = 0x008,
+#define NVREG_UNKSETUP6_VAL 3
+
+/*
+ * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
+ * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
+ */
+ NvRegPollingInterval = 0x00c,
+#define NVREG_POLL_DEFAULT_THROUGHPUT 65535 /* backup tx cleanup if loop max reached */
+#define NVREG_POLL_DEFAULT_CPU 13
+ NvRegMSIMap0 = 0x020,
+ NvRegMSIMap1 = 0x024,
+ NvRegMSIIrqMask = 0x030,
+#define NVREG_MSI_VECTOR_0_ENABLED 0x01
+ NvRegMisc1 = 0x080,
+#define NVREG_MISC1_PAUSE_TX 0x01
+#define NVREG_MISC1_HD 0x02
+#define NVREG_MISC1_FORCE 0x3b0f3c
+
+ NvRegMacReset = 0x34,
+#define NVREG_MAC_RESET_ASSERT 0x0F3
+ NvRegTransmitterControl = 0x084,
+#define NVREG_XMITCTL_START 0x01
+#define NVREG_XMITCTL_MGMT_ST 0x40000000
+#define NVREG_XMITCTL_SYNC_MASK 0x000f0000
+#define NVREG_XMITCTL_SYNC_NOT_READY 0x0
+#define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000
+#define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00
+#define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0
+#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
+#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
+#define NVREG_XMITCTL_HOST_LOADED 0x00004000
+#define NVREG_XMITCTL_TX_PATH_EN 0x01000000
+#define NVREG_XMITCTL_DATA_START 0x00100000
+#define NVREG_XMITCTL_DATA_READY 0x00010000
+#define NVREG_XMITCTL_DATA_ERROR 0x00020000
+ NvRegTransmitterStatus = 0x088,
+#define NVREG_XMITSTAT_BUSY 0x01
+
+ NvRegPacketFilterFlags = 0x8c,
+#define NVREG_PFF_PAUSE_RX 0x08
+#define NVREG_PFF_ALWAYS 0x7F0000
+#define NVREG_PFF_PROMISC 0x80
+#define NVREG_PFF_MYADDR 0x20
+#define NVREG_PFF_LOOPBACK 0x10
+
+ NvRegOffloadConfig = 0x90,
+#define NVREG_OFFLOAD_HOMEPHY 0x601
+#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
+ NvRegReceiverControl = 0x094,
+#define NVREG_RCVCTL_START 0x01
+#define NVREG_RCVCTL_RX_PATH_EN 0x01000000
+ NvRegReceiverStatus = 0x98,
+#define NVREG_RCVSTAT_BUSY 0x01
+
+ NvRegSlotTime = 0x9c,
+#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000
+#define NVREG_SLOTTIME_10_100_FULL 0x00007f00
+#define NVREG_SLOTTIME_1000_FULL 0x0003ff00
+#define NVREG_SLOTTIME_HALF 0x0000ff00
+#define NVREG_SLOTTIME_DEFAULT 0x00007f00
+#define NVREG_SLOTTIME_MASK 0x000000ff
+
+ NvRegTxDeferral = 0xA0,
+#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
+#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
+#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
+#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f
+#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f
+#define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000
+ NvRegRxDeferral = 0xA4,
+#define NVREG_RX_DEFERRAL_DEFAULT 0x16
+ NvRegMacAddrA = 0xA8,
+ NvRegMacAddrB = 0xAC,
+ NvRegMulticastAddrA = 0xB0,
+#define NVREG_MCASTADDRA_FORCE 0x01
+ NvRegMulticastAddrB = 0xB4,
+ NvRegMulticastMaskA = 0xB8,
+#define NVREG_MCASTMASKA_NONE 0xffffffff
+ NvRegMulticastMaskB = 0xBC,
+#define NVREG_MCASTMASKB_NONE 0xffff
+
+ NvRegPhyInterface = 0xC0,
+#define PHY_RGMII 0x10000000
+ NvRegBackOffControl = 0xC4,
+#define NVREG_BKOFFCTRL_DEFAULT 0x70000000
+#define NVREG_BKOFFCTRL_SEED_MASK 0x000003ff
+#define NVREG_BKOFFCTRL_SELECT 24
+#define NVREG_BKOFFCTRL_GEAR 12
+
+ NvRegTxRingPhysAddr = 0x100,
+ NvRegRxRingPhysAddr = 0x104,
+ NvRegRingSizes = 0x108,
+#define NVREG_RINGSZ_TXSHIFT 0
+#define NVREG_RINGSZ_RXSHIFT 16
+ NvRegTransmitPoll = 0x10c,
+#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
+ NvRegLinkSpeed = 0x110,
+#define NVREG_LINKSPEED_FORCE 0x10000
+#define NVREG_LINKSPEED_10 1000
+#define NVREG_LINKSPEED_100 100
+#define NVREG_LINKSPEED_1000 50
+#define NVREG_LINKSPEED_MASK (0xFFF)
+ NvRegUnknownSetupReg5 = 0x130,
+#define NVREG_UNKSETUP5_BIT31 (1<<31)
+ NvRegTxWatermark = 0x13c,
+#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010
+#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000
+#define NVREG_TX_WM_DESC2_3_1000 0xfe08000
+ NvRegTxRxControl = 0x144,
+#define NVREG_TXRXCTL_KICK 0x0001
+#define NVREG_TXRXCTL_BIT1 0x0002
+#define NVREG_TXRXCTL_BIT2 0x0004
+#define NVREG_TXRXCTL_IDLE 0x0008
+#define NVREG_TXRXCTL_RESET 0x0010
+#define NVREG_TXRXCTL_RXCHECK 0x0400
+#define NVREG_TXRXCTL_DESC_1 0
+#define NVREG_TXRXCTL_DESC_2 0x002100
+#define NVREG_TXRXCTL_DESC_3 0xc02200
+#define NVREG_TXRXCTL_VLANSTRIP 0x00040
+#define NVREG_TXRXCTL_VLANINS 0x00080
+ NvRegTxRingPhysAddrHigh = 0x148,
+ NvRegRxRingPhysAddrHigh = 0x14C,
+ NvRegTxPauseFrame = 0x170,
+#define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080
+#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
+#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
+#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
+ NvRegTxPauseFrameLimit = 0x174,
+#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000
+ NvRegMIIStatus = 0x180,
+#define NVREG_MIISTAT_ERROR 0x0001
+#define NVREG_MIISTAT_LINKCHANGE 0x0008
+#define NVREG_MIISTAT_MASK_RW 0x0007
+#define NVREG_MIISTAT_MASK_ALL 0x000f
+ NvRegMIIMask = 0x184,
+#define NVREG_MII_LINKCHANGE 0x0008
+
+ NvRegAdapterControl = 0x188,
+#define NVREG_ADAPTCTL_START 0x02
+#define NVREG_ADAPTCTL_LINKUP 0x04
+#define NVREG_ADAPTCTL_PHYVALID 0x40000
+#define NVREG_ADAPTCTL_RUNNING 0x100000
+#define NVREG_ADAPTCTL_PHYSHIFT 24
+ NvRegMIISpeed = 0x18c,
+#define NVREG_MIISPEED_BIT8 (1<<8)
+#define NVREG_MIIDELAY 5
+ NvRegMIIControl = 0x190,
+#define NVREG_MIICTL_INUSE 0x08000
+#define NVREG_MIICTL_WRITE 0x00400
+#define NVREG_MIICTL_ADDRSHIFT 5
+ NvRegMIIData = 0x194,
+ NvRegTxUnicast = 0x1a0,
+ NvRegTxMulticast = 0x1a4,
+ NvRegTxBroadcast = 0x1a8,
+ NvRegWakeUpFlags = 0x200,
+#define NVREG_WAKEUPFLAGS_VAL 0x7770
+#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
+#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
+#define NVREG_WAKEUPFLAGS_D3SHIFT 12
+#define NVREG_WAKEUPFLAGS_D2SHIFT 8
+#define NVREG_WAKEUPFLAGS_D1SHIFT 4
+#define NVREG_WAKEUPFLAGS_D0SHIFT 0
+#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
+#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
+#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
+#define NVREG_WAKEUPFLAGS_ENABLE 0x1111
+
+ NvRegMgmtUnitGetVersion = 0x204,
+#define NVREG_MGMTUNITGETVERSION 0x01
+ NvRegMgmtUnitVersion = 0x208,
+#define NVREG_MGMTUNITVERSION 0x08
+ NvRegPowerCap = 0x268,
+#define NVREG_POWERCAP_D3SUPP (1<<30)
+#define NVREG_POWERCAP_D2SUPP (1<<26)
+#define NVREG_POWERCAP_D1SUPP (1<<25)
+ NvRegPowerState = 0x26c,
+#define NVREG_POWERSTATE_POWEREDUP 0x8000
+#define NVREG_POWERSTATE_VALID 0x0100
+#define NVREG_POWERSTATE_MASK 0x0003
+#define NVREG_POWERSTATE_D0 0x0000
+#define NVREG_POWERSTATE_D1 0x0001
+#define NVREG_POWERSTATE_D2 0x0002
+#define NVREG_POWERSTATE_D3 0x0003
+ NvRegMgmtUnitControl = 0x278,
+#define NVREG_MGMTUNITCONTROL_INUSE 0x20000
+ NvRegTxCnt = 0x280,
+ NvRegTxZeroReXmt = 0x284,
+ NvRegTxOneReXmt = 0x288,
+ NvRegTxManyReXmt = 0x28c,
+ NvRegTxLateCol = 0x290,
+ NvRegTxUnderflow = 0x294,
+ NvRegTxLossCarrier = 0x298,
+ NvRegTxExcessDef = 0x29c,
+ NvRegTxRetryErr = 0x2a0,
+ NvRegRxFrameErr = 0x2a4,
+ NvRegRxExtraByte = 0x2a8,
+ NvRegRxLateCol = 0x2ac,
+ NvRegRxRunt = 0x2b0,
+ NvRegRxFrameTooLong = 0x2b4,
+ NvRegRxOverflow = 0x2b8,
+ NvRegRxFCSErr = 0x2bc,
+ NvRegRxFrameAlignErr = 0x2c0,
+ NvRegRxLenErr = 0x2c4,
+ NvRegRxUnicast = 0x2c8,
+ NvRegRxMulticast = 0x2cc,
+ NvRegRxBroadcast = 0x2d0,
+ NvRegTxDef = 0x2d4,
+ NvRegTxFrame = 0x2d8,
+ NvRegRxCnt = 0x2dc,
+ NvRegTxPause = 0x2e0,
+ NvRegRxPause = 0x2e4,
+ NvRegRxDropFrame = 0x2e8,
+ NvRegVlanControl = 0x300,
+#define NVREG_VLANCONTROL_ENABLE 0x2000
+ NvRegMSIXMap0 = 0x3e0,
+ NvRegMSIXMap1 = 0x3e4,
+ NvRegMSIXIrqStatus = 0x3f0,
+
+ NvRegPowerState2 = 0x600,
+#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F15
+#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
+#define NVREG_POWERSTATE2_PHY_RESET 0x0004
+#define NVREG_POWERSTATE2_GATE_CLOCKS 0x0F00
+};
+
+/* Big endian: should work, but is untested */
+struct ring_desc {
+ __le32 buf;
+ __le32 flaglen;
+};
+
+struct ring_desc_ex {
+ __le32 bufhigh;
+ __le32 buflow;
+ __le32 txvlan;
+ __le32 flaglen;
+};
+
+union ring_type {
+ struct ring_desc *orig;
+ struct ring_desc_ex *ex;
+};
+
+#define FLAG_MASK_V1 0xffff0000
+#define FLAG_MASK_V2 0xffffc000
+#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
+#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
+
+#define NV_TX_LASTPACKET (1<<16)
+#define NV_TX_RETRYERROR (1<<19)
+#define NV_TX_RETRYCOUNT_MASK (0xF<<20)
+#define NV_TX_FORCED_INTERRUPT (1<<24)
+#define NV_TX_DEFERRED (1<<26)
+#define NV_TX_CARRIERLOST (1<<27)
+#define NV_TX_LATECOLLISION (1<<28)
+#define NV_TX_UNDERFLOW (1<<29)
+#define NV_TX_ERROR (1<<30)
+#define NV_TX_VALID (1<<31)
+
+#define NV_TX2_LASTPACKET (1<<29)
+#define NV_TX2_RETRYERROR (1<<18)
+#define NV_TX2_RETRYCOUNT_MASK (0xF<<19)
+#define NV_TX2_FORCED_INTERRUPT (1<<30)
+#define NV_TX2_DEFERRED (1<<25)
+#define NV_TX2_CARRIERLOST (1<<26)
+#define NV_TX2_LATECOLLISION (1<<27)
+#define NV_TX2_UNDERFLOW (1<<28)
+/* error and valid are the same for both */
+#define NV_TX2_ERROR (1<<30)
+#define NV_TX2_VALID (1<<31)
+#define NV_TX2_TSO (1<<28)
+#define NV_TX2_TSO_SHIFT 14
+#define NV_TX2_TSO_MAX_SHIFT 14
+#define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT)
+#define NV_TX2_CHECKSUM_L3 (1<<27)
+#define NV_TX2_CHECKSUM_L4 (1<<26)
+
+#define NV_TX3_VLAN_TAG_PRESENT (1<<18)
+
+#define NV_RX_DESCRIPTORVALID (1<<16)
+#define NV_RX_MISSEDFRAME (1<<17)
+#define NV_RX_SUBSTRACT1 (1<<18)
+#define NV_RX_ERROR1 (1<<23)
+#define NV_RX_ERROR2 (1<<24)
+#define NV_RX_ERROR3 (1<<25)
+#define NV_RX_ERROR4 (1<<26)
+#define NV_RX_CRCERR (1<<27)
+#define NV_RX_OVERFLOW (1<<28)
+#define NV_RX_FRAMINGERR (1<<29)
+#define NV_RX_ERROR (1<<30)
+#define NV_RX_AVAIL (1<<31)
+#define NV_RX_ERROR_MASK (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)
+
+#define NV_RX2_CHECKSUMMASK (0x1C000000)
+#define NV_RX2_CHECKSUM_IP (0x10000000)
+#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
+#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
+#define NV_RX2_DESCRIPTORVALID (1<<29)
+#define NV_RX2_SUBSTRACT1 (1<<25)
+#define NV_RX2_ERROR1 (1<<18)
+#define NV_RX2_ERROR2 (1<<19)
+#define NV_RX2_ERROR3 (1<<20)
+#define NV_RX2_ERROR4 (1<<21)
+#define NV_RX2_CRCERR (1<<22)
+#define NV_RX2_OVERFLOW (1<<23)
+#define NV_RX2_FRAMINGERR (1<<24)
+/* error and avail are the same for both */
+#define NV_RX2_ERROR (1<<30)
+#define NV_RX2_AVAIL (1<<31)
+#define NV_RX2_ERROR_MASK (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)
+
+#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
+#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
+
+/* Miscellaneous hardware related defines: */
+#define NV_PCI_REGSZ_VER1 0x270
+#define NV_PCI_REGSZ_VER2 0x2d4
+#define NV_PCI_REGSZ_VER3 0x604
+#define NV_PCI_REGSZ_MAX 0x604
+
+/* various timeout delays: all in usec */
+#define NV_TXRX_RESET_DELAY 4
+#define NV_TXSTOP_DELAY1 10
+#define NV_TXSTOP_DELAY1MAX 500000
+#define NV_TXSTOP_DELAY2 100
+#define NV_RXSTOP_DELAY1 10
+#define NV_RXSTOP_DELAY1MAX 500000
+#define NV_RXSTOP_DELAY2 100
+#define NV_SETUP5_DELAY 5
+#define NV_SETUP5_DELAYMAX 50000
+#define NV_POWERUP_DELAY 5
+#define NV_POWERUP_DELAYMAX 5000
+#define NV_MIIBUSY_DELAY 50
+#define NV_MIIPHY_DELAY 10
+#define NV_MIIPHY_DELAYMAX 10000
+#define NV_MAC_RESET_DELAY 64
+
+#define NV_WAKEUPPATTERNS 5
+#define NV_WAKEUPMASKENTRIES 4
+
+/* General driver defaults */
+#define NV_WATCHDOG_TIMEO (5*HZ)
+
+#define RX_RING_DEFAULT 512
+#define TX_RING_DEFAULT 256
+#define RX_RING_MIN 128
+#define TX_RING_MIN 64
+#define RING_MAX_DESC_VER_1 1024
+#define RING_MAX_DESC_VER_2_3 16384
+
+/* rx/tx mac addr + type + vlan + align + slack*/
+#define NV_RX_HEADERS (64)
+/* even more slack. */
+#define NV_RX_ALLOC_PAD (64)
+
+/* maximum mtu size */
+#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
+#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */
+
+#define OOM_REFILL (1+HZ/20)
+#define POLL_WAIT (1+HZ/100)
+#define LINK_TIMEOUT (3*HZ)
+#define STATS_INTERVAL (10*HZ)
+
+/*
+ * desc_ver values:
+ * The nic supports three different descriptor types:
+ * - DESC_VER_1: Original
+ * - DESC_VER_2: support for jumbo frames.
+ * - DESC_VER_3: 64-bit format.
+ */
+#define DESC_VER_1 1
+#define DESC_VER_2 2
+#define DESC_VER_3 3
+
+/* PHY defines */
+#define PHY_OUI_MARVELL 0x5043
+#define PHY_OUI_CICADA 0x03f1
+#define PHY_OUI_VITESSE 0x01c1
+#define PHY_OUI_REALTEK 0x0732
+#define PHY_OUI_REALTEK2 0x0020
+#define PHYID1_OUI_MASK 0x03ff
+#define PHYID1_OUI_SHFT 6
+#define PHYID2_OUI_MASK 0xfc00
+#define PHYID2_OUI_SHFT 10
+#define PHYID2_MODEL_MASK 0x03f0
+#define PHY_MODEL_REALTEK_8211 0x0110
+#define PHY_REV_MASK 0x0001
+#define PHY_REV_REALTEK_8211B 0x0000
+#define PHY_REV_REALTEK_8211C 0x0001
+#define PHY_MODEL_REALTEK_8201 0x0200
+#define PHY_MODEL_MARVELL_E3016 0x0220
+#define PHY_MARVELL_E3016_INITMASK 0x0300
+#define PHY_CICADA_INIT1 0x0f000
+#define PHY_CICADA_INIT2 0x0e00
+#define PHY_CICADA_INIT3 0x01000
+#define PHY_CICADA_INIT4 0x0200
+#define PHY_CICADA_INIT5 0x0004
+#define PHY_CICADA_INIT6 0x02000
+#define PHY_VITESSE_INIT_REG1 0x1f
+#define PHY_VITESSE_INIT_REG2 0x10
+#define PHY_VITESSE_INIT_REG3 0x11
+#define PHY_VITESSE_INIT_REG4 0x12
+#define PHY_VITESSE_INIT_MSK1 0xc
+#define PHY_VITESSE_INIT_MSK2 0x0180
+#define PHY_VITESSE_INIT1 0x52b5
+#define PHY_VITESSE_INIT2 0xaf8a
+#define PHY_VITESSE_INIT3 0x8
+#define PHY_VITESSE_INIT4 0x8f8a
+#define PHY_VITESSE_INIT5 0xaf86
+#define PHY_VITESSE_INIT6 0x8f86
+#define PHY_VITESSE_INIT7 0xaf82
+#define PHY_VITESSE_INIT8 0x0100
+#define PHY_VITESSE_INIT9 0x8f82
+#define PHY_VITESSE_INIT10 0x0
+#define PHY_REALTEK_INIT_REG1 0x1f
+#define PHY_REALTEK_INIT_REG2 0x19
+#define PHY_REALTEK_INIT_REG3 0x13
+#define PHY_REALTEK_INIT_REG4 0x14
+#define PHY_REALTEK_INIT_REG5 0x18
+#define PHY_REALTEK_INIT_REG6 0x11
+#define PHY_REALTEK_INIT_REG7 0x01
+#define PHY_REALTEK_INIT1 0x0000
+#define PHY_REALTEK_INIT2 0x8e00
+#define PHY_REALTEK_INIT3 0x0001
+#define PHY_REALTEK_INIT4 0xad17
+#define PHY_REALTEK_INIT5 0xfb54
+#define PHY_REALTEK_INIT6 0xf5c7
+#define PHY_REALTEK_INIT7 0x1000
+#define PHY_REALTEK_INIT8 0x0003
+#define PHY_REALTEK_INIT9 0x0008
+#define PHY_REALTEK_INIT10 0x0005
+#define PHY_REALTEK_INIT11 0x0200
+#define PHY_REALTEK_INIT_MSK1 0x0003
+
+#define PHY_GIGABIT 0x0100
+
+#define PHY_TIMEOUT 0x1
+#define PHY_ERROR 0x2
+
+#define PHY_100 0x1
+#define PHY_1000 0x2
+#define PHY_HALF 0x100
+
+#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
+#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
+#define NV_PAUSEFRAME_RX_ENABLE 0x0004
+#define NV_PAUSEFRAME_TX_ENABLE 0x0008
+#define NV_PAUSEFRAME_RX_REQ 0x0010
+#define NV_PAUSEFRAME_TX_REQ 0x0020
+#define NV_PAUSEFRAME_AUTONEG 0x0040
+
+/* MSI/MSI-X defines */
+#define NV_MSI_X_MAX_VECTORS 8
+#define NV_MSI_X_VECTORS_MASK 0x000f
+#define NV_MSI_CAPABLE 0x0010
+#define NV_MSI_X_CAPABLE 0x0020
+#define NV_MSI_ENABLED 0x0040
+#define NV_MSI_X_ENABLED 0x0080
+
+#define NV_MSI_X_VECTOR_ALL 0x0
+#define NV_MSI_X_VECTOR_RX 0x0
+#define NV_MSI_X_VECTOR_TX 0x1
+#define NV_MSI_X_VECTOR_OTHER 0x2
+
+#define NV_MSI_PRIV_OFFSET 0x68
+#define NV_MSI_PRIV_VALUE 0xffffffff
+
+#define NV_RESTART_TX 0x1
+#define NV_RESTART_RX 0x2
+
+#define NV_TX_LIMIT_COUNT 16
+
+#define NV_DYNAMIC_THRESHOLD 4
+#define NV_DYNAMIC_MAX_QUIET_COUNT 2048
+
+/* statistics */
+struct nv_ethtool_str {
+ char name[ETH_GSTRING_LEN];
+};
+
+static const struct nv_ethtool_str nv_estats_str[] = {
+ { "tx_bytes" },
+ { "tx_zero_rexmt" },
+ { "tx_one_rexmt" },
+ { "tx_many_rexmt" },
+ { "tx_late_collision" },
+ { "tx_fifo_errors" },
+ { "tx_carrier_errors" },
+ { "tx_excess_deferral" },
+ { "tx_retry_error" },
+ { "rx_frame_error" },
+ { "rx_extra_byte" },
+ { "rx_late_collision" },
+ { "rx_runt" },
+ { "rx_frame_too_long" },
+ { "rx_over_errors" },
+ { "rx_crc_errors" },
+ { "rx_frame_align_error" },
+ { "rx_length_error" },
+ { "rx_unicast" },
+ { "rx_multicast" },
+ { "rx_broadcast" },
+ { "rx_packets" },
+ { "rx_errors_total" },
+ { "tx_errors_total" },
+
+ /* version 2 stats */
+ { "tx_deferral" },
+ { "tx_packets" },
+ { "rx_bytes" },
+ { "tx_pause" },
+ { "rx_pause" },
+ { "rx_drop_frame" },
+
+ /* version 3 stats */
+ { "tx_unicast" },
+ { "tx_multicast" },
+ { "tx_broadcast" }
+};
+
+struct nv_ethtool_stats {
+ u64 tx_bytes;
+ u64 tx_zero_rexmt;
+ u64 tx_one_rexmt;
+ u64 tx_many_rexmt;
+ u64 tx_late_collision;
+ u64 tx_fifo_errors;
+ u64 tx_carrier_errors;
+ u64 tx_excess_deferral;
+ u64 tx_retry_error;
+ u64 rx_frame_error;
+ u64 rx_extra_byte;
+ u64 rx_late_collision;
+ u64 rx_runt;
+ u64 rx_frame_too_long;
+ u64 rx_over_errors;
+ u64 rx_crc_errors;
+ u64 rx_frame_align_error;
+ u64 rx_length_error;
+ u64 rx_unicast;
+ u64 rx_multicast;
+ u64 rx_broadcast;
+ u64 rx_packets;
+ u64 rx_errors_total;
+ u64 tx_errors_total;
+
+ /* version 2 stats */
+ u64 tx_deferral;
+ u64 tx_packets;
+ u64 rx_bytes;
+ u64 tx_pause;
+ u64 rx_pause;
+ u64 rx_drop_frame;
+
+ /* version 3 stats */
+ u64 tx_unicast;
+ u64 tx_multicast;
+ u64 tx_broadcast;
+};
+
+#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
+#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
+#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
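+
+/*
+ * Worked example (editorial): struct nv_ethtool_stats above holds
+ * 24 v1 + 6 v2 + 3 v3 = 33 u64 counters, so these resolve to
+ * V3_COUNT = 33, V2_COUNT = 30 and V1_COUNT = 24, matching the three
+ * groups in nv_estats_str[].
+ */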
+
+/* diagnostics */
+#define NV_TEST_COUNT_BASE 3
+#define NV_TEST_COUNT_EXTENDED 4
+
+static const struct nv_ethtool_str nv_etests_str[] = {
+ { "link (online/offline)" },
+ { "register (offline) " },
+ { "interrupt (offline) " },
+ { "loopback (offline) " }
+};
+
+struct register_test {
+ __u32 reg;
+ __u32 mask;
+};
+
+static const struct register_test nv_registers_test[] = {
+ { NvRegUnknownSetupReg6, 0x01 },
+ { NvRegMisc1, 0x03c },
+ { NvRegOffloadConfig, 0x03ff },
+ { NvRegMulticastAddrA, 0xffffffff },
+ { NvRegTxWatermark, 0x0ff },
+ { NvRegWakeUpFlags, 0x07777 },
+ { 0, 0 }
+};
+
+struct nv_skb_map {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ unsigned int dma_len:31;
+ unsigned int dma_single:1;
+ struct ring_desc_ex *first_tx_desc;
+ struct nv_skb_map *next_tx_ctx;
+};
+
+/*
+ * SMP locking:
+ * All hardware access under netdev_priv(dev)->lock, except the performance
+ * critical parts:
+ * - rx is (pseudo-) lockless: it relies on the single-threading provided
+ * by the arch code for interrupts.
+ * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
+ * needs netdev_priv(dev)->lock :-(
+ * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
+ */
+
+/* in dev: base, irq */
+struct fe_priv {
+ spinlock_t lock;
+
+ struct net_device *dev;
+ struct napi_struct napi;
+
+ /* General data:
+ * Locking: spin_lock(&np->lock); */
+ struct nv_ethtool_stats estats;
+ int in_shutdown;
+ u32 linkspeed;
+ int duplex;
+ int autoneg;
+ int fixed_mode;
+ int phyaddr;
+ int wolenabled;
+ unsigned int phy_oui;
+ unsigned int phy_model;
+ unsigned int phy_rev;
+ u16 gigabit;
+ int intr_test;
+ int recover_error;
+ int quiet_count;
+
+ /* General data: RO fields */
+ dma_addr_t ring_addr;
+ struct pci_dev *pci_dev;
+ u32 orig_mac[2];
+ u32 events;
+ u32 irqmask;
+ u32 desc_ver;
+ u32 txrxctl_bits;
+ u32 vlanctl_bits;
+ u32 driver_data;
+ u32 device_id;
+ u32 register_size;
+ u32 mac_in_use;
+ int mgmt_version;
+ int mgmt_sema;
+
+ void __iomem *base;
+
+ /* rx specific fields.
+ * Locking: Within the irq handler or disable_irq+spin_lock(&np->lock);
+ */
+ union ring_type get_rx, put_rx, first_rx, last_rx;
+ struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
+ struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
+ struct nv_skb_map *rx_skb;
+
+ union ring_type rx_ring;
+ unsigned int rx_buf_sz;
+ unsigned int pkt_limit;
+ struct timer_list oom_kick;
+ struct timer_list nic_poll;
+ struct timer_list stats_poll;
+ u32 nic_poll_irq;
+ int rx_ring_size;
+
+ /* media detection workaround.
+ * Locking: Within the irq handler or disable_irq+spin_lock(&np->lock);
+ */
+ int need_linktimer;
+ unsigned long link_timeout;
+ /*
+ * tx specific fields.
+ */
+ union ring_type get_tx, put_tx, first_tx, last_tx;
+ struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
+ struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
+ struct nv_skb_map *tx_skb;
+
+ union ring_type tx_ring;
+ u32 tx_flags;
+ int tx_ring_size;
+ int tx_limit;
+ u32 tx_pkts_in_progress;
+ struct nv_skb_map *tx_change_owner;
+ struct nv_skb_map *tx_end_flip;
+ int tx_stop;
+
+ /* msi/msi-x fields */
+ u32 msi_flags;
+ struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
+
+ /* flow control */
+ u32 pause_flags;
+
+ /* power saved state */
+ u32 saved_config_space[NV_PCI_REGSZ_MAX/4];
+
+ /* for different msi-x irq type */
+ char name_rx[IFNAMSIZ + 3]; /* -rx */
+ char name_tx[IFNAMSIZ + 3]; /* -tx */
+ char name_other[IFNAMSIZ + 6]; /* -other */
+};
+
+/*
+ * Maximum number of loops until we assume that a bit in the irq mask
+ * is stuck. Overridable with module param.
+ */
+static int max_interrupt_work = 4;
+
+/*
+ * Optimization can be either throughput mode or cpu mode
+ *
+ * Throughput Mode: Every tx and rx packet will generate an interrupt.
+ * CPU Mode: Interrupts are controlled by a timer.
+ */
+enum {
+ NV_OPTIMIZATION_MODE_THROUGHPUT,
+ NV_OPTIMIZATION_MODE_CPU,
+ NV_OPTIMIZATION_MODE_DYNAMIC
+};
+static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;
+
+/*
+ * Poll interval for timer irq
+ *
+ * This interval determines how frequently an interrupt is generated.
+ * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
+ * Min = 0, and Max = 65535
+ */
+static int poll_interval = -1;
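+
+/*
+ * Worked example (editorial) for the formula above: a 1 ms period
+ * gives (1000 * 100) / 2^10 = ~97, which is the NVREG_POLL_DEFAULT=97
+ * figure quoted next to NvRegPollingInterval earlier in this file.
+ */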
+
+/*
+ * MSI interrupts
+ */
+enum {
+ NV_MSI_INT_DISABLED,
+ NV_MSI_INT_ENABLED
+};
+static int msi = NV_MSI_INT_ENABLED;
+
+/*
+ * MSIX interrupts
+ */
+enum {
+ NV_MSIX_INT_DISABLED,
+ NV_MSIX_INT_ENABLED
+};
+static int msix = NV_MSIX_INT_ENABLED;
+
+/*
+ * DMA 64bit
+ */
+enum {
+ NV_DMA_64BIT_DISABLED,
+ NV_DMA_64BIT_ENABLED
+};
+static int dma_64bit = NV_DMA_64BIT_ENABLED;
+
+/*
+ * Crossover Detection
+ * The Realtek 8201 phy on some OEM boards does not work properly.
+ */
+enum {
+ NV_CROSSOVER_DETECTION_DISABLED,
+ NV_CROSSOVER_DETECTION_ENABLED
+};
+static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
+
+/*
+ * Power down phy when interface is down (persists through reboot;
+ * older Linux and other OSes may not power it up again)
+ */
+static int phy_power_down;
+
+static inline struct fe_priv *get_nvpriv(struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+static inline u8 __iomem *get_hwbase(struct net_device *dev)
+{
+ return ((struct fe_priv *)netdev_priv(dev))->base;
+}
+
+static inline void pci_push(u8 __iomem *base)
+{
+ /* force out pending posted writes */
+ readl(base);
+}
+
+static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
+{
+ return le32_to_cpu(prd->flaglen)
+ & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
+}
+
+static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
+{
+ return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
+}
+
+static bool nv_optimized(struct fe_priv *np)
+{
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ return false;
+ return true;
+}
+
+static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
+ int delay, int delaymax)
+{
+ u8 __iomem *base = get_hwbase(dev);
+
+ pci_push(base);
+ do {
+ udelay(delay);
+ delaymax -= delay;
+ if (delaymax < 0)
+ return 1;
+ } while ((readl(base + offset) & mask) != target);
+ return 0;
+}
+
+#define NV_SETUP_RX_RING 0x01
+#define NV_SETUP_TX_RING 0x02
+
+static inline u32 dma_low(dma_addr_t addr)
+{
+ return addr;
+}
+
+static inline u32 dma_high(dma_addr_t addr)
+{
+ return addr>>31>>1; /* 0 if 32bit, shift down by 32 if 64bit; split into two shifts because ">> 32" is undefined for a 32-bit dma_addr_t */
+}
+
+static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ if (!nv_optimized(np)) {
+ if (rxtx_flags & NV_SETUP_RX_RING)
+ writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
+ if (rxtx_flags & NV_SETUP_TX_RING)
+ writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+ } else {
+ if (rxtx_flags & NV_SETUP_RX_RING) {
+ writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
+ writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
+ }
+ if (rxtx_flags & NV_SETUP_TX_RING) {
+ writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
+ writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
+ }
+ }
+}
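+
+/*
+ * Illustrative note (editorial): both descriptor rings live in one
+ * coherent allocation laid out as [rx ring][tx ring], which is why the
+ * tx base above is ring_addr + rx_ring_size * sizeof(descriptor); the
+ * matching single free is in free_rings() below.
+ */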
+
+static void free_rings(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ if (!nv_optimized(np)) {
+ if (np->rx_ring.orig)
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
+ np->rx_ring.orig, np->ring_addr);
+ } else {
+ if (np->rx_ring.ex)
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
+ np->rx_ring.ex, np->ring_addr);
+ }
+ kfree(np->rx_skb);
+ kfree(np->tx_skb);
+}
+
+static int using_multi_irqs(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+ ((np->msi_flags & NV_MSI_X_ENABLED) &&
+ ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
+ return 0;
+ else
+ return 1;
+}
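+
+/*
+ * Illustrative note (editorial): the condition above reduces to
+ * "MSI-X enabled with more than one vector"; INTx, plain MSI and
+ * single-vector MSI-X are all treated as one shared irq.
+ */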
+
+static void nv_txrx_gate(struct net_device *dev, bool gate)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 powerstate;
+
+ if (!np->mac_in_use &&
+ (np->driver_data & DEV_HAS_POWER_CNTRL)) {
+ powerstate = readl(base + NvRegPowerState2);
+ if (gate)
+ powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
+ else
+ powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
+ writel(powerstate, base + NvRegPowerState2);
+ }
+}
+
+static void nv_enable_irq(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ if (!using_multi_irqs(dev)) {
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+ enable_irq(np->pci_dev->irq);
+ } else {
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+ }
+}
+
+static void nv_disable_irq(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ if (!using_multi_irqs(dev)) {
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+ disable_irq(np->pci_dev->irq);
+ } else {
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+ }
+}
+
+/* In MSIX mode, a write to irqmask behaves as XOR */
+static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
+{
+ u8 __iomem *base = get_hwbase(dev);
+
+ writel(mask, base + NvRegIrqMask);
+}
+
+static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ if (np->msi_flags & NV_MSI_X_ENABLED) {
+ writel(mask, base + NvRegIrqMask);
+ } else {
+ if (np->msi_flags & NV_MSI_ENABLED)
+ writel(0, base + NvRegMSIIrqMask);
+ writel(0, base + NvRegIrqMask);
+ }
+}
+
+static void nv_napi_enable(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ napi_enable(&np->napi);
+}
+
+static void nv_napi_disable(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ napi_disable(&np->napi);
+}
+
+#define MII_READ (-1)
+/* mii_rw: read/write a register on the PHY.
+ *
+ * Caller must guarantee serialization
+ */
+static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
+{
+ u8 __iomem *base = get_hwbase(dev);
+ u32 reg;
+ int retval;
+
+ writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);
+
+ reg = readl(base + NvRegMIIControl);
+ if (reg & NVREG_MIICTL_INUSE) {
+ writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
+ udelay(NV_MIIBUSY_DELAY);
+ }
+
+ reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
+ if (value != MII_READ) {
+ writel(value, base + NvRegMIIData);
+ reg |= NVREG_MIICTL_WRITE;
+ }
+ writel(reg, base + NvRegMIIControl);
+
+ if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
+ NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
+ retval = -1;
+ } else if (value != MII_READ) {
+ /* it was a write operation - fewer failures are detectable */
+ retval = 0;
+ } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
+ retval = -1;
+ } else {
+ retval = readl(base + NvRegMIIData);
+ }
+
+ return retval;
+}
+
+static int phy_reset(struct net_device *dev, u32 bmcr_setup)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u32 miicontrol;
+ unsigned int tries = 0;
+
+ miicontrol = BMCR_RESET | bmcr_setup;
+ if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
+ return -1;
+
+ /* wait for 500ms */
+ msleep(500);
+
+ /* must wait till reset is deasserted */
+ while (miicontrol & BMCR_RESET) {
+ usleep_range(10000, 20000);
+ miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ /* FIXME: 100 tries seem excessive */
+ if (tries++ > 100)
+ return -1;
+ }
+ return 0;
+}
+
+static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
+{
+ static const struct {
+ int reg;
+ int init;
+ } ri[] = {
+ { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
+ { PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
+ { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
+ { PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
+ { PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
+ { PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
+ { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ri); i++) {
+ if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
+ return PHY_ERROR;
+ }
+
+ return 0;
+}
+
+static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
+{
+ u32 reg;
+ u8 __iomem *base = get_hwbase(dev);
+ u32 powerstate = readl(base + NvRegPowerState2);
+
+ /* need to perform hw phy reset */
+ powerstate |= NVREG_POWERSTATE2_PHY_RESET;
+ writel(powerstate, base + NvRegPowerState2);
+ msleep(25);
+
+ powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
+ writel(powerstate, base + NvRegPowerState2);
+ msleep(25);
+
+ reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
+ reg |= PHY_REALTEK_INIT9;
+ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
+ return PHY_ERROR;
+ reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
+ if (!(reg & PHY_REALTEK_INIT11)) {
+ reg |= PHY_REALTEK_INIT11;
+ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
+ return PHY_ERROR;
+
+ return 0;
+}
+
+static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
+{
+ u32 phy_reserved;
+
+ if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG6, MII_READ);
+ phy_reserved |= PHY_REALTEK_INIT7;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG6, phy_reserved))
+ return PHY_ERROR;
+ }
+
+ return 0;
+}
+
+static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
+{
+ u32 phy_reserved;
+
+ if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
+ if (mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG2, MII_READ);
+ phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
+ phy_reserved |= PHY_REALTEK_INIT3;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG2, phy_reserved))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
+ return PHY_ERROR;
+ }
+
+ return 0;
+}
+
+static int init_cicada(struct net_device *dev, struct fe_priv *np,
+ u32 phyinterface)
+{
+ u32 phy_reserved;
+
+ if (phyinterface & PHY_RGMII) {
+ phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
+ phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
+ phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
+ if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
+ phy_reserved |= PHY_CICADA_INIT5;
+ if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
+ return PHY_ERROR;
+ }
+ phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
+ phy_reserved |= PHY_CICADA_INIT6;
+ if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
+ return PHY_ERROR;
+
+ return 0;
+}
+
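+/* Vendor-specified init sequence for Vitesse PHYs; the register/value
+ * pairs are opaque magic and are written verbatim. */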
+static int init_vitesse(struct net_device *dev, struct fe_priv *np)
+{
+ u32 phy_reserved;
+
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG4, MII_READ);
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG3, MII_READ);
+ phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
+ phy_reserved |= PHY_VITESSE_INIT3;
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG4, MII_READ);
+ phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
+ phy_reserved |= PHY_VITESSE_INIT3;
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG3, MII_READ);
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG4, MII_READ);
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG3, MII_READ);
+ phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
+ phy_reserved |= PHY_VITESSE_INIT8;
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
+ return PHY_ERROR;
+
+ return 0;
+}
+
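+/* Bring the PHY to a known state: apply vendor errata workarounds, set
+ * up advertisement, reset the PHY and (re)start autonegotiation. */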
+static int phy_init(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 phyinterface;
+ u32 mii_status, mii_control, mii_control_1000, reg;
+
+ /* phy errata for E3016 phy */
+ if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+ reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
+ reg &= ~PHY_MARVELL_E3016_INITMASK;
+ if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
+ netdev_info(dev, "%s: phy write to errata reg failed\n",
+ pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ }
+ if (np->phy_oui == PHY_OUI_REALTEK) {
+ if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
+ np->phy_rev == PHY_REV_REALTEK_8211B) {
+ if (init_realtek_8211b(dev, np)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ } else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
+ np->phy_rev == PHY_REV_REALTEK_8211C) {
+ if (init_realtek_8211c(dev, np)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ } else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
+ if (init_realtek_8201(dev, np)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ }
+ }
+
+ /* set advertise register */
+ reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL |
+ ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
+ if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
+ netdev_info(dev, "%s: phy write to advertise failed\n",
+ pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+
+ /* get phy interface type */
+ phyinterface = readl(base + NvRegPhyInterface);
+
+ /* see if gigabit phy */
+ mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+ if (mii_status & PHY_GIGABIT) {
+ np->gigabit = PHY_GIGABIT;
+ mii_control_1000 = mii_rw(dev, np->phyaddr,
+ MII_CTRL1000, MII_READ);
+ mii_control_1000 &= ~ADVERTISE_1000HALF;
+ if (phyinterface & PHY_RGMII)
+ mii_control_1000 |= ADVERTISE_1000FULL;
+ else
+ mii_control_1000 &= ~ADVERTISE_1000FULL;
+
+ if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ } else
+ np->gigabit = 0;
+
+ mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ mii_control |= BMCR_ANENABLE;
+
+ if (np->phy_oui == PHY_OUI_REALTEK &&
+ np->phy_model == PHY_MODEL_REALTEK_8211 &&
+ np->phy_rev == PHY_REV_REALTEK_8211C) {
+ /* start autoneg since we already performed hw reset above */
+ mii_control |= BMCR_ANRESTART;
+ if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ } else {
+ /* reset the phy
+ * (certain phys need bmcr to be setup with reset)
+ */
+ if (phy_reset(dev, mii_control)) {
+ netdev_info(dev, "%s: phy reset failed\n",
+ pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ }
+
+ /* phy vendor specific configuration */
+ if (np->phy_oui == PHY_OUI_CICADA) {
+ if (init_cicada(dev, np, phyinterface)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ } else if (np->phy_oui == PHY_OUI_VITESSE) {
+ if (init_vitesse(dev, np)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ } else if (np->phy_oui == PHY_OUI_REALTEK) {
+ if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
+ np->phy_rev == PHY_REV_REALTEK_8211B) {
+ /* reset could have cleared these out, set them back */
+ if (init_realtek_8211b(dev, np)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ } else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
+ if (init_realtek_8201(dev, np) ||
+ init_realtek_8201_cross(dev, np)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ }
+ }
+
+ /* some phys clear out pause advertisement on reset, set it back */
+ mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
+
+ /* restart auto negotiation, power down phy */
+ mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
+ if (phy_power_down)
+ mii_control |= BMCR_PDOWN;
+ if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
+ return PHY_ERROR;
+
+ return 0;
+}
+
+static void nv_start_rx(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 rx_ctrl = readl(base + NvRegReceiverControl);
+
+ /* Already running? Stop it. */
+ if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
+ rx_ctrl &= ~NVREG_RCVCTL_START;
+ writel(rx_ctrl, base + NvRegReceiverControl);
+ pci_push(base);
+ }
+ writel(np->linkspeed, base + NvRegLinkSpeed);
+ pci_push(base);
+ rx_ctrl |= NVREG_RCVCTL_START;
+ if (np->mac_in_use)
+ rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
+ writel(rx_ctrl, base + NvRegReceiverControl);
+ pci_push(base);
+}
+
+static void nv_stop_rx(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 rx_ctrl = readl(base + NvRegReceiverControl);
+
+ if (!np->mac_in_use)
+ rx_ctrl &= ~NVREG_RCVCTL_START;
+ else
+ rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
+ writel(rx_ctrl, base + NvRegReceiverControl);
+ if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
+ NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
+ netdev_info(dev, "%s: ReceiverStatus remained busy\n",
+ __func__);
+
+ udelay(NV_RXSTOP_DELAY2);
+ if (!np->mac_in_use)
+ writel(0, base + NvRegLinkSpeed);
+}
+
+static void nv_start_tx(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 tx_ctrl = readl(base + NvRegTransmitterControl);
+
+ tx_ctrl |= NVREG_XMITCTL_START;
+ if (np->mac_in_use)
+ tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
+ writel(tx_ctrl, base + NvRegTransmitterControl);
+ pci_push(base);
+}
+
+static void nv_stop_tx(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 tx_ctrl = readl(base + NvRegTransmitterControl);
+
+ if (!np->mac_in_use)
+ tx_ctrl &= ~NVREG_XMITCTL_START;
+ else
+ tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
+ writel(tx_ctrl, base + NvRegTransmitterControl);
+ if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
+ NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
+ netdev_info(dev, "%s: TransmitterStatus remained busy\n",
+ __func__);
+
+ udelay(NV_TXSTOP_DELAY2);
+ if (!np->mac_in_use)
+ writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
+ base + NvRegTransmitPoll);
+}
+
+static void nv_start_rxtx(struct net_device *dev)
+{
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+}
+
+static void nv_stop_rxtx(struct net_device *dev)
+{
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+}
+
+static void nv_txrx_reset(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
+ pci_push(base);
+ udelay(NV_TXRX_RESET_DELAY);
+ writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
+ pci_push(base);
+}
+
+static void nv_mac_reset(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 temp1, temp2, temp3;
+
+ writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
+ pci_push(base);
+
+ /* save registers since they will be cleared on reset */
+ temp1 = readl(base + NvRegMacAddrA);
+ temp2 = readl(base + NvRegMacAddrB);
+ temp3 = readl(base + NvRegTransmitPoll);
+
+ writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
+ pci_push(base);
+ udelay(NV_MAC_RESET_DELAY);
+ writel(0, base + NvRegMacReset);
+ pci_push(base);
+ udelay(NV_MAC_RESET_DELAY);
+
+ /* restore saved registers */
+ writel(temp1, base + NvRegMacAddrA);
+ writel(temp2, base + NvRegMacAddrB);
+ writel(temp3, base + NvRegTransmitPoll);
+
+ writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
+ pci_push(base);
+}
+
+static void nv_get_hw_stats(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ np->estats.tx_bytes += readl(base + NvRegTxCnt);
+ np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
+ np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
+ np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
+ np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
+ np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
+ np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
+ np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
+ np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
+ np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
+ np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
+ np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
+ np->estats.rx_runt += readl(base + NvRegRxRunt);
+ np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
+ np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
+ np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
+ np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
+ np->estats.rx_length_error += readl(base + NvRegRxLenErr);
+ np->estats.rx_unicast += readl(base + NvRegRxUnicast);
+ np->estats.rx_multicast += readl(base + NvRegRxMulticast);
+ np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
+ np->estats.rx_packets =
+ np->estats.rx_unicast +
+ np->estats.rx_multicast +
+ np->estats.rx_broadcast;
+ np->estats.rx_errors_total =
+ np->estats.rx_crc_errors +
+ np->estats.rx_over_errors +
+ np->estats.rx_frame_error +
+ (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
+ np->estats.rx_late_collision +
+ np->estats.rx_runt +
+ np->estats.rx_frame_too_long;
+ np->estats.tx_errors_total =
+ np->estats.tx_late_collision +
+ np->estats.tx_fifo_errors +
+ np->estats.tx_carrier_errors +
+ np->estats.tx_excess_deferral +
+ np->estats.tx_retry_error;
+
+ if (np->driver_data & DEV_HAS_STATISTICS_V2) {
+ np->estats.tx_deferral += readl(base + NvRegTxDef);
+ np->estats.tx_packets += readl(base + NvRegTxFrame);
+ np->estats.rx_bytes += readl(base + NvRegRxCnt);
+ np->estats.tx_pause += readl(base + NvRegTxPause);
+ np->estats.rx_pause += readl(base + NvRegRxPause);
+ np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
+ }
+
+ if (np->driver_data & DEV_HAS_STATISTICS_V3) {
+ np->estats.tx_unicast += readl(base + NvRegTxUnicast);
+ np->estats.tx_multicast += readl(base + NvRegTxMulticast);
+ np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
+ }
+}
+
+/*
+ * nv_get_stats: dev->get_stats function
+ * Get the latest stats values from the nic.
+ * Called with read_lock(&dev_base_lock) held for read -
+ * only synchronized against unregister_netdevice.
+ */
+static struct net_device_stats *nv_get_stats(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ /* If the nic supports hw counters then retrieve latest values */
+ if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
+ nv_get_hw_stats(dev);
+
+ /* copy to net_device stats */
+ dev->stats.tx_bytes = np->estats.tx_bytes;
+ dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
+ dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
+ dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
+ dev->stats.rx_over_errors = np->estats.rx_over_errors;
+ dev->stats.rx_errors = np->estats.rx_errors_total;
+ dev->stats.tx_errors = np->estats.tx_errors_total;
+ }
+
+ return &dev->stats;
+}
+
+/*
+ * nv_alloc_rx: fill rx ring entries.
+ * Returns 1 if an skb allocation failed and the rx engine is left
+ * without available descriptors.
+ */
+static int nv_alloc_rx(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ struct ring_desc *less_rx;
+
+ less_rx = np->get_rx.orig;
+ if (less_rx-- == np->first_rx.orig)
+ less_rx = np->last_rx.orig;
+
+ while (np->put_rx.orig != less_rx) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+ if (skb) {
+ np->put_rx_ctx->skb = skb;
+ np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
+ skb->data,
+ skb_tailroom(skb),
+ PCI_DMA_FROMDEVICE);
+ np->put_rx_ctx->dma_len = skb_tailroom(skb);
+ np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
+ wmb();
+ np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+ if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
+ np->put_rx.orig = np->first_rx.orig;
+ if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
+ np->put_rx_ctx = np->first_rx_ctx;
+ } else
+ return 1;
+ }
+ return 0;
+}
+
+static int nv_alloc_rx_optimized(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ struct ring_desc_ex *less_rx;
+
+ less_rx = np->get_rx.ex;
+ if (less_rx-- == np->first_rx.ex)
+ less_rx = np->last_rx.ex;
+
+ while (np->put_rx.ex != less_rx) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+ if (skb) {
+ np->put_rx_ctx->skb = skb;
+ np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
+ skb->data,
+ skb_tailroom(skb),
+ PCI_DMA_FROMDEVICE);
+ np->put_rx_ctx->dma_len = skb_tailroom(skb);
+ np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
+ np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
+ wmb();
+ np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+ if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
+ np->put_rx.ex = np->first_rx.ex;
+ if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
+ np->put_rx_ctx = np->first_rx_ctx;
+ } else
+ return 1;
+ }
+ return 0;
+}
+
+/* Timer callback: if rx buffers were exhausted, runs after 50ms to retry the refill */
+static void nv_do_rx_refill(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct fe_priv *np = netdev_priv(dev);
+
+ /* Just reschedule NAPI rx processing */
+ napi_schedule(&np->napi);
+}
+
+static void nv_init_rx(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ int i;
+
+ np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
+
+ if (!nv_optimized(np))
+ np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
+ else
+ np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
+ np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
+ np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
+
+ for (i = 0; i < np->rx_ring_size; i++) {
+ if (!nv_optimized(np)) {
+ np->rx_ring.orig[i].flaglen = 0;
+ np->rx_ring.orig[i].buf = 0;
+ } else {
+ np->rx_ring.ex[i].flaglen = 0;
+ np->rx_ring.ex[i].txvlan = 0;
+ np->rx_ring.ex[i].bufhigh = 0;
+ np->rx_ring.ex[i].buflow = 0;
+ }
+ np->rx_skb[i].skb = NULL;
+ np->rx_skb[i].dma = 0;
+ }
+}
+
+static void nv_init_tx(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ int i;
+
+ np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
+
+ if (!nv_optimized(np))
+ np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
+ else
+ np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
+ np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
+ np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
+ np->tx_pkts_in_progress = 0;
+ np->tx_change_owner = NULL;
+ np->tx_end_flip = NULL;
+ np->tx_stop = 0;
+
+ for (i = 0; i < np->tx_ring_size; i++) {
+ if (!nv_optimized(np)) {
+ np->tx_ring.orig[i].flaglen = 0;
+ np->tx_ring.orig[i].buf = 0;
+ } else {
+ np->tx_ring.ex[i].flaglen = 0;
+ np->tx_ring.ex[i].txvlan = 0;
+ np->tx_ring.ex[i].bufhigh = 0;
+ np->tx_ring.ex[i].buflow = 0;
+ }
+ np->tx_skb[i].skb = NULL;
+ np->tx_skb[i].dma = 0;
+ np->tx_skb[i].dma_len = 0;
+ np->tx_skb[i].dma_single = 0;
+ np->tx_skb[i].first_tx_desc = NULL;
+ np->tx_skb[i].next_tx_ctx = NULL;
+ }
+}
+
+static int nv_init_ring(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ nv_init_tx(dev);
+ nv_init_rx(dev);
+
+ if (!nv_optimized(np))
+ return nv_alloc_rx(dev);
+ else
+ return nv_alloc_rx_optimized(dev);
+}
+
+static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
+{
+ if (tx_skb->dma) {
+ if (tx_skb->dma_single)
+ pci_unmap_single(np->pci_dev, tx_skb->dma,
+ tx_skb->dma_len,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(np->pci_dev, tx_skb->dma,
+ tx_skb->dma_len,
+ PCI_DMA_TODEVICE);
+ tx_skb->dma = 0;
+ }
+}
+
+static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
+{
+ nv_unmap_txskb(np, tx_skb);
+ if (tx_skb->skb) {
+ dev_kfree_skb_any(tx_skb->skb);
+ tx_skb->skb = NULL;
+ return 1;
+ }
+ return 0;
+}
+
+static void nv_drain_tx(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ unsigned int i;
+
+ for (i = 0; i < np->tx_ring_size; i++) {
+ if (!nv_optimized(np)) {
+ np->tx_ring.orig[i].flaglen = 0;
+ np->tx_ring.orig[i].buf = 0;
+ } else {
+ np->tx_ring.ex[i].flaglen = 0;
+ np->tx_ring.ex[i].txvlan = 0;
+ np->tx_ring.ex[i].bufhigh = 0;
+ np->tx_ring.ex[i].buflow = 0;
+ }
+ if (nv_release_txskb(np, &np->tx_skb[i]))
+ dev->stats.tx_dropped++;
+ np->tx_skb[i].dma = 0;
+ np->tx_skb[i].dma_len = 0;
+ np->tx_skb[i].dma_single = 0;
+ np->tx_skb[i].first_tx_desc = NULL;
+ np->tx_skb[i].next_tx_ctx = NULL;
+ }
+ np->tx_pkts_in_progress = 0;
+ np->tx_change_owner = NULL;
+ np->tx_end_flip = NULL;
+}
+
+static void nv_drain_rx(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < np->rx_ring_size; i++) {
+ if (!nv_optimized(np)) {
+ np->rx_ring.orig[i].flaglen = 0;
+ np->rx_ring.orig[i].buf = 0;
+ } else {
+ np->rx_ring.ex[i].flaglen = 0;
+ np->rx_ring.ex[i].txvlan = 0;
+ np->rx_ring.ex[i].bufhigh = 0;
+ np->rx_ring.ex[i].buflow = 0;
+ }
+ wmb();
+ if (np->rx_skb[i].skb) {
+ pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
+ (skb_end_pointer(np->rx_skb[i].skb) -
+ np->rx_skb[i].skb->data),
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(np->rx_skb[i].skb);
+ np->rx_skb[i].skb = NULL;
+ }
+ }
+}
+
+static void nv_drain_rxtx(struct net_device *dev)
+{
+ nv_drain_tx(dev);
+ nv_drain_rx(dev);
+}
+
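+/* Number of free tx slots: ring size minus the producer/consumer
+ * distance of the context pointers, taken modulo the ring size. */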
+static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
+{
+ return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
+}
+
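+/* Reseed the slot-time value used for collision backoff with random
+ * bits; tx/rx are briefly stopped so the new seed takes effect. */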
+static void nv_legacybackoff_reseed(struct net_device *dev)
+{
+ u8 __iomem *base = get_hwbase(dev);
+ u32 reg;
+ u32 low;
+ int tx_status = 0;
+
+ reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
+ get_random_bytes(&low, sizeof(low));
+ reg |= low & NVREG_SLOTTIME_MASK;
+
+ /* Need to stop tx before change takes effect.
+ * Caller already holds np->lock.
+ */
+ tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
+ if (tx_status)
+ nv_stop_tx(dev);
+ nv_stop_rx(dev);
+ writel(reg, base + NvRegSlotTime);
+ if (tx_status)
+ nv_start_tx(dev);
+ nv_start_rx(dev);
+}
+
+/* Gear Backoff Seeds */
+#define BACKOFF_SEEDSET_ROWS 8
+#define BACKOFF_SEEDSET_LFSRS 15
+
+/* Known Good seed sets */
+static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
+ {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
+ {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
+ {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
+ {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
+ {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
+ {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
+ {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
+ {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
+
+static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
+ {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
+ {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
+ {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
+ {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
+ {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
+ {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
+ {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
+ {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
+
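+/* Reseed the free-running LFSR and all gear LFSRs from random data so
+ * that backoff timing differs between NICs sharing a segment. */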
+static void nv_gear_backoff_reseed(struct net_device *dev)
+{
+ u8 __iomem *base = get_hwbase(dev);
+ u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
+ u32 temp, seedset, combinedSeed;
+ int i;
+
+ /* Setup seed for free running LFSR */
+ /* We gather three random 12-bit values and swizzle their
+ bits around to increase randomness */
+ get_random_bytes(&miniseed1, sizeof(miniseed1));
+ miniseed1 &= 0x0fff;
+ if (miniseed1 == 0)
+ miniseed1 = 0xabc;
+
+ get_random_bytes(&miniseed2, sizeof(miniseed2));
+ miniseed2 &= 0x0fff;
+ if (miniseed2 == 0)
+ miniseed2 = 0xabc;
+ miniseed2_reversed =
+ ((miniseed2 & 0xF00) >> 8) |
+ (miniseed2 & 0x0F0) |
+ ((miniseed2 & 0x00F) << 8);
+
+ get_random_bytes(&miniseed3, sizeof(miniseed3));
+ miniseed3 &= 0x0fff;
+ if (miniseed3 == 0)
+ miniseed3 = 0xabc;
+ miniseed3_reversed =
+ ((miniseed3 & 0xF00) >> 8) |
+ (miniseed3 & 0x0F0) |
+ ((miniseed3 & 0x00F) << 8);
+
+ combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
+ (miniseed2 ^ miniseed3_reversed);
+
+ /* Seeds cannot be zero */
+ if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
+ combinedSeed |= 0x08;
+ if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
+ combinedSeed |= 0x8000;
+
+ /* No need to disable tx here */
+ temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
+ temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
+ temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
+ writel(temp, base + NvRegBackOffControl);
+
+ /* Setup seeds for all gear LFSRs. */
+ get_random_bytes(&seedset, sizeof(seedset));
+ seedset = seedset % BACKOFF_SEEDSET_ROWS;
+ for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
+ temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
+ temp |= main_seedset[seedset][i-1] & 0x3ff;
+ temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
+ writel(temp, base + NvRegBackOffControl);
+ }
+}
+
+/*
+ * nv_start_xmit: dev->hard_start_xmit function
+ * Called with netif_tx_lock held.
+ */
+static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u32 tx_flags = 0;
+ u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
+ unsigned int fragments = skb_shinfo(skb)->nr_frags;
+ unsigned int i;
+ u32 offset = 0;
+ u32 bcnt;
+ u32 size = skb_headlen(skb);
+ u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+ u32 empty_slots;
+ struct ring_desc *put_tx;
+ struct ring_desc *start_tx;
+ struct ring_desc *prev_tx;
+ struct nv_skb_map *prev_tx_ctx;
+ unsigned long flags;
+
+ /* add fragments to entries count */
+ for (i = 0; i < fragments; i++) {
+ entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
+ ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+ }
+
+ spin_lock_irqsave(&np->lock, flags);
+ empty_slots = nv_get_empty_tx_slots(np);
+ if (unlikely(empty_slots <= entries)) {
+ netif_stop_queue(dev);
+ np->tx_stop = 1;
+ spin_unlock_irqrestore(&np->lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ start_tx = put_tx = np->put_tx.orig;
+
+ /* setup the header buffer */
+ do {
+ prev_tx = put_tx;
+ prev_tx_ctx = np->put_tx_ctx;
+ bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+ np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
+ PCI_DMA_TODEVICE);
+ np->put_tx_ctx->dma_len = bcnt;
+ np->put_tx_ctx->dma_single = 1;
+ put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
+ put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
+
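+ /* tx_flags was 0 above, so the first descriptor is not yet
+ * valid; its VALID bit is set last, under np->lock, below */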
+ tx_flags = np->tx_flags;
+ offset += bcnt;
+ size -= bcnt;
+ if (unlikely(put_tx++ == np->last_tx.orig))
+ put_tx = np->first_tx.orig;
+ if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
+ np->put_tx_ctx = np->first_tx_ctx;
+ } while (size);
+
+ /* setup the fragments */
+ for (i = 0; i < fragments; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ u32 size = frag->size;
+ offset = 0;
+
+ do {
+ prev_tx = put_tx;
+ prev_tx_ctx = np->put_tx_ctx;
+ bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+ np->put_tx_ctx->dma = skb_frag_dma_map(
+ &np->pci_dev->dev,
+ frag, offset,
+ bcnt,
+ DMA_TO_DEVICE);
+ np->put_tx_ctx->dma_len = bcnt;
+ np->put_tx_ctx->dma_single = 0;
+ put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
+ put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
+
+ offset += bcnt;
+ size -= bcnt;
+ if (unlikely(put_tx++ == np->last_tx.orig))
+ put_tx = np->first_tx.orig;
+ if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
+ np->put_tx_ctx = np->first_tx_ctx;
+ } while (size);
+ }
+
+ /* set last fragment flag */
+ prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
+
+ /* save skb in this slot's context area */
+ prev_tx_ctx->skb = skb;
+
+ if (skb_is_gso(skb))
+ tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
+ else
+ tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
+ NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ /* set tx flags */
+ start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+ np->put_tx.orig = put_tx;
+
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ return NETDEV_TX_OK;
+}
+
+static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u32 tx_flags = 0;
+ u32 tx_flags_extra;
+ unsigned int fragments = skb_shinfo(skb)->nr_frags;
+ unsigned int i;
+ u32 offset = 0;
+ u32 bcnt;
+ u32 size = skb_headlen(skb);
+ u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+ u32 empty_slots;
+ struct ring_desc_ex *put_tx;
+ struct ring_desc_ex *start_tx;
+ struct ring_desc_ex *prev_tx;
+ struct nv_skb_map *prev_tx_ctx;
+ struct nv_skb_map *start_tx_ctx;
+ unsigned long flags;
+
+ /* add fragments to entries count */
+ for (i = 0; i < fragments; i++) {
+ entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
+ ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+ }
+
+ spin_lock_irqsave(&np->lock, flags);
+ empty_slots = nv_get_empty_tx_slots(np);
+ if (unlikely(empty_slots <= entries)) {
+ netif_stop_queue(dev);
+ np->tx_stop = 1;
+ spin_unlock_irqrestore(&np->lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ start_tx = put_tx = np->put_tx.ex;
+ start_tx_ctx = np->put_tx_ctx;
+
+ /* setup the header buffer */
+ do {
+ prev_tx = put_tx;
+ prev_tx_ctx = np->put_tx_ctx;
+ bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+ np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
+ PCI_DMA_TODEVICE);
+ np->put_tx_ctx->dma_len = bcnt;
+ np->put_tx_ctx->dma_single = 1;
+ put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
+ put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
+ put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
+
+ tx_flags = NV_TX2_VALID;
+ offset += bcnt;
+ size -= bcnt;
+ if (unlikely(put_tx++ == np->last_tx.ex))
+ put_tx = np->first_tx.ex;
+ if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
+ np->put_tx_ctx = np->first_tx_ctx;
+ } while (size);
+
+ /* setup the fragments */
+ for (i = 0; i < fragments; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ u32 size = frag->size;
+ offset = 0;
+
+ do {
+ prev_tx = put_tx;
+ prev_tx_ctx = np->put_tx_ctx;
+ bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+ np->put_tx_ctx->dma = skb_frag_dma_map(
+ &np->pci_dev->dev,
+ frag, offset,
+ bcnt,
+ DMA_TO_DEVICE);
+ np->put_tx_ctx->dma_len = bcnt;
+ np->put_tx_ctx->dma_single = 0;
+ put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
+ put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
+ put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
+
+ offset += bcnt;
+ size -= bcnt;
+ if (unlikely(put_tx++ == np->last_tx.ex))
+ put_tx = np->first_tx.ex;
+ if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
+ np->put_tx_ctx = np->first_tx_ctx;
+ } while (size);
+ }
+
+ /* set last fragment flag */
+ prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
+
+ /* save skb in this slot's context area */
+ prev_tx_ctx->skb = skb;
+
+ if (skb_is_gso(skb))
+ tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
+ else
+ tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
+ NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
+
+ /* vlan tag */
+ if (vlan_tx_tag_present(skb))
+ start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
+ vlan_tx_tag_get(skb));
+ else
+ start_tx->txvlan = 0;
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ if (np->tx_limit) {
+ /* Limit the number of outstanding tx. Setup all fragments, but
+ * do not set the VALID bit on the first descriptor. Save a pointer
+ * to that descriptor and to the next skb_map element.
+ */
+
+ if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
+ if (!np->tx_change_owner)
+ np->tx_change_owner = start_tx_ctx;
+
+ /* remove VALID bit */
+ tx_flags &= ~NV_TX2_VALID;
+ start_tx_ctx->first_tx_desc = start_tx;
+ start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
+ np->tx_end_flip = np->put_tx_ctx;
+ } else {
+ np->tx_pkts_in_progress++;
+ }
+ }
+
+ /* set tx flags */
+ start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+ np->put_tx.ex = put_tx;
+
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ return NETDEV_TX_OK;
+}
+
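+/* Release one deferred packet to the hardware by setting the VALID bit
+ * on its first descriptor, keeping at most NV_TX_LIMIT_COUNT packets
+ * in flight. Caller must own np->lock. */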
+static inline void nv_tx_flip_ownership(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ np->tx_pkts_in_progress--;
+ if (np->tx_change_owner) {
+ np->tx_change_owner->first_tx_desc->flaglen |=
+ cpu_to_le32(NV_TX2_VALID);
+ np->tx_pkts_in_progress++;
+
+ np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
+ if (np->tx_change_owner == np->tx_end_flip)
+ np->tx_change_owner = NULL;
+
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ }
+}
+
+/*
+ * nv_tx_done: check for completed packets, release the skbs.
+ *
+ * Caller must own np->lock.
+ */
+static int nv_tx_done(struct net_device *dev, int limit)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u32 flags;
+ int tx_work = 0;
+ struct ring_desc *orig_get_tx = np->get_tx.orig;
+
+ while ((np->get_tx.orig != np->put_tx.orig) &&
+ !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
+ (tx_work < limit)) {
+
+ nv_unmap_txskb(np, np->get_tx_ctx);
+
+ if (np->desc_ver == DESC_VER_1) {
+ if (flags & NV_TX_LASTPACKET) {
+ if (flags & NV_TX_ERROR) {
+ if (flags & NV_TX_UNDERFLOW)
+ dev->stats.tx_fifo_errors++;
+ if (flags & NV_TX_CARRIERLOST)
+ dev->stats.tx_carrier_errors++;
+ if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
+ nv_legacybackoff_reseed(dev);
+ dev->stats.tx_errors++;
+ } else {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
+ }
+ dev_kfree_skb_any(np->get_tx_ctx->skb);
+ np->get_tx_ctx->skb = NULL;
+ tx_work++;
+ }
+ } else {
+ if (flags & NV_TX2_LASTPACKET) {
+ if (flags & NV_TX2_ERROR) {
+ if (flags & NV_TX2_UNDERFLOW)
+ dev->stats.tx_fifo_errors++;
+ if (flags & NV_TX2_CARRIERLOST)
+ dev->stats.tx_carrier_errors++;
+ if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
+ nv_legacybackoff_reseed(dev);
+ dev->stats.tx_errors++;
+ } else {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
+ }
+ dev_kfree_skb_any(np->get_tx_ctx->skb);
+ np->get_tx_ctx->skb = NULL;
+ tx_work++;
+ }
+ }
+ if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
+ np->get_tx.orig = np->first_tx.orig;
+ if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
+ np->get_tx_ctx = np->first_tx_ctx;
+ }
+ if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
+ np->tx_stop = 0;
+ netif_wake_queue(dev);
+ }
+ return tx_work;
+}
+
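+/* As nv_tx_done, but for the extended descriptor format; also flips
+ * ownership of deferred packets when tx limiting is active. */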
+static int nv_tx_done_optimized(struct net_device *dev, int limit)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u32 flags;
+ int tx_work = 0;
+ struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
+
+ while ((np->get_tx.ex != np->put_tx.ex) &&
+ !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
+ (tx_work < limit)) {
+
+ nv_unmap_txskb(np, np->get_tx_ctx);
+
+ if (flags & NV_TX2_LASTPACKET) {
+ if (!(flags & NV_TX2_ERROR))
+ dev->stats.tx_packets++;
+ else {
+ if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
+ if (np->driver_data & DEV_HAS_GEAR_MODE)
+ nv_gear_backoff_reseed(dev);
+ else
+ nv_legacybackoff_reseed(dev);
+ }
+ }
+
+ dev_kfree_skb_any(np->get_tx_ctx->skb);
+ np->get_tx_ctx->skb = NULL;
+ tx_work++;
+
+ if (np->tx_limit)
+ nv_tx_flip_ownership(dev);
+ }
+ if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
+ np->get_tx.ex = np->first_tx.ex;
+ if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
+ np->get_tx_ctx = np->first_tx_ctx;
+ }
+ if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
+ np->tx_stop = 0;
+ netif_wake_queue(dev);
+ }
+ return tx_work;
+}
+
+/*
+ * nv_tx_timeout: dev->tx_timeout function
+ * Called with netif_tx_lock held.
+ */
+static void nv_tx_timeout(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 status;
+ union ring_type put_tx;
+ int saved_tx_limit;
+ int i;
+
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
+ else
+ status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
+
+ netdev_info(dev, "Got tx_timeout. irq: %08x\n", status);
+
+ netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
+ netdev_info(dev, "Dumping tx registers\n");
+ for (i = 0; i <= np->register_size; i += 32) {
+ netdev_info(dev,
+ "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ i,
+ readl(base + i + 0), readl(base + i + 4),
+ readl(base + i + 8), readl(base + i + 12),
+ readl(base + i + 16), readl(base + i + 20),
+ readl(base + i + 24), readl(base + i + 28));
+ }
+ netdev_info(dev, "Dumping tx ring\n");
+ for (i = 0; i < np->tx_ring_size; i += 4) {
+ if (!nv_optimized(np)) {
+ netdev_info(dev,
+ "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
+ i,
+ le32_to_cpu(np->tx_ring.orig[i].buf),
+ le32_to_cpu(np->tx_ring.orig[i].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+1].buf),
+ le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+2].buf),
+ le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+3].buf),
+ le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
+ } else {
+ netdev_info(dev,
+ "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
+ i,
+ le32_to_cpu(np->tx_ring.ex[i].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i].buflow),
+ le32_to_cpu(np->tx_ring.ex[i].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+1].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+2].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+3].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
+ }
+ }
+
+ spin_lock_irq(&np->lock);
+
+ /* 1) stop tx engine */
+ nv_stop_tx(dev);
+
+ /* 2) complete any outstanding tx and do not give HW any limited tx pkts */
+ saved_tx_limit = np->tx_limit;
+ np->tx_limit = 0; /* prevent giving HW any limited pkts */
+ np->tx_stop = 0; /* prevent waking tx queue */
+ if (!nv_optimized(np))
+ nv_tx_done(dev, np->tx_ring_size);
+ else
+ nv_tx_done_optimized(dev, np->tx_ring_size);
+
+ /* save current HW position */
+ if (np->tx_change_owner)
+ put_tx.ex = np->tx_change_owner->first_tx_desc;
+ else
+ put_tx = np->put_tx;
+
+ /* 3) clear all tx state */
+ nv_drain_tx(dev);
+ nv_init_tx(dev);
+
+ /* 4) restore state to current HW position */
+ np->get_tx = np->put_tx = put_tx;
+ np->tx_limit = saved_tx_limit;
+
+ /* 5) restart tx engine */
+ nv_start_tx(dev);
+ netif_wake_queue(dev);
+ spin_unlock_irq(&np->lock);
+}
+
+/*
+ * Called when the nic notices a mismatch between the actual data len on the
+ * wire and the len indicated in the 802 header
+ */
+static int nv_getlen(struct net_device *dev, void *packet, int datalen)
+{
+ int hdrlen; /* length of the 802 header */
+ int protolen; /* length as stored in the proto field */
+
+ /* 1) calculate len according to header */
+ if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
+ protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
+ hdrlen = VLAN_HLEN;
+ } else {
+ protolen = ntohs(((struct ethhdr *)packet)->h_proto);
+ hdrlen = ETH_HLEN;
+ }
+ if (protolen > ETH_DATA_LEN)
+ return datalen; /* Value in proto field not a len, no checks possible */
+
+ protolen += hdrlen;
+ /* consistency checks: */
+ if (datalen > ETH_ZLEN) {
+ if (datalen >= protolen) {
+ /* more data on wire than in 802 header, trim off
+ * the additional data.
+ */
+ return protolen;
+ } else {
+ /* less data on wire than mentioned in header.
+ * Discard the packet.
+ */
+ return -1;
+ }
+ } else {
+ /* short packet. Accept only if 802 values are also short */
+ if (protolen > ETH_ZLEN) {
+ return -1;
+ }
+ return datalen;
+ }
+}
+
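+/* Process received packets for the legacy descriptor formats, handling
+ * at most 'limit' packets per call. */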
+static int nv_rx_process(struct net_device *dev, int limit)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u32 flags;
+ int rx_work = 0;
+ struct sk_buff *skb;
+ int len;
+
+ while ((np->get_rx.orig != np->put_rx.orig) &&
+ !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
+ (rx_work < limit)) {
+
+ /*
+ * the packet is for us - immediately tear down the pci mapping.
+ * TODO: check if a prefetch of the first cacheline improves
+ * the performance.
+ */
+ pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
+ np->get_rx_ctx->dma_len,
+ PCI_DMA_FROMDEVICE);
+ skb = np->get_rx_ctx->skb;
+ np->get_rx_ctx->skb = NULL;
+
+ /* look at what we actually got: */
+ if (np->desc_ver == DESC_VER_1) {
+ if (likely(flags & NV_RX_DESCRIPTORVALID)) {
+ len = flags & LEN_MASK_V1;
+ if (unlikely(flags & NV_RX_ERROR)) {
+ if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
+ len = nv_getlen(dev, skb->data, len);
+ if (len < 0) {
+ dev->stats.rx_errors++;
+ dev_kfree_skb(skb);
+ goto next_pkt;
+ }
+ }
+ /* framing errors are soft errors */
+ else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
+ if (flags & NV_RX_SUBSTRACT1)
+ len--;
+ }
+ /* the rest are hard errors */
+ else {
+ if (flags & NV_RX_MISSEDFRAME)
+ dev->stats.rx_missed_errors++;
+ if (flags & NV_RX_CRCERR)
+ dev->stats.rx_crc_errors++;
+ if (flags & NV_RX_OVERFLOW)
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ dev_kfree_skb(skb);
+ goto next_pkt;
+ }
+ }
+ } else {
+ dev_kfree_skb(skb);
+ goto next_pkt;
+ }
+ } else {
+ if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
+ len = flags & LEN_MASK_V2;
+ if (unlikely(flags & NV_RX2_ERROR)) {
+ if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
+ len = nv_getlen(dev, skb->data, len);
+ if (len < 0) {
+ dev->stats.rx_errors++;
+ dev_kfree_skb(skb);
+ goto next_pkt;
+ }
+ }
+ /* framing errors are soft errors */
+ else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
+ if (flags & NV_RX2_SUBSTRACT1)
+ len--;
+ }
+ /* the rest are hard errors */
+ else {
+ if (flags & NV_RX2_CRCERR)
+ dev->stats.rx_crc_errors++;
+ if (flags & NV_RX2_OVERFLOW)
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ dev_kfree_skb(skb);
+ goto next_pkt;
+ }
+ }
+ if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /* ip and tcp */
+ ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /* ip and udp */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ dev_kfree_skb(skb);
+ goto next_pkt;
+ }
+ }
+ /* got a valid packet - forward it to the network core */
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, dev);
+ napi_gro_receive(&np->napi, skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+next_pkt:
+ if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
+ np->get_rx.orig = np->first_rx.orig;
+ if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
+ np->get_rx_ctx = np->first_rx_ctx;
+
+ rx_work++;
+ }
+
+ return rx_work;
+}
+
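+/* As nv_rx_process, but for the extended (64-bit) descriptor format,
+ * including hardware vlan tag extraction. */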
+static int nv_rx_process_optimized(struct net_device *dev, int limit)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u32 flags;
+ u32 vlanflags = 0;
+ int rx_work = 0;
+ struct sk_buff *skb;
+ int len;
+
+ while ((np->get_rx.ex != np->put_rx.ex) &&
+ !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
+ (rx_work < limit)) {
+
+ /*
+ * the packet is for us - immediately tear down the pci mapping.
+ * TODO: check if a prefetch of the first cacheline improves
+ * the performance.
+ */
+ pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
+ np->get_rx_ctx->dma_len,
+ PCI_DMA_FROMDEVICE);
+ skb = np->get_rx_ctx->skb;
+ np->get_rx_ctx->skb = NULL;
+
+ /* look at what we actually got: */
+ if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
+ len = flags & LEN_MASK_V2;
+ if (unlikely(flags & NV_RX2_ERROR)) {
+ if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
+ len = nv_getlen(dev, skb->data, len);
+ if (len < 0) {
+ dev_kfree_skb(skb);
+ goto next_pkt;
+ }
+ }
+ /* framing errors are soft errors */
+ else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
+ if (flags & NV_RX2_SUBSTRACT1)
+ len--;
+ }
+ /* the rest are hard errors */
+ else {
+ dev_kfree_skb(skb);
+ goto next_pkt;
+ }
+ }
+
+ if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /* ip and tcp */
+ ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /* ip and udp */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* got a valid packet - forward it to the network core */
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, dev);
+ prefetch(skb->data);
+
+ vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
+
+ /*
+ * We need to check for NETIF_F_HW_VLAN_RX here:
+ * even if vlan rx accel is disabled,
+ * NV_RX3_VLAN_TAG_PRESENT is pseudo-randomly set.
+ */
+ if (dev->features & NETIF_F_HW_VLAN_RX &&
+ vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
+ u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
+
+ __vlan_hwaccel_put_tag(skb, vid);
+ }
+ napi_gro_receive(&np->napi, skb);
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ } else {
+ dev_kfree_skb(skb);
+ }
+next_pkt:
+ if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
+ np->get_rx.ex = np->first_rx.ex;
+ if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
+ np->get_rx_ctx = np->first_rx_ctx;
+
+ rx_work++;
+ }
+
+ return rx_work;
+}
+
+static void set_bufsize(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ if (dev->mtu <= ETH_DATA_LEN)
+ np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
+ else
+ np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
+}
+
+/*
+ * nv_change_mtu: dev->change_mtu function
+ * Called with dev_base_lock held for read.
+ */
+static int nv_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ int old_mtu;
+
+ if (new_mtu < 64 || new_mtu > np->pkt_limit)
+ return -EINVAL;
+
+ old_mtu = dev->mtu;
+ dev->mtu = new_mtu;
+
+ /* return early if the buffer sizes will not change */
+ if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
+ return 0;
+ if (old_mtu == new_mtu)
+ return 0;
+
+ /* synchronized against open : rtnl_lock() held by caller */
+ if (netif_running(dev)) {
+ u8 __iomem *base = get_hwbase(dev);
+ /*
+ * It seems that the nic preloads valid ring entries into an
+ * internal buffer. The procedure for flushing everything is
+ * guessed, there is probably a simpler approach.
+ * Changing the MTU is a rare event, it shouldn't matter.
+ */
+ nv_disable_irq(dev);
+ nv_napi_disable(dev);
+ netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rxtx(dev);
+ nv_txrx_reset(dev);
+ /* drain rx queue */
+ nv_drain_rxtx(dev);
+ /* reinit driver view of the rx queue */
+ set_bufsize(dev);
+ if (nv_init_ring(dev)) {
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ }
+ /* reinit nic view of the rx queue */
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+ setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+ writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ base + NvRegRingSizes);
+ pci_push(base);
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ pci_push(base);
+
+ /* restart rx engine */
+ nv_start_rxtx(dev);
+ spin_unlock(&np->lock);
+ netif_addr_unlock(dev);
+ netif_tx_unlock_bh(dev);
+ nv_napi_enable(dev);
+ nv_enable_irq(dev);
+ }
+ return 0;
+}
+
+static void nv_copy_mac_to_hw(struct net_device *dev)
+{
+ u8 __iomem *base = get_hwbase(dev);
+ u32 mac[2];
+
+ mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
+ (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
+ mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
+
+ writel(mac[0], base + NvRegMacAddrA);
+ writel(mac[1], base + NvRegMacAddrB);
+}
+
+/*
+ * nv_set_mac_address: dev->set_mac_address function
+ * Called with rtnl_lock() held.
+ */
+static int nv_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ struct sockaddr *macaddr = (struct sockaddr *)addr;
+
+ if (!is_valid_ether_addr(macaddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ /* synchronized against open : rtnl_lock() held by caller */
+ memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
+
+ if (netif_running(dev)) {
+ netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
+ spin_lock_irq(&np->lock);
+
+ /* stop rx engine */
+ nv_stop_rx(dev);
+
+ /* set mac address */
+ nv_copy_mac_to_hw(dev);
+
+ /* restart rx engine */
+ nv_start_rx(dev);
+ spin_unlock_irq(&np->lock);
+ netif_addr_unlock(dev);
+ netif_tx_unlock_bh(dev);
+ } else {
+ nv_copy_mac_to_hw(dev);
+ }
+ return 0;
+}
+
+/*
+ * nv_set_multicast: dev->set_multicast function
+ * Called with netif_tx_lock held.
+ */
+static void nv_set_multicast(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 addr[2];
+ u32 mask[2];
+ u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
+
+ memset(addr, 0, sizeof(addr));
+ memset(mask, 0, sizeof(mask));
+
+ if (dev->flags & IFF_PROMISC) {
+ pff |= NVREG_PFF_PROMISC;
+ } else {
+ pff |= NVREG_PFF_MYADDR;
+
+ if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
+ u32 alwaysOff[2];
+ u32 alwaysOn[2];
+
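+ /* Compute the address bits common to every multicast entry
+ * (alwaysOn) and absent from every entry (alwaysOff); the
+ * hardware then matches on addr within mask. */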
+ alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
+ if (dev->flags & IFF_ALLMULTI) {
+ alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
+ } else {
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned char *addr = ha->addr;
+ u32 a, b;
+
+ a = le32_to_cpu(*(__le32 *) addr);
+ b = le16_to_cpu(*(__le16 *) (&addr[4]));
+ alwaysOn[0] &= a;
+ alwaysOff[0] &= ~a;
+ alwaysOn[1] &= b;
+ alwaysOff[1] &= ~b;
+ }
+ }
+ addr[0] = alwaysOn[0];
+ addr[1] = alwaysOn[1];
+ mask[0] = alwaysOn[0] | alwaysOff[0];
+ mask[1] = alwaysOn[1] | alwaysOff[1];
+ } else {
+ mask[0] = NVREG_MCASTMASKA_NONE;
+ mask[1] = NVREG_MCASTMASKB_NONE;
+ }
+ }
+ addr[0] |= NVREG_MCASTADDRA_FORCE;
+ pff |= NVREG_PFF_ALWAYS;
+ spin_lock_irq(&np->lock);
+ nv_stop_rx(dev);
+ writel(addr[0], base + NvRegMulticastAddrA);
+ writel(addr[1], base + NvRegMulticastAddrB);
+ writel(mask[0], base + NvRegMulticastMaskA);
+ writel(mask[1], base + NvRegMulticastMaskB);
+ writel(pff, base + NvRegPacketFilterFlags);
+ nv_start_rx(dev);
+ spin_unlock_irq(&np->lock);
+}
+
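+/* Apply the negotiated pause configuration: rx pause via the packet
+ * filter flags, tx pause via the misc1 and pause-frame registers. */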
+static void nv_update_pause(struct net_device *dev, u32 pause_flags)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
+
+ if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
+ u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
+ if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
+ writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
+ np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ } else {
+ writel(pff, base + NvRegPacketFilterFlags);
+ }
+ }
+ if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
+ u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
+ if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
+ u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
+ if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
+ pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
+ if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
+ pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
+ /* limit the number of tx pause frames to a default of 8 */
+ writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
+ }
+ writel(pause_enable, base + NvRegTxPauseFrame);
+ writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
+ np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+ } else {
+ writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
+ writel(regmisc, base + NvRegMisc1);
+ }
+ }
+}
+
+/**
+ * nv_update_linkspeed: Setup the MAC according to the link partner
+ * @dev: Network device to be configured
+ *
+ * The function queries the PHY and checks if there is a link partner.
+ * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
+ * set to 10 MBit HD.
+ *
+ * The function returns 0 if there is no link partner and 1 if there is
+ * a good link partner.
+ */
+static int nv_update_linkspeed(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int adv = 0;
+ int lpa = 0;
+ int adv_lpa, adv_pause, lpa_pause;
+ int newls = np->linkspeed;
+ int newdup = np->duplex;
+ int mii_status;
+ int retval = 0;
+ u32 control_1000, status_1000, phyreg, pause_flags, txreg;
+ u32 txrxFlags = 0;
+ u32 phy_exp;
+
+ /* BMSR_LSTATUS is latched, read it twice:
+ * we want the current value.
+ */
+ mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+ mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+
+ if (!(mii_status & BMSR_LSTATUS)) {
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
+ newdup = 0;
+ retval = 0;
+ goto set_speed;
+ }
+
+ if (np->autoneg == 0) {
+ if (np->fixed_mode & LPA_100FULL) {
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
+ newdup = 1;
+ } else if (np->fixed_mode & LPA_100HALF) {
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
+ newdup = 0;
+ } else if (np->fixed_mode & LPA_10FULL) {
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
+ newdup = 1;
+ } else {
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
+ newdup = 0;
+ }
+ retval = 1;
+ goto set_speed;
+ }
+ /* check that autonegotiation is complete */
+ if (!(mii_status & BMSR_ANEGCOMPLETE)) {
+ /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
+ newdup = 0;
+ retval = 0;
+ goto set_speed;
+ }
+
+ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
+
+ retval = 1;
+ if (np->gigabit == PHY_GIGABIT) {
+ control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
+ status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
+
+ if ((control_1000 & ADVERTISE_1000FULL) &&
+ (status_1000 & LPA_1000FULL)) {
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
+ newdup = 1;
+ goto set_speed;
+ }
+ }
+
+ /* FIXME: handle parallel detection properly */
+ adv_lpa = lpa & adv;
+ if (adv_lpa & LPA_100FULL) {
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
+ newdup = 1;
+ } else if (adv_lpa & LPA_100HALF) {
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
+ newdup = 0;
+ } else if (adv_lpa & LPA_10FULL) {
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
+ newdup = 1;
+ } else if (adv_lpa & LPA_10HALF) {
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
+ newdup = 0;
+ } else {
+ newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
+ newdup = 0;
+ }
+
+set_speed:
+ if (np->duplex == newdup && np->linkspeed == newls)
+ return retval;
+
+ np->duplex = newdup;
+ np->linkspeed = newls;
+
+ /* The transmitter and receiver must be restarted for safe update */
+ if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
+ txrxFlags |= NV_RESTART_TX;
+ nv_stop_tx(dev);
+ }
+ if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
+ txrxFlags |= NV_RESTART_RX;
+ nv_stop_rx(dev);
+ }
+
+ if (np->gigabit == PHY_GIGABIT) {
+ phyreg = readl(base + NvRegSlotTime);
+ phyreg &= ~(0x3FF00);
+ if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
+ ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
+ phyreg |= NVREG_SLOTTIME_10_100_FULL;
+ else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
+ phyreg |= NVREG_SLOTTIME_1000_FULL;
+ writel(phyreg, base + NvRegSlotTime);
+ }
+
+ phyreg = readl(base + NvRegPhyInterface);
+ phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
+ if (np->duplex == 0)
+ phyreg |= PHY_HALF;
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
+ phyreg |= PHY_100;
+ else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
+ phyreg |= PHY_1000;
+ writel(phyreg, base + NvRegPhyInterface);
+
+ phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
+ if (phyreg & PHY_RGMII) {
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
+ txreg = NVREG_TX_DEFERRAL_RGMII_1000;
+ } else {
+ if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
+ txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
+ else
+ txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
+ } else {
+ txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
+ }
+ }
+ } else {
+ if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
+ txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
+ else
+ txreg = NVREG_TX_DEFERRAL_DEFAULT;
+ }
+ writel(txreg, base + NvRegTxDeferral);
+
+ if (np->desc_ver == DESC_VER_1) {
+ txreg = NVREG_TX_WM_DESC1_DEFAULT;
+ } else {
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
+ txreg = NVREG_TX_WM_DESC2_3_1000;
+ else
+ txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
+ }
+ writel(txreg, base + NvRegTxWatermark);
+
+ writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
+ base + NvRegMisc1);
+ pci_push(base);
+ writel(np->linkspeed, base + NvRegLinkSpeed);
+ pci_push(base);
+
+ pause_flags = 0;
+ /* setup pause frame */
+ if (np->duplex != 0) {
+ if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
+ adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+ lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
+
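+ /* Resolve rx/tx pause from our advertised and the link
+ * partner's pause bits, per the symmetric/asymmetric
+ * pause resolution rules (cf. IEEE 802.3 annex 28B).
+ */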
+ switch (adv_pause) {
+ case ADVERTISE_PAUSE_CAP:
+ if (lpa_pause & LPA_PAUSE_CAP) {
+ pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+ pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+ }
+ break;
+ case ADVERTISE_PAUSE_ASYM:
+ if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
+ pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+ break;
+ case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
+ if (lpa_pause & LPA_PAUSE_CAP) {
+ pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+ pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+ }
+ if (lpa_pause == LPA_PAUSE_ASYM)
+ pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ break;
+ }
+ } else {
+ pause_flags = np->pause_flags;
+ }
+ }
+ nv_update_pause(dev, pause_flags);
+
+ if (txrxFlags & NV_RESTART_TX)
+ nv_start_tx(dev);
+ if (txrxFlags & NV_RESTART_RX)
+ nv_start_rx(dev);
+
+ return retval;
+}
+
+static void nv_linkchange(struct net_device *dev)
+{
+ if (nv_update_linkspeed(dev)) {
+ if (!netif_carrier_ok(dev)) {
+ netif_carrier_on(dev);
+ netdev_info(dev, "link up\n");
+ nv_txrx_gate(dev, false);
+ nv_start_rx(dev);
+ }
+ } else {
+ if (netif_carrier_ok(dev)) {
+ netif_carrier_off(dev);
+ netdev_info(dev, "link down\n");
+ nv_txrx_gate(dev, true);
+ nv_stop_rx(dev);
+ }
+ }
+}
+
+static void nv_link_irq(struct net_device *dev)
+{
+ u8 __iomem *base = get_hwbase(dev);
+ u32 miistat;
+
+ miistat = readl(base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
+
+ if (miistat & (NVREG_MIISTAT_LINKCHANGE))
+ nv_linkchange(dev);
+}
+
+static void nv_msi_workaround(struct fe_priv *np)
+{
+ /* Need to toggle the msi irq mask within the ethernet device,
+ * otherwise future interrupts will not be detected.
+ */
+ if (np->msi_flags & NV_MSI_ENABLED) {
+ u8 __iomem *base = np->base;
+
+ writel(0, base + NvRegMSIIrqMask);
+ writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+ }
+}
+
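+/*
+ * nv_change_interrupt_mode: dynamic interrupt moderation.
+ * In dynamic mode, heavy load switches the irq mask to the poll-based
+ * CPU mode; after NV_DYNAMIC_MAX_QUIET_COUNT consecutive quiet polls
+ * the mask reverts to per-packet throughput mode. Returns 1 if
+ * np->irqmask was changed, 0 otherwise.
+ */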
+static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
+ if (total_work > NV_DYNAMIC_THRESHOLD) {
+ /* transition to poll based interrupts */
+ np->quiet_count = 0;
+ if (np->irqmask != NVREG_IRQMASK_CPU) {
+ np->irqmask = NVREG_IRQMASK_CPU;
+ return 1;
+ }
+ } else {
+ if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
+ np->quiet_count++;
+ } else {
+ /* reached a period of low activity, switch
+ * to per tx/rx packet interrupts */
+ if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
+ np->irqmask = NVREG_IRQMASK_THROUGHPUT;
+ return 1;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+static irqreturn_t nv_nic_irq(int foo, void *data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+ np->events = readl(base + NvRegIrqStatus);
+ writel(np->events, base + NvRegIrqStatus);
+ } else {
+ np->events = readl(base + NvRegMSIXIrqStatus);
+ writel(np->events, base + NvRegMSIXIrqStatus);
+ }
+ if (!(np->events & np->irqmask))
+ return IRQ_NONE;
+
+ nv_msi_workaround(np);
+
+ if (napi_schedule_prep(&np->napi)) {
+ /*
+ * Disable further irqs (msix not enabled with napi)
+ */
+ writel(0, base + NvRegIrqMask);
+ __napi_schedule(&np->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * All _optimized functions are used to help increase performance
+ * (reduce CPU and increase throughput). They use descriptor version 3,
+ * compiler directives, and reduce memory accesses.
+ */
+static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+ np->events = readl(base + NvRegIrqStatus);
+ writel(np->events, base + NvRegIrqStatus);
+ } else {
+ np->events = readl(base + NvRegMSIXIrqStatus);
+ writel(np->events, base + NvRegMSIXIrqStatus);
+ }
+ if (!(np->events & np->irqmask))
+ return IRQ_NONE;
+
+ nv_msi_workaround(np);
+
+ if (napi_schedule_prep(&np->napi)) {
+ /*
+ * Disable further irqs (msix not enabled with napi)
+ */
+ writel(0, base + NvRegIrqMask);
+ __napi_schedule(&np->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nv_nic_irq_tx(int foo, void *data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 events;
+ int i;
+ unsigned long flags;
+
+ for (i = 0;; i++) {
+ events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
+ writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
+ if (!(events & np->irqmask))
+ break;
+
+ spin_lock_irqsave(&np->lock, flags);
+ nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ if (unlikely(i > max_interrupt_work)) {
+ spin_lock_irqsave(&np->lock, flags);
+ /* disable interrupts on the nic */
+ writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
+ pci_push(base);
+
+ if (!np->in_shutdown) {
+ np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
+ mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+ }
+ spin_unlock_irqrestore(&np->lock, flags);
+ netdev_dbg(dev, "%s: too many iterations (%d)\n",
+ __func__, i);
+ break;
+ }
+
+ }
+
+ return IRQ_RETVAL(i);
+}
+
+static int nv_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct fe_priv *np = container_of(napi, struct fe_priv, napi);
+ struct net_device *dev = np->dev;
+ u8 __iomem *base = get_hwbase(dev);
+ unsigned long flags;
+ int retcode;
+ int rx_count, tx_work = 0, rx_work = 0;
+
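+ /* Reap tx completions and rx packets until the napi budget
+ * is spent or the rx ring cannot be refilled; a non-zero
+ * retcode signals an allocation failure, handled below via
+ * the oom_kick timer.
+ */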
+ do {
+ if (!nv_optimized(np)) {
+ spin_lock_irqsave(&np->lock, flags);
+ tx_work += nv_tx_done(dev, np->tx_ring_size);
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ rx_count = nv_rx_process(dev, budget - rx_work);
+ retcode = nv_alloc_rx(dev);
+ } else {
+ spin_lock_irqsave(&np->lock, flags);
+ tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ rx_count = nv_rx_process_optimized(dev,
+ budget - rx_work);
+ retcode = nv_alloc_rx_optimized(dev);
+ }
+ } while (retcode == 0 &&
+ rx_count > 0 && (rx_work += rx_count) < budget);
+
+ if (retcode) {
+ spin_lock_irqsave(&np->lock, flags);
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ spin_unlock_irqrestore(&np->lock, flags);
+ }
+
+ nv_change_interrupt_mode(dev, tx_work + rx_work);
+
+ if (unlikely(np->events & NVREG_IRQ_LINK)) {
+ spin_lock_irqsave(&np->lock, flags);
+ nv_link_irq(dev);
+ spin_unlock_irqrestore(&np->lock, flags);
+ }
+ if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
+ spin_lock_irqsave(&np->lock, flags);
+ nv_linkchange(dev);
+ spin_unlock_irqrestore(&np->lock, flags);
+ np->link_timeout = jiffies + LINK_TIMEOUT;
+ }
+ if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
+ spin_lock_irqsave(&np->lock, flags);
+ if (!np->in_shutdown) {
+ np->nic_poll_irq = np->irqmask;
+ np->recover_error = 1;
+ mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+ }
+ spin_unlock_irqrestore(&np->lock, flags);
+ napi_complete(napi);
+ return rx_work;
+ }
+
+ if (rx_work < budget) {
+ /* re-enable interrupts
+ * (msix not enabled in napi) */
+ napi_complete(napi);
+
+ writel(np->irqmask, base + NvRegIrqMask);
+ }
+ return rx_work;
+}
+
+static irqreturn_t nv_nic_irq_rx(int foo, void *data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 events;
+ int i;
+ unsigned long flags;
+
+ for (i = 0;; i++) {
+ events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
+ writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
+ if (!(events & np->irqmask))
+ break;
+
+ if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
+ if (unlikely(nv_alloc_rx_optimized(dev))) {
+ spin_lock_irqsave(&np->lock, flags);
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ spin_unlock_irqrestore(&np->lock, flags);
+ }
+ }
+
+ if (unlikely(i > max_interrupt_work)) {
+ spin_lock_irqsave(&np->lock, flags);
+ /* disable interrupts on the nic */
+ writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+ pci_push(base);
+
+ if (!np->in_shutdown) {
+ np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
+ mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+ }
+ spin_unlock_irqrestore(&np->lock, flags);
+ netdev_dbg(dev, "%s: too many iterations (%d)\n",
+ __func__, i);
+ break;
+ }
+ }
+
+ return IRQ_RETVAL(i);
+}
+
+static irqreturn_t nv_nic_irq_other(int foo, void *data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 events;
+ int i;
+ unsigned long flags;
+
+ for (i = 0;; i++) {
+ events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
+ writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
+ if (!(events & np->irqmask))
+ break;
+
+ /* check tx in case we reached max loop limit in tx isr */
+ spin_lock_irqsave(&np->lock, flags);
+ nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ if (events & NVREG_IRQ_LINK) {
+ spin_lock_irqsave(&np->lock, flags);
+ nv_link_irq(dev);
+ spin_unlock_irqrestore(&np->lock, flags);
+ }
+ if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
+ spin_lock_irqsave(&np->lock, flags);
+ nv_linkchange(dev);
+ spin_unlock_irqrestore(&np->lock, flags);
+ np->link_timeout = jiffies + LINK_TIMEOUT;
+ }
+ if (events & NVREG_IRQ_RECOVER_ERROR) {
+ spin_lock_irq(&np->lock);
+ /* disable interrupts on the nic */
+ writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
+ pci_push(base);
+
+ if (!np->in_shutdown) {
+ np->nic_poll_irq |= NVREG_IRQ_OTHER;
+ np->recover_error = 1;
+ mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+ }
+ spin_unlock_irq(&np->lock);
+ break;
+ }
+ if (unlikely(i > max_interrupt_work)) {
+ spin_lock_irqsave(&np->lock, flags);
+ /* disable interrupts on the nic */
+ writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
+ pci_push(base);
+
+ if (!np->in_shutdown) {
+ np->nic_poll_irq |= NVREG_IRQ_OTHER;
+ mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+ }
+ spin_unlock_irqrestore(&np->lock, flags);
+ netdev_dbg(dev, "%s: too many iterations (%d)\n",
+ __func__, i);
+ break;
+ }
+
+ }
+
+ return IRQ_RETVAL(i);
+}
+
+static irqreturn_t nv_nic_irq_test(int foo, void *data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 events;
+
+ if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+ events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
+ writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
+ } else {
+ events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
+ writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
+ }
+ pci_push(base);
+ if (!(events & NVREG_IRQ_TIMER))
+ return IRQ_RETVAL(0);
+
+ nv_msi_workaround(np);
+
+ spin_lock(&np->lock);
+ np->intr_test = 1;
+ spin_unlock(&np->lock);
+
+ return IRQ_RETVAL(1);
+}
+
+static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
+{
+ u8 __iomem *base = get_hwbase(dev);
+ int i;
+ u32 msixmap = 0;
+
+ /* Each interrupt bit can be mapped to a MSIX vector (4 bits).
+ * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
+ * the remaining 8 interrupts.
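+ * E.g. (illustrative): vector 1 with irqmask 0x03 yields
+ * msixmap 0x11 - each set interrupt bit contributes the 4-bit
+ * vector id at its own nibble position.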
+ */
+ for (i = 0; i < 8; i++) {
+ if ((irqmask >> i) & 0x1)
+ msixmap |= vector << (i << 2);
+ }
+ writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
+
+ msixmap = 0;
+ for (i = 0; i < 8; i++) {
+ if ((irqmask >> (i + 8)) & 0x1)
+ msixmap |= vector << (i << 2);
+ }
+ writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
+}
+
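+/*
+ * nv_request_irq: interrupt setup with graceful fallback. MSI-X is
+ * tried first (with separate rx/tx/other vectors in throughput mode),
+ * then MSI, then a legacy shared irq as the last resort.
+ */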
+static int nv_request_irq(struct net_device *dev, int intr_test)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int ret = 1;
+ int i;
+ irqreturn_t (*handler)(int foo, void *data);
+
+ if (intr_test) {
+ handler = nv_nic_irq_test;
+ } else {
+ if (nv_optimized(np))
+ handler = nv_nic_irq_optimized;
+ else
+ handler = nv_nic_irq;
+ }
+
+ if (np->msi_flags & NV_MSI_X_CAPABLE) {
+ for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
+ np->msi_x_entry[i].entry = i;
+ ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
+ if (ret == 0) {
+ np->msi_flags |= NV_MSI_X_ENABLED;
+ if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
+ /* Request irq for rx handling */
+ sprintf(np->name_rx, "%s-rx", dev->name);
+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
+ nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
+ netdev_info(dev,
+ "request_irq failed for rx %d\n",
+ ret);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_err;
+ }
+ /* Request irq for tx handling */
+ sprintf(np->name_tx, "%s-tx", dev->name);
+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
+ nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
+ netdev_info(dev,
+ "request_irq failed for tx %d\n",
+ ret);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_free_rx;
+ }
+ /* Request irq for link and timer handling */
+ sprintf(np->name_other, "%s-other", dev->name);
+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
+ nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
+ netdev_info(dev,
+ "request_irq failed for link %d\n",
+ ret);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_free_tx;
+ }
+ /* map interrupts to their respective vector */
+ writel(0, base + NvRegMSIXMap0);
+ writel(0, base + NvRegMSIXMap1);
+ set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
+ set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
+ set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
+ } else {
+ /* Request irq for all interrupts */
+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
+ netdev_info(dev,
+ "request_irq failed %d\n",
+ ret);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_err;
+ }
+
+ /* map interrupts to vector 0 */
+ writel(0, base + NvRegMSIXMap0);
+ writel(0, base + NvRegMSIXMap1);
+ }
+ }
+ }
+ if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+ ret = pci_enable_msi(np->pci_dev);
+ if (ret == 0) {
+ np->msi_flags |= NV_MSI_ENABLED;
+ dev->irq = np->pci_dev->irq;
+ if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
+ netdev_info(dev, "request_irq failed %d\n",
+ ret);
+ pci_disable_msi(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_ENABLED;
+ dev->irq = np->pci_dev->irq;
+ goto out_err;
+ }
+
+ /* map interrupts to vector 0 */
+ writel(0, base + NvRegMSIMap0);
+ writel(0, base + NvRegMSIMap1);
+ /* enable msi vector 0 */
+ writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+ }
+ }
+ if (ret != 0) {
+ if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
+ goto out_err;
+
+ }
+
+ return 0;
+out_free_tx:
+ free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
+out_free_rx:
+ free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
+out_err:
+ return 1;
+}
+
+static void nv_free_irq(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ int i;
+
+ if (np->msi_flags & NV_MSI_X_ENABLED) {
+ for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
+ free_irq(np->msi_x_entry[i].vector, dev);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ } else {
+ free_irq(np->pci_dev->irq, dev);
+ if (np->msi_flags & NV_MSI_ENABLED) {
+ pci_disable_msi(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_ENABLED;
+ }
+ }
+}
+
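+/*
+ * nv_do_nic_poll: nic_poll timer callback, armed by the irq handlers
+ * when they hit max_interrupt_work or a recoverable error. With the
+ * affected irqs disabled it re-runs the handlers, recovers the MAC if
+ * needed and unmasks the interrupts again.
+ */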
+static void nv_do_nic_poll(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 mask = 0;
+
+ /*
+ * First disable irq(s), then reenable interrupts on the nic.
+ * We have to do this before calling nv_nic_irq because that
+ * may decide to do otherwise.
+ */
+
+ if (!using_multi_irqs(dev)) {
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+ disable_irq_lockdep(np->pci_dev->irq);
+ mask = np->irqmask;
+ } else {
+ if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
+ disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+ mask |= NVREG_IRQ_RX_ALL;
+ }
+ if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
+ disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+ mask |= NVREG_IRQ_TX_ALL;
+ }
+ if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
+ disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+ mask |= NVREG_IRQ_OTHER;
+ }
+ }
+ /* disable_irq() contains synchronize_irq, thus no irq handler can run now */
+
+ if (np->recover_error) {
+ np->recover_error = 0;
+ netdev_info(dev, "MAC in recoverable error state\n");
+ if (netif_running(dev)) {
+ netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rxtx(dev);
+ if (np->driver_data & DEV_HAS_POWER_CNTRL)
+ nv_mac_reset(dev);
+ nv_txrx_reset(dev);
+ /* drain rx queue */
+ nv_drain_rxtx(dev);
+ /* reinit driver view of the rx queue */
+ set_bufsize(dev);
+ if (nv_init_ring(dev)) {
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ }
+ /* reinit nic view of the rx queue */
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+ setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+ writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ base + NvRegRingSizes);
+ pci_push(base);
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ pci_push(base);
+ /* clear interrupts */
+ if (!(np->msi_flags & NV_MSI_X_ENABLED))
+ writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+ else
+ writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+
+ /* restart rx engine */
+ nv_start_rxtx(dev);
+ spin_unlock(&np->lock);
+ netif_addr_unlock(dev);
+ netif_tx_unlock_bh(dev);
+ }
+ }
+
+ writel(mask, base + NvRegIrqMask);
+ pci_push(base);
+
+ if (!using_multi_irqs(dev)) {
+ np->nic_poll_irq = 0;
+ if (nv_optimized(np))
+ nv_nic_irq_optimized(0, dev);
+ else
+ nv_nic_irq(0, dev);
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+ enable_irq_lockdep(np->pci_dev->irq);
+ } else {
+ if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
+ np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
+ nv_nic_irq_rx(0, dev);
+ enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+ }
+ if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
+ np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
+ nv_nic_irq_tx(0, dev);
+ enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+ }
+ if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
+ np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
+ nv_nic_irq_other(0, dev);
+ enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+ }
+ }
+
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void nv_poll_controller(struct net_device *dev)
+{
+ nv_do_nic_poll((unsigned long) dev);
+}
+#endif
+
+static void nv_do_stats_poll(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct fe_priv *np = netdev_priv(dev);
+
+ nv_get_hw_stats(dev);
+
+ if (!np->in_shutdown)
+ mod_timer(&np->stats_poll,
+ round_jiffies(jiffies + STATS_INTERVAL));
+}
+
+static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, FORCEDETH_VERSION);
+ strcpy(info->bus_info, pci_name(np->pci_dev));
+}
+
+static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ wolinfo->supported = WAKE_MAGIC;
+
+ spin_lock_irq(&np->lock);
+ if (np->wolenabled)
+ wolinfo->wolopts = WAKE_MAGIC;
+ spin_unlock_irq(&np->lock);
+}
+
+static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 flags = 0;
+
+ if (wolinfo->wolopts == 0) {
+ np->wolenabled = 0;
+ } else if (wolinfo->wolopts & WAKE_MAGIC) {
+ np->wolenabled = 1;
+ flags = NVREG_WAKEUPFLAGS_ENABLE;
+ }
+ if (netif_running(dev)) {
+ spin_lock_irq(&np->lock);
+ writel(flags, base + NvRegWakeUpFlags);
+ spin_unlock_irq(&np->lock);
+ }
+ device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
+ return 0;
+}
+
+static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u32 speed;
+ int adv;
+
+ spin_lock_irq(&np->lock);
+ ecmd->port = PORT_MII;
+ if (!netif_running(dev)) {
+ /* We do not track link speed / duplex setting if the
+ * interface is disabled. Force a link check. */
+ if (nv_update_linkspeed(dev)) {
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ } else {
+ if (netif_carrier_ok(dev))
+ netif_carrier_off(dev);
+ }
+ }
+
+ if (netif_carrier_ok(dev)) {
+ switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
+ case NVREG_LINKSPEED_10:
+ speed = SPEED_10;
+ break;
+ case NVREG_LINKSPEED_100:
+ speed = SPEED_100;
+ break;
+ case NVREG_LINKSPEED_1000:
+ speed = SPEED_1000;
+ break;
+ default:
+ speed = -1;
+ break;
+ }
+ ecmd->duplex = DUPLEX_HALF;
+ if (np->duplex)
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ speed = -1;
+ ecmd->duplex = -1;
+ }
+ ethtool_cmd_speed_set(ecmd, speed);
+ ecmd->autoneg = np->autoneg;
+
+ ecmd->advertising = ADVERTISED_MII;
+ if (np->autoneg) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ if (adv & ADVERTISE_10HALF)
+ ecmd->advertising |= ADVERTISED_10baseT_Half;
+ if (adv & ADVERTISE_10FULL)
+ ecmd->advertising |= ADVERTISED_10baseT_Full;
+ if (adv & ADVERTISE_100HALF)
+ ecmd->advertising |= ADVERTISED_100baseT_Half;
+ if (adv & ADVERTISE_100FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ if (np->gigabit == PHY_GIGABIT) {
+ adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
+ if (adv & ADVERTISE_1000FULL)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ }
+ }
+ ecmd->supported = (SUPPORTED_Autoneg |
+ SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_MII);
+ if (np->gigabit == PHY_GIGABIT)
+ ecmd->supported |= SUPPORTED_1000baseT_Full;
+
+ ecmd->phy_address = np->phyaddr;
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ /* ignore maxtxpkt, maxrxpkt for now */
+ spin_unlock_irq(&np->lock);
+ return 0;
+}
+
+static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u32 speed = ethtool_cmd_speed(ecmd);
+
+ if (ecmd->port != PORT_MII)
+ return -EINVAL;
+ if (ecmd->transceiver != XCVR_EXTERNAL)
+ return -EINVAL;
+ if (ecmd->phy_address != np->phyaddr) {
+ /* TODO: support switching between multiple phys. Should be
+ * trivial, but not enabled due to lack of test hardware. */
+ return -EINVAL;
+ }
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ u32 mask;
+
+ mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
+ if (np->gigabit == PHY_GIGABIT)
+ mask |= ADVERTISED_1000baseT_Full;
+
+ if ((ecmd->advertising & mask) == 0)
+ return -EINVAL;
+
+ } else if (ecmd->autoneg == AUTONEG_DISABLE) {
+ /* Note: with autonegotiation disabled, speed 1000 is
+ * intentionally forbidden - no one should need that. */
+
+ if (speed != SPEED_10 && speed != SPEED_100)
+ return -EINVAL;
+ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ netif_carrier_off(dev);
+ if (netif_running(dev)) {
+ unsigned long flags;
+
+ nv_disable_irq(dev);
+ netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
+ /* with plain spinlock lockdep complains */
+ spin_lock_irqsave(&np->lock, flags);
+ /* stop engines */
+ /* FIXME:
+ * this can take some time, and interrupts are disabled
+ * due to spin_lock_irqsave, but let's hope no daemon
+ * is going to change the settings very often...
+ * Worst case:
+ * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
+ * + some minor delays, which is up to a second approximately
+ */
+ nv_stop_rxtx(dev);
+ spin_unlock_irqrestore(&np->lock, flags);
+ netif_addr_unlock(dev);
+ netif_tx_unlock_bh(dev);
+ }
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ int adv, bmcr;
+
+ np->autoneg = 1;
+
+ /* advertise only what has been requested */
+ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+ if (ecmd->advertising & ADVERTISED_10baseT_Half)
+ adv |= ADVERTISE_10HALF;
+ if (ecmd->advertising & ADVERTISED_10baseT_Full)
+ adv |= ADVERTISE_10FULL;
+ if (ecmd->advertising & ADVERTISED_100baseT_Half)
+ adv |= ADVERTISE_100HALF;
+ if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ adv |= ADVERTISE_100FULL;
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
+ adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+ adv |= ADVERTISE_PAUSE_ASYM;
+ mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
+
+ if (np->gigabit == PHY_GIGABIT) {
+ adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
+ adv &= ~ADVERTISE_1000FULL;
+ if (ecmd->advertising & ADVERTISED_1000baseT_Full)
+ adv |= ADVERTISE_1000FULL;
+ mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
+ }
+
+ if (netif_running(dev))
+ netdev_info(dev, "link down\n");
+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+ bmcr |= BMCR_ANENABLE;
+ /* reset the phy in order for settings to stick,
+ * and cause autoneg to start */
+ if (phy_reset(dev, bmcr)) {
+ netdev_info(dev, "phy reset failed\n");
+ return -EINVAL;
+ }
+ } else {
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ }
+ } else {
+ int adv, bmcr;
+
+ np->autoneg = 0;
+
+ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+ if (speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
+ adv |= ADVERTISE_10HALF;
+ if (speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
+ adv |= ADVERTISE_10FULL;
+ if (speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
+ adv |= ADVERTISE_100HALF;
+ if (speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
+ adv |= ADVERTISE_100FULL;
+ np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */
+ adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ }
+ if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
+ adv |= ADVERTISE_PAUSE_ASYM;
+ np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+ }
+ mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
+ np->fixed_mode = adv;
+
+ if (np->gigabit == PHY_GIGABIT) {
+ adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
+ adv &= ~ADVERTISE_1000FULL;
+ mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
+ }
+
+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
+ if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
+ bmcr |= BMCR_FULLDPLX;
+ if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
+ bmcr |= BMCR_SPEED100;
+ if (np->phy_oui == PHY_OUI_MARVELL) {
+ /* reset the phy in order for forced mode settings to stick */
+ if (phy_reset(dev, bmcr)) {
+ netdev_info(dev, "phy reset failed\n");
+ return -EINVAL;
+ }
+ } else {
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ if (netif_running(dev)) {
+ /* Wait a bit and then reconfigure the nic. */
+ udelay(10);
+ nv_linkchange(dev);
+ }
+ }
+ }
+
+ if (netif_running(dev)) {
+ nv_start_rxtx(dev);
+ nv_enable_irq(dev);
+ }
+
+ return 0;
+}
+
+#define FORCEDETH_REGS_VER 1
+
+static int nv_get_regs_len(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ return np->register_size;
+}
+
+static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 *rbuf = buf;
+ int i;
+
+ regs->version = FORCEDETH_REGS_VER;
+ spin_lock_irq(&np->lock);
+ /* note '<': reading one word past register_size would overrun
+ * the buffer sized by nv_get_regs_len() */
+ for (i = 0; i < np->register_size/sizeof(u32); i++)
+ rbuf[i] = readl(base + i*sizeof(u32));
+ spin_unlock_irq(&np->lock);
+}
+
+static int nv_nway_reset(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ int ret;
+
+ if (np->autoneg) {
+ int bmcr;
+
+ netif_carrier_off(dev);
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
+ netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rxtx(dev);
+ spin_unlock(&np->lock);
+ netif_addr_unlock(dev);
+ netif_tx_unlock_bh(dev);
+ netdev_info(dev, "link down\n");
+ }
+
+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+ bmcr |= BMCR_ANENABLE;
+ /* reset the phy in order for settings to stick*/
+ if (phy_reset(dev, bmcr)) {
+ netdev_info(dev, "phy reset failed\n");
+ return -EINVAL;
+ }
+ } else {
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ }
+
+ if (netif_running(dev)) {
+ nv_start_rxtx(dev);
+ nv_enable_irq(dev);
+ }
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
+
+ ring->rx_pending = np->rx_ring_size;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+ ring->tx_pending = np->tx_ring_size;
+}
+
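+/*
+ * nv_set_ringparam: resize the rx/tx rings. The new descriptor ring
+ * and skb maps are allocated up front so an allocation failure can
+ * fall back to the old rings without disturbing a running device.
+ */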
+static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
+ dma_addr_t ring_addr;
+
+ if (ring->rx_pending < RX_RING_MIN ||
+ ring->tx_pending < TX_RING_MIN ||
+ ring->rx_mini_pending != 0 ||
+ ring->rx_jumbo_pending != 0 ||
+ (np->desc_ver == DESC_VER_1 &&
+ (ring->rx_pending > RING_MAX_DESC_VER_1 ||
+ ring->tx_pending > RING_MAX_DESC_VER_1)) ||
+ (np->desc_ver != DESC_VER_1 &&
+ (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
+ ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
+ return -EINVAL;
+ }
+
+ /* allocate new rings */
+ if (!nv_optimized(np)) {
+ rxtx_ring = pci_alloc_consistent(np->pci_dev,
+ sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
+ &ring_addr);
+ } else {
+ rxtx_ring = pci_alloc_consistent(np->pci_dev,
+ sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
+ &ring_addr);
+ }
+ rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
+ tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
+ if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
+ /* fall back to old rings */
+ if (!nv_optimized(np)) {
+ if (rxtx_ring)
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
+ rxtx_ring, ring_addr);
+ } else {
+ if (rxtx_ring)
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
+ rxtx_ring, ring_addr);
+ }
+
+ kfree(rx_skbuff);
+ kfree(tx_skbuff);
+ goto exit;
+ }
+
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
+ nv_napi_disable(dev);
+ netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rxtx(dev);
+ nv_txrx_reset(dev);
+ /* drain queues */
+ nv_drain_rxtx(dev);
+ /* delete queues */
+ free_rings(dev);
+ }
+
+ /* set new values */
+ np->rx_ring_size = ring->rx_pending;
+ np->tx_ring_size = ring->tx_pending;
+
+ if (!nv_optimized(np)) {
+ np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
+ np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
+ } else {
+ np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
+ np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
+ }
+ np->rx_skb = (struct nv_skb_map *)rx_skbuff;
+ np->tx_skb = (struct nv_skb_map *)tx_skbuff;
+ np->ring_addr = ring_addr;
+
+ memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
+ memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
+
+ if (netif_running(dev)) {
+ /* reinit driver view of the queues */
+ set_bufsize(dev);
+ if (nv_init_ring(dev)) {
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ }
+
+ /* reinit nic view of the queues */
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+ setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+ writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ base + NvRegRingSizes);
+ pci_push(base);
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ pci_push(base);
+
+ /* restart engines */
+ nv_start_rxtx(dev);
+ spin_unlock(&np->lock);
+ netif_addr_unlock(dev);
+ netif_tx_unlock_bh(dev);
+ nv_napi_enable(dev);
+ nv_enable_irq(dev);
+ }
+ return 0;
+exit:
+ return -ENOMEM;
+}
+
+static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
+ pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
+ pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
+}
+
+static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ int adv, bmcr;
+
+ if ((!np->autoneg && np->duplex == 0) ||
+ (np->autoneg && !pause->autoneg && np->duplex == 0)) {
+ netdev_info(dev, "can not set pause settings when forced link is in half duplex\n");
+ return -EINVAL;
+ }
+ if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
+ netdev_info(dev, "hardware does not support tx pause frames\n");
+ return -EINVAL;
+ }
+
+ netif_carrier_off(dev);
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
+ netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rxtx(dev);
+ spin_unlock(&np->lock);
+ netif_addr_unlock(dev);
+ netif_tx_unlock_bh(dev);
+ }
+
+ np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
+ if (pause->rx_pause)
+ np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
+ if (pause->tx_pause)
+ np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
+
+ if (np->autoneg && pause->autoneg) {
+ np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
+
+ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+ adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
+ adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+ adv |= ADVERTISE_PAUSE_ASYM;
+ mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
+
+ if (netif_running(dev))
+ netdev_info(dev, "link down\n");
+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+ } else {
+ np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
+ if (pause->rx_pause)
+ np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ if (pause->tx_pause)
+ np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+
+ if (!netif_running(dev))
+ nv_update_linkspeed(dev);
+ else
+ nv_update_pause(dev, np->pause_flags);
+ }
+
+ if (netif_running(dev)) {
+ nv_start_rxtx(dev);
+ nv_enable_irq(dev);
+ }
+ return 0;
+}
+
+static u32 nv_fix_features(struct net_device *dev, u32 features)
+{
+ /* vlan is dependent on rx checksum offload */
+ if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
+ features |= NETIF_F_RXCSUM;
+
+ return features;
+}
+
+static void nv_vlan_mode(struct net_device *dev, u32 features)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ spin_lock_irq(&np->lock);
+
+ if (features & NETIF_F_HW_VLAN_RX)
+ np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
+ else
+ np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
+
+ if (features & NETIF_F_HW_VLAN_TX)
+ np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
+ else
+ np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
+
+ writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+
+ spin_unlock_irq(&np->lock);
+}
+
+static int nv_set_features(struct net_device *dev, u32 features)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 changed = dev->features ^ features;
+
+ if (changed & NETIF_F_RXCSUM) {
+ spin_lock_irq(&np->lock);
+
+ if (features & NETIF_F_RXCSUM)
+ np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
+ else
+ np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
+
+ if (netif_running(dev))
+ writel(np->txrxctl_bits, base + NvRegTxRxControl);
+
+ spin_unlock_irq(&np->lock);
+ }
+
+ if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX))
+ nv_vlan_mode(dev, features);
+
+ return 0;
+}
+
+static int nv_get_sset_count(struct net_device *dev, int sset)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_TEST:
+ if (np->driver_data & DEV_HAS_TEST_EXTENDED)
+ return NV_TEST_COUNT_EXTENDED;
+ else
+ return NV_TEST_COUNT_BASE;
+ case ETH_SS_STATS:
+ if (np->driver_data & DEV_HAS_STATISTICS_V3)
+ return NV_DEV_STATISTICS_V3_COUNT;
+ else if (np->driver_data & DEV_HAS_STATISTICS_V2)
+ return NV_DEV_STATISTICS_V2_COUNT;
+ else if (np->driver_data & DEV_HAS_STATISTICS_V1)
+ return NV_DEV_STATISTICS_V1_COUNT;
+ else
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ /* update stats */
+ nv_do_stats_poll((unsigned long)dev);
+
+ memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
+}
+
+static int nv_link_test(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ int mii_status;
+
+ mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+ mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+
+ /* check phy link status */
+ if (!(mii_status & BMSR_LSTATUS))
+ return 0;
+ else
+ return 1;
+}
+
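+/*
+ * nv_register_test: walk the nv_registers_test table, toggle the
+ * maskable bits of each register and verify they read back toggled,
+ * then restore the original value. Returns 1 on pass, 0 on failure.
+ */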
+static int nv_register_test(struct net_device *dev)
+{
+ u8 __iomem *base = get_hwbase(dev);
+ int i = 0;
+ u32 orig_read, new_read;
+
+ do {
+ orig_read = readl(base + nv_registers_test[i].reg);
+
+ /* xor with mask to toggle bits */
+ orig_read ^= nv_registers_test[i].mask;
+
+ writel(orig_read, base + nv_registers_test[i].reg);
+
+ new_read = readl(base + nv_registers_test[i].reg);
+
+ if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
+ return 0;
+
+ /* restore original value */
+ orig_read ^= nv_registers_test[i].mask;
+ writel(orig_read, base + nv_registers_test[i].reg);
+
+ } while (nv_registers_test[++i].reg != 0);
+
+ return 1;
+}
+
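+/*
+ * nv_interrupt_test: install a single-vector test handler, arm the
+ * timer interrupt and check that nv_nic_irq_test sets np->intr_test
+ * within 100 ms. Returns 1 on pass, 2 if no interrupt was seen, 0 on
+ * setup failure.
+ */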
+static int nv_interrupt_test(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int ret = 1;
+ int testcnt;
+ u32 save_msi_flags, save_poll_interval = 0;
+
+ if (netif_running(dev)) {
+ /* free current irq */
+ nv_free_irq(dev);
+ save_poll_interval = readl(base+NvRegPollingInterval);
+ }
+
+ /* flag to test interrupt handler */
+ np->intr_test = 0;
+
+ /* setup test irq */
+ save_msi_flags = np->msi_flags;
+ np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
+ np->msi_flags |= 0x001; /* setup 1 vector */
+ if (nv_request_irq(dev, 1))
+ return 0;
+
+ /* setup timer interrupt */
+ writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
+ writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
+
+ nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
+
+ /* wait for at least one interrupt */
+ msleep(100);
+
+ spin_lock_irq(&np->lock);
+
+ /* flag should be set within ISR */
+ testcnt = np->intr_test;
+ if (!testcnt)
+ ret = 2;
+
+ nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
+ if (!(np->msi_flags & NV_MSI_X_ENABLED))
+ writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+ else
+ writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+
+ spin_unlock_irq(&np->lock);
+
+ nv_free_irq(dev);
+
+ np->msi_flags = save_msi_flags;
+
+ if (netif_running(dev)) {
+ writel(save_poll_interval, base + NvRegPollingInterval);
+ writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
+ /* restore original irq */
+ if (nv_request_irq(dev, 0))
+ return 0;
+ }
+
+ return ret;
+}
+
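+/*
+ * nv_loopback_test: put the MAC into internal loopback, transmit one
+ * ETH_DATA_LEN frame filled with a byte-count pattern and verify it
+ * comes back intact on the rx ring. Returns 1 on pass, 0 on failure.
+ */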
+static int nv_loopback_test(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ struct sk_buff *tx_skb, *rx_skb;
+ dma_addr_t test_dma_addr;
+ u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
+ u32 flags;
+ int len, i, pkt_len;
+ u8 *pkt_data;
+ u32 filter_flags = 0;
+ u32 misc1_flags = 0;
+ int ret = 1;
+
+ if (netif_running(dev)) {
+ nv_disable_irq(dev);
+ filter_flags = readl(base + NvRegPacketFilterFlags);
+ misc1_flags = readl(base + NvRegMisc1);
+ } else {
+ nv_txrx_reset(dev);
+ }
+
+ /* reinit driver view of the rx queue */
+ set_bufsize(dev);
+ nv_init_ring(dev);
+
+ /* setup hardware for loopback */
+ writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
+ writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
+
+ /* reinit nic view of the rx queue */
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+ setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+ writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ base + NvRegRingSizes);
+ pci_push(base);
+
+ /* restart rx engine */
+ nv_start_rxtx(dev);
+
+ /* setup packet for tx */
+ pkt_len = ETH_DATA_LEN;
+ tx_skb = dev_alloc_skb(pkt_len);
+ if (!tx_skb) {
+ netdev_err(dev, "dev_alloc_skb() failed during loopback test\n");
+ ret = 0;
+ goto out;
+ }
+ /* the device only reads this buffer; matches the
+ * PCI_DMA_TODEVICE unmap below */
+ test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
+ skb_tailroom(tx_skb),
+ PCI_DMA_TODEVICE);
+ pkt_data = skb_put(tx_skb, pkt_len);
+ for (i = 0; i < pkt_len; i++)
+ pkt_data[i] = (u8)(i & 0xff);
+
+ if (!nv_optimized(np)) {
+ np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
+ np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+ } else {
+ np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
+ np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
+ np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+ }
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ pci_push(get_hwbase(dev));
+
+ msleep(500);
+
+ /* check for rx of the packet */
+ if (!nv_optimized(np)) {
+ flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
+ len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
+
+ } else {
+ flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
+ len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
+ }
+
+ if (flags & NV_RX_AVAIL) {
+ ret = 0;
+ } else if (np->desc_ver == DESC_VER_1) {
+ if (flags & NV_RX_ERROR)
+ ret = 0;
+ } else {
+ if (flags & NV_RX2_ERROR)
+ ret = 0;
+ }
+
+ if (ret) {
+ if (len != pkt_len) {
+ ret = 0;
+ } else {
+ rx_skb = np->rx_skb[0].skb;
+ for (i = 0; i < pkt_len; i++) {
+ if (rx_skb->data[i] != (u8)(i & 0xff)) {
+ ret = 0;
+ break;
+ }
+ }
+ }
+ }
+
+ pci_unmap_single(np->pci_dev, test_dma_addr,
+ (skb_end_pointer(tx_skb) - tx_skb->data),
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(tx_skb);
+ out:
+ /* stop engines */
+ nv_stop_rxtx(dev);
+ nv_txrx_reset(dev);
+ /* drain rx queue */
+ nv_drain_rxtx(dev);
+
+ if (netif_running(dev)) {
+ writel(misc1_flags, base + NvRegMisc1);
+ writel(filter_flags, base + NvRegPacketFilterFlags);
+ nv_enable_irq(dev);
+ }
+
+ return ret;
+}
+
+static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int result;
+ memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));
+
+ if (!nv_link_test(dev)) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[0] = 1;
+ }
+
+ if (test->flags & ETH_TEST_FL_OFFLINE) {
+ if (netif_running(dev)) {
+ netif_stop_queue(dev);
+ nv_napi_disable(dev);
+ netif_tx_lock_bh(dev);
+ netif_addr_lock(dev);
+ spin_lock_irq(&np->lock);
+ nv_disable_hw_interrupts(dev, np->irqmask);
+ if (!(np->msi_flags & NV_MSI_X_ENABLED))
+ writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+ else
+ writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+ /* stop engines */
+ nv_stop_rxtx(dev);
+ nv_txrx_reset(dev);
+ /* drain rx queue */
+ nv_drain_rxtx(dev);
+ spin_unlock_irq(&np->lock);
+ netif_addr_unlock(dev);
+ netif_tx_unlock_bh(dev);
+ }
+
+ if (!nv_register_test(dev)) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[1] = 1;
+ }
+
+ result = nv_interrupt_test(dev);
+ if (result != 1) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[2] = 1;
+ }
+ if (result == 0) {
+ /* bail out */
+ return;
+ }
+
+ if (!nv_loopback_test(dev)) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ buffer[3] = 1;
+ }
+
+ if (netif_running(dev)) {
+ /* reinit driver view of the rx queue */
+ set_bufsize(dev);
+ if (nv_init_ring(dev)) {
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ }
+ /* reinit nic view of the rx queue */
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+ setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+ writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ base + NvRegRingSizes);
+ pci_push(base);
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ pci_push(base);
+ /* restart rx engine */
+ nv_start_rxtx(dev);
+ netif_start_queue(dev);
+ nv_napi_enable(dev);
+ nv_enable_hw_interrupts(dev, np->irqmask);
+ }
+ }
+}
+
+static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
+ break;
+ case ETH_SS_TEST:
+ memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
+ break;
+ }
+}
+
+static const struct ethtool_ops ops = {
+ .get_drvinfo = nv_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_wol = nv_get_wol,
+ .set_wol = nv_set_wol,
+ .get_settings = nv_get_settings,
+ .set_settings = nv_set_settings,
+ .get_regs_len = nv_get_regs_len,
+ .get_regs = nv_get_regs,
+ .nway_reset = nv_nway_reset,
+ .get_ringparam = nv_get_ringparam,
+ .set_ringparam = nv_set_ringparam,
+ .get_pauseparam = nv_get_pauseparam,
+ .set_pauseparam = nv_set_pauseparam,
+ .get_strings = nv_get_strings,
+ .get_ethtool_stats = nv_get_ethtool_stats,
+ .get_sset_count = nv_get_sset_count,
+ .self_test = nv_self_test,
+};
+
+/* The mgmt unit and driver use a semaphore to access the phy during init */
+static int nv_mgmt_acquire_sema(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int i;
+ u32 tx_ctrl, mgmt_sema;
+
+ for (i = 0; i < 10; i++) {
+ mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
+ if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
+ break;
+ msleep(500);
+ }
+
+ if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
+ return 0;
+
+ for (i = 0; i < 2; i++) {
+ tx_ctrl = readl(base + NvRegTransmitterControl);
+ tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
+ writel(tx_ctrl, base + NvRegTransmitterControl);
+
+ /* verify that semaphore was acquired */
+ tx_ctrl = readl(base + NvRegTransmitterControl);
+ if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
+ ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
+ np->mgmt_sema = 1;
+ return 1;
+ } else
+ udelay(50);
+ }
+
+ return 0;
+}
+
+static void nv_mgmt_release_sema(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 tx_ctrl;
+
+ if (np->driver_data & DEV_HAS_MGMT_UNIT) {
+ if (np->mgmt_sema) {
+ tx_ctrl = readl(base + NvRegTransmitterControl);
+ tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
+ writel(tx_ctrl, base + NvRegTransmitterControl);
+ }
+ }
+}
+
+
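+/*
+ * nv_mgmt_get_version: handshake with the management unit. Toggle the
+ * DATA_START bit and wait up to five seconds for the unit to flip
+ * DATA_READY in response before reading the version register.
+ */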
+static int nv_mgmt_get_version(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 data_ready = readl(base + NvRegTransmitterControl);
+ u32 data_ready2 = 0;
+ unsigned long start;
+ int ready = 0;
+
+ writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
+ writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
+ start = jiffies;
+ while (time_before(jiffies, start + 5*HZ)) {
+ data_ready2 = readl(base + NvRegTransmitterControl);
+ if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
+ ready = 1;
+ break;
+ }
+ schedule_timeout_uninterruptible(1);
+ }
+
+ if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
+ return 0;
+
+ np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;
+
+ return 1;
+}
+
+static int nv_open(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int ret = 1;
+ int oom, i;
+ u32 low;
+
+ /* power up phy */
+ mii_rw(dev, np->phyaddr, MII_BMCR,
+ mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
+
+ nv_txrx_gate(dev, false);
+ /* erase previous misconfiguration */
+ if (np->driver_data & DEV_HAS_POWER_CNTRL)
+ nv_mac_reset(dev);
+ writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
+ writel(0, base + NvRegMulticastAddrB);
+ writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
+ writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
+ writel(0, base + NvRegPacketFilterFlags);
+
+ writel(0, base + NvRegTransmitterControl);
+ writel(0, base + NvRegReceiverControl);
+
+ writel(0, base + NvRegAdapterControl);
+
+ if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
+ writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
+
+ /* initialize descriptor rings */
+ set_bufsize(dev);
+ oom = nv_init_ring(dev);
+
+ writel(0, base + NvRegLinkSpeed);
+ writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
+ nv_txrx_reset(dev);
+ writel(0, base + NvRegUnknownSetupReg6);
+
+ np->in_shutdown = 0;
+
+ /* give hw rings */
+ setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+ writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ base + NvRegRingSizes);
+
+ writel(np->linkspeed, base + NvRegLinkSpeed);
+ if (np->desc_ver == DESC_VER_1)
+ writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
+ else
+ writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
+ writel(np->txrxctl_bits, base + NvRegTxRxControl);
+ writel(np->vlanctl_bits, base + NvRegVlanControl);
+ pci_push(base);
+ writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
+ if (reg_delay(dev, NvRegUnknownSetupReg5,
+ NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
+ NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
+ netdev_info(dev,
+ "%s: SetupReg5, Bit 31 remained off\n", __func__);
+
+ writel(0, base + NvRegMIIMask);
+ writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+ writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
+
+ writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
+ writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
+ writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+
+ writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
+
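+	/*
+	 * Seed the collision-backoff slot time with random bits so that
+	 * several NICs on one segment do not retry in lockstep; desc-v1
+	 * chips take the seed directly, newer chips use either the legacy
+	 * backoff engine or the gear-mode reseed helper below.
+	 */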
+ get_random_bytes(&low, sizeof(low));
+ low &= NVREG_SLOTTIME_MASK;
+ if (np->desc_ver == DESC_VER_1) {
+ writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
+ } else {
+ if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
+ /* setup legacy backoff */
+ writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
+ } else {
+ writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
+ nv_gear_backoff_reseed(dev);
+ }
+ }
+ writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
+ writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
+ if (poll_interval == -1) {
+ if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
+ writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
+ else
+ writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
+ } else
+ writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
+ writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
+ writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
+ base + NvRegAdapterControl);
+ writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
+ writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
+ if (np->wolenabled)
+		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
+
+ i = readl(base + NvRegPowerState);
+ if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
+ writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
+
+ pci_push(base);
+ udelay(10);
+ writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
+
+ nv_disable_hw_interrupts(dev, np->irqmask);
+ pci_push(base);
+ writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
+ writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+ pci_push(base);
+
+ if (nv_request_irq(dev, 0))
+ goto out_drain;
+
+ /* ask for interrupts */
+ nv_enable_hw_interrupts(dev, np->irqmask);
+
+ spin_lock_irq(&np->lock);
+ writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
+ writel(0, base + NvRegMulticastAddrB);
+ writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
+ writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
+ writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
+ /* One manual link speed update: Interrupts are enabled, future link
+ * speed changes cause interrupts and are handled by nv_link_irq().
+ */
+ {
+ u32 miistat;
+ miistat = readl(base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
+ }
+	/* set linkspeed to an invalid value to force nv_update_linkspeed
+	 * to init the hw */
+ np->linkspeed = 0;
+ ret = nv_update_linkspeed(dev);
+ nv_start_rxtx(dev);
+ netif_start_queue(dev);
+ nv_napi_enable(dev);
+
+ if (ret) {
+ netif_carrier_on(dev);
+ } else {
+ netdev_info(dev, "no link during initialization\n");
+ netif_carrier_off(dev);
+ }
+ if (oom)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+
+ /* start statistics timer */
+ if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
+ mod_timer(&np->stats_poll,
+ round_jiffies(jiffies + STATS_INTERVAL));
+
+ spin_unlock_irq(&np->lock);
+
+ return 0;
+out_drain:
+ nv_drain_rxtx(dev);
+ return ret;
+}
+
+static int nv_close(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base;
+
+ spin_lock_irq(&np->lock);
+ np->in_shutdown = 1;
+ spin_unlock_irq(&np->lock);
+ nv_napi_disable(dev);
+ synchronize_irq(np->pci_dev->irq);
+
+ del_timer_sync(&np->oom_kick);
+ del_timer_sync(&np->nic_poll);
+ del_timer_sync(&np->stats_poll);
+
+ netif_stop_queue(dev);
+ spin_lock_irq(&np->lock);
+ nv_stop_rxtx(dev);
+ nv_txrx_reset(dev);
+
+ /* disable interrupts on the nic or we will lock up */
+ base = get_hwbase(dev);
+ nv_disable_hw_interrupts(dev, np->irqmask);
+ pci_push(base);
+
+ spin_unlock_irq(&np->lock);
+
+ nv_free_irq(dev);
+
+ nv_drain_rxtx(dev);
+
+ if (np->wolenabled || !phy_power_down) {
+ nv_txrx_gate(dev, false);
+ writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
+ nv_start_rx(dev);
+ } else {
+ /* power down phy */
+ mii_rw(dev, np->phyaddr, MII_BMCR,
+ mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
+ nv_txrx_gate(dev, true);
+ }
+
+ /* FIXME: power down nic */
+
+ return 0;
+}
+
+static const struct net_device_ops nv_netdev_ops = {
+ .ndo_open = nv_open,
+ .ndo_stop = nv_close,
+ .ndo_get_stats = nv_get_stats,
+ .ndo_start_xmit = nv_start_xmit,
+ .ndo_tx_timeout = nv_tx_timeout,
+ .ndo_change_mtu = nv_change_mtu,
+ .ndo_fix_features = nv_fix_features,
+ .ndo_set_features = nv_set_features,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = nv_set_mac_address,
+ .ndo_set_rx_mode = nv_set_multicast,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = nv_poll_controller,
+#endif
+};
+
+static const struct net_device_ops nv_netdev_ops_optimized = {
+ .ndo_open = nv_open,
+ .ndo_stop = nv_close,
+ .ndo_get_stats = nv_get_stats,
+ .ndo_start_xmit = nv_start_xmit_optimized,
+ .ndo_tx_timeout = nv_tx_timeout,
+ .ndo_change_mtu = nv_change_mtu,
+ .ndo_fix_features = nv_fix_features,
+ .ndo_set_features = nv_set_features,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = nv_set_mac_address,
+ .ndo_set_rx_mode = nv_set_multicast,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = nv_poll_controller,
+#endif
+};
+
+static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+{
+ struct net_device *dev;
+ struct fe_priv *np;
+ unsigned long addr;
+ u8 __iomem *base;
+ int err, i;
+ u32 powerstate, txreg;
+ u32 phystate_orig = 0, phystate;
+ int phyinitialized = 0;
+ static int printed_version;
+
+ if (!printed_version++)
+ pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
+ FORCEDETH_VERSION);
+
+ dev = alloc_etherdev(sizeof(struct fe_priv));
+ err = -ENOMEM;
+ if (!dev)
+ goto out;
+
+ np = netdev_priv(dev);
+ np->dev = dev;
+ np->pci_dev = pci_dev;
+ spin_lock_init(&np->lock);
+ SET_NETDEV_DEV(dev, &pci_dev->dev);
+
+ init_timer(&np->oom_kick);
+ np->oom_kick.data = (unsigned long) dev;
+ np->oom_kick.function = nv_do_rx_refill; /* timer handler */
+ init_timer(&np->nic_poll);
+ np->nic_poll.data = (unsigned long) dev;
+ np->nic_poll.function = nv_do_nic_poll; /* timer handler */
+ init_timer(&np->stats_poll);
+ np->stats_poll.data = (unsigned long) dev;
+ np->stats_poll.function = nv_do_stats_poll; /* timer handler */
+
+ err = pci_enable_device(pci_dev);
+ if (err)
+ goto out_free;
+
+ pci_set_master(pci_dev);
+
+ err = pci_request_regions(pci_dev, DRV_NAME);
+ if (err < 0)
+ goto out_disable;
+
+ if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
+ np->register_size = NV_PCI_REGSZ_VER3;
+ else if (id->driver_data & DEV_HAS_STATISTICS_V1)
+ np->register_size = NV_PCI_REGSZ_VER2;
+ else
+ np->register_size = NV_PCI_REGSZ_VER1;
+
+ err = -EINVAL;
+ addr = 0;
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
+ pci_resource_len(pci_dev, i) >= np->register_size) {
+ addr = pci_resource_start(pci_dev, i);
+ break;
+ }
+ }
+ if (i == DEVICE_COUNT_RESOURCE) {
+ dev_info(&pci_dev->dev, "Couldn't find register window\n");
+ goto out_relreg;
+ }
+
+ /* copy of driver data */
+ np->driver_data = id->driver_data;
+ /* copy of device id */
+ np->device_id = id->device;
+
+ /* handle different descriptor versions */
+ if (id->driver_data & DEV_HAS_HIGH_DMA) {
+ /* packet format 3: supports 40-bit addressing */
+ np->desc_ver = DESC_VER_3;
+ np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
+ if (dma_64bit) {
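+			/*
+			 * Note: only a 39-bit mask is requested even though
+			 * the messages below say "64-bit"; the descriptors
+			 * carry 40-bit addresses.
+			 */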
+ if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
+ dev_info(&pci_dev->dev,
+ "64-bit DMA failed, using 32-bit addressing\n");
+ else
+ dev->features |= NETIF_F_HIGHDMA;
+ if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
+ dev_info(&pci_dev->dev,
+ "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
+ }
+ }
+ } else if (id->driver_data & DEV_HAS_LARGEDESC) {
+ /* packet format 2: supports jumbo frames */
+ np->desc_ver = DESC_VER_2;
+ np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
+ } else {
+ /* original packet format */
+ np->desc_ver = DESC_VER_1;
+ np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
+ }
+
+ np->pkt_limit = NV_PKTLIMIT_1;
+ if (id->driver_data & DEV_HAS_LARGEDESC)
+ np->pkt_limit = NV_PKTLIMIT_2;
+
+ if (id->driver_data & DEV_HAS_CHECKSUM) {
+ np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
+ dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_RXCSUM;
+ }
+
+ np->vlanctl_bits = 0;
+ if (id->driver_data & DEV_HAS_VLAN) {
+ np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
+ dev->hw_features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
+ }
+
+ dev->features |= dev->hw_features;
+
+ np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
+ if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
+ (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
+ (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
+ np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
+ }
+
+ err = -ENOMEM;
+ np->base = ioremap(addr, np->register_size);
+ if (!np->base)
+ goto out_relreg;
+ dev->base_addr = (unsigned long)np->base;
+
+ dev->irq = pci_dev->irq;
+
+ np->rx_ring_size = RX_RING_DEFAULT;
+ np->tx_ring_size = TX_RING_DEFAULT;
+
+ if (!nv_optimized(np)) {
+ np->rx_ring.orig = pci_alloc_consistent(pci_dev,
+ sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
+ &np->ring_addr);
+ if (!np->rx_ring.orig)
+ goto out_unmap;
+ np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
+ } else {
+ np->rx_ring.ex = pci_alloc_consistent(pci_dev,
+ sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
+ &np->ring_addr);
+ if (!np->rx_ring.ex)
+ goto out_unmap;
+ np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
+ }
+ np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
+ np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
+ if (!np->rx_skb || !np->tx_skb)
+ goto out_freering;
+
+ if (!nv_optimized(np))
+ dev->netdev_ops = &nv_netdev_ops;
+ else
+ dev->netdev_ops = &nv_netdev_ops_optimized;
+
+ netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
+ SET_ETHTOOL_OPS(dev, &ops);
+ dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
+
+ pci_set_drvdata(pci_dev, dev);
+
+ /* read the mac address */
+ base = get_hwbase(dev);
+ np->orig_mac[0] = readl(base + NvRegMacAddrA);
+ np->orig_mac[1] = readl(base + NvRegMacAddrB);
+
+ /* check the workaround bit for correct mac address order */
+ txreg = readl(base + NvRegTransmitPoll);
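+	/*
+	 * Three cases follow: newer parts store the MAC in the correct
+	 * order (DEV_HAS_CORRECT_MACADDR); older parts store it reversed
+	 * unless the workaround bit in NvRegTransmitPoll shows that a
+	 * previous driver instance already flipped it.
+	 */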
+ if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
+ /* mac address is already in correct order */
+ dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
+ dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
+ dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
+ dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
+ dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
+ dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
+ } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
+ /* mac address is already in correct order */
+ dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
+ dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
+ dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
+ dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
+ dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
+ dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
+ /*
+ * Set orig mac address back to the reversed version.
+ * This flag will be cleared during low power transition.
+ * Therefore, we should always put back the reversed address.
+ */
+ np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
+ (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
+ np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
+ } else {
+ /* need to reverse mac address to correct order */
+ dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
+ dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
+ dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
+ dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
+ dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
+ dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+ writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
+ dev_dbg(&pci_dev->dev,
+ "%s: set workaround bit for reversed mac addr\n",
+ __func__);
+ }
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ if (!is_valid_ether_addr(dev->perm_addr)) {
+ /*
+		 * Bad MAC address. At least one BIOS sets the MAC address
+ * to 01:23:45:67:89:ab
+ */
+ dev_err(&pci_dev->dev,
+ "Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
+ dev->dev_addr);
+ random_ether_addr(dev->dev_addr);
+ dev_err(&pci_dev->dev,
+ "Using random MAC address: %pM\n", dev->dev_addr);
+ }
+
+ /* set mac address */
+ nv_copy_mac_to_hw(dev);
+
+ /* disable WOL */
+ writel(0, base + NvRegWakeUpFlags);
+ np->wolenabled = 0;
+ device_set_wakeup_enable(&pci_dev->dev, false);
+
+ if (id->driver_data & DEV_HAS_POWER_CNTRL) {
+
+ /* take phy and nic out of low power mode */
+ powerstate = readl(base + NvRegPowerState2);
+ powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
+ if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
+ pci_dev->revision >= 0xA3)
+ powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
+ writel(powerstate, base + NvRegPowerState2);
+ }
+
+ if (np->desc_ver == DESC_VER_1)
+ np->tx_flags = NV_TX_VALID;
+ else
+ np->tx_flags = NV_TX2_VALID;
+
+ np->msi_flags = 0;
+ if ((id->driver_data & DEV_HAS_MSI) && msi)
+ np->msi_flags |= NV_MSI_CAPABLE;
+
+ if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
+		/* MSI-X has had reported issues when modifying the irqmask,
+		 * as in the case of napi; therefore, disable it for now.
+		 */
+#if 0
+ np->msi_flags |= NV_MSI_X_CAPABLE;
+#endif
+ }
+
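+	/*
+	 * The low bits of msi_flags encode how many MSI-X vectors to
+	 * request: one in CPU mode, three (presumably rx, tx and other)
+	 * in throughput mode.
+	 */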
+ if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
+ np->irqmask = NVREG_IRQMASK_CPU;
+ if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
+ np->msi_flags |= 0x0001;
+ } else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
+ !(id->driver_data & DEV_NEED_TIMERIRQ)) {
+ /* start off in throughput mode */
+ np->irqmask = NVREG_IRQMASK_THROUGHPUT;
+ /* remove support for msix mode */
+ np->msi_flags &= ~NV_MSI_X_CAPABLE;
+ } else {
+ optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
+ np->irqmask = NVREG_IRQMASK_THROUGHPUT;
+ if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
+ np->msi_flags |= 0x0003;
+ }
+
+ if (id->driver_data & DEV_NEED_TIMERIRQ)
+ np->irqmask |= NVREG_IRQ_TIMER;
+ if (id->driver_data & DEV_NEED_LINKTIMER) {
+ np->need_linktimer = 1;
+ np->link_timeout = jiffies + LINK_TIMEOUT;
+ } else {
+ np->need_linktimer = 0;
+ }
+
+	/* Limit the number of outstanding tx descriptors to work around a hw bug */
+ if (id->driver_data & DEV_NEED_TX_LIMIT) {
+ np->tx_limit = 1;
+ if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
+ pci_dev->revision >= 0xA2)
+ np->tx_limit = 0;
+ }
+
+ /* clear phy state and temporarily halt phy interrupts */
+ writel(0, base + NvRegMIIMask);
+ phystate = readl(base + NvRegAdapterControl);
+ if (phystate & NVREG_ADAPTCTL_RUNNING) {
+ phystate_orig = 1;
+ phystate &= ~NVREG_ADAPTCTL_RUNNING;
+ writel(phystate, base + NvRegAdapterControl);
+ }
+ writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
+
+ if (id->driver_data & DEV_HAS_MGMT_UNIT) {
+ /* management unit running on the mac? */
+ if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
+ (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
+ nv_mgmt_acquire_sema(dev) &&
+ nv_mgmt_get_version(dev)) {
+ np->mac_in_use = 1;
+ if (np->mgmt_version > 0)
+ np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
+			/* has the management unit already set up the phy? */
+ if (np->mac_in_use &&
+ ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
+ NVREG_XMITCTL_SYNC_PHY_INIT)) {
+ /* phy is inited by mgmt unit */
+ phyinitialized = 1;
+ } else {
+ /* we need to init the phy */
+ }
+ }
+ }
+
+ /* find a suitable phy */
+ for (i = 1; i <= 32; i++) {
+ int id1, id2;
+ int phyaddr = i & 0x1F;
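+		/*
+		 * i runs 1..32; the mask wraps 32 to phy address 0, so
+		 * addresses 1-31 are scanned first and 0 last.
+		 */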
+
+ spin_lock_irq(&np->lock);
+ id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
+ spin_unlock_irq(&np->lock);
+ if (id1 < 0 || id1 == 0xffff)
+ continue;
+ spin_lock_irq(&np->lock);
+ id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
+ spin_unlock_irq(&np->lock);
+ if (id2 < 0 || id2 == 0xffff)
+ continue;
+
+ np->phy_model = id2 & PHYID2_MODEL_MASK;
+ id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
+ id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
+ np->phyaddr = phyaddr;
+ np->phy_oui = id1 | id2;
+
+		/* Realtek hardcoded phy id1 to all zeros on certain phys */
+ if (np->phy_oui == PHY_OUI_REALTEK2)
+ np->phy_oui = PHY_OUI_REALTEK;
+ /* Setup phy revision for Realtek */
+ if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
+ np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
+
+ break;
+ }
+ if (i == 33) {
+ dev_info(&pci_dev->dev, "open: Could not find a valid PHY\n");
+ goto out_error;
+ }
+
+ if (!phyinitialized) {
+ /* reset it */
+ phy_init(dev);
+ } else {
+ /* see if it is a gigabit phy */
+ u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+ if (mii_status & PHY_GIGABIT)
+ np->gigabit = PHY_GIGABIT;
+ }
+
+ /* set default link speed settings */
+ np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
+ np->duplex = 0;
+ np->autoneg = 1;
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
+ goto out_error;
+ }
+
+ if (id->driver_data & DEV_HAS_VLAN)
+ nv_vlan_mode(dev, dev->features);
+
+ netif_carrier_off(dev);
+
+ dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
+ dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
+
+ dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
+ dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
+ dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
+ "csum " : "",
+ dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
+ "vlan " : "",
+ id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
+ id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
+ id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
+ np->gigabit == PHY_GIGABIT ? "gbit " : "",
+ np->need_linktimer ? "lnktim " : "",
+ np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
+ np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
+ np->desc_ver);
+
+ return 0;
+
+out_error:
+ if (phystate_orig)
+ writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
+ pci_set_drvdata(pci_dev, NULL);
+out_freering:
+ free_rings(dev);
+out_unmap:
+ iounmap(get_hwbase(dev));
+out_relreg:
+ pci_release_regions(pci_dev);
+out_disable:
+ pci_disable_device(pci_dev);
+out_free:
+ free_netdev(dev);
+out:
+ return err;
+}
+
+static void nv_restore_phy(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u16 phy_reserved, mii_control;
+
+ if (np->phy_oui == PHY_OUI_REALTEK &&
+ np->phy_model == PHY_MODEL_REALTEK_8201 &&
+ phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
+ mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
+ phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
+ phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
+ phy_reserved |= PHY_REALTEK_INIT8;
+ mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
+ mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);
+
+ /* restart auto negotiation */
+ mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
+ mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
+ }
+}
+
+static void nv_restore_mac_addr(struct pci_dev *pci_dev)
+{
+ struct net_device *dev = pci_get_drvdata(pci_dev);
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ /* special op: write back the misordered MAC address - otherwise
+ * the next nv_probe would see a wrong address.
+ */
+ writel(np->orig_mac[0], base + NvRegMacAddrA);
+ writel(np->orig_mac[1], base + NvRegMacAddrB);
+ writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
+ base + NvRegTransmitPoll);
+}
+
+static void __devexit nv_remove(struct pci_dev *pci_dev)
+{
+ struct net_device *dev = pci_get_drvdata(pci_dev);
+
+ unregister_netdev(dev);
+
+ nv_restore_mac_addr(pci_dev);
+
+ /* restore any phy related changes */
+ nv_restore_phy(dev);
+
+ nv_mgmt_release_sema(dev);
+
+ /* free all structures */
+ free_rings(dev);
+ iounmap(get_hwbase(dev));
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
+ free_netdev(dev);
+ pci_set_drvdata(pci_dev, NULL);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int nv_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int i;
+
+ if (netif_running(dev)) {
+ /* Gross. */
+ nv_close(dev);
+ }
+ netif_device_detach(dev);
+
+ /* save non-pci configuration space */
+ for (i = 0; i <= np->register_size/sizeof(u32); i++)
+ np->saved_config_space[i] = readl(base + i*sizeof(u32));
+
+ return 0;
+}
+
+static int nv_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int i, rc = 0;
+
+ /* restore non-pci configuration space */
+ for (i = 0; i <= np->register_size/sizeof(u32); i++)
+ writel(np->saved_config_space[i], base+i*sizeof(u32));
+
+ if (np->driver_data & DEV_NEED_MSI_FIX)
+ pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
+
+ /* restore phy state, including autoneg */
+ phy_init(dev);
+
+ netif_device_attach(dev);
+ if (netif_running(dev)) {
+ rc = nv_open(dev);
+ nv_set_multicast(dev);
+ }
+ return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
+#define NV_PM_OPS (&nv_pm_ops)
+
+#else
+#define NV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static void nv_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct fe_priv *np = netdev_priv(dev);
+
+ if (netif_running(dev))
+ nv_close(dev);
+
+ /*
+ * Restore the MAC so a kernel started by kexec won't get confused.
+ * If we really go for poweroff, we must not restore the MAC,
+ * otherwise the MAC for WOL will be reversed at least on some boards.
+ */
+ if (system_state != SYSTEM_POWER_OFF)
+ nv_restore_mac_addr(pdev);
+
+ pci_disable_device(pdev);
+ /*
+	 * Apparently it is not possible to reinitialise from D3 hot,
+	 * so only put the device into D3 if we really go for poweroff.
+ */
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, np->wolenabled);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+#else
+#define nv_shutdown NULL
+#endif /* CONFIG_PM */
+
+static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
+ { /* nForce Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x01C3),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ },
+ { /* nForce2 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0066),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ },
+ { /* nForce3 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x00D6),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ },
+ { /* nForce3 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0086),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
+ },
+ { /* nForce3 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x008C),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
+ },
+ { /* nForce3 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x00E6),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
+ },
+ { /* nForce3 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x00DF),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
+ },
+ { /* CK804 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0056),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
+ },
+ { /* CK804 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0057),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
+ },
+ { /* MCP04 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0037),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
+ },
+ { /* MCP04 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0038),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
+ },
+ { /* MCP51 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0268),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
+ },
+ { /* MCP51 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0269),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
+ },
+ { /* MCP55 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0372),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP55 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0373),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP61 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x03E5),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP61 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x03E6),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP61 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x03EE),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP61 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x03EF),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP65 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0450),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP65 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0451),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP65 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0452),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP65 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0453),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP67 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x054C),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP67 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x054D),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP67 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x054E),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP67 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x054F),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP73 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x07DC),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP73 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x07DD),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP73 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x07DE),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP73 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x07DF),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP77 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0760),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP77 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0761),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP77 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0762),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP77 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0763),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP79 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0AB0),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP79 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0AB1),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP79 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0AB2),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP79 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0AB3),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+ },
+ { /* MCP89 Ethernet Controller */
+ PCI_DEVICE(0x10DE, 0x0D7D),
+ .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
+ },
+ {0,},
+};
+
+static struct pci_driver driver = {
+ .name = DRV_NAME,
+ .id_table = pci_tbl,
+ .probe = nv_probe,
+ .remove = __devexit_p(nv_remove),
+ .shutdown = nv_shutdown,
+ .driver.pm = NV_PM_OPS,
+};
+
+static int __init init_nic(void)
+{
+ return pci_register_driver(&driver);
+}
+
+static void __exit exit_nic(void)
+{
+ pci_unregister_driver(&driver);
+}
+
+module_param(max_interrupt_work, int, 0);
+MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
+module_param(optimization_mode, int, 0);
+MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
+module_param(poll_interval, int, 0);
+MODULE_PARM_DESC(poll_interval, "The polling interval determines how frequently the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and max is 65535.");
+module_param(msi, int, 0);
+MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
+module_param(msix, int, 0);
+MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
+module_param(dma_64bit, int, 0);
+MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
+module_param(phy_cross, int, 0);
+MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
+module_param(phy_power_down, int, 0);
+MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
+
+MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
+MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
+MODULE_LICENSE("GPL");
+
+MODULE_DEVICE_TABLE(pci, pci_tbl);
+
+module_init(init_nic);
+module_exit(exit_nic);
diff --git a/drivers/net/ethernet/octeon/Kconfig b/drivers/net/ethernet/octeon/Kconfig
new file mode 100644
index 000000000000..3de52ffd2872
--- /dev/null
+++ b/drivers/net/ethernet/octeon/Kconfig
@@ -0,0 +1,14 @@
+#
+# Cavium network device configuration
+#
+
+config OCTEON_MGMT_ETHERNET
+ tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)"
+ depends on CPU_CAVIUM_OCTEON
+ select PHYLIB
+ select MDIO_OCTEON
+ default y
+ ---help---
+ This option enables the ethernet driver for the management
+ port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX,
+ CN54XX, CN52XX, and CN6XXX chips.
diff --git a/drivers/net/ethernet/octeon/Makefile b/drivers/net/ethernet/octeon/Makefile
new file mode 100644
index 000000000000..efa41c1d91c5
--- /dev/null
+++ b/drivers/net/ethernet/octeon/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Cavium network device drivers.
+#
+
+obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon_mgmt.o
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
new file mode 100644
index 000000000000..bc1d946b7971
--- /dev/null
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -0,0 +1,1168 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009 Cavium Networks
+ */
+
+#include <linux/capability.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/slab.h>
+#include <linux/phy.h>
+#include <linux/spinlock.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-mixx-defs.h>
+#include <asm/octeon/cvmx-agl-defs.h>
+
+#define DRV_NAME "octeon_mgmt"
+#define DRV_VERSION "2.0"
+#define DRV_DESCRIPTION \
+ "Cavium Networks Octeon MII (management) port Network Driver"
+
+#define OCTEON_MGMT_NAPI_WEIGHT 16
+
+/*
+ * Ring sizes that are powers of two allow for more efficient modulo
+ * operations.
+ */
+#define OCTEON_MGMT_RX_RING_SIZE 512
+#define OCTEON_MGMT_TX_RING_SIZE 128
+
+/* Allow room for the Ethernet header plus 8 bytes for the VLAN tag and FCS. */
+#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
+
+union mgmt_port_ring_entry {
+ u64 d64;
+ struct {
+ u64 reserved_62_63:2;
+ /* Length of the buffer/packet in bytes */
+ u64 len:14;
+ /* For TX, signals that the packet should be timestamped */
+ u64 tstamp:1;
+ /* The RX error code */
+ u64 code:7;
+#define RING_ENTRY_CODE_DONE 0xf
+#define RING_ENTRY_CODE_MORE 0x10
+ /* Physical address of the buffer */
+ u64 addr:40;
+ } s;
+};
+
+struct octeon_mgmt {
+ struct net_device *netdev;
+ int port;
+ int irq;
+ u64 *tx_ring;
+ dma_addr_t tx_ring_handle;
+ unsigned int tx_next;
+ unsigned int tx_next_clean;
+ unsigned int tx_current_fill;
+ /* The tx_list lock also protects the ring related variables */
+ struct sk_buff_head tx_list;
+
+ /* RX variables only touched in napi_poll. No locking necessary. */
+ u64 *rx_ring;
+ dma_addr_t rx_ring_handle;
+ unsigned int rx_next;
+ unsigned int rx_next_fill;
+ unsigned int rx_current_fill;
+ struct sk_buff_head rx_list;
+
+ spinlock_t lock;
+ unsigned int last_duplex;
+ unsigned int last_link;
+ struct device *dev;
+ struct napi_struct napi;
+ struct tasklet_struct tx_clean_tasklet;
+ struct phy_device *phydev;
+};
+
+static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
+{
+ int port = p->port;
+ union cvmx_mixx_intena mix_intena;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
+ mix_intena.s.ithena = enable ? 1 : 0;
+ cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
+{
+ int port = p->port;
+ union cvmx_mixx_intena mix_intena;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
+ mix_intena.s.othena = enable ? 1 : 0;
+ cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
+{
+ octeon_mgmt_set_rx_irq(p, 1);
+}
+
+static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
+{
+ octeon_mgmt_set_rx_irq(p, 0);
+}
+
+static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
+{
+ octeon_mgmt_set_tx_irq(p, 1);
+}
+
+static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
+{
+ octeon_mgmt_set_tx_irq(p, 0);
+}
+
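+/*
+ * Never fill the ring completely; the 8 entries of slack presumably let
+ * the hardware distinguish a full ring from an empty one.
+ */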
+static unsigned int ring_max_fill(unsigned int ring_size)
+{
+ return ring_size - 8;
+}
+
+static unsigned int ring_size_to_bytes(unsigned int ring_size)
+{
+ return ring_size * sizeof(union mgmt_port_ring_entry);
+}
+
+static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+
+ while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
+ unsigned int size;
+ union mgmt_port_ring_entry re;
+ struct sk_buff *skb;
+
+ /* CN56XX pass 1 needs 8 bytes of padding. */
+ size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;
+
+ skb = netdev_alloc_skb(netdev, size);
+ if (!skb)
+ break;
+ skb_reserve(skb, NET_IP_ALIGN);
+ __skb_queue_tail(&p->rx_list, skb);
+
+ re.d64 = 0;
+ re.s.len = size;
+ re.s.addr = dma_map_single(p->dev, skb->data,
+ size,
+ DMA_FROM_DEVICE);
+
+ /* Put it in the ring. */
+ p->rx_ring[p->rx_next_fill] = re.d64;
+ dma_sync_single_for_device(p->dev, p->rx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ p->rx_next_fill =
+ (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
+ p->rx_current_fill++;
+ /* Ring the bell. */
+ cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
+ }
+}
+
+static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
+{
+ int port = p->port;
+ union cvmx_mixx_orcnt mix_orcnt;
+ union mgmt_port_ring_entry re;
+ struct sk_buff *skb;
+ int cleaned = 0;
+ unsigned long flags;
+
+ mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
+ while (mix_orcnt.s.orcnt) {
+ spin_lock_irqsave(&p->tx_list.lock, flags);
+
+ mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
+
+ if (mix_orcnt.s.orcnt == 0) {
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+ break;
+ }
+
+ dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+
+ re.d64 = p->tx_ring[p->tx_next_clean];
+ p->tx_next_clean =
+ (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
+ skb = __skb_dequeue(&p->tx_list);
+
+ mix_orcnt.u64 = 0;
+ mix_orcnt.s.orcnt = 1;
+
+ /* Acknowledge to hardware that we have the buffer. */
+ cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64);
+ p->tx_current_fill--;
+
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+
+ dma_unmap_single(p->dev, re.s.addr, re.s.len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ cleaned++;
+
+ mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
+ }
+
+ if (cleaned && netif_queue_stopped(p->netdev))
+ netif_wake_queue(p->netdev);
+}
+
+static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
+{
+ struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
+ octeon_mgmt_clean_tx_buffers(p);
+ octeon_mgmt_enable_tx_irq(p);
+}
+
+static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ unsigned long flags;
+ u64 drop, bad;
+
+ /* These reads also clear the count registers. */
+ drop = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port));
+ bad = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port));
+
+ if (drop || bad) {
+ /* Do an atomic update. */
+ spin_lock_irqsave(&p->lock, flags);
+ netdev->stats.rx_errors += bad;
+ netdev->stats.rx_dropped += drop;
+ spin_unlock_irqrestore(&p->lock, flags);
+ }
+}
+
+static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ unsigned long flags;
+
+ union cvmx_agl_gmx_txx_stat0 s0;
+ union cvmx_agl_gmx_txx_stat1 s1;
+
+ /* These reads also clear the count registers. */
+ s0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port));
+ s1.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT1(port));
+
+ if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
+ /* Do an atomic update. */
+ spin_lock_irqsave(&p->lock, flags);
+ netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
+ netdev->stats.collisions += s1.s.scol + s1.s.mcol;
+ spin_unlock_irqrestore(&p->lock, flags);
+ }
+}
+
+/*
+ * Dequeue a receive skb and its corresponding ring entry. The ring
+ * entry is returned; *pskb is updated to point to the skb.
+ */
+static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
+ struct sk_buff **pskb)
+{
+ union mgmt_port_ring_entry re;
+
+ dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+
+ re.d64 = p->rx_ring[p->rx_next];
+ p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
+ p->rx_current_fill--;
+ *pskb = __skb_dequeue(&p->rx_list);
+
+ dma_unmap_single(p->dev, re.s.addr,
+ ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
+ DMA_FROM_DEVICE);
+
+ return re.d64;
+}
+
+static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
+{
+ int port = p->port;
+ struct net_device *netdev = p->netdev;
+ union cvmx_mixx_ircnt mix_ircnt;
+ union mgmt_port_ring_entry re;
+ struct sk_buff *skb;
+ struct sk_buff *skb2;
+ struct sk_buff *skb_new;
+ union mgmt_port_ring_entry re2;
+ int rc = 1;
+
+ re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
+ if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
+ /* A good packet, send it up. */
+ skb_put(skb, re.s.len);
+good:
+ skb->protocol = eth_type_trans(skb, netdev);
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += skb->len;
+ netif_receive_skb(skb);
+ rc = 0;
+ } else if (re.s.code == RING_ENTRY_CODE_MORE) {
+ /*
+ * Packet split across skbs. This can happen if we
+ * increase the MTU. Buffers that are already in the
+ * rx ring can then end up being too small. As the rx
+ * ring is refilled, buffers sized for the new MTU
+ * will be used and we should go back to the normal
+ * non-split case.
+ */
+ skb_put(skb, re.s.len);
+ do {
+ re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
+ if (re2.s.code != RING_ENTRY_CODE_MORE
+ && re2.s.code != RING_ENTRY_CODE_DONE)
+ goto split_error;
+ skb_put(skb2, re2.s.len);
+ skb_new = skb_copy_expand(skb, 0, skb2->len,
+ GFP_ATOMIC);
+ if (!skb_new)
+ goto split_error;
+ if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
+ skb2->len))
+ goto split_error;
+ skb_put(skb_new, skb2->len);
+ dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb2);
+ skb = skb_new;
+ } while (re2.s.code == RING_ENTRY_CODE_MORE);
+ goto good;
+ } else {
+ /* Some other error, discard it. */
+ dev_kfree_skb_any(skb);
+ /*
+ * Error statistics are accumulated in
+ * octeon_mgmt_update_rx_stats.
+ */
+ }
+ goto done;
+split_error:
+ /* Discard the whole mess. */
+ dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb2);
+ while (re2.s.code == RING_ENTRY_CODE_MORE) {
+ re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
+ dev_kfree_skb_any(skb2);
+ }
+ netdev->stats.rx_errors++;
+
+done:
+ /* Tell the hardware we processed a packet. */
+ mix_ircnt.u64 = 0;
+ mix_ircnt.s.ircnt = 1;
+ cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
+ return rc;
+}
+
+static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
+{
+ int port = p->port;
+ unsigned int work_done = 0;
+ union cvmx_mixx_ircnt mix_ircnt;
+ int rc;
+
+ mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
+ while (work_done < budget && mix_ircnt.s.ircnt) {
+
+ rc = octeon_mgmt_receive_one(p);
+ if (!rc)
+ work_done++;
+
+ /* Check for more packets. */
+ mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
+ }
+
+ octeon_mgmt_rx_fill_ring(p->netdev);
+
+ return work_done;
+}
+
+static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
+ struct net_device *netdev = p->netdev;
+ unsigned int work_done = 0;
+
+ work_done = octeon_mgmt_receive_packets(p, budget);
+
+ if (work_done < budget) {
+ /* We stopped because no more packets were available. */
+ napi_complete(napi);
+ octeon_mgmt_enable_rx_irq(p);
+ }
+ octeon_mgmt_update_rx_stats(netdev);
+
+ return work_done;
+}
+
+/* Reset the hardware to clean state. */
+static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
+{
+ union cvmx_mixx_ctl mix_ctl;
+ union cvmx_mixx_bist mix_bist;
+ union cvmx_agl_gmx_bist agl_gmx_bist;
+
+ mix_ctl.u64 = 0;
+ cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
+ do {
+ mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port));
+ } while (mix_ctl.s.busy);
+ mix_ctl.s.reset = 1;
+ cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
+ cvmx_read_csr(CVMX_MIXX_CTL(p->port));
+ cvmx_wait(64);
+
+ mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port));
+ if (mix_bist.u64)
+ dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
+ (unsigned long long)mix_bist.u64);
+
+ agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
+ if (agl_gmx_bist.u64)
+ dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
+ (unsigned long long)agl_gmx_bist.u64);
+}
+
+struct octeon_mgmt_cam_state {
+ u64 cam[6];
+ u64 cam_mask;
+ int cam_index;
+};
+
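+/*
+ * CAM layout: register cam[i] collects byte i of up to eight addresses;
+ * the entry index selects the byte lane within each register.
+ */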
+static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
+ unsigned char *addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
+ cs->cam_mask |= (1ULL << cs->cam_index);
+ cs->cam_index++;
+}
+
+static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
+ union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
+ unsigned long flags;
+ unsigned int prev_packet_enable;
+ unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
+ unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
+ struct octeon_mgmt_cam_state cam_state;
+ struct netdev_hw_addr *ha;
+ int available_cam_entries;
+
+ memset(&cam_state, 0, sizeof(cam_state));
+
+ if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
+ cam_mode = 0;
+ available_cam_entries = 8;
+ } else {
+ /*
+		 * One CAM entry is used for the primary address, which
+		 * leaves seven for the secondary addresses.
+ */
+ available_cam_entries = 7 - netdev->uc.count;
+ }
+
+ if (netdev->flags & IFF_MULTICAST) {
+ if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(netdev) > available_cam_entries)
+ multicast_mode = 2; /* 2 - Accept all multicast. */
+ else
+ multicast_mode = 0; /* 0 - Use CAM. */
+ }
+
+ if (cam_mode == 1) {
+ /* Add primary address. */
+ octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
+ netdev_for_each_uc_addr(ha, netdev)
+ octeon_mgmt_cam_state_add(&cam_state, ha->addr);
+ }
+ if (multicast_mode == 0) {
+ netdev_for_each_mc_addr(ha, netdev)
+ octeon_mgmt_cam_state_add(&cam_state, ha->addr);
+ }
+
+ spin_lock_irqsave(&p->lock, flags);
+
+ /* Disable packet I/O. */
+ agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+ prev_packet_enable = agl_gmx_prtx.s.en;
+ agl_gmx_prtx.s.en = 0;
+ cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+
+ adr_ctl.u64 = 0;
+ adr_ctl.s.cam_mode = cam_mode;
+ adr_ctl.s.mcst = multicast_mode;
+ adr_ctl.s.bcst = 1; /* Allow broadcast */
+
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64);
+
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask);
+
+ /* Restore packet I/O. */
+ agl_gmx_prtx.s.en = prev_packet_enable;
+ cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct sockaddr *sa = addr;
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
+
+ octeon_mgmt_set_rx_filtering(netdev);
+
+ return 0;
+}
+
+static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
+
+ /*
+ * Limit the MTU to make sure the ethernet packets are between
+ * 64 bytes and 16383 bytes.
+ */
+ if (size_without_fcs < 64 || size_without_fcs > 16383) {
+ dev_warn(p->dev, "MTU must be between %d and %d.\n",
+ 64 - OCTEON_MGMT_RX_HEADROOM,
+ 16383 - OCTEON_MGMT_RX_HEADROOM);
+ return -EINVAL;
+ }
+
+ netdev->mtu = new_mtu;
+
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port),
+ (size_without_fcs + 7) & 0xfff8);
+
+ return 0;
+}
+
+static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
+{
+ struct net_device *netdev = dev_id;
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ union cvmx_mixx_isr mixx_isr;
+
+ mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));
+
+ /* Clear any pending interrupts */
+ cvmx_write_csr(CVMX_MIXX_ISR(port), mixx_isr.u64);
+ cvmx_read_csr(CVMX_MIXX_ISR(port));
+
+ if (mixx_isr.s.irthresh) {
+ octeon_mgmt_disable_rx_irq(p);
+ napi_schedule(&p->napi);
+ }
+ if (mixx_isr.s.orthresh) {
+ octeon_mgmt_disable_tx_irq(p);
+ tasklet_schedule(&p->tx_clean_tasklet);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int octeon_mgmt_ioctl(struct net_device *netdev,
+ struct ifreq *rq, int cmd)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ if (!p->phydev)
+ return -EINVAL;
+
+ return phy_mii_ioctl(p->phydev, rq, cmd);
+}
+
+static void octeon_mgmt_adjust_link(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ union cvmx_agl_gmx_prtx_cfg prtx_cfg;
+ unsigned long flags;
+ int link_changed = 0;
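+	/* link_changed: +1 if the link came up, -1 if it went down */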
+
+ spin_lock_irqsave(&p->lock, flags);
+ if (p->phydev->link) {
+ if (!p->last_link)
+ link_changed = 1;
+ if (p->last_duplex != p->phydev->duplex) {
+ p->last_duplex = p->phydev->duplex;
+ prtx_cfg.u64 =
+ cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+ prtx_cfg.s.duplex = p->phydev->duplex;
+ cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port),
+ prtx_cfg.u64);
+ }
+ } else {
+ if (p->last_link)
+ link_changed = -1;
+ }
+ p->last_link = p->phydev->link;
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ if (link_changed != 0) {
+ if (link_changed > 0) {
+ netif_carrier_on(netdev);
+ pr_info("%s: Link is up - %d/%s\n", netdev->name,
+ p->phydev->speed,
+ DUPLEX_FULL == p->phydev->duplex ?
+ "Full" : "Half");
+ } else {
+ netif_carrier_off(netdev);
+ pr_info("%s: Link is down\n", netdev->name);
+ }
+ }
+}
+
+static int octeon_mgmt_init_phy(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ char phy_id[20];
+
+ if (octeon_is_simulation()) {
+ /* No PHYs in the simulator. */
+ netif_carrier_on(netdev);
+ return 0;
+ }
+
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", p->port);
+
+ p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(p->phydev)) {
+ p->phydev = NULL;
+ return -1;
+ }
+
+ phy_start_aneg(p->phydev);
+
+ return 0;
+}
+
+static int octeon_mgmt_open(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ union cvmx_mixx_ctl mix_ctl;
+ union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
+ union cvmx_mixx_oring1 oring1;
+ union cvmx_mixx_iring1 iring1;
+ union cvmx_agl_gmx_prtx_cfg prtx_cfg;
+ union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
+ union cvmx_mixx_irhwm mix_irhwm;
+ union cvmx_mixx_orhwm mix_orhwm;
+ union cvmx_mixx_intena mix_intena;
+ struct sockaddr sa;
+
+ /* Allocate ring buffers. */
+ p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ GFP_KERNEL);
+ if (!p->tx_ring)
+ return -ENOMEM;
+ p->tx_ring_handle =
+ dma_map_single(p->dev, p->tx_ring,
+ ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ p->tx_next = 0;
+ p->tx_next_clean = 0;
+ p->tx_current_fill = 0;
+
+ p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ GFP_KERNEL);
+ if (!p->rx_ring)
+ goto err_nomem;
+ p->rx_ring_handle =
+ dma_map_single(p->dev, p->rx_ring,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+
+ p->rx_next = 0;
+ p->rx_next_fill = 0;
+ p->rx_current_fill = 0;
+
+ octeon_mgmt_reset_hw(p);
+
+ mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
+
+ /* Bring it out of reset if needed. */
+ if (mix_ctl.s.reset) {
+ mix_ctl.s.reset = 0;
+ cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
+ do {
+ mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
+ } while (mix_ctl.s.reset);
+ }
+
+ agl_gmx_inf_mode.u64 = 0;
+ agl_gmx_inf_mode.s.en = 1;
+ cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
+
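+	/* The MIX ring base registers take DMA addresses in 8-byte units. */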
+ oring1.u64 = 0;
+ oring1.s.obase = p->tx_ring_handle >> 3;
+ oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
+ cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);
+
+ iring1.u64 = 0;
+ iring1.s.ibase = p->rx_ring_handle >> 3;
+ iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
+ cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);
+
+ /* Disable packet I/O. */
+ prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+ prtx_cfg.s.en = 0;
+ cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);
+
+ memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
+ octeon_mgmt_set_mac_address(netdev, &sa);
+
+ octeon_mgmt_change_mtu(netdev, netdev->mtu);
+
+	/*
+	 * Enable the port HW.  Packet I/O itself stays disabled until the
+	 * GMX port is enabled further below.
+	 */
+ mix_ctl.u64 = 0;
+ mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */
+ mix_ctl.s.en = 1; /* Enable the port */
+ mix_ctl.s.nbtarb = 0; /* Arbitration mode */
+ /* MII CB-request FIFO programmable high watermark */
+ mix_ctl.s.mrq_hwm = 1;
+ cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
+ /*
+ * Force compensation values, as they are not
+ * determined properly by HW
+ */
+ union cvmx_agl_gmx_drv_ctl drv_ctl;
+
+ drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
+ if (port) {
+ drv_ctl.s.byp_en1 = 1;
+ drv_ctl.s.nctl1 = 6;
+ drv_ctl.s.pctl1 = 6;
+ } else {
+ drv_ctl.s.byp_en = 1;
+ drv_ctl.s.nctl = 6;
+ drv_ctl.s.pctl = 6;
+ }
+ cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
+ }
+
+ octeon_mgmt_rx_fill_ring(netdev);
+
+ /* Clear statistics. */
+ /* Clear on read. */
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0);
+
+ cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1);
+ cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
+ cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0);
+
+ /* Clear any pending interrupts */
+ cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port)));
+
+ if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
+ netdev)) {
+ dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
+ goto err_noirq;
+ }
+
+ /* Interrupt every single RX packet */
+ mix_irhwm.u64 = 0;
+ mix_irhwm.s.irhwm = 0;
+ cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);
+
+ /* Interrupt when we have 1 or more packets to clean. */
+ mix_orhwm.u64 = 0;
+ mix_orhwm.s.orhwm = 1;
+ cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);
+
+ /* Enable receive and transmit interrupts */
+ mix_intena.u64 = 0;
+ mix_intena.s.ithena = 1;
+ mix_intena.s.othena = 1;
+ cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
+
+	/* Enable packet I/O. */
+ rxx_frm_ctl.u64 = 0;
+ rxx_frm_ctl.s.pre_align = 1;
+ /*
+ * When set, disables the length check for non-min sized pkts
+ * with padding in the client data.
+ */
+ rxx_frm_ctl.s.pad_len = 1;
+ /* When set, disables the length check for VLAN pkts */
+ rxx_frm_ctl.s.vlan_len = 1;
+ /* When set, PREAMBLE checking is less strict */
+ rxx_frm_ctl.s.pre_free = 1;
+ /* Control Pause Frames can match station SMAC */
+ rxx_frm_ctl.s.ctl_smac = 0;
+	/* Control Pause Frames can match globally assigned Multicast address */
+ rxx_frm_ctl.s.ctl_mcst = 1;
+ /* Forward pause information to TX block */
+ rxx_frm_ctl.s.ctl_bck = 1;
+ /* Drop Control Pause Frames */
+ rxx_frm_ctl.s.ctl_drp = 1;
+ /* Strip off the preamble */
+ rxx_frm_ctl.s.pre_strp = 1;
+ /*
+ * This port is configured to send PREAMBLE+SFD to begin every
+ * frame. GMX checks that the PREAMBLE is sent correctly.
+ */
+ rxx_frm_ctl.s.pre_chk = 1;
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);
+
+ /* Enable the AGL block */
+ agl_gmx_inf_mode.u64 = 0;
+ agl_gmx_inf_mode.s.en = 1;
+ cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
+
+ /* Configure the port duplex and enables */
+ prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+ prtx_cfg.s.tx_en = 1;
+ prtx_cfg.s.rx_en = 1;
+ prtx_cfg.s.en = 1;
+ p->last_duplex = 1;
+ prtx_cfg.s.duplex = p->last_duplex;
+ cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);
+
+ p->last_link = 0;
+ netif_carrier_off(netdev);
+
+ if (octeon_mgmt_init_phy(netdev)) {
+ dev_err(p->dev, "Cannot initialize PHY.\n");
+ goto err_noirq;
+ }
+
+ netif_wake_queue(netdev);
+ napi_enable(&p->napi);
+
+ return 0;
+err_noirq:
+ octeon_mgmt_reset_hw(p);
+ dma_unmap_single(p->dev, p->rx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ kfree(p->rx_ring);
+err_nomem:
+ dma_unmap_single(p->dev, p->tx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ kfree(p->tx_ring);
+ return -ENOMEM;
+}
+
+static int octeon_mgmt_stop(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ napi_disable(&p->napi);
+ netif_stop_queue(netdev);
+
+ if (p->phydev)
+ phy_disconnect(p->phydev);
+
+ netif_carrier_off(netdev);
+
+ octeon_mgmt_reset_hw(p);
+
+ free_irq(p->irq, netdev);
+
+ /* dma_unmap is a nop on Octeon, so just free everything. */
+ skb_queue_purge(&p->tx_list);
+ skb_queue_purge(&p->rx_list);
+
+ dma_unmap_single(p->dev, p->rx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ kfree(p->rx_ring);
+
+ dma_unmap_single(p->dev, p->tx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ kfree(p->tx_ring);
+
+ return 0;
+}
+
+static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ union mgmt_port_ring_entry re;
+ unsigned long flags;
+ int rv = NETDEV_TX_BUSY;
+
+ re.d64 = 0;
+ re.s.len = skb->len;
+ re.s.addr = dma_map_single(p->dev, skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+
+ spin_lock_irqsave(&p->tx_list.lock, flags);
+
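+	/*
+	 * If the ring is nearly full, stop the queue and retake the lock;
+	 * the TX-clean tasklet may have freed entries in that window, so
+	 * the fill level is re-checked before returning NETDEV_TX_BUSY.
+	 */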
+ if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+ netif_stop_queue(netdev);
+ spin_lock_irqsave(&p->tx_list.lock, flags);
+ }
+
+ if (unlikely(p->tx_current_fill >=
+ ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+ dma_unmap_single(p->dev, re.s.addr, re.s.len,
+ DMA_TO_DEVICE);
+ goto out;
+ }
+
+ __skb_queue_tail(&p->tx_list, skb);
+
+ /* Put it in the ring. */
+ p->tx_ring[p->tx_next] = re.d64;
+ p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
+ p->tx_current_fill++;
+
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+
+ dma_sync_single_for_device(p->dev, p->tx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+
+ /* Ring the bell. */
+ cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
+
+ rv = NETDEV_TX_OK;
+out:
+ octeon_mgmt_update_tx_stats(netdev);
+ return rv;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void octeon_mgmt_poll_controller(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ octeon_mgmt_receive_packets(p, 16);
+ octeon_mgmt_update_rx_stats(netdev);
+}
+#endif
+
+static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strncpy(info->version, DRV_VERSION, sizeof(info->version));
+ strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
+ info->n_stats = 0;
+ info->testinfo_len = 0;
+ info->regdump_len = 0;
+ info->eedump_len = 0;
+}
+
+static int octeon_mgmt_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ if (p->phydev)
+ return phy_ethtool_gset(p->phydev, cmd);
+
+ return -EINVAL;
+}
+
+static int octeon_mgmt_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (p->phydev)
+ return phy_ethtool_sset(p->phydev, cmd);
+
+ return -EINVAL;
+}
+
+static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
+ .get_drvinfo = octeon_mgmt_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_settings = octeon_mgmt_get_settings,
+ .set_settings = octeon_mgmt_set_settings
+};
+
+static const struct net_device_ops octeon_mgmt_ops = {
+ .ndo_open = octeon_mgmt_open,
+ .ndo_stop = octeon_mgmt_stop,
+ .ndo_start_xmit = octeon_mgmt_xmit,
+ .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
+ .ndo_set_mac_address = octeon_mgmt_set_mac_address,
+ .ndo_do_ioctl = octeon_mgmt_ioctl,
+ .ndo_change_mtu = octeon_mgmt_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = octeon_mgmt_poll_controller,
+#endif
+};
+
+static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
+{
+ struct resource *res_irq;
+ struct net_device *netdev;
+ struct octeon_mgmt *p;
+ int i;
+
+ netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
+ if (netdev == NULL)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, netdev);
+ p = netdev_priv(netdev);
+ netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
+ OCTEON_MGMT_NAPI_WEIGHT);
+
+ p->netdev = netdev;
+ p->dev = &pdev->dev;
+
+ p->port = pdev->id;
+ snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq)
+ goto err;
+
+ p->irq = res_irq->start;
+ spin_lock_init(&p->lock);
+
+ skb_queue_head_init(&p->tx_list);
+ skb_queue_head_init(&p->rx_list);
+ tasklet_init(&p->tx_clean_tasklet,
+ octeon_mgmt_clean_tx_tasklet, (unsigned long)p);
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netdev->netdev_ops = &octeon_mgmt_ops;
+ netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
+
+ /* The mgmt ports get the first N MACs. */
+ for (i = 0; i < 6; i++)
+ netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i];
+ netdev->dev_addr[5] += p->port;
+
+ if (p->port >= octeon_bootinfo->mac_addr_count)
+ dev_err(&pdev->dev,
+ "Error %s: Using MAC outside of the assigned range: %pM\n",
+ netdev->name, netdev->dev_addr);
+
+ if (register_netdev(netdev))
+ goto err;
+
+ dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
+ return 0;
+err:
+ free_netdev(netdev);
+ return -ENOENT;
+}
+
+static int __devexit octeon_mgmt_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev = dev_get_drvdata(&pdev->dev);
+
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+ return 0;
+}
+
+static struct platform_driver octeon_mgmt_driver = {
+ .driver = {
+ .name = "octeon_mgmt",
+ .owner = THIS_MODULE,
+ },
+ .probe = octeon_mgmt_probe,
+ .remove = __devexit_p(octeon_mgmt_remove),
+};
+
+extern void octeon_mdiobus_force_mod_depencency(void);
+
+static int __init octeon_mgmt_mod_init(void)
+{
+ /* Force our mdiobus driver module to be loaded first. */
+ octeon_mdiobus_force_mod_depencency();
+ return platform_driver_register(&octeon_mgmt_driver);
+}
+
+static void __exit octeon_mgmt_mod_exit(void)
+{
+ platform_driver_unregister(&octeon_mgmt_driver);
+}
+
+module_init(octeon_mgmt_mod_init);
+module_exit(octeon_mgmt_mod_exit);
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("David Daney");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/oki-semi/Kconfig b/drivers/net/ethernet/oki-semi/Kconfig
new file mode 100644
index 000000000000..ecd45f9ea9d9
--- /dev/null
+++ b/drivers/net/ethernet/oki-semi/Kconfig
@@ -0,0 +1,23 @@
+#
+# OKI Semiconductor device configuration
+#
+
+config NET_VENDOR_OKI
+ bool "OKI Semiconductor devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about OKI Semiconductor cards. If you say Y, you will
+ be asked for your specific card in the following questions.
+
+if NET_VENDOR_OKI
+
+source "drivers/net/ethernet/oki-semi/pch_gbe/Kconfig"
+
+endif # NET_VENDOR_OKI
diff --git a/drivers/net/ethernet/oki-semi/Makefile b/drivers/net/ethernet/oki-semi/Makefile
new file mode 100644
index 000000000000..b6780c877c19
--- /dev/null
+++ b/drivers/net/ethernet/oki-semi/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the OKI Semiconductor device drivers.
+#
+
+obj-$(CONFIG_PCH_GBE) += pch_gbe/
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
new file mode 100644
index 000000000000..00bc4fc968c7
--- /dev/null
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -0,0 +1,22 @@
+#
+# OKI Semiconductor device configuration
+#
+
+config PCH_GBE
+ tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
+ depends on PCI
+ select NET_CORE
+ select MII
+ ---help---
+	  This is a Gigabit Ethernet driver for the EG20T PCH.
+	  The EG20T PCH is the platform controller hub used in Intel's
+	  general embedded platforms and provides a Gigabit Ethernet
+	  interface through which system devices on the network can be
+	  reached. This driver enables that Gigabit Ethernet function.
+
+	  This driver can also be used for the OKI SEMICONDUCTOR IOH (Input/
+	  Output Hub), ML7223/ML7831.
+	  The ML7223 IOH is for MP (Media Phone) use; the ML7831 IOH is for
+	  general purpose use.
+	  The ML7223/ML7831 are companion chips for the Intel Atom E6xx series
+	  and are fully compatible with the Intel EG20T PCH.
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Makefile b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile
new file mode 100644
index 000000000000..31288d4ad248
--- /dev/null
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_PCH_GBE) += pch_gbe.o
+
+pch_gbe-y := pch_gbe_phy.o pch_gbe_ethtool.o pch_gbe_param.o
+pch_gbe-y += pch_gbe_api.o pch_gbe_main.o
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
new file mode 100644
index 000000000000..a09a07197eb5
--- /dev/null
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -0,0 +1,663 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This code was derived from the Intel e1000e Linux driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _PCH_GBE_H_
+#define _PCH_GBE_H_
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mii.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+
+/**
+ * pch_gbe_regs_mac_adr - Structure holding values of mac address registers
+ * @high: Denotes the 1st to 4th bytes of the MAC address
+ * @low: Denotes the 5th and 6th bytes of the MAC address
+ */
+struct pch_gbe_regs_mac_adr {
+ u32 high;
+ u32 low;
+};
+/**
+ * pch_gbe_regs - Structure holding values of MAC registers
+ */
+struct pch_gbe_regs {
+ u32 INT_ST;
+ u32 INT_EN;
+ u32 MODE;
+ u32 RESET;
+ u32 TCPIP_ACC;
+ u32 EX_LIST;
+ u32 INT_ST_HOLD;
+ u32 PHY_INT_CTRL;
+ u32 MAC_RX_EN;
+ u32 RX_FCTRL;
+ u32 PAUSE_REQ;
+ u32 RX_MODE;
+ u32 TX_MODE;
+ u32 RX_FIFO_ST;
+ u32 TX_FIFO_ST;
+ u32 TX_FID;
+ u32 TX_RESULT;
+ u32 PAUSE_PKT1;
+ u32 PAUSE_PKT2;
+ u32 PAUSE_PKT3;
+ u32 PAUSE_PKT4;
+ u32 PAUSE_PKT5;
+ u32 reserve[2];
+ struct pch_gbe_regs_mac_adr mac_adr[16];
+ u32 ADDR_MASK;
+ u32 MIIM;
+ u32 MAC_ADDR_LOAD;
+ u32 RGMII_ST;
+ u32 RGMII_CTRL;
+ u32 reserve3[3];
+ u32 DMA_CTRL;
+ u32 reserve4[3];
+ u32 RX_DSC_BASE;
+ u32 RX_DSC_SIZE;
+ u32 RX_DSC_HW_P;
+ u32 RX_DSC_HW_P_HLD;
+ u32 RX_DSC_SW_P;
+ u32 reserve5[3];
+ u32 TX_DSC_BASE;
+ u32 TX_DSC_SIZE;
+ u32 TX_DSC_HW_P;
+ u32 TX_DSC_HW_P_HLD;
+ u32 TX_DSC_SW_P;
+ u32 reserve6[3];
+ u32 RX_DMA_ST;
+ u32 TX_DMA_ST;
+ u32 reserve7[2];
+ u32 WOL_ST;
+ u32 WOL_CTRL;
+ u32 WOL_ADDR_MASK;
+};
+
+/* Interrupt Status */
+/* Interrupt Status Hold */
+/* Interrupt Enable */
+#define PCH_GBE_INT_RX_DMA_CMPLT 0x00000001 /* Receive DMA Transfer Complete */
+#define PCH_GBE_INT_RX_VALID 0x00000002 /* MAC Normal Receive Complete */
+#define PCH_GBE_INT_RX_FRAME_ERR 0x00000004 /* Receive frame error */
+#define PCH_GBE_INT_RX_FIFO_ERR 0x00000008 /* Receive FIFO Overflow */
+#define PCH_GBE_INT_RX_DMA_ERR 0x00000010 /* Receive DMA Transfer Error */
+#define PCH_GBE_INT_RX_DSC_EMP 0x00000020 /* Receive Descriptor Empty */
+#define PCH_GBE_INT_TX_CMPLT 0x00000100 /* MAC Transmission Complete */
+#define PCH_GBE_INT_TX_DMA_CMPLT 0x00000200 /* DMA Transfer Complete */
+#define PCH_GBE_INT_TX_FIFO_ERR 0x00000400 /* Transmission FIFO underflow. */
+#define PCH_GBE_INT_TX_DMA_ERR 0x00000800 /* Transmission DMA Error */
+#define PCH_GBE_INT_PAUSE_CMPLT 0x00001000 /* Pause Transmission complete */
+#define PCH_GBE_INT_MIIM_CMPLT 0x00010000 /* MIIM I/F Read completion */
+#define PCH_GBE_INT_PHY_INT 0x00100000 /* Interruption from PHY */
+#define PCH_GBE_INT_WOL_DET 0x01000000 /* Wake On LAN Event detection. */
+#define PCH_GBE_INT_TCPIP_ERR 0x10000000 /* TCP/IP Accelerator Error */
+
+/* Mode */
+#define PCH_GBE_MODE_MII_ETHER 0x00000000 /* GIGA Ethernet Mode [MII] */
+#define PCH_GBE_MODE_GMII_ETHER 0x80000000 /* GIGA Ethernet Mode [GMII] */
+#define PCH_GBE_MODE_HALF_DUPLEX 0x00000000 /* Duplex Mode [half duplex] */
+#define PCH_GBE_MODE_FULL_DUPLEX 0x40000000 /* Duplex Mode [full duplex] */
+#define PCH_GBE_MODE_FR_BST 0x04000000 /* Frame bursting is done */
+
+/* Reset */
+#define PCH_GBE_ALL_RST 0x80000000 /* All reset */
+#define PCH_GBE_TX_RST 0x00008000 /* TX MAC, TX FIFO, TX DMA reset */
+#define PCH_GBE_RX_RST 0x00004000 /* RX MAC, RX FIFO, RX DMA reset */
+
+/* TCP/IP Accelerator Control */
+#define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */
+#define PCH_GBE_RX_TCPIPACC_OFF 0x00000004 /* RX TCP/IP ACC Disabled */
+#define PCH_GBE_TX_TCPIPACC_EN 0x00000002 /* TX TCP/IP ACC Enable */
+#define PCH_GBE_RX_TCPIPACC_EN 0x00000001 /* RX TCP/IP ACC Enable */
+
+/* MAC RX Enable */
+#define PCH_GBE_MRE_MAC_RX_EN 0x00000001 /* MAC Receive Enable */
+
+/* RX Flow Control */
+#define PCH_GBE_FL_CTRL_EN 0x80000000 /* Pause packet is enabled */
+
+/* Pause Packet Request */
+#define PCH_GBE_PS_PKT_RQ 0x80000000 /* Pause packet Request */
+
+/* RX Mode */
+#define PCH_GBE_ADD_FIL_EN 0x80000000 /* Address Filtering Enable */
+/* Multicast Filtering Enable */
+#define PCH_GBE_MLT_FIL_EN 0x40000000
+/* Receive Almost Empty Threshold */
+#define PCH_GBE_RH_ALM_EMP_4 0x00000000 /* 4 words */
+#define PCH_GBE_RH_ALM_EMP_8 0x00004000 /* 8 words */
+#define PCH_GBE_RH_ALM_EMP_16 0x00008000 /* 16 words */
+#define PCH_GBE_RH_ALM_EMP_32 0x0000C000 /* 32 words */
+/* Receive Almost Full Threshold */
+#define PCH_GBE_RH_ALM_FULL_4 0x00000000 /* 4 words */
+#define PCH_GBE_RH_ALM_FULL_8 0x00001000 /* 8 words */
+#define PCH_GBE_RH_ALM_FULL_16 0x00002000 /* 16 words */
+#define PCH_GBE_RH_ALM_FULL_32 0x00003000 /* 32 words */
+/* RX FIFO Read Trigger Threshold */
+#define PCH_GBE_RH_RD_TRG_4 0x00000000 /* 4 words */
+#define PCH_GBE_RH_RD_TRG_8 0x00000200 /* 8 words */
+#define PCH_GBE_RH_RD_TRG_16 0x00000400 /* 16 words */
+#define PCH_GBE_RH_RD_TRG_32 0x00000600 /* 32 words */
+#define PCH_GBE_RH_RD_TRG_64 0x00000800 /* 64 words */
+#define PCH_GBE_RH_RD_TRG_128 0x00000A00 /* 128 words */
+#define PCH_GBE_RH_RD_TRG_256 0x00000C00 /* 256 words */
+#define PCH_GBE_RH_RD_TRG_512 0x00000E00 /* 512 words */
+
+/* Receive Descriptor bit definitions */
+#define PCH_GBE_RXD_ACC_STAT_BCAST 0x00000400
+#define PCH_GBE_RXD_ACC_STAT_MCAST 0x00000200
+#define PCH_GBE_RXD_ACC_STAT_UCAST 0x00000100
+#define PCH_GBE_RXD_ACC_STAT_TCPIPOK 0x000000C0
+#define PCH_GBE_RXD_ACC_STAT_IPOK 0x00000080
+#define PCH_GBE_RXD_ACC_STAT_TCPOK 0x00000040
+#define PCH_GBE_RXD_ACC_STAT_IP6ERR 0x00000020
+#define PCH_GBE_RXD_ACC_STAT_OFLIST 0x00000010
+#define PCH_GBE_RXD_ACC_STAT_TYPEIP 0x00000008
+#define PCH_GBE_RXD_ACC_STAT_MACL 0x00000004
+#define PCH_GBE_RXD_ACC_STAT_PPPOE 0x00000002
+#define PCH_GBE_RXD_ACC_STAT_VTAGT 0x00000001
+#define PCH_GBE_RXD_GMAC_STAT_PAUSE 0x0200
+#define PCH_GBE_RXD_GMAC_STAT_MARBR 0x0100
+#define PCH_GBE_RXD_GMAC_STAT_MARMLT 0x0080
+#define PCH_GBE_RXD_GMAC_STAT_MARIND 0x0040
+#define PCH_GBE_RXD_GMAC_STAT_MARNOTMT 0x0020
+#define PCH_GBE_RXD_GMAC_STAT_TLONG 0x0010
+#define PCH_GBE_RXD_GMAC_STAT_TSHRT 0x0008
+#define PCH_GBE_RXD_GMAC_STAT_NOTOCTAL 0x0004
+#define PCH_GBE_RXD_GMAC_STAT_NBLERR 0x0002
+#define PCH_GBE_RXD_GMAC_STAT_CRCERR 0x0001
+
+/* Transmit Descriptor bit definitions */
+#define PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF 0x0008
+#define PCH_GBE_TXD_CTRL_ITAG 0x0004
+#define PCH_GBE_TXD_CTRL_ICRC 0x0002
+#define PCH_GBE_TXD_CTRL_APAD 0x0001
+#define PCH_GBE_TXD_WORDS_SHIFT 2
+#define PCH_GBE_TXD_GMAC_STAT_CMPLT 0x2000
+#define PCH_GBE_TXD_GMAC_STAT_ABT 0x1000
+#define PCH_GBE_TXD_GMAC_STAT_EXCOL 0x0800
+#define PCH_GBE_TXD_GMAC_STAT_SNGCOL 0x0400
+#define PCH_GBE_TXD_GMAC_STAT_MLTCOL 0x0200
+#define PCH_GBE_TXD_GMAC_STAT_CRSER 0x0100
+#define PCH_GBE_TXD_GMAC_STAT_TLNG 0x0080
+#define PCH_GBE_TXD_GMAC_STAT_TSHRT 0x0040
+#define PCH_GBE_TXD_GMAC_STAT_LTCOL 0x0020
+#define PCH_GBE_TXD_GMAC_STAT_TFUNDFLW 0x0010
+#define PCH_GBE_TXD_GMAC_STAT_RTYCNT_MASK 0x000F
+
+/* TX Mode */
+#define PCH_GBE_TM_NO_RTRY 0x80000000 /* No Retransmission */
+#define PCH_GBE_TM_LONG_PKT 0x40000000 /* Long Packet TX Enable */
+#define PCH_GBE_TM_ST_AND_FD 0x20000000 /* Store and Forward */
+#define PCH_GBE_TM_SHORT_PKT 0x10000000 /* Short Packet TX Enable */
+#define PCH_GBE_TM_LTCOL_RETX 0x08000000 /* Retransmission at Late Collision */
+/* Frame Start Threshold */
+#define PCH_GBE_TM_TH_TX_STRT_4 0x00000000 /* 4 words */
+#define PCH_GBE_TM_TH_TX_STRT_8 0x00004000 /* 8 words */
+#define PCH_GBE_TM_TH_TX_STRT_16 0x00008000 /* 16 words */
+#define PCH_GBE_TM_TH_TX_STRT_32 0x0000C000 /* 32 words */
+/* Transmit Almost Empty Threshold */
+#define PCH_GBE_TM_TH_ALM_EMP_4 0x00000000 /* 4 words */
+#define PCH_GBE_TM_TH_ALM_EMP_8 0x00000800 /* 8 words */
+#define PCH_GBE_TM_TH_ALM_EMP_16 0x00001000 /* 16 words */
+#define PCH_GBE_TM_TH_ALM_EMP_32 0x00001800 /* 32 words */
+#define PCH_GBE_TM_TH_ALM_EMP_64 0x00002000 /* 64 words */
+#define PCH_GBE_TM_TH_ALM_EMP_128 0x00002800 /* 128 words */
+#define PCH_GBE_TM_TH_ALM_EMP_256 0x00003000 /* 256 words */
+#define PCH_GBE_TM_TH_ALM_EMP_512 0x00003800 /* 512 words */
+/* Transmit Almost Full Threshold */
+#define PCH_GBE_TM_TH_ALM_FULL_4 0x00000000 /* 4 words */
+#define PCH_GBE_TM_TH_ALM_FULL_8 0x00000200 /* 8 words */
+#define PCH_GBE_TM_TH_ALM_FULL_16 0x00000400 /* 16 words */
+#define PCH_GBE_TM_TH_ALM_FULL_32 0x00000600 /* 32 words */
+
+/* RX FIFO Status */
+#define PCH_GBE_RF_ALM_FULL 0x80000000 /* RX FIFO is almost full. */
+#define PCH_GBE_RF_ALM_EMP 0x40000000 /* RX FIFO is almost empty. */
+#define PCH_GBE_RF_RD_TRG 0x20000000 /* Word count is above RH_RD_TRG. */
+#define PCH_GBE_RF_STRWD 0x1FFE0000 /* The word count of RX FIFO. */
+#define PCH_GBE_RF_RCVING 0x00010000 /* Stored in RX FIFO. */
+
+/* MAC Address Mask */
+#define PCH_GBE_BUSY 0x80000000
+
+/* MIIM */
+#define PCH_GBE_MIIM_OPER_WRITE 0x04000000
+#define PCH_GBE_MIIM_OPER_READ 0x00000000
+#define PCH_GBE_MIIM_OPER_READY 0x04000000
+#define PCH_GBE_MIIM_PHY_ADDR_SHIFT 21
+#define PCH_GBE_MIIM_REG_ADDR_SHIFT 16
+
+/* RGMII Status */
+#define PCH_GBE_LINK_UP 0x80000008
+#define PCH_GBE_RXC_SPEED_MSK 0x00000006
+#define PCH_GBE_RXC_SPEED_2_5M 0x00000000 /* 2.5MHz */
+#define PCH_GBE_RXC_SPEED_25M 0x00000002 /* 25MHz */
+#define PCH_GBE_RXC_SPEED_125M 0x00000004 /* 125MHz */
+#define PCH_GBE_DUPLEX_FULL 0x00000001
+
+/* RGMII Control */
+#define PCH_GBE_CRS_SEL 0x00000010
+#define PCH_GBE_RGMII_RATE_125M 0x00000000
+#define PCH_GBE_RGMII_RATE_25M 0x00000008
+#define PCH_GBE_RGMII_RATE_2_5M 0x0000000C
+#define PCH_GBE_RGMII_MODE_GMII 0x00000000
+#define PCH_GBE_RGMII_MODE_RGMII 0x00000002
+#define PCH_GBE_CHIP_TYPE_EXTERNAL 0x00000000
+#define PCH_GBE_CHIP_TYPE_INTERNAL 0x00000001
+
+/* DMA Control */
+#define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */
+#define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */
+
+/* RX DMA STATUS */
+#define PCH_GBE_IDLE_CHECK 0xFFFFFFFE
+
+/* Wake On LAN Status */
+#define PCH_GBE_WLS_BR 0x00000008 /* Broadcast Address */
+#define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */
+
+/* The Frame registered in Address Recognizer */
+#define PCH_GBE_WLS_IND 0x00000002
+#define PCH_GBE_WLS_MP 0x00000001 /* Magic packet Address */
+
+/* Wake On LAN Control */
+#define PCH_GBE_WLC_WOL_MODE 0x00010000
+#define PCH_GBE_WLC_IGN_TLONG 0x00000100
+#define PCH_GBE_WLC_IGN_TSHRT 0x00000080
+#define PCH_GBE_WLC_IGN_OCTER 0x00000040
+#define PCH_GBE_WLC_IGN_NBLER 0x00000020
+#define PCH_GBE_WLC_IGN_CRCER 0x00000010
+#define PCH_GBE_WLC_BR 0x00000008
+#define PCH_GBE_WLC_MLT 0x00000004
+#define PCH_GBE_WLC_IND 0x00000002
+#define PCH_GBE_WLC_MP 0x00000001
+
+/* Wake On LAN Address Mask */
+#define PCH_GBE_WLA_BUSY 0x80000000
+
+/* TX/RX descriptor defines */
+#define PCH_GBE_MAX_TXD 4096
+#define PCH_GBE_DEFAULT_TXD 256
+#define PCH_GBE_MIN_TXD 8
+#define PCH_GBE_MAX_RXD 4096
+#define PCH_GBE_DEFAULT_RXD 256
+#define PCH_GBE_MIN_RXD 8
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define PCH_GBE_TX_DESC_MULTIPLE 8
+#define PCH_GBE_RX_DESC_MULTIPLE 8
+
+/* Read/Write operation is done through MII Management IF */
+#define PCH_GBE_HAL_MIIM_READ ((u32)0x00000000)
+#define PCH_GBE_HAL_MIIM_WRITE ((u32)0x04000000)
+
+/* flow control values */
+#define PCH_GBE_FC_NONE 0
+#define PCH_GBE_FC_RX_PAUSE 1
+#define PCH_GBE_FC_TX_PAUSE 2
+#define PCH_GBE_FC_FULL 3
+#define PCH_GBE_FC_DEFAULT PCH_GBE_FC_FULL
+
+struct pch_gbe_hw;
+/**
+ * struct pch_gbe_functions - HAL API function pointers
+ * @get_bus_info: for pch_gbe_hal_get_bus_info
+ * @init_hw: for pch_gbe_hal_init_hw
+ * @read_phy_reg: for pch_gbe_hal_read_phy_reg
+ * @write_phy_reg: for pch_gbe_hal_write_phy_reg
+ * @reset_phy: for pch_gbe_hal_phy_hw_reset
+ * @sw_reset_phy: for pch_gbe_hal_phy_sw_reset
+ * @power_up_phy: for pch_gbe_hal_power_up_phy
+ * @power_down_phy: for pch_gbe_hal_power_down_phy
+ * @read_mac_addr: for pch_gbe_hal_read_mac_addr
+ */
+struct pch_gbe_functions {
+ void (*get_bus_info) (struct pch_gbe_hw *);
+ s32 (*init_hw) (struct pch_gbe_hw *);
+ s32 (*read_phy_reg) (struct pch_gbe_hw *, u32, u16 *);
+ s32 (*write_phy_reg) (struct pch_gbe_hw *, u32, u16);
+ void (*reset_phy) (struct pch_gbe_hw *);
+ void (*sw_reset_phy) (struct pch_gbe_hw *);
+ void (*power_up_phy) (struct pch_gbe_hw *hw);
+ void (*power_down_phy) (struct pch_gbe_hw *hw);
+ s32 (*read_mac_addr) (struct pch_gbe_hw *);
+};
+
+/**
+ * struct pch_gbe_mac_info - MAC information
+ * @addr: MAC address
+ * @fc: Mode of flow control
+ * @fc_autoneg: Auto negotiation enable for flow control setting
+ * @tx_fc_enable: Enable flag of Transmit flow control
+ * @max_frame_size: Max transmit frame size
+ * @min_frame_size: Min transmit frame size
+ * @autoneg: Auto negotiation enable
+ * @link_speed: Link speed
+ * @link_duplex: Link duplex
+ */
+struct pch_gbe_mac_info {
+ u8 addr[6];
+ u8 fc;
+ u8 fc_autoneg;
+ u8 tx_fc_enable;
+ u32 max_frame_size;
+ u32 min_frame_size;
+ u8 autoneg;
+ u16 link_speed;
+ u16 link_duplex;
+};
+
+/**
+ * struct pch_gbe_phy_info - PHY information
+ * @addr: PHY address
+ * @id: PHY's identifier
+ * @revision: PHY's revision
+ * @reset_delay_us: HW reset delay time[us]
+ * @autoneg_advertised: Autoneg advertised
+ */
+struct pch_gbe_phy_info {
+ u32 addr;
+ u32 id;
+ u32 revision;
+ u32 reset_delay_us;
+ u16 autoneg_advertised;
+};
+
+/*!
+ * @ingroup Gigabit Ether driver Layer
+ * @struct pch_gbe_bus_info
+ * @brief Bus information
+ */
+struct pch_gbe_bus_info {
+ u8 type;
+ u8 speed;
+ u8 width;
+};
+
+/*!
+ * @ingroup Gigabit Ether driver Layer
+ * @struct pch_gbe_hw
+ * @brief Hardware information
+ */
+struct pch_gbe_hw {
+ void *back;
+
+ struct pch_gbe_regs __iomem *reg;
+ spinlock_t miim_lock;
+
+ const struct pch_gbe_functions *func;
+ struct pch_gbe_mac_info mac;
+ struct pch_gbe_phy_info phy;
+ struct pch_gbe_bus_info bus;
+};
+
+/**
+ * struct pch_gbe_rx_desc - Receive Descriptor
+ * @buffer_addr: RX Frame Buffer Address
+ * @tcp_ip_status: TCP/IP Accelerator Status
+ * @rx_words_eob: RX word count and Byte position
+ * @gbec_status: GMAC Status
+ * @dma_status: DMA Status
+ * @reserved1: Reserved
+ * @reserved2: Reserved
+ */
+struct pch_gbe_rx_desc {
+ u32 buffer_addr;
+ u32 tcp_ip_status;
+ u16 rx_words_eob;
+ u16 gbec_status;
+ u8 dma_status;
+ u8 reserved1;
+ u16 reserved2;
+};
+
+/**
+ * struct pch_gbe_tx_desc - Transmit Descriptor
+ * @buffer_addr: TX Frame Buffer Address
+ * @length: Data buffer length
+ * @reserved1: Reserved
+ * @tx_words_eob: TX word count and Byte position
+ * @tx_frame_ctrl: TX Frame Control
+ * @dma_status: DMA Status
+ * @reserved2: Reserved
+ * @gbec_status: GMAC Status
+ */
+struct pch_gbe_tx_desc {
+ u32 buffer_addr;
+ u16 length;
+ u16 reserved1;
+ u16 tx_words_eob;
+ u16 tx_frame_ctrl;
+ u8 dma_status;
+ u8 reserved2;
+ u16 gbec_status;
+};
+
+/**
+ * struct pch_gbe_buffer - Buffer information
+ * @skb: pointer to a socket buffer
+ * @dma: DMA address
+ * @rx_buffer: pointer to the receive buffer memory
+ * @time_stamp: time stamp
+ * @length: data size
+ * @mapped: flag indicating whether the buffer is DMA-mapped
+ */
+struct pch_gbe_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ unsigned char *rx_buffer;
+ unsigned long time_stamp;
+ u16 length;
+ bool mapped;
+};
+
+/**
+ * struct pch_gbe_tx_ring - tx ring information
+ * @tx_lock: spinlock structs
+ * @desc: pointer to the descriptor ring memory
+ * @dma: physical address of the descriptor ring
+ * @size: length of descriptor ring in bytes
+ * @count: number of descriptors in the ring
+ * @next_to_use: next descriptor to associate a buffer with
+ * @next_to_clean: next descriptor to check for DD status bit
+ * @buffer_info: array of buffer information structs
+ */
+struct pch_gbe_tx_ring {
+ spinlock_t tx_lock;
+ struct pch_gbe_tx_desc *desc;
+ dma_addr_t dma;
+ unsigned int size;
+ unsigned int count;
+ unsigned int next_to_use;
+ unsigned int next_to_clean;
+ struct pch_gbe_buffer *buffer_info;
+};
+
+/**
+ * struct pch_gbe_rx_ring - rx ring information
+ * @desc: pointer to the descriptor ring memory
+ * @dma: physical address of the descriptor ring
+ * @rx_buff_pool: pointer to the receive buffer pool memory
+ * @rx_buff_pool_logic: physical address of the receive buffer pool
+ * @rx_buff_pool_size: size of the receive buffer pool in bytes
+ * @size: length of descriptor ring in bytes
+ * @count: number of descriptors in the ring
+ * @next_to_use: next descriptor to associate a buffer with
+ * @next_to_clean: next descriptor to check for DD status bit
+ * @buffer_info: array of buffer information structs
+ */
+struct pch_gbe_rx_ring {
+ struct pch_gbe_rx_desc *desc;
+ dma_addr_t dma;
+ unsigned char *rx_buff_pool;
+ dma_addr_t rx_buff_pool_logic;
+ unsigned int rx_buff_pool_size;
+ unsigned int size;
+ unsigned int count;
+ unsigned int next_to_use;
+ unsigned int next_to_clean;
+ struct pch_gbe_buffer *buffer_info;
+};
+
+/**
+ * struct pch_gbe_hw_stats - Statistics counters collected by the MAC
+ * @rx_packets: total packets received
+ * @tx_packets: total packets transmitted
+ * @rx_bytes: total bytes received
+ * @tx_bytes: total bytes transmitted
+ * @rx_errors: bad packets received
+ * @tx_errors: packet transmit problems
+ * @rx_dropped: no space in Linux buffers
+ * @tx_dropped: no space available in Linux
+ * @multicast: multicast packets received
+ * @collisions: collisions
+ * @rx_crc_errors: received packet with crc error
+ * @rx_frame_errors: received frame alignment error
+ * @rx_alloc_buff_failed: allocate failure of a receive buffer
+ * @tx_length_errors: transmit length error
+ * @tx_aborted_errors: transmit aborted error
+ * @tx_carrier_errors: transmit carrier error
+ * @tx_timeout_count: Number of transmit timeouts
+ * @tx_restart_count: Number of transmit restarts
+ * @intr_rx_dsc_empty_count: Interrupt count of receive descriptor empty
+ * @intr_rx_frame_err_count: Interrupt count of receive frame error
+ * @intr_rx_fifo_err_count: Interrupt count of receive FIFO error
+ * @intr_rx_dma_err_count: Interrupt count of receive DMA error
+ * @intr_tx_fifo_err_count: Interrupt count of transmit FIFO error
+ * @intr_tx_dma_err_count: Interrupt count of transmit DMA error
+ * @intr_tcpip_err_count: Interrupt count of TCP/IP Accelerator errors
+ */
+struct pch_gbe_hw_stats {
+ u32 rx_packets;
+ u32 tx_packets;
+ u32 rx_bytes;
+ u32 tx_bytes;
+ u32 rx_errors;
+ u32 tx_errors;
+ u32 rx_dropped;
+ u32 tx_dropped;
+ u32 multicast;
+ u32 collisions;
+ u32 rx_crc_errors;
+ u32 rx_frame_errors;
+ u32 rx_alloc_buff_failed;
+ u32 tx_length_errors;
+ u32 tx_aborted_errors;
+ u32 tx_carrier_errors;
+ u32 tx_timeout_count;
+ u32 tx_restart_count;
+ u32 intr_rx_dsc_empty_count;
+ u32 intr_rx_frame_err_count;
+ u32 intr_rx_fifo_err_count;
+ u32 intr_rx_dma_err_count;
+ u32 intr_tx_fifo_err_count;
+ u32 intr_tx_dma_err_count;
+ u32 intr_tcpip_err_count;
+};
+
+/**
+ * struct pch_gbe_adapter - board specific private data structure
+ * @stats_lock: Spinlock structure for status
+ * @tx_queue_lock: Spinlock structure for transmit
+ * @ethtool_lock: Spinlock structure for ethtool
+ * @irq_sem: Semaphore for interrupt
+ * @netdev: Pointer of network device structure
+ * @pdev: Pointer of pci device structure
+ * @polling_netdev: Pointer of polling network device structure
+ * @napi: NAPI structure
+ * @hw: Pointer of hardware structure
+ * @stats: Hardware statistics
+ * @reset_task: Reset task
+ * @mii: MII information structure
+ * @watchdog_timer: Watchdog timer list
+ * @wake_up_evt: Wake up event
+ * @config_space: Configuration space
+ * @msg_enable: Driver message level
+ * @led_status: LED status
+ * @tx_ring: Pointer of Tx descriptor ring structure
+ * @rx_ring: Pointer of Rx descriptor ring structure
+ * @rx_buffer_len: Receive buffer length
+ * @tx_queue_len: Transmit queue length
+ * @have_msi: PCI MSI mode flag
+ * @rx_stop_flag: flag indicating that receive is stopped
+ */
+struct pch_gbe_adapter {
+ spinlock_t stats_lock;
+ spinlock_t tx_queue_lock;
+ spinlock_t ethtool_lock;
+ atomic_t irq_sem;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct net_device *polling_netdev;
+ struct napi_struct napi;
+ struct pch_gbe_hw hw;
+ struct pch_gbe_hw_stats stats;
+ struct work_struct reset_task;
+ struct mii_if_info mii;
+ struct timer_list watchdog_timer;
+ u32 wake_up_evt;
+ u32 *config_space;
+ unsigned long led_status;
+ struct pch_gbe_tx_ring *tx_ring;
+ struct pch_gbe_rx_ring *rx_ring;
+ unsigned long rx_buffer_len;
+ unsigned long tx_queue_len;
+ bool have_msi;
+ bool rx_stop_flag;
+};
+
+extern const char pch_driver_version[];
+
+/* pch_gbe_main.c */
+extern int pch_gbe_up(struct pch_gbe_adapter *adapter);
+extern void pch_gbe_down(struct pch_gbe_adapter *adapter);
+extern void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
+extern void pch_gbe_reset(struct pch_gbe_adapter *adapter);
+extern int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *txdr);
+extern int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rxdr);
+extern void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring);
+extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring);
+extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
+
+/* pch_gbe_param.c */
+extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
+
+/* pch_gbe_ethtool.c */
+extern void pch_gbe_set_ethtool_ops(struct net_device *netdev);
+
+/* pch_gbe_mac.c */
+extern s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
+extern s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
+extern u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw,
+ u32 addr, u32 dir, u32 reg, u16 data);
+#endif /* _PCH_GBE_H_ */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
new file mode 100644
index 000000000000..e48f084ad226
--- /dev/null
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This code was derived from the Intel e1000e Linux driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include "pch_gbe.h"
+#include "pch_gbe_phy.h"
+
+/* bus type values */
+#define pch_gbe_bus_type_unknown 0
+#define pch_gbe_bus_type_pci 1
+#define pch_gbe_bus_type_pcix 2
+#define pch_gbe_bus_type_pci_express 3
+#define pch_gbe_bus_type_reserved 4
+
+/* bus speed values */
+#define pch_gbe_bus_speed_unknown 0
+#define pch_gbe_bus_speed_33 1
+#define pch_gbe_bus_speed_66 2
+#define pch_gbe_bus_speed_100 3
+#define pch_gbe_bus_speed_120 4
+#define pch_gbe_bus_speed_133 5
+#define pch_gbe_bus_speed_2500 6
+#define pch_gbe_bus_speed_reserved 7
+
+/* bus width values */
+#define pch_gbe_bus_width_unknown 0
+#define pch_gbe_bus_width_pcie_x1 1
+#define pch_gbe_bus_width_pcie_x2 2
+#define pch_gbe_bus_width_pcie_x4 4
+#define pch_gbe_bus_width_32 5
+#define pch_gbe_bus_width_64 6
+#define pch_gbe_bus_width_reserved 7
+
+/**
+ * pch_gbe_plat_get_bus_info - Obtain bus information for adapter
+ * @hw: Pointer to the HW structure
+ */
+static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw)
+{
+ hw->bus.type = pch_gbe_bus_type_pci_express;
+ hw->bus.speed = pch_gbe_bus_speed_2500;
+ hw->bus.width = pch_gbe_bus_width_pcie_x1;
+}
+
+/**
+ * pch_gbe_plat_init_hw - Initialize hardware
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed-EBUSY
+ */
+static s32 pch_gbe_plat_init_hw(struct pch_gbe_hw *hw)
+{
+ s32 ret_val;
+
+ ret_val = pch_gbe_phy_get_id(hw);
+ if (ret_val) {
+ pr_err("pch_gbe_phy_get_id error\n");
+ return ret_val;
+ }
+ pch_gbe_phy_init_setting(hw);
+ /* Setup Mac interface option RGMII */
+#ifdef PCH_GBE_MAC_IFOP_RGMII
+ pch_gbe_phy_set_rgmii(hw);
+#endif
+ return ret_val;
+}
+
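+/* HAL dispatch table; the pch_gbe_hal_* wrappers call through these hooks. */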
+static const struct pch_gbe_functions pch_gbe_ops = {
+ .get_bus_info = pch_gbe_plat_get_bus_info,
+ .init_hw = pch_gbe_plat_init_hw,
+ .read_phy_reg = pch_gbe_phy_read_reg_miic,
+ .write_phy_reg = pch_gbe_phy_write_reg_miic,
+ .reset_phy = pch_gbe_phy_hw_reset,
+ .sw_reset_phy = pch_gbe_phy_sw_reset,
+ .power_up_phy = pch_gbe_phy_power_up,
+ .power_down_phy = pch_gbe_phy_power_down,
+ .read_mac_addr = pch_gbe_mac_read_mac_addr
+};
+
+/**
+ * pch_gbe_plat_init_function_pointers - Init func ptrs
+ * @hw: Pointer to the HW structure
+ */
+static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw)
+{
+ /* Set PHY parameter */
+ hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US;
+ /* Set function pointers */
+ hw->func = &pch_gbe_ops;
+}
+
+/**
+ * pch_gbe_hal_setup_init_funcs - Initializes function pointers
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successfully
+ * ENOSYS: Function is not registered
+ */
+inline s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw)
+{
+ if (!hw->reg) {
+ pr_err("ERROR: Registers not mapped\n");
+ return -ENOSYS;
+ }
+ pch_gbe_plat_init_function_pointers(hw);
+ return 0;
+}
+
+/**
+ * pch_gbe_hal_get_bus_info - Obtain bus information for adapter
+ * @hw: Pointer to the HW structure
+ */
+inline void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw)
+{
+ if (!hw->func->get_bus_info)
+ pr_err("ERROR: configuration\n");
+ else
+ hw->func->get_bus_info(hw);
+}
+
+/**
+ * pch_gbe_hal_init_hw - Initialize hardware
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successfully
+ * ENOSYS: Function is not registered
+ */
+inline s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw)
+{
+ if (!hw->func->init_hw) {
+ pr_err("ERROR: configuration\n");
+ return -ENOSYS;
+ }
+ return hw->func->init_hw(hw);
+}
+
+/**
+ * pch_gbe_hal_read_phy_reg - Reads PHY register
+ * @hw: Pointer to the HW structure
+ * @offset: The register to read
+ * @data: The buffer to store the 16-bit read.
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+inline s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset,
+ u16 *data)
+{
+ if (!hw->func->read_phy_reg)
+ return 0;
+ return hw->func->read_phy_reg(hw, offset, data);
+}
+
+/**
+ * pch_gbe_hal_write_phy_reg - Writes PHY register
+ * @hw: Pointer to the HW structure
+ * @offset: The register to write
+ * @data: The value to write.
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+inline s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset,
+ u16 data)
+{
+ if (!hw->func->write_phy_reg)
+ return 0;
+ return hw->func->write_phy_reg(hw, offset, data);
+}
+
+/**
+ * pch_gbe_hal_phy_hw_reset - Hard PHY reset
+ * @hw: Pointer to the HW structure
+ */
+inline void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw)
+{
+ if (!hw->func->reset_phy)
+ pr_err("ERROR: configuration\n");
+ else
+ hw->func->reset_phy(hw);
+}
+
+/**
+ * pch_gbe_hal_phy_sw_reset - Soft PHY reset
+ * @hw: Pointer to the HW structure
+ */
+inline void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw)
+{
+ if (!hw->func->sw_reset_phy)
+ pr_err("ERROR: configuration\n");
+ else
+ hw->func->sw_reset_phy(hw);
+}
+
+/**
+ * pch_gbe_hal_read_mac_addr - Reads MAC address
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successfully
+ * ENOSYS: Function is not registered
+ */
+inline s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw)
+{
+ if (!hw->func->read_mac_addr) {
+ pr_err("ERROR: configuration\n");
+ return -ENOSYS;
+ }
+ return hw->func->read_mac_addr(hw);
+}
+
+/**
+ * pch_gbe_hal_power_up_phy - Power up PHY
+ * @hw: Pointer to the HW structure
+ */
+inline void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw)
+{
+ if (hw->func->power_up_phy)
+ hw->func->power_up_phy(hw);
+}
+
+/**
+ * pch_gbe_hal_power_down_phy - Power down PHY
+ * @hw: Pointer to the HW structure
+ */
+inline void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw)
+{
+ if (hw->func->power_down_phy)
+ hw->func->power_down_phy(hw);
+}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
new file mode 100644
index 000000000000..94aaac5b057b
--- /dev/null
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This code was derived from the Intel e1000e Linux driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _PCH_GBE_API_H_
+#define _PCH_GBE_API_H_
+
+#include "pch_gbe_phy.h"
+
+s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw);
+void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw);
+s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw);
+s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 *data);
+s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 data);
+void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw);
+void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw);
+s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw);
+void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw);
+void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw);
+
+#endif
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
new file mode 100644
index 000000000000..ea2d8e41887a
--- /dev/null
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -0,0 +1,517 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This code was derived from the Intel e1000e Linux driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include "pch_gbe.h"
+#include "pch_gbe_api.h"
+
+/**
+ * pch_gbe_stats - Stats item information
+ */
+struct pch_gbe_stats {
+ char string[ETH_GSTRING_LEN];
+ size_t size;
+ size_t offset;
+};
+
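+/*
+ * Describe one pch_gbe_hw_stats member by name, size and offset so
+ * the ethtool statistics code can copy the counters generically.
+ */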
+#define PCH_GBE_STAT(m) \
+{ \
+ .string = #m, \
+ .size = FIELD_SIZEOF(struct pch_gbe_hw_stats, m), \
+ .offset = offsetof(struct pch_gbe_hw_stats, m), \
+}
+
+/**
+ * pch_gbe_gstrings_stats - ethtool statistics name list
+ */
+static const struct pch_gbe_stats pch_gbe_gstrings_stats[] = {
+ PCH_GBE_STAT(rx_packets),
+ PCH_GBE_STAT(tx_packets),
+ PCH_GBE_STAT(rx_bytes),
+ PCH_GBE_STAT(tx_bytes),
+ PCH_GBE_STAT(rx_errors),
+ PCH_GBE_STAT(tx_errors),
+ PCH_GBE_STAT(rx_dropped),
+ PCH_GBE_STAT(tx_dropped),
+ PCH_GBE_STAT(multicast),
+ PCH_GBE_STAT(collisions),
+ PCH_GBE_STAT(rx_crc_errors),
+ PCH_GBE_STAT(rx_frame_errors),
+ PCH_GBE_STAT(rx_alloc_buff_failed),
+ PCH_GBE_STAT(tx_length_errors),
+ PCH_GBE_STAT(tx_aborted_errors),
+ PCH_GBE_STAT(tx_carrier_errors),
+ PCH_GBE_STAT(tx_timeout_count),
+ PCH_GBE_STAT(tx_restart_count),
+ PCH_GBE_STAT(intr_rx_dsc_empty_count),
+ PCH_GBE_STAT(intr_rx_frame_err_count),
+ PCH_GBE_STAT(intr_rx_fifo_err_count),
+ PCH_GBE_STAT(intr_rx_dma_err_count),
+ PCH_GBE_STAT(intr_tx_fifo_err_count),
+ PCH_GBE_STAT(intr_tx_dma_err_count),
+ PCH_GBE_STAT(intr_tcpip_err_count)
+};
+
+#define PCH_GBE_QUEUE_STATS_LEN 0
+#define PCH_GBE_GLOBAL_STATS_LEN ARRAY_SIZE(pch_gbe_gstrings_stats)
+#define PCH_GBE_STATS_LEN (PCH_GBE_GLOBAL_STATS_LEN + PCH_GBE_QUEUE_STATS_LEN)
+
+#define PCH_GBE_MAC_REGS_LEN (sizeof(struct pch_gbe_regs) / 4)
+#define PCH_GBE_REGS_LEN (PCH_GBE_MAC_REGS_LEN + PCH_GBE_PHY_REGS_LEN)
+/**
+ * pch_gbe_get_settings - Get device-specific settings
+ * @netdev: Network interface device structure
+ * @ecmd: Ethtool command
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+static int pch_gbe_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
+ ret = mii_ethtool_gset(&adapter->mii, ecmd);
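+	/* Mask out capability bits this driver does not expose. */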
+ ecmd->supported &= ~(SUPPORTED_TP | SUPPORTED_1000baseT_Half);
+ ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half);
+
+ if (!netif_carrier_ok(adapter->netdev))
+ ethtool_cmd_speed_set(ecmd, -1);
+ return ret;
+}
+
+/**
+ * pch_gbe_set_settings - Set device-specific settings
+ * @netdev: Network interface device structure
+ * @ecmd: Ethtool command
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+static int pch_gbe_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 speed = ethtool_cmd_speed(ecmd);
+ int ret;
+
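+	/* Reset the PHY so the new settings are applied from a known state. */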
+ pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET);
+
+	/* when set_settings() is called with an ethtool_cmd previously
+	 * filled by get_settings() on a down link, speed is -1: */
+ if (speed == UINT_MAX) {
+ speed = SPEED_1000;
+ ecmd->duplex = DUPLEX_FULL;
+ }
+ ret = mii_ethtool_sset(&adapter->mii, ecmd);
+ if (ret) {
+ pr_err("Error: mii_ethtool_sset\n");
+ return ret;
+ }
+ hw->mac.link_speed = speed;
+ hw->mac.link_duplex = ecmd->duplex;
+ hw->phy.autoneg_advertised = ecmd->advertising;
+ hw->mac.autoneg = ecmd->autoneg;
+ pch_gbe_hal_phy_sw_reset(hw);
+
+ /* reset the link */
+ if (netif_running(adapter->netdev)) {
+ pch_gbe_down(adapter);
+ ret = pch_gbe_up(adapter);
+ } else {
+ pch_gbe_reset(adapter);
+ }
+ return ret;
+}
+
+/**
+ * pch_gbe_get_regs_len - Report the size of device registers
+ * @netdev: Network interface device structure
+ * Returns: the size of device registers.
+ */
+static int pch_gbe_get_regs_len(struct net_device *netdev)
+{
+ return PCH_GBE_REGS_LEN * (int)sizeof(u32);
+}
+
+/**
+ * pch_gbe_get_drvinfo - Report driver information
+ * @netdev: Network interface device structure
+ * @drvinfo: Driver information structure
+ */
+static void pch_gbe_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ strcpy(drvinfo->driver, KBUILD_MODNAME);
+ strcpy(drvinfo->version, pch_driver_version);
+ strcpy(drvinfo->fw_version, "N/A");
+ strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
+ drvinfo->regdump_len = pch_gbe_get_regs_len(netdev);
+}
+
+/**
+ * pch_gbe_get_regs - Get device registers
+ * @netdev: Network interface device structure
+ * @regs: Ethtool register structure
+ * @p: Buffer pointer of read device register data
+ */
+static void pch_gbe_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ u32 *regs_buff = p;
+ u16 i, tmp;
+
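+	/* Compose a dump version from the PCI revision and device IDs. */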
+ regs->version = 0x1000000 | (__u32)pdev->revision << 16 | pdev->device;
+ for (i = 0; i < PCH_GBE_MAC_REGS_LEN; i++)
+ *regs_buff++ = ioread32(&hw->reg->INT_ST + i);
+ /* PHY register */
+ for (i = 0; i < PCH_GBE_PHY_REGS_LEN; i++) {
+ pch_gbe_hal_read_phy_reg(&adapter->hw, i, &tmp);
+ *regs_buff++ = tmp;
+ }
+}
+
+/**
+ * pch_gbe_get_wol - Report whether Wake-on-Lan is enabled
+ * @netdev: Network interface device structure
+ * @wol: Wake-on-Lan information
+ */
+static void pch_gbe_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
+ wol->wolopts = 0;
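+	/* Translate the hardware wake-up event bits into ethtool WAKE_* flags. */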
+
+ if ((adapter->wake_up_evt & PCH_GBE_WLC_IND))
+ wol->wolopts |= WAKE_UCAST;
+ if ((adapter->wake_up_evt & PCH_GBE_WLC_MLT))
+ wol->wolopts |= WAKE_MCAST;
+ if ((adapter->wake_up_evt & PCH_GBE_WLC_BR))
+ wol->wolopts |= WAKE_BCAST;
+ if ((adapter->wake_up_evt & PCH_GBE_WLC_MP))
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+/**
+ * pch_gbe_set_wol - Turn Wake-on-Lan on or off
+ * @netdev: Network interface device structure
+ * @wol: Pointer of wake-on-Lan information structure
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+static int pch_gbe_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ if ((wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)))
+ return -EOPNOTSUPP;
+ /* these settings will always override what we currently have */
+ adapter->wake_up_evt = 0;
+
+ if ((wol->wolopts & WAKE_UCAST))
+ adapter->wake_up_evt |= PCH_GBE_WLC_IND;
+ if ((wol->wolopts & WAKE_MCAST))
+ adapter->wake_up_evt |= PCH_GBE_WLC_MLT;
+ if ((wol->wolopts & WAKE_BCAST))
+ adapter->wake_up_evt |= PCH_GBE_WLC_BR;
+ if ((wol->wolopts & WAKE_MAGIC))
+ adapter->wake_up_evt |= PCH_GBE_WLC_MP;
+ return 0;
+}
+
+/**
+ * pch_gbe_nway_reset - Restart autonegotiation
+ * @netdev: Network interface device structure
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+static int pch_gbe_nway_reset(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ return mii_nway_restart(&adapter->mii);
+}
+
+/**
+ * pch_gbe_get_ringparam - Report ring sizes
+ * @netdev: Network interface device structure
+ * @ring: Ring param structure
+ */
+static void pch_gbe_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_tx_ring *txdr = adapter->tx_ring;
+ struct pch_gbe_rx_ring *rxdr = adapter->rx_ring;
+
+ ring->rx_max_pending = PCH_GBE_MAX_RXD;
+ ring->tx_max_pending = PCH_GBE_MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rxdr->count;
+ ring->tx_pending = txdr->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+/**
+ * pch_gbe_set_ringparam - Set ring sizes
+ * @netdev: Network interface device structure
+ * @ring: Ring param structure
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+static int pch_gbe_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_tx_ring *txdr, *tx_old;
+ struct pch_gbe_rx_ring *rxdr, *rx_old;
+ int tx_ring_size, rx_ring_size;
+ int err = 0;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+ tx_ring_size = (int)sizeof(struct pch_gbe_tx_ring);
+ rx_ring_size = (int)sizeof(struct pch_gbe_rx_ring);
+
+ if ((netif_running(adapter->netdev)))
+ pch_gbe_down(adapter);
+ tx_old = adapter->tx_ring;
+ rx_old = adapter->rx_ring;
+
+ txdr = kzalloc(tx_ring_size, GFP_KERNEL);
+ if (!txdr) {
+ err = -ENOMEM;
+ goto err_alloc_tx;
+ }
+ rxdr = kzalloc(rx_ring_size, GFP_KERNEL);
+ if (!rxdr) {
+ err = -ENOMEM;
+ goto err_alloc_rx;
+ }
+ adapter->tx_ring = txdr;
+ adapter->rx_ring = rxdr;
+
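+	/*
+	 * Clamp the requested counts to the supported range and round up
+	 * to the descriptor multiple the hardware requires.
+	 */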
+ rxdr->count =
+ clamp_val(ring->rx_pending, PCH_GBE_MIN_RXD, PCH_GBE_MAX_RXD);
+ rxdr->count = roundup(rxdr->count, PCH_GBE_RX_DESC_MULTIPLE);
+
+	txdr->count =
+	    clamp_val(ring->tx_pending, PCH_GBE_MIN_TXD, PCH_GBE_MAX_TXD);
+ txdr->count = roundup(txdr->count, PCH_GBE_TX_DESC_MULTIPLE);
+
+ if ((netif_running(adapter->netdev))) {
+ /* Try to get new resources before deleting old */
+ err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
+ if (err)
+ goto err_setup_rx;
+ err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
+ if (err)
+ goto err_setup_tx;
+ /* save the new, restore the old in order to free it,
+ * then restore the new back again */
+#ifdef RINGFREE
+ adapter->rx_ring = rx_old;
+ adapter->tx_ring = tx_old;
+ pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
+ pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
+ kfree(tx_old);
+ kfree(rx_old);
+ adapter->rx_ring = rxdr;
+ adapter->tx_ring = txdr;
+#else
+ pch_gbe_free_rx_resources(adapter, rx_old);
+ pch_gbe_free_tx_resources(adapter, tx_old);
+ kfree(tx_old);
+ kfree(rx_old);
+ adapter->rx_ring = rxdr;
+ adapter->tx_ring = txdr;
+#endif
+ err = pch_gbe_up(adapter);
+ }
+ return err;
+
+err_setup_tx:
+ pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
+err_setup_rx:
+ adapter->rx_ring = rx_old;
+ adapter->tx_ring = tx_old;
+ kfree(rxdr);
+err_alloc_rx:
+ kfree(txdr);
+err_alloc_tx:
+ if (netif_running(adapter->netdev))
+ pch_gbe_up(adapter);
+ return err;
+}
+
+/**
+ * pch_gbe_get_pauseparam - Report pause parameters
+ * @netdev: Network interface device structure
+ * @pause: Pause parameters structure
+ */
+static void pch_gbe_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ pause->autoneg =
+ ((hw->mac.fc_autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ if (hw->mac.fc == PCH_GBE_FC_RX_PAUSE) {
+ pause->rx_pause = 1;
+ } else if (hw->mac.fc == PCH_GBE_FC_TX_PAUSE) {
+ pause->tx_pause = 1;
+ } else if (hw->mac.fc == PCH_GBE_FC_FULL) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+/**
+ * pch_gbe_set_pauseparam - Set pause parameters
+ * @netdev: Network interface device structure
+ * @pause: Pause parameters structure
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+static int pch_gbe_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ int ret = 0;
+
+ hw->mac.fc_autoneg = pause->autoneg;
+ if ((pause->rx_pause) && (pause->tx_pause))
+ hw->mac.fc = PCH_GBE_FC_FULL;
+ else if ((pause->rx_pause) && (!pause->tx_pause))
+ hw->mac.fc = PCH_GBE_FC_RX_PAUSE;
+ else if ((!pause->rx_pause) && (pause->tx_pause))
+ hw->mac.fc = PCH_GBE_FC_TX_PAUSE;
+ else if ((!pause->rx_pause) && (!pause->tx_pause))
+ hw->mac.fc = PCH_GBE_FC_NONE;
+
+ if (hw->mac.fc_autoneg == AUTONEG_ENABLE) {
+ if ((netif_running(adapter->netdev))) {
+ pch_gbe_down(adapter);
+ ret = pch_gbe_up(adapter);
+ } else {
+ pch_gbe_reset(adapter);
+ }
+ } else {
+ ret = pch_gbe_mac_force_mac_fc(hw);
+ }
+ return ret;
+}
+
+/**
+ * pch_gbe_get_strings - Return the strings describing the requested objects
+ * @netdev: Network interface device structure
+ * @stringset: Select the stringset. [ETH_SS_TEST] [ETH_SS_STATS]
+ * @data: Pointer to the string output area
+ */
+static void pch_gbe_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < PCH_GBE_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, pch_gbe_gstrings_stats[i].string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+/**
+ * pch_gbe_get_ethtool_stats - Return statistics about the device
+ * @netdev: Network interface device structure
+ * @stats: Ethtool stats structure
+ * @data: Pointer to the statistics output area
+ */
+static void pch_gbe_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ int i;
+ const struct pch_gbe_stats *gstats = pch_gbe_gstrings_stats;
+ char *hw_stats = (char *)&adapter->stats;
+
+ pch_gbe_update_stats(adapter);
+ for (i = 0; i < PCH_GBE_GLOBAL_STATS_LEN; i++) {
+ char *p = hw_stats + gstats->offset;
+ data[i] = gstats->size == sizeof(u64) ? *(u64 *)p : *(u32 *)p;
+ gstats++;
+ }
+}
+
+static int pch_gbe_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return PCH_GBE_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct ethtool_ops pch_gbe_ethtool_ops = {
+ .get_settings = pch_gbe_get_settings,
+ .set_settings = pch_gbe_set_settings,
+ .get_drvinfo = pch_gbe_get_drvinfo,
+ .get_regs_len = pch_gbe_get_regs_len,
+ .get_regs = pch_gbe_get_regs,
+ .get_wol = pch_gbe_get_wol,
+ .set_wol = pch_gbe_set_wol,
+ .nway_reset = pch_gbe_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = pch_gbe_get_ringparam,
+ .set_ringparam = pch_gbe_set_ringparam,
+ .get_pauseparam = pch_gbe_get_pauseparam,
+ .set_pauseparam = pch_gbe_set_pauseparam,
+ .get_strings = pch_gbe_get_strings,
+ .get_ethtool_stats = pch_gbe_get_ethtool_stats,
+ .get_sset_count = pch_gbe_get_sset_count,
+};
+
+void pch_gbe_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &pch_gbe_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
new file mode 100644
index 000000000000..b89f3a684aec
--- /dev/null
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -0,0 +1,2604 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ *
+ * This code was derived from the Intel e1000e Linux driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include "pch_gbe.h"
+#include "pch_gbe_api.h"
+
+#define DRV_VERSION "1.00"
+const char pch_driver_version[] = DRV_VERSION;
+
+#define PCI_DEVICE_ID_INTEL_IOH1_GBE 0x8802 /* PCI device ID */
+#define PCH_GBE_MAR_ENTRIES 16
+#define PCH_GBE_SHORT_PKT 64
+#define DSC_INIT16 0xC000
+#define PCH_GBE_DMA_ALIGN 0
+#define PCH_GBE_DMA_PADDING 2
+#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
+#define PCH_GBE_COPYBREAK_DEFAULT 256
+#define PCH_GBE_PCI_BAR 1
+#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
+
+/* Macros for ML7223 */
+#define PCI_VENDOR_ID_ROHM 0x10db
+#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
+
+/* Macros for ML7831 */
+#define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802
+
+#define PCH_GBE_TX_WEIGHT 64
+#define PCH_GBE_RX_WEIGHT 64
+#define PCH_GBE_RX_BUFFER_WRITE 16
+
+/* Initialize the wake-on-LAN settings */
+#define PCH_GBE_WL_INIT_SETTING (PCH_GBE_WLC_MP)
+
+#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
+ PCH_GBE_CHIP_TYPE_INTERNAL | \
+ PCH_GBE_RGMII_MODE_RGMII \
+ )
+
+/* Receive buffer and frame size values */
+#define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880
+#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318
+#define PCH_GBE_FRAME_SIZE_2048 2048
+#define PCH_GBE_FRAME_SIZE_4096 4096
+#define PCH_GBE_FRAME_SIZE_8192 8192
+
+#define PCH_GBE_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
+#define PCH_GBE_RX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
+#define PCH_GBE_TX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
+#define PCH_GBE_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
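+/*
+ * Worked example for PCH_GBE_DESC_UNUSED (a sketch, not driver code):
+ * with (R)->count = 8, next_to_clean = 2 and next_to_use = 5, the
+ * ternary selects the count (2 > 5 is false), giving 8 + 2 - 5 - 1 = 4
+ * free descriptors. The "- 1" keeps one slot permanently unused so a
+ * full ring can be distinguished from an empty one.
+ */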
+
+/* Pause packet value */
+#define PCH_GBE_PAUSE_PKT1_VALUE 0x00C28001
+#define PCH_GBE_PAUSE_PKT2_VALUE 0x00000100
+#define PCH_GBE_PAUSE_PKT4_VALUE 0x01000888
+#define PCH_GBE_PAUSE_PKT5_VALUE 0x0000FFFF
+
+#define PCH_GBE_ETH_ALEN 6
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ *   o RX_DMA_CMPLT = Receive DMA completion
+ *   o RX_DSC_EMP = Receive descriptor empty
+ *   o RX_FIFO_ERR = Receive FIFO error (overrun)
+ *   o WOL_DET = Wake-on-LAN event detected
+ *   o TX_CMPLT = Transmit completion
+ */
+#define PCH_GBE_INT_ENABLE_MASK ( \
+ PCH_GBE_INT_RX_DMA_CMPLT | \
+ PCH_GBE_INT_RX_DSC_EMP | \
+ PCH_GBE_INT_RX_FIFO_ERR | \
+ PCH_GBE_INT_WOL_DET | \
+ PCH_GBE_INT_TX_CMPLT \
+ )
+
+#define PCH_GBE_INT_DISABLE_ALL 0
+
+static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
+
+static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
+static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
+ int data);
+
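+/**
+ * pch_gbe_mac_load_mac_addr - Trigger a hardware reload of the station address
+ * @hw: Pointer to the HW structure
+ *
+ * Writing 1 to MAC_ADDR_LOAD kicks the hardware to (re)load its MAC
+ * address; the register name suggests a load from non-volatile setup,
+ * though that source is an assumption here.
+ */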
+inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
+{
+ iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
+}
+
+/**
+ * pch_gbe_mac_read_mac_addr - Read MAC address
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successful.
+ */
+s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
+{
+ u32 adr1a, adr1b;
+
+ adr1a = ioread32(&hw->reg->mac_adr[0].high);
+ adr1b = ioread32(&hw->reg->mac_adr[0].low);
+
+ hw->mac.addr[0] = (u8)(adr1a & 0xFF);
+ hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
+ hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
+ hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
+ hw->mac.addr[4] = (u8)(adr1b & 0xFF);
+ hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
+
+ pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
+ return 0;
+}
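+/*
+ * Illustration (byte layout inferred from the shifts above): register
+ * values adr1a = 0x33221100 and adr1b = 0x00005544 would decode to the
+ * MAC address 00:11:22:33:44:55, i.e. the hardware stores the address
+ * little-endian across the high/low register pair.
+ */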
+
+/**
+ * pch_gbe_wait_clr_bit - Wait for a bit to clear
+ * @reg: Pointer to the register
+ * @bit: Bit to wait on
+ */
+static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
+{
+ u32 tmp;
+ /* wait busy */
+ tmp = 1000;
+ while ((ioread32(reg) & bit) && --tmp)
+ cpu_relax();
+ if (!tmp)
+ pr_err("Error: busy bit is not cleared\n");
+}
+
+/**
+ * pch_gbe_wait_clr_bit_irq - Wait for a bit to clear, for interrupt context
+ * @reg: Pointer to the register
+ * @bit: Bit to wait on
+ */
+static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
+{
+ u32 tmp;
+ int ret = -1;
+ /* wait busy */
+ tmp = 20;
+ while ((ioread32(reg) & bit) && --tmp)
+ udelay(5);
+ if (!tmp)
+ pr_err("Error: busy bit is not cleared\n");
+ else
+ ret = 0;
+ return ret;
+}
+
+/**
+ * pch_gbe_mac_mar_set - Set MAC address register
+ * @hw: Pointer to the HW structure
+ * @addr: Pointer to the MAC address
+ * @index: MAC address array register
+ */
+static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index)
+{
+ u32 mar_low, mar_high, adrmask;
+
+ pr_debug("index : 0x%x\n", index);
+
+ /*
+ * HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+ mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
+ /* Disable the MAC address at this index. */
+ adrmask = ioread32(&hw->reg->ADDR_MASK);
+ iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+ /* Set the MAC address to the MAC address 1A/1B register */
+ iowrite32(mar_high, &hw->reg->mac_adr[index].high);
+ iowrite32(mar_low, &hw->reg->mac_adr[index].low);
+ /* Re-enable the MAC address at this index */
+ iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+}
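+/*
+ * Note on the sequence above: the entry is first masked out via
+ * ADDR_MASK (filtering on that slot stops), the busy bit is polled,
+ * the new address is programmed, and only then is the mask bit cleared
+ * again. This appears to keep the filter from matching against a
+ * half-written address while the two register words are updated.
+ */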
+
+/**
+ * pch_gbe_mac_reset_hw - Reset hardware
+ * @hw: Pointer to the HW structure
+ */
+static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
+{
+ /* Read the MAC address and store it in the private data */
+ pch_gbe_mac_read_mac_addr(hw);
+ iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
+#ifdef PCH_GBE_MAC_IFOP_RGMII
+ iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
+#endif
+ pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
+ /* Setup the receive address */
+ pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
+ return;
+}
+
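+/**
+ * pch_gbe_mac_reset_rx - Reset the receive unit only
+ * @hw: Pointer to the HW structure
+ */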
+static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
+{
+ /* Read the MAC address and store it in the private data */
+ pch_gbe_mac_read_mac_addr(hw);
+ iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
+ pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
+ /* Setup the MAC address */
+ pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
+ return;
+}
+
+/**
+ * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
+ * @hw: Pointer to the HW structure
+ * @mar_count: Number of receive address registers
+ */
+static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
+{
+ u32 i;
+
+ /* Setup the receive address */
+ pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
+
+ /* Zero out the other receive addresses */
+ for (i = 1; i < mar_count; i++) {
+ iowrite32(0, &hw->reg->mac_adr[i].high);
+ iowrite32(0, &hw->reg->mac_adr[i].low);
+ }
+ iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+}
+
+/**
+ * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
+ * @hw: Pointer to the HW structure
+ * @mc_addr_list: Array of multicast addresses to program
+ * @mc_addr_count: Number of multicast addresses to program
+ * @mar_used_count: The first MAC Address register free to program
+ * @mar_total_num: Total number of supported MAC Address Registers
+ */
+static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count,
+ u32 mar_used_count, u32 mar_total_num)
+{
+ u32 i, adrmask;
+
+ /* Load the first set of multicast addresses into the exact
+ * filters (RAR). If there are not enough to fill the RAR
+ * array, clear the filters.
+ */
+ for (i = mar_used_count; i < mar_total_num; i++) {
+ if (mc_addr_count) {
+ pch_gbe_mac_mar_set(hw, mc_addr_list, i);
+ mc_addr_count--;
+ mc_addr_list += PCH_GBE_ETH_ALEN;
+ } else {
+ /* Clear MAC address mask */
+ adrmask = ioread32(&hw->reg->ADDR_MASK);
+ iowrite32((adrmask | (0x0001 << i)),
+ &hw->reg->ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+ /* Clear MAC address */
+ iowrite32(0, &hw->reg->mac_adr[i].high);
+ iowrite32(0, &hw->reg->mac_adr[i].low);
+ }
+ }
+}
+
+/**
+ * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
+{
+ struct pch_gbe_mac_info *mac = &hw->mac;
+ u32 rx_fctrl;
+
+ pr_debug("mac->fc = %u\n", mac->fc);
+
+ rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
+
+ switch (mac->fc) {
+ case PCH_GBE_FC_NONE:
+ rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
+ mac->tx_fc_enable = false;
+ break;
+ case PCH_GBE_FC_RX_PAUSE:
+ rx_fctrl |= PCH_GBE_FL_CTRL_EN;
+ mac->tx_fc_enable = false;
+ break;
+ case PCH_GBE_FC_TX_PAUSE:
+ rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
+ mac->tx_fc_enable = true;
+ break;
+ case PCH_GBE_FC_FULL:
+ rx_fctrl |= PCH_GBE_FL_CTRL_EN;
+ mac->tx_fc_enable = true;
+ break;
+ default:
+ pr_err("Flow control param set incorrectly\n");
+ return -EINVAL;
+ }
+ if (mac->link_duplex == DUPLEX_HALF)
+ rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
+ iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
+ pr_debug("RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n",
+ ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
+ return 0;
+}
+
+/**
+ * pch_gbe_mac_set_wol_event - Set wake-on-lan event
+ * @hw: Pointer to the HW structure
+ * @wu_evt: Wake up event
+ */
+static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
+{
+ u32 addr_mask;
+
+ pr_debug("wu_evt : 0x%08x ADDR_MASK reg : 0x%08x\n",
+ wu_evt, ioread32(&hw->reg->ADDR_MASK));
+
+ if (wu_evt) {
+ /* Set Wake-On-Lan address mask */
+ addr_mask = ioread32(&hw->reg->ADDR_MASK);
+ iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
+ iowrite32(0, &hw->reg->WOL_ST);
+ iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
+ iowrite32(0x02, &hw->reg->TCPIP_ACC);
+ iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
+ } else {
+ iowrite32(0, &hw->reg->WOL_CTRL);
+ iowrite32(0, &hw->reg->WOL_ST);
+ }
+ return;
+}
+
+/**
+ * pch_gbe_mac_ctrl_miim - Control MIIM interface
+ * @hw: Pointer to the HW structure
+ * @addr: Address of PHY
+ * @dir: Operation (write or read)
+ * @reg: Access register of PHY
+ * @data: Write data.
+ *
+ * Returns: Read data.
+ */
+u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
+ u16 data)
+{
+ u32 data_out = 0;
+ unsigned int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->miim_lock, flags);
+
+ for (i = 100; i; --i) {
+ if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
+ break;
+ udelay(20);
+ }
+ if (i == 0) {
+ pr_err("pch-gbe.miim won't go Ready\n");
+ spin_unlock_irqrestore(&hw->miim_lock, flags);
+ return 0; /* No way to indicate timeout error */
+ }
+ iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
+ (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
+ dir | data), &hw->reg->MIIM);
+ for (i = 0; i < 100; i++) {
+ udelay(20);
+ data_out = ioread32(&hw->reg->MIIM);
+ if ((data_out & PCH_GBE_MIIM_OPER_READY))
+ break;
+ }
+ spin_unlock_irqrestore(&hw->miim_lock, flags);
+
+ pr_debug("PHY %s: reg=%d, data=0x%04X\n",
+ dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
+ dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
+ return (u16) data_out;
+}
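+/*
+ * Illustrative use (a sketch; it mirrors pch_gbe_mdio_read() below):
+ *
+ *	u16 bmsr = pch_gbe_mac_ctrl_miim(hw, hw->phy.addr,
+ *					 PCH_GBE_MIIM_OPER_READ,
+ *					 MII_BMSR, (u16)0);
+ *
+ * For a write, @dir selects the write operation and @data carries the
+ * value; the returned word is only meaningful for reads.
+ */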
+
+/**
+ * pch_gbe_mac_set_pause_packet - Set pause packet
+ * @hw: Pointer to the HW structure
+ */
+static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
+{
+ unsigned long tmp2, tmp3;
+
+ /* Set Pause packet */
+ tmp2 = hw->mac.addr[1];
+ tmp2 = (tmp2 << 8) | hw->mac.addr[0];
+ tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);
+
+ tmp3 = hw->mac.addr[5];
+ tmp3 = (tmp3 << 8) | hw->mac.addr[4];
+ tmp3 = (tmp3 << 8) | hw->mac.addr[3];
+ tmp3 = (tmp3 << 8) | hw->mac.addr[2];
+
+ iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
+ iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
+ iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
+ iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
+ iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);
+
+ /* Transmit Pause Packet */
+ iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
+
+ pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
+ ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
+ ioread32(&hw->reg->PAUSE_PKT5));
+
+ return;
+}
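+/*
+ * The constants above appear to encode a standard 802.3x PAUSE frame,
+ * assembled little-endian across the PAUSE_PKT1-5 registers: PKT1/PKT2
+ * hold the reserved multicast destination 01:80:C2:00:00:01 plus the
+ * first source-address bytes, PKT3 the remaining source bytes, PKT4
+ * the MAC control ethertype 0x8808 and opcode 0x0001, and PKT5 the
+ * maximum pause time 0xFFFF.
+ */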
+
+/**
+ * pch_gbe_alloc_queues - Allocate memory for all rings
+ * @adapter: Board private structure to initialize
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
+{
+ int size;
+
+ size = (int)sizeof(struct pch_gbe_tx_ring);
+ adapter->tx_ring = kzalloc(size, GFP_KERNEL);
+ if (!adapter->tx_ring)
+ return -ENOMEM;
+ size = (int)sizeof(struct pch_gbe_rx_ring);
+ adapter->rx_ring = kzalloc(size, GFP_KERNEL);
+ if (!adapter->rx_ring) {
+ kfree(adapter->tx_ring);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * pch_gbe_init_stats - Initialize statistics
+ * @adapter: Board private structure to initialize
+ */
+static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
+{
+ memset(&adapter->stats, 0, sizeof(adapter->stats));
+ return;
+}
+
+/**
+ * pch_gbe_init_phy - Initialize PHY
+ * @adapter: Board private structure to initialize
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 addr;
+ u16 bmcr, stat;
+
+ /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
+ for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
+ adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
+ bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
+ stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
+ stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
+ if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
+ break;
+ }
+ adapter->hw.phy.addr = adapter->mii.phy_id;
+ pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
+ if (addr == 32)
+ return -EAGAIN;
+ /* Select the PHY and isolate the rest */
+ for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
+ if (addr != adapter->mii.phy_id) {
+ pch_gbe_mdio_write(netdev, addr, MII_BMCR,
+ BMCR_ISOLATE);
+ } else {
+ bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
+ pch_gbe_mdio_write(netdev, addr, MII_BMCR,
+ bmcr & ~BMCR_ISOLATE);
+ }
+ }
+
+ /* MII setup */
+ adapter->mii.phy_id_mask = 0x1F;
+ adapter->mii.reg_num_mask = 0x1F;
+ adapter->mii.dev = adapter->netdev;
+ adapter->mii.mdio_read = pch_gbe_mdio_read;
+ adapter->mii.mdio_write = pch_gbe_mdio_write;
+ adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
+ return 0;
+}
+
+/**
+ * pch_gbe_mdio_read - The read function for mii
+ * @netdev: Network interface device structure
+ * @addr: Phy ID
+ * @reg: Access location
+ * Returns
+ * The value read from the PHY register
+ */
+static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
+ (u16) 0);
+}
+
+/**
+ * pch_gbe_mdio_write - The write function for mii
+ * @netdev: Network interface device structure
+ * @addr: Phy ID
+ * @reg: Access location
+ * @data: Write data
+ */
+static void pch_gbe_mdio_write(struct net_device *netdev,
+ int addr, int reg, int data)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
+}
+
+/**
+ * pch_gbe_reset_task - Reset processing on transmit timeout
+ * @work: Pointer of board private structure
+ */
+static void pch_gbe_reset_task(struct work_struct *work)
+{
+ struct pch_gbe_adapter *adapter;
+ adapter = container_of(work, struct pch_gbe_adapter, reset_task);
+
+ rtnl_lock();
+ pch_gbe_reinit_locked(adapter);
+ rtnl_unlock();
+}
+
+/**
+ * pch_gbe_reinit_locked - Re-initialization
+ * @adapter: Board private structure
+ */
+void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
+{
+ pch_gbe_down(adapter);
+ pch_gbe_up(adapter);
+}
+
+/**
+ * pch_gbe_reset - Reset GbE
+ * @adapter: Board private structure
+ */
+void pch_gbe_reset(struct pch_gbe_adapter *adapter)
+{
+ pch_gbe_mac_reset_hw(&adapter->hw);
+ /* Setup the receive address. */
+ pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
+ if (pch_gbe_hal_init_hw(&adapter->hw))
+ pr_err("Hardware Error\n");
+}
+
+/**
+ * pch_gbe_free_irq - Free an interrupt
+ * @adapter: Board private structure
+ */
+static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ free_irq(adapter->pdev->irq, netdev);
+ if (adapter->have_msi) {
+ pci_disable_msi(adapter->pdev);
+ pr_debug("call pci_disable_msi\n");
+ }
+}
+
+/**
+ * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: Board private structure
+ */
+static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ atomic_inc(&adapter->irq_sem);
+ iowrite32(0, &hw->reg->INT_EN);
+ ioread32(&hw->reg->INT_ST);
+ synchronize_irq(adapter->pdev->irq);
+
+ pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
+}
+
+/**
+ * pch_gbe_irq_enable - Enable default interrupt generation settings
+ * @adapter: Board private structure
+ */
+static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ if (likely(atomic_dec_and_test(&adapter->irq_sem)))
+ iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
+ ioread32(&hw->reg->INT_ST);
+ pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
+}
+
+/**
+ * pch_gbe_setup_tctl - configure the Transmit control registers
+ * @adapter: Board private structure
+ */
+static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 tx_mode, tcpip;
+
+ tx_mode = PCH_GBE_TM_LONG_PKT |
+ PCH_GBE_TM_ST_AND_FD |
+ PCH_GBE_TM_SHORT_PKT |
+ PCH_GBE_TM_TH_TX_STRT_8 |
+ PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;
+
+ iowrite32(tx_mode, &hw->reg->TX_MODE);
+
+ tcpip = ioread32(&hw->reg->TCPIP_ACC);
+ tcpip |= PCH_GBE_TX_TCPIPACC_EN;
+ iowrite32(tcpip, &hw->reg->TCPIP_ACC);
+ return;
+}
+
+/**
+ * pch_gbe_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: Board private structure
+ */
+static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 tdba, tdlen, dctrl;
+
+ pr_debug("dma addr = 0x%08llx size = 0x%08x\n",
+ (unsigned long long)adapter->tx_ring->dma,
+ adapter->tx_ring->size);
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ tdba = adapter->tx_ring->dma;
+ tdlen = adapter->tx_ring->size - 0x10;
+ iowrite32(tdba, &hw->reg->TX_DSC_BASE);
+ iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
+ iowrite32(tdba, &hw->reg->TX_DSC_SW_P);
+
+ /* Enables Transmission DMA */
+ dctrl = ioread32(&hw->reg->DMA_CTRL);
+ dctrl |= PCH_GBE_TX_DMA_EN;
+ iowrite32(dctrl, &hw->reg->DMA_CTRL);
+}
+
+/**
+ * pch_gbe_setup_rctl - Configure the receive control registers
+ * @adapter: Board private structure
+ */
+static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 rx_mode, tcpip;
+
+ rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
+ PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;
+
+ iowrite32(rx_mode, &hw->reg->RX_MODE);
+
+ tcpip = ioread32(&hw->reg->TCPIP_ACC);
+
+ tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
+ tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
+ iowrite32(tcpip, &hw->reg->TCPIP_ACC);
+ return;
+}
+
+/**
+ * pch_gbe_configure_rx - Configure Receive Unit after Reset
+ * @adapter: Board private structure
+ */
+static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 rdba, rdlen, rctl, rxdma;
+
+ pr_debug("dma adr = 0x%08llx size = 0x%08x\n",
+ (unsigned long long)adapter->rx_ring->dma,
+ adapter->rx_ring->size);
+
+ pch_gbe_mac_force_mac_fc(hw);
+
+ /* Disables Receive MAC */
+ rctl = ioread32(&hw->reg->MAC_RX_EN);
+ iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
+
+ /* Disables Receive DMA */
+ rxdma = ioread32(&hw->reg->DMA_CTRL);
+ rxdma &= ~PCH_GBE_RX_DMA_EN;
+ iowrite32(rxdma, &hw->reg->DMA_CTRL);
+
+ pr_debug("MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n",
+ ioread32(&hw->reg->MAC_RX_EN),
+ ioread32(&hw->reg->DMA_CTRL));
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring */
+ rdba = adapter->rx_ring->dma;
+ rdlen = adapter->rx_ring->size - 0x10;
+ iowrite32(rdba, &hw->reg->RX_DSC_BASE);
+ iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
+ iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
+}
+
+/**
+ * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
+ * @adapter: Board private structure
+ * @buffer_info: Buffer information structure
+ */
+static void pch_gbe_unmap_and_free_tx_resource(
+ struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
+{
+ if (buffer_info->mapped) {
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
+ buffer_info->mapped = false;
+ }
+ if (buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+}
+
+/**
+ * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
+ * @adapter: Board private structure
+ * @buffer_info: Buffer information structure
+ */
+static void pch_gbe_unmap_and_free_rx_resource(
+ struct pch_gbe_adapter *adapter,
+ struct pch_gbe_buffer *buffer_info)
+{
+ if (buffer_info->mapped) {
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
+ buffer_info->mapped = false;
+ }
+ if (buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+}
+
+/**
+ * pch_gbe_clean_tx_ring - Free Tx Buffers
+ * @adapter: Board private structure
+ * @tx_ring: Ring to be cleaned
+ */
+static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct pch_gbe_buffer *buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+ pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
+ }
+ pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
+
+ size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
+ memset(tx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
+ iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
+}
+
+/**
+ * pch_gbe_clean_rx_ring - Free Rx Buffers
+ * @adapter: Board private structure
+ * @rx_ring: Ring to free buffers from
+ */
+static void
+pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct pch_gbe_buffer *buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
+ }
+ pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
+ size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
+ memset(rx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
+ iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
+}
+
+static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
+ u16 duplex)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ unsigned long rgmii = 0;
+
+ /* Set the RGMII control. */
+#ifdef PCH_GBE_MAC_IFOP_RGMII
+ switch (speed) {
+ case SPEED_10:
+ rgmii = (PCH_GBE_RGMII_RATE_2_5M |
+ PCH_GBE_MAC_RGMII_CTRL_SETTING);
+ break;
+ case SPEED_100:
+ rgmii = (PCH_GBE_RGMII_RATE_25M |
+ PCH_GBE_MAC_RGMII_CTRL_SETTING);
+ break;
+ case SPEED_1000:
+ rgmii = (PCH_GBE_RGMII_RATE_125M |
+ PCH_GBE_MAC_RGMII_CTRL_SETTING);
+ break;
+ }
+ iowrite32(rgmii, &hw->reg->RGMII_CTRL);
+#else /* GMII */
+ rgmii = 0;
+ iowrite32(rgmii, &hw->reg->RGMII_CTRL);
+#endif
+}
+
+static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
+ u16 duplex)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_hw *hw = &adapter->hw;
+ unsigned long mode = 0;
+
+ /* Set the communication mode */
+ switch (speed) {
+ case SPEED_10:
+ mode = PCH_GBE_MODE_MII_ETHER;
+ netdev->tx_queue_len = 10;
+ break;
+ case SPEED_100:
+ mode = PCH_GBE_MODE_MII_ETHER;
+ netdev->tx_queue_len = 100;
+ break;
+ case SPEED_1000:
+ mode = PCH_GBE_MODE_GMII_ETHER;
+ break;
+ }
+ if (duplex == DUPLEX_FULL)
+ mode |= PCH_GBE_MODE_FULL_DUPLEX;
+ else
+ mode |= PCH_GBE_MODE_HALF_DUPLEX;
+ iowrite32(mode, &hw->reg->MODE);
+}
+
+/**
+ * pch_gbe_watchdog - Watchdog process
+ * @data: Board private structure
+ */
+static void pch_gbe_watchdog(unsigned long data)
+{
+ struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
+ struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ pr_debug("right now = %ld\n", jiffies);
+
+ pch_gbe_update_stats(adapter);
+ if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
+ struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
+ netdev->tx_queue_len = adapter->tx_queue_len;
+ /* mii library handles link maintenance tasks */
+ if (mii_ethtool_gset(&adapter->mii, &cmd)) {
+ pr_err("ethtool get setting Error\n");
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies +
+ PCH_GBE_WATCHDOG_PERIOD));
+ return;
+ }
+ hw->mac.link_speed = ethtool_cmd_speed(&cmd);
+ hw->mac.link_duplex = cmd.duplex;
+ /* Set the RGMII control. */
+ pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
+ hw->mac.link_duplex);
+ /* Set the communication mode */
+ pch_gbe_set_mode(adapter, hw->mac.link_speed,
+ hw->mac.link_duplex);
+ netdev_dbg(netdev,
+ "Link is Up %d Mbps %s-Duplex\n",
+ hw->mac.link_speed,
+ cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ } else if ((!mii_link_ok(&adapter->mii)) &&
+ (netif_carrier_ok(netdev))) {
+ netdev_dbg(netdev, "NIC Link is Down\n");
+ hw->mac.link_speed = SPEED_10;
+ hw->mac.link_duplex = DUPLEX_HALF;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
+}
+
+/**
+ * pch_gbe_tx_queue - Carry out queuing of the transmission data
+ * @adapter: Board private structure
+ * @tx_ring: Tx descriptor ring structure
+ * @skb: Socket buffer structure
+ */
+static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring,
+ struct sk_buff *skb)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct pch_gbe_tx_desc *tx_desc;
+ struct pch_gbe_buffer *buffer_info;
+ struct sk_buff *tmp_skb;
+ unsigned int frame_ctrl;
+ unsigned int ring_num;
+ unsigned long flags;
+
+ /*-- Set frame control --*/
+ frame_ctrl = 0;
+ if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
+ frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
+ if (skb->ip_summed == CHECKSUM_NONE)
+ frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
+
+ /* Perform checksum processing in software:
+ * the hardware accelerator cannot checksum frames shorter than
+ * 64 bytes, so short packets are checksummed here instead.
+ */
+ if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
+ frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
+ PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ unsigned int offset;
+ iph->check = 0;
+ iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
+ offset = skb_transport_offset(skb);
+ if (iph->protocol == IPPROTO_TCP) {
+ skb->csum = 0;
+ tcp_hdr(skb)->check = 0;
+ skb->csum = skb_checksum(skb, offset,
+ skb->len - offset, 0);
+ tcp_hdr(skb)->check =
+ csum_tcpudp_magic(iph->saddr,
+ iph->daddr,
+ skb->len - offset,
+ IPPROTO_TCP,
+ skb->csum);
+ } else if (iph->protocol == IPPROTO_UDP) {
+ skb->csum = 0;
+ udp_hdr(skb)->check = 0;
+ skb->csum =
+ skb_checksum(skb, offset,
+ skb->len - offset, 0);
+ udp_hdr(skb)->check =
+ csum_tcpudp_magic(iph->saddr,
+ iph->daddr,
+ skb->len - offset,
+ IPPROTO_UDP,
+ skb->csum);
+ }
+ }
+ }
+ spin_lock_irqsave(&tx_ring->tx_lock, flags);
+ ring_num = tx_ring->next_to_use;
+ if (unlikely((ring_num + 1) == tx_ring->count))
+ tx_ring->next_to_use = 0;
+ else
+ tx_ring->next_to_use = ring_num + 1;
+
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ buffer_info = &tx_ring->buffer_info[ring_num];
+ tmp_skb = buffer_info->skb;
+
+ /* [Header:14][payload] ---> [Header:14][padding:2][payload] */
+ memcpy(tmp_skb->data, skb->data, ETH_HLEN);
+ tmp_skb->data[ETH_HLEN] = 0x00;
+ tmp_skb->data[ETH_HLEN + 1] = 0x00;
+ tmp_skb->len = skb->len;
+ memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
+ (skb->len - ETH_HLEN));
+ /*-- Set Buffer information --*/
+ buffer_info->length = tmp_skb->len;
+ buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
+ buffer_info->length,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
+ pr_err("TX DMA map failed\n");
+ buffer_info->dma = 0;
+ buffer_info->time_stamp = 0;
+ tx_ring->next_to_use = ring_num;
+ return;
+ }
+ buffer_info->mapped = true;
+ buffer_info->time_stamp = jiffies;
+
+ /*-- Set Tx descriptor --*/
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
+ tx_desc->buffer_addr = (buffer_info->dma);
+ tx_desc->length = (tmp_skb->len);
+ tx_desc->tx_words_eob = ((tmp_skb->len + 3));
+ tx_desc->tx_frame_ctrl = (frame_ctrl);
+ tx_desc->gbec_status = (DSC_INIT16);
+
+ if (unlikely(++ring_num == tx_ring->count))
+ ring_num = 0;
+
+ /* Update software pointer of TX descriptor */
+ iowrite32(tx_ring->dma +
+ (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
+ &hw->reg->TX_DSC_SW_P);
+ dev_kfree_skb_any(skb);
+}
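+/*
+ * Note: pch_gbe_tx_queue() never maps the caller's skb directly. Each
+ * frame is copied into a preallocated ring skb with two zero bytes
+ * inserted after the 14-byte Ethernet header, so the IP header starts
+ * on a 4-byte boundary (see PCH_GBE_DMA_PADDING); the hardware is
+ * presumably configured to skip those pad bytes on transmit.
+ */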
+
+/**
+ * pch_gbe_update_stats - Update the board statistics counters
+ * @adapter: Board private structure
+ */
+void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_hw_stats *stats = &adapter->stats;
+ unsigned long flags;
+
+ /*
+ * Prevent stats update while adapter is being reset, or if the pci
+ * connection is down.
+ */
+ if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
+ return;
+
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+
+ /* Update device status "adapter->stats" */
+ stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
+ stats->tx_errors = stats->tx_length_errors +
+ stats->tx_aborted_errors +
+ stats->tx_carrier_errors + stats->tx_timeout_count;
+
+ /* Update network device status "adapter->net_stats" */
+ netdev->stats.rx_packets = stats->rx_packets;
+ netdev->stats.rx_bytes = stats->rx_bytes;
+ netdev->stats.rx_dropped = stats->rx_dropped;
+ netdev->stats.tx_packets = stats->tx_packets;
+ netdev->stats.tx_bytes = stats->tx_bytes;
+ netdev->stats.tx_dropped = stats->tx_dropped;
+ /* Fill out the OS statistics structure */
+ netdev->stats.multicast = stats->multicast;
+ netdev->stats.collisions = stats->collisions;
+ /* Rx Errors */
+ netdev->stats.rx_errors = stats->rx_errors;
+ netdev->stats.rx_crc_errors = stats->rx_crc_errors;
+ netdev->stats.rx_frame_errors = stats->rx_frame_errors;
+ /* Tx Errors */
+ netdev->stats.tx_errors = stats->tx_errors;
+ netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
+ netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;
+
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+}
+
+static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 rxdma;
+ u16 value;
+ int ret;
+
+ /* Disable Receive DMA */
+ rxdma = ioread32(&hw->reg->DMA_CTRL);
+ rxdma &= ~PCH_GBE_RX_DMA_EN;
+ iowrite32(rxdma, &hw->reg->DMA_CTRL);
+ /* Wait Rx DMA BUS is IDLE */
+ ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
+ if (ret) {
+ /* Disable Bus master */
+ pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
+ value &= ~PCI_COMMAND_MASTER;
+ pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
+ /* Stop Receive */
+ pch_gbe_mac_reset_rx(hw);
+ /* Enable Bus master */
+ value |= PCI_COMMAND_MASTER;
+ pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
+ } else {
+ /* Stop Receive */
+ pch_gbe_mac_reset_rx(hw);
+ }
+}
+
+static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
+{
+ u32 rxdma;
+
+ /* Enables Receive DMA */
+ rxdma = ioread32(&hw->reg->DMA_CTRL);
+ rxdma |= PCH_GBE_RX_DMA_EN;
+ iowrite32(rxdma, &hw->reg->DMA_CTRL);
+ /* Enables Receive */
+ iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
+ return;
+}
+
+/**
+ * pch_gbe_intr - Interrupt Handler
+ * @irq: Interrupt number
+ * @data: Pointer to a network interface device structure
+ * Returns
+ * - IRQ_HANDLED: Our interrupt
+ * - IRQ_NONE: Not our interrupt
+ */
+static irqreturn_t pch_gbe_intr(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 int_st;
+ u32 int_en;
+
+ /* Check request status */
+ int_st = ioread32(&hw->reg->INT_ST);
+ int_st = int_st & ioread32(&hw->reg->INT_EN);
+ /* No enabled interrupt source is pending */
+ if (unlikely(!int_st))
+ return IRQ_NONE; /* Not our interrupt. End processing. */
+ pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
+ if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
+ adapter->stats.intr_rx_frame_err_count++;
+ if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
+ if (!adapter->rx_stop_flag) {
+ adapter->stats.intr_rx_fifo_err_count++;
+ pr_debug("Rx fifo over run\n");
+ adapter->rx_stop_flag = true;
+ int_en = ioread32(&hw->reg->INT_EN);
+ iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
+ &hw->reg->INT_EN);
+ pch_gbe_stop_receive(adapter);
+ int_st |= ioread32(&hw->reg->INT_ST);
+ int_st = int_st & ioread32(&hw->reg->INT_EN);
+ }
+ if (int_st & PCH_GBE_INT_RX_DMA_ERR)
+ adapter->stats.intr_rx_dma_err_count++;
+ if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
+ adapter->stats.intr_tx_fifo_err_count++;
+ if (int_st & PCH_GBE_INT_TX_DMA_ERR)
+ adapter->stats.intr_tx_dma_err_count++;
+ if (int_st & PCH_GBE_INT_TCPIP_ERR)
+ adapter->stats.intr_tcpip_err_count++;
+ /* When Rx descriptor is empty */
+ if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
+ adapter->stats.intr_rx_dsc_empty_count++;
+ pr_debug("Rx descriptor is empty\n");
+ int_en = ioread32(&hw->reg->INT_EN);
+ iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
+ if (hw->mac.tx_fc_enable) {
+ /* Set Pause packet */
+ pch_gbe_mac_set_pause_packet(hw);
+ }
+ }
+
+ /* On Rx/Tx completion, or while Rx is stopped, poll via NAPI */
+ if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
+ (adapter->rx_stop_flag == true)) {
+ if (likely(napi_schedule_prep(&adapter->napi))) {
+ /* Enable only Rx Descriptor empty */
+ atomic_inc(&adapter->irq_sem);
+ int_en = ioread32(&hw->reg->INT_EN);
+ int_en &=
+ ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
+ iowrite32(int_en, &hw->reg->INT_EN);
+ /* Start polling for NAPI */
+ __napi_schedule(&adapter->napi);
+ }
+ }
+ pr_debug("return = 0x%08x INT_EN reg = 0x%08x\n",
+ IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
+ return IRQ_HANDLED;
+}
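+/*
+ * The handler above follows the usual NAPI pattern: Rx/Tx completion
+ * sources are masked in INT_EN before __napi_schedule(), and the poll
+ * routine (not part of this hunk) is expected to re-enable them via
+ * pch_gbe_irq_enable() once the rings have been drained.
+ */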
+
+/**
+ * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended
+ * @adapter: Board private structure
+ * @rx_ring: Rx descriptor ring
+ * @cleaned_count: Cleaned count
+ */
+static void
+pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct pch_gbe_rx_desc *rx_desc;
+ struct pch_gbe_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz;
+
+ bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+ i = rx_ring->next_to_use;
+
+ while ((cleaned_count--)) {
+ buffer_info = &rx_ring->buffer_info[i];
+ skb = netdev_alloc_skb(netdev, bufsz);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ adapter->stats.rx_alloc_buff_failed++;
+ break;
+ }
+ /* align */
+ skb_reserve(skb, NET_IP_ALIGN);
+ buffer_info->skb = skb;
+
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ buffer_info->rx_buffer,
+ buffer_info->length,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
+ dev_kfree_skb(skb);
+ buffer_info->skb = NULL;
+ buffer_info->dma = 0;
+ adapter->stats.rx_alloc_buff_failed++;
+ break; /* while !buffer_info->skb */
+ }
+ buffer_info->mapped = true;
+ rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
+ rx_desc->buffer_addr = (buffer_info->dma);
+ rx_desc->gbec_status = DSC_INIT16;
+
+ pr_debug("i = %d buffer_info->dma = 0x08%llx buffer_info->length = 0x%x\n",
+ i, (unsigned long long)buffer_info->dma,
+ buffer_info->length);
+
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
+ }
+ if (likely(rx_ring->next_to_use != i)) {
+ rx_ring->next_to_use = i;
+ if (unlikely(i-- == 0))
+ i = (rx_ring->count - 1);
+ iowrite32(rx_ring->dma +
+ (int)sizeof(struct pch_gbe_rx_desc) * i,
+ &hw->reg->RX_DSC_SW_P);
+ }
+ return;
+}
+
+static int
+pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_buffer *buffer_info;
+ unsigned int i;
+ unsigned int bufsz;
+ unsigned int size;
+
+ bufsz = adapter->rx_buffer_len;
+
+ size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
+ rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
+ &rx_ring->rx_buff_pool_logic,
+ GFP_KERNEL);
+ if (!rx_ring->rx_buff_pool) {
+ pr_err("Unable to allocate memory for the receive poll buffer\n");
+ return -ENOMEM;
+ }
+ memset(rx_ring->rx_buff_pool, 0, size);
+ rx_ring->rx_buff_pool_size = size;
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
+ buffer_info->length = bufsz;
+ }
+ return 0;
+}
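+/*
+ * Sizing note (illustrative numbers, not from the driver): the pool is
+ * a single dma_alloc_coherent() region carved into fixed slots, with
+ * slot i at rx_buff_pool + i * bufsz. With 2048-byte buffers and a
+ * 256-entry ring that is 512 KiB of buffer space plus the 2 MiB
+ * PCH_GBE_RESERVE_MEMORY headroom.
+ */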
+
+/**
+ * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
+ * @adapter: Board private structure
+ * @tx_ring: Tx descriptor ring
+ */
+static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring)
+{
+ struct pch_gbe_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz;
+ struct pch_gbe_tx_desc *tx_desc;
+
+ bufsz =
+ adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
+
+ for (i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+ skb = netdev_alloc_skb(adapter->netdev, bufsz);
+ skb_reserve(skb, PCH_GBE_DMA_ALIGN);
+ buffer_info->skb = skb;
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
+ tx_desc->gbec_status = (DSC_INIT16);
+ }
+ return;
+}
+
+/**
+ * pch_gbe_clean_tx - Reclaim resources after transmit completes
+ * @adapter: Board private structure
+ * @tx_ring: Tx descriptor ring
+ * Returns
+ * true: Cleaned the descriptor
+ * false: Not cleaned the descriptor
+ */
+static bool
+pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring)
+{
+ struct pch_gbe_tx_desc *tx_desc;
+ struct pch_gbe_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int cleaned_count = 0;
+ bool cleaned = true;
+
+ pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
+
+ i = tx_ring->next_to_clean;
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
+ pr_debug("gbec_status:0x%04x dma_status:0x%04x\n",
+ tx_desc->gbec_status, tx_desc->dma_status);
+
+ while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
+ pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
+ buffer_info = &tx_ring->buffer_info[i];
+ skb = buffer_info->skb;
+
+ if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
+ adapter->stats.tx_aborted_errors++;
+ pr_err("Transfer Abort Error\n");
+ } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
+ ) {
+ adapter->stats.tx_carrier_errors++;
+ pr_err("Transfer Carrier Sense Error\n");
+ } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
+ ) {
+ adapter->stats.tx_aborted_errors++;
+ pr_err("Transfer Collision Abort Error\n");
+ } else if ((tx_desc->gbec_status &
+ (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
+ PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
+ adapter->stats.collisions++;
+ adapter->stats.tx_packets++;
+ adapter->stats.tx_bytes += skb->len;
+ pr_debug("Transfer Collision\n");
+ } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
+ ) {
+ adapter->stats.tx_packets++;
+ adapter->stats.tx_bytes += skb->len;
+ }
+ if (buffer_info->mapped) {
+ pr_debug("unmap buffer_info->dma : %d\n", i);
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
+ buffer_info->mapped = false;
+ }
+ if (buffer_info->skb) {
+ pr_debug("trim buffer_info->skb : %d\n", i);
+ skb_trim(buffer_info->skb, 0);
+ }
+ tx_desc->gbec_status = DSC_INIT16;
+ if (unlikely(++i == tx_ring->count))
+ i = 0;
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
+
+ /* weight of a sort for tx, to avoid endless transmit cleanup */
+ if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
+ cleaned = false;
+ break;
+ }
+ }
+ pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
+ cleaned_count);
+ /* Recover from running out of Tx resources in xmit_frame */
+ if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
+ netif_wake_queue(adapter->netdev);
+ adapter->stats.tx_restart_count++;
+ pr_debug("Tx wake queue\n");
+ }
+ spin_lock(&adapter->tx_queue_lock);
+ tx_ring->next_to_clean = i;
+ spin_unlock(&adapter->tx_queue_lock);
+ pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
+ return cleaned;
+}
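+/*
+ * Ownership convention (inferred from the code above): descriptors are
+ * handed to the hardware with gbec_status set to DSC_INIT16, and the
+ * DMA engine overwrites that marker on completion. A descriptor whose
+ * gbec_status no longer contains DSC_INIT16 is therefore done and may
+ * be reclaimed; pch_gbe_clean_tx() restores the marker once the buffer
+ * has been unmapped.
+ */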
+
+/**
+ * pch_gbe_clean_rx - Send received data up the network stack; legacy
+ * @adapter: Board private structure
+ * @rx_ring: Rx descriptor ring
+ * @work_done: Completed count
+ * @work_to_do: Request count
+ * Returns
+ * true: Cleaned the descriptor
+ * false: Not cleaned the descriptor
+ */
+static bool
+pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring,
+ int *work_done, int work_to_do)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_buffer *buffer_info;
+ struct pch_gbe_rx_desc *rx_desc;
+ u32 length;
+ unsigned int i;
+ unsigned int cleaned_count = 0;
+ bool cleaned = false;
+ struct sk_buff *skb;
+ u8 dma_status;
+ u16 gbec_status;
+ u32 tcp_ip_status;
+
+ i = rx_ring->next_to_clean;
+
+ while (*work_done < work_to_do) {
+ /* Check Rx descriptor status */
+ rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
+ if (rx_desc->gbec_status == DSC_INIT16)
+ break;
+ cleaned = true;
+ cleaned_count++;
+
+ dma_status = rx_desc->dma_status;
+ gbec_status = rx_desc->gbec_status;
+ tcp_ip_status = rx_desc->tcp_ip_status;
+ rx_desc->gbec_status = DSC_INIT16;
+ buffer_info = &rx_ring->buffer_info[i];
+ skb = buffer_info->skb;
+ buffer_info->skb = NULL;
+
+ /* unmap dma */
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
+ buffer_info->mapped = false;
+
+ pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x "
+ "TCP:0x%08x] BufInf = 0x%p\n",
+ i, dma_status, gbec_status, tcp_ip_status,
+ buffer_info);
+ /* Error check */
+ if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
+ adapter->stats.rx_frame_errors++;
+ pr_err("Receive Not Octal Error\n");
+ } else if (unlikely(gbec_status &
+ PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
+ adapter->stats.rx_frame_errors++;
+ pr_err("Receive Nibble Error\n");
+ } else if (unlikely(gbec_status &
+ PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
+ adapter->stats.rx_crc_errors++;
+ pr_err("Receive CRC Error\n");
+ } else {
+ /* Get the receive length: rx_words_eob holds the byte
+ * count plus 3, and that count includes the FCS. */
+ length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
+ if (rx_desc->rx_words_eob & 0x02)
+ length = length - 4;
+ /*
+ * buffer_info->rx_buffer: [Header:14][payload]
+ * skb->data: [Reserve:2][Header:14][payload]
+ */
+ memcpy(skb->data, buffer_info->rx_buffer, length);
+
+ /* update status of driver */
+ adapter->stats.rx_bytes += length;
+ adapter->stats.rx_packets++;
+ if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
+ adapter->stats.multicast++;
+ /* Set skb metadata */
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, netdev);
+ if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
+ skb->ip_summed = CHECKSUM_NONE;
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ napi_gro_receive(&adapter->napi, skb);
+ (*work_done)++;
+ pr_debug("Receive skb->ip_summed: %d length: %d\n",
+ skb->ip_summed, length);
+ }
+ /* return some buffers to hardware, one at a time is too slow */
+ if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
+ pch_gbe_alloc_rx_buffers(adapter, rx_ring,
+ cleaned_count);
+ cleaned_count = 0;
+ }
+ if (++i == rx_ring->count)
+ i = 0;
+ }
+ rx_ring->next_to_clean = i;
+ if (cleaned_count)
+ pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+ return cleaned;
+}
+
+/**
+ * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
+ * @adapter: Board private structure
+ * @tx_ring: Tx descriptor ring (for a specific queue) to setup
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_tx_desc *tx_desc;
+ int size;
+ int desNo;
+
+ size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
+ tx_ring->buffer_info = vzalloc(size);
+ if (!tx_ring->buffer_info) {
+ pr_err("Unable to allocate memory for the buffer information\n");
+ return -ENOMEM;
+ }
+
+ tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
+
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc) {
+ vfree(tx_ring->buffer_info);
+ pr_err("Unable to allocate memory for the transmit descriptor ring\n");
+ return -ENOMEM;
+ }
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ spin_lock_init(&tx_ring->tx_lock);
+
+ for (desNo = 0; desNo < tx_ring->count; desNo++) {
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
+ tx_desc->gbec_status = DSC_INIT16;
+ }
+ pr_debug("tx_ring->desc = 0x%p tx_ring->dma = 0x%08llx\n"
+ "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
+ tx_ring->desc, (unsigned long long)tx_ring->dma,
+ tx_ring->next_to_clean, tx_ring->next_to_use);
+ return 0;
+}
+
+/**
+ * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
+ * @adapter: Board private structure
+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_rx_desc *rx_desc;
+ int size;
+ int desNo;
+
+ size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
+ rx_ring->buffer_info = vzalloc(size);
+ if (!rx_ring->buffer_info) {
+ pr_err("Unable to allocate memory for the receive descriptor ring\n");
+ return -ENOMEM;
+ }
+ rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc) {
+ pr_err("Unable to allocate memory for the receive descriptor ring\n");
+ vfree(rx_ring->buffer_info);
+ return -ENOMEM;
+ }
+ memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ for (desNo = 0; desNo < rx_ring->count; desNo++) {
+ rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
+ rx_desc->gbec_status = DSC_INIT16;
+ }
+ pr_debug("rx_ring->desc = 0x%p rx_ring->dma = 0x%08llx "
+ "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
+ rx_ring->desc, (unsigned long long)rx_ring->dma,
+ rx_ring->next_to_clean, rx_ring->next_to_use);
+ return 0;
+}
+
+/**
+ * pch_gbe_free_tx_resources - Free Tx Resources
+ * @adapter: Board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ */
+void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ pch_gbe_clean_tx_ring(adapter, tx_ring);
+ vfree(tx_ring->buffer_info);
+ tx_ring->buffer_info = NULL;
+ pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ tx_ring->desc = NULL;
+}
+
+/**
+ * pch_gbe_free_rx_resources - Free Rx Resources
+ * @adapter: Board private structure
+ * @rx_ring: Ring to clean the resources from
+ */
+void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ pch_gbe_clean_rx_ring(adapter, rx_ring);
+ vfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
+ pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ rx_ring->desc = NULL;
+}
+
+/**
+ * pch_gbe_request_irq - Allocate an interrupt line
+ * @adapter: Board private structure
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+ int flags;
+
+ flags = IRQF_SHARED;
+ adapter->have_msi = false;
+ err = pci_enable_msi(adapter->pdev);
+ pr_debug("call pci_enable_msi\n");
+ if (err) {
+ pr_debug("call pci_enable_msi - Error: %d\n", err);
+ } else {
+ flags = 0;
+ adapter->have_msi = true;
+ }
+ err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
+ flags, netdev->name, netdev);
+ if (err)
+ pr_err("Unable to allocate interrupt Error: %d\n", err);
+ pr_debug("adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n",
+ adapter->have_msi, flags, err);
+ return err;
+}
+
+static void pch_gbe_set_multi(struct net_device *netdev);
+/**
+ * pch_gbe_up - Up GbE network device
+ * @adapter: Board private structure
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+int pch_gbe_up(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
+ struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
+ int err;
+
+ /* hardware has been reset, we need to reload some things */
+ pch_gbe_set_multi(netdev);
+
+ pch_gbe_setup_tctl(adapter);
+ pch_gbe_configure_tx(adapter);
+ pch_gbe_setup_rctl(adapter);
+ pch_gbe_configure_rx(adapter);
+
+ err = pch_gbe_request_irq(adapter);
+ if (err) {
+ pr_err("Error: can't bring device up - irq request failed\n");
+ return err;
+ }
+ err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
+ if (err) {
+ pr_err("Error: can't bring device up - alloc rx buffers pool failed\n");
+ pch_gbe_free_irq(adapter);
+ return err;
+ }
+ pch_gbe_alloc_tx_buffers(adapter, tx_ring);
+ pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
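+ /* Save the stack's Tx queue length; pch_gbe_down() restores it */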
+ adapter->tx_queue_len = netdev->tx_queue_len;
+ pch_gbe_start_receive(&adapter->hw);
+
+ mod_timer(&adapter->watchdog_timer, jiffies);
+
+ napi_enable(&adapter->napi);
+ pch_gbe_irq_enable(adapter);
+ netif_start_queue(adapter->netdev);
+
+ return 0;
+}
+
+/**
+ * pch_gbe_down - Take the GbE network device down
+ * @adapter: Board private structure
+ */
+void pch_gbe_down(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
+
+ /* signal that we're down so the interrupt handler does not
+ * reschedule our watchdog timer */
+ napi_disable(&adapter->napi);
+ atomic_set(&adapter->irq_sem, 0);
+
+ pch_gbe_irq_disable(adapter);
+ pch_gbe_free_irq(adapter);
+
+ del_timer_sync(&adapter->watchdog_timer);
+
+ netdev->tx_queue_len = adapter->tx_queue_len;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ pch_gbe_reset(adapter);
+ pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
+ pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
+
+ dma_free_coherent(&adapter->pdev->dev, rx_ring->rx_buff_pool_size,
+ rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
+ rx_ring->rx_buff_pool_logic = 0;
+ rx_ring->rx_buff_pool_size = 0;
+ rx_ring->rx_buff_pool = NULL;
+}
+
+/**
+ * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
+ * @adapter: Board private structure to initialize
+ * Returns
+ * 0: Successful
+ * Negative value: Failed
+ */
+static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
+ hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+ /* Initialize the hardware-specific values */
+ if (pch_gbe_hal_setup_init_funcs(hw)) {
+ pr_err("Hardware Initialization Failure\n");
+ return -EIO;
+ }
+ if (pch_gbe_alloc_queues(adapter)) {
+ pr_err("Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+ spin_lock_init(&adapter->hw.miim_lock);
+ spin_lock_init(&adapter->tx_queue_lock);
+ spin_lock_init(&adapter->stats_lock);
+ spin_lock_init(&adapter->ethtool_lock);
+ atomic_set(&adapter->irq_sem, 0);
+ pch_gbe_irq_disable(adapter);
+
+ pch_gbe_init_stats(adapter);
+
+ pr_debug("rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n",
+ (u32) adapter->rx_buffer_len,
+ hw->mac.min_frame_size, hw->mac.max_frame_size);
+ return 0;
+}
+
+/**
+ * pch_gbe_open - Called when a network interface is made active
+ * @netdev: Network interface device structure
+ * Returns
+ * 0: Successful
+ * Negative value: Failed
+ */
+static int pch_gbe_open(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ int err;
+
+ /* allocate transmit descriptors */
+ err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
+ if (err)
+ goto err_setup_tx;
+ /* allocate receive descriptors */
+ err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
+ if (err)
+ goto err_setup_rx;
+ pch_gbe_hal_power_up_phy(hw);
+ err = pch_gbe_up(adapter);
+ if (err)
+ goto err_up;
+ pr_debug("Success End\n");
+ return 0;
+
+err_up:
+ if (!adapter->wake_up_evt)
+ pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
+err_setup_rx:
+ pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
+err_setup_tx:
+ pch_gbe_reset(adapter);
+ pr_err("Error End\n");
+ return err;
+}
+
+/**
+ * pch_gbe_stop - Disables a network interface
+ * @netdev: Network interface device structure
+ * Returns
+ * 0: Successful
+ */
+static int pch_gbe_stop(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ pch_gbe_down(adapter);
+ if (!adapter->wake_up_evt)
+ pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
+ pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
+ return 0;
+}
+
+/**
+ * pch_gbe_xmit_frame - Begin transmitting a packet
+ * @skb: Socket buffer structure
+ * @netdev: Network interface device structure
+ * Returns
+ * - NETDEV_TX_OK: Frame consumed (queued, or dropped on error)
+ * - NETDEV_TX_BUSY: Descriptor ring full; the stack will retry
+ */
+static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
+ unsigned long flags;
+
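+ /* max_frame_size includes the 4-byte FCS, which an skb never
+ * carries, hence the "- 4" below.
+ */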
+ if (unlikely(skb->len > (adapter->hw.mac.max_frame_size - 4))) {
+ pr_err("Transfer length Error: skb len: %d > max: %d\n",
+ skb->len, adapter->hw.mac.max_frame_size);
+ dev_kfree_skb_any(skb);
+ adapter->stats.tx_length_errors++;
+ return NETDEV_TX_OK;
+ }
+ if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
+ /* Collision - tell upper layer to requeue */
+ return NETDEV_TX_LOCKED;
+ }
+ if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
+ netif_stop_queue(netdev);
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ pr_debug("Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n",
+ tx_ring->next_to_use, tx_ring->next_to_clean);
+ return NETDEV_TX_BUSY;
+ }
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+
+ /* CRC and VLAN tag (ITAG) insertion are not supported */
+ pch_gbe_tx_queue(adapter, tx_ring, skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * pch_gbe_get_stats - Get System Network Statistics
+ * @netdev: Network interface device structure
+ * Returns: The current stats
+ */
+static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
+{
+ /* only return the current stats */
+ return &netdev->stats;
+}
+
+/**
+ * pch_gbe_set_multi - Set multicast and promiscuous modes
+ * @netdev: Network interface device structure
+ */
+static void pch_gbe_set_multi(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u8 *mta_list;
+ u32 rctl;
+ int i;
+ int mc_count;
+
+ pr_debug("netdev->flags : 0x%08x\n", netdev->flags);
+
+ /* Check for Promiscuous and All Multicast modes */
+ rctl = ioread32(&hw->reg->RX_MODE);
+ mc_count = netdev_mc_count(netdev);
+ if ((netdev->flags & IFF_PROMISC)) {
+ rctl &= ~PCH_GBE_ADD_FIL_EN;
+ rctl &= ~PCH_GBE_MLT_FIL_EN;
+ } else if ((netdev->flags & IFF_ALLMULTI)) {
+ /* receive all multicast frames */
+ rctl |= PCH_GBE_ADD_FIL_EN;
+ rctl &= ~PCH_GBE_MLT_FIL_EN;
+ } else {
+ if (mc_count >= PCH_GBE_MAR_ENTRIES) {
+ /* receive all multicast frames */
+ rctl |= PCH_GBE_ADD_FIL_EN;
+ rctl &= ~PCH_GBE_MLT_FIL_EN;
+ } else {
+ rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
+ }
+ }
+ iowrite32(rctl, &hw->reg->RX_MODE);
+
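+ /* The list does not fit in the MAR table; the multicast filter was
+ * disabled above, so every multicast frame is already accepted.
+ */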
+ if (mc_count >= PCH_GBE_MAR_ENTRIES)
+ return;
+ mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
+ if (!mta_list)
+ return;
+
+ /* The shared function expects a packed array of only addresses. */
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (i == mc_count)
+ break;
+ memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
+ }
+ pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
+ PCH_GBE_MAR_ENTRIES);
+ kfree(mta_list);
+
+ pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n",
+ ioread32(&hw->reg->RX_MODE), mc_count);
+}
+
+/**
+ * pch_gbe_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: Network interface device structure
+ * @addr: Pointer to an address structure
+ * Returns
+ * 0: Successful
+ * -EADDRNOTAVAIL: Failed
+ */
+static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *skaddr = addr;
+ int ret_val;
+
+ if (!is_valid_ether_addr(skaddr->sa_data)) {
+ ret_val = -EADDRNOTAVAIL;
+ } else {
+ memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
+ memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
+ pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+ ret_val = 0;
+ }
+ pr_debug("ret_val : 0x%08x\n", ret_val);
+ pr_debug("dev_addr : %pM\n", netdev->dev_addr);
+ pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
+ pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
+ ioread32(&adapter->hw.reg->mac_adr[0].high),
+ ioread32(&adapter->hw.reg->mac_adr[0].low));
+ return ret_val;
+}
+
+/**
+ * pch_gbe_change_mtu - Change the Maximum Transmission Unit
+ * @netdev: Network interface device structure
+ * @new_mtu: New value for maximum frame size
+ * Returns
+ * 0: Successful
+ * -EINVAL: Failed
+ */
+static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ int max_frame;
+ unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
+ int err;
+
+ max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
+ (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
+ pr_err("Invalid MTU setting\n");
+ return -EINVAL;
+ }
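+ /* Pick the smallest supported Rx buffer size that holds the new frame */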
+ if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
+ adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
+ else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
+ adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
+ else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
+ adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
+ else
+ adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
+
+ if (netif_running(netdev)) {
+ pch_gbe_down(adapter);
+ err = pch_gbe_up(adapter);
+ if (err) {
+ adapter->rx_buffer_len = old_rx_buffer_len;
+ pch_gbe_up(adapter);
+ return -ENOMEM;
+ } else {
+ netdev->mtu = new_mtu;
+ adapter->hw.mac.max_frame_size = max_frame;
+ }
+ } else {
+ pch_gbe_reset(adapter);
+ netdev->mtu = new_mtu;
+ adapter->hw.mac.max_frame_size = max_frame;
+ }
+
+ pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
+ max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
+ adapter->hw.mac.max_frame_size);
+ return 0;
+}
+
+/**
+ * pch_gbe_set_features - Reset device after features changed
+ * @netdev: Network interface device structure
+ * @features: New features
+ * Returns
+ * 0: HW state updated successfully
+ */
+static int pch_gbe_set_features(struct net_device *netdev, u32 features)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ u32 changed = features ^ netdev->features;
+
+ if (!(changed & NETIF_F_RXCSUM))
+ return 0;
+
+ if (netif_running(netdev))
+ pch_gbe_reinit_locked(adapter);
+ else
+ pch_gbe_reset(adapter);
+
+ return 0;
+}
+
+/**
+ * pch_gbe_ioctl - Controls register through a MII interface
+ * @netdev: Network interface device structure
+ * @ifr: Pointer to ifr structure
+ * @cmd: Control command
+ * Returns
+ * 0: Successful
+ * Negative value: Failed
+ */
+static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ pr_debug("cmd : 0x%04x\n", cmd);
+
+ return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
+}
+
+/**
+ * pch_gbe_tx_timeout - Respond to a Tx Hang
+ * @netdev: Network interface device structure
+ */
+static void pch_gbe_tx_timeout(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ adapter->stats.tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+}
+
+/**
+ * pch_gbe_napi_poll - NAPI receive and transmit polling callback
+ * @napi: Pointer to the polling device structure
+ * @budget: Maximum number of Rx packets to process
+ * Returns
+ * The number of packets processed; a value below @budget means the
+ * poll is complete and interrupts have been re-enabled
+ */
+static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct pch_gbe_adapter *adapter =
+ container_of(napi, struct pch_gbe_adapter, napi);
+ int work_done = 0;
+ bool poll_end_flag = false;
+ bool cleaned = false;
+ u32 int_en;
+
+ pr_debug("budget : %d\n", budget);
+
+ pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+ cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
+
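+ /* If Tx cleanup is incomplete, claim the whole budget so NAPI
+ * keeps polling instead of re-enabling interrupts.
+ */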
+ if (!cleaned)
+ work_done = budget;
+ /* If no Tx and not enough Rx work done,
+ * exit the polling mode
+ */
+ if (work_done < budget)
+ poll_end_flag = true;
+
+ if (poll_end_flag) {
+ napi_complete(napi);
+ if (adapter->rx_stop_flag) {
+ adapter->rx_stop_flag = false;
+ pch_gbe_start_receive(&adapter->hw);
+ }
+ pch_gbe_irq_enable(adapter);
+ } else if (adapter->rx_stop_flag) {
+ adapter->rx_stop_flag = false;
+ pch_gbe_start_receive(&adapter->hw);
+ int_en = ioread32(&adapter->hw.reg->INT_EN);
+ iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
+ &adapter->hw.reg->INT_EN);
+ }
+
+ pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
+ poll_end_flag, work_done, budget);
+
+ return work_done;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * pch_gbe_netpoll - Used by things like netconsole to send skbs
+ * @netdev: Network interface device structure
+ */
+static void pch_gbe_netpoll(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ disable_irq(adapter->pdev->irq);
+ pch_gbe_intr(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+}
+#endif
+
+static const struct net_device_ops pch_gbe_netdev_ops = {
+ .ndo_open = pch_gbe_open,
+ .ndo_stop = pch_gbe_stop,
+ .ndo_start_xmit = pch_gbe_xmit_frame,
+ .ndo_get_stats = pch_gbe_get_stats,
+ .ndo_set_mac_address = pch_gbe_set_mac,
+ .ndo_tx_timeout = pch_gbe_tx_timeout,
+ .ndo_change_mtu = pch_gbe_change_mtu,
+ .ndo_set_features = pch_gbe_set_features,
+ .ndo_do_ioctl = pch_gbe_ioctl,
+ .ndo_set_rx_mode = pch_gbe_set_multi,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = pch_gbe_netpoll,
+#endif
+};
+
+static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+ if (netif_running(netdev))
+ pch_gbe_down(adapter);
+ pci_disable_device(pdev);
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ if (pci_enable_device(pdev)) {
+ pr_err("Cannot re-enable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pch_gbe_hal_power_up_phy(hw);
+ pch_gbe_reset(adapter);
+ /* Clear wake up status */
+ pch_gbe_mac_set_wol_event(hw, 0);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void pch_gbe_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev)) {
+ if (pch_gbe_up(adapter)) {
+ pr_debug("can't bring device back up after reset\n");
+ return;
+ }
+ }
+ netif_device_attach(netdev);
+}
+
+static int __pch_gbe_suspend(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 wufc = adapter->wake_up_evt;
+ int retval = 0;
+
+ netif_device_detach(netdev);
+ if (netif_running(netdev))
+ pch_gbe_down(adapter);
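+ /* With wake-up events armed, keep the receiver configured so the
+ * MAC can match WoL patterns while the host sleeps.
+ */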
+ if (wufc) {
+ pch_gbe_set_multi(netdev);
+ pch_gbe_setup_rctl(adapter);
+ pch_gbe_configure_rx(adapter);
+ pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
+ hw->mac.link_duplex);
+ pch_gbe_set_mode(adapter, hw->mac.link_speed,
+ hw->mac.link_duplex);
+ pch_gbe_mac_set_wol_event(hw, wufc);
+ pci_disable_device(pdev);
+ } else {
+ pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_mac_set_wol_event(hw, wufc);
+ pci_disable_device(pdev);
+ }
+ return retval;
+}
+
+#ifdef CONFIG_PM
+static int pch_gbe_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+
+ return __pch_gbe_suspend(pdev);
+}
+
+static int pch_gbe_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ pr_err("Cannot enable PCI device from suspend\n");
+ return err;
+ }
+ pci_set_master(pdev);
+ pch_gbe_hal_power_up_phy(hw);
+ pch_gbe_reset(adapter);
+ /* Clear wake on lan control and status */
+ pch_gbe_mac_set_wol_event(hw, 0);
+
+ if (netif_running(netdev))
+ pch_gbe_up(adapter);
+ netif_device_attach(netdev);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static void pch_gbe_shutdown(struct pci_dev *pdev)
+{
+ __pch_gbe_suspend(pdev);
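+ /* On power-off, arm wake-up and drop to D3hot so WoL can fire */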
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, true);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+static void pch_gbe_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ cancel_work_sync(&adapter->reset_task);
+ unregister_netdev(netdev);
+
+ pch_gbe_hal_phy_hw_reset(&adapter->hw);
+
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+
+ iounmap(adapter->hw.reg);
+ pci_release_regions(pdev);
+ free_netdev(netdev);
+ pci_disable_device(pdev);
+}
+
+static int pch_gbe_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ struct net_device *netdev;
+ struct pch_gbe_adapter *adapter;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!ret)
+ ret = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev,
+ "ERR: No usable DMA configuration, aborting\n");
+ goto err_disable_device;
+ }
+ }
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "ERR: Can't reserve PCI I/O and memory resources\n");
+ goto err_disable_device;
+ }
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
+ if (!netdev) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev,
+ "ERR: Can't allocate and set up an Ethernet device\n");
+ goto err_release_pci;
+ }
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->hw.back = adapter;
+ adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
+ if (!adapter->hw.reg) {
+ ret = -EIO;
+ dev_err(&pdev->dev, "Can't ioremap\n");
+ goto err_free_netdev;
+ }
+
+ netdev->netdev_ops = &pch_gbe_netdev_ops;
+ netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
+ netif_napi_add(netdev, &adapter->napi,
+ pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
+ netdev->hw_features = NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->features = netdev->hw_features;
+ pch_gbe_set_ethtool_ops(netdev);
+
+ pch_gbe_mac_load_mac_addr(&adapter->hw);
+ pch_gbe_mac_reset_hw(&adapter->hw);
+
+ /* setup the private structure */
+ ret = pch_gbe_sw_init(adapter);
+ if (ret)
+ goto err_iounmap;
+
+ /* Initialize PHY */
+ ret = pch_gbe_init_phy(adapter);
+ if (ret) {
+ dev_err(&pdev->dev, "PHY initialize error\n");
+ goto err_free_adapter;
+ }
+ pch_gbe_hal_get_bus_info(&adapter->hw);
+
+ /* Read the MAC address and store it in the private data */
+ ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
+ if (ret) {
+ dev_err(&pdev->dev, "MAC address Read Error\n");
+ goto err_free_adapter;
+ }
+
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ dev_err(&pdev->dev, "Invalid MAC Address\n");
+ ret = -EIO;
+ goto err_free_adapter;
+ }
+ setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
+ (unsigned long)adapter);
+
+ INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
+
+ pch_gbe_check_options(adapter);
+
+ /* initialize the wake-on-LAN settings to the driver default */
+ adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
+ dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
+
+ /* reset the hardware with the new settings */
+ pch_gbe_reset(adapter);
+
+ ret = register_netdev(netdev);
+ if (ret)
+ goto err_free_adapter;
+ /* tell the stack to leave us alone until pch_gbe_open() is called */
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ dev_dbg(&pdev->dev, "OKIsemi(R) PCH Network Connection\n");
+
+ device_set_wakeup_enable(&pdev->dev, 1);
+ return 0;
+
+err_free_adapter:
+ pch_gbe_hal_phy_hw_reset(&adapter->hw);
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+err_iounmap:
+ iounmap(adapter->hw.reg);
+err_free_netdev:
+ free_netdev(netdev);
+err_release_pci:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
+ {.vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+ .class_mask = (0xFFFF00)
+ },
+ {.vendor = PCI_VENDOR_ID_ROHM,
+ .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+ .class_mask = (0xFFFF00)
+ },
+ {.vendor = PCI_VENDOR_ID_ROHM,
+ .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+ .class_mask = (0xFFFF00)
+ },
+ /* required last entry */
+ {0}
+};
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops pch_gbe_pm_ops = {
+ .suspend = pch_gbe_suspend,
+ .resume = pch_gbe_resume,
+ .freeze = pch_gbe_suspend,
+ .thaw = pch_gbe_resume,
+ .poweroff = pch_gbe_suspend,
+ .restore = pch_gbe_resume,
+};
+#endif
+
+static struct pci_error_handlers pch_gbe_err_handler = {
+ .error_detected = pch_gbe_io_error_detected,
+ .slot_reset = pch_gbe_io_slot_reset,
+ .resume = pch_gbe_io_resume
+};
+
+static struct pci_driver pch_gbe_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = pch_gbe_pcidev_id,
+ .probe = pch_gbe_probe,
+ .remove = pch_gbe_remove,
+#ifdef CONFIG_PM
+ .driver.pm = &pch_gbe_pm_ops,
+#endif
+ .shutdown = pch_gbe_shutdown,
+ .err_handler = &pch_gbe_err_handler
+};
+
+static int __init pch_gbe_init_module(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&pch_gbe_driver);
+ if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
+ if (copybreak == 0) {
+ pr_info("copybreak disabled\n");
+ } else {
+ pr_info("copybreak enabled for packets <= %u bytes\n",
+ copybreak);
+ }
+ }
+ return ret;
+}
+
+static void __exit pch_gbe_exit_module(void)
+{
+ pci_unregister_driver(&pch_gbe_driver);
+}
+
+module_init(pch_gbe_init_module);
+module_exit(pch_gbe_exit_module);
+
+MODULE_DESCRIPTION("EG20T PCH Gigabit Ethernet Driver");
+MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
+
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+ "Maximum size of packet that is copied to a new buffer on receive");
+
+/* pch_gbe_main.c */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
new file mode 100644
index 000000000000..5b5d90a47e29
--- /dev/null
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This code was derived from the Intel e1000e Linux driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include "pch_gbe.h"
+
+#define OPTION_UNSET -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED 1
+
+/**
+ * TxDescriptors - Transmit Descriptor Count
+ * @Valid Range: PCH_GBE_MIN_TXD - PCH_GBE_MAX_TXD
+ * @Default Value: PCH_GBE_DEFAULT_TXD
+ */
+static int TxDescriptors = OPTION_UNSET;
+module_param(TxDescriptors, int, 0);
+MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");
+
+/**
+ * RxDescriptors - Receive Descriptor Count
+ * @Valid Range: PCH_GBE_MIN_RXD - PCH_GBE_MAX_RXD
+ * @Default Value: PCH_GBE_DEFAULT_RXD
+ */
+static int RxDescriptors = OPTION_UNSET;
+module_param(RxDescriptors, int, 0);
+MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors");
+
+/**
+ * Speed - User Specified Speed Override
+ * @Valid Range: 0, 10, 100, 1000
+ * - 0: auto-negotiate at all supported speeds
+ * - 10: only link at 10 Mbps
+ * - 100: only link at 100 Mbps
+ * - 1000: only link at 1000 Mbps
+ * @Default Value: 0
+ */
+static int Speed = OPTION_UNSET;
+module_param(Speed, int, 0);
+MODULE_PARM_DESC(Speed, "Speed setting");
+
+/**
+ * Duplex - User Specified Duplex Override
+ * @Valid Range: 0-2
+ * - 0: auto-negotiate for duplex
+ * - 1: only link at half duplex
+ * - 2: only link at full duplex
+ * @Default Value: 0
+ */
+static int Duplex = OPTION_UNSET;
+module_param(Duplex, int, 0);
+MODULE_PARM_DESC(Duplex, "Duplex setting");
+
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+/**
+ * AutoNeg - Auto-negotiation Advertisement Override
+ * @Valid Range: 0x01-0x0F, 0x20-0x2F
+ *
+ * The AutoNeg value is a bit mask describing which speed and duplex
+ * combinations should be advertised during auto-negotiation.
+ * The supported speed and duplex modes are listed below
+ *
+ * Bit            7     6     5     4     3     2     1     0
+ * Speed (Mbps)   N/A   N/A   1000  N/A   100   100   10    10
+ * Duplex                     Full        Full  Half  Full  Half
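+ * e.g. AutoNeg = 0x2F advertises 1000/FD, 100/FD, 100/HD, 10/FD and
+ * 10/HD (the copper default)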
+ *
+ * @Default Value: 0x2F (copper)
+ */
+static int AutoNeg = OPTION_UNSET;
+module_param(AutoNeg, int, 0);
+MODULE_PARM_DESC(AutoNeg, "Advertised auto-negotiation setting");
+
+#define PHY_ADVERTISE_10_HALF 0x0001
+#define PHY_ADVERTISE_10_FULL 0x0002
+#define PHY_ADVERTISE_100_HALF 0x0004
+#define PHY_ADVERTISE_100_FULL 0x0008
+#define PHY_ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
+#define PHY_ADVERTISE_1000_FULL 0x0020
+#define PCH_AUTONEG_ADVERTISE_DEFAULT 0x2F
+
+/**
+ * FlowControl - User Specified Flow Control Override
+ * @Valid Range: 0-3
+ * - 0: No Flow Control
+ * - 1: Rx only, respond to PAUSE frames but do not generate them
+ * - 2: Tx only, generate PAUSE frames but ignore them on receive
+ * - 3: Full Flow Control Support
+ * @Default Value: Read flow control settings from the EEPROM
+ */
+static int FlowControl = OPTION_UNSET;
+module_param(FlowControl, int, 0);
+MODULE_PARM_DESC(FlowControl, "Flow Control setting");
+
+/*
+ * XsumRX - Receive Checksum Offload Enable/Disable
+ * @Valid Range: 0, 1
+ * - 0: disables all checksum offload
+ * - 1: enables receive IP/TCP/UDP checksum offload
+ * @Default Value: PCH_GBE_DEFAULT_RX_CSUM
+ */
+static int XsumRX = OPTION_UNSET;
+module_param(XsumRX, int, 0);
+MODULE_PARM_DESC(XsumRX, "Disable or enable Receive Checksum offload");
+
+#define PCH_GBE_DEFAULT_RX_CSUM true /* true or false */
+
+/*
+ * XsumTX - Transmit Checksum Offload Enable/Disable
+ * @Valid Range: 0, 1
+ * - 0: disables all checksum offload
+ * - 1: enables transmit IP/TCP/UDP checksum offload
+ * @Default Value: PCH_GBE_DEFAULT_TX_CSUM
+ */
+static int XsumTX = OPTION_UNSET;
+module_param(XsumTX, int, 0);
+MODULE_PARM_DESC(XsumTX, "Disable or enable Transmit Checksum offload");
+
+#define PCH_GBE_DEFAULT_TX_CSUM true /* true or false */
+
+/**
+ * struct pch_gbe_option - Module parameter option descriptor
+ * @type: Option type (enable_option, range_option or list_option)
+ * @name: Option name used in messages
+ * @err: Text describing the action taken for an invalid value
+ * @def: Default value applied when the parameter is unset or invalid
+ * @arg: Valid range (range_option) or list of valid values (list_option)
+ */
+struct pch_gbe_option {
+ enum { enable_option, range_option, list_option } type;
+ char *name;
+ char *err;
+ int def;
+ union {
+ struct { /* range_option info */
+ int min;
+ int max;
+ } r;
+ struct { /* list_option info */
+ int nr;
+ const struct pch_gbe_opt_list { int i; char *str; } *p;
+ } l;
+ } arg;
+};
+
+static const struct pch_gbe_opt_list speed_list[] = {
+ { 0, "" },
+ { SPEED_10, "" },
+ { SPEED_100, "" },
+ { SPEED_1000, "" }
+};
+
+static const struct pch_gbe_opt_list dplx_list[] = {
+ { 0, "" },
+ { HALF_DUPLEX, "" },
+ { FULL_DUPLEX, "" }
+};
+
+static const struct pch_gbe_opt_list an_list[] =
+ #define AA "AutoNeg advertising "
+ {{ 0x01, AA "10/HD" },
+ { 0x02, AA "10/FD" },
+ { 0x03, AA "10/FD, 10/HD" },
+ { 0x04, AA "100/HD" },
+ { 0x05, AA "100/HD, 10/HD" },
+ { 0x06, AA "100/HD, 10/FD" },
+ { 0x07, AA "100/HD, 10/FD, 10/HD" },
+ { 0x08, AA "100/FD" },
+ { 0x09, AA "100/FD, 10/HD" },
+ { 0x0a, AA "100/FD, 10/FD" },
+ { 0x0b, AA "100/FD, 10/FD, 10/HD" },
+ { 0x0c, AA "100/FD, 100/HD" },
+ { 0x0d, AA "100/FD, 100/HD, 10/HD" },
+ { 0x0e, AA "100/FD, 100/HD, 10/FD" },
+ { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
+ { 0x20, AA "1000/FD" },
+ { 0x21, AA "1000/FD, 10/HD" },
+ { 0x22, AA "1000/FD, 10/FD" },
+ { 0x23, AA "1000/FD, 10/FD, 10/HD" },
+ { 0x24, AA "1000/FD, 100/HD" },
+ { 0x25, AA "1000/FD, 100/HD, 10/HD" },
+ { 0x26, AA "1000/FD, 100/HD, 10/FD" },
+ { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
+ { 0x28, AA "1000/FD, 100/FD" },
+ { 0x29, AA "1000/FD, 100/FD, 10/HD" },
+ { 0x2a, AA "1000/FD, 100/FD, 10/FD" },
+ { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
+ { 0x2c, AA "1000/FD, 100/FD, 100/HD" },
+ { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
+ { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
+ { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }
+};
+
+static const struct pch_gbe_opt_list fc_list[] = {
+ { PCH_GBE_FC_NONE, "Flow Control Disabled" },
+ { PCH_GBE_FC_RX_PAUSE, "Flow Control Receive Only" },
+ { PCH_GBE_FC_TX_PAUSE, "Flow Control Transmit Only" },
+ { PCH_GBE_FC_FULL, "Flow Control Enabled" }
+};
+
+/**
+ * pch_gbe_validate_option - Validate option
+ * @value: value
+ * @opt: option
+ * @adapter: Board private structure
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+static int pch_gbe_validate_option(int *value,
+ const struct pch_gbe_option *opt,
+ struct pch_gbe_adapter *adapter)
+{
+ if (*value == OPTION_UNSET) {
+ *value = opt->def;
+ return 0;
+ }
+
+ switch (opt->type) {
+ case enable_option:
+ switch (*value) {
+ case OPTION_ENABLED:
+ pr_debug("%s Enabled\n", opt->name);
+ return 0;
+ case OPTION_DISABLED:
+ pr_debug("%s Disabled\n", opt->name);
+ return 0;
+ }
+ break;
+ case range_option:
+ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+ pr_debug("%s set to %i\n", opt->name, *value);
+ return 0;
+ }
+ break;
+ case list_option: {
+ int i;
+ const struct pch_gbe_opt_list *ent;
+
+ for (i = 0; i < opt->arg.l.nr; i++) {
+ ent = &opt->arg.l.p[i];
+ if (*value == ent->i) {
+ if (ent->str[0] != '\0')
+ pr_debug("%s\n", ent->str);
+ return 0;
+ }
+ }
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ pr_debug("Invalid %s value specified (%i) %s\n",
+ opt->name, *value, opt->err);
+ *value = opt->def;
+ return -1;
+}
+
+/**
+ * pch_gbe_check_copper_options - Range Checking for Link Options, Copper Version
+ * @adapter: Board private structure
+ */
+static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ int speed, dplx;
+
+ { /* Speed */
+ static const struct pch_gbe_option opt = {
+ .type = list_option,
+ .name = "Speed",
+ .err = "parameter ignored",
+ .def = 0,
+ .arg = { .l = { .nr = (int)ARRAY_SIZE(speed_list),
+ .p = speed_list } }
+ };
+ speed = Speed;
+ pch_gbe_validate_option(&speed, &opt, adapter);
+ }
+ { /* Duplex */
+ static const struct pch_gbe_option opt = {
+ .type = list_option,
+ .name = "Duplex",
+ .err = "parameter ignored",
+ .def = 0,
+ .arg = { .l = { .nr = (int)ARRAY_SIZE(dplx_list),
+ .p = dplx_list } }
+ };
+ dplx = Duplex;
+ pch_gbe_validate_option(&dplx, &opt, adapter);
+ }
+
+ { /* Autoneg */
+ static const struct pch_gbe_option opt = {
+ .type = list_option,
+ .name = "AutoNeg",
+ .err = "parameter ignored",
+ .def = PCH_AUTONEG_ADVERTISE_DEFAULT,
+ .arg = { .l = { .nr = (int)ARRAY_SIZE(an_list),
+ .p = an_list} }
+ };
+ if (speed || dplx) {
+ pr_debug("AutoNeg specified along with Speed or Duplex, AutoNeg parameter ignored\n");
+ hw->phy.autoneg_advertised = opt.def;
+ } else {
+ hw->phy.autoneg_advertised = AutoNeg;
+ pch_gbe_validate_option(
+ (int *)(&hw->phy.autoneg_advertised),
+ &opt, adapter);
+ }
+ }
+
+ switch (speed + dplx) {
+ case 0:
+ hw->mac.autoneg = hw->mac.fc_autoneg = 1;
+ pr_debug("Speed and duplex autonegotiation enabled\n");
+ hw->mac.link_speed = SPEED_10;
+ hw->mac.link_duplex = DUPLEX_HALF;
+ break;
+ case HALF_DUPLEX:
+ pr_debug("Half Duplex specified without Speed\n");
+ pr_debug("Using Autonegotiation at Half Duplex only\n");
+ hw->mac.autoneg = hw->mac.fc_autoneg = 1;
+ hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF |
+ PHY_ADVERTISE_100_HALF;
+ hw->mac.link_speed = SPEED_10;
+ hw->mac.link_duplex = DUPLEX_HALF;
+ break;
+ case FULL_DUPLEX:
+ pr_debug("Full Duplex specified without Speed\n");
+ pr_debug("Using Autonegotiation at Full Duplex only\n");
+ hw->mac.autoneg = hw->mac.fc_autoneg = 1;
+ hw->phy.autoneg_advertised = PHY_ADVERTISE_10_FULL |
+ PHY_ADVERTISE_100_FULL |
+ PHY_ADVERTISE_1000_FULL;
+ hw->mac.link_speed = SPEED_10;
+ hw->mac.link_duplex = DUPLEX_FULL;
+ break;
+ case SPEED_10:
+ pr_debug("10 Mbps Speed specified without Duplex\n");
+ pr_debug("Using Autonegotiation at 10 Mbps only\n");
+ hw->mac.autoneg = hw->mac.fc_autoneg = 1;
+ hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF |
+ PHY_ADVERTISE_10_FULL;
+ hw->mac.link_speed = SPEED_10;
+ hw->mac.link_duplex = DUPLEX_HALF;
+ break;
+ case SPEED_10 + HALF_DUPLEX:
+ pr_debug("Forcing to 10 Mbps Half Duplex\n");
+ hw->mac.autoneg = hw->mac.fc_autoneg = 0;
+ hw->phy.autoneg_advertised = 0;
+ hw->mac.link_speed = SPEED_10;
+ hw->mac.link_duplex = DUPLEX_HALF;
+ break;
+ case SPEED_10 + FULL_DUPLEX:
+ pr_debug("Forcing to 10 Mbps Full Duplex\n");
+ hw->mac.autoneg = hw->mac.fc_autoneg = 0;
+ hw->phy.autoneg_advertised = 0;
+ hw->mac.link_speed = SPEED_10;
+ hw->mac.link_duplex = DUPLEX_FULL;
+ break;
+ case SPEED_100:
+ pr_debug("100 Mbps Speed specified without Duplex\n");
+ pr_debug("Using Autonegotiation at 100 Mbps only\n");
+ hw->mac.autoneg = hw->mac.fc_autoneg = 1;
+ hw->phy.autoneg_advertised = PHY_ADVERTISE_100_HALF |
+ PHY_ADVERTISE_100_FULL;
+ hw->mac.link_speed = SPEED_100;
+ hw->mac.link_duplex = DUPLEX_HALF;
+ break;
+ case SPEED_100 + HALF_DUPLEX:
+ pr_debug("Forcing to 100 Mbps Half Duplex\n");
+ hw->mac.autoneg = hw->mac.fc_autoneg = 0;
+ hw->phy.autoneg_advertised = 0;
+ hw->mac.link_speed = SPEED_100;
+ hw->mac.link_duplex = DUPLEX_HALF;
+ break;
+ case SPEED_100 + FULL_DUPLEX:
+ pr_debug("Forcing to 100 Mbps Full Duplex\n");
+ hw->mac.autoneg = hw->mac.fc_autoneg = 0;
+ hw->phy.autoneg_advertised = 0;
+ hw->mac.link_speed = SPEED_100;
+ hw->mac.link_duplex = DUPLEX_FULL;
+ break;
+ case SPEED_1000:
+ pr_debug("1000 Mbps Speed specified without Duplex\n");
+ goto full_duplex_only;
+ case SPEED_1000 + HALF_DUPLEX:
+ pr_debug("Half Duplex is not supported at 1000 Mbps\n");
+ /* fall through */
+ case SPEED_1000 + FULL_DUPLEX:
+full_duplex_only:
+ pr_debug("Using Autonegotiation at 1000 Mbps Full Duplex only\n");
+ hw->mac.autoneg = hw->mac.fc_autoneg = 1;
+ hw->phy.autoneg_advertised = PHY_ADVERTISE_1000_FULL;
+ hw->mac.link_speed = SPEED_1000;
+ hw->mac.link_duplex = DUPLEX_FULL;
+ break;
+ default:
+ BUG();
+ }
+}
+
+/**
+ * pch_gbe_check_options - Range Checking for Command Line Parameters
+ * @adapter: Board private structure
+ */
+void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct net_device *dev = adapter->netdev;
+ int val;
+
+ { /* Transmit Descriptor Count */
+ static const struct pch_gbe_option opt = {
+ .type = range_option,
+ .name = "Transmit Descriptors",
+ .err = "using default of "
+ __MODULE_STRING(PCH_GBE_DEFAULT_TXD),
+ .def = PCH_GBE_DEFAULT_TXD,
+ .arg = { .r = { .min = PCH_GBE_MIN_TXD,
+ .max = PCH_GBE_MAX_TXD } }
+ };
+ struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
+ tx_ring->count = TxDescriptors;
+ pch_gbe_validate_option(&tx_ring->count, &opt, adapter);
+ tx_ring->count = roundup(tx_ring->count,
+ PCH_GBE_TX_DESC_MULTIPLE);
+ }
+ { /* Receive Descriptor Count */
+ static const struct pch_gbe_option opt = {
+ .type = range_option,
+ .name = "Receive Descriptors",
+ .err = "using default of "
+ __MODULE_STRING(PCH_GBE_DEFAULT_RXD),
+ .def = PCH_GBE_DEFAULT_RXD,
+ .arg = { .r = { .min = PCH_GBE_MIN_RXD,
+ .max = PCH_GBE_MAX_RXD } }
+ };
+ struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
+ rx_ring->count = RxDescriptors;
+ pch_gbe_validate_option(&rx_ring->count, &opt, adapter);
+ rx_ring->count = roundup(rx_ring->count,
+ PCH_GBE_RX_DESC_MULTIPLE);
+ }
+ { /* Checksum Offload Enable/Disable */
+ static const struct pch_gbe_option opt = {
+ .type = enable_option,
+ .name = "Checksum Offload",
+ .err = "defaulting to Enabled",
+ .def = PCH_GBE_DEFAULT_RX_CSUM
+ };
+ val = XsumRX;
+ pch_gbe_validate_option(&val, &opt, adapter);
+ if (!val)
+ dev->features &= ~NETIF_F_RXCSUM;
+ }
+ { /* Checksum Offload Enable/Disable */
+ static const struct pch_gbe_option opt = {
+ .type = enable_option,
+ .name = "Checksum Offload",
+ .err = "defaulting to Enabled",
+ .def = PCH_GBE_DEFAULT_TX_CSUM
+ };
+ val = XsumTX;
+ pch_gbe_validate_option(&val, &opt, adapter);
+ if (!val)
+ dev->features &= ~NETIF_F_ALL_CSUM;
+ }
+ { /* Flow Control */
+ static const struct pch_gbe_option opt = {
+ .type = list_option,
+ .name = "Flow Control",
+ .err = "reading default settings from EEPROM",
+ .def = PCH_GBE_FC_DEFAULT,
+ .arg = { .l = { .nr = (int)ARRAY_SIZE(fc_list),
+ .p = fc_list } }
+ };
+ hw->mac.fc = FlowControl;
+ pch_gbe_validate_option((int *)(&hw->mac.fc),
+ &opt, adapter);
+ }
+
+ pch_gbe_check_copper_options(adapter);
+}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
new file mode 100644
index 000000000000..28bb9603d736
--- /dev/null
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This code was derived from the Intel e1000e Linux driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include "pch_gbe.h"
+#include "pch_gbe_phy.h"
+
+#define PHY_MAX_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CONTROL 0x00 /* Control Register */
+#define PHY_STATUS 0x01 /* Status Register */
+#define PHY_ID1 0x02 /* Phy Id Register (word 1) */
+#define PHY_ID2 0x03 /* Phy Id Register (word 2) */
+#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Register */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Register */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Register */
+#define PHY_EXT_STATUS 0x0F /* Extended Status Register */
+#define PHY_PHYSP_CONTROL 0x10 /* PHY Specific Control Register */
+#define PHY_EXT_PHYSP_CONTROL 0x14 /* Extended PHY Specific Control Register */
+#define PHY_LED_CONTROL 0x18 /* LED Control Register */
+#define PHY_EXT_PHYSP_STATUS 0x1B /* Extended PHY Specific Status Register */
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000 0x0040
+#define MII_CR_SPEED_100 0x2000
+#define MII_CR_SPEED_10 0x0000
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
+#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
+
+/* Phy Id Register (word 2) */
+#define PHY_REVISION_MASK 0x000F
+
+/* PHY Specific Control Register */
+#define PHYSP_CTRL_ASSERT_CRS_TX 0x0800
+
+/* Default value of PHY register */
+#define PHY_CONTROL_DEFAULT 0x1140 /* Control Register */
+#define PHY_AUTONEG_ADV_DEFAULT 0x01e0 /* Autoneg Advertisement */
+#define PHY_NEXT_PAGE_TX_DEFAULT 0x2001 /* Next Page TX */
+#define PHY_1000T_CTRL_DEFAULT 0x0300 /* 1000Base-T Control Register */
+#define PHY_PHYSP_CONTROL_DEFAULT 0x01EE /* PHY Specific Control Register */
+
+/**
+ * pch_gbe_phy_get_id - Retrieve the PHY ID and revision
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw)
+{
+ struct pch_gbe_phy_info *phy = &hw->phy;
+ s32 ret;
+ u16 phy_id1;
+ u16 phy_id2;
+
+ ret = pch_gbe_phy_read_reg_miic(hw, PHY_ID1, &phy_id1);
+ if (ret)
+ return ret;
+ ret = pch_gbe_phy_read_reg_miic(hw, PHY_ID2, &phy_id2);
+ if (ret)
+ return ret;
+ /*
+ * PHY_ID1: [bit15-0:ID(21-6)]
+ * PHY_ID2: [bit15-10:ID(5-0)][bit9-4:Model][bit3-0:revision]
+ */
+ phy->id = (u32)phy_id1;
+ phy->id = ((phy->id << 6) | ((phy_id2 & 0xFC00) >> 10));
+ phy->revision = (u32)(phy_id2 & PHY_REVISION_MASK);
+ pr_debug("phy->id : 0x%08x phy->revision : 0x%08x\n",
+ phy->id, phy->revision);
+ return 0;
+}
+
+/**
+ * pch_gbe_phy_read_reg_miic - Read MII control register
+ * @hw: Pointer to the HW structure
+ * @offset: Register offset to be read
+ * @data: Pointer to the read data
+ * Returns
+ * 0: Successful.
+ * -EINVAL: Invalid argument.
+ */
+s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data)
+{
+ struct pch_gbe_phy_info *phy = &hw->phy;
+
+ if (offset > PHY_MAX_REG_ADDRESS) {
+ pr_err("PHY Address %d is out of range\n", offset);
+ return -EINVAL;
+ }
+ *data = pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_READ,
+ offset, (u16)0);
+ return 0;
+}
+
+/**
+ * pch_gbe_phy_write_reg_miic - Write MII control register
+ * @hw: Pointer to the HW structure
+ * @offset: Register offset to be written
+ * @data: data to write to register at offset
+ * Returns
+ * 0: Successful.
+ * -EINVAL: Invalid argument.
+ */
+s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data)
+{
+ struct pch_gbe_phy_info *phy = &hw->phy;
+
+ if (offset > PHY_MAX_REG_ADDRESS) {
+ pr_err("PHY Address %d is out of range\n", offset);
+ return -EINVAL;
+ }
+ pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_WRITE,
+ offset, data);
+ return 0;
+}
+
+/**
+ * pch_gbe_phy_sw_reset - PHY software reset
+ * @hw: Pointer to the HW structure
+ */
+void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw)
+{
+ u16 phy_ctrl;
+
+ pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &phy_ctrl);
+ phy_ctrl |= MII_CR_RESET;
+ pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, phy_ctrl);
+ udelay(1);
+}
+
+/**
+ * pch_gbe_phy_hw_reset - PHY hardware reset
+ * @hw: Pointer to the HW structure
+ */
+void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw)
+{
+ pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, PHY_CONTROL_DEFAULT);
+ pch_gbe_phy_write_reg_miic(hw, PHY_AUTONEG_ADV,
+ PHY_AUTONEG_ADV_DEFAULT);
+ pch_gbe_phy_write_reg_miic(hw, PHY_NEXT_PAGE_TX,
+ PHY_NEXT_PAGE_TX_DEFAULT);
+ pch_gbe_phy_write_reg_miic(hw, PHY_1000T_CTRL, PHY_1000T_CTRL_DEFAULT);
+ pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL,
+ PHY_PHYSP_CONTROL_DEFAULT);
+}
+
+/**
+ * pch_gbe_phy_power_up - restore link in case the phy was powered down
+ * @hw: Pointer to the HW structure
+ */
+void pch_gbe_phy_power_up(struct pch_gbe_hw *hw)
+{
+ u16 mii_reg;
+
+ mii_reg = 0;
+ /* Just clear the power down bit to wake the phy back up */
+ /* according to the manual, the phy will retain its
+ * settings across a power-down/up cycle */
+ pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &mii_reg);
+ mii_reg &= ~MII_CR_POWER_DOWN;
+ pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * pch_gbe_phy_power_down - Power down PHY
+ * @hw: Pointer to the HW structure
+ */
+void pch_gbe_phy_power_down(struct pch_gbe_hw *hw)
+{
+ u16 mii_reg;
+
+ mii_reg = 0;
+ /* Power down the PHY so no link is implied when the interface is down.
+ * The PHY cannot be powered down if any of the following is TRUE:
+ * (a) WoL is enabled
+ * (b) AMT is active
+ */
+ pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+ pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, mii_reg);
+ mdelay(1);
+}
+
+/**
+ * pch_gbe_phy_set_rgmii - RGMII interface setting
+ * @hw: Pointer to the HW structure
+ */
+inline void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw)
+{
+ pch_gbe_phy_sw_reset(hw);
+}
+
+/**
+ * pch_gbe_phy_init_setting - PHY initial setting
+ * @hw: Pointer to the HW structure
+ */
+void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw)
+{
+ struct pch_gbe_adapter *adapter;
+ struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
+ int ret;
+ u16 mii_reg;
+
+ adapter = container_of(hw, struct pch_gbe_adapter, hw);
+ ret = mii_ethtool_gset(&adapter->mii, &cmd);
+ if (ret)
+ pr_err("Error: mii_ethtool_gset\n");
+
+ ethtool_cmd_speed_set(&cmd, hw->mac.link_speed);
+ cmd.duplex = hw->mac.link_duplex;
+ cmd.advertising = hw->phy.autoneg_advertised;
+ cmd.autoneg = hw->mac.autoneg;
+ pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET);
+ ret = mii_ethtool_sset(&adapter->mii, &cmd);
+ if (ret)
+ pr_err("Error: mii_ethtool_sset\n");
+
+ pch_gbe_phy_sw_reset(hw);
+
+ pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg);
+ mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX;
+ pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg);
+}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
new file mode 100644
index 000000000000..03264dc7b5ec
--- /dev/null
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This code was derived from the Intel e1000e Linux driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _PCH_GBE_PHY_H_
+#define _PCH_GBE_PHY_H_
+
+#define PCH_GBE_PHY_REGS_LEN 32
+#define PCH_GBE_PHY_RESET_DELAY_US 10
+#define PCH_GBE_MAC_IFOP_RGMII
+
+s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw);
+s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data);
+s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data);
+void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw);
+void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw);
+void pch_gbe_phy_power_up(struct pch_gbe_hw *hw);
+void pch_gbe_phy_power_down(struct pch_gbe_hw *hw);
+void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw);
+void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw);
+
+#endif /* _PCH_GBE_PHY_H_ */
diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig
new file mode 100644
index 000000000000..b97132d9dff0
--- /dev/null
+++ b/drivers/net/ethernet/packetengines/Kconfig
@@ -0,0 +1,47 @@
+#
+# Packet engine device configuration
+#
+
+config NET_PACKET_ENGINE
+ bool "Packet Engine devices"
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about packet engine devices. If you say Y, you will
+ be asked for your specific card in the following questions.
+
+if NET_PACKET_ENGINE
+
+config HAMACHI
+ tristate "Packet Engines Hamachi GNIC-II support"
+ depends on PCI
+ select NET_CORE
+ select MII
+ ---help---
+ If you have a Gigabit Ethernet card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module will be
+ called hamachi.
+
+config YELLOWFIN
+ tristate "Packet Engines Yellowfin Gigabit-NIC support (EXPERIMENTAL)"
+ depends on PCI && EXPERIMENTAL
+ select CRC32
+ ---help---
+ Say Y here if you have a Packet Engines G-NIC PCI Gigabit Ethernet
+ adapter or the SYM53C885 Ethernet controller. The Gigabit adapter is
+ used by the Beowulf Linux cluster project. See
+ <http://cesdis.gsfc.nasa.gov/linux/drivers/yellowfin.html> for more
+ information about this driver in particular and Beowulf in general.
+
+ To compile this driver as a module, choose M here: the module
+ will be called yellowfin. This is recommended.
+
+endif # NET_PACKET_ENGINE
diff --git a/drivers/net/ethernet/packetengines/Makefile b/drivers/net/ethernet/packetengines/Makefile
new file mode 100644
index 000000000000..995ccd077d0c
--- /dev/null
+++ b/drivers/net/ethernet/packetengines/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Packet Engine network device drivers.
+#
+
+obj-$(CONFIG_HAMACHI) += hamachi.o
+obj-$(CONFIG_YELLOWFIN) += yellowfin.o
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
new file mode 100644
index 000000000000..3458df3780b8
--- /dev/null
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -0,0 +1,1952 @@
+/* hamachi.c: A Packet Engines GNIC-II Gigabit Ethernet driver for Linux. */
+/*
+ Written 1998-2000 by Donald Becker.
+ Updates 2000 by Keith Underwood.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ This driver is for the Packet Engines GNIC-II PCI Gigabit Ethernet
+ adapter.
+
+ Support and updates available at
+ http://www.scyld.com/network/hamachi.html
+ [link no longer provides useful info -jgarzik]
+ or
+ http://www.parl.clemson.edu/~keithu/hamachi.html
+
+*/
+
+#define DRV_NAME "hamachi"
+#define DRV_VERSION "2.1"
+#define DRV_RELDATE "Sept 11, 2006"
+
+
+/* A few user-configurable values. */
+
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+#define final_version
+#define hamachi_debug debug
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 40;
+static int mtu;
+/* Default values selected by testing on a dual processor PIII-450 */
+/* These six interrupt control parameters may be set directly when loading the
+ * module, or through the rx_params and tx_params variables
+ */
+static int max_rx_latency = 0x11;
+static int max_rx_gap = 0x05;
+static int min_rx_pkt = 0x18;
+static int max_tx_latency = 0x00;
+static int max_tx_gap = 0x00;
+static int min_tx_pkt = 0x30;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ -Setting to > 1518 causes all frames to be copied
+ -Setting to 0 disables copies
+*/
+static int rx_copybreak;
+
+/* An override for the hardware detection of bus width.
+ Set to 1 to force 32 bit PCI bus detection. Set to 4 to force 64 bit.
+ Add 2 to disable parity detection.
+*/
+static int force32;
+
+
+/* Used to pass the media type, etc.
+ These exist for driver interoperability.
+ No media types are currently defined.
+ - The lower 4 bits are reserved for the media type.
+ - The next three bits may be set to one of the following:
+ 0x00000000 : Autodetect PCI bus
+ 0x00000010 : Force 32 bit PCI bus
+ 0x00000020 : Disable parity detection
+ 0x00000040 : Force 64 bit PCI bus
+ Default is autodetect
+ - The next bit can be used to force half-duplex. This is a bad
+ idea since no known implementations implement half-duplex, and,
+ in general, half-duplex for gigabit ethernet is a bad idea.
+ 0x00000080 : Force half-duplex
+ Default is full-duplex.
+ - In the original driver, the ninth bit could be used to force
+ full-duplex. Maintain that for compatibility
+ 0x00000200 : Force full-duplex
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+/* The Hamachi chipset supports 3 parameters each for Rx and Tx
+ * interrupt management. Parameters will be loaded as specified into
+ * the TxIntControl and RxIntControl registers.
+ *
+ * The registers are arranged as follows:
+ * 23 - 16 15 - 8 7 - 0
+ * _________________________________
+ * | min_pkt | max_gap | max_latency |
+ * ---------------------------------
+ * min_pkt : The minimum number of packets processed between
+ * interrupts.
+ * max_gap : The maximum inter-packet gap in units of 8.192 us
+ * max_latency : The absolute time between interrupts in units of 8.192 us
+ *
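+ * e.g. the driver's Rx defaults (min_rx_pkt = 0x18, max_rx_gap = 0x05,
+ * max_rx_latency = 0x11) pack into rx_params = 0x180511.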
+ */
+static int rx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings, except for
+ excessive memory usage */
+/* Empirically it appears that the Tx ring needs to be a little bigger
+ for these Gbit adapters or you get into an overrun condition really
+ easily. Also, things appear to work a bit better in back-to-back
+ configurations if the Rx ring is 8 times the size of the Tx ring.
+*/
+#define TX_RING_SIZE 64
+#define RX_RING_SIZE 512
+#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct hamachi_desc))
+#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct hamachi_desc))
+
+/*
+ * Enable netdev_ioctl. Added interrupt coalescing parameter adjustment.
+ * 2/19/99 Pete Wyckoff <wyckoff@ca.sandia.gov>
+ */
+
+/* play with 64-bit addrlen; seems to be a teensy bit slower --pw */
+/* #define ADDRLEN 64 */
+
+/*
+ * RX_CHECKSUM turns on card-generated receive checksums for
+ * TCP and UDP packets. Otherwise the upper layers do the calculation.
+ * 3/10/1999 Pete Wyckoff <wyckoff@ca.sandia.gov>
+ */
+#define RX_CHECKSUM
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (5*HZ)
+
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/unaligned.h>
+#include <asm/cache.h>
+
+static const char version[] __devinitconst =
+KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
+" Some modifications by Eric kasten <kasten@nscl.msu.edu>\n"
+" Further modifications by Keith Underwood <keithu@parl.clemson.edu>\n";
+
+
+/* IP_MF appears to be defined only in <netinet/ip.h>; however,
+ we need it for hardware checksumming support. FYI: some of
+ the definitions in <netinet/ip.h> conflict with/duplicate those in
+ other Linux headers, causing many compiler warnings.
+*/
+#ifndef IP_MF
+ #define IP_MF 0x2000 /* IP more frags from <netinet/ip.h> */
+#endif
+
+/* Define IP_OFFSET to be IPOPT_OFFSET */
+#ifndef IP_OFFSET
+ #ifdef IPOPT_OFFSET
+ #define IP_OFFSET IPOPT_OFFSET
+ #else
+ #define IP_OFFSET 2
+ #endif
+#endif
+
+#define RUN_AT(x) (jiffies + (x))
+
+#ifndef ADDRLEN
+#define ADDRLEN 32
+#endif
+
+/* Condensed bus+endian portability operations. */
+#if ADDRLEN == 64
+#define cpu_to_leXX(addr) cpu_to_le64(addr)
+#define leXX_to_cpu(addr) le64_to_cpu(addr)
+#else
+#define cpu_to_leXX(addr) cpu_to_le32(addr)
+#define leXX_to_cpu(addr) le32_to_cpu(addr)
+#endif
+
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the Packet Engines "Hamachi"
+Gigabit Ethernet chip. The only PCA currently supported is the GNIC-II 64-bit
+66 MHz PCI card.
+
+II. Board-specific settings
+
+No jumpers exist on the board. The chip supports software correction of
+various motherboard wiring errors; however, this driver does not support
+that feature.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+The Hamachi uses a typical descriptor based bus-master architecture.
+The descriptor list is similar to that used by the Digital Tulip.
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+
+This driver uses a zero-copy receive and transmit scheme similar to my
+other network drivers.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the Hamachi as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack and replaced by a newly allocated skbuff.
+
+The RX_COPYBREAK value is chosen to trade off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. Gigabit cards are typically used on generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets.
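+
+As an illustration (hypothetical value, not a tested recommendation):
+loading the module with rx_copybreak=256 would copy any frame shorter
+than 256 bytes into a freshly allocated skbuff and reuse the original
+ring buffer; note that hamachi_rx() warns that a non-zero rx_copybreak
+does not mix well with RX_CHECKSUM.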
+
+IIIb/c. Transmit/Receive Structure
+
+The Rx and Tx descriptor structures are straightforward, with no historical
+baggage that must be explained. Unlike the awkward DBDMA structure, there
+are no unused fields or option bits that had only one allowable setting.
+
+Two details should be noted about the descriptors: the chip supports both
+32-bit and 64-bit address structures, and the length field is overwritten on
+the receive descriptors. The descriptor length is set in the control word
+for each channel. The development driver uses 32-bit addresses only; however,
+64-bit addresses may be enabled for 64-bit architectures, e.g. the Alpha.
+
+IIId. Synchronization
+
+This driver is very similar to my other network drivers.
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
+the 'hmp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'hmp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+Thanks to Kim Stearns of Packet Engines for providing a pair of GNIC-II boards.
+
+IVb. References
+
+Hamachi Engineering Design Specification, 5/15/97
+(Note: This version was marked "Confidential".)
+
+IVc. Errata
+
+None noted.
+
+V. Recent Changes
+
+01/15/1999 EPK Enlargement of the TX and RX ring sizes. This appears
+ to help avoid some stall conditions -- this needs further research.
+
+01/15/1999 EPK Creation of the hamachi_tx function. This function cleans
+ the Tx ring and is called from hamachi_start_xmit (this used to be
+ called from hamachi_interrupt but it tends to delay execution of the
+ interrupt handler and thus reduce bandwidth by increasing the latency
+ between hamachi_rx()'s). Notably, some modification has been made so
+ that the cleaning loop checks only to make sure that the DescOwn bit
+ isn't set in the status flag since the card is not required
+ to set the entire flag to zero after processing.
+
+01/15/1999 EPK In the hamachi_start_xmit function, the Tx ring full flag is
+ checked before attempting to add a buffer to the ring. If the ring is full,
+ an attempt is made to free any dirty buffers and thus find space for
+ the new buffer, or the function returns non-zero, which should cause the
+ scheduler to reschedule the buffer later.
+
+01/15/1999 EPK Some adjustments were made to the chip initialization.
+ End-to-end flow control should now be fully active and the interrupt
+ algorithm vars have been changed. These could probably use further tuning.
+
+01/15/1999 EPK Added the max_{rx,tx}_latency options. These are used to
+ set the rx and tx latencies for the Hamachi interrupts. If you're having
+ problems with network stalls, try setting these to higher values.
+ Valid values are 0x00 through 0xff.
+
+01/15/1999 EPK In general, the overall bandwidth has increased and
+ latencies are better (sometimes by a factor of 2). Stalls are rare at
+ this point, however there still appears to be a bug somewhere between the
+ hardware and driver. TCP checksum errors under load also appear to be
+ eliminated at this point.
+
+01/18/1999 EPK Ensured that the DescEndRing bit was being set on both the
+ Rx and Tx rings. This appears to have been affecting whether a particular
+ peer-to-peer connection would hang under high load. I believe the Rx
+ ring was typically getting set correctly, but the Tx ring wasn't getting
+ the DescEndRing bit set during initialization. ??? Does this mean the
+ hamachi card is using the DescEndRing in processing even if a particular
+ slot isn't in use -- hypothetically, the card might be searching the
+ entire Tx ring for slots with the DescOwn bit set and then processing
+ them. If the DescEndRing bit isn't set, then it might just wander off
+ through memory until it hits a chunk of data with that bit set
+ and then loop back.
+
+02/09/1999 EPK Added Michel Mueller's fix for the TxDMA Interrupt and
+ Tx-timeout problem (TxCmd and RxCmd need only be set when idle or stopped).
+
+02/09/1999 EPK Added code to check/reset dev->tbusy in hamachi_interrupt.
+ (Michel Mueller pointed out the ``permanently busy'' potential
+ problem here).
+
+02/22/1999 EPK Added Pete Wyckoff's ioctl to control the Tx/Rx latencies.
+
+02/23/1999 EPK Verified that the interrupt status field bits for Tx were
+ incorrectly defined and corrected them (as per Michel Mueller).
+
+02/23/1999 EPK Corrected the Tx full check to check that at least 4 slots
+ were available before resetting the tbusy and tx_full flags
+ (as per Michel Mueller).
+
+03/11/1999 EPK Added Pete Wyckoff's hardware checksumming support.
+
+12/31/1999 KDU Cleaned up assorted things and added Don's code to force
+32 bit.
+
+02/20/2000 KDU Some of the control was just plain odd. Cleaned up the
+hamachi_start_xmit() and hamachi_interrupt() code. There is still some
+re-structuring I would like to do.
+
+03/01/2000 KDU Experimenting with a WIDE range of interrupt mitigation
+parameters on a dual P3-450 setup yielded the new default interrupt
+mitigation parameters. Tx should interrupt VERY infrequently due to
+Eric's scheme. Rx should be more often...
+
+03/13/2000 KDU Added a patch to make the Rx Checksum code interact
+nicely with non-linux machines.
+
+03/13/2000 KDU Experimented with some of the configuration values:
+
+ -It seems that enabling PCI performance commands for descriptors
+ (changing RxDMACtrl and TxDMACtrl lower nibble from 5 to D) has minimal
+ performance impact for any of my tests. (ttcp, netpipe, netperf) I will
+ leave them that way until I hear further feedback.
+
+ -Increasing the PCI_LATENCY_TIMER to 130
+ (2 + (burst size of 128 * (0 wait states + 1))) seems to slightly
+ degrade performance. Leaving default at 64 pending further information.
+
+03/14/2000 KDU Further tuning:
+
+ -adjusted boguscnt in hamachi_rx() to depend on interrupt
+ mitigation parameters chosen.
+
+ -Selected a set of interrupt parameters based on some extensive testing.
+ These may change with more testing.
+
+TO DO:
+
+-Consider borrowing from the acenic driver code to check PCI_COMMAND for
+PCI_COMMAND_INVALIDATE. Set maximum burst size to cache line size in
+that case.
+
+-fix the reset procedure. It doesn't quite work.
+*/
+
+/* A few values that may be tweaked. */
+/* Size of each temporary Rx buffer, calculated as:
+ * 1518 bytes (ethernet packet) + 2 bytes (to get 8 byte alignment for
+ * the card) + 8 bytes of status info + 8 bytes for the Rx Checksum
+ */
+#define PKT_BUF_SZ 1536
+
+/* For now, this is going to be set to the maximum size of an ethernet
+ * packet. Eventually, we may want to make it a variable that is
+ * related to the MTU.
+ */
+#define MAX_FRAME_SIZE 1518
+
+/* The rest of these values should never change. */
+
+static void hamachi_timer(unsigned long data);
+
+enum capability_flags {CanHaveMII=1, };
+static const struct chip_info {
+ u16 vendor_id, device_id, device_id_mask, pad;
+ const char *name;
+ void (*media_timer)(unsigned long data);
+ int flags;
+} chip_tbl[] = {
+ {0x1318, 0x0911, 0xffff, 0, "Hamachi GNIC-II", hamachi_timer, 0},
+ {0,},
+};
+
+/* Offsets to the Hamachi registers. Various sizes. */
+enum hamachi_offsets {
+ TxDMACtrl=0x00, TxCmd=0x04, TxStatus=0x06, TxPtr=0x08, TxCurPtr=0x10,
+ RxDMACtrl=0x20, RxCmd=0x24, RxStatus=0x26, RxPtr=0x28, RxCurPtr=0x30,
+ PCIClkMeas=0x060, MiscStatus=0x066, ChipRev=0x68, ChipReset=0x06B,
+ LEDCtrl=0x06C, VirtualJumpers=0x06D, GPIO=0x6E,
+ TxChecksum=0x074, RxChecksum=0x076,
+ TxIntrCtrl=0x078, RxIntrCtrl=0x07C,
+ InterruptEnable=0x080, InterruptClear=0x084, IntrStatus=0x088,
+ EventStatus=0x08C,
+ MACCnfg=0x0A0, FrameGap0=0x0A2, FrameGap1=0x0A4,
+ /* See enum MII_offsets below. */
+ MACCnfg2=0x0B0, RxDepth=0x0B8, FlowCtrl=0x0BC, MaxFrameSize=0x0CE,
+ AddrMode=0x0D0, StationAddr=0x0D2,
+ /* Gigabit AutoNegotiation. */
+ ANCtrl=0x0E0, ANStatus=0x0E2, ANXchngCtrl=0x0E4, ANAdvertise=0x0E8,
+ ANLinkPartnerAbility=0x0EA,
+ EECmdStatus=0x0F0, EEData=0x0F1, EEAddr=0x0F2,
+ FIFOcfg=0x0F8,
+};
+
+/* Offsets to the MII-mode registers. */
+enum MII_offsets {
+ MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
+ MII_Status=0xAE,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrRxDone=0x01, IntrRxPCIFault=0x02, IntrRxPCIErr=0x04,
+ IntrTxDone=0x100, IntrTxPCIFault=0x200, IntrTxPCIErr=0x400,
+ LinkChange=0x10000, NegotiationChange=0x20000, StatsMax=0x40000, };
+
+/* The Hamachi Rx and Tx buffer descriptors. */
+struct hamachi_desc {
+ __le32 status_n_length;
+#if ADDRLEN == 64
+ u32 pad;
+ __le64 addr;
+#else
+ __le32 addr;
+#endif
+};
+
+/* Bits in hamachi_desc.status_n_length */
+enum desc_status_bits {
+ DescOwn=0x80000000, DescEndPacket=0x40000000, DescEndRing=0x20000000,
+ DescIntr=0x10000000,
+};
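+/* Example: with the standard 1500-byte MTU, hamachi_init_ring() below
+ * computes a 1544-byte buffer and initializes each Rx descriptor to
+ * DescOwn | DescEndPacket | DescIntr | (1544 - 2), i.e. 0xD0000606.
+ */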
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+#define MII_CNT 4
+struct hamachi_private {
+ /* Descriptor rings first for alignment. Tx requires a second descriptor
+ for status. */
+ struct hamachi_desc *rx_ring;
+ struct hamachi_desc *tx_ring;
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ dma_addr_t tx_ring_dma;
+ dma_addr_t rx_ring_dma;
+ struct timer_list timer; /* Media selection timer. */
+ /* Frequently used and paired value: keep adjacent for cache effect. */
+ spinlock_t lock;
+ int chip_id;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int cur_tx, dirty_tx;
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int duplex_lock:1;
+ unsigned int default_port:4; /* Last dev->if_port value. */
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ struct mii_if_info mii_if; /* MII lib hooks/info */
+ unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
+ u32 rx_int_var, tx_int_var; /* interrupt control variables */
+ u32 option; /* Hold on to a copy of the options */
+ struct pci_dev *pci_dev;
+ void __iomem *base;
+};
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>, Eric Kasten <kasten@nscl.msu.edu>, Keith Underwood <keithu@parl.clemson.edu>");
+MODULE_DESCRIPTION("Packet Engines 'Hamachi' GNIC-II Gigabit Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(max_interrupt_work, int, 0);
+module_param(mtu, int, 0);
+module_param(debug, int, 0);
+module_param(min_rx_pkt, int, 0);
+module_param(max_rx_gap, int, 0);
+module_param(max_rx_latency, int, 0);
+module_param(min_tx_pkt, int, 0);
+module_param(max_tx_gap, int, 0);
+module_param(max_tx_latency, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param_array(rx_params, int, NULL, 0);
+module_param_array(tx_params, int, NULL, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+module_param(force32, int, 0);
+MODULE_PARM_DESC(max_interrupt_work, "GNIC-II maximum events handled per interrupt");
+MODULE_PARM_DESC(mtu, "GNIC-II MTU (all boards)");
+MODULE_PARM_DESC(debug, "GNIC-II debug level (0-7)");
+MODULE_PARM_DESC(min_rx_pkt, "GNIC-II minimum Rx packets processed between interrupts");
+MODULE_PARM_DESC(max_rx_gap, "GNIC-II maximum Rx inter-packet gap in 8.192 microsecond units");
+MODULE_PARM_DESC(max_rx_latency, "GNIC-II time between Rx interrupts in 8.192 microsecond units");
+MODULE_PARM_DESC(min_tx_pkt, "GNIC-II minimum Tx packets processed between interrupts");
+MODULE_PARM_DESC(max_tx_gap, "GNIC-II maximum Tx inter-packet gap in 8.192 microsecond units");
+MODULE_PARM_DESC(max_tx_latency, "GNIC-II time between Tx interrupts in 8.192 microsecond units");
+MODULE_PARM_DESC(rx_copybreak, "GNIC-II copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(rx_params, "GNIC-II min_rx_pkt+max_rx_gap+max_rx_latency");
+MODULE_PARM_DESC(tx_params, "GNIC-II min_tx_pkt+max_tx_gap+max_tx_latency");
+MODULE_PARM_DESC(options, "GNIC-II Bits 0-3: media type, bits 4-6: as force32, bit 7: half duplex, bit 9 full duplex");
+MODULE_PARM_DESC(full_duplex, "GNIC-II full duplex setting(s) (1)");
+MODULE_PARM_DESC(force32, "GNIC-II: Bit 0: 32 bit PCI, bit 1: disable parity, bit 2: 64 bit PCI (all boards)");
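+/* Example invocation (hypothetical values):
+ *   modprobe hamachi rx_params=0x00180511 options=0x200
+ * leaves the Rx mitigation at its default packing and forces full-duplex
+ * on the first board.
+ */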
+
+static int read_eeprom(void __iomem *ioaddr, int location);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int hamachi_open(struct net_device *dev);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void hamachi_timer(unsigned long data);
+static void hamachi_tx_timeout(struct net_device *dev);
+static void hamachi_init_ring(struct net_device *dev);
+static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t hamachi_interrupt(int irq, void *dev_instance);
+static int hamachi_rx(struct net_device *dev);
+static inline int hamachi_tx(struct net_device *dev);
+static void hamachi_error(struct net_device *dev, int intr_status);
+static int hamachi_close(struct net_device *dev);
+static struct net_device_stats *hamachi_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static const struct ethtool_ops ethtool_ops;
+static const struct ethtool_ops ethtool_ops_no_mii;
+
+static const struct net_device_ops hamachi_netdev_ops = {
+ .ndo_open = hamachi_open,
+ .ndo_stop = hamachi_close,
+ .ndo_start_xmit = hamachi_start_xmit,
+ .ndo_get_stats = hamachi_get_stats,
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_tx_timeout = hamachi_tx_timeout,
+ .ndo_do_ioctl = netdev_ioctl,
+};
+
+
+static int __devinit hamachi_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct hamachi_private *hmp;
+ int option, i, rx_int_var, tx_int_var, boguscnt;
+ int chip_id = ent->driver_data;
+ int irq;
+ void __iomem *ioaddr;
+ unsigned long base;
+ static int card_idx;
+ struct net_device *dev;
+ void *ring_space;
+ dma_addr_t ring_dma;
+ int ret = -ENOMEM;
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(version);
+#endif
+
+ if (pci_enable_device(pdev)) {
+ ret = -EIO;
+ goto err_out;
+ }
+
+ base = pci_resource_start(pdev, 0);
+#ifdef __alpha__ /* Really "64 bit addrs" */
+ base |= (pci_resource_start(pdev, 1) << 32);
+#endif
+
+ pci_set_master(pdev);
+
+ i = pci_request_regions(pdev, DRV_NAME);
+ if (i)
+ return i;
+
+ irq = pdev->irq;
+ ioaddr = ioremap(base, 0x400);
+ if (!ioaddr)
+ goto err_out_release;
+
+ dev = alloc_etherdev(sizeof(struct hamachi_private));
+ if (!dev)
+ goto err_out_iounmap;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+	for (i = 0; i < 6; i++)
+		dev->dev_addr[i] = read_eeprom(ioaddr, 4 + i);
+
+#if ! defined(final_version)
+ if (hamachi_debug > 4)
+ for (i = 0; i < 0x10; i++)
+ printk("%2.2x%s",
+ read_eeprom(ioaddr, i), i % 16 != 15 ? " " : "\n");
+#endif
+
+ hmp = netdev_priv(dev);
+ spin_lock_init(&hmp->lock);
+
+ hmp->mii_if.dev = dev;
+ hmp->mii_if.mdio_read = mdio_read;
+ hmp->mii_if.mdio_write = mdio_write;
+ hmp->mii_if.phy_id_mask = 0x1f;
+ hmp->mii_if.reg_num_mask = 0x1f;
+
+ ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space)
+ goto err_out_cleardev;
+ hmp->tx_ring = ring_space;
+ hmp->tx_ring_dma = ring_dma;
+
+ ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space)
+ goto err_out_unmap_tx;
+ hmp->rx_ring = ring_space;
+ hmp->rx_ring_dma = ring_dma;
+
+ /* Check for options being passed in */
+ option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* If the bus size is misidentified, do the following. */
+ force32 = force32 ? force32 :
+ ((option >= 0) ? ((option & 0x00000070) >> 4) : 0 );
+ if (force32)
+ writeb(force32, ioaddr + VirtualJumpers);
+
+	/* Hmmm, do we really need to reset the chip??? */
+ writeb(0x01, ioaddr + ChipReset);
+
+ /* After a reset, the clock speed measurement of the PCI bus will not
+ * be valid for a moment. Wait for a little while until it is. If
+ * it takes more than 10ms, forget it.
+ */
+ udelay(10);
+ i = readb(ioaddr + PCIClkMeas);
+ for (boguscnt = 0; (!(i & 0x080)) && boguscnt < 1000; boguscnt++){
+ udelay(10);
+ i = readb(ioaddr + PCIClkMeas);
+ }
+
+ hmp->base = ioaddr;
+ dev->base_addr = (unsigned long)ioaddr;
+ dev->irq = irq;
+ pci_set_drvdata(pdev, dev);
+
+ hmp->chip_id = chip_id;
+ hmp->pci_dev = pdev;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ hmp->option = option;
+ if (option & 0x200)
+ hmp->mii_if.full_duplex = 1;
+ else if (option & 0x080)
+ hmp->mii_if.full_duplex = 0;
+ hmp->default_port = option & 15;
+ if (hmp->default_port)
+ hmp->mii_if.force_media = 1;
+ }
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ hmp->mii_if.full_duplex = 1;
+
+ /* lock the duplex mode if someone specified a value */
+ if (hmp->mii_if.full_duplex || (option & 0x080))
+ hmp->duplex_lock = 1;
+
+ /* Set interrupt tuning parameters */
+ max_rx_latency = max_rx_latency & 0x00ff;
+ max_rx_gap = max_rx_gap & 0x00ff;
+ min_rx_pkt = min_rx_pkt & 0x00ff;
+ max_tx_latency = max_tx_latency & 0x00ff;
+ max_tx_gap = max_tx_gap & 0x00ff;
+ min_tx_pkt = min_tx_pkt & 0x00ff;
+
+ rx_int_var = card_idx < MAX_UNITS ? rx_params[card_idx] : -1;
+ tx_int_var = card_idx < MAX_UNITS ? tx_params[card_idx] : -1;
+ hmp->rx_int_var = rx_int_var >= 0 ? rx_int_var :
+ (min_rx_pkt << 16 | max_rx_gap << 8 | max_rx_latency);
+ hmp->tx_int_var = tx_int_var >= 0 ? tx_int_var :
+ (min_tx_pkt << 16 | max_tx_gap << 8 | max_tx_latency);
+
+
+ /* The Hamachi-specific entries in the device structure. */
+ dev->netdev_ops = &hamachi_netdev_ops;
+ if (chip_tbl[hmp->chip_id].flags & CanHaveMII)
+ SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ else
+ SET_ETHTOOL_OPS(dev, &ethtool_ops_no_mii);
+ dev->watchdog_timeo = TX_TIMEOUT;
+ if (mtu)
+ dev->mtu = mtu;
+
+ i = register_netdev(dev);
+ if (i) {
+ ret = i;
+ goto err_out_unmap_rx;
+ }
+
+ printk(KERN_INFO "%s: %s type %x at %p, %pM, IRQ %d.\n",
+ dev->name, chip_tbl[chip_id].name, readl(ioaddr + ChipRev),
+ ioaddr, dev->dev_addr, irq);
+ i = readb(ioaddr + PCIClkMeas);
+	printk(KERN_INFO "%s: %d-bit %d MHz PCI bus (%d), Virtual Jumpers "
+ "%2.2x, LPA %4.4x.\n",
+ dev->name, readw(ioaddr + MiscStatus) & 1 ? 64 : 32,
+ i ? 2000/(i&0x7f) : 0, i&0x7f, (int)readb(ioaddr + VirtualJumpers),
+ readw(ioaddr + ANLinkPartnerAbility));
+
+ if (chip_tbl[hmp->chip_id].flags & CanHaveMII) {
+ int phy, phy_idx = 0;
+ for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
+ int mii_status = mdio_read(dev, phy, MII_BMSR);
+ if (mii_status != 0xffff &&
+ mii_status != 0x0000) {
+ hmp->phys[phy_idx++] = phy;
+ hmp->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x.\n",
+ dev->name, phy, mii_status, hmp->mii_if.advertising);
+ }
+ }
+ hmp->mii_cnt = phy_idx;
+ if (hmp->mii_cnt > 0)
+ hmp->mii_if.phy_id = hmp->phys[0];
+ else
+ memset(&hmp->mii_if, 0, sizeof(hmp->mii_if));
+ }
+ /* Configure gigabit autonegotiation. */
+ writew(0x0400, ioaddr + ANXchngCtrl); /* Enable legacy links. */
+ writew(0x08e0, ioaddr + ANAdvertise); /* Set our advertise word. */
+ writew(0x1000, ioaddr + ANCtrl); /* Enable negotiation */
+
+ card_idx++;
+ return 0;
+
+err_out_unmap_rx:
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring,
+ hmp->rx_ring_dma);
+err_out_unmap_tx:
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring,
+ hmp->tx_ring_dma);
+err_out_cleardev:
+ free_netdev (dev);
+err_out_iounmap:
+ iounmap(ioaddr);
+err_out_release:
+ pci_release_regions(pdev);
+err_out:
+ return ret;
+}
+
+static int __devinit read_eeprom(void __iomem *ioaddr, int location)
+{
+ int bogus_cnt = 1000;
+
+ /* We should check busy first - per docs -KDU */
+ while ((readb(ioaddr + EECmdStatus) & 0x40) && --bogus_cnt > 0);
+ writew(location, ioaddr + EEAddr);
+ writeb(0x02, ioaddr + EECmdStatus);
+ bogus_cnt = 1000;
+ while ((readb(ioaddr + EECmdStatus) & 0x40) && --bogus_cnt > 0);
+ if (hamachi_debug > 5)
+		printk(" EEPROM status is %2.2x after %d ticks.\n",
+			(int)readb(ioaddr + EECmdStatus), 1000 - bogus_cnt);
+ return readb(ioaddr + EEData);
+}
+
+/* MII Management Data I/O accesses.
+ These routines assume the MDIO controller is idle, and do not exit until
+ the command is finished. */
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+ void __iomem *ioaddr = hmp->base;
+ int i;
+
+ /* We should check busy first - per docs -KDU */
+ for (i = 10000; i >= 0; i--)
+ if ((readw(ioaddr + MII_Status) & 1) == 0)
+ break;
+ writew((phy_id<<8) + location, ioaddr + MII_Addr);
+ writew(0x0001, ioaddr + MII_Cmd);
+ for (i = 10000; i >= 0; i--)
+ if ((readw(ioaddr + MII_Status) & 1) == 0)
+ break;
+ return readw(ioaddr + MII_Rd_Data);
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+ void __iomem *ioaddr = hmp->base;
+ int i;
+
+ /* We should check busy first - per docs -KDU */
+ for (i = 10000; i >= 0; i--)
+ if ((readw(ioaddr + MII_Status) & 1) == 0)
+ break;
+ writew((phy_id<<8) + location, ioaddr + MII_Addr);
+ writew(value, ioaddr + MII_Wr_Data);
+
+ /* Wait for the command to finish. */
+ for (i = 10000; i >= 0; i--)
+ if ((readw(ioaddr + MII_Status) & 1) == 0)
+ break;
+}
+
+
+static int hamachi_open(struct net_device *dev)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+ void __iomem *ioaddr = hmp->base;
+ int i;
+ u32 rx_int_var, tx_int_var;
+ u16 fifo_info;
+
+ i = request_irq(dev->irq, hamachi_interrupt, IRQF_SHARED, dev->name, dev);
+ if (i)
+ return i;
+
+ if (hamachi_debug > 1)
+ printk(KERN_DEBUG "%s: hamachi_open() irq %d.\n",
+ dev->name, dev->irq);
+
+ hamachi_init_ring(dev);
+
+#if ADDRLEN == 64
+ /* writellll anyone ? */
+ writel(hmp->rx_ring_dma, ioaddr + RxPtr);
+ writel(hmp->rx_ring_dma >> 32, ioaddr + RxPtr + 4);
+ writel(hmp->tx_ring_dma, ioaddr + TxPtr);
+ writel(hmp->tx_ring_dma >> 32, ioaddr + TxPtr + 4);
+#else
+ writel(hmp->rx_ring_dma, ioaddr + RxPtr);
+ writel(hmp->tx_ring_dma, ioaddr + TxPtr);
+#endif
+
+ /* TODO: It would make sense to organize this as words since the card
+ * documentation does. -KDU
+ */
+ for (i = 0; i < 6; i++)
+ writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
+
+	/* Initialize other registers: with so many, this will eventually be
+	   converted to an offset/value list. */
+
+ /* Configure the FIFO */
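+	/* Bits 7:6 of GPIO appear to encode the external FIFO SRAM fitted
+	 * to the board (see the cases below). */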
+ fifo_info = (readw(ioaddr + GPIO) & 0x00C0) >> 6;
+ switch (fifo_info){
+ case 0 :
+ /* No FIFO */
+ writew(0x0000, ioaddr + FIFOcfg);
+ break;
+ case 1 :
+ /* Configure the FIFO for 512K external, 16K used for Tx. */
+ writew(0x0028, ioaddr + FIFOcfg);
+ break;
+ case 2 :
+		/* Configure the FIFO for 1024K external, 32K used for Tx. */
+ writew(0x004C, ioaddr + FIFOcfg);
+ break;
+ case 3 :
+		/* Configure the FIFO for 2048K external, 32K used for Tx. */
+ writew(0x006C, ioaddr + FIFOcfg);
+ break;
+ default :
+ printk(KERN_WARNING "%s: Unsupported external memory config!\n",
+ dev->name);
+ /* Default to no FIFO */
+ writew(0x0000, ioaddr + FIFOcfg);
+ break;
+ }
+
+ if (dev->if_port == 0)
+ dev->if_port = hmp->default_port;
+
+
+ /* Setting the Rx mode will start the Rx process. */
+ /* If someone didn't choose a duplex, default to full-duplex */
+ if (hmp->duplex_lock != 1)
+ hmp->mii_if.full_duplex = 1;
+
+ /* always 1, takes no more time to do it */
+ writew(0x0001, ioaddr + RxChecksum);
+ writew(0x0000, ioaddr + TxChecksum);
+ writew(0x8000, ioaddr + MACCnfg); /* Soft reset the MAC */
+ writew(0x215F, ioaddr + MACCnfg);
+ writew(0x000C, ioaddr + FrameGap0);
+ /* WHAT?!?!? Why isn't this documented somewhere? -KDU */
+ writew(0x1018, ioaddr + FrameGap1);
+ /* Why do we enable receives/transmits here? -KDU */
+ writew(0x0780, ioaddr + MACCnfg2); /* Upper 16 bits control LEDs. */
+ /* Enable automatic generation of flow control frames, period 0xffff. */
+ writel(0x0030FFFF, ioaddr + FlowCtrl);
+ writew(MAX_FRAME_SIZE, ioaddr + MaxFrameSize); /* dev->mtu+14 ??? */
+
+ /* Enable legacy links. */
+ writew(0x0400, ioaddr + ANXchngCtrl); /* Enable legacy links. */
+ /* Initial Link LED to blinking red. */
+ writeb(0x03, ioaddr + LEDCtrl);
+
+ /* Configure interrupt mitigation. This has a great effect on
+	   performance, so system tuning should start here! */
+
+ rx_int_var = hmp->rx_int_var;
+ tx_int_var = hmp->tx_int_var;
+
+ if (hamachi_debug > 1) {
+ printk("max_tx_latency: %d, max_tx_gap: %d, min_tx_pkt: %d\n",
+ tx_int_var & 0x00ff, (tx_int_var & 0x00ff00) >> 8,
+ (tx_int_var & 0x00ff0000) >> 16);
+ printk("max_rx_latency: %d, max_rx_gap: %d, min_rx_pkt: %d\n",
+ rx_int_var & 0x00ff, (rx_int_var & 0x00ff00) >> 8,
+ (rx_int_var & 0x00ff0000) >> 16);
+ printk("rx_int_var: %x, tx_int_var: %x\n", rx_int_var, tx_int_var);
+ }
+
+ writel(tx_int_var, ioaddr + TxIntrCtrl);
+ writel(rx_int_var, ioaddr + RxIntrCtrl);
+
+ set_rx_mode(dev);
+
+ netif_start_queue(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ writel(0x80878787, ioaddr + InterruptEnable);
+ writew(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */
+
+ /* Configure and start the DMA channels. */
+ /* Burst sizes are in the low three bits: size = 4<<(val&7) */
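+	/* e.g. (0x005D & 7) == (0x001D & 7) == 5, so both settings request
+	 * 4 << 5 == 128 dword bursts; only the upper mode bits differ. */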
+#if ADDRLEN == 64
+ writew(0x005D, ioaddr + RxDMACtrl); /* 128 dword bursts */
+ writew(0x005D, ioaddr + TxDMACtrl);
+#else
+ writew(0x001D, ioaddr + RxDMACtrl);
+ writew(0x001D, ioaddr + TxDMACtrl);
+#endif
+ writew(0x0001, ioaddr + RxCmd);
+
+ if (hamachi_debug > 2) {
+ printk(KERN_DEBUG "%s: Done hamachi_open(), status: Rx %x Tx %x.\n",
+ dev->name, readw(ioaddr + RxStatus), readw(ioaddr + TxStatus));
+ }
+ /* Set the timer to check for link beat. */
+ init_timer(&hmp->timer);
+ hmp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
+ hmp->timer.data = (unsigned long)dev;
+ hmp->timer.function = hamachi_timer; /* timer handler */
+ add_timer(&hmp->timer);
+
+ return 0;
+}
+
+static inline int hamachi_tx(struct net_device *dev)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+
+ /* Update the dirty pointer until we find an entry that is
+ still owned by the card */
+ for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++) {
+ int entry = hmp->dirty_tx % TX_RING_SIZE;
+ struct sk_buff *skb;
+
+ if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn))
+ break;
+ /* Free the original skb. */
+ skb = hmp->tx_skbuff[entry];
+ if (skb) {
+ pci_unmap_single(hmp->pci_dev,
+ leXX_to_cpu(hmp->tx_ring[entry].addr),
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ hmp->tx_skbuff[entry] = NULL;
+ }
+ hmp->tx_ring[entry].status_n_length = 0;
+ if (entry >= TX_RING_SIZE-1)
+ hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
+ cpu_to_le32(DescEndRing);
+ dev->stats.tx_packets++;
+ }
+
+ return 0;
+}
+
+static void hamachi_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct hamachi_private *hmp = netdev_priv(dev);
+ void __iomem *ioaddr = hmp->base;
+ int next_tick = 10*HZ;
+
+ if (hamachi_debug > 2) {
+ printk(KERN_INFO "%s: Hamachi Autonegotiation status %4.4x, LPA "
+ "%4.4x.\n", dev->name, readw(ioaddr + ANStatus),
+ readw(ioaddr + ANLinkPartnerAbility));
+ printk(KERN_INFO "%s: Autonegotiation regs %4.4x %4.4x %4.4x "
+ "%4.4x %4.4x %4.4x.\n", dev->name,
+ readw(ioaddr + 0x0e0),
+ readw(ioaddr + 0x0e2),
+ readw(ioaddr + 0x0e4),
+ readw(ioaddr + 0x0e6),
+ readw(ioaddr + 0x0e8),
+ readw(ioaddr + 0x0eA));
+ }
+ /* We could do something here... nah. */
+ hmp->timer.expires = RUN_AT(next_tick);
+ add_timer(&hmp->timer);
+}
+
+static void hamachi_tx_timeout(struct net_device *dev)
+{
+ int i;
+ struct hamachi_private *hmp = netdev_priv(dev);
+ void __iomem *ioaddr = hmp->base;
+
+ printk(KERN_WARNING "%s: Hamachi transmit timed out, status %8.8x,"
+ " resetting...\n", dev->name, (int)readw(ioaddr + TxStatus));
+
+ {
+ printk(KERN_DEBUG " Rx ring %p: ", hmp->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(KERN_CONT " %8.8x",
+ le32_to_cpu(hmp->rx_ring[i].status_n_length));
+ printk(KERN_CONT "\n");
+		printk(KERN_DEBUG " Tx ring %p: ", hmp->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(KERN_CONT " %4.4x",
+ le32_to_cpu(hmp->tx_ring[i].status_n_length));
+ printk(KERN_CONT "\n");
+ }
+
+ /* Reinit the hardware and make sure the Rx and Tx processes
+ are up and running.
+ */
+ dev->if_port = 0;
+ /* The right way to do Reset. -KDU
+ * -Clear OWN bit in all Rx/Tx descriptors
+ * -Wait 50 uS for channels to go idle
+ * -Turn off MAC receiver
+ * -Issue Reset
+ */
+
+ for (i = 0; i < RX_RING_SIZE; i++)
+ hmp->rx_ring[i].status_n_length &= cpu_to_le32(~DescOwn);
+
+ /* Presume that all packets in the Tx queue are gone if we have to
+ * re-init the hardware.
+ */
+ for (i = 0; i < TX_RING_SIZE; i++){
+ struct sk_buff *skb;
+
+ if (i >= TX_RING_SIZE - 1)
+ hmp->tx_ring[i].status_n_length =
+ cpu_to_le32(DescEndRing) |
+ (hmp->tx_ring[i].status_n_length &
+ cpu_to_le32(0x0000ffff));
+ else
+ hmp->tx_ring[i].status_n_length &= cpu_to_le32(0x0000ffff);
+ skb = hmp->tx_skbuff[i];
+ if (skb){
+ pci_unmap_single(hmp->pci_dev, leXX_to_cpu(hmp->tx_ring[i].addr),
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ hmp->tx_skbuff[i] = NULL;
+ }
+ }
+
+	udelay(60); /* Sleep 60 us just for safety's sake */
+ writew(0x0002, ioaddr + RxCmd); /* STOP Rx */
+
+ writeb(0x01, ioaddr + ChipReset); /* Reinit the hardware */
+
+ hmp->tx_full = 0;
+ hmp->cur_rx = hmp->cur_tx = 0;
+ hmp->dirty_rx = hmp->dirty_tx = 0;
+ /* Rx packets are also presumed lost; however, we need to make sure a
+	 * ring of buffers is intact. -KDU
+ */
+ for (i = 0; i < RX_RING_SIZE; i++){
+ struct sk_buff *skb = hmp->rx_skbuff[i];
+
+ if (skb){
+ pci_unmap_single(hmp->pci_dev,
+ leXX_to_cpu(hmp->rx_ring[i].addr),
+ hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(skb);
+ hmp->rx_skbuff[i] = NULL;
+ }
+ }
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(dev, hmp->rx_buf_sz);
+ hmp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+
+ hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
+ skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+ hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
+ DescEndPacket | DescIntr | (hmp->rx_buf_sz - 2));
+ }
+ hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+ /* Mark the last entry as wrapping the ring. */
+ hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
+
+ /* Trigger an immediate transmit demand. */
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ dev->stats.tx_errors++;
+
+ /* Restart the chip's Tx/Rx processes . */
+ writew(0x0002, ioaddr + TxCmd); /* STOP Tx */
+ writew(0x0001, ioaddr + TxCmd); /* START Tx */
+ writew(0x0001, ioaddr + RxCmd); /* START Rx */
+
+ netif_wake_queue(dev);
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void hamachi_init_ring(struct net_device *dev)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+ int i;
+
+ hmp->tx_full = 0;
+ hmp->cur_rx = hmp->cur_tx = 0;
+ hmp->dirty_rx = hmp->dirty_tx = 0;
+
+ /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
+ * card needs room to do 8 byte alignment, +2 so we can reserve
+ * the first 2 bytes, and +16 gets room for the status word from the
+ * card. -KDU
+ */
+ hmp->rx_buf_sz = (dev->mtu <= 1492 ? PKT_BUF_SZ :
+ (((dev->mtu+26+7) & ~7) + 16));
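+	/* e.g. a hypothetical 9000-byte MTU would yield
+	 * ((9000 + 26 + 7) & ~7) + 16 == 9048 bytes per Rx buffer. */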
+
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ hmp->rx_ring[i].status_n_length = 0;
+ hmp->rx_skbuff[i] = NULL;
+ }
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);
+ hmp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
+ skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+		/* -2 because it doesn't REALLY have those first 2 bytes -KDU */
+		hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
+			DescEndPacket | DescIntr | (hmp->rx_buf_sz - 2));
+ }
+ hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+ hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ hmp->tx_skbuff[i] = NULL;
+ hmp->tx_ring[i].status_n_length = 0;
+ }
+ /* Mark the last entry of the ring */
+ hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
+}
+
+
+static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+ unsigned entry;
+ u16 status;
+
+ /* Ok, now make sure that the queue has space before trying to
+	   add another skbuff. If we return non-zero, the scheduler
+ should interpret this as a queue full and requeue the buffer
+ for later.
+ */
+ if (hmp->tx_full) {
+ /* We should NEVER reach this point -KDU */
+ printk(KERN_WARNING "%s: Hamachi transmit queue full at slot %d.\n",dev->name, hmp->cur_tx);
+
+ /* Wake the potentially-idle transmit channel. */
+ /* If we don't need to read status, DON'T -KDU */
+		status = readw(hmp->base + TxStatus);
+		if (!(status & 0x0001) || (status & 0x0002))
+ writew(0x0001, hmp->base + TxCmd);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Caution: the write order is important here, set the field
+ with the "ownership" bits last. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = hmp->cur_tx % TX_RING_SIZE;
+
+ hmp->tx_skbuff[entry] = skb;
+
+ hmp->tx_ring[entry].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
+ skb->data, skb->len, PCI_DMA_TODEVICE));
+
+ /* Hmmmm, could probably put a DescIntr on these, but the way
+ the driver is currently coded makes Tx interrupts unnecessary
+ since the clearing of the Tx ring is handled by the start_xmit
+ routine. This organization helps mitigate the interrupts a
+ bit and probably renders the max_tx_latency param useless.
+
+ Update: Putting a DescIntr bit on all of the descriptors and
+	   mitigating interrupt frequency with the min_tx_pkt parameter. -KDU
+ */
+ if (entry >= TX_RING_SIZE-1) /* Wrap ring */
+ hmp->tx_ring[entry].status_n_length = cpu_to_le32(DescOwn |
+ DescEndPacket | DescEndRing | DescIntr | skb->len);
+ else
+ hmp->tx_ring[entry].status_n_length = cpu_to_le32(DescOwn |
+ DescEndPacket | DescIntr | skb->len);
+ hmp->cur_tx++;
+
+ /* Non-x86 Todo: explicitly flush cache lines here. */
+
+ /* Wake the potentially-idle transmit channel. */
+ /* If we don't need to read status, DON'T -KDU */
+	status = readw(hmp->base + TxStatus);
+	if (!(status & 0x0001) || (status & 0x0002))
+ writew(0x0001, hmp->base + TxCmd);
+
+ /* Immediately before returning, let's clear as many entries as we can. */
+ hamachi_tx(dev);
+
+ /* We should kick the bottom half here, since we are not accepting
+ * interrupts with every packet. i.e. realize that Gigabit ethernet
+ * can transmit faster than ordinary machines can load packets;
+ * hence, any packet that got put off because we were in the transmit
+ * routine should IMMEDIATELY get a chance to be re-queued. -KDU
+ */
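+	/* Keep the queue stopped unless more than 4 slots are free
+	 * (see the 02/23/1999 note above). */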
+ if ((hmp->cur_tx - hmp->dirty_tx) < (TX_RING_SIZE - 4))
+ netif_wake_queue(dev); /* Typical path */
+ else {
+ hmp->tx_full = 1;
+ netif_stop_queue(dev);
+ }
+
+ if (hamachi_debug > 4) {
+ printk(KERN_DEBUG "%s: Hamachi transmit frame #%d queued in slot %d.\n",
+ dev->name, hmp->cur_tx, entry);
+ }
+ return NETDEV_TX_OK;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t hamachi_interrupt(int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct hamachi_private *hmp = netdev_priv(dev);
+ void __iomem *ioaddr = hmp->base;
+ long boguscnt = max_interrupt_work;
+ int handled = 0;
+
+#ifndef final_version /* Can never occur. */
+ if (dev == NULL) {
+ printk (KERN_ERR "hamachi_interrupt(): irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+#endif
+
+ spin_lock(&hmp->lock);
+
+ do {
+ u32 intr_status = readl(ioaddr + InterruptClear);
+
+ if (hamachi_debug > 4)
+ printk(KERN_DEBUG "%s: Hamachi interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (intr_status == 0)
+ break;
+
+ handled = 1;
+
+ if (intr_status & IntrRxDone)
+ hamachi_rx(dev);
+
+ if (intr_status & IntrTxDone){
+ /* This code should RARELY need to execute. After all, this is
+ * a gigabit link, it should consume packets as fast as we put
+ * them in AND we clear the Tx ring in hamachi_start_xmit().
+ */
+ if (hmp->tx_full){
+ for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++){
+ int entry = hmp->dirty_tx % TX_RING_SIZE;
+ struct sk_buff *skb;
+
+ if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn))
+ break;
+ skb = hmp->tx_skbuff[entry];
+ /* Free the original skb. */
+ if (skb){
+ pci_unmap_single(hmp->pci_dev,
+ leXX_to_cpu(hmp->tx_ring[entry].addr),
+ skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ hmp->tx_skbuff[entry] = NULL;
+ }
+ hmp->tx_ring[entry].status_n_length = 0;
+ if (entry >= TX_RING_SIZE-1)
+ hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
+ cpu_to_le32(DescEndRing);
+ dev->stats.tx_packets++;
+ }
+ if (hmp->cur_tx - hmp->dirty_tx < TX_RING_SIZE - 4){
+ /* The ring is no longer full */
+ hmp->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+ } else {
+ netif_wake_queue(dev);
+ }
+ }
+
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status &
+ (IntrTxPCIFault | IntrTxPCIErr | IntrRxPCIFault | IntrRxPCIErr |
+ LinkChange | NegotiationChange | StatsMax))
+ hamachi_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, status=0x%4.4x.\n",
+ dev->name, intr_status);
+ break;
+ }
+ } while (1);
+
+ if (hamachi_debug > 3)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, readl(ioaddr + IntrStatus));
+
+#ifndef final_version
+	/* Code that should never be run! Perhaps remove after testing. */
+ {
+ static int stopit = 10;
+ if (dev->start == 0 && --stopit < 0) {
+ printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n",
+ dev->name);
+ free_irq(irq, dev);
+ }
+ }
+#endif
+
+ spin_unlock(&hmp->lock);
+ return IRQ_RETVAL(handled);
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int hamachi_rx(struct net_device *dev)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+ int entry = hmp->cur_rx % RX_RING_SIZE;
+ int boguscnt = (hmp->dirty_rx + RX_RING_SIZE) - hmp->cur_rx;
+
+ if (hamachi_debug > 4) {
+ printk(KERN_DEBUG " In hamachi_rx(), entry %d status %4.4x.\n",
+			entry, le32_to_cpu(hmp->rx_ring[entry].status_n_length));
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (1) {
+ struct hamachi_desc *desc = &(hmp->rx_ring[entry]);
+ u32 desc_status = le32_to_cpu(desc->status_n_length);
+ u16 data_size = desc_status; /* Implicit truncate */
+ u8 *buf_addr;
+ s32 frame_status;
+
+ if (desc_status & DescOwn)
+ break;
+ pci_dma_sync_single_for_cpu(hmp->pci_dev,
+ leXX_to_cpu(desc->addr),
+ hmp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ buf_addr = (u8 *) hmp->rx_skbuff[entry]->data;
+ frame_status = get_unaligned_le32(&(buf_addr[data_size - 12]));
+ if (hamachi_debug > 4)
+ printk(KERN_DEBUG " hamachi_rx() status was %8.8x.\n",
+ frame_status);
+ if (--boguscnt < 0)
+ break;
+ if ( ! (desc_status & DescEndPacket)) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+ "multiple buffers, entry %#x length %d status %4.4x!\n",
+ dev->name, hmp->cur_rx, data_size, desc_status);
+ printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
+ dev->name, desc, &hmp->rx_ring[hmp->cur_rx % RX_RING_SIZE]);
+ printk(KERN_WARNING "%s: Oversized Ethernet frame -- next status %x/%x last status %x.\n",
+ dev->name,
+ le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0xffff0000,
+ le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0x0000ffff,
+ le32_to_cpu(hmp->rx_ring[(hmp->cur_rx-1) % RX_RING_SIZE].status_n_length));
+ dev->stats.rx_length_errors++;
+ } /* else Omit for prototype errata??? */
+ if (frame_status & 0x00380000) {
+ /* There was an error. */
+ if (hamachi_debug > 2)
+ printk(KERN_DEBUG " hamachi_rx() Rx error was %8.8x.\n",
+ frame_status);
+ dev->stats.rx_errors++;
+ if (frame_status & 0x00600000)
+ dev->stats.rx_length_errors++;
+ if (frame_status & 0x00080000)
+ dev->stats.rx_frame_errors++;
+ if (frame_status & 0x00100000)
+ dev->stats.rx_crc_errors++;
+ if (frame_status < 0)
+ dev->stats.rx_dropped++;
+ } else {
+ struct sk_buff *skb;
+ /* Omit CRC */
+ u16 pkt_len = (frame_status & 0x07ff) - 4;
+#ifdef RX_CHECKSUM
+ u32 pfck = *(u32 *) &buf_addr[data_size - 8];
+#endif
+
+
+#ifndef final_version
+ if (hamachi_debug > 4)
+ printk(KERN_DEBUG " hamachi_rx() normal Rx pkt length %d"
+ " of %d, bogus_cnt %d.\n",
+ pkt_len, data_size, boguscnt);
+ if (hamachi_debug > 5)
+ printk(KERN_DEBUG"%s: rx status %8.8x %8.8x %8.8x %8.8x %8.8x.\n",
+ dev->name,
+ *(s32*)&(buf_addr[data_size - 20]),
+ *(s32*)&(buf_addr[data_size - 16]),
+ *(s32*)&(buf_addr[data_size - 12]),
+ *(s32*)&(buf_addr[data_size - 8]),
+ *(s32*)&(buf_addr[data_size - 4]));
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak &&
+ (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+#ifdef RX_CHECKSUM
+ printk(KERN_ERR "%s: rx_copybreak non-zero "
+ "not good with RX_CHECKSUM\n", dev->name);
+#endif
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ pci_dma_sync_single_for_cpu(hmp->pci_dev,
+ leXX_to_cpu(hmp->rx_ring[entry].addr),
+ hmp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ /* Call copy + cksum if available. */
+#if 1 || USE_IP_COPYSUM
+ skb_copy_to_linear_data(skb,
+ hmp->rx_skbuff[entry]->data, pkt_len);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len), hmp->rx_ring_dma
+ + entry*sizeof(*desc), pkt_len);
+#endif
+ pci_dma_sync_single_for_device(hmp->pci_dev,
+ leXX_to_cpu(hmp->rx_ring[entry].addr),
+ hmp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+			} else {
+				skb = hmp->rx_skbuff[entry];
+				pci_unmap_single(hmp->pci_dev,
+					leXX_to_cpu(hmp->rx_ring[entry].addr),
+					hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+				skb_put(skb, pkt_len);
+				hmp->rx_skbuff[entry] = NULL;
+			}
+ skb->protocol = eth_type_trans(skb, dev);
+
+
+#ifdef RX_CHECKSUM
+ /* TCP or UDP on ipv4, DIX encoding */
+ if (pfck>>24 == 0x91 || pfck>>24 == 0x51) {
+ struct iphdr *ih = (struct iphdr *) skb->data;
+			/* Check that the IP packet is at least 46 bytes; otherwise
+ * there may be pad bytes included in the hardware checksum.
+ * This wouldn't happen if everyone padded with 0.
+ */
+ if (ntohs(ih->tot_len) >= 46){
+ /* don't worry about frags */
+ if (!(ih->frag_off & cpu_to_be16(IP_MF|IP_OFFSET))) {
+ u32 inv = *(u32 *) &buf_addr[data_size - 16];
+ u32 *p = (u32 *) &buf_addr[data_size - 20];
+ register u32 crc, p_r, p_r1;
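+					/* Best guess at the (undocumented)
+					 * scheme: 'inv' encodes which trailing
+					 * bytes the card's 16-bit sum did not
+					 * cover, so the switch below folds the
+					 * last one or two words back out. */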
+
+ if (inv & 4) {
+ inv &= ~4;
+ --p;
+ }
+ p_r = *p;
+ p_r1 = *(p-1);
+ switch (inv) {
+ case 0:
+ crc = (p_r & 0xffff) + (p_r >> 16);
+ break;
+ case 1:
+ crc = (p_r >> 16) + (p_r & 0xffff)
+ + (p_r1 >> 16 & 0xff00);
+ break;
+ case 2:
+ crc = p_r + (p_r1 >> 16);
+ break;
+ case 3:
+ crc = p_r + (p_r1 & 0xff00) + (p_r1 >> 16);
+ break;
+ default: /*NOTREACHED*/ crc = 0;
+ }
+ if (crc & 0xffff0000) {
+ crc &= 0xffff;
+ ++crc;
+ }
+ /* tcp/udp will add in pseudo */
+ skb->csum = ntohs(pfck & 0xffff);
+ if (skb->csum > crc)
+ skb->csum -= crc;
+ else
+ skb->csum += (~crc & 0xffff);
+ /*
+ * could do the pseudo myself and return
+ * CHECKSUM_UNNECESSARY
+ */
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+ }
+ }
+#endif /* RX_CHECKSUM */
+
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ }
+ entry = (++hmp->cur_rx) % RX_RING_SIZE;
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; hmp->cur_rx - hmp->dirty_rx > 0; hmp->dirty_rx++) {
+ struct hamachi_desc *desc;
+
+ entry = hmp->dirty_rx % RX_RING_SIZE;
+ desc = &(hmp->rx_ring[entry]);
+ if (hmp->rx_skbuff[entry] == NULL) {
+ struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);
+
+ hmp->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
+ skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+ }
+ desc->status_n_length = cpu_to_le32(hmp->rx_buf_sz);
+ if (entry >= RX_RING_SIZE-1)
+ desc->status_n_length |= cpu_to_le32(DescOwn |
+ DescEndPacket | DescEndRing | DescIntr);
+ else
+ desc->status_n_length |= cpu_to_le32(DescOwn |
+ DescEndPacket | DescIntr);
+ }
+
+ /* Restart Rx engine if stopped. */
+ /* If we don't need to check status, don't. -KDU */
+ if (readw(hmp->base + RxStatus) & 0x0002)
+ writew(0x0001, hmp->base + RxCmd);
+
+ return 0;
+}
+
+/* This is more properly named "uncommon interrupt events", as it covers more
+ than just errors. */
+static void hamachi_error(struct net_device *dev, int intr_status)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+ void __iomem *ioaddr = hmp->base;
+
+ if (intr_status & (LinkChange|NegotiationChange)) {
+ if (hamachi_debug > 1)
+ printk(KERN_INFO "%s: Link changed: AutoNegotiation Ctrl"
+ " %4.4x, Status %4.4x %4.4x Intr status %4.4x.\n",
+ dev->name, readw(ioaddr + 0x0E0), readw(ioaddr + 0x0E2),
+ readw(ioaddr + ANLinkPartnerAbility),
+ readl(ioaddr + IntrStatus));
+ if (readw(ioaddr + ANStatus) & 0x20)
+ writeb(0x01, ioaddr + LEDCtrl);
+ else
+ writeb(0x03, ioaddr + LEDCtrl);
+ }
+ if (intr_status & StatsMax) {
+ hamachi_get_stats(dev);
+ /* Read the overflow bits to clear. */
+ readl(ioaddr + 0x370);
+ readl(ioaddr + 0x3F0);
+ }
+ if ((intr_status & ~(LinkChange|StatsMax|NegotiationChange|IntrRxDone|IntrTxDone)) &&
+ hamachi_debug)
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* Hmmmmm, it's not clear how to recover from PCI faults. */
+ if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
+ dev->stats.tx_fifo_errors++;
+ if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
+ dev->stats.rx_fifo_errors++;
+}
+
+static int hamachi_close(struct net_device *dev)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+ void __iomem *ioaddr = hmp->base;
+ struct sk_buff *skb;
+ int i;
+
+ netif_stop_queue(dev);
+
+ if (hamachi_debug > 1) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x Rx %4.4x Int %2.2x.\n",
+ dev->name, readw(ioaddr + TxStatus),
+ readw(ioaddr + RxStatus), readl(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, hmp->cur_tx, hmp->dirty_tx, hmp->cur_rx, hmp->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ writel(0x0000, ioaddr + InterruptEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
+ writel(2, ioaddr + RxCmd);
+ writew(2, ioaddr + TxCmd);
+
+#ifdef __i386__
+ if (hamachi_debug > 2) {
+ printk(KERN_DEBUG " Tx ring at %8.8x:\n",
+ (int)hmp->tx_ring_dma);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x.\n",
+ readl(ioaddr + TxCurPtr) == (long)&hmp->tx_ring[i] ? '>' : ' ',
+ i, hmp->tx_ring[i].status_n_length, hmp->tx_ring[i].addr);
+ printk(KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)hmp->rx_ring_dma);
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " %c #%d desc. %4.4x %8.8x\n",
+ readl(ioaddr + RxCurPtr) == (long)&hmp->rx_ring[i] ? '>' : ' ',
+ i, hmp->rx_ring[i].status_n_length, hmp->rx_ring[i].addr);
+ if (hamachi_debug > 6) {
+ if (*(u8*)hmp->rx_skbuff[i]->data != 0x69) {
+ u16 *addr = (u16 *)
+ hmp->rx_skbuff[i]->data;
+ int j;
+ printk(KERN_DEBUG "Addr: ");
+ for (j = 0; j < 0x50; j++)
+ printk(" %4.4x", addr[j]);
+ printk("\n");
+ }
+ }
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ del_timer_sync(&hmp->timer);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ skb = hmp->rx_skbuff[i];
+ hmp->rx_ring[i].status_n_length = 0;
+ if (skb) {
+ pci_unmap_single(hmp->pci_dev,
+ leXX_to_cpu(hmp->rx_ring[i].addr),
+ hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(skb);
+ hmp->rx_skbuff[i] = NULL;
+ }
+ hmp->rx_ring[i].addr = cpu_to_leXX(0xBADF00D0); /* An invalid address. */
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ skb = hmp->tx_skbuff[i];
+ if (skb) {
+ pci_unmap_single(hmp->pci_dev,
+ leXX_to_cpu(hmp->tx_ring[i].addr),
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ hmp->tx_skbuff[i] = NULL;
+ }
+ }
+
+ writeb(0x00, ioaddr + LEDCtrl);
+
+ return 0;
+}
+
+static struct net_device_stats *hamachi_get_stats(struct net_device *dev)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+ void __iomem *ioaddr = hmp->base;
+
+ /* We should lock this segment of code for SMP eventually, although
+ the vulnerability window is very small and statistics are
+ non-critical. */
+ /* Ok, what goes here? This appears to be stuck at 21 packets
+ according to ifconfig. It does get incremented in hamachi_tx(),
+ so I think I'll comment it out here and see if better things
+ happen.
+ */
+ /* dev->stats.tx_packets = readl(ioaddr + 0x000); */
+
+ /* Total Uni+Brd+Multi */
+ dev->stats.rx_bytes = readl(ioaddr + 0x330);
+ /* Total Uni+Brd+Multi */
+ dev->stats.tx_bytes = readl(ioaddr + 0x3B0);
+ /* Multicast Rx */
+ dev->stats.multicast = readl(ioaddr + 0x320);
+
+ /* Over+Undersized */
+ dev->stats.rx_length_errors = readl(ioaddr + 0x368);
+ /* Jabber */
+ dev->stats.rx_over_errors = readl(ioaddr + 0x35C);
+ /* Jabber */
+ dev->stats.rx_crc_errors = readl(ioaddr + 0x360);
+ /* Symbol Errs */
+ dev->stats.rx_frame_errors = readl(ioaddr + 0x364);
+ /* Dropped */
+ dev->stats.rx_missed_errors = readl(ioaddr + 0x36C);
+
+ return &dev->stats;
+}
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct hamachi_private *hmp = netdev_priv(dev);
+ void __iomem *ioaddr = hmp->base;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ writew(0x000F, ioaddr + AddrMode);
+ } else if ((netdev_mc_count(dev) > 63) || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ writew(0x000B, ioaddr + AddrMode);
+ } else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */
+ struct netdev_hw_addr *ha;
+ int i = 0;
+
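+		/* Each CAM slot takes the low 4 MAC bytes at 0x100 + i*8 and
+		 * the upper 2 bytes at 0x104 + i*8; 0x20000 appears to act as
+		 * a "valid" flag for the entry. */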
+ netdev_for_each_mc_addr(ha, dev) {
+ writel(*(u32 *)(ha->addr), ioaddr + 0x100 + i*8);
+ writel(0x20000 | (*(u16 *)&ha->addr[4]),
+ ioaddr + 0x104 + i*8);
+ i++;
+ }
+ /* Clear remaining entries. */
+ for (; i < 64; i++)
+ writel(0, ioaddr + 0x104 + i*8);
+ writew(0x0003, ioaddr + AddrMode);
+ } else { /* Normal, unicast/broadcast-only mode. */
+ writew(0x0001, ioaddr + AddrMode);
+ }
+}
+
+static int check_if_running(struct net_device *dev)
+{
+ if (!netif_running(dev))
+ return -EINVAL;
+ return 0;
+}
+
+static void hamachi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct hamachi_private *np = netdev_priv(dev);
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(np->pci_dev));
+}
+
+static int hamachi_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct hamachi_private *np = netdev_priv(dev);
+ spin_lock_irq(&np->lock);
+ mii_ethtool_gset(&np->mii_if, ecmd);
+ spin_unlock_irq(&np->lock);
+ return 0;
+}
+
+static int hamachi_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct hamachi_private *np = netdev_priv(dev);
+ int res;
+ spin_lock_irq(&np->lock);
+ res = mii_ethtool_sset(&np->mii_if, ecmd);
+ spin_unlock_irq(&np->lock);
+ return res;
+}
+
+static int hamachi_nway_reset(struct net_device *dev)
+{
+ struct hamachi_private *np = netdev_priv(dev);
+ return mii_nway_restart(&np->mii_if);
+}
+
+static u32 hamachi_get_link(struct net_device *dev)
+{
+ struct hamachi_private *np = netdev_priv(dev);
+ return mii_link_ok(&np->mii_if);
+}
+
+static const struct ethtool_ops ethtool_ops = {
+ .begin = check_if_running,
+ .get_drvinfo = hamachi_get_drvinfo,
+ .get_settings = hamachi_get_settings,
+ .set_settings = hamachi_set_settings,
+ .nway_reset = hamachi_nway_reset,
+ .get_link = hamachi_get_link,
+};
+
+static const struct ethtool_ops ethtool_ops_no_mii = {
+ .begin = check_if_running,
+ .get_drvinfo = hamachi_get_drvinfo,
+};
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct hamachi_private *np = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(rq);
+ int rc;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (cmd == (SIOCDEVPRIVATE+3)) { /* set rx,tx intr params */
+ u32 *d = (u32 *)&rq->ifr_ifru;
+ /* We should add this check here, or else an ordinary user could
+ * do nasty things. -KDU
+ *
+ * TODO: Shut down the Rx and Tx engines while doing this.
+ */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ writel(d[0], np->base + TxIntrCtrl);
+ writel(d[1], np->base + RxIntrCtrl);
+ printk(KERN_NOTICE "%s: tx %08x, rx %08x intr\n", dev->name,
+ (u32) readl(np->base + TxIntrCtrl),
+ (u32) readl(np->base + RxIntrCtrl));
+ rc = 0;
+ }
+
+ else {
+ spin_lock_irq(&np->lock);
+ rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
+ spin_unlock_irq(&np->lock);
+ }
+
+ return rc;
+}
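+
+/* A hypothetical user-space sketch of the private ioctl above; the layout
+ * of the two u32 words in ifr_ifru is whatever the chip's Tx/Rx interrupt
+ * mitigation registers expect, and is not a documented ABI:
+ *
+ * struct ifreq ifr;
+ * u32 *d = (u32 *)&ifr.ifr_ifru;
+ * strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
+ * d[0] = tx_intr_params;
+ * d[1] = rx_intr_params;
+ * ioctl(fd, SIOCDEVPRIVATE + 3, &ifr); // needs CAP_NET_ADMIN
+ */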
+
+
+static void __devexit hamachi_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct hamachi_private *hmp = netdev_priv(dev);
+
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring,
+ hmp->rx_ring_dma);
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring,
+ hmp->tx_ring_dma);
+ unregister_netdev(dev);
+ iounmap(hmp->base);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+static DEFINE_PCI_DEVICE_TABLE(hamachi_pci_tbl) = {
+ { 0x1318, 0x0911, PCI_ANY_ID, PCI_ANY_ID, },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, hamachi_pci_tbl);
+
+static struct pci_driver hamachi_driver = {
+ .name = DRV_NAME,
+ .id_table = hamachi_pci_tbl,
+ .probe = hamachi_init_one,
+ .remove = __devexit_p(hamachi_remove_one),
+};
+
+static int __init hamachi_init (void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk(version);
+#endif
+ return pci_register_driver(&hamachi_driver);
+}
+
+static void __exit hamachi_exit (void)
+{
+ pci_unregister_driver(&hamachi_driver);
+}
+
+
+module_init(hamachi_init);
+module_exit(hamachi_exit);
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
new file mode 100644
index 000000000000..db44e9af03c3
--- /dev/null
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -0,0 +1,1430 @@
+/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
+/*
+ Written 1997-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
+ It also supports the Symbios Logic version of the same chip core.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/yellowfin.html
+ [link no longer provides useful info -jgarzik]
+
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME "yellowfin"
+#define DRV_VERSION "2.1"
+#define DRV_RELDATE "Sep 11, 2006"
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+static int mtu;
+#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
+/* System-wide count of bogus-rx frames. */
+static int bogus_rx;
+static int dma_ctrl = 0x004A0263; /* Constrained by errata */
+static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
+#elif defined(YF_NEW) /* A future perfect board :->. */
+static int dma_ctrl = 0x00CAC277; /* Override when loading module! */
+static int fifo_cfg = 0x0028;
+#else
+static const int dma_ctrl = 0x004A0263; /* Constrained by errata */
+static const int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
+#endif
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1514 effectively disables this feature. */
+static int rx_copybreak;
+
+/* Used to pass the media type, etc.
+ No media types are currently defined. These exist for driver
+ interoperability.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Do ugly workaround for GX server chipset errata. */
+static int gx_fix;
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for efficiency.
+ Making the Tx ring too long decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE 16
+#define TX_QUEUE_SIZE 12 /* Must be > 4 && <= TX_RING_SIZE */
+#define RX_RING_SIZE 64
+#define STATUS_TOTAL_SIZE (TX_RING_SIZE * sizeof(struct tx_status_words))
+#define TX_TOTAL_SIZE (2 * TX_RING_SIZE * sizeof(struct yellowfin_desc))
+#define RX_TOTAL_SIZE (RX_RING_SIZE * sizeof(struct yellowfin_desc))
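+/* With the 16-byte yellowfin_desc and 8-byte tx_status_words defined
+ below, these come to 128, 512 and 1024 bytes per board, respectively. */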
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2*HZ)
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+#define yellowfin_debug debug
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/unaligned.h>
+#include <asm/io.h>
+
+/* These identify the driver base version and may not be removed. */
+static const char version[] __devinitconst =
+ KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n"
+ " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(max_interrupt_work, int, 0);
+module_param(mtu, int, 0);
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+module_param(gx_fix, int, 0);
+MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
+MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
+MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
+MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
+MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
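+
+/* An illustrative invocation (parameter values are examples only):
+ *
+ * modprobe yellowfin debug=3 rx_copybreak=200 options=0x200,0x200
+ *
+ * This raises the message level, copies received frames shorter than
+ * 200 bytes into fresh skbuffs, and sets the 0x200 bit of the option
+ * word (which yellowfin_init_one() below reads as "force full duplex")
+ * for the first two boards.
+ */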
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the Packet Engines "Yellowfin" Gigabit
+Ethernet adapter. The G-NIC 64-bit PCI card is supported, as well as the
+Symbios 53C885E dual function chip.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS preferably should assign the
+PCI INTA signal to an otherwise unused system IRQ line.
+Note: Kernel versions earlier than 1.3.73 do not support shared PCI
+interrupt lines.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
+This is a descriptor list scheme similar to that used by the EEPro100 and
+Tulip. This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the Yellowfin as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack and replaced by a newly allocated skbuff.
+
+The RX_COPYBREAK value is chosen to trade off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. For small frames the copying cost is negligible (esp. considering
+that we are pre-loading the cache with immediately useful header
+information). For large frames the copying cost is non-trivial, and the
+larger copy might flush the cache of useful data.
+
+IIIb. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
+the 'yp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
+Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
+and an AlphaStation to verify the Alpha port!
+
+IVb. References
+
+Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
+Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
+ Data Manual v3.0
+http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
+http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
+
+IVc. Errata
+
+See Packet Engines confidential appendix (prototype chips only).
+*/
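+
+#if 0
+/* A distilled sketch of the copybreak scheme described above; the real
+ * receive path is yellowfin_rx() further down, and this compiled-out
+ * outline only illustrates the size test. */
+static struct sk_buff *copybreak_rx(struct sk_buff *rx_skb, int pkt_len)
+{
+ struct sk_buff *skb;
+
+ if (pkt_len > rx_copybreak) {
+ /* Big frame: pass the ring buffer itself up the stack;
+ * the refill loop will allocate a replacement. */
+ skb = rx_skb;
+ skb_put(skb, pkt_len);
+ } else {
+ /* Small frame: copy into a right-sized skbuff so the
+ * full-sized ring buffer can be reused immediately. */
+ skb = dev_alloc_skb(pkt_len + 2);
+ if (skb == NULL)
+ return NULL;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
+ skb_put(skb, pkt_len);
+ }
+ return skb;
+}
+#endif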
+
+
+
+enum capability_flags {
+ HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
+ HasMACAddrBug=32, /* Only on early revs. */
+ DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
+};
+
+/* The PCI I/O space extent. */
+enum {
+ YELLOWFIN_SIZE = 0x100,
+};
+
+struct pci_id_info {
+ const char *name;
+ struct match_info {
+ int pci, pci_mask, subsystem, subsystem_mask;
+ int revision, revision_mask; /* Only 8 bits. */
+ } id;
+ int drv_flags; /* Driver use, intended as capability flags. */
+};
+
+static const struct pci_id_info pci_id_tbl[] = {
+ {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
+ FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
+ {"Symbios SYM83C885", { 0x07011000, 0xffffffff},
+ HasMII | DontUseEeprom },
+ { }
+};
+
+static DEFINE_PCI_DEVICE_TABLE(yellowfin_pci_tbl) = {
+ { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { }
+};
+MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
+
+
+/* Offsets to the Yellowfin registers. Various sizes and alignments. */
+enum yellowfin_offsets {
+ TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
+ TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
+ RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
+ RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
+ EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
+ ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
+ Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
+ MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
+ MII_Status=0xAE,
+ RxDepth=0xB8, FlowCtrl=0xBC,
+ AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
+ EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
+ EEFeature=0xF5,
+};
+
+/* The Yellowfin Rx and Tx buffer descriptors.
+ Elements are written as 32 bit for endian portability. */
+struct yellowfin_desc {
+ __le32 dbdma_cmd;
+ __le32 addr;
+ __le32 branch_addr;
+ __le32 result_status;
+};
+
+struct tx_status_words {
+#ifdef __BIG_ENDIAN
+ u16 tx_errs;
+ u16 tx_cnt;
+ u16 paused;
+ u16 total_tx_cnt;
+#else /* Little endian chips. */
+ u16 tx_cnt;
+ u16 tx_errs;
+ u16 total_tx_cnt;
+ u16 paused;
+#endif /* __BIG_ENDIAN */
+};
+
+/* Bits in yellowfin_desc.cmd */
+enum desc_cmd_bits {
+ CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
+ CMD_NOP=0x60000000, CMD_STOP=0x70000000,
+ BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
+ BRANCH_IFTRUE=0x040000,
+};
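+
+/* The transmit path below composes command words such as
+ * CMD_TX_PKT | BRANCH_IFTRUE | len: the top nibble selects the DBDMA
+ * operation, bits 16-21 encode the wait/branch/interrupt conditions,
+ * and the low 16 bits carry the buffer length. */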
+
+/* Bits in yellowfin_desc.status */
+enum desc_status_bits { RX_EOP=0x0040, };
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
+ IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
+ IntrEarlyRx=0x100, IntrWakeup=0x200, };
+
+#define PRIV_ALIGN 31 /* Required alignment mask */
+#define MII_CNT 4
+struct yellowfin_private {
+ /* Descriptor rings first for alignment.
+ Tx requires a second descriptor for status. */
+ struct yellowfin_desc *rx_ring;
+ struct yellowfin_desc *tx_ring;
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+
+ struct tx_status_words *tx_status;
+ dma_addr_t tx_status_dma;
+
+ struct timer_list timer; /* Media selection timer. */
+ /* Frequently used and paired value: keep adjacent for cache effect. */
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ struct tx_status_words *tx_tail_desc;
+ unsigned int cur_tx, dirty_tx;
+ int tx_threshold;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+ unsigned int default_port:4; /* Last dev->if_port value. */
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[MII_CNT]; /* MII device addresses, only first one used */
+ spinlock_t lock;
+ void __iomem *base;
+};
+
+static int read_eeprom(void __iomem *ioaddr, int location);
+static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
+static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int yellowfin_open(struct net_device *dev);
+static void yellowfin_timer(unsigned long data);
+static void yellowfin_tx_timeout(struct net_device *dev);
+static int yellowfin_init_ring(struct net_device *dev);
+static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
+static int yellowfin_rx(struct net_device *dev);
+static void yellowfin_error(struct net_device *dev, int intr_status);
+static int yellowfin_close(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static const struct ethtool_ops ethtool_ops;
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = yellowfin_open,
+ .ndo_stop = yellowfin_close,
+ .ndo_start_xmit = yellowfin_start_xmit,
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_do_ioctl = netdev_ioctl,
+ .ndo_tx_timeout = yellowfin_tx_timeout,
+};
+
+static int __devinit yellowfin_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct yellowfin_private *np;
+ int irq;
+ int chip_idx = ent->driver_data;
+ static int find_cnt;
+ void __iomem *ioaddr;
+ int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
+ int drv_flags = pci_id_tbl[chip_idx].drv_flags;
+ void *ring_space;
+ dma_addr_t ring_dma;
+#ifdef USE_IO_OPS
+ int bar = 0;
+#else
+ int bar = 1;
+#endif
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(version);
+#endif
+
+ i = pci_enable_device(pdev);
+ if (i)
+ return i;
+
+ dev = alloc_etherdev(sizeof(*np));
+ if (!dev) {
+ pr_err("cannot allocate ethernet device\n");
+ return -ENOMEM;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ np = netdev_priv(dev);
+
+ if (pci_request_regions(pdev, DRV_NAME))
+ goto err_out_free_netdev;
+
+ pci_set_master (pdev);
+
+ ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
+ if (!ioaddr)
+ goto err_out_free_res;
+
+ irq = pdev->irq;
+
+ if (drv_flags & DontUseEeprom)
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
+ else {
+ int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
+ }
+
+ /* Reset the chip. */
+ iowrite32(0x80000000, ioaddr + DMACtrl);
+
+ dev->base_addr = (unsigned long)ioaddr;
+ dev->irq = irq;
+
+ pci_set_drvdata(pdev, dev);
+ spin_lock_init(&np->lock);
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = drv_flags;
+ np->base = ioaddr;
+
+ ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space)
+ goto err_out_cleardev;
+ np->tx_ring = ring_space;
+ np->tx_ring_dma = ring_dma;
+
+ ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space)
+ goto err_out_unmap_tx;
+ np->rx_ring = ring_space;
+ np->rx_ring_dma = ring_dma;
+
+ ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
+ if (!ring_space)
+ goto err_out_unmap_rx;
+ np->tx_status = ring_space;
+ np->tx_status_dma = ring_dma;
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ if (option & 0x200)
+ np->full_duplex = 1;
+ np->default_port = option & 15;
+ if (np->default_port)
+ np->medialock = 1;
+ }
+ if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
+ np->full_duplex = 1;
+
+ if (np->full_duplex)
+ np->duplex_lock = 1;
+
+ /* The Yellowfin-specific entries in the device structure. */
+ dev->netdev_ops = &netdev_ops;
+ SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ if (mtu)
+ dev->mtu = mtu;
+
+ i = register_netdev(dev);
+ if (i)
+ goto err_out_unmap_status;
+
+ netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
+ pci_id_tbl[chip_idx].name,
+ ioread32(ioaddr + ChipRev), ioaddr,
+ dev->dev_addr, irq);
+
+ if (np->drv_flags & HasMII) {
+ int phy, phy_idx = 0;
+ for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
+ int mii_status = mdio_read(ioaddr, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ np->advertising = mdio_read(ioaddr, phy, 4);
+ netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
+ phy, mii_status, np->advertising);
+ }
+ }
+ np->mii_cnt = phy_idx;
+ }
+
+ find_cnt++;
+
+ return 0;
+
+err_out_unmap_status:
+ pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
+ np->tx_status_dma);
+err_out_unmap_rx:
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+err_out_unmap_tx:
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+err_out_cleardev:
+ pci_set_drvdata(pdev, NULL);
+ pci_iounmap(pdev, ioaddr);
+err_out_free_res:
+ pci_release_regions(pdev);
+err_out_free_netdev:
+ free_netdev (dev);
+ return -ENODEV;
+}
+
+static int __devinit read_eeprom(void __iomem *ioaddr, int location)
+{
+ int bogus_cnt = 10000; /* Typical 33 MHz: 1050 ticks */
+
+ iowrite8(location, ioaddr + EEAddr);
+ iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
+ while ((ioread8(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0)
+ ;
+ return ioread8(ioaddr + EERead);
+}
+
+/* MII Management Data I/O accesses.
+ These routines assume the MDIO controller is idle, and do not exit until
+ the command is finished. */
+
+static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
+{
+ int i;
+
+ iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
+ iowrite16(1, ioaddr + MII_Cmd);
+ for (i = 10000; i >= 0; i--)
+ if ((ioread16(ioaddr + MII_Status) & 1) == 0)
+ break;
+ return ioread16(ioaddr + MII_Rd_Data);
+}
+
+static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
+{
+ int i;
+
+ iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
+ iowrite16(value, ioaddr + MII_Wr_Data);
+
+ /* Wait for the command to finish. */
+ for (i = 10000; i >= 0; i--)
+ if ((ioread16(ioaddr + MII_Status) & 1) == 0)
+ break;
+}
+
+
+static int yellowfin_open(struct net_device *dev)
+{
+ struct yellowfin_private *yp = netdev_priv(dev);
+ void __iomem *ioaddr = yp->base;
+ int i, ret;
+
+ /* Reset the chip. */
+ iowrite32(0x80000000, ioaddr + DMACtrl);
+
+ ret = request_irq(dev->irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
+ if (ret)
+ return ret;
+
+ if (yellowfin_debug > 1)
+ netdev_printk(KERN_DEBUG, dev, "%s() irq %d\n",
+ __func__, dev->irq);
+
+ ret = yellowfin_init_ring(dev);
+ if (ret) {
+ free_irq(dev->irq, dev);
+ return ret;
+ }
+
+ iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
+ iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
+
+ for (i = 0; i < 6; i++)
+ iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);
+
+ /* Set up various condition 'select' registers.
+ There are no options here. */
+ iowrite32(0x00800080, ioaddr + TxIntrSel); /* Interrupt on Tx abort */
+ iowrite32(0x00800080, ioaddr + TxBranchSel); /* Branch on Tx abort */
+ iowrite32(0x00400040, ioaddr + TxWaitSel); /* Wait on Tx status */
+ iowrite32(0x00400040, ioaddr + RxIntrSel); /* Interrupt on Rx done */
+ iowrite32(0x00400040, ioaddr + RxBranchSel); /* Branch on Rx error */
+ iowrite32(0x00400040, ioaddr + RxWaitSel); /* Wait on Rx done */
+
+ /* Initialize other registers: with so many, this will eventually
+ be converted to an offset/value list. */
+ iowrite32(dma_ctrl, ioaddr + DMACtrl);
+ iowrite16(fifo_cfg, ioaddr + FIFOcfg);
+ /* Enable automatic generation of flow control frames, period 0xffff. */
+ iowrite32(0x0030FFFF, ioaddr + FlowCtrl);
+
+ yp->tx_threshold = 32;
+ iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
+
+ if (dev->if_port == 0)
+ dev->if_port = yp->default_port;
+
+ netif_start_queue(dev);
+
+ /* Setting the Rx mode will start the Rx process. */
+ if (yp->drv_flags & IsGigabit) {
+ /* We are always in full-duplex mode with gigabit! */
+ yp->full_duplex = 1;
+ iowrite16(0x01CF, ioaddr + Cnfg);
+ } else {
+ iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
+ iowrite16(0x1018, ioaddr + FrameGap1);
+ iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
+ }
+ set_rx_mode(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ iowrite16(0x81ff, ioaddr + IntrEnb); /* See enum intr_status_bits */
+ iowrite16(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */
+ iowrite32(0x80008000, ioaddr + RxCtrl); /* Start Rx and Tx channels. */
+ iowrite32(0x80008000, ioaddr + TxCtrl);
+
+ if (yellowfin_debug > 2) {
+ netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
+ }
+
+ /* Set the timer to check for link beat. */
+ init_timer(&yp->timer);
+ yp->timer.expires = jiffies + 3*HZ;
+ yp->timer.data = (unsigned long)dev;
+ yp->timer.function = yellowfin_timer; /* timer handler */
+ add_timer(&yp->timer);
+
+ return 0;
+}
+
+static void yellowfin_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct yellowfin_private *yp = netdev_priv(dev);
+ void __iomem *ioaddr = yp->base;
+ int next_tick = 60*HZ;
+
+ if (yellowfin_debug > 3) {
+ netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
+ ioread16(ioaddr + IntrStatus));
+ }
+
+ if (yp->mii_cnt) {
+ int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
+ int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
+ int negotiated = lpa & yp->advertising;
+ if (yellowfin_debug > 1)
+ netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
+ yp->phys[0], bmsr, lpa);
+
+ yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
+
+ iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
+
+ if (bmsr & BMSR_LSTATUS)
+ next_tick = 60*HZ;
+ else
+ next_tick = 3*HZ;
+ }
+
+ yp->timer.expires = jiffies + next_tick;
+ add_timer(&yp->timer);
+}
+
+static void yellowfin_tx_timeout(struct net_device *dev)
+{
+ struct yellowfin_private *yp = netdev_priv(dev);
+ void __iomem *ioaddr = yp->base;
+
+ netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
+ yp->cur_tx, yp->dirty_tx,
+ ioread32(ioaddr + TxStatus),
+ ioread32(ioaddr + RxStatus));
+
+ /* Note: these should be KERN_DEBUG. */
+ if (yellowfin_debug) {
+ int i;
+ pr_warning(" Rx ring %p: ", yp->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ pr_cont(" %08x", yp->rx_ring[i].result_status);
+ pr_cont("\n");
+ pr_warning(" Tx ring %p: ", yp->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ pr_cont(" %04x /%08x",
+ yp->tx_status[i].tx_errs,
+ yp->tx_ring[i].result_status);
+ pr_cont("\n");
+ }
+
+ /* If the hardware is found to hang regularly, we will update the code
+ to reinitialize the chip here. */
+ dev->if_port = 0;
+
+ /* Wake the potentially-idle transmit channel. */
+ iowrite32(0x10001000, yp->base + TxCtrl);
+ if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
+ netif_wake_queue (dev); /* Typical path */
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ dev->stats.tx_errors++;
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static int yellowfin_init_ring(struct net_device *dev)
+{
+ struct yellowfin_private *yp = netdev_priv(dev);
+ int i, j;
+
+ yp->tx_full = 0;
+ yp->cur_rx = yp->cur_tx = 0;
+ yp->dirty_tx = 0;
+
+ yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ yp->rx_ring[i].dbdma_cmd =
+ cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
+ yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
+ ((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
+ }
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
+ yp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
+ skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+ }
+ if (i != RX_RING_SIZE) {
+ for (j = 0; j < i; j++)
+ dev_kfree_skb(yp->rx_skbuff[j]);
+ return -ENOMEM;
+ }
+ yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+#define NO_TXSTATS
+#ifdef NO_TXSTATS
+ /* In this mode the Tx ring needs only a single descriptor. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ yp->tx_skbuff[i] = NULL;
+ yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
+ ((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
+ }
+ /* Wrap ring */
+ yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
+#else
+{
+ /* Tx ring needs a pair of descriptors, the second for the status. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ j = 2*i;
+ yp->tx_skbuff[i] = NULL;
+ /* Branch on Tx error. */
+ yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
+ (j+1)*sizeof(struct yellowfin_desc));
+ j++;
+ if (yp->drv_flags & FullTxStatus) {
+ yp->tx_ring[j].dbdma_cmd =
+ cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
+ yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
+ yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
+ i*sizeof(struct tx_status_words));
+ } else {
+ /* Symbios chips write only tx_errs word. */
+ yp->tx_ring[j].dbdma_cmd =
+ cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
+ yp->tx_ring[j].request_cnt = 2;
+ /* Om pade ummmmm... */
+ yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
+ i*sizeof(struct tx_status_words) +
+ offsetof(struct tx_status_words, tx_errs));
+ }
+ yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
+ ((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
+ }
+ /* Wrap ring */
+ yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
+}
+#endif
+ yp->tx_tail_desc = &yp->tx_status[0];
+ return 0;
+}
+
+static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct yellowfin_private *yp = netdev_priv(dev);
+ unsigned entry;
+ int len = skb->len;
+
+ netif_stop_queue (dev);
+
+ /* Note: Ordering is important here, set the field with the
+ "ownership" bit last, and only then increment cur_tx. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = yp->cur_tx % TX_RING_SIZE;
+
+ if (gx_fix) { /* Note: only works for paddable protocols e.g. IP. */
+ int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
+ /* Fix GX chipset errata. */
+ if (cacheline_end > 24 || cacheline_end == 0) {
+ len = skb->len + 32 - cacheline_end + 1;
+ if (skb_padto(skb, len)) {
+ yp->tx_skbuff[entry] = NULL;
+ netif_wake_queue(dev);
+ return NETDEV_TX_OK;
+ }
+ }
+ }
+ yp->tx_skbuff[entry] = skb;
+
+#ifdef NO_TXSTATS
+ yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
+ skb->data, len, PCI_DMA_TODEVICE));
+ yp->tx_ring[entry].result_status = 0;
+ if (entry >= TX_RING_SIZE-1) {
+ /* New stop command. */
+ yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
+ cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
+ } else {
+ yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->tx_ring[entry].dbdma_cmd =
+ cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
+ }
+ yp->cur_tx++;
+#else
+ yp->tx_ring[entry<<1].request_cnt = len;
+ yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
+ skb->data, len, PCI_DMA_TODEVICE));
+ /* The input_last (status-write) command is constant, but we must
+ rewrite the subsequent 'stop' command. */
+
+ yp->cur_tx++;
+ {
+ unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
+ yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ }
+ /* Final step -- overwrite the old 'stop' command. */
+
+ yp->tx_ring[entry<<1].dbdma_cmd =
+ cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
+ CMD_TX_PKT | BRANCH_IFTRUE) | len);
+#endif
+
+ /* Non-x86 Todo: explicitly flush cache lines here. */
+
+ /* Wake the potentially-idle transmit channel. */
+ iowrite32(0x10001000, yp->base + TxCtrl);
+
+ if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
+ netif_start_queue (dev); /* Typical path */
+ else
+ yp->tx_full = 1;
+
+ if (yellowfin_debug > 4) {
+ netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
+ yp->cur_tx, entry);
+ }
+ return NETDEV_TX_OK;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct yellowfin_private *yp;
+ void __iomem *ioaddr;
+ int boguscnt = max_interrupt_work;
+ unsigned int handled = 0;
+
+ yp = netdev_priv(dev);
+ ioaddr = yp->base;
+
+ spin_lock (&yp->lock);
+
+ do {
+ u16 intr_status = ioread16(ioaddr + IntrClear);
+
+ if (yellowfin_debug > 4)
+ netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
+ intr_status);
+
+ if (intr_status == 0)
+ break;
+ handled = 1;
+
+ if (intr_status & (IntrRxDone | IntrEarlyRx)) {
+ yellowfin_rx(dev);
+ iowrite32(0x10001000, ioaddr + RxCtrl); /* Wake Rx engine. */
+ }
+
+#ifdef NO_TXSTATS
+ for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
+ int entry = yp->dirty_tx % TX_RING_SIZE;
+ struct sk_buff *skb;
+
+ if (yp->tx_ring[entry].result_status == 0)
+ break;
+ skb = yp->tx_skbuff[entry];
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ /* Free the original skb. */
+ pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr),
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ yp->tx_skbuff[entry] = NULL;
+ }
+ if (yp->tx_full &&
+ yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
+ /* The ring is no longer full, clear tbusy. */
+ yp->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+#else
+ if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
+ unsigned dirty_tx = yp->dirty_tx;
+
+ for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
+ dirty_tx++) {
+ /* Todo: optimize this. */
+ int entry = dirty_tx % TX_RING_SIZE;
+ u16 tx_errs = yp->tx_status[entry].tx_errs;
+ struct sk_buff *skb;
+
+#ifndef final_version
+ if (yellowfin_debug > 5)
+ netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
+ entry,
+ yp->tx_status[entry].tx_cnt,
+ yp->tx_status[entry].tx_errs,
+ yp->tx_status[entry].total_tx_cnt,
+ yp->tx_status[entry].paused);
+#endif
+ if (tx_errs == 0)
+ break; /* It still hasn't been Txed */
+ skb = yp->tx_skbuff[entry];
+ if (tx_errs & 0xF810) {
+ /* There was a major error, log it. */
+#ifndef final_version
+ if (yellowfin_debug > 1)
+ netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
+ tx_errs);
+#endif
+ dev->stats.tx_errors++;
+ if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
+ if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
+ if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
+ if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
+ } else {
+#ifndef final_version
+ if (yellowfin_debug > 4)
+ netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
+ tx_errs);
+#endif
+ dev->stats.tx_bytes += skb->len;
+ dev->stats.collisions += tx_errs & 15;
+ dev->stats.tx_packets++;
+ }
+ /* Free the original skb. */
+ pci_unmap_single(yp->pci_dev,
+ yp->tx_ring[entry<<1].addr, skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ yp->tx_skbuff[entry] = NULL;
+ /* Mark status as empty. */
+ yp->tx_status[entry].tx_errs = 0;
+ }
+
+#ifndef final_version
+ if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
+ netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
+ dirty_tx, yp->cur_tx, yp->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (yp->tx_full &&
+ yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
+ /* The ring is no longer full, clear tbusy. */
+ yp->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+
+ yp->dirty_tx = dirty_tx;
+ yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
+ }
+#endif
+
+ /* Log errors and other uncommon events. */
+ if (intr_status & 0x2ee) /* Abnormal error summary. */
+ yellowfin_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
+ intr_status);
+ break;
+ }
+ } while (1);
+
+ if (yellowfin_debug > 3)
+ netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
+ ioread16(ioaddr + IntrStatus));
+
+ spin_unlock (&yp->lock);
+ return IRQ_RETVAL(handled);
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int yellowfin_rx(struct net_device *dev)
+{
+ struct yellowfin_private *yp = netdev_priv(dev);
+ int entry = yp->cur_rx % RX_RING_SIZE;
+ int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
+
+ if (yellowfin_debug > 4) {
+ printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
+ entry, yp->rx_ring[entry].result_status);
+ printk(KERN_DEBUG " #%d desc. %08x %08x %08x\n",
+ entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
+ yp->rx_ring[entry].result_status);
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (1) {
+ struct yellowfin_desc *desc = &yp->rx_ring[entry];
+ struct sk_buff *rx_skb = yp->rx_skbuff[entry];
+ s16 frame_status;
+ u16 desc_status;
+ int data_size;
+ u8 *buf_addr;
+
+ if(!desc->result_status)
+ break;
+ pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr),
+ yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ desc_status = le32_to_cpu(desc->result_status) >> 16;
+ buf_addr = rx_skb->data;
+ data_size = (le32_to_cpu(desc->dbdma_cmd) -
+ le32_to_cpu(desc->result_status)) & 0xffff;
+ frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
+ if (yellowfin_debug > 4)
+ printk(KERN_DEBUG " %s() status was %04x\n",
+ __func__, frame_status);
+ if (--boguscnt < 0)
+ break;
+ if ( ! (desc_status & RX_EOP)) {
+ if (data_size != 0)
+ netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
+ desc_status, data_size);
+ dev->stats.rx_length_errors++;
+ } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
+ /* There was an error. */
+ if (yellowfin_debug > 3)
+ printk(KERN_DEBUG " %s() Rx error was %04x\n",
+ __func__, frame_status);
+ dev->stats.rx_errors++;
+ if (frame_status & 0x0060) dev->stats.rx_length_errors++;
+ if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
+ if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
+ if (frame_status < 0) dev->stats.rx_dropped++;
+ } else if ( !(yp->drv_flags & IsGigabit) &&
+ ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
+ u8 status1 = buf_addr[data_size-2];
+ u8 status2 = buf_addr[data_size-1];
+ dev->stats.rx_errors++;
+ if (status1 & 0xC0) dev->stats.rx_length_errors++;
+ if (status2 & 0x03) dev->stats.rx_frame_errors++;
+ if (status2 & 0x04) dev->stats.rx_crc_errors++;
+ if (status2 & 0x80) dev->stats.rx_dropped++;
+#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
+ } else if ((yp->drv_flags & HasMACAddrBug) &&
+ /* Compare the received destination address (the first
+ six octets of the frame) against our own address
+ and broadcast. */
+ memcmp(buf_addr, dev->dev_addr, 6) != 0 &&
+ memcmp(buf_addr,
+ "\377\377\377\377\377\377", 6) != 0) {
+ if (bogus_rx++ == 0)
+ netdev_warn(dev, "Bad frame to %pM\n",
+ buf_addr);
+#endif
+ } else {
+ struct sk_buff *skb;
+ int pkt_len = data_size -
+ (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
+ /* To verify: Yellowfin Length should omit the CRC! */
+
+#ifndef final_version
+ if (yellowfin_debug > 4)
+ printk(KERN_DEBUG " %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
+ __func__, pkt_len, data_size, boguscnt);
+#endif
+ /* Check if the packet is long enough to just pass up the skbuff
+ without copying to a properly sized skbuff. */
+ if (pkt_len > rx_copybreak) {
+ skb = rx_skb;
+ skb_put(skb, pkt_len);
+ pci_unmap_single(yp->pci_dev,
+ le32_to_cpu(yp->rx_ring[entry].addr),
+ yp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ yp->rx_skbuff[entry] = NULL;
+ } else {
+ skb = dev_alloc_skb(pkt_len + 2);
+ if (skb == NULL)
+ break;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
+ skb_put(skb, pkt_len);
+ pci_dma_sync_single_for_device(yp->pci_dev,
+ le32_to_cpu(desc->addr),
+ yp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ entry = (++yp->cur_rx) % RX_RING_SIZE;
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
+ entry = yp->dirty_rx % RX_RING_SIZE;
+ if (yp->rx_skbuff[entry] == NULL) {
+ struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ yp->rx_skbuff[entry] = skb;
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
+ skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+ }
+ yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
+ if (entry != 0)
+ yp->rx_ring[entry - 1].dbdma_cmd =
+ cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
+ else
+ yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
+ cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
+ | yp->rx_buf_sz);
+ }
+
+ return 0;
+}
+
+static void yellowfin_error(struct net_device *dev, int intr_status)
+{
+ netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);
+ /* Hmmmmm, it's not clear what to do here. */
+ if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
+ dev->stats.tx_errors++;
+ if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
+ dev->stats.rx_errors++;
+}
+
+static int yellowfin_close(struct net_device *dev)
+{
+ struct yellowfin_private *yp = netdev_priv(dev);
+ void __iomem *ioaddr = yp->base;
+ int i;
+
+ netif_stop_queue (dev);
+
+ if (yellowfin_debug > 1) {
+ netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
+ ioread16(ioaddr + TxStatus),
+ ioread16(ioaddr + RxStatus),
+ ioread16(ioaddr + IntrStatus));
+ netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
+ yp->cur_tx, yp->dirty_tx,
+ yp->cur_rx, yp->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ iowrite16(0x0000, ioaddr + IntrEnb);
+
+ /* Stop the chip's Tx and Rx processes. */
+ iowrite32(0x80000000, ioaddr + RxCtrl);
+ iowrite32(0x80000000, ioaddr + TxCtrl);
+
+ del_timer(&yp->timer);
+
+#if defined(__i386__)
+ if (yellowfin_debug > 2) {
+ printk(KERN_DEBUG " Tx ring at %08llx:\n",
+ (unsigned long long)yp->tx_ring_dma);
+ for (i = 0; i < TX_RING_SIZE*2; i++)
+ printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
+ ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
+ i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
+ yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
+ printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(KERN_DEBUG " #%d status %04x %04x %04x %04x\n",
+ i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
+ yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
+
+ printk(KERN_DEBUG " Rx ring %08llx:\n",
+ (unsigned long long)yp->rx_ring_dma);
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
+ ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
+ i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
+ yp->rx_ring[i].result_status);
+ if (yellowfin_debug > 6) {
+ if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
+ int j;
+
+ printk(KERN_DEBUG);
+ for (j = 0; j < 0x50; j++)
+ pr_cont(" %04x",
+ get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
+ pr_cont("\n");
+ }
+ }
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
+ if (yp->rx_skbuff[i]) {
+ dev_kfree_skb(yp->rx_skbuff[i]);
+ }
+ yp->rx_skbuff[i] = NULL;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (yp->tx_skbuff[i])
+ dev_kfree_skb(yp->tx_skbuff[i]);
+ yp->tx_skbuff[i] = NULL;
+ }
+
+#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
+ if (yellowfin_debug > 0) {
+ netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
+ bogus_rx);
+ }
+#endif
+
+ return 0;
+}
+
+/* Set or clear the multicast filter for this adaptor. */
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct yellowfin_private *yp = netdev_priv(dev);
+ void __iomem *ioaddr = yp->base;
+ u16 cfg_value = ioread16(ioaddr + Cnfg);
+
+ /* Stop the Rx process to change any value. */
+ iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ iowrite16(0x000F, ioaddr + AddrMode);
+ } else if ((netdev_mc_count(dev) > 64) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter well, or accept all multicasts. */
+ iowrite16(0x000B, ioaddr + AddrMode);
+ } else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
+ struct netdev_hw_addr *ha;
+ u16 hash_table[4];
+ int i;
+
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned int bit;
+
+ /* Due to a bug in the early chip versions, multiple filter
+ slots must be set for each address. */
+ if (yp->drv_flags & HasMulticastBug) {
+ bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
+ hash_table[bit >> 4] |= (1 << (bit & 15));
+ bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
+ hash_table[bit >> 4] |= (1 << (bit & 15));
+ bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
+ hash_table[bit >> 4] |= (1 << (bit & 15));
+ }
+ /* bit >> 4 picks the 16-bit hash word, bit & 15 the bit in it. */
+ bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
+ hash_table[bit >> 4] |= (1 << (bit & 15));
+ }
+ /* Copy the hash table to the chip. */
+ for (i = 0; i < 4; i++)
+ iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
+ iowrite16(0x0003, ioaddr + AddrMode);
+ } else { /* Normal, unicast/broadcast-only mode. */
+ iowrite16(0x0001, ioaddr + AddrMode);
+ }
+ /* Restart the Rx process. */
+ iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
+}
+
+static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct yellowfin_private *np = netdev_priv(dev);
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(np->pci_dev));
+}
+
+static const struct ethtool_ops ethtool_ops = {
+ .get_drvinfo = yellowfin_get_drvinfo
+};
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct yellowfin_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ struct mii_ioctl_data *data = if_mii(rq);
+
+ switch(cmd) {
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ data->phy_id = np->phys[0] & 0x1f;
+ /* Fall Through */
+
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
+ return 0;
+
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ if (data->phy_id == np->phys[0]) {
+ u16 value = data->val_in;
+ switch (data->reg_num) {
+ case 0:
+ /* Check for autonegotiation on or reset. */
+ np->medialock = (value & 0x9000) ? 0 : 1;
+ if (np->medialock)
+ np->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: np->advertising = value; break;
+ }
+ /* Perhaps check_duplex(dev), depending on chip semantics. */
+ }
+ mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+
+static void __devexit yellowfin_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct yellowfin_private *np;
+
+ BUG_ON(!dev);
+ np = netdev_priv(dev);
+
+ pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
+ np->tx_status_dma);
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+ unregister_netdev (dev);
+
+ pci_iounmap(pdev, np->base);
+
+ pci_release_regions (pdev);
+
+ free_netdev (dev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+
+static struct pci_driver yellowfin_driver = {
+ .name = DRV_NAME,
+ .id_table = yellowfin_pci_tbl,
+ .probe = yellowfin_init_one,
+ .remove = __devexit_p(yellowfin_remove_one),
+};
+
+
+static int __init yellowfin_init (void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk(version);
+#endif
+ return pci_register_driver(&yellowfin_driver);
+}
+
+
+static void __exit yellowfin_cleanup (void)
+{
+ pci_unregister_driver (&yellowfin_driver);
+}
+
+
+module_init(yellowfin_init);
+module_exit(yellowfin_cleanup);
diff --git a/drivers/net/ethernet/pasemi/Kconfig b/drivers/net/ethernet/pasemi/Kconfig
new file mode 100644
index 000000000000..01e6c329d78c
--- /dev/null
+++ b/drivers/net/ethernet/pasemi/Kconfig
@@ -0,0 +1,30 @@
+#
+# PA Semi network device configuration
+#
+
+config NET_VENDOR_PASEMI
+ bool "PA Semi devices"
+ default y
+ depends on PPC_PASEMI && PCI && INET
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about PA Semi cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_PASEMI
+
+config PASEMI_MAC
+ tristate "PA Semi 1/10Gbit MAC"
+ depends on PPC_PASEMI && PCI && INET
+ select PHYLIB
+ select INET_LRO
+ ---help---
+ This driver supports the on-chip 1/10Gbit Ethernet controller on
+ PA Semi's PWRficient line of chips.
+
+endif # NET_VENDOR_PASEMI
diff --git a/drivers/net/ethernet/pasemi/Makefile b/drivers/net/ethernet/pasemi/Makefile
new file mode 100644
index 000000000000..05db5434bafc
--- /dev/null
+++ b/drivers/net/ethernet/pasemi/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the PA Semi network device drivers.
+#
+
+obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o pasemi_mac_ethtool.o
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
new file mode 100644
index 000000000000..c6f005684677
--- /dev/null
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -0,0 +1,1909 @@
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/of_mdio.h>
+#include <linux/etherdevice.h>
+#include <asm/dma-mapping.h>
+#include <linux/in.h>
+#include <linux/skbuff.h>
+
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <net/checksum.h>
+#include <linux/inet_lro.h>
+#include <linux/prefetch.h>
+
+#include <asm/irq.h>
+#include <asm/firmware.h>
+#include <asm/pasemi_dma.h>
+
+#include "pasemi_mac.h"
+
+/* We use our own receive-buffer alignment, since ppc64 in general keeps
+ * it at 0 because of design flaws in some of the server bridge chips.
+ * On PWRficient, however, unaligned copies are more expensive than
+ * unaligned DMA, so make sure the data is aligned instead.
+ */
+#define LOCAL_SKB_ALIGN 2
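+
+/* A minimal sketch (helper choice illustrative; the actual refill code
+ * appears later in this file) of how the alignment is applied:
+ *
+ * skb = netdev_alloc_skb(dev, buf_size + LOCAL_SKB_ALIGN);
+ * if (skb)
+ * skb_reserve(skb, LOCAL_SKB_ALIGN);
+ */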
+
+/* TODO list
+ *
+ * - Multicast support
+ * - Large MTU support
+ * - SW LRO
+ * - Multiqueue RX/TX
+ */
+
+#define LRO_MAX_AGGR 64
+
+#define PE_MIN_MTU 64
+#define PE_MAX_MTU 9000
+#define PE_DEF_MTU ETH_DATA_LEN
+
+#define DEFAULT_MSG_ENABLE \
+ (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
+MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");
+
+static int debug = -1; /* -1 == use DEFAULT_MSG_ENABLE as value */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value");
+
+extern const struct ethtool_ops pasemi_mac_ethtool_ops;
+
+static int translation_enabled(void)
+{
+#if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
+ return 1;
+#else
+ return firmware_has_feature(FW_FEATURE_LPAR);
+#endif
+}
+
+static void write_iob_reg(unsigned int reg, unsigned int val)
+{
+ pasemi_write_iob_reg(reg, val);
+}
+
+static unsigned int read_mac_reg(const struct pasemi_mac *mac, unsigned int reg)
+{
+ return pasemi_read_mac_reg(mac->dma_if, reg);
+}
+
+static void write_mac_reg(const struct pasemi_mac *mac, unsigned int reg,
+ unsigned int val)
+{
+ pasemi_write_mac_reg(mac->dma_if, reg, val);
+}
+
+static unsigned int read_dma_reg(unsigned int reg)
+{
+ return pasemi_read_dma_reg(reg);
+}
+
+static void write_dma_reg(unsigned int reg, unsigned int val)
+{
+ pasemi_write_dma_reg(reg, val);
+}
+
+static struct pasemi_mac_rxring *rx_ring(const struct pasemi_mac *mac)
+{
+ return mac->rx;
+}
+
+static struct pasemi_mac_txring *tx_ring(const struct pasemi_mac *mac)
+{
+ return mac->tx;
+}
+
+static inline void prefetch_skb(const struct sk_buff *skb)
+{
+ const void *d = skb;
+
+ prefetch(d);
+ prefetch(d+64);
+ prefetch(d+128);
+ prefetch(d+192);
+}
+
+static int mac_to_intf(struct pasemi_mac *mac)
+{
+ struct pci_dev *pdev = mac->pdev;
+ u32 tmp;
+ int nintf, off, i, j;
+ int devfn = pdev->devfn;
+
+ tmp = read_dma_reg(PAS_DMA_CAP_IFI);
+ nintf = (tmp & PAS_DMA_CAP_IFI_NIN_M) >> PAS_DMA_CAP_IFI_NIN_S;
+ off = (tmp & PAS_DMA_CAP_IFI_IOFF_M) >> PAS_DMA_CAP_IFI_IOFF_S;
+
+ /* IOFF contains the offset to the registers containing the
+ * DMA interface-to-MAC-pci-id mappings, and NIN contains number
+ * of total interfaces. Each register contains 4 devfns.
+ * Just do a linear search until we find the devfn of the MAC
+ * we're trying to look up.
+ */
+
+ for (i = 0; i < (nintf+3)/4; i++) {
+ tmp = read_dma_reg(off+4*i);
+ for (j = 0; j < 4; j++) {
+ if (((tmp >> (8*j)) & 0xff) == devfn)
+ return i*4 + j;
+ }
+ }
+ return -1;
+}
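+
+/* Worked example with hypothetical register contents: for devfn 0x12,
+ * if read_dma_reg(off + 1) returns 0x00120000 then byte 2 of the second
+ * mapping register matches and the routine returns 1*4 + 2 = 6. */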
+
+static void pasemi_mac_intf_disable(struct pasemi_mac *mac)
+{
+ unsigned int flags;
+
+ flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
+ flags &= ~PAS_MAC_CFG_PCFG_PE;
+ write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
+}
+
+static void pasemi_mac_intf_enable(struct pasemi_mac *mac)
+{
+ unsigned int flags;
+
+ flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
+ flags |= PAS_MAC_CFG_PCFG_PE;
+ write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
+}
+
+static int pasemi_get_mac_addr(struct pasemi_mac *mac)
+{
+ struct pci_dev *pdev = mac->pdev;
+ struct device_node *dn = pci_device_to_OF_node(pdev);
+ int len;
+ const u8 *maddr;
+ u8 addr[6];
+
+ if (!dn) {
+ dev_dbg(&pdev->dev,
+ "No device node for mac, not configuring\n");
+ return -ENOENT;
+ }
+
+ maddr = of_get_property(dn, "local-mac-address", &len);
+
+ if (maddr && len == 6) {
+ memcpy(mac->mac_addr, maddr, 6);
+ return 0;
+ }
+
+ /* Some old versions of firmware mistakenly use mac-address
+ * (and as a string) instead of a byte array in local-mac-address.
+ */
+
+ if (maddr == NULL)
+ maddr = of_get_property(dn, "mac-address", NULL);
+
+ if (maddr == NULL) {
+ dev_warn(&pdev->dev,
+ "no mac address in device tree, not configuring\n");
+ return -ENOENT;
+ }
+
+ if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
+ &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
+ dev_warn(&pdev->dev,
+ "can't parse mac address, not configuring\n");
+ return -EINVAL;
+ }
+
+ memcpy(mac->mac_addr, addr, 6);
+
+ return 0;
+}
+
+static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct pasemi_mac *mac = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ unsigned int adr0, adr1;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ adr0 = dev->dev_addr[2] << 24 |
+ dev->dev_addr[3] << 16 |
+ dev->dev_addr[4] << 8 |
+ dev->dev_addr[5];
+ adr1 = read_mac_reg(mac, PAS_MAC_CFG_ADR1);
+ adr1 &= ~0xffff;
+ adr1 |= dev->dev_addr[0] << 8 | dev->dev_addr[1];
+
+ pasemi_mac_intf_disable(mac);
+ write_mac_reg(mac, PAS_MAC_CFG_ADR0, adr0);
+ write_mac_reg(mac, PAS_MAC_CFG_ADR1, adr1);
+ pasemi_mac_intf_enable(mac);
+
+ return 0;
+}
+
+static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
+ void **tcph, u64 *hdr_flags, void *data)
+{
+ u64 macrx = (u64) data;
+ unsigned int ip_len;
+ struct iphdr *iph;
+
+ /* IPv4 header checksum failed */
+ if ((macrx & XCT_MACRX_HTY_M) != XCT_MACRX_HTY_IPV4_OK)
+ return -1;
+
+	skb_reset_network_header(skb);
+	iph = ip_hdr(skb);
+
+	/* non-TCP packet */
+	if (iph->protocol != IPPROTO_TCP)
+		return -1;
+
+ ip_len = ip_hdrlen(skb);
+ skb_set_transport_header(skb, ip_len);
+ *tcph = tcp_hdr(skb);
+
+ /* check if ip header and tcp header are complete */
+ if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
+ return -1;
+
+ *hdr_flags = LRO_IPV4 | LRO_TCP;
+ *iphdr = iph;
+
+ return 0;
+}
+
+static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
+ const int nfrags,
+ struct sk_buff *skb,
+ const dma_addr_t *dmas)
+{
+ int f;
+ struct pci_dev *pdev = mac->dma_pdev;
+
+ pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE);
+
+ for (f = 0; f < nfrags; f++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+ pci_unmap_page(pdev, dmas[f+1], frag->size, PCI_DMA_TODEVICE);
+ }
+ dev_kfree_skb_irq(skb);
+
+	/* Freed descriptor slots: one for the MACTX descriptor, one for
+	 * the main SKB pointer, plus nfrags fragment pointers, rounded up
+	 * to an even count (e.g. nfrags == 1 frees 4 slots).
+	 */
+ return (nfrags + 3) & ~1;
+}
+
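+/* Allocate a TX DMA channel dedicated to checksum offload of oversized
+ * frames: a function-capable channel, a checksum function slot, and two
+ * event flags used to handshake with the MAC TX channel (see
+ * pasemi_mac_queue_csdesc()).
+ */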
+static struct pasemi_mac_csring *pasemi_mac_setup_csring(struct pasemi_mac *mac)
+{
+ struct pasemi_mac_csring *ring;
+ u32 val;
+ unsigned int cfg;
+ int chno;
+
+ ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_csring),
+ offsetof(struct pasemi_mac_csring, chan));
+
+ if (!ring) {
+ dev_err(&mac->pdev->dev, "Can't allocate checksum channel\n");
+ goto out_chan;
+ }
+
+ chno = ring->chan.chno;
+
+ ring->size = CS_RING_SIZE;
+ ring->next_to_fill = 0;
+
+ /* Allocate descriptors */
+ if (pasemi_dma_alloc_ring(&ring->chan, CS_RING_SIZE))
+ goto out_ring_desc;
+
+ write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno),
+ PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
+ val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
+ val |= PAS_DMA_TXCHAN_BASEU_SIZ(CS_RING_SIZE >> 3);
+
+ write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val);
+
+ ring->events[0] = pasemi_dma_alloc_flag();
+ ring->events[1] = pasemi_dma_alloc_flag();
+ if (ring->events[0] < 0 || ring->events[1] < 0)
+ goto out_flags;
+
+ pasemi_dma_clear_flag(ring->events[0]);
+ pasemi_dma_clear_flag(ring->events[1]);
+
+ ring->fun = pasemi_dma_alloc_fun();
+ if (ring->fun < 0)
+ goto out_fun;
+
+ cfg = PAS_DMA_TXCHAN_CFG_TY_FUNC | PAS_DMA_TXCHAN_CFG_UP |
+ PAS_DMA_TXCHAN_CFG_TATTR(ring->fun) |
+ PAS_DMA_TXCHAN_CFG_LPSQ | PAS_DMA_TXCHAN_CFG_LPDQ;
+
+ if (translation_enabled())
+ cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;
+
+ write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg);
+
+ /* enable channel */
+ pasemi_dma_start_chan(&ring->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ |
+ PAS_DMA_TXCHAN_TCMDSTA_DB |
+ PAS_DMA_TXCHAN_TCMDSTA_DE |
+ PAS_DMA_TXCHAN_TCMDSTA_DA);
+
+ return ring;
+
+out_fun:
+out_flags:
+ if (ring->events[0] >= 0)
+ pasemi_dma_free_flag(ring->events[0]);
+ if (ring->events[1] >= 0)
+ pasemi_dma_free_flag(ring->events[1]);
+ pasemi_dma_free_ring(&ring->chan);
+out_ring_desc:
+ pasemi_dma_free_chan(&ring->chan);
+out_chan:
+
+ return NULL;
+}
+
+static void pasemi_mac_setup_csrings(struct pasemi_mac *mac)
+{
+	int i;
+
+	mac->cs[0] = pasemi_mac_setup_csring(mac);
+	if (mac->type == MAC_TYPE_XAUI)
+		mac->cs[1] = pasemi_mac_setup_csring(mac);
+	else
+		mac->cs[1] = NULL;
+
+ for (i = 0; i < MAX_CS; i++)
+ if (mac->cs[i])
+ mac->num_cs++;
+}
+
+static void pasemi_mac_free_csring(struct pasemi_mac_csring *csring)
+{
+ pasemi_dma_stop_chan(&csring->chan);
+ pasemi_dma_free_flag(csring->events[0]);
+ pasemi_dma_free_flag(csring->events[1]);
+ pasemi_dma_free_ring(&csring->chan);
+ pasemi_dma_free_chan(&csring->chan);
+ pasemi_dma_free_fun(csring->fun);
+}
+
+static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
+{
+ struct pasemi_mac_rxring *ring;
+ struct pasemi_mac *mac = netdev_priv(dev);
+ int chno;
+ unsigned int cfg;
+
+ ring = pasemi_dma_alloc_chan(RXCHAN, sizeof(struct pasemi_mac_rxring),
+ offsetof(struct pasemi_mac_rxring, chan));
+
+ if (!ring) {
+ dev_err(&mac->pdev->dev, "Can't allocate RX channel\n");
+ goto out_chan;
+ }
+ chno = ring->chan.chno;
+
+ spin_lock_init(&ring->lock);
+
+ ring->size = RX_RING_SIZE;
+ ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
+ RX_RING_SIZE, GFP_KERNEL);
+
+ if (!ring->ring_info)
+ goto out_ring_info;
+
+ /* Allocate descriptors */
+ if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
+ goto out_ring_desc;
+
+ ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
+ RX_RING_SIZE * sizeof(u64),
+ &ring->buf_dma, GFP_KERNEL);
+ if (!ring->buffers)
+ goto out_ring_desc;
+
+ memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));
+
+ write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno),
+ PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma));
+
+ write_dma_reg(PAS_DMA_RXCHAN_BASEU(chno),
+ PAS_DMA_RXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32) |
+ PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3));
+
+ cfg = PAS_DMA_RXCHAN_CFG_HBU(2);
+
+ if (translation_enabled())
+ cfg |= PAS_DMA_RXCHAN_CFG_CTR;
+
+ write_dma_reg(PAS_DMA_RXCHAN_CFG(chno), cfg);
+
+ write_dma_reg(PAS_DMA_RXINT_BASEL(mac->dma_if),
+ PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma));
+
+ write_dma_reg(PAS_DMA_RXINT_BASEU(mac->dma_if),
+ PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) |
+ PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));
+
+ cfg = PAS_DMA_RXINT_CFG_DHL(2) | PAS_DMA_RXINT_CFG_L2 |
+ PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP |
+ PAS_DMA_RXINT_CFG_HEN;
+
+ if (translation_enabled())
+ cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR;
+
+ write_dma_reg(PAS_DMA_RXINT_CFG(mac->dma_if), cfg);
+
+ ring->next_to_fill = 0;
+ ring->next_to_clean = 0;
+ ring->mac = mac;
+ mac->rx = ring;
+
+ return 0;
+
+out_ring_desc:
+ kfree(ring->ring_info);
+out_ring_info:
+ pasemi_dma_free_chan(&ring->chan);
+out_chan:
+ return -ENOMEM;
+}
+
+static struct pasemi_mac_txring *
+pasemi_mac_setup_tx_resources(const struct net_device *dev)
+{
+ struct pasemi_mac *mac = netdev_priv(dev);
+ u32 val;
+ struct pasemi_mac_txring *ring;
+ unsigned int cfg;
+ int chno;
+
+ ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_txring),
+ offsetof(struct pasemi_mac_txring, chan));
+
+ if (!ring) {
+ dev_err(&mac->pdev->dev, "Can't allocate TX channel\n");
+ goto out_chan;
+ }
+
+ chno = ring->chan.chno;
+
+ spin_lock_init(&ring->lock);
+
+ ring->size = TX_RING_SIZE;
+ ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
+ TX_RING_SIZE, GFP_KERNEL);
+ if (!ring->ring_info)
+ goto out_ring_info;
+
+ /* Allocate descriptors */
+ if (pasemi_dma_alloc_ring(&ring->chan, TX_RING_SIZE))
+ goto out_ring_desc;
+
+ write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno),
+ PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
+ val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
+ val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3);
+
+ write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val);
+
+ cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE |
+ PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
+ PAS_DMA_TXCHAN_CFG_UP |
+ PAS_DMA_TXCHAN_CFG_WT(4);
+
+ if (translation_enabled())
+ cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;
+
+ write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg);
+
+ ring->next_to_fill = 0;
+ ring->next_to_clean = 0;
+ ring->mac = mac;
+
+ return ring;
+
+out_ring_desc:
+ kfree(ring->ring_info);
+out_ring_info:
+ pasemi_dma_free_chan(&ring->chan);
+out_chan:
+ return NULL;
+}
+
+static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac)
+{
+ struct pasemi_mac_txring *txring = tx_ring(mac);
+ unsigned int i, j;
+ struct pasemi_mac_buffer *info;
+ dma_addr_t dmas[MAX_SKB_FRAGS+1];
+ int freed, nfrags;
+ int start, limit;
+
+ start = txring->next_to_clean;
+ limit = txring->next_to_fill;
+
+ /* Compensate for when fill has wrapped and clean has not */
+ if (start > limit)
+ limit += TX_RING_SIZE;
+
+ for (i = start; i < limit; i += freed) {
+ info = &txring->ring_info[(i+1) & (TX_RING_SIZE-1)];
+ if (info->dma && info->skb) {
+ nfrags = skb_shinfo(info->skb)->nr_frags;
+ for (j = 0; j <= nfrags; j++)
+ dmas[j] = txring->ring_info[(i+1+j) &
+ (TX_RING_SIZE-1)].dma;
+ freed = pasemi_mac_unmap_tx_skb(mac, nfrags,
+ info->skb, dmas);
+ } else
+ freed = 2;
+ }
+
+ kfree(txring->ring_info);
+ pasemi_dma_free_chan(&txring->chan);
+}
+
+static void pasemi_mac_free_rx_buffers(struct pasemi_mac *mac)
+{
+ struct pasemi_mac_rxring *rx = rx_ring(mac);
+ unsigned int i;
+ struct pasemi_mac_buffer *info;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ info = &RX_DESC_INFO(rx, i);
+ if (info->skb && info->dma) {
+ pci_unmap_single(mac->dma_pdev,
+ info->dma,
+ info->skb->len,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(info->skb);
+ }
+ info->dma = 0;
+ info->skb = NULL;
+ }
+
+ for (i = 0; i < RX_RING_SIZE; i++)
+ RX_BUFF(rx, i) = 0;
+}
+
+static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
+{
+ pasemi_mac_free_rx_buffers(mac);
+
+ dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
+ rx_ring(mac)->buffers, rx_ring(mac)->buf_dma);
+
+ kfree(rx_ring(mac)->ring_info);
+ pasemi_dma_free_chan(&rx_ring(mac)->chan);
+ mac->rx = NULL;
+}
+
+static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
+ const int limit)
+{
+ const struct pasemi_mac *mac = netdev_priv(dev);
+ struct pasemi_mac_rxring *rx = rx_ring(mac);
+ int fill, count;
+
+ if (limit <= 0)
+ return;
+
+ fill = rx_ring(mac)->next_to_fill;
+ for (count = 0; count < limit; count++) {
+ struct pasemi_mac_buffer *info = &RX_DESC_INFO(rx, fill);
+ u64 *buff = &RX_BUFF(rx, fill);
+ struct sk_buff *skb;
+ dma_addr_t dma;
+
+ /* Entry in use? */
+ WARN_ON(*buff);
+
+		skb = dev_alloc_skb(mac->bufsz);
+		if (unlikely(!skb))
+			break;
+
+		skb_reserve(skb, LOCAL_SKB_ALIGN);
+
+ dma = pci_map_single(mac->dma_pdev, skb->data,
+ mac->bufsz - LOCAL_SKB_ALIGN,
+ PCI_DMA_FROMDEVICE);
+
+ if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) {
+			dev_kfree_skb_irq(skb);
+ break;
+ }
+
+ info->skb = skb;
+ info->dma = dma;
+ *buff = XCT_RXB_LEN(mac->bufsz) | XCT_RXB_ADDR(dma);
+ fill++;
+ }
+
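+	/* Make the new buffer entries visible to the DMA engine before
+	 * bumping its buffer count register below.
+	 */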
+ wmb();
+
+ write_dma_reg(PAS_DMA_RXINT_INCR(mac->dma_if), count);
+
+ rx_ring(mac)->next_to_fill = (rx_ring(mac)->next_to_fill + count) &
+ (RX_RING_SIZE - 1);
+}
+
+static void pasemi_mac_restart_rx_intr(const struct pasemi_mac *mac)
+{
+ struct pasemi_mac_rxring *rx = rx_ring(mac);
+	unsigned int reg, pcnt;
+
+	/* Re-enable packet count interrupts: ack the packet-count
+	 * interrupt we received in pasemi_mac_rx_intr().
+	 */
+
+ pcnt = *rx->chan.status & PAS_STATUS_PCNT_M;
+
+ reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC;
+
+ if (*rx->chan.status & PAS_STATUS_TIMER)
+ reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;
+
+ write_iob_reg(PAS_IOB_DMA_RXCH_RESET(mac->rx->chan.chno), reg);
+}
+
+static void pasemi_mac_restart_tx_intr(const struct pasemi_mac *mac)
+{
+ unsigned int reg, pcnt;
+
+ /* Re-enable packet count interrupts */
+ pcnt = *tx_ring(mac)->chan.status & PAS_STATUS_PCNT_M;
+
+ reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;
+
+ write_iob_reg(PAS_IOB_DMA_TXCH_RESET(tx_ring(mac)->chan.chno), reg);
+}
+
+static inline void pasemi_mac_rx_error(const struct pasemi_mac *mac,
+ const u64 macrx)
+{
+ unsigned int rcmdsta, ccmdsta;
+ struct pasemi_dmachan *chan = &rx_ring(mac)->chan;
+
+ if (!netif_msg_rx_err(mac))
+ return;
+
+ rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
+ ccmdsta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno));
+
+ printk(KERN_ERR "pasemi_mac: rx error. macrx %016llx, rx status %llx\n",
+ macrx, *chan->status);
+
+ printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n",
+ rcmdsta, ccmdsta);
+}
+
+static inline void pasemi_mac_tx_error(const struct pasemi_mac *mac,
+ const u64 mactx)
+{
+ unsigned int cmdsta;
+ struct pasemi_dmachan *chan = &tx_ring(mac)->chan;
+
+ if (!netif_msg_tx_err(mac))
+ return;
+
+ cmdsta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno));
+
+ printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016llx, "\
+ "tx status 0x%016llx\n", mactx, *chan->status);
+
+ printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta);
+}
+
+static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx,
+ const int limit)
+{
+ const struct pasemi_dmachan *chan = &rx->chan;
+ struct pasemi_mac *mac = rx->mac;
+ struct pci_dev *pdev = mac->dma_pdev;
+ unsigned int n;
+ int count, buf_index, tot_bytes, packets;
+ struct pasemi_mac_buffer *info;
+ struct sk_buff *skb;
+ unsigned int len;
+ u64 macrx, eval;
+ dma_addr_t dma;
+
+ tot_bytes = 0;
+ packets = 0;
+
+ spin_lock(&rx->lock);
+
+ n = rx->next_to_clean;
+
+ prefetch(&RX_DESC(rx, n));
+
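+	/* Each received packet occupies four 8-byte ring entries: the
+	 * MACRX status word, the 8-byte result (8BRES) carrying the
+	 * buffer index, the buffer pointer, and one pad entry; hence n
+	 * advances by 4 per packet below.
+	 */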
+ for (count = 0; count < limit; count++) {
+ macrx = RX_DESC(rx, n);
+ prefetch(&RX_DESC(rx, n+4));
+
+ if ((macrx & XCT_MACRX_E) ||
+ (*chan->status & PAS_STATUS_ERROR))
+ pasemi_mac_rx_error(mac, macrx);
+
+ if (!(macrx & XCT_MACRX_O))
+ break;
+
+ info = NULL;
+
+ BUG_ON(!(macrx & XCT_MACRX_RR_8BRES));
+
+ eval = (RX_DESC(rx, n+1) & XCT_RXRES_8B_EVAL_M) >>
+ XCT_RXRES_8B_EVAL_S;
+ buf_index = eval-1;
+
+ dma = (RX_DESC(rx, n+2) & XCT_PTR_ADDR_M);
+ info = &RX_DESC_INFO(rx, buf_index);
+
+ skb = info->skb;
+
+ prefetch_skb(skb);
+
+ len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;
+
+ pci_unmap_single(pdev, dma, mac->bufsz - LOCAL_SKB_ALIGN,
+ PCI_DMA_FROMDEVICE);
+
+ if (macrx & XCT_MACRX_CRC) {
+ /* CRC error flagged */
+ mac->netdev->stats.rx_errors++;
+ mac->netdev->stats.rx_crc_errors++;
+ /* No need to free skb, it'll be reused */
+ goto next;
+ }
+
+ info->skb = NULL;
+ info->dma = 0;
+
+ if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
+ XCT_MACRX_CSUM_S;
+ } else
+ skb_checksum_none_assert(skb);
+
+ packets++;
+ tot_bytes += len;
+
+ /* Don't include CRC */
+ skb_put(skb, len-4);
+
+ skb->protocol = eth_type_trans(skb, mac->netdev);
+ lro_receive_skb(&mac->lro_mgr, skb, (void *)macrx);
+
+next:
+ RX_DESC(rx, n) = 0;
+ RX_DESC(rx, n+1) = 0;
+
+		/* The hardware doesn't zero the buffer entry, so clear it
+		 * here; the replenish loop uses it to tell which slots
+		 * are free.
+		 */
+ RX_BUFF(rx, buf_index) = 0;
+
+ n += 4;
+ }
+
+ if (n > RX_RING_SIZE) {
+ /* Errata 5971 workaround: L2 target of headers */
+ write_iob_reg(PAS_IOB_COM_PKTHDRCNT, 0);
+ n &= (RX_RING_SIZE-1);
+ }
+
+ rx_ring(mac)->next_to_clean = n;
+
+ lro_flush_all(&mac->lro_mgr);
+
+ /* Increase is in number of 16-byte entries, and since each descriptor
+ * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with
+ * count*2.
+ */
+ write_dma_reg(PAS_DMA_RXCHAN_INCR(mac->rx->chan.chno), count << 1);
+
+ pasemi_mac_replenish_rx_ring(mac->netdev, count);
+
+ mac->netdev->stats.rx_bytes += tot_bytes;
+ mac->netdev->stats.rx_packets += packets;
+
+ spin_unlock(&rx_ring(mac)->lock);
+
+ return count;
+}
+
+/* Can't make this too large or we blow the kernel stack limits */
+#define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS)
+
+static int pasemi_mac_clean_tx(struct pasemi_mac_txring *txring)
+{
+ struct pasemi_dmachan *chan = &txring->chan;
+ struct pasemi_mac *mac = txring->mac;
+ int i, j;
+ unsigned int start, descr_count, buf_count, batch_limit;
+ unsigned int ring_limit;
+ unsigned int total_count;
+ unsigned long flags;
+ struct sk_buff *skbs[TX_CLEAN_BATCHSIZE];
+ dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1];
+ int nf[TX_CLEAN_BATCHSIZE];
+ int nr_frags;
+
+ total_count = 0;
+ batch_limit = TX_CLEAN_BATCHSIZE;
+restart:
+ spin_lock_irqsave(&txring->lock, flags);
+
+ start = txring->next_to_clean;
+ ring_limit = txring->next_to_fill;
+
+ prefetch(&TX_DESC_INFO(txring, start+1).skb);
+
+ /* Compensate for when fill has wrapped but clean has not */
+ if (start > ring_limit)
+ ring_limit += TX_RING_SIZE;
+
+ buf_count = 0;
+ descr_count = 0;
+
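+	/* Per-packet ring layout (mirrors pasemi_mac_start_tx()): entry i
+	 * holds the MACTX word, ring_info[i].dma the fragment count,
+	 * ring_info[i+1].skb the skb, and the entries after that the
+	 * buffer pointers.
+	 */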
+ for (i = start;
+ descr_count < batch_limit && i < ring_limit;
+ i += buf_count) {
+ u64 mactx = TX_DESC(txring, i);
+ struct sk_buff *skb;
+
+ if ((mactx & XCT_MACTX_E) ||
+ (*chan->status & PAS_STATUS_ERROR))
+ pasemi_mac_tx_error(mac, mactx);
+
+ /* Skip over control descriptors */
+ if (!(mactx & XCT_MACTX_LLEN_M)) {
+ TX_DESC(txring, i) = 0;
+ TX_DESC(txring, i+1) = 0;
+ buf_count = 2;
+ continue;
+ }
+
+ skb = TX_DESC_INFO(txring, i+1).skb;
+ nr_frags = TX_DESC_INFO(txring, i).dma;
+
+ if (unlikely(mactx & XCT_MACTX_O))
+ /* Not yet transmitted */
+ break;
+
+ buf_count = 2 + nr_frags;
+ /* Since we always fill with an even number of entries, make
+ * sure we skip any unused one at the end as well.
+ */
+ if (buf_count & 1)
+ buf_count++;
+
+ for (j = 0; j <= nr_frags; j++)
+ dmas[descr_count][j] = TX_DESC_INFO(txring, i+1+j).dma;
+
+ skbs[descr_count] = skb;
+ nf[descr_count] = nr_frags;
+
+ TX_DESC(txring, i) = 0;
+ TX_DESC(txring, i+1) = 0;
+
+ descr_count++;
+ }
+ txring->next_to_clean = i & (TX_RING_SIZE-1);
+
+ spin_unlock_irqrestore(&txring->lock, flags);
+ netif_wake_queue(mac->netdev);
+
+ for (i = 0; i < descr_count; i++)
+ pasemi_mac_unmap_tx_skb(mac, nf[i], skbs[i], dmas[i]);
+
+ total_count += descr_count;
+
+ /* If the batch was full, try to clean more */
+ if (descr_count == batch_limit)
+ goto restart;
+
+ return total_count;
+}
+
+static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
+{
+ const struct pasemi_mac_rxring *rxring = data;
+ struct pasemi_mac *mac = rxring->mac;
+ const struct pasemi_dmachan *chan = &rxring->chan;
+ unsigned int reg;
+
+ if (!(*chan->status & PAS_STATUS_CAUSE_M))
+ return IRQ_NONE;
+
+	/* Don't reset the packet count here, so the packet-count
+	 * interrupt stays masked until the poll loop re-arms it; just
+	 * clear all the other interrupt causes.
+	 */
+
+ reg = 0;
+ if (*chan->status & PAS_STATUS_SOFT)
+ reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
+ if (*chan->status & PAS_STATUS_ERROR)
+ reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;
+
+ napi_schedule(&mac->napi);
+
+ write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg);
+
+ return IRQ_HANDLED;
+}
+
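+/* TX completions are normally reaped from the interrupt and NAPI paths;
+ * the clean timer is a periodic fallback so descriptors still get
+ * reclaimed when no TX interrupts arrive.
+ */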
+#define TX_CLEAN_INTERVAL HZ
+
+static void pasemi_mac_tx_timer(unsigned long data)
+{
+ struct pasemi_mac_txring *txring = (struct pasemi_mac_txring *)data;
+ struct pasemi_mac *mac = txring->mac;
+
+ pasemi_mac_clean_tx(txring);
+
+ mod_timer(&txring->clean_timer, jiffies + TX_CLEAN_INTERVAL);
+
+ pasemi_mac_restart_tx_intr(mac);
+}
+
+static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
+{
+ struct pasemi_mac_txring *txring = data;
+ const struct pasemi_dmachan *chan = &txring->chan;
+ struct pasemi_mac *mac = txring->mac;
+ unsigned int reg;
+
+ if (!(*chan->status & PAS_STATUS_CAUSE_M))
+ return IRQ_NONE;
+
+ reg = 0;
+
+ if (*chan->status & PAS_STATUS_SOFT)
+ reg |= PAS_IOB_DMA_TXCH_RESET_SINTC;
+ if (*chan->status & PAS_STATUS_ERROR)
+ reg |= PAS_IOB_DMA_TXCH_RESET_DINTC;
+
+ mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2);
+
+ napi_schedule(&mac->napi);
+
+ if (reg)
+ write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg);
+
+ return IRQ_HANDLED;
+}
+
+static void pasemi_adjust_link(struct net_device *dev)
+{
+ struct pasemi_mac *mac = netdev_priv(dev);
+ int msg;
+ unsigned int flags;
+ unsigned int new_flags;
+
+ if (!mac->phydev->link) {
+ /* If no link, MAC speed settings don't matter. Just report
+ * link down and return.
+ */
+ if (mac->link && netif_msg_link(mac))
+ printk(KERN_INFO "%s: Link is down.\n", dev->name);
+
+ netif_carrier_off(dev);
+ pasemi_mac_intf_disable(mac);
+ mac->link = 0;
+
+ return;
+ } else {
+ pasemi_mac_intf_enable(mac);
+ netif_carrier_on(dev);
+ }
+
+ flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
+ new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M |
+ PAS_MAC_CFG_PCFG_TSR_M);
+
+ if (!mac->phydev->duplex)
+ new_flags |= PAS_MAC_CFG_PCFG_HD;
+
+ switch (mac->phydev->speed) {
+ case 1000:
+ new_flags |= PAS_MAC_CFG_PCFG_SPD_1G |
+ PAS_MAC_CFG_PCFG_TSR_1G;
+ break;
+ case 100:
+ new_flags |= PAS_MAC_CFG_PCFG_SPD_100M |
+ PAS_MAC_CFG_PCFG_TSR_100M;
+ break;
+ case 10:
+ new_flags |= PAS_MAC_CFG_PCFG_SPD_10M |
+ PAS_MAC_CFG_PCFG_TSR_10M;
+ break;
+ default:
+ printk("Unsupported speed %d\n", mac->phydev->speed);
+ }
+
+ /* Print on link or speed/duplex change */
+ msg = mac->link != mac->phydev->link || flags != new_flags;
+
+ mac->duplex = mac->phydev->duplex;
+ mac->speed = mac->phydev->speed;
+ mac->link = mac->phydev->link;
+
+ if (new_flags != flags)
+ write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags);
+
+ if (msg && netif_msg_link(mac))
+ printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n",
+ dev->name, mac->speed, mac->duplex ? "full" : "half");
+}
+
+static int pasemi_mac_phy_init(struct net_device *dev)
+{
+ struct pasemi_mac *mac = netdev_priv(dev);
+ struct device_node *dn, *phy_dn;
+ struct phy_device *phydev;
+
+ dn = pci_device_to_OF_node(mac->pdev);
+ phy_dn = of_parse_phandle(dn, "phy-handle", 0);
+
+ mac->link = 0;
+ mac->speed = 0;
+ mac->duplex = -1;
+
+ phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0,
+				PHY_INTERFACE_MODE_SGMII);
+	of_node_put(phy_dn);
+
+	if (!phydev) {
+		printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
+		return -ENODEV;
+	}
+
+ mac->phydev = phydev;
+
+ return 0;
+}
+
+static int pasemi_mac_open(struct net_device *dev)
+{
+ struct pasemi_mac *mac = netdev_priv(dev);
+ unsigned int flags;
+ int i, ret;
+
+ flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
+ PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
+ PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);
+
+ write_mac_reg(mac, PAS_MAC_CFG_TXP, flags);
+
+ ret = pasemi_mac_setup_rx_resources(dev);
+ if (ret)
+ goto out_rx_resources;
+
+ mac->tx = pasemi_mac_setup_tx_resources(dev);
+
+ if (!mac->tx)
+ goto out_tx_ring;
+
+	/* We may already have allocated checksum rings if the MTU was
+	 * changed before the interface was brought up.
+	 */
+ if (dev->mtu > 1500 && !mac->num_cs) {
+ pasemi_mac_setup_csrings(mac);
+ if (!mac->num_cs)
+ goto out_tx_ring;
+ }
+
+ /* Zero out rmon counters */
+ for (i = 0; i < 32; i++)
+ write_mac_reg(mac, PAS_MAC_RMON(i), 0);
+
+ /* 0x3ff with 33MHz clock is about 31us */
+ write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG,
+ PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0x3ff));
+
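+	/* Packet-count thresholds at which the RX and TX channels raise
+	 * their interrupts.
+	 */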
+ write_iob_reg(PAS_IOB_DMA_RXCH_CFG(mac->rx->chan.chno),
+ PAS_IOB_DMA_RXCH_CFG_CNTTH(256));
+
+ write_iob_reg(PAS_IOB_DMA_TXCH_CFG(mac->tx->chan.chno),
+ PAS_IOB_DMA_TXCH_CFG_CNTTH(32));
+
+ write_mac_reg(mac, PAS_MAC_IPC_CHNL,
+ PAS_MAC_IPC_CHNL_DCHNO(mac->rx->chan.chno) |
+ PAS_MAC_IPC_CHNL_BCH(mac->rx->chan.chno));
+
+ /* enable rx if */
+ write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+ PAS_DMA_RXINT_RCMDSTA_EN |
+ PAS_DMA_RXINT_RCMDSTA_DROPS_M |
+ PAS_DMA_RXINT_RCMDSTA_BP |
+ PAS_DMA_RXINT_RCMDSTA_OO |
+ PAS_DMA_RXINT_RCMDSTA_BT);
+
+ /* enable rx channel */
+ pasemi_dma_start_chan(&rx_ring(mac)->chan, PAS_DMA_RXCHAN_CCMDSTA_DU |
+ PAS_DMA_RXCHAN_CCMDSTA_OD |
+ PAS_DMA_RXCHAN_CCMDSTA_FD |
+ PAS_DMA_RXCHAN_CCMDSTA_DT);
+
+ /* enable tx channel */
+ pasemi_dma_start_chan(&tx_ring(mac)->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ |
+ PAS_DMA_TXCHAN_TCMDSTA_DB |
+ PAS_DMA_TXCHAN_TCMDSTA_DE |
+ PAS_DMA_TXCHAN_TCMDSTA_DA);
+
+ pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE);
+
+ write_dma_reg(PAS_DMA_RXCHAN_INCR(rx_ring(mac)->chan.chno),
+ RX_RING_SIZE>>1);
+
+ /* Clear out any residual packet count state from firmware */
+ pasemi_mac_restart_rx_intr(mac);
+ pasemi_mac_restart_tx_intr(mac);
+
+ flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;
+
+ if (mac->type == MAC_TYPE_GMAC)
+ flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
+ else
+ flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G;
+
+ /* Enable interface in MAC */
+ write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
+
+ ret = pasemi_mac_phy_init(dev);
+ if (ret) {
+ /* Since we won't get link notification, just enable RX */
+ pasemi_mac_intf_enable(mac);
+ if (mac->type == MAC_TYPE_GMAC) {
+ /* Warn for missing PHY on SGMII (1Gig) ports */
+ dev_warn(&mac->pdev->dev,
+ "PHY init failed: %d.\n", ret);
+ dev_warn(&mac->pdev->dev,
+ "Defaulting to 1Gbit full duplex\n");
+ }
+ }
+
+ netif_start_queue(dev);
+ napi_enable(&mac->napi);
+
+ snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx",
+ dev->name);
+
+ ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, IRQF_DISABLED,
+ mac->tx_irq_name, mac->tx);
+ if (ret) {
+ dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
+ mac->tx->chan.irq, ret);
+ goto out_tx_int;
+ }
+
+ snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx",
+ dev->name);
+
+ ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, IRQF_DISABLED,
+ mac->rx_irq_name, mac->rx);
+ if (ret) {
+ dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
+ mac->rx->chan.irq, ret);
+ goto out_rx_int;
+ }
+
+ if (mac->phydev)
+ phy_start(mac->phydev);
+
+ init_timer(&mac->tx->clean_timer);
+ mac->tx->clean_timer.function = pasemi_mac_tx_timer;
+ mac->tx->clean_timer.data = (unsigned long)mac->tx;
+ mac->tx->clean_timer.expires = jiffies+HZ;
+ add_timer(&mac->tx->clean_timer);
+
+ return 0;
+
+out_rx_int:
+ free_irq(mac->tx->chan.irq, mac->tx);
+out_tx_int:
+ napi_disable(&mac->napi);
+ netif_stop_queue(dev);
+out_tx_ring:
+ if (mac->tx)
+ pasemi_mac_free_tx_resources(mac);
+ pasemi_mac_free_rx_resources(mac);
+out_rx_resources:
+
+ return ret;
+}
+
+#define MAX_RETRIES 5000
+
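+/* Each pause helper below sets the channel's stop bit, then polls its
+ * ACT bit for up to MAX_RETRIES iterations (rescheduling in between)
+ * before declaring failure.
+ */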
+static void pasemi_mac_pause_txchan(struct pasemi_mac *mac)
+{
+ unsigned int sta, retries;
+ int txch = tx_ring(mac)->chan.chno;
+
+ write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch),
+ PAS_DMA_TXCHAN_TCMDSTA_ST);
+
+ for (retries = 0; retries < MAX_RETRIES; retries++) {
+ sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
+ if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))
+ break;
+ cond_resched();
+ }
+
+ if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
+ dev_err(&mac->dma_pdev->dev,
+ "Failed to stop tx channel, tcmdsta %08x\n", sta);
+
+ write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0);
+}
+
+static void pasemi_mac_pause_rxchan(struct pasemi_mac *mac)
+{
+ unsigned int sta, retries;
+ int rxch = rx_ring(mac)->chan.chno;
+
+ write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch),
+ PAS_DMA_RXCHAN_CCMDSTA_ST);
+ for (retries = 0; retries < MAX_RETRIES; retries++) {
+ sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
+ if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))
+ break;
+ cond_resched();
+ }
+
+ if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
+ dev_err(&mac->dma_pdev->dev,
+ "Failed to stop rx channel, ccmdsta 08%x\n", sta);
+ write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0);
+}
+
+static void pasemi_mac_pause_rxint(struct pasemi_mac *mac)
+{
+ unsigned int sta, retries;
+
+ write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+ PAS_DMA_RXINT_RCMDSTA_ST);
+ for (retries = 0; retries < MAX_RETRIES; retries++) {
+ sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
+ if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))
+ break;
+ cond_resched();
+ }
+
+ if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
+ dev_err(&mac->dma_pdev->dev,
+ "Failed to stop rx interface, rcmdsta %08x\n", sta);
+ write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
+}
+
+static int pasemi_mac_close(struct net_device *dev)
+{
+ struct pasemi_mac *mac = netdev_priv(dev);
+ unsigned int sta;
+ int rxch, txch, i;
+
+ rxch = rx_ring(mac)->chan.chno;
+ txch = tx_ring(mac)->chan.chno;
+
+ if (mac->phydev) {
+ phy_stop(mac->phydev);
+ phy_disconnect(mac->phydev);
+ }
+
+ del_timer_sync(&mac->tx->clean_timer);
+
+ netif_stop_queue(dev);
+ napi_disable(&mac->napi);
+
+ sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
+ if (sta & (PAS_DMA_RXINT_RCMDSTA_BP |
+ PAS_DMA_RXINT_RCMDSTA_OO |
+ PAS_DMA_RXINT_RCMDSTA_BT))
+ printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta);
+
+ sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
+ if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU |
+ PAS_DMA_RXCHAN_CCMDSTA_OD |
+ PAS_DMA_RXCHAN_CCMDSTA_FD |
+ PAS_DMA_RXCHAN_CCMDSTA_DT))
+ printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta);
+
+ sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
+ if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ | PAS_DMA_TXCHAN_TCMDSTA_DB |
+ PAS_DMA_TXCHAN_TCMDSTA_DE | PAS_DMA_TXCHAN_TCMDSTA_DA))
+ printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta);
+
+ /* Clean out any pending buffers */
+ pasemi_mac_clean_tx(tx_ring(mac));
+ pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);
+
+ pasemi_mac_pause_txchan(mac);
+ pasemi_mac_pause_rxint(mac);
+ pasemi_mac_pause_rxchan(mac);
+ pasemi_mac_intf_disable(mac);
+
+ free_irq(mac->tx->chan.irq, mac->tx);
+ free_irq(mac->rx->chan.irq, mac->rx);
+
+ for (i = 0; i < mac->num_cs; i++) {
+ pasemi_mac_free_csring(mac->cs[i]);
+ mac->cs[i] = NULL;
+ }
+
+ mac->num_cs = 0;
+
+ /* Free resources */
+ pasemi_mac_free_rx_resources(mac);
+ pasemi_mac_free_tx_resources(mac);
+
+ return 0;
+}
+
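+/* Queue the checksum-offload work for one frame: function descriptors on
+ * the checksum channel compute the TCP/UDP checksum into an 8-byte
+ * ring-resident result and copy it into the packet, while paired event
+ * descriptors synchronize the MAC TX channel with the checksum engine so
+ * the frame is sent only after the checksum has been written.
+ */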
+static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
+ const dma_addr_t *map,
+ const unsigned int *map_size,
+ struct pasemi_mac_txring *txring,
+ struct pasemi_mac_csring *csring)
+{
+ u64 fund;
+ dma_addr_t cs_dest;
+ const int nh_off = skb_network_offset(skb);
+ const int nh_len = skb_network_header_len(skb);
+ const int nfrags = skb_shinfo(skb)->nr_frags;
+ int cs_size, i, fill, hdr, cpyhdr, evt;
+ dma_addr_t csdma;
+
+ fund = XCT_FUN_ST | XCT_FUN_RR_8BRES |
+ XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
+ XCT_FUN_CRM_SIG | XCT_FUN_LLEN(skb->len - nh_off) |
+ XCT_FUN_SHL(nh_len >> 2) | XCT_FUN_SE;
+
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_TCP:
+ fund |= XCT_FUN_SIG_TCP4;
+ /* TCP checksum is 16 bytes into the header */
+ cs_dest = map[0] + skb_transport_offset(skb) + 16;
+ break;
+ case IPPROTO_UDP:
+ fund |= XCT_FUN_SIG_UDP4;
+ /* UDP checksum is 6 bytes into the header */
+ cs_dest = map[0] + skb_transport_offset(skb) + 6;
+ break;
+ default:
+ BUG();
+ }
+
+ /* Do the checksum offloaded */
+ fill = csring->next_to_fill;
+ hdr = fill;
+
+ CS_DESC(csring, fill++) = fund;
+ /* Room for 8BRES. Checksum result is really 2 bytes into it */
+ csdma = csring->chan.ring_dma + (fill & (CS_RING_SIZE-1)) * 8 + 2;
+ CS_DESC(csring, fill++) = 0;
+
+ CS_DESC(csring, fill) = XCT_PTR_LEN(map_size[0]-nh_off) | XCT_PTR_ADDR(map[0]+nh_off);
+ for (i = 1; i <= nfrags; i++)
+ CS_DESC(csring, fill+i) = XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);
+
+ fill += i;
+ if (fill & 1)
+ fill++;
+
+	/* Copy the checksum result back into the packet */
+ cpyhdr = fill;
+ CS_DESC(csring, fill++) = XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
+ XCT_FUN_LLEN(2) | XCT_FUN_SE;
+ CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(cs_dest) | XCT_PTR_T;
+ CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(csdma);
+ fill++;
+
+ evt = !csring->last_event;
+ csring->last_event = evt;
+
+ /* Event handshaking with MAC TX */
+ CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
+ CTRL_CMD_ETYPE_SET | CTRL_CMD_REG(csring->events[evt]);
+ CS_DESC(csring, fill++) = 0;
+ CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
+ CTRL_CMD_ETYPE_WCLR | CTRL_CMD_REG(csring->events[!evt]);
+ CS_DESC(csring, fill++) = 0;
+ csring->next_to_fill = fill & (CS_RING_SIZE-1);
+
+ cs_size = fill - hdr;
+ write_dma_reg(PAS_DMA_TXCHAN_INCR(csring->chan.chno), (cs_size) >> 1);
+
+ /* TX-side event handshaking */
+ fill = txring->next_to_fill;
+ TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
+ CTRL_CMD_ETYPE_WSET | CTRL_CMD_REG(csring->events[evt]);
+ TX_DESC(txring, fill++) = 0;
+ TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
+ CTRL_CMD_ETYPE_CLR | CTRL_CMD_REG(csring->events[!evt]);
+ TX_DESC(txring, fill++) = 0;
+ txring->next_to_fill = fill;
+
+ write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2);
+}
+
+static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct pasemi_mac * const mac = netdev_priv(dev);
+ struct pasemi_mac_txring * const txring = tx_ring(mac);
+ struct pasemi_mac_csring *csring;
+ u64 dflags = 0;
+ u64 mactx;
+ dma_addr_t map[MAX_SKB_FRAGS+1];
+ unsigned int map_size[MAX_SKB_FRAGS+1];
+ unsigned long flags;
+ int i, nfrags;
+ int fill;
+ const int nh_off = skb_network_offset(skb);
+ const int nh_len = skb_network_header_len(skb);
+
+ prefetch(&txring->ring_info);
+
+ dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD;
+
+ nfrags = skb_shinfo(skb)->nr_frags;
+
+ map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+ map_size[0] = skb_headlen(skb);
+ if (pci_dma_mapping_error(mac->dma_pdev, map[0]))
+ goto out_err_nolock;
+
+ for (i = 0; i < nfrags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ map[i + 1] = skb_frag_dma_map(&mac->dma_pdev->dev, frag, 0,
+ frag->size, DMA_TO_DEVICE);
+ map_size[i+1] = frag->size;
+ if (dma_mapping_error(&mac->dma_pdev->dev, map[i + 1])) {
+ nfrags = i;
+ goto out_err_nolock;
+ }
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL && skb->len <= 1540) {
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_TCP:
+ dflags |= XCT_MACTX_CSUM_TCP;
+ dflags |= XCT_MACTX_IPH(nh_len >> 2);
+ dflags |= XCT_MACTX_IPO(nh_off);
+ break;
+ case IPPROTO_UDP:
+ dflags |= XCT_MACTX_CSUM_UDP;
+ dflags |= XCT_MACTX_IPH(nh_len >> 2);
+ dflags |= XCT_MACTX_IPO(nh_off);
+ break;
+ default:
+ WARN_ON(1);
+ }
+ }
+
+ mactx = dflags | XCT_MACTX_LLEN(skb->len);
+
+ spin_lock_irqsave(&txring->lock, flags);
+
+	/* Avoid stepping on the same cache line that the DMA controller
+	 * is currently about to send, so leave at least 8 words available.
+	 * Total free space needed is the mactx descriptor, nfrags + 1
+	 * buffer pointers, up to 4 checksum/event descriptors, plus the
+	 * 8 words of headroom: nfrags + 14 in all.
+	 */
+ if (RING_AVAIL(txring) < nfrags + 14) {
+ /* no room -- stop the queue and wait for tx intr */
+ netif_stop_queue(dev);
+ goto out_err;
+ }
+
+ /* Queue up checksum + event descriptors, if needed */
+ if (mac->num_cs && skb->ip_summed == CHECKSUM_PARTIAL && skb->len > 1540) {
+ csring = mac->cs[mac->last_cs];
+ mac->last_cs = (mac->last_cs + 1) % mac->num_cs;
+
+ pasemi_mac_queue_csdesc(skb, map, map_size, txring, csring);
+ }
+
+ fill = txring->next_to_fill;
+ TX_DESC(txring, fill) = mactx;
+ TX_DESC_INFO(txring, fill).dma = nfrags;
+ fill++;
+ TX_DESC_INFO(txring, fill).skb = skb;
+ for (i = 0; i <= nfrags; i++) {
+ TX_DESC(txring, fill+i) =
+ XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);
+ TX_DESC_INFO(txring, fill+i).dma = map[i];
+ }
+
+ /* We have to add an even number of 8-byte entries to the ring
+ * even if the last one is unused. That means always an odd number
+ * of pointers + one mactx descriptor.
+ */
+ if (nfrags & 1)
+ nfrags++;
+
+ txring->next_to_fill = (fill + nfrags + 1) & (TX_RING_SIZE-1);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ spin_unlock_irqrestore(&txring->lock, flags);
+
+ write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), (nfrags+2) >> 1);
+
+ return NETDEV_TX_OK;
+
+out_err:
+ spin_unlock_irqrestore(&txring->lock, flags);
+out_err_nolock:
+ while (nfrags--)
+ pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags],
+ PCI_DMA_TODEVICE);
+
+ return NETDEV_TX_BUSY;
+}
+
+static void pasemi_mac_set_rx_mode(struct net_device *dev)
+{
+ const struct pasemi_mac *mac = netdev_priv(dev);
+ unsigned int flags;
+
+ flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
+
+ /* Set promiscuous */
+ if (dev->flags & IFF_PROMISC)
+ flags |= PAS_MAC_CFG_PCFG_PR;
+ else
+ flags &= ~PAS_MAC_CFG_PCFG_PR;
+
+ write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
+}
+
+static int pasemi_mac_poll(struct napi_struct *napi, int budget)
+{
+ struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi);
+ int pkts;
+
+ pasemi_mac_clean_tx(tx_ring(mac));
+ pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
+ if (pkts < budget) {
+ /* all done, no more packets present */
+ napi_complete(napi);
+
+ pasemi_mac_restart_rx_intr(mac);
+ pasemi_mac_restart_tx_intr(mac);
+ }
+ return pkts;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void pasemi_mac_netpoll(struct net_device *dev)
+{
+ const struct pasemi_mac *mac = netdev_priv(dev);
+
+ disable_irq(mac->tx->chan.irq);
+ pasemi_mac_tx_intr(mac->tx->chan.irq, mac->tx);
+ enable_irq(mac->tx->chan.irq);
+
+ disable_irq(mac->rx->chan.irq);
+ pasemi_mac_rx_intr(mac->rx->chan.irq, mac->rx);
+ enable_irq(mac->rx->chan.irq);
+}
+#endif
+
+static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct pasemi_mac *mac = netdev_priv(dev);
+ unsigned int reg;
+ unsigned int rcmdsta = 0;
+ int running;
+ int ret = 0;
+
+ if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU)
+ return -EINVAL;
+
+ running = netif_running(dev);
+
+ if (running) {
+ /* Need to stop the interface, clean out all already
+ * received buffers, free all unused buffers on the RX
+ * interface ring, then finally re-fill the rx ring with
+ * the new-size buffers and restart.
+ */
+
+ napi_disable(&mac->napi);
+ netif_tx_disable(dev);
+ pasemi_mac_intf_disable(mac);
+
+ rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
+ pasemi_mac_pause_rxint(mac);
+ pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);
+ pasemi_mac_free_rx_buffers(mac);
+
+ }
+
+ /* Setup checksum channels if large MTU and none already allocated */
+ if (new_mtu > 1500 && !mac->num_cs) {
+ pasemi_mac_setup_csrings(mac);
+ if (!mac->num_cs) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ /* Change maxf, i.e. what size frames are accepted.
+ * Need room for ethernet header and CRC word
+ */
+ reg = read_mac_reg(mac, PAS_MAC_CFG_MACCFG);
+ reg &= ~PAS_MAC_CFG_MACCFG_MAXF_M;
+ reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4);
+ write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg);
+
+ dev->mtu = new_mtu;
+	/* MTU + ETH_HLEN + ETH_FCS_LEN + skb alignment + 2 64B cachelines */
+ mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
+
+out:
+ if (running) {
+ write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+ rcmdsta | PAS_DMA_RXINT_RCMDSTA_EN);
+
+ rx_ring(mac)->next_to_fill = 0;
+ pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE-1);
+
+ napi_enable(&mac->napi);
+ netif_start_queue(dev);
+ pasemi_mac_intf_enable(mac);
+ }
+
+ return ret;
+}
+
+static const struct net_device_ops pasemi_netdev_ops = {
+ .ndo_open = pasemi_mac_open,
+ .ndo_stop = pasemi_mac_close,
+ .ndo_start_xmit = pasemi_mac_start_tx,
+ .ndo_set_rx_mode = pasemi_mac_set_rx_mode,
+ .ndo_set_mac_address = pasemi_mac_set_mac_addr,
+ .ndo_change_mtu = pasemi_mac_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = pasemi_mac_netpoll,
+#endif
+};
+
+static int __devinit
+pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct pasemi_mac *mac;
+ int err, ret;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ dev = alloc_etherdev(sizeof(struct pasemi_mac));
+ if (dev == NULL) {
+ dev_err(&pdev->dev,
+ "pasemi_mac: Could not allocate ethernet device.\n");
+ err = -ENOMEM;
+ goto out_disable_device;
+ }
+
+ pci_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ mac = netdev_priv(dev);
+
+ mac->pdev = pdev;
+ mac->netdev = dev;
+
+ netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);
+
+ dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG |
+ NETIF_F_HIGHDMA | NETIF_F_GSO;
+
+ mac->lro_mgr.max_aggr = LRO_MAX_AGGR;
+ mac->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
+ mac->lro_mgr.lro_arr = mac->lro_desc;
+ mac->lro_mgr.get_skb_header = get_skb_hdr;
+ mac->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
+ mac->lro_mgr.dev = mac->netdev;
+ mac->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
+ mac->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+
+ mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
+ if (!mac->dma_pdev) {
+ dev_err(&mac->pdev->dev, "Can't find DMA Controller\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
+ if (!mac->iob_pdev) {
+ dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ /* get mac addr from device tree */
+ if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {
+ err = -ENODEV;
+ goto out;
+ }
+ memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));
+
+ ret = mac_to_intf(mac);
+ if (ret < 0) {
+ dev_err(&mac->pdev->dev, "Can't map DMA interface\n");
+ err = -ENODEV;
+ goto out;
+ }
+ mac->dma_if = ret;
+
+ switch (pdev->device) {
+ case 0xa005:
+ mac->type = MAC_TYPE_GMAC;
+ break;
+ case 0xa006:
+ mac->type = MAC_TYPE_XAUI;
+ break;
+ default:
+ err = -ENODEV;
+ goto out;
+ }
+
+ dev->netdev_ops = &pasemi_netdev_ops;
+ dev->mtu = PE_DEF_MTU;
+	/* Default 1500 MTU + ETH_HLEN + ETH_FCS_LEN + skb alignment + 2 64B cachelines */
+ mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
+
+ dev->ethtool_ops = &pasemi_mac_ethtool_ops;
+
+	mac->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+
+	/* Enable most messages by default (this overrides the level
+	 * derived from the debug parameter above).
+	 */
+	mac->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
+
+ err = register_netdev(dev);
+
+ if (err) {
+ dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
+ err);
+ goto out;
+	} else if (netif_msg_probe(mac))
+ printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %pM\n",
+ dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
+ mac->dma_if, dev->dev_addr);
+
+ return err;
+
+out:
+ if (mac->iob_pdev)
+ pci_dev_put(mac->iob_pdev);
+ if (mac->dma_pdev)
+ pci_dev_put(mac->dma_pdev);
+
+ free_netdev(dev);
+out_disable_device:
+ pci_disable_device(pdev);
+ return err;
+
+}
+
+static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pasemi_mac *mac;
+
+ if (!netdev)
+ return;
+
+ mac = netdev_priv(netdev);
+
+ unregister_netdev(netdev);
+
+ pci_disable_device(pdev);
+ pci_dev_put(mac->dma_pdev);
+ pci_dev_put(mac->iob_pdev);
+
+ pasemi_dma_free_chan(&mac->tx->chan);
+ pasemi_dma_free_chan(&mac->rx->chan);
+
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(pasemi_mac_pci_tbl) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
+ { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
+ { },
+};
+
+MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);
+
+static struct pci_driver pasemi_mac_driver = {
+ .name = "pasemi_mac",
+ .id_table = pasemi_mac_pci_tbl,
+ .probe = pasemi_mac_probe,
+ .remove = __devexit_p(pasemi_mac_remove),
+};
+
+static void __exit pasemi_mac_cleanup_module(void)
+{
+ pci_unregister_driver(&pasemi_mac_driver);
+}
+
+static int __init pasemi_mac_init_module(void)
+{
+ int err;
+
+ err = pasemi_dma_init();
+ if (err)
+ return err;
+
+ return pci_register_driver(&pasemi_mac_driver);
+}
+
+module_init(pasemi_mac_init_module);
+module_exit(pasemi_mac_cleanup_module);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h
new file mode 100644
index 000000000000..e2f4efa8ad46
--- /dev/null
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2006 PA Semi, Inc
+ *
+ * Driver for the PA6T-1682M onchip 1G/10G Ethernet MACs, soft state and
+ * hardware register layouts.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef PASEMI_MAC_H
+#define PASEMI_MAC_H
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/phy.h>
+
+/* Must be a power of two */
+#define RX_RING_SIZE 2048
+#define TX_RING_SIZE 4096
+#define CS_RING_SIZE (TX_RING_SIZE*2)
+
+
+#define MAX_LRO_DESCRIPTORS 8
+#define MAX_CS 2
+
+struct pasemi_mac_txring {
+ struct pasemi_dmachan chan; /* Must be first */
+ spinlock_t lock;
+ unsigned int size;
+ unsigned int next_to_fill;
+ unsigned int next_to_clean;
+ struct pasemi_mac_buffer *ring_info;
+ struct pasemi_mac *mac; /* Needed in intr handler */
+ struct timer_list clean_timer;
+};
+
+struct pasemi_mac_rxring {
+ struct pasemi_dmachan chan; /* Must be first */
+ spinlock_t lock;
+ u64 *buffers; /* RX interface buffer ring */
+ dma_addr_t buf_dma;
+ unsigned int size;
+ unsigned int next_to_fill;
+ unsigned int next_to_clean;
+ struct pasemi_mac_buffer *ring_info;
+ struct pasemi_mac *mac; /* Needed in intr handler */
+};
+
+struct pasemi_mac_csring {
+ struct pasemi_dmachan chan;
+ unsigned int size;
+ unsigned int next_to_fill;
+ int events[2];
+ int last_event;
+ int fun;
+};
+
+struct pasemi_mac {
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct pci_dev *dma_pdev;
+ struct pci_dev *iob_pdev;
+ struct phy_device *phydev;
+ struct napi_struct napi;
+
+ int bufsz; /* RX ring buffer size */
+ int last_cs;
+ int num_cs;
+ u32 dma_if;
+ u8 type;
+#define MAC_TYPE_GMAC 1
+#define MAC_TYPE_XAUI 2
+
+ u8 mac_addr[6];
+
+ struct net_lro_mgr lro_mgr;
+ struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
+ struct timer_list rxtimer;
+ unsigned int lro_max_aggr;
+
+ struct pasemi_mac_txring *tx;
+ struct pasemi_mac_rxring *rx;
+ struct pasemi_mac_csring *cs[MAX_CS];
+ char tx_irq_name[10]; /* "eth%d tx" */
+ char rx_irq_name[10]; /* "eth%d rx" */
+ int link;
+ int speed;
+ int duplex;
+
+ unsigned int msg_enable;
+};
+
+/* Software status descriptor (ring_info) */
+struct pasemi_mac_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+};
+
+#define TX_DESC(tx, num) ((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)])
+#define TX_DESC_INFO(tx, num) ((tx)->ring_info[(num) & (TX_RING_SIZE-1)])
+#define RX_DESC(rx, num) ((rx)->chan.ring_virt[(num) & (RX_RING_SIZE-1)])
+#define RX_DESC_INFO(rx, num) ((rx)->ring_info[(num) & (RX_RING_SIZE-1)])
+#define RX_BUFF(rx, num) ((rx)->buffers[(num) & (RX_RING_SIZE-1)])
+#define CS_DESC(cs, num) ((cs)->chan.ring_virt[(num) & (CS_RING_SIZE-1)])
+
+#define RING_USED(ring) (((ring)->next_to_fill - (ring)->next_to_clean) \
+ & ((ring)->size - 1))
+#define RING_AVAIL(ring) ((ring->size) - RING_USED(ring))
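+/* Example: next_to_fill == 10 and next_to_clean == 4 on the TX ring
+ * gives RING_USED() == 6 and RING_AVAIL() == 4090; the power-of-two
+ * ring sizes reduce wrap handling to a simple mask.
+ */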
+
+/* PCI register offsets and formats */
+
+
+/* MAC CFG register offsets */
+enum {
+ PAS_MAC_CFG_PCFG = 0x80,
+ PAS_MAC_CFG_MACCFG = 0x84,
+ PAS_MAC_CFG_ADR0 = 0x8c,
+ PAS_MAC_CFG_ADR1 = 0x90,
+ PAS_MAC_CFG_TXP = 0x98,
+ PAS_MAC_CFG_RMON = 0x100,
+ PAS_MAC_IPC_CHNL = 0x208,
+};
+
+/* MAC CFG register fields */
+#define PAS_MAC_CFG_PCFG_PE 0x80000000
+#define PAS_MAC_CFG_PCFG_CE 0x40000000
+#define PAS_MAC_CFG_PCFG_BU 0x20000000
+#define PAS_MAC_CFG_PCFG_TT 0x10000000
+#define PAS_MAC_CFG_PCFG_TSR_M 0x0c000000
+#define PAS_MAC_CFG_PCFG_TSR_10M 0x00000000
+#define PAS_MAC_CFG_PCFG_TSR_100M 0x04000000
+#define PAS_MAC_CFG_PCFG_TSR_1G 0x08000000
+#define PAS_MAC_CFG_PCFG_TSR_10G 0x0c000000
+#define PAS_MAC_CFG_PCFG_T24 0x02000000
+#define PAS_MAC_CFG_PCFG_PR 0x01000000
+#define PAS_MAC_CFG_PCFG_CRO_M 0x00ff0000
+#define PAS_MAC_CFG_PCFG_CRO_S 16
+#define PAS_MAC_CFG_PCFG_IPO_M 0x0000ff00
+#define PAS_MAC_CFG_PCFG_IPO_S 8
+#define PAS_MAC_CFG_PCFG_S1 0x00000080
+#define PAS_MAC_CFG_PCFG_IO_M 0x00000060
+#define PAS_MAC_CFG_PCFG_IO_MAC 0x00000000
+#define PAS_MAC_CFG_PCFG_IO_OFF 0x00000020
+#define PAS_MAC_CFG_PCFG_IO_IND_ETH 0x00000040
+#define PAS_MAC_CFG_PCFG_IO_IND_IP 0x00000060
+#define PAS_MAC_CFG_PCFG_LP 0x00000010
+#define PAS_MAC_CFG_PCFG_TS 0x00000008
+#define PAS_MAC_CFG_PCFG_HD 0x00000004
+#define PAS_MAC_CFG_PCFG_SPD_M 0x00000003
+#define PAS_MAC_CFG_PCFG_SPD_10M 0x00000000
+#define PAS_MAC_CFG_PCFG_SPD_100M 0x00000001
+#define PAS_MAC_CFG_PCFG_SPD_1G 0x00000002
+#define PAS_MAC_CFG_PCFG_SPD_10G 0x00000003
+
+#define PAS_MAC_CFG_MACCFG_TXT_M 0x70000000
+#define PAS_MAC_CFG_MACCFG_TXT_S 28
+#define PAS_MAC_CFG_MACCFG_PRES_M 0x0f000000
+#define PAS_MAC_CFG_MACCFG_PRES_S 24
+#define PAS_MAC_CFG_MACCFG_MAXF_M 0x00ffff00
+#define PAS_MAC_CFG_MACCFG_MAXF_S 8
+#define PAS_MAC_CFG_MACCFG_MAXF(x) (((x) << PAS_MAC_CFG_MACCFG_MAXF_S) & \
+ PAS_MAC_CFG_MACCFG_MAXF_M)
+#define PAS_MAC_CFG_MACCFG_MINF_M 0x000000ff
+#define PAS_MAC_CFG_MACCFG_MINF_S 0
+
+#define PAS_MAC_CFG_TXP_FCF 0x01000000
+#define PAS_MAC_CFG_TXP_FCE 0x00800000
+#define PAS_MAC_CFG_TXP_FC 0x00400000
+#define PAS_MAC_CFG_TXP_FPC_M 0x00300000
+#define PAS_MAC_CFG_TXP_FPC_S 20
+#define PAS_MAC_CFG_TXP_FPC(x) (((x) << PAS_MAC_CFG_TXP_FPC_S) & \
+ PAS_MAC_CFG_TXP_FPC_M)
+#define PAS_MAC_CFG_TXP_RT 0x00080000
+#define PAS_MAC_CFG_TXP_BL 0x00040000
+#define PAS_MAC_CFG_TXP_SL_M 0x00030000
+#define PAS_MAC_CFG_TXP_SL_S 16
+#define PAS_MAC_CFG_TXP_SL(x) (((x) << PAS_MAC_CFG_TXP_SL_S) & \
+ PAS_MAC_CFG_TXP_SL_M)
+#define PAS_MAC_CFG_TXP_COB_M 0x0000f000
+#define PAS_MAC_CFG_TXP_COB_S 12
+#define PAS_MAC_CFG_TXP_COB(x) (((x) << PAS_MAC_CFG_TXP_COB_S) & \
+ PAS_MAC_CFG_TXP_COB_M)
+#define PAS_MAC_CFG_TXP_TIFT_M 0x00000f00
+#define PAS_MAC_CFG_TXP_TIFT_S 8
+#define PAS_MAC_CFG_TXP_TIFT(x) (((x) << PAS_MAC_CFG_TXP_TIFT_S) & \
+ PAS_MAC_CFG_TXP_TIFT_M)
+#define PAS_MAC_CFG_TXP_TIFG_M 0x000000ff
+#define PAS_MAC_CFG_TXP_TIFG_S 0
+#define PAS_MAC_CFG_TXP_TIFG(x) (((x) << PAS_MAC_CFG_TXP_TIFG_S) & \
+ PAS_MAC_CFG_TXP_TIFG_M)
+
+#define PAS_MAC_RMON(r) (0x100+(r)*4)
+
+#define PAS_MAC_IPC_CHNL_DCHNO_M 0x003f0000
+#define PAS_MAC_IPC_CHNL_DCHNO_S 16
+#define PAS_MAC_IPC_CHNL_DCHNO(x) (((x) << PAS_MAC_IPC_CHNL_DCHNO_S) & \
+ PAS_MAC_IPC_CHNL_DCHNO_M)
+#define PAS_MAC_IPC_CHNL_BCH_M 0x0000003f
+#define PAS_MAC_IPC_CHNL_BCH_S 0
+#define PAS_MAC_IPC_CHNL_BCH(x) (((x) << PAS_MAC_IPC_CHNL_BCH_S) & \
+ PAS_MAC_IPC_CHNL_BCH_M)
+
+
+#endif /* PASEMI_MAC_H */
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
new file mode 100644
index 000000000000..4825959a0efe
--- /dev/null
+++ b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2006-2008 PA Semi, Inc
+ *
+ * Ethtool hooks for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/inet_lro.h>
+
+#include <asm/pasemi_dma.h>
+#include "pasemi_mac.h"
+
+static struct {
+ const char str[ETH_GSTRING_LEN];
+} ethtool_stats_keys[] = {
+ { "rx-drops" },
+ { "rx-bytes" },
+ { "rx-packets" },
+ { "rx-broadcast-packets" },
+ { "rx-multicast-packets" },
+ { "rx-crc-errors" },
+ { "rx-undersize-errors" },
+ { "rx-oversize-errors" },
+ { "rx-short-fragment-errors" },
+ { "rx-jabber-errors" },
+ { "rx-64-byte-packets" },
+ { "rx-65-127-byte-packets" },
+ { "rx-128-255-byte-packets" },
+ { "rx-256-511-byte-packets" },
+ { "rx-512-1023-byte-packets" },
+ { "rx-1024-1518-byte-packets" },
+ { "rx-pause-frames" },
+ { "tx-bytes" },
+ { "tx-packets" },
+ { "tx-broadcast-packets" },
+ { "tx-multicast-packets" },
+ { "tx-collisions" },
+ { "tx-late-collisions" },
+ { "tx-excessive-collisions" },
+ { "tx-crc-errors" },
+ { "tx-undersize-errors" },
+ { "tx-oversize-errors" },
+ { "tx-64-byte-packets" },
+ { "tx-65-127-byte-packets" },
+ { "tx-128-255-byte-packets" },
+ { "tx-256-511-byte-packets" },
+ { "tx-512-1023-byte-packets" },
+ { "tx-1024-1518-byte-packets" },
+};
+
+static int
+pasemi_mac_ethtool_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct pasemi_mac *mac = netdev_priv(netdev);
+ struct phy_device *phydev = mac->phydev;
+
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_gset(phydev, cmd);
+}
+
+static int
+pasemi_mac_ethtool_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct pasemi_mac *mac = netdev_priv(netdev);
+ struct phy_device *phydev = mac->phydev;
+
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_sset(phydev, cmd);
+}
+
+static u32
+pasemi_mac_ethtool_get_msglevel(struct net_device *netdev)
+{
+ struct pasemi_mac *mac = netdev_priv(netdev);
+ return mac->msg_enable;
+}
+
+static void
+pasemi_mac_ethtool_set_msglevel(struct net_device *netdev,
+ u32 level)
+{
+ struct pasemi_mac *mac = netdev_priv(netdev);
+ mac->msg_enable = level;
+}
+
+static void
+pasemi_mac_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering)
+{
+ struct pasemi_mac *mac = netdev_priv(netdev);
+
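+	/* Each TX packet occupies at least two ring entries and each RX
+	 * packet four, so scale the raw entry counts into packets.
+	 */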
+ ering->tx_max_pending = TX_RING_SIZE/2;
+ ering->tx_pending = RING_USED(mac->tx)/2;
+ ering->rx_max_pending = RX_RING_SIZE/4;
+ ering->rx_pending = RING_USED(mac->rx)/4;
+}
+
+static int pasemi_mac_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(ethtool_stats_keys);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void pasemi_mac_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct pasemi_mac *mac = netdev_priv(netdev);
+ int i;
+
+ data[0] = pasemi_read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if))
+ >> PAS_DMA_RXINT_RCMDSTA_DROPS_S;
+ for (i = 0; i < 32; i++)
+ data[1+i] = pasemi_read_mac_reg(mac->dma_if, PAS_MAC_RMON(i));
+}
+
+static void pasemi_mac_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
+}
+
+const struct ethtool_ops pasemi_mac_ethtool_ops = {
+ .get_settings = pasemi_mac_ethtool_get_settings,
+ .set_settings = pasemi_mac_ethtool_set_settings,
+ .get_msglevel = pasemi_mac_ethtool_get_msglevel,
+ .set_msglevel = pasemi_mac_ethtool_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = pasemi_mac_ethtool_get_ringparam,
+ .get_strings = pasemi_mac_get_strings,
+ .get_sset_count = pasemi_mac_get_sset_count,
+ .get_ethtool_stats = pasemi_mac_get_ethtool_stats,
+};
+
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
new file mode 100644
index 000000000000..a8669adecc97
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -0,0 +1,54 @@
+#
+# QLogic network device configuration
+#
+
+config NET_VENDOR_QLOGIC
+ bool "QLogic devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about QLogic cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_QLOGIC
+
+config QLA3XXX
+ tristate "QLogic QLA3XXX Network Driver Support"
+ depends on PCI
+ ---help---
+ This driver supports QLogic ISP3XXX gigabit Ethernet cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called qla3xxx.
+
+config QLCNIC
+ tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support"
+ depends on PCI
+ select FW_LOADER
+ ---help---
+	  This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet
+	  devices.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called qlcnic.
+
+config QLGE
+ tristate "QLogic QLGE 10Gb Ethernet Driver Support"
+ depends on PCI
+ ---help---
+ This driver supports QLogic ISP8XXX 10Gb Ethernet cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called qlge.
+
+config NETXEN_NIC
+ tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC"
+ depends on PCI
+ select FW_LOADER
+ ---help---
+	  This enables support for NetXen's Multi port (1/10) Gigabit
+	  Ethernet cards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called netxen_nic.
+
+endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/Makefile b/drivers/net/ethernet/qlogic/Makefile
new file mode 100644
index 000000000000..b2a283d9ae60
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the QLogic network device drivers.
+#
+
+obj-$(CONFIG_QLA3XXX) += qla3xxx.o
+obj-$(CONFIG_QLCNIC) += qlcnic/
+obj-$(CONFIG_QLGE) += qlge/
+obj-$(CONFIG_NETXEN_NIC) += netxen/
diff --git a/drivers/net/ethernet/qlogic/netxen/Makefile b/drivers/net/ethernet/qlogic/netxen/Makefile
new file mode 100644
index 000000000000..861a0590b1f4
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/netxen/Makefile
@@ -0,0 +1,29 @@
+# Copyright (C) 2003 - 2009 NetXen, Inc.
+# Copyright (C) 2009 - QLogic Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+# MA 02111-1307, USA.
+#
+# The full GNU General Public License is included in this distribution
+# in the file called "COPYING".
+#
+
+obj-$(CONFIG_NETXEN_NIC) := netxen_nic.o
+
+netxen_nic-y := netxen_nic_hw.o netxen_nic_main.o netxen_nic_init.o \
+ netxen_nic_ethtool.o netxen_nic_ctx.o
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
new file mode 100644
index 000000000000..a876dffd7101
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -0,0 +1,1441 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#ifndef _NETXEN_NIC_H_
+#define _NETXEN_NIC_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/timer.h>
+
+#include <linux/vmalloc.h>
+
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#include "netxen_nic_hdr.h"
+#include "netxen_nic_hw.h"
+
+#define _NETXEN_NIC_LINUX_MAJOR 4
+#define _NETXEN_NIC_LINUX_MINOR 0
+#define _NETXEN_NIC_LINUX_SUBVERSION 77
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.77"
+
+#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
+#define _major(v) (((v) >> 24) & 0xff)
+#define _minor(v) (((v) >> 16) & 0xff)
+#define _build(v) ((v) & 0xffff)
+
+/* version in image has weird encoding:
+ * 7:0 - major
+ * 15:8 - minor
+ * 31:16 - build (little endian)
+ */
+#define NETXEN_DECODE_VERSION(v) \
+ NETXEN_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
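+
+/*
+ * Worked example (illustrative value): a flashed version word of
+ * 0x004d0004 decodes as major = 0x04, minor = 0x00, build = 0x4d = 77,
+ * so NETXEN_DECODE_VERSION(0x004d0004) == NETXEN_VERSION_CODE(4, 0, 77),
+ * matching the "4.0.77" driver version above.
+ */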
+
+#define NETXEN_NUM_FLASH_SECTORS (64)
+#define NETXEN_FLASH_SECTOR_SIZE (64 * 1024)
+#define NETXEN_FLASH_TOTAL_SIZE (NETXEN_NUM_FLASH_SECTORS \
+ * NETXEN_FLASH_SECTOR_SIZE)
+
+#define RCV_DESC_RINGSIZE(rds_ring) \
+ (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
+#define RCV_BUFF_RINGSIZE(rds_ring) \
+ (sizeof(struct netxen_rx_buffer) * rds_ring->num_desc)
+#define STATUS_DESC_RINGSIZE(sds_ring) \
+ (sizeof(struct status_desc) * (sds_ring)->num_desc)
+#define TX_BUFF_RINGSIZE(tx_ring) \
+ (sizeof(struct netxen_cmd_buffer) * tx_ring->num_desc)
+#define TX_DESC_RINGSIZE(tx_ring) \
+ (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
+
+#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a)))
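+
+/*
+ * find_diff_among(a, b, range) computes (b - a) modulo range, assuming
+ * 0 <= a, b < range. E.g. for a 1024-entry ring,
+ * find_diff_among(1020, 4, 1024) == 4 + 1024 - 1020 == 8.
+ */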
+
+#define NETXEN_RCV_PRODUCER_OFFSET 0
+#define NETXEN_RCV_PEG_DB_ID 2
+#define NETXEN_HOST_DUMMY_DMA_SIZE 1024
+#define FLASH_SUCCESS 0
+
+#define ADDR_IN_WINDOW1(off) \
+ (((off) > NETXEN_CRB_PCIX_HOST2) && ((off) < NETXEN_CRB_MAX)) ? 1 : 0
+
+#define ADDR_IN_RANGE(addr, low, high) \
+ (((addr) < (high)) && ((addr) >= (low)))
+
+/*
+ * normalize a 64MB crb address to 32MB PCI window
+ * To use NETXEN_CRB_NORMALIZE, window _must_ be set to 1
+ */
+#define NETXEN_CRB_NORMAL(reg) \
+ ((reg) - NETXEN_CRB_PCIX_HOST2 + NETXEN_CRB_PCIX_HOST)
+
+#define NETXEN_CRB_NORMALIZE(adapter, reg) \
+ pci_base_offset(adapter, NETXEN_CRB_NORMAL(reg))
+
+#define DB_NORMALIZE(adapter, off) \
+ (adapter->ahw.db_base + (off))
+
+#define NX_P2_C0 0x24
+#define NX_P2_C1 0x25
+#define NX_P3_A0 0x30
+#define NX_P3_A2 0x30
+#define NX_P3_B0 0x40
+#define NX_P3_B1 0x41
+#define NX_P3_B2 0x42
+#define NX_P3P_A0 0x50
+
+#define NX_IS_REVISION_P2(REVISION) ((REVISION) <= NX_P2_C1)
+#define NX_IS_REVISION_P3(REVISION) ((REVISION) >= NX_P3_A0)
+#define NX_IS_REVISION_P3P(REVISION) ((REVISION) >= NX_P3P_A0)
+
+#define FIRST_PAGE_GROUP_START 0
+#define FIRST_PAGE_GROUP_END 0x100000
+
+#define SECOND_PAGE_GROUP_START 0x6000000
+#define SECOND_PAGE_GROUP_END 0x68BC000
+
+#define THIRD_PAGE_GROUP_START 0x70E4000
+#define THIRD_PAGE_GROUP_END 0x8000000
+
+#define FIRST_PAGE_GROUP_SIZE (FIRST_PAGE_GROUP_END - FIRST_PAGE_GROUP_START)
+#define SECOND_PAGE_GROUP_SIZE (SECOND_PAGE_GROUP_END - SECOND_PAGE_GROUP_START)
+#define THIRD_PAGE_GROUP_SIZE (THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START)
+
+#define P2_MAX_MTU (8000)
+#define P3_MAX_MTU (9600)
+#define NX_ETHERMTU 1500
+#define NX_MAX_ETHERHDR 32 /* This contains some padding */
+
+#define NX_P2_RX_BUF_MAX_LEN 1760
+#define NX_P3_RX_BUF_MAX_LEN (NX_MAX_ETHERHDR + NX_ETHERMTU)
+#define NX_P2_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P2_MAX_MTU)
+#define NX_P3_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P3_MAX_MTU)
+#define NX_CT_DEFAULT_RX_BUF_LEN 2048
+#define NX_LRO_BUFFER_EXTRA 2048
+
+#define NX_RX_LRO_BUFFER_LENGTH (8060)
+
+/*
+ * Maximum number of ring contexts
+ */
+#define MAX_RING_CTX 1
+
+/* Opcodes to be used with the commands */
+#define TX_ETHER_PKT 0x01
+#define TX_TCP_PKT 0x02
+#define TX_UDP_PKT 0x03
+#define TX_IP_PKT 0x04
+#define TX_TCP_LSO 0x05
+#define TX_TCP_LSO6 0x06
+#define TX_IPSEC 0x07
+#define TX_IPSEC_CMD 0x0a
+#define TX_TCPV6_PKT 0x0b
+#define TX_UDPV6_PKT 0x0c
+
+/* The following opcodes are for internal consumption. */
+#define NETXEN_CONTROL_OP 0x10
+#define PEGNET_REQUEST 0x11
+
+#define MAX_NUM_CARDS 4
+
+#define NETXEN_MAX_FRAGS_PER_TX 14
+#define MAX_TSO_HEADER_DESC 2
+#define MGMT_CMD_DESC_RESV 4
+#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ + MGMT_CMD_DESC_RESV)
+#define NX_MAX_TX_TIMEOUTS 2
+
+/*
+ * Following are the states of the Phantom. The Phantom will set them and
+ * the host will read them back to check whether initialization completed
+ * correctly.
+ */
+#define PHAN_INITIALIZE_START 0xff00
+#define PHAN_INITIALIZE_FAILED 0xffff
+#define PHAN_INITIALIZE_COMPLETE 0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK 0xf00f
+
+#define NUM_RCV_DESC_RINGS 3
+#define NUM_STS_DESC_RINGS 4
+
+#define RCV_RING_NORMAL 0
+#define RCV_RING_JUMBO 1
+#define RCV_RING_LRO 2
+
+#define MIN_CMD_DESCRIPTORS 64
+#define MIN_RCV_DESCRIPTORS 64
+#define MIN_JUMBO_DESCRIPTORS 32
+
+#define MAX_CMD_DESCRIPTORS 1024
+#define MAX_RCV_DESCRIPTORS_1G 4096
+#define MAX_RCV_DESCRIPTORS_10G 8192
+#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
+#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
+#define MAX_LRO_RCV_DESCRIPTORS 8
+
+#define DEFAULT_RCV_DESCRIPTORS_1G 2048
+#define DEFAULT_RCV_DESCRIPTORS_10G 4096
+
+#define NETXEN_CTX_SIGNATURE 0xdee0
+#define NETXEN_CTX_SIGNATURE_V2 0x0002dee0
+#define NETXEN_CTX_RESET 0xbad0
+#define NETXEN_CTX_D3_RESET 0xacc0
+#define NETXEN_RCV_PRODUCER(ringid) (ringid)
+
+#define PHAN_PEG_RCV_INITIALIZED 0xff01
+#define PHAN_PEG_RCV_START_INITIALIZE 0xff00
+
+#define get_next_index(index, length) \
+ (((index) + 1) & ((length) - 1))
+
+#define get_index_range(index,length,count) \
+ (((index) + (count)) & ((length) - 1))
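+
+/*
+ * Both helpers assume length is a power of two, so the bitwise AND is a
+ * cheap modulo: get_next_index(1023, 1024) == 0 and
+ * get_index_range(1020, 1024, 8) == (1020 + 8) & 1023 == 4.
+ */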
+
+#define MPORT_SINGLE_FUNCTION_MODE 0x1111
+#define MPORT_MULTI_FUNCTION_MODE 0x2222
+
+#define NX_MAX_PCI_FUNC 8
+
+/*
+ * NetXen host-peg signal message structure
+ *
+ * Bit 0-1 : peg_id => 0x2 for tx and 0x1 for rx
+ * Bit 2 : priv_id => must be 1
+ * Bit 3-17 : count => for doorbell
+ * Bit 18-27 : ctx_id => Context id
+ * Bit 28-31 : opcode
+ */
+
+typedef u32 netxen_ctx_msg;
+
+#define netxen_set_msg_peg_id(config_word, val) \
+ ((config_word) &= ~3, (config_word) |= val & 3)
+#define netxen_set_msg_privid(config_word) \
+ ((config_word) |= 1 << 2)
+#define netxen_set_msg_count(config_word, val) \
+ ((config_word) &= ~(0x7fff<<3), (config_word) |= (val & 0x7fff) << 3)
+#define netxen_set_msg_ctxid(config_word, val) \
+ ((config_word) &= ~(0x3ff<<18), (config_word) |= (val & 0x3ff) << 18)
+#define netxen_set_msg_opcode(config_word, val) \
+ ((config_word) &= ~(0xf<<28), (config_word) |= (val & 0xf) << 28)
+
+struct netxen_rcv_ring {
+ __le64 addr;
+ __le32 size;
+ __le32 rsrvd;
+};
+
+struct netxen_sts_ring {
+ __le64 addr;
+ __le32 size;
+ __le16 msi_index;
+ __le16 rsvd;
+};
+
+struct netxen_ring_ctx {
+
+ /* one command ring */
+ __le64 cmd_consumer_offset;
+ __le64 cmd_ring_addr;
+ __le32 cmd_ring_size;
+ __le32 rsrvd;
+
+ /* three receive rings */
+ struct netxen_rcv_ring rcv_rings[NUM_RCV_DESC_RINGS];
+
+ __le64 sts_ring_addr;
+ __le32 sts_ring_size;
+
+ __le32 ctx_id;
+
+ __le64 rsrvd_2[3];
+ __le32 sts_ring_count;
+ __le32 rsrvd_3;
+ struct netxen_sts_ring sts_rings[NUM_STS_DESC_RINGS];
+
+} __attribute__ ((aligned(64)));
+
+/*
+ * The following data structures describe the descriptors that will be used.
+ * The tcpHdrSize and ipHdrSize fields were added; the driver needs to fill
+ * them in only when doing LSO (for packets above the 1500-byte MTU).
+ */
+
+/*
+ * The size of the reference handle has been changed to 16 bits to pass the
+ * MSS field for LSO packets.
+ */
+
+#define FLAGS_CHECKSUM_ENABLED 0x01
+#define FLAGS_LSO_ENABLED 0x02
+#define FLAGS_IPSEC_SA_ADD 0x04
+#define FLAGS_IPSEC_SA_DELETE 0x08
+#define FLAGS_VLAN_TAGGED 0x10
+#define FLAGS_VLAN_OOB 0x40
+
+#define netxen_set_tx_vlan_tci(cmd_desc, v) \
+ (cmd_desc)->vlan_TCI = cpu_to_le16(v);
+
+#define netxen_set_cmd_desc_port(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
+#define netxen_set_cmd_desc_ctxid(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
+
+#define netxen_set_tx_port(_desc, _port) \
+ (_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0)
+
+#define netxen_set_tx_flags_opcode(_desc, _flags, _opcode) \
+ (_desc)->flags_opcode = \
+ cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7))
+
+#define netxen_set_tx_frags_len(_desc, _frags, _len) \
+ (_desc)->nfrags__length = \
+ cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8))
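+
+/*
+ * Example (illustrative): an LSO descriptor with FLAGS_LSO_ENABLED (0x02)
+ * and opcode TX_TCP_LSO (0x05) yields
+ * flags_opcode == cpu_to_le16(0x02 | (0x05 << 7)) == cpu_to_le16(0x0282).
+ */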
+
+struct cmd_desc_type0 {
+ u8 tcp_hdr_offset; /* For LSO only */
+ u8 ip_hdr_offset; /* For LSO only */
+ __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */
+ __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */
+
+ __le64 addr_buffer2;
+
+ __le16 reference_handle;
+ __le16 mss;
+ u8 port_ctxid; /* 7:4 ctxid 3:0 port */
+ u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
+ __le16 conn_id; /* IPSec offload only */
+
+ __le64 addr_buffer3;
+ __le64 addr_buffer1;
+
+ __le16 buffer_length[4];
+
+ __le64 addr_buffer4;
+
+ __le32 reserved2;
+ __le16 reserved;
+ __le16 vlan_TCI;
+
+} __attribute__ ((aligned(64)));
+
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
+struct rcv_desc {
+ __le16 reference_handle;
+ __le16 reserved;
+ __le32 buffer_length; /* allocated buffer length (usually 2K) */
+ __le64 addr_buffer;
+};
+
+/* opcode field in status_desc */
+#define NETXEN_NIC_SYN_OFFLOAD 0x03
+#define NETXEN_NIC_RXPKT_DESC 0x04
+#define NETXEN_OLD_RXPKT_DESC 0x3f
+#define NETXEN_NIC_RESPONSE_DESC 0x05
+#define NETXEN_NIC_LRO_DESC 0x12
+
+/* for status field in status_desc */
+#define STATUS_NEED_CKSUM (1)
+#define STATUS_CKSUM_OK (2)
+
+/* owner bits of status_desc */
+#define STATUS_OWNER_HOST (0x1ULL << 56)
+#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
+
+/* Status descriptor:
+ 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+ 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
+ 53-55 desc_cnt, 56-57 owner, 58-63 opcode
+ */
+#define netxen_get_sts_port(sts_data) \
+ ((sts_data) & 0x0F)
+#define netxen_get_sts_status(sts_data) \
+ (((sts_data) >> 4) & 0x0F)
+#define netxen_get_sts_type(sts_data) \
+ (((sts_data) >> 8) & 0x0F)
+#define netxen_get_sts_totallength(sts_data) \
+ (((sts_data) >> 12) & 0xFFFF)
+#define netxen_get_sts_refhandle(sts_data) \
+ (((sts_data) >> 28) & 0xFFFF)
+#define netxen_get_sts_prot(sts_data) \
+ (((sts_data) >> 44) & 0x0F)
+#define netxen_get_sts_pkt_offset(sts_data) \
+ (((sts_data) >> 48) & 0x1F)
+#define netxen_get_sts_desc_cnt(sts_data) \
+ (((sts_data) >> 53) & 0x7)
+#define netxen_get_sts_opcode(sts_data) \
+ (((sts_data) >> 58) & 0x03F)
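+
+/*
+ * Example (illustrative): for a status word with STATUS_OWNER_HOST set and
+ * a single descriptor, sts_data == STATUS_OWNER_HOST | (1ULL << 53), so
+ * netxen_get_sts_desc_cnt(sts_data) == 1.
+ */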
+
+#define netxen_get_lro_sts_refhandle(sts_data) \
+ ((sts_data) & 0x0FFFF)
+#define netxen_get_lro_sts_length(sts_data) \
+ (((sts_data) >> 16) & 0x0FFFF)
+#define netxen_get_lro_sts_l2_hdr_offset(sts_data) \
+ (((sts_data) >> 32) & 0x0FF)
+#define netxen_get_lro_sts_l4_hdr_offset(sts_data) \
+ (((sts_data) >> 40) & 0x0FF)
+#define netxen_get_lro_sts_timestamp(sts_data) \
+ (((sts_data) >> 48) & 0x1)
+#define netxen_get_lro_sts_type(sts_data) \
+ (((sts_data) >> 49) & 0x7)
+#define netxen_get_lro_sts_push_flag(sts_data) \
+ (((sts_data) >> 52) & 0x1)
+#define netxen_get_lro_sts_seq_number(sts_data) \
+ ((sts_data) & 0x0FFFFFFFF)
+
+struct status_desc {
+ __le64 status_desc_data[2];
+} __attribute__ ((aligned(16)));
+
+/* UNIFIED ROMIMAGE *************************/
+#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0
+#define NX_UNI_DIR_SECT_BOOTLD 0x6
+#define NX_UNI_DIR_SECT_FW 0x7
+
+/* Offsets */
+#define NX_UNI_CHIP_REV_OFF 10
+#define NX_UNI_FLAGS_OFF 11
+#define NX_UNI_BIOS_VERSION_OFF 12
+#define NX_UNI_BOOTLD_IDX_OFF 27
+#define NX_UNI_FIRMWARE_IDX_OFF 29
+
+struct uni_table_desc {
+ uint32_t findex;
+ uint32_t num_entries;
+ uint32_t entry_size;
+ uint32_t reserved[5];
+};
+
+struct uni_data_desc {
+ uint32_t findex;
+ uint32_t size;
+ uint32_t reserved[5];
+};
+
+/* UNIFIED ROMIMAGE *************************/
+
+/* The version of the main data structure */
+#define NETXEN_BDINFO_VERSION 1
+
+/* Magic number to let user know flash is programmed */
+#define NETXEN_BDINFO_MAGIC 0x12345678
+
+/* Max number of Gig ports on a Phantom board */
+#define NETXEN_MAX_PORTS 4
+
+#define NETXEN_BRDTYPE_P1_BD 0x0000
+#define NETXEN_BRDTYPE_P1_SB 0x0001
+#define NETXEN_BRDTYPE_P1_SMAX 0x0002
+#define NETXEN_BRDTYPE_P1_SOCK 0x0003
+
+#define NETXEN_BRDTYPE_P2_SOCK_31 0x0008
+#define NETXEN_BRDTYPE_P2_SOCK_35 0x0009
+#define NETXEN_BRDTYPE_P2_SB35_4G 0x000a
+#define NETXEN_BRDTYPE_P2_SB31_10G 0x000b
+#define NETXEN_BRDTYPE_P2_SB31_2G 0x000c
+
+#define NETXEN_BRDTYPE_P2_SB31_10G_IMEZ 0x000d
+#define NETXEN_BRDTYPE_P2_SB31_10G_HMEZ 0x000e
+#define NETXEN_BRDTYPE_P2_SB31_10G_CX4 0x000f
+
+#define NETXEN_BRDTYPE_P3_REF_QG 0x0021
+#define NETXEN_BRDTYPE_P3_HMEZ 0x0022
+#define NETXEN_BRDTYPE_P3_10G_CX4_LP 0x0023
+#define NETXEN_BRDTYPE_P3_4_GB 0x0024
+#define NETXEN_BRDTYPE_P3_IMEZ 0x0025
+#define NETXEN_BRDTYPE_P3_10G_SFP_PLUS 0x0026
+#define NETXEN_BRDTYPE_P3_10000_BASE_T 0x0027
+#define NETXEN_BRDTYPE_P3_XG_LOM 0x0028
+#define NETXEN_BRDTYPE_P3_4_GB_MM 0x0029
+#define NETXEN_BRDTYPE_P3_10G_SFP_CT 0x002a
+#define NETXEN_BRDTYPE_P3_10G_SFP_QT 0x002b
+#define NETXEN_BRDTYPE_P3_10G_CX4 0x0031
+#define NETXEN_BRDTYPE_P3_10G_XFP 0x0032
+#define NETXEN_BRDTYPE_P3_10G_TP 0x0080
+
+/* Flash memory map */
+#define NETXEN_CRBINIT_START 0 /* crbinit section */
+#define NETXEN_BRDCFG_START 0x4000 /* board config */
+#define NETXEN_INITCODE_START 0x6000 /* pegtune code */
+#define NETXEN_BOOTLD_START 0x10000 /* bootld */
+#define NETXEN_IMAGE_START 0x43000 /* compressed image */
+#define NETXEN_SECONDARY_START 0x200000 /* backup images */
+#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */
+#define NETXEN_USER_START 0x3E8000 /* Firmware info */
+#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */
+#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */
+
+#define NX_OLD_MAC_ADDR_OFFSET (NETXEN_USER_START)
+#define NX_FW_VERSION_OFFSET (NETXEN_USER_START+0x408)
+#define NX_FW_SIZE_OFFSET (NETXEN_USER_START+0x40c)
+#define NX_FW_MAC_ADDR_OFFSET (NETXEN_USER_START+0x418)
+#define NX_FW_SERIAL_NUM_OFFSET (NETXEN_USER_START+0x81c)
+#define NX_BIOS_VERSION_OFFSET (NETXEN_USER_START+0x83c)
+
+#define NX_HDR_VERSION_OFFSET (NETXEN_BRDCFG_START)
+#define NX_BRDTYPE_OFFSET (NETXEN_BRDCFG_START+0x8)
+#define NX_FW_MAGIC_OFFSET (NETXEN_BRDCFG_START+0x128)
+
+#define NX_FW_MIN_SIZE (0x3fffff)
+#define NX_P2_MN_ROMIMAGE 0
+#define NX_P3_CT_ROMIMAGE 1
+#define NX_P3_MN_ROMIMAGE 2
+#define NX_UNIFIED_ROMIMAGE 3
+#define NX_FLASH_ROMIMAGE 4
+#define NX_UNKNOWN_ROMIMAGE 0xff
+
+#define NX_P2_MN_ROMIMAGE_NAME "nxromimg.bin"
+#define NX_P3_CT_ROMIMAGE_NAME "nx3fwct.bin"
+#define NX_P3_MN_ROMIMAGE_NAME "nx3fwmn.bin"
+#define NX_UNIFIED_ROMIMAGE_NAME "phanfw.bin"
+#define NX_FLASH_ROMIMAGE_NAME "flash"
+
+extern char netxen_nic_driver_name[];
+
+/* Number of status descriptors to handle per interrupt */
+#define MAX_STATUS_HANDLE (64)
+
+/*
+ * netxen_skb_frag{} contains the mapping info for each SG list entry. It
+ * has to be freed when DMA is complete. This is part of netxen_tx_buffer{}.
+ */
+struct netxen_skb_frag {
+ u64 dma;
+ u64 length;
+};
+
+struct netxen_recv_crb {
+ u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
+ u32 crb_sts_consumer[NUM_STS_DESC_RINGS];
+ u32 sw_int_mask[NUM_STS_DESC_RINGS];
+};
+
+/* Following defines are for the state of the buffers */
+#define NETXEN_BUFFER_FREE 0
+#define NETXEN_BUFFER_BUSY 1
+
+/*
+ * There will be one netxen_buffer per skb packet. These will be
+ * used to save the dma info for pci_unmap_page()
+ */
+struct netxen_cmd_buffer {
+ struct sk_buff *skb;
+ struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1];
+ u32 frag_count;
+};
+
+/* In rx_buffer, we do not need multiple fragments since it is a single buffer */
+struct netxen_rx_buffer {
+ struct list_head list;
+ struct sk_buff *skb;
+ u64 dma;
+ u16 ref_handle;
+ u16 state;
+};
+
+/* Board types */
+#define NETXEN_NIC_GBE 0x01
+#define NETXEN_NIC_XGBE 0x02
+
+/*
+ * One hardware_context{} per adapter
+ * contains interrupt info as well as shared hardware info.
+ */
+struct netxen_hardware_context {
+ void __iomem *pci_base0;
+ void __iomem *pci_base1;
+ void __iomem *pci_base2;
+ void __iomem *db_base;
+ void __iomem *ocm_win_crb;
+
+ unsigned long db_len;
+ unsigned long pci_len0;
+
+ u32 ocm_win;
+ u32 crb_win;
+
+ rwlock_t crb_lock;
+ spinlock_t mem_lock;
+
+ u8 cut_through;
+ u8 revision_id;
+ u8 pci_func;
+ u8 linkup;
+ u16 port_type;
+ u16 board_type;
+};
+
+#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */
+#define ETHERNET_FCS_SIZE 4
+
+struct netxen_adapter_stats {
+ u64 xmitcalled;
+ u64 xmitfinished;
+ u64 rxdropped;
+ u64 txdropped;
+ u64 csummed;
+ u64 rx_pkts;
+ u64 lro_pkts;
+ u64 rxbytes;
+ u64 txbytes;
+};
+
+/*
+ * Rcv Descriptor Context. One such per Rcv Descriptor. There may
+ * be one Rcv Descriptor for normal packets, one for jumbo, and possibly
+ * others.
+ */
+struct nx_host_rds_ring {
+ u32 producer;
+ u32 num_desc;
+ u32 dma_size;
+ u32 skb_size;
+ u32 flags;
+ void __iomem *crb_rcv_producer;
+ struct rcv_desc *desc_head;
+ struct netxen_rx_buffer *rx_buf_arr;
+ struct list_head free_list;
+ spinlock_t lock;
+ dma_addr_t phys_addr;
+};
+
+struct nx_host_sds_ring {
+ u32 consumer;
+ u32 num_desc;
+ void __iomem *crb_sts_consumer;
+ void __iomem *crb_intr_mask;
+
+ struct status_desc *desc_head;
+ struct netxen_adapter *adapter;
+ struct napi_struct napi;
+ struct list_head free_list[NUM_RCV_DESC_RINGS];
+
+ int irq;
+
+ dma_addr_t phys_addr;
+ char name[IFNAMSIZ+4];
+};
+
+struct nx_host_tx_ring {
+ u32 producer;
+ __le32 *hw_consumer;
+ u32 sw_consumer;
+ void __iomem *crb_cmd_producer;
+ void __iomem *crb_cmd_consumer;
+ u32 num_desc;
+
+ struct netdev_queue *txq;
+
+ struct netxen_cmd_buffer *cmd_buf_arr;
+ struct cmd_desc_type0 *desc_head;
+ dma_addr_t phys_addr;
+};
+
+/*
+ * Receive context. There is one such structure per instance of the
+ * receive processing. Any state information that is relevant to the
+ * receive processing must be in this structure. The global data may be
+ * present elsewhere.
+ */
+struct netxen_recv_context {
+ u32 state;
+ u16 context_id;
+ u16 virt_port;
+
+ struct nx_host_rds_ring *rds_rings;
+ struct nx_host_sds_ring *sds_rings;
+
+ struct netxen_ring_ctx *hwctx;
+ dma_addr_t phys_addr;
+};
+
+/* New HW context creation */
+
+#define NX_OS_CRB_RETRY_COUNT 4000
+#define NX_CDRP_SIGNATURE_MAKE(pcifn, version) \
+ (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
+
+#define NX_CDRP_CLEAR 0x00000000
+#define NX_CDRP_CMD_BIT 0x80000000
+
+/*
+ * All responses must have the NX_CDRP_CMD_BIT cleared
+ * in the crb NX_CDRP_CRB_OFFSET.
+ */
+#define NX_CDRP_FORM_RSP(rsp) (rsp)
+#define NX_CDRP_IS_RSP(rsp) (((rsp) & NX_CDRP_CMD_BIT) == 0)
+
+#define NX_CDRP_RSP_OK 0x00000001
+#define NX_CDRP_RSP_FAIL 0x00000002
+#define NX_CDRP_RSP_TIMEOUT 0x00000003
+
+/*
+ * All commands must have the NX_CDRP_CMD_BIT set in
+ * the crb NX_CDRP_CRB_OFFSET.
+ */
+#define NX_CDRP_FORM_CMD(cmd) (NX_CDRP_CMD_BIT | (cmd))
+#define NX_CDRP_IS_CMD(cmd) (((cmd) & NX_CDRP_CMD_BIT) != 0)
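+
+/*
+ * E.g. NX_CDRP_FORM_CMD(NX_CDRP_CMD_SET_MTU) == 0x80000012; the firmware
+ * clears NX_CDRP_CMD_BIT when it posts its response, so NX_CDRP_IS_RSP()
+ * becomes true once the card has replied.
+ */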
+
+#define NX_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001
+#define NX_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002
+#define NX_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003
+#define NX_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004
+#define NX_CDRP_CMD_READ_MAX_RX_CTX 0x00000005
+#define NX_CDRP_CMD_READ_MAX_TX_CTX 0x00000006
+#define NX_CDRP_CMD_CREATE_RX_CTX 0x00000007
+#define NX_CDRP_CMD_DESTROY_RX_CTX 0x00000008
+#define NX_CDRP_CMD_CREATE_TX_CTX 0x00000009
+#define NX_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
+#define NX_CDRP_CMD_SETUP_STATISTICS 0x0000000e
+#define NX_CDRP_CMD_GET_STATISTICS 0x0000000f
+#define NX_CDRP_CMD_DELETE_STATISTICS 0x00000010
+#define NX_CDRP_CMD_SET_MTU 0x00000012
+#define NX_CDRP_CMD_READ_PHY 0x00000013
+#define NX_CDRP_CMD_WRITE_PHY 0x00000014
+#define NX_CDRP_CMD_READ_HW_REG 0x00000015
+#define NX_CDRP_CMD_GET_FLOW_CTL 0x00000016
+#define NX_CDRP_CMD_SET_FLOW_CTL 0x00000017
+#define NX_CDRP_CMD_READ_MAX_MTU 0x00000018
+#define NX_CDRP_CMD_READ_MAX_LRO 0x00000019
+#define NX_CDRP_CMD_CONFIGURE_TOE 0x0000001a
+#define NX_CDRP_CMD_FUNC_ATTRIB 0x0000001b
+#define NX_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c
+#define NX_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d
+#define NX_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e
+#define NX_CDRP_CMD_CONFIG_GBE_PORT 0x0000001f
+#define NX_CDRP_CMD_MAX 0x00000020
+
+#define NX_RCODE_SUCCESS 0
+#define NX_RCODE_NO_HOST_MEM 1
+#define NX_RCODE_NO_HOST_RESOURCE 2
+#define NX_RCODE_NO_CARD_CRB 3
+#define NX_RCODE_NO_CARD_MEM 4
+#define NX_RCODE_NO_CARD_RESOURCE 5
+#define NX_RCODE_INVALID_ARGS 6
+#define NX_RCODE_INVALID_ACTION 7
+#define NX_RCODE_INVALID_STATE 8
+#define NX_RCODE_NOT_SUPPORTED 9
+#define NX_RCODE_NOT_PERMITTED 10
+#define NX_RCODE_NOT_READY 11
+#define NX_RCODE_DOES_NOT_EXIST 12
+#define NX_RCODE_ALREADY_EXISTS 13
+#define NX_RCODE_BAD_SIGNATURE 14
+#define NX_RCODE_CMD_NOT_IMPL 15
+#define NX_RCODE_CMD_INVALID 16
+#define NX_RCODE_TIMEOUT 17
+#define NX_RCODE_CMD_FAILED 18
+#define NX_RCODE_MAX_EXCEEDED 19
+#define NX_RCODE_MAX 20
+
+#define NX_DESTROY_CTX_RESET 0
+#define NX_DESTROY_CTX_D3_RESET 1
+#define NX_DESTROY_CTX_MAX 2
+
+/*
+ * Capabilities
+ */
+#define NX_CAP_BIT(class, bit) (1 << (bit))
+#define NX_CAP0_LEGACY_CONTEXT NX_CAP_BIT(0, 0)
+#define NX_CAP0_MULTI_CONTEXT NX_CAP_BIT(0, 1)
+#define NX_CAP0_LEGACY_MN NX_CAP_BIT(0, 2)
+#define NX_CAP0_LEGACY_MS NX_CAP_BIT(0, 3)
+#define NX_CAP0_CUT_THROUGH NX_CAP_BIT(0, 4)
+#define NX_CAP0_LRO NX_CAP_BIT(0, 5)
+#define NX_CAP0_LSO NX_CAP_BIT(0, 6)
+#define NX_CAP0_JUMBO_CONTIGUOUS NX_CAP_BIT(0, 7)
+#define NX_CAP0_LRO_CONTIGUOUS NX_CAP_BIT(0, 8)
+#define NX_CAP0_HW_LRO NX_CAP_BIT(0, 10)
+
+/*
+ * Context state
+ */
+#define NX_HOST_CTX_STATE_FREED 0
+#define NX_HOST_CTX_STATE_ALLOCATED 1
+#define NX_HOST_CTX_STATE_ACTIVE 2
+#define NX_HOST_CTX_STATE_DISABLED 3
+#define NX_HOST_CTX_STATE_QUIESCED 4
+#define NX_HOST_CTX_STATE_MAX 5
+
+/*
+ * Rx context
+ */
+
+typedef struct {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le32 ring_size; /* Ring entries */
+ __le16 msi_index;
+ __le16 rsvd; /* Padding */
+} nx_hostrq_sds_ring_t;
+
+typedef struct {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le64 buff_size; /* Packet buffer size */
+ __le32 ring_size; /* Ring entries */
+ __le32 ring_kind; /* Class of ring */
+} nx_hostrq_rds_ring_t;
+
+typedef struct {
+ __le64 host_rsp_dma_addr; /* Response dma'd here */
+ __le32 capabilities[4]; /* Flag bit vector */
+ __le32 host_int_crb_mode; /* Interrupt crb usage */
+ __le32 host_rds_crb_mode; /* RDS crb usage */
+ /* These ring offsets are relative to data[0] below */
+ __le32 rds_ring_offset; /* Offset to RDS config */
+ __le32 sds_ring_offset; /* Offset to SDS config */
+ __le16 num_rds_rings; /* Count of RDS rings */
+ __le16 num_sds_rings; /* Count of SDS rings */
+ __le16 rsvd1; /* Padding */
+ __le16 rsvd2; /* Padding */
+ u8 reserved[128]; /* reserve space for future expansion */
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N hostrq_rds_rings
+ - N hostrq_sds_rings */
+ char data[0];
+} nx_hostrq_rx_ctx_t;
+
+typedef struct {
+ __le32 host_producer_crb; /* Crb to use */
+ __le32 rsvd1; /* Padding */
+} nx_cardrsp_rds_ring_t;
+
+typedef struct {
+ __le32 host_consumer_crb; /* Crb to use */
+ __le32 interrupt_crb; /* Crb to use */
+} nx_cardrsp_sds_ring_t;
+
+typedef struct {
+ /* These ring offsets are relative to data[0] below */
+ __le32 rds_ring_offset; /* Offset to RDS config */
+ __le32 sds_ring_offset; /* Offset to SDS config */
+ __le32 host_ctx_state; /* Starting State */
+ __le32 num_fn_per_port; /* How many PCI fn share the port */
+ __le16 num_rds_rings; /* Count of RDS rings */
+ __le16 num_sds_rings; /* Count of SDS rings */
+ __le16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ u8 reserved[128]; /* save space for future expansion */
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N cardrsp_rds_rings
+ - N cardrs_sds_rings */
+ char data[0];
+} nx_cardrsp_rx_ctx_t;
+
+#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
+ (sizeof(HOSTRQ_RX) + \
+ (rds_rings)*(sizeof(nx_hostrq_rds_ring_t)) + \
+ (sds_rings)*(sizeof(nx_hostrq_sds_ring_t)))
+
+#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \
+ (sizeof(CARDRSP_RX) + \
+ (rds_rings)*(sizeof(nx_cardrsp_rds_ring_t)) + \
+ (sds_rings)*(sizeof(nx_cardrsp_sds_ring_t)))
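+
+/*
+ * Example: with nrds_rings == 3 and nsds_rings == 1, the host request is a
+ * single allocation of sizeof(nx_hostrq_rx_ctx_t) plus three
+ * nx_hostrq_rds_ring_t and one nx_hostrq_sds_ring_t packed after data[0];
+ * the macros above compute that size.
+ */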
+
+/*
+ * Tx context
+ */
+
+typedef struct {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le32 ring_size; /* Ring entries */
+ __le32 rsvd; /* Padding */
+} nx_hostrq_cds_ring_t;
+
+typedef struct {
+ __le64 host_rsp_dma_addr; /* Response dma'd here */
+ __le64 cmd_cons_dma_addr; /* */
+ __le64 dummy_dma_addr; /* */
+ __le32 capabilities[4]; /* Flag bit vector */
+ __le32 host_int_crb_mode; /* Interrupt crb usage */
+ __le32 rsvd1; /* Padding */
+ __le16 rsvd2; /* Padding */
+ __le16 interrupt_ctl;
+ __le16 msi_index;
+ __le16 rsvd3; /* Padding */
+ nx_hostrq_cds_ring_t cds_ring; /* Desc of cds ring */
+ u8 reserved[128]; /* future expansion */
+} nx_hostrq_tx_ctx_t;
+
+typedef struct {
+ __le32 host_producer_crb; /* Crb to use */
+ __le32 interrupt_crb; /* Crb to use */
+} nx_cardrsp_cds_ring_t;
+
+typedef struct {
+ __le32 host_ctx_state; /* Starting state */
+ __le16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ nx_cardrsp_cds_ring_t cds_ring; /* Card cds settings */
+ u8 reserved[128]; /* future expansion */
+} nx_cardrsp_tx_ctx_t;
+
+#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
+#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
+
+/* CRB */
+
+#define NX_HOST_RDS_CRB_MODE_UNIQUE 0
+#define NX_HOST_RDS_CRB_MODE_SHARED 1
+#define NX_HOST_RDS_CRB_MODE_CUSTOM 2
+#define NX_HOST_RDS_CRB_MODE_MAX 3
+
+#define NX_HOST_INT_CRB_MODE_UNIQUE 0
+#define NX_HOST_INT_CRB_MODE_SHARED 1
+#define NX_HOST_INT_CRB_MODE_NORX 2
+#define NX_HOST_INT_CRB_MODE_NOTX 3
+#define NX_HOST_INT_CRB_MODE_NORXTX 4
+
+/* MAC */
+
+#define MC_COUNT_P2 16
+#define MC_COUNT_P3 38
+
+#define NETXEN_MAC_NOOP 0
+#define NETXEN_MAC_ADD 1
+#define NETXEN_MAC_DEL 2
+
+typedef struct nx_mac_list_s {
+ struct list_head list;
+ uint8_t mac_addr[ETH_ALEN+2];
+} nx_mac_list_t;
+
+struct nx_vlan_ip_list {
+ struct list_head list;
+ u32 ip_addr;
+};
+
+/*
+ * Interrupt coalescing defaults. The defaults are for 1500 MTU. They are
+ * adjusted based on the configured MTU.
+ */
+#define NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US 3
+#define NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS 256
+#define NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS 64
+#define NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US 4
+
+#define NETXEN_NIC_INTR_DEFAULT 0x04
+
+typedef union {
+ struct {
+ uint16_t rx_packets;
+ uint16_t rx_time_us;
+ uint16_t tx_packets;
+ uint16_t tx_time_us;
+ } data;
+ uint64_t word;
+} nx_nic_intr_coalesce_data_t;
+
+typedef struct {
+ uint16_t stats_time_us;
+ uint16_t rate_sample_time;
+ uint16_t flags;
+ uint16_t rsvd_1;
+ uint32_t low_threshold;
+ uint32_t high_threshold;
+ nx_nic_intr_coalesce_data_t normal;
+ nx_nic_intr_coalesce_data_t low;
+ nx_nic_intr_coalesce_data_t high;
+ nx_nic_intr_coalesce_data_t irq;
+} nx_nic_intr_coalesce_t;
+
+#define NX_HOST_REQUEST 0x13
+#define NX_NIC_REQUEST 0x14
+
+#define NX_MAC_EVENT 0x1
+
+#define NX_IP_UP 2
+#define NX_IP_DOWN 3
+
+/*
+ * Driver --> Firmware
+ */
+#define NX_NIC_H2C_OPCODE_START 0
+#define NX_NIC_H2C_OPCODE_CONFIG_RSS 1
+#define NX_NIC_H2C_OPCODE_CONFIG_RSS_TBL 2
+#define NX_NIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3
+#define NX_NIC_H2C_OPCODE_CONFIG_LED 4
+#define NX_NIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5
+#define NX_NIC_H2C_OPCODE_CONFIG_L2_MAC 6
+#define NX_NIC_H2C_OPCODE_LRO_REQUEST 7
+#define NX_NIC_H2C_OPCODE_GET_SNMP_STATS 8
+#define NX_NIC_H2C_OPCODE_PROXY_START_REQUEST 9
+#define NX_NIC_H2C_OPCODE_PROXY_STOP_REQUEST 10
+#define NX_NIC_H2C_OPCODE_PROXY_SET_MTU 11
+#define NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12
+#define NX_NIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13
+#define NX_NIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14
+#define NX_NIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15
+#define NX_NIC_H2C_OPCODE_GET_NET_STATS 16
+#define NX_NIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
+#define NX_NIC_H2C_OPCODE_CONFIG_IPADDR 18
+#define NX_NIC_H2C_OPCODE_CONFIG_LOOPBACK 19
+#define NX_NIC_H2C_OPCODE_PROXY_STOP_DONE 20
+#define NX_NIC_H2C_OPCODE_GET_LINKEVENT 21
+#define NX_NIC_C2C_OPCODE 22
+#define NX_NIC_H2C_OPCODE_CONFIG_BRIDGING 23
+#define NX_NIC_H2C_OPCODE_CONFIG_HW_LRO 24
+#define NX_NIC_H2C_OPCODE_LAST 25
+
+/*
+ * Firmware --> Driver
+ */
+
+#define NX_NIC_C2H_OPCODE_START 128
+#define NX_NIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129
+#define NX_NIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130
+#define NX_NIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131
+#define NX_NIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132
+#define NX_NIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133
+#define NX_NIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134
+#define NX_NIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135
+#define NX_NIC_C2H_OPCODE_GET_SNMP_STATS 136
+#define NX_NIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137
+#define NX_NIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138
+#define NX_NIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139
+#define NX_NIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140
+#define NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
+#define NX_NIC_C2H_OPCODE_LAST 142
+
+#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
+#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
+#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
+
+#define NX_NIC_LRO_REQUEST_FIRST 0
+#define NX_NIC_LRO_REQUEST_ADD_FLOW 1
+#define NX_NIC_LRO_REQUEST_DELETE_FLOW 2
+#define NX_NIC_LRO_REQUEST_TIMER 3
+#define NX_NIC_LRO_REQUEST_CLEANUP 4
+#define NX_NIC_LRO_REQUEST_ADD_FLOW_SCHEDULED 5
+#define NX_TOE_LRO_REQUEST_ADD_FLOW 6
+#define NX_TOE_LRO_REQUEST_ADD_FLOW_RESPONSE 7
+#define NX_TOE_LRO_REQUEST_DELETE_FLOW 8
+#define NX_TOE_LRO_REQUEST_DELETE_FLOW_RESPONSE 9
+#define NX_TOE_LRO_REQUEST_TIMER 10
+#define NX_NIC_LRO_REQUEST_LAST 11
+
+#define NX_FW_CAPABILITY_LINK_NOTIFICATION (1 << 5)
+#define NX_FW_CAPABILITY_SWITCHING (1 << 6)
+#define NX_FW_CAPABILITY_PEXQ (1 << 7)
+#define NX_FW_CAPABILITY_BDG (1 << 8)
+#define NX_FW_CAPABILITY_FVLANTX (1 << 9)
+#define NX_FW_CAPABILITY_HW_LRO (1 << 10)
+#define NX_FW_CAPABILITY_GBE_LINK_CFG (1 << 11)
+
+/* module types */
+#define LINKEVENT_MODULE_NOT_PRESENT 1
+#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
+#define LINKEVENT_MODULE_OPTICAL_SRLR 3
+#define LINKEVENT_MODULE_OPTICAL_LRM 4
+#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7
+#define LINKEVENT_MODULE_TWINAX 8
+
+#define LINKSPEED_10GBPS 10000
+#define LINKSPEED_1GBPS 1000
+#define LINKSPEED_100MBPS 100
+#define LINKSPEED_10MBPS 10
+
+#define LINKSPEED_ENCODED_10MBPS 0
+#define LINKSPEED_ENCODED_100MBPS 1
+#define LINKSPEED_ENCODED_1GBPS 2
+
+#define LINKEVENT_AUTONEG_DISABLED 0
+#define LINKEVENT_AUTONEG_ENABLED 1
+
+#define LINKEVENT_HALF_DUPLEX 0
+#define LINKEVENT_FULL_DUPLEX 1
+
+#define LINKEVENT_LINKSPEED_MBPS 0
+#define LINKEVENT_LINKSPEED_ENCODED 1
+
+#define AUTO_FW_RESET_ENABLED 0xEF10AF12
+#define AUTO_FW_RESET_DISABLED 0xDCBAAF12
+
+/* firmware response header:
+ * 63:58 - message type
+ * 57:56 - owner
+ * 55:53 - desc count
+ * 52:48 - reserved
+ * 47:40 - completion id
+ * 39:32 - opcode
+ * 31:16 - error code
+ * 15:00 - reserved
+ */
+#define netxen_get_nic_msgtype(msg_hdr) \
+ ((msg_hdr >> 58) & 0x3F)
+#define netxen_get_nic_msg_compid(msg_hdr) \
+ ((msg_hdr >> 40) & 0xFF)
+#define netxen_get_nic_msg_opcode(msg_hdr) \
+ ((msg_hdr >> 32) & 0xFF)
+#define netxen_get_nic_msg_errcode(msg_hdr) \
+ ((msg_hdr >> 16) & 0xFFFF)
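+
+/*
+ * Example (illustrative): a header of 0x0000008300000000ULL carries opcode
+ * 0x83 == 131 in bits 39:32, so netxen_get_nic_msg_opcode(msg_hdr) returns
+ * NX_NIC_C2H_OPCODE_CONFIG_MAC_RESPONSE.
+ */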
+
+typedef struct {
+ union {
+ struct {
+ u64 hdr;
+ u64 body[7];
+ };
+ u64 words[8];
+ };
+} nx_fw_msg_t;
+
+typedef struct {
+ __le64 qhdr;
+ __le64 req_hdr;
+ __le64 words[6];
+} nx_nic_req_t;
+
+typedef struct {
+ u8 op;
+ u8 tag;
+ u8 mac_addr[6];
+} nx_mac_req_t;
+
+#define MAX_PENDING_DESC_BLOCK_SIZE 64
+
+#define NETXEN_NIC_MSI_ENABLED 0x02
+#define NETXEN_NIC_MSIX_ENABLED 0x04
+#define NETXEN_NIC_LRO_ENABLED 0x08
+#define NETXEN_NIC_LRO_DISABLED 0x00
+#define NETXEN_NIC_BRIDGE_ENABLED 0x10
+#define NETXEN_NIC_DIAG_ENABLED 0x20
+#define NETXEN_IS_MSI_FAMILY(adapter) \
+ ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED))
+
+#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS
+#define NETXEN_MSIX_TBL_SPACE 8192
+#define NETXEN_PCI_REG_MSIX_TBL 0x44
+
+#define NETXEN_DB_MAPSIZE_BYTES 0x1000
+
+#define NETXEN_NETDEV_WEIGHT 128
+#define NETXEN_ADAPTER_UP_MAGIC 777
+#define NETXEN_NIC_PEG_TUNE 0
+
+#define __NX_FW_ATTACHED 0
+#define __NX_DEV_UP 1
+#define __NX_RESETTING 2
+
+struct netxen_dummy_dma {
+ void *addr;
+ dma_addr_t phys_addr;
+};
+
+struct netxen_adapter {
+ struct netxen_hardware_context ahw;
+
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct list_head mac_list;
+ struct list_head vlan_ip_list;
+
+ spinlock_t tx_clean_lock;
+
+ u16 num_txd;
+ u16 num_rxd;
+ u16 num_jumbo_rxd;
+ u16 num_lro_rxd;
+
+ u8 max_rds_rings;
+ u8 max_sds_rings;
+ u8 driver_mismatch;
+ u8 msix_supported;
+ u8 __pad;
+ u8 pci_using_dac;
+ u8 portnum;
+ u8 physical_port;
+
+ u8 mc_enabled;
+ u8 max_mc_count;
+ u8 rss_supported;
+ u8 link_changed;
+ u8 fw_wait_cnt;
+ u8 fw_fail_cnt;
+ u8 tx_timeo_cnt;
+ u8 need_fw_reset;
+
+ u8 has_link_events;
+ u8 fw_type;
+ u16 tx_context_id;
+ u16 mtu;
+ u16 is_up;
+
+ u16 link_speed;
+ u16 link_duplex;
+ u16 link_autoneg;
+ u16 module_type;
+
+ u32 capabilities;
+ u32 flags;
+ u32 irq;
+ u32 temp;
+
+ u32 int_vec_bit;
+ u32 heartbit;
+
+ u8 mac_addr[ETH_ALEN];
+
+ struct netxen_adapter_stats stats;
+
+ struct netxen_recv_context recv_ctx;
+ struct nx_host_tx_ring *tx_ring;
+
+ int (*macaddr_set) (struct netxen_adapter *, u8 *);
+ int (*set_mtu) (struct netxen_adapter *, int);
+ int (*set_promisc) (struct netxen_adapter *, u32);
+ void (*set_multi) (struct net_device *);
+ int (*phy_read) (struct netxen_adapter *, u32 reg, u32 *);
+ int (*phy_write) (struct netxen_adapter *, u32 reg, u32 val);
+ int (*init_port) (struct netxen_adapter *, int);
+ int (*stop_port) (struct netxen_adapter *);
+
+ u32 (*crb_read)(struct netxen_adapter *, ulong);
+ int (*crb_write)(struct netxen_adapter *, ulong, u32);
+
+ int (*pci_mem_read)(struct netxen_adapter *, u64, u64 *);
+ int (*pci_mem_write)(struct netxen_adapter *, u64, u64);
+
+ int (*pci_set_window)(struct netxen_adapter *, u64, u32 *);
+
+ u32 (*io_read)(struct netxen_adapter *, void __iomem *);
+ void (*io_write)(struct netxen_adapter *, void __iomem *, u32);
+
+ void __iomem *tgt_mask_reg;
+ void __iomem *pci_int_reg;
+ void __iomem *tgt_status_reg;
+ void __iomem *crb_int_state_reg;
+ void __iomem *isr_int_vec;
+
+ struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER];
+
+ struct netxen_dummy_dma dummy_dma;
+
+ struct delayed_work fw_work;
+
+ struct work_struct tx_timeout_task;
+
+ nx_nic_intr_coalesce_t coal;
+
+ unsigned long state;
+ __le32 file_prd_off; /* File fw product offset */
+ u32 fw_version;
+ const struct firmware *fw;
+};
+
+int nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val);
+int nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val);
+
+#define NXRD32(adapter, off) \
+ (adapter->crb_read(adapter, off))
+#define NXWR32(adapter, off, val) \
+ (adapter->crb_write(adapter, off, val))
+#define NXRDIO(adapter, addr) \
+ (adapter->io_read(adapter, addr))
+#define NXWRIO(adapter, addr, val) \
+ (adapter->io_write(adapter, addr, val))
+
+int netxen_pcie_sem_lock(struct netxen_adapter *, int, u32);
+void netxen_pcie_sem_unlock(struct netxen_adapter *, int);
+
+#define netxen_rom_lock(a) \
+ netxen_pcie_sem_lock((a), 2, NETXEN_ROM_LOCK_ID)
+#define netxen_rom_unlock(a) \
+ netxen_pcie_sem_unlock((a), 2)
+#define netxen_phy_lock(a) \
+ netxen_pcie_sem_lock((a), 3, NETXEN_PHY_LOCK_ID)
+#define netxen_phy_unlock(a) \
+ netxen_pcie_sem_unlock((a), 3)
+#define netxen_api_lock(a) \
+ netxen_pcie_sem_lock((a), 5, 0)
+#define netxen_api_unlock(a) \
+ netxen_pcie_sem_unlock((a), 5)
+#define netxen_sw_lock(a) \
+ netxen_pcie_sem_lock((a), 6, 0)
+#define netxen_sw_unlock(a) \
+ netxen_pcie_sem_unlock((a), 6)
+#define crb_win_lock(a) \
+ netxen_pcie_sem_lock((a), 7, NETXEN_CRB_WIN_LOCK_ID)
+#define crb_win_unlock(a) \
+ netxen_pcie_sem_unlock((a), 7)
+
+int netxen_nic_get_board_info(struct netxen_adapter *adapter);
+int netxen_nic_wol_supported(struct netxen_adapter *adapter);
+
+/* Functions from netxen_nic_init.c */
+int netxen_init_dummy_dma(struct netxen_adapter *adapter);
+void netxen_free_dummy_dma(struct netxen_adapter *adapter);
+
+int netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter);
+int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
+int netxen_load_firmware(struct netxen_adapter *adapter);
+int netxen_need_fw_reset(struct netxen_adapter *adapter);
+void netxen_request_firmware(struct netxen_adapter *adapter);
+void netxen_release_firmware(struct netxen_adapter *adapter);
+int netxen_pinit_from_rom(struct netxen_adapter *adapter);
+
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
+int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
+ u8 *bytes, size_t size);
+int netxen_rom_fast_write_words(struct netxen_adapter *adapter, int addr,
+ u8 *bytes, size_t size);
+int netxen_flash_unlock(struct netxen_adapter *adapter);
+int netxen_backup_crbinit(struct netxen_adapter *adapter);
+int netxen_flash_erase_secondary(struct netxen_adapter *adapter);
+int netxen_flash_erase_primary(struct netxen_adapter *adapter);
+void netxen_halt_pegs(struct netxen_adapter *adapter);
+
+int netxen_rom_se(struct netxen_adapter *adapter, int addr);
+
+int netxen_alloc_sw_resources(struct netxen_adapter *adapter);
+void netxen_free_sw_resources(struct netxen_adapter *adapter);
+
+void netxen_setup_hwops(struct netxen_adapter *adapter);
+void __iomem *netxen_get_ioaddr(struct netxen_adapter *, u32);
+
+int netxen_alloc_hw_resources(struct netxen_adapter *adapter);
+void netxen_free_hw_resources(struct netxen_adapter *adapter);
+
+void netxen_release_rx_buffers(struct netxen_adapter *adapter);
+void netxen_release_tx_buffers(struct netxen_adapter *adapter);
+
+int netxen_init_firmware(struct netxen_adapter *adapter);
+void netxen_nic_clear_stats(struct netxen_adapter *adapter);
+void netxen_watchdog_task(struct work_struct *work);
+void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
+ struct nx_host_rds_ring *rds_ring);
+int netxen_process_cmd_ring(struct netxen_adapter *adapter);
+int netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max);
+
+void netxen_p3_free_mac_list(struct netxen_adapter *adapter);
+int netxen_config_intr_coalesce(struct netxen_adapter *adapter);
+int netxen_config_rss(struct netxen_adapter *adapter, int enable);
+int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd);
+int netxen_linkevent_request(struct netxen_adapter *adapter, int enable);
+void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup);
+void netxen_pci_camqm_read_2M(struct netxen_adapter *, u64, u64 *);
+void netxen_pci_camqm_write_2M(struct netxen_adapter *, u64, u64);
+
+int nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
+ u32 speed, u32 duplex, u32 autoneg);
+int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu);
+int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
+int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable);
+int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable);
+int netxen_send_lro_cleanup(struct netxen_adapter *adapter);
+
+void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
+ struct nx_host_tx_ring *tx_ring);
+
+/* Functions from netxen_nic_main.c */
+int netxen_nic_reset_context(struct netxen_adapter *);
+
+/*
+ * NetXen Board information
+ */
+
+#define NETXEN_MAX_SHORT_NAME 32
+struct netxen_brdinfo {
+ int brdtype; /* type of board */
+ long ports; /* max no of physical ports */
+ char short_name[NETXEN_MAX_SHORT_NAME];
+};
+
+static const struct netxen_brdinfo netxen_boards[] = {
+ {NETXEN_BRDTYPE_P2_SB31_10G_CX4, 1, "XGb CX4"},
+ {NETXEN_BRDTYPE_P2_SB31_10G_HMEZ, 1, "XGb HMEZ"},
+ {NETXEN_BRDTYPE_P2_SB31_10G_IMEZ, 2, "XGb IMEZ"},
+ {NETXEN_BRDTYPE_P2_SB31_10G, 1, "XGb XFP"},
+ {NETXEN_BRDTYPE_P2_SB35_4G, 4, "Quad Gb"},
+ {NETXEN_BRDTYPE_P2_SB31_2G, 2, "Dual Gb"},
+ {NETXEN_BRDTYPE_P3_REF_QG, 4, "Reference Quad Gig "},
+ {NETXEN_BRDTYPE_P3_HMEZ, 2, "Dual XGb HMEZ"},
+ {NETXEN_BRDTYPE_P3_10G_CX4_LP, 2, "Dual XGb CX4 LP"},
+ {NETXEN_BRDTYPE_P3_4_GB, 4, "Quad Gig LP"},
+ {NETXEN_BRDTYPE_P3_IMEZ, 2, "Dual XGb IMEZ"},
+ {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"},
+ {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"},
+ {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"},
+ {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "NX3031 Gigabit Ethernet"},
+ {NETXEN_BRDTYPE_P3_10G_SFP_CT, 2, "NX3031 10 Gigabit Ethernet"},
+ {NETXEN_BRDTYPE_P3_10G_SFP_QT, 2, "Quanta Dual XGb SFP+"},
+ {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"},
+ {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"}
+};
+
+#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards)
+
+static inline void get_brd_name_by_type(u32 type, char *name)
+{
+ int i, found = 0;
+ for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
+ if (netxen_boards[i].brdtype == type) {
+ strcpy(name, netxen_boards[i].short_name);
+ found = 1;
+ break;
+ }
+ }
+ /* copy the fallback so the caller's buffer is always filled */
+ if (!found)
+ strcpy(name, "Unknown");
+}
+
+static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
+{
+ smp_mb();
+ return find_diff_among(tx_ring->producer,
+ tx_ring->sw_consumer, tx_ring->num_desc);
+}
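+
+/*
+ * E.g. producer == 10, sw_consumer == 4 and num_desc == 1024 gives
+ * netxen_tx_avail() == 4 + 1024 - 10 == 1018 free tx descriptors.
+ */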
+
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac);
+int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac);
+extern void netxen_change_ringparam(struct netxen_adapter *adapter);
+extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
+ int *valp);
+
+extern const struct ethtool_ops netxen_nic_ethtool_ops;
+
+#endif /* _NETXEN_NIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
new file mode 100644
index 000000000000..a925392abd6f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -0,0 +1,793 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include "netxen_nic_hw.h"
+#include "netxen_nic.h"
+
+#define NXHAL_VERSION 1
+
+static u32
+netxen_poll_rsp(struct netxen_adapter *adapter)
+{
+ u32 rsp = NX_CDRP_RSP_OK;
+ int timeout = 0;
+
+ do {
+ /* give at least 1 ms for the firmware to respond */
+ msleep(1);
+
+ if (++timeout > NX_OS_CRB_RETRY_COUNT)
+ return NX_CDRP_RSP_TIMEOUT;
+
+ rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET);
+ } while (!NX_CDRP_IS_RSP(rsp));
+
+ return rsp;
+}
+
+static u32
+netxen_issue_cmd(struct netxen_adapter *adapter,
+ u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
+{
+ u32 rsp;
+ u32 signature = 0;
+ u32 rcode = NX_RCODE_SUCCESS;
+
+ signature = NX_CDRP_SIGNATURE_MAKE(pci_fn, version);
+
+ /* Acquire semaphore before accessing CRB */
+ if (netxen_api_lock(adapter))
+ return NX_RCODE_TIMEOUT;
+
+ NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);
+
+ NXWR32(adapter, NX_ARG1_CRB_OFFSET, arg1);
+
+ NXWR32(adapter, NX_ARG2_CRB_OFFSET, arg2);
+
+ NXWR32(adapter, NX_ARG3_CRB_OFFSET, arg3);
+
+ NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd));
+
+ rsp = netxen_poll_rsp(adapter);
+
+ if (rsp == NX_CDRP_RSP_TIMEOUT) {
+ printk(KERN_ERR "%s: card response timeout.\n",
+ netxen_nic_driver_name);
+
+ rcode = NX_RCODE_TIMEOUT;
+ } else if (rsp == NX_CDRP_RSP_FAIL) {
+ rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET);
+
+ printk(KERN_ERR "%s: failed card response code:0x%x\n",
+ netxen_nic_driver_name, rcode);
+ }
+
+ /* Release semaphore */
+ netxen_api_unlock(adapter);
+
+ return rcode;
+}
+
+int
+nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
+{
+ u32 rcode = NX_RCODE_SUCCESS;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
+ rcode = netxen_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ NXHAL_VERSION,
+ recv_ctx->context_id,
+ mtu,
+ 0,
+ NX_CDRP_CMD_SET_MTU);
+
+ if (rcode != NX_RCODE_SUCCESS)
+ return -EIO;
+
+ return 0;
+}
+
+int
+nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
+ u32 speed, u32 duplex, u32 autoneg)
+{
+ return netxen_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ NXHAL_VERSION,
+ speed,
+ duplex,
+ autoneg,
+ NX_CDRP_CMD_CONFIG_GBE_PORT);
+}
+
+static int
+nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
+{
+ void *addr;
+ nx_hostrq_rx_ctx_t *prq;
+ nx_cardrsp_rx_ctx_t *prsp;
+ nx_hostrq_rds_ring_t *prq_rds;
+ nx_hostrq_sds_ring_t *prq_sds;
+ nx_cardrsp_rds_ring_t *prsp_rds;
+ nx_cardrsp_sds_ring_t *prsp_sds;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_sds_ring *sds_ring;
+
+ dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+ u64 phys_addr;
+
+ int i, nrds_rings, nsds_rings;
+ size_t rq_size, rsp_size;
+ u32 cap, reg, val;
+
+ int err;
+
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ nrds_rings = adapter->max_rds_rings;
+ nsds_rings = adapter->max_sds_rings;
+
+ rq_size =
+ SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
+ rsp_size =
+ SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ rq_size, &hostrq_phys_addr);
+ if (addr == NULL)
+ return -ENOMEM;
+ prq = addr;
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ rsp_size, &cardrsp_phys_addr);
+ if (addr == NULL) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+ prsp = addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
+
+ cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
+ cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);
+
+ prq->capabilities[0] = cpu_to_le32(cap);
+ prq->host_int_crb_mode =
+ cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
+ prq->host_rds_crb_mode =
+ cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE);
+
+ prq->num_rds_rings = cpu_to_le16(nrds_rings);
+ prq->num_sds_rings = cpu_to_le16(nsds_rings);
+ prq->rds_ring_offset = cpu_to_le32(0);
+
+ val = le32_to_cpu(prq->rds_ring_offset) +
+ (sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
+ prq->sds_ring_offset = cpu_to_le32(val);
+
+ prq_rds = (nx_hostrq_rds_ring_t *)(prq->data +
+ le32_to_cpu(prq->rds_ring_offset));
+
+ for (i = 0; i < nrds_rings; i++) {
+
+ rds_ring = &recv_ctx->rds_rings[i];
+
+ prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
+ prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
+ prq_rds[i].ring_kind = cpu_to_le32(i);
+ prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
+ }
+
+ prq_sds = (nx_hostrq_sds_ring_t *)(prq->data +
+ le32_to_cpu(prq->sds_ring_offset));
+
+ for (i = 0; i < nsds_rings; i++) {
+
+ sds_ring = &recv_ctx->sds_rings[i];
+
+ prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
+ prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
+ prq_sds[i].msi_index = cpu_to_le16(i);
+ }
+
+ phys_addr = hostrq_phys_addr;
+ err = netxen_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ NXHAL_VERSION,
+ (u32)(phys_addr >> 32),
+ (u32)(phys_addr & 0xffffffff),
+ rq_size,
+ NX_CDRP_CMD_CREATE_RX_CTX);
+ if (err) {
+ printk(KERN_WARNING
+ "Failed to create rx ctx in firmware: %d\n", err);
+ goto out_free_rsp;
+ }
+
+ prsp_rds = ((nx_cardrsp_rds_ring_t *)
+ &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
+
+ for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
+ rds_ring = &recv_ctx->rds_rings[i];
+
+ reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
+ rds_ring->crb_rcv_producer = netxen_get_ioaddr(adapter,
+ NETXEN_NIC_REG(reg - 0x200));
+ }
+
+ prsp_sds = ((nx_cardrsp_sds_ring_t *)
+ &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
+
+ for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
+ sds_ring = &recv_ctx->sds_rings[i];
+
+ reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
+ sds_ring->crb_sts_consumer = netxen_get_ioaddr(adapter,
+ NETXEN_NIC_REG(reg - 0x200));
+
+ reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
+ sds_ring->crb_intr_mask = netxen_get_ioaddr(adapter,
+ NETXEN_NIC_REG(reg - 0x200));
+ }
+
+ recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
+ recv_ctx->context_id = le16_to_cpu(prsp->context_id);
+ recv_ctx->virt_port = prsp->virt_port;
+
+out_free_rsp:
+ pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
+out_free_rq:
+ pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
+ return err;
+}
+
+static void
+nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (netxen_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ NXHAL_VERSION,
+ recv_ctx->context_id,
+ NX_DESTROY_CTX_RESET,
+ 0,
+ NX_CDRP_CMD_DESTROY_RX_CTX)) {
+
+ printk(KERN_WARNING
+ "%s: Failed to destroy rx ctx in firmware\n",
+ netxen_nic_driver_name);
+ }
+}
+
+static int
+nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
+{
+ nx_hostrq_tx_ctx_t *prq;
+ nx_hostrq_cds_ring_t *prq_cds;
+ nx_cardrsp_tx_ctx_t *prsp;
+ void *rq_addr, *rsp_addr;
+ size_t rq_size, rsp_size;
+ u32 temp;
+ int err = 0;
+ u64 offset, phys_addr;
+ dma_addr_t rq_phys_addr, rsp_phys_addr;
+ struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
+ rq_addr = pci_alloc_consistent(adapter->pdev,
+ rq_size, &rq_phys_addr);
+ if (!rq_addr)
+ return -ENOMEM;
+
+ rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
+ rsp_addr = pci_alloc_consistent(adapter->pdev,
+ rsp_size, &rsp_phys_addr);
+ if (!rsp_addr) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+
+ memset(rq_addr, 0, rq_size);
+ prq = rq_addr;
+
+ memset(rsp_addr, 0, rsp_size);
+ prsp = rsp_addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
+
+ temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO);
+ prq->capabilities[0] = cpu_to_le32(temp);
+
+ prq->host_int_crb_mode =
+ cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
+
+ prq->interrupt_ctl = 0;
+ prq->msi_index = 0;
+
+ prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);
+
+ offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx);
+ prq->cmd_cons_dma_addr = cpu_to_le64(offset);
+
+ prq_cds = &prq->cds_ring;
+
+ prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
+ prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
+
+ phys_addr = rq_phys_addr;
+ err = netxen_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ NXHAL_VERSION,
+ (u32)(phys_addr >> 32),
+ ((u32)phys_addr & 0xffffffff),
+ rq_size,
+ NX_CDRP_CMD_CREATE_TX_CTX);
+
+ if (err == NX_RCODE_SUCCESS) {
+ temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
+ tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
+ NETXEN_NIC_REG(temp - 0x200));
+#if 0
+ adapter->tx_state =
+ le32_to_cpu(prsp->host_ctx_state);
+#endif
+ adapter->tx_context_id =
+ le16_to_cpu(prsp->context_id);
+ } else {
+ printk(KERN_WARNING
+ "Failed to create tx ctx in firmware: %d\n", err);
+ err = -EIO;
+ }
+
+ pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
+
+out_free_rq:
+ pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
+
+ return err;
+}
+
+static void
+nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
+{
+ if (netxen_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ NXHAL_VERSION,
+ adapter->tx_context_id,
+ NX_DESTROY_CTX_RESET,
+ 0,
+ NX_CDRP_CMD_DESTROY_TX_CTX)) {
+
+ printk(KERN_WARNING
+ "%s: Failed to destroy tx ctx in firmware\n",
+ netxen_nic_driver_name);
+ }
+}
+
+int
+nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val)
+{
+ u32 rcode;
+
+ rcode = netxen_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ NXHAL_VERSION,
+ reg,
+ 0,
+ 0,
+ NX_CDRP_CMD_READ_PHY);
+
+ if (rcode != NX_RCODE_SUCCESS)
+ return -EIO;
+
+ return NXRD32(adapter, NX_ARG1_CRB_OFFSET);
+}
+
+int
+nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val)
+{
+ u32 rcode;
+
+ rcode = netxen_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ NXHAL_VERSION,
+ reg,
+ val,
+ 0,
+ NX_CDRP_CMD_WRITE_PHY);
+
+ if (rcode != NX_RCODE_SUCCESS)
+ return -EIO;
+
+ return 0;
+}
+
+static u64 ctx_addr_sig_regs[][3] = {
+ {NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
+ {NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
+ {NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
+ {NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
+};
+
+#define CRB_CTX_ADDR_REG_LO(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][0])
+#define CRB_CTX_ADDR_REG_HI(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][2])
+#define CRB_CTX_SIGNATURE_REG(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][1])
+
+#define lower32(x) ((u32)((x) & 0xffffffff))
+#define upper32(x) ((u32)(((u64)(x) >> 32) & 0xffffffff))
+
+static struct netxen_recv_crb recv_crb_registers[] = {
+ /* Instance 0 */
+ {
+ /* crb_rcv_producer: */
+ {
+ NETXEN_NIC_REG(0x100),
+ /* Jumbo frames */
+ NETXEN_NIC_REG(0x110),
+ /* LRO */
+ NETXEN_NIC_REG(0x120)
+ },
+ /* crb_sts_consumer: */
+ {
+ NETXEN_NIC_REG(0x138),
+ NETXEN_NIC_REG_2(0x000),
+ NETXEN_NIC_REG_2(0x004),
+ NETXEN_NIC_REG_2(0x008),
+ },
+ /* sw_int_mask */
+ {
+ CRB_SW_INT_MASK_0,
+ NETXEN_NIC_REG_2(0x044),
+ NETXEN_NIC_REG_2(0x048),
+ NETXEN_NIC_REG_2(0x04c),
+ },
+ },
+ /* Instance 1 */
+ {
+ /* crb_rcv_producer: */
+ {
+ NETXEN_NIC_REG(0x144),
+ /* Jumbo frames */
+ NETXEN_NIC_REG(0x154),
+ /* LRO */
+ NETXEN_NIC_REG(0x164)
+ },
+ /* crb_sts_consumer: */
+ {
+ NETXEN_NIC_REG(0x17c),
+ NETXEN_NIC_REG_2(0x020),
+ NETXEN_NIC_REG_2(0x024),
+ NETXEN_NIC_REG_2(0x028),
+ },
+ /* sw_int_mask */
+ {
+ CRB_SW_INT_MASK_1,
+ NETXEN_NIC_REG_2(0x064),
+ NETXEN_NIC_REG_2(0x068),
+ NETXEN_NIC_REG_2(0x06c),
+ },
+ },
+ /* Instance 2 */
+ {
+ /* crb_rcv_producer: */
+ {
+ NETXEN_NIC_REG(0x1d8),
+ /* Jumbo frames */
+ NETXEN_NIC_REG(0x1f8),
+ /* LRO */
+ NETXEN_NIC_REG(0x208)
+ },
+ /* crb_sts_consumer: */
+ {
+ NETXEN_NIC_REG(0x220),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ },
+ /* sw_int_mask */
+ {
+ CRB_SW_INT_MASK_2,
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ },
+ },
+ /* Instance 3 */
+ {
+ /* crb_rcv_producer: */
+ {
+ NETXEN_NIC_REG(0x22c),
+ /* Jumbo frames */
+ NETXEN_NIC_REG(0x23c),
+ /* LRO */
+ NETXEN_NIC_REG(0x24c)
+ },
+ /* crb_sts_consumer: */
+ {
+ NETXEN_NIC_REG(0x264),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ },
+ /* sw_int_mask */
+ {
+ CRB_SW_INT_MASK_3,
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ },
+ },
+};
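+
+/*
+ * Legacy (P2) context setup: fill the context descriptor in host memory
+ * and hand its DMA address to the card through the per-function CRB
+ * address/signature registers defined above.
+ */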
+
+static int
+netxen_init_old_ctx(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct nx_host_tx_ring *tx_ring;
+ int ring;
+ int port = adapter->portnum;
+ struct netxen_ring_ctx *hwctx;
+ u32 signature;
+
+ tx_ring = adapter->tx_ring;
+ recv_ctx = &adapter->recv_ctx;
+ hwctx = recv_ctx->hwctx;
+
+ hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
+ hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ hwctx->rcv_rings[ring].addr =
+ cpu_to_le64(rds_ring->phys_addr);
+ hwctx->rcv_rings[ring].size =
+ cpu_to_le32(rds_ring->num_desc);
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ if (ring == 0) {
+ hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
+ hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc);
+ }
+ hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr);
+ hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc);
+ hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring);
+ }
+ hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings);
+
+ signature = (adapter->max_sds_rings > 1) ?
+ NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;
+
+ NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
+ lower32(recv_ctx->phys_addr));
+ NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
+ upper32(recv_ctx->phys_addr));
+ NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
+ signature | port);
+ return 0;
+}
+
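+/*
+ * Allocate the DMA descriptor rings and hand them to the device: non-P2
+ * parts create the rx/tx contexts through firmware commands, while P2
+ * parts use the legacy CRB handoff in netxen_init_old_ctx().
+ */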
+int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
+{
+ void *addr;
+ int err = 0;
+ int ring;
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct nx_host_tx_ring *tx_ring;
+
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+ int port = adapter->portnum;
+
+ recv_ctx = &adapter->recv_ctx;
+ tx_ring = adapter->tx_ring;
+
+ addr = pci_alloc_consistent(pdev,
+ sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
+ &recv_ctx->phys_addr);
+ if (addr == NULL) {
+ dev_err(&pdev->dev, "failed to allocate hw context\n");
+ return -ENOMEM;
+ }
+
+ memset(addr, 0, sizeof(struct netxen_ring_ctx));
+ recv_ctx->hwctx = addr;
+ recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
+ recv_ctx->hwctx->cmd_consumer_offset =
+ cpu_to_le64(recv_ctx->phys_addr +
+ sizeof(struct netxen_ring_ctx));
+ tx_ring->hw_consumer =
+ (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));
+
+ /* cmd desc ring */
+ addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
+ &tx_ring->phys_addr);
+
+ if (addr == NULL) {
+ dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
+ netdev->name);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+
+ tx_ring->desc_head = addr;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ addr = pci_alloc_consistent(adapter->pdev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ &rds_ring->phys_addr);
+ if (addr == NULL) {
+ dev_err(&pdev->dev,
+ "%s: failed to allocate rds ring [%d]\n",
+ netdev->name, ring);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ rds_ring->desc_head = addr;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ rds_ring->crb_rcv_producer =
+ netxen_get_ioaddr(adapter,
+ recv_crb_registers[port].crb_rcv_producer[ring]);
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ &sds_ring->phys_addr);
+ if (addr == NULL) {
+ dev_err(&pdev->dev,
+ "%s: failed to allocate sds ring [%d]\n",
+ netdev->name, ring);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ sds_ring->desc_head = addr;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ sds_ring->crb_sts_consumer =
+ netxen_get_ioaddr(adapter,
+ recv_crb_registers[port].crb_sts_consumer[ring]);
+
+ sds_ring->crb_intr_mask =
+ netxen_get_ioaddr(adapter,
+ recv_crb_registers[port].sw_int_mask[ring]);
+ }
+ }
+
+ if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state))
+ goto done;
+ err = nx_fw_cmd_create_rx_ctx(adapter);
+ if (err)
+ goto err_out_free;
+ err = nx_fw_cmd_create_tx_ctx(adapter);
+ if (err)
+ goto err_out_free;
+ } else {
+ err = netxen_init_old_ctx(adapter);
+ if (err)
+ goto err_out_free;
+ }
+
+done:
+ return 0;
+
+err_out_free:
+ netxen_free_hw_resources(adapter);
+ return err;
+}
+
+void netxen_free_hw_resources(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct nx_host_tx_ring *tx_ring;
+ int ring;
+
+ int port = adapter->portnum;
+
+ if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ if (!test_and_clear_bit(__NX_FW_ATTACHED, &adapter->state))
+ goto done;
+
+ nx_fw_cmd_destroy_rx_ctx(adapter);
+ nx_fw_cmd_destroy_tx_ctx(adapter);
+ } else {
+ netxen_api_lock(adapter);
+ NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
+ NETXEN_CTX_D3_RESET | port);
+ netxen_api_unlock(adapter);
+ }
+
+ /* Allow dma queues to drain after context reset */
+ msleep(20);
+
+done:
+ recv_ctx = &adapter->recv_ctx;
+
+ if (recv_ctx->hwctx != NULL) {
+ pci_free_consistent(adapter->pdev,
+ sizeof(struct netxen_ring_ctx) +
+ sizeof(uint32_t),
+ recv_ctx->hwctx,
+ recv_ctx->phys_addr);
+ recv_ctx->hwctx = NULL;
+ }
+
+ tx_ring = adapter->tx_ring;
+ if (tx_ring->desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ TX_DESC_RINGSIZE(tx_ring),
+ tx_ring->desc_head, tx_ring->phys_addr);
+ tx_ring->desc_head = NULL;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ if (rds_ring->desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ rds_ring->desc_head,
+ rds_ring->phys_addr);
+ rds_ring->desc_head = NULL;
+ }
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ if (sds_ring->desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ sds_ring->desc_head,
+ sds_ring->phys_addr);
+ sds_ring->desc_head = NULL;
+ }
+ }
+}
+
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
new file mode 100644
index 000000000000..b34fb74d07e3
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -0,0 +1,835 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+
+struct netxen_nic_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define NETXEN_NIC_STAT(m) sizeof(((struct netxen_adapter *)0)->m), \
+ offsetof(struct netxen_adapter, m)
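+
+/*
+ * NETXEN_NIC_STAT expands to the size and offset of an adapter member,
+ * using the classic null-pointer-cast idiom to size the field.
+ */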
+
+#define NETXEN_NIC_PORT_WINDOW 0x10000
+#define NETXEN_NIC_INVALID_DATA 0xDEADBEEF
+
+static const struct netxen_nic_stats netxen_nic_gstrings_stats[] = {
+ {"xmit_called", NETXEN_NIC_STAT(stats.xmitcalled)},
+ {"xmit_finished", NETXEN_NIC_STAT(stats.xmitfinished)},
+ {"rx_dropped", NETXEN_NIC_STAT(stats.rxdropped)},
+ {"tx_dropped", NETXEN_NIC_STAT(stats.txdropped)},
+ {"csummed", NETXEN_NIC_STAT(stats.csummed)},
+ {"rx_pkts", NETXEN_NIC_STAT(stats.rx_pkts)},
+ {"lro_pkts", NETXEN_NIC_STAT(stats.lro_pkts)},
+ {"rx_bytes", NETXEN_NIC_STAT(stats.rxbytes)},
+ {"tx_bytes", NETXEN_NIC_STAT(stats.txbytes)},
+};
+
+#define NETXEN_NIC_STATS_LEN ARRAY_SIZE(netxen_nic_gstrings_stats)
+
+static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register_Test_on_offline",
+ "Link_Test_on_offline"
+};
+
+#define NETXEN_NIC_TEST_LEN ARRAY_SIZE(netxen_nic_gstrings_test)
+
+#define NETXEN_NIC_REGS_COUNT 30
+#define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32))
+#define NETXEN_MAX_EEPROM_LEN 1024
+
+static int netxen_nic_get_eeprom_len(struct net_device *dev)
+{
+ return NETXEN_FLASH_TOTAL_SIZE;
+}
+
+static void
+netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u32 fw_major = 0;
+ u32 fw_minor = 0;
+ u32 fw_build = 0;
+
+ strncpy(drvinfo->driver, netxen_nic_driver_name, 32);
+ strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32);
+ fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
+ fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
+ fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
+ sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ drvinfo->regdump_len = NETXEN_NIC_REGS_LEN;
+ drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev);
+}
+
+static int
+netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ int check_sfp_module = 0;
+
+ /* read which mode */
+ if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ ecmd->advertising = (ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full);
+
+ ecmd->port = PORT_TP;
+
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+ ecmd->duplex = adapter->link_duplex;
+ ecmd->autoneg = adapter->link_autoneg;
+
+ } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ u32 val;
+
+ val = NXRD32(adapter, NETXEN_PORT_MODE_ADDR);
+ if (val == NETXEN_PORT_MODE_802_3_AP) {
+ ecmd->supported = SUPPORTED_1000baseT_Full;
+ ecmd->advertising = ADVERTISED_1000baseT_Full;
+ } else {
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ }
+
+ if (netif_running(dev) && adapter->has_link_events) {
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+ ecmd->autoneg = adapter->link_autoneg;
+ ecmd->duplex = adapter->link_duplex;
+ goto skip;
+ }
+
+ ecmd->port = PORT_TP;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ u16 pcifn = adapter->ahw.pci_func;
+
+ val = NXRD32(adapter, P3_LINK_SPEED_REG(pcifn));
+ ethtool_cmd_speed_set(ecmd, P3_LINK_SPEED_MHZ *
+ P3_LINK_SPEED_VAL(pcifn, val));
+ } else
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
+
+ ecmd->duplex = DUPLEX_FULL;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ } else
+ return -EIO;
+
+skip:
+ ecmd->phy_address = adapter->physical_port;
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ switch (adapter->ahw.board_type) {
+ case NETXEN_BRDTYPE_P2_SB35_4G:
+ case NETXEN_BRDTYPE_P2_SB31_2G:
+ case NETXEN_BRDTYPE_P3_REF_QG:
+ case NETXEN_BRDTYPE_P3_4_GB:
+ case NETXEN_BRDTYPE_P3_4_GB_MM:
+
+ ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_Autoneg;
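+ /* fall through */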
+ case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+ case NETXEN_BRDTYPE_P3_10G_CX4:
+ case NETXEN_BRDTYPE_P3_10G_CX4_LP:
+ case NETXEN_BRDTYPE_P3_10000_BASE_T:
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->port = PORT_TP;
+ ecmd->autoneg = (adapter->ahw.board_type ==
+ NETXEN_BRDTYPE_P2_SB31_10G_CX4) ?
+ (AUTONEG_DISABLE) : (adapter->link_autoneg);
+ break;
+ case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
+ case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
+ case NETXEN_BRDTYPE_P3_IMEZ:
+ case NETXEN_BRDTYPE_P3_XG_LOM:
+ case NETXEN_BRDTYPE_P3_HMEZ:
+ ecmd->supported |= SUPPORTED_MII;
+ ecmd->advertising |= ADVERTISED_MII;
+ ecmd->port = PORT_MII;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ break;
+ case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
+ case NETXEN_BRDTYPE_P3_10G_SFP_CT:
+ case NETXEN_BRDTYPE_P3_10G_SFP_QT:
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->supported |= SUPPORTED_TP;
+ check_sfp_module = netif_running(dev) &&
+ adapter->has_link_events;
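+ /* fall through */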
+ case NETXEN_BRDTYPE_P2_SB31_10G:
+ case NETXEN_BRDTYPE_P3_10G_XFP:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ break;
+ case NETXEN_BRDTYPE_P3_10G_TP:
+ if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
+ ecmd->advertising |=
+ (ADVERTISED_FIBRE | ADVERTISED_TP);
+ ecmd->port = PORT_FIBRE;
+ check_sfp_module = netif_running(dev) &&
+ adapter->has_link_events;
+ } else {
+ ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+ ecmd->advertising |=
+ (ADVERTISED_TP | ADVERTISED_Autoneg);
+ ecmd->port = PORT_TP;
+ }
+ break;
+ default:
+ printk(KERN_ERR "netxen-nic: Unsupported board model %d\n",
+ adapter->ahw.board_type);
+ return -EIO;
+ }
+
+ if (check_sfp_module) {
+ switch (adapter->module_type) {
+ case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
+ case LINKEVENT_MODULE_OPTICAL_SRLR:
+ case LINKEVENT_MODULE_OPTICAL_LRM:
+ case LINKEVENT_MODULE_OPTICAL_SFP_1G:
+ ecmd->port = PORT_FIBRE;
+ break;
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
+ case LINKEVENT_MODULE_TWINAX:
+ ecmd->port = PORT_TP;
+ break;
+ default:
+ ecmd->port = -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u32 speed = ethtool_cmd_speed(ecmd);
+ int ret;
+
+ if (adapter->ahw.port_type != NETXEN_NIC_GBE)
+ return -EOPNOTSUPP;
+
+ if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG))
+ return -EOPNOTSUPP;
+
+ ret = nx_fw_cmd_set_gbe_port(adapter, speed, ecmd->duplex,
+ ecmd->autoneg);
+ if (ret == NX_RCODE_NOT_SUPPORTED)
+ return -EOPNOTSUPP;
+ else if (ret)
+ return -EIO;
+
+ adapter->link_speed = speed;
+ adapter->link_duplex = ecmd->duplex;
+ adapter->link_autoneg = ecmd->autoneg;
+
+ if (!netif_running(dev))
+ return 0;
+
+ dev->netdev_ops->ndo_stop(dev);
+ return dev->netdev_ops->ndo_open(dev);
+}
+
+static int netxen_nic_get_regs_len(struct net_device *dev)
+{
+ return NETXEN_NIC_REGS_LEN;
+}
+
+static void
+netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct nx_host_sds_ring *sds_ring;
+ u32 *regs_buff = p;
+ int ring, i = 0;
+ int port = adapter->physical_port;
+
+ memset(p, 0, NETXEN_NIC_REGS_LEN);
+
+ regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
+ (adapter->pdev)->device;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
+ regs_buff[i++] = NXRD32(adapter, CRB_CMDPEG_STATE);
+ regs_buff[i++] = NXRD32(adapter, CRB_RCVPEG_STATE);
+ regs_buff[i++] = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
+ regs_buff[i++] = NXRDIO(adapter, adapter->crb_int_state_reg);
+ regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_STATE);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS2);
+
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_0+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_1+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_2+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_3+0x3c);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_4+0x3c);
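+ /* skip two slots so both the P2 and P3 branches consume five
+  * entries and the tail of the register dump stays aligned */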
+ i += 2;
+
+ regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE_P3);
+ regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
+
+ } else {
+ i++;
+
+ regs_buff[i++] = NXRD32(adapter,
+ NETXEN_NIU_XGE_CONFIG_0+(0x10000*port));
+ regs_buff[i++] = NXRD32(adapter,
+ NETXEN_NIU_XGE_CONFIG_1+(0x10000*port));
+
+ regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE);
+ regs_buff[i++] = NXRDIO(adapter,
+ adapter->tx_ring->crb_cmd_consumer);
+ }
+
+ regs_buff[i++] = NXRDIO(adapter, adapter->tx_ring->crb_cmd_producer);
+
+ regs_buff[i++] = NXRDIO(adapter,
+ recv_ctx->rds_rings[0].crb_rcv_producer);
+ regs_buff[i++] = NXRDIO(adapter,
+ recv_ctx->rds_rings[1].crb_rcv_producer);
+
+ regs_buff[i++] = adapter->max_sds_rings;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &(recv_ctx->sds_rings[ring]);
+ regs_buff[i++] = NXRDIO(adapter,
+ sds_ring->crb_sts_consumer);
+ }
+}
+
+static u32 netxen_nic_test_link(struct net_device *dev)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u32 val, port;
+
+ port = adapter->physical_port;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ val = NXRD32(adapter, CRB_XG_STATE_P3);
+ val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
+ return (val == XG_LINK_UP_P3) ? 0 : 1;
+ } else {
+ val = NXRD32(adapter, CRB_XG_STATE);
+ val = (val >> port*8) & 0xff;
+ return (val == XG_LINK_UP) ? 0 : 1;
+ }
+}
+
+static int
+netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 * bytes)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ int offset;
+ int ret;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = (adapter->pdev)->vendor |
+ ((adapter->pdev)->device << 16);
+ offset = eeprom->offset;
+
+ ret = netxen_rom_fast_read_words(adapter, offset, bytes,
+ eeprom->len);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void
+netxen_nic_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+
+ ring->rx_pending = adapter->num_rxd;
+ ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
+ ring->rx_jumbo_pending += adapter->num_lro_rxd;
+ ring->tx_pending = adapter->num_txd;
+
+ if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
+ ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ } else {
+ ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
+ ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ }
+
+ ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
+
+ ring->rx_mini_max_pending = 0;
+ ring->rx_mini_pending = 0;
+}
+
+static u32
+netxen_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
+{
+ u32 num_desc;
+ num_desc = max(val, min);
+ num_desc = min(num_desc, max);
+ num_desc = roundup_pow_of_two(num_desc);
+
+ if (val != num_desc) {
+ printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n",
+ netxen_nic_driver_name, r_name, num_desc, val);
+ }
+
+ return num_desc;
+}
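+
+/*
+ * Example: netxen_validate_ringparam(1000, 64, 4096, "rx") clamps the
+ * request to [64, 4096] and rounds up to the next power of two,
+ * returning 1024 and logging the adjustment.
+ */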
+
+static int
+netxen_nic_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
+ u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ u16 num_rxd, num_jumbo_rxd, num_txd;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return -EOPNOTSUPP;
+
+ if (ring->rx_mini_pending)
+ return -EOPNOTSUPP;
+
+ if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
+ max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ }
+
+ num_rxd = netxen_validate_ringparam(ring->rx_pending,
+ MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
+
+ num_jumbo_rxd = netxen_validate_ringparam(ring->rx_jumbo_pending,
+ MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
+
+ num_txd = netxen_validate_ringparam(ring->tx_pending,
+ MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
+
+ if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd &&
+ num_jumbo_rxd == adapter->num_jumbo_rxd)
+ return 0;
+
+ adapter->num_rxd = num_rxd;
+ adapter->num_jumbo_rxd = num_jumbo_rxd;
+ adapter->num_txd = num_txd;
+
+ return netxen_nic_reset_context(adapter);
+}
+
+static void
+netxen_nic_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ __u32 val;
+ int port = adapter->physical_port;
+
+ if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
+ return;
+ /* get flow control settings */
+ val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
+ pause->rx_pause = netxen_gb_get_rx_flowctl(val);
+ val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
+ switch (port) {
+ case 0:
+ pause->tx_pause = !(netxen_gb_get_gb0_mask(val));
+ break;
+ case 1:
+ pause->tx_pause = !(netxen_gb_get_gb1_mask(val));
+ break;
+ case 2:
+ pause->tx_pause = !(netxen_gb_get_gb2_mask(val));
+ break;
+ case 3:
+ default:
+ pause->tx_pause = !(netxen_gb_get_gb3_mask(val));
+ break;
+ }
+ } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
+ return;
+ pause->rx_pause = 1;
+ val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
+ if (port == 0)
+ pause->tx_pause = !(netxen_xg_get_xg0_mask(val));
+ else
+ pause->tx_pause = !(netxen_xg_get_xg1_mask(val));
+ } else {
+ printk(KERN_ERR"%s: Unknown board type: %x\n",
+ netxen_nic_driver_name, adapter->ahw.port_type);
+ }
+}
+
+static int
+netxen_nic_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ __u32 val;
+ int port = adapter->physical_port;
+ /* read mode */
+ if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
+ return -EIO;
+ /* set flow control */
+ val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
+
+ if (pause->rx_pause)
+ netxen_gb_rx_flowctl(val);
+ else
+ netxen_gb_unset_rx_flowctl(val);
+
+ NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+ val);
+ /* set tx pause: clear or set this port's bit in the pause mask */
+ val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
+ switch (port) {
+ case 0:
+ if (pause->tx_pause)
+ netxen_gb_unset_gb0_mask(val);
+ else
+ netxen_gb_set_gb0_mask(val);
+ break;
+ case 1:
+ if (pause->tx_pause)
+ netxen_gb_unset_gb1_mask(val);
+ else
+ netxen_gb_set_gb1_mask(val);
+ break;
+ case 2:
+ if (pause->tx_pause)
+ netxen_gb_unset_gb2_mask(val);
+ else
+ netxen_gb_set_gb2_mask(val);
+ break;
+ case 3:
+ default:
+ if (pause->tx_pause)
+ netxen_gb_unset_gb3_mask(val);
+ else
+ netxen_gb_set_gb3_mask(val);
+ break;
+ }
+ NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val);
+ } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
+ return -EIO;
+ val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
+ if (port == 0) {
+ if (pause->tx_pause)
+ netxen_xg_unset_xg0_mask(val);
+ else
+ netxen_xg_set_xg0_mask(val);
+ } else {
+ if (pause->tx_pause)
+ netxen_xg_unset_xg1_mask(val);
+ else
+ netxen_xg_set_xg1_mask(val);
+ }
+ NXWR32(adapter, NETXEN_NIU_XG_PAUSE_CTL, val);
+ } else {
+ printk(KERN_ERR "%s: Unknown board type: %x\n",
+ netxen_nic_driver_name,
+ adapter->ahw.port_type);
+ }
+ return 0;
+}
+
+static int netxen_nic_reg_test(struct net_device *dev)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u32 data_read, data_written;
+
+ data_read = NXRD32(adapter, NETXEN_PCIX_PH_REG(0));
+ if ((data_read & 0xffff) != adapter->pdev->vendor)
+ return 1;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return 0;
+
+ data_written = (u32)0xa5a5a5a5;
+
+ NXWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
+ data_read = NXRD32(adapter, CRB_SCRATCHPAD_TEST);
+ if (data_written != data_read)
+ return 1;
+
+ return 0;
+}
+
+static int netxen_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return NETXEN_NIC_TEST_LEN;
+ case ETH_SS_STATS:
+ return NETXEN_NIC_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
+ u64 * data)
+{
+ memset(data, 0, sizeof(uint64_t) * NETXEN_NIC_TEST_LEN);
+ if ((data[0] = netxen_nic_reg_test(dev)))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ /* link test */
+ if ((data[1] = (u64) netxen_nic_test_link(dev)))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+}
+
+static void
+netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+ int index;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *netxen_nic_gstrings_test,
+ NETXEN_NIC_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) {
+ memcpy(data + index * ETH_GSTRING_LEN,
+ netxen_nic_gstrings_stats[index].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ }
+}
+
+static void
+netxen_nic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 * data)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ int index;
+
+ for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) {
+ char *p =
+ (char *)adapter +
+ netxen_nic_gstrings_stats[index].stat_offset;
+ data[index] =
+ (netxen_nic_gstrings_stats[index].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *) p : *(u32 *) p;
+ }
+}
+
+static void
+netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u32 wol_cfg = 0;
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return;
+
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV);
+ if (wol_cfg & (1UL << adapter->portnum))
+ wol->supported |= WAKE_MAGIC;
+
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG);
+ if (wol_cfg & (1UL << adapter->portnum))
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int
+netxen_nic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u32 wol_cfg = 0;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return -EOPNOTSUPP;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EOPNOTSUPP;
+
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV);
+ if (!(wol_cfg & (1 << adapter->portnum)))
+ return -EOPNOTSUPP;
+
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG);
+ if (wol->wolopts & WAKE_MAGIC)
+ wol_cfg |= 1UL << adapter->portnum;
+ else
+ wol_cfg &= ~(1UL << adapter->portnum);
+ NXWR32(adapter, NETXEN_WOL_CONFIG, wol_cfg);
+
+ return 0;
+}
+
+/*
+ * Set the coalescing parameters. Currently only the normal (non-adaptive)
+ * mode is supported. If rx_coalesce_usecs == 0 or rx_max_coalesced_frames
+ * == 0, firmware coalescing is reset to its defaults.
+ */
+static int netxen_set_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+
+ if (!NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return -EINVAL;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return -EINVAL;
+
+ /*
+ * Return an error if unsupported values or parameters are set.
+ */
+ if (ethcoal->rx_coalesce_usecs > 0xffff ||
+ ethcoal->rx_max_coalesced_frames > 0xffff ||
+ ethcoal->tx_coalesce_usecs > 0xffff ||
+ ethcoal->tx_max_coalesced_frames > 0xffff ||
+ ethcoal->rx_coalesce_usecs_irq ||
+ ethcoal->rx_max_coalesced_frames_irq ||
+ ethcoal->tx_coalesce_usecs_irq ||
+ ethcoal->tx_max_coalesced_frames_irq ||
+ ethcoal->stats_block_coalesce_usecs ||
+ ethcoal->use_adaptive_rx_coalesce ||
+ ethcoal->use_adaptive_tx_coalesce ||
+ ethcoal->pkt_rate_low ||
+ ethcoal->rx_coalesce_usecs_low ||
+ ethcoal->rx_max_coalesced_frames_low ||
+ ethcoal->tx_coalesce_usecs_low ||
+ ethcoal->tx_max_coalesced_frames_low ||
+ ethcoal->pkt_rate_high ||
+ ethcoal->rx_coalesce_usecs_high ||
+ ethcoal->rx_max_coalesced_frames_high ||
+ ethcoal->tx_coalesce_usecs_high ||
+ ethcoal->tx_max_coalesced_frames_high)
+ return -EINVAL;
+
+ if (!ethcoal->rx_coalesce_usecs ||
+ !ethcoal->rx_max_coalesced_frames) {
+ adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT;
+ adapter->coal.normal.data.rx_time_us =
+ NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US;
+ adapter->coal.normal.data.rx_packets =
+ NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ } else {
+ adapter->coal.flags = 0;
+ adapter->coal.normal.data.rx_time_us =
+ ethcoal->rx_coalesce_usecs;
+ adapter->coal.normal.data.rx_packets =
+ ethcoal->rx_max_coalesced_frames;
+ }
+ adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs;
+ adapter->coal.normal.data.tx_packets =
+ ethcoal->tx_max_coalesced_frames;
+
+ netxen_config_intr_coalesce(adapter);
+
+ return 0;
+}
+
+static int netxen_get_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+
+ if (!NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return -EINVAL;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return -EINVAL;
+
+ ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us;
+ ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us;
+ ethcoal->rx_max_coalesced_frames =
+ adapter->coal.normal.data.rx_packets;
+ ethcoal->tx_max_coalesced_frames =
+ adapter->coal.normal.data.tx_packets;
+
+ return 0;
+}
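+
+/*
+ * From user space these two hooks back the standard ethtool coalescing
+ * interface, e.g. (hypothetical device name and values):
+ *
+ *   ethtool -C eth0 rx-usecs 3 rx-frames 256
+ *
+ * Passing rx-usecs 0 or rx-frames 0 restores the firmware defaults, as
+ * implemented in netxen_set_intr_coalesce() above.
+ */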
+
+const struct ethtool_ops netxen_nic_ethtool_ops = {
+ .get_settings = netxen_nic_get_settings,
+ .set_settings = netxen_nic_set_settings,
+ .get_drvinfo = netxen_nic_get_drvinfo,
+ .get_regs_len = netxen_nic_get_regs_len,
+ .get_regs = netxen_nic_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = netxen_nic_get_eeprom_len,
+ .get_eeprom = netxen_nic_get_eeprom,
+ .get_ringparam = netxen_nic_get_ringparam,
+ .set_ringparam = netxen_nic_set_ringparam,
+ .get_pauseparam = netxen_nic_get_pauseparam,
+ .set_pauseparam = netxen_nic_set_pauseparam,
+ .get_wol = netxen_nic_get_wol,
+ .set_wol = netxen_nic_set_wol,
+ .self_test = netxen_nic_diag_test,
+ .get_strings = netxen_nic_get_strings,
+ .get_ethtool_stats = netxen_nic_get_ethtool_stats,
+ .get_sset_count = netxen_get_sset_count,
+ .get_coalesce = netxen_get_intr_coalesce,
+ .set_coalesce = netxen_set_intr_coalesce,
+};
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
new file mode 100644
index 000000000000..dc1967c1f312
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
@@ -0,0 +1,1050 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#ifndef __NETXEN_NIC_HDR_H_
+#define __NETXEN_NIC_HDR_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/*
+ * The basic unit of access when reading/writing control registers.
+ */
+
+typedef __le32 netxen_crbword_t; /* single word in CRB space */
+
+enum {
+ NETXEN_HW_H0_CH_HUB_ADR = 0x05,
+ NETXEN_HW_H1_CH_HUB_ADR = 0x0E,
+ NETXEN_HW_H2_CH_HUB_ADR = 0x03,
+ NETXEN_HW_H3_CH_HUB_ADR = 0x01,
+ NETXEN_HW_H4_CH_HUB_ADR = 0x06,
+ NETXEN_HW_H5_CH_HUB_ADR = 0x07,
+ NETXEN_HW_H6_CH_HUB_ADR = 0x08
+};
+
+/* Hub 0 */
+enum {
+ NETXEN_HW_MN_CRB_AGT_ADR = 0x15,
+ NETXEN_HW_MS_CRB_AGT_ADR = 0x25
+};
+
+/* Hub 1 */
+enum {
+ NETXEN_HW_PS_CRB_AGT_ADR = 0x73,
+ NETXEN_HW_SS_CRB_AGT_ADR = 0x20,
+ NETXEN_HW_RPMX3_CRB_AGT_ADR = 0x0b,
+ NETXEN_HW_QMS_CRB_AGT_ADR = 0x00,
+ NETXEN_HW_SQGS0_CRB_AGT_ADR = 0x01,
+ NETXEN_HW_SQGS1_CRB_AGT_ADR = 0x02,
+ NETXEN_HW_SQGS2_CRB_AGT_ADR = 0x03,
+ NETXEN_HW_SQGS3_CRB_AGT_ADR = 0x04,
+ NETXEN_HW_C2C0_CRB_AGT_ADR = 0x58,
+ NETXEN_HW_C2C1_CRB_AGT_ADR = 0x59,
+ NETXEN_HW_C2C2_CRB_AGT_ADR = 0x5a,
+ NETXEN_HW_RPMX2_CRB_AGT_ADR = 0x0a,
+ NETXEN_HW_RPMX4_CRB_AGT_ADR = 0x0c,
+ NETXEN_HW_RPMX7_CRB_AGT_ADR = 0x0f,
+ NETXEN_HW_RPMX9_CRB_AGT_ADR = 0x12,
+ NETXEN_HW_SMB_CRB_AGT_ADR = 0x18
+};
+
+/* Hub 2 */
+enum {
+ NETXEN_HW_NIU_CRB_AGT_ADR = 0x31,
+ NETXEN_HW_I2C0_CRB_AGT_ADR = 0x19,
+ NETXEN_HW_I2C1_CRB_AGT_ADR = 0x29,
+
+ NETXEN_HW_SN_CRB_AGT_ADR = 0x10,
+ NETXEN_HW_I2Q_CRB_AGT_ADR = 0x20,
+ NETXEN_HW_LPC_CRB_AGT_ADR = 0x22,
+ NETXEN_HW_ROMUSB_CRB_AGT_ADR = 0x21,
+ NETXEN_HW_QM_CRB_AGT_ADR = 0x66,
+ NETXEN_HW_SQG0_CRB_AGT_ADR = 0x60,
+ NETXEN_HW_SQG1_CRB_AGT_ADR = 0x61,
+ NETXEN_HW_SQG2_CRB_AGT_ADR = 0x62,
+ NETXEN_HW_SQG3_CRB_AGT_ADR = 0x63,
+ NETXEN_HW_RPMX1_CRB_AGT_ADR = 0x09,
+ NETXEN_HW_RPMX5_CRB_AGT_ADR = 0x0d,
+ NETXEN_HW_RPMX6_CRB_AGT_ADR = 0x0e,
+ NETXEN_HW_RPMX8_CRB_AGT_ADR = 0x11
+};
+
+/* Hub 3 */
+enum {
+ NETXEN_HW_PH_CRB_AGT_ADR = 0x1A,
+ NETXEN_HW_SRE_CRB_AGT_ADR = 0x50,
+ NETXEN_HW_EG_CRB_AGT_ADR = 0x51,
+ NETXEN_HW_RPMX0_CRB_AGT_ADR = 0x08
+};
+
+/* Hub 4 */
+enum {
+ NETXEN_HW_PEGN0_CRB_AGT_ADR = 0x40,
+ NETXEN_HW_PEGN1_CRB_AGT_ADR,
+ NETXEN_HW_PEGN2_CRB_AGT_ADR,
+ NETXEN_HW_PEGN3_CRB_AGT_ADR,
+ NETXEN_HW_PEGNI_CRB_AGT_ADR,
+ NETXEN_HW_PEGND_CRB_AGT_ADR,
+ NETXEN_HW_PEGNC_CRB_AGT_ADR,
+ NETXEN_HW_PEGR0_CRB_AGT_ADR,
+ NETXEN_HW_PEGR1_CRB_AGT_ADR,
+ NETXEN_HW_PEGR2_CRB_AGT_ADR,
+ NETXEN_HW_PEGR3_CRB_AGT_ADR,
+ NETXEN_HW_PEGN4_CRB_AGT_ADR
+};
+
+/* Hub 5 */
+enum {
+ NETXEN_HW_PEGS0_CRB_AGT_ADR = 0x40,
+ NETXEN_HW_PEGS1_CRB_AGT_ADR,
+ NETXEN_HW_PEGS2_CRB_AGT_ADR,
+ NETXEN_HW_PEGS3_CRB_AGT_ADR,
+ NETXEN_HW_PEGSI_CRB_AGT_ADR,
+ NETXEN_HW_PEGSD_CRB_AGT_ADR,
+ NETXEN_HW_PEGSC_CRB_AGT_ADR
+};
+
+/* Hub 6 */
+enum {
+ NETXEN_HW_CAS0_CRB_AGT_ADR = 0x46,
+ NETXEN_HW_CAS1_CRB_AGT_ADR = 0x47,
+ NETXEN_HW_CAS2_CRB_AGT_ADR = 0x48,
+ NETXEN_HW_CAS3_CRB_AGT_ADR = 0x49,
+ NETXEN_HW_NCM_CRB_AGT_ADR = 0x16,
+ NETXEN_HW_TMR_CRB_AGT_ADR = 0x17,
+ NETXEN_HW_XDMA_CRB_AGT_ADR = 0x05,
+ NETXEN_HW_OCM0_CRB_AGT_ADR = 0x06,
+ NETXEN_HW_OCM1_CRB_AGT_ADR = 0x07
+};
+
+/* Floaters - non-existent modules */
+#define NETXEN_HW_EFC_RPMX0_CRB_AGT_ADR 0x67
+
+/* This field defines PCI/X adr [25:20] of agents on the CRB */
+enum {
+ NETXEN_HW_PX_MAP_CRB_PH = 0,
+ NETXEN_HW_PX_MAP_CRB_PS,
+ NETXEN_HW_PX_MAP_CRB_MN,
+ NETXEN_HW_PX_MAP_CRB_MS,
+ NETXEN_HW_PX_MAP_CRB_PGR1,
+ NETXEN_HW_PX_MAP_CRB_SRE,
+ NETXEN_HW_PX_MAP_CRB_NIU,
+ NETXEN_HW_PX_MAP_CRB_QMN,
+ NETXEN_HW_PX_MAP_CRB_SQN0,
+ NETXEN_HW_PX_MAP_CRB_SQN1,
+ NETXEN_HW_PX_MAP_CRB_SQN2,
+ NETXEN_HW_PX_MAP_CRB_SQN3,
+ NETXEN_HW_PX_MAP_CRB_QMS,
+ NETXEN_HW_PX_MAP_CRB_SQS0,
+ NETXEN_HW_PX_MAP_CRB_SQS1,
+ NETXEN_HW_PX_MAP_CRB_SQS2,
+ NETXEN_HW_PX_MAP_CRB_SQS3,
+ NETXEN_HW_PX_MAP_CRB_PGN0,
+ NETXEN_HW_PX_MAP_CRB_PGN1,
+ NETXEN_HW_PX_MAP_CRB_PGN2,
+ NETXEN_HW_PX_MAP_CRB_PGN3,
+ NETXEN_HW_PX_MAP_CRB_PGND,
+ NETXEN_HW_PX_MAP_CRB_PGNI,
+ NETXEN_HW_PX_MAP_CRB_PGS0,
+ NETXEN_HW_PX_MAP_CRB_PGS1,
+ NETXEN_HW_PX_MAP_CRB_PGS2,
+ NETXEN_HW_PX_MAP_CRB_PGS3,
+ NETXEN_HW_PX_MAP_CRB_PGSD,
+ NETXEN_HW_PX_MAP_CRB_PGSI,
+ NETXEN_HW_PX_MAP_CRB_SN,
+ NETXEN_HW_PX_MAP_CRB_PGR2,
+ NETXEN_HW_PX_MAP_CRB_EG,
+ NETXEN_HW_PX_MAP_CRB_PH2,
+ NETXEN_HW_PX_MAP_CRB_PS2,
+ NETXEN_HW_PX_MAP_CRB_CAM,
+ NETXEN_HW_PX_MAP_CRB_CAS0,
+ NETXEN_HW_PX_MAP_CRB_CAS1,
+ NETXEN_HW_PX_MAP_CRB_CAS2,
+ NETXEN_HW_PX_MAP_CRB_C2C0,
+ NETXEN_HW_PX_MAP_CRB_C2C1,
+ NETXEN_HW_PX_MAP_CRB_TIMR,
+ NETXEN_HW_PX_MAP_CRB_PGR3,
+ NETXEN_HW_PX_MAP_CRB_RPMX1,
+ NETXEN_HW_PX_MAP_CRB_RPMX2,
+ NETXEN_HW_PX_MAP_CRB_RPMX3,
+ NETXEN_HW_PX_MAP_CRB_RPMX4,
+ NETXEN_HW_PX_MAP_CRB_RPMX5,
+ NETXEN_HW_PX_MAP_CRB_RPMX6,
+ NETXEN_HW_PX_MAP_CRB_RPMX7,
+ NETXEN_HW_PX_MAP_CRB_XDMA,
+ NETXEN_HW_PX_MAP_CRB_I2Q,
+ NETXEN_HW_PX_MAP_CRB_ROMUSB,
+ NETXEN_HW_PX_MAP_CRB_CAS3,
+ NETXEN_HW_PX_MAP_CRB_RPMX0,
+ NETXEN_HW_PX_MAP_CRB_RPMX8,
+ NETXEN_HW_PX_MAP_CRB_RPMX9,
+ NETXEN_HW_PX_MAP_CRB_OCM0,
+ NETXEN_HW_PX_MAP_CRB_OCM1,
+ NETXEN_HW_PX_MAP_CRB_SMB,
+ NETXEN_HW_PX_MAP_CRB_I2C0,
+ NETXEN_HW_PX_MAP_CRB_I2C1,
+ NETXEN_HW_PX_MAP_CRB_LPC,
+ NETXEN_HW_PX_MAP_CRB_PGNC,
+ NETXEN_HW_PX_MAP_CRB_PGR0
+};
+
+/* This field defines CRB adr [31:20] of the agents */
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_MN \
+ ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MN_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PH \
+ ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_PH_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_MS \
+ ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MS_CRB_AGT_ADR)
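+
+/*
+ * Example: NETXEN_HW_CRB_HUB_AGT_ADR_MN = (0x05 << 7) | 0x15 = 0x295,
+ * a 12-bit value occupying CRB address bits [31:20].
+ */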
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PS \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_PS_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SS \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SS_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_QMS \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_QMS_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS0 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS1 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS2 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS3 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_C2C0 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_C2C1 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX4_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX7_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX9_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SMB \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SMB_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_NIU \
+ ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_NIU_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C0 \
+ ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C1 \
+ ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C1_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SRE \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SRE_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_EG \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_EG_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_QMN \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_QM_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN0 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN1 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN2 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN3 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX5_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX6_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX8_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS0 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS1 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS2 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS3 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS3_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNI \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNI_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGND \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGND_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN0 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN1 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN2 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN3 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN4 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN4_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNC \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNC_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR0 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR1 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR2 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR3 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR3_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSI \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSI_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSD \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSD_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS0 \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS1 \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS2 \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS3 \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSC \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSC_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAM \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_NCM_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_TIMR \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_TMR_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_XDMA \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_XDMA_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SN \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_SN_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_I2Q \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_I2Q_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_ROMUSB_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM0 \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM1 \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_LPC \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_LPC_CRB_AGT_ADR)
+
+#define NETXEN_SRE_MISC (NETXEN_CRB_SRE + 0x0002c)
+#define NETXEN_SRE_INT_STATUS (NETXEN_CRB_SRE + 0x00034)
+#define NETXEN_SRE_PBI_ACTIVE_STATUS (NETXEN_CRB_SRE + 0x01014)
+#define NETXEN_SRE_L1RE_CTL (NETXEN_CRB_SRE + 0x03000)
+#define NETXEN_SRE_L2RE_CTL (NETXEN_CRB_SRE + 0x05000)
+#define NETXEN_SRE_BUF_CTL (NETXEN_CRB_SRE + 0x01000)
+
+#define NETXEN_DMA_BASE(U) (NETXEN_CRB_PCIX_MD + 0x20000 + ((U)<<16))
+#define NETXEN_DMA_COMMAND(U) (NETXEN_DMA_BASE(U) + 0x00008)
+
+#define NETXEN_I2Q_CLR_PCI_HI (NETXEN_CRB_I2Q + 0x00034)
+
+#define PEG_NETWORK_BASE(N) (NETXEN_CRB_PEG_NET_0 + (((N)&3) << 20))
+#define CRB_REG_EX_PC 0x3c
+
+#define ROMUSB_GLB (NETXEN_CRB_ROMUSB + 0x00000)
+#define ROMUSB_ROM (NETXEN_CRB_ROMUSB + 0x10000)
+
+#define NETXEN_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
+#define NETXEN_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
+#define NETXEN_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c)
+#define NETXEN_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
+#define NETXEN_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044)
+#define NETXEN_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
+#define NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8)
+
+#define NETXEN_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n)))
+
+#define NETXEN_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
+#define NETXEN_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
+#define NETXEN_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
+#define NETXEN_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
+#define NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
+#define NETXEN_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
+
+/* Lock IDs for ROM lock */
+#define ROM_LOCK_DRIVER 0x0d417340
+
+/******************************************************************************
+*
+* Definitions specific to M25P flash
+*
+*******************************************************************************
+* Instructions
+*/
+#define M25P_INSTR_WREN 0x06
+#define M25P_INSTR_WRDI 0x04
+#define M25P_INSTR_RDID 0x9f
+#define M25P_INSTR_RDSR 0x05
+#define M25P_INSTR_WRSR 0x01
+#define M25P_INSTR_READ 0x03
+#define M25P_INSTR_FAST_READ 0x0b
+#define M25P_INSTR_PP 0x02
+#define M25P_INSTR_SE 0xd8
+#define M25P_INSTR_BE 0xc7
+#define M25P_INSTR_DP 0xb9
+#define M25P_INSTR_RES 0xab
+
+/* all are 1MB windows */
+
+#define NETXEN_PCI_CRB_WINDOWSIZE 0x00100000
+#define NETXEN_PCI_CRB_WINDOW(A) \
+ (NETXEN_PCI_CRBSPACE + (A)*NETXEN_PCI_CRB_WINDOWSIZE)
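+
+/*
+ * Example: NETXEN_CRB_NIU is window 6 (NETXEN_HW_PX_MAP_CRB_NIU), i.e.
+ * 0x06000000 + 6 * 0x00100000 = 0x06600000.
+ */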
+
+#define NETXEN_CRB_NIU NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_NIU)
+#define NETXEN_CRB_SRE NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SRE)
+#define NETXEN_CRB_ROMUSB \
+ NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB)
+#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q)
+#define NETXEN_CRB_I2C0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2C0)
+#define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB)
+#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64)
+
+#define NETXEN_CRB_PCIX_HOST NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH)
+#define NETXEN_CRB_PCIX_HOST2 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH2)
+#define NETXEN_CRB_PEG_NET_0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN0)
+#define NETXEN_CRB_PEG_NET_1 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN1)
+#define NETXEN_CRB_PEG_NET_2 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN2)
+#define NETXEN_CRB_PEG_NET_3 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN3)
+#define NETXEN_CRB_PEG_NET_4 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SQS2)
+#define NETXEN_CRB_PEG_NET_D NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGND)
+#define NETXEN_CRB_PEG_NET_I NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGNI)
+#define NETXEN_CRB_DDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_MN)
+#define NETXEN_CRB_QDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SN)
+
+#define NETXEN_CRB_PCIX_MD NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PS)
+#define NETXEN_CRB_PCIE NETXEN_CRB_PCIX_MD
+
+#define ISR_INT_VECTOR (NETXEN_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK (NETXEN_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_MASK_SLOW (NETXEN_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_TARGET_STATUS (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_MASK (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_STATUS_F1 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_MASK_F1 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_STATUS_F2 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_MASK_F2 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_STATUS_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_MASK_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_STATUS_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_MASK_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_STATUS_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_MASK_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_STATUS_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_MASK_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_STATUS_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+#define ISR_INT_TARGET_MASK_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define NETXEN_PCI_MAPSIZE 128
+#define NETXEN_PCI_DDR_NET (0x00000000UL)
+#define NETXEN_PCI_QDR_NET (0x04000000UL)
+#define NETXEN_PCI_DIRECT_CRB (0x04400000UL)
+#define NETXEN_PCI_CAMQM (0x04800000UL)
+#define NETXEN_PCI_CAMQM_MAX (0x04ffffffUL)
+#define NETXEN_PCI_OCM0 (0x05000000UL)
+#define NETXEN_PCI_OCM0_MAX (0x050fffffUL)
+#define NETXEN_PCI_OCM1 (0x05100000UL)
+#define NETXEN_PCI_OCM1_MAX (0x051fffffUL)
+#define NETXEN_PCI_CRBSPACE (0x06000000UL)
+#define NETXEN_PCI_128MB_SIZE (0x08000000UL)
+#define NETXEN_PCI_32MB_SIZE (0x02000000UL)
+#define NETXEN_PCI_2MB_SIZE (0x00200000UL)
+
+#define NETXEN_PCI_MN_2M (0)
+#define NETXEN_PCI_MS_2M (0x80000)
+#define NETXEN_PCI_OCM0_2M (0x000c0000UL)
+#define NETXEN_PCI_CAMQM_2M_BASE (0x000ff800UL)
+#define NETXEN_PCI_CAMQM_2M_END (0x04800800UL)
+
+#define NETXEN_CRB_CAM NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_CAM)
+
+#define NETXEN_ADDR_DDR_NET (0x0000000000000000ULL)
+#define NETXEN_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+#define NETXEN_ADDR_OCM0 (0x0000000200000000ULL)
+#define NETXEN_ADDR_OCM0_MAX (0x00000002000fffffULL)
+#define NETXEN_ADDR_OCM1 (0x0000000200400000ULL)
+#define NETXEN_ADDR_OCM1_MAX (0x00000002004fffffULL)
+#define NETXEN_ADDR_QDR_NET (0x0000000300000000ULL)
+#define NETXEN_ADDR_QDR_NET_MAX_P2 (0x00000003003fffffULL)
+#define NETXEN_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL)
+
+/*
+ * Register offsets for MN
+ */
+#define NETXEN_MIU_CONTROL (0x000)
+#define NETXEN_MIU_MN_CONTROL (NETXEN_CRB_DDR_NET+NETXEN_MIU_CONTROL)
+
+ /* 200ms delay in each loop */
+#define NETXEN_NIU_PHY_WAITLEN 200000
+ /* 10 seconds before we give up */
+#define NETXEN_NIU_PHY_WAITMAX 50
+#define NETXEN_NIU_MAX_GBE_PORTS 4
+#define NETXEN_NIU_MAX_XG_PORTS 2
+
+#define NETXEN_NIU_MODE (NETXEN_CRB_NIU + 0x00000)
+
+#define NETXEN_NIU_XG_SINGLE_TERM (NETXEN_CRB_NIU + 0x00004)
+#define NETXEN_NIU_XG_DRIVE_HI (NETXEN_CRB_NIU + 0x00008)
+#define NETXEN_NIU_XG_DRIVE_LO (NETXEN_CRB_NIU + 0x0000c)
+#define NETXEN_NIU_XG_DTX (NETXEN_CRB_NIU + 0x00010)
+#define NETXEN_NIU_XG_DEQ (NETXEN_CRB_NIU + 0x00014)
+#define NETXEN_NIU_XG_WORD_ALIGN (NETXEN_CRB_NIU + 0x00018)
+#define NETXEN_NIU_XG_RESET (NETXEN_CRB_NIU + 0x0001c)
+#define NETXEN_NIU_XG_POWER_DOWN (NETXEN_CRB_NIU + 0x00020)
+#define NETXEN_NIU_XG_RESET_PLL (NETXEN_CRB_NIU + 0x00024)
+#define NETXEN_NIU_XG_SERDES_LOOPBACK (NETXEN_CRB_NIU + 0x00028)
+#define NETXEN_NIU_XG_DO_BYTE_ALIGN (NETXEN_CRB_NIU + 0x0002c)
+#define NETXEN_NIU_XG_TX_ENABLE (NETXEN_CRB_NIU + 0x00030)
+#define NETXEN_NIU_XG_RX_ENABLE (NETXEN_CRB_NIU + 0x00034)
+#define NETXEN_NIU_XG_STATUS (NETXEN_CRB_NIU + 0x00038)
+#define NETXEN_NIU_XG_PAUSE_THRESHOLD (NETXEN_CRB_NIU + 0x0003c)
+#define NETXEN_NIU_INT_MASK (NETXEN_CRB_NIU + 0x00040)
+#define NETXEN_NIU_ACTIVE_INT (NETXEN_CRB_NIU + 0x00044)
+#define NETXEN_NIU_MASKABLE_INT (NETXEN_CRB_NIU + 0x00048)
+
+#define NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER (NETXEN_CRB_NIU + 0x0004c)
+
+#define NETXEN_NIU_GB_SERDES_RESET (NETXEN_CRB_NIU + 0x00050)
+#define NETXEN_NIU_GB0_GMII_MODE (NETXEN_CRB_NIU + 0x00054)
+#define NETXEN_NIU_GB0_MII_MODE (NETXEN_CRB_NIU + 0x00058)
+#define NETXEN_NIU_GB1_GMII_MODE (NETXEN_CRB_NIU + 0x0005c)
+#define NETXEN_NIU_GB1_MII_MODE (NETXEN_CRB_NIU + 0x00060)
+#define NETXEN_NIU_GB2_GMII_MODE (NETXEN_CRB_NIU + 0x00064)
+#define NETXEN_NIU_GB2_MII_MODE (NETXEN_CRB_NIU + 0x00068)
+#define NETXEN_NIU_GB3_GMII_MODE (NETXEN_CRB_NIU + 0x0006c)
+#define NETXEN_NIU_GB3_MII_MODE (NETXEN_CRB_NIU + 0x00070)
+#define NETXEN_NIU_REMOTE_LOOPBACK (NETXEN_CRB_NIU + 0x00074)
+#define NETXEN_NIU_GB0_HALF_DUPLEX (NETXEN_CRB_NIU + 0x00078)
+#define NETXEN_NIU_GB1_HALF_DUPLEX (NETXEN_CRB_NIU + 0x0007c)
+#define NETXEN_NIU_RESET_SYS_FIFOS (NETXEN_CRB_NIU + 0x00088)
+#define NETXEN_NIU_GB_CRC_DROP (NETXEN_CRB_NIU + 0x0008c)
+#define NETXEN_NIU_GB_DROP_WRONGADDR (NETXEN_CRB_NIU + 0x00090)
+#define NETXEN_NIU_TEST_MUX_CTL (NETXEN_CRB_NIU + 0x00094)
+#define NETXEN_NIU_XG_PAUSE_CTL (NETXEN_CRB_NIU + 0x00098)
+#define NETXEN_NIU_XG_PAUSE_LEVEL (NETXEN_CRB_NIU + 0x000dc)
+#define NETXEN_NIU_FRAME_COUNT_SELECT (NETXEN_CRB_NIU + 0x000ac)
+#define NETXEN_NIU_FRAME_COUNT (NETXEN_CRB_NIU + 0x000b0)
+#define NETXEN_NIU_XG_SEL (NETXEN_CRB_NIU + 0x00128)
+#define NETXEN_NIU_GB_PAUSE_CTL (NETXEN_CRB_NIU + 0x0030c)
+
+#define NETXEN_NIU_FULL_LEVEL_XG (NETXEN_CRB_NIU + 0x00450)
+
+#define NETXEN_NIU_XG1_RESET (NETXEN_CRB_NIU + 0x0011c)
+#define NETXEN_NIU_XG1_POWER_DOWN (NETXEN_CRB_NIU + 0x00120)
+#define NETXEN_NIU_XG1_RESET_PLL (NETXEN_CRB_NIU + 0x00124)
+
+#define NETXEN_MAC_ADDR_CNTL_REG (NETXEN_CRB_NIU + 0x1000)
+
+#define NETXEN_MULTICAST_ADDR_HI_0 (NETXEN_CRB_NIU + 0x1010)
+#define NETXEN_MULTICAST_ADDR_HI_1 (NETXEN_CRB_NIU + 0x1014)
+#define NETXEN_MULTICAST_ADDR_HI_2 (NETXEN_CRB_NIU + 0x1018)
+#define NETXEN_MULTICAST_ADDR_HI_3 (NETXEN_CRB_NIU + 0x101c)
+
+#define NETXEN_UNICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1080)
+#define NETXEN_MULTICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1100)
+
+#define NETXEN_NIU_GB_MAC_CONFIG_0(I) \
+ (NETXEN_CRB_NIU + 0x30000 + (I)*0x10000)
+#define NETXEN_NIU_GB_MAC_CONFIG_1(I) \
+ (NETXEN_CRB_NIU + 0x30004 + (I)*0x10000)
+#define NETXEN_NIU_GB_MAC_IPG_IFG(I) \
+ (NETXEN_CRB_NIU + 0x30008 + (I)*0x10000)
+#define NETXEN_NIU_GB_HALF_DUPLEX_CTRL(I) \
+ (NETXEN_CRB_NIU + 0x3000c + (I)*0x10000)
+#define NETXEN_NIU_GB_MAX_FRAME_SIZE(I) \
+ (NETXEN_CRB_NIU + 0x30010 + (I)*0x10000)
+#define NETXEN_NIU_GB_TEST_REG(I) \
+ (NETXEN_CRB_NIU + 0x3001c + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_CONFIG(I) \
+ (NETXEN_CRB_NIU + 0x30020 + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_COMMAND(I) \
+ (NETXEN_CRB_NIU + 0x30024 + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_ADDR(I) \
+ (NETXEN_CRB_NIU + 0x30028 + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_CTRL(I) \
+ (NETXEN_CRB_NIU + 0x3002c + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_STATUS(I) \
+ (NETXEN_CRB_NIU + 0x30030 + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_INDICATE(I) \
+ (NETXEN_CRB_NIU + 0x30034 + (I)*0x10000)
+#define NETXEN_NIU_GB_INTERFACE_CTRL(I) \
+ (NETXEN_CRB_NIU + 0x30038 + (I)*0x10000)
+#define NETXEN_NIU_GB_INTERFACE_STATUS(I) \
+ (NETXEN_CRB_NIU + 0x3003c + (I)*0x10000)
+#define NETXEN_NIU_GB_STATION_ADDR_0(I) \
+ (NETXEN_CRB_NIU + 0x30040 + (I)*0x10000)
+#define NETXEN_NIU_GB_STATION_ADDR_1(I) \
+ (NETXEN_CRB_NIU + 0x30044 + (I)*0x10000)
+
+#define NETXEN_NIU_XGE_CONFIG_0 (NETXEN_CRB_NIU + 0x70000)
+#define NETXEN_NIU_XGE_CONFIG_1 (NETXEN_CRB_NIU + 0x70004)
+#define NETXEN_NIU_XGE_IPG (NETXEN_CRB_NIU + 0x70008)
+#define NETXEN_NIU_XGE_STATION_ADDR_0_HI (NETXEN_CRB_NIU + 0x7000c)
+#define NETXEN_NIU_XGE_STATION_ADDR_0_1 (NETXEN_CRB_NIU + 0x70010)
+#define NETXEN_NIU_XGE_STATION_ADDR_1_LO (NETXEN_CRB_NIU + 0x70014)
+#define NETXEN_NIU_XGE_STATUS (NETXEN_CRB_NIU + 0x70018)
+#define NETXEN_NIU_XGE_MAX_FRAME_SIZE (NETXEN_CRB_NIU + 0x7001c)
+#define NETXEN_NIU_XGE_PAUSE_FRAME_VALUE (NETXEN_CRB_NIU + 0x70020)
+#define NETXEN_NIU_XGE_TX_BYTE_CNT (NETXEN_CRB_NIU + 0x70024)
+#define NETXEN_NIU_XGE_TX_FRAME_CNT (NETXEN_CRB_NIU + 0x70028)
+#define NETXEN_NIU_XGE_RX_BYTE_CNT (NETXEN_CRB_NIU + 0x7002c)
+#define NETXEN_NIU_XGE_RX_FRAME_CNT (NETXEN_CRB_NIU + 0x70030)
+#define NETXEN_NIU_XGE_AGGR_ERROR_CNT (NETXEN_CRB_NIU + 0x70034)
+#define NETXEN_NIU_XGE_MULTICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x70038)
+#define NETXEN_NIU_XGE_UNICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x7003c)
+#define NETXEN_NIU_XGE_CRC_ERROR_CNT (NETXEN_CRB_NIU + 0x70040)
+#define NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x70044)
+#define NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x70048)
+#define NETXEN_NIU_XGE_LOCAL_ERROR_CNT (NETXEN_CRB_NIU + 0x7004c)
+#define NETXEN_NIU_XGE_REMOTE_ERROR_CNT (NETXEN_CRB_NIU + 0x70050)
+#define NETXEN_NIU_XGE_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x70054)
+#define NETXEN_NIU_XGE_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x70058)
+#define NETXEN_NIU_XG1_CONFIG_0 (NETXEN_CRB_NIU + 0x80000)
+#define NETXEN_NIU_XG1_CONFIG_1 (NETXEN_CRB_NIU + 0x80004)
+#define NETXEN_NIU_XG1_IPG (NETXEN_CRB_NIU + 0x80008)
+#define NETXEN_NIU_XG1_STATION_ADDR_0_HI (NETXEN_CRB_NIU + 0x8000c)
+#define NETXEN_NIU_XG1_STATION_ADDR_0_1 (NETXEN_CRB_NIU + 0x80010)
+#define NETXEN_NIU_XG1_STATION_ADDR_1_LO (NETXEN_CRB_NIU + 0x80014)
+#define NETXEN_NIU_XG1_STATUS (NETXEN_CRB_NIU + 0x80018)
+#define NETXEN_NIU_XG1_MAX_FRAME_SIZE (NETXEN_CRB_NIU + 0x8001c)
+#define NETXEN_NIU_XG1_PAUSE_FRAME_VALUE (NETXEN_CRB_NIU + 0x80020)
+#define NETXEN_NIU_XG1_TX_BYTE_CNT (NETXEN_CRB_NIU + 0x80024)
+#define NETXEN_NIU_XG1_TX_FRAME_CNT (NETXEN_CRB_NIU + 0x80028)
+#define NETXEN_NIU_XG1_RX_BYTE_CNT (NETXEN_CRB_NIU + 0x8002c)
+#define NETXEN_NIU_XG1_RX_FRAME_CNT (NETXEN_CRB_NIU + 0x80030)
+#define NETXEN_NIU_XG1_AGGR_ERROR_CNT (NETXEN_CRB_NIU + 0x80034)
+#define NETXEN_NIU_XG1_MULTICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x80038)
+#define NETXEN_NIU_XG1_UNICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x8003c)
+#define NETXEN_NIU_XG1_CRC_ERROR_CNT (NETXEN_CRB_NIU + 0x80040)
+#define NETXEN_NIU_XG1_OVERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x80044)
+#define NETXEN_NIU_XG1_UNDERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x80048)
+#define NETXEN_NIU_XG1_LOCAL_ERROR_CNT (NETXEN_CRB_NIU + 0x8004c)
+#define NETXEN_NIU_XG1_REMOTE_ERROR_CNT (NETXEN_CRB_NIU + 0x80050)
+#define NETXEN_NIU_XG1_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x80054)
+#define NETXEN_NIU_XG1_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x80058)
+
+/* P3 802.3ap */
+#define NETXEN_NIU_AP_MAC_CONFIG_0(I) (NETXEN_CRB_NIU+0xa0000+(I)*0x10000)
+#define NETXEN_NIU_AP_MAC_CONFIG_1(I) (NETXEN_CRB_NIU+0xa0004+(I)*0x10000)
+#define NETXEN_NIU_AP_MAC_IPG_IFG(I) (NETXEN_CRB_NIU+0xa0008+(I)*0x10000)
+#define NETXEN_NIU_AP_HALF_DUPLEX_CTRL(I) (NETXEN_CRB_NIU+0xa000c+(I)*0x10000)
+#define NETXEN_NIU_AP_MAX_FRAME_SIZE(I) (NETXEN_CRB_NIU+0xa0010+(I)*0x10000)
+#define NETXEN_NIU_AP_TEST_REG(I) (NETXEN_CRB_NIU+0xa001c+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_CONFIG(I) (NETXEN_CRB_NIU+0xa0020+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_COMMAND(I) (NETXEN_CRB_NIU+0xa0024+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_ADDR(I) (NETXEN_CRB_NIU+0xa0028+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_CTRL(I) (NETXEN_CRB_NIU+0xa002c+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_STATUS(I) (NETXEN_CRB_NIU+0xa0030+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_INDICATE(I) (NETXEN_CRB_NIU+0xa0034+(I)*0x10000)
+#define NETXEN_NIU_AP_INTERFACE_CTRL(I) (NETXEN_CRB_NIU+0xa0038+(I)*0x10000)
+#define NETXEN_NIU_AP_INTERFACE_STATUS(I) (NETXEN_CRB_NIU+0xa003c+(I)*0x10000)
+#define NETXEN_NIU_AP_STATION_ADDR_0(I) (NETXEN_CRB_NIU+0xa0040+(I)*0x10000)
+#define NETXEN_NIU_AP_STATION_ADDR_1(I) (NETXEN_CRB_NIU+0xa0044+(I)*0x10000)
+
+#define TEST_AGT_CTRL (0x00)
+
+#define TA_CTL_START 1
+#define TA_CTL_ENABLE 2
+#define TA_CTL_WRITE 4
+#define TA_CTL_BUSY 8
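+
+/*
+ * A test-agent transaction programs the address registers, writes
+ * TA_CTL_ENABLE (plus TA_CTL_WRITE for writes) to TEST_AGT_CTRL, then
+ * additionally sets TA_CTL_START and polls until TA_CTL_BUSY clears
+ * (see netxen_nic_pci_mem_write_128M() below).
+ */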
+
+/*
+ * Register offsets for MN
+ */
+#define MIU_TEST_AGT_BASE (0x90)
+
+#define MIU_TEST_AGT_ADDR_LO (0x04)
+#define MIU_TEST_AGT_ADDR_HI (0x08)
+#define MIU_TEST_AGT_WRDATA_LO (0x10)
+#define MIU_TEST_AGT_WRDATA_HI (0x14)
+#define MIU_TEST_AGT_RDDATA_LO (0x18)
+#define MIU_TEST_AGT_RDDATA_HI (0x1c)
+
+#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
+#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
+
+/*
+ * Register offsets for MS
+ */
+#define SIU_TEST_AGT_BASE (0x60)
+
+#define SIU_TEST_AGT_ADDR_LO (0x04)
+#define SIU_TEST_AGT_ADDR_HI (0x18)
+#define SIU_TEST_AGT_WRDATA_LO (0x08)
+#define SIU_TEST_AGT_WRDATA_HI (0x0c)
+#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i)))
+#define SIU_TEST_AGT_RDDATA_LO (0x10)
+#define SIU_TEST_AGT_RDDATA_HI (0x14)
+#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i)))
+
+#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
+#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
+
+/* XG Link status */
+#define XG_LINK_UP 0x10
+#define XG_LINK_DOWN 0x20
+
+#define XG_LINK_UP_P3 0x01
+#define XG_LINK_DOWN_P3 0x02
+#define XG_LINK_STATE_P3_MASK 0xf
+#define XG_LINK_STATE_P3(pcifn,val) \
+ (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK)
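+
+/*
+ * CRB_XG_STATE_P3 packs a 4-bit link state per PCI function; e.g.
+ * XG_LINK_STATE_P3(1, val) extracts bits 7:4 for comparison against
+ * XG_LINK_UP_P3/XG_LINK_DOWN_P3.
+ */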
+
+#define P3_LINK_SPEED_MHZ 100
+#define P3_LINK_SPEED_MASK 0xff
+#define P3_LINK_SPEED_REG(pcifn) \
+ (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
+#define P3_LINK_SPEED_VAL(pcifn, reg) \
+ (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK)
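+
+/*
+ * Each link-speed register packs four 8-bit per-function values, in
+ * units of P3_LINK_SPEED_MHZ; e.g. function 5 reads byte 1 of
+ * P3_LINK_SPEED_REG(5), which resolves to CRB_PF_LINK_SPEED_2.
+ */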
+
+#define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000)
+#define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg))
+#define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150))
+#define NETXEN_FW_VERSION_MINOR (NETXEN_CAM_RAM(0x154))
+#define NETXEN_FW_VERSION_SUB (NETXEN_CAM_RAM(0x158))
+#define NETXEN_ROM_LOCK_ID (NETXEN_CAM_RAM(0x100))
+#define NETXEN_PHY_LOCK_ID (NETXEN_CAM_RAM(0x120))
+#define NETXEN_CRB_WIN_LOCK_ID (NETXEN_CAM_RAM(0x124))
+
+#define NIC_CRB_BASE (NETXEN_CAM_RAM(0x200))
+#define NIC_CRB_BASE_2 (NETXEN_CAM_RAM(0x700))
+#define NETXEN_NIC_REG(X) (NIC_CRB_BASE+(X))
+#define NETXEN_NIC_REG_2(X) (NIC_CRB_BASE_2+(X))
+
+#define NX_CDRP_CRB_OFFSET (NETXEN_NIC_REG(0x18))
+#define NX_ARG1_CRB_OFFSET (NETXEN_NIC_REG(0x1c))
+#define NX_ARG2_CRB_OFFSET (NETXEN_NIC_REG(0x20))
+#define NX_ARG3_CRB_OFFSET (NETXEN_NIC_REG(0x24))
+#define NX_SIGN_CRB_OFFSET (NETXEN_NIC_REG(0x28))
+
+#define CRB_HOST_DUMMY_BUF_ADDR_HI (NETXEN_NIC_REG(0x3c))
+#define CRB_HOST_DUMMY_BUF_ADDR_LO (NETXEN_NIC_REG(0x40))
+
+#define CRB_CMDPEG_STATE (NETXEN_NIC_REG(0x50))
+#define CRB_RCVPEG_STATE (NETXEN_NIC_REG(0x13c))
+
+#define CRB_XG_STATE (NETXEN_NIC_REG(0x94))
+#define CRB_XG_STATE_P3 (NETXEN_NIC_REG(0x98))
+#define CRB_PF_LINK_SPEED_1 (NETXEN_NIC_REG(0xe8))
+#define CRB_PF_LINK_SPEED_2 (NETXEN_NIC_REG(0xec))
+
+#define CRB_MPORT_MODE (NETXEN_NIC_REG(0xc4))
+#define CRB_DMA_SHIFT (NETXEN_NIC_REG(0xcc))
+#define CRB_INT_VECTOR (NETXEN_NIC_REG(0xd4))
+
+#define CRB_CMD_PRODUCER_OFFSET (NETXEN_NIC_REG(0x08))
+#define CRB_CMD_CONSUMER_OFFSET (NETXEN_NIC_REG(0x0c))
+#define CRB_CMD_PRODUCER_OFFSET_1 (NETXEN_NIC_REG(0x1ac))
+#define CRB_CMD_CONSUMER_OFFSET_1 (NETXEN_NIC_REG(0x1b0))
+#define CRB_CMD_PRODUCER_OFFSET_2 (NETXEN_NIC_REG(0x1b8))
+#define CRB_CMD_CONSUMER_OFFSET_2 (NETXEN_NIC_REG(0x1bc))
+#define CRB_CMD_PRODUCER_OFFSET_3 (NETXEN_NIC_REG(0x1d0))
+#define CRB_CMD_CONSUMER_OFFSET_3 (NETXEN_NIC_REG(0x1d4))
+#define CRB_TEMP_STATE (NETXEN_NIC_REG(0x1b4))
+
+#define CRB_V2P_0 (NETXEN_NIC_REG(0x290))
+#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
+#define CRB_DRIVER_VERSION (NETXEN_NIC_REG(0x2a0))
+
+#define CRB_SW_INT_MASK_0 (NETXEN_NIC_REG(0x1d8))
+#define CRB_SW_INT_MASK_1 (NETXEN_NIC_REG(0x1e0))
+#define CRB_SW_INT_MASK_2 (NETXEN_NIC_REG(0x1e4))
+#define CRB_SW_INT_MASK_3 (NETXEN_NIC_REG(0x1e8))
+
+#define CRB_FW_CAPABILITIES_1 (NETXEN_CAM_RAM(0x128))
+#define CRB_MAC_BLOCK_START (NETXEN_CAM_RAM(0x1c0))
+
+/*
+ * capabilities register, can be used to selectively enable/disable features
+ * for backward compatibility
+ */
+#define CRB_NIC_CAPABILITIES_HOST NETXEN_NIC_REG(0x1a8)
+#define CRB_NIC_MSI_MODE_HOST NETXEN_NIC_REG(0x270)
+
+#define INTR_SCHEME_PERPORT 0x1
+#define MSI_MODE_MULTIFUNC 0x1
+
+/* used for ethtool tests */
+#define CRB_SCRATCHPAD_TEST NETXEN_NIC_REG(0x280)
+
+/*
+ * CrbPortPhanCntrHi/Lo passes the address of HostPhantomIndex, which the
+ * Phantom host reads to get producer/consumer indexes from Phantom/Casper.
+ * If it is not HOST_SHARED_MEMORY, the following registers hold the
+ * addresses of the ring's shared memory on the Phantom.
+ */
+
+#define nx_get_temp_val(x) ((x) >> 16)
+#define nx_get_temp_state(x) ((x) & 0xffff)
+#define nx_encode_temp(val, state) (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+ NX_TEMP_NORMAL = 0x1, /* Normal operating range */
+ NX_TEMP_WARN, /* Sound alert, temperature getting high */
+ NX_TEMP_PANIC /* Fatal error, hardware has shut down. */
+};
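+
+/*
+ * nx_encode_temp() packs a temperature reading into the upper 16 bits
+ * and one of the NX_TEMP_* states above into the lower 16 bits; this is
+ * the format held in CRB_TEMP_STATE.
+ */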
+
+/* Lock IDs for PHY lock */
+#define PHY_LOCK_DRIVER 0x44524956
+
+/* Used for PS PCI Memory access */
+#define PCIX_PS_OP_ADDR_LO (0x10000)
+/* via CRB (PS side only) */
+#define PCIX_PS_OP_ADDR_HI (0x10004)
+
+#define PCIX_INT_VECTOR (0x10100)
+#define PCIX_INT_MASK (0x10104)
+
+#define PCIX_CRB_WINDOW (0x10210)
+#define PCIX_CRB_WINDOW_F0 (0x10210)
+#define PCIX_CRB_WINDOW_F1 (0x10230)
+#define PCIX_CRB_WINDOW_F2 (0x10250)
+#define PCIX_CRB_WINDOW_F3 (0x10270)
+#define PCIX_CRB_WINDOW_F4 (0x102ac)
+#define PCIX_CRB_WINDOW_F5 (0x102bc)
+#define PCIX_CRB_WINDOW_F6 (0x102cc)
+#define PCIX_CRB_WINDOW_F7 (0x102dc)
+#define PCIE_CRB_WINDOW_REG(func) (((func) < 4) ? \
+ (PCIX_CRB_WINDOW_F0 + (0x20 * (func))) :\
+ (PCIX_CRB_WINDOW_F4 + (0x10 * ((func)-4))))
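+
+/*
+ * Per-function window registers for functions 0-3 are spaced 0x20 apart
+ * starting at F0, while functions 4-7 are spaced 0x10 apart starting at
+ * F4; hence the split in PCIE_CRB_WINDOW_REG() and the MN/SN variants
+ * below.
+ */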
+
+#define PCIX_MN_WINDOW (0x10200)
+#define PCIX_MN_WINDOW_F0 (0x10200)
+#define PCIX_MN_WINDOW_F1 (0x10220)
+#define PCIX_MN_WINDOW_F2 (0x10240)
+#define PCIX_MN_WINDOW_F3 (0x10260)
+#define PCIX_MN_WINDOW_F4 (0x102a0)
+#define PCIX_MN_WINDOW_F5 (0x102b0)
+#define PCIX_MN_WINDOW_F6 (0x102c0)
+#define PCIX_MN_WINDOW_F7 (0x102d0)
+#define PCIE_MN_WINDOW_REG(func) (((func) < 4) ? \
+ (PCIX_MN_WINDOW_F0 + (0x20 * (func))) :\
+ (PCIX_MN_WINDOW_F4 + (0x10 * ((func)-4))))
+
+#define PCIX_SN_WINDOW (0x10208)
+#define PCIX_SN_WINDOW_F0 (0x10208)
+#define PCIX_SN_WINDOW_F1 (0x10228)
+#define PCIX_SN_WINDOW_F2 (0x10248)
+#define PCIX_SN_WINDOW_F3 (0x10268)
+#define PCIX_SN_WINDOW_F4 (0x102a8)
+#define PCIX_SN_WINDOW_F5 (0x102b8)
+#define PCIX_SN_WINDOW_F6 (0x102c8)
+#define PCIX_SN_WINDOW_F7 (0x102d8)
+#define PCIE_SN_WINDOW_REG(func) (((func) < 4) ? \
+ (PCIX_SN_WINDOW_F0 + (0x20 * (func))) :\
+ (PCIX_SN_WINDOW_F4 + (0x10 * ((func)-4))))
+
+#define PCIX_OCM_WINDOW (0x10800)
+#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func))
+
+#define PCIX_TARGET_STATUS (0x10118)
+#define PCIX_TARGET_STATUS_F1 (0x10160)
+#define PCIX_TARGET_STATUS_F2 (0x10164)
+#define PCIX_TARGET_STATUS_F3 (0x10168)
+#define PCIX_TARGET_STATUS_F4 (0x10360)
+#define PCIX_TARGET_STATUS_F5 (0x10364)
+#define PCIX_TARGET_STATUS_F6 (0x10368)
+#define PCIX_TARGET_STATUS_F7 (0x1036c)
+
+#define PCIX_TARGET_MASK (0x10128)
+#define PCIX_TARGET_MASK_F1 (0x10170)
+#define PCIX_TARGET_MASK_F2 (0x10174)
+#define PCIX_TARGET_MASK_F3 (0x10178)
+#define PCIX_TARGET_MASK_F4 (0x10370)
+#define PCIX_TARGET_MASK_F5 (0x10374)
+#define PCIX_TARGET_MASK_F6 (0x10378)
+#define PCIX_TARGET_MASK_F7 (0x1037c)
+
+#define PCIX_MSI_F0 (0x13000)
+#define PCIX_MSI_F1 (0x13004)
+#define PCIX_MSI_F2 (0x13008)
+#define PCIX_MSI_F3 (0x1300c)
+#define PCIX_MSI_F4 (0x13010)
+#define PCIX_MSI_F5 (0x13014)
+#define PCIX_MSI_F6 (0x13018)
+#define PCIX_MSI_F7 (0x1301c)
+#define PCIX_MSI_F(i) (0x13000+((i)*4))
+
+#define PCIX_PS_MEM_SPACE (0x90000)
+
+#define NETXEN_PCIX_PH_REG(reg) (NETXEN_CRB_PCIE + (reg))
+#define NETXEN_PCIX_PS_REG(reg) (NETXEN_CRB_PCIX_MD + (reg))
+
+#define NETXEN_PCIE_REG(reg) (NETXEN_CRB_PCIE + (reg))
+
+#define PCIE_MAX_DMA_XFER_SIZE (0x1404c)
+
+#define PCIE_DCR 0x00d8
+
+#define PCIE_SEM0_LOCK (0x1c000)
+#define PCIE_SEM0_UNLOCK (0x1c004)
+#define PCIE_SEM1_LOCK (0x1c008)
+#define PCIE_SEM1_UNLOCK (0x1c00c)
+#define PCIE_SEM2_LOCK (0x1c010) /* Flash lock */
+#define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */
+#define PCIE_SEM3_LOCK (0x1c018) /* Phy lock */
+#define PCIE_SEM3_UNLOCK (0x1c01c) /* Phy unlock */
+#define PCIE_SEM4_LOCK (0x1c020)
+#define PCIE_SEM4_UNLOCK (0x1c024)
+#define PCIE_SEM5_LOCK (0x1c028) /* API lock */
+#define PCIE_SEM5_UNLOCK (0x1c02c) /* API unlock */
+#define PCIE_SEM6_LOCK (0x1c030) /* sw lock */
+#define PCIE_SEM6_UNLOCK (0x1c034) /* sw unlock */
+#define PCIE_SEM7_LOCK (0x1c038) /* crb win lock */
+#define PCIE_SEM7_UNLOCK (0x1c03c) /* crb win unlock */
+#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N))
+#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N))
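+
+/*
+ * Hardware semaphores: a read of PCIE_SEM_LOCK(N) returns 1 once the
+ * semaphore has been acquired, and a read of PCIE_SEM_UNLOCK(N)
+ * releases it (see netxen_pcie_sem_lock()/netxen_pcie_sem_unlock()).
+ */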
+
+#define PCIE_SETUP_FUNCTION (0x12040)
+#define PCIE_SETUP_FUNCTION2 (0x12048)
+#define PCIE_MISCCFG_RC (0x1206c)
+#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
+#define PCIE_CHICKEN3 (0x120c8)
+
+#define ISR_INT_STATE_REG (NETXEN_PCIX_PS_REG(PCIE_MISCCFG_RC))
+#define PCIE_MAX_MASTER_SPLIT (0x14048)
+
+#define NETXEN_PORT_MODE_NONE 0
+#define NETXEN_PORT_MODE_XG 1
+#define NETXEN_PORT_MODE_GB 2
+#define NETXEN_PORT_MODE_802_3_AP 3
+#define NETXEN_PORT_MODE_AUTO_NEG 4
+#define NETXEN_PORT_MODE_AUTO_NEG_1G 5
+#define NETXEN_PORT_MODE_AUTO_NEG_XG 6
+#define NETXEN_PORT_MODE_ADDR (NETXEN_CAM_RAM(0x24))
+#define NETXEN_WOL_PORT_MODE (NETXEN_CAM_RAM(0x198))
+
+#define NETXEN_WOL_CONFIG_NV (NETXEN_CAM_RAM(0x184))
+#define NETXEN_WOL_CONFIG (NETXEN_CAM_RAM(0x188))
+
+#define NX_PEG_TUNE_MN_PRESENT 0x1
+#define NX_PEG_TUNE_CAPABILITY (NETXEN_CAM_RAM(0x02c))
+
+#define NETXEN_DMA_WATCHDOG_CTRL (NETXEN_CAM_RAM(0x14))
+#define NETXEN_PEG_ALIVE_COUNTER (NETXEN_CAM_RAM(0xb0))
+#define NETXEN_PEG_HALT_STATUS1 (NETXEN_CAM_RAM(0xa8))
+#define NETXEN_PEG_HALT_STATUS2 (NETXEN_CAM_RAM(0xac))
+#define NX_CRB_DEV_REF_COUNT (NETXEN_CAM_RAM(0x138))
+#define NX_CRB_DEV_STATE (NETXEN_CAM_RAM(0x140))
+
+/* Device State */
+#define NX_DEV_COLD 1
+#define NX_DEV_INITALIZING 2
+#define NX_DEV_READY 3
+#define NX_DEV_NEED_RESET 4
+#define NX_DEV_NEED_QUISCENT 5
+#define NX_DEV_NEED_AER 6
+#define NX_DEV_FAILED 7
+
+#define NX_RCODE_DRIVER_INFO 0x20000000
+#define NX_RCODE_DRIVER_CAN_RELOAD 0x40000000
+#define NX_RCODE_FATAL_ERROR 0x80000000
+#define NX_FWERROR_PEGNUM(code) ((code) & 0xff)
+#define NX_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
+
+#define FW_POLL_DELAY (2 * HZ)
+#define FW_FAIL_THRESH 3
+#define FW_POLL_THRESH 10
+
+#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
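+
+/*
+ * ISR_LEGACY_INT_TRIGGERED() tests bits 9:8 of the interrupt state
+ * register for the value 10b, which indicates a pending legacy
+ * interrupt for this function.
+ */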
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define PCIX_INT_VECTOR_BIT_F0 0x0080
+#define PCIX_INT_VECTOR_BIT_F1 0x0100
+#define PCIX_INT_VECTOR_BIT_F2 0x0200
+#define PCIX_INT_VECTOR_BIT_F3 0x0400
+#define PCIX_INT_VECTOR_BIT_F4 0x0800
+#define PCIX_INT_VECTOR_BIT_F5 0x1000
+#define PCIX_INT_VECTOR_BIT_F6 0x2000
+#define PCIX_INT_VECTOR_BIT_F7 0x4000
+
+struct netxen_legacy_intr_set {
+ uint32_t int_vec_bit;
+ uint32_t tgt_status_reg;
+ uint32_t tgt_mask_reg;
+ uint32_t pci_int_reg;
+};
+
+#define NX_LEGACY_INTR_CONFIG \
+{ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
+}
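+
+/*
+ * One entry per PCI function (0-7), indexed by function number; the
+ * int_vec_bit fields correspond to PCIX_INT_VECTOR_BIT_F0..F7 above.
+ */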
+
+#endif /* __NETXEN_NIC_HDR_H_ */
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
new file mode 100644
index 000000000000..3f89e57cae50
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -0,0 +1,1976 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/slab.h>
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+
+#include <net/ip.h>
+
+#define MASK(n) ((1ULL<<(n))-1)
+#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
+#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
+#define MS_WIN(addr) (addr & 0x0ffc0000)
+
+#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
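+
+/*
+ * MN_WIN()/OCM_WIN() compute the value written to the corresponding
+ * window register for a 64-bit DDR/OCM address (see
+ * netxen_nic_pci_set_window_2M()); GET_MEM_OFFS_2M() then yields the
+ * offset within the mapped window (MASK(18), i.e. 256K).
+ */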
+
+#define CRB_BLK(off) ((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
+#define CRB_WINDOW_2M (0x130060)
+#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
+#define CRB_INDIRECT_2M (0x1e0000UL)
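+
+/*
+ * For the 2M BAR, a CRB offset splits into a block (CRB_BLK) and
+ * sub-block (CRB_SUBBLK); CRB_HI() rebuilds the window value from the
+ * per-block hub/agent id in crb_hub_agt[] plus the 64K-aligned offset
+ * within the block.
+ */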
+
+static void netxen_nic_io_write_128M(struct netxen_adapter *adapter,
+ void __iomem *addr, u32 data);
+static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
+ void __iomem *addr);
+
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+ return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel(((u32) (val)), (addr));
+ writel(((u32) (val >> 32)), (addr + 4));
+}
+#endif
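+
+/*
+ * These fallbacks split the 64-bit access into two 32-bit accesses and
+ * are therefore not atomic; callers serialize them under ahw.mem_lock.
+ */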
+
+#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
+ ((adapter)->ahw.pci_base0 + (off))
+#define PCI_OFFSET_SECOND_RANGE(adapter, off) \
+ ((adapter)->ahw.pci_base1 + (off) - SECOND_PAGE_GROUP_START)
+#define PCI_OFFSET_THIRD_RANGE(adapter, off) \
+ ((adapter)->ahw.pci_base2 + (off) - THIRD_PAGE_GROUP_START)
+
+static void __iomem *pci_base_offset(struct netxen_adapter *adapter,
+ unsigned long off)
+{
+ if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
+ return PCI_OFFSET_FIRST_RANGE(adapter, off);
+
+ if (ADDR_IN_RANGE(off, SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_END))
+ return PCI_OFFSET_SECOND_RANGE(adapter, off);
+
+ if (ADDR_IN_RANGE(off, THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_END))
+ return PCI_OFFSET_THIRD_RANGE(adapter, off);
+
+ return NULL;
+}
+
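+/*
+ * Map from 128M-BAR CRB offsets to 2M-BAR offsets. Each sub-block
+ * entry is {valid, start_128M, end_128M, start_2M}; see
+ * netxen_nic_pci_get_crb_addr_2M() for how it is consumed.
+ */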
+static crb_128M_2M_block_map_t
+crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
+ {{{0, 0, 0, 0} } }, /* 0: PCI */
+ {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
+ {1, 0x0110000, 0x0120000, 0x130000},
+ {1, 0x0120000, 0x0122000, 0x124000},
+ {1, 0x0130000, 0x0132000, 0x126000},
+ {1, 0x0140000, 0x0142000, 0x128000},
+ {1, 0x0150000, 0x0152000, 0x12a000},
+ {1, 0x0160000, 0x0170000, 0x110000},
+ {1, 0x0170000, 0x0172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x01e0000, 0x01e0800, 0x122000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
+ {{{0, 0, 0, 0} } }, /* 3: */
+ {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
+ {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
+ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
+ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
+ {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x08f0000, 0x08f2000, 0x172000} } },
+ {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x09f0000, 0x09f2000, 0x176000} } },
+ {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0af0000, 0x0af2000, 0x17a000} } },
+ {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+ {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
+ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
+ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
+ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
+ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
+ {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
+ {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
+ {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
+ {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
+ {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
+ {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
+ {{{0, 0, 0, 0} } }, /* 23: */
+ {{{0, 0, 0, 0} } }, /* 24: */
+ {{{0, 0, 0, 0} } }, /* 25: */
+ {{{0, 0, 0, 0} } }, /* 26: */
+ {{{0, 0, 0, 0} } }, /* 27: */
+ {{{0, 0, 0, 0} } }, /* 28: */
+ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
+ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
+ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
+ {{{0} } }, /* 32: PCI */
+ {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
+ {1, 0x2110000, 0x2120000, 0x130000},
+ {1, 0x2120000, 0x2122000, 0x124000},
+ {1, 0x2130000, 0x2132000, 0x126000},
+ {1, 0x2140000, 0x2142000, 0x128000},
+ {1, 0x2150000, 0x2152000, 0x12a000},
+ {1, 0x2160000, 0x2170000, 0x110000},
+ {1, 0x2170000, 0x2172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
+ {{{0} } }, /* 35: */
+ {{{0} } }, /* 36: */
+ {{{0} } }, /* 37: */
+ {{{0} } }, /* 38: */
+ {{{0} } }, /* 39: */
+ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
+ {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
+ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
+ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
+ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
+ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
+ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
+ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
+ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
+ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
+ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
+ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
+ {{{0} } }, /* 52: */
+ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
+ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
+ {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
+ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
+ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
+ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
+ {{{0} } }, /* 59: I2C0 */
+ {{{0} } }, /* 60: I2C1 */
+ {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
+ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
+ {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
+};
+
+/*
+ * top 12 bits of crb internal address (hub, agent)
+ */
+static unsigned crb_hub_agt[64] =
+{
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PS,
+ NETXEN_HW_CRB_HUB_AGT_ADR_MN,
+ NETXEN_HW_CRB_HUB_AGT_ADR_MS,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SRE,
+ NETXEN_HW_CRB_HUB_AGT_ADR_NIU,
+ NETXEN_HW_CRB_HUB_AGT_ADR_QMN,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SQN0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SQN1,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SQN2,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SQN3,
+ NETXEN_HW_CRB_HUB_AGT_ADR_I2Q,
+ NETXEN_HW_CRB_HUB_AGT_ADR_TIMR,
+ NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGN4,
+ NETXEN_HW_CRB_HUB_AGT_ADR_XDMA,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGN0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGN1,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGN2,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGN3,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGND,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGNI,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGS0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGS1,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGS2,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGS3,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGSI,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SN,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_EG,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PS,
+ NETXEN_HW_CRB_HUB_AGT_ADR_CAM,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_TIMR,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7,
+ NETXEN_HW_CRB_HUB_AGT_ADR_XDMA,
+ NETXEN_HW_CRB_HUB_AGT_ADR_I2Q,
+ NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9,
+ NETXEN_HW_CRB_HUB_AGT_ADR_OCM0,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SMB,
+ NETXEN_HW_CRB_HUB_AGT_ADR_I2C0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_I2C1,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGNC,
+ 0,
+};
+
+/* PCI Windowing for DDR regions. */
+
+#define NETXEN_WINDOW_ONE 0x2000000 /* CRB Window: bit 25 of CRB address */
+
+#define NETXEN_PCIE_SEM_TIMEOUT 10000
+
+static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu);
+
+int
+netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg)
+{
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_LOCK(sem)));
+ if (done == 1)
+ break;
+ if (++timeout >= NETXEN_PCIE_SEM_TIMEOUT)
+ return -EIO;
+ msleep(1);
+ }
+
+ if (id_reg)
+ NXWR32(adapter, id_reg, adapter->portnum);
+
+ return 0;
+}
+
+void
+netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem)
+{
+ NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
+}
+
+static int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port)
+{
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447);
+ NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0+(0x10000*port), 0x5);
+ }
+
+ return 0;
+}
+
+/* Disable an XG interface */
+static int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
+{
+ __u32 mac_cfg;
+ u32 port = adapter->physical_port;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return 0;
+
+ if (port >= NETXEN_NIU_MAX_XG_PORTS)
+ return -EINVAL;
+
+ mac_cfg = 0;
+ if (NXWR32(adapter,
+ NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg))
+ return -EIO;
+ return 0;
+}
+
+#define NETXEN_UNICAST_ADDR(port, index) \
+ (NETXEN_UNICAST_ADDR_BASE+((port)*32)+((index)*8))
+#define NETXEN_MCAST_ADDR(port, index) \
+ (NETXEN_MULTICAST_ADDR_BASE+((port)*0x80)+((index)*8))
+#define MAC_HI(addr) \
+ (((addr)[2] << 16) | ((addr)[1] << 8) | ((addr)[0]))
+#define MAC_LO(addr) \
+ (((addr)[5] << 16) | ((addr)[4] << 8) | ((addr)[3]))
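+
+/*
+ * MAC_HI/MAC_LO pack the six MAC address bytes into the two 32-bit
+ * halves used by the NIU filter RAM: bytes 0-2 in the "HI" word and
+ * bytes 3-5 in the "LO" word.
+ */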
+
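+/*
+ * P2 promiscuous-mode changes are made with the MAC receive path
+ * disabled: clear bit 2 of XGE_CONFIG_0, wait for the NIU frame
+ * counter to drain, flip the promisc (0x2000) and allmulti (0x1000)
+ * bits in XGE_CONFIG_1, then re-enable the MAC.
+ */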
+static int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
+{
+ u32 mac_cfg;
+ u32 cnt = 0;
+ __u32 reg = 0x0200;
+ u32 port = adapter->physical_port;
+ u16 board_type = adapter->ahw.board_type;
+
+ if (port >= NETXEN_NIU_MAX_XG_PORTS)
+ return -EINVAL;
+
+ mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port));
+ mac_cfg &= ~0x4;
+ NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg);
+
+ if ((board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) ||
+ (board_type == NETXEN_BRDTYPE_P2_SB31_10G_HMEZ))
+ reg = (0x20 << port);
+
+ NXWR32(adapter, NETXEN_NIU_FRAME_COUNT_SELECT, reg);
+
+ mdelay(10);
+
+ while (NXRD32(adapter, NETXEN_NIU_FRAME_COUNT) && ++cnt < 20)
+ mdelay(10);
+
+ if (cnt < 20) {
+
+ reg = NXRD32(adapter,
+ NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port));
+
+ if (mode == NETXEN_NIU_PROMISC_MODE)
+ reg = (reg | 0x2000UL);
+ else
+ reg = (reg & ~0x2000UL);
+
+ if (mode == NETXEN_NIU_ALLMULTI_MODE)
+ reg = (reg | 0x1000UL);
+ else
+ reg = (reg & ~0x1000UL);
+
+ NXWR32(adapter,
+ NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg);
+ }
+
+ mac_cfg |= 0x4;
+ NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg);
+
+ return 0;
+}
+
+static int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
+{
+ u32 mac_hi, mac_lo;
+ u32 reg_hi, reg_lo;
+
+ u8 phy = adapter->physical_port;
+
+ if (phy >= NETXEN_NIU_MAX_XG_PORTS)
+ return -EINVAL;
+
+ mac_lo = ((u32)addr[0] << 16) | ((u32)addr[1] << 24);
+ mac_hi = addr[2] | ((u32)addr[3] << 8) |
+ ((u32)addr[4] << 16) | ((u32)addr[5] << 24);
+
+ reg_lo = NETXEN_NIU_XGE_STATION_ADDR_0_1 + (0x10000 * phy);
+ reg_hi = NETXEN_NIU_XGE_STATION_ADDR_0_HI + (0x10000 * phy);
+
+ /* write twice to flush */
+ if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi))
+ return -EIO;
+ if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi))
+ return -EIO;
+
+ return 0;
+}
+
+static int
+netxen_nic_enable_mcast_filter(struct netxen_adapter *adapter)
+{
+ u32 val = 0;
+ u16 port = adapter->physical_port;
+ u8 *addr = adapter->mac_addr;
+
+ if (adapter->mc_enabled)
+ return 0;
+
+ val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG);
+ val |= (1UL << (28+port));
+ NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
+
+ /* add broadcast addr to filter */
+ val = 0xffffff;
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val);
+
+ /* add station addr to filter */
+ val = MAC_HI(addr);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), val);
+ val = MAC_LO(addr);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, val);
+
+ adapter->mc_enabled = 1;
+ return 0;
+}
+
+static int
+netxen_nic_disable_mcast_filter(struct netxen_adapter *adapter)
+{
+ u32 val = 0;
+ u16 port = adapter->physical_port;
+ u8 *addr = adapter->mac_addr;
+
+ if (!adapter->mc_enabled)
+ return 0;
+
+ val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG);
+ val &= ~(1UL << (28+port));
+ NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
+
+ val = MAC_HI(addr);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val);
+ val = MAC_LO(addr);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val);
+
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), 0);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, 0);
+
+ adapter->mc_enabled = 0;
+ return 0;
+}
+
+static int
+netxen_nic_set_mcast_addr(struct netxen_adapter *adapter,
+ int index, u8 *addr)
+{
+ u32 hi = 0, lo = 0;
+ u16 port = adapter->physical_port;
+
+ lo = MAC_LO(addr);
+ hi = MAC_HI(addr);
+
+ NXWR32(adapter, NETXEN_MCAST_ADDR(port, index), hi);
+ NXWR32(adapter, NETXEN_MCAST_ADDR(port, index)+4, lo);
+
+ return 0;
+}
+
+static void netxen_p2_nic_set_multi(struct net_device *netdev)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netdev_hw_addr *ha;
+ u8 null_addr[6];
+ int i;
+
+ memset(null_addr, 0, 6);
+
+ if (netdev->flags & IFF_PROMISC) {
+
+ adapter->set_promisc(adapter,
+ NETXEN_NIU_PROMISC_MODE);
+
+ /* Full promiscuous mode */
+ netxen_nic_disable_mcast_filter(adapter);
+
+ return;
+ }
+
+ if (netdev_mc_empty(netdev)) {
+ adapter->set_promisc(adapter,
+ NETXEN_NIU_NON_PROMISC_MODE);
+ netxen_nic_disable_mcast_filter(adapter);
+ return;
+ }
+
+ adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE);
+ if (netdev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(netdev) > adapter->max_mc_count) {
+ netxen_nic_disable_mcast_filter(adapter);
+ return;
+ }
+
+ netxen_nic_enable_mcast_filter(adapter);
+
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev)
+ netxen_nic_set_mcast_addr(adapter, i++, ha->addr);
+
+ /* Clear out remaining addresses */
+ while (i < adapter->max_mc_count)
+ netxen_nic_set_mcast_addr(adapter, i++, null_addr);
+}
+
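+/*
+ * Inject pre-built control descriptors into the tx ring, which is
+ * shared with regular transmit traffic; the queue is stopped (and
+ * possibly re-woken) when fewer than nr_desc slots are free.
+ */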
+static int
+netxen_send_cmd_descs(struct netxen_adapter *adapter,
+ struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
+{
+ u32 i, producer, consumer;
+ struct netxen_cmd_buffer *pbuf;
+ struct cmd_desc_type0 *cmd_desc;
+ struct nx_host_tx_ring *tx_ring;
+
+ i = 0;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return -EIO;
+
+ tx_ring = adapter->tx_ring;
+ __netif_tx_lock_bh(tx_ring->txq);
+
+ producer = tx_ring->producer;
+ consumer = tx_ring->sw_consumer;
+
+ if (nr_desc >= netxen_tx_avail(tx_ring)) {
+ netif_tx_stop_queue(tx_ring->txq);
+ smp_mb();
+ if (netxen_tx_avail(tx_ring) > nr_desc) {
+ if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
+ netif_tx_wake_queue(tx_ring->txq);
+ } else {
+ __netif_tx_unlock_bh(tx_ring->txq);
+ return -EBUSY;
+ }
+ }
+
+ do {
+ cmd_desc = &cmd_desc_arr[i];
+
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+ pbuf->skb = NULL;
+ pbuf->frag_count = 0;
+
+ memcpy(&tx_ring->desc_head[producer],
+ &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ i++;
+
+ } while (i != nr_desc);
+
+ tx_ring->producer = producer;
+
+ netxen_nic_update_cmd_producer(adapter, tx_ring);
+
+ __netif_tx_unlock_bh(tx_ring->txq);
+
+ return 0;
+}
+
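+/*
+ * P3 firmware requests are sent as a single command descriptor: qhdr
+ * carries the request class (e.g. NX_NIC_REQUEST/NX_HOST_REQUEST << 23),
+ * req_hdr the opcode and port number, and words[] the payload.
+ */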
+static int
+nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op)
+{
+ nx_nic_req_t req;
+ nx_mac_req_t *mac_req;
+ u64 word;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23);
+
+ word = NX_MAC_EVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ mac_req = (nx_mac_req_t *)&req.words[0];
+ mac_req->op = op;
+ memcpy(mac_req->mac_addr, addr, 6);
+
+ return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+}
+
+static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
+ const u8 *addr, struct list_head *del_list)
+{
+ struct list_head *head;
+ nx_mac_list_t *cur;
+
+ /* look up if already exists */
+ list_for_each(head, del_list) {
+ cur = list_entry(head, nx_mac_list_t, list);
+
+ if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+ list_move_tail(head, &adapter->mac_list);
+ return 0;
+ }
+ }
+
+ cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC);
+ if (cur == NULL) {
+ printk(KERN_ERR "%s: failed to add mac address filter\n",
+ adapter->netdev->name);
+ return -ENOMEM;
+ }
+ memcpy(cur->mac_addr, addr, ETH_ALEN);
+ list_add_tail(&cur->list, &adapter->mac_list);
+ return nx_p3_sre_macaddr_change(adapter,
+ cur->mac_addr, NETXEN_MAC_ADD);
+}
+
+static void netxen_p3_nic_set_multi(struct net_device *netdev)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netdev_hw_addr *ha;
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+ u32 mode = VPORT_MISS_MODE_DROP;
+ LIST_HEAD(del_list);
+ struct list_head *head;
+ nx_mac_list_t *cur;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
+ list_splice_tail_init(&adapter->mac_list, &del_list);
+
+ nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list);
+ nx_p3_nic_add_mac(adapter, bcast_addr, &del_list);
+
+ if (netdev->flags & IFF_PROMISC) {
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ goto send_fw_cmd;
+ }
+
+ if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > adapter->max_mc_count)) {
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ goto send_fw_cmd;
+ }
+
+ if (!netdev_mc_empty(netdev)) {
+ netdev_for_each_mc_addr(ha, netdev)
+ nx_p3_nic_add_mac(adapter, ha->addr, &del_list);
+ }
+
+send_fw_cmd:
+ adapter->set_promisc(adapter, mode);
+ head = &del_list;
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, nx_mac_list_t, list);
+
+ nx_p3_sre_macaddr_change(adapter,
+ cur->mac_addr, NETXEN_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+static int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
+{
+ nx_nic_req_t req;
+ u64 word;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(mode);
+
+ return netxen_send_cmd_descs(adapter,
+ (struct cmd_desc_type0 *)&req, 1);
+}
+
+void netxen_p3_free_mac_list(struct netxen_adapter *adapter)
+{
+ nx_mac_list_t *cur;
+ struct list_head *head = &adapter->mac_list;
+
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, nx_mac_list_t, list);
+ nx_p3_sre_macaddr_change(adapter,
+ cur->mac_addr, NETXEN_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+static int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
+{
+ /* assuming caller has already copied new addr to netdev */
+ netxen_p3_nic_set_multi(adapter->netdev);
+ return 0;
+}
+
+#define NETXEN_CONFIG_INTR_COALESCE 3
+
+/*
+ * Send the interrupt coalescing parameter set by ethtool to the card.
+ */
+int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
+{
+ nx_nic_req_t req;
+ u64 word[6];
+ int rv, i;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ memset(word, 0, sizeof(word));
+
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word[0] = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word[0]);
+
+ memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
+ for (i = 0; i < 6; i++)
+ req.words[i] = cpu_to_le64(word[i]);
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "ERROR. Could not send "
+ "interrupt coalescing parameters\n");
+ }
+
+ return rv;
+}
+
+int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int rv = 0;
+
+ if (!test_bit(__NX_FW_ATTACHED, &adapter->state))
+ return 0;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(enable);
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "ERROR. Could not send "
+ "configure hw lro request\n");
+ }
+
+ return rv;
+}
+
+int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int rv = 0;
+
+ if (!!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED) == enable)
+ return rv;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_CONFIG_BRIDGING |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(enable);
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "ERROR. Could not send "
+ "configure bridge mode request\n");
+ }
+
+ adapter->flags ^= NETXEN_NIC_BRIDGE_ENABLED;
+
+ return rv;
+}
+
+#define RSS_HASHTYPE_IP_TCP 0x3
+
+int netxen_config_rss(struct netxen_adapter *adapter, int enable)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int i, rv;
+
+ static const u64 key[] = {
+ 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL
+ };
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ /*
+ * RSS request:
+ * bits 3-0: hash_method
+ * 5-4: hash_type_ipv4
+ * 7-6: hash_type_ipv6
+ * 8: enable
+ * 9: use indirection table
+ * 47-10: reserved
+ * 63-48: indirection table mask
+ */
+ word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ ((u64)(enable & 0x1) << 8) |
+ ((0x7ULL) << 48);
+ req.words[0] = cpu_to_le64(word);
+ for (i = 0; i < ARRAY_SIZE(key); i++)
+ req.words[i+1] = cpu_to_le64(key[i]);
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "%s: could not configure RSS\n",
+ adapter->netdev->name);
+ }
+
+ return rv;
+}
+
+int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(cmd);
+ req.words[1] = cpu_to_le64(ip);
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "%s: could not notify %s IP 0x%x reuqest\n",
+ adapter->netdev->name,
+ (cmd == NX_IP_UP) ? "Add" : "Remove", ip);
+ }
+ return rv;
+}
+
+int netxen_linkevent_request(struct netxen_adapter *adapter, int enable)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+ req.words[0] = cpu_to_le64(enable | (enable << 8));
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "%s: could not configure link notification\n",
+ adapter->netdev->name);
+ }
+
+ return rv;
+}
+
+int netxen_send_lro_cleanup(struct netxen_adapter *adapter)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int rv;
+
+ if (!test_bit(__NX_FW_ATTACHED, &adapter->state))
+ return 0;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_LRO_REQUEST |
+ ((u64)adapter->portnum << 16) |
+ ((u64)NX_NIC_LRO_REQUEST_CLEANUP << 56);
+
+ req.req_hdr = cpu_to_le64(word);
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "%s: could not cleanup lro flows\n",
+ adapter->netdev->name);
+ }
+ return rv;
+}
+
+/*
+ * netxen_nic_change_mtu - Change the Maximum Transmission Unit
+ * @returns 0 on success, negative on failure
+ */
+
+#define MTU_FUDGE_FACTOR 100
+
+int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ int max_mtu;
+ int rc = 0;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ max_mtu = P3_MAX_MTU;
+ else
+ max_mtu = P2_MAX_MTU;
+
+ if (mtu > max_mtu) {
+ printk(KERN_ERR "%s: mtu > %d bytes unsupported\n",
+ netdev->name, max_mtu);
+ return -EINVAL;
+ }
+
+ if (adapter->set_mtu)
+ rc = adapter->set_mtu(adapter, mtu);
+
+ if (!rc)
+ netdev->mtu = mtu;
+
+ return rc;
+}
+
+static int netxen_get_flash_block(struct netxen_adapter *adapter, int base,
+ int size, __le32 * buf)
+{
+ int i, v, addr;
+ __le32 *ptr32;
+
+ addr = base;
+ ptr32 = buf;
+ for (i = 0; i < size / sizeof(u32); i++) {
+ if (netxen_rom_fast_read(adapter, addr, &v) == -1)
+ return -1;
+ *ptr32 = cpu_to_le32(v);
+ ptr32++;
+ addr += sizeof(u32);
+ }
+ if ((char *)buf + size > (char *)ptr32) {
+ __le32 local;
+ if (netxen_rom_fast_read(adapter, addr, &v) == -1)
+ return -1;
+ local = cpu_to_le32(v);
+ memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32);
+ }
+
+ return 0;
+}
+
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac)
+{
+ __le32 *pmac = (__le32 *) mac;
+ u32 offset;
+
+ offset = NX_FW_MAC_ADDR_OFFSET + (adapter->portnum * sizeof(u64));
+
+ if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1)
+ return -1;
+
+ if (*mac == cpu_to_le64(~0ULL)) {
+
+ offset = NX_OLD_MAC_ADDR_OFFSET +
+ (adapter->portnum * sizeof(u64));
+
+ if (netxen_get_flash_block(adapter,
+ offset, sizeof(u64), pmac) == -1)
+ return -1;
+
+ if (*mac == cpu_to_le64(~0ULL))
+ return -1;
+ }
+ return 0;
+}
+
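+/*
+ * CAM RAM stores two 6-byte MAC addresses per three dwords, so paired
+ * PCI functions share the middle dword; the odd function's address
+ * starts 16 bits into it.
+ */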
+int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac)
+{
+ uint32_t crbaddr, mac_hi, mac_lo;
+ int pci_func = adapter->ahw.pci_func;
+
+ crbaddr = CRB_MAC_BLOCK_START +
+ (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
+
+ mac_lo = NXRD32(adapter, crbaddr);
+ mac_hi = NXRD32(adapter, crbaddr+4);
+
+ if (pci_func & 1)
+ *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
+ else
+ *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));
+
+ return 0;
+}
+
+/*
+ * Changes the CRB window to the specified window.
+ */
+static void
+netxen_nic_pci_set_crbwindow_128M(struct netxen_adapter *adapter,
+ u32 window)
+{
+ void __iomem *offset;
+ int count = 10;
+ u8 func = adapter->ahw.pci_func;
+
+ if (adapter->ahw.crb_win == window)
+ return;
+
+ offset = PCI_OFFSET_SECOND_RANGE(adapter,
+ NETXEN_PCIX_PH_REG(PCIE_CRB_WINDOW_REG(func)));
+
+ writel(window, offset);
+ do {
+ if (window == readl(offset))
+ break;
+
+ if (printk_ratelimit())
+ dev_warn(&adapter->pdev->dev,
+ "failed to set CRB window to %d\n",
+ (window == NETXEN_WINDOW_ONE));
+ udelay(1);
+
+ } while (--count > 0);
+
+ if (count > 0)
+ adapter->ahw.crb_win = window;
+}
+
+/*
+ * Returns < 0 if off is not valid,
+ * 1 if window access is needed. 'off' is set to offset from
+ * CRB space in 128M pci map
+ * 0 if no window access is needed. 'off' is set to 2M addr
+ * In: 'off' is offset from base in 128M pci map
+ */
+static int
+netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter,
+ ulong off, void __iomem **addr)
+{
+ crb_128M_2M_sub_block_map_t *m;
+
+ if ((off >= NETXEN_CRB_MAX) || (off < NETXEN_PCI_CRBSPACE))
+ return -EINVAL;
+
+ off -= NETXEN_PCI_CRBSPACE;
+
+ /*
+ * Try direct map
+ */
+ m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
+
+ if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
+ *addr = adapter->ahw.pci_base0 + m->start_2M +
+ (off - m->start_128M);
+ return 0;
+ }
+
+ /*
+ * Not in direct map, use crb window
+ */
+ *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M +
+ (off & MASK(16));
+ return 1;
+}
+
+/*
+ * Program the CRB window for an indirect access.
+ * In: 'off' is an offset from CRB space in the 128M pci map.
+ * Caller must hold the CRB window lock (see crb_win_lock()).
+ */
+static void
+netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong off)
+{
+ u32 window;
+ void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;
+
+ off -= NETXEN_PCI_CRBSPACE;
+
+ window = CRB_HI(off);
+
+ writel(window, addr);
+ if (readl(addr) != window) {
+ if (printk_ratelimit())
+ dev_warn(&adapter->pdev->dev,
+ "failed to set CRB window to %d off 0x%lx\n",
+ window, off);
+ }
+}
+
+static void __iomem *
+netxen_nic_map_indirect_address_128M(struct netxen_adapter *adapter,
+ ulong win_off, void __iomem **mem_ptr)
+{
+ ulong off = win_off;
+ void __iomem *addr;
+ resource_size_t mem_base;
+
+ if (ADDR_IN_WINDOW1(win_off))
+ off = NETXEN_CRB_NORMAL(win_off);
+
+ addr = pci_base_offset(adapter, off);
+ if (addr)
+ return addr;
+
+ if (adapter->ahw.pci_len0 == 0)
+ off -= NETXEN_PCI_CRBSPACE;
+
+ mem_base = pci_resource_start(adapter->pdev, 0);
+ *mem_ptr = ioremap(mem_base + (off & PAGE_MASK), PAGE_SIZE);
+ if (*mem_ptr)
+ addr = *mem_ptr + (off & (PAGE_SIZE - 1));
+
+ return addr;
+}
+
+static int
+netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, ulong off, u32 data)
+{
+ unsigned long flags;
+ void __iomem *addr, *mem_ptr = NULL;
+
+ addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr);
+ if (!addr)
+ return -EIO;
+
+ if (ADDR_IN_WINDOW1(off)) { /* Window 1 */
+ netxen_nic_io_write_128M(adapter, addr, data);
+ } else { /* Window 0 */
+ write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ netxen_nic_pci_set_crbwindow_128M(adapter, 0);
+ writel(data, addr);
+ netxen_nic_pci_set_crbwindow_128M(adapter,
+ NETXEN_WINDOW_ONE);
+ write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ }
+
+ if (mem_ptr)
+ iounmap(mem_ptr);
+
+ return 0;
+}
+
+static u32
+netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, ulong off)
+{
+ unsigned long flags;
+ void __iomem *addr, *mem_ptr = NULL;
+ u32 data;
+
+ addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr);
+ if (!addr)
+ return -EIO;
+
+ if (ADDR_IN_WINDOW1(off)) { /* Window 1 */
+ data = netxen_nic_io_read_128M(adapter, addr);
+ } else { /* Window 0 */
+ write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ netxen_nic_pci_set_crbwindow_128M(adapter, 0);
+ data = readl(addr);
+ netxen_nic_pci_set_crbwindow_128M(adapter,
+ NETXEN_WINDOW_ONE);
+ write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ }
+
+ if (mem_ptr)
+ iounmap(mem_ptr);
+
+ return data;
+}
+
+static int
+netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, ulong off, u32 data)
+{
+ unsigned long flags;
+ int rv;
+ void __iomem *addr = NULL;
+
+ rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);
+
+ if (rv == 0) {
+ writel(data, addr);
+ return 0;
+ }
+
+ if (rv > 0) {
+ /* indirect access */
+ write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ crb_win_lock(adapter);
+ netxen_nic_pci_set_crbwindow_2M(adapter, off);
+ writel(data, addr);
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ return 0;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid offset: 0x%016lx\n", __func__, off);
+ dump_stack();
+ return -EIO;
+}
+
+static u32
+netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, ulong off)
+{
+ unsigned long flags;
+ int rv;
+ u32 data;
+ void __iomem *addr = NULL;
+
+ rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);
+
+ if (rv == 0)
+ return readl(addr);
+
+ if (rv > 0) {
+ /* indirect access */
+ write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ crb_win_lock(adapter);
+ netxen_nic_pci_set_crbwindow_2M(adapter, off);
+ data = readl(addr);
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ return data;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid offset: 0x%016lx\n", __func__, off);
+ dump_stack();
+ return -1;
+}
+
+/* window 1 registers only */
+static void netxen_nic_io_write_128M(struct netxen_adapter *adapter,
+ void __iomem *addr, u32 data)
+{
+ read_lock(&adapter->ahw.crb_lock);
+ writel(data, addr);
+ read_unlock(&adapter->ahw.crb_lock);
+}
+
+static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
+ void __iomem *addr)
+{
+ u32 val;
+
+ read_lock(&adapter->ahw.crb_lock);
+ val = readl(addr);
+ read_unlock(&adapter->ahw.crb_lock);
+
+ return val;
+}
+
+static void netxen_nic_io_write_2M(struct netxen_adapter *adapter,
+ void __iomem *addr, u32 data)
+{
+ writel(data, addr);
+}
+
+static u32 netxen_nic_io_read_2M(struct netxen_adapter *adapter,
+ void __iomem *addr)
+{
+ return readl(addr);
+}
+
+void __iomem *
+netxen_get_ioaddr(struct netxen_adapter *adapter, u32 offset)
+{
+ void __iomem *addr = NULL;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ if ((offset < NETXEN_CRB_PCIX_HOST2) &&
+ (offset > NETXEN_CRB_PCIX_HOST))
+ addr = PCI_OFFSET_SECOND_RANGE(adapter, offset);
+ else
+ addr = NETXEN_CRB_NORMALIZE(adapter, offset);
+ } else {
+ WARN_ON(netxen_nic_pci_get_crb_addr_2M(adapter,
+ offset, &addr));
+ }
+
+ return addr;
+}
+
+static int
+netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter,
+ u64 addr, u32 *start)
+{
+ if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
+ *start = (addr - NETXEN_ADDR_OCM0 + NETXEN_PCI_OCM0);
+ return 0;
+ } else if (ADDR_IN_RANGE(addr,
+ NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
+ *start = (addr - NETXEN_ADDR_OCM1 + NETXEN_PCI_OCM1);
+ return 0;
+ }
+
+ return -EIO;
+}
+
+static int
+netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
+ u64 addr, u32 *start)
+{
+ u32 window;
+
+ window = OCM_WIN(addr);
+
+ writel(window, adapter->ahw.ocm_win_crb);
+ /* read back to flush */
+ readl(adapter->ahw.ocm_win_crb);
+
+ adapter->ahw.ocm_win = window;
+ *start = NETXEN_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
+ return 0;
+}
+
+static int
+netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off,
+ u64 *data, int op)
+{
+ void __iomem *addr, *mem_ptr = NULL;
+ resource_size_t mem_base;
+ int ret;
+ u32 start;
+
+ spin_lock(&adapter->ahw.mem_lock);
+
+ ret = adapter->pci_set_window(adapter, off, &start);
+ if (ret != 0)
+ goto unlock;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ addr = adapter->ahw.pci_base0 + start;
+ } else {
+ addr = pci_base_offset(adapter, start);
+ if (addr)
+ goto noremap;
+
+ mem_base = pci_resource_start(adapter->pdev, 0) +
+ (start & PAGE_MASK);
+ mem_ptr = ioremap(mem_base, PAGE_SIZE);
+ if (mem_ptr == NULL) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ addr = mem_ptr + (start & (PAGE_SIZE-1));
+ }
+noremap:
+ if (op == 0) /* read */
+ *data = readq(addr);
+ else /* write */
+ writeq(*data, addr);
+
+unlock:
+ spin_unlock(&adapter->ahw.mem_lock);
+
+ if (mem_ptr)
+ iounmap(mem_ptr);
+ return ret;
+}
+
+void
+netxen_pci_camqm_read_2M(struct netxen_adapter *adapter, u64 off, u64 *data)
+{
+ void __iomem *addr = adapter->ahw.pci_base0 +
+ NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM);
+
+ spin_lock(&adapter->ahw.mem_lock);
+ *data = readq(addr);
+ spin_unlock(&adapter->ahw.mem_lock);
+}
+
+void
+netxen_pci_camqm_write_2M(struct netxen_adapter *adapter, u64 off, u64 data)
+{
+ void __iomem *addr = adapter->ahw.pci_base0 +
+ NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM);
+
+ spin_lock(&adapter->ahw.mem_lock);
+ writeq(data, addr);
+ spin_unlock(&adapter->ahw.mem_lock);
+}
+
+#define MAX_CTL_CHECK 1000
+
+static int
+netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter,
+ u64 off, u64 data)
+{
+ int j, ret;
+ u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* P2 has different SIU and MIU test agent base addr */
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
+ NETXEN_ADDR_QDR_NET_MAX_P2)) {
+ mem_crb = pci_base_offset(adapter,
+ NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE);
+ addr_hi = SIU_TEST_AGT_ADDR_HI;
+ data_lo = SIU_TEST_AGT_WRDATA_LO;
+ data_hi = SIU_TEST_AGT_WRDATA_HI;
+ off_lo = off & SIU_TEST_AGT_ADDR_MASK;
+ off_hi = SIU_TEST_AGT_UPPER_ADDR(off);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
+ mem_crb = pci_base_offset(adapter,
+ NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ addr_hi = MIU_TEST_AGT_ADDR_HI;
+ data_lo = MIU_TEST_AGT_WRDATA_LO;
+ data_hi = MIU_TEST_AGT_WRDATA_HI;
+ off_lo = off & MIU_TEST_AGT_ADDR_MASK;
+ off_hi = 0;
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) ||
+ ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
+ if (adapter->ahw.pci_len0 != 0) {
+ return netxen_nic_pci_mem_access_direct(adapter,
+ off, &data, 1);
+ }
+ }
+
+ return -EIO;
+
+correct:
+ spin_lock(&adapter->ahw.mem_lock);
+ netxen_nic_pci_set_crbwindow_128M(adapter, 0);
+
+ writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(off_hi, (mem_crb + addr_hi));
+ writel(data & 0xffffffff, (mem_crb + data_lo));
+ writel((data >> 32) & 0xffffffff, (mem_crb + data_hi));
+ writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
+ (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl((mem_crb + TEST_AGT_CTRL));
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to write through agent\n");
+ ret = -EIO;
+ } else
+ ret = 0;
+
+ netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE);
+ spin_unlock(&adapter->ahw.mem_lock);
+ return ret;
+}
+
+static int
+netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter,
+ u64 off, u64 *data)
+{
+ int j, ret;
+ u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo;
+ u64 val;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* P2 has different SIU and MIU test agent base addr */
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
+ NETXEN_ADDR_QDR_NET_MAX_P2)) {
+ mem_crb = pci_base_offset(adapter,
+ NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE);
+ addr_hi = SIU_TEST_AGT_ADDR_HI;
+ data_lo = SIU_TEST_AGT_RDDATA_LO;
+ data_hi = SIU_TEST_AGT_RDDATA_HI;
+ off_lo = off & SIU_TEST_AGT_ADDR_MASK;
+ off_hi = SIU_TEST_AGT_UPPER_ADDR(off);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
+ mem_crb = pci_base_offset(adapter,
+ NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ addr_hi = MIU_TEST_AGT_ADDR_HI;
+ data_lo = MIU_TEST_AGT_RDDATA_LO;
+ data_hi = MIU_TEST_AGT_RDDATA_HI;
+ off_lo = off & MIU_TEST_AGT_ADDR_MASK;
+ off_hi = 0;
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) ||
+ ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
+ if (adapter->ahw.pci_len0 != 0) {
+ return netxen_nic_pci_mem_access_direct(adapter,
+ off, data, 0);
+ }
+ }
+
+ return -EIO;
+
+correct:
+ spin_lock(&adapter->ahw.mem_lock);
+ netxen_nic_pci_set_crbwindow_128M(adapter, 0);
+
+ writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(off_hi, (mem_crb + addr_hi));
+ writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START|TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to read through agent\n");
+ ret = -EIO;
+ } else {
+ temp = readl(mem_crb + data_hi);
+ val = ((u64)temp << 32);
+ val |= readl(mem_crb + data_lo);
+ *data = val;
+ ret = 0;
+ }
+
+ netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE);
+ spin_unlock(&adapter->ahw.mem_lock);
+
+ return ret;
+}
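+
+/*
+ * Note: both 128M test-agent accessors above use the same handshake:
+ * program the 64-bit target into ADDR_LO/ADDR_HI, load WRDATA_LO/HI for
+ * a write, pulse TA_CTL_ENABLE and then TA_CTL_START | TA_CTL_ENABLE
+ * into TEST_AGT_CTRL, and poll TA_CTL_BUSY up to MAX_CTL_CHECK times
+ * before sampling RDDATA_LO/HI (reads) or returning -EIO on timeout.
+ */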
+
+static int
+netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
+ u64 off, u64 data)
+{
+ int j, ret;
+ u32 temp, off8;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* P3 onward, test agent base for MIU and SIU is same */
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
+ NETXEN_ADDR_QDR_NET_MAX_P3)) {
+ mem_crb = netxen_get_ioaddr(adapter,
+ NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
+ mem_crb = netxen_get_ioaddr(adapter,
+ NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX))
+ return netxen_nic_pci_mem_access_direct(adapter, off, &data, 1);
+
+ return -EIO;
+
+correct:
+ off8 = off & 0xfffffff8;
+
+ spin_lock(&adapter->ahw.mem_lock);
+
+ writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+
+ writel(data & 0xffffffff,
+ mem_crb + MIU_TEST_AGT_WRDATA_LO);
+ writel((data >> 32) & 0xffffffff,
+ mem_crb + MIU_TEST_AGT_WRDATA_HI);
+
+ writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
+ (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to write through agent\n");
+ ret = -EIO;
+ } else
+ ret = 0;
+
+ spin_unlock(&adapter->ahw.mem_lock);
+
+ return ret;
+}
+
+static int
+netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
+ u64 off, u64 *data)
+{
+ int j, ret;
+ u32 temp, off8;
+ u64 val;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* P3 onward, test agent base for MIU and SIU is same */
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
+ NETXEN_ADDR_QDR_NET_MAX_P3)) {
+ mem_crb = netxen_get_ioaddr(adapter,
+ NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
+ mem_crb = netxen_get_ioaddr(adapter,
+ NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
+ return netxen_nic_pci_mem_access_direct(adapter,
+ off, data, 0);
+ }
+
+ return -EIO;
+
+correct:
+ off8 = off & 0xfffffff8;
+
+ spin_lock(&adapter->ahw.mem_lock);
+
+ writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+ writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to read through agent\n");
+ ret = -EIO;
+ } else {
+ val = (u64)(readl(mem_crb + MIU_TEST_AGT_RDDATA_HI)) << 32;
+ val |= readl(mem_crb + MIU_TEST_AGT_RDDATA_LO);
+ *data = val;
+ ret = 0;
+ }
+
+ spin_unlock(&adapter->ahw.mem_lock);
+
+ return ret;
+}
+
+void
+netxen_setup_hwops(struct netxen_adapter *adapter)
+{
+ adapter->init_port = netxen_niu_xg_init_port;
+ adapter->stop_port = netxen_niu_disable_xg_port;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+		adapter->crb_read = netxen_nic_hw_read_wx_128M;
+		adapter->crb_write = netxen_nic_hw_write_wx_128M;
+		adapter->pci_set_window = netxen_nic_pci_set_window_128M;
+		adapter->pci_mem_read = netxen_nic_pci_mem_read_128M;
+		adapter->pci_mem_write = netxen_nic_pci_mem_write_128M;
+		adapter->io_read = netxen_nic_io_read_128M;
+		adapter->io_write = netxen_nic_io_write_128M;
+
+ adapter->macaddr_set = netxen_p2_nic_set_mac_addr;
+ adapter->set_multi = netxen_p2_nic_set_multi;
+ adapter->set_mtu = netxen_nic_set_mtu_xgb;
+ adapter->set_promisc = netxen_p2_nic_set_promisc;
+
+ } else {
+		adapter->crb_read = netxen_nic_hw_read_wx_2M;
+		adapter->crb_write = netxen_nic_hw_write_wx_2M;
+		adapter->pci_set_window = netxen_nic_pci_set_window_2M;
+		adapter->pci_mem_read = netxen_nic_pci_mem_read_2M;
+		adapter->pci_mem_write = netxen_nic_pci_mem_write_2M;
+		adapter->io_read = netxen_nic_io_read_2M;
+		adapter->io_write = netxen_nic_io_write_2M;
+
+ adapter->set_mtu = nx_fw_cmd_set_mtu;
+ adapter->set_promisc = netxen_p3_nic_set_promisc;
+ adapter->macaddr_set = netxen_p3_nic_set_mac_addr;
+ adapter->set_multi = netxen_p3_nic_set_multi;
+
+ adapter->phy_read = nx_fw_cmd_query_phy;
+ adapter->phy_write = nx_fw_cmd_set_phy;
+ }
+}
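+
+/*
+ * After netxen_setup_hwops() callers go through these indirections
+ * without checking the ASIC revision again; a minimal sketch of the
+ * intended usage (illustrative only, not driver code):
+ *
+ *	adapter->pci_mem_write(adapter, flashaddr, data);
+ *	val = adapter->io_read(adapter, ioaddr);
+ */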
+
+int netxen_nic_get_board_info(struct netxen_adapter *adapter)
+{
+ int offset, board_type, magic;
+ struct pci_dev *pdev = adapter->pdev;
+
+ offset = NX_FW_MAGIC_OFFSET;
+ if (netxen_rom_fast_read(adapter, offset, &magic))
+ return -EIO;
+
+ if (magic != NETXEN_BDINFO_MAGIC) {
+ dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
+ magic);
+ return -EIO;
+ }
+
+ offset = NX_BRDTYPE_OFFSET;
+ if (netxen_rom_fast_read(adapter, offset, &board_type))
+ return -EIO;
+
+ if (board_type == NETXEN_BRDTYPE_P3_4_GB_MM) {
+ u32 gpio = NXRD32(adapter, NETXEN_ROMUSB_GLB_PAD_GPIO_I);
+ if ((gpio & 0x8000) == 0)
+ board_type = NETXEN_BRDTYPE_P3_10G_TP;
+ }
+
+ adapter->ahw.board_type = board_type;
+
+ switch (board_type) {
+ case NETXEN_BRDTYPE_P2_SB35_4G:
+ adapter->ahw.port_type = NETXEN_NIC_GBE;
+ break;
+ case NETXEN_BRDTYPE_P2_SB31_10G:
+ case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
+ case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
+ case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+ case NETXEN_BRDTYPE_P3_HMEZ:
+ case NETXEN_BRDTYPE_P3_XG_LOM:
+ case NETXEN_BRDTYPE_P3_10G_CX4:
+ case NETXEN_BRDTYPE_P3_10G_CX4_LP:
+ case NETXEN_BRDTYPE_P3_IMEZ:
+ case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
+ case NETXEN_BRDTYPE_P3_10G_SFP_CT:
+ case NETXEN_BRDTYPE_P3_10G_SFP_QT:
+ case NETXEN_BRDTYPE_P3_10G_XFP:
+ case NETXEN_BRDTYPE_P3_10000_BASE_T:
+ adapter->ahw.port_type = NETXEN_NIC_XGBE;
+ break;
+ case NETXEN_BRDTYPE_P1_BD:
+ case NETXEN_BRDTYPE_P1_SB:
+ case NETXEN_BRDTYPE_P1_SMAX:
+ case NETXEN_BRDTYPE_P1_SOCK:
+ case NETXEN_BRDTYPE_P3_REF_QG:
+ case NETXEN_BRDTYPE_P3_4_GB:
+ case NETXEN_BRDTYPE_P3_4_GB_MM:
+ adapter->ahw.port_type = NETXEN_NIC_GBE;
+ break;
+ case NETXEN_BRDTYPE_P3_10G_TP:
+ adapter->ahw.port_type = (adapter->portnum < 2) ?
+ NETXEN_NIC_XGBE : NETXEN_NIC_GBE;
+ break;
+ default:
+ dev_err(&pdev->dev, "unknown board type %x\n", board_type);
+ adapter->ahw.port_type = NETXEN_NIC_XGBE;
+ break;
+ }
+
+ return 0;
+}
+
+/* NIU access sections */
+static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu)
+{
+ new_mtu += MTU_FUDGE_FACTOR;
+ if (adapter->physical_port == 0)
+ NXWR32(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, new_mtu);
+ else
+ NXWR32(adapter, NETXEN_NIU_XG1_MAX_FRAME_SIZE, new_mtu);
+ return 0;
+}
+
+void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
+{
+ __u32 status;
+ __u32 autoneg;
+ __u32 port_mode;
+
+ if (!netif_carrier_ok(adapter->netdev)) {
+ adapter->link_speed = 0;
+ adapter->link_duplex = -1;
+ adapter->link_autoneg = AUTONEG_ENABLE;
+ return;
+ }
+
+ if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ port_mode = NXRD32(adapter, NETXEN_PORT_MODE_ADDR);
+ if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
+ adapter->link_speed = SPEED_1000;
+ adapter->link_duplex = DUPLEX_FULL;
+ adapter->link_autoneg = AUTONEG_DISABLE;
+ return;
+ }
+
+ if (adapter->phy_read &&
+ adapter->phy_read(adapter,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+ &status) == 0) {
+ if (netxen_get_phy_link(status)) {
+ switch (netxen_get_phy_speed(status)) {
+ case 0:
+ adapter->link_speed = SPEED_10;
+ break;
+ case 1:
+ adapter->link_speed = SPEED_100;
+ break;
+ case 2:
+ adapter->link_speed = SPEED_1000;
+ break;
+ default:
+ adapter->link_speed = 0;
+ break;
+ }
+ switch (netxen_get_phy_duplex(status)) {
+ case 0:
+ adapter->link_duplex = DUPLEX_HALF;
+ break;
+ case 1:
+ adapter->link_duplex = DUPLEX_FULL;
+ break;
+ default:
+ adapter->link_duplex = -1;
+ break;
+ }
+ if (adapter->phy_read &&
+ adapter->phy_read(adapter,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
+				&autoneg) == 0)
+ adapter->link_autoneg = autoneg;
+ } else
+ goto link_down;
+ } else {
+ link_down:
+ adapter->link_speed = 0;
+ adapter->link_duplex = -1;
+ }
+ }
+}
+
+int
+netxen_nic_wol_supported(struct netxen_adapter *adapter)
+{
+ u32 wol_cfg;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 0;
+
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV);
+ if (wol_cfg & (1UL << adapter->portnum)) {
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG);
+ if (wol_cfg & (1 << adapter->portnum))
+ return 1;
+ }
+
+ return 0;
+}
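+
+/*
+ * Reading of the check above: the NETXEN_WOL_CONFIG_NV bit marks the
+ * port as WoL-capable, while the NETXEN_WOL_CONFIG bit reflects the
+ * current enable state, so both must be set to report WoL support.
+ */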
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
new file mode 100644
index 000000000000..e2c5b6f2df03
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
@@ -0,0 +1,287 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#ifndef __NETXEN_NIC_HW_H_
+#define __NETXEN_NIC_HW_H_
+
+/* Hardware memory size of 128 meg */
+#define NETXEN_MEMADDR_MAX (128 * 1024 * 1024)
+
+struct netxen_adapter;
+
+#define NETXEN_PCI_MAPSIZE_BYTES (NETXEN_PCI_MAPSIZE << 20)
+
+void netxen_nic_set_link_parameters(struct netxen_adapter *adapter);
+
+/* Nibble or Byte mode for phy interface (GbE mode only) */
+
+#define _netxen_crb_get_bit(var, bit) ((var >> bit) & 0x1)
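+/* e.g. _netxen_crb_get_bit(status, 10) expands to ((status >> 10) & 0x1) */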
+
+/*
+ * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
+ *
+ * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
+ * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
+ * Bit 2 : enable_rx => 1:enable frame recv, 0:disable
+ * Bit 3 : rx_synced => R/O: recv enable synched to recv stream
+ * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
+ * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
+ * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
+ * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
+ * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
+ * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
+ * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
+ * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
+ */
+
+#define netxen_gb_tx_flowctl(config_word) \
+ ((config_word) |= 1 << 4)
+#define netxen_gb_rx_flowctl(config_word) \
+ ((config_word) |= 1 << 5)
+#define netxen_gb_tx_reset_pb(config_word) \
+ ((config_word) |= 1 << 16)
+#define netxen_gb_rx_reset_pb(config_word) \
+ ((config_word) |= 1 << 17)
+#define netxen_gb_tx_reset_mac(config_word) \
+ ((config_word) |= 1 << 18)
+#define netxen_gb_rx_reset_mac(config_word) \
+ ((config_word) |= 1 << 19)
+
+#define netxen_gb_unset_tx_flowctl(config_word) \
+ ((config_word) &= ~(1 << 4))
+#define netxen_gb_unset_rx_flowctl(config_word) \
+ ((config_word) &= ~(1 << 5))
+
+#define netxen_gb_get_tx_synced(config_word) \
+ _netxen_crb_get_bit((config_word), 1)
+#define netxen_gb_get_rx_synced(config_word) \
+ _netxen_crb_get_bit((config_word), 3)
+#define netxen_gb_get_tx_flowctl(config_word) \
+ _netxen_crb_get_bit((config_word), 4)
+#define netxen_gb_get_rx_flowctl(config_word) \
+ _netxen_crb_get_bit((config_word), 5)
+#define netxen_gb_get_soft_reset(config_word) \
+ _netxen_crb_get_bit((config_word), 31)
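+
+/*
+ * A minimal sketch of driving these helpers (illustrative only; the
+ * per-port config register name NETXEN_NIU_GB_MAC_CONFIG_0(port) is
+ * assumed here):
+ *
+ *	u32 w = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
+ *	netxen_gb_tx_flowctl(w);	(sets bit 4)
+ *	netxen_gb_rx_flowctl(w);	(sets bit 5)
+ *	NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), w);
+ */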
+
+#define netxen_gb_get_stationaddress_low(config_word) ((config_word) >> 16)
+
+#define netxen_gb_set_mii_mgmt_clockselect(config_word, val) \
+ ((config_word) |= ((val) & 0x07))
+#define netxen_gb_mii_mgmt_reset(config_word) \
+ ((config_word) |= 1 << 31)
+#define netxen_gb_mii_mgmt_unset(config_word) \
+ ((config_word) &= ~(1 << 31))
+
+/*
+ * NIU GB MII Mgmt Command Register (applies to GB0, GB1, GB2, GB3)
+ * Bit 0 : read_cycle => 1:perform single read cycle, 0:no-op
+ * Bit 1 : scan_cycle => 1:perform continuous read cycles, 0:no-op
+ */
+
+#define netxen_gb_mii_mgmt_set_read_cycle(config_word) \
+ ((config_word) |= 1 << 0)
+#define netxen_gb_mii_mgmt_reg_addr(config_word, val) \
+ ((config_word) |= ((val) & 0x1F))
+#define netxen_gb_mii_mgmt_phy_addr(config_word, val) \
+ ((config_word) |= (((val) & 0x1F) << 8))
+
+/*
+ * NIU GB MII Mgmt Indicators Register (applies to GB0, GB1, GB2, GB3)
+ * Read-only register.
+ * Bit 0 : busy => 1:performing an MII mgmt cycle, 0:idle
+ * Bit 1 : scanning => 1:scan operation in progress, 0:idle
+ * Bit 2 : notvalid => 1:mgmt result data not yet valid, 0:idle
+ */
+#define netxen_get_gb_mii_mgmt_busy(config_word) \
+ _netxen_crb_get_bit(config_word, 0)
+#define netxen_get_gb_mii_mgmt_scanning(config_word) \
+ _netxen_crb_get_bit(config_word, 1)
+#define netxen_get_gb_mii_mgmt_notvalid(config_word) \
+ _netxen_crb_get_bit(config_word, 2)
+/*
+ * NIU XG Pause Ctl Register
+ *
+ * Bit 0 : xg0_mask => 1:disable tx pause frames
+ * Bit 1 : xg0_request => 1:request single pause frame
+ * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
+ * Bit 3 : xg1_mask => 1:disable tx pause frames
+ * Bit 4 : xg1_request => 1:request single pause frame
+ * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
+ */
+
+#define netxen_xg_set_xg0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define netxen_xg_set_xg1_mask(config_word) \
+ ((config_word) |= 1 << 3)
+
+#define netxen_xg_get_xg0_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 0)
+#define netxen_xg_get_xg1_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 3)
+
+#define netxen_xg_unset_xg0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define netxen_xg_unset_xg1_mask(config_word) \
+ ((config_word) &= ~(1 << 3))
+
+/*
+ * NIU GB Pause Ctl Register (bit layout per the netxen_gb_*_mask
+ * helpers below)
+ *
+ * Bit 0 : gb0_mask => 1:disable tx pause frames
+ * Bit 2 : gb1_mask => 1:disable tx pause frames
+ * Bit 4 : gb2_mask => 1:disable tx pause frames
+ * Bit 6 : gb3_mask => 1:disable tx pause frames
+ */
+#define netxen_gb_set_gb0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define netxen_gb_set_gb1_mask(config_word) \
+ ((config_word) |= 1 << 2)
+#define netxen_gb_set_gb2_mask(config_word) \
+ ((config_word) |= 1 << 4)
+#define netxen_gb_set_gb3_mask(config_word) \
+ ((config_word) |= 1 << 6)
+
+#define netxen_gb_get_gb0_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 0)
+#define netxen_gb_get_gb1_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 2)
+#define netxen_gb_get_gb2_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 4)
+#define netxen_gb_get_gb3_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 6)
+
+#define netxen_gb_unset_gb0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define netxen_gb_unset_gb1_mask(config_word) \
+ ((config_word) &= ~(1 << 2))
+#define netxen_gb_unset_gb2_mask(config_word) \
+ ((config_word) &= ~(1 << 4))
+#define netxen_gb_unset_gb3_mask(config_word) \
+ ((config_word) &= ~(1 << 6))
+
+
+/*
+ * PHY-Specific MII control/status registers.
+ */
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_CONTROL 0
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_STATUS 1
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_0 2
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_1 3
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG 4
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART 5
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG_MORE 6
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_NEXTPAGE_XMIT 7
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART_NEXTPAGE 8
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_CONTROL 9
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_STATUS 10
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_EXTENDED_STATUS 15
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL 16
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE 18
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS 19
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE 20
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_RECV_ERROR_COUNT 21
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_CONTROL 24
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_OVERRIDE 25
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE_YET 26
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS_MORE 27
+
+/*
+ * PHY-Specific Status Register (reg 17).
+ *
+ * Bit 0 : jabber => 1:jabber detected, 0:not
+ * Bit 1 : polarity => 1:polarity reversed, 0:normal
+ * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled
+ * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled
+ * Bit 4 : energydetect => 1:sleep, 0:active
+ * Bit 5 : downshift => 1:downshift, 0:no downshift
+ * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
+ * Bits 7-9 : cablelen => not valid in 10Mb/s mode
+ * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
+ * Bit 10 : link => 1:link up, 0:link down
+ * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet
+ * Bit 12 : pagercvd => 1:page received, 0:page not received
+ * Bit 13 : duplex => 1:full duplex, 0:half duplex
+ * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
+ */
+
+#define netxen_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)
+
+#define netxen_set_phy_speed(config_word, val) \
+ ((config_word) |= ((val & 0x03) << 14))
+#define netxen_set_phy_duplex(config_word) \
+ ((config_word) |= 1 << 13)
+#define netxen_clear_phy_duplex(config_word) \
+ ((config_word) &= ~(1 << 13))
+
+#define netxen_get_phy_link(config_word) \
+ _netxen_crb_get_bit(config_word, 10)
+#define netxen_get_phy_duplex(config_word) \
+ _netxen_crb_get_bit(config_word, 13)
+
+/*
+ * NIU Mode Register.
+ * Bit 0 : enable FibreChannel
+ * Bit 1 : enable 10/100/1000 Ethernet
+ * Bit 2 : enable 10Gb Ethernet
+ */
+
+#define netxen_get_niu_enable_ge(config_word) \
+ _netxen_crb_get_bit(config_word, 1)
+
+#define NETXEN_NIU_NON_PROMISC_MODE 0
+#define NETXEN_NIU_PROMISC_MODE 1
+#define NETXEN_NIU_ALLMULTI_MODE 2
+
+/*
+ * NIU XG MAC Config Register
+ *
+ * Bit 0 : tx_enable => 1:enable frame xmit, 0:disable
+ * Bit 2 : rx_enable => 1:enable frame recv, 0:disable
+ * Bit 4 : soft_reset => 1:reset the MAC , 0:no-op
+ * Bit 27: xaui_framer_reset
+ * Bit 28: xaui_rx_reset
+ * Bit 29: xaui_tx_reset
+ * Bit 30: xg_ingress_afifo_reset
+ * Bit 31: xg_egress_afifo_reset
+ */
+
+#define netxen_xg_soft_reset(config_word) \
+ ((config_word) |= 1 << 4)
+
+typedef struct {
+ unsigned valid;
+ unsigned start_128M;
+ unsigned end_128M;
+ unsigned start_2M;
+} crb_128M_2M_sub_block_map_t;
+
+typedef struct {
+ crb_128M_2M_sub_block_map_t sub_block[16];
+} crb_128M_2M_block_map_t;
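+
+/*
+ * The field names imply the translation each valid sub_block performs
+ * (an assumption from the names, not code in this file): for a 128M
+ * CRB offset off with start_128M <= off < end_128M, the 2M-layout
+ * offset is start_2M + (off - start_128M).
+ */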
+
+#endif /* __NETXEN_NIC_HW_H_ */
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
new file mode 100644
index 000000000000..a8259cc19a63
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -0,0 +1,1949 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+
+struct crb_addr_pair {
+ u32 addr;
+ u32 data;
+};
+
+#define NETXEN_MAX_CRB_XFORM 60
+static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
+#define NETXEN_ADDR_ERROR (0xffffffff)
+
+#define crb_addr_transform(name) \
+ crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \
+ NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20
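+/*
+ * For example, crb_addr_transform(XDMA) expands to:
+ *	crb_addr_xform[NETXEN_HW_PX_MAP_CRB_XDMA] =
+ *		NETXEN_HW_CRB_HUB_AGT_ADR_XDMA << 20;
+ */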
+
+#define NETXEN_NIC_XDMA_RESET 0x8000ff
+
+static void
+netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
+ struct nx_host_rds_ring *rds_ring);
+static int netxen_p3_has_mn(struct netxen_adapter *adapter);
+
+static void crb_addr_transform_setup(void)
+{
+ crb_addr_transform(XDMA);
+ crb_addr_transform(TIMR);
+ crb_addr_transform(SRE);
+ crb_addr_transform(SQN3);
+ crb_addr_transform(SQN2);
+ crb_addr_transform(SQN1);
+ crb_addr_transform(SQN0);
+ crb_addr_transform(SQS3);
+ crb_addr_transform(SQS2);
+ crb_addr_transform(SQS1);
+ crb_addr_transform(SQS0);
+ crb_addr_transform(RPMX7);
+ crb_addr_transform(RPMX6);
+ crb_addr_transform(RPMX5);
+ crb_addr_transform(RPMX4);
+ crb_addr_transform(RPMX3);
+ crb_addr_transform(RPMX2);
+ crb_addr_transform(RPMX1);
+ crb_addr_transform(RPMX0);
+ crb_addr_transform(ROMUSB);
+ crb_addr_transform(SN);
+ crb_addr_transform(QMN);
+ crb_addr_transform(QMS);
+ crb_addr_transform(PGNI);
+ crb_addr_transform(PGND);
+ crb_addr_transform(PGN3);
+ crb_addr_transform(PGN2);
+ crb_addr_transform(PGN1);
+ crb_addr_transform(PGN0);
+ crb_addr_transform(PGSI);
+ crb_addr_transform(PGSD);
+ crb_addr_transform(PGS3);
+ crb_addr_transform(PGS2);
+ crb_addr_transform(PGS1);
+ crb_addr_transform(PGS0);
+ crb_addr_transform(PS);
+ crb_addr_transform(PH);
+ crb_addr_transform(NIU);
+ crb_addr_transform(I2Q);
+ crb_addr_transform(EG);
+ crb_addr_transform(MN);
+ crb_addr_transform(MS);
+ crb_addr_transform(CAS2);
+ crb_addr_transform(CAS1);
+ crb_addr_transform(CAS0);
+ crb_addr_transform(CAM);
+ crb_addr_transform(C2C1);
+ crb_addr_transform(C2C0);
+ crb_addr_transform(SMB);
+ crb_addr_transform(OCM0);
+ crb_addr_transform(I2C0);
+}
+
+void netxen_release_rx_buffers(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct netxen_rx_buffer *rx_buf;
+ int i, ring;
+
+ recv_ctx = &adapter->recv_ctx;
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ for (i = 0; i < rds_ring->num_desc; ++i) {
+ rx_buf = &(rds_ring->rx_buf_arr[i]);
+ if (rx_buf->state == NETXEN_BUFFER_FREE)
+ continue;
+ pci_unmap_single(adapter->pdev,
+ rx_buf->dma,
+ rds_ring->dma_size,
+ PCI_DMA_FROMDEVICE);
+ if (rx_buf->skb != NULL)
+ dev_kfree_skb_any(rx_buf->skb);
+ }
+ }
+}
+
+void netxen_release_tx_buffers(struct netxen_adapter *adapter)
+{
+ struct netxen_cmd_buffer *cmd_buf;
+ struct netxen_skb_frag *buffrag;
+ int i, j;
+ struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ cmd_buf = tx_ring->cmd_buf_arr;
+ for (i = 0; i < tx_ring->num_desc; i++) {
+ buffrag = cmd_buf->frag_array;
+ if (buffrag->dma) {
+ pci_unmap_single(adapter->pdev, buffrag->dma,
+ buffrag->length, PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+ for (j = 0; j < cmd_buf->frag_count; j++) {
+ buffrag++;
+ if (buffrag->dma) {
+ pci_unmap_page(adapter->pdev, buffrag->dma,
+ buffrag->length,
+ PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+ }
+ if (cmd_buf->skb) {
+ dev_kfree_skb_any(cmd_buf->skb);
+ cmd_buf->skb = NULL;
+ }
+ cmd_buf++;
+ }
+}
+
+void netxen_free_sw_resources(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_tx_ring *tx_ring;
+ int ring;
+
+ recv_ctx = &adapter->recv_ctx;
+
+ if (recv_ctx->rds_rings == NULL)
+ goto skip_rds;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ vfree(rds_ring->rx_buf_arr);
+ rds_ring->rx_buf_arr = NULL;
+ }
+ kfree(recv_ctx->rds_rings);
+
+skip_rds:
+ if (adapter->tx_ring == NULL)
+ return;
+
+ tx_ring = adapter->tx_ring;
+ vfree(tx_ring->cmd_buf_arr);
+ kfree(tx_ring);
+ adapter->tx_ring = NULL;
+}
+
+int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct nx_host_tx_ring *tx_ring;
+ struct netxen_rx_buffer *rx_buf;
+ int ring, i, size;
+
+ struct netxen_cmd_buffer *cmd_buf_arr;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ size = sizeof(struct nx_host_tx_ring);
+ tx_ring = kzalloc(size, GFP_KERNEL);
+ if (tx_ring == NULL) {
+ dev_err(&pdev->dev, "%s: failed to allocate tx ring struct\n",
+ netdev->name);
+ return -ENOMEM;
+ }
+ adapter->tx_ring = tx_ring;
+
+ tx_ring->num_desc = adapter->num_txd;
+ tx_ring->txq = netdev_get_tx_queue(netdev, 0);
+
+ cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
+ if (cmd_buf_arr == NULL) {
+ dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n",
+ netdev->name);
+ goto err_out;
+ }
+ tx_ring->cmd_buf_arr = cmd_buf_arr;
+
+ recv_ctx = &adapter->recv_ctx;
+
+ size = adapter->max_rds_rings * sizeof (struct nx_host_rds_ring);
+ rds_ring = kzalloc(size, GFP_KERNEL);
+ if (rds_ring == NULL) {
+ dev_err(&pdev->dev, "%s: failed to allocate rds ring struct\n",
+ netdev->name);
+ goto err_out;
+ }
+ recv_ctx->rds_rings = rds_ring;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ switch (ring) {
+ case RCV_RING_NORMAL:
+ rds_ring->num_desc = adapter->num_rxd;
+ if (adapter->ahw.cut_through) {
+ rds_ring->dma_size =
+ NX_CT_DEFAULT_RX_BUF_LEN;
+ rds_ring->skb_size =
+ NX_CT_DEFAULT_RX_BUF_LEN;
+ } else {
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ rds_ring->dma_size =
+ NX_P3_RX_BUF_MAX_LEN;
+ else
+ rds_ring->dma_size =
+ NX_P2_RX_BUF_MAX_LEN;
+ rds_ring->skb_size =
+ rds_ring->dma_size + NET_IP_ALIGN;
+ }
+ break;
+
+ case RCV_RING_JUMBO:
+ rds_ring->num_desc = adapter->num_jumbo_rxd;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ rds_ring->dma_size =
+ NX_P3_RX_JUMBO_BUF_MAX_LEN;
+ else
+ rds_ring->dma_size =
+ NX_P2_RX_JUMBO_BUF_MAX_LEN;
+
+ if (adapter->capabilities & NX_CAP0_HW_LRO)
+ rds_ring->dma_size += NX_LRO_BUFFER_EXTRA;
+
+ rds_ring->skb_size =
+ rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+
+ case RCV_RING_LRO:
+ rds_ring->num_desc = adapter->num_lro_rxd;
+ rds_ring->dma_size = NX_RX_LRO_BUFFER_LENGTH;
+ rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+
+ }
+ rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
+ if (rds_ring->rx_buf_arr == NULL) {
+ printk(KERN_ERR "%s: Failed to allocate "
+ "rx buffer ring %d\n",
+ netdev->name, ring);
+ /* free whatever was already allocated */
+ goto err_out;
+ }
+ INIT_LIST_HEAD(&rds_ring->free_list);
+ /*
+ * Now go through all of them, set reference handles
+ * and put them in the queues.
+ */
+ rx_buf = rds_ring->rx_buf_arr;
+ for (i = 0; i < rds_ring->num_desc; i++) {
+ list_add_tail(&rx_buf->list,
+ &rds_ring->free_list);
+ rx_buf->ref_handle = i;
+ rx_buf->state = NETXEN_BUFFER_FREE;
+ rx_buf++;
+ }
+ spin_lock_init(&rds_ring->lock);
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ sds_ring->irq = adapter->msix_entries[ring].vector;
+ sds_ring->adapter = adapter;
+ sds_ring->num_desc = adapter->num_rxd;
+
+ for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
+ INIT_LIST_HEAD(&sds_ring->free_list[i]);
+ }
+
+ return 0;
+
+err_out:
+ netxen_free_sw_resources(adapter);
+ return -ENOMEM;
+}
+
+/*
+ * netxen_decode_crb_addr() - utility to translate from internal Phantom CRB
+ * address to external PCI CRB address.
+ */
+static u32 netxen_decode_crb_addr(u32 addr)
+{
+ int i;
+ u32 base_addr, offset, pci_base;
+
+ crb_addr_transform_setup();
+
+ pci_base = NETXEN_ADDR_ERROR;
+ base_addr = addr & 0xfff00000;
+ offset = addr & 0x000fffff;
+
+ for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) {
+ if (crb_addr_xform[i] == base_addr) {
+ pci_base = i << 20;
+ break;
+ }
+ }
+ if (pci_base == NETXEN_ADDR_ERROR)
+ return pci_base;
+ else
+ return pci_base + offset;
+}
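+
+/*
+ * Worked example: for addr 0xaabbcccc, base_addr is 0xaab00000 and
+ * offset is 0xbcccc; if crb_addr_xform[i] equals that base for some i,
+ * the result is (i << 20) + 0xbcccc, otherwise NETXEN_ADDR_ERROR.
+ */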
+
+#define NETXEN_MAX_ROM_WAIT_USEC 100
+
+static int netxen_wait_rom_done(struct netxen_adapter *adapter)
+{
+ long timeout = 0;
+ long done = 0;
+
+ cond_resched();
+
+ while (done == 0) {
+ done = NXRD32(adapter, NETXEN_ROMUSB_GLB_STATUS);
+ done &= 2;
+ if (++timeout >= NETXEN_MAX_ROM_WAIT_USEC) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout reached waiting for rom done");
+ return -EIO;
+ }
+ udelay(1);
+ }
+ return 0;
+}
+
+static int do_rom_fast_read(struct netxen_adapter *adapter,
+ int addr, int *valp)
+{
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+ if (netxen_wait_rom_done(adapter)) {
+ printk("Error waiting for rom done\n");
+ return -EIO;
+ }
+ /* reset abyte_cnt and dummy_byte_cnt */
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
+ udelay(10);
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+
+ *valp = NXRD32(adapter, NETXEN_ROMUSB_ROM_RDATA);
+ return 0;
+}
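+
+/*
+ * The sequence above programs the ROM address, zero dummy bytes, a
+ * 3-byte address phase and instruction opcode 0xb (presumably the
+ * standard SPI flash fast-read command), waits for GLB_STATUS bit 1
+ * via netxen_wait_rom_done(), then samples ROM_RDATA and restores the
+ * byte-count registers.
+ */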
+
+static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
+ u8 *bytes, size_t size)
+{
+ int addridx;
+ int ret = 0;
+
+ for (addridx = addr; addridx < (addr + size); addridx += 4) {
+ int v;
+ ret = do_rom_fast_read(adapter, addridx, &v);
+ if (ret != 0)
+ break;
+ *(__le32 *)bytes = cpu_to_le32(v);
+ bytes += 4;
+ }
+
+ return ret;
+}
+
+int
+netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
+ u8 *bytes, size_t size)
+{
+ int ret;
+
+ ret = netxen_rom_lock(adapter);
+ if (ret < 0)
+ return ret;
+
+ ret = do_rom_fast_read_words(adapter, addr, bytes, size);
+
+ netxen_rom_unlock(adapter);
+ return ret;
+}
+
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
+{
+ int ret;
+
+ if (netxen_rom_lock(adapter) != 0)
+ return -EIO;
+
+ ret = do_rom_fast_read(adapter, addr, valp);
+ netxen_rom_unlock(adapter);
+ return ret;
+}
+
+#define NETXEN_BOARDTYPE 0x4008
+#define NETXEN_BOARDNUM 0x400c
+#define NETXEN_CHIPNUM 0x4010
+
+int netxen_pinit_from_rom(struct netxen_adapter *adapter)
+{
+ int addr, val;
+ int i, n, init_delay = 0;
+ struct crb_addr_pair *buf;
+ unsigned offset;
+ u32 off;
+
+ /* resetall */
+ netxen_rom_lock(adapter);
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xffffffff);
+ netxen_rom_unlock(adapter);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
+ (n != 0xcafecafe) ||
+ netxen_rom_fast_read(adapter, 4, &n) != 0) {
+ printk(KERN_ERR "%s: ERROR Reading crb_init area: "
+ "n: %08x\n", netxen_nic_driver_name, n);
+ return -EIO;
+ }
+ offset = n & 0xffffU;
+ n = (n >> 16) & 0xffffU;
+ } else {
+ if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
+ !(n & 0x80000000)) {
+ printk(KERN_ERR "%s: ERROR Reading crb_init area: "
+ "n: %08x\n", netxen_nic_driver_name, n);
+ return -EIO;
+ }
+ offset = 1;
+ n &= ~0x80000000;
+ }
+
+ if (n >= 1024) {
+ printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not"
+ " initialized.\n", __func__, n);
+ return -EIO;
+ }
+
+ buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
+ if (buf == NULL) {
+ printk("%s: netxen_pinit_from_rom: Unable to calloc memory.\n",
+ netxen_nic_driver_name);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < n; i++) {
+ if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
+ netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
+ kfree(buf);
+ return -EIO;
+ }
+
+ buf[i].addr = addr;
+ buf[i].data = val;
+
+ }
+
+ for (i = 0; i < n; i++) {
+
+ off = netxen_decode_crb_addr(buf[i].addr);
+ if (off == NETXEN_ADDR_ERROR) {
+ printk(KERN_ERR"CRB init value out of range %x\n",
+ buf[i].addr);
+ continue;
+ }
+ off += NETXEN_PCI_CRBSPACE;
+
+ if (off & 1)
+ continue;
+
+ /* skipping cold reboot MAGIC */
+ if (off == NETXEN_CAM_RAM(0x1fc))
+ continue;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ if (off == (NETXEN_CRB_I2C0 + 0x1c))
+ continue;
+ /* do not reset PCI */
+ if (off == (ROMUSB_GLB + 0xbc))
+ continue;
+ if (off == (ROMUSB_GLB + 0xa8))
+ continue;
+ if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
+ continue;
+ if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
+ continue;
+ if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
+ continue;
+ if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET)
+ continue;
+ if (off == (NETXEN_CRB_PEG_NET_1 + 0x18) &&
+ !NX_IS_REVISION_P3P(adapter->ahw.revision_id))
+ buf[i].data = 0x1020;
+ /* skip the function enable register */
+ if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION))
+ continue;
+ if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2))
+ continue;
+ if ((off & 0x0ff00000) == NETXEN_CRB_SMB)
+ continue;
+ }
+
+ init_delay = 1;
+ /* After writing this register, HW needs time for CRB */
+ /* to quiet down (else crb_window returns 0xffffffff) */
+ if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
+ init_delay = 1000;
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ /* hold xdma in reset also */
+ buf[i].data = NETXEN_NIC_XDMA_RESET;
+ }
+ }
+
+ NXWR32(adapter, off, buf[i].data);
+
+ msleep(init_delay);
+ }
+ kfree(buf);
+
+ /* disable_peg_cache_all */
+
+ /* unreset_net_cache */
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ val = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f));
+ }
+
+ /* p2dn replyCount */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
+ /* disable_peg_cache 0 */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8);
+ /* disable_peg_cache 1 */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8);
+
+ /* peg_clr_all */
+
+ /* peg_clr 0 */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0);
+ /* peg_clr 1 */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0);
+ /* peg_clr 2 */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0);
+ /* peg_clr 3 */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0);
+ return 0;
+}
+
+static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
+{
+ uint32_t i;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ __le32 entries = cpu_to_le32(directory->num_entries);
+
+ for (i = 0; i < entries; i++) {
+
+ __le32 offs = cpu_to_le32(directory->findex) +
+ (i * cpu_to_le32(directory->entry_size));
+ __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
+
+ if (tab_type == section)
+ return (struct uni_table_desc *) &unirom[offs];
+ }
+
+ return NULL;
+}
+
+#define QLCNIC_FILEHEADER_SIZE (14 * 4)
+
+static int
+netxen_nic_validate_header(struct netxen_adapter *adapter)
+{
+ const u8 *unirom = adapter->fw->data;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ u32 fw_file_size = adapter->fw->size;
+ u32 tab_size;
+ __le32 entries;
+ __le32 entry_size;
+
+ if (fw_file_size < QLCNIC_FILEHEADER_SIZE)
+ return -EINVAL;
+
+ entries = cpu_to_le32(directory->num_entries);
+ entry_size = cpu_to_le32(directory->entry_size);
+ tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);
+
+ if (fw_file_size < tab_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+netxen_nic_validate_bootld(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ const u8 *unirom = adapter->fw->data;
+ __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ NX_UNI_BOOTLD_IDX_OFF));
+ u32 offs;
+ u32 tab_size;
+ u32 data_size;
+
+ tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx));
+ descr = (struct uni_data_desc *)&unirom[offs];
+
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+netxen_nic_validate_fw(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ const u8 *unirom = adapter->fw->data;
+ __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ NX_UNI_FIRMWARE_IDX_OFF));
+ u32 offs;
+ u32 tab_size;
+ u32 data_size;
+
+ tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx));
+ descr = (struct uni_data_desc *)&unirom[offs];
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+
+static int
+netxen_nic_validate_product_offs(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *ptab_descr;
+ const u8 *unirom = adapter->fw->data;
+ int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ?
+ 1 : netxen_p3_has_mn(adapter);
+ __le32 entries;
+ __le32 entry_size;
+ u32 tab_size;
+ u32 i;
+
+ ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
+ if (ptab_descr == NULL)
+ return -EINVAL;
+
+ entries = cpu_to_le32(ptab_descr->num_entries);
+ entry_size = cpu_to_le32(ptab_descr->entry_size);
+ tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+nomn:
+ for (i = 0; i < entries; i++) {
+
+ __le32 flags, file_chiprev, offs;
+ u8 chiprev = adapter->ahw.revision_id;
+ uint32_t flagbit;
+
+ offs = cpu_to_le32(ptab_descr->findex) +
+ (i * cpu_to_le32(ptab_descr->entry_size));
+ flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF));
+ file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
+ NX_UNI_CHIP_REV_OFF));
+
+ flagbit = mn_present ? 1 : 2;
+
+ if ((chiprev == file_chiprev) &&
+ ((1ULL << flagbit) & flags)) {
+ adapter->file_prd_off = offs;
+ return 0;
+ }
+ }
+
+ if (mn_present && NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ mn_present = 0;
+ goto nomn;
+ }
+
+ return -EINVAL;
+}
+
+static int
+netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter)
+{
+ if (netxen_nic_validate_header(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: header validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_product_offs(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: product validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_bootld(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: bootld validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_fw(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: firmware validation failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
+ u32 section, u32 idx_offset)
+{
+ const u8 *unirom = adapter->fw->data;
+ int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ idx_offset));
+ struct uni_table_desc *tab_desc;
+ __le32 offs;
+
+ tab_desc = nx_get_table_desc(unirom, section);
+
+ if (tab_desc == NULL)
+ return NULL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * idx);
+
+ return (struct uni_data_desc *)&unirom[offs];
+}
+
+static u8 *
+nx_get_bootld_offs(struct netxen_adapter *adapter)
+{
+ u32 offs = NETXEN_BOOTLD_START;
+
+ if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
+ offs = cpu_to_le32((nx_get_data_desc(adapter,
+ NX_UNI_DIR_SECT_BOOTLD,
+ NX_UNI_BOOTLD_IDX_OFF))->findex);
+
+ return (u8 *)&adapter->fw->data[offs];
+}
+
+static u8 *
+nx_get_fw_offs(struct netxen_adapter *adapter)
+{
+ u32 offs = NETXEN_IMAGE_START;
+
+ if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
+ offs = cpu_to_le32((nx_get_data_desc(adapter,
+ NX_UNI_DIR_SECT_FW,
+ NX_UNI_FIRMWARE_IDX_OFF))->findex);
+
+ return (u8 *)&adapter->fw->data[offs];
+}
+
+static __le32
+nx_get_fw_size(struct netxen_adapter *adapter)
+{
+ if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
+ return cpu_to_le32((nx_get_data_desc(adapter,
+ NX_UNI_DIR_SECT_FW,
+ NX_UNI_FIRMWARE_IDX_OFF))->size);
+ else
+ return cpu_to_le32(
+ *(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]);
+}
+
+static __le32
+nx_get_fw_version(struct netxen_adapter *adapter)
+{
+ struct uni_data_desc *fw_data_desc;
+ const struct firmware *fw = adapter->fw;
+ __le32 major, minor, sub;
+ const u8 *ver_str;
+ int i, ret = 0;
+
+ if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
+
+ fw_data_desc = nx_get_data_desc(adapter,
+ NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF);
+ ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
+ cpu_to_le32(fw_data_desc->size) - 17;
+
+ for (i = 0; i < 12; i++) {
+ if (!strncmp(&ver_str[i], "REV=", 4)) {
+ ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
+ &major, &minor, &sub);
+ break;
+ }
+ }
+
+ if (ret != 3)
+ return 0;
+
+ return major + (minor << 8) + (sub << 16);
+
+ } else
+ return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
+}
+
+static __le32
+nx_get_bios_version(struct netxen_adapter *adapter)
+{
+ const struct firmware *fw = adapter->fw;
+ __le32 bios_ver, prd_off = adapter->file_prd_off;
+
+ if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
+ bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
+ + NX_UNI_BIOS_VERSION_OFF));
+ return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) +
+ (bios_ver >> 24);
+ } else
+ return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
+
+}
+
+int
+netxen_need_fw_reset(struct netxen_adapter *adapter)
+{
+ u32 count, old_count;
+ u32 val, version, major, minor, build;
+ int i, timeout;
+ u8 fw_type;
+
+	/* NX2031 firmware doesn't support heartbeat */
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 1;
+
+ if (adapter->need_fw_reset)
+ return 1;
+
+ /* last attempt had failed */
+ if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
+ return 1;
+
+ old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+
+ for (i = 0; i < 10; i++) {
+
+ timeout = msleep_interruptible(200);
+ if (timeout) {
+ NXWR32(adapter, CRB_CMDPEG_STATE,
+ PHAN_INITIALIZE_FAILED);
+ return -EINTR;
+ }
+
+ count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+ if (count != old_count)
+ break;
+ }
+
+ /* firmware is dead */
+ if (count == old_count)
+ return 1;
+
+ /* check if we have got newer or different file firmware */
+ if (adapter->fw) {
+
+ val = nx_get_fw_version(adapter);
+
+ version = NETXEN_DECODE_VERSION(val);
+
+ major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
+ minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
+ build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
+
+ if (version > NETXEN_VERSION_CODE(major, minor, build))
+ return 1;
+
+ if (version == NETXEN_VERSION_CODE(major, minor, build) &&
+ adapter->fw_type != NX_UNIFIED_ROMIMAGE) {
+
+ val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
+ fw_type = (val & 0x4) ?
+ NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE;
+
+ if (adapter->fw_type != fw_type)
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+#define NETXEN_MIN_P3_FW_SUPP NETXEN_VERSION_CODE(4, 0, 505)
+
+int
+netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter)
+{
+ u32 flash_fw_ver, min_fw_ver;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 0;
+
+ if (netxen_rom_fast_read(adapter,
+ NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
+ dev_err(&adapter->pdev->dev, "Unable to read flash fw"
+ "version\n");
+ return -EIO;
+ }
+
+ flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);
+ min_fw_ver = NETXEN_MIN_P3_FW_SUPP;
+ if (flash_fw_ver >= min_fw_ver)
+ return 0;
+
+ dev_info(&adapter->pdev->dev, "Flash fw[%d.%d.%d] is < min fw supported"
+ "[4.0.505]. Please update firmware on flash\n",
+ _major(flash_fw_ver), _minor(flash_fw_ver),
+ _build(flash_fw_ver));
+ return -EINVAL;
+}
+
+static char *fw_name[] = {
+ NX_P2_MN_ROMIMAGE_NAME,
+ NX_P3_CT_ROMIMAGE_NAME,
+ NX_P3_MN_ROMIMAGE_NAME,
+ NX_UNIFIED_ROMIMAGE_NAME,
+ NX_FLASH_ROMIMAGE_NAME,
+};
+
+int
+netxen_load_firmware(struct netxen_adapter *adapter)
+{
+ u64 *ptr64;
+ u32 i, flashaddr, size;
+ const struct firmware *fw = adapter->fw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ dev_info(&pdev->dev, "loading firmware from %s\n",
+ fw_name[adapter->fw_type]);
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1);
+
+ if (fw) {
+ __le64 data;
+
+ size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
+
+ ptr64 = (u64 *)nx_get_bootld_offs(adapter);
+ flashaddr = NETXEN_BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (adapter->pci_mem_write(adapter, flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+
+ size = (__force u32)nx_get_fw_size(adapter) / 8;
+
+ ptr64 = (u64 *)nx_get_fw_offs(adapter);
+ flashaddr = NETXEN_IMAGE_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (adapter->pci_mem_write(adapter,
+ flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+
+ size = (__force u32)nx_get_fw_size(adapter) % 8;
+ if (size) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (adapter->pci_mem_write(adapter,
+ flashaddr, data))
+ return -EIO;
+ }
+
+ } else {
+ u64 data;
+ u32 hi, lo;
+
+ size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
+ flashaddr = NETXEN_BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ if (netxen_rom_fast_read(adapter,
+ flashaddr, (int *)&lo) != 0)
+ return -EIO;
+ if (netxen_rom_fast_read(adapter,
+ flashaddr + 4, (int *)&hi) != 0)
+ return -EIO;
+
+ /* hi, lo are already in host endian byteorder */
+ data = (((u64)hi << 32) | lo);
+
+ if (adapter->pci_mem_write(adapter,
+ flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+ }
+ msleep(1);
+
+ if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x18, 0x1020);
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001e);
+ } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d);
+ else {
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff);
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 0);
+ }
+
+ return 0;
+}
+
+static int
+netxen_validate_firmware(struct netxen_adapter *adapter)
+{
+ __le32 val;
+ __le32 flash_fw_ver;
+ u32 file_fw_ver, min_ver, bios;
+ struct pci_dev *pdev = adapter->pdev;
+ const struct firmware *fw = adapter->fw;
+ u8 fw_type = adapter->fw_type;
+ u32 crbinit_fix_fw;
+
+ if (fw_type == NX_UNIFIED_ROMIMAGE) {
+ if (netxen_nic_validate_unified_romimage(adapter))
+ return -EINVAL;
+ } else {
+ val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
+ if ((__force u32)val != NETXEN_BDINFO_MAGIC)
+ return -EINVAL;
+
+ if (fw->size < NX_FW_MIN_SIZE)
+ return -EINVAL;
+ }
+
+ val = nx_get_fw_version(adapter);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ min_ver = NETXEN_MIN_P3_FW_SUPP;
+ else
+ min_ver = NETXEN_VERSION_CODE(3, 4, 216);
+
+ file_fw_ver = NETXEN_DECODE_VERSION(val);
+
+ if ((_major(file_fw_ver) > _NETXEN_NIC_LINUX_MAJOR) ||
+ (file_fw_ver < min_ver)) {
+ dev_err(&pdev->dev,
+ "%s: firmware version %d.%d.%d unsupported\n",
+ fw_name[fw_type], _major(file_fw_ver), _minor(file_fw_ver),
+ _build(file_fw_ver));
+ return -EINVAL;
+ }
+
+ val = nx_get_bios_version(adapter);
+ netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
+ if ((__force u32)val != bios) {
+ dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
+ fw_name[fw_type]);
+ return -EINVAL;
+ }
+
+ if (netxen_rom_fast_read(adapter,
+ NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
+ dev_err(&pdev->dev, "Unable to read flash fw version\n");
+ return -EIO;
+ }
+ flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);
+
+	/* New fw from file is not allowed if fw on flash is < 4.0.554 */
+ crbinit_fix_fw = NETXEN_VERSION_CODE(4, 0, 554);
+ if (file_fw_ver >= crbinit_fix_fw && flash_fw_ver < crbinit_fix_fw &&
+ NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ dev_err(&pdev->dev, "Incompatibility detected between driver "
+ "and firmware version on flash. This configuration "
+ "is not recommended. Please update the firmware on "
+ "flash immediately\n");
+ return -EINVAL;
+ }
+
+ /* check if flashed firmware is newer only for no-mn and P2 case*/
+ if (!netxen_p3_has_mn(adapter) ||
+ NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ if (flash_fw_ver > file_fw_ver) {
+ dev_info(&pdev->dev, "%s: firmware is older than flash\n",
+ fw_name[fw_type]);
+ return -EINVAL;
+ }
+ }
+
+ NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
+ return 0;
+}
+
+static void
+nx_get_next_fwtype(struct netxen_adapter *adapter)
+{
+ u8 fw_type;
+
+ switch (adapter->fw_type) {
+ case NX_UNKNOWN_ROMIMAGE:
+ fw_type = NX_UNIFIED_ROMIMAGE;
+ break;
+
+ case NX_UNIFIED_ROMIMAGE:
+ if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
+ fw_type = NX_FLASH_ROMIMAGE;
+ else if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ fw_type = NX_P2_MN_ROMIMAGE;
+ else if (netxen_p3_has_mn(adapter))
+ fw_type = NX_P3_MN_ROMIMAGE;
+ else
+ fw_type = NX_P3_CT_ROMIMAGE;
+ break;
+
+ case NX_P3_MN_ROMIMAGE:
+ fw_type = NX_P3_CT_ROMIMAGE;
+ break;
+
+ case NX_P2_MN_ROMIMAGE:
+ case NX_P3_CT_ROMIMAGE:
+ default:
+ fw_type = NX_FLASH_ROMIMAGE;
+ break;
+ }
+
+ adapter->fw_type = fw_type;
+}
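+
+/*
+ * Selection thus falls back in order: the unified image first, then the
+ * chip-specific image (P2 MN, or P3 MN/CT depending on
+ * netxen_p3_has_mn(), straight to flash on P3P), and finally whatever
+ * firmware is already on flash.
+ */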
+
+static int
+netxen_p3_has_mn(struct netxen_adapter *adapter)
+{
+ u32 capability, flashed_ver;
+ capability = 0;
+
+ /* NX2031 always had MN */
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 1;
+
+ netxen_rom_fast_read(adapter,
+ NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
+ flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
+
+ if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {
+
+ capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY);
+ if (capability & NX_PEG_TUNE_MN_PRESENT)
+ return 1;
+ }
+ return 0;
+}
+
+void netxen_request_firmware(struct netxen_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int rc = 0;
+
+ adapter->fw_type = NX_UNKNOWN_ROMIMAGE;
+
+next:
+ nx_get_next_fwtype(adapter);
+
+ if (adapter->fw_type == NX_FLASH_ROMIMAGE) {
+ adapter->fw = NULL;
+ } else {
+ rc = request_firmware(&adapter->fw,
+ fw_name[adapter->fw_type], &pdev->dev);
+ if (rc != 0)
+ goto next;
+
+ rc = netxen_validate_firmware(adapter);
+ if (rc != 0) {
+ release_firmware(adapter->fw);
+ msleep(1);
+ goto next;
+ }
+ }
+}
+
+
+void
+netxen_release_firmware(struct netxen_adapter *adapter)
+{
+ if (adapter->fw)
+ release_firmware(adapter->fw);
+ adapter->fw = NULL;
+}
+
+int netxen_init_dummy_dma(struct netxen_adapter *adapter)
+{
+ u64 addr;
+ u32 hi, lo;
+
+ if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 0;
+
+ adapter->dummy_dma.addr = pci_alloc_consistent(adapter->pdev,
+ NETXEN_HOST_DUMMY_DMA_SIZE,
+ &adapter->dummy_dma.phys_addr);
+ if (adapter->dummy_dma.addr == NULL) {
+ dev_err(&adapter->pdev->dev,
+ "ERROR: Could not allocate dummy DMA memory\n");
+ return -ENOMEM;
+ }
+
+ addr = (uint64_t) adapter->dummy_dma.phys_addr;
+ hi = (addr >> 32) & 0xffffffff;
+ lo = addr & 0xffffffff;
+
+ NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi);
+ NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo);
+
+ return 0;
+}
+
+/*
+ * NetXen DMA watchdog control:
+ *
+ * Bit 0 : enabled => R/O: 1 watchdog active, 0 inactive
+ * Bit 1 : disable_request => 1 req disable dma watchdog
+ * Bit 2 : enable_request => 1 req enable dma watchdog
+ * Bit 3-31 : unused
+ */
+void netxen_free_dummy_dma(struct netxen_adapter *adapter)
+{
+ int i = 100;
+ u32 ctrl;
+
+ if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return;
+
+ if (!adapter->dummy_dma.addr)
+ return;
+
+ ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL);
+ if ((ctrl & 0x1) != 0) {
+ NXWR32(adapter, NETXEN_DMA_WATCHDOG_CTRL, (ctrl | 0x2));
+
+ while ((ctrl & 0x1) != 0) {
+
+ msleep(50);
+
+ ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL);
+
+ if (--i == 0)
+ break;
+ }
+ }
+
+ if (i) {
+ pci_free_consistent(adapter->pdev,
+ NETXEN_HOST_DUMMY_DMA_SIZE,
+ adapter->dummy_dma.addr,
+ adapter->dummy_dma.phys_addr);
+ adapter->dummy_dma.addr = NULL;
+ } else
+ dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n");
+}
+
+int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
+{
+ u32 val = 0;
+ int retries = 60;
+
+ if (pegtune_val)
+ return 0;
+
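+	/* wait up to 30 seconds (60 polls of 500 ms) for the command peg */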
+ do {
+ val = NXRD32(adapter, CRB_CMDPEG_STATE);
+
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return 0;
+ case PHAN_INITIALIZE_FAILED:
+ goto out_err;
+ default:
+ break;
+ }
+
+ msleep(500);
+
+ } while (--retries);
+
+ NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+
+out_err:
+ dev_warn(&adapter->pdev->dev, "firmware init failed\n");
+ return -EIO;
+}
+
+static int
+netxen_receive_peg_ready(struct netxen_adapter *adapter)
+{
+ u32 val = 0;
+ int retries = 2000;
+
+ do {
+ val = NXRD32(adapter, CRB_RCVPEG_STATE);
+
+ if (val == PHAN_PEG_RCV_INITIALIZED)
+ return 0;
+
+ msleep(10);
+
+ } while (--retries);
+
+	printk(KERN_ERR "Receive Peg initialization not "
+			"complete, state: 0x%x.\n", val);
+	return -EIO;
+}
+
+int netxen_init_firmware(struct netxen_adapter *adapter)
+{
+ int err;
+
+ err = netxen_receive_peg_ready(adapter);
+ if (err)
+ return err;
+
+ NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
+ NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
+ NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
+
+ return err;
+}
+
+static void
+netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
+{
+ u32 cable_OUI;
+ u16 cable_len;
+ u16 link_speed;
+ u8 link_status, module, duplex, autoneg;
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->has_link_events = 1;
+
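+	/*
+	 * Field layout, as implied by the shifts below:
+	 * body[1]: bits 0-31 cable OUI, 32-47 cable length, 48-63 link speed;
+	 * body[2]: bits 0-7 link status, 8-15 module type, 16-23 duplex,
+	 * 24-31 autoneg.
+	 */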
+ cable_OUI = msg->body[1] & 0xffffffff;
+ cable_len = (msg->body[1] >> 32) & 0xffff;
+ link_speed = (msg->body[1] >> 48) & 0xffff;
+
+ link_status = msg->body[2] & 0xff;
+ duplex = (msg->body[2] >> 16) & 0xff;
+ autoneg = (msg->body[2] >> 24) & 0xff;
+
+ module = (msg->body[2] >> 8) & 0xff;
+ if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) {
+ printk(KERN_INFO "%s: unsupported cable: OUI 0x%x, length %d\n",
+ netdev->name, cable_OUI, cable_len);
+ } else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) {
+ printk(KERN_INFO "%s: unsupported cable length %d\n",
+ netdev->name, cable_len);
+ }
+
+ netxen_advert_link_change(adapter, link_status);
+
+ /* update link parameters */
+ if (duplex == LINKEVENT_FULL_DUPLEX)
+ adapter->link_duplex = DUPLEX_FULL;
+ else
+ adapter->link_duplex = DUPLEX_HALF;
+ adapter->module_type = module;
+ adapter->link_autoneg = autoneg;
+ adapter->link_speed = link_speed;
+}
+
+static void
+netxen_handle_fw_message(int desc_cnt, int index,
+ struct nx_host_sds_ring *sds_ring)
+{
+ nx_fw_msg_t msg;
+ struct status_desc *desc;
+ int i = 0, opcode;
+
+ while (desc_cnt > 0 && i < 8) {
+ desc = &sds_ring->desc_head[index];
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
+
+ index = get_next_index(index, sds_ring->num_desc);
+ desc_cnt--;
+ }
+
+ opcode = netxen_get_nic_msg_opcode(msg.body[0]);
+ switch (opcode) {
+ case NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
+ netxen_handle_linkevent(sds_ring->adapter, &msg);
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+netxen_alloc_rx_skb(struct netxen_adapter *adapter,
+ struct nx_host_rds_ring *rds_ring,
+ struct netxen_rx_buffer *buffer)
+{
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct pci_dev *pdev = adapter->pdev;
+
+ buffer->skb = dev_alloc_skb(rds_ring->skb_size);
+ if (!buffer->skb)
+ return 1;
+
+ skb = buffer->skb;
+
+ if (!adapter->ahw.cut_through)
+ skb_reserve(skb, 2);
+
+ dma = pci_map_single(pdev, skb->data,
+ rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(pdev, dma)) {
+ dev_kfree_skb_any(skb);
+ buffer->skb = NULL;
+ return 1;
+ }
+
+ buffer->skb = skb;
+ buffer->dma = dma;
+ buffer->state = NETXEN_BUFFER_BUSY;
+
+ return 0;
+}
+
+static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
+ struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum)
+{
+ struct netxen_rx_buffer *buffer;
+ struct sk_buff *skb;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
+ PCI_DMA_FROMDEVICE);
+
+ skb = buffer->skb;
+ if (!skb)
+ goto no_skb;
+
+ if (likely((adapter->netdev->features & NETIF_F_RXCSUM)
+ && cksum == STATUS_CKSUM_OK)) {
+ adapter->stats.csummed++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb->dev = adapter->netdev;
+
+ buffer->skb = NULL;
+no_skb:
+ buffer->state = NETXEN_BUFFER_FREE;
+ return skb;
+}
+
+static struct netxen_rx_buffer *
+netxen_process_rcv(struct netxen_adapter *adapter,
+ struct nx_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct netxen_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct nx_host_rds_ring *rds_ring;
+ int index, length, cksum, pkt_offset;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = netxen_get_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ length = netxen_get_sts_totallength(sts_data0);
+ cksum = netxen_get_sts_status(sts_data0);
+ pkt_offset = netxen_get_sts_pkt_offset(sts_data0);
+
+ skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ napi_gro_receive(&sds_ring->napi, skb);
+
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
+#define TCP_HDR_SIZE 20
+#define TCP_TS_OPTION_SIZE 12
+#define TCP_TS_HDR_SIZE (TCP_HDR_SIZE + TCP_TS_OPTION_SIZE)
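+
+/*
+ * A TCP header without options is 20 bytes; the timestamp option, padded
+ * with two NOPs to 12 bytes, brings a timestamped header to 32 bytes.
+ * These sizes locate the TCP payload within the aggregated LRO frame below.
+ */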
+
+static struct netxen_rx_buffer *
+netxen_process_lro(struct netxen_adapter *adapter,
+ struct nx_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0, u64 sts_data1)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct netxen_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct nx_host_rds_ring *rds_ring;
+ struct iphdr *iph;
+ struct tcphdr *th;
+ bool push, timestamp;
+ int l2_hdr_offset, l4_hdr_offset;
+ int index;
+ u16 lro_length, length, data_offset;
+ u32 seq_number;
+ u8 vhdr_len = 0;
+
+	if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = netxen_get_lro_sts_refhandle(sts_data0);
+	if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ timestamp = netxen_get_lro_sts_timestamp(sts_data0);
+ lro_length = netxen_get_lro_sts_length(sts_data0);
+ l2_hdr_offset = netxen_get_lro_sts_l2_hdr_offset(sts_data0);
+ l4_hdr_offset = netxen_get_lro_sts_l4_hdr_offset(sts_data0);
+ push = netxen_get_lro_sts_push_flag(sts_data0);
+ seq_number = netxen_get_lro_sts_seq_number(sts_data1);
+
+ skb = netxen_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return buffer;
+
+ if (timestamp)
+ data_offset = l4_hdr_offset + TCP_TS_HDR_SIZE;
+ else
+ data_offset = l4_hdr_offset + TCP_HDR_SIZE;
+
+ skb_put(skb, lro_length + data_offset);
+
+ skb_pull(skb, l2_hdr_offset);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (skb->protocol == htons(ETH_P_8021Q))
+ vhdr_len = VLAN_HLEN;
+ iph = (struct iphdr *)(skb->data + vhdr_len);
+ th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2));
+
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ iph->tot_len = htons(length);
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ th->psh = push;
+ th->seq = htonl(seq_number);
+
+ length = skb->len;
+
+ netif_receive_skb(skb);
+
+ adapter->stats.lro_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
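+/*
+ * Splice the per-sds-ring free list onto the rds ring's free list in a
+ * single O(1) list operation; callers hold rds_ring->lock.
+ */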
+#define netxen_merge_rx_buffers(list, head) \
+	do { list_splice_tail_init(list, head); } while (0)
+
+int
+netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
+{
+ struct netxen_adapter *adapter = sds_ring->adapter;
+
+ struct list_head *cur;
+
+ struct status_desc *desc;
+ struct netxen_rx_buffer *rxbuf;
+
+ u32 consumer = sds_ring->consumer;
+
+ int count = 0;
+ u64 sts_data0, sts_data1;
+ int opcode, ring = 0, desc_cnt;
+
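+	/*
+	 * Each status descriptor carries an owner bit; the host consumes
+	 * descriptors it owns and hands them back to the firmware by
+	 * rewriting the owner field to STATUS_OWNER_PHANTOM below.
+	 */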
+ while (count < max) {
+ desc = &sds_ring->desc_head[consumer];
+ sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+ if (!(sts_data0 & STATUS_OWNER_HOST))
+ break;
+
+ desc_cnt = netxen_get_sts_desc_cnt(sts_data0);
+
+ opcode = netxen_get_sts_opcode(sts_data0);
+
+ switch (opcode) {
+ case NETXEN_NIC_RXPKT_DESC:
+ case NETXEN_OLD_RXPKT_DESC:
+ case NETXEN_NIC_SYN_OFFLOAD:
+ ring = netxen_get_sts_type(sts_data0);
+ rxbuf = netxen_process_rcv(adapter, sds_ring,
+ ring, sts_data0);
+ break;
+ case NETXEN_NIC_LRO_DESC:
+ ring = netxen_get_lro_sts_type(sts_data0);
+ sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
+ rxbuf = netxen_process_lro(adapter, sds_ring,
+ ring, sts_data0, sts_data1);
+ break;
+ case NETXEN_NIC_RESPONSE_DESC:
+ netxen_handle_fw_message(desc_cnt, consumer, sds_ring);
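+			/* fall through: message descriptors carry no rx buffer */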
+ default:
+ goto skip;
+ }
+
+ WARN_ON(desc_cnt > 1);
+
+ if (rxbuf)
+ list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+
+skip:
+ for (; desc_cnt > 0; desc_cnt--) {
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] =
+ cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ }
+ count++;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ struct nx_host_rds_ring *rds_ring =
+ &adapter->recv_ctx.rds_rings[ring];
+
+ if (!list_empty(&sds_ring->free_list[ring])) {
+ list_for_each(cur, &sds_ring->free_list[ring]) {
+ rxbuf = list_entry(cur,
+ struct netxen_rx_buffer, list);
+ netxen_alloc_rx_skb(adapter, rds_ring, rxbuf);
+ }
+ spin_lock(&rds_ring->lock);
+ netxen_merge_rx_buffers(&sds_ring->free_list[ring],
+ &rds_ring->free_list);
+ spin_unlock(&rds_ring->lock);
+ }
+
+ netxen_post_rx_buffers_nodb(adapter, rds_ring);
+ }
+
+ if (count) {
+ sds_ring->consumer = consumer;
+ NXWRIO(adapter, sds_ring->crb_sts_consumer, consumer);
+ }
+
+ return count;
+}
+
+/* Process Command status ring */
+int netxen_process_cmd_ring(struct netxen_adapter *adapter)
+{
+ u32 sw_consumer, hw_consumer;
+ int count = 0, i;
+ struct netxen_cmd_buffer *buffer;
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+ struct netxen_skb_frag *frag;
+ int done = 0;
+ struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ if (!spin_trylock(&adapter->tx_clean_lock))
+ return 1;
+
+ sw_consumer = tx_ring->sw_consumer;
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+
+ while (sw_consumer != hw_consumer) {
+ buffer = &tx_ring->cmd_buf_arr[sw_consumer];
+ if (buffer->skb) {
+ frag = &buffer->frag_array[0];
+ pci_unmap_single(pdev, frag->dma, frag->length,
+ PCI_DMA_TODEVICE);
+ frag->dma = 0ULL;
+ for (i = 1; i < buffer->frag_count; i++) {
+ frag++; /* Get the next frag */
+ pci_unmap_page(pdev, frag->dma, frag->length,
+ PCI_DMA_TODEVICE);
+ frag->dma = 0ULL;
+ }
+
+ adapter->stats.xmitfinished++;
+ dev_kfree_skb_any(buffer->skb);
+ buffer->skb = NULL;
+ }
+
+ sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
+ if (++count >= MAX_STATUS_HANDLE)
+ break;
+ }
+
+ if (count && netif_running(netdev)) {
+ tx_ring->sw_consumer = sw_consumer;
+
+ smp_mb();
+
+ if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
+ if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
+ netif_wake_queue(netdev);
+ adapter->tx_timeo_cnt = 0;
+ }
+	/*
+	 * If everything is freed up to the consumer, check whether the ring
+	 * is full. If it is, more may need to be freed, so schedule the
+	 * callback again.
+	 *
+	 * This matters with two CPUs: one can be freeing while the other
+	 * fills the ring. If the ring is full when we leave here and the
+	 * card has already interrupted the host, the host can miss that
+	 * interrupt.
+	 *
+	 * A race window remains in which the host could still miss an
+	 * interrupt; the card has to take care of that case.
+	 */
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+ done = (sw_consumer == hw_consumer);
+ spin_unlock(&adapter->tx_clean_lock);
+
+ return done;
+}
+
+void
+netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
+ struct nx_host_rds_ring *rds_ring)
+{
+ struct rcv_desc *pdesc;
+ struct netxen_rx_buffer *buffer;
+ int producer, count = 0;
+ netxen_ctx_msg msg = 0;
+ struct list_head *head;
+
+ producer = rds_ring->producer;
+
+ head = &rds_ring->free_list;
+ while (!list_empty(head)) {
+
+ buffer = list_entry(head->next, struct netxen_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+
+ count++;
+ list_del(&buffer->list);
+
+ /* make a rcv descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+ pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+
+ if (count) {
+ rds_ring->producer = producer;
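+		/*
+		 * The mask assumes num_desc is a power of two; producer - 1
+		 * is the last descriptor actually posted.
+		 */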
+ NXWRIO(adapter, rds_ring->crb_rcv_producer,
+ (producer-1) & (rds_ring->num_desc-1));
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ /*
+ * Write a doorbell msg to tell phanmon of change in
+ * receive ring producer
+ * Only for firmware version < 4.0.0
+ */
+ netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
+ netxen_set_msg_privid(msg);
+ netxen_set_msg_count(msg,
+ ((producer - 1) &
+ (rds_ring->num_desc - 1)));
+ netxen_set_msg_ctxid(msg, adapter->portnum);
+ netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
+ NXWRIO(adapter, DB_NORMALIZE(adapter,
+ NETXEN_RCV_PRODUCER_OFFSET), msg);
+ }
+ }
+}
+
+static void
+netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
+ struct nx_host_rds_ring *rds_ring)
+{
+ struct rcv_desc *pdesc;
+ struct netxen_rx_buffer *buffer;
+ int producer, count = 0;
+ struct list_head *head;
+
+ if (!spin_trylock(&rds_ring->lock))
+ return;
+
+ producer = rds_ring->producer;
+
+ head = &rds_ring->free_list;
+ while (!list_empty(head)) {
+
+ buffer = list_entry(head->next, struct netxen_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+
+ count++;
+ list_del(&buffer->list);
+
+ /* make a rcv descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+
+ if (count) {
+ rds_ring->producer = producer;
+ NXWRIO(adapter, rds_ring->crb_rcv_producer,
+ (producer - 1) & (rds_ring->num_desc - 1));
+ }
+ spin_unlock(&rds_ring->lock);
+}
+
+void netxen_nic_clear_stats(struct netxen_adapter *adapter)
+{
+ memset(&adapter->stats, 0, sizeof(adapter->stats));
+}
+
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
new file mode 100644
index 000000000000..e2ba78be1c2a
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -0,0 +1,3161 @@
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include "netxen_nic_hw.h"
+
+#include "netxen_nic.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/sysfs.h>
+#include <linux/aer.h>
+
+MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Intelligent Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
+MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
+
+char netxen_nic_driver_name[] = "netxen_nic";
+static char netxen_nic_driver_string[] = "QLogic/NetXen Network Driver v"
+ NETXEN_NIC_LINUX_VERSIONID;
+
+static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
+
+/* Default to restricted 1G auto-neg mode */
+static int wol_port_mode = 5;
+
+static int use_msi = 1;
+
+static int use_msi_x = 1;
+
+static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
+module_param(auto_fw_reset, int, 0644);
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
+
+static int __devinit netxen_nic_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+static void __devexit netxen_nic_remove(struct pci_dev *pdev);
+static int netxen_nic_open(struct net_device *netdev);
+static int netxen_nic_close(struct net_device *netdev);
+static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *,
+ struct net_device *);
+static void netxen_tx_timeout(struct net_device *netdev);
+static void netxen_tx_timeout_task(struct work_struct *work);
+static void netxen_fw_poll_work(struct work_struct *work);
+static void netxen_schedule_work(struct netxen_adapter *adapter,
+ work_func_t func, int delay);
+static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
+static int netxen_nic_poll(struct napi_struct *napi, int budget);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void netxen_nic_poll_controller(struct net_device *netdev);
+#endif
+
+static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
+static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
+static void netxen_create_diag_entries(struct netxen_adapter *adapter);
+static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
+
+static int nx_dev_request_aer(struct netxen_adapter *adapter);
+static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
+static int netxen_can_start_firmware(struct netxen_adapter *adapter);
+
+static irqreturn_t netxen_intr(int irq, void *data);
+static irqreturn_t netxen_msi_intr(int irq, void *data);
+static irqreturn_t netxen_msix_intr(int irq, void *data);
+
+static void netxen_free_vlan_ip_list(struct netxen_adapter *);
+static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
+static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
+static int netxen_nic_set_mac(struct net_device *netdev, void *p);
+
+/* PCI Device ID Table */
+#define ENTRY(device) \
+ {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
+ .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
+
+static DEFINE_PCI_DEVICE_TABLE(netxen_pci_tbl) = {
+ ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
+ ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
+ ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
+ ENTRY(PCI_DEVICE_ID_NX2031_IMEZ),
+ ENTRY(PCI_DEVICE_ID_NX2031_HMEZ),
+ ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
+ ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
+ ENTRY(PCI_DEVICE_ID_NX3031),
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
+
+static uint32_t crb_cmd_producer[4] = {
+ CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1,
+ CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3
+};
+
+void
+netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
+ struct nx_host_tx_ring *tx_ring)
+{
+ NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer);
+}
+
+static uint32_t crb_cmd_consumer[4] = {
+ CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1,
+ CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3
+};
+
+static inline void
+netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
+ struct nx_host_tx_ring *tx_ring)
+{
+ NXWRIO(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer);
+}
+
+static uint32_t msi_tgt_status[8] = {
+ ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
+ ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
+ ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
+ ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
+};
+
+static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;
+
+static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring)
+{
+ struct netxen_adapter *adapter = sds_ring->adapter;
+
+ NXWRIO(adapter, sds_ring->crb_intr_mask, 0);
+}
+
+static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring)
+{
+ struct netxen_adapter *adapter = sds_ring->adapter;
+
+ NXWRIO(adapter, sds_ring->crb_intr_mask, 0x1);
+
+ if (!NETXEN_IS_MSI_FAMILY(adapter))
+ NXWRIO(adapter, adapter->tgt_mask_reg, 0xfbff);
+}
+
+static int
+netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
+{
+ int size = sizeof(struct nx_host_sds_ring) * count;
+
+ recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
+
+ return recv_ctx->sds_rings == NULL;
+}
+
+static void
+netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
+{
+	kfree(recv_ctx->sds_rings);
+
+ recv_ctx->sds_rings = NULL;
+}
+
+static int
+netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ return -ENOMEM;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_add(netdev, &sds_ring->napi,
+ netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
+ }
+
+ return 0;
+}
+
+static void
+netxen_napi_del(struct netxen_adapter *adapter)
+{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_del(&sds_ring->napi);
+ }
+
+ netxen_free_sds_rings(&adapter->recv_ctx);
+}
+
+static void
+netxen_napi_enable(struct netxen_adapter *adapter)
+{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ napi_enable(&sds_ring->napi);
+ netxen_nic_enable_int(sds_ring);
+ }
+}
+
+static void
+netxen_napi_disable(struct netxen_adapter *adapter)
+{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netxen_nic_disable_int(sds_ring);
+ napi_synchronize(&sds_ring->napi);
+ napi_disable(&sds_ring->napi);
+ }
+}
+
+static int nx_set_dma_mask(struct netxen_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ uint64_t mask, cmask;
+
+ adapter->pci_using_dac = 0;
+
+ mask = DMA_BIT_MASK(32);
+ cmask = DMA_BIT_MASK(32);
+
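+	/*
+	 * NX2031 (P2) can address 35 bits (kept at 32 on IA64); NX3031 (P3)
+	 * can address 39 bits for both streaming and coherent mappings.
+	 */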
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+#ifndef CONFIG_IA64
+ mask = DMA_BIT_MASK(35);
+#endif
+ } else {
+ mask = DMA_BIT_MASK(39);
+ cmask = mask;
+ }
+
+ if (pci_set_dma_mask(pdev, mask) == 0 &&
+ pci_set_consistent_dma_mask(pdev, cmask) == 0) {
+ adapter->pci_using_dac = 1;
+ return 0;
+ }
+
+ return -EIO;
+}
+
+/* Update addressable range if firmware supports it */
+static int
+nx_update_dma_mask(struct netxen_adapter *adapter)
+{
+ int change, shift, err;
+ uint64_t mask, old_mask, old_cmask;
+ struct pci_dev *pdev = adapter->pdev;
+
+ change = 0;
+
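+	/* firmware reports extra addressable bits beyond 32 in CRB_DMA_SHIFT */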
+ shift = NXRD32(adapter, CRB_DMA_SHIFT);
+ if (shift > 32)
+ return 0;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9))
+ change = 1;
+ else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4))
+ change = 1;
+
+ if (change) {
+ old_mask = pdev->dma_mask;
+ old_cmask = pdev->dev.coherent_dma_mask;
+
+ mask = DMA_BIT_MASK(32+shift);
+
+ err = pci_set_dma_mask(pdev, mask);
+ if (err)
+ goto err_out;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+
+ err = pci_set_consistent_dma_mask(pdev, mask);
+ if (err)
+ goto err_out;
+ }
+ dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
+ }
+
+ return 0;
+
+err_out:
+ pci_set_dma_mask(pdev, old_mask);
+ pci_set_consistent_dma_mask(pdev, old_cmask);
+ return err;
+}
+
+static int
+netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
+{
+ u32 val, timeout;
+
+ if (first_boot == 0x55555555) {
+ /* This is the first boot after power up */
+ NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
+
+ if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 0;
+
+ /* PCI bus master workaround */
+ first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
+ if (!(first_boot & 0x4)) {
+ first_boot |= 0x4;
+ NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot);
+ NXRD32(adapter, NETXEN_PCIE_REG(0x4));
+ }
+
+ /* This is the first boot after power up */
+ first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
+ if (first_boot != 0x80000f) {
+ /* clear the register for future unloads/loads */
+ NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0);
+ return -EIO;
+ }
+
+ /* Start P2 boot loader */
+ val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
+ timeout = 0;
+ do {
+ msleep(1);
+ val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
+
+ if (++timeout > 5000)
+ return -EIO;
+
+ } while (val == NETXEN_BDINFO_MAGIC);
+ }
+ return 0;
+}
+
+static void netxen_set_port_mode(struct netxen_adapter *adapter)
+{
+ u32 val, data;
+
+ val = adapter->ahw.board_type;
+ if ((val == NETXEN_BRDTYPE_P3_HMEZ) ||
+ (val == NETXEN_BRDTYPE_P3_XG_LOM)) {
+ if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
+ data = NETXEN_PORT_MODE_802_3_AP;
+ NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
+ } else if (port_mode == NETXEN_PORT_MODE_XG) {
+ data = NETXEN_PORT_MODE_XG;
+ NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
+ } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) {
+ data = NETXEN_PORT_MODE_AUTO_NEG_1G;
+ NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
+ } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) {
+ data = NETXEN_PORT_MODE_AUTO_NEG_XG;
+ NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
+ } else {
+ data = NETXEN_PORT_MODE_AUTO_NEG;
+ NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
+ }
+
+ if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) &&
+ (wol_port_mode != NETXEN_PORT_MODE_XG) &&
+ (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) &&
+ (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) {
+ wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG;
+ }
+ NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode);
+ }
+}
+
+#define PCI_CAP_ID_GEN 0x10
+
+static void netxen_pcie_strap_init(struct netxen_adapter *adapter)
+{
+ u32 pdevfuncsave;
+ u32 c8c9value = 0;
+ u32 chicken = 0;
+ u32 control = 0;
+ int i, pos;
+ struct pci_dev *pdev;
+
+ pdev = adapter->pdev;
+
+ chicken = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3));
+ /* clear chicken3.25:24 */
+ chicken &= 0xFCFFFFFF;
+	/*
+	 * Pick the c8c9 strap value: 0xF1000 for Gen2; for Gen1, 0xF1020 on
+	 * B0 silicon and nothing otherwise.
+	 */
+ pos = pci_find_capability(pdev, PCI_CAP_ID_GEN);
+ if (pos == 0xC0) {
+ pci_read_config_dword(pdev, pos + 0x10, &control);
+ if ((control & 0x000F0000) != 0x00020000) {
+ /* set chicken3.24 if gen1 */
+ chicken |= 0x01000000;
+ }
+ dev_info(&adapter->pdev->dev, "Gen2 strapping detected\n");
+ c8c9value = 0xF1000;
+ } else {
+ /* set chicken3.24 if gen1 */
+ chicken |= 0x01000000;
+ dev_info(&adapter->pdev->dev, "Gen1 strapping detected\n");
+ if (adapter->ahw.revision_id == NX_P3_B0)
+ c8c9value = 0xF1020;
+ else
+ c8c9value = 0;
+ }
+
+ NXWR32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3), chicken);
+
+ if (!c8c9value)
+ return;
+
+ pdevfuncsave = pdev->devfn;
+ if (pdevfuncsave & 0x07)
+ return;
+
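+	/*
+	 * Only function 0 gets this far; temporarily rewrite devfn so the
+	 * strap value is programmed on all eight functions, then restore it.
+	 */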
+ for (i = 0; i < 8; i++) {
+ pci_read_config_dword(pdev, pos + 8, &control);
+ pci_read_config_dword(pdev, pos + 8, &control);
+ pci_write_config_dword(pdev, pos + 8, c8c9value);
+ pdev->devfn++;
+ }
+ pdev->devfn = pdevfuncsave;
+}
+
+static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
+{
+ u32 control;
+ int pos;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_dword(pdev, pos, &control);
+ if (enable)
+ control |= PCI_MSIX_FLAGS_ENABLE;
+ else
+ control = 0;
+ pci_write_config_dword(pdev, pos, control);
+ }
+}
+
+static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ adapter->msix_entries[i].entry = i;
+}
+
+static int
+netxen_read_mac_addr(struct netxen_adapter *adapter)
+{
+ int i;
+ unsigned char *p;
+ u64 mac_addr;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
+ return -EIO;
+ } else {
+ if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0)
+ return -EIO;
+ }
+
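+	/* the MAC comes back in reversed byte order */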
+ p = (unsigned char *)&mac_addr;
+ for (i = 0; i < 6; i++)
+ netdev->dev_addr[i] = *(p + 5 - i);
+
+ memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
+ memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
+
+ /* set station address */
+
+ if (!is_valid_ether_addr(netdev->perm_addr))
+ dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
+
+ return 0;
+}
+
+static int netxen_nic_set_mac(struct net_device *netdev, void *p)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ if (netif_running(netdev)) {
+ netif_device_detach(netdev);
+ netxen_napi_disable(adapter);
+ }
+
+ memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ adapter->macaddr_set(adapter, addr->sa_data);
+
+ if (netif_running(netdev)) {
+ netif_device_attach(netdev);
+ netxen_napi_enable(adapter);
+ }
+ return 0;
+}
+
+static void netxen_set_multicast_list(struct net_device *dev)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+
+ adapter->set_multi(dev);
+}
+
+static u32 netxen_fix_features(struct net_device *dev, u32 features)
+{
+ if (!(features & NETIF_F_RXCSUM)) {
+ netdev_info(dev, "disabling LRO as RXCSUM is off\n");
+
+ features &= ~NETIF_F_LRO;
+ }
+
+ return features;
+}
+
+static int netxen_set_features(struct net_device *dev, u32 features)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ int hw_lro;
+
+ if (!((dev->features ^ features) & NETIF_F_LRO))
+ return 0;
+
+ hw_lro = (features & NETIF_F_LRO) ? NETXEN_NIC_LRO_ENABLED
+ : NETXEN_NIC_LRO_DISABLED;
+
+ if (netxen_config_hw_lro(adapter, hw_lro))
+ return -EIO;
+
+ if (!(features & NETIF_F_LRO) && netxen_send_lro_cleanup(adapter))
+ return -EIO;
+
+ return 0;
+}
+
+static const struct net_device_ops netxen_netdev_ops = {
+ .ndo_open = netxen_nic_open,
+ .ndo_stop = netxen_nic_close,
+ .ndo_start_xmit = netxen_nic_xmit_frame,
+ .ndo_get_stats64 = netxen_nic_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = netxen_set_multicast_list,
+ .ndo_set_mac_address = netxen_nic_set_mac,
+ .ndo_change_mtu = netxen_nic_change_mtu,
+ .ndo_tx_timeout = netxen_tx_timeout,
+ .ndo_fix_features = netxen_fix_features,
+ .ndo_set_features = netxen_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = netxen_nic_poll_controller,
+#endif
+};
+
+static void
+netxen_setup_intr(struct netxen_adapter *adapter)
+{
+ struct netxen_legacy_intr_set *legacy_intrp;
+ struct pci_dev *pdev = adapter->pdev;
+ int err, num_msix;
+
+ if (adapter->rss_supported) {
+ num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
+ MSIX_ENTRIES_PER_ADAPTER : 2;
+ } else
+ num_msix = 1;
+
+ adapter->max_sds_rings = 1;
+
+ adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
+
+ if (adapter->ahw.revision_id >= NX_P3_B0)
+ legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
+ else
+ legacy_intrp = &legacy_intr[0];
+
+ adapter->int_vec_bit = legacy_intrp->int_vec_bit;
+ adapter->tgt_status_reg = netxen_get_ioaddr(adapter,
+ legacy_intrp->tgt_status_reg);
+ adapter->tgt_mask_reg = netxen_get_ioaddr(adapter,
+ legacy_intrp->tgt_mask_reg);
+ adapter->pci_int_reg = netxen_get_ioaddr(adapter,
+ legacy_intrp->pci_int_reg);
+ adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR);
+
+ if (adapter->ahw.revision_id >= NX_P3_B1)
+ adapter->crb_int_state_reg = netxen_get_ioaddr(adapter,
+ ISR_INT_STATE_REG);
+ else
+ adapter->crb_int_state_reg = netxen_get_ioaddr(adapter,
+ CRB_INT_VECTOR);
+
+ netxen_set_msix_bit(pdev, 0);
+
+ if (adapter->msix_supported) {
+
+ netxen_init_msix_entries(adapter, num_msix);
+ err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
+ if (err == 0) {
+ adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
+ netxen_set_msix_bit(pdev, 1);
+
+ if (adapter->rss_supported)
+ adapter->max_sds_rings = num_msix;
+
+ dev_info(&pdev->dev, "using msi-x interrupts\n");
+ return;
+ }
+
+ if (err > 0)
+ pci_disable_msix(pdev);
+
+ /* fall through for msi */
+ }
+
+ if (use_msi && !pci_enable_msi(pdev)) {
+ adapter->flags |= NETXEN_NIC_MSI_ENABLED;
+ adapter->tgt_status_reg = netxen_get_ioaddr(adapter,
+ msi_tgt_status[adapter->ahw.pci_func]);
+ dev_info(&pdev->dev, "using msi interrupts\n");
+ adapter->msix_entries[0].vector = pdev->irq;
+ return;
+ }
+
+ dev_info(&pdev->dev, "using legacy interrupts\n");
+ adapter->msix_entries[0].vector = pdev->irq;
+}
+
+static void
+netxen_teardown_intr(struct netxen_adapter *adapter)
+{
+ if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
+ pci_disable_msix(adapter->pdev);
+ if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
+ pci_disable_msi(adapter->pdev);
+}
+
+static void
+netxen_cleanup_pci_map(struct netxen_adapter *adapter)
+{
+ if (adapter->ahw.db_base != NULL)
+ iounmap(adapter->ahw.db_base);
+ if (adapter->ahw.pci_base0 != NULL)
+ iounmap(adapter->ahw.pci_base0);
+ if (adapter->ahw.pci_base1 != NULL)
+ iounmap(adapter->ahw.pci_base1);
+ if (adapter->ahw.pci_base2 != NULL)
+ iounmap(adapter->ahw.pci_base2);
+}
+
+static int
+netxen_setup_pci_map(struct netxen_adapter *adapter)
+{
+ void __iomem *db_ptr = NULL;
+
+ resource_size_t mem_base, db_base;
+ unsigned long mem_len, db_len = 0;
+
+ struct pci_dev *pdev = adapter->pdev;
+ int pci_func = adapter->ahw.pci_func;
+ struct netxen_hardware_context *ahw = &adapter->ahw;
+
+ int err = 0;
+
+ /*
+ * Set the CRB window to invalid. If any register in window 0 is
+ * accessed it should set the window to 0 and then reset it to 1.
+ */
+ adapter->ahw.crb_win = -1;
+ adapter->ahw.ocm_win = -1;
+
+ /* remap phys address */
+ mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
+ mem_len = pci_resource_len(pdev, 0);
+
+ /* 128 Meg of memory */
+ if (mem_len == NETXEN_PCI_128MB_SIZE) {
+
+ ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
+ ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
+ SECOND_PAGE_GROUP_SIZE);
+ ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
+ THIRD_PAGE_GROUP_SIZE);
+ if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL ||
+ ahw->pci_base2 == NULL) {
+ dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+ err = -EIO;
+ goto err_out;
+ }
+
+ ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE;
+
+ } else if (mem_len == NETXEN_PCI_32MB_SIZE) {
+
+ ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
+ ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
+ SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
+ if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) {
+ dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+ err = -EIO;
+ goto err_out;
+ }
+
+ } else if (mem_len == NETXEN_PCI_2MB_SIZE) {
+
+ ahw->pci_base0 = pci_ioremap_bar(pdev, 0);
+ if (ahw->pci_base0 == NULL) {
+ dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+ return -EIO;
+ }
+ ahw->pci_len0 = mem_len;
+ } else {
+ return -EIO;
+ }
+
+ netxen_setup_hwops(adapter);
+
+ dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
+
+ if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
+ adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
+ NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
+
+ } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
+ NETXEN_PCIX_PS_REG(PCIE_MN_WINDOW_REG(pci_func)));
+ }
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ goto skip_doorbell;
+
+ db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
+ db_len = pci_resource_len(pdev, 4);
+
+ if (db_len == 0) {
+ printk(KERN_ERR "%s: doorbell is disabled\n",
+ netxen_nic_driver_name);
+ err = -EIO;
+ goto err_out;
+ }
+
+ db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
+ if (!db_ptr) {
+		printk(KERN_ERR "%s: Failed to allocate doorbell map\n",
+ netxen_nic_driver_name);
+ err = -EIO;
+ goto err_out;
+ }
+
+skip_doorbell:
+ adapter->ahw.db_base = db_ptr;
+ adapter->ahw.db_len = db_len;
+ return 0;
+
+err_out:
+ netxen_cleanup_pci_map(adapter);
+ return err;
+}
+
+static void
+netxen_check_options(struct netxen_adapter *adapter)
+{
+ u32 fw_major, fw_minor, fw_build;
+ char brd_name[NETXEN_MAX_SHORT_NAME];
+ char serial_num[32];
+ int i, offset, val;
+ int *ptr32;
+ struct pci_dev *pdev = adapter->pdev;
+
+ adapter->driver_mismatch = 0;
+
+ ptr32 = (int *)&serial_num;
+ offset = NX_FW_SERIAL_NUM_OFFSET;
+ for (i = 0; i < 8; i++) {
+ if (netxen_rom_fast_read(adapter, offset, &val) == -1) {
+ dev_err(&pdev->dev, "error reading board info\n");
+ adapter->driver_mismatch = 1;
+ return;
+ }
+ ptr32[i] = cpu_to_le32(val);
+ offset += sizeof(u32);
+ }
+
+ fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
+ fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
+ fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
+
+ adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+ if (adapter->portnum == 0) {
+ get_brd_name_by_type(adapter->ahw.board_type, brd_name);
+
+ pr_info("%s: %s Board S/N %s Chip rev 0x%x\n",
+ module_name(THIS_MODULE),
+ brd_name, serial_num, adapter->ahw.revision_id);
+ }
+
+ if (adapter->fw_version < NETXEN_VERSION_CODE(3, 4, 216)) {
+ adapter->driver_mismatch = 1;
+ dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
+ fw_major, fw_minor, fw_build);
+ return;
+ }
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ i = NXRD32(adapter, NETXEN_SRE_MISC);
+ adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
+ }
+
+ dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
+ fw_major, fw_minor, fw_build,
+ adapter->ahw.cut_through ? "cut-through" : "legacy");
+
+ if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222))
+ adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
+
+ if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ } else if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ }
+
+ adapter->msix_supported = 0;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ adapter->msix_supported = !!use_msi_x;
+ adapter->rss_supported = !!use_msi_x;
+ } else {
+ u32 flashed_ver = 0;
+ netxen_rom_fast_read(adapter,
+ NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
+ flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
+
+ if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) {
+ switch (adapter->ahw.board_type) {
+ case NETXEN_BRDTYPE_P2_SB31_10G:
+ case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+ adapter->msix_supported = !!use_msi_x;
+ adapter->rss_supported = !!use_msi_x;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS;
+ adapter->max_rds_rings = 3;
+ } else {
+ adapter->num_lro_rxd = 0;
+ adapter->max_rds_rings = 2;
+ }
+}
+
+static int
+netxen_start_firmware(struct netxen_adapter *adapter)
+{
+ int val, err, first_boot;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* required for NX2031 dummy dma */
+ err = nx_set_dma_mask(adapter);
+ if (err)
+ return err;
+
+ if (!netxen_can_start_firmware(adapter))
+ goto wait_init;
+
+ first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
+
+ err = netxen_check_hw_init(adapter, first_boot);
+ if (err) {
+		dev_err(&pdev->dev, "error in HW init sequence\n");
+ return err;
+ }
+
+ netxen_request_firmware(adapter);
+
+ err = netxen_need_fw_reset(adapter);
+ if (err < 0)
+ goto err_out;
+ if (err == 0)
+ goto pcie_strap_init;
+
+ if (first_boot != 0x55555555) {
+ NXWR32(adapter, CRB_CMDPEG_STATE, 0);
+ netxen_pinit_from_rom(adapter);
+ msleep(1);
+ }
+
+ NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
+ NXWR32(adapter, NETXEN_PEG_HALT_STATUS1, 0);
+ NXWR32(adapter, NETXEN_PEG_HALT_STATUS2, 0);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netxen_set_port_mode(adapter);
+
+ err = netxen_load_firmware(adapter);
+ if (err)
+ goto err_out;
+
+ netxen_release_firmware(adapter);
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+
+ /* Initialize multicast addr pool owners */
+ val = 0x7654;
+ if (adapter->ahw.port_type == NETXEN_NIC_XGBE)
+ val |= 0x0f000000;
+ NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
+
+ }
+
+ err = netxen_init_dummy_dma(adapter);
+ if (err)
+ goto err_out;
+
+ /*
+ * Tell the hardware our version number.
+ */
+ val = (_NETXEN_NIC_LINUX_MAJOR << 16)
+ | ((_NETXEN_NIC_LINUX_MINOR << 8))
+ | (_NETXEN_NIC_LINUX_SUBVERSION);
+ NXWR32(adapter, CRB_DRIVER_VERSION, val);
+
+pcie_strap_init:
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netxen_pcie_strap_init(adapter);
+
+wait_init:
+ /* Handshake with the card before we register the devices. */
+ err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
+ if (err) {
+ netxen_free_dummy_dma(adapter);
+ goto err_out;
+ }
+
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_READY);
+
+ nx_update_dma_mask(adapter);
+
+ netxen_check_options(adapter);
+
+ adapter->need_fw_reset = 0;
+
+ /* fall through and release firmware */
+
+err_out:
+ netxen_release_firmware(adapter);
+ return err;
+}
+
+static int
+netxen_nic_request_irq(struct netxen_adapter *adapter)
+{
+ irq_handler_t handler;
+ struct nx_host_sds_ring *sds_ring;
+ int err, ring;
+
+ unsigned long flags = 0;
+ struct net_device *netdev = adapter->netdev;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
+ handler = netxen_msix_intr;
+ else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
+ handler = netxen_msi_intr;
+ else {
+ flags |= IRQF_SHARED;
+ handler = netxen_intr;
+ }
+ adapter->irq = netdev->irq;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
+ err = request_irq(sds_ring->irq, handler,
+ flags, sds_ring->name, sds_ring);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+netxen_nic_free_irq(struct netxen_adapter *adapter)
+{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
+
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ free_irq(sds_ring->irq, sds_ring);
+ }
+}
+
+static void
+netxen_nic_init_coalesce_defaults(struct netxen_adapter *adapter)
+{
+ adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT;
+ adapter->coal.normal.data.rx_time_us =
+ NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US;
+ adapter->coal.normal.data.rx_packets =
+ NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ adapter->coal.normal.data.tx_time_us =
+ NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US;
+ adapter->coal.normal.data.tx_packets =
+ NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS;
+}
+
+/* with rtnl_lock */
+static int
+__netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+ int err;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return -EIO;
+
+ err = adapter->init_port(adapter, adapter->physical_port);
+ if (err) {
+ printk(KERN_ERR "%s: Failed to initialize port %d\n",
+ netxen_nic_driver_name, adapter->portnum);
+ return err;
+ }
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ adapter->macaddr_set(adapter, adapter->mac_addr);
+
+ adapter->set_multi(netdev);
+ adapter->set_mtu(adapter, netdev->mtu);
+
+ adapter->ahw.linkup = 0;
+
+ if (adapter->max_sds_rings > 1)
+ netxen_config_rss(adapter, 1);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netxen_config_intr_coalesce(adapter);
+
+ if (netdev->features & NETIF_F_LRO)
+ netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_ENABLED);
+
+ netxen_napi_enable(adapter);
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
+ netxen_linkevent_request(adapter, 1);
+ else
+ netxen_nic_set_link_parameters(adapter);
+
+ set_bit(__NX_DEV_UP, &adapter->state);
+ return 0;
+}
+
+/* Used during resume and by the firmware recovery path. */
+
+static inline int
+netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+ int err = 0;
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ err = __netxen_nic_up(adapter, netdev);
+ rtnl_unlock();
+
+ return err;
+}
+
+/* with rtnl_lock */
+static void
+__netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
+ if (!test_and_clear_bit(__NX_DEV_UP, &adapter->state))
+ return;
+
+ smp_mb();
+ spin_lock(&adapter->tx_clean_lock);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
+ netxen_linkevent_request(adapter, 0);
+
+ if (adapter->stop_port)
+ adapter->stop_port(adapter);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netxen_p3_free_mac_list(adapter);
+
+ adapter->set_promisc(adapter, NETXEN_NIU_NON_PROMISC_MODE);
+
+ netxen_napi_disable(adapter);
+
+ netxen_release_tx_buffers(adapter);
+ spin_unlock(&adapter->tx_clean_lock);
+}
+
+/* Used during suspend and by the firmware recovery path. */
+
+static inline void
+netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+ rtnl_lock();
+ if (netif_running(netdev))
+ __netxen_nic_down(adapter, netdev);
+ rtnl_unlock();
+}
+
+static int
+netxen_nic_attach(struct netxen_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err, ring;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_tx_ring *tx_ring;
+
+ if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
+ return 0;
+
+ err = netxen_init_firmware(adapter);
+ if (err)
+ return err;
+
+ err = netxen_napi_add(adapter, netdev);
+ if (err)
+ return err;
+
+ err = netxen_alloc_sw_resources(adapter);
+ if (err) {
+ printk(KERN_ERR "%s: Error in setting sw resources\n",
+ netdev->name);
+ return err;
+ }
+
+ err = netxen_alloc_hw_resources(adapter);
+ if (err) {
+ printk(KERN_ERR "%s: Error in setting hw resources\n",
+ netdev->name);
+ goto err_out_free_sw;
+ }
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ tx_ring = adapter->tx_ring;
+ tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
+ crb_cmd_producer[adapter->portnum]);
+ tx_ring->crb_cmd_consumer = netxen_get_ioaddr(adapter,
+ crb_cmd_consumer[adapter->portnum]);
+
+ tx_ring->producer = 0;
+ tx_ring->sw_consumer = 0;
+
+ netxen_nic_update_cmd_producer(adapter, tx_ring);
+ netxen_nic_update_cmd_consumer(adapter, tx_ring);
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx.rds_rings[ring];
+ netxen_post_rx_buffers(adapter, ring, rds_ring);
+ }
+
+ err = netxen_nic_request_irq(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "%s: failed to setup interrupt\n",
+ netdev->name);
+ goto err_out_free_rxbuf;
+ }
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netxen_nic_init_coalesce_defaults(adapter);
+
+ netxen_create_sysfs_entries(adapter);
+
+ adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
+ return 0;
+
+err_out_free_rxbuf:
+ netxen_release_rx_buffers(adapter);
+ netxen_free_hw_resources(adapter);
+err_out_free_sw:
+ netxen_free_sw_resources(adapter);
+ return err;
+}
+
+static void
+netxen_nic_detach(struct netxen_adapter *adapter)
+{
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
+ netxen_remove_sysfs_entries(adapter);
+
+ netxen_free_hw_resources(adapter);
+ netxen_release_rx_buffers(adapter);
+ netxen_nic_free_irq(adapter);
+ netxen_napi_del(adapter);
+ netxen_free_sw_resources(adapter);
+
+ adapter->is_up = 0;
+}
+
+int
+netxen_nic_reset_context(struct netxen_adapter *adapter)
+{
+ int err = 0;
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__NX_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __netxen_nic_down(adapter, netdev);
+
+ netxen_nic_detach(adapter);
+
+ if (netif_running(netdev)) {
+ err = netxen_nic_attach(adapter);
+ if (!err)
+ err = __netxen_nic_up(adapter, netdev);
+
+ if (err)
+ goto done;
+ }
+
+ netif_device_attach(netdev);
+ }
+
+done:
+ clear_bit(__NX_RESETTING, &adapter->state);
+ return err;
+}
+
+static int
+netxen_setup_netdev(struct netxen_adapter *adapter,
+ struct net_device *netdev)
+{
+ int err = 0;
+ struct pci_dev *pdev = adapter->pdev;
+
+ adapter->mc_enabled = 0;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ adapter->max_mc_count = 38;
+ else
+ adapter->max_mc_count = 16;
+
+ netdev->netdev_ops = &netxen_netdev_ops;
+ netdev->watchdog_timeo = 5*HZ;
+
+ netxen_nic_change_mtu(netdev, netdev->mtu);
+
+ SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
+
+ netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_RXCSUM;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netdev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+
+ netdev->vlan_features |= netdev->hw_features;
+
+ if (adapter->pci_using_dac) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX)
+ netdev->hw_features |= NETIF_F_HW_VLAN_TX;
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)
+ netdev->hw_features |= NETIF_F_LRO;
+
+ netdev->features |= netdev->hw_features;
+
+ netdev->irq = adapter->msix_entries[0].vector;
+
+ INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
+
+ if (netxen_read_mac_addr(adapter))
+ dev_warn(&pdev->dev, "failed to read mac addr\n");
+
+ netif_carrier_off(netdev);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register net device\n");
+ return err;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PCIEAER
+static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pci_dev *root = pdev->bus->self;
+ u32 aer_pos;
+
+ if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM &&
+ adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP)
+ return;
+
+ if (root->pcie_type != PCI_EXP_TYPE_ROOT_PORT)
+ return;
+
+ aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR);
+ if (!aer_pos)
+ return;
+
+ pci_write_config_dword(root, aer_pos + PCI_ERR_COR_MASK, 0xffff);
+}
+#endif
+
+static int __devinit
+netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev = NULL;
+ struct netxen_adapter *adapter = NULL;
+ int i = 0, err;
+ int pci_func_id = PCI_FUNC(pdev->devfn);
+ uint8_t revision_id;
+ u32 val;
+
+ if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
+ pr_warning("%s: chip revisions between 0x%x-0x%x "
+ "will not be enabled.\n",
+ module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
+ return -ENODEV;
+ }
+
+ if ((err = pci_enable_device(pdev)))
+ return err;
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ err = -ENODEV;
+ goto err_out_disable_pdev;
+ }
+
+ if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
+ goto err_out_disable_pdev;
+
+ if (NX_IS_REVISION_P3(pdev->revision))
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev(sizeof(struct netxen_adapter));
+	if (!netdev) {
+ dev_err(&pdev->dev, "failed to allocate net_device\n");
+ err = -ENOMEM;
+ goto err_out_free_res;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->ahw.pci_func = pci_func_id;
+
+ revision_id = pdev->revision;
+ adapter->ahw.revision_id = revision_id;
+
+ rwlock_init(&adapter->ahw.crb_lock);
+ spin_lock_init(&adapter->ahw.mem_lock);
+
+ spin_lock_init(&adapter->tx_clean_lock);
+ INIT_LIST_HEAD(&adapter->mac_list);
+ INIT_LIST_HEAD(&adapter->vlan_ip_list);
+
+ err = netxen_setup_pci_map(adapter);
+ if (err)
+ goto err_out_free_netdev;
+
+ /* This will be reset for mezz cards */
+ adapter->portnum = pci_func_id;
+
+ err = netxen_nic_get_board_info(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error getting board config info.\n");
+ goto err_out_iounmap;
+ }
+
+#ifdef CONFIG_PCIEAER
+ netxen_mask_aer_correctable(adapter);
+#endif
+
+ /* Mezz cards have PCI function 0,2,3 enabled */
+ switch (adapter->ahw.board_type) {
+ case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
+ case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
+ if (pci_func_id >= 2)
+ adapter->portnum = pci_func_id - 2;
+ break;
+ default:
+ break;
+ }
+
+ err = netxen_check_flash_fw_compatibility(adapter);
+ if (err)
+ goto err_out_iounmap;
+
+ if (adapter->portnum == 0) {
+ val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ if (val != 0xffffffff && val != 0) {
+ NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
+ adapter->need_fw_reset = 1;
+ }
+ }
+
+ err = netxen_start_firmware(adapter);
+ if (err)
+ goto err_out_decr_ref;
+
+ /*
+ * See if the firmware gave us a virtual-physical port mapping.
+ */
+ adapter->physical_port = adapter->portnum;
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ i = NXRD32(adapter, CRB_V2P(adapter->portnum));
+ if (i != 0x55555555)
+ adapter->physical_port = i;
+ }
+
+ netxen_nic_clear_stats(adapter);
+
+ netxen_setup_intr(adapter);
+
+ err = netxen_setup_netdev(adapter, netdev);
+ if (err)
+ goto err_out_disable_msi;
+
+ pci_set_drvdata(pdev, adapter);
+
+ netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
+
+ switch (adapter->ahw.port_type) {
+ case NETXEN_NIC_GBE:
+ dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ case NETXEN_NIC_XGBE:
+ dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ }
+
+ netxen_create_diag_entries(adapter);
+
+ return 0;
+
+err_out_disable_msi:
+ netxen_teardown_intr(adapter);
+
+ netxen_free_dummy_dma(adapter);
+
+err_out_decr_ref:
+ nx_decr_dev_ref_cnt(adapter);
+
+err_out_iounmap:
+ netxen_cleanup_pci_map(adapter);
+
+err_out_free_netdev:
+ free_netdev(netdev);
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_disable_pdev:
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void __devexit netxen_nic_remove(struct pci_dev *pdev)
+{
+ struct netxen_adapter *adapter;
+ struct net_device *netdev;
+
+ adapter = pci_get_drvdata(pdev);
+ if (adapter == NULL)
+ return;
+
+ netdev = adapter->netdev;
+
+ netxen_cancel_fw_work(adapter);
+
+ unregister_netdev(netdev);
+
+ cancel_work_sync(&adapter->tx_timeout_task);
+
+ netxen_free_vlan_ip_list(adapter);
+ netxen_nic_detach(adapter);
+
+ nx_decr_dev_ref_cnt(adapter);
+
+ if (adapter->portnum == 0)
+ netxen_free_dummy_dma(adapter);
+
+ clear_bit(__NX_RESETTING, &adapter->state);
+
+ netxen_teardown_intr(adapter);
+
+ netxen_remove_diag_entries(adapter);
+
+ netxen_cleanup_pci_map(adapter);
+
+ netxen_release_firmware(adapter);
+
+ if (NX_IS_REVISION_P3(pdev->revision))
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ free_netdev(netdev);
+}
+
+static void netxen_nic_detach_func(struct netxen_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+
+ netxen_cancel_fw_work(adapter);
+
+ if (netif_running(netdev))
+ netxen_nic_down(adapter, netdev);
+
+ cancel_work_sync(&adapter->tx_timeout_task);
+
+ netxen_nic_detach(adapter);
+
+ if (adapter->portnum == 0)
+ netxen_free_dummy_dma(adapter);
+
+ nx_decr_dev_ref_cnt(adapter);
+
+ clear_bit(__NX_RESETTING, &adapter->state);
+}
+
+static int netxen_nic_attach_func(struct pci_dev *pdev)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ adapter->ahw.crb_win = -1;
+ adapter->ahw.ocm_win = -1;
+
+ err = netxen_start_firmware(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to start firmware\n");
+ return err;
+ }
+
+ if (netif_running(netdev)) {
+ err = netxen_nic_attach(adapter);
+ if (err)
+ goto err_out;
+
+ err = netxen_nic_up(adapter, netdev);
+ if (err)
+ goto err_out_detach;
+
+ netxen_restore_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+ netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
+ return 0;
+
+err_out_detach:
+ netxen_nic_detach(adapter);
+err_out:
+ nx_decr_dev_ref_cnt(adapter);
+ return err;
+}
+
+static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (nx_dev_request_aer(adapter))
+ return PCI_ERS_RESULT_RECOVERED;
+
+ netxen_nic_detach_func(adapter);
+
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev)
+{
+ int err = 0;
+
+ err = netxen_nic_attach_func(pdev);
+
+ return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+static void netxen_io_resume(struct pci_dev *pdev)
+{
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+}
+
+static void netxen_nic_shutdown(struct pci_dev *pdev)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+
+ netxen_nic_detach_func(adapter);
+
+ if (pci_save_state(pdev))
+ return;
+
+ if (netxen_nic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
+
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int
+netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+ int retval;
+
+ netxen_nic_detach_func(adapter);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ if (netxen_nic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int
+netxen_nic_resume(struct pci_dev *pdev)
+{
+ return netxen_nic_attach_func(pdev);
+}
+#endif
+
+static int netxen_nic_open(struct net_device *netdev)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
+
+ if (adapter->driver_mismatch)
+ return -EIO;
+
+ err = netxen_nic_attach(adapter);
+ if (err)
+ return err;
+
+ err = __netxen_nic_up(adapter, netdev);
+ if (err)
+ goto err_out;
+
+ netif_start_queue(netdev);
+
+ return 0;
+
+err_out:
+ netxen_nic_detach(adapter);
+ return err;
+}
+
+/*
+ * netxen_nic_close - Disables a network interface (ndo_stop entry point)
+ */
+static int netxen_nic_close(struct net_device *netdev)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+
+ __netxen_nic_down(adapter, netdev);
+ return 0;
+}
+
+static void
+netxen_tso_check(struct net_device *netdev,
+ struct nx_host_tx_ring *tx_ring,
+ struct cmd_desc_type0 *first_desc,
+ struct sk_buff *skb)
+{
+ u8 opcode = TX_ETHER_PKT;
+ __be16 protocol = skb->protocol;
+ u16 flags = 0, vid = 0;
+ u32 producer;
+ int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
+ struct cmd_desc_type0 *hwdesc;
+ struct vlan_ethhdr *vh;
+
+ if (protocol == cpu_to_be16(ETH_P_8021Q)) {
+
+ vh = (struct vlan_ethhdr *)skb->data;
+ protocol = vh->h_vlan_encapsulated_proto;
+ flags = FLAGS_VLAN_TAGGED;
+
+ } else if (vlan_tx_tag_present(skb)) {
+
+ flags = FLAGS_VLAN_OOB;
+ vid = vlan_tx_tag_get(skb);
+ netxen_set_tx_vlan_tci(first_desc, vid);
+ vlan_oob = 1;
+ }
+
+ if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
+ skb_shinfo(skb)->gso_size > 0) {
+
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ first_desc->total_hdr_length = hdr_len;
+ if (vlan_oob) {
+ first_desc->total_hdr_length += VLAN_HLEN;
+ first_desc->tcp_hdr_offset = VLAN_HLEN;
+ first_desc->ip_hdr_offset = VLAN_HLEN;
+ /* Only in case of TSO on vlan device */
+ flags |= FLAGS_VLAN_TAGGED;
+ }
+
+ opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
+ TX_TCP_LSO6 : TX_TCP_LSO;
+ tso = 1;
+
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 l4proto;
+
+ if (protocol == cpu_to_be16(ETH_P_IP)) {
+ l4proto = ip_hdr(skb)->protocol;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = TX_TCP_PKT;
+ else if (l4proto == IPPROTO_UDP)
+ opcode = TX_UDP_PKT;
+ } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
+ l4proto = ipv6_hdr(skb)->nexthdr;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = TX_TCPV6_PKT;
+ else if (l4proto == IPPROTO_UDP)
+ opcode = TX_UDPV6_PKT;
+ }
+ }
+
+ first_desc->tcp_hdr_offset += skb_transport_offset(skb);
+ first_desc->ip_hdr_offset += skb_network_offset(skb);
+ netxen_set_tx_flags_opcode(first_desc, flags, opcode);
+
+ if (!tso)
+ return;
+
+ /* For LSO, we need to copy the MAC/IP/TCP headers into
+ * the descriptor ring
+ */
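+ /* the copy starts 2 bytes into the first descriptor, apparently
+ * so the IP header stays 4-byte aligned within the descriptor */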
+ producer = tx_ring->producer;
+ copied = 0;
+ offset = 2;
+
+ if (vlan_oob) {
+ /* Create a TSO vlan header template for firmware */
+
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
+ hdr_len + VLAN_HLEN);
+
+ vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
+ skb_copy_from_linear_data(skb, vh, 12);
+ vh->h_vlan_proto = htons(ETH_P_8021Q);
+ vh->h_vlan_TCI = htons(vid);
+ skb_copy_from_linear_data_offset(skb, 12,
+ (char *)vh + 16, copy_len - 16);
+
+ copied = copy_len - VLAN_HLEN;
+ offset = 0;
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
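+ /* copy the remaining headers one descriptor-sized chunk at a time */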
+ while (copied < hdr_len) {
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
+ (hdr_len - copied));
+
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ skb_copy_from_linear_data_offset(skb, copied,
+ (char *)hwdesc + offset, copy_len);
+
+ copied += copy_len;
+ offset = 0;
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ tx_ring->producer = producer;
+ barrier();
+}
+
+static int
+netxen_map_tx_skb(struct pci_dev *pdev,
+ struct sk_buff *skb, struct netxen_cmd_buffer *pbuf)
+{
+ struct netxen_skb_frag *nf;
+ struct skb_frag_struct *frag;
+ int i, nr_frags;
+ dma_addr_t map;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ nf = &pbuf->frag_array[0];
+
+ map = pci_map_single(pdev, skb->data,
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, map))
+ goto out_err;
+
+ nf->dma = map;
+ nf->length = skb_headlen(skb);
+
+ for (i = 0; i < nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ nf = &pbuf->frag_array[i+1];
+
+ map = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, map))
+ goto unwind;
+
+ nf->dma = map;
+ nf->length = frag->size;
+ }
+
+ return 0;
+
+unwind:
+ while (--i >= 0) {
+ nf = &pbuf->frag_array[i+1];
+ pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ }
+
+ nf = &pbuf->frag_array[0];
+ pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+
+out_err:
+ return -ENOMEM;
+}
+
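+/*
+ * Zero only the flag/opcode and length words of the command
+ * descriptor; the remaining words hold buffer addresses that the
+ * transmit path fills in before the descriptor is used.
+ */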
+static inline void
+netxen_clear_cmddesc(u64 *desc)
+{
+ desc[0] = 0ULL;
+ desc[2] = 0ULL;
+}
+
+static netdev_tx_t
+netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
+ struct netxen_cmd_buffer *pbuf;
+ struct netxen_skb_frag *buffrag;
+ struct cmd_desc_type0 *hwdesc, *first_desc;
+ struct pci_dev *pdev;
+ int i, k;
+ int delta = 0;
+ struct skb_frag_struct *frag;
+
+ u32 producer;
+ int frag_count, no_of_desc;
+ u32 num_txd = tx_ring->num_desc;
+
+ frag_count = skb_shinfo(skb)->nr_frags + 1;
+
+ /* Up to 14 frags are supported for a normal packet,
+ * and up to 32 frags for a TSO packet.
+ */
+ if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) {
+
+ for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ delta += frag->size;
+ }
+
+ if (!__pskb_pull_tail(skb, delta))
+ goto drop_packet;
+
+ frag_count = 1 + skb_shinfo(skb)->nr_frags;
+ }
+ /* 4 fragments per cmd desc */
+ no_of_desc = (frag_count + 3) >> 2;
+
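+ /*
+ * Stop the queue when the ring is nearly full. The memory
+ * barrier orders the stop against the availability re-check,
+ * so a completion that just freed descriptors is not missed.
+ */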
+ if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
+ netif_stop_queue(netdev);
+ smp_mb();
+ if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
+ netif_start_queue(netdev);
+ else
+ return NETDEV_TX_BUSY;
+ }
+
+ producer = tx_ring->producer;
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+
+ pdev = adapter->pdev;
+
+ if (netxen_map_tx_skb(pdev, skb, pbuf))
+ goto drop_packet;
+
+ pbuf->skb = skb;
+ pbuf->frag_count = frag_count;
+
+ first_desc = hwdesc = &tx_ring->desc_head[producer];
+ netxen_clear_cmddesc((u64 *)hwdesc);
+
+ netxen_set_tx_frags_len(first_desc, frag_count, skb->len);
+ netxen_set_tx_port(first_desc, adapter->portnum);
+
+ for (i = 0; i < frag_count; i++) {
+
+ k = i % 4;
+
+ if ((k == 0) && (i > 0)) {
+ /* move to next desc.*/
+ producer = get_next_index(producer, num_txd);
+ hwdesc = &tx_ring->desc_head[producer];
+ netxen_clear_cmddesc((u64 *)hwdesc);
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+ }
+
+ buffrag = &pbuf->frag_array[i];
+
+ hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
+ switch (k) {
+ case 0:
+ hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
+ break;
+ case 1:
+ hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
+ break;
+ case 2:
+ hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
+ break;
+ case 3:
+ hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
+ break;
+ }
+ }
+
+ tx_ring->producer = get_next_index(producer, num_txd);
+
+ netxen_tso_check(netdev, tx_ring, first_desc, skb);
+
+ adapter->stats.txbytes += skb->len;
+ adapter->stats.xmitcalled++;
+
+ netxen_nic_update_cmd_producer(adapter, tx_ring);
+
+ return NETDEV_TX_OK;
+
+drop_packet:
+ adapter->stats.txdropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static int netxen_nic_check_temp(struct netxen_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ uint32_t temp, temp_state, temp_val;
+ int rv = 0;
+
+ temp = NXRD32(adapter, CRB_TEMP_STATE);
+
+ temp_state = nx_get_temp_state(temp);
+ temp_val = nx_get_temp_val(temp);
+
+ if (temp_state == NX_TEMP_PANIC) {
+ printk(KERN_ALERT
+ "%s: Device temperature %d degrees C exceeds"
+ " maximum allowed. Hardware has been shut down.\n",
+ netdev->name, temp_val);
+ rv = 1;
+ } else if (temp_state == NX_TEMP_WARN) {
+ if (adapter->temp == NX_TEMP_NORMAL) {
+ printk(KERN_ALERT
+ "%s: Device temperature %d degrees C "
+ "exceeds operating range."
+ " Immediate action needed.\n",
+ netdev->name, temp_val);
+ }
+ } else {
+ if (adapter->temp == NX_TEMP_WARN) {
+ printk(KERN_INFO
+ "%s: Device temperature is now %d degrees C"
+ " in normal range.\n", netdev->name,
+ temp_val);
+ }
+ }
+ adapter->temp = temp_state;
+ return rv;
+}
+
+void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (adapter->ahw.linkup && !linkup) {
+ printk(KERN_INFO "%s: %s NIC Link is down\n",
+ netxen_nic_driver_name, netdev->name);
+ adapter->ahw.linkup = 0;
+ if (netif_running(netdev)) {
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ adapter->link_changed = !adapter->has_link_events;
+ } else if (!adapter->ahw.linkup && linkup) {
+ printk(KERN_INFO "%s: %s NIC Link is up\n",
+ netxen_nic_driver_name, netdev->name);
+ adapter->ahw.linkup = 1;
+ if (netif_running(netdev)) {
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ }
+ adapter->link_changed = !adapter->has_link_events;
+ }
+}
+
+static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
+{
+ u32 val, port, linkup;
+
+ port = adapter->physical_port;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ val = NXRD32(adapter, CRB_XG_STATE_P3);
+ val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
+ linkup = (val == XG_LINK_UP_P3);
+ } else {
+ val = NXRD32(adapter, CRB_XG_STATE);
+ val = (val >> port * 8) & 0xff;
+ linkup = (val == XG_LINK_UP);
+ }
+
+ netxen_advert_link_change(adapter, linkup);
+}
+
+static void netxen_tx_timeout(struct net_device *netdev)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+
+ if (test_bit(__NX_RESETTING, &adapter->state))
+ return;
+
+ dev_err(&netdev->dev, "transmit timeout, resetting.\n");
+ schedule_work(&adapter->tx_timeout_task);
+}
+
+static void netxen_tx_timeout_task(struct work_struct *work)
+{
+ struct netxen_adapter *adapter =
+ container_of(work, struct netxen_adapter, tx_timeout_task);
+
+ if (!netif_running(adapter->netdev))
+ return;
+
+ if (test_and_set_bit(__NX_RESETTING, &adapter->state))
+ return;
+
+ if (++adapter->tx_timeo_cnt >= NX_MAX_TX_TIMEOUTS)
+ goto request_reset;
+
+ rtnl_lock();
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ /* try to scrub interrupt */
+ netxen_napi_disable(adapter);
+
+ netxen_napi_enable(adapter);
+
+ netif_wake_queue(adapter->netdev);
+
+ clear_bit(__NX_RESETTING, &adapter->state);
+ } else {
+ clear_bit(__NX_RESETTING, &adapter->state);
+ if (netxen_nic_reset_context(adapter)) {
+ rtnl_unlock();
+ goto request_reset;
+ }
+ }
+ adapter->netdev->trans_start = jiffies;
+ rtnl_unlock();
+ return;
+
+request_reset:
+ adapter->need_fw_reset = 1;
+ clear_bit(__NX_RESETTING, &adapter->state);
+}
+
+static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+
+ stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
+ stats->tx_packets = adapter->stats.xmitfinished;
+ stats->rx_bytes = adapter->stats.rxbytes;
+ stats->tx_bytes = adapter->stats.txbytes;
+ stats->rx_dropped = adapter->stats.rxdropped;
+ stats->tx_dropped = adapter->stats.txdropped;
+
+ return stats;
+}
+
+static irqreturn_t netxen_intr(int irq, void *data)
+{
+ struct nx_host_sds_ring *sds_ring = data;
+ struct netxen_adapter *adapter = sds_ring->adapter;
+ u32 status = 0;
+
+ status = readl(adapter->isr_int_vec);
+
+ if (!(status & adapter->int_vec_bit))
+ return IRQ_NONE;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ /* check interrupt state machine, to be sure */
+ status = readl(adapter->crb_int_state_reg);
+ if (!ISR_LEGACY_INT_TRIGGERED(status))
+ return IRQ_NONE;
+
+ } else {
+ unsigned long our_int = 0;
+
+ our_int = readl(adapter->crb_int_state_reg);
+
+ /* not our interrupt */
+ if (!test_and_clear_bit((7 + adapter->portnum), &our_int))
+ return IRQ_NONE;
+
+ /* claim interrupt */
+ writel((our_int & 0xffffffff), adapter->crb_int_state_reg);
+
+ /* clear interrupt */
+ netxen_nic_disable_int(sds_ring);
+ }
+
+ writel(0xffffffff, adapter->tgt_status_reg);
+ /* read twice to ensure write is flushed */
+ readl(adapter->isr_int_vec);
+ readl(adapter->isr_int_vec);
+
+ napi_schedule(&sds_ring->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t netxen_msi_intr(int irq, void *data)
+{
+ struct nx_host_sds_ring *sds_ring = data;
+ struct netxen_adapter *adapter = sds_ring->adapter;
+
+ /* clear interrupt */
+ writel(0xffffffff, adapter->tgt_status_reg);
+
+ napi_schedule(&sds_ring->napi);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t netxen_msix_intr(int irq, void *data)
+{
+ struct nx_host_sds_ring *sds_ring = data;
+
+ napi_schedule(&sds_ring->napi);
+ return IRQ_HANDLED;
+}
+
+static int netxen_nic_poll(struct napi_struct *napi, int budget)
+{
+ struct nx_host_sds_ring *sds_ring =
+ container_of(napi, struct nx_host_sds_ring, napi);
+
+ struct netxen_adapter *adapter = sds_ring->adapter;
+
+ int tx_complete;
+ int work_done;
+
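+ /* process Tx completions and Rx work; interrupts are re-enabled
+ * only once both are fully drained within this budget */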
+ tx_complete = netxen_process_cmd_ring(adapter);
+
+ work_done = netxen_process_rcv_ring(sds_ring, budget);
+
+ if ((work_done < budget) && tx_complete) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__NX_DEV_UP, &adapter->state))
+ netxen_nic_enable_int(sds_ring);
+ }
+
+ return work_done;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void netxen_nic_poll_controller(struct net_device *netdev)
+{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ disable_irq(adapter->irq);
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netxen_intr(adapter->irq, sds_ring);
+ }
+ enable_irq(adapter->irq);
+}
+#endif
+
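+/*
+ * The device reference count lives in a CRB register shared by all
+ * PCI functions: it counts the functions holding the firmware up, and
+ * the last function down moves the device to the cold state.
+ */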
+static int
+nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
+{
+ int count;
+ if (netxen_api_lock(adapter))
+ return -EIO;
+
+ count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+
+ NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count);
+
+ netxen_api_unlock(adapter);
+ return count;
+}
+
+static int
+nx_decr_dev_ref_cnt(struct netxen_adapter *adapter)
+{
+ int count;
+ if (netxen_api_lock(adapter))
+ return -EIO;
+
+ count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ WARN_ON(count == 0);
+
+ NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count);
+
+ if (count == 0)
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD);
+
+ netxen_api_unlock(adapter);
+ return count;
+}
+
+static int
+nx_dev_request_aer(struct netxen_adapter *adapter)
+{
+ u32 state;
+ int ret = -EINVAL;
+
+ if (netxen_api_lock(adapter))
+ return ret;
+
+ state = NXRD32(adapter, NX_CRB_DEV_STATE);
+
+ if (state == NX_DEV_NEED_AER)
+ ret = 0;
+ else if (state == NX_DEV_READY) {
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER);
+ ret = 0;
+ }
+
+ netxen_api_unlock(adapter);
+ return ret;
+}
+
+static int
+nx_dev_request_reset(struct netxen_adapter *adapter)
+{
+ u32 state;
+ int ret = -EINVAL;
+
+ if (netxen_api_lock(adapter))
+ return ret;
+
+ state = NXRD32(adapter, NX_CRB_DEV_STATE);
+
+ if (state == NX_DEV_NEED_RESET)
+ ret = 0;
+ else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) {
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET);
+ ret = 0;
+ }
+
+ netxen_api_unlock(adapter);
+
+ return ret;
+}
+
+static int
+netxen_can_start_firmware(struct netxen_adapter *adapter)
+{
+ int count;
+ int can_start = 0;
+
+ if (netxen_api_lock(adapter))
+ return 0;
+
+ count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+
+ if ((count < 0) || (count >= NX_MAX_PCI_FUNC))
+ count = 0;
+
+ if (count == 0) {
+ can_start = 1;
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_INITALIZING);
+ }
+
+ NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count);
+
+ netxen_api_unlock(adapter);
+
+ return can_start;
+}
+
+static void
+netxen_schedule_work(struct netxen_adapter *adapter,
+ work_func_t func, int delay)
+{
+ INIT_DELAYED_WORK(&adapter->fw_work, func);
+ schedule_delayed_work(&adapter->fw_work, delay);
+}
+
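+/*
+ * Grab the __NX_RESETTING bit before cancelling so no new firmware
+ * work can be scheduled while the cancel is in flight; callers clear
+ * the bit when they are done.
+ */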
+static void
+netxen_cancel_fw_work(struct netxen_adapter *adapter)
+{
+ while (test_and_set_bit(__NX_RESETTING, &adapter->state))
+ msleep(10);
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+static void
+netxen_attach_work(struct work_struct *work)
+{
+ struct netxen_adapter *adapter = container_of(work,
+ struct netxen_adapter, fw_work.work);
+ struct net_device *netdev = adapter->netdev;
+ int err = 0;
+
+ if (netif_running(netdev)) {
+ err = netxen_nic_attach(adapter);
+ if (err)
+ goto done;
+
+ err = netxen_nic_up(adapter, netdev);
+ if (err) {
+ netxen_nic_detach(adapter);
+ goto done;
+ }
+
+ netxen_restore_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+
+done:
+ adapter->fw_fail_cnt = 0;
+ clear_bit(__NX_RESETTING, &adapter->state);
+ netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
+}
+
+static void
+netxen_fwinit_work(struct work_struct *work)
+{
+ struct netxen_adapter *adapter = container_of(work,
+ struct netxen_adapter, fw_work.work);
+ int dev_state;
+
+ dev_state = NXRD32(adapter, NX_CRB_DEV_STATE);
+
+ switch (dev_state) {
+ case NX_DEV_COLD:
+ case NX_DEV_READY:
+ if (!netxen_start_firmware(adapter)) {
+ netxen_schedule_work(adapter, netxen_attach_work, 0);
+ return;
+ }
+ break;
+
+ case NX_DEV_NEED_RESET:
+ case NX_DEV_INITALIZING:
+ if (++adapter->fw_wait_cnt < FW_POLL_THRESH) {
+ netxen_schedule_work(adapter,
+ netxen_fwinit_work, 2 * FW_POLL_DELAY);
+ return;
+ }
+
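+ /* fall through */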
+ case NX_DEV_FAILED:
+ default:
+ nx_incr_dev_ref_cnt(adapter);
+ break;
+ }
+
+ clear_bit(__NX_RESETTING, &adapter->state);
+}
+
+static void
+netxen_detach_work(struct work_struct *work)
+{
+ struct netxen_adapter *adapter = container_of(work,
+ struct netxen_adapter, fw_work.work);
+ struct net_device *netdev = adapter->netdev;
+ int ref_cnt, delay;
+ u32 status;
+
+ netif_device_detach(netdev);
+
+ netxen_nic_down(adapter, netdev);
+
+ rtnl_lock();
+ netxen_nic_detach(adapter);
+ rtnl_unlock();
+
+ status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
+
+ if (status & NX_RCODE_FATAL_ERROR)
+ goto err_ret;
+
+ if (adapter->temp == NX_TEMP_PANIC)
+ goto err_ret;
+
+ ref_cnt = nx_decr_dev_ref_cnt(adapter);
+
+ if (ref_cnt == -EIO)
+ goto err_ret;
+
+ delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY);
+
+ adapter->fw_wait_cnt = 0;
+ netxen_schedule_work(adapter, netxen_fwinit_work, delay);
+
+ return;
+
+err_ret:
+ clear_bit(__NX_RESETTING, &adapter->state);
+}
+
+static int
+netxen_check_health(struct netxen_adapter *adapter)
+{
+ u32 state, heartbit;
+ struct net_device *netdev = adapter->netdev;
+
+ state = NXRD32(adapter, NX_CRB_DEV_STATE);
+ if (state == NX_DEV_NEED_AER)
+ return 0;
+
+ if (netxen_nic_check_temp(adapter))
+ goto detach;
+
+ if (adapter->need_fw_reset) {
+ if (nx_dev_request_reset(adapter))
+ return 0;
+ goto detach;
+ }
+
+ /* NX_DEV_NEED_RESET can be set in two cases:
+ * 1. Tx timeout 2. FW hang
+ * A request to destroy the context is sent only for a Tx
+ * timeout; it is not required for a FW hang.
+ */
+ if (state == NX_DEV_NEED_RESET) {
+ adapter->need_fw_reset = 1;
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ goto detach;
+ }
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 0;
+
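+ /* the P3 firmware increments an alive counter; if it stops
+ * advancing for FW_FAIL_THRESH polls, treat the firmware as hung */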
+ heartbit = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+ if (heartbit != adapter->heartbit) {
+ adapter->heartbit = heartbit;
+ adapter->fw_fail_cnt = 0;
+ if (adapter->need_fw_reset)
+ goto detach;
+ return 0;
+ }
+
+ if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
+ return 0;
+
+ if (nx_dev_request_reset(adapter))
+ return 0;
+
+ clear_bit(__NX_FW_ATTACHED, &adapter->state);
+
+ dev_info(&netdev->dev, "firmware hang detected\n");
+
+detach:
+ if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
+ !test_and_set_bit(__NX_RESETTING, &adapter->state))
+ netxen_schedule_work(adapter, netxen_detach_work, 0);
+ return 1;
+}
+
+static void
+netxen_fw_poll_work(struct work_struct *work)
+{
+ struct netxen_adapter *adapter = container_of(work,
+ struct netxen_adapter, fw_work.work);
+
+ if (test_bit(__NX_RESETTING, &adapter->state))
+ goto reschedule;
+
+ if (test_bit(__NX_DEV_UP, &adapter->state)) {
+ if (!adapter->has_link_events) {
+
+ netxen_nic_handle_phy_intr(adapter);
+
+ if (adapter->link_changed)
+ netxen_nic_set_link_parameters(adapter);
+ }
+ }
+
+ if (netxen_check_health(adapter))
+ return;
+
+reschedule:
+ netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
+}
+
+static ssize_t
+netxen_store_bridged_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct net_device *net = to_net_dev(dev);
+ struct netxen_adapter *adapter = netdev_priv(net);
+ unsigned long new;
+ int ret = -EINVAL;
+
+ if (!(adapter->capabilities & NX_FW_CAPABILITY_BDG))
+ goto err_out;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ goto err_out;
+
+ if (strict_strtoul(buf, 2, &new))
+ goto err_out;
+
+ if (!netxen_config_bridged_mode(adapter, !!new))
+ ret = len;
+
+err_out:
+ return ret;
+}
+
+static ssize_t
+netxen_show_bridged_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *net = to_net_dev(dev);
+ struct netxen_adapter *adapter;
+ int bridged_mode = 0;
+
+ adapter = netdev_priv(net);
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_BDG)
+ bridged_mode = !!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED);
+
+ return sprintf(buf, "%d\n", bridged_mode);
+}
+
+static struct device_attribute dev_attr_bridged_mode = {
+ .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
+ .show = netxen_show_bridged_mode,
+ .store = netxen_store_bridged_mode,
+};
+
+static ssize_t
+netxen_store_diag_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct netxen_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long new;
+
+ if (strict_strtoul(buf, 2, &new))
+ return -EINVAL;
+
+ if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
+ adapter->flags ^= NETXEN_NIC_DIAG_ENABLED;
+
+ return len;
+}
+
+static ssize_t
+netxen_show_diag_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct netxen_adapter *adapter = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n",
+ !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED));
+}
+
+static struct device_attribute dev_attr_diag_mode = {
+ .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
+ .show = netxen_show_diag_mode,
+ .store = netxen_store_diag_mode,
+};
+
+static int
+netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ size_t crb_size = 4;
+
+ if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
+ return -EIO;
+
+ if (offset < NETXEN_PCI_CRBSPACE) {
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return -EINVAL;
+
+ if (ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+ NETXEN_PCI_CAMQM_2M_END))
+ crb_size = 8;
+ else
+ return -EINVAL;
+ }
+
+ if ((size != crb_size) || (offset & (crb_size-1)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t
+netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct netxen_adapter *adapter = dev_get_drvdata(dev);
+ u32 data;
+ u64 qmdata;
+ int ret;
+
+ ret = netxen_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
+ ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+ NETXEN_PCI_CAMQM_2M_END)) {
+ netxen_pci_camqm_read_2M(adapter, offset, &qmdata);
+ memcpy(buf, &qmdata, size);
+ } else {
+ data = NXRD32(adapter, offset);
+ memcpy(buf, &data, size);
+ }
+
+ return size;
+}
+
+static ssize_t
+netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct netxen_adapter *adapter = dev_get_drvdata(dev);
+ u32 data;
+ u64 qmdata;
+ int ret;
+
+ ret = netxen_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
+ ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+ NETXEN_PCI_CAMQM_2M_END)) {
+ memcpy(&qmdata, buf, size);
+ netxen_pci_camqm_write_2M(adapter, offset, qmdata);
+ } else {
+ memcpy(&data, buf, size);
+ NXWR32(adapter, offset, data);
+ }
+
+ return size;
+}
+
+static int
+netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
+ return -EIO;
+
+ if ((size != 8) || (offset & 0x7))
+ return -EIO;
+
+ return 0;
+}
+
+static ssize_t
+netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct netxen_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = netxen_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (adapter->pci_mem_read(adapter, offset, &data))
+ return -EIO;
+
+ memcpy(buf, &data, size);
+
+ return size;
+}
+
+static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct netxen_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = netxen_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ memcpy(&data, buf, size);
+
+ if (adapter->pci_mem_write(adapter, offset, data))
+ return -EIO;
+
+ return size;
+}
+
+static struct bin_attribute bin_attr_crb = {
+ .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = netxen_sysfs_read_crb,
+ .write = netxen_sysfs_write_crb,
+};
+
+static struct bin_attribute bin_attr_mem = {
+ .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = netxen_sysfs_read_mem,
+ .write = netxen_sysfs_write_mem,
+};
+
+static void
+netxen_create_sysfs_entries(struct netxen_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct device *dev = &netdev->dev;
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_BDG) {
+ /* bridged_mode control */
+ if (device_create_file(dev, &dev_attr_bridged_mode)) {
+ dev_warn(&netdev->dev,
+ "failed to create bridged_mode sysfs entry\n");
+ }
+ }
+}
+
+static void
+netxen_remove_sysfs_entries(struct netxen_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct device *dev = &netdev->dev;
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_BDG)
+ device_remove_file(dev, &dev_attr_bridged_mode);
+}
+
+static void
+netxen_create_diag_entries(struct netxen_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct device *dev;
+
+ dev = &pdev->dev;
+ if (device_create_file(dev, &dev_attr_diag_mode))
+ dev_info(dev, "failed to create diag_mode sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_crb))
+ dev_info(dev, "failed to create crb sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_mem))
+ dev_info(dev, "failed to create mem sysfs entry\n");
+}
+
+static void
+netxen_remove_diag_entries(struct netxen_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = &pdev->dev;
+
+ device_remove_file(dev, &dev_attr_diag_mode);
+ device_remove_bin_file(dev, &bin_attr_crb);
+ device_remove_bin_file(dev, &bin_attr_mem);
+}
+
+#ifdef CONFIG_INET
+
+#define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops)
+
+static int
+netxen_destip_supported(struct netxen_adapter *adapter)
+{
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 0;
+
+ if (adapter->ahw.cut_through)
+ return 0;
+
+ return 1;
+}
+
+static void
+netxen_free_vlan_ip_list(struct netxen_adapter *adapter)
+{
+ struct nx_vlan_ip_list *cur;
+ struct list_head *head = &adapter->vlan_ip_list;
+
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, struct nx_vlan_ip_list, list);
+ netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
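+
+/*
+ * Track IP addresses configured on vlan devices stacked on this NIC
+ * so they can be replayed to the firmware after a reset.
+ */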
+static void
+netxen_list_config_vlan_ip(struct netxen_adapter *adapter,
+ struct in_ifaddr *ifa, unsigned long event)
+{
+ struct net_device *dev;
+ struct nx_vlan_ip_list *cur, *tmp_cur;
+ struct list_head *head;
+
+ dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+
+ if (dev == NULL)
+ return;
+
+ if (!is_vlan_dev(dev))
+ return;
+
+ switch (event) {
+ case NX_IP_UP:
+ list_for_each(head, &adapter->vlan_ip_list) {
+ cur = list_entry(head, struct nx_vlan_ip_list, list);
+
+ if (cur->ip_addr == ifa->ifa_address)
+ return;
+ }
+
+ cur = kzalloc(sizeof(struct nx_vlan_ip_list), GFP_ATOMIC);
+ if (cur == NULL) {
+ printk(KERN_ERR "%s: failed to add vlan ip to list\n",
+ adapter->netdev->name);
+ return;
+ }
+
+ cur->ip_addr = ifa->ifa_address;
+ list_add_tail(&cur->list, &adapter->vlan_ip_list);
+ break;
+ case NX_IP_DOWN:
+ list_for_each_entry_safe(cur, tmp_cur,
+ &adapter->vlan_ip_list, list) {
+ if (cur->ip_addr == ifa->ifa_address) {
+ list_del(&cur->list);
+ kfree(cur);
+ break;
+ }
+ }
+ }
+}
+
+static void
+netxen_config_indev_addr(struct netxen_adapter *adapter,
+ struct net_device *dev, unsigned long event)
+{
+ struct in_device *indev;
+
+ if (!netxen_destip_supported(adapter))
+ return;
+
+ indev = in_dev_get(dev);
+ if (!indev)
+ return;
+
+ for_ifa(indev) {
+ switch (event) {
+ case NETDEV_UP:
+ netxen_config_ipaddr(adapter,
+ ifa->ifa_address, NX_IP_UP);
+ netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP);
+ break;
+ case NETDEV_DOWN:
+ netxen_config_ipaddr(adapter,
+ ifa->ifa_address, NX_IP_DOWN);
+ netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN);
+ break;
+ default:
+ break;
+ }
+ } endfor_ifa(indev);
+
+ in_dev_put(indev);
+}
+
+static void
+netxen_restore_indev_addr(struct net_device *netdev, unsigned long event)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct nx_vlan_ip_list *pos, *tmp_pos;
+ unsigned long ip_event;
+
+ ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
+ netxen_config_indev_addr(adapter, netdev, event);
+
+ list_for_each_entry_safe(pos, tmp_pos, &adapter->vlan_ip_list, list) {
+ netxen_config_ipaddr(adapter, pos->ip_addr, ip_event);
+ }
+}
+
+static int netxen_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct netxen_adapter *adapter;
+ struct net_device *dev = (struct net_device *)ptr;
+ struct net_device *orig_dev = dev;
+
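+ /* walk down through stacked vlan devices to the underlying
+ * real device */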
+recheck:
+ if (dev == NULL)
+ goto done;
+
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ dev = vlan_dev_real_dev(dev);
+ goto recheck;
+ }
+
+ if (!is_netxen_netdev(dev))
+ goto done;
+
+ adapter = netdev_priv(dev);
+
+ if (!adapter)
+ goto done;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ goto done;
+
+ netxen_config_indev_addr(adapter, orig_dev, event);
+done:
+ return NOTIFY_DONE;
+}
+
+static int
+netxen_inetaddr_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct netxen_adapter *adapter;
+ struct net_device *dev;
+
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+
+ dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+
+recheck:
+ if (dev == NULL)
+ goto done;
+
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ dev = vlan_dev_real_dev(dev);
+ goto recheck;
+ }
+
+ if (!is_netxen_netdev(dev))
+ goto done;
+
+ adapter = netdev_priv(dev);
+
+ if (!adapter || !netxen_destip_supported(adapter))
+ goto done;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ goto done;
+
+ switch (event) {
+ case NETDEV_UP:
+ netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
+ netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP);
+ break;
+ case NETDEV_DOWN:
+ netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_DOWN);
+ netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN);
+ break;
+ default:
+ break;
+ }
+
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block netxen_netdev_cb = {
+ .notifier_call = netxen_netdev_event,
+};
+
+static struct notifier_block netxen_inetaddr_cb = {
+ .notifier_call = netxen_inetaddr_event,
+};
+#else
+static void
+netxen_restore_indev_addr(struct net_device *dev, unsigned long event)
+{ }
+static void
+netxen_free_vlan_ip_list(struct netxen_adapter *adapter)
+{ }
+#endif
+
+static struct pci_error_handlers netxen_err_handler = {
+ .error_detected = netxen_io_error_detected,
+ .slot_reset = netxen_io_slot_reset,
+ .resume = netxen_io_resume,
+};
+
+static struct pci_driver netxen_driver = {
+ .name = netxen_nic_driver_name,
+ .id_table = netxen_pci_tbl,
+ .probe = netxen_nic_probe,
+ .remove = __devexit_p(netxen_nic_remove),
+#ifdef CONFIG_PM
+ .suspend = netxen_nic_suspend,
+ .resume = netxen_nic_resume,
+#endif
+ .shutdown = netxen_nic_shutdown,
+ .err_handler = &netxen_err_handler
+};
+
+static int __init netxen_init_module(void)
+{
+ printk(KERN_INFO "%s\n", netxen_nic_driver_string);
+
+#ifdef CONFIG_INET
+ register_netdevice_notifier(&netxen_netdev_cb);
+ register_inetaddr_notifier(&netxen_inetaddr_cb);
+#endif
+ return pci_register_driver(&netxen_driver);
+}
+
+module_init(netxen_init_module);
+
+static void __exit netxen_exit_module(void)
+{
+ pci_unregister_driver(&netxen_driver);
+
+#ifdef CONFIG_INET
+ unregister_inetaddr_notifier(&netxen_inetaddr_cb);
+ unregister_netdevice_notifier(&netxen_netdev_cb);
+#endif
+}
+
+module_exit(netxen_exit_module);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
new file mode 100644
index 000000000000..46f9b6499f9b
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -0,0 +1,3968 @@
+/*
+ * QLogic QLA3xxx NIC HBA Driver
+ * Copyright (c) 2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla3xxx for copyright and licensing details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/prefetch.h>
+
+#include "qla3xxx.h"
+
+#define DRV_NAME "qla3xxx"
+#define DRV_STRING "QLogic ISP3XXX Network Driver"
+#define DRV_VERSION "v2.03.00-k5"
+
+static const char ql3xxx_driver_name[] = DRV_NAME;
+static const char ql3xxx_driver_version[] = DRV_VERSION;
+
+#define TIMED_OUT_MSG \
+"Timed out waiting for management port to get free before issuing command\n"
+
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static const u32 default_msg
+ = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
+ | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
+
+static int debug = -1; /* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static int msi;
+module_param(msi, int, 0);
+MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
+
+static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
+ /* required last entry */
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
+
+/*
+ * These are the known PHYs used by this driver.
+ */
+enum PHY_DEVICE_TYPE {
+ PHY_TYPE_UNKNOWN = 0,
+ PHY_VITESSE_VSC8211,
+ PHY_AGERE_ET1011C,
+ MAX_PHY_DEV_TYPES
+};
+
+struct PHY_DEVICE_INFO {
+ const enum PHY_DEVICE_TYPE phyDevice;
+ const u32 phyIdOUI;
+ const u16 phyIdModel;
+ const char *name;
+};
+
+static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
+ {PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
+ {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
+ {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
+};
+
+/*
+ * Caller must take hw_lock.
+ */
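+/*
+ * Acquire a hardware semaphore: write the request bits, then read the
+ * register back; ownership is confirmed only if the bits stuck.
+ * Retries roughly once per second for up to three seconds.
+ */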
+static int ql_sem_spinlock(struct ql3_adapter *qdev,
+ u32 sem_mask, u32 sem_bits)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+ unsigned int seconds = 3;
+
+ do {
+ writel((sem_mask | sem_bits),
+ &port_regs->CommonRegs.semaphoreReg);
+ value = readl(&port_regs->CommonRegs.semaphoreReg);
+ if ((value & (sem_mask >> 16)) == sem_bits)
+ return 0;
+ ssleep(1);
+ } while (--seconds);
+ return -1;
+}
+
+static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
+ readl(&port_regs->CommonRegs.semaphoreReg);
+}
+
+static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
+ value = readl(&port_regs->CommonRegs.semaphoreReg);
+ return ((value & (sem_mask >> 16)) == sem_bits);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
+{
+ int i = 0;
+
+ while (i < 10) {
+ if (i)
+ ssleep(1);
+
+ if (ql_sem_lock(qdev,
+ QL_DRVR_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
+ * 2) << 1)) {
+ netdev_printk(KERN_DEBUG, qdev->ndev,
+ "driver lock acquired\n");
+ return 1;
+ }
+ }
+
+ netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
+ return 0;
+}
+
+static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ writel(((ISP_CONTROL_NP_MASK << 16) | page),
+ &port_regs->CommonRegs.ispControlStatus);
+ readl(&port_regs->CommonRegs.ispControlStatus);
+ qdev->current_page = page;
+}
+
+static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+ u32 value;
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ value = readl(reg);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ return value;
+}
+
+static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+ return readl(reg);
+}
+
+static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+ u32 value;
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+ if (qdev->current_page != 0)
+ ql_set_register_page(qdev, 0);
+ value = readl(reg);
+
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return value;
+}
+
+static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+ if (qdev->current_page != 0)
+ ql_set_register_page(qdev, 0);
+ return readl(reg);
+}
+
+static void ql_write_common_reg_l(struct ql3_adapter *qdev,
+ u32 __iomem *reg, u32 value)
+{
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ writel(value, reg);
+ readl(reg);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+}
+
+static void ql_write_common_reg(struct ql3_adapter *qdev,
+ u32 __iomem *reg, u32 value)
+{
+ writel(value, reg);
+ readl(reg);
+}
+
+static void ql_write_nvram_reg(struct ql3_adapter *qdev,
+ u32 __iomem *reg, u32 value)
+{
+ writel(value, reg);
+ readl(reg);
+ udelay(1);
+}
+
+static void ql_write_page0_reg(struct ql3_adapter *qdev,
+ u32 __iomem *reg, u32 value)
+{
+ if (qdev->current_page != 0)
+ ql_set_register_page(qdev, 0);
+ writel(value, reg);
+ readl(reg);
+}
+
+/*
+ * Caller holds hw_lock. Only called during init.
+ */
+static void ql_write_page1_reg(struct ql3_adapter *qdev,
+ u32 __iomem *reg, u32 value)
+{
+ if (qdev->current_page != 1)
+ ql_set_register_page(qdev, 1);
+ writel(value, reg);
+ readl(reg);
+}
+
+/*
+ * Caller holds hw_lock. Only called during init.
+ */
+static void ql_write_page2_reg(struct ql3_adapter *qdev,
+ u32 __iomem *reg, u32 value)
+{
+ if (qdev->current_page != 2)
+ ql_set_register_page(qdev, 2);
+ writel(value, reg);
+ readl(reg);
+}
+
+static void ql_disable_interrupts(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
+ (ISP_IMR_ENABLE_INT << 16));
+
+}
+
+static void ql_enable_interrupts(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
+ ((0xff << 16) | ISP_IMR_ENABLE_INT));
+
+}
+
+static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
+ struct ql_rcv_buf_cb *lrg_buf_cb)
+{
+ dma_addr_t map;
+ int err;
+ lrg_buf_cb->next = NULL;
+
+ if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */
+ qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
+ } else {
+ qdev->lrg_buf_free_tail->next = lrg_buf_cb;
+ qdev->lrg_buf_free_tail = lrg_buf_cb;
+ }
+
+ if (!lrg_buf_cb->skb) {
+ lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
+ qdev->lrg_buffer_len);
+ if (unlikely(!lrg_buf_cb->skb)) {
+ netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n");
+ qdev->lrg_buf_skb_check++;
+ } else {
+ /*
+ * Reserve some headroom to copy the ethhdr from the
+ * first buffer.
+ */
+ skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
+ map = pci_map_single(qdev->pdev,
+ lrg_buf_cb->skb->data,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE,
+ PCI_DMA_FROMDEVICE);
+ err = pci_dma_mapping_error(qdev->pdev, map);
+ if (err) {
+ netdev_err(qdev->ndev,
+ "PCI mapping failed with error: %d\n",
+ err);
+ dev_kfree_skb(lrg_buf_cb->skb);
+ lrg_buf_cb->skb = NULL;
+
+ qdev->lrg_buf_skb_check++;
+ return;
+ }
+
+ lrg_buf_cb->buf_phy_addr_low =
+ cpu_to_le32(LS_64BITS(map));
+ lrg_buf_cb->buf_phy_addr_high =
+ cpu_to_le32(MS_64BITS(map));
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE);
+ }
+ }
+
+ qdev->lrg_buf_free_count++;
+}
+
+static struct ql_rcv_buf_cb *
+ql_get_from_lrg_buf_free_list(struct ql3_adapter *qdev)
+{
+ struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
+
+ if (lrg_buf_cb != NULL) {
+ qdev->lrg_buf_free_head = lrg_buf_cb->next;
+ if (qdev->lrg_buf_free_head == NULL)
+ qdev->lrg_buf_free_tail = NULL;
+ qdev->lrg_buf_free_count--;
+ }
+
+ return lrg_buf_cb;
+}
+
+static u32 addrBits = EEPROM_NO_ADDR_BITS;
+static u32 dataBits = EEPROM_NO_DATA_BITS;
+
+static void fm93c56a_deselect(struct ql3_adapter *qdev);
+static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
+ unsigned short *value);
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_select(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+
+ qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
+ ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
+ ql_write_nvram_reg(qdev, spir,
+ ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
+}
+
+/*
+ * Caller holds hw_lock.
+ */
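+/*
+ * Clock a command and address out to the FM93C56A serial EEPROM one
+ * bit at a time: for each bit, drive DO, then pulse the clock high
+ * and low, MSB first.
+ */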
+static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
+{
+ int i;
+ u32 mask;
+ u32 dataBit;
+ u32 previousBit;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+
+ /* Clock in a zero, then do the start bit */
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ AUBURN_EEPROM_DO_1));
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));
+
+ mask = 1 << (FM93C56A_CMD_BITS - 1);
+ /* Force the previous data bit to be different */
+ previousBit = 0xffff;
+ for (i = 0; i < FM93C56A_CMD_BITS; i++) {
+ dataBit = (cmd & mask)
+ ? AUBURN_EEPROM_DO_1
+ : AUBURN_EEPROM_DO_0;
+ if (previousBit != dataBit) {
+ /* If the bit changed, change the DO state to match */
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK |
+ qdev->eeprom_cmd_data | dataBit));
+ previousBit = dataBit;
+ }
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ dataBit | AUBURN_EEPROM_CLK_RISE));
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ dataBit | AUBURN_EEPROM_CLK_FALL));
+ cmd = cmd << 1;
+ }
+
+ mask = 1 << (addrBits - 1);
+ /* Force the previous data bit to be different */
+ previousBit = 0xffff;
+ for (i = 0; i < addrBits; i++) {
+ dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
+ : AUBURN_EEPROM_DO_0;
+ if (previousBit != dataBit) {
+ /*
+ * If the bit changed, then change the DO state to
+ * match
+ */
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK |
+ qdev->eeprom_cmd_data | dataBit));
+ previousBit = dataBit;
+ }
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ dataBit | AUBURN_EEPROM_CLK_RISE));
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ dataBit | AUBURN_EEPROM_CLK_FALL));
+ eepromAddr = eepromAddr << 1;
+ }
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_deselect(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+
+ qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
+ ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
+{
+ int i;
+ u32 data = 0;
+ u32 dataBit;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+
+ /* Read the data bits */
+ /* The first bit is a dummy. Clock right over it. */
+ for (i = 0; i < dataBits; i++) {
+ ql_write_nvram_reg(qdev, spir,
+ ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ AUBURN_EEPROM_CLK_RISE);
+ ql_write_nvram_reg(qdev, spir,
+ ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ AUBURN_EEPROM_CLK_FALL);
+ dataBit = (ql_read_common_reg(qdev, spir) &
+ AUBURN_EEPROM_DI_1) ? 1 : 0;
+ data = (data << 1) | dataBit;
+ }
+ *value = (u16)data;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void eeprom_readword(struct ql3_adapter *qdev,
+ u32 eepromAddr, unsigned short *value)
+{
+ fm93c56a_select(qdev);
+ fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
+ fm93c56a_datain(qdev, value);
+ fm93c56a_deselect(qdev);
+}
+
+static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
+{
+ __le16 *p = (__le16 *)ndev->dev_addr;
+ p[0] = cpu_to_le16(addr[0]);
+ p[1] = cpu_to_le16(addr[1]);
+ p[2] = cpu_to_le16(addr[2]);
+}
+
+static int ql_get_nvram_params(struct ql3_adapter *qdev)
+{
+ u16 *pEEPROMData;
+ u16 checksum = 0;
+ u32 index;
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+ pEEPROMData = (u16 *)&qdev->nvram_data;
+ qdev->eeprom_cmd_data = 0;
+ if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 10)) {
+ pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return -1;
+ }
+
+ for (index = 0; index < EEPROM_SIZE; index++) {
+ eeprom_readword(qdev, index, pEEPROMData);
+ checksum += *pEEPROMData;
+ pEEPROMData++;
+ }
+ ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
+
+ if (checksum != 0) {
+ netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
+ checksum);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return -1;
+ }
+
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return checksum;
+}
+
+static const u32 PHYAddr[2] = {
+ PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
+};
+
+static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 temp;
+ int count = 1000;
+
+ while (count) {
+ temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
+ if (!(temp & MAC_MII_STATUS_BSY))
+ return 0;
+ udelay(10);
+ count--;
+ }
+ return -1;
+}
+
+static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 scanControl;
+
+ if (qdev->numPorts > 1) {
+ /* Auto scan will cycle through multiple ports */
+ scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
+ } else {
+ scanControl = MAC_MII_CONTROL_SC;
+ }
+
+ /*
+ * Scan register 1 of the PHY/PETBI and set up to scan both
+ * devices. The autoscan starts from the first register and
+ * completes the last one before rolling over to the first.
+ */
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ PHYAddr[0] | MII_SCAN_REGISTER);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ (scanControl) |
+ ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
+}
+
+static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
+{
+ u8 ret;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ /* See if scan mode is enabled before we turn it off */
+ if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
+ (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
+ /* Scan is enabled */
+ ret = 1;
+ } else {
+ /* Scan is disabled */
+ ret = 0;
+ }
+
+ /*
+ * When disabling scan mode you must first change the MII register
+ * address
+ */
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ PHYAddr[0] | MII_SCAN_REGISTER);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
+ MAC_MII_CONTROL_RC) << 16));
+
+ return ret;
+}
+
+static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
+ u16 regAddr, u16 value, u32 phyAddr)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u8 scanWasEnabled;
+
+ scanWasEnabled = ql_mii_disable_scan_mode(qdev);
+
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ phyAddr | regAddr);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
+
+ /* Wait for write to complete 9/10/04 SJP */
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ if (scanWasEnabled)
+ ql_mii_enable_scan_mode(qdev);
+
+ return 0;
+}
+
+static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
+ u16 *value, u32 phyAddr)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u8 scanWasEnabled;
+ u32 temp;
+
+ scanWasEnabled = ql_mii_disable_scan_mode(qdev);
+
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ phyAddr | regAddr);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ (MAC_MII_CONTROL_RC << 16));
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
+
+ /* Wait for the read to complete */
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
+ *value = (u16) temp;
+
+ if (scanWasEnabled)
+ ql_mii_enable_scan_mode(qdev);
+
+ return 0;
+}
+
+static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ ql_mii_disable_scan_mode(qdev);
+
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ qdev->PHYAddr | regAddr);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
+
+ /* Wait for write to complete. */
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ ql_mii_enable_scan_mode(qdev);
+
+ return 0;
+}
+
+static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
+{
+ u32 temp;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ ql_mii_disable_scan_mode(qdev);
+
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ qdev->PHYAddr | regAddr);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ (MAC_MII_CONTROL_RC << 16));
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
+
+ /* Wait for the read to complete */
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
+ *value = (u16) temp;
+
+ ql_mii_enable_scan_mode(qdev);
+
+ return 0;
+}
+
+static void ql_petbi_reset(struct ql3_adapter *qdev)
+{
+ ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
+}
+
+static void ql_petbi_start_neg(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ /* Enable Auto-negotiation sense */
+ ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
+ reg |= PETBI_TBI_AUTO_SENSE;
+ ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
+
+ ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
+ PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);
+
+ ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
+ PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
+ PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
+}
+
+static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
+{
+ ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
+ PHYAddr[qdev->mac_index]);
+}
+
+static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ /* Enable Auto-negotiation sense */
+ ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
+ PHYAddr[qdev->mac_index]);
+ reg |= PETBI_TBI_AUTO_SENSE;
+ ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
+ PHYAddr[qdev->mac_index]);
+
+ ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
+ PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
+ PHYAddr[qdev->mac_index]);
+
+ ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
+ PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
+ PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
+ PHYAddr[qdev->mac_index]);
+}
+
+static void ql_petbi_init(struct ql3_adapter *qdev)
+{
+ ql_petbi_reset(qdev);
+ ql_petbi_start_neg(qdev);
+}
+
+static void ql_petbi_init_ex(struct ql3_adapter *qdev)
+{
+ ql_petbi_reset_ex(qdev);
+ ql_petbi_start_neg_ex(qdev);
+}
+
+static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
+ return 0;
+
+ return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
+}
+
+static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
+{
+ netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
+ /* power down device bit 11 = 1 */
+ ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
+ /* enable diagnostic mode bit 2 = 1 */
+ ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
+ /* 1000MB amplitude adjust (see Agere errata) */
+ ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
+ /* 1000MB amplitude adjust (see Agere errata) */
+ ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
+ /* 100MB amplitude adjust (see Agere errata) */
+ ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
+ /* 100MB amplitude adjust (see Agere errata) */
+ ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
+ /* 10MB amplitude adjust (see Agere errata) */
+ ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
+ /* 10MB amplitude adjust (see Agere errata) */
+ ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
+ /* point to hidden reg 0x2806 */
+ ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
+ /* Write new PHYAD w/bit 5 set */
+ ql_mii_write_reg_ex(qdev, 0x11,
+ 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
+ /*
+ * Disable diagnostic mode bit 2 = 0
+ * Power up device bit 11 = 0
+ * Link up (on) and activity (blink)
+ */
+ ql_mii_write_reg(qdev, 0x12, 0x840a);
+ ql_mii_write_reg(qdev, 0x00, 0x1140);
+ ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
+}
+
+static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
+ u16 phyIdReg0, u16 phyIdReg1)
+{
+ enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
+ u32 oui;
+ u16 model;
+ int i;
+
+ if (phyIdReg0 == 0xffff)
+ return result;
+
+ if (phyIdReg1 == 0xffff)
+ return result;
+
+ /* oui is split between two registers */
+ oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);
+
+ model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;
+
+ /* Scan table for this PHY */
+ for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
+ if ((oui == PHY_DEVICES[i].phyIdOUI) &&
+ (model == PHY_DEVICES[i].phyIdModel)) {
+ netdev_info(qdev->ndev, "Phy: %s\n",
+ PHY_DEVICES[i].name);
+ result = PHY_DEVICES[i].phyDevice;
+ break;
+ }
+ }
+
+ return result;
+}
+
+static int ql_phy_get_speed(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ switch (qdev->phyType) {
+ case PHY_AGERE_ET1011C: {
+ if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
+ return 0;
+
+ reg = (reg >> 8) & 3;
+ break;
+ }
+ default:
+ if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
+ return 0;
+
+ reg = (((reg & 0x18) >> 3) & 3);
+ }
+
+ switch (reg) {
+ case 2:
+ return SPEED_1000;
+ case 1:
+ return SPEED_100;
+ case 0:
+ return SPEED_10;
+ default:
+ return -1;
+ }
+}
+
+static int ql_is_full_dup(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ switch (qdev->phyType) {
+ case PHY_AGERE_ET1011C: {
+ if (ql_mii_read_reg(qdev, 0x1A, &reg))
+ return 0;
+
+ return ((reg & 0x0080) && (reg & 0x1000)) != 0;
+ }
+ case PHY_VITESSE_VSC8211:
+ default: {
+ if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
+ return 0;
+ return (reg & PHY_AUX_DUPLEX_STAT) != 0;
+ }
+ }
+}
+
+static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
+ return 0;
+
+ return (reg & PHY_NEG_PAUSE) != 0;
+}
+
+static int PHY_Setup(struct ql3_adapter *qdev)
+{
+ u16 reg1;
+ u16 reg2;
+ bool agereAddrChangeNeeded = false;
+ u32 miiAddr = 0;
+ int err;
+
+ /* Determine the PHY we are using by reading the ID's */
+ err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
+ if (err != 0) {
+ netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
+ return err;
+ }
+
+ err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
+ if (err != 0) {
+ netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
+ return err;
+ }
+
+	/* Check if we have an Agere PHY */
+ if ((reg1 == 0xffff) || (reg2 == 0xffff)) {
+		/* Determine which MII address we should use, based on
+		 * the index of the card
+		 */
+ if (qdev->mac_index == 0)
+ miiAddr = MII_AGERE_ADDR_1;
+ else
+ miiAddr = MII_AGERE_ADDR_2;
+
+ err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
+ if (err != 0) {
+ netdev_err(qdev->ndev,
+ "Could not read from reg PHY_ID_0_REG after Agere detected\n");
+ return err;
+ }
+
+ err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
+ if (err != 0) {
+ netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
+ return err;
+ }
+
+ /* We need to remember to initialize the Agere PHY */
+ agereAddrChangeNeeded = true;
+ }
+
+	/* Determine the particular PHY we have on board so that any
+	 * PHY-specific initializations can be applied
+	 */
+ qdev->phyType = getPhyType(qdev, reg1, reg2);
+
+ if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
+ /* need this here so address gets changed */
+ phyAgereSpecificInit(qdev, miiAddr);
+ } else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
+ netdev_err(qdev->ndev, "PHY is unknown\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
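+	/* Config register writes carry a bit-enable mask in the upper 16 bits */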
+ if (enable)
+ value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
+ else
+ value = (MAC_CONFIG_REG_PE << 16);
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+ else
+ ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ if (enable)
+ value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
+ else
+ value = (MAC_CONFIG_REG_SR << 16);
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+ else
+ ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ if (enable)
+ value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
+ else
+ value = (MAC_CONFIG_REG_GM << 16);
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+ else
+ ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ if (enable)
+ value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
+ else
+ value = (MAC_CONFIG_REG_FD << 16);
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+ else
+ ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ if (enable)
+ value =
+ ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
+ ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
+ else
+ value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+ else
+ ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_is_fiber(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = PORT_STATUS_SM0;
+ break;
+ case 1:
+ bitToCheck = PORT_STATUS_SM1;
+ break;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ return (temp & bitToCheck) != 0;
+}
+
+static int ql_is_auto_cfg(struct ql3_adapter *qdev)
+{
+ u16 reg;
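+	/* MII control register (0x00); bit 0x1000 is auto-negotiation enable */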
+ ql_mii_read_reg(qdev, 0x00, &reg);
+ return (reg & 0x1000) != 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = PORT_STATUS_AC0;
+ break;
+ case 1:
+ bitToCheck = PORT_STATUS_AC1;
+ break;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ if (temp & bitToCheck) {
+ netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
+ return 1;
+ }
+ netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
+ return 0;
+}
+
+/*
+ * ql_is_neg_pause() returns 1 if pause was negotiated to be on
+ */
+static int ql_is_neg_pause(struct ql3_adapter *qdev)
+{
+ if (ql_is_fiber(qdev))
+ return ql_is_petbi_neg_pause(qdev);
+ else
+ return ql_is_phy_neg_pause(qdev);
+}
+
+static int ql_auto_neg_error(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = PORT_STATUS_AE0;
+ break;
+ case 1:
+ bitToCheck = PORT_STATUS_AE1;
+ break;
+ }
+ temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ return (temp & bitToCheck) != 0;
+}
+
+static u32 ql_get_link_speed(struct ql3_adapter *qdev)
+{
+ if (ql_is_fiber(qdev))
+ return SPEED_1000;
+ else
+ return ql_phy_get_speed(qdev);
+}
+
+static int ql_is_link_full_dup(struct ql3_adapter *qdev)
+{
+ if (ql_is_fiber(qdev))
+ return 1;
+ else
+ return ql_is_full_dup(qdev);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_link_down_detect(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = ISP_CONTROL_LINK_DN_0;
+ break;
+ case 1:
+ bitToCheck = ISP_CONTROL_LINK_DN_1;
+ break;
+ }
+
+ temp =
+ ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
+ return (temp & bitToCheck) != 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ switch (qdev->mac_index) {
+ case 0:
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.ispControlStatus,
+ (ISP_CONTROL_LINK_DN_0) |
+ (ISP_CONTROL_LINK_DN_0 << 16));
+ break;
+
+ case 1:
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.ispControlStatus,
+ (ISP_CONTROL_LINK_DN_1) |
+ (ISP_CONTROL_LINK_DN_1 << 16));
+ break;
+
+ default:
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = PORT_STATUS_F1_ENABLED;
+ break;
+ case 1:
+ bitToCheck = PORT_STATUS_F3_ENABLED;
+ break;
+ default:
+ break;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ if (temp & bitToCheck) {
+ netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+ "not link master\n");
+ return 0;
+ }
+
+ netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
+ return 1;
+}
+
+static void ql_phy_reset_ex(struct ql3_adapter *qdev)
+{
+ ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
+ PHYAddr[qdev->mac_index]);
+}
+
+static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
+{
+ u16 reg;
+ u16 portConfiguration;
+
+	/* turn off external loopback */
+	if (qdev->phyType == PHY_AGERE_ET1011C)
+		ql_mii_write_reg(qdev, 0x13, 0x0000);
+
+ if (qdev->mac_index == 0)
+ portConfiguration =
+ qdev->nvram_data.macCfg_port0.portConfiguration;
+ else
+ portConfiguration =
+ qdev->nvram_data.macCfg_port1.portConfiguration;
+
+	/* Some HBAs in the field are set to 0 and need to be
+	 * reinterpreted with a default value
+	 */
+ if (portConfiguration == 0)
+ portConfiguration = PORT_CONFIG_DEFAULT;
+
+ /* Set the 1000 advertisements */
+ ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
+ PHYAddr[qdev->mac_index]);
+ reg &= ~PHY_GIG_ALL_PARAMS;
+
+ if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
+ if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
+ reg |= PHY_GIG_ADV_1000F;
+ else
+ reg |= PHY_GIG_ADV_1000H;
+ }
+
+ ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
+ PHYAddr[qdev->mac_index]);
+
+ /* Set the 10/100 & pause negotiation advertisements */
+ ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
+ PHYAddr[qdev->mac_index]);
+ reg &= ~PHY_NEG_ALL_PARAMS;
+
+ if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
+ reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;
+
+ if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
+ if (portConfiguration & PORT_CONFIG_100MB_SPEED)
+ reg |= PHY_NEG_ADV_100F;
+
+ if (portConfiguration & PORT_CONFIG_10MB_SPEED)
+ reg |= PHY_NEG_ADV_10F;
+ }
+
+ if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
+ if (portConfiguration & PORT_CONFIG_100MB_SPEED)
+ reg |= PHY_NEG_ADV_100H;
+
+ if (portConfiguration & PORT_CONFIG_10MB_SPEED)
+ reg |= PHY_NEG_ADV_10H;
+ }
+
+ if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
+ reg |= 1;
+
+ ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
+ PHYAddr[qdev->mac_index]);
+
+ ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);
+
+ ql_mii_write_reg_ex(qdev, CONTROL_REG,
+ reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
+ PHYAddr[qdev->mac_index]);
+}
+
+static void ql_phy_init_ex(struct ql3_adapter *qdev)
+{
+ ql_phy_reset_ex(qdev);
+ PHY_Setup(qdev);
+ ql_phy_start_neg_ex(qdev);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static u32 ql_get_link_state(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp, linkState;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = PORT_STATUS_UP0;
+ break;
+ case 1:
+ bitToCheck = PORT_STATUS_UP1;
+ break;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ if (temp & bitToCheck)
+ linkState = LS_UP;
+ else
+ linkState = LS_DOWN;
+
+ return linkState;
+}
+
+static int ql_port_start(struct ql3_adapter *qdev)
+{
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 7)) {
+ netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
+ return -1;
+ }
+
+ if (ql_is_fiber(qdev)) {
+ ql_petbi_init(qdev);
+ } else {
+ /* Copper port */
+ ql_phy_init_ex(qdev);
+ }
+
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ return 0;
+}
+
+static int ql_finish_auto_neg(struct ql3_adapter *qdev)
+{
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 7))
+ return -1;
+
+ if (!ql_auto_neg_error(qdev)) {
+ if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
+ /* configure the MAC */
+ netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+ "Configuring link\n");
+ ql_mac_cfg_soft_reset(qdev, 1);
+			ql_mac_cfg_gig(qdev,
+				       ql_get_link_speed(qdev) == SPEED_1000);
+			ql_mac_cfg_full_dup(qdev, ql_is_link_full_dup(qdev));
+			ql_mac_cfg_pause(qdev, ql_is_neg_pause(qdev));
+ ql_mac_cfg_soft_reset(qdev, 0);
+
+ /* enable the MAC */
+ netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+ "Enabling mac\n");
+ ql_mac_enable(qdev, 1);
+ }
+
+ qdev->port_link_state = LS_UP;
+ netif_start_queue(qdev->ndev);
+ netif_carrier_on(qdev->ndev);
+ netif_info(qdev, link, qdev->ndev,
+ "Link is up at %d Mbps, %s duplex\n",
+ ql_get_link_speed(qdev),
+ ql_is_link_full_dup(qdev) ? "full" : "half");
+
+ } else { /* Remote error detected */
+
+ if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
+ netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+ "Remote error detected. Calling ql_port_start()\n");
+ /*
+ * ql_port_start() is shared code and needs
+			 * to lock the PHY on its own.
+ */
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ if (ql_port_start(qdev)) /* Restart port */
+ return -1;
+ return 0;
+ }
+ }
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ return 0;
+}
+
+static void ql_link_state_machine_work(struct work_struct *work)
+{
+ struct ql3_adapter *qdev =
+ container_of(work, struct ql3_adapter, link_state_work.work);
+
+ u32 curr_link_state;
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+ curr_link_state = ql_get_link_state(qdev);
+
+ if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
+ netif_info(qdev, link, qdev->ndev,
+ "Reset in progress, skip processing link state\n");
+
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+		/* Restart timer on 1 second interval. */
+ mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
+
+ return;
+ }
+
+ switch (qdev->port_link_state) {
+ default:
+ if (test_bit(QL_LINK_MASTER, &qdev->flags))
+ ql_port_start(qdev);
+ qdev->port_link_state = LS_DOWN;
+ /* Fall Through */
+
+ case LS_DOWN:
+ if (curr_link_state == LS_UP) {
+ netif_info(qdev, link, qdev->ndev, "Link is up\n");
+ if (ql_is_auto_neg_complete(qdev))
+ ql_finish_auto_neg(qdev);
+
+ if (qdev->port_link_state == LS_UP)
+ ql_link_down_detect_clear(qdev);
+
+ qdev->port_link_state = LS_UP;
+ }
+ break;
+
+ case LS_UP:
+ /*
+ * See if the link is currently down or went down and came
+ * back up
+ */
+ if (curr_link_state == LS_DOWN) {
+ netif_info(qdev, link, qdev->ndev, "Link is down\n");
+ qdev->port_link_state = LS_DOWN;
+ }
+ if (ql_link_down_detect(qdev))
+ qdev->port_link_state = LS_DOWN;
+ break;
+ }
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+	/* Restart timer on 1 second interval. */
+ mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
+}
+
+/*
+ * Caller must take hw_lock and QL_PHY_GIO_SEM.
+ */
+static void ql_get_phy_owner(struct ql3_adapter *qdev)
+{
+ if (ql_this_adapter_controls_port(qdev))
+ set_bit(QL_LINK_MASTER, &qdev->flags);
+ else
+ clear_bit(QL_LINK_MASTER, &qdev->flags);
+}
+
+/*
+ * Caller must take hw_lock and QL_PHY_GIO_SEM.
+ */
+static void ql_init_scan_mode(struct ql3_adapter *qdev)
+{
+ ql_mii_enable_scan_mode(qdev);
+
+ if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
+ if (ql_this_adapter_controls_port(qdev))
+ ql_petbi_init_ex(qdev);
+ } else {
+ if (ql_this_adapter_controls_port(qdev))
+ ql_phy_init_ex(qdev);
+ }
+}
+
+/*
+ * MII_Setup needs to be called before taking the PHY out of reset
+ * so that the management interface clock speed can be set properly.
+ * It would be better if we had a way to disable MDC until after the
+ * PHY is out of reset, but we don't have that capability.
+ */
+static int ql_mii_setup(struct ql3_adapter *qdev)
+{
+ u32 reg;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 7))
+ return -1;
+
+ if (qdev->device_id == QL3032_DEVICE_ID)
+ ql_write_page0_reg(qdev,
+ &port_regs->macMIIMgmtControlReg, 0x0f00000);
+
+ /* Divide 125MHz clock by 28 to meet PHY timing requirements */
+ reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
+
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ return 0;
+}
+
+#define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \
+ SUPPORTED_FIBRE | \
+ SUPPORTED_Autoneg)
+#define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \
+ SUPPORTED_10baseT_Full | \
+ SUPPORTED_100baseT_Half | \
+ SUPPORTED_100baseT_Full | \
+ SUPPORTED_1000baseT_Half | \
+ SUPPORTED_1000baseT_Full | \
+ SUPPORTED_Autoneg | \
+				SUPPORTED_TP)
+
+static u32 ql_supported_modes(struct ql3_adapter *qdev)
+{
+ if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
+ return SUPPORTED_OPTICAL_MODES;
+
+ return SUPPORTED_TP_MODES;
+}
+
+static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
+{
+ int status;
+ unsigned long hw_flags;
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE |
+ (qdev->mac_index) * 2) << 7)) {
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return 0;
+ }
+ status = ql_is_auto_cfg(qdev);
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return status;
+}
+
+static u32 ql_get_speed(struct ql3_adapter *qdev)
+{
+ u32 status;
+ unsigned long hw_flags;
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE |
+ (qdev->mac_index) * 2) << 7)) {
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return 0;
+ }
+ status = ql_get_link_speed(qdev);
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return status;
+}
+
+static int ql_get_full_dup(struct ql3_adapter *qdev)
+{
+ int status;
+ unsigned long hw_flags;
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE |
+ (qdev->mac_index) * 2) << 7)) {
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return 0;
+ }
+ status = ql_is_link_full_dup(qdev);
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return status;
+}
+
+static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+
+ ecmd->transceiver = XCVR_INTERNAL;
+ ecmd->supported = ql_supported_modes(qdev);
+
+ if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
+ ecmd->port = PORT_FIBRE;
+ } else {
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = qdev->PHYAddr;
+ }
+ ecmd->advertising = ql_supported_modes(qdev);
+ ecmd->autoneg = ql_get_auto_cfg_status(qdev);
+ ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev));
+ ecmd->duplex = ql_get_full_dup(qdev);
+ return 0;
+}
+
+static void ql_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
+ strncpy(drvinfo->version, ql3xxx_driver_version, 32);
+ strncpy(drvinfo->fw_version, "N/A", 32);
+ strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+ drvinfo->regdump_len = 0;
+ drvinfo->eedump_len = 0;
+}
+
+static u32 ql_get_msglevel(struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ return qdev->msg_enable;
+}
+
+static void ql_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ qdev->msg_enable = value;
+}
+
+static void ql_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+	u32 reg;
+
+	if (qdev->mac_index == 0)
+ reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
+ else
+ reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
+
+ pause->autoneg = ql_get_auto_cfg_status(qdev);
+ pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
+ pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
+}
+
+static const struct ethtool_ops ql3xxx_ethtool_ops = {
+ .get_settings = ql_get_settings,
+ .get_drvinfo = ql_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = ql_get_msglevel,
+ .set_msglevel = ql_set_msglevel,
+ .get_pauseparam = ql_get_pauseparam,
+};
+
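+/*
+ * Attach fresh skbs to any large-buffer entries that lost theirs.
+ * Returns 1 once every missing skb has been replaced, 0 otherwise.
+ */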
+static int ql_populate_free_queue(struct ql3_adapter *qdev)
+{
+ struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
+ dma_addr_t map;
+ int err;
+
+ while (lrg_buf_cb) {
+ if (!lrg_buf_cb->skb) {
+ lrg_buf_cb->skb =
+ netdev_alloc_skb(qdev->ndev,
+ qdev->lrg_buffer_len);
+ if (unlikely(!lrg_buf_cb->skb)) {
+ netdev_printk(KERN_DEBUG, qdev->ndev,
+ "Failed netdev_alloc_skb()\n");
+ break;
+ } else {
+ /*
+ * We save some space to copy the ethhdr from
+ * first buffer
+ */
+ skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
+ map = pci_map_single(qdev->pdev,
+ lrg_buf_cb->skb->data,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE,
+ PCI_DMA_FROMDEVICE);
+
+ err = pci_dma_mapping_error(qdev->pdev, map);
+ if (err) {
+ netdev_err(qdev->ndev,
+ "PCI mapping failed with error: %d\n",
+ err);
+ dev_kfree_skb(lrg_buf_cb->skb);
+ lrg_buf_cb->skb = NULL;
+ break;
+ }
+
+ lrg_buf_cb->buf_phy_addr_low =
+ cpu_to_le32(LS_64BITS(map));
+ lrg_buf_cb->buf_phy_addr_high =
+ cpu_to_le32(MS_64BITS(map));
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE);
+ --qdev->lrg_buf_skb_check;
+ if (!qdev->lrg_buf_skb_check)
+ return 1;
+ }
+ }
+ lrg_buf_cb = lrg_buf_cb->next;
+ }
+ return 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
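+	/* The producer index counts queue entries of 8 buffers each, hence
+	 * the decrement by 8 below; updates are batched until at least 16
+	 * buffers have been released.
+	 */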
+ if (qdev->small_buf_release_cnt >= 16) {
+ while (qdev->small_buf_release_cnt >= 16) {
+ qdev->small_buf_q_producer_index++;
+
+ if (qdev->small_buf_q_producer_index ==
+ NUM_SBUFQ_ENTRIES)
+ qdev->small_buf_q_producer_index = 0;
+ qdev->small_buf_release_cnt -= 8;
+ }
+ wmb();
+ writel(qdev->small_buf_q_producer_index,
+ &port_regs->CommonRegs.rxSmallQProducerIndex);
+ }
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
+{
+ struct bufq_addr_element *lrg_buf_q_ele;
+ int i;
+ struct ql_rcv_buf_cb *lrg_buf_cb;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ if ((qdev->lrg_buf_free_count >= 8) &&
+ (qdev->lrg_buf_release_cnt >= 16)) {
+
+ if (qdev->lrg_buf_skb_check)
+ if (!ql_populate_free_queue(qdev))
+ return;
+
+ lrg_buf_q_ele = qdev->lrg_buf_next_free;
+
+ while ((qdev->lrg_buf_release_cnt >= 16) &&
+ (qdev->lrg_buf_free_count >= 8)) {
+
+ for (i = 0; i < 8; i++) {
+ lrg_buf_cb =
+ ql_get_from_lrg_buf_free_list(qdev);
+ lrg_buf_q_ele->addr_high =
+ lrg_buf_cb->buf_phy_addr_high;
+ lrg_buf_q_ele->addr_low =
+ lrg_buf_cb->buf_phy_addr_low;
+ lrg_buf_q_ele++;
+
+ qdev->lrg_buf_release_cnt--;
+ }
+
+ qdev->lrg_buf_q_producer_index++;
+
+ if (qdev->lrg_buf_q_producer_index ==
+ qdev->num_lbufq_entries)
+ qdev->lrg_buf_q_producer_index = 0;
+
+ if (qdev->lrg_buf_q_producer_index ==
+ (qdev->num_lbufq_entries - 1)) {
+ lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
+ }
+ }
+ wmb();
+ qdev->lrg_buf_next_free = lrg_buf_q_ele;
+ writel(qdev->lrg_buf_q_producer_index,
+ &port_regs->CommonRegs.rxLargeQProducerIndex);
+ }
+}
+
+static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
+ struct ob_mac_iocb_rsp *mac_rsp)
+{
+ struct ql_tx_buf_cb *tx_cb;
+ int i;
+
+ if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+ netdev_warn(qdev->ndev,
+ "Frame too short but it was padded and sent\n");
+ }
+
+ tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
+
+ /* Check the transmit response flags for any errors */
+ if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+ netdev_err(qdev->ndev,
+ "Frame too short to be legal, frame not sent\n");
+
+ qdev->ndev->stats.tx_errors++;
+ goto frame_not_sent;
+ }
+
+ if (tx_cb->seg_count == 0) {
+ netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
+ mac_rsp->transaction_id);
+
+ qdev->ndev->stats.tx_errors++;
+ goto invalid_seg_count;
+ }
+
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen),
+ PCI_DMA_TODEVICE);
+ tx_cb->seg_count--;
+ if (tx_cb->seg_count) {
+ for (i = 1; i < tx_cb->seg_count; i++) {
+ pci_unmap_page(qdev->pdev,
+ dma_unmap_addr(&tx_cb->map[i],
+ mapaddr),
+ dma_unmap_len(&tx_cb->map[i], maplen),
+ PCI_DMA_TODEVICE);
+ }
+ }
+ qdev->ndev->stats.tx_packets++;
+ qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
+
+frame_not_sent:
+ dev_kfree_skb_irq(tx_cb->skb);
+ tx_cb->skb = NULL;
+
+invalid_seg_count:
+ atomic_inc(&qdev->tx_count);
+}
+
+static void ql_get_sbuf(struct ql3_adapter *qdev)
+{
+ if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
+ qdev->small_buf_index = 0;
+ qdev->small_buf_release_cnt++;
+}
+
+static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
+{
+	struct ql_rcv_buf_cb *lrg_buf_cb =
+		&qdev->lrg_buf[qdev->lrg_buf_index];
+
+ qdev->lrg_buf_release_cnt++;
+ if (++qdev->lrg_buf_index == qdev->num_large_buffers)
+ qdev->lrg_buf_index = 0;
+ return lrg_buf_cb;
+}
+
+/*
+ * The difference between 3022 and 3032 for inbound completions:
+ * 3022 uses two buffers per completion. The first buffer contains
+ * (some) header info, the second the remainder of the headers plus
+ * the data. For this chip we reserve some space at the top of the
+ * receive buffer so that the header info in buffer one can be
+ * prepended to buffer two. Buffer two is then sent up while
+ * buffer one is returned to the hardware to be reused.
+ * 3032 receives all of its data and headers in one buffer for a
+ * simpler process. 3032 also supports checksum verification as
+ * can be seen in ql_process_macip_rx_intr().
+ */
+static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
+ struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
+{
+ struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
+ struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
+ struct sk_buff *skb;
+ u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
+
+ /*
+ * Get the inbound address list (small buffer).
+ */
+ ql_get_sbuf(qdev);
+
+ if (qdev->device_id == QL3022_DEVICE_ID)
+ lrg_buf_cb1 = ql_get_lbuf(qdev);
+
+ /* start of second buffer */
+ lrg_buf_cb2 = ql_get_lbuf(qdev);
+ skb = lrg_buf_cb2->skb;
+
+ qdev->ndev->stats.rx_packets++;
+ qdev->ndev->stats.rx_bytes += length;
+
+ skb_put(skb, length);
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(lrg_buf_cb2, mapaddr),
+ dma_unmap_len(lrg_buf_cb2, maplen),
+ PCI_DMA_FROMDEVICE);
+ prefetch(skb->data);
+ skb_checksum_none_assert(skb);
+ skb->protocol = eth_type_trans(skb, qdev->ndev);
+
+ netif_receive_skb(skb);
+ lrg_buf_cb2->skb = NULL;
+
+ if (qdev->device_id == QL3022_DEVICE_ID)
+ ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+ ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
+}
+
+static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
+ struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
+{
+ struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
+ struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
+ struct sk_buff *skb1 = NULL, *skb2;
+ struct net_device *ndev = qdev->ndev;
+ u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
+ u16 size = 0;
+
+ /*
+ * Get the inbound address list (small buffer).
+ */
+
+ ql_get_sbuf(qdev);
+
+ if (qdev->device_id == QL3022_DEVICE_ID) {
+ /* start of first buffer on 3022 */
+ lrg_buf_cb1 = ql_get_lbuf(qdev);
+ skb1 = lrg_buf_cb1->skb;
+ size = ETH_HLEN;
+ if (*((u16 *) skb1->data) != 0xFFFF)
+ size += VLAN_ETH_HLEN - ETH_HLEN;
+ }
+
+ /* start of second buffer */
+ lrg_buf_cb2 = ql_get_lbuf(qdev);
+ skb2 = lrg_buf_cb2->skb;
+
+ skb_put(skb2, length); /* Just the second buffer length here. */
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(lrg_buf_cb2, mapaddr),
+ dma_unmap_len(lrg_buf_cb2, maplen),
+ PCI_DMA_FROMDEVICE);
+ prefetch(skb2->data);
+
+ skb_checksum_none_assert(skb2);
+ if (qdev->device_id == QL3022_DEVICE_ID) {
+ /*
+ * Copy the ethhdr from first buffer to second. This
+ * is necessary for 3022 IP completions.
+ */
+ skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
+ skb_push(skb2, size), size);
+ } else {
+ u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
+ if (checksum &
+ (IB_IP_IOCB_RSP_3032_ICE |
+ IB_IP_IOCB_RSP_3032_CE)) {
+ netdev_err(ndev,
+ "%s: Bad checksum for this %s packet, checksum = %x\n",
+ __func__,
+ ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
+ "TCP" : "UDP"), checksum);
+ } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
+ (checksum & IB_IP_IOCB_RSP_3032_UDP &&
+ !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
+ skb2->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ }
+ skb2->protocol = eth_type_trans(skb2, qdev->ndev);
+
+ netif_receive_skb(skb2);
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += length;
+ lrg_buf_cb2->skb = NULL;
+
+ if (qdev->device_id == QL3022_DEVICE_ID)
+ ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+ ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
+}
+
+static int ql_tx_rx_clean(struct ql3_adapter *qdev,
+ int *tx_cleaned, int *rx_cleaned, int work_to_do)
+{
+ struct net_rsp_iocb *net_rsp;
+ struct net_device *ndev = qdev->ndev;
+ int work_done = 0;
+
+ /* While there are entries in the completion queue. */
+ while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
+ qdev->rsp_consumer_index) && (work_done < work_to_do)) {
+
+ net_rsp = qdev->rsp_current;
+ rmb();
+ /*
+		 * Fix the 3032 chip's undocumented "feature" where bit-8 is set
+ * if the inbound completion is for a VLAN.
+ */
+ if (qdev->device_id == QL3032_DEVICE_ID)
+ net_rsp->opcode &= 0x7f;
+ switch (net_rsp->opcode) {
+
+ case OPCODE_OB_MAC_IOCB_FN0:
+ case OPCODE_OB_MAC_IOCB_FN2:
+ ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
+ net_rsp);
+ (*tx_cleaned)++;
+ break;
+
+ case OPCODE_IB_MAC_IOCB:
+ case OPCODE_IB_3032_MAC_IOCB:
+ ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
+ net_rsp);
+ (*rx_cleaned)++;
+ break;
+
+ case OPCODE_IB_IP_IOCB:
+ case OPCODE_IB_3032_IP_IOCB:
+ ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
+ net_rsp);
+ (*rx_cleaned)++;
+ break;
+ default: {
+ u32 *tmp = (u32 *)net_rsp;
+ netdev_err(ndev,
+ "Hit default case, not handled!\n"
+ " dropping the packet, opcode = %x\n"
+ "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
+ net_rsp->opcode,
+ (unsigned long int)tmp[0],
+ (unsigned long int)tmp[1],
+ (unsigned long int)tmp[2],
+ (unsigned long int)tmp[3]);
+ }
+ }
+
+ qdev->rsp_consumer_index++;
+
+ if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
+ qdev->rsp_consumer_index = 0;
+ qdev->rsp_current = qdev->rsp_q_virt_addr;
+ } else {
+ qdev->rsp_current++;
+ }
+
+ work_done = *tx_cleaned + *rx_cleaned;
+ }
+
+ return work_done;
+}
+
+static int ql_poll(struct napi_struct *napi, int budget)
+{
+ struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
+ int rx_cleaned = 0, tx_cleaned = 0;
+ unsigned long hw_flags;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
+
+ if (tx_cleaned + rx_cleaned != budget) {
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ __napi_complete(napi);
+ ql_update_small_bufq_prod_index(qdev);
+ ql_update_lrg_bufq_prod_index(qdev);
+ writel(qdev->rsp_consumer_index,
+ &port_regs->CommonRegs.rspQConsumerIndex);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ ql_enable_interrupts(qdev);
+ }
+ return tx_cleaned + rx_cleaned;
+}
+
+static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+ int handled = 1;
+ u32 var;
+
+ value = ql_read_common_reg_l(qdev,
+ &port_regs->CommonRegs.ispControlStatus);
+
+ if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
+ spin_lock(&qdev->adapter_lock);
+ netif_stop_queue(qdev->ndev);
+ netif_carrier_off(qdev->ndev);
+ ql_disable_interrupts(qdev);
+ qdev->port_link_state = LS_DOWN;
+		set_bit(QL_RESET_ACTIVE, &qdev->flags);
+
+ if (value & ISP_CONTROL_FE) {
+ /*
+ * Chip Fatal Error.
+ */
+ var =
+ ql_read_page0_reg_l(qdev,
+ &port_regs->PortFatalErrStatus);
+ netdev_warn(ndev,
+ "Resetting chip. PortFatalErrStatus register = 0x%x\n",
+ var);
+			set_bit(QL_RESET_START, &qdev->flags);
+ } else {
+ /*
+ * Soft Reset Requested.
+ */
+			set_bit(QL_RESET_PER_SCSI, &qdev->flags);
+ netdev_err(ndev,
+ "Another function issued a reset to the chip. ISR value = %x\n",
+ value);
+ }
+ queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
+ spin_unlock(&qdev->adapter_lock);
+ } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
+ ql_disable_interrupts(qdev);
+ if (likely(napi_schedule_prep(&qdev->napi)))
+ __napi_schedule(&qdev->napi);
+ } else
+ return IRQ_NONE;
+
+ return IRQ_RETVAL(handled);
+}
+
+/*
+ * Get the total number of segments needed for the given number of fragments.
+ * This is necessary because outbound address lists (OAL) will be used when
+ * more than two frags are given. Each address list has 5 addr/len pairs.
+ * The 5th pair in each OAL is used to point to the next OAL if more frags
+ * are coming. That is why the frags:segment count ratio is not linear.
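+ * For example, 6 frags need 1 linear + 6 frag segments + 1 OAL
+ * continuation entry = 8 segments, hence frags + 2.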
+ */
+static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
+{
+ if (qdev->device_id == QL3022_DEVICE_ID)
+ return 1;
+
+ if (frags <= 2)
+ return frags + 1;
+ else if (frags <= 6)
+ return frags + 2;
+ else if (frags <= 10)
+ return frags + 3;
+ else if (frags <= 14)
+ return frags + 4;
+ else if (frags <= 18)
+ return frags + 5;
+ return -1;
+}
+
+static void ql_hw_csum_setup(const struct sk_buff *skb,
+ struct ob_mac_iocb_req *mac_iocb_ptr)
+{
+ const struct iphdr *ip = ip_hdr(skb);
+
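+	/* Record the IP header location and request TCP or UDP checksum
+	 * insertion.
+	 */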
+ mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
+ mac_iocb_ptr->ip_hdr_len = ip->ihl;
+
+ if (ip->protocol == IPPROTO_TCP) {
+ mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
+ OB_3032MAC_IOCB_REQ_IC;
+ } else {
+ mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
+ OB_3032MAC_IOCB_REQ_IC;
+ }
+}
+
+/*
+ * Map the buffers for this transmit.
+ * Returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on mapping failure.
+ */
+static int ql_send_map(struct ql3_adapter *qdev,
+ struct ob_mac_iocb_req *mac_iocb_ptr,
+ struct ql_tx_buf_cb *tx_cb,
+ struct sk_buff *skb)
+{
+ struct oal *oal;
+ struct oal_entry *oal_entry;
+ int len = skb_headlen(skb);
+ dma_addr_t map;
+ int err;
+ int completed_segs, i;
+ int seg_cnt, seg = 0;
+ int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
+
+ seg_cnt = tx_cb->seg_count;
+ /*
+ * Map the skb buffer first.
+ */
+ map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+
+ err = pci_dma_mapping_error(qdev->pdev, map);
+ if (err) {
+ netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
+ err);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
+ oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+ oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+ oal_entry->len = cpu_to_le32(len);
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+ dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
+ seg++;
+
+ if (seg_cnt == 1) {
+ /* Terminate the last segment. */
+ oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
+ return NETDEV_TX_OK;
+ }
+ oal = tx_cb->oal;
+ for (completed_segs = 0;
+ completed_segs < frag_cnt;
+ completed_segs++, seg++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
+ oal_entry++;
+ /*
+ * Check for continuation requirements.
+ * It's strange but necessary.
+ * Continuation entry points to outbound address list.
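+		 * The 3rd ALP of the IOCB (seg 2) and the 5th slot of each
+		 * OAL (segs 7, 12, 17) chain to a new OAL when more
+		 * segments remain.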
+ */
+ if ((seg == 2 && seg_cnt > 3) ||
+ (seg == 7 && seg_cnt > 8) ||
+ (seg == 12 && seg_cnt > 13) ||
+ (seg == 17 && seg_cnt > 18)) {
+ map = pci_map_single(qdev->pdev, oal,
+ sizeof(struct oal),
+ PCI_DMA_TODEVICE);
+
+ err = pci_dma_mapping_error(qdev->pdev, map);
+ if (err) {
+ netdev_err(qdev->ndev,
+ "PCI mapping outbound address list with error: %d\n",
+ err);
+ goto map_error;
+ }
+
+ oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+ oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+ oal_entry->len = cpu_to_le32(sizeof(struct oal) |
+ OAL_CONT_ENTRY);
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+ dma_unmap_len_set(&tx_cb->map[seg], maplen,
+ sizeof(struct oal));
+ oal_entry = (struct oal_entry *)oal;
+ oal++;
+ seg++;
+ }
+
+ map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, frag->size,
+ DMA_TO_DEVICE);
+
+ err = dma_mapping_error(&qdev->pdev->dev, map);
+ if (err) {
+ netdev_err(qdev->ndev,
+ "PCI mapping frags failed with error: %d\n",
+ err);
+ goto map_error;
+ }
+
+ oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+ oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+ oal_entry->len = cpu_to_le32(frag->size);
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+ dma_unmap_len_set(&tx_cb->map[seg], maplen, frag->size);
+ }
+ /* Terminate the last segment. */
+ oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
+ return NETDEV_TX_OK;
+
+map_error:
+	/* A PCI mapping failed, so back out: walk the OALs and pages
+	 * that were mapped above and unmap them to clean up properly.
+	 */
+
+ seg = 1;
+ oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
+ oal = tx_cb->oal;
+ for (i = 0; i < completed_segs; i++, seg++) {
+ oal_entry++;
+
+ /*
+ * Check for continuation requirements.
+ * It's strange but necessary.
+ */
+
+ if ((seg == 2 && seg_cnt > 3) ||
+ (seg == 7 && seg_cnt > 8) ||
+ (seg == 12 && seg_cnt > 13) ||
+ (seg == 17 && seg_cnt > 18)) {
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+ dma_unmap_len(&tx_cb->map[seg], maplen),
+ PCI_DMA_TODEVICE);
+ oal++;
+ seg++;
+ }
+
+ pci_unmap_page(qdev->pdev,
+ dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+ dma_unmap_len(&tx_cb->map[seg], maplen),
+ PCI_DMA_TODEVICE);
+ }
+
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+			 dma_unmap_len(&tx_cb->map[0], maplen),
+ PCI_DMA_TODEVICE);
+
+ return NETDEV_TX_BUSY;
+}
+
+/*
+ * The difference between 3022 and 3032 sends:
+ * 3022 only supports a simple single segment transmission.
+ * 3032 supports checksumming and scatter/gather lists (fragments).
+ * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
+ * in the IOCB plus a chain of outbound address lists (OAL) that
+ * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
+ * will be used to point to an OAL when more ALP entries are required.
+ * The IOCB is always the top of the chain followed by one or more
+ * OALs (when necessary).
+ */
+static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ struct ql_tx_buf_cb *tx_cb;
+ u32 tot_len = skb->len;
+ struct ob_mac_iocb_req *mac_iocb_ptr;
+
+ if (unlikely(atomic_read(&qdev->tx_count) < 2))
+ return NETDEV_TX_BUSY;
+
+ tx_cb = &qdev->tx_buf[qdev->req_producer_index];
+ tx_cb->seg_count = ql_get_seg_count(qdev,
+ skb_shinfo(skb)->nr_frags);
+ if (tx_cb->seg_count == -1) {
+ netdev_err(ndev, "%s: invalid segment count!\n", __func__);
+ return NETDEV_TX_OK;
+ }
+
+ mac_iocb_ptr = tx_cb->queue_entry;
+ memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
+ mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
+ mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
+ mac_iocb_ptr->flags |= qdev->mb_bit_mask;
+ mac_iocb_ptr->transaction_id = qdev->req_producer_index;
+ mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
+ tx_cb->skb = skb;
+ if (qdev->device_id == QL3032_DEVICE_ID &&
+ skb->ip_summed == CHECKSUM_PARTIAL)
+ ql_hw_csum_setup(skb, mac_iocb_ptr);
+
+ if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
+ netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
+ return NETDEV_TX_BUSY;
+ }
+
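+	/* Order the IOCB writes ahead of the producer index update and the
+	 * doorbell write below.
+	 */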
+ wmb();
+ qdev->req_producer_index++;
+ if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
+ qdev->req_producer_index = 0;
+ wmb();
+ ql_write_common_reg_l(qdev,
+ &port_regs->CommonRegs.reqQProducerIndex,
+ qdev->req_producer_index);
+
+ netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
+ "tx queued, slot %d, len %d\n",
+ qdev->req_producer_index, skb->len);
+
+ atomic_dec(&qdev->tx_count);
+ return NETDEV_TX_OK;
+}
+
+static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
+{
+ qdev->req_q_size =
+ (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
+
+ qdev->req_q_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ (size_t) qdev->req_q_size,
+ &qdev->req_q_phy_addr);
+
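+	/* Reject the allocation if it is not naturally aligned to its size */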
+ if ((qdev->req_q_virt_addr == NULL) ||
+ LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
+ netdev_err(qdev->ndev, "reqQ failed\n");
+ return -ENOMEM;
+ }
+
+ qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
+
+ qdev->rsp_q_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ (size_t) qdev->rsp_q_size,
+ &qdev->rsp_q_phy_addr);
+
+ if ((qdev->rsp_q_virt_addr == NULL) ||
+ LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
+ netdev_err(qdev->ndev, "rspQ allocation failed\n");
+ pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
+ qdev->req_q_virt_addr,
+ qdev->req_q_phy_addr);
+ return -ENOMEM;
+ }
+
+ set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
+
+ return 0;
+}
+
+static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
+{
+ if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
+ netdev_info(qdev->ndev, "Already done\n");
+ return;
+ }
+
+ pci_free_consistent(qdev->pdev,
+ qdev->req_q_size,
+ qdev->req_q_virt_addr, qdev->req_q_phy_addr);
+
+ qdev->req_q_virt_addr = NULL;
+
+ pci_free_consistent(qdev->pdev,
+ qdev->rsp_q_size,
+ qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
+
+ qdev->rsp_q_virt_addr = NULL;
+
+ clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
+}
+
+static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
+{
+ /* Create Large Buffer Queue */
+ qdev->lrg_buf_q_size =
+ qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
+ if (qdev->lrg_buf_q_size < PAGE_SIZE)
+ qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
+ else
+ qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
+
+ qdev->lrg_buf =
+ kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),
+ GFP_KERNEL);
+ if (qdev->lrg_buf == NULL) {
+ netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n");
+ return -ENOMEM;
+ }
+
+ qdev->lrg_buf_q_alloc_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ qdev->lrg_buf_q_alloc_size,
+ &qdev->lrg_buf_q_alloc_phy_addr);
+
+ if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
+ netdev_err(qdev->ndev, "lBufQ failed\n");
+ return -ENOMEM;
+ }
+ qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
+ qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
+
+ /* Create Small Buffer Queue */
+ qdev->small_buf_q_size =
+ NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
+ if (qdev->small_buf_q_size < PAGE_SIZE)
+ qdev->small_buf_q_alloc_size = PAGE_SIZE;
+ else
+ qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
+
+ qdev->small_buf_q_alloc_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ qdev->small_buf_q_alloc_size,
+ &qdev->small_buf_q_alloc_phy_addr);
+
+ if (qdev->small_buf_q_alloc_virt_addr == NULL) {
+ netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
+ pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
+ return -ENOMEM;
+ }
+
+ qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
+ qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
+ set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
+ return 0;
+}
+
+static void ql_free_buffer_queues(struct ql3_adapter *qdev)
+{
+ if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
+ netdev_info(qdev->ndev, "Already done\n");
+ return;
+ }
+ kfree(qdev->lrg_buf);
+ pci_free_consistent(qdev->pdev,
+ qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
+
+ qdev->lrg_buf_q_virt_addr = NULL;
+
+ pci_free_consistent(qdev->pdev,
+ qdev->small_buf_q_alloc_size,
+ qdev->small_buf_q_alloc_virt_addr,
+ qdev->small_buf_q_alloc_phy_addr);
+
+ qdev->small_buf_q_virt_addr = NULL;
+
+ clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
+}
+
+static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
+{
+ int i;
+ struct bufq_addr_element *small_buf_q_entry;
+
+	/* Currently we allocate one chunk of memory and use it for the
+	 * small buffers
+	 */
+ qdev->small_buf_total_size =
+ (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
+ QL_SMALL_BUFFER_SIZE);
+
+ qdev->small_buf_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ qdev->small_buf_total_size,
+ &qdev->small_buf_phy_addr);
+
+ if (qdev->small_buf_virt_addr == NULL) {
+ netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
+ return -ENOMEM;
+ }
+
+ qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
+ qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
+
+ small_buf_q_entry = qdev->small_buf_q_virt_addr;
+
+ /* Initialize the small buffer queue. */
+ for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
+ small_buf_q_entry->addr_high =
+ cpu_to_le32(qdev->small_buf_phy_addr_high);
+ small_buf_q_entry->addr_low =
+ cpu_to_le32(qdev->small_buf_phy_addr_low +
+ (i * QL_SMALL_BUFFER_SIZE));
+ small_buf_q_entry++;
+ }
+ qdev->small_buf_index = 0;
+ set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
+ return 0;
+}
+
+static void ql_free_small_buffers(struct ql3_adapter *qdev)
+{
+ if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
+ netdev_info(qdev->ndev, "Already done\n");
+ return;
+ }
+ if (qdev->small_buf_virt_addr != NULL) {
+ pci_free_consistent(qdev->pdev,
+ qdev->small_buf_total_size,
+ qdev->small_buf_virt_addr,
+ qdev->small_buf_phy_addr);
+
+ qdev->small_buf_virt_addr = NULL;
+ }
+}
+
+static void ql_free_large_buffers(struct ql3_adapter *qdev)
+{
+	int i;
+ struct ql_rcv_buf_cb *lrg_buf_cb;
+
+ for (i = 0; i < qdev->num_large_buffers; i++) {
+ lrg_buf_cb = &qdev->lrg_buf[i];
+ if (lrg_buf_cb->skb) {
+ dev_kfree_skb(lrg_buf_cb->skb);
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(lrg_buf_cb, mapaddr),
+ dma_unmap_len(lrg_buf_cb, maplen),
+ PCI_DMA_FROMDEVICE);
+ memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
+ } else {
+ break;
+ }
+ }
+}
+
+static void ql_init_large_buffers(struct ql3_adapter *qdev)
+{
+ int i;
+ struct ql_rcv_buf_cb *lrg_buf_cb;
+ struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
+
+ for (i = 0; i < qdev->num_large_buffers; i++) {
+ lrg_buf_cb = &qdev->lrg_buf[i];
+ buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
+ buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
+ buf_addr_ele++;
+ }
+ qdev->lrg_buf_index = 0;
+ qdev->lrg_buf_skb_check = 0;
+}
+
+static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
+{
+ int i;
+ struct ql_rcv_buf_cb *lrg_buf_cb;
+ struct sk_buff *skb;
+ dma_addr_t map;
+ int err;
+
+ for (i = 0; i < qdev->num_large_buffers; i++) {
+ skb = netdev_alloc_skb(qdev->ndev,
+ qdev->lrg_buffer_len);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ netdev_err(qdev->ndev,
+ "large buff alloc failed for %d bytes at index %d\n",
+ qdev->lrg_buffer_len * 2, i);
+ ql_free_large_buffers(qdev);
+ return -ENOMEM;
+ } else {
+
+ lrg_buf_cb = &qdev->lrg_buf[i];
+ memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
+ lrg_buf_cb->index = i;
+ lrg_buf_cb->skb = skb;
+ /*
+ * We save some space to copy the ethhdr from first
+ * buffer
+ */
+ skb_reserve(skb, QL_HEADER_SPACE);
+ map = pci_map_single(qdev->pdev,
+ skb->data,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE,
+ PCI_DMA_FROMDEVICE);
+
+ err = pci_dma_mapping_error(qdev->pdev, map);
+ if (err) {
+ netdev_err(qdev->ndev,
+ "PCI mapping failed with error: %d\n",
+ err);
+ ql_free_large_buffers(qdev);
+ return -ENOMEM;
+ }
+
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE);
+ lrg_buf_cb->buf_phy_addr_low =
+ cpu_to_le32(LS_64BITS(map));
+ lrg_buf_cb->buf_phy_addr_high =
+ cpu_to_le32(MS_64BITS(map));
+ }
+ }
+ return 0;
+}
+
+static void ql_free_send_free_list(struct ql3_adapter *qdev)
+{
+ struct ql_tx_buf_cb *tx_cb;
+ int i;
+
+ tx_cb = &qdev->tx_buf[0];
+ for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+ kfree(tx_cb->oal);
+ tx_cb->oal = NULL;
+ tx_cb++;
+ }
+}
+
+static int ql_create_send_free_list(struct ql3_adapter *qdev)
+{
+ struct ql_tx_buf_cb *tx_cb;
+ int i;
+ struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
+
+ /* Create free list of transmit buffers */
+ for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+
+ tx_cb = &qdev->tx_buf[i];
+ tx_cb->skb = NULL;
+ tx_cb->queue_entry = req_q_curr;
+ req_q_curr++;
+ tx_cb->oal = kmalloc(512, GFP_KERNEL);
+ if (tx_cb->oal == NULL)
+ return -1;
+ }
+ return 0;
+}
+
+static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
+{
+ if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
+ qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
+ qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
+ } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
+ /*
+ * Bigger buffers, so less of them.
+ */
+ qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
+ qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
+ } else {
+ netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n",
+ qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
+ return -ENOMEM;
+ }
+ qdev->num_large_buffers =
+ qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
+ qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
+ qdev->max_frame_size =
+ (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
+
+ /*
+ * First allocate a page of shared memory and use it for shadow
+ * locations of Network Request Queue Consumer Address Register and
+ * Network Completion Queue Producer Index Register
+ */
+ qdev->shadow_reg_virt_addr =
+ pci_alloc_consistent(qdev->pdev,
+ PAGE_SIZE, &qdev->shadow_reg_phy_addr);
+
+ if (qdev->shadow_reg_virt_addr != NULL) {
+ qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
+ qdev->req_consumer_index_phy_addr_high =
+ MS_64BITS(qdev->shadow_reg_phy_addr);
+ qdev->req_consumer_index_phy_addr_low =
+ LS_64BITS(qdev->shadow_reg_phy_addr);
+
+ qdev->prsp_producer_index =
+ (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
+ qdev->rsp_producer_index_phy_addr_high =
+ qdev->req_consumer_index_phy_addr_high;
+ qdev->rsp_producer_index_phy_addr_low =
+ qdev->req_consumer_index_phy_addr_low + 8;
+ } else {
+ netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
+ return -ENOMEM;
+ }
+
+ if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
+ netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
+ goto err_req_rsp;
+ }
+
+ if (ql_alloc_buffer_queues(qdev) != 0) {
+ netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
+ goto err_buffer_queues;
+ }
+
+ if (ql_alloc_small_buffers(qdev) != 0) {
+ netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
+ goto err_small_buffers;
+ }
+
+ if (ql_alloc_large_buffers(qdev) != 0) {
+ netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
+ goto err_small_buffers;
+ }
+
+ /* Initialize the large buffer queue. */
+ ql_init_large_buffers(qdev);
+ if (ql_create_send_free_list(qdev))
+ goto err_free_list;
+
+ qdev->rsp_current = qdev->rsp_q_virt_addr;
+
+ return 0;
+err_free_list:
+ ql_free_send_free_list(qdev);
+err_small_buffers:
+ ql_free_buffer_queues(qdev);
+err_buffer_queues:
+ ql_free_net_req_rsp_queues(qdev);
+err_req_rsp:
+ pci_free_consistent(qdev->pdev,
+ PAGE_SIZE,
+ qdev->shadow_reg_virt_addr,
+ qdev->shadow_reg_phy_addr);
+
+ return -ENOMEM;
+}
+
+static void ql_free_mem_resources(struct ql3_adapter *qdev)
+{
+ ql_free_send_free_list(qdev);
+ ql_free_large_buffers(qdev);
+ ql_free_small_buffers(qdev);
+ ql_free_buffer_queues(qdev);
+ ql_free_net_req_rsp_queues(qdev);
+ if (qdev->shadow_reg_virt_addr != NULL) {
+ pci_free_consistent(qdev->pdev,
+ PAGE_SIZE,
+ qdev->shadow_reg_virt_addr,
+ qdev->shadow_reg_phy_addr);
+ qdev->shadow_reg_virt_addr = NULL;
+ }
+}
+
+static int ql_init_misc_registers(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_local_ram_registers __iomem *local_ram =
+ (void __iomem *)qdev->mem_map_registers;
+
+ if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 4))
+ return -1;
+
+ ql_write_page2_reg(qdev,
+ &local_ram->bufletSize, qdev->nvram_data.bufletSize);
+
+ ql_write_page2_reg(qdev,
+ &local_ram->maxBufletCount,
+ qdev->nvram_data.bufletCount);
+
+ ql_write_page2_reg(qdev,
+ &local_ram->freeBufletThresholdLow,
+ (qdev->nvram_data.tcpWindowThreshold25 << 16) |
+ (qdev->nvram_data.tcpWindowThreshold0));
+
+ ql_write_page2_reg(qdev,
+ &local_ram->freeBufletThresholdHigh,
+ qdev->nvram_data.tcpWindowThreshold50);
+
+ ql_write_page2_reg(qdev,
+ &local_ram->ipHashTableBase,
+ (qdev->nvram_data.ipHashTableBaseHi << 16) |
+ qdev->nvram_data.ipHashTableBaseLo);
+ ql_write_page2_reg(qdev,
+ &local_ram->ipHashTableCount,
+ qdev->nvram_data.ipHashTableSize);
+ ql_write_page2_reg(qdev,
+ &local_ram->tcpHashTableBase,
+ (qdev->nvram_data.tcpHashTableBaseHi << 16) |
+ qdev->nvram_data.tcpHashTableBaseLo);
+ ql_write_page2_reg(qdev,
+ &local_ram->tcpHashTableCount,
+ qdev->nvram_data.tcpHashTableSize);
+ ql_write_page2_reg(qdev,
+ &local_ram->ncbBase,
+ (qdev->nvram_data.ncbTableBaseHi << 16) |
+ qdev->nvram_data.ncbTableBaseLo);
+ ql_write_page2_reg(qdev,
+ &local_ram->maxNcbCount,
+ qdev->nvram_data.ncbTableSize);
+ ql_write_page2_reg(qdev,
+ &local_ram->drbBase,
+ (qdev->nvram_data.drbTableBaseHi << 16) |
+ qdev->nvram_data.drbTableBaseLo);
+ ql_write_page2_reg(qdev,
+ &local_ram->maxDrbCount,
+ qdev->nvram_data.drbTableSize);
+ ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
+ return 0;
+}
+
+static int ql_adapter_initialize(struct ql3_adapter *qdev)
+{
+ u32 value;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 __iomem *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+ struct ql3xxx_host_memory_registers __iomem *hmem_regs =
+ (void __iomem *)port_regs;
+ u32 delay = 10;
+ int status = 0;
+ unsigned long hw_flags = 0;
+
+ if (ql_mii_setup(qdev))
+ return -1;
+
+ /* Bring the PHY out of reset */
+ ql_write_common_reg(qdev, spir,
+ (ISP_SERIAL_PORT_IF_WE |
+ (ISP_SERIAL_PORT_IF_WE << 16)));
+ /* Give the PHY time to come out of reset. */
+ mdelay(100);
+ qdev->port_link_state = LS_DOWN;
+ netif_carrier_off(qdev->ndev);
+
+ /* V2 chip fix for ARS-39168. */
+ ql_write_common_reg(qdev, spir,
+ (ISP_SERIAL_PORT_IF_SDE |
+ (ISP_SERIAL_PORT_IF_SDE << 16)));
+
+ /* Request Queue Registers */
+ *((u32 *)(qdev->preq_consumer_index)) = 0;
+ atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
+ qdev->req_producer_index = 0;
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->reqConsumerIndexAddrHigh,
+ qdev->req_consumer_index_phy_addr_high);
+ ql_write_page1_reg(qdev,
+ &hmem_regs->reqConsumerIndexAddrLow,
+ qdev->req_consumer_index_phy_addr_low);
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->reqBaseAddrHigh,
+ MS_64BITS(qdev->req_q_phy_addr));
+ ql_write_page1_reg(qdev,
+ &hmem_regs->reqBaseAddrLow,
+ LS_64BITS(qdev->req_q_phy_addr));
+ ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
+
+ /* Response Queue Registers */
+ *((__le16 *) (qdev->prsp_producer_index)) = 0;
+ qdev->rsp_consumer_index = 0;
+ qdev->rsp_current = qdev->rsp_q_virt_addr;
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rspProducerIndexAddrHigh,
+ qdev->rsp_producer_index_phy_addr_high);
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rspProducerIndexAddrLow,
+ qdev->rsp_producer_index_phy_addr_low);
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rspBaseAddrHigh,
+ MS_64BITS(qdev->rsp_q_phy_addr));
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rspBaseAddrLow,
+ LS_64BITS(qdev->rsp_q_phy_addr));
+
+ ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
+
+ /* Large Buffer Queue */
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxLargeQBaseAddrHigh,
+ MS_64BITS(qdev->lrg_buf_q_phy_addr));
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxLargeQBaseAddrLow,
+ LS_64BITS(qdev->lrg_buf_q_phy_addr));
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxLargeQLength,
+ qdev->num_lbufq_entries);
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxLargeBufferLength,
+ qdev->lrg_buffer_len);
+
+ /* Small Buffer Queue */
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxSmallQBaseAddrHigh,
+ MS_64BITS(qdev->small_buf_q_phy_addr));
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxSmallQBaseAddrLow,
+ LS_64BITS(qdev->small_buf_q_phy_addr));
+
+ ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxSmallBufferLength,
+ QL_SMALL_BUFFER_SIZE);
+
+ qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
+ qdev->small_buf_release_cnt = 8;
+ qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
+ qdev->lrg_buf_release_cnt = 8;
+ qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr;
+ qdev->small_buf_index = 0;
+ qdev->lrg_buf_index = 0;
+ qdev->lrg_buf_free_count = 0;
+ qdev->lrg_buf_free_head = NULL;
+ qdev->lrg_buf_free_tail = NULL;
+
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.
+ rxSmallQProducerIndex,
+ qdev->small_buf_q_producer_index);
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.
+ rxLargeQProducerIndex,
+ qdev->lrg_buf_q_producer_index);
+
+ /*
+ * Find out if the chip has already been initialized. If it has, then
+ * we skip some of the initialization.
+ */
+ clear_bit(QL_LINK_MASTER, &qdev->flags);
+ value = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ if ((value & PORT_STATUS_IC) == 0) {
+
+ /* Chip has not been configured yet, so let it rip. */
+ if (ql_init_misc_registers(qdev)) {
+ status = -1;
+ goto out;
+ }
+
+ value = qdev->nvram_data.tcpMaxWindowSize;
+ ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
+
+ value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
+
+ if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
+ * 2) << 13)) {
+ status = -1;
+ goto out;
+ }
+ ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
+ ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
+ (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
+ 16) | (INTERNAL_CHIP_SD |
+ INTERNAL_CHIP_WE)));
+ ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
+ }
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev,
+ &port_regs->mac1MaxFrameLengthReg,
+ qdev->max_frame_size);
+ else
+ ql_write_page0_reg(qdev,
+ &port_regs->mac0MaxFrameLengthReg,
+ qdev->max_frame_size);
+
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 7)) {
+ status = -1;
+ goto out;
+ }
+
+ PHY_Setup(qdev);
+ ql_init_scan_mode(qdev);
+ ql_get_phy_owner(qdev);
+
+ /* Load the MAC Configuration */
+
+ /* Program lower 32 bits of the MAC address */
+ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+ (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
+ ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+ ((qdev->ndev->dev_addr[2] << 24)
+ | (qdev->ndev->dev_addr[3] << 16)
+ | (qdev->ndev->dev_addr[4] << 8)
+ | qdev->ndev->dev_addr[5]));
+
+ /* Program top 16 bits of the MAC address */
+ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+ ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
+ ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+ ((qdev->ndev->dev_addr[0] << 8)
+ | qdev->ndev->dev_addr[1]));
+
+ /* Enable Primary MAC */
+ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+ ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
+ MAC_ADDR_INDIRECT_PTR_REG_PE));
+
+ /* Clear Primary and Secondary IP addresses */
+ ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
+ ((IP_ADDR_INDEX_REG_MASK << 16) |
+ (qdev->mac_index << 2)));
+ ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
+
+ ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
+ ((IP_ADDR_INDEX_REG_MASK << 16) |
+ ((qdev->mac_index << 2) + 1)));
+ ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
+
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+
+ /* Indicate Configuration Complete */
+ ql_write_page0_reg(qdev,
+ &port_regs->portControl,
+ ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
+
+ do {
+ value = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ if (value & PORT_STATUS_IC)
+ break;
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ msleep(500);
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ } while (--delay);
+
+ if (delay == 0) {
+ netdev_err(qdev->ndev, "Hw Initialization timeout\n");
+ status = -1;
+ goto out;
+ }
+
+ /* Enable Ethernet Function */
+ if (qdev->device_id == QL3032_DEVICE_ID) {
+ value =
+ (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
+ QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
+ QL3032_PORT_CONTROL_ET);
+ ql_write_page0_reg(qdev, &port_regs->functionControl,
+ ((value << 16) | value));
+ } else {
+ value =
+ (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
+ PORT_CONTROL_HH);
+ ql_write_page0_reg(qdev, &port_regs->portControl,
+ ((value << 16) | value));
+ }
+
+out:
+ return status;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_adapter_reset(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ int status = 0;
+ u16 value;
+ int max_wait_time;
+
+ set_bit(QL_RESET_ACTIVE, &qdev->flags);
+ clear_bit(QL_RESET_DONE, &qdev->flags);
+
+ /*
+ * Issue soft reset to chip.
+ */
+ netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.ispControlStatus,
+ ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
+
+ netdev_printk(KERN_DEBUG, qdev->ndev,
+ "Waiting for reset to complete\n");
+
+ /* Wait up to 5 seconds until the firmware tells us the Soft Reset is done */
+ max_wait_time = 5;
+ do {
+ value =
+ ql_read_common_reg(qdev,
+ &port_regs->CommonRegs.ispControlStatus);
+ if ((value & ISP_CONTROL_SR) == 0)
+ break;
+
+ ssleep(1);
+ } while ((--max_wait_time));
+
+ /*
+ * Also, make sure that the Network Reset Interrupt bit has been
+ * cleared after the soft reset has taken place.
+ */
+ value =
+ ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
+ if (value & ISP_CONTROL_RI) {
+ netdev_printk(KERN_DEBUG, qdev->ndev,
+ "clearing RI after reset\n");
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.
+ ispControlStatus,
+ ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
+ }
+
+ if (max_wait_time == 0) {
+ /* Issue Force Soft Reset */
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.
+ ispControlStatus,
+ ((ISP_CONTROL_FSR << 16) |
+ ISP_CONTROL_FSR));
+ /*
+ * Wait until the firmware tells us the Force Soft Reset is
+ * done
+ */
+ max_wait_time = 5;
+ do {
+ value = ql_read_common_reg(qdev,
+ &port_regs->CommonRegs.
+ ispControlStatus);
+ if ((value & ISP_CONTROL_FSR) == 0)
+ break;
+ ssleep(1);
+ } while ((--max_wait_time));
+ }
+ if (max_wait_time == 0)
+ status = 1;
+
+ clear_bit(QL_RESET_ACTIVE, &qdev->flags);
+ set_bit(QL_RESET_DONE, &qdev->flags);
+ return status;
+}
+
+static void ql_set_mac_info(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value, port_status;
+ u8 func_number;
+
+ /* Get the function number */
+ value =
+ ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
+ func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
+ port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ switch (value & ISP_CONTROL_FN_MASK) {
+ case ISP_CONTROL_FN0_NET:
+ qdev->mac_index = 0;
+ qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
+ qdev->mb_bit_mask = FN0_MA_BITS_MASK;
+ qdev->PHYAddr = PORT0_PHY_ADDRESS;
+ if (port_status & PORT_STATUS_SM0)
+ set_bit(QL_LINK_OPTICAL, &qdev->flags);
+ else
+ clear_bit(QL_LINK_OPTICAL, &qdev->flags);
+ break;
+
+ case ISP_CONTROL_FN1_NET:
+ qdev->mac_index = 1;
+ qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
+ qdev->mb_bit_mask = FN1_MA_BITS_MASK;
+ qdev->PHYAddr = PORT1_PHY_ADDRESS;
+ if (port_status & PORT_STATUS_SM1)
+ set_bit(QL_LINK_OPTICAL, &qdev->flags);
+ else
+ clear_bit(QL_LINK_OPTICAL, &qdev->flags);
+ break;
+
+ case ISP_CONTROL_FN0_SCSI:
+ case ISP_CONTROL_FN1_SCSI:
+ default:
+ netdev_printk(KERN_DEBUG, qdev->ndev,
+ "Invalid function number, ispControlStatus = 0x%x\n",
+ value);
+ break;
+ }
+ qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
+}
+
+static void ql_display_dev_info(struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ struct pci_dev *pdev = qdev->pdev;
+
+ netdev_info(ndev,
+ "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
+ DRV_NAME, qdev->index, qdev->chip_rev_id,
+ qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
+ qdev->pci_slot);
+ netdev_info(ndev, "%s Interface\n",
+ test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
+
+ /*
+ * Print PCI bus width/type.
+ */
+ netdev_info(ndev, "Bus interface is %s %s\n",
+ ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
+ ((qdev->pci_x) ? "PCI-X" : "PCI"));
+
+ netdev_info(ndev, "mem IO base address adjusted = 0x%p\n",
+ qdev->mem_map_registers);
+ netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);
+
+ netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
+}
+
+static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
+{
+ struct net_device *ndev = qdev->ndev;
+ int retval = 0;
+
+ netif_stop_queue(ndev);
+ netif_carrier_off(ndev);
+
+ clear_bit(QL_ADAPTER_UP, &qdev->flags);
+ clear_bit(QL_LINK_MASTER, &qdev->flags);
+
+ ql_disable_interrupts(qdev);
+
+ free_irq(qdev->pdev->irq, ndev);
+
+ if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
+ netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
+ clear_bit(QL_MSI_ENABLED, &qdev->flags);
+ pci_disable_msi(qdev->pdev);
+ }
+
+ del_timer_sync(&qdev->adapter_timer);
+
+ napi_disable(&qdev->napi);
+
+ if (do_reset) {
+ int soft_reset;
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if (ql_wait_for_drvr_lock(qdev)) {
+ soft_reset = ql_adapter_reset(qdev);
+ if (soft_reset) {
+ netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
+ qdev->index);
+ }
+ netdev_err(ndev,
+ "Releasing driver lock via chip reset\n");
+ } else {
+ netdev_err(ndev,
+ "Could not acquire driver lock to do reset!\n");
+ retval = -1;
+ }
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ }
+ ql_free_mem_resources(qdev);
+ return retval;
+}
+
+static int ql_adapter_up(struct ql3_adapter *qdev)
+{
+ struct net_device *ndev = qdev->ndev;
+ int err;
+ unsigned long irq_flags = IRQF_SHARED;
+ unsigned long hw_flags;
+
+ if (ql_alloc_mem_resources(qdev)) {
+ netdev_err(ndev, "Unable to allocate buffers\n");
+ return -ENOMEM;
+ }
+
+ if (qdev->msi) {
+ if (pci_enable_msi(qdev->pdev)) {
+ netdev_err(ndev,
+ "User requested MSI, but MSI failed to initialize. Continuing without MSI.\n");
+ qdev->msi = 0;
+ } else {
+ netdev_info(ndev, "MSI Enabled...\n");
+ set_bit(QL_MSI_ENABLED, &qdev->flags);
+ irq_flags &= ~IRQF_SHARED;
+ }
+ }
+
+ err = request_irq(qdev->pdev->irq, ql3xxx_isr,
+ irq_flags, ndev->name, ndev);
+ if (err) {
+ netdev_err(ndev,
+ "Failed to reserve interrupt %d - already in use\n",
+ qdev->pdev->irq);
+ goto err_irq;
+ }
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+ err = ql_wait_for_drvr_lock(qdev);
+ if (err) {
+ err = ql_adapter_initialize(qdev);
+ if (err) {
+ netdev_err(ndev, "Unable to initialize adapter\n");
+ goto err_init;
+ }
+ netdev_err(ndev, "Releasing driver lock\n");
+ ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+ } else {
+ netdev_err(ndev, "Could not acquire driver lock\n");
+ goto err_lock;
+ }
+
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ set_bit(QL_ADAPTER_UP, &qdev->flags);
+
+ mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
+
+ napi_enable(&qdev->napi);
+ ql_enable_interrupts(qdev);
+ return 0;
+
+err_init:
+ ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+err_lock:
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ free_irq(qdev->pdev->irq, ndev);
+err_irq:
+ if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
+ netdev_info(ndev, "calling pci_disable_msi()\n");
+ clear_bit(QL_MSI_ENABLED, &qdev->flags);
+ pci_disable_msi(qdev->pdev);
+ }
+ return err;
+}
+
+static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
+{
+ if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
+ netdev_err(qdev->ndev,
+ "Driver up/down cycle failed, closing device\n");
+ rtnl_lock();
+ dev_close(qdev->ndev);
+ rtnl_unlock();
+ return -1;
+ }
+ return 0;
+}
+
+static int ql3xxx_close(struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+
+ /*
+ * Wait for device to recover from a reset.
+ * (Rarely happens, but possible.)
+ */
+ while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
+ msleep(50);
+
+ ql_adapter_down(qdev, QL_DO_RESET);
+ return 0;
+}
+
+static int ql3xxx_open(struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ return ql_adapter_up(qdev);
+}
+
+static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ struct sockaddr *addr = p;
+ unsigned long hw_flags;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ /* Program lower 32 bits of the MAC address */
+ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+ (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
+ ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+ ((ndev->dev_addr[2] << 24) | (ndev->
+ dev_addr[3] << 16) |
+ (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
+
+ /* Program top 16 bits of the MAC address */
+ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+ ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
+ ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+ ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ return 0;
+}
+
+static void ql3xxx_tx_timeout(struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+
+ netdev_err(ndev, "Resetting...\n");
+ /*
+ * Stop the queues, we've got a problem.
+ */
+ netif_stop_queue(ndev);
+
+ /*
+ * Wake up the worker to process this event.
+ */
+ queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
+}
+
+static void ql_reset_work(struct work_struct *work)
+{
+ struct ql3_adapter *qdev =
+ container_of(work, struct ql3_adapter, reset_work.work);
+ struct net_device *ndev = qdev->ndev;
+ u32 value;
+ struct ql_tx_buf_cb *tx_cb;
+ int max_wait_time, i;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ unsigned long hw_flags;
+
+ if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
+ test_bit(QL_RESET_START, &qdev->flags)) {
+ clear_bit(QL_LINK_MASTER, &qdev->flags);
+
+ /*
+ * Loop through the active list and return the skb.
+ */
+ for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+ int j;
+ tx_cb = &qdev->tx_buf[i];
+ if (tx_cb->skb) {
+ netdev_printk(KERN_DEBUG, ndev,
+ "Freeing lost SKB\n");
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(&tx_cb->map[0],
+ mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen),
+ PCI_DMA_TODEVICE);
+ for (j = 1; j < tx_cb->seg_count; j++) {
+ pci_unmap_page(qdev->pdev,
+ dma_unmap_addr(&tx_cb->map[j],
+ mapaddr),
+ dma_unmap_len(&tx_cb->map[j],
+ maplen),
+ PCI_DMA_TODEVICE);
+ }
+ dev_kfree_skb(tx_cb->skb);
+ tx_cb->skb = NULL;
+ }
+ }
+
+ netdev_err(ndev, "Clearing NRI after reset\n");
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.
+ ispControlStatus,
+ ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
+ /*
+ * Wait for the Soft Reset to complete.
+ */
+ max_wait_time = 10;
+ do {
+ value = ql_read_common_reg(qdev,
+ &port_regs->CommonRegs.
+ ispControlStatus);
+ if ((value & ISP_CONTROL_SR) == 0) {
+ netdev_printk(KERN_DEBUG, ndev,
+ "reset completed\n");
+ break;
+ }
+
+ if (value & ISP_CONTROL_RI) {
+ netdev_printk(KERN_DEBUG, ndev,
+ "clearing NRI after reset\n");
+ ql_write_common_reg(qdev,
+ &port_regs->
+ CommonRegs.
+ ispControlStatus,
+ ((ISP_CONTROL_RI <<
+ 16) | ISP_CONTROL_RI));
+ }
+
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ ssleep(1);
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ } while (--max_wait_time);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ if (value & ISP_CONTROL_SR) {
+
+ /*
+ * Set the reset flags and clear the board again.
+ * Nothing else to do...
+ */
+ netdev_err(ndev,
+ "Timed out waiting for reset to complete; resetting the adapter\n");
+ clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
+ clear_bit(QL_RESET_START, &qdev->flags);
+ ql_cycle_adapter(qdev, QL_DO_RESET);
+ return;
+ }
+
+ clear_bit(QL_RESET_ACTIVE, &qdev->flags);
+ clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
+ clear_bit(QL_RESET_START, &qdev->flags);
+ ql_cycle_adapter(qdev, QL_NO_RESET);
+ }
+}
+
+static void ql_tx_timeout_work(struct work_struct *work)
+{
+ struct ql3_adapter *qdev =
+ container_of(work, struct ql3_adapter, tx_timeout_work.work);
+
+ ql_cycle_adapter(qdev, QL_DO_RESET);
+}
+
+static void ql_get_board_info(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
+
+ qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
+ if (value & PORT_STATUS_64)
+ qdev->pci_width = 64;
+ else
+ qdev->pci_width = 32;
+ if (value & PORT_STATUS_X)
+ qdev->pci_x = 1;
+ else
+ qdev->pci_x = 0;
+ qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
+}
+
+static void ql3xxx_timer(unsigned long ptr)
+{
+ struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
+ queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
+}
+
+static const struct net_device_ops ql3xxx_netdev_ops = {
+ .ndo_open = ql3xxx_open,
+ .ndo_start_xmit = ql3xxx_send,
+ .ndo_stop = ql3xxx_close,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = ql3xxx_set_mac_address,
+ .ndo_tx_timeout = ql3xxx_tx_timeout,
+};
+
+static int __devinit ql3xxx_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_entry)
+{
+ struct net_device *ndev = NULL;
+ struct ql3_adapter *qdev = NULL;
+ static int cards_found;
+ int uninitialized_var(pci_using_dac), err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ pr_err("%s cannot enable PCI device\n", pci_name(pdev));
+ goto err_out;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
+ goto err_out_disable_pdev;
+ }
+
+ pci_set_master(pdev);
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ pci_using_dac = 1;
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ pci_using_dac = 0;
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ }
+
+ if (err) {
+ pr_err("%s no usable DMA configuration\n", pci_name(pdev));
+ goto err_out_free_regions;
+ }
+
+ ndev = alloc_etherdev(sizeof(struct ql3_adapter));
+ if (!ndev) {
+ pr_err("%s could not alloc etherdev\n", pci_name(pdev));
+ err = -ENOMEM;
+ goto err_out_free_regions;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ pci_set_drvdata(pdev, ndev);
+
+ qdev = netdev_priv(ndev);
+ qdev->index = cards_found;
+ qdev->ndev = ndev;
+ qdev->pdev = pdev;
+ qdev->device_id = pci_entry->device;
+ qdev->port_link_state = LS_DOWN;
+ if (msi)
+ qdev->msi = 1;
+
+ qdev->msg_enable = netif_msg_init(debug, default_msg);
+
+ if (pci_using_dac)
+ ndev->features |= NETIF_F_HIGHDMA;
+ if (qdev->device_id == QL3032_DEVICE_ID)
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+
+ qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
+ if (!qdev->mem_map_registers) {
+ pr_err("%s: cannot map device registers\n", pci_name(pdev));
+ err = -EIO;
+ goto err_out_free_ndev;
+ }
+
+ spin_lock_init(&qdev->adapter_lock);
+ spin_lock_init(&qdev->hw_lock);
+
+ /* Set driver entry points */
+ ndev->netdev_ops = &ql3xxx_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
+ ndev->watchdog_timeo = 5 * HZ;
+
+ netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
+
+ ndev->irq = pdev->irq;
+
+ /* make sure the EEPROM is good */
+ if (ql_get_nvram_params(qdev)) {
+ pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
+ __func__, qdev->index);
+ err = -EIO;
+ goto err_out_iounmap;
+ }
+
+ ql_set_mac_info(qdev);
+
+ /* Validate and set parameters */
+ if (qdev->mac_index) {
+ ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
+ ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
+ } else {
+ ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
+ ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
+ }
+ memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+
+ ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
+
+ /* Record PCI bus information. */
+ ql_get_board_info(qdev);
+
+ /*
+ * Set the Maximum Memory Read Byte Count value. We do this to handle
+ * jumbo frames.
+ */
+ if (qdev->pci_x)
+ pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
+
+ err = register_netdev(ndev);
+ if (err) {
+ pr_err("%s: cannot register net device\n", pci_name(pdev));
+ goto err_out_iounmap;
+ }
+
+ /* we're going to reset, so assume we have no link for now */
+
+ netif_carrier_off(ndev);
+ netif_stop_queue(ndev);
+
+ qdev->workqueue = create_singlethread_workqueue(ndev->name);
+ INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
+ INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
+ INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
+
+ init_timer(&qdev->adapter_timer);
+ qdev->adapter_timer.function = ql3xxx_timer;
+ qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
+ qdev->adapter_timer.data = (unsigned long)qdev;
+
+ if (!cards_found) {
+ pr_alert("%s\n", DRV_STRING);
+ pr_alert("Driver name: %s, Version: %s\n",
+ DRV_NAME, DRV_VERSION);
+ }
+ ql_display_dev_info(ndev);
+
+ cards_found++;
+ return 0;
+
+err_out_iounmap:
+ iounmap(qdev->mem_map_registers);
+err_out_free_ndev:
+ free_netdev(ndev);
+err_out_free_regions:
+ pci_release_regions(pdev);
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+err_out:
+ return err;
+}
+
+static void __devexit ql3xxx_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+
+ ql_disable_interrupts(qdev);
+
+ if (qdev->workqueue) {
+ cancel_delayed_work(&qdev->reset_work);
+ cancel_delayed_work(&qdev->tx_timeout_work);
+ destroy_workqueue(qdev->workqueue);
+ qdev->workqueue = NULL;
+ }
+
+ iounmap(qdev->mem_map_registers);
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(ndev);
+}
+
+static struct pci_driver ql3xxx_driver = {
+ .name = DRV_NAME,
+ .id_table = ql3xxx_pci_tbl,
+ .probe = ql3xxx_probe,
+ .remove = __devexit_p(ql3xxx_remove),
+};
+
+static int __init ql3xxx_init_module(void)
+{
+ return pci_register_driver(&ql3xxx_driver);
+}
+
+static void __exit ql3xxx_exit(void)
+{
+ pci_unregister_driver(&ql3xxx_driver);
+}
+
+module_init(ql3xxx_init_module);
+module_exit(ql3xxx_exit);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.h b/drivers/net/ethernet/qlogic/qla3xxx.h
new file mode 100644
index 000000000000..73e234366a82
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qla3xxx.h
@@ -0,0 +1,1189 @@
+/*
+ * QLogic QLA3xxx NIC HBA Driver
+ * Copyright (c) 2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla3xxx for copyright and licensing details.
+ */
+#ifndef _QLA3XXX_H_
+#define _QLA3XXX_H_
+
+/*
+ * IOCB Definitions...
+ */
+#pragma pack(1)
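+/*
+ * pack(1) removes compiler padding so the IOCB structures below match the
+ * hardware layout byte for byte; normal packing resumes at the matching
+ * #pragma pack() after the IOCB definitions.
+ */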
+
+#define OPCODE_OB_MAC_IOCB_FN0 0x01
+#define OPCODE_OB_MAC_IOCB_FN2 0x21
+
+#define OPCODE_IB_MAC_IOCB 0xF9
+#define OPCODE_IB_3032_MAC_IOCB 0x09
+#define OPCODE_IB_IP_IOCB 0xFA
+#define OPCODE_IB_3032_IP_IOCB 0x0A
+
+#define OPCODE_FUNC_ID_MASK 0x30
+#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */
+
+#define FN0_MA_BITS_MASK 0x00
+#define FN1_MA_BITS_MASK 0x80
+
+struct ob_mac_iocb_req {
+ u8 opcode;
+ u8 flags;
+#define OB_MAC_IOCB_REQ_MA 0xe0
+#define OB_MAC_IOCB_REQ_F 0x10
+#define OB_MAC_IOCB_REQ_X 0x08
+#define OB_MAC_IOCB_REQ_D 0x02
+#define OB_MAC_IOCB_REQ_I 0x01
+ u8 flags1;
+#define OB_3032MAC_IOCB_REQ_IC 0x04
+#define OB_3032MAC_IOCB_REQ_TC 0x02
+#define OB_3032MAC_IOCB_REQ_UC 0x01
+ u8 reserved0;
+
+ u32 transaction_id; /* opaque for hardware */
+ __le16 data_len;
+ u8 ip_hdr_off;
+ u8 ip_hdr_len;
+ __le32 reserved1;
+ __le32 reserved2;
+ __le32 buf_addr0_low;
+ __le32 buf_addr0_high;
+ __le32 buf_0_len;
+ __le32 buf_addr1_low;
+ __le32 buf_addr1_high;
+ __le32 buf_1_len;
+ __le32 buf_addr2_low;
+ __le32 buf_addr2_high;
+ __le32 buf_2_len;
+ __le32 reserved3;
+ __le32 reserved4;
+};
+/*
+ * The following constants define control bits for buffer
+ * length fields for all IOCB's.
+ */
+#define OB_MAC_IOCB_REQ_E 0x80000000 /* Last valid buffer in list. */
+#define OB_MAC_IOCB_REQ_C 0x40000000 /* points to an OAL. (continuation) */
+#define OB_MAC_IOCB_REQ_L 0x20000000 /* Auburn local address pointer. */
+#define OB_MAC_IOCB_REQ_R 0x10000000 /* 32-bit address pointer. */
+
+struct ob_mac_iocb_rsp {
+ u8 opcode;
+ u8 flags;
+#define OB_MAC_IOCB_RSP_P 0x08
+#define OB_MAC_IOCB_RSP_L 0x04
+#define OB_MAC_IOCB_RSP_S 0x02
+#define OB_MAC_IOCB_RSP_I 0x01
+
+ __le16 reserved0;
+ u32 transaction_id; /* opaque for hardware */
+ __le32 reserved1;
+ __le32 reserved2;
+};
+
+struct ib_mac_iocb_rsp {
+ u8 opcode;
+#define IB_MAC_IOCB_RSP_V 0x80
+ u8 flags;
+#define IB_MAC_IOCB_RSP_S 0x80
+#define IB_MAC_IOCB_RSP_H1 0x40
+#define IB_MAC_IOCB_RSP_H0 0x20
+#define IB_MAC_IOCB_RSP_B 0x10
+#define IB_MAC_IOCB_RSP_M 0x08
+#define IB_MAC_IOCB_RSP_MA 0x07
+
+ __le16 length;
+ __le32 reserved;
+ __le32 ial_low;
+ __le32 ial_high;
+
+};
+
+struct ob_ip_iocb_req {
+ u8 opcode;
+ __le16 flags;
+#define OB_IP_IOCB_REQ_O 0x100
+#define OB_IP_IOCB_REQ_H 0x008
+#define OB_IP_IOCB_REQ_U 0x004
+#define OB_IP_IOCB_REQ_D 0x002
+#define OB_IP_IOCB_REQ_I 0x001
+
+ u8 reserved0;
+
+ __le32 transaction_id;
+ __le16 data_len;
+ __le16 reserved1;
+ __le32 hncb_ptr_low;
+ __le32 hncb_ptr_high;
+ __le32 buf_addr0_low;
+ __le32 buf_addr0_high;
+ __le32 buf_0_len;
+ __le32 buf_addr1_low;
+ __le32 buf_addr1_high;
+ __le32 buf_1_len;
+ __le32 buf_addr2_low;
+ __le32 buf_addr2_high;
+ __le32 buf_2_len;
+ __le32 reserved2;
+ __le32 reserved3;
+};
+
+/* defines for BufferLength fields above */
+#define OB_IP_IOCB_REQ_E 0x80000000
+#define OB_IP_IOCB_REQ_C 0x40000000
+#define OB_IP_IOCB_REQ_L 0x20000000
+#define OB_IP_IOCB_REQ_R 0x10000000
+
+struct ob_ip_iocb_rsp {
+ u8 opcode;
+ u8 flags;
+#define OB_MAC_IOCB_RSP_H 0x10
+#define OB_MAC_IOCB_RSP_E 0x08
+#define OB_MAC_IOCB_RSP_L 0x04
+#define OB_MAC_IOCB_RSP_S 0x02
+#define OB_MAC_IOCB_RSP_I 0x01
+
+ __le16 reserved0;
+ __le32 transaction_id;
+ __le32 reserved1;
+ __le32 reserved2;
+};
+
+struct ib_ip_iocb_rsp {
+ u8 opcode;
+#define IB_IP_IOCB_RSP_3032_V 0x80
+#define IB_IP_IOCB_RSP_3032_O 0x40
+#define IB_IP_IOCB_RSP_3032_I 0x20
+#define IB_IP_IOCB_RSP_3032_R 0x10
+ u8 flags;
+#define IB_IP_IOCB_RSP_S 0x80
+#define IB_IP_IOCB_RSP_H1 0x40
+#define IB_IP_IOCB_RSP_H0 0x20
+#define IB_IP_IOCB_RSP_B 0x10
+#define IB_IP_IOCB_RSP_M 0x08
+#define IB_IP_IOCB_RSP_MA 0x07
+
+ __le16 length;
+ __le16 checksum;
+#define IB_IP_IOCB_RSP_3032_ICE 0x01
+#define IB_IP_IOCB_RSP_3032_CE 0x02
+#define IB_IP_IOCB_RSP_3032_NUC 0x04
+#define IB_IP_IOCB_RSP_3032_UDP 0x08
+#define IB_IP_IOCB_RSP_3032_TCP 0x10
+#define IB_IP_IOCB_RSP_3032_IPE 0x20
+ __le16 reserved;
+#define IB_IP_IOCB_RSP_R 0x01
+ __le32 ial_low;
+ __le32 ial_high;
+};
+
+struct net_rsp_iocb {
+ u8 opcode;
+ u8 flags;
+ __le16 reserved0;
+ __le32 reserved[3];
+};
+#pragma pack()
+
+/*
+ * Register Definitions...
+ */
+#define PORT0_PHY_ADDRESS 0x1e00
+#define PORT1_PHY_ADDRESS 0x1f00
+
+#define ETHERNET_CRC_SIZE 4
+
+#define MII_SCAN_REGISTER 0x00000001
+
+#define PHY_ID_0_REG 2
+#define PHY_ID_1_REG 3
+
+#define PHY_OUI_1_MASK 0xfc00
+#define PHY_MODEL_MASK 0x03f0
+
+/* Address for the Agere Phy */
+#define MII_AGERE_ADDR_1 0x00001000
+#define MII_AGERE_ADDR_2 0x00001100
+
+/* 32-bit ispControlStatus */
+enum {
+ ISP_CONTROL_NP_MASK = 0x0003,
+ ISP_CONTROL_NP_PCSR = 0x0000,
+ ISP_CONTROL_NP_HMCR = 0x0001,
+ ISP_CONTROL_NP_LRAMCR = 0x0002,
+ ISP_CONTROL_NP_PSR = 0x0003,
+ ISP_CONTROL_RI = 0x0008,
+ ISP_CONTROL_CI = 0x0010,
+ ISP_CONTROL_PI = 0x0020,
+ ISP_CONTROL_IN = 0x0040,
+ ISP_CONTROL_BE = 0x0080,
+ ISP_CONTROL_FN_MASK = 0x0700,
+ ISP_CONTROL_FN0_NET = 0x0400,
+ ISP_CONTROL_FN0_SCSI = 0x0500,
+ ISP_CONTROL_FN1_NET = 0x0600,
+ ISP_CONTROL_FN1_SCSI = 0x0700,
+ ISP_CONTROL_LINK_DN_0 = 0x0800,
+ ISP_CONTROL_LINK_DN_1 = 0x1000,
+ ISP_CONTROL_FSR = 0x2000,
+ ISP_CONTROL_FE = 0x4000,
+ ISP_CONTROL_SR = 0x8000,
+};
+
+/* 32-bit ispInterruptMaskReg */
+enum {
+ ISP_IMR_ENABLE_INT = 0x0004,
+ ISP_IMR_DISABLE_RESET_INT = 0x0008,
+ ISP_IMR_DISABLE_CMPL_INT = 0x0010,
+ ISP_IMR_DISABLE_PROC_INT = 0x0020,
+};
+
+/* 32-bit serialPortInterfaceReg */
+enum {
+ ISP_SERIAL_PORT_IF_CLK = 0x0001,
+ ISP_SERIAL_PORT_IF_CS = 0x0002,
+ ISP_SERIAL_PORT_IF_D0 = 0x0004,
+ ISP_SERIAL_PORT_IF_DI = 0x0008,
+ ISP_NVRAM_MASK = (0x000F << 16),
+ ISP_SERIAL_PORT_IF_WE = 0x0010,
+ ISP_SERIAL_PORT_IF_NVR_MASK = 0x001F,
+ ISP_SERIAL_PORT_IF_SCI = 0x0400,
+ ISP_SERIAL_PORT_IF_SC0 = 0x0800,
+ ISP_SERIAL_PORT_IF_SCE = 0x1000,
+ ISP_SERIAL_PORT_IF_SDI = 0x2000,
+ ISP_SERIAL_PORT_IF_SDO = 0x4000,
+ ISP_SERIAL_PORT_IF_SDE = 0x8000,
+ ISP_SERIAL_PORT_IF_I2C_MASK = 0xFC00,
+};
+
+/* semaphoreReg */
+enum {
+ QL_RESOURCE_MASK_BASE_CODE = 0x7,
+ QL_RESOURCE_BITS_BASE_CODE = 0x4,
+ QL_DRVR_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 1),
+ QL_DDR_RAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 4),
+ QL_PHY_GIO_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 7),
+ QL_NVRAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 10),
+ QL_FLASH_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 13),
+ QL_DRVR_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (1 + 16)),
+ QL_DDR_RAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (4 + 16)),
+ QL_PHY_GIO_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (7 + 16)),
+ QL_NVRAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (10 + 16)),
+ QL_FLASH_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (13 + 16)),
+};
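+
+/*
+ * Illustration (taken from the callers in qla3xxx.c) of how a mask and its
+ * resource bits are paired: the DDR RAM semaphore is acquired with
+ *
+ *	ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
+ *			(QL_RESOURCE_BITS_BASE_CODE |
+ *			 (qdev->mac_index) * 2) << 4);
+ *
+ * The mask selects the semaphore field in the upper half-word, while the
+ * shifted code claims it for the calling function.
+ */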
+
+/*
+ * QL3XXX memory-mapped registers
+ * QL3XXX has 4 "pages" of registers, each page occupying
+ * 256 bytes. Each page has a "common" area at the start and then
+ * page-specific registers after that.
+ */
+struct ql3xxx_common_registers {
+ u32 MB0; /* Offset 0x00 */
+ u32 MB1; /* Offset 0x04 */
+ u32 MB2; /* Offset 0x08 */
+ u32 MB3; /* Offset 0x0c */
+ u32 MB4; /* Offset 0x10 */
+ u32 MB5; /* Offset 0x14 */
+ u32 MB6; /* Offset 0x18 */
+ u32 MB7; /* Offset 0x1c */
+ u32 flashBiosAddr;
+ u32 flashBiosData;
+ u32 ispControlStatus;
+ u32 ispInterruptMaskReg;
+ u32 serialPortInterfaceReg;
+ u32 semaphoreReg;
+ u32 reqQProducerIndex;
+ u32 rspQConsumerIndex;
+
+ u32 rxLargeQProducerIndex;
+ u32 rxSmallQProducerIndex;
+ u32 arcMadiCommand;
+ u32 arcMadiData;
+};
+
+enum {
+ EXT_HW_CONFIG_SP_MASK = 0x0006,
+ EXT_HW_CONFIG_SP_NONE = 0x0000,
+ EXT_HW_CONFIG_SP_BYTE_PARITY = 0x0002,
+ EXT_HW_CONFIG_SP_ECC = 0x0004,
+ EXT_HW_CONFIG_SP_ECCx = 0x0006,
+ EXT_HW_CONFIG_SIZE_MASK = 0x0060,
+ EXT_HW_CONFIG_SIZE_128M = 0x0000,
+ EXT_HW_CONFIG_SIZE_256M = 0x0020,
+ EXT_HW_CONFIG_SIZE_512M = 0x0040,
+ EXT_HW_CONFIG_SIZE_INVALID = 0x0060,
+ EXT_HW_CONFIG_PD = 0x0080,
+ EXT_HW_CONFIG_FW = 0x0200,
+ EXT_HW_CONFIG_US = 0x0400,
+ EXT_HW_CONFIG_DCS_MASK = 0x1800,
+ EXT_HW_CONFIG_DCS_9MA = 0x0000,
+ EXT_HW_CONFIG_DCS_15MA = 0x0800,
+ EXT_HW_CONFIG_DCS_18MA = 0x1000,
+ EXT_HW_CONFIG_DCS_24MA = 0x1800,
+ EXT_HW_CONFIG_DDS_MASK = 0x6000,
+ EXT_HW_CONFIG_DDS_9MA = 0x0000,
+ EXT_HW_CONFIG_DDS_15MA = 0x2000,
+ EXT_HW_CONFIG_DDS_18MA = 0x4000,
+ EXT_HW_CONFIG_DDS_24MA = 0x6000,
+};
+
+/* InternalChipConfig */
+enum {
+ INTERNAL_CHIP_DM = 0x0001,
+ INTERNAL_CHIP_SD = 0x0002,
+ INTERNAL_CHIP_RAP_MASK = 0x000C,
+ INTERNAL_CHIP_RAP_RR = 0x0000,
+ INTERNAL_CHIP_RAP_NRM = 0x0004,
+ INTERNAL_CHIP_RAP_ERM = 0x0008,
+ INTERNAL_CHIP_RAP_ERMx = 0x000C,
+ INTERNAL_CHIP_WE = 0x0010,
+ INTERNAL_CHIP_EF = 0x0020,
+ INTERNAL_CHIP_FR = 0x0040,
+ INTERNAL_CHIP_FW = 0x0080,
+ INTERNAL_CHIP_FI = 0x0100,
+ INTERNAL_CHIP_FT = 0x0200,
+};
+
+/* portControl */
+enum {
+ PORT_CONTROL_DS = 0x0001,
+ PORT_CONTROL_HH = 0x0002,
+ PORT_CONTROL_EI = 0x0004,
+ PORT_CONTROL_ET = 0x0008,
+ PORT_CONTROL_EF = 0x0010,
+ PORT_CONTROL_DRM = 0x0020,
+ PORT_CONTROL_RLB = 0x0040,
+ PORT_CONTROL_RCB = 0x0080,
+ PORT_CONTROL_MAC = 0x0100,
+ PORT_CONTROL_IPV = 0x0200,
+ PORT_CONTROL_IFP = 0x0400,
+ PORT_CONTROL_ITP = 0x0800,
+ PORT_CONTROL_FI = 0x1000,
+ PORT_CONTROL_DFP = 0x2000,
+ PORT_CONTROL_OI = 0x4000,
+ PORT_CONTROL_CC = 0x8000,
+};
+
+/* portStatus */
+enum {
+ PORT_STATUS_SM0 = 0x0001,
+ PORT_STATUS_SM1 = 0x0002,
+ PORT_STATUS_X = 0x0008,
+ PORT_STATUS_DL = 0x0080,
+ PORT_STATUS_IC = 0x0200,
+ PORT_STATUS_MRC = 0x0400,
+ PORT_STATUS_NL = 0x0800,
+ PORT_STATUS_REV_ID_MASK = 0x7000,
+ PORT_STATUS_REV_ID_1 = 0x1000,
+ PORT_STATUS_REV_ID_2 = 0x2000,
+ PORT_STATUS_REV_ID_3 = 0x3000,
+ PORT_STATUS_64 = 0x8000,
+ PORT_STATUS_UP0 = 0x10000,
+ PORT_STATUS_AC0 = 0x20000,
+ PORT_STATUS_AE0 = 0x40000,
+ PORT_STATUS_UP1 = 0x100000,
+ PORT_STATUS_AC1 = 0x200000,
+ PORT_STATUS_AE1 = 0x400000,
+ PORT_STATUS_F0_ENABLED = 0x1000000,
+ PORT_STATUS_F1_ENABLED = 0x2000000,
+ PORT_STATUS_F2_ENABLED = 0x4000000,
+ PORT_STATUS_F3_ENABLED = 0x8000000,
+};
+
+/* macAddrIndirectPtrReg */
+enum {
+ MAC_ADDR_INDIRECT_PTR_REG_RP_MASK = 0x0003,
+ MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_LWR = 0x0000,
+ MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_UPR = 0x0001,
+ MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_LWR = 0x0002,
+ MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_UPR = 0x0003,
+ MAC_ADDR_INDIRECT_PTR_REG_PR = 0x0008,
+ MAC_ADDR_INDIRECT_PTR_REG_SS = 0x0010,
+ MAC_ADDR_INDIRECT_PTR_REG_SE = 0x0020,
+ MAC_ADDR_INDIRECT_PTR_REG_SP = 0x0040,
+ MAC_ADDR_INDIRECT_PTR_REG_PE = 0x0080,
+};
+
+/* macMIIMgmtControlReg */
+enum {
+ MAC_MII_CONTROL_RC = 0x0001,
+ MAC_MII_CONTROL_SC = 0x0002,
+ MAC_MII_CONTROL_AS = 0x0004,
+ MAC_MII_CONTROL_NP = 0x0008,
+ MAC_MII_CONTROL_CLK_SEL_MASK = 0x0070,
+ MAC_MII_CONTROL_CLK_SEL_DIV2 = 0x0000,
+ MAC_MII_CONTROL_CLK_SEL_DIV4 = 0x0010,
+ MAC_MII_CONTROL_CLK_SEL_DIV6 = 0x0020,
+ MAC_MII_CONTROL_CLK_SEL_DIV8 = 0x0030,
+ MAC_MII_CONTROL_CLK_SEL_DIV10 = 0x0040,
+ MAC_MII_CONTROL_CLK_SEL_DIV14 = 0x0050,
+ MAC_MII_CONTROL_CLK_SEL_DIV20 = 0x0060,
+ MAC_MII_CONTROL_CLK_SEL_DIV28 = 0x0070,
+ MAC_MII_CONTROL_RM = 0x8000,
+};
+
+/* macMIIStatusReg */
+enum {
+ MAC_MII_STATUS_BSY = 0x0001,
+ MAC_MII_STATUS_SC = 0x0002,
+ MAC_MII_STATUS_NV = 0x0004,
+};
+
+enum {
+ MAC_CONFIG_REG_PE = 0x0001,
+ MAC_CONFIG_REG_TF = 0x0002,
+ MAC_CONFIG_REG_RF = 0x0004,
+ MAC_CONFIG_REG_FD = 0x0008,
+ MAC_CONFIG_REG_GM = 0x0010,
+ MAC_CONFIG_REG_LB = 0x0020,
+ MAC_CONFIG_REG_SR = 0x8000,
+};
+
+enum {
+ MAC_HALF_DUPLEX_REG_ED = 0x10000,
+ MAC_HALF_DUPLEX_REG_NB = 0x20000,
+ MAC_HALF_DUPLEX_REG_BNB = 0x40000,
+ MAC_HALF_DUPLEX_REG_ALT = 0x80000,
+};
+
+enum {
+ IP_ADDR_INDEX_REG_MASK = 0x000f,
+ IP_ADDR_INDEX_REG_FUNC_0_PRI = 0x0000,
+ IP_ADDR_INDEX_REG_FUNC_0_SEC = 0x0001,
+ IP_ADDR_INDEX_REG_FUNC_1_PRI = 0x0002,
+ IP_ADDR_INDEX_REG_FUNC_1_SEC = 0x0003,
+ IP_ADDR_INDEX_REG_FUNC_2_PRI = 0x0004,
+ IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005,
+ IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006,
+ IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007,
+ IP_ADDR_INDEX_REG_6 = 0x0008,
+ IP_ADDR_INDEX_REG_OFFSET_MASK = 0x0030,
+ IP_ADDR_INDEX_REG_E = 0x0040,
+};
+enum {
+ QL3032_PORT_CONTROL_DS = 0x0001,
+ QL3032_PORT_CONTROL_HH = 0x0002,
+ QL3032_PORT_CONTROL_EIv6 = 0x0004,
+ QL3032_PORT_CONTROL_EIv4 = 0x0008,
+ QL3032_PORT_CONTROL_ET = 0x0010,
+ QL3032_PORT_CONTROL_EF = 0x0020,
+ QL3032_PORT_CONTROL_DRM = 0x0040,
+ QL3032_PORT_CONTROL_RLB = 0x0080,
+ QL3032_PORT_CONTROL_RCB = 0x0100,
+ QL3032_PORT_CONTROL_KIE = 0x0200,
+};
+
+enum {
+ PROBE_MUX_ADDR_REG_MUX_SEL_MASK = 0x003f,
+ PROBE_MUX_ADDR_REG_SYSCLK = 0x0000,
+ PROBE_MUX_ADDR_REG_PCICLK = 0x0040,
+ PROBE_MUX_ADDR_REG_NRXCLK = 0x0080,
+ PROBE_MUX_ADDR_REG_CPUCLK = 0x00C0,
+ PROBE_MUX_ADDR_REG_MODULE_SEL_MASK = 0x3f00,
+ PROBE_MUX_ADDR_REG_UP = 0x4000,
+ PROBE_MUX_ADDR_REG_RE = 0x8000,
+};
+
+enum {
+ STATISTICS_INDEX_REG_MASK = 0x01ff,
+ STATISTICS_INDEX_REG_MAC0_TX_FRAME = 0x0000,
+ STATISTICS_INDEX_REG_MAC0_TX_BYTES = 0x0001,
+ STATISTICS_INDEX_REG_MAC0_TX_STAT1 = 0x0002,
+ STATISTICS_INDEX_REG_MAC0_TX_STAT2 = 0x0003,
+ STATISTICS_INDEX_REG_MAC0_TX_STAT3 = 0x0004,
+ STATISTICS_INDEX_REG_MAC0_TX_STAT4 = 0x0005,
+ STATISTICS_INDEX_REG_MAC0_TX_STAT5 = 0x0006,
+ STATISTICS_INDEX_REG_MAC0_RX_FRAME = 0x0007,
+ STATISTICS_INDEX_REG_MAC0_RX_BYTES = 0x0008,
+ STATISTICS_INDEX_REG_MAC0_RX_STAT1 = 0x0009,
+ STATISTICS_INDEX_REG_MAC0_RX_STAT2 = 0x000a,
+ STATISTICS_INDEX_REG_MAC0_RX_STAT3 = 0x000b,
+ STATISTICS_INDEX_REG_MAC0_RX_ERR_CRC = 0x000c,
+ STATISTICS_INDEX_REG_MAC0_RX_ERR_ENC = 0x000d,
+ STATISTICS_INDEX_REG_MAC0_RX_ERR_LEN = 0x000e,
+ STATISTICS_INDEX_REG_MAC0_RX_STAT4 = 0x000f,
+ STATISTICS_INDEX_REG_MAC1_TX_FRAME = 0x0010,
+ STATISTICS_INDEX_REG_MAC1_TX_BYTES = 0x0011,
+ STATISTICS_INDEX_REG_MAC1_TX_STAT1 = 0x0012,
+ STATISTICS_INDEX_REG_MAC1_TX_STAT2 = 0x0013,
+ STATISTICS_INDEX_REG_MAC1_TX_STAT3 = 0x0014,
+ STATISTICS_INDEX_REG_MAC1_TX_STAT4 = 0x0015,
+ STATISTICS_INDEX_REG_MAC1_TX_STAT5 = 0x0016,
+ STATISTICS_INDEX_REG_MAC1_RX_FRAME = 0x0017,
+ STATISTICS_INDEX_REG_MAC1_RX_BYTES = 0x0018,
+ STATISTICS_INDEX_REG_MAC1_RX_STAT1 = 0x0019,
+ STATISTICS_INDEX_REG_MAC1_RX_STAT2 = 0x001a,
+ STATISTICS_INDEX_REG_MAC1_RX_STAT3 = 0x001b,
+ STATISTICS_INDEX_REG_MAC1_RX_ERR_CRC = 0x001c,
+ STATISTICS_INDEX_REG_MAC1_RX_ERR_ENC = 0x001d,
+ STATISTICS_INDEX_REG_MAC1_RX_ERR_LEN = 0x001e,
+ STATISTICS_INDEX_REG_MAC1_RX_STAT4 = 0x001f,
+ STATISTICS_INDEX_REG_IP_TX_PKTS = 0x0020,
+ STATISTICS_INDEX_REG_IP_TX_BYTES = 0x0021,
+ STATISTICS_INDEX_REG_IP_TX_FRAG = 0x0022,
+ STATISTICS_INDEX_REG_IP_RX_PKTS = 0x0023,
+ STATISTICS_INDEX_REG_IP_RX_BYTES = 0x0024,
+ STATISTICS_INDEX_REG_IP_RX_FRAG = 0x0025,
+ STATISTICS_INDEX_REG_IP_DGRM_REASSEMBLY = 0x0026,
+ STATISTICS_INDEX_REG_IP_V6_RX_PKTS = 0x0027,
+ STATISTICS_INDEX_REG_IP_RX_PKTERR = 0x0028,
+ STATISTICS_INDEX_REG_IP_REASSEMBLY_ERR = 0x0029,
+ STATISTICS_INDEX_REG_TCP_TX_SEG = 0x0030,
+ STATISTICS_INDEX_REG_TCP_TX_BYTES = 0x0031,
+ STATISTICS_INDEX_REG_TCP_RX_SEG = 0x0032,
+ STATISTICS_INDEX_REG_TCP_RX_BYTES = 0x0033,
+ STATISTICS_INDEX_REG_TCP_TIMER_EXP = 0x0034,
+ STATISTICS_INDEX_REG_TCP_RX_ACK = 0x0035,
+ STATISTICS_INDEX_REG_TCP_TX_ACK = 0x0036,
+ STATISTICS_INDEX_REG_TCP_RX_ERR = 0x0037,
+ STATISTICS_INDEX_REG_TCP_RX_WIN_PROBE = 0x0038,
+ STATISTICS_INDEX_REG_TCP_ECC_ERR_CORR = 0x003f,
+};
+
+enum {
+ PORT_FATAL_ERROR_STATUS_OFB_RE_MAC0 = 0x00000001,
+ PORT_FATAL_ERROR_STATUS_OFB_RE_MAC1 = 0x00000002,
+ PORT_FATAL_ERROR_STATUS_OFB_WE = 0x00000004,
+ PORT_FATAL_ERROR_STATUS_IFB_RE = 0x00000008,
+ PORT_FATAL_ERROR_STATUS_IFB_WE_MAC0 = 0x00000010,
+ PORT_FATAL_ERROR_STATUS_IFB_WE_MAC1 = 0x00000020,
+ PORT_FATAL_ERROR_STATUS_ODE_RE = 0x00000040,
+ PORT_FATAL_ERROR_STATUS_ODE_WE = 0x00000080,
+ PORT_FATAL_ERROR_STATUS_IDE_RE = 0x00000100,
+ PORT_FATAL_ERROR_STATUS_IDE_WE = 0x00000200,
+ PORT_FATAL_ERROR_STATUS_SDE_RE = 0x00000400,
+ PORT_FATAL_ERROR_STATUS_SDE_WE = 0x00000800,
+ PORT_FATAL_ERROR_STATUS_BLE = 0x00001000,
+ PORT_FATAL_ERROR_STATUS_SPE = 0x00002000,
+ PORT_FATAL_ERROR_STATUS_EP0 = 0x00004000,
+ PORT_FATAL_ERROR_STATUS_EP1 = 0x00008000,
+ PORT_FATAL_ERROR_STATUS_ICE = 0x00010000,
+ PORT_FATAL_ERROR_STATUS_ILE = 0x00020000,
+ PORT_FATAL_ERROR_STATUS_OPE = 0x00040000,
+ PORT_FATAL_ERROR_STATUS_TA = 0x00080000,
+ PORT_FATAL_ERROR_STATUS_MA = 0x00100000,
+ PORT_FATAL_ERROR_STATUS_SCE = 0x00200000,
+ PORT_FATAL_ERROR_STATUS_RPE = 0x00400000,
+ PORT_FATAL_ERROR_STATUS_MPE = 0x00800000,
+ PORT_FATAL_ERROR_STATUS_OCE = 0x01000000,
+};
+
+/*
+ * port control and status page - page 0
+ */
+
+struct ql3xxx_port_registers {
+ struct ql3xxx_common_registers CommonRegs;
+
+ u32 ExternalHWConfig;
+ u32 InternalChipConfig;
+ u32 portControl;
+ u32 portStatus;
+ u32 macAddrIndirectPtrReg;
+ u32 macAddrDataReg;
+ u32 macMIIMgmtControlReg;
+ u32 macMIIMgmtAddrReg;
+ u32 macMIIMgmtDataReg;
+ u32 macMIIStatusReg;
+ u32 mac0ConfigReg;
+ u32 mac0IpgIfgReg;
+ u32 mac0HalfDuplexReg;
+ u32 mac0MaxFrameLengthReg;
+ u32 mac0PauseThresholdReg;
+ u32 mac1ConfigReg;
+ u32 mac1IpgIfgReg;
+ u32 mac1HalfDuplexReg;
+ u32 mac1MaxFrameLengthReg;
+ u32 mac1PauseThresholdReg;
+ u32 ipAddrIndexReg;
+ u32 ipAddrDataReg;
+ u32 ipReassemblyTimeout;
+ u32 tcpMaxWindow;
+ u32 currentTcpTimestamp[2];
+ u32 internalRamRWAddrReg;
+ u32 internalRamWDataReg;
+ u32 reclaimedBufferAddrRegLow;
+ u32 reclaimedBufferAddrRegHigh;
+ u32 tcpConfiguration;
+ u32 functionControl;
+ u32 fpgaRevID;
+ u32 localRamAddr;
+ u32 localRamDataAutoIncr;
+ u32 localRamDataNonIncr;
+ u32 gpOutput;
+ u32 gpInput;
+ u32 probeMuxAddr;
+ u32 probeMuxData;
+ u32 statisticsIndexReg;
+ u32 statisticsReadDataRegAutoIncr;
+ u32 statisticsReadDataRegNoIncr;
+ u32 PortFatalErrStatus;
+};
+
+/*
+ * port host memory config page - page 1
+ */
+struct ql3xxx_host_memory_registers {
+ struct ql3xxx_common_registers CommonRegs;
+
+ u32 reserved[12];
+
+ /* Network Request Queue */
+ u32 reqConsumerIndex;
+ u32 reqConsumerIndexAddrLow;
+ u32 reqConsumerIndexAddrHigh;
+ u32 reqBaseAddrLow;
+ u32 reqBaseAddrHigh;
+ u32 reqLength;
+
+ /* Network Completion Queue */
+ u32 rspProducerIndex;
+ u32 rspProducerIndexAddrLow;
+ u32 rspProducerIndexAddrHigh;
+ u32 rspBaseAddrLow;
+ u32 rspBaseAddrHigh;
+ u32 rspLength;
+
+ /* RX Large Buffer Queue */
+ u32 rxLargeQConsumerIndex;
+ u32 rxLargeQBaseAddrLow;
+ u32 rxLargeQBaseAddrHigh;
+ u32 rxLargeQLength;
+ u32 rxLargeBufferLength;
+
+ /* RX Small Buffer Queue */
+ u32 rxSmallQConsumerIndex;
+ u32 rxSmallQBaseAddrLow;
+ u32 rxSmallQBaseAddrHigh;
+ u32 rxSmallQLength;
+ u32 rxSmallBufferLength;
+
+};
+
+/*
+ * port local RAM page - page 2
+ */
+struct ql3xxx_local_ram_registers {
+ struct ql3xxx_common_registers CommonRegs;
+ u32 bufletSize;
+ u32 maxBufletCount;
+ u32 currentBufletCount;
+ u32 reserved;
+ u32 freeBufletThresholdLow;
+ u32 freeBufletThresholdHigh;
+ u32 ipHashTableBase;
+ u32 ipHashTableCount;
+ u32 tcpHashTableBase;
+ u32 tcpHashTableCount;
+ u32 ncbBase;
+ u32 maxNcbCount;
+ u32 currentNcbCount;
+ u32 drbBase;
+ u32 maxDrbCount;
+ u32 currentDrbCount;
+};
+
+/*
+ * Macros for splitting a 64-bit DMA address into its 32-bit halves.
+ */
+
+#define LS_64BITS(x) (u32)(0xffffffff & ((u64)x))
+#define MS_64BITS(x) (u32)(0xffffffff & (((u64)x) >> 16 >> 16))
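+
+/*
+ * The double 16-bit shift (rather than a single >> 32) keeps MS_64BITS
+ * well defined even when x is only 32 bits wide, e.g. a 32-bit
+ * dma_addr_t. Typical use pairs the two halves for the high/low register
+ * writes, as in qla3xxx.c:
+ *
+ *	ql_write_page1_reg(qdev, &hmem_regs->reqBaseAddrHigh,
+ *			   MS_64BITS(qdev->req_q_phy_addr));
+ *	ql_write_page1_reg(qdev, &hmem_regs->reqBaseAddrLow,
+ *			   LS_64BITS(qdev->req_q_phy_addr));
+ */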
+
+/*
+ * I/O register
+ */
+
+enum {
+ CONTROL_REG = 0,
+ STATUS_REG = 1,
+ PHY_STAT_LINK_UP = 0x0004,
+ PHY_CTRL_LOOPBACK = 0x4000,
+
+ PETBI_CONTROL_REG = 0x00,
+ PETBI_CTRL_ALL_PARAMS = 0x7140,
+ PETBI_CTRL_SOFT_RESET = 0x8000,
+ PETBI_CTRL_AUTO_NEG = 0x1000,
+ PETBI_CTRL_RESTART_NEG = 0x0200,
+ PETBI_CTRL_FULL_DUPLEX = 0x0100,
+ PETBI_CTRL_SPEED_1000 = 0x0040,
+
+ PETBI_STATUS_REG = 0x01,
+ PETBI_STAT_NEG_DONE = 0x0020,
+ PETBI_STAT_LINK_UP = 0x0004,
+
+ PETBI_NEG_ADVER = 0x04,
+ PETBI_NEG_PAUSE = 0x0080,
+ PETBI_NEG_PAUSE_MASK = 0x0180,
+ PETBI_NEG_DUPLEX = 0x0020,
+ PETBI_NEG_DUPLEX_MASK = 0x0060,
+
+ PETBI_NEG_PARTNER = 0x05,
+ PETBI_NEG_ERROR_MASK = 0x3000,
+
+ PETBI_EXPANSION_REG = 0x06,
+ PETBI_EXP_PAGE_RX = 0x0002,
+
+ PHY_GIG_CONTROL = 9,
+ PHY_GIG_ENABLE_MAN = 0x1000, /* Enable Master/Slave Manual Config*/
+ PHY_GIG_SET_MASTER = 0x0800, /* Set Master (slave if clear)*/
+ PHY_GIG_ALL_PARAMS = 0x0300,
+ PHY_GIG_ADV_1000F = 0x0200,
+ PHY_GIG_ADV_1000H = 0x0100,
+
+ PHY_NEG_ADVER = 4,
+ PHY_NEG_ALL_PARAMS = 0x0fe0,
+ PHY_NEG_ASY_PAUSE = 0x0800,
+ PHY_NEG_SYM_PAUSE = 0x0400,
+ PHY_NEG_ADV_SPEED = 0x01e0,
+ PHY_NEG_ADV_100F = 0x0100,
+ PHY_NEG_ADV_100H = 0x0080,
+ PHY_NEG_ADV_10F = 0x0040,
+ PHY_NEG_ADV_10H = 0x0020,
+
+ PETBI_TBI_CTRL = 0x11,
+ PETBI_TBI_RESET = 0x8000,
+ PETBI_TBI_AUTO_SENSE = 0x0100,
+ PETBI_TBI_SERDES_MODE = 0x0010,
+ PETBI_TBI_SERDES_WRAP = 0x0002,
+
+ AUX_CONTROL_STATUS = 0x1c,
+ PHY_AUX_NEG_DONE = 0x8000,
+ PHY_NEG_PARTNER = 5,
+ PHY_AUX_DUPLEX_STAT = 0x0020,
+ PHY_AUX_SPEED_STAT = 0x0018,
+ PHY_AUX_NO_HW_STRAP = 0x0004,
+ PHY_AUX_RESET_STICK = 0x0002,
+ PHY_NEG_PAUSE = 0x0400,
+ PHY_CTRL_SOFT_RESET = 0x8000,
+ PHY_CTRL_AUTO_NEG = 0x1000,
+ PHY_CTRL_RESTART_NEG = 0x0200,
+};
+enum {
+/* FM93C56A serial EEPROM definitions */
+ FM93C56A_START = 0x1,
+/* Commands */
+ FM93C56A_READ = 0x2,
+ FM93C56A_WEN = 0x0,
+ FM93C56A_WRITE = 0x1,
+ FM93C56A_WRITE_ALL = 0x0,
+ FM93C56A_WDS = 0x0,
+ FM93C56A_ERASE = 0x3,
+ FM93C56A_ERASE_ALL = 0x0,
+/* Command Extensions */
+ FM93C56A_WEN_EXT = 0x3,
+ FM93C56A_WRITE_ALL_EXT = 0x1,
+ FM93C56A_WDS_EXT = 0x0,
+ FM93C56A_ERASE_ALL_EXT = 0x2,
+/* Special Bits */
+ FM93C56A_READ_DUMMY_BITS = 1,
+ FM93C56A_READY = 0,
+ FM93C56A_BUSY = 1,
+ FM93C56A_CMD_BITS = 2,
+/* Serial EEPROM sizes */
+ FM93C56A_SIZE_8 = 0x100,
+ FM93C56A_SIZE_16 = 0x80,
+ FM93C66A_SIZE_8 = 0x200,
+ FM93C66A_SIZE_16 = 0x100,
+ FM93C86A_SIZE_16 = 0x400,
+/* Address Bits */
+ FM93C56A_NO_ADDR_BITS_16 = 8,
+ FM93C56A_NO_ADDR_BITS_8 = 9,
+ FM93C86A_NO_ADDR_BITS_16 = 10,
+/* Data Bits */
+ FM93C56A_DATA_BITS_16 = 16,
+ FM93C56A_DATA_BITS_8 = 8,
+};
+enum {
+/* Auburn Bits */
+ AUBURN_EEPROM_DI = 0x8,
+ AUBURN_EEPROM_DI_0 = 0x0,
+ AUBURN_EEPROM_DI_1 = 0x8,
+ AUBURN_EEPROM_DO = 0x4,
+ AUBURN_EEPROM_DO_0 = 0x0,
+ AUBURN_EEPROM_DO_1 = 0x4,
+ AUBURN_EEPROM_CS = 0x2,
+ AUBURN_EEPROM_CS_0 = 0x0,
+ AUBURN_EEPROM_CS_1 = 0x2,
+ AUBURN_EEPROM_CLK_RISE = 0x1,
+ AUBURN_EEPROM_CLK_FALL = 0x0,
+};
+enum {EEPROM_SIZE = FM93C86A_SIZE_16,
+ EEPROM_NO_ADDR_BITS = FM93C86A_NO_ADDR_BITS_16,
+ EEPROM_NO_DATA_BITS = FM93C56A_DATA_BITS_16,
+};
+
+/*
+ * MAC Config data structure
+ */
+struct eeprom_port_cfg {
+ u16 etherMtu_mac;
+ u16 pauseThreshold_mac;
+ u16 resumeThreshold_mac;
+ u16 portConfiguration;
+#define PORT_CONFIG_DEFAULT 0xf700
+#define PORT_CONFIG_AUTO_NEG_ENABLED 0x8000
+#define PORT_CONFIG_SYM_PAUSE_ENABLED 0x4000
+#define PORT_CONFIG_FULL_DUPLEX_ENABLED 0x2000
+#define PORT_CONFIG_HALF_DUPLEX_ENABLED 0x1000
+#define PORT_CONFIG_1000MB_SPEED 0x0400
+#define PORT_CONFIG_100MB_SPEED 0x0200
+#define PORT_CONFIG_10MB_SPEED 0x0100
+#define PORT_CONFIG_LINK_SPEED_MASK 0x0F00
+ u16 reserved[12];
+
+};
+
+/*
+ * BIOS data structure
+ */
+struct eeprom_bios_cfg {
+ u16 SpinDlyEn:1, disBios:1, EnMemMap:1, EnSelectBoot:1, Reserved:12;
+
+ u8 bootID0:7, bootID0Valid:1;
+ u8 bootLun0[8];
+
+ u8 bootID1:7, bootID1Valid:1;
+ u8 bootLun1[8];
+
+ u16 MaxLunsTrgt;
+ u8 reserved[10];
+};
+
+/*
+ * Function Specific Data structure
+ */
+struct eeprom_function_cfg {
+ u8 reserved[30];
+ u16 macAddress[3];
+ u16 macAddressSecondary[3];
+
+ u16 subsysVendorId;
+ u16 subsysDeviceId;
+};
+
+/*
+ * EEPROM format
+ */
+struct eeprom_data {
+ u8 asicId[4];
+ u16 version_and_numPorts; /* kept together to avoid endianness issues */
+ u16 boardId;
+
+#define EEPROM_BOARDID_STR_SIZE 16
+#define EEPROM_SERIAL_NUM_SIZE 16
+
+ u8 boardIdStr[16];
+ u8 serialNumber[16];
+ u16 extHwConfig;
+ struct eeprom_port_cfg macCfg_port0;
+ struct eeprom_port_cfg macCfg_port1;
+ u16 bufletSize;
+ u16 bufletCount;
+ u16 tcpWindowThreshold50;
+ u16 tcpWindowThreshold25;
+ u16 tcpWindowThreshold0;
+ u16 ipHashTableBaseHi;
+ u16 ipHashTableBaseLo;
+ u16 ipHashTableSize;
+ u16 tcpHashTableBaseHi;
+ u16 tcpHashTableBaseLo;
+ u16 tcpHashTableSize;
+ u16 ncbTableBaseHi;
+ u16 ncbTableBaseLo;
+ u16 ncbTableSize;
+ u16 drbTableBaseHi;
+ u16 drbTableBaseLo;
+ u16 drbTableSize;
+ u16 reserved_142[4];
+ u16 ipReassemblyTimeout;
+ u16 tcpMaxWindowSize;
+ u16 ipSecurity;
+#define IPSEC_CONFIG_PRESENT 0x0001
+ u8 reserved_156[294];
+ u16 qDebug[8];
+ struct eeprom_function_cfg funcCfg_fn0;
+ u16 reserved_510;
+ u8 oemSpace[432];
+ struct eeprom_bios_cfg biosCfg_fn1;
+ struct eeprom_function_cfg funcCfg_fn1;
+ u16 reserved_1022;
+ u8 reserved_1024[464];
+ struct eeprom_function_cfg funcCfg_fn2;
+ u16 reserved_1534;
+ u8 reserved_1536[432];
+ struct eeprom_bios_cfg biosCfg_fn3;
+ struct eeprom_function_cfg funcCfg_fn3;
+ u16 checksum;
+};
+
+/*
+ * General definitions...
+ */
+
+/*
+ * Below are a number of compiler switches for controlling driver behavior.
+ * Some are not supported under certain conditions and are noted as such.
+ */
+
+#define QL3XXX_VENDOR_ID 0x1077
+#define QL3022_DEVICE_ID 0x3022
+#define QL3032_DEVICE_ID 0x3032
+
+/* MTU & Frame Size stuff */
+#define NORMAL_MTU_SIZE ETH_DATA_LEN
+#define JUMBO_MTU_SIZE 9000
+#define VLAN_ID_LEN 2
+
+/* Request Queue Related Definitions */
+#define NUM_REQ_Q_ENTRIES 256 /* 256 64-byte IOCBs = 16384 bytes (4 pages) */
+
+/* Response Queue Related Definitions */
+#define NUM_RSP_Q_ENTRIES 256 /* so that 256 * 16 = 4096 (1 page) */
+
+/* Transmit and Receive Buffers */
+#define NUM_LBUFQ_ENTRIES 128
+#define JUMBO_NUM_LBUFQ_ENTRIES 32
+#define NUM_SBUFQ_ENTRIES 64
+#define QL_SMALL_BUFFER_SIZE 32
+#define QL_ADDR_ELE_PER_BUFQ_ENTRY \
+(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element))
+/* Each send has at least a control block; this is how many we keep. */
+#define NUM_SMALL_BUFFERS (NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY)
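+
+/*
+ * Sketch of the arithmetic (from the structures below): struct
+ * lrg_buf_q_entry holds eight address pairs (64 bytes) and struct
+ * bufq_addr_element is one pair (8 bytes), so QL_ADDR_ELE_PER_BUFQ_ENTRY
+ * evaluates to 8 and NUM_SMALL_BUFFERS to 64 * 8 = 512.
+ */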
+
+#define QL_HEADER_SPACE 32 /* make header space at top of skb. */
+/*
+ * Large & Small Buffers for Receives
+ */
+struct lrg_buf_q_entry {
+
+ __le32 addr0_lower;
+#define IAL_LAST_ENTRY 0x00000001
+#define IAL_CONT_ENTRY 0x00000002
+#define IAL_FLAG_MASK 0x00000003
+ __le32 addr0_upper;
+ __le32 addr1_lower;
+ __le32 addr1_upper;
+ __le32 addr2_lower;
+ __le32 addr2_upper;
+ __le32 addr3_lower;
+ __le32 addr3_upper;
+ __le32 addr4_lower;
+ __le32 addr4_upper;
+ __le32 addr5_lower;
+ __le32 addr5_upper;
+ __le32 addr6_lower;
+ __le32 addr6_upper;
+ __le32 addr7_lower;
+ __le32 addr7_upper;
+
+};
+
+struct bufq_addr_element {
+ __le32 addr_low;
+ __le32 addr_high;
+};
+
+#define QL_NO_RESET 0
+#define QL_DO_RESET 1
+
+enum link_state_t {
+ LS_UNKNOWN = 0,
+ LS_DOWN,
+ LS_DEGRADE,
+ LS_RECOVER,
+ LS_UP,
+};
+
+struct ql_rcv_buf_cb {
+ struct ql_rcv_buf_cb *next;
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
+ __le32 buf_phy_addr_low;
+ __le32 buf_phy_addr_high;
+ int index;
+};
+
+/*
+ * Original IOCB has 3 sg entries:
+ * first points to skb-data area
+ * second points to first frag
+ * third points to next oal.
+ * OAL has 5 entries:
+ * 1 through 4 point to frags
+ * fifth points to next oal.
+ */
+#define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1)
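+
+/*
+ * Example (a sketch, assuming the common MAX_SKB_FRAGS of 18 with 4 KiB
+ * pages): the first IOCB already carries the skb data area and one frag,
+ * leaving 17 frags for the OAL chain at four per OAL, so
+ * MAX_OAL_CNT = (18 - 1) / 4 + 1 = 5.
+ */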
+
+struct oal_entry {
+ __le32 dma_lo;
+ __le32 dma_hi;
+ __le32 len;
+#define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */
+#define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */
+};
+
+struct oal {
+ struct oal_entry oal_entry[5];
+};
+
+struct map_list {
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
+};
+
+struct ql_tx_buf_cb {
+ struct sk_buff *skb;
+ struct ob_mac_iocb_req *queue_entry ;
+ int seg_count;
+ struct oal *oal;
+ struct map_list map[MAX_SKB_FRAGS+1];
+};
+
+/* definitions for type field */
+#define QL_BUF_TYPE_MACIOCB 0x01
+#define QL_BUF_TYPE_IPIOCB 0x02
+#define QL_BUF_TYPE_TCPIOCB 0x03
+
+/* qdev->flags definitions. */
+enum { QL_RESET_DONE = 1, /* Reset finished. */
+ QL_RESET_ACTIVE = 2, /* Waiting for reset to finish. */
+ QL_RESET_START = 3, /* Please reset the chip. */
+ QL_RESET_PER_SCSI = 4, /* SCSI driver requests reset. */
+ QL_TX_TIMEOUT = 5, /* Timeout in progress. */
+ QL_LINK_MASTER = 6, /* This driver controls the link. */
+ QL_ADAPTER_UP = 7, /* Adapter has been brought up. */
+ QL_THREAD_UP = 8, /* This flag is available. */
+ QL_LINK_UP = 9, /* Link Status. */
+ QL_ALLOC_REQ_RSP_Q_DONE = 10,
+ QL_ALLOC_BUFQS_DONE = 11,
+ QL_ALLOC_SMALL_BUF_DONE = 12,
+ QL_LINK_OPTICAL = 13,
+ QL_MSI_ENABLED = 14,
+};
+
+/*
+ * ql3_adapter - The main Adapter structure definition.
+ * This structure has all fields relevant to the hardware.
+ */
+
+struct ql3_adapter {
+ u32 reserved_00;
+ unsigned long flags;
+
+ /* PCI Configuration information for this device */
+ struct pci_dev *pdev;
+ struct net_device *ndev; /* Parent NET device */
+
+ struct napi_struct napi;
+
+ /* Hardware information */
+ u8 chip_rev_id;
+ u8 pci_slot;
+ u8 pci_width;
+ u8 pci_x;
+ u32 msi;
+ int index;
+ struct timer_list adapter_timer; /* timer used for various functions */
+
+ spinlock_t adapter_lock;
+ spinlock_t hw_lock;
+
+ /* PCI Bus Relative Register Addresses */
+ u8 __iomem *mmap_virt_base; /* stores return value from ioremap() */
+ struct ql3xxx_port_registers __iomem *mem_map_registers;
+ u32 current_page; /* tracks current register page */
+
+ u32 msg_enable;
+ u8 reserved_01[2];
+ u8 reserved_02[2];
+
+ /* Page for Shadow Registers */
+ void *shadow_reg_virt_addr;
+ dma_addr_t shadow_reg_phy_addr;
+
+ /* Net Request Queue */
+ u32 req_q_size;
+ u32 reserved_03;
+ struct ob_mac_iocb_req *req_q_virt_addr;
+ dma_addr_t req_q_phy_addr;
+ u16 req_producer_index;
+ u16 reserved_04;
+ u16 *preq_consumer_index;
+ u32 req_consumer_index_phy_addr_high;
+ u32 req_consumer_index_phy_addr_low;
+ atomic_t tx_count;
+ struct ql_tx_buf_cb tx_buf[NUM_REQ_Q_ENTRIES];
+
+ /* Net Response Queue */
+ u32 rsp_q_size;
+ u32 eeprom_cmd_data;
+ struct net_rsp_iocb *rsp_q_virt_addr;
+ dma_addr_t rsp_q_phy_addr;
+ struct net_rsp_iocb *rsp_current;
+ u16 rsp_consumer_index;
+ u16 reserved_06;
+ volatile __le32 *prsp_producer_index;
+ u32 rsp_producer_index_phy_addr_high;
+ u32 rsp_producer_index_phy_addr_low;
+
+ /* Large Buffer Queue */
+ u32 lrg_buf_q_alloc_size;
+ u32 lrg_buf_q_size;
+ void *lrg_buf_q_alloc_virt_addr;
+ void *lrg_buf_q_virt_addr;
+ dma_addr_t lrg_buf_q_alloc_phy_addr;
+ dma_addr_t lrg_buf_q_phy_addr;
+ u32 lrg_buf_q_producer_index;
+ u32 lrg_buf_release_cnt;
+ struct bufq_addr_element *lrg_buf_next_free;
+ u32 num_large_buffers;
+ u32 num_lbufq_entries;
+
+ /* Large (Receive) Buffers */
+ struct ql_rcv_buf_cb *lrg_buf;
+ struct ql_rcv_buf_cb *lrg_buf_free_head;
+ struct ql_rcv_buf_cb *lrg_buf_free_tail;
+ u32 lrg_buf_free_count;
+ u32 lrg_buffer_len;
+ u32 lrg_buf_index;
+ u32 lrg_buf_skb_check;
+
+ /* Small Buffer Queue */
+ u32 small_buf_q_alloc_size;
+ u32 small_buf_q_size;
+ u32 small_buf_q_producer_index;
+ void *small_buf_q_alloc_virt_addr;
+ void *small_buf_q_virt_addr;
+ dma_addr_t small_buf_q_alloc_phy_addr;
+ dma_addr_t small_buf_q_phy_addr;
+ u32 small_buf_index;
+
+ /* Small (Receive) Buffers */
+ void *small_buf_virt_addr;
+ dma_addr_t small_buf_phy_addr;
+ u32 small_buf_phy_addr_low;
+ u32 small_buf_phy_addr_high;
+ u32 small_buf_release_cnt;
+ u32 small_buf_total_size;
+
+ struct eeprom_data nvram_data;
+ u32 port_link_state;
+
+ /* 4022 specific */
+	u32 mac_index;	/* Driver's MAC number; 0 or 1 for the first and second network functions, respectively */
+	u32 PHYAddr;	/* PHY address: 0x1e00 for port 0, 0x1f00 for port 1 */
+ u32 mac_ob_opcode; /* Opcode to use on mac transmission */
+ u32 mb_bit_mask; /* MA Bits mask to use on transmission */
+ u32 numPorts;
+ struct workqueue_struct *workqueue;
+ struct delayed_work reset_work;
+ struct delayed_work tx_timeout_work;
+ struct delayed_work link_state_work;
+ u32 max_frame_size;
+ u32 device_id;
+ u16 phyType;
+};
+
+#endif /* _QLA3XXX_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
new file mode 100644
index 000000000000..ddba83ef3f44
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for Qlogic 1G/10G Ethernet Driver for CNA devices
+#
+
+obj-$(CONFIG_QLCNIC) := qlcnic.o
+
+qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
+ qlcnic_ethtool.o qlcnic_ctx.o
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
new file mode 100644
index 000000000000..2fd1ba8fee4a
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -0,0 +1,1575 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef _QLCNIC_H_
+#define _QLCNIC_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/timer.h>
+
+#include <linux/vmalloc.h>
+
+#include <linux/io.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+#include "qlcnic_hdr.h"
+
+#define _QLCNIC_LINUX_MAJOR 5
+#define _QLCNIC_LINUX_MINOR 0
+#define _QLCNIC_LINUX_SUBVERSION 24
+#define QLCNIC_LINUX_VERSIONID "5.0.24"
+#define QLCNIC_DRV_IDC_VER 0x01
+#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
+ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
+
+#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
+#define _major(v) (((v) >> 24) & 0xff)
+#define _minor(v) (((v) >> 16) & 0xff)
+#define _build(v) ((v) & 0xffff)
+
+/* version in image has weird encoding:
+ * 7:0 - major
+ * 15:8 - minor
+ * 31:16 - build (little endian)
+ */
+#define QLCNIC_DECODE_VERSION(v) \
+ QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
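+/*
+ * Worked example (illustrative): an image word of 0x00020405 decodes to
+ * major 5, minor 4, build 2, i.e. QLCNIC_VERSION_CODE(5, 4, 2) ==
+ * 0x05040002, which compares greater than QLCNIC_MIN_FW_VERSION
+ * (QLCNIC_VERSION_CODE(4, 4, 2) == 0x04040002).
+ */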
+
+#define QLCNIC_MIN_FW_VERSION QLCNIC_VERSION_CODE(4, 4, 2)
+#define QLCNIC_NUM_FLASH_SECTORS (64)
+#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024)
+#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \
+ * QLCNIC_FLASH_SECTOR_SIZE)
+
+#define RCV_DESC_RINGSIZE(rds_ring) \
+ (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
+#define RCV_BUFF_RINGSIZE(rds_ring) \
+ (sizeof(struct qlcnic_rx_buffer) * rds_ring->num_desc)
+#define STATUS_DESC_RINGSIZE(sds_ring) \
+ (sizeof(struct status_desc) * (sds_ring)->num_desc)
+#define TX_BUFF_RINGSIZE(tx_ring) \
+ (sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc)
+#define TX_DESC_RINGSIZE(tx_ring) \
+ (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
+
+#define QLCNIC_P3P_A0 0x50
+#define QLCNIC_P3P_C0 0x58
+
+#define QLCNIC_IS_REVISION_P3P(REVISION) (REVISION >= QLCNIC_P3P_A0)
+
+#define FIRST_PAGE_GROUP_START 0
+#define FIRST_PAGE_GROUP_END 0x100000
+
+#define P3P_MAX_MTU (9600)
+#define P3P_MIN_MTU (68)
+#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */
+
+#define QLCNIC_P3P_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN)
+#define QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3P_MAX_MTU)
+#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
+#define QLCNIC_LRO_BUFFER_EXTRA 2048
+
+/* Opcodes to be used with the commands */
+#define TX_ETHER_PKT 0x01
+#define TX_TCP_PKT 0x02
+#define TX_UDP_PKT 0x03
+#define TX_IP_PKT 0x04
+#define TX_TCP_LSO 0x05
+#define TX_TCP_LSO6 0x06
+#define TX_TCPV6_PKT 0x0b
+#define TX_UDPV6_PKT 0x0c
+
+/* Tx defines */
+#define QLCNIC_MAX_FRAGS_PER_TX 14
+#define MAX_TSO_HEADER_DESC 2
+#define MGMT_CMD_DESC_RESV 4
+#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ + MGMT_CMD_DESC_RESV)
+#define QLCNIC_MAX_TX_TIMEOUTS 2
+
+/*
+ * Following are the states of the Phantom. Phantom will set them and
+ * Host will read to check if the fields are correct.
+ */
+#define PHAN_INITIALIZE_FAILED 0xffff
+#define PHAN_INITIALIZE_COMPLETE 0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK 0xf00f
+#define PHAN_PEG_RCV_INITIALIZED 0xff01
+
+#define NUM_RCV_DESC_RINGS 3
+
+#define RCV_RING_NORMAL 0
+#define RCV_RING_JUMBO 1
+
+#define MIN_CMD_DESCRIPTORS 64
+#define MIN_RCV_DESCRIPTORS 64
+#define MIN_JUMBO_DESCRIPTORS 32
+
+#define MAX_CMD_DESCRIPTORS 1024
+#define MAX_RCV_DESCRIPTORS_1G 4096
+#define MAX_RCV_DESCRIPTORS_10G 8192
+#define MAX_RCV_DESCRIPTORS_VF 2048
+#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
+#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
+
+#define DEFAULT_RCV_DESCRIPTORS_1G 2048
+#define DEFAULT_RCV_DESCRIPTORS_10G 4096
+#define DEFAULT_RCV_DESCRIPTORS_VF 1024
+#define MAX_RDS_RINGS 2
+
+#define get_next_index(index, length) \
+ (((index) + 1) & ((length) - 1))
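+/*
+ * Illustrative note: this wrap-around only works when the ring length is
+ * a power of two, e.g. get_next_index(1023, 1024) == 0 and
+ * get_next_index(5, 1024) == 6.
+ */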
+
+/*
+ * The following data structures describe the descriptors that will be used.
+ * The TCP and IP header offset fields are needed only when we are doing
+ * LSO (i.e. for packets larger than the standard 1500-byte MTU).
+ */
+
+#define FLAGS_VLAN_TAGGED 0x10
+#define FLAGS_VLAN_OOB 0x40
+
+#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
+ (cmd_desc)->vlan_TCI = cpu_to_le16(v);
+#define qlcnic_set_cmd_desc_port(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
+#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
+
+#define qlcnic_set_tx_port(_desc, _port) \
+ ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
+
+#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
+ ((_desc)->flags_opcode |= \
+ cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
+
+#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
+ ((_desc)->nfrags__length = \
+ cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
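+/*
+ * Worked packing example (illustrative): for flags = FLAGS_VLAN_OOB (0x40)
+ * and opcode = TX_TCP_LSO (0x05), flags_opcode becomes
+ * 0x40 | (0x05 << 7) == 0x2c0; for 3 frags and a 1514-byte frame,
+ * nfrags__length becomes 0x03 | (1514 << 8) == 0x5ea03.
+ */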
+
+struct cmd_desc_type0 {
+ u8 tcp_hdr_offset; /* For LSO only */
+ u8 ip_hdr_offset; /* For LSO only */
+ __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */
+ __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */
+
+ __le64 addr_buffer2;
+
+ __le16 reference_handle;
+ __le16 mss;
+ u8 port_ctxid; /* 7:4 ctxid 3:0 port */
+ u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
+	__le16 conn_id;		/* IPSec offload only */
+
+ __le64 addr_buffer3;
+ __le64 addr_buffer1;
+
+ __le16 buffer_length[4];
+
+ __le64 addr_buffer4;
+
+ u8 eth_addr[ETH_ALEN];
+ __le16 vlan_TCI;
+
+} __attribute__ ((aligned(64)));
+
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
+struct rcv_desc {
+ __le16 reference_handle;
+ __le16 reserved;
+ __le32 buffer_length; /* allocated buffer length (usually 2K) */
+ __le64 addr_buffer;
+} __packed;
+
+/* opcode field in status_desc */
+#define QLCNIC_SYN_OFFLOAD 0x03
+#define QLCNIC_RXPKT_DESC 0x04
+#define QLCNIC_OLD_RXPKT_DESC 0x3f
+#define QLCNIC_RESPONSE_DESC 0x05
+#define QLCNIC_LRO_DESC 0x12
+
+/* for status field in status_desc */
+#define STATUS_CKSUM_LOOP 0
+#define STATUS_CKSUM_OK 2
+
+/* owner bits of status_desc */
+#define STATUS_OWNER_HOST (0x1ULL << 56)
+#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
+
+/* Status descriptor:
+ 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+ 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
+ 53-55 desc_cnt, 56-57 owner, 58-63 opcode
+ */
+#define qlcnic_get_sts_port(sts_data) \
+ ((sts_data) & 0x0F)
+#define qlcnic_get_sts_status(sts_data) \
+ (((sts_data) >> 4) & 0x0F)
+#define qlcnic_get_sts_type(sts_data) \
+ (((sts_data) >> 8) & 0x0F)
+#define qlcnic_get_sts_totallength(sts_data) \
+ (((sts_data) >> 12) & 0xFFFF)
+#define qlcnic_get_sts_refhandle(sts_data) \
+ (((sts_data) >> 28) & 0xFFFF)
+#define qlcnic_get_sts_prot(sts_data) \
+ (((sts_data) >> 44) & 0x0F)
+#define qlcnic_get_sts_pkt_offset(sts_data) \
+ (((sts_data) >> 48) & 0x1F)
+#define qlcnic_get_sts_desc_cnt(sts_data) \
+ (((sts_data) >> 53) & 0x7)
+#define qlcnic_get_sts_opcode(sts_data) \
+ (((sts_data) >> 58) & 0x03F)
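+/*
+ * Worked decode example (illustrative): for a status word built as
+ * (0x04ULL << 58) | STATUS_OWNER_HOST | (1ULL << 53) | (1514ULL << 12),
+ * the getters above yield opcode QLCNIC_RXPKT_DESC (0x04), desc_cnt 1,
+ * total_length 1514 and port 0.
+ */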
+
+#define qlcnic_get_lro_sts_refhandle(sts_data) \
+ ((sts_data) & 0x0FFFF)
+#define qlcnic_get_lro_sts_length(sts_data) \
+ (((sts_data) >> 16) & 0x0FFFF)
+#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \
+ (((sts_data) >> 32) & 0x0FF)
+#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \
+ (((sts_data) >> 40) & 0x0FF)
+#define qlcnic_get_lro_sts_timestamp(sts_data) \
+ (((sts_data) >> 48) & 0x1)
+#define qlcnic_get_lro_sts_type(sts_data) \
+ (((sts_data) >> 49) & 0x7)
+#define qlcnic_get_lro_sts_push_flag(sts_data) \
+ (((sts_data) >> 52) & 0x1)
+#define qlcnic_get_lro_sts_seq_number(sts_data) \
+ ((sts_data) & 0x0FFFFFFFF)
+
+struct status_desc {
+ __le64 status_desc_data[2];
+} __attribute__ ((aligned(16)));
+
+/* UNIFIED ROMIMAGE */
+#define QLCNIC_UNI_FW_MIN_SIZE 0xc8000
+#define QLCNIC_UNI_DIR_SECT_PRODUCT_TBL 0x0
+#define QLCNIC_UNI_DIR_SECT_BOOTLD 0x6
+#define QLCNIC_UNI_DIR_SECT_FW 0x7
+
+/* Offsets */
+#define QLCNIC_UNI_CHIP_REV_OFF 10
+#define QLCNIC_UNI_FLAGS_OFF 11
+#define QLCNIC_UNI_BIOS_VERSION_OFF 12
+#define QLCNIC_UNI_BOOTLD_IDX_OFF 27
+#define QLCNIC_UNI_FIRMWARE_IDX_OFF 29
+
+struct uni_table_desc {
+ u32 findex;
+ u32 num_entries;
+ u32 entry_size;
+ u32 reserved[5];
+};
+
+struct uni_data_desc {
+ u32 findex;
+ u32 size;
+ u32 reserved[5];
+};
+
+/* Flash Defines and Structures */
+#define QLCNIC_FLT_LOCATION 0x3F1000
+#define QLCNIC_B0_FW_IMAGE_REGION 0x74
+#define QLCNIC_C0_FW_IMAGE_REGION 0x97
+#define QLCNIC_BOOTLD_REGION 0x72
+struct qlcnic_flt_header {
+ u16 version;
+ u16 len;
+ u16 checksum;
+ u16 reserved;
+};
+
+struct qlcnic_flt_entry {
+ u8 region;
+ u8 reserved0;
+ u8 attrib;
+ u8 reserved1;
+ u32 size;
+ u32 start_addr;
+ u32 end_addr;
+};
+
+/* Magic number to let user know flash is programmed */
+#define QLCNIC_BDINFO_MAGIC 0x12345678
+
+#define QLCNIC_BRDTYPE_P3P_REF_QG 0x0021
+#define QLCNIC_BRDTYPE_P3P_HMEZ 0x0022
+#define QLCNIC_BRDTYPE_P3P_10G_CX4_LP 0x0023
+#define QLCNIC_BRDTYPE_P3P_4_GB 0x0024
+#define QLCNIC_BRDTYPE_P3P_IMEZ 0x0025
+#define QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS 0x0026
+#define QLCNIC_BRDTYPE_P3P_10000_BASE_T 0x0027
+#define QLCNIC_BRDTYPE_P3P_XG_LOM 0x0028
+#define QLCNIC_BRDTYPE_P3P_4_GB_MM 0x0029
+#define QLCNIC_BRDTYPE_P3P_10G_SFP_CT 0x002a
+#define QLCNIC_BRDTYPE_P3P_10G_SFP_QT 0x002b
+#define QLCNIC_BRDTYPE_P3P_10G_CX4 0x0031
+#define QLCNIC_BRDTYPE_P3P_10G_XFP 0x0032
+#define QLCNIC_BRDTYPE_P3P_10G_TP 0x0080
+
+#define QLCNIC_MSIX_TABLE_OFFSET 0x44
+
+/* Flash memory map */
+#define QLCNIC_BRDCFG_START 0x4000 /* board config */
+#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
+#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
+#define QLCNIC_USER_START 0x3E8000 /* Firmware info */
+
+#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
+#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
+#define QLCNIC_FW_SERIAL_NUM_OFFSET (QLCNIC_USER_START+0x81c)
+#define QLCNIC_BIOS_VERSION_OFFSET (QLCNIC_USER_START+0x83c)
+
+#define QLCNIC_BRDTYPE_OFFSET (QLCNIC_BRDCFG_START+0x8)
+#define QLCNIC_FW_MAGIC_OFFSET (QLCNIC_BRDCFG_START+0x128)
+
+#define QLCNIC_FW_MIN_SIZE (0x3fffff)
+#define QLCNIC_UNIFIED_ROMIMAGE 0
+#define QLCNIC_FLASH_ROMIMAGE 1
+#define QLCNIC_UNKNOWN_ROMIMAGE 0xff
+
+#define QLCNIC_UNIFIED_ROMIMAGE_NAME "phanfw.bin"
+#define QLCNIC_FLASH_ROMIMAGE_NAME "flash"
+
+extern char qlcnic_driver_name[];
+
+/* Number of status descriptors to handle per interrupt */
+#define MAX_STATUS_HANDLE (64)
+
+/*
+ * qlcnic_skb_frag{} contains the mapping info for each SG list entry. It
+ * has to be freed when DMA is complete. It is part of qlcnic_cmd_buffer{}.
+ */
+struct qlcnic_skb_frag {
+ u64 dma;
+ u64 length;
+};
+
+/* Following defines are for the state of the buffers */
+#define QLCNIC_BUFFER_FREE 0
+#define QLCNIC_BUFFER_BUSY 1
+
+/*
+ * There will be one qlcnic_cmd_buffer per skb packet. These are
+ * used to save the dma info for pci_unmap_page().
+ */
+struct qlcnic_cmd_buffer {
+ struct sk_buff *skb;
+ struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1];
+ u32 frag_count;
+};
+
+/* In rx_buffer, we do not need multiple fragments since it is a single buffer */
+struct qlcnic_rx_buffer {
+ u16 ref_handle;
+ struct sk_buff *skb;
+ struct list_head list;
+ u64 dma;
+};
+
+/* Board types */
+#define QLCNIC_GBE 0x01
+#define QLCNIC_XGBE 0x02
+
+/*
+ * Interrupt coalescing defaults. The defaults are for a 1500-byte MTU
+ * and are adjusted based on the configured MTU.
+ */
+#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3
+#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256
+
+#define QLCNIC_INTR_DEFAULT 0x04
+#define QLCNIC_CONFIG_INTR_COALESCE 3
+
+struct qlcnic_nic_intr_coalesce {
+ u8 type;
+ u8 sts_ring_mask;
+ u16 rx_packets;
+ u16 rx_time_us;
+ u16 flag;
+ u32 timer_out;
+};
+
+struct qlcnic_dump_template_hdr {
+ __le32 type;
+ __le32 offset;
+ __le32 size;
+ __le32 cap_mask;
+ __le32 num_entries;
+ __le32 version;
+ __le32 timestamp;
+ __le32 checksum;
+ __le32 drv_cap_mask;
+ __le32 sys_info[3];
+ __le32 saved_state[16];
+ __le32 cap_sizes[8];
+ __le32 rsvd[0];
+};
+
+struct qlcnic_fw_dump {
+ u8 clr; /* flag to indicate if dump is cleared */
+ u8 enable; /* enable/disable dump */
+ u32 size; /* total size of the dump */
+ void *data; /* dump data area */
+ struct qlcnic_dump_template_hdr *tmpl_hdr;
+};
+
+/*
+ * One hardware_context{} per adapter
+ * contains interrupt info as well as shared hardware info.
+ */
+struct qlcnic_hardware_context {
+ void __iomem *pci_base0;
+ void __iomem *ocm_win_crb;
+
+ unsigned long pci_len0;
+
+ rwlock_t crb_lock;
+ struct mutex mem_lock;
+
+ u8 revision_id;
+ u8 pci_func;
+ u8 linkup;
+ u8 loopback_state;
+ u16 port_type;
+ u16 board_type;
+
+ u8 beacon_state;
+
+ struct qlcnic_nic_intr_coalesce coal;
+ struct qlcnic_fw_dump fw_dump;
+};
+
+struct qlcnic_adapter_stats {
+ u64 xmitcalled;
+ u64 xmitfinished;
+ u64 rxdropped;
+ u64 txdropped;
+ u64 csummed;
+ u64 rx_pkts;
+ u64 lro_pkts;
+ u64 rxbytes;
+ u64 txbytes;
+ u64 lrobytes;
+ u64 lso_frames;
+ u64 xmit_on;
+ u64 xmit_off;
+ u64 skb_alloc_failure;
+ u64 null_rxbuf;
+ u64 rx_dma_map_error;
+ u64 tx_dma_map_error;
+};
+
+/*
+ * Rcv Descriptor Context. One such per Rcv Descriptor. There may
+ * be one Rcv Descriptor for normal packets, one for jumbo packets, and
+ * possibly others.
+ */
+struct qlcnic_host_rds_ring {
+ void __iomem *crb_rcv_producer;
+ struct rcv_desc *desc_head;
+ struct qlcnic_rx_buffer *rx_buf_arr;
+ u32 num_desc;
+ u32 producer;
+ u32 dma_size;
+ u32 skb_size;
+ u32 flags;
+ struct list_head free_list;
+ spinlock_t lock;
+ dma_addr_t phys_addr;
+} ____cacheline_internodealigned_in_smp;
+
+struct qlcnic_host_sds_ring {
+ u32 consumer;
+ u32 num_desc;
+ void __iomem *crb_sts_consumer;
+
+ struct status_desc *desc_head;
+ struct qlcnic_adapter *adapter;
+ struct napi_struct napi;
+ struct list_head free_list[NUM_RCV_DESC_RINGS];
+
+ void __iomem *crb_intr_mask;
+ int irq;
+
+ dma_addr_t phys_addr;
+ char name[IFNAMSIZ+4];
+} ____cacheline_internodealigned_in_smp;
+
+struct qlcnic_host_tx_ring {
+ u32 producer;
+ u32 sw_consumer;
+ u32 num_desc;
+ void __iomem *crb_cmd_producer;
+ struct cmd_desc_type0 *desc_head;
+ struct qlcnic_cmd_buffer *cmd_buf_arr;
+ __le32 *hw_consumer;
+
+ dma_addr_t phys_addr;
+ dma_addr_t hw_cons_phys_addr;
+ struct netdev_queue *txq;
+} ____cacheline_internodealigned_in_smp;
+
+/*
+ * Receive context. There is one such structure per instance of the
+ * receive processing. Any state information that is relevant to
+ * the receive processing must be in this structure; global data may be
+ * present elsewhere.
+ */
+struct qlcnic_recv_context {
+ struct qlcnic_host_rds_ring *rds_rings;
+ struct qlcnic_host_sds_ring *sds_rings;
+ u32 state;
+ u16 context_id;
+ u16 virt_port;
+};
+
+/* HW context creation */
+
+#define QLCNIC_OS_CRB_RETRY_COUNT 4000
+#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \
+ (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
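+/*
+ * Illustrative example: for pci_func 0 and fw_hal_version 1 the
+ * signature is 0x100 | (0xcafe << 16) == 0xcafe0100.
+ */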
+
+#define QLCNIC_CDRP_CMD_BIT 0x80000000
+
+/*
+ * All responses must have the QLCNIC_CDRP_CMD_BIT cleared
+ * in the crb QLCNIC_CDRP_CRB_OFFSET.
+ */
+#define QLCNIC_CDRP_FORM_RSP(rsp) (rsp)
+#define QLCNIC_CDRP_IS_RSP(rsp) (((rsp) & QLCNIC_CDRP_CMD_BIT) == 0)
+
+#define QLCNIC_CDRP_RSP_OK 0x00000001
+#define QLCNIC_CDRP_RSP_FAIL 0x00000002
+#define QLCNIC_CDRP_RSP_TIMEOUT 0x00000003
+
+/*
+ * All commands must have the QLCNIC_CDRP_CMD_BIT set in
+ * the crb QLCNIC_CDRP_CRB_OFFSET.
+ */
+#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd))
+#define QLCNIC_CDRP_IS_CMD(cmd) (((cmd) & QLCNIC_CDRP_CMD_BIT) != 0)
+
+#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001
+#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002
+#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003
+#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004
+#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX 0x00000005
+#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX 0x00000006
+#define QLCNIC_CDRP_CMD_CREATE_RX_CTX 0x00000007
+#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008
+#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009
+#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
+#define QLCNIC_CDRP_CMD_INTRPT_TEST 0x00000011
+#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012
+#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013
+#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014
+#define QLCNIC_CDRP_CMD_READ_HW_REG 0x00000015
+#define QLCNIC_CDRP_CMD_GET_FLOW_CTL 0x00000016
+#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017
+#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018
+#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019
+#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f
+
+#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020
+#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021
+#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022
+#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024
+#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025
+#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026
+#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027
+#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028
+#define QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG 0x00000029
+#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATS 0x0000002a
+#define QLCNIC_CDRP_CMD_CONFIG_PORT 0x0000002E
+#define QLCNIC_CDRP_CMD_TEMP_SIZE 0x0000002f
+#define QLCNIC_CDRP_CMD_GET_TEMP_HDR 0x00000030
+
+#define QLCNIC_RCODE_SUCCESS 0
+#define QLCNIC_RCODE_NOT_SUPPORTED 9
+#define QLCNIC_RCODE_TIMEOUT 17
+#define QLCNIC_DESTROY_CTX_RESET 0
+
+/*
+ * Capabilities Announced
+ */
+#define QLCNIC_CAP0_LEGACY_CONTEXT (1)
+#define QLCNIC_CAP0_LEGACY_MN (1 << 2)
+#define QLCNIC_CAP0_LSO (1 << 6)
+#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
+#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
+#define QLCNIC_CAP0_VALIDOFF (1 << 11)
+
+/*
+ * Context state
+ */
+#define QLCNIC_HOST_CTX_STATE_FREED 0
+#define QLCNIC_HOST_CTX_STATE_ACTIVE 2
+
+/*
+ * Rx context
+ */
+
+struct qlcnic_hostrq_sds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le32 ring_size; /* Ring entries */
+ __le16 msi_index;
+ __le16 rsvd; /* Padding */
+} __packed;
+
+struct qlcnic_hostrq_rds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le64 buff_size; /* Packet buffer size */
+ __le32 ring_size; /* Ring entries */
+ __le32 ring_kind; /* Class of ring */
+} __packed;
+
+struct qlcnic_hostrq_rx_ctx {
+ __le64 host_rsp_dma_addr; /* Response dma'd here */
+ __le32 capabilities[4]; /* Flag bit vector */
+ __le32 host_int_crb_mode; /* Interrupt crb usage */
+ __le32 host_rds_crb_mode; /* RDS crb usage */
+ /* These ring offsets are relative to data[0] below */
+ __le32 rds_ring_offset; /* Offset to RDS config */
+ __le32 sds_ring_offset; /* Offset to SDS config */
+ __le16 num_rds_rings; /* Count of RDS rings */
+ __le16 num_sds_rings; /* Count of SDS rings */
+ __le16 valid_field_offset;
+ u8 txrx_sds_binding;
+ u8 msix_handler;
+	u8 reserved[128];	/* reserve space for future expansion */
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N hostrq_rds_rings
+ - N hostrq_sds_rings */
+ char data[0];
+} __packed;
+
+struct qlcnic_cardrsp_rds_ring {
+ __le32 host_producer_crb; /* Crb to use */
+ __le32 rsvd1; /* Padding */
+} __packed;
+
+struct qlcnic_cardrsp_sds_ring {
+ __le32 host_consumer_crb; /* Crb to use */
+ __le32 interrupt_crb; /* Crb to use */
+} __packed;
+
+struct qlcnic_cardrsp_rx_ctx {
+ /* These ring offsets are relative to data[0] below */
+ __le32 rds_ring_offset; /* Offset to RDS config */
+ __le32 sds_ring_offset; /* Offset to SDS config */
+ __le32 host_ctx_state; /* Starting State */
+ __le32 num_fn_per_port; /* How many PCI fn share the port */
+ __le16 num_rds_rings; /* Count of RDS rings */
+ __le16 num_sds_rings; /* Count of SDS rings */
+ __le16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ u8 reserved[128]; /* save space for future expansion */
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N cardrsp_rds_rings
+	 - N cardrsp_sds_rings */
+ char data[0];
+} __packed;
+
+#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
+ (sizeof(HOSTRQ_RX) + \
+ (rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) + \
+ (sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring)))
+
+#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \
+ (sizeof(CARDRSP_RX) + \
+ (rds_rings)*(sizeof(struct qlcnic_cardrsp_rds_ring)) + \
+ (sds_rings)*(sizeof(struct qlcnic_cardrsp_sds_ring)))
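+/*
+ * Sizing example (illustrative): with the structures as defined above,
+ * sizeof(struct qlcnic_hostrq_rds_ring) == 24 and
+ * sizeof(struct qlcnic_hostrq_sds_ring) == 16 (both __packed), so a
+ * request for 2 RDS and 4 SDS rings adds 2 * 24 + 4 * 16 == 112 bytes
+ * to sizeof(struct qlcnic_hostrq_rx_ctx).
+ */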
+
+/*
+ * Tx context
+ */
+
+struct qlcnic_hostrq_cds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le32 ring_size; /* Ring entries */
+ __le32 rsvd; /* Padding */
+} __packed;
+
+struct qlcnic_hostrq_tx_ctx {
+ __le64 host_rsp_dma_addr; /* Response dma'd here */
+ __le64 cmd_cons_dma_addr; /* */
+ __le64 dummy_dma_addr; /* */
+ __le32 capabilities[4]; /* Flag bit vector */
+ __le32 host_int_crb_mode; /* Interrupt crb usage */
+ __le32 rsvd1; /* Padding */
+ __le16 rsvd2; /* Padding */
+ __le16 interrupt_ctl;
+ __le16 msi_index;
+ __le16 rsvd3; /* Padding */
+ struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */
+ u8 reserved[128]; /* future expansion */
+} __packed;
+
+struct qlcnic_cardrsp_cds_ring {
+ __le32 host_producer_crb; /* Crb to use */
+ __le32 interrupt_crb; /* Crb to use */
+} __packed;
+
+struct qlcnic_cardrsp_tx_ctx {
+ __le32 host_ctx_state; /* Starting state */
+ __le16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */
+ u8 reserved[128]; /* future expansion */
+} __packed;
+
+#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
+#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
+
+/* CRB */
+
+#define QLCNIC_HOST_RDS_CRB_MODE_UNIQUE 0
+#define QLCNIC_HOST_RDS_CRB_MODE_SHARED 1
+#define QLCNIC_HOST_RDS_CRB_MODE_CUSTOM 2
+#define QLCNIC_HOST_RDS_CRB_MODE_MAX 3
+
+#define QLCNIC_HOST_INT_CRB_MODE_UNIQUE 0
+#define QLCNIC_HOST_INT_CRB_MODE_SHARED 1
+#define QLCNIC_HOST_INT_CRB_MODE_NORX 2
+#define QLCNIC_HOST_INT_CRB_MODE_NOTX 3
+#define QLCNIC_HOST_INT_CRB_MODE_NORXTX 4
+
+/* MAC */
+
+#define MC_COUNT_P3P 38
+
+#define QLCNIC_MAC_NOOP 0
+#define QLCNIC_MAC_ADD 1
+#define QLCNIC_MAC_DEL 2
+#define QLCNIC_MAC_VLAN_ADD 3
+#define QLCNIC_MAC_VLAN_DEL 4
+
+struct qlcnic_mac_list_s {
+ struct list_head list;
+ uint8_t mac_addr[ETH_ALEN+2];
+};
+
+#define QLCNIC_HOST_REQUEST 0x13
+#define QLCNIC_REQUEST 0x14
+
+#define QLCNIC_MAC_EVENT 0x1
+
+#define QLCNIC_IP_UP 2
+#define QLCNIC_IP_DOWN 3
+
+#define QLCNIC_ILB_MODE 0x1
+#define QLCNIC_ELB_MODE 0x2
+
+#define QLCNIC_LINKEVENT 0x1
+#define QLCNIC_LB_RESPONSE 0x2
+#define QLCNIC_IS_LB_CONFIGURED(VAL) \
+ (VAL == (QLCNIC_LINKEVENT | QLCNIC_LB_RESPONSE))
+
+/*
+ * Driver --> Firmware
+ */
+#define QLCNIC_H2C_OPCODE_CONFIG_RSS 0x1
+#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 0x3
+#define QLCNIC_H2C_OPCODE_CONFIG_LED 0x4
+#define QLCNIC_H2C_OPCODE_LRO_REQUEST 0x7
+#define QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE 0xc
+#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 0x12
+
+#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 0x15
+#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 0x17
+#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 0x18
+#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 0x13
+
+/*
+ * Firmware --> Driver
+ */
+
+#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f
+#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
+
+#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
+#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
+#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
+
+#define QLCNIC_LRO_REQUEST_CLEANUP 4
+
+/* Capabilities received */
+#define QLCNIC_FW_CAPABILITY_TSO BIT_1
+#define QLCNIC_FW_CAPABILITY_BDG BIT_8
+#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9
+#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10
+#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27
+
+/* module types */
+#define LINKEVENT_MODULE_NOT_PRESENT 1
+#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
+#define LINKEVENT_MODULE_OPTICAL_SRLR 3
+#define LINKEVENT_MODULE_OPTICAL_LRM 4
+#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7
+#define LINKEVENT_MODULE_TWINAX 8
+
+#define LINKSPEED_10GBPS 10000
+#define LINKSPEED_1GBPS 1000
+#define LINKSPEED_100MBPS 100
+#define LINKSPEED_10MBPS 10
+
+#define LINKSPEED_ENCODED_10MBPS 0
+#define LINKSPEED_ENCODED_100MBPS 1
+#define LINKSPEED_ENCODED_1GBPS 2
+
+#define LINKEVENT_AUTONEG_DISABLED 0
+#define LINKEVENT_AUTONEG_ENABLED 1
+
+#define LINKEVENT_HALF_DUPLEX 0
+#define LINKEVENT_FULL_DUPLEX 1
+
+#define LINKEVENT_LINKSPEED_MBPS 0
+#define LINKEVENT_LINKSPEED_ENCODED 1
+
+/* firmware response header:
+ * 63:58 - message type
+ * 57:56 - owner
+ * 55:53 - desc count
+ * 52:48 - reserved
+ * 47:40 - completion id
+ * 39:32 - opcode
+ * 31:16 - error code
+ * 15:00 - reserved
+ */
+#define qlcnic_get_nic_msg_opcode(msg_hdr) \
+ ((msg_hdr >> 32) & 0xFF)
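+/*
+ * Illustrative example: a header of ((u64)141 << 32) carries opcode
+ * QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE, i.e.
+ * qlcnic_get_nic_msg_opcode(hdr) == 141.
+ */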
+
+struct qlcnic_fw_msg {
+ union {
+ struct {
+ u64 hdr;
+ u64 body[7];
+ };
+ u64 words[8];
+ };
+};
+
+struct qlcnic_nic_req {
+ __le64 qhdr;
+ __le64 req_hdr;
+ __le64 words[6];
+} __packed;
+
+struct qlcnic_mac_req {
+ u8 op;
+ u8 tag;
+ u8 mac_addr[6];
+};
+
+struct qlcnic_vlan_req {
+ __le16 vlan_id;
+ __le16 rsvd[3];
+} __packed;
+
+struct qlcnic_ipaddr {
+ __be32 ipv4;
+ __be32 ipv6[4];
+};
+
+#define QLCNIC_MSI_ENABLED 0x02
+#define QLCNIC_MSIX_ENABLED 0x04
+#define QLCNIC_LRO_ENABLED 0x08
+#define QLCNIC_LRO_DISABLED 0x00
+#define QLCNIC_BRIDGE_ENABLED 0x10
+#define QLCNIC_DIAG_ENABLED 0x20
+#define QLCNIC_ESWITCH_ENABLED 0x40
+#define QLCNIC_ADAPTER_INITIALIZED 0x80
+#define QLCNIC_TAGGING_ENABLED 0x100
+#define QLCNIC_MACSPOOF 0x200
+#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400
+#define QLCNIC_PROMISC_DISABLED 0x800
+#define QLCNIC_NEED_FLR 0x1000
+#define QLCNIC_FW_RESET_OWNER 0x2000
+#define QLCNIC_FW_HANG 0x4000
+#define QLCNIC_IS_MSI_FAMILY(adapter) \
+ ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
+
+#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4
+#define QLCNIC_MSIX_TBL_SPACE 8192
+#define QLCNIC_PCI_REG_MSIX_TBL 0x44
+#define QLCNIC_MSIX_TBL_PGSIZE 4096
+
+#define QLCNIC_NETDEV_WEIGHT 128
+#define QLCNIC_ADAPTER_UP_MAGIC 777
+
+#define __QLCNIC_FW_ATTACHED 0
+#define __QLCNIC_DEV_UP 1
+#define __QLCNIC_RESETTING 2
+#define __QLCNIC_START_FW 4
+#define __QLCNIC_AER 5
+#define __QLCNIC_DIAG_RES_ALLOC 6
+#define __QLCNIC_LED_ENABLE 7
+
+#define QLCNIC_INTERRUPT_TEST 1
+#define QLCNIC_LOOPBACK_TEST 2
+#define QLCNIC_LED_TEST 3
+
+#define QLCNIC_FILTER_AGE 80
+#define QLCNIC_READD_AGE 20
+#define QLCNIC_LB_MAX_FILTERS 64
+
+/* QLCNIC Driver Error Code */
+#define QLCNIC_FW_NOT_RESPOND 51
+#define QLCNIC_TEST_IN_PROGRESS 52
+#define QLCNIC_UNDEFINED_ERROR 53
+#define QLCNIC_LB_CABLE_NOT_CONN 54
+
+struct qlcnic_filter {
+ struct hlist_node fnode;
+ u8 faddr[ETH_ALEN];
+ __le16 vlan_id;
+ unsigned long ftime;
+};
+
+struct qlcnic_filter_hash {
+ struct hlist_head *fhead;
+ u8 fnum;
+ u8 fmax;
+};
+
+struct qlcnic_adapter {
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
+ unsigned long state;
+ u32 flags;
+
+ u16 num_txd;
+ u16 num_rxd;
+ u16 num_jumbo_rxd;
+ u16 max_rxd;
+ u16 max_jumbo_rxd;
+
+ u8 max_rds_rings;
+ u8 max_sds_rings;
+ u8 msix_supported;
+ u8 portnum;
+ u8 physical_port;
+ u8 reset_context;
+
+ u8 mc_enabled;
+ u8 max_mc_count;
+ u8 fw_wait_cnt;
+ u8 fw_fail_cnt;
+ u8 tx_timeo_cnt;
+ u8 need_fw_reset;
+
+ u8 has_link_events;
+ u8 fw_type;
+ u16 tx_context_id;
+ u16 is_up;
+
+ u16 link_speed;
+ u16 link_duplex;
+ u16 link_autoneg;
+ u16 module_type;
+
+ u16 op_mode;
+ u16 switch_mode;
+ u16 max_tx_ques;
+ u16 max_rx_ques;
+ u16 max_mtu;
+ u16 pvid;
+
+ u32 fw_hal_version;
+ u32 capabilities;
+ u32 irq;
+ u32 temp;
+
+ u32 int_vec_bit;
+ u32 heartbeat;
+
+ u8 max_mac_filters;
+ u8 dev_state;
+ u8 diag_test;
+ char diag_cnt;
+ u8 reset_ack_timeo;
+ u8 dev_init_timeo;
+ u16 msg_enable;
+
+ u8 mac_addr[ETH_ALEN];
+
+ u64 dev_rst_time;
+ u8 mac_learn;
+ unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+ struct qlcnic_npar_info *npars;
+ struct qlcnic_eswitch *eswitch;
+ struct qlcnic_nic_template *nic_ops;
+
+ struct qlcnic_adapter_stats stats;
+ struct list_head mac_list;
+
+ void __iomem *tgt_mask_reg;
+ void __iomem *tgt_status_reg;
+ void __iomem *crb_int_state_reg;
+ void __iomem *isr_int_vec;
+
+ struct msix_entry *msix_entries;
+
+ struct delayed_work fw_work;
+
+ struct qlcnic_filter_hash fhash;
+
+ spinlock_t tx_clean_lock;
+ spinlock_t mac_learn_lock;
+	__le32 file_prd_off;	/* File fw product offset */
+ u32 fw_version;
+ const struct firmware *fw;
+};
+
+struct qlcnic_info {
+ __le16 pci_func;
+ __le16 op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */
+ __le16 phys_port;
+ __le16 switch_mode; /* 0 = disabled, 1 = int, 2 = ext */
+
+ __le32 capabilities;
+ u8 max_mac_filters;
+ u8 reserved1;
+ __le16 max_mtu;
+
+ __le16 max_tx_ques;
+ __le16 max_rx_ques;
+ __le16 min_tx_bw;
+ __le16 max_tx_bw;
+ u8 reserved2[104];
+} __packed;
+
+struct qlcnic_pci_info {
+ __le16 id; /* pci function id */
+ __le16 active; /* 1 = Enabled */
+ __le16 type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */
+ __le16 default_port; /* default port number */
+
+	__le16 tx_min_bw;	/* Multiple of 100 Mbps */
+ __le16 tx_max_bw;
+ __le16 reserved1[2];
+
+ u8 mac[ETH_ALEN];
+ u8 reserved2[106];
+} __packed;
+
+struct qlcnic_npar_info {
+ u16 pvid;
+ u16 min_bw;
+ u16 max_bw;
+ u8 phy_port;
+ u8 type;
+ u8 active;
+ u8 enable_pm;
+ u8 dest_npar;
+ u8 discard_tagged;
+ u8 mac_override;
+ u8 mac_anti_spoof;
+ u8 promisc_mode;
+ u8 offload_flags;
+};
+
+struct qlcnic_eswitch {
+ u8 port;
+ u8 active_vports;
+ u8 active_vlans;
+ u8 active_ucast_filters;
+ u8 max_ucast_filters;
+ u8 max_active_vlans;
+
+ u32 flags;
+#define QLCNIC_SWITCH_ENABLE BIT_1
+#define QLCNIC_SWITCH_VLAN_FILTERING BIT_2
+#define QLCNIC_SWITCH_PROMISC_MODE BIT_3
+#define QLCNIC_SWITCH_PORT_MIRRORING BIT_4
+};
+
+/* Return codes for Error handling */
+#define QL_STATUS_INVALID_PARAM -1
+
+#define MAX_BW 100 /* % of link speed */
+#define MAX_VLAN_ID 4095
+#define MIN_VLAN_ID 2
+#define DEFAULT_MAC_LEARN 1
+
+#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
+#define IS_VALID_BW(bw) (bw <= MAX_BW)
+
+struct qlcnic_pci_func_cfg {
+ u16 func_type;
+ u16 min_bw;
+ u16 max_bw;
+ u16 port_num;
+ u8 pci_func;
+ u8 func_state;
+ u8 def_mac_addr[6];
+};
+
+struct qlcnic_npar_func_cfg {
+ u32 fw_capab;
+ u16 port_num;
+ u16 min_bw;
+ u16 max_bw;
+ u16 max_tx_queues;
+ u16 max_rx_queues;
+ u8 pci_func;
+ u8 op_mode;
+};
+
+struct qlcnic_pm_func_cfg {
+ u8 pci_func;
+ u8 action;
+ u8 dest_npar;
+ u8 reserved[5];
+};
+
+struct qlcnic_esw_func_cfg {
+ u16 vlan_id;
+ u8 op_mode;
+ u8 op_type;
+ u8 pci_func;
+ u8 host_vlan_tag;
+ u8 promisc_mode;
+ u8 discard_tagged;
+ u8 mac_override;
+ u8 mac_anti_spoof;
+ u8 offload_flags;
+ u8 reserved[5];
+};
+
+#define QLCNIC_STATS_VERSION 1
+#define QLCNIC_STATS_PORT 1
+#define QLCNIC_STATS_ESWITCH 2
+#define QLCNIC_QUERY_RX_COUNTER 0
+#define QLCNIC_QUERY_TX_COUNTER 1
+#define QLCNIC_ESW_STATS_NOT_AVAIL 0xffffffffffffffffULL
+
+#define QLCNIC_ADD_ESW_STATS(VAL1, VAL2)\
+do { \
+ if (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) && \
+ ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \
+ (VAL1) = (VAL2); \
+ else if (((VAL1) != QLCNIC_ESW_STATS_NOT_AVAIL) && \
+ ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \
+ (VAL1) += (VAL2); \
+} while (0)
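+
+/*
+ * Usage note (illustrative): when folding per-port counters into an
+ * aggregate, e.g. QLCNIC_ADD_ESW_STATS(total.unicast_frames,
+ * port.unicast_frames), the macro seeds the aggregate if it was
+ * "not available", adds when both sides are valid, and leaves the
+ * aggregate untouched when the new value is not available.
+ */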
+
+struct __qlcnic_esw_statistics {
+ __le16 context_id;
+ __le16 version;
+ __le16 size;
+ __le16 unused;
+ __le64 unicast_frames;
+ __le64 multicast_frames;
+ __le64 broadcast_frames;
+ __le64 dropped_frames;
+ __le64 errors;
+ __le64 local_frames;
+ __le64 numbytes;
+ __le64 rsvd[3];
+} __packed;
+
+struct qlcnic_esw_statistics {
+ struct __qlcnic_esw_statistics rx;
+ struct __qlcnic_esw_statistics tx;
+};
+
+struct qlcnic_common_entry_hdr {
+ __le32 type;
+ __le32 offset;
+ __le32 cap_size;
+ u8 mask;
+ u8 rsvd[2];
+ u8 flags;
+} __packed;
+
+struct __crb {
+ __le32 addr;
+ u8 stride;
+ u8 rsvd1[3];
+ __le32 data_size;
+ __le32 no_ops;
+ __le32 rsvd2[4];
+} __packed;
+
+struct __ctrl {
+ __le32 addr;
+ u8 stride;
+ u8 index_a;
+ __le16 timeout;
+ __le32 data_size;
+ __le32 no_ops;
+ u8 opcode;
+ u8 index_v;
+ u8 shl_val;
+ u8 shr_val;
+ __le32 val1;
+ __le32 val2;
+ __le32 val3;
+} __packed;
+
+struct __cache {
+ __le32 addr;
+ __le16 stride;
+ __le16 init_tag_val;
+ __le32 size;
+ __le32 no_ops;
+ __le32 ctrl_addr;
+ __le32 ctrl_val;
+ __le32 read_addr;
+ u8 read_addr_stride;
+ u8 read_addr_num;
+ u8 rsvd1[2];
+} __packed;
+
+struct __ocm {
+ u8 rsvd[8];
+ __le32 size;
+ __le32 no_ops;
+ u8 rsvd1[8];
+ __le32 read_addr;
+ __le32 read_addr_stride;
+} __packed;
+
+struct __mem {
+ u8 rsvd[24];
+ __le32 addr;
+ __le32 size;
+} __packed;
+
+struct __mux {
+ __le32 addr;
+ u8 rsvd[4];
+ __le32 size;
+ __le32 no_ops;
+ __le32 val;
+ __le32 val_stride;
+ __le32 read_addr;
+ u8 rsvd2[4];
+} __packed;
+
+struct __queue {
+ __le32 sel_addr;
+ __le16 stride;
+ u8 rsvd[2];
+ __le32 size;
+ __le32 no_ops;
+ u8 rsvd2[8];
+ __le32 read_addr;
+ u8 read_addr_stride;
+ u8 read_addr_cnt;
+ u8 rsvd3[2];
+} __packed;
+
+struct qlcnic_dump_entry {
+ struct qlcnic_common_entry_hdr hdr;
+ union {
+ struct __crb crb;
+ struct __cache cache;
+ struct __ocm ocm;
+ struct __mem mem;
+ struct __mux mux;
+ struct __queue que;
+ struct __ctrl ctrl;
+ } region;
+} __packed;
+
+enum op_codes {
+ QLCNIC_DUMP_NOP = 0,
+ QLCNIC_DUMP_READ_CRB = 1,
+ QLCNIC_DUMP_READ_MUX = 2,
+ QLCNIC_DUMP_QUEUE = 3,
+ QLCNIC_DUMP_BRD_CONFIG = 4,
+ QLCNIC_DUMP_READ_OCM = 6,
+ QLCNIC_DUMP_PEG_REG = 7,
+ QLCNIC_DUMP_L1_DTAG = 8,
+ QLCNIC_DUMP_L1_ITAG = 9,
+ QLCNIC_DUMP_L1_DATA = 11,
+ QLCNIC_DUMP_L1_INST = 12,
+ QLCNIC_DUMP_L2_DTAG = 21,
+ QLCNIC_DUMP_L2_ITAG = 22,
+ QLCNIC_DUMP_L2_DATA = 23,
+ QLCNIC_DUMP_L2_INST = 24,
+ QLCNIC_DUMP_READ_ROM = 71,
+ QLCNIC_DUMP_READ_MEM = 72,
+ QLCNIC_DUMP_READ_CTRL = 98,
+ QLCNIC_DUMP_TLHDR = 99,
+ QLCNIC_DUMP_RDEND = 255
+};
+
+#define QLCNIC_DUMP_WCRB BIT_0
+#define QLCNIC_DUMP_RWCRB BIT_1
+#define QLCNIC_DUMP_ANDCRB BIT_2
+#define QLCNIC_DUMP_ORCRB BIT_3
+#define QLCNIC_DUMP_POLLCRB BIT_4
+#define QLCNIC_DUMP_RD_SAVE BIT_5
+#define QLCNIC_DUMP_WRT_SAVED BIT_6
+#define QLCNIC_DUMP_MOD_SAVE_ST BIT_7
+#define QLCNIC_DUMP_SKIP BIT_7
+
+#define QLCNIC_DUMP_MASK_MIN 3
+#define QLCNIC_DUMP_MASK_DEF 0x1f
+#define QLCNIC_DUMP_MASK_MAX 0xff
+#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed
+#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed
+#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed
+#define QLCNIC_FORCE_FW_RESET 0xdeaddead
+
+struct qlcnic_dump_operations {
+ enum op_codes opcode;
+ u32 (*handler)(struct qlcnic_adapter *,
+ struct qlcnic_dump_entry *, u32 *);
+};
+
+struct _cdrp_cmd {
+ u32 cmd;
+ u32 arg1;
+ u32 arg2;
+ u32 arg3;
+};
+
+struct qlcnic_cmd_args {
+ struct _cdrp_cmd req;
+ struct _cdrp_cmd rsp;
+};
+
+int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
+int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config);
+
+u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
+int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
+int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
+int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
+void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *);
+void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
+
+#define ADDR_IN_RANGE(addr, low, high) \
+ (((addr) < (high)) && ((addr) >= (low)))
+
+#define QLCRD32(adapter, off) \
+ (qlcnic_hw_read_wx_2M(adapter, off))
+#define QLCWR32(adapter, off, val) \
+ (qlcnic_hw_write_wx_2M(adapter, off, val))
+
+int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32);
+void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
+
+#define qlcnic_rom_lock(a) \
+ qlcnic_pcie_sem_lock((a), 2, QLCNIC_ROM_LOCK_ID)
+#define qlcnic_rom_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 2)
+#define qlcnic_phy_lock(a) \
+ qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID)
+#define qlcnic_phy_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 3)
+#define qlcnic_api_lock(a) \
+ qlcnic_pcie_sem_lock((a), 5, 0)
+#define qlcnic_api_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 5)
+#define qlcnic_sw_lock(a) \
+ qlcnic_pcie_sem_lock((a), 6, 0)
+#define qlcnic_sw_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 6)
+#define crb_win_lock(a) \
+ qlcnic_pcie_sem_lock((a), 7, QLCNIC_CRB_WIN_LOCK_ID)
+#define crb_win_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 7)
+
+#define __QLCNIC_MAX_LED_RATE 0xf
+#define __QLCNIC_MAX_LED_STATE 0x2
+
+int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
+int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
+int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
+void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
+void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
+int qlcnic_dump_fw(struct qlcnic_adapter *);
+
+/* Functions from qlcnic_init.c */
+int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
+int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
+void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
+void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
+int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
+int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter);
+int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter);
+
+int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp);
+int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size);
+int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter);
+void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter);
+
+void __iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32);
+
+int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
+void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
+
+int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter);
+void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter);
+
+void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
+void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
+
+int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
+void qlcnic_watchdog_task(struct work_struct *work);
+void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring);
+int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
+void qlcnic_set_multi(struct net_device *netdev);
+void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
+int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
+int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter);
+int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable);
+int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd);
+int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable);
+void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
+
+int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
+int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
+u32 qlcnic_fix_features(struct net_device *netdev, u32 features);
+int qlcnic_set_features(struct net_device *netdev, u32 features);
+int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
+int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
+void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring);
+void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
+void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
+void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter);
+int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode);
+
+/* Functions from qlcnic_ethtool.c */
+int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]);
+
+/* Functions from qlcnic_main.c */
+int qlcnic_reset_context(struct qlcnic_adapter *);
+void qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *);
+void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
+int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
+netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val);
+int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data);
+void qlcnic_dev_request_reset(struct qlcnic_adapter *);
+void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
+
+/* Management functions */
+int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
+int qlcnic_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
+int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+int qlcnic_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
+
+/* eSwitch management functions */
+int qlcnic_config_switch_port(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
+int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
+int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8);
+int qlcnic_get_port_stats(struct qlcnic_adapter *, const u8, const u8,
+ struct __qlcnic_esw_statistics *);
+int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8,
+ struct __qlcnic_esw_statistics *);
+int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8);
+extern int qlcnic_config_tso;
+
+/*
+ * QLOGIC Board information
+ */
+
+#define QLCNIC_MAX_BOARD_NAME_LEN 100
+struct qlcnic_brdinfo {
+ unsigned short vendor;
+ unsigned short device;
+ unsigned short sub_vendor;
+ unsigned short sub_device;
+ char short_name[QLCNIC_MAX_BOARD_NAME_LEN];
+};
+
+static const struct qlcnic_brdinfo qlcnic_boards[] = {
+ {0x1077, 0x8020, 0x1077, 0x203,
+ "8200 Series Single Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)"},
+ {0x1077, 0x8020, 0x1077, 0x207,
+ "8200 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)"},
+ {0x1077, 0x8020, 0x1077, 0x20b,
+ "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"},
+ {0x1077, 0x8020, 0x1077, 0x20c,
+ "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
+ {0x1077, 0x8020, 0x1077, 0x20f,
+ "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
+ {0x1077, 0x8020, 0x103c, 0x3733,
+ "NC523SFP 10Gb 2-port Server Adapter"},
+ {0x1077, 0x8020, 0x103c, 0x3346,
+ "CN1000Q Dual Port Converged Network Adapter"},
+ {0x1077, 0x8020, 0x1077, 0x210,
+ "QME8242-k 10GbE Dual Port Mezzanine Card"},
+ {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
+};
+
+#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
+
+static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (likely(tx_ring->producer < tx_ring->sw_consumer))
+ return tx_ring->sw_consumer - tx_ring->producer;
+ else
+ return tx_ring->sw_consumer + tx_ring->num_desc -
+ tx_ring->producer;
+}
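+
+/*
+ * Illustrative example: with num_desc == 1024, producer == 100 and
+ * sw_consumer == 50, the ring has wrapped and 50 + 1024 - 100 == 974
+ * descriptors remain; with producer == 50 and sw_consumer == 100,
+ * 100 - 50 == 50 remain. This is the value the transmit path can
+ * compare against TX_STOP_THRESH before stopping the queue.
+ */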
+
+extern const struct ethtool_ops qlcnic_ethtool_ops;
+
+struct qlcnic_nic_template {
+ int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
+ int (*config_led) (struct qlcnic_adapter *, u32, u32);
+ int (*start_firmware) (struct qlcnic_adapter *);
+};
+
+#define QLCDB(adapter, lvl, _fmt, _args...) do { \
+ if (NETIF_MSG_##lvl & adapter->msg_enable) \
+ printk(KERN_INFO "%s: %s: " _fmt, \
+ dev_name(&adapter->pdev->dev), \
+ __func__, ##_args); \
+ } while (0)
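+
+/*
+ * Usage example (illustrative): QLCDB(adapter, DRV, "ctx id %d\n", id)
+ * expands the level to NETIF_MSG_DRV and prints only when that bit is
+ * set in adapter->msg_enable.
+ */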
+
+#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
new file mode 100644
index 000000000000..569a837d2ac4
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -0,0 +1,1127 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+
+static u32
+qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
+{
+ u32 rsp;
+ int timeout = 0;
+
+ do {
+		/* give at least 1ms for firmware to respond */
+ msleep(1);
+
+ if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
+ return QLCNIC_CDRP_RSP_TIMEOUT;
+
+ rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
+ } while (!QLCNIC_CDRP_IS_RSP(rsp));
+
+ return rsp;
+}
+
+void
+qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd)
+{
+ u32 rsp;
+ u32 signature;
+ struct pci_dev *pdev = adapter->pdev;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ signature = QLCNIC_CDRP_SIGNATURE_MAKE(ahw->pci_func,
+ adapter->fw_hal_version);
+
+ /* Acquire semaphore before accessing CRB */
+ if (qlcnic_api_lock(adapter)) {
+ cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
+ return;
+ }
+
+ QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
+ QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, cmd->req.arg1);
+ QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, cmd->req.arg2);
+ QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, cmd->req.arg3);
+ QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET,
+ QLCNIC_CDRP_FORM_CMD(cmd->req.cmd));
+
+ rsp = qlcnic_poll_rsp(adapter);
+
+ if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
+ dev_err(&pdev->dev, "card response timeout.\n");
+ cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
+ } else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
+ cmd->rsp.cmd = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+ dev_err(&pdev->dev, "failed card response code:0x%x\n",
+ cmd->rsp.cmd);
+ } else if (rsp == QLCNIC_CDRP_RSP_OK) {
+ cmd->rsp.cmd = QLCNIC_RCODE_SUCCESS;
+ if (cmd->rsp.arg2)
+ cmd->rsp.arg2 = QLCRD32(adapter,
+ QLCNIC_ARG2_CRB_OFFSET);
+ if (cmd->rsp.arg3)
+ cmd->rsp.arg3 = QLCRD32(adapter,
+ QLCNIC_ARG3_CRB_OFFSET);
+ }
+ if (cmd->rsp.arg1)
+ cmd->rsp.arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+
+ /* Release semaphore */
+ qlcnic_api_unlock(adapter);
+}
+
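+/*
+ * 32-bit folded-sum checksum over the dump template header. The template
+ * presumably embeds a checksum word chosen so that the folded sum of the
+ * whole buffer is 0xffffffff, making this helper return 0 for an intact
+ * template (callers treat any non-zero result as corruption).
+ */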
+static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u16 temp_size)
+{
+ uint64_t sum = 0;
+ int count = temp_size / sizeof(uint32_t);
+ while (count-- > 0)
+ sum += *temp_buffer++;
+ while (sum >> 32)
+ sum = (sum & 0xFFFFFFFF) + (sum >> 32);
+ return ~sum;
+}
+
+int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
+{
+ int err, i;
+ u16 temp_size;
+ void *tmp_addr;
+ u32 version, csum, *template, *tmp_buf;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_dump_template_hdr *tmpl_hdr, *tmp_tmpl;
+ dma_addr_t tmp_addr_t = 0;
+
+ ahw = adapter->ahw;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_TEMP_SIZE;
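+	/* mark rsp args non-zero so qlcnic_issue_cmd() reads them back */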
+ memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
+ qlcnic_issue_cmd(adapter, &cmd);
+ if (cmd.rsp.cmd != QLCNIC_RCODE_SUCCESS) {
+ dev_info(&adapter->pdev->dev,
+ "Can't get template size %d\n", cmd.rsp.cmd);
+ err = -EIO;
+ return err;
+ }
+ temp_size = cmd.rsp.arg2;
+ version = cmd.rsp.arg3;
+ if (!temp_size)
+ return -EIO;
+
+ tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
+ &tmp_addr_t, GFP_KERNEL);
+ if (!tmp_addr) {
+ dev_err(&adapter->pdev->dev,
+ "Can't get memory for FW dump template\n");
+ return -ENOMEM;
+ }
+ memset(&cmd.rsp, 0, sizeof(struct _cdrp_cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_GET_TEMP_HDR;
+ cmd.req.arg1 = LSD(tmp_addr_t);
+ cmd.req.arg2 = MSD(tmp_addr_t);
+ cmd.req.arg3 = temp_size;
+ qlcnic_issue_cmd(adapter, &cmd);
+
+ err = cmd.rsp.cmd;
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get mini dump template header %d\n", err);
+ err = -EIO;
+ goto error;
+ }
+ tmp_tmpl = tmp_addr;
+ csum = qlcnic_temp_checksum((uint32_t *) tmp_addr, temp_size);
+ if (csum) {
+ dev_err(&adapter->pdev->dev,
+ "Template header checksum validation failed\n");
+ err = -EIO;
+ goto error;
+ }
+ ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
+ if (!ahw->fw_dump.tmpl_hdr) {
+ err = -EIO;
+ goto error;
+ }
+ tmp_buf = tmp_addr;
+ template = (u32 *) ahw->fw_dump.tmpl_hdr;
+ for (i = 0; i < temp_size/sizeof(u32); i++)
+ *template++ = __le32_to_cpu(*tmp_buf++);
+
+ tmpl_hdr = ahw->fw_dump.tmpl_hdr;
+ tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
+ ahw->fw_dump.enable = 1;
+error:
+ dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
+ return err;
+}
+
+int
+qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
+{
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_SET_MTU;
+ cmd.req.arg1 = recv_ctx->context_id;
+ cmd.req.arg2 = mtu;
+ cmd.req.arg3 = 0;
+ if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
+ qlcnic_issue_cmd(adapter, &cmd);
+ if (cmd.rsp.cmd) {
+ dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int
+qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ void *addr;
+ struct qlcnic_hostrq_rx_ctx *prq;
+ struct qlcnic_cardrsp_rx_ctx *prsp;
+ struct qlcnic_hostrq_rds_ring *prq_rds;
+ struct qlcnic_hostrq_sds_ring *prq_sds;
+ struct qlcnic_cardrsp_rds_ring *prsp_rds;
+ struct qlcnic_cardrsp_sds_ring *prsp_sds;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_cmd_args cmd;
+
+ dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+ u64 phys_addr;
+
+ u8 i, nrds_rings, nsds_rings;
+ size_t rq_size, rsp_size;
+ u32 cap, reg, val, reg2;
+ int err;
+
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ nrds_rings = adapter->max_rds_rings;
+ nsds_rings = adapter->max_sds_rings;
+
+ rq_size =
+ SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
+ nsds_rings);
+ rsp_size =
+ SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
+ nsds_rings);
+
+ addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+ &hostrq_phys_addr, GFP_KERNEL);
+ if (addr == NULL)
+ return -ENOMEM;
+ prq = addr;
+
+ addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+ &cardrsp_phys_addr, GFP_KERNEL);
+ if (addr == NULL) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+ prsp = addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
+
+ cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
+ | QLCNIC_CAP0_VALIDOFF);
+ cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
+
+ prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx,
+ msix_handler);
+ prq->txrx_sds_binding = nsds_rings - 1;
+
+ prq->capabilities[0] = cpu_to_le32(cap);
+ prq->host_int_crb_mode =
+ cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
+ prq->host_rds_crb_mode =
+ cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
+
+ prq->num_rds_rings = cpu_to_le16(nrds_rings);
+ prq->num_sds_rings = cpu_to_le16(nsds_rings);
+ prq->rds_ring_offset = 0;
+
+ val = le32_to_cpu(prq->rds_ring_offset) +
+ (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
+ prq->sds_ring_offset = cpu_to_le32(val);
+
+ prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
+ le32_to_cpu(prq->rds_ring_offset));
+
+ for (i = 0; i < nrds_rings; i++) {
+
+ rds_ring = &recv_ctx->rds_rings[i];
+ rds_ring->producer = 0;
+
+ prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
+ prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
+ prq_rds[i].ring_kind = cpu_to_le32(i);
+ prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
+ }
+
+ prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
+ le32_to_cpu(prq->sds_ring_offset));
+
+ for (i = 0; i < nsds_rings; i++) {
+
+ sds_ring = &recv_ctx->sds_rings[i];
+ sds_ring->consumer = 0;
+ memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
+
+ prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
+ prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
+ prq_sds[i].msi_index = cpu_to_le16(i);
+ }
+
+ phys_addr = hostrq_phys_addr;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = (u32) (phys_addr >> 32);
+ cmd.req.arg2 = (u32) (phys_addr & 0xffffffff);
+ cmd.req.arg3 = rq_size;
+ cmd.req.cmd = QLCNIC_CDRP_CMD_CREATE_RX_CTX;
+ qlcnic_issue_cmd(adapter, &cmd);
+ err = cmd.rsp.cmd;
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+			"Failed to create rx ctx in firmware %d\n", err);
+ goto out_free_rsp;
+ }
+
+ prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
+ &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
+
+ for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
+ rds_ring = &recv_ctx->rds_rings[i];
+
+ reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
+ rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
+ }
+
+ prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
+ &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
+
+ for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
+ sds_ring = &recv_ctx->sds_rings[i];
+
+ reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
+ reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
+
+ sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
+ sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
+ }
+
+ recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
+ recv_ctx->context_id = le16_to_cpu(prsp->context_id);
+ recv_ctx->virt_port = prsp->virt_port;
+
+out_free_rsp:
+ dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
+ cardrsp_phys_addr);
+out_free_rq:
+ dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
+ return err;
+}
+
+static void
+qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = recv_ctx->context_id;
+ cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET;
+ cmd.req.arg3 = 0;
+ cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_RX_CTX;
+ qlcnic_issue_cmd(adapter, &cmd);
+ if (cmd.rsp.cmd)
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy rx ctx in firmware\n");
+
+ recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
+}
+
+static int
+qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hostrq_tx_ctx *prq;
+ struct qlcnic_hostrq_cds_ring *prq_cds;
+ struct qlcnic_cardrsp_tx_ctx *prsp;
+ void *rq_addr, *rsp_addr;
+ size_t rq_size, rsp_size;
+ u32 temp;
+ struct qlcnic_cmd_args cmd;
+ int err;
+ u64 phys_addr;
+ dma_addr_t rq_phys_addr, rsp_phys_addr;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ /* reset host resources */
+ tx_ring->producer = 0;
+ tx_ring->sw_consumer = 0;
+ *(tx_ring->hw_consumer) = 0;
+
+ rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
+ rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+ &rq_phys_addr, GFP_KERNEL);
+ if (!rq_addr)
+ return -ENOMEM;
+
+ rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
+ rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+ &rsp_phys_addr, GFP_KERNEL);
+ if (!rsp_addr) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+
+ memset(rq_addr, 0, rq_size);
+ prq = rq_addr;
+
+ memset(rsp_addr, 0, rsp_size);
+ prsp = rsp_addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
+
+ temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
+ QLCNIC_CAP0_LSO);
+ prq->capabilities[0] = cpu_to_le32(temp);
+
+ prq->host_int_crb_mode =
+ cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
+
+ prq->interrupt_ctl = 0;
+ prq->msi_index = 0;
+ prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
+
+ prq_cds = &prq->cds_ring;
+
+ prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
+ prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
+
+ phys_addr = rq_phys_addr;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = (u32)(phys_addr >> 32);
+ cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
+ cmd.req.arg3 = rq_size;
+ cmd.req.cmd = QLCNIC_CDRP_CMD_CREATE_TX_CTX;
+ qlcnic_issue_cmd(adapter, &cmd);
+ err = cmd.rsp.cmd;
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
+ tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
+
+ adapter->tx_context_id =
+ le16_to_cpu(prsp->context_id);
+ } else {
+ dev_err(&adapter->pdev->dev,
+			"Failed to create tx ctx in firmware: %d\n", err);
+ err = -EIO;
+ }
+
+ dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
+ rsp_phys_addr);
+
+out_free_rq:
+ dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
+
+ return err;
+}
+
+static void
+qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = adapter->tx_context_id;
+ cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET;
+ cmd.req.arg3 = 0;
+ cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_TX_CTX;
+ qlcnic_issue_cmd(adapter, &cmd);
+ if (cmd.rsp.cmd)
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy tx ctx in firmware\n");
+}
+
+int
+qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
+{
+ struct qlcnic_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = config;
+ cmd.req.cmd = QLCNIC_CDRP_CMD_CONFIG_PORT;
+ qlcnic_issue_cmd(adapter, &cmd);
+
+ return cmd.rsp.cmd;
+}
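+
+/*
+ * A sketch of the CDRP mailbox convention used throughout this file
+ * (qlcnic_fw_cmd_set_port() above is the simplest instance): fill a
+ * zeroed struct qlcnic_cmd_args with the command code and up to three
+ * arguments, call qlcnic_issue_cmd(), then read the firmware status
+ * back from cmd.rsp.cmd. Requests that pass a DMA buffer split its
+ * bus address into high (arg1) and low (arg2) 32-bit halves:
+ *
+ *	memset(&cmd, 0, sizeof(cmd));
+ *	cmd.req.cmd  = <one of the QLCNIC_CDRP_CMD_* codes>;
+ *	cmd.req.arg1 = MSD(dma_handle);
+ *	cmd.req.arg2 = LSD(dma_handle);
+ *	cmd.req.arg3 = buffer_size;
+ *	qlcnic_issue_cmd(adapter, &cmd);
+ *	if (cmd.rsp.cmd != QLCNIC_RCODE_SUCCESS)
+ *		... report the failure ...
+ */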
+
+int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
+{
+ void *addr;
+ int err;
+ int ring;
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ struct pci_dev *pdev = adapter->pdev;
+
+ recv_ctx = adapter->recv_ctx;
+ tx_ring = adapter->tx_ring;
+
+ tx_ring->hw_consumer = (__le32 *) dma_alloc_coherent(&pdev->dev,
+ sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL);
+ if (tx_ring->hw_consumer == NULL) {
+ dev_err(&pdev->dev, "failed to allocate tx consumer\n");
+ return -ENOMEM;
+ }
+
+ /* cmd desc ring */
+ addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
+ &tx_ring->phys_addr, GFP_KERNEL);
+
+ if (addr == NULL) {
+ dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+
+ tx_ring->desc_head = addr;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ addr = dma_alloc_coherent(&adapter->pdev->dev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ &rds_ring->phys_addr, GFP_KERNEL);
+ if (addr == NULL) {
+ dev_err(&pdev->dev,
+ "failed to allocate rds ring [%d]\n", ring);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+		rds_ring->desc_head = addr;
+	}
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ addr = dma_alloc_coherent(&adapter->pdev->dev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ &sds_ring->phys_addr, GFP_KERNEL);
+ if (addr == NULL) {
+ dev_err(&pdev->dev,
+ "failed to allocate sds ring [%d]\n", ring);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ sds_ring->desc_head = addr;
+ }
+
+ return 0;
+
+err_out_free:
+ qlcnic_free_hw_resources(adapter);
+ return err;
+}
+
+int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ if (adapter->flags & QLCNIC_NEED_FLR) {
+ pci_reset_function(adapter->pdev);
+ adapter->flags &= ~QLCNIC_NEED_FLR;
+ }
+
+ err = qlcnic_fw_cmd_create_rx_ctx(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_fw_cmd_create_tx_ctx(adapter);
+ if (err) {
+ qlcnic_fw_cmd_destroy_rx_ctx(adapter);
+ return err;
+ }
+
+ set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+ return 0;
+}
+
+void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
+{
+ if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
+ qlcnic_fw_cmd_destroy_rx_ctx(adapter);
+ qlcnic_fw_cmd_destroy_tx_ctx(adapter);
+
+ /* Allow dma queues to drain after context reset */
+ msleep(20);
+ }
+}
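+
+/*
+ * Typical context lifecycle, sketched from the attach/detach paths
+ * that call into this file (for orientation only):
+ *
+ *	err = qlcnic_alloc_hw_resources(adapter);	/~ host rings ~/
+ *	if (!err)
+ *		err = qlcnic_fw_create_ctx(adapter);	/~ rx+tx fw ctx ~/
+ *	...
+ *	qlcnic_fw_destroy_ctx(adapter);
+ *	qlcnic_free_hw_resources(adapter);
+ */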
+
+void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ recv_ctx = adapter->recv_ctx;
+
+ tx_ring = adapter->tx_ring;
+ if (tx_ring->hw_consumer != NULL) {
+ dma_free_coherent(&adapter->pdev->dev,
+ sizeof(u32),
+ tx_ring->hw_consumer,
+ tx_ring->hw_cons_phys_addr);
+ tx_ring->hw_consumer = NULL;
+ }
+
+ if (tx_ring->desc_head != NULL) {
+ dma_free_coherent(&adapter->pdev->dev,
+ TX_DESC_RINGSIZE(tx_ring),
+ tx_ring->desc_head, tx_ring->phys_addr);
+ tx_ring->desc_head = NULL;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ if (rds_ring->desc_head != NULL) {
+ dma_free_coherent(&adapter->pdev->dev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ rds_ring->desc_head,
+ rds_ring->phys_addr);
+ rds_ring->desc_head = NULL;
+ }
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ if (sds_ring->desc_head != NULL) {
+ dma_free_coherent(&adapter->pdev->dev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ sds_ring->desc_head,
+ sds_ring->phys_addr);
+ sds_ring->desc_head = NULL;
+ }
+ }
+}
+
+/* Get MAC address of a NIC partition */
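+/* On success the 6-byte MAC comes back packed into the two 32-bit
+ * response words; qlcnic_fetch_mac() unpacks rsp.arg1/rsp.arg2 into
+ * the caller's buffer.
+ */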
+int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+{
+ int err;
+ struct qlcnic_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = adapter->ahw->pci_func | BIT_8;
+ cmd.req.cmd = QLCNIC_CDRP_CMD_MAC_ADDRESS;
+ cmd.rsp.arg1 = cmd.rsp.arg2 = 1;
+ qlcnic_issue_cmd(adapter, &cmd);
+ err = cmd.rsp.cmd;
+
+	if (err == QLCNIC_RCODE_SUCCESS) {
+		qlcnic_fetch_mac(adapter, cmd.rsp.arg1, cmd.rsp.arg2, 0, mac);
+	} else {
+		dev_err(&adapter->pdev->dev,
+			"Failed to get MAC address: %d\n", err);
+		err = -EIO;
+	}
+
+ return err;
+}
+
+/* Get info of a NIC partition */
+int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info, u8 func_id)
+{
+ int err;
+ dma_addr_t nic_dma_t;
+ struct qlcnic_info *nic_info;
+ void *nic_info_addr;
+ struct qlcnic_cmd_args cmd;
+ size_t nic_size = sizeof(struct qlcnic_info);
+
+ nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
+ if (!nic_info_addr)
+ return -ENOMEM;
+ memset(nic_info_addr, 0, nic_size);
+
+ nic_info = nic_info_addr;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_GET_NIC_INFO;
+ cmd.req.arg1 = MSD(nic_dma_t);
+ cmd.req.arg2 = LSD(nic_dma_t);
+ cmd.req.arg3 = (func_id << 16 | nic_size);
+ qlcnic_issue_cmd(adapter, &cmd);
+ err = cmd.rsp.cmd;
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
+ npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
+ npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
+ npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
+ npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
+ npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
+ npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
+ npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
+ npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
+ npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
+
+ dev_info(&adapter->pdev->dev,
+ "phy port: %d switch_mode: %d,\n"
+ "\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n"
+			"\tmax_tx_bw: 0x%x max_mtu: 0x%x, capabilities: 0x%x\n",
+ npar_info->phys_port, npar_info->switch_mode,
+ npar_info->max_tx_ques, npar_info->max_rx_ques,
+ npar_info->min_tx_bw, npar_info->max_tx_bw,
+ npar_info->max_mtu, npar_info->capabilities);
+ } else {
+ dev_err(&adapter->pdev->dev,
+			"Failed to get nic info: %d\n", err);
+ err = -EIO;
+ }
+
+ dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
+ nic_dma_t);
+ return err;
+}
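+
+/*
+ * Both the GET and SET nic info commands share one buffer convention:
+ * the struct travels through a DMA-coherent buffer in little-endian
+ * layout, its bus address goes out via MSD()/LSD() in arg1/arg2, and
+ * arg3 packs (function id << 16 | buffer size).
+ */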
+
+/* Configure a NIC partition */
+int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
+{
+ int err = -EIO;
+ dma_addr_t nic_dma_t;
+ void *nic_info_addr;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_info *nic_info;
+ size_t nic_size = sizeof(struct qlcnic_info);
+
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ return err;
+
+ nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
+ if (!nic_info_addr)
+ return -ENOMEM;
+
+ memset(nic_info_addr, 0, nic_size);
+ nic_info = nic_info_addr;
+
+ nic_info->pci_func = cpu_to_le16(nic->pci_func);
+ nic_info->op_mode = cpu_to_le16(nic->op_mode);
+ nic_info->phys_port = cpu_to_le16(nic->phys_port);
+ nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
+ nic_info->capabilities = cpu_to_le32(nic->capabilities);
+ nic_info->max_mac_filters = nic->max_mac_filters;
+ nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
+ nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
+ nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
+ nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_SET_NIC_INFO;
+ cmd.req.arg1 = MSD(nic_dma_t);
+ cmd.req.arg2 = LSD(nic_dma_t);
+ cmd.req.arg3 = ((nic->pci_func << 16) | nic_size);
+ qlcnic_issue_cmd(adapter, &cmd);
+ err = cmd.rsp.cmd;
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+			"Failed to set nic info: %d\n", err);
+ err = -EIO;
+ }
+
+ dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
+ nic_dma_t);
+ return err;
+}
+
+/* Get PCI Info of a partition */
+int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_pci_info *pci_info)
+{
+ int err = 0, i;
+ struct qlcnic_cmd_args cmd;
+ dma_addr_t pci_info_dma_t;
+ struct qlcnic_pci_info *npar;
+ void *pci_info_addr;
+ size_t npar_size = sizeof(struct qlcnic_pci_info);
+ size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
+
+ pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
+ &pci_info_dma_t, GFP_KERNEL);
+ if (!pci_info_addr)
+ return -ENOMEM;
+ memset(pci_info_addr, 0, pci_size);
+
+ npar = pci_info_addr;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_GET_PCI_INFO;
+ cmd.req.arg1 = MSD(pci_info_dma_t);
+ cmd.req.arg2 = LSD(pci_info_dma_t);
+ cmd.req.arg3 = pci_size;
+ qlcnic_issue_cmd(adapter, &cmd);
+ err = cmd.rsp.cmd;
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
+ pci_info->id = le16_to_cpu(npar->id);
+ pci_info->active = le16_to_cpu(npar->active);
+ pci_info->type = le16_to_cpu(npar->type);
+ pci_info->default_port =
+ le16_to_cpu(npar->default_port);
+ pci_info->tx_min_bw =
+ le16_to_cpu(npar->tx_min_bw);
+ pci_info->tx_max_bw =
+ le16_to_cpu(npar->tx_max_bw);
+ memcpy(pci_info->mac, npar->mac, ETH_ALEN);
+ }
+ } else {
+ dev_err(&adapter->pdev->dev,
+			"Failed to get PCI info: %d\n", err);
+ err = -EIO;
+ }
+
+ dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
+ pci_info_dma_t);
+ return err;
+}
+
+/* Configure eSwitch for port mirroring */
+int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
+ u8 enable_mirroring, u8 pci_func)
+{
+ int err = -EIO;
+ u32 arg1;
+ struct qlcnic_cmd_args cmd;
+
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC ||
+ !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
+ return err;
+
+ arg1 = id | (enable_mirroring ? BIT_4 : 0);
+ arg1 |= pci_func << 8;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_SET_PORTMIRRORING;
+ cmd.req.arg1 = arg1;
+ qlcnic_issue_cmd(adapter, &cmd);
+ err = cmd.rsp.cmd;
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+			"Failed to configure port mirroring for func %d on eswitch %d\n",
+ pci_func, id);
+ } else {
+ dev_info(&adapter->pdev->dev,
+			"Configured eSwitch %d for port mirroring of func %d\n",
+ id, pci_func);
+ }
+
+ return err;
+}
+
+int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
+	const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats)
+{
+ size_t stats_size = sizeof(struct __qlcnic_esw_statistics);
+ struct __qlcnic_esw_statistics *stats;
+ dma_addr_t stats_dma_t;
+ void *stats_addr;
+ u32 arg1;
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ if (esw_stats == NULL)
+ return -ENOMEM;
+
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC &&
+ func != adapter->ahw->pci_func) {
+ dev_err(&adapter->pdev->dev,
+			"Not privileged to query stats for func=%d\n", func);
+ return -EIO;
+ }
+
+ stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
+ &stats_dma_t, GFP_KERNEL);
+ if (!stats_addr) {
+ dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+ memset(stats_addr, 0, stats_size);
+
+ arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
+ arg1 |= rx_tx << 15 | stats_size << 16;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_STATS;
+ cmd.req.arg1 = arg1;
+ cmd.req.arg2 = MSD(stats_dma_t);
+ cmd.req.arg3 = LSD(stats_dma_t);
+ qlcnic_issue_cmd(adapter, &cmd);
+ err = cmd.rsp.cmd;
+
+ if (!err) {
+ stats = stats_addr;
+ esw_stats->context_id = le16_to_cpu(stats->context_id);
+ esw_stats->version = le16_to_cpu(stats->version);
+ esw_stats->size = le16_to_cpu(stats->size);
+ esw_stats->multicast_frames =
+ le64_to_cpu(stats->multicast_frames);
+ esw_stats->broadcast_frames =
+ le64_to_cpu(stats->broadcast_frames);
+ esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
+ esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
+ esw_stats->local_frames = le64_to_cpu(stats->local_frames);
+ esw_stats->errors = le64_to_cpu(stats->errors);
+ esw_stats->numbytes = le64_to_cpu(stats->numbytes);
+ }
+
+ dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
+ stats_dma_t);
+ return err;
+}
+
+int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
+	const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats)
+{
+ struct __qlcnic_esw_statistics port_stats;
+ u8 i;
+ int ret = -EIO;
+
+ if (esw_stats == NULL)
+ return -ENOMEM;
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ return -EIO;
+ if (adapter->npars == NULL)
+ return -EIO;
+
+	memset(esw_stats, 0, sizeof(struct __qlcnic_esw_statistics));
+ esw_stats->unicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
+ esw_stats->multicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
+ esw_stats->broadcast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
+ esw_stats->dropped_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
+ esw_stats->errors = QLCNIC_ESW_STATS_NOT_AVAIL;
+ esw_stats->local_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
+ esw_stats->numbytes = QLCNIC_ESW_STATS_NOT_AVAIL;
+ esw_stats->context_id = eswitch;
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ if (adapter->npars[i].phy_port != eswitch)
+ continue;
+
+ memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
+ if (qlcnic_get_port_stats(adapter, i, rx_tx, &port_stats))
+ continue;
+
+ esw_stats->size = port_stats.size;
+ esw_stats->version = port_stats.version;
+ QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
+ port_stats.unicast_frames);
+ QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
+ port_stats.multicast_frames);
+ QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
+ port_stats.broadcast_frames);
+ QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
+ port_stats.dropped_frames);
+ QLCNIC_ADD_ESW_STATS(esw_stats->errors,
+ port_stats.errors);
+ QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
+ port_stats.local_frames);
+ QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
+ port_stats.numbytes);
+ ret = 0;
+ }
+ return ret;
+}
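+
+/*
+ * QLCNIC_ADD_ESW_STATS() (see qlcnic.h) folds a per-port counter into
+ * the running eswitch total while treating QLCNIC_ESW_STATS_NOT_AVAIL
+ * as "no data yet", so a port that failed to report does not poison
+ * the aggregate.
+ */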
+
+int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
+ const u8 port, const u8 rx_tx)
+{
+	u32 arg1;
+ struct qlcnic_cmd_args cmd;
+
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ return -EIO;
+
+ if (func_esw == QLCNIC_STATS_PORT) {
+ if (port >= QLCNIC_MAX_PCI_FUNC)
+ goto err_ret;
+ } else if (func_esw == QLCNIC_STATS_ESWITCH) {
+ if (port >= QLCNIC_NIU_MAX_XG_PORTS)
+ goto err_ret;
+ } else {
+ goto err_ret;
+ }
+
+ if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
+ goto err_ret;
+
+ arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
+ arg1 |= BIT_14 | rx_tx << 15;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_STATS;
+ cmd.req.arg1 = arg1;
+ qlcnic_issue_cmd(adapter, &cmd);
+ return cmd.rsp.cmd;
+
+err_ret:
+	dev_err(&adapter->pdev->dev, "Invalid argument: func_esw=%d port=%d "
+		"rx_tx=%d\n", func_esw, port, rx_tx);
+ return -EIO;
+}
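+
+/*
+ * arg1 bit layout shared by the eswitch statistics commands, as read
+ * off the call sites above (field widths assumed): bits 7:0 carry the
+ * port/function, bits 11:8 the stats version, bits 13:12 the query
+ * type (port vs. eswitch), bit 14 requests a clear instead of a read,
+ * bit 15 selects rx vs. tx counters, and bits 31:16 hold the buffer
+ * size for read requests.
+ */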
+
+static int
+__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
+ u32 *arg1, u32 *arg2)
+{
+	int err;
+	struct qlcnic_cmd_args cmd;
+	u8 pci_func = (*arg1 >> 8);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG;
+ cmd.req.arg1 = *arg1;
+ cmd.rsp.arg1 = cmd.rsp.arg2 = 1;
+ qlcnic_issue_cmd(adapter, &cmd);
+ *arg1 = cmd.rsp.arg1;
+ *arg2 = cmd.rsp.arg2;
+ err = cmd.rsp.cmd;
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ dev_info(&adapter->pdev->dev,
+			"Got eSwitch port config for pci func %d\n", pci_func);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get eswitch port config for pci func %d\n",
+ pci_func);
+ }
+ return err;
+}
+
+/*
+ * Configure eSwitch port
+ * op_mode = 0 for setting default port behavior
+ * op_mode = 1 for setting vlan id
+ * op_mode = 2 for deleting vlan id
+ * op_type = 0 for vlan_id
+ * op_type = 1 for port vlan_id
+ */
+int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ int err = -EIO;
+ u32 arg1, arg2 = 0;
+ struct qlcnic_cmd_args cmd;
+ u8 pci_func;
+
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ return err;
+ pci_func = esw_cfg->pci_func;
+ arg1 = (adapter->npars[pci_func].phy_port & BIT_0);
+ arg1 |= (pci_func << 8);
+
+ if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
+ return err;
+ arg1 &= ~(0x0ff << 8);
+ arg1 |= (pci_func << 8);
+ arg1 &= ~(BIT_2 | BIT_3);
+ switch (esw_cfg->op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ arg1 |= (BIT_4 | BIT_6 | BIT_7);
+ arg2 |= (BIT_0 | BIT_1);
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+ arg2 |= (BIT_2 | BIT_3);
+ if (!(esw_cfg->discard_tagged))
+ arg1 &= ~BIT_4;
+ if (!(esw_cfg->promisc_mode))
+ arg1 &= ~BIT_6;
+ if (!(esw_cfg->mac_override))
+ arg1 &= ~BIT_7;
+ if (!(esw_cfg->mac_anti_spoof))
+ arg2 &= ~BIT_0;
+ if (!(esw_cfg->offload_flags & BIT_0))
+ arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
+ if (!(esw_cfg->offload_flags & BIT_1))
+ arg2 &= ~BIT_2;
+ if (!(esw_cfg->offload_flags & BIT_2))
+ arg2 &= ~BIT_3;
+ break;
+ case QLCNIC_ADD_VLAN:
+ arg1 |= (BIT_2 | BIT_5);
+ arg1 |= (esw_cfg->vlan_id << 16);
+ break;
+ case QLCNIC_DEL_VLAN:
+ arg1 |= (BIT_3 | BIT_5);
+ arg1 &= ~(0x0ffff << 16);
+ break;
+ default:
+ return err;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH;
+ cmd.req.arg1 = arg1;
+ cmd.req.arg2 = arg2;
+ qlcnic_issue_cmd(adapter, &cmd);
+
+ err = cmd.rsp.cmd;
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure eswitch pci func %d\n", pci_func);
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "Configured eSwitch for pci func %d\n", pci_func);
+ }
+
+ return err;
+}
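+
+/*
+ * A minimal caller sketch for the above (illustrative only; the real
+ * callers live in the driver's main/sysfs code):
+ *
+ *	struct qlcnic_esw_func_cfg esw_cfg;
+ *
+ *	memset(&esw_cfg, 0, sizeof(esw_cfg));
+ *	esw_cfg.pci_func = func;
+ *	esw_cfg.op_mode = QLCNIC_ADD_VLAN;
+ *	esw_cfg.vlan_id = vid;
+ *	err = qlcnic_config_switch_port(adapter, &esw_cfg);
+ */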
+
+int
+qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ u32 arg1, arg2;
+	u8 phy_port;
+
+	if (adapter->op_mode == QLCNIC_MGMT_FUNC)
+ phy_port = adapter->npars[esw_cfg->pci_func].phy_port;
+ else
+ phy_port = adapter->physical_port;
+ arg1 = phy_port;
+ arg1 |= (esw_cfg->pci_func << 8);
+ if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
+ return -EIO;
+
+ esw_cfg->discard_tagged = !!(arg1 & BIT_4);
+ esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
+ esw_cfg->promisc_mode = !!(arg1 & BIT_6);
+ esw_cfg->mac_override = !!(arg1 & BIT_7);
+ esw_cfg->vlan_id = LSW(arg1 >> 16);
+ esw_cfg->mac_anti_spoof = (arg2 & 0x1);
+ esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
new file mode 100644
index 000000000000..11f4df75e84c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -0,0 +1,1247 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "qlcnic.h"
+
+struct qlcnic_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
+#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
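+
+/*
+ * Each table entry below pairs the ethtool string with the size and
+ * offset of the backing field in struct qlcnic_adapter, so
+ * qlcnic_get_ethtool_stats() can read every counter with plain
+ * pointer arithmetic instead of one accessor per statistic.
+ */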
+
+static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
+ {"xmit_called",
+ QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)},
+ {"xmit_finished",
+ QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)},
+ {"rx_dropped",
+ QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+ {"tx_dropped",
+ QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
+ {"csummed",
+ QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+ {"rx_pkts",
+ QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
+ {"lro_pkts",
+ QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
+ {"rx_bytes",
+ QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
+ {"tx_bytes",
+ QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
+ {"lrobytes",
+ QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
+ {"lso_frames",
+ QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
+ {"xmit_on",
+ QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
+ {"xmit_off",
+ QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
+ {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
+ QLC_OFF(stats.skb_alloc_failure)},
+ {"null rxbuf",
+ QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
+ {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
+ QLC_OFF(stats.rx_dma_map_error)},
+ {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
+ QLC_OFF(stats.tx_dma_map_error)},
+};
+
+static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "rx unicast frames",
+ "rx multicast frames",
+ "rx broadcast frames",
+ "rx dropped frames",
+ "rx errors",
+ "rx local frames",
+ "rx numbytes",
+ "tx unicast frames",
+ "tx multicast frames",
+ "tx broadcast frames",
+ "tx dropped frames",
+ "tx errors",
+ "tx local frames",
+ "tx numbytes",
+};
+
+#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
+#define QLCNIC_DEVICE_STATS_LEN ARRAY_SIZE(qlcnic_device_gstrings_stats)
+
+static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register_Test_on_offline",
+ "Link_Test_on_offline",
+ "Interrupt_Test_offline",
+ "Internal_Loopback_offline",
+ "External_Loopback_offline"
+};
+
+#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
+
+#define QLCNIC_RING_REGS_COUNT 20
+#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32))
+#define QLCNIC_MAX_EEPROM_LEN 1024
+
+static const u32 diag_registers[] = {
+ CRB_CMDPEG_STATE,
+ CRB_RCVPEG_STATE,
+ CRB_XG_STATE_P3P,
+ CRB_FW_CAPABILITIES_1,
+ ISR_INT_STATE_REG,
+ QLCNIC_CRB_DRV_ACTIVE,
+ QLCNIC_CRB_DEV_STATE,
+ QLCNIC_CRB_DRV_STATE,
+ QLCNIC_CRB_DRV_SCRATCH,
+ QLCNIC_CRB_DEV_PARTITION_INFO,
+ QLCNIC_CRB_DRV_IDC_VER,
+ QLCNIC_PEG_ALIVE_COUNTER,
+ QLCNIC_PEG_HALT_STATUS1,
+ QLCNIC_PEG_HALT_STATUS2,
+ QLCNIC_CRB_PEG_NET_0+0x3c,
+ QLCNIC_CRB_PEG_NET_1+0x3c,
+ QLCNIC_CRB_PEG_NET_2+0x3c,
+ QLCNIC_CRB_PEG_NET_4+0x3c,
+ -1
+};
+
+#define QLCNIC_MGMT_API_VERSION 2
+#define QLCNIC_DEV_INFO_SIZE 1
+#define QLCNIC_ETHTOOL_REGS_VER 2
+static int qlcnic_get_regs_len(struct net_device *dev)
+{
+ return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN +
+ QLCNIC_DEV_INFO_SIZE + 1;
+}
+
+static int qlcnic_get_eeprom_len(struct net_device *dev)
+{
+ return QLCNIC_FLASH_TOTAL_SIZE;
+}
+
+static void
+qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 fw_major, fw_minor, fw_build;
+
+ fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		 "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, qlcnic_driver_name, 32);
+ strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32);
+}
+
+static int
+qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int check_sfp_module = 0;
+ u16 pcifn = adapter->ahw->pci_func;
+
+ /* read which mode */
+ if (adapter->ahw->port_type == QLCNIC_GBE) {
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ ecmd->advertising = (ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full);
+
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+ ecmd->duplex = adapter->link_duplex;
+ ecmd->autoneg = adapter->link_autoneg;
+
+ } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
+ u32 val;
+
+ val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
+ if (val == QLCNIC_PORT_MODE_802_3_AP) {
+ ecmd->supported = SUPPORTED_1000baseT_Full;
+ ecmd->advertising = ADVERTISED_1000baseT_Full;
+ } else {
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ }
+
+ if (netif_running(dev) && adapter->has_link_events) {
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+ ecmd->autoneg = adapter->link_autoneg;
+ ecmd->duplex = adapter->link_duplex;
+ goto skip;
+ }
+
+ val = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
+ ethtool_cmd_speed_set(ecmd, P3P_LINK_SPEED_MHZ *
+ P3P_LINK_SPEED_VAL(pcifn, val));
+ ecmd->duplex = DUPLEX_FULL;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ } else
+ return -EIO;
+
+skip:
+ ecmd->phy_address = adapter->physical_port;
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ switch (adapter->ahw->board_type) {
+ case QLCNIC_BRDTYPE_P3P_REF_QG:
+ case QLCNIC_BRDTYPE_P3P_4_GB:
+ case QLCNIC_BRDTYPE_P3P_4_GB_MM:
+ ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_Autoneg;
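+		/* fall through */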
+ case QLCNIC_BRDTYPE_P3P_10G_CX4:
+ case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
+ case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->port = PORT_TP;
+ ecmd->autoneg = adapter->link_autoneg;
+ break;
+ case QLCNIC_BRDTYPE_P3P_IMEZ:
+ case QLCNIC_BRDTYPE_P3P_XG_LOM:
+ case QLCNIC_BRDTYPE_P3P_HMEZ:
+ ecmd->supported |= SUPPORTED_MII;
+ ecmd->advertising |= ADVERTISED_MII;
+ ecmd->port = PORT_MII;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ break;
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->supported |= SUPPORTED_TP;
+ check_sfp_module = netif_running(dev) &&
+ adapter->has_link_events;
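+		/* fall through */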
+ case QLCNIC_BRDTYPE_P3P_10G_XFP:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ break;
+ case QLCNIC_BRDTYPE_P3P_10G_TP:
+ if (adapter->ahw->port_type == QLCNIC_XGBE) {
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
+ ecmd->advertising |=
+ (ADVERTISED_FIBRE | ADVERTISED_TP);
+ ecmd->port = PORT_FIBRE;
+ check_sfp_module = netif_running(dev) &&
+ adapter->has_link_events;
+ } else {
+ ecmd->autoneg = AUTONEG_ENABLE;
+ ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+ ecmd->advertising |=
+ (ADVERTISED_TP | ADVERTISED_Autoneg);
+ ecmd->port = PORT_TP;
+ }
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
+ adapter->ahw->board_type);
+ return -EIO;
+ }
+
+ if (check_sfp_module) {
+ switch (adapter->module_type) {
+ case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
+ case LINKEVENT_MODULE_OPTICAL_SRLR:
+ case LINKEVENT_MODULE_OPTICAL_LRM:
+ case LINKEVENT_MODULE_OPTICAL_SFP_1G:
+ ecmd->port = PORT_FIBRE;
+ break;
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
+ case LINKEVENT_MODULE_TWINAX:
+ ecmd->port = PORT_TP;
+ break;
+ default:
+ ecmd->port = PORT_OTHER;
+ }
+ }
+
+ return 0;
+}
+
+static int
+qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ u32 config = 0;
+ u32 ret = 0;
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (adapter->ahw->port_type != QLCNIC_GBE)
+ return -EOPNOTSUPP;
+
+	/* build the port config word from the requested settings */
+ if (ecmd->duplex)
+ config |= 0x1;
+
+ if (ecmd->autoneg)
+ config |= 0x2;
+
+ switch (ethtool_cmd_speed(ecmd)) {
+ case SPEED_10:
+ config |= (0 << 8);
+ break;
+ case SPEED_100:
+ config |= (1 << 8);
+ break;
+ case SPEED_1000:
+ config |= (10 << 8);
+ break;
+ default:
+ return -EIO;
+ }
+
+ ret = qlcnic_fw_cmd_set_port(adapter, config);
+
+ if (ret == QLCNIC_RCODE_NOT_SUPPORTED)
+ return -EOPNOTSUPP;
+ else if (ret)
+ return -EIO;
+
+ adapter->link_speed = ethtool_cmd_speed(ecmd);
+ adapter->link_duplex = ecmd->duplex;
+ adapter->link_autoneg = ecmd->autoneg;
+
+ if (!netif_running(dev))
+ return 0;
+
+ dev->netdev_ops->ndo_stop(dev);
+ return dev->netdev_ops->ndo_open(dev);
+}
+
+static void
+qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_sds_ring *sds_ring;
+ u32 *regs_buff = p;
+ int ring, i = 0, j = 0;
+
+ memset(p, 0, qlcnic_get_regs_len(dev));
+ regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) |
+ (adapter->ahw->revision_id << 16) | (adapter->pdev)->device;
+
+ regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
+ regs_buff[1] = QLCNIC_MGMT_API_VERSION;
+
+ for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
+ regs_buff[i] = QLCRD32(adapter, diag_registers[j]);
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return;
+
+	regs_buff[i++] = 0xFFEFCDAB; /* Marker between regs and ring count */
+
+ regs_buff[i++] = 1; /* No. of tx ring */
+ regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
+ regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer);
+
+ regs_buff[i++] = 2; /* No. of rx ring */
+ regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer);
+ regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer);
+
+ regs_buff[i++] = adapter->max_sds_rings;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &(recv_ctx->sds_rings[ring]);
+ regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
+ }
+}
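+
+/*
+ * Resulting regs blob layout (matching the writes above): word 0 is
+ * the 0xcafe0000 magic plus the info size, word 1 the management API
+ * version, then the diag register snapshot, the 0xFFEFCDAB marker,
+ * and per-ring producer/consumer snapshots when the device is up.
+ */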
+
+static u32 qlcnic_test_link(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 val;
+
+ val = QLCRD32(adapter, CRB_XG_STATE_P3P);
+ val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val);
+ return (val == XG_LINK_UP_P3P) ? 0 : 1;
+}
+
+static int
+qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int offset;
+ int ret;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = (adapter->pdev)->vendor |
+ ((adapter->pdev)->device << 16);
+ offset = eeprom->offset;
+
+ ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
+ eeprom->len);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void
+qlcnic_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ ring->rx_pending = adapter->num_rxd;
+ ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
+ ring->tx_pending = adapter->num_txd;
+
+ ring->rx_max_pending = adapter->max_rxd;
+ ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd;
+ ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
+
+ ring->rx_mini_max_pending = 0;
+ ring->rx_mini_pending = 0;
+}
+
+static u32
+qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
+{
+ u32 num_desc;
+ num_desc = max(val, min);
+ num_desc = min(num_desc, max);
+ num_desc = roundup_pow_of_two(num_desc);
+
+ if (val != num_desc) {
+ printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n",
+ qlcnic_driver_name, r_name, num_desc, val);
+ }
+
+ return num_desc;
+}
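+
+/*
+ * Worked example: a request for 300 rx descriptors with min 256 and
+ * max 1024 is clamped to [256, 1024] and then rounded up to the next
+ * power of two, so the ring is sized at 512 and the adjustment is
+ * logged.
+ */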
+
+static int
+qlcnic_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u16 num_rxd, num_jumbo_rxd, num_txd;
+
+ if (ring->rx_mini_pending)
+ return -EOPNOTSUPP;
+
+ num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
+ MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx");
+
+ num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
+ MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd,
+ "rx jumbo");
+
+ num_txd = qlcnic_validate_ringparam(ring->tx_pending,
+ MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
+
+ if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd &&
+ num_jumbo_rxd == adapter->num_jumbo_rxd)
+ return 0;
+
+ adapter->num_rxd = num_rxd;
+ adapter->num_jumbo_rxd = num_jumbo_rxd;
+ adapter->num_txd = num_txd;
+
+ return qlcnic_reset_context(adapter);
+}
+
+static void qlcnic_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ channel->max_rx = rounddown_pow_of_two(min_t(int,
+ adapter->max_rx_ques, num_online_cpus()));
+ channel->max_tx = adapter->max_tx_ques;
+
+ channel->rx_count = adapter->max_sds_rings;
+ channel->tx_count = adapter->max_tx_ques;
+}
+
+static int qlcnic_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int err;
+
+ if (channel->other_count || channel->combined_count ||
+ channel->tx_count != channel->max_tx)
+ return -EINVAL;
+
+ err = qlcnic_validate_max_rss(dev, channel->max_rx, channel->rx_count);
+ if (err)
+ return err;
+
+ err = qlcnic_set_max_rss(adapter, channel->rx_count);
+ netdev_info(dev, "allocated 0x%x sds rings\n",
+ adapter->max_sds_rings);
+ return err;
+}
+
+static void
+qlcnic_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int port = adapter->physical_port;
+ __u32 val;
+
+ if (adapter->ahw->port_type == QLCNIC_GBE) {
+ if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
+ return;
+ /* get flow control settings */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+ pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+ switch (port) {
+ case 0:
+ pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
+ break;
+ case 1:
+ pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val));
+ break;
+ case 2:
+ pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val));
+ break;
+ case 3:
+ default:
+ pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val));
+ break;
+ }
+ } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
+ if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
+ return;
+ pause->rx_pause = 1;
+ val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+ if (port == 0)
+ pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
+ else
+ pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val));
+ } else {
+ dev_err(&netdev->dev, "Unknown board type: %x\n",
+ adapter->ahw->port_type);
+ }
+}
+
+static int
+qlcnic_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int port = adapter->physical_port;
+ __u32 val;
+
+	/* apply flow control settings per port type */
+ if (adapter->ahw->port_type == QLCNIC_GBE) {
+ if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
+ return -EIO;
+ /* set flow control */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+
+ if (pause->rx_pause)
+ qlcnic_gb_rx_flowctl(val);
+ else
+ qlcnic_gb_unset_rx_flowctl(val);
+
+ QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port),
+ val);
+ /* set autoneg */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+ switch (port) {
+ case 0:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb0_mask(val);
+ else
+ qlcnic_gb_set_gb0_mask(val);
+ break;
+ case 1:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb1_mask(val);
+ else
+ qlcnic_gb_set_gb1_mask(val);
+ break;
+ case 2:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb2_mask(val);
+ else
+ qlcnic_gb_set_gb2_mask(val);
+ break;
+ case 3:
+ default:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb3_mask(val);
+ else
+ qlcnic_gb_set_gb3_mask(val);
+ break;
+ }
+ QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
+ } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
+ if (!pause->rx_pause || pause->autoneg)
+ return -EOPNOTSUPP;
+
+ if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
+ return -EIO;
+
+ val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+ if (port == 0) {
+ if (pause->tx_pause)
+ qlcnic_xg_unset_xg0_mask(val);
+ else
+ qlcnic_xg_set_xg0_mask(val);
+ } else {
+ if (pause->tx_pause)
+ qlcnic_xg_unset_xg1_mask(val);
+ else
+ qlcnic_xg_set_xg1_mask(val);
+ }
+ QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val);
+ } else {
+ dev_err(&netdev->dev, "Unknown board type: %x\n",
+ adapter->ahw->port_type);
+ }
+ return 0;
+}
+
+static int qlcnic_reg_test(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 data_read;
+
+ data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
+ if ((data_read & 0xffff) != adapter->pdev->vendor)
+ return 1;
+
+ return 0;
+}
+
+static int qlcnic_get_sset_count(struct net_device *dev, int sset)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ switch (sset) {
+ case ETH_SS_TEST:
+ return QLCNIC_TEST_LEN;
+ case ETH_SS_STATS:
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ return QLCNIC_STATS_LEN + QLCNIC_DEVICE_STATS_LEN;
+ return QLCNIC_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int qlcnic_irq_test(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int max_sds_rings = adapter->max_sds_rings;
+ int ret;
+ struct qlcnic_cmd_args cmd;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
+ if (ret)
+ goto clear_it;
+
+ adapter->diag_cnt = 0;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = QLCNIC_CDRP_CMD_INTRPT_TEST;
+ cmd.req.arg1 = adapter->ahw->pci_func;
+ qlcnic_issue_cmd(adapter, &cmd);
+ ret = cmd.rsp.cmd;
+
+ if (ret)
+ goto done;
+
+ msleep(10);
+
+ ret = !adapter->diag_cnt;
+
+done:
+ qlcnic_diag_free_res(netdev, max_sds_rings);
+
+clear_it:
+ adapter->max_sds_rings = max_sds_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+#define QLCNIC_ILB_PKT_SIZE 64
+#define QLCNIC_NUM_ILB_PKT 16
+#define QLCNIC_ILB_MAX_RCV_LOOP 10
+
+static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[])
+{
+ unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
+
+ memset(data, 0x4e, QLCNIC_ILB_PKT_SIZE);
+
+ memcpy(data, mac, ETH_ALEN);
+ memcpy(data + ETH_ALEN, mac, ETH_ALEN);
+
+ memcpy(data + 2 * ETH_ALEN, random_data, sizeof(random_data));
+}
+
+int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[])
+{
+ unsigned char buff[QLCNIC_ILB_PKT_SIZE];
+ qlcnic_create_loopback_buff(buff, mac);
+ return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE);
+}
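+
+/*
+ * Loopback frame layout (64 bytes): the station MAC serves as both
+ * destination and source, followed by the 4-byte pattern a8:06:45:00,
+ * with the rest of the frame filled with 0x4e. Verification is then a
+ * plain memcmp() of a freshly generated frame against what came back.
+ */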
+
+static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
+{
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
+ struct sk_buff *skb;
+ int i, loop, cnt = 0;
+
+ for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
+		skb = dev_alloc_skb(QLCNIC_ILB_PKT_SIZE);
+		if (!skb)
+			return -ENOMEM;
+		qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
+ skb_put(skb, QLCNIC_ILB_PKT_SIZE);
+
+ adapter->diag_cnt = 0;
+ qlcnic_xmit_frame(skb, adapter->netdev);
+
+ loop = 0;
+ do {
+ msleep(1);
+ qlcnic_process_rcv_ring_diag(sds_ring);
+ if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP)
+ break;
+ } while (!adapter->diag_cnt);
+
+ dev_kfree_skb_any(skb);
+
+ if (!adapter->diag_cnt)
+ QLCDB(adapter, DRV,
+ "LB Test: packet #%d was not received\n", i + 1);
+ else
+ cnt++;
+ }
+ if (cnt != i) {
+ dev_warn(&adapter->pdev->dev, "LB Test failed\n");
+ if (mode != QLCNIC_ILB_MODE) {
+ dev_warn(&adapter->pdev->dev,
+				"WARNING: Please make sure external "
+				"loopback connector is plugged in\n");
+ }
+ return -1;
+ }
+ return 0;
+}
+
+static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int max_sds_rings = adapter->max_sds_rings;
+ struct qlcnic_host_sds_ring *sds_ring;
+ int loop = 0;
+ int ret;
+
+ if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
+ netdev_info(netdev, "Firmware is not loopback test capable\n");
+ return -EOPNOTSUPP;
+ }
+
+ QLCDB(adapter, DRV, "%s loopback test in progress\n",
+ mode == QLCNIC_ILB_MODE ? "internal" : "external");
+ if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
+		netdev_warn(netdev, "Loopback test not supported for "
+				"non-privileged function\n");
+ return 0;
+ }
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
+ if (ret)
+ goto clear_it;
+
+ sds_ring = &adapter->recv_ctx->sds_rings[0];
+
+ ret = qlcnic_set_lb_mode(adapter, mode);
+ if (ret)
+ goto free_res;
+
+ adapter->diag_cnt = 0;
+ do {
+ msleep(500);
+ qlcnic_process_rcv_ring_diag(sds_ring);
+ if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
+			netdev_info(netdev, "firmware didn't respond to "
+				"loopback configure request\n");
+ ret = -QLCNIC_FW_NOT_RESPOND;
+ goto free_res;
+ } else if (adapter->diag_cnt) {
+ ret = adapter->diag_cnt;
+ goto free_res;
+ }
+ } while (!QLCNIC_IS_LB_CONFIGURED(adapter->ahw->loopback_state));
+
+ ret = qlcnic_do_lb_test(adapter, mode);
+
+ qlcnic_clear_lb_mode(adapter);
+
+ free_res:
+ qlcnic_diag_free_res(netdev, max_sds_rings);
+
+ clear_it:
+ adapter->max_sds_rings = max_sds_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+static void
+qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
+ u64 *data)
+{
+ memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN);
+
+ data[0] = qlcnic_reg_test(dev);
+ if (data[0])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ data[1] = (u64) qlcnic_test_link(dev);
+ if (data[1])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (eth_test->flags & ETH_TEST_FL_OFFLINE) {
+ data[2] = qlcnic_irq_test(dev);
+ if (data[2])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ data[3] = qlcnic_loopback_test(dev, QLCNIC_ILB_MODE);
+ if (data[3])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE);
+ if (data[4])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+ }
+ }
+}
+
+static void
+qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int index, i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *qlcnic_gstrings_test,
+ QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (index = 0; index < QLCNIC_STATS_LEN; index++) {
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_gstrings_stats[index].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return;
+ for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) {
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_device_gstrings_stats[i],
+ ETH_GSTRING_LEN);
+ }
+ }
+}
+
+#define QLCNIC_FILL_ESWITCH_STATS(VAL1) \
+ (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) ? 0 : VAL1)
+
+static void
+qlcnic_fill_device_stats(int *index, u64 *data,
+ struct __qlcnic_esw_statistics *stats)
+{
+ int ind = *index;
+
+ data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->unicast_frames);
+ data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->multicast_frames);
+ data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->broadcast_frames);
+ data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->dropped_frames);
+ data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->errors);
+ data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->local_frames);
+ data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->numbytes);
+
+ *index = ind;
+}
+
+static void
+qlcnic_get_ethtool_stats(struct net_device *dev,
+		struct ethtool_stats *stats, u64 *data)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ struct qlcnic_esw_statistics port_stats;
+ int index, ret;
+
+ for (index = 0; index < QLCNIC_STATS_LEN; index++) {
+ char *p =
+ (char *)adapter +
+ qlcnic_gstrings_stats[index].stat_offset;
+ data[index] =
+ (qlcnic_gstrings_stats[index].sizeof_stat ==
+			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return;
+
+ memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics));
+ ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
+ QLCNIC_QUERY_RX_COUNTER, &port_stats.rx);
+ if (ret)
+ return;
+
+ qlcnic_fill_device_stats(&index, data, &port_stats.rx);
+
+ ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
+ QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
+ if (ret)
+ return;
+
+ qlcnic_fill_device_stats(&index, data, &port_stats.tx);
+}
+
+static int qlcnic_set_led(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int max_sds_rings = adapter->max_sds_rings;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
+ return -EBUSY;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST)) {
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return -EIO;
+ }
+ set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
+ }
+
+ if (adapter->nic_ops->config_led(adapter, 1, 0xf) == 0)
+ return 0;
+
+ dev_err(&adapter->pdev->dev,
+ "Failed to set LED blink state.\n");
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ if (adapter->nic_ops->config_led(adapter, 0, 0xf))
+ dev_err(&adapter->pdev->dev,
+ "Failed to reset LED blink state.\n");
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) {
+ qlcnic_diag_free_res(dev, max_sds_rings);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ }
+
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+ return -EIO;
+}
+
+static void
+qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 wol_cfg;
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ if (wol_cfg & (1UL << adapter->portnum))
+ wol->supported |= WAKE_MAGIC;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ if (wol_cfg & (1UL << adapter->portnum))
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int
+qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 wol_cfg;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EOPNOTSUPP;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ if (!(wol_cfg & (1 << adapter->portnum)))
+ return -EOPNOTSUPP;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ if (wol->wolopts & WAKE_MAGIC)
+ wol_cfg |= 1UL << adapter->portnum;
+ else
+ wol_cfg &= ~(1UL << adapter->portnum);
+
+ QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg);
+
+ return 0;
+}
+
+/*
+ * Set the coalescing parameters. Currently only normal is supported.
+ * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the
+ * firmware coalescing to default.
+ */
+static int qlcnic_set_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return -EINVAL;
+
+ /*
+ * Return Error if unsupported values or
+ * unsupported parameters are set.
+ */
+ if (ethcoal->rx_coalesce_usecs > 0xffff ||
+ ethcoal->rx_max_coalesced_frames > 0xffff ||
+ ethcoal->tx_coalesce_usecs ||
+ ethcoal->tx_max_coalesced_frames ||
+ ethcoal->rx_coalesce_usecs_irq ||
+ ethcoal->rx_max_coalesced_frames_irq ||
+ ethcoal->tx_coalesce_usecs_irq ||
+ ethcoal->tx_max_coalesced_frames_irq ||
+ ethcoal->stats_block_coalesce_usecs ||
+ ethcoal->use_adaptive_rx_coalesce ||
+ ethcoal->use_adaptive_tx_coalesce ||
+ ethcoal->pkt_rate_low ||
+ ethcoal->rx_coalesce_usecs_low ||
+ ethcoal->rx_max_coalesced_frames_low ||
+ ethcoal->tx_coalesce_usecs_low ||
+ ethcoal->tx_max_coalesced_frames_low ||
+ ethcoal->pkt_rate_high ||
+ ethcoal->rx_coalesce_usecs_high ||
+ ethcoal->rx_max_coalesced_frames_high ||
+ ethcoal->tx_coalesce_usecs_high ||
+ ethcoal->tx_max_coalesced_frames_high)
+ return -EINVAL;
+
+ if (!ethcoal->rx_coalesce_usecs ||
+ !ethcoal->rx_max_coalesced_frames) {
+ adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
+ adapter->ahw->coal.rx_time_us =
+ QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
+ adapter->ahw->coal.rx_packets =
+ QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ } else {
+ adapter->ahw->coal.flag = 0;
+ adapter->ahw->coal.rx_time_us = ethcoal->rx_coalesce_usecs;
+ adapter->ahw->coal.rx_packets =
+ ethcoal->rx_max_coalesced_frames;
+ }
+
+ qlcnic_config_intr_coalesce(adapter);
+
+ return 0;
+}
+
+static int qlcnic_get_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EINVAL;
+
+ ethcoal->rx_coalesce_usecs = adapter->ahw->coal.rx_time_us;
+ ethcoal->rx_max_coalesced_frames = adapter->ahw->coal.rx_packets;
+
+ return 0;
+}
+
+static u32 qlcnic_get_msglevel(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msg_enable = msglvl;
+}
+
+static int
+qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+
+ if (fw_dump->clr)
+ dump->len = fw_dump->tmpl_hdr->size + fw_dump->size;
+ else
+ dump->len = 0;
+ dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
+ dump->version = adapter->fw_version;
+ return 0;
+}
+
+static int
+qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
+ void *buffer)
+{
+ int i, copy_sz;
+ u32 *hdr_ptr, *data;
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+
+	if (!fw_dump->clr) {
+		netdev_info(netdev, "Dump not available\n");
+		return -EINVAL;
+	}
+ /* Copy template header first */
+ copy_sz = fw_dump->tmpl_hdr->size;
+ hdr_ptr = (u32 *) fw_dump->tmpl_hdr;
+ data = buffer;
+ for (i = 0; i < copy_sz/sizeof(u32); i++)
+ *data++ = cpu_to_le32(*hdr_ptr++);
+
+ /* Copy captured dump data */
+ memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size);
+ dump->len = copy_sz + fw_dump->size;
+ dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
+
+ /* Free dump area once data has been captured */
+ vfree(fw_dump->data);
+ fw_dump->data = NULL;
+ fw_dump->clr = 0;
+
+ return 0;
+}
+
+static int
+qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
+{
+ int ret = 0;
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+
+ switch (val->flag) {
+ case QLCNIC_FORCE_FW_DUMP_KEY:
+ if (!fw_dump->enable) {
+ netdev_info(netdev, "FW dump not enabled\n");
+ return ret;
+ }
+ if (fw_dump->clr) {
+ dev_info(&adapter->pdev->dev,
+ "Previous dump not cleared, not forcing dump\n");
+ return ret;
+ }
+ netdev_info(netdev, "Forcing a FW dump\n");
+ qlcnic_dev_request_reset(adapter);
+ break;
+ case QLCNIC_DISABLE_FW_DUMP:
+ if (fw_dump->enable) {
+ netdev_info(netdev, "Disabling FW dump\n");
+ fw_dump->enable = 0;
+ }
+ break;
+ case QLCNIC_ENABLE_FW_DUMP:
+ if (!fw_dump->enable && fw_dump->tmpl_hdr) {
+ netdev_info(netdev, "Enabling FW dump\n");
+ fw_dump->enable = 1;
+ }
+ break;
+ case QLCNIC_FORCE_FW_RESET:
+ netdev_info(netdev, "Forcing a FW reset\n");
+ qlcnic_dev_request_reset(adapter);
+ adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
+ break;
+ default:
+ if (val->flag > QLCNIC_DUMP_MASK_MAX ||
+ val->flag < QLCNIC_DUMP_MASK_MIN) {
+ netdev_info(netdev,
+ "Invalid dump level: 0x%x\n", val->flag);
+ ret = -EINVAL;
+ goto out;
+ }
+ fw_dump->tmpl_hdr->drv_cap_mask = val->flag & 0xff;
+ netdev_info(netdev, "Driver mask changed to: 0x%x\n",
+ fw_dump->tmpl_hdr->drv_cap_mask);
+ }
+out:
+ return ret;
+}
+
+const struct ethtool_ops qlcnic_ethtool_ops = {
+ .get_settings = qlcnic_get_settings,
+ .set_settings = qlcnic_set_settings,
+ .get_drvinfo = qlcnic_get_drvinfo,
+ .get_regs_len = qlcnic_get_regs_len,
+ .get_regs = qlcnic_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = qlcnic_get_eeprom_len,
+ .get_eeprom = qlcnic_get_eeprom,
+ .get_ringparam = qlcnic_get_ringparam,
+ .set_ringparam = qlcnic_set_ringparam,
+ .get_channels = qlcnic_get_channels,
+ .set_channels = qlcnic_set_channels,
+ .get_pauseparam = qlcnic_get_pauseparam,
+ .set_pauseparam = qlcnic_set_pauseparam,
+ .get_wol = qlcnic_get_wol,
+ .set_wol = qlcnic_set_wol,
+ .self_test = qlcnic_diag_test,
+ .get_strings = qlcnic_get_strings,
+ .get_ethtool_stats = qlcnic_get_ethtool_stats,
+ .get_sset_count = qlcnic_get_sset_count,
+ .get_coalesce = qlcnic_get_intr_coalesce,
+ .set_coalesce = qlcnic_set_intr_coalesce,
+ .set_phys_id = qlcnic_set_led,
+ .set_msglevel = qlcnic_set_msglevel,
+ .get_msglevel = qlcnic_get_msglevel,
+ .get_dump_flag = qlcnic_get_dump_flag,
+ .get_dump_data = qlcnic_get_dump_data,
+ .set_dump = qlcnic_set_dump,
+};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
new file mode 100644
index 000000000000..92bc8ce9b287
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -0,0 +1,1025 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_HDR_H_
+#define __QLCNIC_HDR_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/*
+ * The basic unit of access when reading/writing control registers.
+ */
+
+enum {
+ QLCNIC_HW_H0_CH_HUB_ADR = 0x05,
+ QLCNIC_HW_H1_CH_HUB_ADR = 0x0E,
+ QLCNIC_HW_H2_CH_HUB_ADR = 0x03,
+ QLCNIC_HW_H3_CH_HUB_ADR = 0x01,
+ QLCNIC_HW_H4_CH_HUB_ADR = 0x06,
+ QLCNIC_HW_H5_CH_HUB_ADR = 0x07,
+ QLCNIC_HW_H6_CH_HUB_ADR = 0x08
+};
+
+/* Hub 0 */
+enum {
+ QLCNIC_HW_MN_CRB_AGT_ADR = 0x15,
+ QLCNIC_HW_MS_CRB_AGT_ADR = 0x25
+};
+
+/* Hub 1 */
+enum {
+ QLCNIC_HW_PS_CRB_AGT_ADR = 0x73,
+ QLCNIC_HW_SS_CRB_AGT_ADR = 0x20,
+ QLCNIC_HW_RPMX3_CRB_AGT_ADR = 0x0b,
+ QLCNIC_HW_QMS_CRB_AGT_ADR = 0x00,
+ QLCNIC_HW_SQGS0_CRB_AGT_ADR = 0x01,
+ QLCNIC_HW_SQGS1_CRB_AGT_ADR = 0x02,
+ QLCNIC_HW_SQGS2_CRB_AGT_ADR = 0x03,
+ QLCNIC_HW_SQGS3_CRB_AGT_ADR = 0x04,
+ QLCNIC_HW_C2C0_CRB_AGT_ADR = 0x58,
+ QLCNIC_HW_C2C1_CRB_AGT_ADR = 0x59,
+ QLCNIC_HW_C2C2_CRB_AGT_ADR = 0x5a,
+ QLCNIC_HW_RPMX2_CRB_AGT_ADR = 0x0a,
+ QLCNIC_HW_RPMX4_CRB_AGT_ADR = 0x0c,
+ QLCNIC_HW_RPMX7_CRB_AGT_ADR = 0x0f,
+ QLCNIC_HW_RPMX9_CRB_AGT_ADR = 0x12,
+ QLCNIC_HW_SMB_CRB_AGT_ADR = 0x18
+};
+
+/* Hub 2 */
+enum {
+ QLCNIC_HW_NIU_CRB_AGT_ADR = 0x31,
+ QLCNIC_HW_I2C0_CRB_AGT_ADR = 0x19,
+ QLCNIC_HW_I2C1_CRB_AGT_ADR = 0x29,
+
+ QLCNIC_HW_SN_CRB_AGT_ADR = 0x10,
+ QLCNIC_HW_I2Q_CRB_AGT_ADR = 0x20,
+ QLCNIC_HW_LPC_CRB_AGT_ADR = 0x22,
+ QLCNIC_HW_ROMUSB_CRB_AGT_ADR = 0x21,
+ QLCNIC_HW_QM_CRB_AGT_ADR = 0x66,
+ QLCNIC_HW_SQG0_CRB_AGT_ADR = 0x60,
+ QLCNIC_HW_SQG1_CRB_AGT_ADR = 0x61,
+ QLCNIC_HW_SQG2_CRB_AGT_ADR = 0x62,
+ QLCNIC_HW_SQG3_CRB_AGT_ADR = 0x63,
+ QLCNIC_HW_RPMX1_CRB_AGT_ADR = 0x09,
+ QLCNIC_HW_RPMX5_CRB_AGT_ADR = 0x0d,
+ QLCNIC_HW_RPMX6_CRB_AGT_ADR = 0x0e,
+ QLCNIC_HW_RPMX8_CRB_AGT_ADR = 0x11
+};
+
+/* Hub 3 */
+enum {
+ QLCNIC_HW_PH_CRB_AGT_ADR = 0x1A,
+ QLCNIC_HW_SRE_CRB_AGT_ADR = 0x50,
+ QLCNIC_HW_EG_CRB_AGT_ADR = 0x51,
+ QLCNIC_HW_RPMX0_CRB_AGT_ADR = 0x08
+};
+
+/* Hub 4 */
+enum {
+ QLCNIC_HW_PEGN0_CRB_AGT_ADR = 0x40,
+ QLCNIC_HW_PEGN1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGNI_CRB_AGT_ADR,
+ QLCNIC_HW_PEGND_CRB_AGT_ADR,
+ QLCNIC_HW_PEGNC_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR0_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN4_CRB_AGT_ADR
+};
+
+/* Hub 5 */
+enum {
+ QLCNIC_HW_PEGS0_CRB_AGT_ADR = 0x40,
+ QLCNIC_HW_PEGS1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGS2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGS3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSI_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSD_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSC_CRB_AGT_ADR
+};
+
+/* Hub 6 */
+enum {
+ QLCNIC_HW_CAS0_CRB_AGT_ADR = 0x46,
+ QLCNIC_HW_CAS1_CRB_AGT_ADR = 0x47,
+ QLCNIC_HW_CAS2_CRB_AGT_ADR = 0x48,
+ QLCNIC_HW_CAS3_CRB_AGT_ADR = 0x49,
+ QLCNIC_HW_NCM_CRB_AGT_ADR = 0x16,
+ QLCNIC_HW_TMR_CRB_AGT_ADR = 0x17,
+ QLCNIC_HW_XDMA_CRB_AGT_ADR = 0x05,
+ QLCNIC_HW_OCM0_CRB_AGT_ADR = 0x06,
+ QLCNIC_HW_OCM1_CRB_AGT_ADR = 0x07
+};
+
+/* Floaters - non-existent modules */
+#define QLCNIC_HW_EFC_RPMX0_CRB_AGT_ADR 0x67
+
+/* This field defines PCI/X adr [25:20] of agents on the CRB */
+enum {
+ QLCNIC_HW_PX_MAP_CRB_PH = 0,
+ QLCNIC_HW_PX_MAP_CRB_PS,
+ QLCNIC_HW_PX_MAP_CRB_MN,
+ QLCNIC_HW_PX_MAP_CRB_MS,
+ QLCNIC_HW_PX_MAP_CRB_PGR1,
+ QLCNIC_HW_PX_MAP_CRB_SRE,
+ QLCNIC_HW_PX_MAP_CRB_NIU,
+ QLCNIC_HW_PX_MAP_CRB_QMN,
+ QLCNIC_HW_PX_MAP_CRB_SQN0,
+ QLCNIC_HW_PX_MAP_CRB_SQN1,
+ QLCNIC_HW_PX_MAP_CRB_SQN2,
+ QLCNIC_HW_PX_MAP_CRB_SQN3,
+ QLCNIC_HW_PX_MAP_CRB_QMS,
+ QLCNIC_HW_PX_MAP_CRB_SQS0,
+ QLCNIC_HW_PX_MAP_CRB_SQS1,
+ QLCNIC_HW_PX_MAP_CRB_SQS2,
+ QLCNIC_HW_PX_MAP_CRB_SQS3,
+ QLCNIC_HW_PX_MAP_CRB_PGN0,
+ QLCNIC_HW_PX_MAP_CRB_PGN1,
+ QLCNIC_HW_PX_MAP_CRB_PGN2,
+ QLCNIC_HW_PX_MAP_CRB_PGN3,
+ QLCNIC_HW_PX_MAP_CRB_PGND,
+ QLCNIC_HW_PX_MAP_CRB_PGNI,
+ QLCNIC_HW_PX_MAP_CRB_PGS0,
+ QLCNIC_HW_PX_MAP_CRB_PGS1,
+ QLCNIC_HW_PX_MAP_CRB_PGS2,
+ QLCNIC_HW_PX_MAP_CRB_PGS3,
+ QLCNIC_HW_PX_MAP_CRB_PGSD,
+ QLCNIC_HW_PX_MAP_CRB_PGSI,
+ QLCNIC_HW_PX_MAP_CRB_SN,
+ QLCNIC_HW_PX_MAP_CRB_PGR2,
+ QLCNIC_HW_PX_MAP_CRB_EG,
+ QLCNIC_HW_PX_MAP_CRB_PH2,
+ QLCNIC_HW_PX_MAP_CRB_PS2,
+ QLCNIC_HW_PX_MAP_CRB_CAM,
+ QLCNIC_HW_PX_MAP_CRB_CAS0,
+ QLCNIC_HW_PX_MAP_CRB_CAS1,
+ QLCNIC_HW_PX_MAP_CRB_CAS2,
+ QLCNIC_HW_PX_MAP_CRB_C2C0,
+ QLCNIC_HW_PX_MAP_CRB_C2C1,
+ QLCNIC_HW_PX_MAP_CRB_TIMR,
+ QLCNIC_HW_PX_MAP_CRB_PGR3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX1,
+ QLCNIC_HW_PX_MAP_CRB_RPMX2,
+ QLCNIC_HW_PX_MAP_CRB_RPMX3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX4,
+ QLCNIC_HW_PX_MAP_CRB_RPMX5,
+ QLCNIC_HW_PX_MAP_CRB_RPMX6,
+ QLCNIC_HW_PX_MAP_CRB_RPMX7,
+ QLCNIC_HW_PX_MAP_CRB_XDMA,
+ QLCNIC_HW_PX_MAP_CRB_I2Q,
+ QLCNIC_HW_PX_MAP_CRB_ROMUSB,
+ QLCNIC_HW_PX_MAP_CRB_CAS3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX0,
+ QLCNIC_HW_PX_MAP_CRB_RPMX8,
+ QLCNIC_HW_PX_MAP_CRB_RPMX9,
+ QLCNIC_HW_PX_MAP_CRB_OCM0,
+ QLCNIC_HW_PX_MAP_CRB_OCM1,
+ QLCNIC_HW_PX_MAP_CRB_SMB,
+ QLCNIC_HW_PX_MAP_CRB_I2C0,
+ QLCNIC_HW_PX_MAP_CRB_I2C1,
+ QLCNIC_HW_PX_MAP_CRB_LPC,
+ QLCNIC_HW_PX_MAP_CRB_PGNC,
+ QLCNIC_HW_PX_MAP_CRB_PGR0
+};
+
+#define BIT_0 0x1
+#define BIT_1 0x2
+#define BIT_2 0x4
+#define BIT_3 0x8
+#define BIT_4 0x10
+#define BIT_5 0x20
+#define BIT_6 0x40
+#define BIT_7 0x80
+#define BIT_8 0x100
+#define BIT_9 0x200
+#define BIT_10 0x400
+#define BIT_11 0x800
+#define BIT_12 0x1000
+#define BIT_13 0x2000
+#define BIT_14 0x4000
+#define BIT_15 0x8000
+#define BIT_16 0x10000
+#define BIT_17 0x20000
+#define BIT_18 0x40000
+#define BIT_19 0x80000
+#define BIT_20 0x100000
+#define BIT_21 0x200000
+#define BIT_22 0x400000
+#define BIT_23 0x800000
+#define BIT_24 0x1000000
+#define BIT_25 0x2000000
+#define BIT_26 0x4000000
+#define BIT_27 0x8000000
+#define BIT_28 0x10000000
+#define BIT_29 0x20000000
+#define BIT_30 0x40000000
+#define BIT_31 0x80000000
+
+/* This field defines CRB adr [31:20] of the agents */
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PH \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_PH_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_MS \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MS_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_PS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_QMS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS0 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS1 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS2 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS3 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C0 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C1 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX4_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX7_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX9_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SMB \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SMB_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_NIU \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0 \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1 \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C1_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SRE \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SRE_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_EG \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_EG_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMN \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_QM_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX5_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX6_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX8_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS2 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS3 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS3_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNI_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGND \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGND_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN4_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNC_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR0 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR1 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR2 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR3 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR3_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSI_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSD \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSD_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSC \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSC_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAM \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_NCM_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_TMR_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_XDMA_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SN \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_SN_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_I2Q_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_ROMUSB_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0 \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM1 \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_LPC \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_LPC_CRB_AGT_ADR)
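+
+/*
+ * Worked example (illustrative, not used by the driver): an agent's CRB
+ * address is its hub number in bits [11:7] and its agent number in bits
+ * [6:0]. For the SMB agent on hub 1 this gives
+ * (0x0E << 7) | 0x18 = 0x718, which is what
+ * QLCNIC_HW_CRB_HUB_AGT_ADR_SMB expands to.
+ */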
+
+#define QLCNIC_SRE_MISC (QLCNIC_CRB_SRE + 0x0002c)
+
+#define QLCNIC_I2Q_CLR_PCI_HI (QLCNIC_CRB_I2Q + 0x00034)
+
+#define ROMUSB_GLB (QLCNIC_CRB_ROMUSB + 0x00000)
+#define ROMUSB_ROM (QLCNIC_CRB_ROMUSB + 0x10000)
+
+#define QLCNIC_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
+#define QLCNIC_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
+#define QLCNIC_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c)
+#define QLCNIC_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
+#define QLCNIC_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044)
+#define QLCNIC_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
+#define QLCNIC_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8)
+
+#define QLCNIC_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n)))
+
+#define QLCNIC_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
+#define QLCNIC_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
+#define QLCNIC_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
+#define QLCNIC_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
+#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
+#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
+
+/* Lock IDs for ROM lock */
+#define ROM_LOCK_DRIVER 0x0d417340
+
+/******************************************************************************
+*
+* Definitions specific to M25P flash
+*
+*******************************************************************************
+*/
+
+/* all are 1MB windows */
+
+#define QLCNIC_PCI_CRB_WINDOWSIZE 0x00100000
+#define QLCNIC_PCI_CRB_WINDOW(A) \
+ (QLCNIC_PCI_CRBSPACE + (A)*QLCNIC_PCI_CRB_WINDOWSIZE)
+
+#define QLCNIC_CRB_NIU QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU)
+#define QLCNIC_CRB_SRE QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE)
+#define QLCNIC_CRB_ROMUSB \
+ QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB)
+#define QLCNIC_CRB_I2Q QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q)
+#define QLCNIC_CRB_I2C0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0)
+#define QLCNIC_CRB_SMB QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB)
+#define QLCNIC_CRB_MAX QLCNIC_PCI_CRB_WINDOW(64)
+
+#define QLCNIC_CRB_PCIX_HOST QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH)
+#define QLCNIC_CRB_PCIX_HOST2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH2)
+#define QLCNIC_CRB_PEG_NET_0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN0)
+#define QLCNIC_CRB_PEG_NET_1 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN1)
+#define QLCNIC_CRB_PEG_NET_2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN2)
+#define QLCNIC_CRB_PEG_NET_3 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN3)
+#define QLCNIC_CRB_PEG_NET_4 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SQS2)
+#define QLCNIC_CRB_PEG_NET_D QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGND)
+#define QLCNIC_CRB_PEG_NET_I QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGNI)
+#define QLCNIC_CRB_DDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_MN)
+#define QLCNIC_CRB_QDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SN)
+
+#define QLCNIC_CRB_PCIX_MD QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PS)
+#define QLCNIC_CRB_PCIE QLCNIC_CRB_PCIX_MD
+
+#define ISR_INT_VECTOR (QLCNIC_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_MASK_SLOW (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_TARGET_STATUS (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_MASK (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_STATUS_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_MASK_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_STATUS_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_MASK_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_STATUS_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_MASK_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_STATUS_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_MASK_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_STATUS_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_MASK_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_STATUS_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_MASK_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define QLCNIC_PCI_MN_2M (0)
+#define QLCNIC_PCI_MS_2M (0x80000)
+#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
+#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
+#define QLCNIC_PCI_CAMQM (0x04800000UL)
+#define QLCNIC_PCI_CAMQM_END (0x04800800UL)
+#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
+#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
+
+#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
+
+#define QLCNIC_ADDR_DDR_NET (0x0000000000000000ULL)
+#define QLCNIC_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+#define QLCNIC_ADDR_OCM0 (0x0000000200000000ULL)
+#define QLCNIC_ADDR_OCM0_MAX (0x00000002000fffffULL)
+#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL)
+#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL)
+#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL)
+#define QLCNIC_ADDR_QDR_NET_MAX (0x0000000307ffffffULL)
+
+/*
+ * Register offsets for MN
+ */
+#define QLCNIC_MIU_CONTROL (0x000)
+#define QLCNIC_MIU_MN_CONTROL (QLCNIC_CRB_DDR_NET+QLCNIC_MIU_CONTROL)
+
+/* 200ms delay in each loop */
+#define QLCNIC_NIU_PHY_WAITLEN 200000
+/* 10 seconds before we give up */
+#define QLCNIC_NIU_PHY_WAITMAX 50
+#define QLCNIC_NIU_MAX_GBE_PORTS 4
+#define QLCNIC_NIU_MAX_XG_PORTS 2
+
+#define QLCNIC_NIU_MODE (QLCNIC_CRB_NIU + 0x00000)
+#define QLCNIC_NIU_GB_PAUSE_CTL (QLCNIC_CRB_NIU + 0x0030c)
+#define QLCNIC_NIU_XG_PAUSE_CTL (QLCNIC_CRB_NIU + 0x00098)
+
+#define QLCNIC_NIU_GB_MAC_CONFIG_0(I) \
+ (QLCNIC_CRB_NIU + 0x30000 + (I)*0x10000)
+#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \
+ (QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000)
+
+
+#define TEST_AGT_CTRL (0x00)
+
+#define TA_CTL_START BIT_0
+#define TA_CTL_ENABLE BIT_1
+#define TA_CTL_WRITE BIT_2
+#define TA_CTL_BUSY BIT_3
+
+/*
+ * MIU test agent register offsets (MN)
+ */
+#define MIU_TEST_AGT_BASE (0x90)
+
+#define MIU_TEST_AGT_ADDR_LO (0x04)
+#define MIU_TEST_AGT_ADDR_HI (0x08)
+#define MIU_TEST_AGT_WRDATA_LO (0x10)
+#define MIU_TEST_AGT_WRDATA_HI (0x14)
+#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
+#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
+#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
+#define MIU_TEST_AGT_RDDATA_LO (0x18)
+#define MIU_TEST_AGT_RDDATA_HI (0x1c)
+#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
+#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
+#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
+
+#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
+#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
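+
+/*
+ * Sanity check (illustrative): MIU_TEST_AGT_WRDATA(i) walks the four write
+ * data registers; i = 0, 1, 2, 3 yields 0x10, 0x14, 0x20, 0x24, matching
+ * the _LO/_HI and _UPPER_LO/_UPPER_HI offsets above. MIU_TEST_AGT_RDDATA(i)
+ * does the same starting at 0x18.
+ */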
+
+/*
+ * SIU test agent register offsets (MS)
+ */
+#define SIU_TEST_AGT_BASE (0x60)
+
+#define SIU_TEST_AGT_ADDR_LO (0x04)
+#define SIU_TEST_AGT_ADDR_HI (0x18)
+#define SIU_TEST_AGT_WRDATA_LO (0x08)
+#define SIU_TEST_AGT_WRDATA_HI (0x0c)
+#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i)))
+#define SIU_TEST_AGT_RDDATA_LO (0x10)
+#define SIU_TEST_AGT_RDDATA_HI (0x14)
+#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i)))
+
+#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
+#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
+
+/* XG Link status */
+#define XG_LINK_UP 0x10
+#define XG_LINK_DOWN 0x20
+
+#define XG_LINK_UP_P3P 0x01
+#define XG_LINK_DOWN_P3P 0x02
+#define XG_LINK_STATE_P3P_MASK 0xf
+#define XG_LINK_STATE_P3P(pcifn, val) \
+ (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3P_MASK)
+
+#define P3P_LINK_SPEED_MHZ 100
+#define P3P_LINK_SPEED_MASK 0xff
+#define P3P_LINK_SPEED_REG(pcifn) \
+ (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
+#define P3P_LINK_SPEED_VAL(pcifn, reg) \
+ (((reg) >> (8 * ((pcifn) & 0x3))) & P3P_LINK_SPEED_MASK)
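+
+/*
+ * Worked example (values are illustrative): with a CRB_XG_STATE_P3P value
+ * of 0x21, XG_LINK_STATE_P3P(0, 0x21) == 0x1 (XG_LINK_UP_P3P) and
+ * XG_LINK_STATE_P3P(1, 0x21) == 0x2 (XG_LINK_DOWN_P3P). For link speed, a
+ * per-function byte of 10 means 10 * P3P_LINK_SPEED_MHZ = 1000 Mbps.
+ */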
+
+#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000)
+#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg))
+#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150))
+#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154))
+#define QLCNIC_FW_VERSION_SUB (QLCNIC_CAM_RAM(0x158))
+#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100))
+#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120))
+#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124))
+
+#define NIC_CRB_BASE (QLCNIC_CAM_RAM(0x200))
+#define NIC_CRB_BASE_2 (QLCNIC_CAM_RAM(0x700))
+#define QLCNIC_REG(X) (NIC_CRB_BASE+(X))
+#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X))
+
+#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18))
+#define QLCNIC_ARG1_CRB_OFFSET (QLCNIC_REG(0x1c))
+#define QLCNIC_ARG2_CRB_OFFSET (QLCNIC_REG(0x20))
+#define QLCNIC_ARG3_CRB_OFFSET (QLCNIC_REG(0x24))
+#define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28))
+
+#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50))
+#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c))
+
+#define CRB_XG_STATE_P3P (QLCNIC_REG(0x98))
+#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
+#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
+
+#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4))
+
+#define CRB_V2P_0 (QLCNIC_REG(0x290))
+#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
+#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
+
+#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
+#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
+
+/*
+ * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex,
+ * which can be read by the Phantom host to get producer/consumer indexes
+ * from Phantom/Casper. If it is not HOST_SHARED_MEMORY, the following
+ * registers will be used for the addresses of the ring's shared memory
+ * on the Phantom.
+ */
+
+#define qlcnic_get_temp_val(x) ((x) >> 16)
+#define qlcnic_get_temp_state(x) ((x) & 0xffff)
+#define qlcnic_encode_temp(val, state) (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+ QLCNIC_TEMP_NORMAL = 0x1, /* Normal operating range */
+ QLCNIC_TEMP_WARN, /* Sound alert, temperature getting high */
+ QLCNIC_TEMP_PANIC /* Fatal error, hardware has shut down. */
+};
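+
+/*
+ * Example (illustrative): qlcnic_encode_temp(46, QLCNIC_TEMP_NORMAL)
+ * yields 0x2e0001; decoding it back, qlcnic_get_temp_val() == 46 degrees C
+ * and qlcnic_get_temp_state() == QLCNIC_TEMP_NORMAL.
+ */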
+
+
+/* Lock IDs for PHY lock */
+#define PHY_LOCK_DRIVER 0x44524956
+
+/* Used for PS PCI Memory access */
+#define PCIX_PS_OP_ADDR_LO (0x10000)
+/* via CRB (PS side only) */
+#define PCIX_PS_OP_ADDR_HI (0x10004)
+
+#define PCIX_INT_VECTOR (0x10100)
+#define PCIX_INT_MASK (0x10104)
+
+#define PCIX_OCM_WINDOW (0x10800)
+#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x4 * (func))
+
+#define PCIX_TARGET_STATUS (0x10118)
+#define PCIX_TARGET_STATUS_F1 (0x10160)
+#define PCIX_TARGET_STATUS_F2 (0x10164)
+#define PCIX_TARGET_STATUS_F3 (0x10168)
+#define PCIX_TARGET_STATUS_F4 (0x10360)
+#define PCIX_TARGET_STATUS_F5 (0x10364)
+#define PCIX_TARGET_STATUS_F6 (0x10368)
+#define PCIX_TARGET_STATUS_F7 (0x1036c)
+
+#define PCIX_TARGET_MASK (0x10128)
+#define PCIX_TARGET_MASK_F1 (0x10170)
+#define PCIX_TARGET_MASK_F2 (0x10174)
+#define PCIX_TARGET_MASK_F3 (0x10178)
+#define PCIX_TARGET_MASK_F4 (0x10370)
+#define PCIX_TARGET_MASK_F5 (0x10374)
+#define PCIX_TARGET_MASK_F6 (0x10378)
+#define PCIX_TARGET_MASK_F7 (0x1037c)
+
+#define PCIX_MSI_F(i) (0x13000+((i)*4))
+
+#define QLCNIC_PCIX_PH_REG(reg) (QLCNIC_CRB_PCIE + (reg))
+#define QLCNIC_PCIX_PS_REG(reg) (QLCNIC_CRB_PCIX_MD + (reg))
+#define QLCNIC_PCIE_REG(reg) (QLCNIC_CRB_PCIE + (reg))
+
+#define PCIE_SEM0_LOCK (0x1c000)
+#define PCIE_SEM0_UNLOCK (0x1c004)
+#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N))
+#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N))
+
+#define PCIE_SETUP_FUNCTION (0x12040)
+#define PCIE_SETUP_FUNCTION2 (0x12048)
+#define PCIE_MISCCFG_RC (0x1206c)
+#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
+#define PCIE_CHICKEN3 (0x120c8)
+
+#define ISR_INT_STATE_REG (QLCNIC_PCIX_PS_REG(PCIE_MISCCFG_RC))
+#define PCIE_MAX_MASTER_SPLIT (0x14048)
+
+#define QLCNIC_PORT_MODE_NONE 0
+#define QLCNIC_PORT_MODE_XG 1
+#define QLCNIC_PORT_MODE_GB 2
+#define QLCNIC_PORT_MODE_802_3_AP 3
+#define QLCNIC_PORT_MODE_AUTO_NEG 4
+#define QLCNIC_PORT_MODE_AUTO_NEG_1G 5
+#define QLCNIC_PORT_MODE_AUTO_NEG_XG 6
+#define QLCNIC_PORT_MODE_ADDR (QLCNIC_CAM_RAM(0x24))
+#define QLCNIC_WOL_PORT_MODE (QLCNIC_CAM_RAM(0x198))
+
+#define QLCNIC_WOL_CONFIG_NV (QLCNIC_CAM_RAM(0x184))
+#define QLCNIC_WOL_CONFIG (QLCNIC_CAM_RAM(0x188))
+
+#define QLCNIC_PEG_TUNE_MN_PRESENT 0x1
+#define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c))
+
+#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14))
+#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0))
+#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8))
+#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac))
+#define QLCNIC_CRB_DRV_ACTIVE (QLCNIC_CAM_RAM(0x138))
+#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
+
+#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
+#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
+#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
+#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174))
+#define QLCNIC_CRB_DEV_NPAR_STATE (QLCNIC_CAM_RAM(0x19c))
+#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c)
+#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860)
+
+/* Device State */
+#define QLCNIC_DEV_COLD 0x1
+#define QLCNIC_DEV_INITIALIZING 0x2
+#define QLCNIC_DEV_READY 0x3
+#define QLCNIC_DEV_NEED_RESET 0x4
+#define QLCNIC_DEV_NEED_QUISCENT 0x5
+#define QLCNIC_DEV_FAILED 0x6
+#define QLCNIC_DEV_QUISCENT 0x7
+
+#define QLCNIC_DEV_NPAR_NON_OPER	0 /* NPAR non-operational */
+#define QLCNIC_DEV_NPAR_OPER		1 /* NPAR operational */
+#define QLCNIC_DEV_NPAR_OPER_TIMEO	30 /* Operational timeout */
+
+#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) & (1 << (FN * 4)))
+#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
+#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
+#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
+#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4)))
+#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4)))
+
+#define QLC_DEV_GET_DRV(VAL, FN) (0xf & ((VAL) >> (FN * 4)))
+#define QLC_DEV_SET_DRV(VAL, FN) ((VAL) << (FN * 4))
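+
+/*
+ * Illustration (not driver code): the QLC_DEV_* macros treat the register
+ * as eight 4-bit fields, one per PCI function. For function 2 the "active"
+ * bit is 1 << 8, so QLC_DEV_CHECK_ACTIVE(0x00000100, 2) is non-zero, and
+ * QLC_DEV_GET_DRV() pulls out that function's 4-bit driver type.
+ */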
+
+#define QLCNIC_TYPE_NIC 1
+#define QLCNIC_TYPE_FCOE 2
+#define QLCNIC_TYPE_ISCSI 3
+
+#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
+#define QLCNIC_RCODE_DRIVER_CAN_RELOAD BIT_30
+#define QLCNIC_RCODE_FATAL_ERROR BIT_31
+#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
+#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0x1fffff)
+#define QLCNIC_FWERROR_FAN_FAILURE 0x16
+
+#define FW_POLL_DELAY (1 * HZ)
+#define FW_FAIL_THRESH 2
+
+#define QLCNIC_RESET_TIMEOUT_SECS 10
+#define QLCNIC_INIT_TIMEOUT_SECS 30
+#define QLCNIC_RCVPEG_CHECK_RETRY_COUNT 2000
+#define QLCNIC_RCVPEG_CHECK_DELAY 10
+#define QLCNIC_CMDPEG_CHECK_RETRY_COUNT 60
+#define QLCNIC_CMDPEG_CHECK_DELAY 500
+#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200
+#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 45
+
+#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define PCIX_INT_VECTOR_BIT_F0 0x0080
+#define PCIX_INT_VECTOR_BIT_F1 0x0100
+#define PCIX_INT_VECTOR_BIT_F2 0x0200
+#define PCIX_INT_VECTOR_BIT_F3 0x0400
+#define PCIX_INT_VECTOR_BIT_F4 0x0800
+#define PCIX_INT_VECTOR_BIT_F5 0x1000
+#define PCIX_INT_VECTOR_BIT_F6 0x2000
+#define PCIX_INT_VECTOR_BIT_F7 0x4000
+
+struct qlcnic_legacy_intr_set {
+ u32 int_vec_bit;
+ u32 tgt_status_reg;
+ u32 tgt_mask_reg;
+ u32 pci_int_reg;
+};
+
+#define QLCNIC_FW_API 0x1b216c
+#define QLCNIC_DRV_OP_MODE 0x1b2170
+#define QLCNIC_MSIX_BASE 0x132110
+#define QLCNIC_MAX_PCI_FUNC 8
+#define QLCNIC_MAX_VLAN_FILTERS 64
+
+/* FW dump defines */
+#define MIU_TEST_CTR 0x41000090
+#define MIU_TEST_ADDR_LO 0x41000094
+#define MIU_TEST_ADDR_HI 0x41000098
+#define FLASH_ROM_WINDOW 0x42110030
+#define FLASH_ROM_DATA 0x42150000
+
+static const u32 MIU_TEST_READ_DATA[] = {
+	0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC,
+};
+
+#define QLCNIC_FW_DUMP_REG1 0x00130060
+#define QLCNIC_FW_DUMP_REG2 0x001e0000
+#define QLCNIC_FLASH_SEM2_LK 0x0013C010
+#define QLCNIC_FLASH_SEM2_ULK 0x0013C014
+#define QLCNIC_FLASH_LOCK_ID 0x001B2100
+
+#define QLCNIC_RD_DUMP_REG(addr, bar0, data) do { \
+ writel((addr & 0xFFFF0000), (void *) (bar0 + \
+ QLCNIC_FW_DUMP_REG1)); \
+ readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1)); \
+ *data = readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + \
+ LSW(addr))); \
+} while (0)
+
+#define QLCNIC_WR_DUMP_REG(addr, bar0, data) do { \
+ writel((addr & 0xFFFF0000), (void *) (bar0 + \
+ QLCNIC_FW_DUMP_REG1)); \
+ readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1)); \
+ writel(data, (void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr)));\
+ readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr))); \
+} while (0)
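+
+/*
+ * Usage sketch (hypothetical locals 'bar0' and 'val'; bar0 is the mapped
+ * PCI BAR0 base):
+ *
+ *	u32 val;
+ *	QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, bar0, &val);
+ *	QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, bar0, val | 0x1);
+ */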
+
+/* PCI function operational mode */
+enum {
+ QLCNIC_MGMT_FUNC = 0,
+ QLCNIC_PRIV_FUNC = 1,
+ QLCNIC_NON_PRIV_FUNC = 2
+};
+
+enum {
+ QLCNIC_PORT_DEFAULTS = 0,
+ QLCNIC_ADD_VLAN = 1,
+ QLCNIC_DEL_VLAN = 2
+};
+
+#define QLC_DEV_DRV_DEFAULT 0x11111111
+
+#define LSB(x) ((uint8_t)(x))
+#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
+
+#define LSW(x) ((uint16_t)((uint32_t)(x)))
+#define MSW(x) ((uint16_t)((uint32_t)(x) >> 16))
+
+#define LSD(x) ((uint32_t)((uint64_t)(x)))
+#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
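+
+/*
+ * Example (illustrative): for x = 0x12345678, LSW(x) == 0x5678 and
+ * MSW(x) == 0x1234; for x = 0x1122334455667788ULL, LSD(x) == 0x55667788
+ * and MSD(x) == 0x11223344.
+ */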
+
+#define QLCNIC_LEGACY_INTR_CONFIG \
+{ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
+}
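+
+/*
+ * A sketch of how this table is typically consumed (the array name is
+ * illustrative):
+ *
+ *	static const struct qlcnic_legacy_intr_set legacy_intr[] =
+ *						QLCNIC_LEGACY_INTR_CONFIG;
+ *
+ * indexed by PCI function number to find that function's interrupt vector
+ * bit and target status/mask registers.
+ */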
+
+/* NIU REGS */
+
+#define _qlcnic_crb_get_bit(var, bit) ((var >> bit) & 0x1)
+
+/*
+ * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
+ *
+ * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
+ * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
+ * Bit 2 : enable_rx => 1:enable frame recv, 0:disable
+ * Bit 3 : rx_synced => R/O: recv enable synched to recv stream
+ * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
+ * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
+ * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
+ * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
+ * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
+ * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
+ * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
+ * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
+ */
+#define qlcnic_gb_rx_flowctl(config_word) \
+ ((config_word) |= 1 << 5)
+#define qlcnic_gb_get_rx_flowctl(config_word) \
+ _qlcnic_crb_get_bit((config_word), 5)
+#define qlcnic_gb_unset_rx_flowctl(config_word) \
+ ((config_word) &= ~(1 << 5))
+
+/*
+ * NIU GB Pause Ctl Register
+ */
+
+#define qlcnic_gb_set_gb0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define qlcnic_gb_set_gb1_mask(config_word) \
+ ((config_word) |= 1 << 2)
+#define qlcnic_gb_set_gb2_mask(config_word) \
+ ((config_word) |= 1 << 4)
+#define qlcnic_gb_set_gb3_mask(config_word) \
+ ((config_word) |= 1 << 6)
+
+#define qlcnic_gb_get_gb0_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 0)
+#define qlcnic_gb_get_gb1_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 2)
+#define qlcnic_gb_get_gb2_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 4)
+#define qlcnic_gb_get_gb3_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 6)
+
+#define qlcnic_gb_unset_gb0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define qlcnic_gb_unset_gb1_mask(config_word) \
+ ((config_word) &= ~(1 << 2))
+#define qlcnic_gb_unset_gb2_mask(config_word) \
+ ((config_word) &= ~(1 << 4))
+#define qlcnic_gb_unset_gb3_mask(config_word) \
+ ((config_word) &= ~(1 << 6))
+
+/*
+ * NIU XG Pause Ctl Register
+ *
+ * Bit 0 : xg0_mask => 1:disable tx pause frames
+ * Bit 1 : xg0_request => 1:request single pause frame
+ * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
+ * Bit 3 : xg1_mask => 1:disable tx pause frames
+ * Bit 4 : xg1_request => 1:request single pause frame
+ * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
+ */
+
+#define qlcnic_xg_set_xg0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define qlcnic_xg_set_xg1_mask(config_word) \
+ ((config_word) |= 1 << 3)
+
+#define qlcnic_xg_get_xg0_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 0)
+#define qlcnic_xg_get_xg1_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 3)
+
+#define qlcnic_xg_unset_xg0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define qlcnic_xg_unset_xg1_mask(config_word) \
+ ((config_word) &= ~(1 << 3))
+
+/*
+ * PHY-Specific MII control/status registers.
+ */
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG 4
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17
+
+/*
+ * PHY-Specific Status Register (reg 17).
+ *
+ * Bit 0 : jabber => 1:jabber detected, 0:not
+ * Bit 1 : polarity => 1:polarity reversed, 0:normal
+ * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled
+ * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled
+ * Bit 4 : energydetect => 1:sleep, 0:active
+ * Bit 5 : downshift => 1:downshift, 0:no downshift
+ * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
+ * Bits 7-9 : cablelen => not valid in 10Mb/s mode
+ * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
+ * Bit 10 : link => 1:link up, 0:link down
+ * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet
+ * Bit 12 : pagercvd => 1:page received, 0:page not received
+ * Bit 13 : duplex => 1:full duplex, 0:half duplex
+ * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
+ */
+
+#define qlcnic_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)
+
+#define qlcnic_set_phy_speed(config_word, val) \
+ ((config_word) |= ((val & 0x03) << 14))
+#define qlcnic_set_phy_duplex(config_word) \
+ ((config_word) |= 1 << 13)
+#define qlcnic_clear_phy_duplex(config_word) \
+ ((config_word) &= ~(1 << 13))
+
+#define qlcnic_get_phy_link(config_word) \
+ _qlcnic_crb_get_bit(config_word, 10)
+#define qlcnic_get_phy_duplex(config_word) \
+ _qlcnic_crb_get_bit(config_word, 13)
+
+#define QLCNIC_NIU_NON_PROMISC_MODE 0
+#define QLCNIC_NIU_PROMISC_MODE 1
+#define QLCNIC_NIU_ALLMULTI_MODE 2
+
+struct crb_128M_2M_sub_block_map {
+ unsigned valid;
+ unsigned start_128M;
+ unsigned end_128M;
+ unsigned start_2M;
+};
+
+struct crb_128M_2M_block_map {
+ struct crb_128M_2M_sub_block_map sub_block[16];
+};
+#endif /* __QLCNIC_HDR_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
new file mode 100644
index 000000000000..74e9d7b94965
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -0,0 +1,1787 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+
+#include <linux/slab.h>
+#include <net/ip.h>
+#include <linux/bitops.h>
+
+#define MASK(n) ((1ULL<<(n))-1)
+#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
+
+#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
+
+#define CRB_BLK(off) ((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
+#define CRB_WINDOW_2M (0x130060)
+#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
+#define CRB_INDIRECT_2M (0x1e0000UL)
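+
+/*
+ * Worked example (illustrative): for off = 0x0608000 (inside the NIU block
+ * of the 128M map), CRB_BLK(off) == 6 and CRB_SUBBLK(off) == 0; CRB_HI()
+ * then prepends the hub/agent bits from crb_hub_agt[6] to form the address
+ * programmed through the 2M CRB window.
+ */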
+
+
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+ return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel(((u32) (val)), (addr));
+ writel(((u32) (val >> 32)), (addr + 4));
+}
+#endif
+
+static const struct crb_128M_2M_block_map
+crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
+ {{{0, 0, 0, 0} } }, /* 0: PCI */
+ {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
+ {1, 0x0110000, 0x0120000, 0x130000},
+ {1, 0x0120000, 0x0122000, 0x124000},
+ {1, 0x0130000, 0x0132000, 0x126000},
+ {1, 0x0140000, 0x0142000, 0x128000},
+ {1, 0x0150000, 0x0152000, 0x12a000},
+ {1, 0x0160000, 0x0170000, 0x110000},
+ {1, 0x0170000, 0x0172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x01e0000, 0x01e0800, 0x122000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
+ {{{0, 0, 0, 0} } }, /* 3: */
+ {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
+ {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
+ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
+ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
+ {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x08f0000, 0x08f2000, 0x172000} } },
+ {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x09f0000, 0x09f2000, 0x176000} } },
+ {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0af0000, 0x0af2000, 0x17a000} } },
+ {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+ {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
+ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
+ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
+ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
+ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
+ {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
+ {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
+ {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
+ {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
+ {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
+ {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
+ {{{0, 0, 0, 0} } }, /* 23: */
+ {{{0, 0, 0, 0} } }, /* 24: */
+ {{{0, 0, 0, 0} } }, /* 25: */
+ {{{0, 0, 0, 0} } }, /* 26: */
+ {{{0, 0, 0, 0} } }, /* 27: */
+ {{{0, 0, 0, 0} } }, /* 28: */
+ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
+ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
+ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
+ {{{0} } }, /* 32: PCI */
+ {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
+ {1, 0x2110000, 0x2120000, 0x130000},
+ {1, 0x2120000, 0x2122000, 0x124000},
+ {1, 0x2130000, 0x2132000, 0x126000},
+ {1, 0x2140000, 0x2142000, 0x128000},
+ {1, 0x2150000, 0x2152000, 0x12a000},
+ {1, 0x2160000, 0x2170000, 0x110000},
+ {1, 0x2170000, 0x2172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
+ {{{0} } }, /* 35: */
+ {{{0} } }, /* 36: */
+ {{{0} } }, /* 37: */
+ {{{0} } }, /* 38: */
+ {{{0} } }, /* 39: */
+ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
+ {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
+ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
+ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
+ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
+ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
+ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
+ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
+ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
+ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
+ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
+ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
+ {{{0} } }, /* 52: */
+ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
+ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
+ {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
+ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
+ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
+ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
+ {{{0} } }, /* 59: I2C0 */
+ {{{0} } }, /* 60: I2C1 */
+ {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
+ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
+ {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
+};
+
+/*
+ * top 12 bits of crb internal address (hub, agent)
+ */
+static const unsigned crb_hub_agt[64] = {
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
+ 0,
+};
+
+/* PCI Windowing for DDR regions. */
+
+#define QLCNIC_PCIE_SEM_TIMEOUT 10000
+
+int
+qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
+{
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
+ if (done == 1)
+ break;
+ if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock; holdby=%d\n",
+ sem, id_reg ? QLCRD32(adapter, id_reg) : -1);
+ return -EIO;
+ }
+ msleep(1);
+ }
+
+ if (id_reg)
+ QLCWR32(adapter, id_reg, adapter->portnum);
+
+ return 0;
+}
+
+void
+qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
+{
+ QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
+}
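+
+/*
+ * Typical usage (a sketch; semaphore 2 with QLCNIC_ROM_LOCK_ID is how the
+ * ROM lock is taken elsewhere in the driver):
+ *
+ *	if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID))
+ *		return -EIO;
+ *	... access the flash ROM ...
+ *	qlcnic_pcie_sem_unlock(adapter, 2);
+ */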
+
+static int
+qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
+ struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
+{
+ u32 i, producer, consumer;
+ struct qlcnic_cmd_buffer *pbuf;
+ struct cmd_desc_type0 *cmd_desc;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ i = 0;
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return -EIO;
+
+ tx_ring = adapter->tx_ring;
+ __netif_tx_lock_bh(tx_ring->txq);
+
+ producer = tx_ring->producer;
+ consumer = tx_ring->sw_consumer;
+
+ if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
+ netif_tx_stop_queue(tx_ring->txq);
+ smp_mb();
+ if (qlcnic_tx_avail(tx_ring) > nr_desc) {
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
+ netif_tx_wake_queue(tx_ring->txq);
+ } else {
+ adapter->stats.xmit_off++;
+ __netif_tx_unlock_bh(tx_ring->txq);
+ return -EBUSY;
+ }
+ }
+
+ do {
+ cmd_desc = &cmd_desc_arr[i];
+
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+ pbuf->skb = NULL;
+ pbuf->frag_count = 0;
+
+ memcpy(&tx_ring->desc_head[producer],
+ &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ i++;
+
+ } while (i != nr_desc);
+
+ tx_ring->producer = producer;
+
+ qlcnic_update_cmd_producer(adapter, tx_ring);
+
+ __netif_tx_unlock_bh(tx_ring->txq);
+
+ return 0;
+}
+
+static int
+qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
+ __le16 vlan_id, unsigned op)
+{
+ struct qlcnic_nic_req req;
+ struct qlcnic_mac_req *mac_req;
+ struct qlcnic_vlan_req *vlan_req;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
+
+ word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ mac_req = (struct qlcnic_mac_req *)&req.words[0];
+ mac_req->op = op;
+ memcpy(mac_req->mac_addr, addr, 6);
+
+ vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
+ vlan_req->vlan_id = vlan_id;
+
+ return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+}
+
+static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
+{
+ struct list_head *head;
+ struct qlcnic_mac_list_s *cur;
+
+ /* look up if already exists */
+ list_for_each(head, &adapter->mac_list) {
+ cur = list_entry(head, struct qlcnic_mac_list_s, list);
+ if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
+ return 0;
+ }
+
+ cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
+ if (cur == NULL) {
+ dev_err(&adapter->netdev->dev,
+ "failed to add mac address filter\n");
+ return -ENOMEM;
+ }
+ memcpy(cur->mac_addr, addr, ETH_ALEN);
+
+ if (qlcnic_sre_macaddr_change(adapter,
+ cur->mac_addr, 0, QLCNIC_MAC_ADD)) {
+ kfree(cur);
+ return -EIO;
+ }
+
+ list_add_tail(&cur->list, &adapter->mac_list);
+ return 0;
+}
+
+void qlcnic_set_multi(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct netdev_hw_addr *ha;
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+ u32 mode = VPORT_MISS_MODE_DROP;
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return;
+
+ qlcnic_nic_add_mac(adapter, adapter->mac_addr);
+ qlcnic_nic_add_mac(adapter, bcast_addr);
+
+ if (netdev->flags & IFF_PROMISC) {
+ if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ goto send_fw_cmd;
+ }
+
+ if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > adapter->max_mc_count)) {
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ goto send_fw_cmd;
+ }
+
+ if (!netdev_mc_empty(netdev)) {
+ netdev_for_each_mc_addr(ha, netdev) {
+ qlcnic_nic_add_mac(adapter, ha->addr);
+ }
+ }
+
+send_fw_cmd:
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL) {
+ qlcnic_alloc_lb_filters_mem(adapter);
+ adapter->mac_learn = 1;
+ } else {
+ adapter->mac_learn = 0;
+ }
+
+ qlcnic_nic_set_promisc(adapter, mode);
+}
+
+int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(mode);
+
+ return qlcnic_send_cmd_descs(adapter,
+ (struct cmd_desc_type0 *)&req, 1);
+}
+
+void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mac_list_s *cur;
+ struct list_head *head = &adapter->mac_list;
+
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+ qlcnic_sre_macaddr_change(adapter,
+ cur->mac_addr, 0, QLCNIC_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_filter *tmp_fil;
+ struct hlist_node *tmp_hnode, *n;
+ struct hlist_head *head;
+ int i;
+
+ for (i = 0; i < adapter->fhash.fmax; i++) {
+ head = &(adapter->fhash.fhead[i]);
+
+		hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ if (jiffies >
+ (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) {
+ qlcnic_sre_macaddr_change(adapter,
+ tmp_fil->faddr, tmp_fil->vlan_id,
+ tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
+ QLCNIC_MAC_DEL);
+ spin_lock_bh(&adapter->mac_learn_lock);
+ adapter->fhash.fnum--;
+ hlist_del(&tmp_fil->fnode);
+ spin_unlock_bh(&adapter->mac_learn_lock);
+ kfree(tmp_fil);
+ }
+ }
+ }
+}
+
+void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_filter *tmp_fil;
+ struct hlist_node *tmp_hnode, *n;
+ struct hlist_head *head;
+ int i;
+
+ for (i = 0; i < adapter->fhash.fmax; i++) {
+ head = &(adapter->fhash.fhead[i]);
+
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr,
+ tmp_fil->vlan_id, tmp_fil->vlan_id ?
+ QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL);
+ spin_lock_bh(&adapter->mac_learn_lock);
+ adapter->fhash.fnum--;
+ hlist_del(&tmp_fil->fnode);
+ spin_unlock_bh(&adapter->mac_learn_lock);
+ kfree(tmp_fil);
+ }
+ }
+}
+
+int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
+{
+ struct qlcnic_nic_req req;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+ req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
+ ((u64) adapter->portnum << 16) | ((u64) 0x1 << 32));
+
+ req.words[0] = cpu_to_le64(flag);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->pdev->dev, "%sting loopback mode failed\n",
+ flag ? "Set" : "Reset");
+ return rv;
+}
+
+int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ if (qlcnic_set_fw_loopback(adapter, mode))
+ return -EIO;
+
+	if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) {
+		/* undo the loopback mode configured above */
+		qlcnic_set_fw_loopback(adapter, 0);
+		return -EIO;
+	}
+
+ msleep(1000);
+ return 0;
+}
+
+void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter)
+{
+ int mode = VPORT_MISS_MODE_DROP;
+ struct net_device *netdev = adapter->netdev;
+
+ qlcnic_set_fw_loopback(adapter, 0);
+
+ if (netdev->flags & IFF_PROMISC)
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ else if (netdev->flags & IFF_ALLMULTI)
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+
+ qlcnic_nic_set_promisc(adapter, mode);
+ msleep(1000);
+}
+
+/*
+ * Send the interrupt coalescing parameter set by ethtool to the card.
+ */
+int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_req req;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE |
+ ((u64) adapter->portnum << 16));
+
+ req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32);
+ req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets |
+ ((u64) adapter->ahw->coal.rx_time_us) << 16);
+ req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out |
+ ((u64) adapter->ahw->coal.type) << 32 |
+ ((u64) adapter->ahw->coal.sts_ring_mask) << 40);
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send interrupt coalescing parameters\n");
+ return rv;
+}
+
+int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return 0;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(enable);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send configure hw lro request\n");
+
+ return rv;
+}
+
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
+ return 0;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(enable);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send configure bridge mode request\n");
+
+ adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
+
+ return rv;
+}
+
+
+#define RSS_HASHTYPE_IP_TCP 0x3
+
+int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int i, rv;
+
+ static const u64 key[] = {
+ 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL
+ };
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ /*
+ * RSS request:
+ * bits 3-0: hash_method
+ * 5-4: hash_type_ipv4
+ * 7-6: hash_type_ipv6
+ * 8: enable
+ * 9: use indirection table
+ * 47-10: reserved
+ * 63-48: indirection table mask
+ */
+ word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ ((u64)(enable & 0x1) << 8) |
+ ((0x7ULL) << 48);
+ req.words[0] = cpu_to_le64(word);
+ for (i = 0; i < 5; i++)
+ req.words[i+1] = cpu_to_le64(key[i]);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev, "could not configure RSS\n");
+
+ return rv;
+}
+
+int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
+{
+ struct qlcnic_nic_req req;
+ struct qlcnic_ipaddr *ipa;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(cmd);
+ ipa = (struct qlcnic_ipaddr *)&req.words[1];
+ ipa->ipv4 = ip;
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "could not notify %s IP 0x%x reuqest\n",
+ (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
+
+ return rv;
+}
+
+int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+ req.words[0] = cpu_to_le64(enable | (enable << 8));
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "could not configure link notification\n");
+
+ return rv;
+}
+
+int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return 0;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
+ ((u64)adapter->portnum << 16) |
+ ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56);
+
+ req.req_hdr = cpu_to_le64(word);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "could not cleanup lro flows\n");
+
+ return rv;
+}
+
+/*
+ * qlcnic_change_mtu - Change the Maximum Transmission Unit
+ * @returns 0 on success, negative on failure
+ */
+
+int qlcnic_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int rc = 0;
+
+ if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) {
+ dev_err(&adapter->netdev->dev,
+ "mtu must be between %d and %d bytes\n",
+ P3P_MIN_MTU, P3P_MAX_MTU);
+ return -EINVAL;
+ }
+
+ rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
+
+ if (!rc)
+ netdev->mtu = mtu;
+
+ return rc;
+}
+
+
+u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ u32 changed = features ^ netdev->features;
+ features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+ }
+
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ return features;
+}
+
+
+int qlcnic_set_features(struct net_device *netdev, u32 features)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ u32 changed = netdev->features ^ features;
+ int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;
+
+ if (!(changed & NETIF_F_LRO))
+ return 0;
+
+ netdev->features = features ^ NETIF_F_LRO;
+
+ if (qlcnic_config_hw_lro(adapter, hw_lro))
+ return -EIO;
+
+ if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Translate a CRB offset in the 128M pci map to an address in the 2M map.
+ *
+ * In: 'off' is an offset from the base of CRB space in the 128M pci map.
+ * Returns < 0 if 'off' is not valid,
+ * 1 if window access is needed; '*addr' then points into the indirect
+ * CRB window, which must be set via qlcnic_pci_set_crbwindow_2M() first,
+ * 0 if no window access is needed; '*addr' is the final 2M map address.
+ */
+static int
+qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
+ ulong off, void __iomem **addr)
+{
+ const struct crb_128M_2M_sub_block_map *m;
+
+ if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
+ return -EINVAL;
+
+ off -= QLCNIC_PCI_CRBSPACE;
+
+ /*
+ * Try direct map
+ */
+ m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
+
+ if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
+ *addr = adapter->ahw->pci_base0 + m->start_2M +
+ (off - m->start_128M);
+ return 0;
+ }
+
+ /*
+ * Not in direct map, use crb window
+ */
+ *addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
+ return 1;
+}
+
+/*
+ * In: 'off' is offset from CRB space in 128M pci map
+ * Out: 'off' is 2M pci map addr
+ * side effect: lock crb window
+ */
+static int
+qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
+{
+ u32 window;
+ void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M;
+
+ off -= QLCNIC_PCI_CRBSPACE;
+
+ window = CRB_HI(off);
+ if (window == 0) {
+ dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
+ return -EIO;
+ }
+
+ writel(window, addr);
+ if (readl(addr) != window) {
+ if (printk_ratelimit())
+ dev_warn(&adapter->pdev->dev,
+ "failed to set CRB window to %d off 0x%lx\n",
+ window, off);
+ return -EIO;
+ }
+ return 0;
+}
+
+int
+qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
+{
+ unsigned long flags;
+ int rv;
+ void __iomem *addr = NULL;
+
+ rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
+
+ if (rv == 0) {
+ writel(data, addr);
+ return 0;
+ }
+
+ if (rv > 0) {
+ /* indirect access */
+ write_lock_irqsave(&adapter->ahw->crb_lock, flags);
+ crb_win_lock(adapter);
+ rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
+ if (!rv)
+ writel(data, addr);
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
+ return rv;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid offset: 0x%016lx\n", __func__, off);
+ dump_stack();
+ return -EIO;
+}
+
+u32
+qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
+{
+ unsigned long flags;
+ int rv;
+ u32 data = -1;
+ void __iomem *addr = NULL;
+
+ rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
+
+ if (rv == 0)
+ return readl(addr);
+
+ if (rv > 0) {
+ /* indirect access */
+ write_lock_irqsave(&adapter->ahw->crb_lock, flags);
+ crb_win_lock(adapter);
+ if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
+ data = readl(addr);
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
+ return data;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid offset: 0x%016lx\n", __func__, off);
+ dump_stack();
+ return -1;
+}
+
+
+void __iomem *
+qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
+{
+ void __iomem *addr = NULL;
+
+ WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));
+
+ return addr;
+}
+
+
+static int
+qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
+ u64 addr, u32 *start)
+{
+ u32 window;
+
+ window = OCM_WIN_P3P(addr);
+
+ writel(window, adapter->ahw->ocm_win_crb);
+ /* read back to flush */
+ readl(adapter->ahw->ocm_win_crb);
+
+ *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
+ return 0;
+}
+
+static int
+qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
+ u64 *data, int op)
+{
+ void __iomem *addr;
+ int ret;
+ u32 start;
+
+ mutex_lock(&adapter->ahw->mem_lock);
+
+ ret = qlcnic_pci_set_window_2M(adapter, off, &start);
+ if (ret != 0)
+ goto unlock;
+
+ addr = adapter->ahw->pci_base0 + start;
+
+ if (op == 0) /* read */
+ *data = readq(addr);
+ else /* write */
+ writeq(*data, addr);
+
+unlock:
+ mutex_unlock(&adapter->ahw->mem_lock);
+
+ return ret;
+}
+
+void
+qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
+{
+ void __iomem *addr = adapter->ahw->pci_base0 +
+ QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
+
+ mutex_lock(&adapter->ahw->mem_lock);
+ *data = readq(addr);
+ mutex_unlock(&adapter->ahw->mem_lock);
+}
+
+void
+qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
+{
+ void __iomem *addr = adapter->ahw->pci_base0 +
+ QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
+
+ mutex_lock(&adapter->ahw->mem_lock);
+ writeq(data, addr);
+ mutex_unlock(&adapter->ahw->mem_lock);
+}
+
+#define MAX_CTL_CHECK 1000
+
+int
+qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
+ u64 off, u64 data)
+{
+ int i, j, ret;
+ u32 temp, off8;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* P3 onward, test agent base for MIU and SIU is same */
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX)) {
+ mem_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
+ mem_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
+ return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);
+
+ return -EIO;
+
+correct:
+ off8 = off & ~0xf;
+
+ mutex_lock(&adapter->ahw->mem_lock);
+
+ writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+
+ i = 0;
+ writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE),
+ (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ ret = -EIO;
+ goto done;
+ }
+
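+ /* The test agent works on 16-byte lines: copy the half we are
+ * not writing from the read data latched by the read cycle
+ * issued above, so the untouched 64 bits survive the write
+ * cycle issued below. */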
+ i = (off & 0xf) ? 0 : 2;
+ writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
+ mem_crb + MIU_TEST_AGT_WRDATA(i));
+ writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
+ mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+ i = (off & 0xf) ? 2 : 0;
+
+ writel(data & 0xffffffff,
+ mem_crb + MIU_TEST_AGT_WRDATA(i));
+ writel((data >> 32) & 0xffffffff,
+ mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+
+ writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
+ (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to write through agent\n");
+ ret = -EIO;
+ } else
+ ret = 0;
+
+done:
+ mutex_unlock(&adapter->ahw->mem_lock);
+
+ return ret;
+}
+
+int
+qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
+ u64 off, u64 *data)
+{
+ int j, ret;
+ u32 temp, off8;
+ u64 val;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* P3 onward, test agent base for MIU and SIU is same */
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX)) {
+ mem_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
+ mem_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
+ return qlcnic_pci_mem_access_direct(adapter,
+ off, data, 0);
+ }
+
+ return -EIO;
+
+correct:
+ off8 = off & ~0xf;
+
+ mutex_lock(&adapter->ahw->mem_lock);
+
+ writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+ writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to read through agent\n");
+ ret = -EIO;
+ } else {
+ off8 = MIU_TEST_AGT_RDDATA_LO;
+ if (off & 0xf)
+ off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
+
+ temp = readl(mem_crb + off8 + 4);
+ val = (u64)temp << 32;
+ val |= readl(mem_crb + off8);
+ *data = val;
+ ret = 0;
+ }
+
+ mutex_unlock(&adapter->ahw->mem_lock);
+
+ return ret;
+}
+
+int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
+{
+ int offset, board_type, magic;
+ struct pci_dev *pdev = adapter->pdev;
+
+ offset = QLCNIC_FW_MAGIC_OFFSET;
+ if (qlcnic_rom_fast_read(adapter, offset, &magic))
+ return -EIO;
+
+ if (magic != QLCNIC_BDINFO_MAGIC) {
+ dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
+ magic);
+ return -EIO;
+ }
+
+ offset = QLCNIC_BRDTYPE_OFFSET;
+ if (qlcnic_rom_fast_read(adapter, offset, &board_type))
+ return -EIO;
+
+ adapter->ahw->board_type = board_type;
+
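+ /* a 4G MM board with GPIO bit 15 clear appears to really be
+ * the 10G twisted-pair variant; reclassify it */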
+ if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
+ u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
+ if ((gpio & 0x8000) == 0)
+ board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
+ }
+
+ switch (board_type) {
+ case QLCNIC_BRDTYPE_P3P_HMEZ:
+ case QLCNIC_BRDTYPE_P3P_XG_LOM:
+ case QLCNIC_BRDTYPE_P3P_10G_CX4:
+ case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
+ case QLCNIC_BRDTYPE_P3P_IMEZ:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
+ case QLCNIC_BRDTYPE_P3P_10G_XFP:
+ case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
+ adapter->ahw->port_type = QLCNIC_XGBE;
+ break;
+ case QLCNIC_BRDTYPE_P3P_REF_QG:
+ case QLCNIC_BRDTYPE_P3P_4_GB:
+ case QLCNIC_BRDTYPE_P3P_4_GB_MM:
+ adapter->ahw->port_type = QLCNIC_GBE;
+ break;
+ case QLCNIC_BRDTYPE_P3P_10G_TP:
+ adapter->ahw->port_type = (adapter->portnum < 2) ?
+ QLCNIC_XGBE : QLCNIC_GBE;
+ break;
+ default:
+ dev_err(&pdev->dev, "unknown board type %x\n", board_type);
+ adapter->ahw->port_type = QLCNIC_XGBE;
+ break;
+ }
+
+ return 0;
+}
+
+int
+qlcnic_wol_supported(struct qlcnic_adapter *adapter)
+{
+ u32 wol_cfg;
+
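+ /* WoL must be enabled for this port both in the NVRAM config
+ * and in the runtime config register */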
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ if (wol_cfg & (1UL << adapter->portnum)) {
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ if (wol_cfg & (1 << adapter->portnum))
+ return 1;
+ }
+
+ return 0;
+}
+
+int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+{
+ struct qlcnic_nic_req req;
+ int rv;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64((u64)rate << 32);
+ req.words[1] = cpu_to_le64(state);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv)
+ dev_err(&adapter->pdev->dev, "LED configuration failed.\n");
+
+ return rv;
+}
+
+/* FW dump related functions */
+static u32
+qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
+ u32 *buffer)
+{
+ int i;
+ u32 addr, data;
+ struct __crb *crb = &entry->region.crb;
+ void __iomem *base = adapter->ahw->pci_base0;
+
+ addr = crb->addr;
+
+ for (i = 0; i < crb->no_ops; i++) {
+ QLCNIC_RD_DUMP_REG(addr, base, &data);
+ *buffer++ = cpu_to_le32(addr);
+ *buffer++ = cpu_to_le32(data);
+ addr += crb->stride;
+ }
+ return crb->no_ops * 2 * sizeof(u32);
+}
+
+static u32
+qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, u32 *buffer)
+{
+ int i, k, timeout = 0;
+ void __iomem *base = adapter->ahw->pci_base0;
+ u32 addr, data;
+ u8 no_ops;
+ struct __ctrl *ctr = &entry->region.ctrl;
+ struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
+
+ addr = ctr->addr;
+ no_ops = ctr->no_ops;
+
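+ /* ctr->opcode is a bitmask: each set bit selects one of the
+ * control actions below, applied in ascending bit order. */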
+ for (i = 0; i < no_ops; i++) {
+ for (k = 0; k < 8; k++) {
+ if (!(ctr->opcode & (1 << k)))
+ continue;
+ switch (1 << k) {
+ case QLCNIC_DUMP_WCRB:
+ QLCNIC_WR_DUMP_REG(addr, base, ctr->val1);
+ break;
+ case QLCNIC_DUMP_RWCRB:
+ QLCNIC_RD_DUMP_REG(addr, base, &data);
+ QLCNIC_WR_DUMP_REG(addr, base, data);
+ break;
+ case QLCNIC_DUMP_ANDCRB:
+ QLCNIC_RD_DUMP_REG(addr, base, &data);
+ QLCNIC_WR_DUMP_REG(addr, base,
+ (data & ctr->val2));
+ break;
+ case QLCNIC_DUMP_ORCRB:
+ QLCNIC_RD_DUMP_REG(addr, base, &data);
+ QLCNIC_WR_DUMP_REG(addr, base,
+ (data | ctr->val3));
+ break;
+ case QLCNIC_DUMP_POLLCRB:
+ while (timeout <= ctr->timeout) {
+ QLCNIC_RD_DUMP_REG(addr, base, &data);
+ if ((data & ctr->val2) == ctr->val1)
+ break;
+ msleep(1);
+ timeout++;
+ }
+ if (timeout > ctr->timeout) {
+ dev_info(&adapter->pdev->dev,
+ "Timed out, aborting poll CRB\n");
+ return -EINVAL;
+ }
+ break;
+ case QLCNIC_DUMP_RD_SAVE:
+ if (ctr->index_a)
+ addr = t_hdr->saved_state[ctr->index_a];
+ QLCNIC_RD_DUMP_REG(addr, base, &data);
+ t_hdr->saved_state[ctr->index_v] = data;
+ break;
+ case QLCNIC_DUMP_WRT_SAVED:
+ if (ctr->index_v)
+ data = t_hdr->saved_state[ctr->index_v];
+ else
+ data = ctr->val1;
+ if (ctr->index_a)
+ addr = t_hdr->saved_state[ctr->index_a];
+ QLCNIC_WR_DUMP_REG(addr, base, data);
+ break;
+ case QLCNIC_DUMP_MOD_SAVE_ST:
+ data = t_hdr->saved_state[ctr->index_v];
+ data <<= ctr->shl_val;
+ data >>= ctr->shr_val;
+ if (ctr->val2)
+ data &= ctr->val2;
+ data |= ctr->val3;
+ data += ctr->val1;
+ t_hdr->saved_state[ctr->index_v] = data;
+ break;
+ default:
+ dev_info(&adapter->pdev->dev,
+ "Unknown opcode\n");
+ break;
+ }
+ }
+ addr += ctr->stride;
+ }
+ return 0;
+}
+
+static u32
+qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
+ u32 *buffer)
+{
+ int loop;
+ u32 val, data = 0;
+ struct __mux *mux = &entry->region.mux;
+ void __iomem *base = adapter->ahw->pci_base0;
+
+ val = mux->val;
+ for (loop = 0; loop < mux->no_ops; loop++) {
+ QLCNIC_WR_DUMP_REG(mux->addr, base, val);
+ QLCNIC_RD_DUMP_REG(mux->read_addr, base, &data);
+ *buffer++ = cpu_to_le32(val);
+ *buffer++ = cpu_to_le32(data);
+ val += mux->val_stride;
+ }
+ return 2 * mux->no_ops * sizeof(u32);
+}
+
+static u32
+qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
+ u32 *buffer)
+{
+ int i, loop;
+ u32 cnt, addr, data, que_id = 0;
+ void __iomem *base = adapter->ahw->pci_base0;
+ struct __queue *que = &entry->region.que;
+
+ addr = que->read_addr;
+ cnt = que->read_addr_cnt;
+
+ for (loop = 0; loop < que->no_ops; loop++) {
+ QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id);
+ addr = que->read_addr;
+ for (i = 0; i < cnt; i++) {
+ QLCNIC_RD_DUMP_REG(addr, base, &data);
+ *buffer++ = cpu_to_le32(data);
+ addr += que->read_addr_stride;
+ }
+ que_id += que->stride;
+ }
+ return que->no_ops * cnt * sizeof(u32);
+}
+
+static u32
+qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
+ u32 *buffer)
+{
+ int i;
+ u32 data;
+ void __iomem *addr;
+ struct __ocm *ocm = &entry->region.ocm;
+
+ addr = adapter->ahw->pci_base0 + ocm->read_addr;
+ for (i = 0; i < ocm->no_ops; i++) {
+ data = readl(addr);
+ *buffer++ = cpu_to_le32(data);
+ addr += ocm->read_addr_stride;
+ }
+ return ocm->no_ops * sizeof(u32);
+}
+
+static u32
+qlcnic_read_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
+ u32 *buffer)
+{
+ int i, count = 0;
+ u32 fl_addr, size, val, lck_val, addr;
+ struct __mem *rom = &entry->region.mem;
+ void __iomem *base = adapter->ahw->pci_base0;
+
+ fl_addr = rom->addr;
+ size = rom->size/4;
+lock_try:
+ lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
+ if (!lck_val && count < MAX_CTL_CHECK) {
+ msleep(10);
+ count++;
+ goto lock_try;
+ }
+ writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
+ for (i = 0; i < size; i++) {
+ addr = fl_addr & 0xFFFF0000;
+ QLCNIC_WR_DUMP_REG(FLASH_ROM_WINDOW, base, addr);
+ addr = LSW(fl_addr) + FLASH_ROM_DATA;
+ QLCNIC_RD_DUMP_REG(addr, base, &val);
+ fl_addr += 4;
+ *buffer++ = cpu_to_le32(val);
+ }
+ readl(base + QLCNIC_FLASH_SEM2_ULK);
+ return rom->size;
+}
+
+static u32
+qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, u32 *buffer)
+{
+ int i;
+ u32 cnt, val, data, addr;
+ void __iomem *base = adapter->ahw->pci_base0;
+ struct __cache *l1 = &entry->region.cache;
+
+ val = l1->init_tag_val;
+
+ for (i = 0; i < l1->no_ops; i++) {
+ QLCNIC_WR_DUMP_REG(l1->addr, base, val);
+ QLCNIC_WR_DUMP_REG(l1->ctrl_addr, base, LSW(l1->ctrl_val));
+ addr = l1->read_addr;
+ cnt = l1->read_addr_num;
+ while (cnt) {
+ QLCNIC_RD_DUMP_REG(addr, base, &data);
+ *buffer++ = cpu_to_le32(data);
+ addr += l1->read_addr_stride;
+ cnt--;
+ }
+ val += l1->stride;
+ }
+ return l1->no_ops * l1->read_addr_num * sizeof(u32);
+}
+
+static u32
+qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, u32 *buffer)
+{
+ int i;
+ u32 cnt, val, data, addr;
+ u8 poll_mask, poll_to, time_out = 0;
+ void __iomem *base = adapter->ahw->pci_base0;
+ struct __cache *l2 = &entry->region.cache;
+
+ val = l2->init_tag_val;
+ poll_mask = LSB(MSW(l2->ctrl_val));
+ poll_to = MSB(MSW(l2->ctrl_val));
+
+ for (i = 0; i < l2->no_ops; i++) {
+ QLCNIC_WR_DUMP_REG(l2->addr, base, val);
+ if (LSW(l2->ctrl_val))
+ QLCNIC_WR_DUMP_REG(l2->ctrl_addr, base,
+ LSW(l2->ctrl_val));
+ if (!poll_mask)
+ goto skip_poll;
+ do {
+ QLCNIC_RD_DUMP_REG(l2->ctrl_addr, base, &data);
+ if (!(data & poll_mask))
+ break;
+ msleep(1);
+ time_out++;
+ } while (time_out <= poll_to);
+
+ if (time_out > poll_to) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout exceeded in %s, aborting dump\n",
+ __func__);
+ return -EINVAL;
+ }
+skip_poll:
+ addr = l2->read_addr;
+ cnt = l2->read_addr_num;
+ while (cnt) {
+ QLCNIC_RD_DUMP_REG(addr, base, &data);
+ *buffer++ = cpu_to_le32(data);
+ addr += l2->read_addr_stride;
+ cnt--;
+ }
+ val += l2->stride;
+ }
+ return l2->no_ops * l2->read_addr_num * sizeof(u32);
+}
+
+static u32
+qlcnic_read_memory(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, u32 *buffer)
+{
+ u32 addr, data, test, ret = 0;
+ int i, reg_read;
+ struct __mem *mem = &entry->region.mem;
+ void __iomem *base = adapter->ahw->pci_base0;
+
+ reg_read = mem->size;
+ addr = mem->addr;
+ /* check for data size of multiple of 16 and 16 byte alignment */
+ if ((addr & 0xf) || (reg_read%16)) {
+ dev_info(&adapter->pdev->dev,
+ "Unaligned memory addr:0x%x size:0x%x\n",
+ addr, reg_read);
+ return -EINVAL;
+ }
+
+ mutex_lock(&adapter->ahw->mem_lock);
+
+ while (reg_read != 0) {
+ QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_LO, base, addr);
+ QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_HI, base, 0);
+ QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, base,
+ TA_CTL_ENABLE | TA_CTL_START);
+
+ for (i = 0; i < MAX_CTL_CHECK; i++) {
+ QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, base, &test);
+ if (!(test & TA_CTL_BUSY))
+ break;
+ }
+ if (i == MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to read through agent\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ for (i = 0; i < 4; i++) {
+ QLCNIC_RD_DUMP_REG(MIU_TEST_READ_DATA[i], base, &data);
+ *buffer++ = cpu_to_le32(data);
+ }
+ addr += 16;
+ reg_read -= 16;
+ ret += 16;
+ }
+out:
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return ret;
+}
+
+static u32
+qlcnic_dump_nop(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, u32 *buffer)
+{
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ return 0;
+}
+
+struct qlcnic_dump_operations fw_dump_ops[] = {
+ { QLCNIC_DUMP_NOP, qlcnic_dump_nop },
+ { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
+ { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
+ { QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
+ { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
+ { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
+ { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
+ { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
+ { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
+ { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
+ { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
+ { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
+ { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
+ { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
+ { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
+ { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
+ { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
+ { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
+ { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
+ { QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
+};
+
+/* Check that the captured size matches what the template entry declared */
+static int
+qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
+ u32 size)
+{
+ int ret = 1;
+ if (size != entry->hdr.cap_size) {
+ dev_info(dev,
+ "Invalidate dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
+ entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
+ dev_info(dev, "Aborting further dump capture\n");
+ ret = 0;
+ }
+ return ret;
+}
+
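+/* Walk the template and collect a dump for each entry it describes */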
+int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
+{
+ u32 *buffer;
+ char mesg[64];
+ char *msg[] = {mesg, NULL};
+ int i, k, ops_cnt, ops_index, dump_size = 0;
+ u32 entry_offset, dump, no_entries, buf_offset = 0;
+ struct qlcnic_dump_entry *entry;
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
+
+ if (fw_dump->clr) {
+ dev_info(&adapter->pdev->dev,
+ "Previous dump not cleared, not capturing dump\n");
+ return -EIO;
+ }
+ /* Calculate the size for dump data area only */
+ for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
+ if (i & tmpl_hdr->drv_cap_mask)
+ dump_size += tmpl_hdr->cap_sizes[k];
+ if (!dump_size)
+ return -EIO;
+
+ fw_dump->data = vzalloc(dump_size);
+ if (!fw_dump->data) {
+ dev_info(&adapter->pdev->dev,
+ "Unable to allocate (%d KB) for fw dump\n",
+ dump_size/1024);
+ return -ENOMEM;
+ }
+ buffer = fw_dump->data;
+ fw_dump->size = dump_size;
+ no_entries = tmpl_hdr->num_entries;
+ ops_cnt = ARRAY_SIZE(fw_dump_ops);
+ entry_offset = tmpl_hdr->offset;
+ tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
+ tmpl_hdr->sys_info[1] = adapter->fw_version;
+
+ for (i = 0; i < no_entries; i++) {
+ entry = (void *)tmpl_hdr + entry_offset;
+ if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ entry_offset += entry->hdr.offset;
+ continue;
+ }
+ /* Find the handler for this entry */
+ ops_index = 0;
+ while (ops_index < ops_cnt) {
+ if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
+ break;
+ ops_index++;
+ }
+ if (ops_index == ops_cnt) {
+ dev_info(&adapter->pdev->dev,
+ "Invalid entry type %d, exiting dump\n",
+ entry->hdr.type);
+ goto error;
+ }
+ /* Collect dump for this entry */
+ dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
+ if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
+ dump))
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ buf_offset += entry->hdr.cap_size;
+ entry_offset += entry->hdr.offset;
+ buffer = fw_dump->data + buf_offset;
+ }
+ if (dump_size != buf_offset) {
+ dev_info(&adapter->pdev->dev,
+ "Captured(%d) and expected size(%d) do not match\n",
+ buf_offset, dump_size);
+ goto error;
+ } else {
+ fw_dump->clr = 1;
+ snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
+ adapter->netdev->name);
+ dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
+ fw_dump->size);
+ /* Send a udev event to notify availability of FW dump */
+ kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
+ return 0;
+ }
+error:
+ vfree(fw_dump->data);
+ return -EINVAL;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
new file mode 100644
index 000000000000..312c1c37889d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -0,0 +1,1904 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include "qlcnic.h"
+
+struct crb_addr_pair {
+ u32 addr;
+ u32 data;
+};
+
+#define QLCNIC_MAX_CRB_XFORM 60
+static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];
+
+#define crb_addr_transform(name) \
+ (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \
+ QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20)
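+/*
+ * For example, crb_addr_transform(XDMA) expands to:
+ * crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_XDMA] =
+ * QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA << 20;
+ */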
+
+#define QLCNIC_ADDR_ERROR (0xffffffff)
+
+static void
+qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring);
+
+static int
+qlcnic_check_fw_heartbeat(struct qlcnic_adapter *adapter);
+
+static void crb_addr_transform_setup(void)
+{
+ crb_addr_transform(XDMA);
+ crb_addr_transform(TIMR);
+ crb_addr_transform(SRE);
+ crb_addr_transform(SQN3);
+ crb_addr_transform(SQN2);
+ crb_addr_transform(SQN1);
+ crb_addr_transform(SQN0);
+ crb_addr_transform(SQS3);
+ crb_addr_transform(SQS2);
+ crb_addr_transform(SQS1);
+ crb_addr_transform(SQS0);
+ crb_addr_transform(RPMX7);
+ crb_addr_transform(RPMX6);
+ crb_addr_transform(RPMX5);
+ crb_addr_transform(RPMX4);
+ crb_addr_transform(RPMX3);
+ crb_addr_transform(RPMX2);
+ crb_addr_transform(RPMX1);
+ crb_addr_transform(RPMX0);
+ crb_addr_transform(ROMUSB);
+ crb_addr_transform(SN);
+ crb_addr_transform(QMN);
+ crb_addr_transform(QMS);
+ crb_addr_transform(PGNI);
+ crb_addr_transform(PGND);
+ crb_addr_transform(PGN3);
+ crb_addr_transform(PGN2);
+ crb_addr_transform(PGN1);
+ crb_addr_transform(PGN0);
+ crb_addr_transform(PGSI);
+ crb_addr_transform(PGSD);
+ crb_addr_transform(PGS3);
+ crb_addr_transform(PGS2);
+ crb_addr_transform(PGS1);
+ crb_addr_transform(PGS0);
+ crb_addr_transform(PS);
+ crb_addr_transform(PH);
+ crb_addr_transform(NIU);
+ crb_addr_transform(I2Q);
+ crb_addr_transform(EG);
+ crb_addr_transform(MN);
+ crb_addr_transform(MS);
+ crb_addr_transform(CAS2);
+ crb_addr_transform(CAS1);
+ crb_addr_transform(CAS0);
+ crb_addr_transform(CAM);
+ crb_addr_transform(C2C1);
+ crb_addr_transform(C2C0);
+ crb_addr_transform(SMB);
+ crb_addr_transform(OCM0);
+ crb_addr_transform(I2C0);
+}
+
+void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_rx_buffer *rx_buf;
+ int i, ring;
+
+ recv_ctx = adapter->recv_ctx;
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ for (i = 0; i < rds_ring->num_desc; ++i) {
+ rx_buf = &(rds_ring->rx_buf_arr[i]);
+ if (rx_buf->skb == NULL)
+ continue;
+
+ pci_unmap_single(adapter->pdev,
+ rx_buf->dma,
+ rds_ring->dma_size,
+ PCI_DMA_FROMDEVICE);
+
+ dev_kfree_skb_any(rx_buf->skb);
+ }
+ }
+}
+
+void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_rx_buffer *rx_buf;
+ int i, ring;
+
+ recv_ctx = adapter->recv_ctx;
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ INIT_LIST_HEAD(&rds_ring->free_list);
+
+ rx_buf = rds_ring->rx_buf_arr;
+ for (i = 0; i < rds_ring->num_desc; i++) {
+ list_add_tail(&rx_buf->list,
+ &rds_ring->free_list);
+ rx_buf++;
+ }
+ }
+}
+
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_buffer *cmd_buf;
+ struct qlcnic_skb_frag *buffrag;
+ int i, j;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ cmd_buf = tx_ring->cmd_buf_arr;
+ for (i = 0; i < tx_ring->num_desc; i++) {
+ buffrag = cmd_buf->frag_array;
+ if (buffrag->dma) {
+ pci_unmap_single(adapter->pdev, buffrag->dma,
+ buffrag->length, PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+ for (j = 0; j < cmd_buf->frag_count; j++) {
+ buffrag++;
+ if (buffrag->dma) {
+ pci_unmap_page(adapter->pdev, buffrag->dma,
+ buffrag->length,
+ PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+ }
+ if (cmd_buf->skb) {
+ dev_kfree_skb_any(cmd_buf->skb);
+ cmd_buf->skb = NULL;
+ }
+ cmd_buf++;
+ }
+}
+
+void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ recv_ctx = adapter->recv_ctx;
+
+ if (recv_ctx->rds_rings == NULL)
+ goto skip_rds;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ vfree(rds_ring->rx_buf_arr);
+ rds_ring->rx_buf_arr = NULL;
+ }
+ kfree(recv_ctx->rds_rings);
+
+skip_rds:
+ if (adapter->tx_ring == NULL)
+ return;
+
+ tx_ring = adapter->tx_ring;
+ vfree(tx_ring->cmd_buf_arr);
+ tx_ring->cmd_buf_arr = NULL;
+ kfree(adapter->tx_ring);
+ adapter->tx_ring = NULL;
+}
+
+int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_rx_buffer *rx_buf;
+ int ring, i, size;
+
+ struct qlcnic_cmd_buffer *cmd_buf_arr;
+ struct net_device *netdev = adapter->netdev;
+
+ size = sizeof(struct qlcnic_host_tx_ring);
+ tx_ring = kzalloc(size, GFP_KERNEL);
+ if (tx_ring == NULL) {
+ dev_err(&netdev->dev, "failed to allocate tx ring struct\n");
+ return -ENOMEM;
+ }
+ adapter->tx_ring = tx_ring;
+
+ tx_ring->num_desc = adapter->num_txd;
+ tx_ring->txq = netdev_get_tx_queue(netdev, 0);
+
+ cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
+ if (cmd_buf_arr == NULL) {
+ dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
+ goto err_out;
+ }
+ tx_ring->cmd_buf_arr = cmd_buf_arr;
+
+ recv_ctx = adapter->recv_ctx;
+
+ size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
+ rds_ring = kzalloc(size, GFP_KERNEL);
+ if (rds_ring == NULL) {
+ dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
+ goto err_out;
+ }
+ recv_ctx->rds_rings = rds_ring;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ switch (ring) {
+ case RCV_RING_NORMAL:
+ rds_ring->num_desc = adapter->num_rxd;
+ rds_ring->dma_size = QLCNIC_P3P_RX_BUF_MAX_LEN;
+ rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+
+ case RCV_RING_JUMBO:
+ rds_ring->num_desc = adapter->num_jumbo_rxd;
+ rds_ring->dma_size =
+ QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
+
+ rds_ring->skb_size =
+ rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+ }
+ rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
+ if (rds_ring->rx_buf_arr == NULL) {
+ dev_err(&netdev->dev, "Failed to allocate "
+ "rx buffer ring %d\n", ring);
+ goto err_out;
+ }
+ INIT_LIST_HEAD(&rds_ring->free_list);
+ /*
+ * Now go through all of them, set reference handles
+ * and put them in the queues.
+ */
+ rx_buf = rds_ring->rx_buf_arr;
+ for (i = 0; i < rds_ring->num_desc; i++) {
+ list_add_tail(&rx_buf->list,
+ &rds_ring->free_list);
+ rx_buf->ref_handle = i;
+ rx_buf++;
+ }
+ spin_lock_init(&rds_ring->lock);
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ sds_ring->irq = adapter->msix_entries[ring].vector;
+ sds_ring->adapter = adapter;
+ sds_ring->num_desc = adapter->num_rxd;
+
+ for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
+ INIT_LIST_HEAD(&sds_ring->free_list[i]);
+ }
+
+ return 0;
+
+err_out:
+ qlcnic_free_sw_resources(adapter);
+ return -ENOMEM;
+}
+
+/*
+ * Utility to translate from internal Phantom CRB address
+ * to external PCI CRB address.
+ */
+static u32 qlcnic_decode_crb_addr(u32 addr)
+{
+ int i;
+ u32 base_addr, offset, pci_base;
+
+ crb_addr_transform_setup();
+
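+ /* CRB addresses are carved into 1 MB windows: bits 31:20 select
+ * the hub/agent block, bits 19:0 the offset within it. */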
+ pci_base = QLCNIC_ADDR_ERROR;
+ base_addr = addr & 0xfff00000;
+ offset = addr & 0x000fffff;
+
+ for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) {
+ if (crb_addr_xform[i] == base_addr) {
+ pci_base = i << 20;
+ break;
+ }
+ }
+ if (pci_base == QLCNIC_ADDR_ERROR)
+ return pci_base;
+ else
+ return pci_base + offset;
+}
+
+#define QLCNIC_MAX_ROM_WAIT_USEC 100
+
+static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
+{
+ long timeout = 0;
+ long done = 0;
+
+ cond_resched();
+
+ while (done == 0) {
+ done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
+ done &= 2;
+ if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout reached waiting for rom done");
+ return -EIO;
+ }
+ udelay(1);
+ }
+ return 0;
+}
+
+static int do_rom_fast_read(struct qlcnic_adapter *adapter,
+ u32 addr, u32 *valp)
+{
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+ if (qlcnic_wait_rom_done(adapter)) {
+ dev_err(&adapter->pdev->dev, "Error waiting for rom done\n");
+ return -EIO;
+ }
+ /* reset abyte_cnt and dummy_byte_cnt */
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0);
+ udelay(10);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+
+ *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA);
+ return 0;
+}
+
+static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size)
+{
+ int addridx;
+ int ret = 0;
+
+ for (addridx = addr; addridx < (addr + size); addridx += 4) {
+ int v;
+ ret = do_rom_fast_read(adapter, addridx, &v);
+ if (ret != 0)
+ break;
+ *(__le32 *)bytes = cpu_to_le32(v);
+ bytes += 4;
+ }
+
+ return ret;
+}
+
+int
+qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size)
+{
+ int ret;
+
+ ret = qlcnic_rom_lock(adapter);
+ if (ret < 0)
+ return ret;
+
+ ret = do_rom_fast_read_words(adapter, addr, bytes, size);
+
+ qlcnic_rom_unlock(adapter);
+ return ret;
+}
+
+int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp)
+{
+ int ret;
+
+ if (qlcnic_rom_lock(adapter) != 0)
+ return -EIO;
+
+ ret = do_rom_fast_read(adapter, addr, valp);
+ qlcnic_rom_unlock(adapter);
+ return ret;
+}
+
+int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
+{
+ int addr, val;
+ int i, n, init_delay;
+ struct crb_addr_pair *buf;
+ unsigned offset;
+ u32 off;
+ struct pci_dev *pdev = adapter->pdev;
+
+ QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
+ QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
+
+ qlcnic_rom_lock(adapter);
+ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
+ qlcnic_rom_unlock(adapter);
+
+ /* Init HW CRB block */
+ if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
+ qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
+ dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n);
+ return -EIO;
+ }
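+ /* word 0 carries the 0xcafecafe signature; word 1 packs the CRB
+ * init table offset (low 16 bits) and entry count (high 16) */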
+ offset = n & 0xffffU;
+ n = (n >> 16) & 0xffffU;
+
+ if (n >= 1024) {
+ dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n");
+ return -EIO;
+ }
+
+ buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
+ if (buf == NULL) {
+ dev_err(&pdev->dev, "Unable to calloc memory for rom read.\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < n; i++) {
+ if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
+ qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
+ kfree(buf);
+ return -EIO;
+ }
+
+ buf[i].addr = addr;
+ buf[i].data = val;
+ }
+
+ for (i = 0; i < n; i++) {
+
+ off = qlcnic_decode_crb_addr(buf[i].addr);
+ if (off == QLCNIC_ADDR_ERROR) {
+ dev_err(&pdev->dev, "CRB init value out of range %x\n",
+ buf[i].addr);
+ continue;
+ }
+ off += QLCNIC_PCI_CRBSPACE;
+
+ if (off & 1)
+ continue;
+
+ /* skipping cold reboot MAGIC */
+ if (off == QLCNIC_CAM_RAM(0x1fc))
+ continue;
+ if (off == (QLCNIC_CRB_I2C0 + 0x1c))
+ continue;
+ if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */
+ continue;
+ if (off == (ROMUSB_GLB + 0xa8))
+ continue;
+ if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
+ continue;
+ if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
+ continue;
+ if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
+ continue;
+ if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET)
+ continue;
+ /* skip the function enable register */
+ if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION))
+ continue;
+ if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2))
+ continue;
+ if ((off & 0x0ff00000) == QLCNIC_CRB_SMB)
+ continue;
+
+ init_delay = 1;
+ /* After writing this register, HW needs time for CRB to
+ * quiet down (else crb_window returns 0xffffffff) */
+ if (off == QLCNIC_ROMUSB_GLB_SW_RESET)
+ init_delay = 1000;
+
+ QLCWR32(adapter, off, buf[i].data);
+
+ msleep(init_delay);
+ }
+ kfree(buf);
+
+ /* Initialize protocol process engine */
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0);
+ msleep(1);
+ QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
+ QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
+ return 0;
+}
+
+static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
+
+ do {
+ val = QLCRD32(adapter, CRB_CMDPEG_STATE);
+
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return 0;
+ case PHAN_INITIALIZE_FAILED:
+ goto out_err;
+ default:
+ break;
+ }
+
+ msleep(QLCNIC_CMDPEG_CHECK_DELAY);
+
+ } while (--retries);
+
+ QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+
+out_err:
+ dev_err(&adapter->pdev->dev, "Command Peg initialization not "
+ "complete, state: 0x%x.\n", val);
+ return -EIO;
+}
+
+static int
+qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT;
+
+ do {
+ val = QLCRD32(adapter, CRB_RCVPEG_STATE);
+
+ if (val == PHAN_PEG_RCV_INITIALIZED)
+ return 0;
+
+ msleep(QLCNIC_RCVPEG_CHECK_DELAY);
+
+ } while (--retries);
+
+ if (!retries) {
+ dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
+ "complete, state: 0x%x.\n", val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int
+qlcnic_check_fw_status(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ err = qlcnic_cmd_peg_ready(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_receive_peg_ready(adapter);
+ if (err)
+ return err;
+
+ QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+
+ return err;
+}
+
+int
+qlcnic_setup_idc_param(struct qlcnic_adapter *adapter)
+{
+ int timeo;
+ u32 val;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
+ val = QLC_DEV_GET_DRV(val, adapter->portnum);
+ if ((val & 0x3) != QLCNIC_TYPE_NIC) {
+ dev_err(&adapter->pdev->dev,
+ "Not an Ethernet NIC func=%u\n", val);
+ return -EIO;
+ }
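+ /* bits 1:0 of the partition info carry the function type; the
+ * remaining bits give the physical port */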
+ adapter->physical_port = (val >> 2);
+ if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
+ timeo = QLCNIC_INIT_TIMEOUT_SECS;
+
+ adapter->dev_init_timeo = timeo;
+
+ if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo))
+ timeo = QLCNIC_RESET_TIMEOUT_SECS;
+
+ adapter->reset_ack_timeo = timeo;
+
+ return 0;
+}
+
+static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region,
+ struct qlcnic_flt_entry *region_entry)
+{
+ struct qlcnic_flt_header flt_hdr;
+ struct qlcnic_flt_entry *flt_entry;
+ int i = 0, ret;
+ u32 entry_size;
+
+ memset(region_entry, 0, sizeof(struct qlcnic_flt_entry));
+ ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION,
+ (u8 *)&flt_hdr,
+ sizeof(struct qlcnic_flt_header));
+ if (ret) {
+ dev_warn(&adapter->pdev->dev,
+ "error reading flash layout header\n");
+ return -EIO;
+ }
+
+ entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header);
+ flt_entry = vzalloc(entry_size);
+ if (flt_entry == NULL) {
+ dev_warn(&adapter->pdev->dev, "error allocating memory\n");
+ return -ENOMEM;
+ }
+
+ ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION +
+ sizeof(struct qlcnic_flt_header),
+ (u8 *)flt_entry, entry_size);
+ if (ret) {
+ dev_warn(&adapter->pdev->dev,
+ "error reading flash layout entries\n");
+ goto err_out;
+ }
+
+ while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) {
+ if (flt_entry[i].region == region)
+ break;
+ i++;
+ }
+ if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) {
+ dev_warn(&adapter->pdev->dev,
+ "region=%x not found in %d regions\n", region, i);
+ ret = -EIO;
+ goto err_out;
+ }
+ memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry));
+
+err_out:
+ vfree(flt_entry);
+ return ret;
+}
+
+int
+qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_flt_entry fw_entry;
+ u32 ver = -1, min_ver;
+ int ret;
+
+ if (adapter->ahw->revision_id == QLCNIC_P3P_C0)
+ ret = qlcnic_get_flt_entry(adapter, QLCNIC_C0_FW_IMAGE_REGION,
+ &fw_entry);
+ else
+ ret = qlcnic_get_flt_entry(adapter, QLCNIC_B0_FW_IMAGE_REGION,
+ &fw_entry);
+
+ if (!ret)
+ /* bytes 0-3: signature, bytes 4-7: fw version */
+ qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4,
+ (int *)&ver);
+ else
+ qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET,
+ (int *)&ver);
+
+ ver = QLCNIC_DECODE_VERSION(ver);
+ min_ver = QLCNIC_MIN_FW_VERSION;
+
+ if (ver < min_ver) {
+ dev_err(&adapter->pdev->dev,
+ "firmware version %d.%d.%d unsupported."
+ "Min supported version %d.%d.%d\n",
+ _major(ver), _minor(ver), _build(ver),
+ _major(min_ver), _minor(min_ver), _build(min_ver));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+qlcnic_has_mn(struct qlcnic_adapter *adapter)
+{
+ u32 capability;
+ capability = 0;
+
+ capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
+ if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
+ return 1;
+
+ return 0;
+}
+
+static
+struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section)
+{
+ u32 i;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ __le32 entries = cpu_to_le32(directory->num_entries);
+
+ for (i = 0; i < entries; i++) {
+
+ __le32 offs = cpu_to_le32(directory->findex) +
+ (i * cpu_to_le32(directory->entry_size));
+ __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
+
+ if (tab_type == section)
+ return (struct uni_table_desc *) &unirom[offs];
+ }
+
+ return NULL;
+}
+
+#define FILEHEADER_SIZE (14 * 4)
+
+static int
+qlcnic_validate_header(struct qlcnic_adapter *adapter)
+{
+ const u8 *unirom = adapter->fw->data;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ __le32 fw_file_size = adapter->fw->size;
+ __le32 entries;
+ __le32 entry_size;
+ __le32 tab_size;
+
+ if (fw_file_size < FILEHEADER_SIZE)
+ return -EINVAL;
+
+ entries = cpu_to_le32(directory->num_entries);
+ entry_size = cpu_to_le32(directory->entry_size);
+ tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);
+
+ if (fw_file_size < tab_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ const u8 *unirom = adapter->fw->data;
+ int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ QLCNIC_UNI_BOOTLD_IDX_OFF));
+ __le32 offs;
+ __le32 tab_size;
+ __le32 data_size;
+
+ tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_BOOTLD);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx));
+ descr = (struct uni_data_desc *)&unirom[offs];
+
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+qlcnic_validate_fw(struct qlcnic_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ const u8 *unirom = adapter->fw->data;
+ int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ QLCNIC_UNI_FIRMWARE_IDX_OFF));
+ __le32 offs;
+ __le32 tab_size;
+ __le32 data_size;
+
+ tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_FW);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx));
+ descr = (struct uni_data_desc *)&unirom[offs];
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+qlcnic_validate_product_offs(struct qlcnic_adapter *adapter)
+{
+ struct uni_table_desc *ptab_descr;
+ const u8 *unirom = adapter->fw->data;
+ int mn_present = qlcnic_has_mn(adapter);
+ __le32 entries;
+ __le32 entry_size;
+ __le32 tab_size;
+ u32 i;
+
+ ptab_descr = qlcnic_get_table_desc(unirom,
+ QLCNIC_UNI_DIR_SECT_PRODUCT_TBL);
+ if (!ptab_descr)
+ return -EINVAL;
+
+ entries = cpu_to_le32(ptab_descr->num_entries);
+ entry_size = cpu_to_le32(ptab_descr->entry_size);
+ tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+nomn:
+ for (i = 0; i < entries; i++) {
+
+ __le32 flags, file_chiprev, offs;
+ u8 chiprev = adapter->ahw->revision_id;
+ u32 flagbit;
+
+ offs = cpu_to_le32(ptab_descr->findex) +
+ (i * cpu_to_le32(ptab_descr->entry_size));
+ flags = cpu_to_le32(*((int *)&unirom[offs] +
+ QLCNIC_UNI_FLAGS_OFF));
+ file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
+ QLCNIC_UNI_CHIP_REV_OFF));
+
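+ /* flag bit 1 matches parts where qlcnic_has_mn() reported
+ * onboard memory present, flag bit 2 those without it */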
+ flagbit = mn_present ? 1 : 2;
+
+ if ((chiprev == file_chiprev) &&
+ ((1ULL << flagbit) & flags)) {
+ adapter->file_prd_off = offs;
+ return 0;
+ }
+ }
+ if (mn_present) {
+ mn_present = 0;
+ goto nomn;
+ }
+ return -EINVAL;
+}
+
+static int
+qlcnic_validate_unified_romimage(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_validate_header(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: header validation failed\n");
+ return -EINVAL;
+ }
+
+ if (qlcnic_validate_product_offs(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: product validation failed\n");
+ return -EINVAL;
+ }
+
+ if (qlcnic_validate_bootld(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: bootld validation failed\n");
+ return -EINVAL;
+ }
+
+ if (qlcnic_validate_fw(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: firmware validation failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static
+struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter,
+ u32 section, u32 idx_offset)
+{
+ const u8 *unirom = adapter->fw->data;
+ int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ idx_offset));
+ struct uni_table_desc *tab_desc;
+ __le32 offs;
+
+ tab_desc = qlcnic_get_table_desc(unirom, section);
+
+ if (tab_desc == NULL)
+ return NULL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * idx);
+
+ return (struct uni_data_desc *)&unirom[offs];
+}
+
+static u8 *
+qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter)
+{
+ u32 offs = QLCNIC_BOOTLD_START;
+
+ if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
+ QLCNIC_UNI_DIR_SECT_BOOTLD,
+ QLCNIC_UNI_BOOTLD_IDX_OFF))->findex);
+
+ return (u8 *)&adapter->fw->data[offs];
+}
+
+static u8 *
+qlcnic_get_fw_offs(struct qlcnic_adapter *adapter)
+{
+ u32 offs = QLCNIC_IMAGE_START;
+
+ if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
+ QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex);
+
+ return (u8 *)&adapter->fw->data[offs];
+}
+
+static __le32
+qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ return cpu_to_le32((qlcnic_get_data_desc(adapter,
+ QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF))->size);
+ else
+ return cpu_to_le32(
+ *(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]);
+}
+
+static __le32
+qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
+{
+ struct uni_data_desc *fw_data_desc;
+ const struct firmware *fw = adapter->fw;
+ __le32 major, minor, sub;
+ const u8 *ver_str;
+ int i, ret;
+
+ if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
+ return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]);
+
+ fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF);
+ ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
+ cpu_to_le32(fw_data_desc->size) - 17;
+
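+ /* A "REV=x.y.z" marker is expected within the last 17 bytes of
+ * the firmware section; scan the first 12 offsets for it. */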
+ for (i = 0; i < 12; i++) {
+ if (!strncmp(&ver_str[i], "REV=", 4)) {
+ ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
+ &major, &minor, &sub);
+ if (ret != 3)
+ return 0;
+ else
+ return major + (minor << 8) + (sub << 16);
+ }
+ }
+
+ return 0;
+}
+
+static __le32
+qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
+{
+ const struct firmware *fw = adapter->fw;
+ __le32 bios_ver, prd_off = adapter->file_prd_off;
+
+ if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
+ return cpu_to_le32(
+ *(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]);
+
+ bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
+ + QLCNIC_UNI_BIOS_VERSION_OFF));
+
+ return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
+}
+
+static void qlcnic_rom_lock_recovery(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID))
+ dev_info(&adapter->pdev->dev, "Resetting rom_lock\n");
+
+ qlcnic_pcie_sem_unlock(adapter, 2);
+}
+
+static int
+qlcnic_check_fw_heartbeat(struct qlcnic_adapter *adapter)
+{
+ u32 heartbeat, ret = -EIO;
+ int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
+
+ adapter->heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+
+ do {
+ msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
+ heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ if (heartbeat != adapter->heartbeat) {
+ ret = QLCNIC_RCODE_SUCCESS;
+ break;
+ }
+ } while (--retries);
+
+ return ret;
+}
+
+int
+qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
+{
+ if ((adapter->flags & QLCNIC_FW_HANG) ||
+ qlcnic_check_fw_heartbeat(adapter)) {
+ qlcnic_rom_lock_recovery(adapter);
+ return 1;
+ }
+
+ if (adapter->need_fw_reset)
+ return 1;
+
+ if (adapter->fw)
+ return 1;
+
+ return 0;
+}
+
+static const char *fw_name[] = {
+ QLCNIC_UNIFIED_ROMIMAGE_NAME,
+ QLCNIC_FLASH_ROMIMAGE_NAME,
+};
+
+int
+qlcnic_load_firmware(struct qlcnic_adapter *adapter)
+{
+ u64 *ptr64;
+ u32 i, flashaddr, size;
+ const struct firmware *fw = adapter->fw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ dev_info(&pdev->dev, "loading firmware from %s\n",
+ fw_name[adapter->fw_type]);
+
+ if (fw) {
+ __le64 data;
+
+ size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
+
+ ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter);
+ flashaddr = QLCNIC_BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+
+ size = (__force u32)qlcnic_get_fw_size(adapter) / 8;
+
+ ptr64 = (u64 *)qlcnic_get_fw_offs(adapter);
+ flashaddr = QLCNIC_IMAGE_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+
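+ /* An image size that is not a multiple of 8 leaves trailing
+ * bytes in ptr64[i], the quadword just past the last full one
+ * copied above; write that quadword out whole. */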
+ size = (__force u32)qlcnic_get_fw_size(adapter) % 8;
+ if (size) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+ }
+
+ } else {
+ u64 data;
+ u32 hi, lo;
+ int ret;
+ struct qlcnic_flt_entry bootld_entry;
+
+ ret = qlcnic_get_flt_entry(adapter, QLCNIC_BOOTLD_REGION,
+ &bootld_entry);
+ if (!ret) {
+ size = bootld_entry.size / 8;
+ flashaddr = bootld_entry.start_addr;
+ } else {
+ size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
+ flashaddr = QLCNIC_BOOTLD_START;
+ dev_info(&pdev->dev,
+ "using legacy method to get flash fw region");
+ }
+
+ for (i = 0; i < size; i++) {
+ if (qlcnic_rom_fast_read(adapter,
+ flashaddr, (int *)&lo) != 0)
+ return -EIO;
+ if (qlcnic_rom_fast_read(adapter,
+ flashaddr + 4, (int *)&hi) != 0)
+ return -EIO;
+
+ data = (((u64)hi << 32) | lo);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+ }
+ msleep(1);
+
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020);
+ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e);
+ return 0;
+}
+
+static int
+qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
+{
+ __le32 val;
+ u32 ver, bios, min_size;
+ struct pci_dev *pdev = adapter->pdev;
+ const struct firmware *fw = adapter->fw;
+ u8 fw_type = adapter->fw_type;
+
+ if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
+ if (qlcnic_validate_unified_romimage(adapter))
+ return -EINVAL;
+
+ min_size = QLCNIC_UNI_FW_MIN_SIZE;
+ } else {
+ val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
+ if ((__force u32)val != QLCNIC_BDINFO_MAGIC)
+ return -EINVAL;
+
+ min_size = QLCNIC_FW_MIN_SIZE;
+ }
+
+ if (fw->size < min_size)
+ return -EINVAL;
+
+ val = qlcnic_get_fw_version(adapter);
+ ver = QLCNIC_DECODE_VERSION(val);
+
+ if (ver < QLCNIC_MIN_FW_VERSION) {
+ dev_err(&pdev->dev,
+ "%s: firmware version %d.%d.%d unsupported\n",
+ fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
+ return -EINVAL;
+ }
+
+ val = qlcnic_get_bios_version(adapter);
+ qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios);
+ if ((__force u32)val != bios) {
+ dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
+ fw_name[fw_type]);
+ return -EINVAL;
+ }
+
+ QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
+ return 0;
+}
+
+static void
+qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
+{
+ u8 fw_type;
+
+ switch (adapter->fw_type) {
+ case QLCNIC_UNKNOWN_ROMIMAGE:
+ fw_type = QLCNIC_UNIFIED_ROMIMAGE;
+ break;
+
+ case QLCNIC_UNIFIED_ROMIMAGE:
+ default:
+ fw_type = QLCNIC_FLASH_ROMIMAGE;
+ break;
+ }
+
+ adapter->fw_type = fw_type;
+}
+
+
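+/*
+ * Try each known firmware image type in preference order (unified
+ * image first) until request_firmware() succeeds and the image passes
+ * validation; QLCNIC_FLASH_ROMIMAGE means no file image is used and
+ * the adapter boots from flash.
+ */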
+void qlcnic_request_firmware(struct qlcnic_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int rc;
+
+ adapter->fw_type = QLCNIC_UNKNOWN_ROMIMAGE;
+
+next:
+ qlcnic_get_next_fwtype(adapter);
+
+ if (adapter->fw_type == QLCNIC_FLASH_ROMIMAGE) {
+ adapter->fw = NULL;
+ } else {
+ rc = request_firmware(&adapter->fw,
+ fw_name[adapter->fw_type], &pdev->dev);
+ if (rc != 0)
+ goto next;
+
+ rc = qlcnic_validate_firmware(adapter);
+ if (rc != 0) {
+ release_firmware(adapter->fw);
+ msleep(1);
+ goto next;
+ }
+ }
+}
+
+void
+qlcnic_release_firmware(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fw)
+ release_firmware(adapter->fw);
+ adapter->fw = NULL;
+}
+
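+/*
+ * Decode an asynchronous link event: msg->body[1] carries the cable
+ * OUI, cable length and link speed, msg->body[2] the link status,
+ * module type, duplex, autoneg and loopback state.
+ */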
+static void
+qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
+ struct qlcnic_fw_msg *msg)
+{
+ u32 cable_OUI;
+ u16 cable_len;
+ u16 link_speed;
+ u8 link_status, module, duplex, autoneg;
+ u8 lb_status = 0;
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->has_link_events = 1;
+
+ cable_OUI = msg->body[1] & 0xffffffff;
+ cable_len = (msg->body[1] >> 32) & 0xffff;
+ link_speed = (msg->body[1] >> 48) & 0xffff;
+
+ link_status = msg->body[2] & 0xff;
+ duplex = (msg->body[2] >> 16) & 0xff;
+ autoneg = (msg->body[2] >> 24) & 0xff;
+ lb_status = (msg->body[2] >> 32) & 0x3;
+
+ module = (msg->body[2] >> 8) & 0xff;
+ if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
+ dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, "
+ "length %d\n", cable_OUI, cable_len);
+ else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
+ dev_info(&netdev->dev, "unsupported cable length %d\n",
+ cable_len);
+
+ if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
+ lb_status == QLCNIC_ELB_MODE))
+ adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;
+
+ qlcnic_advert_link_change(adapter, link_status);
+
+ if (duplex == LINKEVENT_FULL_DUPLEX)
+ adapter->link_duplex = DUPLEX_FULL;
+ else
+ adapter->link_duplex = DUPLEX_HALF;
+
+ adapter->module_type = module;
+ adapter->link_autoneg = autoneg;
+ adapter->link_speed = link_speed;
+}
+
+static void
+qlcnic_handle_fw_message(int desc_cnt, int index,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_fw_msg msg;
+ struct status_desc *desc;
+ struct qlcnic_adapter *adapter;
+ struct device *dev;
+ int i = 0, opcode, ret;
+
+ while (desc_cnt > 0 && i < 8) {
+ desc = &sds_ring->desc_head[index];
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
+
+ index = get_next_index(index, sds_ring->num_desc);
+ desc_cnt--;
+ }
+
+ adapter = sds_ring->adapter;
+ dev = &adapter->pdev->dev;
+ opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
+
+ switch (opcode) {
+ case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
+ qlcnic_handle_linkevent(adapter, &msg);
+ break;
+ case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
+ ret = (u32)(msg.body[1]);
+ switch (ret) {
+ case 0:
+ adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
+ break;
+ case 1:
+ dev_info(dev, "loopback already in progress\n");
+ adapter->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
+ break;
+ case 2:
+ dev_info(dev, "loopback cable is not connected\n");
+ adapter->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
+ break;
+ default:
+ dev_info(dev, "loopback configure request failed,"
+ " ret %x\n", ret);
+ adapter->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring,
+ struct qlcnic_rx_buffer *buffer)
+{
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct pci_dev *pdev = adapter->pdev;
+
+ skb = dev_alloc_skb(rds_ring->skb_size);
+ if (!skb) {
+ adapter->stats.skb_alloc_failure++;
+ return -ENOMEM;
+ }
+
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ dma = pci_map_single(pdev, skb->data,
+ rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(pdev, dma)) {
+ adapter->stats.rx_dma_map_error++;
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+
+ buffer->skb = skb;
+ buffer->dma = dma;
+
+ return 0;
+}
+
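+/*
+ * Detach the skb of a completed receive: unmap its DMA buffer and set
+ * the checksum state from the hardware result when RX checksum
+ * offload is enabled.
+ */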
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum)
+{
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ if (unlikely(buffer->skb == NULL)) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
+ PCI_DMA_FROMDEVICE);
+
+ skb = buffer->skb;
+
+ if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
+ (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
+ adapter->stats.csummed++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb_checksum_none_assert(skb);
+ }
+
+ skb->dev = adapter->netdev;
+
+ buffer->skb = NULL;
+
+ return skb;
+}
+
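+/*
+ * Strip the VLAN header from a tagged frame and validate the tag
+ * against the port VLAN id (PVID) and the tagging configuration;
+ * returns non-zero if the frame should be dropped.
+ */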
+static inline int
+qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb,
+ u16 *vlan_tag)
+{
+ struct ethhdr *eth_hdr;
+
+ if (!__vlan_get_tag(skb, vlan_tag)) {
+ eth_hdr = (struct ethhdr *) skb->data;
+ memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ }
+ if (!adapter->pvid)
+ return 0;
+
+ if (*vlan_tag == adapter->pvid) {
+ /* Outer vlan tag. Packet should follow non-vlan path */
+ *vlan_tag = 0xffff;
+ return 0;
+ }
+ if (adapter->flags & QLCNIC_TAGGING_ENABLED)
+ return 0;
+
+ return -EINVAL;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_process_rcv(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, pkt_offset;
+ u16 vid = 0xffff;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ length = qlcnic_get_sts_totallength(sts_data0);
+ cksum = qlcnic_get_sts_status(sts_data0);
+ pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+ if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, vid);
+
+ napi_gro_receive(&sds_ring->napi, skb);
+
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
+#define QLC_TCP_HDR_SIZE 20
+#define QLC_TCP_TS_OPTION_SIZE 12
+#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
+
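+/*
+ * Handle a firmware-aggregated (LRO) frame: the IP total length and
+ * checksum, the TCP PSH flag and the sequence number are rewritten to
+ * match the aggregated payload before the skb is passed up the stack.
+ */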
+static struct qlcnic_rx_buffer *
+qlcnic_process_lro(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0, u64 sts_data1)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct iphdr *iph;
+ struct tcphdr *th;
+ bool push, timestamp;
+ int l2_hdr_offset, l4_hdr_offset;
+ int index;
+ u16 lro_length, length, data_offset;
+ u32 seq_number;
+ u16 vid = 0xffff;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_lro_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
+ lro_length = qlcnic_get_lro_sts_length(sts_data0);
+ l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
+ l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
+ push = qlcnic_get_lro_sts_push_flag(sts_data0);
+ seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return buffer;
+
+ if (timestamp)
+ data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
+ else
+ data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
+
+ skb_put(skb, lro_length + data_offset);
+
+ skb_pull(skb, l2_hdr_offset);
+
+ if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ iph = (struct iphdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ iph->tot_len = htons(length);
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ th->psh = push;
+ th->seq = htonl(seq_number);
+
+ length = skb->len;
+
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, vid);
+ netif_receive_skb(skb);
+
+ adapter->stats.lro_pkts++;
+ adapter->stats.lrobytes += length;
+
+ return buffer;
+}
+
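+/*
+ * Main NAPI receive path: walk up to @max host-owned status
+ * descriptors, dispatch each by opcode, hand consumed buffers back to
+ * their RDS rings and write the new consumer index to hardware.
+ */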
+int
+qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct list_head *cur;
+ struct status_desc *desc;
+ struct qlcnic_rx_buffer *rxbuf;
+ u64 sts_data0, sts_data1;
+
+ int count = 0;
+ int opcode, ring, desc_cnt;
+ u32 consumer = sds_ring->consumer;
+
+ while (count < max) {
+ desc = &sds_ring->desc_head[consumer];
+ sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+ if (!(sts_data0 & STATUS_OWNER_HOST))
+ break;
+
+ desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+ opcode = qlcnic_get_sts_opcode(sts_data0);
+
+ switch (opcode) {
+ case QLCNIC_RXPKT_DESC:
+ case QLCNIC_OLD_RXPKT_DESC:
+ case QLCNIC_SYN_OFFLOAD:
+ ring = qlcnic_get_sts_type(sts_data0);
+ rxbuf = qlcnic_process_rcv(adapter, sds_ring,
+ ring, sts_data0);
+ break;
+ case QLCNIC_LRO_DESC:
+ ring = qlcnic_get_lro_sts_type(sts_data0);
+ sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
+ rxbuf = qlcnic_process_lro(adapter, sds_ring,
+ ring, sts_data0, sts_data1);
+ break;
+ case QLCNIC_RESPONSE_DESC:
+ qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
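+ /* fall through: a response descriptor produces no rx buffer */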
+ default:
+ goto skip;
+ }
+
+ WARN_ON(desc_cnt > 1);
+
+ if (likely(rxbuf))
+ list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+ else
+ adapter->stats.null_rxbuf++;
+
+skip:
+ for (; desc_cnt > 0; desc_cnt--) {
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] =
+ cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ }
+ count++;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ struct qlcnic_host_rds_ring *rds_ring =
+ &adapter->recv_ctx->rds_rings[ring];
+
+ if (!list_empty(&sds_ring->free_list[ring])) {
+ list_for_each(cur, &sds_ring->free_list[ring]) {
+ rxbuf = list_entry(cur,
+ struct qlcnic_rx_buffer, list);
+ qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
+ }
+ spin_lock(&rds_ring->lock);
+ list_splice_tail_init(&sds_ring->free_list[ring],
+ &rds_ring->free_list);
+ spin_unlock(&rds_ring->lock);
+ }
+
+ qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
+ }
+
+ if (count) {
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+ }
+
+ return count;
+}
+
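+/*
+ * Refill an RDS ring from its free list, allocating fresh skbs where
+ * needed, and ring the producer doorbell once for the whole batch.
+ */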
+void
+qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring)
+{
+ struct rcv_desc *pdesc;
+ struct qlcnic_rx_buffer *buffer;
+ int count = 0;
+ u32 producer;
+ struct list_head *head;
+
+ producer = rds_ring->producer;
+
+ head = &rds_ring->free_list;
+ while (!list_empty(head)) {
+
+ buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+
+ count++;
+ list_del(&buffer->list);
+
+ /* make a rcv descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+ pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+
+ if (count) {
+ rds_ring->producer = producer;
+ writel((producer-1) & (rds_ring->num_desc-1),
+ rds_ring->crb_rcv_producer);
+ }
+}
+
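+/*
+ * Trylock variant used from the receive path: if the ring lock is
+ * contended the refill is skipped and retried on a later poll.
+ */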
+static void
+qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring)
+{
+ struct rcv_desc *pdesc;
+ struct qlcnic_rx_buffer *buffer;
+ int count = 0;
+ uint32_t producer;
+ struct list_head *head;
+
+ if (!spin_trylock(&rds_ring->lock))
+ return;
+
+ producer = rds_ring->producer;
+
+ head = &rds_ring->free_list;
+ while (!list_empty(head)) {
+
+ buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+
+ count++;
+ list_del(&buffer->list);
+
+ /* make a rcv descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+
+ if (count) {
+ rds_ring->producer = producer;
+ writel((producer - 1) & (rds_ring->num_desc - 1),
+ rds_ring->crb_rcv_producer);
+ }
+ spin_unlock(&rds_ring->lock);
+}
+
+static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
+{
+ int i;
+ unsigned char *data = skb->data;
+
+ printk(KERN_INFO "\n");
+ for (i = 0; i < skb->len; i++) {
+ QLCDB(adapter, DRV, "%02x ", data[i]);
+ if ((i & 0x0f) == 8)
+ printk(KERN_INFO "\n");
+ }
+}
+
+void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0)
+{
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, pkt_offset;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_sts_refhandle(sts_data0);
+ length = qlcnic_get_sts_totallength(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return;
+
+ cksum = qlcnic_get_sts_status(sts_data0);
+ pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+ if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
+ adapter->diag_cnt++;
+ else
+ dump_skb(skb, adapter);
+
+ dev_kfree_skb_any(skb);
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+}
+
+void
+qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct status_desc *desc;
+ u64 sts_data0;
+ int ring, opcode, desc_cnt;
+
+ u32 consumer = sds_ring->consumer;
+
+ desc = &sds_ring->desc_head[consumer];
+ sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+ if (!(sts_data0 & STATUS_OWNER_HOST))
+ return;
+
+ desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+ opcode = qlcnic_get_sts_opcode(sts_data0);
+ switch (opcode) {
+ case QLCNIC_RESPONSE_DESC:
+ qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
+ break;
+ default:
+ ring = qlcnic_get_sts_type(sts_data0);
+ qlcnic_process_rcv_diag(adapter, sds_ring, ring, sts_data0);
+ break;
+ }
+
+ for (; desc_cnt > 0; desc_cnt--) {
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ }
+
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+}
+
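+/*
+ * Assemble the MAC address from two 32-bit register words, most
+ * significant bytes first: mac_high supplies bytes 0-1 and mac_low
+ * bytes 2-5, with an alternate packing for the secondary address.
+ */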
+void
+qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
+ u8 alt_mac, u8 *mac)
+{
+ u32 mac_low, mac_high;
+ int i;
+
+ mac_low = off1;
+ mac_high = off2;
+
+ if (alt_mac) {
+ mac_low |= (mac_low >> 16) | (mac_high << 16);
+ mac_high >>= 16;
+ }
+
+ for (i = 0; i < 2; i++)
+ mac[i] = (u8)(mac_high >> ((1 - i) * 8));
+ for (i = 2; i < 6; i++)
+ mac[i] = (u8)(mac_low >> ((5 - i) * 8));
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
new file mode 100644
index 000000000000..eac19e7d2761
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -0,0 +1,4509 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+
+#include "qlcnic.h"
+
+#include <linux/swab.h>
+#include <linux/dma-mapping.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/sysfs.h>
+#include <linux/aer.h>
+#include <linux/log2.h>
+
+MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
+MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
+
+char qlcnic_driver_name[] = "qlcnic";
+static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
+ "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
+
+static struct workqueue_struct *qlcnic_wq;
+static int qlcnic_mac_learn;
+module_param(qlcnic_mac_learn, int, 0444);
+MODULE_PARM_DESC(qlcnic_mac_learn, "MAC filter (0=disabled, 1=enabled)");
+
+static int use_msi = 1;
+module_param(use_msi, int, 0444);
+MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
+
+static int use_msi_x = 1;
+module_param(use_msi_x, int, 0444);
+MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
+
+static int auto_fw_reset = 1;
+module_param(auto_fw_reset, int, 0644);
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
+
+static int load_fw_file;
+module_param(load_fw_file, int, 0444);
+MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
+
+static int qlcnic_config_npars;
+module_param(qlcnic_config_npars, int, 0444);
+MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
+
+static int __devinit qlcnic_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+static void __devexit qlcnic_remove(struct pci_dev *pdev);
+static int qlcnic_open(struct net_device *netdev);
+static int qlcnic_close(struct net_device *netdev);
+static void qlcnic_tx_timeout(struct net_device *netdev);
+static void qlcnic_attach_work(struct work_struct *work);
+static void qlcnic_fwinit_work(struct work_struct *work);
+static void qlcnic_fw_poll_work(struct work_struct *work);
+static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
+ work_func_t func, int delay);
+static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
+static int qlcnic_poll(struct napi_struct *napi, int budget);
+static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void qlcnic_poll_controller(struct net_device *netdev);
+#endif
+
+static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
+
+static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
+static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
+static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
+
+static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
+static irqreturn_t qlcnic_intr(int irq, void *data);
+static irqreturn_t qlcnic_msi_intr(int irq, void *data);
+static irqreturn_t qlcnic_msix_intr(int irq, void *data);
+
+static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
+static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
+static int qlcnic_start_firmware(struct qlcnic_adapter *);
+
+static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
+static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
+static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
+static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
+static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
+static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
+static void qlcnic_vlan_rx_add(struct net_device *, u16);
+static void qlcnic_vlan_rx_del(struct net_device *, u16);
+
+/* PCI Device ID Table */
+#define ENTRY(device) \
+ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
+ .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
+
+#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
+
+static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
+
+inline void
+qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(tx_ring->producer, tx_ring->crb_cmd_producer);
+}
+
+static const u32 msi_tgt_status[8] = {
+ ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
+ ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
+ ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
+ ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
+};
+
+static const
+struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
+
+static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(0, sds_ring->crb_intr_mask);
+}
+
+static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ writel(0x1, sds_ring->crb_intr_mask);
+
+ if (!QLCNIC_IS_MSI_FAMILY(adapter))
+ writel(0xfbff, adapter->tgt_mask_reg);
+}
+
+static int
+qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
+{
+ int size = sizeof(struct qlcnic_host_sds_ring) * count;
+
+ recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
+
+ return recv_ctx->sds_rings == NULL;
+}
+
+static void
+qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
+{
+ kfree(recv_ctx->sds_rings);
+
+ recv_ctx->sds_rings = NULL;
+}
+
+static int
+qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ return -ENOMEM;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ if (ring == adapter->max_sds_rings - 1)
+ netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
+ QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
+ else
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
+ }
+
+ return 0;
+}
+
+static void
+qlcnic_napi_del(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_del(&sds_ring->napi);
+ }
+
+ qlcnic_free_sds_rings(adapter->recv_ctx);
+}
+
+static void
+qlcnic_napi_enable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ napi_enable(&sds_ring->napi);
+ qlcnic_enable_int(sds_ring);
+ }
+}
+
+static void
+qlcnic_napi_disable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ qlcnic_disable_int(sds_ring);
+ napi_synchronize(&sds_ring->napi);
+ napi_disable(&sds_ring->napi);
+ }
+}
+
+static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
+{
+ memset(&adapter->stats, 0, sizeof(adapter->stats));
+}
+
+static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
+{
+ u32 control;
+ int pos;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_dword(pdev, pos, &control);
+ if (enable)
+ control |= PCI_MSIX_FLAGS_ENABLE;
+ else
+ control = 0;
+ pci_write_config_dword(pdev, pos, control);
+ }
+}
+
+static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ adapter->msix_entries[i].entry = i;
+}
+
+static int
+qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
+{
+ u8 mac_addr[ETH_ALEN];
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
+ return -EIO;
+
+ memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
+ memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
+ memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
+
+ /* set station address */
+
+ if (!is_valid_ether_addr(netdev->perm_addr))
+ dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
+ netdev->dev_addr);
+
+ return 0;
+}
+
+static int qlcnic_set_mac(struct net_device *netdev, void *p)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED)
+ return -EOPNOTSUPP;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ netif_device_detach(netdev);
+ qlcnic_napi_disable(adapter);
+ }
+
+ memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ qlcnic_set_multi(adapter->netdev);
+
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ netif_device_attach(netdev);
+ qlcnic_napi_enable(adapter);
+ }
+ return 0;
+}
+
+static const struct net_device_ops qlcnic_netdev_ops = {
+ .ndo_open = qlcnic_open,
+ .ndo_stop = qlcnic_close,
+ .ndo_start_xmit = qlcnic_xmit_frame,
+ .ndo_get_stats = qlcnic_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = qlcnic_set_multi,
+ .ndo_set_mac_address = qlcnic_set_mac,
+ .ndo_change_mtu = qlcnic_change_mtu,
+ .ndo_fix_features = qlcnic_fix_features,
+ .ndo_set_features = qlcnic_set_features,
+ .ndo_tx_timeout = qlcnic_tx_timeout,
+ .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
+ .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = qlcnic_poll_controller,
+#endif
+};
+
+static struct qlcnic_nic_template qlcnic_ops = {
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_config_led,
+ .start_firmware = qlcnic_start_firmware
+};
+
+static struct qlcnic_nic_template qlcnic_vf_ops = {
+ .config_bridged_mode = qlcnicvf_config_bridged_mode,
+ .config_led = qlcnicvf_config_led,
+ .start_firmware = qlcnicvf_start_firmware
+};
+
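+/*
+ * Try to enable MSI-X with num_msix vectors; on a partial grant retry
+ * with the largest power of two not exceeding the vector count the
+ * PCI core reported as available.
+ */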
+static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int err = -1;
+
+ adapter->max_sds_rings = 1;
+ adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
+ qlcnic_set_msix_bit(pdev, 0);
+
+ if (adapter->msix_supported) {
+ enable_msix:
+ qlcnic_init_msix_entries(adapter, num_msix);
+ err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
+ if (err == 0) {
+ adapter->flags |= QLCNIC_MSIX_ENABLED;
+ qlcnic_set_msix_bit(pdev, 1);
+
+ adapter->max_sds_rings = num_msix;
+
+ dev_info(&pdev->dev, "using msi-x interrupts\n");
+ return err;
+ }
+ if (err > 0) {
+ num_msix = rounddown_pow_of_two(err);
+ if (num_msix)
+ goto enable_msix;
+ }
+ }
+ return err;
+}
+
+static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
+{
+ const struct qlcnic_legacy_intr_set *legacy_intrp;
+ struct pci_dev *pdev = adapter->pdev;
+
+ if (use_msi && !pci_enable_msi(pdev)) {
+ adapter->flags |= QLCNIC_MSI_ENABLED;
+ adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
+ msi_tgt_status[adapter->ahw->pci_func]);
+ dev_info(&pdev->dev, "using msi interrupts\n");
+ adapter->msix_entries[0].vector = pdev->irq;
+ return;
+ }
+
+ legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
+
+ adapter->int_vec_bit = legacy_intrp->int_vec_bit;
+ adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
+ legacy_intrp->tgt_status_reg);
+ adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
+ legacy_intrp->tgt_mask_reg);
+ adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
+
+ adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
+ ISR_INT_STATE_REG);
+ dev_info(&pdev->dev, "using legacy interrupts\n");
+ adapter->msix_entries[0].vector = pdev->irq;
+}
+
+static void
+qlcnic_setup_intr(struct qlcnic_adapter *adapter)
+{
+ int num_msix;
+
+ if (adapter->msix_supported) {
+ num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
+ QLCNIC_DEF_NUM_STS_DESC_RINGS));
+ } else {
+ num_msix = 1;
+ }
+
+ if (!qlcnic_enable_msix(adapter, num_msix))
+ return;
+
+ qlcnic_enable_msi_legacy(adapter);
+}
+
+static void
+qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
+{
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ pci_disable_msix(adapter->pdev);
+ if (adapter->flags & QLCNIC_MSI_ENABLED)
+ pci_disable_msi(adapter->pdev);
+}
+
+static void
+qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw->pci_base0 != NULL)
+ iounmap(adapter->ahw->pci_base0);
+}
+
+static int
+qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_pci_info *pci_info;
+ int i, ret = 0;
+ u8 pfn;
+
+ pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+
+ adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
+ QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
+ if (!adapter->npars) {
+ ret = -ENOMEM;
+ goto err_pci_info;
+ }
+
+ adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
+ QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
+ if (!adapter->eswitch) {
+ ret = -ENOMEM;
+ goto err_npars;
+ }
+
+ ret = qlcnic_get_pci_info(adapter, pci_info);
+ if (ret)
+ goto err_eswitch;
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ pfn = pci_info[i].id;
+ if (pfn > QLCNIC_MAX_PCI_FUNC) {
+ ret = QL_STATUS_INVALID_PARAM;
+ goto err_eswitch;
+ }
+ adapter->npars[pfn].active = (u8)pci_info[i].active;
+ adapter->npars[pfn].type = (u8)pci_info[i].type;
+ adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
+ adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
+ adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
+ }
+
+ for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+ adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+
+ kfree(pci_info);
+ return 0;
+
+err_eswitch:
+ kfree(adapter->eswitch);
+ adapter->eswitch = NULL;
+err_npars:
+ kfree(adapter->npars);
+ adapter->npars = NULL;
+err_pci_info:
+ kfree(pci_info);
+
+ return ret;
+}
+
+static int
+qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
+{
+ u8 id;
+ u32 ref_count;
+ int i, ret = 1;
+ u32 data = QLCNIC_MGMT_FUNC;
+ void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
+
+ /* If other drivers are not in use set their privilege level */
+ ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+ ret = qlcnic_api_lock(adapter);
+ if (ret)
+ goto err_lock;
+
+ if (qlcnic_config_npars) {
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ id = i;
+ if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
+ id == adapter->ahw->pci_func)
+ continue;
+ data |= (qlcnic_config_npars &
+ QLC_DEV_SET_DRV(0xf, id));
+ }
+ } else {
+ data = readl(priv_op);
+ data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) |
+ (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
+ adapter->ahw->pci_func));
+ }
+ writel(data, priv_op);
+ qlcnic_api_unlock(adapter);
+err_lock:
+ return ret;
+}
+
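+/*
+ * Derive the PCI function number from the MSI-X table offset and use
+ * the driver-operation-mode register to decide whether this function
+ * runs privileged (bare metal) or as a non-privileged VF.
+ */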
+static void
+qlcnic_check_vf(struct qlcnic_adapter *adapter)
+{
+ void __iomem *msix_base_addr;
+ void __iomem *priv_op;
+ u32 func;
+ u32 msix_base;
+ u32 op_mode, priv_level;
+
+ /* Determine FW API version */
+ adapter->fw_hal_version = readl(adapter->ahw->pci_base0 +
+ QLCNIC_FW_API);
+
+ /* Find PCI function number */
+ pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
+ msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
+ msix_base = readl(msix_base_addr);
+ func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
+ adapter->ahw->pci_func = func;
+
+ /* Determine function privilege level */
+ priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
+ op_mode = readl(priv_op);
+ if (op_mode == QLC_DEV_DRV_DEFAULT)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
+
+ if (priv_level == QLCNIC_NON_PRIV_FUNC) {
+ adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d Non Privileged function\n",
+ adapter->fw_hal_version);
+ adapter->nic_ops = &qlcnic_vf_ops;
+ } else {
+ adapter->nic_ops = &qlcnic_ops;
+ }
+}
+
+static int
+qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
+{
+ void __iomem *mem_ptr0 = NULL;
+ resource_size_t mem_base;
+ unsigned long mem_len, pci_len0 = 0;
+
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* remap phys address */
+ mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
+ mem_len = pci_resource_len(pdev, 0);
+
+ if (mem_len == QLCNIC_PCI_2MB_SIZE) {
+
+ mem_ptr0 = pci_ioremap_bar(pdev, 0);
+ if (mem_ptr0 == NULL) {
+ dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+ return -EIO;
+ }
+ pci_len0 = mem_len;
+ } else {
+ return -EIO;
+ }
+
+ dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
+
+ adapter->ahw->pci_base0 = mem_ptr0;
+ adapter->ahw->pci_len0 = pci_len0;
+
+ qlcnic_check_vf(adapter);
+
+ adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(
+ adapter->ahw->pci_func)));
+
+ return 0;
+}
+
+static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int i, found = 0;
+
+ for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
+ if (qlcnic_boards[i].vendor == pdev->vendor &&
+ qlcnic_boards[i].device == pdev->device &&
+ qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
+ qlcnic_boards[i].sub_device == pdev->subsystem_device) {
+ sprintf(name, "%pM: %s" ,
+ adapter->mac_addr,
+ qlcnic_boards[i].short_name);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
+}
+
+static void
+qlcnic_check_options(struct qlcnic_adapter *adapter)
+{
+ u32 fw_major, fw_minor, fw_build, prev_fw_version;
+ struct pci_dev *pdev = adapter->pdev;
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+
+ prev_fw_version = adapter->fw_version;
+
+ fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+ adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+ if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC) {
+ if (fw_dump->tmpl_hdr == NULL ||
+ adapter->fw_version > prev_fw_version) {
+ if (fw_dump->tmpl_hdr)
+ vfree(fw_dump->tmpl_hdr);
+ if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
+ dev_info(&pdev->dev,
+ "Supports FW dump capability\n");
+ }
+ }
+
+ dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
+ fw_major, fw_minor, fw_build);
+ if (adapter->ahw->port_type == QLCNIC_XGBE) {
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
+ } else {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+ }
+
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
+ } else if (adapter->ahw->port_type == QLCNIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
+ }
+
+ adapter->msix_supported = !!use_msi_x;
+
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+static int
+qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
+{
+ int err;
+ struct qlcnic_info nic_info;
+
+ err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
+ if (err)
+ return err;
+
+ adapter->physical_port = (u8)nic_info.phys_port;
+ adapter->switch_mode = nic_info.switch_mode;
+ adapter->max_tx_ques = nic_info.max_tx_ques;
+ adapter->max_rx_ques = nic_info.max_rx_ques;
+ adapter->capabilities = nic_info.capabilities;
+ adapter->max_mac_filters = nic_info.max_mac_filters;
+ adapter->max_mtu = nic_info.max_mtu;
+
+ if (adapter->capabilities & BIT_6)
+ adapter->flags |= QLCNIC_ESWITCH_ENABLED;
+ else
+ adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+
+ return err;
+}
+
+static void
+qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ if (esw_cfg->discard_tagged)
+ adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
+ else
+ adapter->flags |= QLCNIC_TAGGING_ENABLED;
+
+ if (esw_cfg->vlan_id)
+ adapter->pvid = esw_cfg->vlan_id;
+ else
+ adapter->pvid = 0;
+}
+
+static void
+qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ set_bit(vid, adapter->vlans);
+}
+
+static void
+qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
+ clear_bit(vid, adapter->vlans);
+}
+
+static void
+qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
+ QLCNIC_PROMISC_DISABLED);
+
+ if (esw_cfg->mac_anti_spoof)
+ adapter->flags |= QLCNIC_MACSPOOF;
+
+ if (!esw_cfg->mac_override)
+ adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
+
+ if (!esw_cfg->promisc_mode)
+ adapter->flags |= QLCNIC_PROMISC_DISABLED;
+
+ qlcnic_set_netdev_features(adapter, esw_cfg);
+}
+
+static int
+qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_esw_func_cfg esw_cfg;
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return 0;
+
+ esw_cfg.pci_func = adapter->ahw->pci_func;
+ if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
+ return -EIO;
+ qlcnic_set_vlan_config(adapter, &esw_cfg);
+ qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
+
+ return 0;
+}
+
+static void
+qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ struct net_device *netdev = adapter->netdev;
+ unsigned long features, vlan_features;
+
+ features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_GRO);
+ vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
+ features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ }
+
+ if (netdev->features & NETIF_F_LRO)
+ features |= NETIF_F_LRO;
+
+ if (esw_cfg->offload_flags & BIT_0) {
+ netdev->features |= features;
+ if (!(esw_cfg->offload_flags & BIT_1))
+ netdev->features &= ~NETIF_F_TSO;
+ if (!(esw_cfg->offload_flags & BIT_2))
+ netdev->features &= ~NETIF_F_TSO6;
+ } else {
+ netdev->features &= ~features;
+ }
+
+ netdev->vlan_features = (features & vlan_features);
+}
+
+static int
+qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
+{
+ void __iomem *priv_op;
+ u32 op_mode, priv_level;
+ int err = 0;
+
+ err = qlcnic_initialize_nic(adapter);
+ if (err)
+ return err;
+
+ if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
+ return 0;
+
+ priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
+ op_mode = readl(priv_op);
+
+ if (op_mode == QLC_DEV_DRV_DEFAULT)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
+
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+ if (priv_level == QLCNIC_MGMT_FUNC) {
+ adapter->op_mode = QLCNIC_MGMT_FUNC;
+ err = qlcnic_init_pci_info(adapter);
+ if (err)
+ return err;
+ /* Set privilege level for other functions */
+ qlcnic_set_function_modes(adapter);
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d, Management function\n",
+ adapter->fw_hal_version);
+ } else if (priv_level == QLCNIC_PRIV_FUNC) {
+ adapter->op_mode = QLCNIC_PRIV_FUNC;
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d, Privileged function\n",
+ adapter->fw_hal_version);
+ }
+ }
+
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ return err;
+}
+
+static int
+qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_esw_func_cfg esw_cfg;
+ struct qlcnic_npar_info *npar;
+ u8 i;
+
+ if (adapter->need_fw_reset)
+ return 0;
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+ continue;
+ memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
+ esw_cfg.pci_func = i;
+ esw_cfg.offload_flags = BIT_0;
+ esw_cfg.mac_override = BIT_0;
+ esw_cfg.promisc_mode = BIT_0;
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+ esw_cfg.offload_flags |= (BIT_1 | BIT_2);
+ if (qlcnic_config_switch_port(adapter, &esw_cfg))
+ return -EIO;
+ npar = &adapter->npars[i];
+ npar->pvid = esw_cfg.vlan_id;
+ npar->mac_override = esw_cfg.mac_override;
+ npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
+ npar->discard_tagged = esw_cfg.discard_tagged;
+ npar->promisc_mode = esw_cfg.promisc_mode;
+ npar->offload_flags = esw_cfg.offload_flags;
+ }
+
+ return 0;
+}
+
+static int
+qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_npar_info *npar, int pci_func)
+{
+ struct qlcnic_esw_func_cfg esw_cfg;
+ esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
+ esw_cfg.pci_func = pci_func;
+ esw_cfg.vlan_id = npar->pvid;
+ esw_cfg.mac_override = npar->mac_override;
+ esw_cfg.discard_tagged = npar->discard_tagged;
+ esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
+ esw_cfg.offload_flags = npar->offload_flags;
+ esw_cfg.promisc_mode = npar->promisc_mode;
+ if (qlcnic_config_switch_port(adapter, &esw_cfg))
+ return -EIO;
+
+ esw_cfg.op_mode = QLCNIC_ADD_VLAN;
+ if (qlcnic_config_switch_port(adapter, &esw_cfg))
+ return -EIO;
+
+ return 0;
+}
+
+static int
+qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
+{
+ int i, err;
+ struct qlcnic_npar_info *npar;
+ struct qlcnic_info nic_info;
+
+ if (!adapter->need_fw_reset)
+ return 0;
+
+ /* Set the NPAR config data after FW reset */
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ npar = &adapter->npars[i];
+ if (npar->type != QLCNIC_TYPE_NIC)
+ continue;
+ err = qlcnic_get_nic_info(adapter, &nic_info, i);
+ if (err)
+ return err;
+ nic_info.min_tx_bw = npar->min_bw;
+ nic_info.max_tx_bw = npar->max_bw;
+ err = qlcnic_set_nic_info(adapter, &nic_info);
+ if (err)
+ return err;
+
+ if (npar->enable_pm) {
+ err = qlcnic_config_port_mirroring(adapter,
+ npar->dest_npar, 1, i);
+ if (err)
+ return err;
+ }
+ err = qlcnic_reset_eswitch_config(adapter, npar, i);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
+{
+ u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
+ u32 npar_state;
+
+ if (adapter->op_mode == QLCNIC_MGMT_FUNC)
+ return 0;
+
+ npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
+ msleep(1000);
+ npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ }
+ if (!npar_opt_timeo) {
+ dev_err(&adapter->pdev->dev,
+ "Waiting for NPAR state to opertional timeout\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int
+qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ adapter->op_mode != QLCNIC_MGMT_FUNC)
+ return 0;
+
+ err = qlcnic_set_default_offload_settings(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_reset_npar_config(adapter);
+ if (err)
+ return err;
+
+ qlcnic_dev_set_npar_ready(adapter);
+
+ return err;
+}
+
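+/*
+ * Bring the firmware up: arbitrate with other functions for the right
+ * to load, reset and reload the image if needed, then wait for the
+ * firmware handshake and publish the READY device state.
+ */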
+static int
+qlcnic_start_firmware(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ err = qlcnic_can_start_firmware(adapter);
+ if (err < 0)
+ return err;
+ else if (!err)
+ goto check_fw_status;
+
+ if (load_fw_file) {
+ qlcnic_request_firmware(adapter);
+ } else {
+ err = qlcnic_check_flash_fw_ver(adapter);
+ if (err)
+ goto err_out;
+
+ adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
+ }
+
+ err = qlcnic_need_fw_reset(adapter);
+ if (err == 0)
+ goto check_fw_status;
+
+ err = qlcnic_pinit_from_rom(adapter);
+ if (err)
+ goto err_out;
+
+ err = qlcnic_load_firmware(adapter);
+ if (err)
+ goto err_out;
+
+ qlcnic_release_firmware(adapter);
+ QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
+
+check_fw_status:
+ err = qlcnic_check_fw_status(adapter);
+ if (err)
+ goto err_out;
+
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
+ qlcnic_idc_debug_info(adapter, 1);
+
+ err = qlcnic_check_eswitch_mode(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failed for eswitch\n");
+ goto err_out;
+ }
+ err = qlcnic_set_mgmt_operations(adapter);
+ if (err)
+ goto err_out;
+
+ qlcnic_check_options(adapter);
+ adapter->need_fw_reset = 0;
+
+ qlcnic_release_firmware(adapter);
+ return 0;
+
+err_out:
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
+ dev_err(&adapter->pdev->dev, "Device state set to failed\n");
+
+ qlcnic_release_firmware(adapter);
+ return err;
+}
+
+static int
+qlcnic_request_irq(struct qlcnic_adapter *adapter)
+{
+ irq_handler_t handler;
+ struct qlcnic_host_sds_ring *sds_ring;
+ int err, ring;
+
+ unsigned long flags = 0;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+ handler = qlcnic_tmp_intr;
+ if (!QLCNIC_IS_MSI_FAMILY(adapter))
+ flags |= IRQF_SHARED;
+
+ } else {
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ handler = qlcnic_msix_intr;
+ else if (adapter->flags & QLCNIC_MSI_ENABLED)
+ handler = qlcnic_msi_intr;
+ else {
+ flags |= IRQF_SHARED;
+ handler = qlcnic_intr;
+ }
+ }
+ adapter->irq = netdev->irq;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
+ err = request_irq(sds_ring->irq, handler,
+ flags, sds_ring->name, sds_ring);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+qlcnic_free_irq(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ free_irq(sds_ring->irq, sds_ring);
+ }
+}
+
+static int
+__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int ring;
+ struct qlcnic_host_rds_ring *rds_ring;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EIO;
+
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return 0;
+ if (qlcnic_set_eswitch_port_config(adapter))
+ return -EIO;
+
+ if (qlcnic_fw_create_ctx(adapter))
+ return -EIO;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, rds_ring);
+ }
+
+ qlcnic_set_multi(netdev);
+ qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
+
+ adapter->ahw->linkup = 0;
+
+ if (adapter->max_sds_rings > 1)
+ qlcnic_config_rss(adapter, 1);
+
+ qlcnic_config_intr_coalesce(adapter);
+
+ if (netdev->features & NETIF_F_LRO)
+ qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
+
+ qlcnic_napi_enable(adapter);
+
+ qlcnic_linkevent_request(adapter, 1);
+
+ adapter->reset_context = 0;
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
+ return 0;
+}
+
+/* Usage: during resume and firmware recovery */
+
+static int
+qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int err = 0;
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ err = __qlcnic_up(adapter, netdev);
+ rtnl_unlock();
+
+ return err;
+}
+
+static void
+__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return;
+
+ smp_mb();
+ spin_lock(&adapter->tx_clean_lock);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ qlcnic_free_mac_list(adapter);
+
+ if (adapter->fhash.fnum)
+ qlcnic_delete_lb_filters(adapter);
+
+ qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
+
+ qlcnic_napi_disable(adapter);
+
+ qlcnic_fw_destroy_ctx(adapter);
+
+ qlcnic_reset_rx_buffers_list(adapter);
+ qlcnic_release_tx_buffers(adapter);
+ spin_unlock(&adapter->tx_clean_lock);
+}
+
+/* Usage: during suspend and firmware recovery */
+
+static void
+qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ rtnl_lock();
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+ rtnl_unlock();
+}
+
+static int
+qlcnic_attach(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+
+ if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
+ return 0;
+
+ err = qlcnic_napi_add(adapter, netdev);
+ if (err)
+ return err;
+
+ err = qlcnic_alloc_sw_resources(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error in setting sw resources\n");
+ goto err_out_napi_del;
+ }
+
+ err = qlcnic_alloc_hw_resources(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error in setting hw resources\n");
+ goto err_out_free_sw;
+ }
+
+ err = qlcnic_request_irq(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to setup interrupt\n");
+ goto err_out_free_hw;
+ }
+
+ qlcnic_create_sysfs_entries(adapter);
+
+ adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
+ return 0;
+
+err_out_free_hw:
+ qlcnic_free_hw_resources(adapter);
+err_out_free_sw:
+ qlcnic_free_sw_resources(adapter);
+err_out_napi_del:
+ qlcnic_napi_del(adapter);
+ return err;
+}
+
+static void
+qlcnic_detach(struct qlcnic_adapter *adapter)
+{
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ qlcnic_remove_sysfs_entries(adapter);
+
+ qlcnic_free_hw_resources(adapter);
+ qlcnic_release_rx_buffers(adapter);
+ qlcnic_free_irq(adapter);
+ qlcnic_napi_del(adapter);
+ qlcnic_free_sw_resources(adapter);
+
+ adapter->is_up = 0;
+}
+
+void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ int ring;
+
+ clear_bit(__QLCNIC_DEV_UP, &adapter->state);
+ if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
+ qlcnic_disable_int(sds_ring);
+ }
+ }
+
+ qlcnic_fw_destroy_ctx(adapter);
+
+ qlcnic_detach(adapter);
+
+ adapter->diag_test = 0;
+ adapter->max_sds_rings = max_sds_rings;
+
+ if (qlcnic_attach(adapter))
+ goto out;
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+out:
+ netif_device_attach(netdev);
+}
+
+static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
+{
+ int err = 0;
+ adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
+ GFP_KERNEL);
+ if (!adapter->ahw) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to allocate recv ctx resources for adapter\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+ adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
+ GFP_KERNEL);
+ if (!adapter->recv_ctx) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to allocate recv ctx resources for adapter\n");
+ kfree(adapter->ahw);
+ adapter->ahw = NULL;
+ err = -ENOMEM;
+ goto err_out;
+ }
+ /* Initialize interrupt coalesce parameters */
+ adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
+ adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
+ adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+err_out:
+ return err;
+}
+
+static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
+{
+ kfree(adapter->recv_ctx);
+ adapter->recv_ctx = NULL;
+
+ if (adapter->ahw->fw_dump.tmpl_hdr) {
+ vfree(adapter->ahw->fw_dump.tmpl_hdr);
+ adapter->ahw->fw_dump.tmpl_hdr = NULL;
+ }
+ kfree(adapter->ahw);
+ adapter->ahw = NULL;
+}
+
+int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int ring;
+ int ret;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ adapter->max_sds_rings = 1;
+ adapter->diag_test = test;
+
+ ret = qlcnic_attach(adapter);
+ if (ret) {
+ netif_device_attach(netdev);
+ return ret;
+ }
+
+ ret = qlcnic_fw_create_ctx(adapter);
+ if (ret) {
+ qlcnic_detach(adapter);
+ netif_device_attach(netdev);
+ return ret;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, rds_ring);
+ }
+
+ if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
+ qlcnic_enable_int(sds_ring);
+ }
+ }
+
+ if (adapter->diag_test == QLCNIC_LOOPBACK_TEST) {
+ adapter->ahw->loopback_state = 0;
+ qlcnic_linkevent_request(adapter, 1);
+ }
+
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
+
+ return 0;
+}
+
+/* Reset context in hardware only */
+static int
+qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ netif_device_detach(netdev);
+
+ qlcnic_down(adapter, netdev);
+
+ qlcnic_up(adapter, netdev);
+
+ netif_device_attach(netdev);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return 0;
+}
+
+int
+qlcnic_reset_context(struct qlcnic_adapter *adapter)
+{
+ int err = 0;
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (!err)
+ __qlcnic_up(adapter, netdev);
+ }
+
+ netif_device_attach(netdev);
+ }
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return err;
+}
+
+static int
+qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
+ struct net_device *netdev, u8 pci_using_dac)
+{
+ int err;
+ struct pci_dev *pdev = adapter->pdev;
+
+ adapter->mc_enabled = 0;
+ adapter->max_mc_count = 38;
+
+ netdev->netdev_ops = &qlcnic_netdev_ops;
+ netdev->watchdog_timeo = 5*HZ;
+
+ qlcnic_change_mtu(netdev, netdev->mtu);
+
+ SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
+
+ netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+ netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ if (pci_using_dac)
+ netdev->hw_features |= NETIF_F_HIGHDMA;
+
+ netdev->vlan_features = netdev->hw_features;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
+ netdev->hw_features |= NETIF_F_HW_VLAN_TX;
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ netdev->hw_features |= NETIF_F_LRO;
+
+ netdev->features |= netdev->hw_features |
+ NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+
+ netdev->irq = adapter->msix_entries[0].vector;
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register net device\n");
+ return err;
+ }
+
+ return 0;
+}
+
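+/* Prefer a 64-bit DMA mask and fall back to 32-bit. */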
+static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
+{
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+ *pci_using_dac = 1;
+ else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+ *pci_using_dac = 0;
+ else {
+ dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count)
+{
+ adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry),
+ GFP_KERNEL);
+
+ if (adapter->msix_entries)
+ return 0;
+
+ dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n");
+ return -ENOMEM;
+}
+
+static int __devinit
+qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev = NULL;
+ struct qlcnic_adapter *adapter = NULL;
+ int err;
+ uint8_t revision_id;
+ uint8_t pci_using_dac;
+ char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ err = -ENODEV;
+ goto err_out_disable_pdev;
+ }
+
+ err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
+ if (err)
+ goto err_out_disable_pdev;
+
+ err = pci_request_regions(pdev, qlcnic_driver_name);
+ if (err)
+ goto err_out_disable_pdev;
+
+ pci_set_master(pdev);
+ pci_enable_pcie_error_reporting(pdev);
+
+ netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
+ if (!netdev) {
+ dev_err(&pdev->dev, "failed to allocate net_device\n");
+ err = -ENOMEM;
+ goto err_out_free_res;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+
+ if (qlcnic_alloc_adapter_resources(adapter))
+ goto err_out_free_netdev;
+
+ adapter->dev_rst_time = jiffies;
+ revision_id = pdev->revision;
+ adapter->ahw->revision_id = revision_id;
+ adapter->mac_learn = qlcnic_mac_learn;
+
+ rwlock_init(&adapter->ahw->crb_lock);
+ mutex_init(&adapter->ahw->mem_lock);
+
+ spin_lock_init(&adapter->tx_clean_lock);
+ INIT_LIST_HEAD(&adapter->mac_list);
+
+ err = qlcnic_setup_pci_map(adapter);
+ if (err)
+ goto err_out_free_hw;
+
+ /* This will be reset for mezz cards */
+ adapter->portnum = adapter->ahw->pci_func;
+
+ err = qlcnic_get_board_info(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error getting board config info.\n");
+ goto err_out_iounmap;
+ }
+
+ err = qlcnic_setup_idc_param(adapter);
+ if (err)
+ goto err_out_iounmap;
+
+ adapter->flags |= QLCNIC_NEED_FLR;
+
+ err = adapter->nic_ops->start_firmware(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
+ goto err_out_decr_ref;
+ }
+
+ if (qlcnic_read_mac_addr(adapter))
+ dev_warn(&pdev->dev, "failed to read mac addr\n");
+
+ if (adapter->portnum == 0) {
+ get_brd_name(adapter, brd_name);
+
+ pr_info("%s: %s Board Chip rev 0x%x\n",
+ module_name(THIS_MODULE),
+ brd_name, adapter->ahw->revision_id);
+ }
+
+ qlcnic_clear_stats(adapter);
+
+ err = qlcnic_alloc_msix_entries(adapter, adapter->max_rx_ques);
+ if (err)
+ goto err_out_decr_ref;
+
+ qlcnic_setup_intr(adapter);
+
+ err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
+ if (err)
+ goto err_out_disable_msi;
+
+ pci_set_drvdata(pdev, adapter);
+
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+
+ switch (adapter->ahw->port_type) {
+ case QLCNIC_GBE:
+ dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ case QLCNIC_XGBE:
+ dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ }
+
+ if (adapter->mac_learn)
+ qlcnic_alloc_lb_filters_mem(adapter);
+
+ qlcnic_create_diag_entries(adapter);
+
+ return 0;
+
+err_out_disable_msi:
+ qlcnic_teardown_intr(adapter);
+ kfree(adapter->msix_entries);
+
+err_out_decr_ref:
+ qlcnic_clr_all_drv_state(adapter, 0);
+
+err_out_iounmap:
+ qlcnic_cleanup_pci_map(adapter);
+
+err_out_free_hw:
+ qlcnic_free_adapter_resources(adapter);
+
+err_out_free_netdev:
+ free_netdev(netdev);
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_disable_pdev:
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void __devexit qlcnic_remove(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *netdev;
+
+ adapter = pci_get_drvdata(pdev);
+ if (adapter == NULL)
+ return;
+
+ netdev = adapter->netdev;
+
+ qlcnic_cancel_fw_work(adapter);
+
+ unregister_netdev(netdev);
+
+ qlcnic_detach(adapter);
+
+ kfree(adapter->npars);
+ kfree(adapter->eswitch);
+
+ qlcnic_clr_all_drv_state(adapter, 0);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ qlcnic_free_lb_filters_mem(adapter);
+
+ qlcnic_teardown_intr(adapter);
+ kfree(adapter->msix_entries);
+
+ qlcnic_remove_diag_entries(adapter);
+
+ qlcnic_cleanup_pci_map(adapter);
+
+ qlcnic_release_firmware(adapter);
+
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ qlcnic_free_adapter_resources(adapter);
+ free_netdev(netdev);
+}
+
+static int __qlcnic_shutdown(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int retval;
+
+ netif_device_detach(netdev);
+
+ qlcnic_cancel_fw_work(adapter);
+
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ qlcnic_clr_all_drv_state(adapter, 0);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ if (qlcnic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
+
+ return 0;
+}
+
+static void qlcnic_shutdown(struct pci_dev *pdev)
+{
+ if (__qlcnic_shutdown(pdev))
+ return;
+
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int
+qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int retval;
+
+ retval = __qlcnic_shutdown(pdev);
+ if (retval)
+ return retval;
+
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+
+static int
+qlcnic_resume(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ err = adapter->nic_ops->start_firmware(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to start firmware\n");
+ return err;
+ }
+
+ if (netif_running(netdev)) {
+ err = qlcnic_up(adapter, netdev);
+ if (err)
+ goto done;
+
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+done:
+ netif_device_attach(netdev);
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+ return 0;
+}
+#endif
+
+static int qlcnic_open(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ netif_carrier_off(netdev);
+
+ err = qlcnic_attach(adapter);
+ if (err)
+ return err;
+
+ err = __qlcnic_up(adapter, netdev);
+ if (err)
+ goto err_out;
+
+ netif_start_queue(netdev);
+
+ return 0;
+
+err_out:
+ qlcnic_detach(adapter);
+ return err;
+}
+
+/*
+ * qlcnic_close - Disables a network interface entry point
+ */
+static int qlcnic_close(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ __qlcnic_down(adapter, netdev);
+ return 0;
+}
+
+void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
+{
+ void *head;
+ int i;
+
+ if (adapter->fhash.fmax && adapter->fhash.fhead)
+ return;
+
+ spin_lock_init(&adapter->mac_learn_lock);
+
+ head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
+ GFP_KERNEL);
+ if (!head)
+ return;
+
+ adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
+ adapter->fhash.fhead = head;
+
+ for (i = 0; i < adapter->fhash.fmax; i++)
+ INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
+}
+
+static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fhash.fmax && adapter->fhash.fhead)
+ kfree(adapter->fhash.fhead);
+
+ adapter->fhash.fhead = NULL;
+ adapter->fhash.fmax = 0;
+}
+
+static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
+ u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
+{
+ struct cmd_desc_type0 *hwdesc;
+ struct qlcnic_nic_req *req;
+ struct qlcnic_mac_req *mac_req;
+ struct qlcnic_vlan_req *vlan_req;
+ u32 producer;
+ u64 word;
+
+ producer = tx_ring->producer;
+ hwdesc = &tx_ring->desc_head[tx_ring->producer];
+
+ req = (struct qlcnic_nic_req *)hwdesc;
+ memset(req, 0, sizeof(struct qlcnic_nic_req));
+ req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
+
+ word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
+ req->req_hdr = cpu_to_le64(word);
+
+ mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
+ mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+ memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
+
+ vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
+ vlan_req->vlan_id = vlan_id;
+
+ tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
+ smp_mb();
+}
+
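+/* Fold bits 16-18 and 40-42 of the MAC address into a 6-bit hash index */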
+#define QLCNIC_MAC_HASH(MAC)\
+ ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
+
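+/*
+ * Learn the source MAC of an outgoing frame: refresh a matching
+ * filter entry or allocate a new one and program it via the TX ring.
+ */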
+static void
+qlcnic_send_filter(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring,
+ struct cmd_desc_type0 *first_desc,
+ struct sk_buff *skb)
+{
+ struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+ struct qlcnic_filter *fil, *tmp_fil;
+ struct hlist_node *tmp_hnode, *n;
+ struct hlist_head *head;
+ u64 src_addr = 0;
+ __le16 vlan_id = 0;
+ u8 hindex;
+
+ if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
+ return;
+
+ if (adapter->fhash.fnum >= adapter->fhash.fmax)
+ return;
+
+ /* Only NPAR capable devices support VLAN based learning */
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ vlan_id = first_desc->vlan_TCI;
+ memcpy(&src_addr, phdr->h_source, ETH_ALEN);
+ hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
+ head = &(adapter->fhash.fhead[hindex]);
+
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
+ tmp_fil->vlan_id == vlan_id) {
+
+ if (jiffies >
+ (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
+ qlcnic_change_filter(adapter, src_addr, vlan_id,
+ tx_ring);
+ tmp_fil->ftime = jiffies;
+ return;
+ }
+ }
+
+ fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
+ if (!fil)
+ return;
+
+ qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
+
+ fil->ftime = jiffies;
+ fil->vlan_id = vlan_id;
+ memcpy(fil->faddr, &src_addr, ETH_ALEN);
+ spin_lock(&adapter->mac_learn_lock);
+ hlist_add_head(&(fil->fnode), head);
+ adapter->fhash.fnum++;
+ spin_unlock(&adapter->mac_learn_lock);
+}
+
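+/*
+ * Fill the protocol-dependent fields of the first TX descriptor:
+ * VLAN tag, LSO header template and checksum-offload opcode.
+ */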
+static int
+qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
+ struct cmd_desc_type0 *first_desc,
+ struct sk_buff *skb)
+{
+ u8 opcode = 0, hdr_len = 0;
+ u16 flags = 0, vlan_tci = 0;
+ int copied, offset, copy_len;
+ struct cmd_desc_type0 *hwdesc;
+ struct vlan_ethhdr *vh;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+ u16 protocol = ntohs(skb->protocol);
+ u32 producer = tx_ring->producer;
+
+ if (protocol == ETH_P_8021Q) {
+ vh = (struct vlan_ethhdr *)skb->data;
+ flags = FLAGS_VLAN_TAGGED;
+ vlan_tci = vh->h_vlan_TCI;
+ } else if (vlan_tx_tag_present(skb)) {
+ flags = FLAGS_VLAN_OOB;
+ vlan_tci = vlan_tx_tag_get(skb);
+ }
+ if (unlikely(adapter->pvid)) {
+ if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
+ return -EIO;
+ if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
+ goto set_flags;
+
+ flags = FLAGS_VLAN_OOB;
+ vlan_tci = adapter->pvid;
+ }
+set_flags:
+ qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+ if (*(skb->data) & BIT_0) {
+ flags |= BIT_0;
+ memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
+ }
+ opcode = TX_ETHER_PKT;
+ if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
+ skb_shinfo(skb)->gso_size > 0) {
+
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ first_desc->total_hdr_length = hdr_len;
+
+ opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
+
+ /* For LSO, we need to copy the MAC/IP/TCP headers into
+ * the descriptor ring */
+ copied = 0;
+ offset = 2;
+
+ if (flags & FLAGS_VLAN_OOB) {
+ first_desc->total_hdr_length += VLAN_HLEN;
+ first_desc->tcp_hdr_offset = VLAN_HLEN;
+ first_desc->ip_hdr_offset = VLAN_HLEN;
+ /* Only in case of TSO on vlan device */
+ flags |= FLAGS_VLAN_TAGGED;
+
+ /* Create a TSO vlan header template for firmware */
+
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) -
+ offset, hdr_len + VLAN_HLEN);
+
+ vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
+ skb_copy_from_linear_data(skb, vh, 12);
+ vh->h_vlan_proto = htons(ETH_P_8021Q);
+ vh->h_vlan_TCI = htons(vlan_tci);
+
+ skb_copy_from_linear_data_offset(skb, 12,
+ (char *)vh + 16, copy_len - 16);
+
+ copied = copy_len - VLAN_HLEN;
+ offset = 0;
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ while (copied < hdr_len) {
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) -
+ offset, (hdr_len - copied));
+
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ skb_copy_from_linear_data_offset(skb, copied,
+ (char *) hwdesc + offset, copy_len);
+
+ copied += copy_len;
+ offset = 0;
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ tx_ring->producer = producer;
+ smp_mb();
+ adapter->stats.lso_frames++;
+
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 l4proto;
+
+ if (protocol == ETH_P_IP) {
+ l4proto = ip_hdr(skb)->protocol;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = TX_TCP_PKT;
+ else if (l4proto == IPPROTO_UDP)
+ opcode = TX_UDP_PKT;
+ } else if (protocol == ETH_P_IPV6) {
+ l4proto = ipv6_hdr(skb)->nexthdr;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = TX_TCPV6_PKT;
+ else if (l4proto == IPPROTO_UDP)
+ opcode = TX_UDPV6_PKT;
+ }
+ }
+ first_desc->tcp_hdr_offset += skb_transport_offset(skb);
+ first_desc->ip_hdr_offset += skb_network_offset(skb);
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+ return 0;
+}
+
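+/* DMA-map the skb head and all page fragments; unwind the mappings on error */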
+static int
+qlcnic_map_tx_skb(struct pci_dev *pdev,
+ struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
+{
+ struct qlcnic_skb_frag *nf;
+ struct skb_frag_struct *frag;
+ int i, nr_frags;
+ dma_addr_t map;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ nf = &pbuf->frag_array[0];
+
+ map = pci_map_single(pdev, skb->data,
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, map))
+ goto out_err;
+
+ nf->dma = map;
+ nf->length = skb_headlen(skb);
+
+ for (i = 0; i < nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ nf = &pbuf->frag_array[i+1];
+
+ map = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, map))
+ goto unwind;
+
+ nf->dma = map;
+ nf->length = frag->size;
+ }
+
+ return 0;
+
+unwind:
+ while (--i >= 0) {
+ nf = &pbuf->frag_array[i+1];
+ pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ }
+
+ nf = &pbuf->frag_array[0];
+ pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+
+out_err:
+ return -ENOMEM;
+}
+
+static void
+qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
+ struct qlcnic_cmd_buffer *pbuf)
+{
+ struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int i;
+
+ for (i = 0; i < nr_frags; i++) {
+ nf = &pbuf->frag_array[i+1];
+ pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ }
+
+ nf = &pbuf->frag_array[0];
+ pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+ pbuf->skb = NULL;
+}
+
+static inline void
+qlcnic_clear_cmddesc(u64 *desc)
+{
+ desc[0] = 0ULL;
+ desc[2] = 0ULL;
+ desc[7] = 0ULL;
+}
+
+netdev_tx_t
+qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+ struct qlcnic_cmd_buffer *pbuf;
+ struct qlcnic_skb_frag *buffrag;
+ struct cmd_desc_type0 *hwdesc, *first_desc;
+ struct pci_dev *pdev;
+ struct ethhdr *phdr;
+ int delta = 0;
+ int i, k;
+
+ u32 producer;
+ int frag_count;
+ u32 num_txd = tx_ring->num_desc;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (adapter->flags & QLCNIC_MACSPOOF) {
+ phdr = (struct ethhdr *)skb->data;
+ if (compare_ether_addr(phdr->h_source,
+ adapter->mac_addr))
+ goto drop_packet;
+ }
+
+ frag_count = skb_shinfo(skb)->nr_frags + 1;
+ /* 14 frags supported for normal packet and
+ * 32 frags supported for TSO packet
+ */
+ if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
+
+ for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
+ delta += skb_shinfo(skb)->frags[i].size;
+
+ if (!__pskb_pull_tail(skb, delta))
+ goto drop_packet;
+
+ frag_count = 1 + skb_shinfo(skb)->nr_frags;
+ }
+
+ if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
+ netif_stop_queue(netdev);
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
+ netif_start_queue(netdev);
+ else {
+ adapter->stats.xmit_off++;
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ producer = tx_ring->producer;
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+
+ pdev = adapter->pdev;
+
+ first_desc = hwdesc = &tx_ring->desc_head[producer];
+ qlcnic_clear_cmddesc((u64 *)hwdesc);
+
+ if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
+ adapter->stats.tx_dma_map_error++;
+ goto drop_packet;
+ }
+
+ pbuf->skb = skb;
+ pbuf->frag_count = frag_count;
+
+ qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
+ qlcnic_set_tx_port(first_desc, adapter->portnum);
+
+ for (i = 0; i < frag_count; i++) {
+
+ k = i % 4;
+
+ if ((k == 0) && (i > 0)) {
+ /* move to next desc.*/
+ producer = get_next_index(producer, num_txd);
+ hwdesc = &tx_ring->desc_head[producer];
+ qlcnic_clear_cmddesc((u64 *)hwdesc);
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+ }
+
+ buffrag = &pbuf->frag_array[i];
+
+ hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
+ switch (k) {
+ case 0:
+ hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
+ break;
+ case 1:
+ hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
+ break;
+ case 2:
+ hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
+ break;
+ case 3:
+ hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
+ break;
+ }
+ }
+
+ tx_ring->producer = get_next_index(producer, num_txd);
+ smp_mb();
+
+ if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
+ goto unwind_buff;
+
+ if (adapter->mac_learn)
+ qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
+
+ adapter->stats.txbytes += skb->len;
+ adapter->stats.xmitcalled++;
+
+ qlcnic_update_cmd_producer(adapter, tx_ring);
+
+ return NETDEV_TX_OK;
+
+unwind_buff:
+ qlcnic_unmap_buffers(pdev, skb, pbuf);
+drop_packet:
+ adapter->stats.txdropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 temp, temp_state, temp_val;
+ int rv = 0;
+
+ temp = QLCRD32(adapter, CRB_TEMP_STATE);
+
+ temp_state = qlcnic_get_temp_state(temp);
+ temp_val = qlcnic_get_temp_val(temp);
+
+ if (temp_state == QLCNIC_TEMP_PANIC) {
+ dev_err(&netdev->dev,
+ "Device temperature %d degrees C exceeds"
+ " maximum allowed. Hardware has been shut down.\n",
+ temp_val);
+ rv = 1;
+ } else if (temp_state == QLCNIC_TEMP_WARN) {
+ if (adapter->temp == QLCNIC_TEMP_NORMAL) {
+ dev_err(&netdev->dev,
+ "Device temperature %d degrees C "
+ "exceeds operating range."
+ " Immediate action needed.\n",
+ temp_val);
+ }
+ } else {
+ if (adapter->temp == QLCNIC_TEMP_WARN) {
+ dev_info(&netdev->dev,
+ "Device temperature is now %d degrees C"
+ " in normal range.\n", temp_val);
+ }
+ }
+ adapter->temp = temp_state;
+ return rv;
+}
+
+void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (adapter->ahw->linkup && !linkup) {
+ netdev_info(netdev, "NIC Link is down\n");
+ adapter->ahw->linkup = 0;
+ if (netif_running(netdev)) {
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ } else if (!adapter->ahw->linkup && linkup) {
+ netdev_info(netdev, "NIC Link is up\n");
+ adapter->ahw->linkup = 1;
+ if (netif_running(netdev)) {
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ }
+ }
+}
+
+static void qlcnic_tx_timeout(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ return;
+
+ dev_err(&netdev->dev, "transmit timeout, resetting.\n");
+
+ if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
+ adapter->need_fw_reset = 1;
+ else
+ adapter->reset_context = 1;
+}
+
+static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct net_device_stats *stats = &netdev->stats;
+
+ stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
+ stats->tx_packets = adapter->stats.xmitfinished;
+ stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
+ stats->tx_bytes = adapter->stats.txbytes;
+ stats->rx_dropped = adapter->stats.rxdropped;
+ stats->tx_dropped = adapter->stats.txdropped;
+
+ return stats;
+}
+
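+/* Check whether a shared legacy interrupt is ours; ack it if so */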
+static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+ u32 status;
+
+ status = readl(adapter->isr_int_vec);
+
+ if (!(status & adapter->int_vec_bit))
+ return IRQ_NONE;
+
+ /* check interrupt state machine, to be sure */
+ status = readl(adapter->crb_int_state_reg);
+ if (!ISR_LEGACY_INT_TRIGGERED(status))
+ return IRQ_NONE;
+
+ writel(0xffffffff, adapter->tgt_status_reg);
+ /* read twice to ensure write is flushed */
+ readl(adapter->isr_int_vec);
+ readl(adapter->isr_int_vec);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ goto done;
+ else if (adapter->flags & QLCNIC_MSI_ENABLED) {
+ writel(0xffffffff, adapter->tgt_status_reg);
+ goto done;
+ }
+
+ if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+done:
+ adapter->diag_cnt++;
+ qlcnic_enable_int(sds_ring);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+ napi_schedule(&sds_ring->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msi_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ /* clear interrupt */
+ writel(0xffffffff, adapter->tgt_status_reg);
+
+ napi_schedule(&sds_ring->napi);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msix_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+
+ napi_schedule(&sds_ring->napi);
+ return IRQ_HANDLED;
+}
+
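+/* Unmap and free TX buffers that the firmware has consumed */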
+static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
+{
+ u32 sw_consumer, hw_consumer;
+ int count = 0, i;
+ struct qlcnic_cmd_buffer *buffer;
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_skb_frag *frag;
+ int done;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ if (!spin_trylock(&adapter->tx_clean_lock))
+ return 1;
+
+ sw_consumer = tx_ring->sw_consumer;
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+
+ while (sw_consumer != hw_consumer) {
+ buffer = &tx_ring->cmd_buf_arr[sw_consumer];
+ if (buffer->skb) {
+ frag = &buffer->frag_array[0];
+ pci_unmap_single(pdev, frag->dma, frag->length,
+ PCI_DMA_TODEVICE);
+ frag->dma = 0ULL;
+ for (i = 1; i < buffer->frag_count; i++) {
+ frag++;
+ pci_unmap_page(pdev, frag->dma, frag->length,
+ PCI_DMA_TODEVICE);
+ frag->dma = 0ULL;
+ }
+
+ adapter->stats.xmitfinished++;
+ dev_kfree_skb_any(buffer->skb);
+ buffer->skb = NULL;
+ }
+
+ sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
+ if (++count >= MAX_STATUS_HANDLE)
+ break;
+ }
+
+ if (count && netif_running(netdev)) {
+ tx_ring->sw_consumer = sw_consumer;
+
+ smp_mb();
+
+ if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
+ netif_wake_queue(netdev);
+ adapter->stats.xmit_on++;
+ }
+ }
+ adapter->tx_timeo_cnt = 0;
+ }
+ /*
+ * If everything is freed up to consumer then check if the ring is full
+ * If the ring is full then check if more needs to be freed and
+ * schedule the call back again.
+ *
+ * This happens when there are 2 CPUs. One could be freeing and the
+ * other filling it. If the ring is full when we get out of here and
+ * the card has already interrupted the host then the host can miss the
+ * interrupt.
+ *
+ * There is still a possible race condition and the host could miss an
+ * interrupt. The card has to take care of this.
+ */
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+ done = (sw_consumer == hw_consumer);
+ spin_unlock(&adapter->tx_clean_lock);
+
+ return done;
+}
+
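+/* NAPI poll: service TX completions and the RX ring for this vector */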
+static int qlcnic_poll(struct napi_struct *napi, int budget)
+{
+ struct qlcnic_host_sds_ring *sds_ring =
+ container_of(napi, struct qlcnic_host_sds_ring, napi);
+
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ int tx_complete;
+ int work_done;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter);
+
+ work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+ if ((work_done < budget) && tx_complete) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_int(sds_ring);
+ }
+
+ return work_done;
+}
+
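+/* NAPI poll for RX-only rings (no TX completion processing) */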
+static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct qlcnic_host_sds_ring *sds_ring =
+ container_of(napi, struct qlcnic_host_sds_ring, napi);
+
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ int work_done;
+
+ work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+ if (work_done < budget) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_int(sds_ring);
+ }
+
+ return work_done;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void qlcnic_poll_controller(struct net_device *netdev)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ disable_irq(adapter->irq);
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ qlcnic_intr(adapter->irq, sds_ring);
+ }
+ enable_irq(adapter->irq);
+}
+#endif
+
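+/* Record port, encoding and time since last reset in a debug scratch register */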
+static void
+qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
+{
+ u32 val;
+
+ val = adapter->portnum & 0xf;
+ val |= encoding << 7;
+ val |= (jiffies - adapter->dev_rst_time) << 8;
+
+ QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
+ adapter->dev_rst_time = jiffies;
+}
+
+static int
+qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
+{
+ u32 val;
+
+ WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
+ state != QLCNIC_DEV_NEED_QUISCENT);
+
+ if (qlcnic_api_lock(adapter))
+ return -EIO;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+
+ if (state == QLCNIC_DEV_NEED_RESET)
+ QLC_DEV_SET_RST_RDY(val, adapter->portnum);
+ else if (state == QLCNIC_DEV_NEED_QUISCENT)
+ QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
+
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+
+ return 0;
+}
+
+static int
+qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ if (qlcnic_api_lock(adapter))
+ return -EBUSY;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+
+ return 0;
+}
+
+static void
+qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
+{
+ u32 val;
+
+ if (qlcnic_api_lock(adapter))
+ goto err;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+ QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
+ QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
+
+ if (failed) {
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
+ dev_info(&adapter->pdev->dev,
+ "Device state set to Failed. Please Reboot\n");
+ } else if (!(val & 0x11111111))
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+err:
+ adapter->fw_fail_cnt = 0;
+ adapter->flags &= ~QLCNIC_FW_HANG;
+ clear_bit(__QLCNIC_START_FW, &adapter->state);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+}
+
+/* Grab api lock, before checking state */
+static int
+qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
+{
+ int act, state, active_mask;
+
+ state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+
+ if (adapter->flags & QLCNIC_FW_RESET_OWNER) {
+ active_mask = (~(1 << (adapter->ahw->pci_func * 4)));
+ act = act & active_mask;
+ }
+
+ if (((state & 0x11111111) == (act & 0x11111111)) ||
+ ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
+ return 0;
+ else
+ return 1;
+}
+
+static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
+{
+ u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
+
+ if (val != QLCNIC_DRV_IDC_VER) {
+ dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
+ " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
+ }
+
+ return 0;
+}
+
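+/*
+ * Decide from the IDC device state whether this function should load
+ * the firmware: returns 1 to load it, 0 when another function already
+ * did, and -1 on failure or timeout.
+ */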
+static int
+qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
+{
+ u32 val, prev_state;
+ u8 dev_init_timeo = adapter->dev_init_timeo;
+ u8 portnum = adapter->portnum;
+ u8 ret;
+
+ if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
+ return 1;
+
+ if (qlcnic_api_lock(adapter))
+ return -1;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+ if (!(val & (1 << (portnum * 4)))) {
+ QLC_DEV_SET_REF_CNT(val, portnum);
+ QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
+ }
+
+ prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ QLCDB(adapter, HW, "Device state = %u\n", prev_state);
+
+ switch (prev_state) {
+ case QLCNIC_DEV_COLD:
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
+ QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
+ qlcnic_idc_debug_info(adapter, 0);
+ qlcnic_api_unlock(adapter);
+ return 1;
+
+ case QLCNIC_DEV_READY:
+ ret = qlcnic_check_idc_ver(adapter);
+ qlcnic_api_unlock(adapter);
+ return ret;
+
+ case QLCNIC_DEV_NEED_RESET:
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ QLC_DEV_SET_RST_RDY(val, portnum);
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ break;
+
+ case QLCNIC_DEV_NEED_QUISCENT:
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ QLC_DEV_SET_QSCNT_RDY(val, portnum);
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ break;
+
+ case QLCNIC_DEV_FAILED:
+ dev_err(&adapter->pdev->dev, "Device in failed state.\n");
+ qlcnic_api_unlock(adapter);
+ return -1;
+
+ case QLCNIC_DEV_INITIALIZING:
+ case QLCNIC_DEV_QUISCENT:
+ break;
+ }
+
+ qlcnic_api_unlock(adapter);
+
+ do {
+ msleep(1000);
+ prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ if (prev_state == QLCNIC_DEV_QUISCENT)
+ continue;
+ } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
+
+ if (!dev_init_timeo) {
+ dev_err(&adapter->pdev->dev,
+ "Waiting for device to initialize timeout\n");
+ return -1;
+ }
+
+ if (qlcnic_api_lock(adapter))
+ return -1;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ QLC_DEV_CLR_RST_QSCNT(val, portnum);
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ ret = qlcnic_check_idc_ver(adapter);
+ qlcnic_api_unlock(adapter);
+
+ return ret;
+}
+
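+/* Delayed work: complete the IDC reset handshake and restart the firmware */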
+static void
+qlcnic_fwinit_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ u32 dev_state = 0xf;
+ u32 val;
+
+ if (qlcnic_api_lock(adapter))
+ goto err_ret;
+
+ dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (dev_state == QLCNIC_DEV_QUISCENT ||
+ dev_state == QLCNIC_DEV_NEED_QUISCENT) {
+ qlcnic_api_unlock(adapter);
+ qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
+ FW_POLL_DELAY * 2);
+ return;
+ }
+
+ if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ qlcnic_api_unlock(adapter);
+ goto wait_npar;
+ }
+
+ if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
+ dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
+ adapter->reset_ack_timeo);
+ goto skip_ack_check;
+ }
+
+ if (!qlcnic_check_drv_state(adapter)) {
+skip_ack_check:
+ dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ if (dev_state == QLCNIC_DEV_NEED_RESET) {
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
+ set_bit(__QLCNIC_START_FW, &adapter->state);
+ QLCDB(adapter, DRV, "Restarting fw\n");
+ qlcnic_idc_debug_info(adapter, 0);
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ QLC_DEV_SET_RST_RDY(val, adapter->portnum);
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ }
+
+ qlcnic_api_unlock(adapter);
+
+ rtnl_lock();
+ if (adapter->ahw->fw_dump.enable &&
+ (adapter->flags & QLCNIC_FW_RESET_OWNER)) {
+ QLCDB(adapter, DRV, "Take FW dump\n");
+ qlcnic_dump_fw(adapter);
+ adapter->flags |= QLCNIC_FW_HANG;
+ }
+ rtnl_unlock();
+
+ adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
+ if (!adapter->nic_ops->start_firmware(adapter)) {
+ qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ adapter->fw_wait_cnt = 0;
+ return;
+ }
+ goto err_ret;
+ }
+
+ qlcnic_api_unlock(adapter);
+
+wait_npar:
+ dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
+
+ switch (dev_state) {
+ case QLCNIC_DEV_READY:
+ if (!adapter->nic_ops->start_firmware(adapter)) {
+ qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ adapter->fw_wait_cnt = 0;
+ return;
+ }
+ case QLCNIC_DEV_FAILED:
+ break;
+ default:
+ qlcnic_schedule_work(adapter,
+ qlcnic_fwinit_work, FW_POLL_DELAY);
+ return;
+ }
+
+err_ret:
+ dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
+ "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
+ netif_device_attach(adapter->netdev);
+ qlcnic_clr_all_drv_state(adapter, 0);
+}
+
+static void
+qlcnic_detach_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ struct net_device *netdev = adapter->netdev;
+ u32 status;
+
+ netif_device_detach(netdev);
+
+ /* Don't grab the rtnl lock during quiescent mode */
+ if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+ } else
+ qlcnic_down(adapter, netdev);
+
+ status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+
+ if (status & QLCNIC_RCODE_FATAL_ERROR) {
+ dev_err(&adapter->pdev->dev,
+ "Detaching the device: peg halt status1=0x%x\n",
+ status);
+
+ if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) {
+ dev_err(&adapter->pdev->dev,
+ "On board active cooling fan failed. "
+ "Device has been halted.\n");
+ dev_err(&adapter->pdev->dev,
+ "Replace the adapter.\n");
+ }
+
+ goto err_ret;
+ }
+
+ if (adapter->temp == QLCNIC_TEMP_PANIC) {
+ dev_err(&adapter->pdev->dev, "Detaching the device: temp=%d\n",
+ adapter->temp);
+ goto err_ret;
+ }
+
+ /* Don't ack if this instance is the reset owner */
+ if (!(adapter->flags & QLCNIC_FW_RESET_OWNER)) {
+ if (qlcnic_set_drv_state(adapter, adapter->dev_state)) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to set driver state,"
+ "detaching the device.\n");
+ goto err_ret;
+ }
+ }
+
+ adapter->fw_wait_cnt = 0;
+
+ qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
+
+ return;
+
+err_ret:
+ netif_device_attach(netdev);
+ qlcnic_clr_all_drv_state(adapter, 1);
+}
+
+/* Transition NPAR state to NON-operational */
+static void
+qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+
+ state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ if (state == QLCNIC_DEV_NPAR_NON_OPER)
+ return;
+
+ if (qlcnic_api_lock(adapter))
+ return;
+ QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
+ qlcnic_api_unlock(adapter);
+}
+
+/* Transition to RESET state from READY state only */
+void
+qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+
+ adapter->need_fw_reset = 1;
+ if (qlcnic_api_lock(adapter))
+ return;
+
+ state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ if (state == QLCNIC_DEV_READY) {
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
+ adapter->flags |= QLCNIC_FW_RESET_OWNER;
+ QLCDB(adapter, DRV, "NEED_RESET state set\n");
+ qlcnic_idc_debug_info(adapter, 0);
+ }
+
+ QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
+ qlcnic_api_unlock(adapter);
+}
+
+/* Transit to NPAR READY state from NPAR NOT READY state */
+static void
+qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_api_lock(adapter))
+ return;
+
+ QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
+ QLCDB(adapter, DRV, "NPAR operational state set\n");
+
+ qlcnic_api_unlock(adapter);
+}
+
+static void
+qlcnic_schedule_work(struct qlcnic_adapter *adapter,
+ work_func_t func, int delay)
+{
+ if (test_bit(__QLCNIC_AER, &adapter->state))
+ return;
+
+ INIT_DELAYED_WORK(&adapter->fw_work, func);
+ queue_delayed_work(qlcnic_wq, &adapter->fw_work,
+ round_jiffies_relative(delay));
+}
+
+static void
+qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
+{
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ msleep(10);
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+static void
+qlcnic_attach_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ struct net_device *netdev = adapter->netdev;
+ u32 npar_state;
+
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
+ npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
+ qlcnic_clr_all_drv_state(adapter, 0);
+ else if (npar_state != QLCNIC_DEV_NPAR_OPER)
+ qlcnic_schedule_work(adapter, qlcnic_attach_work,
+ FW_POLL_DELAY);
+ else
+ goto attach;
+ QLCDB(adapter, DRV, "Waiting for NPAR state to operational\n");
+ return;
+ }
+attach:
+ if (netif_running(netdev)) {
+ if (qlcnic_up(adapter, netdev))
+ goto done;
+
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+
+done:
+ netif_device_attach(netdev);
+ adapter->fw_fail_cnt = 0;
+ adapter->flags &= ~QLCNIC_FW_HANG;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ if (!qlcnic_clr_drv_state(adapter))
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
+ FW_POLL_DELAY);
+}
+
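+/*
+ * Periodic health check: temperature, IDC device state and firmware
+ * heartbeat; returns 1 when the device needs to be detached for recovery.
+ */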
+static int
+qlcnic_check_health(struct qlcnic_adapter *adapter)
+{
+ u32 state = 0, heartbeat;
+ u32 peg_status;
+
+ if (qlcnic_check_temp(adapter))
+ goto detach;
+
+ if (adapter->need_fw_reset)
+ qlcnic_dev_request_reset(adapter);
+
+ state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_NEED_RESET) {
+ qlcnic_set_npar_non_operational(adapter);
+ adapter->need_fw_reset = 1;
+ } else if (state == QLCNIC_DEV_NEED_QUISCENT)
+ goto detach;
+
+ heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ if (heartbeat != adapter->heartbeat) {
+ adapter->heartbeat = heartbeat;
+ adapter->fw_fail_cnt = 0;
+ if (adapter->need_fw_reset)
+ goto detach;
+
+ if (adapter->reset_context && auto_fw_reset) {
+ qlcnic_reset_hw_context(adapter);
+ adapter->netdev->trans_start = jiffies;
+ }
+
+ return 0;
+ }
+
+ if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
+ return 0;
+
+ adapter->flags |= QLCNIC_FW_HANG;
+
+ qlcnic_dev_request_reset(adapter);
+
+ if (auto_fw_reset)
+ clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+
+ dev_err(&adapter->pdev->dev, "firmware hang detected\n");
+ dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
+ "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
+ "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
+ "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
+ "PEG_NET_4_PC: 0x%x\n",
+ QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1),
+ QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS2),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c));
+ peg_status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+ if (LSW(MSB(peg_status)) == 0x67)
+ dev_err(&adapter->pdev->dev,
+ "Firmware aborted with error code 0x00006700. "
+ "Device is being reset.\n");
+detach:
+ adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
+ QLCNIC_DEV_NEED_RESET;
+
+ if (auto_fw_reset &&
+ !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
+
+ qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
+ QLCDB(adapter, DRV, "fw recovery scheduled.\n");
+ }
+
+ return 1;
+}
+
+static void
+qlcnic_fw_poll_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ goto reschedule;
+
+ if (qlcnic_check_health(adapter))
+ return;
+
+ if (adapter->fhash.fnum)
+ qlcnic_prune_lb_filters(adapter);
+
+reschedule:
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+}
+
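+/* Return 1 if every lower-numbered function of this device is in D3cold */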
+static int qlcnic_is_first_func(struct pci_dev *pdev)
+{
+ struct pci_dev *oth_pdev;
+ int val = pdev->devfn;
+
+ while (val-- > 0) {
+ oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
+ (pdev->bus), pdev->bus->number,
+ PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
+ if (!oth_pdev)
+ continue;
+
+ if (oth_pdev->current_state != PCI_D3cold) {
+ pci_dev_put(oth_pdev);
+ return 0;
+ }
+ pci_dev_put(oth_pdev);
+ }
+ return 1;
+}
+
+static int qlcnic_attach_func(struct pci_dev *pdev)
+{
+ int err, first_func;
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ pdev->error_state = pci_channel_io_normal;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ first_func = qlcnic_is_first_func(pdev);
+
+ if (qlcnic_api_lock(adapter))
+ return -EINVAL;
+
+ if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
+ adapter->need_fw_reset = 1;
+ set_bit(__QLCNIC_START_FW, &adapter->state);
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
+ QLCDB(adapter, DRV, "Restarting fw\n");
+ }
+ qlcnic_api_unlock(adapter);
+
+ err = adapter->nic_ops->start_firmware(adapter);
+ if (err)
+ return err;
+
+ qlcnic_clr_drv_state(adapter);
+ qlcnic_setup_intr(adapter);
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (err) {
+ qlcnic_clr_all_drv_state(adapter, 1);
+ clear_bit(__QLCNIC_AER, &adapter->state);
+ netif_device_attach(netdev);
+ return err;
+ }
+
+ err = qlcnic_up(adapter, netdev);
+ if (err)
+ goto done;
+
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+ done:
+ netif_device_attach(netdev);
+ return err;
+}
+
+static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (state == pci_channel_io_normal)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ set_bit(__QLCNIC_AER, &adapter->state);
+ netif_device_detach(netdev);
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+ qlcnic_teardown_intr(adapter);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
+{
+ return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
+ PCI_ERS_RESULT_RECOVERED;
+}
+
+static void qlcnic_io_resume(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
+ test_and_clear_bit(__QLCNIC_AER, &adapter->state))
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
+ FW_POLL_DELAY);
+}
+
+static int
+qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ err = qlcnic_can_start_firmware(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_check_npar_opertional(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_initialize_nic(adapter);
+ if (err)
+ return err;
+
+ qlcnic_check_options(adapter);
+
+ err = qlcnic_set_eswitch_port_config(adapter);
+ if (err)
+ return err;
+
+ adapter->need_fw_reset = 0;
+
+ return err;
+}
+
+static int
+qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
+qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+{
+ return -EOPNOTSUPP;
+}
+
+static ssize_t
+qlcnic_store_bridged_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long new;
+ int ret = -EINVAL;
+
+ if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
+ goto err_out;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ goto err_out;
+
+ if (strict_strtoul(buf, 2, &new))
+ goto err_out;
+
+ if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
+ ret = len;
+
+err_out:
+ return ret;
+}
+
+static ssize_t
+qlcnic_show_bridged_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int bridged_mode = 0;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
+
+ return sprintf(buf, "%d\n", bridged_mode);
+}
+
+static struct device_attribute dev_attr_bridged_mode = {
+ .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
+ .show = qlcnic_show_bridged_mode,
+ .store = qlcnic_store_bridged_mode,
+};
+
+static ssize_t
+qlcnic_store_diag_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long new;
+
+ if (strict_strtoul(buf, 2, &new))
+ return -EINVAL;
+
+ if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ adapter->flags ^= QLCNIC_DIAG_ENABLED;
+
+ return len;
+}
+
+static ssize_t
+qlcnic_show_diag_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n",
+ !!(adapter->flags & QLCNIC_DIAG_ENABLED));
+}
+
+static struct device_attribute dev_attr_diag_mode = {
+ .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
+ .show = qlcnic_show_diag_mode,
+ .store = qlcnic_store_diag_mode,
+};
+
+int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
+{
+ if (!use_msi_x && !use_msi) {
+ netdev_info(netdev, "no msix or msi support, hence no rss\n");
+ return -EINVAL;
+ }
+
+ if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) {
+ netdev_info(netdev, "rss_ring valid range [2 - %x] in "
+ " powers of 2\n", max_hw);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err = 0;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ netif_device_detach(netdev);
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+ qlcnic_detach(adapter);
+ qlcnic_teardown_intr(adapter);
+
+ if (qlcnic_enable_msix(adapter, data)) {
+ netdev_info(netdev, "failed setting max_rss; rss disabled\n");
+ qlcnic_enable_msi_legacy(adapter);
+ }
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (err)
+ goto done;
+ err = __qlcnic_up(adapter, netdev);
+ if (err)
+ goto done;
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+ done:
+ netif_device_attach(netdev);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return err;
+}
+
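+/* Beacon config word: LSB holds the blink rate, MSB the LED state */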
+static int
+qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon, u8 *state,
+ u8 *rate)
+{
+ *rate = LSB(beacon);
+ *state = MSB(beacon);
+
+ QLCDB(adapter, DRV, "rate %x state %x\n", *rate, *state);
+
+ if (!*state) {
+ *rate = __QLCNIC_MAX_LED_RATE;
+ return 0;
+ } else if (*state > __QLCNIC_MAX_LED_STATE)
+ return -EINVAL;
+
+ if ((!*rate) || (*rate > __QLCNIC_MAX_LED_RATE))
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t
+qlcnic_store_beacon(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int max_sds_rings = adapter->max_sds_rings;
+ int dev_down = 0;
+ u16 beacon;
+ u8 b_state, b_rate;
+ int err;
+
+ if (len != sizeof(u16))
+ return QL_STATUS_INVALID_PARAM;
+
+ memcpy(&beacon, buf, sizeof(u16));
+ err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
+ if (err)
+ return err;
+
+ if (adapter->ahw->beacon_state == b_state)
+ return len;
+
+ if (!adapter->ahw->beacon_state)
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
+ return -EBUSY;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+ err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST);
+ if (err) {
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+ return err;
+ }
+ dev_down = 1;
+ }
+
+ err = qlcnic_config_led(adapter, b_state, b_rate);
+
+ if (!err) {
+ adapter->ahw->beacon_state = b_state;
+ err = len;
+ }
+
+ if (dev_down) {
+ qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ }
+
+ if (!b_state)
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+ return err;
+}
+
+static ssize_t
+qlcnic_show_beacon(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", adapter->ahw->beacon_state);
+}
+
+static struct device_attribute dev_attr_beacon = {
+ .attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)},
+ .show = qlcnic_show_beacon,
+ .store = qlcnic_store_beacon,
+};
+
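+/* CRB accesses must be 4 bytes (8 in the CAMQM window) and naturally aligned */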
+static int
+qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ size_t crb_size = 4;
+
+ if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ return -EIO;
+
+ if (offset < QLCNIC_PCI_CRBSPACE) {
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
+ QLCNIC_PCI_CAMQM_END))
+ crb_size = 8;
+ else
+ return -EINVAL;
+ }
+
+ if ((size != crb_size) || (offset & (crb_size-1)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t
+qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 data;
+ u64 qmdata;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
+ memcpy(buf, &qmdata, size);
+ } else {
+ data = QLCRD32(adapter, offset);
+ memcpy(buf, &data, size);
+ }
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 data;
+ u64 qmdata;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ memcpy(&qmdata, buf, size);
+ qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
+ } else {
+ memcpy(&data, buf, size);
+ QLCWR32(adapter, offset, data);
+ }
+ return size;
+}
+
+static int
+qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ return -EIO;
+
+ if ((size != 8) || (offset & 0x7))
+ return -EIO;
+
+ return 0;
+}
+
+static ssize_t
+qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
+ return -EIO;
+
+ memcpy(buf, &data, size);
+
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ memcpy(&data, buf, size);
+
+ if (qlcnic_pci_mem_write_2M(adapter, offset, data))
+ return -EIO;
+
+ return size;
+}
+
+static struct bin_attribute bin_attr_crb = {
+ .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_crb,
+ .write = qlcnic_sysfs_write_crb,
+};
+
+static struct bin_attribute bin_attr_mem = {
+ .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_mem,
+ .write = qlcnic_sysfs_write_mem,
+};
+
+static int
+validate_pm_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_pm_func_cfg *pm_cfg, int count)
+{
+ u8 src_pci_func, s_esw_id, d_esw_id;
+ u8 dest_pci_func;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ src_pci_func = pm_cfg[i].pci_func;
+ dest_pci_func = pm_cfg[i].dest_npar;
+ if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
+ || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
+ return QL_STATUS_INVALID_PARAM;
+
+ if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
+ return QL_STATUS_INVALID_PARAM;
+
+ if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
+ return QL_STATUS_INVALID_PARAM;
+
+ s_esw_id = adapter->npars[src_pci_func].phy_port;
+ d_esw_id = adapter->npars[dest_pci_func].phy_port;
+
+ if (s_esw_id != d_esw_id)
+ return QL_STATUS_INVALID_PARAM;
+ }
+ return 0;
+}
+
+static ssize_t
+qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_pm_func_cfg *pm_cfg;
+ u32 id, action, pci_func;
+ int count, rem, i, ret;
+
+ count = size / sizeof(struct qlcnic_pm_func_cfg);
+ rem = size % sizeof(struct qlcnic_pm_func_cfg);
+ if (rem)
+ return QL_STATUS_INVALID_PARAM;
+
+ pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
+
+ ret = validate_pm_config(adapter, pm_cfg, count);
+ if (ret)
+ return ret;
+ for (i = 0; i < count; i++) {
+ pci_func = pm_cfg[i].pci_func;
+ action = !!pm_cfg[i].action;
+ id = adapter->npars[pci_func].phy_port;
+ ret = qlcnic_config_port_mirroring(adapter, id,
+ action, pci_func);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < count; i++) {
+ pci_func = pm_cfg[i].pci_func;
+ id = adapter->npars[pci_func].phy_port;
+ adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
+ adapter->npars[pci_func].dest_npar = id;
+ }
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
+ int i;
+
+ if (size != sizeof(pm_cfg))
+ return QL_STATUS_INVALID_PARAM;
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+ continue;
+ pm_cfg[i].action = adapter->npars[i].enable_pm;
+ pm_cfg[i].dest_npar = 0;
+ pm_cfg[i].pci_func = i;
+ }
+ memcpy(buf, &pm_cfg, size);
+
+ return size;
+}
+
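+/* Sanity-check a batch of eSwitch port configurations before applying them */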
+static int
+validate_esw_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg, int count)
+{
+ u32 op_mode;
+ u8 pci_func;
+ int i;
+
+ op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
+
+ for (i = 0; i < count; i++) {
+ pci_func = esw_cfg[i].pci_func;
+ if (pci_func >= QLCNIC_MAX_PCI_FUNC)
+ return QL_STATUS_INVALID_PARAM;
+
+ if (adapter->op_mode == QLCNIC_MGMT_FUNC)
+ if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+ return QL_STATUS_INVALID_PARAM;
+
+ switch (esw_cfg[i].op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
+ QLCNIC_NON_PRIV_FUNC) {
+ if (esw_cfg[i].mac_anti_spoof != 0)
+ return QL_STATUS_INVALID_PARAM;
+ if (esw_cfg[i].mac_override != 1)
+ return QL_STATUS_INVALID_PARAM;
+ if (esw_cfg[i].promisc_mode != 1)
+ return QL_STATUS_INVALID_PARAM;
+ }
+ break;
+ case QLCNIC_ADD_VLAN:
+ if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
+ return QL_STATUS_INVALID_PARAM;
+ if (!esw_cfg[i].op_type)
+ return QL_STATUS_INVALID_PARAM;
+ break;
+ case QLCNIC_DEL_VLAN:
+ if (!esw_cfg[i].op_type)
+ return QL_STATUS_INVALID_PARAM;
+ break;
+ default:
+ return QL_STATUS_INVALID_PARAM;
+ }
+ }
+ return 0;
+}
+
+static ssize_t
+qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_func_cfg *esw_cfg;
+ struct qlcnic_npar_info *npar;
+ int count, rem, i, ret;
+ u8 pci_func, op_mode = 0;
+
+ count = size / sizeof(struct qlcnic_esw_func_cfg);
+ rem = size % sizeof(struct qlcnic_esw_func_cfg);
+ if (rem)
+ return QL_STATUS_INVALID_PARAM;
+
+ esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
+ ret = validate_esw_config(adapter, esw_cfg, count);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ if (adapter->op_mode == QLCNIC_MGMT_FUNC &&
+ qlcnic_config_switch_port(adapter, &esw_cfg[i]))
+ return QL_STATUS_INVALID_PARAM;
+
+ if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
+ continue;
+
+ op_mode = esw_cfg[i].op_mode;
+ qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
+ esw_cfg[i].op_mode = op_mode;
+ esw_cfg[i].pci_func = adapter->ahw->pci_func;
+
+ switch (esw_cfg[i].op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
+ break;
+ case QLCNIC_ADD_VLAN:
+ qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
+ break;
+ case QLCNIC_DEL_VLAN:
+ esw_cfg[i].vlan_id = 0;
+ qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
+ break;
+ }
+ }
+
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ goto out;
+
+ for (i = 0; i < count; i++) {
+ pci_func = esw_cfg[i].pci_func;
+ npar = &adapter->npars[pci_func];
+ switch (esw_cfg[i].op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ npar->promisc_mode = esw_cfg[i].promisc_mode;
+ npar->mac_override = esw_cfg[i].mac_override;
+ npar->offload_flags = esw_cfg[i].offload_flags;
+ npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
+ npar->discard_tagged = esw_cfg[i].discard_tagged;
+ break;
+ case QLCNIC_ADD_VLAN:
+ npar->pvid = esw_cfg[i].vlan_id;
+ break;
+ case QLCNIC_DEL_VLAN:
+ npar->pvid = 0;
+ break;
+ }
+ }
+out:
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
+ u8 i;
+
+ if (size != sizeof(esw_cfg))
+ return QL_STATUS_INVALID_PARAM;
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+ continue;
+ esw_cfg[i].pci_func = i;
+ if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
+ return QL_STATUS_INVALID_PARAM;
+ }
+ memcpy(buf, &esw_cfg, size);
+
+ return size;
+}
+
+static int
+validate_npar_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_npar_func_cfg *np_cfg, int count)
+{
+ u8 pci_func, i;
+
+ for (i = 0; i < count; i++) {
+ pci_func = np_cfg[i].pci_func;
+ if (pci_func >= QLCNIC_MAX_PCI_FUNC)
+ return QL_STATUS_INVALID_PARAM;
+
+ if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+ return QL_STATUS_INVALID_PARAM;
+
+ if (!IS_VALID_BW(np_cfg[i].min_bw) ||
+ !IS_VALID_BW(np_cfg[i].max_bw))
+ return QL_STATUS_INVALID_PARAM;
+ }
+ return 0;
+}
+
+static ssize_t
+qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_info nic_info;
+ struct qlcnic_npar_func_cfg *np_cfg;
+ int i, count, rem, ret;
+ u8 pci_func;
+
+ count = size / sizeof(struct qlcnic_npar_func_cfg);
+ rem = size % sizeof(struct qlcnic_npar_func_cfg);
+ if (rem)
+ return QL_STATUS_INVALID_PARAM;
+
+ np_cfg = (struct qlcnic_npar_func_cfg *) buf;
+ ret = validate_npar_config(adapter, np_cfg, count);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ pci_func = np_cfg[i].pci_func;
+ ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
+ if (ret)
+ return ret;
+ nic_info.pci_func = pci_func;
+ nic_info.min_tx_bw = np_cfg[i].min_bw;
+ nic_info.max_tx_bw = np_cfg[i].max_bw;
+ ret = qlcnic_set_nic_info(adapter, &nic_info);
+ if (ret)
+ return ret;
+ adapter->npars[pci_func].min_bw = nic_info.min_tx_bw;
+ adapter->npars[pci_func].max_bw = nic_info.max_tx_bw;
+ }
+
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_info nic_info;
+ struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
+ int i, ret;
+
+ if (size != sizeof(np_cfg))
+ return QL_STATUS_INVALID_PARAM;
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+ continue;
+ ret = qlcnic_get_nic_info(adapter, &nic_info, i);
+ if (ret)
+ return ret;
+
+ np_cfg[i].pci_func = i;
+ np_cfg[i].op_mode = (u8)nic_info.op_mode;
+ np_cfg[i].port_num = nic_info.phys_port;
+ np_cfg[i].fw_capab = nic_info.capabilities;
+ np_cfg[i].min_bw = nic_info.min_tx_bw;
+ np_cfg[i].max_bw = nic_info.max_tx_bw;
+ np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
+ np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
+ }
+ memcpy(buf, &np_cfg, size);
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_statistics port_stats;
+ int ret;
+
+ if (size != sizeof(struct qlcnic_esw_statistics))
+ return QL_STATUS_INVALID_PARAM;
+
+ if (offset >= QLCNIC_MAX_PCI_FUNC)
+ return QL_STATUS_INVALID_PARAM;
+
+ memset(&port_stats, 0, size);
+ ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
+ &port_stats.rx);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
+ &port_stats.tx);
+ if (ret)
+ return ret;
+
+ memcpy(buf, &port_stats, size);
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_statistics esw_stats;
+ int ret;
+
+ if (size != sizeof(struct qlcnic_esw_statistics))
+ return QL_STATUS_INVALID_PARAM;
+
+ if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
+ return QL_STATUS_INVALID_PARAM;
+
+ memset(&esw_stats, 0, size);
+ ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
+ &esw_stats.rx);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
+ &esw_stats.tx);
+ if (ret)
+ return ret;
+
+ memcpy(buf, &esw_stats, size);
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int ret;
+
+ if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
+ return QL_STATUS_INVALID_PARAM;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
+ QLCNIC_QUERY_RX_COUNTER);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
+ QLCNIC_QUERY_TX_COUNTER);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int ret;
+
+ if (offset >= QLCNIC_MAX_PCI_FUNC)
+ return QL_STATUS_INVALID_PARAM;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
+ QLCNIC_QUERY_RX_COUNTER);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
+ QLCNIC_QUERY_TX_COUNTER);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
+ struct qlcnic_pci_info *pci_info;
+ int i, ret;
+
+ if (size != sizeof(pci_cfg))
+ return QL_STATUS_INVALID_PARAM;
+
+ pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+
+ ret = qlcnic_get_pci_info(adapter, pci_info);
+ if (ret) {
+ kfree(pci_info);
+ return ret;
+ }
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ pci_cfg[i].pci_func = pci_info[i].id;
+ pci_cfg[i].func_type = pci_info[i].type;
+ pci_cfg[i].port_num = pci_info[i].default_port;
+ pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
+ pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
+ memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
+ }
+ memcpy(buf, &pci_cfg, size);
+ kfree(pci_info);
+ return size;
+}
+
+static struct bin_attribute bin_attr_npar_config = {
+ .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_npar_config,
+ .write = qlcnic_sysfs_write_npar_config,
+};
+
+static struct bin_attribute bin_attr_pci_config = {
+ .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_pci_config,
+ .write = NULL,
+};
+
+static struct bin_attribute bin_attr_port_stats = {
+ .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_get_port_stats,
+ .write = qlcnic_sysfs_clear_port_stats,
+};
+
+static struct bin_attribute bin_attr_esw_stats = {
+ .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_get_esw_stats,
+ .write = qlcnic_sysfs_clear_esw_stats,
+};
+
+static struct bin_attribute bin_attr_esw_config = {
+ .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_esw_config,
+ .write = qlcnic_sysfs_write_esw_config,
+};
+
+static struct bin_attribute bin_attr_pm_config = {
+ .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_pm_config,
+ .write = qlcnic_sysfs_write_pm_config,
+};
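+
+/* Each binary attribute above transfers a whole array of its config
+ * structure in one read/write; e.g. an illustrative userspace sketch
+ * (the sysfs path depends on the device's PCI address):
+ *
+ *	struct qlcnic_pm_func_cfg cfg[QLCNIC_MAX_PCI_FUNC];
+ *	int fd = open(".../pm_config", O_RDWR);
+ *	read(fd, cfg, sizeof(cfg));	(must be exactly sizeof(cfg))
+ *	cfg[0].action = 1;		(request mirroring for func 0)
+ *	write(fd, cfg, sizeof(cfg));
+ */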
+
+static void
+qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ if (device_create_file(dev, &dev_attr_bridged_mode))
+ dev_warn(dev,
+ "failed to create bridged_mode sysfs entry\n");
+}
+
+static void
+qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ device_remove_file(dev, &dev_attr_bridged_mode);
+}
+
+static void
+qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (device_create_bin_file(dev, &bin_attr_port_stats))
+ dev_info(dev, "failed to create port stats sysfs entry\n");
+
+ if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
+ return;
+ if (device_create_file(dev, &dev_attr_diag_mode))
+ dev_info(dev, "failed to create diag_mode sysfs entry\n");
+ if (device_create_file(dev, &dev_attr_beacon))
+ dev_info(dev, "failed to create beacon sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_crb))
+ dev_info(dev, "failed to create crb sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_mem))
+ dev_info(dev, "failed to create mem sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_pci_config))
+ dev_info(dev, "failed to create pci config sysfs entry\n");
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return;
+ if (device_create_bin_file(dev, &bin_attr_esw_config))
+ dev_info(dev, "failed to create esw config sysfs entry\n");
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ return;
+ if (device_create_bin_file(dev, &bin_attr_npar_config))
+ dev_info(dev, "failed to create npar config sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_pm_config))
+ dev_info(dev, "failed to create pm config sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_esw_stats))
+ dev_info(dev, "failed to create eswitch stats sysfs entry\n");
+}
+
+static void
+qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ device_remove_bin_file(dev, &bin_attr_port_stats);
+
+ if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
+ return;
+ device_remove_file(dev, &dev_attr_diag_mode);
+ device_remove_file(dev, &dev_attr_beacon);
+ device_remove_bin_file(dev, &bin_attr_crb);
+ device_remove_bin_file(dev, &bin_attr_mem);
+ device_remove_bin_file(dev, &bin_attr_pci_config);
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return;
+ device_remove_bin_file(dev, &bin_attr_esw_config);
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ return;
+ device_remove_bin_file(dev, &bin_attr_npar_config);
+ device_remove_bin_file(dev, &bin_attr_pm_config);
+ device_remove_bin_file(dev, &bin_attr_esw_stats);
+}
+
+#ifdef CONFIG_INET
+
+#define is_qlcnic_netdev(dev) ((dev)->netdev_ops == &qlcnic_netdev_ops)
+
+static void
+qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
+ struct net_device *dev, unsigned long event)
+{
+ struct in_device *indev;
+
+ indev = in_dev_get(dev);
+ if (!indev)
+ return;
+
+ for_ifa(indev) {
+ switch (event) {
+ case NETDEV_UP:
+ qlcnic_config_ipaddr(adapter,
+ ifa->ifa_address, QLCNIC_IP_UP);
+ break;
+ case NETDEV_DOWN:
+ qlcnic_config_ipaddr(adapter,
+ ifa->ifa_address, QLCNIC_IP_DOWN);
+ break;
+ default:
+ break;
+ }
+ } endfor_ifa(indev);
+
+ in_dev_put(indev);
+}
+
+static void
+qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct net_device *dev;
+ u16 vid;
+
+ qlcnic_config_indev_addr(adapter, netdev, event);
+
+ for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
+ dev = __vlan_find_dev_deep(netdev, vid);
+ if (!dev)
+ continue;
+ qlcnic_config_indev_addr(adapter, dev, event);
+ }
+}
+
+static int qlcnic_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *dev = (struct net_device *)ptr;
+
+recheck:
+ if (dev == NULL)
+ goto done;
+
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ dev = vlan_dev_real_dev(dev);
+ goto recheck;
+ }
+
+ if (!is_qlcnic_netdev(dev))
+ goto done;
+
+ adapter = netdev_priv(dev);
+
+ if (!adapter)
+ goto done;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ goto done;
+
+ qlcnic_config_indev_addr(adapter, dev, event);
+done:
+ return NOTIFY_DONE;
+}
+
+static int
+qlcnic_inetaddr_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *dev;
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+
+ dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+
+recheck:
+ if (dev == NULL)
+ goto done;
+
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ dev = vlan_dev_real_dev(dev);
+ goto recheck;
+ }
+
+ if (!is_qlcnic_netdev(dev))
+ goto done;
+
+ adapter = netdev_priv(dev);
+
+ if (!adapter)
+ goto done;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ goto done;
+
+ switch (event) {
+ case NETDEV_UP:
+ qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
+ break;
+ case NETDEV_DOWN:
+ qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
+ break;
+ default:
+ break;
+ }
+
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block qlcnic_netdev_cb = {
+ .notifier_call = qlcnic_netdev_event,
+};
+
+static struct notifier_block qlcnic_inetaddr_cb = {
+ .notifier_call = qlcnic_inetaddr_event,
+};
+#else
+static void
+qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
+{ }
+#endif
+
+static struct pci_error_handlers qlcnic_err_handler = {
+ .error_detected = qlcnic_io_error_detected,
+ .slot_reset = qlcnic_io_slot_reset,
+ .resume = qlcnic_io_resume,
+};
+
+static struct pci_driver qlcnic_driver = {
+ .name = qlcnic_driver_name,
+ .id_table = qlcnic_pci_tbl,
+ .probe = qlcnic_probe,
+ .remove = __devexit_p(qlcnic_remove),
+#ifdef CONFIG_PM
+ .suspend = qlcnic_suspend,
+ .resume = qlcnic_resume,
+#endif
+ .shutdown = qlcnic_shutdown,
+ .err_handler = &qlcnic_err_handler,
+};
+
+static int __init qlcnic_init_module(void)
+{
+ int ret;
+
+ printk(KERN_INFO "%s\n", qlcnic_driver_string);
+
+ qlcnic_wq = create_singlethread_workqueue("qlcnic");
+ if (qlcnic_wq == NULL) {
+ printk(KERN_ERR "qlcnic: cannot create workqueue\n");
+ return -ENOMEM;
+ }
+
+#ifdef CONFIG_INET
+ register_netdevice_notifier(&qlcnic_netdev_cb);
+ register_inetaddr_notifier(&qlcnic_inetaddr_cb);
+#endif
+
+ ret = pci_register_driver(&qlcnic_driver);
+ if (ret) {
+#ifdef CONFIG_INET
+ unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
+ unregister_netdevice_notifier(&qlcnic_netdev_cb);
+#endif
+ destroy_workqueue(qlcnic_wq);
+ }
+
+ return ret;
+}
+
+module_init(qlcnic_init_module);
+
+static void __exit qlcnic_exit_module(void)
+{
+ pci_unregister_driver(&qlcnic_driver);
+
+#ifdef CONFIG_INET
+ unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
+ unregister_netdevice_notifier(&qlcnic_netdev_cb);
+#endif
+ destroy_workqueue(qlcnic_wq);
+}
+
+module_exit(qlcnic_exit_module);
diff --git a/drivers/net/ethernet/qlogic/qlge/Makefile b/drivers/net/ethernet/qlogic/qlge/Makefile
new file mode 100644
index 000000000000..8a197658d76f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlge/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the QLogic 10GbE PCI Express Ethernet driver
+#
+
+obj-$(CONFIG_QLGE) += qlge.o
+
+qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
new file mode 100644
index 000000000000..8731f79c9efc
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -0,0 +1,2334 @@
+/*
+ * QLogic QLA41xx NIC HBA Driver
+ * Copyright (c) 2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qlge for copyright and licensing details.
+ */
+#ifndef _QLGE_H_
+#define _QLGE_H_
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+
+/*
+ * General definitions...
+ */
+#define DRV_NAME "qlge"
+#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
+#define DRV_VERSION "v1.00.00.29.00.00-01"
+
+#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
+
+#define QLGE_VENDOR_ID 0x1077
+#define QLGE_DEVICE_ID_8012 0x8012
+#define QLGE_DEVICE_ID_8000 0x8000
+#define MAX_CPUS 8
+#define MAX_TX_RINGS MAX_CPUS
+#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
+
+#define NUM_TX_RING_ENTRIES 256
+#define NUM_RX_RING_ENTRIES 256
+
+#define NUM_SMALL_BUFFERS 512
+#define NUM_LARGE_BUFFERS 512
+#define DB_PAGE_SIZE 4096
+
+/* Calculate the number of (4k) pages required to
+ * contain a buffer queue of the given length.
+ */
+#define MAX_DB_PAGES_PER_BQ(x) \
+ (((x * sizeof(u64)) / DB_PAGE_SIZE) + \
+ (((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
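+/* e.g. NUM_SMALL_BUFFERS (512) entries take 512 * sizeof(u64) = 4096
+ * bytes, exactly one DB_PAGE_SIZE page; 513 entries would take two.
+ */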
+
+#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
+ MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
+ MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
+#define LARGE_BUFFER_MAX_SIZE 8192
+#define LARGE_BUFFER_MIN_SIZE 2048
+
+#define MAX_CQ 128
+#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
+#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */
+#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
+#define UDELAY_COUNT 3
+#define UDELAY_DELAY 100
+
+#define TX_DESC_PER_IOCB 8
+/* The maximum number of frags we handle is based
+ * on PAGE_SIZE...
+ */
+#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */
+#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
+#else /* all other page sizes */
+#define TX_DESC_PER_OAL 0
+#endif
+
+/* Word shifting for converting 64-bit
+ * address to a series of 16-bit words.
+ * This is used for some MPI firmware
+ * mailbox commands.
+ */
+#define LSW(x) ((u16)(x))
+#define MSW(x) ((u16)((u32)(x) >> 16))
+#define LSD(x) ((u32)((u64)(x)))
+#define MSD(x) ((u32)((((u64)(x)) >> 32)))
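+/* e.g. for x = 0x1122334455667788:
+ *	MSD(x) = 0x11223344, LSD(x) = 0x55667788,
+ *	MSW(LSD(x)) = 0x5566, LSW(LSD(x)) = 0x7788
+ */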
+
+/* MPI test register definitions. This register
+ * is used for determining alternate NIC function's
+ * PCI->func number.
+ */
+enum {
+ MPI_TEST_FUNC_PORT_CFG = 0x1002,
+ MPI_TEST_FUNC_PRB_CTL = 0x100e,
+ MPI_TEST_FUNC_PRB_EN = 0x18a20000,
+ MPI_TEST_FUNC_RST_STS = 0x100a,
+ MPI_TEST_FUNC_RST_FRC = 0x00000003,
+ MPI_TEST_NIC_FUNC_MASK = 0x00000007,
+ MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0),
+ MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e,
+ MPI_TEST_NIC1_FUNC_SHIFT = 1,
+ MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4),
+ MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0,
+ MPI_TEST_NIC2_FUNC_SHIFT = 5,
+ MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8),
+ MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00,
+ MPI_TEST_FC1_FUNCTION_SHIFT = 9,
+ MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12),
+ MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000,
+ MPI_TEST_FC2_FUNCTION_SHIFT = 13,
+
+ MPI_NIC_READ = 0x00000000,
+ MPI_NIC_REG_BLOCK = 0x00020000,
+ MPI_NIC_FUNCTION_SHIFT = 6,
+};
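+
+/* e.g. the alternate NIC function number is recovered from the port
+ * configuration word roughly as:
+ *
+ *	if (val & MPI_TEST_NIC1_FUNCTION_ENABLE)
+ *		func = (val & MPI_TEST_NIC1_FUNCTION_MASK) >>
+ *			MPI_TEST_NIC1_FUNC_SHIFT;
+ */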
+
+/*
+ * Processor Address Register (PROC_ADDR) bit definitions.
+ */
+enum {
+
+ /* Misc. stuff */
+ MAILBOX_COUNT = 16,
+ MAILBOX_TIMEOUT = 5,
+
+ PROC_ADDR_RDY = (1 << 31),
+ PROC_ADDR_R = (1 << 30),
+ PROC_ADDR_ERR = (1 << 29),
+ PROC_ADDR_DA = (1 << 28),
+ PROC_ADDR_FUNC0_MBI = 0x00001180,
+ PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT),
+ PROC_ADDR_FUNC0_CTL = 0x000011a1,
+ PROC_ADDR_FUNC2_MBI = 0x00001280,
+ PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT),
+ PROC_ADDR_FUNC2_CTL = 0x000012a1,
+ PROC_ADDR_MPI_RISC = 0x00000000,
+ PROC_ADDR_MDE = 0x00010000,
+ PROC_ADDR_REGBLOCK = 0x00020000,
+ PROC_ADDR_RISC_REG = 0x00030000,
+};
+
+/*
+ * System Register (SYS) bit definitions.
+ */
+enum {
+ SYS_EFE = (1 << 0),
+ SYS_FAE = (1 << 1),
+ SYS_MDC = (1 << 2),
+ SYS_DST = (1 << 3),
+ SYS_DWC = (1 << 4),
+ SYS_EVW = (1 << 5),
+ SYS_OMP_DLY_MASK = 0x3f000000,
+ /*
+ * There are no values defined as of edit #15.
+ */
+ SYS_ODI = (1 << 14),
+};
+
+/*
+ * Reset/Failover Register (RST_FO) bit definitions.
+ */
+enum {
+ RST_FO_TFO = (1 << 0),
+ RST_FO_RR_MASK = 0x00060000,
+ RST_FO_RR_CQ_CAM = 0x00000000,
+ RST_FO_RR_DROP = 0x00000002,
+ RST_FO_RR_DQ = 0x00000004,
+ RST_FO_RR_RCV_FUNC_CQ = 0x00000006,
+ RST_FO_FRB = (1 << 12),
+ RST_FO_MOP = (1 << 13),
+ RST_FO_REG = (1 << 14),
+ RST_FO_FR = (1 << 15),
+};
+
+/*
+ * Function Specific Control Register (FSC) bit definitions.
+ */
+enum {
+ FSC_DBRST_MASK = 0x00070000,
+ FSC_DBRST_256 = 0x00000000,
+ FSC_DBRST_512 = 0x00000001,
+ FSC_DBRST_768 = 0x00000002,
+ FSC_DBRST_1024 = 0x00000003,
+ FSC_DBL_MASK = 0x00180000,
+ FSC_DBL_DBRST = 0x00000000,
+ FSC_DBL_MAX_PLD = 0x00000008,
+ FSC_DBL_MAX_BRST = 0x00000010,
+ FSC_DBL_128_BYTES = 0x00000018,
+ FSC_EC = (1 << 5),
+ FSC_EPC_MASK = 0x00c00000,
+ FSC_EPC_INBOUND = (1 << 6),
+ FSC_EPC_OUTBOUND = (1 << 7),
+ FSC_VM_PAGESIZE_MASK = 0x07000000,
+ FSC_VM_PAGE_2K = 0x00000100,
+ FSC_VM_PAGE_4K = 0x00000200,
+ FSC_VM_PAGE_8K = 0x00000300,
+ FSC_VM_PAGE_64K = 0x00000600,
+ FSC_SH = (1 << 11),
+ FSC_DSB = (1 << 12),
+ FSC_STE = (1 << 13),
+ FSC_FE = (1 << 15),
+};
+
+/*
+ * Host Command Status Register (CSR) bit definitions.
+ */
+enum {
+ CSR_ERR_STS_MASK = 0x0000003f,
+ /*
+ * There are no values defined as of edit #15.
+ */
+ CSR_RR = (1 << 8),
+ CSR_HRI = (1 << 9),
+ CSR_RP = (1 << 10),
+ CSR_CMD_PARM_SHIFT = 22,
+ CSR_CMD_NOP = 0x00000000,
+ CSR_CMD_SET_RST = 0x10000000,
+ CSR_CMD_CLR_RST = 0x20000000,
+ CSR_CMD_SET_PAUSE = 0x30000000,
+ CSR_CMD_CLR_PAUSE = 0x40000000,
+ CSR_CMD_SET_H2R_INT = 0x50000000,
+ CSR_CMD_CLR_H2R_INT = 0x60000000,
+ CSR_CMD_PAR_EN = 0x70000000,
+ CSR_CMD_SET_BAD_PAR = 0x80000000,
+ CSR_CMD_CLR_BAD_PAR = 0x90000000,
+ CSR_CMD_CLR_R2PCI_INT = 0xa0000000,
+};
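+
+/* e.g. the RISC processor can be put into reset by writing the command
+ * directly to the register, along the lines of:
+ *
+ *	ql_write32(qdev, CSR, CSR_CMD_SET_RST);
+ */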
+
+/*
+ * Configuration Register (CFG) bit definitions.
+ */
+enum {
+ CFG_LRQ = (1 << 0),
+ CFG_DRQ = (1 << 1),
+ CFG_LR = (1 << 2),
+ CFG_DR = (1 << 3),
+ CFG_LE = (1 << 5),
+ CFG_LCQ = (1 << 6),
+ CFG_DCQ = (1 << 7),
+ CFG_Q_SHIFT = 8,
+ CFG_Q_MASK = 0x7f000000,
+};
+
+/*
+ * Status Register (STS) bit definitions.
+ */
+enum {
+ STS_FE = (1 << 0),
+ STS_PI = (1 << 1),
+ STS_PL0 = (1 << 2),
+ STS_PL1 = (1 << 3),
+ STS_PI0 = (1 << 4),
+ STS_PI1 = (1 << 5),
+ STS_FUNC_ID_MASK = 0x000000c0,
+ STS_FUNC_ID_SHIFT = 6,
+ STS_F0E = (1 << 8),
+ STS_F1E = (1 << 9),
+ STS_F2E = (1 << 10),
+ STS_F3E = (1 << 11),
+ STS_NFE = (1 << 12),
+};
+
+/*
+ * Interrupt Enable Register (INTR_EN) bit definitions.
+ */
+enum {
+ INTR_EN_INTR_MASK = 0x007f0000,
+ INTR_EN_TYPE_MASK = 0x03000000,
+ INTR_EN_TYPE_ENABLE = 0x00000100,
+ INTR_EN_TYPE_DISABLE = 0x00000200,
+ INTR_EN_TYPE_READ = 0x00000300,
+ INTR_EN_IHD = (1 << 13),
+ INTR_EN_IHD_MASK = (INTR_EN_IHD << 16),
+ INTR_EN_EI = (1 << 14),
+ INTR_EN_EN = (1 << 15),
+};
+
+/*
+ * Interrupt Mask Register (INTR_MASK) bit definitions.
+ */
+enum {
+ INTR_MASK_PI = (1 << 0),
+ INTR_MASK_HL0 = (1 << 1),
+ INTR_MASK_LH0 = (1 << 2),
+ INTR_MASK_HL1 = (1 << 3),
+ INTR_MASK_LH1 = (1 << 4),
+ INTR_MASK_SE = (1 << 5),
+ INTR_MASK_LSC = (1 << 6),
+ INTR_MASK_MC = (1 << 7),
+ INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC,
+};
+
+/*
+ * Register (REV_ID) bit definitions.
+ */
+enum {
+ REV_ID_MASK = 0x0000000f,
+ REV_ID_NICROLL_SHIFT = 0,
+ REV_ID_NICREV_SHIFT = 4,
+ REV_ID_XGROLL_SHIFT = 8,
+ REV_ID_XGREV_SHIFT = 12,
+ REV_ID_CHIPREV_SHIFT = 28,
+};
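+
+/* e.g. the chip revision is ((rev_id >> REV_ID_CHIPREV_SHIFT) & REV_ID_MASK)
+ * and the NIC revision is ((rev_id >> REV_ID_NICREV_SHIFT) & REV_ID_MASK).
+ */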
+
+/*
+ * Force ECC Error Register (FRC_ECC_ERR) bit definitions.
+ */
+enum {
+ FRC_ECC_ERR_VW = (1 << 12),
+ FRC_ECC_ERR_VB = (1 << 13),
+ FRC_ECC_ERR_NI = (1 << 14),
+ FRC_ECC_ERR_NO = (1 << 15),
+ FRC_ECC_PFE_SHIFT = 16,
+ FRC_ECC_ERR_DO = (1 << 18),
+ FRC_ECC_P14 = (1 << 19),
+};
+
+/*
+ * Error Status Register (ERR_STS) bit definitions.
+ */
+enum {
+ ERR_STS_NOF = (1 << 0),
+ ERR_STS_NIF = (1 << 1),
+ ERR_STS_DRP = (1 << 2),
+ ERR_STS_XGP = (1 << 3),
+ ERR_STS_FOU = (1 << 4),
+ ERR_STS_FOC = (1 << 5),
+ ERR_STS_FOF = (1 << 6),
+ ERR_STS_FIU = (1 << 7),
+ ERR_STS_FIC = (1 << 8),
+ ERR_STS_FIF = (1 << 9),
+ ERR_STS_MOF = (1 << 10),
+ ERR_STS_TA = (1 << 11),
+ ERR_STS_MA = (1 << 12),
+ ERR_STS_MPE = (1 << 13),
+ ERR_STS_SCE = (1 << 14),
+ ERR_STS_STE = (1 << 15),
+ ERR_STS_FOW = (1 << 16),
+ ERR_STS_UE = (1 << 17),
+ ERR_STS_MCH = (1 << 26),
+ ERR_STS_LOC_SHIFT = 27,
+};
+
+/*
+ * RAM Debug Address Register (RAM_DBG_ADDR) bit definitions.
+ */
+enum {
+ RAM_DBG_ADDR_FW = (1 << 30),
+ RAM_DBG_ADDR_FR = (1 << 31),
+};
+
+/*
+ * Semaphore Register (SEM) bit definitions.
+ */
+enum {
+ /*
+ * Example:
+ * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT)
+ */
+ SEM_CLEAR = 0,
+ SEM_SET = 1,
+ SEM_FORCE = 3,
+ SEM_XGMAC0_SHIFT = 0,
+ SEM_XGMAC1_SHIFT = 2,
+ SEM_ICB_SHIFT = 4,
+ SEM_MAC_ADDR_SHIFT = 6,
+ SEM_FLASH_SHIFT = 8,
+ SEM_PROBE_SHIFT = 10,
+ SEM_RT_IDX_SHIFT = 12,
+ SEM_PROC_REG_SHIFT = 14,
+ SEM_XGMAC0_MASK = 0x00030000,
+ SEM_XGMAC1_MASK = 0x000c0000,
+ SEM_ICB_MASK = 0x00300000,
+ SEM_MAC_ADDR_MASK = 0x00c00000,
+ SEM_FLASH_MASK = 0x03000000,
+ SEM_PROBE_MASK = 0x0c000000,
+ SEM_RT_IDX_MASK = 0x30000000,
+ SEM_PROC_REG_MASK = 0xc0000000,
+};
+
+/*
+ * 10G MAC Address Register (XGMAC_ADDR) bit definitions.
+ */
+enum {
+ XGMAC_ADDR_RDY = (1 << 31),
+ XGMAC_ADDR_R = (1 << 30),
+ XGMAC_ADDR_XME = (1 << 29),
+
+ /* XGMAC control registers */
+ PAUSE_SRC_LO = 0x00000100,
+ PAUSE_SRC_HI = 0x00000104,
+ GLOBAL_CFG = 0x00000108,
+ GLOBAL_CFG_RESET = (1 << 0),
+ GLOBAL_CFG_JUMBO = (1 << 6),
+ GLOBAL_CFG_TX_STAT_EN = (1 << 10),
+ GLOBAL_CFG_RX_STAT_EN = (1 << 11),
+ TX_CFG = 0x0000010c,
+ TX_CFG_RESET = (1 << 0),
+ TX_CFG_EN = (1 << 1),
+ TX_CFG_PREAM = (1 << 2),
+ RX_CFG = 0x00000110,
+ RX_CFG_RESET = (1 << 0),
+ RX_CFG_EN = (1 << 1),
+ RX_CFG_PREAM = (1 << 2),
+ FLOW_CTL = 0x0000011c,
+ PAUSE_OPCODE = 0x00000120,
+ PAUSE_TIMER = 0x00000124,
+ PAUSE_FRM_DEST_LO = 0x00000128,
+ PAUSE_FRM_DEST_HI = 0x0000012c,
+ MAC_TX_PARAMS = 0x00000134,
+ MAC_TX_PARAMS_JUMBO = (1 << 31),
+ MAC_TX_PARAMS_SIZE_SHIFT = 16,
+ MAC_RX_PARAMS = 0x00000138,
+ MAC_SYS_INT = 0x00000144,
+ MAC_SYS_INT_MASK = 0x00000148,
+ MAC_MGMT_INT = 0x0000014c,
+ MAC_MGMT_IN_MASK = 0x00000150,
+ EXT_ARB_MODE = 0x000001fc,
+
+ /* XGMAC TX statistics registers */
+ TX_PKTS = 0x00000200,
+ TX_BYTES = 0x00000208,
+ TX_MCAST_PKTS = 0x00000210,
+ TX_BCAST_PKTS = 0x00000218,
+ TX_UCAST_PKTS = 0x00000220,
+ TX_CTL_PKTS = 0x00000228,
+ TX_PAUSE_PKTS = 0x00000230,
+ TX_64_PKT = 0x00000238,
+ TX_65_TO_127_PKT = 0x00000240,
+ TX_128_TO_255_PKT = 0x00000248,
+ TX_256_511_PKT = 0x00000250,
+ TX_512_TO_1023_PKT = 0x00000258,
+ TX_1024_TO_1518_PKT = 0x00000260,
+ TX_1519_TO_MAX_PKT = 0x00000268,
+ TX_UNDERSIZE_PKT = 0x00000270,
+ TX_OVERSIZE_PKT = 0x00000278,
+
+ /* XGMAC statistics control registers */
+ RX_HALF_FULL_DET = 0x000002a0,
+ TX_HALF_FULL_DET = 0x000002a4,
+ RX_OVERFLOW_DET = 0x000002a8,
+ TX_OVERFLOW_DET = 0x000002ac,
+ RX_HALF_FULL_MASK = 0x000002b0,
+ TX_HALF_FULL_MASK = 0x000002b4,
+ RX_OVERFLOW_MASK = 0x000002b8,
+ TX_OVERFLOW_MASK = 0x000002bc,
+ STAT_CNT_CTL = 0x000002c0,
+ STAT_CNT_CTL_CLEAR_TX = (1 << 0),
+ STAT_CNT_CTL_CLEAR_RX = (1 << 1),
+ AUX_RX_HALF_FULL_DET = 0x000002d0,
+ AUX_TX_HALF_FULL_DET = 0x000002d4,
+ AUX_RX_OVERFLOW_DET = 0x000002d8,
+ AUX_TX_OVERFLOW_DET = 0x000002dc,
+ AUX_RX_HALF_FULL_MASK = 0x000002f0,
+ AUX_TX_HALF_FULL_MASK = 0x000002f4,
+ AUX_RX_OVERFLOW_MASK = 0x000002f8,
+ AUX_TX_OVERFLOW_MASK = 0x000002fc,
+
+ /* XGMAC RX statistics registers */
+ RX_BYTES = 0x00000300,
+ RX_BYTES_OK = 0x00000308,
+ RX_PKTS = 0x00000310,
+ RX_PKTS_OK = 0x00000318,
+ RX_BCAST_PKTS = 0x00000320,
+ RX_MCAST_PKTS = 0x00000328,
+ RX_UCAST_PKTS = 0x00000330,
+ RX_UNDERSIZE_PKTS = 0x00000338,
+ RX_OVERSIZE_PKTS = 0x00000340,
+ RX_JABBER_PKTS = 0x00000348,
+ RX_UNDERSIZE_FCERR_PKTS = 0x00000350,
+ RX_DROP_EVENTS = 0x00000358,
+ RX_FCERR_PKTS = 0x00000360,
+ RX_ALIGN_ERR = 0x00000368,
+ RX_SYMBOL_ERR = 0x00000370,
+ RX_MAC_ERR = 0x00000378,
+ RX_CTL_PKTS = 0x00000380,
+ RX_PAUSE_PKTS = 0x00000388,
+ RX_64_PKTS = 0x00000390,
+ RX_65_TO_127_PKTS = 0x00000398,
+ RX_128_255_PKTS = 0x000003a0,
+ RX_256_511_PKTS = 0x000003a8,
+ RX_512_TO_1023_PKTS = 0x000003b0,
+ RX_1024_TO_1518_PKTS = 0x000003b8,
+ RX_1519_TO_MAX_PKTS = 0x000003c0,
+ RX_LEN_ERR_PKTS = 0x000003c8,
+
+ /* XGMAC MDIO control registers */
+ MDIO_TX_DATA = 0x00000400,
+ MDIO_RX_DATA = 0x00000410,
+ MDIO_CMD = 0x00000420,
+ MDIO_PHY_ADDR = 0x00000430,
+ MDIO_PORT = 0x00000440,
+ MDIO_STATUS = 0x00000450,
+
+ XGMAC_REGISTER_END = 0x00000740,
+};
+
+/*
+ * Enhanced Transmission Schedule Registers (NIC_ETS,CNA_ETS) bit definitions.
+ */
+enum {
+ ETS_QUEUE_SHIFT = 29,
+ ETS_REF = (1 << 26),
+ ETS_RS = (1 << 27),
+ ETS_P = (1 << 28),
+ ETS_FC_COS_SHIFT = 23,
+};
+
+/*
+ * Flash Address Register (FLASH_ADDR) bit definitions.
+ */
+enum {
+ FLASH_ADDR_RDY = (1 << 31),
+ FLASH_ADDR_R = (1 << 30),
+ FLASH_ADDR_ERR = (1 << 29),
+};
+
+/*
+ * Stop CQ Processing Register (CQ_STOP) bit definitions.
+ */
+enum {
+ CQ_STOP_QUEUE_MASK = (0x007f0000),
+ CQ_STOP_TYPE_MASK = (0x03000000),
+ CQ_STOP_TYPE_START = 0x00000100,
+ CQ_STOP_TYPE_STOP = 0x00000200,
+ CQ_STOP_TYPE_READ = 0x00000300,
+ CQ_STOP_EN = (1 << 15),
+};
+
+/*
+ * MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions.
+ */
+enum {
+ MAC_ADDR_IDX_SHIFT = 4,
+ MAC_ADDR_TYPE_SHIFT = 16,
+ MAC_ADDR_TYPE_COUNT = 10,
+ MAC_ADDR_TYPE_MASK = 0x000f0000,
+ MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
+ MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
+ MAC_ADDR_TYPE_VLAN = 0x00020000,
+ MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000,
+ MAC_ADDR_TYPE_FC_MAC = 0x00040000,
+ MAC_ADDR_TYPE_MGMT_MAC = 0x00050000,
+ MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000,
+ MAC_ADDR_TYPE_MGMT_V4 = 0x00070000,
+ MAC_ADDR_TYPE_MGMT_V6 = 0x00080000,
+ MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000,
+ MAC_ADDR_ADR = (1 << 25),
+ MAC_ADDR_RS = (1 << 26),
+ MAC_ADDR_E = (1 << 27),
+ MAC_ADDR_MR = (1 << 30),
+ MAC_ADDR_MW = (1 << 31),
+ MAX_MULTICAST_ENTRIES = 32,
+
+ /* Entry count and words per entry
+ * for each address type in the filter.
+ */
+ MAC_ADDR_MAX_CAM_ENTRIES = 512,
+ MAC_ADDR_MAX_CAM_WCOUNT = 3,
+ MAC_ADDR_MAX_MULTICAST_ENTRIES = 32,
+ MAC_ADDR_MAX_MULTICAST_WCOUNT = 2,
+ MAC_ADDR_MAX_VLAN_ENTRIES = 4096,
+ MAC_ADDR_MAX_VLAN_WCOUNT = 1,
+ MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096,
+ MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1,
+ MAC_ADDR_MAX_FC_MAC_ENTRIES = 4,
+ MAC_ADDR_MAX_FC_MAC_WCOUNT = 2,
+ MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8,
+ MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2,
+ MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16,
+ MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1,
+ MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4,
+ MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1,
+ MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4,
+ MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4,
+ MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4,
+ MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1,
+};
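+
+/* e.g. CAM MAC entry n is addressed by combining its index and type:
+ *	(n << MAC_ADDR_IDX_SHIFT) | MAC_ADDR_TYPE_CAM_MAC
+ */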
+
+/*
+ * MAC Protocol Address Index Register (SPLT_HDR) bit definitions.
+ */
+enum {
+ SPLT_HDR_EP = (1 << 31),
+};
+
+/*
+ * FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions.
+ */
+enum {
+ FC_RCV_CFG_ECT = (1 << 15),
+ FC_RCV_CFG_DFH = (1 << 20),
+ FC_RCV_CFG_DVF = (1 << 21),
+ FC_RCV_CFG_RCE = (1 << 27),
+ FC_RCV_CFG_RFE = (1 << 28),
+ FC_RCV_CFG_TEE = (1 << 29),
+ FC_RCV_CFG_TCE = (1 << 30),
+ FC_RCV_CFG_TFE = (1 << 31),
+};
+
+/*
+ * NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions.
+ */
+enum {
+ NIC_RCV_CFG_PPE = (1 << 0),
+ NIC_RCV_CFG_VLAN_MASK = 0x00060000,
+ NIC_RCV_CFG_VLAN_ALL = 0x00000000,
+ NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002,
+ NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004,
+ NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006,
+ NIC_RCV_CFG_RV = (1 << 3),
+ NIC_RCV_CFG_DFQ_MASK = (0x7f000000),
+ NIC_RCV_CFG_DFQ_SHIFT = 8,
+ NIC_RCV_CFG_DFQ = 0, /* HARDCODE default queue to 0. */
+};
+
+/*
+ * Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions.
+ */
+enum {
+ MGMT_RCV_CFG_ARP = (1 << 0),
+ MGMT_RCV_CFG_DHC = (1 << 1),
+ MGMT_RCV_CFG_DHS = (1 << 2),
+ MGMT_RCV_CFG_NP = (1 << 3),
+ MGMT_RCV_CFG_I6N = (1 << 4),
+ MGMT_RCV_CFG_I6R = (1 << 5),
+ MGMT_RCV_CFG_DH6 = (1 << 6),
+ MGMT_RCV_CFG_UD1 = (1 << 7),
+ MGMT_RCV_CFG_UD0 = (1 << 8),
+ MGMT_RCV_CFG_BCT = (1 << 9),
+ MGMT_RCV_CFG_MCT = (1 << 10),
+ MGMT_RCV_CFG_DM = (1 << 11),
+ MGMT_RCV_CFG_RM = (1 << 12),
+ MGMT_RCV_CFG_STL = (1 << 13),
+ MGMT_RCV_CFG_VLAN_MASK = 0xc0000000,
+ MGMT_RCV_CFG_VLAN_ALL = 0x00000000,
+ MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000,
+ MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000,
+ MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000,
+};
+
+/*
+ * Routing Index Register (RT_IDX) bit definitions.
+ */
+enum {
+ RT_IDX_IDX_SHIFT = 8,
+ RT_IDX_TYPE_MASK = 0x000f0000,
+ RT_IDX_TYPE_SHIFT = 16,
+ RT_IDX_TYPE_RT = 0x00000000,
+ RT_IDX_TYPE_RT_INV = 0x00010000,
+ RT_IDX_TYPE_NICQ = 0x00020000,
+ RT_IDX_TYPE_NICQ_INV = 0x00030000,
+ RT_IDX_DST_MASK = 0x00700000,
+ RT_IDX_DST_RSS = 0x00000000,
+ RT_IDX_DST_CAM_Q = 0x00100000,
+ RT_IDX_DST_COS_Q = 0x00200000,
+ RT_IDX_DST_DFLT_Q = 0x00300000,
+ RT_IDX_DST_DEST_Q = 0x00400000,
+ RT_IDX_RS = (1 << 26),
+ RT_IDX_E = (1 << 27),
+ RT_IDX_MR = (1 << 30),
+ RT_IDX_MW = (1 << 31),
+
+ /* Nic Queue format - type 2 bits */
+ RT_IDX_BCAST = (1 << 0),
+ RT_IDX_MCAST = (1 << 1),
+ RT_IDX_MCAST_MATCH = (1 << 2),
+ RT_IDX_MCAST_REG_MATCH = (1 << 3),
+ RT_IDX_MCAST_HASH_MATCH = (1 << 4),
+ RT_IDX_FC_MACH = (1 << 5),
+ RT_IDX_ETH_FCOE = (1 << 6),
+ RT_IDX_CAM_HIT = (1 << 7),
+ RT_IDX_CAM_BIT0 = (1 << 8),
+ RT_IDX_CAM_BIT1 = (1 << 9),
+ RT_IDX_VLAN_TAG = (1 << 10),
+ RT_IDX_VLAN_MATCH = (1 << 11),
+ RT_IDX_VLAN_FILTER = (1 << 12),
+ RT_IDX_ETH_SKIP1 = (1 << 13),
+ RT_IDX_ETH_SKIP2 = (1 << 14),
+ RT_IDX_BCAST_MCAST_MATCH = (1 << 15),
+ RT_IDX_802_3 = (1 << 16),
+ RT_IDX_LLDP = (1 << 17),
+ RT_IDX_UNUSED018 = (1 << 18),
+ RT_IDX_UNUSED019 = (1 << 19),
+ RT_IDX_UNUSED20 = (1 << 20),
+ RT_IDX_UNUSED21 = (1 << 21),
+ RT_IDX_ERR = (1 << 22),
+ RT_IDX_VALID = (1 << 23),
+ RT_IDX_TU_CSUM_ERR = (1 << 24),
+ RT_IDX_IP_CSUM_ERR = (1 << 25),
+ RT_IDX_MAC_ERR = (1 << 26),
+ RT_IDX_RSS_TCP6 = (1 << 27),
+ RT_IDX_RSS_TCP4 = (1 << 28),
+ RT_IDX_RSS_IPV6 = (1 << 29),
+ RT_IDX_RSS_IPV4 = (1 << 30),
+ RT_IDX_RSS_MATCH = (1 << 31),
+
+ /* Hierarchy for the NIC Queue Mask */
+ RT_IDX_ALL_ERR_SLOT = 0,
+ RT_IDX_MAC_ERR_SLOT = 0,
+ RT_IDX_IP_CSUM_ERR_SLOT = 1,
+ RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2,
+ RT_IDX_BCAST_SLOT = 3,
+ RT_IDX_MCAST_MATCH_SLOT = 4,
+ RT_IDX_ALLMULTI_SLOT = 5,
+ RT_IDX_UNUSED6_SLOT = 6,
+ RT_IDX_UNUSED7_SLOT = 7,
+ RT_IDX_RSS_MATCH_SLOT = 8,
+ RT_IDX_RSS_IPV4_SLOT = 8,
+ RT_IDX_RSS_IPV6_SLOT = 9,
+ RT_IDX_RSS_TCP4_SLOT = 10,
+ RT_IDX_RSS_TCP6_SLOT = 11,
+ RT_IDX_CAM_HIT_SLOT = 12,
+ RT_IDX_UNUSED013 = 13,
+ RT_IDX_UNUSED014 = 14,
+ RT_IDX_PROMISCUOUS_SLOT = 15,
+ RT_IDX_MAX_RT_SLOTS = 8,
+ RT_IDX_MAX_NIC_SLOTS = 16,
+};
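+
+/* e.g. a routing rule that steers broadcast frames to the NIC queue
+ * hierarchy is selected with roughly:
+ *	RT_IDX_TYPE_NICQ | (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT)
+ */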
+
+/*
+ * Serdes Address Register (XG_SERDES_ADDR) bit definitions.
+ */
+enum {
+ XG_SERDES_ADDR_RDY = (1 << 31),
+ XG_SERDES_ADDR_R = (1 << 30),
+
+ XG_SERDES_ADDR_STS = 0x00001E06,
+ XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005,
+ XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a,
+ XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001,
+
+ /* Serdes coredump definitions. */
+ XG_SERDES_XAUI_AN_START = 0x00000000,
+ XG_SERDES_XAUI_AN_END = 0x00000034,
+ XG_SERDES_XAUI_HSS_PCS_START = 0x00000800,
+ XG_SERDES_XAUI_HSS_PCS_END = 0x00000880,
+ XG_SERDES_XFI_AN_START = 0x00001000,
+ XG_SERDES_XFI_AN_END = 0x00001034,
+ XG_SERDES_XFI_TRAIN_START = 0x10001050,
+ XG_SERDES_XFI_TRAIN_END = 0x1000107C,
+ XG_SERDES_XFI_HSS_PCS_START = 0x00001800,
+ XG_SERDES_XFI_HSS_PCS_END = 0x00001838,
+ XG_SERDES_XFI_HSS_TX_START = 0x00001c00,
+ XG_SERDES_XFI_HSS_TX_END = 0x00001c1f,
+ XG_SERDES_XFI_HSS_RX_START = 0x00001c40,
+ XG_SERDES_XFI_HSS_RX_END = 0x00001c5f,
+ XG_SERDES_XFI_HSS_PLL_START = 0x00001e00,
+ XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f,
+};
+
+/*
+ * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions.
+ */
+enum {
+ PRB_MX_ADDR_ARE = (1 << 16),
+ PRB_MX_ADDR_UP = (1 << 15),
+ PRB_MX_ADDR_SWP = (1 << 14),
+
+ /* Module select values. */
+ PRB_MX_ADDR_MAX_MODS = 21,
+ PRB_MX_ADDR_MOD_SEL_SHIFT = 9,
+ PRB_MX_ADDR_MOD_SEL_TBD = 0,
+ PRB_MX_ADDR_MOD_SEL_IDE1 = 1,
+ PRB_MX_ADDR_MOD_SEL_IDE2 = 2,
+ PRB_MX_ADDR_MOD_SEL_FRB = 3,
+ PRB_MX_ADDR_MOD_SEL_ODE1 = 4,
+ PRB_MX_ADDR_MOD_SEL_ODE2 = 5,
+ PRB_MX_ADDR_MOD_SEL_DA1 = 6,
+ PRB_MX_ADDR_MOD_SEL_DA2 = 7,
+ PRB_MX_ADDR_MOD_SEL_IMP1 = 8,
+ PRB_MX_ADDR_MOD_SEL_IMP2 = 9,
+ PRB_MX_ADDR_MOD_SEL_OMP1 = 10,
+ PRB_MX_ADDR_MOD_SEL_OMP2 = 11,
+ PRB_MX_ADDR_MOD_SEL_ORS1 = 12,
+ PRB_MX_ADDR_MOD_SEL_ORS2 = 13,
+ PRB_MX_ADDR_MOD_SEL_REG = 14,
+ PRB_MX_ADDR_MOD_SEL_MAC1 = 16,
+ PRB_MX_ADDR_MOD_SEL_MAC2 = 17,
+ PRB_MX_ADDR_MOD_SEL_VQM1 = 18,
+ PRB_MX_ADDR_MOD_SEL_VQM2 = 19,
+ PRB_MX_ADDR_MOD_SEL_MOP = 20,
+ /* Bit fields indicating which modules
+ * are valid for each clock domain.
+ */
+ PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7,
+ PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1,
+ PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309,
+ PRB_MX_ADDR_VALID_FC_MOD = 0x00003001,
+ PRB_MX_ADDR_VALID_TOTAL = 34,
+
+ /* Clock domain values. */
+ PRB_MX_ADDR_CLOCK_SHIFT = 6,
+ PRB_MX_ADDR_SYS_CLOCK = 0,
+ PRB_MX_ADDR_PCI_CLOCK = 2,
+ PRB_MX_ADDR_FC_CLOCK = 5,
+ PRB_MX_ADDR_XGM_CLOCK = 6,
+
+ PRB_MX_ADDR_MAX_MUX = 64,
+};
+
+/*
+ * Control Register Set Map
+ */
+enum {
+ PROC_ADDR = 0, /* Use semaphore */
+ PROC_DATA = 0x04, /* Use semaphore */
+ SYS = 0x08,
+ RST_FO = 0x0c,
+ FSC = 0x10,
+ CSR = 0x14,
+ LED = 0x18,
+ ICB_RID = 0x1c, /* Use semaphore */
+ ICB_L = 0x20, /* Use semaphore */
+ ICB_H = 0x24, /* Use semaphore */
+ CFG = 0x28,
+ BIOS_ADDR = 0x2c,
+ STS = 0x30,
+ INTR_EN = 0x34,
+ INTR_MASK = 0x38,
+ ISR1 = 0x3c,
+ ISR2 = 0x40,
+ ISR3 = 0x44,
+ ISR4 = 0x48,
+ REV_ID = 0x4c,
+ FRC_ECC_ERR = 0x50,
+ ERR_STS = 0x54,
+ RAM_DBG_ADDR = 0x58,
+ RAM_DBG_DATA = 0x5c,
+ ECC_ERR_CNT = 0x60,
+ SEM = 0x64,
+ GPIO_1 = 0x68, /* Use semaphore */
+ GPIO_2 = 0x6c, /* Use semaphore */
+ GPIO_3 = 0x70, /* Use semaphore */
+ RSVD2 = 0x74,
+ XGMAC_ADDR = 0x78, /* Use semaphore */
+ XGMAC_DATA = 0x7c, /* Use semaphore */
+ NIC_ETS = 0x80,
+ CNA_ETS = 0x84,
+ FLASH_ADDR = 0x88, /* Use semaphore */
+ FLASH_DATA = 0x8c, /* Use semaphore */
+ CQ_STOP = 0x90,
+ PAGE_TBL_RID = 0x94,
+ WQ_PAGE_TBL_LO = 0x98,
+ WQ_PAGE_TBL_HI = 0x9c,
+ CQ_PAGE_TBL_LO = 0xa0,
+ CQ_PAGE_TBL_HI = 0xa4,
+ MAC_ADDR_IDX = 0xa8, /* Use semaphore */
+ MAC_ADDR_DATA = 0xac, /* Use semaphore */
+ COS_DFLT_CQ1 = 0xb0,
+ COS_DFLT_CQ2 = 0xb4,
+ ETYPE_SKIP1 = 0xb8,
+ ETYPE_SKIP2 = 0xbc,
+ SPLT_HDR = 0xc0,
+ FC_PAUSE_THRES = 0xc4,
+ NIC_PAUSE_THRES = 0xc8,
+ FC_ETHERTYPE = 0xcc,
+ FC_RCV_CFG = 0xd0,
+ NIC_RCV_CFG = 0xd4,
+ FC_COS_TAGS = 0xd8,
+ NIC_COS_TAGS = 0xdc,
+ MGMT_RCV_CFG = 0xe0,
+ RT_IDX = 0xe4,
+ RT_DATA = 0xe8,
+ RSVD7 = 0xec,
+ XG_SERDES_ADDR = 0xf0,
+ XG_SERDES_DATA = 0xf4,
+ PRB_MX_ADDR = 0xf8, /* Use semaphore */
+ PRB_MX_DATA = 0xfc, /* Use semaphore */
+};
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#define SMALL_BUFFER_SIZE 256
+#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE
+#define SPLT_SETTING FSC_DBRST_1024
+#define SPLT_LEN 0
+#define QLGE_SB_PAD 0
+#else
+#define SMALL_BUFFER_SIZE 512
+#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
+#define SPLT_SETTING FSC_SH
+#define SPLT_LEN (SPLT_HDR_EP | \
+ min(SMALL_BUF_MAP_SIZE, 1023))
+#define QLGE_SB_PAD 32
+#endif
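+
+/* e.g. in the non-efficient-unaligned-access case SMALL_BUF_MAP_SIZE is
+ * 256, so SPLT_LEN evaluates to (SPLT_HDR_EP | 256).
+ */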
+
+/*
+ * CAM output format.
+ */
+enum {
+ CAM_OUT_ROUTE_FC = 0,
+ CAM_OUT_ROUTE_NIC = 1,
+ CAM_OUT_FUNC_SHIFT = 2,
+ CAM_OUT_RV = (1 << 4),
+ CAM_OUT_SH = (1 << 15),
+ CAM_OUT_CQ_ID_SHIFT = 5,
+};
+
+/*
+ * Mailbox definitions
+ */
+enum {
+ /* Asynchronous Event Notifications */
+ AEN_SYS_ERR = 0x00008002,
+ AEN_LINK_UP = 0x00008011,
+ AEN_LINK_DOWN = 0x00008012,
+ AEN_IDC_CMPLT = 0x00008100,
+ AEN_IDC_REQ = 0x00008101,
+ AEN_IDC_EXT = 0x00008102,
+ AEN_DCBX_CHG = 0x00008110,
+ AEN_AEN_LOST = 0x00008120,
+ AEN_AEN_SFP_IN = 0x00008130,
+ AEN_AEN_SFP_OUT = 0x00008131,
+ AEN_FW_INIT_DONE = 0x00008400,
+ AEN_FW_INIT_FAIL = 0x00008401,
+
+ /* Mailbox Command Opcodes. */
+ MB_CMD_NOP = 0x00000000,
+ MB_CMD_EX_FW = 0x00000002,
+ MB_CMD_MB_TEST = 0x00000006,
+ MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */
+ MB_CMD_ABOUT_FW = 0x00000008,
+ MB_CMD_COPY_RISC_RAM = 0x0000000a,
+ MB_CMD_LOAD_RISC_RAM = 0x0000000b,
+ MB_CMD_DUMP_RISC_RAM = 0x0000000c,
+ MB_CMD_WRITE_RAM = 0x0000000d,
+ MB_CMD_INIT_RISC_RAM = 0x0000000e,
+ MB_CMD_READ_RAM = 0x0000000f,
+ MB_CMD_STOP_FW = 0x00000014,
+ MB_CMD_MAKE_SYS_ERR = 0x0000002a,
+ MB_CMD_WRITE_SFP = 0x00000030,
+ MB_CMD_READ_SFP = 0x00000031,
+ MB_CMD_INIT_FW = 0x00000060,
+ MB_CMD_GET_IFCB = 0x00000061,
+ MB_CMD_GET_FW_STATE = 0x00000069,
+ MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */
+ MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */
+ MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On Lan */
+ MB_WOL_DISABLE = 0,
+ MB_WOL_MAGIC_PKT = (1 << 1),
+ MB_WOL_FLTR = (1 << 2),
+ MB_WOL_UCAST = (1 << 3),
+ MB_WOL_MCAST = (1 << 4),
+ MB_WOL_BCAST = (1 << 5),
+ MB_WOL_LINK_UP = (1 << 6),
+ MB_WOL_LINK_DOWN = (1 << 7),
+ MB_WOL_MODE_ON = (1 << 16), /* Wake on Lan Mode on */
+ MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */
+ MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */
+ MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */
+ MB_CMD_CLEAR_WOL_MAGIC = 0x00000114,/* Wake On Lan Magic Packet */
+ MB_CMD_SET_WOL_IMMED = 0x00000115,
+ MB_CMD_PORT_RESET = 0x00000120,
+ MB_CMD_SET_PORT_CFG = 0x00000122,
+ MB_CMD_GET_PORT_CFG = 0x00000123,
+ MB_CMD_GET_LINK_STS = 0x00000124,
+ MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */
+ QL_LED_BLINK = 0x03e803e8,
+ MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */
+ MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */
+ MB_SET_MPI_TFK_STOP = (1 << 0),
+ MB_SET_MPI_TFK_RESUME = (1 << 1),
+ MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */
+ MB_GET_MPI_TFK_STOPPED = (1 << 0),
+ MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1),
+ /* Sub-commands for IDC request.
+ * This describes the reason for the
+ * IDC request.
+ */
+ MB_CMD_IOP_NONE = 0x0000,
+ MB_CMD_IOP_PREP_UPDATE_MPI = 0x0001,
+ MB_CMD_IOP_COMP_UPDATE_MPI = 0x0002,
+ MB_CMD_IOP_PREP_LINK_DOWN = 0x0010,
+ MB_CMD_IOP_DVR_START = 0x0100,
+ MB_CMD_IOP_FLASH_ACC = 0x0101,
+ MB_CMD_IOP_RESTART_MPI = 0x0102,
+ MB_CMD_IOP_CORE_DUMP_MPI = 0x0103,
+
+ /* Mailbox Command Status. */
+ MB_CMD_STS_GOOD = 0x00004000, /* Success. */
+ MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */
+ MB_CMD_STS_INVLD_CMD = 0x00004001, /* Invalid. */
+ MB_CMD_STS_XFC_ERR = 0x00004002, /* Interface Error. */
+ MB_CMD_STS_CSUM_ERR = 0x00004003, /* Csum Error. */
+ MB_CMD_STS_ERR = 0x00004005, /* System Error. */
+ MB_CMD_STS_PARAM_ERR = 0x00004006, /* Parameter Error. */
+};
+
+struct mbox_params {
+ u32 mbox_in[MAILBOX_COUNT];
+ u32 mbox_out[MAILBOX_COUNT];
+ int in_count;
+ int out_count;
+};
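+
+/* e.g. an "about firmware" query fills the structure roughly as:
+ *
+ *	mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;
+ *	mbcp->in_count = 1;
+ *	mbcp->out_count = 3;
+ */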
+
+struct flash_params_8012 {
+ u8 dev_id_str[4];
+ __le16 size;
+ __le16 csum;
+ __le16 ver;
+ __le16 sub_dev_id;
+ u8 mac_addr[6];
+ __le16 res;
+};
+
+/* 8000 device's flash is a different structure
+ * at a different offset in flash.
+ */
+#define FUNC0_FLASH_OFFSET 0x140200
+#define FUNC1_FLASH_OFFSET 0x140600
+
+/* Flash related data structures. */
+struct flash_params_8000 {
+ u8 dev_id_str[4]; /* "8000" */
+ __le16 ver;
+ __le16 size;
+ __le16 csum;
+ __le16 reserved0;
+ __le16 total_size;
+ __le16 entry_count;
+ u8 data_type0;
+ u8 data_size0;
+ u8 mac_addr[6];
+ u8 data_type1;
+ u8 data_size1;
+ u8 mac_addr1[6];
+ u8 data_type2;
+ u8 data_size2;
+ __le16 vlan_id;
+ u8 data_type3;
+ u8 data_size3;
+ __le16 last;
+ u8 reserved1[464];
+ __le16 subsys_ven_id;
+ __le16 subsys_dev_id;
+ u8 reserved2[4];
+};
+
+union flash_params {
+ struct flash_params_8012 flash_params_8012;
+ struct flash_params_8000 flash_params_8000;
+};
+
+/*
+ * doorbell space for the rx ring context
+ */
+struct rx_doorbell_context {
+ u32 cnsmr_idx; /* 0x00 */
+ u32 valid; /* 0x04 */
+ u32 reserved[4]; /* 0x08-0x14 */
+ u32 lbq_prod_idx; /* 0x18 */
+ u32 sbq_prod_idx; /* 0x1c */
+};
+
+/*
+ * doorbell space for the tx ring context
+ */
+struct tx_doorbell_context {
+ u32 prod_idx; /* 0x00 */
+ u32 valid; /* 0x04 */
+ u32 reserved[4]; /* 0x08-0x14 */
+ u32 lbq_prod_idx; /* 0x18 */
+ u32 sbq_prod_idx; /* 0x1c */
+};
+
+/* DATA STRUCTURES SHARED WITH HARDWARE. */
+struct tx_buf_desc {
+ __le64 addr;
+ __le32 len;
+#define TX_DESC_LEN_MASK 0x000fffff
+#define TX_DESC_C 0x40000000
+#define TX_DESC_E 0x80000000
+} __packed;
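+
+/* e.g. the last 1500-byte fragment of a send is described with:
+ *
+ *	tbd->len = cpu_to_le32((1500 & TX_DESC_LEN_MASK) | TX_DESC_E);
+ */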
+
+/*
+ * IOCB Definitions...
+ */
+
+#define OPCODE_OB_MAC_IOCB 0x01
+#define OPCODE_OB_MAC_TSO_IOCB 0x02
+#define OPCODE_IB_MAC_IOCB 0x20
+#define OPCODE_IB_MPI_IOCB 0x21
+#define OPCODE_IB_AE_IOCB 0x3f
+
+struct ob_mac_iocb_req {
+ u8 opcode;
+ u8 flags1;
+#define OB_MAC_IOCB_REQ_OI 0x01
+#define OB_MAC_IOCB_REQ_I 0x02
+#define OB_MAC_IOCB_REQ_D 0x08
+#define OB_MAC_IOCB_REQ_F 0x10
+ u8 flags2;
+ u8 flags3;
+#define OB_MAC_IOCB_DFP 0x02
+#define OB_MAC_IOCB_V 0x04
+ __le32 reserved1[2];
+ __le16 frame_len;
+#define OB_MAC_IOCB_LEN_MASK 0x3ffff
+ __le16 reserved2;
+ u32 tid;
+ u32 txq_idx;
+ __le32 reserved3;
+ __le16 vlan_tci;
+ __le16 reserved4;
+ struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
+} __packed;
+
+struct ob_mac_iocb_rsp {
+ u8 opcode; /* */
+ u8 flags1; /* */
+#define OB_MAC_IOCB_RSP_OI 0x01 /* */
+#define OB_MAC_IOCB_RSP_I 0x02 /* */
+#define OB_MAC_IOCB_RSP_E 0x08 /* */
+#define OB_MAC_IOCB_RSP_S 0x10 /* too Short */
+#define OB_MAC_IOCB_RSP_L 0x20 /* too Large */
+#define OB_MAC_IOCB_RSP_P 0x40 /* Padded */
+ u8 flags2; /* */
+ u8 flags3; /* */
+#define OB_MAC_IOCB_RSP_B 0x80 /* */
+ u32 tid;
+ u32 txq_idx;
+ __le32 reserved[13];
+} __packed;
+
+struct ob_mac_tso_iocb_req {
+ u8 opcode;
+ u8 flags1;
+#define OB_MAC_TSO_IOCB_OI 0x01
+#define OB_MAC_TSO_IOCB_I 0x02
+#define OB_MAC_TSO_IOCB_D 0x08
+#define OB_MAC_TSO_IOCB_IP4 0x40
+#define OB_MAC_TSO_IOCB_IP6 0x80
+ u8 flags2;
+#define OB_MAC_TSO_IOCB_LSO 0x20
+#define OB_MAC_TSO_IOCB_UC 0x40
+#define OB_MAC_TSO_IOCB_TC 0x80
+ u8 flags3;
+#define OB_MAC_TSO_IOCB_IC 0x01
+#define OB_MAC_TSO_IOCB_DFP 0x02
+#define OB_MAC_TSO_IOCB_V 0x04
+ __le32 reserved1[2];
+ __le32 frame_len;
+ u32 tid;
+ u32 txq_idx;
+ __le16 total_hdrs_len;
+ __le16 net_trans_offset;
+#define OB_MAC_TRANSPORT_HDR_SHIFT 6
+ __le16 vlan_tci;
+ __le16 mss;
+ struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
+} __packed;
+
+struct ob_mac_tso_iocb_rsp {
+ u8 opcode;
+ u8 flags1;
+#define OB_MAC_TSO_IOCB_RSP_OI 0x01
+#define OB_MAC_TSO_IOCB_RSP_I 0x02
+#define OB_MAC_TSO_IOCB_RSP_E 0x08
+#define OB_MAC_TSO_IOCB_RSP_S 0x10
+#define OB_MAC_TSO_IOCB_RSP_L 0x20
+#define OB_MAC_TSO_IOCB_RSP_P 0x40
+ u8 flags2; /* */
+ u8 flags3; /* */
+#define OB_MAC_TSO_IOCB_RSP_B 0x8000
+ u32 tid;
+ u32 txq_idx;
+ __le32 reserved2[13];
+} __packed;
+
+struct ib_mac_iocb_rsp {
+ u8 opcode; /* 0x20 */
+ u8 flags1;
+#define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */
+#define IB_MAC_IOCB_RSP_I 0x02 /* Disable Intr Generation */
+#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */
+#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */
+#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */
+#define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */
+#define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */
+#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */
+#define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */
+#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */
+#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */
+#define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */
+ u8 flags2;
+#define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */
+#define IB_MAC_IOCB_RSP_V 0x02 /* Vlan tag present */
+#define IB_MAC_IOCB_RSP_ERR_MASK 0x1c /* */
+#define IB_MAC_IOCB_RSP_ERR_CODE_ERR 0x04
+#define IB_MAC_IOCB_RSP_ERR_OVERSIZE 0x08
+#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE 0x10
+#define IB_MAC_IOCB_RSP_ERR_PREAMBLE 0x14
+#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN 0x18
+#define IB_MAC_IOCB_RSP_ERR_CRC 0x1c
+#define IB_MAC_IOCB_RSP_U 0x20 /* UDP packet */
+#define IB_MAC_IOCB_RSP_T 0x40 /* TCP packet */
+#define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */
+ u8 flags3;
+#define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */
+#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */
+#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */
+#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */
+#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */
+#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */
+#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */
+#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */
+#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */
+#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */
+#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */
+ __le32 data_len; /* */
+ __le64 data_addr; /* */
+ __le32 rss; /* */
+ __le16 vlan_id; /* 12 bits */
+#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */
+#define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */
+#define IB_MAC_IOCB_RSP_VLAN_MASK 0x0ffff
+
+ __le16 reserved1;
+ __le32 reserved2[6];
+ u8 reserved3[3];
+ u8 flags4;
+#define IB_MAC_IOCB_RSP_HV 0x20
+#define IB_MAC_IOCB_RSP_HS 0x40
+#define IB_MAC_IOCB_RSP_HL 0x80
+ __le32 hdr_len; /* */
+ __le64 hdr_addr; /* */
+} __packed;
+
+struct ib_ae_iocb_rsp {
+ u8 opcode;
+ u8 flags1;
+#define IB_AE_IOCB_RSP_OI 0x01
+#define IB_AE_IOCB_RSP_I 0x02
+ u8 event;
+#define LINK_UP_EVENT 0x00
+#define LINK_DOWN_EVENT 0x01
+#define CAM_LOOKUP_ERR_EVENT 0x06
+#define SOFT_ECC_ERROR_EVENT 0x07
+#define MGMT_ERR_EVENT 0x08
+#define TEN_GIG_MAC_EVENT 0x09
+#define GPI0_H2L_EVENT 0x10
+#define GPI0_L2H_EVENT 0x20
+#define GPI1_H2L_EVENT 0x11
+#define GPI1_L2H_EVENT 0x21
+#define PCI_ERR_ANON_BUF_RD 0x40
+ u8 q_id;
+ __le32 reserved[15];
+} __packed;
+
+/*
+ * These three structures are for generic
+ * handling of ib and ob iocbs.
+ */
+struct ql_net_rsp_iocb {
+ u8 opcode;
+ u8 flags0;
+ __le16 length;
+ __le32 tid;
+ __le32 reserved[14];
+} __packed;
+
+struct net_req_iocb {
+ u8 opcode;
+ u8 flags0;
+ __le16 flags1;
+ __le32 tid;
+ __le32 reserved1[30];
+} __packed;
+
+/*
+ * tx ring initialization control block for chip.
+ * It is defined as:
+ * "Work Queue Initialization Control Block"
+ */
+struct wqicb {
+ __le16 len;
+#define Q_LEN_V (1 << 4)
+#define Q_LEN_CPP_CONT 0x0000
+#define Q_LEN_CPP_16 0x0001
+#define Q_LEN_CPP_32 0x0002
+#define Q_LEN_CPP_64 0x0003
+#define Q_LEN_CPP_512 0x0006
+ __le16 flags;
+#define Q_PRI_SHIFT 1
+#define Q_FLAGS_LC 0x1000
+#define Q_FLAGS_LB 0x2000
+#define Q_FLAGS_LI 0x4000
+#define Q_FLAGS_LO 0x8000
+ __le16 cq_id_rss;
+#define Q_CQ_ID_RSS_RV 0x8000
+ __le16 rid;
+ __le64 addr;
+ __le64 cnsmr_idx_addr;
+} __packed;
+
+/*
+ * rx ring initialization control block for chip.
+ * It is defined as:
+ * "Completion Queue Initialization Control Block"
+ */
+struct cqicb {
+ u8 msix_vect;
+ u8 reserved1;
+ u8 reserved2;
+ u8 flags;
+#define FLAGS_LV 0x08
+#define FLAGS_LS 0x10
+#define FLAGS_LL 0x20
+#define FLAGS_LI 0x40
+#define FLAGS_LC 0x80
+ __le16 len;
+#define LEN_V (1 << 4)
+#define LEN_CPP_CONT 0x0000
+#define LEN_CPP_32 0x0001
+#define LEN_CPP_64 0x0002
+#define LEN_CPP_128 0x0003
+ __le16 rid;
+ __le64 addr;
+ __le64 prod_idx_addr;
+ __le16 pkt_delay;
+ __le16 irq_delay;
+ __le64 lbq_addr;
+ __le16 lbq_buf_size;
+ __le16 lbq_len; /* entry count */
+ __le64 sbq_addr;
+ __le16 sbq_buf_size;
+ __le16 sbq_len; /* entry count */
+} __packed;
+
+struct ricb {
+ u8 base_cq;
+#define RSS_L4K 0x80
+ u8 flags;
+#define RSS_L6K 0x01
+#define RSS_LI 0x02
+#define RSS_LB 0x04
+#define RSS_LM 0x08
+#define RSS_RI4 0x10
+#define RSS_RT4 0x20
+#define RSS_RI6 0x40
+#define RSS_RT6 0x80
+ __le16 mask;
+ u8 hash_cq_id[1024];
+ __le32 ipv6_hash_key[10];
+ __le32 ipv4_hash_key[4];
+} __packed;
+
+/* SOFTWARE/DRIVER DATA STRUCTURES. */
+
+struct oal {
+ struct tx_buf_desc oal[TX_DESC_PER_OAL];
+};
+
+struct map_list {
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
+};
+
+struct tx_ring_desc {
+ struct sk_buff *skb;
+ struct ob_mac_iocb_req *queue_entry;
+ u32 index;
+ struct oal oal;
+ struct map_list map[MAX_SKB_FRAGS + 1];
+ int map_cnt;
+ struct tx_ring_desc *next;
+};
+
+struct page_chunk {
+ struct page *page; /* master page */
+ char *va; /* virt addr for this chunk */
+ u64 map; /* mapping for master */
+ unsigned int offset; /* offset for this chunk */
+ unsigned int last_flag; /* flag set for last chunk in page */
+};
+
+struct bq_desc {
+ union {
+ struct page_chunk pg_chunk;
+ struct sk_buff *skb;
+ } p;
+ __le64 *addr;
+ u32 index;
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
+};
+
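+/* e.g. on a system with four tx rings, work submitted from CPU 2 is
+ * hashed to tx ring 2 by the macro below.
+ */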
+#define QL_TXQ_IDX(qdev, skb) (smp_processor_id() % (qdev->tx_ring_count))
+
+struct tx_ring {
+ /*
+ * queue info.
+ */
+ struct wqicb wqicb; /* structure used to inform chip of new queue */
+ void *wq_base; /* pci_alloc:virtual addr for tx */
+ dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */
+ __le32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */
+ dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */
+ u32 wq_size; /* size in bytes of queue area */
+ u32 wq_len; /* number of entries in queue */
+ void __iomem *prod_idx_db_reg; /* doorbell area index reg at offset 0x00 */
+ void __iomem *valid_db_reg; /* doorbell area valid reg at offset 0x04 */
+ u16 prod_idx; /* current value for prod idx */
+ u16 cq_id; /* completion (rx) queue for tx completions */
+ u8 wq_id; /* queue id for this entry */
+ u8 reserved1[3];
+ struct tx_ring_desc *q; /* descriptor list for the queue */
+ spinlock_t lock;
+ atomic_t tx_count; /* counts down for every outstanding IO */
+ atomic_t queue_stopped; /* Turns queue off when full. */
+ struct delayed_work tx_work;
+ struct ql_adapter *qdev;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_errors;
+};
+
+/*
+ * Type of inbound queue.
+ */
+enum {
+ DEFAULT_Q = 2, /* Handles slow queue and chip/MPI events. */
+ TX_Q = 3, /* Handles outbound completions. */
+ RX_Q = 4, /* Handles inbound completions. */
+};
+
+struct rx_ring {
+ struct cqicb cqicb; /* The chip's completion queue init control block. */
+
+ /* Completion queue elements. */
+ void *cq_base;
+ dma_addr_t cq_base_dma;
+ u32 cq_size;
+ u32 cq_len;
+ u16 cq_id;
+ __le32 *prod_idx_sh_reg; /* Shadowed producer register. */
+ dma_addr_t prod_idx_sh_reg_dma;
+ void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */
+ u32 cnsmr_idx; /* current sw idx */
+ struct ql_net_rsp_iocb *curr_entry; /* next entry on queue */
+ void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */
+
+ /* Large buffer queue elements. */
+ u32 lbq_len; /* entry count */
+ u32 lbq_size; /* size in bytes of queue */
+ u32 lbq_buf_size;
+ void *lbq_base;
+ dma_addr_t lbq_base_dma;
+ void *lbq_base_indirect;
+ dma_addr_t lbq_base_indirect_dma;
+ struct page_chunk pg_chunk; /* current page for chunks */
+ struct bq_desc *lbq; /* array of control blocks */
+ void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */
+ u32 lbq_prod_idx; /* current sw prod idx */
+ u32 lbq_curr_idx; /* next entry we expect */
+ u32 lbq_clean_idx; /* beginning of new descs */
+ u32 lbq_free_cnt; /* free buffer desc cnt */
+
+ /* Small buffer queue elements. */
+ u32 sbq_len; /* entry count */
+ u32 sbq_size; /* size in bytes of queue */
+ u32 sbq_buf_size;
+ void *sbq_base;
+ dma_addr_t sbq_base_dma;
+ void *sbq_base_indirect;
+ dma_addr_t sbq_base_indirect_dma;
+ struct bq_desc *sbq; /* array of control blocks */
+ void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */
+ u32 sbq_prod_idx; /* current sw prod idx */
+ u32 sbq_curr_idx; /* next entry we expect */
+ u32 sbq_clean_idx; /* beginning of new descs */
+ u32 sbq_free_cnt; /* free buffer desc cnt */
+
+ /* Misc. handler elements. */
+ u32 type; /* Type of queue, tx, rx. */
+ u32 irq; /* Which vector this ring is assigned. */
+ u32 cpu; /* Which CPU this should run on. */
+ char name[IFNAMSIZ + 5];
+ struct napi_struct napi;
+ u8 reserved;
+ struct ql_adapter *qdev;
+ u64 rx_packets;
+ u64 rx_multicast;
+ u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_errors;
+};
+
+/*
+ * RSS Initialization Control Block
+ */
+struct hash_id {
+ u8 value[4];
+};
+
+struct nic_stats {
+ /*
+ * These stats come from offset 200h to 278h
+ * in the XGMAC register.
+ */
+ u64 tx_pkts;
+ u64 tx_bytes;
+ u64 tx_mcast_pkts;
+ u64 tx_bcast_pkts;
+ u64 tx_ucast_pkts;
+ u64 tx_ctl_pkts;
+ u64 tx_pause_pkts;
+ u64 tx_64_pkt;
+ u64 tx_65_to_127_pkt;
+ u64 tx_128_to_255_pkt;
+ u64 tx_256_511_pkt;
+ u64 tx_512_to_1023_pkt;
+ u64 tx_1024_to_1518_pkt;
+ u64 tx_1519_to_max_pkt;
+ u64 tx_undersize_pkt;
+ u64 tx_oversize_pkt;
+
+ /*
+ * These stats come from offset 300h to 3C8h
+ * in the XGMAC register.
+ */
+ u64 rx_bytes;
+ u64 rx_bytes_ok;
+ u64 rx_pkts;
+ u64 rx_pkts_ok;
+ u64 rx_bcast_pkts;
+ u64 rx_mcast_pkts;
+ u64 rx_ucast_pkts;
+ u64 rx_undersize_pkts;
+ u64 rx_oversize_pkts;
+ u64 rx_jabber_pkts;
+ u64 rx_undersize_fcerr_pkts;
+ u64 rx_drop_events;
+ u64 rx_fcerr_pkts;
+ u64 rx_align_err;
+ u64 rx_symbol_err;
+ u64 rx_mac_err;
+ u64 rx_ctl_pkts;
+ u64 rx_pause_pkts;
+ u64 rx_64_pkts;
+ u64 rx_65_to_127_pkts;
+ u64 rx_128_255_pkts;
+ u64 rx_256_511_pkts;
+ u64 rx_512_to_1023_pkts;
+ u64 rx_1024_to_1518_pkts;
+ u64 rx_1519_to_max_pkts;
+ u64 rx_len_err_pkts;
+ /*
+ * These stats come from offset 500h to 5C8h
+ * in the XGMAC register.
+ */
+ u64 tx_cbfc_pause_frames0;
+ u64 tx_cbfc_pause_frames1;
+ u64 tx_cbfc_pause_frames2;
+ u64 tx_cbfc_pause_frames3;
+ u64 tx_cbfc_pause_frames4;
+ u64 tx_cbfc_pause_frames5;
+ u64 tx_cbfc_pause_frames6;
+ u64 tx_cbfc_pause_frames7;
+ u64 rx_cbfc_pause_frames0;
+ u64 rx_cbfc_pause_frames1;
+ u64 rx_cbfc_pause_frames2;
+ u64 rx_cbfc_pause_frames3;
+ u64 rx_cbfc_pause_frames4;
+ u64 rx_cbfc_pause_frames5;
+ u64 rx_cbfc_pause_frames6;
+ u64 rx_cbfc_pause_frames7;
+ u64 rx_nic_fifo_drop;
+};
+
+/* Firmware coredump internal register address/length pairs. */
+enum {
+ MPI_CORE_REGS_ADDR = 0x00030000,
+ MPI_CORE_REGS_CNT = 127,
+ MPI_CORE_SH_REGS_CNT = 16,
+ TEST_REGS_ADDR = 0x00001000,
+ TEST_REGS_CNT = 23,
+ RMII_REGS_ADDR = 0x00001040,
+ RMII_REGS_CNT = 64,
+ FCMAC1_REGS_ADDR = 0x00001080,
+ FCMAC2_REGS_ADDR = 0x000010c0,
+ FCMAC_REGS_CNT = 64,
+ FC1_MBX_REGS_ADDR = 0x00001100,
+ FC2_MBX_REGS_ADDR = 0x00001240,
+ FC_MBX_REGS_CNT = 64,
+ IDE_REGS_ADDR = 0x00001140,
+ IDE_REGS_CNT = 64,
+ NIC1_MBX_REGS_ADDR = 0x00001180,
+ NIC2_MBX_REGS_ADDR = 0x00001280,
+ NIC_MBX_REGS_CNT = 64,
+ SMBUS_REGS_ADDR = 0x00001200,
+ SMBUS_REGS_CNT = 64,
+ I2C_REGS_ADDR = 0x00001fc0,
+ I2C_REGS_CNT = 64,
+ MEMC_REGS_ADDR = 0x00003000,
+ MEMC_REGS_CNT = 256,
+ PBUS_REGS_ADDR = 0x00007c00,
+ PBUS_REGS_CNT = 256,
+ MDE_REGS_ADDR = 0x00010000,
+ MDE_REGS_CNT = 6,
+ CODE_RAM_ADDR = 0x00020000,
+ CODE_RAM_CNT = 0x2000,
+ MEMC_RAM_ADDR = 0x00100000,
+ MEMC_RAM_CNT = 0x2000,
+};
+
+#define MPI_COREDUMP_COOKIE 0x5555aaaa
+struct mpi_coredump_global_header {
+ u32 cookie;
+ u8 idString[16];
+ u32 timeLo;
+ u32 timeHi;
+ u32 imageSize;
+ u32 headerSize;
+ u8 info[220];
+};
+
+struct mpi_coredump_segment_header {
+ u32 cookie;
+ u32 segNum;
+ u32 segSize;
+ u32 extra;
+ u8 description[16];
+};
+
+/* Firmware coredump header segment numbers. */
+enum {
+ CORE_SEG_NUM = 1,
+ TEST_LOGIC_SEG_NUM = 2,
+ RMII_SEG_NUM = 3,
+ FCMAC1_SEG_NUM = 4,
+ FCMAC2_SEG_NUM = 5,
+ FC1_MBOX_SEG_NUM = 6,
+ IDE_SEG_NUM = 7,
+ NIC1_MBOX_SEG_NUM = 8,
+ SMBUS_SEG_NUM = 9,
+ FC2_MBOX_SEG_NUM = 10,
+ NIC2_MBOX_SEG_NUM = 11,
+ I2C_SEG_NUM = 12,
+ MEMC_SEG_NUM = 13,
+ PBUS_SEG_NUM = 14,
+ MDE_SEG_NUM = 15,
+ NIC1_CONTROL_SEG_NUM = 16,
+ NIC2_CONTROL_SEG_NUM = 17,
+ NIC1_XGMAC_SEG_NUM = 18,
+ NIC2_XGMAC_SEG_NUM = 19,
+ WCS_RAM_SEG_NUM = 20,
+ MEMC_RAM_SEG_NUM = 21,
+ XAUI_AN_SEG_NUM = 22,
+ XAUI_HSS_PCS_SEG_NUM = 23,
+ XFI_AN_SEG_NUM = 24,
+ XFI_TRAIN_SEG_NUM = 25,
+ XFI_HSS_PCS_SEG_NUM = 26,
+ XFI_HSS_TX_SEG_NUM = 27,
+ XFI_HSS_RX_SEG_NUM = 28,
+ XFI_HSS_PLL_SEG_NUM = 29,
+ MISC_NIC_INFO_SEG_NUM = 30,
+ INTR_STATES_SEG_NUM = 31,
+ CAM_ENTRIES_SEG_NUM = 32,
+ ROUTING_WORDS_SEG_NUM = 33,
+ ETS_SEG_NUM = 34,
+ PROBE_DUMP_SEG_NUM = 35,
+ ROUTING_INDEX_SEG_NUM = 36,
+ MAC_PROTOCOL_SEG_NUM = 37,
+ XAUI2_AN_SEG_NUM = 38,
+ XAUI2_HSS_PCS_SEG_NUM = 39,
+ XFI2_AN_SEG_NUM = 40,
+ XFI2_TRAIN_SEG_NUM = 41,
+ XFI2_HSS_PCS_SEG_NUM = 42,
+ XFI2_HSS_TX_SEG_NUM = 43,
+ XFI2_HSS_RX_SEG_NUM = 44,
+ XFI2_HSS_PLL_SEG_NUM = 45,
+ SEM_REGS_SEG_NUM = 50
+
+};
+
+/* There are 64 generic NIC registers. */
+#define NIC_REGS_DUMP_WORD_COUNT 64
+/* XGMAC word count. */
+#define XGMAC_DUMP_WORD_COUNT (XGMAC_REGISTER_END / 4)
+/* Word counts for the SERDES blocks. */
+#define XG_SERDES_XAUI_AN_COUNT 14
+#define XG_SERDES_XAUI_HSS_PCS_COUNT 33
+#define XG_SERDES_XFI_AN_COUNT 14
+#define XG_SERDES_XFI_TRAIN_COUNT 12
+#define XG_SERDES_XFI_HSS_PCS_COUNT 15
+#define XG_SERDES_XFI_HSS_TX_COUNT 32
+#define XG_SERDES_XFI_HSS_RX_COUNT 32
+#define XG_SERDES_XFI_HSS_PLL_COUNT 32
+
+/* There are 2 CNA ETS and 8 NIC ETS registers. */
+#define ETS_REGS_DUMP_WORD_COUNT 10
+
+/* Each probe mux entry stores the probe type plus 64 entries
+ * that are each 64 bits in length. There are a total of
+ * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes.
+ */
+#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2))
+#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \
+ PRB_MX_ADDR_VALID_TOTAL)
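+/* Assuming PRB_MX_ADDR_MAX_MUX is the 64 noted above, each probe
+ * contributes 1 + (64 * 2) = 129 words, giving a probe dump of
+ * 129 * 34 = 4386 words in total.
+ */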
+/* Each routing entry consists of 4 32-bit words.
+ * They are route type, index, index word, and result.
+ * There are 2 route blocks with 8 entries each and
+ * 2 NIC blocks with 16 entries each.
+ * The total is 48 entries of 4 words each.
+ */
+#define RT_IDX_DUMP_ENTRIES 48
+#define RT_IDX_DUMP_WORDS_PER_ENTRY 4
+#define RT_IDX_DUMP_TOT_WORDS (RT_IDX_DUMP_ENTRIES * \
+ RT_IDX_DUMP_WORDS_PER_ENTRY)
+/* There are 10 address blocks in the filter, each with
+ * a different entry count and a different word count per entry.
+ */
+#define MAC_ADDR_DUMP_ENTRIES \
+ ((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \
+ (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \
+ (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \
+ (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \
+ (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT))
+#define MAC_ADDR_DUMP_WORDS_PER_ENTRY 2
+#define MAC_ADDR_DUMP_TOT_WORDS (MAC_ADDR_DUMP_ENTRIES * \
+ MAC_ADDR_DUMP_WORDS_PER_ENTRY)
+/* Maximum of 4 functions whose semaphore registers are
+ * in the coredump.
+ */
+#define MAX_SEMAPHORE_FUNCTIONS 4
+/* Defines for accessing the MPI shadow registers. */
+#define RISC_124 0x0003007c
+#define RISC_127 0x0003007f
+#define SHADOW_OFFSET 0xb0000000
+#define SHADOW_REG_SHIFT 20
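+
+/*
+ * Example (illustrative sketch): shadow register i is fetched by
+ * writing its selector to RISC_124 and reading the value back through
+ * RISC_127, as ql_get_mpi_shadow_regs() in qlge_dbg.c does:
+ *
+ *	status = ql_write_mpi_reg(qdev, RISC_124,
+ *				  SHADOW_OFFSET | (i << SHADOW_REG_SHIFT));
+ *	if (!status)
+ *		status = ql_read_mpi_reg(qdev, RISC_127, &val);
+ */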
+
+struct ql_nic_misc {
+ u32 rx_ring_count;
+ u32 tx_ring_count;
+ u32 intr_count;
+ u32 function;
+};
+
+struct ql_reg_dump {
+
+ /* segment 0 */
+ struct mpi_coredump_global_header mpi_global_header;
+
+ /* segment 16 */
+ struct mpi_coredump_segment_header nic_regs_seg_hdr;
+ u32 nic_regs[64];
+
+ /* segment 30 */
+ struct mpi_coredump_segment_header misc_nic_seg_hdr;
+ struct ql_nic_misc misc_nic_info;
+
+ /* segment 31 */
+ /* one interrupt state for each CQ */
+ struct mpi_coredump_segment_header intr_states_seg_hdr;
+ u32 intr_states[MAX_CPUS];
+
+ /* segment 32 */
+ /* 3 cam words each for 16 unicast,
+ * 2 cam words for each of 32 multicast.
+ */
+ struct mpi_coredump_segment_header cam_entries_seg_hdr;
+ u32 cam_entries[(16 * 3) + (32 * 3)];
+
+ /* segment 33 */
+ struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
+ u32 nic_routing_words[16];
+
+ /* segment 34 */
+ struct mpi_coredump_segment_header ets_seg_hdr;
+	u32 ets[ETS_REGS_DUMP_WORD_COUNT];
+};
+
+struct ql_mpi_coredump {
+ /* segment 0 */
+ struct mpi_coredump_global_header mpi_global_header;
+
+ /* segment 1 */
+ struct mpi_coredump_segment_header core_regs_seg_hdr;
+ u32 mpi_core_regs[MPI_CORE_REGS_CNT];
+ u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT];
+
+ /* segment 2 */
+ struct mpi_coredump_segment_header test_logic_regs_seg_hdr;
+ u32 test_logic_regs[TEST_REGS_CNT];
+
+ /* segment 3 */
+ struct mpi_coredump_segment_header rmii_regs_seg_hdr;
+ u32 rmii_regs[RMII_REGS_CNT];
+
+ /* segment 4 */
+ struct mpi_coredump_segment_header fcmac1_regs_seg_hdr;
+ u32 fcmac1_regs[FCMAC_REGS_CNT];
+
+ /* segment 5 */
+ struct mpi_coredump_segment_header fcmac2_regs_seg_hdr;
+ u32 fcmac2_regs[FCMAC_REGS_CNT];
+
+ /* segment 6 */
+ struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr;
+ u32 fc1_mbx_regs[FC_MBX_REGS_CNT];
+
+ /* segment 7 */
+ struct mpi_coredump_segment_header ide_regs_seg_hdr;
+ u32 ide_regs[IDE_REGS_CNT];
+
+ /* segment 8 */
+ struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr;
+ u32 nic1_mbx_regs[NIC_MBX_REGS_CNT];
+
+ /* segment 9 */
+ struct mpi_coredump_segment_header smbus_regs_seg_hdr;
+ u32 smbus_regs[SMBUS_REGS_CNT];
+
+ /* segment 10 */
+ struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr;
+ u32 fc2_mbx_regs[FC_MBX_REGS_CNT];
+
+ /* segment 11 */
+ struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr;
+ u32 nic2_mbx_regs[NIC_MBX_REGS_CNT];
+
+ /* segment 12 */
+ struct mpi_coredump_segment_header i2c_regs_seg_hdr;
+ u32 i2c_regs[I2C_REGS_CNT];
+ /* segment 13 */
+ struct mpi_coredump_segment_header memc_regs_seg_hdr;
+ u32 memc_regs[MEMC_REGS_CNT];
+
+ /* segment 14 */
+ struct mpi_coredump_segment_header pbus_regs_seg_hdr;
+ u32 pbus_regs[PBUS_REGS_CNT];
+
+ /* segment 15 */
+ struct mpi_coredump_segment_header mde_regs_seg_hdr;
+ u32 mde_regs[MDE_REGS_CNT];
+
+ /* segment 16 */
+ struct mpi_coredump_segment_header nic_regs_seg_hdr;
+ u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT];
+
+ /* segment 17 */
+ struct mpi_coredump_segment_header nic2_regs_seg_hdr;
+ u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT];
+
+ /* segment 18 */
+ struct mpi_coredump_segment_header xgmac1_seg_hdr;
+ u32 xgmac1[XGMAC_DUMP_WORD_COUNT];
+
+ /* segment 19 */
+ struct mpi_coredump_segment_header xgmac2_seg_hdr;
+ u32 xgmac2[XGMAC_DUMP_WORD_COUNT];
+
+ /* segment 20 */
+ struct mpi_coredump_segment_header code_ram_seg_hdr;
+ u32 code_ram[CODE_RAM_CNT];
+
+ /* segment 21 */
+ struct mpi_coredump_segment_header memc_ram_seg_hdr;
+ u32 memc_ram[MEMC_RAM_CNT];
+
+ /* segment 22 */
+ struct mpi_coredump_segment_header xaui_an_hdr;
+ u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT];
+
+ /* segment 23 */
+ struct mpi_coredump_segment_header xaui_hss_pcs_hdr;
+ u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
+
+ /* segment 24 */
+ struct mpi_coredump_segment_header xfi_an_hdr;
+ u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT];
+
+ /* segment 25 */
+ struct mpi_coredump_segment_header xfi_train_hdr;
+ u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
+
+ /* segment 26 */
+ struct mpi_coredump_segment_header xfi_hss_pcs_hdr;
+ u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
+
+ /* segment 27 */
+ struct mpi_coredump_segment_header xfi_hss_tx_hdr;
+ u32 serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
+
+ /* segment 28 */
+ struct mpi_coredump_segment_header xfi_hss_rx_hdr;
+ u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
+
+ /* segment 29 */
+ struct mpi_coredump_segment_header xfi_hss_pll_hdr;
+ u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
+
+ /* segment 30 */
+ struct mpi_coredump_segment_header misc_nic_seg_hdr;
+ struct ql_nic_misc misc_nic_info;
+
+ /* segment 31 */
+ /* one interrupt state for each CQ */
+ struct mpi_coredump_segment_header intr_states_seg_hdr;
+ u32 intr_states[MAX_RX_RINGS];
+
+ /* segment 32 */
+ /* 3 cam words each for 16 unicast,
+ * 2 cam words for each of 32 multicast.
+ */
+ struct mpi_coredump_segment_header cam_entries_seg_hdr;
+ u32 cam_entries[(16 * 3) + (32 * 3)];
+
+ /* segment 33 */
+ struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
+ u32 nic_routing_words[16];
+ /* segment 34 */
+ struct mpi_coredump_segment_header ets_seg_hdr;
+ u32 ets[ETS_REGS_DUMP_WORD_COUNT];
+
+ /* segment 35 */
+ struct mpi_coredump_segment_header probe_dump_seg_hdr;
+ u32 probe_dump[PRB_MX_DUMP_TOT_COUNT];
+
+ /* segment 36 */
+ struct mpi_coredump_segment_header routing_reg_seg_hdr;
+ u32 routing_regs[RT_IDX_DUMP_TOT_WORDS];
+
+ /* segment 37 */
+ struct mpi_coredump_segment_header mac_prot_reg_seg_hdr;
+ u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS];
+
+ /* segment 38 */
+ struct mpi_coredump_segment_header xaui2_an_hdr;
+ u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT];
+
+ /* segment 39 */
+ struct mpi_coredump_segment_header xaui2_hss_pcs_hdr;
+ u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
+
+ /* segment 40 */
+ struct mpi_coredump_segment_header xfi2_an_hdr;
+ u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT];
+
+ /* segment 41 */
+ struct mpi_coredump_segment_header xfi2_train_hdr;
+ u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
+
+ /* segment 42 */
+ struct mpi_coredump_segment_header xfi2_hss_pcs_hdr;
+ u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
+
+ /* segment 43 */
+ struct mpi_coredump_segment_header xfi2_hss_tx_hdr;
+ u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
+
+ /* segment 44 */
+ struct mpi_coredump_segment_header xfi2_hss_rx_hdr;
+ u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
+
+ /* segment 45 */
+ struct mpi_coredump_segment_header xfi2_hss_pll_hdr;
+ u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
+
+ /* segment 50 */
+	/* semaphore register for all 4 functions */
+ struct mpi_coredump_segment_header sem_regs_seg_hdr;
+ u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS];
+};
+
+/*
+ * intr_context structure is used during initialization
+ * to hook the interrupts. It is also used in a single
+ * irq environment as a context to the ISR.
+ */
+struct intr_context {
+ struct ql_adapter *qdev;
+ u32 intr;
+ u32 irq_mask; /* Mask of which rings the vector services. */
+ u32 hooked;
+ u32 intr_en_mask; /* value/mask used to enable this intr */
+ u32 intr_dis_mask; /* value/mask used to disable this intr */
+ u32 intr_read_mask; /* value/mask used to read this intr */
+ char name[IFNAMSIZ * 2];
+ atomic_t irq_cnt; /* irq_cnt is used in single vector
+ * environment. It's incremented for each
+ * irq handler that is scheduled. When each
+ * handler finishes it decrements irq_cnt and
+ * enables interrupts if it's zero. */
+ irq_handler_t handler;
+};
+
+/* adapter flags definitions. */
+enum {
+ QL_ADAPTER_UP = 0, /* Adapter has been brought up. */
+ QL_LEGACY_ENABLED = 1,
+ QL_MSI_ENABLED = 2,
+ QL_MSIX_ENABLED = 3,
+ QL_DMA64 = 4,
+ QL_PROMISCUOUS = 5,
+ QL_ALLMULTI = 6,
+ QL_PORT_CFG = 7,
+ QL_CAM_RT_SET = 8,
+ QL_SELFTEST = 9,
+ QL_LB_LINK_UP = 10,
+ QL_FRC_COREDUMP = 11,
+ QL_EEH_FATAL = 12,
+	QL_ASIC_RECOVERY = 14, /* We are in asic recovery. */
+};
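+
+/*
+ * Example (illustrative sketch): these are bit numbers into the
+ * adapter's flags word, suitable for the atomic bit helpers, e.g.:
+ *
+ *	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
+ *		set_bit(QL_PROMISCUOUS, &qdev->flags);
+ */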
+
+/* link_status bit definitions */
+enum {
+ STS_LOOPBACK_MASK = 0x00000700,
+ STS_LOOPBACK_PCS = 0x00000100,
+ STS_LOOPBACK_HSS = 0x00000200,
+ STS_LOOPBACK_EXT = 0x00000300,
+ STS_PAUSE_MASK = 0x000000c0,
+ STS_PAUSE_STD = 0x00000040,
+ STS_PAUSE_PRI = 0x00000080,
+ STS_SPEED_MASK = 0x00000038,
+ STS_SPEED_100Mb = 0x00000000,
+ STS_SPEED_1Gb = 0x00000008,
+ STS_SPEED_10Gb = 0x00000010,
+ STS_LINK_TYPE_MASK = 0x00000007,
+ STS_LINK_TYPE_XFI = 0x00000001,
+ STS_LINK_TYPE_XAUI = 0x00000002,
+ STS_LINK_TYPE_XFI_BP = 0x00000003,
+ STS_LINK_TYPE_XAUI_BP = 0x00000004,
+ STS_LINK_TYPE_10GBASET = 0x00000005,
+};
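+
+/*
+ * Example (illustrative sketch): decoding the negotiated speed from a
+ * link_status value using the masks above:
+ *
+ *	switch (link_status & STS_SPEED_MASK) {
+ *	case STS_SPEED_10Gb:
+ *		speed = 10000;
+ *		break;
+ *	case STS_SPEED_1Gb:
+ *		speed = 1000;
+ *		break;
+ *	default:
+ *		speed = 100;
+ *		break;
+ *	}
+ */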
+
+/* link_config bit definitions */
+enum {
+ CFG_JUMBO_FRAME_SIZE = 0x00010000,
+ CFG_PAUSE_MASK = 0x00000060,
+ CFG_PAUSE_STD = 0x00000020,
+ CFG_PAUSE_PRI = 0x00000040,
+ CFG_DCBX = 0x00000010,
+ CFG_LOOPBACK_MASK = 0x00000007,
+ CFG_LOOPBACK_PCS = 0x00000002,
+ CFG_LOOPBACK_HSS = 0x00000004,
+ CFG_LOOPBACK_EXT = 0x00000006,
+ CFG_DEFAULT_MAX_FRAME_SIZE = 0x00002580,
+};
+
+struct nic_operations {
+	int (*get_flash)(struct ql_adapter *);
+	int (*port_initialize)(struct ql_adapter *);
+};
+
+/*
+ * The main Adapter structure definition.
+ * This structure has all fields relevant to the hardware.
+ */
+struct ql_adapter {
+ struct ricb ricb;
+ unsigned long flags;
+ u32 wol;
+
+ struct nic_stats nic_stats;
+
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+ /* PCI Configuration information for this device */
+ struct pci_dev *pdev;
+ struct net_device *ndev; /* Parent NET device */
+
+ /* Hardware information */
+ u32 chip_rev_id;
+ u32 fw_rev_id;
+ u32 func; /* PCI function for this adapter */
+ u32 alt_func; /* PCI function for alternate adapter */
+	u32 port;		/* Port number for this adapter */
+
+ spinlock_t adapter_lock;
+ spinlock_t hw_lock;
+ spinlock_t stats_lock;
+
+ /* PCI Bus Relative Register Addresses */
+ void __iomem *reg_base;
+ void __iomem *doorbell_area;
+ u32 doorbell_area_size;
+
+ u32 msg_enable;
+
+ /* Page for Shadow Registers */
+ void *rx_ring_shadow_reg_area;
+ dma_addr_t rx_ring_shadow_reg_dma;
+ void *tx_ring_shadow_reg_area;
+ dma_addr_t tx_ring_shadow_reg_dma;
+
+ u32 mailbox_in;
+ u32 mailbox_out;
+ struct mbox_params idc_mbc;
+ struct mutex mpi_mutex;
+
+ int tx_ring_size;
+ int rx_ring_size;
+ u32 intr_count;
+ struct msix_entry *msi_x_entry;
+ struct intr_context intr_context[MAX_RX_RINGS];
+
+ int tx_ring_count; /* One per online CPU. */
+ u32 rss_ring_count; /* One per irq vector. */
+ /*
+ * rx_ring_count =
+ * (CPU count * outbound completion rx_ring) +
+ * (irq_vector_cnt * inbound (RSS) completion rx_ring)
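+	 * e.g., a hypothetical 4-CPU, 4-vector configuration gives
+	 * rx_ring_count = 4 + 4 = 8.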
+ */
+ int rx_ring_count;
+ int ring_mem_size;
+ void *ring_mem;
+
+ struct rx_ring rx_ring[MAX_RX_RINGS];
+ struct tx_ring tx_ring[MAX_TX_RINGS];
+ unsigned int lbq_buf_order;
+
+ int rx_csum;
+ u32 default_rx_queue;
+
+	u16 rx_coalesce_usecs;	/* cqicb->irq_delay */
+	u16 rx_max_coalesced_frames;	/* cqicb->pkt_delay */
+	u16 tx_coalesce_usecs;	/* cqicb->irq_delay */
+	u16 tx_max_coalesced_frames;	/* cqicb->pkt_delay */
+
+ u32 xg_sem_mask;
+ u32 port_link_up;
+ u32 port_init;
+ u32 link_status;
+ struct ql_mpi_coredump *mpi_coredump;
+ u32 core_is_dumped;
+ u32 link_config;
+ u32 led_config;
+ u32 max_frame_size;
+
+ union flash_params flash;
+
+ struct workqueue_struct *workqueue;
+ struct delayed_work asic_reset_work;
+ struct delayed_work mpi_reset_work;
+ struct delayed_work mpi_work;
+ struct delayed_work mpi_port_cfg_work;
+ struct delayed_work mpi_idc_work;
+ struct delayed_work mpi_core_to_log;
+ struct completion ide_completion;
+ const struct nic_operations *nic_ops;
+ u16 device_id;
+ struct timer_list timer;
+ atomic_t lb_count;
+ /* Keep local copy of current mac address. */
+	char current_mac_addr[ETH_ALEN];
+};
+
+/*
+ * Typical register accessor for a memory-mapped device.
+ */
+static inline u32 ql_read32(const struct ql_adapter *qdev, int reg)
+{
+ return readl(qdev->reg_base + reg);
+}
+
+/*
+ * Typical register accessor for a memory-mapped device.
+ */
+static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
+{
+ writel(val, qdev->reg_base + reg);
+}
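+
+/*
+ * Example (illustrative sketch): the two accessors compose into the
+ * usual read-modify-write sequence; "reg" and "bit" are placeholders.
+ *
+ *	u32 val = ql_read32(qdev, reg);
+ *
+ *	ql_write32(qdev, reg, val | bit);
+ */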
+
+/*
+ * Doorbell Registers:
+ * Doorbell registers are virtual registers in the PCI memory space.
+ * The space is allocated by the chip during PCI initialization. The
+ * device driver finds the doorbell address in BAR 3 in PCI config space.
+ * The registers are used to control outbound and inbound queues; for
+ * example, the producer index for an outbound queue. Each queue uses
+ * one 4 KB chunk of memory. The lower half of the space is for outbound
+ * queues. The upper half is for inbound queues.
+ */
+static inline void ql_write_db_reg(u32 val, void __iomem *addr)
+{
+ writel(val, addr);
+ mmiowb();
+}
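+
+/*
+ * Example (illustrative sketch): after queueing a frame, the tx path
+ * would advance its producer index (wrapping at the queue length) and
+ * then ring the queue's doorbell:
+ *
+ *	tx_ring->prod_idx++;
+ *	if (tx_ring->prod_idx == tx_ring->wq_len)
+ *		tx_ring->prod_idx = 0;
+ *	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
+ */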
+
+/*
+ * Shadow Registers:
+ * Outbound queues have a consumer index that is maintained by the chip.
+ * Inbound queues have a producer index that is maintained by the chip.
+ * For lower overhead, these registers are "shadowed" to host memory
+ * which allows the device driver to track the queue progress without
+ * PCI reads. When an entry is placed on an inbound queue, the chip will
+ * update the relevant index register and then copy the value to the
+ * shadow register in host memory.
+ */
+static inline u32 ql_read_sh_reg(__le32 *addr)
+{
+ u32 reg;
+ reg = le32_to_cpu(*addr);
+ rmb();
+ return reg;
+}
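+
+/*
+ * Example (illustrative sketch): an rx handler can detect new
+ * completions without a PCI read by comparing its software consumer
+ * index against the shadowed producer index. ql_process_rsp() is a
+ * hypothetical helper, not a function in this driver.
+ *
+ *	while (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
+ *	       rx_ring->cnsmr_idx)
+ *		ql_process_rsp(rx_ring);
+ */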
+
+extern char qlge_driver_name[];
+extern const char qlge_driver_version[];
+extern const struct ethtool_ops qlge_ethtool_ops;
+
+extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
+extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
+extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
+ u32 *value);
+extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
+extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
+ u16 q_id);
+void ql_queue_fw_error(struct ql_adapter *qdev);
+void ql_mpi_work(struct work_struct *work);
+void ql_mpi_reset_work(struct work_struct *work);
+void ql_mpi_core_to_log(struct work_struct *work);
+int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
+void ql_queue_asic_error(struct ql_adapter *qdev);
+u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
+void ql_set_ethtool_ops(struct net_device *ndev);
+int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
+void ql_mpi_idc_work(struct work_struct *work);
+void ql_mpi_port_cfg_work(struct work_struct *work);
+int ql_mb_get_fw_state(struct ql_adapter *qdev);
+int ql_cam_route_initialize(struct ql_adapter *qdev);
+int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
+int ql_unpause_mpi_risc(struct ql_adapter *qdev);
+int ql_pause_mpi_risc(struct ql_adapter *qdev);
+int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
+ u32 ram_addr, int word_count);
+int ql_core_dump(struct ql_adapter *qdev,
+ struct ql_mpi_coredump *mpi_coredump);
+int ql_mb_about_fw(struct ql_adapter *qdev);
+int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
+int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
+int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
+int ql_mb_get_led_cfg(struct ql_adapter *qdev);
+void ql_link_on(struct ql_adapter *qdev);
+void ql_link_off(struct ql_adapter *qdev);
+int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
+int ql_mb_get_port_cfg(struct ql_adapter *qdev);
+int ql_mb_set_port_cfg(struct ql_adapter *qdev);
+int ql_wait_fifo_empty(struct ql_adapter *qdev);
+void ql_get_dump(struct ql_adapter *qdev, void *buff);
+void ql_gen_reg_dump(struct ql_adapter *qdev,
+ struct ql_reg_dump *mpi_coredump);
+netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
+void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
+int ql_own_firmware(struct ql_adapter *qdev);
+int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
+
+/* #define QL_ALL_DUMP */
+/* #define QL_REG_DUMP */
+/* #define QL_DEV_DUMP */
+/* #define QL_CB_DUMP */
+/* #define QL_IB_DUMP */
+/* #define QL_OB_DUMP */
+
+#ifdef QL_REG_DUMP
+extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
+extern void ql_dump_routing_entries(struct ql_adapter *qdev);
+extern void ql_dump_regs(struct ql_adapter *qdev);
+#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
+#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
+#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
+#else
+#define QL_DUMP_REGS(qdev)
+#define QL_DUMP_ROUTE(qdev)
+#define QL_DUMP_XGMAC_CONTROL_REGS(qdev)
+#endif
+
+#ifdef QL_STAT_DUMP
+extern void ql_dump_stat(struct ql_adapter *qdev);
+#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
+#else
+#define QL_DUMP_STAT(qdev)
+#endif
+
+#ifdef QL_DEV_DUMP
+extern void ql_dump_qdev(struct ql_adapter *qdev);
+#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
+#else
+#define QL_DUMP_QDEV(qdev)
+#endif
+
+#ifdef QL_CB_DUMP
+extern void ql_dump_wqicb(struct wqicb *wqicb);
+extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
+extern void ql_dump_ricb(struct ricb *ricb);
+extern void ql_dump_cqicb(struct cqicb *cqicb);
+extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
+extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
+#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
+#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
+#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
+#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb)
+#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring)
+#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
+ ql_dump_hw_cb(qdev, size, bit, q_id)
+#else
+#define QL_DUMP_RICB(ricb)
+#define QL_DUMP_WQICB(wqicb)
+#define QL_DUMP_TX_RING(tx_ring)
+#define QL_DUMP_CQICB(cqicb)
+#define QL_DUMP_RX_RING(rx_ring)
+#define QL_DUMP_HW_CB(qdev, size, bit, q_id)
+#endif
+
+#ifdef QL_OB_DUMP
+extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
+extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
+extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
+#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
+#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
+#else
+#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
+#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
+#endif
+
+#ifdef QL_IB_DUMP
+extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
+#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
+#else
+#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
+#endif
+
+#ifdef QL_ALL_DUMP
+extern void ql_dump_all(struct ql_adapter *qdev);
+#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
+#else
+#define QL_DUMP_ALL(qdev)
+#endif
+
+#endif /* _QLGE_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
new file mode 100644
index 000000000000..fca804f36d61
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -0,0 +1,2044 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+
+#include "qlge.h"
+
+/* Read a NIC register from the alternate function. */
+static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
+ u32 reg)
+{
+ u32 register_to_read;
+ u32 reg_val;
+ unsigned int status = 0;
+
+ register_to_read = MPI_NIC_REG_BLOCK
+ | MPI_NIC_READ
+ | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
+ | reg;
+ status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
+ if (status != 0)
+ return 0xffffffff;
+
+ return reg_val;
+}
+
+/* Write a NIC register on the alternate function. */
+static int ql_write_other_func_reg(struct ql_adapter *qdev,
+ u32 reg, u32 reg_val)
+{
+ u32 register_to_read;
+ int status = 0;
+
+ register_to_read = MPI_NIC_REG_BLOCK
+ | MPI_NIC_READ
+ | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
+ | reg;
+ status = ql_write_mpi_reg(qdev, register_to_read, reg_val);
+
+ return status;
+}
+
+static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
+ u32 bit, u32 err_bit)
+{
+ u32 temp;
+ int count = 10;
+
+ while (count) {
+ temp = ql_read_other_func_reg(qdev, reg);
+
+ /* check for errors */
+ if (temp & err_bit)
+ return -1;
+ else if (temp & bit)
+ return 0;
+ mdelay(10);
+ count--;
+ }
+ return -1;
+}
+
+static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
+ u32 *data)
+{
+ int status;
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
+ XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* set up for reg read */
+	ql_write_other_func_reg(qdev, XG_SERDES_ADDR / 4, reg | PROC_ADDR_R);
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
+ XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* get the data */
+ *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
+exit:
+ return status;
+}
+
+/* Read out the SERDES registers */
+static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+ int status;
+
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* set up for reg read */
+ ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
+
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* get the data */
+ *data = ql_read32(qdev, XG_SERDES_DATA);
+exit:
+ return status;
+}
+
+static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
+ u32 *direct_ptr, u32 *indirect_ptr,
+ unsigned int direct_valid, unsigned int indirect_valid)
+{
+ unsigned int status;
+
+ status = 1;
+ if (direct_valid)
+ status = ql_read_serdes_reg(qdev, addr, direct_ptr);
+	/* Dead-fill on any failure or invalid read. */
+ if (status)
+ *direct_ptr = 0xDEADBEEF;
+
+ status = 1;
+ if (indirect_valid)
+ status = ql_read_other_func_serdes_reg(
+ qdev, addr, indirect_ptr);
+	/* Dead-fill on any failure or invalid read. */
+ if (status)
+ *indirect_ptr = 0xDEADBEEF;
+}
+
+static int ql_get_serdes_regs(struct ql_adapter *qdev,
+ struct ql_mpi_coredump *mpi_coredump)
+{
+ int status;
+ unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
+ unsigned int xaui_indirect_valid, i;
+ u32 *direct_ptr, temp;
+ u32 *indirect_ptr;
+
+ xfi_direct_valid = xfi_indirect_valid = 0;
+ xaui_direct_valid = xaui_indirect_valid = 1;
+
+	/* The XAUI power-down status is read out per port, through both
+	 * the direct and the alternate function path; a powered-down
+	 * XAUI invalidates the corresponding dump.
+	 */
+	status = ql_read_other_func_serdes_reg(qdev,
+			XG_SERDES_XAUI_HSS_PCS_START, &temp);
+	if (status)
+		temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+	if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+				XG_SERDES_ADDR_XAUI_PWR_DOWN)
+		xaui_indirect_valid = 0;
+
+	status = ql_read_serdes_reg(qdev,
+			XG_SERDES_XAUI_HSS_PCS_START, &temp);
+	if (status)
+		temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+	if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+				XG_SERDES_ADDR_XAUI_PWR_DOWN)
+		xaui_direct_valid = 0;
+
+	/*
+	 * The XFI register is shared, so we only need to read it
+	 * through one function and then check the bits.
+	 */
+ status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
+ if (status)
+ temp = 0;
+
+ if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
+ XG_SERDES_ADDR_XFI1_PWR_UP) {
+		/* now see if I'm NIC 1 or NIC 2 */
+ if (qdev->func & 1)
+ /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
+ xfi_indirect_valid = 1;
+ else
+ xfi_direct_valid = 1;
+ }
+ if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
+ XG_SERDES_ADDR_XFI2_PWR_UP) {
+		/* now see if I'm NIC 1 or NIC 2 */
+ if (qdev->func & 1)
+ /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
+ xfi_direct_valid = 1;
+ else
+ xfi_indirect_valid = 1;
+ }
+
+ /* Get XAUI_AN register block. */
+ if (qdev->func & 1) {
+ /* Function 2 is direct */
+ direct_ptr = mpi_coredump->serdes2_xaui_an;
+ indirect_ptr = mpi_coredump->serdes_xaui_an;
+ } else {
+ /* Function 1 is direct */
+ direct_ptr = mpi_coredump->serdes_xaui_an;
+ indirect_ptr = mpi_coredump->serdes2_xaui_an;
+ }
+
+ for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xaui_direct_valid, xaui_indirect_valid);
+
+ /* Get XAUI_HSS_PCS register block. */
+	if (qdev->func & 1) {
+		direct_ptr = mpi_coredump->serdes2_xaui_hss_pcs;
+		indirect_ptr = mpi_coredump->serdes_xaui_hss_pcs;
+	} else {
+		direct_ptr = mpi_coredump->serdes_xaui_hss_pcs;
+		indirect_ptr = mpi_coredump->serdes2_xaui_hss_pcs;
+	}
+
+ for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xaui_direct_valid, xaui_indirect_valid);
+
+ /* Get XAUI_XFI_AN register block. */
+ if (qdev->func & 1) {
+ direct_ptr = mpi_coredump->serdes2_xfi_an;
+ indirect_ptr = mpi_coredump->serdes_xfi_an;
+ } else {
+ direct_ptr = mpi_coredump->serdes_xfi_an;
+ indirect_ptr = mpi_coredump->serdes2_xfi_an;
+ }
+
+ for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_TRAIN register block. */
+	if (qdev->func & 1) {
+		direct_ptr = mpi_coredump->serdes2_xfi_train;
+		indirect_ptr = mpi_coredump->serdes_xfi_train;
+	} else {
+		direct_ptr = mpi_coredump->serdes_xfi_train;
+		indirect_ptr = mpi_coredump->serdes2_xfi_train;
+	}
+
+ for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_HSS_PCS register block. */
+	if (qdev->func & 1) {
+		direct_ptr = mpi_coredump->serdes2_xfi_hss_pcs;
+		indirect_ptr = mpi_coredump->serdes_xfi_hss_pcs;
+	} else {
+		direct_ptr = mpi_coredump->serdes_xfi_hss_pcs;
+		indirect_ptr = mpi_coredump->serdes2_xfi_hss_pcs;
+	}
+
+ for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_HSS_TX register block. */
+	if (qdev->func & 1) {
+		direct_ptr = mpi_coredump->serdes2_xfi_hss_tx;
+		indirect_ptr = mpi_coredump->serdes_xfi_hss_tx;
+	} else {
+		direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
+		indirect_ptr = mpi_coredump->serdes2_xfi_hss_tx;
+	}
+ for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_HSS_RX register block. */
+	if (qdev->func & 1) {
+		direct_ptr = mpi_coredump->serdes2_xfi_hss_rx;
+		indirect_ptr = mpi_coredump->serdes_xfi_hss_rx;
+	} else {
+		direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
+		indirect_ptr = mpi_coredump->serdes2_xfi_hss_rx;
+	}
+
+ for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+
+ /* Get XAUI_XFI_HSS_PLL register block. */
+	if (qdev->func & 1) {
+		direct_ptr = mpi_coredump->serdes2_xfi_hss_pll;
+		indirect_ptr = mpi_coredump->serdes_xfi_hss_pll;
+	} else {
+		direct_ptr = mpi_coredump->serdes_xfi_hss_pll;
+		indirect_ptr = mpi_coredump->serdes2_xfi_hss_pll;
+	}
+ for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+ return 0;
+}
+
+static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
+ u32 *data)
+{
+ int status = 0;
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
+ XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ if (status)
+ goto exit;
+
+ /* set up for reg read */
+ ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
+ XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ if (status)
+ goto exit;
+
+ /* get the data */
+ *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
+exit:
+ return status;
+}
+
+/* Read the 400 xgmac control/statistics registers
+ * skipping unused locations.
+ */
+static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
+					unsigned int other_function)
+{
+	int status = 0;
+	int i;
+
+	for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
+		/* We're reading 400 xgmac registers, but we filter out
+		 * several locations that are non-responsive to reads.
+		 */
+		if ((i == 0x00000114) ||
+			(i == 0x00000118) ||
+			(i == 0x0000013c) ||
+			(i == 0x00000140) ||
+			(i > 0x00000150 && i < 0x000001fc) ||
+			(i > 0x00000278 && i < 0x000002a0) ||
+			(i > 0x000002c0 && i < 0x000002cf) ||
+			(i > 0x000002dc && i < 0x000002f0) ||
+			(i > 0x000003c8 && i < 0x00000400) ||
+			(i > 0x00000400 && i < 0x00000410) ||
+			(i > 0x00000410 && i < 0x00000420) ||
+			(i > 0x00000420 && i < 0x00000430) ||
+			(i > 0x00000430 && i < 0x00000440) ||
+			(i > 0x00000440 && i < 0x00000450) ||
+			(i > 0x00000450 && i < 0x00000500) ||
+			(i > 0x0000054c && i < 0x00000568) ||
+			(i > 0x000005c8 && i < 0x00000600)) {
+			/* Dead-fill the slot for a skipped location so
+			 * the buffer stays aligned with the register
+			 * offsets.
+			 */
+			*buf = 0xdeadbeef;
+			continue;
+		}
+
+		if (other_function)
+			status = ql_read_other_func_xgmac_reg(qdev, i, buf);
+		else
+			status = ql_read_xgmac_reg(qdev, i, buf);
+
+		if (status)
+			*buf = 0xdeadbeef;
+	}
+ return status;
+}
+
+static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
+{
+ int status = 0;
+ int i;
+
+ for (i = 0; i < 8; i++, buf++) {
+ ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
+ *buf = ql_read32(qdev, NIC_ETS);
+ }
+
+ for (i = 0; i < 2; i++, buf++) {
+ ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
+ *buf = ql_read32(qdev, CNA_ETS);
+ }
+
+ return status;
+}
+
+static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
+{
+ int i;
+
+ for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
+ ql_write32(qdev, INTR_EN,
+ qdev->intr_context[i].intr_read_mask);
+ *buf = ql_read32(qdev, INTR_EN);
+ }
+}
+
+static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
+{
+ int i, status;
+ u32 value[3];
+
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return status;
+
+ for (i = 0; i < 16; i++) {
+ status = ql_get_mac_addr_reg(qdev,
+ MAC_ADDR_TYPE_CAM_MAC, i, value);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of mac index register\n");
+ goto err;
+ }
+ *buf++ = value[0]; /* lower MAC address */
+ *buf++ = value[1]; /* upper MAC address */
+ *buf++ = value[2]; /* output */
+ }
+ for (i = 0; i < 32; i++) {
+ status = ql_get_mac_addr_reg(qdev,
+ MAC_ADDR_TYPE_MULTI_MAC, i, value);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of mac index register\n");
+ goto err;
+ }
+ *buf++ = value[0]; /* lower Mcast address */
+ *buf++ = value[1]; /* upper Mcast address */
+ }
+err:
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ return status;
+}
+
+static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
+{
+ int status;
+ u32 value, i;
+
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (status)
+ return status;
+
+ for (i = 0; i < 16; i++) {
+ status = ql_get_routing_reg(qdev, i, &value);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of routing index register\n");
+ goto err;
+ } else {
+ *buf++ = value;
+ }
+ }
+err:
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ return status;
+}
+
+/* Read the MPI Processor shadow registers */
+static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
+{
+ u32 i;
+ int status;
+
+ for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
+ status = ql_write_mpi_reg(qdev, RISC_124,
+ (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
+ if (status)
+ goto end;
+ status = ql_read_mpi_reg(qdev, RISC_127, buf);
+ if (status)
+ goto end;
+ }
+end:
+ return status;
+}
+
+/* Read the MPI Processor core registers */
+static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
+ u32 offset, u32 count)
+{
+ int i, status = 0;
+ for (i = 0; i < count; i++, buf++) {
+ status = ql_read_mpi_reg(qdev, offset + i, buf);
+ if (status)
+ return status;
+ }
+ return status;
+}
+
+/* Read the ASIC probe dump */
+static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
+ u32 valid, u32 *buf)
+{
+ u32 module, mux_sel, probe, lo_val, hi_val;
+
+ for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
+ if (!((valid >> module) & 1))
+ continue;
+ for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
+ probe = clock
+ | PRB_MX_ADDR_ARE
+ | mux_sel
+ | (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
+ ql_write32(qdev, PRB_MX_ADDR, probe);
+ lo_val = ql_read32(qdev, PRB_MX_DATA);
+			if (mux_sel == 0)
+				*buf++ = probe;
+			probe |= PRB_MX_ADDR_UP;
+			ql_write32(qdev, PRB_MX_ADDR, probe);
+			hi_val = ql_read32(qdev, PRB_MX_DATA);
+			*buf++ = lo_val;
+			*buf++ = hi_val;
+ }
+ }
+ return buf;
+}
+
+static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
+{
+ /* First we have to enable the probe mux */
+ ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
+ PRB_MX_ADDR_VALID_SYS_MOD, buf);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
+ PRB_MX_ADDR_VALID_PCI_MOD, buf);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
+ PRB_MX_ADDR_VALID_XGM_MOD, buf);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
+ PRB_MX_ADDR_VALID_FC_MOD, buf);
+ return 0;
+
+}
+
+/* Read out the routing index registers */
+static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
+{
+ int status;
+ u32 type, index, index_max;
+ u32 result_index;
+ u32 result_data;
+ u32 val;
+
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (status)
+ return status;
+
+ for (type = 0; type < 4; type++) {
+ if (type < 2)
+ index_max = 8;
+ else
+ index_max = 16;
+ for (index = 0; index < index_max; index++) {
+ val = RT_IDX_RS
+ | (type << RT_IDX_TYPE_SHIFT)
+ | (index << RT_IDX_IDX_SHIFT);
+ ql_write32(qdev, RT_IDX, val);
+ result_index = 0;
+ while ((result_index & RT_IDX_MR) == 0)
+ result_index = ql_read32(qdev, RT_IDX);
+ result_data = ql_read32(qdev, RT_DATA);
+			*buf++ = type;
+			*buf++ = index;
+			*buf++ = result_index;
+			*buf++ = result_data;
+ }
+ }
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ return status;
+}
+
+/* Read out the MAC protocol registers */
+static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
+{
+ u32 result_index, result_data;
+ u32 type;
+ u32 index;
+ u32 offset;
+ u32 val;
+ u32 initial_val = MAC_ADDR_RS;
+ u32 max_index;
+ u32 max_offset;
+
+ for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
+ switch (type) {
+
+ case 0: /* CAM */
+ initial_val |= MAC_ADDR_ADR;
+ max_index = MAC_ADDR_MAX_CAM_ENTRIES;
+ max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+ break;
+ case 1: /* Multicast MAC Address */
+ max_index = MAC_ADDR_MAX_CAM_WCOUNT;
+ max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+ break;
+ case 2: /* VLAN filter mask */
+ case 3: /* MC filter mask */
+ max_index = MAC_ADDR_MAX_CAM_WCOUNT;
+ max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+ break;
+ case 4: /* FC MAC addresses */
+ max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
+ max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
+ break;
+ case 5: /* Mgmt MAC addresses */
+ max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
+ break;
+ case 6: /* Mgmt VLAN addresses */
+ max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
+ break;
+ case 7: /* Mgmt IPv4 address */
+ max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
+ break;
+ case 8: /* Mgmt IPv6 address */
+ max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
+ break;
+ case 9: /* Mgmt TCP/UDP Dest port */
+ max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
+ break;
+ default:
+			pr_err("Bad type 0x%08x\n", type);
+ max_index = 0;
+ max_offset = 0;
+ break;
+ }
+ for (index = 0; index < max_index; index++) {
+ for (offset = 0; offset < max_offset; offset++) {
+ val = initial_val
+ | (type << MAC_ADDR_TYPE_SHIFT)
+ | (index << MAC_ADDR_IDX_SHIFT)
+ | (offset);
+ ql_write32(qdev, MAC_ADDR_IDX, val);
+ result_index = 0;
+ while ((result_index & MAC_ADDR_MR) == 0) {
+ result_index = ql_read32(qdev,
+ MAC_ADDR_IDX);
+ }
+ result_data = ql_read32(qdev, MAC_ADDR_DATA);
+				*buf++ = result_index;
+				*buf++ = result_data;
+ }
+ }
+ }
+}
+
+static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
+{
+ u32 func_num, reg, reg_val;
+ int status;
+
+ for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
+ reg = MPI_NIC_REG_BLOCK
+ | (func_num << MPI_NIC_FUNCTION_SHIFT)
+ | (SEM / 4);
+ status = ql_read_mpi_reg(qdev, reg, &reg_val);
+ *buf = reg_val;
+		/* If the read failed then dead-fill the element. */
+		if (status)
+ *buf = 0xdeadbeef;
+ buf++;
+ }
+}
+
+/* Create a coredump segment header */
+static void ql_build_coredump_seg_header(
+ struct mpi_coredump_segment_header *seg_hdr,
+ u32 seg_number, u32 seg_size, u8 *desc)
+{
+ memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
+ seg_hdr->cookie = MPI_COREDUMP_COOKIE;
+ seg_hdr->segNum = seg_number;
+ seg_hdr->segSize = seg_size;
+ memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+}
+
+/*
+ * This function should be called when a coredump / probedump
+ * is to be extracted from the HBA. It is assumed there is a
+ * qdev structure that contains the base address of the register
+ * space for this function as well as a coredump structure that
+ * will contain the dump.
+ */
+int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
+{
+ int status;
+ int i;
+
+ if (!mpi_coredump) {
+ netif_err(qdev, drv, qdev->ndev, "No memory available\n");
+ return -ENOMEM;
+ }
+
+	/* Try to get the spinlock, but don't worry if
+	 * it isn't available. If the firmware died it
+	 * might be holding the sem.
+	 */
+ ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+
+ status = ql_pause_mpi_risc(qdev);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed RISC pause. Status = 0x%.08x\n", status);
+ goto err;
+ }
+
+ /* Insert the global header */
+ memset(&(mpi_coredump->mpi_global_header), 0,
+ sizeof(struct mpi_coredump_global_header));
+ mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
+ mpi_coredump->mpi_global_header.headerSize =
+ sizeof(struct mpi_coredump_global_header);
+ mpi_coredump->mpi_global_header.imageSize =
+ sizeof(struct ql_mpi_coredump);
+ memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ sizeof(mpi_coredump->mpi_global_header.idString));
+
+ /* Get generic NIC reg dump */
+ ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
+ NIC1_CONTROL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
+ NIC2_CONTROL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
+
+ /* Get XGMac registers. (Segment 18, Rev C. step 21) */
+ ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
+ NIC1_XGMAC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
+ NIC2_XGMAC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
+
+ if (qdev->func & 1) {
+ /* Odd means our function is NIC 2 */
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic2_regs[i] =
+ ql_read32(qdev, i * sizeof(u32));
+
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic_regs[i] =
+ ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
+
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
+ } else {
+ /* Even means our function is NIC 1 */
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic_regs[i] =
+ ql_read32(qdev, i * sizeof(u32));
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic2_regs[i] =
+ ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
+
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
+ }
+
+ /* Rev C. Step 20a */
+ ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
+ XAUI_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xaui_an),
+ "XAUI AN Registers");
+
+ /* Rev C. Step 20b */
+ ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
+ XAUI_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xaui_hss_pcs),
+ "XAUI HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_an),
+ "XFI AN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
+ XFI_TRAIN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_train),
+ "XFI TRAIN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
+ XFI_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_pcs),
+ "XFI HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
+ XFI_HSS_TX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_tx),
+ "XFI HSS TX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
+ XFI_HSS_RX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_rx),
+ "XFI HSS RX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
+ XFI_HSS_PLL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_pll),
+ "XFI HSS PLL Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
+ XAUI2_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xaui_an),
+ "XAUI2 AN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
+ XAUI2_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
+ "XAUI2 HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
+ XFI2_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_an),
+ "XFI2 AN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
+ XFI2_TRAIN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_train),
+ "XFI2 TRAIN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
+ XFI2_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
+ "XFI2 HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
+ XFI2_HSS_TX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_tx),
+ "XFI2 HSS TX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
+ XFI2_HSS_RX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_rx),
+ "XFI2 HSS RX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
+ XFI2_HSS_PLL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_pll),
+ "XFI2 HSS PLL Registers");
+
+ status = ql_get_serdes_regs(qdev, mpi_coredump);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
+ status);
+ goto err;
+ }
+
+ ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
+ CORE_SEG_NUM,
+ sizeof(mpi_coredump->core_regs_seg_hdr) +
+ sizeof(mpi_coredump->mpi_core_regs) +
+ sizeof(mpi_coredump->mpi_core_sh_regs),
+ "Core Registers");
+
+ /* Get the MPI Core Registers */
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
+ MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
+ if (status)
+ goto err;
+ /* Get the 16 MPI shadow registers */
+ status = ql_get_mpi_shadow_regs(qdev,
+ &mpi_coredump->mpi_core_sh_regs[0]);
+ if (status)
+ goto err;
+
+ /* Get the Test Logic Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
+ TEST_LOGIC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->test_logic_regs),
+ "Test Logic Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
+ TEST_REGS_ADDR, TEST_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the RMII Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
+ RMII_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->rmii_regs),
+ "RMII Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
+ RMII_REGS_ADDR, RMII_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FCMAC1 Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
+ FCMAC1_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fcmac1_regs),
+ "FCMAC1 Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
+ FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FCMAC2 Registers */
+
+ ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
+ FCMAC2_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fcmac2_regs),
+ "FCMAC2 Registers");
+
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
+ FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FC1 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
+ FC1_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fc1_mbx_regs),
+ "FC1 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
+ FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the IDE Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
+ IDE_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->ide_regs),
+ "IDE Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
+ IDE_REGS_ADDR, IDE_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the NIC1 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
+ NIC1_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic1_mbx_regs),
+ "NIC1 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
+ NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the SMBus Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
+ SMBUS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->smbus_regs),
+ "SMBus Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
+ SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FC2 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
+ FC2_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fc2_mbx_regs),
+ "FC2 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
+ FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the NIC2 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
+ NIC2_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic2_mbx_regs),
+ "NIC2 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
+ NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the I2C Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
+ I2C_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->i2c_regs),
+ "I2C Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
+ I2C_REGS_ADDR, I2C_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the MEMC Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
+ MEMC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->memc_regs),
+ "MEMC Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
+ MEMC_REGS_ADDR, MEMC_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the PBus Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
+ PBUS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->pbus_regs),
+ "PBUS Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
+ PBUS_REGS_ADDR, PBUS_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the MDE Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
+ MDE_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->mde_regs),
+ "MDE Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
+ MDE_REGS_ADDR, MDE_REGS_CNT);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
+ MISC_NIC_INFO_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->misc_nic_info),
+ "MISC NIC INFO");
+ mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
+ mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
+ mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
+ mpi_coredump->misc_nic_info.function = qdev->func;
+
+ /* Segment 31 */
+ /* Get indexed register values. */
+ ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
+ INTR_STATES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->intr_states),
+ "INTR States");
+ ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
+ CAM_ENTRIES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->cam_entries),
+ "CAM Entries");
+ status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
+ ROUTING_WORDS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic_routing_words),
+ "Routing Words");
+ status = ql_get_routing_entries(qdev,
+ &mpi_coredump->nic_routing_words[0]);
+ if (status)
+ goto err;
+
+ /* Segment 34 (Rev C. step 23) */
+ ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
+ ETS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->ets),
+ "ETS Registers");
+ status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
+ PROBE_DUMP_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->probe_dump),
+ "Probe Dump");
+ ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
+ ROUTING_INDEX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->routing_regs),
+ "Routing Regs");
+ status = ql_get_routing_index_registers(qdev,
+ &mpi_coredump->routing_regs[0]);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
+ MAC_PROTOCOL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->mac_prot_regs),
+ "MAC Prot Regs");
+ ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
+
+ /* Get the semaphore registers for all 5 functions */
+ ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
+ SEM_REGS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->sem_regs), "Sem Registers");
+
+ ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
+
+	/* Prevent the MPI from restarting while we dump the memory. */
+ ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
+
+ /* clear the pause */
+ status = ql_unpause_mpi_risc(qdev);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed RISC unpause. Status = 0x%.08x\n", status);
+ goto err;
+ }
+
+ /* Reset the RISC so we can dump RAM */
+ status = ql_hard_reset_mpi_risc(qdev);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed RISC reset. Status = 0x%.08x\n", status);
+ goto err;
+ }
+
+ ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
+ WCS_RAM_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->code_ram),
+ "WCS RAM");
+ status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
+ CODE_RAM_ADDR, CODE_RAM_CNT);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Dump of CODE RAM. Status = 0x%.08x\n",
+ status);
+ goto err;
+ }
+
+ /* Insert the segment header */
+ ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
+ MEMC_RAM_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->memc_ram),
+ "MEMC RAM");
+ status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
+ MEMC_RAM_ADDR, MEMC_RAM_CNT);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
+ status);
+ goto err;
+ }
+err:
+ ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
+ return status;
+}
+
+static void ql_get_core_dump(struct ql_adapter *qdev)
+{
+ if (!ql_own_firmware(qdev)) {
+ netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
+ return;
+ }
+
+ if (!netif_running(qdev->ndev)) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Force Coredump can only be done from interface that is up\n");
+ return;
+ }
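+	/* Queue a firmware error so the MPI recovery worker takes the
+	 * forced core dump.
+	 */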
+ ql_queue_fw_error(qdev);
+}
+
+void ql_gen_reg_dump(struct ql_adapter *qdev,
+ struct ql_reg_dump *mpi_coredump)
+{
+ int i, status;
+
+ memset(&(mpi_coredump->mpi_global_header), 0,
+ sizeof(struct mpi_coredump_global_header));
+ mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
+ mpi_coredump->mpi_global_header.headerSize =
+ sizeof(struct mpi_coredump_global_header);
+ mpi_coredump->mpi_global_header.imageSize =
+ sizeof(struct ql_reg_dump);
+ memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ sizeof(mpi_coredump->mpi_global_header.idString));
+
+ /* segment 16 */
+ ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
+ MISC_NIC_INFO_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->misc_nic_info),
+ "MISC NIC INFO");
+ mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
+ mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
+ mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
+ mpi_coredump->misc_nic_info.function = qdev->func;
+
+ /* Segment 16, Rev C. Step 18 */
+ ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
+ NIC1_CONTROL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic_regs),
+ "NIC Registers");
+ /* Get generic reg dump */
+ for (i = 0; i < 64; i++)
+ mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
+
+ /* Segment 31 */
+ /* Get indexed register values. */
+ ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
+ INTR_STATES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->intr_states),
+ "INTR States");
+ ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
+ CAM_ENTRIES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->cam_entries),
+ "CAM Entries");
+ status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
+ if (status)
+ return;
+
+ ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
+ ROUTING_WORDS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic_routing_words),
+ "Routing Words");
+ status = ql_get_routing_entries(qdev,
+ &mpi_coredump->nic_routing_words[0]);
+ if (status)
+ return;
+
+ /* Segment 34 (Rev C. step 23) */
+ ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
+ ETS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->ets),
+ "ETS Registers");
+ status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
+ if (status)
+ return;
+}
+
+void ql_get_dump(struct ql_adapter *qdev, void *buff)
+{
+	/*
+	 * If force dump (QL_FRC_COREDUMP) is not set, take a complete
+	 * core dump into the user's buffer and then soft reset the
+	 * RISC.  Otherwise, snapshot only the general registers into
+	 * the user's buffer and queue a firmware core dump, which the
+	 * worker thread spools to the log file.
+	 */
+
+ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
+ if (!ql_core_dump(qdev, buff))
+ ql_soft_reset_mpi_risc(qdev);
+ else
+ netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
+ } else {
+ ql_gen_reg_dump(qdev, buff);
+ ql_get_core_dump(qdev);
+ }
+}
+
+/* Coredump to messages log file using separate worker thread */
+void ql_mpi_core_to_log(struct work_struct *work)
+{
+ struct ql_adapter *qdev =
+ container_of(work, struct ql_adapter, mpi_core_to_log.work);
+ u32 *tmp, count;
+ int i;
+
+ count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
+ tmp = (u32 *)qdev->mpi_coredump;
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "Core is dumping to log file!\n");
+
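+	/* Print eight words per line and sleep briefly between lines so
+	 * the console and log buffer can keep up with the dump.
+	 */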
+ for (i = 0; i < count; i += 8) {
+ pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x "
+ "%.08x %.08x %.08x\n", i,
+ tmp[i + 0],
+ tmp[i + 1],
+ tmp[i + 2],
+ tmp[i + 3],
+ tmp[i + 4],
+ tmp[i + 5],
+ tmp[i + 6],
+ tmp[i + 7]);
+ msleep(5);
+ }
+}
+
+#ifdef QL_REG_DUMP
+static void ql_dump_intr_states(struct ql_adapter *qdev)
+{
+ int i;
+ u32 value;
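+
+	/* Writing a vector's read mask to INTR_EN selects that vector;
+	 * reading INTR_EN back then reports its enable state.
+	 */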
+ for (i = 0; i < qdev->intr_count; i++) {
+ ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
+ value = ql_read32(qdev, INTR_EN);
+ pr_err("%s: Interrupt %d is %s\n",
+ qdev->ndev->name, i,
+ (value & INTR_EN_EN ? "enabled" : "disabled"));
+ }
+}
+
+#define DUMP_XGMAC(qdev, reg) \
+do { \
+ u32 data; \
+ ql_read_xgmac_reg(qdev, reg, &data); \
+ pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \
+} while (0)
+
+void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
+{
+ if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
+ pr_err("%s: Couldn't get xgmac sem\n", __func__);
+ return;
+ }
+ DUMP_XGMAC(qdev, PAUSE_SRC_LO);
+ DUMP_XGMAC(qdev, PAUSE_SRC_HI);
+ DUMP_XGMAC(qdev, GLOBAL_CFG);
+ DUMP_XGMAC(qdev, TX_CFG);
+ DUMP_XGMAC(qdev, RX_CFG);
+ DUMP_XGMAC(qdev, FLOW_CTL);
+ DUMP_XGMAC(qdev, PAUSE_OPCODE);
+ DUMP_XGMAC(qdev, PAUSE_TIMER);
+ DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO);
+ DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI);
+ DUMP_XGMAC(qdev, MAC_TX_PARAMS);
+ DUMP_XGMAC(qdev, MAC_RX_PARAMS);
+ DUMP_XGMAC(qdev, MAC_SYS_INT);
+ DUMP_XGMAC(qdev, MAC_SYS_INT_MASK);
+ DUMP_XGMAC(qdev, MAC_MGMT_INT);
+ DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK);
+ DUMP_XGMAC(qdev, EXT_ARB_MODE);
+ ql_sem_unlock(qdev, qdev->xg_sem_mask);
+}
+
+static void ql_dump_ets_regs(struct ql_adapter *qdev)
+{
+}
+
+static void ql_dump_cam_entries(struct ql_adapter *qdev)
+{
+ int i;
+ u32 value[3];
+
+ i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (i)
+ return;
+ for (i = 0; i < 4; i++) {
+ if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
+ pr_err("%s: Failed read of mac index register\n",
+ __func__);
+			goto exit;
+ } else {
+ if (value[0])
+ pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
+ qdev->ndev->name, i, value[1], value[0],
+ value[2]);
+ }
+ }
+ for (i = 0; i < 32; i++) {
+ if (ql_get_mac_addr_reg
+ (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
+ pr_err("%s: Failed read of mac index register\n",
+ __func__);
+			goto exit;
+ } else {
+ if (value[0])
+ pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
+ qdev->ndev->name, i, value[1], value[0]);
+ }
+ }
+exit:
+	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+}
+
+void ql_dump_routing_entries(struct ql_adapter *qdev)
+{
+ int i;
+ u32 value;
+ i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (i)
+ return;
+ for (i = 0; i < 16; i++) {
+ value = 0;
+ if (ql_get_routing_reg(qdev, i, &value)) {
+ pr_err("%s: Failed read of routing index register\n",
+ __func__);
+			goto exit;
+ } else {
+ if (value)
+ pr_err("%s: Routing Mask %d = 0x%.08x\n",
+ qdev->ndev->name, i, value);
+ }
+ }
+exit:
+	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+}
+
+#define DUMP_REG(qdev, reg) \
+ pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))
+
+void ql_dump_regs(struct ql_adapter *qdev)
+{
+ pr_err("reg dump for function #%d\n", qdev->func);
+ DUMP_REG(qdev, SYS);
+ DUMP_REG(qdev, RST_FO);
+ DUMP_REG(qdev, FSC);
+ DUMP_REG(qdev, CSR);
+ DUMP_REG(qdev, ICB_RID);
+ DUMP_REG(qdev, ICB_L);
+ DUMP_REG(qdev, ICB_H);
+ DUMP_REG(qdev, CFG);
+ DUMP_REG(qdev, BIOS_ADDR);
+ DUMP_REG(qdev, STS);
+ DUMP_REG(qdev, INTR_EN);
+ DUMP_REG(qdev, INTR_MASK);
+ DUMP_REG(qdev, ISR1);
+ DUMP_REG(qdev, ISR2);
+ DUMP_REG(qdev, ISR3);
+ DUMP_REG(qdev, ISR4);
+ DUMP_REG(qdev, REV_ID);
+ DUMP_REG(qdev, FRC_ECC_ERR);
+ DUMP_REG(qdev, ERR_STS);
+ DUMP_REG(qdev, RAM_DBG_ADDR);
+ DUMP_REG(qdev, RAM_DBG_DATA);
+ DUMP_REG(qdev, ECC_ERR_CNT);
+ DUMP_REG(qdev, SEM);
+ DUMP_REG(qdev, GPIO_1);
+ DUMP_REG(qdev, GPIO_2);
+ DUMP_REG(qdev, GPIO_3);
+ DUMP_REG(qdev, XGMAC_ADDR);
+ DUMP_REG(qdev, XGMAC_DATA);
+ DUMP_REG(qdev, NIC_ETS);
+ DUMP_REG(qdev, CNA_ETS);
+ DUMP_REG(qdev, FLASH_ADDR);
+ DUMP_REG(qdev, FLASH_DATA);
+ DUMP_REG(qdev, CQ_STOP);
+ DUMP_REG(qdev, PAGE_TBL_RID);
+ DUMP_REG(qdev, WQ_PAGE_TBL_LO);
+ DUMP_REG(qdev, WQ_PAGE_TBL_HI);
+ DUMP_REG(qdev, CQ_PAGE_TBL_LO);
+ DUMP_REG(qdev, CQ_PAGE_TBL_HI);
+ DUMP_REG(qdev, COS_DFLT_CQ1);
+ DUMP_REG(qdev, COS_DFLT_CQ2);
+ DUMP_REG(qdev, SPLT_HDR);
+ DUMP_REG(qdev, FC_PAUSE_THRES);
+ DUMP_REG(qdev, NIC_PAUSE_THRES);
+ DUMP_REG(qdev, FC_ETHERTYPE);
+ DUMP_REG(qdev, FC_RCV_CFG);
+ DUMP_REG(qdev, NIC_RCV_CFG);
+ DUMP_REG(qdev, FC_COS_TAGS);
+ DUMP_REG(qdev, NIC_COS_TAGS);
+ DUMP_REG(qdev, MGMT_RCV_CFG);
+ DUMP_REG(qdev, XG_SERDES_ADDR);
+ DUMP_REG(qdev, XG_SERDES_DATA);
+ DUMP_REG(qdev, PRB_MX_ADDR);
+ DUMP_REG(qdev, PRB_MX_DATA);
+ ql_dump_intr_states(qdev);
+ ql_dump_xgmac_control_regs(qdev);
+ ql_dump_ets_regs(qdev);
+ ql_dump_cam_entries(qdev);
+ ql_dump_routing_entries(qdev);
+}
+#endif
+
+#ifdef QL_STAT_DUMP
+
+#define DUMP_STAT(qdev, stat) \
+ pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat)
+
+void ql_dump_stat(struct ql_adapter *qdev)
+{
+ pr_err("%s: Enter\n", __func__);
+ DUMP_STAT(qdev, tx_pkts);
+ DUMP_STAT(qdev, tx_bytes);
+ DUMP_STAT(qdev, tx_mcast_pkts);
+ DUMP_STAT(qdev, tx_bcast_pkts);
+ DUMP_STAT(qdev, tx_ucast_pkts);
+ DUMP_STAT(qdev, tx_ctl_pkts);
+ DUMP_STAT(qdev, tx_pause_pkts);
+ DUMP_STAT(qdev, tx_64_pkt);
+ DUMP_STAT(qdev, tx_65_to_127_pkt);
+ DUMP_STAT(qdev, tx_128_to_255_pkt);
+ DUMP_STAT(qdev, tx_256_511_pkt);
+ DUMP_STAT(qdev, tx_512_to_1023_pkt);
+ DUMP_STAT(qdev, tx_1024_to_1518_pkt);
+ DUMP_STAT(qdev, tx_1519_to_max_pkt);
+ DUMP_STAT(qdev, tx_undersize_pkt);
+ DUMP_STAT(qdev, tx_oversize_pkt);
+ DUMP_STAT(qdev, rx_bytes);
+ DUMP_STAT(qdev, rx_bytes_ok);
+ DUMP_STAT(qdev, rx_pkts);
+ DUMP_STAT(qdev, rx_pkts_ok);
+ DUMP_STAT(qdev, rx_bcast_pkts);
+ DUMP_STAT(qdev, rx_mcast_pkts);
+ DUMP_STAT(qdev, rx_ucast_pkts);
+ DUMP_STAT(qdev, rx_undersize_pkts);
+ DUMP_STAT(qdev, rx_oversize_pkts);
+ DUMP_STAT(qdev, rx_jabber_pkts);
+ DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
+ DUMP_STAT(qdev, rx_drop_events);
+ DUMP_STAT(qdev, rx_fcerr_pkts);
+ DUMP_STAT(qdev, rx_align_err);
+ DUMP_STAT(qdev, rx_symbol_err);
+ DUMP_STAT(qdev, rx_mac_err);
+ DUMP_STAT(qdev, rx_ctl_pkts);
+ DUMP_STAT(qdev, rx_pause_pkts);
+ DUMP_STAT(qdev, rx_64_pkts);
+ DUMP_STAT(qdev, rx_65_to_127_pkts);
+ DUMP_STAT(qdev, rx_128_255_pkts);
+ DUMP_STAT(qdev, rx_256_511_pkts);
+ DUMP_STAT(qdev, rx_512_to_1023_pkts);
+ DUMP_STAT(qdev, rx_1024_to_1518_pkts);
+ DUMP_STAT(qdev, rx_1519_to_max_pkts);
+ DUMP_STAT(qdev, rx_len_err_pkts);
+}
+#endif
+
+#ifdef QL_DEV_DUMP
+
+#define DUMP_QDEV_FIELD(qdev, type, field) \
+ pr_err("qdev->%-24s = " type "\n", #field, qdev->field)
+#define DUMP_QDEV_DMA_FIELD(qdev, field) \
+ pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)
+#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
+ pr_err("%s[%d].%s = " type "\n", \
+ #array, index, #field, qdev->array[index].field);
+void ql_dump_qdev(struct ql_adapter *qdev)
+{
+ int i;
+ DUMP_QDEV_FIELD(qdev, "%lx", flags);
+ DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
+ DUMP_QDEV_FIELD(qdev, "%p", pdev);
+ DUMP_QDEV_FIELD(qdev, "%p", ndev);
+ DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
+ DUMP_QDEV_FIELD(qdev, "%p", reg_base);
+ DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
+ DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);
+ DUMP_QDEV_FIELD(qdev, "%x", msg_enable);
+ DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
+ DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
+ DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
+ DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
+ DUMP_QDEV_FIELD(qdev, "%d", intr_count);
+ if (qdev->msi_x_entry)
+ for (i = 0; i < qdev->intr_count; i++) {
+ DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
+ DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);
+ }
+ for (i = 0; i < qdev->intr_count; i++) {
+ DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev);
+ DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr);
+ DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked);
+ DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
+ DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
+ DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);
+ }
+ DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
+ DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
+ DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
+ DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
+ DUMP_QDEV_FIELD(qdev, "%d", intr_count);
+ DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
+ DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
+ DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
+ DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
+ DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
+ DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
+ DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
+}
+#endif
+
+#ifdef QL_CB_DUMP
+void ql_dump_wqicb(struct wqicb *wqicb)
+{
+ pr_err("Dumping wqicb stuff...\n");
+ pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
+ pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags));
+ pr_err("wqicb->cq_id_rss = %d\n",
+ le16_to_cpu(wqicb->cq_id_rss));
+ pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
+ pr_err("wqicb->wq_addr = 0x%llx\n",
+ (unsigned long long) le64_to_cpu(wqicb->addr));
+ pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
+ (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
+}
+
+void ql_dump_tx_ring(struct tx_ring *tx_ring)
+{
+ if (tx_ring == NULL)
+ return;
+ pr_err("===================== Dumping tx_ring %d ===============\n",
+ tx_ring->wq_id);
+ pr_err("tx_ring->base = %p\n", tx_ring->wq_base);
+ pr_err("tx_ring->base_dma = 0x%llx\n",
+ (unsigned long long) tx_ring->wq_base_dma);
+ pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
+ tx_ring->cnsmr_idx_sh_reg,
+ tx_ring->cnsmr_idx_sh_reg
+ ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
+ pr_err("tx_ring->size = %d\n", tx_ring->wq_size);
+ pr_err("tx_ring->len = %d\n", tx_ring->wq_len);
+ pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
+ pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
+ pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
+ pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id);
+ pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id);
+ pr_err("tx_ring->q = %p\n", tx_ring->q);
+ pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
+}
+
+void ql_dump_ricb(struct ricb *ricb)
+{
+ int i;
+ pr_err("===================== Dumping ricb ===============\n");
+ pr_err("Dumping ricb stuff...\n");
+
+ pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
+ pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n",
+ ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
+ ricb->flags & RSS_L6K ? "RSS_L6K " : "",
+ ricb->flags & RSS_LI ? "RSS_LI " : "",
+ ricb->flags & RSS_LB ? "RSS_LB " : "",
+ ricb->flags & RSS_LM ? "RSS_LM " : "",
+ ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
+ ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
+ ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
+ ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
+ pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
+ for (i = 0; i < 16; i++)
+ pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i,
+ le32_to_cpu(ricb->hash_cq_id[i]));
+ for (i = 0; i < 10; i++)
+ pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,
+ le32_to_cpu(ricb->ipv6_hash_key[i]));
+ for (i = 0; i < 4; i++)
+ pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
+ le32_to_cpu(ricb->ipv4_hash_key[i]));
+}
+
+void ql_dump_cqicb(struct cqicb *cqicb)
+{
+ pr_err("Dumping cqicb stuff...\n");
+
+ pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect);
+ pr_err("cqicb->flags = %x\n", cqicb->flags);
+ pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len));
+ pr_err("cqicb->addr = 0x%llx\n",
+ (unsigned long long) le64_to_cpu(cqicb->addr));
+ pr_err("cqicb->prod_idx_addr = 0x%llx\n",
+ (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
+ pr_err("cqicb->pkt_delay = 0x%.04x\n",
+ le16_to_cpu(cqicb->pkt_delay));
+ pr_err("cqicb->irq_delay = 0x%.04x\n",
+ le16_to_cpu(cqicb->irq_delay));
+ pr_err("cqicb->lbq_addr = 0x%llx\n",
+ (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
+ pr_err("cqicb->lbq_buf_size = 0x%.04x\n",
+ le16_to_cpu(cqicb->lbq_buf_size));
+ pr_err("cqicb->lbq_len = 0x%.04x\n",
+ le16_to_cpu(cqicb->lbq_len));
+ pr_err("cqicb->sbq_addr = 0x%llx\n",
+ (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
+ pr_err("cqicb->sbq_buf_size = 0x%.04x\n",
+ le16_to_cpu(cqicb->sbq_buf_size));
+ pr_err("cqicb->sbq_len = 0x%.04x\n",
+ le16_to_cpu(cqicb->sbq_len));
+}
+
+void ql_dump_rx_ring(struct rx_ring *rx_ring)
+{
+ if (rx_ring == NULL)
+ return;
+ pr_err("===================== Dumping rx_ring %d ===============\n",
+ rx_ring->cq_id);
+ pr_err("Dumping rx_ring %d, type = %s%s%s\n",
+ rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
+ rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
+ rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
+ pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
+ pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
+ pr_err("rx_ring->cq_base_dma = %llx\n",
+ (unsigned long long) rx_ring->cq_base_dma);
+ pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size);
+ pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len);
+ pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
+ rx_ring->prod_idx_sh_reg,
+ rx_ring->prod_idx_sh_reg
+ ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
+ pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n",
+ (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
+ pr_err("rx_ring->cnsmr_idx_db_reg = %p\n",
+ rx_ring->cnsmr_idx_db_reg);
+ pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
+ pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
+ pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);
+
+ pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base);
+ pr_err("rx_ring->lbq_base_dma = %llx\n",
+ (unsigned long long) rx_ring->lbq_base_dma);
+ pr_err("rx_ring->lbq_base_indirect = %p\n",
+ rx_ring->lbq_base_indirect);
+ pr_err("rx_ring->lbq_base_indirect_dma = %llx\n",
+ (unsigned long long) rx_ring->lbq_base_indirect_dma);
+ pr_err("rx_ring->lbq = %p\n", rx_ring->lbq);
+ pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len);
+ pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size);
+ pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n",
+ rx_ring->lbq_prod_idx_db_reg);
+ pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx);
+ pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx);
+ pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
+ pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
+ pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size);
+
+ pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base);
+ pr_err("rx_ring->sbq_base_dma = %llx\n",
+ (unsigned long long) rx_ring->sbq_base_dma);
+ pr_err("rx_ring->sbq_base_indirect = %p\n",
+ rx_ring->sbq_base_indirect);
+ pr_err("rx_ring->sbq_base_indirect_dma = %llx\n",
+ (unsigned long long) rx_ring->sbq_base_indirect_dma);
+ pr_err("rx_ring->sbq = %p\n", rx_ring->sbq);
+ pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len);
+ pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size);
+ pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n",
+ rx_ring->sbq_prod_idx_db_reg);
+ pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx);
+ pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx);
+ pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx);
+ pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt);
+ pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size);
+ pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
+ pr_err("rx_ring->irq = %d\n", rx_ring->irq);
+ pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
+ pr_err("rx_ring->qdev = %p\n", rx_ring->qdev);
+}
+
+void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
+{
+ void *ptr;
+
+ pr_err("%s: Enter\n", __func__);
+
+ ptr = kmalloc(size, GFP_ATOMIC);
+ if (ptr == NULL) {
+ pr_err("%s: Couldn't allocate a buffer\n", __func__);
+ return;
+ }
+
+ if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
+ pr_err("%s: Failed to upload control block!\n", __func__);
+ goto fail_it;
+ }
+ switch (bit) {
+ case CFG_DRQ:
+ ql_dump_wqicb((struct wqicb *)ptr);
+ break;
+ case CFG_DCQ:
+ ql_dump_cqicb((struct cqicb *)ptr);
+ break;
+ case CFG_DR:
+ ql_dump_ricb((struct ricb *)ptr);
+ break;
+ default:
+ pr_err("%s: Invalid bit value = %x\n", __func__, bit);
+ break;
+ }
+fail_it:
+ kfree(ptr);
+}
+#endif
+
+#ifdef QL_OB_DUMP
+void ql_dump_tx_desc(struct tx_buf_desc *tbd)
+{
+	int i;
+
+	/* Dump the first three buffer descriptors of the IOCB. */
+	for (i = 0; i < 3; i++, tbd++) {
+		pr_err("tbd->addr = 0x%llx\n",
+		       le64_to_cpu((u64) tbd->addr));
+		pr_err("tbd->len = %d\n",
+		       le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
+		pr_err("tbd->flags = %s %s\n",
+		       tbd->len & TX_DESC_C ? "C" : ".",
+		       tbd->len & TX_DESC_E ? "E" : ".");
+	}
+}
+
+void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
+{
+ struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
+ (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
+ struct tx_buf_desc *tbd;
+ u16 frame_len;
+
+ pr_err("%s\n", __func__);
+ pr_err("opcode = %s\n",
+ (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
+ pr_err("flags1 = %s %s %s %s %s\n",
+ ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
+ ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
+ ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
+ ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
+ ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
+ pr_err("flags2 = %s %s %s\n",
+ ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
+ ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
+ ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
+ pr_err("flags3 = %s %s %s\n",
+ ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
+ ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
+ ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
+ pr_err("tid = %x\n", ob_mac_iocb->tid);
+ pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx);
+ pr_err("vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
+ if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
+ pr_err("frame_len = %d\n",
+ le32_to_cpu(ob_mac_tso_iocb->frame_len));
+ pr_err("mss = %d\n",
+ le16_to_cpu(ob_mac_tso_iocb->mss));
+ pr_err("prot_hdr_len = %d\n",
+ le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
+ pr_err("hdr_offset = 0x%.04x\n",
+ le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
+ frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
+ } else {
+ pr_err("frame_len = %d\n",
+ le16_to_cpu(ob_mac_iocb->frame_len));
+ frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
+ }
+ tbd = &ob_mac_iocb->tbd[0];
+ ql_dump_tx_desc(tbd);
+}
+
+void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
+{
+ pr_err("%s\n", __func__);
+ pr_err("opcode = %d\n", ob_mac_rsp->opcode);
+ pr_err("flags = %s %s %s %s %s %s %s\n",
+ ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
+ ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
+ ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
+ ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
+ ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
+ ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
+ ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
+ pr_err("tid = %x\n", ob_mac_rsp->tid);
+}
+#endif
+
+#ifdef QL_IB_DUMP
+void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+ pr_err("%s\n", __func__);
+ pr_err("opcode = 0x%x\n", ib_mac_rsp->opcode);
+ pr_err("flags1 = %s%s%s%s%s%s\n",
+ ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
+ ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
+ ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
+ ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
+ ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
+ ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
+
+ if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
+ pr_err("%s%s%s Multicast\n",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+
+ pr_err("flags2 = %s%s%s%s%s\n",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
+
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
+ pr_err("%s%s%s%s%s error\n",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+ IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+ IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+ IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+ IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+ IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
+
+ pr_err("flags3 = %s%s\n",
+ ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
+ ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
+
+ if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
+ pr_err("RSS flags = %s%s%s%s\n",
+ ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+ IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
+ ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+ IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
+ ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+ IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
+ ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+ IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
+
+ pr_err("data_len = %d\n",
+ le32_to_cpu(ib_mac_rsp->data_len));
+ pr_err("data_addr = 0x%llx\n",
+ (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
+ if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
+ pr_err("rss = %x\n",
+ le32_to_cpu(ib_mac_rsp->rss));
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
+ pr_err("vlan_id = %x\n",
+ le16_to_cpu(ib_mac_rsp->vlan_id));
+
+ pr_err("flags4 = %s%s%s\n",
+ ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
+ ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
+ ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
+
+ if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
+ pr_err("hdr length = %d\n",
+ le32_to_cpu(ib_mac_rsp->hdr_len));
+ pr_err("hdr addr = 0x%llx\n",
+ (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
+ }
+}
+#endif
+
+#ifdef QL_ALL_DUMP
+void ql_dump_all(struct ql_adapter *qdev)
+{
+ int i;
+
+ QL_DUMP_REGS(qdev);
+ QL_DUMP_QDEV(qdev);
+ for (i = 0; i < qdev->tx_ring_count; i++) {
+ QL_DUMP_TX_RING(&qdev->tx_ring[i]);
+ QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
+ }
+ for (i = 0; i < qdev->rx_ring_count; i++) {
+ QL_DUMP_RX_RING(&qdev->rx_ring[i]);
+ QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
+ }
+}
+#endif
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
new file mode 100644
index 000000000000..9b67bfea035f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -0,0 +1,688 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+
+#include "qlge.h"
+
+static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Loopback test (offline)"
+};
+#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
+
+static int ql_update_ring_coalescing(struct ql_adapter *qdev)
+{
+ int i, status = 0;
+ struct rx_ring *rx_ring;
+ struct cqicb *cqicb;
+
+ if (!netif_running(qdev->ndev))
+ return status;
+
+ /* Skip the default queue, and update the outbound handler
+ * queues if they changed.
+ */
+ cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];
+ if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
+ le16_to_cpu(cqicb->pkt_delay) !=
+ qdev->tx_max_coalesced_frames) {
+ for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
+ rx_ring = &qdev->rx_ring[i];
+ cqicb = (struct cqicb *)rx_ring;
+ cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
+ cqicb->pkt_delay =
+ cpu_to_le16(qdev->tx_max_coalesced_frames);
+ cqicb->flags = FLAGS_LI;
+ status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
+ CFG_LCQ, rx_ring->cq_id);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to load CQICB.\n");
+ goto exit;
+ }
+ }
+ }
+
+ /* Update the inbound (RSS) handler queues if they changed. */
+ cqicb = (struct cqicb *)&qdev->rx_ring[0];
+ if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
+ le16_to_cpu(cqicb->pkt_delay) !=
+ qdev->rx_max_coalesced_frames) {
+		for (i = 0; i < qdev->rss_ring_count; i++) {
+ rx_ring = &qdev->rx_ring[i];
+ cqicb = (struct cqicb *)rx_ring;
+ cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
+ cqicb->pkt_delay =
+ cpu_to_le16(qdev->rx_max_coalesced_frames);
+ cqicb->flags = FLAGS_LI;
+ status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
+ CFG_LCQ, rx_ring->cq_id);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to load CQICB.\n");
+ goto exit;
+ }
+ }
+ }
+exit:
+ return status;
+}
+
+static void ql_update_stats(struct ql_adapter *qdev)
+{
+ u32 i;
+ u64 data;
+ u64 *iter = &qdev->nic_stats.tx_pkts;
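+	/* iter walks the nic_stats counters in declaration order; the
+	 * register loops below assume that order matches the XGMAC
+	 * statistics register layout.
+	 */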
+
+ spin_lock(&qdev->stats_lock);
+ if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Couldn't get xgmac sem.\n");
+ goto quit;
+ }
+ /*
+ * Get TX statistics.
+ */
+ for (i = 0x200; i < 0x280; i += 8) {
+ if (ql_read_xgmac_reg64(qdev, i, &data)) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
+ goto end;
+ } else
+ *iter = data;
+ iter++;
+ }
+
+ /*
+ * Get RX statistics.
+ */
+ for (i = 0x300; i < 0x3d0; i += 8) {
+ if (ql_read_xgmac_reg64(qdev, i, &data)) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
+ goto end;
+ } else
+ *iter = data;
+ iter++;
+ }
+
+ /*
+ * Get Per-priority TX pause frame counter statistics.
+ */
+ for (i = 0x500; i < 0x540; i += 8) {
+ if (ql_read_xgmac_reg64(qdev, i, &data)) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
+ goto end;
+ } else
+ *iter = data;
+ iter++;
+ }
+
+ /*
+ * Get Per-priority RX pause frame counter statistics.
+ */
+ for (i = 0x568; i < 0x5a8; i += 8) {
+ if (ql_read_xgmac_reg64(qdev, i, &data)) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
+ goto end;
+ } else
+ *iter = data;
+ iter++;
+ }
+
+ /*
+ * Get RX NIC FIFO DROP statistics.
+ */
+ if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n", i);
+ goto end;
+ } else
+ *iter = data;
+end:
+ ql_sem_unlock(qdev, qdev->xg_sem_mask);
+quit:
+ spin_unlock(&qdev->stats_lock);
+
+ QL_DUMP_STAT(qdev);
+}
+
+static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
+ {"tx_pkts"},
+ {"tx_bytes"},
+ {"tx_mcast_pkts"},
+ {"tx_bcast_pkts"},
+ {"tx_ucast_pkts"},
+ {"tx_ctl_pkts"},
+ {"tx_pause_pkts"},
+ {"tx_64_pkts"},
+ {"tx_65_to_127_pkts"},
+ {"tx_128_to_255_pkts"},
+ {"tx_256_511_pkts"},
+ {"tx_512_to_1023_pkts"},
+ {"tx_1024_to_1518_pkts"},
+ {"tx_1519_to_max_pkts"},
+ {"tx_undersize_pkts"},
+ {"tx_oversize_pkts"},
+ {"rx_bytes"},
+ {"rx_bytes_ok"},
+ {"rx_pkts"},
+ {"rx_pkts_ok"},
+ {"rx_bcast_pkts"},
+ {"rx_mcast_pkts"},
+ {"rx_ucast_pkts"},
+ {"rx_undersize_pkts"},
+ {"rx_oversize_pkts"},
+ {"rx_jabber_pkts"},
+ {"rx_undersize_fcerr_pkts"},
+ {"rx_drop_events"},
+ {"rx_fcerr_pkts"},
+ {"rx_align_err"},
+ {"rx_symbol_err"},
+ {"rx_mac_err"},
+ {"rx_ctl_pkts"},
+ {"rx_pause_pkts"},
+ {"rx_64_pkts"},
+ {"rx_65_to_127_pkts"},
+ {"rx_128_255_pkts"},
+ {"rx_256_511_pkts"},
+ {"rx_512_to_1023_pkts"},
+ {"rx_1024_to_1518_pkts"},
+ {"rx_1519_to_max_pkts"},
+ {"rx_len_err_pkts"},
+ {"tx_cbfc_pause_frames0"},
+ {"tx_cbfc_pause_frames1"},
+ {"tx_cbfc_pause_frames2"},
+ {"tx_cbfc_pause_frames3"},
+ {"tx_cbfc_pause_frames4"},
+ {"tx_cbfc_pause_frames5"},
+ {"tx_cbfc_pause_frames6"},
+ {"tx_cbfc_pause_frames7"},
+ {"rx_cbfc_pause_frames0"},
+ {"rx_cbfc_pause_frames1"},
+ {"rx_cbfc_pause_frames2"},
+ {"rx_cbfc_pause_frames3"},
+ {"rx_cbfc_pause_frames4"},
+ {"rx_cbfc_pause_frames5"},
+ {"rx_cbfc_pause_frames6"},
+ {"rx_cbfc_pause_frames7"},
+ {"rx_nic_fifo_drop"},
+};
+
+static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr));
+ break;
+ }
+}
+
+static int ql_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return QLGE_TEST_LEN;
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(ql_stats_str_arr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+ql_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ struct nic_stats *s = &qdev->nic_stats;
+
+ ql_update_stats(qdev);
+
+ *data++ = s->tx_pkts;
+ *data++ = s->tx_bytes;
+ *data++ = s->tx_mcast_pkts;
+ *data++ = s->tx_bcast_pkts;
+ *data++ = s->tx_ucast_pkts;
+ *data++ = s->tx_ctl_pkts;
+ *data++ = s->tx_pause_pkts;
+ *data++ = s->tx_64_pkt;
+ *data++ = s->tx_65_to_127_pkt;
+ *data++ = s->tx_128_to_255_pkt;
+ *data++ = s->tx_256_511_pkt;
+ *data++ = s->tx_512_to_1023_pkt;
+ *data++ = s->tx_1024_to_1518_pkt;
+ *data++ = s->tx_1519_to_max_pkt;
+ *data++ = s->tx_undersize_pkt;
+ *data++ = s->tx_oversize_pkt;
+ *data++ = s->rx_bytes;
+ *data++ = s->rx_bytes_ok;
+ *data++ = s->rx_pkts;
+ *data++ = s->rx_pkts_ok;
+ *data++ = s->rx_bcast_pkts;
+ *data++ = s->rx_mcast_pkts;
+ *data++ = s->rx_ucast_pkts;
+ *data++ = s->rx_undersize_pkts;
+ *data++ = s->rx_oversize_pkts;
+ *data++ = s->rx_jabber_pkts;
+ *data++ = s->rx_undersize_fcerr_pkts;
+ *data++ = s->rx_drop_events;
+ *data++ = s->rx_fcerr_pkts;
+ *data++ = s->rx_align_err;
+ *data++ = s->rx_symbol_err;
+ *data++ = s->rx_mac_err;
+ *data++ = s->rx_ctl_pkts;
+ *data++ = s->rx_pause_pkts;
+ *data++ = s->rx_64_pkts;
+ *data++ = s->rx_65_to_127_pkts;
+ *data++ = s->rx_128_255_pkts;
+ *data++ = s->rx_256_511_pkts;
+ *data++ = s->rx_512_to_1023_pkts;
+ *data++ = s->rx_1024_to_1518_pkts;
+ *data++ = s->rx_1519_to_max_pkts;
+ *data++ = s->rx_len_err_pkts;
+ *data++ = s->tx_cbfc_pause_frames0;
+ *data++ = s->tx_cbfc_pause_frames1;
+ *data++ = s->tx_cbfc_pause_frames2;
+ *data++ = s->tx_cbfc_pause_frames3;
+ *data++ = s->tx_cbfc_pause_frames4;
+ *data++ = s->tx_cbfc_pause_frames5;
+ *data++ = s->tx_cbfc_pause_frames6;
+ *data++ = s->tx_cbfc_pause_frames7;
+ *data++ = s->rx_cbfc_pause_frames0;
+ *data++ = s->rx_cbfc_pause_frames1;
+ *data++ = s->rx_cbfc_pause_frames2;
+ *data++ = s->rx_cbfc_pause_frames3;
+ *data++ = s->rx_cbfc_pause_frames4;
+ *data++ = s->rx_cbfc_pause_frames5;
+ *data++ = s->rx_cbfc_pause_frames6;
+ *data++ = s->rx_cbfc_pause_frames7;
+ *data++ = s->rx_nic_fifo_drop;
+}
+
+static int ql_get_settings(struct net_device *ndev,
+ struct ethtool_cmd *ecmd)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ ecmd->autoneg = AUTONEG_ENABLE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
+ STS_LINK_TYPE_10GBASET) {
+ ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+ ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
+ ecmd->port = PORT_TP;
+ } else {
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
+ }
+
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ ecmd->duplex = DUPLEX_FULL;
+
+ return 0;
+}
+
+static void ql_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ strncpy(drvinfo->driver, qlge_driver_name, 32);
+ strncpy(drvinfo->version, qlge_driver_version, 32);
+ snprintf(drvinfo->fw_version, 32, "v%d.%d.%d",
+ (qdev->fw_rev_id & 0x00ff0000) >> 16,
+ (qdev->fw_rev_id & 0x0000ff00) >> 8,
+ (qdev->fw_rev_id & 0x000000ff));
+ strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+ drvinfo->n_stats = 0;
+ drvinfo->testinfo_len = 0;
+ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
+ else
+ drvinfo->regdump_len = sizeof(struct ql_reg_dump);
+ drvinfo->eedump_len = 0;
+}
+
+static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ /* What we support. */
+ wol->supported = WAKE_MAGIC;
+ /* What we've currently got set. */
+ wol->wolopts = qdev->wol;
+}
+
+static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int status;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+ qdev->wol = wol->wolopts;
+
+ netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
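+	/* Clearing WOL takes effect immediately via the mailbox; enabling
+	 * it is deferred to the WOL setup done at suspend/shutdown time.
+	 */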
+ if (!qdev->wol) {
+ u32 wol = 0;
+ status = ql_mb_wol_mode(qdev, wol);
+ netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n",
+ status == 0 ? "cleared successfully" : "clear failed",
+ wol);
+ }
+
+ return 0;
+}
+
+static int ql_set_phys_id(struct net_device *ndev,
+ enum ethtool_phys_id_state state)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ /* Save the current LED settings */
+ if (ql_mb_get_led_cfg(qdev))
+ return -EIO;
+
+ /* Start blinking */
+ ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
+ return 0;
+
+ case ETHTOOL_ID_INACTIVE:
+ /* Restore LED settings */
+ if (ql_mb_set_led_cfg(qdev, qdev->led_config))
+ return -EIO;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ql_start_loopback(struct ql_adapter *qdev)
+{
+ if (netif_carrier_ok(qdev->ndev)) {
+ set_bit(QL_LB_LINK_UP, &qdev->flags);
+ netif_carrier_off(qdev->ndev);
+	} else {
+		clear_bit(QL_LB_LINK_UP, &qdev->flags);
+	}
+ qdev->link_config |= CFG_LOOPBACK_PCS;
+ return ql_mb_set_port_cfg(qdev);
+}
+
+static void ql_stop_loopback(struct ql_adapter *qdev)
+{
+ qdev->link_config &= ~CFG_LOOPBACK_PCS;
+ ql_mb_set_port_cfg(qdev);
+ if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
+ netif_carrier_on(qdev->ndev);
+ clear_bit(QL_LB_LINK_UP, &qdev->flags);
+ }
+}
+
+static void ql_create_lb_frame(struct sk_buff *skb,
+ unsigned int frame_size)
+{
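+	/* Known pattern: 0xFF in the first half, 0xAA in the second half,
+	 * plus the 0xBE/0xAF marker bytes that ql_check_lb_frame()
+	 * verifies on receive.
+	 */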
+ memset(skb->data, 0xFF, frame_size);
+ frame_size &= ~1;
+ memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+ memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+ memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
+
+void ql_check_lb_frame(struct ql_adapter *qdev,
+ struct sk_buff *skb)
+{
+ unsigned int frame_size = skb->len;
+
+ if ((*(skb->data + 3) == 0xFF) &&
+ (*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+ (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+ atomic_dec(&qdev->lb_count);
+ return;
+ }
+}
+
+static int ql_run_loopback_test(struct ql_adapter *qdev)
+{
+ int i;
+ netdev_tx_t rc;
+ struct sk_buff *skb;
+ unsigned int size = SMALL_BUF_MAP_SIZE;
+
+ for (i = 0; i < 64; i++) {
+ skb = netdev_alloc_skb(qdev->ndev, size);
+ if (!skb)
+ return -ENOMEM;
+
+ skb->queue_mapping = 0;
+ skb_put(skb, size);
+ ql_create_lb_frame(skb, size);
+ rc = ql_lb_send(skb, qdev->ndev);
+ if (rc != NETDEV_TX_OK)
+ return -EPIPE;
+ atomic_inc(&qdev->lb_count);
+ }
+ /* Give queue time to settle before testing results. */
+ msleep(2);
+ ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
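+	/* ql_check_lb_frame() decrements lb_count once per verified frame;
+	 * anything left over means frames were lost or corrupted.
+	 */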
+ return atomic_read(&qdev->lb_count) ? -EIO : 0;
+}
+
+static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
+{
+ *data = ql_start_loopback(qdev);
+ if (*data)
+ goto out;
+ *data = ql_run_loopback_test(qdev);
+out:
+ ql_stop_loopback(qdev);
+ return *data;
+}
+
+static void ql_self_test(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ set_bit(QL_SELFTEST, &qdev->flags);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+ if (ql_loopback_test(qdev, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ } else {
+ /* Online tests */
+ data[0] = 0;
+ }
+ clear_bit(QL_SELFTEST, &qdev->flags);
+ /* Give link time to come up after
+ * port configuration changes.
+ */
+ msleep_interruptible(4 * 1000);
+ } else {
+		netif_err(qdev, drv, qdev->ndev,
+			  "Interface is down; loopback test will fail.\n");
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ }
+}
+
+static int ql_get_regs_len(struct net_device *ndev)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ return sizeof(struct ql_mpi_coredump);
+ else
+ return sizeof(struct ql_reg_dump);
+}
+
+static void ql_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ ql_get_dump(qdev, p);
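+	/* Clear the flag so the next request captures a fresh core. */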
+ qdev->core_is_dumped = 0;
+ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ regs->len = sizeof(struct ql_mpi_coredump);
+ else
+ regs->len = sizeof(struct ql_reg_dump);
+}
+
+static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+ struct ql_adapter *qdev = netdev_priv(dev);
+
+ c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
+ c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;
+
+ /* This chip coalesces as follows:
+ * If a packet arrives, hold off interrupts until
+ * cqicb->int_delay expires, but if no other packets arrive don't
+ * wait longer than cqicb->pkt_int_delay. But ethtool doesn't use a
+ * timer to coalesce on a frame basis. So, we have to take ethtool's
+ * max_coalesced_frames value and convert it to a delay in microseconds.
+	 * We do this by assuming a baseline throughput of 1,000,000 frames
+	 * per second at 1024 bytes each, i.e. one frame per usec, so the
+	 * conversion is a simple one-to-one ratio.
+ */
+ c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
+ c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;
+
+ return 0;
+}
+
+static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ /* Validate user parameters. */
+ if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
+ return -EINVAL;
+ /* Don't wait more than 10 usec. */
+ if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
+ return -EINVAL;
+ if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
+ return -EINVAL;
+ if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
+ return -EINVAL;
+
+ /* Verify a change took place before updating the hardware. */
+ if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
+ qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
+ qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
+ qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
+ return 0;
+
+ qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
+ qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
+ qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
+ qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;
+
+ return ql_update_ring_coalescing(qdev);
+}
+
+static void ql_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ql_adapter *qdev = netdev_priv(netdev);
+
+ ql_mb_get_port_cfg(qdev);
+ if (qdev->link_config & CFG_PAUSE_STD) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+static int ql_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ql_adapter *qdev = netdev_priv(netdev);
+ int status = 0;
+
+ if ((pause->rx_pause) && (pause->tx_pause))
+ qdev->link_config |= CFG_PAUSE_STD;
+ else if (!pause->rx_pause && !pause->tx_pause)
+ qdev->link_config &= ~CFG_PAUSE_STD;
+ else
+ return -EINVAL;
+
+ status = ql_mb_set_port_cfg(qdev);
+ return status;
+}
+
+static u32 ql_get_msglevel(struct net_device *ndev)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ return qdev->msg_enable;
+}
+
+static void ql_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ qdev->msg_enable = value;
+}
+
+const struct ethtool_ops qlge_ethtool_ops = {
+ .get_settings = ql_get_settings,
+ .get_drvinfo = ql_get_drvinfo,
+ .get_wol = ql_get_wol,
+ .set_wol = ql_set_wol,
+ .get_regs_len = ql_get_regs_len,
+ .get_regs = ql_get_regs,
+ .get_msglevel = ql_get_msglevel,
+ .set_msglevel = ql_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .set_phys_id = ql_set_phys_id,
+ .self_test = ql_self_test,
+ .get_pauseparam = ql_get_pauseparam,
+ .set_pauseparam = ql_set_pauseparam,
+ .get_coalesce = ql_get_coalesce,
+ .set_coalesce = ql_set_coalesce,
+ .get_sset_count = ql_get_sset_count,
+ .get_strings = ql_get_strings,
+ .get_ethtool_stats = ql_get_ethtool_stats,
+};
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
new file mode 100644
index 000000000000..f2d9bb78ec7f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -0,0 +1,4981 @@
+/*
+ * QLogic qlge NIC HBA Driver
+ * Copyright (c) 2003-2008 QLogic Corporation
+ * See LICENSE.qlge for copyright and licensing details.
+ * Author: Linux qlge network device driver by
+ * Ron Mercer <ron.mercer@qlogic.com>
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/prefetch.h>
+#include <net/ip6_checksum.h>
+
+#include "qlge.h"
+
+char qlge_driver_name[] = DRV_NAME;
+const char qlge_driver_version[] = DRV_VERSION;
+
+MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static const u32 default_msg =
+ NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
+/* NETIF_MSG_TIMER | */
+ NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP |
+ NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR |
+/* NETIF_MSG_TX_QUEUED | */
+/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
+/* NETIF_MSG_PKTDATA | */
+ NETIF_MSG_HW | NETIF_MSG_WOL | 0;
+
+static int debug = -1; /* defaults above */
+module_param(debug, int, 0664);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+#define MSIX_IRQ 0
+#define MSI_IRQ 1
+#define LEG_IRQ 2
+static int qlge_irq_type = MSIX_IRQ;
+module_param(qlge_irq_type, int, 0664);
+MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
+
+static int qlge_mpi_coredump;
+module_param(qlge_mpi_coredump, int, 0);
+MODULE_PARM_DESC(qlge_mpi_coredump,
+ "Option to enable MPI firmware dump. "
+ "Default is OFF - Do Not allocate memory. ");
+
+static int qlge_force_coredump;
+module_param(qlge_force_coredump, int, 0);
+MODULE_PARM_DESC(qlge_force_coredump,
+ "Option to allow force of firmware core dump. "
+ "Default is OFF - Do not allow.");
+
+static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
+ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
+ /* required last entry */
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
+
+static int ql_wol(struct ql_adapter *qdev);
+static void qlge_set_multicast_list(struct net_device *ndev);
+
+/* This hardware semaphore causes exclusive access to
+ * resources shared between the NIC driver, MPI firmware,
+ * FCOE firmware and the FC driver.
+ */
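+/* Returns 0 if the semaphore bits were acquired, otherwise non-zero. */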
+static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
+{
+ u32 sem_bits = 0;
+
+ switch (sem_mask) {
+ case SEM_XGMAC0_MASK:
+ sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
+ break;
+ case SEM_XGMAC1_MASK:
+ sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
+ break;
+ case SEM_ICB_MASK:
+ sem_bits = SEM_SET << SEM_ICB_SHIFT;
+ break;
+ case SEM_MAC_ADDR_MASK:
+ sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
+ break;
+ case SEM_FLASH_MASK:
+ sem_bits = SEM_SET << SEM_FLASH_SHIFT;
+ break;
+ case SEM_PROBE_MASK:
+ sem_bits = SEM_SET << SEM_PROBE_SHIFT;
+ break;
+ case SEM_RT_IDX_MASK:
+ sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
+ break;
+ case SEM_PROC_REG_MASK:
+ sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
+ break;
+ default:
+		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
+ return -EINVAL;
+ }
+
+ ql_write32(qdev, SEM, sem_bits | sem_mask);
+ return !(ql_read32(qdev, SEM) & sem_bits);
+}
+
+int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
+{
+ unsigned int wait_count = 30;
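+	/* Poll for up to ~3 ms: 30 attempts, 100 us apart. */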
+ do {
+ if (!ql_sem_trylock(qdev, sem_mask))
+ return 0;
+ udelay(100);
+ } while (--wait_count);
+ return -ETIMEDOUT;
+}
+
+void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
+{
+ ql_write32(qdev, SEM, sem_mask);
+ ql_read32(qdev, SEM); /* flush */
+}
+
+/* This function waits for a specific bit to come ready
+ * in a given register. It is used mostly by the initialize
+ * process, but is also used in kernel thread API such as
+ * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
+ */
+int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
+{
+ u32 temp;
+ int count = UDELAY_COUNT;
+
+ while (count) {
+ temp = ql_read32(qdev, reg);
+
+ /* check for errors */
+ if (temp & err_bit) {
+ netif_alert(qdev, probe, qdev->ndev,
+				   "register 0x%.08x access error, value = 0x%.08x!\n",
+ reg, temp);
+ return -EIO;
+ } else if (temp & bit)
+ return 0;
+ udelay(UDELAY_DELAY);
+ count--;
+ }
+ netif_alert(qdev, probe, qdev->ndev,
+ "Timed out waiting for reg %x to come ready.\n", reg);
+ return -ETIMEDOUT;
+}
+
+/* The CFG register is used to download TX and RX control blocks
+ * to the chip. This function waits for an operation to complete.
+ */
+static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
+{
+ int count = UDELAY_COUNT;
+ u32 temp;
+
+ while (count) {
+ temp = ql_read32(qdev, CFG);
+ if (temp & CFG_LE)
+ return -EIO;
+ if (!(temp & bit))
+ return 0;
+ udelay(UDELAY_DELAY);
+ count--;
+ }
+ return -ETIMEDOUT;
+}
+
+/* Used to issue init control blocks to hw. Maps control block,
+ * sets address, triggers download, waits for completion.
+ */
+int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
+ u16 q_id)
+{
+ u64 map;
+ int status = 0;
+ int direction;
+ u32 mask;
+ u32 value;
+
+ direction =
+ (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
+ PCI_DMA_FROMDEVICE;
+
+ map = pci_map_single(qdev->pdev, ptr, size, direction);
+ if (pci_dma_mapping_error(qdev->pdev, map)) {
+ netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
+ return -ENOMEM;
+ }
+
+	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
+	if (status)
+		goto unmap;
+
+ status = ql_wait_cfg(qdev, bit);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Timed out waiting for CFG to come ready.\n");
+ goto exit;
+ }
+
+ ql_write32(qdev, ICB_L, (u32) map);
+ ql_write32(qdev, ICB_H, (u32) (map >> 32));
+
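+	/* On this chip the upper 16 bits of a register write act as a
+	 * bit-enable mask for the lower 16, so OR'ing (bit << 16) into
+	 * CFG_Q_MASK below updates only the request bit and the queue
+	 * id field.  The same convention shows up with INTR_EN and STS.
+	 */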
+ mask = CFG_Q_MASK | (bit << 16);
+ value = bit | (q_id << CFG_Q_SHIFT);
+ ql_write32(qdev, CFG, (mask | value));
+
+ /*
+ * Wait for the bit to clear after signaling hw.
+ */
+ status = ql_wait_cfg(qdev, bit);
+exit:
+	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
+unmap:
+	pci_unmap_single(qdev->pdev, map, size, direction);
+ return status;
+}
+
+/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
+int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
+ u32 *value)
+{
+ u32 offset = 0;
+ int status;
+
+ switch (type) {
+ case MAC_ADDR_TYPE_MULTI_MAC:
+ case MAC_ADDR_TYPE_CAM_MAC:
+ {
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+ (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+ if (status)
+ goto exit;
+ *value++ = ql_read32(qdev, MAC_ADDR_DATA);
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+ (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+ if (status)
+ goto exit;
+ *value++ = ql_read32(qdev, MAC_ADDR_DATA);
+ if (type == MAC_ADDR_TYPE_CAM_MAC) {
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+ (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
+ status =
+ ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
+ MAC_ADDR_MR, 0);
+ if (status)
+ goto exit;
+ *value++ = ql_read32(qdev, MAC_ADDR_DATA);
+ }
+ break;
+ }
+ case MAC_ADDR_TYPE_VLAN:
+ case MAC_ADDR_TYPE_MULTI_FLTR:
+ default:
+ netif_crit(qdev, ifup, qdev->ndev,
+ "Address type %d not yet supported.\n", type);
+ status = -EPERM;
+ }
+exit:
+ return status;
+}
+
+/* Set up a MAC, multicast or VLAN address for the
+ * inbound frame matching.
+ */
+static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
+ u16 index)
+{
+ u32 offset = 0;
+ int status = 0;
+
+ switch (type) {
+ case MAC_ADDR_TYPE_MULTI_MAC:
+ {
+ u32 upper = (addr[0] << 8) | addr[1];
+ u32 lower = (addr[2] << 24) | (addr[3] << 16) |
+ (addr[4] << 8) | (addr[5]);
+
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
+ (index << MAC_ADDR_IDX_SHIFT) |
+ type | MAC_ADDR_E);
+ ql_write32(qdev, MAC_ADDR_DATA, lower);
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
+ (index << MAC_ADDR_IDX_SHIFT) |
+ type | MAC_ADDR_E);
+
+ ql_write32(qdev, MAC_ADDR_DATA, upper);
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ break;
+ }
+ case MAC_ADDR_TYPE_CAM_MAC:
+ {
+ u32 cam_output;
+ u32 upper = (addr[0] << 8) | addr[1];
+ u32 lower =
+ (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
+ (addr[5]);
+
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Adding %s address %pM at index %d in the CAM.\n",
+ type == MAC_ADDR_TYPE_MULTI_MAC ?
+ "MULTICAST" : "UNICAST",
+ addr, index);
+
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+ (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ type); /* type */
+ ql_write32(qdev, MAC_ADDR_DATA, lower);
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+ (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ type); /* type */
+ ql_write32(qdev, MAC_ADDR_DATA, upper);
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
+ (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ type); /* type */
+			/* This field should also include the queue id
+			 * and possibly the function id.  Right now we
+			 * hardcode the route field to NIC core.
+			 */
+ cam_output = (CAM_OUT_ROUTE_NIC |
+ (qdev->
+ func << CAM_OUT_FUNC_SHIFT) |
+ (0 << CAM_OUT_CQ_ID_SHIFT));
+ if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
+ cam_output |= CAM_OUT_RV;
+ /* route to NIC core */
+ ql_write32(qdev, MAC_ADDR_DATA, cam_output);
+ break;
+ }
+ case MAC_ADDR_TYPE_VLAN:
+ {
+ u32 enable_bit = *((u32 *) &addr[0]);
+ /* For VLAN, the addr actually holds a bit that
+ * either enables or disables the vlan id we are
+ * addressing. It's either MAC_ADDR_E on or off.
+ * That's bit-27 we're talking about.
+ */
+ netif_info(qdev, ifup, qdev->ndev,
+ "%s VLAN ID %d %s the CAM.\n",
+ enable_bit ? "Adding" : "Removing",
+ index,
+ enable_bit ? "to" : "from");
+
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
+ (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ type | /* type */
+ enable_bit); /* enable/disable */
+ break;
+ }
+ case MAC_ADDR_TYPE_MULTI_FLTR:
+ default:
+ netif_crit(qdev, ifup, qdev->ndev,
+ "Address type %d not yet supported.\n", type);
+ status = -EPERM;
+ }
+exit:
+ return status;
+}
+
+/* Set or clear MAC address in hardware. We sometimes
+ * have to clear it to prevent wrong frame routing,
+ * especially in a bonding environment.
+ */
+static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
+{
+ int status;
+ char zero_mac_addr[ETH_ALEN];
+ char *addr;
+
+ if (set) {
+ addr = &qdev->current_mac_addr[0];
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Set Mac addr %pM\n", addr);
+ } else {
+ memset(zero_mac_addr, 0, ETH_ALEN);
+ addr = &zero_mac_addr[0];
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Clearing MAC address\n");
+ }
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return status;
+ status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
+ MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init mac address.\n");
+ return status;
+}
+
+void ql_link_on(struct ql_adapter *qdev)
+{
+ netif_err(qdev, link, qdev->ndev, "Link is up.\n");
+ netif_carrier_on(qdev->ndev);
+ ql_set_mac_addr(qdev, 1);
+}
+
+void ql_link_off(struct ql_adapter *qdev)
+{
+ netif_err(qdev, link, qdev->ndev, "Link is down.\n");
+ netif_carrier_off(qdev->ndev);
+ ql_set_mac_addr(qdev, 0);
+}
+
+/* Get a specific frame routing value from the CAM.
+ * Used for debug and reg dump.
+ */
+int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
+{
+ int status = 0;
+
+ status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
+ if (status)
+ goto exit;
+
+ ql_write32(qdev, RT_IDX,
+ RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
+ status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
+ if (status)
+ goto exit;
+ *value = ql_read32(qdev, RT_DATA);
+exit:
+ return status;
+}
+
+/* The NIC function for this chip has 16 routing indexes. Each one can be used
+ * to route different frame types to various inbound queues. We send broadcast/
+ * multicast/error frames to the default queue for slow handling,
+ * and CAM hit/RSS frames to the fast handling queues.
+ */
+static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
+ int enable)
+{
+ int status = -EINVAL; /* Return error if no mask match. */
+ u32 value = 0;
+
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s %s mask %s the routing reg.\n",
+ enable ? "Adding" : "Removing",
+ index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
+ index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
+ index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
+ index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
+ index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
+ index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
+ index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
+ index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
+ index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
+ index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
+ index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
+ index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
+ index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
+ index == RT_IDX_UNUSED013 ? "UNUSED13" :
+ index == RT_IDX_UNUSED014 ? "UNUSED14" :
+ index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
+ "(Bad index != RT_IDX)",
+ enable ? "to" : "from");
+
+ switch (mask) {
+ case RT_IDX_CAM_HIT:
+ {
+ value = RT_IDX_DST_CAM_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
+ break;
+ }
+ case RT_IDX_VALID: /* Promiscuous Mode frames. */
+ {
+ value = RT_IDX_DST_DFLT_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
+ break;
+ }
+ case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
+ {
+ value = RT_IDX_DST_DFLT_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
+ break;
+ }
+ case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
+ {
+ value = RT_IDX_DST_DFLT_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_IP_CSUM_ERR_SLOT <<
+ RT_IDX_IDX_SHIFT); /* index */
+ break;
+ }
+ case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
+ {
+ value = RT_IDX_DST_DFLT_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
+ RT_IDX_IDX_SHIFT); /* index */
+ break;
+ }
+ case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
+ {
+ value = RT_IDX_DST_DFLT_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
+ break;
+ }
+ case RT_IDX_MCAST: /* Pass up All Multicast frames. */
+ {
+ value = RT_IDX_DST_DFLT_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
+ break;
+ }
+ case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
+ {
+ value = RT_IDX_DST_DFLT_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
+ break;
+ }
+ case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
+ {
+ value = RT_IDX_DST_RSS | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
+ break;
+ }
+ case 0: /* Clear the E-bit on an entry. */
+ {
+ value = RT_IDX_DST_DFLT_Q | /* dest */
+ RT_IDX_TYPE_NICQ | /* type */
+ (index << RT_IDX_IDX_SHIFT);/* index */
+ break;
+ }
+ default:
+ netif_err(qdev, ifup, qdev->ndev,
+ "Mask type %d not yet supported.\n", mask);
+ status = -EPERM;
+ goto exit;
+ }
+
+ if (value) {
+ status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
+ if (status)
+ goto exit;
+ value |= (enable ? RT_IDX_E : 0);
+ ql_write32(qdev, RT_IDX, value);
+ ql_write32(qdev, RT_DATA, enable ? mask : 0);
+ }
+exit:
+ return status;
+}
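+
+/* Usage sketch (illustrative): promiscuous mode is turned on by routing
+ * all valid frames to the default queue,
+ *
+ *	ql_set_routing_reg(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1);
+ *
+ * and the same call with enable == 0 clears the slot's E-bit so it
+ * stops matching.
+ */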
+
+static void ql_enable_interrupts(struct ql_adapter *qdev)
+{
+ ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
+}
+
+static void ql_disable_interrupts(struct ql_adapter *qdev)
+{
+ ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
+}
+
+/* If we're running with multiple MSI-X vectors then we enable on the fly.
+ * Otherwise, we may have multiple outstanding workers and don't want to
+ * enable until the last one finishes. In this case, the irq_cnt gets
+ * incremented every time we queue a worker and decremented every time
+ * a worker finishes. Once it hits zero we enable the interrupt.
+ */
+u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+{
+ u32 var = 0;
+ unsigned long hw_flags = 0;
+ struct intr_context *ctx = qdev->intr_context + intr;
+
+ if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
+ /* Always enable if we're MSIX multi interrupts and
+		 * it's not the default (zeroth) interrupt.
+ */
+ ql_write32(qdev, INTR_EN,
+ ctx->intr_en_mask);
+ var = ql_read32(qdev, STS);
+ return var;
+ }
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if (atomic_dec_and_test(&ctx->irq_cnt)) {
+ ql_write32(qdev, INTR_EN,
+ ctx->intr_en_mask);
+ var = ql_read32(qdev, STS);
+ }
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return var;
+}
+
+static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+{
+ u32 var = 0;
+ struct intr_context *ctx;
+
+ /* HW disables for us if we're MSIX multi interrupts and
+	 * it's not the default (zeroth) interrupt.
+ */
+ if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
+ return 0;
+
+ ctx = qdev->intr_context + intr;
+ spin_lock(&qdev->hw_lock);
+ if (!atomic_read(&ctx->irq_cnt)) {
+ ql_write32(qdev, INTR_EN,
+ ctx->intr_dis_mask);
+ var = ql_read32(qdev, STS);
+ }
+ atomic_inc(&ctx->irq_cnt);
+ spin_unlock(&qdev->hw_lock);
+ return var;
+}
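+
+/* A worked example of the irq_cnt protocol above (legacy/MSI case):
+ * disable bumps irq_cnt 0 -> 1 as a handler is queued and enable drops
+ * it back 1 -> 0 when the handler finishes, re-arming the chip only on
+ * that final transition.  Two overlapping handlers therefore go
+ * 0 -> 1 -> 2 -> 1 -> 0 and the interrupt is re-enabled exactly once.
+ */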
+
+static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
+{
+ int i;
+ for (i = 0; i < qdev->intr_count; i++) {
+		/* The enable call does an atomic_dec_and_test
+ * and enables only if the result is zero.
+ * So we precharge it here.
+ */
+ if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
+ i == 0))
+ atomic_set(&qdev->intr_context[i].irq_cnt, 1);
+ ql_enable_completion_interrupt(qdev, i);
+ }
+}
+
+static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
+{
+ int status, i;
+ u16 csum = 0;
+ __le16 *flash = (__le16 *)&qdev->flash;
+
+ status = strncmp((char *)&qdev->flash, str, 4);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
+ return status;
+ }
+
+ for (i = 0; i < size; i++)
+ csum += le16_to_cpu(*flash++);
+
+ if (csum)
+ netif_err(qdev, ifup, qdev->ndev,
+ "Invalid flash checksum, csum = 0x%.04x.\n", csum);
+
+ return csum;
+}
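+
+/* Checksum sketch: the flash image is laid out so its 16-bit words sum
+ * to zero modulo 2^16.  For example, a block whose payload words sum to
+ * 0xFFFE must carry a checksum word of 0x0002; any nonzero result here
+ * indicates corruption.
+ */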
+
+static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
+{
+ int status = 0;
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev,
+ FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
+ if (status)
+ goto exit;
+ /* set up for reg read */
+ ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev,
+ FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
+ if (status)
+ goto exit;
+ /* This data is stored on flash as an array of
+ * __le32. Since ql_read32() returns cpu endian
+ * we need to swap it back.
+ */
+ *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
+exit:
+ return status;
+}
+
+static int ql_get_8000_flash_params(struct ql_adapter *qdev)
+{
+ u32 i, size;
+ int status;
+ __le32 *p = (__le32 *)&qdev->flash;
+ u32 offset;
+ u8 mac_addr[6];
+
+ /* Get flash offset for function and adjust
+ * for dword access.
+ */
+ if (!qdev->port)
+ offset = FUNC0_FLASH_OFFSET / sizeof(u32);
+ else
+ offset = FUNC1_FLASH_OFFSET / sizeof(u32);
+
+ if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
+ return -ETIMEDOUT;
+
+ size = sizeof(struct flash_params_8000) / sizeof(u32);
+ for (i = 0; i < size; i++, p++) {
+ status = ql_read_flash_word(qdev, i+offset, p);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Error reading flash.\n");
+ goto exit;
+ }
+ }
+
+ status = ql_validate_flash(qdev,
+ sizeof(struct flash_params_8000) / sizeof(u16),
+ "8000");
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* Extract either manufacturer or BOFM modified
+ * MAC address.
+ */
+ if (qdev->flash.flash_params_8000.data_type1 == 2)
+ memcpy(mac_addr,
+ qdev->flash.flash_params_8000.mac_addr1,
+ qdev->ndev->addr_len);
+ else
+ memcpy(mac_addr,
+ qdev->flash.flash_params_8000.mac_addr,
+ qdev->ndev->addr_len);
+
+ if (!is_valid_ether_addr(mac_addr)) {
+ netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
+ status = -EINVAL;
+ goto exit;
+ }
+
+ memcpy(qdev->ndev->dev_addr,
+ mac_addr,
+ qdev->ndev->addr_len);
+
+exit:
+ ql_sem_unlock(qdev, SEM_FLASH_MASK);
+ return status;
+}
+
+static int ql_get_8012_flash_params(struct ql_adapter *qdev)
+{
+ int i;
+ int status;
+ __le32 *p = (__le32 *)&qdev->flash;
+ u32 offset = 0;
+ u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
+
+ /* Second function's parameters follow the first
+ * function's.
+ */
+ if (qdev->port)
+ offset = size;
+
+ if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
+ return -ETIMEDOUT;
+
+ for (i = 0; i < size; i++, p++) {
+ status = ql_read_flash_word(qdev, i+offset, p);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Error reading flash.\n");
+ goto exit;
+ }
+	}
+
+ status = ql_validate_flash(qdev,
+ sizeof(struct flash_params_8012) / sizeof(u16),
+ "8012");
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
+ status = -EINVAL;
+ goto exit;
+ }
+
+ memcpy(qdev->ndev->dev_addr,
+ qdev->flash.flash_params_8012.mac_addr,
+ qdev->ndev->addr_len);
+
+exit:
+ ql_sem_unlock(qdev, SEM_FLASH_MASK);
+ return status;
+}
+
+/* xgmac registers are located behind the xgmac_addr and xgmac_data
+ * register pair. Each read/write requires us to wait for the ready
+ * bit before reading/writing the data.
+ */
+static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
+{
+ int status;
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev,
+ XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ if (status)
+ return status;
+ /* write the data to the data reg */
+ ql_write32(qdev, XGMAC_DATA, data);
+ /* trigger the write */
+ ql_write32(qdev, XGMAC_ADDR, reg);
+ return status;
+}
+
+/* xgmac registers are located behind the xgmac_addr and xgmac_data
+ * register pair. Each read/write requires us to wait for the ready
+ * bit before reading/writing the data.
+ */
+int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+ int status = 0;
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev,
+ XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ if (status)
+ goto exit;
+ /* set up for reg read */
+ ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev,
+ XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ if (status)
+ goto exit;
+ /* get the data */
+ *data = ql_read32(qdev, XGMAC_DATA);
+exit:
+ return status;
+}
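+
+/* Typical read-modify-write through the indirect pair (illustrative;
+ * this is the pattern ql_8012_port_initialize() uses below):
+ *
+ *	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
+ *	data |= GLOBAL_CFG_RESET;
+ *	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
+ */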
+
+/* This is used for reading the 64-bit statistics regs. */
+int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
+{
+ int status = 0;
+ u32 hi = 0;
+ u32 lo = 0;
+
+ status = ql_read_xgmac_reg(qdev, reg, &lo);
+ if (status)
+ goto exit;
+
+ status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
+ if (status)
+ goto exit;
+
+ *data = (u64) lo | ((u64) hi << 32);
+
+exit:
+ return status;
+}
+
+static int ql_8000_port_initialize(struct ql_adapter *qdev)
+{
+ int status;
+ /*
+ * Get MPI firmware version for driver banner
+	 * and ethtool info.
+ */
+ status = ql_mb_about_fw(qdev);
+ if (status)
+ goto exit;
+ status = ql_mb_get_fw_state(qdev);
+ if (status)
+ goto exit;
+ /* Wake up a worker to get/set the TX/RX frame sizes. */
+ queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
+exit:
+ return status;
+}
+
+/* Take the MAC Core out of reset.
+ * Enable statistics counting.
+ * Take the transmitter/receiver out of reset.
+ * This functionality may be done in the MPI firmware at a
+ * later date.
+ */
+static int ql_8012_port_initialize(struct ql_adapter *qdev)
+{
+ int status = 0;
+ u32 data;
+
+ if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
+ /* Another function has the semaphore, so
+ * wait for the port init bit to come ready.
+ */
+ netif_info(qdev, link, qdev->ndev,
+ "Another function has the semaphore, so wait for the port init bit to come ready.\n");
+ status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
+ if (status) {
+ netif_crit(qdev, link, qdev->ndev,
+ "Port initialize timed out.\n");
+ }
+ return status;
+ }
+
+	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
+ /* Set the core reset. */
+ status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
+ if (status)
+ goto end;
+ data |= GLOBAL_CFG_RESET;
+ status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
+ if (status)
+ goto end;
+
+ /* Clear the core reset and turn on jumbo for receiver. */
+ data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
+ data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
+ data |= GLOBAL_CFG_TX_STAT_EN;
+ data |= GLOBAL_CFG_RX_STAT_EN;
+ status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
+ if (status)
+ goto end;
+
+	/* Enable the transmitter and clear its reset. */
+ status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
+ if (status)
+ goto end;
+ data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
+ data |= TX_CFG_EN; /* Enable the transmitter. */
+ status = ql_write_xgmac_reg(qdev, TX_CFG, data);
+ if (status)
+ goto end;
+
+	/* Enable the receiver and clear its reset. */
+ status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
+ if (status)
+ goto end;
+ data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
+ data |= RX_CFG_EN; /* Enable the receiver. */
+ status = ql_write_xgmac_reg(qdev, RX_CFG, data);
+ if (status)
+ goto end;
+
+	/* Set the max frame size; 0x2580 is 9600 bytes, enough for jumbo frames. */
+ status =
+ ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
+ if (status)
+ goto end;
+ status =
+ ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
+ if (status)
+ goto end;
+
+ /* Signal to the world that the port is enabled. */
+ ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
+end:
+ ql_sem_unlock(qdev, qdev->xg_sem_mask);
+ return status;
+}
+
+static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
+{
+ return PAGE_SIZE << qdev->lbq_buf_order;
+}
+
+/* Get the next large buffer. */
+static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
+{
+ struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
+ rx_ring->lbq_curr_idx++;
+ if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
+ rx_ring->lbq_curr_idx = 0;
+ rx_ring->lbq_free_cnt++;
+ return lbq_desc;
+}
+
+static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring)
+{
+ struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
+
+ pci_dma_sync_single_for_cpu(qdev->pdev,
+ dma_unmap_addr(lbq_desc, mapaddr),
+ rx_ring->lbq_buf_size,
+ PCI_DMA_FROMDEVICE);
+
+ /* If it's the last chunk of our master page then
+ * we unmap it.
+ */
+ if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
+ == ql_lbq_block_size(qdev))
+ pci_unmap_page(qdev->pdev,
+ lbq_desc->p.pg_chunk.map,
+ ql_lbq_block_size(qdev),
+ PCI_DMA_FROMDEVICE);
+ return lbq_desc;
+}
+
+/* Get the next small buffer. */
+static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
+{
+ struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
+ rx_ring->sbq_curr_idx++;
+ if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
+ rx_ring->sbq_curr_idx = 0;
+ rx_ring->sbq_free_cnt++;
+ return sbq_desc;
+}
+
+/* Update an rx ring index. */
+static void ql_update_cq(struct rx_ring *rx_ring)
+{
+ rx_ring->cnsmr_idx++;
+ rx_ring->curr_entry++;
+ if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
+ rx_ring->cnsmr_idx = 0;
+ rx_ring->curr_entry = rx_ring->cq_base;
+ }
+}
+
+static void ql_write_cq_idx(struct rx_ring *rx_ring)
+{
+ ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
+}
+
+static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
+ struct bq_desc *lbq_desc)
+{
+ if (!rx_ring->pg_chunk.page) {
+ u64 map;
+ rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
+ GFP_ATOMIC,
+ qdev->lbq_buf_order);
+ if (unlikely(!rx_ring->pg_chunk.page)) {
+ netif_err(qdev, drv, qdev->ndev,
+ "page allocation failed.\n");
+ return -ENOMEM;
+ }
+ rx_ring->pg_chunk.offset = 0;
+ map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
+ 0, ql_lbq_block_size(qdev),
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(qdev->pdev, map)) {
+ __free_pages(rx_ring->pg_chunk.page,
+ qdev->lbq_buf_order);
+ netif_err(qdev, drv, qdev->ndev,
+ "PCI mapping failed.\n");
+ return -ENOMEM;
+ }
+ rx_ring->pg_chunk.map = map;
+ rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
+ }
+
+ /* Copy the current master pg_chunk info
+ * to the current descriptor.
+ */
+ lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
+
+ /* Adjust the master page chunk for next
+ * buffer get.
+ */
+ rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
+ if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
+ rx_ring->pg_chunk.page = NULL;
+ lbq_desc->p.pg_chunk.last_flag = 1;
+ } else {
+ rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
+ get_page(rx_ring->pg_chunk.page);
+ lbq_desc->p.pg_chunk.last_flag = 0;
+ }
+ return 0;
+}
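+
+/* Worked example of the chunking above (assuming 4KB pages): with
+ * lbq_buf_order = 1 the master block is 8KB, so a 2KB lbq_buf_size
+ * carves it into four chunks at offsets 0/2K/4K/6K.  get_page() is
+ * taken for the first three; the last chunk consumes the block's
+ * original reference and sets last_flag so the completion path knows
+ * to unmap the whole page.
+ */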
+
+/* Process (refill) a large buffer queue. */
+static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+ u32 clean_idx = rx_ring->lbq_clean_idx;
+ u32 start_idx = clean_idx;
+ struct bq_desc *lbq_desc;
+ u64 map;
+ int i;
+
+ while (rx_ring->lbq_free_cnt > 32) {
+ for (i = 0; i < 16; i++) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "lbq: try cleaning clean_idx = %d.\n",
+ clean_idx);
+ lbq_desc = &rx_ring->lbq[clean_idx];
+ if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Could not get a page chunk.\n");
+ return;
+ }
+
+ map = lbq_desc->p.pg_chunk.map +
+ lbq_desc->p.pg_chunk.offset;
+ dma_unmap_addr_set(lbq_desc, mapaddr, map);
+ dma_unmap_len_set(lbq_desc, maplen,
+ rx_ring->lbq_buf_size);
+ *lbq_desc->addr = cpu_to_le64(map);
+
+ pci_dma_sync_single_for_device(qdev->pdev, map,
+ rx_ring->lbq_buf_size,
+ PCI_DMA_FROMDEVICE);
+ clean_idx++;
+ if (clean_idx == rx_ring->lbq_len)
+ clean_idx = 0;
+ }
+
+ rx_ring->lbq_clean_idx = clean_idx;
+ rx_ring->lbq_prod_idx += 16;
+ if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
+ rx_ring->lbq_prod_idx = 0;
+ rx_ring->lbq_free_cnt -= 16;
+ }
+
+ if (start_idx != clean_idx) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "lbq: updating prod idx = %d.\n",
+ rx_ring->lbq_prod_idx);
+ ql_write_db_reg(rx_ring->lbq_prod_idx,
+ rx_ring->lbq_prod_idx_db_reg);
+ }
+}
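+
+/* Refill accounting note: lbq_free_cnt is incremented by
+ * ql_get_curr_lbuf() as the completion path consumes buffers and is
+ * drawn down 16 at a time here, so buffers are only reposted in full
+ * bursts of 16 and only while more than 32 slots are free.
+ */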
+
+/* Process (refill) a small buffer queue. */
+static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+ u32 clean_idx = rx_ring->sbq_clean_idx;
+ u32 start_idx = clean_idx;
+ struct bq_desc *sbq_desc;
+ u64 map;
+ int i;
+
+ while (rx_ring->sbq_free_cnt > 16) {
+ for (i = 0; i < 16; i++) {
+ sbq_desc = &rx_ring->sbq[clean_idx];
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "sbq: try cleaning clean_idx = %d.\n",
+ clean_idx);
+ if (sbq_desc->p.skb == NULL) {
+ netif_printk(qdev, rx_status, KERN_DEBUG,
+ qdev->ndev,
+ "sbq: getting new skb for index %d.\n",
+ sbq_desc->index);
+ sbq_desc->p.skb =
+ netdev_alloc_skb(qdev->ndev,
+ SMALL_BUFFER_SIZE);
+ if (sbq_desc->p.skb == NULL) {
+ netif_err(qdev, probe, qdev->ndev,
+ "Couldn't get an skb.\n");
+ rx_ring->sbq_clean_idx = clean_idx;
+ return;
+ }
+ skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
+ map = pci_map_single(qdev->pdev,
+ sbq_desc->p.skb->data,
+ rx_ring->sbq_buf_size,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(qdev->pdev, map)) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "PCI mapping failed.\n");
+ rx_ring->sbq_clean_idx = clean_idx;
+ dev_kfree_skb_any(sbq_desc->p.skb);
+ sbq_desc->p.skb = NULL;
+ return;
+ }
+ dma_unmap_addr_set(sbq_desc, mapaddr, map);
+ dma_unmap_len_set(sbq_desc, maplen,
+ rx_ring->sbq_buf_size);
+ *sbq_desc->addr = cpu_to_le64(map);
+ }
+
+ clean_idx++;
+ if (clean_idx == rx_ring->sbq_len)
+ clean_idx = 0;
+ }
+ rx_ring->sbq_clean_idx = clean_idx;
+ rx_ring->sbq_prod_idx += 16;
+ if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
+ rx_ring->sbq_prod_idx = 0;
+ rx_ring->sbq_free_cnt -= 16;
+ }
+
+ if (start_idx != clean_idx) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "sbq: updating prod idx = %d.\n",
+ rx_ring->sbq_prod_idx);
+ ql_write_db_reg(rx_ring->sbq_prod_idx,
+ rx_ring->sbq_prod_idx_db_reg);
+ }
+}
+
+static void ql_update_buffer_queues(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring)
+{
+ ql_update_sbq(qdev, rx_ring);
+ ql_update_lbq(qdev, rx_ring);
+}
+
+/* Unmaps tx buffers. Can be called from send() if a pci mapping
+ * fails at some stage, or from the interrupt when a tx completes.
+ */
+static void ql_unmap_send(struct ql_adapter *qdev,
+ struct tx_ring_desc *tx_ring_desc, int mapped)
+{
+ int i;
+ for (i = 0; i < mapped; i++) {
+ if (i == 0 || (i == 7 && mapped > 7)) {
+ /*
+ * Unmap the skb->data area, or the
+ * external sglist (AKA the Outbound
+ * Address List (OAL)).
+			 * If it's the zeroth element, then it's
+			 * the skb->data area.  If it's element
+			 * seven and more than seven buffers were
+			 * mapped, then it's an OAL.
+ */
+ if (i == 7) {
+ netif_printk(qdev, tx_done, KERN_DEBUG,
+ qdev->ndev,
+ "unmapping OAL area.\n");
+ }
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(&tx_ring_desc->map[i],
+ mapaddr),
+ dma_unmap_len(&tx_ring_desc->map[i],
+ maplen),
+ PCI_DMA_TODEVICE);
+ } else {
+ netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
+ "unmapping frag %d.\n", i);
+ pci_unmap_page(qdev->pdev,
+ dma_unmap_addr(&tx_ring_desc->map[i],
+ mapaddr),
+ dma_unmap_len(&tx_ring_desc->map[i],
+ maplen), PCI_DMA_TODEVICE);
+ }
+ }
+}
+
+/* Map the buffers for this transmit. This will return
+ * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
+ */
+static int ql_map_send(struct ql_adapter *qdev,
+ struct ob_mac_iocb_req *mac_iocb_ptr,
+ struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
+{
+ int len = skb_headlen(skb);
+ dma_addr_t map;
+ int frag_idx, err, map_idx = 0;
+ struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
+ int frag_cnt = skb_shinfo(skb)->nr_frags;
+
+ if (frag_cnt) {
+ netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+ "frag_cnt = %d.\n", frag_cnt);
+ }
+ /*
+ * Map the skb buffer first.
+ */
+ map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+
+ err = pci_dma_mapping_error(qdev->pdev, map);
+ if (err) {
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "PCI mapping failed with error: %d\n", err);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ tbd->len = cpu_to_le32(len);
+ tbd->addr = cpu_to_le64(map);
+ dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+ dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
+ map_idx++;
+
+ /*
+ * This loop fills the remainder of the 8 address descriptors
+ * in the IOCB. If there are more than 7 fragments, then the
+ * eighth address desc will point to an external list (OAL).
+ * When this happens, the remainder of the frags will be stored
+ * in this list.
+ */
+ for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
+ tbd++;
+ if (frag_idx == 6 && frag_cnt > 7) {
+ /* Let's tack on an sglist.
+ * Our control block will now
+ * look like this:
+ * iocb->seg[0] = skb->data
+ * iocb->seg[1] = frag[0]
+ * iocb->seg[2] = frag[1]
+ * iocb->seg[3] = frag[2]
+ * iocb->seg[4] = frag[3]
+ * iocb->seg[5] = frag[4]
+ * iocb->seg[6] = frag[5]
+ * iocb->seg[7] = ptr to OAL (external sglist)
+ * oal->seg[0] = frag[6]
+ * oal->seg[1] = frag[7]
+ * oal->seg[2] = frag[8]
+ * oal->seg[3] = frag[9]
+ * oal->seg[4] = frag[10]
+ * etc...
+ */
+ /* Tack on the OAL in the eighth segment of IOCB. */
+ map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
+ sizeof(struct oal),
+ PCI_DMA_TODEVICE);
+ err = pci_dma_mapping_error(qdev->pdev, map);
+ if (err) {
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "PCI mapping outbound address list with error: %d\n",
+ err);
+ goto map_error;
+ }
+
+ tbd->addr = cpu_to_le64(map);
+ /*
+ * The length is the number of fragments
+			 * that remain to be mapped times the size
+			 * of a single entry in our sglist (OAL).
+ */
+ tbd->len =
+ cpu_to_le32((sizeof(struct tx_buf_desc) *
+ (frag_cnt - frag_idx)) | TX_DESC_C);
+ dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
+ map);
+ dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+ sizeof(struct oal));
+ tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
+ map_idx++;
+ }
+
+ map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, frag->size,
+ DMA_TO_DEVICE);
+
+ err = dma_mapping_error(&qdev->pdev->dev, map);
+ if (err) {
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "PCI mapping frags failed with error: %d.\n",
+ err);
+ goto map_error;
+ }
+
+ tbd->addr = cpu_to_le64(map);
+ tbd->len = cpu_to_le32(frag->size);
+ dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+ dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+ frag->size);
+
+ }
+ /* Save the number of segments we've mapped. */
+ tx_ring_desc->map_cnt = map_idx;
+ /* Terminate the last segment. */
+ tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
+ return NETDEV_TX_OK;
+
+map_error:
+ /*
+ * If the first frag mapping failed, then i will be zero.
+ * This causes the unmap of the skb->data area. Otherwise
+ * we pass in the number of frags that mapped successfully
+	 * so they can be unmapped.
+ */
+ ql_unmap_send(qdev, tx_ring_desc, map_idx);
+ return NETDEV_TX_BUSY;
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct sk_buff *skb;
+ struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct napi_struct *napi = &rx_ring->napi;
+
+ napi->dev = qdev->ndev;
+
+ skb = napi_get_frags(napi);
+ if (!skb) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Couldn't get an skb, exiting.\n");
+ rx_ring->rx_dropped++;
+ put_page(lbq_desc->p.pg_chunk.page);
+ return;
+ }
+ prefetch(lbq_desc->p.pg_chunk.va);
+ __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset,
+ length);
+
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+ skb_shinfo(skb)->nr_frags++;
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += length;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (vlan_id != 0xffff)
+ __vlan_hwaccel_put_tag(skb, vlan_id);
+ napi_gro_frags(napi);
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_page(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct net_device *ndev = qdev->ndev;
+ struct sk_buff *skb = NULL;
+ void *addr;
+ struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct napi_struct *napi = &rx_ring->napi;
+
+ skb = netdev_alloc_skb(ndev, length);
+ if (!skb) {
+ netif_err(qdev, drv, qdev->ndev,
+			  "Couldn't get an skb, need to unwind!\n");
+ rx_ring->rx_dropped++;
+ put_page(lbq_desc->p.pg_chunk.page);
+ return;
+ }
+
+ addr = lbq_desc->p.pg_chunk.va;
+ prefetch(addr);
+
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ netif_info(qdev, drv, qdev->ndev,
+ "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
+ rx_ring->rx_errors++;
+ goto err_out;
+ }
+
+ /* The max framesize filter on this chip is set higher than
+ * MTU since FCoE uses 2k frames.
+ */
+	if (length > ndev->mtu + ETH_HLEN) {
+		netif_err(qdev, drv, qdev->ndev,
+			  "Frame too long, dropping.\n");
+ rx_ring->rx_dropped++;
+ goto err_out;
+ }
+ memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
+ length);
+ skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+				lbq_desc->p.pg_chunk.offset + ETH_HLEN,
+				length - ETH_HLEN);
+	skb->len += length - ETH_HLEN;
+	skb->data_len += length - ETH_HLEN;
+	skb->truesize += length - ETH_HLEN;
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb_checksum_none_assert(skb);
+
+ if ((ndev->features & NETIF_F_RXCSUM) &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ /* TCP frame. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+ (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+ /* Unfragmented ipv4 UDP frame. */
+ struct iphdr *iph = (struct iphdr *) skb->data;
+ if (!(iph->frag_off &
+ cpu_to_be16(IP_MF|IP_OFFSET))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_printk(qdev, rx_status, KERN_DEBUG,
+ qdev->ndev,
+					     "UDP checksum done!\n");
+ }
+ }
+ }
+
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (vlan_id != 0xffff)
+ __vlan_hwaccel_put_tag(skb, vlan_id);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ napi_gro_receive(napi, skb);
+ else
+ netif_receive_skb(skb);
+ return;
+err_out:
+ dev_kfree_skb_any(skb);
+ put_page(lbq_desc->p.pg_chunk.page);
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct net_device *ndev = qdev->ndev;
+ struct sk_buff *skb = NULL;
+ struct sk_buff *new_skb = NULL;
+ struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
+
+ skb = sbq_desc->p.skb;
+ /* Allocate new_skb and copy */
+ new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
+ if (new_skb == NULL) {
+ netif_err(qdev, probe, qdev->ndev,
+ "No skb available, drop the packet.\n");
+ rx_ring->rx_dropped++;
+ return;
+ }
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ memcpy(skb_put(new_skb, length), skb->data, length);
+ skb = new_skb;
+
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ netif_info(qdev, drv, qdev->ndev,
+ "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
+ dev_kfree_skb_any(skb);
+ rx_ring->rx_errors++;
+ return;
+ }
+
+ /* loopback self test for ethtool */
+ if (test_bit(QL_SELFTEST, &qdev->flags)) {
+ ql_check_lb_frame(qdev, skb);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /* The max framesize filter on this chip is set higher than
+ * MTU since FCoE uses 2k frames.
+ */
+ if (skb->len > ndev->mtu + ETH_HLEN) {
+ dev_kfree_skb_any(skb);
+ rx_ring->rx_dropped++;
+ return;
+ }
+
+ prefetch(skb->data);
+ skb->dev = ndev;
+ if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%s Multicast.\n",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_REG ? "Registered" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+ }
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Promiscuous Packet.\n");
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb_checksum_none_assert(skb);
+
+ /* If rx checksum is on, and there are no
+ * csum or frame errors.
+ */
+ if ((ndev->features & NETIF_F_RXCSUM) &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ /* TCP frame. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+ (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+ /* Unfragmented ipv4 UDP frame. */
+ struct iphdr *iph = (struct iphdr *) skb->data;
+ if (!(iph->frag_off &
+				htons(IP_MF|IP_OFFSET))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_printk(qdev, rx_status, KERN_DEBUG,
+ qdev->ndev,
+					     "UDP checksum done!\n");
+ }
+ }
+ }
+
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (vlan_id != 0xffff)
+ __vlan_hwaccel_put_tag(skb, vlan_id);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ napi_gro_receive(&rx_ring->napi, skb);
+ else
+ netif_receive_skb(skb);
+}
+
+static void ql_realign_skb(struct sk_buff *skb, int len)
+{
+ void *temp_addr = skb->data;
+
+ /* Undo the skb_reserve(skb,32) we did before
+ * giving to hardware, and realign data on
+ * a 2-byte boundary.
+ */
+ skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
+ skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
+ skb_copy_to_linear_data(skb, temp_addr,
+ (unsigned int)len);
+}
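+
+/* Worked example: with QLGE_SB_PAD at 32 (the reserve noted above) and
+ * the usual NET_IP_ALIGN of 2, skb->data moves back 30 bytes, so after
+ * the 14-byte Ethernet header the IP header lands on a 4-byte boundary.
+ */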
+
+/*
+ * This function builds an skb for the given inbound
+ * completion. It will be rewritten for readability in the near
+ * future, but for now it works well.
+ */
+static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+ struct bq_desc *lbq_desc;
+ struct bq_desc *sbq_desc;
+ struct sk_buff *skb = NULL;
+ u32 length = le32_to_cpu(ib_mac_rsp->data_len);
+ u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+
+ /*
+ * Handle the header buffer if present.
+ */
+ if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
+ ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Header of %d bytes in small buffer.\n", hdr_len);
+ /*
+ * Headers fit nicely into a small buffer.
+ */
+ sbq_desc = ql_get_curr_sbuf(rx_ring);
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
+ PCI_DMA_FROMDEVICE);
+ skb = sbq_desc->p.skb;
+ ql_realign_skb(skb, hdr_len);
+ skb_put(skb, hdr_len);
+ sbq_desc->p.skb = NULL;
+ }
+
+ /*
+ * Handle the data buffer(s).
+ */
+ if (unlikely(!length)) { /* Is there data too? */
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "No Data buffer in this packet.\n");
+ return skb;
+ }
+
+ if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
+ if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Headers in small, data of %d bytes in small, combine them.\n",
+ length);
+ /*
+ * Data is less than small buffer size so it's
+ * stuffed in a small buffer.
+ * For this case we append the data
+ * from the "data" small buffer to the "header" small
+ * buffer.
+ */
+ sbq_desc = ql_get_curr_sbuf(rx_ring);
+ pci_dma_sync_single_for_cpu(qdev->pdev,
+ dma_unmap_addr
+ (sbq_desc, mapaddr),
+ dma_unmap_len
+ (sbq_desc, maplen),
+ PCI_DMA_FROMDEVICE);
+ memcpy(skb_put(skb, length),
+ sbq_desc->p.skb->data, length);
+ pci_dma_sync_single_for_device(qdev->pdev,
+ dma_unmap_addr
+ (sbq_desc,
+ mapaddr),
+ dma_unmap_len
+ (sbq_desc,
+ maplen),
+ PCI_DMA_FROMDEVICE);
+ } else {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes in a single small buffer.\n",
+ length);
+ sbq_desc = ql_get_curr_sbuf(rx_ring);
+ skb = sbq_desc->p.skb;
+ ql_realign_skb(skb, length);
+ skb_put(skb, length);
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(sbq_desc,
+ mapaddr),
+ dma_unmap_len(sbq_desc,
+ maplen),
+ PCI_DMA_FROMDEVICE);
+ sbq_desc->p.skb = NULL;
+ }
+ } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
+ if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Header in small, %d bytes in large. Chain large to small!\n",
+ length);
+ /*
+ * The data is in a single large buffer. We
+ * chain it to the header buffer's skb and let
+ * it rip.
+ */
+ lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Chaining page at offset = %d, for %d bytes to skb.\n",
+ lbq_desc->p.pg_chunk.offset, length);
+ skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset,
+ length);
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+ } else {
+			/*
+			 * The headers and data are in a single large
+			 * buffer.  We allocate a new skb, chain the page
+			 * to it and pull the headers into the linear
+			 * area.  This can happen with jumbo mtu on a
+			 * non-TCP/UDP frame.
+			 */
+ lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ skb = netdev_alloc_skb(qdev->ndev, length);
+ if (skb == NULL) {
+ netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
+ "No skb available, drop the packet.\n");
+ return NULL;
+ }
+ pci_unmap_page(qdev->pdev,
+ dma_unmap_addr(lbq_desc,
+ mapaddr),
+ dma_unmap_len(lbq_desc, maplen),
+ PCI_DMA_FROMDEVICE);
+ skb_reserve(skb, NET_IP_ALIGN);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
+ length);
+ skb_fill_page_desc(skb, 0,
+ lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset,
+ length);
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+ __pskb_pull_tail(skb,
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+ VLAN_ETH_HLEN : ETH_HLEN);
+ }
+ } else {
+ /*
+ * The data is in a chain of large buffers
+ * pointed to by a small buffer. We loop
+		 * pointed to by a small buffer.  We loop
+		 * through and chain them to our small header
+ * frags: There are 18 max frags and our small
+ * buffer will hold 32 of them. The thing is,
+ * we'll use 3 max for our 9000 byte jumbo
+ * frames. If the MTU goes up we could
+ * eventually be in trouble.
+ */
+ int size, i = 0;
+ sbq_desc = ql_get_curr_sbuf(rx_ring);
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
+ PCI_DMA_FROMDEVICE);
+ if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
+ /*
+			 * This is a non-TCP/UDP IP frame, so
+ * the headers aren't split into a small
+ * buffer. We have to use the small buffer
+ * that contains our sg list as our skb to
+			 * send upstream.  Copy the sg list here to
+ * a local buffer and use it to find the
+ * pages to chain.
+ */
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes of headers & data in chain of large.\n",
+ length);
+ skb = sbq_desc->p.skb;
+ sbq_desc->p.skb = NULL;
+ skb_reserve(skb, NET_IP_ALIGN);
+ }
+ while (length > 0) {
+ lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ size = (length < rx_ring->lbq_buf_size) ? length :
+ rx_ring->lbq_buf_size;
+
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Adding page %d to skb for %d bytes.\n",
+ i, size);
+ skb_fill_page_desc(skb, i,
+ lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset,
+ size);
+ skb->len += size;
+ skb->data_len += size;
+ skb->truesize += size;
+ length -= size;
+ i++;
+ }
+ __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+ VLAN_ETH_HLEN : ETH_HLEN);
+ }
+ return skb;
+}
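+
+/* Sizing sketch for the chained-large case above (assuming 4KB large
+ * buffers): a 9000-byte jumbo frame needs ceil(9000 / 4096) = 3 page
+ * chunks, which is where the "3 max" in the comment comes from; the
+ * 18-frag limit is nowhere near exercised at that MTU.
+ */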
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u16 vlan_id)
+{
+ struct net_device *ndev = qdev->ndev;
+ struct sk_buff *skb = NULL;
+
+ QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+
+ skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
+ if (unlikely(!skb)) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "No skb available, drop packet.\n");
+ rx_ring->rx_dropped++;
+ return;
+ }
+
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ netif_info(qdev, drv, qdev->ndev,
+ "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
+ dev_kfree_skb_any(skb);
+ rx_ring->rx_errors++;
+ return;
+ }
+
+ /* The max framesize filter on this chip is set higher than
+ * MTU since FCoE uses 2k frames.
+ */
+ if (skb->len > ndev->mtu + ETH_HLEN) {
+ dev_kfree_skb_any(skb);
+ rx_ring->rx_dropped++;
+ return;
+ }
+
+ /* loopback self test for ethtool */
+ if (test_bit(QL_SELFTEST, &qdev->flags)) {
+ ql_check_lb_frame(qdev, skb);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ prefetch(skb->data);
+ skb->dev = ndev;
+ if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_REG ? "Registered" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+ rx_ring->rx_multicast++;
+ }
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Promiscuous Packet.\n");
+ }
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb_checksum_none_assert(skb);
+
+ /* If rx checksum is on, and there are no
+ * csum or frame errors.
+ */
+ if ((ndev->features & NETIF_F_RXCSUM) &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ /* TCP frame. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+ (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+ /* Unfragmented ipv4 UDP frame. */
+ struct iphdr *iph = (struct iphdr *) skb->data;
+ if (!(iph->frag_off &
+				htons(IP_MF|IP_OFFSET))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+					"UDP checksum done!\n");
+ }
+ }
+ }
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += skb->len;
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
+ __vlan_hwaccel_put_tag(skb, vlan_id);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ napi_gro_receive(&rx_ring->napi, skb);
+ else
+ netif_receive_skb(skb);
+}
+
+/* Process an inbound completion from an rx ring. */
+static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+ u32 length = le32_to_cpu(ib_mac_rsp->data_len);
+ u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+ ((le16_to_cpu(ib_mac_rsp->vlan_id) &
+ IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
+
+ QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+
+ if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
+ /* The data and headers are split into
+ * separate buffers.
+ */
+ ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
+ vlan_id);
+ } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
+ /* The data fit in a single small buffer.
+ * Allocate a new skb, copy the data and
+ * return the buffer to the free pool.
+ */
+ ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
+ /* TCP packet in a page chunk that's been checksummed.
+ * Tack it on to our GRO skb and let it go.
+ */
+ ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
+ /* Non-TCP packet in a page chunk. Allocate an
+ * skb, tack it on frags, and send it up.
+ */
+ ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else {
+ /* Non-TCP/UDP large frames that span multiple buffers
+		 * can be processed correctly by the split frame logic.
+ */
+ ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
+ vlan_id);
+ }
+
+ return (unsigned long)length;
+}
+
+/* Process an outbound completion from an rx ring. */
+static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
+ struct ob_mac_iocb_rsp *mac_rsp)
+{
+ struct tx_ring *tx_ring;
+ struct tx_ring_desc *tx_ring_desc;
+
+ QL_DUMP_OB_MAC_RSP(mac_rsp);
+ tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
+ tx_ring_desc = &tx_ring->q[mac_rsp->tid];
+ ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
+ tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
+ tx_ring->tx_packets++;
+ dev_kfree_skb(tx_ring_desc->skb);
+ tx_ring_desc->skb = NULL;
+
+ if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
+ OB_MAC_IOCB_RSP_S |
+ OB_MAC_IOCB_RSP_L |
+ OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
+ if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "Total descriptor length did not match transfer length.\n");
+ }
+ if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "Frame too short to be valid, not sent.\n");
+ }
+ if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "Frame too long, but sent anyway.\n");
+ }
+ if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "PCI backplane error. Frame not sent.\n");
+ }
+ }
+ atomic_inc(&tx_ring->tx_count);
+}
+
+/* Fire up a handler to reset the MPI processor. */
+void ql_queue_fw_error(struct ql_adapter *qdev)
+{
+ ql_link_off(qdev);
+ queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
+}
+
+void ql_queue_asic_error(struct ql_adapter *qdev)
+{
+ ql_link_off(qdev);
+ ql_disable_interrupts(qdev);
+ /* Clear adapter up bit to signal the recovery
+ * process that it shouldn't kill the reset worker
+ * thread
+ */
+ clear_bit(QL_ADAPTER_UP, &qdev->flags);
+ /* Set asic recovery bit to indicate reset process that we are
+ * in fatal error recovery process rather than normal close
+ */
+ set_bit(QL_ASIC_RECOVERY, &qdev->flags);
+ queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
+}
+
+static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
+ struct ib_ae_iocb_rsp *ib_ae_rsp)
+{
+ switch (ib_ae_rsp->event) {
+ case MGMT_ERR_EVENT:
+ netif_err(qdev, rx_err, qdev->ndev,
+ "Management Processor Fatal Error.\n");
+ ql_queue_fw_error(qdev);
+ return;
+
+ case CAM_LOOKUP_ERR_EVENT:
+ netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
+ netdev_err(qdev->ndev, "This event shouldn't occur.\n");
+ ql_queue_asic_error(qdev);
+ return;
+
+ case SOFT_ECC_ERROR_EVENT:
+ netdev_err(qdev->ndev, "Soft ECC error detected.\n");
+ ql_queue_asic_error(qdev);
+ break;
+
+ case PCI_ERR_ANON_BUF_RD:
+ netdev_err(qdev->ndev, "PCI error occurred when reading "
+ "anonymous buffers from rx_ring %d.\n",
+ ib_ae_rsp->q_id);
+ ql_queue_asic_error(qdev);
+ break;
+
+ default:
+ netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
+ ib_ae_rsp->event);
+ ql_queue_asic_error(qdev);
+ break;
+ }
+}
+
+static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
+{
+ struct ql_adapter *qdev = rx_ring->qdev;
+ u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+ struct ob_mac_iocb_rsp *net_rsp = NULL;
+ int count = 0;
+
+ struct tx_ring *tx_ring;
+ /* While there are entries in the completion queue. */
+ while (prod != rx_ring->cnsmr_idx) {
+
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
+ rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
+
+ net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
+ rmb();
+ switch (net_rsp->opcode) {
+
+ case OPCODE_OB_MAC_TSO_IOCB:
+ case OPCODE_OB_MAC_IOCB:
+ ql_process_mac_tx_intr(qdev, net_rsp);
+ break;
+ default:
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+				     "Hit default case, not handled! Dropping the packet, opcode = %x.\n",
+ net_rsp->opcode);
+ }
+ count++;
+ ql_update_cq(rx_ring);
+ prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+ }
+ if (!net_rsp)
+ return 0;
+ ql_write_cq_idx(rx_ring);
+ tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
+ if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
+ if (atomic_read(&tx_ring->queue_stopped) &&
+ (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
+ /*
+ * The queue got stopped because the tx_ring was full.
+ * Wake it up, because it's now at least 25% empty.
+ */
+ netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
+ }
+
+ return count;
+}
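+
+/* Wake threshold sketch: with, say, a 256-entry tx ring the stopped
+ * subqueue is only woken once tx_count climbs back above 256 / 4 = 64
+ * free slots, which keeps the queue from flapping at the full boundary.
+ */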
+
+static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
+{
+ struct ql_adapter *qdev = rx_ring->qdev;
+ u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+ struct ql_net_rsp_iocb *net_rsp;
+ int count = 0;
+
+ /* While there are entries in the completion queue. */
+ while (prod != rx_ring->cnsmr_idx) {
+
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "cq_id = %d, prod = %d, cnsmr = %d.\n.",
+ rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
+
+ net_rsp = rx_ring->curr_entry;
+ rmb();
+ switch (net_rsp->opcode) {
+ case OPCODE_IB_MAC_IOCB:
+ ql_process_mac_rx_intr(qdev, rx_ring,
+ (struct ib_mac_iocb_rsp *)
+ net_rsp);
+ break;
+
+ case OPCODE_IB_AE_IOCB:
+ ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
+ net_rsp);
+ break;
+ default:
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Hit default case, not handled! dropping the packet, opcode = %x.\n",
+ net_rsp->opcode);
+ break;
+ }
+ count++;
+ ql_update_cq(rx_ring);
+ prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
+ if (count == budget)
+ break;
+ }
+ ql_update_buffer_queues(qdev, rx_ring);
+ ql_write_cq_idx(rx_ring);
+ return count;
+}
+
+static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
+{
+ struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
+ struct ql_adapter *qdev = rx_ring->qdev;
+ struct rx_ring *trx_ring;
+ int i, work_done = 0;
+ struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
+
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
+
+ /* Service the TX rings first. They start
+ * right after the RSS rings. */
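+ /* e.g. with 4 RSS rings, rx_ring[4] onward are the TX
+ * completion rings.
+ */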
+ for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
+ trx_ring = &qdev->rx_ring[i];
+ /* If this TX completion ring belongs to this vector and
+ * it's not empty then service it.
+ */
+ if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
+ (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
+ trx_ring->cnsmr_idx)) {
+ netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+ "%s: Servicing TX completion ring %d.\n",
+ __func__, trx_ring->cq_id);
+ ql_clean_outbound_rx_ring(trx_ring);
+ }
+ }
+
+ /*
+ * Now service the RSS ring if it's active.
+ */
+ if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
+ rx_ring->cnsmr_idx) {
+ netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+ "%s: Servicing RX completion ring %d.\n",
+ __func__, rx_ring->cq_id);
+ work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
+ }
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ ql_enable_completion_interrupt(qdev, rx_ring->irq);
+ }
+ return work_done;
+}
+
+static void qlge_vlan_mode(struct net_device *ndev, u32 features)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ if (features & NETIF_F_HW_VLAN_RX) {
+ netif_printk(qdev, ifup, KERN_DEBUG, ndev,
+ "Turning on VLAN in NIC_RCV_CFG.\n");
+ ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
+ NIC_RCV_CFG_VLAN_MATCH_AND_NON);
+ } else {
+ netif_printk(qdev, ifup, KERN_DEBUG, ndev,
+ "Turning off VLAN in NIC_RCV_CFG.\n");
+ ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
+ }
+}
+
+static u32 qlge_fix_features(struct net_device *ndev, u32 features)
+{
+ /*
+ * Since there is no support for separate rx/tx vlan accel
+ * enable/disable make sure tx flag is always in same state as rx.
+ */
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ return features;
+}
+
+static int qlge_set_features(struct net_device *ndev, u32 features)
+{
+ u32 changed = ndev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ qlge_vlan_mode(ndev, features);
+
+ return 0;
+}
+
+static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
+{
+ u32 enable_bit = MAC_ADDR_E;
+
+ if (ql_set_mac_addr_reg
+ (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init vlan address.\n");
+ }
+}
+
+static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int status;
+
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return;
+
+ __qlge_vlan_rx_add_vid(qdev, vid);
+ set_bit(vid, qdev->active_vlans);
+
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+}
+
+static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
+{
+ u32 enable_bit = 0;
+
+ if (ql_set_mac_addr_reg
+ (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to clear vlan address.\n");
+ }
+}
+
+static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int status;
+
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return;
+
+ __qlge_vlan_rx_kill_vid(qdev, vid);
+ clear_bit(vid, qdev->active_vlans);
+
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+}
+
+static void qlge_restore_vlan(struct ql_adapter *qdev)
+{
+ int status;
+ u16 vid;
+
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return;
+
+ for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
+ __qlge_vlan_rx_add_vid(qdev, vid);
+
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+}
+
+/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
+static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
+{
+ struct rx_ring *rx_ring = dev_id;
+ napi_schedule(&rx_ring->napi);
+ return IRQ_HANDLED;
+}
+
+/* This handles a fatal error, MPI activity, and the default
+ * rx_ring in an MSI-X multiple vector environment.
+ * In an MSI/legacy environment it also processes the rest of
+ * the rx_rings.
+ */
+static irqreturn_t qlge_isr(int irq, void *dev_id)
+{
+ struct rx_ring *rx_ring = dev_id;
+ struct ql_adapter *qdev = rx_ring->qdev;
+ struct intr_context *intr_context = &qdev->intr_context[0];
+ u32 var;
+ int work_done = 0;
+
+ spin_lock(&qdev->hw_lock);
+ if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
+ netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+ "Shared Interrupt, Not ours!\n");
+ spin_unlock(&qdev->hw_lock);
+ return IRQ_NONE;
+ }
+ spin_unlock(&qdev->hw_lock);
+
+ var = ql_disable_completion_interrupt(qdev, intr_context->intr);
+
+ /*
+ * Check for fatal error.
+ */
+ if (var & STS_FE) {
+ ql_queue_asic_error(qdev);
+ netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
+ var = ql_read32(qdev, ERR_STS);
+ netdev_err(qdev->ndev, "Resetting chip. "
+ "Error Status Register = 0x%x\n", var);
+ return IRQ_HANDLED;
+ }
+
+ /*
+ * Check MPI processor activity.
+ */
+ if ((var & STS_PI) &&
+ (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
+ /*
+ * We've got an async event or mailbox completion.
+ * Handle it and clear the source of the interrupt.
+ */
+ netif_err(qdev, intr, qdev->ndev,
+ "Got MPI processor interrupt.\n");
+ ql_disable_completion_interrupt(qdev, intr_context->intr);
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+ queue_delayed_work_on(smp_processor_id(),
+ qdev->workqueue, &qdev->mpi_work, 0);
+ work_done++;
+ }
+
+ /*
+ * Get the bit-mask that shows the active queues for this
+ * pass. Compare it to the queues that this irq services
+ * and call napi if there's a match.
+ */
+ var = ql_read32(qdev, ISR1);
+ if (var & intr_context->irq_mask) {
+ netif_info(qdev, intr, qdev->ndev,
+ "Waking handler for rx_ring[0].\n");
+ ql_disable_completion_interrupt(qdev, intr_context->intr);
+ napi_schedule(&rx_ring->napi);
+ work_done++;
+ }
+ ql_enable_completion_interrupt(qdev, intr_context->intr);
+ return work_done ? IRQ_HANDLED : IRQ_NONE;
+}
+
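+/* Set up a TSO request. Returns 1 if the frame was prepared for
+ * TSO, 0 if TSO does not apply, or a negative errno if un-cloning
+ * the skb header failed.
+ */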
+static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
+{
+
+ if (skb_is_gso(skb)) {
+ int err;
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
+ mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
+ mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
+ mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
+ mac_iocb_ptr->total_hdrs_len =
+ cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
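+ /* Pack the L3 and L4 header offsets into a single field;
+ * e.g. an untagged IPv4/TCP frame has a network offset of
+ * 14 and a transport offset of 34.
+ */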
+ mac_iocb_ptr->net_trans_offset =
+ cpu_to_le16(skb_network_offset(skb) |
+ skb_transport_offset(skb)
+ << OB_MAC_TRANSPORT_HDR_SHIFT);
+ mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
+ if (likely(skb->protocol == htons(ETH_P_IP))) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->check = 0;
+ mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+ return 1;
+ }
+ return 0;
+}
+
+static void ql_hw_csum_setup(struct sk_buff *skb,
+ struct ob_mac_tso_iocb_req *mac_iocb_ptr)
+{
+ int len;
+ struct iphdr *iph = ip_hdr(skb);
+ __sum16 *check;
+ mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
+ mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
+ mac_iocb_ptr->net_trans_offset =
+ cpu_to_le16(skb_network_offset(skb) |
+ skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
+
+ mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
+ len = (ntohs(iph->tot_len) - (iph->ihl << 2));
+ if (likely(iph->protocol == IPPROTO_TCP)) {
+ check = &(tcp_hdr(skb)->check);
+ mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
+ mac_iocb_ptr->total_hdrs_len =
+ cpu_to_le16(skb_transport_offset(skb) +
+ (tcp_hdr(skb)->doff << 2));
+ } else {
+ check = &(udp_hdr(skb)->check);
+ mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
+ mac_iocb_ptr->total_hdrs_len =
+ cpu_to_le16(skb_transport_offset(skb) +
+ sizeof(struct udphdr));
+ }
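+ /* Seed the L4 checksum field with the pseudo-header checksum
+ * so the hardware can fold in the rest of the packet.
+ */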
+ *check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, len, iph->protocol, 0);
+}
+
+static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct tx_ring_desc *tx_ring_desc;
+ struct ob_mac_iocb_req *mac_iocb_ptr;
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int tso;
+ struct tx_ring *tx_ring;
+ u32 tx_ring_idx = (u32) skb->queue_mapping;
+
+ tx_ring = &qdev->tx_ring[tx_ring_idx];
+
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
+ if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
+ netif_info(qdev, tx_queued, qdev->ndev,
+ "%s: shutting down tx queue %d du to lack of resources.\n",
+ __func__, tx_ring_idx);
+ netif_stop_subqueue(ndev, tx_ring->wq_id);
+ atomic_inc(&tx_ring->queue_stopped);
+ tx_ring->tx_errors++;
+ return NETDEV_TX_BUSY;
+ }
+ tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
+ mac_iocb_ptr = tx_ring_desc->queue_entry;
+ memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
+
+ mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
+ mac_iocb_ptr->tid = tx_ring_desc->index;
+ /* Record the tx queue for this IO so that the completion
+ * handler can use it to establish the context.
+ */
+ mac_iocb_ptr->txq_idx = tx_ring_idx;
+ tx_ring_desc->skb = skb;
+
+ mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
+
+ if (vlan_tx_tag_present(skb)) {
+ netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+ "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
+ mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
+ mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
+ }
+ tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
+ if (tso < 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
+ ql_hw_csum_setup(skb,
+ (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
+ }
+ if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
+ NETDEV_TX_OK) {
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "Could not map the segments.\n");
+ tx_ring->tx_errors++;
+ return NETDEV_TX_BUSY;
+ }
+ QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
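+ /* Advance the producer index, wrapping to 0 past the last
+ * slot (e.g. after slot 255 on a 256-entry work queue).
+ */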
+ tx_ring->prod_idx++;
+ if (tx_ring->prod_idx == tx_ring->wq_len)
+ tx_ring->prod_idx = 0;
+ wmb();
+
+ ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
+ netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+ "tx queued, slot %d, len %d\n",
+ tx_ring->prod_idx, skb->len);
+
+ atomic_dec(&tx_ring->tx_count);
+ return NETDEV_TX_OK;
+}
+
+static void ql_free_shadow_space(struct ql_adapter *qdev)
+{
+ if (qdev->rx_ring_shadow_reg_area) {
+ pci_free_consistent(qdev->pdev,
+ PAGE_SIZE,
+ qdev->rx_ring_shadow_reg_area,
+ qdev->rx_ring_shadow_reg_dma);
+ qdev->rx_ring_shadow_reg_area = NULL;
+ }
+ if (qdev->tx_ring_shadow_reg_area) {
+ pci_free_consistent(qdev->pdev,
+ PAGE_SIZE,
+ qdev->tx_ring_shadow_reg_area,
+ qdev->tx_ring_shadow_reg_dma);
+ qdev->tx_ring_shadow_reg_area = NULL;
+ }
+}
+
+static int ql_alloc_shadow_space(struct ql_adapter *qdev)
+{
+ qdev->rx_ring_shadow_reg_area =
+ pci_alloc_consistent(qdev->pdev,
+ PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
+ if (qdev->rx_ring_shadow_reg_area == NULL) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Allocation of RX shadow space failed.\n");
+ return -ENOMEM;
+ }
+ memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
+ qdev->tx_ring_shadow_reg_area =
+ pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
+ &qdev->tx_ring_shadow_reg_dma);
+ if (qdev->tx_ring_shadow_reg_area == NULL) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Allocation of TX shadow space failed.\n");
+ goto err_wqp_sh_area;
+ }
+ memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
+ return 0;
+
+err_wqp_sh_area:
+ pci_free_consistent(qdev->pdev,
+ PAGE_SIZE,
+ qdev->rx_ring_shadow_reg_area,
+ qdev->rx_ring_shadow_reg_dma);
+ return -ENOMEM;
+}
+
+static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
+{
+ struct tx_ring_desc *tx_ring_desc;
+ int i;
+ struct ob_mac_iocb_req *mac_iocb_ptr;
+
+ mac_iocb_ptr = tx_ring->wq_base;
+ tx_ring_desc = tx_ring->q;
+ for (i = 0; i < tx_ring->wq_len; i++) {
+ tx_ring_desc->index = i;
+ tx_ring_desc->skb = NULL;
+ tx_ring_desc->queue_entry = mac_iocb_ptr;
+ mac_iocb_ptr++;
+ tx_ring_desc++;
+ }
+ atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
+ atomic_set(&tx_ring->queue_stopped, 0);
+}
+
+static void ql_free_tx_resources(struct ql_adapter *qdev,
+ struct tx_ring *tx_ring)
+{
+ if (tx_ring->wq_base) {
+ pci_free_consistent(qdev->pdev, tx_ring->wq_size,
+ tx_ring->wq_base, tx_ring->wq_base_dma);
+ tx_ring->wq_base = NULL;
+ }
+ kfree(tx_ring->q);
+ tx_ring->q = NULL;
+}
+
+static int ql_alloc_tx_resources(struct ql_adapter *qdev,
+ struct tx_ring *tx_ring)
+{
+ tx_ring->wq_base =
+ pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
+ &tx_ring->wq_base_dma);
+
+ if ((tx_ring->wq_base == NULL) ||
+ tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
+ netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
+ return -ENOMEM;
+ }
+ tx_ring->q =
+ kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
+ if (tx_ring->q == NULL)
+ goto err;
+
+ return 0;
+err:
+ pci_free_consistent(qdev->pdev, tx_ring->wq_size,
+ tx_ring->wq_base, tx_ring->wq_base_dma);
+ return -ENOMEM;
+}
+
+static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+ struct bq_desc *lbq_desc;
+
+ uint32_t curr_idx, clean_idx;
+
+ curr_idx = rx_ring->lbq_curr_idx;
+ clean_idx = rx_ring->lbq_clean_idx;
+ while (curr_idx != clean_idx) {
+ lbq_desc = &rx_ring->lbq[curr_idx];
+
+ if (lbq_desc->p.pg_chunk.last_flag) {
+ pci_unmap_page(qdev->pdev,
+ lbq_desc->p.pg_chunk.map,
+ ql_lbq_block_size(qdev),
+ PCI_DMA_FROMDEVICE);
+ lbq_desc->p.pg_chunk.last_flag = 0;
+ }
+
+ put_page(lbq_desc->p.pg_chunk.page);
+ lbq_desc->p.pg_chunk.page = NULL;
+
+ if (++curr_idx == rx_ring->lbq_len)
+ curr_idx = 0;
+
+ }
+}
+
+static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+ int i;
+ struct bq_desc *sbq_desc;
+
+ for (i = 0; i < rx_ring->sbq_len; i++) {
+ sbq_desc = &rx_ring->sbq[i];
+ if (sbq_desc == NULL) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "sbq_desc %d is NULL.\n", i);
+ return;
+ }
+ if (sbq_desc->p.skb) {
+ pci_unmap_single(qdev->pdev,
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(sbq_desc->p.skb);
+ sbq_desc->p.skb = NULL;
+ }
+ }
+}
+
+/* Free all large and small rx buffers associated
+ * with the completion queues for this device.
+ */
+static void ql_free_rx_buffers(struct ql_adapter *qdev)
+{
+ int i;
+ struct rx_ring *rx_ring;
+
+ for (i = 0; i < qdev->rx_ring_count; i++) {
+ rx_ring = &qdev->rx_ring[i];
+ if (rx_ring->lbq)
+ ql_free_lbq_buffers(qdev, rx_ring);
+ if (rx_ring->sbq)
+ ql_free_sbq_buffers(qdev, rx_ring);
+ }
+}
+
+static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
+{
+ struct rx_ring *rx_ring;
+ int i;
+
+ for (i = 0; i < qdev->rx_ring_count; i++) {
+ rx_ring = &qdev->rx_ring[i];
+ if (rx_ring->type != TX_Q)
+ ql_update_buffer_queues(qdev, rx_ring);
+ }
+}
+
+static void ql_init_lbq_ring(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring)
+{
+ int i;
+ struct bq_desc *lbq_desc;
+ __le64 *bq = rx_ring->lbq_base;
+
+ memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
+ for (i = 0; i < rx_ring->lbq_len; i++) {
+ lbq_desc = &rx_ring->lbq[i];
+ memset(lbq_desc, 0, sizeof(*lbq_desc));
+ lbq_desc->index = i;
+ lbq_desc->addr = bq;
+ bq++;
+ }
+}
+
+static void ql_init_sbq_ring(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring)
+{
+ int i;
+ struct bq_desc *sbq_desc;
+ __le64 *bq = rx_ring->sbq_base;
+
+ memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
+ for (i = 0; i < rx_ring->sbq_len; i++) {
+ sbq_desc = &rx_ring->sbq[i];
+ memset(sbq_desc, 0, sizeof(*sbq_desc));
+ sbq_desc->index = i;
+ sbq_desc->addr = bq;
+ bq++;
+ }
+}
+
+static void ql_free_rx_resources(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring)
+{
+ /* Free the small buffer queue. */
+ if (rx_ring->sbq_base) {
+ pci_free_consistent(qdev->pdev,
+ rx_ring->sbq_size,
+ rx_ring->sbq_base, rx_ring->sbq_base_dma);
+ rx_ring->sbq_base = NULL;
+ }
+
+ /* Free the small buffer queue control blocks. */
+ kfree(rx_ring->sbq);
+ rx_ring->sbq = NULL;
+
+ /* Free the large buffer queue. */
+ if (rx_ring->lbq_base) {
+ pci_free_consistent(qdev->pdev,
+ rx_ring->lbq_size,
+ rx_ring->lbq_base, rx_ring->lbq_base_dma);
+ rx_ring->lbq_base = NULL;
+ }
+
+ /* Free the large buffer queue control blocks. */
+ kfree(rx_ring->lbq);
+ rx_ring->lbq = NULL;
+
+ /* Free the rx queue. */
+ if (rx_ring->cq_base) {
+ pci_free_consistent(qdev->pdev,
+ rx_ring->cq_size,
+ rx_ring->cq_base, rx_ring->cq_base_dma);
+ rx_ring->cq_base = NULL;
+ }
+}
+
+/* Allocate queues and buffers for this completion queue based
+ * on the values in the parameter structure. */
+static int ql_alloc_rx_resources(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring)
+{
+
+ /*
+ * Allocate the completion queue for this rx_ring.
+ */
+ rx_ring->cq_base =
+ pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
+ &rx_ring->cq_base_dma);
+
+ if (rx_ring->cq_base == NULL) {
+ netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
+ return -ENOMEM;
+ }
+
+ if (rx_ring->sbq_len) {
+ /*
+ * Allocate small buffer queue.
+ */
+ rx_ring->sbq_base =
+ pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
+ &rx_ring->sbq_base_dma);
+
+ if (rx_ring->sbq_base == NULL) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Small buffer queue allocation failed.\n");
+ goto err_mem;
+ }
+
+ /*
+ * Allocate small buffer queue control blocks.
+ */
+ rx_ring->sbq =
+ kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
+ GFP_KERNEL);
+ if (rx_ring->sbq == NULL) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Small buffer queue control block allocation failed.\n");
+ goto err_mem;
+ }
+
+ ql_init_sbq_ring(qdev, rx_ring);
+ }
+
+ if (rx_ring->lbq_len) {
+ /*
+ * Allocate large buffer queue.
+ */
+ rx_ring->lbq_base =
+ pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
+ &rx_ring->lbq_base_dma);
+
+ if (rx_ring->lbq_base == NULL) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Large buffer queue allocation failed.\n");
+ goto err_mem;
+ }
+ /*
+ * Allocate large buffer queue control blocks.
+ */
+ rx_ring->lbq =
+ kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
+ GFP_KERNEL);
+ if (rx_ring->lbq == NULL) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Large buffer queue control block allocation failed.\n");
+ goto err_mem;
+ }
+
+ ql_init_lbq_ring(qdev, rx_ring);
+ }
+
+ return 0;
+
+err_mem:
+ ql_free_rx_resources(qdev, rx_ring);
+ return -ENOMEM;
+}
+
+static void ql_tx_ring_clean(struct ql_adapter *qdev)
+{
+ struct tx_ring *tx_ring;
+ struct tx_ring_desc *tx_ring_desc;
+ int i, j;
+
+ /*
+ * Loop through all queues and free
+ * any resources.
+ */
+ for (j = 0; j < qdev->tx_ring_count; j++) {
+ tx_ring = &qdev->tx_ring[j];
+ for (i = 0; i < tx_ring->wq_len; i++) {
+ tx_ring_desc = &tx_ring->q[i];
+ if (tx_ring_desc && tx_ring_desc->skb) {
+ netif_err(qdev, ifdown, qdev->ndev,
+ "Freeing lost SKB %p, from queue %d, index %d.\n",
+ tx_ring_desc->skb, j,
+ tx_ring_desc->index);
+ ql_unmap_send(qdev, tx_ring_desc,
+ tx_ring_desc->map_cnt);
+ dev_kfree_skb(tx_ring_desc->skb);
+ tx_ring_desc->skb = NULL;
+ }
+ }
+ }
+}
+
+static void ql_free_mem_resources(struct ql_adapter *qdev)
+{
+ int i;
+
+ for (i = 0; i < qdev->tx_ring_count; i++)
+ ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
+ for (i = 0; i < qdev->rx_ring_count; i++)
+ ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
+ ql_free_shadow_space(qdev);
+}
+
+static int ql_alloc_mem_resources(struct ql_adapter *qdev)
+{
+ int i;
+
+ /* Allocate space for our shadow registers and such. */
+ if (ql_alloc_shadow_space(qdev))
+ return -ENOMEM;
+
+ for (i = 0; i < qdev->rx_ring_count; i++) {
+ if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "RX resource allocation failed.\n");
+ goto err_mem;
+ }
+ }
+ /* Allocate tx queue resources */
+ for (i = 0; i < qdev->tx_ring_count; i++) {
+ if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "TX resource allocation failed.\n");
+ goto err_mem;
+ }
+ }
+ return 0;
+
+err_mem:
+ ql_free_mem_resources(qdev);
+ return -ENOMEM;
+}
+
+/* Set up the rx ring control block and pass it to the chip.
+ * The control block is defined as
+ * "Completion Queue Initialization Control Block", or cqicb.
+ */
+static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+{
+ struct cqicb *cqicb = &rx_ring->cqicb;
+ void *shadow_reg = qdev->rx_ring_shadow_reg_area +
+ (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
+ u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
+ (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
+ void __iomem *doorbell_area =
+ qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
+ int err = 0;
+ u16 bq_len;
+ u64 tmp;
+ __le64 *base_indirect_ptr;
+ int page_entries;
+
+ /* Set up the shadow registers for this ring. */
+ rx_ring->prod_idx_sh_reg = shadow_reg;
+ rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
+ *rx_ring->prod_idx_sh_reg = 0;
+ shadow_reg += sizeof(u64);
+ shadow_reg_dma += sizeof(u64);
+ rx_ring->lbq_base_indirect = shadow_reg;
+ rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
+ shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
+ shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
+ rx_ring->sbq_base_indirect = shadow_reg;
+ rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
+
+ /* PCI doorbell mem area + 0x00 for consumer index register */
+ rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
+ rx_ring->cnsmr_idx = 0;
+ rx_ring->curr_entry = rx_ring->cq_base;
+
+ /* PCI doorbell mem area + 0x04 for valid register */
+ rx_ring->valid_db_reg = doorbell_area + 0x04;
+
+ /* PCI doorbell mem area + 0x18 for large buffer consumer */
+ rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
+
+ /* PCI doorbell mem area + 0x1c */
+ rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
+
+ memset((void *)cqicb, 0, sizeof(struct cqicb));
+ cqicb->msix_vect = rx_ring->irq;
+
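+ /* The 16-bit length fields encode a count of 65536 as 0. */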
+ bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
+ cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
+
+ cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
+
+ cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
+
+ /*
+ * Set up the control block load flags.
+ */
+ cqicb->flags = FLAGS_LC | /* Load queue base address */
+ FLAGS_LV | /* Load MSI-X vector */
+ FLAGS_LI; /* Load irq delay values */
+ if (rx_ring->lbq_len) {
+ cqicb->flags |= FLAGS_LL; /* Load lbq values */
+ tmp = (u64)rx_ring->lbq_base_dma;
+ base_indirect_ptr = rx_ring->lbq_base_indirect;
+ page_entries = 0;
+ do {
+ *base_indirect_ptr = cpu_to_le64(tmp);
+ tmp += DB_PAGE_SIZE;
+ base_indirect_ptr++;
+ page_entries++;
+ } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
+ cqicb->lbq_addr =
+ cpu_to_le64(rx_ring->lbq_base_indirect_dma);
+ bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
+ (u16) rx_ring->lbq_buf_size;
+ cqicb->lbq_buf_size = cpu_to_le16(bq_len);
+ bq_len = (rx_ring->lbq_len == 65536) ? 0 :
+ (u16) rx_ring->lbq_len;
+ cqicb->lbq_len = cpu_to_le16(bq_len);
+ rx_ring->lbq_prod_idx = 0;
+ rx_ring->lbq_curr_idx = 0;
+ rx_ring->lbq_clean_idx = 0;
+ rx_ring->lbq_free_cnt = rx_ring->lbq_len;
+ }
+ if (rx_ring->sbq_len) {
+ cqicb->flags |= FLAGS_LS; /* Load sbq values */
+ tmp = (u64)rx_ring->sbq_base_dma;
+ base_indirect_ptr = rx_ring->sbq_base_indirect;
+ page_entries = 0;
+ do {
+ *base_indirect_ptr = cpu_to_le64(tmp);
+ tmp += DB_PAGE_SIZE;
+ base_indirect_ptr++;
+ page_entries++;
+ } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
+ cqicb->sbq_addr =
+ cpu_to_le64(rx_ring->sbq_base_indirect_dma);
+ cqicb->sbq_buf_size =
+ cpu_to_le16((u16)(rx_ring->sbq_buf_size));
+ bq_len = (rx_ring->sbq_len == 65536) ? 0 :
+ (u16) rx_ring->sbq_len;
+ cqicb->sbq_len = cpu_to_le16(bq_len);
+ rx_ring->sbq_prod_idx = 0;
+ rx_ring->sbq_curr_idx = 0;
+ rx_ring->sbq_clean_idx = 0;
+ rx_ring->sbq_free_cnt = rx_ring->sbq_len;
+ }
+ switch (rx_ring->type) {
+ case TX_Q:
+ cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
+ cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
+ break;
+ case RX_Q:
+ /* Inbound completion handling rx_rings run in
+ * separate NAPI contexts.
+ */
+ netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
+ 64);
+ cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
+ cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
+ break;
+ default:
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Invalid rx_ring->type = %d.\n", rx_ring->type);
+ }
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Initializing rx work queue.\n");
+ err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
+ CFG_LCQ, rx_ring->cq_id);
+ if (err) {
+ netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
+ return err;
+ }
+ return err;
+}
+
+static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
+{
+ struct wqicb *wqicb = (struct wqicb *)tx_ring;
+ void __iomem *doorbell_area =
+ qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
+ void *shadow_reg = qdev->tx_ring_shadow_reg_area +
+ (tx_ring->wq_id * sizeof(u64));
+ u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
+ (tx_ring->wq_id * sizeof(u64));
+ int err = 0;
+
+ /*
+ * Assign doorbell registers for this tx_ring.
+ */
+ /* TX PCI doorbell mem area for tx producer index */
+ tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
+ tx_ring->prod_idx = 0;
+ /* TX PCI doorbell mem area + 0x04 */
+ tx_ring->valid_db_reg = doorbell_area + 0x04;
+
+ /*
+ * Assign shadow registers for this tx_ring.
+ */
+ tx_ring->cnsmr_idx_sh_reg = shadow_reg;
+ tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
+
+ wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
+ wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
+ Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
+ wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
+ wqicb->rid = 0;
+ wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
+
+ wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
+
+ ql_init_tx_ring(qdev, tx_ring);
+
+ err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
+ (u16) tx_ring->wq_id);
+ if (err) {
+ netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
+ return err;
+ }
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Successfully loaded WQICB.\n");
+ return err;
+}
+
+static void ql_disable_msix(struct ql_adapter *qdev)
+{
+ if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+ pci_disable_msix(qdev->pdev);
+ clear_bit(QL_MSIX_ENABLED, &qdev->flags);
+ kfree(qdev->msi_x_entry);
+ qdev->msi_x_entry = NULL;
+ } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
+ pci_disable_msi(qdev->pdev);
+ clear_bit(QL_MSI_ENABLED, &qdev->flags);
+ }
+}
+
+/* We start by trying to get the number of vectors
+ * stored in qdev->intr_count. If we don't get that
+ * many then we reduce the count and try again.
+ */
+static void ql_enable_msix(struct ql_adapter *qdev)
+{
+ int i, err;
+
+ /* Get the MSIX vectors. */
+ if (qlge_irq_type == MSIX_IRQ) {
+ /* Try to alloc space for the msix struct,
+ * if it fails then go to MSI/legacy.
+ */
+ qdev->msi_x_entry = kcalloc(qdev->intr_count,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!qdev->msi_x_entry) {
+ qlge_irq_type = MSI_IRQ;
+ goto msi;
+ }
+
+ for (i = 0; i < qdev->intr_count; i++)
+ qdev->msi_x_entry[i].entry = i;
+
+ /* Loop to get our vectors. We start with
+ * what we want and settle for what we get.
+ */
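+ /* On partial failure pci_enable_msix() returns the number
+ * of vectors that could have been allocated, so we retry
+ * with that smaller count.
+ */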
+ do {
+ err = pci_enable_msix(qdev->pdev,
+ qdev->msi_x_entry, qdev->intr_count);
+ if (err > 0)
+ qdev->intr_count = err;
+ } while (err > 0);
+
+ if (err < 0) {
+ kfree(qdev->msi_x_entry);
+ qdev->msi_x_entry = NULL;
+ netif_warn(qdev, ifup, qdev->ndev,
+ "MSI-X Enable failed, trying MSI.\n");
+ qdev->intr_count = 1;
+ qlge_irq_type = MSI_IRQ;
+ } else if (err == 0) {
+ set_bit(QL_MSIX_ENABLED, &qdev->flags);
+ netif_info(qdev, ifup, qdev->ndev,
+ "MSI-X Enabled, got %d vectors.\n",
+ qdev->intr_count);
+ return;
+ }
+ }
+msi:
+ qdev->intr_count = 1;
+ if (qlge_irq_type == MSI_IRQ) {
+ if (!pci_enable_msi(qdev->pdev)) {
+ set_bit(QL_MSI_ENABLED, &qdev->flags);
+ netif_info(qdev, ifup, qdev->ndev,
+ "Running with MSI interrupts.\n");
+ return;
+ }
+ }
+ qlge_irq_type = LEG_IRQ;
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Running with legacy interrupts.\n");
+}
+
+/* Each vector services 1 RSS ring and 1 or more
+ * TX completion rings. This function loops through
+ * the TX completion rings and assigns the vector that
+ * will service it. An example would be if there are
+ * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
+ * This would mean that vector 0 would service RSS ring 0
+ * and TX completion rings 0,1,2 and 3. Vector 1 would
+ * service RSS ring 1 and TX completion rings 4,5,6 and 7.
+ */
+static void ql_set_tx_vect(struct ql_adapter *qdev)
+{
+ int i, j, vect;
+ u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
+
+ if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
+ /* Assign irq vectors to TX rx_rings.*/
+ for (vect = 0, j = 0, i = qdev->rss_ring_count;
+ i < qdev->rx_ring_count; i++) {
+ if (j == tx_rings_per_vector) {
+ vect++;
+ j = 0;
+ }
+ qdev->rx_ring[i].irq = vect;
+ j++;
+ }
+ } else {
+ /* For a single vector, all rings have an irq
+ * of zero.
+ */
+ for (i = 0; i < qdev->rx_ring_count; i++)
+ qdev->rx_ring[i].irq = 0;
+ }
+}
+
+/* Set the interrupt mask for this vector. Each vector
+ * will service 1 RSS ring and 1 or more TX completion
+ * rings. This function sets up a bit mask per vector
+ * that indicates which rings it services.
+ */
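+/* E.g. with 2 vectors and 8 TX completion rings (cq_ids 2-9),
+ * vector 0's mask covers cq 0 plus cqs 2-5 and vector 1's mask
+ * covers cq 1 plus cqs 6-9.
+ */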
+static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
+{
+ int j, vect = ctx->intr;
+ u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
+
+ if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
+ /* Add the RSS ring serviced by this vector
+ * to the mask.
+ */
+ ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
+ /* Add the TX ring(s) serviced by this vector
+ * to the mask. */
+ for (j = 0; j < tx_rings_per_vector; j++) {
+ ctx->irq_mask |=
+ (1 << qdev->rx_ring[qdev->rss_ring_count +
+ (vect * tx_rings_per_vector) + j].cq_id);
+ }
+ } else {
+ /* For a single vector we just shift each queue's
+ * ID into the mask.
+ */
+ for (j = 0; j < qdev->rx_ring_count; j++)
+ ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
+ }
+}
+
+/*
+ * Here we build the intr_context structures based on
+ * our rx_ring count and intr vector count.
+ * The intr_context structure is used to hook each vector
+ * to possibly different handlers.
+ */
+static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
+{
+ int i = 0;
+ struct intr_context *intr_context = &qdev->intr_context[0];
+
+ if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
+ /* Each rx_ring has its
+ * own intr_context since we have separate
+ * vectors for each queue.
+ */
+ for (i = 0; i < qdev->intr_count; i++, intr_context++) {
+ qdev->rx_ring[i].irq = i;
+ intr_context->intr = i;
+ intr_context->qdev = qdev;
+ /* Set up this vector's bit-mask that indicates
+ * which queues it services.
+ */
+ ql_set_irq_mask(qdev, intr_context);
+ /*
+ * We set up each vector's enable/disable/read bits so
+ * there are no bit/mask calculations in the critical path.
+ */
+ intr_context->intr_en_mask =
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+ INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
+ | i;
+ intr_context->intr_dis_mask =
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+ INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
+ INTR_EN_IHD | i;
+ intr_context->intr_read_mask =
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+ INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
+ i;
+ if (i == 0) {
+ /* The first vector/queue handles
+ * broadcast/multicast, fatal errors,
+ * and firmware events. This in addition
+ * to normal inbound NAPI processing.
+ */
+ intr_context->handler = qlge_isr;
+ sprintf(intr_context->name, "%s-rx-%d",
+ qdev->ndev->name, i);
+ } else {
+ /*
+ * Inbound queues handle unicast frames only.
+ */
+ intr_context->handler = qlge_msix_rx_isr;
+ sprintf(intr_context->name, "%s-rx-%d",
+ qdev->ndev->name, i);
+ }
+ }
+ } else {
+ /*
+ * All rx_rings use the same intr_context since
+ * there is only one vector.
+ */
+ intr_context->intr = 0;
+ intr_context->qdev = qdev;
+ /*
+ * We set up each vector's enable/disable/read bits so
+ * there are no bit/mask calculations in the critical path.
+ */
+ intr_context->intr_en_mask =
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
+ intr_context->intr_dis_mask =
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
+ INTR_EN_TYPE_DISABLE;
+ intr_context->intr_read_mask =
+ INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
+ /*
+ * Single interrupt means one handler for all rings.
+ */
+ intr_context->handler = qlge_isr;
+ sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
+ /* Set up this vector's bit-mask that indicates
+ * which queues it services. In this case there is
+ * a single vector so it will service all RSS and
+ * TX completion rings.
+ */
+ ql_set_irq_mask(qdev, intr_context);
+ }
+ /* Tell the TX completion rings which MSIx vector
+ * they will be using.
+ */
+ ql_set_tx_vect(qdev);
+}
+
+static void ql_free_irq(struct ql_adapter *qdev)
+{
+ int i;
+ struct intr_context *intr_context = &qdev->intr_context[0];
+
+ for (i = 0; i < qdev->intr_count; i++, intr_context++) {
+ if (intr_context->hooked) {
+ if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+ free_irq(qdev->msi_x_entry[i].vector,
+ &qdev->rx_ring[i]);
+ netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
+ "freeing msix interrupt %d.\n", i);
+ } else {
+ free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
+ netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
+ "freeing msi interrupt %d.\n", i);
+ }
+ }
+ }
+ ql_disable_msix(qdev);
+}
+
+static int ql_request_irq(struct ql_adapter *qdev)
+{
+ int i;
+ int status = 0;
+ struct pci_dev *pdev = qdev->pdev;
+ struct intr_context *intr_context = &qdev->intr_context[0];
+
+ ql_resolve_queues_to_irqs(qdev);
+
+ for (i = 0; i < qdev->intr_count; i++, intr_context++) {
+ atomic_set(&intr_context->irq_cnt, 0);
+ if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
+ status = request_irq(qdev->msi_x_entry[i].vector,
+ intr_context->handler,
+ 0,
+ intr_context->name,
+ &qdev->rx_ring[i]);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed request for MSIX interrupt %d.\n",
+ i);
+ goto err_irq;
+ } else {
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Hooked intr %d, queue type %s, with name %s.\n",
+ i,
+ qdev->rx_ring[i].type == DEFAULT_Q ?
+ "DEFAULT_Q" :
+ qdev->rx_ring[i].type == TX_Q ?
+ "TX_Q" :
+ qdev->rx_ring[i].type == RX_Q ?
+ "RX_Q" : "",
+ intr_context->name);
+ }
+ } else {
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "trying msi or legacy interrupts.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s: irq = %d.\n", __func__, pdev->irq);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s: context->name = %s.\n", __func__,
+ intr_context->name);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s: dev_id = 0x%p.\n", __func__,
+ &qdev->rx_ring[0]);
+ status = request_irq(pdev->irq, qlge_isr,
+ test_bit(QL_MSI_ENABLED, &qdev->flags) ?
+ 0 : IRQF_SHARED,
+ intr_context->name, &qdev->rx_ring[0]);
+ if (status)
+ goto err_irq;
+
+ netif_err(qdev, ifup, qdev->ndev,
+ "Hooked intr %d, queue type %s, with name %s.\n",
+ i,
+ qdev->rx_ring[0].type == DEFAULT_Q ?
+ "DEFAULT_Q" :
+ qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
+ qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
+ intr_context->name);
+ }
+ intr_context->hooked = 1;
+ }
+ return status;
+err_irq:
+ netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!\n");
+ ql_free_irq(qdev);
+ return status;
+}
+
+static int ql_start_rss(struct ql_adapter *qdev)
+{
+ static const u8 init_hash_seed[] = {
+ 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+ 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+ 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+ 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+ 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
+ };
+ struct ricb *ricb = &qdev->ricb;
+ int status = 0;
+ int i;
+ u8 *hash_id = (u8 *) ricb->hash_cq_id;
+
+ memset((void *)ricb, 0, sizeof(*ricb));
+
+ ricb->base_cq = RSS_L4K;
+ ricb->flags =
+ (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
+ ricb->mask = cpu_to_le16((u16)(0x3ff));
+
+ /*
+ * Fill out the Indirection Table.
+ */
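+ /* With a power-of-2 RSS ring count the mask spreads the rings
+ * evenly over all 1024 entries, e.g. 0,1,2,3,0,1,2,3,... for
+ * four rings.
+ */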
+ for (i = 0; i < 1024; i++)
+ hash_id[i] = (i & (qdev->rss_ring_count - 1));
+
+ memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
+ memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
+
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
+
+ status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
+ return status;
+ }
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Successfully loaded RICB.\n");
+ return status;
+}
+
+static int ql_clear_routing_entries(struct ql_adapter *qdev)
+{
+ int i, status = 0;
+
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (status)
+ return status;
+ /* Clear all the entries in the routing table. */
+ for (i = 0; i < 16; i++) {
+ status = ql_set_routing_reg(qdev, i, 0, 0);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for CAM packets.\n");
+ break;
+ }
+ }
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ return status;
+}
+
+/* Initialize the frame-to-queue routing. */
+static int ql_route_initialize(struct ql_adapter *qdev)
+{
+ int status = 0;
+
+ /* Clear all the entries in the routing table. */
+ status = ql_clear_routing_entries(qdev);
+ if (status)
+ return status;
+
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (status)
+ return status;
+
+ status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
+ RT_IDX_IP_CSUM_ERR, 1);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register "
+ "for IP CSUM error packets.\n");
+ goto exit;
+ }
+ status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
+ RT_IDX_TU_CSUM_ERR, 1);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register "
+ "for TCP/UDP CSUM error packets.\n");
+ goto exit;
+ }
+ status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for broadcast packets.\n");
+ goto exit;
+ }
+ /* If we have more than one inbound queue, then turn on RSS in the
+ * routing block.
+ */
+ if (qdev->rss_ring_count > 1) {
+ status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
+ RT_IDX_RSS_MATCH, 1);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for MATCH RSS packets.\n");
+ goto exit;
+ }
+ }
+
+ status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
+ RT_IDX_CAM_HIT, 1);
+ if (status)
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for CAM packets.\n");
+exit:
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ return status;
+}
+
+int ql_cam_route_initialize(struct ql_adapter *qdev)
+{
+ int status, set;
+
+ /* Check if the link is up and use that to
+ * determine whether we are setting or clearing
+ * the MAC address in the CAM.
+ */
+ set = ql_read32(qdev, STS);
+ set &= qdev->port_link_up;
+ status = ql_set_mac_addr(qdev, set);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
+ return status;
+ }
+
+ status = ql_route_initialize(qdev);
+ if (status)
+ netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
+
+ return status;
+}
+
+static int ql_adapter_initialize(struct ql_adapter *qdev)
+{
+ u32 value, mask;
+ int i;
+ int status = 0;
+
+ /*
+ * Set up the System register to halt on errors.
+ */
+ value = SYS_EFE | SYS_FAE;
+ mask = value << 16;
+ ql_write32(qdev, SYS, mask | value);
+
+ /* Set the default queue, and VLAN behavior. */
+ value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
+ mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
+ ql_write32(qdev, NIC_RCV_CFG, (mask | value));
+
+ /* Set the MPI interrupt to enabled. */
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+
+ /* Enable the function, set pagesize, enable error checking. */
+ value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
+ FSC_EC | FSC_VM_PAGE_4K;
+ value |= SPLT_SETTING;
+
+ /* Set/clear header splitting. */
+ mask = FSC_VM_PAGESIZE_MASK |
+ FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
+ ql_write32(qdev, FSC, mask | value);
+
+ ql_write32(qdev, SPLT_HDR, SPLT_LEN);
+
+ /* Set RX packet routing to use the port/PCI function on which
+ * the packet arrived, in addition to the usual frame routing.
+ * This is helpful on bonding where both interfaces can have
+ * the same MAC address.
+ */
+ ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
+ /* Reroute all packets to our interface.
+ * They may have been routed to MPI firmware
+ * due to WOL.
+ */
+ value = ql_read32(qdev, MGMT_RCV_CFG);
+ value &= ~MGMT_RCV_CFG_RM;
+ mask = 0xffff0000;
+
+ /* Sticky reg needs clearing due to WOL. */
+ ql_write32(qdev, MGMT_RCV_CFG, mask);
+ ql_write32(qdev, MGMT_RCV_CFG, mask | value);
+
+ /* WOL is enabled by default on Mezz cards. */
+ if (qdev->pdev->subsystem_device == 0x0068 ||
+ qdev->pdev->subsystem_device == 0x0180)
+ qdev->wol = WAKE_MAGIC;
+
+ /* Start up the rx queues. */
+ for (i = 0; i < qdev->rx_ring_count; i++) {
+ status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to start rx ring[%d].\n", i);
+ return status;
+ }
+ }
+
+ /* If there is more than one inbound completion queue
+ * then download a RICB to configure RSS.
+ */
+ if (qdev->rss_ring_count > 1) {
+ status = ql_start_rss(qdev);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
+ return status;
+ }
+ }
+
+ /* Start up the tx queues. */
+ for (i = 0; i < qdev->tx_ring_count; i++) {
+ status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to start tx ring[%d].\n", i);
+ return status;
+ }
+ }
+
+ /* Initialize the port and set the max framesize. */
+ status = qdev->nic_ops->port_initialize(qdev);
+ if (status)
+ netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
+
+ /* Set up the MAC address and frame routing filter. */
+ status = ql_cam_route_initialize(qdev);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init CAM/Routing tables.\n");
+ return status;
+ }
+
+ /* Start NAPI for the RSS queues. */
+ for (i = 0; i < qdev->rss_ring_count; i++) {
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Enabling NAPI for rx_ring[%d].\n", i);
+ napi_enable(&qdev->rx_ring[i].napi);
+ }
+
+ return status;
+}
+
+/* Issue soft reset to chip. */
+static int ql_adapter_reset(struct ql_adapter *qdev)
+{
+ u32 value;
+ int status = 0;
+ unsigned long end_jiffies;
+
+ /* Clear all the entries in the routing table. */
+ status = ql_clear_routing_entries(qdev);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
+ return status;
+ }
+
+ end_jiffies = jiffies +
+ max((unsigned long)1, usecs_to_jiffies(30));
+
+ /* If the recovery bit is set, skip the mailbox command and
+ * just clear the bit; otherwise we are in the normal reset process.
+ */
+ if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
+ /* Stop management traffic. */
+ ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
+
+ /* Wait for the NIC and MGMNT FIFOs to empty. */
+ ql_wait_fifo_empty(qdev);
+ } else
+ clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
+
+ ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
+
+ do {
+ value = ql_read32(qdev, RST_FO);
+ if ((value & RST_FO_FR) == 0)
+ break;
+ cpu_relax();
+ } while (time_before(jiffies, end_jiffies));
+
+ if (value & RST_FO_FR) {
+ netif_err(qdev, ifdown, qdev->ndev,
+ "ETIMEDOUT!!! errored out of resetting the chip!\n");
+ status = -ETIMEDOUT;
+ }
+
+ /* Resume management traffic. */
+ ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
+ return status;
+}
+
+static void ql_display_dev_info(struct net_device *ndev)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ netif_info(qdev, probe, qdev->ndev,
+ "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
+ "XG Roll = %d, XG Rev = %d.\n",
+ qdev->func,
+ qdev->port,
+ qdev->chip_rev_id & 0x0000000f,
+ qdev->chip_rev_id >> 4 & 0x0000000f,
+ qdev->chip_rev_id >> 8 & 0x0000000f,
+ qdev->chip_rev_id >> 12 & 0x0000000f);
+ netif_info(qdev, probe, qdev->ndev,
+ "MAC address %pM\n", ndev->dev_addr);
+}
+
+static int ql_wol(struct ql_adapter *qdev)
+{
+ int status = 0;
+ u32 wol = MB_WOL_DISABLE;
+
+ /* The CAM is still intact after a reset, but if we
+ * are doing WOL, then we may need to program the
+ * routing regs. We would also need to issue the mailbox
+ * commands to instruct the MPI what to do per the ethtool
+ * settings.
+ */
+
+ if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
+ WAKE_MCAST | WAKE_BCAST)) {
+ netif_err(qdev, ifdown, qdev->ndev,
+ "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
+ qdev->wol);
+ return -EINVAL;
+ }
+
+ if (qdev->wol & WAKE_MAGIC) {
+ status = ql_mb_wol_set_magic(qdev, 1);
+ if (status) {
+ netif_err(qdev, ifdown, qdev->ndev,
+ "Failed to set magic packet on %s.\n",
+ qdev->ndev->name);
+ return status;
+ } else
+ netif_info(qdev, drv, qdev->ndev,
+ "Enabled magic packet successfully on %s.\n",
+ qdev->ndev->name);
+
+ wol |= MB_WOL_MAGIC_PKT;
+ }
+
+ if (qdev->wol) {
+ wol |= MB_WOL_MODE_ON;
+ status = ql_mb_wol_mode(qdev, wol);
+ netif_err(qdev, drv, qdev->ndev,
+ "WOL %s (wol code 0x%x) on %s\n",
+ (status == 0) ? "Successfully set" : "Failed",
+ wol, qdev->ndev->name);
+ }
+
+ return status;
+}
+
+static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
+{
+
+ /* Don't kill the reset worker thread if we
+ * are in the process of recovery.
+ */
+ if (test_bit(QL_ADAPTER_UP, &qdev->flags))
+ cancel_delayed_work_sync(&qdev->asic_reset_work);
+ cancel_delayed_work_sync(&qdev->mpi_reset_work);
+ cancel_delayed_work_sync(&qdev->mpi_work);
+ cancel_delayed_work_sync(&qdev->mpi_idc_work);
+ cancel_delayed_work_sync(&qdev->mpi_core_to_log);
+ cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
+}
+
+static int ql_adapter_down(struct ql_adapter *qdev)
+{
+ int i, status = 0;
+
+ ql_link_off(qdev);
+
+ ql_cancel_all_work_sync(qdev);
+
+ for (i = 0; i < qdev->rss_ring_count; i++)
+ napi_disable(&qdev->rx_ring[i].napi);
+
+ clear_bit(QL_ADAPTER_UP, &qdev->flags);
+
+ ql_disable_interrupts(qdev);
+
+ ql_tx_ring_clean(qdev);
+
+ /* Call netif_napi_del() from a common point. */
+ for (i = 0; i < qdev->rss_ring_count; i++)
+ netif_napi_del(&qdev->rx_ring[i].napi);
+
+ status = ql_adapter_reset(qdev);
+ if (status)
+ netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
+ qdev->func);
+ ql_free_rx_buffers(qdev);
+
+ return status;
+}
+
+static int ql_adapter_up(struct ql_adapter *qdev)
+{
+ int err = 0;
+
+ err = ql_adapter_initialize(qdev);
+ if (err) {
+ netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
+ goto err_init;
+ }
+ set_bit(QL_ADAPTER_UP, &qdev->flags);
+ ql_alloc_rx_buffers(qdev);
+ /* If the port is initialized and the
+ * link is up then turn on the carrier.
+ */
+ if ((ql_read32(qdev, STS) & qdev->port_init) &&
+ (ql_read32(qdev, STS) & qdev->port_link_up))
+ ql_link_on(qdev);
+ /* Restore rx mode. */
+ clear_bit(QL_ALLMULTI, &qdev->flags);
+ clear_bit(QL_PROMISCUOUS, &qdev->flags);
+ qlge_set_multicast_list(qdev->ndev);
+
+ /* Restore vlan setting. */
+ qlge_restore_vlan(qdev);
+
+ ql_enable_interrupts(qdev);
+ ql_enable_all_completion_interrupts(qdev);
+ netif_tx_start_all_queues(qdev->ndev);
+
+ return 0;
+err_init:
+ ql_adapter_reset(qdev);
+ return err;
+}
+
+static void ql_release_adapter_resources(struct ql_adapter *qdev)
+{
+ ql_free_mem_resources(qdev);
+ ql_free_irq(qdev);
+}
+
+static int ql_get_adapter_resources(struct ql_adapter *qdev)
+{
+ int status = 0;
+
+ if (ql_alloc_mem_resources(qdev)) {
+ netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
+ return -ENOMEM;
+ }
+ status = ql_request_irq(qdev);
+ return status;
+}
+
+static int qlge_close(struct net_device *ndev)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ /* If we hit the pci_channel_io_perm_failure
+ * condition, then we have already
+ * brought the adapter down.
+ */
+ if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
+ netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
+ clear_bit(QL_EEH_FATAL, &qdev->flags);
+ return 0;
+ }
+
+ /*
+ * Wait for device to recover from a reset.
+ * (Rarely happens, but possible.)
+ */
+ while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
+ msleep(1);
+ ql_adapter_down(qdev);
+ ql_release_adapter_resources(qdev);
+ return 0;
+}
+
+static int ql_configure_rings(struct ql_adapter *qdev)
+{
+ int i;
+ struct rx_ring *rx_ring;
+ struct tx_ring *tx_ring;
+ int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
+ unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
+ LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
+
+ qdev->lbq_buf_order = get_order(lbq_buf_len);
+
+ /* In a perfect world we have one RSS ring for each CPU
+ * and each has its own vector. To do that we ask for
+ * cpu_cnt vectors. ql_enable_msix() will adjust the
+ * vector count to what we actually get. We then
+ * allocate an RSS ring for each.
+ * Essentially, we are doing min(cpu_count, msix_vector_count).
+ */
+ qdev->intr_count = cpu_cnt;
+ ql_enable_msix(qdev);
+ /* Adjust the RSS ring count to the actual vector count. */
+ qdev->rss_ring_count = qdev->intr_count;
+ qdev->tx_ring_count = cpu_cnt;
+ qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
+
+ for (i = 0; i < qdev->tx_ring_count; i++) {
+ tx_ring = &qdev->tx_ring[i];
+ memset((void *)tx_ring, 0, sizeof(*tx_ring));
+ tx_ring->qdev = qdev;
+ tx_ring->wq_id = i;
+ tx_ring->wq_len = qdev->tx_ring_size;
+ tx_ring->wq_size =
+ tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
+
+ /*
+ * The completion queue IDs for the tx rings start
+ * immediately after the rss rings.
+ */
+ tx_ring->cq_id = qdev->rss_ring_count + i;
+ }
+
+ for (i = 0; i < qdev->rx_ring_count; i++) {
+ rx_ring = &qdev->rx_ring[i];
+ memset((void *)rx_ring, 0, sizeof(*rx_ring));
+ rx_ring->qdev = qdev;
+ rx_ring->cq_id = i;
+ rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
+ if (i < qdev->rss_ring_count) {
+ /*
+ * Inbound (RSS) queues.
+ */
+ rx_ring->cq_len = qdev->rx_ring_size;
+ rx_ring->cq_size =
+ rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
+ rx_ring->lbq_len = NUM_LARGE_BUFFERS;
+ rx_ring->lbq_size =
+ rx_ring->lbq_len * sizeof(__le64);
+ rx_ring->lbq_buf_size = (u16)lbq_buf_len;
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "lbq_buf_size %d, order = %d\n",
+ rx_ring->lbq_buf_size,
+ qdev->lbq_buf_order);
+ rx_ring->sbq_len = NUM_SMALL_BUFFERS;
+ rx_ring->sbq_size =
+ rx_ring->sbq_len * sizeof(__le64);
+ rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
+ rx_ring->type = RX_Q;
+ } else {
+ /*
+ * Outbound queue handles outbound completions only.
+ */
+ /* outbound cq is same size as tx_ring it services. */
+ rx_ring->cq_len = qdev->tx_ring_size;
+ rx_ring->cq_size =
+ rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
+ rx_ring->lbq_len = 0;
+ rx_ring->lbq_size = 0;
+ rx_ring->lbq_buf_size = 0;
+ rx_ring->sbq_len = 0;
+ rx_ring->sbq_size = 0;
+ rx_ring->sbq_buf_size = 0;
+ rx_ring->type = TX_Q;
+ }
+ }
+ return 0;
+}
+
+static int qlge_open(struct net_device *ndev)
+{
+ int err = 0;
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ err = ql_adapter_reset(qdev);
+ if (err)
+ return err;
+
+ err = ql_configure_rings(qdev);
+ if (err)
+ return err;
+
+ err = ql_get_adapter_resources(qdev);
+ if (err)
+ goto error_up;
+
+ err = ql_adapter_up(qdev);
+ if (err)
+ goto error_up;
+
+ return err;
+
+error_up:
+ ql_release_adapter_resources(qdev);
+ return err;
+}
+
+static int ql_change_rx_buffers(struct ql_adapter *qdev)
+{
+ struct rx_ring *rx_ring;
+ int i, status;
+ u32 lbq_buf_len;
+
+ /* Wait for an outstanding reset to complete. */
+ if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+ int i = 3;
+ while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Waiting for adapter UP...\n");
+ ssleep(1);
+ }
+
+ if (!i) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Timed out waiting for adapter UP\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ status = ql_adapter_down(qdev);
+ if (status)
+ goto error;
+
+ /* Get the new rx buffer size. */
+ lbq_buf_len = (qdev->ndev->mtu > 1500) ?
+ LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
+ qdev->lbq_buf_order = get_order(lbq_buf_len);
+
+ for (i = 0; i < qdev->rss_ring_count; i++) {
+ rx_ring = &qdev->rx_ring[i];
+ /* Set the new size. */
+ rx_ring->lbq_buf_size = lbq_buf_len;
+ }
+
+ status = ql_adapter_up(qdev);
+ if (status)
+ goto error;
+
+ return status;
+error:
+ netif_alert(qdev, ifup, qdev->ndev,
+ "Driver up/down cycle failed, closing device.\n");
+ set_bit(QL_ADAPTER_UP, &qdev->flags);
+ dev_close(qdev->ndev);
+ return status;
+}
+
+static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int status;
+
+ if (ndev->mtu == 1500 && new_mtu == 9000) {
+ netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
+ } else if (ndev->mtu == 9000 && new_mtu == 1500) {
+ netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
+ } else
+ return -EINVAL;
+
+ queue_delayed_work(qdev->workqueue,
+ &qdev->mpi_port_cfg_work, 3*HZ);
+
+ ndev->mtu = new_mtu;
+
+ if (!netif_running(qdev->ndev))
+ return 0;
+
+ status = ql_change_rx_buffers(qdev);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Changing MTU failed.\n");
+ }
+
+ return status;
+}
+
+static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ struct rx_ring *rx_ring = &qdev->rx_ring[0];
+ struct tx_ring *tx_ring = &qdev->tx_ring[0];
+ unsigned long pkts, mcast, dropped, errors, bytes;
+ int i;
+
+ /* Get RX stats. */
+ pkts = mcast = dropped = errors = bytes = 0;
+ for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
+ pkts += rx_ring->rx_packets;
+ bytes += rx_ring->rx_bytes;
+ dropped += rx_ring->rx_dropped;
+ errors += rx_ring->rx_errors;
+ mcast += rx_ring->rx_multicast;
+ }
+ ndev->stats.rx_packets = pkts;
+ ndev->stats.rx_bytes = bytes;
+ ndev->stats.rx_dropped = dropped;
+ ndev->stats.rx_errors = errors;
+ ndev->stats.multicast = mcast;
+
+ /* Get TX stats. */
+ pkts = errors = bytes = 0;
+ for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
+ pkts += tx_ring->tx_packets;
+ bytes += tx_ring->tx_bytes;
+ errors += tx_ring->tx_errors;
+ }
+ ndev->stats.tx_packets = pkts;
+ ndev->stats.tx_bytes = bytes;
+ ndev->stats.tx_errors = errors;
+ return &ndev->stats;
+}
+
+static void qlge_set_multicast_list(struct net_device *ndev)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ struct netdev_hw_addr *ha;
+ int i, status;
+
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (status)
+ return;
+ /*
+ * Set or clear promiscuous mode if a
+ * transition is taking place.
+ */
+ if (ndev->flags & IFF_PROMISC) {
+ if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
+ if (ql_set_routing_reg
+ (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to set promiscuous mode.\n");
+ } else {
+ set_bit(QL_PROMISCUOUS, &qdev->flags);
+ }
+ }
+ } else {
+ if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
+ if (ql_set_routing_reg
+ (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to clear promiscuous mode.\n");
+ } else {
+ clear_bit(QL_PROMISCUOUS, &qdev->flags);
+ }
+ }
+ }
+
+ /*
+ * Set or clear all multicast mode if a
+ * transition is taking place.
+ */
+ if ((ndev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
+ if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
+ if (ql_set_routing_reg
+ (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to set all-multi mode.\n");
+ } else {
+ set_bit(QL_ALLMULTI, &qdev->flags);
+ }
+ }
+ } else {
+ if (test_bit(QL_ALLMULTI, &qdev->flags)) {
+ if (ql_set_routing_reg
+ (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to clear all-multi mode.\n");
+ } else {
+ clear_bit(QL_ALLMULTI, &qdev->flags);
+ }
+ }
+ }
+
+ if (!netdev_mc_empty(ndev)) {
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ goto exit;
+ i = 0;
+ netdev_for_each_mc_addr(ha, ndev) {
+ if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
+ MAC_ADDR_TYPE_MULTI_MAC, i)) {
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to loadmulticast address.\n");
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ goto exit;
+ }
+ i++;
+ }
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ if (ql_set_routing_reg
+ (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to set multicast match mode.\n");
+ } else {
+ set_bit(QL_ALLMULTI, &qdev->flags);
+ }
+ }
+exit:
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+}
+
+static int qlge_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ struct sockaddr *addr = p;
+ int status;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ /* Update local copy of current mac address. */
+ memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
+
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return status;
+ status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
+ MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
+ if (status)
+ netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ return status;
+}
+
+static void qlge_tx_timeout(struct net_device *ndev)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ ql_queue_asic_error(qdev);
+}
+
+static void ql_asic_reset_work(struct work_struct *work)
+{
+ struct ql_adapter *qdev =
+ container_of(work, struct ql_adapter, asic_reset_work.work);
+ int status;
+ rtnl_lock();
+ status = ql_adapter_down(qdev);
+ if (status)
+ goto error;
+
+ status = ql_adapter_up(qdev);
+ if (status)
+ goto error;
+
+ /* Restore rx mode. */
+ clear_bit(QL_ALLMULTI, &qdev->flags);
+ clear_bit(QL_PROMISCUOUS, &qdev->flags);
+ qlge_set_multicast_list(qdev->ndev);
+
+ rtnl_unlock();
+ return;
+error:
+ netif_alert(qdev, ifup, qdev->ndev,
+ "Driver up/down cycle failed, closing device\n");
+
+ set_bit(QL_ADAPTER_UP, &qdev->flags);
+ dev_close(qdev->ndev);
+ rtnl_unlock();
+}
+
+static const struct nic_operations qla8012_nic_ops = {
+ .get_flash = ql_get_8012_flash_params,
+ .port_initialize = ql_8012_port_initialize,
+};
+
+static const struct nic_operations qla8000_nic_ops = {
+ .get_flash = ql_get_8000_flash_params,
+ .port_initialize = ql_8000_port_initialize,
+};
+
+/* Find the pcie function number for the other NIC
+ * on this chip. Since both NIC functions share a
+ * common firmware, we have the lowest enabled function
+ * do any common work. Examples would be resetting
+ * after a fatal firmware error, or doing a firmware
+ * coredump.
+ */
+static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
+{
+ int status = 0;
+ u32 temp;
+ u32 nic_func1, nic_func2;
+
+ status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
+ &temp);
+ if (status)
+ return status;
+
+ nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
+ MPI_TEST_NIC_FUNC_MASK);
+ nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
+ MPI_TEST_NIC_FUNC_MASK);
+
+ if (qdev->func == nic_func1)
+ qdev->alt_func = nic_func2;
+ else if (qdev->func == nic_func2)
+ qdev->alt_func = nic_func1;
+ else
+ status = -EIO;
+
+ return status;
+}
+
+static int ql_get_board_info(struct ql_adapter *qdev)
+{
+ int status;
+ qdev->func =
+ (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
+ if (qdev->func > 3)
+ return -EIO;
+
+ status = ql_get_alt_pcie_func(qdev);
+ if (status)
+ return status;
+
+ qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
+ if (qdev->port) {
+ qdev->xg_sem_mask = SEM_XGMAC1_MASK;
+ qdev->port_link_up = STS_PL1;
+ qdev->port_init = STS_PI1;
+ qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
+ qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
+ } else {
+ qdev->xg_sem_mask = SEM_XGMAC0_MASK;
+ qdev->port_link_up = STS_PL0;
+ qdev->port_init = STS_PI0;
+ qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
+ qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
+ }
+ qdev->chip_rev_id = ql_read32(qdev, REV_ID);
+ qdev->device_id = qdev->pdev->device;
+ if (qdev->device_id == QLGE_DEVICE_ID_8012)
+ qdev->nic_ops = &qla8012_nic_ops;
+ else if (qdev->device_id == QLGE_DEVICE_ID_8000)
+ qdev->nic_ops = &qla8000_nic_ops;
+ return status;
+}
+
+static void ql_release_all(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ if (qdev->workqueue) {
+ destroy_workqueue(qdev->workqueue);
+ qdev->workqueue = NULL;
+ }
+
+ if (qdev->reg_base)
+ iounmap(qdev->reg_base);
+ if (qdev->doorbell_area)
+ iounmap(qdev->doorbell_area);
+ vfree(qdev->mpi_coredump);
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static int __devinit ql_init_device(struct pci_dev *pdev,
+ struct net_device *ndev, int cards_found)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int err = 0;
+
+ memset((void *)qdev, 0, sizeof(*qdev));
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "PCI device enable failed.\n");
+ return err;
+ }
+
+ qdev->ndev = ndev;
+ qdev->pdev = pdev;
+ pci_set_drvdata(pdev, ndev);
+
+ /* Set PCIe read request size */
+ err = pcie_set_readrq(pdev, 4096);
+ if (err) {
+ dev_err(&pdev->dev, "Set readrq failed.\n");
+ goto err_out1;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "PCI region request failed.\n");
+ goto err_out1;
+ }
+
+ pci_set_master(pdev);
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ set_bit(QL_DMA64, &qdev->flags);
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ }
+
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA configuration.\n");
+ goto err_out2;
+ }
+
+ /* Set PCIe reset type for EEH to fundamental. */
+ pdev->needs_freset = 1;
+ pci_save_state(pdev);
+ qdev->reg_base =
+ ioremap_nocache(pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
+ if (!qdev->reg_base) {
+ dev_err(&pdev->dev, "Register mapping failed.\n");
+ err = -ENOMEM;
+ goto err_out2;
+ }
+
+ qdev->doorbell_area_size = pci_resource_len(pdev, 3);
+ qdev->doorbell_area =
+ ioremap_nocache(pci_resource_start(pdev, 3),
+ pci_resource_len(pdev, 3));
+ if (!qdev->doorbell_area) {
+ dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
+ err = -ENOMEM;
+ goto err_out2;
+ }
+
+ err = ql_get_board_info(qdev);
+ if (err) {
+ dev_err(&pdev->dev, "Register access failed.\n");
+ err = -EIO;
+ goto err_out2;
+ }
+ qdev->msg_enable = netif_msg_init(debug, default_msg);
+ spin_lock_init(&qdev->hw_lock);
+ spin_lock_init(&qdev->stats_lock);
+
+ if (qlge_mpi_coredump) {
+ qdev->mpi_coredump =
+ vmalloc(sizeof(struct ql_mpi_coredump));
+ if (qdev->mpi_coredump == NULL) {
+ dev_err(&pdev->dev, "Coredump alloc failed.\n");
+ err = -ENOMEM;
+ goto err_out2;
+ }
+ if (qlge_force_coredump)
+ set_bit(QL_FRC_COREDUMP, &qdev->flags);
+ }
+ /* make sure the EEPROM is good */
+ err = qdev->nic_ops->get_flash(qdev);
+ if (err) {
+ dev_err(&pdev->dev, "Invalid FLASH.\n");
+ goto err_out2;
+ }
+
+ memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+ /* Keep local copy of current mac address. */
+ memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
+
+ /* Set up the default ring sizes. */
+ qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
+ qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
+
+ /* Set up the coalescing parameters. */
+ qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
+ qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
+ qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
+ qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
+
+ /*
+ * Set up the operating parameters.
+ */
+ qdev->workqueue = create_singlethread_workqueue(ndev->name);
+ INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
+ INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
+ INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
+ INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
+ INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
+ INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
+ init_completion(&qdev->ide_completion);
+ mutex_init(&qdev->mpi_mutex);
+
+ if (!cards_found) {
+ dev_info(&pdev->dev, "%s\n", DRV_STRING);
+ dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
+ DRV_NAME, DRV_VERSION);
+ }
+ return 0;
+err_out2:
+ ql_release_all(pdev);
+err_out1:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static const struct net_device_ops qlge_netdev_ops = {
+ .ndo_open = qlge_open,
+ .ndo_stop = qlge_close,
+ .ndo_start_xmit = qlge_send,
+ .ndo_change_mtu = qlge_change_mtu,
+ .ndo_get_stats = qlge_get_stats,
+ .ndo_set_rx_mode = qlge_set_multicast_list,
+ .ndo_set_mac_address = qlge_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = qlge_tx_timeout,
+ .ndo_fix_features = qlge_fix_features,
+ .ndo_set_features = qlge_set_features,
+ .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
+};
+
+static void ql_timer(unsigned long data)
+{
+ struct ql_adapter *qdev = (struct ql_adapter *)data;
+ u32 var = 0;
+
+ var = ql_read32(qdev, STS);
+ if (pci_channel_offline(qdev->pdev)) {
+ netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
+ return;
+ }
+
+ mod_timer(&qdev->timer, jiffies + (5*HZ));
+}
+
+static int __devinit qlge_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_entry)
+{
+ struct net_device *ndev = NULL;
+ struct ql_adapter *qdev = NULL;
+ static int cards_found = 0;
+ int err = 0;
+
+ ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
+ min(MAX_CPUS, (int)num_online_cpus()));
+ if (!ndev)
+ return -ENOMEM;
+
+ err = ql_init_device(pdev, ndev, cards_found);
+ if (err < 0) {
+ free_netdev(ndev);
+ return err;
+ }
+
+ qdev = netdev_priv(ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
+ NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
+ ndev->features = ndev->hw_features |
+ NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+
+ if (test_bit(QL_DMA64, &qdev->flags))
+ ndev->features |= NETIF_F_HIGHDMA;
+
+ /*
+ * Set up net_device structure.
+ */
+ ndev->tx_queue_len = qdev->tx_ring_size;
+ ndev->irq = pdev->irq;
+
+ ndev->netdev_ops = &qlge_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
+ ndev->watchdog_timeo = 10 * HZ;
+
+ err = register_netdev(ndev);
+ if (err) {
+ dev_err(&pdev->dev, "net device registration failed.\n");
+ ql_release_all(pdev);
+ pci_disable_device(pdev);
+ return err;
+ }
+ /* Start up the timer to trigger EEH if
+ * the bus goes dead
+ */
+ init_timer_deferrable(&qdev->timer);
+ qdev->timer.data = (unsigned long)qdev;
+ qdev->timer.function = ql_timer;
+ qdev->timer.expires = jiffies + (5*HZ);
+ add_timer(&qdev->timer);
+ ql_link_off(qdev);
+ ql_display_dev_info(ndev);
+ atomic_set(&qdev->lb_count, 0);
+ cards_found++;
+ return 0;
+}
+
+netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
+{
+ return qlge_send(skb, ndev);
+}
+
+int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
+{
+ return ql_clean_inbound_rx_ring(rx_ring, budget);
+}
+
+static void __devexit qlge_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ del_timer_sync(&qdev->timer);
+ ql_cancel_all_work_sync(qdev);
+ unregister_netdev(ndev);
+ ql_release_all(pdev);
+ pci_disable_device(pdev);
+ free_netdev(ndev);
+}
+
+/* Clean up resources without touching hardware. */
+static void ql_eeh_close(struct net_device *ndev)
+{
+ int i;
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ if (netif_carrier_ok(ndev)) {
+ netif_carrier_off(ndev);
+ netif_stop_queue(ndev);
+ }
+
+ /* Disable the timer. */
+ del_timer_sync(&qdev->timer);
+ ql_cancel_all_work_sync(qdev);
+
+ for (i = 0; i < qdev->rss_ring_count; i++)
+ netif_napi_del(&qdev->rx_ring[i].napi);
+
+ clear_bit(QL_ADAPTER_UP, &qdev->flags);
+ ql_tx_ring_clean(qdev);
+ ql_free_rx_buffers(qdev);
+ ql_release_adapter_resources(qdev);
+}
+
+/*
+ * This callback is called by the PCI subsystem whenever
+ * a PCI bus error is detected.
+ */
+static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
+ enum pci_channel_state state)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ netif_device_detach(ndev);
+ if (netif_running(ndev))
+ ql_eeh_close(ndev);
+ pci_disable_device(pdev);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ dev_err(&pdev->dev,
+ "%s: pci_channel_io_perm_failure.\n", __func__);
+ ql_eeh_close(ndev);
+ set_bit(QL_EEH_FATAL, &qdev->flags);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/*
+ * This callback is called after the PCI bus has been reset.
+ * Basically, this tries to restart the card from scratch.
+ * This is a shortened version of the device probe/discovery code;
+ * it resembles the first half of the () routine.
+ */
+static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ pdev->error_state = pci_channel_io_normal;
+
+ pci_restore_state(pdev);
+ if (pci_enable_device(pdev)) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+
+ if (ql_adapter_reset(qdev)) {
+ netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
+ set_bit(QL_EEH_FATAL, &qdev->flags);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void qlge_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int err = 0;
+
+ if (netif_running(ndev)) {
+ err = qlge_open(ndev);
+ if (err) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Device initialization failed after reset.\n");
+ return;
+ }
+ } else {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Device was not running prior to EEH.\n");
+ }
+ mod_timer(&qdev->timer, jiffies + (5*HZ));
+ netif_device_attach(ndev);
+}
+
+static struct pci_error_handlers qlge_err_handler = {
+ .error_detected = qlge_io_error_detected,
+ .slot_reset = qlge_io_slot_reset,
+ .resume = qlge_io_resume,
+};
+
+static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int err;
+
+ netif_device_detach(ndev);
+ del_timer_sync(&qdev->timer);
+
+ if (netif_running(ndev)) {
+ err = ql_adapter_down(qdev);
+ if (err)
+ return err;
+ }
+
+ ql_wol(qdev);
+ err = pci_save_state(pdev);
+ if (err)
+ return err;
+
+ pci_disable_device(pdev);
+
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int qlge_resume(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ err = pci_enable_device(pdev);
+ if (err) {
+ netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
+ return err;
+ }
+ pci_set_master(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ if (netif_running(ndev)) {
+ err = ql_adapter_up(qdev);
+ if (err)
+ return err;
+ }
+
+ mod_timer(&qdev->timer, jiffies + (5*HZ));
+ netif_device_attach(ndev);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static void qlge_shutdown(struct pci_dev *pdev)
+{
+ qlge_suspend(pdev, PMSG_SUSPEND);
+}
+
+static struct pci_driver qlge_driver = {
+ .name = DRV_NAME,
+ .id_table = qlge_pci_tbl,
+ .probe = qlge_probe,
+ .remove = __devexit_p(qlge_remove),
+#ifdef CONFIG_PM
+ .suspend = qlge_suspend,
+ .resume = qlge_resume,
+#endif
+ .shutdown = qlge_shutdown,
+ .err_handler = &qlge_err_handler
+};
+
+static int __init qlge_init_module(void)
+{
+ return pci_register_driver(&qlge_driver);
+}
+
+static void __exit qlge_exit(void)
+{
+ pci_unregister_driver(&qlge_driver);
+}
+
+module_init(qlge_init_module);
+module_exit(qlge_exit);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
new file mode 100644
index 000000000000..ff2bf8a4e247
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
@@ -0,0 +1,1284 @@
+#include "qlge.h"
+
+int ql_unpause_mpi_risc(struct ql_adapter *qdev)
+{
+ u32 tmp;
+
+ /* Un-pause the RISC */
+ tmp = ql_read32(qdev, CSR);
+ if (!(tmp & CSR_RP))
+ return -EIO;
+
+ ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
+ return 0;
+}
+
+int ql_pause_mpi_risc(struct ql_adapter *qdev)
+{
+ u32 tmp;
+ int count = UDELAY_COUNT;
+
+ /* Pause the RISC */
+ ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
+ do {
+ tmp = ql_read32(qdev, CSR);
+ if (tmp & CSR_RP)
+ break;
+ mdelay(UDELAY_DELAY);
+ count--;
+ } while (count);
+ return (count == 0) ? -ETIMEDOUT : 0;
+}
+
+int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
+{
+ u32 tmp;
+ int count = UDELAY_COUNT;
+
+ /* Reset the RISC */
+ ql_write32(qdev, CSR, CSR_CMD_SET_RST);
+ do {
+ tmp = ql_read32(qdev, CSR);
+ if (tmp & CSR_RR) {
+ ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
+ break;
+ }
+ mdelay(UDELAY_DELAY);
+ count--;
+ } while (count);
+ return (count == 0) ? -ETIMEDOUT : 0;
+}
+
+int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+ int status;
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+ if (status)
+ goto exit;
+ /* set up for reg read */
+ ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+ if (status)
+ goto exit;
+ /* get the data */
+ *data = ql_read32(qdev, PROC_DATA);
+exit:
+ return status;
+}
+
+int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data)
+{
+ int status = 0;
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+ if (status)
+ goto exit;
+ /* write the data to the data reg */
+ ql_write32(qdev, PROC_DATA, data);
+ /* trigger the write */
+ ql_write32(qdev, PROC_ADDR, reg);
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
+ if (status)
+ goto exit;
+exit:
+ return status;
+}
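+
+/* Illustrative sketch (not part of the driver): the two helpers above
+ * give indirect access to MPI processor registers through the
+ * PROC_ADDR/PROC_DATA window. A read looks like this
+ * (ql_get_alt_pcie_func() in qlge_main.c is a real caller):
+ *
+ *	u32 val;
+ *
+ *	if (!ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG, &val))
+ *		netif_info(qdev, drv, qdev->ndev, "cfg = 0x%.08x\n", val);
+ */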
+
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+{
+ int status;
+ status = ql_write_mpi_reg(qdev, 0x00001010, 1);
+ return status;
+}
+
+/* Determine if we are in charge of the firmware. We
+ * are if we are the lower of the 2 NIC pcie functions,
+ * or if we are the higher function and the lower
+ * function is not enabled.
+ */
+int ql_own_firmware(struct ql_adapter *qdev)
+{
+ u32 temp;
+
+ /* If we are the lower of the 2 NIC functions
+ * on the chip then we are responsible for
+ * core dump and firmware reset after an error.
+ */
+ if (qdev->func < qdev->alt_func)
+ return 1;
+
+ /* If we are the higher of the 2 NIC functions
+ * on the chip and the lower function is not
+ * enabled, then we are responsible for
+ * core dump and firmware reset after an error.
+ */
+ temp = ql_read32(qdev, STS);
+ if (!(temp & (1 << (8 + qdev->alt_func))))
+ return 1;
+
+ return 0;
+}
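+
+/* Usage sketch (illustrative): reset and coredump paths gate their work
+ * on firmware ownership, as ql_mpi_reset_work() at the end of this file
+ * does:
+ *
+ *	if (!ql_own_firmware(qdev))
+ *		return;
+ *
+ * and only the owning function dumps core and resets the RISC.
+ */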
+
+static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int i, status;
+
+ status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+ if (status)
+ return -EBUSY;
+ for (i = 0; i < mbcp->out_count; i++) {
+ status =
+ ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
+ &mbcp->mbox_out[i]);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
+ break;
+ }
+ }
+ ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
+ return status;
+}
+
+/* Wait for a single mailbox command to complete.
+ * Returns zero on success.
+ */
+static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
+{
+ int count = 100;
+ u32 value;
+
+ do {
+ value = ql_read32(qdev, STS);
+ if (value & STS_PI)
+ return 0;
+ mdelay(UDELAY_DELAY); /* 100ms */
+ } while (--count);
+ return -ETIMEDOUT;
+}
+
+/* Execute a single mailbox command.
+ * Caller must hold PROC_ADDR semaphore.
+ */
+static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int i, status;
+
+ /*
+ * Make sure there's nothing pending.
+ * This shouldn't happen.
+ */
+ if (ql_read32(qdev, CSR) & CSR_HRI)
+ return -EIO;
+
+ status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+ if (status)
+ return status;
+
+ /*
+ * Fill the outbound mailboxes.
+ */
+ for (i = 0; i < mbcp->in_count; i++) {
+ status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i,
+ mbcp->mbox_in[i]);
+ if (status)
+ goto end;
+ }
+ /*
+ * Wake up the MPI firmware.
+ */
+ ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT);
+end:
+ ql_sem_unlock(qdev, SEM_PROC_REG_MASK);
+ return status;
+}
+
+/* We are being asked by firmware to accept
+ * a change to the port. This is only
+ * a change to max frame sizes (Tx/Rx), pause
+ * parameters, or loopback mode. We wake up a worker
+ * to handle this since a mailbox command
+ * will need to be sent to ACK the request.
+ */
+static int ql_idc_req_aen(struct ql_adapter *qdev)
+{
+ int status;
+ struct mbox_params *mbcp = &qdev->idc_mbc;
+
+ netif_err(qdev, drv, qdev->ndev, "Enter!\n");
+ /* Get the status data and start up a thread to
+ * handle the request.
+ */
+ mbcp->out_count = 4;
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Could not read MPI, resetting ASIC!\n");
+ ql_queue_asic_error(qdev);
+ } else {
+ /* Begin polled mode early so
+ * we don't get another interrupt
+ * when we leave mpi_worker.
+ */
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+ queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0);
+ }
+ return status;
+}
+
+/* Process an inter-device event completion.
+ * If good, signal the caller's completion.
+ */
+static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
+{
+ int status;
+ struct mbox_params *mbcp = &qdev->idc_mbc;
+ mbcp->out_count = 4;
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Could not read MPI, resetting RISC!\n");
+ ql_queue_fw_error(qdev);
+ } else {
+ /* Wake up the sleeping mpi_idc_work thread that is
+ * waiting for this event.
+ */
+ complete(&qdev->ide_completion);
+ }
+
+ return status;
+}
+
+static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status;
+ mbcp->out_count = 2;
+
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "%s: Could not get mailbox status.\n", __func__);
+ return;
+ }
+
+ qdev->link_status = mbcp->mbox_out[1];
+ netif_err(qdev, drv, qdev->ndev, "Link Up.\n");
+
+ /* If we're coming back from an IDC event
+ * then set up the CAM and frame routing.
+ */
+ if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
+ status = ql_cam_route_initialize(qdev);
+ if (status) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init CAM/Routing tables.\n");
+ return;
+ }
+ clear_bit(QL_CAM_RT_SET, &qdev->flags);
+ }
+
+ /* Queue up a worker to check the frame
+ * size information, and fix it if it's not
+ * to our liking.
+ */
+ if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
+ netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n");
+ set_bit(QL_PORT_CFG, &qdev->flags);
+ /* Begin polled mode early so
+ * we don't get another interrupt
+ * when we leave mpi_worker dpc.
+ */
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+ queue_delayed_work(qdev->workqueue,
+ &qdev->mpi_port_cfg_work, 0);
+ }
+
+ ql_link_on(qdev);
+}
+
+static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status;
+
+ mbcp->out_count = 3;
+
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status)
+ netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n");
+
+ ql_link_off(qdev);
+}
+
+static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status;
+
+ mbcp->out_count = 5;
+
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status)
+ netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n");
+ else
+ netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n");
+
+ return status;
+}
+
+static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status;
+
+ mbcp->out_count = 1;
+
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status)
+ netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n");
+ else
+ netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n");
+
+ return status;
+}
+
+static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status;
+
+ mbcp->out_count = 6;
+
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status)
+ netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
+ else {
+ int i;
+ netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n");
+ for (i = 0; i < mbcp->out_count; i++)
+ netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n",
+ i, mbcp->mbox_out[i]);
+
+ }
+
+ return status;
+}
+
+static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status;
+
+ mbcp->out_count = 2;
+
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n");
+ } else {
+ netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n",
+ mbcp->mbox_out[1]);
+ qdev->fw_rev_id = mbcp->mbox_out[1];
+ status = ql_cam_route_initialize(qdev);
+ if (status)
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init CAM/Routing tables.\n");
+ }
+}
+
+/* Process an async event and clear it unless it's an
+ * error condition.
+ * This can get called iteratively from the mpi_work thread
+ * when events arrive via an interrupt.
+ * It also gets called when a mailbox command is polling for
+ * its completion. */
+static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status;
+ int orig_count = mbcp->out_count;
+
+ /* Just get mailbox zero for now. */
+ mbcp->out_count = 1;
+ status = ql_get_mb_sts(qdev, mbcp);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Could not read MPI, resetting ASIC!\n");
+ ql_queue_asic_error(qdev);
+ goto end;
+ }
+
+ switch (mbcp->mbox_out[0]) {
+
+ /* This case is only active when we arrive here
+ * as a result of issuing a mailbox command to
+ * the firmware.
+ */
+ case MB_CMD_STS_INTRMDT:
+ case MB_CMD_STS_GOOD:
+ case MB_CMD_STS_INVLD_CMD:
+ case MB_CMD_STS_XFC_ERR:
+ case MB_CMD_STS_CSUM_ERR:
+ case MB_CMD_STS_ERR:
+ case MB_CMD_STS_PARAM_ERR:
+ /* We can only get mailbox status if we're polling from an
+ * unfinished command. Get the rest of the status data and
+ * return back to the caller.
+ * We only end up here when we're polling for a mailbox
+ * command completion.
+ */
+ mbcp->out_count = orig_count;
+ status = ql_get_mb_sts(qdev, mbcp);
+ return status;
+
+ /* We are being asked by firmware to accept
+ * a change to the port. This is only
+ * a change to max frame sizes (Tx/Rx), pause
+ * parameters, or loopback mode.
+ */
+ case AEN_IDC_REQ:
+ status = ql_idc_req_aen(qdev);
+ break;
+
+ /* Process an inbound IDC event.
+ * This will happen when we're trying to
+ * change tx/rx max frame size, change pause
+ * parameters or loopback mode.
+ */
+ case AEN_IDC_CMPLT:
+ case AEN_IDC_EXT:
+ status = ql_idc_cmplt_aen(qdev);
+ break;
+
+ case AEN_LINK_UP:
+ ql_link_up(qdev, mbcp);
+ break;
+
+ case AEN_LINK_DOWN:
+ ql_link_down(qdev, mbcp);
+ break;
+
+ case AEN_FW_INIT_DONE:
+ /* If we're in the process of executing the firmware,
+ * then convert the status to normal mailbox status.
+ */
+ if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
+ mbcp->out_count = orig_count;
+ status = ql_get_mb_sts(qdev, mbcp);
+ mbcp->mbox_out[0] = MB_CMD_STS_GOOD;
+ return status;
+ }
+ ql_init_fw_done(qdev, mbcp);
+ break;
+
+ case AEN_AEN_SFP_IN:
+ ql_sfp_in(qdev, mbcp);
+ break;
+
+ case AEN_AEN_SFP_OUT:
+ ql_sfp_out(qdev, mbcp);
+ break;
+
+ /* This event can arrive at boot time or after an
+ * MPI reset if the firmware failed to initialize.
+ */
+ case AEN_FW_INIT_FAIL:
+ /* If we're in the process of executing the firmware,
+ * then convert the status to normal mailbox status.
+ */
+ if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
+ mbcp->out_count = orig_count;
+ status = ql_get_mb_sts(qdev, mbcp);
+ mbcp->mbox_out[0] = MB_CMD_STS_ERR;
+ return status;
+ }
+ netif_err(qdev, drv, qdev->ndev,
+ "Firmware initialization failed.\n");
+ status = -EIO;
+ ql_queue_fw_error(qdev);
+ break;
+
+ case AEN_SYS_ERR:
+ netif_err(qdev, drv, qdev->ndev, "System Error.\n");
+ ql_queue_fw_error(qdev);
+ status = -EIO;
+ break;
+
+ case AEN_AEN_LOST:
+ ql_aen_lost(qdev, mbcp);
+ break;
+
+ case AEN_DCBX_CHG:
+ /* Need to support AEN 8110 */
+ break;
+ default:
+ netif_err(qdev, drv, qdev->ndev,
+ "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
+ /* Clear the MPI firmware status. */
+ }
+end:
+ ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+ /* Restore the original mailbox count to
+ * what the caller asked for. This can get
+ * changed when a mailbox command is waiting
+ * for a response and an AEN arrives and
+ * is handled.
+ */
+ mbcp->out_count = orig_count;
+ return status;
+}
+
+/* Execute a single mailbox command.
+ * mbcp is a pointer to an array of u32. Each
+ * element in the array contains the value for its
+ * respective mailbox register.
+ */
+static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
+{
+ int status;
+ unsigned long count;
+
+ mutex_lock(&qdev->mpi_mutex);
+
+ /* Begin polled mode for MPI */
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+
+ /* Load the mailbox registers and wake up MPI RISC. */
+ status = ql_exec_mb_cmd(qdev, mbcp);
+ if (status)
+ goto end;
+
+
+ /* If we're generating a system error, then there's nothing
+ * to wait for.
+ */
+ if (mbcp->mbox_in[0] == MB_CMD_MAKE_SYS_ERR)
+ goto end;
+
+ /* Wait for the command to complete. We loop
+ * here because some AEN might arrive while
+ * we're waiting for the mailbox command to
+ * complete. If more than 5 seconds expire we can
+ * assume something is wrong. */
+ count = jiffies + HZ * MAILBOX_TIMEOUT;
+ do {
+ /* Wait for the interrupt to come in. */
+ status = ql_wait_mbx_cmd_cmplt(qdev);
+ if (status)
+ continue;
+
+ /* Process the event. If it's an AEN, it
+ * will be handled in-line or a worker
+ * will be spawned. If it's our completion
+ * we will catch it below.
+ */
+ status = ql_mpi_handler(qdev, mbcp);
+ if (status)
+ goto end;
+
+ /* It's either the completion for our mailbox
+ * command complete or an AEN. If it's our
+ * completion then get out.
+ */
+ if (((mbcp->mbox_out[0] & 0x0000f000) ==
+ MB_CMD_STS_GOOD) ||
+ ((mbcp->mbox_out[0] & 0x0000f000) ==
+ MB_CMD_STS_INTRMDT))
+ goto done;
+ } while (time_before(jiffies, count));
+
+ netif_err(qdev, drv, qdev->ndev,
+ "Timed out waiting for mailbox complete.\n");
+ status = -ETIMEDOUT;
+ goto end;
+
+done:
+
+ /* Now we can clear the interrupt condition
+ * and look at our status.
+ */
+ ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
+
+ if (((mbcp->mbox_out[0] & 0x0000f000) !=
+ MB_CMD_STS_GOOD) &&
+ ((mbcp->mbox_out[0] & 0x0000f000) !=
+ MB_CMD_STS_INTRMDT)) {
+ status = -EIO;
+ }
+end:
+ /* End polled mode for MPI */
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+ mutex_unlock(&qdev->mpi_mutex);
+ return status;
+}
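+
+/* All the ql_mb_*() wrappers below follow one template (an illustrative
+ * condensation; ql_mb_about_fw() right below is a real instance):
+ *
+ *	struct mbox_params mbc;
+ *
+ *	memset(&mbc, 0, sizeof(mbc));
+ *	mbc.in_count = n;		(mailboxes we load)
+ *	mbc.out_count = m;		(mailboxes we read back)
+ *	mbc.mbox_in[0] = MB_CMD_xxx;
+ *	status = ql_mailbox_command(qdev, &mbc);
+ *	if (!status && mbc.mbox_out[0] != MB_CMD_STS_GOOD)
+ *		status = -EIO;
+ */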
+
+/* Get MPI firmware version. This will be used for
+ * driver banner and for ethtool info.
+ * Returns zero on success.
+ */
+int ql_mb_about_fw(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status = 0;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 3;
+
+ mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed about firmware command\n");
+ status = -EIO;
+ }
+
+ /* Store the firmware version */
+ qdev->fw_rev_id = mbcp->mbox_out[1];
+
+ return status;
+}
+
+/* Get functional state for MPI firmware.
+ * Returns zero on success.
+ */
+int ql_mb_get_fw_state(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status = 0;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 2;
+
+ mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Get Firmware State.\n");
+ status = -EIO;
+ }
+
+ /* If bit zero is set in mbx 1 then the firmware is
+ * running, but not initialized. This should never
+ * happen.
+ */
+ if (mbcp->mbox_out[1] & 1) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Firmware waiting for initialization.\n");
+ status = -EIO;
+ }
+
+ return status;
+}
+
+/* Send an ACK mailbox command to the firmware to
+ * let it continue with the change.
+ */
+static int ql_mb_idc_ack(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status = 0;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 5;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_IDC_ACK;
+ mbcp->mbox_in[1] = qdev->idc_mbc.mbox_out[1];
+ mbcp->mbox_in[2] = qdev->idc_mbc.mbox_out[2];
+ mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3];
+ mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4];
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+/* Set link settings and maximum frame size settings
+ * for the current port.
+ * Most likely will block.
+ */
+int ql_mb_set_port_cfg(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status = 0;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 3;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_SET_PORT_CFG;
+ mbcp->mbox_in[1] = qdev->link_config;
+ mbcp->mbox_in[2] = qdev->max_frame_size;
+
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Port Config sent, wait for IDC.\n");
+ } else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Set Port Configuration.\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
+ u32 size)
+{
+ int status = 0;
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 9;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM;
+ mbcp->mbox_in[1] = LSW(addr);
+ mbcp->mbox_in[2] = MSW(req_dma);
+ mbcp->mbox_in[3] = LSW(req_dma);
+ mbcp->mbox_in[4] = MSW(size);
+ mbcp->mbox_in[5] = LSW(size);
+ mbcp->mbox_in[6] = MSW(MSD(req_dma));
+ mbcp->mbox_in[7] = LSW(MSD(req_dma));
+ mbcp->mbox_in[8] = MSW(addr);
+
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+/* Issue a mailbox command to dump RISC RAM. */
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
+ u32 ram_addr, int word_count)
+{
+ int status;
+ char *my_buf;
+ dma_addr_t buf_dma;
+
+ my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
+ &buf_dma);
+ if (!my_buf)
+ return -EIO;
+
+ status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
+ if (!status)
+ memcpy(buf, my_buf, word_count * sizeof(u32));
+
+ pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
+ buf_dma);
+ return status;
+}
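+
+/* Usage sketch (illustrative): a caller wanting a window of RISC RAM in
+ * a kernel buffer, e.g. for a firmware coredump, might run:
+ *
+ *	u32 words[64];
+ *	int err = ql_dump_risc_ram_area(qdev, words, ram_addr, 64);
+ *
+ * On success, words[0..63] hold the RISC RAM starting at ram_addr.
+ */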
+
+/* Get link settings and maximum frame size settings
+ * for the current port.
+ * Most likely will block.
+ */
+int ql_mb_get_port_cfg(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status = 0;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 3;
+
+ mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Get Port Configuration.\n");
+ status = -EIO;
+ } else {
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "Passed Get Port Configuration.\n");
+ qdev->link_config = mbcp->mbox_out[1];
+ qdev->max_frame_size = mbcp->mbox_out[2];
+ }
+ return status;
+}
+
+int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 2;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
+ mbcp->mbox_in[1] = wol;
+
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+ u8 *addr = qdev->ndev->dev_addr;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 8;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC;
+ if (enable_wol) {
+ mbcp->mbox_in[1] = (u32)addr[0];
+ mbcp->mbox_in[2] = (u32)addr[1];
+ mbcp->mbox_in[3] = (u32)addr[2];
+ mbcp->mbox_in[4] = (u32)addr[3];
+ mbcp->mbox_in[5] = (u32)addr[4];
+ mbcp->mbox_in[6] = (u32)addr[5];
+ mbcp->mbox_in[7] = 0;
+ } else {
+ mbcp->mbox_in[1] = 0;
+ mbcp->mbox_in[2] = 1;
+ mbcp->mbox_in[3] = 1;
+ mbcp->mbox_in[4] = 1;
+ mbcp->mbox_in[5] = 1;
+ mbcp->mbox_in[6] = 1;
+ mbcp->mbox_in[7] = 0;
+ }
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
+ status = -EIO;
+ }
+ return status;
+}
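+
+/* Illustrative flow (assumed from the suspend path): arming magic-packet
+ * wake-up combines the two WOL helpers, roughly:
+ *
+ *	ql_mb_wol_set_magic(qdev, 1);	(program the station address)
+ *	ql_mb_wol_mode(qdev, wol);	(arm the selected WOL modes)
+ *
+ * ql_wol(), called from qlge_suspend() in qlge_main.c, is the real
+ * implementation.
+ */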
+
+/* IDC - Inter Device Communication...
+ * Some firmware commands require consent of the adjacent
+ * FCoE function. This function waits for the OK, or a
+ * counter-request for a little more time.
+ * The firmware will complete the request if the other
+ * function doesn't respond.
+ */
+static int ql_idc_wait(struct ql_adapter *qdev)
+{
+ int status = -ETIMEDOUT;
+ long wait_time = 1 * HZ;
+ struct mbox_params *mbcp = &qdev->idc_mbc;
+ do {
+ /* Wait here for the command to complete
+ * via the IDC process.
+ */
+ wait_time =
+ wait_for_completion_timeout(&qdev->ide_completion,
+ wait_time);
+ if (!wait_time) {
+ netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
+ break;
+ }
+ /* Now examine the response from the IDC process.
+ * We might have a good completion or a request for
+ * more wait time.
+ */
+ if (mbcp->mbox_out[0] == AEN_IDC_EXT) {
+ netif_err(qdev, drv, qdev->ndev,
+ "IDC Time Extension from function.\n");
+ wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f;
+ } else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) {
+ netif_err(qdev, drv, qdev->ndev, "IDC Success.\n");
+ status = 0;
+ break;
+ } else {
+ netif_err(qdev, drv, qdev->ndev,
+ "IDC: Invalid State 0x%.04x.\n",
+ mbcp->mbox_out[0]);
+ status = -EIO;
+ break;
+ }
+ } while (wait_time);
+
+ return status;
+}
+
+int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 2;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
+ mbcp->mbox_in[1] = led_config;
+
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed to set LED Configuration.\n");
+ status = -EIO;
+ }
+
+ return status;
+}
+
+int ql_mb_get_led_cfg(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 2;
+
+ mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed to get LED Configuration.\n");
+ status = -EIO;
+ } else
+ qdev->led_config = mbcp->mbox_out[1];
+
+ return status;
+}
+
+int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 2;
+
+ mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL;
+ mbcp->mbox_in[1] = control;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD)
+ return status;
+
+ if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Command not supported by firmware.\n");
+ status = -EINVAL;
+ } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
+ /* This indicates that the firmware is
+ * already in the state we are trying to
+ * change it to.
+ */
+ netif_err(qdev, drv, qdev->ndev,
+ "Command parameters make no change.\n");
+ }
+ return status;
+}
+
+/* Returns a negative error code or the mailbox command status. */
+static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+ *control = 0;
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) {
+ *control = mbcp->mbox_out[1];
+ return status;
+ }
+
+ if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Command not supported by firmware.\n");
+ status = -EINVAL;
+ } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed to get MPI traffic control.\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+int ql_wait_fifo_empty(struct ql_adapter *qdev)
+{
+ int count = 5;
+ u32 mgmnt_fifo_empty;
+ u32 nic_fifo_empty;
+
+ do {
+ nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE;
+ ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
+ mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY;
+ if (nic_fifo_empty && mgmnt_fifo_empty)
+ return 0;
+ msleep(100);
+ } while (count-- > 0);
+ return -ETIMEDOUT;
+}
+
+/* API called in work thread context to set new TX/RX
+ * maximum frame size values to match MTU.
+ */
+static int ql_set_port_cfg(struct ql_adapter *qdev)
+{
+ int status;
+ status = ql_mb_set_port_cfg(qdev);
+ if (status)
+ return status;
+ status = ql_idc_wait(qdev);
+ return status;
+}
+
+/* The following routines are worker threads that process
+ * events that may sleep waiting for completion.
+ */
+
+/* This thread gets the maximum TX and RX frame size values
+ * from the firmware and, if necessary, changes them to match
+ * the MTU setting.
+ */
+void ql_mpi_port_cfg_work(struct work_struct *work)
+{
+ struct ql_adapter *qdev =
+ container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
+ int status;
+
+ status = ql_mb_get_port_cfg(qdev);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: Failed to get port config data.\n");
+ goto err;
+ }
+
+ if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
+ qdev->max_frame_size ==
+ CFG_DEFAULT_MAX_FRAME_SIZE)
+ goto end;
+
+ qdev->link_config |= CFG_JUMBO_FRAME_SIZE;
+ qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
+ status = ql_set_port_cfg(qdev);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: Failed to set port config data.\n");
+ goto err;
+ }
+end:
+ clear_bit(QL_PORT_CFG, &qdev->flags);
+ return;
+err:
+ ql_queue_fw_error(qdev);
+ goto end;
+}
+
+/* Process an inter-device request. This is issued by
+ * the firmware in response to another function requesting
+ * a change to the port. We set a flag to indicate a change
+ * has been made and then send a mailbox command ACKing
+ * the change request.
+ */
+void ql_mpi_idc_work(struct work_struct *work)
+{
+ struct ql_adapter *qdev =
+ container_of(work, struct ql_adapter, mpi_idc_work.work);
+ int status;
+ struct mbox_params *mbcp = &qdev->idc_mbc;
+ u32 aen;
+ int timeout;
+
+ aen = mbcp->mbox_out[1] >> 16;
+ timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
+
+ switch (aen) {
+ default:
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: Unhandled IDC action.\n");
+ break;
+ case MB_CMD_PORT_RESET:
+ case MB_CMD_STOP_FW:
+ ql_link_off(qdev);
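+ /* Fall through. */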
+ case MB_CMD_SET_PORT_CFG:
+ /* Signal the resulting link up AEN
+ * that the frame routing and mac addr
+ * needs to be set.
+ */
+ set_bit(QL_CAM_RT_SET, &qdev->flags);
+ /* Do ACK if required */
+ if (timeout) {
+ status = ql_mb_idc_ack(qdev);
+ if (status)
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: No pending IDC!\n");
+ } else {
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "IDC ACK not required\n");
+ status = 0; /* success */
+ }
+ break;
+
+ /* These sub-commands issued by another (FCoE)
+ * function are requesting to do an operation
+ * on the shared resource (MPI environment).
+ * We currently don't issue these so we just
+ * ACK the request.
+ */
+ case MB_CMD_IOP_RESTART_MPI:
+ case MB_CMD_IOP_PREP_LINK_DOWN:
+ /* Drop the link, reload the routing
+ * table when link comes up.
+ */
+ ql_link_off(qdev);
+ set_bit(QL_CAM_RT_SET, &qdev->flags);
+ /* Fall through. */
+ case MB_CMD_IOP_DVR_START:
+ case MB_CMD_IOP_FLASH_ACC:
+ case MB_CMD_IOP_CORE_DUMP_MPI:
+ case MB_CMD_IOP_PREP_UPDATE_MPI:
+ case MB_CMD_IOP_COMP_UPDATE_MPI:
+ case MB_CMD_IOP_NONE: /* an IDC without params */
+ /* Do ACK if required */
+ if (timeout) {
+ status = ql_mb_idc_ack(qdev);
+ if (status)
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: No pending IDC!\n");
+ } else {
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "IDC ACK not required\n");
+ status = 0; /* success */
+ }
+ break;
+ }
+}
+
+void ql_mpi_work(struct work_struct *work)
+{
+ struct ql_adapter *qdev =
+ container_of(work, struct ql_adapter, mpi_work.work);
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int err = 0;
+
+ mutex_lock(&qdev->mpi_mutex);
+ /* Begin polled mode for MPI */
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+
+ while (ql_read32(qdev, STS) & STS_PI) {
+ memset(mbcp, 0, sizeof(struct mbox_params));
+ mbcp->out_count = 1;
+ /* Don't continue if an async event
+ * did not complete properly.
+ */
+ err = ql_mpi_handler(qdev, mbcp);
+ if (err)
+ break;
+ }
+
+ /* End polled mode for MPI */
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+ mutex_unlock(&qdev->mpi_mutex);
+ ql_enable_completion_interrupt(qdev, 0);
+}
+
+void ql_mpi_reset_work(struct work_struct *work)
+{
+ struct ql_adapter *qdev =
+ container_of(work, struct ql_adapter, mpi_reset_work.work);
+ cancel_delayed_work_sync(&qdev->mpi_work);
+ cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
+ cancel_delayed_work_sync(&qdev->mpi_idc_work);
+ /* If we're not the dominant NIC function,
+ * then there is nothing to do.
+ */
+ if (!ql_own_firmware(qdev)) {
+ netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
+ return;
+ }
+
+ if (!ql_core_dump(qdev, qdev->mpi_coredump)) {
+ netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
+ qdev->core_is_dumped = 1;
+ queue_delayed_work(qdev->workqueue,
+ &qdev->mpi_core_to_log, 5 * HZ);
+ }
+ ql_soft_reset_mpi_risc(qdev);
+}
diff --git a/drivers/net/ethernet/racal/Kconfig b/drivers/net/ethernet/racal/Kconfig
new file mode 100644
index 000000000000..01969e0a9c68
--- /dev/null
+++ b/drivers/net/ethernet/racal/Kconfig
@@ -0,0 +1,33 @@
+#
+# Racal-Interlan device configuration
+#
+
+config NET_VENDOR_RACAL
+ bool "Racal-Interlan (Micom) NI devices"
+ default y
+ depends on ISA
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, such
+ as the NI5010, NI5210 or NI6210, say Y and read the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about NI cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_RACAL
+
+config NI5010
+ tristate "NI5010 support (EXPERIMENTAL)"
+ depends on ISA && EXPERIMENTAL && BROKEN_ON_SMP
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Note that this is still
+ experimental code.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ni5010.
+
+endif # NET_VENDOR_RACAL
diff --git a/drivers/net/ethernet/racal/Makefile b/drivers/net/ethernet/racal/Makefile
new file mode 100644
index 000000000000..1e210ca1d78b
--- /dev/null
+++ b/drivers/net/ethernet/racal/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Racal-Interlan network device drivers.
+#
+
+obj-$(CONFIG_NI5010) += ni5010.o
diff --git a/drivers/net/ethernet/racal/ni5010.c b/drivers/net/ethernet/racal/ni5010.c
new file mode 100644
index 000000000000..072810da9a37
--- /dev/null
+++ b/drivers/net/ethernet/racal/ni5010.c
@@ -0,0 +1,771 @@
+/* ni5010.c: A network driver for the MiCom-Interlan NI5010 ethercard.
+ *
+ * Copyright 1996,1997,2006 Jan-Pascal van Best and Andreas Mohr.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * The authors may be reached as:
+ * janpascal@vanbest.org andi@lisas.de
+ *
+ * Sources:
+ * Donald Becker's "skeleton.c"
+ * Crynwr ni5010 packet driver
+ *
+ * Changes:
+ * v0.0: First test version
+ * v0.1: First working version
+ * v0.2:
+ * v0.3->v0.90: Now demand setting io and irq when loading as module
+ * 970430 v0.91: modified for Linux 2.1.14
+ * v0.92: Implemented Andreas' (better) NI5010 probe
+ * 970503 v0.93: Fixed auto-irq failure on warm reboot (JB)
+ * 970623 v1.00: First kernel version (AM)
+ * 970814 v1.01: Added detection of onboard receive buffer size (AM)
+ * 060611 v1.02: slight cleanup: email addresses, driver modernization.
+ * Bugs:
+ * - not SMP-safe (no locking of I/O accesses)
+ * - Note that you have to patch ifconfig for the new /proc/net/dev
+ * format. It gives incorrect stats otherwise.
+ *
+ * To do:
+ * Fix all bugs :-)
+ * Move some stuff to chipset_init()
+ * Handle xmt errors other than collisions
+ * Complete merge with Andreas' driver
+ * Implement ring buffers (Is this useful? You can't squeeze
+ * too many packets into a 2k buffer!)
+ * Implement DMA (Again, is this useful? Some docs say DMA is
+ * slower than programmed I/O)
+ *
+ * Compile with:
+ * gcc -O2 -fomit-frame-pointer -m486 -D__KERNEL__ \
+ * -DMODULE -c ni5010.c
+ *
+ * Insert with e.g.:
+ * insmod ni5010.ko io=0x300 irq=5
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "ni5010.h"
+
+static const char boardname[] = "NI5010";
+static char version[] __initdata =
+ "ni5010.c: v1.02 20060611 Jan-Pascal van Best and Andreas Mohr\n";
+
+/* bufsize_rcv == 0 means autoprobing */
+static unsigned int bufsize_rcv;
+
+#define JUMPERED_INTERRUPTS /* IRQ line jumpered on board */
+#undef JUMPERED_DMA /* No DMA used */
+#undef FULL_IODETECT /* Only detect in portlist */
+
+#ifndef FULL_IODETECT
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int ports[] __initdata =
+ { 0x300, 0x320, 0x340, 0x360, 0x380, 0x3a0, 0 };
+#endif
+
+/* Use 0 for production, 1 for verification, >2 for debug */
+#ifndef NI5010_DEBUG
+#define NI5010_DEBUG 0
+#endif
+
+/* Information that needs to be kept for each board. */
+struct ni5010_local {
+ int o_pkt_size;
+ spinlock_t lock;
+};
+
+/* Index to functions, as function prototypes. */
+
+static int ni5010_probe1(struct net_device *dev, int ioaddr);
+static int ni5010_open(struct net_device *dev);
+static int ni5010_send_packet(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t ni5010_interrupt(int irq, void *dev_id);
+static void ni5010_rx(struct net_device *dev);
+static void ni5010_timeout(struct net_device *dev);
+static int ni5010_close(struct net_device *dev);
+static void ni5010_set_multicast_list(struct net_device *dev);
+static void reset_receiver(struct net_device *dev);
+
+static int process_xmt_interrupt(struct net_device *dev);
+#define tx_done(dev) 1
+static void hardware_send_packet(struct net_device *dev, char *buf, int length, int pad);
+static void chipset_init(struct net_device *dev, int startp);
+static void dump_packet(void *buf, int len);
+static void ni5010_show_registers(struct net_device *dev);
+
+static int io;
+static int irq;
+
+struct net_device * __init ni5010_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct ni5010_local));
+ int *port;
+ int err = 0;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ io = dev->base_addr;
+ irq = dev->irq;
+ }
+
+ PRINTK2((KERN_DEBUG "%s: Entering ni5010_probe\n", dev->name));
+
+ if (io > 0x1ff) { /* Check a single specified location. */
+ err = ni5010_probe1(dev, io);
+ } else if (io != 0) { /* Don't probe at all. */
+ err = -ENXIO;
+ } else {
+#ifdef FULL_IODETECT
+ for (io=0x200; io<0x400 && ni5010_probe1(dev, io) ; io+=0x20)
+ ;
+ if (io == 0x400)
+ err = -ENODEV;
+
+#else
+ for (port = ports; *port && ni5010_probe1(dev, *port); port++)
+ ;
+ if (!*port)
+ err = -ENODEV;
+#endif /* FULL_IODETECT */
+ }
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ release_region(dev->base_addr, NI5010_IO_EXTENT);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static inline int rd_port(int ioaddr)
+{
+ inb(IE_RBUF);
+ return inb(IE_SAPROM);
+}
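+
+/* Illustrative note (not in the original driver): successive rd_port()
+ * calls return successive bytes of the station-address PROM (the dummy
+ * IE_RBUF read appears to step the PROM address), which is what lets the
+ * write-free probe below match SA_ADDR0..SA_ADDR2 and the magic values:
+ *
+ *	if (rd_port(ioaddr) == SA_ADDR0)
+ *		(first vendor byte of the station address seen)
+ */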
+
+static void __init trigger_irq(int ioaddr)
+{
+ outb(0x00, EDLC_RESET); /* Clear EDLC hold RESET state */
+ outb(0x00, IE_RESET); /* Board reset */
+ outb(0x00, EDLC_XMASK); /* Disable all Xmt interrupts */
+ outb(0x00, EDLC_RMASK); /* Disable all Rcv interrupt */
+ outb(0xff, EDLC_XCLR); /* Clear all pending Xmt interrupts */
+ outb(0xff, EDLC_RCLR); /* Clear all pending Rcv interrupts */
+ /*
+ * Transmit packet mode: Ignore parity, Power xcvr,
+ * Enable loopback
+ */
+ outb(XMD_IG_PAR | XMD_T_MODE | XMD_LBC, EDLC_XMODE);
+ outb(RMD_BROADCAST, EDLC_RMODE); /* Receive normal&broadcast */
+ outb(XM_ALL, EDLC_XMASK); /* Enable all Xmt interrupts */
+ udelay(50); /* FIXME: Necessary? */
+ outb(MM_EN_XMT|MM_MUX, IE_MMODE); /* Start transmission */
+}
+
+static const struct net_device_ops ni5010_netdev_ops = {
+ .ndo_open = ni5010_open,
+ .ndo_stop = ni5010_close,
+ .ndo_start_xmit = ni5010_send_packet,
+ .ndo_set_rx_mode = ni5010_set_multicast_list,
+ .ndo_tx_timeout = ni5010_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+/*
+ * This is the real probe routine. Linux has a history of friendly device
+ * probes on the ISA bus. A good device probe avoids doing writes, and
+ * verifies that the correct device exists and functions.
+ */
+
+static int __init ni5010_probe1(struct net_device *dev, int ioaddr)
+{
+ static unsigned version_printed;
+ struct ni5010_local *lp;
+ int i;
+ unsigned int data = 0;
+ int boguscount = 40;
+ int err = -ENODEV;
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ if (!request_region(ioaddr, NI5010_IO_EXTENT, boardname))
+ return -EBUSY;
+
+	/*
+	 * This is no "official" probe method; rather, I tested which
+	 * probe works best with my seven NI5010 cards
+	 * (they have very different serial numbers).
+	 * Suggestions or failure reports are very, very welcome!
+	 * I think it is a relatively good probe method, since it
+	 * doesn't use any "outb"; it should be nearly 100% reliable.
+	 * Well-known WARNING: this probe method (like many others)
+	 * will hang the system if an NE2000 card region is probed!
+	 *
+	 * - Andreas
+	 */
+
+ PRINTK2((KERN_DEBUG "%s: entering ni5010_probe1(%#3x)\n",
+ dev->name, ioaddr));
+
+ if (inb(ioaddr+0) == 0xff)
+ goto out;
+
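+	/*
+	 * Wait until six consecutive PROM reads all return 0xff: the AND
+	 * of the six values is 0xff only if every single read was 0xff.
+	 * The probe uses this run of 0xff bytes to synchronize with the
+	 * PROM address counter before checking the station address.
+	 */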
+ while ( (rd_port(ioaddr) & rd_port(ioaddr) & rd_port(ioaddr) &
+ rd_port(ioaddr) & rd_port(ioaddr) & rd_port(ioaddr)) != 0xff)
+ {
+ if (boguscount-- == 0)
+ goto out;
+ }
+
+ PRINTK2((KERN_DEBUG "%s: I/O #1 passed!\n", dev->name));
+
+ for (i=0; i<32; i++)
+ if ( (data = rd_port(ioaddr)) != 0xff) break;
+ if (data==0xff)
+ goto out;
+
+ PRINTK2((KERN_DEBUG "%s: I/O #2 passed!\n", dev->name));
+
+ if ((data != SA_ADDR0) || (rd_port(ioaddr) != SA_ADDR1) ||
+ (rd_port(ioaddr) != SA_ADDR2))
+ goto out;
+
+ for (i=0; i<4; i++)
+ rd_port(ioaddr);
+
+ if ( (rd_port(ioaddr) != NI5010_MAGICVAL1) ||
+ (rd_port(ioaddr) != NI5010_MAGICVAL2) )
+ goto out;
+
+ PRINTK2((KERN_DEBUG "%s: I/O #3 passed!\n", dev->name));
+
+ if (NI5010_DEBUG && version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+
+ printk("NI5010 ethercard probe at 0x%x: ", ioaddr);
+
+ dev->base_addr = ioaddr;
+
+ for (i=0; i<6; i++) {
+ outw(i, IE_GP);
+ dev->dev_addr[i] = inb(IE_SAPROM);
+ }
+ printk("%pM ", dev->dev_addr);
+
+ PRINTK2((KERN_DEBUG "%s: I/O #4 passed!\n", dev->name));
+
+#ifdef JUMPERED_INTERRUPTS
+ if (dev->irq == 0xff)
+ ;
+ else if (dev->irq < 2) {
+ unsigned long irq_mask;
+
+ PRINTK2((KERN_DEBUG "%s: I/O #5 passed!\n", dev->name));
+
+ irq_mask = probe_irq_on();
+ trigger_irq(ioaddr);
+ mdelay(20);
+ dev->irq = probe_irq_off(irq_mask);
+
+ PRINTK2((KERN_DEBUG "%s: I/O #6 passed!\n", dev->name));
+
+ if (dev->irq == 0) {
+ err = -EAGAIN;
+ printk(KERN_WARNING "%s: no IRQ found!\n", dev->name);
+ goto out;
+ }
+ PRINTK2((KERN_DEBUG "%s: I/O #7 passed!\n", dev->name));
+ } else if (dev->irq == 2) {
+ dev->irq = 9;
+ }
+#endif /* JUMPERED_INTERRUPTS */
+ PRINTK2((KERN_DEBUG "%s: I/O #9 passed!\n", dev->name));
+
+ /* DMA is not supported (yet?), so no use detecting it */
+ lp = netdev_priv(dev);
+
+ spin_lock_init(&lp->lock);
+
+ PRINTK2((KERN_DEBUG "%s: I/O #10 passed!\n", dev->name));
+
+/* Get the size of the onboard receive buffer.
+ * Addresses higher than the buffer size wrap around into the real buffer,
+ * i.e. with a 2K onboard buffer, data for offset 0x801 is written to 0x1.
+ */
+ if (!bufsize_rcv) {
+ outb(1, IE_MMODE); /* Put Rcv buffer on system bus */
+ outw(0, IE_GP); /* Point GP at start of packet */
+ outb(0, IE_RBUF); /* set buffer byte 0 to 0 */
+ for (i = 1; i < 0xff; i++) {
+ outw(i << 8, IE_GP); /* Point GP at packet size to be tested */
+ outb(i, IE_RBUF);
+ outw(0x0, IE_GP); /* Point GP at start of packet */
+ data = inb(IE_RBUF);
+ if (data == i) break;
+ }
+ bufsize_rcv = i << 8;
+ outw(0, IE_GP); /* Point GP at start of packet */
+ outb(0, IE_RBUF); /* set buffer byte 0 to 0 again */
+ }
+ printk("-> bufsize rcv/xmt=%d/%d\n", bufsize_rcv, NI5010_BUFSIZE);
+
+ dev->netdev_ops = &ni5010_netdev_ops;
+ dev->watchdog_timeo = HZ/20;
+
+ dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */
+
+ /* Shut up the ni5010 */
+ outb(0, EDLC_RMASK); /* Mask all receive interrupts */
+ outb(0, EDLC_XMASK); /* Mask all xmit interrupts */
+ outb(0xff, EDLC_RCLR); /* Kill all pending rcv interrupts */
+ outb(0xff, EDLC_XCLR); /* Kill all pending xmt interrupts */
+
+ printk(KERN_INFO "%s: NI5010 found at 0x%x, using IRQ %d", dev->name, ioaddr, dev->irq);
+ if (dev->dma)
+ printk(" & DMA %d", dev->dma);
+ printk(".\n");
+ return 0;
+out:
+ release_region(dev->base_addr, NI5010_IO_EXTENT);
+ return err;
+}
+
+/*
+ * Open/initialize the board. This is called (in the current kernel)
+ * sometime after booting when the 'ifconfig' program is run.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+
+static int ni5010_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ int i;
+
+ PRINTK2((KERN_DEBUG "%s: entering ni5010_open()\n", dev->name));
+
+ if (request_irq(dev->irq, ni5010_interrupt, 0, boardname, dev)) {
+ printk(KERN_WARNING "%s: Cannot get irq %#2x\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+ PRINTK3((KERN_DEBUG "%s: passed open() #1\n", dev->name));
+ /*
+ * Always allocate the DMA channel after the IRQ,
+ * and clean up on failure.
+ */
+#ifdef JUMPERED_DMA
+	if (request_dma(dev->dma, boardname)) {
+		printk(KERN_WARNING "%s: Cannot get dma %#2x\n", dev->name, dev->dma);
+		free_irq(dev->irq, dev);
+		return -EAGAIN;
+	}
+ }
+#endif /* JUMPERED_DMA */
+
+ PRINTK3((KERN_DEBUG "%s: passed open() #2\n", dev->name));
+ /* Reset the hardware here. Don't forget to set the station address. */
+
+	outb(RS_RESET, EDLC_RESET);	/* Hold up EDLC_RESET while configuring board */
+ outb(0, IE_RESET); /* Hardware reset of ni5010 board */
+ outb(XMD_LBC, EDLC_XMODE); /* Only loopback xmits */
+
+ PRINTK3((KERN_DEBUG "%s: passed open() #3\n", dev->name));
+ /* Set the station address */
+	for (i = 0; i < 6; i++) {
+ outb(dev->dev_addr[i], EDLC_ADDR + i);
+ }
+
+ PRINTK3((KERN_DEBUG "%s: Initialising ni5010\n", dev->name));
+ outb(0, EDLC_XMASK); /* No xmit interrupts for now */
+ outb(XMD_IG_PAR | XMD_T_MODE | XMD_LBC, EDLC_XMODE);
+ /* Normal packet xmit mode */
+ outb(0xff, EDLC_XCLR); /* Clear all pending xmit interrupts */
+ outb(RMD_BROADCAST, EDLC_RMODE);
+ /* Receive broadcast and normal packets */
+ reset_receiver(dev); /* Ready ni5010 for receiving packets */
+
+ outb(0, EDLC_RESET); /* Un-reset the ni5010 */
+
+ netif_start_queue(dev);
+
+ if (NI5010_DEBUG) ni5010_show_registers(dev);
+
+ PRINTK((KERN_DEBUG "%s: open successful\n", dev->name));
+ return 0;
+}
+
+static void reset_receiver(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ PRINTK3((KERN_DEBUG "%s: resetting receiver\n", dev->name));
+ outw(0, IE_GP); /* Receive packet at start of buffer */
+ outb(0xff, EDLC_RCLR); /* Clear all pending rcv interrupts */
+ outb(0, IE_MMODE); /* Put EDLC to rcv buffer */
+ outb(MM_EN_RCV, IE_MMODE); /* Enable rcv */
+ outb(0xff, EDLC_RMASK); /* Enable all rcv interrupts */
+}
+
+static void ni5010_timeout(struct net_device *dev)
+{
+ printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
+ tx_done(dev) ? "IRQ conflict" : "network cable problem");
+ /* Try to restart the adaptor. */
+ /* FIXME: Give it a real kick here */
+ chipset_init(dev, 1);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+static int ni5010_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+
+ PRINTK2((KERN_DEBUG "%s: entering ni5010_send_packet\n", dev->name));
+
+ /*
+ * Block sending
+ */
+
+ netif_stop_queue(dev);
+	hardware_send_packet(dev, (char *)skb->data, skb->len, length - skb->len);
+	dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+/*
+ * The typical workload of the driver:
+ * Handle the network interface interrupts.
+ */
+static irqreturn_t ni5010_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct ni5010_local *lp;
+ int ioaddr, status;
+ int xmit_was_error = 0;
+
+ PRINTK2((KERN_DEBUG "%s: entering ni5010_interrupt\n", dev->name));
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ spin_lock(&lp->lock);
+ status = inb(IE_ISTAT);
+ PRINTK3((KERN_DEBUG "%s: IE_ISTAT = %#02x\n", dev->name, status));
+
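+	/* The ISTAT bits are active low: a 0 bit means the corresponding
+	 * interrupt is pending (see the "=0 iff" notes in ni5010.h). */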
+ if ((status & IS_R_INT) == 0) ni5010_rx(dev);
+
+ if ((status & IS_X_INT) == 0) {
+ xmit_was_error = process_xmt_interrupt(dev);
+ }
+
+ if ((status & IS_DMA_INT) == 0) {
+ PRINTK((KERN_DEBUG "%s: DMA complete (?)\n", dev->name));
+ outb(0, IE_DMA_RST); /* Reset DMA int */
+ }
+
+ if (!xmit_was_error)
+ reset_receiver(dev);
+ spin_unlock(&lp->lock);
+ return IRQ_HANDLED;
+}
+
+
+static void dump_packet(void *buf, int len)
+{
+ int i;
+
+ printk(KERN_DEBUG "Packet length = %#4x\n", len);
+ for (i = 0; i < len; i++){
+ if (i % 16 == 0) printk(KERN_DEBUG "%#4.4x", i);
+ if (i % 2 == 0) printk(" ");
+ printk("%2.2x", ((unsigned char *)buf)[i]);
+ if (i % 16 == 15) printk("\n");
+ }
+ printk("\n");
+}
+
+/* We have a good packet, get it out of the buffer. */
+static void ni5010_rx(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ unsigned char rcv_stat;
+ struct sk_buff *skb;
+ int i_pkt_size;
+
+ PRINTK2((KERN_DEBUG "%s: entering ni5010_rx()\n", dev->name));
+
+ rcv_stat = inb(EDLC_RSTAT);
+ PRINTK3((KERN_DEBUG "%s: EDLC_RSTAT = %#2x\n", dev->name, rcv_stat));
+
+ if ( (rcv_stat & RS_VALID_BITS) != RS_PKT_OK) {
+ PRINTK((KERN_INFO "%s: receive error.\n", dev->name));
+ dev->stats.rx_errors++;
+ if (rcv_stat & RS_RUNT) dev->stats.rx_length_errors++;
+ if (rcv_stat & RS_ALIGN) dev->stats.rx_frame_errors++;
+ if (rcv_stat & RS_CRC_ERR) dev->stats.rx_crc_errors++;
+ if (rcv_stat & RS_OFLW) dev->stats.rx_fifo_errors++;
+ outb(0xff, EDLC_RCLR); /* Clear the interrupt */
+ return;
+ }
+
+ outb(0xff, EDLC_RCLR); /* Clear the interrupt */
+
+ i_pkt_size = inw(IE_RCNT);
+ if (i_pkt_size > ETH_FRAME_LEN || i_pkt_size < 10 ) {
+ PRINTK((KERN_DEBUG "%s: Packet size error, packet size = %#4.4x\n",
+ dev->name, i_pkt_size));
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
+ return;
+ }
+
+ /* Malloc up new buffer. */
+ skb = dev_alloc_skb(i_pkt_size + 3);
+ if (skb == NULL) {
+ printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
+ dev->stats.rx_dropped++;
+ return;
+ }
+
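+	/* Reserve 2 bytes so the 14-byte Ethernet header leaves the IP
+	 * header longword-aligned. */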
+ skb_reserve(skb, 2);
+
+ /* Read packet into buffer */
+ outb(MM_MUX, IE_MMODE); /* Rcv buffer to system bus */
+ outw(0, IE_GP); /* Seek to beginning of packet */
+ insb(IE_RBUF, skb_put(skb, i_pkt_size), i_pkt_size);
+
+ if (NI5010_DEBUG >= 4)
+ dump_packet(skb->data, skb->len);
+
+ skb->protocol = eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += i_pkt_size;
+
+ PRINTK2((KERN_DEBUG "%s: Received packet, size=%#4.4x\n",
+ dev->name, i_pkt_size));
+}
+
+static int process_xmt_interrupt(struct net_device *dev)
+{
+ struct ni5010_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int xmit_stat;
+
+ PRINTK2((KERN_DEBUG "%s: entering process_xmt_interrupt\n", dev->name));
+
+ xmit_stat = inb(EDLC_XSTAT);
+ PRINTK3((KERN_DEBUG "%s: EDLC_XSTAT = %2.2x\n", dev->name, xmit_stat));
+
+ outb(0, EDLC_XMASK); /* Disable xmit IRQ's */
+ outb(0xff, EDLC_XCLR); /* Clear all pending xmit IRQ's */
+
+ if (xmit_stat & XS_COLL){
+ PRINTK((KERN_DEBUG "%s: collision detected, retransmitting\n",
+ dev->name));
+ outw(NI5010_BUFSIZE - lp->o_pkt_size, IE_GP);
+ /* outb(0, IE_MMODE); */ /* xmt buf on sysbus FIXME: needed ? */
+ outb(MM_EN_XMT | MM_MUX, IE_MMODE);
+ outb(XM_ALL, EDLC_XMASK); /* Enable xmt IRQ's */
+ dev->stats.collisions++;
+ return 1;
+ }
+
+ /* FIXME: handle other xmt error conditions */
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += lp->o_pkt_size;
+ netif_wake_queue(dev);
+
+ PRINTK2((KERN_DEBUG "%s: sent packet, size=%#4.4x\n",
+ dev->name, lp->o_pkt_size));
+
+ return 0;
+}
+
+/* The inverse routine to ni5010_open(). */
+static int ni5010_close(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ PRINTK2((KERN_DEBUG "%s: entering ni5010_close\n", dev->name));
+#ifdef JUMPERED_INTERRUPTS
+	free_irq(dev->irq, dev);
+#endif
+ /* Put card in held-RESET state */
+ outb(0, IE_MMODE);
+ outb(RS_RESET, EDLC_RESET);
+
+ netif_stop_queue(dev);
+
+ PRINTK((KERN_DEBUG "%s: %s closed down\n", dev->name, boardname));
+ return 0;
+
+}
+
+/* Set or clear the multicast filter for this adaptor.
+   The board cannot do real multicast filtering (IFF_MULTICAST is
+   cleared at probe time), so promiscuous mode, all-multicast mode
+   and any non-empty multicast list all enable promiscuous reception;
+   otherwise the board receives normal and broadcast packets only.
+*/
+static void ni5010_set_multicast_list(struct net_device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name));
+
+ if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
+ !netdev_mc_empty(dev)) {
+ outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */
+ PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name));
+ } else {
+ PRINTK((KERN_DEBUG "%s: Entering broadcast mode\n", dev->name));
+ outb(RMD_BROADCAST, EDLC_RMODE); /* Disable promiscuous mode, use normal mode */
+ }
+}
+
+static void hardware_send_packet(struct net_device *dev, char *buf, int length, int pad)
+{
+ struct ni5010_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+ unsigned int buf_offs;
+
+ PRINTK2((KERN_DEBUG "%s: entering hardware_send_packet\n", dev->name));
+
+ if (length > ETH_FRAME_LEN) {
+ PRINTK((KERN_WARNING "%s: packet too large, not possible\n",
+ dev->name));
+ return;
+ }
+
+ if (NI5010_DEBUG) ni5010_show_registers(dev);
+
+ if (inb(IE_ISTAT) & IS_EN_XMT) {
+ PRINTK((KERN_WARNING "%s: sending packet while already transmitting, not possible\n",
+ dev->name));
+ return;
+ }
+
+ if (NI5010_DEBUG > 3) dump_packet(buf, length);
+
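+	/*
+	 * Load the packet at the top end of the transmit buffer; the
+	 * EDLC transmits from the GP pointer to the end of the buffer,
+	 * so the start offset implicitly encodes the packet length
+	 * (compare the collision-retransmit path in process_xmt_interrupt()).
+	 */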
+ buf_offs = NI5010_BUFSIZE - length - pad;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->o_pkt_size = length + pad;
+
+ outb(0, EDLC_RMASK); /* Mask all receive interrupts */
+ outb(0, IE_MMODE); /* Put Xmit buffer on system bus */
+ outb(0xff, EDLC_RCLR); /* Clear out pending rcv interrupts */
+
+ outw(buf_offs, IE_GP); /* Point GP at start of packet */
+ outsb(IE_XBUF, buf, length); /* Put data in buffer */
+ while(pad--)
+ outb(0, IE_XBUF);
+
+ outw(buf_offs, IE_GP); /* Rewrite where packet starts */
+
+ /* should work without that outb() (Crynwr used it) */
+ /*outb(MM_MUX, IE_MMODE);*/ /* Xmt buffer to EDLC bus */
+ outb(MM_EN_XMT | MM_MUX, IE_MMODE); /* Begin transmission */
+ outb(XM_ALL, EDLC_XMASK); /* Cause interrupt after completion or fail */
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ netif_wake_queue(dev);
+
+ if (NI5010_DEBUG) ni5010_show_registers(dev);
+}
+
+static void chipset_init(struct net_device *dev, int startp)
+{
+ /* FIXME: Move some stuff here */
+ PRINTK3((KERN_DEBUG "%s: doing NOTHING in chipset_init\n", dev->name));
+}
+
+static void ni5010_show_registers(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ PRINTK3((KERN_DEBUG "%s: XSTAT %#2.2x\n", dev->name, inb(EDLC_XSTAT)));
+ PRINTK3((KERN_DEBUG "%s: XMASK %#2.2x\n", dev->name, inb(EDLC_XMASK)));
+ PRINTK3((KERN_DEBUG "%s: RSTAT %#2.2x\n", dev->name, inb(EDLC_RSTAT)));
+ PRINTK3((KERN_DEBUG "%s: RMASK %#2.2x\n", dev->name, inb(EDLC_RMASK)));
+ PRINTK3((KERN_DEBUG "%s: RMODE %#2.2x\n", dev->name, inb(EDLC_RMODE)));
+ PRINTK3((KERN_DEBUG "%s: XMODE %#2.2x\n", dev->name, inb(EDLC_XMODE)));
+ PRINTK3((KERN_DEBUG "%s: ISTAT %#2.2x\n", dev->name, inb(IE_ISTAT)));
+}
+
+#ifdef MODULE
+static struct net_device *dev_ni5010;
+
+module_param(io, int, 0);
+module_param(irq, int, 0);
+MODULE_PARM_DESC(io, "ni5010 I/O base address");
+MODULE_PARM_DESC(irq, "ni5010 IRQ number");
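+/* Typical module usage, e.g.: modprobe ni5010 io=0x300 irq=5 */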
+
+static int __init ni5010_init_module(void)
+{
+ PRINTK2((KERN_DEBUG "%s: entering init_module\n", boardname));
+ /*
+ if(io <= 0 || irq == 0){
+ printk(KERN_WARNING "%s: Autoprobing not allowed for modules.\n", boardname);
+ printk(KERN_WARNING "%s: Set symbols 'io' and 'irq'\n", boardname);
+ return -EINVAL;
+ }
+ */
+	if (io <= 0) {
+		printk(KERN_WARNING "%s: Autoprobing for modules is hazardous, trying anyway...\n", boardname);
+	}
+
+ PRINTK2((KERN_DEBUG "%s: init_module irq=%#2x, io=%#3x\n", boardname, irq, io));
+ dev_ni5010 = ni5010_probe(-1);
+ if (IS_ERR(dev_ni5010))
+ return PTR_ERR(dev_ni5010);
+ return 0;
+}
+
+static void __exit ni5010_cleanup_module(void)
+{
+ PRINTK2((KERN_DEBUG "%s: entering cleanup_module\n", boardname));
+ unregister_netdev(dev_ni5010);
+ release_region(dev_ni5010->base_addr, NI5010_IO_EXTENT);
+ free_netdev(dev_ni5010);
+}
+module_init(ni5010_init_module);
+module_exit(ni5010_cleanup_module);
+#endif /* MODULE */
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/racal/ni5010.h b/drivers/net/ethernet/racal/ni5010.h
new file mode 100644
index 000000000000..e10e717fcd76
--- /dev/null
+++ b/drivers/net/ethernet/racal/ni5010.h
@@ -0,0 +1,144 @@
+/*
+ * Racal-Interlan ni5010 Ethernet definitions
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same GNU General Public License that covers that work.
+ *
+ * copyrights (c) 1996 by Jan-Pascal van Best (jvbest@wi.leidenuniv.nl)
+ *
+ * I have taken a look at the following sources:
+ * crynwr-packet-driver by Russ Nelson
+ */
+
+#define NI5010_BUFSIZE 2048 /* number of bytes in a buffer */
+
+#define NI5010_MAGICVAL0 0x00 /* magic-values for ni5010 card */
+#define NI5010_MAGICVAL1 0x55
+#define NI5010_MAGICVAL2 0xAA
+
+#define SA_ADDR0 0x02
+#define SA_ADDR1 0x07
+#define SA_ADDR2 0x01
+
+/* The number of low I/O ports used by the ni5010 ethercard. */
+#define NI5010_IO_EXTENT 32
+
+#define PRINTK(x) if (NI5010_DEBUG) printk x
+#define PRINTK2(x) if (NI5010_DEBUG>=2) printk x
+#define PRINTK3(x) if (NI5010_DEBUG>=3) printk x
+
+/* The various IE command registers */
+#define EDLC_XSTAT (ioaddr + 0x00) /* EDLC transmit csr */
+#define EDLC_XCLR (ioaddr + 0x00) /* EDLC transmit "Clear IRQ" */
+#define EDLC_XMASK (ioaddr + 0x01) /* EDLC transmit "IRQ Masks" */
+#define EDLC_RSTAT (ioaddr + 0x02) /* EDLC receive csr */
+#define EDLC_RCLR (ioaddr + 0x02) /* EDLC receive "Clear IRQ" */
+#define EDLC_RMASK (ioaddr + 0x03) /* EDLC receive "IRQ Masks" */
+#define EDLC_XMODE (ioaddr + 0x04) /* EDLC transmit Mode */
+#define EDLC_RMODE (ioaddr + 0x05) /* EDLC receive Mode */
+#define EDLC_RESET (ioaddr + 0x06) /* EDLC RESET register */
+#define EDLC_TDR1 (ioaddr + 0x07) /* "Time Domain Reflectometry" reg1 */
+#define EDLC_ADDR (ioaddr + 0x08) /* EDLC station address, 6 bytes */
+ /* 0x0E doesn't exist for r/w */
+#define EDLC_TDR2 (ioaddr + 0x0f) /* "Time Domain Reflectometry" reg2 */
+#define IE_GP (ioaddr + 0x10) /* GP pointer (word register) */
+ /* 0x11 is 2nd byte of GP Pointer */
+#define IE_RCNT (ioaddr + 0x10) /* Count of bytes in rcv'd packet */
+ /* 0x11 is 2nd byte of "Byte Count" */
+#define IE_MMODE (ioaddr + 0x12) /* Memory Mode register */
+#define IE_DMA_RST (ioaddr + 0x13) /* IE DMA Reset. write only */
+#define IE_ISTAT (ioaddr + 0x13) /* IE Interrupt Status. read only */
+#define IE_RBUF (ioaddr + 0x14) /* IE Receive Buffer port */
+#define IE_XBUF (ioaddr + 0x15) /* IE Transmit Buffer port */
+#define IE_SAPROM (ioaddr + 0x16) /* window on station addr prom */
+#define IE_RESET (ioaddr + 0x17) /* any write causes Board Reset */
+
+/* bits in EDLC_XSTAT, interrupt clear on write, status when read */
+#define XS_TPOK 0x80 /* transmit packet successful */
+#define XS_CS 0x40 /* carrier sense */
+#define XS_RCVD 0x20 /* transmitted packet received */
+#define XS_SHORT 0x10 /* transmission media is shorted */
+#define XS_UFLW 0x08 /* underflow. iff failed board */
+#define XS_COLL 0x04 /* collision occurred */
+#define XS_16COLL 0x02 /* 16th collision occurred */
+#define XS_PERR 0x01 /* parity error */
+
+#define XS_CLR_UFLW 0x08 /* clear underflow */
+#define XS_CLR_COLL 0x04 /* clear collision */
+#define XS_CLR_16COLL 0x02 /* clear 16th collision */
+#define XS_CLR_PERR 0x01 /* clear parity error */
+
+/* bits in EDLC_XMASK, mask/enable transmit interrupts. register is r/w */
+#define XM_TPOK 0x80 /* =1 to enable Xmt Pkt OK interrupts */
+#define XM_RCVD 0x20 /* =1 to enable Xmt Pkt Rcvd ints */
+#define XM_UFLW 0x08 /* =1 to enable Xmt Underflow ints */
+#define XM_COLL 0x04 /* =1 to enable Xmt Collision ints */
+#define XM_COLL16 0x02 /* =1 to enable Xmt 16th Coll ints */
+#define XM_PERR 0x01 /* =1 to enable Xmt Parity Error ints */
+ /* note: always clear this bit */
+#define XM_ALL (XM_TPOK | XM_RCVD | XM_UFLW | XM_COLL | XM_COLL16)
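+/* XM_ALL deliberately omits XM_PERR, as the note above directs */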
+
+/* bits in EDLC_RSTAT, interrupt clear on write, status when read */
+#define RS_PKT_OK 0x80 /* received good packet */
+#define RS_RST_PKT 0x10 /* RESET packet received */
+#define RS_RUNT 0x08 /* Runt Pkt rcvd. Len < 64 Bytes */
+#define RS_ALIGN 0x04 /* Alignment error. not 8 bit aligned */
+#define RS_CRC_ERR 0x02 /* Bad CRC on rcvd pkt */
+#define RS_OFLW 0x01 /* overflow for rcv FIFO */
+#define RS_VALID_BITS ( RS_PKT_OK | RS_RST_PKT | RS_RUNT | RS_ALIGN | RS_CRC_ERR | RS_OFLW )
+ /* all valid RSTAT bits */
+
+#define RS_CLR_PKT_OK 0x80 /* clear rcvd packet interrupt */
+#define RS_CLR_RST_PKT 0x10 /* clear RESET packet received */
+#define RS_CLR_RUNT 0x08 /* clear Runt Pckt received */
+#define RS_CLR_ALIGN 0x04 /* clear Alignment error */
+#define RS_CLR_CRC_ERR 0x02 /* clear CRC error */
+#define RS_CLR_OFLW 0x01 /* clear rcv FIFO Overflow */
+
+/* bits in EDLC_RMASK, mask/enable receive interrupts. register is r/w */
+#define RM_PKT_OK 0x80 /* =1 to enable rcvd good packet ints */
+#define RM_RST_PKT 0x10 /* =1 to enable RESET packet ints */
+#define RM_RUNT 0x08 /* =1 to enable Runt Pkt rcvd ints */
+#define RM_ALIGN 0x04 /* =1 to enable Alignment error ints */
+#define RM_CRC_ERR 0x02 /* =1 to enable Bad CRC error ints */
+#define RM_OFLW 0x01 /* =1 to enable overflow error ints */
+
+/* bits in EDLC_RMODE, set Receive Packet mode. register is r/w */
+#define RMD_TEST 0x80 /* =1 for Chip testing. normally 0 */
+#define RMD_ADD_SIZ 0x10 /* =1 5-byte addr match. normally 0 */
+#define RMD_EN_RUNT 0x08 /* =1 enable runt rcv. normally 0 */
+#define RMD_EN_RST 0x04 /* =1 to rcv RESET pkt. normally 0 */
+
+#define RMD_PROMISC 0x03 /* receive *all* packets. unusual */
+#define RMD_MULTICAST 0x02 /* receive multicasts too. unusual */
+#define RMD_BROADCAST 0x01 /* receive broadcasts & normal. usual */
+#define RMD_NO_PACKETS 0x00 /* don't receive any packets. unusual */
+
+/* bits in EDLC_XMODE, set Transmit Packet mode. register is r/w */
+#define XMD_COLL_CNT 0xf0 /* coll's since success. read-only */
+#define XMD_IG_PAR 0x08 /* =1 to ignore parity. ALWAYS set */
+#define XMD_T_MODE 0x04 /* =1 to power xcvr. ALWAYS set this */
+#define XMD_LBC		0x02	/* =1 for loopback. normally set */
+#define XMD_DIS_C 0x01 /* =1 disables contention. normally 0 */
+
+/* bits in EDLC_RESET, write only */
+#define RS_RESET 0x80 /* =1 to hold EDLC in reset state */
+
+/* bits in IE_MMODE, write only */
+#define MM_EN_DMA 0x80 /* =1 begin DMA xfer, Cplt clrs it */
+#define MM_EN_RCV 0x40 /* =1 allows Pkt rcv. clr'd by rcv */
+#define MM_EN_XMT 0x20 /* =1 begin Xmt pkt. Cplt clrs it */
+#define MM_BUS_PAGE 0x18 /* =00 ALWAYS. Used when MUX=1 */
+#define MM_NET_PAGE 0x06 /* =00 ALWAYS. Used when MUX=0 */
+#define MM_MUX 0x01 /* =1 means Rcv Buff on system bus */
+ /* =0 means Xmt Buff on system bus */
+
+/* bits in IE_ISTAT, read only */
+#define IS_TDIAG 0x80 /* =1 if Diagnostic problem */
+#define IS_EN_RCV 0x20 /* =1 until frame is rcv'd cplt */
+#define IS_EN_XMT 0x10 /* =1 until frame is xmt'd cplt */
+#define IS_EN_DMA 0x08 /* =1 until DMA is cplt or aborted */
+#define IS_DMA_INT 0x04 /* =0 iff DMA done interrupt. */
+#define IS_R_INT 0x02 /* =0 iff unmasked Rcv interrupt */
+#define IS_X_INT 0x01 /* =0 iff unmasked Xmt interrupt */
+
diff --git a/drivers/net/ethernet/rdc/Kconfig b/drivers/net/ethernet/rdc/Kconfig
new file mode 100644
index 000000000000..c8ba4b3494c1
--- /dev/null
+++ b/drivers/net/ethernet/rdc/Kconfig
@@ -0,0 +1,35 @@
+#
+# RDC network device configuration
+#
+
+config NET_VENDOR_RDC
+ bool "RDC devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about RDC cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_RDC
+
+config R6040
+ tristate "RDC R6040 Fast Ethernet Adapter support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ select PHYLIB
+ ---help---
+	  This is a driver for the R6040 Fast Ethernet MACs found in
+	  the RDC R-321x System-on-chips.
+
+ To compile this driver as a module, choose M here: the module
+ will be called r6040. This is recommended.
+
+endif # NET_VENDOR_RDC
diff --git a/drivers/net/ethernet/rdc/Makefile b/drivers/net/ethernet/rdc/Makefile
new file mode 100644
index 000000000000..8d51fd2d07fc
--- /dev/null
+++ b/drivers/net/ethernet/rdc/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the RDC network device drivers.
+#
+
+obj-$(CONFIG_R6040) += r6040.o
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
new file mode 100644
index 000000000000..2bbadc044784
--- /dev/null
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -0,0 +1,1276 @@
+/*
+ * RDC R6040 Fast Ethernet MAC support
+ *
+ * Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw>
+ * Copyright (C) 2007
+ * Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>
+ * Florian Fainelli <florian@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/uaccess.h>
+#include <linux/phy.h>
+
+#include <asm/processor.h>
+
+#define DRV_NAME "r6040"
+#define DRV_VERSION "0.27"
+#define DRV_RELDATE "23Feb2011"
+
+/* PHY CHIP Address */
+#define PHY1_ADDR 1 /* For MAC1 */
+#define PHY2_ADDR 3 /* For MAC2 */
+#define PHY_MODE 0x3100 /* PHY CHIP Register 0 */
+#define PHY_CAP 0x01E1 /* PHY CHIP Register 4 */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6000 * HZ / 1000)
+
+/* RDC MAC I/O Size */
+#define R6040_IO_SIZE 256
+
+/* MAX RDC MAC */
+#define MAX_MAC 2
+
+/* MAC registers */
+#define MCR0 0x00 /* Control register 0 */
+#define MCR0_PROMISC 0x0020 /* Promiscuous mode */
+#define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */
+#define MCR1 0x04 /* Control register 1 */
+#define MAC_RST 0x0001 /* Reset the MAC */
+#define MBCR 0x08 /* Bus control */
+#define MT_ICR 0x0C /* TX interrupt control */
+#define MR_ICR 0x10 /* RX interrupt control */
+#define MTPR 0x14 /* TX poll command register */
+#define MR_BSR 0x18 /* RX buffer size */
+#define MR_DCR 0x1A /* RX descriptor control */
+#define MLSR 0x1C /* Last status */
+#define MMDIO 0x20 /* MDIO control register */
+#define MDIO_WRITE 0x4000 /* MDIO write */
+#define MDIO_READ 0x2000 /* MDIO read */
+#define MMRD 0x24 /* MDIO read data register */
+#define MMWD 0x28 /* MDIO write data register */
+#define MTD_SA0 0x2C /* TX descriptor start address 0 */
+#define MTD_SA1 0x30 /* TX descriptor start address 1 */
+#define MRD_SA0 0x34 /* RX descriptor start address 0 */
+#define MRD_SA1 0x38 /* RX descriptor start address 1 */
+#define MISR 0x3C /* Status register */
+#define MIER 0x40 /* INT enable register */
+#define MSK_INT 0x0000 /* Mask off interrupts */
+#define RX_FINISH 0x0001 /* RX finished */
+#define RX_NO_DESC 0x0002 /* No RX descriptor available */
+#define RX_FIFO_FULL 0x0004 /* RX FIFO full */
+#define RX_EARLY 0x0008 /* RX early */
+#define TX_FINISH 0x0010 /* TX finished */
+#define TX_EARLY 0x0080 /* TX early */
+#define EVENT_OVRFL 0x0100 /* Event counter overflow */
+#define LINK_CHANGED 0x0200 /* PHY link changed */
+#define ME_CISR 0x44 /* Event counter INT status */
+#define ME_CIER 0x48 /* Event counter INT enable */
+#define MR_CNT 0x50 /* Successfully received packet counter */
+#define ME_CNT0 0x52 /* Event counter 0 */
+#define ME_CNT1 0x54 /* Event counter 1 */
+#define ME_CNT2 0x56 /* Event counter 2 */
+#define ME_CNT3 0x58 /* Event counter 3 */
+#define MT_CNT 0x5A /* Successfully transmit packet counter */
+#define ME_CNT4 0x5C /* Event counter 4 */
+#define MP_CNT 0x5E /* Pause frame counter register */
+#define MAR0 0x60 /* Hash table 0 */
+#define MAR1 0x62 /* Hash table 1 */
+#define MAR2 0x64 /* Hash table 2 */
+#define MAR3 0x66 /* Hash table 3 */
+#define MID_0L 0x68 /* Multicast address MID0 Low */
+#define MID_0M 0x6A /* Multicast address MID0 Medium */
+#define MID_0H 0x6C /* Multicast address MID0 High */
+#define MID_1L 0x70 /* MID1 Low */
+#define MID_1M 0x72 /* MID1 Medium */
+#define MID_1H 0x74 /* MID1 High */
+#define MID_2L 0x78 /* MID2 Low */
+#define MID_2M 0x7A /* MID2 Medium */
+#define MID_2H 0x7C /* MID2 High */
+#define MID_3L 0x80 /* MID3 Low */
+#define MID_3M 0x82 /* MID3 Medium */
+#define MID_3H 0x84 /* MID3 High */
+#define PHY_CC 0x88 /* PHY status change configuration register */
+#define PHY_ST 0x8A /* PHY status register */
+#define MAC_SM 0xAC /* MAC status machine */
+#define MAC_ID 0xBE /* Identifier register */
+
+#define TX_DCNT 0x80 /* TX descriptor count */
+#define RX_DCNT 0x80 /* RX descriptor count */
+#define MAX_BUF_SIZE 0x600
+#define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor))
+#define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor))
+#define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */
+#define MCAST_MAX 3 /* Max number multicast addresses to filter */
+
+/* Descriptor status */
+#define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */
+#define DSC_RX_OK 0x4000 /* RX was successful */
+#define DSC_RX_ERR 0x0800 /* RX PHY error */
+#define DSC_RX_ERR_DRI 0x0400 /* RX dribble packet */
+#define DSC_RX_ERR_BUF 0x0200 /* RX length exceeds buffer size */
+#define DSC_RX_ERR_LONG 0x0100 /* RX length > maximum packet length */
+#define DSC_RX_ERR_RUNT 0x0080 /* RX packet length < 64 byte */
+#define DSC_RX_ERR_CRC 0x0040 /* RX CRC error */
+#define DSC_RX_BCAST 0x0020 /* RX broadcast (no error) */
+#define DSC_RX_MCAST 0x0010 /* RX multicast (no error) */
+#define DSC_RX_MCH_HIT 0x0008 /* RX multicast hit in hash table (no error) */
+#define DSC_RX_MIDH_HIT 0x0004 /* RX MID table hit (no error) */
+#define DSC_RX_IDX_MID_MASK 3 /* RX mask for the index of matched MIDx */
+
+/* PHY settings */
+#define ICPLUS_PHY_ID 0x0243
+
+MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>,"
+	"Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>, "
+ "Florian Fainelli <florian@openwrt.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver");
+MODULE_VERSION(DRV_VERSION " " DRV_RELDATE);
+
+/* RX and TX interrupts that we handle */
+#define RX_INTS (RX_FIFO_FULL | RX_NO_DESC | RX_FINISH)
+#define TX_INTS (TX_FINISH)
+#define INT_MASK (RX_INTS | TX_INTS)
+
+struct r6040_descriptor {
+ u16 status, len; /* 0-3 */
+ __le32 buf; /* 4-7 */
+ __le32 ndesc; /* 8-B */
+ u32 rev1; /* C-F */
+ char *vbufp; /* 10-13 */
+ struct r6040_descriptor *vndescp; /* 14-17 */
+ struct sk_buff *skb_ptr; /* 18-1B */
+ u32 rev2; /* 1C-1F */
+} __attribute__((aligned(32)));
+
+struct r6040_private {
+ spinlock_t lock; /* driver lock */
+ struct pci_dev *pdev;
+ struct r6040_descriptor *rx_insert_ptr;
+ struct r6040_descriptor *rx_remove_ptr;
+ struct r6040_descriptor *tx_insert_ptr;
+ struct r6040_descriptor *tx_remove_ptr;
+ struct r6040_descriptor *rx_ring;
+ struct r6040_descriptor *tx_ring;
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+ u16 tx_free_desc, phy_addr;
+ u16 mcr0, mcr1;
+ struct net_device *dev;
+ struct mii_bus *mii_bus;
+ struct napi_struct napi;
+ void __iomem *base;
+ struct phy_device *phydev;
+ int old_link;
+ int old_duplex;
+};
+
+static char version[] __devinitdata = DRV_NAME
+	": RDC R6040 NAPI net driver, "
+	"version " DRV_VERSION " (" DRV_RELDATE ")";
+
+static int phy_table[] = { PHY1_ADDR, PHY2_ADDR };
+
+/* Read a word of data from the PHY chip */
+static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
+{
+ int limit = 2048;
+ u16 cmd;
+
+ iowrite16(MDIO_READ + reg + (phy_addr << 8), ioaddr + MMDIO);
+ /* Wait for the read bit to be cleared */
+ while (limit--) {
+ cmd = ioread16(ioaddr + MMDIO);
+ if (!(cmd & MDIO_READ))
+ break;
+ }
+
+ return ioread16(ioaddr + MMRD);
+}
+
+/* Write a word of data to the PHY chip */
+static void r6040_phy_write(void __iomem *ioaddr,
+ int phy_addr, int reg, u16 val)
+{
+ int limit = 2048;
+ u16 cmd;
+
+ iowrite16(val, ioaddr + MMWD);
+ /* Write the command to the MDIO bus */
+ iowrite16(MDIO_WRITE + reg + (phy_addr << 8), ioaddr + MMDIO);
+ /* Wait for the write bit to be cleared */
+ while (limit--) {
+ cmd = ioread16(ioaddr + MMDIO);
+ if (!(cmd & MDIO_WRITE))
+ break;
+ }
+}
+
+static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg)
+{
+ struct net_device *dev = bus->priv;
+ struct r6040_private *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+
+ return r6040_phy_read(ioaddr, phy_addr, reg);
+}
+
+static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
+ int reg, u16 value)
+{
+ struct net_device *dev = bus->priv;
+ struct r6040_private *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+
+ r6040_phy_write(ioaddr, phy_addr, reg, value);
+
+ return 0;
+}
+
+static int r6040_mdiobus_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+static void r6040_free_txbufs(struct net_device *dev)
+{
+ struct r6040_private *lp = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < TX_DCNT; i++) {
+ if (lp->tx_insert_ptr->skb_ptr) {
+ pci_unmap_single(lp->pdev,
+ le32_to_cpu(lp->tx_insert_ptr->buf),
+ MAX_BUF_SIZE, PCI_DMA_TODEVICE);
+ dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
+ lp->tx_insert_ptr->skb_ptr = NULL;
+ }
+ lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp;
+ }
+}
+
+static void r6040_free_rxbufs(struct net_device *dev)
+{
+ struct r6040_private *lp = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < RX_DCNT; i++) {
+ if (lp->rx_insert_ptr->skb_ptr) {
+ pci_unmap_single(lp->pdev,
+ le32_to_cpu(lp->rx_insert_ptr->buf),
+ MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(lp->rx_insert_ptr->skb_ptr);
+ lp->rx_insert_ptr->skb_ptr = NULL;
+ }
+ lp->rx_insert_ptr = lp->rx_insert_ptr->vndescp;
+ }
+}
+
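+/*
+ * Chain the descriptors into a circular ring: each descriptor's ndesc
+ * field holds the DMA address of the next descriptor and vndescp its
+ * virtual address, with the last descriptor pointing back to the first.
+ */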
+static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
+ dma_addr_t desc_dma, int size)
+{
+ struct r6040_descriptor *desc = desc_ring;
+ dma_addr_t mapping = desc_dma;
+
+ while (size-- > 0) {
+ mapping += sizeof(*desc);
+ desc->ndesc = cpu_to_le32(mapping);
+ desc->vndescp = desc + 1;
+ desc++;
+ }
+ desc--;
+ desc->ndesc = cpu_to_le32(desc_dma);
+ desc->vndescp = desc_ring;
+}
+
+static void r6040_init_txbufs(struct net_device *dev)
+{
+ struct r6040_private *lp = netdev_priv(dev);
+
+ lp->tx_free_desc = TX_DCNT;
+
+ lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
+ r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
+}
+
+static int r6040_alloc_rxbufs(struct net_device *dev)
+{
+ struct r6040_private *lp = netdev_priv(dev);
+ struct r6040_descriptor *desc;
+ struct sk_buff *skb;
+ int rc;
+
+ lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring;
+ r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);
+
+ /* Allocate skbs for the rx descriptors */
+ desc = lp->rx_ring;
+ do {
+ skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
+ if (!skb) {
+ netdev_err(dev, "failed to alloc skb for rx\n");
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ desc->skb_ptr = skb;
+ desc->buf = cpu_to_le32(pci_map_single(lp->pdev,
+ desc->skb_ptr->data,
+ MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
+ desc->status = DSC_OWNER_MAC;
+ desc = desc->vndescp;
+ } while (desc != lp->rx_ring);
+
+ return 0;
+
+err_exit:
+ /* Deallocate all previously allocated skbs */
+ r6040_free_rxbufs(dev);
+ return rc;
+}
+
+static void r6040_init_mac_regs(struct net_device *dev)
+{
+ struct r6040_private *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ int limit = 2048;
+ u16 cmd;
+
+ /* Mask Off Interrupt */
+ iowrite16(MSK_INT, ioaddr + MIER);
+
+ /* Reset RDC MAC */
+ iowrite16(MAC_RST, ioaddr + MCR1);
+ while (limit--) {
+ cmd = ioread16(ioaddr + MCR1);
+ if (cmd & 0x1)
+ break;
+ }
+ /* Reset internal state machine */
+ iowrite16(2, ioaddr + MAC_SM);
+ iowrite16(0, ioaddr + MAC_SM);
+ mdelay(5);
+
+ /* MAC Bus Control Register */
+ iowrite16(MBCR_DEFAULT, ioaddr + MBCR);
+
+ /* Buffer Size Register */
+ iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR);
+
+ /* Write TX ring start address */
+ iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
+ iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);
+
+ /* Write RX ring start address */
+ iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0);
+ iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1);
+
+ /* Set interrupt waiting time and packet numbers */
+ iowrite16(0, ioaddr + MT_ICR);
+ iowrite16(0, ioaddr + MR_ICR);
+
+ /* Enable interrupts */
+ iowrite16(INT_MASK, ioaddr + MIER);
+
+ /* Enable TX and RX */
+ iowrite16(lp->mcr0 | 0x0002, ioaddr);
+
+	/* Let TX poll the descriptors:
+	 * we may have been called by r6040_tx_timeout, which has left
+	 * some unsent tx buffers */
+ iowrite16(0x01, ioaddr + MTPR);
+}
+
+static void r6040_tx_timeout(struct net_device *dev)
+{
+ struct r6040_private *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+
+ netdev_warn(dev, "transmit timed out, int enable %4.4x "
+ "status %4.4x\n",
+ ioread16(ioaddr + MIER),
+ ioread16(ioaddr + MISR));
+
+ dev->stats.tx_errors++;
+
+ /* Reset MAC and re-init all registers */
+ r6040_init_mac_regs(dev);
+}
+
+static struct net_device_stats *r6040_get_stats(struct net_device *dev)
+{
+ struct r6040_private *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ dev->stats.rx_crc_errors += ioread8(ioaddr + ME_CNT1);
+ dev->stats.multicast += ioread8(ioaddr + ME_CNT0);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return &dev->stats;
+}
+
+/* Stop RDC MAC and Free the allocated resource */
+static void r6040_down(struct net_device *dev)
+{
+ struct r6040_private *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ int limit = 2048;
+ u16 *adrp;
+ u16 cmd;
+
+ /* Stop MAC */
+ iowrite16(MSK_INT, ioaddr + MIER); /* Mask Off Interrupt */
+ iowrite16(MAC_RST, ioaddr + MCR1); /* Reset RDC MAC */
+ while (limit--) {
+ cmd = ioread16(ioaddr + MCR1);
+ if (cmd & 0x1)
+ break;
+ }
+
+ /* Restore MAC Address to MIDx */
+ adrp = (u16 *) dev->dev_addr;
+ iowrite16(adrp[0], ioaddr + MID_0L);
+ iowrite16(adrp[1], ioaddr + MID_0M);
+ iowrite16(adrp[2], ioaddr + MID_0H);
+}
+
+static int r6040_close(struct net_device *dev)
+{
+ struct r6040_private *lp = netdev_priv(dev);
+ struct pci_dev *pdev = lp->pdev;
+
+ spin_lock_irq(&lp->lock);
+ napi_disable(&lp->napi);
+ netif_stop_queue(dev);
+ r6040_down(dev);
+
+ free_irq(dev->irq, dev);
+
+ /* Free RX buffer */
+ r6040_free_rxbufs(dev);
+
+ /* Free TX buffer */
+ r6040_free_txbufs(dev);
+
+ spin_unlock_irq(&lp->lock);
+
+ /* Free Descriptor memory */
+ if (lp->rx_ring) {
+ pci_free_consistent(pdev,
+ RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
+ lp->rx_ring = NULL;
+ }
+
+ if (lp->tx_ring) {
+ pci_free_consistent(pdev,
+ TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma);
+ lp->tx_ring = NULL;
+ }
+
+ return 0;
+}
+
+static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct r6040_private *lp = netdev_priv(dev);
+
+ if (!lp->phydev)
+ return -EINVAL;
+
+ return phy_mii_ioctl(lp->phydev, rq, cmd);
+}
+
+static int r6040_rx(struct net_device *dev, int limit)
+{
+ struct r6040_private *priv = netdev_priv(dev);
+ struct r6040_descriptor *descptr = priv->rx_remove_ptr;
+ struct sk_buff *skb_ptr, *new_skb;
+ int count = 0;
+ u16 err;
+
+ /* Limit not reached and the descriptor belongs to the CPU */
+ while (count < limit && !(descptr->status & DSC_OWNER_MAC)) {
+ /* Read the descriptor status */
+ err = descptr->status;
+ /* Global error status set */
+ if (err & DSC_RX_ERR) {
+ /* RX dribble */
+ if (err & DSC_RX_ERR_DRI)
+ dev->stats.rx_frame_errors++;
+ /* Buffer length exceeded */
+ if (err & DSC_RX_ERR_BUF)
+ dev->stats.rx_length_errors++;
+ /* Packet too long */
+ if (err & DSC_RX_ERR_LONG)
+ dev->stats.rx_length_errors++;
+ /* Packet < 64 bytes */
+ if (err & DSC_RX_ERR_RUNT)
+ dev->stats.rx_length_errors++;
+ /* CRC error */
+ if (err & DSC_RX_ERR_CRC) {
+ spin_lock(&priv->lock);
+ dev->stats.rx_crc_errors++;
+ spin_unlock(&priv->lock);
+ }
+ goto next_descr;
+ }
+
+ /* Packet successfully received */
+ new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
+ if (!new_skb) {
+ dev->stats.rx_dropped++;
+ goto next_descr;
+ }
+ skb_ptr = descptr->skb_ptr;
+ skb_ptr->dev = priv->dev;
+
+ /* Do not count the CRC */
+ skb_put(skb_ptr, descptr->len - 4);
+ pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
+ MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);
+
+ /* Send to upper layer */
+ netif_receive_skb(skb_ptr);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += descptr->len - 4;
+
+ /* put new skb into descriptor */
+ descptr->skb_ptr = new_skb;
+ descptr->buf = cpu_to_le32(pci_map_single(priv->pdev,
+ descptr->skb_ptr->data,
+ MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
+
+next_descr:
+ /* put the descriptor back to the MAC */
+ descptr->status = DSC_OWNER_MAC;
+ descptr = descptr->vndescp;
+ count++;
+ }
+ priv->rx_remove_ptr = descptr;
+
+ return count;
+}
+
+static void r6040_tx(struct net_device *dev)
+{
+ struct r6040_private *priv = netdev_priv(dev);
+ struct r6040_descriptor *descptr;
+ void __iomem *ioaddr = priv->base;
+ struct sk_buff *skb_ptr;
+ u16 err;
+
+ spin_lock(&priv->lock);
+ descptr = priv->tx_remove_ptr;
+ while (priv->tx_free_desc < TX_DCNT) {
+ /* Check for errors */
+ err = ioread16(ioaddr + MLSR);
+
+ if (err & 0x0200)
+ dev->stats.rx_fifo_errors++;
+ if (err & (0x2000 | 0x4000))
+ dev->stats.tx_carrier_errors++;
+
+ if (descptr->status & DSC_OWNER_MAC)
+ break; /* Not complete */
+ skb_ptr = descptr->skb_ptr;
+ pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
+ skb_ptr->len, PCI_DMA_TODEVICE);
+ /* Free buffer */
+ dev_kfree_skb_irq(skb_ptr);
+ descptr->skb_ptr = NULL;
+ /* To next descriptor */
+ descptr = descptr->vndescp;
+ priv->tx_free_desc++;
+ }
+ priv->tx_remove_ptr = descptr;
+
+ if (priv->tx_free_desc)
+ netif_wake_queue(dev);
+ spin_unlock(&priv->lock);
+}
+
+static int r6040_poll(struct napi_struct *napi, int budget)
+{
+ struct r6040_private *priv =
+ container_of(napi, struct r6040_private, napi);
+ struct net_device *dev = priv->dev;
+ void __iomem *ioaddr = priv->base;
+ int work_done;
+
+ work_done = r6040_rx(dev, budget);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ /* Enable RX interrupt */
+ iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER);
+ }
+ return work_done;
+}
+
+/* The RDC interrupt handler. */
+static irqreturn_t r6040_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct r6040_private *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ u16 misr, status;
+
+ /* Save MIER */
+ misr = ioread16(ioaddr + MIER);
+ /* Mask off RDC MAC interrupt */
+ iowrite16(MSK_INT, ioaddr + MIER);
+ /* Read MISR status and clear */
+ status = ioread16(ioaddr + MISR);
+
+ if (status == 0x0000 || status == 0xffff) {
+ /* Restore RDC MAC interrupt */
+ iowrite16(misr, ioaddr + MIER);
+ return IRQ_NONE;
+ }
+
+ /* RX interrupt request */
+ if (status & RX_INTS) {
+ if (status & RX_NO_DESC) {
+ /* RX descriptor unavailable */
+ dev->stats.rx_dropped++;
+ dev->stats.rx_missed_errors++;
+ }
+ if (status & RX_FIFO_FULL)
+ dev->stats.rx_fifo_errors++;
+
+ if (likely(napi_schedule_prep(&lp->napi))) {
+ /* Mask off RX interrupt */
+ misr &= ~RX_INTS;
+ __napi_schedule(&lp->napi);
+ }
+ }
+
+ /* TX interrupt request */
+ if (status & TX_INTS)
+ r6040_tx(dev);
+
+ /* Restore RDC MAC interrupt */
+ iowrite16(misr, ioaddr + MIER);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void r6040_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ r6040_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+/* Init RDC MAC */
+static int r6040_up(struct net_device *dev)
+{
+ struct r6040_private *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ int ret;
+
+ /* Initialise and alloc RX/TX buffers */
+ r6040_init_txbufs(dev);
+ ret = r6040_alloc_rxbufs(dev);
+ if (ret)
+ return ret;
+
+ /* improve performance (by RDC guys) */
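+	/* Set bit 14 and clear bit 13 of register 17 on PHY address 30;
+	 * the exact meaning of these vendor-recommended writes is
+	 * undocumented. */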
+ r6040_phy_write(ioaddr, 30, 17,
+ (r6040_phy_read(ioaddr, 30, 17) | 0x4000));
+ r6040_phy_write(ioaddr, 30, 17,
+ ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
+ r6040_phy_write(ioaddr, 0, 19, 0x0000);
+ r6040_phy_write(ioaddr, 0, 30, 0x01F0);
+
+ /* Initialize all MAC registers */
+ r6040_init_mac_regs(dev);
+
+ return 0;
+}
+
+
+/* Read/set MAC address routines */
+static void r6040_mac_address(struct net_device *dev)
+{
+ struct r6040_private *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ u16 *adrp;
+
+ /* MAC operation register */
+ iowrite16(0x01, ioaddr + MCR1); /* Reset MAC */
+ iowrite16(2, ioaddr + MAC_SM); /* Reset internal state machine */
+ iowrite16(0, ioaddr + MAC_SM);
+ mdelay(5);
+
+ /* Restore MAC Address */
+ adrp = (u16 *) dev->dev_addr;
+ iowrite16(adrp[0], ioaddr + MID_0L);
+ iowrite16(adrp[1], ioaddr + MID_0M);
+ iowrite16(adrp[2], ioaddr + MID_0H);
+
+ /* Store MAC Address in perm_addr */
+ memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
+}
+
+static int r6040_open(struct net_device *dev)
+{
+ struct r6040_private *lp = netdev_priv(dev);
+ int ret;
+
+ /* Request IRQ and Register interrupt handler */
+ ret = request_irq(dev->irq, r6040_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (ret)
+ goto out;
+
+ /* Set MAC address */
+ r6040_mac_address(dev);
+
+ /* Allocate Descriptor memory */
+ lp->rx_ring =
+ pci_alloc_consistent(lp->pdev, RX_DESC_SIZE, &lp->rx_ring_dma);
+ if (!lp->rx_ring) {
+ ret = -ENOMEM;
+ goto err_free_irq;
+ }
+
+ lp->tx_ring =
+ pci_alloc_consistent(lp->pdev, TX_DESC_SIZE, &lp->tx_ring_dma);
+ if (!lp->tx_ring) {
+ ret = -ENOMEM;
+ goto err_free_rx_ring;
+ }
+
+ ret = r6040_up(dev);
+ if (ret)
+ goto err_free_tx_ring;
+
+ napi_enable(&lp->napi);
+ netif_start_queue(dev);
+
+ return 0;
+
+err_free_tx_ring:
+ pci_free_consistent(lp->pdev, TX_DESC_SIZE, lp->tx_ring,
+ lp->tx_ring_dma);
+err_free_rx_ring:
+ pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring,
+ lp->rx_ring_dma);
+err_free_irq:
+ free_irq(dev->irq, dev);
+out:
+ return ret;
+}
+
+static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct r6040_private *lp = netdev_priv(dev);
+ struct r6040_descriptor *descptr;
+ void __iomem *ioaddr = lp->base;
+ unsigned long flags;
+
+ /* Critical Section */
+ spin_lock_irqsave(&lp->lock, flags);
+
+ /* TX resource check */
+ if (!lp->tx_free_desc) {
+ spin_unlock_irqrestore(&lp->lock, flags);
+ netif_stop_queue(dev);
+		netdev_err(dev, "no tx descriptor\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Statistic Counter */
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ /* Set TX descriptor & Transmit it */
+ lp->tx_free_desc--;
+ descptr = lp->tx_insert_ptr;
+	/* Enforce the 60-byte minimum Ethernet frame length */
+	if (skb->len < ETH_ZLEN)
+		descptr->len = ETH_ZLEN;
+	else
+		descptr->len = skb->len;
+
+ descptr->skb_ptr = skb;
+ descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
+ skb->data, skb->len, PCI_DMA_TODEVICE));
+ descptr->status = DSC_OWNER_MAC;
+
+ skb_tx_timestamp(skb);
+
+ /* Trigger the MAC to check the TX descriptor */
+ iowrite16(0x01, ioaddr + MTPR);
+ lp->tx_insert_ptr = descptr->vndescp;
+
+ /* If no tx resource, stop */
+ if (!lp->tx_free_desc)
+ netif_stop_queue(dev);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static void r6040_multicast_list(struct net_device *dev)
+{
+ struct r6040_private *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned long flags;
+ struct netdev_hw_addr *ha;
+ int i;
+ u16 *adrp;
+ u16 hash_table[4] = { 0 };
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ /* Keep our MAC Address */
+ adrp = (u16 *)dev->dev_addr;
+ iowrite16(adrp[0], ioaddr + MID_0L);
+ iowrite16(adrp[1], ioaddr + MID_0M);
+ iowrite16(adrp[2], ioaddr + MID_0H);
+
+ /* Clear AMCP & PROM bits */
+ lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN);
+
+ /* Promiscuous mode */
+ if (dev->flags & IFF_PROMISC)
+ lp->mcr0 |= MCR0_PROMISC;
+
+ /* Enable multicast hash table function to
+ * receive all multicast packets. */
+ else if (dev->flags & IFF_ALLMULTI) {
+ lp->mcr0 |= MCR0_HASH_EN;
+
+ for (i = 0; i < MCAST_MAX ; i++) {
+ iowrite16(0, ioaddr + MID_1L + 8 * i);
+ iowrite16(0, ioaddr + MID_1M + 8 * i);
+ iowrite16(0, ioaddr + MID_1H + 8 * i);
+ }
+
+ for (i = 0; i < 4; i++)
+ hash_table[i] = 0xffff;
+ }
+ /* Use internal multicast address registers if the number of
+ * multicast addresses is not greater than MCAST_MAX. */
+ else if (netdev_mc_count(dev) <= MCAST_MAX) {
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ u16 *adrp = (u16 *) ha->addr;
+ iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
+ iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
+ iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
+ i++;
+ }
+ while (i < MCAST_MAX) {
+ iowrite16(0, ioaddr + MID_1L + 8 * i);
+ iowrite16(0, ioaddr + MID_1M + 8 * i);
+ iowrite16(0, ioaddr + MID_1H + 8 * i);
+ i++;
+ }
+ }
+ /* Otherwise, Enable multicast hash table function. */
+ else {
+ u32 crc;
+
+ lp->mcr0 |= MCR0_HASH_EN;
+
+ for (i = 0; i < MCAST_MAX ; i++) {
+ iowrite16(0, ioaddr + MID_1L + 8 * i);
+ iowrite16(0, ioaddr + MID_1M + 8 * i);
+ iowrite16(0, ioaddr + MID_1H + 8 * i);
+ }
+
+ /* Build multicast hash table */
+ netdev_for_each_mc_addr(ha, dev) {
+ u8 *addrs = ha->addr;
+
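+			/* Use the top 6 bits of the CRC: bits 5-4 pick one
+			 * of the four 16-bit MARx registers, bits 3-0 the
+			 * bit within it. */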
+ crc = ether_crc(ETH_ALEN, addrs);
+ crc >>= 26;
+ hash_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
+ }
+
+ iowrite16(lp->mcr0, ioaddr + MCR0);
+
+ /* Fill the MAC hash tables with their values */
+	if (lp->mcr0 & MCR0_HASH_EN) {
+ iowrite16(hash_table[0], ioaddr + MAR0);
+ iowrite16(hash_table[1], ioaddr + MAR1);
+ iowrite16(hash_table[2], ioaddr + MAR2);
+ iowrite16(hash_table[3], ioaddr + MAR3);
+ }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct r6040_private *rp = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(rp->pdev));
+}
+
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct r6040_private *rp = netdev_priv(dev);
+
+ return phy_ethtool_gset(rp->phydev, cmd);
+}
+
+static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct r6040_private *rp = netdev_priv(dev);
+
+ return phy_ethtool_sset(rp->phydev, cmd);
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_settings = netdev_get_settings,
+ .set_settings = netdev_set_settings,
+ .get_link = ethtool_op_get_link,
+};
+
+static const struct net_device_ops r6040_netdev_ops = {
+ .ndo_open = r6040_open,
+ .ndo_stop = r6040_close,
+ .ndo_start_xmit = r6040_start_xmit,
+ .ndo_get_stats = r6040_get_stats,
+ .ndo_set_rx_mode = r6040_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_do_ioctl = r6040_ioctl,
+ .ndo_tx_timeout = r6040_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = r6040_poll_controller,
+#endif
+};
+
+static void r6040_adjust_link(struct net_device *dev)
+{
+ struct r6040_private *lp = netdev_priv(dev);
+ struct phy_device *phydev = lp->phydev;
+ int status_changed = 0;
+ void __iomem *ioaddr = lp->base;
+
+ BUG_ON(!phydev);
+
+ if (lp->old_link != phydev->link) {
+ status_changed = 1;
+ lp->old_link = phydev->link;
+ }
+
+ /* reflect duplex change */
+ if (phydev->link && (lp->old_duplex != phydev->duplex)) {
+ lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? 0x8000 : 0);
+ iowrite16(lp->mcr0, ioaddr);
+
+ status_changed = 1;
+ lp->old_duplex = phydev->duplex;
+ }
+
+ if (status_changed) {
+ pr_info("%s: link %s", dev->name, phydev->link ?
+ "UP" : "DOWN");
+ if (phydev->link)
+ pr_cont(" - %d/%s", phydev->speed,
+ DUPLEX_FULL == phydev->duplex ? "full" : "half");
+ pr_cont("\n");
+ }
+}
+
+static int r6040_mii_probe(struct net_device *dev)
+{
+ struct r6040_private *lp = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
+
+ phydev = phy_find_first(lp->mii_bus);
+ if (!phydev) {
+ dev_err(&lp->pdev->dev, "no PHY found\n");
+ return -ENODEV;
+ }
+
+ phydev = phy_connect(dev, dev_name(&phydev->dev), &r6040_adjust_link,
+ 0, PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(phydev)) {
+ dev_err(&lp->pdev->dev, "could not attach to PHY\n");
+ return PTR_ERR(phydev);
+ }
+
+ /* mask with MAC supported features */
+ phydev->supported &= (SUPPORTED_10baseT_Half
+ | SUPPORTED_10baseT_Full
+ | SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full
+ | SUPPORTED_Autoneg
+ | SUPPORTED_MII
+ | SUPPORTED_TP);
+
+ phydev->advertising = phydev->supported;
+ lp->phydev = phydev;
+ lp->old_link = 0;
+ lp->old_duplex = -1;
+
+ dev_info(&lp->pdev->dev, "attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s)\n",
+ phydev->drv->name, dev_name(&phydev->dev));
+
+ return 0;
+}
+
+static int __devinit r6040_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct r6040_private *lp;
+ void __iomem *ioaddr;
+ int err, io_size = R6040_IO_SIZE;
+ static int card_idx = -1;
+ int bar = 0;
+ u16 *adrp;
+ int i;
+
+ pr_info("%s\n", version);
+
+ err = pci_enable_device(pdev);
+ if (err)
+ goto err_out;
+
+ /* this should always be supported */
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+		dev_err(&pdev->dev, "32-bit PCI DMA addresses "
+				"not supported by the card\n");
+ goto err_out;
+ }
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+		dev_err(&pdev->dev, "32-bit PCI DMA addresses "
+				"not supported by the card\n");
+ goto err_out;
+ }
+
+ /* IO Size check */
+ if (pci_resource_len(pdev, bar) < io_size) {
+ dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
+ err = -EIO;
+ goto err_out;
+ }
+
+ pci_set_master(pdev);
+
+ dev = alloc_etherdev(sizeof(struct r6040_private));
+ if (!dev) {
+ dev_err(&pdev->dev, "Failed to allocate etherdev\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ lp = netdev_priv(dev);
+
+ err = pci_request_regions(pdev, DRV_NAME);
+
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request PCI regions\n");
+ goto err_out_free_dev;
+ }
+
+ ioaddr = pci_iomap(pdev, bar, io_size);
+ if (!ioaddr) {
+ dev_err(&pdev->dev, "ioremap failed for device\n");
+ err = -EIO;
+ goto err_out_free_res;
+ }
+	/* If the PHY status change register is still set to zero, it means
+	 * the bootloader didn't initialize it */
+ if (ioread16(ioaddr + PHY_CC) == 0)
+ iowrite16(0x9f07, ioaddr + PHY_CC);
+
+ /* Init system & device */
+ lp->base = ioaddr;
+ dev->irq = pdev->irq;
+
+ spin_lock_init(&lp->lock);
+ pci_set_drvdata(pdev, dev);
+
+ /* Set MAC address */
+ card_idx++;
+
+ adrp = (u16 *)dev->dev_addr;
+ adrp[0] = ioread16(ioaddr + MID_0L);
+ adrp[1] = ioread16(ioaddr + MID_0M);
+ adrp[2] = ioread16(ioaddr + MID_0H);
+
+	/* Some bootloaders/BIOSes do not initialize the
+	 * MAC address; warn about that */
+ if (!(adrp[0] || adrp[1] || adrp[2])) {
+		netdev_warn(dev, "MAC address not initialized, "
+					"generating a random one\n");
+ random_ether_addr(dev->dev_addr);
+ }
+
+ /* Link new device into r6040_root_dev */
+ lp->pdev = pdev;
+ lp->dev = dev;
+
+ /* Init RDC private data */
+ lp->mcr0 = 0x1002;
+ lp->phy_addr = phy_table[card_idx];
+
+ /* The RDC-specific entries in the device structure. */
+ dev->netdev_ops = &r6040_netdev_ops;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ netif_napi_add(dev, &lp->napi, r6040_poll, 64);
+
+ lp->mii_bus = mdiobus_alloc();
+ if (!lp->mii_bus) {
+ dev_err(&pdev->dev, "mdiobus_alloc() failed\n");
+ err = -ENOMEM;
+ goto err_out_unmap;
+ }
+
+ lp->mii_bus->priv = dev;
+ lp->mii_bus->read = r6040_mdiobus_read;
+ lp->mii_bus->write = r6040_mdiobus_write;
+ lp->mii_bus->reset = r6040_mdiobus_reset;
+ lp->mii_bus->name = "r6040_eth_mii";
+ snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", card_idx);
+ lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ if (!lp->mii_bus->irq) {
+ dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
+ err = -ENOMEM;
+ goto err_out_mdio;
+ }
+
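+	/* PHY_POLL in every slot tells phylib that no link-change
+	 * interrupt line is wired up, so the PHY state machine polls
+	 * the link status instead. */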
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ lp->mii_bus->irq[i] = PHY_POLL;
+
+ err = mdiobus_register(lp->mii_bus);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register MII bus\n");
+ goto err_out_mdio_irq;
+ }
+
+ err = r6040_mii_probe(dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to probe MII bus\n");
+ goto err_out_mdio_unregister;
+ }
+
+	/* Register the net device; after this, dev->name is assigned */
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register net device\n");
+ goto err_out_mdio_unregister;
+ }
+ return 0;
+
+err_out_mdio_unregister:
+ mdiobus_unregister(lp->mii_bus);
+err_out_mdio_irq:
+ kfree(lp->mii_bus->irq);
+err_out_mdio:
+ mdiobus_free(lp->mii_bus);
+err_out_unmap:
+ pci_iounmap(pdev, ioaddr);
+err_out_free_res:
+ pci_release_regions(pdev);
+err_out_free_dev:
+ free_netdev(dev);
+err_out:
+ return err;
+}
+
+static void __devexit r6040_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct r6040_private *lp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ mdiobus_unregister(lp->mii_bus);
+ kfree(lp->mii_bus->irq);
+ mdiobus_free(lp->mii_bus);
+ pci_release_regions(pdev);
+ free_netdev(dev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+
+static DEFINE_PCI_DEVICE_TABLE(r6040_pci_tbl) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, r6040_pci_tbl);
+
+static struct pci_driver r6040_driver = {
+ .name = DRV_NAME,
+ .id_table = r6040_pci_tbl,
+ .probe = r6040_init_one,
+ .remove = __devexit_p(r6040_remove_one),
+};
+
+
+static int __init r6040_init(void)
+{
+ return pci_register_driver(&r6040_driver);
+}
+
+
+static void __exit r6040_cleanup(void)
+{
+ pci_unregister_driver(&r6040_driver);
+}
+
+module_init(r6040_init);
+module_exit(r6040_cleanup);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
new file mode 100644
index 000000000000..c77d5af676a1
--- /dev/null
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -0,0 +1,2063 @@
+/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
+/*
+ Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
+
+ Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
+ Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
+ Copyright 2001 Manfred Spraul [natsemi.c]
+ Copyright 1999-2001 by Donald Becker. [natsemi.c]
+ Written 1997-2001 by Donald Becker. [8139too.c]
+ Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ See the file COPYING in this distribution for more information.
+
+ Contributors:
+
+ Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
+ PCI suspend/resume - Felipe Damasio <felipewd@terra.com.br>
+ LinkChg interrupt - Felipe Damasio <felipewd@terra.com.br>
+
+ TODO:
+ * Test Tx checksumming thoroughly
+
+ Low priority TODO:
+ * Complete reset on PciErr
+ * Consider Rx interrupt mitigation using TimerIntr
+ * Investigate using skb->priority with h/w VLAN priority
+ * Investigate using High Priority Tx Queue with skb->priority
+ * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
+ * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
+ * Implement Tx software interrupt mitigation via
+ Tx descriptor bit
+ * The real minimum of CP_MIN_MTU is 4 bytes. However,
+ for this to be supported, one must(?) turn on packet padding.
+ * Support external MII transceivers (patch available)
+
+ NOTES:
+ * TX checksumming is considered experimental. It is off by
+ default, use ethtool to turn it on.
+
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME "8139cp"
+#define DRV_VERSION "1.3"
+#define DRV_RELDATE "Mar 22, 2004"
+
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/gfp.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/cache.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+/* These identify the driver base version and may not be removed. */
+static char version[] =
+DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
+
+MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
+MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ The RTL chips use a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
+module_param(multicast_filter_limit, int, 0);
+MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
+
+#define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK)
+#define CP_NUM_STATS 14 /* struct cp_dma_stats, plus one */
+#define CP_STATS_SIZE 64 /* size in bytes of DMA stats block */
+#define CP_REGS_SIZE (0xff + 1)
+#define CP_REGS_VER 1 /* version 1 */
+#define CP_RX_RING_SIZE 64
+#define CP_TX_RING_SIZE 64
+#define CP_RING_BYTES \
+ ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) + \
+ (sizeof(struct cp_desc) * CP_TX_RING_SIZE) + \
+ CP_STATS_SIZE)
+#define NEXT_TX(N) (((N) + 1) & (CP_TX_RING_SIZE - 1))
+#define NEXT_RX(N) (((N) + 1) & (CP_RX_RING_SIZE - 1))
+#define TX_BUFFS_AVAIL(CP) \
+ (((CP)->tx_tail <= (CP)->tx_head) ? \
+ (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
+ (CP)->tx_tail - (CP)->tx_head - 1)
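+/* Worked example for TX_BUFFS_AVAIL (illustrative only): with tx_head == 62
+ * and tx_tail == 3 on the 64-entry ring, the first branch gives
+ * 3 + 63 - 62 = 4 free slots.  One slot is always left unused so that
+ * tx_head == tx_tail unambiguously means "ring empty". */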
+
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+#define CP_INTERNAL_PHY 32
+
+/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
+#define RX_FIFO_THRESH 5 /* Rx buffer level before first PCI xfer. */
+#define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 */
+#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
+#define TX_EARLY_THRESH 256 /* Early Tx threshold, in bytes */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* hardware minimum and maximum for a single frame's data payload */
+#define CP_MIN_MTU 60 /* TODO: allow lower, but pad */
+#define CP_MAX_MTU 4096
+
+enum {
+ /* NIC register offsets */
+ MAC0 = 0x00, /* Ethernet hardware address. */
+ MAR0 = 0x08, /* Multicast filter. */
+ StatsAddr = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
+ TxRingAddr = 0x20, /* 64-bit start addr of Tx ring */
+ HiTxRingAddr = 0x28, /* 64-bit start addr of high priority Tx ring */
+ Cmd = 0x37, /* Command register */
+ IntrMask = 0x3C, /* Interrupt mask */
+ IntrStatus = 0x3E, /* Interrupt status */
+ TxConfig = 0x40, /* Tx configuration */
+ ChipVersion = 0x43, /* 8-bit chip version, inside TxConfig */
+ RxConfig = 0x44, /* Rx configuration */
+ RxMissed = 0x4C, /* 24 bits valid, write clears */
+ Cfg9346 = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
+ Config1 = 0x52, /* Config1 */
+ Config3 = 0x59, /* Config3 */
+ Config4 = 0x5A, /* Config4 */
+ MultiIntr = 0x5C, /* Multiple interrupt select */
+ BasicModeCtrl = 0x62, /* MII BMCR */
+ BasicModeStatus = 0x64, /* MII BMSR */
+ NWayAdvert = 0x66, /* MII ADVERTISE */
+ NWayLPAR = 0x68, /* MII LPA */
+ NWayExpansion = 0x6A, /* MII Expansion */
+ Config5 = 0xD8, /* Config5 */
+ TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
+ RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
+ CpCmd = 0xE0, /* C+ Command register (C+ mode only) */
+ IntrMitigate = 0xE2, /* rx/tx interrupt mitigation control */
+ RxRingAddr = 0xE4, /* 64-bit start addr of Rx ring */
+ TxThresh = 0xEC, /* Early Tx threshold */
+ OldRxBufAddr = 0x30, /* DMA address of Rx ring buffer (C mode) */
+ OldTSD0 = 0x10, /* DMA address of first Tx desc (C mode) */
+
+ /* Tx and Rx status descriptors */
+ DescOwn = (1 << 31), /* Descriptor is owned by NIC */
+ RingEnd = (1 << 30), /* End of descriptor ring */
+ FirstFrag = (1 << 29), /* First segment of a packet */
+ LastFrag = (1 << 28), /* Final segment of a packet */
+ LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
+ MSSShift = 16, /* MSS value position */
+ MSSMask = 0xfff, /* MSS value: 11 bits */
+ TxError = (1 << 23), /* Tx error summary */
+ RxError = (1 << 20), /* Rx error summary */
+ IPCS = (1 << 18), /* Calculate IP checksum */
+ UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
+ TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
+ TxVlanTag = (1 << 17), /* Add VLAN tag */
+ RxVlanTagged = (1 << 16), /* Rx VLAN tag available */
+ IPFail = (1 << 15), /* IP checksum failed */
+ UDPFail = (1 << 14), /* UDP/IP checksum failed */
+ TCPFail = (1 << 13), /* TCP/IP checksum failed */
+ NormalTxPoll = (1 << 6), /* One or more normal Tx packets to send */
+	PID1		= (1 << 17), /* 2 protocol id bits: 0==non-IP, */
+	PID0		= (1 << 16), /* 1==TCP/IP, 2==UDP/IP, 3==IP */
+ RxProtoTCP = 1,
+ RxProtoUDP = 2,
+ RxProtoIP = 3,
+ TxFIFOUnder = (1 << 25), /* Tx FIFO underrun */
+ TxOWC = (1 << 22), /* Tx Out-of-window collision */
+ TxLinkFail = (1 << 21), /* Link failed during Tx of packet */
+ TxMaxCol = (1 << 20), /* Tx aborted due to excessive collisions */
+ TxColCntShift = 16, /* Shift, to get 4-bit Tx collision cnt */
+ TxColCntMask = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
+ RxErrFrame = (1 << 27), /* Rx frame alignment error */
+ RxMcast = (1 << 26), /* Rx multicast packet rcv'd */
+ RxErrCRC = (1 << 18), /* Rx CRC error */
+ RxErrRunt = (1 << 19), /* Rx error, packet < 64 bytes */
+ RxErrLong = (1 << 21), /* Rx error, packet > 4096 bytes */
+ RxErrFIFO = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
+
+ /* StatsAddr register */
+ DumpStats = (1 << 3), /* Begin stats dump */
+
+ /* RxConfig register */
+ RxCfgFIFOShift = 13, /* Shift, to get Rx FIFO thresh value */
+ RxCfgDMAShift = 8, /* Shift, to get Rx Max DMA value */
+ AcceptErr = 0x20, /* Accept packets with CRC errors */
+ AcceptRunt = 0x10, /* Accept runt (<64 bytes) packets */
+ AcceptBroadcast = 0x08, /* Accept broadcast packets */
+ AcceptMulticast = 0x04, /* Accept multicast packets */
+ AcceptMyPhys = 0x02, /* Accept pkts with our MAC as dest */
+ AcceptAllPhys = 0x01, /* Accept all pkts w/ physical dest */
+
+ /* IntrMask / IntrStatus registers */
+ PciErr = (1 << 15), /* System error on the PCI bus */
+ TimerIntr = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
+ LenChg = (1 << 13), /* Cable length change */
+ SWInt = (1 << 8), /* Software-requested interrupt */
+ TxEmpty = (1 << 7), /* No Tx descriptors available */
+ RxFIFOOvr = (1 << 6), /* Rx FIFO Overflow */
+ LinkChg = (1 << 5), /* Packet underrun, or link change */
+ RxEmpty = (1 << 4), /* No Rx descriptors available */
+ TxErr = (1 << 3), /* Tx error */
+ TxOK = (1 << 2), /* Tx packet sent */
+ RxErr = (1 << 1), /* Rx error */
+ RxOK = (1 << 0), /* Rx packet received */
+ IntrResvd = (1 << 10), /* reserved, according to RealTek engineers,
+ but hardware likes to raise it */
+
+ IntrAll = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
+ RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
+ RxErr | RxOK | IntrResvd,
+
+ /* C mode command register */
+ CmdReset = (1 << 4), /* Enable to reset; self-clearing */
+ RxOn = (1 << 3), /* Rx mode enable */
+ TxOn = (1 << 2), /* Tx mode enable */
+
+ /* C+ mode command register */
+ RxVlanOn = (1 << 6), /* Rx VLAN de-tagging enable */
+ RxChkSum = (1 << 5), /* Rx checksum offload enable */
+ PCIDAC = (1 << 4), /* PCI Dual Address Cycle (64-bit PCI) */
+ PCIMulRW = (1 << 3), /* Enable PCI read/write multiple */
+ CpRxOn = (1 << 1), /* Rx mode enable */
+ CpTxOn = (1 << 0), /* Tx mode enable */
+
+	/* Cfg9346 EEPROM control register */
+ Cfg9346_Lock = 0x00, /* Lock ConfigX/MII register access */
+ Cfg9346_Unlock = 0xC0, /* Unlock ConfigX/MII register access */
+
+ /* TxConfig register */
+ IFG = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
+ TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
+
+ /* Early Tx Threshold register */
+ TxThreshMask = 0x3f, /* Mask bits 5-0 */
+ TxThreshMax = 2048, /* Max early Tx threshold */
+
+ /* Config1 register */
+ DriverLoaded = (1 << 5), /* Software marker, driver is loaded */
+ LWACT = (1 << 4), /* LWAKE active mode */
+ PMEnable = (1 << 0), /* Enable various PM features of chip */
+
+ /* Config3 register */
+ PARMEnable = (1 << 6), /* Enable auto-loading of PHY parms */
+ MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
+ LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
+
+ /* Config4 register */
+ LWPTN = (1 << 1), /* LWAKE Pattern */
+ LWPME = (1 << 4), /* LANWAKE vs PMEB */
+
+ /* Config5 register */
+ BWF = (1 << 6), /* Accept Broadcast wakeup frame */
+ MWF = (1 << 5), /* Accept Multicast wakeup frame */
+ UWF = (1 << 4), /* Accept Unicast wakeup frame */
+ LANWake = (1 << 1), /* Enable LANWake signal */
+ PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
+
+ cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
+ cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
+ cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
+};
+
+static const unsigned int cp_rx_config =
+ (RX_FIFO_THRESH << RxCfgFIFOShift) |
+ (RX_DMA_BURST << RxCfgDMAShift);
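+/* With the defaults above this evaluates to (5 << 13) | (4 << 8) = 0xa400:
+ * a 512-byte Rx FIFO threshold (2^(5+4) bytes, per the log2 encoding noted
+ * earlier) and 256-byte maximum Rx DMA bursts. */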
+
+struct cp_desc {
+ __le32 opts1;
+ __le32 opts2;
+ __le64 addr;
+};
+
+struct cp_dma_stats {
+ __le64 tx_ok;
+ __le64 rx_ok;
+ __le64 tx_err;
+ __le32 rx_err;
+ __le16 rx_fifo;
+ __le16 frame_align;
+ __le32 tx_ok_1col;
+ __le32 tx_ok_mcol;
+ __le64 rx_ok_phys;
+ __le64 rx_ok_bcast;
+ __le32 rx_ok_mcast;
+ __le16 tx_abort;
+ __le16 tx_underrun;
+} __packed;
+
+struct cp_extra_stats {
+ unsigned long rx_frags;
+};
+
+struct cp_private {
+ void __iomem *regs;
+ struct net_device *dev;
+ spinlock_t lock;
+ u32 msg_enable;
+
+ struct napi_struct napi;
+
+ struct pci_dev *pdev;
+ u32 rx_config;
+ u16 cpcmd;
+
+ struct cp_extra_stats cp_stats;
+
+ unsigned rx_head ____cacheline_aligned;
+ unsigned rx_tail;
+ struct cp_desc *rx_ring;
+ struct sk_buff *rx_skb[CP_RX_RING_SIZE];
+
+ unsigned tx_head ____cacheline_aligned;
+ unsigned tx_tail;
+ struct cp_desc *tx_ring;
+ struct sk_buff *tx_skb[CP_TX_RING_SIZE];
+
+ unsigned rx_buf_sz;
+ unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
+
+ dma_addr_t ring_dma;
+
+ struct mii_if_info mii_if;
+};
+
+#define cpr8(reg) readb(cp->regs + (reg))
+#define cpr16(reg) readw(cp->regs + (reg))
+#define cpr32(reg) readl(cp->regs + (reg))
+#define cpw8(reg,val) writeb((val), cp->regs + (reg))
+#define cpw16(reg,val) writew((val), cp->regs + (reg))
+#define cpw32(reg,val) writel((val), cp->regs + (reg))
+#define cpw8_f(reg,val) do { \
+ writeb((val), cp->regs + (reg)); \
+ readb(cp->regs + (reg)); \
+ } while (0)
+#define cpw16_f(reg,val) do { \
+ writew((val), cp->regs + (reg)); \
+ readw(cp->regs + (reg)); \
+ } while (0)
+#define cpw32_f(reg,val) do { \
+ writel((val), cp->regs + (reg)); \
+ readl(cp->regs + (reg)); \
+ } while (0)
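+/* The *_f ("flush") variants read the register straight back after the
+ * write.  PCI writes may be posted in intermediate buffers; the read-back
+ * forces the write to reach the chip before the CPU continues, which
+ * matters for ordering-sensitive sequences such as unlocking Cfg9346
+ * before touching the Config registers. */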
+
+
+static void __cp_set_rx_mode (struct net_device *dev);
+static void cp_tx (struct cp_private *cp);
+static void cp_clean_rings (struct cp_private *cp);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cp_poll_controller(struct net_device *dev);
+#endif
+static int cp_get_eeprom_len(struct net_device *dev);
+static int cp_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data);
+static int cp_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data);
+
+static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
+ { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
+ { },
+};
+MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
+
+static struct {
+ const char str[ETH_GSTRING_LEN];
+} ethtool_stats_keys[] = {
+ { "tx_ok" },
+ { "rx_ok" },
+ { "tx_err" },
+ { "rx_err" },
+ { "rx_fifo" },
+ { "frame_align" },
+ { "tx_ok_1col" },
+ { "tx_ok_mcol" },
+ { "rx_ok_phys" },
+ { "rx_ok_bcast" },
+ { "rx_ok_mcast" },
+ { "tx_abort" },
+ { "tx_underrun" },
+ { "rx_frags" },
+};
+
+
+static inline void cp_set_rxbufsize (struct cp_private *cp)
+{
+ unsigned int mtu = cp->dev->mtu;
+
+ if (mtu > ETH_DATA_LEN)
+ /* MTU + ethernet header + FCS + optional VLAN tag */
+ cp->rx_buf_sz = mtu + ETH_HLEN + 8;
+ else
+ cp->rx_buf_sz = PKT_BUF_SZ;
+}
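+/* Example: a standard 1500-byte MTU keeps the fixed 1536-byte PKT_BUF_SZ,
+ * while a 4000-byte MTU would yield 4000 + 14 (Ethernet header) + 4 (FCS)
+ * + 4 (optional VLAN tag) = 4022-byte Rx buffers. */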
+
+static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
+ struct cp_desc *desc)
+{
+ u32 opts2 = le32_to_cpu(desc->opts2);
+
+ skb->protocol = eth_type_trans (skb, cp->dev);
+
+ cp->dev->stats.rx_packets++;
+ cp->dev->stats.rx_bytes += skb->len;
+
+ if (opts2 & RxVlanTagged)
+ __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
+
+ napi_gro_receive(&cp->napi, skb);
+}
+
+static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
+ u32 status, u32 len)
+{
+ netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
+ rx_tail, status, len);
+ cp->dev->stats.rx_errors++;
+ if (status & RxErrFrame)
+ cp->dev->stats.rx_frame_errors++;
+ if (status & RxErrCRC)
+ cp->dev->stats.rx_crc_errors++;
+ if ((status & RxErrRunt) || (status & RxErrLong))
+ cp->dev->stats.rx_length_errors++;
+ if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
+ cp->dev->stats.rx_length_errors++;
+ if (status & RxErrFIFO)
+ cp->dev->stats.rx_fifo_errors++;
+}
+
+static inline unsigned int cp_rx_csum_ok (u32 status)
+{
+ unsigned int protocol = (status >> 16) & 0x3;
+
+ if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
+ ((protocol == RxProtoUDP) && !(status & UDPFail)))
+ return 1;
+ else
+ return 0;
+}
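+/* Example: a status word with only PID0 (bit 16) set decodes to protocol
+ * 1 (RxProtoTCP); if TCPFail (bit 13) is also clear, the hardware verified
+ * the TCP checksum and the skb may be marked CHECKSUM_UNNECESSARY. */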
+
+static int cp_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct cp_private *cp = container_of(napi, struct cp_private, napi);
+ struct net_device *dev = cp->dev;
+ unsigned int rx_tail = cp->rx_tail;
+ int rx;
+
+rx_status_loop:
+ rx = 0;
+ cpw16(IntrStatus, cp_rx_intr_mask);
+
+ while (1) {
+ u32 status, len;
+ dma_addr_t mapping;
+ struct sk_buff *skb, *new_skb;
+ struct cp_desc *desc;
+ const unsigned buflen = cp->rx_buf_sz;
+
+ skb = cp->rx_skb[rx_tail];
+ BUG_ON(!skb);
+
+ desc = &cp->rx_ring[rx_tail];
+ status = le32_to_cpu(desc->opts1);
+ if (status & DescOwn)
+ break;
+
+ len = (status & 0x1fff) - 4;
+ mapping = le64_to_cpu(desc->addr);
+
+ if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
+ /* we don't support incoming fragmented frames.
+ * instead, we attempt to ensure that the
+ * pre-allocated RX skbs are properly sized such
+ * that RX fragments are never encountered
+ */
+ cp_rx_err_acct(cp, rx_tail, status, len);
+ dev->stats.rx_dropped++;
+ cp->cp_stats.rx_frags++;
+ goto rx_next;
+ }
+
+ if (status & (RxError | RxErrFIFO)) {
+ cp_rx_err_acct(cp, rx_tail, status, len);
+ goto rx_next;
+ }
+
+ netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
+ rx_tail, status, len);
+
+ new_skb = netdev_alloc_skb_ip_align(dev, buflen);
+ if (!new_skb) {
+ dev->stats.rx_dropped++;
+ goto rx_next;
+ }
+
+ dma_unmap_single(&cp->pdev->dev, mapping,
+ buflen, PCI_DMA_FROMDEVICE);
+
+ /* Handle checksum offloading for incoming packets. */
+ if (cp_rx_csum_ok(status))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+
+ skb_put(skb, len);
+
+ mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
+ PCI_DMA_FROMDEVICE);
+ cp->rx_skb[rx_tail] = new_skb;
+
+ cp_rx_skb(cp, skb, desc);
+ rx++;
+
+rx_next:
+ cp->rx_ring[rx_tail].opts2 = 0;
+ cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
+ if (rx_tail == (CP_RX_RING_SIZE - 1))
+ desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
+ cp->rx_buf_sz);
+ else
+ desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
+ rx_tail = NEXT_RX(rx_tail);
+
+ if (rx >= budget)
+ break;
+ }
+
+ cp->rx_tail = rx_tail;
+
+ /* if we did not reach work limit, then we're done with
+ * this round of polling
+ */
+ if (rx < budget) {
+ unsigned long flags;
+
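+		/* Re-check IntrStatus before completing NAPI: Rx interrupts
+		 * stay masked while polling, so a packet arriving after the
+		 * ring drained would otherwise sit unnoticed until some
+		 * other interrupt fired. */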
+ if (cpr16(IntrStatus) & cp_rx_intr_mask)
+ goto rx_status_loop;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ __napi_complete(napi);
+ cpw16_f(IntrMask, cp_intr_mask);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ }
+
+ return rx;
+}
+
+static irqreturn_t cp_interrupt (int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct cp_private *cp;
+ u16 status;
+
+ if (unlikely(dev == NULL))
+ return IRQ_NONE;
+ cp = netdev_priv(dev);
+
+ status = cpr16(IntrStatus);
+ if (!status || (status == 0xFFFF))
+ return IRQ_NONE;
+
+ netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
+ status, cpr8(Cmd), cpr16(CpCmd));
+
+ cpw16(IntrStatus, status & ~cp_rx_intr_mask);
+
+ spin_lock(&cp->lock);
+
+	/* close possible races with dev_close */
+ if (unlikely(!netif_running(dev))) {
+ cpw16(IntrMask, 0);
+ spin_unlock(&cp->lock);
+ return IRQ_HANDLED;
+ }
+
+ if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
+ if (napi_schedule_prep(&cp->napi)) {
+ cpw16_f(IntrMask, cp_norx_intr_mask);
+ __napi_schedule(&cp->napi);
+ }
+
+ if (status & (TxOK | TxErr | TxEmpty | SWInt))
+ cp_tx(cp);
+ if (status & LinkChg)
+ mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
+
+ spin_unlock(&cp->lock);
+
+ if (status & PciErr) {
+ u16 pci_status;
+
+ pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
+ pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
+ netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
+ status, pci_status);
+
+ /* TODO: reset hardware */
+ }
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void cp_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ cp_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static void cp_tx (struct cp_private *cp)
+{
+ unsigned tx_head = cp->tx_head;
+ unsigned tx_tail = cp->tx_tail;
+
+ while (tx_tail != tx_head) {
+ struct cp_desc *txd = cp->tx_ring + tx_tail;
+ struct sk_buff *skb;
+ u32 status;
+
+ rmb();
+ status = le32_to_cpu(txd->opts1);
+ if (status & DescOwn)
+ break;
+
+ skb = cp->tx_skb[tx_tail];
+ BUG_ON(!skb);
+
+ dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
+ le32_to_cpu(txd->opts1) & 0xffff,
+ PCI_DMA_TODEVICE);
+
+ if (status & LastFrag) {
+ if (status & (TxError | TxFIFOUnder)) {
+ netif_dbg(cp, tx_err, cp->dev,
+ "tx err, status 0x%x\n", status);
+ cp->dev->stats.tx_errors++;
+ if (status & TxOWC)
+ cp->dev->stats.tx_window_errors++;
+ if (status & TxMaxCol)
+ cp->dev->stats.tx_aborted_errors++;
+ if (status & TxLinkFail)
+ cp->dev->stats.tx_carrier_errors++;
+ if (status & TxFIFOUnder)
+ cp->dev->stats.tx_fifo_errors++;
+ } else {
+ cp->dev->stats.collisions +=
+ ((status >> TxColCntShift) & TxColCntMask);
+ cp->dev->stats.tx_packets++;
+ cp->dev->stats.tx_bytes += skb->len;
+ netif_dbg(cp, tx_done, cp->dev,
+ "tx done, slot %d\n", tx_tail);
+ }
+ dev_kfree_skb_irq(skb);
+ }
+
+ cp->tx_skb[tx_tail] = NULL;
+
+ tx_tail = NEXT_TX(tx_tail);
+ }
+
+ cp->tx_tail = tx_tail;
+
+ if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
+ netif_wake_queue(cp->dev);
+}
+
+static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
+{
+ return vlan_tx_tag_present(skb) ?
+ TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
+}
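+/* The tag is stored byte-swapped in opts2 (VID 5, i.e. 0x0005, becomes
+ * 0x0500), matching the receive path above, which undoes the swap with
+ * swab16() before calling __vlan_hwaccel_put_tag(). */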
+
+static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned entry;
+ u32 eor, flags;
+ unsigned long intr_flags;
+ __le32 opts2;
+ int mss = 0;
+
+ spin_lock_irqsave(&cp->lock, intr_flags);
+
+ /* This is a hard error, log it. */
+ if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&cp->lock, intr_flags);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = cp->tx_head;
+ eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
+ mss = skb_shinfo(skb)->gso_size;
+
+ opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
+
+ if (skb_shinfo(skb)->nr_frags == 0) {
+ struct cp_desc *txd = &cp->tx_ring[entry];
+ u32 len;
+ dma_addr_t mapping;
+
+ len = skb->len;
+ mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
+ txd->opts2 = opts2;
+ txd->addr = cpu_to_le64(mapping);
+ wmb();
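+		/* The barrier above publishes addr and opts2 before DescOwn
+		 * is set in opts1 below; without it the NIC could fetch a
+		 * half-initialized descriptor. */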
+
+ flags = eor | len | DescOwn | FirstFrag | LastFrag;
+
+ if (mss)
+ flags |= LargeSend | ((mss & MSSMask) << MSSShift);
+ else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const struct iphdr *ip = ip_hdr(skb);
+ if (ip->protocol == IPPROTO_TCP)
+ flags |= IPCS | TCPCS;
+ else if (ip->protocol == IPPROTO_UDP)
+ flags |= IPCS | UDPCS;
+ else
+				WARN_ON(1); /* unknown protocol, should not happen */
+ }
+
+ txd->opts1 = cpu_to_le32(flags);
+ wmb();
+
+ cp->tx_skb[entry] = skb;
+ entry = NEXT_TX(entry);
+ } else {
+ struct cp_desc *txd;
+ u32 first_len, first_eor;
+ dma_addr_t first_mapping;
+ int frag, first_entry = entry;
+ const struct iphdr *ip = ip_hdr(skb);
+
+ /* We must give this initial chunk to the device last.
+ * Otherwise we could race with the device.
+ */
+ first_eor = eor;
+ first_len = skb_headlen(skb);
+ first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
+ first_len, PCI_DMA_TODEVICE);
+ cp->tx_skb[entry] = skb;
+ entry = NEXT_TX(entry);
+
+ for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+ u32 len;
+ u32 ctrl;
+ dma_addr_t mapping;
+
+ len = this_frag->size;
+ mapping = dma_map_single(&cp->pdev->dev,
+ skb_frag_address(this_frag),
+ len, PCI_DMA_TODEVICE);
+ eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
+
+ ctrl = eor | len | DescOwn;
+
+ if (mss)
+ ctrl |= LargeSend |
+ ((mss & MSSMask) << MSSShift);
+ else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (ip->protocol == IPPROTO_TCP)
+ ctrl |= IPCS | TCPCS;
+ else if (ip->protocol == IPPROTO_UDP)
+ ctrl |= IPCS | UDPCS;
+ else
+ BUG();
+ }
+
+ if (frag == skb_shinfo(skb)->nr_frags - 1)
+ ctrl |= LastFrag;
+
+ txd = &cp->tx_ring[entry];
+ txd->opts2 = opts2;
+ txd->addr = cpu_to_le64(mapping);
+ wmb();
+
+ txd->opts1 = cpu_to_le32(ctrl);
+ wmb();
+
+ cp->tx_skb[entry] = skb;
+ entry = NEXT_TX(entry);
+ }
+
+ txd = &cp->tx_ring[first_entry];
+ txd->opts2 = opts2;
+ txd->addr = cpu_to_le64(first_mapping);
+ wmb();
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (ip->protocol == IPPROTO_TCP)
+ txd->opts1 = cpu_to_le32(first_eor | first_len |
+ FirstFrag | DescOwn |
+ IPCS | TCPCS);
+ else if (ip->protocol == IPPROTO_UDP)
+ txd->opts1 = cpu_to_le32(first_eor | first_len |
+ FirstFrag | DescOwn |
+ IPCS | UDPCS);
+ else
+ BUG();
+ } else
+ txd->opts1 = cpu_to_le32(first_eor | first_len |
+ FirstFrag | DescOwn);
+ wmb();
+ }
+ cp->tx_head = entry;
+ netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
+ entry, skb->len);
+ if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
+ netif_stop_queue(dev);
+
+ spin_unlock_irqrestore(&cp->lock, intr_flags);
+
+ cpw8(TxPoll, NormalTxPoll);
+
+ return NETDEV_TX_OK;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ This routine is not state sensitive and need not be SMP locked. */
+
+static void __cp_set_rx_mode (struct net_device *dev)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ u32 mc_filter[2]; /* Multicast hash filter */
+ int rx_mode;
+ u32 tmp;
+
+ /* Note: do not reorder, GCC is clever about common statements. */
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ rx_mode =
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
+ AcceptAllPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else {
+ struct netdev_hw_addr *ha;
+ rx_mode = AcceptBroadcast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ rx_mode |= AcceptMulticast;
+ }
+ }
+
+ /* We can safely update without stopping the chip. */
+ tmp = cp_rx_config | rx_mode;
+ if (cp->rx_config != tmp) {
+ cpw32_f (RxConfig, tmp);
+ cp->rx_config = tmp;
+ }
+ cpw32_f (MAR0 + 0, mc_filter[0]);
+ cpw32_f (MAR0 + 4, mc_filter[1]);
+}
+
+static void cp_set_rx_mode (struct net_device *dev)
+{
+ unsigned long flags;
+ struct cp_private *cp = netdev_priv(dev);
+
+ spin_lock_irqsave (&cp->lock, flags);
+ __cp_set_rx_mode(dev);
+ spin_unlock_irqrestore (&cp->lock, flags);
+}
+
+static void __cp_get_stats(struct cp_private *cp)
+{
+ /* only lower 24 bits valid; write any value to clear */
+ cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
+ cpw32 (RxMissed, 0);
+}
+
+static struct net_device_stats *cp_get_stats(struct net_device *dev)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned long flags;
+
+	/* Only the missed-frame count (frames the chip silently dropped)
+	 * lives in hardware; everything else is tracked in software. */
+ spin_lock_irqsave(&cp->lock, flags);
+ if (netif_running(dev) && netif_device_present(dev))
+ __cp_get_stats(cp);
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ return &dev->stats;
+}
+
+static void cp_stop_hw (struct cp_private *cp)
+{
+ cpw16(IntrStatus, ~(cpr16(IntrStatus)));
+ cpw16_f(IntrMask, 0);
+ cpw8(Cmd, 0);
+ cpw16_f(CpCmd, 0);
+ cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
+
+ cp->rx_tail = 0;
+ cp->tx_head = cp->tx_tail = 0;
+}
+
+static void cp_reset_hw (struct cp_private *cp)
+{
+ unsigned work = 1000;
+
+ cpw8(Cmd, CmdReset);
+
+ while (work--) {
+ if (!(cpr8(Cmd) & CmdReset))
+ return;
+
+ schedule_timeout_uninterruptible(10);
+ }
+
+ netdev_err(cp->dev, "hardware reset timeout\n");
+}
+
+static inline void cp_start_hw (struct cp_private *cp)
+{
+ cpw16(CpCmd, cp->cpcmd);
+ cpw8(Cmd, RxOn | TxOn);
+}
+
+static void cp_init_hw (struct cp_private *cp)
+{
+ struct net_device *dev = cp->dev;
+ dma_addr_t ring_dma;
+
+ cp_reset_hw(cp);
+
+ cpw8_f (Cfg9346, Cfg9346_Unlock);
+
+ /* Restore our idea of the MAC address. */
+ cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
+ cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
+
+ cp_start_hw(cp);
+ cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
+
+ __cp_set_rx_mode(dev);
+ cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
+
+ cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
+ /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
+ cpw8(Config3, PARMEnable);
+ cp->wol_enabled = 0;
+
+ cpw8(Config5, cpr8(Config5) & PMEStatus);
+
+ cpw32_f(HiTxRingAddr, 0);
+ cpw32_f(HiTxRingAddr + 4, 0);
+
+ ring_dma = cp->ring_dma;
+ cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+ cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+ ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+ cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+ cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
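+	/* The double 16-bit shift (rather than ">> 32") stays well-defined
+	 * when dma_addr_t is only 32 bits wide; it simply yields 0 there. */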
+
+ cpw16(MultiIntr, 0);
+
+ cpw16_f(IntrMask, cp_intr_mask);
+
+ cpw8_f(Cfg9346, Cfg9346_Lock);
+}
+
+static int cp_refill_rx(struct cp_private *cp)
+{
+ struct net_device *dev = cp->dev;
+ unsigned i;
+
+ for (i = 0; i < CP_RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+
+ skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
+ if (!skb)
+ goto err_out;
+
+ mapping = dma_map_single(&cp->pdev->dev, skb->data,
+ cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ cp->rx_skb[i] = skb;
+
+ cp->rx_ring[i].opts2 = 0;
+ cp->rx_ring[i].addr = cpu_to_le64(mapping);
+ if (i == (CP_RX_RING_SIZE - 1))
+ cp->rx_ring[i].opts1 =
+ cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
+ else
+ cp->rx_ring[i].opts1 =
+ cpu_to_le32(DescOwn | cp->rx_buf_sz);
+ }
+
+ return 0;
+
+err_out:
+ cp_clean_rings(cp);
+ return -ENOMEM;
+}
+
+static void cp_init_rings_index (struct cp_private *cp)
+{
+ cp->rx_tail = 0;
+ cp->tx_head = cp->tx_tail = 0;
+}
+
+static int cp_init_rings (struct cp_private *cp)
+{
+ memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
+ cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
+
+ cp_init_rings_index(cp);
+
+ return cp_refill_rx (cp);
+}
+
+static int cp_alloc_rings (struct cp_private *cp)
+{
+ void *mem;
+
+ mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
+ &cp->ring_dma, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ cp->rx_ring = mem;
+ cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
+
+ return cp_init_rings(cp);
+}
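+/* Layout of the single coherent allocation (a sketch): 64 Rx descriptors,
+ * then 64 Tx descriptors, then CP_STATS_SIZE bytes reserved for the DMA
+ * statistics block; with 16-byte descriptors CP_RING_BYTES works out to
+ * 1024 + 1024 + 64 = 2112 bytes. */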
+
+static void cp_clean_rings (struct cp_private *cp)
+{
+ struct cp_desc *desc;
+ unsigned i;
+
+ for (i = 0; i < CP_RX_RING_SIZE; i++) {
+ if (cp->rx_skb[i]) {
+ desc = cp->rx_ring + i;
+ dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
+ cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(cp->rx_skb[i]);
+ }
+ }
+
+ for (i = 0; i < CP_TX_RING_SIZE; i++) {
+ if (cp->tx_skb[i]) {
+ struct sk_buff *skb = cp->tx_skb[i];
+
+ desc = cp->tx_ring + i;
+ dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
+ le32_to_cpu(desc->opts1) & 0xffff,
+ PCI_DMA_TODEVICE);
+ if (le32_to_cpu(desc->opts1) & LastFrag)
+ dev_kfree_skb(skb);
+ cp->dev->stats.tx_dropped++;
+ }
+ }
+
+ memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
+ memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
+
+ memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
+ memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
+}
+
+static void cp_free_rings (struct cp_private *cp)
+{
+ cp_clean_rings(cp);
+ dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
+ cp->ring_dma);
+ cp->rx_ring = NULL;
+ cp->tx_ring = NULL;
+}
+
+static int cp_open (struct net_device *dev)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ int rc;
+
+ netif_dbg(cp, ifup, dev, "enabling interface\n");
+
+ rc = cp_alloc_rings(cp);
+ if (rc)
+ return rc;
+
+ napi_enable(&cp->napi);
+
+ cp_init_hw(cp);
+
+ rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
+ if (rc)
+ goto err_out_hw;
+
+ netif_carrier_off(dev);
+ mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
+ netif_start_queue(dev);
+
+ return 0;
+
+err_out_hw:
+ napi_disable(&cp->napi);
+ cp_stop_hw(cp);
+ cp_free_rings(cp);
+ return rc;
+}
+
+static int cp_close (struct net_device *dev)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned long flags;
+
+ napi_disable(&cp->napi);
+
+ netif_dbg(cp, ifdown, dev, "disabling interface\n");
+
+ spin_lock_irqsave(&cp->lock, flags);
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ cp_stop_hw(cp);
+
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ free_irq(dev->irq, dev);
+
+ cp_free_rings(cp);
+ return 0;
+}
+
+static void cp_tx_timeout(struct net_device *dev)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned long flags;
+ int rc;
+
+ netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
+ cpr8(Cmd), cpr16(CpCmd),
+ cpr16(IntrStatus), cpr16(IntrMask));
+
+ spin_lock_irqsave(&cp->lock, flags);
+
+ cp_stop_hw(cp);
+ cp_clean_rings(cp);
+ rc = cp_init_rings(cp);
+ cp_start_hw(cp);
+
+ netif_wake_queue(dev);
+
+ spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+#ifdef BROKEN
+static int cp_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ int rc;
+ unsigned long flags;
+
+ /* check for invalid MTU, according to hardware limits */
+ if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
+ return -EINVAL;
+
+ /* if network interface not up, no need for complexity */
+ if (!netif_running(dev)) {
+ dev->mtu = new_mtu;
+ cp_set_rxbufsize(cp); /* set new rx buf size */
+ return 0;
+ }
+
+ spin_lock_irqsave(&cp->lock, flags);
+
+ cp_stop_hw(cp); /* stop h/w and free rings */
+ cp_clean_rings(cp);
+
+ dev->mtu = new_mtu;
+ cp_set_rxbufsize(cp); /* set new rx buf size */
+
+ rc = cp_init_rings(cp); /* realloc and restart h/w */
+ cp_start_hw(cp);
+
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ return rc;
+}
+#endif /* BROKEN */
+
+static const char mii_2_8139_map[8] = {
+ BasicModeCtrl,
+ BasicModeStatus,
+ 0,
+ 0,
+ NWayAdvert,
+ NWayLPAR,
+ NWayExpansion,
+ 0
+};
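+/* This table emulates an MII PHY through the chip's own registers: MII
+ * register 0 (BMCR) maps to BasicModeCtrl at 0x62, register 1 (BMSR) to
+ * BasicModeStatus at 0x64, and so on.  Zero entries (registers 2, 3 and 7)
+ * have no 8139 equivalent, so mdio_read() below returns 0 for them. */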
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct cp_private *cp = netdev_priv(dev);
+
+ return location < 8 && mii_2_8139_map[location] ?
+ readw(cp->regs + mii_2_8139_map[location]) : 0;
+}
+
+
+static void mdio_write(struct net_device *dev, int phy_id, int location,
+ int value)
+{
+ struct cp_private *cp = netdev_priv(dev);
+
+ if (location == 0) {
+ cpw8(Cfg9346, Cfg9346_Unlock);
+ cpw16(BasicModeCtrl, value);
+ cpw8(Cfg9346, Cfg9346_Lock);
+ } else if (location < 8 && mii_2_8139_map[location])
+ cpw16(mii_2_8139_map[location], value);
+}
+
+/* Set the ethtool Wake-on-LAN settings */
+static int netdev_set_wol (struct cp_private *cp,
+ const struct ethtool_wolinfo *wol)
+{
+ u8 options;
+
+ options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
+ /* If WOL is being disabled, no need for complexity */
+ if (wol->wolopts) {
+ if (wol->wolopts & WAKE_PHY) options |= LinkUp;
+ if (wol->wolopts & WAKE_MAGIC) options |= MagicPacket;
+ }
+
+ cpw8 (Cfg9346, Cfg9346_Unlock);
+ cpw8 (Config3, options);
+ cpw8 (Cfg9346, Cfg9346_Lock);
+
+ options = 0; /* Paranoia setting */
+ options = cpr8 (Config5) & ~(UWF | MWF | BWF);
+ /* If WOL is being disabled, no need for complexity */
+ if (wol->wolopts) {
+ if (wol->wolopts & WAKE_UCAST) options |= UWF;
+ if (wol->wolopts & WAKE_BCAST) options |= BWF;
+ if (wol->wolopts & WAKE_MCAST) options |= MWF;
+ }
+
+ cpw8 (Config5, options);
+
+ cp->wol_enabled = (wol->wolopts) ? 1 : 0;
+
+ return 0;
+}
+
+/* Get the ethtool Wake-on-LAN settings */
+static void netdev_get_wol (struct cp_private *cp,
+ struct ethtool_wolinfo *wol)
+{
+ u8 options;
+
+ wol->wolopts = 0; /* Start from scratch */
+ wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
+ WAKE_MCAST | WAKE_UCAST;
+ /* We don't need to go on if WOL is disabled */
+ if (!cp->wol_enabled) return;
+
+ options = cpr8 (Config3);
+ if (options & LinkUp) wol->wolopts |= WAKE_PHY;
+ if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
+
+ options = 0; /* Paranoia setting */
+ options = cpr8 (Config5);
+ if (options & UWF) wol->wolopts |= WAKE_UCAST;
+ if (options & BWF) wol->wolopts |= WAKE_BCAST;
+ if (options & MWF) wol->wolopts |= WAKE_MCAST;
+}
+
+static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct cp_private *cp = netdev_priv(dev);
+
+ strcpy (info->driver, DRV_NAME);
+ strcpy (info->version, DRV_VERSION);
+ strcpy (info->bus_info, pci_name(cp->pdev));
+}
+
+static int cp_get_regs_len(struct net_device *dev)
+{
+ return CP_REGS_SIZE;
+}
+
+static int cp_get_sset_count (struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return CP_NUM_STATS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ rc = mii_ethtool_gset(&cp->mii_if, cmd);
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ return rc;
+}
+
+static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ rc = mii_ethtool_sset(&cp->mii_if, cmd);
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ return rc;
+}
+
+static int cp_nway_reset(struct net_device *dev)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ return mii_nway_restart(&cp->mii_if);
+}
+
+static u32 cp_get_msglevel(struct net_device *dev)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ return cp->msg_enable;
+}
+
+static void cp_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ cp->msg_enable = value;
+}
+
+static int cp_set_features(struct net_device *dev, u32 features)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned long flags;
+
+ if (!((dev->features ^ features) & NETIF_F_RXCSUM))
+ return 0;
+
+ spin_lock_irqsave(&cp->lock, flags);
+
+ if (features & NETIF_F_RXCSUM)
+ cp->cpcmd |= RxChkSum;
+ else
+ cp->cpcmd &= ~RxChkSum;
+
+ if (features & NETIF_F_HW_VLAN_RX)
+ cp->cpcmd |= RxVlanOn;
+ else
+ cp->cpcmd &= ~RxVlanOn;
+
+ cpw16_f(CpCmd, cp->cpcmd);
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ return 0;
+}
+
+static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned long flags;
+
+ if (regs->len < CP_REGS_SIZE)
+ return /* -EINVAL */;
+
+ regs->version = CP_REGS_VER;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
+ spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave (&cp->lock, flags);
+ netdev_get_wol (cp, wol);
+ spin_unlock_irqrestore (&cp->lock, flags);
+}
+
+static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave (&cp->lock, flags);
+ rc = netdev_set_wol (cp, wol);
+ spin_unlock_irqrestore (&cp->lock, flags);
+
+ return rc;
+}
+
+static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+ break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+static void cp_get_ethtool_stats (struct net_device *dev,
+ struct ethtool_stats *estats, u64 *tmp_stats)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ struct cp_dma_stats *nic_stats;
+ dma_addr_t dma;
+ int i;
+
+ nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
+ &dma, GFP_KERNEL);
+ if (!nic_stats)
+ return;
+
+ /* begin NIC statistics dump */
+ cpw32(StatsAddr + 4, (u64)dma >> 32);
+ cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
+ cpr32(StatsAddr);
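+	/* DumpStats doubles as a busy flag: the chip clears it once the
+	 * statistics block has been DMAed, so poll it (up to ~10 ms here)
+	 * before trusting the buffer contents. */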
+
+ for (i = 0; i < 1000; i++) {
+ if ((cpr32(StatsAddr) & DumpStats) == 0)
+ break;
+ udelay(10);
+ }
+ cpw32(StatsAddr, 0);
+ cpw32(StatsAddr + 4, 0);
+ cpr32(StatsAddr);
+
+ i = 0;
+ tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
+ tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
+ tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
+ tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
+ tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
+ tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
+ tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
+ tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
+ tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
+ tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
+ tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
+ tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
+ tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
+ tmp_stats[i++] = cp->cp_stats.rx_frags;
+ BUG_ON(i != CP_NUM_STATS);
+
+ dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
+}
+
+static const struct ethtool_ops cp_ethtool_ops = {
+ .get_drvinfo = cp_get_drvinfo,
+ .get_regs_len = cp_get_regs_len,
+ .get_sset_count = cp_get_sset_count,
+ .get_settings = cp_get_settings,
+ .set_settings = cp_set_settings,
+ .nway_reset = cp_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = cp_get_msglevel,
+ .set_msglevel = cp_set_msglevel,
+ .get_regs = cp_get_regs,
+ .get_wol = cp_get_wol,
+ .set_wol = cp_set_wol,
+ .get_strings = cp_get_strings,
+ .get_ethtool_stats = cp_get_ethtool_stats,
+ .get_eeprom_len = cp_get_eeprom_len,
+ .get_eeprom = cp_get_eeprom,
+ .set_eeprom = cp_set_eeprom,
+};
+
+static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ int rc;
+ unsigned long flags;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return rc;
+}
+
+static int cp_set_mac_address(struct net_device *dev, void *p)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ spin_lock_irq(&cp->lock);
+
+ cpw8_f(Cfg9346, Cfg9346_Unlock);
+ cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
+ cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
+ cpw8_f(Cfg9346, Cfg9346_Lock);
+
+ spin_unlock_irq(&cp->lock);
+
+ return 0;
+}
+
+/* Serial EEPROM section. */
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
+#define EE_CS 0x08 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
+#define EE_WRITE_0 0x00
+#define EE_WRITE_1 0x02
+#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
+#define EE_ENB (0x80 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+   No extra delay is needed with 33MHz PCI, but 66MHz may change this.
+ */
+
+#define eeprom_delay() readl(ee_addr)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_EXTEND_CMD (4)
+#define EE_WRITE_CMD (5)
+#define EE_READ_CMD (6)
+#define EE_ERASE_CMD (7)
+
+#define EE_EWDS_ADDR (0)
+#define EE_WRAL_ADDR (1)
+#define EE_ERAL_ADDR (2)
+#define EE_EWEN_ADDR (3)
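+/* Framing sketch for the extended commands used by eeprom_extend_cmd()
+ * below: EWDS/WRAL/ERAL/EWEN share opcode 00 and are distinguished by the
+ * two top address bits.  With a 6-bit address, EWEN is therefore sent as
+ * (EE_EXTEND_CMD << 6) | (EE_EWEN_ADDR << 4) = 0x130, i.e. start bit 1,
+ * opcode 00, address bits 110000. */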
+
+#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
+
+static void eeprom_cmd_start(void __iomem *ee_addr)
+{
+ writeb (EE_ENB & ~EE_CS, ee_addr);
+ writeb (EE_ENB, ee_addr);
+ eeprom_delay ();
+}
+
+static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
+{
+ int i;
+
+ /* Shift the command bits out. */
+ for (i = cmd_len - 1; i >= 0; i--) {
+ int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ writeb (EE_ENB | dataval, ee_addr);
+ eeprom_delay ();
+ writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay ();
+ }
+ writeb (EE_ENB, ee_addr);
+ eeprom_delay ();
+}
+
+static void eeprom_cmd_end(void __iomem *ee_addr)
+{
+ writeb (~EE_CS, ee_addr);
+ eeprom_delay ();
+}
+
+static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
+ int addr_len)
+{
+ int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
+
+ eeprom_cmd_start(ee_addr);
+ eeprom_cmd(ee_addr, cmd, 3 + addr_len);
+ eeprom_cmd_end(ee_addr);
+}
+
+static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
+{
+ int i;
+ u16 retval = 0;
+ void __iomem *ee_addr = ioaddr + Cfg9346;
+ int read_cmd = location | (EE_READ_CMD << addr_len);
+
+ eeprom_cmd_start(ee_addr);
+ eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
+
+ for (i = 16; i > 0; i--) {
+ writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay ();
+ retval =
+ (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
+ 0);
+ writeb (EE_ENB, ee_addr);
+ eeprom_delay ();
+ }
+
+ eeprom_cmd_end(ee_addr);
+
+ return retval;
+}
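+/* Example: the MAC address occupies EEPROM words 7..9, so the probe path
+ * calls read_eeprom(regs, i + 7, addr_len) for i = 0, 1, 2 and copies the
+ * resulting little-endian words straight into dev->dev_addr. */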
+
+static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
+ int addr_len)
+{
+ int i;
+ void __iomem *ee_addr = ioaddr + Cfg9346;
+ int write_cmd = location | (EE_WRITE_CMD << addr_len);
+
+ eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
+
+ eeprom_cmd_start(ee_addr);
+ eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
+ eeprom_cmd(ee_addr, val, 16);
+ eeprom_cmd_end(ee_addr);
+
+ eeprom_cmd_start(ee_addr);
+ for (i = 0; i < 20000; i++)
+ if (readb(ee_addr) & EE_DATA_READ)
+ break;
+ eeprom_cmd_end(ee_addr);
+
+ eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
+}
+
+static int cp_get_eeprom_len(struct net_device *dev)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ int size;
+
+ spin_lock_irq(&cp->lock);
+ size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
+ spin_unlock_irq(&cp->lock);
+
+ return size;
+}
+
+static int cp_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned int addr_len;
+ u16 val;
+ u32 offset = eeprom->offset >> 1;
+ u32 len = eeprom->len;
+ u32 i = 0;
+
+ eeprom->magic = CP_EEPROM_MAGIC;
+
+ spin_lock_irq(&cp->lock);
+
+ addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
+
+ if (eeprom->offset & 1) {
+ val = read_eeprom(cp->regs, offset, addr_len);
+ data[i++] = (u8)(val >> 8);
+ offset++;
+ }
+
+ while (i < len - 1) {
+ val = read_eeprom(cp->regs, offset, addr_len);
+ data[i++] = (u8)val;
+ data[i++] = (u8)(val >> 8);
+ offset++;
+ }
+
+ if (i < len) {
+ val = read_eeprom(cp->regs, offset, addr_len);
+ data[i] = (u8)val;
+ }
+
+ spin_unlock_irq(&cp->lock);
+ return 0;
+}
+
+static int cp_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned int addr_len;
+ u16 val;
+ u32 offset = eeprom->offset >> 1;
+ u32 len = eeprom->len;
+ u32 i = 0;
+
+ if (eeprom->magic != CP_EEPROM_MAGIC)
+ return -EINVAL;
+
+ spin_lock_irq(&cp->lock);
+
+ addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
+
+ if (eeprom->offset & 1) {
+ val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
+ val |= (u16)data[i++] << 8;
+ write_eeprom(cp->regs, offset, val, addr_len);
+ offset++;
+ }
+
+ while (i < len - 1) {
+ val = (u16)data[i++];
+ val |= (u16)data[i++] << 8;
+ write_eeprom(cp->regs, offset, val, addr_len);
+ offset++;
+ }
+
+ if (i < len) {
+ val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
+ val |= (u16)data[i];
+ write_eeprom(cp->regs, offset, val, addr_len);
+ }
+
+ spin_unlock_irq(&cp->lock);
+ return 0;
+}
+
+/* Put the board into a low-power state (D3hot) and wait for the WakeUp signal */
+static void cp_set_d3_state (struct cp_private *cp)
+{
+ pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
+ pci_set_power_state (cp->pdev, PCI_D3hot);
+}
+
+static const struct net_device_ops cp_netdev_ops = {
+ .ndo_open = cp_open,
+ .ndo_stop = cp_close,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = cp_set_mac_address,
+ .ndo_set_rx_mode = cp_set_rx_mode,
+ .ndo_get_stats = cp_get_stats,
+ .ndo_do_ioctl = cp_ioctl,
+ .ndo_start_xmit = cp_start_xmit,
+ .ndo_tx_timeout = cp_tx_timeout,
+ .ndo_set_features = cp_set_features,
+#ifdef BROKEN
+ .ndo_change_mtu = cp_change_mtu,
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cp_poll_controller,
+#endif
+};
+
+static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct cp_private *cp;
+ int rc;
+ void __iomem *regs;
+ resource_size_t pciaddr;
+ unsigned int addr_len, i, pci_using_dac;
+
+#ifndef MODULE
+ static int version_printed;
+ if (version_printed++ == 0)
+ pr_info("%s", version);
+#endif
+
+ if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
+ pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
+ dev_info(&pdev->dev,
+ "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
+ pdev->vendor, pdev->device, pdev->revision);
+ return -ENODEV;
+ }
+
+ dev = alloc_etherdev(sizeof(struct cp_private));
+ if (!dev)
+ return -ENOMEM;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ cp = netdev_priv(dev);
+ cp->pdev = pdev;
+ cp->dev = dev;
+ cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
+ spin_lock_init (&cp->lock);
+ cp->mii_if.dev = dev;
+ cp->mii_if.mdio_read = mdio_read;
+ cp->mii_if.mdio_write = mdio_write;
+ cp->mii_if.phy_id = CP_INTERNAL_PHY;
+ cp->mii_if.phy_id_mask = 0x1f;
+ cp->mii_if.reg_num_mask = 0x1f;
+ cp_set_rxbufsize(cp);
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ goto err_out_free;
+
+ rc = pci_set_mwi(pdev);
+ if (rc)
+ goto err_out_disable;
+
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out_mwi;
+
+ pciaddr = pci_resource_start(pdev, 1);
+ if (!pciaddr) {
+ rc = -EIO;
+ dev_err(&pdev->dev, "no MMIO resource\n");
+ goto err_out_res;
+ }
+ if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
+ rc = -EIO;
+ dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
+ (unsigned long long)pci_resource_len(pdev, 1));
+ goto err_out_res;
+ }
+
+ /* Configure DMA attributes. */
+ if ((sizeof(dma_addr_t) > 4) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ pci_using_dac = 1;
+ } else {
+ pci_using_dac = 0;
+
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_out_res;
+ }
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "No usable consistent DMA configuration, aborting\n");
+ goto err_out_res;
+ }
+ }
+
+ cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
+ PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
+
+ dev->features |= NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_RXCSUM;
+
+ regs = ioremap(pciaddr, CP_REGS_SIZE);
+ if (!regs) {
+ rc = -EIO;
+ dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
+ (unsigned long long)pci_resource_len(pdev, 1),
+ (unsigned long long)pciaddr);
+ goto err_out_res;
+ }
+ dev->base_addr = (unsigned long) regs;
+ cp->regs = regs;
+
+ cp_stop_hw(cp);
+
+ /* read MAC address from EEPROM */
+ addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
+ for (i = 0; i < 3; i++)
+ ((__le16 *) (dev->dev_addr))[i] =
+ cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ dev->netdev_ops = &cp_netdev_ops;
+ netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
+ dev->ethtool_ops = &cp_ethtool_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+
+ if (pci_using_dac)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ /* disabled by default until verified */
+ dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_HIGHDMA;
+
+ dev->irq = pdev->irq;
+
+ rc = register_netdev(dev);
+ if (rc)
+ goto err_out_iomap;
+
+ netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
+ dev->base_addr, dev->dev_addr, dev->irq);
+
+ pci_set_drvdata(pdev, dev);
+
+ /* enable busmastering and memory-write-invalidate */
+ pci_set_master(pdev);
+
+ if (cp->wol_enabled)
+ cp_set_d3_state (cp);
+
+ return 0;
+
+err_out_iomap:
+ iounmap(regs);
+err_out_res:
+ pci_release_regions(pdev);
+err_out_mwi:
+ pci_clear_mwi(pdev);
+err_out_disable:
+ pci_disable_device(pdev);
+err_out_free:
+ free_netdev(dev);
+ return rc;
+}
+
+static void cp_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct cp_private *cp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ iounmap(cp->regs);
+ if (cp->wol_enabled)
+ pci_set_power_state (pdev, PCI_D0);
+ pci_release_regions(pdev);
+ pci_clear_mwi(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(dev);
+}
+
+#ifdef CONFIG_PM
+static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned long flags;
+
+ if (!netif_running(dev))
+ return 0;
+
+ netif_device_detach (dev);
+ netif_stop_queue (dev);
+
+ spin_lock_irqsave (&cp->lock, flags);
+
+ /* Disable Rx and Tx */
+ cpw16 (IntrMask, 0);
+	cpw8 (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
+
+ spin_unlock_irqrestore (&cp->lock, flags);
+
+ pci_save_state(pdev);
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int cp_resume (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned long flags;
+
+ if (!netif_running(dev))
+ return 0;
+
+ netif_device_attach (dev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_wake(pdev, PCI_D0, 0);
+
+	/* FIXME: trouble may occur if the Rx ring buffer is depleted */
+ cp_init_rings_index (cp);
+ cp_init_hw (cp);
+ netif_start_queue (dev);
+
+ spin_lock_irqsave (&cp->lock, flags);
+
+ mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
+
+ spin_unlock_irqrestore (&cp->lock, flags);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct pci_driver cp_driver = {
+ .name = DRV_NAME,
+ .id_table = cp_pci_tbl,
+ .probe = cp_init_one,
+ .remove = cp_remove_one,
+#ifdef CONFIG_PM
+ .resume = cp_resume,
+ .suspend = cp_suspend,
+#endif
+};
+
+static int __init cp_init (void)
+{
+#ifdef MODULE
+ pr_info("%s", version);
+#endif
+ return pci_register_driver(&cp_driver);
+}
+
+static void __exit cp_exit (void)
+{
+ pci_unregister_driver (&cp_driver);
+}
+
+module_init(cp_init);
+module_exit(cp_exit);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
new file mode 100644
index 000000000000..4d6b254fc6c1
--- /dev/null
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -0,0 +1,2622 @@
+/*
+
+ 8139too.c: A RealTek RTL-8139 Fast Ethernet driver for Linux.
+
+ Maintained by Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000-2002 Jeff Garzik
+
+ Much code comes from Donald Becker's rtl8139.c driver,
+ versions 1.13 and older. This driver was originally based
+ on rtl8139.c version 1.07. Header of rtl8139.c version 1.13:
+
+ -----<snip>-----
+
+ Written 1997-2001 by Donald Becker.
+ This software may be used and distributed according to the
+ terms of the GNU General Public License (GPL), incorporated
+ herein by reference. Drivers based on or derived from this
+ code fall under the GPL and must retain the authorship,
+ copyright and license notice. This file is not a complete
+ program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for boards based on the RTL8129 and RTL8139
+ PCI ethernet chips.
+
+ The author may be reached as becker@scyld.com, or C/O Scyld
+ Computing Corporation 410 Severn Ave., Suite 210 Annapolis
+ MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/rtl8139.html
+
+ Twister-tuning table provided by Kinston
+ <shangh@realtek.com.tw>.
+
+ -----<snip>-----
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ Contributors:
+
+ Donald Becker - he wrote the original driver, kudos to him!
+ (but please don't e-mail him for support, this isn't his driver)
+
+ Tigran Aivazian - bug fixes, skbuff free cleanup
+
+ Martin Mares - suggestions for PCI cleanup
+
+ David S. Miller - PCI DMA and softnet updates
+
+ Ernst Gill - fixes ported from BSD driver
+
+ Daniel Kobras - identified specific locations of
+ posted MMIO write bugginess
+
+ Gerard Sharp - bug fix, testing and feedback
+
+ David Ford - Rx ring wrap fix
+
+ Dan DeMaggio - swapped RTL8139 cards with me, and allowed me
+ to find and fix a crucial bug on older chipsets.
+
+ Donald Becker/Chris Butterworth/Marcus Westergren -
+ Noticed various Rx packet size-related buglets.
+
+ Santiago Garcia Mantinan - testing and feedback
+
+ Jens David - 2.2.x kernel backports
+
+ Martin Dennett - incredibly helpful insight on undocumented
+ features of the 8139 chips
+
+ Jean-Jacques Michel - bug fix
+
+ Tobias Ringström - Rx interrupt status checking suggestion
+
+ Andrew Morton - Clear blocked signals, avoid
+ buffer overrun setting current->comm.
+
+ Kalle Olavi Niemitalo - Wake-on-LAN ioctls
+
+ Robert Kuebel - Save kernel thread from dying on any signal.
+
+ Submitting bug reports:
+
+ "rtl8139-diag -mmmaaavvveefN" output
+ enable RTL8139_DEBUG below, and look at 'dmesg' or kernel log
+
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME "8139too"
+#define DRV_VERSION "0.9.28"
+
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/completion.h>
+#include <linux/crc32.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/gfp.h>
+#include <asm/irq.h>
+
+#define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION
+
+/* Default Message level */
+#define RTL8139_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK)
+
+
+/* define to 1, 2 or 3 to enable copious debugging info */
+#define RTL8139_DEBUG 0
+
+/* define to 1 to disable lightweight runtime debugging checks */
+#undef RTL8139_NDEBUG
+
+
+#ifdef RTL8139_NDEBUG
+# define assert(expr) do {} while (0)
+#else
+# define assert(expr) \
+ if (unlikely(!(expr))) { \
+ pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr, __FILE__, __func__, __LINE__); \
+ }
+#endif
+
+
+/* A few user-configurable values. */
+/* media options */
+#define MAX_UNITS 8
+static int media[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Whether to use MMIO or PIO. Default to MMIO. */
+#ifdef CONFIG_8139TOO_PIO
+static int use_io = 1;
+#else
+static int use_io = 0;
+#endif
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ The RTL chips use a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
+
+/* bitmapped message enable number */
+static int debug = -1;
+
+/*
+ * Receive ring size
+ * Warning: 64K ring has hardware issues and may lock up.
+ */
+#if defined(CONFIG_SH_DREAMCAST)
+#define RX_BUF_IDX 0 /* 8K ring */
+#else
+#define RX_BUF_IDX 2 /* 32K ring */
+#endif
+#define RX_BUF_LEN (8192 << RX_BUF_IDX)
+#define RX_BUF_PAD 16
+#define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */
+
+#if RX_BUF_LEN == 65536
+#define RX_BUF_TOT_LEN RX_BUF_LEN
+#else
+#define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD)
+#endif
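+/* With RxNoWrap set (all ring sizes below 64K here) the chip writes a
+   packet that would cross the end of the ring contiguously past it
+   instead of wrapping, so the buffer is over-allocated by
+   RX_BUF_WRAP_PAD.  Wrap cannot be disabled on the 64K ring, hence no
+   padding in that case. */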
+
+/* Number of Tx descriptor registers. */
+#define NUM_TX_DESC 4
+
+/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/
+#define MAX_ETH_FRAME_SIZE 1536
+
+/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
+#define TX_BUF_SIZE MAX_ETH_FRAME_SIZE
+#define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC)
+
+/* PCI Tuning Parameters
+ Threshold is bytes transferred to chip before transmission starts. */
+#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */
+
+/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
+#define RX_FIFO_THRESH 7 /* Rx buffer level before first PCI xfer. */
+#define RX_DMA_BURST 7 /* Maximum PCI burst, '6' is 1024 */
+#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
+#define TX_RETRY 8 /* 0-15. retries = 16 + (TX_RETRY * 16) */
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+
+enum {
+ HAS_MII_XCVR = 0x010000,
+ HAS_CHIP_XCVR = 0x020000,
+ HAS_LNK_CHNG = 0x040000,
+};
+
+#define RTL_NUM_STATS 4 /* number of ETHTOOL_GSTATS u64's */
+#define RTL_REGS_VER 1 /* version of reg. data in ETHTOOL_GREGS */
+#define RTL_MIN_IO_SIZE 0x80
+#define RTL8139B_IO_SIZE 256
+
+#define RTL8129_CAPS HAS_MII_XCVR
+#define RTL8139_CAPS (HAS_CHIP_XCVR|HAS_LNK_CHNG)
+
+typedef enum {
+ RTL8139 = 0,
+ RTL8129,
+} board_t;
+
+
+/* indexed by board_t, above */
+static const struct {
+ const char *name;
+ u32 hw_flags;
+} board_info[] __devinitdata = {
+ { "RealTek RTL8139", RTL8139_CAPS },
+ { "RealTek RTL8129", RTL8129_CAPS },
+};
+
+
+static DEFINE_PCI_DEVICE_TABLE(rtl8139_pci_tbl) = {
+ {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x1500, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x1186, 0x1300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x1186, 0x1340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x13d1, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x1259, 0xa117, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x1259, 0xa11e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x14ea, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x14ea, 0xab07, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x11db, 0x1234, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x1432, 0x9130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x02ac, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x018a, 0x0106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x126c, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x1743, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+
+#ifdef CONFIG_SH_SECUREEDGE5410
+ /* Bogus 8139 silicon reports 8129 without external PROM :-( */
+ {0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+#endif
+#ifdef CONFIG_8139TOO_8129
+ {0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8129 },
+#endif
+
+ /* some crazy cards report invalid vendor ids like
+ * 0x0001 here. The other ids are valid and constant,
+ * so we simply don't match on the main vendor id.
+ */
+ {PCI_ANY_ID, 0x8139, 0x10ec, 0x8139, 0, 0, RTL8139 },
+ {PCI_ANY_ID, 0x8139, 0x1186, 0x1300, 0, 0, RTL8139 },
+ {PCI_ANY_ID, 0x8139, 0x13d1, 0xab06, 0, 0, RTL8139 },
+
+ {0,}
+};
+MODULE_DEVICE_TABLE (pci, rtl8139_pci_tbl);
+
+static struct {
+ const char str[ETH_GSTRING_LEN];
+} ethtool_stats_keys[] = {
+ { "early_rx" },
+ { "tx_buf_mapped" },
+ { "tx_timeouts" },
+ { "rx_lost_in_ring" },
+};
+
+/* The rest of these values should never change. */
+
+/* Symbolic offsets to registers. */
+enum RTL8139_registers {
+ MAC0 = 0, /* Ethernet hardware address. */
+ MAR0 = 8, /* Multicast filter. */
+ TxStatus0 = 0x10, /* Transmit status (Four 32bit registers). */
+ TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */
+ RxBuf = 0x30,
+ ChipCmd = 0x37,
+ RxBufPtr = 0x38,
+ RxBufAddr = 0x3A,
+ IntrMask = 0x3C,
+ IntrStatus = 0x3E,
+ TxConfig = 0x40,
+ RxConfig = 0x44,
+ Timer = 0x48, /* A general-purpose counter. */
+ RxMissed = 0x4C, /* 24 bits valid, write clears. */
+ Cfg9346 = 0x50,
+ Config0 = 0x51,
+ Config1 = 0x52,
+ TimerInt = 0x54,
+ MediaStatus = 0x58,
+ Config3 = 0x59,
+ Config4 = 0x5A, /* absent on RTL-8139A */
+ HltClk = 0x5B,
+ MultiIntr = 0x5C,
+ TxSummary = 0x60,
+ BasicModeCtrl = 0x62,
+ BasicModeStatus = 0x64,
+ NWayAdvert = 0x66,
+ NWayLPAR = 0x68,
+ NWayExpansion = 0x6A,
+ /* Undocumented registers, but required for proper operation. */
+ FIFOTMS = 0x70, /* FIFO Control and test. */
+ CSCR = 0x74, /* Chip Status and Configuration Register. */
+ PARA78 = 0x78,
+ FlashReg = 0xD4, /* Communication with Flash ROM, four bytes. */
+ PARA7c = 0x7c, /* Magic transceiver parameter register. */
+ Config5 = 0xD8, /* absent on RTL-8139A */
+};
+
+enum ClearBitMasks {
+ MultiIntrClear = 0xF000,
+ ChipCmdClear = 0xE2,
+ Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1),
+};
+
+enum ChipCmdBits {
+ CmdReset = 0x10,
+ CmdRxEnb = 0x08,
+ CmdTxEnb = 0x04,
+ RxBufEmpty = 0x01,
+};
+
+/* Interrupt register bits, using my own meaningful names. */
+enum IntrStatusBits {
+ PCIErr = 0x8000,
+ PCSTimeout = 0x4000,
+ RxFIFOOver = 0x40,
+ RxUnderrun = 0x20,
+ RxOverflow = 0x10,
+ TxErr = 0x08,
+ TxOK = 0x04,
+ RxErr = 0x02,
+ RxOK = 0x01,
+
+ RxAckBits = RxFIFOOver | RxOverflow | RxOK,
+};
+
+enum TxStatusBits {
+ TxHostOwns = 0x2000,
+ TxUnderrun = 0x4000,
+ TxStatOK = 0x8000,
+ TxOutOfWindow = 0x20000000,
+ TxAborted = 0x40000000,
+ TxCarrierLost = 0x80000000,
+};
+enum RxStatusBits {
+ RxMulticast = 0x8000,
+ RxPhysical = 0x4000,
+ RxBroadcast = 0x2000,
+ RxBadSymbol = 0x0020,
+ RxRunt = 0x0010,
+ RxTooLong = 0x0008,
+ RxCRCErr = 0x0004,
+ RxBadAlign = 0x0002,
+ RxStatusOK = 0x0001,
+};
+
+/* Bits in RxConfig. */
+enum rx_mode_bits {
+ AcceptErr = 0x20,
+ AcceptRunt = 0x10,
+ AcceptBroadcast = 0x08,
+ AcceptMulticast = 0x04,
+ AcceptMyPhys = 0x02,
+ AcceptAllPhys = 0x01,
+};
+
+/* Bits in TxConfig. */
+enum tx_config_bits {
+ /* Interframe Gap Time. Only TxIFG96 doesn't violate IEEE 802.3 */
+ TxIFGShift = 24,
+ TxIFG84 = (0 << TxIFGShift), /* 8.4us / 840ns (10 / 100Mbps) */
+ TxIFG88 = (1 << TxIFGShift), /* 8.8us / 880ns (10 / 100Mbps) */
+ TxIFG92 = (2 << TxIFGShift), /* 9.2us / 920ns (10 / 100Mbps) */
+ TxIFG96 = (3 << TxIFGShift), /* 9.6us / 960ns (10 / 100Mbps) */
+
+ TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */
+ TxCRC = (1 << 16), /* DISABLE Tx pkt CRC append */
+ TxClearAbt = (1 << 0), /* Clear abort (WO) */
+ TxDMAShift = 8, /* DMA burst value (0-7) is shifted X many bits */
+ TxRetryShift = 4, /* TXRR value (0-15) is shifted X many bits */
+
+ TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */
+};
+
+/* Bits in Config1 */
+enum Config1Bits {
+ Cfg1_PM_Enable = 0x01,
+ Cfg1_VPD_Enable = 0x02,
+ Cfg1_PIO = 0x04,
+ Cfg1_MMIO = 0x08,
+ LWAKE = 0x10, /* not on 8139, 8139A */
+ Cfg1_Driver_Load = 0x20,
+ Cfg1_LED0 = 0x40,
+ Cfg1_LED1 = 0x80,
+ SLEEP = (1 << 1), /* only on 8139, 8139A */
+ PWRDN = (1 << 0), /* only on 8139, 8139A */
+};
+
+/* Bits in Config3 */
+enum Config3Bits {
+ Cfg3_FBtBEn = (1 << 0), /* 1 = Fast Back to Back */
+ Cfg3_FuncRegEn = (1 << 1), /* 1 = enable CardBus Function registers */
+ Cfg3_CLKRUN_En = (1 << 2), /* 1 = enable CLKRUN */
+ Cfg3_CardB_En = (1 << 3), /* 1 = enable CardBus registers */
+ Cfg3_LinkUp = (1 << 4), /* 1 = wake up on link up */
+ Cfg3_Magic = (1 << 5), /* 1 = wake up on Magic Packet (tm) */
+ Cfg3_PARM_En = (1 << 6), /* 0 = software can set twister parameters */
+ Cfg3_GNTSel = (1 << 7), /* 1 = delay 1 clock from PCI GNT signal */
+};
+
+/* Bits in Config4 */
+enum Config4Bits {
+ LWPTN = (1 << 2), /* not on 8139, 8139A */
+};
+
+/* Bits in Config5 */
+enum Config5Bits {
+ Cfg5_PME_STS = (1 << 0), /* 1 = PCI reset resets PME_Status */
+ Cfg5_LANWake = (1 << 1), /* 1 = enable LANWake signal */
+ Cfg5_LDPS = (1 << 2), /* 0 = save power when link is down */
+ Cfg5_FIFOAddrPtr= (1 << 3), /* Realtek internal SRAM testing */
+ Cfg5_UWF = (1 << 4), /* 1 = accept unicast wakeup frame */
+ Cfg5_MWF = (1 << 5), /* 1 = accept multicast wakeup frame */
+ Cfg5_BWF = (1 << 6), /* 1 = accept broadcast wakeup frame */
+};
+
+enum RxConfigBits {
+ /* rx fifo threshold */
+ RxCfgFIFOShift = 13,
+ RxCfgFIFONone = (7 << RxCfgFIFOShift),
+
+ /* Max DMA burst */
+ RxCfgDMAShift = 8,
+ RxCfgDMAUnlimited = (7 << RxCfgDMAShift),
+
+ /* rx ring buffer length */
+ RxCfgRcv8K = 0,
+ RxCfgRcv16K = (1 << 11),
+ RxCfgRcv32K = (1 << 12),
+ RxCfgRcv64K = (1 << 11) | (1 << 12),
+
+ /* Disable packet wrap at end of Rx buffer. (not possible with 64k) */
+ RxNoWrap = (1 << 7),
+};
+
+/* Twister tuning parameters from RealTek.
+ Completely undocumented, but required to tune bad links on some boards. */
+enum CSCRBits {
+ CSCR_LinkOKBit = 0x0400,
+ CSCR_LinkChangeBit = 0x0800,
+ CSCR_LinkStatusBits = 0x0f000,
+ CSCR_LinkDownOffCmd = 0x003c0,
+ CSCR_LinkDownCmd = 0x0f3c0,
+};
+
+enum Cfg9346Bits {
+ Cfg9346_Lock = 0x00,
+ Cfg9346_Unlock = 0xC0,
+};
+
+typedef enum {
+ CH_8139 = 0,
+ CH_8139_K,
+ CH_8139A,
+ CH_8139A_G,
+ CH_8139B,
+ CH_8130,
+ CH_8139C,
+ CH_8100,
+ CH_8100B_8139D,
+ CH_8101,
+} chip_t;
+
+enum chip_flags {
+ HasHltClk = (1 << 0),
+ HasLWake = (1 << 1),
+};
+
+#define HW_REVID(b30, b29, b28, b27, b26, b23, b22) \
+ (b30<<30 | b29<<29 | b28<<28 | b27<<27 | b26<<26 | b23<<23 | b22<<22)
+#define HW_REVID_MASK HW_REVID(1, 1, 1, 1, 1, 1, 1)
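+/* TxConfig bits 30-26, 23 and 22 encode the silicon revision; HW_REVID
+   builds the per-chip signature and HW_REVID_MASK extracts those bits
+   from a TxConfig read for the rtl_chip_info[] match below. */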
+
+/* directly indexed by chip_t, above */
+static const struct {
+ const char *name;
+ u32 version; /* from RTL8139C/RTL8139D docs */
+ u32 flags;
+} rtl_chip_info[] = {
+ { "RTL-8139",
+ HW_REVID(1, 0, 0, 0, 0, 0, 0),
+ HasHltClk,
+ },
+
+ { "RTL-8139 rev K",
+ HW_REVID(1, 1, 0, 0, 0, 0, 0),
+ HasHltClk,
+ },
+
+ { "RTL-8139A",
+ HW_REVID(1, 1, 1, 0, 0, 0, 0),
+ HasHltClk, /* XXX undocumented? */
+ },
+
+ { "RTL-8139A rev G",
+ HW_REVID(1, 1, 1, 0, 0, 1, 0),
+ HasHltClk, /* XXX undocumented? */
+ },
+
+ { "RTL-8139B",
+ HW_REVID(1, 1, 1, 1, 0, 0, 0),
+ HasLWake,
+ },
+
+ { "RTL-8130",
+ HW_REVID(1, 1, 1, 1, 1, 0, 0),
+ HasLWake,
+ },
+
+ { "RTL-8139C",
+ HW_REVID(1, 1, 1, 0, 1, 0, 0),
+ HasLWake,
+ },
+
+ { "RTL-8100",
+ HW_REVID(1, 1, 1, 1, 0, 1, 0),
+ HasLWake,
+ },
+
+ { "RTL-8100B/8139D",
+ HW_REVID(1, 1, 1, 0, 1, 0, 1),
+ HasHltClk /* XXX undocumented? */
+ | HasLWake,
+ },
+
+ { "RTL-8101",
+ HW_REVID(1, 1, 1, 0, 1, 1, 1),
+ HasLWake,
+ },
+};
+
+struct rtl_extra_stats {
+ unsigned long early_rx;
+ unsigned long tx_buf_mapped;
+ unsigned long tx_timeouts;
+ unsigned long rx_lost_in_ring;
+};
+
+struct rtl8139_private {
+ void __iomem *mmio_addr;
+ int drv_flags;
+ struct pci_dev *pci_dev;
+ u32 msg_enable;
+ struct napi_struct napi;
+ struct net_device *dev;
+
+ unsigned char *rx_ring;
+ unsigned int cur_rx; /* RX buf index of next pkt */
+ dma_addr_t rx_ring_dma;
+
+ unsigned int tx_flag;
+ unsigned long cur_tx;
+ unsigned long dirty_tx;
+ unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */
+ unsigned char *tx_bufs; /* Tx bounce buffer region. */
+ dma_addr_t tx_bufs_dma;
+
+ signed char phys[4]; /* MII device addresses. */
+
+ /* Twister tune state. */
+ char twistie, twist_row, twist_col;
+
+ unsigned int watchdog_fired : 1;
+ unsigned int default_port : 4; /* Last dev->if_port value. */
+ unsigned int have_thread : 1;
+
+ spinlock_t lock;
+ spinlock_t rx_lock;
+
+ chip_t chipset;
+ u32 rx_config;
+ struct rtl_extra_stats xstats;
+
+ struct delayed_work thread;
+
+ struct mii_if_info mii;
+ unsigned int regs_len;
+ unsigned long fifo_copy_timeout;
+};
+
+MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>");
+MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_param(use_io, int, 0);
+MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 0=MMIO 1=PIO");
+module_param(multicast_filter_limit, int, 0);
+module_param_array(media, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+module_param(debug, int, 0);
+MODULE_PARM_DESC (debug, "8139too bitmapped message enable number");
+MODULE_PARM_DESC (multicast_filter_limit, "8139too maximum number of filtered multicast addresses");
+MODULE_PARM_DESC (media, "8139too: Bits 4+9: force full duplex, bit 5: 100Mbps");
+MODULE_PARM_DESC (full_duplex, "8139too: Force full duplex for board(s) (1)");
+
+static int read_eeprom (void __iomem *ioaddr, int location, int addr_len);
+static int rtl8139_open (struct net_device *dev);
+static int mdio_read (struct net_device *dev, int phy_id, int location);
+static void mdio_write (struct net_device *dev, int phy_id, int location,
+ int val);
+static void rtl8139_start_thread(struct rtl8139_private *tp);
+static void rtl8139_tx_timeout (struct net_device *dev);
+static void rtl8139_init_ring (struct net_device *dev);
+static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
+ struct net_device *dev);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void rtl8139_poll_controller(struct net_device *dev);
+#endif
+static int rtl8139_set_mac_address(struct net_device *dev, void *p);
+static int rtl8139_poll(struct napi_struct *napi, int budget);
+static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance);
+static int rtl8139_close (struct net_device *dev);
+static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
+static struct net_device_stats *rtl8139_get_stats (struct net_device *dev);
+static void rtl8139_set_rx_mode (struct net_device *dev);
+static void __set_rx_mode (struct net_device *dev);
+static void rtl8139_hw_start (struct net_device *dev);
+static void rtl8139_thread (struct work_struct *work);
+static void rtl8139_tx_timeout_task(struct work_struct *work);
+static const struct ethtool_ops rtl8139_ethtool_ops;
+
+/* write MMIO register, with flush */
+/* Flush avoids rtl8139 bug w/ posted MMIO writes */
+#define RTL_W8_F(reg, val8) do { iowrite8 ((val8), ioaddr + (reg)); ioread8 (ioaddr + (reg)); } while (0)
+#define RTL_W16_F(reg, val16) do { iowrite16 ((val16), ioaddr + (reg)); ioread16 (ioaddr + (reg)); } while (0)
+#define RTL_W32_F(reg, val32) do { iowrite32 ((val32), ioaddr + (reg)); ioread32 (ioaddr + (reg)); } while (0)
+
+/* write MMIO register */
+#define RTL_W8(reg, val8) iowrite8 ((val8), ioaddr + (reg))
+#define RTL_W16(reg, val16) iowrite16 ((val16), ioaddr + (reg))
+#define RTL_W32(reg, val32) iowrite32 ((val32), ioaddr + (reg))
+
+/* read MMIO register */
+#define RTL_R8(reg) ioread8 (ioaddr + (reg))
+#define RTL_R16(reg) ioread16 (ioaddr + (reg))
+#define RTL_R32(reg) ioread32 (ioaddr + (reg))
+
+
+static const u16 rtl8139_intr_mask =
+ PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver |
+ TxErr | TxOK | RxErr | RxOK;
+
+static const u16 rtl8139_norx_intr_mask =
+ PCIErr | PCSTimeout | RxUnderrun |
+ TxErr | TxOK | RxErr ;
+
+#if RX_BUF_IDX == 0
+static const unsigned int rtl8139_rx_config =
+ RxCfgRcv8K | RxNoWrap |
+ (RX_FIFO_THRESH << RxCfgFIFOShift) |
+ (RX_DMA_BURST << RxCfgDMAShift);
+#elif RX_BUF_IDX == 1
+static const unsigned int rtl8139_rx_config =
+ RxCfgRcv16K | RxNoWrap |
+ (RX_FIFO_THRESH << RxCfgFIFOShift) |
+ (RX_DMA_BURST << RxCfgDMAShift);
+#elif RX_BUF_IDX == 2
+static const unsigned int rtl8139_rx_config =
+ RxCfgRcv32K | RxNoWrap |
+ (RX_FIFO_THRESH << RxCfgFIFOShift) |
+ (RX_DMA_BURST << RxCfgDMAShift);
+#elif RX_BUF_IDX == 3
+static const unsigned int rtl8139_rx_config =
+ RxCfgRcv64K |
+ (RX_FIFO_THRESH << RxCfgFIFOShift) |
+ (RX_DMA_BURST << RxCfgDMAShift);
+#else
+#error "Invalid configuration for RX_BUF_IDX"
+#endif
+
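+/* TxConfig image written at hw_start time: interframe gap, maximum DMA
+   burst and Tx retry count folded into their respective fields. */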
+static const unsigned int rtl8139_tx_config =
+ TxIFG96 | (TX_DMA_BURST << TxDMAShift) | (TX_RETRY << TxRetryShift);
+
+static void __rtl8139_cleanup_dev (struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ struct pci_dev *pdev;
+
+ assert (dev != NULL);
+ assert (tp->pci_dev != NULL);
+ pdev = tp->pci_dev;
+
+ if (tp->mmio_addr)
+ pci_iounmap (pdev, tp->mmio_addr);
+
+ /* it's ok to call this even if we have no regions to free */
+ pci_release_regions (pdev);
+
+ free_netdev(dev);
+ pci_set_drvdata (pdev, NULL);
+}
+
+
+static void rtl8139_chip_reset (void __iomem *ioaddr)
+{
+ int i;
+
+ /* Soft reset the chip. */
+ RTL_W8 (ChipCmd, CmdReset);
+
+ /* Check that the chip has finished the reset. */
+ for (i = 1000; i > 0; i--) {
+ barrier();
+ if ((RTL_R8 (ChipCmd) & CmdReset) == 0)
+ break;
+ udelay (10);
+ }
+}
+
+
+static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev)
+{
+ void __iomem *ioaddr;
+ struct net_device *dev;
+ struct rtl8139_private *tp;
+ u8 tmp8;
+ int rc, disable_dev_on_err = 0;
+ unsigned int i;
+ unsigned long pio_start, pio_end, pio_flags, pio_len;
+ unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
+ u32 version;
+
+ assert (pdev != NULL);
+
+ /* dev and priv zeroed in alloc_etherdev */
+ dev = alloc_etherdev (sizeof (*tp));
+ if (dev == NULL) {
+ dev_err(&pdev->dev, "Unable to alloc new net device\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ tp = netdev_priv(dev);
+ tp->pci_dev = pdev;
+
+ /* enable device (incl. PCI PM wakeup and hotplug setup) */
+ rc = pci_enable_device (pdev);
+ if (rc)
+ goto err_out;
+
+ pio_start = pci_resource_start (pdev, 0);
+ pio_end = pci_resource_end (pdev, 0);
+ pio_flags = pci_resource_flags (pdev, 0);
+ pio_len = pci_resource_len (pdev, 0);
+
+ mmio_start = pci_resource_start (pdev, 1);
+ mmio_end = pci_resource_end (pdev, 1);
+ mmio_flags = pci_resource_flags (pdev, 1);
+ mmio_len = pci_resource_len (pdev, 1);
+
+ /* set this immediately, we need to know before
+ * we talk to the chip directly */
+ pr_debug("PIO region size == 0x%02lX\n", pio_len);
+ pr_debug("MMIO region size == 0x%02lX\n", mmio_len);
+
+retry:
+ if (use_io) {
+ /* make sure PCI base addr 0 is PIO */
+ if (!(pio_flags & IORESOURCE_IO)) {
+ dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n");
+ rc = -ENODEV;
+ goto err_out;
+ }
+ /* check for weird/broken PCI region reporting */
+ if (pio_len < RTL_MIN_IO_SIZE) {
+ dev_err(&pdev->dev, "Invalid PCI I/O region size(s), aborting\n");
+ rc = -ENODEV;
+ goto err_out;
+ }
+ } else {
+ /* make sure PCI base addr 1 is MMIO */
+ if (!(mmio_flags & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
+ rc = -ENODEV;
+ goto err_out;
+ }
+ if (mmio_len < RTL_MIN_IO_SIZE) {
+ dev_err(&pdev->dev, "Invalid PCI mem region size(s), aborting\n");
+ rc = -ENODEV;
+ goto err_out;
+ }
+ }
+
+ rc = pci_request_regions (pdev, DRV_NAME);
+ if (rc)
+ goto err_out;
+ disable_dev_on_err = 1;
+
+ /* enable PCI bus-mastering */
+ pci_set_master (pdev);
+
+ if (use_io) {
+ ioaddr = pci_iomap(pdev, 0, 0);
+ if (!ioaddr) {
+ dev_err(&pdev->dev, "cannot map PIO, aborting\n");
+ rc = -EIO;
+ goto err_out;
+ }
+ dev->base_addr = pio_start;
+ tp->regs_len = pio_len;
+ } else {
+ /* ioremap MMIO region */
+ ioaddr = pci_iomap(pdev, 1, 0);
+ if (ioaddr == NULL) {
+ dev_err(&pdev->dev, "cannot remap MMIO, trying PIO\n");
+ pci_release_regions(pdev);
+ use_io = 1;
+ goto retry;
+ }
+ dev->base_addr = (long) ioaddr;
+ tp->regs_len = mmio_len;
+ }
+ tp->mmio_addr = ioaddr;
+
+ /* Bring old chips out of low-power mode. */
+ RTL_W8 (HltClk, 'R');
+
+ /* check for missing/broken hardware */
+ if (RTL_R32 (TxConfig) == 0xFFFFFFFF) {
+ dev_err(&pdev->dev, "Chip not responding, ignoring board\n");
+ rc = -EIO;
+ goto err_out;
+ }
+
+ /* identify chip attached to board */
+ version = RTL_R32 (TxConfig) & HW_REVID_MASK;
+ for (i = 0; i < ARRAY_SIZE (rtl_chip_info); i++)
+ if (version == rtl_chip_info[i].version) {
+ tp->chipset = i;
+ goto match;
+ }
+
+ /* if unknown chip, assume array element #0, original RTL-8139 in this case */
+ i = 0;
+ dev_dbg(&pdev->dev, "unknown chip version, assuming RTL-8139\n");
+ dev_dbg(&pdev->dev, "TxConfig = 0x%x\n", RTL_R32 (TxConfig));
+ tp->chipset = 0;
+
+match:
+ pr_debug("chipset id (%d) == index %d, '%s'\n",
+ version, i, rtl_chip_info[i].name);
+
+ if (tp->chipset >= CH_8139B) {
+ u8 new_tmp8 = tmp8 = RTL_R8 (Config1);
+ pr_debug("PCI PM wakeup\n");
+ if ((rtl_chip_info[tp->chipset].flags & HasLWake) &&
+ (tmp8 & LWAKE))
+ new_tmp8 &= ~LWAKE;
+ new_tmp8 |= Cfg1_PM_Enable;
+ if (new_tmp8 != tmp8) {
+ RTL_W8 (Cfg9346, Cfg9346_Unlock);
+			RTL_W8 (Config1, new_tmp8);
+ RTL_W8 (Cfg9346, Cfg9346_Lock);
+ }
+ if (rtl_chip_info[tp->chipset].flags & HasLWake) {
+ tmp8 = RTL_R8 (Config4);
+ if (tmp8 & LWPTN) {
+ RTL_W8 (Cfg9346, Cfg9346_Unlock);
+ RTL_W8 (Config4, tmp8 & ~LWPTN);
+ RTL_W8 (Cfg9346, Cfg9346_Lock);
+ }
+ }
+ } else {
+ pr_debug("Old chip wakeup\n");
+ tmp8 = RTL_R8 (Config1);
+ tmp8 &= ~(SLEEP | PWRDN);
+ RTL_W8 (Config1, tmp8);
+ }
+
+ rtl8139_chip_reset (ioaddr);
+
+ return dev;
+
+err_out:
+ __rtl8139_cleanup_dev (dev);
+ if (disable_dev_on_err)
+ pci_disable_device (pdev);
+ return ERR_PTR(rc);
+}
+
+static const struct net_device_ops rtl8139_netdev_ops = {
+ .ndo_open = rtl8139_open,
+ .ndo_stop = rtl8139_close,
+ .ndo_get_stats = rtl8139_get_stats,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = rtl8139_set_mac_address,
+ .ndo_start_xmit = rtl8139_start_xmit,
+ .ndo_set_rx_mode = rtl8139_set_rx_mode,
+ .ndo_do_ioctl = netdev_ioctl,
+ .ndo_tx_timeout = rtl8139_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = rtl8139_poll_controller,
+#endif
+};
+
+static int __devinit rtl8139_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev = NULL;
+ struct rtl8139_private *tp;
+ int i, addr_len, option;
+ void __iomem *ioaddr;
+ static int board_idx = -1;
+
+ assert (pdev != NULL);
+ assert (ent != NULL);
+
+ board_idx++;
+
+ /* when we're built into the kernel, the driver version message
+ * is only printed if at least one 8139 board has been found
+ */
+#ifndef MODULE
+ {
+ static int printed_version;
+ if (!printed_version++)
+ pr_info(RTL8139_DRIVER_NAME "\n");
+ }
+#endif
+
+ if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
+ pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision >= 0x20) {
+ dev_info(&pdev->dev,
+ "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip, use 8139cp\n",
+ pdev->vendor, pdev->device, pdev->revision);
+ return -ENODEV;
+ }
+
+ if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
+ pdev->device == PCI_DEVICE_ID_REALTEK_8139 &&
+ pdev->subsystem_vendor == PCI_VENDOR_ID_ATHEROS &&
+ pdev->subsystem_device == PCI_DEVICE_ID_REALTEK_8139) {
+ pr_info("OQO Model 2 detected. Forcing PIO\n");
+ use_io = 1;
+ }
+
+ dev = rtl8139_init_board (pdev);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ assert (dev != NULL);
+ tp = netdev_priv(dev);
+ tp->dev = dev;
+
+ ioaddr = tp->mmio_addr;
+ assert (ioaddr != NULL);
+
+ addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6;
+ for (i = 0; i < 3; i++)
+ ((__le16 *) (dev->dev_addr))[i] =
+ cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len));
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ /* The Rtl8139-specific entries in the device structure. */
+ dev->netdev_ops = &rtl8139_netdev_ops;
+ dev->ethtool_ops = &rtl8139_ethtool_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ netif_napi_add(dev, &tp->napi, rtl8139_poll, 64);
+
+ /* note: the hardware is not capable of sg/csum/highdma, however
+ * through the use of skb_copy_and_csum_dev we enable these
+ * features
+ */
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
+ dev->vlan_features = dev->features;
+
+ dev->irq = pdev->irq;
+
+ /* tp zeroed and aligned in alloc_etherdev */
+ tp = netdev_priv(dev);
+
+ /* note: tp->chipset set in rtl8139_init_board */
+ tp->drv_flags = board_info[ent->driver_data].hw_flags;
+ tp->mmio_addr = ioaddr;
+ tp->msg_enable =
+ (debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1));
+ spin_lock_init (&tp->lock);
+ spin_lock_init (&tp->rx_lock);
+ INIT_DELAYED_WORK(&tp->thread, rtl8139_thread);
+ tp->mii.dev = dev;
+ tp->mii.mdio_read = mdio_read;
+ tp->mii.mdio_write = mdio_write;
+ tp->mii.phy_id_mask = 0x3f;
+ tp->mii.reg_num_mask = 0x1f;
+
+ /* dev is fully set up and ready to use now */
+ pr_debug("about to register device named %s (%p)...\n",
+ dev->name, dev);
+ i = register_netdev (dev);
+ if (i) goto err_out;
+
+ pci_set_drvdata (pdev, dev);
+
+ netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n",
+ board_info[ent->driver_data].name,
+ dev->base_addr, dev->dev_addr, dev->irq);
+
+ netdev_dbg(dev, "Identified 8139 chip type '%s'\n",
+ rtl_chip_info[tp->chipset].name);
+
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs later, but
+ takes too much time. */
+#ifdef CONFIG_8139TOO_8129
+ if (tp->drv_flags & HAS_MII_XCVR) {
+ int phy, phy_idx = 0;
+ for (phy = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) {
+ int mii_status = mdio_read(dev, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ u16 advertising = mdio_read(dev, phy, 4);
+ tp->phys[phy_idx++] = phy;
+ netdev_info(dev, "MII transceiver %d status 0x%04x advertising %04x\n",
+ phy, mii_status, advertising);
+ }
+ }
+ if (phy_idx == 0) {
+ netdev_info(dev, "No MII transceivers found! Assuming SYM transceiver\n");
+ tp->phys[0] = 32;
+ }
+ } else
+#endif
+ tp->phys[0] = 32;
+ tp->mii.phy_id = tp->phys[0];
+
+ /* The lower four bits are the media type. */
+ option = (board_idx >= MAX_UNITS) ? 0 : media[board_idx];
+ if (option > 0) {
+ tp->mii.full_duplex = (option & 0x210) ? 1 : 0;
+ tp->default_port = option & 0xFF;
+ if (tp->default_port)
+ tp->mii.force_media = 1;
+ }
+ if (board_idx < MAX_UNITS && full_duplex[board_idx] > 0)
+ tp->mii.full_duplex = full_duplex[board_idx];
+ if (tp->mii.full_duplex) {
+ netdev_info(dev, "Media type forced to Full Duplex\n");
+		/* Changing the MII-advertised media might prevent
+		   re-connection. */
+ tp->mii.force_media = 1;
+ }
+ if (tp->default_port) {
+ netdev_info(dev, " Forcing %dMbps %s-duplex operation\n",
+ (option & 0x20 ? 100 : 10),
+ (option & 0x10 ? "full" : "half"));
+ mdio_write(dev, tp->phys[0], 0,
+ ((option & 0x20) ? 0x2000 : 0) | /* 100Mbps? */
+ ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
+ }
+
+ /* Put the chip into low-power mode. */
+ if (rtl_chip_info[tp->chipset].flags & HasHltClk)
+ RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. */
+
+ return 0;
+
+err_out:
+ __rtl8139_cleanup_dev (dev);
+ pci_disable_device (pdev);
+ return i;
+}
+
+
+static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct rtl8139_private *tp = netdev_priv(dev);
+
+ assert (dev != NULL);
+
+ cancel_delayed_work_sync(&tp->thread);
+
+ unregister_netdev (dev);
+
+ __rtl8139_cleanup_dev (dev);
+ pci_disable_device (pdev);
+}
+
+
+/* Serial EEPROM section. */
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
+#define EE_CS 0x08 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
+#define EE_WRITE_0 0x00
+#define EE_WRITE_1 0x02
+#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
+#define EE_ENB (0x80 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+   No extra delay is needed with 33MHz PCI, but 66MHz may change this.
+ */
+
+#define eeprom_delay() (void)RTL_R32(Cfg9346)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD (5)
+#define EE_READ_CMD (6)
+#define EE_ERASE_CMD (7)
+
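+/* Bit-bang a 93Cxx-style read: raise chip select, clock out the start
+   bit and opcode (folded into EE_READ_CMD) followed by addr_len address
+   bits, then clock in 16 data bits, sampling EE_DATA_READ while the
+   clock is high. */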
+static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_len)
+{
+ int i;
+ unsigned retval = 0;
+ int read_cmd = location | (EE_READ_CMD << addr_len);
+
+ RTL_W8 (Cfg9346, EE_ENB & ~EE_CS);
+ RTL_W8 (Cfg9346, EE_ENB);
+ eeprom_delay ();
+
+ /* Shift the read command bits out. */
+ for (i = 4 + addr_len; i >= 0; i--) {
+ int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ RTL_W8 (Cfg9346, EE_ENB | dataval);
+ eeprom_delay ();
+ RTL_W8 (Cfg9346, EE_ENB | dataval | EE_SHIFT_CLK);
+ eeprom_delay ();
+ }
+ RTL_W8 (Cfg9346, EE_ENB);
+ eeprom_delay ();
+
+ for (i = 16; i > 0; i--) {
+ RTL_W8 (Cfg9346, EE_ENB | EE_SHIFT_CLK);
+ eeprom_delay ();
+ retval =
+ (retval << 1) | ((RTL_R8 (Cfg9346) & EE_DATA_READ) ? 1 :
+ 0);
+ RTL_W8 (Cfg9346, EE_ENB);
+ eeprom_delay ();
+ }
+
+ /* Terminate the EEPROM access. */
+ RTL_W8 (Cfg9346, ~EE_CS);
+ eeprom_delay ();
+
+ return retval;
+}
+
+/* MII serial management: mostly bogus for now. */
+/* Read and write the MII management registers using software-generated
+ serial MDIO protocol.
+   The maximum data clock rate is 2.5 MHz. The minimum timing is usually
+ met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+ "overclocking" issues. */
+#define MDIO_DIR 0x80
+#define MDIO_DATA_OUT 0x04
+#define MDIO_DATA_IN 0x02
+#define MDIO_CLK 0x01
+#define MDIO_WRITE0 (MDIO_DIR)
+#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
+
+#define mdio_delay() RTL_R8(Config4)
+
+
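+/* The 8139's internal transceiver has no MDIO bus; MII register numbers
+   are translated to the equivalent on-chip registers via this map (zero
+   entries have no counterpart and read as 0). */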
+static const char mii_2_8139_map[8] = {
+ BasicModeCtrl,
+ BasicModeStatus,
+ 0,
+ 0,
+ NWayAdvert,
+ NWayLPAR,
+ NWayExpansion,
+ 0
+};
+
+
+#ifdef CONFIG_8139TOO_8129
+/* Synchronize the MII management interface by shifting 32 one bits out. */
+static void mdio_sync (void __iomem *ioaddr)
+{
+ int i;
+
+ for (i = 32; i >= 0; i--) {
+ RTL_W8 (Config4, MDIO_WRITE1);
+ mdio_delay ();
+ RTL_W8 (Config4, MDIO_WRITE1 | MDIO_CLK);
+ mdio_delay ();
+ }
+}
+#endif
+
+static int mdio_read (struct net_device *dev, int phy_id, int location)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ int retval = 0;
+#ifdef CONFIG_8139TOO_8129
+ void __iomem *ioaddr = tp->mmio_addr;
+ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ int i;
+#endif
+
+ if (phy_id > 31) { /* Really a 8139. Use internal registers. */
+ void __iomem *ioaddr = tp->mmio_addr;
+ return location < 8 && mii_2_8139_map[location] ?
+ RTL_R16 (mii_2_8139_map[location]) : 0;
+ }
+
+#ifdef CONFIG_8139TOO_8129
+ mdio_sync (ioaddr);
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
+
+ RTL_W8 (Config4, MDIO_DIR | dataval);
+ mdio_delay ();
+ RTL_W8 (Config4, MDIO_DIR | dataval | MDIO_CLK);
+ mdio_delay ();
+ }
+
+	/* Read the two transition bits, 16 data bits, and the wire-idle bit. */
+ for (i = 19; i > 0; i--) {
+ RTL_W8 (Config4, 0);
+ mdio_delay ();
+ retval = (retval << 1) | ((RTL_R8 (Config4) & MDIO_DATA_IN) ? 1 : 0);
+ RTL_W8 (Config4, MDIO_CLK);
+ mdio_delay ();
+ }
+#endif
+
+ return (retval >> 1) & 0xffff;
+}
+
+
+static void mdio_write (struct net_device *dev, int phy_id, int location,
+ int value)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+#ifdef CONFIG_8139TOO_8129
+ void __iomem *ioaddr = tp->mmio_addr;
+ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
+ int i;
+#endif
+
+ if (phy_id > 31) { /* Really a 8139. Use internal registers. */
+ void __iomem *ioaddr = tp->mmio_addr;
+ if (location == 0) {
+ RTL_W8 (Cfg9346, Cfg9346_Unlock);
+ RTL_W16 (BasicModeCtrl, value);
+ RTL_W8 (Cfg9346, Cfg9346_Lock);
+ } else if (location < 8 && mii_2_8139_map[location])
+ RTL_W16 (mii_2_8139_map[location], value);
+ return;
+ }
+
+#ifdef CONFIG_8139TOO_8129
+ mdio_sync (ioaddr);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval =
+ (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+ RTL_W8 (Config4, dataval);
+ mdio_delay ();
+ RTL_W8 (Config4, dataval | MDIO_CLK);
+ mdio_delay ();
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ RTL_W8 (Config4, 0);
+ mdio_delay ();
+ RTL_W8 (Config4, MDIO_CLK);
+ mdio_delay ();
+ }
+#endif
+}
+
+
+static int rtl8139_open (struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ int retval;
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ retval = request_irq (dev->irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev);
+ if (retval)
+ return retval;
+
+ tp->tx_bufs = dma_alloc_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN,
+ &tp->tx_bufs_dma, GFP_KERNEL);
+ tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN,
+ &tp->rx_ring_dma, GFP_KERNEL);
+ if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
+ free_irq(dev->irq, dev);
+
+ if (tp->tx_bufs)
+ dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN,
+ tp->tx_bufs, tp->tx_bufs_dma);
+ if (tp->rx_ring)
+ dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN,
+ tp->rx_ring, tp->rx_ring_dma);
+
+ return -ENOMEM;
+
+ }
+
+ napi_enable(&tp->napi);
+
+ tp->mii.full_duplex = tp->mii.force_media;
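+	/* The early-Tx threshold lives in TxStatus bits 21:16, in units of
+	 * 32 bytes: (256 << 11) & 0x003f0000 puts 256/32 == 8 in that field. */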
+ tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000;
+
+ rtl8139_init_ring (dev);
+ rtl8139_hw_start (dev);
+ netif_start_queue (dev);
+
+ netif_dbg(tp, ifup, dev,
+ "%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n",
+ __func__,
+ (unsigned long long)pci_resource_start (tp->pci_dev, 1),
+ dev->irq, RTL_R8 (MediaStatus),
+ tp->mii.full_duplex ? "full" : "half");
+
+ rtl8139_start_thread(tp);
+
+ return 0;
+}
+
+
+static void rtl_check_media (struct net_device *dev, unsigned int init_media)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+
+ if (tp->phys[0] >= 0) {
+ mii_check_media(&tp->mii, netif_msg_link(tp), init_media);
+ }
+}
+
+/* Start the hardware at open or resume. */
+static void rtl8139_hw_start (struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ u32 i;
+ u8 tmp;
+
+ /* Bring old chips out of low-power mode. */
+ if (rtl_chip_info[tp->chipset].flags & HasHltClk)
+ RTL_W8 (HltClk, 'R');
+
+ rtl8139_chip_reset (ioaddr);
+
+ /* unlock Config[01234] and BMCR register writes */
+ RTL_W8_F (Cfg9346, Cfg9346_Unlock);
+ /* Restore our idea of the MAC address. */
+ RTL_W32_F (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
+ RTL_W32_F (MAC0 + 4, le16_to_cpu (*(__le16 *) (dev->dev_addr + 4)));
+
+ tp->cur_rx = 0;
+
+ /* init Rx ring buffer DMA address */
+ RTL_W32_F (RxBuf, tp->rx_ring_dma);
+
+ /* Must enable Tx/Rx before setting transfer thresholds! */
+ RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
+
+ tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys;
+ RTL_W32 (RxConfig, tp->rx_config);
+ RTL_W32 (TxConfig, rtl8139_tx_config);
+
+ rtl_check_media (dev, 1);
+
+ if (tp->chipset >= CH_8139B) {
+ /* Disable magic packet scanning, which is enabled
+ * when PM is enabled in Config1. It can be reenabled
+ * via ETHTOOL_SWOL if desired. */
+ RTL_W8 (Config3, RTL_R8 (Config3) & ~Cfg3_Magic);
+ }
+
+ netdev_dbg(dev, "init buffer addresses\n");
+
+ /* Lock Config[01234] and BMCR register writes */
+ RTL_W8 (Cfg9346, Cfg9346_Lock);
+
+ /* init Tx buffer DMA addresses */
+ for (i = 0; i < NUM_TX_DESC; i++)
+ RTL_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));
+
+ RTL_W32 (RxMissed, 0);
+
+ rtl8139_set_rx_mode (dev);
+
+ /* no early-rx interrupts */
+ RTL_W16 (MultiIntr, RTL_R16 (MultiIntr) & MultiIntrClear);
+
+ /* make sure RxTx has started */
+ tmp = RTL_R8 (ChipCmd);
+ if ((!(tmp & CmdRxEnb)) || (!(tmp & CmdTxEnb)))
+ RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+ RTL_W16 (IntrMask, rtl8139_intr_mask);
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void rtl8139_init_ring (struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ int i;
+
+ tp->cur_rx = 0;
+ tp->cur_tx = 0;
+ tp->dirty_tx = 0;
+
+ for (i = 0; i < NUM_TX_DESC; i++)
+ tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE];
+}
+
+
+/* This must be global for CONFIG_8139TOO_TUNE_TWISTER case */
+static int next_tick = 3 * HZ;
+
+#ifndef CONFIG_8139TOO_TUNE_TWISTER
+static inline void rtl8139_tune_twister (struct net_device *dev,
+ struct rtl8139_private *tp) {}
+#else
+enum TwisterParamVals {
+ PARA78_default = 0x78fa8388,
+ PARA7c_default = 0xcb38de43, /* param[0][3] */
+ PARA7c_xxx = 0xcb38de43,
+};
+
+static const unsigned long param[4][4] = {
+ {0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
+ {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
+ {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
+ {0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
+};
+
+static void rtl8139_tune_twister (struct net_device *dev,
+ struct rtl8139_private *tp)
+{
+ int linkcase;
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ /* This is a complicated state machine to configure the "twister" for
+ impedance/echos based on the cable length.
+ All of this is magic and undocumented.
+ */
+ switch (tp->twistie) {
+ case 1:
+ if (RTL_R16 (CSCR) & CSCR_LinkOKBit) {
+ /* We have link beat, let us tune the twister. */
+ RTL_W16 (CSCR, CSCR_LinkDownOffCmd);
+ tp->twistie = 2; /* Change to state 2. */
+ next_tick = HZ / 10;
+ } else {
+ /* Just put in some reasonable defaults for when beat returns. */
+ RTL_W16 (CSCR, CSCR_LinkDownCmd);
+ RTL_W32 (FIFOTMS, 0x20); /* Turn on cable test mode. */
+ RTL_W32 (PARA78, PARA78_default);
+ RTL_W32 (PARA7c, PARA7c_default);
+ tp->twistie = 0; /* Bail from future actions. */
+ }
+ break;
+ case 2:
+ /* Read how long it took to hear the echo. */
+ linkcase = RTL_R16 (CSCR) & CSCR_LinkStatusBits;
+ if (linkcase == 0x7000)
+ tp->twist_row = 3;
+ else if (linkcase == 0x3000)
+ tp->twist_row = 2;
+ else if (linkcase == 0x1000)
+ tp->twist_row = 1;
+ else
+ tp->twist_row = 0;
+ tp->twist_col = 0;
+		tp->twistie = 3; /* Change to state 3. */
+ next_tick = HZ / 10;
+ break;
+ case 3:
+ /* Put out four tuning parameters, one per 100msec. */
+ if (tp->twist_col == 0)
+ RTL_W16 (FIFOTMS, 0);
+ RTL_W32 (PARA7c, param[(int) tp->twist_row]
+ [(int) tp->twist_col]);
+ next_tick = HZ / 10;
+ if (++tp->twist_col >= 4) {
+ /* For short cables we are done.
+ For long cables (row == 3) check for mistune. */
+ tp->twistie =
+ (tp->twist_row == 3) ? 4 : 0;
+ }
+ break;
+ case 4:
+ /* Special case for long cables: check for mistune. */
+ if ((RTL_R16 (CSCR) &
+ CSCR_LinkStatusBits) == 0x7000) {
+ tp->twistie = 0;
+ break;
+ } else {
+ RTL_W32 (PARA7c, 0xfb38de03);
+ tp->twistie = 5;
+ next_tick = HZ / 10;
+ }
+ break;
+ case 5:
+ /* Retune for shorter cable (column 2). */
+ RTL_W32 (FIFOTMS, 0x20);
+ RTL_W32 (PARA78, PARA78_default);
+ RTL_W32 (PARA7c, PARA7c_default);
+ RTL_W32 (FIFOTMS, 0x00);
+ tp->twist_row = 2;
+ tp->twist_col = 0;
+ tp->twistie = 3;
+ next_tick = HZ / 10;
+ break;
+
+ default:
+ /* do nothing */
+ break;
+ }
+}
+#endif /* CONFIG_8139TOO_TUNE_TWISTER */
+
+static inline void rtl8139_thread_iter (struct net_device *dev,
+ struct rtl8139_private *tp,
+ void __iomem *ioaddr)
+{
+ int mii_lpa;
+
+ mii_lpa = mdio_read (dev, tp->phys[0], MII_LPA);
+
+ if (!tp->mii.force_media && mii_lpa != 0xffff) {
+ int duplex = ((mii_lpa & LPA_100FULL) ||
+ (mii_lpa & 0x01C0) == 0x0040);
+ if (tp->mii.full_duplex != duplex) {
+ tp->mii.full_duplex = duplex;
+
+ if (mii_lpa) {
+ netdev_info(dev, "Setting %s-duplex based on MII #%d link partner ability of %04x\n",
+ tp->mii.full_duplex ? "full" : "half",
+ tp->phys[0], mii_lpa);
+ } else {
+ netdev_info(dev, "media is unconnected, link down, or incompatible connection\n");
+ }
+#if 0
+ RTL_W8 (Cfg9346, Cfg9346_Unlock);
+ RTL_W8 (Config1, tp->mii.full_duplex ? 0x60 : 0x20);
+ RTL_W8 (Cfg9346, Cfg9346_Lock);
+#endif
+ }
+ }
+
+ next_tick = HZ * 60;
+
+ rtl8139_tune_twister (dev, tp);
+
+ netdev_dbg(dev, "Media selection tick, Link partner %04x\n",
+ RTL_R16(NWayLPAR));
+ netdev_dbg(dev, "Other registers are IntMask %04x IntStatus %04x\n",
+ RTL_R16(IntrMask), RTL_R16(IntrStatus));
+ netdev_dbg(dev, "Chip config %02x %02x\n",
+ RTL_R8(Config0), RTL_R8(Config1));
+}
+
+static void rtl8139_thread (struct work_struct *work)
+{
+ struct rtl8139_private *tp =
+ container_of(work, struct rtl8139_private, thread.work);
+ struct net_device *dev = tp->mii.dev;
+ unsigned long thr_delay = next_tick;
+
+ rtnl_lock();
+
+ if (!netif_running(dev))
+ goto out_unlock;
+
+ if (tp->watchdog_fired) {
+ tp->watchdog_fired = 0;
+ rtl8139_tx_timeout_task(work);
+ } else
+ rtl8139_thread_iter(dev, tp, tp->mmio_addr);
+
+ if (tp->have_thread)
+ schedule_delayed_work(&tp->thread, thr_delay);
+out_unlock:
+ rtnl_unlock ();
+}
+
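+/* The worker thread is only needed for twister tuning on the original
+   8139 rev K, and for boards without link-change interrupts (RTL8129),
+   which must poll the MII instead. */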
+static void rtl8139_start_thread(struct rtl8139_private *tp)
+{
+ tp->twistie = 0;
+ if (tp->chipset == CH_8139_K)
+ tp->twistie = 1;
+ else if (tp->drv_flags & HAS_LNK_CHNG)
+ return;
+
+ tp->have_thread = 1;
+ tp->watchdog_fired = 0;
+
+ schedule_delayed_work(&tp->thread, next_tick);
+}
+
+static inline void rtl8139_tx_clear (struct rtl8139_private *tp)
+{
+ tp->cur_tx = 0;
+ tp->dirty_tx = 0;
+
+ /* XXX account for unsent Tx packets in tp->stats.tx_dropped */
+}
+
+static void rtl8139_tx_timeout_task (struct work_struct *work)
+{
+ struct rtl8139_private *tp =
+ container_of(work, struct rtl8139_private, thread.work);
+ struct net_device *dev = tp->mii.dev;
+ void __iomem *ioaddr = tp->mmio_addr;
+ int i;
+ u8 tmp8;
+
+ netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n",
+ RTL_R8(ChipCmd), RTL_R16(IntrStatus),
+ RTL_R16(IntrMask), RTL_R8(MediaStatus));
+ /* Emit info to figure out what went wrong. */
+ netdev_dbg(dev, "Tx queue start entry %ld dirty entry %ld\n",
+ tp->cur_tx, tp->dirty_tx);
+ for (i = 0; i < NUM_TX_DESC; i++)
+ netdev_dbg(dev, "Tx descriptor %d is %08x%s\n",
+ i, RTL_R32(TxStatus0 + (i * 4)),
+ i == tp->dirty_tx % NUM_TX_DESC ?
+ " (queue head)" : "");
+
+ tp->xstats.tx_timeouts++;
+
+ /* disable Tx ASAP, if not already */
+ tmp8 = RTL_R8 (ChipCmd);
+ if (tmp8 & CmdTxEnb)
+ RTL_W8 (ChipCmd, CmdRxEnb);
+
+ spin_lock_bh(&tp->rx_lock);
+ /* Disable interrupts by clearing the interrupt mask. */
+ RTL_W16 (IntrMask, 0x0000);
+
+ /* Stop a shared interrupt from scavenging while we are. */
+ spin_lock_irq(&tp->lock);
+ rtl8139_tx_clear (tp);
+ spin_unlock_irq(&tp->lock);
+
+ /* ...and finally, reset everything */
+ if (netif_running(dev)) {
+ rtl8139_hw_start (dev);
+ netif_wake_queue (dev);
+ }
+ spin_unlock_bh(&tp->rx_lock);
+}
+
+static void rtl8139_tx_timeout (struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+
+ tp->watchdog_fired = 1;
+ if (!tp->have_thread) {
+ INIT_DELAYED_WORK(&tp->thread, rtl8139_thread);
+ schedule_delayed_work(&tp->thread, next_tick);
+ }
+}
+
+static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned int entry;
+ unsigned int len = skb->len;
+ unsigned long flags;
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = tp->cur_tx % NUM_TX_DESC;
+
+ /* Note: the chip doesn't have auto-pad! */
+ if (likely(len < TX_BUF_SIZE)) {
+ if (len < ETH_ZLEN)
+ memset(tp->tx_buf[entry], 0, ETH_ZLEN);
+ skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
+ dev_kfree_skb(skb);
+ } else {
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ spin_lock_irqsave(&tp->lock, flags);
+ /*
+ * Writing to TxStatus triggers a DMA transfer of the data
+ * copied to tp->tx_buf[entry] above. Use a memory barrier
+ * to make sure that the device sees the updated data.
+ */
+ wmb();
+ RTL_W32_F (TxStatus0 + (entry * sizeof (u32)),
+ tp->tx_flag | max(len, (unsigned int)ETH_ZLEN));
+
+ tp->cur_tx++;
+
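+	/* All NUM_TX_DESC bounce buffers are now in flight; stop the queue
+	 * until the Tx-done interrupt retires an entry. */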
+ if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
+ netif_stop_queue (dev);
+ spin_unlock_irqrestore(&tp->lock, flags);
+
+ netif_dbg(tp, tx_queued, dev, "Queued Tx packet size %u to slot %d\n",
+ len, entry);
+
+ return NETDEV_TX_OK;
+}
+
+
+static void rtl8139_tx_interrupt (struct net_device *dev,
+ struct rtl8139_private *tp,
+ void __iomem *ioaddr)
+{
+ unsigned long dirty_tx, tx_left;
+
+ assert (dev != NULL);
+ assert (ioaddr != NULL);
+
+ dirty_tx = tp->dirty_tx;
+ tx_left = tp->cur_tx - dirty_tx;
+ while (tx_left > 0) {
+ int entry = dirty_tx % NUM_TX_DESC;
+ int txstatus;
+
+ txstatus = RTL_R32 (TxStatus0 + (entry * sizeof (u32)));
+
+ if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted)))
+ break; /* It still hasn't been Txed */
+
+		/* Note: TxCarrierLost is always asserted at 100Mbps. */
+		if (txstatus & (TxOutOfWindow | TxAborted)) {
+			/* There was a major error, log it. */
+ netif_dbg(tp, tx_err, dev, "Transmit error, Tx status %08x\n",
+ txstatus);
+ dev->stats.tx_errors++;
+ if (txstatus & TxAborted) {
+ dev->stats.tx_aborted_errors++;
+ RTL_W32 (TxConfig, TxClearAbt);
+ RTL_W16 (IntrStatus, TxErr);
+ wmb();
+ }
+ if (txstatus & TxCarrierLost)
+ dev->stats.tx_carrier_errors++;
+ if (txstatus & TxOutOfWindow)
+ dev->stats.tx_window_errors++;
+ } else {
+ if (txstatus & TxUnderrun) {
+ /* Add 64 to the Tx FIFO threshold. */
+ if (tp->tx_flag < 0x00300000)
+ tp->tx_flag += 0x00020000;
+ dev->stats.tx_fifo_errors++;
+ }
+ dev->stats.collisions += (txstatus >> 24) & 15;
+ dev->stats.tx_bytes += txstatus & 0x7ff;
+ dev->stats.tx_packets++;
+ }
+
+ dirty_tx++;
+ tx_left--;
+ }
+
+#ifndef RTL8139_NDEBUG
+ if (tp->cur_tx - dirty_tx > NUM_TX_DESC) {
+ netdev_err(dev, "Out-of-sync dirty pointer, %ld vs. %ld\n",
+ dirty_tx, tp->cur_tx);
+ dirty_tx += NUM_TX_DESC;
+ }
+#endif /* RTL8139_NDEBUG */
+
+ /* only wake the queue if we did work, and the queue is stopped */
+ if (tp->dirty_tx != dirty_tx) {
+ tp->dirty_tx = dirty_tx;
+ mb();
+ netif_wake_queue (dev);
+ }
+}
+
+
+/* TODO: clean this up! Rx reset need not be this intensive */
+static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
+ struct rtl8139_private *tp, void __iomem *ioaddr)
+{
+ u8 tmp8;
+#ifdef CONFIG_8139_OLD_RX_RESET
+ int tmp_work;
+#endif
+
+ netif_dbg(tp, rx_err, dev, "Ethernet frame had errors, status %08x\n",
+ rx_status);
+ dev->stats.rx_errors++;
+ if (!(rx_status & RxStatusOK)) {
+ if (rx_status & RxTooLong) {
+ netdev_dbg(dev, "Oversized Ethernet frame, status %04x!\n",
+ rx_status);
+ /* A.C.: The chip hangs here. */
+ }
+ if (rx_status & (RxBadSymbol | RxBadAlign))
+ dev->stats.rx_frame_errors++;
+ if (rx_status & (RxRunt | RxTooLong))
+ dev->stats.rx_length_errors++;
+ if (rx_status & RxCRCErr)
+ dev->stats.rx_crc_errors++;
+ } else {
+ tp->xstats.rx_lost_in_ring++;
+ }
+
+#ifndef CONFIG_8139_OLD_RX_RESET
+ tmp8 = RTL_R8 (ChipCmd);
+ RTL_W8 (ChipCmd, tmp8 & ~CmdRxEnb);
+ RTL_W8 (ChipCmd, tmp8);
+ RTL_W32 (RxConfig, tp->rx_config);
+ tp->cur_rx = 0;
+#else
+ /* Reset the receiver, based on RealTek recommendation. (Bug?) */
+
+ /* disable receive */
+ RTL_W8_F (ChipCmd, CmdTxEnb);
+ tmp_work = 200;
+ while (--tmp_work > 0) {
+ udelay(1);
+ tmp8 = RTL_R8 (ChipCmd);
+ if (!(tmp8 & CmdRxEnb))
+ break;
+ }
+ if (tmp_work <= 0)
+ netdev_warn(dev, "rx stop wait too long\n");
+ /* restart receive */
+ tmp_work = 200;
+ while (--tmp_work > 0) {
+ RTL_W8_F (ChipCmd, CmdRxEnb | CmdTxEnb);
+ udelay(1);
+ tmp8 = RTL_R8 (ChipCmd);
+ if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb))
+ break;
+ }
+ if (tmp_work <= 0)
+ netdev_warn(dev, "tx/rx enable wait too long\n");
+
+ /* and reinitialize all rx related registers */
+ RTL_W8_F (Cfg9346, Cfg9346_Unlock);
+ /* Must enable Tx/Rx before setting transfer thresholds! */
+ RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
+
+ tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys;
+ RTL_W32 (RxConfig, tp->rx_config);
+ tp->cur_rx = 0;
+
+ netdev_dbg(dev, "init buffer addresses\n");
+
+ /* Lock Config[01234] and BMCR register writes */
+ RTL_W8 (Cfg9346, Cfg9346_Lock);
+
+ /* init Rx ring buffer DMA address */
+ RTL_W32_F (RxBuf, tp->rx_ring_dma);
+
+ /* A.C.: Reset the multicast list. */
+ __set_rx_mode (dev);
+#endif
+}
+
+#if RX_BUF_IDX == 3
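+/* The 64K ring cannot have hardware wrap disabled, so a frame may
+   straddle the end of the buffer; copy it out in two pieces. */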
+static inline void wrap_copy(struct sk_buff *skb, const unsigned char *ring,
+ u32 offset, unsigned int size)
+{
+ u32 left = RX_BUF_LEN - offset;
+
+ if (size > left) {
+ skb_copy_to_linear_data(skb, ring + offset, left);
+ skb_copy_to_linear_data_offset(skb, left, ring, size - left);
+ } else
+ skb_copy_to_linear_data(skb, ring + offset, size);
+}
+#endif
+
+static void rtl8139_isr_ack(struct rtl8139_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ u16 status;
+
+ status = RTL_R16 (IntrStatus) & RxAckBits;
+
+ /* Clear out errors and receive interrupts */
+ if (likely(status != 0)) {
+ if (unlikely(status & (RxFIFOOver | RxOverflow))) {
+ tp->dev->stats.rx_errors++;
+ if (status & RxFIFOOver)
+ tp->dev->stats.rx_fifo_errors++;
+ }
+ RTL_W16_F (IntrStatus, RxAckBits);
+ }
+}
+
+static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
+ int budget)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ int received = 0;
+ unsigned char *rx_ring = tp->rx_ring;
+ unsigned int cur_rx = tp->cur_rx;
+ unsigned int rx_size = 0;
+
+ netdev_dbg(dev, "In %s(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
+ __func__, (u16)cur_rx,
+ RTL_R16(RxBufAddr), RTL_R16(RxBufPtr), RTL_R8(ChipCmd));
+
+ while (netif_running(dev) && received < budget &&
+ (RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
+ u32 ring_offset = cur_rx % RX_BUF_LEN;
+ u32 rx_status;
+ unsigned int pkt_size;
+ struct sk_buff *skb;
+
+ rmb();
+
+ /* read size+status of next frame from DMA ring buffer */
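+		/* low 16 bits are status, high 16 bits are the frame length,
+		 * which includes the 4-byte FCS */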
+ rx_status = le32_to_cpu (*(__le32 *) (rx_ring + ring_offset));
+ rx_size = rx_status >> 16;
+ pkt_size = rx_size - 4;
+
+ netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n",
+ __func__, rx_status, rx_size, cur_rx);
+#if RTL8139_DEBUG > 2
+ print_hex_dump(KERN_DEBUG, "Frame contents: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ &rx_ring[ring_offset], 70, true);
+#endif
+
+ /* Packet copy from FIFO still in progress.
+ * Theoretically, this should never happen
+ * since EarlyRx is disabled.
+ */
+ if (unlikely(rx_size == 0xfff0)) {
+ if (!tp->fifo_copy_timeout)
+ tp->fifo_copy_timeout = jiffies + 2;
+ else if (time_after(jiffies, tp->fifo_copy_timeout)) {
+ netdev_dbg(dev, "hung FIFO. Reset\n");
+ rx_size = 0;
+ goto no_early_rx;
+ }
+ netif_dbg(tp, intr, dev, "fifo copy in progress\n");
+ tp->xstats.early_rx++;
+ break;
+ }
+
+no_early_rx:
+ tp->fifo_copy_timeout = 0;
+
+ /* If Rx err or invalid rx_size/rx_status received
+ * (which happens if we get lost in the ring),
+ * Rx process gets reset, so we abort any further
+ * Rx processing.
+ */
+ if (unlikely((rx_size > (MAX_ETH_FRAME_SIZE+4)) ||
+ (rx_size < 8) ||
+ (!(rx_status & RxStatusOK)))) {
+ rtl8139_rx_err (rx_status, dev, tp, ioaddr);
+ received = -1;
+ goto out;
+ }
+
+ /* Malloc up new buffer, compatible with net-2e. */
+ /* Omit the four octet CRC from the length. */
+
+ skb = netdev_alloc_skb_ip_align(dev, pkt_size);
+ if (likely(skb)) {
+#if RX_BUF_IDX == 3
+ wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
+#else
+ skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size);
+#endif
+ skb_put (skb, pkt_size);
+
+ skb->protocol = eth_type_trans (skb, dev);
+
+ dev->stats.rx_bytes += pkt_size;
+ dev->stats.rx_packets++;
+
+ netif_receive_skb (skb);
+ } else {
+ if (net_ratelimit())
+ netdev_warn(dev, "Memory squeeze, dropping packet\n");
+ dev->stats.rx_dropped++;
+ }
+ received++;
+
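+		/* Skip the 4-byte header plus frame, rounded up to a dword
+		 boundary, and tell the chip how far we have read; the chip
+		 seems to want RxBufPtr (CAPR) kept 16 bytes behind the true
+		 read pointer. */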
+ cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
+ RTL_W16 (RxBufPtr, (u16) (cur_rx - 16));
+
+ rtl8139_isr_ack(tp);
+ }
+
+ if (unlikely(!received || rx_size == 0xfff0))
+ rtl8139_isr_ack(tp);
+
+ netdev_dbg(dev, "Done %s(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
+ __func__, cur_rx,
+ RTL_R16(RxBufAddr), RTL_R16(RxBufPtr), RTL_R8(ChipCmd));
+
+ tp->cur_rx = cur_rx;
+
+ /*
+ * The receive buffer should be mostly empty.
+ * Tell NAPI to reenable the Rx irq.
+ */
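+	/* ...unless a FIFO copy is still pending: then claim the whole
+	 budget so NAPI keeps polling instead of re-enabling the irq. */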
+ if (tp->fifo_copy_timeout)
+ received = budget;
+
+out:
+ return received;
+}
+
+
+static void rtl8139_weird_interrupt (struct net_device *dev,
+ struct rtl8139_private *tp,
+ void __iomem *ioaddr,
+ int status, int link_changed)
+{
+ netdev_dbg(dev, "Abnormal interrupt, status %08x\n", status);
+
+ assert (dev != NULL);
+ assert (tp != NULL);
+ assert (ioaddr != NULL);
+
+ /* Update the error count. */
+ dev->stats.rx_missed_errors += RTL_R32 (RxMissed);
+ RTL_W32 (RxMissed, 0);
+
+ if ((status & RxUnderrun) && link_changed &&
+ (tp->drv_flags & HAS_LNK_CHNG)) {
+ rtl_check_media(dev, 0);
+ status &= ~RxUnderrun;
+ }
+
+ if (status & (RxUnderrun | RxErr))
+ dev->stats.rx_errors++;
+
+ if (status & PCSTimeout)
+ dev->stats.rx_length_errors++;
+ if (status & RxUnderrun)
+ dev->stats.rx_fifo_errors++;
+ if (status & PCIErr) {
+ u16 pci_cmd_status;
+ pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status);
+ pci_write_config_word (tp->pci_dev, PCI_STATUS, pci_cmd_status);
+
+ netdev_err(dev, "PCI Bus error %04x\n", pci_cmd_status);
+ }
+}
+
+static int rtl8139_poll(struct napi_struct *napi, int budget)
+{
+ struct rtl8139_private *tp = container_of(napi, struct rtl8139_private, napi);
+ struct net_device *dev = tp->dev;
+ void __iomem *ioaddr = tp->mmio_addr;
+ int work_done;
+
+ spin_lock(&tp->rx_lock);
+ work_done = 0;
+ if (likely(RTL_R16(IntrStatus) & RxAckBits))
+ work_done += rtl8139_rx(dev, tp, budget);
+
+ if (work_done < budget) {
+ unsigned long flags;
+ /*
+ * Order is important since data can get interrupted
+ * again when we think we are done.
+ */
+ spin_lock_irqsave(&tp->lock, flags);
+ __napi_complete(napi);
+ RTL_W16_F(IntrMask, rtl8139_intr_mask);
+ spin_unlock_irqrestore(&tp->lock, flags);
+ }
+ spin_unlock(&tp->rx_lock);
+
+ return work_done;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance)
+{
+ struct net_device *dev = (struct net_device *) dev_instance;
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ u16 status, ackstat;
+ int link_changed = 0; /* avoid bogus "uninit" warning */
+ int handled = 0;
+
+ spin_lock (&tp->lock);
+ status = RTL_R16 (IntrStatus);
+
+ /* shared irq? */
+ if (unlikely((status & rtl8139_intr_mask) == 0))
+ goto out;
+
+ handled = 1;
+
+ /* h/w no longer present (hotplug?) or major error, bail */
+ if (unlikely(status == 0xFFFF))
+ goto out;
+
+	/* close possible races with dev_close */
+ if (unlikely(!netif_running(dev))) {
+ RTL_W16 (IntrMask, 0);
+ goto out;
+ }
+
+	/* Acknowledge all of the current interrupt sources ASAP, but
+	   first get an additional status bit from CSCR. */
+ if (unlikely(status & RxUnderrun))
+ link_changed = RTL_R16 (CSCR) & CSCR_LinkChangeBit;
+
+ ackstat = status & ~(RxAckBits | TxErr);
+ if (ackstat)
+ RTL_W16 (IntrStatus, ackstat);
+
+ /* Receive packets are processed by poll routine.
+	   If not running, start it now. */
+	if (status & RxAckBits) {
+ if (napi_schedule_prep(&tp->napi)) {
+ RTL_W16_F (IntrMask, rtl8139_norx_intr_mask);
+ __napi_schedule(&tp->napi);
+ }
+ }
+
+ /* Check uncommon events with one test. */
+ if (unlikely(status & (PCIErr | PCSTimeout | RxUnderrun | RxErr)))
+ rtl8139_weird_interrupt (dev, tp, ioaddr,
+ status, link_changed);
+
+ if (status & (TxOK | TxErr)) {
+ rtl8139_tx_interrupt (dev, tp, ioaddr);
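+		/* TxErr was deliberately kept out of ackstat above so the
+		 Tx path could inspect it first; acknowledge it only now. */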
+ if (status & TxErr)
+ RTL_W16 (IntrStatus, TxErr);
+ }
+ out:
+ spin_unlock (&tp->lock);
+
+ netdev_dbg(dev, "exiting interrupt, intr_status=%#4.4x\n",
+ RTL_R16(IntrStatus));
+ return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void rtl8139_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ rtl8139_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static int rtl8139_set_mac_address(struct net_device *dev, void *p)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ spin_lock_irq(&tp->lock);
+
+ RTL_W8_F(Cfg9346, Cfg9346_Unlock);
+ RTL_W32_F(MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
+ RTL_W32_F(MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
+ RTL_W8_F(Cfg9346, Cfg9346_Lock);
+
+ spin_unlock_irq(&tp->lock);
+
+ return 0;
+}
+
+static int rtl8139_close (struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+ napi_disable(&tp->napi);
+
+ netif_dbg(tp, ifdown, dev, "Shutting down ethercard, status was 0x%04x\n",
+ RTL_R16(IntrStatus));
+
+ spin_lock_irqsave (&tp->lock, flags);
+
+ /* Stop the chip's Tx and Rx DMA processes. */
+ RTL_W8 (ChipCmd, 0);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ RTL_W16 (IntrMask, 0);
+
+ /* Update the error counts. */
+ dev->stats.rx_missed_errors += RTL_R32 (RxMissed);
+ RTL_W32 (RxMissed, 0);
+
+ spin_unlock_irqrestore (&tp->lock, flags);
+
+ free_irq (dev->irq, dev);
+
+ rtl8139_tx_clear (tp);
+
+ dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN,
+ tp->rx_ring, tp->rx_ring_dma);
+ dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN,
+ tp->tx_bufs, tp->tx_bufs_dma);
+ tp->rx_ring = NULL;
+ tp->tx_bufs = NULL;
+
+ /* Green! Put the chip in low-power mode. */
+ RTL_W8 (Cfg9346, Cfg9346_Unlock);
+
+ if (rtl_chip_info[tp->chipset].flags & HasHltClk)
+ RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. */
+
+ return 0;
+}
+
+
+/* Get the ethtool Wake-on-LAN settings. Assumes that wol points to
+ kernel memory, *wol has been initialized as {ETHTOOL_GWOL}, and
+ other threads or interrupts aren't messing with the 8139. */
+static void rtl8139_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ spin_lock_irq(&tp->lock);
+ if (rtl_chip_info[tp->chipset].flags & HasLWake) {
+ u8 cfg3 = RTL_R8 (Config3);
+ u8 cfg5 = RTL_R8 (Config5);
+
+ wol->supported = WAKE_PHY | WAKE_MAGIC
+ | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
+
+ wol->wolopts = 0;
+ if (cfg3 & Cfg3_LinkUp)
+ wol->wolopts |= WAKE_PHY;
+ if (cfg3 & Cfg3_Magic)
+ wol->wolopts |= WAKE_MAGIC;
+ /* (KON)FIXME: See how netdev_set_wol() handles the
+ following constants. */
+ if (cfg5 & Cfg5_UWF)
+ wol->wolopts |= WAKE_UCAST;
+ if (cfg5 & Cfg5_MWF)
+ wol->wolopts |= WAKE_MCAST;
+ if (cfg5 & Cfg5_BWF)
+ wol->wolopts |= WAKE_BCAST;
+ }
+ spin_unlock_irq(&tp->lock);
+}
+
+
+/* Set the ethtool Wake-on-LAN settings. Return 0 or -errno. Assumes
+ that wol points to kernel memory and other threads or interrupts
+ aren't messing with the 8139. */
+static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ u32 support;
+ u8 cfg3, cfg5;
+
+ support = ((rtl_chip_info[tp->chipset].flags & HasLWake)
+ ? (WAKE_PHY | WAKE_MAGIC
+ | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)
+ : 0);
+ if (wol->wolopts & ~support)
+ return -EINVAL;
+
+ spin_lock_irq(&tp->lock);
+ cfg3 = RTL_R8 (Config3) & ~(Cfg3_LinkUp | Cfg3_Magic);
+ if (wol->wolopts & WAKE_PHY)
+ cfg3 |= Cfg3_LinkUp;
+ if (wol->wolopts & WAKE_MAGIC)
+ cfg3 |= Cfg3_Magic;
+ RTL_W8 (Cfg9346, Cfg9346_Unlock);
+ RTL_W8 (Config3, cfg3);
+ RTL_W8 (Cfg9346, Cfg9346_Lock);
+
+ cfg5 = RTL_R8 (Config5) & ~(Cfg5_UWF | Cfg5_MWF | Cfg5_BWF);
+ /* (KON)FIXME: These are untested. We may have to set the
+ CRC0, Wakeup0 and LSBCRC0 registers too, but I have no
+ documentation. */
+ if (wol->wolopts & WAKE_UCAST)
+ cfg5 |= Cfg5_UWF;
+ if (wol->wolopts & WAKE_MCAST)
+ cfg5 |= Cfg5_MWF;
+ if (wol->wolopts & WAKE_BCAST)
+ cfg5 |= Cfg5_BWF;
+ RTL_W8 (Config5, cfg5); /* need not unlock via Cfg9346 */
+ spin_unlock_irq(&tp->lock);
+
+ return 0;
+}
+
+static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(tp->pci_dev));
+ info->regdump_len = tp->regs_len;
+}
+
+static int rtl8139_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ spin_lock_irq(&tp->lock);
+ mii_ethtool_gset(&tp->mii, cmd);
+ spin_unlock_irq(&tp->lock);
+ return 0;
+}
+
+static int rtl8139_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ int rc;
+ spin_lock_irq(&tp->lock);
+ rc = mii_ethtool_sset(&tp->mii, cmd);
+ spin_unlock_irq(&tp->lock);
+ return rc;
+}
+
+static int rtl8139_nway_reset(struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ return mii_nway_restart(&tp->mii);
+}
+
+static u32 rtl8139_get_link(struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ return mii_link_ok(&tp->mii);
+}
+
+static u32 rtl8139_get_msglevel(struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ return tp->msg_enable;
+}
+
+static void rtl8139_set_msglevel(struct net_device *dev, u32 datum)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ tp->msg_enable = datum;
+}
+
+static int rtl8139_get_regs_len(struct net_device *dev)
+{
+ struct rtl8139_private *tp;
+ /* TODO: we are too slack to do reg dumping for pio, for now */
+ if (use_io)
+ return 0;
+ tp = netdev_priv(dev);
+ return tp->regs_len;
+}
+
+static void rtl8139_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
+{
+ struct rtl8139_private *tp;
+
+ /* TODO: we are too slack to do reg dumping for pio, for now */
+ if (use_io)
+ return;
+ tp = netdev_priv(dev);
+
+ regs->version = RTL_REGS_VER;
+
+ spin_lock_irq(&tp->lock);
+ memcpy_fromio(regbuf, tp->mmio_addr, regs->len);
+ spin_unlock_irq(&tp->lock);
+}
+
+static int rtl8139_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return RTL_NUM_STATS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void rtl8139_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+
+ data[0] = tp->xstats.early_rx;
+ data[1] = tp->xstats.tx_buf_mapped;
+ data[2] = tp->xstats.tx_timeouts;
+ data[3] = tp->xstats.rx_lost_in_ring;
+}
+
+static void rtl8139_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
+}
+
+static const struct ethtool_ops rtl8139_ethtool_ops = {
+ .get_drvinfo = rtl8139_get_drvinfo,
+ .get_settings = rtl8139_get_settings,
+ .set_settings = rtl8139_set_settings,
+ .get_regs_len = rtl8139_get_regs_len,
+ .get_regs = rtl8139_get_regs,
+ .nway_reset = rtl8139_nway_reset,
+ .get_link = rtl8139_get_link,
+ .get_msglevel = rtl8139_get_msglevel,
+ .set_msglevel = rtl8139_set_msglevel,
+ .get_wol = rtl8139_get_wol,
+ .set_wol = rtl8139_set_wol,
+ .get_strings = rtl8139_get_strings,
+ .get_sset_count = rtl8139_get_sset_count,
+ .get_ethtool_stats = rtl8139_get_ethtool_stats,
+};
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ int rc;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irq(&tp->lock);
+ rc = generic_mii_ioctl(&tp->mii, if_mii(rq), cmd, NULL);
+ spin_unlock_irq(&tp->lock);
+
+ return rc;
+}
+
+
+static struct net_device_stats *rtl8139_get_stats (struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+
+ if (netif_running(dev)) {
+ spin_lock_irqsave (&tp->lock, flags);
+ dev->stats.rx_missed_errors += RTL_R32 (RxMissed);
+ RTL_W32 (RxMissed, 0);
+ spin_unlock_irqrestore (&tp->lock, flags);
+ }
+
+ return &dev->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ This routine is not state sensitive and need not be SMP locked. */
+
+static void __set_rx_mode (struct net_device *dev)
+{
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ int rx_mode;
+ u32 tmp;
+
+ netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08x\n",
+ dev->flags, RTL_R32(RxConfig));
+
+ /* Note: do not reorder, GCC is clever about common statements. */
+ if (dev->flags & IFF_PROMISC) {
+ rx_mode =
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
+ AcceptAllPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else {
+ struct netdev_hw_addr *ha;
+ rx_mode = AcceptBroadcast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0;
+ netdev_for_each_mc_addr(ha, dev) {
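+			/* The top six CRC bits select one of the 64 filter
+			 bits in MAR0..MAR7. */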
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ rx_mode |= AcceptMulticast;
+ }
+ }
+
+ /* We can safely update without stopping the chip. */
+ tmp = rtl8139_rx_config | rx_mode;
+ if (tp->rx_config != tmp) {
+ RTL_W32_F (RxConfig, tmp);
+ tp->rx_config = tmp;
+ }
+ RTL_W32_F (MAR0 + 0, mc_filter[0]);
+ RTL_W32_F (MAR0 + 4, mc_filter[1]);
+}
+
+static void rtl8139_set_rx_mode (struct net_device *dev)
+{
+ unsigned long flags;
+ struct rtl8139_private *tp = netdev_priv(dev);
+
+ spin_lock_irqsave (&tp->lock, flags);
+ __set_rx_mode(dev);
+ spin_unlock_irqrestore (&tp->lock, flags);
+}
+
+#ifdef CONFIG_PM
+
+static int rtl8139_suspend (struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct rtl8139_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+
+ pci_save_state (pdev);
+
+ if (!netif_running (dev))
+ return 0;
+
+ netif_device_detach (dev);
+
+ spin_lock_irqsave (&tp->lock, flags);
+
+ /* Disable interrupts, stop Tx and Rx. */
+ RTL_W16 (IntrMask, 0);
+ RTL_W8 (ChipCmd, 0);
+
+ /* Update the error counts. */
+ dev->stats.rx_missed_errors += RTL_R32 (RxMissed);
+ RTL_W32 (RxMissed, 0);
+
+ spin_unlock_irqrestore (&tp->lock, flags);
+
+ pci_set_power_state (pdev, PCI_D3hot);
+
+ return 0;
+}
+
+
+static int rtl8139_resume (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+
+ pci_restore_state (pdev);
+ if (!netif_running (dev))
+ return 0;
+ pci_set_power_state (pdev, PCI_D0);
+ rtl8139_init_ring (dev);
+ rtl8139_hw_start (dev);
+ netif_device_attach (dev);
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+
+static struct pci_driver rtl8139_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = rtl8139_pci_tbl,
+ .probe = rtl8139_init_one,
+ .remove = __devexit_p(rtl8139_remove_one),
+#ifdef CONFIG_PM
+ .suspend = rtl8139_suspend,
+ .resume = rtl8139_resume,
+#endif /* CONFIG_PM */
+};
+
+
+static int __init rtl8139_init_module (void)
+{
+ /* when we're a module, we always print a version message,
+ * even if no 8139 board is found.
+ */
+#ifdef MODULE
+ pr_info(RTL8139_DRIVER_NAME "\n");
+#endif
+
+ return pci_register_driver(&rtl8139_pci_driver);
+}
+
+
+static void __exit rtl8139_cleanup_module (void)
+{
+ pci_unregister_driver (&rtl8139_pci_driver);
+}
+
+
+module_init(rtl8139_init_module);
+module_exit(rtl8139_cleanup_module);
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
new file mode 100644
index 000000000000..84083ec6e612
--- /dev/null
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -0,0 +1,130 @@
+#
+# Realtek device configuration
+#
+
+config NET_VENDOR_REALTEK
+ bool "Realtek devices"
+ default y
+ depends on PCI || (PARPORT && X86)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Realtek devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_REALTEK
+
+config ATP
+ tristate "AT-LAN-TEC/RealTek pocket adapter support"
+ depends on PARPORT && X86
+ select CRC32
+ ---help---
+ This is a network (Ethernet) device which attaches to your parallel
+	  port. Read <file:drivers/net/ethernet/realtek/atp.c> as well as the
+	  Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>, if you
+ want to use this. If you intend to use this driver, you should have
+ said N to the "Parallel printer support", because the two drivers
+ don't like each other.
+
+ To compile this driver as a module, choose M here: the module
+ will be called atp.
+
+config 8139CP
+ tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support (EXPERIMENTAL)"
+ depends on PCI && EXPERIMENTAL
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ This is a driver for the Fast Ethernet PCI network cards based on
+ the RTL8139C+ chips. If you have one of those, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called 8139cp. This is recommended.
+
+config 8139TOO
+ tristate "RealTek RTL-8129/8130/8139 PCI Fast Ethernet Adapter support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ This is a driver for the Fast Ethernet PCI network cards based on
+ the RTL 8129/8130/8139 chips. If you have one of those, say Y and
+ read the Ethernet-HOWTO <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called 8139too. This is recommended.
+
+config 8139TOO_PIO
+ bool "Use PIO instead of MMIO"
+ default y
+ depends on 8139TOO
+ ---help---
+ This instructs the driver to use programmed I/O ports (PIO) instead
+ of PCI shared memory (MMIO). This can possibly solve some problems
+ in case your mainboard has memory consistency issues. If unsure,
+ say N.
+
+config 8139TOO_TUNE_TWISTER
+ bool "Support for uncommon RTL-8139 rev. K (automatic channel equalization)"
+ depends on 8139TOO
+ ---help---
+	  This implements a function which might come in handy in case you
+	  are using low-quality cabling over long runs. It is required for
+	  RealTek RTL-8139 revision K boards, and totally unused otherwise.
+	  It tries to match the transceiver to the cable characteristics.
+	  This is experimental, since it is hardly documented by the
+	  manufacturer.
+ If unsure, say Y.
+
+config 8139TOO_8129
+ bool "Support for older RTL-8129/8130 boards"
+ depends on 8139TOO
+ ---help---
+ This enables support for the older and uncommon RTL-8129 and
+ RTL-8130 chips, which support MII via an external transceiver,
+ instead of an internal one. Disabling this option will save some
+ memory by making the code size smaller. If unsure, say Y.
+
+config 8139_OLD_RX_RESET
+ bool "Use older RX-reset method"
+ depends on 8139TOO
+ ---help---
+ The 8139too driver was recently updated to contain a more rapid
+ reset sequence, in the face of severe receive errors. This "new"
+ RX-reset method should be adequate for all boards. But if you
+ experience problems, you can enable this option to restore the
+ old RX-reset behavior. If unsure, say N.
+
+config R8169
+ tristate "Realtek 8169 gigabit ethernet support"
+ depends on PCI
+ select FW_LOADER
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
+
+ To compile this driver as a module, choose M here: the module
+ will be called r8169. This is recommended.
+
+config SC92031
+ tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)"
+ depends on PCI && EXPERIMENTAL
+ select CRC32
+ ---help---
+ This is a driver for the Fast Ethernet PCI network cards based on
+	  the Silan SC92031 chip (sometimes also called Realtek 8139D). If you
+ have one of these, say Y here.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sc92031. This is recommended.
+
+endif # NET_VENDOR_REALTEK
diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile
new file mode 100644
index 000000000000..e48cfb6ac42d
--- /dev/null
+++ b/drivers/net/ethernet/realtek/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the Realtek network device drivers.
+#
+
+obj-$(CONFIG_8139CP) += 8139cp.o
+obj-$(CONFIG_8139TOO) += 8139too.o
+obj-$(CONFIG_ATP) += atp.o
+obj-$(CONFIG_R8169) += r8169.o
+obj-$(CONFIG_SC92031) += sc92031.o
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
new file mode 100644
index 000000000000..e3f57fdbf0ea
--- /dev/null
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -0,0 +1,940 @@
+/* atp.c: Attached (pocket) ethernet adapter driver for linux. */
+/*
+	This is a driver for common OEM pocket (parallel port)
+ ethernet adapters based on the Realtek RTL8002 and RTL8012 chips.
+
+ Written 1993-2000 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. Copyright 1994-2000 retained by the original
+ author, Donald Becker. The timer-based reset code was supplied in 1995
+ by Bill Carlson, wwc@super.org.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/atp.html
+
+
+ Modular support/softnet added by Alan Cox.
+ _bit abuse fixed up by Alan Cox
+
+*/
+
+static const char version[] =
+"atp.c:v1.09=ac 2002/10/01 Donald Becker <becker@scyld.com>\n";
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+#define net_debug debug
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 15;
+
+#define NUM_UNITS 2
+/* The standard set of ISA module parameters. */
+static int io[NUM_UNITS];
+static int irq[NUM_UNITS];
+static int xcvr[NUM_UNITS]; /* The data transfer mode. */
+
+/* Operational parameters that are set at compile time. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (400*HZ/1000)
+
+/*
+ This file is a device driver for the RealTek (aka AT-Lan-Tec) pocket
+ ethernet adapter. This is a common low-cost OEM pocket ethernet
+ adapter, sold under many names.
+
+ Sources:
+ This driver was written from the packet driver assembly code provided by
+ Vincent Bono of AT-Lan-Tec. Ever try to figure out how a complicated
+ device works just from the assembly code? It ain't pretty. The following
+ description is written based on guesses and writing lots of special-purpose
+ code to test my theorized operation.
+
+ In 1997 Realtek made available the documentation for the second generation
+	RTL8012 chip, which has led to several driver improvements.
+ http://www.realtek.com.tw/
+
+ Theory of Operation
+
+ The RTL8002 adapter seems to be built around a custom spin of the SEEQ
+ controller core. It probably has a 16K or 64K internal packet buffer, of
+ which the first 4K is devoted to transmit and the rest to receive.
+	The controller maintains the queue of received packets and the packet buffer
+ access pointer internally, with only 'reset to beginning' and 'skip to next
+ packet' commands visible. The transmit packet queue holds two (or more?)
+ packets: both 'retransmit this packet' (due to collision) and 'transmit next
+ packet' commands must be started by hand.
+
+ The station address is stored in a standard bit-serial EEPROM which must be
+ read (ughh) by the device driver. (Provisions have been made for
+ substituting a 74S288 PROM, but I haven't gotten reports of any models
+ using it.) Unlike built-in devices, a pocket adapter can temporarily lose
+ power without indication to the device driver. The major effect is that
+ the station address, receive filter (promiscuous, etc.) and transceiver
+ must be reset.
+
+ The controller itself has 16 registers, some of which use only the lower
+ bits. The registers are read and written 4 bits at a time. The four bit
+ register address is presented on the data lines along with a few additional
+ timing and control bits. The data is then read from status port or written
+ to the data port.
+
+ Correction: the controller has two banks of 16 registers. The second
+ bank contains only the multicast filter table (now used) and the EEPROM
+ access registers.
+
+ Since the bulk data transfer of the actual packets through the slow
+ parallel port dominates the driver's running time, four distinct data
+ (non-register) transfer modes are provided by the adapter, two in each
+ direction. In the first mode timing for the nibble transfers is
+ provided through the data port. In the second mode the same timing is
+ provided through the control port. In either case the data is read from
+	the status port and written to the data port, just as when accessing
+	registers.
+
+	In addition to the basic data transfer methods, several more modes are
+ created by adding some delay by doing multiple reads of the data to allow
+ it to stabilize. This delay seems to be needed on most machines.
+
+ The data transfer mode is stored in the 'dev->if_port' field. Its default
+ value is '4'. It may be overridden at boot-time using the third parameter
+ to the "ether=..." initialization.
+
+ The header file <atp.h> provides inline functions that encapsulate the
+ register and data access methods. These functions are hand-tuned to
+ generate reasonable object code. This header file also documents my
+ interpretations of the device registers.
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include "atp.h"
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("RealTek RTL8002/8012 parallel port Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(max_interrupt_work, int, 0);
+module_param(debug, int, 0);
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(xcvr, int, NULL, 0);
+MODULE_PARM_DESC(max_interrupt_work, "ATP maximum events handled per interrupt");
+MODULE_PARM_DESC(debug, "ATP debug level (0-7)");
+MODULE_PARM_DESC(io, "ATP I/O base address(es)");
+MODULE_PARM_DESC(irq, "ATP IRQ number(s)");
+MODULE_PARM_DESC(xcvr, "ATP transceiver(s) (0=internal, 1=external)");
+
+/* The number of low I/O ports used by the ethercard. */
+#define ETHERCARD_TOTAL_SIZE 3
+
+/* Sequence to switch an 8012 from printer mux to ethernet mode. */
+static char mux_8012[] = { 0xff, 0xf7, 0xff, 0xfb, 0xf3, 0xfb, 0xff, 0xf7,};
+
+struct net_local {
+ spinlock_t lock;
+ struct net_device *next_module;
+ struct timer_list timer; /* Media selection timer. */
+ long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
+ int saved_tx_size;
+ unsigned int tx_unit_busy:1;
+ unsigned char re_tx, /* Number of packet retransmissions. */
+ addr_mode, /* Current Rx filter e.g. promiscuous, etc. */
+ pac_cnt_in_tx_buf,
+ chip_type;
+};
+
+/* This code, written by wwc@super.org, resets the adapter every
+ TIMED_CHECKER ticks. This recovers from an unknown error which
+ hangs the device. */
+#define TIMED_CHECKER (HZ/4)
+#ifdef TIMED_CHECKER
+#include <linux/timer.h>
+static void atp_timed_checker(unsigned long ignored);
+#endif
+
+/* Index to functions, as function prototypes. */
+
+static int atp_probe1(long ioaddr);
+static void get_node_ID(struct net_device *dev);
+static unsigned short eeprom_op(long ioaddr, unsigned int cmd);
+static int net_open(struct net_device *dev);
+static void hardware_init(struct net_device *dev);
+static void write_packet(long ioaddr, int length, unsigned char *packet, int pad, int mode);
+static void trigger_send(long ioaddr, int length);
+static netdev_tx_t atp_send_packet(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t atp_interrupt(int irq, void *dev_id);
+static void net_rx(struct net_device *dev);
+static void read_block(long ioaddr, int length, unsigned char *buffer, int data_mode);
+static int net_close(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static void tx_timeout(struct net_device *dev);
+
+
+/* A list of all installed ATP devices, for removing the driver module. */
+static struct net_device *root_atp_dev;
+
+/* Check for a network adapter of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+
+ FIXME: we should use the parport layer for this
+ */
+static int __init atp_init(void)
+{
+ int *port, ports[] = {0x378, 0x278, 0x3bc, 0};
+ int base_addr = io[0];
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return atp_probe1(base_addr);
+ else if (base_addr == 1) /* Don't probe at all. */
+ return -ENXIO;
+
+ for (port = ports; *port; port++) {
+ long ioaddr = *port;
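+		/* Write a test pattern to the data port; if it does not
+		 read back, no parallel port (hence no adapter) is here. */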
+ outb(0x57, ioaddr + PAR_DATA);
+ if (inb(ioaddr + PAR_DATA) != 0x57)
+ continue;
+ if (atp_probe1(ioaddr) == 0)
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static const struct net_device_ops atp_netdev_ops = {
+ .ndo_open = net_open,
+ .ndo_stop = net_close,
+ .ndo_start_xmit = atp_send_packet,
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_tx_timeout = tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __init atp_probe1(long ioaddr)
+{
+ struct net_device *dev = NULL;
+ struct net_local *lp;
+ int saved_ctrl_reg, status, i;
+ int res;
+
+ outb(0xff, ioaddr + PAR_DATA);
+ /* Save the original value of the Control register, in case we guessed
+ wrong. */
+ saved_ctrl_reg = inb(ioaddr + PAR_CONTROL);
+ if (net_debug > 3)
+ printk("atp: Control register was %#2.2x.\n", saved_ctrl_reg);
+	/* IRQEN=0, SLCTB=high, INITB=high, AUTOFDB=high, STBB=high. */
+ outb(0x04, ioaddr + PAR_CONTROL);
+#ifndef final_version
+ if (net_debug > 3) {
+ /* Turn off the printer multiplexer on the 8012. */
+ for (i = 0; i < 8; i++)
+ outb(mux_8012[i], ioaddr + PAR_DATA);
+ write_reg(ioaddr, MODSEL, 0x00);
+ printk("atp: Registers are ");
+ for (i = 0; i < 32; i++)
+ printk(" %2.2x", read_nibble(ioaddr, i));
+ printk(".\n");
+ }
+#endif
+ /* Turn off the printer multiplexer on the 8012. */
+ for (i = 0; i < 8; i++)
+ outb(mux_8012[i], ioaddr + PAR_DATA);
+ write_reg_high(ioaddr, CMR1, CMR1h_RESET);
+ /* udelay() here? */
+ status = read_nibble(ioaddr, CMR1);
+
+ if (net_debug > 3) {
+ printk(KERN_DEBUG "atp: Status nibble was %#2.2x..", status);
+ for (i = 0; i < 32; i++)
+ printk(" %2.2x", read_nibble(ioaddr, i));
+ printk("\n");
+ }
+
+ if ((status & 0x78) != 0x08) {
+ /* The pocket adapter probe failed, restore the control register. */
+ outb(saved_ctrl_reg, ioaddr + PAR_CONTROL);
+ return -ENODEV;
+ }
+ status = read_nibble(ioaddr, CMR2_h);
+ if ((status & 0x78) != 0x10) {
+ outb(saved_ctrl_reg, ioaddr + PAR_CONTROL);
+ return -ENODEV;
+ }
+
+ dev = alloc_etherdev(sizeof(struct net_local));
+ if (!dev)
+ return -ENOMEM;
+
+ /* Find the IRQ used by triggering an interrupt. */
+ write_reg_byte(ioaddr, CMR2, 0x01); /* No accept mode, IRQ out. */
+ write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE); /* Enable Tx and Rx. */
+
+ /* Omit autoIRQ routine for now. Use "table lookup" instead. Uhgggh. */
+ if (irq[0])
+ dev->irq = irq[0];
+ else if (ioaddr == 0x378)
+ dev->irq = 7;
+ else
+ dev->irq = 5;
+ write_reg_high(ioaddr, CMR1, CMR1h_TxRxOFF); /* Disable Tx and Rx units. */
+ write_reg(ioaddr, CMR2, CMR2_NULL);
+
+ dev->base_addr = ioaddr;
+
+ /* Read the station address PROM. */
+ get_node_ID(dev);
+
+#ifndef MODULE
+ if (net_debug)
+ printk(KERN_INFO "%s", version);
+#endif
+
+ printk(KERN_NOTICE "%s: Pocket adapter found at %#3lx, IRQ %d, "
+ "SAPROM %pM.\n",
+ dev->name, dev->base_addr, dev->irq, dev->dev_addr);
+
+ /* Reset the ethernet hardware and activate the printer pass-through. */
+ write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX);
+
+ lp = netdev_priv(dev);
+ lp->chip_type = RTL8002;
+ lp->addr_mode = CMR2h_Normal;
+ spin_lock_init(&lp->lock);
+
+ /* For the ATP adapter the "if_port" is really the data transfer mode. */
+ if (xcvr[0])
+ dev->if_port = xcvr[0];
+ else
+ dev->if_port = (dev->mem_start & 0xf) ? (dev->mem_start & 0x7) : 4;
+ if (dev->mem_end & 0xf)
+ net_debug = dev->mem_end & 7;
+
+ dev->netdev_ops = &atp_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ res = register_netdev(dev);
+ if (res) {
+ free_netdev(dev);
+ return res;
+ }
+
+ lp->next_module = root_atp_dev;
+ root_atp_dev = dev;
+
+ return 0;
+}
+
+/* Read the station address PROM, usually a word-wide EEPROM. */
+static void __init get_node_ID(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ int sa_offset = 0;
+ int i;
+
+ write_reg(ioaddr, CMR2, CMR2_EEPROM); /* Point to the EEPROM control registers. */
+
+ /* Some adapters have the station address at offset 15 instead of offset
+ zero. Check for it, and fix it if needed. */
+ if (eeprom_op(ioaddr, EE_READ(0)) == 0xffff)
+ sa_offset = 15;
+
+ for (i = 0; i < 3; i++)
+ ((__be16 *)dev->dev_addr)[i] =
+ cpu_to_be16(eeprom_op(ioaddr, EE_READ(sa_offset + i)));
+
+ write_reg(ioaddr, CMR2, CMR2_NULL);
+}
+
+/*
+ An EEPROM read command starts by shifting out 0x60+address, and then
+ shifting in the serial data. See the NatSemi databook for details.
+ * ________________
+ * CS : __|
+ * ___ ___
+ * CLK: ______| |___| |
+ * __ _______ _______
+ * DI : __X_______X_______X
+ * DO : _________X_______X
+ */
+
+static unsigned short __init eeprom_op(long ioaddr, u32 cmd)
+{
+ unsigned eedata_out = 0;
+ int num_bits = EE_CMD_SIZE;
+
+ while (--num_bits >= 0) {
+ char outval = (cmd & (1<<num_bits)) ? EE_DATA_WRITE : 0;
+ write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_LOW);
+ write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_HIGH);
+ eedata_out <<= 1;
+ if (read_nibble(ioaddr, PROM_DATA) & EE_DATA_READ)
+ eedata_out++;
+ }
+ write_reg_high(ioaddr, PROM_CMD, EE_CLK_LOW & ~EE_CS);
+ return eedata_out;
+}
+
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine sets everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+	there is a non-reboot way to recover if something goes wrong.
+
+ This is an attachable device: if there is no private entry then it wasn't
+ probed for at boot-time, and we need to probe for it again.
+ */
+static int net_open(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ret;
+
+ /* The interrupt line is turned off (tri-stated) when the device isn't in
+ use. That's especially important for "attached" interfaces where the
+ port or interrupt may be shared. */
+ ret = request_irq(dev->irq, atp_interrupt, 0, dev->name, dev);
+ if (ret)
+ return ret;
+
+ hardware_init(dev);
+
+ init_timer(&lp->timer);
+ lp->timer.expires = jiffies + TIMED_CHECKER;
+ lp->timer.data = (unsigned long)dev;
+ lp->timer.function = atp_timed_checker; /* timer handler */
+ add_timer(&lp->timer);
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+/* This routine resets the hardware. We initialize everything, assuming that
+ the hardware may have been temporarily detached. */
+static void hardware_init(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ int i;
+
+ /* Turn off the printer multiplexer on the 8012. */
+ for (i = 0; i < 8; i++)
+ outb(mux_8012[i], ioaddr + PAR_DATA);
+ write_reg_high(ioaddr, CMR1, CMR1h_RESET);
+
+ for (i = 0; i < 6; i++)
+ write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
+
+ write_reg_high(ioaddr, CMR2, lp->addr_mode);
+
+ if (net_debug > 2) {
+ printk(KERN_DEBUG "%s: Reset: current Rx mode %d.\n", dev->name,
+ (read_nibble(ioaddr, CMR2_h) >> 3) & 0x0f);
+ }
+
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT);
+ write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);
+
+ /* Enable the interrupt line from the serial port. */
+ outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+
+ /* Unmask the interesting interrupts. */
+ write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
+ write_reg_high(ioaddr, IMR, ISRh_RxErr);
+
+ lp->tx_unit_busy = 0;
+ lp->pac_cnt_in_tx_buf = 0;
+ lp->saved_tx_size = 0;
+}
+
+static void trigger_send(long ioaddr, int length)
+{
+ write_reg_byte(ioaddr, TxCNT0, length & 0xff);
+ write_reg(ioaddr, TxCNT1, length >> 8);
+ write_reg(ioaddr, CMR1, CMR1_Xmit);
+}
+
+static void write_packet(long ioaddr, int length, unsigned char *packet, int pad_len, int data_mode)
+{
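+	/* Round an odd length up to an even byte count; the extra byte
+	 is transmitted as padding. */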
+	if (length & 1) {
+ length++;
+ pad_len++;
+ }
+
+ outb(EOC+MAR, ioaddr + PAR_DATA);
+ if ((data_mode & 1) == 0) {
+ /* Write the packet out, starting with the write addr. */
+ outb(WrAddr+MAR, ioaddr + PAR_DATA);
+ do {
+ write_byte_mode0(ioaddr, *packet++);
+		} while (--length > pad_len);
+		do {
+			write_byte_mode0(ioaddr, 0);
+		} while (--length > 0);
+ } else {
+ /* Write the packet out in slow mode. */
+ unsigned char outbyte = *packet++;
+
+ outb(Ctrl_LNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+ outb(WrAddr+MAR, ioaddr + PAR_DATA);
+
+ outb((outbyte & 0x0f)|0x40, ioaddr + PAR_DATA);
+ outb(outbyte & 0x0f, ioaddr + PAR_DATA);
+ outbyte >>= 4;
+ outb(outbyte & 0x0f, ioaddr + PAR_DATA);
+ outb(Ctrl_HNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+ while (--length > pad_len)
+ write_byte_mode1(ioaddr, *packet++);
+ while (--length > 0)
+ write_byte_mode1(ioaddr, 0);
+ }
+ /* Terminate the Tx frame. End of write: ECB. */
+ outb(0xff, ioaddr + PAR_DATA);
+ outb(Ctrl_HNibWrite | Ctrl_SelData | Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+}
+
+static void tx_timeout(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING "%s: Transmit timed out, %s?\n", dev->name,
+ inb(ioaddr + PAR_CONTROL) & 0x10 ? "network cable problem"
+ : "IRQ conflict");
+ dev->stats.tx_errors++;
+ /* Try to restart the adapter. */
+ hardware_init(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t atp_send_packet(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ int length;
+ unsigned long flags;
+
+ length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+
+ netif_stop_queue(dev);
+
+ /* Disable interrupts by writing 0x00 to the Interrupt Mask Register.
+ This sequence must not be interrupted by an incoming packet. */
+
+ spin_lock_irqsave(&lp->lock, flags);
+ write_reg(ioaddr, IMR, 0);
+ write_reg_high(ioaddr, IMR, 0);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ write_packet(ioaddr, length, skb->data, length-skb->len, dev->if_port);
+
+ lp->pac_cnt_in_tx_buf++;
+ if (lp->tx_unit_busy == 0) {
+ trigger_send(ioaddr, length);
+ lp->saved_tx_size = 0; /* Redundant */
+ lp->re_tx = 0;
+ lp->tx_unit_busy = 1;
+ } else
+ lp->saved_tx_size = length;
+ /* Re-enable the LPT interrupts. */
+ write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
+ write_reg_high(ioaddr, IMR, ISRh_RxErr);
+
+ dev_kfree_skb (skb);
+ return NETDEV_TX_OK;
+}
+
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static irqreturn_t atp_interrupt(int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct net_local *lp;
+ long ioaddr;
+ static int num_tx_since_rx;
+ int boguscount = max_interrupt_work;
+ int handled = 0;
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ spin_lock(&lp->lock);
+
+ /* Disable additional spurious interrupts. */
+ outb(Ctrl_SelData, ioaddr + PAR_CONTROL);
+
+ /* The adapter's output is currently the IRQ line, switch it to data. */
+ write_reg(ioaddr, CMR2, CMR2_NULL);
+ write_reg(ioaddr, IMR, 0);
+
+ if (net_debug > 5) printk(KERN_DEBUG "%s: In interrupt ", dev->name);
+ while (--boguscount > 0) {
+ int status = read_nibble(ioaddr, ISR);
+ if (net_debug > 5) printk("loop status %02x..", status);
+
+ if (status & (ISR_RxOK<<3)) {
+ handled = 1;
+ write_reg(ioaddr, ISR, ISR_RxOK); /* Clear the Rx interrupt. */
+ do {
+ int read_status = read_nibble(ioaddr, CMR1);
+ if (net_debug > 6)
+ printk("handling Rx packet %02x..", read_status);
+ /* We acknowledged the normal Rx interrupt, so if the interrupt
+ is still outstanding we must have a Rx error. */
+ if (read_status & (CMR1_IRQ << 3)) { /* Overrun. */
+ dev->stats.rx_over_errors++;
+ /* Set to no-accept mode long enough to remove a packet. */
+ write_reg_high(ioaddr, CMR2, CMR2h_OFF);
+ net_rx(dev);
+ /* Clear the interrupt and return to normal Rx mode. */
+ write_reg_high(ioaddr, ISR, ISRh_RxErr);
+ write_reg_high(ioaddr, CMR2, lp->addr_mode);
+ } else if ((read_status & (CMR1_BufEnb << 3)) == 0) {
+ net_rx(dev);
+ num_tx_since_rx = 0;
+ } else
+ break;
+ } while (--boguscount > 0);
+ } else if (status & ((ISR_TxErr + ISR_TxOK)<<3)) {
+ handled = 1;
+ if (net_debug > 6) printk("handling Tx done..");
+ /* Clear the Tx interrupt. We should check for too many failures
+ and reinitialize the adapter. */
+ write_reg(ioaddr, ISR, ISR_TxErr + ISR_TxOK);
+ if (status & (ISR_TxErr<<3)) {
+ dev->stats.collisions++;
+ if (++lp->re_tx > 15) {
+ dev->stats.tx_aborted_errors++;
+ hardware_init(dev);
+ break;
+ }
+ /* Attempt to retransmit. */
+ if (net_debug > 6) printk("attempting to ReTx");
+ write_reg(ioaddr, CMR1, CMR1_ReXmit + CMR1_Xmit);
+ } else {
+ /* Finish up the transmit. */
+ dev->stats.tx_packets++;
+ lp->pac_cnt_in_tx_buf--;
+				if (lp->saved_tx_size) {
+ trigger_send(ioaddr, lp->saved_tx_size);
+ lp->saved_tx_size = 0;
+ lp->re_tx = 0;
+ } else
+ lp->tx_unit_busy = 0;
+ netif_wake_queue(dev); /* Inform upper layers. */
+ }
+ num_tx_since_rx++;
+ } else if (num_tx_since_rx > 8 &&
+ time_after(jiffies, dev->last_rx + HZ)) {
+ if (net_debug > 2)
+ printk(KERN_DEBUG "%s: Missed packet? No Rx after %d Tx and "
+ "%ld jiffies status %02x CMR1 %02x.\n", dev->name,
+ num_tx_since_rx, jiffies - dev->last_rx, status,
+ (read_nibble(ioaddr, CMR1) >> 3) & 15);
+ dev->stats.rx_missed_errors++;
+ hardware_init(dev);
+ num_tx_since_rx = 0;
+ break;
+ } else
+ break;
+ }
+
+	/* The following code fixes a rare (and very difficult to track down)
+ problem where the adapter forgets its ethernet address. */
+ {
+ int i;
+ for (i = 0; i < 6; i++)
+ write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
+#if 0 && defined(TIMED_CHECKER)
+ mod_timer(&lp->timer, jiffies + TIMED_CHECKER);
+#endif
+ }
+
+ /* Tell the adapter that it can go back to using the output line as IRQ. */
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT);
+ /* Enable the physical interrupt line, which is sure to be low until.. */
+ outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+ /* .. we enable the interrupt sources. */
+ write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
+ write_reg_high(ioaddr, IMR, ISRh_RxErr); /* Hmmm, really needed? */
+
+ spin_unlock(&lp->lock);
+
+ if (net_debug > 5) printk("exiting interrupt.\n");
+ return IRQ_RETVAL(handled);
+}
+
+#ifdef TIMED_CHECKER
+/* The following code fixes a rare (and very difficult to track down)
+ problem where the adapter forgets its ethernet address. */
+static void atp_timed_checker(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ long ioaddr = dev->base_addr;
+ struct net_local *lp = netdev_priv(dev);
+ int tickssofar = jiffies - lp->last_rx_time;
+ int i;
+
+ spin_lock(&lp->lock);
+ if (tickssofar > 2*HZ) {
+#if 1
+ for (i = 0; i < 6; i++)
+ write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
+ lp->last_rx_time = jiffies;
+#else
+ for (i = 0; i < 6; i++)
+ if (read_cmd_byte(ioaddr, PAR0 + i) != atp_timed_dev->dev_addr[i])
+ {
+ struct net_local *lp = netdev_priv(atp_timed_dev);
+ write_reg_byte(ioaddr, PAR0 + i, atp_timed_dev->dev_addr[i]);
+ if (i == 2)
+ dev->stats.tx_errors++;
+ else if (i == 3)
+ dev->stats.tx_dropped++;
+ else if (i == 4)
+ dev->stats.collisions++;
+ else
+ dev->stats.rx_errors++;
+ }
+#endif
+ }
+ spin_unlock(&lp->lock);
+ lp->timer.expires = jiffies + TIMED_CHECKER;
+ add_timer(&lp->timer);
+}
+#endif
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void net_rx(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ struct rx_header rx_head;
+
+ /* Process the received packet. */
+ outb(EOC+MAR, ioaddr + PAR_DATA);
+ read_block(ioaddr, 8, (unsigned char*)&rx_head, dev->if_port);
+ if (net_debug > 5)
+ printk(KERN_DEBUG " rx_count %04x %04x %04x %04x..", rx_head.pad,
+ rx_head.rx_count, rx_head.rx_status, rx_head.cur_addr);
+ if ((rx_head.rx_status & 0x77) != 0x01) {
+ dev->stats.rx_errors++;
+ if (rx_head.rx_status & 0x0004) dev->stats.rx_frame_errors++;
+ else if (rx_head.rx_status & 0x0002) dev->stats.rx_crc_errors++;
+ if (net_debug > 3)
+ printk(KERN_DEBUG "%s: Unknown ATP Rx error %04x.\n",
+ dev->name, rx_head.rx_status);
+ if (rx_head.rx_status & 0x0020) {
+ dev->stats.rx_fifo_errors++;
+ write_reg_high(ioaddr, CMR1, CMR1h_TxENABLE);
+ write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);
+ } else if (rx_head.rx_status & 0x0050)
+ hardware_init(dev);
+ return;
+ } else {
+ /* Malloc up new buffer. The "-4" omits the FCS (CRC). */
+ int pkt_len = (rx_head.rx_count & 0x7ff) - 4;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len + 2);
+ if (skb == NULL) {
+ printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n",
+ dev->name);
+ dev->stats.rx_dropped++;
+ goto done;
+ }
+
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ done:
+ write_reg(ioaddr, CMR1, CMR1_NextPkt);
+ lp->last_rx_time = jiffies;
+}
+
+static void read_block(long ioaddr, int length, unsigned char *p, int data_mode)
+{
+ if (data_mode <= 3) { /* Mode 0 or 1 */
+ outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+ outb(length == 8 ? RdAddr | HNib | MAR : RdAddr | MAR,
+ ioaddr + PAR_DATA);
+ if (data_mode <= 1) { /* Mode 0 or 1 */
+ do { *p++ = read_byte_mode0(ioaddr); } while (--length > 0);
+ } else { /* Mode 2 or 3 */
+ do { *p++ = read_byte_mode2(ioaddr); } while (--length > 0);
+ }
+ } else if (data_mode <= 5) {
+ do { *p++ = read_byte_mode4(ioaddr); } while (--length > 0);
+ } else {
+ do { *p++ = read_byte_mode6(ioaddr); } while (--length > 0);
+ }
+
+ outb(EOC+HNib+MAR, ioaddr + PAR_DATA);
+ outb(Ctrl_SelData, ioaddr + PAR_CONTROL);
+}
+
+/* The inverse routine to net_open(). */
+static int
+net_close(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+
+ netif_stop_queue(dev);
+
+ del_timer_sync(&lp->timer);
+
+ /* Flush the Tx and disable Rx here. */
+ lp->addr_mode = CMR2h_OFF;
+ write_reg_high(ioaddr, CMR2, CMR2h_OFF);
+
+ /* Free the IRQ line. */
+ outb(0x00, ioaddr + PAR_CONTROL);
+ free_irq(dev->irq, dev);
+
+ /* Reset the ethernet hardware and activate the printer pass-through. */
+ write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX);
+ return 0;
+}
+
+/*
+ * Set or clear the multicast filter for this adapter.
+ */
+
+static void set_rx_mode_8002(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+
+ if (!netdev_mc_empty(dev) || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC)))
+ lp->addr_mode = CMR2h_PROMISC;
+ else
+ lp->addr_mode = CMR2h_Normal;
+ write_reg_high(ioaddr, CMR2, lp->addr_mode);
+}
+
+static void set_rx_mode_8012(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ unsigned char new_mode, mc_filter[8]; /* Multicast hash filter */
+ int i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ new_mode = CMR2h_PROMISC;
+ } else if ((netdev_mc_count(dev) > 1000) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ new_mode = CMR2h_Normal;
+ } else {
+ struct netdev_hw_addr *ha;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
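+			/* Six low CRC bits pick one bit of the 64-bit filter
+			 spread across the eight filter bytes. */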
+ int filterbit = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
+			mc_filter[filterbit >> 3] |= 1 << (filterbit & 7);
+ }
+ new_mode = CMR2h_Normal;
+ }
+ lp->addr_mode = new_mode;
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT | 0x04); /* Switch to page 1. */
+ for (i = 0; i < 8; i++)
+ write_reg_byte(ioaddr, i, mc_filter[i]);
+	if (net_debug > 2) {
+ printk(KERN_DEBUG "%s: Mode %d, setting multicast filter to",
+ dev->name, lp->addr_mode);
+ for (i = 0; i < 8; i++)
+ printk(" %2.2x", mc_filter[i]);
+ printk(".\n");
+ }
+
+ write_reg_high(ioaddr, CMR2, lp->addr_mode);
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT); /* Switch back to page 0 */
+}
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ if (lp->chip_type == RTL8002)
+ return set_rx_mode_8002(dev);
+ else
+ return set_rx_mode_8012(dev);
+}
+
+
+static int __init atp_init_module(void) {
+ if (debug) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s", version);
+ return atp_init();
+}
+
+static void __exit atp_cleanup_module(void) {
+ struct net_device *next_dev;
+
+ while (root_atp_dev) {
+ struct net_local *atp_local = netdev_priv(root_atp_dev);
+ next_dev = atp_local->next_module;
+ unregister_netdev(root_atp_dev);
+ /* No need to release_region(), since we never snarf it. */
+ free_netdev(root_atp_dev);
+ root_atp_dev = next_dev;
+ }
+}
+
+module_init(atp_init_module);
+module_exit(atp_cleanup_module);
diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h
new file mode 100644
index 000000000000..0edc642c2c2f
--- /dev/null
+++ b/drivers/net/ethernet/realtek/atp.h
@@ -0,0 +1,259 @@
+/* Linux header file for the ATP pocket ethernet adapter. */
+/* v1.09 8/9/2000 becker@scyld.com. */
+
+#include <linux/if_ether.h>
+#include <linux/types.h>
+
+/* The header prepended to received packets. */
+struct rx_header {
+ ushort pad; /* Pad. */
+ ushort rx_count;
+ ushort rx_status; /* Unknown bit assignments :-<. */
+ ushort cur_addr; /* Apparently the current buffer address(?) */
+};
+
+#define PAR_DATA 0
+#define PAR_STATUS 1
+#define PAR_CONTROL 2
+
+enum chip_type { RTL8002, RTL8012 };
+
+#define Ctrl_LNibRead 0x08 /* LP_PSELECP */
+#define Ctrl_HNibRead 0
+#define Ctrl_LNibWrite 0x08 /* LP_PSELECP */
+#define Ctrl_HNibWrite 0
+#define Ctrl_SelData 0x04 /* LP_PINITP */
+#define Ctrl_IRQEN 0x10 /* LP_PINTEN */
+
+#define EOW 0xE0
+#define EOC 0xE0
+#define WrAddr 0x40 /* Set address of EPLC read, write register. */
+#define RdAddr 0xC0
+#define HNib 0x10
+
+enum page0_regs
+{
+ /* The first six registers hold the ethernet physical station address. */
+ PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5,
+ TxCNT0 = 6, TxCNT1 = 7, /* The transmit byte count. */
+ TxSTAT = 8, RxSTAT = 9, /* Tx and Rx status. */
+ ISR = 10, IMR = 11, /* Interrupt status and mask. */
+ CMR1 = 12, /* Command register 1. */
+ CMR2 = 13, /* Command register 2. */
+ MODSEL = 14, /* Mode select register. */
+ MAR = 14, /* Memory address register (?). */
+ CMR2_h = 0x1d, };
+
+enum eepage_regs
+{ PROM_CMD = 6, PROM_DATA = 7 }; /* Note that PROM_CMD is in the "high" bits. */
+
+
+#define ISR_TxOK 0x01
+#define ISR_RxOK 0x04
+#define ISR_TxErr 0x02
+#define ISRh_RxErr 0x11 /* ISR, high nibble */
+
+#define CMR1h_MUX 0x08 /* Select printer multiplexor on 8012. */
+#define CMR1h_RESET 0x04 /* Reset. */
+#define CMR1h_RxENABLE 0x02 /* Rx unit enable. */
+#define CMR1h_TxENABLE 0x01 /* Tx unit enable. */
+#define CMR1h_TxRxOFF 0x00
+#define CMR1_ReXmit 0x08 /* Trigger a retransmit. */
+#define CMR1_Xmit 0x04 /* Trigger a transmit. */
+#define CMR1_IRQ 0x02 /* Interrupt active. */
+#define CMR1_BufEnb 0x01 /* Enable the buffer(?). */
+#define CMR1_NextPkt	0x01	/* Skip to next packet(?). */
+
+#define CMR2_NULL 8
+#define CMR2_IRQOUT 9
+#define CMR2_RAMTEST 10
+#define CMR2_EEPROM 12 /* Set to page 1, for reading the EEPROM. */
+
+#define CMR2h_OFF 0 /* No accept mode. */
+#define CMR2h_Physical 1 /* Accept a physical address match only. */
+#define CMR2h_Normal 2 /* Accept physical and broadcast address. */
+#define CMR2h_PROMISC 3 /* Promiscuous mode. */
+
+/* An inline function used below: it differs from inb() by explicitly
+   returning an unsigned char, saving a truncation. */
+static inline unsigned char inbyte(unsigned short port)
+{
+ unsigned char _v;
+ __asm__ __volatile__ ("inb %w1,%b0" :"=a" (_v):"d" (port));
+ return _v;
+}
+
+/* Read register OFFSET.
+ This command should always be terminated with read_end(). */
+static inline unsigned char read_nibble(short port, unsigned char offset)
+{
+ unsigned char retval;
+ outb(EOC+offset, port + PAR_DATA);
+ outb(RdAddr+offset, port + PAR_DATA);
+ inbyte(port + PAR_STATUS); /* Settling time delay */
+ retval = inbyte(port + PAR_STATUS);
+ outb(EOC+offset, port + PAR_DATA);
+
+ return retval;
+}
+
+/* Functions for bulk data read. The interrupt line is always disabled. */
+/* Get a byte using read mode 0, reading data from the control lines. */
+static inline unsigned char read_byte_mode0(short ioaddr)
+{
+ unsigned char low_nib;
+
+ outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+}
+
+/* The same as read_byte_mode0(), but does multiple inb()s for stability. */
+static inline unsigned char read_byte_mode2(short ioaddr)
+{
+ unsigned char low_nib;
+
+ outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+}
+
+/* Read a byte through the data register. */
+static inline unsigned char read_byte_mode4(short ioaddr)
+{
+ unsigned char low_nib;
+
+ outb(RdAddr | MAR, ioaddr + PAR_DATA);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+}
+
+/* Read a byte through the data register, double reading to allow settling. */
+static inline unsigned char read_byte_mode6(short ioaddr)
+{
+ unsigned char low_nib;
+
+ outb(RdAddr | MAR, ioaddr + PAR_DATA);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
+ inbyte(ioaddr + PAR_STATUS);
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+}
+
+static inline void
+write_reg(short port, unsigned char reg, unsigned char value)
+{
+ unsigned char outval;
+ outb(EOC | reg, port + PAR_DATA);
+ outval = WrAddr | reg;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+
+ outval &= 0xf0;
+ outval |= value;
+ outb(outval, port + PAR_DATA);
+ outval &= 0x1f;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA);
+
+ outb(EOC | outval, port + PAR_DATA);
+}
+
+static inline void
+write_reg_high(short port, unsigned char reg, unsigned char value)
+{
+ unsigned char outval = EOC | HNib | reg;
+
+ outb(outval, port + PAR_DATA);
+ outval &= WrAddr | HNib | 0x0f;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+
+ outval = WrAddr | HNib | value;
+ outb(outval, port + PAR_DATA);
+ outval &= HNib | 0x0f; /* HNib | value */
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA);
+
+ outb(EOC | HNib | outval, port + PAR_DATA);
+}
+
+/* Write a byte out using nibble mode. The low nibble is written first. */
+static inline void
+write_reg_byte(short port, unsigned char reg, unsigned char value)
+{
+ unsigned char outval;
+ outb(EOC | reg, port + PAR_DATA); /* Reset the address register. */
+ outval = WrAddr | reg;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+
+ outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA);
+ outb(value & 0x0f, port + PAR_DATA);
+ value >>= 4;
+ outb(value, port + PAR_DATA);
+ outb(0x10 | value, port + PAR_DATA);
+ outb(0x10 | value, port + PAR_DATA);
+
+ outb(EOC | value, port + PAR_DATA); /* Reset the address register. */
+}
+
+/*
+ * Bulk data writes to the packet buffer. The interrupt line remains enabled.
+ * The first, faster method uses only the dataport (data modes 0, 2 & 4).
+ * The second (backup) method uses data and control regs (modes 1, 3 & 5).
+ * It should only be needed when there is skew between the individual data
+ * lines.
+ */
+static inline void write_byte_mode0(short ioaddr, unsigned char value)
+{
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ outb((value>>4) | 0x10, ioaddr + PAR_DATA);
+}
+
+static inline void write_byte_mode1(short ioaddr, unsigned char value)
+{
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL);
+ outb((value>>4) | 0x10, ioaddr + PAR_DATA);
+ outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL);
+}
+
+/* Write 16bit VALUE to the packet buffer: the same as above just doubled. */
+static inline void write_word_mode0(short ioaddr, unsigned short value)
+{
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
+}
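+
+/* Illustrative sketch (not part of the original driver): streaming a
+   whole buffer into the packet buffer with the mode-0 primitive above,
+   after pointing the write pointer at the memory address register. */
+static inline void write_block_mode0(short ioaddr,
+				     const unsigned char *buf, int length)
+{
+	outb(WrAddr | MAR, ioaddr + PAR_DATA);
+	do {
+		write_byte_mode0(ioaddr, *buf++);
+	} while (--length > 0);
+}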
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
+#define EE_CS 0x02 /* EEPROM chip select. */
+#define EE_CLK_HIGH 0x12
+#define EE_CLK_LOW 0x16
+#define EE_DATA_WRITE 0x01 /* EEPROM chip data in. */
+#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
+
+/* Delay between EEPROM clock transitions. */
+#define eeprom_delay(ticks) \
+do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17)
+#define EE_READ(offset) (((6 << 6) + (offset)) << 17)
+#define EE_ERASE(offset) (((7 << 6) + (offset)) << 17)
+#define EE_CMD_SIZE 27 /* The command+address+data size. */
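+
+/* Illustrative sketch (not part of the original driver): clocking one of
+   the EE_*(offset) command words above out to the serial EEPROM, MSB
+   first, through the PROM_CMD/PROM_DATA registers. */
+static inline unsigned short eeprom_op(short ioaddr, u32 cmd)
+{
+	unsigned int data_out = 0;
+	int num_bits = EE_CMD_SIZE;
+
+	while (--num_bits >= 0) {
+		unsigned char outval =
+			(cmd & (1 << num_bits)) ? EE_DATA_WRITE : 0;
+
+		write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_LOW);
+		eeprom_delay(5);
+		write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_HIGH);
+		data_out <<= 1;
+		if (read_nibble(ioaddr, PROM_DATA) & EE_DATA_READ)
+			data_out++;
+		eeprom_delay(5);
+	}
+	return (unsigned short)data_out;
+}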
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
new file mode 100644
index 000000000000..2ce60709a455
--- /dev/null
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -0,0 +1,6219 @@
+/*
+ * r8169.c: RealTek 8169/8168/8101 ethernet driver.
+ *
+ * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
+ * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
+ * Copyright (c) a lot of people too. Please respect their work.
+ *
+ * See MAINTAINERS file for support contact information.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/firmware.h>
+#include <linux/pci-aspm.h>
+#include <linux/prefetch.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#define RTL8169_VERSION "2.3LK-NAPI"
+#define MODULENAME "r8169"
+#define PFX MODULENAME ": "
+
+#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
+#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
+#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
+#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
+#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
+#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
+#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
+#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
+
+#ifdef RTL8169_DEBUG
+#define assert(expr) \
+ if (!(expr)) { \
+ printk( "Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr,__FILE__,__func__,__LINE__); \
+ }
+#define dprintk(fmt, args...) \
+ do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
+#else
+#define assert(expr) do {} while (0)
+#define dprintk(fmt, args...) do {} while (0)
+#endif /* RTL8169_DEBUG */
+
+#define R8169_MSG_DEFAULT \
+ (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
+
+#define TX_BUFFS_AVAIL(tp) \
+ (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
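+/* The "- 1" keeps one descriptor slot unused, so a completely full Tx
+   ring can be told apart from an empty one (dirty_tx == cur_tx). */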
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ The RTL chips use a 64 element hash table based on the Ethernet CRC. */
+static const int multicast_filter_limit = 32;
+
+/* MAC address length */
+#define MAC_ADDR_LEN 6
+
+#define MAX_READ_REQUEST_SHIFT 12
+#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
+#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
+#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
+
+#define R8169_REGS_SIZE 256
+#define R8169_NAPI_WEIGHT 64
+#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
+#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
+#define RX_BUF_SIZE 1536 /* Rx Buffer size */
+#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
+#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
+
+#define RTL8169_TX_TIMEOUT (6*HZ)
+#define RTL8169_PHY_TIMEOUT (10*HZ)
+
+#define RTL_EEPROM_SIG cpu_to_le32(0x8129)
+#define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff)
+#define RTL_EEPROM_SIG_ADDR 0x0000
+
+/* write/read MMIO register */
+#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
+#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
+#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
+#define RTL_R8(reg) readb (ioaddr + (reg))
+#define RTL_R16(reg) readw (ioaddr + (reg))
+#define RTL_R32(reg) readl (ioaddr + (reg))
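+
+/*
+ * These accessors assume a "void __iomem *ioaddr" in the local scope,
+ * e.g.:
+ *
+ *	void __iomem *ioaddr = tp->mmio_addr;
+ *	RTL_W8(Cfg9346, Cfg9346_Unlock);
+ */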
+
+enum mac_version {
+ RTL_GIGA_MAC_VER_01 = 0,
+ RTL_GIGA_MAC_VER_02,
+ RTL_GIGA_MAC_VER_03,
+ RTL_GIGA_MAC_VER_04,
+ RTL_GIGA_MAC_VER_05,
+ RTL_GIGA_MAC_VER_06,
+ RTL_GIGA_MAC_VER_07,
+ RTL_GIGA_MAC_VER_08,
+ RTL_GIGA_MAC_VER_09,
+ RTL_GIGA_MAC_VER_10,
+ RTL_GIGA_MAC_VER_11,
+ RTL_GIGA_MAC_VER_12,
+ RTL_GIGA_MAC_VER_13,
+ RTL_GIGA_MAC_VER_14,
+ RTL_GIGA_MAC_VER_15,
+ RTL_GIGA_MAC_VER_16,
+ RTL_GIGA_MAC_VER_17,
+ RTL_GIGA_MAC_VER_18,
+ RTL_GIGA_MAC_VER_19,
+ RTL_GIGA_MAC_VER_20,
+ RTL_GIGA_MAC_VER_21,
+ RTL_GIGA_MAC_VER_22,
+ RTL_GIGA_MAC_VER_23,
+ RTL_GIGA_MAC_VER_24,
+ RTL_GIGA_MAC_VER_25,
+ RTL_GIGA_MAC_VER_26,
+ RTL_GIGA_MAC_VER_27,
+ RTL_GIGA_MAC_VER_28,
+ RTL_GIGA_MAC_VER_29,
+ RTL_GIGA_MAC_VER_30,
+ RTL_GIGA_MAC_VER_31,
+ RTL_GIGA_MAC_VER_32,
+ RTL_GIGA_MAC_VER_33,
+ RTL_GIGA_MAC_VER_34,
+ RTL_GIGA_MAC_VER_35,
+ RTL_GIGA_MAC_VER_36,
+ RTL_GIGA_MAC_NONE = 0xff,
+};
+
+enum rtl_tx_desc_version {
+ RTL_TD_0 = 0,
+ RTL_TD_1 = 1,
+};
+
+#define JUMBO_1K ETH_DATA_LEN
+#define JUMBO_4K (4*1024 - ETH_HLEN - 2)
+#define JUMBO_6K (6*1024 - ETH_HLEN - 2)
+#define JUMBO_7K (7*1024 - ETH_HLEN - 2)
+#define JUMBO_9K (9*1024 - ETH_HLEN - 2)
+
+#define _R(NAME,TD,FW,SZ,B) { \
+ .name = NAME, \
+ .txd_version = TD, \
+ .fw_name = FW, \
+ .jumbo_max = SZ, \
+ .jumbo_tx_csum = B \
+}
+
+static const struct {
+ const char *name;
+ enum rtl_tx_desc_version txd_version;
+ const char *fw_name;
+ u16 jumbo_max;
+ bool jumbo_tx_csum;
+} rtl_chip_infos[] = {
+ /* PCI devices. */
+ [RTL_GIGA_MAC_VER_01] =
+ _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
+ [RTL_GIGA_MAC_VER_02] =
+ _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
+ [RTL_GIGA_MAC_VER_03] =
+ _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
+ [RTL_GIGA_MAC_VER_04] =
+ _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
+ [RTL_GIGA_MAC_VER_05] =
+ _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
+ [RTL_GIGA_MAC_VER_06] =
+ _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
+ /* PCI-E devices. */
+ [RTL_GIGA_MAC_VER_07] =
+ _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
+ [RTL_GIGA_MAC_VER_08] =
+ _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
+ [RTL_GIGA_MAC_VER_09] =
+ _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
+ [RTL_GIGA_MAC_VER_10] =
+ _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
+ [RTL_GIGA_MAC_VER_11] =
+ _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
+ [RTL_GIGA_MAC_VER_12] =
+ _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
+ [RTL_GIGA_MAC_VER_13] =
+ _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
+ [RTL_GIGA_MAC_VER_14] =
+ _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
+ [RTL_GIGA_MAC_VER_15] =
+ _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
+ [RTL_GIGA_MAC_VER_16] =
+ _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
+ [RTL_GIGA_MAC_VER_17] =
+ _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
+ [RTL_GIGA_MAC_VER_18] =
+ _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
+ [RTL_GIGA_MAC_VER_19] =
+ _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
+ [RTL_GIGA_MAC_VER_20] =
+ _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
+ [RTL_GIGA_MAC_VER_21] =
+ _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
+ [RTL_GIGA_MAC_VER_22] =
+ _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
+ [RTL_GIGA_MAC_VER_23] =
+ _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
+ [RTL_GIGA_MAC_VER_24] =
+ _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
+ [RTL_GIGA_MAC_VER_25] =
+ _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
+ JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_26] =
+ _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
+ JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_27] =
+ _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_28] =
+ _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_29] =
+ _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
+ JUMBO_1K, true),
+ [RTL_GIGA_MAC_VER_30] =
+ _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
+ JUMBO_1K, true),
+ [RTL_GIGA_MAC_VER_31] =
+ _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_32] =
+ _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
+ JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_33] =
+ _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
+ JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_34] =
+ _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
+ JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_35] =
+ _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
+ JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_36] =
+ _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
+ JUMBO_9K, false),
+};
+#undef _R
+
+enum cfg_version {
+ RTL_CFG_0 = 0x00,
+ RTL_CFG_1,
+ RTL_CFG_2
+};
+
+static void rtl_hw_start_8169(struct net_device *);
+static void rtl_hw_start_8168(struct net_device *);
+static void rtl_hw_start_8101(struct net_device *);
+
+static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
+ { PCI_VENDOR_ID_LINKSYS, 0x1032,
+ PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
+ { 0x0001, 0x8168,
+ PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
+ {0,},
+};
+
+MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
+
+static int rx_buf_sz = 16383;
+static int use_dac;
+static struct {
+ u32 msg_enable;
+} debug = { -1 };
+
+enum rtl_registers {
+ MAC0 = 0, /* Ethernet hardware address. */
+ MAC4 = 4,
+ MAR0 = 8, /* Multicast filter. */
+ CounterAddrLow = 0x10,
+ CounterAddrHigh = 0x14,
+ TxDescStartAddrLow = 0x20,
+ TxDescStartAddrHigh = 0x24,
+ TxHDescStartAddrLow = 0x28,
+ TxHDescStartAddrHigh = 0x2c,
+ FLASH = 0x30,
+ ERSR = 0x36,
+ ChipCmd = 0x37,
+ TxPoll = 0x38,
+ IntrMask = 0x3c,
+ IntrStatus = 0x3e,
+
+ TxConfig = 0x40,
+#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
+#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
+
+ RxConfig = 0x44,
+#define RX128_INT_EN (1 << 15) /* 8111c and later */
+#define RX_MULTI_EN (1 << 14) /* 8111c only */
+#define RXCFG_FIFO_SHIFT 13
+ /* No threshold before first PCI xfer */
+#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
+#define RXCFG_DMA_SHIFT 8
+ /* Unlimited maximum PCI burst. */
+#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
+
+ RxMissed = 0x4c,
+ Cfg9346 = 0x50,
+ Config0 = 0x51,
+ Config1 = 0x52,
+ Config2 = 0x53,
+ Config3 = 0x54,
+ Config4 = 0x55,
+ Config5 = 0x56,
+ MultiIntr = 0x5c,
+ PHYAR = 0x60,
+ PHYstatus = 0x6c,
+ RxMaxSize = 0xda,
+ CPlusCmd = 0xe0,
+ IntrMitigate = 0xe2,
+ RxDescAddrLow = 0xe4,
+ RxDescAddrHigh = 0xe8,
+ EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
+
+#define NoEarlyTx 0x3f /* Max value : no early transmit. */
+
+ MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
+
+#define TxPacketMax (8064 >> 7)
+#define EarlySize 0x27
+
+ FuncEvent = 0xf0,
+ FuncEventMask = 0xf4,
+ FuncPresetState = 0xf8,
+ FuncForceEvent = 0xfc,
+};
+
+enum rtl8110_registers {
+ TBICSR = 0x64,
+ TBI_ANAR = 0x68,
+ TBI_LPAR = 0x6a,
+};
+
+enum rtl8168_8101_registers {
+ CSIDR = 0x64,
+ CSIAR = 0x68,
+#define CSIAR_FLAG 0x80000000
+#define CSIAR_WRITE_CMD 0x80000000
+#define CSIAR_BYTE_ENABLE 0x0f
+#define CSIAR_BYTE_ENABLE_SHIFT 12
+#define CSIAR_ADDR_MASK 0x0fff
+ PMCH = 0x6f,
+ EPHYAR = 0x80,
+#define EPHYAR_FLAG 0x80000000
+#define EPHYAR_WRITE_CMD 0x80000000
+#define EPHYAR_REG_MASK 0x1f
+#define EPHYAR_REG_SHIFT 16
+#define EPHYAR_DATA_MASK 0xffff
+ DLLPR = 0xd0,
+#define PFM_EN (1 << 6)
+ DBG_REG = 0xd1,
+#define FIX_NAK_1 (1 << 4)
+#define FIX_NAK_2 (1 << 3)
+ TWSI = 0xd2,
+ MCU = 0xd3,
+#define NOW_IS_OOB (1 << 7)
+#define EN_NDP (1 << 3)
+#define EN_OOB_RESET (1 << 2)
+ EFUSEAR = 0xdc,
+#define EFUSEAR_FLAG 0x80000000
+#define EFUSEAR_WRITE_CMD 0x80000000
+#define EFUSEAR_READ_CMD 0x00000000
+#define EFUSEAR_REG_MASK 0x03ff
+#define EFUSEAR_REG_SHIFT 8
+#define EFUSEAR_DATA_MASK 0xff
+};
+
+enum rtl8168_registers {
+ LED_FREQ = 0x1a,
+ EEE_LED = 0x1b,
+ ERIDR = 0x70,
+ ERIAR = 0x74,
+#define ERIAR_FLAG 0x80000000
+#define ERIAR_WRITE_CMD 0x80000000
+#define ERIAR_READ_CMD 0x00000000
+#define ERIAR_ADDR_BYTE_ALIGN 4
+#define ERIAR_TYPE_SHIFT 16
+#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
+#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
+#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
+#define ERIAR_MASK_SHIFT 12
+#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
+#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
+#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
+ EPHY_RXER_NUM = 0x7c,
+ OCPDR = 0xb0, /* OCP GPHY access */
+#define OCPDR_WRITE_CMD 0x80000000
+#define OCPDR_READ_CMD 0x00000000
+#define OCPDR_REG_MASK 0x7f
+#define OCPDR_GPHY_REG_SHIFT 16
+#define OCPDR_DATA_MASK 0xffff
+ OCPAR = 0xb4,
+#define OCPAR_FLAG 0x80000000
+#define OCPAR_GPHY_WRITE_CMD 0x8000f060
+#define OCPAR_GPHY_READ_CMD 0x0000f060
+ RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
+ MISC = 0xf0, /* 8168e only. */
+#define TXPLA_RST (1 << 29)
+#define PWM_EN (1 << 22)
+};
+
+enum rtl_register_content {
+ /* InterruptStatusBits */
+ SYSErr = 0x8000,
+ PCSTimeout = 0x4000,
+ SWInt = 0x0100,
+ TxDescUnavail = 0x0080,
+ RxFIFOOver = 0x0040,
+ LinkChg = 0x0020,
+ RxOverflow = 0x0010,
+ TxErr = 0x0008,
+ TxOK = 0x0004,
+ RxErr = 0x0002,
+ RxOK = 0x0001,
+
+ /* RxStatusDesc */
+ RxBOVF = (1 << 24),
+ RxFOVF = (1 << 23),
+ RxRWT = (1 << 22),
+ RxRES = (1 << 21),
+ RxRUNT = (1 << 20),
+ RxCRC = (1 << 19),
+
+ /* ChipCmdBits */
+ StopReq = 0x80,
+ CmdReset = 0x10,
+ CmdRxEnb = 0x08,
+ CmdTxEnb = 0x04,
+ RxBufEmpty = 0x01,
+
+ /* TXPoll register p.5 */
+ HPQ = 0x80, /* Poll cmd on the high prio queue */
+ NPQ = 0x40, /* Poll cmd on the low prio queue */
+ FSWInt = 0x01, /* Forced software interrupt */
+
+ /* Cfg9346Bits */
+ Cfg9346_Lock = 0x00,
+ Cfg9346_Unlock = 0xc0,
+
+ /* rx_mode_bits */
+ AcceptErr = 0x20,
+ AcceptRunt = 0x10,
+ AcceptBroadcast = 0x08,
+ AcceptMulticast = 0x04,
+ AcceptMyPhys = 0x02,
+ AcceptAllPhys = 0x01,
+#define RX_CONFIG_ACCEPT_MASK 0x3f
+
+ /* TxConfigBits */
+ TxInterFrameGapShift = 24,
+ TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
+
+ /* Config1 register p.24 */
+ LEDS1 = (1 << 7),
+ LEDS0 = (1 << 6),
+ MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */
+ Speed_down = (1 << 4),
+ MEMMAP = (1 << 3),
+ IOMAP = (1 << 2),
+ VPD = (1 << 1),
+ PMEnable = (1 << 0), /* Power Management Enable */
+
+ /* Config2 register p. 25 */
+ PCI_Clock_66MHz = 0x01,
+ PCI_Clock_33MHz = 0x00,
+
+ /* Config3 register p.25 */
+ MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
+ LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
+ Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
+ Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
+
+ /* Config4 register */
+ Jumbo_En1 = (1 << 1), /* 8168 only. Reserved in the 8168b */
+
+ /* Config5 register p.27 */
+ BWF = (1 << 6), /* Accept Broadcast wakeup frame */
+ MWF = (1 << 5), /* Accept Multicast wakeup frame */
+ UWF = (1 << 4), /* Accept Unicast wakeup frame */
+ Spi_en = (1 << 3),
+ LanWake = (1 << 1), /* LanWake enable/disable */
+ PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
+
+ /* TBICSR p.28 */
+ TBIReset = 0x80000000,
+ TBILoopback = 0x40000000,
+ TBINwEnable = 0x20000000,
+ TBINwRestart = 0x10000000,
+ TBILinkOk = 0x02000000,
+ TBINwComplete = 0x01000000,
+
+ /* CPlusCmd p.31 */
+ EnableBist = (1 << 15), // 8168 8101
+ Mac_dbgo_oe = (1 << 14), // 8168 8101
+ Normal_mode = (1 << 13), // unused
+ Force_half_dup = (1 << 12), // 8168 8101
+ Force_rxflow_en = (1 << 11), // 8168 8101
+ Force_txflow_en = (1 << 10), // 8168 8101
+ Cxpl_dbg_sel = (1 << 9), // 8168 8101
+ ASF = (1 << 8), // 8168 8101
+ PktCntrDisable = (1 << 7), // 8168 8101
+ Mac_dbgo_sel = 0x001c, // 8168
+ RxVlan = (1 << 6),
+ RxChkSum = (1 << 5),
+ PCIDAC = (1 << 4),
+ PCIMulRW = (1 << 3),
+ INTT_0 = 0x0000, // 8168
+ INTT_1 = 0x0001, // 8168
+ INTT_2 = 0x0002, // 8168
+ INTT_3 = 0x0003, // 8168
+
+ /* rtl8169_PHYstatus */
+ TBI_Enable = 0x80,
+ TxFlowCtrl = 0x40,
+ RxFlowCtrl = 0x20,
+ _1000bpsF = 0x10,
+ _100bps = 0x08,
+ _10bps = 0x04,
+ LinkStatus = 0x02,
+ FullDup = 0x01,
+
+ /* _TBICSRBit */
+ TBILinkOK = 0x02000000,
+
+ /* DumpCounterCommand */
+ CounterDump = 0x8,
+};
+
+enum rtl_desc_bit {
+ /* First doubleword. */
+ DescOwn = (1 << 31), /* Descriptor is owned by NIC */
+ RingEnd = (1 << 30), /* End of descriptor ring */
+ FirstFrag = (1 << 29), /* First segment of a packet */
+ LastFrag = (1 << 28), /* Final segment of a packet */
+};
+
+/* Generic case. */
+enum rtl_tx_desc_bit {
+ /* First doubleword. */
+ TD_LSO = (1 << 27), /* Large Send Offload */
+#define TD_MSS_MAX 0x07ffu /* MSS value */
+
+ /* Second doubleword. */
+ TxVlanTag = (1 << 17), /* Add VLAN tag */
+};
+
+/* 8169, 8168b and 810x except 8102e. */
+enum rtl_tx_desc_bit_0 {
+ /* First doubleword. */
+#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
+ TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
+ TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
+ TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
+};
+
+/* 8102e, 8168c and beyond. */
+enum rtl_tx_desc_bit_1 {
+ /* Second doubleword. */
+#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
+ TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
+ TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
+ TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
+};
+
+static const struct rtl_tx_desc_info {
+ struct {
+ u32 udp;
+ u32 tcp;
+ } checksum;
+ u16 mss_shift;
+ u16 opts_offset;
+} tx_desc_info [] = {
+ [RTL_TD_0] = {
+ .checksum = {
+ .udp = TD0_IP_CS | TD0_UDP_CS,
+ .tcp = TD0_IP_CS | TD0_TCP_CS
+ },
+ .mss_shift = TD0_MSS_SHIFT,
+ .opts_offset = 0
+ },
+ [RTL_TD_1] = {
+ .checksum = {
+ .udp = TD1_IP_CS | TD1_UDP_CS,
+ .tcp = TD1_IP_CS | TD1_TCP_CS
+ },
+ .mss_shift = TD1_MSS_SHIFT,
+ .opts_offset = 1
+ }
+};
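+
+/*
+ * Illustrative sketch (not from the original code): a transmit path can
+ * pick the per-generation checksum bits from this table, e.g. for a TCP
+ * frame ("tp" and "opts" being assumed locals):
+ *
+ *	const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
+ *	opts[info->opts_offset] |= info->checksum.tcp;
+ */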
+
+enum rtl_rx_desc_bit {
+ /* Rx private */
+ PID1 = (1 << 18), /* Protocol ID bit 1/2 */
+ PID0 = (1 << 17), /* Protocol ID bit 2/2 */
+
+#define RxProtoUDP (PID1)
+#define RxProtoTCP (PID0)
+#define RxProtoIP (PID1 | PID0)
+#define RxProtoMask RxProtoIP
+
+ IPFail = (1 << 16), /* IP checksum failed */
+ UDPFail = (1 << 15), /* UDP/IP checksum failed */
+ TCPFail = (1 << 14), /* TCP/IP checksum failed */
+ RxVlanTag = (1 << 16), /* VLAN tag available */
+};
+
+#define RsvdMask 0x3fffc000
+
+struct TxDesc {
+ __le32 opts1;
+ __le32 opts2;
+ __le64 addr;
+};
+
+struct RxDesc {
+ __le32 opts1;
+ __le32 opts2;
+ __le64 addr;
+};
+
+struct ring_info {
+ struct sk_buff *skb;
+ u32 len;
+ u8 __pad[sizeof(void *) - sizeof(u32)];
+};
+
+enum features {
+ RTL_FEATURE_WOL = (1 << 0),
+ RTL_FEATURE_MSI = (1 << 1),
+ RTL_FEATURE_GMII = (1 << 2),
+};
+
+struct rtl8169_counters {
+ __le64 tx_packets;
+ __le64 rx_packets;
+ __le64 tx_errors;
+ __le32 rx_errors;
+ __le16 rx_missed;
+ __le16 align_errors;
+ __le32 tx_one_collision;
+ __le32 tx_multi_collision;
+ __le64 rx_unicast;
+ __le64 rx_broadcast;
+ __le32 rx_multicast;
+ __le16 tx_aborted;
+ __le16 tx_underrun;
+};
+
+struct rtl8169_private {
+ void __iomem *mmio_addr; /* memory map physical address */
+ struct pci_dev *pci_dev;
+ struct net_device *dev;
+ struct napi_struct napi;
+ spinlock_t lock;
+ u32 msg_enable;
+ u16 txd_version;
+ u16 mac_version;
+ u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
+ u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
+ u32 dirty_rx;
+ u32 dirty_tx;
+ struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
+ struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
+ dma_addr_t TxPhyAddr;
+ dma_addr_t RxPhyAddr;
+ void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
+ struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
+ struct timer_list timer;
+ u16 cp_cmd;
+ u16 intr_event;
+ u16 napi_event;
+ u16 intr_mask;
+
+ struct mdio_ops {
+ void (*write)(void __iomem *, int, int);
+ int (*read)(void __iomem *, int);
+ } mdio_ops;
+
+ struct pll_power_ops {
+ void (*down)(struct rtl8169_private *);
+ void (*up)(struct rtl8169_private *);
+ } pll_power_ops;
+
+ struct jumbo_ops {
+ void (*enable)(struct rtl8169_private *);
+ void (*disable)(struct rtl8169_private *);
+ } jumbo_ops;
+
+ int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
+ int (*get_settings)(struct net_device *, struct ethtool_cmd *);
+ void (*phy_reset_enable)(struct rtl8169_private *tp);
+ void (*hw_start)(struct net_device *);
+ unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
+ unsigned int (*link_ok)(void __iomem *);
+ int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
+ struct delayed_work task;
+ unsigned features;
+
+ struct mii_if_info mii;
+ struct rtl8169_counters counters;
+ u32 saved_wolopts;
+ u32 opts1_mask;
+
+ struct rtl_fw {
+ const struct firmware *fw;
+
+#define RTL_VER_SIZE 32
+
+ char version[RTL_VER_SIZE];
+
+ struct rtl_fw_phy_action {
+ __le32 *code;
+ size_t size;
+ } phy_action;
+ } *rtl_fw;
+#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
+};
+
+MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
+MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
+module_param(use_dac, int, 0);
+MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32-bit PCI slot.");
+module_param_named(debug, debug.msg_enable, int, 0);
+MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(RTL8169_VERSION);
+MODULE_FIRMWARE(FIRMWARE_8168D_1);
+MODULE_FIRMWARE(FIRMWARE_8168D_2);
+MODULE_FIRMWARE(FIRMWARE_8168E_1);
+MODULE_FIRMWARE(FIRMWARE_8168E_2);
+MODULE_FIRMWARE(FIRMWARE_8168E_3);
+MODULE_FIRMWARE(FIRMWARE_8105E_1);
+MODULE_FIRMWARE(FIRMWARE_8168F_1);
+MODULE_FIRMWARE(FIRMWARE_8168F_2);
+
+static int rtl8169_open(struct net_device *dev);
+static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance);
+static int rtl8169_init_ring(struct net_device *dev);
+static void rtl_hw_start(struct net_device *dev);
+static int rtl8169_close(struct net_device *dev);
+static void rtl_set_rx_mode(struct net_device *dev);
+static void rtl8169_tx_timeout(struct net_device *dev);
+static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
+static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
+ void __iomem *, u32 budget);
+static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
+static void rtl8169_down(struct net_device *dev);
+static void rtl8169_rx_clear(struct rtl8169_private *tp);
+static int rtl8169_poll(struct napi_struct *napi, int budget);
+
+static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
+{
+ int cap = pci_pcie_cap(pdev);
+
+ if (cap) {
+ u16 ctl;
+
+ pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
+ ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
+ pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
+ }
+}
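+
+/* Callers pass the desired read request size already shifted into the
+   READRQ field, e.g. rtl_tx_performance_tweak(pdev,
+   0x5 << MAX_READ_REQUEST_SHIFT) (illustrative value). */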
+
+static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ int i;
+
+ RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
+ for (i = 0; i < 20; i++) {
+ udelay(100);
+ if (RTL_R32(OCPAR) & OCPAR_FLAG)
+ break;
+ }
+ return RTL_R32(OCPDR);
+}
+
+static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ int i;
+
+ RTL_W32(OCPDR, data);
+ RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
+ for (i = 0; i < 20; i++) {
+ udelay(100);
+ if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0)
+ break;
+ }
+}
+
+static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ int i;
+
+ RTL_W8(ERIDR, cmd);
+ RTL_W32(ERIAR, 0x800010e8);
+ msleep(2);
+ for (i = 0; i < 5; i++) {
+ udelay(100);
+ if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
+ break;
+ }
+
+ ocp_write(tp, 0x1, 0x30, 0x00000001);
+}
+
+#define OOB_CMD_RESET 0x00
+#define OOB_CMD_DRIVER_START 0x05
+#define OOB_CMD_DRIVER_STOP 0x06
+
+static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
+{
+ return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
+}
+
+static void rtl8168_driver_start(struct rtl8169_private *tp)
+{
+ u16 reg;
+ int i;
+
+ rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
+
+ reg = rtl8168_get_ocp_reg(tp);
+
+ for (i = 0; i < 10; i++) {
+ msleep(10);
+ if (ocp_read(tp, 0x0f, reg) & 0x00000800)
+ break;
+ }
+}
+
+static void rtl8168_driver_stop(struct rtl8169_private *tp)
+{
+ u16 reg;
+ int i;
+
+ rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
+
+ reg = rtl8168_get_ocp_reg(tp);
+
+ for (i = 0; i < 10; i++) {
+ msleep(10);
+ if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0)
+ break;
+ }
+}
+
+static int r8168dp_check_dash(struct rtl8169_private *tp)
+{
+ u16 reg = rtl8168_get_ocp_reg(tp);
+
+ return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
+}
+
+static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+{
+ int i;
+
+ RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0x1f) << 16 | (value & 0xffff));
+
+ for (i = 20; i > 0; i--) {
+ /*
+ * Check if the RTL8169 has completed writing to the specified
+ * MII register.
+ */
+ if (!(RTL_R32(PHYAR) & 0x80000000))
+ break;
+ udelay(25);
+ }
+ /*
+ * According to hardware specs a 20us delay is required after write
+ * complete indication, but before sending next command.
+ */
+ udelay(20);
+}
+
+static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
+{
+ int i, value = -1;
+
+ RTL_W32(PHYAR, 0x0 | (reg_addr & 0x1f) << 16);
+
+ for (i = 20; i > 0; i--) {
+ /*
+ * Check if the RTL8169 has completed retrieving data from
+ * the specified MII register.
+ */
+ if (RTL_R32(PHYAR) & 0x80000000) {
+ value = RTL_R32(PHYAR) & 0xffff;
+ break;
+ }
+ udelay(25);
+ }
+ /*
+ * According to hardware specs a 20us delay is required after read
+ * complete indication, but before sending next command.
+ */
+ udelay(20);
+
+ return value;
+}
+
+static void r8168dp_1_mdio_access(void __iomem *ioaddr, int reg_addr, u32 data)
+{
+ int i;
+
+ RTL_W32(OCPDR, data |
+ ((reg_addr & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
+ RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
+ RTL_W32(EPHY_RXER_NUM, 0);
+
+ for (i = 0; i < 100; i++) {
+ mdelay(1);
+ if (!(RTL_R32(OCPAR) & OCPAR_FLAG))
+ break;
+ }
+}
+
+static void r8168dp_1_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+{
+ r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_WRITE_CMD |
+ (value & OCPDR_DATA_MASK));
+}
+
+static int r8168dp_1_mdio_read(void __iomem *ioaddr, int reg_addr)
+{
+ int i;
+
+ r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_READ_CMD);
+
+ mdelay(1);
+ RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
+ RTL_W32(EPHY_RXER_NUM, 0);
+
+ for (i = 0; i < 100; i++) {
+ mdelay(1);
+ if (RTL_R32(OCPAR) & OCPAR_FLAG)
+ break;
+ }
+
+ return RTL_R32(OCPDR) & OCPDR_DATA_MASK;
+}
+
+#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
+
+static void r8168dp_2_mdio_start(void __iomem *ioaddr)
+{
+ RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
+}
+
+static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
+{
+ RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
+}
+
+static void r8168dp_2_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+{
+ r8168dp_2_mdio_start(ioaddr);
+
+ r8169_mdio_write(ioaddr, reg_addr, value);
+
+ r8168dp_2_mdio_stop(ioaddr);
+}
+
+static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr)
+{
+ int value;
+
+ r8168dp_2_mdio_start(ioaddr);
+
+ value = r8169_mdio_read(ioaddr, reg_addr);
+
+ r8168dp_2_mdio_stop(ioaddr);
+
+ return value;
+}
+
+static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
+{
+ tp->mdio_ops.write(tp->mmio_addr, location, val);
+}
+
+static int rtl_readphy(struct rtl8169_private *tp, int location)
+{
+ return tp->mdio_ops.read(tp->mmio_addr, location);
+}
+
+static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
+{
+ rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
+}
+
+static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
+{
+ int val;
+
+ val = rtl_readphy(tp, reg_addr);
+ rtl_writephy(tp, reg_addr, (val | p) & ~m);
+}
+
+static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
+ int val)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl_writephy(tp, location, val);
+}
+
+static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ return rtl_readphy(tp, location);
+}
+
+static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
+{
+ unsigned int i;
+
+ RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
+ (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
+
+ for (i = 0; i < 100; i++) {
+ if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
+ break;
+ udelay(10);
+ }
+}
+
+static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
+{
+ u16 value = 0xffff;
+ unsigned int i;
+
+ RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
+
+ for (i = 0; i < 100; i++) {
+ if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
+ value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
+ break;
+ }
+ udelay(10);
+ }
+
+ return value;
+}
+
+static void rtl_csi_write(void __iomem *ioaddr, int addr, int value)
+{
+ unsigned int i;
+
+ RTL_W32(CSIDR, value);
+ RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
+ CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
+
+ for (i = 0; i < 100; i++) {
+ if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
+ break;
+ udelay(10);
+ }
+}
+
+static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
+{
+ u32 value = ~0x00;
+ unsigned int i;
+
+ RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
+ CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
+
+ for (i = 0; i < 100; i++) {
+ if (RTL_R32(CSIAR) & CSIAR_FLAG) {
+ value = RTL_R32(CSIDR);
+ break;
+ }
+ udelay(10);
+ }
+
+ return value;
+}
+
+static
+void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type)
+{
+ unsigned int i;
+
+ BUG_ON((addr & 3) || (mask == 0));
+ RTL_W32(ERIDR, val);
+ RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
+
+ for (i = 0; i < 100; i++) {
+ if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
+ break;
+ udelay(100);
+ }
+}
+
+static u32 rtl_eri_read(void __iomem *ioaddr, int addr, int type)
+{
+ u32 value = ~0x00;
+ unsigned int i;
+
+ RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
+
+ for (i = 0; i < 100; i++) {
+ if (RTL_R32(ERIAR) & ERIAR_FLAG) {
+ value = RTL_R32(ERIDR);
+ break;
+ }
+ udelay(100);
+ }
+
+ return value;
+}
+
+static void
+rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type)
+{
+ u32 val;
+
+ val = rtl_eri_read(ioaddr, addr, type);
+ rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type);
+}
+
+struct exgmac_reg {
+ u16 addr;
+ u16 mask;
+ u32 val;
+};
+
+static void rtl_write_exgmac_batch(void __iomem *ioaddr,
+ const struct exgmac_reg *r, int len)
+{
+ while (len-- > 0) {
+ rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC);
+ r++;
+ }
+}
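+
+/*
+ * Illustrative sketch (not from the original code): batching a pair of
+ * hypothetical ERI writes through the helper above.
+ *
+ *	static const struct exgmac_reg e[] = {
+ *		{ 0x00e0, ERIAR_MASK_1111, 0x00000000 },
+ *		{ 0x00e4, ERIAR_MASK_1111, 0x00000000 },
+ *	};
+ *
+ *	rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e));
+ */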
+
+static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
+{
+ u8 value = 0xff;
+ unsigned int i;
+
+ RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
+
+ for (i = 0; i < 300; i++) {
+ if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) {
+ value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK;
+ break;
+ }
+ udelay(100);
+ }
+
+ return value;
+}
+
+static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
+{
+ RTL_W16(IntrMask, 0x0000);
+
+ RTL_W16(IntrStatus, 0xffff);
+}
+
+static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(TBICSR) & TBIReset;
+}
+
+static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
+{
+ return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
+}
+
+static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
+{
+ return RTL_R32(TBICSR) & TBILinkOk;
+}
+
+static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
+{
+ return RTL_R8(PHYstatus) & LinkStatus;
+}
+
+static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
+}
+
+static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
+{
+ unsigned int val;
+
+ val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
+ rtl_writephy(tp, MII_BMCR, val & 0xffff);
+}
+
+static void rtl_link_chg_patch(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct net_device *dev = tp->dev;
+
+ if (!netif_running(dev))
+ return;
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+ if (RTL_R8(PHYstatus) & _1000bpsF) {
+ rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
+ 0x00000011, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
+ 0x00000005, ERIAR_EXGMAC);
+ } else if (RTL_R8(PHYstatus) & _100bps) {
+ rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
+ 0x0000001f, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
+ 0x00000005, ERIAR_EXGMAC);
+ } else {
+ rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
+ 0x0000001f, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
+ 0x0000003f, ERIAR_EXGMAC);
+ }
+ /* Reset packet filter */
+ rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
+ ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
+ ERIAR_EXGMAC);
+ } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_36) {
+ if (RTL_R8(PHYstatus) & _1000bpsF) {
+ rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
+ 0x00000011, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
+ 0x00000005, ERIAR_EXGMAC);
+ } else {
+ rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
+ 0x0000001f, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
+ 0x0000003f, ERIAR_EXGMAC);
+ }
+ }
+}
+
+static void __rtl8169_check_link_status(struct net_device *dev,
+ struct rtl8169_private *tp,
+ void __iomem *ioaddr, bool pm)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->lock, flags);
+ if (tp->link_ok(ioaddr)) {
+ rtl_link_chg_patch(tp);
+ /* Cancel a scheduled suspend, if any. */
+ if (pm)
+ pm_request_resume(&tp->pci_dev->dev);
+ netif_carrier_on(dev);
+ if (net_ratelimit())
+ netif_info(tp, ifup, dev, "link up\n");
+ } else {
+ netif_carrier_off(dev);
+ netif_info(tp, ifdown, dev, "link down\n");
+ if (pm)
+ pm_schedule_suspend(&tp->pci_dev->dev, 100);
+ }
+ spin_unlock_irqrestore(&tp->lock, flags);
+}
+
+static void rtl8169_check_link_status(struct net_device *dev,
+ struct rtl8169_private *tp,
+ void __iomem *ioaddr)
+{
+ __rtl8169_check_link_status(dev, tp, ioaddr, false);
+}
+
+#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
+
+static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ u8 options;
+ u32 wolopts = 0;
+
+ options = RTL_R8(Config1);
+ if (!(options & PMEnable))
+ return 0;
+
+ options = RTL_R8(Config3);
+ if (options & LinkUp)
+ wolopts |= WAKE_PHY;
+ if (options & MagicPacket)
+ wolopts |= WAKE_MAGIC;
+
+ options = RTL_R8(Config5);
+ if (options & UWF)
+ wolopts |= WAKE_UCAST;
+ if (options & BWF)
+ wolopts |= WAKE_BCAST;
+ if (options & MWF)
+ wolopts |= WAKE_MCAST;
+
+ return wolopts;
+}
+
+static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ spin_lock_irq(&tp->lock);
+
+ wol->supported = WAKE_ANY;
+ wol->wolopts = __rtl8169_get_wol(tp);
+
+ spin_unlock_irq(&tp->lock);
+}
+
+static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned int i;
+ static const struct {
+ u32 opt;
+ u16 reg;
+ u8 mask;
+ } cfg[] = {
+ { WAKE_ANY, Config1, PMEnable },
+ { WAKE_PHY, Config3, LinkUp },
+ { WAKE_MAGIC, Config3, MagicPacket },
+ { WAKE_UCAST, Config5, UWF },
+ { WAKE_BCAST, Config5, BWF },
+ { WAKE_MCAST, Config5, MWF },
+ { WAKE_ANY, Config5, LanWake }
+ };
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+
+ for (i = 0; i < ARRAY_SIZE(cfg); i++) {
+ u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
+ if (wolopts & cfg[i].opt)
+ options |= cfg[i].mask;
+ RTL_W8(cfg[i].reg, options);
+ }
+
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+}
+
+static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ spin_lock_irq(&tp->lock);
+
+ if (wol->wolopts)
+ tp->features |= RTL_FEATURE_WOL;
+ else
+ tp->features &= ~RTL_FEATURE_WOL;
+ __rtl8169_set_wol(tp, wol->wolopts);
+ spin_unlock_irq(&tp->lock);
+
+ device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
+
+ return 0;
+}
+
+static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
+{
+ return rtl_chip_infos[tp->mac_version].fw_name;
+}
+
+static void rtl8169_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl_fw *rtl_fw = tp->rtl_fw;
+
+ strcpy(info->driver, MODULENAME);
+ strcpy(info->version, RTL8169_VERSION);
+ strcpy(info->bus_info, pci_name(tp->pci_dev));
+ BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
+ strcpy(info->fw_version, IS_ERR_OR_NULL(rtl_fw) ? "N/A" :
+ rtl_fw->version);
+}
+
+static int rtl8169_get_regs_len(struct net_device *dev)
+{
+ return R8169_REGS_SIZE;
+}
+
+static int rtl8169_set_speed_tbi(struct net_device *dev,
+ u8 autoneg, u16 speed, u8 duplex, u32 ignored)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ int ret = 0;
+ u32 reg;
+
+ reg = RTL_R32(TBICSR);
+ if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
+ (duplex == DUPLEX_FULL)) {
+ RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
+ } else if (autoneg == AUTONEG_ENABLE)
+ RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
+ else {
+ netif_warn(tp, link, dev,
+ "incorrect speed setting refused in TBI mode\n");
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int rtl8169_set_speed_xmii(struct net_device *dev,
+ u8 autoneg, u16 speed, u8 duplex, u32 adv)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ int giga_ctrl, bmcr;
+ int rc = -EINVAL;
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ if (autoneg == AUTONEG_ENABLE) {
+ int auto_nego;
+
+ auto_nego = rtl_readphy(tp, MII_ADVERTISE);
+ auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL);
+
+ if (adv & ADVERTISED_10baseT_Half)
+ auto_nego |= ADVERTISE_10HALF;
+ if (adv & ADVERTISED_10baseT_Full)
+ auto_nego |= ADVERTISE_10FULL;
+ if (adv & ADVERTISED_100baseT_Half)
+ auto_nego |= ADVERTISE_100HALF;
+ if (adv & ADVERTISED_100baseT_Full)
+ auto_nego |= ADVERTISE_100FULL;
+
+ auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+
+ giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
+ giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+
+ /* The 8100e/8101e/8102e do Fast Ethernet only. */
+ if (tp->mii.supports_gmii) {
+ if (adv & ADVERTISED_1000baseT_Half)
+ giga_ctrl |= ADVERTISE_1000HALF;
+ if (adv & ADVERTISED_1000baseT_Full)
+ giga_ctrl |= ADVERTISE_1000FULL;
+ } else if (adv & (ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full)) {
+ netif_info(tp, link, dev,
+ "PHY does not support 1000Mbps\n");
+ goto out;
+ }
+
+ bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
+
+ rtl_writephy(tp, MII_ADVERTISE, auto_nego);
+ rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
+ } else {
+ giga_ctrl = 0;
+
+ if (speed == SPEED_10)
+ bmcr = 0;
+ else if (speed == SPEED_100)
+ bmcr = BMCR_SPEED100;
+ else
+ goto out;
+
+ if (duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
+ }
+
+ rtl_writephy(tp, MII_BMCR, bmcr);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_03) {
+ if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
+ rtl_writephy(tp, 0x17, 0x2138);
+ rtl_writephy(tp, 0x0e, 0x0260);
+ } else {
+ rtl_writephy(tp, 0x17, 0x2108);
+ rtl_writephy(tp, 0x0e, 0x0000);
+ }
+ }
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static int rtl8169_set_speed(struct net_device *dev,
+ u8 autoneg, u16 speed, u8 duplex, u32 advertising)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ int ret;
+
+ ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
+ if (ret < 0)
+ goto out;
+
+ if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
+ (advertising & ADVERTISED_1000baseT_Full)) {
+ mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
+ }
+out:
+ return ret;
+}
+
+static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned long flags;
+ int ret;
+
+ del_timer_sync(&tp->timer);
+
+ spin_lock_irqsave(&tp->lock, flags);
+ ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
+ cmd->duplex, cmd->advertising);
+ spin_unlock_irqrestore(&tp->lock, flags);
+
+ return ret;
+}
+
+static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (dev->mtu > TD_MSS_MAX)
+ features &= ~NETIF_F_ALL_TSO;
+
+ if (dev->mtu > JUMBO_1K &&
+ !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
+ features &= ~NETIF_F_IP_CSUM;
+
+ return features;
+}
+
+static int rtl8169_set_features(struct net_device *dev, u32 features)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->lock, flags);
+
+ if (features & NETIF_F_RXCSUM)
+ tp->cp_cmd |= RxChkSum;
+ else
+ tp->cp_cmd &= ~RxChkSum;
+
+ if (dev->features & NETIF_F_HW_VLAN_RX)
+ tp->cp_cmd |= RxVlan;
+ else
+ tp->cp_cmd &= ~RxVlan;
+
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+ RTL_R16(CPlusCmd);
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+
+ return 0;
+}
+
+static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
+ struct sk_buff *skb)
+{
+ return (vlan_tx_tag_present(skb)) ?
+ TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
+}
+
+static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
+{
+ u32 opts2 = le32_to_cpu(desc->opts2);
+
+ if (opts2 & RxVlanTag)
+ __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
+
+ desc->opts2 = 0;
+}
+
+static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ u32 status;
+
+ cmd->supported =
+ SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
+ cmd->port = PORT_FIBRE;
+ cmd->transceiver = XCVR_INTERNAL;
+
+ status = RTL_R32(TBICSR);
+ cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
+ cmd->autoneg = !!(status & TBINwEnable);
+
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
+ cmd->duplex = DUPLEX_FULL; /* Always set */
+
+ return 0;
+}
+
+static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ return mii_ethtool_gset(&tp->mii, cmd);
+}
+
+static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&tp->lock, flags);
+
+ rc = tp->get_settings(dev, cmd);
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+ return rc;
+}
+
+static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned long flags;
+
+ if (regs->len > R8169_REGS_SIZE)
+ regs->len = R8169_REGS_SIZE;
+
+ spin_lock_irqsave(&tp->lock, flags);
+ memcpy_fromio(p, tp->mmio_addr, regs->len);
+ spin_unlock_irqrestore(&tp->lock, flags);
+}
+
+static u32 rtl8169_get_msglevel(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ return tp->msg_enable;
+}
+
+static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ tp->msg_enable = value;
+}
+
+static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
+ "tx_packets",
+ "rx_packets",
+ "tx_errors",
+ "rx_errors",
+ "rx_missed",
+ "align_errors",
+ "tx_single_collisions",
+ "tx_multi_collisions",
+ "unicast",
+ "broadcast",
+ "multicast",
+ "tx_aborted",
+ "tx_underrun",
+};
+
+static int rtl8169_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(rtl8169_gstrings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void rtl8169_update_counters(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct device *d = &tp->pci_dev->dev;
+ struct rtl8169_counters *counters;
+ dma_addr_t paddr;
+ u32 cmd;
+ int wait = 1000;
+
+ /*
+ * Some chips are unable to dump tally counters when the receiver
+ * is disabled.
+ */
+ if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
+ return;
+
+ counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
+ if (!counters)
+ return;
+
+ RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
+ cmd = (u64)paddr & DMA_BIT_MASK(32);
+ RTL_W32(CounterAddrLow, cmd);
+ RTL_W32(CounterAddrLow, cmd | CounterDump);
+
+ while (wait--) {
+ if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
+ memcpy(&tp->counters, counters, sizeof(*counters));
+ break;
+ }
+ udelay(10);
+ }
+
+ RTL_W32(CounterAddrLow, 0);
+ RTL_W32(CounterAddrHigh, 0);
+
+ dma_free_coherent(d, sizeof(*counters), counters, paddr);
+}
+
+static void rtl8169_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ ASSERT_RTNL();
+
+ rtl8169_update_counters(dev);
+
+ data[0] = le64_to_cpu(tp->counters.tx_packets);
+ data[1] = le64_to_cpu(tp->counters.rx_packets);
+ data[2] = le64_to_cpu(tp->counters.tx_errors);
+ data[3] = le32_to_cpu(tp->counters.rx_errors);
+ data[4] = le16_to_cpu(tp->counters.rx_missed);
+ data[5] = le16_to_cpu(tp->counters.align_errors);
+ data[6] = le32_to_cpu(tp->counters.tx_one_collision);
+ data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
+ data[8] = le64_to_cpu(tp->counters.rx_unicast);
+ data[9] = le64_to_cpu(tp->counters.rx_broadcast);
+ data[10] = le32_to_cpu(tp->counters.rx_multicast);
+ data[11] = le16_to_cpu(tp->counters.tx_aborted);
+ data[12] = le16_to_cpu(tp->counters.tx_underrun);
+}
+
+static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
+ break;
+ }
+}
+
+static const struct ethtool_ops rtl8169_ethtool_ops = {
+ .get_drvinfo = rtl8169_get_drvinfo,
+ .get_regs_len = rtl8169_get_regs_len,
+ .get_link = ethtool_op_get_link,
+ .get_settings = rtl8169_get_settings,
+ .set_settings = rtl8169_set_settings,
+ .get_msglevel = rtl8169_get_msglevel,
+ .set_msglevel = rtl8169_set_msglevel,
+ .get_regs = rtl8169_get_regs,
+ .get_wol = rtl8169_get_wol,
+ .set_wol = rtl8169_set_wol,
+ .get_strings = rtl8169_get_strings,
+ .get_sset_count = rtl8169_get_sset_count,
+ .get_ethtool_stats = rtl8169_get_ethtool_stats,
+};
+
+static void rtl8169_get_mac_version(struct rtl8169_private *tp,
+ struct net_device *dev, u8 default_version)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ /*
+ * The driver currently handles the 8168Bf and the 8168Be identically
+ * but they can be identified more specifically through the test below
+ * if needed:
+ *
+ * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
+ *
+ * Same thing for the 8101Eb and the 8101Ec:
+ *
+ * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
+ */
+ static const struct rtl_mac_info {
+ u32 mask;
+ u32 val;
+ int mac_version;
+ } mac_info[] = {
+ /* 8168F family. */
+ { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
+ { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 },
+
+ /* 8168E family. */
+ { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 },
+ { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 },
+ { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
+ { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
+
+ /* 8168D family. */
+ { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
+ { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
+ { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
+
+ /* 8168DP family. */
+ { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
+ { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
+ { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 },
+
+ /* 8168C family. */
+ { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
+ { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
+ { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
+ { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 },
+ { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 },
+ { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 },
+ { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 },
+ { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 },
+ { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 },
+
+ /* 8168B family. */
+ { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 },
+ { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 },
+ { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 },
+ { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
+
+ /* 8101 family. */
+ { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
+ { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
+ { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
+ { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
+ { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
+ { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
+ { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
+ { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
+ { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
+ { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
+ { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
+ { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
+ { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
+ { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
+ { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
+ { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
+ /* FIXME: where did these entries come from? -- FR */
+ { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
+ { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 },
+
+ /* 8110 family. */
+ { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 },
+ { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 },
+ { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 },
+ { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 },
+ { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 },
+ { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 },
+
+ /* Catch-all */
+ { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
+ };
+ const struct rtl_mac_info *p = mac_info;
+ u32 reg;
+
+ reg = RTL_R32(TxConfig);
+ while ((reg & p->mask) != p->val)
+ p++;
+ tp->mac_version = p->mac_version;
+
+ if (tp->mac_version == RTL_GIGA_MAC_NONE) {
+ netif_notice(tp, probe, dev,
+ "unknown MAC, using family default\n");
+ tp->mac_version = default_version;
+ }
+}
+
+static void rtl8169_print_mac_version(struct rtl8169_private *tp)
+{
+ dprintk("mac_version = 0x%02x\n", tp->mac_version);
+}
+
+struct phy_reg {
+ u16 reg;
+ u16 val;
+};
+
+static void rtl_writephy_batch(struct rtl8169_private *tp,
+ const struct phy_reg *regs, int len)
+{
+ while (len-- > 0) {
+ rtl_writephy(tp, regs->reg, regs->val);
+ regs++;
+ }
+}
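+
+/*
+ * Illustrative sketch (not from the original code): the intended calling
+ * pattern, with hypothetical register/value pairs (the first pair selects
+ * PHY page 0).
+ *
+ *	static const struct phy_reg phy_reg_init[] = {
+ *		{ 0x1f, 0x0000 },
+ *		{ 0x0e, 0x0000 }
+ *	};
+ *
+ *	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ */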
+
+#define PHY_READ 0x00000000
+#define PHY_DATA_OR 0x10000000
+#define PHY_DATA_AND 0x20000000
+#define PHY_BJMPN 0x30000000
+#define PHY_READ_EFUSE 0x40000000
+#define PHY_READ_MAC_BYTE 0x50000000
+#define PHY_WRITE_MAC_BYTE 0x60000000
+#define PHY_CLEAR_READCOUNT 0x70000000
+#define PHY_WRITE 0x80000000
+#define PHY_READCOUNT_EQ_SKIP 0x90000000
+#define PHY_COMP_EQ_SKIPN 0xa0000000
+#define PHY_COMP_NEQ_SKIPN 0xb0000000
+#define PHY_WRITE_PREVIOUS 0xc0000000
+#define PHY_SKIPN 0xd0000000
+#define PHY_DELAY_MS 0xe0000000
+#define PHY_WRITE_ERI_WORD 0xf0000000
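+
+/*
+ * Each action word above packs an opcode into its top nibble, a register
+ * number or jump operand into bits 27-16 and immediate data into the low
+ * 16 bits; rtl_phy_write_fw() below decodes the words exactly this way.
+ */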
+
+struct fw_info {
+ u32 magic;
+ char version[RTL_VER_SIZE];
+ __le32 fw_start;
+ __le32 fw_len;
+ u8 chksum;
+} __packed;
+
+#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
+
+static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
+{
+ const struct firmware *fw = rtl_fw->fw;
+ struct fw_info *fw_info = (struct fw_info *)fw->data;
+ struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
+ char *version = rtl_fw->version;
+ bool rc = false;
+
+ if (fw->size < FW_OPCODE_SIZE)
+ goto out;
+
+ if (!fw_info->magic) {
+ size_t i, size, start;
+ u8 checksum = 0;
+
+ if (fw->size < sizeof(*fw_info))
+ goto out;
+
+ for (i = 0; i < fw->size; i++)
+ checksum += fw->data[i];
+ if (checksum != 0)
+ goto out;
+
+ start = le32_to_cpu(fw_info->fw_start);
+ if (start > fw->size)
+ goto out;
+
+ size = le32_to_cpu(fw_info->fw_len);
+ if (size > (fw->size - start) / FW_OPCODE_SIZE)
+ goto out;
+
+ memcpy(version, fw_info->version, RTL_VER_SIZE);
+
+ pa->code = (__le32 *)(fw->data + start);
+ pa->size = size;
+ } else {
+ if (fw->size % FW_OPCODE_SIZE)
+ goto out;
+
+ strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);
+
+ pa->code = (__le32 *)fw->data;
+ pa->size = fw->size / FW_OPCODE_SIZE;
+ }
+ version[RTL_VER_SIZE - 1] = 0;
+
+ rc = true;
+out:
+ return rc;
+}
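+
+ /*
+ * Two firmware layouts are accepted above. A zero leading magic marks
+ * the headered format: fw_start/fw_len locate the opcode stream (with
+ * fw_len counted in 32-bit opcodes, not bytes) and the byte-wise sum
+ * of the whole file, checksum field included, must be zero. Any other
+ * leading word selects the legacy format, a bare opcode stream whose
+ * size must be an exact multiple of FW_OPCODE_SIZE.
+ */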
+
+static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
+ struct rtl_fw_phy_action *pa)
+{
+ bool rc = false;
+ size_t index;
+
+ for (index = 0; index < pa->size; index++) {
+ u32 action = le32_to_cpu(pa->code[index]);
+ u32 regno = (action & 0x0fff0000) >> 16;
+
+ switch (action & 0xf0000000) {
+ case PHY_READ:
+ case PHY_DATA_OR:
+ case PHY_DATA_AND:
+ case PHY_READ_EFUSE:
+ case PHY_CLEAR_READCOUNT:
+ case PHY_WRITE:
+ case PHY_WRITE_PREVIOUS:
+ case PHY_DELAY_MS:
+ break;
+
+ case PHY_BJMPN:
+ if (regno > index) {
+ netif_err(tp, ifup, tp->dev,
+ "Out of range of firmware\n");
+ goto out;
+ }
+ break;
+ case PHY_READCOUNT_EQ_SKIP:
+ if (index + 2 >= pa->size) {
+ netif_err(tp, ifup, tp->dev,
+ "Out of range of firmware\n");
+ goto out;
+ }
+ break;
+ case PHY_COMP_EQ_SKIPN:
+ case PHY_COMP_NEQ_SKIPN:
+ case PHY_SKIPN:
+ if (index + 1 + regno >= pa->size) {
+ netif_err(tp, ifup, tp->dev,
+ "Out of range of firmware\n");
+ goto out;
+ }
+ break;
+
+ case PHY_READ_MAC_BYTE:
+ case PHY_WRITE_MAC_BYTE:
+ case PHY_WRITE_ERI_WORD:
+ default:
+ netif_err(tp, ifup, tp->dev,
+ "Invalid action 0x%08x\n", action);
+ goto out;
+ }
+ }
+ rc = true;
+out:
+ return rc;
+}
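+
+ /*
+ * These checks only validate control flow, not register contents:
+ * PHY_BJMPN may only jump backwards (regno <= current index),
+ * PHY_READCOUNT_EQ_SKIP must leave two words to skip over, and the
+ * COMP/SKIP opcodes need index + 1 + regno to stay inside the stream.
+ * The MAC/ERI opcodes are rejected here so that rtl_phy_write_fw()
+ * can simply BUG() on them.
+ */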
+
+static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
+{
+ struct net_device *dev = tp->dev;
+ int rc = -EINVAL;
+
+ if (!rtl_fw_format_ok(tp, rtl_fw)) {
+ netif_err(tp, ifup, dev, "invalid firwmare\n");
+ goto out;
+ }
+
+ if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
+ rc = 0;
+out:
+ return rc;
+}
+
+static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
+{
+ struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
+ u32 predata, count;
+ size_t index;
+
+ predata = count = 0;
+
+ for (index = 0; index < pa->size; ) {
+ u32 action = le32_to_cpu(pa->code[index]);
+ u32 data = action & 0x0000ffff;
+ u32 regno = (action & 0x0fff0000) >> 16;
+
+ if (!action)
+ break;
+
+ switch (action & 0xf0000000) {
+ case PHY_READ:
+ predata = rtl_readphy(tp, regno);
+ count++;
+ index++;
+ break;
+ case PHY_DATA_OR:
+ predata |= data;
+ index++;
+ break;
+ case PHY_DATA_AND:
+ predata &= data;
+ index++;
+ break;
+ case PHY_BJMPN:
+ index -= regno;
+ break;
+ case PHY_READ_EFUSE:
+ predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
+ index++;
+ break;
+ case PHY_CLEAR_READCOUNT:
+ count = 0;
+ index++;
+ break;
+ case PHY_WRITE:
+ rtl_writephy(tp, regno, data);
+ index++;
+ break;
+ case PHY_READCOUNT_EQ_SKIP:
+ index += (count == data) ? 2 : 1;
+ break;
+ case PHY_COMP_EQ_SKIPN:
+ if (predata == data)
+ index += regno;
+ index++;
+ break;
+ case PHY_COMP_NEQ_SKIPN:
+ if (predata != data)
+ index += regno;
+ index++;
+ break;
+ case PHY_WRITE_PREVIOUS:
+ rtl_writephy(tp, regno, predata);
+ index++;
+ break;
+ case PHY_SKIPN:
+ index += regno + 1;
+ break;
+ case PHY_DELAY_MS:
+ mdelay(data);
+ index++;
+ break;
+
+ case PHY_READ_MAC_BYTE:
+ case PHY_WRITE_MAC_BYTE:
+ case PHY_WRITE_ERI_WORD:
+ default:
+ BUG();
+ }
+ }
+}
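+
+ /*
+ * The interpreter above keeps two pieces of state: predata, the value
+ * of the last PHY_READ/PHY_READ_EFUSE as modified by PHY_DATA_AND and
+ * PHY_DATA_OR, and count, the number of reads since the last
+ * PHY_CLEAR_READCOUNT. A read-modify-write of a PHY register is thus
+ * expressed as PHY_READ reg, PHY_DATA_AND mask, PHY_DATA_OR bits,
+ * PHY_WRITE_PREVIOUS reg. An all-zero word, which would otherwise
+ * encode a PHY_READ of reg 0, terminates the program early instead.
+ */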
+
+static void rtl_release_firmware(struct rtl8169_private *tp)
+{
+ if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
+ release_firmware(tp->rtl_fw->fw);
+ kfree(tp->rtl_fw);
+ }
+ tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
+}
+
+static void rtl_apply_firmware(struct rtl8169_private *tp)
+{
+ struct rtl_fw *rtl_fw = tp->rtl_fw;
+
+ /* TODO: release firmware once rtl_phy_write_fw signals failures. */
+ if (!IS_ERR_OR_NULL(rtl_fw))
+ rtl_phy_write_fw(tp, rtl_fw);
+}
+
+static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
+{
+ if (rtl_readphy(tp, reg) != val)
+ netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
+ else
+ rtl_apply_firmware(tp);
+}
+
+static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x06, 0x006e },
+ { 0x08, 0x0708 },
+ { 0x15, 0x4000 },
+ { 0x18, 0x65c7 },
+
+ { 0x1f, 0x0001 },
+ { 0x03, 0x00a1 },
+ { 0x02, 0x0008 },
+ { 0x01, 0x0120 },
+ { 0x00, 0x1000 },
+ { 0x04, 0x0800 },
+ { 0x04, 0x0000 },
+
+ { 0x03, 0xff41 },
+ { 0x02, 0xdf60 },
+ { 0x01, 0x0140 },
+ { 0x00, 0x0077 },
+ { 0x04, 0x7800 },
+ { 0x04, 0x7000 },
+
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf0f9 },
+ { 0x04, 0x9800 },
+ { 0x04, 0x9000 },
+
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0xff95 },
+ { 0x00, 0xba00 },
+ { 0x04, 0xa800 },
+ { 0x04, 0xa000 },
+
+ { 0x03, 0xff41 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x0140 },
+ { 0x00, 0x00bb },
+ { 0x04, 0xb800 },
+ { 0x04, 0xb000 },
+
+ { 0x03, 0xdf41 },
+ { 0x02, 0xdc60 },
+ { 0x01, 0x6340 },
+ { 0x00, 0x007d },
+ { 0x04, 0xd800 },
+ { 0x04, 0xd000 },
+
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x100a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0xf000 },
+
+ { 0x1f, 0x0000 },
+ { 0x0b, 0x0000 },
+ { 0x00, 0x9200 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0002 },
+ { 0x01, 0x90d0 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
+{
+ struct pci_dev *pdev = tp->pci_dev;
+
+ if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
+ (pdev->subsystem_device != 0xe000))
+ return;
+
+ rtl_writephy(tp, 0x1f, 0x0001);
+ rtl_writephy(tp, 0x10, 0xf01b);
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
+
+static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x04, 0x0000 },
+ { 0x03, 0x00a1 },
+ { 0x02, 0x0008 },
+ { 0x01, 0x0120 },
+ { 0x00, 0x1000 },
+ { 0x04, 0x0800 },
+ { 0x04, 0x9000 },
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf099 },
+ { 0x04, 0x9800 },
+ { 0x04, 0xa000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0xff95 },
+ { 0x00, 0xba00 },
+ { 0x04, 0xa800 },
+ { 0x04, 0xf000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x101a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x10, 0xf41b },
+ { 0x14, 0xfb54 },
+ { 0x18, 0xf5c7 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ rtl8169scd_hw_phy_config_quirk(tp);
+}
+
+static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x04, 0x0000 },
+ { 0x03, 0x00a1 },
+ { 0x02, 0x0008 },
+ { 0x01, 0x0120 },
+ { 0x00, 0x1000 },
+ { 0x04, 0x0800 },
+ { 0x04, 0x9000 },
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf099 },
+ { 0x04, 0x9800 },
+ { 0x04, 0xa000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0xff95 },
+ { 0x00, 0xba00 },
+ { 0x04, 0xa800 },
+ { 0x04, 0xf000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x101a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x0b, 0x8480 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x18, 0x67c7 },
+ { 0x04, 0x2000 },
+ { 0x03, 0x002f },
+ { 0x02, 0x4360 },
+ { 0x01, 0x0109 },
+ { 0x00, 0x3022 },
+ { 0x04, 0x2800 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x10, 0xf41b },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy(tp, 0x1f, 0x0001);
+ rtl_patchphy(tp, 0x16, 1 << 0);
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x10, 0xf41b },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0000 },
+ { 0x1d, 0x0f00 },
+ { 0x1f, 0x0002 },
+ { 0x0c, 0x1ec8 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x1d, 0x3d98 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_patchphy(tp, 0x14, 1 << 5);
+ rtl_patchphy(tp, 0x0d, 1 << 5);
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x12, 0x2300 },
+ { 0x1f, 0x0002 },
+ { 0x00, 0x88d4 },
+ { 0x01, 0x82b1 },
+ { 0x03, 0x7002 },
+ { 0x08, 0x9e30 },
+ { 0x09, 0x01f0 },
+ { 0x0a, 0x5500 },
+ { 0x0c, 0x00c8 },
+ { 0x1f, 0x0003 },
+ { 0x12, 0xc096 },
+ { 0x16, 0x000a },
+ { 0x1f, 0x0000 },
+ { 0x1f, 0x0000 },
+ { 0x09, 0x2000 },
+ { 0x09, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ rtl_patchphy(tp, 0x14, 1 << 5);
+ rtl_patchphy(tp, 0x0d, 1 << 5);
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
+
+static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x12, 0x2300 },
+ { 0x03, 0x802f },
+ { 0x02, 0x4f02 },
+ { 0x01, 0x0409 },
+ { 0x00, 0xf099 },
+ { 0x04, 0x9800 },
+ { 0x04, 0x9000 },
+ { 0x1d, 0x3d98 },
+ { 0x1f, 0x0002 },
+ { 0x0c, 0x7eb8 },
+ { 0x06, 0x0761 },
+ { 0x1f, 0x0003 },
+ { 0x16, 0x0f0a },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ rtl_patchphy(tp, 0x16, 1 << 0);
+ rtl_patchphy(tp, 0x14, 1 << 5);
+ rtl_patchphy(tp, 0x0d, 1 << 5);
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
+
+static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x12, 0x2300 },
+ { 0x1d, 0x3d98 },
+ { 0x1f, 0x0002 },
+ { 0x0c, 0x7eb8 },
+ { 0x06, 0x5461 },
+ { 0x1f, 0x0003 },
+ { 0x16, 0x0f0a },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ rtl_patchphy(tp, 0x16, 1 << 0);
+ rtl_patchphy(tp, 0x14, 1 << 5);
+ rtl_patchphy(tp, 0x0d, 1 << 5);
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
+
+static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
+{
+ rtl8168c_3_hw_phy_config(tp);
+}
+
+static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init_0[] = {
+ /* Channel Estimation */
+ { 0x1f, 0x0001 },
+ { 0x06, 0x4064 },
+ { 0x07, 0x2863 },
+ { 0x08, 0x059c },
+ { 0x09, 0x26b4 },
+ { 0x0a, 0x6a19 },
+ { 0x0b, 0xdcc8 },
+ { 0x10, 0xf06d },
+ { 0x14, 0x7f68 },
+ { 0x18, 0x7fd9 },
+ { 0x1c, 0xf0ff },
+ { 0x1d, 0x3d9c },
+ { 0x1f, 0x0003 },
+ { 0x12, 0xf49f },
+ { 0x13, 0x070b },
+ { 0x1a, 0x05ad },
+ { 0x14, 0x94c0 },
+
+ /*
+ * Tx Error Issue
+ * Enhance line driver power
+ */
+ { 0x1f, 0x0002 },
+ { 0x06, 0x5561 },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8332 },
+ { 0x06, 0x5561 },
+
+ /*
+ * Cannot link at 1Gbps with a bad cable
+ * Decrease SNR threshold from 21.07dB to 19.04dB
+ */
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+
+ { 0x1f, 0x0000 },
+ { 0x0d, 0xf880 }
+ };
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
+
+ /*
+ * Rx Error Issue
+ * Fine Tune Switching regulator parameter
+ */
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
+ rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
+
+ if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0002 },
+ { 0x05, 0x669a },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8330 },
+ { 0x06, 0x669a },
+ { 0x1f, 0x0002 }
+ };
+ int val;
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ val = rtl_readphy(tp, 0x0d);
+
+ if ((val & 0x00ff) != 0x006c) {
+ static const u32 set[] = {
+ 0x0065, 0x0066, 0x0067, 0x0068,
+ 0x0069, 0x006a, 0x006b, 0x006c
+ };
+ int i;
+
+ rtl_writephy(tp, 0x1f, 0x0002);
+
+ val &= 0xff00;
+ for (i = 0; i < ARRAY_SIZE(set); i++)
+ rtl_writephy(tp, 0x0d, val | set[i]);
+ }
+ } else {
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0002 },
+ { 0x05, 0x6662 },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8330 },
+ { 0x06, 0x6662 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ }
+
+ /* RSET couple improve */
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_patchphy(tp, 0x0d, 0x0300);
+ rtl_patchphy(tp, 0x0f, 0x0010);
+
+ /* Fine tune PLL performance */
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
+ rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
+
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x001b);
+
+ rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
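+
+ /*
+ * The efuse byte at 0x01 selects between the two parameter sets above
+ * (0xb1 vs. anything else), and the set[] loop deliberately steps the
+ * low byte of reg 0x0d through 0x0065..0x006c one write at a time
+ * rather than jumping straight to the final 0x006c value.
+ */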
+
+static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init_0[] = {
+ /* Channel Estimation */
+ { 0x1f, 0x0001 },
+ { 0x06, 0x4064 },
+ { 0x07, 0x2863 },
+ { 0x08, 0x059c },
+ { 0x09, 0x26b4 },
+ { 0x0a, 0x6a19 },
+ { 0x0b, 0xdcc8 },
+ { 0x10, 0xf06d },
+ { 0x14, 0x7f68 },
+ { 0x18, 0x7fd9 },
+ { 0x1c, 0xf0ff },
+ { 0x1d, 0x3d9c },
+ { 0x1f, 0x0003 },
+ { 0x12, 0xf49f },
+ { 0x13, 0x070b },
+ { 0x1a, 0x05ad },
+ { 0x14, 0x94c0 },
+
+ /*
+ * Tx Error Issue
+ * Enhance line driver power
+ */
+ { 0x1f, 0x0002 },
+ { 0x06, 0x5561 },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8332 },
+ { 0x06, 0x5561 },
+
+ /*
+ * Cannot link at 1Gbps with a bad cable
+ * Decrease SNR threshold from 21.07dB to 19.04dB
+ */
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+
+ { 0x1f, 0x0000 },
+ { 0x0d, 0xf880 }
+ };
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
+
+ if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0002 },
+ { 0x05, 0x669a },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8330 },
+ { 0x06, 0x669a },
+
+ { 0x1f, 0x0002 }
+ };
+ int val;
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ val = rtl_readphy(tp, 0x0d);
+ if ((val & 0x00ff) != 0x006c) {
+ static const u32 set[] = {
+ 0x0065, 0x0066, 0x0067, 0x0068,
+ 0x0069, 0x006a, 0x006b, 0x006c
+ };
+ int i;
+
+ rtl_writephy(tp, 0x1f, 0x0002);
+
+ val &= 0xff00;
+ for (i = 0; i < ARRAY_SIZE(set); i++)
+ rtl_writephy(tp, 0x0d, val | set[i]);
+ }
+ } else {
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0002 },
+ { 0x05, 0x2642 },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8330 },
+ { 0x06, 0x2642 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ }
+
+ /* Fine tune PLL performance */
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
+ rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
+
+ /* Switching regulator Slew rate */
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_patchphy(tp, 0x0f, 0x0017);
+
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x001b);
+
+ rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
+
+static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0002 },
+ { 0x10, 0x0008 },
+ { 0x0d, 0x006c },
+
+ { 0x1f, 0x0000 },
+ { 0x0d, 0xf880 },
+
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+
+ { 0x1f, 0x0001 },
+ { 0x0b, 0xa4d8 },
+ { 0x09, 0x281c },
+ { 0x07, 0x2883 },
+ { 0x0a, 0x6b35 },
+ { 0x1d, 0x3da4 },
+ { 0x1c, 0xeffd },
+ { 0x14, 0x7f52 },
+ { 0x18, 0x7fc6 },
+ { 0x08, 0x0601 },
+ { 0x06, 0x4063 },
+ { 0x10, 0xf074 },
+ { 0x1f, 0x0003 },
+ { 0x13, 0x0789 },
+ { 0x12, 0xf4bd },
+ { 0x1a, 0x04fd },
+ { 0x14, 0x84b0 },
+ { 0x1f, 0x0000 },
+ { 0x00, 0x9200 },
+
+ { 0x1f, 0x0005 },
+ { 0x01, 0x0340 },
+ { 0x1f, 0x0001 },
+ { 0x04, 0x4000 },
+ { 0x03, 0x1d21 },
+ { 0x02, 0x0c32 },
+ { 0x01, 0x0200 },
+ { 0x00, 0x5554 },
+ { 0x04, 0x4800 },
+ { 0x04, 0x4000 },
+ { 0x04, 0xf000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x101a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0xf000 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0007 },
+ { 0x1e, 0x0023 },
+ { 0x16, 0x0000 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+
+ { 0x1f, 0x0007 },
+ { 0x1e, 0x002d },
+ { 0x18, 0x0040 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_patchphy(tp, 0x0d, 1 << 5);
+}
+
+static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ /* Enable Delay cap */
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8b80 },
+ { 0x06, 0xc896 },
+ { 0x1f, 0x0000 },
+
+ /* Channel estimation fine tune */
+ { 0x1f, 0x0001 },
+ { 0x0b, 0x6c20 },
+ { 0x07, 0x2872 },
+ { 0x1c, 0xefff },
+ { 0x1f, 0x0003 },
+ { 0x14, 0x6420 },
+ { 0x1f, 0x0000 },
+
+ /* Update PFM & 10M TX idle timer */
+ { 0x1f, 0x0007 },
+ { 0x1e, 0x002f },
+ { 0x15, 0x1919 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0007 },
+ { 0x1e, 0x00ac },
+ { 0x18, 0x0006 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_apply_firmware(tp);
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ /* DCO enable for 10M IDLE Power */
+ rtl_writephy(tp, 0x1f, 0x0007);
+ rtl_writephy(tp, 0x1e, 0x0023);
+ rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* For impedance matching */
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* PHY auto speed down */
+ rtl_writephy(tp, 0x1f, 0x0007);
+ rtl_writephy(tp, 0x1e, 0x002d);
+ rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
+
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b86);
+ rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b85);
+ rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
+ rtl_writephy(tp, 0x1f, 0x0007);
+ rtl_writephy(tp, 0x1e, 0x0020);
+ rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
+ rtl_writephy(tp, 0x1f, 0x0006);
+ rtl_writephy(tp, 0x00, 0x5a00);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x0d, 0x0007);
+ rtl_writephy(tp, 0x0e, 0x003c);
+ rtl_writephy(tp, 0x0d, 0x4007);
+ rtl_writephy(tp, 0x0e, 0x0000);
+ rtl_writephy(tp, 0x0d, 0x0000);
+}
+
+static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ /* Enable Delay cap */
+ { 0x1f, 0x0004 },
+ { 0x1f, 0x0007 },
+ { 0x1e, 0x00ac },
+ { 0x18, 0x0006 },
+ { 0x1f, 0x0002 },
+ { 0x1f, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ /* Channel estimation fine tune */
+ { 0x1f, 0x0003 },
+ { 0x09, 0xa20f },
+ { 0x1f, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ /* Green Setting */
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8b5b },
+ { 0x06, 0x9222 },
+ { 0x05, 0x8b6d },
+ { 0x06, 0x8000 },
+ { 0x05, 0x8b76 },
+ { 0x06, 0x8000 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_apply_firmware(tp);
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ /* For 4-corner performance improve */
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b80);
+ rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* PHY auto speed down */
+ rtl_writephy(tp, 0x1f, 0x0004);
+ rtl_writephy(tp, 0x1f, 0x0007);
+ rtl_writephy(tp, 0x1e, 0x002d);
+ rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
+
+ /* improve 10M EEE waveform */
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b86);
+ rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* Improve 2-pair detection performance */
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b85);
+ rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* EEE setting */
+ rtl_w1w0_eri(tp->mmio_addr, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003,
+ ERIAR_EXGMAC);
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b85);
+ rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
+ rtl_writephy(tp, 0x1f, 0x0004);
+ rtl_writephy(tp, 0x1f, 0x0007);
+ rtl_writephy(tp, 0x1e, 0x0020);
+ rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x0d, 0x0007);
+ rtl_writephy(tp, 0x0e, 0x003c);
+ rtl_writephy(tp, 0x0d, 0x4007);
+ rtl_writephy(tp, 0x0e, 0x0000);
+ rtl_writephy(tp, 0x0d, 0x0000);
+
+ /* Green feature */
+ rtl_writephy(tp, 0x1f, 0x0003);
+ rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
+ rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
+
+static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ /* Channel estimation fine tune */
+ { 0x1f, 0x0003 },
+ { 0x09, 0xa20f },
+ { 0x1f, 0x0000 },
+
+ /* Modify green table for giga & fnet */
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8b55 },
+ { 0x06, 0x0000 },
+ { 0x05, 0x8b5e },
+ { 0x06, 0x0000 },
+ { 0x05, 0x8b67 },
+ { 0x06, 0x0000 },
+ { 0x05, 0x8b70 },
+ { 0x06, 0x0000 },
+ { 0x1f, 0x0000 },
+ { 0x1f, 0x0007 },
+ { 0x1e, 0x0078 },
+ { 0x17, 0x0000 },
+ { 0x19, 0x00fb },
+ { 0x1f, 0x0000 },
+
+ /* Modify green table for 10M */
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8b79 },
+ { 0x06, 0xaa00 },
+ { 0x1f, 0x0000 },
+
+ /* Disable high-impedance detection (RTCT) */
+ { 0x1f, 0x0003 },
+ { 0x01, 0x328a },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_apply_firmware(tp);
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ /* For 4-corner performance improve */
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b80);
+ rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* PHY auto speed down */
+ rtl_writephy(tp, 0x1f, 0x0007);
+ rtl_writephy(tp, 0x1e, 0x002d);
+ rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
+
+ /* Improve 10M EEE waveform */
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b86);
+ rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* Improve 2-pair detection performance */
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b85);
+ rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
+
+static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
+{
+ rtl_apply_firmware(tp);
+
+ /* For 4-corner performance improve */
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b80);
+ rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* PHY auto speed down */
+ rtl_writephy(tp, 0x1f, 0x0007);
+ rtl_writephy(tp, 0x1e, 0x002d);
+ rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
+
+ /* Improve 10M EEE waveform */
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b86);
+ rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
+
+static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0003 },
+ { 0x08, 0x441d },
+ { 0x01, 0x9100 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_patchphy(tp, 0x11, 1 << 12);
+ rtl_patchphy(tp, 0x19, 1 << 13);
+ rtl_patchphy(tp, 0x10, 1 << 15);
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0005 },
+ { 0x1a, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0004 },
+ { 0x1c, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x15, 0x7701 },
+ { 0x1f, 0x0000 }
+ };
+
+ /* Disable ALDPS before ram code */
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x18, 0x0310);
+ msleep(100);
+
+ rtl_apply_firmware(tp);
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl_hw_phy_config(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl8169_print_mac_version(tp);
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_01:
+ break;
+ case RTL_GIGA_MAC_VER_02:
+ case RTL_GIGA_MAC_VER_03:
+ rtl8169s_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_04:
+ rtl8169sb_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_05:
+ rtl8169scd_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_06:
+ rtl8169sce_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_07:
+ case RTL_GIGA_MAC_VER_08:
+ case RTL_GIGA_MAC_VER_09:
+ rtl8102e_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_11:
+ rtl8168bb_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ rtl8168bef_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_18:
+ rtl8168cp_1_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_19:
+ rtl8168c_1_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_20:
+ rtl8168c_2_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_21:
+ rtl8168c_3_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_22:
+ rtl8168c_4_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ rtl8168cp_2_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_25:
+ rtl8168d_1_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_26:
+ rtl8168d_2_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_27:
+ rtl8168d_3_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_28:
+ rtl8168d_4_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_29:
+ case RTL_GIGA_MAC_VER_30:
+ rtl8105e_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_31:
+ /* None. */
+ break;
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ rtl8168e_1_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_34:
+ rtl8168e_2_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_35:
+ rtl8168f_1_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_36:
+ rtl8168f_2_hw_phy_config(tp);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void rtl8169_phy_timer(unsigned long __opaque)
+{
+ struct net_device *dev = (struct net_device *)__opaque;
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct timer_list *timer = &tp->timer;
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned long timeout = RTL8169_PHY_TIMEOUT;
+
+ assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
+
+ spin_lock_irq(&tp->lock);
+
+ if (tp->phy_reset_pending(tp)) {
+ /*
+ * A busy loop could burn quite a few cycles on today's CPUs.
+ * Delay the timer's execution for a few ticks instead.
+ */
+ timeout = HZ/10;
+ goto out_mod_timer;
+ }
+
+ if (tp->link_ok(ioaddr))
+ goto out_unlock;
+
+ netif_warn(tp, link, dev, "PHY reset until link up\n");
+
+ tp->phy_reset_enable(tp);
+
+out_mod_timer:
+ mod_timer(timer, jiffies + timeout);
+out_unlock:
+ spin_unlock_irq(&tp->lock);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void rtl8169_netpoll(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct pci_dev *pdev = tp->pci_dev;
+
+ disable_irq(pdev->irq);
+ rtl8169_interrupt(pdev->irq, dev);
+ enable_irq(pdev->irq);
+}
+#endif
+
+static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
+ void __iomem *ioaddr)
+{
+ iounmap(ioaddr);
+ pci_release_regions(pdev);
+ pci_clear_mwi(pdev);
+ pci_disable_device(pdev);
+ free_netdev(dev);
+}
+
+static void rtl8169_phy_reset(struct net_device *dev,
+ struct rtl8169_private *tp)
+{
+ unsigned int i;
+
+ tp->phy_reset_enable(tp);
+ for (i = 0; i < 100; i++) {
+ if (!tp->phy_reset_pending(tp))
+ return;
+ msleep(1);
+ }
+ netif_err(tp, link, dev, "PHY reset failed\n");
+}
+
+static bool rtl_tbi_enabled(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
+ (RTL_R8(PHYstatus) & TBI_Enable);
+}
+
+static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ rtl_hw_phy_config(dev);
+
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
+ dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ RTL_W8(0x82, 0x01);
+ }
+
+ pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
+
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+ pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
+ dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+ RTL_W8(0x82, 0x01);
+ dprintk("Set PHY Reg 0x0bh = 0x00h\n");
+ rtl_writephy(tp, 0x0b, 0x0000); /* w 0x0b 15 0 0 */
+ }
+
+ rtl8169_phy_reset(dev, tp);
+
+ rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
+ ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
+ (tp->mii.supports_gmii ?
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full : 0));
+
+ if (rtl_tbi_enabled(tp))
+ netif_info(tp, link, dev, "TBI auto-negotiating\n");
+}
+
+static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ u32 high;
+ u32 low;
+
+ low = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
+ high = addr[4] | (addr[5] << 8);
+
+ spin_lock_irq(&tp->lock);
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+
+ RTL_W32(MAC4, high);
+ RTL_R32(MAC4);
+
+ RTL_W32(MAC0, low);
+ RTL_R32(MAC0);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+ const struct exgmac_reg e[] = {
+ { .addr = 0xe0, ERIAR_MASK_1111, .val = low },
+ { .addr = 0xe4, ERIAR_MASK_1111, .val = high },
+ { .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 },
+ { .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 |
+ low >> 16 },
+ };
+
+ rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e));
+ }
+
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+
+ spin_unlock_irq(&tp->lock);
+}
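+
+ /*
+ * MAC0 holds the low four bytes of the address and MAC4 the remaining
+ * two; each write is followed by a read of the same register, which
+ * flushes the posted MMIO write before the next one is issued. On
+ * RTL_GIGA_MAC_VER_34 the address is additionally mirrored into the
+ * ExgMAC block at 0xe0/0xe4, plus 16-bit-shifted copies at 0xf0/0xf4.
+ */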
+
+static int rtl_set_mac_address(struct net_device *dev, void *p)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ rtl_rar_set(tp, dev->dev_addr);
+
+ return 0;
+}
+
+static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
+}
+
+static int rtl_xmii_ioctl(struct rtl8169_private *tp,
+ struct mii_ioctl_data *data, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = 32; /* Internal PHY */
+ return 0;
+
+ case SIOCGMIIREG:
+ data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
+ return 0;
+
+ case SIOCSMIIREG:
+ rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct rtl_cfg_info {
+ void (*hw_start)(struct net_device *);
+ unsigned int region;
+ unsigned int align;
+ u16 intr_event;
+ u16 napi_event;
+ unsigned features;
+ u8 default_ver;
+} rtl_cfg_infos[] = {
+ [RTL_CFG_0] = {
+ .hw_start = rtl_hw_start_8169,
+ .region = 1,
+ .align = 0,
+ .intr_event = SYSErr | LinkChg | RxOverflow |
+ RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
+ .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
+ .features = RTL_FEATURE_GMII,
+ .default_ver = RTL_GIGA_MAC_VER_01,
+ },
+ [RTL_CFG_1] = {
+ .hw_start = rtl_hw_start_8168,
+ .region = 2,
+ .align = 8,
+ .intr_event = SYSErr | LinkChg | RxOverflow |
+ TxErr | TxOK | RxOK | RxErr,
+ .napi_event = TxErr | TxOK | RxOK | RxOverflow,
+ .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
+ .default_ver = RTL_GIGA_MAC_VER_11,
+ },
+ [RTL_CFG_2] = {
+ .hw_start = rtl_hw_start_8101,
+ .region = 2,
+ .align = 8,
+ .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout |
+ RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
+ .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
+ .features = RTL_FEATURE_MSI,
+ .default_ver = RTL_GIGA_MAC_VER_13,
+ }
+};
+
+/* Cfg9346_Unlock assumed. */
+static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
+ const struct rtl_cfg_info *cfg)
+{
+ unsigned msi = 0;
+ u8 cfg2;
+
+ cfg2 = RTL_R8(Config2) & ~MSIEnable;
+ if (cfg->features & RTL_FEATURE_MSI) {
+ if (pci_enable_msi(pdev)) {
+ dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
+ } else {
+ cfg2 |= MSIEnable;
+ msi = RTL_FEATURE_MSI;
+ }
+ }
+ RTL_W8(Config2, cfg2);
+ return msi;
+}
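+
+ /*
+ * MSIEnable is a chip-side Config2 bit, not just PCI core state: it
+ * is set only when pci_enable_msi() succeeds and cleared otherwise,
+ * keeping the chip's idea of INTx vs. MSI signalling in sync with the
+ * kernel's.
+ */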
+
+static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
+{
+ if (tp->features & RTL_FEATURE_MSI) {
+ pci_disable_msi(pdev);
+ tp->features &= ~RTL_FEATURE_MSI;
+ }
+}
+
+static const struct net_device_ops rtl8169_netdev_ops = {
+ .ndo_open = rtl8169_open,
+ .ndo_stop = rtl8169_close,
+ .ndo_get_stats = rtl8169_get_stats,
+ .ndo_start_xmit = rtl8169_start_xmit,
+ .ndo_tx_timeout = rtl8169_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = rtl8169_change_mtu,
+ .ndo_fix_features = rtl8169_fix_features,
+ .ndo_set_features = rtl8169_set_features,
+ .ndo_set_mac_address = rtl_set_mac_address,
+ .ndo_do_ioctl = rtl8169_ioctl,
+ .ndo_set_rx_mode = rtl_set_rx_mode,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = rtl8169_netpoll,
+#endif
+};
+
+static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
+{
+ struct mdio_ops *ops = &tp->mdio_ops;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_27:
+ ops->write = r8168dp_1_mdio_write;
+ ops->read = r8168dp_1_mdio_read;
+ break;
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ ops->write = r8168dp_2_mdio_write;
+ ops->read = r8168dp_2_mdio_read;
+ break;
+ default:
+ ops->write = r8169_mdio_write;
+ ops->read = r8169_mdio_read;
+ break;
+ }
+}
+
+static void r810x_phy_power_down(struct rtl8169_private *tp)
+{
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
+}
+
+static void r810x_phy_power_up(struct rtl8169_private *tp)
+{
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
+}
+
+static void r810x_pll_power_down(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (__rtl8169_get_wol(tp) & WAKE_ANY) {
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, MII_BMCR, 0x0000);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_29 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_30)
+ RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
+ AcceptMulticast | AcceptMyPhys);
+ return;
+ }
+
+ r810x_phy_power_down(tp);
+}
+
+static void r810x_pll_power_up(struct rtl8169_private *tp)
+{
+ r810x_phy_power_up(tp);
+}
+
+static void r8168_phy_power_up(struct rtl8169_private *tp)
+{
+ rtl_writephy(tp, 0x1f, 0x0000);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_18:
+ case RTL_GIGA_MAC_VER_19:
+ case RTL_GIGA_MAC_VER_20:
+ case RTL_GIGA_MAC_VER_21:
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ rtl_writephy(tp, 0x0e, 0x0000);
+ break;
+ default:
+ break;
+ }
+ rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
+}
+
+static void r8168_phy_power_down(struct rtl8169_private *tp)
+{
+ rtl_writephy(tp, 0x1f, 0x0000);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
+ break;
+
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_18:
+ case RTL_GIGA_MAC_VER_19:
+ case RTL_GIGA_MAC_VER_20:
+ case RTL_GIGA_MAC_VER_21:
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ rtl_writephy(tp, 0x0e, 0x0200);
+ /* fall through - BMCR power-down applies to these chips too */
+ default:
+ rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
+ break;
+ }
+}
+
+static void r8168_pll_power_down(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) &&
+ r8168dp_check_dash(tp)) {
+ return;
+ }
+
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_24) &&
+ (RTL_R16(CPlusCmd) & ASF)) {
+ return;
+ }
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_33)
+ rtl_ephy_write(ioaddr, 0x19, 0xff64);
+
+ if (__rtl8169_get_wol(tp) & WAKE_ANY) {
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, MII_BMCR, 0x0000);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_33 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_34)
+ RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
+ AcceptMulticast | AcceptMyPhys);
+ return;
+ }
+
+ r8168_phy_power_down(tp);
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
+ break;
+ }
+}
+
+static void r8168_pll_power_up(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) &&
+ r8168dp_check_dash(tp)) {
+ return;
+ }
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
+ break;
+ }
+
+ r8168_phy_power_up(tp);
+}
+
+static void rtl_generic_op(struct rtl8169_private *tp,
+ void (*op)(struct rtl8169_private *))
+{
+ if (op)
+ op(tp);
+}
+
+static void rtl_pll_power_down(struct rtl8169_private *tp)
+{
+ rtl_generic_op(tp, tp->pll_power_ops.down);
+}
+
+static void rtl_pll_power_up(struct rtl8169_private *tp)
+{
+ rtl_generic_op(tp, tp->pll_power_ops.up);
+}
+
+static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
+{
+ struct pll_power_ops *ops = &tp->pll_power_ops;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_07:
+ case RTL_GIGA_MAC_VER_08:
+ case RTL_GIGA_MAC_VER_09:
+ case RTL_GIGA_MAC_VER_10:
+ case RTL_GIGA_MAC_VER_16:
+ case RTL_GIGA_MAC_VER_29:
+ case RTL_GIGA_MAC_VER_30:
+ ops->down = r810x_pll_power_down;
+ ops->up = r810x_pll_power_up;
+ break;
+
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_18:
+ case RTL_GIGA_MAC_VER_19:
+ case RTL_GIGA_MAC_VER_20:
+ case RTL_GIGA_MAC_VER_21:
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_35:
+ case RTL_GIGA_MAC_VER_36:
+ ops->down = r8168_pll_power_down;
+ ops->up = r8168_pll_power_up;
+ break;
+
+ default:
+ ops->down = NULL;
+ ops->up = NULL;
+ break;
+ }
+}
+
+static void rtl_init_rxcfg(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_01:
+ case RTL_GIGA_MAC_VER_02:
+ case RTL_GIGA_MAC_VER_03:
+ case RTL_GIGA_MAC_VER_04:
+ case RTL_GIGA_MAC_VER_05:
+ case RTL_GIGA_MAC_VER_06:
+ case RTL_GIGA_MAC_VER_10:
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_13:
+ case RTL_GIGA_MAC_VER_14:
+ case RTL_GIGA_MAC_VER_15:
+ case RTL_GIGA_MAC_VER_16:
+ case RTL_GIGA_MAC_VER_17:
+ RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
+ break;
+ case RTL_GIGA_MAC_VER_18:
+ case RTL_GIGA_MAC_VER_19:
+ case RTL_GIGA_MAC_VER_20:
+ case RTL_GIGA_MAC_VER_21:
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
+ break;
+ default:
+ RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
+ break;
+ }
+}
+
+static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
+{
+ tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
+}
+
+static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
+{
+ rtl_generic_op(tp, tp->jumbo_ops.enable);
+}
+
+static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
+{
+ rtl_generic_op(tp, tp->jumbo_ops.disable);
+}
+
+static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
+ RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
+ rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
+}
+
+static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
+ RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
+ rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
+}
+
+static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
+}
+
+static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
+}
+
+static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
+
+ RTL_W8(MaxTxPacketSize, 0x3f);
+ RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
+ RTL_W8(Config4, RTL_R8(Config4) | 0x01);
+ pci_write_config_byte(pdev, 0x79, 0x20);
+}
+
+static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
+
+ RTL_W8(MaxTxPacketSize, 0x0c);
+ RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
+ RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
+ pci_write_config_byte(pdev, 0x79, 0x50);
+}
+
+static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
+{
+ rtl_tx_performance_tweak(tp->pci_dev,
+ (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
+}
+
+static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
+{
+ rtl_tx_performance_tweak(tp->pci_dev,
+ (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
+}
+
+static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ r8168b_0_hw_jumbo_enable(tp);
+
+ RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
+}
+
+static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ r8168b_0_hw_jumbo_disable(tp);
+
+ RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
+}
+
+static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
+{
+ struct jumbo_ops *ops = &tp->jumbo_ops;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_11:
+ ops->disable = r8168b_0_hw_jumbo_disable;
+ ops->enable = r8168b_0_hw_jumbo_enable;
+ break;
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ ops->disable = r8168b_1_hw_jumbo_disable;
+ ops->enable = r8168b_1_hw_jumbo_enable;
+ break;
+ case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
+ case RTL_GIGA_MAC_VER_19:
+ case RTL_GIGA_MAC_VER_20:
+ case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ ops->disable = r8168c_hw_jumbo_disable;
+ ops->enable = r8168c_hw_jumbo_enable;
+ break;
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ ops->disable = r8168dp_hw_jumbo_disable;
+ ops->enable = r8168dp_hw_jumbo_enable;
+ break;
+ case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_34:
+ ops->disable = r8168e_hw_jumbo_disable;
+ ops->enable = r8168e_hw_jumbo_enable;
+ break;
+
+ /*
+ * No action needed for jumbo frames with 8169.
+ * No jumbo for 810x at all.
+ */
+ default:
+ ops->disable = NULL;
+ ops->enable = NULL;
+ break;
+ }
+}
+
+static void rtl_hw_reset(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ int i;
+
+ /* Soft reset the chip. */
+ RTL_W8(ChipCmd, CmdReset);
+
+ /* Check that the chip has finished the reset. */
+ for (i = 0; i < 100; i++) {
+ if ((RTL_R8(ChipCmd) & CmdReset) == 0)
+ break;
+ udelay(100);
+ }
+
+ rtl8169_init_ring_indexes(tp);
+}
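+
+ /*
+ * CmdReset is self-clearing: the loop above polls it for at most
+ * 100 * 100us = 10ms, then falls through and reinitializes the ring
+ * indexes whether or not the chip acknowledged the reset.
+ */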
+
+static int __devinit
+rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
+ const unsigned int region = cfg->region;
+ struct rtl8169_private *tp;
+ struct mii_if_info *mii;
+ struct net_device *dev;
+ void __iomem *ioaddr;
+ int chipset, i;
+ int rc;
+
+ if (netif_msg_drv(&debug)) {
+ printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
+ MODULENAME, RTL8169_VERSION);
+ }
+
+ dev = alloc_etherdev(sizeof (*tp));
+ if (!dev) {
+ if (netif_msg_drv(&debug))
+ dev_err(&pdev->dev, "unable to alloc new ethernet\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->netdev_ops = &rtl8169_netdev_ops;
+ tp = netdev_priv(dev);
+ tp->dev = dev;
+ tp->pci_dev = pdev;
+ tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
+
+ mii = &tp->mii;
+ mii->dev = dev;
+ mii->mdio_read = rtl_mdio_read;
+ mii->mdio_write = rtl_mdio_write;
+ mii->phy_id_mask = 0x1f;
+ mii->reg_num_mask = 0x1f;
+ mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
+
+ /* Disable ASPM completely as it causes random devices to stop
+ * working as well as full system hangs for some PCIe device users */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
+ /* enable device (incl. PCI PM wakeup and hotplug setup) */
+ rc = pci_enable_device(pdev);
+ if (rc < 0) {
+ netif_err(tp, probe, dev, "enable failure\n");
+ goto err_out_free_dev_1;
+ }
+
+ if (pci_set_mwi(pdev) < 0)
+ netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
+
+ /* make sure PCI base addr 1 is MMIO */
+ if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
+ netif_err(tp, probe, dev,
+ "region #%d not an MMIO resource, aborting\n",
+ region);
+ rc = -ENODEV;
+ goto err_out_mwi_2;
+ }
+
+ /* check for weird/broken PCI region reporting */
+ if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
+ netif_err(tp, probe, dev,
+ "Invalid PCI region size(s), aborting\n");
+ rc = -ENODEV;
+ goto err_out_mwi_2;
+ }
+
+ rc = pci_request_regions(pdev, MODULENAME);
+ if (rc < 0) {
+ netif_err(tp, probe, dev, "could not request regions\n");
+ goto err_out_mwi_2;
+ }
+
+ tp->cp_cmd = RxChkSum;
+
+ if ((sizeof(dma_addr_t) > 4) &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
+ tp->cp_cmd |= PCIDAC;
+ dev->features |= NETIF_F_HIGHDMA;
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc < 0) {
+ netif_err(tp, probe, dev, "DMA configuration failed\n");
+ goto err_out_free_res_3;
+ }
+ }
+
+ /* ioremap MMIO region */
+ ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
+ if (!ioaddr) {
+ netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
+ rc = -EIO;
+ goto err_out_free_res_3;
+ }
+ tp->mmio_addr = ioaddr;
+
+ if (!pci_is_pcie(pdev))
+ netif_info(tp, probe, dev, "not PCI Express\n");
+
+ /* Identify chip attached to board */
+ rtl8169_get_mac_version(tp, dev, cfg->default_ver);
+
+ rtl_init_rxcfg(tp);
+
+ RTL_W16(IntrMask, 0x0000);
+
+ rtl_hw_reset(tp);
+
+ RTL_W16(IntrStatus, 0xffff);
+
+ pci_set_master(pdev);
+
+ /*
+ * Pretend we are using VLANs; this bypasses a nasty bug where
+ * interrupts stop flowing on high load on 8110SCd controllers.
+ */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ tp->cp_cmd |= RxVlan;
+
+ rtl_init_mdio_ops(tp);
+ rtl_init_pll_power_ops(tp);
+ rtl_init_jumbo_ops(tp);
+
+ rtl8169_print_mac_version(tp);
+
+ chipset = tp->mac_version;
+ tp->txd_version = rtl_chip_infos[chipset].txd_version;
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
+ RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
+ if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
+ tp->features |= RTL_FEATURE_WOL;
+ if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
+ tp->features |= RTL_FEATURE_WOL;
+ tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+
+ if (rtl_tbi_enabled(tp)) {
+ tp->set_speed = rtl8169_set_speed_tbi;
+ tp->get_settings = rtl8169_gset_tbi;
+ tp->phy_reset_enable = rtl8169_tbi_reset_enable;
+ tp->phy_reset_pending = rtl8169_tbi_reset_pending;
+ tp->link_ok = rtl8169_tbi_link_ok;
+ tp->do_ioctl = rtl_tbi_ioctl;
+ } else {
+ tp->set_speed = rtl8169_set_speed_xmii;
+ tp->get_settings = rtl8169_gset_xmii;
+ tp->phy_reset_enable = rtl8169_xmii_reset_enable;
+ tp->phy_reset_pending = rtl8169_xmii_reset_pending;
+ tp->link_ok = rtl8169_xmii_link_ok;
+ tp->do_ioctl = rtl_xmii_ioctl;
+ }
+
+ spin_lock_init(&tp->lock);
+
+ /* Get MAC address */
+ for (i = 0; i < MAC_ADDR_LEN; i++)
+ dev->dev_addr[i] = RTL_R8(MAC0 + i);
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
+ dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
+ dev->irq = pdev->irq;
+ dev->base_addr = (unsigned long) ioaddr;
+
+ netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
+
+ /* Don't enable SG, IP_CSUM and TSO by default - they might not work
+ * properly for all devices. */
+ dev->features |= NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_HIGHDMA;
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ /* 8110SCd requires hardware Rx VLAN - disallow toggling */
+ dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
+
+ tp->intr_mask = 0xffff;
+ tp->hw_start = cfg->hw_start;
+ tp->intr_event = cfg->intr_event;
+ tp->napi_event = cfg->napi_event;
+
+ tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
+ ~(RxBOVF | RxFOVF) : ~0;
+
+ init_timer(&tp->timer);
+ tp->timer.data = (unsigned long) dev;
+ tp->timer.function = rtl8169_phy_timer;
+
+ tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
+
+ rc = register_netdev(dev);
+ if (rc < 0)
+ goto err_out_msi_4;
+
+ pci_set_drvdata(pdev, dev);
+
+ netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n",
+ rtl_chip_infos[chipset].name, dev->base_addr, dev->dev_addr,
+ (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
+ if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
+ netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
+ "tx checksumming: %s]\n",
+ rtl_chip_infos[chipset].jumbo_max,
+ rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
+ }
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) {
+ rtl8168_driver_start(tp);
+ }
+
+ device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
+
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_put_noidle(&pdev->dev);
+
+ netif_carrier_off(dev);
+
+out:
+ return rc;
+
+err_out_msi_4:
+ rtl_disable_msi(pdev, tp);
+ iounmap(ioaddr);
+err_out_free_res_3:
+ pci_release_regions(pdev);
+err_out_mwi_2:
+ pci_clear_mwi(pdev);
+ pci_disable_device(pdev);
+err_out_free_dev_1:
+ free_netdev(dev);
+ goto out;
+}
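+
+ /*
+ * Probe ordering above is deliberate: the MAC version is identified
+ * before rtl_init_rxcfg(), which dispatches on it, and IntrMask is
+ * zeroed before the chip reset so that nothing fires before the
+ * interrupt handler is registered in rtl8169_open().
+ */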
+
+static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) {
+ rtl8168_driver_stop(tp);
+ }
+
+ cancel_delayed_work_sync(&tp->task);
+
+ unregister_netdev(dev);
+
+ rtl_release_firmware(tp);
+
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_get_noresume(&pdev->dev);
+
+ /* restore original MAC address */
+ rtl_rar_set(tp, dev->perm_addr);
+
+ rtl_disable_msi(pdev, tp);
+ rtl8169_release_board(pdev, dev, tp->mmio_addr);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
+{
+ struct rtl_fw *rtl_fw;
+ const char *name;
+ int rc = -ENOMEM;
+
+ name = rtl_lookup_firmware_name(tp);
+ if (!name)
+ goto out_no_firmware;
+
+ rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
+ if (!rtl_fw)
+ goto err_warn;
+
+ rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
+ if (rc < 0)
+ goto err_free;
+
+ rc = rtl_check_firmware(tp, rtl_fw);
+ if (rc < 0)
+ goto err_release_firmware;
+
+ tp->rtl_fw = rtl_fw;
+out:
+ return;
+
+err_release_firmware:
+ release_firmware(rtl_fw->fw);
+err_free:
+ kfree(rtl_fw);
+err_warn:
+ netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
+ name, rc);
+out_no_firmware:
+ tp->rtl_fw = NULL;
+ goto out;
+}
+
+static void rtl_request_firmware(struct rtl8169_private *tp)
+{
+ if (IS_ERR(tp->rtl_fw))
+ rtl_request_uncached_firmware(tp);
+}
+
+static int rtl8169_open(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
+ int retval = -ENOMEM;
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ /*
+ * Rx and Tx descriptors need 256-byte alignment.
+ * dma_alloc_coherent() provides more than that.
+ */
+ tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
+ &tp->TxPhyAddr, GFP_KERNEL);
+ if (!tp->TxDescArray)
+ goto err_pm_runtime_put;
+
+ tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
+ &tp->RxPhyAddr, GFP_KERNEL);
+ if (!tp->RxDescArray)
+ goto err_free_tx_0;
+
+ retval = rtl8169_init_ring(dev);
+ if (retval < 0)
+ goto err_free_rx_1;
+
+ INIT_DELAYED_WORK(&tp->task, NULL);
+
+ smp_mb();
+
+ rtl_request_firmware(tp);
+
+ retval = request_irq(dev->irq, rtl8169_interrupt,
+ (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
+ dev->name, dev);
+ if (retval < 0)
+ goto err_release_fw_2;
+
+ napi_enable(&tp->napi);
+
+ rtl8169_init_phy(dev, tp);
+
+ rtl8169_set_features(dev, dev->features);
+
+ rtl_pll_power_up(tp);
+
+ rtl_hw_start(dev);
+
+ tp->saved_wolopts = 0;
+ pm_runtime_put_noidle(&pdev->dev);
+
+ rtl8169_check_link_status(dev, tp, ioaddr);
+out:
+ return retval;
+
+err_release_fw_2:
+ rtl_release_firmware(tp);
+ rtl8169_rx_clear(tp);
+err_free_rx_1:
+ dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+ tp->RxPhyAddr);
+ tp->RxDescArray = NULL;
+err_free_tx_0:
+ dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+ tp->TxPhyAddr);
+ tp->TxDescArray = NULL;
+err_pm_runtime_put:
+ pm_runtime_put_noidle(&pdev->dev);
+ goto out;
+}
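+
+ /*
+ * The error labels above unwind in strict reverse order of setup;
+ * note that err_release_fw_2 also calls rtl8169_rx_clear(), which
+ * unmaps the Rx buffers rtl8169_init_ring() had already populated
+ * when request_irq() is what failed.
+ */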
+
+static void rtl_rx_close(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
+}
+
+static void rtl8169_hw_reset(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ /* Disable interrupts */
+ rtl8169_irq_mask_and_ack(ioaddr);
+
+ rtl_rx_close(tp);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) {
+ while (RTL_R8(TxPoll) & NPQ)
+ udelay(20);
+ } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_35 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_36) {
+ RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
+ while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
+ udelay(100);
+ } else {
+ RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
+ udelay(100);
+ }
+
+ rtl_hw_reset(tp);
+}
+
+static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ /* Set DMA burst size and Interframe Gap Time */
+ RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
+ (InterFrameGap << TxInterFrameGapShift));
+}
+
+static void rtl_hw_start(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ tp->hw_start(dev);
+
+ netif_start_queue(dev);
+}
+
+static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
+ void __iomem *ioaddr)
+{
+ /*
+ * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
+ * register to be written before TxDescAddrLow to work.
+ * Switching from MMIO to I/O access fixes the issue as well.
+ */
+ RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
+ RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
+ RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
+ RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
+}
+
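+/*
+ * Read CPlusCmd and write the same value straight back: the register is
+ * left unchanged and its current bits are returned for the caller to
+ * extend.
+ */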
+static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
+{
+ u16 cmd;
+
+ cmd = RTL_R16(CPlusCmd);
+ RTL_W16(CPlusCmd, cmd);
+ return cmd;
+}
+
+static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
+{
+ /* Low hurts. Let's disable the filtering. */
+ RTL_W16(RxMaxSize, rx_buf_sz + 1);
+}
+
+static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
+{
+ static const struct rtl_cfg2_info {
+ u32 mac_version;
+ u32 clk;
+ u32 val;
+ } cfg2_info[] = {
+ { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
+ { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
+ { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
+ { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
+ };
+ const struct rtl_cfg2_info *p = cfg2_info;
+ unsigned int i;
+ u32 clk;
+
+ clk = RTL_R8(Config2) & PCI_Clock_66MHz;
+ for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
+ if ((p->mac_version == mac_version) && (p->clk == clk)) {
+ RTL_W32(0x7c, p->val);
+ break;
+ }
+ }
+}
+
+static void rtl_hw_start_8169(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
+ }
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_02 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_03 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_04)
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+
+ rtl_init_rxcfg(tp);
+
+ RTL_W8(EarlyTxThres, NoEarlyTx);
+
+ rtl_set_rx_max_size(ioaddr, rx_buf_sz);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_02 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_03 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_04)
+ rtl_set_rx_tx_config_registers(tp);
+
+ tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_03) {
+ dprintk("Set MAC Reg C+CR Offset 0xE0. "
+ "Bit-3 and bit-14 MUST be 1\n");
+ tp->cp_cmd |= (1 << 14);
+ }
+
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+
+ rtl8169_set_magic_reg(ioaddr, tp->mac_version);
+
+ /*
+ * Undocumented corner. Supposedly:
+ * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
+ */
+ RTL_W16(IntrMitigate, 0x0000);
+
+ rtl_set_rx_tx_desc_registers(tp, ioaddr);
+
+ if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_02 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_03 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_04) {
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+ rtl_set_rx_tx_config_registers(tp);
+ }
+
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+
+ /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
+ RTL_R8(IntrMask);
+
+ RTL_W32(RxMissed, 0);
+
+ rtl_set_rx_mode(dev);
+
+ /* no early-rx interrupts */
+ RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+ RTL_W16(IntrMask, tp->intr_event);
+}
+
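+/*
+ * Replace the top byte of CSI register 0x070c. The 0x17/0x27 values
+ * used below are undocumented, chip-specific magic.
+ */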
+static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits)
+{
+ u32 csi;
+
+ csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
+ rtl_csi_write(ioaddr, 0x070c, csi | bits);
+}
+
+static void rtl_csi_access_enable_1(void __iomem *ioaddr)
+{
+ rtl_csi_access_enable(ioaddr, 0x17000000);
+}
+
+static void rtl_csi_access_enable_2(void __iomem *ioaddr)
+{
+ rtl_csi_access_enable(ioaddr, 0x27000000);
+}
+
+struct ephy_info {
+ unsigned int offset;
+ u16 mask;
+ u16 bits;
+};
+
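+/* Apply a table of EPHY fixups: clear the 'mask' bits and set the
+ * 'bits' bits at each 'offset'. */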
+static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len)
+{
+ u16 w;
+
+ while (len-- > 0) {
+ w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
+ rtl_ephy_write(ioaddr, e->offset, w);
+ e++;
+ }
+}
+
+static void rtl_disable_clock_request(struct pci_dev *pdev)
+{
+ int cap = pci_pcie_cap(pdev);
+
+ if (cap) {
+ u16 ctl;
+
+ pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
+ ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
+ pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
+ }
+}
+
+static void rtl_enable_clock_request(struct pci_dev *pdev)
+{
+ int cap = pci_pcie_cap(pdev);
+
+ if (cap) {
+ u16 ctl;
+
+ pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
+ ctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
+ pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
+ }
+}
+
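+/* CPlusCmd debug/test bits that the 8168 start routines clear unconditionally. */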
+#define R8168_CPCMD_QUIRK_MASK (\
+ EnableBist | \
+ Mac_dbgo_oe | \
+ Force_half_dup | \
+ Force_rxflow_en | \
+ Force_txflow_en | \
+ Cxpl_dbg_sel | \
+ ASF | \
+ PktCntrDisable | \
+ Mac_dbgo_sel)
+
+static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+
+ rtl_tx_performance_tweak(pdev,
+ (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
+}
+
+static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_hw_start_8168bb(ioaddr, pdev);
+
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
+}
+
+static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
+
+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ rtl_disable_clock_request(pdev);
+
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+}
+
+static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8168cp[] = {
+ { 0x01, 0, 0x0001 },
+ { 0x02, 0x0800, 0x1000 },
+ { 0x03, 0, 0x0042 },
+ { 0x06, 0x0080, 0x0000 },
+ { 0x07, 0, 0x2000 }
+ };
+
+ rtl_csi_access_enable_2(ioaddr);
+
+ rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
+
+ __rtl_hw_start_8168cp(ioaddr, pdev);
+}
+
+static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_csi_access_enable_2(ioaddr);
+
+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+}
+
+static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_csi_access_enable_2(ioaddr);
+
+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+
+ /* Magic. */
+ RTL_W8(DBG_REG, 0x20);
+
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+}
+
+static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8168c_1[] = {
+ { 0x02, 0x0800, 0x1000 },
+ { 0x03, 0, 0x0002 },
+ { 0x06, 0x0080, 0x0000 }
+ };
+
+ rtl_csi_access_enable_2(ioaddr);
+
+ RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
+
+ rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
+
+ __rtl_hw_start_8168cp(ioaddr, pdev);
+}
+
+static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8168c_2[] = {
+ { 0x01, 0, 0x0001 },
+ { 0x03, 0x0400, 0x0220 }
+ };
+
+ rtl_csi_access_enable_2(ioaddr);
+
+ rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
+
+ __rtl_hw_start_8168cp(ioaddr, pdev);
+}
+
+static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_hw_start_8168c_2(ioaddr, pdev);
+}
+
+static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_csi_access_enable_2(ioaddr);
+
+ __rtl_hw_start_8168cp(ioaddr, pdev);
+}
+
+static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_csi_access_enable_2(ioaddr);
+
+ rtl_disable_clock_request(pdev);
+
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+}
+
+static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_csi_access_enable_1(ioaddr);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ rtl_disable_clock_request(pdev);
+}
+
+static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8168d_4[] = {
+ { 0x0b, ~0, 0x48 },
+ { 0x19, 0x20, 0x50 },
+ { 0x0c, ~0, 0x20 }
+ };
+ int i;
+
+ rtl_csi_access_enable_1(ioaddr);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
+ const struct ephy_info *e = e_info_8168d_4 + i;
+ u16 w;
+
+ w = rtl_ephy_read(ioaddr, e->offset);
+ rtl_ephy_write(ioaddr, e->offset, (w & e->mask) | e->bits);
+ }
+
+ rtl_enable_clock_request(pdev);
+}
+
+static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8168e_1[] = {
+ { 0x00, 0x0200, 0x0100 },
+ { 0x00, 0x0000, 0x0004 },
+ { 0x06, 0x0002, 0x0001 },
+ { 0x06, 0x0000, 0x0030 },
+ { 0x07, 0x0000, 0x2000 },
+ { 0x00, 0x0000, 0x0020 },
+ { 0x03, 0x5800, 0x2000 },
+ { 0x03, 0x0000, 0x0001 },
+ { 0x01, 0x0800, 0x1000 },
+ { 0x07, 0x0000, 0x4000 },
+ { 0x1e, 0x0000, 0x2000 },
+ { 0x19, 0xffff, 0xfe6c },
+ { 0x0a, 0x0000, 0x0040 }
+ };
+
+ rtl_csi_access_enable_2(ioaddr);
+
+ rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ rtl_disable_clock_request(pdev);
+
+ /* Reset tx FIFO pointer */
+ RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
+ RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
+
+ RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+}
+
+static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8168e_2[] = {
+ { 0x09, 0x0000, 0x0080 },
+ { 0x19, 0x0000, 0x0224 }
+ };
+
+ rtl_csi_access_enable_1(ioaddr);
+
+ rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
+ ERIAR_EXGMAC);
+
+ RTL_W8(MaxTxPacketSize, EarlySize);
+
+ rtl_disable_clock_request(pdev);
+
+ RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
+ RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+
+ /* Adjust EEE LED frequency */
+ RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+
+ RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
+ RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
+ RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+}
+
+static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8168f_1[] = {
+ { 0x06, 0x00c0, 0x0020 },
+ { 0x08, 0x0001, 0x0002 },
+ { 0x09, 0x0000, 0x0080 },
+ { 0x19, 0x0000, 0x0224 }
+ };
+
+ rtl_csi_access_enable_1(ioaddr);
+
+ rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
+ ERIAR_EXGMAC);
+
+ RTL_W8(MaxTxPacketSize, EarlySize);
+
+ rtl_disable_clock_request(pdev);
+
+ RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
+ RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+
+ /* Adjust EEE LED frequency */
+ RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+
+ RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
+ RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
+ RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+}
+
+static void rtl_hw_start_8168(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ rtl_set_rx_max_size(ioaddr, rx_buf_sz);
+
+ tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
+
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+
+ RTL_W16(IntrMitigate, 0x5151);
+
+ /* Work around for RxFIFO overflow. */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_22) {
+ tp->intr_event |= RxFIFOOver | PCSTimeout;
+ tp->intr_event &= ~RxOverflow;
+ }
+
+ rtl_set_rx_tx_desc_registers(tp, ioaddr);
+
+ rtl_set_rx_mode(dev);
+
+ RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
+ (InterFrameGap << TxInterFrameGapShift));
+
+ RTL_R8(IntrMask);
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_11:
+ rtl_hw_start_8168bb(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ rtl_hw_start_8168bef(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_18:
+ rtl_hw_start_8168cp_1(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_19:
+ rtl_hw_start_8168c_1(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_20:
+ rtl_hw_start_8168c_2(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_21:
+ rtl_hw_start_8168c_3(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_22:
+ rtl_hw_start_8168c_4(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_23:
+ rtl_hw_start_8168cp_2(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_24:
+ rtl_hw_start_8168cp_3(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ rtl_hw_start_8168d(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_28:
+ rtl_hw_start_8168d_4(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_31:
+ rtl_hw_start_8168dp(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ rtl_hw_start_8168e_1(ioaddr, pdev);
+ break;
+ case RTL_GIGA_MAC_VER_34:
+ rtl_hw_start_8168e_2(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_35:
+ case RTL_GIGA_MAC_VER_36:
+ rtl_hw_start_8168f_1(ioaddr, pdev);
+ break;
+
+ default:
+ printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
+ dev->name, tp->mac_version);
+ break;
+ }
+
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+
+ RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
+
+ RTL_W16(IntrMask, tp->intr_event);
+}
+
+#define R810X_CPCMD_QUIRK_MASK (\
+ EnableBist | \
+ Mac_dbgo_oe | \
+ Force_half_dup | \
+ Force_rxflow_en | \
+ Force_txflow_en | \
+ Cxpl_dbg_sel | \
+ ASF | \
+ PktCntrDisable | \
+ Mac_dbgo_sel)
+
+static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8102e_1[] = {
+ { 0x01, 0, 0x6e65 },
+ { 0x02, 0, 0x091f },
+ { 0x03, 0, 0xc2f9 },
+ { 0x06, 0, 0xafb5 },
+ { 0x07, 0, 0x0e00 },
+ { 0x19, 0, 0xec80 },
+ { 0x01, 0, 0x2e65 },
+ { 0x01, 0, 0x6e65 }
+ };
+ u8 cfg1;
+
+ rtl_csi_access_enable_2(ioaddr);
+
+ RTL_W8(DBG_REG, FIX_NAK_1);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W8(Config1,
+ LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+
+ cfg1 = RTL_R8(Config1);
+ if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
+ RTL_W8(Config1, cfg1 & ~LEDS0);
+
+ rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
+}
+
+static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_csi_access_enable_2(ioaddr);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+}
+
+static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_hw_start_8102e_2(ioaddr, pdev);
+
+ rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
+}
+
+static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8105e_1[] = {
+ { 0x07, 0, 0x4000 },
+ { 0x19, 0, 0x0200 },
+ { 0x19, 0, 0x0020 },
+ { 0x1e, 0, 0x2000 },
+ { 0x03, 0, 0x0001 },
+ { 0x19, 0, 0x0100 },
+ { 0x19, 0, 0x0004 },
+ { 0x0a, 0, 0x0020 }
+ };
+
+ /* Force LAN exit from ASPM if Rx/Tx are not idle */
+ RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
+
+ /* Disable Early Tally Counter */
+ RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
+
+ RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
+ RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
+
+ rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
+}
+
+static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_hw_start_8105e_1(ioaddr, pdev);
+ rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
+}
+
+static void rtl_hw_start_8101(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_16) {
+ int cap = pci_pcie_cap(pdev);
+
+ if (cap) {
+ pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_NOSNOOP_EN);
+ }
+ }
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_07:
+ rtl_hw_start_8102e_1(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_08:
+ rtl_hw_start_8102e_3(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_09:
+ rtl_hw_start_8102e_2(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_29:
+ rtl_hw_start_8105e_1(ioaddr, pdev);
+ break;
+ case RTL_GIGA_MAC_VER_30:
+ rtl_hw_start_8105e_2(ioaddr, pdev);
+ break;
+ }
+
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ rtl_set_rx_max_size(ioaddr, rx_buf_sz);
+
+ tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+
+ RTL_W16(IntrMitigate, 0x0000);
+
+ rtl_set_rx_tx_desc_registers(tp, ioaddr);
+
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+ rtl_set_rx_tx_config_registers(tp);
+
+ RTL_R8(IntrMask);
+
+ rtl_set_rx_mode(dev);
+
+ RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
+
+ RTL_W16(IntrMask, tp->intr_event);
+}
+
+static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (new_mtu < ETH_ZLEN ||
+ new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
+ return -EINVAL;
+
+ if (new_mtu > ETH_DATA_LEN)
+ rtl_hw_jumbo_enable(tp);
+ else
+ rtl_hw_jumbo_disable(tp);
+
+ dev->mtu = new_mtu;
+ netdev_update_features(dev);
+
+ return 0;
+}
+
+static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
+{
+ desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
+ desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
+}
+
+static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
+ void **data_buff, struct RxDesc *desc)
+{
+ dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
+ DMA_FROM_DEVICE);
+
+ kfree(*data_buff);
+ *data_buff = NULL;
+ rtl8169_make_unusable_by_asic(desc);
+}
+
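+/* Hand the descriptor and its rx_buf_sz buffer back to the NIC via DescOwn. */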
+static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
+{
+ u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
+
+ desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
+}
+
+static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
+ u32 rx_buf_sz)
+{
+ desc->addr = cpu_to_le64(mapping);
+ wmb();
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
+}
+
+static inline void *rtl8169_align(void *data)
+{
+ return (void *)ALIGN((long)data, 16);
+}
+
+static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
+ struct RxDesc *desc)
+{
+ void *data;
+ dma_addr_t mapping;
+ struct device *d = &tp->pci_dev->dev;
+ struct net_device *dev = tp->dev;
+ int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
+
+ data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
+ if (!data)
+ return NULL;
+
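+ /* kmalloc_node() may not return a 16 byte aligned buffer: retry with
+ * enough slack to align it by hand. */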
+ if (rtl8169_align(data) != data) {
+ kfree(data);
+ data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
+ if (!data)
+ return NULL;
+ }
+
+ mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(d, mapping))) {
+ if (net_ratelimit())
+ netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
+ goto err_out;
+ }
+
+ rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
+ return data;
+
+err_out:
+ kfree(data);
+ return NULL;
+}
+
+static void rtl8169_rx_clear(struct rtl8169_private *tp)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ if (tp->Rx_databuff[i]) {
+ rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
+ tp->RxDescArray + i);
+ }
+ }
+}
+
+static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
+{
+ desc->opts1 |= cpu_to_le32(RingEnd);
+}
+
+static int rtl8169_rx_fill(struct rtl8169_private *tp)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ void *data;
+
+ if (tp->Rx_databuff[i])
+ continue;
+
+ data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
+ if (!data) {
+ rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
+ goto err_out;
+ }
+ tp->Rx_databuff[i] = data;
+ }
+
+ rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
+ return 0;
+
+err_out:
+ rtl8169_rx_clear(tp);
+ return -ENOMEM;
+}
+
+static int rtl8169_init_ring(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl8169_init_ring_indexes(tp);
+
+ memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
+ memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
+
+ return rtl8169_rx_fill(tp);
+}
+
+static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
+ struct TxDesc *desc)
+{
+ unsigned int len = tx_skb->len;
+
+ dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
+
+ desc->opts1 = 0x00;
+ desc->opts2 = 0x00;
+ desc->addr = 0x00;
+ tx_skb->len = 0;
+}
+
+static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
+ unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 0; i < n; i++) {
+ unsigned int entry = (start + i) % NUM_TX_DESC;
+ struct ring_info *tx_skb = tp->tx_skb + entry;
+ unsigned int len = tx_skb->len;
+
+ if (len) {
+ struct sk_buff *skb = tx_skb->skb;
+
+ rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
+ tp->TxDescArray + entry);
+ if (skb) {
+ tp->dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ tx_skb->skb = NULL;
+ }
+ }
+ }
+}
+
+static void rtl8169_tx_clear(struct rtl8169_private *tp)
+{
+ rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
+ tp->cur_tx = tp->dirty_tx = 0;
+}
+
+static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ PREPARE_DELAYED_WORK(&tp->task, task);
+ schedule_delayed_work(&tp->task, 4);
+}
+
+static void rtl8169_wait_for_quiescence(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ synchronize_irq(dev->irq);
+
+ /* Wait for any pending NAPI task to complete */
+ napi_disable(&tp->napi);
+
+ rtl8169_irq_mask_and_ack(ioaddr);
+
+ tp->intr_mask = 0xffff;
+ RTL_W16(IntrMask, tp->intr_event);
+ napi_enable(&tp->napi);
+}
+
+static void rtl8169_reinit_task(struct work_struct *work)
+{
+ struct rtl8169_private *tp =
+ container_of(work, struct rtl8169_private, task.work);
+ struct net_device *dev = tp->dev;
+ int ret;
+
+ rtnl_lock();
+
+ if (!netif_running(dev))
+ goto out_unlock;
+
+ rtl8169_wait_for_quiescence(dev);
+ rtl8169_close(dev);
+
+ ret = rtl8169_open(dev);
+ if (unlikely(ret < 0)) {
+ if (net_ratelimit())
+ netif_err(tp, drv, dev,
+ "reinit failure (status = %d). Rescheduling\n",
+ ret);
+ rtl8169_schedule_work(dev, rtl8169_reinit_task);
+ }
+
+out_unlock:
+ rtnl_unlock();
+}
+
+static void rtl8169_reset_task(struct work_struct *work)
+{
+ struct rtl8169_private *tp =
+ container_of(work, struct rtl8169_private, task.work);
+ struct net_device *dev = tp->dev;
+ int i;
+
+ rtnl_lock();
+
+ if (!netif_running(dev))
+ goto out_unlock;
+
+ rtl8169_wait_for_quiescence(dev);
+
+ for (i = 0; i < NUM_RX_DESC; i++)
+ rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
+
+ rtl8169_tx_clear(tp);
+
+ rtl8169_hw_reset(tp);
+ rtl_hw_start(dev);
+ netif_wake_queue(dev);
+ rtl8169_check_link_status(dev, tp, tp->mmio_addr);
+
+out_unlock:
+ rtnl_unlock();
+}
+
+static void rtl8169_tx_timeout(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl8169_hw_reset(tp);
+
+ /* Wait a bit so that any pending (async) irq can land. */
+ rtl8169_schedule_work(dev, rtl8169_reset_task);
+}
+
+static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
+ u32 *opts)
+{
+ struct skb_shared_info *info = skb_shinfo(skb);
+ unsigned int cur_frag, entry;
+ struct TxDesc * uninitialized_var(txd);
+ struct device *d = &tp->pci_dev->dev;
+
+ entry = tp->cur_tx;
+ for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
+ skb_frag_t *frag = info->frags + cur_frag;
+ dma_addr_t mapping;
+ u32 status, len;
+ void *addr;
+
+ entry = (entry + 1) % NUM_TX_DESC;
+
+ txd = tp->TxDescArray + entry;
+ len = frag->size;
+ addr = skb_frag_address(frag);
+ mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(d, mapping))) {
+ if (net_ratelimit())
+ netif_err(tp, drv, tp->dev,
+ "Failed to map TX fragments DMA!\n");
+ goto err_out;
+ }
+
+ /* Anti gcc 2.95.3 bugware (sic) */
+ status = opts[0] | len |
+ (RingEnd * !((entry + 1) % NUM_TX_DESC));
+
+ txd->opts1 = cpu_to_le32(status);
+ txd->opts2 = cpu_to_le32(opts[1]);
+ txd->addr = cpu_to_le64(mapping);
+
+ tp->tx_skb[entry].len = len;
+ }
+
+ if (cur_frag) {
+ tp->tx_skb[entry].skb = skb;
+ txd->opts1 |= cpu_to_le32(LastFrag);
+ }
+
+ return cur_frag;
+
+err_out:
+ rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
+ return -EIO;
+}
+
+static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
+ struct sk_buff *skb, u32 *opts)
+{
+ const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
+ u32 mss = skb_shinfo(skb)->gso_size;
+ int offset = info->opts_offset;
+
+ if (mss) {
+ opts[0] |= TD_LSO;
+ opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const struct iphdr *ip = ip_hdr(skb);
+
+ if (ip->protocol == IPPROTO_TCP)
+ opts[offset] |= info->checksum.tcp;
+ else if (ip->protocol == IPPROTO_UDP)
+ opts[offset] |= info->checksum.udp;
+ else
+ WARN_ON_ONCE(1);
+ }
+}
+
+static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned int entry = tp->cur_tx % NUM_TX_DESC;
+ struct TxDesc *txd = tp->TxDescArray + entry;
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct device *d = &tp->pci_dev->dev;
+ dma_addr_t mapping;
+ u32 status, len;
+ u32 opts[2];
+ int frags;
+
+ if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
+ netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
+ goto err_stop_0;
+ }
+
+ if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
+ goto err_stop_0;
+
+ len = skb_headlen(skb);
+ mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(d, mapping))) {
+ if (net_ratelimit())
+ netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
+ goto err_dma_0;
+ }
+
+ tp->tx_skb[entry].len = len;
+ txd->addr = cpu_to_le64(mapping);
+
+ opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
+ opts[0] = DescOwn;
+
+ rtl8169_tso_csum(tp, skb, opts);
+
+ frags = rtl8169_xmit_frags(tp, skb, opts);
+ if (frags < 0)
+ goto err_dma_1;
+ else if (frags)
+ opts[0] |= FirstFrag;
+ else {
+ opts[0] |= FirstFrag | LastFrag;
+ tp->tx_skb[entry].skb = skb;
+ }
+
+ txd->opts2 = cpu_to_le32(opts[1]);
+
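+ /* Make the descriptor contents visible before DescOwn is granted below. */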
+ wmb();
+
+ /* Anti gcc 2.95.3 bugware (sic) */
+ status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
+ txd->opts1 = cpu_to_le32(status);
+
+ tp->cur_tx += frags + 1;
+
+ wmb();
+
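+ /* Notify the NIC that the normal priority queue has work. */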
+ RTL_W8(TxPoll, NPQ);
+
+ if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
+ netif_stop_queue(dev);
+ smp_rmb();
+ if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
+ netif_wake_queue(dev);
+ }
+
+ return NETDEV_TX_OK;
+
+err_dma_1:
+ rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
+err_dma_0:
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+
+err_stop_0:
+ netif_stop_queue(dev);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_BUSY;
+}
+
+static void rtl8169_pcierr_interrupt(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct pci_dev *pdev = tp->pci_dev;
+ u16 pci_status, pci_cmd;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+ pci_read_config_word(pdev, PCI_STATUS, &pci_status);
+
+ netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
+ pci_cmd, pci_status);
+
+ /*
+ * The recovery sequence below admits a very elaborate explanation:
+ * - it seems to work;
+ * - I did not see what else could be done;
+ * - it makes iop3xx happy.
+ *
+ * Feel free to adjust to your needs.
+ */
+ if (pdev->broken_parity_status)
+ pci_cmd &= ~PCI_COMMAND_PARITY;
+ else
+ pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
+
+ pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+
+ pci_write_config_word(pdev, PCI_STATUS,
+ pci_status & (PCI_STATUS_DETECTED_PARITY |
+ PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
+ PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
+
+ /* The infamous DAC f*ckup only happens at boot time */
+ if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ netif_info(tp, intr, dev, "disabling PCI DAC\n");
+ tp->cp_cmd &= ~PCIDAC;
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+ dev->features &= ~NETIF_F_HIGHDMA;
+ }
+
+ rtl8169_hw_reset(tp);
+
+ rtl8169_schedule_work(dev, rtl8169_reinit_task);
+}
+
+static void rtl8169_tx_interrupt(struct net_device *dev,
+ struct rtl8169_private *tp,
+ void __iomem *ioaddr)
+{
+ unsigned int dirty_tx, tx_left;
+
+ dirty_tx = tp->dirty_tx;
+ smp_rmb();
+ tx_left = tp->cur_tx - dirty_tx;
+
+ while (tx_left > 0) {
+ unsigned int entry = dirty_tx % NUM_TX_DESC;
+ struct ring_info *tx_skb = tp->tx_skb + entry;
+ u32 status;
+
+ rmb();
+ status = le32_to_cpu(tp->TxDescArray[entry].opts1);
+ if (status & DescOwn)
+ break;
+
+ rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
+ tp->TxDescArray + entry);
+ if (status & LastFrag) {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += tx_skb->skb->len;
+ dev_kfree_skb(tx_skb->skb);
+ tx_skb->skb = NULL;
+ }
+ dirty_tx++;
+ tx_left--;
+ }
+
+ if (tp->dirty_tx != dirty_tx) {
+ tp->dirty_tx = dirty_tx;
+ smp_wmb();
+ if (netif_queue_stopped(dev) &&
+ (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
+ netif_wake_queue(dev);
+ }
+ /*
+ * 8168 hack: TxPoll requests are lost when the Tx packets are
+ * too close. Let's kick an extra TxPoll request when a burst
+ * of start_xmit activity is detected (if it is not detected,
+ * it is slow enough). -- FR
+ */
+ smp_rmb();
+ if (tp->cur_tx != dirty_tx)
+ RTL_W8(TxPoll, NPQ);
+ }
+}
+
+static inline int rtl8169_fragmented_frame(u32 status)
+{
+ return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
+}
+
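+/* Trust the hardware checksum only when the chip parsed the protocol
+ * and flagged no failure. */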
+static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
+{
+ u32 status = opts1 & RxProtoMask;
+
+ if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
+ ((status == RxProtoUDP) && !(opts1 & UDPFail)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+}
+
+static struct sk_buff *rtl8169_try_rx_copy(void *data,
+ struct rtl8169_private *tp,
+ int pkt_size,
+ dma_addr_t addr)
+{
+ struct sk_buff *skb;
+ struct device *d = &tp->pci_dev->dev;
+
+ data = rtl8169_align(data);
+ dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
+ prefetch(data);
+ skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
+ if (skb)
+ memcpy(skb->data, data, pkt_size);
+ dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
+
+ return skb;
+}
+
+static int rtl8169_rx_interrupt(struct net_device *dev,
+ struct rtl8169_private *tp,
+ void __iomem *ioaddr, u32 budget)
+{
+ unsigned int cur_rx, rx_left;
+ unsigned int count;
+
+ cur_rx = tp->cur_rx;
+ rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
+ rx_left = min(rx_left, budget);
+
+ for (; rx_left > 0; rx_left--, cur_rx++) {
+ unsigned int entry = cur_rx % NUM_RX_DESC;
+ struct RxDesc *desc = tp->RxDescArray + entry;
+ u32 status;
+
+ rmb();
+ status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
+
+ if (status & DescOwn)
+ break;
+ if (unlikely(status & RxRES)) {
+ netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
+ status);
+ dev->stats.rx_errors++;
+ if (status & (RxRWT | RxRUNT))
+ dev->stats.rx_length_errors++;
+ if (status & RxCRC)
+ dev->stats.rx_crc_errors++;
+ if (status & RxFOVF) {
+ rtl8169_schedule_work(dev, rtl8169_reset_task);
+ dev->stats.rx_fifo_errors++;
+ }
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
+ } else {
+ struct sk_buff *skb;
+ dma_addr_t addr = le64_to_cpu(desc->addr);
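+ /* The reported length apparently includes the 4 byte FCS, hence the -4. */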
+ int pkt_size = (status & 0x00003fff) - 4;
+
+ /*
+ * The driver does not support incoming fragmented
+ * frames. They are seen as a symptom of over-mtu
+ * sized frames.
+ */
+ if (unlikely(rtl8169_fragmented_frame(status))) {
+ dev->stats.rx_dropped++;
+ dev->stats.rx_length_errors++;
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
+ continue;
+ }
+
+ skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
+ tp, pkt_size, addr);
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ continue;
+ }
+
+ rtl8169_rx_csum(skb, status);
+ skb_put(skb, pkt_size);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ rtl8169_rx_vlan_tag(desc, skb);
+
+ napi_gro_receive(&tp->napi, skb);
+
+ dev->stats.rx_bytes += pkt_size;
+ dev->stats.rx_packets++;
+ }
+
+ /* Workaround for AMD platforms. */
+ if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
+ (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
+ desc->opts2 = 0;
+ cur_rx++;
+ }
+ }
+
+ count = cur_rx - tp->cur_rx;
+ tp->cur_rx = cur_rx;
+
+ tp->dirty_rx += count;
+
+ return count;
+}
+
+static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ int handled = 0;
+ int status;
+
+ /* Loop handling interrupts until we have no new ones or
+ * we hit an invalid/hotplug case.
+ */
+ status = RTL_R16(IntrStatus);
+ while (status && status != 0xffff) {
+ handled = 1;
+
+ /* Handle all of the error cases first. These will reset
+ * the chip, so just exit the loop.
+ */
+ if (unlikely(!netif_running(dev))) {
+ rtl8169_hw_reset(tp);
+ break;
+ }
+
+ if (unlikely(status & RxFIFOOver)) {
+ switch (tp->mac_version) {
+ /* Work around for rx fifo overflow */
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_26:
+ netif_stop_queue(dev);
+ rtl8169_tx_timeout(dev);
+ goto done;
+ /* Testers needed. */
+ case RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_19:
+ case RTL_GIGA_MAC_VER_20:
+ case RTL_GIGA_MAC_VER_21:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ /* Experimental science. Pktgen proof. */
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_25:
+ if (status == RxFIFOOver)
+ goto done;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (unlikely(status & SYSErr)) {
+ rtl8169_pcierr_interrupt(dev);
+ break;
+ }
+
+ if (status & LinkChg)
+ __rtl8169_check_link_status(dev, tp, ioaddr, true);
+
+ /* We need to see the latest version of tp->intr_mask to
+ * avoid ignoring an MSI interrupt and having to wait for
+ * another event which may never come.
+ */
+ smp_rmb();
+ if (status & tp->intr_mask & tp->napi_event) {
+ RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
+ tp->intr_mask = ~tp->napi_event;
+
+ if (likely(napi_schedule_prep(&tp->napi)))
+ __napi_schedule(&tp->napi);
+ else
+ netif_info(tp, intr, dev,
+ "interrupt %04x in poll\n", status);
+ }
+
+ /* We only get a new MSI interrupt when all active irq
+ * sources on the chip have been acknowledged. So, ack
+ * everything we've seen and check if new sources have become
+ * active to avoid blocking all interrupts from the chip.
+ */
+ RTL_W16(IntrStatus,
+ (status & RxFIFOOver) ? (status | RxOverflow) : status);
+ status = RTL_R16(IntrStatus);
+ }
+done:
+ return IRQ_RETVAL(handled);
+}
+
+static int rtl8169_poll(struct napi_struct *napi, int budget)
+{
+ struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
+ struct net_device *dev = tp->dev;
+ void __iomem *ioaddr = tp->mmio_addr;
+ int work_done;
+
+ work_done = rtl8169_rx_interrupt(dev, tp, ioaddr, (u32) budget);
+ rtl8169_tx_interrupt(dev, tp, ioaddr);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+
+ /* We need to force the visibility of tp->intr_mask
+ * for other CPUs, as we can lose an MSI interrupt
+ * and potentially wait for a retransmit timeout if we don't.
+ * The posted write to IntrMask is safe, as it will
+ * eventually make it to the chip and we won't lose anything
+ * until it does.
+ */
+ tp->intr_mask = 0xffff;
+ wmb();
+ RTL_W16(IntrMask, tp->intr_event);
+ }
+
+ return work_done;
+}
+
+static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (tp->mac_version > RTL_GIGA_MAC_VER_06)
+ return;
+
+ dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
+ RTL_W32(RxMissed, 0);
+}
+
+static void rtl8169_down(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ del_timer_sync(&tp->timer);
+
+ netif_stop_queue(dev);
+
+ napi_disable(&tp->napi);
+
+ spin_lock_irq(&tp->lock);
+
+ rtl8169_hw_reset(tp);
+ /*
+ * At this point device interrupts can not be enabled in any function,
+ * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task,
+ * rtl8169_reinit_task) and napi is disabled (rtl8169_poll).
+ */
+ rtl8169_rx_missed(dev, ioaddr);
+
+ spin_unlock_irq(&tp->lock);
+
+ synchronize_irq(dev->irq);
+
+ /* Give a racing hard_start_xmit a few cycles to complete. */
+ synchronize_sched(); /* FIXME: should this be synchronize_irq()? */
+
+ rtl8169_tx_clear(tp);
+
+ rtl8169_rx_clear(tp);
+
+ rtl_pll_power_down(tp);
+}
+
+static int rtl8169_close(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct pci_dev *pdev = tp->pci_dev;
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ /* Update counters before going down */
+ rtl8169_update_counters(dev);
+
+ rtl8169_down(dev);
+
+ free_irq(dev->irq, dev);
+
+ dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+ tp->RxPhyAddr);
+ dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+ tp->TxPhyAddr);
+ tp->TxDescArray = NULL;
+ tp->RxDescArray = NULL;
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ return 0;
+}
+
+static void rtl_set_rx_mode(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ int rx_mode;
+ u32 tmp = 0;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
+ rx_mode =
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
+ AcceptAllPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else {
+ struct netdev_hw_addr *ha;
+
+ rx_mode = AcceptBroadcast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ rx_mode |= AcceptMulticast;
+ }
+ }
+
+ spin_lock_irqsave(&tp->lock, flags);
+
+ tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
+
+ if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
+ u32 data = mc_filter[0];
+
+ mc_filter[0] = swab32(mc_filter[1]);
+ mc_filter[1] = swab32(data);
+ }
+
+ RTL_W32(MAR0 + 4, mc_filter[1]);
+ RTL_W32(MAR0 + 0, mc_filter[0]);
+
+ RTL_W32(RxConfig, tmp);
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+}
+
+/**
+ * rtl8169_get_stats - Get rtl8169 read/write statistics
+ * @dev: The Ethernet Device to get statistics for
+ *
+ * Get TX/RX statistics for rtl8169
+ */
+static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+
+ if (netif_running(dev)) {
+ spin_lock_irqsave(&tp->lock, flags);
+ rtl8169_rx_missed(dev, ioaddr);
+ spin_unlock_irqrestore(&tp->lock, flags);
+ }
+
+ return &dev->stats;
+}
+
+static void rtl8169_net_suspend(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return;
+
+ rtl_pll_power_down(tp);
+
+ netif_device_detach(dev);
+ netif_stop_queue(dev);
+}
+
+#ifdef CONFIG_PM
+
+static int rtl8169_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ rtl8169_net_suspend(dev);
+
+ return 0;
+}
+
+static void __rtl8169_resume(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ netif_device_attach(dev);
+
+ rtl_pll_power_up(tp);
+
+ rtl8169_schedule_work(dev, rtl8169_reset_task);
+}
+
+static int rtl8169_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl8169_init_phy(dev, tp);
+
+ if (netif_running(dev))
+ __rtl8169_resume(dev);
+
+ return 0;
+}
+
+static int rtl8169_runtime_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!tp->TxDescArray)
+ return 0;
+
+ spin_lock_irq(&tp->lock);
+ tp->saved_wolopts = __rtl8169_get_wol(tp);
+ __rtl8169_set_wol(tp, WAKE_ANY);
+ spin_unlock_irq(&tp->lock);
+
+ rtl8169_net_suspend(dev);
+
+ return 0;
+}
+
+static int rtl8169_runtime_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!tp->TxDescArray)
+ return 0;
+
+ spin_lock_irq(&tp->lock);
+ __rtl8169_set_wol(tp, tp->saved_wolopts);
+ tp->saved_wolopts = 0;
+ spin_unlock_irq(&tp->lock);
+
+ rtl8169_init_phy(dev, tp);
+
+ __rtl8169_resume(dev);
+
+ return 0;
+}
+
+static int rtl8169_runtime_idle(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ return tp->TxDescArray ? -EBUSY : 0;
+}
+
+static const struct dev_pm_ops rtl8169_pm_ops = {
+ .suspend = rtl8169_suspend,
+ .resume = rtl8169_resume,
+ .freeze = rtl8169_suspend,
+ .thaw = rtl8169_resume,
+ .poweroff = rtl8169_suspend,
+ .restore = rtl8169_resume,
+ .runtime_suspend = rtl8169_runtime_suspend,
+ .runtime_resume = rtl8169_runtime_resume,
+ .runtime_idle = rtl8169_runtime_idle,
+};
+
+#define RTL8169_PM_OPS (&rtl8169_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define RTL8169_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
+
+static void rtl_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ rtl8169_net_suspend(dev);
+
+ /* Restore original MAC address */
+ rtl_rar_set(tp, dev->perm_addr);
+
+ spin_lock_irq(&tp->lock);
+
+ rtl8169_hw_reset(tp);
+
+ spin_unlock_irq(&tp->lock);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ /* WoL fails with 8168b when the receiver is disabled. */
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_11 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_12 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_17) &&
+ (tp->features & RTL_FEATURE_WOL)) {
+ pci_clear_master(pdev);
+
+ RTL_W8(ChipCmd, CmdRxEnb);
+ /* PCI commit */
+ RTL_R8(ChipCmd);
+ }
+
+ pci_wake_from_d3(pdev, true);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+static struct pci_driver rtl8169_pci_driver = {
+ .name = MODULENAME,
+ .id_table = rtl8169_pci_tbl,
+ .probe = rtl8169_init_one,
+ .remove = __devexit_p(rtl8169_remove_one),
+ .shutdown = rtl_shutdown,
+ .driver.pm = RTL8169_PM_OPS,
+};
+
+static int __init rtl8169_init_module(void)
+{
+ return pci_register_driver(&rtl8169_pci_driver);
+}
+
+static void __exit rtl8169_cleanup_module(void)
+{
+ pci_unregister_driver(&rtl8169_pci_driver);
+}
+
+module_init(rtl8169_init_module);
+module_exit(rtl8169_cleanup_module);
diff --git a/drivers/net/ethernet/realtek/sc92031.c b/drivers/net/ethernet/realtek/sc92031.c
new file mode 100644
index 000000000000..a284d6440538
--- /dev/null
+++ b/drivers/net/ethernet/realtek/sc92031.c
@@ -0,0 +1,1609 @@
+/* Silan SC92031 PCI Fast Ethernet Adapter driver
+ *
+ * Based on vendor drivers:
+ * Silan Fast Ethernet Netcard Driver:
+ * MODULE_AUTHOR ("gaoyonghong");
+ * MODULE_DESCRIPTION ("SILAN Fast Ethernet driver");
+ * MODULE_LICENSE("GPL");
+ * 8139D Fast Ethernet driver:
+ * (C) 2002 by gaoyonghong
+ * MODULE_AUTHOR ("gaoyonghong");
+ * MODULE_DESCRIPTION ("Rsltek 8139D PCI Fast Ethernet Adapter driver");
+ * MODULE_LICENSE("GPL");
+ * Both are almost identical and seem to be based on pci-skeleton.c
+ *
+ * Rewritten for 2.6 by Cesar Eduardo Barros
+ *
+ * A datasheet for this chip can be found at
+ * http://www.silan.com.cn/english/product/pdf/SC92031AY.pdf
+ */
+
+/* Note about set_mac_address: I don't know how to change the hardware
+ * matching, so you need to enable IFF_PROMISC when using it.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+
+#include <asm/irq.h>
+
+#define SC92031_NAME "sc92031"
+
+/* BAR 0 is MMIO, BAR 1 is PIO */
+#ifndef SC92031_USE_BAR
+#define SC92031_USE_BAR 0
+#endif
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
+static int multicast_filter_limit = 64;
+module_param(multicast_filter_limit, int, 0);
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Maximum number of filtered multicast addresses");
+
+static int media;
+module_param(media, int, 0);
+MODULE_PARM_DESC(media, "Media type (0x00 = autodetect,"
+ " 0x01 = 10M half, 0x02 = 10M full,"
+ " 0x04 = 100M half, 0x08 = 100M full)");
+
+/* Size of the in-memory receive ring. */
+#define RX_BUF_LEN_IDX 3 /* 0==8K, 1==16K, 2==32K, 3==64K, 4==128K */
+#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX)
+
+/* Number of Tx descriptor registers. */
+#define NUM_TX_DESC 4
+
+/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/
+#define MAX_ETH_FRAME_SIZE 1536
+
+/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
+#define TX_BUF_SIZE MAX_ETH_FRAME_SIZE
+#define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC)
+
+/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
+#define RX_FIFO_THRESH 7 /* Rx buffer level before first PCI xfer. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (4*HZ)
+
+#define SILAN_STATS_NUM 2 /* number of ETHTOOL_GSTATS */
+
+/* media options */
+#define AUTOSELECT 0x00
+#define M10_HALF 0x01
+#define M10_FULL 0x02
+#define M100_HALF 0x04
+#define M100_FULL 0x08
+
+ /* Symbolic offsets to registers. */
+enum silan_registers {
+ Config0 = 0x00, // Config0
+ Config1 = 0x04, // Config1
+ RxBufWPtr = 0x08, // Rx buffer write pointer
+ IntrStatus = 0x0C, // Interrupt status
+ IntrMask = 0x10, // Interrupt mask
+ RxbufAddr = 0x14, // Rx buffer start address
+ RxBufRPtr = 0x18, // Rx buffer read pointer
+ Txstatusall = 0x1C, // Transmit status of all descriptors
+ TxStatus0 = 0x20, // Transmit status (Four 32bit registers).
+ TxAddr0 = 0x30, // Tx descriptors (also four 32bit).
+ RxConfig = 0x40, // Rx configuration
+ MAC0 = 0x44, // Ethernet hardware address.
+ MAR0 = 0x4C, // Multicast filter.
+ RxStatus0 = 0x54, // Rx status
+ TxConfig = 0x5C, // Tx configuration
+ PhyCtrl = 0x60, // PHY control
+ FlowCtrlConfig = 0x64, // flow control
+ Miicmd0 = 0x68, // Mii command0 register
+ Miicmd1 = 0x6C, // Mii command1 register
+ Miistatus = 0x70, // Mii status register
+ Timercnt = 0x74, // Timer counter register
+ TimerIntr = 0x78, // Timer interrupt register
+ PMConfig = 0x7C, // Power Manager configuration
+ CRC0 = 0x80, // Power Manager CRC (two 32-bit registers)
+ Wakeup0 = 0x88, // Power Manager wakeup (eight 64-bit registers)
+ LSBCRC0 = 0xC8, // Power Manager LSBCRC (two 32-bit registers)
+ TestD0 = 0xD0,
+ TestD4 = 0xD4,
+ TestD8 = 0xD8,
+};
+
+#define MII_JAB 16
+#define MII_OutputStatus 24
+
+#define PHY_16_JAB_ENB 0x1000
+#define PHY_16_PORT_ENB 0x1
+
+enum IntrStatusBits {
+ LinkFail = 0x80000000,
+ LinkOK = 0x40000000,
+ TimeOut = 0x20000000,
+ RxOverflow = 0x0040,
+ RxOK = 0x0020,
+ TxOK = 0x0001,
+ IntrBits = LinkFail|LinkOK|TimeOut|RxOverflow|RxOK|TxOK,
+};
+
+enum TxStatusBits {
+ TxCarrierLost = 0x20000000,
+ TxAborted = 0x10000000,
+ TxOutOfWindow = 0x08000000,
+ TxNccShift = 22,
+ EarlyTxThresShift = 16,
+ TxStatOK = 0x8000,
+ TxUnderrun = 0x4000,
+ TxOwn = 0x2000,
+};
+
+enum RxStatusBits {
+ RxStatesOK = 0x80000,
+ RxBadAlign = 0x40000,
+ RxHugeFrame = 0x20000,
+ RxSmallFrame = 0x10000,
+ RxCRCOK = 0x8000,
+ RxCrlFrame = 0x4000,
+ Rx_Broadcast = 0x2000,
+ Rx_Multicast = 0x1000,
+ RxAddrMatch = 0x0800,
+ MiiErr = 0x0400,
+};
+
+enum RxConfigBits {
+ RxFullDx = 0x80000000,
+ RxEnb = 0x40000000,
+ RxSmall = 0x20000000,
+ RxHuge = 0x10000000,
+ RxErr = 0x08000000,
+ RxAllphys = 0x04000000,
+ RxMulticast = 0x02000000,
+ RxBroadcast = 0x01000000,
+ RxLoopBack = (1 << 23) | (1 << 22),
+ LowThresholdShift = 12,
+ HighThresholdShift = 2,
+};
+
+enum TxConfigBits {
+ TxFullDx = 0x80000000,
+ TxEnb = 0x40000000,
+ TxEnbPad = 0x20000000,
+ TxEnbHuge = 0x10000000,
+ TxEnbFCS = 0x08000000,
+ TxNoBackOff = 0x04000000,
+ TxEnbPrem = 0x02000000,
+ TxCareLostCrs = 0x1000000,
+ TxExdCollNum = 0xf00000,
+ TxDataRate = 0x80000,
+};
+
+enum PhyCtrlconfigbits {
+ PhyCtrlAne = 0x80000000,
+ PhyCtrlSpd100 = 0x40000000,
+ PhyCtrlSpd10 = 0x20000000,
+ PhyCtrlPhyBaseAddr = 0x1f000000,
+ PhyCtrlDux = 0x800000,
+ PhyCtrlReset = 0x400000,
+};
+
+enum FlowCtrlConfigBits {
+ FlowCtrlFullDX = 0x80000000,
+ FlowCtrlEnb = 0x40000000,
+};
+
+enum Config0Bits {
+ Cfg0_Reset = 0x80000000,
+ Cfg0_Anaoff = 0x40000000,
+ Cfg0_LDPS = 0x20000000,
+};
+
+enum Config1Bits {
+ Cfg1_EarlyRx = 1 << 31,
+ Cfg1_EarlyTx = 1 << 30,
+
+ //rx buffer size
+ Cfg1_Rcv8K = 0x0,
+ Cfg1_Rcv16K = 0x1,
+ Cfg1_Rcv32K = 0x3,
+ Cfg1_Rcv64K = 0x7,
+ Cfg1_Rcv128K = 0xf,
+};
+
+enum MiiCmd0Bits {
+ Mii_Divider = 0x20000000,
+ Mii_WRITE = 0x400000,
+ Mii_READ = 0x200000,
+ Mii_SCAN = 0x100000,
+ Mii_Tamod = 0x80000,
+ Mii_Drvmod = 0x40000,
+ Mii_mdc = 0x20000,
+ Mii_mdoen = 0x10000,
+ Mii_mdo = 0x8000,
+ Mii_mdi = 0x4000,
+};
+
+enum MiiStatusBits {
+ Mii_StatusBusy = 0x80000000,
+};
+
+enum PMConfigBits {
+ PM_Enable = 1 << 31,
+ PM_LongWF = 1 << 30,
+ PM_Magic = 1 << 29,
+ PM_LANWake = 1 << 28,
+ PM_LWPTN = (1 << 27 | 1 << 26),
+ PM_LinkUp = 1 << 25,
+ PM_WakeUp = 1 << 24,
+};
+
+/* Locking rules:
+ * priv->lock protects most of the fields of priv and most of the
+ * hardware registers. It does not have to protect against softirqs
+ * between sc92031_disable_interrupts and sc92031_enable_interrupts;
+ * it also does not need to be used in ->open and ->stop while the
+ * device interrupts are off.
+ * Not having to protect against softirqs is very useful due to heavy
+ * use of mdelay() at _sc92031_reset.
+ * Functions prefixed with _sc92031_ must be called with the lock held;
+ * functions prefixed with sc92031_ must be called without the lock held.
+ * Use mmiowb() before unlocking if the hardware was written to.
+ */
+
+/* Locking rules for the interrupt:
+ * - the interrupt and the tasklet never run at the same time
+ * - neither run between sc92031_disable_interrupts and
+ * sc92031_enable_interrupt
+ */
+
+struct sc92031_priv {
+ spinlock_t lock;
+ /* iomap.h cookie */
+ void __iomem *port_base;
+ /* pci device structure */
+ struct pci_dev *pdev;
+ /* tasklet */
+ struct tasklet_struct tasklet;
+
+ /* CPU address of rx ring */
+ void *rx_ring;
+ /* PCI address of rx ring */
+ dma_addr_t rx_ring_dma_addr;
+ /* PCI address of rx ring read pointer */
+ dma_addr_t rx_ring_tail;
+
+ /* tx ring write index */
+ unsigned tx_head;
+ /* tx ring read index */
+ unsigned tx_tail;
+ /* CPU address of tx bounce buffer */
+ void *tx_bufs;
+ /* PCI address of tx bounce buffer */
+ dma_addr_t tx_bufs_dma_addr;
+
+ /* copies of some hardware registers */
+ u32 intr_status;
+ atomic_t intr_mask;
+ u32 rx_config;
+ u32 tx_config;
+ u32 pm_config;
+
+ /* copy of some flags from dev->flags */
+ unsigned int mc_flags;
+
+ /* for ETHTOOL_GSTATS */
+ u64 tx_timeouts;
+ u64 rx_loss;
+
+ /* for dev->get_stats */
+ long rx_value;
+};
+
+/* I don't know which registers can be safely read; however, I can guess
+ * MAC0 is one of them. */
+static inline void _sc92031_dummy_read(void __iomem *port_base)
+{
+ ioread32(port_base + MAC0);
+}
+
+static u32 _sc92031_mii_wait(void __iomem *port_base)
+{
+ u32 mii_status;
+
+ do {
+ udelay(10);
+ mii_status = ioread32(port_base + Miistatus);
+ } while (mii_status & Mii_StatusBusy);
+
+ return mii_status;
+}
+
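+/* Run one MII transaction: wait for the bus to go idle, latch the
+ * operands, then start the command and wait for completion. */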
+static u32 _sc92031_mii_cmd(void __iomem *port_base, u32 cmd0, u32 cmd1)
+{
+ iowrite32(Mii_Divider, port_base + Miicmd0);
+
+ _sc92031_mii_wait(port_base);
+
+ iowrite32(cmd1, port_base + Miicmd1);
+ iowrite32(Mii_Divider | cmd0, port_base + Miicmd0);
+
+ return _sc92031_mii_wait(port_base);
+}
+
+static void _sc92031_mii_scan(void __iomem *port_base)
+{
+ _sc92031_mii_cmd(port_base, Mii_SCAN, 0x1 << 6);
+}
+
+static u16 _sc92031_mii_read(void __iomem *port_base, unsigned reg)
+{
+ return _sc92031_mii_cmd(port_base, Mii_READ, reg << 6) >> 13;
+}
+
+static void _sc92031_mii_write(void __iomem *port_base, unsigned reg, u16 val)
+{
+ _sc92031_mii_cmd(port_base, Mii_WRITE, (reg << 6) | ((u32)val << 11));
+}
+
+static void sc92031_disable_interrupts(struct net_device *dev)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem *port_base = priv->port_base;
+
+ /* tell the tasklet/interrupt not to enable interrupts */
+ atomic_set(&priv->intr_mask, 0);
+ wmb();
+
+ /* stop interrupts */
+ iowrite32(0, port_base + IntrMask);
+ _sc92031_dummy_read(port_base);
+ mmiowb();
+
+ /* wait for any concurrent interrupt/tasklet to finish */
+ synchronize_irq(dev->irq);
+ tasklet_disable(&priv->tasklet);
+}
+
+static void sc92031_enable_interrupts(struct net_device *dev)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem *port_base = priv->port_base;
+
+ tasklet_enable(&priv->tasklet);
+
+ atomic_set(&priv->intr_mask, IntrBits);
+ wmb();
+
+ iowrite32(IntrBits, port_base + IntrMask);
+ mmiowb();
+}
+
+static void _sc92031_disable_tx_rx(struct net_device *dev)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem *port_base = priv->port_base;
+
+ priv->rx_config &= ~RxEnb;
+ priv->tx_config &= ~TxEnb;
+ iowrite32(priv->rx_config, port_base + RxConfig);
+ iowrite32(priv->tx_config, port_base + TxConfig);
+}
+
+static void _sc92031_enable_tx_rx(struct net_device *dev)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem *port_base = priv->port_base;
+
+ priv->rx_config |= RxEnb;
+ priv->tx_config |= TxEnb;
+ iowrite32(priv->rx_config, port_base + RxConfig);
+ iowrite32(priv->tx_config, port_base + TxConfig);
+}
+
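+/* Drop any frames still pending in the tx ring and reset the ring
+ * indices; each dropped frame is counted in dev->stats.tx_dropped.
+ */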
+static void _sc92031_tx_clear(struct net_device *dev)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+
+ while (priv->tx_head - priv->tx_tail > 0) {
+ priv->tx_tail++;
+ dev->stats.tx_dropped++;
+ }
+ priv->tx_head = priv->tx_tail = 0;
+}
+
+static void _sc92031_set_mar(struct net_device *dev)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem *port_base = priv->port_base;
+ u32 mar0 = 0, mar1 = 0;
+
+ if ((dev->flags & IFF_PROMISC) ||
+ netdev_mc_count(dev) > multicast_filter_limit ||
+ (dev->flags & IFF_ALLMULTI))
+ mar0 = mar1 = 0xffffffff;
+ else if (dev->flags & IFF_MULTICAST) {
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 crc;
+ unsigned bit = 0;
+
+ crc = ~ether_crc(ETH_ALEN, ha->addr);
+ crc >>= 24;
+
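+			/* Permute the top CRC bits into a 6-bit hash
+			 * index: adjacent bit pairs are swapped, and
+			 * bits 2-3 of the shifted CRC are not used.
+			 */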
+ if (crc & 0x01) bit |= 0x02;
+ if (crc & 0x02) bit |= 0x01;
+ if (crc & 0x10) bit |= 0x20;
+ if (crc & 0x20) bit |= 0x10;
+ if (crc & 0x40) bit |= 0x08;
+ if (crc & 0x80) bit |= 0x04;
+
+ if (bit > 31)
+ mar0 |= 0x1 << (bit - 32);
+ else
+ mar1 |= 0x1 << bit;
+ }
+ }
+
+ iowrite32(mar0, port_base + MAR0);
+ iowrite32(mar1, port_base + MAR0 + 4);
+}
+
+static void _sc92031_set_rx_config(struct net_device *dev)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem *port_base = priv->port_base;
+ unsigned int old_mc_flags;
+ u32 rx_config_bits = 0;
+
+ old_mc_flags = priv->mc_flags;
+
+ if (dev->flags & IFF_PROMISC)
+ rx_config_bits |= RxSmall | RxHuge | RxErr | RxBroadcast
+ | RxMulticast | RxAllphys;
+
+ if (dev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ rx_config_bits |= RxMulticast;
+
+ if (dev->flags & IFF_BROADCAST)
+ rx_config_bits |= RxBroadcast;
+
+ priv->rx_config &= ~(RxSmall | RxHuge | RxErr | RxBroadcast
+ | RxMulticast | RxAllphys);
+ priv->rx_config |= rx_config_bits;
+
+ priv->mc_flags = dev->flags & (IFF_PROMISC | IFF_ALLMULTI
+ | IFF_MULTICAST | IFF_BROADCAST);
+
+ if (netif_carrier_ok(dev) && priv->mc_flags != old_mc_flags)
+ iowrite32(priv->rx_config, port_base + RxConfig);
+}
+
+static bool _sc92031_check_media(struct net_device *dev)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem *port_base = priv->port_base;
+ u16 bmsr;
+
+ bmsr = _sc92031_mii_read(port_base, MII_BMSR);
+ rmb();
+ if (bmsr & BMSR_LSTATUS) {
+ bool speed_100, duplex_full;
+ u32 flow_ctrl_config = 0;
+ u16 output_status = _sc92031_mii_read(port_base,
+ MII_OutputStatus);
+ _sc92031_mii_scan(port_base);
+
+ speed_100 = output_status & 0x2;
+ duplex_full = output_status & 0x4;
+
+ /* Initial Tx/Rx configuration */
+ priv->rx_config = (0x40 << LowThresholdShift) | (0x1c0 << HighThresholdShift);
+ priv->tx_config = 0x48800000;
+
+ /* NOTE: vendor driver had dead code here to enable tx padding */
+
+ if (!speed_100)
+ priv->tx_config |= 0x80000;
+
+ // configure rx mode
+ _sc92031_set_rx_config(dev);
+
+ if (duplex_full) {
+ priv->rx_config |= RxFullDx;
+ priv->tx_config |= TxFullDx;
+ flow_ctrl_config = FlowCtrlFullDX | FlowCtrlEnb;
+ } else {
+ priv->rx_config &= ~RxFullDx;
+ priv->tx_config &= ~TxFullDx;
+ }
+
+ _sc92031_set_mar(dev);
+ _sc92031_set_rx_config(dev);
+ _sc92031_enable_tx_rx(dev);
+ iowrite32(flow_ctrl_config, port_base + FlowCtrlConfig);
+
+ netif_carrier_on(dev);
+
+ if (printk_ratelimit())
+ printk(KERN_INFO "%s: link up, %sMbps, %s-duplex\n",
+ dev->name,
+ speed_100 ? "100" : "10",
+ duplex_full ? "full" : "half");
+ return true;
+ } else {
+ _sc92031_mii_scan(port_base);
+
+ netif_carrier_off(dev);
+
+ _sc92031_disable_tx_rx(dev);
+
+ if (printk_ratelimit())
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ return false;
+ }
+}
+
+static void _sc92031_phy_reset(struct net_device *dev)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem *port_base = priv->port_base;
+ u32 phy_ctrl;
+
+ phy_ctrl = ioread32(port_base + PhyCtrl);
+ phy_ctrl &= ~(PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10);
+ phy_ctrl |= PhyCtrlAne | PhyCtrlReset;
+
+ switch (media) {
+ default:
+ case AUTOSELECT:
+ phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
+ break;
+ case M10_HALF:
+ phy_ctrl |= PhyCtrlSpd10;
+ break;
+ case M10_FULL:
+ phy_ctrl |= PhyCtrlDux | PhyCtrlSpd10;
+ break;
+ case M100_HALF:
+ phy_ctrl |= PhyCtrlSpd100;
+ break;
+ case M100_FULL:
+ phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
+ break;
+ }
+
+ iowrite32(phy_ctrl, port_base + PhyCtrl);
+ mdelay(10);
+
+ phy_ctrl &= ~PhyCtrlReset;
+ iowrite32(phy_ctrl, port_base + PhyCtrl);
+ mdelay(1);
+
+ _sc92031_mii_write(port_base, MII_JAB,
+ PHY_16_JAB_ENB | PHY_16_PORT_ENB);
+ _sc92031_mii_scan(port_base);
+
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+}
+
+static void _sc92031_reset(struct net_device *dev)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem *port_base = priv->port_base;
+
+ /* disable PM */
+ iowrite32(0, port_base + PMConfig);
+
+ /* soft reset the chip */
+ iowrite32(Cfg0_Reset, port_base + Config0);
+ mdelay(200);
+
+ iowrite32(0, port_base + Config0);
+ mdelay(10);
+
+ /* disable interrupts */
+ iowrite32(0, port_base + IntrMask);
+
+ /* clear multicast address */
+ iowrite32(0, port_base + MAR0);
+ iowrite32(0, port_base + MAR0 + 4);
+
+ /* init rx ring */
+ iowrite32(priv->rx_ring_dma_addr, port_base + RxbufAddr);
+ priv->rx_ring_tail = priv->rx_ring_dma_addr;
+
+ /* init tx ring */
+ _sc92031_tx_clear(dev);
+
+ /* clear old register values */
+ priv->intr_status = 0;
+ atomic_set(&priv->intr_mask, 0);
+ priv->rx_config = 0;
+ priv->tx_config = 0;
+ priv->mc_flags = 0;
+
+ /* configure rx buffer size */
+ /* NOTE: vendor driver had dead code here to enable early tx/rx */
+ iowrite32(Cfg1_Rcv64K, port_base + Config1);
+
+ _sc92031_phy_reset(dev);
+ _sc92031_check_media(dev);
+
+ /* calculate rx fifo overflow */
+ priv->rx_value = 0;
+
+ /* enable PM */
+ iowrite32(priv->pm_config, port_base + PMConfig);
+
+ /* clear intr register */
+ ioread32(port_base + IntrStatus);
+}
+
+static void _sc92031_tx_tasklet(struct net_device *dev)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem *port_base = priv->port_base;
+
+ unsigned old_tx_tail;
+ unsigned entry;
+ u32 tx_status;
+
+ old_tx_tail = priv->tx_tail;
+ while (priv->tx_head - priv->tx_tail > 0) {
+ entry = priv->tx_tail % NUM_TX_DESC;
+ tx_status = ioread32(port_base + TxStatus0 + entry * 4);
+
+ if (!(tx_status & (TxStatOK | TxUnderrun | TxAborted)))
+ break;
+
+ priv->tx_tail++;
+
+ if (tx_status & TxStatOK) {
+ dev->stats.tx_bytes += tx_status & 0x1fff;
+ dev->stats.tx_packets++;
+			/* Note: TxCarrierLost is always asserted at 100 Mbps. */
+ dev->stats.collisions += (tx_status >> 22) & 0xf;
+ }
+
+ if (tx_status & (TxOutOfWindow | TxAborted)) {
+ dev->stats.tx_errors++;
+
+ if (tx_status & TxAborted)
+ dev->stats.tx_aborted_errors++;
+
+ if (tx_status & TxCarrierLost)
+ dev->stats.tx_carrier_errors++;
+
+ if (tx_status & TxOutOfWindow)
+ dev->stats.tx_window_errors++;
+ }
+
+ if (tx_status & TxUnderrun)
+ dev->stats.tx_fifo_errors++;
+ }
+
+ if (priv->tx_tail != old_tx_tail)
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+}
+
+static void _sc92031_rx_tasklet_error(struct net_device *dev,
+ u32 rx_status, unsigned rx_size)
+{
+	if (rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) {
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
+ }
+
+ if (!(rx_status & RxStatesOK)) {
+ dev->stats.rx_errors++;
+
+ if (rx_status & (RxHugeFrame | RxSmallFrame))
+ dev->stats.rx_length_errors++;
+
+ if (rx_status & RxBadAlign)
+ dev->stats.rx_frame_errors++;
+
+ if (!(rx_status & RxCRCOK))
+ dev->stats.rx_crc_errors++;
+ } else {
+ struct sc92031_priv *priv = netdev_priv(dev);
+ priv->rx_loss++;
+ }
+}
+
+static void _sc92031_rx_tasklet(struct net_device *dev)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem *port_base = priv->port_base;
+
+ dma_addr_t rx_ring_head;
+ unsigned rx_len;
+ unsigned rx_ring_offset;
+ void *rx_ring = priv->rx_ring;
+
+ rx_ring_head = ioread32(port_base + RxBufWPtr);
+ rmb();
+
+	/* The RxBufWPtr register holds only the low 17 bits of the write
+	 * pointer; reconstruct the full 32-bit bus address of rx_ring_head
+	 * from the ring's base address.
+	 */
+ rx_ring_head &= (dma_addr_t)(RX_BUF_LEN - 1);
+ rx_ring_head |= priv->rx_ring_dma_addr & ~(dma_addr_t)(RX_BUF_LEN - 1);
+ if (rx_ring_head < priv->rx_ring_dma_addr)
+ rx_ring_head += RX_BUF_LEN;
+
+ if (rx_ring_head >= priv->rx_ring_tail)
+ rx_len = rx_ring_head - priv->rx_ring_tail;
+ else
+ rx_len = RX_BUF_LEN - (priv->rx_ring_tail - rx_ring_head);
+
+ if (!rx_len)
+ return;
+
+ if (unlikely(rx_len > RX_BUF_LEN)) {
+ if (printk_ratelimit())
+			printk(KERN_ERR "%s: rx packet length > rx buffer size\n",
+ dev->name);
+ return;
+ }
+
+ rx_ring_offset = (priv->rx_ring_tail - priv->rx_ring_dma_addr) % RX_BUF_LEN;
+
+ while (rx_len) {
+ u32 rx_status;
+ unsigned rx_size, rx_size_align, pkt_size;
+ struct sk_buff *skb;
+
+ rx_status = le32_to_cpup((__le32 *)(rx_ring + rx_ring_offset));
+ rmb();
+
+ rx_size = rx_status >> 20;
+		rx_size_align = (rx_size + 3) & ~3; /* round up to 4-byte alignment */
+		pkt_size = rx_size - 4; /* omit the 4-octet CRC from the length */
+
+ rx_ring_offset = (rx_ring_offset + 4) % RX_BUF_LEN;
+
+ if (unlikely(rx_status == 0 ||
+ rx_size > (MAX_ETH_FRAME_SIZE + 4) ||
+ rx_size < 16 ||
+ !(rx_status & RxStatesOK))) {
+ _sc92031_rx_tasklet_error(dev, rx_status, rx_size);
+ break;
+ }
+
+ if (unlikely(rx_size_align + 4 > rx_len)) {
+ if (printk_ratelimit())
+ printk(KERN_ERR "%s: rx_len is too small\n", dev->name);
+ break;
+ }
+
+ rx_len -= rx_size_align + 4;
+
+ skb = netdev_alloc_skb_ip_align(dev, pkt_size);
+ if (unlikely(!skb)) {
+ if (printk_ratelimit())
+				printk(KERN_ERR "%s: Couldn't allocate an sk_buff for a packet of size %u\n",
+ dev->name, pkt_size);
+ goto next;
+ }
+
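+		/* The frame may wrap past the end of the rx ring buffer;
+		 * if so, copy it into the skb in two pieces.
+		 */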
+ if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
+ memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset),
+ rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset);
+ memcpy(skb_put(skb, pkt_size - (RX_BUF_LEN - rx_ring_offset)),
+ rx_ring, pkt_size - (RX_BUF_LEN - rx_ring_offset));
+ } else {
+ memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size);
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+
+ dev->stats.rx_bytes += pkt_size;
+ dev->stats.rx_packets++;
+
+ if (rx_status & Rx_Multicast)
+ dev->stats.multicast++;
+
+ next:
+ rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN;
+ }
+ mb();
+
+ priv->rx_ring_tail = rx_ring_head;
+ iowrite32(priv->rx_ring_tail, port_base + RxBufRPtr);
+}
+
+static void _sc92031_link_tasklet(struct net_device *dev)
+{
+ if (_sc92031_check_media(dev))
+ netif_wake_queue(dev);
+ else {
+ netif_stop_queue(dev);
+ dev->stats.tx_carrier_errors++;
+ }
+}
+
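+/* Bottom half. Runs with the chip's interrupt sources masked (the ISR
+ * zeroed IntrMask before scheduling us) and rearms the mask from the
+ * intr_mask copy on exit, which sc92031_disable_interrupts may have
+ * cleared in the meantime.
+ */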
+static void sc92031_tasklet(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem *port_base = priv->port_base;
+ u32 intr_status, intr_mask;
+
+ intr_status = priv->intr_status;
+
+ spin_lock(&priv->lock);
+
+ if (unlikely(!netif_running(dev)))
+ goto out;
+
+ if (intr_status & TxOK)
+ _sc92031_tx_tasklet(dev);
+
+ if (intr_status & RxOK)
+ _sc92031_rx_tasklet(dev);
+
+ if (intr_status & RxOverflow)
+ dev->stats.rx_errors++;
+
+ if (intr_status & TimeOut) {
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
+ }
+
+ if (intr_status & (LinkFail | LinkOK))
+ _sc92031_link_tasklet(dev);
+
+out:
+ intr_mask = atomic_read(&priv->intr_mask);
+ rmb();
+
+ iowrite32(intr_mask, port_base + IntrMask);
+ mmiowb();
+
+ spin_unlock(&priv->lock);
+}
+
+static irqreturn_t sc92031_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem *port_base = priv->port_base;
+ u32 intr_status, intr_mask;
+
+ /* mask interrupts before clearing IntrStatus */
+ iowrite32(0, port_base + IntrMask);
+ _sc92031_dummy_read(port_base);
+
+ intr_status = ioread32(port_base + IntrStatus);
+ if (unlikely(intr_status == 0xffffffff))
+ return IRQ_NONE; // hardware has gone missing
+
+ intr_status &= IntrBits;
+ if (!intr_status)
+ goto out_none;
+
+ priv->intr_status = intr_status;
+ tasklet_schedule(&priv->tasklet);
+
+ return IRQ_HANDLED;
+
+out_none:
+ intr_mask = atomic_read(&priv->intr_mask);
+ rmb();
+
+ iowrite32(intr_mask, port_base + IntrMask);
+ mmiowb();
+
+ return IRQ_NONE;
+}
+
+static struct net_device_stats *sc92031_get_stats(struct net_device *dev)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem *port_base = priv->port_base;
+
+	// FIXME: I do not understand what this is trying to do.
+ if (netif_running(dev)) {
+ int temp;
+
+ spin_lock_bh(&priv->lock);
+
+ /* Update the error count. */
+ temp = (ioread32(port_base + RxStatus0) >> 16) & 0xffff;
+
+ if (temp == 0xffff) {
+ priv->rx_value += temp;
+ dev->stats.rx_fifo_errors = priv->rx_value;
+ } else
+ dev->stats.rx_fifo_errors = temp + priv->rx_value;
+
+ spin_unlock_bh(&priv->lock);
+ }
+
+ return &dev->stats;
+}
+
+static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem *port_base = priv->port_base;
+ unsigned len;
+ unsigned entry;
+ u32 tx_status;
+
+ if (unlikely(skb->len > TX_BUF_SIZE)) {
+ dev->stats.tx_dropped++;
+ goto out;
+ }
+
+ spin_lock(&priv->lock);
+
+ if (unlikely(!netif_carrier_ok(dev))) {
+ dev->stats.tx_dropped++;
+ goto out_unlock;
+ }
+
+ BUG_ON(priv->tx_head - priv->tx_tail >= NUM_TX_DESC);
+
+ entry = priv->tx_head++ % NUM_TX_DESC;
+
+ skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);
+
+ len = skb->len;
+ if (len < ETH_ZLEN) {
+ memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
+ 0, ETH_ZLEN - len);
+ len = ETH_ZLEN;
+ }
+
+ wmb();
+
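+	/* The low 13 bits of TxStatus carry the frame length; the upper
+	 * bits appear to select an early-transmit threshold that grows
+	 * with frame size (the exact meaning of the 0x30000/0x50000
+	 * constants is not documented here).
+	 */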
+ if (len < 100)
+ tx_status = len;
+ else if (len < 300)
+ tx_status = 0x30000 | len;
+ else
+ tx_status = 0x50000 | len;
+
+ iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE,
+ port_base + TxAddr0 + entry * 4);
+ iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
+ mmiowb();
+
+ if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
+ netif_stop_queue(dev);
+
+out_unlock:
+ spin_unlock(&priv->lock);
+
+out:
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static int sc92031_open(struct net_device *dev)
+{
+ int err;
+ struct sc92031_priv *priv = netdev_priv(dev);
+ struct pci_dev *pdev = priv->pdev;
+
+ priv->rx_ring = pci_alloc_consistent(pdev, RX_BUF_LEN,
+ &priv->rx_ring_dma_addr);
+ if (unlikely(!priv->rx_ring)) {
+ err = -ENOMEM;
+ goto out_alloc_rx_ring;
+ }
+
+ priv->tx_bufs = pci_alloc_consistent(pdev, TX_BUF_TOT_LEN,
+ &priv->tx_bufs_dma_addr);
+ if (unlikely(!priv->tx_bufs)) {
+ err = -ENOMEM;
+ goto out_alloc_tx_bufs;
+ }
+ priv->tx_head = priv->tx_tail = 0;
+
+ err = request_irq(pdev->irq, sc92031_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(err < 0))
+ goto out_request_irq;
+
+ priv->pm_config = 0;
+
+ /* Interrupts already disabled by sc92031_stop or sc92031_probe */
+ spin_lock_bh(&priv->lock);
+
+ _sc92031_reset(dev);
+ mmiowb();
+
+ spin_unlock_bh(&priv->lock);
+ sc92031_enable_interrupts(dev);
+
+ if (netif_carrier_ok(dev))
+ netif_start_queue(dev);
+ else
+ netif_tx_disable(dev);
+
+ return 0;
+
+out_request_irq:
+ pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
+ priv->tx_bufs_dma_addr);
+out_alloc_tx_bufs:
+ pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
+ priv->rx_ring_dma_addr);
+out_alloc_rx_ring:
+ return err;
+}
+
+static int sc92031_stop(struct net_device *dev)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+ struct pci_dev *pdev = priv->pdev;
+
+ netif_tx_disable(dev);
+
+ /* Disable interrupts, stop Tx and Rx. */
+ sc92031_disable_interrupts(dev);
+
+ spin_lock_bh(&priv->lock);
+
+ _sc92031_disable_tx_rx(dev);
+ _sc92031_tx_clear(dev);
+ mmiowb();
+
+ spin_unlock_bh(&priv->lock);
+
+ free_irq(pdev->irq, dev);
+ pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
+ priv->tx_bufs_dma_addr);
+ pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
+ priv->rx_ring_dma_addr);
+
+ return 0;
+}
+
+static void sc92031_set_multicast_list(struct net_device *dev)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+
+ spin_lock_bh(&priv->lock);
+
+ _sc92031_set_mar(dev);
+ _sc92031_set_rx_config(dev);
+ mmiowb();
+
+ spin_unlock_bh(&priv->lock);
+}
+
+static void sc92031_tx_timeout(struct net_device *dev)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+
+ /* Disable interrupts by clearing the interrupt mask.*/
+ sc92031_disable_interrupts(dev);
+
+ spin_lock(&priv->lock);
+
+ priv->tx_timeouts++;
+
+ _sc92031_reset(dev);
+ mmiowb();
+
+ spin_unlock(&priv->lock);
+
+ /* enable interrupts */
+ sc92031_enable_interrupts(dev);
+
+ if (netif_carrier_ok(dev))
+ netif_wake_queue(dev);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sc92031_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ if (sc92031_interrupt(dev->irq, dev) != IRQ_NONE)
+ sc92031_tasklet((unsigned long)dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static int sc92031_ethtool_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem *port_base = priv->port_base;
+ u8 phy_address;
+ u32 phy_ctrl;
+ u16 output_status;
+
+ spin_lock_bh(&priv->lock);
+
+ phy_address = ioread32(port_base + Miicmd1) >> 27;
+ phy_ctrl = ioread32(port_base + PhyCtrl);
+
+ output_status = _sc92031_mii_read(port_base, MII_OutputStatus);
+ _sc92031_mii_scan(port_base);
+ mmiowb();
+
+ spin_unlock_bh(&priv->lock);
+
+ cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
+ | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full
+ | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII;
+
+ cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
+
+ if ((phy_ctrl & (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
+ == (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
+ cmd->advertising |= ADVERTISED_Autoneg;
+
+ if ((phy_ctrl & PhyCtrlSpd10) == PhyCtrlSpd10)
+ cmd->advertising |= ADVERTISED_10baseT_Half;
+
+ if ((phy_ctrl & (PhyCtrlSpd10 | PhyCtrlDux))
+ == (PhyCtrlSpd10 | PhyCtrlDux))
+ cmd->advertising |= ADVERTISED_10baseT_Full;
+
+ if ((phy_ctrl & PhyCtrlSpd100) == PhyCtrlSpd100)
+ cmd->advertising |= ADVERTISED_100baseT_Half;
+
+ if ((phy_ctrl & (PhyCtrlSpd100 | PhyCtrlDux))
+ == (PhyCtrlSpd100 | PhyCtrlDux))
+ cmd->advertising |= ADVERTISED_100baseT_Full;
+
+ if (phy_ctrl & PhyCtrlAne)
+ cmd->advertising |= ADVERTISED_Autoneg;
+
+ ethtool_cmd_speed_set(cmd,
+ (output_status & 0x2) ? SPEED_100 : SPEED_10);
+ cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF;
+ cmd->port = PORT_MII;
+ cmd->phy_address = phy_address;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->autoneg = (phy_ctrl & PhyCtrlAne) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static int sc92031_ethtool_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem *port_base = priv->port_base;
+ u32 speed = ethtool_cmd_speed(cmd);
+ u32 phy_ctrl;
+ u32 old_phy_ctrl;
+
+ if (!(speed == SPEED_10 || speed == SPEED_100))
+ return -EINVAL;
+ if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL))
+ return -EINVAL;
+ if (!(cmd->port == PORT_MII))
+ return -EINVAL;
+ if (!(cmd->phy_address == 0x1f))
+ return -EINVAL;
+ if (!(cmd->transceiver == XCVR_INTERNAL))
+ return -EINVAL;
+ if (!(cmd->autoneg == AUTONEG_DISABLE || cmd->autoneg == AUTONEG_ENABLE))
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (!(cmd->advertising & (ADVERTISED_Autoneg
+ | ADVERTISED_100baseT_Full
+ | ADVERTISED_100baseT_Half
+ | ADVERTISED_10baseT_Full
+ | ADVERTISED_10baseT_Half)))
+ return -EINVAL;
+
+ phy_ctrl = PhyCtrlAne;
+
+ // FIXME: I'm not sure what the original code was trying to do
+ if (cmd->advertising & ADVERTISED_Autoneg)
+ phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
+ if (cmd->advertising & ADVERTISED_100baseT_Full)
+ phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
+ if (cmd->advertising & ADVERTISED_100baseT_Half)
+ phy_ctrl |= PhyCtrlSpd100;
+ if (cmd->advertising & ADVERTISED_10baseT_Full)
+ phy_ctrl |= PhyCtrlSpd10 | PhyCtrlDux;
+ if (cmd->advertising & ADVERTISED_10baseT_Half)
+ phy_ctrl |= PhyCtrlSpd10;
+ } else {
+ // FIXME: Whole branch guessed
+ phy_ctrl = 0;
+
+ if (speed == SPEED_10)
+ phy_ctrl |= PhyCtrlSpd10;
+		else /* speed == SPEED_100 */
+ phy_ctrl |= PhyCtrlSpd100;
+
+ if (cmd->duplex == DUPLEX_FULL)
+ phy_ctrl |= PhyCtrlDux;
+ }
+
+ spin_lock_bh(&priv->lock);
+
+ old_phy_ctrl = ioread32(port_base + PhyCtrl);
+ phy_ctrl |= old_phy_ctrl & ~(PhyCtrlAne | PhyCtrlDux
+ | PhyCtrlSpd100 | PhyCtrlSpd10);
+ if (phy_ctrl != old_phy_ctrl)
+ iowrite32(phy_ctrl, port_base + PhyCtrl);
+
+ spin_unlock_bh(&priv->lock);
+
+ return 0;
+}
+
+static void sc92031_ethtool_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem *port_base = priv->port_base;
+ u32 pm_config;
+
+ spin_lock_bh(&priv->lock);
+ pm_config = ioread32(port_base + PMConfig);
+ spin_unlock_bh(&priv->lock);
+
+ // FIXME: Guessed
+ wolinfo->supported = WAKE_PHY | WAKE_MAGIC
+ | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
+ wolinfo->wolopts = 0;
+
+ if (pm_config & PM_LinkUp)
+ wolinfo->wolopts |= WAKE_PHY;
+
+ if (pm_config & PM_Magic)
+ wolinfo->wolopts |= WAKE_MAGIC;
+
+ if (pm_config & PM_WakeUp)
+ // FIXME: Guessed
+ wolinfo->wolopts |= WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
+}
+
+static int sc92031_ethtool_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem *port_base = priv->port_base;
+ u32 pm_config;
+
+ spin_lock_bh(&priv->lock);
+
+ pm_config = ioread32(port_base + PMConfig)
+ & ~(PM_LinkUp | PM_Magic | PM_WakeUp);
+
+ if (wolinfo->wolopts & WAKE_PHY)
+ pm_config |= PM_LinkUp;
+
+ if (wolinfo->wolopts & WAKE_MAGIC)
+ pm_config |= PM_Magic;
+
+ // FIXME: Guessed
+ if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
+ pm_config |= PM_WakeUp;
+
+ priv->pm_config = pm_config;
+ iowrite32(pm_config, port_base + PMConfig);
+ mmiowb();
+
+ spin_unlock_bh(&priv->lock);
+
+ return 0;
+}
+
+static int sc92031_ethtool_nway_reset(struct net_device *dev)
+{
+ int err = 0;
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem *port_base = priv->port_base;
+ u16 bmcr;
+
+ spin_lock_bh(&priv->lock);
+
+ bmcr = _sc92031_mii_read(port_base, MII_BMCR);
+ if (!(bmcr & BMCR_ANENABLE)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ _sc92031_mii_write(port_base, MII_BMCR, bmcr | BMCR_ANRESTART);
+
+out:
+ _sc92031_mii_scan(port_base);
+ mmiowb();
+
+ spin_unlock_bh(&priv->lock);
+
+ return err;
+}
+
+static const char sc92031_ethtool_stats_strings[SILAN_STATS_NUM][ETH_GSTRING_LEN] = {
+ "tx_timeout",
+ "rx_loss",
+};
+
+static void sc92031_ethtool_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ if (stringset == ETH_SS_STATS)
+ memcpy(data, sc92031_ethtool_stats_strings,
+ SILAN_STATS_NUM * ETH_GSTRING_LEN);
+}
+
+static int sc92031_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return SILAN_STATS_NUM;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct sc92031_priv *priv = netdev_priv(dev);
+
+ spin_lock_bh(&priv->lock);
+ data[0] = priv->tx_timeouts;
+ data[1] = priv->rx_loss;
+ spin_unlock_bh(&priv->lock);
+}
+
+static const struct ethtool_ops sc92031_ethtool_ops = {
+ .get_settings = sc92031_ethtool_get_settings,
+ .set_settings = sc92031_ethtool_set_settings,
+ .get_wol = sc92031_ethtool_get_wol,
+ .set_wol = sc92031_ethtool_set_wol,
+ .nway_reset = sc92031_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_strings = sc92031_ethtool_get_strings,
+ .get_sset_count = sc92031_ethtool_get_sset_count,
+ .get_ethtool_stats = sc92031_ethtool_get_ethtool_stats,
+};
+
+
+static const struct net_device_ops sc92031_netdev_ops = {
+ .ndo_get_stats = sc92031_get_stats,
+ .ndo_start_xmit = sc92031_start_xmit,
+ .ndo_open = sc92031_open,
+ .ndo_stop = sc92031_stop,
+ .ndo_set_rx_mode = sc92031_set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_tx_timeout = sc92031_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = sc92031_poll_controller,
+#endif
+};
+
+static int __devinit sc92031_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int err;
+ void __iomem* port_base;
+ struct net_device *dev;
+ struct sc92031_priv *priv;
+ u32 mac0, mac1;
+ unsigned long base_addr;
+
+ err = pci_enable_device(pdev);
+ if (unlikely(err < 0))
+ goto out_enable_device;
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (unlikely(err < 0))
+ goto out_set_dma_mask;
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (unlikely(err < 0))
+ goto out_set_dma_mask;
+
+ err = pci_request_regions(pdev, SC92031_NAME);
+ if (unlikely(err < 0))
+ goto out_request_regions;
+
+ port_base = pci_iomap(pdev, SC92031_USE_BAR, 0);
+ if (unlikely(!port_base)) {
+ err = -EIO;
+ goto out_iomap;
+ }
+
+ dev = alloc_etherdev(sizeof(struct sc92031_priv));
+ if (unlikely(!dev)) {
+ err = -ENOMEM;
+ goto out_alloc_etherdev;
+ }
+
+ pci_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+#if SC92031_USE_BAR == 0
+ dev->mem_start = pci_resource_start(pdev, SC92031_USE_BAR);
+ dev->mem_end = pci_resource_end(pdev, SC92031_USE_BAR);
+#elif SC92031_USE_BAR == 1
+ dev->base_addr = pci_resource_start(pdev, SC92031_USE_BAR);
+#endif
+ dev->irq = pdev->irq;
+
+ /* faked with skb_copy_and_csum_dev */
+ dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+ dev->netdev_ops = &sc92031_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->ethtool_ops = &sc92031_ethtool_ops;
+
+ priv = netdev_priv(dev);
+ spin_lock_init(&priv->lock);
+ priv->port_base = port_base;
+ priv->pdev = pdev;
+ tasklet_init(&priv->tasklet, sc92031_tasklet, (unsigned long)dev);
+	/* Fudge the tasklet count so that the call to sc92031_enable_interrupts
+	 * in sc92031_open will work correctly */
+ tasklet_disable_nosync(&priv->tasklet);
+
+ /* PCI PM Wakeup */
+ iowrite32((~PM_LongWF & ~PM_LWPTN) | PM_Enable, port_base + PMConfig);
+
+ mac0 = ioread32(port_base + MAC0);
+ mac1 = ioread32(port_base + MAC0 + 4);
+ dev->dev_addr[0] = dev->perm_addr[0] = mac0 >> 24;
+ dev->dev_addr[1] = dev->perm_addr[1] = mac0 >> 16;
+ dev->dev_addr[2] = dev->perm_addr[2] = mac0 >> 8;
+ dev->dev_addr[3] = dev->perm_addr[3] = mac0;
+ dev->dev_addr[4] = dev->perm_addr[4] = mac1 >> 8;
+ dev->dev_addr[5] = dev->perm_addr[5] = mac1;
+
+ err = register_netdev(dev);
+ if (err < 0)
+ goto out_register_netdev;
+
+#if SC92031_USE_BAR == 0
+ base_addr = dev->mem_start;
+#elif SC92031_USE_BAR == 1
+ base_addr = dev->base_addr;
+#endif
+ printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name,
+ base_addr, dev->dev_addr, dev->irq);
+
+ return 0;
+
+out_register_netdev:
+ free_netdev(dev);
+out_alloc_etherdev:
+ pci_iounmap(pdev, port_base);
+out_iomap:
+ pci_release_regions(pdev);
+out_request_regions:
+out_set_dma_mask:
+ pci_disable_device(pdev);
+out_enable_device:
+ return err;
+}
+
+static void __devexit sc92031_remove(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct sc92031_priv *priv = netdev_priv(dev);
+ void __iomem* port_base = priv->port_base;
+
+ unregister_netdev(dev);
+ free_netdev(dev);
+ pci_iounmap(pdev, port_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct sc92031_priv *priv = netdev_priv(dev);
+
+ pci_save_state(pdev);
+
+ if (!netif_running(dev))
+ goto out;
+
+ netif_device_detach(dev);
+
+ /* Disable interrupts, stop Tx and Rx. */
+ sc92031_disable_interrupts(dev);
+
+ spin_lock_bh(&priv->lock);
+
+ _sc92031_disable_tx_rx(dev);
+ _sc92031_tx_clear(dev);
+ mmiowb();
+
+ spin_unlock_bh(&priv->lock);
+
+out:
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int sc92031_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct sc92031_priv *priv = netdev_priv(dev);
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ if (!netif_running(dev))
+ goto out;
+
+ /* Interrupts already disabled by sc92031_suspend */
+ spin_lock_bh(&priv->lock);
+
+ _sc92031_reset(dev);
+ mmiowb();
+
+ spin_unlock_bh(&priv->lock);
+ sc92031_enable_interrupts(dev);
+
+ netif_device_attach(dev);
+
+ if (netif_carrier_ok(dev))
+ netif_wake_queue(dev);
+ else
+ netif_tx_disable(dev);
+
+out:
+ return 0;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(sc92031_pci_device_id_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
+ { PCI_DEVICE(0x1088, 0x2031) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table);
+
+static struct pci_driver sc92031_pci_driver = {
+ .name = SC92031_NAME,
+ .id_table = sc92031_pci_device_id_table,
+ .probe = sc92031_probe,
+ .remove = __devexit_p(sc92031_remove),
+ .suspend = sc92031_suspend,
+ .resume = sc92031_resume,
+};
+
+static int __init sc92031_init(void)
+{
+ return pci_register_driver(&sc92031_pci_driver);
+}
+
+static void __exit sc92031_exit(void)
+{
+ pci_unregister_driver(&sc92031_pci_driver);
+}
+
+module_init(sc92031_init);
+module_exit(sc92031_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>");
+MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver");
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
new file mode 100644
index 000000000000..9755b49bbefb
--- /dev/null
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -0,0 +1,19 @@
+#
+# Renesas device configuration
+#
+
+config SH_ETH
+ tristate "Renesas SuperH Ethernet support"
+ depends on SUPERH && \
+ (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
+ CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
+ CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7757)
+ select CRC32
+ select NET_CORE
+ select MII
+ select MDIO_BITBANG
+ select PHYLIB
+ ---help---
+ Renesas SuperH Ethernet device driver.
+	  This driver supports the following CPUs:
+	  - SH7710, SH7712, SH7763, SH7619, SH7724, and SH7757.
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
new file mode 100644
index 000000000000..1c278a8e066a
--- /dev/null
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Renesas device drivers.
+#
+
+obj-$(CONFIG_SH_ETH) += sh_eth.o
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
new file mode 100644
index 000000000000..9b230740c6ab
--- /dev/null
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -0,0 +1,1975 @@
+/*
+ * SuperH Ethernet device driver
+ *
+ * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
+ * Copyright (C) 2008-2009 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/cache.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/ethtool.h>
+#include <linux/sh_eth.h>
+
+#include "sh_eth.h"
+
+#define SH_ETH_DEF_MSG_ENABLE \
+ (NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+	 NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+/* There is CPU dependent code */
+#if defined(CONFIG_CPU_SUBTYPE_SH7724)
+#define SH_ETH_RESET_DEFAULT 1
+static void sh_eth_set_duplex(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ if (mdp->duplex) /* Full */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
+ else /* Half */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
+}
+
+static void sh_eth_set_rate(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ switch (mdp->speed) {
+ case 10: /* 10BASE */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
+ break;
+ case 100:/* 100BASE */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
+ break;
+ default:
+ break;
+ }
+}
+
+/* SH7724 */
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .set_duplex = sh_eth_set_duplex,
+ .set_rate = sh_eth_set_rate,
+
+ .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
+ .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,
+
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+ .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
+ EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+ .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .hw_swap = 1,
+ .rpadir = 1,
+ .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
+};
+#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
+#define SH_ETH_HAS_BOTH_MODULES 1
+#define SH_ETH_HAS_TSU 1
+static void sh_eth_set_duplex(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ if (mdp->duplex) /* Full */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
+ else /* Half */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
+}
+
+static void sh_eth_set_rate(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ switch (mdp->speed) {
+ case 10: /* 10BASE */
+ sh_eth_write(ndev, 0, RTRATE);
+ break;
+ case 100:/* 100BASE */
+ sh_eth_write(ndev, 1, RTRATE);
+ break;
+ default:
+ break;
+ }
+}
+
+/* SH7757 */
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .set_duplex = sh_eth_set_duplex,
+ .set_rate = sh_eth_set_rate,
+
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+ .rmcr_value = 0x00000001,
+
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+ .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
+ EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+ .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .hw_swap = 1,
+ .no_ade = 1,
+ .rpadir = 1,
+ .rpadir_value = 2 << 16,
+};
+
+#define SH_GIGA_ETH_BASE 0xfee00000
+#define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
+#define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
+static void sh_eth_chip_reset_giga(struct net_device *ndev)
+{
+ int i;
+ unsigned long mahr[2], malr[2];
+
+ /* save MAHR and MALR */
+ for (i = 0; i < 2; i++) {
+ malr[i] = ioread32((void *)GIGA_MALR(i));
+ mahr[i] = ioread32((void *)GIGA_MAHR(i));
+ }
+
+ /* reset device */
+ iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
+ mdelay(1);
+
+ /* restore MAHR and MALR */
+ for (i = 0; i < 2; i++) {
+ iowrite32(malr[i], (void *)GIGA_MALR(i));
+ iowrite32(mahr[i], (void *)GIGA_MAHR(i));
+ }
+}
+
+static int sh_eth_is_gether(struct sh_eth_private *mdp);
+static void sh_eth_reset(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int cnt = 100;
+
+ if (sh_eth_is_gether(mdp)) {
+ sh_eth_write(ndev, 0x03, EDSR);
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
+ EDMR);
+ while (cnt > 0) {
+ if (!(sh_eth_read(ndev, EDMR) & 0x3))
+ break;
+ mdelay(1);
+ cnt--;
+ }
+		if (cnt == 0)
+			printk(KERN_ERR "Device reset failed\n");
+
+ /* Table Init */
+ sh_eth_write(ndev, 0x0, TDLAR);
+ sh_eth_write(ndev, 0x0, TDFAR);
+ sh_eth_write(ndev, 0x0, TDFXR);
+ sh_eth_write(ndev, 0x0, TDFFR);
+ sh_eth_write(ndev, 0x0, RDLAR);
+ sh_eth_write(ndev, 0x0, RDFAR);
+ sh_eth_write(ndev, 0x0, RDFXR);
+ sh_eth_write(ndev, 0x0, RDFFR);
+ } else {
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
+ EDMR);
+ mdelay(3);
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
+ EDMR);
+ }
+}
+
+static void sh_eth_set_duplex_giga(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ if (mdp->duplex) /* Full */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
+ else /* Half */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
+}
+
+static void sh_eth_set_rate_giga(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ switch (mdp->speed) {
+ case 10: /* 10BASE */
+ sh_eth_write(ndev, 0x00000000, GECMR);
+ break;
+ case 100:/* 100BASE */
+ sh_eth_write(ndev, 0x00000010, GECMR);
+ break;
+ case 1000: /* 1000BASE */
+ sh_eth_write(ndev, 0x00000020, GECMR);
+ break;
+ default:
+ break;
+ }
+}
+
+/* SH7757(GETHERC) */
+static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
+ .chip_reset = sh_eth_chip_reset_giga,
+ .set_duplex = sh_eth_set_duplex_giga,
+ .set_rate = sh_eth_set_rate_giga,
+
+ .ecsr_value = ECSR_ICD | ECSR_MPD,
+ .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+ .tx_check = EESR_TC1 | EESR_FTC,
+ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+ EESR_ECI,
+ .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
+ EESR_TFE,
+ .fdr_value = 0x0000072f,
+ .rmcr_value = 0x00000001,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .bculr = 1,
+ .hw_swap = 1,
+ .rpadir = 1,
+ .rpadir_value = 2 << 16,
+ .no_trimd = 1,
+ .no_ade = 1,
+};
+
+static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
+{
+ if (sh_eth_is_gether(mdp))
+ return &sh_eth_my_cpu_data_giga;
+ else
+ return &sh_eth_my_cpu_data;
+}
+
+#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
+#define SH_ETH_HAS_TSU 1
+static void sh_eth_chip_reset(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ /* reset device */
+ sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
+ mdelay(1);
+}
+
+static void sh_eth_reset(struct net_device *ndev)
+{
+ int cnt = 100;
+
+ sh_eth_write(ndev, EDSR_ENALL, EDSR);
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
+ while (cnt > 0) {
+ if (!(sh_eth_read(ndev, EDMR) & 0x3))
+ break;
+ mdelay(1);
+ cnt--;
+ }
+	if (cnt == 0)
+		printk(KERN_ERR "Device reset failed\n");
+
+ /* Table Init */
+ sh_eth_write(ndev, 0x0, TDLAR);
+ sh_eth_write(ndev, 0x0, TDFAR);
+ sh_eth_write(ndev, 0x0, TDFXR);
+ sh_eth_write(ndev, 0x0, TDFFR);
+ sh_eth_write(ndev, 0x0, RDLAR);
+ sh_eth_write(ndev, 0x0, RDFAR);
+ sh_eth_write(ndev, 0x0, RDFXR);
+ sh_eth_write(ndev, 0x0, RDFFR);
+}
+
+static void sh_eth_set_duplex(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ if (mdp->duplex) /* Full */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
+ else /* Half */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
+}
+
+static void sh_eth_set_rate(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ switch (mdp->speed) {
+ case 10: /* 10BASE */
+ sh_eth_write(ndev, GECMR_10, GECMR);
+ break;
+ case 100:/* 100BASE */
+ sh_eth_write(ndev, GECMR_100, GECMR);
+ break;
+ case 1000: /* 1000BASE */
+ sh_eth_write(ndev, GECMR_1000, GECMR);
+ break;
+ default:
+ break;
+ }
+}
+
+/* sh7763 */
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .chip_reset = sh_eth_chip_reset,
+ .set_duplex = sh_eth_set_duplex,
+ .set_rate = sh_eth_set_rate,
+
+ .ecsr_value = ECSR_ICD | ECSR_MPD,
+ .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+ .tx_check = EESR_TC1 | EESR_FTC,
+ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+ EESR_ECI,
+ .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
+ EESR_TFE,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .bculr = 1,
+ .hw_swap = 1,
+ .no_trimd = 1,
+ .no_ade = 1,
+ .tsu = 1,
+};
+
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+#define SH_ETH_RESET_DEFAULT 1
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .hw_swap = 1,
+};
+#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
+#define SH_ETH_RESET_DEFAULT 1
+#define SH_ETH_HAS_TSU 1
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+ .tsu = 1,
+};
+#endif
+
+static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
+{
+ if (!cd->ecsr_value)
+ cd->ecsr_value = DEFAULT_ECSR_INIT;
+
+ if (!cd->ecsipr_value)
+ cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
+
+ if (!cd->fcftr_value)
+ cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
+ DEFAULT_FIFO_F_D_RFD;
+
+ if (!cd->fdr_value)
+ cd->fdr_value = DEFAULT_FDR_INIT;
+
+ if (!cd->rmcr_value)
+ cd->rmcr_value = DEFAULT_RMCR_VALUE;
+
+ if (!cd->tx_check)
+ cd->tx_check = DEFAULT_TX_CHECK;
+
+ if (!cd->eesr_err_check)
+ cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
+
+ if (!cd->tx_error_check)
+ cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
+}
+
+#if defined(SH_ETH_RESET_DEFAULT)
+/* Chip Reset */
+static void sh_eth_reset(struct net_device *ndev)
+{
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
+ mdelay(3);
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
+}
+#endif
+
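+/* Reserve skb headroom so that skb->data meets the receive alignment
+ * that the DMA engine expects; the required alignment is CPU dependent.
+ */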
+#if defined(CONFIG_CPU_SH4)
+static void sh_eth_set_receive_align(struct sk_buff *skb)
+{
+ int reserve;
+
+ reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
+ if (reserve)
+ skb_reserve(skb, reserve);
+}
+#else
+static void sh_eth_set_receive_align(struct sk_buff *skb)
+{
+ skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
+}
+#endif
+
+
+/* CPU <-> EDMAC endian convert */
+static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
+{
+ switch (mdp->edmac_endian) {
+ case EDMAC_LITTLE_ENDIAN:
+ return cpu_to_le32(x);
+ case EDMAC_BIG_ENDIAN:
+ return cpu_to_be32(x);
+ }
+ return x;
+}
+
+static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
+{
+ switch (mdp->edmac_endian) {
+ case EDMAC_LITTLE_ENDIAN:
+ return le32_to_cpu(x);
+ case EDMAC_BIG_ENDIAN:
+ return be32_to_cpu(x);
+ }
+ return x;
+}
+
+/*
+ * Program the hardware MAC address from dev->dev_addr.
+ */
+static void update_mac_address(struct net_device *ndev)
+{
+ sh_eth_write(ndev,
+ (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+ (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
+ sh_eth_write(ndev,
+ (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
+}
+
+/*
+ * Get the MAC address from the SuperH MAC address registers.
+ *
+ * The SuperH Ethernet controller has no ROM for the MAC address; this
+ * driver reads back whatever address the bootloader (U-Boot or sh-ipl+g)
+ * programmed into the registers. To use this device, the MAC address
+ * must be set in the bootloader.
+ */
+static void read_mac_address(struct net_device *ndev, unsigned char *mac)
+{
+ if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
+ memcpy(ndev->dev_addr, mac, 6);
+ } else {
+ ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
+ ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
+ ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
+ ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
+ ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
+ ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
+ }
+}
+
+static int sh_eth_is_gether(struct sh_eth_private *mdp)
+{
+ if (mdp->reg_offset == sh_eth_offset_gigabit)
+ return 1;
+ else
+ return 0;
+}
+
+static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
+{
+ if (sh_eth_is_gether(mdp))
+ return EDTRR_TRNS_GETHER;
+ else
+ return EDTRR_TRNS_ETHER;
+}
+
+struct bb_info {
+ void (*set_gate)(void *addr);
+ struct mdiobb_ctrl ctrl;
+ void *addr;
+	u32 mmd_msk;	/* MDIO direction (MMD) */
+	u32 mdo_msk;	/* MDIO data out */
+	u32 mdi_msk;	/* MDIO data in */
+	u32 mdc_msk;	/* MDIO clock */
+};
+
+/* PHY bit set */
+static void bb_set(void *addr, u32 msk)
+{
+ iowrite32(ioread32(addr) | msk, addr);
+}
+
+/* PHY bit clear */
+static void bb_clr(void *addr, u32 msk)
+{
+ iowrite32((ioread32(addr) & ~msk), addr);
+}
+
+/* PHY bit read */
+static int bb_read(void *addr, u32 msk)
+{
+ return (ioread32(addr) & msk) != 0;
+}
+
+/* Data I/O pin control */
+static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+ if (bitbang->set_gate)
+ bitbang->set_gate(bitbang->addr);
+
+ if (bit)
+ bb_set(bitbang->addr, bitbang->mmd_msk);
+ else
+ bb_clr(bitbang->addr, bitbang->mmd_msk);
+}
+
+/* Set MDIO data bit */
+static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+ if (bitbang->set_gate)
+ bitbang->set_gate(bitbang->addr);
+
+ if (bit)
+ bb_set(bitbang->addr, bitbang->mdo_msk);
+ else
+ bb_clr(bitbang->addr, bitbang->mdo_msk);
+}
+
+/* Get MDIO data bit */
+static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+ if (bitbang->set_gate)
+ bitbang->set_gate(bitbang->addr);
+
+ return bb_read(bitbang->addr, bitbang->mdi_msk);
+}
+
+/* MDC pin control */
+static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+ if (bitbang->set_gate)
+ bitbang->set_gate(bitbang->addr);
+
+ if (bit)
+ bb_set(bitbang->addr, bitbang->mdc_msk);
+ else
+ bb_clr(bitbang->addr, bitbang->mdc_msk);
+}
+
+/* mdio bus control struct */
+static struct mdiobb_ops bb_ops = {
+ .owner = THIS_MODULE,
+ .set_mdc = sh_mdc_ctrl,
+ .set_mdio_dir = sh_mmd_ctrl,
+ .set_mdio_data = sh_set_mdio,
+ .get_mdio_data = sh_get_mdio,
+};
+
+/* free skb and descriptor buffer */
+static void sh_eth_ring_free(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int i;
+
+ /* Free Rx skb ringbuffer */
+ if (mdp->rx_skbuff) {
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (mdp->rx_skbuff[i])
+ dev_kfree_skb(mdp->rx_skbuff[i]);
+ }
+ }
+ kfree(mdp->rx_skbuff);
+
+ /* Free Tx skb ringbuffer */
+ if (mdp->tx_skbuff) {
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (mdp->tx_skbuff[i])
+ dev_kfree_skb(mdp->tx_skbuff[i]);
+ }
+ }
+ kfree(mdp->tx_skbuff);
+}
+
+/* format skb and descriptor buffer */
+static void sh_eth_ring_format(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int i;
+ struct sk_buff *skb;
+ struct sh_eth_rxdesc *rxdesc = NULL;
+ struct sh_eth_txdesc *txdesc = NULL;
+ int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
+ int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;
+
+ mdp->cur_rx = mdp->cur_tx = 0;
+ mdp->dirty_rx = mdp->dirty_tx = 0;
+
+ memset(mdp->rx_ring, 0, rx_ringsize);
+
+ /* build Rx ring buffer */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ /* skb */
+ mdp->rx_skbuff[i] = NULL;
+ skb = dev_alloc_skb(mdp->rx_buf_sz);
+ mdp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ skb->dev = ndev; /* Mark as being used by this device. */
+ sh_eth_set_receive_align(skb);
+
+ /* RX descriptor */
+ rxdesc = &mdp->rx_ring[i];
+ rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
+ rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
+
+		/* The buffer size is aligned to a 16-byte boundary. */
+ rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+ /* Rx descriptor address set */
+ if (i == 0) {
+ sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
+ if (sh_eth_is_gether(mdp))
+ sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
+ }
+ }
+
+ mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
+
+ /* Mark the last entry as wrapping the ring. */
+ rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
+
+ memset(mdp->tx_ring, 0, tx_ringsize);
+
+ /* build Tx ring buffer */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ mdp->tx_skbuff[i] = NULL;
+ txdesc = &mdp->tx_ring[i];
+ txdesc->status = cpu_to_edmac(mdp, TD_TFP);
+ txdesc->buffer_length = 0;
+ if (i == 0) {
+ /* Tx descriptor address set */
+ sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
+ if (sh_eth_is_gether(mdp))
+ sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
+ }
+ }
+
+ txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
+}
+
+/* Get skb and descriptor buffer */
+static int sh_eth_ring_init(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int rx_ringsize, tx_ringsize, ret = 0;
+
+ /*
+ * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
+ * card needs room to do 8 byte alignment, +2 so we can reserve
+ * the first 2 bytes, and +16 gets room for the status word from the
+ * card.
+ */
+ mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
+ (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
+ if (mdp->cd->rpadir)
+ mdp->rx_buf_sz += NET_IP_ALIGN;
+
+ /* Allocate RX and TX skb rings */
+ mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
+ GFP_KERNEL);
+ if (!mdp->rx_skbuff) {
+ dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
+ GFP_KERNEL);
+ if (!mdp->tx_skbuff) {
+ dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
+ ret = -ENOMEM;
+ goto skb_ring_free;
+ }
+
+ /* Allocate all Rx descriptors. */
+ rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
+ mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
+ GFP_KERNEL);
+
+ if (!mdp->rx_ring) {
+ dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
+ rx_ringsize);
+ ret = -ENOMEM;
+ goto desc_ring_free;
+ }
+
+ mdp->dirty_rx = 0;
+
+ /* Allocate all Tx descriptors. */
+ tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
+ mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
+ GFP_KERNEL);
+ if (!mdp->tx_ring) {
+ dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
+ tx_ringsize);
+ ret = -ENOMEM;
+ goto desc_ring_free;
+ }
+ return ret;
+
+desc_ring_free:
+ /* free DMA buffer */
+ dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
+
+skb_ring_free:
+ /* Free Rx and Tx skb ring buffer */
+ sh_eth_ring_free(ndev);
+
+ return ret;
+}
+
+static int sh_eth_dev_init(struct net_device *ndev)
+{
+ int ret = 0;
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+	u32 rx_int_var, tx_int_var;
+ u32 val;
+
+ /* Soft Reset */
+ sh_eth_reset(ndev);
+
+ /* Descriptor format */
+ sh_eth_ring_format(ndev);
+ if (mdp->cd->rpadir)
+ sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
+
+ /* all sh_eth int mask */
+ sh_eth_write(ndev, 0, EESIPR);
+
+#if defined(__LITTLE_ENDIAN__)
+ if (mdp->cd->hw_swap)
+ sh_eth_write(ndev, EDMR_EL, EDMR);
+ else
+#endif
+ sh_eth_write(ndev, 0, EDMR);
+
+ /* FIFO size set */
+ sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
+ sh_eth_write(ndev, 0, TFTR);
+
+ /* Frame recv control */
+ sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
+
+ rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
+ tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
+ sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);
+
+ if (mdp->cd->bculr)
+		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */
+
+ sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
+
+ if (!mdp->cd->no_trimd)
+ sh_eth_write(ndev, 0, TRIMD);
+
+ /* Recv frame limit set register */
+ sh_eth_write(ndev, RFLR_VALUE, RFLR);
+
+ sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
+ sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+
+ /* PAUSE Prohibition */
+ val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
+ ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
+
+ sh_eth_write(ndev, val, ECMR);
+
+ if (mdp->cd->set_rate)
+ mdp->cd->set_rate(ndev);
+
+ /* E-MAC Status Register clear */
+ sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
+
+ /* E-MAC Interrupt Enable register */
+ sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
+
+ /* Set MAC address */
+ update_mac_address(ndev);
+
+ /* mask reset */
+ if (mdp->cd->apr)
+ sh_eth_write(ndev, APR_AP, APR);
+ if (mdp->cd->mpr)
+ sh_eth_write(ndev, MPR_MP, MPR);
+ if (mdp->cd->tpauser)
+ sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
+
+ /* Setting the Rx mode will start the Rx process. */
+ sh_eth_write(ndev, EDRRR_R, EDRRR);
+
+ netif_start_queue(ndev);
+
+ return ret;
+}
+
+/* free Tx skb function */
+static int sh_eth_txfree(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct sh_eth_txdesc *txdesc;
+	int free_num = 0;
+ int entry = 0;
+
+ for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
+ entry = mdp->dirty_tx % TX_RING_SIZE;
+ txdesc = &mdp->tx_ring[entry];
+ if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
+ break;
+ /* Free the original skb. */
+ if (mdp->tx_skbuff[entry]) {
+ dma_unmap_single(&ndev->dev, txdesc->addr,
+ txdesc->buffer_length, DMA_TO_DEVICE);
+ dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
+ mdp->tx_skbuff[entry] = NULL;
+			free_num++;
+ }
+ txdesc->status = cpu_to_edmac(mdp, TD_TFP);
+ if (entry >= TX_RING_SIZE - 1)
+ txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
+
+ mdp->stats.tx_packets++;
+ mdp->stats.tx_bytes += txdesc->buffer_length;
+ }
+	return free_num;
+}
+
+/* Packet receive function */
+static int sh_eth_rx(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct sh_eth_rxdesc *rxdesc;
+
+ int entry = mdp->cur_rx % RX_RING_SIZE;
+ int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
+ struct sk_buff *skb;
+ u16 pkt_len = 0;
+ u32 desc_status;
+
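+	/* Walk descriptors until one is still owned by the DMAC (RD_RACT
+	 * set); boguscnt bounds the work to at most one full trip around
+	 * the ring per call.
+	 */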
+ rxdesc = &mdp->rx_ring[entry];
+ while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
+ desc_status = edmac_to_cpu(mdp, rxdesc->status);
+ pkt_len = rxdesc->frame_length;
+
+ if (--boguscnt < 0)
+ break;
+
+ if (!(desc_status & RDFEND))
+ mdp->stats.rx_length_errors++;
+
+ if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
+ RD_RFS5 | RD_RFS6 | RD_RFS10)) {
+ mdp->stats.rx_errors++;
+ if (desc_status & RD_RFS1)
+ mdp->stats.rx_crc_errors++;
+ if (desc_status & RD_RFS2)
+ mdp->stats.rx_frame_errors++;
+ if (desc_status & RD_RFS3)
+ mdp->stats.rx_length_errors++;
+ if (desc_status & RD_RFS4)
+ mdp->stats.rx_length_errors++;
+ if (desc_status & RD_RFS6)
+ mdp->stats.rx_missed_errors++;
+ if (desc_status & RD_RFS10)
+ mdp->stats.rx_over_errors++;
+ } else {
+ if (!mdp->cd->hw_swap)
+ sh_eth_soft_swap(
+ phys_to_virt(ALIGN(rxdesc->addr, 4)),
+ pkt_len + 2);
+ skb = mdp->rx_skbuff[entry];
+ mdp->rx_skbuff[entry] = NULL;
+ if (mdp->cd->rpadir)
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_rx(skb);
+ mdp->stats.rx_packets++;
+ mdp->stats.rx_bytes += pkt_len;
+ }
+ rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
+ entry = (++mdp->cur_rx) % RX_RING_SIZE;
+ rxdesc = &mdp->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
+ entry = mdp->dirty_rx % RX_RING_SIZE;
+ rxdesc = &mdp->rx_ring[entry];
+		/* The buffer size is aligned to a 16-byte boundary. */
+ rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+
+ if (mdp->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(mdp->rx_buf_sz);
+ mdp->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ skb->dev = ndev;
+ sh_eth_set_receive_align(skb);
+
+ skb_checksum_none_assert(skb);
+ rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
+ }
+ if (entry >= RX_RING_SIZE - 1)
+ rxdesc->status |=
+ cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
+ else
+ rxdesc->status |=
+ cpu_to_edmac(mdp, RD_RACT | RD_RFP);
+ }
+
+ /* Restart Rx engine if stopped. */
+ /* If we don't need to check status, don't. -KDU */
+ if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
+ sh_eth_write(ndev, EDRRR_R, EDRRR);
+
+ return 0;
+}
+
+static void sh_eth_rcv_snd_disable(struct net_device *ndev)
+{
+ /* disable tx and rx */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
+ ~(ECMR_RE | ECMR_TE), ECMR);
+}
+
+static void sh_eth_rcv_snd_enable(struct net_device *ndev)
+{
+ /* enable tx and rx */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
+ (ECMR_RE | ECMR_TE), ECMR);
+}
+
+/* error control function */
+static void sh_eth_error(struct net_device *ndev, int intr_status)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ u32 felic_stat;
+ u32 link_stat;
+ u32 mask;
+
+ if (intr_status & EESR_ECI) {
+ felic_stat = sh_eth_read(ndev, ECSR);
+ sh_eth_write(ndev, felic_stat, ECSR); /* clear int */
+ if (felic_stat & ECSR_ICD)
+ mdp->stats.tx_carrier_errors++;
+ if (felic_stat & ECSR_LCHNG) {
+ /* Link Changed */
+ if (mdp->cd->no_psr || mdp->no_ether_link) {
+ if (mdp->link == PHY_DOWN)
+ link_stat = 0;
+ else
+ link_stat = PHY_ST_LINK;
+ } else {
+ link_stat = (sh_eth_read(ndev, PSR));
+ if (mdp->ether_link_active_low)
+ link_stat = ~link_stat;
+ }
+ if (!(link_stat & PHY_ST_LINK))
+ sh_eth_rcv_snd_disable(ndev);
+ else {
+ /* Link Up */
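+ /* Mask ECI while acknowledging ECSR so the ack cannot
+  * retrigger this interrupt, then unmask it again. */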
+ sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
+ ~DMAC_M_ECI, EESIPR);
+ /* clear int */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
+ ECSR);
+ sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
+ DMAC_M_ECI, EESIPR);
+ /* enable tx and rx */
+ sh_eth_rcv_snd_enable(ndev);
+ }
+ }
+ }
+
+ if (intr_status & EESR_TWB) {
+ /* Write-back end; unused write-back interrupt */
+ if (intr_status & EESR_TABT) /* Transmit Abort int */
+ mdp->stats.tx_aborted_errors++;
+ if (netif_msg_tx_err(mdp))
+ dev_err(&ndev->dev, "Transmit Abort\n");
+ }
+
+ if (intr_status & EESR_RABT) {
+ /* Receive Abort int */
+ if (intr_status & EESR_RFRMER) {
+ /* Receive Frame Overflow int */
+ mdp->stats.rx_frame_errors++;
+ if (netif_msg_rx_err(mdp))
+ dev_err(&ndev->dev, "Receive Abort\n");
+ }
+ }
+
+ if (intr_status & EESR_TDE) {
+ /* Transmit Descriptor Empty int */
+ mdp->stats.tx_fifo_errors++;
+ if (netif_msg_tx_err(mdp))
+ dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
+ }
+
+ if (intr_status & EESR_TFE) {
+ /* Transmit FIFO underflow */
+ mdp->stats.tx_fifo_errors++;
+ if (netif_msg_tx_err(mdp))
+ dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
+ }
+
+ if (intr_status & EESR_RDE) {
+ /* Receive Descriptor Empty int */
+ mdp->stats.rx_over_errors++;
+
+ if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
+ sh_eth_write(ndev, EDRRR_R, EDRRR);
+ if (netif_msg_rx_err(mdp))
+ dev_err(&ndev->dev, "Receive Descriptor Empty\n");
+ }
+
+ if (intr_status & EESR_RFE) {
+ /* Receive FIFO Overflow int */
+ mdp->stats.rx_fifo_errors++;
+ if (netif_msg_rx_err(mdp))
+ dev_err(&ndev->dev, "Receive FIFO Overflow\n");
+ }
+
+ if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
+ /* Address Error */
+ mdp->stats.tx_fifo_errors++;
+ if (netif_msg_tx_err(mdp))
+ dev_err(&ndev->dev, "Address Error\n");
+ }
+
+ mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
+ if (mdp->cd->no_ade)
+ mask &= ~EESR_ADE;
+ if (intr_status & mask) {
+ /* Tx error */
+ u32 edtrr = sh_eth_read(ndev, EDTRR);
+ /* log failure state to dmesg */
+ dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
+ intr_status, mdp->cur_tx);
+ dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
+ mdp->dirty_tx, (u32) ndev->state, edtrr);
+ /* dirty buffer free */
+ sh_eth_txfree(ndev);
+
+ /* SH7712 BUG: restart Tx DMA if the transmit request was lost */
+ if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
+ /* tx dma start */
+ sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
+ }
+ /* wakeup */
+ netif_wake_queue(ndev);
+ }
+}
+
+static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
+{
+ struct net_device *ndev = netdev;
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct sh_eth_cpu_data *cd = mdp->cd;
+ irqreturn_t ret = IRQ_NONE;
+ u32 intr_status = 0;
+
+ spin_lock(&mdp->lock);
+
+ /* Get interrupt status */
+ intr_status = sh_eth_read(ndev, EESR);
+ /* Acknowledge: EESR bits are write-1-to-clear */
+ if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
+ EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
+ cd->tx_check | cd->eesr_err_check)) {
+ sh_eth_write(ndev, intr_status, EESR);
+ ret = IRQ_HANDLED;
+ } else
+ goto other_irq;
+
+ if (intr_status & (EESR_FRC | /* frame received */
+ EESR_RMAF | /* multicast address frame received */
+ EESR_RRF | /* residual-bit frame received */
+ EESR_RTLF | /* too-long frame received */
+ EESR_RTSF | /* too-short frame received */
+ EESR_PRE | /* PHY-LSI receive error */
+ EESR_CERF)) { /* receive frame CRC error */
+ sh_eth_rx(ndev);
+ }
+
+ /* Tx Check */
+ if (intr_status & cd->tx_check) {
+ sh_eth_txfree(ndev);
+ netif_wake_queue(ndev);
+ }
+
+ if (intr_status & cd->eesr_err_check)
+ sh_eth_error(ndev, intr_status);
+
+other_irq:
+ spin_unlock(&mdp->lock);
+
+ return ret;
+}
+
+static void sh_eth_timer(unsigned long data)
+{
+ struct net_device *ndev = (struct net_device *)data;
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
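+ /* Nothing to poll; simply re-arm for another 10 seconds. */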
+ mod_timer(&mdp->timer, jiffies + (10 * HZ));
+}
+
+/* PHY state control function */
+static void sh_eth_adjust_link(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct phy_device *phydev = mdp->phydev;
+ int new_state = 0;
+
+ if (phydev->link != PHY_DOWN) {
+ if (phydev->duplex != mdp->duplex) {
+ new_state = 1;
+ mdp->duplex = phydev->duplex;
+ if (mdp->cd->set_duplex)
+ mdp->cd->set_duplex(ndev);
+ }
+
+ if (phydev->speed != mdp->speed) {
+ new_state = 1;
+ mdp->speed = phydev->speed;
+ if (mdp->cd->set_rate)
+ mdp->cd->set_rate(ndev);
+ }
+ if (mdp->link == PHY_DOWN) {
+ sh_eth_write(ndev,
+ (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
+ new_state = 1;
+ mdp->link = phydev->link;
+ }
+ } else if (mdp->link) {
+ new_state = 1;
+ mdp->link = PHY_DOWN;
+ mdp->speed = 0;
+ mdp->duplex = -1;
+ }
+
+ if (new_state && netif_msg_link(mdp))
+ phy_print_status(phydev);
+}
+
+/* PHY init function */
+static int sh_eth_phy_init(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ char phy_id[MII_BUS_ID_SIZE + 3];
+ struct phy_device *phydev = NULL;
+
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
+ mdp->mii_bus->id , mdp->phy_id);
+
+ mdp->link = PHY_DOWN;
+ mdp->speed = 0;
+ mdp->duplex = -1;
+
+ /* Try connect to PHY */
+ phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
+ 0, mdp->phy_interface);
+ if (IS_ERR(phydev)) {
+ dev_err(&ndev->dev, "phy_connect failed\n");
+ return PTR_ERR(phydev);
+ }
+
+ dev_info(&ndev->dev, "attached phy %i to driver %s\n",
+ phydev->addr, phydev->drv->name);
+
+ mdp->phydev = phydev;
+
+ return 0;
+}
+
+/* PHY control start function */
+static int sh_eth_phy_start(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int ret;
+
+ ret = sh_eth_phy_init(ndev);
+ if (ret)
+ return ret;
+
+ /* reset phy - this also wakes it from PDOWN */
+ phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
+ phy_start(mdp->phydev);
+
+ return 0;
+}
+
+static int sh_eth_get_settings(struct net_device *ndev,
+ struct ethtool_cmd *ecmd)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mdp->lock, flags);
+ ret = phy_ethtool_gset(mdp->phydev, ecmd);
+ spin_unlock_irqrestore(&mdp->lock, flags);
+
+ return ret;
+}
+
+static int sh_eth_set_settings(struct net_device *ndev,
+ struct ethtool_cmd *ecmd)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mdp->lock, flags);
+
+ /* disable tx and rx */
+ sh_eth_rcv_snd_disable(ndev);
+
+ ret = phy_ethtool_sset(mdp->phydev, ecmd);
+ if (ret)
+ goto error_exit;
+
+ if (ecmd->duplex == DUPLEX_FULL)
+ mdp->duplex = 1;
+ else
+ mdp->duplex = 0;
+
+ if (mdp->cd->set_duplex)
+ mdp->cd->set_duplex(ndev);
+
+error_exit:
+ mdelay(1);
+
+ /* enable tx and rx */
+ sh_eth_rcv_snd_enable(ndev);
+
+ spin_unlock_irqrestore(&mdp->lock, flags);
+
+ return ret;
+}
+
+static int sh_eth_nway_reset(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mdp->lock, flags);
+ ret = phy_start_aneg(mdp->phydev);
+ spin_unlock_irqrestore(&mdp->lock, flags);
+
+ return ret;
+}
+
+static u32 sh_eth_get_msglevel(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ return mdp->msg_enable;
+}
+
+static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ mdp->msg_enable = value;
+}
+
+static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "rx_current", "tx_current",
+ "rx_dirty", "tx_dirty",
+};
+#define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats)
+
+static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return SH_ETH_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void sh_eth_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int i = 0;
+
+ /* device-specific stats */
+ data[i++] = mdp->cur_rx;
+ data[i++] = mdp->cur_tx;
+ data[i++] = mdp->dirty_rx;
+ data[i++] = mdp->dirty_tx;
+}
+
+static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, *sh_eth_gstrings_stats,
+ sizeof(sh_eth_gstrings_stats));
+ break;
+ }
+}
+
+static const struct ethtool_ops sh_eth_ethtool_ops = {
+ .get_settings = sh_eth_get_settings,
+ .set_settings = sh_eth_set_settings,
+ .nway_reset = sh_eth_nway_reset,
+ .get_msglevel = sh_eth_get_msglevel,
+ .set_msglevel = sh_eth_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_strings = sh_eth_get_strings,
+ .get_ethtool_stats = sh_eth_get_ethtool_stats,
+ .get_sset_count = sh_eth_get_sset_count,
+};
+
+/* network device open function */
+static int sh_eth_open(struct net_device *ndev)
+{
+ int ret = 0;
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ pm_runtime_get_sync(&mdp->pdev->dev);
+
+ ret = request_irq(ndev->irq, sh_eth_interrupt,
+#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7764) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7757)
+ IRQF_SHARED,
+#else
+ 0,
+#endif
+ ndev->name, ndev);
+ if (ret) {
+ dev_err(&ndev->dev, "Can not assign IRQ number\n");
+ return ret;
+ }
+
+ /* Descriptor set */
+ ret = sh_eth_ring_init(ndev);
+ if (ret)
+ goto out_free_irq;
+
+ /* device init */
+ ret = sh_eth_dev_init(ndev);
+ if (ret)
+ goto out_free_irq;
+
+ /* PHY control start*/
+ ret = sh_eth_phy_start(ndev);
+ if (ret)
+ goto out_free_irq;
+
+ /* Set the timer to check for link beat. setup_timer() already
+  * initializes the timer, so a separate init_timer() is not needed. */
+ setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
+ mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec */
+
+ return ret;
+
+out_free_irq:
+ free_irq(ndev->irq, ndev);
+ pm_runtime_put_sync(&mdp->pdev->dev);
+ return ret;
+}
+
+/* Timeout function */
+static void sh_eth_tx_timeout(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct sh_eth_rxdesc *rxdesc;
+ int i;
+
+ netif_stop_queue(ndev);
+
+ if (netif_msg_timer(mdp))
+ dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
+ " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
+
+ /* tx_errors count up */
+ mdp->stats.tx_errors++;
+
+ /* timer off */
+ del_timer_sync(&mdp->timer);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ rxdesc = &mdp->rx_ring[i];
+ rxdesc->status = 0;
+ rxdesc->addr = 0xBADF00D0;
+ if (mdp->rx_skbuff[i])
+ dev_kfree_skb(mdp->rx_skbuff[i]);
+ mdp->rx_skbuff[i] = NULL;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (mdp->tx_skbuff[i])
+ dev_kfree_skb(mdp->tx_skbuff[i]);
+ mdp->tx_skbuff[i] = NULL;
+ }
+
+ /* device init */
+ sh_eth_dev_init(ndev);
+
+ /* timer on */
+ mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec */
+ add_timer(&mdp->timer);
+}
+
+/* Packet transmit function */
+static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct sh_eth_txdesc *txdesc;
+ u32 entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdp->lock, flags);
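+ /* Keep four descriptors in reserve; if the ring is nearly full, try to
+  * reclaim completed entries before reporting the queue busy. */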
+ if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
+ if (!sh_eth_txfree(ndev)) {
+ if (netif_msg_tx_queued(mdp))
+ dev_warn(&ndev->dev, "TxFD exhausted.\n");
+ netif_stop_queue(ndev);
+ spin_unlock_irqrestore(&mdp->lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+ }
+ spin_unlock_irqrestore(&mdp->lock, flags);
+
+ entry = mdp->cur_tx % TX_RING_SIZE;
+ mdp->tx_skbuff[entry] = skb;
+ txdesc = &mdp->tx_ring[entry];
+ /* soft swap. */
+ if (!mdp->cd->hw_swap)
+ sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
+ skb->len + 2);
+ txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
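+ /* Pad short frames up to the minimum Ethernet frame length
+  * (60 bytes, excluding FCS). */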
+ if (skb->len < ETHERSMALL)
+ txdesc->buffer_length = ETHERSMALL;
+ else
+ txdesc->buffer_length = skb->len;
+
+ if (entry >= TX_RING_SIZE - 1)
+ txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
+ else
+ txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
+
+ mdp->cur_tx++;
+
+ if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
+ sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
+
+ return NETDEV_TX_OK;
+}
+
+/* device close function */
+static int sh_eth_close(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int ringsize;
+
+ netif_stop_queue(ndev);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ sh_eth_write(ndev, 0x0000, EESIPR);
+
+ /* Stop the chip's Tx and Rx processes. */
+ sh_eth_write(ndev, 0, EDTRR);
+ sh_eth_write(ndev, 0, EDRRR);
+
+ /* PHY Disconnect */
+ if (mdp->phydev) {
+ phy_stop(mdp->phydev);
+ phy_disconnect(mdp->phydev);
+ }
+
+ free_irq(ndev->irq, ndev);
+
+ del_timer_sync(&mdp->timer);
+
+ /* Free all the skbuffs in the Rx queue. */
+ sh_eth_ring_free(ndev);
+
+ /* free DMA buffer */
+ ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
+ dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);
+
+ /* free DMA buffer */
+ ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
+ dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
+
+ pm_runtime_put_sync(&mdp->pdev->dev);
+
+ return 0;
+}
+
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ pm_runtime_get_sync(&mdp->pdev->dev);
+
+ mdp->stats.tx_dropped += sh_eth_read(ndev, TROCR);
+ sh_eth_write(ndev, 0, TROCR); /* (write clear) */
+ mdp->stats.collisions += sh_eth_read(ndev, CDCR);
+ sh_eth_write(ndev, 0, CDCR); /* (write clear) */
+ mdp->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
+ sh_eth_write(ndev, 0, LCCR); /* (write clear) */
+ if (sh_eth_is_gether(mdp)) {
+ mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
+ sh_eth_write(ndev, 0, CERCR); /* (write clear) */
+ mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
+ sh_eth_write(ndev, 0, CEECR); /* (write clear) */
+ } else {
+ mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
+ sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
+ }
+ pm_runtime_put_sync(&mdp->pdev->dev);
+
+ return &mdp->stats;
+}
+
+/* ioctl to device function */
+static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
+ int cmd)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct phy_device *phydev = mdp->phydev;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(phydev, rq, cmd);
+}
+
+#if defined(SH_ETH_HAS_TSU)
+/* Set the multicast reception mode */
+static void sh_eth_set_multicast_list(struct net_device *ndev)
+{
+ if (ndev->flags & IFF_PROMISC) {
+ /* Set promiscuous. */
+ sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_MCT) |
+ ECMR_PRM, ECMR);
+ } else {
+ /* Normal, unicast/broadcast-only mode. */
+ sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) |
+ ECMR_MCT, ECMR);
+ }
+}
+#endif /* SH_ETH_HAS_TSU */
+
+/* SuperH's TSU register init function */
+static void sh_eth_tsu_init(struct sh_eth_private *mdp)
+{
+ sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forwarding (port 0 -> 1) */
+ sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forwarding (port 1 -> 0) */
+ sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forwarding FIFO: 3k-3k split */
+ sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
+ sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
+ sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
+ sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
+ sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
+ sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
+ sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
+ if (sh_eth_is_gether(mdp)) {
+ sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */
+ sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */
+ } else {
+ sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */
+ sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */
+ }
+ sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* clear all interrupt status */
+ sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* disable all interrupts */
+ sh_eth_tsu_write(mdp, 0, TSU_TEN); /* disable all CAM entries */
+ sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */
+ sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */
+ sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */
+ sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */
+}
+
+/* MDIO bus release function */
+static int sh_mdio_release(struct net_device *ndev)
+{
+ struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
+
+ /* unregister mdio bus */
+ mdiobus_unregister(bus);
+
+ /* remove mdio bus info from net_device */
+ dev_set_drvdata(&ndev->dev, NULL);
+
+ /* free interrupts memory */
+ kfree(bus->irq);
+
+ /* free bitbang info */
+ free_mdio_bitbang(bus);
+
+ return 0;
+}
+
+/* MDIO bus init function */
+static int sh_mdio_init(struct net_device *ndev, int id,
+ struct sh_eth_plat_data *pd)
+{
+ int ret, i;
+ struct bb_info *bitbang;
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ /* create bit control struct for PHY */
+ bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
+ if (!bitbang) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* bitbang init: the masks below select the MDI/MDO/MMD/MDC bits of PIR */
+ bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
+ bitbang->set_gate = pd->set_mdio_gate;
+ bitbang->mdi_msk = 0x08;
+ bitbang->mdo_msk = 0x04;
+ bitbang->mmd_msk = 0x02;/* MMD */
+ bitbang->mdc_msk = 0x01;
+ bitbang->ctrl.ops = &bb_ops;
+
+ /* MII controller setting */
+ mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
+ if (!mdp->mii_bus) {
+ ret = -ENOMEM;
+ goto out_free_bitbang;
+ }
+
+ /* Hook up MII support for ethtool */
+ mdp->mii_bus->name = "sh_mii";
+ mdp->mii_bus->parent = &ndev->dev;
+ snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);
+
+ /* PHY IRQ */
+ mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ if (!mdp->mii_bus->irq) {
+ ret = -ENOMEM;
+ goto out_free_bus;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ mdp->mii_bus->irq[i] = PHY_POLL;
+
+ /* register MDIO bus */
+ ret = mdiobus_register(mdp->mii_bus);
+ if (ret)
+ goto out_free_irq;
+
+ dev_set_drvdata(&ndev->dev, mdp->mii_bus);
+
+ return 0;
+
+out_free_irq:
+ kfree(mdp->mii_bus->irq);
+
+out_free_bus:
+ free_mdio_bitbang(mdp->mii_bus);
+
+out_free_bitbang:
+ kfree(bitbang);
+
+out:
+ return ret;
+}
+
+static const u16 *sh_eth_get_register_offset(int register_type)
+{
+ const u16 *reg_offset = NULL;
+
+ switch (register_type) {
+ case SH_ETH_REG_GIGABIT:
+ reg_offset = sh_eth_offset_gigabit;
+ break;
+ case SH_ETH_REG_FAST_SH4:
+ reg_offset = sh_eth_offset_fast_sh4;
+ break;
+ case SH_ETH_REG_FAST_SH3_SH2:
+ reg_offset = sh_eth_offset_fast_sh3_sh2;
+ break;
+ default:
+ printk(KERN_ERR "Unknown register type (%d)\n", register_type);
+ break;
+ }
+
+ return reg_offset;
+}
+
+static const struct net_device_ops sh_eth_netdev_ops = {
+ .ndo_open = sh_eth_open,
+ .ndo_stop = sh_eth_close,
+ .ndo_start_xmit = sh_eth_start_xmit,
+ .ndo_get_stats = sh_eth_get_stats,
+#if defined(SH_ETH_HAS_TSU)
+ .ndo_set_rx_mode = sh_eth_set_multicast_list,
+#endif
+ .ndo_tx_timeout = sh_eth_tx_timeout,
+ .ndo_do_ioctl = sh_eth_do_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static int sh_eth_drv_probe(struct platform_device *pdev)
+{
+ int ret, devno = 0;
+ struct resource *res;
+ struct net_device *ndev = NULL;
+ struct sh_eth_private *mdp = NULL;
+ struct sh_eth_plat_data *pd;
+
+ /* get base addr */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(res == NULL)) {
+ dev_err(&pdev->dev, "invalid resource\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ndev = alloc_etherdev(sizeof(struct sh_eth_private));
+ if (!ndev) {
+ dev_err(&pdev->dev, "Could not allocate device.\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* The sh Ether-specific entries in the device structure. */
+ ndev->base_addr = res->start;
+ devno = pdev->id;
+ if (devno < 0)
+ devno = 0;
+
+ ndev->dma = -1;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ ret = -ENODEV;
+ goto out_release;
+ }
+ ndev->irq = ret;
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ /* Fill in the fields of the device structure with ethernet values. */
+ ether_setup(ndev);
+
+ mdp = netdev_priv(ndev);
+ mdp->addr = ioremap(res->start, resource_size(res));
+ if (mdp->addr == NULL) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "ioremap failed.\n");
+ goto out_release;
+ }
+
+ spin_lock_init(&mdp->lock);
+ mdp->pdev = pdev;
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
+ pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
+ /* get PHY ID */
+ mdp->phy_id = pd->phy;
+ mdp->phy_interface = pd->phy_interface;
+ /* EDMAC endian */
+ mdp->edmac_endian = pd->edmac_endian;
+ mdp->no_ether_link = pd->no_ether_link;
+ mdp->ether_link_active_low = pd->ether_link_active_low;
+ mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
+
+ /* set cpu data */
+#if defined(SH_ETH_HAS_BOTH_MODULES)
+ mdp->cd = sh_eth_get_cpu_data(mdp);
+#else
+ mdp->cd = &sh_eth_my_cpu_data;
+#endif
+ sh_eth_set_default_cpu_data(mdp->cd);
+
+ /* set function */
+ ndev->netdev_ops = &sh_eth_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
+ ndev->watchdog_timeo = TX_TIMEOUT;
+
+ /* debug message level */
+ mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
+ mdp->post_rx = POST_RX >> (devno << 1);
+ mdp->post_fw = POST_FW >> (devno << 1);
+
+ /* read and set MAC address */
+ read_mac_address(ndev, pd->mac_addr);
+
+ /* First device only init */
+ if (!devno) {
+ if (mdp->cd->tsu) {
+ struct resource *rtsu;
+ rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!rtsu) {
+ dev_err(&pdev->dev, "TSU resource not found\n");
+ ret = -ENODEV;
+ goto out_release;
+ }
+ mdp->tsu_addr = ioremap(rtsu->start,
+ resource_size(rtsu));
+ }
+ if (mdp->cd->chip_reset)
+ mdp->cd->chip_reset(ndev);
+
+ if (mdp->cd->tsu) {
+ /* TSU init (first device only) */
+ sh_eth_tsu_init(mdp);
+ }
+ }
+
+ /* network device register */
+ ret = register_netdev(ndev);
+ if (ret)
+ goto out_release;
+
+ /* mdio bus init */
+ ret = sh_mdio_init(ndev, pdev->id, pd);
+ if (ret)
+ goto out_unregister;
+
+ /* print device information */
+ pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
+ (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
+
+ platform_set_drvdata(pdev, ndev);
+
+ return ret;
+
+out_unregister:
+ unregister_netdev(ndev);
+
+out_release:
+ /* unmap registers and free net_device */
+ if (mdp && mdp->addr)
+ iounmap(mdp->addr);
+ if (mdp && mdp->tsu_addr)
+ iounmap(mdp->tsu_addr);
+ if (ndev)
+ free_netdev(ndev);
+
+out:
+ return ret;
+}
+
+static int sh_eth_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ iounmap(mdp->tsu_addr);
+ sh_mdio_release(ndev);
+ unregister_netdev(ndev);
+ pm_runtime_disable(&pdev->dev);
+ iounmap(mdp->addr);
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static int sh_eth_runtime_nop(struct device *dev)
+{
+ /*
+ * Runtime PM callback shared between ->runtime_suspend()
+ * and ->runtime_resume(). Simply returns success.
+ *
+ * This driver re-initializes all registers after
+ * pm_runtime_get_sync() anyway so there is no need
+ * to save and restore registers here.
+ */
+ return 0;
+}
+
+static const struct dev_pm_ops sh_eth_dev_pm_ops = {
+ .runtime_suspend = sh_eth_runtime_nop,
+ .runtime_resume = sh_eth_runtime_nop,
+};
+
+static struct platform_driver sh_eth_driver = {
+ .probe = sh_eth_drv_probe,
+ .remove = sh_eth_drv_remove,
+ .driver = {
+ .name = CARDNAME,
+ .pm = &sh_eth_dev_pm_ops,
+ },
+};
+
+static int __init sh_eth_init(void)
+{
+ return platform_driver_register(&sh_eth_driver);
+}
+
+static void __exit sh_eth_cleanup(void)
+{
+ platform_driver_unregister(&sh_eth_driver);
+}
+
+module_init(sh_eth_init);
+module_exit(sh_eth_cleanup);
+
+MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
+MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
new file mode 100644
index 000000000000..47877b13ffad
--- /dev/null
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -0,0 +1,830 @@
+/*
+ * SuperH Ethernet device driver
+ *
+ * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
+ * Copyright (C) 2008-2011 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ */
+
+#ifndef __SH_ETH_H__
+#define __SH_ETH_H__
+
+#define CARDNAME "sh-eth"
+#define TX_TIMEOUT (5*HZ)
+#define TX_RING_SIZE 64 /* Tx ring size */
+#define RX_RING_SIZE 64 /* Rx ring size */
+#define ETHERSMALL 60
+#define PKT_BUF_SZ 1538
+
+enum {
+ /* E-DMAC registers */
+ EDSR = 0,
+ EDMR,
+ EDTRR,
+ EDRRR,
+ EESR,
+ EESIPR,
+ TDLAR,
+ TDFAR,
+ TDFXR,
+ TDFFR,
+ RDLAR,
+ RDFAR,
+ RDFXR,
+ RDFFR,
+ TRSCER,
+ RMFCR,
+ TFTR,
+ FDR,
+ RMCR,
+ EDOCR,
+ TFUCR,
+ RFOCR,
+ FCFTR,
+ RPADIR,
+ TRIMD,
+ RBWAR,
+ TBRAR,
+
+ /* Ether registers */
+ ECMR,
+ ECSR,
+ ECSIPR,
+ PIR,
+ PSR,
+ RDMLR,
+ PIPR,
+ RFLR,
+ IPGR,
+ APR,
+ MPR,
+ PFTCR,
+ PFRCR,
+ RFCR,
+ RFCF,
+ TPAUSER,
+ TPAUSECR,
+ BCFR,
+ BCFRR,
+ GECMR,
+ BCULR,
+ MAHR,
+ MALR,
+ TROCR,
+ CDCR,
+ LCCR,
+ CNDCR,
+ CEFCR,
+ FRECR,
+ TSFRCR,
+ TLFRCR,
+ CERCR,
+ CEECR,
+ MAFCR,
+ RTRATE,
+
+ /* TSU Absolute address */
+ ARSTR,
+ TSU_CTRST,
+ TSU_FWEN0,
+ TSU_FWEN1,
+ TSU_FCM,
+ TSU_BSYSL0,
+ TSU_BSYSL1,
+ TSU_PRISL0,
+ TSU_PRISL1,
+ TSU_FWSL0,
+ TSU_FWSL1,
+ TSU_FWSLC,
+ TSU_QTAG0,
+ TSU_QTAG1,
+ TSU_QTAGM0,
+ TSU_QTAGM1,
+ TSU_FWSR,
+ TSU_FWINMK,
+ TSU_ADQT0,
+ TSU_ADQT1,
+ TSU_VTAG0,
+ TSU_VTAG1,
+ TSU_ADSBSY,
+ TSU_TEN,
+ TSU_POST1,
+ TSU_POST2,
+ TSU_POST3,
+ TSU_POST4,
+ TSU_ADRH0,
+ TSU_ADRL0,
+ TSU_ADRH31,
+ TSU_ADRL31,
+
+ TXNLCR0,
+ TXALCR0,
+ RXNLCR0,
+ RXALCR0,
+ FWNLCR0,
+ FWALCR0,
+ TXNLCR1,
+ TXALCR1,
+ RXNLCR1,
+ RXALCR1,
+ FWNLCR1,
+ FWALCR1,
+
+ /* This entry must remain the last one. */
+ SH_ETH_MAX_REGISTER_OFFSET,
+};
+
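+/* NOTE: table entries not listed below default to 0; a register must only
+ * be accessed on variants whose table actually defines its offset. */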
+static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [EDSR] = 0x0000,
+ [EDMR] = 0x0400,
+ [EDTRR] = 0x0408,
+ [EDRRR] = 0x0410,
+ [EESR] = 0x0428,
+ [EESIPR] = 0x0430,
+ [TDLAR] = 0x0010,
+ [TDFAR] = 0x0014,
+ [TDFXR] = 0x0018,
+ [TDFFR] = 0x001c,
+ [RDLAR] = 0x0030,
+ [RDFAR] = 0x0034,
+ [RDFXR] = 0x0038,
+ [RDFFR] = 0x003c,
+ [TRSCER] = 0x0438,
+ [RMFCR] = 0x0440,
+ [TFTR] = 0x0448,
+ [FDR] = 0x0450,
+ [RMCR] = 0x0458,
+ [RPADIR] = 0x0460,
+ [FCFTR] = 0x0468,
+
+ [ECMR] = 0x0500,
+ [ECSR] = 0x0510,
+ [ECSIPR] = 0x0518,
+ [PIR] = 0x0520,
+ [PSR] = 0x0528,
+ [PIPR] = 0x052c,
+ [RFLR] = 0x0508,
+ [APR] = 0x0554,
+ [MPR] = 0x0558,
+ [PFTCR] = 0x055c,
+ [PFRCR] = 0x0560,
+ [TPAUSER] = 0x0564,
+ [GECMR] = 0x05b0,
+ [BCULR] = 0x05b4,
+ [MAHR] = 0x05c0,
+ [MALR] = 0x05c8,
+ [TROCR] = 0x0700,
+ [CDCR] = 0x0708,
+ [LCCR] = 0x0710,
+ [CEFCR] = 0x0740,
+ [FRECR] = 0x0748,
+ [TSFRCR] = 0x0750,
+ [TLFRCR] = 0x0758,
+ [RFCR] = 0x0760,
+ [CERCR] = 0x0768,
+ [CEECR] = 0x0770,
+ [MAFCR] = 0x0778,
+
+ [ARSTR] = 0x0000,
+ [TSU_CTRST] = 0x0004,
+ [TSU_FWEN0] = 0x0010,
+ [TSU_FWEN1] = 0x0014,
+ [TSU_FCM] = 0x0018,
+ [TSU_BSYSL0] = 0x0020,
+ [TSU_BSYSL1] = 0x0024,
+ [TSU_PRISL0] = 0x0028,
+ [TSU_PRISL1] = 0x002c,
+ [TSU_FWSL0] = 0x0030,
+ [TSU_FWSL1] = 0x0034,
+ [TSU_FWSLC] = 0x0038,
+ [TSU_QTAG0] = 0x0040,
+ [TSU_QTAG1] = 0x0044,
+ [TSU_FWSR] = 0x0050,
+ [TSU_FWINMK] = 0x0054,
+ [TSU_ADQT0] = 0x0048,
+ [TSU_ADQT1] = 0x004c,
+ [TSU_VTAG0] = 0x0058,
+ [TSU_VTAG1] = 0x005c,
+ [TSU_ADSBSY] = 0x0060,
+ [TSU_TEN] = 0x0064,
+ [TSU_POST1] = 0x0070,
+ [TSU_POST2] = 0x0074,
+ [TSU_POST3] = 0x0078,
+ [TSU_POST4] = 0x007c,
+ [TSU_ADRH0] = 0x0100,
+ [TSU_ADRL0] = 0x0104,
+ [TSU_ADRH31] = 0x01f8,
+ [TSU_ADRL31] = 0x01fc,
+
+ [TXNLCR0] = 0x0080,
+ [TXALCR0] = 0x0084,
+ [RXNLCR0] = 0x0088,
+ [RXALCR0] = 0x008c,
+ [FWNLCR0] = 0x0090,
+ [FWALCR0] = 0x0094,
+ [TXNLCR1] = 0x00a0,
+ [TXALCR1] = 0x00a0,
+ [RXNLCR1] = 0x00a8,
+ [RXALCR1] = 0x00ac,
+ [FWNLCR1] = 0x00b0,
+ [FWALCR1] = 0x00b4,
+};
+
+static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [ECMR] = 0x0100,
+ [RFLR] = 0x0108,
+ [ECSR] = 0x0110,
+ [ECSIPR] = 0x0118,
+ [PIR] = 0x0120,
+ [PSR] = 0x0128,
+ [RDMLR] = 0x0140,
+ [IPGR] = 0x0150,
+ [APR] = 0x0154,
+ [MPR] = 0x0158,
+ [TPAUSER] = 0x0164,
+ [RFCF] = 0x0160,
+ [TPAUSECR] = 0x0168,
+ [BCFRR] = 0x016c,
+ [MAHR] = 0x01c0,
+ [MALR] = 0x01c8,
+ [TROCR] = 0x01d0,
+ [CDCR] = 0x01d4,
+ [LCCR] = 0x01d8,
+ [CNDCR] = 0x01dc,
+ [CEFCR] = 0x01e4,
+ [FRECR] = 0x01e8,
+ [TSFRCR] = 0x01ec,
+ [TLFRCR] = 0x01f0,
+ [RFCR] = 0x01f4,
+ [MAFCR] = 0x01f8,
+ [RTRATE] = 0x01fc,
+
+ [EDMR] = 0x0000,
+ [EDTRR] = 0x0008,
+ [EDRRR] = 0x0010,
+ [TDLAR] = 0x0018,
+ [RDLAR] = 0x0020,
+ [EESR] = 0x0028,
+ [EESIPR] = 0x0030,
+ [TRSCER] = 0x0038,
+ [RMFCR] = 0x0040,
+ [TFTR] = 0x0048,
+ [FDR] = 0x0050,
+ [RMCR] = 0x0058,
+ [TFUCR] = 0x0064,
+ [RFOCR] = 0x0068,
+ [FCFTR] = 0x0070,
+ [RPADIR] = 0x0078,
+ [TRIMD] = 0x007c,
+ [RBWAR] = 0x00c8,
+ [RDFAR] = 0x00cc,
+ [TBRAR] = 0x00d4,
+ [TDFAR] = 0x00d8,
+};
+
+static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [ECMR] = 0x0160,
+ [ECSR] = 0x0164,
+ [ECSIPR] = 0x0168,
+ [PIR] = 0x016c,
+ [MAHR] = 0x0170,
+ [MALR] = 0x0174,
+ [RFLR] = 0x0178,
+ [PSR] = 0x017c,
+ [TROCR] = 0x0180,
+ [CDCR] = 0x0184,
+ [LCCR] = 0x0188,
+ [CNDCR] = 0x018c,
+ [CEFCR] = 0x0194,
+ [FRECR] = 0x0198,
+ [TSFRCR] = 0x019c,
+ [TLFRCR] = 0x01a0,
+ [RFCR] = 0x01a4,
+ [MAFCR] = 0x01a8,
+ [IPGR] = 0x01b4,
+ [APR] = 0x01b8,
+ [MPR] = 0x01bc,
+ [TPAUSER] = 0x01c4,
+ [BCFR] = 0x01cc,
+
+ [ARSTR] = 0x0000,
+ [TSU_CTRST] = 0x0004,
+ [TSU_FWEN0] = 0x0010,
+ [TSU_FWEN1] = 0x0014,
+ [TSU_FCM] = 0x0018,
+ [TSU_BSYSL0] = 0x0020,
+ [TSU_BSYSL1] = 0x0024,
+ [TSU_PRISL0] = 0x0028,
+ [TSU_PRISL1] = 0x002c,
+ [TSU_FWSL0] = 0x0030,
+ [TSU_FWSL1] = 0x0034,
+ [TSU_FWSLC] = 0x0038,
+ [TSU_QTAGM0] = 0x0040,
+ [TSU_QTAGM1] = 0x0044,
+ [TSU_ADQT0] = 0x0048,
+ [TSU_ADQT1] = 0x004c,
+ [TSU_FWSR] = 0x0050,
+ [TSU_FWINMK] = 0x0054,
+ [TSU_ADSBSY] = 0x0060,
+ [TSU_TEN] = 0x0064,
+ [TSU_POST1] = 0x0070,
+ [TSU_POST2] = 0x0074,
+ [TSU_POST3] = 0x0078,
+ [TSU_POST4] = 0x007c,
+
+ [TXNLCR0] = 0x0080,
+ [TXALCR0] = 0x0084,
+ [RXNLCR0] = 0x0088,
+ [RXALCR0] = 0x008c,
+ [FWNLCR0] = 0x0090,
+ [FWALCR0] = 0x0094,
+ [TXNLCR1] = 0x00a0,
+ [TXALCR1] = 0x00a0,
+ [RXNLCR1] = 0x00a8,
+ [RXALCR1] = 0x00ac,
+ [FWNLCR1] = 0x00b0,
+ [FWALCR1] = 0x00b4,
+
+ [TSU_ADRH0] = 0x0100,
+ [TSU_ADRL0] = 0x0104,
+ [TSU_ADRL31] = 0x01fc,
+
+};
+
+/* Driver's parameters */
+#if defined(CONFIG_CPU_SH4)
+#define SH4_SKB_RX_ALIGN 32
+#else
+#define SH2_SH3_SKB_RX_ALIGN 2
+#endif
+
+/*
+ * Register bits
+ */
+#ifdef CONFIG_CPU_SUBTYPE_SH7763
+/* EDSR */
+enum EDSR_BIT {
+ EDSR_ENT = 0x01, EDSR_ENR = 0x02,
+};
+#define EDSR_ENALL (EDSR_ENT|EDSR_ENR)
+
+/* GECMR */
+enum GECMR_BIT {
+ GECMR_10 = 0x0, GECMR_100 = 0x04, GECMR_1000 = 0x01,
+};
+#endif
+
+/* EDMR */
+enum DMAC_M_BIT {
+ EDMR_EL = 0x40, /* Little endian */
+ EDMR_DL1 = 0x20, EDMR_DL0 = 0x10,
+ EDMR_SRST_GETHER = 0x03,
+ EDMR_SRST_ETHER = 0x01,
+};
+
+/* EDTRR */
+enum DMAC_T_BIT {
+ EDTRR_TRNS_GETHER = 0x03,
+ EDTRR_TRNS_ETHER = 0x01,
+};
+
+/* EDRRR*/
+enum EDRRR_R_BIT {
+ EDRRR_R = 0x01,
+};
+
+/* TPAUSER */
+enum TPAUSER_BIT {
+ TPAUSER_TPAUSE = 0x0000ffff,
+ TPAUSER_UNLIMITED = 0,
+};
+
+/* BCFR */
+enum BCFR_BIT {
+ BCFR_RPAUSE = 0x0000ffff,
+ BCFR_UNLIMITED = 0,
+};
+
+/* PIR */
+enum PIR_BIT {
+ PIR_MDI = 0x08, PIR_MDO = 0x04, PIR_MMD = 0x02, PIR_MDC = 0x01,
+};
+
+/* PSR */
+enum PHY_STATUS_BIT { PHY_ST_LINK = 0x01, };
+
+/* EESR */
+enum EESR_BIT {
+ EESR_TWB1 = 0x80000000,
+ EESR_TWB = 0x40000000, /* same as TWB0 */
+ EESR_TC1 = 0x20000000,
+ EESR_TUC = 0x10000000,
+ EESR_ROC = 0x08000000,
+ EESR_TABT = 0x04000000,
+ EESR_RABT = 0x02000000,
+ EESR_RFRMER = 0x01000000, /* same as RFCOF */
+ EESR_ADE = 0x00800000,
+ EESR_ECI = 0x00400000,
+ EESR_FTC = 0x00200000, /* same as TC or TC0 */
+ EESR_TDE = 0x00100000,
+ EESR_TFE = 0x00080000, /* same as TFUF */
+ EESR_FRC = 0x00040000, /* same as FR */
+ EESR_RDE = 0x00020000,
+ EESR_RFE = 0x00010000,
+ EESR_CND = 0x00000800,
+ EESR_DLC = 0x00000400,
+ EESR_CD = 0x00000200,
+ EESR_RTO = 0x00000100,
+ EESR_RMAF = 0x00000080,
+ EESR_CEEF = 0x00000040,
+ EESR_CELF = 0x00000020,
+ EESR_RRF = 0x00000010,
+ EESR_RTLF = 0x00000008,
+ EESR_RTSF = 0x00000004,
+ EESR_PRE = 0x00000002,
+ EESR_CERF = 0x00000001,
+};
+
+#define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \
+ EESR_RTO)
+#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | \
+ EESR_RDE | EESR_RFRMER | EESR_ADE | \
+ EESR_TFE | EESR_TDE | EESR_ECI)
+#define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \
+ EESR_TFE)
+
+/* EESIPR */
+enum DMAC_IM_BIT {
+ DMAC_M_TWB = 0x40000000, DMAC_M_TABT = 0x04000000,
+ DMAC_M_RABT = 0x02000000,
+ DMAC_M_RFRMER = 0x01000000, DMAC_M_ADF = 0x00800000,
+ DMAC_M_ECI = 0x00400000, DMAC_M_FTC = 0x00200000,
+ DMAC_M_TDE = 0x00100000, DMAC_M_TFE = 0x00080000,
+ DMAC_M_FRC = 0x00040000, DMAC_M_RDE = 0x00020000,
+ DMAC_M_RFE = 0x00010000, DMAC_M_TINT4 = 0x00000800,
+ DMAC_M_TINT3 = 0x00000400, DMAC_M_TINT2 = 0x00000200,
+ DMAC_M_TINT1 = 0x00000100, DMAC_M_RINT8 = 0x00000080,
+ DMAC_M_RINT5 = 0x00000010, DMAC_M_RINT4 = 0x00000008,
+ DMAC_M_RINT3 = 0x00000004, DMAC_M_RINT2 = 0x00000002,
+ DMAC_M_RINT1 = 0x00000001,
+};
+
+/* Receive descriptor bit */
+enum RD_STS_BIT {
+ RD_RACT = 0x80000000, RD_RDEL = 0x40000000,
+ RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000,
+ RD_RFE = 0x08000000, RD_RFS10 = 0x00000200,
+ RD_RFS9 = 0x00000100, RD_RFS8 = 0x00000080,
+ RD_RFS7 = 0x00000040, RD_RFS6 = 0x00000020,
+ RD_RFS5 = 0x00000010, RD_RFS4 = 0x00000008,
+ RD_RFS3 = 0x00000004, RD_RFS2 = 0x00000002,
+ RD_RFS1 = 0x00000001,
+};
+#define RDF1ST RD_RFP1
+#define RDFEND RD_RFP0
+#define RD_RFP (RD_RFP1|RD_RFP0)
+
+/* FCFTR */
+enum FCFTR_BIT {
+ FCFTR_RFF2 = 0x00040000, FCFTR_RFF1 = 0x00020000,
+ FCFTR_RFF0 = 0x00010000, FCFTR_RFD2 = 0x00000004,
+ FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001,
+};
+#define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0)
+#define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0)
+
+/* Transfer descriptor bit */
+enum TD_STS_BIT {
+ TD_TACT = 0x80000000,
+ TD_TDLE = 0x40000000, TD_TFP1 = 0x20000000,
+ TD_TFP0 = 0x10000000,
+};
+#define TDF1ST TD_TFP1
+#define TDFEND TD_TFP0
+#define TD_TFP (TD_TFP1|TD_TFP0)
+
+/* RMCR */
+#define DEFAULT_RMCR_VALUE 0x00000000
+
+/* ECMR */
+enum FELIC_MODE_BIT {
+ ECMR_TRCCM = 0x04000000, ECMR_RCSC = 0x00800000,
+ ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000,
+ ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000,
+ ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000,
+ ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
+ ECMR_RTM = 0x00000010, ECMR_ILB = 0x00000008, ECMR_ELB = 0x00000004,
+ ECMR_DM = 0x00000002, ECMR_PRM = 0x00000001,
+};
+
+/* ECSR */
+enum ECSR_STATUS_BIT {
+ ECSR_BRCRX = 0x20, ECSR_PSRTO = 0x10,
+ ECSR_LCHNG = 0x04,
+ ECSR_MPD = 0x02, ECSR_ICD = 0x01,
+};
+
+#define DEFAULT_ECSR_INIT (ECSR_BRCRX | ECSR_PSRTO | ECSR_LCHNG | \
+ ECSR_ICD | ECSIPR_MPDIP)
+
+/* ECSIPR */
+enum ECSIPR_STATUS_MASK_BIT {
+ ECSIPR_BRCRXIP = 0x20, ECSIPR_PSRTOIP = 0x10,
+ ECSIPR_LCHNGIP = 0x04,
+ ECSIPR_MPDIP = 0x02, ECSIPR_ICDIP = 0x01,
+};
+
+#define DEFAULT_ECSIPR_INIT (ECSIPR_BRCRXIP | ECSIPR_PSRTOIP | \
+ ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP)
+
+/* APR */
+enum APR_BIT {
+ APR_AP = 0x00000001,
+};
+
+/* MPR */
+enum MPR_BIT {
+ MPR_MP = 0x00000001,
+};
+
+/* TRSCER */
+enum DESC_I_BIT {
+ DESC_I_TINT4 = 0x0800, DESC_I_TINT3 = 0x0400, DESC_I_TINT2 = 0x0200,
+ DESC_I_TINT1 = 0x0100, DESC_I_RINT8 = 0x0080, DESC_I_RINT5 = 0x0010,
+ DESC_I_RINT4 = 0x0008, DESC_I_RINT3 = 0x0004, DESC_I_RINT2 = 0x0002,
+ DESC_I_RINT1 = 0x0001,
+};
+
+/* RPADIR */
+enum RPADIR_BIT {
+ RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000,
+ RPADIR_PADR = 0x0003f,
+};
+
+/* RFLR */
+#define RFLR_VALUE 0x1000
+
+/* FDR */
+#define DEFAULT_FDR_INIT 0x00000707
+
+enum phy_offsets {
+ PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3,
+ PHY_ANA = 4, PHY_ANL = 5, PHY_ANE = 6,
+ PHY_16 = 16,
+};
+
+/* PHY_CTRL */
+enum PHY_CTRL_BIT {
+ PHY_C_RESET = 0x8000, PHY_C_LOOPBK = 0x4000, PHY_C_SPEEDSL = 0x2000,
+ PHY_C_ANEGEN = 0x1000, PHY_C_PWRDN = 0x0800, PHY_C_ISO = 0x0400,
+ PHY_C_RANEG = 0x0200, PHY_C_DUPLEX = 0x0100, PHY_C_COLT = 0x0080,
+};
+#define DM9161_PHY_C_ANEGEN 0 /* autonegotiation special case */
+
+/* PHY_STAT */
+enum PHY_STAT_BIT {
+ PHY_S_100T4 = 0x8000, PHY_S_100X_F = 0x4000, PHY_S_100X_H = 0x2000,
+ PHY_S_10T_F = 0x1000, PHY_S_10T_H = 0x0800, PHY_S_ANEGC = 0x0020,
+ PHY_S_RFAULT = 0x0010, PHY_S_ANEGA = 0x0008, PHY_S_LINK = 0x0004,
+ PHY_S_JAB = 0x0002, PHY_S_EXTD = 0x0001,
+};
+
+/* PHY_ANA */
+enum PHY_ANA_BIT {
+ PHY_A_NP = 0x8000, PHY_A_ACK = 0x4000, PHY_A_RF = 0x2000,
+ PHY_A_FCS = 0x0400, PHY_A_T4 = 0x0200, PHY_A_FDX = 0x0100,
+ PHY_A_HDX = 0x0080, PHY_A_10FDX = 0x0040, PHY_A_10HDX = 0x0020,
+ PHY_A_SEL = 0x001e,
+};
+/* PHY_ANL */
+enum PHY_ANL_BIT {
+ PHY_L_NP = 0x8000, PHY_L_ACK = 0x4000, PHY_L_RF = 0x2000,
+ PHY_L_FCS = 0x0400, PHY_L_T4 = 0x0200, PHY_L_FDX = 0x0100,
+ PHY_L_HDX = 0x0080, PHY_L_10FDX = 0x0040, PHY_L_10HDX = 0x0020,
+ PHY_L_SEL = 0x001f,
+};
+
+/* PHY_ANE */
+enum PHY_ANE_BIT {
+ PHY_E_PDF = 0x0010, PHY_E_LPNPA = 0x0008, PHY_E_NPA = 0x0004,
+ PHY_E_PRX = 0x0002, PHY_E_LPANEGA = 0x0001,
+};
+
+/* DM9161 */
+enum PHY_16_BIT {
+ PHY_16_BP4B45 = 0x8000, PHY_16_BPSCR = 0x4000, PHY_16_BPALIGN = 0x2000,
+ PHY_16_BP_ADPOK = 0x1000, PHY_16_Repeatmode = 0x0800,
+ PHY_16_TXselect = 0x0400,
+ PHY_16_Rsvd = 0x0200, PHY_16_RMIIEnable = 0x0100,
+ PHY_16_Force100LNK = 0x0080,
+ PHY_16_APDLED_CTL = 0x0040, PHY_16_COLLED_CTL = 0x0020,
+ PHY_16_RPDCTR_EN = 0x0010,
+ PHY_16_ResetStMch = 0x0008, PHY_16_PreamSupr = 0x0004,
+ PHY_16_Sleepmode = 0x0002,
+ PHY_16_RemoteLoopOut = 0x0001,
+};
+
+#define POST_RX 0x08
+#define POST_FW 0x04
+#define POST0_RX (POST_RX)
+#define POST0_FW (POST_FW)
+#define POST1_RX (POST_RX >> 2)
+#define POST1_FW (POST_FW >> 2)
+#define POST_ALL (POST0_RX | POST0_FW | POST1_RX | POST1_FW)
+
+/* ARSTR */
+enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, };
+
+/* TSU_FWEN0 */
+enum TSU_FWEN0_BIT {
+ TSU_FWEN0_0 = 0x00000001,
+};
+
+/* TSU_ADSBSY */
+enum TSU_ADSBSY_BIT {
+ TSU_ADSBSY_0 = 0x00000001,
+};
+
+/* TSU_TEN */
+enum TSU_TEN_BIT {
+ TSU_TEN_0 = 0x80000000,
+};
+
+/* TSU_FWSL0 */
+enum TSU_FWSL0_BIT {
+ TSU_FWSL0_FW50 = 0x1000, TSU_FWSL0_FW40 = 0x0800,
+ TSU_FWSL0_FW30 = 0x0400, TSU_FWSL0_FW20 = 0x0200,
+ TSU_FWSL0_FW10 = 0x0100, TSU_FWSL0_RMSA0 = 0x0010,
+};
+
+/* TSU_FWSLC */
+enum TSU_FWSLC_BIT {
+ TSU_FWSLC_POSTENU = 0x2000, TSU_FWSLC_POSTENL = 0x1000,
+ TSU_FWSLC_CAMSEL03 = 0x0080, TSU_FWSLC_CAMSEL02 = 0x0040,
+ TSU_FWSLC_CAMSEL01 = 0x0020, TSU_FWSLC_CAMSEL00 = 0x0010,
+ TSU_FWSLC_CAMSEL13 = 0x0008, TSU_FWSLC_CAMSEL12 = 0x0004,
+ TSU_FWSLC_CAMSEL11 = 0x0002, TSU_FWSLC_CAMSEL10 = 0x0001,
+};
+
+/*
+ * The sh ether Tx buffer descriptors.
+ * This structure should be 20 bytes.
+ */
+struct sh_eth_txdesc {
+ u32 status; /* TD0 */
+#if defined(CONFIG_CPU_LITTLE_ENDIAN)
+ u16 pad0; /* TD1 */
+ u16 buffer_length; /* TD1 */
+#else
+ u16 buffer_length; /* TD1 */
+ u16 pad0; /* TD1 */
+#endif
+ u32 addr; /* TD2 */
+ u32 pad1; /* padding data */
+} __attribute__((aligned(2), packed));
+
+/*
+ * The sh ether Rx buffer descriptors.
+ * This structure should be 20 bytes.
+ */
+struct sh_eth_rxdesc {
+ u32 status; /* RD0 */
+#if defined(CONFIG_CPU_LITTLE_ENDIAN)
+ u16 frame_length; /* RD1 */
+ u16 buffer_length; /* RD1 */
+#else
+ u16 buffer_length; /* RD1 */
+ u16 frame_length; /* RD1 */
+#endif
+ u32 addr; /* RD2 */
+ u32 pad0; /* padding data */
+} __attribute__((aligned(2), packed));
+
+/* Per-CPU-variant data and operations. */
+struct sh_eth_cpu_data {
+ /* optional functions */
+ void (*chip_reset)(struct net_device *ndev);
+ void (*set_duplex)(struct net_device *ndev);
+ void (*set_rate)(struct net_device *ndev);
+
+ /* mandatory initial value */
+ unsigned long eesipr_value;
+
+ /* optional initial values */
+ unsigned long ecsr_value;
+ unsigned long ecsipr_value;
+ unsigned long fdr_value;
+ unsigned long fcftr_value;
+ unsigned long rpadir_value;
+ unsigned long rmcr_value;
+
+ /* interrupt checking mask */
+ unsigned long tx_check;
+ unsigned long eesr_err_check;
+ unsigned long tx_error_check;
+
+ /* hardware features */
+ unsigned no_psr:1; /* EtherC does not have PSR */
+ unsigned apr:1; /* EtherC has APR */
+ unsigned mpr:1; /* EtherC has MPR */
+ unsigned tpauser:1; /* EtherC has TPAUSER */
+ unsigned bculr:1; /* EtherC has BCULR */
+ unsigned tsu:1; /* EtherC has TSU */
+ unsigned hw_swap:1; /* E-DMAC has DE bit in EDMR */
+ unsigned rpadir:1; /* E-DMAC has RPADIR */
+ unsigned no_trimd:1; /* E-DMAC does not have TRIMD */
+ unsigned no_ade:1; /* E-DMAC does not have ADE bit in EESR */
+};
+
+struct sh_eth_private {
+ struct platform_device *pdev;
+ struct sh_eth_cpu_data *cd;
+ const u16 *reg_offset;
+ void __iomem *addr;
+ void __iomem *tsu_addr;
+ dma_addr_t rx_desc_dma;
+ dma_addr_t tx_desc_dma;
+ struct sh_eth_rxdesc *rx_ring;
+ struct sh_eth_txdesc *tx_ring;
+ struct sk_buff **rx_skbuff;
+ struct sk_buff **tx_skbuff;
+ struct net_device_stats stats;
+ struct timer_list timer;
+ spinlock_t lock;
+ u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ u32 cur_tx, dirty_tx;
+ u32 rx_buf_sz; /* Based on MTU+slack. */
+ int edmac_endian;
+ /* MII transceiver section. */
+ u32 phy_id; /* PHY ID */
+ struct mii_bus *mii_bus; /* MDIO bus control */
+ struct phy_device *phydev; /* PHY device control */
+ enum phy_state link;
+ phy_interface_t phy_interface;
+ int msg_enable;
+ int speed;
+ int duplex;
+ u32 rx_int_var, tx_int_var; /* interrupt control variables */
+ char post_rx; /* POST receive */
+ char post_fw; /* POST forward */
+ struct net_device_stats tsu_stats; /* TSU forward status */
+
+ unsigned no_ether_link:1;
+ unsigned ether_link_active_low:1;
+};
+
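+/* Byte-swap a buffer in place on little-endian builds; used by variants
+ * whose E-DMAC cannot swap descriptor data in hardware (!cd->hw_swap). */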
+static inline void sh_eth_soft_swap(char *src, int len)
+{
+#ifdef __LITTLE_ENDIAN__
+ u32 *p = (u32 *)src;
+ u32 *maxp;
+ maxp = p + ((len + sizeof(u32) - 1) / sizeof(u32));
+
+ for (; p < maxp; p++)
+ *p = swab32(*p);
+#endif
+}
+
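+/* Register accessors: translate a generic register index through the
+ * per-variant offset table before touching the mapped I/O region. */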
+static inline void sh_eth_write(struct net_device *ndev, unsigned long data,
+ int enum_index)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ iowrite32(data, mdp->addr + mdp->reg_offset[enum_index]);
+}
+
+static inline unsigned long sh_eth_read(struct net_device *ndev,
+ int enum_index)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ return ioread32(mdp->addr + mdp->reg_offset[enum_index]);
+}
+
+static inline void sh_eth_tsu_write(struct sh_eth_private *mdp,
+ unsigned long data, int enum_index)
+{
+ iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
+}
+
+static inline unsigned long sh_eth_tsu_read(struct sh_eth_private *mdp,
+ int enum_index)
+{
+ return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
+}
+
+#endif /* #ifndef __SH_ETH_H__ */
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
new file mode 100644
index 000000000000..a7ff8ea342b4
--- /dev/null
+++ b/drivers/net/ethernet/s6gmac.c
@@ -0,0 +1,1072 @@
+/*
+ * Ethernet driver for S6105 on chip network device
+ * (c)2008 emlix GmbH http://www.emlix.com
+ * Authors: Oskar Schirmer <os@emlix.com>
+ * Daniel Gloeckner <dg@emlix.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <linux/stddef.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <variant/hardware.h>
+#include <variant/dmac.h>
+
+#define DRV_NAME "s6gmac"
+#define DRV_PRMT DRV_NAME ": "
+
+
+/* register declarations */
+
+#define S6_GMAC_MACCONF1 0x000
+#define S6_GMAC_MACCONF1_TXENA 0
+#define S6_GMAC_MACCONF1_SYNCTX 1
+#define S6_GMAC_MACCONF1_RXENA 2
+#define S6_GMAC_MACCONF1_SYNCRX 3
+#define S6_GMAC_MACCONF1_TXFLOWCTRL 4
+#define S6_GMAC_MACCONF1_RXFLOWCTRL 5
+#define S6_GMAC_MACCONF1_LOOPBACK 8
+#define S6_GMAC_MACCONF1_RESTXFUNC 16
+#define S6_GMAC_MACCONF1_RESRXFUNC 17
+#define S6_GMAC_MACCONF1_RESTXMACCTRL 18
+#define S6_GMAC_MACCONF1_RESRXMACCTRL 19
+#define S6_GMAC_MACCONF1_SIMULRES 30
+#define S6_GMAC_MACCONF1_SOFTRES 31
+#define S6_GMAC_MACCONF2 0x004
+#define S6_GMAC_MACCONF2_FULL 0
+#define S6_GMAC_MACCONF2_CRCENA 1
+#define S6_GMAC_MACCONF2_PADCRCENA 2
+#define S6_GMAC_MACCONF2_LENGTHFCHK 4
+#define S6_GMAC_MACCONF2_HUGEFRAMENA 5
+#define S6_GMAC_MACCONF2_IFMODE 8
+#define S6_GMAC_MACCONF2_IFMODE_NIBBLE 1
+#define S6_GMAC_MACCONF2_IFMODE_BYTE 2
+#define S6_GMAC_MACCONF2_IFMODE_MASK 3
+#define S6_GMAC_MACCONF2_PREAMBLELEN 12
+#define S6_GMAC_MACCONF2_PREAMBLELEN_MASK 0x0F
+#define S6_GMAC_MACIPGIFG 0x008
+#define S6_GMAC_MACIPGIFG_B2BINTERPGAP 0
+#define S6_GMAC_MACIPGIFG_B2BINTERPGAP_MASK 0x7F
+#define S6_GMAC_MACIPGIFG_MINIFGENFORCE 8
+#define S6_GMAC_MACIPGIFG_B2BINTERPGAP2 16
+#define S6_GMAC_MACIPGIFG_B2BINTERPGAP1 24
+#define S6_GMAC_MACHALFDUPLEX 0x00C
+#define S6_GMAC_MACHALFDUPLEX_COLLISWIN 0
+#define S6_GMAC_MACHALFDUPLEX_COLLISWIN_MASK 0x3F
+#define S6_GMAC_MACHALFDUPLEX_RETXMAX 12
+#define S6_GMAC_MACHALFDUPLEX_RETXMAX_MASK 0x0F
+#define S6_GMAC_MACHALFDUPLEX_EXCESSDEF 16
+#define S6_GMAC_MACHALFDUPLEX_NOBACKOFF 17
+#define S6_GMAC_MACHALFDUPLEX_BPNOBCKOF 18
+#define S6_GMAC_MACHALFDUPLEX_ALTBEBENA 19
+#define S6_GMAC_MACHALFDUPLEX_ALTBEBTRN 20
+#define S6_GMAC_MACHALFDUPLEX_ALTBEBTR_MASK 0x0F
+#define S6_GMAC_MACMAXFRAMELEN 0x010
+#define S6_GMAC_MACMIICONF 0x020
+#define S6_GMAC_MACMIICONF_CSEL 0
+#define S6_GMAC_MACMIICONF_CSEL_DIV10 0
+#define S6_GMAC_MACMIICONF_CSEL_DIV12 1
+#define S6_GMAC_MACMIICONF_CSEL_DIV14 2
+#define S6_GMAC_MACMIICONF_CSEL_DIV18 3
+#define S6_GMAC_MACMIICONF_CSEL_DIV24 4
+#define S6_GMAC_MACMIICONF_CSEL_DIV34 5
+#define S6_GMAC_MACMIICONF_CSEL_DIV68 6
+#define S6_GMAC_MACMIICONF_CSEL_DIV168 7
+#define S6_GMAC_MACMIICONF_CSEL_MASK 7
+#define S6_GMAC_MACMIICONF_PREAMBLESUPR 4
+#define S6_GMAC_MACMIICONF_SCANAUTOINCR 5
+#define S6_GMAC_MACMIICMD 0x024
+#define S6_GMAC_MACMIICMD_READ 0
+#define S6_GMAC_MACMIICMD_SCAN 1
+#define S6_GMAC_MACMIIADDR 0x028
+#define S6_GMAC_MACMIIADDR_REG 0
+#define S6_GMAC_MACMIIADDR_REG_MASK 0x1F
+#define S6_GMAC_MACMIIADDR_PHY 8
+#define S6_GMAC_MACMIIADDR_PHY_MASK 0x1F
+#define S6_GMAC_MACMIICTRL 0x02C
+#define S6_GMAC_MACMIISTAT 0x030
+#define S6_GMAC_MACMIIINDI 0x034
+#define S6_GMAC_MACMIIINDI_BUSY 0
+#define S6_GMAC_MACMIIINDI_SCAN 1
+#define S6_GMAC_MACMIIINDI_INVAL 2
+#define S6_GMAC_MACINTERFSTAT 0x03C
+#define S6_GMAC_MACINTERFSTAT_LINKFAIL 3
+#define S6_GMAC_MACINTERFSTAT_EXCESSDEF 9
+#define S6_GMAC_MACSTATADDR1 0x040
+#define S6_GMAC_MACSTATADDR2 0x044
+
+#define S6_GMAC_FIFOCONF0 0x048
+#define S6_GMAC_FIFOCONF0_HSTRSTWT 0
+#define S6_GMAC_FIFOCONF0_HSTRSTSR 1
+#define S6_GMAC_FIFOCONF0_HSTRSTFR 2
+#define S6_GMAC_FIFOCONF0_HSTRSTST 3
+#define S6_GMAC_FIFOCONF0_HSTRSTFT 4
+#define S6_GMAC_FIFOCONF0_WTMENREQ 8
+#define S6_GMAC_FIFOCONF0_SRFENREQ 9
+#define S6_GMAC_FIFOCONF0_FRFENREQ 10
+#define S6_GMAC_FIFOCONF0_STFENREQ 11
+#define S6_GMAC_FIFOCONF0_FTFENREQ 12
+#define S6_GMAC_FIFOCONF0_WTMENRPLY 16
+#define S6_GMAC_FIFOCONF0_SRFENRPLY 17
+#define S6_GMAC_FIFOCONF0_FRFENRPLY 18
+#define S6_GMAC_FIFOCONF0_STFENRPLY 19
+#define S6_GMAC_FIFOCONF0_FTFENRPLY 20
+#define S6_GMAC_FIFOCONF1 0x04C
+#define S6_GMAC_FIFOCONF2 0x050
+#define S6_GMAC_FIFOCONF2_CFGLWM 0
+#define S6_GMAC_FIFOCONF2_CFGHWM 16
+#define S6_GMAC_FIFOCONF3 0x054
+#define S6_GMAC_FIFOCONF3_CFGFTTH 0
+#define S6_GMAC_FIFOCONF3_CFGHWMFT 16
+#define S6_GMAC_FIFOCONF4 0x058
+#define S6_GMAC_FIFOCONF_RSV_PREVDROP 0
+#define S6_GMAC_FIFOCONF_RSV_RUNT 1
+#define S6_GMAC_FIFOCONF_RSV_FALSECAR 2
+#define S6_GMAC_FIFOCONF_RSV_CODEERR 3
+#define S6_GMAC_FIFOCONF_RSV_CRCERR 4
+#define S6_GMAC_FIFOCONF_RSV_LENGTHERR 5
+#define S6_GMAC_FIFOCONF_RSV_LENRANGE 6
+#define S6_GMAC_FIFOCONF_RSV_OK 7
+#define S6_GMAC_FIFOCONF_RSV_MULTICAST 8
+#define S6_GMAC_FIFOCONF_RSV_BROADCAST 9
+#define S6_GMAC_FIFOCONF_RSV_DRIBBLE 10
+#define S6_GMAC_FIFOCONF_RSV_CTRLFRAME 11
+#define S6_GMAC_FIFOCONF_RSV_PAUSECTRL 12
+#define S6_GMAC_FIFOCONF_RSV_UNOPCODE 13
+#define S6_GMAC_FIFOCONF_RSV_VLANTAG 14
+#define S6_GMAC_FIFOCONF_RSV_LONGEVENT 15
+#define S6_GMAC_FIFOCONF_RSV_TRUNCATED 16
+#define S6_GMAC_FIFOCONF_RSV_MASK 0x3FFFF
+#define S6_GMAC_FIFOCONF5 0x05C
+#define S6_GMAC_FIFOCONF5_DROPLT64 18
+#define S6_GMAC_FIFOCONF5_CFGBYTM 19
+#define S6_GMAC_FIFOCONF5_RXDROPSIZE 20
+#define S6_GMAC_FIFOCONF5_RXDROPSIZE_MASK 0xF
+
+#define S6_GMAC_STAT_REGS 0x080
+#define S6_GMAC_STAT_SIZE_MIN 12
+#define S6_GMAC_STATTR64 0x080
+#define S6_GMAC_STATTR64_SIZE 18
+#define S6_GMAC_STATTR127 0x084
+#define S6_GMAC_STATTR127_SIZE 18
+#define S6_GMAC_STATTR255 0x088
+#define S6_GMAC_STATTR255_SIZE 18
+#define S6_GMAC_STATTR511 0x08C
+#define S6_GMAC_STATTR511_SIZE 18
+#define S6_GMAC_STATTR1K 0x090
+#define S6_GMAC_STATTR1K_SIZE 18
+#define S6_GMAC_STATTRMAX 0x094
+#define S6_GMAC_STATTRMAX_SIZE 18
+#define S6_GMAC_STATTRMGV 0x098
+#define S6_GMAC_STATTRMGV_SIZE 18
+#define S6_GMAC_STATRBYT 0x09C
+#define S6_GMAC_STATRBYT_SIZE 24
+#define S6_GMAC_STATRPKT 0x0A0
+#define S6_GMAC_STATRPKT_SIZE 18
+#define S6_GMAC_STATRFCS 0x0A4
+#define S6_GMAC_STATRFCS_SIZE 12
+#define S6_GMAC_STATRMCA 0x0A8
+#define S6_GMAC_STATRMCA_SIZE 18
+#define S6_GMAC_STATRBCA 0x0AC
+#define S6_GMAC_STATRBCA_SIZE 22
+#define S6_GMAC_STATRXCF 0x0B0
+#define S6_GMAC_STATRXCF_SIZE 18
+#define S6_GMAC_STATRXPF 0x0B4
+#define S6_GMAC_STATRXPF_SIZE 12
+#define S6_GMAC_STATRXUO 0x0B8
+#define S6_GMAC_STATRXUO_SIZE 12
+#define S6_GMAC_STATRALN 0x0BC
+#define S6_GMAC_STATRALN_SIZE 12
+#define S6_GMAC_STATRFLR 0x0C0
+#define S6_GMAC_STATRFLR_SIZE 16
+#define S6_GMAC_STATRCDE 0x0C4
+#define S6_GMAC_STATRCDE_SIZE 12
+#define S6_GMAC_STATRCSE 0x0C8
+#define S6_GMAC_STATRCSE_SIZE 12
+#define S6_GMAC_STATRUND 0x0CC
+#define S6_GMAC_STATRUND_SIZE 12
+#define S6_GMAC_STATROVR 0x0D0
+#define S6_GMAC_STATROVR_SIZE 12
+#define S6_GMAC_STATRFRG 0x0D4
+#define S6_GMAC_STATRFRG_SIZE 12
+#define S6_GMAC_STATRJBR 0x0D8
+#define S6_GMAC_STATRJBR_SIZE 12
+#define S6_GMAC_STATRDRP 0x0DC
+#define S6_GMAC_STATRDRP_SIZE 12
+#define S6_GMAC_STATTBYT 0x0E0
+#define S6_GMAC_STATTBYT_SIZE 24
+#define S6_GMAC_STATTPKT 0x0E4
+#define S6_GMAC_STATTPKT_SIZE 18
+#define S6_GMAC_STATTMCA 0x0E8
+#define S6_GMAC_STATTMCA_SIZE 18
+#define S6_GMAC_STATTBCA 0x0EC
+#define S6_GMAC_STATTBCA_SIZE 18
+#define S6_GMAC_STATTXPF 0x0F0
+#define S6_GMAC_STATTXPF_SIZE 12
+#define S6_GMAC_STATTDFR 0x0F4
+#define S6_GMAC_STATTDFR_SIZE 12
+#define S6_GMAC_STATTEDF 0x0F8
+#define S6_GMAC_STATTEDF_SIZE 12
+#define S6_GMAC_STATTSCL 0x0FC
+#define S6_GMAC_STATTSCL_SIZE 12
+#define S6_GMAC_STATTMCL 0x100
+#define S6_GMAC_STATTMCL_SIZE 12
+#define S6_GMAC_STATTLCL 0x104
+#define S6_GMAC_STATTLCL_SIZE 12
+#define S6_GMAC_STATTXCL 0x108
+#define S6_GMAC_STATTXCL_SIZE 12
+#define S6_GMAC_STATTNCL 0x10C
+#define S6_GMAC_STATTNCL_SIZE 13
+#define S6_GMAC_STATTPFH 0x110
+#define S6_GMAC_STATTPFH_SIZE 12
+#define S6_GMAC_STATTDRP 0x114
+#define S6_GMAC_STATTDRP_SIZE 12
+#define S6_GMAC_STATTJBR 0x118
+#define S6_GMAC_STATTJBR_SIZE 12
+#define S6_GMAC_STATTFCS 0x11C
+#define S6_GMAC_STATTFCS_SIZE 12
+#define S6_GMAC_STATTXCF 0x120
+#define S6_GMAC_STATTXCF_SIZE 12
+#define S6_GMAC_STATTOVR 0x124
+#define S6_GMAC_STATTOVR_SIZE 12
+#define S6_GMAC_STATTUND 0x128
+#define S6_GMAC_STATTUND_SIZE 12
+#define S6_GMAC_STATTFRG 0x12C
+#define S6_GMAC_STATTFRG_SIZE 12
+#define S6_GMAC_STATCARRY(n) (0x130 + 4*(n))
+#define S6_GMAC_STATCARRYMSK(n) (0x138 + 4*(n))
+#define S6_GMAC_STATCARRY1_RDRP 0
+#define S6_GMAC_STATCARRY1_RJBR 1
+#define S6_GMAC_STATCARRY1_RFRG 2
+#define S6_GMAC_STATCARRY1_ROVR 3
+#define S6_GMAC_STATCARRY1_RUND 4
+#define S6_GMAC_STATCARRY1_RCSE 5
+#define S6_GMAC_STATCARRY1_RCDE 6
+#define S6_GMAC_STATCARRY1_RFLR 7
+#define S6_GMAC_STATCARRY1_RALN 8
+#define S6_GMAC_STATCARRY1_RXUO 9
+#define S6_GMAC_STATCARRY1_RXPF 10
+#define S6_GMAC_STATCARRY1_RXCF 11
+#define S6_GMAC_STATCARRY1_RBCA 12
+#define S6_GMAC_STATCARRY1_RMCA 13
+#define S6_GMAC_STATCARRY1_RFCS 14
+#define S6_GMAC_STATCARRY1_RPKT 15
+#define S6_GMAC_STATCARRY1_RBYT 16
+#define S6_GMAC_STATCARRY1_TRMGV 25
+#define S6_GMAC_STATCARRY1_TRMAX 26
+#define S6_GMAC_STATCARRY1_TR1K 27
+#define S6_GMAC_STATCARRY1_TR511 28
+#define S6_GMAC_STATCARRY1_TR255 29
+#define S6_GMAC_STATCARRY1_TR127 30
+#define S6_GMAC_STATCARRY1_TR64 31
+#define S6_GMAC_STATCARRY2_TDRP 0
+#define S6_GMAC_STATCARRY2_TPFH 1
+#define S6_GMAC_STATCARRY2_TNCL 2
+#define S6_GMAC_STATCARRY2_TXCL 3
+#define S6_GMAC_STATCARRY2_TLCL 4
+#define S6_GMAC_STATCARRY2_TMCL 5
+#define S6_GMAC_STATCARRY2_TSCL 6
+#define S6_GMAC_STATCARRY2_TEDF 7
+#define S6_GMAC_STATCARRY2_TDFR 8
+#define S6_GMAC_STATCARRY2_TXPF 9
+#define S6_GMAC_STATCARRY2_TBCA 10
+#define S6_GMAC_STATCARRY2_TMCA 11
+#define S6_GMAC_STATCARRY2_TPKT 12
+#define S6_GMAC_STATCARRY2_TBYT 13
+#define S6_GMAC_STATCARRY2_TFRG 14
+#define S6_GMAC_STATCARRY2_TUND 15
+#define S6_GMAC_STATCARRY2_TOVR 16
+#define S6_GMAC_STATCARRY2_TXCF 17
+#define S6_GMAC_STATCARRY2_TFCS 18
+#define S6_GMAC_STATCARRY2_TJBR 19
+
+#define S6_GMAC_HOST_PBLKCTRL 0x140
+#define S6_GMAC_HOST_PBLKCTRL_TXENA 0
+#define S6_GMAC_HOST_PBLKCTRL_RXENA 1
+#define S6_GMAC_HOST_PBLKCTRL_TXSRES 2
+#define S6_GMAC_HOST_PBLKCTRL_RXSRES 3
+#define S6_GMAC_HOST_PBLKCTRL_TXBSIZ 8
+#define S6_GMAC_HOST_PBLKCTRL_RXBSIZ 12
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_16 4
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_32 5
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_64 6
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_128 7
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_MASK 0xF
+#define S6_GMAC_HOST_PBLKCTRL_STATENA 16
+#define S6_GMAC_HOST_PBLKCTRL_STATAUTOZ 17
+#define S6_GMAC_HOST_PBLKCTRL_STATCLEAR 18
+#define S6_GMAC_HOST_PBLKCTRL_RGMII 19
+#define S6_GMAC_HOST_INTMASK 0x144
+#define S6_GMAC_HOST_INTSTAT 0x148
+#define S6_GMAC_HOST_INT_TXBURSTOVER 3
+#define S6_GMAC_HOST_INT_TXPREWOVER 4
+#define S6_GMAC_HOST_INT_RXBURSTUNDER 5
+#define S6_GMAC_HOST_INT_RXPOSTRFULL 6
+#define S6_GMAC_HOST_INT_RXPOSTRUNDER 7
+#define S6_GMAC_HOST_RXFIFOHWM 0x14C
+#define S6_GMAC_HOST_CTRLFRAMXP 0x150
+#define S6_GMAC_HOST_DSTADDRLO(n) (0x160 + 8*(n))
+#define S6_GMAC_HOST_DSTADDRHI(n) (0x164 + 8*(n))
+#define S6_GMAC_HOST_DSTMASKLO(n) (0x180 + 8*(n))
+#define S6_GMAC_HOST_DSTMASKHI(n) (0x184 + 8*(n))
+
+#define S6_GMAC_BURST_PREWR 0x1B0
+#define S6_GMAC_BURST_PREWR_LEN 0
+#define S6_GMAC_BURST_PREWR_LEN_MASK ((1 << 20) - 1)
+#define S6_GMAC_BURST_PREWR_CFE 20
+#define S6_GMAC_BURST_PREWR_PPE 21
+#define S6_GMAC_BURST_PREWR_FCS 22
+#define S6_GMAC_BURST_PREWR_PAD 23
+#define S6_GMAC_BURST_POSTRD 0x1D0
+#define S6_GMAC_BURST_POSTRD_LEN 0
+#define S6_GMAC_BURST_POSTRD_LEN_MASK ((1 << 20) - 1)
+#define S6_GMAC_BURST_POSTRD_DROP 20
+
+
+/* data handling */
+
+#define S6_NUM_TX_SKB 8 /* must be larger than TX fifo size */
+#define S6_NUM_RX_SKB 16
+#define S6_MAX_FRLEN 1536
+
+struct s6gmac {
+ u32 reg;
+ u32 tx_dma;
+ u32 rx_dma;
+ u32 io;
+ u8 tx_chan;
+ u8 rx_chan;
+ spinlock_t lock;
+ u8 tx_skb_i, tx_skb_o;
+ u8 rx_skb_i, rx_skb_o;
+ struct sk_buff *tx_skb[S6_NUM_TX_SKB];
+ struct sk_buff *rx_skb[S6_NUM_RX_SKB];
+ unsigned long carry[sizeof(struct net_device_stats) / sizeof(long)];
+ unsigned long stats[sizeof(struct net_device_stats) / sizeof(long)];
+ struct phy_device *phydev;
+ struct {
+ struct mii_bus *bus;
+ int irq[PHY_MAX_ADDR];
+ } mii;
+ struct {
+ unsigned int mbit;
+ u8 giga;
+ u8 isup;
+ u8 full;
+ } link;
+};
+
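+/*
+ * The skb rings use free-running u8 producer/consumer indices
+ * (tx_skb_i/tx_skb_o, rx_skb_i/rx_skb_o).  The number of entries in
+ * flight is the wrapping difference (u8)(i - o), which stays correct
+ * across the 256 rollover because both ring sizes divide 256 evenly;
+ * e.g. o == 250 and i == 3 gives (u8)(3 - 250) == 9 entries in flight.
+ * Slots are addressed as index % S6_NUM_TX_SKB (or % S6_NUM_RX_SKB).
+ */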
+static void s6gmac_rx_fillfifo(struct s6gmac *pd)
+{
+ struct sk_buff *skb;
+ while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB) &&
+ (!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan)) &&
+ (skb = dev_alloc_skb(S6_MAX_FRLEN + 2))) {
+ pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb;
+ s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan,
+ pd->io, (u32)skb->data, S6_MAX_FRLEN);
+ }
+}
+
+static void s6gmac_rx_interrupt(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ u32 pfx;
+ struct sk_buff *skb;
+ while (((u8)(pd->rx_skb_i - pd->rx_skb_o)) >
+ s6dmac_pending_count(pd->rx_dma, pd->rx_chan)) {
+ skb = pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB];
+ pfx = readl(pd->reg + S6_GMAC_BURST_POSTRD);
+ if (pfx & (1 << S6_GMAC_BURST_POSTRD_DROP)) {
+ dev_kfree_skb_irq(skb);
+ } else {
+ skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN)
+ & S6_GMAC_BURST_POSTRD_LEN_MASK);
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_rx(skb);
+ }
+ }
+}
+
+static void s6gmac_tx_interrupt(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ while (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >
+ s6dmac_pending_count(pd->tx_dma, pd->tx_chan)) {
+ dev_kfree_skb_irq(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
+ }
+ if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
+ netif_wake_queue(dev);
+}
+
+struct s6gmac_statinf {
+ unsigned reg_size : 4; /* 0: unused */
+ unsigned reg_off : 6;
+ unsigned net_index : 6;
+};
+
+#define S6_STATS_B (8 * sizeof(u32))
+#define S6_STATS_C(b, r, f) [b] = { \
+ BUILD_BUG_ON_ZERO(r##_SIZE < S6_GMAC_STAT_SIZE_MIN) + \
+ BUILD_BUG_ON_ZERO((r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1)) \
+ >= (1<<4)) + \
+ r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1), \
+ BUILD_BUG_ON_ZERO(((unsigned)((r - S6_GMAC_STAT_REGS) / sizeof(u32))) \
+ >= ((1<<6)-1)) + \
+ (r - S6_GMAC_STAT_REGS) / sizeof(u32), \
+ BUILD_BUG_ON_ZERO((offsetof(struct net_device_stats, f)) \
+ % sizeof(unsigned long)) + \
+ BUILD_BUG_ON_ZERO((((unsigned)(offsetof(struct net_device_stats, f)) \
+ / sizeof(unsigned long)) >= (1<<6))) + \
+ BUILD_BUG_ON_ZERO((sizeof(((struct net_device_stats *)0)->f) \
+ != sizeof(unsigned long))) + \
+ (offsetof(struct net_device_stats, f)) / sizeof(unsigned long)},
+
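+/*
+ * BUILD_BUG_ON_ZERO(e) evaluates to 0 when e is false and breaks the
+ * build otherwise, so the range checks in S6_STATS_C can simply be
+ * added into the constant initializers without changing their value:
+ * once it compiles, BUILD_BUG_ON_ZERO(x >= (1 << 4)) + x is just x.
+ * reg_size is stored biased down by (S6_GMAC_STAT_SIZE_MIN - 1) so
+ * that it fits the 4-bit bitfield, with 0 left free to mean "unused".
+ */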
+static const struct s6gmac_statinf statinf[2][S6_STATS_B] = { {
+ S6_STATS_C(S6_GMAC_STATCARRY1_RBYT, S6_GMAC_STATRBYT, rx_bytes)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RPKT, S6_GMAC_STATRPKT, rx_packets)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RFCS, S6_GMAC_STATRFCS, rx_crc_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RMCA, S6_GMAC_STATRMCA, multicast)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RALN, S6_GMAC_STATRALN, rx_frame_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RFLR, S6_GMAC_STATRFLR, rx_length_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RCDE, S6_GMAC_STATRCDE, rx_missed_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RUND, S6_GMAC_STATRUND, rx_length_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_ROVR, S6_GMAC_STATROVR, rx_length_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RFRG, S6_GMAC_STATRFRG, rx_crc_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RJBR, S6_GMAC_STATRJBR, rx_crc_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RDRP, S6_GMAC_STATRDRP, rx_dropped)
+}, {
+ S6_STATS_C(S6_GMAC_STATCARRY2_TBYT, S6_GMAC_STATTBYT, tx_bytes)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TPKT, S6_GMAC_STATTPKT, tx_packets)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TEDF, S6_GMAC_STATTEDF, tx_aborted_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TXCL, S6_GMAC_STATTXCL, tx_aborted_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TNCL, S6_GMAC_STATTNCL, collisions)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TDRP, S6_GMAC_STATTDRP, tx_dropped)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TJBR, S6_GMAC_STATTJBR, tx_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TFCS, S6_GMAC_STATTFCS, tx_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TOVR, S6_GMAC_STATTOVR, tx_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TUND, S6_GMAC_STATTUND, tx_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TFRG, S6_GMAC_STATTFRG, tx_errors)
+} };
+
+static void s6gmac_stats_collect(struct s6gmac *pd,
+ const struct s6gmac_statinf *inf)
+{
+ int b;
+ for (b = 0; b < S6_STATS_B; b++) {
+ if (inf[b].reg_size) {
+ pd->stats[inf[b].net_index] +=
+ readl(pd->reg + S6_GMAC_STAT_REGS
+ + sizeof(u32) * inf[b].reg_off);
+ }
+ }
+}
+
+static void s6gmac_stats_carry(struct s6gmac *pd,
+ const struct s6gmac_statinf *inf, u32 mask)
+{
+ int b;
+ while (mask) {
+ b = fls(mask) - 1;
+ mask &= ~(1 << b);
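+		/*
+		 * A set carry bit means this hardware counter wrapped,
+		 * i.e. 2^SIZE events elapsed.  Since reg_size holds SIZE
+		 * biased down by (S6_GMAC_STAT_SIZE_MIN - 1), the value
+		 * added here is the wrap count pre-scaled by the same
+		 * amount; s6gmac_stats() shifts carry[] back up before
+		 * adding the live counter registers on top.
+		 */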
+ pd->carry[inf[b].net_index] += (1 << inf[b].reg_size);
+ }
+}
+
+static inline u32 s6gmac_stats_pending(struct s6gmac *pd, int carry)
+{
+ int r = readl(pd->reg + S6_GMAC_STATCARRY(carry)) &
+ ~readl(pd->reg + S6_GMAC_STATCARRYMSK(carry));
+ return r;
+}
+
+static inline void s6gmac_stats_interrupt(struct s6gmac *pd, int carry)
+{
+ u32 mask;
+ mask = s6gmac_stats_pending(pd, carry);
+ if (mask) {
+ writel(mask, pd->reg + S6_GMAC_STATCARRY(carry));
+ s6gmac_stats_carry(pd, &statinf[carry][0], mask);
+ }
+}
+
+static irqreturn_t s6gmac_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct s6gmac *pd;
+	if (!dev)
+		return IRQ_NONE;
+	pd = netdev_priv(dev);
+ spin_lock(&pd->lock);
+ if (s6dmac_termcnt_irq(pd->rx_dma, pd->rx_chan))
+ s6gmac_rx_interrupt(dev);
+ s6gmac_rx_fillfifo(pd);
+ if (s6dmac_termcnt_irq(pd->tx_dma, pd->tx_chan))
+ s6gmac_tx_interrupt(dev);
+ s6gmac_stats_interrupt(pd, 0);
+ s6gmac_stats_interrupt(pd, 1);
+ spin_unlock(&pd->lock);
+ return IRQ_HANDLED;
+}
+
+static inline void s6gmac_set_dstaddr(struct s6gmac *pd, int n,
+ u32 addrlo, u32 addrhi, u32 masklo, u32 maskhi)
+{
+ writel(addrlo, pd->reg + S6_GMAC_HOST_DSTADDRLO(n));
+ writel(addrhi, pd->reg + S6_GMAC_HOST_DSTADDRHI(n));
+ writel(masklo, pd->reg + S6_GMAC_HOST_DSTMASKLO(n));
+ writel(maskhi, pd->reg + S6_GMAC_HOST_DSTMASKHI(n));
+}
+
+static inline void s6gmac_stop_device(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ writel(0, pd->reg + S6_GMAC_MACCONF1);
+}
+
+static inline void s6gmac_init_device(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ int is_rgmii = !!(pd->phydev->supported
+ & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half));
+#if 0
+ writel(1 << S6_GMAC_MACCONF1_SYNCTX |
+ 1 << S6_GMAC_MACCONF1_SYNCRX |
+ 1 << S6_GMAC_MACCONF1_TXFLOWCTRL |
+ 1 << S6_GMAC_MACCONF1_RXFLOWCTRL |
+ 1 << S6_GMAC_MACCONF1_RESTXFUNC |
+ 1 << S6_GMAC_MACCONF1_RESRXFUNC |
+ 1 << S6_GMAC_MACCONF1_RESTXMACCTRL |
+ 1 << S6_GMAC_MACCONF1_RESRXMACCTRL,
+ pd->reg + S6_GMAC_MACCONF1);
+#endif
+ writel(1 << S6_GMAC_MACCONF1_SOFTRES, pd->reg + S6_GMAC_MACCONF1);
+ udelay(1000);
+ writel(1 << S6_GMAC_MACCONF1_TXENA | 1 << S6_GMAC_MACCONF1_RXENA,
+ pd->reg + S6_GMAC_MACCONF1);
+ writel(1 << S6_GMAC_HOST_PBLKCTRL_TXSRES |
+ 1 << S6_GMAC_HOST_PBLKCTRL_RXSRES,
+ pd->reg + S6_GMAC_HOST_PBLKCTRL);
+ writel(S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
+ S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
+ 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
+ 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
+ is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
+ pd->reg + S6_GMAC_HOST_PBLKCTRL);
+ writel(1 << S6_GMAC_MACCONF1_TXENA |
+ 1 << S6_GMAC_MACCONF1_RXENA |
+ (dev->flags & IFF_LOOPBACK ? 1 : 0)
+ << S6_GMAC_MACCONF1_LOOPBACK,
+ pd->reg + S6_GMAC_MACCONF1);
+ writel(dev->mtu && (dev->mtu < (S6_MAX_FRLEN - ETH_HLEN-ETH_FCS_LEN)) ?
+ dev->mtu+ETH_HLEN+ETH_FCS_LEN : S6_MAX_FRLEN,
+ pd->reg + S6_GMAC_MACMAXFRAMELEN);
+ writel((pd->link.full ? 1 : 0) << S6_GMAC_MACCONF2_FULL |
+ 1 << S6_GMAC_MACCONF2_PADCRCENA |
+ 1 << S6_GMAC_MACCONF2_LENGTHFCHK |
+ (pd->link.giga ?
+ S6_GMAC_MACCONF2_IFMODE_BYTE :
+ S6_GMAC_MACCONF2_IFMODE_NIBBLE)
+ << S6_GMAC_MACCONF2_IFMODE |
+ 7 << S6_GMAC_MACCONF2_PREAMBLELEN,
+ pd->reg + S6_GMAC_MACCONF2);
+ writel(0, pd->reg + S6_GMAC_MACSTATADDR1);
+ writel(0, pd->reg + S6_GMAC_MACSTATADDR2);
+ writel(1 << S6_GMAC_FIFOCONF0_WTMENREQ |
+ 1 << S6_GMAC_FIFOCONF0_SRFENREQ |
+ 1 << S6_GMAC_FIFOCONF0_FRFENREQ |
+ 1 << S6_GMAC_FIFOCONF0_STFENREQ |
+ 1 << S6_GMAC_FIFOCONF0_FTFENREQ,
+ pd->reg + S6_GMAC_FIFOCONF0);
+ writel(128 << S6_GMAC_FIFOCONF3_CFGFTTH |
+ 128 << S6_GMAC_FIFOCONF3_CFGHWMFT,
+ pd->reg + S6_GMAC_FIFOCONF3);
+ writel((S6_GMAC_FIFOCONF_RSV_MASK & ~(
+ 1 << S6_GMAC_FIFOCONF_RSV_RUNT |
+ 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
+ 1 << S6_GMAC_FIFOCONF_RSV_OK |
+ 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
+ 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
+ 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
+ 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
+ 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED)) |
+ 1 << S6_GMAC_FIFOCONF5_DROPLT64 |
+ pd->link.giga << S6_GMAC_FIFOCONF5_CFGBYTM |
+ 1 << S6_GMAC_FIFOCONF5_RXDROPSIZE,
+ pd->reg + S6_GMAC_FIFOCONF5);
+ writel(1 << S6_GMAC_FIFOCONF_RSV_RUNT |
+ 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
+ 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
+ 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
+ 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
+ 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
+ 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED,
+ pd->reg + S6_GMAC_FIFOCONF4);
+ s6gmac_set_dstaddr(pd, 0,
+ 0xFFFFFFFF, 0x0000FFFF, 0xFFFFFFFF, 0x0000FFFF);
+ s6gmac_set_dstaddr(pd, 1,
+ dev->dev_addr[5] |
+ dev->dev_addr[4] << 8 |
+ dev->dev_addr[3] << 16 |
+ dev->dev_addr[2] << 24,
+ dev->dev_addr[1] |
+ dev->dev_addr[0] << 8,
+ 0xFFFFFFFF, 0x0000FFFF);
+ s6gmac_set_dstaddr(pd, 2,
+ 0x00000000, 0x00000100, 0x00000000, 0x00000100);
+ s6gmac_set_dstaddr(pd, 3,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000);
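+	/*
+	 * Destination address filter slots as programmed above (the
+	 * exact match semantics are hardware-defined): slot 0 matches
+	 * broadcast, slot 1 the station address (octets swapped into
+	 * the lo/hi registers), slot 2 any frame with the multicast bit
+	 * of the first octet set, and slot 3 is left all-zero.
+	 */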
+ writel(1 << S6_GMAC_HOST_PBLKCTRL_TXENA |
+ 1 << S6_GMAC_HOST_PBLKCTRL_RXENA |
+ S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
+ S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
+ 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
+ 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
+ is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
+ pd->reg + S6_GMAC_HOST_PBLKCTRL);
+}
+
+static void s6mii_enable(struct s6gmac *pd)
+{
+ writel(readl(pd->reg + S6_GMAC_MACCONF1) &
+ ~(1 << S6_GMAC_MACCONF1_SOFTRES),
+ pd->reg + S6_GMAC_MACCONF1);
+ writel((readl(pd->reg + S6_GMAC_MACMIICONF)
+ & ~(S6_GMAC_MACMIICONF_CSEL_MASK << S6_GMAC_MACMIICONF_CSEL))
+ | (S6_GMAC_MACMIICONF_CSEL_DIV168 << S6_GMAC_MACMIICONF_CSEL),
+ pd->reg + S6_GMAC_MACMIICONF);
+}
+
+static int s6mii_busy(struct s6gmac *pd, int tmo)
+{
+ while (readl(pd->reg + S6_GMAC_MACMIIINDI)) {
+ if (--tmo == 0)
+ return -ETIME;
+ udelay(64);
+ }
+ return 0;
+}
+
+static int s6mii_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct s6gmac *pd = bus->priv;
+ s6mii_enable(pd);
+ if (s6mii_busy(pd, 256))
+ return -ETIME;
+ writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
+ regnum << S6_GMAC_MACMIIADDR_REG,
+ pd->reg + S6_GMAC_MACMIIADDR);
+ writel(1 << S6_GMAC_MACMIICMD_READ, pd->reg + S6_GMAC_MACMIICMD);
+ writel(0, pd->reg + S6_GMAC_MACMIICMD);
+ if (s6mii_busy(pd, 256))
+ return -ETIME;
+ return (u16)readl(pd->reg + S6_GMAC_MACMIISTAT);
+}
+
+static int s6mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
+{
+ struct s6gmac *pd = bus->priv;
+ s6mii_enable(pd);
+ if (s6mii_busy(pd, 256))
+ return -ETIME;
+ writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
+ regnum << S6_GMAC_MACMIIADDR_REG,
+ pd->reg + S6_GMAC_MACMIIADDR);
+ writel(value, pd->reg + S6_GMAC_MACMIICTRL);
+ if (s6mii_busy(pd, 256))
+ return -ETIME;
+ return 0;
+}
+
+static int s6mii_reset(struct mii_bus *bus)
+{
+ struct s6gmac *pd = bus->priv;
+ s6mii_enable(pd);
+ if (s6mii_busy(pd, PHY_INIT_TIMEOUT))
+ return -ETIME;
+ return 0;
+}
+
+static void s6gmac_set_rgmii_txclock(struct s6gmac *pd)
+{
+ u32 pllsel = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL);
+ pllsel &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC);
+ switch (pd->link.mbit) {
+ case 10:
+ pllsel |= S6_GREG1_PLLSEL_GMAC_2500KHZ << S6_GREG1_PLLSEL_GMAC;
+ break;
+ case 100:
+ pllsel |= S6_GREG1_PLLSEL_GMAC_25MHZ << S6_GREG1_PLLSEL_GMAC;
+ break;
+ case 1000:
+ pllsel |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC;
+ break;
+ default:
+ return;
+ }
+ writel(pllsel, S6_REG_GREG1 + S6_GREG1_PLLSEL);
+}
+
+static inline void s6gmac_linkisup(struct net_device *dev, int isup)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ struct phy_device *phydev = pd->phydev;
+
+ pd->link.full = phydev->duplex;
+ pd->link.giga = (phydev->speed == 1000);
+ if (pd->link.mbit != phydev->speed) {
+ pd->link.mbit = phydev->speed;
+ s6gmac_set_rgmii_txclock(pd);
+ }
+ pd->link.isup = isup;
+ if (isup)
+ netif_carrier_on(dev);
+ phy_print_status(phydev);
+}
+
+static void s6gmac_adjust_link(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ struct phy_device *phydev = pd->phydev;
+ if (pd->link.isup &&
+ (!phydev->link ||
+ (pd->link.mbit != phydev->speed) ||
+ (pd->link.full != phydev->duplex))) {
+ pd->link.isup = 0;
+ netif_tx_disable(dev);
+ if (!phydev->link) {
+ netif_carrier_off(dev);
+ phy_print_status(phydev);
+ }
+ }
+ if (!pd->link.isup && phydev->link) {
+ if (pd->link.full != phydev->duplex) {
+ u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
+ if (phydev->duplex)
+ maccfg |= 1 << S6_GMAC_MACCONF2_FULL;
+ else
+ maccfg &= ~(1 << S6_GMAC_MACCONF2_FULL);
+ writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
+ }
+
+ if (pd->link.giga != (phydev->speed == 1000)) {
+ u32 fifocfg = readl(pd->reg + S6_GMAC_FIFOCONF5);
+ u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
+ maccfg &= ~(S6_GMAC_MACCONF2_IFMODE_MASK
+ << S6_GMAC_MACCONF2_IFMODE);
+ if (phydev->speed == 1000) {
+ fifocfg |= 1 << S6_GMAC_FIFOCONF5_CFGBYTM;
+ maccfg |= S6_GMAC_MACCONF2_IFMODE_BYTE
+ << S6_GMAC_MACCONF2_IFMODE;
+ } else {
+ fifocfg &= ~(1 << S6_GMAC_FIFOCONF5_CFGBYTM);
+ maccfg |= S6_GMAC_MACCONF2_IFMODE_NIBBLE
+ << S6_GMAC_MACCONF2_IFMODE;
+ }
+ writel(fifocfg, pd->reg + S6_GMAC_FIFOCONF5);
+ writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
+ }
+
+ if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
+ netif_wake_queue(dev);
+ s6gmac_linkisup(dev, 1);
+ }
+}
+
+static inline int s6gmac_phy_start(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ int i = 0;
+ struct phy_device *p = NULL;
+	while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i])))
+		i++;
+	if (!p) {
+		printk(KERN_ERR "%s: no PHY found\n", dev->name);
+		return -ENODEV;
+	}
+	p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0,
+		PHY_INTERFACE_MODE_RGMII);
+ if (IS_ERR(p)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(p);
+ }
+ p->supported &= PHY_GBIT_FEATURES;
+ p->advertising = p->supported;
+ pd->phydev = p;
+ return 0;
+}
+
+static inline void s6gmac_init_stats(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ u32 mask;
+ mask = 1 << S6_GMAC_STATCARRY1_RDRP |
+ 1 << S6_GMAC_STATCARRY1_RJBR |
+ 1 << S6_GMAC_STATCARRY1_RFRG |
+ 1 << S6_GMAC_STATCARRY1_ROVR |
+ 1 << S6_GMAC_STATCARRY1_RUND |
+ 1 << S6_GMAC_STATCARRY1_RCDE |
+ 1 << S6_GMAC_STATCARRY1_RFLR |
+ 1 << S6_GMAC_STATCARRY1_RALN |
+ 1 << S6_GMAC_STATCARRY1_RMCA |
+ 1 << S6_GMAC_STATCARRY1_RFCS |
+ 1 << S6_GMAC_STATCARRY1_RPKT |
+ 1 << S6_GMAC_STATCARRY1_RBYT;
+ writel(mask, pd->reg + S6_GMAC_STATCARRY(0));
+ writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(0));
+ mask = 1 << S6_GMAC_STATCARRY2_TDRP |
+ 1 << S6_GMAC_STATCARRY2_TNCL |
+ 1 << S6_GMAC_STATCARRY2_TXCL |
+ 1 << S6_GMAC_STATCARRY2_TEDF |
+ 1 << S6_GMAC_STATCARRY2_TPKT |
+ 1 << S6_GMAC_STATCARRY2_TBYT |
+ 1 << S6_GMAC_STATCARRY2_TFRG |
+ 1 << S6_GMAC_STATCARRY2_TUND |
+ 1 << S6_GMAC_STATCARRY2_TOVR |
+ 1 << S6_GMAC_STATCARRY2_TFCS |
+ 1 << S6_GMAC_STATCARRY2_TJBR;
+ writel(mask, pd->reg + S6_GMAC_STATCARRY(1));
+ writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(1));
+}
+
+static inline void s6gmac_init_dmac(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ s6dmac_disable_chan(pd->tx_dma, pd->tx_chan);
+ s6dmac_disable_chan(pd->rx_dma, pd->rx_chan);
+ s6dmac_disable_error_irqs(pd->tx_dma, 1 << S6_HIFDMA_GMACTX);
+ s6dmac_disable_error_irqs(pd->rx_dma, 1 << S6_HIFDMA_GMACRX);
+}
+
+static netdev_tx_t s6gmac_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pd->lock, flags);
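+	/*
+	 * Per-frame control word: the frame length plus bits that
+	 * evidently request FCS insertion and, for frames shorter than
+	 * ETH_ZLEN, hardware padding; the DMA FIFO entry queued just
+	 * below carries the payload itself.
+	 */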
+ writel(skb->len << S6_GMAC_BURST_PREWR_LEN |
+ 0 << S6_GMAC_BURST_PREWR_CFE |
+ 1 << S6_GMAC_BURST_PREWR_PPE |
+ 1 << S6_GMAC_BURST_PREWR_FCS |
+ ((skb->len < ETH_ZLEN) ? 1 : 0) << S6_GMAC_BURST_PREWR_PAD,
+ pd->reg + S6_GMAC_BURST_PREWR);
+ s6dmac_put_fifo_cache(pd->tx_dma, pd->tx_chan,
+ (u32)skb->data, pd->io, skb->len);
+ if (s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
+ netif_stop_queue(dev);
+ if (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >= S6_NUM_TX_SKB) {
+ printk(KERN_ERR "GMAC BUG: skb tx ring overflow [%x, %x]\n",
+ pd->tx_skb_o, pd->tx_skb_i);
+ BUG();
+ }
+ pd->tx_skb[(pd->tx_skb_i++) % S6_NUM_TX_SKB] = skb;
+ spin_unlock_irqrestore(&pd->lock, flags);
+	return NETDEV_TX_OK;
+}
+
+static void s6gmac_tx_timeout(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ unsigned long flags;
+ spin_lock_irqsave(&pd->lock, flags);
+ s6gmac_tx_interrupt(dev);
+ spin_unlock_irqrestore(&pd->lock, flags);
+}
+
+static int s6gmac_open(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ unsigned long flags;
+ phy_read_status(pd->phydev);
+ spin_lock_irqsave(&pd->lock, flags);
+ pd->link.mbit = 0;
+ s6gmac_linkisup(dev, pd->phydev->link);
+ s6gmac_init_device(dev);
+ s6gmac_init_stats(dev);
+ s6gmac_init_dmac(dev);
+ s6gmac_rx_fillfifo(pd);
+ s6dmac_enable_chan(pd->rx_dma, pd->rx_chan,
+ 2, 1, 0, 1, 0, 0, 0, 7, -1, 2, 0, 1);
+ s6dmac_enable_chan(pd->tx_dma, pd->tx_chan,
+ 2, 0, 1, 0, 0, 0, 0, 7, -1, 2, 0, 1);
+ writel(0 << S6_GMAC_HOST_INT_TXBURSTOVER |
+ 0 << S6_GMAC_HOST_INT_TXPREWOVER |
+ 0 << S6_GMAC_HOST_INT_RXBURSTUNDER |
+ 0 << S6_GMAC_HOST_INT_RXPOSTRFULL |
+ 0 << S6_GMAC_HOST_INT_RXPOSTRUNDER,
+ pd->reg + S6_GMAC_HOST_INTMASK);
+ spin_unlock_irqrestore(&pd->lock, flags);
+ phy_start(pd->phydev);
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int s6gmac_stop(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ unsigned long flags;
+ netif_stop_queue(dev);
+ phy_stop(pd->phydev);
+ spin_lock_irqsave(&pd->lock, flags);
+ s6gmac_init_dmac(dev);
+ s6gmac_stop_device(dev);
+ while (pd->tx_skb_i != pd->tx_skb_o)
+ dev_kfree_skb(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
+ while (pd->rx_skb_i != pd->rx_skb_o)
+ dev_kfree_skb(pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]);
+ spin_unlock_irqrestore(&pd->lock, flags);
+ return 0;
+}
+
+static struct net_device_stats *s6gmac_stats(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ struct net_device_stats *st = (struct net_device_stats *)&pd->stats;
+ int i;
+ do {
+ unsigned long flags;
+ spin_lock_irqsave(&pd->lock, flags);
+ for (i = 0; i < sizeof(pd->stats) / sizeof(unsigned long); i++)
+ pd->stats[i] =
+ pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1);
+ s6gmac_stats_collect(pd, &statinf[0][0]);
+ s6gmac_stats_collect(pd, &statinf[1][0]);
+ i = s6gmac_stats_pending(pd, 0) |
+ s6gmac_stats_pending(pd, 1);
+ spin_unlock_irqrestore(&pd->lock, flags);
+ } while (i);
+ st->rx_errors = st->rx_crc_errors +
+ st->rx_frame_errors +
+ st->rx_length_errors +
+ st->rx_missed_errors;
+ st->tx_errors += st->tx_aborted_errors;
+ return st;
+}
+
+static const struct net_device_ops s6gmac_netdev_ops = {
+	.ndo_open		= s6gmac_open,
+	.ndo_stop		= s6gmac_stop,
+	.ndo_start_xmit		= s6gmac_tx,
+	.ndo_tx_timeout		= s6gmac_tx_timeout,
+	.ndo_get_stats		= s6gmac_stats,
+};
+
+static int __devinit s6gmac_probe(struct platform_device *pdev)
+{
+	struct net_device *dev;
+	struct s6gmac *pd;
+	int res;
+	unsigned long i;
+	struct mii_bus *mb;
+	dev = alloc_etherdev(sizeof(*pd));
+	if (!dev) {
+		printk(KERN_ERR DRV_PRMT "etherdev alloc failed, aborting.\n");
+		return -ENOMEM;
+	}
+	dev->netdev_ops = &s6gmac_netdev_ops;
+	dev->watchdog_timeo = HZ;
+	dev->irq = platform_get_irq(pdev, 0);
+ pd = netdev_priv(dev);
+ memset(pd, 0, sizeof(*pd));
+ spin_lock_init(&pd->lock);
+ pd->reg = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start;
+ i = platform_get_resource(pdev, IORESOURCE_DMA, 0)->start;
+ pd->tx_dma = DMA_MASK_DMAC(i);
+ pd->tx_chan = DMA_INDEX_CHNL(i);
+ i = platform_get_resource(pdev, IORESOURCE_DMA, 1)->start;
+ pd->rx_dma = DMA_MASK_DMAC(i);
+ pd->rx_chan = DMA_INDEX_CHNL(i);
+ pd->io = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
+ res = request_irq(dev->irq, s6gmac_interrupt, 0, dev->name, dev);
+ if (res) {
+ printk(KERN_ERR DRV_PRMT "irq request failed: %d\n", dev->irq);
+ goto errirq;
+ }
+ res = register_netdev(dev);
+ if (res) {
+ printk(KERN_ERR DRV_PRMT "error registering device %s\n",
+ dev->name);
+ goto errdev;
+ }
+	mb = mdiobus_alloc();
+	if (!mb) {
+		printk(KERN_ERR DRV_PRMT "error allocating mii bus\n");
+		res = -ENOMEM;
+		goto errmii;
+	}
+ mb->name = "s6gmac_mii";
+ mb->read = s6mii_read;
+ mb->write = s6mii_write;
+ mb->reset = s6mii_reset;
+ mb->priv = pd;
+ snprintf(mb->id, MII_BUS_ID_SIZE, "0");
+ mb->phy_mask = ~(1 << 0);
+ mb->irq = &pd->mii.irq[0];
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ int n = platform_get_irq(pdev, i + 1);
+ if (n < 0)
+ n = PHY_POLL;
+ pd->mii.irq[i] = n;
+ }
+ mdiobus_register(mb);
+ pd->mii.bus = mb;
+	res = s6gmac_phy_start(dev);
+	if (res) {
+		mdiobus_unregister(mb);
+		mdiobus_free(mb);
+		goto errmii;
+	}
+ platform_set_drvdata(pdev, dev);
+ return 0;
+errmii:
+ unregister_netdev(dev);
+errdev:
+ free_irq(dev->irq, dev);
+errirq:
+ free_netdev(dev);
+ return res;
+}
+
+static int __devexit s6gmac_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ if (dev) {
+ struct s6gmac *pd = netdev_priv(dev);
+ mdiobus_unregister(pd->mii.bus);
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ free_netdev(dev);
+ platform_set_drvdata(pdev, NULL);
+ }
+ return 0;
+}
+
+static struct platform_driver s6gmac_driver = {
+ .probe = s6gmac_probe,
+ .remove = __devexit_p(s6gmac_remove),
+ .driver = {
+ .name = "s6gmac",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init s6gmac_init(void)
+{
+ printk(KERN_INFO DRV_PRMT "S6 GMAC ethernet driver\n");
+ return platform_driver_register(&s6gmac_driver);
+}
+
+
+static void __exit s6gmac_exit(void)
+{
+ platform_driver_unregister(&s6gmac_driver);
+}
+
+module_init(s6gmac_init);
+module_exit(s6gmac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("S6105 on chip Ethernet driver");
+MODULE_AUTHOR("Oskar Schirmer <os@emlix.com>");
diff --git a/drivers/net/ethernet/seeq/Kconfig b/drivers/net/ethernet/seeq/Kconfig
new file mode 100644
index 000000000000..29f18533fdc7
--- /dev/null
+++ b/drivers/net/ethernet/seeq/Kconfig
@@ -0,0 +1,47 @@
+#
+# SEEQ device configuration
+#
+
+config NET_VENDOR_SEEQ
+ bool "SEEQ devices"
+ default y
+ depends on HAS_IOMEM
+ depends on (ARM && ARCH_ACORN) || SGI_HAS_SEEQ || EXPERIMENTAL
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about SEEQ devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_SEEQ
+
+config ARM_ETHER3
+ tristate "Acorn/ANT Ether3 support"
+ depends on ARM && ARCH_ACORN
+ ---help---
+ If you have an Acorn system with one of these network cards, you
+ should say Y to this option if you wish to use it with Linux.
+
+config SEEQ8005
+ tristate "SEEQ8005 support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ ---help---
+ This is a driver for the SEEQ 8005 network (Ethernet) card. If this
+ is for you, read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called seeq8005.
+
+config SGISEEQ
+ tristate "SGI Seeq ethernet controller support"
+ depends on SGI_HAS_SEEQ
+ ---help---
+	  Say Y here if you have a Seeq based Ethernet network card. This is
+ used in many Silicon Graphics machines.
+
+endif # NET_VENDOR_SEEQ
diff --git a/drivers/net/ethernet/seeq/Makefile b/drivers/net/ethernet/seeq/Makefile
new file mode 100644
index 000000000000..3e258a580c05
--- /dev/null
+++ b/drivers/net/ethernet/seeq/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the SEEQ network device drivers
+#
+
+obj-$(CONFIG_ARM_ETHER3) += ether3.o
+obj-$(CONFIG_SEEQ8005) += seeq8005.o
+obj-$(CONFIG_SGISEEQ) += sgiseeq.o
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
new file mode 100644
index 000000000000..893c880dadf0
--- /dev/null
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -0,0 +1,918 @@
+/*
+ * linux/drivers/acorn/net/ether3.c
+ *
+ * Copyright (C) 1995-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * SEEQ nq8005 ethernet driver for Acorn/ANT Ether3 card
+ * for Acorn machines
+ *
+ * By Russell King, with some suggestions from borris@ant.co.uk
+ *
+ * Changelog:
+ * 1.04 RMK 29/02/1996 Won't pass packets that are from our ethernet
+ * address up to the higher levels - they're
+ * silently ignored. I/F can now be put into
+ * multicast mode. Receiver routine optimised.
+ * 1.05 RMK 30/02/1996 Now claims interrupt at open when part of
+ * the kernel rather than when a module.
+ * 1.06 RMK 02/03/1996 Various code cleanups
+ * 1.07 RMK 13/10/1996 Optimised interrupt routine and transmit
+ * routines.
+ * 1.08 RMK 14/10/1996 Fixed problem with too many packets,
+ * prevented the kernel message about dropped
+ * packets appearing too many times a second.
+ * Now does not disable all IRQs, only the IRQ
+ * used by this card.
+ * 1.09 RMK 10/11/1996 Only enables TX irq when buffer space is low,
+ * but we still service the TX queue if we get a
+ * RX interrupt.
+ * 1.10 RMK 15/07/1997 Fixed autoprobing of NQ8004.
+ * 1.11 RMK 16/11/1997 Fixed autoprobing of NQ8005A.
+ * 1.12 RMK 31/12/1997 Removed reference to dev_tint for Linux 2.1.
+ * RMK 27/06/1998 Changed asm/delay.h to linux/delay.h.
+ * 1.13 RMK 29/06/1998 Fixed problem with transmission of packets.
+ *			Chip seems to have a bug in it, whereby if the
+ * packet starts two bytes from the end of the
+ * buffer, it corrupts the receiver chain, and
+ * never updates the transmit status correctly.
+ * 1.14 RMK 07/01/1998 Added initial code for ETHERB addressing.
+ * 1.15 RMK 30/04/1999 More fixes to the transmit routine for buggy
+ * hardware.
+ * 1.16 RMK 10/02/2000 Updated for 2.3.43
+ * 1.17 RMK 13/05/2000 Updated for 2.3.99-pre8
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/ecard.h>
+#include <asm/io.h>
+
+static char version[] __devinitdata = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n";
+
+#include "ether3.h"
+
+static unsigned int net_debug = NET_DEBUG;
+
+static void ether3_setmulticastlist(struct net_device *dev);
+static int ether3_rx(struct net_device *dev, unsigned int maxcnt);
+static void ether3_tx(struct net_device *dev);
+static int ether3_open (struct net_device *dev);
+static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t ether3_interrupt (int irq, void *dev_id);
+static int ether3_close (struct net_device *dev);
+static void ether3_setmulticastlist (struct net_device *dev);
+static void ether3_timeout(struct net_device *dev);
+
+#define BUS_16 2
+#define BUS_8 1
+#define BUS_UNKNOWN 0
+
+/* --------------------------------------------------------------------------- */
+
+typedef enum {
+ buffer_write,
+ buffer_read
+} buffer_rw_t;
+
+/*
+ * ether3 read/write. Slow things down a bit...
+ * The SEEQ8005 doesn't like us writing to its registers
+ * too quickly.
+ */
+static inline void ether3_outb(int v, const void __iomem *r)
+{
+ writeb(v, r);
+ udelay(1);
+}
+
+static inline void ether3_outw(int v, const void __iomem *r)
+{
+ writew(v, r);
+ udelay(1);
+}
+#define ether3_inb(r) ({ unsigned int __v = readb((r)); udelay(1); __v; })
+#define ether3_inw(r) ({ unsigned int __v = readw((r)); udelay(1); __v; })
+
+static int
+ether3_setbuffer(struct net_device *dev, buffer_rw_t read, int start)
+{
+ int timeout = 1000;
+
+ ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);
+ ether3_outw(priv(dev)->regs.command | CMD_FIFOWRITE, REG_COMMAND);
+
+ while ((ether3_inw(REG_STATUS) & STAT_FIFOEMPTY) == 0) {
+ if (!timeout--) {
+ printk("%s: setbuffer broken\n", dev->name);
+ priv(dev)->broken = 1;
+ return 1;
+ }
+ udelay(1);
+ }
+
+ if (read == buffer_read) {
+ ether3_outw(start, REG_DMAADDR);
+ ether3_outw(priv(dev)->regs.command | CMD_FIFOREAD, REG_COMMAND);
+ } else {
+ ether3_outw(priv(dev)->regs.command | CMD_FIFOWRITE, REG_COMMAND);
+ ether3_outw(start, REG_DMAADDR);
+ }
+ return 0;
+}
+
+/*
+ * write data to the buffer memory
+ */
+#define ether3_writebuffer(dev,data,length) \
+ writesw(REG_BUFWIN, (data), (length) >> 1)
+
+#define ether3_writeword(dev,data) \
+ writew((data), REG_BUFWIN)
+
+#define ether3_writelong(dev,data) { \
+ void __iomem *reg_bufwin = REG_BUFWIN; \
+ writew((data), reg_bufwin); \
+ writew((data) >> 16, reg_bufwin); \
+}
+
+/*
+ * read data from the buffer memory
+ */
+#define ether3_readbuffer(dev,data,length) \
+ readsw(REG_BUFWIN, (data), (length) >> 1)
+
+#define ether3_readword(dev) \
+ readw(REG_BUFWIN)
+
+/* the low word must be read first; a bare '|' would not sequence the reads */
+#define ether3_readlong(dev) ({ \
+	unsigned int __v = readw(REG_BUFWIN); \
+	__v | (readw(REG_BUFWIN) << 16); \
+})
+
+/*
+ * Switch LED off...
+ */
+static void ether3_ledoff(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2);
+}
+
+/*
+ * switch LED on...
+ */
+static inline void ether3_ledon(struct net_device *dev)
+{
+ del_timer(&priv(dev)->timer);
+ priv(dev)->timer.expires = jiffies + HZ / 50; /* leave on for 1/50th second */
+ priv(dev)->timer.data = (unsigned long)dev;
+ priv(dev)->timer.function = ether3_ledoff;
+ add_timer(&priv(dev)->timer);
+ if (priv(dev)->regs.config2 & CFG2_CTRLO)
+ ether3_outw(priv(dev)->regs.config2 &= ~CFG2_CTRLO, REG_CONFIG2);
+}
+
+/*
+ * Read the ethernet address string from the on board rom.
+ * This is an ascii string!!!
+ */
+static int __devinit
+ether3_addr(char *addr, struct expansion_card *ec)
+{
+ struct in_chunk_dir cd;
+ char *s;
+
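+
+	/*
+	 * The description chunk is human-readable text with the MAC
+	 * embedded as six colon-separated hex octets in parentheses,
+	 * e.g. "... (00:02:07:12:34:56)" (example value only).
+	 */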
+ if (ecard_readchunk(&cd, ec, 0xf5, 0) && (s = strchr(cd.d.string, '('))) {
+ int i;
+ for (i = 0; i<6; i++) {
+ addr[i] = simple_strtoul(s + 1, &s, 0x10);
+ if (*s != (i==5?')' : ':' ))
+ break;
+ }
+ if (i == 6)
+ return 0;
+ }
+ /* I wonder if we should even let the user continue in this case
+ * - no, it would be better to disable the device
+ */
+ printk(KERN_ERR "ether3: Couldn't read a valid MAC address from card.\n");
+ return -ENODEV;
+}
+
+/* --------------------------------------------------------------------------- */
+
+static int __devinit
+ether3_ramtest(struct net_device *dev, unsigned char byte)
+{
+ unsigned char *buffer = kmalloc(RX_END, GFP_KERNEL);
+ int i,ret = 0;
+ int max_errors = 4;
+ int bad = -1;
+
+ if (!buffer)
+ return 1;
+
+ memset(buffer, byte, RX_END);
+ ether3_setbuffer(dev, buffer_write, 0);
+ ether3_writebuffer(dev, buffer, TX_END);
+ ether3_setbuffer(dev, buffer_write, RX_START);
+ ether3_writebuffer(dev, buffer + RX_START, RX_LEN);
+ memset(buffer, byte ^ 0xff, RX_END);
+ ether3_setbuffer(dev, buffer_read, 0);
+ ether3_readbuffer(dev, buffer, TX_END);
+ ether3_setbuffer(dev, buffer_read, RX_START);
+ ether3_readbuffer(dev, buffer + RX_START, RX_LEN);
+
+ for (i = 0; i < RX_END; i++) {
+ if (buffer[i] != byte) {
+			if (max_errors > 0 && bad == -1) {
+ printk("%s: RAM failed with (%02X instead of %02X) at 0x%04X",
+ dev->name, buffer[i], byte, i);
+ ret = 2;
+ max_errors--;
+ bad = i;
+ }
+ } else {
+ if (bad != -1) {
+ if (bad != i - 1)
+ printk(" - 0x%04X\n", i - 1);
+ printk("\n");
+ bad = -1;
+ }
+ }
+ }
+ if (bad != -1)
+ printk(" - 0xffff\n");
+ kfree(buffer);
+
+ return ret;
+}
+
+/* ------------------------------------------------------------------------------- */
+
+static int __devinit ether3_init_2(struct net_device *dev)
+{
+ int i;
+
+ priv(dev)->regs.config1 = CFG1_RECVCOMPSTAT0|CFG1_DMABURST8;
+ priv(dev)->regs.config2 = CFG2_CTRLO|CFG2_RECVCRC|CFG2_ERRENCRC;
+ priv(dev)->regs.command = 0;
+
+ /*
+ * Set up our hardware address
+ */
+ ether3_outw(priv(dev)->regs.config1 | CFG1_BUFSELSTAT0, REG_CONFIG1);
+ for (i = 0; i < 6; i++)
+ ether3_outb(dev->dev_addr[i], REG_BUFWIN);
+
+ if (dev->flags & IFF_PROMISC)
+ priv(dev)->regs.config1 |= CFG1_RECVPROMISC;
+ else if (dev->flags & IFF_MULTICAST)
+ priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI;
+ else
+ priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD;
+
+ /*
+ * There is a problem with the NQ8005 in that it occasionally loses the
+ * last two bytes. To get round this problem, we receive the CRC as
+ * well. That way, if we do lose the last two, then it doesn't matter.
+ */
+ ether3_outw(priv(dev)->regs.config1 | CFG1_TRANSEND, REG_CONFIG1);
+ ether3_outw((TX_END>>8) - 1, REG_BUFWIN);
+ ether3_outw(priv(dev)->rx_head, REG_RECVPTR);
+ ether3_outw(0, REG_TRANSMITPTR);
+ ether3_outw(priv(dev)->rx_head >> 8, REG_RECVEND);
+ ether3_outw(priv(dev)->regs.config2, REG_CONFIG2);
+ ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);
+ ether3_outw(priv(dev)->regs.command, REG_COMMAND);
+
+ i = ether3_ramtest(dev, 0x5A);
+ if(i)
+ return i;
+ i = ether3_ramtest(dev, 0x1E);
+ if(i)
+ return i;
+
+ ether3_setbuffer(dev, buffer_write, 0);
+ ether3_writelong(dev, 0);
+ return 0;
+}
+
+static void
+ether3_init_for_open(struct net_device *dev)
+{
+ int i;
+
+ /* Reset the chip */
+ ether3_outw(CFG2_RESET, REG_CONFIG2);
+ udelay(4);
+
+ priv(dev)->regs.command = 0;
+ ether3_outw(CMD_RXOFF|CMD_TXOFF, REG_COMMAND);
+ while (ether3_inw(REG_STATUS) & (STAT_RXON|STAT_TXON))
+ barrier();
+
+ ether3_outw(priv(dev)->regs.config1 | CFG1_BUFSELSTAT0, REG_CONFIG1);
+ for (i = 0; i < 6; i++)
+ ether3_outb(dev->dev_addr[i], REG_BUFWIN);
+
+ priv(dev)->tx_head = 0;
+ priv(dev)->tx_tail = 0;
+ priv(dev)->regs.config2 |= CFG2_CTRLO;
+ priv(dev)->rx_head = RX_START;
+
+ ether3_outw(priv(dev)->regs.config1 | CFG1_TRANSEND, REG_CONFIG1);
+ ether3_outw((TX_END>>8) - 1, REG_BUFWIN);
+ ether3_outw(priv(dev)->rx_head, REG_RECVPTR);
+ ether3_outw(priv(dev)->rx_head >> 8, REG_RECVEND);
+ ether3_outw(0, REG_TRANSMITPTR);
+ ether3_outw(priv(dev)->regs.config2, REG_CONFIG2);
+ ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);
+
+ ether3_setbuffer(dev, buffer_write, 0);
+ ether3_writelong(dev, 0);
+
+ priv(dev)->regs.command = CMD_ENINTRX | CMD_ENINTTX;
+ ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND);
+}
+
+static inline int
+ether3_probe_bus_8(struct net_device *dev, int val)
+{
+ int write_low, write_high, read_low, read_high;
+
+ write_low = val & 255;
+ write_high = val >> 8;
+
+ printk(KERN_DEBUG "ether3_probe: write8 [%02X:%02X]", write_high, write_low);
+
+ ether3_outb(write_low, REG_RECVPTR);
+ ether3_outb(write_high, REG_RECVPTR + 4);
+
+ read_low = ether3_inb(REG_RECVPTR);
+ read_high = ether3_inb(REG_RECVPTR + 4);
+
+ printk(", read8 [%02X:%02X]\n", read_high, read_low);
+
+ return read_low == write_low && read_high == write_high;
+}
+
+static inline int
+ether3_probe_bus_16(struct net_device *dev, int val)
+{
+ int read_val;
+
+ ether3_outw(val, REG_RECVPTR);
+ read_val = ether3_inw(REG_RECVPTR);
+
+ printk(KERN_DEBUG "ether3_probe: write16 [%04X], read16 [%04X]\n", val, read_val);
+
+ return read_val == val;
+}
+
+/*
+ * Open/initialize the board. This is called (in the current kernel)
+ * sometime after booting when the 'ifconfig' program is run.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+static int
+ether3_open(struct net_device *dev)
+{
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
+ dev->name);
+ return -EINVAL;
+ }
+
+ if (request_irq(dev->irq, ether3_interrupt, 0, "ether3", dev))
+ return -EAGAIN;
+
+ ether3_init_for_open(dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/*
+ * The inverse routine to ether3_open().
+ */
+static int
+ether3_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+
+ disable_irq(dev->irq);
+
+ ether3_outw(CMD_RXOFF|CMD_TXOFF, REG_COMMAND);
+ priv(dev)->regs.command = 0;
+ while (ether3_inw(REG_STATUS) & (STAT_RXON|STAT_TXON))
+ barrier();
+ ether3_outb(0x80, REG_CONFIG2 + 4);
+ ether3_outw(0, REG_COMMAND);
+
+ free_irq(dev->irq, dev);
+
+ return 0;
+}
+
+/*
+ * Set or clear promiscuous/multicast mode filter for this adaptor.
+ *
+ * We don't attempt any packet filtering. The card may have a SEEQ 8004
+ * in it, which does not have the other ethernet address registers present...
+ */
+static void ether3_setmulticastlist(struct net_device *dev)
+{
+ priv(dev)->regs.config1 &= ~CFG1_RECVPROMISC;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* promiscuous mode */
+ priv(dev)->regs.config1 |= CFG1_RECVPROMISC;
+ } else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
+ priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI;
+ } else
+ priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD;
+
+ ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);
+}
+
+static void ether3_timeout(struct net_device *dev)
+{
+ unsigned long flags;
+
+ del_timer(&priv(dev)->timer);
+
+ local_irq_save(flags);
+ printk(KERN_ERR "%s: transmit timed out, network cable problem?\n", dev->name);
+ printk(KERN_ERR "%s: state: { status=%04X cfg1=%04X cfg2=%04X }\n", dev->name,
+ ether3_inw(REG_STATUS), ether3_inw(REG_CONFIG1), ether3_inw(REG_CONFIG2));
+ printk(KERN_ERR "%s: { rpr=%04X rea=%04X tpr=%04X }\n", dev->name,
+ ether3_inw(REG_RECVPTR), ether3_inw(REG_RECVEND), ether3_inw(REG_TRANSMITPTR));
+ printk(KERN_ERR "%s: tx head=%X tx tail=%X\n", dev->name,
+ priv(dev)->tx_head, priv(dev)->tx_tail);
+ ether3_setbuffer(dev, buffer_read, priv(dev)->tx_tail);
+ printk(KERN_ERR "%s: packet status = %08X\n", dev->name, ether3_readlong(dev));
+ local_irq_restore(flags);
+
+ priv(dev)->regs.config2 |= CFG2_CTRLO;
+ dev->stats.tx_errors += 1;
+ ether3_outw(priv(dev)->regs.config2, REG_CONFIG2);
+ priv(dev)->tx_head = priv(dev)->tx_tail = 0;
+
+ netif_wake_queue(dev);
+}
+
+/*
+ * Transmit a packet
+ */
+static int
+ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned long flags;
+ unsigned int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned int ptr, next_ptr;
+
+ if (priv(dev)->broken) {
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ netif_start_queue(dev);
+ return NETDEV_TX_OK;
+ }
+
+ length = (length + 1) & ~1;
+ if (length != skb->len) {
+ if (skb_padto(skb, length))
+ goto out;
+ }
+
+ next_ptr = (priv(dev)->tx_head + 1) & 15;
+
+ local_irq_save(flags);
+
+ if (priv(dev)->tx_tail == next_ptr) {
+ local_irq_restore(flags);
+ return NETDEV_TX_BUSY; /* unable to queue */
+ }
+
+ ptr = 0x600 * priv(dev)->tx_head;
+ priv(dev)->tx_head = next_ptr;
+ next_ptr *= 0x600;
+
+#define TXHDR_FLAGS (TXHDR_TRANSMIT|TXHDR_CHAINCONTINUE|TXHDR_DATAFOLLOWS|TXHDR_ENSUCCESS)
+
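+	/*
+	 * Slot layout: sixteen fixed 0x600-byte buffers in card memory,
+	 * indexed by tx_head/tx_tail & 15.  The next slot's header is
+	 * zeroed first so the chip cannot chain into stale data; the
+	 * packet and its trailing chain word (pointing at next_ptr) are
+	 * written, and only then is this slot's own header armed with
+	 * TXHDR_FLAGS (see changelog entries 1.13 and 1.15 above).
+	 */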
+ ether3_setbuffer(dev, buffer_write, next_ptr);
+ ether3_writelong(dev, 0);
+ ether3_setbuffer(dev, buffer_write, ptr);
+ ether3_writelong(dev, 0);
+ ether3_writebuffer(dev, skb->data, length);
+ ether3_writeword(dev, htons(next_ptr));
+ ether3_writeword(dev, TXHDR_CHAINCONTINUE >> 16);
+ ether3_setbuffer(dev, buffer_write, ptr);
+ ether3_writeword(dev, htons((ptr + length + 4)));
+ ether3_writeword(dev, TXHDR_FLAGS >> 16);
+ ether3_ledon(dev);
+
+ if (!(ether3_inw(REG_STATUS) & STAT_TXON)) {
+ ether3_outw(ptr, REG_TRANSMITPTR);
+ ether3_outw(priv(dev)->regs.command | CMD_TXON, REG_COMMAND);
+ }
+
+ next_ptr = (priv(dev)->tx_head + 1) & 15;
+ local_irq_restore(flags);
+
+ dev_kfree_skb(skb);
+
+ if (priv(dev)->tx_tail == next_ptr)
+ netif_stop_queue(dev);
+
+ out:
+ return NETDEV_TX_OK;
+}
+
+static irqreturn_t
+ether3_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ unsigned int status, handled = IRQ_NONE;
+
+#if NET_DEBUG > 1
+ if(net_debug & DEBUG_INT)
+ printk("eth3irq: %d ", irq);
+#endif
+
+ status = ether3_inw(REG_STATUS);
+
+ if (status & STAT_INTRX) {
+ ether3_outw(CMD_ACKINTRX | priv(dev)->regs.command, REG_COMMAND);
+ ether3_rx(dev, 12);
+ handled = IRQ_HANDLED;
+ }
+
+ if (status & STAT_INTTX) {
+ ether3_outw(CMD_ACKINTTX | priv(dev)->regs.command, REG_COMMAND);
+ ether3_tx(dev);
+ handled = IRQ_HANDLED;
+ }
+
+#if NET_DEBUG > 1
+ if(net_debug & DEBUG_INT)
+ printk("done\n");
+#endif
+ return handled;
+}
+
+/*
+ * If we have a good packet(s), get it/them out of the buffers.
+ */
+static int ether3_rx(struct net_device *dev, unsigned int maxcnt)
+{
+ unsigned int next_ptr = priv(dev)->rx_head, received = 0;
+
+ ether3_ledon(dev);
+
+ do {
+ unsigned int this_ptr, status;
+ unsigned char addrs[16];
+
+ /*
+ * read the first 16 bytes from the buffer.
+ * This contains the status bytes etc and ethernet addresses,
+ * and we also check the source ethernet address to see if
+ * it originated from us.
+ */
+ {
+ unsigned int temp_ptr;
+ ether3_setbuffer(dev, buffer_read, next_ptr);
+ temp_ptr = ether3_readword(dev);
+ status = ether3_readword(dev);
+ if ((status & (RXSTAT_DONE | RXHDR_CHAINCONTINUE | RXHDR_RECEIVE)) !=
+ (RXSTAT_DONE | RXHDR_CHAINCONTINUE) || !temp_ptr)
+ break;
+
+ this_ptr = next_ptr + 4;
+ next_ptr = ntohs(temp_ptr);
+ }
+ ether3_setbuffer(dev, buffer_read, this_ptr);
+ ether3_readbuffer(dev, addrs+2, 12);
+
+		if (next_ptr < RX_START || next_ptr >= RX_END) {
+			int i;
+			printk("%s: bad next pointer @%04X: ", dev->name, priv(dev)->rx_head);
+			printk("%02X %02X %02X %02X ", next_ptr >> 8, next_ptr & 255, status & 255, status >> 8);
+			for (i = 2; i < 14; i++)
+				printk("%02X ", addrs[i]);
+			printk("\n");
+			next_ptr = priv(dev)->rx_head;
+			break;
+		}
+ /*
+ * ignore our own packets...
+ */
+ if (!(*(unsigned long *)&dev->dev_addr[0] ^ *(unsigned long *)&addrs[2+6]) &&
+ !(*(unsigned short *)&dev->dev_addr[4] ^ *(unsigned short *)&addrs[2+10])) {
+			maxcnt++;	/* compensate for looped-back packet */
+ ether3_outw(next_ptr >> 8, REG_RECVEND);
+ } else
+ if (!(status & (RXSTAT_OVERSIZE|RXSTAT_CRCERROR|RXSTAT_DRIBBLEERROR|RXSTAT_SHORTPACKET))) {
+ unsigned int length = next_ptr - this_ptr;
+ struct sk_buff *skb;
+
+ if (next_ptr <= this_ptr)
+ length += RX_END - RX_START;
+
+ skb = dev_alloc_skb(length + 2);
+ if (skb) {
+ unsigned char *buf;
+
+ skb_reserve(skb, 2);
+ buf = skb_put(skb, length);
+ ether3_readbuffer(dev, buf + 12, length - 12);
+ ether3_outw(next_ptr >> 8, REG_RECVEND);
+ *(unsigned short *)(buf + 0) = *(unsigned short *)(addrs + 2);
+ *(unsigned long *)(buf + 2) = *(unsigned long *)(addrs + 4);
+ *(unsigned long *)(buf + 6) = *(unsigned long *)(addrs + 8);
+ *(unsigned short *)(buf + 10) = *(unsigned short *)(addrs + 12);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ received ++;
+ } else
+ goto dropping;
+ } else {
+ struct net_device_stats *stats = &dev->stats;
+ ether3_outw(next_ptr >> 8, REG_RECVEND);
+ if (status & RXSTAT_OVERSIZE) stats->rx_over_errors ++;
+ if (status & RXSTAT_CRCERROR) stats->rx_crc_errors ++;
+ if (status & RXSTAT_DRIBBLEERROR) stats->rx_fifo_errors ++;
+ if (status & RXSTAT_SHORTPACKET) stats->rx_length_errors ++;
+ stats->rx_errors++;
+ }
+ }
+ while (-- maxcnt);
+
+done:
+ dev->stats.rx_packets += received;
+ priv(dev)->rx_head = next_ptr;
+ /*
+ * If rx went off line, then that means that the buffer may be full. We
+ * have dropped at least one packet.
+ */
+ if (!(ether3_inw(REG_STATUS) & STAT_RXON)) {
+ dev->stats.rx_dropped++;
+ ether3_outw(next_ptr, REG_RECVPTR);
+ ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND);
+ }
+
+ return maxcnt;
+
+dropping:{
+ static unsigned long last_warned;
+
+ ether3_outw(next_ptr >> 8, REG_RECVEND);
+ /*
+ * Don't print this message too many times...
+ */
+ if (time_after(jiffies, last_warned + 10 * HZ)) {
+ last_warned = jiffies;
+ printk("%s: memory squeeze, dropping packet.\n", dev->name);
+ }
+ dev->stats.rx_dropped++;
+ goto done;
+ }
+}
+
+/*
+ * Update stats for the transmitted packet(s)
+ */
+static void ether3_tx(struct net_device *dev)
+{
+ unsigned int tx_tail = priv(dev)->tx_tail;
+ int max_work = 14;
+
+ do {
+ unsigned long status;
+
+ /*
+ * Read the packet header
+ */
+ ether3_setbuffer(dev, buffer_read, tx_tail * 0x600);
+ status = ether3_readlong(dev);
+
+ /*
+ * Check to see if this packet has been transmitted
+ */
+ if ((status & (TXSTAT_DONE | TXHDR_TRANSMIT)) !=
+ (TXSTAT_DONE | TXHDR_TRANSMIT))
+ break;
+
+ /*
+ * Update errors
+ */
+ if (!(status & (TXSTAT_BABBLED | TXSTAT_16COLLISIONS)))
+ dev->stats.tx_packets++;
+ else {
+ dev->stats.tx_errors++;
+ if (status & TXSTAT_16COLLISIONS)
+ dev->stats.collisions += 16;
+ if (status & TXSTAT_BABBLED)
+ dev->stats.tx_fifo_errors++;
+ }
+
+ tx_tail = (tx_tail + 1) & 15;
+ } while (--max_work);
+
+ if (priv(dev)->tx_tail != tx_tail) {
+ priv(dev)->tx_tail = tx_tail;
+ netif_wake_queue(dev);
+ }
+}
+
+static void __devinit ether3_banner(void)
+{
+ static unsigned version_printed = 0;
+
+ if (net_debug && version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+}
+
+static const struct net_device_ops ether3_netdev_ops = {
+ .ndo_open = ether3_open,
+ .ndo_stop = ether3_close,
+ .ndo_start_xmit = ether3_sendpacket,
+ .ndo_set_rx_mode = ether3_setmulticastlist,
+ .ndo_tx_timeout = ether3_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int __devinit
+ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
+{
+ const struct ether3_data *data = id->data;
+ struct net_device *dev;
+ int bus_type, ret;
+
+ ether3_banner();
+
+ ret = ecard_request_resources(ec);
+ if (ret)
+ goto out;
+
+ dev = alloc_etherdev(sizeof(struct dev_priv));
+ if (!dev) {
+ ret = -ENOMEM;
+ goto release;
+ }
+
+ SET_NETDEV_DEV(dev, &ec->dev);
+
+ priv(dev)->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
+ if (!priv(dev)->base) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ ec->irqaddr = priv(dev)->base + data->base_offset;
+ ec->irqmask = 0xf0;
+
+ priv(dev)->seeq = priv(dev)->base + data->base_offset;
+ dev->irq = ec->irq;
+
+ ether3_addr(dev->dev_addr, ec);
+
+ init_timer(&priv(dev)->timer);
+
+ /* Reset card...
+ */
+ ether3_outb(0x80, REG_CONFIG2 + 4);
+ bus_type = BUS_UNKNOWN;
+ udelay(4);
+
+ /* Test using Receive Pointer (16-bit register) to find out
+ * how the ether3 is connected to the bus...
+ */
+ if (ether3_probe_bus_8(dev, 0x100) &&
+ ether3_probe_bus_8(dev, 0x201))
+ bus_type = BUS_8;
+
+ if (bus_type == BUS_UNKNOWN &&
+ ether3_probe_bus_16(dev, 0x101) &&
+ ether3_probe_bus_16(dev, 0x201))
+ bus_type = BUS_16;
+
+ switch (bus_type) {
+ case BUS_UNKNOWN:
+ printk(KERN_ERR "%s: unable to identify bus width\n", dev->name);
+ ret = -ENODEV;
+ goto free;
+
+ case BUS_8:
+ printk(KERN_ERR "%s: %s found, but is an unsupported "
+ "8-bit card\n", dev->name, data->name);
+ ret = -ENODEV;
+ goto free;
+
+ default:
+ break;
+ }
+
+ if (ether3_init_2(dev)) {
+ ret = -ENODEV;
+ goto free;
+ }
+
+ dev->netdev_ops = &ether3_netdev_ops;
+ dev->watchdog_timeo = 5 * HZ / 100;
+
+ ret = register_netdev(dev);
+ if (ret)
+ goto free;
+
+ printk("%s: %s in slot %d, %pM\n",
+ dev->name, data->name, ec->slot_no, dev->dev_addr);
+
+ ecard_set_drvdata(ec, dev);
+ return 0;
+
+ free:
+ free_netdev(dev);
+ release:
+ ecard_release_resources(ec);
+ out:
+ return ret;
+}
+
+static void __devexit ether3_remove(struct expansion_card *ec)
+{
+ struct net_device *dev = ecard_get_drvdata(ec);
+
+ ecard_set_drvdata(ec, NULL);
+
+ unregister_netdev(dev);
+ free_netdev(dev);
+ ecard_release_resources(ec);
+}
+
+static struct ether3_data ether3 = {
+ .name = "ether3",
+ .base_offset = 0,
+};
+
+static struct ether3_data etherb = {
+ .name = "etherb",
+ .base_offset = 0x800,
+};
+
+static const struct ecard_id ether3_ids[] = {
+ { MANU_ANT2, PROD_ANT_ETHER3, &ether3 },
+ { MANU_ANT, PROD_ANT_ETHER3, &ether3 },
+ { MANU_ANT, PROD_ANT_ETHERB, &etherb },
+ { 0xffff, 0xffff }
+};
+
+static struct ecard_driver ether3_driver = {
+ .probe = ether3_probe,
+ .remove = __devexit_p(ether3_remove),
+ .id_table = ether3_ids,
+ .drv = {
+ .name = "ether3",
+ },
+};
+
+static int __init ether3_init(void)
+{
+ return ecard_register_driver(&ether3_driver);
+}
+
+static void __exit ether3_exit(void)
+{
+ ecard_remove_driver(&ether3_driver);
+}
+
+module_init(ether3_init);
+module_exit(ether3_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/seeq/ether3.h b/drivers/net/ethernet/seeq/ether3.h
new file mode 100644
index 000000000000..2db63b08bdf3
--- /dev/null
+++ b/drivers/net/ethernet/seeq/ether3.h
@@ -0,0 +1,176 @@
+/*
+ * linux/drivers/acorn/net/ether3.h
+ *
+ * Copyright (C) 1995-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * network driver for Acorn/ANT Ether3 cards
+ */
+
+#ifndef _LINUX_ether3_H
+#define _LINUX_ether3_H
+
+/* use 0 for production, 1 for verification, >2 for debug. debug flags: */
+#define DEBUG_TX 2
+#define DEBUG_RX 4
+#define DEBUG_INT 8
+#define DEBUG_IC 16
+#ifndef NET_DEBUG
+#define NET_DEBUG 0
+#endif
+
+#define priv(dev) ((struct dev_priv *)netdev_priv(dev))
+
+/* Command register definitions & bits */
+#define REG_COMMAND (priv(dev)->seeq + 0x0000)
+#define CMD_ENINTDMA 0x0001
+#define CMD_ENINTRX 0x0002
+#define CMD_ENINTTX 0x0004
+#define CMD_ENINTBUFWIN 0x0008
+#define CMD_ACKINTDMA 0x0010
+#define CMD_ACKINTRX 0x0020
+#define CMD_ACKINTTX 0x0040
+#define CMD_ACKINTBUFWIN 0x0080
+#define CMD_DMAON 0x0100
+#define CMD_RXON 0x0200
+#define CMD_TXON 0x0400
+#define CMD_DMAOFF 0x0800
+#define CMD_RXOFF 0x1000
+#define CMD_TXOFF 0x2000
+#define CMD_FIFOREAD 0x4000
+#define CMD_FIFOWRITE 0x8000
+
+/* status register */
+#define REG_STATUS (priv(dev)->seeq + 0x0000)
+#define STAT_ENINTSTAT 0x0001
+#define STAT_ENINTRX 0x0002
+#define STAT_ENINTTX 0x0004
+#define STAT_ENINTBUFWIN 0x0008
+#define STAT_INTDMA 0x0010
+#define STAT_INTRX 0x0020
+#define STAT_INTTX 0x0040
+#define STAT_INTBUFWIN 0x0080
+#define STAT_DMAON 0x0100
+#define STAT_RXON 0x0200
+#define STAT_TXON 0x0400
+#define STAT_FIFOFULL 0x2000
+#define STAT_FIFOEMPTY 0x4000
+#define STAT_FIFODIR 0x8000
+
+/* configuration register 1 */
+#define REG_CONFIG1 (priv(dev)->seeq + 0x0040)
+#define CFG1_BUFSELSTAT0 0x0000
+#define CFG1_BUFSELSTAT1 0x0001
+#define CFG1_BUFSELSTAT2 0x0002
+#define CFG1_BUFSELSTAT3 0x0003
+#define CFG1_BUFSELSTAT4 0x0004
+#define CFG1_BUFSELSTAT5 0x0005
+#define CFG1_ADDRPROM 0x0006
+#define CFG1_TRANSEND 0x0007
+#define CFG1_LOCBUFMEM 0x0008
+#define CFG1_INTVECTOR 0x0009
+#define CFG1_RECVSPECONLY 0x0000
+#define CFG1_RECVSPECBROAD 0x4000
+#define CFG1_RECVSPECBRMULTI 0x8000
+#define CFG1_RECVPROMISC 0xC000
+
+/* The following aren't in 8004 */
+#define CFG1_DMABURSTCONT 0x0000
+#define CFG1_DMABURST800NS 0x0010
+#define CFG1_DMABURST1600NS 0x0020
+#define CFG1_DMABURST3200NS 0x0030
+#define CFG1_DMABURST1 0x0000
+#define CFG1_DMABURST4 0x0040
+#define CFG1_DMABURST8 0x0080
+#define CFG1_DMABURST16 0x00C0
+#define CFG1_RECVCOMPSTAT0 0x0100
+#define CFG1_RECVCOMPSTAT1 0x0200
+#define CFG1_RECVCOMPSTAT2 0x0400
+#define CFG1_RECVCOMPSTAT3 0x0800
+#define CFG1_RECVCOMPSTAT4 0x1000
+#define CFG1_RECVCOMPSTAT5 0x2000
+
+/* configuration register 2 */
+#define REG_CONFIG2 (priv(dev)->seeq + 0x0080)
+#define CFG2_BYTESWAP 0x0001
+#define CFG2_ERRENCRC 0x0008
+#define CFG2_ERRENDRIBBLE 0x0010
+#define CFG2_ERRSHORTFRAME 0x0020
+#define CFG2_SLOTSELECT 0x0040
+#define CFG2_PREAMSELECT 0x0080
+#define CFG2_ADDRLENGTH 0x0100
+#define CFG2_RECVCRC 0x0200
+#define CFG2_XMITNOCRC 0x0400
+#define CFG2_LOOPBACK 0x0800
+#define CFG2_CTRLO 0x1000
+#define CFG2_RESET 0x8000
+
+#define REG_RECVEND (priv(dev)->seeq + 0x00c0)
+
+#define REG_BUFWIN (priv(dev)->seeq + 0x0100)
+
+#define REG_RECVPTR (priv(dev)->seeq + 0x0140)
+
+#define REG_TRANSMITPTR (priv(dev)->seeq + 0x0180)
+
+#define REG_DMAADDR (priv(dev)->seeq + 0x01c0)
+
+/*
+ * Cards transmit/receive headers
+ */
+#define TX_NEXT (0xffff)
+#define TXHDR_ENBABBLEINT (1 << 16)
+#define TXHDR_ENCOLLISIONINT (1 << 17)
+#define TXHDR_EN16COLLISION (1 << 18)
+#define TXHDR_ENSUCCESS (1 << 19)
+#define TXHDR_DATAFOLLOWS (1 << 21)
+#define TXHDR_CHAINCONTINUE (1 << 22)
+#define TXHDR_TRANSMIT (1 << 23)
+#define TXSTAT_BABBLED (1 << 24)
+#define TXSTAT_COLLISION (1 << 25)
+#define TXSTAT_16COLLISIONS (1 << 26)
+#define TXSTAT_DONE (1 << 31)
+
+#define RX_NEXT (0xffff)
+#define RXHDR_CHAINCONTINUE (1 << 6)
+#define RXHDR_RECEIVE (1 << 7)
+#define RXSTAT_OVERSIZE (1 << 8)
+#define RXSTAT_CRCERROR (1 << 9)
+#define RXSTAT_DRIBBLEERROR (1 << 10)
+#define RXSTAT_SHORTPACKET (1 << 11)
+#define RXSTAT_DONE (1 << 15)
+
+
+#define TX_START 0x0000
+#define TX_END 0x6000
+#define RX_START 0x6000
+#define RX_LEN 0xA000
+#define RX_END 0x10000
+/* must be a power of 2 and greater than MAX_TX_BUFFERED */
+#define MAX_TXED 16
+#define MAX_TX_BUFFERED 10
+
+struct dev_priv {
+ void __iomem *base;
+ void __iomem *seeq;
+ struct {
+ unsigned int command;
+ unsigned int config1;
+ unsigned int config2;
+ } regs;
+ unsigned char tx_head; /* buffer nr to insert next packet */
+ unsigned char tx_tail; /* buffer nr of transmitting packet */
+ unsigned int rx_head; /* address to fetch next packet from */
+ struct timer_list timer;
+ int broken; /* 0 = ok, 1 = something went wrong */
+};
+
+struct ether3_data {
+ const char name[8];
+ unsigned long base_offset;
+};
+
+#endif
diff --git a/drivers/net/ethernet/seeq/seeq8005.c b/drivers/net/ethernet/seeq/seeq8005.c
new file mode 100644
index 000000000000..60561451789b
--- /dev/null
+++ b/drivers/net/ethernet/seeq/seeq8005.c
@@ -0,0 +1,752 @@
+/* seeq8005.c: A network driver for linux. */
+/*
+ Based on skeleton.c,
+ Written 1993-94 by Donald Becker.
+ See the skeleton.c file for further copyright information.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as hamish@zot.apana.org.au
+
+ This file is a network device driver for the SEEQ 8005 chipset and
+ the Linux operating system.
+
+*/
+
+static const char version[] =
+ "seeq8005.c:v1.00 8/07/95 Hamish Coleman (hamish@zot.apana.org.au)\n";
+
+/*
+ Sources:
+ SEEQ 8005 databook
+
+ Version history:
+ 1.00 Public release. cosmetic changes (no warnings now)
+	0.68	Turning per-packet, interrupt debug messages off - testing for release.
+ 0.67 timing problems/bad buffer reads seem to be fixed now
+ 0.63 *!@$ protocol=eth_type_trans -- now packets flow
+ 0.56 Send working
+ 0.48 Receive working
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include "seeq8005.h"
+
+/* First, a few definitions that the brave might change. */
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int seeq8005_portlist[] __initdata =
+ { 0x300, 0x320, 0x340, 0x360, 0};
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ unsigned short receive_ptr; /* What address in packet memory do we expect a recv_pkt_header? */
+ long open_time; /* Useless example local info. */
+};
+
+/* The station (ethernet) address prefix, used for IDing the board. */
+#define SA_ADDR0 0x00
+#define SA_ADDR1 0x80
+#define SA_ADDR2 0x4b
+
+/* Index to functions, as function prototypes. */
+
+static int seeq8005_probe1(struct net_device *dev, int ioaddr);
+static int seeq8005_open(struct net_device *dev);
+static void seeq8005_timeout(struct net_device *dev);
+static netdev_tx_t seeq8005_send_packet(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t seeq8005_interrupt(int irq, void *dev_id);
+static void seeq8005_rx(struct net_device *dev);
+static int seeq8005_close(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+
+/* Example routines you must write ;->. */
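+/* tx_done() is true while the transmitter is still enabled; the timeout
+ * handler uses it to distinguish a likely IRQ conflict from a cable
+ * problem. */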
+#define tx_done(dev) (inw(SEEQ_STATUS) & SEEQSTAT_TX_ON)
+static void hardware_send_packet(struct net_device *dev, char *buf, int length);
+extern void seeq8005_init(struct net_device *dev, int startp);
+static inline void wait_for_buffer(struct net_device *dev);
+
+
+/* Check for a network adaptor of this type, and return the device iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ */
+
+static int io = 0x320;
+static int irq = 10;
+
+struct net_device * __init seeq8005_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
+ unsigned *port;
+ int err = 0;
+
+ if (!dev)
+		return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ io = dev->base_addr;
+ irq = dev->irq;
+ }
+
+ if (io > 0x1ff) { /* Check a single specified location. */
+ err = seeq8005_probe1(dev, io);
+ } else if (io != 0) { /* Don't probe at all. */
+ err = -ENXIO;
+ } else {
+ for (port = seeq8005_portlist; *port; port++) {
+ if (seeq8005_probe1(dev, *port) == 0)
+ break;
+ }
+ if (!*port)
+ err = -ENODEV;
+ }
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ release_region(dev->base_addr, SEEQ8005_IO_EXTENT);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static const struct net_device_ops seeq8005_netdev_ops = {
+ .ndo_open = seeq8005_open,
+ .ndo_stop = seeq8005_close,
+ .ndo_start_xmit = seeq8005_send_packet,
+ .ndo_tx_timeout = seeq8005_timeout,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/* This is the real probe routine. Linux has a history of friendly device
+   probes on the ISA bus.  A good device probe avoids doing writes, and
+ verifies that the correct device exists and functions. */
+
+static int __init seeq8005_probe1(struct net_device *dev, int ioaddr)
+{
+ static unsigned version_printed;
+ int i,j;
+ unsigned char SA_prom[32];
+ int old_cfg1;
+ int old_cfg2;
+ int old_stat;
+ int old_dmaar;
+ int old_rear;
+ int retval;
+
+ if (!request_region(ioaddr, SEEQ8005_IO_EXTENT, "seeq8005"))
+ return -ENODEV;
+
+ if (net_debug>1)
+ printk("seeq8005: probing at 0x%x\n",ioaddr);
+
+ old_stat = inw(SEEQ_STATUS); /* read status register */
+ if (old_stat == 0xffff) {
+ retval = -ENODEV;
+ goto out; /* assume that 0xffff == no device */
+ }
+ if ( (old_stat & 0x1800) != 0x1800 ) { /* assume that unused bits are 1, as my manual says */
+ if (net_debug>1) {
+ printk("seeq8005: reserved stat bits != 0x1800\n");
+ printk(" == 0x%04x\n",old_stat);
+ }
+ retval = -ENODEV;
+ goto out;
+ }
+
+ old_rear = inw(SEEQ_REA);
+ if (old_rear == 0xffff) {
+ outw(0,SEEQ_REA);
+ if (inw(SEEQ_REA) == 0xffff) { /* assume that 0xffff == no device */
+ retval = -ENODEV;
+ goto out;
+ }
+ } else if ((old_rear & 0xff00) != 0xff00) { /* assume that unused bits are 1 */
+ if (net_debug>1) {
+ printk("seeq8005: unused rear bits != 0xff00\n");
+ printk(" == 0x%04x\n",old_rear);
+ }
+ retval = -ENODEV;
+ goto out;
+ }
+
+ old_cfg2 = inw(SEEQ_CFG2); /* read CFG2 register */
+ old_cfg1 = inw(SEEQ_CFG1);
+ old_dmaar = inw(SEEQ_DMAAR);
+
+ if (net_debug>4) {
+ printk("seeq8005: stat = 0x%04x\n",old_stat);
+ printk("seeq8005: cfg1 = 0x%04x\n",old_cfg1);
+ printk("seeq8005: cfg2 = 0x%04x\n",old_cfg2);
+ printk("seeq8005: raer = 0x%04x\n",old_rear);
+ printk("seeq8005: dmaar= 0x%04x\n",old_dmaar);
+ }
+
+ outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD); /* setup for reading PROM */
+ outw( 0, SEEQ_DMAAR); /* set starting PROM address */
+ outw( SEEQCFG1_BUFFER_PROM, SEEQ_CFG1); /* set buffer to look at PROM */
+
+
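+	/* Read the 32-byte address/config PROM through the buffer window,
+	 * accumulating a byte-wide checksum in j as we go. */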
+ j=0;
+ for(i=0; i <32; i++) {
+ j+= SA_prom[i] = inw(SEEQ_BUFFER) & 0xff;
+ }
+
+#if 0
+ /* untested because I only have the one card */
+ if ( (j&0xff) != 0 ) { /* checksum appears to be 8bit = 0 */
+ if (net_debug>1) { /* check this before deciding that we have a card */
+ printk("seeq8005: prom sum error\n");
+ }
+ outw( old_stat, SEEQ_STATUS);
+ outw( old_dmaar, SEEQ_DMAAR);
+ outw( old_cfg1, SEEQ_CFG1);
+ retval = -ENODEV;
+ goto out;
+ }
+#endif
+
+ outw( SEEQCFG2_RESET, SEEQ_CFG2); /* reset the card */
+ udelay(5);
+ outw( SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+
+ if (net_debug) {
+ printk("seeq8005: prom sum = 0x%08x\n",j);
+ for(j=0; j<32; j+=16) {
+ printk("seeq8005: prom %02x: ",j);
+ for(i=0;i<16;i++) {
+ printk("%02x ",SA_prom[j|i]);
+ }
+ printk(" ");
+ for(i=0;i<16;i++) {
+ if ((SA_prom[j|i]>31)&&(SA_prom[j|i]<127)) {
+ printk("%c", SA_prom[j|i]);
+ } else {
+ printk(" ");
+ }
+ }
+ printk("\n");
+ }
+ }
+
+#if 0
+ /*
+ * testing the packet buffer memory doesn't work yet
+ * but all other buffer accesses do
+ * - fixing is not a priority
+ */
+ if (net_debug>1) { /* test packet buffer memory */
+ printk("seeq8005: testing packet buffer ... ");
+ outw( SEEQCFG1_BUFFER_BUFFER, SEEQ_CFG1);
+ outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+ outw( 0 , SEEQ_DMAAR);
+ for(i=0;i<32768;i++) {
+ outw(0x5a5a, SEEQ_BUFFER);
+ }
+ j=jiffies+HZ;
+ while ( ((inw(SEEQ_STATUS) & SEEQSTAT_FIFO_EMPTY) != SEEQSTAT_FIFO_EMPTY) && time_before(jiffies, j) )
+ mb();
+ outw( 0 , SEEQ_DMAAR);
+ while ( ((inw(SEEQ_STATUS) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && time_before(jiffies, j+HZ))
+ mb();
+ if ( (inw(SEEQ_STATUS) & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
+ outw( SEEQCMD_WINDOW_INT_ACK | (inw(SEEQ_STATUS)& SEEQCMD_INT_MASK), SEEQ_CMD);
+ outw( SEEQCMD_FIFO_READ | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+ j=0;
+ for(i=0;i<32768;i++) {
+ if (inw(SEEQ_BUFFER) != 0x5a5a)
+ j++;
+ }
+ if (j) {
+ printk("%i\n",j);
+ } else {
+ printk("ok.\n");
+ }
+ }
+#endif
+
+ if (net_debug && version_printed++ == 0)
+ printk(version);
+
+ printk("%s: %s found at %#3x, ", dev->name, "seeq8005", ioaddr);
+
+ /* Fill in the 'dev' fields. */
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ /* Retrieve and print the ethernet address. */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = SA_prom[i+6];
+ printk("%pM", dev->dev_addr);
+
+ if (dev->irq == 0xff)
+ ; /* Do nothing: a user-level program will set it. */
+ else if (dev->irq < 2) { /* "Auto-IRQ" */
+ unsigned long cookie = probe_irq_on();
+
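+		/* Briefly enabling the receiver with RX interrupts on should
+		 * provoke an interrupt that probe_irq_off() can identify. */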
+ outw( SEEQCMD_RX_INT_EN | SEEQCMD_SET_RX_ON | SEEQCMD_SET_RX_OFF, SEEQ_CMD );
+
+ dev->irq = probe_irq_off(cookie);
+
+ if (net_debug >= 2)
+ printk(" autoirq is %d\n", dev->irq);
+ } else if (dev->irq == 2)
+ /* Fixup for users that don't know that IRQ 2 is really IRQ 9,
+ * or don't know which one to set.
+ */
+ dev->irq = 9;
+
+#if 0
+ {
+ int irqval = request_irq(dev->irq, seeq8005_interrupt, 0, "seeq8005", dev);
+ if (irqval) {
+ printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
+ dev->irq, irqval);
+ retval = -EAGAIN;
+ goto out;
+ }
+ }
+#endif
+ dev->netdev_ops = &seeq8005_netdev_ops;
+ dev->watchdog_timeo = HZ/20;
+ dev->flags &= ~IFF_MULTICAST;
+
+ return 0;
+out:
+ release_region(ioaddr, SEEQ8005_IO_EXTENT);
+ return retval;
+}
+
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine should set everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+   there is a non-reboot way to recover if something goes wrong.
+ */
+static int seeq8005_open(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ {
+ int irqval = request_irq(dev->irq, seeq8005_interrupt, 0, "seeq8005", dev);
+ if (irqval) {
+ printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
+ dev->irq, irqval);
+ return -EAGAIN;
+ }
+ }
+
+ /* Reset the hardware here. Don't forget to set the station address. */
+ seeq8005_init(dev, 1);
+
+ lp->open_time = jiffies;
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+static void seeq8005_timeout(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
+ tx_done(dev) ? "IRQ conflict" : "network cable problem");
+ /* Try to restart the adaptor. */
+ seeq8005_init(dev, 1);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t seeq8005_send_packet(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ short length = skb->len;
+ unsigned char *buf;
+
+ if (length < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ length = ETH_ZLEN;
+ }
+ buf = skb->data;
+
+ /* Block a timer-based transmit from overlapping */
+ netif_stop_queue(dev);
+
+ hardware_send_packet(dev, buf, length);
+ dev->stats.tx_bytes += length;
+ dev_kfree_skb (skb);
+ /* You might need to clean up and record Tx statistics here. */
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * wait_for_buffer
+ *
+ * This routine waits for the SEEQ chip to assert that the FIFO is ready
+ * by checking for a window interrupt, and then clearing it. This has to
+ * occur in the interrupt handler!
+ */
+static inline void wait_for_buffer(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ unsigned long tmp;
+ int status;
+
+ tmp = jiffies + HZ;
+ while ( ( ((status=inw(SEEQ_STATUS)) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && time_before(jiffies, tmp))
+ cpu_relax();
+
+ if ( (status & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
+ outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static irqreturn_t seeq8005_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct net_local *lp;
+ int ioaddr, status, boguscount = 0;
+ int handled = 0;
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ status = inw(SEEQ_STATUS);
+ do {
+ if (net_debug >2) {
+ printk("%s: int, status=0x%04x\n",dev->name,status);
+ }
+
+ if (status & SEEQSTAT_WINDOW_INT) {
+ handled = 1;
+ outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+ if (net_debug) {
+ printk("%s: window int!\n",dev->name);
+ }
+ }
+ if (status & SEEQSTAT_TX_INT) {
+ handled = 1;
+ outw( SEEQCMD_TX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+ dev->stats.tx_packets++;
+ netif_wake_queue(dev); /* Inform upper layers. */
+ }
+ if (status & SEEQSTAT_RX_INT) {
+ handled = 1;
+ /* Got a packet(s). */
+ seeq8005_rx(dev);
+ }
+ status = inw(SEEQ_STATUS);
+ } while ( (++boguscount < 10) && (status & SEEQSTAT_ANY_INT)) ;
+
+ if(net_debug>2) {
+ printk("%s: eoi\n",dev->name);
+ }
+ return IRQ_RETVAL(handled);
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void seeq8005_rx(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int boguscount = 10;
+ int pkt_hdr;
+ int ioaddr = dev->base_addr;
+
+ do {
+ int next_packet;
+ int pkt_len;
+ int i;
+ int status;
+
+ status = inw(SEEQ_STATUS);
+ outw( lp->receive_ptr, SEEQ_DMAAR);
+ outw(SEEQCMD_FIFO_READ | SEEQCMD_RX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+ wait_for_buffer(dev);
+ next_packet = ntohs(inw(SEEQ_BUFFER));
+ pkt_hdr = inw(SEEQ_BUFFER);
+
+ if (net_debug>2) {
+ printk("%s: 0x%04x recv next=0x%04x, hdr=0x%04x\n",dev->name,lp->receive_ptr,next_packet,pkt_hdr);
+ }
+
+ if ((next_packet == 0) || ((pkt_hdr & SEEQPKTH_CHAIN)==0)) { /* Read all the frames? */
+ return; /* Done for now */
+ }
+
+ if ((pkt_hdr & SEEQPKTS_DONE)==0)
+ break;
+
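+		/* The receive area wraps from the top of packet memory back
+		 * to its base at (DEFAULT_TEA+1)<<8, so adjust when the
+		 * next-packet pointer has wrapped below the current one. */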
+ if (next_packet < lp->receive_ptr) {
+ pkt_len = (next_packet + 0x10000 - ((DEFAULT_TEA+1)<<8)) - lp->receive_ptr - 4;
+ } else {
+ pkt_len = next_packet - lp->receive_ptr - 4;
+ }
+
+ if (next_packet < ((DEFAULT_TEA+1)<<8)) { /* is the next_packet address sane? */
+ printk("%s: recv packet ring corrupt, resetting board\n",dev->name);
+ seeq8005_init(dev,1);
+ return;
+ }
+
+ lp->receive_ptr = next_packet;
+
+ if (net_debug>2) {
+ printk("%s: recv len=0x%04x\n",dev->name,pkt_len);
+ }
+
+ if (pkt_hdr & SEEQPKTS_ANY_ERROR) { /* There was an error. */
+ dev->stats.rx_errors++;
+ if (pkt_hdr & SEEQPKTS_SHORT) dev->stats.rx_frame_errors++;
+ if (pkt_hdr & SEEQPKTS_DRIB) dev->stats.rx_frame_errors++;
+ if (pkt_hdr & SEEQPKTS_OVERSIZE) dev->stats.rx_over_errors++;
+ if (pkt_hdr & SEEQPKTS_CRC_ERR) dev->stats.rx_crc_errors++;
+ /* skip over this packet */
+ outw( SEEQCMD_FIFO_WRITE | SEEQCMD_DMA_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+ outw( (lp->receive_ptr & 0xff00)>>8, SEEQ_REA);
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+ unsigned char *buf;
+
+			skb = dev_alloc_skb(pkt_len + 2);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ dev->stats.rx_dropped++;
+ break;
+ }
+			skb_reserve(skb, 2);	/* align data on a 16-byte boundary */
+ buf = skb_put(skb,pkt_len);
+
+ insw(SEEQ_BUFFER, buf, (pkt_len + 1) >> 1);
+
+ if (net_debug>2) {
+ char * p = buf;
+ printk("%s: recv ",dev->name);
+ for(i=0;i<14;i++) {
+ printk("%02x ",*(p++)&0xff);
+ }
+ printk("\n");
+ }
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ } while ((--boguscount) && (pkt_hdr & SEEQPKTH_CHAIN));
+
+	/* If any worthwhile packets have been received, netif_rx()
+	   has queued them for the network softirq, which will process
+	   them once we return from the interrupt handler. */
+}
+
+/* The inverse routine to net_open(). */
+static int seeq8005_close(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+
+ lp->open_time = 0;
+
+ netif_stop_queue(dev);
+
+ /* Flush the Tx and disable Rx here. */
+ outw( SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+
+ free_irq(dev->irq, dev);
+
+ /* Update the statistics here. */
+
+ return 0;
+
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+ */
+static void set_multicast_list(struct net_device *dev)
+{
+/*
+ * I _could_ do up to 6 addresses here, but won't (yet?)
+ */
+
+#if 0
+ int ioaddr = dev->base_addr;
+/*
+ * hmm, not even sure if my matching works _anyway_ - seem to be receiving
+ * _everything_ . . .
+ */
+
+ if (num_addrs) { /* Enable promiscuous mode */
+ outw( (inw(SEEQ_CFG1) & ~SEEQCFG1_MATCH_MASK)| SEEQCFG1_MATCH_ALL, SEEQ_CFG1);
+ dev->flags|=IFF_PROMISC;
+ } else { /* Disable promiscuous mode, use normal mode */
+ outw( (inw(SEEQ_CFG1) & ~SEEQCFG1_MATCH_MASK)| SEEQCFG1_MATCH_BROAD, SEEQ_CFG1);
+ }
+#endif
+}
+
+void seeq8005_init(struct net_device *dev, int startp)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int ioaddr = dev->base_addr;
+ int i;
+
+ outw(SEEQCFG2_RESET, SEEQ_CFG2); /* reset device */
+ udelay(5);
+
+ outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+ outw( 0, SEEQ_DMAAR); /* load start address into both low and high byte */
+/* wait_for_buffer(dev); */ /* I think that you only need a wait for memory buffer */
+ outw( SEEQCFG1_BUFFER_MAC0, SEEQ_CFG1);
+
+ for(i=0;i<6;i++) { /* set Station address */
+ outb(dev->dev_addr[i], SEEQ_BUFFER);
+ udelay(2);
+ }
+
+ outw( SEEQCFG1_BUFFER_TEA, SEEQ_CFG1); /* set xmit end area pointer to 16K */
+ outb( DEFAULT_TEA, SEEQ_BUFFER); /* this gives us 16K of send buffer and 48K of recv buffer */
+
+ lp->receive_ptr = (DEFAULT_TEA+1)<<8; /* so we can find our packet_header */
+ outw( lp->receive_ptr, SEEQ_RPR); /* Receive Pointer Register is set to recv buffer memory */
+
+ outw( 0x00ff, SEEQ_REA); /* Receive Area End */
+
+ if (net_debug>4) {
+ printk("%s: SA0 = ",dev->name);
+
+ outw( SEEQCMD_FIFO_READ | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+ outw( 0, SEEQ_DMAAR);
+ outw( SEEQCFG1_BUFFER_MAC0, SEEQ_CFG1);
+
+ for(i=0;i<6;i++) {
+ printk("%02x ",inb(SEEQ_BUFFER));
+ }
+ printk("\n");
+ }
+
+ outw( SEEQCFG1_MAC0_EN | SEEQCFG1_MATCH_BROAD | SEEQCFG1_BUFFER_BUFFER, SEEQ_CFG1);
+ outw( SEEQCFG2_AUTO_REA | SEEQCFG2_CTRLO, SEEQ_CFG2);
+ outw( SEEQCMD_SET_RX_ON | SEEQCMD_TX_INT_EN | SEEQCMD_RX_INT_EN, SEEQ_CMD);
+
+ if (net_debug>4) {
+ int old_cfg1;
+ old_cfg1 = inw(SEEQ_CFG1);
+ printk("%s: stat = 0x%04x\n",dev->name,inw(SEEQ_STATUS));
+ printk("%s: cfg1 = 0x%04x\n",dev->name,old_cfg1);
+ printk("%s: cfg2 = 0x%04x\n",dev->name,inw(SEEQ_CFG2));
+ printk("%s: raer = 0x%04x\n",dev->name,inw(SEEQ_REA));
+ printk("%s: dmaar= 0x%04x\n",dev->name,inw(SEEQ_DMAAR));
+
+ }
+}
+
+
+static void hardware_send_packet(struct net_device * dev, char *buf, int length)
+{
+ int ioaddr = dev->base_addr;
+ int status = inw(SEEQ_STATUS);
+ int transmit_ptr = 0;
+ unsigned long tmp;
+
+ if (net_debug>4) {
+ printk("%s: send 0x%04x\n",dev->name,length);
+ }
+
+ /* Set FIFO to writemode and set packet-buffer address */
+ outw( SEEQCMD_FIFO_WRITE | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+ outw( transmit_ptr, SEEQ_DMAAR);
+
+ /* output SEEQ Packet header barfage */
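+	/* The header is two 16-bit words: the total length including the
+	 * four header bytes (big-endian, hence htons), then the command
+	 * flags for this buffer. */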
+ outw( htons(length + 4), SEEQ_BUFFER);
+ outw( SEEQPKTH_XMIT | SEEQPKTH_DATA_FOLLOWS | SEEQPKTH_XMIT_INT_EN, SEEQ_BUFFER );
+
+ /* blat the buffer */
+ outsw( SEEQ_BUFFER, buf, (length +1) >> 1);
+ /* paranoia !! */
+ outw( 0, SEEQ_BUFFER);
+ outw( 0, SEEQ_BUFFER);
+
+ /* set address of start of transmit chain */
+ outw( transmit_ptr, SEEQ_TPR);
+
+ /* drain FIFO */
+ tmp = jiffies;
+ while ( (((status=inw(SEEQ_STATUS)) & SEEQSTAT_FIFO_EMPTY) == 0) && time_before(jiffies, tmp + HZ))
+ mb();
+
+ /* doit ! */
+ outw( SEEQCMD_WINDOW_INT_ACK | SEEQCMD_SET_TX_ON | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+
+}
+
+
+#ifdef MODULE
+
+static struct net_device *dev_seeq;
+MODULE_LICENSE("GPL");
+module_param(io, int, 0);
+module_param(irq, int, 0);
+MODULE_PARM_DESC(io, "SEEQ 8005 I/O base address");
+MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number");
+
+int __init init_module(void)
+{
+ dev_seeq = seeq8005_probe(-1);
+ if (IS_ERR(dev_seeq))
+ return PTR_ERR(dev_seeq);
+ return 0;
+}
+
+void __exit cleanup_module(void)
+{
+ unregister_netdev(dev_seeq);
+ release_region(dev_seeq->base_addr, SEEQ8005_IO_EXTENT);
+ free_netdev(dev_seeq);
+}
+
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/seeq/seeq8005.h b/drivers/net/ethernet/seeq/seeq8005.h
new file mode 100644
index 000000000000..5dfb0098c6ca
--- /dev/null
+++ b/drivers/net/ethernet/seeq/seeq8005.h
@@ -0,0 +1,156 @@
+/*
+ * defines, etc for the seeq8005
+ */
+
+/*
+ * This file is distributed under GPL.
+ *
+ * The style and layout of this file are copied
+ * from many of the other linux network device drivers.
+ */
+
+/* The number of low I/O ports used by the ethercard. */
+#define SEEQ8005_IO_EXTENT 16
+
+#define SEEQ_B (ioaddr)
+
+#define SEEQ_CMD (SEEQ_B) /* Write only */
+#define SEEQ_STATUS (SEEQ_B) /* Read only */
+#define SEEQ_CFG1 (SEEQ_B + 2)
+#define SEEQ_CFG2 (SEEQ_B + 4)
+#define SEEQ_REA (SEEQ_B + 6) /* Receive End Area Register */
+#define SEEQ_RPR (SEEQ_B + 10) /* Receive Pointer Register */
+#define SEEQ_TPR (SEEQ_B + 12) /* Transmit Pointer Register */
+#define SEEQ_DMAAR (SEEQ_B + 14) /* DMA Address Register */
+#define SEEQ_BUFFER (SEEQ_B + 8) /* Buffer Window Register */
+
+#define DEFAULT_TEA (0x3f)
+
+#define SEEQCMD_DMA_INT_EN (0x0001) /* DMA Interrupt Enable */
+#define SEEQCMD_RX_INT_EN (0x0002) /* Receive Interrupt Enable */
+#define SEEQCMD_TX_INT_EN (0x0004) /* Transmit Interrupt Enable */
+#define SEEQCMD_WINDOW_INT_EN (0x0008) /* What the hell is this for?? */
+#define SEEQCMD_INT_MASK (0x000f)
+
+#define SEEQCMD_DMA_INT_ACK (0x0010) /* DMA ack */
+#define SEEQCMD_RX_INT_ACK (0x0020)
+#define SEEQCMD_TX_INT_ACK (0x0040)
+#define SEEQCMD_WINDOW_INT_ACK (0x0080)
+#define SEEQCMD_ACK_ALL (0x00f0)
+
+#define SEEQCMD_SET_DMA_ON (0x0100) /* Enables DMA Request logic */
+#define SEEQCMD_SET_RX_ON (0x0200) /* Enables Packet RX */
+#define SEEQCMD_SET_TX_ON (0x0400) /* Starts TX run */
+#define SEEQCMD_SET_DMA_OFF (0x0800)
+#define SEEQCMD_SET_RX_OFF (0x1000)
+#define SEEQCMD_SET_TX_OFF (0x2000)
+#define SEEQCMD_SET_ALL_OFF (0x3800) /* set all logic off */
+
+#define SEEQCMD_FIFO_READ (0x4000) /* Set FIFO to read mode (read from Buffer) */
+#define SEEQCMD_FIFO_WRITE (0x8000) /* Set FIFO to write mode */
+
+#define SEEQSTAT_DMA_INT_EN (0x0001) /* Status of interrupt enable */
+#define SEEQSTAT_RX_INT_EN (0x0002)
+#define SEEQSTAT_TX_INT_EN (0x0004)
+#define SEEQSTAT_WINDOW_INT_EN (0x0008)
+
+#define SEEQSTAT_DMA_INT (0x0010) /* Interrupt flagged */
+#define SEEQSTAT_RX_INT (0x0020)
+#define SEEQSTAT_TX_INT (0x0040)
+#define SEEQSTAT_WINDOW_INT (0x0080)
+#define SEEQSTAT_ANY_INT (0x00f0)
+
+#define SEEQSTAT_DMA_ON (0x0100) /* DMA logic on */
+#define SEEQSTAT_RX_ON (0x0200) /* Packet RX on */
+#define SEEQSTAT_TX_ON (0x0400) /* TX running */
+
+#define SEEQSTAT_FIFO_FULL (0x2000)
+#define SEEQSTAT_FIFO_EMPTY (0x4000)
+#define SEEQSTAT_FIFO_DIR (0x8000) /* 1=read, 0=write */
+
+#define SEEQCFG1_BUFFER_MASK (0x000f) /* define what maps into the BUFFER register */
+#define SEEQCFG1_BUFFER_MAC0 (0x0000) /* MAC station addresses 0-5 */
+#define SEEQCFG1_BUFFER_MAC1 (0x0001)
+#define SEEQCFG1_BUFFER_MAC2 (0x0002)
+#define SEEQCFG1_BUFFER_MAC3 (0x0003)
+#define SEEQCFG1_BUFFER_MAC4 (0x0004)
+#define SEEQCFG1_BUFFER_MAC5 (0x0005)
+#define SEEQCFG1_BUFFER_PROM (0x0006) /* The Address/CFG PROM */
+#define SEEQCFG1_BUFFER_TEA (0x0007) /* Transmit end area */
+#define SEEQCFG1_BUFFER_BUFFER (0x0008) /* Packet buffer memory */
+#define SEEQCFG1_BUFFER_INT_VEC (0x0009) /* Interrupt Vector */
+
+#define SEEQCFG1_DMA_INTVL_MASK (0x0030)
+#define SEEQCFG1_DMA_CONT (0x0000)
+#define SEEQCFG1_DMA_800ns (0x0010)
+#define SEEQCFG1_DMA_1600ns (0x0020)
+#define SEEQCFG1_DMA_3200ns (0x0030)
+
+#define SEEQCFG1_DMA_LEN_MASK (0x00c0)
+#define SEEQCFG1_DMA_LEN1 (0x0000)
+#define SEEQCFG1_DMA_LEN2 (0x0040)
+#define SEEQCFG1_DMA_LEN4 (0x0080)
+#define SEEQCFG1_DMA_LEN8 (0x00c0)
+
+#define SEEQCFG1_MAC_MASK (0x3f00) /* Dis/enable bits for MAC addresses */
+#define SEEQCFG1_MAC0_EN (0x0100)
+#define SEEQCFG1_MAC1_EN (0x0200)
+#define SEEQCFG1_MAC2_EN (0x0400)
+#define SEEQCFG1_MAC3_EN (0x0800)
+#define SEEQCFG1_MAC4_EN (0x1000)
+#define SEEQCFG1_MAC5_EN (0x2000)
+
+#define SEEQCFG1_MATCH_MASK (0xc000) /* Packet matching logic cfg bits */
+#define SEEQCFG1_MATCH_SPECIFIC (0x0000) /* only matching MAC addresses */
+#define SEEQCFG1_MATCH_BROAD (0x4000) /* matching and broadcast addresses */
+#define SEEQCFG1_MATCH_MULTI (0x8000) /* matching, broadcast and multicast */
+#define SEEQCFG1_MATCH_ALL (0xc000) /* Promiscuous mode */
+
+#define SEEQCFG1_DEFAULT (SEEQCFG1_BUFFER_BUFFER | SEEQCFG1_MAC0_EN | SEEQCFG1_MATCH_BROAD)
+
+#define SEEQCFG2_BYTE_SWAP (0x0001) /* 0=Intel byte-order */
+#define SEEQCFG2_AUTO_REA (0x0002) /* if set, Receive End Area will be updated when reading from Buffer */
+
+#define SEEQCFG2_CRC_ERR_EN (0x0008) /* enables receiving of packets with CRC errors */
+#define SEEQCFG2_DRIBBLE_EN (0x0010) /* enables receiving of non-aligned packets */
+#define SEEQCFG2_SHORT_EN (0x0020) /* enables receiving of short packets */
+
+#define SEEQCFG2_SLOTSEL (0x0040) /* 0= standard IEEE802.3, 1= smaller,faster, non-standard */
+#define SEEQCFG2_NO_PREAM (0x0080) /* 1= user supplies Xmit preamble bytes */
+#define SEEQCFG2_ADDR_LEN (0x0100) /* 1= 2byte addresses */
+#define SEEQCFG2_REC_CRC (0x0200) /* 0= received packets will have CRC stripped from them */
+#define SEEQCFG2_XMIT_NO_CRC (0x0400) /* don't xmit CRC with each packet (user supplies it) */
+#define SEEQCFG2_LOOPBACK (0x0800)
+#define SEEQCFG2_CTRLO (0x1000)
+#define SEEQCFG2_RESET (0x8000) /* software Hard-reset bit */
+
+struct seeq_pkt_hdr {
+ unsigned short next; /* address of next packet header */
+ unsigned char babble_int:1, /* enable int on >1514 byte packet */
+ coll_int:1, /* enable int on collision */
+		coll_16_int:1,	/* enable int on >15 collisions */
+		xmit_int:1,	/* enable int on success (or xmit with <15 collisions) */
+ unused:1,
+ data_follows:1, /* if not set, process this as a header and pointer only */
+		chain_cont:1,	/* if set, more headers in chain; only cmd bit valid in recv header */
+ xmit_recv:1; /* if set, a xmit packet, else a receive packet.*/
+ unsigned char status;
+};
+
+#define SEEQPKTH_BAB_INT_EN (0x01) /* xmit only */
+#define SEEQPKTH_COL_INT_EN (0x02) /* xmit only */
+#define SEEQPKTH_COL16_INT_EN (0x04) /* xmit only */
+#define SEEQPKTH_XMIT_INT_EN (0x08) /* xmit only */
+#define SEEQPKTH_DATA_FOLLOWS (0x20) /* supposedly in xmit only */
+#define SEEQPKTH_CHAIN (0x40) /* more headers follow */
+#define SEEQPKTH_XMIT (0x80)
+
+#define SEEQPKTS_BABBLE (0x0100) /* xmit only */
+#define SEEQPKTS_OVERSIZE (0x0100) /* recv only */
+#define SEEQPKTS_COLLISION (0x0200) /* xmit only */
+#define SEEQPKTS_CRC_ERR (0x0200) /* recv only */
+#define SEEQPKTS_COLL16 (0x0400) /* xmit only */
+#define SEEQPKTS_DRIB (0x0400) /* recv only */
+#define SEEQPKTS_SHORT (0x0800) /* recv only */
+#define SEEQPKTS_DONE (0x8000)
+#define SEEQPKTS_ANY_ERROR (0x0f00)
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
new file mode 100644
index 000000000000..c3673f151a41
--- /dev/null
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -0,0 +1,858 @@
+/*
+ * sgiseeq.c: Seeq8003 ethernet driver for SGI machines.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ */
+
+#undef DEBUG
+
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/sgi/hpc3.h>
+#include <asm/sgi/ip22.h>
+#include <asm/sgi/seeq.h>
+
+#include "sgiseeq.h"
+
+static char *sgiseeqstr = "SGI Seeq8003";
+
+/*
+ * If you want speed, you do something silly; it always has worked for me. So,
+ * with that in mind, I've decided to make this driver look completely like a
+ * stupid Lance from a driver architecture perspective. Only difference is that
+ * here our "ring buffer" looks and acts like a real Lance one does but is
+ * laid out like how the HPC DMA and the Seeq want it to. You'd be surprised
+ * how a stupid idea like this can pay off in performance, not to mention
+ * making this driver 2,000 times easier to write. ;-)
+ */
+
+/* Tune these if we tend to run out often etc. */
+#define SEEQ_RX_BUFFERS 16
+#define SEEQ_TX_BUFFERS 16
+
+#define PKT_BUF_SZ 1584
+
+#define NEXT_RX(i) (((i) + 1) & (SEEQ_RX_BUFFERS - 1))
+#define NEXT_TX(i) (((i) + 1) & (SEEQ_TX_BUFFERS - 1))
+#define PREV_RX(i) (((i) - 1) & (SEEQ_RX_BUFFERS - 1))
+#define PREV_TX(i) (((i) - 1) & (SEEQ_TX_BUFFERS - 1))
+
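+/* One descriptor is always left unused, so tx_new == tx_old can only mean
+ * an empty ring. */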
+#define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
+ sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
+ sp->tx_old - sp->tx_new - 1)
+
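+/* Convert a descriptor's kernel virtual address into the bus address the
+ * HPC sees, relative to the base of the srings allocation. */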
+#define VIRT_TO_DMA(sp, v) ((sp)->srings_dma + \
+ (dma_addr_t)((unsigned long)(v) - \
+ (unsigned long)((sp)->rx_desc)))
+
+/* Copy frames shorter than rx_copybreak, otherwise pass on up in
+ * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
+ */
+static int rx_copybreak = 100;
+
+#define PAD_SIZE (128 - sizeof(struct hpc_dma_desc) - sizeof(void *))
+
+struct sgiseeq_rx_desc {
+ volatile struct hpc_dma_desc rdma;
+ u8 padding[PAD_SIZE];
+ struct sk_buff *skb;
+};
+
+struct sgiseeq_tx_desc {
+ volatile struct hpc_dma_desc tdma;
+ u8 padding[PAD_SIZE];
+ struct sk_buff *skb;
+};
+
+/*
+ * Warning: This structure is laid out in a certain way because HPC dma
+ * descriptors must be 8-byte aligned. So don't touch this without
+ * some care.
+ */
+struct sgiseeq_init_block { /* Note the name ;-) */
+ struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS];
+ struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS];
+};
+
+struct sgiseeq_private {
+ struct sgiseeq_init_block *srings;
+ dma_addr_t srings_dma;
+
+ /* Ptrs to the descriptors in uncached space. */
+ struct sgiseeq_rx_desc *rx_desc;
+ struct sgiseeq_tx_desc *tx_desc;
+
+ char *name;
+ struct hpc3_ethregs *hregs;
+ struct sgiseeq_regs *sregs;
+
+ /* Ring entry counters. */
+ unsigned int rx_new, tx_new;
+ unsigned int rx_old, tx_old;
+
+ int is_edlc;
+ unsigned char control;
+ unsigned char mode;
+
+ spinlock_t tx_lock;
+};
+
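+/* The descriptors live in non-coherent memory, so they must be synced to
+ * the CPU before reading and back to the device after writing; rx and tx
+ * descriptors share the same padded 128-byte size, so one length serves
+ * for both. */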
+static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
+{
+ dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
+ DMA_FROM_DEVICE);
+}
+
+static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
+{
+ dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
+ DMA_TO_DEVICE);
+}
+
+static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
+{
+ hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
+ udelay(20);
+ hregs->reset = 0;
+}
+
+static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
+ struct sgiseeq_regs *sregs)
+{
+ hregs->rx_ctrl = hregs->tx_ctrl = 0;
+ hpc3_eth_reset(hregs);
+}
+
+#define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \
+ SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC)
+
+static inline void seeq_go(struct sgiseeq_private *sp,
+ struct hpc3_ethregs *hregs,
+ struct sgiseeq_regs *sregs)
+{
+ sregs->rstat = sp->mode | RSTAT_GO_BITS;
+ hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE;
+}
+
+static inline void __sgiseeq_set_mac_address(struct net_device *dev)
+{
+ struct sgiseeq_private *sp = netdev_priv(dev);
+ struct sgiseeq_regs *sregs = sp->sregs;
+ int i;
+
+ sregs->tstat = SEEQ_TCMD_RB0;
+ for (i = 0; i < 6; i++)
+ sregs->rw.eth_addr[i] = dev->dev_addr[i];
+}
+
+static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sgiseeq_private *sp = netdev_priv(dev);
+ struct sockaddr *sa = addr;
+
+ memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+
+ spin_lock_irq(&sp->tx_lock);
+ __sgiseeq_set_mac_address(dev);
+ spin_unlock_irq(&sp->tx_lock);
+
+ return 0;
+}
+
+#define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD)
+#define RCNTCFG_INIT (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
+#define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))
+
+static int seeq_init_ring(struct net_device *dev)
+{
+ struct sgiseeq_private *sp = netdev_priv(dev);
+ int i;
+
+ netif_stop_queue(dev);
+ sp->rx_new = sp->tx_new = 0;
+ sp->rx_old = sp->tx_old = 0;
+
+ __sgiseeq_set_mac_address(dev);
+
+ /* Setup tx ring. */
+ for(i = 0; i < SEEQ_TX_BUFFERS; i++) {
+ sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
+ dma_sync_desc_dev(dev, &sp->tx_desc[i]);
+ }
+
+ /* And now the rx ring. */
+ for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
+ if (!sp->rx_desc[i].skb) {
+ dma_addr_t dma_addr;
+ struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
+
+ if (skb == NULL)
+ return -ENOMEM;
+ skb_reserve(skb, 2);
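+			/* Map from two bytes before skb->data so that the
+			 * alignment pad just reserved is part of the buffer
+			 * the HPC fills. */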
+ dma_addr = dma_map_single(dev->dev.parent,
+ skb->data - 2,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ sp->rx_desc[i].skb = skb;
+ sp->rx_desc[i].rdma.pbuf = dma_addr;
+ }
+ sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
+ dma_sync_desc_dev(dev, &sp->rx_desc[i]);
+ }
+ sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
+ dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
+ return 0;
+}
+
+static void seeq_purge_ring(struct net_device *dev)
+{
+ struct sgiseeq_private *sp = netdev_priv(dev);
+ int i;
+
+ /* clear tx ring. */
+ for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
+ if (sp->tx_desc[i].skb) {
+ dev_kfree_skb(sp->tx_desc[i].skb);
+ sp->tx_desc[i].skb = NULL;
+ }
+ }
+
+ /* And now the rx ring. */
+ for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
+ if (sp->rx_desc[i].skb) {
+ dev_kfree_skb(sp->rx_desc[i].skb);
+ sp->rx_desc[i].skb = NULL;
+ }
+ }
+}
+
+#ifdef DEBUG
+static struct sgiseeq_private *gpriv;
+static struct net_device *gdev;
+
+static void sgiseeq_dump_rings(void)
+{
+ static int once;
+ struct sgiseeq_rx_desc *r = gpriv->rx_desc;
+ struct sgiseeq_tx_desc *t = gpriv->tx_desc;
+ struct hpc3_ethregs *hregs = gpriv->hregs;
+ int i;
+
+ if (once)
+ return;
+ once++;
+ printk("RING DUMP:\n");
+ for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
+ printk("RX [%d]: @(%p) [%08x,%08x,%08x] ",
+ i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
+ r[i].rdma.pnext);
+ i += 1;
+ printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
+ i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
+ r[i].rdma.pnext);
+ }
+ for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
+ printk("TX [%d]: @(%p) [%08x,%08x,%08x] ",
+ i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
+ t[i].tdma.pnext);
+ i += 1;
+ printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
+ i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
+ t[i].tdma.pnext);
+ }
+ printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n",
+ gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old);
+ printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n",
+ hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl);
+ printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n",
+ hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl);
+}
+#endif
+
+#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
+#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)
+
+static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
+ struct sgiseeq_regs *sregs)
+{
+ struct hpc3_ethregs *hregs = sp->hregs;
+ int err;
+
+ reset_hpc3_and_seeq(hregs, sregs);
+ err = seeq_init_ring(dev);
+ if (err)
+ return err;
+
+ /* Setup to field the proper interrupt types. */
+ if (sp->is_edlc) {
+ sregs->tstat = TSTAT_INIT_EDLC;
+ sregs->rw.wregs.control = sp->control;
+ sregs->rw.wregs.frame_gap = 0;
+ } else {
+ sregs->tstat = TSTAT_INIT_SEEQ;
+ }
+
+ hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc);
+ hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc);
+
+ seeq_go(sp, hregs, sregs);
+ return 0;
+}
+
+static void record_rx_errors(struct net_device *dev, unsigned char status)
+{
+ if (status & SEEQ_RSTAT_OVERF ||
+ status & SEEQ_RSTAT_SFRAME)
+ dev->stats.rx_over_errors++;
+ if (status & SEEQ_RSTAT_CERROR)
+ dev->stats.rx_crc_errors++;
+ if (status & SEEQ_RSTAT_DERROR)
+ dev->stats.rx_frame_errors++;
+ if (status & SEEQ_RSTAT_REOF)
+ dev->stats.rx_errors++;
+}
+
+static inline void rx_maybe_restart(struct sgiseeq_private *sp,
+ struct hpc3_ethregs *hregs,
+ struct sgiseeq_regs *sregs)
+{
+ if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
+ hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new);
+ seeq_go(sp, hregs, sregs);
+ }
+}
+
+static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
+ struct hpc3_ethregs *hregs,
+ struct sgiseeq_regs *sregs)
+{
+ struct sgiseeq_rx_desc *rd;
+ struct sk_buff *skb = NULL;
+ struct sk_buff *newskb;
+ unsigned char pkt_status;
+ int len = 0;
+ unsigned int orig_end = PREV_RX(sp->rx_new);
+
+ /* Service every received packet. */
+ rd = &sp->rx_desc[sp->rx_new];
+ dma_sync_desc_cpu(dev, rd);
+ while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
+ len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
+ dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ pkt_status = rd->skb->data[len];
+ if (pkt_status & SEEQ_RSTAT_FIG) {
+ /* Packet is OK. */
+ /* We don't want to receive our own packets */
+ if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) {
+ if (len > rx_copybreak) {
+ skb = rd->skb;
+ newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
+ if (!newskb) {
+ newskb = skb;
+ skb = NULL;
+ goto memory_squeeze;
+ }
+ skb_reserve(newskb, 2);
+ } else {
+ skb = netdev_alloc_skb_ip_align(dev, len);
+ if (skb)
+ skb_copy_to_linear_data(skb, rd->skb->data, len);
+
+ newskb = rd->skb;
+ }
+memory_squeeze:
+ if (skb) {
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ } else {
+ printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+ dev->stats.rx_dropped++;
+ }
+ } else {
+ /* Silently drop my own packets */
+ newskb = rd->skb;
+ }
+ } else {
+ record_rx_errors(dev, pkt_status);
+ newskb = rd->skb;
+ }
+ rd->skb = newskb;
+ rd->rdma.pbuf = dma_map_single(dev->dev.parent,
+ newskb->data - 2,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+
+ /* Return the entry to the ring pool. */
+ rd->rdma.cntinfo = RCNTINFO_INIT;
+ sp->rx_new = NEXT_RX(sp->rx_new);
+ dma_sync_desc_dev(dev, rd);
+ rd = &sp->rx_desc[sp->rx_new];
+ dma_sync_desc_cpu(dev, rd);
+ }
+ dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
+ sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
+ dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
+ dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
+ sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
+ dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
+ rx_maybe_restart(sp, hregs, sregs);
+}
+
+static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
+ struct sgiseeq_regs *sregs)
+{
+ if (sp->is_edlc) {
+ sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
+ sregs->rw.wregs.control = sp->control;
+ }
+}
+
+static inline void kick_tx(struct net_device *dev,
+ struct sgiseeq_private *sp,
+ struct hpc3_ethregs *hregs)
+{
+ struct sgiseeq_tx_desc *td;
+ int i = sp->tx_old;
+
+	/* If the HPC isn't doing anything and there are more packets
+ * with ETXD cleared and XIU set we must make very certain
+ * that we restart the HPC else we risk locking up the
+ * adapter. The following code is only safe iff the HPCDMA
+ * is not active!
+ */
+ td = &sp->tx_desc[i];
+ dma_sync_desc_cpu(dev, td);
+ while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
+ (HPCDMA_XIU | HPCDMA_ETXD)) {
+ i = NEXT_TX(i);
+ td = &sp->tx_desc[i];
+ dma_sync_desc_cpu(dev, td);
+ }
+ if (td->tdma.cntinfo & HPCDMA_XIU) {
+ hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
+ hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
+ }
+}
+
+static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
+ struct hpc3_ethregs *hregs,
+ struct sgiseeq_regs *sregs)
+{
+ struct sgiseeq_tx_desc *td;
+ unsigned long status = hregs->tx_ctrl;
+ int j;
+
+ tx_maybe_reset_collisions(sp, sregs);
+
+ if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) {
+ /* Oops, HPC detected some sort of error. */
+ if (status & SEEQ_TSTAT_R16)
+ dev->stats.tx_aborted_errors++;
+ if (status & SEEQ_TSTAT_UFLOW)
+ dev->stats.tx_fifo_errors++;
+ if (status & SEEQ_TSTAT_LCLS)
+ dev->stats.collisions++;
+ }
+
+ /* Ack 'em... */
+ for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
+ td = &sp->tx_desc[j];
+
+ dma_sync_desc_cpu(dev, td);
+ if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
+ break;
+ if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
+ if (!(status & HPC3_ETXCTRL_ACTIVE)) {
+ hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
+ hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
+ }
+ break;
+ }
+ dev->stats.tx_packets++;
+ sp->tx_old = NEXT_TX(sp->tx_old);
+ td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
+ td->tdma.cntinfo |= HPCDMA_EOX;
+ if (td->skb) {
+ dev_kfree_skb_any(td->skb);
+ td->skb = NULL;
+ }
+ dma_sync_desc_dev(dev, td);
+ }
+}
+
+static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct sgiseeq_private *sp = netdev_priv(dev);
+ struct hpc3_ethregs *hregs = sp->hregs;
+ struct sgiseeq_regs *sregs = sp->sregs;
+
+ spin_lock(&sp->tx_lock);
+
+ /* Ack the IRQ and set software state. */
+ hregs->reset = HPC3_ERST_CLRIRQ;
+
+ /* Always check for received packets. */
+ sgiseeq_rx(dev, sp, hregs, sregs);
+
+ /* Only check for tx acks if we have something queued. */
+ if (sp->tx_old != sp->tx_new)
+ sgiseeq_tx(dev, sp, hregs, sregs);
+
+ if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) {
+ netif_wake_queue(dev);
+ }
+ spin_unlock(&sp->tx_lock);
+
+ return IRQ_HANDLED;
+}
+
+static int sgiseeq_open(struct net_device *dev)
+{
+ struct sgiseeq_private *sp = netdev_priv(dev);
+ struct sgiseeq_regs *sregs = sp->sregs;
+ unsigned int irq = dev->irq;
+ int err;
+
+ if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
+ printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
+ return -EAGAIN;
+ }
+
+ err = init_seeq(dev, sp, sregs);
+ if (err)
+ goto out_free_irq;
+
+ netif_start_queue(dev);
+
+ return 0;
+
+out_free_irq:
+ free_irq(irq, dev);
+
+ return err;
+}
+
+static int sgiseeq_close(struct net_device *dev)
+{
+ struct sgiseeq_private *sp = netdev_priv(dev);
+ struct sgiseeq_regs *sregs = sp->sregs;
+ unsigned int irq = dev->irq;
+
+ netif_stop_queue(dev);
+
+ /* Shutdown the Seeq. */
+ reset_hpc3_and_seeq(sp->hregs, sregs);
+ free_irq(irq, dev);
+ seeq_purge_ring(dev);
+
+ return 0;
+}
+
+static inline int sgiseeq_reset(struct net_device *dev)
+{
+ struct sgiseeq_private *sp = netdev_priv(dev);
+ struct sgiseeq_regs *sregs = sp->sregs;
+ int err;
+
+ err = init_seeq(dev, sp, sregs);
+ if (err)
+ return err;
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+
+ return 0;
+}
+
+static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct sgiseeq_private *sp = netdev_priv(dev);
+ struct hpc3_ethregs *hregs = sp->hregs;
+ unsigned long flags;
+ struct sgiseeq_tx_desc *td;
+ int len, entry;
+
+ spin_lock_irqsave(&sp->tx_lock, flags);
+
+ /* Setup... */
+ len = skb->len;
+ if (len < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN)) {
+ spin_unlock_irqrestore(&sp->tx_lock, flags);
+ return NETDEV_TX_OK;
+ }
+ len = ETH_ZLEN;
+ }
+
+ dev->stats.tx_bytes += len;
+ entry = sp->tx_new;
+ td = &sp->tx_desc[entry];
+ dma_sync_desc_cpu(dev, td);
+
+ /* Create entry. There are so many races with adding a new
+ * descriptor to the chain:
+ * 1) Assume that the HPC is off processing a DMA chain while
+ * we are changing all of the following.
+	 * 2) Do not allow the HPC to look at a new descriptor until
+	 *    we have completely set up its state. This means, do
+	 *    not clear HPCDMA_EOX in the current last descriptor
+	 *    until the one we are adding looks consistent and could
+	 *    be processed right now.
+ * 3) The tx interrupt code must notice when we've added a new
+ * entry and the HPC got to the end of the chain before we
+ * added this new entry and restarted it.
+ */
+ td->skb = skb;
+ td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data,
+ len, DMA_TO_DEVICE);
+ td->tdma.cntinfo = (len & HPCDMA_BCNT) |
+ HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
+ dma_sync_desc_dev(dev, td);
+ if (sp->tx_old != sp->tx_new) {
+ struct sgiseeq_tx_desc *backend;
+
+ backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
+ dma_sync_desc_cpu(dev, backend);
+ backend->tdma.cntinfo &= ~HPCDMA_EOX;
+ dma_sync_desc_dev(dev, backend);
+ }
+ sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */
+
+ /* Maybe kick the HPC back into motion. */
+ if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
+ kick_tx(dev, sp, hregs);
+
+ if (!TX_BUFFS_AVAIL(sp))
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&sp->tx_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static void timeout(struct net_device *dev)
+{
+ printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
+ sgiseeq_reset(dev);
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+static void sgiseeq_set_multicast(struct net_device *dev)
+{
+ struct sgiseeq_private *sp = netdev_priv(dev);
+ unsigned char oldmode = sp->mode;
+
+ if(dev->flags & IFF_PROMISC)
+ sp->mode = SEEQ_RCMD_RANY;
+ else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
+ sp->mode = SEEQ_RCMD_RBMCAST;
+ else
+ sp->mode = SEEQ_RCMD_RBCAST;
+
+ /* XXX I know this sucks, but is there a better way to reprogram
+ * XXX the receiver? At least, this shouldn't happen too often.
+ */
+
+ if (oldmode != sp->mode)
+ sgiseeq_reset(dev);
+}
+
+static inline void setup_tx_ring(struct net_device *dev,
+ struct sgiseeq_tx_desc *buf,
+ int nbufs)
+{
+ struct sgiseeq_private *sp = netdev_priv(dev);
+ int i = 0;
+
+ while (i < (nbufs - 1)) {
+ buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
+ buf[i].tdma.pbuf = 0;
+ dma_sync_desc_dev(dev, &buf[i]);
+ i++;
+ }
+ buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
+ dma_sync_desc_dev(dev, &buf[i]);
+}
+
+static inline void setup_rx_ring(struct net_device *dev,
+ struct sgiseeq_rx_desc *buf,
+ int nbufs)
+{
+ struct sgiseeq_private *sp = netdev_priv(dev);
+ int i = 0;
+
+ while (i < (nbufs - 1)) {
+ buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
+ buf[i].rdma.pbuf = 0;
+ dma_sync_desc_dev(dev, &buf[i]);
+ i++;
+ }
+ buf[i].rdma.pbuf = 0;
+ buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
+ dma_sync_desc_dev(dev, &buf[i]);
+}
+
+static const struct net_device_ops sgiseeq_netdev_ops = {
+ .ndo_open = sgiseeq_open,
+ .ndo_stop = sgiseeq_close,
+ .ndo_start_xmit = sgiseeq_start_xmit,
+ .ndo_tx_timeout = timeout,
+ .ndo_set_rx_mode = sgiseeq_set_multicast,
+ .ndo_set_mac_address = sgiseeq_set_mac_address,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __devinit sgiseeq_probe(struct platform_device *pdev)
+{
+ struct sgiseeq_platform_data *pd = pdev->dev.platform_data;
+ struct hpc3_regs *hpcregs = pd->hpc;
+ struct sgiseeq_init_block *sr;
+ unsigned int irq = pd->irq;
+ struct sgiseeq_private *sp;
+ struct net_device *dev;
+ int err;
+
+ dev = alloc_etherdev(sizeof (struct sgiseeq_private));
+ if (!dev) {
+ printk(KERN_ERR "Sgiseeq: Etherdev alloc failed, aborting.\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ platform_set_drvdata(pdev, dev);
+ sp = netdev_priv(dev);
+
+ /* Make private data page aligned */
+ sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings),
+ &sp->srings_dma, GFP_KERNEL);
+ if (!sr) {
+ printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
+ err = -ENOMEM;
+ goto err_out_free_dev;
+ }
+ sp->srings = sr;
+ sp->rx_desc = sp->srings->rxvector;
+ sp->tx_desc = sp->srings->txvector;
+
+ /* A couple calculations now, saves many cycles later. */
+ setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
+ setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);
+
+ memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
+
+#ifdef DEBUG
+ gpriv = sp;
+ gdev = dev;
+#endif
+ sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
+ sp->hregs = &hpcregs->ethregs;
+ sp->name = sgiseeqstr;
+ sp->mode = SEEQ_RCMD_RBCAST;
+
+ /* Setup PIO and DMA transfer timing */
+ sp->hregs->pconfig = 0x161;
+ sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
+ HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;
+
+ /* Reset the chip. */
+ hpc3_eth_reset(sp->hregs);
+
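+	/* An all-zero collision counter read-back indicates the extended
+	 * EDLC-style part, which has the control register written below;
+	 * a plain 8003 does not. */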
+ sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
+ if (sp->is_edlc)
+ sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
+ SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
+ SEEQ_CTRL_ENCARR;
+
+ dev->netdev_ops = &sgiseeq_netdev_ops;
+ dev->watchdog_timeo = (200 * HZ) / 1000;
+ dev->irq = irq;
+
+ if (register_netdev(dev)) {
+ printk(KERN_ERR "Sgiseeq: Cannot register net device, "
+ "aborting.\n");
+ err = -ENODEV;
+ goto err_out_free_page;
+ }
+
+ printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
+
+ return 0;
+
+err_out_free_page:
+	dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
+			     sp->srings_dma);
+err_out_free_dev:
+ free_netdev(dev);
+
+err_out:
+ return err;
+}
+
+static int __exit sgiseeq_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct sgiseeq_private *sp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
+ sp->srings_dma);
+ free_netdev(dev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sgiseeq_driver = {
+ .probe = sgiseeq_probe,
+ .remove = __exit_p(sgiseeq_remove),
+ .driver = {
+ .name = "sgiseeq",
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init sgiseeq_module_init(void)
+{
+ if (platform_driver_register(&sgiseeq_driver)) {
+ printk(KERN_ERR "Driver registration failed\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __exit sgiseeq_module_exit(void)
+{
+ platform_driver_unregister(&sgiseeq_driver);
+}
+
+module_init(sgiseeq_module_init);
+module_exit(sgiseeq_module_exit);
+
+MODULE_DESCRIPTION("SGI Seeq 8003 driver");
+MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sgiseeq");
diff --git a/drivers/net/ethernet/seeq/sgiseeq.h b/drivers/net/ethernet/seeq/sgiseeq.h
new file mode 100644
index 000000000000..2211e2987a8d
--- /dev/null
+++ b/drivers/net/ethernet/seeq/sgiseeq.h
@@ -0,0 +1,103 @@
+/*
+ * sgiseeq.h: Defines for the Seeq8003 ethernet controller.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ */
+#ifndef _SGISEEQ_H
+#define _SGISEEQ_H
+
+struct sgiseeq_wregs {
+	volatile unsigned int multicast_high[2];
+ volatile unsigned int frame_gap;
+ volatile unsigned int control;
+};
+
+struct sgiseeq_rregs {
+ volatile unsigned int collision_tx[2];
+ volatile unsigned int collision_all[2];
+ volatile unsigned int _unused0;
+ volatile unsigned int rflags;
+};
+
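+/* The low six register locations are bank-switched: reads and writes see
+ * different registers, hence the union below. */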
+struct sgiseeq_regs {
+ union {
+ volatile unsigned int eth_addr[6];
+ volatile unsigned int multicast_low[6];
+ struct sgiseeq_wregs wregs;
+ struct sgiseeq_rregs rregs;
+ } rw;
+ volatile unsigned int rstat;
+ volatile unsigned int tstat;
+};
+
+/* Seeq8003 receive status register */
+#define SEEQ_RSTAT_OVERF 0x001 /* Overflow */
+#define SEEQ_RSTAT_CERROR 0x002 /* CRC error */
+#define SEEQ_RSTAT_DERROR 0x004 /* Dribble error */
+#define SEEQ_RSTAT_SFRAME 0x008 /* Short frame */
+#define SEEQ_RSTAT_REOF 0x010 /* Received end of frame */
+#define SEEQ_RSTAT_FIG 0x020 /* Frame is good */
+#define SEEQ_RSTAT_TIMEO 0x040 /* Timeout, or late receive */
+#define SEEQ_RSTAT_WHICH 0x080 /* Which status, 1=old 0=new */
+#define SEEQ_RSTAT_LITTLE 0x100 /* DMA is done in little endian format */
+#define SEEQ_RSTAT_SDMA 0x200 /* DMA has started */
+#define SEEQ_RSTAT_ADMA 0x400 /* DMA is active */
+#define SEEQ_RSTAT_ROVERF 0x800 /* Receive buffer overflow */
+
+/* Seeq8003 receive command register */
+#define SEEQ_RCMD_RDISAB 0x000 /* Disable receiver on the Seeq8003 */
+#define SEEQ_RCMD_IOVERF 0x001 /* IRQ on buffer overflows */
+#define SEEQ_RCMD_ICRC 0x002 /* IRQ on CRC errors */
+#define SEEQ_RCMD_IDRIB 0x004 /* IRQ on dribble errors */
+#define SEEQ_RCMD_ISHORT 0x008 /* IRQ on short frames */
+#define SEEQ_RCMD_IEOF 0x010 /* IRQ on end of frame */
+#define SEEQ_RCMD_IGOOD 0x020 /* IRQ on good frames */
+#define SEEQ_RCMD_RANY 0x040 /* Receive any frame */
+#define SEEQ_RCMD_RBCAST 0x080 /* Receive broadcasts */
+#define SEEQ_RCMD_RBMCAST 0x0c0 /* Receive broadcasts/multicasts */
+
+/* Seeq8003 transmit status register */
+#define SEEQ_TSTAT_UFLOW 0x001 /* Transmit buffer underflow */
+#define SEEQ_TSTAT_CLS 0x002 /* Collision detected */
+#define SEEQ_TSTAT_R16 0x004 /* Did 16 retries to tx a frame */
+#define SEEQ_TSTAT_PTRANS 0x008 /* Packet was transmitted ok */
+#define SEEQ_TSTAT_LCLS 0x010 /* Late collision occurred */
+#define SEEQ_TSTAT_WHICH 0x080 /* Which status, 1=old 0=new */
+#define SEEQ_TSTAT_TLE 0x100 /* DMA is done in little endian format */
+#define SEEQ_TSTAT_SDMA 0x200 /* DMA has started */
+#define SEEQ_TSTAT_ADMA 0x400 /* DMA is active */
+
+/* Seeq8003 transmit command register */
+#define SEEQ_TCMD_RB0 0x00 /* Register bank zero w/station addr */
+#define SEEQ_TCMD_IUF 0x01 /* IRQ on tx underflow */
+#define SEEQ_TCMD_IC 0x02 /* IRQ on collisions */
+#define SEEQ_TCMD_I16 0x04 /* IRQ after 16 failed attempts to tx frame */
+#define SEEQ_TCMD_IPT 0x08 /* IRQ when packet successfully transmitted */
+#define SEEQ_TCMD_RB1 0x20 /* Register bank one w/multi-cast low byte */
+#define SEEQ_TCMD_RB2 0x40 /* Register bank two w/multi-cast high byte */
+
+/* Seeq8003 control register */
+#define SEEQ_CTRL_XCNT 0x01
+#define SEEQ_CTRL_ACCNT 0x02
+#define SEEQ_CTRL_SFLAG 0x04
+#define SEEQ_CTRL_EMULTI 0x08
+#define SEEQ_CTRL_ESHORT 0x10
+#define SEEQ_CTRL_ENCARR 0x20
+
+/* Seeq8003 control registers on the SGI Hollywood HPC. */
+#define SEEQ_HPIO_P1BITS 0x00000001 /* cycles to stay in P1 phase for PIO */
+#define SEEQ_HPIO_P2BITS 0x00000060 /* cycles to stay in P2 phase for PIO */
+#define SEEQ_HPIO_P3BITS 0x00000100 /* cycles to stay in P3 phase for PIO */
+#define SEEQ_HDMA_D1BITS 0x00000006 /* cycles to stay in D1 phase for DMA */
+#define SEEQ_HDMA_D2BITS 0x00000020 /* cycles to stay in D2 phase for DMA */
+#define SEEQ_HDMA_D3BITS 0x00000000 /* cycles to stay in D3 phase for DMA */
+#define SEEQ_HDMA_TIMEO 0x00030000 /* cycles for DMA timeout */
+#define SEEQ_HCTL_NORM 0x00000000 /* Normal operation mode */
+#define SEEQ_HCTL_RESET 0x00000001 /* Reset Seeq8003 and HPC interface */
+#define SEEQ_HCTL_IPEND 0x00000002 /* IRQ is pending for the chip */
+#define SEEQ_HCTL_IPG 0x00001000 /* Inter-packet gap */
+#define SEEQ_HCTL_RFIX 0x00002000 /* At rxdc, clear end-of-packet */
+#define SEEQ_HCTL_EFIX 0x00004000 /* fixes intr status bit settings */
+#define SEEQ_HCTL_IFIX 0x00008000 /* enable startup timeouts */
+
+#endif /* !(_SGISEEQ_H) */
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
new file mode 100644
index 000000000000..5d18841f0f3d
--- /dev/null
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -0,0 +1,21 @@
+config SFC
+ tristate "Solarflare SFC4000/SFC9000-family support"
+ depends on PCI && INET
+ select MDIO
+ select CRC32
+ select I2C
+ select I2C_ALGOBIT
+ ---help---
+ This driver supports 10-gigabit Ethernet cards based on
+ the Solarflare SFC4000 and SFC9000-family controllers.
+
+ To compile this driver as a module, choose M here. The module
+ will be called sfc.
+
+config SFC_MTD
+ bool "Solarflare SFC4000/SFC9000-family MTD support"
+ depends on SFC && MTD && !(SFC=y && MTD=m)
+ default y
+ ---help---
+ This exposes the on-board flash memory as MTD devices (e.g.
+ /dev/mtd1). This makes it possible to upload new firmware
+ to the NIC.
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
new file mode 100644
index 000000000000..ab31c7124db1
--- /dev/null
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -0,0 +1,8 @@
+sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
+ falcon_xmac.o mcdi_mac.o \
+ selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
+ tenxpress.o txc43128_phy.o falcon_boards.o \
+ mcdi.o mcdi_phy.o
+sfc-$(CONFIG_SFC_MTD) += mtd.o
+
+obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
new file mode 100644
index 000000000000..098ac2ad757d
--- /dev/null
+++ b/drivers/net/ethernet/sfc/bitfield.h
@@ -0,0 +1,538 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_BITFIELD_H
+#define EFX_BITFIELD_H
+
+/*
+ * Efx bitfield access
+ *
+ * Efx NICs make extensive use of bitfields up to 128 bits
+ * wide. Since there is no native 128-bit datatype on most systems,
+ * and since 64-bit datatypes are inefficient on 32-bit systems and
+ * vice versa, we wrap accesses in a way that uses the most efficient
+ * datatype.
+ *
+ * The NICs are PCI devices and therefore little-endian. Since most
+ * of the quantities that we deal with are DMAed to/from host memory,
+ * we define our datatypes (efx_oword_t, efx_qword_t and
+ * efx_dword_t) to be little-endian.
+ */
+
+/* Lowest bit numbers and widths */
+#define EFX_DUMMY_FIELD_LBN 0
+#define EFX_DUMMY_FIELD_WIDTH 0
+#define EFX_DWORD_0_LBN 0
+#define EFX_DWORD_0_WIDTH 32
+#define EFX_DWORD_1_LBN 32
+#define EFX_DWORD_1_WIDTH 32
+#define EFX_DWORD_2_LBN 64
+#define EFX_DWORD_2_WIDTH 32
+#define EFX_DWORD_3_LBN 96
+#define EFX_DWORD_3_WIDTH 32
+#define EFX_QWORD_0_LBN 0
+#define EFX_QWORD_0_WIDTH 64
+
+/* Specified attribute (e.g. LBN) of the specified field */
+#define EFX_VAL(field, attribute) field ## _ ## attribute
+/* Low bit number of the specified field */
+#define EFX_LOW_BIT(field) EFX_VAL(field, LBN)
+/* Bit width of the specified field */
+#define EFX_WIDTH(field) EFX_VAL(field, WIDTH)
+/* High bit number of the specified field */
+#define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1)
+/* Mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x1f.
+ *
+ * The maximum width mask that can be generated is 64 bits.
+ */
+#define EFX_MASK64(width) \
+ ((width) == 64 ? ~((u64) 0) : \
+ (((((u64) 1) << (width))) - 1))
+
+/* Mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x1f.
+ *
+ * The maximum width mask that can be generated is 32 bits. Use
+ * EFX_MASK64 for higher width fields.
+ */
+#define EFX_MASK32(width) \
+ ((width) == 32 ? ~((u32) 0) : \
+ (((((u32) 1) << (width))) - 1))
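+
+/* Aside: the full-width cases above are special-cased because shifting
+ * an N-bit integer by N or more is undefined behaviour in C, so the
+ * all-ones mask cannot come from the shift-and-subtract expression.
+ * Illustrative only (not built):
+ */
+#if 0
+ u64 m5 = EFX_MASK64(5); /* == 0x1f */
+ u64 m64 = EFX_MASK64(64); /* == ~0, via the special case */
+#endif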
+
+/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
+typedef union efx_dword {
+ __le32 u32[1];
+} efx_dword_t;
+
+/* A quadword (i.e. 8 byte) datatype - little-endian in HW */
+typedef union efx_qword {
+ __le64 u64[1];
+ __le32 u32[2];
+ efx_dword_t dword[2];
+} efx_qword_t;
+
+/* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */
+typedef union efx_oword {
+ __le64 u64[2];
+ efx_qword_t qword[2];
+ __le32 u32[4];
+ efx_dword_t dword[4];
+} efx_oword_t;
+
+/* Format string and value expanders for printk */
+#define EFX_DWORD_FMT "%08x"
+#define EFX_QWORD_FMT "%08x:%08x"
+#define EFX_OWORD_FMT "%08x:%08x:%08x:%08x"
+#define EFX_DWORD_VAL(dword) \
+ ((unsigned int) le32_to_cpu((dword).u32[0]))
+#define EFX_QWORD_VAL(qword) \
+ ((unsigned int) le32_to_cpu((qword).u32[1])), \
+ ((unsigned int) le32_to_cpu((qword).u32[0]))
+#define EFX_OWORD_VAL(oword) \
+ ((unsigned int) le32_to_cpu((oword).u32[3])), \
+ ((unsigned int) le32_to_cpu((oword).u32[2])), \
+ ((unsigned int) le32_to_cpu((oword).u32[1])), \
+ ((unsigned int) le32_to_cpu((oword).u32[0]))
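+
+/* The FMT/VAL pairs are meant to be used together in a single
+ * printk-style call; a sketch of typical usage (the variable and
+ * message are hypothetical):
+ */
+#if 0
+ efx_oword_t reg;
+
+ netif_dbg(efx, hw, efx->net_dev, "read " EFX_OWORD_FMT "\n",
+ EFX_OWORD_VAL(reg));
+#endif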
+
+/*
+ * Extract bit field portion [low,high] from the native-endian element
+ * which contains bits [min,max].
+ *
+ * For example, suppose "element" represents the high 32 bits of a
+ * 64-bit value, and we wish to extract the bits belonging to the bit
+ * field occupying bits 28-45 of this 64-bit value.
+ *
+ * Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give
+ *
+ * ( element ) << 4
+ *
+ * The result will contain the relevant bits filled in over the range
+ * [0,high-low], with garbage in bits [high-low+1,...).
+ */
+#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \
+ (((low > max) || (high < min)) ? 0 : \
+ ((low > min) ? \
+ ((native_element) >> (low - min)) : \
+ ((native_element) << (min - low))))
+
+/*
+ * Extract bit field portion [low,high] from the 64-bit little-endian
+ * element which contains bits [min,max]
+ */
+#define EFX_EXTRACT64(element, min, max, low, high) \
+ EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high)
+
+/*
+ * Extract bit field portion [low,high] from the 32-bit little-endian
+ * element which contains bits [min,max]
+ */
+#define EFX_EXTRACT32(element, min, max, low, high) \
+ EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
+
+#define EFX_EXTRACT_OWORD64(oword, low, high) \
+ ((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
+ EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \
+ EFX_MASK64(high + 1 - low))
+
+#define EFX_EXTRACT_QWORD64(qword, low, high) \
+ (EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \
+ EFX_MASK64(high + 1 - low))
+
+#define EFX_EXTRACT_OWORD32(oword, low, high) \
+ ((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
+ EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
+ EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
+ EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \
+ EFX_MASK32(high + 1 - low))
+
+#define EFX_EXTRACT_QWORD32(qword, low, high) \
+ ((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
+ EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \
+ EFX_MASK32(high + 1 - low))
+
+#define EFX_EXTRACT_DWORD(dword, low, high) \
+ (EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \
+ EFX_MASK32(high + 1 - low))
+
+#define EFX_OWORD_FIELD64(oword, field) \
+ EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field))
+
+#define EFX_QWORD_FIELD64(qword, field) \
+ EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field))
+
+#define EFX_OWORD_FIELD32(oword, field) \
+ EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field))
+
+#define EFX_QWORD_FIELD32(qword, field) \
+ EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field))
+
+#define EFX_DWORD_FIELD(dword, field) \
+ EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field))
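+
+/* Fields are always referred to by name; the accessors paste on _LBN
+ * and _WIDTH. Illustrative only (not built), with a made-up field
+ * occupying bits 8..11 of a dword:
+ */
+#if 0
+#define EXAMPLE_FIELD_LBN 8
+#define EXAMPLE_FIELD_WIDTH 4
+
+ efx_dword_t reg;
+ unsigned int val;
+
+ reg.u32[0] = cpu_to_le32(0x00000a00);
+ val = EFX_DWORD_FIELD(reg, EXAMPLE_FIELD); /* val == 0xa */
+#endif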
+
+#define EFX_OWORD_IS_ZERO64(oword) \
+ (((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
+
+#define EFX_QWORD_IS_ZERO64(qword) \
+ (((qword).u64[0]) == (__force __le64) 0)
+
+#define EFX_OWORD_IS_ZERO32(oword) \
+ (((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \
+ == (__force __le32) 0)
+
+#define EFX_QWORD_IS_ZERO32(qword) \
+ (((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0)
+
+#define EFX_DWORD_IS_ZERO(dword) \
+ (((dword).u32[0]) == (__force __le32) 0)
+
+#define EFX_OWORD_IS_ALL_ONES64(oword) \
+ (((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0))
+
+#define EFX_QWORD_IS_ALL_ONES64(qword) \
+ ((qword).u64[0] == ~((__force __le64) 0))
+
+#define EFX_OWORD_IS_ALL_ONES32(oword) \
+ (((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \
+ == ~((__force __le32) 0))
+
+#define EFX_QWORD_IS_ALL_ONES32(qword) \
+ (((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0))
+
+#define EFX_DWORD_IS_ALL_ONES(dword) \
+ ((dword).u32[0] == ~((__force __le32) 0))
+
+#if BITS_PER_LONG == 64
+#define EFX_OWORD_FIELD EFX_OWORD_FIELD64
+#define EFX_QWORD_FIELD EFX_QWORD_FIELD64
+#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64
+#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64
+#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES64
+#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES64
+#else
+#define EFX_OWORD_FIELD EFX_OWORD_FIELD32
+#define EFX_QWORD_FIELD EFX_QWORD_FIELD32
+#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32
+#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32
+#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES32
+#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES32
+#endif
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the bit field [low,high] that lies within
+ * the range [min,max].
+ */
+#define EFX_INSERT_NATIVE64(min, max, low, high, value) \
+ (((low > max) || (high < min)) ? 0 : \
+ ((low > min) ? \
+ (((u64) (value)) << (low - min)) : \
+ (((u64) (value)) >> (min - low))))
+
+#define EFX_INSERT_NATIVE32(min, max, low, high, value) \
+ (((low > max) || (high < min)) ? 0 : \
+ ((low > min) ? \
+ (((u32) (value)) << (low - min)) : \
+ (((u32) (value)) >> (min - low))))
+
+#define EFX_INSERT_NATIVE(min, max, low, high, value) \
+ ((((max - min) >= 32) || ((high - low) >= 32)) ? \
+ EFX_INSERT_NATIVE64(min, max, low, high, value) : \
+ EFX_INSERT_NATIVE32(min, max, low, high, value))
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the named bit field that lies within the
+ * range [min,max].
+ */
+#define EFX_INSERT_FIELD_NATIVE(min, max, field, value) \
+ EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+/*
+ * Construct bit field
+ *
+ * Creates the portion of the named bit fields that lie within the
+ * range [min,max].
+ */
+#define EFX_INSERT_FIELDS_NATIVE(min, max, \
+ field1, value1, \
+ field2, value2, \
+ field3, value3, \
+ field4, value4, \
+ field5, value5, \
+ field6, value6, \
+ field7, value7, \
+ field8, value8, \
+ field9, value9, \
+ field10, value10) \
+ (EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)))
+
+#define EFX_INSERT_FIELDS64(...) \
+ cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
+
+#define EFX_INSERT_FIELDS32(...) \
+ cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
+
+#define EFX_POPULATE_OWORD64(oword, ...) do { \
+ (oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
+ (oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__); \
+ } while (0)
+
+#define EFX_POPULATE_QWORD64(qword, ...) do { \
+ (qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
+ } while (0)
+
+#define EFX_POPULATE_OWORD32(oword, ...) do { \
+ (oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
+ (oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
+ (oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__); \
+ (oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__); \
+ } while (0)
+
+#define EFX_POPULATE_QWORD32(qword, ...) do { \
+ (qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
+ (qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
+ } while (0)
+
+#define EFX_POPULATE_DWORD(dword, ...) do { \
+ (dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
+ } while (0)
+
+#if BITS_PER_LONG == 64
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64
+#else
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32
+#endif
+
+/* Populate an octword field with various numbers of arguments */
+#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD
+#define EFX_POPULATE_OWORD_9(oword, ...) \
+ EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_8(oword, ...) \
+ EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_7(oword, ...) \
+ EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_6(oword, ...) \
+ EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_5(oword, ...) \
+ EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_4(oword, ...) \
+ EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_3(oword, ...) \
+ EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_2(oword, ...) \
+ EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_1(oword, ...) \
+ EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_ZERO_OWORD(oword) \
+ EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0)
+#define EFX_SET_OWORD(oword) \
+ EFX_POPULATE_OWORD_4(oword, \
+ EFX_DWORD_0, 0xffffffff, \
+ EFX_DWORD_1, 0xffffffff, \
+ EFX_DWORD_2, 0xffffffff, \
+ EFX_DWORD_3, 0xffffffff)
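+
+/* The numbered populate variants simply pad the argument list out to
+ * ten (field, value) pairs with EFX_DUMMY_FIELD, whose zero width
+ * contributes nothing. A usage sketch (field names hypothetical):
+ */
+#if 0
+ efx_oword_t reg;
+
+ EFX_POPULATE_OWORD_2(reg,
+ EXAMPLE_FIELD_A, 0x3,
+ EXAMPLE_FIELD_B, 0x1);
+ /* writes all 128 bits of reg; unnamed fields become zero */
+#endif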
+
+/* Populate a quadword field with various numbers of arguments */
+#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD
+#define EFX_POPULATE_QWORD_9(qword, ...) \
+ EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_8(qword, ...) \
+ EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_7(qword, ...) \
+ EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_6(qword, ...) \
+ EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_5(qword, ...) \
+ EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_4(qword, ...) \
+ EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_3(qword, ...) \
+ EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_2(qword, ...) \
+ EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_1(qword, ...) \
+ EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_ZERO_QWORD(qword) \
+ EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0)
+#define EFX_SET_QWORD(qword) \
+ EFX_POPULATE_QWORD_2(qword, \
+ EFX_DWORD_0, 0xffffffff, \
+ EFX_DWORD_1, 0xffffffff)
+
+/* Populate a dword field with various numbers of arguments */
+#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD
+#define EFX_POPULATE_DWORD_9(dword, ...) \
+ EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_8(dword, ...) \
+ EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_7(dword, ...) \
+ EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_6(dword, ...) \
+ EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_5(dword, ...) \
+ EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_4(dword, ...) \
+ EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_3(dword, ...) \
+ EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_2(dword, ...) \
+ EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_1(dword, ...) \
+ EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_ZERO_DWORD(dword) \
+ EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0)
+#define EFX_SET_DWORD(dword) \
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff)
+
+/*
+ * Modify a named field within an already-populated structure, for
+ * read-modify-write operations.
+ */
+#define EFX_INVERT_OWORD(oword) do { \
+ (oword).u64[0] = ~((oword).u64[0]); \
+ (oword).u64[1] = ~((oword).u64[1]); \
+ } while (0)
+
+#define EFX_AND_OWORD(oword, from, mask) \
+ do { \
+ (oword).u64[0] = (from).u64[0] & (mask).u64[0]; \
+ (oword).u64[1] = (from).u64[1] & (mask).u64[1]; \
+ } while (0)
+
+#define EFX_OR_OWORD(oword, from, mask) \
+ do { \
+ (oword).u64[0] = (from).u64[0] | (mask).u64[0]; \
+ (oword).u64[1] = (from).u64[1] | (mask).u64[1]; \
+ } while (0)
+
+#define EFX_INSERT64(min, max, low, high, value) \
+ cpu_to_le64(EFX_INSERT_NATIVE(min, max, low, high, value))
+
+#define EFX_INSERT32(min, max, low, high, value) \
+ cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value))
+
+#define EFX_INPLACE_MASK64(min, max, low, high) \
+ EFX_INSERT64(min, max, low, high, EFX_MASK64(high + 1 - low))
+
+#define EFX_INPLACE_MASK32(min, max, low, high) \
+ EFX_INSERT32(min, max, low, high, EFX_MASK32(high + 1 - low))
+
+#define EFX_SET_OWORD64(oword, low, high, value) do { \
+ (oword).u64[0] = (((oword).u64[0] \
+ & ~EFX_INPLACE_MASK64(0, 63, low, high)) \
+ | EFX_INSERT64(0, 63, low, high, value)); \
+ (oword).u64[1] = (((oword).u64[1] \
+ & ~EFX_INPLACE_MASK64(64, 127, low, high)) \
+ | EFX_INSERT64(64, 127, low, high, value)); \
+ } while (0)
+
+#define EFX_SET_QWORD64(qword, low, high, value) do { \
+ (qword).u64[0] = (((qword).u64[0] \
+ & ~EFX_INPLACE_MASK64(0, 63, low, high)) \
+ | EFX_INSERT64(0, 63, low, high, value)); \
+ } while (0)
+
+#define EFX_SET_OWORD32(oword, low, high, value) do { \
+ (oword).u32[0] = (((oword).u32[0] \
+ & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
+ | EFX_INSERT32(0, 31, low, high, value)); \
+ (oword).u32[1] = (((oword).u32[1] \
+ & ~EFX_INPLACE_MASK32(32, 63, low, high)) \
+ | EFX_INSERT32(32, 63, low, high, value)); \
+ (oword).u32[2] = (((oword).u32[2] \
+ & ~EFX_INPLACE_MASK32(64, 95, low, high)) \
+ | EFX_INSERT32(64, 95, low, high, value)); \
+ (oword).u32[3] = (((oword).u32[3] \
+ & ~EFX_INPLACE_MASK32(96, 127, low, high)) \
+ | EFX_INSERT32(96, 127, low, high, value)); \
+ } while (0)
+
+#define EFX_SET_QWORD32(qword, low, high, value) do { \
+ (qword).u32[0] = (((qword).u32[0] \
+ & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
+ | EFX_INSERT32(0, 31, low, high, value)); \
+ (qword).u32[1] = (((qword).u32[1] \
+ & ~EFX_INPLACE_MASK32(32, 63, low, high)) \
+ | EFX_INSERT32(32, 63, low, high, value)); \
+ } while (0)
+
+#define EFX_SET_DWORD32(dword, low, high, value) do { \
+ (dword).u32[0] = (((dword).u32[0] \
+ & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
+ | EFX_INSERT32(0, 31, low, high, value)); \
+ } while (0)
+
+#define EFX_SET_OWORD_FIELD64(oword, field, value) \
+ EFX_SET_OWORD64(oword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_QWORD_FIELD64(qword, field, value) \
+ EFX_SET_QWORD64(qword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_OWORD_FIELD32(oword, field, value) \
+ EFX_SET_OWORD32(oword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_QWORD_FIELD32(qword, field, value) \
+ EFX_SET_QWORD32(qword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_DWORD_FIELD(dword, field, value) \
+ EFX_SET_DWORD32(dword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
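+
+/* Unlike the POPULATE macros, the SET macros preserve all other
+ * fields. A read-modify-write sketch, reusing the hypothetical
+ * EXAMPLE_FIELD (bits 8..11) from earlier:
+ */
+#if 0
+ efx_dword_t reg;
+
+ EFX_POPULATE_DWORD_1(reg, EXAMPLE_FIELD, 0xa);
+ EFX_SET_DWORD_FIELD(reg, EXAMPLE_FIELD, 0x5);
+ /* only bits 8..11 changed; every other bit is untouched */
+#endif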
+
+#if BITS_PER_LONG == 64
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
+#else
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
+#endif
+
+/* Used to avoid compiler warnings about shift range exceeding width
+ * of the data types when dma_addr_t is only 32 bits wide.
+ */
+#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
+#define EFX_DMA_TYPE_WIDTH(width) \
+ (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
+
+
+/* Static initialiser */
+#define EFX_OWORD32(a, b, c, d) \
+ { .u32 = { cpu_to_le32(a), cpu_to_le32(b), \
+ cpu_to_le32(c), cpu_to_le32(d) } }
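+
+/* u32[0] is the least-significant dword, so (illustrative only) a
+ * compile-time image of 0x00000004_00000003_00000002_00000001:
+ */
+#if 0
+static const efx_oword_t example = EFX_OWORD32(1, 2, 3, 4);
+#endif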
+
+#endif /* EFX_BITFIELD_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
new file mode 100644
index 000000000000..de9afebe1830
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -0,0 +1,2732 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/notifier.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+#include <linux/topology.h>
+#include <linux/gfp.h>
+#include <linux/cpu_rmap.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "nic.h"
+
+#include "mcdi.h"
+#include "workarounds.h"
+
+/**************************************************************************
+ *
+ * Type name strings
+ *
+ **************************************************************************
+ */
+
+/* Loopback mode names (see LOOPBACK_MODE()) */
+const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
+const char *efx_loopback_mode_names[] = {
+ [LOOPBACK_NONE] = "NONE",
+ [LOOPBACK_DATA] = "DATAPATH",
+ [LOOPBACK_GMAC] = "GMAC",
+ [LOOPBACK_XGMII] = "XGMII",
+ [LOOPBACK_XGXS] = "XGXS",
+ [LOOPBACK_XAUI] = "XAUI",
+ [LOOPBACK_GMII] = "GMII",
+ [LOOPBACK_SGMII] = "SGMII",
+ [LOOPBACK_XGBR] = "XGBR",
+ [LOOPBACK_XFI] = "XFI",
+ [LOOPBACK_XAUI_FAR] = "XAUI_FAR",
+ [LOOPBACK_GMII_FAR] = "GMII_FAR",
+ [LOOPBACK_SGMII_FAR] = "SGMII_FAR",
+ [LOOPBACK_XFI_FAR] = "XFI_FAR",
+ [LOOPBACK_GPHY] = "GPHY",
+ [LOOPBACK_PHYXS] = "PHYXS",
+ [LOOPBACK_PCS] = "PCS",
+ [LOOPBACK_PMAPMD] = "PMA/PMD",
+ [LOOPBACK_XPORT] = "XPORT",
+ [LOOPBACK_XGMII_WS] = "XGMII_WS",
+ [LOOPBACK_XAUI_WS] = "XAUI_WS",
+ [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
+ [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
+ [LOOPBACK_GMII_WS] = "GMII_WS",
+ [LOOPBACK_XFI_WS] = "XFI_WS",
+ [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
+ [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
+};
+
+const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
+const char *efx_reset_type_names[] = {
+ [RESET_TYPE_INVISIBLE] = "INVISIBLE",
+ [RESET_TYPE_ALL] = "ALL",
+ [RESET_TYPE_WORLD] = "WORLD",
+ [RESET_TYPE_DISABLE] = "DISABLE",
+ [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
+ [RESET_TYPE_INT_ERROR] = "INT_ERROR",
+ [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
+ [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
+ [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
+ [RESET_TYPE_TX_SKIP] = "TX_SKIP",
+ [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
+};
+
+#define EFX_MAX_MTU (9 * 1024)
+
+/* Reset workqueue. If any NIC has a hardware failure then a reset will be
+ * queued onto this work queue. This is not a per-nic work queue, because
+ * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
+ */
+static struct workqueue_struct *reset_workqueue;
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ *************************************************************************/
+
+/*
+ * Use separate channels for TX and RX events
+ *
+ * Set this to 1 to use separate channels for TX and RX. It allows us
+ * to control interrupt affinity separately for TX and RX.
+ *
+ * This is only used in MSI-X interrupt mode
+ */
+static unsigned int separate_tx_channels;
+module_param(separate_tx_channels, uint, 0444);
+MODULE_PARM_DESC(separate_tx_channels,
+ "Use separate channels for TX and RX");
+
+/* This is the weight assigned to each of the (per-channel) virtual
+ * NAPI devices.
+ */
+static int napi_weight = 64;
+
+/* This is the time (in jiffies) between invocations of the hardware
+ * monitor. On Falcon-based NICs, this will:
+ * - Check the on-board hardware monitor;
+ * - Poll the link state and reconfigure the hardware as necessary.
+ */
+static unsigned int efx_monitor_interval = 1 * HZ;
+
+/* This controls whether or not the driver will initialise devices
+ * with invalid MAC addresses stored in the EEPROM or flash. If true,
+ * such devices will be initialised with a random locally-generated
+ * MAC address. This allows for loading the sfc_mtd driver to
+ * reprogram the flash, even if the flash contents (including the MAC
+ * address) have previously been erased.
+ */
+static unsigned int allow_bad_hwaddr;
+
+/* Initial interrupt moderation settings. They can be modified after
+ * module load with ethtool.
+ *
+ * The default for RX should strike a balance between increasing the
+ * round-trip latency and reducing overhead.
+ */
+static unsigned int rx_irq_mod_usec = 60;
+
+/* Initial interrupt moderation settings. They can be modified after
+ * module load with ethtool.
+ *
+ * This default is chosen to ensure that a 10G link does not go idle
+ * while a TX queue is stopped after it has become full. A queue is
+ * restarted when it drops below half full. The time this takes (assuming
+ * worst case 3 descriptors per packet and 1024 descriptors) is
+ * 512 / 3 * 1.2 = 205 usec.
+ */
+static unsigned int tx_irq_mod_usec = 150;
+
+/* This is the first interrupt mode to try out of:
+ * 0 => MSI-X
+ * 1 => MSI
+ * 2 => legacy
+ */
+static unsigned int interrupt_mode;
+
+/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
+ * i.e. the number of CPUs among which we may distribute simultaneous
+ * interrupt handling.
+ *
+ * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
+ * The default (0) means to assign an interrupt to each package (level II cache)
+ */
+static unsigned int rss_cpus;
+module_param(rss_cpus, uint, 0444);
+MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
+
+static int phy_flash_cfg;
+module_param(phy_flash_cfg, int, 0644);
+MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
+
+static unsigned irq_adapt_low_thresh = 10000;
+module_param(irq_adapt_low_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_low_thresh,
+ "Threshold score for reducing IRQ moderation");
+
+static unsigned irq_adapt_high_thresh = 20000;
+module_param(irq_adapt_high_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_high_thresh,
+ "Threshold score for increasing IRQ moderation");
+
+static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR | NETIF_MSG_HW);
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
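+
+/* All of the module parameters above are set at load time, e.g.
+ * (illustrative) "modprobe sfc rss_cpus=4 separate_tx_channels=1";
+ * those declared with 0644 permissions can also be changed afterwards
+ * under /sys/module/sfc/parameters/.
+ */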
+
+/**************************************************************************
+ *
+ * Utility functions and prototypes
+ *
+ *************************************************************************/
+
+static void efx_remove_channels(struct efx_nic *efx);
+static void efx_remove_port(struct efx_nic *efx);
+static void efx_init_napi(struct efx_nic *efx);
+static void efx_fini_napi(struct efx_nic *efx);
+static void efx_fini_napi_channel(struct efx_channel *channel);
+static void efx_fini_struct(struct efx_nic *efx);
+static void efx_start_all(struct efx_nic *efx);
+static void efx_stop_all(struct efx_nic *efx);
+
+#define EFX_ASSERT_RESET_SERIALISED(efx) \
+ do { \
+ if ((efx->state == STATE_RUNNING) || \
+ (efx->state == STATE_DISABLED)) \
+ ASSERT_RTNL(); \
+ } while (0)
+
+/**************************************************************************
+ *
+ * Event queue processing
+ *
+ *************************************************************************/
+
+/* Process channel's event queue
+ *
+ * This function is responsible for processing the event queue of a
+ * single channel. The caller must guarantee that this function will
+ * never be concurrently called more than once on the same channel,
+ * though different channels may be being processed concurrently.
+ */
+static int efx_process_channel(struct efx_channel *channel, int budget)
+{
+ struct efx_nic *efx = channel->efx;
+ int spent;
+
+ if (unlikely(efx->reset_pending || !channel->enabled))
+ return 0;
+
+ spent = efx_nic_process_eventq(channel, budget);
+ if (spent == 0)
+ return 0;
+
+ /* Deliver last RX packet. */
+ if (channel->rx_pkt) {
+ __efx_rx_packet(channel, channel->rx_pkt,
+ channel->rx_pkt_csummed);
+ channel->rx_pkt = NULL;
+ }
+
+ efx_rx_strategy(channel);
+
+ efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
+
+ return spent;
+}
+
+/* Mark channel as finished processing
+ *
+ * Note that since we will not receive further interrupts for this
+ * channel before we finish processing and call the eventq_read_ack()
+ * method, there is no need to use the interrupt hold-off timers.
+ */
+static inline void efx_channel_processed(struct efx_channel *channel)
+{
+ /* The interrupt handler for this channel may set work_pending
+ * as soon as we acknowledge the events we've seen. Make sure
+ * it's cleared before then. */
+ channel->work_pending = false;
+ smp_wmb();
+
+ efx_nic_eventq_read_ack(channel);
+}
+
+/* NAPI poll handler
+ *
+ * NAPI guarantees serialisation of polls of the same device, which
+ * provides the guarantee required by efx_process_channel().
+ */
+static int efx_poll(struct napi_struct *napi, int budget)
+{
+ struct efx_channel *channel =
+ container_of(napi, struct efx_channel, napi_str);
+ struct efx_nic *efx = channel->efx;
+ int spent;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "channel %d NAPI poll executing on CPU %d\n",
+ channel->channel, raw_smp_processor_id());
+
+ spent = efx_process_channel(channel, budget);
+
+ if (spent < budget) {
+ if (channel->channel < efx->n_rx_channels &&
+ efx->irq_rx_adaptive &&
+ unlikely(++channel->irq_count == 1000)) {
+ if (unlikely(channel->irq_mod_score <
+ irq_adapt_low_thresh)) {
+ if (channel->irq_moderation > 1) {
+ channel->irq_moderation -= 1;
+ efx->type->push_irq_moderation(channel);
+ }
+ } else if (unlikely(channel->irq_mod_score >
+ irq_adapt_high_thresh)) {
+ if (channel->irq_moderation <
+ efx->irq_rx_moderation) {
+ channel->irq_moderation += 1;
+ efx->type->push_irq_moderation(channel);
+ }
+ }
+ channel->irq_count = 0;
+ channel->irq_mod_score = 0;
+ }
+
+ efx_filter_rfs_expire(channel);
+
+ /* There is no race here; although napi_disable() will
+ * only wait for napi_complete(), this isn't a problem
+ * since efx_channel_processed() will have no effect if
+ * interrupts have already been disabled.
+ */
+ napi_complete(napi);
+ efx_channel_processed(channel);
+ }
+
+ return spent;
+}
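+
+/* A sketch of how this poll handler is presumably registered by the
+ * forward-declared efx_init_napi(), consistent with the napi_weight
+ * parameter above (illustrative only):
+ */
+#if 0
+static void example_init_napi_channel(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+
+ channel->napi_dev = efx->net_dev;
+ netif_napi_add(channel->napi_dev, &channel->napi_str,
+ efx_poll, napi_weight);
+}
+#endif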
+
+/* Process the eventq of the specified channel immediately on this CPU
+ *
+ * Disable hardware generated interrupts, wait for any existing
+ * processing to finish, then directly poll (and ack) the eventq.
+ * Finally reenable NAPI and interrupts.
+ *
+ * This is for use only during a loopback self-test. It must not
+ * deliver any packets up the stack as this can result in deadlock.
+ */
+void efx_process_channel_now(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+
+ BUG_ON(channel->channel >= efx->n_channels);
+ BUG_ON(!channel->enabled);
+ BUG_ON(!efx->loopback_selftest);
+
+ /* Disable interrupts and wait for ISRs to complete */
+ efx_nic_disable_interrupts(efx);
+ if (efx->legacy_irq) {
+ synchronize_irq(efx->legacy_irq);
+ efx->legacy_irq_enabled = false;
+ }
+ if (channel->irq)
+ synchronize_irq(channel->irq);
+
+ /* Wait for any NAPI processing to complete */
+ napi_disable(&channel->napi_str);
+
+ /* Poll the channel */
+ efx_process_channel(channel, channel->eventq_mask + 1);
+
+ /* Ack the eventq. This may cause an interrupt to be generated
+ * when they are reenabled */
+ efx_channel_processed(channel);
+
+ napi_enable(&channel->napi_str);
+ if (efx->legacy_irq)
+ efx->legacy_irq_enabled = true;
+ efx_nic_enable_interrupts(efx);
+}
+
+/* Create event queue
+ * Event queue memory allocations are done only once. If the channel
+ * is reset, the memory buffer will be reused; this guards against
+ * errors during channel reset and also simplifies interrupt handling.
+ */
+static int efx_probe_eventq(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned long entries;
+
+ netif_dbg(channel->efx, probe, channel->efx->net_dev,
+ "chan %d create event queue\n", channel->channel);
+
+ /* Build an event queue with room for one event per tx and rx buffer,
+ * plus some extra for link state events and MCDI completions. */
+ entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
+ EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
+ channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
+
+ return efx_nic_probe_eventq(channel);
+}
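+
+/* A worked instance of the sizing arithmetic above, assuming (for
+ * illustration) 512 RX and 512 TX descriptors:
+ * roundup_pow_of_two(512 + 512 + 128) = 2048, so eventq_mask = 2047.
+ */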
+
+/* Prepare channel's event queue */
+static void efx_init_eventq(struct efx_channel *channel)
+{
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d init event queue\n", channel->channel);
+
+ channel->eventq_read_ptr = 0;
+
+ efx_nic_init_eventq(channel);
+}
+
+static void efx_fini_eventq(struct efx_channel *channel)
+{
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d fini event queue\n", channel->channel);
+
+ efx_nic_fini_eventq(channel);
+}
+
+static void efx_remove_eventq(struct efx_channel *channel)
+{
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d remove event queue\n", channel->channel);
+
+ efx_nic_remove_eventq(channel);
+}
+
+/**************************************************************************
+ *
+ * Channel handling
+ *
+ *************************************************************************/
+
+/* Allocate and initialise a channel structure, optionally copying
+ * parameters (but not resources) from an old channel structure. */
+static struct efx_channel *
+efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
+{
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ int j;
+
+ if (old_channel) {
+ channel = kmalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ *channel = *old_channel;
+
+ channel->napi_dev = NULL;
+ memset(&channel->eventq, 0, sizeof(channel->eventq));
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->buffer = NULL;
+ memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
+
+ for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ if (tx_queue->channel)
+ tx_queue->channel = channel;
+ tx_queue->buffer = NULL;
+ memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
+ }
+ } else {
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ channel->efx = efx;
+ channel->channel = i;
+
+ for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ tx_queue->efx = efx;
+ tx_queue->queue = i * EFX_TXQ_TYPES + j;
+ tx_queue->channel = channel;
+ }
+ }
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->efx = efx;
+ setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
+ (unsigned long)rx_queue);
+
+ return channel;
+}
+
+static int efx_probe_channel(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int rc;
+
+ netif_dbg(channel->efx, probe, channel->efx->net_dev,
+ "creating channel %d\n", channel->channel);
+
+ rc = efx_probe_eventq(channel);
+ if (rc)
+ goto fail1;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ rc = efx_probe_tx_queue(tx_queue);
+ if (rc)
+ goto fail2;
+ }
+
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ rc = efx_probe_rx_queue(rx_queue);
+ if (rc)
+ goto fail3;
+ }
+
+ channel->n_rx_frm_trunc = 0;
+
+ return 0;
+
+ fail3:
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_remove_rx_queue(rx_queue);
+ fail2:
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_remove_tx_queue(tx_queue);
+ fail1:
+ return rc;
+}
+
+
+static void efx_set_channel_names(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ const char *type = "";
+ int number;
+
+ efx_for_each_channel(channel, efx) {
+ number = channel->channel;
+ if (efx->n_channels > efx->n_rx_channels) {
+ if (channel->channel < efx->n_rx_channels) {
+ type = "-rx";
+ } else {
+ type = "-tx";
+ number -= efx->n_rx_channels;
+ }
+ }
+ snprintf(efx->channel_name[channel->channel],
+ sizeof(efx->channel_name[0]),
+ "%s%s-%d", efx->name, type, number);
+ }
+}
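+
+/* For example, a hypothetical device named eth0 with four channels
+ * split into separate RX and TX pairs would be named eth0-rx-0,
+ * eth0-rx-1, eth0-tx-0 and eth0-tx-1; with combined channels the
+ * names would simply be eth0-0 .. eth0-3.
+ */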
+
+static int efx_probe_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ int rc;
+
+ /* Restart special buffer allocation */
+ efx->next_buffer_table = 0;
+
+ efx_for_each_channel(channel, efx) {
+ rc = efx_probe_channel(channel);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to create channel %d\n",
+ channel->channel);
+ goto fail;
+ }
+ }
+ efx_set_channel_names(efx);
+
+ return 0;
+
+fail:
+ efx_remove_channels(efx);
+ return rc;
+}
+
+/* Channels are shut down and reinitialised whilst the NIC is running
+ * to propagate configuration changes (mtu, checksum offload), or
+ * to clear hardware error conditions
+ */
+static void efx_init_channels(struct efx_nic *efx)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
+
+ /* Calculate the rx buffer allocation parameters required to
+ * support the current MTU, including padding for header
+ * alignment and overruns.
+ */
+ efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+ efx->type->rx_buffer_hash_size +
+ efx->type->rx_buffer_padding);
+ efx->rx_buffer_order = get_order(efx->rx_buffer_len +
+ sizeof(struct efx_rx_page_state));
+
+ /* Initialise the channels */
+ efx_for_each_channel(channel, efx) {
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "init chan %d\n", channel->channel);
+
+ efx_init_eventq(channel);
+
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_init_tx_queue(tx_queue);
+
+ /* The rx buffer allocation strategy is MTU dependent */
+ efx_rx_strategy(channel);
+
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_init_rx_queue(rx_queue);
+
+ WARN_ON(channel->rx_pkt != NULL);
+ efx_rx_strategy(channel);
+ }
+}
+
+/* This enables event queue processing and packet transmission.
+ *
+ * Note that this function is not allowed to fail, since that would
+ * introduce too much complexity into the suspend/resume path.
+ */
+static void efx_start_channel(struct efx_channel *channel)
+{
+ struct efx_rx_queue *rx_queue;
+
+ netif_dbg(channel->efx, ifup, channel->efx->net_dev,
+ "starting chan %d\n", channel->channel);
+
+ /* The interrupt handler for this channel may set work_pending
+ * as soon as we enable it. Make sure it's cleared before
+ * then. Similarly, make sure it sees the enabled flag set. */
+ channel->work_pending = false;
+ channel->enabled = true;
+ smp_wmb();
+
+ /* Fill the queues before enabling NAPI */
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_fast_push_rx_descriptors(rx_queue);
+
+ napi_enable(&channel->napi_str);
+}
+
+/* This disables event queue processing and packet transmission.
+ * This function does not guarantee that all queue processing
+ * (e.g. RX refill) is complete.
+ */
+static void efx_stop_channel(struct efx_channel *channel)
+{
+ if (!channel->enabled)
+ return;
+
+ netif_dbg(channel->efx, ifdown, channel->efx->net_dev,
+ "stop chan %d\n", channel->channel);
+
+ channel->enabled = false;
+ napi_disable(&channel->napi_str);
+}
+
+static void efx_fini_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->port_enabled);
+
+ rc = efx_nic_flush_queues(efx);
+ if (rc && EFX_WORKAROUND_7803(efx)) {
+ /* Schedule a reset to recover from the flush failure. The
+ * descriptor caches reference memory we're about to free,
+ * but falcon_reconfigure_mac_wrapper() won't reconnect
+ * the MACs because of the pending reset. */
+ netif_err(efx, drv, efx->net_dev,
+ "Resetting to recover from flush failure\n");
+ efx_schedule_reset(efx, RESET_TYPE_ALL);
+ } else if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+ } else {
+ netif_dbg(efx, drv, efx->net_dev,
+ "successfully flushed all queues\n");
+ }
+
+ efx_for_each_channel(channel, efx) {
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "shut down chan %d\n", channel->channel);
+
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_fini_rx_queue(rx_queue);
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel)
+ efx_fini_tx_queue(tx_queue);
+ efx_fini_eventq(channel);
+ }
+}
+
+static void efx_remove_channel(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "destroy chan %d\n", channel->channel);
+
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_remove_rx_queue(rx_queue);
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel)
+ efx_remove_tx_queue(tx_queue);
+ efx_remove_eventq(channel);
+}
+
+static void efx_remove_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_remove_channel(channel);
+}
+
+int
+efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+{
+ struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+ u32 old_rxq_entries, old_txq_entries;
+ unsigned i;
+ int rc;
+
+ efx_stop_all(efx);
+ efx_fini_channels(efx);
+
+ /* Clone channels */
+ memset(other_channel, 0, sizeof(other_channel));
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx_alloc_channel(efx, i, efx->channel[i]);
+ if (!channel) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ other_channel[i] = channel;
+ }
+
+ /* Swap entry counts and channel pointers */
+ old_rxq_entries = efx->rxq_entries;
+ old_txq_entries = efx->txq_entries;
+ efx->rxq_entries = rxq_entries;
+ efx->txq_entries = txq_entries;
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ efx->channel[i] = other_channel[i];
+ other_channel[i] = channel;
+ }
+
+ rc = efx_probe_channels(efx);
+ if (rc)
+ goto rollback;
+
+ efx_init_napi(efx);
+
+ /* Destroy old channels */
+ for (i = 0; i < efx->n_channels; i++) {
+ efx_fini_napi_channel(other_channel[i]);
+ efx_remove_channel(other_channel[i]);
+ }
+out:
+ /* Free unused channel structures */
+ for (i = 0; i < efx->n_channels; i++)
+ kfree(other_channel[i]);
+
+ efx_init_channels(efx);
+ efx_start_all(efx);
+ return rc;
+
+rollback:
+ /* Swap back */
+ efx->rxq_entries = old_rxq_entries;
+ efx->txq_entries = old_txq_entries;
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ efx->channel[i] = other_channel[i];
+ other_channel[i] = channel;
+ }
+ goto out;
+}
+
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
+{
+ mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
+}
+
+/**************************************************************************
+ *
+ * Port handling
+ *
+ **************************************************************************/
+
+/* This ensures that the kernel is kept informed (via
+ * netif_carrier_on/off) of the link status, and also maintains the
+ * corresponding stopped/started state of the port's TX queue.
+ */
+void efx_link_status_changed(struct efx_nic *efx)
+{
+ struct efx_link_state *link_state = &efx->link_state;
+
+ /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
+ * that no events are triggered between unregister_netdev() and the
+ * driver unloading. A more general condition is that NETDEV_CHANGE
+ * can only be generated between NETDEV_UP and NETDEV_DOWN */
+ if (!netif_running(efx->net_dev))
+ return;
+
+ if (link_state->up != netif_carrier_ok(efx->net_dev)) {
+ efx->n_link_state_changes++;
+
+ if (link_state->up)
+ netif_carrier_on(efx->net_dev);
+ else
+ netif_carrier_off(efx->net_dev);
+ }
+
+ /* Status message for kernel log */
+ if (link_state->up) {
+ netif_info(efx, link, efx->net_dev,
+ "link up at %uMbps %s-duplex (MTU %d)%s\n",
+ link_state->speed, link_state->fd ? "full" : "half",
+ efx->net_dev->mtu,
+ (efx->promiscuous ? " [PROMISC]" : ""));
+ } else {
+ netif_info(efx, link, efx->net_dev, "link down\n");
+ }
+}
+
+void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
+{
+ efx->link_advertising = advertising;
+ if (advertising) {
+ if (advertising & ADVERTISED_Pause)
+ efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
+ else
+ efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
+ if (advertising & ADVERTISED_Asym_Pause)
+ efx->wanted_fc ^= EFX_FC_TX;
+ }
+}
+
+void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
+{
+ efx->wanted_fc = wanted_fc;
+ if (efx->link_advertising) {
+ if (wanted_fc & EFX_FC_RX)
+ efx->link_advertising |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ else
+ efx->link_advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ if (wanted_fc & EFX_FC_TX)
+ efx->link_advertising ^= ADVERTISED_Asym_Pause;
+ }
+}
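+
+/* The two helpers above translate between flow control settings and
+ * pause advertising in both directions; the mapping (consistent with
+ * IEEE 802.3 Annex 28B) works out as:
+ *
+ * wanted_fc      advertised
+ * RX and TX      Pause
+ * RX only        Pause | Asym_Pause
+ * TX only        Asym_Pause
+ * neither        (none)
+ */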
+
+static void efx_fini_port(struct efx_nic *efx);
+
+/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
+ * the MAC appropriately. All other PHY configuration changes are pushed
+ * through phy_op->set_settings(), and pushed asynchronously to the MAC
+ * through efx_monitor().
+ *
+ * Callers must hold the mac_lock
+ */
+int __efx_reconfigure_port(struct efx_nic *efx)
+{
+ enum efx_phy_mode phy_mode;
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ /* Serialise the promiscuous flag with efx_set_multicast_list. */
+ if (efx_dev_registered(efx)) {
+ netif_addr_lock_bh(efx->net_dev);
+ netif_addr_unlock_bh(efx->net_dev);
+ }
+
+ /* Disable PHY transmit in mac level loopbacks */
+ phy_mode = efx->phy_mode;
+ if (LOOPBACK_INTERNAL(efx))
+ efx->phy_mode |= PHY_MODE_TX_DISABLED;
+ else
+ efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
+
+ rc = efx->type->reconfigure_port(efx);
+
+ if (rc)
+ efx->phy_mode = phy_mode;
+
+ return rc;
+}
+
+/* Reinitialise the MAC to pick up new PHY settings, even if the port is
+ * disabled. */
+int efx_reconfigure_port(struct efx_nic *efx)
+{
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ mutex_lock(&efx->mac_lock);
+ rc = __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+/* Asynchronous work item for changing MAC promiscuity and multicast
+ * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
+ * MAC directly. */
+static void efx_mac_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
+
+ mutex_lock(&efx->mac_lock);
+ if (efx->port_enabled) {
+ efx->type->push_multicast_hash(efx);
+ efx->mac_op->reconfigure(efx);
+ }
+ mutex_unlock(&efx->mac_lock);
+}
+
+static int efx_probe_port(struct efx_nic *efx)
+{
+ unsigned char *perm_addr;
+ int rc;
+
+ netif_dbg(efx, probe, efx->net_dev, "create port\n");
+
+ if (phy_flash_cfg)
+ efx->phy_mode = PHY_MODE_SPECIAL;
+
+ /* Connect up MAC/PHY operations table */
+ rc = efx->type->probe_port(efx);
+ if (rc)
+ return rc;
+
+ /* Sanity check MAC address */
+ perm_addr = efx->net_dev->perm_addr;
+ if (is_valid_ether_addr(perm_addr)) {
+ memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN);
+ } else {
+ netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
+ perm_addr);
+ if (!allow_bad_hwaddr) {
+ rc = -EINVAL;
+ goto err;
+ }
+ random_ether_addr(efx->net_dev->dev_addr);
+ netif_info(efx, probe, efx->net_dev,
+ "using locally-generated MAC %pM\n",
+ efx->net_dev->dev_addr);
+ }
+
+ return 0;
+
+ err:
+ efx->type->remove_port(efx);
+ return rc;
+}
+
+static int efx_init_port(struct efx_nic *efx)
+{
+ int rc;
+
+ netif_dbg(efx, drv, efx->net_dev, "init port\n");
+
+ mutex_lock(&efx->mac_lock);
+
+ rc = efx->phy_op->init(efx);
+ if (rc)
+ goto fail1;
+
+ efx->port_initialized = true;
+
+ /* Reconfigure the MAC before creating dma queues (required for
+ * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
+ efx->mac_op->reconfigure(efx);
+
+ /* Ensure the PHY advertises the correct flow control settings */
+ rc = efx->phy_op->reconfigure(efx);
+ if (rc)
+ goto fail2;
+
+ mutex_unlock(&efx->mac_lock);
+ return 0;
+
+fail2:
+ efx->phy_op->fini(efx);
+fail1:
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
+
+static void efx_start_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, ifup, efx->net_dev, "start port\n");
+ BUG_ON(efx->port_enabled);
+
+ mutex_lock(&efx->mac_lock);
+ efx->port_enabled = true;
+
+ /* efx_mac_work() might have been scheduled after efx_stop_port(),
+ * and then cancelled by efx_flush_all() */
+ efx->type->push_multicast_hash(efx);
+ efx->mac_op->reconfigure(efx);
+
+ mutex_unlock(&efx->mac_lock);
+}
+
+/* Prevent efx_mac_work() and efx_monitor() from working */
+static void efx_stop_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
+
+ mutex_lock(&efx->mac_lock);
+ efx->port_enabled = false;
+ mutex_unlock(&efx->mac_lock);
+
+ /* Serialise against efx_set_multicast_list() */
+ if (efx_dev_registered(efx)) {
+ netif_addr_lock_bh(efx->net_dev);
+ netif_addr_unlock_bh(efx->net_dev);
+ }
+}
+
+static void efx_fini_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
+
+ if (!efx->port_initialized)
+ return;
+
+ efx->phy_op->fini(efx);
+ efx->port_initialized = false;
+
+ efx->link_state.up = false;
+ efx_link_status_changed(efx);
+}
+
+static void efx_remove_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
+
+ efx->type->remove_port(efx);
+}
+
+/**************************************************************************
+ *
+ * NIC handling
+ *
+ **************************************************************************/
+
+/* This configures the PCI device to enable I/O and DMA. */
+static int efx_init_io(struct efx_nic *efx)
+{
+ struct pci_dev *pci_dev = efx->pci_dev;
+ dma_addr_t dma_mask = efx->type->max_dma_mask;
+ int rc;
+
+ netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
+
+ rc = pci_enable_device(pci_dev);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to enable PCI device\n");
+ goto fail1;
+ }
+
+ pci_set_master(pci_dev);
+
+ /* Set the PCI DMA mask. Try all possibilities from our
+ * genuine mask down to 32 bits, because some architectures
+ * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
+ * masks even though they reject 46 bit masks.
+ */
+ while (dma_mask > 0x7fffffffUL) {
+ if (pci_dma_supported(pci_dev, dma_mask) &&
+ ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
+ break;
+ dma_mask >>= 1;
+ }
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not find a suitable DMA mask\n");
+ goto fail2;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "using DMA mask %llx\n", (unsigned long long) dma_mask);
+ rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
+ if (rc) {
+ /* pci_set_consistent_dma_mask() is not *allowed* to
+ * fail with a mask that pci_set_dma_mask() accepted,
+ * but just in case...
+ */
+ netif_err(efx, probe, efx->net_dev,
+ "failed to set consistent DMA mask\n");
+ goto fail2;
+ }
+
+ efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
+ rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "request for memory BAR failed\n");
+ rc = -EIO;
+ goto fail3;
+ }
+ efx->membase = ioremap_nocache(efx->membase_phys,
+ efx->type->mem_map_size);
+ if (!efx->membase) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not map memory BAR at %llx+%x\n",
+ (unsigned long long)efx->membase_phys,
+ efx->type->mem_map_size);
+ rc = -ENOMEM;
+ goto fail4;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "memory BAR at %llx+%x (virtual %p)\n",
+ (unsigned long long)efx->membase_phys,
+ efx->type->mem_map_size, efx->membase);
+
+ return 0;
+
+ fail4:
+ pci_release_region(efx->pci_dev, EFX_MEM_BAR);
+ fail3:
+ efx->membase_phys = 0;
+ fail2:
+ pci_disable_device(efx->pci_dev);
+ fail1:
+ return rc;
+}
+
+static void efx_fini_io(struct efx_nic *efx)
+{
+ netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
+
+ if (efx->membase) {
+ iounmap(efx->membase);
+ efx->membase = NULL;
+ }
+
+ if (efx->membase_phys) {
+ pci_release_region(efx->pci_dev, EFX_MEM_BAR);
+ efx->membase_phys = 0;
+ }
+
+ pci_disable_device(efx->pci_dev);
+}
+
+/* Get number of channels wanted. Each channel will have its own IRQ,
+ * 1 RX queue and/or 2 TX queues. */
+static int efx_wanted_channels(void)
+{
+ cpumask_var_t core_mask;
+ int count;
+ int cpu;
+
+ if (rss_cpus)
+ return rss_cpus;
+
+ if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
+ printk(KERN_WARNING
+ "sfc: RSS disabled due to allocation failure\n");
+ return 1;
+ }
+
+ count = 0;
+ for_each_online_cpu(cpu) {
+ if (!cpumask_test_cpu(cpu, core_mask)) {
+ ++count;
+ cpumask_or(core_mask, core_mask,
+ topology_core_cpumask(cpu));
+ }
+ }
+
+ free_cpumask_var(core_mask);
+ return count;
+}
+
+static int
+efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
+{
+#ifdef CONFIG_RFS_ACCEL
+ int i, rc;
+
+ efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
+ if (!efx->net_dev->rx_cpu_rmap)
+ return -ENOMEM;
+ for (i = 0; i < efx->n_rx_channels; i++) {
+ rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
+ xentries[i].vector);
+ if (rc) {
+ free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+ efx->net_dev->rx_cpu_rmap = NULL;
+ return rc;
+ }
+ }
+#endif
+ return 0;
+}
+
+/* Probe the number and type of interrupts we are able to obtain, and
+ * the resulting numbers of channels and RX queues.
+ */
+static int efx_probe_interrupts(struct efx_nic *efx)
+{
+ int max_channels =
+ min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
+ int rc, i;
+
+ if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+ struct msix_entry xentries[EFX_MAX_CHANNELS];
+ int n_channels;
+
+ n_channels = efx_wanted_channels();
+ if (separate_tx_channels)
+ n_channels *= 2;
+ n_channels = min(n_channels, max_channels);
+
+ for (i = 0; i < n_channels; i++)
+ xentries[i].entry = i;
+ rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
+ if (rc > 0) {
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Insufficient MSI-X vectors"
+ " available (%d < %d).\n", rc, n_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Performance may be reduced.\n");
+ EFX_BUG_ON_PARANOID(rc >= n_channels);
+ n_channels = rc;
+ rc = pci_enable_msix(efx->pci_dev, xentries,
+ n_channels);
+ }
+
+ if (rc == 0) {
+ efx->n_channels = n_channels;
+ if (separate_tx_channels) {
+ efx->n_tx_channels =
+ max(efx->n_channels / 2, 1U);
+ efx->n_rx_channels =
+ max(efx->n_channels -
+ efx->n_tx_channels, 1U);
+ } else {
+ efx->n_tx_channels = efx->n_channels;
+ efx->n_rx_channels = efx->n_channels;
+ }
+ rc = efx_init_rx_cpu_rmap(efx, xentries);
+ if (rc) {
+ pci_disable_msix(efx->pci_dev);
+ return rc;
+ }
+ for (i = 0; i < n_channels; i++)
+ efx_get_channel(efx, i)->irq =
+ xentries[i].vector;
+ } else {
+ /* Fall back to single channel MSI */
+ efx->interrupt_mode = EFX_INT_MODE_MSI;
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI-X\n");
+ }
+ }
+
+ /* Try single interrupt MSI */
+ if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
+ efx->n_channels = 1;
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+ rc = pci_enable_msi(efx->pci_dev);
+ if (rc == 0) {
+ efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
+ } else {
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI\n");
+ efx->interrupt_mode = EFX_INT_MODE_LEGACY;
+ }
+ }
+
+ /* Assume legacy interrupts */
+ if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
+ efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+ efx->legacy_irq = efx->pci_dev->irq;
+ }
+
+ return 0;
+}
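+
+/* Summary of the fallback ladder implemented above:
+ *
+ * mode    channels                         IRQs
+ * MSI-X   up to max_channels               one vector per channel
+ * MSI     1                                the PCI function's MSI
+ * legacy  1 (2 with separate_tx_channels)  efx->legacy_irq
+ */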
+
+static void efx_remove_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ /* Remove MSI/MSI-X interrupts */
+ efx_for_each_channel(channel, efx)
+ channel->irq = 0;
+ pci_disable_msi(efx->pci_dev);
+ pci_disable_msix(efx->pci_dev);
+
+ /* Remove legacy interrupt */
+ efx->legacy_irq = 0;
+}
+
+static void efx_set_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+
+ efx->tx_channel_offset =
+ separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
+
+ /* We need to adjust the TX queue numbers if we have separate
+ * RX-only and TX-only channels.
+ */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ tx_queue->queue -= (efx->tx_channel_offset *
+ EFX_TXQ_TYPES);
+ }
+}
+
+static int efx_probe_nic(struct efx_nic *efx)
+{
+ size_t i;
+ int rc;
+
+ netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
+
+ /* Carry out hardware-type specific initialisation */
+ rc = efx->type->probe(efx);
+ if (rc)
+ return rc;
+
+ /* Determine the number of channels and queues by trying to hook
+ * in MSI-X interrupts. */
+ rc = efx_probe_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ if (efx->n_channels > 1)
+ get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+ efx->rx_indir_table[i] = i % efx->n_rx_channels;
+
+ efx_set_channels(efx);
+ netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
+ netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
+
+ /* Initialise the interrupt moderation settings */
+ efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
+ true);
+
+ return 0;
+
+fail:
+ efx->type->remove(efx);
+ return rc;
+}
+
+static void efx_remove_nic(struct efx_nic *efx)
+{
+ netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
+
+ efx_remove_interrupts(efx);
+ efx->type->remove(efx);
+}
+
+/**************************************************************************
+ *
+ * NIC startup/shutdown
+ *
+ *************************************************************************/
+
+static int efx_probe_all(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_probe_nic(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
+ goto fail1;
+ }
+
+ rc = efx_probe_port(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev, "failed to create port\n");
+ goto fail2;
+ }
+
+ efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
+ rc = efx_probe_channels(efx);
+ if (rc)
+ goto fail3;
+
+ rc = efx_probe_filters(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to create filter tables\n");
+ goto fail4;
+ }
+
+ return 0;
+
+ fail4:
+ efx_remove_channels(efx);
+ fail3:
+ efx_remove_port(efx);
+ fail2:
+ efx_remove_nic(efx);
+ fail1:
+ return rc;
+}
+
+/* Called after previous invocation(s) of efx_stop_all, restarts the
+ * port, kernel transmit queue, NAPI processing and hardware interrupts,
+ * and ensures that the port is scheduled to be reconfigured.
+ * This function is safe to call multiple times when the NIC is in any
+ * state. */
+static void efx_start_all(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ /* Check that it is appropriate to restart the interface. All
+ * of these flags are safe to read under just the rtnl lock */
+ if (efx->port_enabled)
+ return;
+ if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
+ return;
+ if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
+ return;
+
+ /* Mark the port as enabled so port reconfigurations can start, then
+ * restart the transmit interface early so the watchdog timer stops */
+ efx_start_port(efx);
+
+ if (efx_dev_registered(efx) && netif_device_present(efx->net_dev))
+ netif_tx_wake_all_queues(efx->net_dev);
+
+ efx_for_each_channel(channel, efx)
+ efx_start_channel(channel);
+
+ if (efx->legacy_irq)
+ efx->legacy_irq_enabled = true;
+ efx_nic_enable_interrupts(efx);
+
+ /* Switch to event based MCDI completions after enabling interrupts.
+ * If a reset has been scheduled, then we need to stay in polled mode.
+ * Rather than serialising efx_mcdi_mode_event() [which sleeps] and
+ * reset_pending [modified from an atomic context], we instead guarantee
+ * that efx_mcdi_mode_poll() isn't reverted erroneously */
+ efx_mcdi_mode_event(efx);
+ if (efx->reset_pending)
+ efx_mcdi_mode_poll(efx);
+
+ /* Start the hardware monitor if there is one. Otherwise (we're link
+ * event driven), we have to poll the PHY because after an event queue
+	 * flush, we could have missed a link state change */
+ if (efx->type->monitor != NULL) {
+ queue_delayed_work(efx->workqueue, &efx->monitor_work,
+ efx_monitor_interval);
+ } else {
+ mutex_lock(&efx->mac_lock);
+ if (efx->phy_op->poll(efx))
+ efx_link_status_changed(efx);
+ mutex_unlock(&efx->mac_lock);
+ }
+
+ efx->type->start_stats(efx);
+}
+
+/* Flush all delayed work. Should only be called when no more delayed work
+ * will be scheduled. This doesn't flush pending online resets (efx_reset),
+ * since we're holding the rtnl_lock at this point. */
+static void efx_flush_all(struct efx_nic *efx)
+{
+ /* Make sure the hardware monitor is stopped */
+ cancel_delayed_work_sync(&efx->monitor_work);
+ /* Stop scheduled port reconfigurations */
+ cancel_work_sync(&efx->mac_work);
+}
+
+/* Quiesce hardware and software without bringing the link down.
+ * Safe to call multiple times, when the NIC and interface are in any
+ * state. The caller is guaranteed to subsequently be in a position
+ * to modify any hardware and software state they see fit without
+ * taking locks. */
+static void efx_stop_all(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ /* port_enabled can be read safely under the rtnl lock */
+ if (!efx->port_enabled)
+ return;
+
+ efx->type->stop_stats(efx);
+
+ /* Switch to MCDI polling on Siena before disabling interrupts */
+ efx_mcdi_mode_poll(efx);
+
+ /* Disable interrupts and wait for ISR to complete */
+ efx_nic_disable_interrupts(efx);
+ if (efx->legacy_irq) {
+ synchronize_irq(efx->legacy_irq);
+ efx->legacy_irq_enabled = false;
+ }
+ efx_for_each_channel(channel, efx) {
+ if (channel->irq)
+ synchronize_irq(channel->irq);
+ }
+
+ /* Stop all NAPI processing and synchronous rx refills */
+ efx_for_each_channel(channel, efx)
+ efx_stop_channel(channel);
+
+ /* Stop all asynchronous port reconfigurations. Since all
+ * event processing has already been stopped, there is no
+	 * window to lose PHY events */
+ efx_stop_port(efx);
+
+ /* Flush efx_mac_work(), refill_workqueue, monitor_work */
+ efx_flush_all(efx);
+
+ /* Stop the kernel transmit interface late, so the watchdog
+ * timer isn't ticking over the flush */
+ if (efx_dev_registered(efx)) {
+ netif_tx_stop_all_queues(efx->net_dev);
+ netif_tx_lock_bh(efx->net_dev);
+ netif_tx_unlock_bh(efx->net_dev);
+ }
+}
+
+static void efx_remove_all(struct efx_nic *efx)
+{
+ efx_remove_filters(efx);
+ efx_remove_channels(efx);
+ efx_remove_port(efx);
+ efx_remove_nic(efx);
+}
+
+/**************************************************************************
+ *
+ * Interrupt moderation
+ *
+ **************************************************************************/
+
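+/* Convert an interrupt moderation time in microseconds to a number of
+ * hardware ticks at the given resolution, rounding any non-zero time
+ * up to at least one tick */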
+static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int resolution)
+{
+ if (usecs == 0)
+ return 0;
+ if (usecs < resolution)
+ return 1; /* never round down to 0 */
+ return usecs / resolution;
+}
+
+/* Set interrupt moderation parameters */
+int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+ unsigned int rx_usecs, bool rx_adaptive,
+ bool rx_may_override_tx)
+{
+ struct efx_channel *channel;
+ unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION);
+ unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION);
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ if (tx_ticks > EFX_IRQ_MOD_MAX || rx_ticks > EFX_IRQ_MOD_MAX)
+ return -EINVAL;
+
+ if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
+ !rx_may_override_tx) {
+ netif_err(efx, drv, efx->net_dev, "Channels are shared. "
+ "RX and TX IRQ moderation must be equal\n");
+ return -EINVAL;
+ }
+
+ efx->irq_rx_adaptive = rx_adaptive;
+ efx->irq_rx_moderation = rx_ticks;
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_rx_queue(channel))
+ channel->irq_moderation = rx_ticks;
+ else if (efx_channel_has_tx_queues(channel))
+ channel->irq_moderation = tx_ticks;
+ }
+
+ return 0;
+}
+
+void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+ unsigned int *rx_usecs, bool *rx_adaptive)
+{
+ *rx_adaptive = efx->irq_rx_adaptive;
+ *rx_usecs = efx->irq_rx_moderation * EFX_IRQ_MOD_RESOLUTION;
+
+ /* If channels are shared between RX and TX, so is IRQ
+ * moderation. Otherwise, IRQ moderation is the same for all
+ * TX channels and is not adaptive.
+ */
+ if (efx->tx_channel_offset == 0)
+ *tx_usecs = *rx_usecs;
+ else
+ *tx_usecs =
+ efx->channel[efx->tx_channel_offset]->irq_moderation *
+ EFX_IRQ_MOD_RESOLUTION;
+}
+
+/**************************************************************************
+ *
+ * Hardware monitor
+ *
+ **************************************************************************/
+
+/* Run periodically off the general workqueue */
+static void efx_monitor(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic,
+ monitor_work.work);
+
+ netif_vdbg(efx, timer, efx->net_dev,
+ "hardware monitor executing on CPU %d\n",
+ raw_smp_processor_id());
+ BUG_ON(efx->type->monitor == NULL);
+
+ /* If the mac_lock is already held then it is likely a port
+	 * reconfiguration is already in progress, which will likely do
+ * most of the work of monitor() anyway. */
+ if (mutex_trylock(&efx->mac_lock)) {
+ if (efx->port_enabled)
+ efx->type->monitor(efx);
+ mutex_unlock(&efx->mac_lock);
+ }
+
+ queue_delayed_work(efx->workqueue, &efx->monitor_work,
+ efx_monitor_interval);
+}
+
+/**************************************************************************
+ *
+ * ioctls
+ *
+ *************************************************************************/
+
+/* Net device ioctl
+ * Context: process, rtnl_lock() held.
+ */
+static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ /* Convert phy_id from older PRTAD/DEVAD format */
+ if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
+ (data->phy_id & 0xfc00) == 0x0400)
+ data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
+
+ return mdio_mii_ioctl(&efx->mdio, data, cmd);
+}
+
+/**************************************************************************
+ *
+ * NAPI interface
+ *
+ **************************************************************************/
+
+static void efx_init_napi(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx) {
+ channel->napi_dev = efx->net_dev;
+ netif_napi_add(channel->napi_dev, &channel->napi_str,
+ efx_poll, napi_weight);
+ }
+}
+
+static void efx_fini_napi_channel(struct efx_channel *channel)
+{
+ if (channel->napi_dev)
+ netif_napi_del(&channel->napi_str);
+ channel->napi_dev = NULL;
+}
+
+static void efx_fini_napi(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_fini_napi_channel(channel);
+}
+
+/**************************************************************************
+ *
+ * Kernel netpoll interface
+ *
+ *************************************************************************/
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+
+/* Although in the common case interrupts will be disabled, this is not
+ * guaranteed. However, all our work happens inside the NAPI callback,
+ * so no locking is required.
+ */
+static void efx_netpoll(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_schedule_channel(channel);
+}
+
+#endif
+
+/**************************************************************************
+ *
+ * Kernel net device interface
+ *
+ *************************************************************************/
+
+/* Context: process, rtnl_lock() held. */
+static int efx_net_open(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
+ raw_smp_processor_id());
+
+ if (efx->state == STATE_DISABLED)
+ return -EIO;
+ if (efx->phy_mode & PHY_MODE_SPECIAL)
+ return -EBUSY;
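+	/* If the management controller has rebooted, the NIC state is
+	 * unknown and needs a full reset before it can be used */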
+ if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
+ return -EIO;
+
+ /* Notify the kernel of the link state polled during driver load,
+ * before the monitor starts running */
+ efx_link_status_changed(efx);
+
+ efx_start_all(efx);
+ return 0;
+}
+
+/* Context: process, rtnl_lock() held.
+ * Note that the kernel will ignore our return code; this method
+ * should really return void.
+ */
+static int efx_net_stop(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
+ raw_smp_processor_id());
+
+ if (efx->state != STATE_DISABLED) {
+ /* Stop the device and flush all the channels */
+ efx_stop_all(efx);
+ efx_fini_channels(efx);
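+		/* Re-initialise the channels immediately so that a later
+		 * efx_net_open() only has to restart them */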
+ efx_init_channels(efx);
+ }
+
+ return 0;
+}
+
+/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_mac_stats *mac_stats = &efx->mac_stats;
+
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx);
+ spin_unlock_bh(&efx->stats_lock);
+
+ stats->rx_packets = mac_stats->rx_packets;
+ stats->tx_packets = mac_stats->tx_packets;
+ stats->rx_bytes = mac_stats->rx_bytes;
+ stats->tx_bytes = mac_stats->tx_bytes;
+ stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
+ stats->multicast = mac_stats->rx_multicast;
+ stats->collisions = mac_stats->tx_collision;
+ stats->rx_length_errors = (mac_stats->rx_gtjumbo +
+ mac_stats->rx_length_error);
+ stats->rx_crc_errors = mac_stats->rx_bad;
+ stats->rx_frame_errors = mac_stats->rx_align_error;
+ stats->rx_fifo_errors = mac_stats->rx_overflow;
+ stats->rx_missed_errors = mac_stats->rx_missed;
+ stats->tx_window_errors = mac_stats->tx_late_collision;
+
+ stats->rx_errors = (stats->rx_length_errors +
+ stats->rx_crc_errors +
+ stats->rx_frame_errors +
+ mac_stats->rx_symbol_error);
+ stats->tx_errors = (stats->tx_window_errors +
+ mac_stats->tx_bad);
+
+ return stats;
+}
+
+/* Context: netif_tx_lock held, BHs disabled. */
+static void efx_watchdog(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX stuck with port_enabled=%d: resetting channels\n",
+ efx->port_enabled);
+
+ efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
+}
+
+/* Context: process, rtnl_lock() held. */
+static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc = 0;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ if (new_mtu > EFX_MAX_MTU)
+ return -EINVAL;
+
+ efx_stop_all(efx);
+
+ netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
+
+ efx_fini_channels(efx);
+
+ mutex_lock(&efx->mac_lock);
+ /* Reconfigure the MAC before enabling the dma queues so that
+ * the RX buffers don't overflow */
+ net_dev->mtu = new_mtu;
+ efx->mac_op->reconfigure(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ efx_init_channels(efx);
+
+ efx_start_all(efx);
+ return rc;
+}
+
+static int efx_set_mac_address(struct net_device *net_dev, void *data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct sockaddr *addr = data;
+ char *new_addr = addr->sa_data;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ if (!is_valid_ether_addr(new_addr)) {
+ netif_err(efx, drv, efx->net_dev,
+ "invalid ethernet MAC address requested: %pM\n",
+ new_addr);
+ return -EINVAL;
+ }
+
+ memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
+
+ /* Reconfigure the MAC */
+ mutex_lock(&efx->mac_lock);
+ efx->mac_op->reconfigure(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ return 0;
+}
+
+/* Context: netif_addr_lock held, BHs disabled. */
+static void efx_set_multicast_list(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct netdev_hw_addr *ha;
+ union efx_multicast_hash *mc_hash = &efx->multicast_hash;
+ u32 crc;
+ int bit;
+
+ efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
+
+ /* Build multicast hash table */
+ if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
+ memset(mc_hash, 0xff, sizeof(*mc_hash));
+ } else {
+ memset(mc_hash, 0x00, sizeof(*mc_hash));
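+		/* EFX_MCAST_HASH_ENTRIES is a power of two, so the
+		 * low-order bits of the CRC index the hash table directly */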
+ netdev_for_each_mc_addr(ha, net_dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
+ set_bit_le(bit, mc_hash->byte);
+ }
+
+ /* Broadcast packets go through the multicast hash filter.
+ * ether_crc_le() of the broadcast address is 0xbe2612ff
+ * so we always add bit 0xff to the mask.
+ */
+ set_bit_le(0xff, mc_hash->byte);
+ }
+
+ if (efx->port_enabled)
+ queue_work(efx->workqueue, &efx->mac_work);
+ /* Otherwise efx_start_port() will do this */
+}
+
+static int efx_set_features(struct net_device *net_dev, u32 data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ /* If disabling RX n-tuple filtering, clear existing filters */
+ if (net_dev->features & ~data & NETIF_F_NTUPLE)
+ efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+
+ return 0;
+}
+
+static const struct net_device_ops efx_netdev_ops = {
+ .ndo_open = efx_net_open,
+ .ndo_stop = efx_net_stop,
+ .ndo_get_stats64 = efx_net_stats,
+ .ndo_tx_timeout = efx_watchdog,
+ .ndo_start_xmit = efx_hard_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = efx_ioctl,
+ .ndo_change_mtu = efx_change_mtu,
+ .ndo_set_mac_address = efx_set_mac_address,
+ .ndo_set_rx_mode = efx_set_multicast_list,
+ .ndo_set_features = efx_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = efx_netpoll,
+#endif
+ .ndo_setup_tc = efx_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = efx_filter_rfs,
+#endif
+};
+
+static void efx_update_name(struct efx_nic *efx)
+{
+ strcpy(efx->name, efx->net_dev->name);
+ efx_mtd_rename(efx);
+ efx_set_channel_names(efx);
+}
+
+static int efx_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *net_dev = ptr;
+
+ if (net_dev->netdev_ops == &efx_netdev_ops &&
+ event == NETDEV_CHANGENAME)
+ efx_update_name(netdev_priv(net_dev));
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block efx_netdev_notifier = {
+ .notifier_call = efx_netdev_event,
+};
+
+static ssize_t
+show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ return sprintf(buf, "%d\n", efx->phy_type);
+}
+static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
+
+static int efx_register_netdev(struct efx_nic *efx)
+{
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_channel *channel;
+ int rc;
+
+ net_dev->watchdog_timeo = 5 * HZ;
+ net_dev->irq = efx->pci_dev->irq;
+ net_dev->netdev_ops = &efx_netdev_ops;
+ SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
+
+ /* Clear MAC statistics */
+ efx->mac_op->update_stats(efx);
+ memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
+
+ rtnl_lock();
+
+ rc = dev_alloc_name(net_dev, net_dev->name);
+ if (rc < 0)
+ goto fail_locked;
+ efx_update_name(efx);
+
+ rc = register_netdevice(net_dev);
+ if (rc)
+ goto fail_locked;
+
+ efx_for_each_channel(channel, efx) {
+ struct efx_tx_queue *tx_queue;
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_init_tx_queue_core_txq(tx_queue);
+ }
+
+ /* Always start with carrier off; PHY events will detect the link */
+ netif_carrier_off(efx->net_dev);
+
+ rtnl_unlock();
+
+ rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to init net dev attributes\n");
+ goto fail_registered;
+ }
+
+ return 0;
+
+fail_locked:
+ rtnl_unlock();
+ netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
+ return rc;
+
+fail_registered:
+ unregister_netdev(net_dev);
+ return rc;
+}
+
+static void efx_unregister_netdev(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+
+ if (!efx->net_dev)
+ return;
+
+ BUG_ON(netdev_priv(efx->net_dev) != efx);
+
+ /* Free up any skbs still remaining. This has to happen before
+ * we try to unregister the netdev as running their destructors
+ * may be needed to get the device ref. count to 0. */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_release_tx_buffers(tx_queue);
+ }
+
+ if (efx_dev_registered(efx)) {
+ strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+ unregister_netdev(efx->net_dev);
+ }
+}
+
+/**************************************************************************
+ *
+ * Device reset and suspend
+ *
+ **************************************************************************/
+
+/* Tears down the entire software state and most of the hardware state
+ * before reset. */
+void efx_reset_down(struct efx_nic *efx, enum reset_type method)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ efx_stop_all(efx);
+ mutex_lock(&efx->mac_lock);
+
+ efx_fini_channels(efx);
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
+ efx->phy_op->fini(efx);
+ efx->type->fini(efx);
+}
+
+/* This function will always ensure that the locks acquired in
+ * efx_reset_down() are released. A failure return code indicates
+ * that we were unable to reinitialise the hardware, and the
+ * driver should be disabled. If ok is false, then the rx and tx
+ * engines are not restarted, pending a RESET_TYPE_DISABLE. */
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
+{
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ rc = efx->type->init(efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
+ goto fail;
+ }
+
+ if (!ok)
+ goto fail;
+
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
+ rc = efx->phy_op->init(efx);
+ if (rc)
+ goto fail;
+ if (efx->phy_op->reconfigure(efx))
+ netif_err(efx, drv, efx->net_dev,
+ "could not restore PHY settings\n");
+ }
+
+ efx->mac_op->reconfigure(efx);
+
+ efx_init_channels(efx);
+ efx_restore_filters(efx);
+
+ mutex_unlock(&efx->mac_lock);
+
+ efx_start_all(efx);
+
+ return 0;
+
+fail:
+ efx->port_initialized = false;
+
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+/* Reset the NIC using the specified method. Note that the reset may
+ * fail, in which case the card will be left in an unusable state.
+ *
+ * Caller must hold the rtnl_lock.
+ */
+int efx_reset(struct efx_nic *efx, enum reset_type method)
+{
+ int rc, rc2;
+ bool disabled;
+
+ netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
+ RESET_TYPE(method));
+
+ netif_device_detach(efx->net_dev);
+ efx_reset_down(efx, method);
+
+ rc = efx->type->reset(efx, method);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
+ goto out;
+ }
+
+	/* Clear flags for the scopes we covered: -(1 << (method + 1)) has
+	 * all bits from (method + 1) upwards set, so this clears the
+	 * pending flags for this method and every smaller-scoped method
+	 * below it. We assume the NIC and driver are now quiescent so
+	 * that there is no race here.
+	 */
+ efx->reset_pending &= -(1 << (method + 1));
+
+ /* Reinitialise bus-mastering, which may have been turned off before
+ * the reset was scheduled. This is still appropriate, even in the
+	 * RESET_TYPE_DISABLE case, since this driver generally assumes the
+	 * hardware can respond to requests. */
+ pci_set_master(efx->pci_dev);
+
+out:
+ /* Leave device stopped if necessary */
+ disabled = rc || method == RESET_TYPE_DISABLE;
+ rc2 = efx_reset_up(efx, method, !disabled);
+ if (rc2) {
+ disabled = true;
+ if (!rc)
+ rc = rc2;
+ }
+
+ if (disabled) {
+ dev_close(efx->net_dev);
+ netif_err(efx, drv, efx->net_dev, "has been disabled\n");
+ efx->state = STATE_DISABLED;
+ } else {
+ netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
+ netif_device_attach(efx->net_dev);
+ }
+ return rc;
+}
+
+/* The worker thread exists so that code that cannot sleep can
+ * schedule a reset for later.
+ */
+static void efx_reset_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
+ unsigned long pending = ACCESS_ONCE(efx->reset_pending);
+
+ if (!pending)
+ return;
+
+ /* If we're not RUNNING then don't reset. Leave the reset_pending
+ * flags set so that efx_pci_probe_main will be retried */
+ if (efx->state != STATE_RUNNING) {
+ netif_info(efx, drv, efx->net_dev,
+ "scheduled reset quenched. NIC not RUNNING\n");
+ return;
+ }
+
+ rtnl_lock();
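+	/* fls() finds the last (most significant) bit set, i.e. the
+	 * highest-numbered and therefore largest-scoped pending method */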
+ (void)efx_reset(efx, fls(pending) - 1);
+ rtnl_unlock();
+}
+
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
+{
+ enum reset_type method;
+
+ switch (type) {
+ case RESET_TYPE_INVISIBLE:
+ case RESET_TYPE_ALL:
+ case RESET_TYPE_WORLD:
+ case RESET_TYPE_DISABLE:
+ method = type;
+ netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
+ RESET_TYPE(method));
+ break;
+ default:
+ method = efx->type->map_reset_reason(type);
+ netif_dbg(efx, drv, efx->net_dev,
+ "scheduling %s reset for %s\n",
+ RESET_TYPE(method), RESET_TYPE(type));
+ break;
+ }
+
+ set_bit(method, &efx->reset_pending);
+
+ /* efx_process_channel() will no longer read events once a
+	 * reset is scheduled. So switch back to polled MCDI completions. */
+ efx_mcdi_mode_poll(efx);
+
+ queue_work(reset_workqueue, &efx->reset_work);
+}
+
+/**************************************************************************
+ *
+ * List of NICs we support
+ *
+ **************************************************************************/
+
+/* PCI device ID table */
+static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
+ {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
+ .driver_data = (unsigned long) &falcon_a1_nic_type},
+ {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
+ .driver_data = (unsigned long) &falcon_b0_nic_type},
+ {PCI_DEVICE(EFX_VENDID_SFC, BETHPAGE_A_P_DEVID),
+ .driver_data = (unsigned long) &siena_a0_nic_type},
+ {PCI_DEVICE(EFX_VENDID_SFC, SIENA_A_P_DEVID),
+ .driver_data = (unsigned long) &siena_a0_nic_type},
+ {0} /* end of list */
+};
+
+/**************************************************************************
+ *
+ * Dummy PHY/MAC operations
+ *
+ * Can be used for some unimplemented operations
+ * Needed so all function pointers are valid and do not have to be tested
+ * before use
+ *
+ **************************************************************************/
+int efx_port_dummy_op_int(struct efx_nic *efx)
+{
+ return 0;
+}
+void efx_port_dummy_op_void(struct efx_nic *efx) {}
+
+static bool efx_port_dummy_op_poll(struct efx_nic *efx)
+{
+ return false;
+}
+
+static const struct efx_phy_operations efx_dummy_phy_operations = {
+ .init = efx_port_dummy_op_int,
+ .reconfigure = efx_port_dummy_op_int,
+ .poll = efx_port_dummy_op_poll,
+ .fini = efx_port_dummy_op_void,
+};
+
+/**************************************************************************
+ *
+ * Data housekeeping
+ *
+ **************************************************************************/
+
+/* This zeroes out and then fills in the invariants in a struct
+ * efx_nic (including all sub-structures).
+ */
+static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
+ struct pci_dev *pci_dev, struct net_device *net_dev)
+{
+ int i;
+
+ /* Initialise common structures */
+ memset(efx, 0, sizeof(*efx));
+ spin_lock_init(&efx->biu_lock);
+#ifdef CONFIG_SFC_MTD
+ INIT_LIST_HEAD(&efx->mtd_list);
+#endif
+ INIT_WORK(&efx->reset_work, efx_reset_work);
+ INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
+ efx->pci_dev = pci_dev;
+ efx->msg_enable = debug;
+ efx->state = STATE_INIT;
+ strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
+
+ efx->net_dev = net_dev;
+ spin_lock_init(&efx->stats_lock);
+ mutex_init(&efx->mac_lock);
+ efx->mac_op = type->default_mac_ops;
+ efx->phy_op = &efx_dummy_phy_operations;
+ efx->mdio.dev = net_dev;
+ INIT_WORK(&efx->mac_work, efx_mac_work);
+
+ for (i = 0; i < EFX_MAX_CHANNELS; i++) {
+ efx->channel[i] = efx_alloc_channel(efx, i, NULL);
+ if (!efx->channel[i])
+ goto fail;
+ }
+
+ efx->type = type;
+
+ EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
+
+ /* Higher numbered interrupt modes are less capable! */
+ efx->interrupt_mode = max(efx->type->max_interrupt_mode,
+ interrupt_mode);
+
+ /* Would be good to use the net_dev name, but we're too early */
+ snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
+ pci_name(pci_dev));
+ efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
+ if (!efx->workqueue)
+ goto fail;
+
+ return 0;
+
+fail:
+ efx_fini_struct(efx);
+ return -ENOMEM;
+}
+
+static void efx_fini_struct(struct efx_nic *efx)
+{
+ int i;
+
+ for (i = 0; i < EFX_MAX_CHANNELS; i++)
+ kfree(efx->channel[i]);
+
+ if (efx->workqueue) {
+ destroy_workqueue(efx->workqueue);
+ efx->workqueue = NULL;
+ }
+}
+
+/**************************************************************************
+ *
+ * PCI interface
+ *
+ **************************************************************************/
+
+/* Main body of final NIC shutdown code
+ * This is called only at module unload (or hotplug removal).
+ */
+static void efx_pci_remove_main(struct efx_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+ efx->net_dev->rx_cpu_rmap = NULL;
+#endif
+ efx_nic_fini_interrupt(efx);
+ efx_fini_channels(efx);
+ efx_fini_port(efx);
+ efx->type->fini(efx);
+ efx_fini_napi(efx);
+ efx_remove_all(efx);
+}
+
+/* Final NIC shutdown
+ * This is called only at module unload (or hotplug removal).
+ */
+static void efx_pci_remove(struct pci_dev *pci_dev)
+{
+ struct efx_nic *efx;
+
+ efx = pci_get_drvdata(pci_dev);
+ if (!efx)
+ return;
+
+ /* Mark the NIC as fini, then stop the interface */
+ rtnl_lock();
+ efx->state = STATE_FINI;
+ dev_close(efx->net_dev);
+
+ /* Allow any queued efx_resets() to complete */
+ rtnl_unlock();
+
+ efx_unregister_netdev(efx);
+
+ efx_mtd_remove(efx);
+
+ /* Wait for any scheduled resets to complete. No more will be
+ * scheduled from this point because efx_stop_all() has been
+ * called, we are no longer registered with driverlink, and
+	 * the net_device has been removed. */
+ cancel_work_sync(&efx->reset_work);
+
+ efx_pci_remove_main(efx);
+
+ efx_fini_io(efx);
+ netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
+
+ pci_set_drvdata(pci_dev, NULL);
+ efx_fini_struct(efx);
+ free_netdev(efx->net_dev);
+}
+
+/* Main body of NIC initialisation
+ * This is called at module load (or hotplug insertion, theoretically).
+ */
+static int efx_pci_probe_main(struct efx_nic *efx)
+{
+ int rc;
+
+ /* Do start-of-day initialisation */
+ rc = efx_probe_all(efx);
+ if (rc)
+ goto fail1;
+
+ efx_init_napi(efx);
+
+ rc = efx->type->init(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to initialise NIC\n");
+ goto fail3;
+ }
+
+ rc = efx_init_port(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to initialise port\n");
+ goto fail4;
+ }
+
+ efx_init_channels(efx);
+
+ rc = efx_nic_init_interrupt(efx);
+ if (rc)
+ goto fail5;
+
+ return 0;
+
+ fail5:
+ efx_fini_channels(efx);
+ efx_fini_port(efx);
+ fail4:
+ efx->type->fini(efx);
+ fail3:
+ efx_fini_napi(efx);
+ efx_remove_all(efx);
+ fail1:
+ return rc;
+}
+
+/* NIC initialisation
+ *
+ * This is called at module load (or hotplug insertion,
+ * theoretically). It sets up PCI mappings, tests and resets the NIC,
+ * sets up and registers the network devices with the kernel and hooks
+ * the interrupt service routine. It does not prepare the device for
+ * transmission; this is left to the first time one of the network
+ * interfaces is brought up (i.e. efx_net_open).
+ */
+static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *entry)
+{
+ const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
+ struct net_device *net_dev;
+ struct efx_nic *efx;
+ int i, rc;
+
+ /* Allocate and initialise a struct net_device and struct efx_nic */
+ net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
+ EFX_MAX_RX_QUEUES);
+ if (!net_dev)
+ return -ENOMEM;
+ net_dev->features |= (type->offload_features | NETIF_F_SG |
+ NETIF_F_HIGHDMA | NETIF_F_TSO |
+ NETIF_F_RXCSUM);
+ if (type->offload_features & NETIF_F_V6_CSUM)
+ net_dev->features |= NETIF_F_TSO6;
+ /* Mask for features that also apply to VLAN devices */
+ net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
+ NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
+ NETIF_F_RXCSUM);
+	/* All offloads except NETIF_F_HIGHDMA can be toggled */
+ net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
+ efx = netdev_priv(net_dev);
+ pci_set_drvdata(pci_dev, efx);
+ SET_NETDEV_DEV(net_dev, &pci_dev->dev);
+ rc = efx_init_struct(efx, type, pci_dev, net_dev);
+ if (rc)
+ goto fail1;
+
+ netif_info(efx, probe, efx->net_dev,
+ "Solarflare NIC detected\n");
+
+ /* Set up basic I/O (BAR mappings etc) */
+ rc = efx_init_io(efx);
+ if (rc)
+ goto fail2;
+
+ /* No serialisation is required with the reset path because
+ * we're in STATE_INIT. */
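+	/* Retry the probe a few times; a reset scheduled during probe is
+	 * only tolerated if its scope is INVISIBLE or ALL */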
+ for (i = 0; i < 5; i++) {
+ rc = efx_pci_probe_main(efx);
+
+ /* Serialise against efx_reset(). No more resets will be
+ * scheduled since efx_stop_all() has been called, and we
+		 * have never been registered with either
+ * the rtnetlink or driverlink layers. */
+ cancel_work_sync(&efx->reset_work);
+
+ if (rc == 0) {
+ if (efx->reset_pending) {
+ /* If there was a scheduled reset during
+ * probe, the NIC is probably hosed anyway */
+ efx_pci_remove_main(efx);
+ rc = -EIO;
+ } else {
+ break;
+ }
+ }
+
+		/* Retry if a recoverable reset event has been scheduled */
+ if (efx->reset_pending &
+ ~(1 << RESET_TYPE_INVISIBLE | 1 << RESET_TYPE_ALL) ||
+ !efx->reset_pending)
+ goto fail3;
+
+ efx->reset_pending = 0;
+ }
+
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n");
+ goto fail4;
+ }
+
+ /* Switch to the running state before we expose the device to the OS,
+ * so that dev_open()|efx_start_all() will actually start the device */
+ efx->state = STATE_RUNNING;
+
+ rc = efx_register_netdev(efx);
+ if (rc)
+ goto fail5;
+
+ netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
+
+ rtnl_lock();
+ efx_mtd_probe(efx); /* allowed to fail */
+ rtnl_unlock();
+ return 0;
+
+ fail5:
+ efx_pci_remove_main(efx);
+ fail4:
+ fail3:
+ efx_fini_io(efx);
+ fail2:
+ efx_fini_struct(efx);
+ fail1:
+ WARN_ON(rc > 0);
+ netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
+ free_netdev(net_dev);
+ return rc;
+}
+
+static int efx_pm_freeze(struct device *dev)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+ efx->state = STATE_FINI;
+
+ netif_device_detach(efx->net_dev);
+
+ efx_stop_all(efx);
+ efx_fini_channels(efx);
+
+ return 0;
+}
+
+static int efx_pm_thaw(struct device *dev)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+ efx->state = STATE_INIT;
+
+ efx_init_channels(efx);
+
+ mutex_lock(&efx->mac_lock);
+ efx->phy_op->reconfigure(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ efx_start_all(efx);
+
+ netif_device_attach(efx->net_dev);
+
+ efx->state = STATE_RUNNING;
+
+ efx->type->resume_wol(efx);
+
+ /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
+ queue_work(reset_workqueue, &efx->reset_work);
+
+ return 0;
+}
+
+static int efx_pm_poweroff(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct efx_nic *efx = pci_get_drvdata(pci_dev);
+
+ efx->type->fini(efx);
+
+ efx->reset_pending = 0;
+
+ pci_save_state(pci_dev);
+ return pci_set_power_state(pci_dev, PCI_D3hot);
+}
+
+/* Used for both resume and restore */
+static int efx_pm_resume(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct efx_nic *efx = pci_get_drvdata(pci_dev);
+ int rc;
+
+ rc = pci_set_power_state(pci_dev, PCI_D0);
+ if (rc)
+ return rc;
+ pci_restore_state(pci_dev);
+ rc = pci_enable_device(pci_dev);
+ if (rc)
+ return rc;
+ pci_set_master(efx->pci_dev);
+ rc = efx->type->reset(efx, RESET_TYPE_ALL);
+ if (rc)
+ return rc;
+ rc = efx->type->init(efx);
+ if (rc)
+ return rc;
+ efx_pm_thaw(dev);
+ return 0;
+}
+
+static int efx_pm_suspend(struct device *dev)
+{
+ int rc;
+
+ efx_pm_freeze(dev);
+ rc = efx_pm_poweroff(dev);
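+	/* On failure, resume so the device is left in a usable state */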
+ if (rc)
+ efx_pm_resume(dev);
+ return rc;
+}
+
+static struct dev_pm_ops efx_pm_ops = {
+ .suspend = efx_pm_suspend,
+ .resume = efx_pm_resume,
+ .freeze = efx_pm_freeze,
+ .thaw = efx_pm_thaw,
+ .poweroff = efx_pm_poweroff,
+ .restore = efx_pm_resume,
+};
+
+static struct pci_driver efx_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = efx_pci_table,
+ .probe = efx_pci_probe,
+ .remove = efx_pci_remove,
+ .driver.pm = &efx_pm_ops,
+};
+
+/**************************************************************************
+ *
+ * Kernel module interface
+ *
+ *************************************************************************/
+
+module_param(interrupt_mode, uint, 0444);
+MODULE_PARM_DESC(interrupt_mode,
+ "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
+
+static int __init efx_init_module(void)
+{
+ int rc;
+
+ printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
+
+ rc = register_netdevice_notifier(&efx_netdev_notifier);
+ if (rc)
+ goto err_notifier;
+
+ reset_workqueue = create_singlethread_workqueue("sfc_reset");
+ if (!reset_workqueue) {
+ rc = -ENOMEM;
+ goto err_reset;
+ }
+
+ rc = pci_register_driver(&efx_pci_driver);
+ if (rc < 0)
+ goto err_pci;
+
+ return 0;
+
+ err_pci:
+ destroy_workqueue(reset_workqueue);
+ err_reset:
+ unregister_netdevice_notifier(&efx_netdev_notifier);
+ err_notifier:
+ return rc;
+}
+
+static void __exit efx_exit_module(void)
+{
+ printk(KERN_INFO "Solarflare NET driver unloading\n");
+
+ pci_unregister_driver(&efx_pci_driver);
+ destroy_workqueue(reset_workqueue);
+ unregister_netdevice_notifier(&efx_netdev_notifier);
+}
+
+module_init(efx_init_module);
+module_exit(efx_exit_module);
+
+MODULE_AUTHOR("Solarflare Communications and "
+ "Michael Brown <mbrown@fensystems.co.uk>");
+MODULE_DESCRIPTION("Solarflare Communications network driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, efx_pci_table);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
new file mode 100644
index 000000000000..442f4d0c247d
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -0,0 +1,150 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_EFX_H
+#define EFX_EFX_H
+
+#include "net_driver.h"
+#include "filter.h"
+
+/* PCI IDs */
+#define EFX_VENDID_SFC 0x1924
+#define FALCON_A_P_DEVID 0x0703
+#define FALCON_A_S_DEVID 0x6703
+#define FALCON_B_P_DEVID 0x0710
+#define BETHPAGE_A_P_DEVID 0x0803
+#define SIENA_A_P_DEVID 0x0813
+
+/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
+#define EFX_MEM_BAR 2
+
+/* TX */
+extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
+extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
+extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
+extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
+extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
+extern netdev_tx_t
+efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
+extern netdev_tx_t
+efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
+
+/* RX */
+extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
+extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
+extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
+extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
+extern void efx_rx_strategy(struct efx_channel *channel);
+extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
+extern void efx_rx_slow_fill(unsigned long context);
+extern void __efx_rx_packet(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf, bool checksummed);
+extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+ unsigned int len, bool checksummed, bool discard);
+extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
+
+#define EFX_MAX_DMAQ_SIZE 4096UL
+#define EFX_DEFAULT_DMAQ_SIZE 1024UL
+#define EFX_MIN_DMAQ_SIZE 512UL
+
+#define EFX_MAX_EVQ_SIZE 16384UL
+#define EFX_MIN_EVQ_SIZE 512UL
+
+/* The smallest [rt]xq_entries that the driver supports. Callers of
+ * efx_wake_queue() assume that they can subsequently send at least one
+ * skb. Falcon/A1 may require up to three descriptors per skb_frag. */
+#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
+
+/* Filters */
+extern int efx_probe_filters(struct efx_nic *efx);
+extern void efx_restore_filters(struct efx_nic *efx);
+extern void efx_remove_filters(struct efx_nic *efx);
+extern int efx_filter_insert_filter(struct efx_nic *efx,
+ struct efx_filter_spec *spec,
+ bool replace);
+extern int efx_filter_remove_filter(struct efx_nic *efx,
+ struct efx_filter_spec *spec);
+extern void efx_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+#ifdef CONFIG_RFS_ACCEL
+extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id);
+extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
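+/* Expire stale accelerated-RFS filters in batches, so that the
+ * relatively expensive expiry scan runs only once per ~60 insertions */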
+static inline void efx_filter_rfs_expire(struct efx_channel *channel)
+{
+ if (channel->rfs_filters_added >= 60 &&
+ __efx_filter_rfs_expire(channel->efx, 100))
+ channel->rfs_filters_added -= 60;
+}
+#define efx_filter_rfs_enabled() 1
+#else
+static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
+#define efx_filter_rfs_enabled() 0
+#endif
+
+/* Channels */
+extern void efx_process_channel_now(struct efx_channel *channel);
+extern int
+efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
+
+/* Ports */
+extern int efx_reconfigure_port(struct efx_nic *efx);
+extern int __efx_reconfigure_port(struct efx_nic *efx);
+
+/* Ethtool support */
+extern const struct ethtool_ops efx_ethtool_ops;
+
+/* Reset handling */
+extern int efx_reset(struct efx_nic *efx, enum reset_type method);
+extern void efx_reset_down(struct efx_nic *efx, enum reset_type method);
+extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
+
+/* Global */
+extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
+extern int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+ unsigned int rx_usecs, bool rx_adaptive,
+ bool rx_may_override_tx);
+extern void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+ unsigned int *rx_usecs, bool *rx_adaptive);
+
+/* Dummy PHY ops for PHY drivers */
+extern int efx_port_dummy_op_int(struct efx_nic *efx);
+extern void efx_port_dummy_op_void(struct efx_nic *efx);
+
+/* MTD */
+#ifdef CONFIG_SFC_MTD
+extern int efx_mtd_probe(struct efx_nic *efx);
+extern void efx_mtd_rename(struct efx_nic *efx);
+extern void efx_mtd_remove(struct efx_nic *efx);
+#else
+static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
+static inline void efx_mtd_rename(struct efx_nic *efx) {}
+static inline void efx_mtd_remove(struct efx_nic *efx) {}
+#endif
+
+static inline void efx_schedule_channel(struct efx_channel *channel)
+{
+ netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+ "channel %d scheduling NAPI poll on CPU%d\n",
+ channel->channel, raw_smp_processor_id());
+ channel->work_pending = true;
+
+ napi_schedule(&channel->napi_str);
+}
+
+extern void efx_link_status_changed(struct efx_nic *efx);
+extern void efx_link_set_advertising(struct efx_nic *efx, u32);
+extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
+
+#endif /* EFX_EFX_H */
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
new file mode 100644
index 000000000000..d725a8fbe1a6
--- /dev/null
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -0,0 +1,167 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2007-2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_ENUM_H
+#define EFX_ENUM_H
+
+/**
+ * enum efx_loopback_mode - loopback modes
+ * @LOOPBACK_NONE: no loopback
+ * @LOOPBACK_DATA: data path loopback
+ * @LOOPBACK_GMAC: loopback within GMAC
+ * @LOOPBACK_XGMII: loopback after XMAC
+ * @LOOPBACK_XGXS: loopback within BPX after XGXS
+ * @LOOPBACK_XAUI: loopback within BPX before XAUI serdes
+ * @LOOPBACK_GMII: loopback within BPX after GMAC
+ * @LOOPBACK_SGMII: loopback within BPX within SGMII
+ * @LOOPBACK_XGBR: loopback within BPX within XGBR
+ * @LOOPBACK_XFI: loopback within BPX before XFI serdes
+ * @LOOPBACK_XAUI_FAR: loopback within BPX after XAUI serdes
+ * @LOOPBACK_GMII_FAR: loopback within BPX before SGMII
+ * @LOOPBACK_SGMII_FAR: loopback within BPX after SGMII
+ * @LOOPBACK_XFI_FAR: loopback after XFI serdes
+ * @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level
+ * @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level
+ * @LOOPBACK_PCS: loopback within 10G PHY at PCS level
+ * @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level
+ * @LOOPBACK_XPORT: cross port loopback
+ * @LOOPBACK_XGMII_WS: wireside loopback excluding XMAC
+ * @LOOPBACK_XAUI_WS: wireside loopback within BPX within XAUI serdes
+ * @LOOPBACK_XAUI_WS_FAR: wireside loopback within BPX including XAUI serdes
+ * @LOOPBACK_XAUI_WS_NEAR: wireside loopback within BPX excluding XAUI serdes
+ * @LOOPBACK_GMII_WS: wireside loopback excluding GMAC
+ * @LOOPBACK_XFI_WS: wireside loopback excluding XFI serdes
+ * @LOOPBACK_XFI_WS_FAR: wireside loopback including XFI serdes
+ * @LOOPBACK_PHYXS_WS: wireside loopback within 10G PHY at PHYXS level
+ */
+/* Please keep up-to-date w.r.t. the following two #defines */
+enum efx_loopback_mode {
+ LOOPBACK_NONE = 0,
+ LOOPBACK_DATA = 1,
+ LOOPBACK_GMAC = 2,
+ LOOPBACK_XGMII = 3,
+ LOOPBACK_XGXS = 4,
+ LOOPBACK_XAUI = 5,
+ LOOPBACK_GMII = 6,
+ LOOPBACK_SGMII = 7,
+ LOOPBACK_XGBR = 8,
+ LOOPBACK_XFI = 9,
+ LOOPBACK_XAUI_FAR = 10,
+ LOOPBACK_GMII_FAR = 11,
+ LOOPBACK_SGMII_FAR = 12,
+ LOOPBACK_XFI_FAR = 13,
+ LOOPBACK_GPHY = 14,
+ LOOPBACK_PHYXS = 15,
+ LOOPBACK_PCS = 16,
+ LOOPBACK_PMAPMD = 17,
+ LOOPBACK_XPORT = 18,
+ LOOPBACK_XGMII_WS = 19,
+ LOOPBACK_XAUI_WS = 20,
+ LOOPBACK_XAUI_WS_FAR = 21,
+ LOOPBACK_XAUI_WS_NEAR = 22,
+ LOOPBACK_GMII_WS = 23,
+ LOOPBACK_XFI_WS = 24,
+ LOOPBACK_XFI_WS_FAR = 25,
+ LOOPBACK_PHYXS_WS = 26,
+ LOOPBACK_MAX
+};
+#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD
+
+/* These loopbacks occur within the controller */
+#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_DATA) | \
+ (1 << LOOPBACK_GMAC) | \
+ (1 << LOOPBACK_XGMII)| \
+ (1 << LOOPBACK_XGXS) | \
+ (1 << LOOPBACK_XAUI) | \
+ (1 << LOOPBACK_GMII) | \
+ (1 << LOOPBACK_SGMII) | \
+ (1 << LOOPBACK_XGBR) | \
+ (1 << LOOPBACK_XFI) | \
+ (1 << LOOPBACK_XAUI_FAR) | \
+ (1 << LOOPBACK_GMII_FAR) | \
+ (1 << LOOPBACK_SGMII_FAR) | \
+ (1 << LOOPBACK_XFI_FAR) | \
+ (1 << LOOPBACK_XGMII_WS) | \
+ (1 << LOOPBACK_XAUI_WS) | \
+ (1 << LOOPBACK_XAUI_WS_FAR) | \
+ (1 << LOOPBACK_XAUI_WS_NEAR) | \
+ (1 << LOOPBACK_GMII_WS) | \
+ (1 << LOOPBACK_XFI_WS) | \
+ (1 << LOOPBACK_XFI_WS_FAR))
+
+#define LOOPBACKS_WS ((1 << LOOPBACK_XGMII_WS) | \
+ (1 << LOOPBACK_XAUI_WS) | \
+ (1 << LOOPBACK_XAUI_WS_FAR) | \
+ (1 << LOOPBACK_XAUI_WS_NEAR) | \
+ (1 << LOOPBACK_GMII_WS) | \
+ (1 << LOOPBACK_XFI_WS) | \
+ (1 << LOOPBACK_XFI_WS_FAR) | \
+ (1 << LOOPBACK_PHYXS_WS))
+
+#define LOOPBACKS_EXTERNAL(_efx) \
+ ((_efx)->loopback_modes & ~LOOPBACKS_INTERNAL & \
+ ~(1 << LOOPBACK_NONE))
+
+#define LOOPBACK_MASK(_efx) \
+ (1 << (_efx)->loopback_mode)
+
+#define LOOPBACK_INTERNAL(_efx) \
+ (!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx)))
+
+#define LOOPBACK_EXTERNAL(_efx) \
+ (!!(LOOPBACK_MASK(_efx) & LOOPBACKS_EXTERNAL(_efx)))
+
+#define LOOPBACK_CHANGED(_from, _to, _mask) \
+ (!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask)))
+
+#define LOOPBACK_OUT_OF(_from, _to, _mask) \
+ ((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask)))
+
+/*****************************************************************************/
+
+/**
+ * enum reset_type - reset types
+ *
+ * %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
+ * %RESET_TYPE_DISABLE specify the method/scope of the reset. The
+ * other values specify reasons, which efx_schedule_reset() will choose
+ * a method for.
+ *
+ * Reset methods are numbered in order of increasing scope.
+ *
+ * @RESET_TYPE_INVISIBLE: don't reset the PHYs or interrupts
+ * @RESET_TYPE_ALL: reset everything but PCI core blocks
+ * @RESET_TYPE_WORLD: reset everything, save & restore PCI config
+ * @RESET_TYPE_DISABLE: disable NIC
+ * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
+ * @RESET_TYPE_INT_ERROR: reset due to internal error
+ * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
+ * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch
+ * @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch
+ * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
+ * @RESET_TYPE_MC_FAILURE: MC reboot/assertion
+ */
+enum reset_type {
+ RESET_TYPE_INVISIBLE = 0,
+ RESET_TYPE_ALL = 1,
+ RESET_TYPE_WORLD = 2,
+ RESET_TYPE_DISABLE = 3,
+ RESET_TYPE_MAX_METHOD,
+ RESET_TYPE_TX_WATCHDOG,
+ RESET_TYPE_INT_ERROR,
+ RESET_TYPE_RX_RECOVERY,
+ RESET_TYPE_RX_DESC_FETCH,
+ RESET_TYPE_TX_DESC_FETCH,
+ RESET_TYPE_TX_SKIP,
+ RESET_TYPE_MC_FAILURE,
+ RESET_TYPE_MAX,
+};
+
+#endif /* EFX_ENUM_H */
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
new file mode 100644
index 000000000000..9536925f5bdd
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -0,0 +1,1029 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+#include <linux/in.h>
+#include "net_driver.h"
+#include "workarounds.h"
+#include "selftest.h"
+#include "efx.h"
+#include "filter.h"
+#include "nic.h"
+
+struct ethtool_string {
+ char name[ETH_GSTRING_LEN];
+};
+
+struct efx_ethtool_stat {
+ const char *name;
+ enum {
+ EFX_ETHTOOL_STAT_SOURCE_mac_stats,
+ EFX_ETHTOOL_STAT_SOURCE_nic,
+ EFX_ETHTOOL_STAT_SOURCE_channel,
+ EFX_ETHTOOL_STAT_SOURCE_tx_queue
+ } source;
+ unsigned offset;
+ u64(*get_stat) (void *field); /* Reader function */
+};
+
+/* Initialiser for a struct #efx_ethtool_stat with type-checking */
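+/* Both branches of the ternary below yield the same offset; the pointer
+ * comparison exists purely so the compiler checks that @field really
+ * has type @field_type */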
+#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
+ get_stat_function) { \
+ .name = #stat_name, \
+ .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \
+ .offset = ((((field_type *) 0) == \
+ &((struct efx_##source_name *)0)->field) ? \
+ offsetof(struct efx_##source_name, field) : \
+ offsetof(struct efx_##source_name, field)), \
+ .get_stat = get_stat_function, \
+}
+
+static u64 efx_get_uint_stat(void *field)
+{
+ return *(unsigned int *)field;
+}
+
+static u64 efx_get_ulong_stat(void *field)
+{
+ return *(unsigned long *)field;
+}
+
+static u64 efx_get_u64_stat(void *field)
+{
+ return *(u64 *) field;
+}
+
+static u64 efx_get_atomic_stat(void *field)
+{
+ return atomic_read((atomic_t *) field);
+}
+
+#define EFX_ETHTOOL_ULONG_MAC_STAT(field) \
+ EFX_ETHTOOL_STAT(field, mac_stats, field, \
+ unsigned long, efx_get_ulong_stat)
+
+#define EFX_ETHTOOL_U64_MAC_STAT(field) \
+ EFX_ETHTOOL_STAT(field, mac_stats, field, \
+ u64, efx_get_u64_stat)
+
+#define EFX_ETHTOOL_UINT_NIC_STAT(name) \
+ EFX_ETHTOOL_STAT(name, nic, n_##name, \
+ unsigned int, efx_get_uint_stat)
+
+#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
+ EFX_ETHTOOL_STAT(field, nic, field, \
+ atomic_t, efx_get_atomic_stat)
+
+#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
+ EFX_ETHTOOL_STAT(field, channel, n_##field, \
+ unsigned int, efx_get_uint_stat)
+
+#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
+ EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
+ unsigned int, efx_get_uint_stat)
+
+static struct efx_ethtool_stat efx_ethtool_stats[] = {
+ EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
+ EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_bad),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_pause),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_control),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_unicast),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_multicast),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_broadcast),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_lt64),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_64),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_65_to_127),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_128_to_255),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_256_to_511),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_512_to_1023),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_1024_to_15xx),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_15xx_to_jumbo),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_gtjumbo),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_collision),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_single_collision),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_multiple_collision),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_collision),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_deferred),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_late_collision),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_deferred),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error),
+ EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
+ EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
+ EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_packets),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_good),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_pause),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_control),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_unicast),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_multicast),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_broadcast),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_lt64),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_64),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_65_to_127),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_128_to_255),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_256_to_511),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_512_to_1023),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_1024_to_15xx),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_15xx_to_jumbo),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_gtjumbo),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_lt64),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_64_to_15xx),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_15xx_to_jumbo),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_gtjumbo),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_overflow),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_missed),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_false_carrier),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_symbol_error),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_align_error),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_length_error),
+ EFX_ETHTOOL_ULONG_MAC_STAT(rx_internal_error),
+ EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
+ EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
+};
+
+/* Number of ethtool statistics */
+#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
+
+#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
+
+/**************************************************************************
+ *
+ * Ethtool operations
+ *
+ **************************************************************************
+ */
+
+/* Identify device by flashing LEDs */
+static int efx_ethtool_phys_id(struct net_device *net_dev,
+ enum ethtool_phys_id_state state)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ enum efx_led_mode mode = EFX_LED_DEFAULT;
+
+ switch (state) {
+ case ETHTOOL_ID_ON:
+ mode = EFX_LED_ON;
+ break;
+ case ETHTOOL_ID_OFF:
+ mode = EFX_LED_OFF;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ mode = EFX_LED_DEFAULT;
+ break;
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
+ }
+
+ efx->type->set_id_led(efx, mode);
+ return 0;
+}
+
+/* This must be called with rtnl_lock held. */
+static int efx_ethtool_get_settings(struct net_device *net_dev,
+ struct ethtool_cmd *ecmd)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_link_state *link_state = &efx->link_state;
+
+ mutex_lock(&efx->mac_lock);
+ efx->phy_op->get_settings(efx, ecmd);
+ mutex_unlock(&efx->mac_lock);
+
+ /* GMAC does not support 1000Mbps HD */
+ ecmd->supported &= ~SUPPORTED_1000baseT_Half;
+ /* Both MACs support pause frames (bidirectional and respond-only) */
+ ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
+ if (LOOPBACK_INTERNAL(efx)) {
+ ethtool_cmd_speed_set(ecmd, link_state->speed);
+ ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
+ }
+
+ return 0;
+}
+
+/* This must be called with rtnl_lock held. */
+static int efx_ethtool_set_settings(struct net_device *net_dev,
+ struct ethtool_cmd *ecmd)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ /* GMAC does not support 1000Mbps HD */
+ if ((ethtool_cmd_speed(ecmd) == SPEED_1000) &&
+ (ecmd->duplex != DUPLEX_FULL)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "rejecting unsupported 1000Mbps HD setting\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&efx->mac_lock);
+ rc = efx->phy_op->set_settings(efx, ecmd);
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
+
+static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *info)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ efx_mcdi_print_fwver(efx, info->fw_version,
+ sizeof(info->fw_version));
+ strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
+}
+
+static int efx_ethtool_get_regs_len(struct net_device *net_dev)
+{
+ return efx_nic_get_regs_len(netdev_priv(net_dev));
+}
+
+static void efx_ethtool_get_regs(struct net_device *net_dev,
+ struct ethtool_regs *regs, void *buf)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ regs->version = efx->type->revision;
+ efx_nic_get_regs(efx, buf);
+}
+
+static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ return efx->msg_enable;
+}
+
+static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ efx->msg_enable = msg_enable;
+}
+
+/**
+ * efx_fill_test - fill in an individual self-test entry
+ * @test_index: Index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ * @test: Pointer to test result (used only if data != %NULL)
+ * @unit_format: Unit name format (e.g. "chan\%d")
+ * @unit_id: Unit id (e.g. 0 for "chan0")
+ * @test_format: Test name format (e.g. "loopback.\%s.tx_sent")
+ * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
+ *
+ * Fill in an individual self-test entry.
+ */
+static void efx_fill_test(unsigned int test_index,
+ struct ethtool_string *strings, u64 *data,
+ int *test, const char *unit_format, int unit_id,
+ const char *test_format, const char *test_id)
+{
+ struct ethtool_string unit_str, test_str;
+
+ /* Fill data value, if applicable */
+ if (data)
+ data[test_index] = *test;
+
+ /* Fill string, if applicable */
+ if (strings) {
+ if (strchr(unit_format, '%'))
+ snprintf(unit_str.name, sizeof(unit_str.name),
+ unit_format, unit_id);
+ else
+ strcpy(unit_str.name, unit_format);
+ snprintf(test_str.name, sizeof(test_str.name),
+ test_format, test_id);
+ snprintf(strings[test_index].name,
+ sizeof(strings[test_index].name),
+ "%-6s %-24s", unit_str.name, test_str.name);
+ }
+}
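+/* For example, unit_format "chan%d" with unit_id 0 and test_format
+ * "eventq.dma" produce the string "chan0  eventq.dma": the unit name is
+ * left-justified in six characters and the test name in twenty-four.
+ */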
+
+#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
+#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
+#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
+#define EFX_LOOPBACK_NAME(_mode, _counter) \
+ "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
+
+/**
+ * efx_fill_loopback_test - fill in a block of loopback self-test entries
+ * @efx: Efx NIC
+ * @lb_tests: Efx loopback self-test results structure
+ * @mode: Loopback test mode
+ * @test_index: Starting index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ */
+static int efx_fill_loopback_test(struct efx_nic *efx,
+ struct efx_loopback_self_tests *lb_tests,
+ enum efx_loopback_mode mode,
+ unsigned int test_index,
+ struct ethtool_string *strings, u64 *data)
+{
+ struct efx_channel *channel = efx_get_channel(efx, 0);
+ struct efx_tx_queue *tx_queue;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->tx_sent[tx_queue->queue],
+ EFX_TX_QUEUE_NAME(tx_queue),
+ EFX_LOOPBACK_NAME(mode, "tx_sent"));
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->tx_done[tx_queue->queue],
+ EFX_TX_QUEUE_NAME(tx_queue),
+ EFX_LOOPBACK_NAME(mode, "tx_done"));
+ }
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->rx_good,
+ "rx", 0,
+ EFX_LOOPBACK_NAME(mode, "rx_good"));
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->rx_bad,
+ "rx", 0,
+ EFX_LOOPBACK_NAME(mode, "rx_bad"));
+
+ return test_index;
+}
+
+/**
+ * efx_ethtool_fill_self_tests - get self-test details
+ * @efx: Efx NIC
+ * @tests: Efx self-test results structure, or %NULL
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ */
+static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
+ struct efx_self_tests *tests,
+ struct ethtool_string *strings,
+ u64 *data)
+{
+ struct efx_channel *channel;
+ unsigned int n = 0, i;
+ enum efx_loopback_mode mode;
+
+ efx_fill_test(n++, strings, data, &tests->phy_alive,
+ "phy", 0, "alive", NULL);
+ efx_fill_test(n++, strings, data, &tests->nvram,
+ "core", 0, "nvram", NULL);
+ efx_fill_test(n++, strings, data, &tests->interrupt,
+ "core", 0, "interrupt", NULL);
+
+ /* Event queues */
+ efx_for_each_channel(channel, efx) {
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_dma[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.dma", NULL);
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_int[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.int", NULL);
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_poll[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.poll", NULL);
+ }
+
+ efx_fill_test(n++, strings, data, &tests->registers,
+ "core", 0, "registers", NULL);
+
+ if (efx->phy_op->run_tests != NULL) {
+ EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL);
+
+ for (i = 0; true; ++i) {
+ const char *name;
+
+ EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
+ name = efx->phy_op->test_name(efx, i);
+ if (name == NULL)
+ break;
+
+ efx_fill_test(n++, strings, data, &tests->phy_ext[i],
+ "phy", 0, name, NULL);
+ }
+ }
+
+ /* Loopback tests */
+ for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
+ if (!(efx->loopback_modes & (1 << mode)))
+ continue;
+ n = efx_fill_loopback_test(efx,
+ &tests->loopback[mode], mode, n,
+ strings, data);
+ }
+
+ return n;
+}
+
+static int efx_ethtool_get_sset_count(struct net_device *net_dev,
+ int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return EFX_ETHTOOL_NUM_STATS;
+ case ETH_SS_TEST:
+ return efx_ethtool_fill_self_tests(netdev_priv(net_dev),
+ NULL, NULL, NULL);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void efx_ethtool_get_strings(struct net_device *net_dev,
+ u32 string_set, u8 *strings)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct ethtool_string *ethtool_strings =
+ (struct ethtool_string *)strings;
+ int i;
+
+ switch (string_set) {
+ case ETH_SS_STATS:
+ for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
+ strncpy(ethtool_strings[i].name,
+ efx_ethtool_stats[i].name,
+ sizeof(ethtool_strings[i].name));
+ break;
+ case ETH_SS_TEST:
+ efx_ethtool_fill_self_tests(efx, NULL,
+ ethtool_strings, NULL);
+ break;
+ default:
+ /* No other string sets */
+ break;
+ }
+}
+
+static void efx_ethtool_get_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_mac_stats *mac_stats = &efx->mac_stats;
+ struct efx_ethtool_stat *stat;
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct rtnl_link_stats64 temp;
+ int i;
+
+ EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
+
+ /* Update MAC and NIC statistics */
+ dev_get_stats(net_dev, &temp);
+
+ /* Fill detailed statistics buffer */
+ for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
+ stat = &efx_ethtool_stats[i];
+ switch (stat->source) {
+ case EFX_ETHTOOL_STAT_SOURCE_mac_stats:
+ data[i] = stat->get_stat((void *)mac_stats +
+ stat->offset);
+ break;
+ case EFX_ETHTOOL_STAT_SOURCE_nic:
+ data[i] = stat->get_stat((void *)efx + stat->offset);
+ break;
+ case EFX_ETHTOOL_STAT_SOURCE_channel:
+ data[i] = 0;
+ efx_for_each_channel(channel, efx)
+ data[i] += stat->get_stat((void *)channel +
+ stat->offset);
+ break;
+ case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
+ data[i] = 0;
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ data[i] +=
+ stat->get_stat((void *)tx_queue
+ + stat->offset);
+ }
+ break;
+ }
+ }
+}
+
+static void efx_ethtool_self_test(struct net_device *net_dev,
+ struct ethtool_test *test, u64 *data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_self_tests *efx_tests;
+ int already_up;
+ int rc = -ENOMEM;
+
+ efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
+ if (!efx_tests)
+ goto fail;
+
+ ASSERT_RTNL();
+ if (efx->state != STATE_RUNNING) {
+ rc = -EIO;
+ goto fail1;
+ }
+
+ netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
+ (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+
+ /* We need rx buffers and interrupts. */
+ already_up = (efx->net_dev->flags & IFF_UP);
+ if (!already_up) {
+ rc = dev_open(efx->net_dev);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed opening device.\n");
+ goto fail1;
+ }
+ }
+
+ rc = efx_selftest(efx, efx_tests, test->flags);
+
+ if (!already_up)
+ dev_close(efx->net_dev);
+
+ netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
+ rc == 0 ? "passed" : "failed",
+ (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+
+fail1:
+ /* Fill ethtool results structures */
+ efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
+ kfree(efx_tests);
+fail:
+ if (rc)
+ test->flags |= ETH_TEST_FL_FAILED;
+}
+
+/* Restart autonegotiation */
+static int efx_ethtool_nway_reset(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ return mdio45_nway_restart(&efx->mdio);
+}
+
+/*
+ * Each channel has a single IRQ and moderation timer, started by any
+ * completion (or other event). Unless the module parameter
+ * separate_tx_channels is set, IRQs and moderation are therefore
+ * shared between RX and TX completions. In this case, when RX IRQ
+ * moderation is explicitly changed then TX IRQ moderation is
+ * automatically changed too, but otherwise we fail if the two values
+ * are requested to be different.
+ *
+ * The hardware does not support a limit on the number of completions
+ * before an IRQ, so we do not use the max_frames fields. We should
+ * report and require that max_frames == (usecs != 0), but this would
+ * invalidate existing user documentation.
+ *
+ * The hardware does not have distinct settings for interrupt
+ * moderation while the previous IRQ is being handled, so we should
+ * not use the 'irq' fields. However, an earlier developer
+ * misunderstood the meaning of the 'irq' fields and the driver did
+ * not support the standard fields. To avoid invalidating existing
+ * user documentation, we report and accept changes through either the
+ * standard or 'irq' fields. If both are changed at the same time, we
+ * prefer the standard field.
+ *
+ * We implement adaptive IRQ moderation, but use a different algorithm
+ * from that assumed in the definition of struct ethtool_coalesce.
+ * Therefore we do not use any of the adaptive moderation parameters
+ * in it.
+ */
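+/* Usage sketch (ethX is a placeholder): `ethtool -C ethX rx-usecs 50`
+ * sets rx_coalesce_usecs and, when channels are shared as described
+ * above, quietly retunes TX moderation too; `ethtool -C ethX
+ * adaptive-rx on` maps to use_adaptive_rx_coalesce.
+ */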
+
+static int efx_ethtool_get_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *coalesce)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ unsigned int tx_usecs, rx_usecs;
+ bool rx_adaptive;
+
+ efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);
+
+ coalesce->tx_coalesce_usecs = tx_usecs;
+ coalesce->tx_coalesce_usecs_irq = tx_usecs;
+ coalesce->rx_coalesce_usecs = rx_usecs;
+ coalesce->rx_coalesce_usecs_irq = rx_usecs;
+ coalesce->use_adaptive_rx_coalesce = rx_adaptive;
+
+ return 0;
+}
+
+static int efx_ethtool_set_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *coalesce)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_channel *channel;
+ unsigned int tx_usecs, rx_usecs;
+ bool adaptive, rx_may_override_tx;
+ int rc;
+
+ if (coalesce->use_adaptive_tx_coalesce)
+ return -EINVAL;
+
+ efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);
+
+ if (coalesce->rx_coalesce_usecs != rx_usecs)
+ rx_usecs = coalesce->rx_coalesce_usecs;
+ else
+ rx_usecs = coalesce->rx_coalesce_usecs_irq;
+
+ adaptive = coalesce->use_adaptive_rx_coalesce;
+
+ /* If channels are shared, TX IRQ moderation can be quietly
+ * overridden unless it is changed from its old value.
+ */
+ rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs &&
+ coalesce->tx_coalesce_usecs_irq == tx_usecs);
+ if (coalesce->tx_coalesce_usecs != tx_usecs)
+ tx_usecs = coalesce->tx_coalesce_usecs;
+ else
+ tx_usecs = coalesce->tx_coalesce_usecs_irq;
+
+ rc = efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
+ rx_may_override_tx);
+ if (rc != 0)
+ return rc;
+
+ efx_for_each_channel(channel, efx)
+ efx->type->push_irq_moderation(channel);
+
+ return 0;
+}
+
+static void efx_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
+ ring->tx_max_pending = EFX_MAX_DMAQ_SIZE;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = efx->rxq_entries;
+ ring->tx_pending = efx->txq_entries;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int efx_ethtool_set_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
+ ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
+ ring->tx_pending > EFX_MAX_DMAQ_SIZE)
+ return -EINVAL;
+
+ if (ring->rx_pending < EFX_MIN_RING_SIZE ||
+ ring->tx_pending < EFX_MIN_RING_SIZE) {
+ netif_err(efx, drv, efx->net_dev,
+ "TX and RX queues cannot be smaller than %ld\n",
+ EFX_MIN_RING_SIZE);
+ return -EINVAL;
+ }
+
+ return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
+}
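+/* Usage sketch (ethX is a placeholder): `ethtool -G ethX rx 1024 tx 1024`
+ * reaches the handler above; sizes outside [EFX_MIN_RING_SIZE,
+ * EFX_MAX_DMAQ_SIZE], or any mini/jumbo request, are rejected with
+ * -EINVAL before the rings are reallocated.
+ */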
+
+static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ u8 wanted_fc, old_fc;
+ u32 old_adv;
+ bool reset;
+ int rc = 0;
+
+ mutex_lock(&efx->mac_lock);
+
+ wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) |
+ (pause->tx_pause ? EFX_FC_TX : 0) |
+ (pause->autoneg ? EFX_FC_AUTO : 0));
+
+ if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Flow control unsupported: tx ON rx OFF\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Autonegotiation is disabled\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* TX flow control may automatically turn itself off if the
+ * link partner (intermittently) stops responding to pause
+ * frames. There isn't any indication that this has happened,
+ * so the best we can do is leave it up to the user to spot this
+ * and fix it by cycling transmit flow control on this end. */
+ reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX);
+ if (EFX_WORKAROUND_11482(efx) && reset) {
+ if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
+ /* Recover by resetting the EM block */
+ falcon_stop_nic_stats(efx);
+ falcon_drain_tx_fifo(efx);
+ efx->mac_op->reconfigure(efx);
+ falcon_start_nic_stats(efx);
+ } else {
+ /* Schedule a reset to recover */
+ efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
+ }
+ }
+
+ old_adv = efx->link_advertising;
+ old_fc = efx->wanted_fc;
+ efx_link_set_wanted_fc(efx, wanted_fc);
+ if (efx->link_advertising != old_adv ||
+ (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
+ rc = efx->phy_op->reconfigure(efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "Unable to advertise requested flow "
+ "control setting\n");
+ goto out;
+ }
+ }
+
+ /* Reconfigure the MAC. The PHY *may* generate a link state change event
+ * if the user just changed the advertised capabilities, but there's no
+ * harm doing this twice */
+ efx->mac_op->reconfigure(efx);
+
+out:
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
+ pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
+ pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
+}
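+/* Usage sketch (ethX is a placeholder): with pause autonegotiation off,
+ * `ethtool -A ethX rx on tx off` corresponds to wanted_fc == EFX_FC_RX
+ * in the setter above, while "rx off tx on" is the one combination it
+ * rejects as unsupported.
+ */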
+
+static void efx_ethtool_get_wol(struct net_device *net_dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ efx->type->get_wol(efx, wol);
+}
+
+static int efx_ethtool_set_wol(struct net_device *net_dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ return efx->type->set_wol(efx, wol->wolopts);
+}
+
+static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ rc = efx->type->map_reset_flags(flags);
+ if (rc < 0)
+ return rc;
+
+ return efx_reset(efx, rc);
+}
+
+static int
+efx_ethtool_get_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *info, u32 *rules __always_unused)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = efx->n_rx_channels;
+ return 0;
+
+ case ETHTOOL_GRXFH: {
+ unsigned min_revision = 0;
+
+ info->data = 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through */
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ min_revision = EFX_REV_FALCON_B0;
+ break;
+ case TCP_V6_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through */
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ min_revision = EFX_REV_SIENA_A0;
+ break;
+ default:
+ break;
+ }
+ if (efx_nic_rev(efx) < min_revision)
+ info->data = 0;
+ return 0;
+ }
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
+ struct ethtool_rx_ntuple *ntuple)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec;
+ struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
+ struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
+ struct efx_filter_spec filter;
+ int rc;
+
+ /* Range-check action */
+ if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
+ ntuple->fs.action >= (s32)efx->n_rx_channels)
+ return -EINVAL;
+
+ if (~ntuple->fs.data_mask)
+ return -EINVAL;
+
+ efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0,
+ (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ?
+ 0xfff : ntuple->fs.action);
+
+ switch (ntuple->fs.flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW: {
+ u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ?
+ IPPROTO_TCP : IPPROTO_UDP);
+
+ /* Must match all of destination, */
+ if (ip_mask->ip4dst | ip_mask->pdst)
+ return -EINVAL;
+ /* all or none of source, */
+ if ((ip_mask->ip4src | ip_mask->psrc) &&
+ ((__force u32)~ip_mask->ip4src |
+ (__force u16)~ip_mask->psrc))
+ return -EINVAL;
+ /* and nothing else */
+ if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
+ return -EINVAL;
+
+ if (!ip_mask->ip4src)
+ rc = efx_filter_set_ipv4_full(&filter, proto,
+ ip_entry->ip4dst,
+ ip_entry->pdst,
+ ip_entry->ip4src,
+ ip_entry->psrc);
+ else
+ rc = efx_filter_set_ipv4_local(&filter, proto,
+ ip_entry->ip4dst,
+ ip_entry->pdst);
+ if (rc)
+ return rc;
+ break;
+ }
+
+ case ETHER_FLOW:
+ /* Must match all of destination, */
+ if (!is_zero_ether_addr(mac_mask->h_dest))
+ return -EINVAL;
+ /* all or none of VID, */
+ if (ntuple->fs.vlan_tag_mask != 0xf000 &&
+ ntuple->fs.vlan_tag_mask != 0xffff)
+ return -EINVAL;
+ /* and nothing else */
+ if (!is_broadcast_ether_addr(mac_mask->h_source) ||
+ mac_mask->h_proto != htons(0xffff))
+ return -EINVAL;
+
+ rc = efx_filter_set_eth_local(
+ &filter,
+ (ntuple->fs.vlan_tag_mask == 0xf000) ?
+ ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC,
+ mac_entry->h_dest);
+ if (rc)
+ return rc;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR)
+ return efx_filter_remove_filter(efx, &filter);
+
+ rc = efx_filter_insert_filter(efx, &filter, true);
+ return rc < 0 ? rc : 0;
+}
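+/* Note the action encoding used above: ETHTOOL_RXNTUPLE_ACTION_DROP is
+ * translated to RX queue id 0xfff (evidently the hardware's drop
+ * destination), while a non-negative action selects the RX channel with
+ * that index directly.
+ */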
+
+static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
+ struct ethtool_rxfh_indir *indir)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ size_t copy_size =
+ min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table));
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+ return -EOPNOTSUPP;
+
+ indir->size = ARRAY_SIZE(efx->rx_indir_table);
+ memcpy(indir->ring_index, efx->rx_indir_table,
+ copy_size * sizeof(indir->ring_index[0]));
+ return 0;
+}
+
+static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
+ const struct ethtool_rxfh_indir *indir)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ size_t i;
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+ return -EOPNOTSUPP;
+
+ /* Validate size and indices */
+ if (indir->size != ARRAY_SIZE(efx->rx_indir_table))
+ return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+ if (indir->ring_index[i] >= efx->n_rx_channels)
+ return -EINVAL;
+
+ memcpy(efx->rx_indir_table, indir->ring_index,
+ sizeof(efx->rx_indir_table));
+ efx_nic_push_rx_indir_table(efx);
+ return 0;
+}
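+/* Usage sketch (ethX is a placeholder): `ethtool -x ethX` dumps the
+ * indirection table returned by the getter above, and `ethtool -X ethX
+ * equal 4` (with a sufficiently recent ethtool) would spread flows
+ * evenly across the first four RX channels, subject to the size and
+ * range checks in the setter.
+ */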
+
+const struct ethtool_ops efx_ethtool_ops = {
+ .get_settings = efx_ethtool_get_settings,
+ .set_settings = efx_ethtool_set_settings,
+ .get_drvinfo = efx_ethtool_get_drvinfo,
+ .get_regs_len = efx_ethtool_get_regs_len,
+ .get_regs = efx_ethtool_get_regs,
+ .get_msglevel = efx_ethtool_get_msglevel,
+ .set_msglevel = efx_ethtool_set_msglevel,
+ .nway_reset = efx_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = efx_ethtool_get_coalesce,
+ .set_coalesce = efx_ethtool_set_coalesce,
+ .get_ringparam = efx_ethtool_get_ringparam,
+ .set_ringparam = efx_ethtool_set_ringparam,
+ .get_pauseparam = efx_ethtool_get_pauseparam,
+ .set_pauseparam = efx_ethtool_set_pauseparam,
+ .get_sset_count = efx_ethtool_get_sset_count,
+ .self_test = efx_ethtool_self_test,
+ .get_strings = efx_ethtool_get_strings,
+ .set_phys_id = efx_ethtool_phys_id,
+ .get_ethtool_stats = efx_ethtool_get_stats,
+ .get_wol = efx_ethtool_get_wol,
+ .set_wol = efx_ethtool_set_wol,
+ .reset = efx_ethtool_reset,
+ .get_rxnfc = efx_ethtool_get_rxnfc,
+ .set_rx_ntuple = efx_ethtool_set_rx_ntuple,
+ .get_rxfh_indir = efx_ethtool_get_rxfh_indir,
+ .set_rxfh_indir = efx_ethtool_set_rxfh_indir,
+};
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
new file mode 100644
index 000000000000..4dd1748a19c6
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -0,0 +1,1843 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/i2c.h>
+#include <linux/mii.h>
+#include <linux/slab.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "mac.h"
+#include "spi.h"
+#include "nic.h"
+#include "regs.h"
+#include "io.h"
+#include "phy.h"
+#include "workarounds.h"
+
+/* Hardware control for SFC4000 (aka Falcon). */
+
+static const unsigned int
+/* "Large" EEPROM device: Atmel AT25640 or similar
+ * 8 KB, 16-bit address, 32 B write block */
+large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
+ | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
+ | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
+/* Default flash device: Atmel AT25F1024
+ * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
+default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
+ | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
+ | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
+ | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
+ | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
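+/* The type words above pack log2-encoded geometry, decoded later by
+ * falcon_spi_device_init(); e.g. for large_eeprom_type the size field
+ * 13 yields 1 << 13 = 8192 bytes (8 KB) and the block size field 5
+ * yields 1 << 5 = 32 byte write blocks, matching the comments.
+ */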
+
+/**************************************************************************
+ *
+ * I2C bus - this is a bit-bashing interface using GPIO pins
+ * Note that it uses the output enables to tristate the outputs
+ * SDA is the data pin and SCL is the clock
+ *
+ **************************************************************************
+ */
+static void falcon_setsda(void *data, int state)
+{
+ struct efx_nic *efx = (struct efx_nic *)data;
+ efx_oword_t reg;
+
+ efx_reado(efx, &reg, FR_AB_GPIO_CTL);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
+ efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
+}
+
+static void falcon_setscl(void *data, int state)
+{
+ struct efx_nic *efx = (struct efx_nic *)data;
+ efx_oword_t reg;
+
+ efx_reado(efx, &reg, FR_AB_GPIO_CTL);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
+ efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
+}
+
+static int falcon_getsda(void *data)
+{
+ struct efx_nic *efx = (struct efx_nic *)data;
+ efx_oword_t reg;
+
+ efx_reado(efx, &reg, FR_AB_GPIO_CTL);
+ return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
+}
+
+static int falcon_getscl(void *data)
+{
+ struct efx_nic *efx = (struct efx_nic *)data;
+ efx_oword_t reg;
+
+ efx_reado(efx, &reg, FR_AB_GPIO_CTL);
+ return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
+}
+
+static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
+ .setsda = falcon_setsda,
+ .setscl = falcon_setscl,
+ .getsda = falcon_getsda,
+ .getscl = falcon_getscl,
+ .udelay = 5,
+ /* Wait up to 50 ms for slave to let us pull SCL high */
+ .timeout = DIV_ROUND_UP(HZ, 20),
+};
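+/* The setsda/setscl handlers above emulate open-drain outputs: writing
+ * !state to the OEN field either drives the pin low (output enabled) or
+ * tristates it so the external pull-up can raise the line; a logic "1"
+ * is therefore never actively driven.
+ */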
+
+static void falcon_push_irq_moderation(struct efx_channel *channel)
+{
+ efx_dword_t timer_cmd;
+ struct efx_nic *efx = channel->efx;
+
+ BUILD_BUG_ON(EFX_IRQ_MOD_MAX > (1 << FRF_AB_TC_TIMER_VAL_WIDTH));
+
+ /* Set timer register */
+ if (channel->irq_moderation) {
+ EFX_POPULATE_DWORD_2(timer_cmd,
+ FRF_AB_TC_TIMER_MODE,
+ FFE_BB_TIMER_MODE_INT_HLDOFF,
+ FRF_AB_TC_TIMER_VAL,
+ channel->irq_moderation - 1);
+ } else {
+ EFX_POPULATE_DWORD_2(timer_cmd,
+ FRF_AB_TC_TIMER_MODE,
+ FFE_BB_TIMER_MODE_DIS,
+ FRF_AB_TC_TIMER_VAL, 0);
+ }
+ BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
+ efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
+ channel->channel);
+}
+
+static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
+
+static void falcon_prepare_flush(struct efx_nic *efx)
+{
+ falcon_deconfigure_mac_wrapper(efx);
+
+ /* Wait for the TX and RX FIFOs to get to the next packet boundary
+ * (~1ms without back-pressure), then to drain the remainder of the
+ * FIFOs at data path speeds (negligible), with a healthy margin. */
+ msleep(10);
+}
+
+/* Acknowledge a legacy interrupt from Falcon
+ *
+ * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
+ *
+ * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
+ * BIU. Interrupt acknowledge is read-sensitive, so we must write instead
+ * (then read back to ensure the BIU collector is flushed)
+ *
+ * NB most hardware supports MSI interrupts
+ */
+inline void falcon_irq_ack_a1(struct efx_nic *efx)
+{
+ efx_dword_t reg;
+
+ EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
+ efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
+ efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
+}
+
+irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
+{
+ struct efx_nic *efx = dev_id;
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ int syserr;
+ int queues;
+
+ /* Check to see if this is our interrupt. If it isn't, we
+ * exit without having touched the hardware.
+ */
+ if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d not for me\n", irq,
+ raw_smp_processor_id());
+ return IRQ_NONE;
+ }
+ efx->last_irq_cpu = raw_smp_processor_id();
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+
+ /* Determine interrupting queues, clear interrupt status
+ * register and acknowledge the device interrupt.
+ */
+ BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
+ queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
+
+ /* Check to see if we have a serious error condition */
+ if (queues & (1U << efx->fatal_irq_level)) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_nic_fatal_interrupt(efx);
+ }
+
+ EFX_ZERO_OWORD(*int_ker);
+ wmb(); /* Ensure the vector is cleared before interrupt ack */
+ falcon_irq_ack_a1(efx);
+
+ if (queues & 1)
+ efx_schedule_channel(efx_get_channel(efx, 0));
+ if (queues & 2)
+ efx_schedule_channel(efx_get_channel(efx, 1));
+ return IRQ_HANDLED;
+}
+
+/**************************************************************************
+ *
+ * EEPROM/flash
+ *
+ **************************************************************************
+ */
+
+#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
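+/* efx_oword_t is the 128-bit register width, so one SPI command moves at
+ * most 16 bytes through the FR_AB_EE_SPI_HDATA data register.
+ */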
+
+static int falcon_spi_poll(struct efx_nic *efx)
+{
+ efx_oword_t reg;
+ efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
+ return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
+}
+
+/* Wait for SPI command completion */
+static int falcon_spi_wait(struct efx_nic *efx)
+{
+ /* Most commands will finish quickly, so we start polling at
+ * very short intervals. Sometimes the command may have to
+ * wait for VPD or expansion ROM access outside of our
+ * control, so we allow up to 100 ms. */
+ unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ if (!falcon_spi_poll(efx))
+ return 0;
+ udelay(10);
+ }
+
+ for (;;) {
+ if (!falcon_spi_poll(efx))
+ return 0;
+ if (time_after_eq(jiffies, timeout)) {
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for SPI\n");
+ return -ETIMEDOUT;
+ }
+ schedule_timeout_uninterruptible(1);
+ }
+}
+
+int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
+ unsigned int command, int address,
+ const void *in, void *out, size_t len)
+{
+ bool addressed = (address >= 0);
+ bool reading = (out != NULL);
+ efx_oword_t reg;
+ int rc;
+
+ /* Input validation */
+ if (len > FALCON_SPI_MAX_LEN)
+ return -EINVAL;
+
+ /* Check that previous command is not still running */
+ rc = falcon_spi_poll(efx);
+ if (rc)
+ return rc;
+
+ /* Program address register, if we have an address */
+ if (addressed) {
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
+ efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
+ }
+
+ /* Program data register, if we have data */
+ if (in != NULL) {
+ memcpy(&reg, in, len);
+ efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
+ }
+
+ /* Issue read/write command */
+ EFX_POPULATE_OWORD_7(reg,
+ FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
+ FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
+ FRF_AB_EE_SPI_HCMD_DABCNT, len,
+ FRF_AB_EE_SPI_HCMD_READ, reading,
+ FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
+ FRF_AB_EE_SPI_HCMD_ADBCNT,
+ (addressed ? spi->addr_len : 0),
+ FRF_AB_EE_SPI_HCMD_ENC, command);
+ efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);
+
+ /* Wait for read/write to complete */
+ rc = falcon_spi_wait(efx);
+ if (rc)
+ return rc;
+
+ /* Read data */
+ if (out != NULL) {
+ efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
+ memcpy(out, &reg, len);
+ }
+
+ return 0;
+}
+
+static size_t
+falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
+{
+ return min(FALCON_SPI_MAX_LEN,
+ (spi->block_size - (start & (spi->block_size - 1))));
+}
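+/* A write must not cross a device write-block boundary. Worked example:
+ * with a 256 byte block size and start = 250, the arithmetic above gives
+ * 256 - (250 & 255) = 6 bytes, further capped at FALCON_SPI_MAX_LEN.
+ */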
+
+static inline u8
+efx_spi_munge_command(const struct efx_spi_device *spi,
+ const u8 command, const unsigned int address)
+{
+ return command | (((address >> 8) & spi->munge_address) << 3);
+}
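+/* For small EEPROMs (512 bytes with a one-byte address, for which
+ * falcon_spi_device_init() sets munge_address), bit 8 of the address is
+ * carried in bit 3 of the command opcode: assuming the conventional 0x03
+ * READ opcode, address 0x130 gives 0x03 | (1 << 3) = 0x0b.
+ */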
+
+/* Wait up to 10 ms for buffered write completion */
+int
+falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
+{
+ unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
+ u8 status;
+ int rc;
+
+ for (;;) {
+ rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+ &status, sizeof(status));
+ if (rc)
+ return rc;
+ if (!(status & SPI_STATUS_NRDY))
+ return 0;
+ if (time_after_eq(jiffies, timeout)) {
+ netif_err(efx, hw, efx->net_dev,
+ "SPI write timeout on device %d"
+ " last status=0x%02x\n",
+ spi->device_id, status);
+ return -ETIMEDOUT;
+ }
+ schedule_timeout_uninterruptible(1);
+ }
+}
+
+int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
+ loff_t start, size_t len, size_t *retlen, u8 *buffer)
+{
+ size_t block_len, pos = 0;
+ unsigned int command;
+ int rc = 0;
+
+ while (pos < len) {
+ block_len = min(len - pos, FALCON_SPI_MAX_LEN);
+
+ command = efx_spi_munge_command(spi, SPI_READ, start + pos);
+ rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
+ buffer + pos, block_len);
+ if (rc)
+ break;
+ pos += block_len;
+
+ /* Avoid locking up the system */
+ cond_resched();
+ if (signal_pending(current)) {
+ rc = -EINTR;
+ break;
+ }
+ }
+
+ if (retlen)
+ *retlen = pos;
+ return rc;
+}
+
+int
+falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
+ loff_t start, size_t len, size_t *retlen, const u8 *buffer)
+{
+ u8 verify_buffer[FALCON_SPI_MAX_LEN];
+ size_t block_len, pos = 0;
+ unsigned int command;
+ int rc = 0;
+
+ while (pos < len) {
+ rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
+ if (rc)
+ break;
+
+ block_len = min(len - pos,
+ falcon_spi_write_limit(spi, start + pos));
+ command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
+ rc = falcon_spi_cmd(efx, spi, command, start + pos,
+ buffer + pos, NULL, block_len);
+ if (rc)
+ break;
+
+ rc = falcon_spi_wait_write(efx, spi);
+ if (rc)
+ break;
+
+ command = efx_spi_munge_command(spi, SPI_READ, start + pos);
+ rc = falcon_spi_cmd(efx, spi, command, start + pos,
+ NULL, verify_buffer, block_len);
+ if (memcmp(verify_buffer, buffer + pos, block_len)) {
+ rc = -EIO;
+ break;
+ }
+
+ pos += block_len;
+
+ /* Avoid locking up the system */
+ cond_resched();
+ if (signal_pending(current)) {
+ rc = -EINTR;
+ break;
+ }
+ }
+
+ if (retlen)
+ *retlen = pos;
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * MAC wrapper
+ *
+ **************************************************************************
+ */
+
+static void falcon_push_multicast_hash(struct efx_nic *efx)
+{
+ union efx_multicast_hash *mc_hash = &efx->multicast_hash;
+
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
+ efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
+}
+
+static void falcon_reset_macs(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg, mac_ctrl;
+ int count;
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
+ /* It's not safe to use GLB_CTL_REG to reset the
+ * MACs, so instead use the internal MAC resets
+ */
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
+
+ for (count = 0; count < 10000; count++) {
+ efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
+ if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) == 0)
+ return;
+ udelay(10);
+ }
+
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for XMAC core reset\n");
+ }
+
+ /* MAC stats will fail whilst the TX FIFO is draining */
+ WARN_ON(nic_data->stats_disable_count == 0);
+
+ efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
+ EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
+ efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
+
+ efx_reado(efx, &reg, FR_AB_GLB_CTL);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
+ efx_writeo(efx, &reg, FR_AB_GLB_CTL);
+
+ count = 0;
+ while (1) {
+ efx_reado(efx, &reg, FR_AB_GLB_CTL);
+ if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
+ !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
+ !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
+ netif_dbg(efx, hw, efx->net_dev,
+ "Completed MAC reset after %d loops\n",
+ count);
+ break;
+ }
+ if (count > 20) {
+ netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
+ break;
+ }
+ count++;
+ udelay(10);
+ }
+
+ /* Ensure the correct MAC is selected before statistics
+ * are re-enabled by the caller */
+ efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
+
+ falcon_setup_xaui(efx);
+}
+
+void falcon_drain_tx_fifo(struct efx_nic *efx)
+{
+ efx_oword_t reg;
+
+ if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) ||
+ (efx->loopback_mode != LOOPBACK_NONE))
+ return;
+
+ efx_reado(efx, &reg, FR_AB_MAC_CTRL);
+ /* There is no point in draining more than once */
+ if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
+ return;
+
+ falcon_reset_macs(efx);
+}
+
+static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
+{
+ efx_oword_t reg;
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+ return;
+
+ /* Isolate the MAC -> RX */
+ efx_reado(efx, &reg, FR_AZ_RX_CFG);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
+ efx_writeo(efx, &reg, FR_AZ_RX_CFG);
+
+ /* Isolate TX -> MAC */
+ falcon_drain_tx_fifo(efx);
+}
+
+void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
+{
+ struct efx_link_state *link_state = &efx->link_state;
+ efx_oword_t reg;
+ int link_speed, isolate;
+
+ isolate = !!ACCESS_ONCE(efx->reset_pending);
+
+ switch (link_state->speed) {
+ case 10000: link_speed = 3; break;
+ case 1000: link_speed = 2; break;
+ case 100: link_speed = 1; break;
+ default: link_speed = 0; break;
+ }
+ /* MAC_LINK_STATUS controls MAC backpressure but doesn't work
+ * as advertised. Disable to ensure packets are not
+ * indefinitely held and TX queue can be flushed at any point
+ * while the link is down. */
+ EFX_POPULATE_OWORD_5(reg,
+ FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
+ FRF_AB_MAC_BCAD_ACPT, 1,
+ FRF_AB_MAC_UC_PROM, efx->promiscuous,
+ FRF_AB_MAC_LINK_STATUS, 1, /* always set */
+ FRF_AB_MAC_SPEED, link_speed);
+ /* On B0, MAC backpressure can be disabled and packets get
+ * discarded. */
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
+ !link_state->up || isolate);
+ }
+
+ efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
+
+ /* Restore the multicast hash registers. */
+ falcon_push_multicast_hash(efx);
+
+ efx_reado(efx, &reg, FR_AZ_RX_CFG);
+ /* Enable XOFF signal from RX FIFO (we enabled it during NIC
+ * initialisation but it may read back as 0) */
+ EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
+ /* Unisolate the MAC -> RX */
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
+ efx_writeo(efx, &reg, FR_AZ_RX_CFG);
+}
+
+static void falcon_stats_request(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg;
+
+ WARN_ON(nic_data->stats_pending);
+ WARN_ON(nic_data->stats_disable_count);
+
+ if (nic_data->stats_dma_done == NULL)
+ return; /* no MAC selected */
+
+ *nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
+ nic_data->stats_pending = true;
+ wmb(); /* ensure done flag is clear */
+
+ /* Initiate DMA transfer of stats */
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AB_MAC_STAT_DMA_CMD, 1,
+ FRF_AB_MAC_STAT_DMA_ADR,
+ efx->stats_buffer.dma_addr);
+ efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);
+
+ mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
+}
+
+static void falcon_stats_complete(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ if (!nic_data->stats_pending)
+ return;
+
+ nic_data->stats_pending = 0;
+ if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
+ rmb(); /* read the done flag before the stats */
+ efx->mac_op->update_stats(efx);
+ } else {
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for statistics\n");
+ }
+}
+
+static void falcon_stats_timer_func(unsigned long context)
+{
+ struct efx_nic *efx = (struct efx_nic *)context;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ spin_lock(&efx->stats_lock);
+
+ falcon_stats_complete(efx);
+ if (nic_data->stats_disable_count == 0)
+ falcon_stats_request(efx);
+
+ spin_unlock(&efx->stats_lock);
+}
+
+static bool falcon_loopback_link_poll(struct efx_nic *efx)
+{
+ struct efx_link_state old_state = efx->link_state;
+
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+ WARN_ON(!LOOPBACK_INTERNAL(efx));
+
+ efx->link_state.fd = true;
+ efx->link_state.fc = efx->wanted_fc;
+ efx->link_state.up = true;
+ efx->link_state.speed = 10000;
+
+ return !efx_link_state_equal(&efx->link_state, &old_state);
+}
+
+static int falcon_reconfigure_port(struct efx_nic *efx)
+{
+ int rc;
+
+ WARN_ON(efx_nic_rev(efx) > EFX_REV_FALCON_B0);
+
+ /* Poll the PHY link state *before* reconfiguring it. This means we
+ * will pick up the correct speed (in loopback) to select the correct
+ * MAC.
+ */
+ if (LOOPBACK_INTERNAL(efx))
+ falcon_loopback_link_poll(efx);
+ else
+ efx->phy_op->poll(efx);
+
+ falcon_stop_nic_stats(efx);
+ falcon_deconfigure_mac_wrapper(efx);
+
+ falcon_reset_macs(efx);
+
+ efx->phy_op->reconfigure(efx);
+ rc = efx->mac_op->reconfigure(efx);
+ BUG_ON(rc);
+
+ falcon_start_nic_stats(efx);
+
+ /* Synchronise efx->link_state with the kernel */
+ efx_link_status_changed(efx);
+
+ return 0;
+}
+
+/**************************************************************************
+ *
+ * PHY access via GMII
+ *
+ **************************************************************************
+ */
+
+/* Wait for GMII access to complete */
+static int falcon_gmii_wait(struct efx_nic *efx)
+{
+ efx_oword_t md_stat;
+ int count;
+
+ /* wait up to 50 ms - maximum taken from the datasheet */
+ for (count = 0; count < 5000; count++) {
+ efx_reado(efx, &md_stat, FR_AB_MD_STAT);
+ if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
+ if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
+ EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
+ netif_err(efx, hw, efx->net_dev,
+ "error from GMII access "
+ EFX_OWORD_FMT"\n",
+ EFX_OWORD_VAL(md_stat));
+ return -EIO;
+ }
+ return 0;
+ }
+ udelay(10);
+ }
+ netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
+ return -ETIMEDOUT;
+}
+
+/* Write an MDIO register of a PHY connected to Falcon. */
+static int falcon_mdio_write(struct net_device *net_dev,
+ int prtad, int devad, u16 addr, u16 value)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg;
+ int rc;
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing MDIO %d register %d.%d with 0x%04x\n",
+ prtad, devad, addr, value);
+
+ mutex_lock(&nic_data->mdio_lock);
+
+ /* Check MDIO not currently being accessed */
+ rc = falcon_gmii_wait(efx);
+ if (rc)
+ goto out;
+
+ /* Write the address/ID register */
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
+ efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
+
+ EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
+ FRF_AB_MD_DEV_ADR, devad);
+ efx_writeo(efx, &reg, FR_AB_MD_ID);
+
+ /* Write data */
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
+ efx_writeo(efx, &reg, FR_AB_MD_TXD);
+
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AB_MD_WRC, 1,
+ FRF_AB_MD_GC, 0);
+ efx_writeo(efx, &reg, FR_AB_MD_CS);
+
+ /* Wait for data to be written */
+ rc = falcon_gmii_wait(efx);
+ if (rc) {
+ /* Abort the write operation */
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AB_MD_WRC, 0,
+ FRF_AB_MD_GC, 1);
+ efx_writeo(efx, &reg, FR_AB_MD_CS);
+ udelay(10);
+ }
+
+out:
+ mutex_unlock(&nic_data->mdio_lock);
+ return rc;
+}
+
+/* Read an MDIO register of a PHY connected to Falcon. */
+static int falcon_mdio_read(struct net_device *net_dev,
+ int prtad, int devad, u16 addr)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg;
+ int rc;
+
+ mutex_lock(&nic_data->mdio_lock);
+
+ /* Check MDIO not currently being accessed */
+ rc = falcon_gmii_wait(efx);
+ if (rc)
+ goto out;
+
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
+ efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
+
+ EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
+ FRF_AB_MD_DEV_ADR, devad);
+ efx_writeo(efx, &reg, FR_AB_MD_ID);
+
+ /* Request data to be read */
+ EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
+ efx_writeo(efx, &reg, FR_AB_MD_CS);
+
+ /* Wait for data to become available */
+ rc = falcon_gmii_wait(efx);
+ if (rc == 0) {
+ efx_reado(efx, &reg, FR_AB_MD_RXD);
+ rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from MDIO %d register %d.%d, got %04x\n",
+ prtad, devad, addr, rc);
+ } else {
+ /* Abort the read operation */
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AB_MD_RIC, 0,
+ FRF_AB_MD_GC, 1);
+ efx_writeo(efx, &reg, FR_AB_MD_CS);
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "read from MDIO %d register %d.%d, got error %d\n",
+ prtad, devad, addr, rc);
+ }
+
+out:
+ mutex_unlock(&nic_data->mdio_lock);
+ return rc;
+}
+
+/* This call is responsible for hooking in the MAC and PHY operations */
+static int falcon_probe_port(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ switch (efx->phy_type) {
+ case PHY_TYPE_SFX7101:
+ efx->phy_op = &falcon_sfx7101_phy_ops;
+ break;
+ case PHY_TYPE_QT2022C2:
+ case PHY_TYPE_QT2025C:
+ efx->phy_op = &falcon_qt202x_phy_ops;
+ break;
+ case PHY_TYPE_TXC43128:
+ efx->phy_op = &falcon_txc_phy_ops;
+ break;
+ default:
+ netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
+ efx->phy_type);
+ return -ENODEV;
+ }
+
+ /* Fill out MDIO structure and loopback modes */
+ mutex_init(&nic_data->mdio_lock);
+ efx->mdio.mdio_read = falcon_mdio_read;
+ efx->mdio.mdio_write = falcon_mdio_write;
+ rc = efx->phy_op->probe(efx);
+ if (rc != 0)
+ return rc;
+
+ /* Initial assumption */
+ efx->link_state.speed = 10000;
+ efx->link_state.fd = true;
+
+ /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
+ efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
+ else
+ efx->wanted_fc = EFX_FC_RX;
+ if (efx->mdio.mmds & MDIO_DEVS_AN)
+ efx->wanted_fc |= EFX_FC_AUTO;
+
+ /* Allocate buffer for stats */
+ rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
+ FALCON_MAC_STATS_SIZE);
+ if (rc)
+ return rc;
+ netif_dbg(efx, probe, efx->net_dev,
+ "stats buffer at %llx (virt %p phys %llx)\n",
+ (u64)efx->stats_buffer.dma_addr,
+ efx->stats_buffer.addr,
+ (u64)virt_to_phys(efx->stats_buffer.addr));
+ nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset;
+
+ return 0;
+}
+
+static void falcon_remove_port(struct efx_nic *efx)
+{
+ efx->phy_op->remove(efx);
+ efx_nic_free_buffer(efx, &efx->stats_buffer);
+}
+
+/* Global events are basically PHY events */
+static bool
+falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
+ EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
+ EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
+ /* Ignored */
+ return true;
+
+ if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) &&
+ EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
+ nic_data->xmac_poll_required = true;
+ return true;
+ }
+
+ if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
+ EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
+ EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
+ netif_err(efx, rx_err, efx->net_dev,
+ "channel %d seen global RX_RESET event. Resetting.\n",
+ channel->channel);
+
+ atomic_inc(&efx->rx_reset);
+ efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
+ RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+ return true;
+ }
+
+ return false;
+}
+
+/**************************************************************************
+ *
+ * Falcon test code
+ *
+ **************************************************************************/
+
+static int
+falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ struct falcon_nvconfig *nvconfig;
+ struct efx_spi_device *spi;
+ void *region;
+ int rc, magic_num, struct_ver;
+ __le16 *word, *limit;
+ u32 csum;
+
+ if (efx_spi_present(&nic_data->spi_flash))
+ spi = &nic_data->spi_flash;
+ else if (efx_spi_present(&nic_data->spi_eeprom))
+ spi = &nic_data->spi_eeprom;
+ else
+ return -EINVAL;
+
+ region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+ nvconfig = region + FALCON_NVCONFIG_OFFSET;
+
+ mutex_lock(&nic_data->spi_lock);
+ rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
+ mutex_unlock(&nic_data->spi_lock);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
+ efx_spi_present(&nic_data->spi_flash) ?
+ "flash" : "EEPROM");
+ rc = -EIO;
+ goto out;
+ }
+
+ magic_num = le16_to_cpu(nvconfig->board_magic_num);
+ struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
+
+ rc = -EINVAL;
+ if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
+ netif_err(efx, hw, efx->net_dev,
+ "NVRAM bad magic 0x%x\n", magic_num);
+ goto out;
+ }
+ if (struct_ver < 2) {
+ netif_err(efx, hw, efx->net_dev,
+ "NVRAM has ancient version 0x%x\n", struct_ver);
+ goto out;
+ } else if (struct_ver < 4) {
+ word = &nvconfig->board_magic_num;
+ limit = (__le16 *) (nvconfig + 1);
+ } else {
+ word = region;
+ limit = region + FALCON_NVCONFIG_END;
+ }
+ for (csum = 0; word < limit; ++word)
+ csum += le16_to_cpu(*word);
+
+ if (~csum & 0xffff) {
+ netif_err(efx, hw, efx->net_dev,
+ "NVRAM has incorrect checksum\n");
+ goto out;
+ }
+
+ rc = 0;
+ if (nvconfig_out)
+ memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));
+
+ out:
+ kfree(region);
+ return rc;
+}
+
+static int falcon_test_nvram(struct efx_nic *efx)
+{
+ return falcon_read_nvram(efx, NULL);
+}
+
+static const struct efx_nic_register_test falcon_b0_register_tests[] = {
+ { FR_AZ_ADR_REGION,
+ EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
+ { FR_AZ_RX_CFG,
+ EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
+ { FR_AZ_TX_CFG,
+ EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AZ_TX_RESERVED,
+ EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
+ { FR_AB_MAC_CTRL,
+ EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AZ_SRM_TX_DC_CFG,
+ EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AZ_RX_DC_CFG,
+ EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AZ_RX_DC_PF_WM,
+ EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_BZ_DP_CTRL,
+ EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AB_GM_CFG2,
+ EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AB_GMF_CFG0,
+ EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AB_XM_GLB_CFG,
+ EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AB_XM_TX_CFG,
+ EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AB_XM_RX_CFG,
+ EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AB_XM_RX_PARAM,
+ EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AB_XM_FC,
+ EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AB_XM_ADR_LO,
+ EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AB_XX_SD_CTL,
+ EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
+};
+
+static int falcon_b0_test_registers(struct efx_nic *efx)
+{
+ return efx_nic_test_registers(efx, falcon_b0_register_tests,
+ ARRAY_SIZE(falcon_b0_register_tests));
+}
+
+/**************************************************************************
+ *
+ * Device reset
+ *
+ **************************************************************************
+ */
+
+static enum reset_type falcon_map_reset_reason(enum reset_type reason)
+{
+ switch (reason) {
+ case RESET_TYPE_RX_RECOVERY:
+ case RESET_TYPE_RX_DESC_FETCH:
+ case RESET_TYPE_TX_DESC_FETCH:
+ case RESET_TYPE_TX_SKIP:
+ /* These can occasionally occur due to hardware bugs.
+ * We try to reset without disrupting the link.
+ */
+ return RESET_TYPE_INVISIBLE;
+ default:
+ return RESET_TYPE_ALL;
+ }
+}
+
+static int falcon_map_reset_flags(u32 *flags)
+{
+ enum {
+ FALCON_RESET_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER |
+ ETH_RESET_OFFLOAD | ETH_RESET_MAC),
+ FALCON_RESET_ALL = FALCON_RESET_INVISIBLE | ETH_RESET_PHY,
+ FALCON_RESET_WORLD = FALCON_RESET_ALL | ETH_RESET_IRQ,
+ };
+
+ if ((*flags & FALCON_RESET_WORLD) == FALCON_RESET_WORLD) {
+ *flags &= ~FALCON_RESET_WORLD;
+ return RESET_TYPE_WORLD;
+ }
+
+ if ((*flags & FALCON_RESET_ALL) == FALCON_RESET_ALL) {
+ *flags &= ~FALCON_RESET_ALL;
+ return RESET_TYPE_ALL;
+ }
+
+ if ((*flags & FALCON_RESET_INVISIBLE) == FALCON_RESET_INVISIBLE) {
+ *flags &= ~FALCON_RESET_INVISIBLE;
+ return RESET_TYPE_INVISIBLE;
+ }
+
+ return -EINVAL;
+}
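+/* Usage sketch (ethX is a placeholder, and this needs an ethtool with
+ * --reset support): `ethtool --reset ethX dma filter offload mac`
+ * requests exactly FALCON_RESET_INVISIBLE and maps to
+ * RESET_TYPE_INVISIBLE; adding `phy` upgrades this to RESET_TYPE_ALL,
+ * and `irq` on top of that to RESET_TYPE_WORLD.
+ */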
+
+/* Resets the NIC to a known state. This routine must be called in
+ * process context and is allowed to sleep. */
+static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t glb_ctl_reg_ker;
+ int rc;
+
+ netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
+ RESET_TYPE(method));
+
+ /* Initiate device reset */
+ if (method == RESET_TYPE_WORLD) {
+ rc = pci_save_state(efx->pci_dev);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to backup PCI state of primary "
+ "function prior to hardware reset\n");
+ goto fail1;
+ }
+ if (efx_nic_is_dual_func(efx)) {
+ rc = pci_save_state(nic_data->pci_dev2);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to backup PCI state of "
+ "secondary function prior to "
+ "hardware reset\n");
+ goto fail2;
+ }
+ }
+
+ EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
+ FRF_AB_EXT_PHY_RST_DUR,
+ FFE_AB_EXT_PHY_RST_DUR_10240US,
+ FRF_AB_SWRST, 1);
+ } else {
+ EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
+ /* exclude PHY from "invisible" reset */
+ FRF_AB_EXT_PHY_RST_CTL,
+ method == RESET_TYPE_INVISIBLE,
+ /* exclude EEPROM/flash and PCIe */
+ FRF_AB_PCIE_CORE_RST_CTL, 1,
+ FRF_AB_PCIE_NSTKY_RST_CTL, 1,
+ FRF_AB_PCIE_SD_RST_CTL, 1,
+ FRF_AB_EE_RST_CTL, 1,
+ FRF_AB_EXT_PHY_RST_DUR,
+ FFE_AB_EXT_PHY_RST_DUR_10240US,
+ FRF_AB_SWRST, 1);
+ }
+ efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
+
+ netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
+ schedule_timeout_uninterruptible(HZ / 20);
+
+ /* Restore PCI configuration if needed */
+ if (method == RESET_TYPE_WORLD) {
+ if (efx_nic_is_dual_func(efx))
+ pci_restore_state(nic_data->pci_dev2);
+ pci_restore_state(efx->pci_dev);
+ netif_dbg(efx, drv, efx->net_dev,
+ "successfully restored PCI config\n");
+ }
+
+ /* Assert that reset complete */
+ efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
+ if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
+ rc = -ETIMEDOUT;
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for hardware reset\n");
+ goto fail3;
+ }
+ netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");
+
+ return 0;
+
+ /* pci_save_state() and pci_restore_state() MUST be called in pairs */
+fail2:
+ pci_restore_state(efx->pci_dev);
+fail1:
+fail3:
+ return rc;
+}
+
+static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ mutex_lock(&nic_data->spi_lock);
+ rc = __falcon_reset_hw(efx, method);
+ mutex_unlock(&nic_data->spi_lock);
+
+ return rc;
+}
+
+static void falcon_monitor(struct efx_nic *efx)
+{
+ bool link_changed;
+ int rc;
+
+ BUG_ON(!mutex_is_locked(&efx->mac_lock));
+
+ rc = falcon_board(efx)->type->monitor(efx);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "Board sensor %s; shutting down PHY\n",
+ (rc == -ERANGE) ? "reported fault" : "failed");
+ efx->phy_mode |= PHY_MODE_LOW_POWER;
+ rc = __efx_reconfigure_port(efx);
+ WARN_ON(rc);
+ }
+
+ if (LOOPBACK_INTERNAL(efx))
+ link_changed = falcon_loopback_link_poll(efx);
+ else
+ link_changed = efx->phy_op->poll(efx);
+
+ if (link_changed) {
+ falcon_stop_nic_stats(efx);
+ falcon_deconfigure_mac_wrapper(efx);
+
+ falcon_reset_macs(efx);
+ rc = efx->mac_op->reconfigure(efx);
+ BUG_ON(rc);
+
+ falcon_start_nic_stats(efx);
+
+ efx_link_status_changed(efx);
+ }
+
+ falcon_poll_xmac(efx);
+}
+
+/* Zeroes out the SRAM contents. This routine must be called in
+ * process context and is allowed to sleep.
+ */
+static int falcon_reset_sram(struct efx_nic *efx)
+{
+ efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
+ int count;
+
+ /* Set the SRAM wake/sleep GPIO appropriately. */
+ efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
+ EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
+ EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
+ efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
+
+ /* Initiate SRAM reset */
+ EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
+ FRF_AZ_SRM_INIT_EN, 1,
+ FRF_AZ_SRM_NB_SZ, 0);
+ efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
+
+ /* Wait for SRAM reset to complete */
+ count = 0;
+ do {
+ netif_dbg(efx, hw, efx->net_dev,
+ "waiting for SRAM reset (attempt %d)...\n", count);
+
+ /* SRAM reset is slow; expect around 16ms */
+ schedule_timeout_uninterruptible(HZ / 50);
+
+ /* Check for reset complete */
+ efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
+ if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
+ netif_dbg(efx, hw, efx->net_dev,
+ "SRAM reset complete\n");
+
+ return 0;
+ }
+ } while (++count < 20); /* wait up to 0.4 sec */
+
+ netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
+ return -ETIMEDOUT;
+}
+
+static void falcon_spi_device_init(struct efx_nic *efx,
+ struct efx_spi_device *spi_device,
+ unsigned int device_id, u32 device_type)
+{
+ if (device_type != 0) {
+ spi_device->device_id = device_id;
+ spi_device->size =
+ 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
+ spi_device->addr_len =
+ SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
+ spi_device->munge_address = (spi_device->size == 1 << 9 &&
+ spi_device->addr_len == 1);
+ spi_device->erase_command =
+ SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
+ spi_device->erase_size =
+ 1 << SPI_DEV_TYPE_FIELD(device_type,
+ SPI_DEV_TYPE_ERASE_SIZE);
+ spi_device->block_size =
+ 1 << SPI_DEV_TYPE_FIELD(device_type,
+ SPI_DEV_TYPE_BLOCK_SIZE);
+ } else {
+ spi_device->size = 0;
+ }
+}
+
+/* Extract non-volatile configuration */
+static int falcon_probe_nvconfig(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ struct falcon_nvconfig *nvconfig;
+ int rc;
+
+ nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
+ if (!nvconfig)
+ return -ENOMEM;
+
+ rc = falcon_read_nvram(efx, nvconfig);
+ if (rc)
+ goto out;
+
+ efx->phy_type = nvconfig->board_v2.port0_phy_type;
+ efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;
+
+ if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
+ falcon_spi_device_init(
+ efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
+ le32_to_cpu(nvconfig->board_v3
+ .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
+ falcon_spi_device_init(
+ efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
+ le32_to_cpu(nvconfig->board_v3
+ .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
+ }
+
+ /* Read the MAC address for port 0 */
+ memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN);
+
+ netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
+ efx->phy_type, efx->mdio.prtad);
+
+ rc = falcon_probe_board(efx,
+ le16_to_cpu(nvconfig->board_v2.board_revision));
+out:
+ kfree(nvconfig);
+ return rc;
+}
+
+/* Probe all SPI devices on the NIC */
+static void falcon_probe_spi_devices(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
+ int boot_dev;
+
+ efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
+ efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
+ efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
+
+ if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
+ boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
+ FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
+ netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
+ boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
+ "flash" : "EEPROM");
+ } else {
+ /* Disable VPD and set clock dividers to safe
+ * values for initial programming. */
+ boot_dev = -1;
+ netif_dbg(efx, probe, efx->net_dev,
+ "Booted from internal ASIC settings;"
+ " setting SPI config\n");
+ EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
+ /* 125 MHz / 7 ~= 20 MHz */
+ FRF_AB_EE_SF_CLOCK_DIV, 7,
+ /* 125 MHz / 63 ~= 2 MHz */
+ FRF_AB_EE_EE_CLOCK_DIV, 63);
+ efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
+ }
+
+ mutex_init(&nic_data->spi_lock);
+
+ if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
+ falcon_spi_device_init(efx, &nic_data->spi_flash,
+ FFE_AB_SPI_DEVICE_FLASH,
+ default_flash_type);
+ if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
+ falcon_spi_device_init(efx, &nic_data->spi_eeprom,
+ FFE_AB_SPI_DEVICE_EEPROM,
+ large_eeprom_type);
+}
+
+static int falcon_probe_nic(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data;
+ struct falcon_board *board;
+ int rc;
+
+ /* Allocate storage for hardware specific data */
+ nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
+ if (!nic_data)
+ return -ENOMEM;
+ efx->nic_data = nic_data;
+
+ rc = -ENODEV;
+
+ if (efx_nic_fpga_ver(efx) != 0) {
+ netif_err(efx, probe, efx->net_dev,
+ "Falcon FPGA not supported\n");
+ goto fail1;
+ }
+
+ if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
+ efx_oword_t nic_stat;
+ struct pci_dev *dev;
+ u8 pci_rev = efx->pci_dev->revision;
+
+ if ((pci_rev == 0xff) || (pci_rev == 0)) {
+ netif_err(efx, probe, efx->net_dev,
+ "Falcon rev A0 not supported\n");
+ goto fail1;
+ }
+ efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
+ if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
+ netif_err(efx, probe, efx->net_dev,
+ "Falcon rev A1 1G not supported\n");
+ goto fail1;
+ }
+ if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
+ netif_err(efx, probe, efx->net_dev,
+ "Falcon rev A1 PCI-X not supported\n");
+ goto fail1;
+ }
+
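+ /* Falcon A1 exposes a second PCI function whose config
+ * space must also be saved and restored across a world
+ * reset, so find it and take a reference now. */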
+ dev = pci_dev_get(efx->pci_dev);
+ while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
+ dev))) {
+ if (dev->bus == efx->pci_dev->bus &&
+ dev->devfn == efx->pci_dev->devfn + 1) {
+ nic_data->pci_dev2 = dev;
+ break;
+ }
+ }
+ if (!nic_data->pci_dev2) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to find secondary function\n");
+ rc = -ENODEV;
+ goto fail2;
+ }
+ }
+
+ /* Now we can reset the NIC */
+ rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
+ goto fail3;
+ }
+
+ /* Allocate memory for INT_KER */
+ rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
+ if (rc)
+ goto fail4;
+ BUG_ON(efx->irq_status.dma_addr & 0x0f);
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "INT_KER at %llx (virt %p phys %llx)\n",
+ (u64)efx->irq_status.dma_addr,
+ efx->irq_status.addr,
+ (u64)virt_to_phys(efx->irq_status.addr));
+
+ falcon_probe_spi_devices(efx);
+
+ /* Read in the non-volatile configuration */
+ rc = falcon_probe_nvconfig(efx);
+ if (rc) {
+ if (rc == -EINVAL)
+ netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
+ goto fail5;
+ }
+
+ /* Initialise I2C adapter */
+ board = falcon_board(efx);
+ board->i2c_adap.owner = THIS_MODULE;
+ board->i2c_data = falcon_i2c_bit_operations;
+ board->i2c_data.data = efx;
+ board->i2c_adap.algo_data = &board->i2c_data;
+ board->i2c_adap.dev.parent = &efx->pci_dev->dev;
+ strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
+ sizeof(board->i2c_adap.name));
+ rc = i2c_bit_add_bus(&board->i2c_adap);
+ if (rc)
+ goto fail5;
+
+ rc = falcon_board(efx)->type->init(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to initialise board\n");
+ goto fail6;
+ }
+
+ nic_data->stats_disable_count = 1;
+ setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
+ (unsigned long)efx);
+
+ return 0;
+
+ fail6:
+ BUG_ON(i2c_del_adapter(&board->i2c_adap));
+ memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
+ fail5:
+ efx_nic_free_buffer(efx, &efx->irq_status);
+ fail4:
+ fail3:
+ if (nic_data->pci_dev2) {
+ pci_dev_put(nic_data->pci_dev2);
+ nic_data->pci_dev2 = NULL;
+ }
+ fail2:
+ fail1:
+ kfree(efx->nic_data);
+ return rc;
+}
+
+static void falcon_init_rx_cfg(struct efx_nic *efx)
+{
+ /* Prior to Siena the RX DMA engine will split each frame at
+ * intervals of RX_USR_BUF_SIZE (32-byte units). We set it so
+ * large that splitting never happens. */
+ const unsigned huge_buf_size = (3 * 4096) >> 5;
+ /* RX control FIFO thresholds (32 entries) */
+ const unsigned ctrl_xon_thr = 20;
+ const unsigned ctrl_xoff_thr = 25;
+ efx_oword_t reg;
+
+ efx_reado(efx, &reg, FR_AZ_RX_CFG);
+ if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
+ /* Data FIFO size is 5.5K */
+ EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
+ huge_buf_size);
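+ /* XON/XOFF MAC thresholds are in units of 256 bytes */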
+ EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
+ EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
+ EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
+ EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
+ } else {
+ /* Data FIFO size is 80K; register fields moved */
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
+ huge_buf_size);
+ /* Send XON and XOFF at ~3 * max MTU away from empty/full */
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
+
+ /* Enable hash insertion. This is broken for the
+ * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
+ * IPv4 hashes. */
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
+ }
+ /* Always enable XOFF signal from RX FIFO. We enable
+ * or disable transmission of pause frames at the MAC. */
+ EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
+ efx_writeo(efx, &reg, FR_AZ_RX_CFG);
+}
+
+/* This call performs hardware-specific global initialisation, such as
+ * defining the descriptor cache sizes and number of RSS channels.
+ * It does not set up any buffers, descriptor rings or event queues.
+ */
+static int falcon_init_nic(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+ int rc;
+
+ /* Use on-chip SRAM */
+ efx_reado(efx, &temp, FR_AB_NIC_STAT);
+ EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
+ efx_writeo(efx, &temp, FR_AB_NIC_STAT);
+
+ rc = falcon_reset_sram(efx);
+ if (rc)
+ return rc;
+
+ /* Clear the parity enables on the TX data fifos as
+ * they produce false parity errors because of timing issues
+ */
+ if (EFX_WORKAROUND_5129(efx)) {
+ efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
+ EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
+ efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
+ }
+
+ if (EFX_WORKAROUND_7244(efx)) {
+ efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
+ efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
+ }
+
+ /* XXX This is documented only for Falcon A0/A1 */
+ /* Setup RX. Wait for descriptor is broken and must
+ * be disabled. RXDP recovery shouldn't be needed, but is.
+ */
+ efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
+ EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
+ if (EFX_WORKAROUND_5583(efx))
+ EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
+ efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);
+
+ /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
+ * descriptors (which is bad).
+ */
+ efx_reado(efx, &temp, FR_AZ_TX_CFG);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
+ efx_writeo(efx, &temp, FR_AZ_TX_CFG);
+
+ falcon_init_rx_cfg(efx);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ /* Set hash key for IPv4 */
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
+ /* Set destination of both TX and RX Flush events */
+ EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
+ efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
+ }
+
+ efx_nic_init_common(efx);
+
+ return 0;
+}
+
+static void falcon_remove_nic(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ struct falcon_board *board = falcon_board(efx);
+ int rc;
+
+ board->type->fini(efx);
+
+ /* Remove I2C adapter and clear it in preparation for a retry */
+ rc = i2c_del_adapter(&board->i2c_adap);
+ BUG_ON(rc);
+ memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
+
+ efx_nic_free_buffer(efx, &efx->irq_status);
+
+ __falcon_reset_hw(efx, RESET_TYPE_ALL);
+
+ /* Release the second function after the reset */
+ if (nic_data->pci_dev2) {
+ pci_dev_put(nic_data->pci_dev2);
+ nic_data->pci_dev2 = NULL;
+ }
+
+ /* Tear down the private nic state */
+ kfree(efx->nic_data);
+ efx->nic_data = NULL;
+}
+
+static void falcon_update_nic_stats(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t cnt;
+
+ if (nic_data->stats_disable_count)
+ return;
+
+ efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
+ efx->n_rx_nodesc_drop_cnt +=
+ EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
+
+ if (nic_data->stats_pending &&
+ *nic_data->stats_dma_done == FALCON_STATS_DONE) {
+ nic_data->stats_pending = false;
+ rmb(); /* read the done flag before the stats */
+ efx->mac_op->update_stats(efx);
+ }
+}
+
+void falcon_start_nic_stats(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ spin_lock_bh(&efx->stats_lock);
+ if (--nic_data->stats_disable_count == 0)
+ falcon_stats_request(efx);
+ spin_unlock_bh(&efx->stats_lock);
+}
+
+void falcon_stop_nic_stats(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int i;
+
+ might_sleep();
+
+ spin_lock_bh(&efx->stats_lock);
+ ++nic_data->stats_disable_count;
+ spin_unlock_bh(&efx->stats_lock);
+
+ del_timer_sync(&nic_data->stats_timer);
+
+ /* Wait enough time for the most recent transfer to
+ * complete. */
+ for (i = 0; i < 4 && nic_data->stats_pending; i++) {
+ if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
+ break;
+ msleep(1);
+ }
+
+ spin_lock_bh(&efx->stats_lock);
+ falcon_stats_complete(efx);
+ spin_unlock_bh(&efx->stats_lock);
+}
+
+static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+ falcon_board(efx)->type->set_id_led(efx, mode);
+}
+
+/**************************************************************************
+ *
+ * Wake on LAN
+ *
+ **************************************************************************
+ */
+
+static void falcon_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int falcon_set_wol(struct efx_nic *efx, u32 type)
+{
+ if (type != 0)
+ return -EINVAL;
+ return 0;
+}
+
+/**************************************************************************
+ *
+ * Revision-dependent attributes used by efx.c and nic.c
+ *
+ **************************************************************************
+ */
+
+const struct efx_nic_type falcon_a1_nic_type = {
+ .probe = falcon_probe_nic,
+ .remove = falcon_remove_nic,
+ .init = falcon_init_nic,
+ .fini = efx_port_dummy_op_void,
+ .monitor = falcon_monitor,
+ .map_reset_reason = falcon_map_reset_reason,
+ .map_reset_flags = falcon_map_reset_flags,
+ .reset = falcon_reset_hw,
+ .probe_port = falcon_probe_port,
+ .remove_port = falcon_remove_port,
+ .handle_global_event = falcon_handle_global_event,
+ .prepare_flush = falcon_prepare_flush,
+ .update_stats = falcon_update_nic_stats,
+ .start_stats = falcon_start_nic_stats,
+ .stop_stats = falcon_stop_nic_stats,
+ .set_id_led = falcon_set_id_led,
+ .push_irq_moderation = falcon_push_irq_moderation,
+ .push_multicast_hash = falcon_push_multicast_hash,
+ .reconfigure_port = falcon_reconfigure_port,
+ .get_wol = falcon_get_wol,
+ .set_wol = falcon_set_wol,
+ .resume_wol = efx_port_dummy_op_void,
+ .test_nvram = falcon_test_nvram,
+ .default_mac_ops = &falcon_xmac_operations,
+
+ .revision = EFX_REV_FALCON_A1,
+ .mem_map_size = 0x20000,
+ .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
+ .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
+ .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
+ .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
+ .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
+ .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_buffer_padding = 0x24,
+ .max_interrupt_mode = EFX_INT_MODE_MSI,
+ .phys_addr_channels = 4,
+ .tx_dc_base = 0x130000,
+ .rx_dc_base = 0x100000,
+ .offload_features = NETIF_F_IP_CSUM,
+};
+
+const struct efx_nic_type falcon_b0_nic_type = {
+ .probe = falcon_probe_nic,
+ .remove = falcon_remove_nic,
+ .init = falcon_init_nic,
+ .fini = efx_port_dummy_op_void,
+ .monitor = falcon_monitor,
+ .map_reset_reason = falcon_map_reset_reason,
+ .map_reset_flags = falcon_map_reset_flags,
+ .reset = falcon_reset_hw,
+ .probe_port = falcon_probe_port,
+ .remove_port = falcon_remove_port,
+ .handle_global_event = falcon_handle_global_event,
+ .prepare_flush = falcon_prepare_flush,
+ .update_stats = falcon_update_nic_stats,
+ .start_stats = falcon_start_nic_stats,
+ .stop_stats = falcon_stop_nic_stats,
+ .set_id_led = falcon_set_id_led,
+ .push_irq_moderation = falcon_push_irq_moderation,
+ .push_multicast_hash = falcon_push_multicast_hash,
+ .reconfigure_port = falcon_reconfigure_port,
+ .get_wol = falcon_get_wol,
+ .set_wol = falcon_set_wol,
+ .resume_wol = efx_port_dummy_op_void,
+ .test_registers = falcon_b0_test_registers,
+ .test_nvram = falcon_test_nvram,
+ .default_mac_ops = &falcon_xmac_operations,
+
+ .revision = EFX_REV_FALCON_B0,
+ /* Map everything up to and including the RSS indirection
+ * table. Don't map MSI-X table, MSI-X PBA since Linux
+ * requires that they not be mapped. */
+ .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
+ FR_BZ_RX_INDIRECTION_TBL_STEP *
+ FR_BZ_RX_INDIRECTION_TBL_ROWS),
+ .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
+ .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
+ .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
+ .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
+ .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
+ .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_buffer_hash_size = 0x10,
+ .rx_buffer_padding = 0,
+ .max_interrupt_mode = EFX_INT_MODE_MSIX,
+ .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
+ * interrupt handler only supports 32
+ * channels */
+ .tx_dc_base = 0x130000,
+ .rx_dc_base = 0x100000,
+ .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
+};
+
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
new file mode 100644
index 000000000000..b9cc846811d6
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -0,0 +1,776 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2007-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/rtnetlink.h>
+
+#include "net_driver.h"
+#include "phy.h"
+#include "efx.h"
+#include "nic.h"
+#include "workarounds.h"
+
+/* Macros for unpacking the board revision */
+/* The revision info is in host byte order. */
+#define FALCON_BOARD_TYPE(_rev) (_rev >> 8)
+#define FALCON_BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
+#define FALCON_BOARD_MINOR(_rev) (_rev & 0xf)
+
+/* Board types */
+#define FALCON_BOARD_SFE4001 0x01
+#define FALCON_BOARD_SFE4002 0x02
+#define FALCON_BOARD_SFE4003 0x03
+#define FALCON_BOARD_SFN4112F 0x52
+
+/* Board temperature is about 15°C above ambient when air flow is
+ * limited. The maximum acceptable ambient temperature varies
+ * depending on the PHY specifications but the critical temperature
+ * above which we should shut down to avoid damage is 80°C. */
+#define FALCON_BOARD_TEMP_BIAS 15
+#define FALCON_BOARD_TEMP_CRIT (80 + FALCON_BOARD_TEMP_BIAS)
+
+/* SFC4000 datasheet says: 'The maximum permitted junction temperature
+ * is 125°C; the thermal design of the environment for the SFC4000
+ * should aim to keep this well below 100°C.' */
+#define FALCON_JUNC_TEMP_MIN 0
+#define FALCON_JUNC_TEMP_MAX 90
+#define FALCON_JUNC_TEMP_CRIT 125
+
+/*****************************************************************************
+ * Support for LM87 sensor chip used on several boards
+ */
+#define LM87_REG_TEMP_HW_INT_LOCK 0x13
+#define LM87_REG_TEMP_HW_EXT_LOCK 0x14
+#define LM87_REG_TEMP_HW_INT 0x17
+#define LM87_REG_TEMP_HW_EXT 0x18
+#define LM87_REG_TEMP_EXT1 0x26
+#define LM87_REG_TEMP_INT 0x27
+#define LM87_REG_ALARMS1 0x41
+#define LM87_REG_ALARMS2 0x42
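+
+/* Each limit macro below expands to two (register, value) pairs --
+ * high limit first, then low limit -- for the 0-terminated tables
+ * consumed by efx_poke_lm87(). For example, LM87_IN_LIMITS(1, 0x4c,
+ * 0x5e) expands to: 0x2D, 0x5e, 0x2E, 0x4c. */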
+#define LM87_IN_LIMITS(nr, _min, _max) \
+ 0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min
+#define LM87_AIN_LIMITS(nr, _min, _max) \
+ 0x3B + (nr), _max, 0x1A + (nr), _min
+#define LM87_TEMP_INT_LIMITS(_min, _max) \
+ 0x39, _max, 0x3A, _min
+#define LM87_TEMP_EXT1_LIMITS(_min, _max) \
+ 0x37, _max, 0x38, _min
+
+#define LM87_ALARM_TEMP_INT 0x10
+#define LM87_ALARM_TEMP_EXT1 0x20
+
+#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
+
+static int efx_poke_lm87(struct i2c_client *client, const u8 *reg_values)
+{
+ while (*reg_values) {
+ u8 reg = *reg_values++;
+ u8 value = *reg_values++;
+ int rc = i2c_smbus_write_byte_data(client, reg, value);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static const u8 falcon_lm87_common_regs[] = {
+ LM87_REG_TEMP_HW_INT_LOCK, FALCON_BOARD_TEMP_CRIT,
+ LM87_REG_TEMP_HW_INT, FALCON_BOARD_TEMP_CRIT,
+ LM87_TEMP_EXT1_LIMITS(FALCON_JUNC_TEMP_MIN, FALCON_JUNC_TEMP_MAX),
+ LM87_REG_TEMP_HW_EXT_LOCK, FALCON_JUNC_TEMP_CRIT,
+ LM87_REG_TEMP_HW_EXT, FALCON_JUNC_TEMP_CRIT,
+ 0
+};
+
+static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
+ const u8 *reg_values)
+{
+ struct falcon_board *board = falcon_board(efx);
+ struct i2c_client *client = i2c_new_device(&board->i2c_adap, info);
+ int rc;
+
+ if (!client)
+ return -EIO;
+
+ /* Read-to-clear alarm/interrupt status */
+ i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
+ i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
+
+ rc = efx_poke_lm87(client, reg_values);
+ if (rc)
+ goto err;
+ rc = efx_poke_lm87(client, falcon_lm87_common_regs);
+ if (rc)
+ goto err;
+
+ board->hwmon_client = client;
+ return 0;
+
+err:
+ i2c_unregister_device(client);
+ return rc;
+}
+
+static void efx_fini_lm87(struct efx_nic *efx)
+{
+ i2c_unregister_device(falcon_board(efx)->hwmon_client);
+}
+
+static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
+{
+ struct i2c_client *client = falcon_board(efx)->hwmon_client;
+ bool temp_crit, elec_fault, is_failure;
+ u16 alarms;
+ s32 reg;
+
+ /* If link is up then do not monitor temperature */
+ if (EFX_WORKAROUND_7884(efx) && efx->link_state.up)
+ return 0;
+
+ reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
+ if (reg < 0)
+ return reg;
+ alarms = reg;
+ reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
+ if (reg < 0)
+ return reg;
+ alarms |= reg << 8;
+ alarms &= mask;
+
+ temp_crit = false;
+ if (alarms & LM87_ALARM_TEMP_INT) {
+ reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_INT);
+ if (reg < 0)
+ return reg;
+ if (reg > FALCON_BOARD_TEMP_CRIT)
+ temp_crit = true;
+ }
+ if (alarms & LM87_ALARM_TEMP_EXT1) {
+ reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_EXT1);
+ if (reg < 0)
+ return reg;
+ if (reg > FALCON_JUNC_TEMP_CRIT)
+ temp_crit = true;
+ }
+ elec_fault = alarms & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1);
+ is_failure = temp_crit || elec_fault;
+
+ if (alarms)
+ netif_err(efx, hw, efx->net_dev,
+ "LM87 detected a hardware %s (status %02x:%02x)"
+ "%s%s%s%s\n",
+ is_failure ? "failure" : "problem",
+ alarms & 0xff, alarms >> 8,
+ (alarms & LM87_ALARM_TEMP_INT) ?
+ "; board is overheating" : "",
+ (alarms & LM87_ALARM_TEMP_EXT1) ?
+ "; controller is overheating" : "",
+ temp_crit ? "; reached critical temperature" : "",
+ elec_fault ? "; electrical fault" : "");
+
+ return is_failure ? -ERANGE : 0;
+}
+
+#else /* !CONFIG_SENSORS_LM87 */
+
+static inline int
+efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
+ const u8 *reg_values)
+{
+ return 0;
+}
+static inline void efx_fini_lm87(struct efx_nic *efx)
+{
+}
+static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
+{
+ return 0;
+}
+
+#endif /* CONFIG_SENSORS_LM87 */
+
+/*****************************************************************************
+ * Support for the SFE4001 NIC.
+ *
+ * The SFE4001 does not power-up fully at reset due to its high power
+ * consumption. We control its power via a PCA9539 I/O expander.
+ * It also has a MAX6647 temperature monitor which we expose to
+ * the lm90 driver.
+ *
+ * This also provides minimal support for reflashing the PHY, which is
+ * initiated by resetting it with the FLASH_CFG_1 pin pulled down.
+ * On SFE4001 rev A2 and later this is connected to the 3V3X output of
+ * the IO-expander.
+ * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually
+ * exclusive with the network device being open.
+ */
+
+/**************************************************************************
+ * Support for I2C IO Expander device on SFE4001
+ */
+#define PCA9539 0x74
+
+#define P0_IN 0x00
+#define P0_OUT 0x02
+#define P0_INVERT 0x04
+#define P0_CONFIG 0x06
+
+#define P0_EN_1V0X_LBN 0
+#define P0_EN_1V0X_WIDTH 1
+#define P0_EN_1V2_LBN 1
+#define P0_EN_1V2_WIDTH 1
+#define P0_EN_2V5_LBN 2
+#define P0_EN_2V5_WIDTH 1
+#define P0_EN_3V3X_LBN 3
+#define P0_EN_3V3X_WIDTH 1
+#define P0_EN_5V_LBN 4
+#define P0_EN_5V_WIDTH 1
+#define P0_SHORTEN_JTAG_LBN 5
+#define P0_SHORTEN_JTAG_WIDTH 1
+#define P0_X_TRST_LBN 6
+#define P0_X_TRST_WIDTH 1
+#define P0_DSP_RESET_LBN 7
+#define P0_DSP_RESET_WIDTH 1
+
+#define P1_IN 0x01
+#define P1_OUT 0x03
+#define P1_INVERT 0x05
+#define P1_CONFIG 0x07
+
+#define P1_AFE_PWD_LBN 0
+#define P1_AFE_PWD_WIDTH 1
+#define P1_DSP_PWD25_LBN 1
+#define P1_DSP_PWD25_WIDTH 1
+#define P1_RESERVED_LBN 2
+#define P1_RESERVED_WIDTH 2
+#define P1_SPARE_LBN 4
+#define P1_SPARE_WIDTH 4
+
+/* Temperature Sensor */
+#define MAX664X_REG_RSL 0x02
+#define MAX664X_REG_WLHO 0x0B
+
+static void sfe4001_poweroff(struct efx_nic *efx)
+{
+ struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
+ struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;
+
+ /* Turn off all power rails and disable outputs */
+ i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff);
+ i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff);
+ i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff);
+
+ /* Clear any over-temperature alert */
+ i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
+}
+
+static int sfe4001_poweron(struct efx_nic *efx)
+{
+ struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
+ struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;
+ unsigned int i, j;
+ int rc;
+ u8 out;
+
+ /* Clear any previous over-temperature alert */
+ rc = i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
+ if (rc < 0)
+ return rc;
+
+ /* Enable port 0 and port 1 outputs on IO expander */
+ rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
+ if (rc)
+ return rc;
+ rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
+ 0xff & ~(1 << P1_SPARE_LBN));
+ if (rc)
+ goto fail_on;
+
+ /* If PHY power is on, turn it all off and wait 1 second to
+ * ensure a full reset.
+ */
+ rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT);
+ if (rc < 0)
+ goto fail_on;
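+ /* The power-enable outputs are active-low, so out = 0xff leaves
+ * every rail off; the (0 << ...) terms below merely document
+ * which enable bits are being held clear. */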
+ out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
+ (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
+ (0 << P0_EN_1V0X_LBN));
+ if (rc != out) {
+ netif_info(efx, hw, efx->net_dev, "power-cycling PHY\n");
+ rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
+ if (rc)
+ goto fail_on;
+ schedule_timeout_uninterruptible(HZ);
+ }
+
+ for (i = 0; i < 20; ++i) {
+ /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
+ out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
+ (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
+ (1 << P0_X_TRST_LBN));
+ if (efx->phy_mode & PHY_MODE_SPECIAL)
+ out |= 1 << P0_EN_3V3X_LBN;
+
+ rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
+ if (rc)
+ goto fail_on;
+ msleep(10);
+
+ /* Turn on 1V power rail */
+ out &= ~(1 << P0_EN_1V0X_LBN);
+ rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
+ if (rc)
+ goto fail_on;
+
+ netif_info(efx, hw, efx->net_dev,
+ "waiting for DSP boot (attempt %d)...\n", i);
+
+ /* In flash config mode, DSP does not turn on AFE, so
+ * just wait 1 second.
+ */
+ if (efx->phy_mode & PHY_MODE_SPECIAL) {
+ schedule_timeout_uninterruptible(HZ);
+ return 0;
+ }
+
+ for (j = 0; j < 10; ++j) {
+ msleep(100);
+
+ /* Check DSP has asserted AFE power line */
+ rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
+ if (rc < 0)
+ goto fail_on;
+ if (rc & (1 << P1_AFE_PWD_LBN))
+ return 0;
+ }
+ }
+
+ netif_info(efx, hw, efx->net_dev, "timed out waiting for DSP boot\n");
+ rc = -ETIMEDOUT;
+fail_on:
+ sfe4001_poweroff(efx);
+ return rc;
+}
+
+static ssize_t show_phy_flash_cfg(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
+}
+
+static ssize_t set_phy_flash_cfg(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ enum efx_phy_mode old_mode, new_mode;
+ int err;
+
+ rtnl_lock();
+ old_mode = efx->phy_mode;
+ if (count == 0 || *buf == '0')
+ new_mode = old_mode & ~PHY_MODE_SPECIAL;
+ else
+ new_mode = PHY_MODE_SPECIAL;
+ if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
+ err = 0;
+ } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
+ err = -EBUSY;
+ } else {
+ /* Reset the PHY, reconfigure the MAC and enable/disable
+ * MAC stats accordingly. */
+ efx->phy_mode = new_mode;
+ if (new_mode & PHY_MODE_SPECIAL)
+ falcon_stop_nic_stats(efx);
+ err = sfe4001_poweron(efx);
+ if (!err)
+ err = efx_reconfigure_port(efx);
+ if (!(new_mode & PHY_MODE_SPECIAL))
+ falcon_start_nic_stats(efx);
+ }
+ rtnl_unlock();
+
+ return err ? err : count;
+}
+
+static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg);
+
+static void sfe4001_fini(struct efx_nic *efx)
+{
+ struct falcon_board *board = falcon_board(efx);
+
+ netif_info(efx, drv, efx->net_dev, "%s\n", __func__);
+
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
+ sfe4001_poweroff(efx);
+ i2c_unregister_device(board->ioexp_client);
+ i2c_unregister_device(board->hwmon_client);
+}
+
+static int sfe4001_check_hw(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ s32 status;
+
+ /* If XAUI link is up then do not monitor */
+ if (EFX_WORKAROUND_7884(efx) && !nic_data->xmac_poll_required)
+ return 0;
+
+ /* Check the powered status of the PHY. Lack of power implies that
+ * the MAX6647 has shut down power to it, probably due to a
+ * temperature alarm. We read the power status rather than the
+ * MAX6647 status directly because the latter is read-to-clear and
+ * would thus start to power up the PHY again when polled, causing
+ * us to blip the power undesirably.
+ * We know we can read from the IO expander because we did it
+ * during power-on, so assume any failure now is bad news. */
+ status = i2c_smbus_read_byte_data(falcon_board(efx)->ioexp_client, P1_IN);
+ if (status >= 0 &&
+ (status & ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN))) != 0)
+ return 0;
+
+ /* Use board power control, not PHY power control */
+ sfe4001_poweroff(efx);
+ efx->phy_mode = PHY_MODE_OFF;
+
+ return (status < 0) ? -EIO : -ERANGE;
+}
+
+static struct i2c_board_info sfe4001_hwmon_info = {
+ I2C_BOARD_INFO("max6647", 0x4e),
+};
+
+/* This board uses an I2C I/O expander to provide power to the PHY. The
+ * power must be turned on before the PHY can be used.
+ * Context: Process context, rtnl lock held
+ */
+static int sfe4001_init(struct efx_nic *efx)
+{
+ struct falcon_board *board = falcon_board(efx);
+ int rc;
+
+#if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE)
+ board->hwmon_client =
+ i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info);
+#else
+ board->hwmon_client =
+ i2c_new_dummy(&board->i2c_adap, sfe4001_hwmon_info.addr);
+#endif
+ if (!board->hwmon_client)
+ return -EIO;
+
+ /* Raise board/PHY high limit from 85 to 90 degrees Celsius */
+ rc = i2c_smbus_write_byte_data(board->hwmon_client,
+ MAX664X_REG_WLHO, 90);
+ if (rc)
+ goto fail_hwmon;
+
+ board->ioexp_client = i2c_new_dummy(&board->i2c_adap, PCA9539);
+ if (!board->ioexp_client) {
+ rc = -EIO;
+ goto fail_hwmon;
+ }
+
+ if (efx->phy_mode & PHY_MODE_SPECIAL) {
+ /* PHY won't generate a 156.25 MHz clock and MAC stats fetch
+ * will fail. */
+ falcon_stop_nic_stats(efx);
+ }
+ rc = sfe4001_poweron(efx);
+ if (rc)
+ goto fail_ioexp;
+
+ rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
+ if (rc)
+ goto fail_on;
+
+ netif_info(efx, hw, efx->net_dev, "PHY is powered on\n");
+ return 0;
+
+fail_on:
+ sfe4001_poweroff(efx);
+fail_ioexp:
+ i2c_unregister_device(board->ioexp_client);
+fail_hwmon:
+ i2c_unregister_device(board->hwmon_client);
+ return rc;
+}
+
+/*****************************************************************************
+ * Support for the SFE4002
+ *
+ */
+static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
+
+static const u8 sfe4002_lm87_regs[] = {
+ LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */
+ LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
+ LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
+ LM87_IN_LIMITS(3, 0xac, 0xd4), /* 5V: 5.0V +/- 10% */
+ LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
+ LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
+ LM87_AIN_LIMITS(0, 0x98, 0xbb), /* AIN1: 1.66V +/- 10% */
+ LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */
+ LM87_TEMP_INT_LIMITS(0, 80 + FALCON_BOARD_TEMP_BIAS),
+ LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
+ 0
+};
+
+static struct i2c_board_info sfe4002_hwmon_info = {
+ I2C_BOARD_INFO("lm87", 0x2e),
+ .platform_data = &sfe4002_lm87_channel,
+};
+
+/****************************************************************************/
+/* LED allocations. Note that on rev A0 boards the schematic and the reality
+ * differ: red and green are swapped. Below is the fixed (A1) layout (there
+ * are only 3 A0 boards in existence, so no real reason to make this
+ * conditional).
+ */
+#define SFE4002_FAULT_LED (2) /* Red */
+#define SFE4002_RX_LED (0) /* Green */
+#define SFE4002_TX_LED (1) /* Amber */
+
+static void sfe4002_init_phy(struct efx_nic *efx)
+{
+ /* Set the TX and RX LEDs to reflect status and activity, and the
+ * fault LED off */
+ falcon_qt202x_set_led(efx, SFE4002_TX_LED,
+ QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
+ falcon_qt202x_set_led(efx, SFE4002_RX_LED,
+ QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
+ falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
+}
+
+static void sfe4002_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+ falcon_qt202x_set_led(
+ efx, SFE4002_FAULT_LED,
+ (mode == EFX_LED_ON) ? QUAKE_LED_ON : QUAKE_LED_OFF);
+}
+
+static int sfe4002_check_hw(struct efx_nic *efx)
+{
+ struct falcon_board *board = falcon_board(efx);
+
+ /* Rev A0 SFE4002 boards report a temperature fault the whole time
+ * (bad sensor), so we mask it out. */
+ unsigned alarm_mask =
+ (board->major == 0 && board->minor == 0) ?
+ ~LM87_ALARM_TEMP_EXT1 : ~0;
+
+ return efx_check_lm87(efx, alarm_mask);
+}
+
+static int sfe4002_init(struct efx_nic *efx)
+{
+ return efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
+}
+
+/*****************************************************************************
+ * Support for the SFN4112F
+ *
+ */
+static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
+
+static const u8 sfn4112f_lm87_regs[] = {
+ LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */
+ LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
+ LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
+ LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
+ LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
+ LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */
+ LM87_TEMP_INT_LIMITS(0, 60 + FALCON_BOARD_TEMP_BIAS),
+ LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
+ 0
+};
+
+static struct i2c_board_info sfn4112f_hwmon_info = {
+ I2C_BOARD_INFO("lm87", 0x2e),
+ .platform_data = &sfn4112f_lm87_channel,
+};
+
+#define SFN4112F_ACT_LED 0
+#define SFN4112F_LINK_LED 1
+
+static void sfn4112f_init_phy(struct efx_nic *efx)
+{
+ falcon_qt202x_set_led(efx, SFN4112F_ACT_LED,
+ QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
+ falcon_qt202x_set_led(efx, SFN4112F_LINK_LED,
+ QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
+}
+
+static void sfn4112f_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+ int reg;
+
+ switch (mode) {
+ case EFX_LED_OFF:
+ reg = QUAKE_LED_OFF;
+ break;
+ case EFX_LED_ON:
+ reg = QUAKE_LED_ON;
+ break;
+ default:
+ reg = QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT;
+ break;
+ }
+
+ falcon_qt202x_set_led(efx, SFN4112F_LINK_LED, reg);
+}
+
+static int sfn4112f_check_hw(struct efx_nic *efx)
+{
+ /* Mask out unused sensors */
+ return efx_check_lm87(efx, ~0x48);
+}
+
+static int sfn4112f_init(struct efx_nic *efx)
+{
+ return efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
+}
+
+/*****************************************************************************
+ * Support for the SFE4003
+ *
+ */
+static u8 sfe4003_lm87_channel = 0x03; /* use AIN not FAN inputs */
+
+static const u8 sfe4003_lm87_regs[] = {
+ LM87_IN_LIMITS(0, 0x67, 0x7f), /* 2.5V: 1.5V +/- 10% */
+ LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
+ LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
+ LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
+ LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
+ LM87_TEMP_INT_LIMITS(0, 70 + FALCON_BOARD_TEMP_BIAS),
+ 0
+};
+
+static struct i2c_board_info sfe4003_hwmon_info = {
+ I2C_BOARD_INFO("lm87", 0x2e),
+ .platform_data = &sfe4003_lm87_channel,
+};
+
+/* Board-specific LED info. */
+#define SFE4003_RED_LED_GPIO 11
+#define SFE4003_LED_ON 1
+#define SFE4003_LED_OFF 0
+
+static void sfe4003_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+ struct falcon_board *board = falcon_board(efx);
+
+ /* The LEDs were not wired to GPIOs before A3 */
+ if (board->minor < 3 && board->major == 0)
+ return;
+
+ falcon_txc_set_gpio_val(
+ efx, SFE4003_RED_LED_GPIO,
+ (mode == EFX_LED_ON) ? SFE4003_LED_ON : SFE4003_LED_OFF);
+}
+
+static void sfe4003_init_phy(struct efx_nic *efx)
+{
+ struct falcon_board *board = falcon_board(efx);
+
+ /* The LEDs were not wired to GPIOs before A3 */
+ if (board->minor < 3 && board->major == 0)
+ return;
+
+ falcon_txc_set_gpio_dir(efx, SFE4003_RED_LED_GPIO, TXC_GPIO_DIR_OUTPUT);
+ falcon_txc_set_gpio_val(efx, SFE4003_RED_LED_GPIO, SFE4003_LED_OFF);
+}
+
+static int sfe4003_check_hw(struct efx_nic *efx)
+{
+ struct falcon_board *board = falcon_board(efx);
+
+ /* Rev A0/A1/A2 SFE4003 boards report a temperature fault the whole
+ * time (bad sensor), so we mask it out. */
+ unsigned alarm_mask =
+ (board->major == 0 && board->minor <= 2) ?
+ ~LM87_ALARM_TEMP_EXT1 : ~0;
+
+ return efx_check_lm87(efx, alarm_mask);
+}
+
+static int sfe4003_init(struct efx_nic *efx)
+{
+ return efx_init_lm87(efx, &sfe4003_hwmon_info, sfe4003_lm87_regs);
+}
+
+static const struct falcon_board_type board_types[] = {
+ {
+ .id = FALCON_BOARD_SFE4001,
+ .ref_model = "SFE4001",
+ .gen_type = "10GBASE-T adapter",
+ .init = sfe4001_init,
+ .init_phy = efx_port_dummy_op_void,
+ .fini = sfe4001_fini,
+ .set_id_led = tenxpress_set_id_led,
+ .monitor = sfe4001_check_hw,
+ },
+ {
+ .id = FALCON_BOARD_SFE4002,
+ .ref_model = "SFE4002",
+ .gen_type = "XFP adapter",
+ .init = sfe4002_init,
+ .init_phy = sfe4002_init_phy,
+ .fini = efx_fini_lm87,
+ .set_id_led = sfe4002_set_id_led,
+ .monitor = sfe4002_check_hw,
+ },
+ {
+ .id = FALCON_BOARD_SFE4003,
+ .ref_model = "SFE4003",
+ .gen_type = "10GBASE-CX4 adapter",
+ .init = sfe4003_init,
+ .init_phy = sfe4003_init_phy,
+ .fini = efx_fini_lm87,
+ .set_id_led = sfe4003_set_id_led,
+ .monitor = sfe4003_check_hw,
+ },
+ {
+ .id = FALCON_BOARD_SFN4112F,
+ .ref_model = "SFN4112F",
+ .gen_type = "SFP+ adapter",
+ .init = sfn4112f_init,
+ .init_phy = sfn4112f_init_phy,
+ .fini = efx_fini_lm87,
+ .set_id_led = sfn4112f_set_id_led,
+ .monitor = sfn4112f_check_hw,
+ },
+};
+
+int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
+{
+ struct falcon_board *board = falcon_board(efx);
+ u8 type_id = FALCON_BOARD_TYPE(revision_info);
+ int i;
+
+ board->major = FALCON_BOARD_MAJOR(revision_info);
+ board->minor = FALCON_BOARD_MINOR(revision_info);
+
+ for (i = 0; i < ARRAY_SIZE(board_types); i++)
+ if (board_types[i].id == type_id)
+ board->type = &board_types[i];
+
+ if (board->type) {
+ netif_info(efx, probe, efx->net_dev, "board is %s rev %c%d\n",
+ (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
+ ? board->type->ref_model : board->type->gen_type,
+ 'A' + board->major, board->minor);
+ return 0;
+ } else {
+ netif_err(efx, probe, efx->net_dev, "unknown board type %d\n",
+ type_id);
+ return -ENODEV;
+ }
+}
diff --git a/drivers/net/ethernet/sfc/falcon_xmac.c b/drivers/net/ethernet/sfc/falcon_xmac.c
new file mode 100644
index 000000000000..9516452c079c
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon_xmac.c
@@ -0,0 +1,369 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/delay.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "nic.h"
+#include "regs.h"
+#include "io.h"
+#include "mac.h"
+#include "mdio_10g.h"
+#include "workarounds.h"
+
+/**************************************************************************
+ *
+ * MAC operations
+ *
+ *************************************************************************/
+
+/* Configure the XAUI driver that is an output from Falcon */
+void falcon_setup_xaui(struct efx_nic *efx)
+{
+ efx_oword_t sdctl, txdrv;
+
+ /* Move the XAUI into low power, unless there is no PHY, in
+ * which case the XAUI will have to drive a cable. */
+ if (efx->phy_type == PHY_TYPE_NONE)
+ return;
+
+ efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
+ efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
+
+ EFX_POPULATE_OWORD_8(txdrv,
+ FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
+ FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
+ FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
+ FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
+ efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
+}
+
+int falcon_reset_xaui(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg;
+ int count;
+
+ /* Don't fetch MAC statistics over an XMAC reset */
+ WARN_ON(nic_data->stats_disable_count == 0);
+
+ /* Start reset sequence */
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
+ efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
+
+ /* Wait up to 10 ms for completion, then reinitialise */
+ for (count = 0; count < 1000; count++) {
+ efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
+ if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
+ EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
+ falcon_setup_xaui(efx);
+ return 0;
+ }
+ udelay(10);
+ }
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for XAUI/XGXS reset\n");
+ return -ETIMEDOUT;
+}
+
+static void falcon_ack_status_intr(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg;
+
+ if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
+ return;
+
+ /* We expect xgmii faults if the wireside link is down */
+ if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
+ return;
+
+ /* We can only use this interrupt to signal the negative edge of
+ * xaui_align [we have to poll the positive edge]. */
+ if (nic_data->xmac_poll_required)
+ return;
+
+ efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
+}
+
+static bool falcon_xgxs_link_ok(struct efx_nic *efx)
+{
+ efx_oword_t reg;
+ bool align_done, link_ok = false;
+ int sync_status;
+
+ /* Read link status */
+ efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+
+ align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
+ sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
+ if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
+ link_ok = true;
+
+ /* Clear the latched (write-1-to-clear) lane status bits ready for
+ * the next read */
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
+ efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
+
+ return link_ok;
+}
+
+static bool falcon_xmac_link_ok(struct efx_nic *efx)
+{
+ /*
+ * Check MAC's XGXS link status except when using XGMII loopback
+ * which bypasses the XGXS block.
+ * If possible, check PHY's XGXS link status except when using
+ * MAC loopback.
+ */
+ return (efx->loopback_mode == LOOPBACK_XGMII ||
+ falcon_xgxs_link_ok(efx)) &&
+ (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
+ LOOPBACK_INTERNAL(efx) ||
+ efx_mdio_phyxgxs_lane_sync(efx));
+}
+
+static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
+{
+ unsigned int max_frame_len;
+ efx_oword_t reg;
+ bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
+ bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
+
+ /* Configure MAC - cut-thru mode is hard wired on */
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_AB_XM_RX_JUMBO_MODE, 1,
+ FRF_AB_XM_TX_STAT_EN, 1,
+ FRF_AB_XM_RX_STAT_EN, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
+
+ /* Configure TX */
+ EFX_POPULATE_OWORD_6(reg,
+ FRF_AB_XM_TXEN, 1,
+ FRF_AB_XM_TX_PRMBL, 1,
+ FRF_AB_XM_AUTO_PAD, 1,
+ FRF_AB_XM_TXCRC, 1,
+ FRF_AB_XM_FCNTL, tx_fc,
+ FRF_AB_XM_IPG, 0x3);
+ efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
+
+ /* Configure RX */
+ EFX_POPULATE_OWORD_5(reg,
+ FRF_AB_XM_RXEN, 1,
+ FRF_AB_XM_AUTO_DEPAD, 0,
+ FRF_AB_XM_ACPT_ALL_MCAST, 1,
+ FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
+ FRF_AB_XM_PASS_CRC_ERR, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
+
+ /* Set frame length */
+ max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
+ efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
+ FRF_AB_XM_TX_JUMBO_MODE, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
+
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
+ FRF_AB_XM_DIS_FCNTL, !rx_fc);
+ efx_writeo(efx, &reg, FR_AB_XM_FC);
+
+ /* Set MAC address */
+ memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
+ efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
+ memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
+ efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
+}
+
+static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
+{
+ efx_oword_t reg;
+ bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
+ bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
+ bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
+
+ /* The XGXS block is flaky and will need to be reset if moving
+ * into or out of XGMII, XGXS or XAUI loopbacks. */
+ if (EFX_WORKAROUND_5147(efx)) {
+ bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
+ bool reset_xgxs;
+
+ efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+ old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
+ old_xgmii_loopback =
+ EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
+
+ efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
+ old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
+
+ /* The PHY driver may have turned XAUI off */
+ reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
+ (xaui_loopback != old_xaui_loopback) ||
+ (xgmii_loopback != old_xgmii_loopback));
+
+ if (reset_xgxs)
+ falcon_reset_xaui(efx);
+ }
+
+ efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
+ (xgxs_loopback || xaui_loopback) ?
+ FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
+ efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
+
+ efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
+ efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
+}
+
+/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
+static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
+{
+ bool mac_up = falcon_xmac_link_ok(efx);
+
+ if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
+ efx_phy_mode_disabled(efx->phy_mode))
+ /* XAUI link is expected to be down */
+ return mac_up;
+
+ falcon_stop_nic_stats(efx);
+
+ while (!mac_up && tries) {
+ netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
+ falcon_reset_xaui(efx);
+ udelay(200);
+
+ mac_up = falcon_xmac_link_ok(efx);
+ --tries;
+ }
+
+ falcon_start_nic_stats(efx);
+
+ return mac_up;
+}
+
+static bool falcon_xmac_check_fault(struct efx_nic *efx)
+{
+ return !falcon_xmac_link_ok_retry(efx, 5);
+}
+
+static int falcon_reconfigure_xmac(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ falcon_reconfigure_xgxs_core(efx);
+ falcon_reconfigure_xmac_core(efx);
+
+ falcon_reconfigure_mac_wrapper(efx);
+
+ nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
+ falcon_ack_status_intr(efx);
+
+ return 0;
+}
+
+static void falcon_update_stats_xmac(struct efx_nic *efx)
+{
+ struct efx_mac_stats *mac_stats = &efx->mac_stats;
+
+ /* Update MAC stats from DMAed values */
+ FALCON_STAT(efx, XgRxOctets, rx_bytes);
+ FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes);
+ FALCON_STAT(efx, XgRxPkts, rx_packets);
+ FALCON_STAT(efx, XgRxPktsOK, rx_good);
+ FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast);
+ FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast);
+ FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast);
+ FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64);
+ FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo);
+ FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo);
+ FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64);
+ FALCON_STAT(efx, XgRxDropEvents, rx_overflow);
+ FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad);
+ FALCON_STAT(efx, XgRxAlignError, rx_align_error);
+ FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error);
+ FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error);
+ FALCON_STAT(efx, XgRxControlPkts, rx_control);
+ FALCON_STAT(efx, XgRxPausePkts, rx_pause);
+ FALCON_STAT(efx, XgRxPkts64Octets, rx_64);
+ FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127);
+ FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255);
+ FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511);
+ FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023);
+ FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx);
+ FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo);
+ FALCON_STAT(efx, XgRxLengthError, rx_length_error);
+ FALCON_STAT(efx, XgTxPkts, tx_packets);
+ FALCON_STAT(efx, XgTxOctets, tx_bytes);
+ FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast);
+ FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast);
+ FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast);
+ FALCON_STAT(efx, XgTxControlPkts, tx_control);
+ FALCON_STAT(efx, XgTxPausePkts, tx_pause);
+ FALCON_STAT(efx, XgTxPkts64Octets, tx_64);
+ FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127);
+ FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255);
+ FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511);
+ FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023);
+ FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx);
+ FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo);
+ FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64);
+ FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo);
+ FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp);
+ FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error);
+ FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);
+
+ /* Update derived statistics */
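+ /* Control frames (e.g. pause frames) are minimum-size 64-byte
+ * frames, hence the 64 bytes subtracted per control packet. */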
+ mac_stats->tx_good_bytes =
+ (mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
+ mac_stats->tx_control * 64);
+ mac_stats->rx_bad_bytes =
+ (mac_stats->rx_bytes - mac_stats->rx_good_bytes -
+ mac_stats->rx_control * 64);
+}
+
+void falcon_poll_xmac(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
+ !nic_data->xmac_poll_required)
+ return;
+
+ nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
+ falcon_ack_status_intr(efx);
+}
+
+const struct efx_mac_operations falcon_xmac_operations = {
+ .reconfigure = falcon_reconfigure_xmac,
+ .update_stats = falcon_update_stats_xmac,
+ .check_fault = falcon_xmac_check_fault,
+};
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
new file mode 100644
index 000000000000..2b9636f96e05
--- /dev/null
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -0,0 +1,727 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/in.h>
+#include <net/ip.h>
+#include "efx.h"
+#include "filter.h"
+#include "io.h"
+#include "nic.h"
+#include "regs.h"
+
+/* "Fudge factors" - difference between programmed value and actual depth.
+ * Due to pipelined implementation we need to program H/W with a value that
+ * is larger than the hop limit we want.
+ */
+#define FILTER_CTL_SRCH_FUDGE_WILD 3
+#define FILTER_CTL_SRCH_FUDGE_FULL 1
+
+/* Hard maximum hop limit. Hardware will time-out beyond 200-something.
+ * We also need to avoid infinite loops in efx_filter_search() when the
+ * table is full.
+ */
+#define FILTER_CTL_SRCH_MAX 200
+
+/* Don't try very hard to find space for performance hints, as this is
+ * counter-productive. */
+#define FILTER_CTL_SRCH_HINT_MAX 5
+
+enum efx_filter_table_id {
+ EFX_FILTER_TABLE_RX_IP = 0,
+ EFX_FILTER_TABLE_RX_MAC,
+ EFX_FILTER_TABLE_COUNT,
+};
+
+struct efx_filter_table {
+ enum efx_filter_table_id id;
+ u32 offset; /* address of table relative to BAR */
+ unsigned size; /* number of entries */
+ unsigned step; /* step between entries */
+ unsigned used; /* number currently used */
+ unsigned long *used_bitmap;
+ struct efx_filter_spec *spec;
+ unsigned search_depth[EFX_FILTER_TYPE_COUNT];
+};
+
+struct efx_filter_state {
+ spinlock_t lock;
+ struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
+#ifdef CONFIG_RFS_ACCEL
+ u32 *rps_flow_id;
+ unsigned rps_expire_index;
+#endif
+};
+
+/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
+ * key derived from the n-tuple. The initial LFSR state is 0xffff. */
+static u16 efx_filter_hash(u32 key)
+{
+ u16 tmp;
+
+ /* First 16 rounds */
+ tmp = 0x1fff ^ key >> 16;
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ tmp = tmp ^ tmp >> 9;
+ /* Last 16 rounds */
+ tmp = tmp ^ tmp << 13 ^ key;
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ return tmp ^ tmp >> 9;
+}
+
+/* To allow for hash collisions, filter search continues at these
+ * increments from the first possible entry selected by the hash. */
+static u16 efx_filter_increment(u32 key)
+{
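+ /* key * 2 - 1 is always odd and therefore coprime to the
+ * power-of-two table size, so repeatedly stepping by it visits
+ * every table entry before wrapping to the start. */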
+ return key * 2 - 1;
+}
+
+static enum efx_filter_table_id
+efx_filter_spec_table_id(const struct efx_filter_spec *spec)
+{
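+ /* The table id is encoded in the top bits of the filter type;
+ * the BUILD_BUG_ONs verify that every IP filter type maps to
+ * the RX_IP table and every MAC type to the RX_MAC table. */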
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
+ EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
+ return spec->type >> 2;
+}
+
+static struct efx_filter_table *
+efx_filter_spec_table(struct efx_filter_state *state,
+ const struct efx_filter_spec *spec)
+{
+ if (spec->type == EFX_FILTER_UNSPEC)
+ return NULL;
+ else
+ return &state->table[efx_filter_spec_table_id(spec)];
+}
+
+static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
+{
+ memset(table->search_depth, 0, sizeof(table->search_depth));
+}
+
+static void efx_filter_push_rx_limits(struct efx_nic *efx)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ struct efx_filter_table *table;
+ efx_oword_t filter_ctl;
+
+ efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
+
+ table = &state->table[EFX_FILTER_TABLE_RX_IP];
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
+ table->search_depth[EFX_FILTER_TCP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
+ table->search_depth[EFX_FILTER_TCP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
+ table->search_depth[EFX_FILTER_UDP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
+ table->search_depth[EFX_FILTER_UDP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+
+ table = &state->table[EFX_FILTER_TABLE_RX_MAC];
+ if (table->size) {
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
+ table->search_depth[EFX_FILTER_MAC_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
+ table->search_depth[EFX_FILTER_MAC_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
+}
+
+static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
+ __be32 host1, __be16 port1,
+ __be32 host2, __be16 port2)
+{
+ spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
+ spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
+ spec->data[2] = ntohl(host2);
+}
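+
+/* The 96-bit match value built above is packed as:
+ *	data[0] = host1[15:0]:port1
+ *	data[1] = port2:host1[31:16]
+ *	data[2] = host2
+ * which is the order consumed by efx_filter_build() below.
+ */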
+
+/**
+ * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @host: Local host address (network byte order)
+ * @port: Local port (network byte order)
+ */
+int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
+ __be32 host, __be16 port)
+{
+ __be32 host1;
+ __be16 port1;
+
+ EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
+
+ /* This cannot currently be combined with other filtering */
+ if (spec->type != EFX_FILTER_UNSPEC)
+ return -EPROTONOSUPPORT;
+
+ if (port == 0)
+ return -EINVAL;
+
+ switch (proto) {
+ case IPPROTO_TCP:
+ spec->type = EFX_FILTER_TCP_WILD;
+ break;
+ case IPPROTO_UDP:
+ spec->type = EFX_FILTER_UDP_WILD;
+ break;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ /* Filter is constructed in terms of source and destination,
+ * with the odd wrinkle that the ports are swapped in a UDP
+ * wildcard filter. We need to convert from local and remote
+ * (= zero for wildcard) addresses.
+ */
+ host1 = 0;
+ if (proto != IPPROTO_UDP) {
+ port1 = 0;
+ } else {
+ port1 = port;
+ port = 0;
+ }
+
+ __efx_filter_set_ipv4(spec, host1, port1, host, port);
+ return 0;
+}
+
+/**
+ * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @host: Local host address (network byte order)
+ * @port: Local port (network byte order)
+ * @rhost: Remote host address (network byte order)
+ * @rport: Remote port (network byte order)
+ */
+int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
+ __be32 host, __be16 port,
+ __be32 rhost, __be16 rport)
+{
+ EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
+
+ /* This cannot currently be combined with other filtering */
+ if (spec->type != EFX_FILTER_UNSPEC)
+ return -EPROTONOSUPPORT;
+
+ if (port == 0 || rport == 0)
+ return -EINVAL;
+
+ switch (proto) {
+ case IPPROTO_TCP:
+ spec->type = EFX_FILTER_TCP_FULL;
+ break;
+ case IPPROTO_UDP:
+ spec->type = EFX_FILTER_UDP_FULL;
+ break;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ __efx_filter_set_ipv4(spec, rhost, rport, host, port);
+ return 0;
+}
+
+/**
+ * efx_filter_set_eth_local - specify local Ethernet address and optional VID
+ * @spec: Specification to initialise
+ * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
+ * @addr: Local Ethernet MAC address
+ */
+int efx_filter_set_eth_local(struct efx_filter_spec *spec,
+ u16 vid, const u8 *addr)
+{
+ EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
+
+ /* This cannot currently be combined with other filtering */
+ if (spec->type != EFX_FILTER_UNSPEC)
+ return -EPROTONOSUPPORT;
+
+ if (vid == EFX_FILTER_VID_UNSPEC) {
+ spec->type = EFX_FILTER_MAC_WILD;
+ spec->data[0] = 0;
+ } else {
+ spec->type = EFX_FILTER_MAC_FULL;
+ spec->data[0] = vid;
+ }
+
+ spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
+ spec->data[2] = addr[0] << 8 | addr[1];
+ return 0;
+}
+
+/* Build a filter entry and return its n-tuple key. */
+static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
+{
+ u32 data3;
+
+ switch (efx_filter_spec_table_id(spec)) {
+ case EFX_FILTER_TABLE_RX_IP: {
+ bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
+ spec->type == EFX_FILTER_UDP_WILD);
+ EFX_POPULATE_OWORD_7(
+ *filter,
+ FRF_BZ_RSS_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
+ FRF_BZ_SCATTER_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
+ FRF_BZ_TCP_UDP, is_udp,
+ FRF_BZ_RXQ_ID, spec->dmaq_id,
+ EFX_DWORD_2, spec->data[2],
+ EFX_DWORD_1, spec->data[1],
+ EFX_DWORD_0, spec->data[0]);
+ data3 = is_udp;
+ break;
+ }
+
+ case EFX_FILTER_TABLE_RX_MAC: {
+ bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
+ EFX_POPULATE_OWORD_8(
+ *filter,
+ FRF_CZ_RMFT_RSS_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
+ FRF_CZ_RMFT_SCATTER_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
+ FRF_CZ_RMFT_IP_OVERRIDE,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP),
+ FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
+ FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
+ FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
+ FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
+ data3 = is_wild;
+ break;
+ }
+
+ default:
+ BUG();
+ }
+
+ return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
+}
+
+static bool efx_filter_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right)
+{
+ if (left->type != right->type ||
+ memcmp(left->data, right->data, sizeof(left->data)))
+ return false;
+
+ return true;
+}
+
+static int efx_filter_search(struct efx_filter_table *table,
+ struct efx_filter_spec *spec, u32 key,
+ bool for_insert, int *depth_required)
+{
+ unsigned hash, incr, filter_idx, depth, depth_max;
+
+ hash = efx_filter_hash(key);
+ incr = efx_filter_increment(key);
+
+ filter_idx = hash & (table->size - 1);
+ depth = 1;
+ depth_max = (for_insert ?
+ (spec->priority <= EFX_FILTER_PRI_HINT ?
+ FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX) :
+ table->search_depth[spec->type]);
+
+ for (;;) {
+ /* Return success if entry is used and matches this spec
+ * or entry is unused and we are trying to insert.
+ */
+ if (test_bit(filter_idx, table->used_bitmap) ?
+ efx_filter_equal(spec, &table->spec[filter_idx]) :
+ for_insert) {
+ *depth_required = depth;
+ return filter_idx;
+ }
+
+ /* Return failure if we reached the maximum search depth */
+ if (depth == depth_max)
+ return for_insert ? -EBUSY : -ENOENT;
+
+ filter_idx = (filter_idx + incr) & (table->size - 1);
+ ++depth;
+ }
+}
+
+/* Construct/deconstruct external filter IDs */
+
+static inline int
+efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index)
+{
+ return table_id << 16 | index;
+}
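+
+/* For illustration only (not part of this change): the matching
+ * deconstructors would undo the 16-bit table-id/index split used by
+ * efx_filter_make_id() above, e.g.:
+ *
+ *	static inline enum efx_filter_table_id
+ *	efx_filter_id_table_id(int filter_id)
+ *	{
+ *		return filter_id >> 16;
+ *	}
+ *
+ *	static inline unsigned efx_filter_id_index(int filter_id)
+ *	{
+ *		return filter_id & 0xffff;
+ *	}
+ */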
+
+/**
+ * efx_filter_insert_filter - add or replace a filter
+ * @efx: NIC in which to insert the filter
+ * @spec: Specification for the filter
+ * @replace: Flag for whether the specified filter may replace a filter
+ * with an identical match expression and equal or lower priority
+ *
+ * On success, return the filter ID.
+ * On failure, return a negative error code.
+ */
+int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
+ bool replace)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ struct efx_filter_table *table = efx_filter_spec_table(state, spec);
+ struct efx_filter_spec *saved_spec;
+ efx_oword_t filter;
+ int filter_idx, depth;
+ u32 key;
+ int rc;
+
+ if (!table || table->size == 0)
+ return -EINVAL;
+
+ key = efx_filter_build(&filter, spec);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "%s: type %d search_depth=%d", __func__, spec->type,
+ table->search_depth[spec->type]);
+
+ spin_lock_bh(&state->lock);
+
+ rc = efx_filter_search(table, spec, key, true, &depth);
+ if (rc < 0)
+ goto out;
+ filter_idx = rc;
+ BUG_ON(filter_idx >= table->size);
+ saved_spec = &table->spec[filter_idx];
+
+ if (test_bit(filter_idx, table->used_bitmap)) {
+ /* Should we replace the existing filter? */
+ if (!replace) {
+ rc = -EEXIST;
+ goto out;
+ }
+ if (spec->priority < saved_spec->priority) {
+ rc = -EPERM;
+ goto out;
+ }
+ } else {
+ __set_bit(filter_idx, table->used_bitmap);
+ ++table->used;
+ }
+ *saved_spec = *spec;
+
+ if (table->search_depth[spec->type] < depth) {
+ table->search_depth[spec->type] = depth;
+ efx_filter_push_rx_limits(efx);
+ }
+
+ efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "%s: filter type %d index %d rxq %u set",
+ __func__, spec->type, filter_idx, spec->dmaq_id);
+ rc = efx_filter_make_id(table->id, filter_idx);
+
+out:
+ spin_unlock_bh(&state->lock);
+ return rc;
+}
+
+static void efx_filter_table_clear_entry(struct efx_nic *efx,
+ struct efx_filter_table *table,
+ int filter_idx)
+{
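+ /* Static, hence zero-initialised; writing it out below blanks
+ * the hardware table entry.
+ */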
+ static efx_oword_t filter;
+
+ if (test_bit(filter_idx, table->used_bitmap)) {
+ __clear_bit(filter_idx, table->used_bitmap);
+ --table->used;
+ memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
+
+ efx_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
+}
+
+/**
+ * efx_filter_remove_filter - remove a filter by specification
+ * @efx: NIC from which to remove the filter
+ * @spec: Specification for the filter
+ *
+ * On success, return zero.
+ * On failure, return a negative error code.
+ */
+int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ struct efx_filter_table *table = efx_filter_spec_table(state, spec);
+ struct efx_filter_spec *saved_spec;
+ efx_oword_t filter;
+ int filter_idx, depth;
+ u32 key;
+ int rc;
+
+ if (!table)
+ return -EINVAL;
+
+ key = efx_filter_build(&filter, spec);
+
+ spin_lock_bh(&state->lock);
+
+ rc = efx_filter_search(table, spec, key, false, &depth);
+ if (rc < 0)
+ goto out;
+ filter_idx = rc;
+ saved_spec = &table->spec[filter_idx];
+
+ if (spec->priority < saved_spec->priority) {
+ rc = -EPERM;
+ goto out;
+ }
+
+ efx_filter_table_clear_entry(efx, table, filter_idx);
+ if (table->used == 0)
+ efx_filter_table_reset_search_depth(table);
+ rc = 0;
+
+out:
+ spin_unlock_bh(&state->lock);
+ return rc;
+}
+
+static void efx_filter_table_clear(struct efx_nic *efx,
+ enum efx_filter_table_id table_id,
+ enum efx_filter_priority priority)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ struct efx_filter_table *table = &state->table[table_id];
+ int filter_idx;
+
+ spin_lock_bh(&state->lock);
+
+ for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
+ if (table->spec[filter_idx].priority <= priority)
+ efx_filter_table_clear_entry(efx, table, filter_idx);
+ if (table->used == 0)
+ efx_filter_table_reset_search_depth(table);
+
+ spin_unlock_bh(&state->lock);
+}
+
+/**
+ * efx_filter_clear_rx - remove RX filters by priority
+ * @efx: NIC from which to remove the filters
+ * @priority: Maximum priority to remove
+ */
+void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
+{
+ efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
+ efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
+}
+
+/* Restore filter state after reset */
+void efx_restore_filters(struct efx_nic *efx)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ enum efx_filter_table_id table_id;
+ struct efx_filter_table *table;
+ efx_oword_t filter;
+ int filter_idx;
+
+ spin_lock_bh(&state->lock);
+
+ for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (!test_bit(filter_idx, table->used_bitmap))
+ continue;
+ efx_filter_build(&filter, &table->spec[filter_idx]);
+ efx_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
+ }
+
+ efx_filter_push_rx_limits(efx);
+
+ spin_unlock_bh(&state->lock);
+}
+
+int efx_probe_filters(struct efx_nic *efx)
+{
+ struct efx_filter_state *state;
+ struct efx_filter_table *table;
+ unsigned table_id;
+
+ state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+ efx->filter_state = state;
+
+ spin_lock_init(&state->lock);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+#ifdef CONFIG_RFS_ACCEL
+ state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
+ sizeof(*state->rps_flow_id),
+ GFP_KERNEL);
+ if (!state->rps_flow_id)
+ goto fail;
+#endif
+ table = &state->table[EFX_FILTER_TABLE_RX_IP];
+ table->id = EFX_FILTER_TABLE_RX_IP;
+ table->offset = FR_BZ_RX_FILTER_TBL0;
+ table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
+ table->step = FR_BZ_RX_FILTER_TBL0_STEP;
+ }
+
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
+ table = &state->table[EFX_FILTER_TABLE_RX_MAC];
+ table->id = EFX_FILTER_TABLE_RX_MAC;
+ table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
+ table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
+ table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
+ }
+
+ for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
+ table = &state->table[table_id];
+ if (table->size == 0)
+ continue;
+ table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!table->used_bitmap)
+ goto fail;
+ table->spec = vzalloc(table->size * sizeof(*table->spec));
+ if (!table->spec)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ efx_remove_filters(efx);
+ return -ENOMEM;
+}
+
+void efx_remove_filters(struct efx_nic *efx)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ enum efx_filter_table_id table_id;
+
+ for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
+ kfree(state->table[table_id].used_bitmap);
+ vfree(state->table[table_id].spec);
+ }
+#ifdef CONFIG_RFS_ACCEL
+ kfree(state->rps_flow_id);
+#endif
+ kfree(state);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_channel *channel;
+ struct efx_filter_state *state = efx->filter_state;
+ struct efx_filter_spec spec;
+ const struct iphdr *ip;
+ const __be16 *ports;
+ int nhoff;
+ int rc;
+
+ nhoff = skb_network_offset(skb);
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return -EPROTONOSUPPORT;
+
+ /* RFS must validate the IP header length before calling us */
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
+ ip = (const struct iphdr *)(skb->data + nhoff);
+ if (ip_is_fragment(ip))
+ return -EPROTONOSUPPORT;
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
+ ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
+ rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
+ ip->daddr, ports[1], ip->saddr, ports[0]);
+ if (rc)
+ return rc;
+
+ rc = efx_filter_insert_filter(efx, &spec, true);
+ if (rc < 0)
+ return rc;
+
+ /* Remember this so we can check whether to expire the filter later */
+ state->rps_flow_id[rc] = flow_id;
+ channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+ ++channel->rfs_filters_added;
+
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+ (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
+ &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
+ rxq_index, flow_id, rc);
+
+ return rc;
+}
+
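+/* Scan a quota of RX_IP filter entries per call, removing RFS hint
+ * filters whose flows the stack says may be expired; rps_expire_index
+ * records where the scan stopped so that successive calls cover the
+ * whole table. Returns false (try again later) if the state lock is
+ * contended.
+ */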
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
+ unsigned mask = table->size - 1;
+ unsigned index;
+ unsigned stop;
+
+ if (!spin_trylock_bh(&state->lock))
+ return false;
+
+ index = state->rps_expire_index;
+ stop = (index + quota) & mask;
+
+ while (index != stop) {
+ if (test_bit(index, table->used_bitmap) &&
+ table->spec[index].priority == EFX_FILTER_PRI_HINT &&
+ rps_may_expire_flow(efx->net_dev,
+ table->spec[index].dmaq_id,
+ state->rps_flow_id[index], index)) {
+ netif_info(efx, rx_status, efx->net_dev,
+ "expiring filter %d [flow %u]\n",
+ index, state->rps_flow_id[index]);
+ efx_filter_table_clear_entry(efx, table, index);
+ }
+ index = (index + 1) & mask;
+ }
+
+ state->rps_expire_index = stop;
+ if (table->used == 0)
+ efx_filter_table_reset_search_depth(table);
+
+ spin_unlock_bh(&state->lock);
+ return true;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
new file mode 100644
index 000000000000..872f2132a496
--- /dev/null
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -0,0 +1,112 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_FILTER_H
+#define EFX_FILTER_H
+
+#include <linux/types.h>
+
+/**
+ * enum efx_filter_type - type of hardware filter
+ * @EFX_FILTER_TCP_FULL: Matching TCP/IPv4 4-tuple
+ * @EFX_FILTER_TCP_WILD: Matching TCP/IPv4 destination (host, port)
+ * @EFX_FILTER_UDP_FULL: Matching UDP/IPv4 4-tuple
+ * @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port)
+ * @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID
+ * @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address
+ * @EFX_FILTER_UNSPEC: Match type is unspecified
+ *
+ * Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types.
+ */
+enum efx_filter_type {
+ EFX_FILTER_TCP_FULL = 0,
+ EFX_FILTER_TCP_WILD,
+ EFX_FILTER_UDP_FULL,
+ EFX_FILTER_UDP_WILD,
+ EFX_FILTER_MAC_FULL = 4,
+ EFX_FILTER_MAC_WILD,
+ EFX_FILTER_TYPE_COUNT, /* number of specific types */
+ EFX_FILTER_UNSPEC = 0xf,
+};
+
+/**
+ * enum efx_filter_priority - priority of a hardware filter specification
+ * @EFX_FILTER_PRI_HINT: Performance hint
+ * @EFX_FILTER_PRI_MANUAL: Manually configured filter
+ * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour
+ */
+enum efx_filter_priority {
+ EFX_FILTER_PRI_HINT = 0,
+ EFX_FILTER_PRI_MANUAL,
+ EFX_FILTER_PRI_REQUIRED,
+};
+
+/**
+ * enum efx_filter_flags - flags for hardware filter specifications
+ * @EFX_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues.
+ * By default, matching packets will be delivered only to the
+ * specified queue. If this flag is set, they will be delivered
+ * to a range of queues offset from the specified queue number
+ * according to the indirection table.
+ * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
+ * queue.
+ * @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override
+ * any IP filter that matches the same packet. By default, IP
+ * filters take precedence.
+ * @EFX_FILTER_FLAG_RX: Filter is for RX
+ */
+enum efx_filter_flags {
+ EFX_FILTER_FLAG_RX_RSS = 0x01,
+ EFX_FILTER_FLAG_RX_SCATTER = 0x02,
+ EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
+ EFX_FILTER_FLAG_RX = 0x08,
+};
+
+/**
+ * struct efx_filter_spec - specification for a hardware filter
+ * @type: Type of match to be performed, from &enum efx_filter_type
+ * @priority: Priority of the filter, from &enum efx_filter_priority
+ * @flags: Miscellaneous flags, from &enum efx_filter_flags
+ * @dmaq_id: Source/target queue index
+ * @data: Match data (type-dependent)
+ *
+ * Use the efx_filter_set_*() functions to initialise the @type and
+ * @data fields.
+ */
+struct efx_filter_spec {
+ u8 type:4;
+ u8 priority:4;
+ u8 flags;
+ u16 dmaq_id;
+ u32 data[3];
+};
+
+static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
+ enum efx_filter_priority priority,
+ enum efx_filter_flags flags,
+ unsigned rxq_id)
+{
+ spec->type = EFX_FILTER_UNSPEC;
+ spec->priority = priority;
+ spec->flags = EFX_FILTER_FLAG_RX | flags;
+ spec->dmaq_id = rxq_id;
+}
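+
+/* Example usage (editorial sketch; efx, local_ip and rxq_id are
+ * illustrative, and error handling is elided):
+ *
+ *	struct efx_filter_spec spec;
+ *
+ *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq_id);
+ *	efx_filter_set_ipv4_local(&spec, IPPROTO_TCP, local_ip,
+ *				  htons(80));
+ *	efx_filter_insert_filter(efx, &spec, false);
+ */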
+
+extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
+ __be32 host, __be16 port);
+extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
+ __be32 host, __be16 port,
+ __be32 rhost, __be16 rport);
+extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
+ u16 vid, const u8 *addr);
+enum {
+ EFX_FILTER_VID_UNSPEC = 0xffff,
+};
+
+#endif /* EFX_FILTER_H */
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
new file mode 100644
index 000000000000..751d1ec112cc
--- /dev/null
+++ b/drivers/net/ethernet/sfc/io.h
@@ -0,0 +1,293 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_IO_H
+#define EFX_IO_H
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+/**************************************************************************
+ *
+ * NIC register I/O
+ *
+ **************************************************************************
+ *
+ * Notes on locking strategy:
+ *
+ * Most CSRs are 128-bit (oword) and therefore cannot be read or
+ * written atomically. Access from the host is buffered by the Bus
+ * Interface Unit (BIU). Whenever the host reads from the lowest
+ * address of such a register, or from the address of a different such
+ * register, the BIU latches the register's value. Subsequent reads
+ * from higher addresses of the same register will read the latched
+ * value. Whenever the host writes part of such a register, the BIU
+ * collects the written value and does not write to the underlying
+ * register until all 4 dwords have been written. A similar buffering
+ * scheme applies to host access to the NIC's 64-bit SRAM.
+ *
+ * Access to different CSRs and 64-bit SRAM words must be serialised,
+ * since interleaved access can result in lost writes or lost
+ * information from read-to-clear fields. We use efx_nic::biu_lock
+ * for this. (We could use separate locks for read and write, but
+ * this is not normally a performance bottleneck.)
+ *
+ * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
+ * 128-bit but are special-cased in the BIU to avoid the need for
+ * locking in the host:
+ *
+ * - They are write-only.
+ * - The semantics of writing to these registers are such that
+ * replacing the low 96 bits with zero does not affect functionality.
+ * - If the host writes to the last dword address of such a register
+ * (i.e. the high 32 bits) the underlying register will always be
+ * written. If the collector and the current write together do not
+ * provide values for all 128 bits of the register, the low 96 bits
+ * will be written as zero.
+ * - If the host writes to the address of any other part of such a
+ * register while the collector already holds values for some other
+ * register, the write is discarded and the collector maintains its
+ * current state.
+ */
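+
+/* For example, a write to only the low dwords of RX_DESC_UPD sits in
+ * the collector, and is discarded if the collector already holds data
+ * for a different register; a write to the final (high) dword always
+ * pushes the register out, zero-filling any dwords the collector does
+ * not hold. This is why efx_writed_page() below may target only the
+ * last dword of these registers.
+ */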
+
+#if BITS_PER_LONG == 64
+#define EFX_USE_QWORD_IO 1
+#endif
+
+#ifdef EFX_USE_QWORD_IO
+static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
+ unsigned int reg)
+{
+ __raw_writeq((__force u64)value, efx->membase + reg);
+}
+static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
+{
+ return (__force __le64)__raw_readq(efx->membase + reg);
+}
+#endif
+
+static inline void _efx_writed(struct efx_nic *efx, __le32 value,
+ unsigned int reg)
+{
+ __raw_writel((__force u32)value, efx->membase + reg);
+}
+static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
+{
+ return (__force __le32)__raw_readl(efx->membase + reg);
+}
+
+/* Write a normal 128-bit CSR, locking as appropriate. */
+static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
+ unsigned int reg)
+{
+ unsigned long flags __attribute__ ((unused));
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing register %x with " EFX_OWORD_FMT "\n", reg,
+ EFX_OWORD_VAL(*value));
+
+ spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+ _efx_writeq(efx, value->u64[0], reg + 0);
+ _efx_writeq(efx, value->u64[1], reg + 8);
+#else
+ _efx_writed(efx, value->u32[0], reg + 0);
+ _efx_writed(efx, value->u32[1], reg + 4);
+ _efx_writed(efx, value->u32[2], reg + 8);
+ _efx_writed(efx, value->u32[3], reg + 12);
+#endif
+ mmiowb();
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
+static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
+ efx_qword_t *value, unsigned int index)
+{
+ unsigned int addr = index * sizeof(*value);
+ unsigned long flags __attribute__ ((unused));
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing SRAM address %x with " EFX_QWORD_FMT "\n",
+ addr, EFX_QWORD_VAL(*value));
+
+ spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+ __raw_writeq((__force u64)value->u64[0], membase + addr);
+#else
+ __raw_writel((__force u32)value->u32[0], membase + addr);
+ __raw_writel((__force u32)value->u32[1], membase + addr + 4);
+#endif
+ mmiowb();
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
+static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
+ unsigned int reg)
+{
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing register %x with "EFX_DWORD_FMT"\n",
+ reg, EFX_DWORD_VAL(*value));
+
+ /* No lock required */
+ _efx_writed(efx, value->u32[0], reg);
+}
+
+/* Read a 128-bit CSR, locking as appropriate. */
+static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
+ unsigned int reg)
+{
+ unsigned long flags __attribute__ ((unused));
+
+ spin_lock_irqsave(&efx->biu_lock, flags);
+ value->u32[0] = _efx_readd(efx, reg + 0);
+ value->u32[1] = _efx_readd(efx, reg + 4);
+ value->u32[2] = _efx_readd(efx, reg + 8);
+ value->u32[3] = _efx_readd(efx, reg + 12);
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from register %x, got " EFX_OWORD_FMT "\n", reg,
+ EFX_OWORD_VAL(*value));
+}
+
+/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */
+static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
+ efx_qword_t *value, unsigned int index)
+{
+ unsigned int addr = index * sizeof(*value);
+ unsigned long flags __attribute__ ((unused));
+
+ spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+ value->u64[0] = (__force __le64)__raw_readq(membase + addr);
+#else
+ value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+ value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
+#endif
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
+ addr, EFX_QWORD_VAL(*value));
+}
+
+/* Read a 32-bit CSR or SRAM */
+static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
+ unsigned int reg)
+{
+ value->u32[0] = _efx_readd(efx, reg);
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from register %x, got "EFX_DWORD_FMT"\n",
+ reg, EFX_DWORD_VAL(*value));
+}
+
+/* Write a 128-bit CSR forming part of a table */
+static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value,
+ unsigned int reg, unsigned int index)
+{
+ efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* Read a 128-bit CSR forming part of a table */
+static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
+ unsigned int reg, unsigned int index)
+{
+ efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* Write a 32-bit CSR forming part of a table, or 32-bit SRAM */
+static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
+ unsigned int reg, unsigned int index)
+{
+ efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* Read a 32-bit CSR forming part of a table, or 32-bit SRAM */
+static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
+ unsigned int reg, unsigned int index)
+{
+ efx_readd(efx, value, reg + index * sizeof(efx_dword_t));
+}
+
+/* Page-mapped register block size */
+#define EFX_PAGE_BLOCK_SIZE 0x2000
+
+/* Calculate offset to page-mapped register block */
+#define EFX_PAGED_REG(page, reg) \
+ ((page) * EFX_PAGE_BLOCK_SIZE + (reg))
+
+/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
+static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
+ unsigned int reg, unsigned int page)
+{
+ reg = EFX_PAGED_REG(page, reg);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing register %x with " EFX_OWORD_FMT "\n", reg,
+ EFX_OWORD_VAL(*value));
+
+#ifdef EFX_USE_QWORD_IO
+ _efx_writeq(efx, value->u64[0], reg + 0);
+ _efx_writeq(efx, value->u64[1], reg + 8);
+#else
+ _efx_writed(efx, value->u32[0], reg + 0);
+ _efx_writed(efx, value->u32[1], reg + 4);
+ _efx_writed(efx, value->u32[2], reg + 8);
+ _efx_writed(efx, value->u32[3], reg + 12);
+#endif
+}
+#define efx_writeo_page(efx, value, reg, page) \
+ _efx_writeo_page(efx, value, \
+ reg + \
+ BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
+ page)
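+
+/* BUILD_BUG_ON_ZERO() in this and the following wrappers evaluates to
+ * zero, leaving the register address unchanged; it exists purely to
+ * break the build if a wrapper is used on a register it was not
+ * written for.
+ */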
+
+/* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of
+ * RX_DESC_UPD or TX_DESC_UPD)
+ */
+static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
+ unsigned int reg, unsigned int page)
+{
+ efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+}
+#define efx_writed_page(efx, value, reg, page) \
+ _efx_writed_page(efx, value, \
+ reg + \
+ BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c \
+ && (reg) != 0xa1c), \
+ page)
+
+/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
+ * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
+ * collector register.
+ */
+static inline void _efx_writed_page_locked(struct efx_nic *efx,
+ efx_dword_t *value,
+ unsigned int reg,
+ unsigned int page)
+{
+ unsigned long flags __attribute__ ((unused));
+
+ if (page == 0) {
+ spin_lock_irqsave(&efx->biu_lock, flags);
+ efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+ } else {
+ efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+ }
+}
+#define efx_writed_page_locked(efx, value, reg, page) \
+ _efx_writed_page_locked(efx, value, \
+ reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
+ page)
+
+#endif /* EFX_IO_H */
diff --git a/drivers/net/ethernet/sfc/mac.h b/drivers/net/ethernet/sfc/mac.h
new file mode 100644
index 000000000000..d6a255d0856b
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mac.h
@@ -0,0 +1,21 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_MAC_H
+#define EFX_MAC_H
+
+#include "net_driver.h"
+
+extern const struct efx_mac_operations falcon_xmac_operations;
+extern const struct efx_mac_operations efx_mcdi_mac_operations;
+extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
+ u32 dma_len, int enable, int clear);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
new file mode 100644
index 000000000000..81a425397468
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -0,0 +1,1191 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2008-2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/delay.h>
+#include "net_driver.h"
+#include "nic.h"
+#include "io.h"
+#include "regs.h"
+#include "mcdi_pcol.h"
+#include "phy.h"
+
+/**************************************************************************
+ *
+ * Management-Controller-to-Driver Interface
+ *
+ **************************************************************************
+ */
+
+/* Software-defined layout of the shared memory */
+#define CMD_NOTIFY_PORT0 0
+#define CMD_NOTIFY_PORT1 4
+#define CMD_PDU_PORT0 0x008
+#define CMD_PDU_PORT1 0x108
+#define REBOOT_FLAG_PORT0 0x3f8
+#define REBOOT_FLAG_PORT1 0x3fc
+
+#define MCDI_RPC_TIMEOUT 10 /* seconds */
+
+#define MCDI_PDU(efx) \
+ (efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0)
+#define MCDI_DOORBELL(efx) \
+ (efx_port_num(efx) ? CMD_NOTIFY_PORT1 : CMD_NOTIFY_PORT0)
+#define MCDI_REBOOT_FLAG(efx) \
+ (efx_port_num(efx) ? REBOOT_FLAG_PORT1 : REBOOT_FLAG_PORT0)
+
+#define SEQ_MASK \
+ EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
+
+static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
+{
+ struct siena_nic_data *nic_data;
+ EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
+ nic_data = efx->nic_data;
+ return &nic_data->mcdi;
+}
+
+void efx_mcdi_init(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi;
+
+ if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+ return;
+
+ mcdi = efx_mcdi(efx);
+ init_waitqueue_head(&mcdi->wq);
+ spin_lock_init(&mcdi->iface_lock);
+ atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
+ mcdi->mode = MCDI_MODE_POLL;
+
+ (void) efx_mcdi_poll_reboot(efx);
+}
+
+static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
+ const u8 *inbuf, size_t inlen)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
+ unsigned int i;
+ efx_dword_t hdr;
+ u32 xflags, seqno;
+
+ BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
+ BUG_ON(inlen & 3 || inlen >= 0x100);
+
+ seqno = mcdi->seqno & SEQ_MASK;
+ xflags = 0;
+ if (mcdi->mode == MCDI_MODE_EVENTS)
+ xflags |= MCDI_HEADER_XFLAGS_EVREQ;
+
+ EFX_POPULATE_DWORD_6(hdr,
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_CODE, cmd,
+ MCDI_HEADER_DATALEN, inlen,
+ MCDI_HEADER_SEQ, seqno,
+ MCDI_HEADER_XFLAGS, xflags);
+
+ efx_writed(efx, &hdr, pdu);
+
+ for (i = 0; i < inlen; i += 4)
+ _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
+
+ /* Ensure the payload is written out before the header */
+ wmb();
+
+ /* ring the doorbell with a distinctive value */
+ _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
+}
+
+static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ int i;
+
+ BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
+ BUG_ON(outlen & 3 || outlen >= 0x100);
+
+ for (i = 0; i < outlen; i += 4)
+ *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
+}
+
+static int efx_mcdi_poll(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ unsigned int time, finish;
+ unsigned int respseq, respcmd, error;
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned int rc, spins;
+ efx_dword_t reg;
+
+ /* Check for a reboot atomically with respect to efx_mcdi_copyout() */
+ rc = -efx_mcdi_poll_reboot(efx);
+ if (rc)
+ goto out;
+
+ /* Poll for completion. Poll quickly (once per microsecond) for the
+ * first jiffy, because MCDI responses are generally fast. After
+ * that, back off and poll roughly once a jiffy.
+ */
+ spins = TICK_USEC;
+ finish = get_seconds() + MCDI_RPC_TIMEOUT;
+
+ while (1) {
+ if (spins != 0) {
+ --spins;
+ udelay(1);
+ } else {
+ schedule_timeout_uninterruptible(1);
+ }
+
+ time = get_seconds();
+
+ rmb();
+ efx_readd(efx, &reg, pdu);
+
+ /* All 1's indicates that shared memory is in reset (and is
+ * not a valid header). Wait for it to come out of reset
+ * before completing the command. */
+ if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff &&
+ EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
+ break;
+
+ if (time >= finish)
+ return -ETIMEDOUT;
+ }
+
+ mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN);
+ respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ);
+ respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE);
+ error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);
+
+ if (error && mcdi->resplen == 0) {
+ netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
+ rc = EIO;
+ } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
+ respseq, mcdi->seqno);
+ rc = EIO;
+ } else if (error) {
+ efx_readd(efx, &reg, pdu + 4);
+ switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
+#define TRANSLATE_ERROR(name) \
+ case MC_CMD_ERR_ ## name: \
+ rc = name; \
+ break
+ TRANSLATE_ERROR(ENOENT);
+ TRANSLATE_ERROR(EINTR);
+ TRANSLATE_ERROR(EACCES);
+ TRANSLATE_ERROR(EBUSY);
+ TRANSLATE_ERROR(EINVAL);
+ TRANSLATE_ERROR(EDEADLK);
+ TRANSLATE_ERROR(ENOSYS);
+ TRANSLATE_ERROR(ETIME);
+#undef TRANSLATE_ERROR
+ default:
+ rc = EIO;
+ break;
+ }
+ } else
+ rc = 0;
+
+out:
+ mcdi->resprc = rc;
+ if (rc)
+ mcdi->resplen = 0;
+
+ /* Return rc=0 like wait_event_timeout() */
+ return 0;
+}
+
+/* Test and clear MC-rebooted flag for this port/function */
+int efx_mcdi_poll_reboot(struct efx_nic *efx)
+{
+ unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
+ efx_dword_t reg;
+ u32 value;
+
+ if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+ return 0;
+
+ efx_readd(efx, &reg, addr);
+ value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
+
+ if (value == 0)
+ return 0;
+
+ EFX_ZERO_DWORD(reg);
+ efx_writed(efx, &reg, addr);
+
+ if (value == MC_STATUS_DWORD_ASSERT)
+ return -EINTR;
+ else
+ return -EIO;
+}
+
+static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
+{
+ /* Wait until the interface becomes QUIESCENT and we win the race
+ * to mark it RUNNING. */
+ wait_event(mcdi->wq,
+ atomic_cmpxchg(&mcdi->state,
+ MCDI_STATE_QUIESCENT,
+ MCDI_STATE_RUNNING)
+ == MCDI_STATE_QUIESCENT);
+}
+
+static int efx_mcdi_await_completion(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ if (wait_event_timeout(
+ mcdi->wq,
+ atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
+ msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0)
+ return -ETIMEDOUT;
+
+ /* Check if efx_mcdi_set_mode() switched us back to polled completions.
+ * In which case, poll for completions directly. If efx_mcdi_ev_cpl()
+ * completed the request first, then we'll just end up completing the
+ * request again, which is safe.
+ *
+ * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
+ * wait_event_timeout() implicitly provides.
+ */
+ if (mcdi->mode == MCDI_MODE_POLL)
+ return efx_mcdi_poll(efx);
+
+ return 0;
+}
+
+static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
+{
+ /* If the interface is RUNNING, then move to COMPLETED and wake any
+ * waiters. If the interface isn't in RUNNING then we've received a
+ * duplicate completion after we've already transitioned back to
+ * QUIESCENT. [A subsequent invocation would increment seqno, so would
+ * have failed the seqno check].
+ */
+ if (atomic_cmpxchg(&mcdi->state,
+ MCDI_STATE_RUNNING,
+ MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
+ wake_up(&mcdi->wq);
+ return true;
+ }
+
+ return false;
+}
+
+static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
+{
+ atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
+ wake_up(&mcdi->wq);
+}
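+
+/* Interface state machine: efx_mcdi_acquire() moves QUIESCENT ->
+ * RUNNING; in event mode efx_mcdi_complete() moves RUNNING ->
+ * COMPLETED and wakes the waiter; efx_mcdi_release() returns the
+ * interface to QUIESCENT once the response has been copied out.
+ */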
+
+static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
+ unsigned int datalen, unsigned int errno)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ bool wake = false;
+
+ spin_lock(&mcdi->iface_lock);
+
+ if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
+ if (mcdi->credits)
+ /* The request has been cancelled */
+ --mcdi->credits;
+ else
+ netif_err(efx, hw, efx->net_dev,
+ "MC response mismatch tx seq 0x%x rx "
+ "seq 0x%x\n", seqno, mcdi->seqno);
+ } else {
+ mcdi->resprc = errno;
+ mcdi->resplen = datalen;
+
+ wake = true;
+ }
+
+ spin_unlock(&mcdi->iface_lock);
+
+ if (wake)
+ efx_mcdi_complete(mcdi);
+}
+
+/* Issue the given command by writing the data into the shared memory PDU,
+ * ring the doorbell and wait for completion. Copyout the result. */
+int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
+ const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ int rc;
+ BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
+
+ efx_mcdi_acquire(mcdi);
+
+ /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
+ spin_lock_bh(&mcdi->iface_lock);
+ ++mcdi->seqno;
+ spin_unlock_bh(&mcdi->iface_lock);
+
+ efx_mcdi_copyin(efx, cmd, inbuf, inlen);
+
+ if (mcdi->mode == MCDI_MODE_POLL)
+ rc = efx_mcdi_poll(efx);
+ else
+ rc = efx_mcdi_await_completion(efx);
+
+ if (rc != 0) {
+ /* Close the race with efx_mcdi_ev_cpl() executing just too late
+ * and completing a request we've just cancelled, by ensuring
+ * that the seqno check therein fails.
+ */
+ spin_lock_bh(&mcdi->iface_lock);
+ ++mcdi->seqno;
+ ++mcdi->credits;
+ spin_unlock_bh(&mcdi->iface_lock);
+
+ netif_err(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d mode %d timed out\n",
+ cmd, (int)inlen, mcdi->mode);
+ } else {
+ size_t resplen;
+
+ /* At the very least we need a memory barrier here to ensure
+ * we pick up changes from efx_mcdi_ev_cpl(). Protect against
+ * a spurious efx_mcdi_ev_cpl() running concurrently by
+ * acquiring the iface_lock. */
+ spin_lock_bh(&mcdi->iface_lock);
+ rc = -mcdi->resprc;
+ resplen = mcdi->resplen;
+ spin_unlock_bh(&mcdi->iface_lock);
+
+ if (rc == 0) {
+ efx_mcdi_copyout(efx, outbuf,
+ min(outlen, resplen + 3) & ~0x3);
+ if (outlen_actual != NULL)
+ *outlen_actual = resplen;
+ } else if (cmd == MC_CMD_REBOOT && rc == -EIO)
+ ; /* Don't reset if MC_CMD_REBOOT returns EIO */
+ else if (rc == -EIO || rc == -EINTR) {
+ netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
+ -rc);
+ efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+ } else
+ netif_dbg(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d failed rc=%d\n",
+ cmd, (int)inlen, -rc);
+ }
+
+ efx_mcdi_release(mcdi);
+ return rc;
+}
+
+void efx_mcdi_mode_poll(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi;
+
+ if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+ return;
+
+ mcdi = efx_mcdi(efx);
+ if (mcdi->mode == MCDI_MODE_POLL)
+ return;
+
+ /* We can switch from event completion to polled completion, because
+ * mcdi requests are always completed in shared memory. We do this by
+ * switching the mode to POLL'd then completing the request.
+ * efx_mcdi_await_completion() will then call efx_mcdi_poll().
+ *
+ * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
+ * which efx_mcdi_complete() provides for us.
+ */
+ mcdi->mode = MCDI_MODE_POLL;
+
+ efx_mcdi_complete(mcdi);
+}
+
+void efx_mcdi_mode_event(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi;
+
+ if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+ return;
+
+ mcdi = efx_mcdi(efx);
+
+ if (mcdi->mode == MCDI_MODE_EVENTS)
+ return;
+
+ /* We can't switch from polled to event completion in the middle of a
+ * request, because the completion method is specified in the request.
+ * So acquire the interface to serialise the requestors. We don't need
+ * to acquire the iface_lock to change the mode here, but we do need a
+ * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
+ * efx_mcdi_acquire() provides.
+ */
+ efx_mcdi_acquire(mcdi);
+ mcdi->mode = MCDI_MODE_EVENTS;
+ efx_mcdi_release(mcdi);
+}
+
+static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ /* If there is an outstanding MCDI request, it has been terminated
+ * either by a BADASSERT or REBOOT event. If the mcdi interface is
+ * in polled mode, then do nothing because the MC reboot handler will
+ * set the header correctly. However, if the mcdi interface is waiting
+ * for a CMDDONE event it won't receive it [and since all MCDI events
+ * are sent to the same queue, we can't be racing with
+ * efx_mcdi_ev_cpl()]
+ *
+ * There's a race here with efx_mcdi_rpc(), because we might receive
+ * a REBOOT event *before* the request has been copied out. In polled
+ * mode (during startup) this is irrelevant, because efx_mcdi_complete()
+ * is ignored. In event mode, this condition is just an edge-case of
+ * receiving a REBOOT event after posting the MCDI request. Did the MC
+ * reboot before or after the copyout? The best we can always do is
+ * return failure.
+ */
+ spin_lock(&mcdi->iface_lock);
+ if (efx_mcdi_complete(mcdi)) {
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ mcdi->resprc = rc;
+ mcdi->resplen = 0;
+ ++mcdi->credits;
+ }
+ } else
+ /* Nobody was waiting for an MCDI request, so trigger a reset */
+ efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+
+ spin_unlock(&mcdi->iface_lock);
+}
+
+static unsigned int efx_mcdi_event_link_speed[] = {
+ [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
+ [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
+};
+
+static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
+{
+ u32 flags, fcntl, speed, lpa;
+
+ speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
+ EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
+ speed = efx_mcdi_event_link_speed[speed];
+
+ flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
+ fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
+ lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
+
+ /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
+ * which is only run after flushing the event queues. Therefore, it
+ * is safe to modify the link state outside of the mac_lock here.
+ */
+ efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
+
+ efx_mcdi_phy_check_fcntl(efx, lpa);
+
+ efx_link_status_changed(efx);
+}
+
+static const char *sensor_names[] = {
+ [MC_CMD_SENSOR_CONTROLLER_TEMP] = "Controller temp. sensor",
+ [MC_CMD_SENSOR_PHY_COMMON_TEMP] = "PHY shared temp. sensor",
+ [MC_CMD_SENSOR_CONTROLLER_COOLING] = "Controller cooling",
+ [MC_CMD_SENSOR_PHY0_TEMP] = "PHY 0 temp. sensor",
+ [MC_CMD_SENSOR_PHY0_COOLING] = "PHY 0 cooling",
+ [MC_CMD_SENSOR_PHY1_TEMP] = "PHY 1 temp. sensor",
+ [MC_CMD_SENSOR_PHY1_COOLING] = "PHY 1 cooling",
+ [MC_CMD_SENSOR_IN_1V0] = "1.0V supply sensor",
+ [MC_CMD_SENSOR_IN_1V2] = "1.2V supply sensor",
+ [MC_CMD_SENSOR_IN_1V8] = "1.8V supply sensor",
+ [MC_CMD_SENSOR_IN_2V5] = "2.5V supply sensor",
+ [MC_CMD_SENSOR_IN_3V3] = "3.3V supply sensor",
+ [MC_CMD_SENSOR_IN_12V0] = "12V supply sensor"
+};
+
+static const char *sensor_status_names[] = {
+ [MC_CMD_SENSOR_STATE_OK] = "OK",
+ [MC_CMD_SENSOR_STATE_WARNING] = "Warning",
+ [MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
+ [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
+};
+
+static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
+{
+ unsigned int monitor, state, value;
+ const char *name, *state_txt;
+ monitor = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
+ state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
+ value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE);
+ /* Deal gracefully with the board having more sensors than we
+ * know about, but do not expect new sensor states. */
+ name = (monitor >= ARRAY_SIZE(sensor_names))
+ ? "No sensor name available" :
+ sensor_names[monitor];
+ EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
+ state_txt = sensor_status_names[state];
+
+ netif_err(efx, hw, efx->net_dev,
+ "Sensor %d (%s) reports condition '%s' for raw value %d\n",
+ monitor, name, state_txt, value);
+}
+
+/* Called from falcon_process_eventq for MCDI events */
+void efx_mcdi_process_event(struct efx_channel *channel,
+ efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
+ u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);
+
+ switch (code) {
+ case MCDI_EVENT_CODE_BADSSERT:
+ netif_err(efx, hw, efx->net_dev,
+ "MC watchdog or assertion failure at 0x%x\n", data);
+ efx_mcdi_ev_death(efx, EINTR);
+ break;
+
+ case MCDI_EVENT_CODE_PMNOTICE:
+ netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
+ break;
+
+ case MCDI_EVENT_CODE_CMDDONE:
+ efx_mcdi_ev_cpl(efx,
+ MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
+ MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
+ MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
+ break;
+
+ case MCDI_EVENT_CODE_LINKCHANGE:
+ efx_mcdi_process_link_change(efx, event);
+ break;
+ case MCDI_EVENT_CODE_SENSOREVT:
+ efx_mcdi_sensor_event(efx, event);
+ break;
+ case MCDI_EVENT_CODE_SCHEDERR:
+ netif_info(efx, hw, efx->net_dev,
+ "MC Scheduler error address=0x%x\n", data);
+ break;
+ case MCDI_EVENT_CODE_REBOOT:
+ netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
+ efx_mcdi_ev_death(efx, EIO);
+ break;
+ case MCDI_EVENT_CODE_MAC_STATS_DMA:
+ /* MAC stats are gathered lazily. We can ignore this. */
+ break;
+
+ default:
+ netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
+ code);
+ }
+}
+
+/**************************************************************************
+ *
+ * Specific request functions
+ *
+ **************************************************************************
+ */
+
+void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
+{
+ u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)];
+ size_t outlength;
+ const __le16 *ver_words;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
+ outbuf, sizeof(outbuf), &outlength);
+ if (rc)
+ goto fail;
+
+ if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
+ snprintf(buf, len, "%u.%u.%u.%u",
+ le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
+ le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
+ return;
+
+fail:
+ netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ buf[0] = 0;
+}
+
+int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+ bool *was_attached)
+{
+ u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN];
+ u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN];
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
+ driver_operating ? 1 : 0);
+ MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ if (was_attached != NULL)
+ *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
+ return 0;
+
+fail:
+ netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+ u16 *fw_subtype_list)
+{
+ u8 outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN];
+ size_t outlen;
+ int port_num = efx_port_num(efx);
+ int offset;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ offset = (port_num)
+ ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST
+ : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
+ if (mac_address)
+ memcpy(mac_address, outbuf + offset, ETH_ALEN);
+ if (fw_subtype_list)
+ memcpy(fw_subtype_list,
+ outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST,
+ MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN);
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
+ __func__, rc, (int)outlen);
+
+ return rc;
+}
+
+int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
+{
+ u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN];
+ u32 dest = 0;
+ int rc;
+
+ if (uart)
+ dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
+ if (evq)
+ dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
+
+ MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
+ MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
+
+ BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
+{
+ u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN];
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
+ size_t *size_out, size_t *erase_size_out,
+ bool *protected_out)
+{
+ u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN];
+ u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN];
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
+ *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
+ *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
+ (1 << MC_CMD_NVRAM_PROTECTED_LBN));
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
+{
+ u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN];
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
+ loff_t offset, u8 *buffer, size_t length)
+{
+ u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
+ u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer, size_t length)
+{
+ u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
+ memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
+ ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
+ loff_t offset, size_t length)
+{
+ u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN];
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
+{
+ u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN];
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
+{
+ u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN];
+ u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN];
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+
+ switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
+ case MC_CMD_NVRAM_TEST_PASS:
+ case MC_CMD_NVRAM_TEST_NOTSUPP:
+ return 0;
+ default:
+ return -EIO;
+ }
+}
+
+int efx_mcdi_nvram_test_all(struct efx_nic *efx)
+{
+ u32 nvram_types;
+ unsigned int type;
+ int rc;
+
+ rc = efx_mcdi_nvram_types(efx, &nvram_types);
+ if (rc)
+ goto fail1;
+
+ type = 0;
+ while (nvram_types != 0) {
+ if (nvram_types & 1) {
+ rc = efx_mcdi_nvram_test(efx, type);
+ if (rc)
+ goto fail2;
+ }
+ type++;
+ nvram_types >>= 1;
+ }
+
+ return 0;
+
+fail2:
+ netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
+ __func__, type);
+fail1:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_read_assertion(struct efx_nic *efx)
+{
+ u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN];
+ u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN];
+ unsigned int flags, index, ofst;
+ const char *reason;
+ size_t outlen;
+ int retry;
+ int rc;
+
+	/* Attempt to read any stored assertion state before we reboot
+	 * the mcfw out of the assertion handler. Retry twice: once
+	 * because a boot-time assertion might cause this command to fail
+	 * with EINTR, and once more because GET_ASSERTS can race with
+	 * MC_CMD_REBOOT running on the other port. */
+ retry = 2;
+ do {
+ MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
+ inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
+ outbuf, sizeof(outbuf), &outlen);
+ } while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
+
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
+ return -EIO;
+
+ /* Print out any recorded assertion state */
+ flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
+ if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
+ return 0;
+
+ reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
+ ? "system-level assertion"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
+ ? "thread-level assertion"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
+ ? "watchdog reset"
+ : "unknown assertion";
+ netif_err(efx, hw, efx->net_dev,
+ "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
+ MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
+ MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
+
+ /* Print out the registers */
+ ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
+ for (index = 1; index < 32; index++) {
+ netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index,
+ MCDI_DWORD2(outbuf, ofst));
+ ofst += sizeof(efx_dword_t);
+ }
+
+ return 0;
+}
+
+static void efx_mcdi_exit_assertion(struct efx_nic *efx)
+{
+ u8 inbuf[MC_CMD_REBOOT_IN_LEN];
+
+ /* Atomically reboot the mcfw out of the assertion handler */
+ BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
+ MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
+ efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
+ NULL, 0, NULL);
+}
+
+int efx_mcdi_handle_assertion(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_mcdi_read_assertion(efx);
+ if (rc)
+ return rc;
+
+ efx_mcdi_exit_assertion(efx);
+
+ return 0;
+}
+
+void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+ u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN];
+ int rc;
+
+ BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
+ BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
+ BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);
+
+ BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);
+
+ MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
+}
+
+int efx_mcdi_reset_port(struct efx_nic *efx)
+{
+ int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL);
+ if (rc)
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_reset_mc(struct efx_nic *efx)
+{
+ u8 inbuf[MC_CMD_REBOOT_IN_LEN];
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+	/* White is black, and up is down: a successful REBOOT kills the
+	 * MC in mid-command, so -EIO here means the reboot worked, while
+	 * a clean 0 means the MC never went away. */
+ if (rc == -EIO)
+ return 0;
+ if (rc == 0)
+ rc = -EIO;
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
+ const u8 *mac, int *id_out)
+{
+ u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN];
+ u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN];
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
+ MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
+ MC_CMD_FILTER_MODE_SIMPLE);
+ memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);
+
+ return 0;
+
+fail:
+ *id_out = -1;
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+
+}
+
+
+int
+efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
+{
+ return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
+}
+
+
+int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
+{
+ u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN];
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
+
+ return 0;
+
+fail:
+ *id_out = -1;
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+
+int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
+{
+ u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN];
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+
+int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
new file mode 100644
index 000000000000..aced2a7856fc
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -0,0 +1,130 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2008-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_MCDI_H
+#define EFX_MCDI_H
+
+/**
+ * enum efx_mcdi_state
+ * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
+ * mcdi_lock then they are able to move to MCDI_STATE_RUNNING
+ * @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that
+ * moved into this state is allowed to move out of it.
+ * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
+ * has not yet consumed the result. For all other threads, equivalent to
+ * MCDI_STATE_RUNNING.
+ */
+enum efx_mcdi_state {
+ MCDI_STATE_QUIESCENT,
+ MCDI_STATE_RUNNING,
+ MCDI_STATE_COMPLETED,
+};
+
+enum efx_mcdi_mode {
+ MCDI_MODE_POLL,
+ MCDI_MODE_EVENTS,
+};
+
+/**
+ * struct efx_mcdi_iface
+ * @state: Interface state. Waited for by @wq.
+ * @wq: Wait queue for threads waiting for @state != MCDI_STATE_RUNNING
+ * @iface_lock: Protects @credits, @seqno, @resprc, @resplen
+ * @mode: Poll for mcdi completion, or wait for an mcdi_event.
+ *	Serialised by @iface_lock
+ * @seqno: The next sequence number to use for mcdi requests.
+ *	Serialised by @iface_lock
+ * @credits: Number of spurious MCDI completion events allowed before we
+ *	trigger a fatal error. Protected by @iface_lock
+ * @resprc: Returned MCDI completion
+ * @resplen: Returned payload length
+ */
+struct efx_mcdi_iface {
+ atomic_t state;
+ wait_queue_head_t wq;
+ spinlock_t iface_lock;
+ enum efx_mcdi_mode mode;
+ unsigned int credits;
+ unsigned int seqno;
+ unsigned int resprc;
+ size_t resplen;
+};
+
+extern void efx_mcdi_init(struct efx_nic *efx);
+
+extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
+ size_t inlen, u8 *outbuf, size_t outlen,
+ size_t *outlen_actual);
+
+extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
+extern void efx_mcdi_mode_poll(struct efx_nic *efx);
+extern void efx_mcdi_mode_event(struct efx_nic *efx);
+
+extern void efx_mcdi_process_event(struct efx_channel *channel,
+ efx_qword_t *event);
+
+#define MCDI_PTR2(_buf, _ofst) \
+ (((u8 *)_buf) + _ofst)
+#define MCDI_SET_DWORD2(_buf, _ofst, _value) \
+ EFX_POPULATE_DWORD_1(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \
+ EFX_DWORD_0, _value)
+#define MCDI_DWORD2(_buf, _ofst) \
+ EFX_DWORD_FIELD(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \
+ EFX_DWORD_0)
+#define MCDI_QWORD2(_buf, _ofst) \
+ EFX_QWORD_FIELD64(*((efx_qword_t *)MCDI_PTR2(_buf, _ofst)), \
+ EFX_QWORD_0)
+
+#define MCDI_PTR(_buf, _ofst) \
+ MCDI_PTR2(_buf, MC_CMD_ ## _ofst ## _OFST)
+#define MCDI_SET_DWORD(_buf, _ofst, _value) \
+ MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value)
+#define MCDI_DWORD(_buf, _ofst) \
+ MCDI_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST)
+#define MCDI_QWORD(_buf, _ofst) \
+ MCDI_QWORD2(_buf, MC_CMD_ ## _ofst ## _OFST)
+
+#define MCDI_EVENT_FIELD(_ev, _field) \
+ EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
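+
+/* For example, MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, mtu) token-pastes
+ * to MCDI_SET_DWORD2(cmdbytes, MC_CMD_SET_MAC_IN_MTU_OFST, mtu), i.e. it
+ * populates the little-endian dword at that byte offset of the request
+ * buffer. (Illustrative expansion; "mtu" is a caller-supplied value.) */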
+
+extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
+extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+ bool *was_attached_out);
+extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+ u16 *fw_subtype_list);
+extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
+ u32 dest_evq);
+extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
+extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
+ size_t *size_out, size_t *erase_size_out,
+ bool *protected_out);
+extern int efx_mcdi_nvram_update_start(struct efx_nic *efx,
+ unsigned int type);
+extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
+ loff_t offset, u8 *buffer, size_t length);
+extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer,
+ size_t length);
+#define EFX_MCDI_NVRAM_LEN_MAX 128
+extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
+ loff_t offset, size_t length);
+extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
+ unsigned int type);
+extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
+extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
+extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+extern int efx_mcdi_reset_port(struct efx_nic *efx);
+extern int efx_mcdi_reset_mc(struct efx_nic *efx);
+extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
+ const u8 *mac, int *id_out);
+extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
+extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
+extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
+
+#endif /* EFX_MCDI_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_mac.c b/drivers/net/ethernet/sfc/mcdi_mac.c
new file mode 100644
index 000000000000..50c20777a564
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_mac.c
@@ -0,0 +1,145 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2009-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "efx.h"
+#include "mac.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+
+static int efx_mcdi_set_mac(struct efx_nic *efx)
+{
+ u32 reject, fcntl;
+ u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN];
+
+ memcpy(cmdbytes + MC_CMD_SET_MAC_IN_ADDR_OFST,
+ efx->net_dev->dev_addr, ETH_ALEN);
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
+
+ /* The MCDI command provides for controlling accept/reject
+ * of broadcast packets too, but the driver doesn't currently
+ * expose this. */
+ reject = (efx->promiscuous) ? 0 :
+ (1 << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN);
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject);
+
+ switch (efx->wanted_fc) {
+ case EFX_FC_RX | EFX_FC_TX:
+ fcntl = MC_CMD_FCNTL_BIDIR;
+ break;
+ case EFX_FC_RX:
+ fcntl = MC_CMD_FCNTL_RESPOND;
+ break;
+ default:
+ fcntl = MC_CMD_FCNTL_OFF;
+ break;
+ }
+ if (efx->wanted_fc & EFX_FC_AUTO)
+ fcntl = MC_CMD_FCNTL_AUTO;
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
+
+ return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
+ NULL, 0, NULL);
+}
+
+static int efx_mcdi_get_mac_faults(struct efx_nic *efx, u32 *faults)
+{
+ u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ size_t outlength;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), &outlength);
+ if (rc)
+ goto fail;
+
+ *faults = MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT);
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
+ u32 dma_len, int enable, int clear)
+{
+ u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
+ int rc;
+ efx_dword_t *cmd_ptr;
+ int period = enable ? 1000 : 0;
+ u32 addr_hi;
+ u32 addr_lo;
+
+ BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_LEN != 0);
+
+ addr_lo = ((u64)dma_addr) >> 0;
+ addr_hi = ((u64)dma_addr) >> 32;
+
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
+ cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
+ EFX_POPULATE_DWORD_7(*cmd_ptr,
+ MC_CMD_MAC_STATS_CMD_DMA, !!enable,
+ MC_CMD_MAC_STATS_CMD_CLEAR, clear,
+ MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
+ MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, !!enable,
+ MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0,
+ MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT, 1,
+ MC_CMD_MAC_STATS_CMD_PERIOD_MS, period);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
+ __func__, enable ? "enable" : "disable", rc);
+ return rc;
+}
+
+static int efx_mcdi_mac_reconfigure(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_mcdi_set_mac(efx);
+ if (rc != 0)
+ return rc;
+
+ /* Restore the multicast hash registers. */
+ efx->type->push_multicast_hash(efx);
+
+ return 0;
+}
+
+
+static bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
+{
+ u32 faults;
+ int rc = efx_mcdi_get_mac_faults(efx, &faults);
+ return (rc != 0) || (faults != 0);
+}
+
+
+const struct efx_mac_operations efx_mcdi_mac_operations = {
+ .reconfigure = efx_mcdi_mac_reconfigure,
+ .update_stats = efx_port_dummy_op_void,
+ .check_fault = efx_mcdi_mac_check_fault,
+};
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
new file mode 100644
index 000000000000..41fe06fa0600
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -0,0 +1,1775 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2009-2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+
+#ifndef MCDI_PCOL_H
+#define MCDI_PCOL_H
+
+/* Values to be written into FMCR_CZ_RESET_STATE_REG to control boot. */
+/* Power-on reset state */
+#define MC_FW_STATE_POR (1)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash. */
+#define MC_FW_WARM_BOOT_OK (2)
+/* The MC main image has started to boot. */
+#define MC_FW_STATE_BOOTING (4)
+/* The Scheduler has started. */
+#define MC_FW_STATE_SCHED (8)
+
+/* Values to be written to the per-port status dword in shared
+ * memory on reboot and assert */
+#define MC_STATUS_DWORD_REBOOT (0xb007b007)
+#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
+
+/* The current version of the MCDI protocol.
+ *
+ * Note that the ROM burnt into the card only talks V0, so at the very
+ * least every driver must support version 0 and MCDI_PCOL_VERSION
+ */
+#define MCDI_PCOL_VERSION 1
+
+/**
+ * MCDI version 1
+ *
+ * Each MCDI request starts with an MCDI_HEADER, which is a 32-bit
+ * structure, filled in by the client.
+ *
+ * 0 7 8 16 20 22 23 24 31
+ * | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS |
+ * | | |
+ * | | \--- Response
+ * | \------- Error
+ * \------------------------------ Resync (always set)
+ *
+ * The client writes its request into MC shared memory, and rings the
+ * doorbell. Each request is completed either by the MC writing
+ * back into shared memory, or by writing out an event.
+ *
+ * All MCDI commands support completion by shared memory response. Each
+ * request may also contain additional data (accounted for by HEADER.LEN),
+ * and some responses may also contain additional data (again, accounted
+ * for by HEADER.LEN).
+ *
+ * Some MCDI commands support completion by event, in which case any
+ * associated response data is included in the event.
+ *
+ * The protocol requires one response to be delivered for every request;
+ * a request must not be sent until the response for the previous request
+ * has been received (either by polling shared memory, or by receiving
+ * an event).
+ */
+
+/** Request/Response structure */
+#define MCDI_HEADER_OFST 0
+#define MCDI_HEADER_CODE_LBN 0
+#define MCDI_HEADER_CODE_WIDTH 7
+#define MCDI_HEADER_RESYNC_LBN 7
+#define MCDI_HEADER_RESYNC_WIDTH 1
+#define MCDI_HEADER_DATALEN_LBN 8
+#define MCDI_HEADER_DATALEN_WIDTH 8
+#define MCDI_HEADER_SEQ_LBN 16
+#define MCDI_HEADER_SEQ_WIDTH 4
+#define MCDI_HEADER_RSVD_LBN 20
+#define MCDI_HEADER_RSVD_WIDTH 2
+#define MCDI_HEADER_ERROR_LBN 22
+#define MCDI_HEADER_ERROR_WIDTH 1
+#define MCDI_HEADER_RESPONSE_LBN 23
+#define MCDI_HEADER_RESPONSE_WIDTH 1
+#define MCDI_HEADER_XFLAGS_LBN 24
+#define MCDI_HEADER_XFLAGS_WIDTH 8
+/* Request response using event */
+#define MCDI_HEADER_XFLAGS_EVREQ 0x01
+
+/* Maximum number of payload bytes */
+#define MCDI_CTL_SDU_LEN_MAX 0xfc
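+
+/* A sketch of how a v1 request header might be assembled from the field
+ * definitions above, using the driver's bitfield helpers; cmd, inlen,
+ * seqno and evreq are placeholders supplied by the caller:
+ *
+ *	efx_dword_t hdr;
+ *	EFX_POPULATE_DWORD_6(hdr,
+ *			     MCDI_HEADER_RESPONSE, 0,
+ *			     MCDI_HEADER_RESYNC, 1,
+ *			     MCDI_HEADER_CODE, cmd,
+ *			     MCDI_HEADER_DATALEN, inlen,
+ *			     MCDI_HEADER_SEQ, seqno,
+ *			     MCDI_HEADER_XFLAGS,
+ *			     evreq ? MCDI_HEADER_XFLAGS_EVREQ : 0);
+ */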
+
+/* The MC can generate events for two reasons:
+ * - To complete a shared memory request if XFLAGS_EVREQ was set
+ * - As a notification (link state, i2c event), controlled
+ * via MC_CMD_LOG_CTRL
+ *
+ * Both events share a common structure:
+ *
+ * 0 32 33 36 44 52 60
+ * | Data | Cont | Level | Src | Code | Rsvd |
+ * |
+ * \ There is another event pending in this notification
+ *
+ * If Code==CMDDONE, then the fields are further interpreted as:
+ *
+ * - LEVEL==INFO Command succeeded
+ * - LEVEL==ERR Command failed
+ *
+ * 0 8 16 24 32
+ * | Seq | Datalen | Errno | Rsvd |
+ *
+ * These fields are taken directly out of the standard MCDI header, i.e.,
+ * LEVEL==ERR, Datalen == 0 => Reboot
+ *
+ * Events can be squirted out of the UART (using LOG_CTRL) without an
+ * MCDI header. An event can be distinguished from an MCDI response by
+ * examining the first byte, which is 0xc0. This corresponds to the
+ * non-existent MCDI command MC_CMD_DEBUG_LOG.
+ *
+ * 0 7 8
+ * | command | Resync | = 0xc0
+ *
+ * Since the event is written in big-endian byte order, this works
+ * provided bits 56-63 of the event are 0xc0.
+ *
+ * 56 60 63
+ * | Rsvd | Code | = 0xc0
+ *
+ * This means that, for convenience, the event code is 0xc for all
+ * MC-generated events.
+ */
+#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
+
+#define MCDI_EVENT_DATA_LBN 0
+#define MCDI_EVENT_DATA_WIDTH 32
+#define MCDI_EVENT_CONT_LBN 32
+#define MCDI_EVENT_CONT_WIDTH 1
+#define MCDI_EVENT_LEVEL_LBN 33
+#define MCDI_EVENT_LEVEL_WIDTH 3
+#define MCDI_EVENT_LEVEL_INFO (0)
+#define MCDI_EVENT_LEVEL_WARN (1)
+#define MCDI_EVENT_LEVEL_ERR (2)
+#define MCDI_EVENT_LEVEL_FATAL (3)
+#define MCDI_EVENT_SRC_LBN 36
+#define MCDI_EVENT_SRC_WIDTH 8
+#define MCDI_EVENT_CODE_LBN 44
+#define MCDI_EVENT_CODE_WIDTH 8
+#define MCDI_EVENT_CODE_BADSSERT (1)
+#define MCDI_EVENT_CODE_PMNOTICE (2)
+#define MCDI_EVENT_CODE_CMDDONE (3)
+#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
+#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
+#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
+#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8
+#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16
+#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8
+#define MCDI_EVENT_CODE_LINKCHANGE (4)
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+#define MCDI_EVENT_LINKCHANGE_SPEED_100M 1
+#define MCDI_EVENT_LINKCHANGE_SPEED_1G 2
+#define MCDI_EVENT_LINKCHANGE_SPEED_10G 3
+#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
+#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
+#define MCDI_EVENT_CODE_SENSOREVT (5)
+#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
+#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_STATE_LBN 8
+#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16
+#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16
+#define MCDI_EVENT_CODE_SCHEDERR (6)
+#define MCDI_EVENT_CODE_REBOOT (7)
+#define MCDI_EVENT_CODE_MAC_STATS_DMA (8)
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
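+
+/* Sketch of decoding a CMDDONE completion with MCDI_EVENT_FIELD() from
+ * mcdi.h; the local names are illustrative:
+ *
+ *	seqno = MCDI_EVENT_FIELD(*event, CMDDONE_SEQ);
+ *	datalen = MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN);
+ *	errno = MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO);
+ *
+ * Per the description above, LEVEL==ERR with DATALEN==0 indicates an MC
+ * reboot rather than an ordinary command failure.
+ */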
+
+/* Non-existent command target */
+#define MC_CMD_ERR_ENOENT 2
+/* assert() has killed the MC */
+#define MC_CMD_ERR_EINTR 4
+/* Caller does not hold required locks */
+#define MC_CMD_ERR_EACCES 13
+/* Resource is currently unavailable (e.g. lock contention) */
+#define MC_CMD_ERR_EBUSY 16
+/* Invalid argument to target */
+#define MC_CMD_ERR_EINVAL 22
+/* Non-recursive resource is already acquired */
+#define MC_CMD_ERR_EDEADLK 35
+/* Operation not implemented */
+#define MC_CMD_ERR_ENOSYS 38
+/* Operation timed out */
+#define MC_CMD_ERR_ETIME 62
+
+#define MC_CMD_ERR_CODE_OFST 0
+
+
+/* MC_CMD_READ32: (debug, variadic out)
+ * Read multiple 32bit words from MC memory
+ */
+#define MC_CMD_READ32 0x01
+#define MC_CMD_READ32_IN_LEN 8
+#define MC_CMD_READ32_IN_ADDR_OFST 0
+#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
+#define MC_CMD_READ32_OUT_LEN(_numwords) \
+ (4 * (_numwords))
+#define MC_CMD_READ32_OUT_BUFFER_OFST 0
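+
+/* Illustrative host-side use (debug only), following the same
+ * efx_mcdi_rpc() pattern as the driver's other helpers; addr and the
+ * word count of 4 are placeholders:
+ *
+ *	u8 inbuf[MC_CMD_READ32_IN_LEN];
+ *	u8 outbuf[MC_CMD_READ32_OUT_LEN(4)];
+ *
+ *	MCDI_SET_DWORD(inbuf, READ32_IN_ADDR, addr);
+ *	MCDI_SET_DWORD(inbuf, READ32_IN_NUMWORDS, 4);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_READ32, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ */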
+
+/* MC_CMD_WRITE32: (debug, variadic in)
+ * Write multiple 32bit words to MC memory
+ */
+#define MC_CMD_WRITE32 0x02
+#define MC_CMD_WRITE32_IN_LEN(_numwords) (((_numwords) * 4) + 4)
+#define MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
+#define MC_CMD_WRITE32_OUT_LEN 0
+
+/* MC_CMD_COPYCODE: (debug)
+ * Copy MC code between two locations and jump
+ */
+#define MC_CMD_COPYCODE 0x03
+#define MC_CMD_COPYCODE_IN_LEN 16
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
+/* Control should return to the caller rather than jumping */
+#define MC_CMD_COPYCODE_JUMP_NONE 1
+#define MC_CMD_COPYCODE_OUT_LEN 0
+
+/* MC_CMD_SET_FUNC: (debug)
+ * Select function for function-specific commands.
+ */
+#define MC_CMD_SET_FUNC 0x04
+#define MC_CMD_SET_FUNC_IN_LEN 4
+#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+#define MC_CMD_SET_FUNC_OUT_LEN 0
+
+/* MC_CMD_GET_BOOT_STATUS:
+ * Get the instruction address from which the MC booted.
+ */
+#define MC_CMD_GET_BOOT_STATUS 0x05
+#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+/* Reboot caused by watchdog */
+#define MC_CMD_GET_BOOT_STATUS_FLAGS_WATCHDOG_LBN (0)
+#define MC_CMD_GET_BOOT_STATUS_FLAGS_WATCHDOG_WIDTH (1)
+/* MC booted from primary flash partition */
+#define MC_CMD_GET_BOOT_STATUS_FLAGS_PRIMARY_LBN (1)
+#define MC_CMD_GET_BOOT_STATUS_FLAGS_PRIMARY_WIDTH (1)
+/* MC booted from backup flash partition */
+#define MC_CMD_GET_BOOT_STATUS_FLAGS_BACKUP_LBN (2)
+#define MC_CMD_GET_BOOT_STATUS_FLAGS_BACKUP_WIDTH (1)
+
+/* MC_CMD_GET_ASSERTS: (debug, variadic out)
+ * Get (and optionally clear) the current assertion status.
+ *
+ * Only OUT.GLOBAL_FLAGS is guaranteed to exist in the completion
+ * payload. The other fields will only be present if
+ * OUT.GLOBAL_FLAGS != NO_FAILS
+ */
+#define MC_CMD_GET_ASSERTS 0x06
+#define MC_CMD_GET_ASSERTS_IN_LEN 4
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+#define MC_CMD_GET_ASSERTS_OUT_LEN 140
+/* Assertion status flag */
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+/*! No assertions have failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 1
+/*! A system-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 2
+/*! A thread-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 3
+/*! The system was reset by the watchdog. */
+#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 4
+/* Failing PC value */
+#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+/* Saved GP regs */
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_LEN 124
+/* Failing thread address */
+#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+
+/* MC_CMD_LOG_CTRL:
+ * Determine the output stream for various events and messages
+ */
+#define MC_CMD_LOG_CTRL 0x07
+#define MC_CMD_LOG_CTRL_IN_LEN 8
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART (1)
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ (2)
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+#define MC_CMD_LOG_CTRL_OUT_LEN 0
+
+/* MC_CMD_GET_VERSION:
+ * Get version information about the MC firmware
+ */
+#define MC_CMD_GET_VERSION 0x08
+#define MC_CMD_GET_VERSION_IN_LEN 0
+#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
+#define MC_CMD_GET_VERSION_V1_OUT_LEN 32
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+/* Reserved version number to indicate "any" version. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
+/* The version response of a boot ROM awaiting rescue */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM 0xb0070000
+#define MC_CMD_GET_VERSION_V1_OUT_PCOL_OFST 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_V1_OUT_SUPPORTED_FUNCS_OFST 8
+/* The command set exported by the boot ROM (MCDI v0) */
+#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
+ (1 << MC_CMD_READ32) | \
+ (1 << MC_CMD_WRITE32) | \
+ (1 << MC_CMD_COPYCODE) | \
+ (1 << MC_CMD_GET_VERSION), \
+ 0, 0, 0 }
+#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
+
+/* Vectors in the boot ROM */
+/* Point to the copycode entry point. */
+#define MC_BOOTROM_COPYCODE_VEC (0x7f4)
+/* Points to the recovery mode entry point. */
+#define MC_BOOTROM_NOFLASH_VEC (0x7f8)
+
+/* Test execution limits */
+#define MC_TESTEXEC_VARIANT_COUNT 16
+#define MC_TESTEXEC_RESULT_COUNT 7
+
+/* MC_CMD_SET_TESTVARS: (debug, variadic in)
+ * Write variant words for test.
+ *
+ * The user supplies a bitmap of the variants they wish to set.
+ * They must ensure that IN.LEN >= 4 + 4 * ffs(BITMAP)
+ */
+#define MC_CMD_SET_TESTVARS 0x09
+#define MC_CMD_SET_TESTVARS_IN_LEN(_numwords) \
+ (4 + 4*(_numwords))
+#define MC_CMD_SET_TESTVARS_IN_ARGS_BITMAP_OFST 0
+/* Up to MC_TESTEXEC_VARIANT_COUNT of 32bit words start here */
+#define MC_CMD_SET_TESTVARS_IN_ARGS_BUFFER_OFST 4
+#define MC_CMD_SET_TESTVARS_OUT_LEN 0
+
+/* MC_CMD_GET_TESTRCS: (debug, variadic out)
+ * Return result words from test.
+ */
+#define MC_CMD_GET_TESTRCS 0x0a
+#define MC_CMD_GET_TESTRCS_IN_LEN 4
+#define MC_CMD_GET_TESTRCS_IN_NUMWORDS_OFST 0
+#define MC_CMD_GET_TESTRCS_OUT_LEN(_numwords) \
+ (4 * (_numwords))
+#define MC_CMD_GET_TESTRCS_OUT_BUFFER_OFST 0
+
+/* MC_CMD_RUN_TEST: (debug)
+ * Run the test exported by this firmware image
+ */
+#define MC_CMD_RUN_TEST 0x0b
+#define MC_CMD_RUN_TEST_IN_LEN 0
+#define MC_CMD_RUN_TEST_OUT_LEN 0
+
+/* MC_CMD_CSR_READ32: (debug, variadic out)
+ * Read 32bit words from the indirect memory map
+ */
+#define MC_CMD_CSR_READ32 0x0c
+#define MC_CMD_CSR_READ32_IN_LEN 12
+#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+#define MC_CMD_CSR_READ32_OUT_LEN(_numwords) \
+ (((_numwords) * 4) + 4)
+/* IN.NUMWORDS of 32bit words start here */
+#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_CSR_READ32_OUT_IREG_STATUS_OFST(_numwords) \
+ ((_numwords) * 4)
+
+/* MC_CMD_CSR_WRITE32: (debug, variadic in)
+ * Write 32bit dwords to the indirect memory map
+ */
+#define MC_CMD_CSR_WRITE32 0x0d
+#define MC_CMD_CSR_WRITE32_IN_LEN(_numwords) \
+ (((_numwords) * 4) + 8)
+#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+/* Multiple 32bit words of data to write start here */
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
+#define MC_CMD_CSR_WRITE32_OUT_LEN 4
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+
+/* MC_CMD_JTAG_WORK: (debug, fpga only)
+ * Process JTAG work buffer for RBF acceleration.
+ *
+ * Host: bit count, (up to) 32 words of data to clock out to JTAG
+ * (bits 1,0=TMS,TDO for first bit; bits 3,2=TMS,TDO for second bit, etc.)
+ * MC: bit count, (up to) 32 words of data clocked in from JTAG
+ * (bit 0=TDI for first bit, bit 1=TDI for second bit, etc.; [31:16] unused)
+ */
+#define MC_CMD_JTAG_WORK 0x0e
+
+/* MC_CMD_STACKINFO: (debug, variadic out)
+ * Get stack information
+ *
+ * Host: nothing
+ * MC: (thread ptr, stack size, free space) for each thread in system
+ */
+#define MC_CMD_STACKINFO 0x0f
+
+/* MC_CMD_MDIO_READ:
+ * MDIO register read
+ */
+#define MC_CMD_MDIO_READ 0x10
+#define MC_CMD_MDIO_READ_IN_LEN 16
+#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
+#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_READ_OUT_LEN 8
+#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+
+/* MC_CMD_MDIO_WRITE:
+ * MDIO register write
+ */
+#define MC_CMD_MDIO_WRITE 0x11
+#define MC_CMD_MDIO_WRITE_IN_LEN 20
+#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+#define MC_CMD_MDIO_WRITE_OUT_LEN 4
+#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+
+/* By default all the MCDI MDIO operations are performed in clause45 mode.
+ * If you want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+#define MC_CMD_MDIO_CLAUSE22 32
+
+/* There are two MDIO buses: one for the internal PHY, and one for external
+ * devices.
+ */
+#define MC_CMD_MDIO_BUS_INTERNAL 0
+#define MC_CMD_MDIO_BUS_EXTERNAL 1
+
+/* The MDIO commands return the raw status bits from the MDIO block. A "good"
+ * transaction should have the DONE bit set and all other bits clear.
+ */
+#define MC_CMD_MDIO_STATUS_GOOD 0x08
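+
+/* Sketch of a clause45 read on the external bus using the defines above;
+ * prtad, devad and addr are caller-supplied, and the usual efx_mcdi_rpc()
+ * buffers are assumed:
+ *
+ *	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, MC_CMD_MDIO_BUS_EXTERNAL);
+ *	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
+ *	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
+ *	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
+ *	    MC_CMD_MDIO_STATUS_GOOD)
+ *		rc = -EIO;
+ */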
+
+
+/* MC_CMD_DBI_WRITE: (debug)
+ * Write DBI register(s)
+ *
+ * Host: address, byte-enables (and VF selection, and cs2 flag),
+ * value [,address ...]
+ * MC: nothing
+ */
+#define MC_CMD_DBI_WRITE 0x12
+#define MC_CMD_DBI_WRITE_IN_LEN(_numwords) \
+ (12 * (_numwords))
+#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(_word) \
+ (((_word) * 12) + 0)
+#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(_word) \
+ (((_word) * 12) + 4)
+#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(_word) \
+ (((_word) * 12) + 8)
+#define MC_CMD_DBI_WRITE_OUT_LEN 0
+
+/* MC_CMD_DBI_READ: (debug)
+ * Read DBI register(s)
+ *
+ * Host: address, [,address ...]
+ * MC: value [,value ...]
+ * (note: this does not support reading from VFs, but is retained for backwards
+ * compatibility; see MC_CMD_DBI_READX below)
+ */
+#define MC_CMD_DBI_READ 0x13
+#define MC_CMD_DBI_READ_IN_LEN(_numwords) \
+ (4 * (_numwords))
+#define MC_CMD_DBI_READ_OUT_LEN(_numwords) \
+ (4 * (_numwords))
+
+/* MC_CMD_PORT_READ32: (debug)
+ * Read a 32-bit register from the indirect port register map.
+ *
+ * The port to access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ32 0x14
+#define MC_CMD_PORT_READ32_IN_LEN 4
+#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ32_OUT_LEN 8
+#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+
+/* MC_CMD_PORT_WRITE32: (debug)
+ * Write a 32-bit register to the indirect port register map.
+ *
+ * The port to access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE32 0x15
+#define MC_CMD_PORT_WRITE32_IN_LEN 8
+#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE32_OUT_LEN 4
+#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+
+/* MC_CMD_PORT_READ128: (debug)
+ * Read a 128-bit register from indirect port register map
+ *
+ * The port to access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ128 0x16
+#define MC_CMD_PORT_READ128_IN_LEN 4
+#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ128_OUT_LEN 20
+#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+
+/* MC_CMD_PORT_WRITE128: (debug)
+ * Write a 128-bit register to indirect port register map.
+ *
+ * The port to access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE128 0x17
+#define MC_CMD_PORT_WRITE128_IN_LEN 20
+#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE128_OUT_LEN 4
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+
+/* MC_CMD_GET_BOARD_CFG:
+ * Returns the MC firmware configuration structure
+ *
+ * The FW_SUBTYPE_LIST contains a 16-bit value for each of the 12 types of
+ * NVRAM area. The values are defined in the firmware/mc/platform/<xxx>.c file
+ * for a specific board type, but otherwise have no meaning to the MC; they
+ * are used by the driver to manage selection of appropriate firmware updates.
+ */
+#define MC_CMD_GET_BOARD_CFG 0x18
+#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
+#define MC_CMD_GET_BOARD_CFG_OUT_LEN 96
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 24
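+
+/* Sketch of picking the per-port MAC address base out of the response;
+ * the port-selection test is illustrative:
+ *
+ *	memcpy(mac_address,
+ *	       outbuf + (port0
+ *			 ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST
+ *			 : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST),
+ *	       ETH_ALEN);
+ */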
+
+/* MC_CMD_DBI_READX: (debug)
+ * Read DBI register(s) -- extended functionality
+ *
+ * Host: vf selection, address, [,vf selection ...]
+ * MC: value [,value ...]
+ */
+#define MC_CMD_DBI_READX 0x19
+#define MC_CMD_DBI_READX_IN_LEN(_numwords) \
+ (8*(_numwords))
+#define MC_CMD_DBI_READX_OUT_LEN(_numwords) \
+ (4*(_numwords))
+
+/* MC_CMD_SET_RAND_SEED:
+ * Set the 16byte seed for the MC pseudo-random generator
+ */
+#define MC_CMD_SET_RAND_SEED 0x1a
+#define MC_CMD_SET_RAND_SEED_IN_LEN 16
+#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
+#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
+
+/* MC_CMD_LTSSM_HIST: (debug)
+ * Retrieve the history of the LTSSM, if the build supports it.
+ *
+ * Host: nothing
+ * MC: variable number of LTSSM values, as bytes
+ * The history is read-to-clear.
+ */
+#define MC_CMD_LTSSM_HIST 0x1b
+
+/* MC_CMD_DRV_ATTACH:
+ * Inform MCPU that this port is managed on the host (i.e. driver active)
+ */
+#define MC_CMD_DRV_ATTACH 0x1c
+#define MC_CMD_DRV_ATTACH_IN_LEN 8
+#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+#define MC_CMD_DRV_ATTACH_OUT_LEN 4
+#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+
+/* MC_CMD_NCSI_PROD: (debug)
+ * Trigger an NC-SI event (and possibly an AEN in response)
+ */
+#define MC_CMD_NCSI_PROD 0x1d
+#define MC_CMD_NCSI_PROD_IN_LEN 4
+#define MC_CMD_NCSI_PROD_IN_EVENTS_OFST 0
+#define MC_CMD_NCSI_PROD_LINKCHANGE_LBN 0
+#define MC_CMD_NCSI_PROD_LINKCHANGE_WIDTH 1
+#define MC_CMD_NCSI_PROD_RESET_LBN 1
+#define MC_CMD_NCSI_PROD_RESET_WIDTH 1
+#define MC_CMD_NCSI_PROD_DRVATTACH_LBN 2
+#define MC_CMD_NCSI_PROD_DRVATTACH_WIDTH 1
+#define MC_CMD_NCSI_PROD_OUT_LEN 0
+
+/* Enumeration */
+#define MC_CMD_NCSI_PROD_LINKCHANGE 0
+#define MC_CMD_NCSI_PROD_RESET 1
+#define MC_CMD_NCSI_PROD_DRVATTACH 2
+
+/* MC_CMD_DEVEL: (debug)
+ * Reserved for development
+ */
+#define MC_CMD_DEVEL 0x1e
+
+/* MC_CMD_SHMUART: (debug)
+ * Route UART output to circular buffer in shared memory instead.
+ */
+#define MC_CMD_SHMUART 0x1f
+#define MC_CMD_SHMUART_IN_FLAG_OFST 0
+#define MC_CMD_SHMUART_IN_LEN 4
+#define MC_CMD_SHMUART_OUT_LEN 0
+
+/* MC_CMD_PORT_RESET:
+ * Generic per-port reset. There is no equivalent for per-board reset.
+ *
+ * Locks required: None
+ * Return code: 0, ETIME
+ */
+#define MC_CMD_PORT_RESET 0x20
+#define MC_CMD_PORT_RESET_IN_LEN 0
+#define MC_CMD_PORT_RESET_OUT_LEN 0
+
+/* MC_CMD_RESOURCE_LOCK:
+ * Generic resource lock/unlock interface.
+ *
+ * Locks required: None
+ * Return code: 0,
+ * EBUSY (if trylock is contended by other port),
+ * EDEADLK (if trylock is already acquired by this port)
+ * EINVAL (if unlock doesn't own the lock)
+ */
+#define MC_CMD_RESOURCE_LOCK 0x21
+#define MC_CMD_RESOURCE_LOCK_IN_LEN 8
+#define MC_CMD_RESOURCE_LOCK_IN_ACTION_OFST 0
+#define MC_CMD_RESOURCE_LOCK_ACTION_TRYLOCK 1
+#define MC_CMD_RESOURCE_LOCK_ACTION_UNLOCK 0
+#define MC_CMD_RESOURCE_LOCK_IN_RESOURCE_OFST 4
+#define MC_CMD_RESOURCE_LOCK_I2C 2
+#define MC_CMD_RESOURCE_LOCK_PHY 3
+#define MC_CMD_RESOURCE_LOCK_OUT_LEN 0
+
+/* MC_CMD_SPI_COMMAND: (variadic in, variadic out)
+ * Read/Write to/from the SPI device.
+ *
+ * Locks required: SPI_LOCK
+ * Return code: 0, ETIME, EINVAL, EACCES (if SPI_LOCK is not held)
+ */
+#define MC_CMD_SPI_COMMAND 0x22
+#define MC_CMD_SPI_COMMAND_IN_LEN(_write_bytes) (12 + (_write_bytes))
+#define MC_CMD_SPI_COMMAND_IN_ARGS_OFST 0
+#define MC_CMD_SPI_COMMAND_IN_ARGS_ADDRESS_OFST 0
+#define MC_CMD_SPI_COMMAND_IN_ARGS_READ_BYTES_OFST 4
+#define MC_CMD_SPI_COMMAND_IN_ARGS_CHIP_SELECT_OFST 8
+/* Data to write here */
+#define MC_CMD_SPI_COMMAND_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_SPI_COMMAND_OUT_LEN(_read_bytes) (_read_bytes)
+/* Data read here */
+#define MC_CMD_SPI_COMMAND_OUT_READ_BUFFER_OFST 0
+
+/* MC_CMD_I2C_READ_WRITE: (variadic in, variadic out)
+ * Read/Write to/from the I2C bus.
+ *
+ * Locks required: I2C_LOCK
+ * Return code: 0, ETIME, EINVAL, EACCES (if I2C_LOCK is not held)
+ */
+#define MC_CMD_I2C_RW 0x23
+#define MC_CMD_I2C_RW_IN_LEN(_write_bytes) (8 + (_write_bytes))
+#define MC_CMD_I2C_RW_IN_ARGS_OFST 0
+#define MC_CMD_I2C_RW_IN_ARGS_ADDR_OFST 0
+#define MC_CMD_I2C_RW_IN_ARGS_READ_BYTES_OFST 4
+/* Data to write here */
+#define MC_CMD_I2C_RW_IN_WRITE_BUFFER_OFSET 8
+#define MC_CMD_I2C_RW_OUT_LEN(_read_bytes) (_read_bytes)
+/* Data read here */
+#define MC_CMD_I2C_RW_OUT_READ_BUFFER_OFST 0
+
+/* Generic phy capability bitmask */
+#define MC_CMD_PHY_CAP_10HDX_LBN 1
+#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10FDX_LBN 2
+#define MC_CMD_PHY_CAP_10FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100HDX_LBN 3
+#define MC_CMD_PHY_CAP_100HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100FDX_LBN 4
+#define MC_CMD_PHY_CAP_100FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000HDX_LBN 5
+#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000FDX_LBN 6
+#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10000FDX_LBN 7
+#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_PAUSE_LBN 8
+#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1
+#define MC_CMD_PHY_CAP_ASYM_LBN 9
+#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
+#define MC_CMD_PHY_CAP_AN_LBN 10
+#define MC_CMD_PHY_CAP_AN_WIDTH 1
+
+/* Generic loopback enumeration */
+#define MC_CMD_LOOPBACK_NONE 0
+#define MC_CMD_LOOPBACK_DATA 1
+#define MC_CMD_LOOPBACK_GMAC 2
+#define MC_CMD_LOOPBACK_XGMII 3
+#define MC_CMD_LOOPBACK_XGXS 4
+#define MC_CMD_LOOPBACK_XAUI 5
+#define MC_CMD_LOOPBACK_GMII 6
+#define MC_CMD_LOOPBACK_SGMII 7
+#define MC_CMD_LOOPBACK_XGBR 8
+#define MC_CMD_LOOPBACK_XFI 9
+#define MC_CMD_LOOPBACK_XAUI_FAR 10
+#define MC_CMD_LOOPBACK_GMII_FAR 11
+#define MC_CMD_LOOPBACK_SGMII_FAR 12
+#define MC_CMD_LOOPBACK_XFI_FAR 13
+#define MC_CMD_LOOPBACK_GPHY 14
+#define MC_CMD_LOOPBACK_PHYXS 15
+#define MC_CMD_LOOPBACK_PCS 16
+#define MC_CMD_LOOPBACK_PMAPMD 17
+#define MC_CMD_LOOPBACK_XPORT 18
+#define MC_CMD_LOOPBACK_XGMII_WS 19
+#define MC_CMD_LOOPBACK_XAUI_WS 20
+#define MC_CMD_LOOPBACK_XAUI_WS_FAR 21
+#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 22
+#define MC_CMD_LOOPBACK_GMII_WS 23
+#define MC_CMD_LOOPBACK_XFI_WS 24
+#define MC_CMD_LOOPBACK_XFI_WS_FAR 25
+#define MC_CMD_LOOPBACK_PHYXS_WS 26
+
+/* Generic PHY statistics enumeration */
+#define MC_CMD_OUI 0
+#define MC_CMD_PMA_PMD_LINK_UP 1
+#define MC_CMD_PMA_PMD_RX_FAULT 2
+#define MC_CMD_PMA_PMD_TX_FAULT 3
+#define MC_CMD_PMA_PMD_SIGNAL 4
+#define MC_CMD_PMA_PMD_SNR_A 5
+#define MC_CMD_PMA_PMD_SNR_B 6
+#define MC_CMD_PMA_PMD_SNR_C 7
+#define MC_CMD_PMA_PMD_SNR_D 8
+#define MC_CMD_PCS_LINK_UP 9
+#define MC_CMD_PCS_RX_FAULT 10
+#define MC_CMD_PCS_TX_FAULT 11
+#define MC_CMD_PCS_BER 12
+#define MC_CMD_PCS_BLOCK_ERRORS 13
+#define MC_CMD_PHYXS_LINK_UP 14
+#define MC_CMD_PHYXS_RX_FAULT 15
+#define MC_CMD_PHYXS_TX_FAULT 16
+#define MC_CMD_PHYXS_ALIGN 17
+#define MC_CMD_PHYXS_SYNC 18
+#define MC_CMD_AN_LINK_UP 19
+#define MC_CMD_AN_COMPLETE 20
+#define MC_CMD_AN_10GBT_STATUS 21
+#define MC_CMD_CL22_LINK_UP 22
+#define MC_CMD_PHY_NSTATS 23
+
+/* MC_CMD_GET_PHY_CFG:
+ * Report PHY configuration. This is guaranteed to succeed even if the
+ * PHY is in a "zombie" state.
+ *
+ * Locks required: None
+ * Return code: 0
+ */
+#define MC_CMD_GET_PHY_CFG 0x24
+
+#define MC_CMD_GET_PHY_CFG_IN_LEN 0
+#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
+
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PHY_CFG_PRESENT_LBN 0
+#define MC_CMD_GET_PHY_CFG_PRESENT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN 1
+#define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN 2
+#define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_LOWPOWER_LBN 3
+#define MC_CMD_GET_PHY_CFG_LOWPOWER_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_POWEROFF_LBN 4
+#define MC_CMD_GET_PHY_CFG_POWEROFF_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_TXDIS_LBN 5
+#define MC_CMD_GET_PHY_CFG_TXDIS_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_BIST_LBN 6
+#define MC_CMD_GET_PHY_CFG_BIST_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+/* Bitmask of supported capabilities */
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+/* PHY statistics bitmap */
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+/* PHY type/name string */
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+#define MC_CMD_MEDIA_XAUI 1
+#define MC_CMD_MEDIA_CX4 2
+#define MC_CMD_MEDIA_KX4 3
+#define MC_CMD_MEDIA_XFP 4
+#define MC_CMD_MEDIA_SFP_PLUS 5
+#define MC_CMD_MEDIA_BASE_T 6
+/* MDIO "MMDS" supported */
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+/* Native clause 22 */
+#define MC_CMD_MMD_CLAUSE22 0
+#define MC_CMD_MMD_CLAUSE45_PMAPMD 1
+#define MC_CMD_MMD_CLAUSE45_WIS 2
+#define MC_CMD_MMD_CLAUSE45_PCS 3
+#define MC_CMD_MMD_CLAUSE45_PHYXS 4
+#define MC_CMD_MMD_CLAUSE45_DTEXS 5
+#define MC_CMD_MMD_CLAUSE45_TC 6
+#define MC_CMD_MMD_CLAUSE45_AN 7
+/* Clause22 proxied over clause45 by PHY */
+#define MC_CMD_MMD_CLAUSE45_C22EXT 29
+#define MC_CMD_MMD_CLAUSE45_VEND1 30
+#define MC_CMD_MMD_CLAUSE45_VEND2 31
+/* PHY stepping version */
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
+
+/* MC_CMD_START_BIST:
+ * Start a BIST test on the PHY.
+ *
+ * Locks required: PHY_LOCK if doing a PHY BIST
+ * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
+ */
+#define MC_CMD_START_BIST 0x25
+#define MC_CMD_START_BIST_IN_LEN 4
+#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define MC_CMD_START_BIST_OUT_LEN 0
+
+/* Run the PHY's short cable BIST */
+#define MC_CMD_PHY_BIST_CABLE_SHORT 1
+/* Run the PHY's long cable BIST */
+#define MC_CMD_PHY_BIST_CABLE_LONG 2
+/* Run BIST on the currently selected BPX Serdes (XAUI or XFI) */
+#define MC_CMD_BPX_SERDES_BIST 3
+/* Run the MC loopback tests */
+#define MC_CMD_MC_LOOPBACK_BIST 4
+/* Run the PHY's standard BIST */
+#define MC_CMD_PHY_BIST 5
+
+/* MC_CMD_POLL_BIST: (variadic output)
+ * Poll for BIST completion
+ *
+ * Returns a single status code, and optionally some PHY specific
+ * bist output. The driver should only consume the BIST output
+ * after validating OUTLEN and PHY_CFG.PHY_TYPE.
+ *
+ * If a driver can't successfully parse the BIST output, it should
+ * still respect the pass/fail in OUT.RESULT
+ *
+ * Locks required: PHY_LOCK if doing a PHY BIST
+ * Return code: 0, EACCES (if PHY_LOCK is not held)
+ */
+#define MC_CMD_POLL_BIST 0x26
+#define MC_CMD_POLL_BIST_IN_LEN 0
+#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
+#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
+#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+#define MC_CMD_POLL_BIST_RUNNING 1
+#define MC_CMD_POLL_BIST_PASSED 2
+#define MC_CMD_POLL_BIST_FAILED 3
+#define MC_CMD_POLL_BIST_TIMEOUT 4
+/* Generic: */
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+/* SFT9001-specific: */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 4
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 9
+/* mrsfp "PHY" driver: */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 1
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 2
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 3
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 4
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 5
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 6
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 7
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 8
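+
+/* Sketch of driving a BIST with the two commands above; the BIST type and
+ * output buffer size are illustrative (real callers size outbuf according
+ * to PHY_CFG.PHY_TYPE):
+ *
+ *	MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, MC_CMD_PHY_BIST);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ *	do {
+ *		rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+ *				  outbuf, sizeof(outbuf), &outlen);
+ *	} while (rc == 0 &&
+ *		 MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT) ==
+ *		 MC_CMD_POLL_BIST_RUNNING);
+ */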
+
+/* MC_CMD_PHY_SPI: (variadic in, variadic out)
+ * Read/Write/Erase the PHY SPI device
+ *
+ * Locks required: PHY_LOCK
+ * Return code: 0, ETIME, EINVAL, EACCES (if PHY_LOCK is not held)
+ */
+#define MC_CMD_PHY_SPI 0x27
+#define MC_CMD_PHY_SPI_IN_LEN(_write_bytes) (12 + (_write_bytes))
+#define MC_CMD_PHY_SPI_IN_ARGS_OFST 0
+#define MC_CMD_PHY_SPI_IN_ARGS_ADDR_OFST 0
+#define MC_CMD_PHY_SPI_IN_ARGS_READ_BYTES_OFST 4
+#define MC_CMD_PHY_SPI_IN_ARGS_ERASE_ALL_OFST 8
+/* Data to write here */
+#define MC_CMD_PHY_SPI_IN_WRITE_BUFFER_OFSET 12
+#define MC_CMD_PHY_SPI_OUT_LEN(_read_bytes) (_read_bytes)
+/* Data read here */
+#define MC_CMD_PHY_SPI_OUT_READ_BUFFER_OFST 0
+
+
+/* MC_CMD_GET_LOOPBACK_MODES:
+ * Returns a bitmask of loopback modes available at each speed.
+ *
+ * Locks required: None
+ * Return code: 0
+ */
+#define MC_CMD_GET_LOOPBACK_MODES 0x28
+#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 32
+#define MC_CMD_GET_LOOPBACK_MODES_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_SUGGESTED_OFST 24
+
+/* Flow control enumeration */
+#define MC_CMD_FCNTL_OFF 0
+#define MC_CMD_FCNTL_RESPOND 1
+#define MC_CMD_FCNTL_BIDIR 2
+/* Auto - Use what the link has autonegotiated
+ * - The driver should modify the advertised capabilities via SET_LINK.CAP
+ * to control the negotiated flow control mode.
+ * - Can only be set if the PHY supports PAUSE+ASYM capabilities
+ * - Never returned by GET_LINK as the value programmed into the MAC
+ */
+#define MC_CMD_FCNTL_AUTO 3
+
+/* Generic mac fault bitmask */
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1
+
+/* MC_CMD_GET_LINK:
+ * Read the unified MAC/PHY link state
+ *
+ * Locks required: None
+ * Return code: 0, ETIME
+ */
+#define MC_CMD_GET_LINK 0x29
+#define MC_CMD_GET_LINK_IN_LEN 0
+#define MC_CMD_GET_LINK_OUT_LEN 28
+/* near-side and link-partner advertised capabilities */
+#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+/* Autonegotiated speed in mbit/s. The link may still be down
+ * even if this reads non-zero */
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+/* Whether we have overall link up */
+#define MC_CMD_GET_LINK_LINK_UP_LBN 0
+#define MC_CMD_GET_LINK_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_FULL_DUPLEX_LBN 1
+#define MC_CMD_GET_LINK_FULL_DUPLEX_WIDTH 1
+/* Whether we have link at the layers provided by the BPX */
+#define MC_CMD_GET_LINK_BPX_LINK_LBN 2
+#define MC_CMD_GET_LINK_BPX_LINK_WIDTH 1
+/* Whether the PHY has external link */
+#define MC_CMD_GET_LINK_PHY_LINK_LBN 3
+#define MC_CMD_GET_LINK_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
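+
+/* Sketch of decoding the GET_LINK response; the flag extraction is
+ * illustrative:
+ *
+ *	speed = MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED);
+ *	flags = MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS);
+ *	link_up = !!(flags & (1 << MC_CMD_GET_LINK_LINK_UP_LBN));
+ *	fcntl = MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL);
+ */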
+
+/* MC_CMD_SET_LINK:
+ * Write the unified MAC/PHY link configuration
+ *
+ * A loopback speed of "0" is supported, and means
+ * "choose any available speed".
+ *
+ * Locks required: None
+ * Return code: 0, EINVAL, ETIME
+ */
+#define MC_CMD_SET_LINK 0x2a
+#define MC_CMD_SET_LINK_IN_LEN 16
+#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_TXDIS_WIDTH 1
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+#define MC_CMD_SET_LINK_OUT_LEN 0
+
+/* MC_CMD_SET_ID_LED:
+ * Set identification LED state
+ *
+ * Locks required: None
+ * Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_ID_LED 0x2b
+#define MC_CMD_SET_ID_LED_IN_LEN 4
+#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define MC_CMD_LED_OFF 0
+#define MC_CMD_LED_ON 1
+#define MC_CMD_LED_DEFAULT 2
+#define MC_CMD_SET_ID_LED_OUT_LEN 0
+
+/* MC_CMD_SET_MAC:
+ * Set MAC configuration
+ *
+ * The MTU is the MTU programmed directly into the XMAC/GMAC
+ * (inclusive of EtherII, VLAN, bug16011 padding)
+ *
+ * Locks required: None
+ * Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_MAC 0x2c
+#define MC_CMD_SET_MAC_IN_LEN 24
+#define MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_OUT_LEN 0
+
+/* MC_CMD_PHY_STATS:
+ * Get generic PHY statistics
+ *
+ * This call returns the statistics for a generic PHY in a sparse
+ * array (indexed by the enumeration above). Each value is represented
+ * by a 32bit number.
+ *
+ * If the DMA_ADDR is 0, then no DMA is performed, and the statistics
+ * may be read directly out of shared memory. If DMA_ADDR != 0, then
+ * the statistics are DMAed to that (page-aligned) location.
+ *
+ * Locks required: None
+ * Returns: 0, ETIME
+ * Response methods: shared memory, event
+ */
+#define MC_CMD_PHY_STATS 0x2d
+#define MC_CMD_PHY_STATS_IN_LEN 8
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (MC_CMD_PHY_NSTATS * 4)
+
+/* Unified MAC statistics enumeration */
+#define MC_CMD_MAC_GENERATION_START 0
+#define MC_CMD_MAC_TX_PKTS 1
+#define MC_CMD_MAC_TX_PAUSE_PKTS 2
+#define MC_CMD_MAC_TX_CONTROL_PKTS 3
+#define MC_CMD_MAC_TX_UNICAST_PKTS 4
+#define MC_CMD_MAC_TX_MULTICAST_PKTS 5
+#define MC_CMD_MAC_TX_BROADCAST_PKTS 6
+#define MC_CMD_MAC_TX_BYTES 7
+#define MC_CMD_MAC_TX_BAD_BYTES 8
+#define MC_CMD_MAC_TX_LT64_PKTS 9
+#define MC_CMD_MAC_TX_64_PKTS 10
+#define MC_CMD_MAC_TX_65_TO_127_PKTS 11
+#define MC_CMD_MAC_TX_128_TO_255_PKTS 12
+#define MC_CMD_MAC_TX_256_TO_511_PKTS 13
+#define MC_CMD_MAC_TX_512_TO_1023_PKTS 14
+#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 15
+#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 16
+#define MC_CMD_MAC_TX_GTJUMBO_PKTS 17
+#define MC_CMD_MAC_TX_BAD_FCS_PKTS 18
+#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 19
+#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 20
+#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 21
+#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 22
+#define MC_CMD_MAC_TX_DEFERRED_PKTS 23
+#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 24
+#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 25
+#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 26
+#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 27
+#define MC_CMD_MAC_RX_PKTS 28
+#define MC_CMD_MAC_RX_PAUSE_PKTS 29
+#define MC_CMD_MAC_RX_GOOD_PKTS 30
+#define MC_CMD_MAC_RX_CONTROL_PKTS 31
+#define MC_CMD_MAC_RX_UNICAST_PKTS 32
+#define MC_CMD_MAC_RX_MULTICAST_PKTS 33
+#define MC_CMD_MAC_RX_BROADCAST_PKTS 34
+#define MC_CMD_MAC_RX_BYTES 35
+#define MC_CMD_MAC_RX_BAD_BYTES 36
+#define MC_CMD_MAC_RX_64_PKTS 37
+#define MC_CMD_MAC_RX_65_TO_127_PKTS 38
+#define MC_CMD_MAC_RX_128_TO_255_PKTS 39
+#define MC_CMD_MAC_RX_256_TO_511_PKTS 40
+#define MC_CMD_MAC_RX_512_TO_1023_PKTS 41
+#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 42
+#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 43
+#define MC_CMD_MAC_RX_GTJUMBO_PKTS 44
+#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 45
+#define MC_CMD_MAC_RX_BAD_FCS_PKTS 46
+#define MC_CMD_MAC_RX_OVERFLOW_PKTS 47
+#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 48
+#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 49
+#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 50
+#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 51
+#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 52
+#define MC_CMD_MAC_RX_JABBER_PKTS 53
+#define MC_CMD_MAC_RX_NODESC_DROPS 54
+#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 55
+#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 56
+#define MC_CMD_MAC_RX_LANES01_DISP_ERR 57
+#define MC_CMD_MAC_RX_LANES23_DISP_ERR 58
+#define MC_CMD_MAC_RX_MATCH_FAULT 59
+#define MC_CMD_GMAC_DMABUF_START 64
+#define MC_CMD_GMAC_DMABUF_END 95
+/* Insert new members here. */
+#define MC_CMD_MAC_GENERATION_END 96
+#define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1)
+
+/* MC_CMD_MAC_STATS:
+ * Get unified GMAC/XMAC statistics
+ *
+ * This call returns unified statistics maintained by the MC as it
+ * switches between the GMAC and XMAC. The MC will write out all
+ * supported stats. The driver should zero initialise the buffer to
+ * guarantee consistent results.
+ *
+ * Locks required: None
+ * Returns: 0
+ * Response methods: shared memory, event
+ */
+#define MC_CMD_MAC_STATS 0x2e
+#define MC_CMD_MAC_STATS_IN_LEN 16
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_CMD_DMA_LBN 0
+#define MC_CMD_MAC_STATS_CMD_DMA_WIDTH 1
+#define MC_CMD_MAC_STATS_CMD_CLEAR_LBN 1
+#define MC_CMD_MAC_STATS_CMD_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_WIDTH 1
+/* Remaining PERIOD* fields only relevant when PERIODIC_CHANGE is set */
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_WIDTH 16
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+
+#define MC_CMD_MAC_STATS_OUT_LEN 0
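+
+/* Illustrative sketch only: to enable a periodic statistics DMA every
+ * 1000ms, the CMD bits above might be combined like this ('dma_lo',
+ * 'dma_hi' and 'dma_len' are hypothetical caller-supplied values;
+ * efx_mcdi_rpc() is assumed from mcdi.h):
+ *
+ *	u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
+ *	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, dma_lo);
+ *	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, dma_hi);
+ *	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_CMD,
+ *		       (1 << MC_CMD_MAC_STATS_CMD_DMA_LBN) |
+ *		       (1 << MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN) |
+ *		       (1 << MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN) |
+ *		       (1000 << MC_CMD_MAC_STATS_CMD_PERIOD_MS_LBN));
+ *	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ */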
+
+/* Callisto flags */
+#define MC_CMD_SFT9001_ROBUST_LBN 0
+#define MC_CMD_SFT9001_ROBUST_WIDTH 1
+#define MC_CMD_SFT9001_SHORT_REACH_LBN 1
+#define MC_CMD_SFT9001_SHORT_REACH_WIDTH 1
+
+/* MC_CMD_SFT9001_GET:
+ * Read the current Callisto-specific settings
+ *
+ * Locks required: None
+ * Returns: 0, ETIME
+ */
+#define MC_CMD_SFT9001_GET 0x30
+#define MC_CMD_SFT9001_GET_IN_LEN 0
+#define MC_CMD_SFT9001_GET_OUT_LEN 4
+#define MC_CMD_SFT9001_GET_OUT_FLAGS_OFST 0
+
+/* MC_CMD_SFT9001_SET:
+ * Write the current Callisto-specific settings
+ *
+ * Locks required: None
+ * Returns: 0, ETIME, EINVAL
+ */
+#define MC_CMD_SFT9001_SET 0x31
+#define MC_CMD_SFT9001_SET_IN_LEN 4
+#define MC_CMD_SFT9001_SET_IN_FLAGS_OFST 0
+#define MC_CMD_SFT9001_SET_OUT_LEN 0
+
+
+/* MC_CMD_WOL_FILTER_SET:
+ * Set a WoL filter
+ *
+ * Locks required: None
+ * Returns: 0, EBUSY, EINVAL, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_SET 0x32
+#define MC_CMD_WOL_FILTER_SET_IN_LEN 192 /* 190 rounded up to a word */
+#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
+#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+
+/* There is a union at offset 8, following defines overlap due to
+ * this */
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
+
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST \
+ MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
+
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST \
+ MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST \
+ (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 4)
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST \
+ (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 8)
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST \
+ (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 10)
+
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST \
+ MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST \
+ (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 16)
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST \
+ (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 32)
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST \
+ (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 34)
+
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST \
+ MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_OFST \
+ (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 48)
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST \
+ (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 176)
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST \
+ (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 177)
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST \
+ (MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 178)
+
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST \
+ MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
+
+#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+
+/* WOL Filter types enumeration */
+#define MC_CMD_WOL_TYPE_MAGIC 0x0
+ /* unused 0x1 */
+#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2
+#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
+#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
+#define MC_CMD_WOL_TYPE_BITMAP 0x5
+#define MC_CMD_WOL_TYPE_LINK 0x6
+#define MC_CMD_WOL_TYPE_MAX 0x7
+
+#define MC_CMD_FILTER_MODE_SIMPLE 0x0
+#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff
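+
+/* Illustrative sketch only: a simple magic-packet filter for a MAC
+ * address 'mac' (hypothetical caller variable) could be set up as
+ * follows, with the assigned filter ID returned at OUT_FILTER_ID_OFST:
+ *
+ *	u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN] = { 0 };
+ *	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
+ *		       MC_CMD_FILTER_MODE_SIMPLE);
+ *	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE,
+ *		       MC_CMD_WOL_TYPE_MAGIC);
+ *	memcpy(inbuf + MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST, mac, ETH_ALEN);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ */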
+
+/* MC_CMD_WOL_FILTER_REMOVE:
+ * Remove a WoL filter
+ *
+ * Locks required: None
+ * Returns: 0, EINVAL, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_REMOVE 0x33
+#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
+#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
+
+
+/* MC_CMD_WOL_FILTER_RESET:
+ * Reset (i.e. remove all) WoL filters
+ *
+ * Locks required: None
+ * Returns: 0, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_RESET 0x34
+#define MC_CMD_WOL_FILTER_RESET_IN_LEN 0
+#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0
+
+/* MC_CMD_SET_MCAST_HASH:
+ * Set the multicast hash value without otherwise
+ * reconfiguring the MAC
+ */
+#define MC_CMD_SET_MCAST_HASH 0x35
+#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
+#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
+
+/* MC_CMD_NVRAM_TYPES:
+ * Return bitfield indicating available types of virtual NVRAM partitions
+ *
+ * Locks required: none
+ * Returns: 0
+ */
+#define MC_CMD_NVRAM_TYPES 0x36
+#define MC_CMD_NVRAM_TYPES_IN_LEN 0
+#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
+#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+
+/* Supported NVRAM types */
+#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0
+#define MC_CMD_NVRAM_TYPE_MC_FW 1
+#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 2
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 3
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 4
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 5
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 6
+#define MC_CMD_NVRAM_TYPE_EXP_ROM 7
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 8
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 9
+#define MC_CMD_NVRAM_TYPE_PHY_PORT0 10
+#define MC_CMD_NVRAM_TYPE_PHY_PORT1 11
+#define MC_CMD_NVRAM_TYPE_LOG 12
+
+/* MC_CMD_NVRAM_INFO:
+ * Read info about a virtual NVRAM partition
+ *
+ * Locks required: none
+ * Returns: 0, EINVAL (bad type)
+ */
+#define MC_CMD_NVRAM_INFO 0x37
+#define MC_CMD_NVRAM_INFO_IN_LEN 4
+#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_OUT_LEN 24
+#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+
+/* MC_CMD_NVRAM_UPDATE_START:
+ * Start a group of update operations on a virtual NVRAM partition
+ *
+ * Locks required: PHY_LOCK if type==*PHY*
+ * Returns: 0, EINVAL (bad type), EACCES (if PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_UPDATE_START 0x38
+#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
+
+/* MC_CMD_NVRAM_READ:
+ * Read data from a virtual NVRAM partition
+ *
+ * Locks required: PHY_LOCK if type==*PHY*
+ * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_READ 0x39
+#define MC_CMD_NVRAM_READ_IN_LEN 12
+#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_READ_OUT_LEN(_read_bytes) (_read_bytes)
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
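+
+/* Illustrative sketch only: reading 'length' bytes at 'offset' from a
+ * partition of type 'type' (all hypothetical caller variables; outbuf
+ * must be sized for MC_CMD_NVRAM_READ_OUT_LEN(length)):
+ *
+ *	u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
+ *	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
+ *	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
+ *	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
+ *			  outbuf, MC_CMD_NVRAM_READ_OUT_LEN(length), &outlen);
+ */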
+
+/* MC_CMD_NVRAM_WRITE:
+ * Write data to a virtual NVRAM partition
+ *
+ * Locks required: PHY_LOCK if type==*PHY*
+ * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_WRITE 0x3a
+#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_NVRAM_WRITE_IN_LEN(_write_bytes) (12 + _write_bytes)
+#define MC_CMD_NVRAM_WRITE_OUT_LEN 0
+
+/* MC_CMD_NVRAM_ERASE:
+ * Erase sector(s) from a virtual NVRAM partition
+ *
+ * Locks required: PHY_LOCK if type==*PHY*
+ * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_ERASE 0x3b
+#define MC_CMD_NVRAM_ERASE_IN_LEN 12
+#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
+
+/* MC_CMD_NVRAM_UPDATE_FINISH:
+ * Finish a group of update operations on a virtual NVRAM partition
+ *
+ * Locks required: PHY_LOCK if type==*PHY*
+ * Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
+
+/* MC_CMD_REBOOT:
+ * Reboot the MC.
+ *
+ * The AFTER_ASSERTION flag is intended to be used when the driver notices
+ * an assertion failure (at which point it is expected to perform a complete
+ * tear down and reinitialise), to allow both ports to reset the MC once
+ * in an atomic fashion.
+ *
+ * Production MC firmwares are generally compiled with REBOOT_ON_ASSERT=1,
+ * which means that they will automatically reboot out of the assertion
+ * handler, so this is in practice an optional operation. It is still
+ * recommended that drivers execute this to support custom firmwares
+ * with REBOOT_ON_ASSERT=0.
+ *
+ * Locks required: NONE
+ * Returns: Nothing. You get back a response with ERR=1, DATALEN=0
+ */
+#define MC_CMD_REBOOT 0x3d
+#define MC_CMD_REBOOT_IN_LEN 4
+#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 1
+#define MC_CMD_REBOOT_OUT_LEN 0
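+
+/* Illustrative sketch only: the post-assertion reboot described above.
+ * Since the response arrives with ERR=1, the return code is not
+ * meaningful and may be ignored:
+ *
+ *	u8 inbuf[MC_CMD_REBOOT_IN_LEN];
+ *	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
+ *		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
+ *	(void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
+ *			    NULL, 0, NULL);
+ */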
+
+/* MC_CMD_SCHEDINFO:
+ * Request scheduler info from the MC.
+ *
+ * Locks required: NONE
+ * Returns: An array of (timeslice, maximum overrun) pairs, one for each
+ * thread, in ascending order of thread address.
+ */
+#define MC_CMD_SCHEDINFO 0x3e
+#define MC_CMD_SCHEDINFO_IN_LEN 0
+
+
+/* MC_CMD_REBOOT_MODE: (debug)
+ * Set the mode for the next MC reboot.
+ *
+ * Locks required: NONE
+ *
+ * Sets the reboot mode to the specified value. Returns the old mode.
+ */
+#define MC_CMD_REBOOT_MODE 0x3f
+#define MC_CMD_REBOOT_MODE_IN_LEN 4
+#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_OUT_LEN 4
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_NORMAL 0
+#define MC_CMD_REBOOT_MODE_SNAPPER 3
+
+/* MC_CMD_DEBUG_LOG:
+ * Null request/response command (debug)
+ * - sequence number is always zero
+ * - only supported on the UART interface
+ * (the same set of bytes is delivered as an
+ * event over PCI)
+ */
+#define MC_CMD_DEBUG_LOG 0x40
+#define MC_CMD_DEBUG_LOG_IN_LEN 0
+#define MC_CMD_DEBUG_LOG_OUT_LEN 0
+
+/* Generic sensor enumeration. Note that a dual port NIC
+ * will EITHER expose PHY_COMMON_TEMP OR PHY0_TEMP and
+ * PHY1_TEMP depending on whether there is a single sensor
+ * in the vicinity of the two ports, or one per port.
+ */
+#define MC_CMD_SENSOR_CONTROLLER_TEMP 0 /* degC */
+#define MC_CMD_SENSOR_PHY_COMMON_TEMP 1 /* degC */
+#define MC_CMD_SENSOR_CONTROLLER_COOLING 2 /* bool */
+#define MC_CMD_SENSOR_PHY0_TEMP 3 /* degC */
+#define MC_CMD_SENSOR_PHY0_COOLING 4 /* bool */
+#define MC_CMD_SENSOR_PHY1_TEMP 5 /* degC */
+#define MC_CMD_SENSOR_PHY1_COOLING 6 /* bool */
+#define MC_CMD_SENSOR_IN_1V0 7 /* mV */
+#define MC_CMD_SENSOR_IN_1V2 8 /* mV */
+#define MC_CMD_SENSOR_IN_1V8 9 /* mV */
+#define MC_CMD_SENSOR_IN_2V5 10 /* mV */
+#define MC_CMD_SENSOR_IN_3V3 11 /* mV */
+#define MC_CMD_SENSOR_IN_12V0 12 /* mV */
+
+
+/* Sensor state */
+#define MC_CMD_SENSOR_STATE_OK 0
+#define MC_CMD_SENSOR_STATE_WARNING 1
+#define MC_CMD_SENSOR_STATE_FATAL 2
+#define MC_CMD_SENSOR_STATE_BROKEN 3
+
+/* MC_CMD_SENSOR_INFO:
+ * Returns information about every available sensor.
+ *
+ * Each sensor has a single (16-bit) value, and a corresponding state.
+ * The mapping between value and sensor is nominally determined by the
+ * MC, but in practice is implemented as zero (BROKEN), one (TEMPERATURE),
+ * or two (VOLTAGE) ranges per sensor per state.
+ *
+ * This call returns a mask (32-bit) of the sensors that are supported
+ * by this platform, then an array (indexed by MC_CMD_SENSOR) of byte
+ * offsets to the per-sensor arrays. Each sensor array has four 16-bit
+ * numbers: min1, max1, min2, max2.
+ *
+ * Locks required: None
+ * Returns: 0
+ */
+#define MC_CMD_SENSOR_INFO 0x41
+#define MC_CMD_SENSOR_INFO_IN_LEN 0
+#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \
+ (4 + (_x))
+#define MC_CMD_SENSOR_INFO_OUT_MIN1_OFST(_ofst) \
+ ((_ofst) + 0)
+#define MC_CMD_SENSOR_INFO_OUT_MAX1_OFST(_ofst) \
+ ((_ofst) + 2)
+#define MC_CMD_SENSOR_INFO_OUT_MIN2_OFST(_ofst) \
+ ((_ofst) + 4)
+#define MC_CMD_SENSOR_INFO_OUT_MAX2_OFST(_ofst) \
+ ((_ofst) + 6)
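+
+/* Illustrative sketch only: given the byte offset 'ofst' of one
+ * sensor's limit array (taken from that sensor's OFFSET entry), the
+ * four 16-bit limits can be picked out of the response buffer:
+ *
+ *	min1 = le16_to_cpu(*(__le16 *)
+ *		(outbuf + MC_CMD_SENSOR_INFO_OUT_MIN1_OFST(ofst)));
+ *	max1 = le16_to_cpu(*(__le16 *)
+ *		(outbuf + MC_CMD_SENSOR_INFO_OUT_MAX1_OFST(ofst)));
+ */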
+
+/* MC_CMD_READ_SENSORS:
+ * Returns the current reading from each sensor
+ *
+ * Returns a sparse array of sensor readings (indexed by the sensor
+ * type) into host memory. Each array element is a dword.
+ *
+ * The MC will send a SENSOREVT event every time any sensor changes state. The
+ * driver is responsible for ensuring that it doesn't miss any events. The board
+ * will function normally if all sensors are in STATE_OK or STATE_WARNING.
+ * Otherwise the board should not be expected to function.
+ */
+#define MC_CMD_READ_SENSORS 0x42
+#define MC_CMD_READ_SENSORS_IN_LEN 8
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_READ_SENSORS_OUT_LEN 0
+
+/* Sensor reading fields */
+#define MC_CMD_READ_SENSOR_VALUE_LBN 0
+#define MC_CMD_READ_SENSOR_VALUE_WIDTH 16
+#define MC_CMD_READ_SENSOR_STATE_LBN 16
+#define MC_CMD_READ_SENSOR_STATE_WIDTH 8
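+
+/* Illustrative decode of one reading dword ('reading' is a hypothetical
+ * caller variable), equivalent to the LBN/WIDTH fields above expressed
+ * as plain shift-and-mask operations:
+ *
+ *	u16 value = (reading >> MC_CMD_READ_SENSOR_VALUE_LBN) & 0xffff;
+ *	u8 state = (reading >> MC_CMD_READ_SENSOR_STATE_LBN) & 0xff;
+ */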
+
+
+/* MC_CMD_GET_PHY_STATE:
+ * Report current state of PHY. A "zombie" PHY is a PHY that has failed to
+ * boot (e.g. due to missing or corrupted firmware).
+ *
+ * Locks required: None
+ * Return code: 0
+ */
+#define MC_CMD_GET_PHY_STATE 0x43
+
+#define MC_CMD_GET_PHY_STATE_IN_LEN 0
+#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
+#define MC_CMD_GET_PHY_STATE_STATE_OFST 0
+/* PHY state enumeration: */
+#define MC_CMD_PHY_STATE_OK 1
+#define MC_CMD_PHY_STATE_ZOMBIE 2
+
+
+/* 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
+ * disable 802.1Qbb for a given priority. */
+#define MC_CMD_SETUP_8021QBB 0x44
+#define MC_CMD_SETUP_8021QBB_IN_LEN 32
+#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFFST 0
+
+
+/* MC_CMD_WOL_FILTER_GET:
+ * Retrieve ID of any WoL filters
+ *
+ * Locks required: None
+ * Returns: 0, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_GET 0x45
+#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
+#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD:
+ * Offload a protocol to the NIC for the lights-out state
+ *
+ * Locks required: None
+ * Returns: 0, ENOSYS
+ */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
+
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN 16
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+
+/* There is a union at offset 4, following defines overlap due to
+ * this */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARPMAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARPIP_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSMAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSSNIPV6_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NSIPV6_OFST 26
+
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD:
+ * Remove a protocol offload from the NIC for the lights-out state
+ *
+ * Locks required: None
+ * Returns: 0, ENOSYS
+ */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
+
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+
+/* Lights-out offload protocols enumeration */
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2
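+
+/* Illustrative sketch only: offloading ARP for a given MAC/IPv4 pair
+ * ('mac' and 'ip' are hypothetical caller variables):
+ *
+ *	u8 inbuf[MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN];
+ *	MCDI_SET_DWORD(inbuf, ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL,
+ *		       MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP);
+ *	memcpy(inbuf + MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARPMAC_OFST,
+ *	       mac, ETH_ALEN);
+ *	memcpy(inbuf + MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARPIP_OFST, &ip, 4);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_ADD_LIGHTSOUT_OFFLOAD, inbuf,
+ *			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ */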
+
+
+/* MC_CMD_MAC_RESET_RESTORE:
+ * Restore MAC after block reset
+ *
+ * Locks required: None
+ * Returns: 0
+ */
+
+#define MC_CMD_MAC_RESET_RESTORE 0x48
+#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
+#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
+
+
+/* MC_CMD_TESTASSERT:
+ * Deliberately trigger an assert-detonation in the firmware for testing
+ * purposes (i.e. to allow tests of how gracefully the driver copes).
+ *
+ * Locks required: None
+ * Returns: 0
+ */
+
+#define MC_CMD_TESTASSERT 0x49
+#define MC_CMD_TESTASSERT_IN_LEN 0
+#define MC_CMD_TESTASSERT_OUT_LEN 0
+
+/* MC_CMD_WORKAROUND 0x4a
+ *
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it
+ * doesn't understand the given workaround number - which should not
+ * be treated as a hard error by client code.
+ *
+ * This op does not imply any semantics about each workaround; that's
+ * between the driver and the mcfw on a per-workaround basis.
+ *
+ * Locks required: None
+ * Returns: 0, EINVAL
+ */
+#define MC_CMD_WORKAROUND 0x4a
+#define MC_CMD_WORKAROUND_IN_LEN 8
+#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define MC_CMD_WORKAROUND_BUG17230 1
+#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+#define MC_CMD_WORKAROUND_OUT_LEN 0
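+
+/* Illustrative sketch only: enabling workaround BUG17230. Per the note
+ * above, an EINVAL return for an unknown workaround number need not be
+ * treated as a hard error:
+ *
+ *	u8 inbuf[MC_CMD_WORKAROUND_IN_LEN];
+ *	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, MC_CMD_WORKAROUND_BUG17230);
+ *	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, 1);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ */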
+
+/* MC_CMD_GET_PHY_MEDIA_INFO:
+ * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
+ * SFP+ PHYs).
+ *
+ * The "media type" can be found via GET_PHY_CFG (GET_PHY_CFG_OUT_MEDIA_TYPE);
+ * the valid "page number" input values, and the output data, are interpreted
+ * on a per-type basis.
+ *
+ * For SFP+: PAGE=0 or 1 returns a 128-byte block read from module I2C address
+ * 0xA0 offset 0 or 0x80.
+ * Anything else: currently undefined.
+ *
+ * Locks required: None
+ * Return code: 0
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(_num_bytes) (4 + (_num_bytes))
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
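+
+/* Illustrative sketch only: for an SFP+ module, requesting PAGE=0
+ * fetches the 128-byte module ID block at I2C address 0xA0, offset 0:
+ *
+ *	u8 inbuf[MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN];
+ *	u8 outbuf[MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(128)];
+ *	MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, 0);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO,
+ *			  inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ */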
+
+/* MC_CMD_NVRAM_TEST:
+ * Test a particular NVRAM partition for valid contents (where "valid"
+ * depends on the type of partition).
+ *
+ * Locks required: None
+ * Return code: 0
+ */
+#define MC_CMD_NVRAM_TEST 0x4c
+#define MC_CMD_NVRAM_TEST_IN_LEN 4
+#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_TEST_OUT_LEN 4
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+#define MC_CMD_NVRAM_TEST_PASS 0
+#define MC_CMD_NVRAM_TEST_FAIL 1
+#define MC_CMD_NVRAM_TEST_NOTSUPP 2
+
+/* MC_CMD_MRSFP_TWEAK: (debug)
+ * Read status and/or set parameters for the "mrsfp" driver in mr_rusty builds.
+ * I2C I/O expander bits are always read; if equaliser parameters are supplied,
+ * they are configured first.
+ *
+ * Locks required: None
+ * Return code: 0, EINVAL
+ */
+#define MC_CMD_MRSFP_TWEAK 0x4d
+#define MC_CMD_MRSFP_TWEAK_IN_LEN_READ_ONLY 0
+#define MC_CMD_MRSFP_TWEAK_IN_LEN_EQ_CONFIG 16
+#define MC_CMD_MRSFP_TWEAK_IN_TXEQ_LEVEL_OFST 0 /* 0-6 low->high de-emph. */
+#define MC_CMD_MRSFP_TWEAK_IN_TXEQ_DT_CFG_OFST 4 /* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_RXEQ_BOOST_OFST 8 /* 0-8 low->high boost */
+#define MC_CMD_MRSFP_TWEAK_IN_RXEQ_DT_CFG_OFST 12 /* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 /* input bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */
+
+/* MC_CMD_TEST_HACK: (debug (unsurprisingly))
+ * Change bits of network port state for test purposes, in ways that would
+ * never be useful in normal operation and so need a special command. */
+#define MC_CMD_TEST_HACK 0x2f
+#define MC_CMD_TEST_HACK_IN_LEN 8
+#define MC_CMD_TEST_HACK_IN_TXPAD_OFST 0
+#define MC_CMD_TEST_HACK_IN_TXPAD_AUTO 0 /* Let the MC manage things */
+#define MC_CMD_TEST_HACK_IN_TXPAD_ON 1 /* Force on */
+#define MC_CMD_TEST_HACK_IN_TXPAD_OFF 2 /* Force off */
+#define MC_CMD_TEST_HACK_IN_IPG_OFST 4 /* Takes a value in bits */
+#define MC_CMD_TEST_HACK_IN_IPG_AUTO 0 /* The MC picks the value */
+#define MC_CMD_TEST_HACK_OUT_LEN 0
+
+/* MC_CMD_SENSOR_SET_LIMS: (debug) (mostly) adjust the sensor limits. This
+ * is a warranty-voiding operation.
+ *
+ * IN: sensor identifier (one of the enumeration starting with
+ * MC_CMD_SENSOR_CONTROLLER_TEMP), followed by 4 32-bit values:
+ * min(warning), max(warning), min(fatal), max(fatal). Which of these
+ * limits are meaningful, and how they are interpreted, is sensor-specific.
+ *
+ * OUT: nothing
+ *
+ * Returns: ENOENT if the sensor specified does not exist, EINVAL if the limits are
+ * out of range.
+ */
+#define MC_CMD_SENSOR_SET_LIMS 0x4e
+#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+
+/* Do NOT add new commands beyond 0x4f as part of 3.0: 0x50 - 0x7f will be
+ * used for post-3.0 extensions. If you run out of space, look for gaps or
+ * commands that are unused in the existing range. */
+
+#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_phy.c b/drivers/net/ethernet/sfc/mcdi_phy.c
new file mode 100644
index 000000000000..6c63ab0710af
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mcdi_phy.c
@@ -0,0 +1,754 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2009-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/*
+ * Driver for PHY related operations via MCDI.
+ */
+
+#include <linux/slab.h>
+#include "efx.h"
+#include "phy.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "nic.h"
+#include "selftest.h"
+
+struct efx_mcdi_phy_data {
+ u32 flags;
+ u32 type;
+ u32 supported_cap;
+ u32 channel;
+ u32 port;
+ u32 stats_mask;
+ u8 name[20];
+ u32 media;
+ u32 mmd_mask;
+ u8 revision[20];
+ u32 forced_cap;
+};
+
+static int
+efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
+{
+ u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN];
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0);
+ BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name));
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS);
+ cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE);
+ cfg->supported_cap =
+ MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP);
+ cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL);
+ cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT);
+ cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK);
+ memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME),
+ sizeof(cfg->name));
+ cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE);
+ cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK);
+ memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION),
+ sizeof(cfg->revision));
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
+ u32 flags, u32 loopback_mode,
+ u32 loopback_speed)
+{
+ u8 inbuf[MC_CMD_SET_LINK_IN_LEN];
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
+
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities);
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags);
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode);
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
+{
+ u8 outbuf[MC_CMD_GET_LOOPBACK_MODES_OUT_LEN];
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_SUGGESTED);
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
+ unsigned int prtad, unsigned int devad, u16 addr,
+ u16 *value_out, u32 *status_out)
+{
+ u8 inbuf[MC_CMD_MDIO_READ_IN_LEN];
+ u8 outbuf[MC_CMD_MDIO_READ_OUT_LEN];
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, bus);
+ MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
+ MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
+ MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ *value_out = (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
+ *status_out = MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS);
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
+ unsigned int prtad, unsigned int devad, u16 addr,
+ u16 value, u32 *status_out)
+{
+ u8 inbuf[MC_CMD_MDIO_WRITE_IN_LEN];
+ u8 outbuf[MC_CMD_MDIO_WRITE_OUT_LEN];
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, bus);
+ MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
+ MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
+ MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
+ MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ *status_out = MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS);
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
+{
+ u32 result = 0;
+
+ switch (media) {
+ case MC_CMD_MEDIA_KX4:
+ result |= SUPPORTED_Backplane;
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ result |= SUPPORTED_1000baseKX_Full;
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ result |= SUPPORTED_10000baseKX4_Full;
+ break;
+
+ case MC_CMD_MEDIA_XFP:
+ case MC_CMD_MEDIA_SFP_PLUS:
+ result |= SUPPORTED_FIBRE;
+ break;
+
+ case MC_CMD_MEDIA_BASE_T:
+ result |= SUPPORTED_TP;
+ if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+ result |= SUPPORTED_10baseT_Half;
+ if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+ result |= SUPPORTED_10baseT_Full;
+ if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+ result |= SUPPORTED_100baseT_Half;
+ if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+ result |= SUPPORTED_100baseT_Full;
+ if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+ result |= SUPPORTED_1000baseT_Half;
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ result |= SUPPORTED_1000baseT_Full;
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ result |= SUPPORTED_10000baseT_Full;
+ break;
+ }
+
+ if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ result |= SUPPORTED_Pause;
+ if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ result |= SUPPORTED_Asym_Pause;
+ if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ result |= SUPPORTED_Autoneg;
+
+ return result;
+}
+
+static u32 ethtool_to_mcdi_cap(u32 cap)
+{
+ u32 result = 0;
+
+ if (cap & SUPPORTED_10baseT_Half)
+ result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
+ if (cap & SUPPORTED_10baseT_Full)
+ result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
+ if (cap & SUPPORTED_100baseT_Half)
+ result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
+ if (cap & SUPPORTED_100baseT_Full)
+ result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
+ if (cap & SUPPORTED_1000baseT_Half)
+ result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
+ if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full))
+ result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
+ if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full))
+ result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
+ if (cap & SUPPORTED_Pause)
+ result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
+ if (cap & SUPPORTED_Asym_Pause)
+ result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
+ if (cap & SUPPORTED_Autoneg)
+ result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
+
+ return result;
+}
+
+static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ enum efx_phy_mode mode, supported;
+ u32 flags;
+
+ /* TODO: Advertise the capabilities supported by this PHY */
+ supported = 0;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_TXDIS_LBN))
+ supported |= PHY_MODE_TX_DISABLED;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_LOWPOWER_LBN))
+ supported |= PHY_MODE_LOW_POWER;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_POWEROFF_LBN))
+ supported |= PHY_MODE_OFF;
+
+ mode = efx->phy_mode & supported;
+
+ flags = 0;
+ if (mode & PHY_MODE_TX_DISABLED)
+ flags |= (1 << MC_CMD_SET_LINK_TXDIS_LBN);
+ if (mode & PHY_MODE_LOW_POWER)
+ flags |= (1 << MC_CMD_SET_LINK_LOWPOWER_LBN);
+ if (mode & PHY_MODE_OFF)
+ flags |= (1 << MC_CMD_SET_LINK_POWEROFF_LBN);
+
+ return flags;
+}
+
+static u32 mcdi_to_ethtool_media(u32 media)
+{
+ switch (media) {
+ case MC_CMD_MEDIA_XAUI:
+ case MC_CMD_MEDIA_CX4:
+ case MC_CMD_MEDIA_KX4:
+ return PORT_OTHER;
+
+ case MC_CMD_MEDIA_XFP:
+ case MC_CMD_MEDIA_SFP_PLUS:
+ return PORT_FIBRE;
+
+ case MC_CMD_MEDIA_BASE_T:
+ return PORT_TP;
+
+ default:
+ return PORT_OTHER;
+ }
+}
+
+static int efx_mcdi_phy_probe(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_data;
+ u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ u32 caps;
+ int rc;
+
+ /* Initialise and populate phy_data */
+ phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+ if (phy_data == NULL)
+ return -ENOMEM;
+
+ rc = efx_mcdi_get_phy_cfg(efx, phy_data);
+ if (rc != 0)
+ goto fail;
+
+ /* Read initial link advertisement */
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ goto fail;
+
+ /* Fill out nic state */
+ efx->phy_data = phy_data;
+ efx->phy_type = phy_data->type;
+
+ efx->mdio_bus = phy_data->channel;
+ efx->mdio.prtad = phy_data->port;
+ efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
+ efx->mdio.mode_support = 0;
+ if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
+ efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
+ if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
+ efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+
+ caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
+ if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ efx->link_advertising =
+ mcdi_to_ethtool_cap(phy_data->media, caps);
+ else
+ phy_data->forced_cap = caps;
+
+ /* Assert that we can map efx -> mcdi loopback modes */
+ BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE);
+ BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA);
+ BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC);
+ BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII);
+ BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS);
+ BUILD_BUG_ON(LOOPBACK_XAUI != MC_CMD_LOOPBACK_XAUI);
+ BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII);
+ BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII);
+ BUILD_BUG_ON(LOOPBACK_XGBR != MC_CMD_LOOPBACK_XGBR);
+ BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI);
+ BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR);
+ BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR);
+ BUILD_BUG_ON(LOOPBACK_SGMII_FAR != MC_CMD_LOOPBACK_SGMII_FAR);
+ BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR);
+ BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY);
+ BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS);
+ BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS);
+ BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD);
+ BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT);
+ BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS);
+ BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS);
+ BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR);
+ BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR);
+ BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS);
+ BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS);
+ BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR);
+ BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS);
+
+ rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes);
+ if (rc != 0)
+ goto fail;
+	/* The MC indicates that LOOPBACK_NONE is a valid loopback mode,
+	 * but by convention we don't count it as one */
+ efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
+
+ /* Set the initial link mode */
+ efx_mcdi_phy_decode_link(
+ efx, &efx->link_state,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
+
+ /* Default to Autonegotiated flow control if the PHY supports it */
+ efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
+ if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ efx->wanted_fc |= EFX_FC_AUTO;
+ efx_link_set_wanted_fc(efx, efx->wanted_fc);
+
+ return 0;
+
+fail:
+ kfree(phy_data);
+ return rc;
+}
+
+int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 caps = (efx->link_advertising ?
+ ethtool_to_mcdi_cap(efx->link_advertising) :
+ phy_cfg->forced_cap);
+
+ return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
+ efx->loopback_mode, 0);
+}
+
+void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+ struct efx_link_state *link_state,
+ u32 speed, u32 flags, u32 fcntl)
+{
+ switch (fcntl) {
+ case MC_CMD_FCNTL_AUTO:
+ WARN_ON(1); /* This is not a link mode */
+ link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
+ break;
+ case MC_CMD_FCNTL_BIDIR:
+ link_state->fc = EFX_FC_TX | EFX_FC_RX;
+ break;
+ case MC_CMD_FCNTL_RESPOND:
+ link_state->fc = EFX_FC_RX;
+ break;
+ default:
+ WARN_ON(1);
+ case MC_CMD_FCNTL_OFF:
+ link_state->fc = 0;
+ break;
+ }
+
+ link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_LINK_UP_LBN));
+ link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_FULL_DUPLEX_LBN));
+ link_state->speed = speed;
+}
+
+/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
+ * supported by the link partner. Warn the user if this isn't the case
+ */
+void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 rmtadv;
+
+ /* The link partner capabilities are only relevant if the
+ * link supports flow control autonegotiation */
+ if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ return;
+
+ /* If flow control autoneg is supported and enabled, then fine */
+ if (efx->wanted_fc & EFX_FC_AUTO)
+ return;
+
+ rmtadv = 0;
+ if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ rmtadv |= ADVERTISED_Pause;
+ if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ rmtadv |= ADVERTISED_Asym_Pause;
+
+ if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
+ netif_err(efx, link, efx->net_dev,
+ "warning: link partner doesn't support pause frames");
+}
+
+static bool efx_mcdi_phy_poll(struct efx_nic *efx)
+{
+ struct efx_link_state old_state = efx->link_state;
+ u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
+ efx->link_state.up = false;
+ } else {
+ efx_mcdi_phy_decode_link(
+ efx, &efx->link_state,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
+ }
+
+ return !efx_link_state_equal(&efx->link_state, &old_state);
+}
+
+static void efx_mcdi_phy_remove(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_data = efx->phy_data;
+
+ efx->phy_data = NULL;
+ kfree(phy_data);
+}
+
+static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ int rc;
+
+ ecmd->supported =
+ mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
+ ecmd->advertising = efx->link_advertising;
+ ethtool_cmd_speed_set(ecmd, efx->link_state.speed);
+ ecmd->duplex = efx->link_state.fd;
+ ecmd->port = mcdi_to_ethtool_media(phy_cfg->media);
+ ecmd->phy_address = phy_cfg->port;
+ ecmd->transceiver = XCVR_INTERNAL;
+ ecmd->autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg);
+ ecmd->mdio_support = (efx->mdio.mode_support &
+ (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
+ return;
+ }
+ ecmd->lp_advertising =
+ mcdi_to_ethtool_cap(phy_cfg->media,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
+}
+
+static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 caps;
+ int rc;
+
+ if (ecmd->autoneg) {
+ caps = (ethtool_to_mcdi_cap(ecmd->advertising) |
+ 1 << MC_CMD_PHY_CAP_AN_LBN);
+ } else if (ecmd->duplex) {
+ switch (ethtool_cmd_speed(ecmd)) {
+ case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
+ case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
+ case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
+ case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
+ default: return -EINVAL;
+ }
+ } else {
+ switch (ethtool_cmd_speed(ecmd)) {
+ case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
+ case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
+ case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
+ default: return -EINVAL;
+ }
+ }
+
+ rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
+ efx->loopback_mode, 0);
+ if (rc)
+ return rc;
+
+ if (ecmd->autoneg) {
+ efx_link_set_advertising(
+ efx, ecmd->advertising | ADVERTISED_Autoneg);
+ phy_cfg->forced_cap = 0;
+ } else {
+ efx_link_set_advertising(efx, 0);
+ phy_cfg->forced_cap = caps;
+ }
+ return 0;
+}
+
+static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
+{
+ u8 outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN];
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
+ return -EIO;
+ if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const char *const mcdi_sft9001_cable_diag_names[] = {
+ "cable.pairA.length",
+ "cable.pairB.length",
+ "cable.pairC.length",
+ "cable.pairD.length",
+ "cable.pairA.status",
+ "cable.pairB.status",
+ "cable.pairC.status",
+ "cable.pairD.status",
+};
+
+static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
+ int *results)
+{
+ unsigned int retry, i, count = 0;
+ size_t outlen;
+ u32 status;
+ u8 *buf, *ptr;
+ int rc;
+
+ buf = kzalloc(0x100, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
+ MCDI_SET_DWORD(buf, START_BIST_IN_TYPE, bist_mode);
+ rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, buf, MC_CMD_START_BIST_IN_LEN,
+ NULL, 0, NULL);
+ if (rc)
+ goto out;
+
+ /* Wait up to 10s for BIST to finish */
+ for (retry = 0; retry < 100; ++retry) {
+ BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+ buf, 0x100, &outlen);
+ if (rc)
+ goto out;
+
+ status = MCDI_DWORD(buf, POLL_BIST_OUT_RESULT);
+ if (status != MC_CMD_POLL_BIST_RUNNING)
+ goto finished;
+
+ msleep(100);
+ }
+
+ rc = -ETIMEDOUT;
+ goto out;
+
+finished:
+ results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1;
+
+ /* SFT9001 specific cable diagnostics output */
+ if (efx->phy_type == PHY_TYPE_SFT9001B &&
+ (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
+ bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
+ ptr = MCDI_PTR(buf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+ if (status == MC_CMD_POLL_BIST_PASSED &&
+ outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
+ for (i = 0; i < 8; i++) {
+ results[count + i] =
+ EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i],
+ EFX_DWORD_0);
+ }
+ }
+ count += 8;
+ }
+ rc = count;
+
+out:
+ kfree(buf);
+
+ return rc;
+}
+
+static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
+ unsigned flags)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 mode;
+ int rc;
+
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
+ rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
+ if (rc < 0)
+ return rc;
+
+ results += rc;
+ }
+
+	/* If we support both LONG and SHORT, run LONG if the test is
+	 * allowed to break the link (offline) and SHORT otherwise.
+	 * Otherwise, run the one we support */
+ mode = 0;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN)) {
+ if ((flags & ETH_TEST_FL_OFFLINE) &&
+ (phy_cfg->flags &
+ (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN)))
+ mode = MC_CMD_PHY_BIST_CABLE_LONG;
+ else
+ mode = MC_CMD_PHY_BIST_CABLE_SHORT;
+ } else if (phy_cfg->flags &
+ (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))
+ mode = MC_CMD_PHY_BIST_CABLE_LONG;
+
+ if (mode != 0) {
+ rc = efx_mcdi_bist(efx, mode, results);
+ if (rc < 0)
+ return rc;
+ results += rc;
+ }
+
+ return 0;
+}
+
+static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
+ unsigned int index)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
+ if (index == 0)
+ return "bist";
+ --index;
+ }
+
+ if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN) |
+ (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))) {
+ if (index == 0)
+ return "cable";
+ --index;
+
+ if (efx->phy_type == PHY_TYPE_SFT9001B) {
+ if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names))
+ return mcdi_sft9001_cable_diag_names[index];
+ index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names);
+ }
+ }
+
+ return NULL;
+}
+
+const struct efx_phy_operations efx_mcdi_phy_ops = {
+ .probe = efx_mcdi_phy_probe,
+ .init = efx_port_dummy_op_int,
+ .reconfigure = efx_mcdi_phy_reconfigure,
+ .poll = efx_mcdi_phy_poll,
+ .fini = efx_port_dummy_op_void,
+ .remove = efx_mcdi_phy_remove,
+ .get_settings = efx_mcdi_phy_get_settings,
+ .set_settings = efx_mcdi_phy_set_settings,
+ .test_alive = efx_mcdi_phy_test_alive,
+ .run_tests = efx_mcdi_phy_run_tests,
+ .test_name = efx_mcdi_phy_test_name,
+};
diff --git a/drivers/net/ethernet/sfc/mdio_10g.c b/drivers/net/ethernet/sfc/mdio_10g.c
new file mode 100644
index 000000000000..7ab385c8136d
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mdio_10g.c
@@ -0,0 +1,323 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2006-2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+/*
+ * Useful functions for working with MDIO clause 45 PHYs
+ */
+#include <linux/types.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+#include "net_driver.h"
+#include "mdio_10g.h"
+#include "workarounds.h"
+
+unsigned efx_mdio_id_oui(u32 id)
+{
+ unsigned oui = 0;
+ int i;
+
+ /* The bits of the OUI are designated a..x, with a=0 and b variable.
+ * In the id register c is the MSB but the OUI is conventionally
+ * written as bytes h..a, p..i, x..q. Reorder the bits accordingly. */
+ for (i = 0; i < 22; ++i)
+ if (id & (1 << (i + 10)))
+ oui |= 1 << (i ^ 7);
+
+ return oui;
+}
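+
+/* Illustrative usage sketch: identifying a PHY from its ID registers.
+ * efx_mdio_read_id(), efx_mdio_id_model() and efx_mdio_id_rev() are
+ * defined in mdio_10g.h:
+ *
+ *	u32 id = efx_mdio_read_id(efx, MDIO_MMD_PMAPMD);
+ *	netif_dbg(efx, hw, efx->net_dev, "PHY OUI %06x model %02x rev %x\n",
+ *		  efx_mdio_id_oui(id), efx_mdio_id_model(id),
+ *		  efx_mdio_id_rev(id));
+ */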
+
+int efx_mdio_reset_mmd(struct efx_nic *port, int mmd,
+ int spins, int spintime)
+{
+ u32 ctrl;
+
+ /* Catch callers passing values in the wrong units (or just silly) */
+ EFX_BUG_ON_PARANOID(spins * spintime >= 5000);
+
+ efx_mdio_write(port, mmd, MDIO_CTRL1, MDIO_CTRL1_RESET);
+ /* Wait for the reset bit to clear. */
+ do {
+ msleep(spintime);
+ ctrl = efx_mdio_read(port, mmd, MDIO_CTRL1);
+ spins--;
+
+ } while (spins && (ctrl & MDIO_CTRL1_RESET));
+
+ return spins ? spins : -ETIMEDOUT;
+}
+
+static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd)
+{
+ int status;
+
+ if (mmd != MDIO_MMD_AN) {
+ /* Read MMD STATUS2 to check it is responding. */
+ status = efx_mdio_read(efx, mmd, MDIO_STAT2);
+ if ((status & MDIO_STAT2_DEVPRST) != MDIO_STAT2_DEVPRST_VAL) {
+ netif_err(efx, hw, efx->net_dev,
+ "PHY MMD %d not responding.\n", mmd);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/* This ought to be ridiculous overkill. We expect it to fail rarely */
+#define MDIO45_RESET_TIME 1000 /* ms */
+#define MDIO45_RESET_ITERS 100
+
+int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
+{
+ const int spintime = MDIO45_RESET_TIME / MDIO45_RESET_ITERS;
+ int tries = MDIO45_RESET_ITERS;
+ int rc = 0;
+ int in_reset;
+
+ while (tries) {
+ int mask = mmd_mask;
+ int mmd = 0;
+ int stat;
+ in_reset = 0;
+ while (mask) {
+ if (mask & 1) {
+ stat = efx_mdio_read(efx, mmd, MDIO_CTRL1);
+ if (stat < 0) {
+ netif_err(efx, hw, efx->net_dev,
+ "failed to read status of"
+ " MMD %d\n", mmd);
+ return -EIO;
+ }
+ if (stat & MDIO_CTRL1_RESET)
+ in_reset |= (1 << mmd);
+ }
+ mask = mask >> 1;
+ mmd++;
+ }
+ if (!in_reset)
+ break;
+ tries--;
+ msleep(spintime);
+ }
+ if (in_reset != 0) {
+ netif_err(efx, hw, efx->net_dev,
+ "not all MMDs came out of reset in time."
+ " MMDs still in reset: %x\n", in_reset);
+ rc = -ETIMEDOUT;
+ }
+ return rc;
+}
+
+int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask)
+{
+ int mmd = 0, probe_mmd, devs1, devs2;
+ u32 devices;
+
+	/* Historically we have probed the PHYXS to find out what devices are
+	 * present, but that doesn't work so well if the PHYXS isn't expected
+	 * to exist; in that case just use the first MMD in the supplied mask. */
+ probe_mmd = (mmd_mask & MDIO_DEVS_PHYXS) ? MDIO_MMD_PHYXS :
+ __ffs(mmd_mask);
+
+ /* Check all the expected MMDs are present */
+ devs1 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS1);
+ devs2 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS2);
+ if (devs1 < 0 || devs2 < 0) {
+ netif_err(efx, hw, efx->net_dev,
+ "failed to read devices present\n");
+ return -EIO;
+ }
+ devices = devs1 | (devs2 << 16);
+ if ((devices & mmd_mask) != mmd_mask) {
+ netif_err(efx, hw, efx->net_dev,
+ "required MMDs not present: got %x, wanted %x\n",
+ devices, mmd_mask);
+ return -ENODEV;
+ }
+ netif_vdbg(efx, hw, efx->net_dev, "Devices present: %x\n", devices);
+
+ /* Check all required MMDs are responding and happy. */
+ while (mmd_mask) {
+ if ((mmd_mask & 1) && efx_mdio_check_mmd(efx, mmd))
+ return -EIO;
+ mmd_mask = mmd_mask >> 1;
+ mmd++;
+ }
+
+ return 0;
+}
+
+bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
+{
+	/* If the port is in loopback, then we should only consider a subset
+	 * of MMDs */
+ if (LOOPBACK_INTERNAL(efx))
+ return true;
+ else if (LOOPBACK_MASK(efx) & LOOPBACKS_WS)
+ return false;
+ else if (efx_phy_mode_disabled(efx->phy_mode))
+ return false;
+ else if (efx->loopback_mode == LOOPBACK_PHYXS)
+ mmd_mask &= ~(MDIO_DEVS_PHYXS |
+ MDIO_DEVS_PCS |
+ MDIO_DEVS_PMAPMD |
+ MDIO_DEVS_AN);
+ else if (efx->loopback_mode == LOOPBACK_PCS)
+ mmd_mask &= ~(MDIO_DEVS_PCS |
+ MDIO_DEVS_PMAPMD |
+ MDIO_DEVS_AN);
+ else if (efx->loopback_mode == LOOPBACK_PMAPMD)
+ mmd_mask &= ~(MDIO_DEVS_PMAPMD |
+ MDIO_DEVS_AN);
+
+ return mdio45_links_ok(&efx->mdio, mmd_mask);
+}
+
+void efx_mdio_transmit_disable(struct efx_nic *efx)
+{
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD,
+ MDIO_PMA_TXDIS, MDIO_PMD_TXDIS_GLOBAL,
+ efx->phy_mode & PHY_MODE_TX_DISABLED);
+}
+
+void efx_mdio_phy_reconfigure(struct efx_nic *efx)
+{
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD,
+ MDIO_CTRL1, MDIO_PMA_CTRL1_LOOPBACK,
+ efx->loopback_mode == LOOPBACK_PMAPMD);
+ efx_mdio_set_flag(efx, MDIO_MMD_PCS,
+ MDIO_CTRL1, MDIO_PCS_CTRL1_LOOPBACK,
+ efx->loopback_mode == LOOPBACK_PCS);
+ efx_mdio_set_flag(efx, MDIO_MMD_PHYXS,
+ MDIO_CTRL1, MDIO_PHYXS_CTRL1_LOOPBACK,
+ efx->loopback_mode == LOOPBACK_PHYXS_WS);
+}
+
+static void efx_mdio_set_mmd_lpower(struct efx_nic *efx,
+ int lpower, int mmd)
+{
+ int stat = efx_mdio_read(efx, mmd, MDIO_STAT1);
+
+ netif_vdbg(efx, drv, efx->net_dev, "Setting low power mode for MMD %d to %d\n",
+ mmd, lpower);
+
+ if (stat & MDIO_STAT1_LPOWERABLE) {
+ efx_mdio_set_flag(efx, mmd, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER, lpower);
+ }
+}
+
+void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
+ int low_power, unsigned int mmd_mask)
+{
+ int mmd = 0;
+ mmd_mask &= ~MDIO_DEVS_AN;
+ while (mmd_mask) {
+ if (mmd_mask & 1)
+ efx_mdio_set_mmd_lpower(efx, low_power, mmd);
+ mmd_mask = (mmd_mask >> 1);
+ mmd++;
+ }
+}
+
+/**
+ * efx_mdio_set_settings - Set (some of) the PHY settings over MDIO.
+ * @efx: Efx NIC
+ * @ecmd: New settings
+ */
+int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+ struct ethtool_cmd prev = { .cmd = ETHTOOL_GSET };
+
+ efx->phy_op->get_settings(efx, &prev);
+
+ if (ecmd->advertising == prev.advertising &&
+ ethtool_cmd_speed(ecmd) == ethtool_cmd_speed(&prev) &&
+ ecmd->duplex == prev.duplex &&
+ ecmd->port == prev.port &&
+ ecmd->autoneg == prev.autoneg)
+ return 0;
+
+ /* We can only change these settings for -T PHYs */
+ if (prev.port != PORT_TP || ecmd->port != PORT_TP)
+ return -EINVAL;
+
+ /* Check that PHY supports these settings */
+ if (!ecmd->autoneg ||
+ (ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported)
+ return -EINVAL;
+
+ efx_link_set_advertising(efx, ecmd->advertising | ADVERTISED_Autoneg);
+ efx_mdio_an_reconfigure(efx);
+ return 0;
+}
+
+/**
+ * efx_mdio_an_reconfigure - Push advertising flags and restart autonegotiation
+ * @efx: Efx NIC
+ */
+void efx_mdio_an_reconfigure(struct efx_nic *efx)
+{
+ int reg;
+
+ WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
+
+ /* Set up the base page */
+ reg = ADVERTISE_CSMA | ADVERTISE_RESV;
+ if (efx->link_advertising & ADVERTISED_Pause)
+ reg |= ADVERTISE_PAUSE_CAP;
+ if (efx->link_advertising & ADVERTISED_Asym_Pause)
+ reg |= ADVERTISE_PAUSE_ASYM;
+ efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
+
+ /* Set up the (extended) next page */
+ efx->phy_op->set_npage_adv(efx, efx->link_advertising);
+
+ /* Enable and restart AN */
+ reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
+ reg |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART | MDIO_AN_CTRL1_XNP;
+ efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
+}
+
+u8 efx_mdio_get_pause(struct efx_nic *efx)
+{
+ BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX));
+
+ if (!(efx->wanted_fc & EFX_FC_AUTO))
+ return efx->wanted_fc;
+
+ WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
+
+ return mii_resolve_flowctrl_fdx(
+ mii_advertise_flowctrl(efx->wanted_fc),
+ efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
+}
+
+int efx_mdio_test_alive(struct efx_nic *efx)
+{
+ int rc;
+ int devad = __ffs(efx->mdio.mmds);
+ u16 physid1, physid2;
+
+ mutex_lock(&efx->mac_lock);
+
+ physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
+ physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);
+
+ if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
+ (physid2 == 0x0000) || (physid2 == 0xffff)) {
+ netif_err(efx, hw, efx->net_dev,
+ "no MDIO PHY present with ID %d\n", efx->mdio.prtad);
+ rc = -EINVAL;
+ } else {
+ rc = efx_mdio_check_mmds(efx, efx->mdio.mmds);
+ }
+
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
diff --git a/drivers/net/ethernet/sfc/mdio_10g.h b/drivers/net/ethernet/sfc/mdio_10g.h
new file mode 100644
index 000000000000..a97dbbd2de99
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mdio_10g.h
@@ -0,0 +1,112 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2006-2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_MDIO_10G_H
+#define EFX_MDIO_10G_H
+
+#include <linux/mdio.h>
+
+/*
+ * Helper functions for doing 10G MDIO as specified in IEEE 802.3 clause 45.
+ */
+
+#include "efx.h"
+
+static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
+static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
+extern unsigned efx_mdio_id_oui(u32 id);
+
+static inline int efx_mdio_read(struct efx_nic *efx, int devad, int addr)
+{
+ return efx->mdio.mdio_read(efx->net_dev, efx->mdio.prtad, devad, addr);
+}
+
+static inline void
+efx_mdio_write(struct efx_nic *efx, int devad, int addr, int value)
+{
+ efx->mdio.mdio_write(efx->net_dev, efx->mdio.prtad, devad, addr, value);
+}
+
+static inline u32 efx_mdio_read_id(struct efx_nic *efx, int mmd)
+{
+ u16 id_low = efx_mdio_read(efx, mmd, MDIO_DEVID2);
+ u16 id_hi = efx_mdio_read(efx, mmd, MDIO_DEVID1);
+ return (id_hi << 16) | (id_low);
+}
+
+static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
+{
+ int i, lane_status;
+ bool sync;
+
+ for (i = 0; i < 2; ++i)
+ lane_status = efx_mdio_read(efx, MDIO_MMD_PHYXS,
+ MDIO_PHYXS_LNSTAT);
+
+ sync = !!(lane_status & MDIO_PHYXS_LNSTAT_ALIGN);
+ if (!sync)
+ netif_dbg(efx, hw, efx->net_dev, "XGXS lane status: %x\n",
+ lane_status);
+ return sync;
+}
+
+extern const char *efx_mdio_mmd_name(int mmd);
+
+/*
+ * Reset a specific MMD and wait for reset to clear.
+ * Return number of spins left (>0) on success, -%ETIMEDOUT on failure.
+ *
+ * This function will sleep
+ */
+extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd,
+ int spins, int spintime);
+
+/* As efx_mdio_check_mmd but for multiple MMDs */
+int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
+
+/* Check the link status of specified mmds in bit mask */
+extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
+
+/* Generic transmit disable support through PMAPMD */
+extern void efx_mdio_transmit_disable(struct efx_nic *efx);
+
+/* Generic part of reconfigure: set/clear loopback bits */
+extern void efx_mdio_phy_reconfigure(struct efx_nic *efx);
+
+/* Set the power state of the specified MMDs */
+extern void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
+ int low_power, unsigned int mmd_mask);
+
+/* Set (some of) the PHY settings over MDIO */
+extern int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
+
+/* Push advertising flags and restart autonegotiation */
+extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
+
+/* Get pause parameters from AN if available (otherwise return
+ * requested pause parameters)
+ */
+u8 efx_mdio_get_pause(struct efx_nic *efx);
+
+/* Wait for specified MMDs to exit reset within a timeout */
+extern int efx_mdio_wait_reset_mmds(struct efx_nic *efx,
+ unsigned int mmd_mask);
+
+/* Set or clear flag, debouncing */
+static inline void
+efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
+ int mask, bool state)
+{
+ mdio_set_flag(&efx->mdio, efx->mdio.prtad, devad, addr, mask, state);
+}
+
+/* Liveness self-test for MDIO PHYs */
+extern int efx_mdio_test_alive(struct efx_nic *efx);
+
+#endif /* EFX_MDIO_10G_H */
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
new file mode 100644
index 000000000000..b6304486f244
--- /dev/null
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -0,0 +1,693 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/rtnetlink.h>
+
+#include "net_driver.h"
+#include "spi.h"
+#include "efx.h"
+#include "nic.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+
+#define EFX_SPI_VERIFY_BUF_LEN 16
+
+struct efx_mtd_partition {
+ struct mtd_info mtd;
+ union {
+ struct {
+ bool updating;
+ u8 nvram_type;
+ u16 fw_subtype;
+ } mcdi;
+ size_t offset;
+ };
+ const char *type_name;
+ char name[IFNAMSIZ + 20];
+};
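+
+/* Note: the union above reflects the two backends implemented below;
+ * Falcon partitions address raw SPI through @offset, while Siena
+ * partitions are managed through MCDI state in @mcdi.
+ */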
+
+struct efx_mtd_ops {
+ int (*read)(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, u8 *buffer);
+ int (*erase)(struct mtd_info *mtd, loff_t start, size_t len);
+ int (*write)(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, const u8 *buffer);
+ int (*sync)(struct mtd_info *mtd);
+};
+
+struct efx_mtd {
+ struct list_head node;
+ struct efx_nic *efx;
+ const struct efx_spi_device *spi;
+ const char *name;
+ const struct efx_mtd_ops *ops;
+ size_t n_parts;
+ struct efx_mtd_partition part[0];
+};
+
+#define efx_for_each_partition(part, efx_mtd) \
+ for ((part) = &(efx_mtd)->part[0]; \
+ (part) != &(efx_mtd)->part[(efx_mtd)->n_parts]; \
+ (part)++)
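+
+/* For example (illustrative only), logging every partition of a
+ * device:
+ *
+ *	struct efx_mtd_partition *part;
+ *
+ *	efx_for_each_partition(part, efx_mtd)
+ *		pr_info("%s\n", part->name);
+ */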
+
+#define to_efx_mtd_partition(mtd) \
+ container_of(mtd, struct efx_mtd_partition, mtd)
+
+static int falcon_mtd_probe(struct efx_nic *efx);
+static int siena_mtd_probe(struct efx_nic *efx);
+
+/* SPI utilities */
+
+static int
+efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
+{
+ struct efx_mtd *efx_mtd = part->mtd.priv;
+ const struct efx_spi_device *spi = efx_mtd->spi;
+ struct efx_nic *efx = efx_mtd->efx;
+ u8 status;
+ int rc, i;
+
+ /* Wait up to 4s for flash/EEPROM to finish a slow operation. */
+ for (i = 0; i < 40; i++) {
+ __set_current_state(uninterruptible ?
+ TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ / 10);
+ rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+ &status, sizeof(status));
+ if (rc)
+ return rc;
+ if (!(status & SPI_STATUS_NRDY))
+ return 0;
+ if (signal_pending(current))
+ return -EINTR;
+ }
+ pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name);
+ return -ETIMEDOUT;
+}
+
+static int
+efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
+{
+ const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
+ SPI_STATUS_BP0);
+ u8 status;
+ int rc;
+
+ rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+ &status, sizeof(status));
+ if (rc)
+ return rc;
+
+ if (!(status & unlock_mask))
+ return 0; /* already unlocked */
+
+ rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
+ if (rc)
+ return rc;
+ rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
+ if (rc)
+ return rc;
+
+ status &= ~unlock_mask;
+ rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
+ NULL, sizeof(status));
+ if (rc)
+ return rc;
+ rc = falcon_spi_wait_write(efx, spi);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int
+efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
+{
+ struct efx_mtd *efx_mtd = part->mtd.priv;
+ const struct efx_spi_device *spi = efx_mtd->spi;
+ struct efx_nic *efx = efx_mtd->efx;
+ unsigned pos, block_len;
+ u8 empty[EFX_SPI_VERIFY_BUF_LEN];
+ u8 buffer[EFX_SPI_VERIFY_BUF_LEN];
+ int rc;
+
+ if (len != spi->erase_size)
+ return -EINVAL;
+
+ if (spi->erase_command == 0)
+ return -EOPNOTSUPP;
+
+ rc = efx_spi_unlock(efx, spi);
+ if (rc)
+ return rc;
+ rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
+ if (rc)
+ return rc;
+ rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
+ NULL, 0);
+ if (rc)
+ return rc;
+ rc = efx_spi_slow_wait(part, false);
+
+ /* Verify the entire region has been wiped */
+ memset(empty, 0xff, sizeof(empty));
+ for (pos = 0; pos < len; pos += block_len) {
+ block_len = min(len - pos, sizeof(buffer));
+ rc = falcon_spi_read(efx, spi, start + pos, block_len,
+ NULL, buffer);
+ if (rc)
+ return rc;
+ if (memcmp(empty, buffer, block_len))
+ return -EIO;
+
+ /* Avoid locking up the system */
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+ }
+
+ return rc;
+}
+
+/* MTD interface */
+
+static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
+{
+ struct efx_mtd *efx_mtd = mtd->priv;
+ int rc;
+
+ rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len);
+ if (rc == 0) {
+ erase->state = MTD_ERASE_DONE;
+ } else {
+ erase->state = MTD_ERASE_FAILED;
+ erase->fail_addr = 0xffffffff;
+ }
+ mtd_erase_callback(erase);
+ return rc;
+}
+
+static void efx_mtd_sync(struct mtd_info *mtd)
+{
+ struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
+ struct efx_mtd *efx_mtd = mtd->priv;
+ int rc;
+
+ rc = efx_mtd->ops->sync(mtd);
+ if (rc)
+ pr_err("%s: %s sync failed (%d)\n",
+ part->name, efx_mtd->name, rc);
+}
+
+static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
+{
+ int rc;
+
+ for (;;) {
+ rc = mtd_device_unregister(&part->mtd);
+ if (rc != -EBUSY)
+ break;
+ ssleep(1);
+ }
+ WARN_ON(rc);
+}
+
+static void efx_mtd_remove_device(struct efx_mtd *efx_mtd)
+{
+ struct efx_mtd_partition *part;
+
+ efx_for_each_partition(part, efx_mtd)
+ efx_mtd_remove_partition(part);
+ list_del(&efx_mtd->node);
+ kfree(efx_mtd);
+}
+
+static void efx_mtd_rename_device(struct efx_mtd *efx_mtd)
+{
+ struct efx_mtd_partition *part;
+
+ efx_for_each_partition(part, efx_mtd)
+ if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0)
+ snprintf(part->name, sizeof(part->name),
+ "%s %s:%02x", efx_mtd->efx->name,
+ part->type_name, part->mcdi.fw_subtype);
+ else
+ snprintf(part->name, sizeof(part->name),
+ "%s %s", efx_mtd->efx->name,
+ part->type_name);
+}
+
+static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
+{
+ struct efx_mtd_partition *part;
+
+ efx_mtd->efx = efx;
+
+ efx_mtd_rename_device(efx_mtd);
+
+ efx_for_each_partition(part, efx_mtd) {
+ part->mtd.writesize = 1;
+
+ part->mtd.owner = THIS_MODULE;
+ part->mtd.priv = efx_mtd;
+ part->mtd.name = part->name;
+ part->mtd.erase = efx_mtd_erase;
+ part->mtd.read = efx_mtd->ops->read;
+ part->mtd.write = efx_mtd->ops->write;
+ part->mtd.sync = efx_mtd_sync;
+
+ if (mtd_device_register(&part->mtd, NULL, 0))
+ goto fail;
+ }
+
+ list_add(&efx_mtd->node, &efx->mtd_list);
+ return 0;
+
+fail:
+ while (part != &efx_mtd->part[0]) {
+ --part;
+ efx_mtd_remove_partition(part);
+ }
+ /* mtd_device_register() returns 1 if the MTD table is full */
+ return -ENOMEM;
+}
+
+void efx_mtd_remove(struct efx_nic *efx)
+{
+ struct efx_mtd *efx_mtd, *next;
+
+ WARN_ON(efx_dev_registered(efx));
+
+ list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node)
+ efx_mtd_remove_device(efx_mtd);
+}
+
+void efx_mtd_rename(struct efx_nic *efx)
+{
+ struct efx_mtd *efx_mtd;
+
+ ASSERT_RTNL();
+
+ list_for_each_entry(efx_mtd, &efx->mtd_list, node)
+ efx_mtd_rename_device(efx_mtd);
+}
+
+int efx_mtd_probe(struct efx_nic *efx)
+{
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ return siena_mtd_probe(efx);
+ else
+ return falcon_mtd_probe(efx);
+}
+
+/* Implementation of MTD operations for Falcon */
+
+static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, u8 *buffer)
+{
+ struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
+ struct efx_mtd *efx_mtd = mtd->priv;
+ const struct efx_spi_device *spi = efx_mtd->spi;
+ struct efx_nic *efx = efx_mtd->efx;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
+ if (rc)
+ return rc;
+ rc = falcon_spi_read(efx, spi, part->offset + start, len,
+ retlen, buffer);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
+{
+ struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
+ struct efx_mtd *efx_mtd = mtd->priv;
+ struct efx_nic *efx = efx_mtd->efx;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
+ if (rc)
+ return rc;
+ rc = efx_spi_erase(part, part->offset + start, len);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, const u8 *buffer)
+{
+ struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
+ struct efx_mtd *efx_mtd = mtd->priv;
+ const struct efx_spi_device *spi = efx_mtd->spi;
+ struct efx_nic *efx = efx_mtd->efx;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
+ if (rc)
+ return rc;
+ rc = falcon_spi_write(efx, spi, part->offset + start, len,
+ retlen, buffer);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_sync(struct mtd_info *mtd)
+{
+ struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
+ struct efx_mtd *efx_mtd = mtd->priv;
+ struct efx_nic *efx = efx_mtd->efx;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ mutex_lock(&nic_data->spi_lock);
+ rc = efx_spi_slow_wait(part, true);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static struct efx_mtd_ops falcon_mtd_ops = {
+ .read = falcon_mtd_read,
+ .erase = falcon_mtd_erase,
+ .write = falcon_mtd_write,
+ .sync = falcon_mtd_sync,
+};
+
+static int falcon_mtd_probe(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ struct efx_spi_device *spi;
+ struct efx_mtd *efx_mtd;
+ int rc = -ENODEV;
+
+ ASSERT_RTNL();
+
+ spi = &nic_data->spi_flash;
+ if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
+ efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
+ GFP_KERNEL);
+ if (!efx_mtd)
+ return -ENOMEM;
+
+ efx_mtd->spi = spi;
+ efx_mtd->name = "flash";
+ efx_mtd->ops = &falcon_mtd_ops;
+
+ efx_mtd->n_parts = 1;
+ efx_mtd->part[0].mtd.type = MTD_NORFLASH;
+ efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
+ efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
+ efx_mtd->part[0].mtd.erasesize = spi->erase_size;
+ efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
+ efx_mtd->part[0].type_name = "sfc_flash_bootrom";
+
+ rc = efx_mtd_probe_device(efx, efx_mtd);
+ if (rc) {
+ kfree(efx_mtd);
+ return rc;
+ }
+ }
+
+ spi = &nic_data->spi_eeprom;
+ if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) {
+ efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
+ GFP_KERNEL);
+ if (!efx_mtd)
+ return -ENOMEM;
+
+ efx_mtd->spi = spi;
+ efx_mtd->name = "EEPROM";
+ efx_mtd->ops = &falcon_mtd_ops;
+
+ efx_mtd->n_parts = 1;
+ efx_mtd->part[0].mtd.type = MTD_RAM;
+ efx_mtd->part[0].mtd.flags = MTD_CAP_RAM;
+ efx_mtd->part[0].mtd.size =
+ min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
+ EFX_EEPROM_BOOTCONFIG_START;
+ efx_mtd->part[0].mtd.erasesize = spi->erase_size;
+ efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START;
+ efx_mtd->part[0].type_name = "sfc_bootconfig";
+
+ rc = efx_mtd_probe_device(efx, efx_mtd);
+ if (rc) {
+ kfree(efx_mtd);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+/* Implementation of MTD operations for Siena */
+
+static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, u8 *buffer)
+{
+ struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
+ struct efx_mtd *efx_mtd = mtd->priv;
+ struct efx_nic *efx = efx_mtd->efx;
+ loff_t offset = start;
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk;
+ int rc = 0;
+
+ while (offset < end) {
+ chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
+ rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
+ buffer, chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ buffer += chunk;
+ }
+out:
+ *retlen = offset - start;
+ return rc;
+}
+
+static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
+{
+ struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
+ struct efx_mtd *efx_mtd = mtd->priv;
+ struct efx_nic *efx = efx_mtd->efx;
+ loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk = part->mtd.erasesize;
+ int rc = 0;
+
+ if (!part->mcdi.updating) {
+ rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
+ if (rc)
+ goto out;
+ part->mcdi.updating = 1;
+ }
+
+ /* The MCDI interface can in fact do multiple erase blocks at once;
+ * but erasing may be slow, so we make multiple calls here to avoid
+ * tripping the MCDI RPC timeout. */
+ while (offset < end) {
+ rc = efx_mcdi_nvram_erase(efx, part->mcdi.nvram_type, offset,
+ chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ }
+out:
+ return rc;
+}
+
+static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, const u8 *buffer)
+{
+ struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
+ struct efx_mtd *efx_mtd = mtd->priv;
+ struct efx_nic *efx = efx_mtd->efx;
+ loff_t offset = start;
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk;
+ int rc = 0;
+
+ if (!part->mcdi.updating) {
+ rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
+ if (rc)
+ goto out;
+ part->mcdi.updating = 1;
+ }
+
+ while (offset < end) {
+ chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
+ rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
+ buffer, chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ buffer += chunk;
+ }
+out:
+ *retlen = offset - start;
+ return rc;
+}
+
+static int siena_mtd_sync(struct mtd_info *mtd)
+{
+ struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
+ struct efx_mtd *efx_mtd = mtd->priv;
+ struct efx_nic *efx = efx_mtd->efx;
+ int rc = 0;
+
+ if (part->mcdi.updating) {
+ part->mcdi.updating = 0;
+ rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
+ }
+
+ return rc;
+}
+
+static struct efx_mtd_ops siena_mtd_ops = {
+ .read = siena_mtd_read,
+ .erase = siena_mtd_erase,
+ .write = siena_mtd_write,
+ .sync = siena_mtd_sync,
+};
+
+struct siena_nvram_type_info {
+ int port;
+ const char *name;
+};
+
+static struct siena_nvram_type_info siena_nvram_types[] = {
+ [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
+ [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
+ [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
+ [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
+ [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
+ [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
+ [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
+ [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
+ [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
+ [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
+ [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
+ [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
+};
+
+static int siena_mtd_probe_partition(struct efx_nic *efx,
+ struct efx_mtd *efx_mtd,
+ unsigned int part_id,
+ unsigned int type)
+{
+ struct efx_mtd_partition *part = &efx_mtd->part[part_id];
+ struct siena_nvram_type_info *info;
+ size_t size, erase_size;
+ bool protected;
+ int rc;
+
+ if (type >= ARRAY_SIZE(siena_nvram_types))
+ return -ENODEV;
+
+ info = &siena_nvram_types[type];
+
+ if (info->port != efx_port_num(efx))
+ return -ENODEV;
+
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
+ if (rc)
+ return rc;
+ if (protected)
+ return -ENODEV; /* hide it */
+
+ part->mcdi.nvram_type = type;
+ part->type_name = info->name;
+
+ part->mtd.type = MTD_NORFLASH;
+ part->mtd.flags = MTD_CAP_NORFLASH;
+ part->mtd.size = size;
+ part->mtd.erasesize = erase_size;
+
+ return 0;
+}
+
+static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
+ struct efx_mtd *efx_mtd)
+{
+ struct efx_mtd_partition *part;
+ uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN /
+ sizeof(uint16_t)];
+ int rc;
+
+ rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list);
+ if (rc)
+ return rc;
+
+ efx_for_each_partition(part, efx_mtd)
+ part->mcdi.fw_subtype = fw_subtype_list[part->mcdi.nvram_type];
+
+ return 0;
+}
+
+static int siena_mtd_probe(struct efx_nic *efx)
+{
+ struct efx_mtd *efx_mtd;
+ int rc = -ENODEV;
+ u32 nvram_types;
+ unsigned int type;
+
+ ASSERT_RTNL();
+
+ rc = efx_mcdi_nvram_types(efx, &nvram_types);
+ if (rc)
+ return rc;
+
+ efx_mtd = kzalloc(sizeof(*efx_mtd) +
+ hweight32(nvram_types) * sizeof(efx_mtd->part[0]),
+ GFP_KERNEL);
+ if (!efx_mtd)
+ return -ENOMEM;
+
+ efx_mtd->name = "Siena NVRAM manager";
+
+ efx_mtd->ops = &siena_mtd_ops;
+
+ type = 0;
+ efx_mtd->n_parts = 0;
+
+ while (nvram_types != 0) {
+ if (nvram_types & 1) {
+ rc = siena_mtd_probe_partition(efx, efx_mtd,
+ efx_mtd->n_parts, type);
+ if (rc == 0)
+ efx_mtd->n_parts++;
+ else if (rc != -ENODEV)
+ goto fail;
+ }
+ type++;
+ nvram_types >>= 1;
+ }
+
+ rc = siena_mtd_get_fw_subtypes(efx, efx_mtd);
+ if (rc)
+ goto fail;
+
+ rc = efx_mtd_probe_device(efx, efx_mtd);
+fail:
+ if (rc)
+ kfree(efx_mtd);
+ return rc;
+}
+
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
new file mode 100644
index 000000000000..b8e251a1ee48
--- /dev/null
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -0,0 +1,1060 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/* Common definitions for all Efx net driver code */
+
+#ifndef EFX_NET_DRIVER_H
+#define EFX_NET_DRIVER_H
+
+#if defined(EFX_ENABLE_DEBUG) && !defined(DEBUG)
+#define DEBUG
+#endif
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/timer.h>
+#include <linux/mdio.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/highmem.h>
+#include <linux/workqueue.h>
+#include <linux/vmalloc.h>
+#include <linux/i2c.h>
+
+#include "enum.h"
+#include "bitfield.h"
+
+/**************************************************************************
+ *
+ * Build definitions
+ *
+ **************************************************************************/
+
+#define EFX_DRIVER_VERSION "3.1"
+
+#ifdef EFX_ENABLE_DEBUG
+#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
+#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
+#else
+#define EFX_BUG_ON_PARANOID(x) do {} while (0)
+#define EFX_WARN_ON_PARANOID(x) do {} while (0)
+#endif
+
+/**************************************************************************
+ *
+ * Efx data structures
+ *
+ **************************************************************************/
+
+#define EFX_MAX_CHANNELS 32
+#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
+
+/* Checksum generation is a per-queue option in hardware, so each
+ * queue visible to the networking core is backed by two hardware TX
+ * queues. */
+#define EFX_MAX_TX_TC 2
+#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
+#define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */
+#define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */
+#define EFX_TXQ_TYPES 4
+#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
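+
+/* A sketch of the numbering the above implies (the authoritative
+ * layout is established where the channels are set up): a TX queue
+ * number is expected to be channel * EFX_TXQ_TYPES + type, where type
+ * is any combination of the flags above. For example, the
+ * checksum-offloading, high-priority queue of channel 3 would be
+ * numbered 3 * 4 + (EFX_TXQ_TYPE_OFFLOAD | EFX_TXQ_TYPE_HIGHPRI) = 15.
+ */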
+
+/**
+ * struct efx_special_buffer - An Efx special buffer
+ * @addr: CPU base address of the buffer
+ * @dma_addr: DMA base address of the buffer
+ * @len: Buffer length, in bytes
+ * @index: Buffer index within controller's buffer table
+ * @entries: Number of buffer table entries
+ *
+ * Special buffers are used for the event queues and the TX and RX
+ * descriptor queues for each channel. They are *not* used for the
+ * actual transmit and receive buffers.
+ */
+struct efx_special_buffer {
+ void *addr;
+ dma_addr_t dma_addr;
+ unsigned int len;
+ int index;
+ int entries;
+};
+
+enum efx_flush_state {
+ FLUSH_NONE,
+ FLUSH_PENDING,
+ FLUSH_FAILED,
+ FLUSH_DONE,
+};
+
+/**
+ * struct efx_tx_buffer - An Efx TX buffer
+ * @skb: The associated socket buffer.
+ * Set only on the final fragment of a packet; %NULL for all other
+ * fragments. When this fragment completes, then we can free this
+ * skb.
+ * @tsoh: The associated TSO header structure, or %NULL if this
+ * buffer is not a TSO header.
+ * @dma_addr: DMA address of the fragment.
+ * @len: Length of this fragment.
+ * This field is zero when the queue slot is empty.
+ * @continuation: True if this fragment is not the end of a packet.
+ * @unmap_single: True if pci_unmap_single should be used.
+ * @unmap_len: Length of this fragment to unmap
+ */
+struct efx_tx_buffer {
+ const struct sk_buff *skb;
+ struct efx_tso_header *tsoh;
+ dma_addr_t dma_addr;
+ unsigned short len;
+ bool continuation;
+ bool unmap_single;
+ unsigned short unmap_len;
+};
+
+/**
+ * struct efx_tx_queue - An Efx TX queue
+ *
+ * This is a ring buffer of TX fragments.
+ * Since the TX completion path always executes on the same
+ * CPU and the xmit path can operate on different CPUs,
+ * performance is increased by ensuring that the completion
+ * path and the xmit path operate on different cache lines.
+ * This is particularly important if the xmit path is always
+ * executing on one CPU which is different from the completion
+ * path. There is also a cache line for members which are
+ * read but not written on the fast path.
+ *
+ * @efx: The associated Efx NIC
+ * @queue: DMA queue number
+ * @channel: The associated channel
+ * @core_txq: The networking core TX queue structure
+ * @buffer: The software buffer ring
+ * @txd: The hardware descriptor ring
+ * @ptr_mask: The size of the ring minus 1.
+ * @initialised: Has hardware queue been initialised?
+ * @flushed: Used when handling queue flushing
+ * @read_count: Current read pointer.
+ * This is the number of buffers that have been removed from both rings.
+ * @old_write_count: The value of @write_count when last checked.
+ * This is here for performance reasons. The xmit path will
+ * only get the up-to-date value of @write_count if this
+ * variable indicates that the queue is empty. This is to
+ * avoid cache-line ping-pong between the xmit path and the
+ * completion path.
+ * @insert_count: Current insert pointer
+ * This is the number of buffers that have been added to the
+ * software ring.
+ * @write_count: Current write pointer
+ * This is the number of buffers that have been added to the
+ * hardware ring.
+ * @old_read_count: The value of read_count when last checked.
+ * This is here for performance reasons. The xmit path will
+ * only get the up-to-date value of read_count if this
+ * variable indicates that the queue is full. This is to
+ * avoid cache-line ping-pong between the xmit path and the
+ * completion path.
+ * @tso_headers_free: A list of TSO headers allocated for this TX queue
+ * that are not in use, and so available for new TSO sends. The list
+ * is protected by the TX queue lock.
+ * @tso_bursts: Number of times TSO xmit invoked by kernel
+ * @tso_long_headers: Number of packets with headers too long for standard
+ * blocks
+ * @tso_packets: Number of packets via the TSO xmit path
+ * @pushes: Number of times the TX push feature has been used
+ * @empty_read_count: If the completion path has seen the queue as empty
+ * and the transmission path has not yet checked this, the value of
+ * @read_count bitwise-ORed with %EFX_EMPTY_COUNT_VALID; otherwise 0.
+ */
+struct efx_tx_queue {
+ /* Members which don't change on the fast path */
+ struct efx_nic *efx ____cacheline_aligned_in_smp;
+ unsigned queue;
+ struct efx_channel *channel;
+ struct netdev_queue *core_txq;
+ struct efx_tx_buffer *buffer;
+ struct efx_special_buffer txd;
+ unsigned int ptr_mask;
+ bool initialised;
+ enum efx_flush_state flushed;
+
+ /* Members used mainly on the completion path */
+ unsigned int read_count ____cacheline_aligned_in_smp;
+ unsigned int old_write_count;
+
+ /* Members used only on the xmit path */
+ unsigned int insert_count ____cacheline_aligned_in_smp;
+ unsigned int write_count;
+ unsigned int old_read_count;
+ struct efx_tso_header *tso_headers_free;
+ unsigned int tso_bursts;
+ unsigned int tso_long_headers;
+ unsigned int tso_packets;
+ unsigned int pushes;
+
+ /* Members shared between paths and sometimes updated */
+ unsigned int empty_read_count ____cacheline_aligned_in_smp;
+#define EFX_EMPTY_COUNT_VALID 0x80000000
+};
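+
+/* A minimal sketch of the @empty_read_count handshake documented
+ * above (illustrative, not the driver's actual completion/xmit code;
+ * memory barriers are omitted). The completion path, on finding the
+ * queue drained, publishes:
+ *
+ *	tx_queue->empty_read_count =
+ *		tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
+ *
+ * and the xmit path may then treat the queue as having been empty at
+ * a known point, e.g. to push a descriptor directly:
+ *
+ *	if (tx_queue->empty_read_count ==
+ *	    (tx_queue->write_count | EFX_EMPTY_COUNT_VALID))
+ *		...
+ */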
+
+/**
+ * struct efx_rx_buffer - An Efx RX data buffer
+ * @dma_addr: DMA base address of the buffer
+ * @skb: The associated socket buffer, if any.
+ * If both this and page are %NULL, the buffer slot is currently free.
+ * @page: The associated page buffer, if any.
+ * If both this and skb are %NULL, the buffer slot is currently free.
+ * @len: Buffer length, in bytes.
+ * @is_page: Indicates if @page is valid. If false, @skb is valid.
+ */
+struct efx_rx_buffer {
+ dma_addr_t dma_addr;
+ union {
+ struct sk_buff *skb;
+ struct page *page;
+ } u;
+ unsigned int len;
+ bool is_page;
+};
+
+/**
+ * struct efx_rx_page_state - Page-based rx buffer state
+ *
+ * Inserted at the start of every page allocated for receive buffers.
+ * Used to facilitate sharing dma mappings between recycled rx buffers
+ * and those passed up to the kernel.
+ *
+ * @refcnt: Number of struct efx_rx_buffer's referencing this page.
+ * When refcnt falls to zero, the page is unmapped for dma
+ * @dma_addr: The dma address of this page.
+ */
+struct efx_rx_page_state {
+ unsigned refcnt;
+ dma_addr_t dma_addr;
+
+ unsigned int __pad[0] ____cacheline_aligned;
+};
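+
+/* Illustrative recovery of this state from a receive buffer's page (a
+ * sketch; the real logic lives in the RX path):
+ *
+ *	struct efx_rx_page_state *state = page_address(rx_buf->u.page);
+ *
+ *	if (--state->refcnt == 0)
+ *		... unmap state->dma_addr ...
+ */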
+
+/**
+ * struct efx_rx_queue - An Efx RX queue
+ * @efx: The associated Efx NIC
+ * @buffer: The software buffer ring
+ * @rxd: The hardware descriptor ring
+ * @ptr_mask: The size of the ring minus 1.
+ * @added_count: Number of buffers added to the receive queue.
+ * @notified_count: Number of buffers given to NIC (<= @added_count).
+ * @removed_count: Number of buffers removed from the receive queue.
+ * @max_fill: RX descriptor maximum fill level (<= ring size)
+ * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
+ * (<= @max_fill)
+ * @fast_fill_limit: The level to which a fast fill will fill
+ * (@fast_fill_trigger <= @fast_fill_limit <= @max_fill)
+ * @min_fill: RX descriptor minimum non-zero fill level.
+ * This records the minimum fill level observed when a ring
+ * refill was triggered.
+ * @alloc_page_count: RX allocation strategy counter.
+ * @alloc_skb_count: RX allocation strategy counter.
+ * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
+ * @flushed: Use when handling queue flushing
+ */
+struct efx_rx_queue {
+ struct efx_nic *efx;
+ struct efx_rx_buffer *buffer;
+ struct efx_special_buffer rxd;
+ unsigned int ptr_mask;
+
+ int added_count;
+ int notified_count;
+ int removed_count;
+ unsigned int max_fill;
+ unsigned int fast_fill_trigger;
+ unsigned int fast_fill_limit;
+ unsigned int min_fill;
+ unsigned int min_overfill;
+ unsigned int alloc_page_count;
+ unsigned int alloc_skb_count;
+ struct timer_list slow_fill;
+ unsigned int slow_fill_count;
+
+ enum efx_flush_state flushed;
+};
+
+/**
+ * struct efx_buffer - An Efx general-purpose buffer
+ * @addr: host base address of the buffer
+ * @dma_addr: DMA base address of the buffer
+ * @len: Buffer length, in bytes
+ *
+ * The NIC uses these buffers for its interrupt status registers and
+ * MAC stats dumps.
+ */
+struct efx_buffer {
+ void *addr;
+ dma_addr_t dma_addr;
+ unsigned int len;
+};
+
+
+enum efx_rx_alloc_method {
+ RX_ALLOC_METHOD_AUTO = 0,
+ RX_ALLOC_METHOD_SKB = 1,
+ RX_ALLOC_METHOD_PAGE = 2,
+};
+
+/**
+ * struct efx_channel - An Efx channel
+ *
+ * A channel comprises an event queue, at least one TX queue, at least
+ * one RX queue, and an associated tasklet for processing the event
+ * queue.
+ *
+ * @efx: Associated Efx NIC
+ * @channel: Channel instance number
+ * @enabled: Channel enabled indicator
+ * @irq: IRQ number (MSI and MSI-X only)
+ * @irq_moderation: IRQ moderation value (in hardware ticks)
+ * @napi_dev: Net device used with NAPI
+ * @napi_str: NAPI control structure
+ * @work_pending: Is work pending via NAPI?
+ * @eventq: Event queue buffer
+ * @eventq_mask: Event queue pointer mask
+ * @eventq_read_ptr: Event queue read pointer
+ * @last_eventq_read_ptr: Last event queue read pointer value.
+ * @irq_count: Number of IRQs since last adaptive moderation decision
+ * @irq_mod_score: IRQ moderation score
+ * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
+ * and diagnostic counters
+ * @rx_alloc_push_pages: RX allocation method currently in use for pushing
+ * descriptors
+ * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
+ * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
+ * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
+ * @n_rx_mcast_mismatch: Count of unmatched multicast frames
+ * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
+ * @n_rx_overlength: Count of RX_OVERLENGTH errors
+ * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
+ * @rx_queue: RX queue for this channel
+ * @tx_queue: TX queues for this channel
+ */
+struct efx_channel {
+ struct efx_nic *efx;
+ int channel;
+ bool enabled;
+ int irq;
+ unsigned int irq_moderation;
+ struct net_device *napi_dev;
+ struct napi_struct napi_str;
+ bool work_pending;
+ struct efx_special_buffer eventq;
+ unsigned int eventq_mask;
+ unsigned int eventq_read_ptr;
+ unsigned int last_eventq_read_ptr;
+
+ unsigned int irq_count;
+ unsigned int irq_mod_score;
+#ifdef CONFIG_RFS_ACCEL
+ unsigned int rfs_filters_added;
+#endif
+
+ int rx_alloc_level;
+ int rx_alloc_push_pages;
+
+ unsigned n_rx_tobe_disc;
+ unsigned n_rx_ip_hdr_chksum_err;
+ unsigned n_rx_tcp_udp_chksum_err;
+ unsigned n_rx_mcast_mismatch;
+ unsigned n_rx_frm_trunc;
+ unsigned n_rx_overlength;
+ unsigned n_skbuff_leaks;
+
+ /* Used to pipeline received packets in order to optimise memory
+ * access with prefetches.
+ */
+ struct efx_rx_buffer *rx_pkt;
+ bool rx_pkt_csummed;
+
+ struct efx_rx_queue rx_queue;
+ struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
+};
+
+enum efx_led_mode {
+ EFX_LED_OFF = 0,
+ EFX_LED_ON = 1,
+ EFX_LED_DEFAULT = 2
+};
+
+#define STRING_TABLE_LOOKUP(val, member) \
+ ((val) < member ## _max) ? member ## _names[val] : "(invalid)"
+
+extern const char *efx_loopback_mode_names[];
+extern const unsigned int efx_loopback_mode_max;
+#define LOOPBACK_MODE(efx) \
+ STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)
+
+extern const char *efx_reset_type_names[];
+extern const unsigned int efx_reset_type_max;
+#define RESET_TYPE(type) \
+ STRING_TABLE_LOOKUP(type, efx_reset_type)
+
+enum efx_int_mode {
+	/* Be careful if altering this enum; keep the macro below correct */
+ EFX_INT_MODE_MSIX = 0,
+ EFX_INT_MODE_MSI = 1,
+ EFX_INT_MODE_LEGACY = 2,
+ EFX_INT_MODE_MAX /* Insert any new items before this */
+};
+#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
+
+enum nic_state {
+ STATE_INIT = 0,
+ STATE_RUNNING = 1,
+ STATE_FINI = 2,
+ STATE_DISABLED = 3,
+ STATE_MAX,
+};
+
+/*
+ * Alignment of page-allocated RX buffers
+ *
+ * Controls the number of bytes inserted at the start of an RX buffer.
+ * This is the equivalent of NET_IP_ALIGN [which controls the alignment
+ * of the skb->head for hardware DMA].
+ */
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#define EFX_PAGE_IP_ALIGN 0
+#else
+#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
+#endif
+
+/*
+ * Alignment of the skb->head which wraps a page-allocated RX buffer
+ *
+ * The skb allocated to wrap an rx_buffer can have this alignment. Since
+ * the data is memcpy'd from the rx_buf, it does not need to be equal to
+ * EFX_PAGE_IP_ALIGN.
+ */
+#define EFX_PAGE_SKB_ALIGN 2
+
+/* Forward declaration */
+struct efx_nic;
+
+/* Pseudo bit-mask flow control field */
+#define EFX_FC_RX FLOW_CTRL_RX
+#define EFX_FC_TX FLOW_CTRL_TX
+#define EFX_FC_AUTO 4
+
+/**
+ * struct efx_link_state - Current state of the link
+ * @up: Link is up
+ * @fd: Link is full-duplex
+ * @fc: Actual flow control flags
+ * @speed: Link speed (Mbps)
+ */
+struct efx_link_state {
+ bool up;
+ bool fd;
+ u8 fc;
+ unsigned int speed;
+};
+
+static inline bool efx_link_state_equal(const struct efx_link_state *left,
+ const struct efx_link_state *right)
+{
+ return left->up == right->up && left->fd == right->fd &&
+ left->fc == right->fc && left->speed == right->speed;
+}
+
+/**
+ * struct efx_mac_operations - Efx MAC operations table
+ * @reconfigure: Reconfigure MAC. Serialised by the mac_lock
+ * @update_stats: Update statistics
+ * @check_fault: Check fault state. True if fault present.
+ */
+struct efx_mac_operations {
+ int (*reconfigure) (struct efx_nic *efx);
+ void (*update_stats) (struct efx_nic *efx);
+ bool (*check_fault)(struct efx_nic *efx);
+};
+
+/**
+ * struct efx_phy_operations - Efx PHY operations table
+ * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds,
+ * efx->loopback_modes.
+ * @init: Initialise PHY
+ * @fini: Shut down PHY
+ * @remove: Free resources allocated by probe()
+ * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
+ * @poll: Update @link_state and report whether it changed.
+ * Serialised by the mac_lock.
+ * @get_settings: Get ethtool settings. Serialised by the mac_lock.
+ * @set_settings: Set ethtool settings. Serialised by the mac_lock.
+ * @set_npage_adv: Set abilities advertised in (Extended) Next Page
+ * (only needed where AN bit is set in mmds)
+ * @test_alive: Test that PHY is 'alive' (online)
+ * @test_name: Get the name of a PHY-specific test/result
+ * @run_tests: Run tests and record results as appropriate (offline).
+ * Flags are the ethtool tests flags.
+ */
+struct efx_phy_operations {
+ int (*probe) (struct efx_nic *efx);
+ int (*init) (struct efx_nic *efx);
+ void (*fini) (struct efx_nic *efx);
+ void (*remove) (struct efx_nic *efx);
+ int (*reconfigure) (struct efx_nic *efx);
+ bool (*poll) (struct efx_nic *efx);
+ void (*get_settings) (struct efx_nic *efx,
+ struct ethtool_cmd *ecmd);
+ int (*set_settings) (struct efx_nic *efx,
+ struct ethtool_cmd *ecmd);
+ void (*set_npage_adv) (struct efx_nic *efx, u32);
+ int (*test_alive) (struct efx_nic *efx);
+ const char *(*test_name) (struct efx_nic *efx, unsigned int index);
+ int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
+};
+
+/**
+ * enum efx_phy_mode - PHY operating mode flags
+ * @PHY_MODE_NORMAL: on and should pass traffic
+ * @PHY_MODE_TX_DISABLED: on with TX disabled
+ * @PHY_MODE_LOW_POWER: set to low power through MDIO
+ * @PHY_MODE_OFF: switched off through external control
+ * @PHY_MODE_SPECIAL: on but will not pass traffic
+ */
+enum efx_phy_mode {
+ PHY_MODE_NORMAL = 0,
+ PHY_MODE_TX_DISABLED = 1,
+ PHY_MODE_LOW_POWER = 2,
+ PHY_MODE_OFF = 4,
+ PHY_MODE_SPECIAL = 8,
+};
+
+static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
+{
+ return !!(mode & ~PHY_MODE_TX_DISABLED);
+}
+
+/*
+ * Efx extended statistics
+ *
+ * Not all statistics are provided by all supported MACs. The purpose
+ * of this structure is to contain the raw statistics provided by each
+ * MAC.
+ */
+struct efx_mac_stats {
+ u64 tx_bytes;
+ u64 tx_good_bytes;
+ u64 tx_bad_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_bad;
+ unsigned long tx_pause;
+ unsigned long tx_control;
+ unsigned long tx_unicast;
+ unsigned long tx_multicast;
+ unsigned long tx_broadcast;
+ unsigned long tx_lt64;
+ unsigned long tx_64;
+ unsigned long tx_65_to_127;
+ unsigned long tx_128_to_255;
+ unsigned long tx_256_to_511;
+ unsigned long tx_512_to_1023;
+ unsigned long tx_1024_to_15xx;
+ unsigned long tx_15xx_to_jumbo;
+ unsigned long tx_gtjumbo;
+ unsigned long tx_collision;
+ unsigned long tx_single_collision;
+ unsigned long tx_multiple_collision;
+ unsigned long tx_excessive_collision;
+ unsigned long tx_deferred;
+ unsigned long tx_late_collision;
+ unsigned long tx_excessive_deferred;
+ unsigned long tx_non_tcpudp;
+ unsigned long tx_mac_src_error;
+ unsigned long tx_ip_src_error;
+ u64 rx_bytes;
+ u64 rx_good_bytes;
+ u64 rx_bad_bytes;
+ unsigned long rx_packets;
+ unsigned long rx_good;
+ unsigned long rx_bad;
+ unsigned long rx_pause;
+ unsigned long rx_control;
+ unsigned long rx_unicast;
+ unsigned long rx_multicast;
+ unsigned long rx_broadcast;
+ unsigned long rx_lt64;
+ unsigned long rx_64;
+ unsigned long rx_65_to_127;
+ unsigned long rx_128_to_255;
+ unsigned long rx_256_to_511;
+ unsigned long rx_512_to_1023;
+ unsigned long rx_1024_to_15xx;
+ unsigned long rx_15xx_to_jumbo;
+ unsigned long rx_gtjumbo;
+ unsigned long rx_bad_lt64;
+ unsigned long rx_bad_64_to_15xx;
+ unsigned long rx_bad_15xx_to_jumbo;
+ unsigned long rx_bad_gtjumbo;
+ unsigned long rx_overflow;
+ unsigned long rx_missed;
+ unsigned long rx_false_carrier;
+ unsigned long rx_symbol_error;
+ unsigned long rx_align_error;
+ unsigned long rx_length_error;
+ unsigned long rx_internal_error;
+ unsigned long rx_good_lt64;
+};
+
+/* Number of bits used in a multicast filter hash address */
+#define EFX_MCAST_HASH_BITS 8
+
+/* Number of (single-bit) entries in a multicast filter hash */
+#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)
+
+/* An Efx multicast filter hash */
+union efx_multicast_hash {
+ u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
+ efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
+};
+
+struct efx_filter_state;
+
+/**
+ * struct efx_nic - an Efx NIC
+ * @name: Device name (net device name or bus id before net device registered)
+ * @pci_dev: The PCI device
+ * @type: Controller type attributes
+ * @legacy_irq: IRQ number
+ * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
+ * @workqueue: Workqueue for port reconfigures and the HW monitor.
+ * Work items do not hold and must not acquire RTNL.
+ * @workqueue_name: Name of workqueue
+ * @reset_work: Scheduled reset workitem
+ * @membase_phys: Memory BAR value as physical address
+ * @membase: Memory BAR value
+ * @interrupt_mode: Interrupt mode
+ * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
+ * @irq_rx_moderation: IRQ moderation time for RX event queues
+ * @msg_enable: Log message enable flags
+ * @state: Device state flag. Serialised by the rtnl_lock.
+ * @reset_pending: Bitmask for pending resets
+ * @channel: Channels; each channel owns its own RX queue and TX queues
+ * @channel_name: Names for channels and their IRQs
+ * @rxq_entries: Size of receive queues requested by user.
+ * @txq_entries: Size of transmit queues requested by user.
+ * @next_buffer_table: First available buffer table id
+ * @n_channels: Number of channels in use
+ * @n_rx_channels: Number of channels used for RX (= number of RX queues)
+ * @n_tx_channels: Number of channels used for TX
+ * @rx_buffer_len: RX buffer length
+ * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
+ * @rx_hash_key: Toeplitz hash key for RSS
+ * @rx_indir_table: Indirection table for RSS
+ * @int_error_count: Number of internal errors seen recently
+ * @int_error_expire: Time at which error count will be expired
+ * @irq_status: Interrupt status buffer
+ * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
+ * @fatal_irq_level: IRQ level (bit number) used for serious errors
+ * @mtd_list: List of MTDs attached to the NIC
+ * @nic_data: Hardware dependent state
+ * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
+ * efx_monitor() and efx_reconfigure_port()
+ * @port_enabled: Port enabled indicator.
+ * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and
+ * efx_mac_work() with kernel interfaces. Safe to read under any
+ * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
+ * be held to modify it.
+ * @port_initialized: Port initialized?
+ * @net_dev: Operating system network device. Consider holding the rtnl lock
+ * @stats_buffer: DMA buffer for statistics
+ * @mac_op: MAC interface
+ * @phy_type: PHY type
+ * @phy_op: PHY interface
+ * @phy_data: PHY private data (including PHY-specific stats)
+ * @mdio: PHY MDIO interface
+ * @mdio_bus: PHY MDIO bus ID (only used by Siena)
+ * @phy_mode: PHY operating mode. Serialised by @mac_lock.
+ * @link_advertising: Autonegotiation advertising flags
+ * @link_state: Current state of the link
+ * @n_link_state_changes: Number of times the link has changed state
+ * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
+ * @multicast_hash: Multicast hash table
+ * @wanted_fc: Wanted flow control flags
+ * @mac_work: Work item for changing MAC promiscuity and multicast hash
+ * @loopback_mode: Loopback status
+ * @loopback_modes: Supported loopback mode bitmask
+ * @loopback_selftest: Offline self-test private state
+ * @monitor_work: Hardware monitor workitem
+ * @biu_lock: BIU (bus interface unit) lock
+ * @last_irq_cpu: Last CPU to handle interrupt.
+ * This field is written with the SMP processor ID whenever an
+ * interrupt is handled. It is used by efx_nic_test_interrupt()
+ * to verify that an interrupt has occurred.
+ * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
+ * @mac_stats: MAC statistics. These include all statistics the MACs
+ * can provide. Generic code converts these into a standard
+ * &struct net_device_stats.
+ * @stats_lock: Statistics update lock. Serialises statistics fetches
+ *
+ * This is stored in the private area of the &struct net_device.
+ */
+struct efx_nic {
+ /* The following fields should be written very rarely */
+
+ char name[IFNAMSIZ];
+ struct pci_dev *pci_dev;
+ const struct efx_nic_type *type;
+ int legacy_irq;
+ bool legacy_irq_enabled;
+ struct workqueue_struct *workqueue;
+ char workqueue_name[16];
+ struct work_struct reset_work;
+ resource_size_t membase_phys;
+ void __iomem *membase;
+
+ enum efx_int_mode interrupt_mode;
+ bool irq_rx_adaptive;
+ unsigned int irq_rx_moderation;
+ u32 msg_enable;
+
+ enum nic_state state;
+ unsigned long reset_pending;
+
+ struct efx_channel *channel[EFX_MAX_CHANNELS];
+ char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];
+
+ unsigned rxq_entries;
+ unsigned txq_entries;
+ unsigned next_buffer_table;
+ unsigned n_channels;
+ unsigned n_rx_channels;
+ unsigned tx_channel_offset;
+ unsigned n_tx_channels;
+ unsigned int rx_buffer_len;
+ unsigned int rx_buffer_order;
+ u8 rx_hash_key[40];
+ u32 rx_indir_table[128];
+
+ unsigned int_error_count;
+ unsigned long int_error_expire;
+
+ struct efx_buffer irq_status;
+ unsigned irq_zero_count;
+ unsigned fatal_irq_level;
+
+#ifdef CONFIG_SFC_MTD
+ struct list_head mtd_list;
+#endif
+
+ void *nic_data;
+
+ struct mutex mac_lock;
+ struct work_struct mac_work;
+ bool port_enabled;
+
+ bool port_initialized;
+ struct net_device *net_dev;
+
+ struct efx_buffer stats_buffer;
+
+ const struct efx_mac_operations *mac_op;
+
+ unsigned int phy_type;
+ const struct efx_phy_operations *phy_op;
+ void *phy_data;
+ struct mdio_if_info mdio;
+ unsigned int mdio_bus;
+ enum efx_phy_mode phy_mode;
+
+ u32 link_advertising;
+ struct efx_link_state link_state;
+ unsigned int n_link_state_changes;
+
+ bool promiscuous;
+ union efx_multicast_hash multicast_hash;
+ u8 wanted_fc;
+
+ atomic_t rx_reset;
+ enum efx_loopback_mode loopback_mode;
+ u64 loopback_modes;
+
+ void *loopback_selftest;
+
+ struct efx_filter_state *filter_state;
+
+ /* The following fields may be written more often */
+
+ struct delayed_work monitor_work ____cacheline_aligned_in_smp;
+ spinlock_t biu_lock;
+ volatile signed int last_irq_cpu;
+ unsigned n_rx_nodesc_drop_cnt;
+ struct efx_mac_stats mac_stats;
+ spinlock_t stats_lock;
+};
+
+static inline int efx_dev_registered(struct efx_nic *efx)
+{
+ return efx->net_dev->reg_state == NETREG_REGISTERED;
+}
+
+/* Net device name, for inclusion in log messages if it has been registered.
+ * Use efx->name not efx->net_dev->name so that races with (un)registration
+ * are harmless.
+ */
+static inline const char *efx_dev_name(struct efx_nic *efx)
+{
+ return efx_dev_registered(efx) ? efx->name : "";
+}
+
+static inline unsigned int efx_port_num(struct efx_nic *efx)
+{
+ return efx->net_dev->dev_id;
+}
+
+/**
+ * struct efx_nic_type - Efx device type definition
+ * @probe: Probe the controller
+ * @remove: Free resources allocated by probe()
+ * @init: Initialise the controller
+ * @fini: Shut down the controller
+ * @monitor: Periodic function for polling link state and hardware monitor
+ * @map_reset_reason: Map ethtool reset reason to a reset method
+ * @map_reset_flags: Map ethtool reset flags to a reset method, if possible
+ * @reset: Reset the controller hardware and possibly the PHY. This will
+ * be called while the controller is uninitialised.
+ * @probe_port: Probe the MAC and PHY
+ * @remove_port: Free resources allocated by probe_port()
+ * @handle_global_event: Handle a "global" event (may be %NULL)
+ * @prepare_flush: Prepare the hardware for flushing the DMA queues
+ * @update_stats: Update statistics not provided by event handling
+ * @start_stats: Start the regular fetching of statistics
+ * @stop_stats: Stop the regular fetching of statistics
+ * @set_id_led: Set state of identifying LED or revert to automatic function
+ * @push_irq_moderation: Apply interrupt moderation value
+ * @push_multicast_hash: Apply multicast hash table
+ * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
+ * @get_wol: Get WoL configuration from driver state
+ * @set_wol: Push WoL configuration to the NIC
+ * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
+ * @test_registers: Test read/write functionality of control registers
+ * @test_nvram: Test validity of NVRAM contents
+ * @default_mac_ops: efx_mac_operations to set at startup
+ * @revision: Hardware architecture revision
+ * @mem_map_size: Memory BAR mapped size
+ * @txd_ptr_tbl_base: TX descriptor ring base address
+ * @rxd_ptr_tbl_base: RX descriptor ring base address
+ * @buf_tbl_base: Buffer table base address
+ * @evq_ptr_tbl_base: Event queue pointer table base address
+ * @evq_rptr_tbl_base: Event queue read-pointer table base address
+ * @max_dma_mask: Maximum possible DMA mask
+ * @rx_buffer_hash_size: Size of hash at start of RX buffer
+ * @rx_buffer_padding: Size of padding at end of RX buffer
+ * @max_interrupt_mode: Highest capability interrupt mode supported
+ * from &enum efx_init_mode.
+ * @phys_addr_channels: Number of channels with physically addressed
+ * descriptors
+ * @tx_dc_base: Base address in SRAM of TX queue descriptor caches
+ * @rx_dc_base: Base address in SRAM of RX queue descriptor caches
+ * @offload_features: net_device feature flags for protocol offload
+ * features implemented in hardware
+ */
+struct efx_nic_type {
+ int (*probe)(struct efx_nic *efx);
+ void (*remove)(struct efx_nic *efx);
+ int (*init)(struct efx_nic *efx);
+ void (*fini)(struct efx_nic *efx);
+ void (*monitor)(struct efx_nic *efx);
+ enum reset_type (*map_reset_reason)(enum reset_type reason);
+ int (*map_reset_flags)(u32 *flags);
+ int (*reset)(struct efx_nic *efx, enum reset_type method);
+ int (*probe_port)(struct efx_nic *efx);
+ void (*remove_port)(struct efx_nic *efx);
+ bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
+ void (*prepare_flush)(struct efx_nic *efx);
+ void (*update_stats)(struct efx_nic *efx);
+ void (*start_stats)(struct efx_nic *efx);
+ void (*stop_stats)(struct efx_nic *efx);
+ void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
+ void (*push_irq_moderation)(struct efx_channel *channel);
+ void (*push_multicast_hash)(struct efx_nic *efx);
+ int (*reconfigure_port)(struct efx_nic *efx);
+ void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
+ int (*set_wol)(struct efx_nic *efx, u32 type);
+ void (*resume_wol)(struct efx_nic *efx);
+ int (*test_registers)(struct efx_nic *efx);
+ int (*test_nvram)(struct efx_nic *efx);
+ const struct efx_mac_operations *default_mac_ops;
+
+ int revision;
+ unsigned int mem_map_size;
+ unsigned int txd_ptr_tbl_base;
+ unsigned int rxd_ptr_tbl_base;
+ unsigned int buf_tbl_base;
+ unsigned int evq_ptr_tbl_base;
+ unsigned int evq_rptr_tbl_base;
+ u64 max_dma_mask;
+ unsigned int rx_buffer_hash_size;
+ unsigned int rx_buffer_padding;
+ unsigned int max_interrupt_mode;
+ unsigned int phys_addr_channels;
+ unsigned int tx_dc_base;
+ unsigned int rx_dc_base;
+ u32 offload_features;
+};
+
+/**************************************************************************
+ *
+ * Prototypes and inline functions
+ *
+ *************************************************************************/
+
+static inline struct efx_channel *
+efx_get_channel(struct efx_nic *efx, unsigned index)
+{
+ EFX_BUG_ON_PARANOID(index >= efx->n_channels);
+ return efx->channel[index];
+}
+
+/* Iterate over all used channels */
+#define efx_for_each_channel(_channel, _efx) \
+ for (_channel = (_efx)->channel[0]; \
+ _channel; \
+ _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \
+ (_efx)->channel[_channel->channel + 1] : NULL)
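+
+/* For example (illustrative only), counting the enabled channels:
+ *
+ *	struct efx_channel *channel;
+ *	unsigned n_enabled = 0;
+ *
+ *	efx_for_each_channel(channel, efx)
+ *		if (channel->enabled)
+ *			++n_enabled;
+ */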
+
+static inline struct efx_tx_queue *
+efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
+{
+ EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
+ type >= EFX_TXQ_TYPES);
+ return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
+}
+
+static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
+{
+ return channel->channel - channel->efx->tx_channel_offset <
+ channel->efx->n_tx_channels;
+}
+
+static inline struct efx_tx_queue *
+efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
+{
+ EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
+ type >= EFX_TXQ_TYPES);
+ return &channel->tx_queue[type];
+}
+
+static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
+{
+ return !(tx_queue->efx->net_dev->num_tc < 2 &&
+ tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
+}
+
+/* Iterate over all TX queues belonging to a channel */
+#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
+ if (!efx_channel_has_tx_queues(_channel)) \
+ ; \
+ else \
+ for (_tx_queue = (_channel)->tx_queue; \
+ _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
+ efx_tx_queue_used(_tx_queue); \
+ _tx_queue++)
+
+/* Iterate over all possible TX queues belonging to a channel */
+#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \
+ for (_tx_queue = (_channel)->tx_queue; \
+ _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
+ _tx_queue++)
+
+static inline struct efx_rx_queue *
+efx_get_rx_queue(struct efx_nic *efx, unsigned index)
+{
+ EFX_BUG_ON_PARANOID(index >= efx->n_rx_channels);
+ return &efx->channel[index]->rx_queue;
+}
+
+static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
+{
+ return channel->channel < channel->efx->n_rx_channels;
+}
+
+static inline struct efx_rx_queue *
+efx_channel_get_rx_queue(struct efx_channel *channel)
+{
+ EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
+ return &channel->rx_queue;
+}
+
+/* Iterate over all RX queues belonging to a channel */
+#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
+ if (!efx_channel_has_rx_queue(_channel)) \
+ ; \
+ else \
+ for (_rx_queue = &(_channel)->rx_queue; \
+ _rx_queue; \
+ _rx_queue = NULL)
+
+static inline struct efx_channel *
+efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
+{
+ return container_of(rx_queue, struct efx_channel, rx_queue);
+}
+
+static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
+{
+ return efx_rx_queue_channel(rx_queue)->channel;
+}
+
+/* Returns a pointer to the specified receive buffer in the RX
+ * descriptor queue.
+ */
+static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
+ unsigned int index)
+{
+ return &rx_queue->buffer[index];
+}
+
+/* Set bit in a little-endian bitfield */
+static inline void set_bit_le(unsigned nr, unsigned char *addr)
+{
+ addr[nr / 8] |= (1 << (nr % 8));
+}
+
+/* Clear bit in a little-endian bitfield */
+static inline void clear_bit_le(unsigned nr, unsigned char *addr)
+{
+ addr[nr / 8] &= ~(1 << (nr % 8));
+}
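+
+/* Illustrative use of these helpers with the multicast hash defined
+ * earlier; the CRC-based bucket selection is a sketch of what a
+ * set-multicast-list path might do with an address mc_addr:
+ *
+ *	u32 crc = ether_crc_le(ETH_ALEN, mc_addr);
+ *	unsigned bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
+ *
+ *	set_bit_le(bit, efx->multicast_hash.byte);
+ */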
+
+
+/**
+ * EFX_MAX_FRAME_LEN - calculate maximum frame length
+ *
+ * This calculates the maximum frame length that will be used for a
+ * given MTU. The frame length will be equal to the MTU plus a
+ * constant amount of header space and padding. This is the quantity
+ * that the net driver will program into the MAC as the maximum frame
+ * length.
+ *
+ * The 10G MAC requires 8-byte alignment on the frame
+ * length, so we round up to the nearest 8.
+ *
+ * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
+ * XGMII cycle). If the frame length reaches the maximum value in the
+ * same cycle, the XMAC can miss the IPG altogether. We work around
+ * this by adding a further 16 bytes.
+ */
+#define EFX_MAX_FRAME_LEN(mtu) \
+ ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16)
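+
+/* Worked example: for the standard 1500-byte MTU and the usual
+ * ETH_HLEN of 14 and VLAN_HLEN of 4, this evaluates to
+ * ((1500 + 14 + 4 + 4 + 7) & ~7) + 16 = 1528 + 16 = 1544 bytes.
+ */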
+
+
+#endif /* EFX_NET_DRIVER_H */
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
new file mode 100644
index 000000000000..3edfbaf5f022
--- /dev/null
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -0,0 +1,1962 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "regs.h"
+#include "io.h"
+#include "workarounds.h"
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ **************************************************************************
+ */
+
+/* This is set to 16 for a good reason. In summary, if larger than
+ * 16, the descriptor cache holds more than a default socket
+ * buffer's worth of packets (for UDP we can only have at most one
+ * socket buffer's worth outstanding). This combined with the fact
+ * that we only get 1 TX event per descriptor cache means the NIC
+ * goes idle.
+ */
+#define TX_DC_ENTRIES 16
+#define TX_DC_ENTRIES_ORDER 1
+
+#define RX_DC_ENTRIES 64
+#define RX_DC_ENTRIES_ORDER 3
+
+/* If EFX_MAX_INT_ERRORS internal errors occur within
+ * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
+ * disable it.
+ */
+#define EFX_INT_ERROR_EXPIRE 3600
+#define EFX_MAX_INT_ERRORS 5
+
+/* We poll for events every EFX_FLUSH_INTERVAL ms, and check up to
+ * EFX_FLUSH_POLL_COUNT times
+ */
+#define EFX_FLUSH_INTERVAL 10
+#define EFX_FLUSH_POLL_COUNT 100
+
+/* Size and alignment of special buffers (4KB) */
+#define EFX_BUF_SIZE 4096
+
+/* Depth of RX flush request fifo */
+#define EFX_RX_FLUSH_COUNT 4
+
+/* Generated event code for efx_generate_test_event() */
+#define EFX_CHANNEL_MAGIC_TEST(_channel) \
+ (0x00010100 + (_channel)->channel)
+
+/* Generated event code for efx_generate_fill_event() */
+#define EFX_CHANNEL_MAGIC_FILL(_channel) \
+ (0x00010200 + (_channel)->channel)
+
+/**************************************************************************
+ *
+ * Solarstorm hardware access
+ *
+ **************************************************************************/
+
+static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
+ unsigned int index)
+{
+ efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
+ value, index);
+}
+
+/* Read the current event from the event queue */
+static inline efx_qword_t *efx_event(struct efx_channel *channel,
+ unsigned int index)
+{
+ return ((efx_qword_t *) (channel->eventq.addr)) +
+ (index & channel->eventq_mask);
+}
+
+/* See if an event is present
+ *
+ * We check both the high and low dword of the event for all ones. We
+ * wrote all ones when we cleared the event, and no valid event can
+ * have all ones in either its high or low dwords. This approach is
+ * robust against reordering.
+ *
+ * Note that using a single 64-bit comparison is incorrect; even
+ * though the CPU read will be atomic, the DMA write may not be.
+ */
+static inline int efx_event_present(efx_qword_t *event)
+{
+ return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
+ EFX_DWORD_IS_ALL_ONES(event->dword[1]));
+}
+
+static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
+ const efx_oword_t *mask)
+{
+ return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
+ ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
+}
+
+int efx_nic_test_registers(struct efx_nic *efx,
+ const struct efx_nic_register_test *regs,
+ size_t n_regs)
+{
+ unsigned address = 0, i, j;
+ efx_oword_t mask, imask, original, reg, buf;
+
+ /* Falcon should be in loopback to isolate the XMAC from the PHY */
+ WARN_ON(!LOOPBACK_INTERNAL(efx));
+
+ for (i = 0; i < n_regs; ++i) {
+ address = regs[i].address;
+ mask = imask = regs[i].mask;
+ EFX_INVERT_OWORD(imask);
+
+ efx_reado(efx, &original, address);
+
+ /* bit sweep on and off */
+ for (j = 0; j < 128; j++) {
+ if (!EFX_EXTRACT_OWORD32(mask, j, j))
+ continue;
+
+ /* Test this testable bit can be set in isolation */
+ EFX_AND_OWORD(reg, original, mask);
+ EFX_SET_OWORD32(reg, j, j, 1);
+
+ efx_writeo(efx, &reg, address);
+ efx_reado(efx, &buf, address);
+
+ if (efx_masked_compare_oword(&reg, &buf, &mask))
+ goto fail;
+
+ /* Test this testable bit can be cleared in isolation */
+ EFX_OR_OWORD(reg, original, mask);
+ EFX_SET_OWORD32(reg, j, j, 0);
+
+ efx_writeo(efx, &reg, address);
+ efx_reado(efx, &buf, address);
+
+ if (efx_masked_compare_oword(&reg, &buf, &mask))
+ goto fail;
+ }
+
+ efx_writeo(efx, &original, address);
+ }
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev,
+ "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
+ " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
+ EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
+ return -EIO;
+}
+
+/**************************************************************************
+ *
+ * Special buffer handling
+ * Special buffers are used for event queues and the TX and RX
+ * descriptor rings.
+ *
+ *************************************************************************/
+
+/*
+ * Initialise a special buffer
+ *
+ * This will define a buffer (previously allocated via
+ * efx_alloc_special_buffer()) in the buffer table, allowing
+ * it to be used for event queues, descriptor rings etc.
+ */
+static void
+efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ efx_qword_t buf_desc;
+ int index;
+ dma_addr_t dma_addr;
+ int i;
+
+ EFX_BUG_ON_PARANOID(!buffer->addr);
+
+ /* Write buffer descriptors to NIC */
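+ /* Each buffer table entry maps one EFX_BUF_SIZE (4KB) page;
+ * FRF_AZ_BUF_ADR_FBUF takes a page number, hence dma_addr >> 12 */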
+ for (i = 0; i < buffer->entries; i++) {
+ index = buffer->index + i;
+ dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
+ netif_dbg(efx, probe, efx->net_dev,
+ "mapping special buffer %d at %llx\n",
+ index, (unsigned long long)dma_addr);
+ EFX_POPULATE_QWORD_3(buf_desc,
+ FRF_AZ_BUF_ADR_REGION, 0,
+ FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
+ FRF_AZ_BUF_OWNER_ID_FBUF, 0);
+ efx_write_buf_tbl(efx, &buf_desc, index);
+ }
+}
+
+/* Unmaps a buffer and clears the buffer table entries */
+static void
+efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ efx_oword_t buf_tbl_upd;
+ unsigned int start = buffer->index;
+ unsigned int end = (buffer->index + buffer->entries - 1);
+
+ if (!buffer->entries)
+ return;
+
+ netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
+ buffer->index, buffer->index + buffer->entries - 1);
+
+ EFX_POPULATE_OWORD_4(buf_tbl_upd,
+ FRF_AZ_BUF_UPD_CMD, 0,
+ FRF_AZ_BUF_CLR_CMD, 1,
+ FRF_AZ_BUF_CLR_END_ID, end,
+ FRF_AZ_BUF_CLR_START_ID, start);
+ efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
+}
+
+/*
+ * Allocate a new special buffer
+ *
+ * This allocates memory for a new buffer, clears it and allocates a
+ * new buffer ID range. It does not write into the buffer table.
+ *
+ * This call will allocate 4KB buffers, since 8KB buffers can't be
+ * used for event queues and descriptor rings.
+ */
+static int efx_alloc_special_buffer(struct efx_nic *efx,
+ struct efx_special_buffer *buffer,
+ unsigned int len)
+{
+ len = ALIGN(len, EFX_BUF_SIZE);
+
+ buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, GFP_KERNEL);
+ if (!buffer->addr)
+ return -ENOMEM;
+ buffer->len = len;
+ buffer->entries = len / EFX_BUF_SIZE;
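+ /* e.g. a 512-entry descriptor ring of 8-byte descriptors fits in
+ * one 4KB buffer; a 4096-entry event queue needs 32KB, i.e. eight
+ * buffer table entries */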
+ BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
+
+ /* All zeros is a potentially valid event so memset to 0xff */
+ memset(buffer->addr, 0xff, len);
+
+ /* Select new buffer ID */
+ buffer->index = efx->next_buffer_table;
+ efx->next_buffer_table += buffer->entries;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "allocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->dma_addr, len,
+ buffer->addr, (u64)virt_to_phys(buffer->addr));
+
+ return 0;
+}
+
+static void
+efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ if (!buffer->addr)
+ return;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "deallocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->dma_addr, buffer->len,
+ buffer->addr, (u64)virt_to_phys(buffer->addr));
+
+ dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
+ buffer->dma_addr);
+ buffer->addr = NULL;
+ buffer->entries = 0;
+}
+
+/**************************************************************************
+ *
+ * Generic buffer handling
+ * These buffers are used for interrupt status and MAC stats
+ *
+ **************************************************************************/
+
+int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
+ unsigned int len)
+{
+ buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, GFP_ATOMIC);
+ if (!buffer->addr)
+ return -ENOMEM;
+ buffer->len = len;
+ memset(buffer->addr, 0, len);
+ return 0;
+}
+
+void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
+{
+ if (buffer->addr) {
+ dma_free_coherent(&efx->pci_dev->dev, buffer->len,
+ buffer->addr, buffer->dma_addr);
+ buffer->addr = NULL;
+ }
+}
+
+/**************************************************************************
+ *
+ * TX path
+ *
+ **************************************************************************/
+
+/* Returns a pointer to the specified transmit descriptor in the TX
+ * descriptor queue.
+ */
+static inline efx_qword_t *
+efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+ return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
+}
+
+/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
+static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
+{
+ unsigned write_ptr;
+ efx_dword_t reg;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
+ efx_writed_page(tx_queue->efx, &reg,
+ FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
+}
+
+/* Write pointer and first descriptor for TX descriptor ring */
+static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
+ const efx_qword_t *txd)
+{
+ unsigned write_ptr;
+ efx_oword_t reg;
+
+ BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
+ BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
+ FRF_AZ_TX_DESC_WPTR, write_ptr);
+ reg.qword[0] = *txd;
+ efx_writeo_page(tx_queue->efx, &reg,
+ FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
+}
+
+static inline bool
+efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
+{
+ unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
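+
+ /* A push is only worthwhile if the queue is currently empty.
+ * empty_read_count records, tagged with EFX_EMPTY_COUNT_VALID, the
+ * count at which the queue was last seen empty; zero means no
+ * valid record, and otherwise it must match write_count */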
+
+ if (empty_read_count == 0)
+ return false;
+
+ tx_queue->empty_read_count = 0;
+ return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
+}
+
+/* For each entry inserted into the software descriptor ring, create a
+ * descriptor in the hardware TX descriptor ring (in host memory), and
+ * write a doorbell.
+ */
+void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer;
+ efx_qword_t *txd;
+ unsigned write_ptr;
+ unsigned old_write_count = tx_queue->write_count;
+
+ BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+
+ do {
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ buffer = &tx_queue->buffer[write_ptr];
+ txd = efx_tx_desc(tx_queue, write_ptr);
+ ++tx_queue->write_count;
+
+ /* Create TX descriptor ring entry */
+ EFX_POPULATE_QWORD_4(*txd,
+ FSF_AZ_TX_KER_CONT, buffer->continuation,
+ FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
+ FSF_AZ_TX_KER_BUF_REGION, 0,
+ FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
+ } while (tx_queue->write_count != tx_queue->insert_count);
+
+ wmb(); /* Ensure descriptors are written before they are fetched */
+
+ if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
+ txd = efx_tx_desc(tx_queue,
+ old_write_count & tx_queue->ptr_mask);
+ efx_push_tx_desc(tx_queue, txd);
+ ++tx_queue->pushes;
+ } else {
+ efx_notify_tx_desc(tx_queue);
+ }
+}
+
+/* Allocate hardware resources for a TX queue */
+int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned entries;
+
+ entries = tx_queue->ptr_mask + 1;
+ return efx_alloc_special_buffer(efx, &tx_queue->txd,
+ entries * sizeof(efx_qword_t));
+}
+
+void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t reg;
+
+ tx_queue->flushed = FLUSH_NONE;
+
+ /* Pin TX descriptor ring */
+ efx_init_special_buffer(efx, &tx_queue->txd);
+
+ /* Push TX descriptor ring to card */
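+ /* FRF_AZ_TX_DESCQ_SIZE takes log2 of the ring size; entries is a
+ * power of two, so __ffs(entries) == log2(entries), e.g.
+ * __ffs(1024) == 10 */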
+ EFX_POPULATE_OWORD_10(reg,
+ FRF_AZ_TX_DESCQ_EN, 1,
+ FRF_AZ_TX_ISCSI_DDIG_EN, 0,
+ FRF_AZ_TX_ISCSI_HDIG_EN, 0,
+ FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
+ FRF_AZ_TX_DESCQ_EVQ_ID,
+ tx_queue->channel->channel,
+ FRF_AZ_TX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
+ FRF_AZ_TX_DESCQ_SIZE,
+ __ffs(tx_queue->txd.entries),
+ FRF_AZ_TX_DESCQ_TYPE, 0,
+ FRF_BZ_TX_NON_IP_DROP_DIS, 1);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
+ !csum);
+ }
+
+ efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
+ tx_queue->queue);
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
+ /* Only 128 bits in this register */
+ BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
+
+ efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
+ if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
+ clear_bit_le(tx_queue->queue, (void *)&reg);
+ else
+ set_bit_le(tx_queue->queue, (void *)&reg);
+ efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
+ }
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_POPULATE_OWORD_1(reg,
+ FRF_BZ_TX_PACE,
+ (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+ FFE_BZ_TX_PACE_OFF :
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+ tx_queue->queue);
+ }
+}
+
+static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t tx_flush_descq;
+
+ tx_queue->flushed = FLUSH_PENDING;
+
+ /* Post a flush command */
+ EFX_POPULATE_OWORD_2(tx_flush_descq,
+ FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
+ efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
+}
+
+void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t tx_desc_ptr;
+
+ /* The queue should have been flushed */
+ WARN_ON(tx_queue->flushed != FLUSH_DONE);
+
+ /* Remove TX descriptor ring from card */
+ EFX_ZERO_OWORD(tx_desc_ptr);
+ efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+ tx_queue->queue);
+
+ /* Unpin TX descriptor ring */
+ efx_fini_special_buffer(efx, &tx_queue->txd);
+}
+
+/* Free buffers backing TX queue */
+void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
+{
+ efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
+}
+
+/**************************************************************************
+ *
+ * RX path
+ *
+ **************************************************************************/
+
+/* Returns a pointer to the specified descriptor in the RX descriptor queue */
+static inline efx_qword_t *
+efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
+{
+ return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
+}
+
+/* This creates an entry in the RX descriptor queue */
+static inline void
+efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
+{
+ struct efx_rx_buffer *rx_buf;
+ efx_qword_t *rxd;
+
+ rxd = efx_rx_desc(rx_queue, index);
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ EFX_POPULATE_QWORD_3(*rxd,
+ FSF_AZ_RX_KER_BUF_SIZE,
+ rx_buf->len -
+ rx_queue->efx->type->rx_buffer_padding,
+ FSF_AZ_RX_KER_BUF_REGION, 0,
+ FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
+}
+
+/* This writes to the RX_DESC_WPTR register for the specified receive
+ * descriptor ring.
+ */
+void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ efx_dword_t reg;
+ unsigned write_ptr;
+
+ while (rx_queue->notified_count != rx_queue->added_count) {
+ efx_build_rx_desc(
+ rx_queue,
+ rx_queue->notified_count & rx_queue->ptr_mask);
+ ++rx_queue->notified_count;
+ }
+
+ wmb();
+ write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
+ efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
+ efx_rx_queue_index(rx_queue));
+}
+
+int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned entries;
+
+ entries = rx_queue->ptr_mask + 1;
+ return efx_alloc_special_buffer(efx, &rx_queue->rxd,
+ entries * sizeof(efx_qword_t));
+}
+
+void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
+{
+ efx_oword_t rx_desc_ptr;
+ struct efx_nic *efx = rx_queue->efx;
+ bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
+ bool iscsi_digest_en = is_b0;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "RX queue %d ring in special buffers %d-%d\n",
+ efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
+ rx_queue->rxd.index + rx_queue->rxd.entries - 1);
+
+ rx_queue->flushed = FLUSH_NONE;
+
+ /* Pin RX descriptor ring */
+ efx_init_special_buffer(efx, &rx_queue->rxd);
+
+ /* Push RX descriptor ring to card */
+ EFX_POPULATE_OWORD_10(rx_desc_ptr,
+ FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
+ FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
+ FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
+ FRF_AZ_RX_DESCQ_EVQ_ID,
+ efx_rx_queue_channel(rx_queue)->channel,
+ FRF_AZ_RX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_RX_DESCQ_LABEL,
+ efx_rx_queue_index(rx_queue),
+ FRF_AZ_RX_DESCQ_SIZE,
+ __ffs(rx_queue->rxd.entries),
+ FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
+ /* For >=B0 this is scatter so disable */
+ FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
+ FRF_AZ_RX_DESCQ_EN, 1);
+ efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+ efx_rx_queue_index(rx_queue));
+}
+
+static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ efx_oword_t rx_flush_descq;
+
+ rx_queue->flushed = FLUSH_PENDING;
+
+ /* Post a flush command */
+ EFX_POPULATE_OWORD_2(rx_flush_descq,
+ FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_RX_FLUSH_DESCQ,
+ efx_rx_queue_index(rx_queue));
+ efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
+}
+
+void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
+{
+ efx_oword_t rx_desc_ptr;
+ struct efx_nic *efx = rx_queue->efx;
+
+ /* The queue should already have been flushed */
+ WARN_ON(rx_queue->flushed != FLUSH_DONE);
+
+ /* Remove RX descriptor ring from card */
+ EFX_ZERO_OWORD(rx_desc_ptr);
+ efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+ efx_rx_queue_index(rx_queue));
+
+ /* Unpin RX descriptor ring */
+ efx_fini_special_buffer(efx, &rx_queue->rxd);
+}
+
+/* Free buffers backing RX queue */
+void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
+{
+ efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
+}
+
+/**************************************************************************
+ *
+ * Event queue processing
+ * Event queues are processed by per-channel tasklets.
+ *
+ **************************************************************************/
+
+/* Update a channel's event queue's read pointer (RPTR) register
+ *
+ * This writes the EVQ_RPTR_REG register for the specified channel's
+ * event queue.
+ */
+void efx_nic_eventq_read_ack(struct efx_channel *channel)
+{
+ efx_dword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+ channel->eventq_read_ptr & channel->eventq_mask);
+ efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
+ channel->channel);
+}
+
+/* Use HW to insert a SW defined event */
+static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ efx_oword_t drv_ev_reg;
+
+ BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
+ FRF_AZ_DRV_EV_DATA_WIDTH != 64);
+ drv_ev_reg.u32[0] = event->u32[0];
+ drv_ev_reg.u32[1] = event->u32[1];
+ drv_ev_reg.u32[2] = 0;
+ drv_ev_reg.u32[3] = 0;
+ EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
+ efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
+}
+
+/* Handle a transmit completion event
+ *
+ * The NIC batches TX completion events; the message we receive is of
+ * the form "complete all TX events up to this index".
+ */
+static int
+efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ unsigned int tx_ev_desc_ptr;
+ unsigned int tx_ev_q_label;
+ struct efx_tx_queue *tx_queue;
+ struct efx_nic *efx = channel->efx;
+ int tx_packets = 0;
+
+ if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
+ /* Transmit completion */
+ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+ tx_queue = efx_channel_get_tx_queue(
+ channel, tx_ev_q_label % EFX_TXQ_TYPES);
+ tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
+ tx_queue->ptr_mask);
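+ /* e.g. with ptr_mask 0x3ff, read_count 1020 and desc_ptr 4:
+ * (4 - 1020) & 0x3ff == 8 completed packets */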
+ channel->irq_mod_score += tx_packets;
+ efx_xmit_done(tx_queue, tx_ev_desc_ptr);
+ } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
+ /* Rewrite the FIFO write pointer */
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+ tx_queue = efx_channel_get_tx_queue(
+ channel, tx_ev_q_label % EFX_TXQ_TYPES);
+
+ if (efx_dev_registered(efx))
+ netif_tx_lock(efx->net_dev);
+ efx_notify_tx_desc(tx_queue);
+ if (efx_dev_registered(efx))
+ netif_tx_unlock(efx->net_dev);
+ } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
+ EFX_WORKAROUND_10727(efx)) {
+ efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
+ } else {
+ netif_err(efx, tx_err, efx->net_dev,
+ "channel %d unexpected TX event "
+ EFX_QWORD_FMT"\n", channel->channel,
+ EFX_QWORD_VAL(*event));
+ }
+
+ return tx_packets;
+}
+
+/* Detect errors included in the rx_ev_pkt_ok bit. */
+static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
+ const efx_qword_t *event,
+ bool *rx_ev_pkt_ok,
+ bool *discard)
+{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ struct efx_nic *efx = rx_queue->efx;
+ bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
+ bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
+ bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
+ bool rx_ev_other_err, rx_ev_pause_frm;
+ bool rx_ev_hdr_type, rx_ev_mcast_pkt;
+ unsigned rx_ev_pkt_type;
+
+ rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
+ rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
+ rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
+ rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
+ rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
+ rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
+ rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
+ rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
+ rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
+ rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
+ 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
+ rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
+
+ /* Every error apart from tobe_disc and pause_frm */
+ rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
+ rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
+ rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
+
+ /* Count errors that are not in MAC stats. Ignore expected
+ * checksum errors during self-test. */
+ if (rx_ev_frm_trunc)
+ ++channel->n_rx_frm_trunc;
+ else if (rx_ev_tobe_disc)
+ ++channel->n_rx_tobe_disc;
+ else if (!efx->loopback_selftest) {
+ if (rx_ev_ip_hdr_chksum_err)
+ ++channel->n_rx_ip_hdr_chksum_err;
+ else if (rx_ev_tcp_udp_chksum_err)
+ ++channel->n_rx_tcp_udp_chksum_err;
+ }
+
+ /* The frame must be discarded if any of these are true. */
+ *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
+ rx_ev_tobe_disc | rx_ev_pause_frm);
+
+ /* TOBE_DISC is expected on unicast mismatches; don't print out an
+ * error message. FRM_TRUNC indicates RXDP dropped the packet due
+ * to a FIFO overflow.
+ */
+#ifdef EFX_ENABLE_DEBUG
+ if (rx_ev_other_err && net_ratelimit()) {
+ netif_dbg(efx, rx_err, efx->net_dev,
+ " RX queue %d unexpected RX event "
+ EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
+ efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
+ rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
+ rx_ev_ip_hdr_chksum_err ?
+ " [IP_HDR_CHKSUM_ERR]" : "",
+ rx_ev_tcp_udp_chksum_err ?
+ " [TCP_UDP_CHKSUM_ERR]" : "",
+ rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
+ rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
+ rx_ev_drib_nib ? " [DRIB_NIB]" : "",
+ rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
+ rx_ev_pause_frm ? " [PAUSE]" : "");
+ }
+#endif
+}
+
+/* Handle receive events that are not in-order. */
+static void
+efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned expected, dropped;
+
+ expected = rx_queue->removed_count & rx_queue->ptr_mask;
+ dropped = (index - expected) & rx_queue->ptr_mask;
+ netif_info(efx, rx_err, efx->net_dev,
+ "dropped %d events (index=%d expected=%d)\n",
+ dropped, index, expected);
+
+ efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
+ RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+}
+
+/* Handle a packet received event
+ *
+ * The NIC gives a "discard" flag if it's a unicast packet with the
+ * wrong destination address
+ * Also "is multicast" and "matches multicast filter" flags can be used to
+ * discard non-matching multicast packets.
+ */
+static void
+efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
+{
+ unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
+ unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
+ unsigned expected_ptr;
+ bool rx_ev_pkt_ok, discard = false, checksummed;
+ struct efx_rx_queue *rx_queue;
+
+ /* Basic packet information */
+ rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
+ rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
+ rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
+ WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
+ WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
+ WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
+ channel->channel);
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
+ expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
+ if (unlikely(rx_ev_desc_ptr != expected_ptr))
+ efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
+
+ if (likely(rx_ev_pkt_ok)) {
+ /* If packet is marked as OK and packet type is TCP/IP or
+ * UDP/IP, then we can rely on the hardware checksum.
+ */
+ checksummed =
+ rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
+ rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP;
+ } else {
+ efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
+ checksummed = false;
+ }
+
+ /* Detect multicast packets that didn't match the filter */
+ rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
+ if (rx_ev_mcast_pkt) {
+ unsigned int rx_ev_mcast_hash_match =
+ EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
+
+ if (unlikely(!rx_ev_mcast_hash_match)) {
+ ++channel->n_rx_mcast_mismatch;
+ discard = true;
+ }
+ }
+
+ channel->irq_mod_score += 2;
+
+ /* Handle received packet */
+ efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
+ checksummed, discard);
+}
+
+static void
+efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned code;
+
+ code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
+ if (code == EFX_CHANNEL_MAGIC_TEST(channel))
+ ; /* ignore */
+ else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
+ /* The queue must be empty, so we won't receive any rx
+ * events, so efx_process_channel() won't refill the
+ * queue. Refill it here */
+ efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
+ else
+ netif_dbg(efx, hw, efx->net_dev, "channel %d received "
+ "generated event "EFX_QWORD_FMT"\n",
+ channel->channel, EFX_QWORD_VAL(*event));
+}
+
+static void
+efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned int ev_sub_code;
+ unsigned int ev_sub_data;
+
+ ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
+ ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ switch (ev_sub_code) {
+ case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AZ_EVQ_INIT_DONE_EV:
+ netif_dbg(efx, hw, efx->net_dev,
+ "channel %d EVQ %d initialised\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AZ_SRM_UPD_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d SRAM update done\n", channel->channel);
+ break;
+ case FSE_AZ_WAKE_UP_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d RXQ %d wakeup event\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AZ_TIMER_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d RX queue %d timer expired\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AA_RX_RECOVER_EV:
+ netif_err(efx, rx_err, efx->net_dev,
+ "channel %d seen DRIVER RX_RESET event. "
+ "Resetting.\n", channel->channel);
+ atomic_inc(&efx->rx_reset);
+ efx_schedule_reset(efx,
+ EFX_WORKAROUND_6555(efx) ?
+ RESET_TYPE_RX_RECOVERY :
+ RESET_TYPE_DISABLE);
+ break;
+ case FSE_BZ_RX_DSC_ERROR_EV:
+ netif_err(efx, rx_err, efx->net_dev,
+ "RX DMA Q %d reports descriptor fetch error."
+ " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
+ efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
+ break;
+ case FSE_BZ_TX_DSC_ERROR_EV:
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX DMA Q %d reports descriptor fetch error."
+ " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
+ efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
+ break;
+ default:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d unknown driver event code %d "
+ "data %04x\n", channel->channel, ev_sub_code,
+ ev_sub_data);
+ break;
+ }
+}
+
+int efx_nic_process_eventq(struct efx_channel *channel, int budget)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned int read_ptr;
+ efx_qword_t event, *p_event;
+ int ev_code;
+ int tx_packets = 0;
+ int spent = 0;
+
+ read_ptr = channel->eventq_read_ptr;
+
+ for (;;) {
+ p_event = efx_event(channel, read_ptr);
+ event = *p_event;
+
+ if (!efx_event_present(&event))
+ /* End of events */
+ break;
+
+ netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+ "channel %d event is "EFX_QWORD_FMT"\n",
+ channel->channel, EFX_QWORD_VAL(event));
+
+ /* Clear this event by marking it all ones */
+ EFX_SET_QWORD(*p_event);
+
+ ++read_ptr;
+
+ ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
+
+ switch (ev_code) {
+ case FSE_AZ_EV_CODE_RX_EV:
+ efx_handle_rx_event(channel, &event);
+ if (++spent == budget)
+ goto out;
+ break;
+ case FSE_AZ_EV_CODE_TX_EV:
+ tx_packets += efx_handle_tx_event(channel, &event);
+ if (tx_packets > efx->txq_entries) {
+ spent = budget;
+ goto out;
+ }
+ break;
+ case FSE_AZ_EV_CODE_DRV_GEN_EV:
+ efx_handle_generated_event(channel, &event);
+ break;
+ case FSE_AZ_EV_CODE_DRIVER_EV:
+ efx_handle_driver_event(channel, &event);
+ break;
+ case FSE_CZ_EV_CODE_MCDI_EV:
+ efx_mcdi_process_event(channel, &event);
+ break;
+ case FSE_AZ_EV_CODE_GLOBAL_EV:
+ if (efx->type->handle_global_event &&
+ efx->type->handle_global_event(channel, &event))
+ break;
+ /* else fall through */
+ default:
+ netif_err(channel->efx, hw, channel->efx->net_dev,
+ "channel %d unknown event type %d (data "
+ EFX_QWORD_FMT ")\n", channel->channel,
+ ev_code, EFX_QWORD_VAL(event));
+ }
+ }
+
+out:
+ channel->eventq_read_ptr = read_ptr;
+ return spent;
+}
+
+/* Check whether an event is present in the eventq at the current
+ * read pointer. Only useful for self-test.
+ */
+bool efx_nic_event_present(struct efx_channel *channel)
+{
+ return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+}
+
+/* Allocate buffer table entries for event queue */
+int efx_nic_probe_eventq(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned entries;
+
+ entries = channel->eventq_mask + 1;
+ return efx_alloc_special_buffer(efx, &channel->eventq,
+ entries * sizeof(efx_qword_t));
+}
+
+void efx_nic_init_eventq(struct efx_channel *channel)
+{
+ efx_oword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "channel %d event queue in special buffers %d-%d\n",
+ channel->channel, channel->eventq.index,
+ channel->eventq.index + channel->eventq.entries - 1);
+
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_CZ_TIMER_Q_EN, 1,
+ FRF_CZ_HOST_NOTIFY_MODE, 0,
+ FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
+ }
+
+ /* Pin event queue buffer */
+ efx_init_special_buffer(efx, &channel->eventq);
+
+ /* Fill event queue with all ones (i.e. empty events) */
+ memset(channel->eventq.addr, 0xff, channel->eventq.len);
+
+ /* Push event queue to card */
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_AZ_EVQ_EN, 1,
+ FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
+ FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
+ efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
+ channel->channel);
+
+ efx->type->push_irq_moderation(channel);
+}
+
+void efx_nic_fini_eventq(struct efx_channel *channel)
+{
+ efx_oword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ /* Remove event queue from card */
+ EFX_ZERO_OWORD(reg);
+ efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
+ channel->channel);
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
+
+ /* Unpin event queue */
+ efx_fini_special_buffer(efx, &channel->eventq);
+}
+
+/* Free buffers backing event queue */
+void efx_nic_remove_eventq(struct efx_channel *channel)
+{
+ efx_free_special_buffer(channel->efx, &channel->eventq);
+}
+
+
+void efx_nic_generate_test_event(struct efx_channel *channel)
+{
+ unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
+ efx_qword_t test_event;
+
+ EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
+ FSE_AZ_EV_CODE_DRV_GEN_EV,
+ FSF_AZ_DRV_GEN_EV_MAGIC, magic);
+ efx_generate_event(channel, &test_event);
+}
+
+void efx_nic_generate_fill_event(struct efx_channel *channel)
+{
+ unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
+ efx_qword_t test_event;
+
+ EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
+ FSE_AZ_EV_CODE_DRV_GEN_EV,
+ FSF_AZ_DRV_GEN_EV_MAGIC, magic);
+ efx_generate_event(channel, &test_event);
+}
+
+/**************************************************************************
+ *
+ * Flush handling
+ *
+ **************************************************************************/
+
+
+static void efx_poll_flush_events(struct efx_nic *efx)
+{
+ struct efx_channel *channel = efx_get_channel(efx, 0);
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ unsigned int read_ptr = channel->eventq_read_ptr;
+ unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
+
+ do {
+ efx_qword_t *event = efx_event(channel, read_ptr);
+ int ev_code, ev_sub_code, ev_queue;
+ bool ev_failed;
+
+ if (!efx_event_present(event))
+ break;
+
+ ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
+ ev_sub_code = EFX_QWORD_FIELD(*event,
+ FSF_AZ_DRIVER_EV_SUBCODE);
+ if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
+ ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
+ ev_queue = EFX_QWORD_FIELD(*event,
+ FSF_AZ_DRIVER_EV_SUBDATA);
+ if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
+ tx_queue = efx_get_tx_queue(
+ efx, ev_queue / EFX_TXQ_TYPES,
+ ev_queue % EFX_TXQ_TYPES);
+ tx_queue->flushed = FLUSH_DONE;
+ }
+ } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
+ ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
+ ev_queue = EFX_QWORD_FIELD(
+ *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
+ ev_failed = EFX_QWORD_FIELD(
+ *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
+ if (ev_queue < efx->n_rx_channels) {
+ rx_queue = efx_get_rx_queue(efx, ev_queue);
+ rx_queue->flushed =
+ ev_failed ? FLUSH_FAILED : FLUSH_DONE;
+ }
+ }
+
+ /* We're about to destroy the queue anyway, so
+ * it's ok to throw away every non-flush event */
+ EFX_SET_QWORD(*event);
+
+ ++read_ptr;
+ } while (read_ptr != end_ptr);
+
+ channel->eventq_read_ptr = read_ptr;
+}
+
+/* Handle tx and rx flushes at the same time, since they run in
+ * parallel in the hardware and there's no reason for us to
+ * serialise them */
+int efx_nic_flush_queues(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ int i, tx_pending, rx_pending;
+
+ /* If necessary prepare the hardware for flushing */
+ efx->type->prepare_flush(efx);
+
+ /* Flush all tx queues in parallel */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->initialised)
+ efx_flush_tx_queue(tx_queue);
+ }
+ }
+
+ /* The hardware supports four concurrent rx flushes, each of which may
+ * need to be retried if there is an outstanding descriptor fetch */
+ for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
+ rx_pending = tx_pending = 0;
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (rx_queue->flushed == FLUSH_PENDING)
+ ++rx_pending;
+ }
+ }
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (rx_pending == EFX_RX_FLUSH_COUNT)
+ break;
+ if (rx_queue->flushed == FLUSH_FAILED ||
+ rx_queue->flushed == FLUSH_NONE) {
+ efx_flush_rx_queue(rx_queue);
+ ++rx_pending;
+ }
+ }
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->initialised &&
+ tx_queue->flushed != FLUSH_DONE)
+ ++tx_pending;
+ }
+ }
+
+ if (rx_pending == 0 && tx_pending == 0)
+ return 0;
+
+ msleep(EFX_FLUSH_INTERVAL);
+ efx_poll_flush_events(efx);
+ }
+
+ /* Mark the queues as all flushed. We're going to return failure
+ * leading to a reset, or fake up success anyway */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->initialised &&
+ tx_queue->flushed != FLUSH_DONE)
+ netif_err(efx, hw, efx->net_dev,
+ "tx queue %d flush command timed out\n",
+ tx_queue->queue);
+ tx_queue->flushed = FLUSH_DONE;
+ }
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (rx_queue->flushed != FLUSH_DONE)
+ netif_err(efx, hw, efx->net_dev,
+ "rx queue %d flush command timed out\n",
+ efx_rx_queue_index(rx_queue));
+ rx_queue->flushed = FLUSH_DONE;
+ }
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**************************************************************************
+ *
+ * Hardware interrupts
+ * The hardware interrupt handler does very little work; all the event
+ * queue processing is carried out by per-channel tasklets.
+ *
+ **************************************************************************/
+
+/* Enable/disable/generate interrupts */
+static inline void efx_nic_interrupts(struct efx_nic *efx,
+ bool enabled, bool force)
+{
+ efx_oword_t int_en_reg_ker;
+
+ EFX_POPULATE_OWORD_3(int_en_reg_ker,
+ FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
+ FRF_AZ_KER_INT_KER, force,
+ FRF_AZ_DRV_INT_EN_KER, enabled);
+ efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
+}
+
+void efx_nic_enable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
+ wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
+
+ /* Enable interrupts */
+ efx_nic_interrupts(efx, true, false);
+
+ /* Force processing of all the channels to get the EVQ RPTRs up to
+ * date */
+ efx_for_each_channel(channel, efx)
+ efx_schedule_channel(channel);
+}
+
+void efx_nic_disable_interrupts(struct efx_nic *efx)
+{
+ /* Disable interrupts */
+ efx_nic_interrupts(efx, false, false);
+}
+
+/* Generate a test interrupt
+ * Interrupt must already have been enabled, otherwise nasty things
+ * may happen.
+ */
+void efx_nic_generate_interrupt(struct efx_nic *efx)
+{
+ efx_nic_interrupts(efx, true, true);
+}
+
+/* Process a fatal interrupt
+ * Disable bus mastering ASAP and schedule a reset
+ */
+irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ efx_oword_t fatal_intr;
+ int error, mem_perr;
+
+ efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
+ error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
+
+ netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
+ EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
+ EFX_OWORD_VAL(fatal_intr),
+ error ? "disabling bus mastering" : "no recognised error");
+
+ /* If this is a memory parity error dump which blocks are offending */
+ mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
+ EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
+ if (mem_perr) {
+ efx_oword_t reg;
+ efx_reado(efx, &reg, FR_AZ_MEM_STAT);
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
+ EFX_OWORD_VAL(reg));
+ }
+
+ /* Disable both devices */
+ pci_clear_master(efx->pci_dev);
+ if (efx_nic_is_dual_func(efx))
+ pci_clear_master(nic_data->pci_dev2);
+ efx_nic_disable_interrupts(efx);
+
+ /* Count errors and reset or disable the NIC accordingly */
+ if (efx->int_error_count == 0 ||
+ time_after(jiffies, efx->int_error_expire)) {
+ efx->int_error_count = 0;
+ efx->int_error_expire =
+ jiffies + EFX_INT_ERROR_EXPIRE * HZ;
+ }
+ if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR - reset scheduled\n");
+ efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
+ } else {
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR - max number of errors seen."
+ "NIC will be disabled\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Handle a legacy interrupt
+ * Acknowledges the interrupt and schedules event queue processing.
+ */
+static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
+{
+ struct efx_nic *efx = dev_id;
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ irqreturn_t result = IRQ_NONE;
+ struct efx_channel *channel;
+ efx_dword_t reg;
+ u32 queues;
+ int syserr;
+
+ /* Could this be ours? If interrupts are disabled then the
+ * channel state may not be valid.
+ */
+ if (!efx->legacy_irq_enabled)
+ return result;
+
+ /* Read the ISR which also ACKs the interrupts */
+ efx_readd(efx, &reg, FR_BZ_INT_ISR0);
+ queues = EFX_EXTRACT_DWORD(reg, 0, 31);
+
+ /* Check to see if we have a serious error condition */
+ if (queues & (1U << efx->fatal_irq_level)) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_nic_fatal_interrupt(efx);
+ }
+
+ if (queues != 0) {
+ if (EFX_WORKAROUND_15783(efx))
+ efx->irq_zero_count = 0;
+
+ /* Schedule processing of any interrupting queues */
+ efx_for_each_channel(channel, efx) {
+ if (queues & 1)
+ efx_schedule_channel(channel);
+ queues >>= 1;
+ }
+ result = IRQ_HANDLED;
+
+ } else if (EFX_WORKAROUND_15783(efx)) {
+ efx_qword_t *event;
+
+ /* We can't return IRQ_HANDLED more than once on seeing ISR=0
+ * because this might be a shared interrupt. */
+ if (efx->irq_zero_count++ == 0)
+ result = IRQ_HANDLED;
+
+ /* Ensure we schedule or rearm all event queues */
+ efx_for_each_channel(channel, efx) {
+ event = efx_event(channel, channel->eventq_read_ptr);
+ if (efx_event_present(event))
+ efx_schedule_channel(channel);
+ else
+ efx_nic_eventq_read_ack(channel);
+ }
+ }
+
+ if (result == IRQ_HANDLED) {
+ efx->last_irq_cpu = raw_smp_processor_id();
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+ }
+
+ return result;
+}
+
+/* Handle an MSI interrupt
+ *
+ * Handle an MSI hardware interrupt. This routine schedules event
+ * queue processing. No interrupt acknowledgement cycle is necessary.
+ * Also, we never need to check that the interrupt is for us, since
+ * MSI interrupts cannot be shared.
+ */
+static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
+{
+ struct efx_channel *channel = *(struct efx_channel **)dev_id;
+ struct efx_nic *efx = channel->efx;
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ int syserr;
+
+ efx->last_irq_cpu = raw_smp_processor_id();
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+
+ /* Check to see if we have a serious error condition */
+ if (channel->channel == efx->fatal_irq_level) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_nic_fatal_interrupt(efx);
+ }
+
+ /* Schedule processing of the channel */
+ efx_schedule_channel(channel);
+
+ return IRQ_HANDLED;
+}
+
+
+/* Set up the RSS indirection table.
+ * This maps from the hash value of the packet to the RX queue
+ */
+void efx_nic_push_rx_indir_table(struct efx_nic *efx)
+{
+ size_t i = 0;
+ efx_dword_t dword;
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+ return;
+
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+ FR_BZ_RX_INDIRECTION_TBL_ROWS);
+
+ for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
+ EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
+ efx->rx_indir_table[i]);
+ efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
+ }
+}
+
+/* Hook interrupt handler(s)
+ * Try MSI and then legacy interrupts.
+ */
+int efx_nic_init_interrupt(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ int rc;
+
+ if (!EFX_INT_MODE_USE_MSI(efx)) {
+ irq_handler_t handler;
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
+ handler = efx_legacy_interrupt;
+ else
+ handler = falcon_legacy_interrupt_a1;
+
+ rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
+ efx->name, efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to hook legacy IRQ %d\n",
+ efx->pci_dev->irq);
+ goto fail1;
+ }
+ return 0;
+ }
+
+ /* Hook MSI or MSI-X interrupt */
+ efx_for_each_channel(channel, efx) {
+ rc = request_irq(channel->irq, efx_msi_interrupt,
+ IRQF_PROBE_SHARED, /* Not expected to be shared */
+ efx->channel_name[channel->channel],
+ &efx->channel[channel->channel]);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to hook IRQ %d\n", channel->irq);
+ goto fail2;
+ }
+ }
+
+ return 0;
+
+ fail2:
+ efx_for_each_channel(channel, efx)
+ free_irq(channel->irq, &efx->channel[channel->channel]);
+ fail1:
+ return rc;
+}
+
+void efx_nic_fini_interrupt(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ efx_oword_t reg;
+
+ /* Disable MSI/MSI-X interrupts */
+ efx_for_each_channel(channel, efx) {
+ if (channel->irq)
+ free_irq(channel->irq, &efx->channel[channel->channel]);
+ }
+
+ /* ACK legacy interrupt */
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
+ efx_reado(efx, &reg, FR_BZ_INT_ISR0);
+ else
+ falcon_irq_ack_a1(efx);
+
+ /* Disable legacy interrupt */
+ if (efx->legacy_irq)
+ free_irq(efx->legacy_irq, efx);
+}
+
+u32 efx_nic_fpga_ver(struct efx_nic *efx)
+{
+ efx_oword_t altera_build;
+ efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
+ return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
+}
+
+void efx_nic_init_common(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+
+ /* Set positions of descriptor caches in SRAM. */
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
+ efx->type->tx_dc_base / 8);
+ efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
+ efx->type->rx_dc_base / 8);
+ efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
+
+ /* Set TX descriptor cache size. */
+ BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
+ efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
+
+ /* Set RX descriptor cache size. Set low watermark to size-8, as
+ * this allows most efficient prefetching.
+ */
+ BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
+ efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
+ efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
+
+ /* Program INT_KER address */
+ EFX_POPULATE_OWORD_2(temp,
+ FRF_AZ_NORM_INT_VEC_DIS_KER,
+ EFX_INT_MODE_USE_MSI(efx),
+ FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
+ efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
+
+ if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
+ /* Use an interrupt level unused by event queues */
+ efx->fatal_irq_level = 0x1f;
+ else
+ /* Use a valid MSI-X vector */
+ efx->fatal_irq_level = 0;
+
+ /* Enable all the genuinely fatal interrupts. (They are still
+ * masked by the overall interrupt mask, controlled by
+ * falcon_interrupts()).
+ *
+ * Note: the value is inverted (EFX_INVERT_OWORD below) before it
+ * is written; all other fatal interrupts remain enabled
+ */
+ EFX_POPULATE_OWORD_3(temp,
+ FRF_AZ_ILL_ADR_INT_KER_EN, 1,
+ FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
+ FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
+ EFX_INVERT_OWORD(temp);
+ efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
+
+ efx_nic_push_rx_indir_table(efx);
+
+ /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
+ * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
+ */
+ efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
+ /* Enable SW_EV to inherit in char driver - assume harmless here */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
+ /* Prefetch threshold 2 => fetch when descriptor cache half empty */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
+ /* Disable hardware watchdog which can misfire */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
+ /* Squash TX of packets of 16 bytes or less */
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+ efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_POPULATE_OWORD_4(temp,
+ /* Default values */
+ FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+ FRF_BZ_TX_PACE_SB_AF, 0xb,
+ FRF_BZ_TX_PACE_FB_BASE, 0,
+ /* Allow large pace values in the
+ * fast bin. */
+ FRF_BZ_TX_PACE_BIN_TH,
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+ }
+}
+
+/* Register dump */
+
+#define REGISTER_REVISION_A 1
+#define REGISTER_REVISION_B 2
+#define REGISTER_REVISION_C 3
+#define REGISTER_REVISION_Z 3 /* latest revision */
+
+struct efx_nic_reg {
+ u32 offset:24;
+ u32 min_revision:2, max_revision:2;
+};
+
+#define REGISTER(name, min_rev, max_rev) { \
+ FR_ ## min_rev ## max_rev ## _ ## name, \
+ REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \
+}
+#define REGISTER_AA(name) REGISTER(name, A, A)
+#define REGISTER_AB(name) REGISTER(name, A, B)
+#define REGISTER_AZ(name) REGISTER(name, A, Z)
+#define REGISTER_BB(name) REGISTER(name, B, B)
+#define REGISTER_BZ(name) REGISTER(name, B, Z)
+#define REGISTER_CZ(name) REGISTER(name, C, Z)
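+/* e.g. REGISTER_AB(NIC_STAT) expands to
+ * { FR_AB_NIC_STAT, REGISTER_REVISION_A, REGISTER_REVISION_B },
+ * i.e. { FR_AB_NIC_STAT, 1, 2 } */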
+
+static const struct efx_nic_reg efx_nic_regs[] = {
+ REGISTER_AZ(ADR_REGION),
+ REGISTER_AZ(INT_EN_KER),
+ REGISTER_BZ(INT_EN_CHAR),
+ REGISTER_AZ(INT_ADR_KER),
+ REGISTER_BZ(INT_ADR_CHAR),
+ /* INT_ACK_KER is WO */
+ /* INT_ISR0 is RC */
+ REGISTER_AZ(HW_INIT),
+ REGISTER_CZ(USR_EV_CFG),
+ REGISTER_AB(EE_SPI_HCMD),
+ REGISTER_AB(EE_SPI_HADR),
+ REGISTER_AB(EE_SPI_HDATA),
+ REGISTER_AB(EE_BASE_PAGE),
+ REGISTER_AB(EE_VPD_CFG0),
+ /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
+ /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
+ /* PCIE_CORE_INDIRECT is indirect */
+ REGISTER_AB(NIC_STAT),
+ REGISTER_AB(GPIO_CTL),
+ REGISTER_AB(GLB_CTL),
+ /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
+ REGISTER_BZ(DP_CTRL),
+ REGISTER_AZ(MEM_STAT),
+ REGISTER_AZ(CS_DEBUG),
+ REGISTER_AZ(ALTERA_BUILD),
+ REGISTER_AZ(CSR_SPARE),
+ REGISTER_AB(PCIE_SD_CTL0123),
+ REGISTER_AB(PCIE_SD_CTL45),
+ REGISTER_AB(PCIE_PCS_CTL_STAT),
+ /* DEBUG_DATA_OUT is not used */
+ /* DRV_EV is WO */
+ REGISTER_AZ(EVQ_CTL),
+ REGISTER_AZ(EVQ_CNT1),
+ REGISTER_AZ(EVQ_CNT2),
+ REGISTER_AZ(BUF_TBL_CFG),
+ REGISTER_AZ(SRM_RX_DC_CFG),
+ REGISTER_AZ(SRM_TX_DC_CFG),
+ REGISTER_AZ(SRM_CFG),
+ /* BUF_TBL_UPD is WO */
+ REGISTER_AZ(SRM_UPD_EVQ),
+ REGISTER_AZ(SRAM_PARITY),
+ REGISTER_AZ(RX_CFG),
+ REGISTER_BZ(RX_FILTER_CTL),
+ /* RX_FLUSH_DESCQ is WO */
+ REGISTER_AZ(RX_DC_CFG),
+ REGISTER_AZ(RX_DC_PF_WM),
+ REGISTER_BZ(RX_RSS_TKEY),
+ /* RX_NODESC_DROP is RC */
+ REGISTER_AA(RX_SELF_RST),
+ /* RX_DEBUG, RX_PUSH_DROP are not used */
+ REGISTER_CZ(RX_RSS_IPV6_REG1),
+ REGISTER_CZ(RX_RSS_IPV6_REG2),
+ REGISTER_CZ(RX_RSS_IPV6_REG3),
+ /* TX_FLUSH_DESCQ is WO */
+ REGISTER_AZ(TX_DC_CFG),
+ REGISTER_AA(TX_CHKSM_CFG),
+ REGISTER_AZ(TX_CFG),
+ /* TX_PUSH_DROP is not used */
+ REGISTER_AZ(TX_RESERVED),
+ REGISTER_BZ(TX_PACE),
+ /* TX_PACE_DROP_QID is RC */
+ REGISTER_BB(TX_VLAN),
+ REGISTER_BZ(TX_IPFIL_PORTEN),
+ REGISTER_AB(MD_TXD),
+ REGISTER_AB(MD_RXD),
+ REGISTER_AB(MD_CS),
+ REGISTER_AB(MD_PHY_ADR),
+ REGISTER_AB(MD_ID),
+ /* MD_STAT is RC */
+ REGISTER_AB(MAC_STAT_DMA),
+ REGISTER_AB(MAC_CTRL),
+ REGISTER_BB(GEN_MODE),
+ REGISTER_AB(MAC_MC_HASH_REG0),
+ REGISTER_AB(MAC_MC_HASH_REG1),
+ REGISTER_AB(GM_CFG1),
+ REGISTER_AB(GM_CFG2),
+ /* GM_IPG and GM_HD are not used */
+ REGISTER_AB(GM_MAX_FLEN),
+ /* GM_TEST is not used */
+ REGISTER_AB(GM_ADR1),
+ REGISTER_AB(GM_ADR2),
+ REGISTER_AB(GMF_CFG0),
+ REGISTER_AB(GMF_CFG1),
+ REGISTER_AB(GMF_CFG2),
+ REGISTER_AB(GMF_CFG3),
+ REGISTER_AB(GMF_CFG4),
+ REGISTER_AB(GMF_CFG5),
+ REGISTER_BB(TX_SRC_MAC_CTL),
+ REGISTER_AB(XM_ADR_LO),
+ REGISTER_AB(XM_ADR_HI),
+ REGISTER_AB(XM_GLB_CFG),
+ REGISTER_AB(XM_TX_CFG),
+ REGISTER_AB(XM_RX_CFG),
+ REGISTER_AB(XM_MGT_INT_MASK),
+ REGISTER_AB(XM_FC),
+ REGISTER_AB(XM_PAUSE_TIME),
+ REGISTER_AB(XM_TX_PARAM),
+ REGISTER_AB(XM_RX_PARAM),
+ /* XM_MGT_INT_MSK (note no 'A') is RC */
+ REGISTER_AB(XX_PWR_RST),
+ REGISTER_AB(XX_SD_CTL),
+ REGISTER_AB(XX_TXDRV_CTL),
+ /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
+ /* XX_CORE_STAT is partly RC */
+};
+
+struct efx_nic_reg_table {
+ u32 offset:24;
+ u32 min_revision:2, max_revision:2;
+ u32 step:6, rows:21;
+};
+
+#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
+ offset, \
+ REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \
+ step, rows \
+}
+#define REGISTER_TABLE(name, min_rev, max_rev) \
+ REGISTER_TABLE_DIMENSIONS( \
+ name, FR_ ## min_rev ## max_rev ## _ ## name, \
+ min_rev, max_rev, \
+ FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
+ FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
+#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
+#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
+#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
+#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
+#define REGISTER_TABLE_BB_CZ(name) \
+ REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \
+ FR_BZ_ ## name ## _STEP, \
+ FR_BB_ ## name ## _ROWS), \
+ REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \
+ FR_BZ_ ## name ## _STEP, \
+ FR_CZ_ ## name ## _ROWS)
+#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
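+/* e.g. REGISTER_TABLE_BZ(RX_INDIRECTION_TBL) expands to
+ * { FR_BZ_RX_INDIRECTION_TBL, REGISTER_REVISION_B,
+ *   REGISTER_REVISION_Z, FR_BZ_RX_INDIRECTION_TBL_STEP,
+ *   FR_BZ_RX_INDIRECTION_TBL_ROWS } */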
+
+static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
+ /* DRIVER is not used */
+ /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
+ REGISTER_TABLE_BB(TX_IPFIL_TBL),
+ REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
+ REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
+ REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
+ REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
+ /* We can't reasonably read all of the buffer table (up to 8MB!).
+ * However this driver will only use a few entries. Reading
+ * 1K entries allows for some expansion of queue count and
+ * size before we need to change the version. */
+ REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
+ A, A, 8, 1024),
+ REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
+ B, Z, 8, 1024),
+ REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
+ REGISTER_TABLE_BB_CZ(TIMER_TBL),
+ REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
+ REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
+ /* TX_FILTER_TBL0 is huge and not used by this driver */
+ REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
+ REGISTER_TABLE_CZ(MC_TREG_SMEM),
+ /* MSIX_PBA_TABLE is not mapped */
+ /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
+ REGISTER_TABLE_BZ(RX_FILTER_TBL0),
+};
+
+size_t efx_nic_get_regs_len(struct efx_nic *efx)
+{
+ const struct efx_nic_reg *reg;
+ const struct efx_nic_reg_table *table;
+ size_t len = 0;
+
+ for (reg = efx_nic_regs;
+ reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
+ reg++)
+ if (efx->type->revision >= reg->min_revision &&
+ efx->type->revision <= reg->max_revision)
+ len += sizeof(efx_oword_t);
+
+ for (table = efx_nic_reg_tables;
+ table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
+ table++)
+ if (efx->type->revision >= table->min_revision &&
+ efx->type->revision <= table->max_revision)
+ len += table->rows * min_t(size_t, table->step, 16);
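+ /* min(step, 16): a row is at most one 16-byte register;
+ * larger steps (e.g. 32, interleaved) still dump only
+ * 16 bytes per row */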
+
+ return len;
+}
+
+void efx_nic_get_regs(struct efx_nic *efx, void *buf)
+{
+ const struct efx_nic_reg *reg;
+ const struct efx_nic_reg_table *table;
+
+ for (reg = efx_nic_regs;
+ reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
+ reg++) {
+ if (efx->type->revision >= reg->min_revision &&
+ efx->type->revision <= reg->max_revision) {
+ efx_reado(efx, (efx_oword_t *)buf, reg->offset);
+ buf += sizeof(efx_oword_t);
+ }
+ }
+
+ for (table = efx_nic_reg_tables;
+ table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
+ table++) {
+ size_t size, i;
+
+ if (!(efx->type->revision >= table->min_revision &&
+ efx->type->revision <= table->max_revision))
+ continue;
+
+ size = min_t(size_t, table->step, 16);
+
+ for (i = 0; i < table->rows; i++) {
+ switch (table->step) {
+ case 4: /* 32-bit register or SRAM */
+ efx_readd_table(efx, buf, table->offset, i);
+ break;
+ case 8: /* 64-bit SRAM */
+ efx_sram_readq(efx,
+ efx->membase + table->offset,
+ buf, i);
+ break;
+ case 16: /* 128-bit register */
+ efx_reado_table(efx, buf, table->offset, i);
+ break;
+ case 32: /* 128-bit register, interleaved */
+ efx_reado_table(efx, buf, table->offset, 2 * i);
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ buf += size;
+ }
+ }
+}
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
new file mode 100644
index 000000000000..5fb24d3aa3ca
--- /dev/null
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -0,0 +1,272 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_NIC_H
+#define EFX_NIC_H
+
+#include <linux/i2c-algo-bit.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "mcdi.h"
+#include "spi.h"
+
+/*
+ * Falcon hardware control
+ */
+
+enum {
+ EFX_REV_FALCON_A0 = 0,
+ EFX_REV_FALCON_A1 = 1,
+ EFX_REV_FALCON_B0 = 2,
+ EFX_REV_SIENA_A0 = 3,
+};
+
+static inline int efx_nic_rev(struct efx_nic *efx)
+{
+ return efx->type->revision;
+}
+
+extern u32 efx_nic_fpga_ver(struct efx_nic *efx);
+
+static inline bool efx_nic_has_mc(struct efx_nic *efx)
+{
+ return efx_nic_rev(efx) >= EFX_REV_SIENA_A0;
+}
+/* NIC has two interlinked PCI functions for the same port. */
+static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
+{
+ return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
+}
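+
+/* Hedged usage sketch for the revision helpers above; the branch
+ * bodies are hypothetical placeholders, not driver functions:
+ *
+ *	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
+ *		setup_char_interrupts(efx);		// hypothetical
+ *	else
+ *		setup_legacy_a1_interrupts(efx);	// hypothetical
+ *	if (efx_nic_has_mc(efx))
+ *		use_mcdi(efx);				// hypothetical
+ */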
+
+enum {
+ PHY_TYPE_NONE = 0,
+ PHY_TYPE_TXC43128 = 1,
+ PHY_TYPE_88E1111 = 2,
+ PHY_TYPE_SFX7101 = 3,
+ PHY_TYPE_QT2022C2 = 4,
+ PHY_TYPE_PM8358 = 6,
+ PHY_TYPE_SFT9001A = 8,
+ PHY_TYPE_QT2025C = 9,
+ PHY_TYPE_SFT9001B = 10,
+};
+
+#define FALCON_XMAC_LOOPBACKS \
+ ((1 << LOOPBACK_XGMII) | \
+ (1 << LOOPBACK_XGXS) | \
+ (1 << LOOPBACK_XAUI))
+
+#define FALCON_GMAC_LOOPBACKS \
+ (1 << LOOPBACK_GMAC)
+
+/**
+ * struct falcon_board_type - board operations and type information
+ * @id: Board type id, as found in NVRAM
+ * @ref_model: Model number of Solarflare reference design
+ * @gen_type: Generic board type description
+ * @init: Allocate resources and initialise peripheral hardware
+ * @init_phy: Do board-specific PHY initialisation
+ * @fini: Shut down hardware and free resources
+ * @set_id_led: Set state of identifying LED or revert to automatic function
+ * @monitor: Board-specific health check function
+ */
+struct falcon_board_type {
+ u8 id;
+ const char *ref_model;
+ const char *gen_type;
+ int (*init) (struct efx_nic *nic);
+ void (*init_phy) (struct efx_nic *efx);
+ void (*fini) (struct efx_nic *nic);
+ void (*set_id_led) (struct efx_nic *efx, enum efx_led_mode mode);
+ int (*monitor) (struct efx_nic *nic);
+};
+
+/**
+ * struct falcon_board - board information
+ * @type: Type of board
+ * @major: Major rev. ('A', 'B' ...)
+ * @minor: Minor rev. (0, 1, ...)
+ * @i2c_adap: I2C adapter for on-board peripherals
+ * @i2c_data: Data for bit-banging algorithm
+ * @hwmon_client: I2C client for hardware monitor
+ * @ioexp_client: I2C client for power/port control
+ */
+struct falcon_board {
+ const struct falcon_board_type *type;
+ int major;
+ int minor;
+ struct i2c_adapter i2c_adap;
+ struct i2c_algo_bit_data i2c_data;
+ struct i2c_client *hwmon_client, *ioexp_client;
+};
+
+/**
+ * struct falcon_nic_data - Falcon NIC state
+ * @pci_dev2: Secondary function of Falcon A
+ * @board: Board state and functions
+ * @stats_disable_count: Nest count for disabling statistics fetches
+ * @stats_pending: Whether a DMA of MAC statistics is pending
+ * @stats_timer: Timer for regularly fetching MAC statistics
+ * @stats_dma_done: Pointer to the flag which indicates DMA completion
+ * @spi_flash: SPI flash device
+ * @spi_eeprom: SPI EEPROM device
+ * @spi_lock: SPI bus lock
+ * @mdio_lock: MDIO bus lock
+ * @xmac_poll_required: XMAC link state needs polling
+ */
+struct falcon_nic_data {
+ struct pci_dev *pci_dev2;
+ struct falcon_board board;
+ unsigned int stats_disable_count;
+ bool stats_pending;
+ struct timer_list stats_timer;
+ u32 *stats_dma_done;
+ struct efx_spi_device spi_flash;
+ struct efx_spi_device spi_eeprom;
+ struct mutex spi_lock;
+ struct mutex mdio_lock;
+ bool xmac_poll_required;
+};
+
+static inline struct falcon_board *falcon_board(struct efx_nic *efx)
+{
+ struct falcon_nic_data *data = efx->nic_data;
+ return &data->board;
+}
+
+/**
+ * struct siena_nic_data - Siena NIC state
+ * @mcdi: Management-Controller-to-Driver Interface
+ * @wol_filter_id: Wake-on-LAN packet filter id
+ */
+struct siena_nic_data {
+ struct efx_mcdi_iface mcdi;
+ int wol_filter_id;
+};
+
+extern const struct efx_nic_type falcon_a1_nic_type;
+extern const struct efx_nic_type falcon_b0_nic_type;
+extern const struct efx_nic_type siena_a0_nic_type;
+
+/**************************************************************************
+ *
+ * Externs
+ *
+ **************************************************************************
+ */
+
+extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
+
+/* TX data path */
+extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
+extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue);
+extern void efx_nic_fini_tx(struct efx_tx_queue *tx_queue);
+extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue);
+extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue);
+
+/* RX data path */
+extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue);
+extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue);
+extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue);
+extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue);
+extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue);
+
+/* Event data path */
+extern int efx_nic_probe_eventq(struct efx_channel *channel);
+extern void efx_nic_init_eventq(struct efx_channel *channel);
+extern void efx_nic_fini_eventq(struct efx_channel *channel);
+extern void efx_nic_remove_eventq(struct efx_channel *channel);
+extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
+extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
+extern bool efx_nic_event_present(struct efx_channel *channel);
+
+/* MAC/PHY */
+extern void falcon_drain_tx_fifo(struct efx_nic *efx);
+extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
+
+/* Interrupts and test events */
+extern int efx_nic_init_interrupt(struct efx_nic *efx);
+extern void efx_nic_enable_interrupts(struct efx_nic *efx);
+extern void efx_nic_generate_test_event(struct efx_channel *channel);
+extern void efx_nic_generate_fill_event(struct efx_channel *channel);
+extern void efx_nic_generate_interrupt(struct efx_nic *efx);
+extern void efx_nic_disable_interrupts(struct efx_nic *efx);
+extern void efx_nic_fini_interrupt(struct efx_nic *efx);
+extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx);
+extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id);
+extern void falcon_irq_ack_a1(struct efx_nic *efx);
+
+#define EFX_IRQ_MOD_RESOLUTION 5
+#define EFX_IRQ_MOD_MAX 0x1000
+
+/* Global Resources */
+extern int efx_nic_flush_queues(struct efx_nic *efx);
+extern void falcon_start_nic_stats(struct efx_nic *efx);
+extern void falcon_stop_nic_stats(struct efx_nic *efx);
+extern void falcon_setup_xaui(struct efx_nic *efx);
+extern int falcon_reset_xaui(struct efx_nic *efx);
+extern void efx_nic_init_common(struct efx_nic *efx);
+extern void efx_nic_push_rx_indir_table(struct efx_nic *efx);
+
+int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
+ unsigned int len);
+void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
+
+/* Tests */
+struct efx_nic_register_test {
+ unsigned address;
+ efx_oword_t mask;
+};
+extern int efx_nic_test_registers(struct efx_nic *efx,
+ const struct efx_nic_register_test *regs,
+ size_t n_regs);
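+
+/* For context: a table passed to efx_nic_test_registers() pairs each
+ * register address with a mask of the bits that may safely be flipped.
+ * Sketch of one entry; the mask value is illustrative, not the real one:
+ *
+ *	static const struct efx_nic_register_test example_tests[] = {
+ *		{ FR_AZ_ADR_REGION,
+ *		  EFX_OWORD32(0x0003FFFF, 0x0003FFFF,
+ *			      0x0003FFFF, 0x0003FFFF) },
+ *	};
+ *	rc = efx_nic_test_registers(efx, example_tests,
+ *				    ARRAY_SIZE(example_tests));
+ */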
+
+extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
+extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
+
+/**************************************************************************
+ *
+ * Falcon MAC stats
+ *
+ **************************************************************************
+ */
+
+#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
+#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
+
+/* Retrieve statistic from statistics block */
+#define FALCON_STAT(efx, falcon_stat, efx_stat) do { \
+ if (FALCON_STAT_WIDTH(falcon_stat) == 16) \
+ (efx)->mac_stats.efx_stat += le16_to_cpu( \
+ *((__force __le16 *) \
+ (efx->stats_buffer.addr + \
+ FALCON_STAT_OFFSET(falcon_stat)))); \
+ else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \
+ (efx)->mac_stats.efx_stat += le32_to_cpu( \
+ *((__force __le32 *) \
+ (efx->stats_buffer.addr + \
+ FALCON_STAT_OFFSET(falcon_stat)))); \
+ else \
+ (efx)->mac_stats.efx_stat += le64_to_cpu( \
+ *((__force __le64 *) \
+ (efx->stats_buffer.addr + \
+ FALCON_STAT_OFFSET(falcon_stat)))); \
+ } while (0)
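+
+/* Hedged usage example: a MAC statistics update loop accumulates one
+ * hardware counter per invocation. The field name XgRxOctets and the
+ * mac_stats member rx_bytes are assumptions for illustration:
+ *
+ *	FALCON_STAT(efx, XgRxOctets, rx_bytes);
+ */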
+
+#define FALCON_MAC_STATS_SIZE 0x100
+
+#define MAC_DATA_LBN 0
+#define MAC_DATA_WIDTH 32
+
+extern void efx_nic_generate_event(struct efx_channel *channel,
+ efx_qword_t *event);
+
+extern void falcon_poll_xmac(struct efx_nic *efx);
+
+#endif /* EFX_NIC_H */
diff --git a/drivers/net/ethernet/sfc/phy.h b/drivers/net/ethernet/sfc/phy.h
new file mode 100644
index 000000000000..11d148cd8441
--- /dev/null
+++ b/drivers/net/ethernet/sfc/phy.h
@@ -0,0 +1,67 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2007-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_PHY_H
+#define EFX_PHY_H
+
+/****************************************************************************
+ * 10Xpress (SFX7101) PHY
+ */
+extern const struct efx_phy_operations falcon_sfx7101_phy_ops;
+
+extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+
+/****************************************************************************
+ * AMCC/Quake QT202x PHYs
+ */
+extern const struct efx_phy_operations falcon_qt202x_phy_ops;
+
+/* These PHYs provide various H/W control states for LEDs */
+#define QUAKE_LED_LINK_INVAL (0)
+#define QUAKE_LED_LINK_STAT (1)
+#define QUAKE_LED_LINK_ACT (2)
+#define QUAKE_LED_LINK_ACTSTAT (3)
+#define QUAKE_LED_OFF (4)
+#define QUAKE_LED_ON (5)
+#define QUAKE_LED_LINK_INPUT (6) /* Pin is an input. */
+/* What link the LED tracks */
+#define QUAKE_LED_TXLINK (0)
+#define QUAKE_LED_RXLINK (8)
+
+extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
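+
+/* Sketch of how a board's init_phy hook might drive these modes; which
+ * LED index maps to which physical lamp is board-specific and assumed:
+ *
+ *	falcon_qt202x_set_led(efx, 0,
+ *			      QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
+ *	falcon_qt202x_set_led(efx, 1, QUAKE_LED_OFF);
+ */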
+
+/****************************************************************************
+ * Transwitch CX4 retimer
+ */
+extern const struct efx_phy_operations falcon_txc_phy_ops;
+
+#define TXC_GPIO_DIR_INPUT 0
+#define TXC_GPIO_DIR_OUTPUT 1
+
+extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
+extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
+
+/****************************************************************************
+ * Siena managed PHYs
+ */
+extern const struct efx_phy_operations efx_mcdi_phy_ops;
+
+extern int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
+ unsigned int prtad, unsigned int devad,
+ u16 addr, u16 *value_out, u32 *status_out);
+extern int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
+ unsigned int prtad, unsigned int devad,
+ u16 addr, u16 value, u32 *status_out);
+extern void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+ struct efx_link_state *link_state,
+ u32 speed, u32 flags, u32 fcntl);
+extern int efx_mcdi_phy_reconfigure(struct efx_nic *efx);
+extern void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa);
+
+#endif /* EFX_PHY_H */
diff --git a/drivers/net/ethernet/sfc/qt202x_phy.c b/drivers/net/ethernet/sfc/qt202x_phy.c
new file mode 100644
index 000000000000..7ad97e397406
--- /dev/null
+++ b/drivers/net/ethernet/sfc/qt202x_phy.c
@@ -0,0 +1,462 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2006-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+/*
+ * Driver for AMCC QT202x SFP+ and XFP adapters; see www.amcc.com for details
+ */
+
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include "efx.h"
+#include "mdio_10g.h"
+#include "phy.h"
+#include "nic.h"
+
+#define QT202X_REQUIRED_DEVS (MDIO_DEVS_PCS | \
+ MDIO_DEVS_PMAPMD | \
+ MDIO_DEVS_PHYXS)
+
+#define QT202X_LOOPBACKS ((1 << LOOPBACK_PCS) | \
+ (1 << LOOPBACK_PMAPMD) | \
+ (1 << LOOPBACK_PHYXS_WS))
+
+/****************************************************************************/
+/* Quake-specific MDIO registers */
+#define MDIO_QUAKE_LED0_REG (0xD006)
+
+/* QT2025C only */
+#define PCS_FW_HEARTBEAT_REG 0xd7ee
+#define PCS_FW_HEARTB_LBN 0
+#define PCS_FW_HEARTB_WIDTH 8
+#define PCS_FW_PRODUCT_CODE_1 0xd7f0
+#define PCS_FW_VERSION_1 0xd7f3
+#define PCS_FW_BUILD_1 0xd7f6
+#define PCS_UC8051_STATUS_REG 0xd7fd
+#define PCS_UC_STATUS_LBN 0
+#define PCS_UC_STATUS_WIDTH 8
+#define PCS_UC_STATUS_FW_SAVE 0x20
+#define PMA_PMD_MODE_REG 0xc301
+#define PMA_PMD_RXIN_SEL_LBN 6
+#define PMA_PMD_FTX_CTRL2_REG 0xc309
+#define PMA_PMD_FTX_STATIC_LBN 13
+#define PMA_PMD_VEND1_REG 0xc001
+#define PMA_PMD_VEND1_LBTXD_LBN 15
+#define PCS_VEND1_REG 0xc000
+#define PCS_VEND1_LBTXD_LBN 5
+
+void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
+{
+ int addr = MDIO_QUAKE_LED0_REG + led;
+ efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode);
+}
+
+struct qt202x_phy_data {
+ enum efx_phy_mode phy_mode;
+ bool bug17190_in_bad_state;
+ unsigned long bug17190_timer;
+ u32 firmware_ver;
+};
+
+#define QT2022C2_MAX_RESET_TIME 500
+#define QT2022C2_RESET_WAIT 10
+
+#define QT2025C_MAX_HEARTB_TIME (5 * HZ)
+#define QT2025C_HEARTB_WAIT 100
+#define QT2025C_MAX_FWSTART_TIME (25 * HZ / 10)
+#define QT2025C_FWSTART_WAIT 100
+
+#define BUG17190_INTERVAL (2 * HZ)
+
+static int qt2025c_wait_heartbeat(struct efx_nic *efx)
+{
+ unsigned long timeout = jiffies + QT2025C_MAX_HEARTB_TIME;
+ int reg, old_counter = 0;
+
+ /* Wait for firmware heartbeat to start */
+ for (;;) {
+ int counter;
+ reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_FW_HEARTBEAT_REG);
+ if (reg < 0)
+ return reg;
+ counter = ((reg >> PCS_FW_HEARTB_LBN) &
+ ((1 << PCS_FW_HEARTB_WIDTH) - 1));
+ if (old_counter == 0)
+ old_counter = counter;
+ else if (counter != old_counter)
+ break;
+ if (time_after(jiffies, timeout)) {
+ /* Some cables have EEPROMs that conflict with the
+ * PHY's on-board EEPROM so it cannot load firmware */
+ netif_err(efx, hw, efx->net_dev,
+ "If an SFP+ direct attach cable is"
+ " connected, please check that it complies"
+ " with the SFP+ specification\n");
+ return -ETIMEDOUT;
+ }
+ msleep(QT2025C_HEARTB_WAIT);
+ }
+
+ return 0;
+}
+
+static int qt2025c_wait_fw_status_good(struct efx_nic *efx)
+{
+ unsigned long timeout = jiffies + QT2025C_MAX_FWSTART_TIME;
+ int reg;
+
+ /* Wait for firmware status to look good */
+ for (;;) {
+ reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG);
+ if (reg < 0)
+ return reg;
+ if ((reg &
+ ((1 << PCS_UC_STATUS_WIDTH) - 1) << PCS_UC_STATUS_LBN) >=
+ PCS_UC_STATUS_FW_SAVE)
+ break;
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ msleep(QT2025C_FWSTART_WAIT);
+ }
+
+ return 0;
+}
+
+static void qt2025c_restart_firmware(struct efx_nic *efx)
+{
+ /* Restart microcontroller execution of firmware from RAM */
+ efx_mdio_write(efx, 3, 0xe854, 0x00c0);
+ efx_mdio_write(efx, 3, 0xe854, 0x0040);
+ msleep(50);
+}
+
+static int qt2025c_wait_reset(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = qt2025c_wait_heartbeat(efx);
+ if (rc != 0)
+ return rc;
+
+ rc = qt2025c_wait_fw_status_good(efx);
+ if (rc == -ETIMEDOUT) {
+ /* Bug 17689: occasionally heartbeat starts but firmware status
+ * code never progresses beyond 0x00. Try again, once, after
+ * restarting execution of the firmware image. */
+ netif_dbg(efx, hw, efx->net_dev,
+ "bashing QT2025C microcontroller\n");
+ qt2025c_restart_firmware(efx);
+ rc = qt2025c_wait_heartbeat(efx);
+ if (rc != 0)
+ return rc;
+ rc = qt2025c_wait_fw_status_good(efx);
+ }
+
+ return rc;
+}
+
+static void qt2025c_firmware_id(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data = efx->phy_data;
+ u8 firmware_id[9];
+ size_t i;
+
+ for (i = 0; i < sizeof(firmware_id); i++)
+ firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS,
+ PCS_FW_PRODUCT_CODE_1 + i);
+ netif_info(efx, probe, efx->net_dev,
+ "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
+ (firmware_id[0] << 8) | firmware_id[1], firmware_id[2],
+ firmware_id[3] >> 4, firmware_id[3] & 0xf,
+ firmware_id[4], firmware_id[5],
+ firmware_id[6], firmware_id[7], firmware_id[8]);
+ phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) |
+ ((firmware_id[3] & 0x0f) << 16) |
+ (firmware_id[4] << 8) | firmware_id[5];
+}
+
+static void qt2025c_bug17190_workaround(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data = efx->phy_data;
+
+ /* The PHY can get stuck in a state where it reports PHY_XS and PMA/PMD
+ * layers up, but PCS down (no block_lock). If we notice this state
+ * persisting for a couple of seconds, we switch PMA/PMD loopback
+ * briefly on and then off again, which is normally sufficient to
+ * recover it.
+ */
+ if (efx->link_state.up ||
+ !efx_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) {
+ phy_data->bug17190_in_bad_state = false;
+ return;
+ }
+
+ if (!phy_data->bug17190_in_bad_state) {
+ phy_data->bug17190_in_bad_state = true;
+ phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
+ return;
+ }
+
+ if (time_after_eq(jiffies, phy_data->bug17190_timer)) {
+ netif_dbg(efx, hw, efx->net_dev, "bashing QT2025C PMA/PMD\n");
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_PMA_CTRL1_LOOPBACK, true);
+ msleep(100);
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_PMA_CTRL1_LOOPBACK, false);
+ phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
+ }
+}
+
+static int qt2025c_select_phy_mode(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data = efx->phy_data;
+ struct falcon_board *board = falcon_board(efx);
+ int reg, rc, i;
+ uint16_t phy_op_mode;
+
+ /* Only 2.0.1.0+ PHY firmware supports the more optimal SFP+
+ * Self-Configure mode. Don't attempt any switching if we encounter
+ * older firmware. */
+ if (phy_data->firmware_ver < 0x02000100)
+ return 0;
+
+ /* In general we will get optimal behaviour in "SFP+ Self-Configure"
+ * mode; however, that powers down most of the PHY when no module is
+ * present, so we must use a different mode (any fixed mode will do)
+ * to be sure that loopbacks will work. */
+ phy_op_mode = (efx->loopback_mode == LOOPBACK_NONE) ? 0x0038 : 0x0020;
+
+ /* Only change mode if really necessary */
+ reg = efx_mdio_read(efx, 1, 0xc319);
+ if ((reg & 0x0038) == phy_op_mode)
+ return 0;
+ netif_dbg(efx, hw, efx->net_dev, "Switching PHY to mode 0x%04x\n",
+ phy_op_mode);
+
+ /* This sequence replicates the register writes configured in the boot
+ * EEPROM (including the differences between board revisions), except
+ * that the operating mode is changed, and the PHY is prevented from
+ * unnecessarily reloading the main firmware image again. */
+ efx_mdio_write(efx, 1, 0xc300, 0x0000);
+ /* (Note: this portion of the boot EEPROM sequence, which bit-bashes 9
+ * STOPs onto the firmware/module I2C bus to reset it, varies across
+ * board revisions, as the bus is connected to different GPIO/LED
+ * outputs on the PHY.) */
+ if (board->major == 0 && board->minor < 2) {
+ efx_mdio_write(efx, 1, 0xc303, 0x4498);
+ for (i = 0; i < 9; i++) {
+ efx_mdio_write(efx, 1, 0xc303, 0x4488);
+ efx_mdio_write(efx, 1, 0xc303, 0x4480);
+ efx_mdio_write(efx, 1, 0xc303, 0x4490);
+ efx_mdio_write(efx, 1, 0xc303, 0x4498);
+ }
+ } else {
+ efx_mdio_write(efx, 1, 0xc303, 0x0920);
+ efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ for (i = 0; i < 9; i++) {
+ efx_mdio_write(efx, 1, 0xc303, 0x0900);
+ efx_mdio_write(efx, 1, 0xd008, 0x0005);
+ efx_mdio_write(efx, 1, 0xc303, 0x0920);
+ efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ }
+ efx_mdio_write(efx, 1, 0xc303, 0x4900);
+ }
+ efx_mdio_write(efx, 1, 0xc303, 0x4900);
+ efx_mdio_write(efx, 1, 0xc302, 0x0004);
+ efx_mdio_write(efx, 1, 0xc316, 0x0013);
+ efx_mdio_write(efx, 1, 0xc318, 0x0054);
+ efx_mdio_write(efx, 1, 0xc319, phy_op_mode);
+ efx_mdio_write(efx, 1, 0xc31a, 0x0098);
+ efx_mdio_write(efx, 3, 0x0026, 0x0e00);
+ efx_mdio_write(efx, 3, 0x0027, 0x0013);
+ efx_mdio_write(efx, 3, 0x0028, 0xa528);
+ efx_mdio_write(efx, 1, 0xd006, 0x000a);
+ efx_mdio_write(efx, 1, 0xd007, 0x0009);
+ efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ /* This additional write is not present in the boot EEPROM. It
+ * prevents the PHY's internal boot ROM doing another pointless (and
+ * slow) reload of the firmware image (the microcontroller's code
+ * memory is not affected by the microcontroller reset). */
+ efx_mdio_write(efx, 1, 0xc317, 0x00ff);
+ /* PMA/PMD loopback sets RXIN to inverse polarity and the firmware
+ * restart doesn't reset it. We need to do that ourselves. */
+ efx_mdio_set_flag(efx, 1, PMA_PMD_MODE_REG,
+ 1 << PMA_PMD_RXIN_SEL_LBN, false);
+ efx_mdio_write(efx, 1, 0xc300, 0x0002);
+ msleep(20);
+
+ /* Restart microcontroller execution of firmware from RAM */
+ qt2025c_restart_firmware(efx);
+
+ /* Wait for the microcontroller to be ready again */
+ rc = qt2025c_wait_reset(efx);
+ if (rc < 0) {
+ netif_err(efx, hw, efx->net_dev,
+ "PHY microcontroller reset during mode switch "
+ "timed out\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qt202x_reset_phy(struct efx_nic *efx)
+{
+ int rc;
+
+ if (efx->phy_type == PHY_TYPE_QT2025C) {
+ /* Wait for the reset triggered by falcon_reset_hw()
+ * to complete */
+ rc = qt2025c_wait_reset(efx);
+ if (rc < 0)
+ goto fail;
+ } else {
+ /* Reset the PHYXS MMD. This is documented as doing
+ * a complete soft reset. */
+ rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS,
+ QT2022C2_MAX_RESET_TIME /
+ QT2022C2_RESET_WAIT,
+ QT2022C2_RESET_WAIT);
+ if (rc < 0)
+ goto fail;
+ }
+
+ /* Wait 250ms for the PHY to complete bootup */
+ msleep(250);
+
+ falcon_board(efx)->type->init_phy(efx);
+
+ return 0;
+
+ fail:
+ netif_err(efx, hw, efx->net_dev, "PHY reset timed out\n");
+ return rc;
+}
+
+static int qt202x_phy_probe(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data;
+
+ phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+ efx->phy_data = phy_data;
+ phy_data->phy_mode = efx->phy_mode;
+ phy_data->bug17190_in_bad_state = false;
+ phy_data->bug17190_timer = 0;
+
+ efx->mdio.mmds = QT202X_REQUIRED_DEVS;
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
+ return 0;
+}
+
+static int qt202x_phy_init(struct efx_nic *efx)
+{
+ u32 devid;
+ int rc;
+
+ rc = qt202x_reset_phy(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev, "PHY init failed\n");
+ return rc;
+ }
+
+ devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
+ netif_info(efx, probe, efx->net_dev,
+ "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
+ devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
+ efx_mdio_id_rev(devid));
+
+ if (efx->phy_type == PHY_TYPE_QT2025C)
+ qt2025c_firmware_id(efx);
+
+ return 0;
+}
+
+static int qt202x_link_ok(struct efx_nic *efx)
+{
+ return efx_mdio_links_ok(efx, QT202X_REQUIRED_DEVS);
+}
+
+static bool qt202x_phy_poll(struct efx_nic *efx)
+{
+ bool was_up = efx->link_state.up;
+
+ efx->link_state.up = qt202x_link_ok(efx);
+ efx->link_state.speed = 10000;
+ efx->link_state.fd = true;
+ efx->link_state.fc = efx->wanted_fc;
+
+ if (efx->phy_type == PHY_TYPE_QT2025C)
+ qt2025c_bug17190_workaround(efx);
+
+ return efx->link_state.up != was_up;
+}
+
+static int qt202x_phy_reconfigure(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data = efx->phy_data;
+
+ if (efx->phy_type == PHY_TYPE_QT2025C) {
+ int rc = qt2025c_select_phy_mode(efx);
+ if (rc)
+ return rc;
+
+ /* There are several different register bits which can
+ * disable TX (and save power) on direct-attach cables
+ * or optical transceivers, varying somewhat between
+ * firmware versions. Only 'static mode' appears to
+ * cover everything. */
+ mdio_set_flag(
+ &efx->mdio, efx->mdio.prtad, MDIO_MMD_PMAPMD,
+ PMA_PMD_FTX_CTRL2_REG, 1 << PMA_PMD_FTX_STATIC_LBN,
+ efx->phy_mode & PHY_MODE_TX_DISABLED ||
+ efx->phy_mode & PHY_MODE_LOW_POWER ||
+ efx->loopback_mode == LOOPBACK_PCS ||
+ efx->loopback_mode == LOOPBACK_PMAPMD);
+ } else {
+ /* Reset the PHY when moving from tx off to tx on */
+ if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) &&
+ (phy_data->phy_mode & PHY_MODE_TX_DISABLED))
+ qt202x_reset_phy(efx);
+
+ efx_mdio_transmit_disable(efx);
+ }
+
+ efx_mdio_phy_reconfigure(efx);
+
+ phy_data->phy_mode = efx->phy_mode;
+
+ return 0;
+}
+
+static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+ mdio45_ethtool_gset(&efx->mdio, ecmd);
+}
+
+static void qt202x_phy_remove(struct efx_nic *efx)
+{
+ /* Free the context block */
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
+}
+
+const struct efx_phy_operations falcon_qt202x_phy_ops = {
+ .probe = qt202x_phy_probe,
+ .init = qt202x_phy_init,
+ .reconfigure = qt202x_phy_reconfigure,
+ .poll = qt202x_phy_poll,
+ .fini = efx_port_dummy_op_void,
+ .remove = qt202x_phy_remove,
+ .get_settings = qt202x_phy_get_settings,
+ .set_settings = efx_mdio_set_settings,
+ .test_alive = efx_mdio_test_alive,
+};
diff --git a/drivers/net/ethernet/sfc/regs.h b/drivers/net/ethernet/sfc/regs.h
new file mode 100644
index 000000000000..cc2c86b76a7b
--- /dev/null
+++ b/drivers/net/ethernet/sfc/regs.h
@@ -0,0 +1,3188 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_REGS_H
+#define EFX_REGS_H
+
+/*
+ * Falcon hardware architecture definitions have a name prefix following
+ * the format:
+ *
+ * F<type>_<min-rev><max-rev>_
+ *
+ * The following <type> strings are used:
+ *
+ *              MMIO register  MC register  Host memory structure
+ *  -------------------------------------------------------------
+ *  Address     R              MCR
+ *  Bitfield    RF             MCRF         SF
+ *  Enumerator  FE             MCFE         SE
+ *
+ * <min-rev> is the first revision to which the definition applies:
+ *
+ * A: Falcon A1 (SFC4000AB)
+ * B: Falcon B0 (SFC4000BA)
+ * C: Siena A0 (SFL9021AA)
+ *
+ * If the definition has been changed or removed in later revisions
+ * then <max-rev> is the last revision to which the definition applies;
+ * otherwise it is "Z".
+ */
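+
+/* Worked example of the convention: FRF_AZ_ADR_REGION0_LBN below is an
+ * MMIO register bitfield (FRF) valid from Falcon A1 through Siena (AZ).
+ * A minimal sketch of reading that field, assuming the usual
+ * efx_reado()/EFX_OWORD_FIELD() accessors:
+ *
+ *	efx_oword_t reg;
+ *	u32 region0;
+ *
+ *	efx_reado(efx, &reg, FR_AZ_ADR_REGION);
+ *	region0 = EFX_OWORD_FIELD(reg, FRF_AZ_ADR_REGION0);
+ */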
+
+/**************************************************************************
+ *
+ * Falcon/Siena registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/* ADR_REGION_REG: Address region register */
+#define FR_AZ_ADR_REGION 0x00000000
+#define FRF_AZ_ADR_REGION3_LBN 96
+#define FRF_AZ_ADR_REGION3_WIDTH 18
+#define FRF_AZ_ADR_REGION2_LBN 64
+#define FRF_AZ_ADR_REGION2_WIDTH 18
+#define FRF_AZ_ADR_REGION1_LBN 32
+#define FRF_AZ_ADR_REGION1_WIDTH 18
+#define FRF_AZ_ADR_REGION0_LBN 0
+#define FRF_AZ_ADR_REGION0_WIDTH 18
+
+/* INT_EN_REG_KER: Kernel driver Interrupt enable register */
+#define FR_AZ_INT_EN_KER 0x00000010
+#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8
+#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6
+#define FRF_AZ_KER_INT_CHAR_LBN 4
+#define FRF_AZ_KER_INT_CHAR_WIDTH 1
+#define FRF_AZ_KER_INT_KER_LBN 3
+#define FRF_AZ_KER_INT_KER_WIDTH 1
+#define FRF_AZ_DRV_INT_EN_KER_LBN 0
+#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1
+
+/* INT_EN_REG_CHAR: Char Driver interrupt enable register */
+#define FR_BZ_INT_EN_CHAR 0x00000020
+#define FRF_BZ_CHAR_INT_LEVE_SEL_LBN 8
+#define FRF_BZ_CHAR_INT_LEVE_SEL_WIDTH 6
+#define FRF_BZ_CHAR_INT_CHAR_LBN 4
+#define FRF_BZ_CHAR_INT_CHAR_WIDTH 1
+#define FRF_BZ_CHAR_INT_KER_LBN 3
+#define FRF_BZ_CHAR_INT_KER_WIDTH 1
+#define FRF_BZ_DRV_INT_EN_CHAR_LBN 0
+#define FRF_BZ_DRV_INT_EN_CHAR_WIDTH 1
+
+/* INT_ADR_REG_KER: Interrupt host address for Kernel driver */
+#define FR_AZ_INT_ADR_KER 0x00000030
+#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64
+#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1
+#define FRF_AZ_INT_ADR_KER_LBN 0
+#define FRF_AZ_INT_ADR_KER_WIDTH 64
+
+/* INT_ADR_REG_CHAR: Interrupt host address for Char driver */
+#define FR_BZ_INT_ADR_CHAR 0x00000040
+#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_LBN 64
+#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1
+#define FRF_BZ_INT_ADR_CHAR_LBN 0
+#define FRF_BZ_INT_ADR_CHAR_WIDTH 64
+
+/* INT_ACK_KER: Kernel interrupt acknowledge register */
+#define FR_AA_INT_ACK_KER 0x00000050
+#define FRF_AA_INT_ACK_KER_FIELD_LBN 0
+#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32
+
+/* INT_ISR0_REG: Function 0 Interrupt Acknowledge Status register */
+#define FR_BZ_INT_ISR0 0x00000090
+#define FRF_BZ_INT_ISR_REG_LBN 0
+#define FRF_BZ_INT_ISR_REG_WIDTH 64
+
+/* HW_INIT_REG: Hardware initialization register */
+#define FR_AZ_HW_INIT 0x000000c0
+#define FRF_BB_BDMRD_CPLF_FULL_LBN 124
+#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1
+#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121
+#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3
+#define FRF_CZ_TX_MRG_TAGS_LBN 120
+#define FRF_CZ_TX_MRG_TAGS_WIDTH 1
+#define FRF_AB_TRGT_MASK_ALL_LBN 100
+#define FRF_AB_TRGT_MASK_ALL_WIDTH 1
+#define FRF_AZ_DOORBELL_DROP_LBN 92
+#define FRF_AZ_DOORBELL_DROP_WIDTH 8
+#define FRF_AB_TX_RREQ_MASK_EN_LBN 76
+#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1
+#define FRF_AB_PE_EIDLE_DIS_LBN 75
+#define FRF_AB_PE_EIDLE_DIS_WIDTH 1
+#define FRF_AA_FC_BLOCKING_EN_LBN 45
+#define FRF_AA_FC_BLOCKING_EN_WIDTH 1
+#define FRF_BZ_B2B_REQ_EN_LBN 45
+#define FRF_BZ_B2B_REQ_EN_WIDTH 1
+#define FRF_AA_B2B_REQ_EN_LBN 44
+#define FRF_AA_B2B_REQ_EN_WIDTH 1
+#define FRF_BB_FC_BLOCKING_EN_LBN 44
+#define FRF_BB_FC_BLOCKING_EN_WIDTH 1
+#define FRF_AZ_POST_WR_MASK_LBN 40
+#define FRF_AZ_POST_WR_MASK_WIDTH 4
+#define FRF_AZ_TLP_TC_LBN 34
+#define FRF_AZ_TLP_TC_WIDTH 3
+#define FRF_AZ_TLP_ATTR_LBN 32
+#define FRF_AZ_TLP_ATTR_WIDTH 2
+#define FRF_AB_INTB_VEC_LBN 24
+#define FRF_AB_INTB_VEC_WIDTH 5
+#define FRF_AB_INTA_VEC_LBN 16
+#define FRF_AB_INTA_VEC_WIDTH 5
+#define FRF_AZ_WD_TIMER_LBN 8
+#define FRF_AZ_WD_TIMER_WIDTH 8
+#define FRF_AZ_US_DISABLE_LBN 5
+#define FRF_AZ_US_DISABLE_WIDTH 1
+#define FRF_AZ_TLP_EP_LBN 4
+#define FRF_AZ_TLP_EP_WIDTH 1
+#define FRF_AZ_ATTR_SEL_LBN 3
+#define FRF_AZ_ATTR_SEL_WIDTH 1
+#define FRF_AZ_TD_SEL_LBN 1
+#define FRF_AZ_TD_SEL_WIDTH 1
+#define FRF_AZ_TLP_TD_LBN 0
+#define FRF_AZ_TLP_TD_WIDTH 1
+
+/* EE_SPI_HCMD_REG: SPI host command register */
+#define FR_AB_EE_SPI_HCMD 0x00000100
+#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31
+#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1
+#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28
+#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24
+#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16
+#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5
+#define FRF_AB_EE_SPI_HCMD_READ_LBN 15
+#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12
+#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2
+#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8
+#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2
+#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0
+#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8
+
+/* USR_EV_CFG: User Level Event Configuration register */
+#define FR_CZ_USR_EV_CFG 0x00000100
+#define FRF_CZ_USREV_DIS_LBN 16
+#define FRF_CZ_USREV_DIS_WIDTH 1
+#define FRF_CZ_DFLT_EVQ_LBN 0
+#define FRF_CZ_DFLT_EVQ_WIDTH 10
+
+/* EE_SPI_HADR_REG: SPI host address register */
+#define FR_AB_EE_SPI_HADR 0x00000110
+#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24
+#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8
+#define FRF_AB_EE_SPI_HADR_ADR_LBN 0
+#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24
+
+/* EE_SPI_HDATA_REG: SPI host data register */
+#define FR_AB_EE_SPI_HDATA 0x00000120
+#define FRF_AB_EE_SPI_HDATA3_LBN 96
+#define FRF_AB_EE_SPI_HDATA3_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA2_LBN 64
+#define FRF_AB_EE_SPI_HDATA2_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA1_LBN 32
+#define FRF_AB_EE_SPI_HDATA1_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA0_LBN 0
+#define FRF_AB_EE_SPI_HDATA0_WIDTH 32
+
+/* EE_BASE_PAGE_REG: Expansion ROM base mirror register */
+#define FR_AB_EE_BASE_PAGE 0x00000130
+#define FRF_AB_EE_EXPROM_MASK_LBN 16
+#define FRF_AB_EE_EXPROM_MASK_WIDTH 13
+#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0
+#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13
+
+/* EE_VPD_CFG0_REG: SPI/VPD configuration register 0 */
+#define FR_AB_EE_VPD_CFG0 0x00000140
+#define FRF_AB_EE_SF_FASTRD_EN_LBN 127
+#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1
+#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120
+#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7
+#define FRF_AB_EE_VPD_WIP_POLL_LBN 119
+#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1
+#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112
+#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7
+#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96
+#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16
+#define FRF_AB_EE_VPDW_LENGTH_LBN 80
+#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15
+#define FRF_AB_EE_VPDW_BASE_LBN 64
+#define FRF_AB_EE_VPDW_BASE_WIDTH 15
+#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56
+#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8
+#define FRF_AB_EE_VPD_BASE_LBN 32
+#define FRF_AB_EE_VPD_BASE_WIDTH 24
+#define FRF_AB_EE_VPD_LENGTH_LBN 16
+#define FRF_AB_EE_VPD_LENGTH_WIDTH 15
+#define FRF_AB_EE_VPD_AD_SIZE_LBN 8
+#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5
+#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5
+#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1
+#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4
+#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1
+#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2
+#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1
+#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1
+#define FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1
+#define FRF_AB_EE_VPD_EN_LBN 0
+#define FRF_AB_EE_VPD_EN_WIDTH 1
+
+/* EE_VPD_SW_CNTL_REG: VPD access SW control register */
+#define FR_AB_EE_VPD_SW_CNTL 0x00000150
+#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31
+#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1
+#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28
+#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1
+#define FRF_AB_EE_VPD_CYC_ADR_LBN 0
+#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15
+
+/* EE_VPD_SW_DATA_REG: VPD access SW data register */
+#define FR_AB_EE_VPD_SW_DATA 0x00000160
+#define FRF_AB_EE_VPD_CYC_DAT_LBN 0
+#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32
+
+/* PBMX_DBG_IADDR_REG: Capture Module address register */
+#define FR_CZ_PBMX_DBG_IADDR 0x000001f0
+#define FRF_CZ_PBMX_DBG_IADDR_LBN 0
+#define FRF_CZ_PBMX_DBG_IADDR_WIDTH 32
+
+/* PCIE_CORE_INDIRECT_REG: Indirect Access to PCIE Core registers */
+#define FR_BB_PCIE_CORE_INDIRECT 0x000001f0
+#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32
+#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32
+#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15
+#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1
+#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0
+#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12
+
+/* PBMX_DBG_IDATA_REG: Capture Module data register */
+#define FR_CZ_PBMX_DBG_IDATA 0x000001f8
+#define FRF_CZ_PBMX_DBG_IDATA_LBN 0
+#define FRF_CZ_PBMX_DBG_IDATA_WIDTH 64
+
+/* NIC_STAT_REG: NIC status register */
+#define FR_AB_NIC_STAT 0x00000200
+#define FRF_BB_AER_DIS_LBN 34
+#define FRF_BB_AER_DIS_WIDTH 1
+#define FRF_BB_EE_STRAP_EN_LBN 31
+#define FRF_BB_EE_STRAP_EN_WIDTH 1
+#define FRF_BB_EE_STRAP_LBN 24
+#define FRF_BB_EE_STRAP_WIDTH 4
+#define FRF_BB_REVISION_ID_LBN 17
+#define FRF_BB_REVISION_ID_WIDTH 7
+#define FRF_AB_ONCHIP_SRAM_LBN 16
+#define FRF_AB_ONCHIP_SRAM_WIDTH 1
+#define FRF_AB_SF_PRST_LBN 9
+#define FRF_AB_SF_PRST_WIDTH 1
+#define FRF_AB_EE_PRST_LBN 8
+#define FRF_AB_EE_PRST_WIDTH 1
+#define FRF_AB_ATE_MODE_LBN 3
+#define FRF_AB_ATE_MODE_WIDTH 1
+#define FRF_AB_STRAP_PINS_LBN 0
+#define FRF_AB_STRAP_PINS_WIDTH 3
+
+/* GPIO_CTL_REG: GPIO control register */
+#define FR_AB_GPIO_CTL 0x00000210
+#define FRF_AB_GPIO_OUT3_LBN 112
+#define FRF_AB_GPIO_OUT3_WIDTH 16
+#define FRF_AB_GPIO_IN3_LBN 104
+#define FRF_AB_GPIO_IN3_WIDTH 8
+#define FRF_AB_GPIO_PWRUP_VALUE3_LBN 96
+#define FRF_AB_GPIO_PWRUP_VALUE3_WIDTH 8
+#define FRF_AB_GPIO_OUT2_LBN 80
+#define FRF_AB_GPIO_OUT2_WIDTH 16
+#define FRF_AB_GPIO_IN2_LBN 72
+#define FRF_AB_GPIO_IN2_WIDTH 8
+#define FRF_AB_GPIO_PWRUP_VALUE2_LBN 64
+#define FRF_AB_GPIO_PWRUP_VALUE2_WIDTH 8
+#define FRF_AB_GPIO15_OEN_LBN 63
+#define FRF_AB_GPIO15_OEN_WIDTH 1
+#define FRF_AB_GPIO14_OEN_LBN 62
+#define FRF_AB_GPIO14_OEN_WIDTH 1
+#define FRF_AB_GPIO13_OEN_LBN 61
+#define FRF_AB_GPIO13_OEN_WIDTH 1
+#define FRF_AB_GPIO12_OEN_LBN 60
+#define FRF_AB_GPIO12_OEN_WIDTH 1
+#define FRF_AB_GPIO11_OEN_LBN 59
+#define FRF_AB_GPIO11_OEN_WIDTH 1
+#define FRF_AB_GPIO10_OEN_LBN 58
+#define FRF_AB_GPIO10_OEN_WIDTH 1
+#define FRF_AB_GPIO9_OEN_LBN 57
+#define FRF_AB_GPIO9_OEN_WIDTH 1
+#define FRF_AB_GPIO8_OEN_LBN 56
+#define FRF_AB_GPIO8_OEN_WIDTH 1
+#define FRF_AB_GPIO15_OUT_LBN 55
+#define FRF_AB_GPIO15_OUT_WIDTH 1
+#define FRF_AB_GPIO14_OUT_LBN 54
+#define FRF_AB_GPIO14_OUT_WIDTH 1
+#define FRF_AB_GPIO13_OUT_LBN 53
+#define FRF_AB_GPIO13_OUT_WIDTH 1
+#define FRF_AB_GPIO12_OUT_LBN 52
+#define FRF_AB_GPIO12_OUT_WIDTH 1
+#define FRF_AB_GPIO11_OUT_LBN 51
+#define FRF_AB_GPIO11_OUT_WIDTH 1
+#define FRF_AB_GPIO10_OUT_LBN 50
+#define FRF_AB_GPIO10_OUT_WIDTH 1
+#define FRF_AB_GPIO9_OUT_LBN 49
+#define FRF_AB_GPIO9_OUT_WIDTH 1
+#define FRF_AB_GPIO8_OUT_LBN 48
+#define FRF_AB_GPIO8_OUT_WIDTH 1
+#define FRF_AB_GPIO15_IN_LBN 47
+#define FRF_AB_GPIO15_IN_WIDTH 1
+#define FRF_AB_GPIO14_IN_LBN 46
+#define FRF_AB_GPIO14_IN_WIDTH 1
+#define FRF_AB_GPIO13_IN_LBN 45
+#define FRF_AB_GPIO13_IN_WIDTH 1
+#define FRF_AB_GPIO12_IN_LBN 44
+#define FRF_AB_GPIO12_IN_WIDTH 1
+#define FRF_AB_GPIO11_IN_LBN 43
+#define FRF_AB_GPIO11_IN_WIDTH 1
+#define FRF_AB_GPIO10_IN_LBN 42
+#define FRF_AB_GPIO10_IN_WIDTH 1
+#define FRF_AB_GPIO9_IN_LBN 41
+#define FRF_AB_GPIO9_IN_WIDTH 1
+#define FRF_AB_GPIO8_IN_LBN 40
+#define FRF_AB_GPIO8_IN_WIDTH 1
+#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39
+#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38
+#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37
+#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36
+#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35
+#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34
+#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33
+#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32
+#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_CLK156_OUT_EN_LBN 31
+#define FRF_AB_CLK156_OUT_EN_WIDTH 1
+#define FRF_AB_USE_NIC_CLK_LBN 30
+#define FRF_AB_USE_NIC_CLK_WIDTH 1
+#define FRF_AB_GPIO5_OEN_LBN 29
+#define FRF_AB_GPIO5_OEN_WIDTH 1
+#define FRF_AB_GPIO4_OEN_LBN 28
+#define FRF_AB_GPIO4_OEN_WIDTH 1
+#define FRF_AB_GPIO3_OEN_LBN 27
+#define FRF_AB_GPIO3_OEN_WIDTH 1
+#define FRF_AB_GPIO2_OEN_LBN 26
+#define FRF_AB_GPIO2_OEN_WIDTH 1
+#define FRF_AB_GPIO1_OEN_LBN 25
+#define FRF_AB_GPIO1_OEN_WIDTH 1
+#define FRF_AB_GPIO0_OEN_LBN 24
+#define FRF_AB_GPIO0_OEN_WIDTH 1
+#define FRF_AB_GPIO7_OUT_LBN 23
+#define FRF_AB_GPIO7_OUT_WIDTH 1
+#define FRF_AB_GPIO6_OUT_LBN 22
+#define FRF_AB_GPIO6_OUT_WIDTH 1
+#define FRF_AB_GPIO5_OUT_LBN 21
+#define FRF_AB_GPIO5_OUT_WIDTH 1
+#define FRF_AB_GPIO4_OUT_LBN 20
+#define FRF_AB_GPIO4_OUT_WIDTH 1
+#define FRF_AB_GPIO3_OUT_LBN 19
+#define FRF_AB_GPIO3_OUT_WIDTH 1
+#define FRF_AB_GPIO2_OUT_LBN 18
+#define FRF_AB_GPIO2_OUT_WIDTH 1
+#define FRF_AB_GPIO1_OUT_LBN 17
+#define FRF_AB_GPIO1_OUT_WIDTH 1
+#define FRF_AB_GPIO0_OUT_LBN 16
+#define FRF_AB_GPIO0_OUT_WIDTH 1
+#define FRF_AB_GPIO7_IN_LBN 15
+#define FRF_AB_GPIO7_IN_WIDTH 1
+#define FRF_AB_GPIO6_IN_LBN 14
+#define FRF_AB_GPIO6_IN_WIDTH 1
+#define FRF_AB_GPIO5_IN_LBN 13
+#define FRF_AB_GPIO5_IN_WIDTH 1
+#define FRF_AB_GPIO4_IN_LBN 12
+#define FRF_AB_GPIO4_IN_WIDTH 1
+#define FRF_AB_GPIO3_IN_LBN 11
+#define FRF_AB_GPIO3_IN_WIDTH 1
+#define FRF_AB_GPIO2_IN_LBN 10
+#define FRF_AB_GPIO2_IN_WIDTH 1
+#define FRF_AB_GPIO1_IN_LBN 9
+#define FRF_AB_GPIO1_IN_WIDTH 1
+#define FRF_AB_GPIO0_IN_LBN 8
+#define FRF_AB_GPIO0_IN_WIDTH 1
+#define FRF_AB_GPIO7_PWRUP_VALUE_LBN 7
+#define FRF_AB_GPIO7_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO6_PWRUP_VALUE_LBN 6
+#define FRF_AB_GPIO6_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5
+#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4
+#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3
+#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2
+#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1
+#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0
+#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1
+
+/* GLB_CTL_REG: Global control register */
+#define FR_AB_GLB_CTL 0x00000220
+#define FRF_AB_EXT_PHY_RST_CTL_LBN 63
+#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1
+#define FRF_AB_XAUI_SD_RST_CTL_LBN 62
+#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_SD_RST_CTL_LBN 61
+#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1
+#define FRF_AA_PCIX_RST_CTL_LBN 60
+#define FRF_AA_PCIX_RST_CTL_WIDTH 1
+#define FRF_BB_BIU_RST_CTL_LBN 60
+#define FRF_BB_BIU_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59
+#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58
+#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57
+#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1
+#define FRF_AB_XGRX_RST_CTL_LBN 56
+#define FRF_AB_XGRX_RST_CTL_WIDTH 1
+#define FRF_AB_XGTX_RST_CTL_LBN 55
+#define FRF_AB_XGTX_RST_CTL_WIDTH 1
+#define FRF_AB_EM_RST_CTL_LBN 54
+#define FRF_AB_EM_RST_CTL_WIDTH 1
+#define FRF_AB_EV_RST_CTL_LBN 53
+#define FRF_AB_EV_RST_CTL_WIDTH 1
+#define FRF_AB_SR_RST_CTL_LBN 52
+#define FRF_AB_SR_RST_CTL_WIDTH 1
+#define FRF_AB_RX_RST_CTL_LBN 51
+#define FRF_AB_RX_RST_CTL_WIDTH 1
+#define FRF_AB_TX_RST_CTL_LBN 50
+#define FRF_AB_TX_RST_CTL_WIDTH 1
+#define FRF_AB_EE_RST_CTL_LBN 49
+#define FRF_AB_EE_RST_CTL_WIDTH 1
+#define FRF_AB_CS_RST_CTL_LBN 48
+#define FRF_AB_CS_RST_CTL_WIDTH 1
+#define FRF_AB_HOT_RST_CTL_LBN 40
+#define FRF_AB_HOT_RST_CTL_WIDTH 2
+#define FRF_AB_RST_EXT_PHY_LBN 31
+#define FRF_AB_RST_EXT_PHY_WIDTH 1
+#define FRF_AB_RST_XAUI_SD_LBN 30
+#define FRF_AB_RST_XAUI_SD_WIDTH 1
+#define FRF_AB_RST_PCIE_SD_LBN 29
+#define FRF_AB_RST_PCIE_SD_WIDTH 1
+#define FRF_AA_RST_PCIX_LBN 28
+#define FRF_AA_RST_PCIX_WIDTH 1
+#define FRF_BB_RST_BIU_LBN 28
+#define FRF_BB_RST_BIU_WIDTH 1
+#define FRF_AB_RST_PCIE_STKY_LBN 27
+#define FRF_AB_RST_PCIE_STKY_WIDTH 1
+#define FRF_AB_RST_PCIE_NSTKY_LBN 26
+#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1
+#define FRF_AB_RST_PCIE_CORE_LBN 25
+#define FRF_AB_RST_PCIE_CORE_WIDTH 1
+#define FRF_AB_RST_XGRX_LBN 24
+#define FRF_AB_RST_XGRX_WIDTH 1
+#define FRF_AB_RST_XGTX_LBN 23
+#define FRF_AB_RST_XGTX_WIDTH 1
+#define FRF_AB_RST_EM_LBN 22
+#define FRF_AB_RST_EM_WIDTH 1
+#define FRF_AB_RST_EV_LBN 21
+#define FRF_AB_RST_EV_WIDTH 1
+#define FRF_AB_RST_SR_LBN 20
+#define FRF_AB_RST_SR_WIDTH 1
+#define FRF_AB_RST_RX_LBN 19
+#define FRF_AB_RST_RX_WIDTH 1
+#define FRF_AB_RST_TX_LBN 18
+#define FRF_AB_RST_TX_WIDTH 1
+#define FRF_AB_RST_SF_LBN 17
+#define FRF_AB_RST_SF_WIDTH 1
+#define FRF_AB_RST_CS_LBN 16
+#define FRF_AB_RST_CS_WIDTH 1
+#define FRF_AB_INT_RST_DUR_LBN 4
+#define FRF_AB_INT_RST_DUR_WIDTH 3
+#define FRF_AB_EXT_PHY_RST_DUR_LBN 1
+#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3
+#define FFE_AB_EXT_PHY_RST_DUR_10240US 7
+#define FFE_AB_EXT_PHY_RST_DUR_5120US 6
+#define FFE_AB_EXT_PHY_RST_DUR_2560US 5
+#define FFE_AB_EXT_PHY_RST_DUR_1280US 4
+#define FFE_AB_EXT_PHY_RST_DUR_640US 3
+#define FFE_AB_EXT_PHY_RST_DUR_320US 2
+#define FFE_AB_EXT_PHY_RST_DUR_160US 1
+#define FFE_AB_EXT_PHY_RST_DUR_80US 0
+#define FRF_AB_SWRST_LBN 0
+#define FRF_AB_SWRST_WIDTH 1
+
+/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
+#define FR_AZ_FATAL_INTR_KER 0x00000230
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40
+#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33
+#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32
+#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11
+#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_LBN 11
+#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10
+#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_LBN 8
+#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7
+#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6
+#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5
+#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4
+#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3
+#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2
+#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_LBN 1
+#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_LBN 0
+#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1
+
+/* FATAL_INTR_REG_CHAR: Fatal interrupt register for Char */
+#define FR_BZ_FATAL_INTR_CHAR 0x00000240
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1
+#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_LBN 43
+#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_LBN 42
+#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_LBN 41
+#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_MEM_PERR_INT_CHAR_EN_LBN 40
+#define FRF_BZ_MEM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_LBN 39
+#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_LBN 38
+#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37
+#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36
+#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_LBN 35
+#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_LBN 34
+#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_ILL_ADR_INT_CHAR_EN_LBN 33
+#define FRF_BZ_ILL_ADR_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_SRM_PERR_INT_CHAR_EN_LBN 32
+#define FRF_BZ_SRM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1
+#define FRF_BB_PCI_BUSERR_INT_CHAR_LBN 11
+#define FRF_BB_PCI_BUSERR_INT_CHAR_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11
+#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1
+#define FRF_BZ_SRAM_OOB_INT_CHAR_LBN 10
+#define FRF_BZ_SRAM_OOB_INT_CHAR_WIDTH 1
+#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_LBN 9
+#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1
+#define FRF_BZ_MEM_PERR_INT_CHAR_LBN 8
+#define FRF_BZ_MEM_PERR_INT_CHAR_WIDTH 1
+#define FRF_BZ_RBUF_OWN_INT_CHAR_LBN 7
+#define FRF_BZ_RBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_TBUF_OWN_INT_CHAR_LBN 6
+#define FRF_BZ_TBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_RDESCQ_OWN_INT_CHAR_LBN 5
+#define FRF_BZ_RDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_TDESCQ_OWN_INT_CHAR_LBN 4
+#define FRF_BZ_TDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_EVQ_OWN_INT_CHAR_LBN 3
+#define FRF_BZ_EVQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_EVF_OFLO_INT_CHAR_LBN 2
+#define FRF_BZ_EVF_OFLO_INT_CHAR_WIDTH 1
+#define FRF_BZ_ILL_ADR_INT_CHAR_LBN 1
+#define FRF_BZ_ILL_ADR_INT_CHAR_WIDTH 1
+#define FRF_BZ_SRM_PERR_INT_CHAR_LBN 0
+#define FRF_BZ_SRM_PERR_INT_CHAR_WIDTH 1
+
+/* DP_CTRL_REG: Datapath control register */
+#define FR_BZ_DP_CTRL 0x00000250
+#define FRF_BZ_FLS_EVQ_ID_LBN 0
+#define FRF_BZ_FLS_EVQ_ID_WIDTH 12
+
+/* MEM_STAT_REG: Memory status register */
+#define FR_AZ_MEM_STAT 0x00000260
+#define FRF_AB_MEM_PERR_VEC_LBN 53
+#define FRF_AB_MEM_PERR_VEC_WIDTH 38
+#define FRF_AB_MBIST_CORR_LBN 38
+#define FRF_AB_MBIST_CORR_WIDTH 15
+#define FRF_AB_MBIST_ERR_LBN 0
+#define FRF_AB_MBIST_ERR_WIDTH 40
+#define FRF_CZ_MEM_PERR_VEC_LBN 0
+#define FRF_CZ_MEM_PERR_VEC_WIDTH 35
+
+/* CS_DEBUG_REG: Debug register */
+#define FR_AZ_CS_DEBUG 0x00000270
+#define FRF_AB_GLB_DEBUG2_SEL_LBN 50
+#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL2_LBN 47
+#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL1_LBN 44
+#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL0_LBN 41
+#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3
+#define FRF_CZ_CS_PORT_NUM_LBN 40
+#define FRF_CZ_CS_PORT_NUM_WIDTH 2
+#define FRF_AB_MISC_DEBUG_ADDR_LBN 36
+#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31
+#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5
+#define FRF_CZ_CS_PORT_FPE_LBN 1
+#define FRF_CZ_CS_PORT_FPE_WIDTH 35
+#define FRF_AB_EM_DEBUG_ADDR_LBN 26
+#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_SR_DEBUG_ADDR_LBN 21
+#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_EV_DEBUG_ADDR_LBN 16
+#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_RX_DEBUG_ADDR_LBN 11
+#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_TX_DEBUG_ADDR_LBN 6
+#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1
+#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5
+#define FRF_AZ_CS_DEBUG_EN_LBN 0
+#define FRF_AZ_CS_DEBUG_EN_WIDTH 1
+
+/* DRIVER_REG: Driver scratch register [0-7] */
+#define FR_AZ_DRIVER 0x00000280
+#define FR_AZ_DRIVER_STEP 16
+#define FR_AZ_DRIVER_ROWS 8
+#define FRF_AZ_DRIVER_DW0_LBN 0
+#define FRF_AZ_DRIVER_DW0_WIDTH 32
+
+/* ALTERA_BUILD_REG: Altera build register */
+#define FR_AZ_ALTERA_BUILD 0x00000300
+#define FRF_AZ_ALTERA_BUILD_VER_LBN 0
+#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32
+
+/* CSR_SPARE_REG: Spare register */
+#define FR_AZ_CSR_SPARE 0x00000310
+#define FRF_AB_MEM_PERR_EN_LBN 64
+#define FRF_AB_MEM_PERR_EN_WIDTH 38
+#define FRF_CZ_MEM_PERR_EN_LBN 64
+#define FRF_CZ_MEM_PERR_EN_WIDTH 35
+#define FRF_AB_MEM_PERR_EN_TX_DATA_LBN 72
+#define FRF_AB_MEM_PERR_EN_TX_DATA_WIDTH 2
+#define FRF_AZ_CSR_SPARE_BITS_LBN 0
+#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32
+
+/* PCIE_SD_CTL0123_REG: PCIE SerDes control register 0 to 3 */
+#define FR_AB_PCIE_SD_CTL0123 0x00000320
+#define FRF_AB_PCIE_TESTSIG_H_LBN 96
+#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19
+#define FRF_AB_PCIE_TESTSIG_L_LBN 64
+#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19
+#define FRF_AB_PCIE_OFFSET_LBN 56
+#define FRF_AB_PCIE_OFFSET_WIDTH 8
+#define FRF_AB_PCIE_OFFSETEN_H_LBN 55
+#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1
+#define FRF_AB_PCIE_OFFSETEN_L_LBN 54
+#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_H_LBN 53
+#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_L_LBN 52
+#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_H_LBN 51
+#define FRF_AB_PCIE_PARRESET_H_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_L_LBN 50
+#define FRF_AB_PCIE_PARRESET_L_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49
+#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48
+#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1
+#define FRF_AB_PCIE_LPBK_LBN 40
+#define FRF_AB_PCIE_LPBK_WIDTH 8
+#define FRF_AB_PCIE_PARLPBK_LBN 32
+#define FRF_AB_PCIE_PARLPBK_WIDTH 8
+#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30
+#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28
+#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26
+#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24
+#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_RXEQCTL_H_LBN 18
+#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2
+#define FRF_AB_PCIE_RXEQCTL_L_LBN 16
+#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2
+#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3
+#define FFE_AB_PCIE_RXEQCTL_OFF 2
+#define FFE_AB_PCIE_RXEQCTL_MIN 1
+#define FFE_AB_PCIE_RXEQCTL_MAX 0
+#define FRF_AB_PCIE_HIDRV_LBN 8
+#define FRF_AB_PCIE_HIDRV_WIDTH 8
+#define FRF_AB_PCIE_LODRV_LBN 0
+#define FRF_AB_PCIE_LODRV_WIDTH 8
+
+/* PCIE_SD_CTL45_REG: PCIE SerDes control register 4 and 5 */
+#define FR_AB_PCIE_SD_CTL45 0x00000330
+#define FRF_AB_PCIE_DTX7_LBN 60
+#define FRF_AB_PCIE_DTX7_WIDTH 4
+#define FRF_AB_PCIE_DTX6_LBN 56
+#define FRF_AB_PCIE_DTX6_WIDTH 4
+#define FRF_AB_PCIE_DTX5_LBN 52
+#define FRF_AB_PCIE_DTX5_WIDTH 4
+#define FRF_AB_PCIE_DTX4_LBN 48
+#define FRF_AB_PCIE_DTX4_WIDTH 4
+#define FRF_AB_PCIE_DTX3_LBN 44
+#define FRF_AB_PCIE_DTX3_WIDTH 4
+#define FRF_AB_PCIE_DTX2_LBN 40
+#define FRF_AB_PCIE_DTX2_WIDTH 4
+#define FRF_AB_PCIE_DTX1_LBN 36
+#define FRF_AB_PCIE_DTX1_WIDTH 4
+#define FRF_AB_PCIE_DTX0_LBN 32
+#define FRF_AB_PCIE_DTX0_WIDTH 4
+#define FRF_AB_PCIE_DEQ7_LBN 28
+#define FRF_AB_PCIE_DEQ7_WIDTH 4
+#define FRF_AB_PCIE_DEQ6_LBN 24
+#define FRF_AB_PCIE_DEQ6_WIDTH 4
+#define FRF_AB_PCIE_DEQ5_LBN 20
+#define FRF_AB_PCIE_DEQ5_WIDTH 4
+#define FRF_AB_PCIE_DEQ4_LBN 16
+#define FRF_AB_PCIE_DEQ4_WIDTH 4
+#define FRF_AB_PCIE_DEQ3_LBN 12
+#define FRF_AB_PCIE_DEQ3_WIDTH 4
+#define FRF_AB_PCIE_DEQ2_LBN 8
+#define FRF_AB_PCIE_DEQ2_WIDTH 4
+#define FRF_AB_PCIE_DEQ1_LBN 4
+#define FRF_AB_PCIE_DEQ1_WIDTH 4
+#define FRF_AB_PCIE_DEQ0_LBN 0
+#define FRF_AB_PCIE_DEQ0_WIDTH 4
+
+/* PCIE_PCS_CTL_STAT_REG: PCIE PCS control and status register */
+#define FR_AB_PCIE_PCS_CTL_STAT 0x00000340
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4
+#define FRF_AB_PCIE_PRBSERR_LBN 40
+#define FRF_AB_PCIE_PRBSERR_WIDTH 8
+#define FRF_AB_PCIE_PRBSERRH0_LBN 32
+#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8
+#define FRF_AB_PCIE_FASTINIT_H_LBN 15
+#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1
+#define FRF_AB_PCIE_FASTINIT_L_LBN 14
+#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13
+#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12
+#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11
+#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10
+#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9
+#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8
+#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSEL_LBN 0
+#define FRF_AB_PCIE_PRBSSEL_WIDTH 8
+
+/* DEBUG_DATA_OUT_REG: Live Debug and Debug 2 out ports */
+#define FR_BB_DEBUG_DATA_OUT 0x00000350
+#define FRF_BB_DEBUG2_PORT_LBN 25
+#define FRF_BB_DEBUG2_PORT_WIDTH 15
+#define FRF_BB_DEBUG1_PORT_LBN 0
+#define FRF_BB_DEBUG1_PORT_WIDTH 25
+
+/* EVQ_RPTR_REGP0: Event queue read pointer register */
+#define FR_BZ_EVQ_RPTR_P0 0x00000400
+#define FR_BZ_EVQ_RPTR_P0_STEP 8192
+#define FR_BZ_EVQ_RPTR_P0_ROWS 1024
+/* EVQ_RPTR_REG_KER: Event queue read pointer register */
+#define FR_AA_EVQ_RPTR_KER 0x00011b00
+#define FR_AA_EVQ_RPTR_KER_STEP 4
+#define FR_AA_EVQ_RPTR_KER_ROWS 4
+/* EVQ_RPTR_REG: Event queue read pointer register */
+#define FR_BZ_EVQ_RPTR 0x00fa0000
+#define FR_BZ_EVQ_RPTR_STEP 16
+#define FR_BB_EVQ_RPTR_ROWS 4096
+#define FR_CZ_EVQ_RPTR_ROWS 1024
+/* EVQ_RPTR_REGP123: Event queue read pointer register */
+#define FR_BB_EVQ_RPTR_P123 0x01000400
+#define FR_BB_EVQ_RPTR_P123_STEP 8192
+#define FR_BB_EVQ_RPTR_P123_ROWS 3072
+#define FRF_AZ_EVQ_RPTR_VLD_LBN 15
+#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1
+#define FRF_AZ_EVQ_RPTR_LBN 0
+#define FRF_AZ_EVQ_RPTR_WIDTH 15
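+
+/*
+ * Illustrative use (a sketch, not part of the original header): a driver
+ * would typically acknowledge consumed events by writing the masked read
+ * pointer back through this register, assuming helpers along the lines of
+ * EFX_POPULATE_DWORD_1() and efx_writed_table() from the driver's
+ * bitfield.h and io.h:
+ *
+ *     efx_dword_t reg;
+ *     EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, read_ptr & evq_mask);
+ *     efx_writed_table(efx, &reg, FR_BZ_EVQ_RPTR_P0, channel_index);
+ *
+ * where evq_mask is the event queue entry count minus one.
+ */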
+
+/* TIMER_COMMAND_REGP0: Timer Command Registers */
+#define FR_BZ_TIMER_COMMAND_P0 0x00000420
+#define FR_BZ_TIMER_COMMAND_P0_STEP 8192
+#define FR_BZ_TIMER_COMMAND_P0_ROWS 1024
+/* TIMER_COMMAND_REG_KER: Timer Command Registers */
+#define FR_AA_TIMER_COMMAND_KER 0x00000420
+#define FR_AA_TIMER_COMMAND_KER_STEP 8192
+#define FR_AA_TIMER_COMMAND_KER_ROWS 4
+/* TIMER_COMMAND_REGP123: Timer Command Registers */
+#define FR_BB_TIMER_COMMAND_P123 0x01000420
+#define FR_BB_TIMER_COMMAND_P123_STEP 8192
+#define FR_BB_TIMER_COMMAND_P123_ROWS 3072
+#define FRF_CZ_TC_TIMER_MODE_LBN 14
+#define FRF_CZ_TC_TIMER_MODE_WIDTH 2
+#define FRF_AB_TC_TIMER_MODE_LBN 12
+#define FRF_AB_TC_TIMER_MODE_WIDTH 2
+#define FRF_CZ_TC_TIMER_VAL_LBN 0
+#define FRF_CZ_TC_TIMER_VAL_WIDTH 14
+#define FRF_AB_TC_TIMER_VAL_LBN 0
+#define FRF_AB_TC_TIMER_VAL_WIDTH 12
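+
+/*
+ * Illustrative use (sketch only): arming a per-queue interrupt moderation
+ * timer, assuming EFX_POPULATE_DWORD_2() and efx_writed_page() helpers
+ * from the driver's bitfield.h and io.h.  The hardware-defined mode
+ * encodings are not listed in this header and are left symbolic here:
+ *
+ *     efx_dword_t timer_cmd;
+ *     EFX_POPULATE_DWORD_2(timer_cmd,
+ *                          FRF_AB_TC_TIMER_MODE, mode,
+ *                          FRF_AB_TC_TIMER_VAL, ticks);
+ *     efx_writed_page(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0, queue);
+ */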
+
+/* DRV_EV_REG: Driver generated event register */
+#define FR_AZ_DRV_EV 0x00000440
+#define FRF_AZ_DRV_EV_QID_LBN 64
+#define FRF_AZ_DRV_EV_QID_WIDTH 12
+#define FRF_AZ_DRV_EV_DATA_LBN 0
+#define FRF_AZ_DRV_EV_DATA_WIDTH 64
+
+/* EVQ_CTL_REG: Event queue control register */
+#define FR_AZ_EVQ_CTL 0x00000450
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6
+#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14
+#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1
+#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7
+#define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7
+
+/* EVQ_CNT1_REG: Event counter 1 register */
+#define FR_AZ_EVQ_CNT1 0x00000460
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7
+#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100
+#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20
+#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80
+#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20
+
+/* EVQ_CNT2_REG: Event counter 2 register */
+#define FR_AZ_EVQ_CNT2 0x00000470
+#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104
+#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84
+#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RDY_CNT_LBN 80
+#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4
+#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20
+
+/* USR_EV_REG: Event mailbox register */
+#define FR_CZ_USR_EV 0x00000540
+#define FR_CZ_USR_EV_STEP 8192
+#define FR_CZ_USR_EV_ROWS 1024
+#define FRF_CZ_USR_EV_DATA_LBN 0
+#define FRF_CZ_USR_EV_DATA_WIDTH 32
+
+/* BUF_TBL_CFG_REG: Buffer table configuration register */
+#define FR_AZ_BUF_TBL_CFG 0x00000600
+#define FRF_AZ_BUF_TBL_MODE_LBN 3
+#define FRF_AZ_BUF_TBL_MODE_WIDTH 1
+
+/* SRM_RX_DC_CFG_REG: SRAM receive descriptor cache configuration register */
+#define FR_AZ_SRM_RX_DC_CFG 0x00000610
+#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21
+#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21
+
+/* SRM_TX_DC_CFG_REG: SRAM transmit descriptor cache configuration register */
+#define FR_AZ_SRM_TX_DC_CFG 0x00000620
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21
+
+/* SRM_CFG_REG: SRAM configuration register */
+#define FR_AZ_SRM_CFG 0x00000630
+#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5
+#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1
+#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4
+#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1
+#define FRF_AZ_SRM_INIT_EN_LBN 3
+#define FRF_AZ_SRM_INIT_EN_WIDTH 1
+#define FRF_AZ_SRM_NUM_BANK_LBN 2
+#define FRF_AZ_SRM_NUM_BANK_WIDTH 1
+#define FRF_AZ_SRM_BANK_SIZE_LBN 0
+#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2
+
+/* BUF_TBL_UPD_REG: Buffer table update register */
+#define FR_AZ_BUF_TBL_UPD 0x00000650
+#define FRF_AZ_BUF_UPD_CMD_LBN 63
+#define FRF_AZ_BUF_UPD_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_CMD_LBN 62
+#define FRF_AZ_BUF_CLR_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_END_ID_LBN 32
+#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20
+#define FRF_AZ_BUF_CLR_START_ID_LBN 0
+#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20
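+
+/*
+ * Illustrative use (sketch only): clearing a range of buffer table
+ * entries by buffer ID, assuming EFX_POPULATE_OWORD_4() and efx_writeo()
+ * from the driver's bitfield.h and io.h:
+ *
+ *     efx_oword_t upd;
+ *     EFX_POPULATE_OWORD_4(upd,
+ *                          FRF_AZ_BUF_UPD_CMD, 0,
+ *                          FRF_AZ_BUF_CLR_CMD, 1,
+ *                          FRF_AZ_BUF_CLR_START_ID, start_id,
+ *                          FRF_AZ_BUF_CLR_END_ID, end_id);
+ *     efx_writeo(efx, &upd, FR_AZ_BUF_TBL_UPD);
+ */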
+
+/* SRM_UPD_EVQ_REG: Buffer table update event queue register */
+#define FR_AZ_SRM_UPD_EVQ 0x00000660
+#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0
+#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12
+
+/* SRAM_PARITY_REG: SRAM parity register. */
+#define FR_AZ_SRAM_PARITY 0x00000670
+#define FRF_CZ_BYPASS_ECC_LBN 3
+#define FRF_CZ_BYPASS_ECC_WIDTH 1
+#define FRF_CZ_SEC_INT_LBN 2
+#define FRF_CZ_SEC_INT_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1
+#define FRF_AB_FORCE_SRAM_PERR_LBN 0
+#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1
+
+/* RX_CFG_REG: Receive configuration register */
+#define FR_AZ_RX_CFG 0x00000800
+#define FRF_CZ_RX_MIN_KBUF_SIZE_LBN 72
+#define FRF_CZ_RX_MIN_KBUF_SIZE_WIDTH 14
+#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71
+#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49
+#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4
+#define FRF_BZ_RX_TCP_SUP_LBN 48
+#define FRF_BZ_RX_TCP_SUP_WIDTH 1
+#define FRF_BZ_RX_INGR_EN_LBN 47
+#define FRF_BZ_RX_INGR_EN_WIDTH 1
+#define FRF_BZ_RX_IP_HASH_LBN 46
+#define FRF_BZ_RX_IP_HASH_WIDTH 1
+#define FRF_BZ_RX_HASH_ALG_LBN 45
+#define FRF_BZ_RX_HASH_ALG_WIDTH 1
+#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44
+#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1
+#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43
+#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42
+#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39
+#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_OWNERR_CTL_LBN 38
+#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1
+#define FRF_BZ_RX_XON_TX_TH_LBN 33
+#define FRF_BZ_RX_XON_TX_TH_WIDTH 5
+#define FRF_AA_RX_DESC_PUSH_EN_LBN 35
+#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_AA_RX_RDW_PATCH_EN_LBN 34
+#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31
+#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_XOFF_TX_TH_LBN 28
+#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_OWNERR_CTL_LBN 30
+#define FRF_AA_RX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_RX_XON_TX_TH_LBN 25
+#define FRF_AA_RX_XON_TX_TH_WIDTH 5
+#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19
+#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_AA_RX_XOFF_TX_TH_LBN 20
+#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_USR_BUF_SIZE_LBN 11
+#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_BZ_RX_XON_MAC_TH_LBN 10
+#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XON_MAC_TH_LBN 6
+#define FRF_AA_RX_XON_MAC_TH_WIDTH 5
+#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1
+#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XOFF_MAC_TH_LBN 1
+#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5
+#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0
+#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1
+
+/* RX_FILTER_CTL_REG: Receive filter control registers */
+#define FR_BZ_RX_FILTER_CTL 0x00000810
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1
+#define FRF_BZ_UDP_FULL_SRCH_LIMIT_LBN 32
+#define FRF_BZ_UDP_FULL_SRCH_LIMIT_WIDTH 8
+#define FRF_BZ_NUM_KER_LBN 24
+#define FRF_BZ_NUM_KER_WIDTH 2
+#define FRF_BZ_UDP_WILD_SRCH_LIMIT_LBN 16
+#define FRF_BZ_UDP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_BZ_TCP_WILD_SRCH_LIMIT_LBN 8
+#define FRF_BZ_TCP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_BZ_TCP_FULL_SRCH_LIMIT_LBN 0
+#define FRF_BZ_TCP_FULL_SRCH_LIMIT_WIDTH 8
+
+/* RX_FLUSH_DESCQ_REG: Receive flush descriptor queue register */
+#define FR_AZ_RX_FLUSH_DESCQ 0x00000820
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12
+
+/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
+#define FR_BZ_RX_DESC_UPD_P0 0x00000830
+#define FR_BZ_RX_DESC_UPD_P0_STEP 8192
+#define FR_BZ_RX_DESC_UPD_P0_ROWS 1024
+/* RX_DESC_UPD_REG_KER: Receive descriptor update register. */
+#define FR_AA_RX_DESC_UPD_KER 0x00000830
+#define FR_AA_RX_DESC_UPD_KER_STEP 8192
+#define FR_AA_RX_DESC_UPD_KER_ROWS 4
+/* RX_DESC_UPD_REGP123: Receive descriptor update register. */
+#define FR_BB_RX_DESC_UPD_P123 0x01000830
+#define FR_BB_RX_DESC_UPD_P123_STEP 8192
+#define FR_BB_RX_DESC_UPD_P123_ROWS 3072
+#define FRF_AZ_RX_DESC_WPTR_LBN 96
+#define FRF_AZ_RX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_RX_DESC_LBN 0
+#define FRF_AZ_RX_DESC_WIDTH 64
+
+/* RX_DC_CFG_REG: Receive descriptor cache configuration register */
+#define FR_AZ_RX_DC_CFG 0x00000840
+#define FRF_AB_RX_MAX_PF_LBN 2
+#define FRF_AB_RX_MAX_PF_WIDTH 2
+#define FRF_AZ_RX_DC_SIZE_LBN 0
+#define FRF_AZ_RX_DC_SIZE_WIDTH 2
+#define FFE_AZ_RX_DC_SIZE_64 3
+#define FFE_AZ_RX_DC_SIZE_32 2
+#define FFE_AZ_RX_DC_SIZE_16 1
+#define FFE_AZ_RX_DC_SIZE_8 0
+
+/* RX_DC_PF_WM_REG: Receive descriptor cache pre-fetch watermark register */
+#define FR_AZ_RX_DC_PF_WM 0x00000850
+#define FRF_AZ_RX_DC_PF_HWM_LBN 6
+#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6
+#define FRF_AZ_RX_DC_PF_LWM_LBN 0
+#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6
+
+/* RX_RSS_TKEY_REG: RSS Toeplitz hash key */
+#define FR_BZ_RX_RSS_TKEY 0x00000860
+#define FRF_BZ_RX_RSS_TKEY_HI_LBN 64
+#define FRF_BZ_RX_RSS_TKEY_HI_WIDTH 64
+#define FRF_BZ_RX_RSS_TKEY_LO_LBN 0
+#define FRF_BZ_RX_RSS_TKEY_LO_WIDTH 64
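+
+/*
+ * Illustrative use (sketch only): loading the 128-bit IPv4 RSS Toeplitz
+ * key as two 64-bit halves, assuming EFX_POPULATE_OWORD_2() and
+ * efx_writeo() helpers:
+ *
+ *     efx_oword_t tkey;
+ *     EFX_POPULATE_OWORD_2(tkey,
+ *                          FRF_BZ_RX_RSS_TKEY_HI, key_hi,
+ *                          FRF_BZ_RX_RSS_TKEY_LO, key_lo);
+ *     efx_writeo(efx, &tkey, FR_BZ_RX_RSS_TKEY);
+ */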
+
+/* RX_NODESC_DROP_REG: Receive dropped packet counter register */
+#define FR_AZ_RX_NODESC_DROP 0x00000880
+#define FRF_CZ_RX_NODESC_DROP_CNT_LBN 0
+#define FRF_CZ_RX_NODESC_DROP_CNT_WIDTH 32
+#define FRF_AB_RX_NODESC_DROP_CNT_LBN 0
+#define FRF_AB_RX_NODESC_DROP_CNT_WIDTH 16
+
+/* RX_SELF_RST_REG: Receive self reset register */
+#define FR_AA_RX_SELF_RST 0x00000890
+#define FRF_AA_RX_ISCSI_DIS_LBN 17
+#define FRF_AA_RX_ISCSI_DIS_WIDTH 1
+#define FRF_AA_RX_SW_RST_REG_LBN 16
+#define FRF_AA_RX_SW_RST_REG_WIDTH 1
+#define FRF_AA_RX_NODESC_WAIT_DIS_LBN 9
+#define FRF_AA_RX_NODESC_WAIT_DIS_WIDTH 1
+#define FRF_AA_RX_SELF_RST_EN_LBN 8
+#define FRF_AA_RX_SELF_RST_EN_WIDTH 1
+#define FRF_AA_RX_MAX_PF_LAT_LBN 4
+#define FRF_AA_RX_MAX_PF_LAT_WIDTH 4
+#define FRF_AA_RX_MAX_LU_LAT_LBN 0
+#define FRF_AA_RX_MAX_LU_LAT_WIDTH 4
+
+/* RX_DEBUG_REG: undocumented register */
+#define FR_AZ_RX_DEBUG 0x000008a0
+#define FRF_AZ_RX_DEBUG_LBN 0
+#define FRF_AZ_RX_DEBUG_WIDTH 64
+
+/* RX_PUSH_DROP_REG: Receive descriptor push dropped counter register */
+#define FR_AZ_RX_PUSH_DROP 0x000008b0
+#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32
+
+/* RX_RSS_IPV6_REG1: IPv6 RSS Toeplitz hash key low bytes */
+#define FR_CZ_RX_RSS_IPV6_REG1 0x000008d0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128
+
+/* RX_RSS_IPV6_REG2: IPv6 RSS Toeplitz hash key middle bytes */
+#define FR_CZ_RX_RSS_IPV6_REG2 0x000008e0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128
+
+/* RX_RSS_IPV6_REG3: IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings */
+#define FR_CZ_RX_RSS_IPV6_REG3 0x000008f0
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64
+
+/* TX_FLUSH_DESCQ_REG: Transmit flush descriptor queue register */
+#define FR_AZ_TX_FLUSH_DESCQ 0x00000a00
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12
+
+/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
+#define FR_BZ_TX_DESC_UPD_P0 0x00000a10
+#define FR_BZ_TX_DESC_UPD_P0_STEP 8192
+#define FR_BZ_TX_DESC_UPD_P0_ROWS 1024
+/* TX_DESC_UPD_REG_KER: Transmit descriptor update register. */
+#define FR_AA_TX_DESC_UPD_KER 0x00000a10
+#define FR_AA_TX_DESC_UPD_KER_STEP 8192
+#define FR_AA_TX_DESC_UPD_KER_ROWS 8
+/* TX_DESC_UPD_REGP123: Transmit descriptor update register. */
+#define FR_BB_TX_DESC_UPD_P123 0x01000a10
+#define FR_BB_TX_DESC_UPD_P123_STEP 8192
+#define FR_BB_TX_DESC_UPD_P123_ROWS 3072
+#define FRF_AZ_TX_DESC_WPTR_LBN 96
+#define FRF_AZ_TX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_TX_DESC_LBN 0
+#define FRF_AZ_TX_DESC_WIDTH 95
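+
+/*
+ * Illustrative use (sketch only): after queueing descriptors, the driver
+ * notifies hardware by writing the new write pointer; setting
+ * FRF_AZ_TX_DESC_PUSH_CMD allows the first descriptor to be pushed
+ * inline in the low bits.  Assuming EFX_POPULATE_OWORD_2() and
+ * efx_writeo_page() helpers:
+ *
+ *     efx_oword_t reg;
+ *     EFX_POPULATE_OWORD_2(reg,
+ *                          FRF_AZ_TX_DESC_PUSH_CMD, 0,
+ *                          FRF_AZ_TX_DESC_WPTR, write_ptr);
+ *     efx_writeo_page(efx, &reg, FR_BZ_TX_DESC_UPD_P0, queue);
+ */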
+
+/* TX_DC_CFG_REG: Transmit descriptor cache configuration register */
+#define FR_AZ_TX_DC_CFG 0x00000a20
+#define FRF_AZ_TX_DC_SIZE_LBN 0
+#define FRF_AZ_TX_DC_SIZE_WIDTH 2
+#define FFE_AZ_TX_DC_SIZE_32 2
+#define FFE_AZ_TX_DC_SIZE_16 1
+#define FFE_AZ_TX_DC_SIZE_8 0
+
+/* TX_CHKSM_CFG_REG: Transmit checksum configuration register */
+#define FR_AA_TX_CHKSM_CFG 0x00000a30
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32
+
+/* TX_CFG_REG: Transmit configuration register */
+#define FR_AZ_TX_CFG 0x00000a50
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16
+#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47
+#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1
+#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16
+#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15
+#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5
+#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1
+#define FRF_AZ_TX_P1_PRI_EN_LBN 4
+#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1
+#define FRF_AZ_TX_OWNERR_CTL_LBN 2
+#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0
+#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1
+
+/* TX_PUSH_DROP_REG: Transmit descriptor push dropped counter register */
+#define FR_AZ_TX_PUSH_DROP 0x00000a60
+#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32
+
+/* TX_RESERVED_REG: Additional transmit configuration register */
+#define FR_AZ_TX_RESERVED 0x00000a80
+#define FRF_AZ_TX_EVT_CNT_LBN 121
+#define FRF_AZ_TX_EVT_CNT_WIDTH 7
+#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119
+#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2
+#define FRF_AZ_TX_RD_COMP_TMR_LBN 96
+#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23
+#define FRF_AZ_TX_PUSH_EN_LBN 89
+#define FRF_AZ_TX_PUSH_EN_WIDTH 1
+#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88
+#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1
+#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85
+#define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1
+#define FRF_AZ_TX_DMAR_ST_P0_LBN 81
+#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1
+#define FRF_AZ_TX_DMAQ_ST_LBN 78
+#define FRF_AZ_TX_DMAQ_ST_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_LBN 64
+#define FRF_AZ_TX_RX_SPACER_WIDTH 8
+#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60
+#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1
+#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59
+#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1
+#define FRF_AZ_TX_PS_EVT_DIS_LBN 58
+#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_EN_LBN 57
+#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1
+#define FRF_AZ_TX_XP_TIMER_LBN 52
+#define FRF_AZ_TX_XP_TIMER_WIDTH 5
+#define FRF_AZ_TX_PREF_SPACER_LBN 44
+#define FRF_AZ_TX_PREF_SPACER_WIDTH 8
+#define FRF_AZ_TX_PREF_WD_TMR_LBN 22
+#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22
+#define FRF_AZ_TX_ONLY1TAG_LBN 21
+#define FRF_AZ_TX_ONLY1TAG_WIDTH 1
+#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19
+#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2
+#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18
+#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1
+#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17
+#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1
+#define FRF_AA_TX_DMA_FF_THR_LBN 16
+#define FRF_AA_TX_DMA_FF_THR_WIDTH 1
+#define FRF_AZ_TX_DMA_SPACER_LBN 8
+#define FRF_AZ_TX_DMA_SPACER_WIDTH 8
+#define FRF_AA_TX_TCP_DIS_LBN 7
+#define FRF_AA_TX_TCP_DIS_WIDTH 1
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1
+#define FRF_AA_TX_IP_DIS_LBN 6
+#define FRF_AA_TX_IP_DIS_WIDTH 1
+#define FRF_AZ_TX_MAX_CPL_LBN 2
+#define FRF_AZ_TX_MAX_CPL_WIDTH 2
+#define FFE_AZ_TX_MAX_CPL_16 3
+#define FFE_AZ_TX_MAX_CPL_8 2
+#define FFE_AZ_TX_MAX_CPL_4 1
+#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0
+#define FRF_AZ_TX_MAX_PREF_LBN 0
+#define FRF_AZ_TX_MAX_PREF_WIDTH 2
+#define FFE_AZ_TX_MAX_PREF_32 3
+#define FFE_AZ_TX_MAX_PREF_16 2
+#define FFE_AZ_TX_MAX_PREF_8 1
+#define FFE_AZ_TX_MAX_PREF_OFF 0
+
+/* TX_PACE_REG: Transmit pace control register */
+#define FR_BZ_TX_PACE 0x00000a90
+#define FRF_BZ_TX_PACE_SB_NOT_AF_LBN 19
+#define FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH 10
+#define FRF_BZ_TX_PACE_SB_AF_LBN 9
+#define FRF_BZ_TX_PACE_SB_AF_WIDTH 10
+#define FRF_BZ_TX_PACE_FB_BASE_LBN 5
+#define FRF_BZ_TX_PACE_FB_BASE_WIDTH 4
+#define FRF_BZ_TX_PACE_BIN_TH_LBN 0
+#define FRF_BZ_TX_PACE_BIN_TH_WIDTH 5
+
+/* TX_PACE_DROP_QID_REG: PACE Drop QID Counter */
+#define FR_BZ_TX_PACE_DROP_QID 0x00000aa0
+#define FRF_BZ_TX_PACE_QID_DRP_CNT_LBN 0
+#define FRF_BZ_TX_PACE_QID_DRP_CNT_WIDTH 16
+
+/* TX_VLAN_REG: Transmit VLAN tag register */
+#define FR_BB_TX_VLAN 0x00000ae0
+#define FRF_BB_TX_VLAN_EN_LBN 127
+#define FRF_BB_TX_VLAN_EN_WIDTH 1
+#define FRF_BB_TX_VLAN7_PORT1_EN_LBN 125
+#define FRF_BB_TX_VLAN7_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN7_PORT0_EN_LBN 124
+#define FRF_BB_TX_VLAN7_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN7_LBN 112
+#define FRF_BB_TX_VLAN7_WIDTH 12
+#define FRF_BB_TX_VLAN6_PORT1_EN_LBN 109
+#define FRF_BB_TX_VLAN6_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN6_PORT0_EN_LBN 108
+#define FRF_BB_TX_VLAN6_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN6_LBN 96
+#define FRF_BB_TX_VLAN6_WIDTH 12
+#define FRF_BB_TX_VLAN5_PORT1_EN_LBN 93
+#define FRF_BB_TX_VLAN5_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN5_PORT0_EN_LBN 92
+#define FRF_BB_TX_VLAN5_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN5_LBN 80
+#define FRF_BB_TX_VLAN5_WIDTH 12
+#define FRF_BB_TX_VLAN4_PORT1_EN_LBN 77
+#define FRF_BB_TX_VLAN4_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN4_PORT0_EN_LBN 76
+#define FRF_BB_TX_VLAN4_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN4_LBN 64
+#define FRF_BB_TX_VLAN4_WIDTH 12
+#define FRF_BB_TX_VLAN3_PORT1_EN_LBN 61
+#define FRF_BB_TX_VLAN3_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN3_PORT0_EN_LBN 60
+#define FRF_BB_TX_VLAN3_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN3_LBN 48
+#define FRF_BB_TX_VLAN3_WIDTH 12
+#define FRF_BB_TX_VLAN2_PORT1_EN_LBN 45
+#define FRF_BB_TX_VLAN2_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN2_PORT0_EN_LBN 44
+#define FRF_BB_TX_VLAN2_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN2_LBN 32
+#define FRF_BB_TX_VLAN2_WIDTH 12
+#define FRF_BB_TX_VLAN1_PORT1_EN_LBN 29
+#define FRF_BB_TX_VLAN1_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN1_PORT0_EN_LBN 28
+#define FRF_BB_TX_VLAN1_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN1_LBN 16
+#define FRF_BB_TX_VLAN1_WIDTH 12
+#define FRF_BB_TX_VLAN0_PORT1_EN_LBN 13
+#define FRF_BB_TX_VLAN0_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN0_PORT0_EN_LBN 12
+#define FRF_BB_TX_VLAN0_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN0_LBN 0
+#define FRF_BB_TX_VLAN0_WIDTH 12
+
+/* TX_IPFIL_PORTEN_REG: Transmit IP filter port enable register */
+#define FR_BZ_TX_IPFIL_PORTEN 0x00000af0
+#define FRF_BZ_TX_MADR0_FIL_EN_LBN 64
+#define FRF_BZ_TX_MADR0_FIL_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL31_PORT_EN_LBN 62
+#define FRF_BB_TX_IPFIL31_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL30_PORT_EN_LBN 60
+#define FRF_BB_TX_IPFIL30_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL29_PORT_EN_LBN 58
+#define FRF_BB_TX_IPFIL29_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL28_PORT_EN_LBN 56
+#define FRF_BB_TX_IPFIL28_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL27_PORT_EN_LBN 54
+#define FRF_BB_TX_IPFIL27_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL26_PORT_EN_LBN 52
+#define FRF_BB_TX_IPFIL26_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL25_PORT_EN_LBN 50
+#define FRF_BB_TX_IPFIL25_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL24_PORT_EN_LBN 48
+#define FRF_BB_TX_IPFIL24_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL23_PORT_EN_LBN 46
+#define FRF_BB_TX_IPFIL23_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL22_PORT_EN_LBN 44
+#define FRF_BB_TX_IPFIL22_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL21_PORT_EN_LBN 42
+#define FRF_BB_TX_IPFIL21_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL20_PORT_EN_LBN 40
+#define FRF_BB_TX_IPFIL20_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL19_PORT_EN_LBN 38
+#define FRF_BB_TX_IPFIL19_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL18_PORT_EN_LBN 36
+#define FRF_BB_TX_IPFIL18_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL17_PORT_EN_LBN 34
+#define FRF_BB_TX_IPFIL17_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL16_PORT_EN_LBN 32
+#define FRF_BB_TX_IPFIL16_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL15_PORT_EN_LBN 30
+#define FRF_BB_TX_IPFIL15_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL14_PORT_EN_LBN 28
+#define FRF_BB_TX_IPFIL14_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL13_PORT_EN_LBN 26
+#define FRF_BB_TX_IPFIL13_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL12_PORT_EN_LBN 24
+#define FRF_BB_TX_IPFIL12_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL11_PORT_EN_LBN 22
+#define FRF_BB_TX_IPFIL11_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL10_PORT_EN_LBN 20
+#define FRF_BB_TX_IPFIL10_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL9_PORT_EN_LBN 18
+#define FRF_BB_TX_IPFIL9_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL8_PORT_EN_LBN 16
+#define FRF_BB_TX_IPFIL8_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL7_PORT_EN_LBN 14
+#define FRF_BB_TX_IPFIL7_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL6_PORT_EN_LBN 12
+#define FRF_BB_TX_IPFIL6_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL5_PORT_EN_LBN 10
+#define FRF_BB_TX_IPFIL5_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL4_PORT_EN_LBN 8
+#define FRF_BB_TX_IPFIL4_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL3_PORT_EN_LBN 6
+#define FRF_BB_TX_IPFIL3_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL2_PORT_EN_LBN 4
+#define FRF_BB_TX_IPFIL2_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL1_PORT_EN_LBN 2
+#define FRF_BB_TX_IPFIL1_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL0_PORT_EN_LBN 0
+#define FRF_BB_TX_IPFIL0_PORT_EN_WIDTH 1
+
+/* TX_IPFIL_TBL: Transmit IP source address filter table */
+#define FR_BB_TX_IPFIL_TBL 0x00000b00
+#define FR_BB_TX_IPFIL_TBL_STEP 16
+#define FR_BB_TX_IPFIL_TBL_ROWS 16
+#define FRF_BB_TX_IPFIL_MASK_1_LBN 96
+#define FRF_BB_TX_IPFIL_MASK_1_WIDTH 32
+#define FRF_BB_TX_IP_SRC_ADR_1_LBN 64
+#define FRF_BB_TX_IP_SRC_ADR_1_WIDTH 32
+#define FRF_BB_TX_IPFIL_MASK_0_LBN 32
+#define FRF_BB_TX_IPFIL_MASK_0_WIDTH 32
+#define FRF_BB_TX_IP_SRC_ADR_0_LBN 0
+#define FRF_BB_TX_IP_SRC_ADR_0_WIDTH 32
+
+/* MD_TXD_REG: PHY management transmit data register */
+#define FR_AB_MD_TXD 0x00000c00
+#define FRF_AB_MD_TXD_LBN 0
+#define FRF_AB_MD_TXD_WIDTH 16
+
+/* MD_RXD_REG: PHY management receive data register */
+#define FR_AB_MD_RXD 0x00000c10
+#define FRF_AB_MD_RXD_LBN 0
+#define FRF_AB_MD_RXD_WIDTH 16
+
+/* MD_CS_REG: PHY management configuration & status register */
+#define FR_AB_MD_CS 0x00000c20
+#define FRF_AB_MD_RD_EN_CMD_LBN 15
+#define FRF_AB_MD_RD_EN_CMD_WIDTH 1
+#define FRF_AB_MD_WR_EN_CMD_LBN 14
+#define FRF_AB_MD_WR_EN_CMD_WIDTH 1
+#define FRF_AB_MD_ADDR_CMD_LBN 13
+#define FRF_AB_MD_ADDR_CMD_WIDTH 1
+#define FRF_AB_MD_PT_LBN 7
+#define FRF_AB_MD_PT_WIDTH 3
+#define FRF_AB_MD_PL_LBN 6
+#define FRF_AB_MD_PL_WIDTH 1
+#define FRF_AB_MD_INT_CLR_LBN 5
+#define FRF_AB_MD_INT_CLR_WIDTH 1
+#define FRF_AB_MD_GC_LBN 4
+#define FRF_AB_MD_GC_WIDTH 1
+#define FRF_AB_MD_PRSP_LBN 3
+#define FRF_AB_MD_PRSP_WIDTH 1
+#define FRF_AB_MD_RIC_LBN 2
+#define FRF_AB_MD_RIC_WIDTH 1
+#define FRF_AB_MD_RDC_LBN 1
+#define FRF_AB_MD_RDC_WIDTH 1
+#define FRF_AB_MD_WRC_LBN 0
+#define FRF_AB_MD_WRC_WIDTH 1
+
+/* MD_PHY_ADR_REG: PHY management PHY address register */
+#define FR_AB_MD_PHY_ADR 0x00000c30
+#define FRF_AB_MD_PHY_ADR_LBN 0
+#define FRF_AB_MD_PHY_ADR_WIDTH 16
+
+/* MD_ID_REG: PHY management ID register */
+#define FR_AB_MD_ID 0x00000c40
+#define FRF_AB_MD_PRT_ADR_LBN 11
+#define FRF_AB_MD_PRT_ADR_WIDTH 5
+#define FRF_AB_MD_DEV_ADR_LBN 6
+#define FRF_AB_MD_DEV_ADR_WIDTH 5
+
+/* MD_STAT_REG: PHY management status & mask register */
+#define FR_AB_MD_STAT 0x00000c50
+#define FRF_AB_MD_PINT_LBN 4
+#define FRF_AB_MD_PINT_WIDTH 1
+#define FRF_AB_MD_DONE_LBN 3
+#define FRF_AB_MD_DONE_WIDTH 1
+#define FRF_AB_MD_BSERR_LBN 2
+#define FRF_AB_MD_BSERR_WIDTH 1
+#define FRF_AB_MD_LNFL_LBN 1
+#define FRF_AB_MD_LNFL_WIDTH 1
+#define FRF_AB_MD_BSY_LBN 0
+#define FRF_AB_MD_BSY_WIDTH 1
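+
+/*
+ * Illustrative MDIO write sequence (sketch only): the MD_* registers
+ * together implement PHY management.  Assuming EFX_POPULATE_OWORD_*()
+ * and efx_writeo() helpers, with busy-wait polling elided:
+ *
+ *     efx_oword_t reg;
+ *     EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
+ *                          FRF_AB_MD_DEV_ADR, devad);
+ *     efx_writeo(efx, &reg, FR_AB_MD_ID);
+ *     EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
+ *     efx_writeo(efx, &reg, FR_AB_MD_TXD);
+ *     EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_WRC, 1, FRF_AB_MD_GC, 0);
+ *     efx_writeo(efx, &reg, FR_AB_MD_CS);
+ *     ... then poll FRF_AB_MD_BSY in FR_AB_MD_STAT until clear ...
+ */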
+
+/* MAC_STAT_DMA_REG: Port MAC statistical counter DMA register */
+#define FR_AB_MAC_STAT_DMA 0x00000c60
+#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48
+#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1
+#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0
+#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48
+
+/* MAC_CTRL_REG: Port MAC control register */
+#define FR_AB_MAC_CTRL 0x00000c80
+#define FRF_AB_MAC_XOFF_VAL_LBN 16
+#define FRF_AB_MAC_XOFF_VAL_WIDTH 16
+#define FRF_BB_TXFIFO_DRAIN_EN_LBN 7
+#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1
+#define FRF_AB_MAC_XG_DISTXCRC_LBN 5
+#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1
+#define FRF_AB_MAC_BCAD_ACPT_LBN 4
+#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1
+#define FRF_AB_MAC_UC_PROM_LBN 3
+#define FRF_AB_MAC_UC_PROM_WIDTH 1
+#define FRF_AB_MAC_LINK_STATUS_LBN 2
+#define FRF_AB_MAC_LINK_STATUS_WIDTH 1
+#define FRF_AB_MAC_SPEED_LBN 0
+#define FRF_AB_MAC_SPEED_WIDTH 2
+#define FFE_AB_MAC_SPEED_10G 3
+#define FFE_AB_MAC_SPEED_1G 2
+#define FFE_AB_MAC_SPEED_100M 1
+#define FFE_AB_MAC_SPEED_10M 0
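+
+/*
+ * Illustrative use (sketch only): reconfiguring the port MAC for link
+ * speed and flow control, assuming EFX_POPULATE_OWORD_3() and
+ * efx_writeo() helpers; remaining fields are left at zero:
+ *
+ *     efx_oword_t mac_ctrl;
+ *     EFX_POPULATE_OWORD_3(mac_ctrl,
+ *                          FRF_AB_MAC_XOFF_VAL, 0xffff,
+ *                          FRF_AB_MAC_LINK_STATUS, link_up,
+ *                          FRF_AB_MAC_SPEED, FFE_AB_MAC_SPEED_10G);
+ *     efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
+ */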
+
+/* GEN_MODE_REG: General Purpose mode register (external interrupt mask) */
+#define FR_BB_GEN_MODE 0x00000c90
+#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3
+#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2
+#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XFP_PHY_INT_MASK_LBN 1
+#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1
+#define FRF_BB_XG_PHY_INT_MASK_LBN 0
+#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1
+
+/* MAC_MC_HASH_REG0: Multicast address hash table */
+#define FR_AB_MAC_MC_HASH_REG0 0x00000ca0
+#define FRF_AB_MAC_MCAST_HASH0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128
+
+/* MAC_MC_HASH_REG1: Multicast address hash table */
+#define FR_AB_MAC_MC_HASH_REG1 0x00000cb0
+#define FRF_AB_MAC_MCAST_HASH1_LBN 0
+#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128
+
+/* GM_CFG1_REG: GMAC configuration register 1 */
+#define FR_AB_GM_CFG1 0x00000e00
+#define FRF_AB_GM_SW_RST_LBN 31
+#define FRF_AB_GM_SW_RST_WIDTH 1
+#define FRF_AB_GM_SIM_RST_LBN 30
+#define FRF_AB_GM_SIM_RST_WIDTH 1
+#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19
+#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18
+#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_RX_FUNC_LBN 17
+#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1
+#define FRF_AB_GM_RST_TX_FUNC_LBN 16
+#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1
+#define FRF_AB_GM_LOOP_LBN 8
+#define FRF_AB_GM_LOOP_WIDTH 1
+#define FRF_AB_GM_RX_FC_EN_LBN 5
+#define FRF_AB_GM_RX_FC_EN_WIDTH 1
+#define FRF_AB_GM_TX_FC_EN_LBN 4
+#define FRF_AB_GM_TX_FC_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_RXEN_LBN 3
+#define FRF_AB_GM_SYNC_RXEN_WIDTH 1
+#define FRF_AB_GM_RX_EN_LBN 2
+#define FRF_AB_GM_RX_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_TXEN_LBN 1
+#define FRF_AB_GM_SYNC_TXEN_WIDTH 1
+#define FRF_AB_GM_TX_EN_LBN 0
+#define FRF_AB_GM_TX_EN_WIDTH 1
+
+/* GM_CFG2_REG: GMAC configuration register 2 */
+#define FR_AB_GM_CFG2 0x00000e10
+#define FRF_AB_GM_PAMBL_LEN_LBN 12
+#define FRF_AB_GM_PAMBL_LEN_WIDTH 4
+#define FRF_AB_GM_IF_MODE_LBN 8
+#define FRF_AB_GM_IF_MODE_WIDTH 2
+#define FFE_AB_IF_MODE_BYTE_MODE 2
+#define FFE_AB_IF_MODE_NIBBLE_MODE 1
+#define FRF_AB_GM_HUGE_FRM_EN_LBN 5
+#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1
+#define FRF_AB_GM_LEN_CHK_LBN 4
+#define FRF_AB_GM_LEN_CHK_WIDTH 1
+#define FRF_AB_GM_PAD_CRC_EN_LBN 2
+#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1
+#define FRF_AB_GM_CRC_EN_LBN 1
+#define FRF_AB_GM_CRC_EN_WIDTH 1
+#define FRF_AB_GM_FD_LBN 0
+#define FRF_AB_GM_FD_WIDTH 1
+
+/* GM_IPG_REG: GMAC IPG register */
+#define FR_AB_GM_IPG 0x00000e20
+#define FRF_AB_GM_NONB2B_IPG1_LBN 24
+#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7
+#define FRF_AB_GM_NONB2B_IPG2_LBN 16
+#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7
+#define FRF_AB_GM_MIN_IPG_ENF_LBN 8
+#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8
+#define FRF_AB_GM_B2B_IPG_LBN 0
+#define FRF_AB_GM_B2B_IPG_WIDTH 7
+
+/* GM_HD_REG: GMAC half duplex register */
+#define FR_AB_GM_HD 0x00000e30
+#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20
+#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4
+#define FRF_AB_GM_ALT_BOFF_EN_LBN 19
+#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1
+#define FRF_AB_GM_BP_NO_BOFF_LBN 18
+#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1
+#define FRF_AB_GM_DIS_BOFF_LBN 17
+#define FRF_AB_GM_DIS_BOFF_WIDTH 1
+#define FRF_AB_GM_EXDEF_TX_EN_LBN 16
+#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1
+#define FRF_AB_GM_RTRY_LIMIT_LBN 12
+#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4
+#define FRF_AB_GM_COL_WIN_LBN 0
+#define FRF_AB_GM_COL_WIN_WIDTH 10
+
+/* GM_MAX_FLEN_REG: GMAC maximum frame length register */
+#define FR_AB_GM_MAX_FLEN 0x00000e40
+#define FRF_AB_GM_MAX_FLEN_LBN 0
+#define FRF_AB_GM_MAX_FLEN_WIDTH 16
+
+/* GM_TEST_REG: GMAC test register */
+#define FR_AB_GM_TEST 0x00000e70
+#define FRF_AB_GM_MAX_BOFF_LBN 3
+#define FRF_AB_GM_MAX_BOFF_WIDTH 1
+#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2
+#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1
+#define FRF_AB_GM_TEST_PAUSE_LBN 1
+#define FRF_AB_GM_TEST_PAUSE_WIDTH 1
+#define FRF_AB_GM_SHORT_SLOT_LBN 0
+#define FRF_AB_GM_SHORT_SLOT_WIDTH 1
+
+/* GM_ADR1_REG: GMAC station address register 1 */
+#define FR_AB_GM_ADR1 0x00000f00
+#define FRF_AB_GM_ADR_B0_LBN 24
+#define FRF_AB_GM_ADR_B0_WIDTH 8
+#define FRF_AB_GM_ADR_B1_LBN 16
+#define FRF_AB_GM_ADR_B1_WIDTH 8
+#define FRF_AB_GM_ADR_B2_LBN 8
+#define FRF_AB_GM_ADR_B2_WIDTH 8
+#define FRF_AB_GM_ADR_B3_LBN 0
+#define FRF_AB_GM_ADR_B3_WIDTH 8
+
+/* GM_ADR2_REG: GMAC station address register 2 */
+#define FR_AB_GM_ADR2 0x00000f10
+#define FRF_AB_GM_ADR_B4_LBN 24
+#define FRF_AB_GM_ADR_B4_WIDTH 8
+#define FRF_AB_GM_ADR_B5_LBN 16
+#define FRF_AB_GM_ADR_B5_WIDTH 8
+
+/* GMF_CFG0_REG: GMAC FIFO configuration register 0 */
+#define FR_AB_GMF_CFG0 0x00000f20
+#define FRF_AB_GMF_FTFENRPLY_LBN 20
+#define FRF_AB_GMF_FTFENRPLY_WIDTH 1
+#define FRF_AB_GMF_STFENRPLY_LBN 19
+#define FRF_AB_GMF_STFENRPLY_WIDTH 1
+#define FRF_AB_GMF_FRFENRPLY_LBN 18
+#define FRF_AB_GMF_FRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_SRFENRPLY_LBN 17
+#define FRF_AB_GMF_SRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_WTMENRPLY_LBN 16
+#define FRF_AB_GMF_WTMENRPLY_WIDTH 1
+#define FRF_AB_GMF_FTFENREQ_LBN 12
+#define FRF_AB_GMF_FTFENREQ_WIDTH 1
+#define FRF_AB_GMF_STFENREQ_LBN 11
+#define FRF_AB_GMF_STFENREQ_WIDTH 1
+#define FRF_AB_GMF_FRFENREQ_LBN 10
+#define FRF_AB_GMF_FRFENREQ_WIDTH 1
+#define FRF_AB_GMF_SRFENREQ_LBN 9
+#define FRF_AB_GMF_SRFENREQ_WIDTH 1
+#define FRF_AB_GMF_WTMENREQ_LBN 8
+#define FRF_AB_GMF_WTMENREQ_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFT_LBN 4
+#define FRF_AB_GMF_HSTRSTFT_WIDTH 1
+#define FRF_AB_GMF_HSTRSTST_LBN 3
+#define FRF_AB_GMF_HSTRSTST_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFR_LBN 2
+#define FRF_AB_GMF_HSTRSTFR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTSR_LBN 1
+#define FRF_AB_GMF_HSTRSTSR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTWT_LBN 0
+#define FRF_AB_GMF_HSTRSTWT_WIDTH 1
+
+/* GMF_CFG1_REG: GMAC FIFO configuration register 1 */
+#define FR_AB_GMF_CFG1 0x00000f30
+#define FRF_AB_GMF_CFGFRTH_LBN 16
+#define FRF_AB_GMF_CFGFRTH_WIDTH 5
+#define FRF_AB_GMF_CFGXOFFRTX_LBN 0
+#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16
+
+/* GMF_CFG2_REG: GMAC FIFO configuration register 2 */
+#define FR_AB_GMF_CFG2 0x00000f40
+#define FRF_AB_GMF_CFGHWM_LBN 16
+#define FRF_AB_GMF_CFGHWM_WIDTH 6
+#define FRF_AB_GMF_CFGLWM_LBN 0
+#define FRF_AB_GMF_CFGLWM_WIDTH 6
+
+/* GMF_CFG3_REG: GMAC FIFO configuration register 3 */
+#define FR_AB_GMF_CFG3 0x00000f50
+#define FRF_AB_GMF_CFGHWMFT_LBN 16
+#define FRF_AB_GMF_CFGHWMFT_WIDTH 6
+#define FRF_AB_GMF_CFGFTTH_LBN 0
+#define FRF_AB_GMF_CFGFTTH_WIDTH 6
+
+/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
+#define FR_AB_GMF_CFG4 0x00000f60
+#define FRF_AB_GMF_HSTFLTRFRM_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18
+
+/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
+#define FR_AB_GMF_CFG5 0x00000f70
+#define FRF_AB_GMF_CFGHDPLX_LBN 22
+#define FRF_AB_GMF_CFGHDPLX_WIDTH 1
+#define FRF_AB_GMF_SRFULL_LBN 21
+#define FRF_AB_GMF_SRFULL_WIDTH 1
+#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20
+#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1
+#define FRF_AB_GMF_CFGBYTMODE_LBN 19
+#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1
+#define FRF_AB_GMF_HSTDRPLT64_LBN 18
+#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1
+#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18
+
+/* TX_SRC_MAC_TBL: Transmit MAC source address filter table */
+#define FR_BB_TX_SRC_MAC_TBL 0x00001000
+#define FR_BB_TX_SRC_MAC_TBL_STEP 16
+#define FR_BB_TX_SRC_MAC_TBL_ROWS 16
+#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64
+#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48
+#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0
+#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48
+
+/* TX_SRC_MAC_CTL_REG: Transmit MAC source address filter control */
+#define FR_BB_TX_SRC_MAC_CTL 0x00001100
+#define FRF_BB_TX_SRC_DROP_CTR_LBN 16
+#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16
+#define FRF_BB_TX_SRC_FLTR_EN_LBN 15
+#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1
+#define FRF_BB_TX_DROP_CTR_CLR_LBN 12
+#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1
+#define FRF_BB_TX_MAC_QID_SEL_LBN 0
+#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3
+
+/* XM_ADR_LO_REG: XGMAC address register low */
+#define FR_AB_XM_ADR_LO 0x00001200
+#define FRF_AB_XM_ADR_LO_LBN 0
+#define FRF_AB_XM_ADR_LO_WIDTH 32
+
+/* XM_ADR_HI_REG: XGMAC address register high */
+#define FR_AB_XM_ADR_HI 0x00001210
+#define FRF_AB_XM_ADR_HI_LBN 0
+#define FRF_AB_XM_ADR_HI_WIDTH 16
+
+/* XM_GLB_CFG_REG: XGMAC global configuration */
+#define FR_AB_XM_GLB_CFG 0x00001220
+#define FRF_AB_XM_RMTFLT_GEN_LBN 17
+#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1
+#define FRF_AB_XM_DEBUG_MODE_LBN 16
+#define FRF_AB_XM_DEBUG_MODE_WIDTH 1
+#define FRF_AB_XM_RX_STAT_EN_LBN 11
+#define FRF_AB_XM_RX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_TX_STAT_EN_LBN 10
+#define FRF_AB_XM_TX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6
+#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_WAN_MODE_LBN 5
+#define FRF_AB_XM_WAN_MODE_WIDTH 1
+#define FRF_AB_XM_INTCLR_MODE_LBN 3
+#define FRF_AB_XM_INTCLR_MODE_WIDTH 1
+#define FRF_AB_XM_CORE_RST_LBN 0
+#define FRF_AB_XM_CORE_RST_WIDTH 1
+
+/* XM_TX_CFG_REG: XGMAC transmit configuration */
+#define FR_AB_XM_TX_CFG 0x00001230
+#define FRF_AB_XM_TX_PROG_LBN 24
+#define FRF_AB_XM_TX_PROG_WIDTH 1
+#define FRF_AB_XM_IPG_LBN 16
+#define FRF_AB_XM_IPG_WIDTH 4
+#define FRF_AB_XM_FCNTL_LBN 10
+#define FRF_AB_XM_FCNTL_WIDTH 1
+#define FRF_AB_XM_TXCRC_LBN 8
+#define FRF_AB_XM_TXCRC_WIDTH 1
+#define FRF_AB_XM_EDRC_LBN 6
+#define FRF_AB_XM_EDRC_WIDTH 1
+#define FRF_AB_XM_AUTO_PAD_LBN 5
+#define FRF_AB_XM_AUTO_PAD_WIDTH 1
+#define FRF_AB_XM_TX_PRMBL_LBN 2
+#define FRF_AB_XM_TX_PRMBL_WIDTH 1
+#define FRF_AB_XM_TXEN_LBN 1
+#define FRF_AB_XM_TXEN_WIDTH 1
+#define FRF_AB_XM_TX_RST_LBN 0
+#define FRF_AB_XM_TX_RST_WIDTH 1
+
+/* XM_RX_CFG_REG: XGMAC receive configuration */
+#define FR_AB_XM_RX_CFG 0x00001240
+#define FRF_AB_XM_PASS_LENERR_LBN 26
+#define FRF_AB_XM_PASS_LENERR_WIDTH 1
+#define FRF_AB_XM_PASS_CRC_ERR_LBN 25
+#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1
+#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24
+#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_REJ_BCAST_LBN 20
+#define FRF_AB_XM_REJ_BCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11
+#define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9
+#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1
+#define FRF_AB_XM_AUTO_DEPAD_LBN 8
+#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1
+#define FRF_AB_XM_RXCRC_LBN 3
+#define FRF_AB_XM_RXCRC_WIDTH 1
+#define FRF_AB_XM_RX_PRMBL_LBN 2
+#define FRF_AB_XM_RX_PRMBL_WIDTH 1
+#define FRF_AB_XM_RXEN_LBN 1
+#define FRF_AB_XM_RXEN_WIDTH 1
+#define FRF_AB_XM_RX_RST_LBN 0
+#define FRF_AB_XM_RX_RST_WIDTH 1
+
+/* XM_MGT_INT_MASK: XGMAC management interrupt mask register */
+#define FR_AB_XM_MGT_INT_MASK 0x00001250
+#define FRF_AB_XM_MSK_STA_INTR_LBN 16
+#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_MSK_RMTFLT_LBN 1
+#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1
+#define FRF_AB_XM_MSK_LCLFLT_LBN 0
+#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1
+
+/* XM_FC_REG: XGMAC flow control register */
+#define FR_AB_XM_FC 0x00001270
+#define FRF_AB_XM_PAUSE_TIME_LBN 16
+#define FRF_AB_XM_PAUSE_TIME_WIDTH 16
+#define FRF_AB_XM_RX_MAC_STAT_LBN 11
+#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_TX_MAC_STAT_LBN 10
+#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_MCNTL_PASS_LBN 8
+#define FRF_AB_XM_MCNTL_PASS_WIDTH 2
+#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6
+#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1
+#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5
+#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1
+#define FRF_AB_XM_ZPAUSE_LBN 2
+#define FRF_AB_XM_ZPAUSE_WIDTH 1
+#define FRF_AB_XM_XMIT_PAUSE_LBN 1
+#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1
+#define FRF_AB_XM_DIS_FCNTL_LBN 0
+#define FRF_AB_XM_DIS_FCNTL_WIDTH 1
+
+/* XM_PAUSE_TIME_REG: XGMAC pause time register */
+#define FR_AB_XM_PAUSE_TIME 0x00001290
+#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16
+#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16
+#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0
+#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16
+
+/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
+#define FR_AB_XM_TX_PARAM 0x000012d0
+#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31
+#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3
+#define FRF_AB_XM_PAD_CHAR_LBN 0
+#define FRF_AB_XM_PAD_CHAR_WIDTH 8
+
+/* XM_RX_PARAM_REG: XGMAC receive parameter register */
+#define FR_AB_XM_RX_PARAM 0x000012e0
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3
+
+/* XM_MGT_INT_MSK_REG: XGMAC management interrupt mask register */
+#define FR_AB_XM_MGT_INT_MSK 0x000012f0
+#define FRF_AB_XM_STAT_CNTR_OF_LBN 9
+#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_STAT_CNTR_HF_LBN 8
+#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_RMTFLT_LBN 1
+#define FRF_AB_XM_RMTFLT_WIDTH 1
+#define FRF_AB_XM_LCLFLT_LBN 0
+#define FRF_AB_XM_LCLFLT_WIDTH 1
+
+/* XX_PWR_RST_REG: XGXS/XAUI powerdown/reset register */
+#define FR_AB_XX_PWR_RST 0x00001300
+#define FRF_AB_XX_PWRDND_SIG_LBN 31
+#define FRF_AB_XX_PWRDND_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNC_SIG_LBN 30
+#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNB_SIG_LBN 29
+#define FRF_AB_XX_PWRDNB_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNA_SIG_LBN 28
+#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1
+#define FRF_AB_XX_SIM_MODE_LBN 27
+#define FRF_AB_XX_SIM_MODE_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25
+#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24
+#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETD_SIG_LBN 23
+#define FRF_AB_XX_RESETD_SIG_WIDTH 1
+#define FRF_AB_XX_RESETC_SIG_LBN 22
+#define FRF_AB_XX_RESETC_SIG_WIDTH 1
+#define FRF_AB_XX_RESETB_SIG_LBN 21
+#define FRF_AB_XX_RESETB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETA_SIG_LBN 20
+#define FRF_AB_XX_RESETA_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18
+#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17
+#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1
+#define FRF_AB_XX_SD_RST_ACT_LBN 16
+#define FRF_AB_XX_SD_RST_ACT_WIDTH 1
+#define FRF_AB_XX_PWRDND_EN_LBN 15
+#define FRF_AB_XX_PWRDND_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNC_EN_LBN 14
+#define FRF_AB_XX_PWRDNC_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNB_EN_LBN 13
+#define FRF_AB_XX_PWRDNB_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNA_EN_LBN 12
+#define FRF_AB_XX_PWRDNA_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_EN_LBN 9
+#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_EN_LBN 8
+#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1
+#define FRF_AB_XX_RESETD_EN_LBN 7
+#define FRF_AB_XX_RESETD_EN_WIDTH 1
+#define FRF_AB_XX_RESETC_EN_LBN 6
+#define FRF_AB_XX_RESETC_EN_WIDTH 1
+#define FRF_AB_XX_RESETB_EN_LBN 5
+#define FRF_AB_XX_RESETB_EN_WIDTH 1
+#define FRF_AB_XX_RESETA_EN_LBN 4
+#define FRF_AB_XX_RESETA_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2
+#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1
+#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1
+#define FRF_AB_XX_RST_XX_EN_LBN 0
+#define FRF_AB_XX_RST_XX_EN_WIDTH 1
+
+/* XX_SD_CTL_REG: XAUI SerDes control register */
+#define FR_AB_XX_SD_CTL 0x00001310
+#define FRF_AB_XX_TERMADJ1_LBN 17
+#define FRF_AB_XX_TERMADJ1_WIDTH 1
+#define FRF_AB_XX_TERMADJ0_LBN 16
+#define FRF_AB_XX_TERMADJ0_WIDTH 1
+#define FRF_AB_XX_HIDRVD_LBN 15
+#define FRF_AB_XX_HIDRVD_WIDTH 1
+#define FRF_AB_XX_LODRVD_LBN 14
+#define FRF_AB_XX_LODRVD_WIDTH 1
+#define FRF_AB_XX_HIDRVC_LBN 13
+#define FRF_AB_XX_HIDRVC_WIDTH 1
+#define FRF_AB_XX_LODRVC_LBN 12
+#define FRF_AB_XX_LODRVC_WIDTH 1
+#define FRF_AB_XX_HIDRVB_LBN 11
+#define FRF_AB_XX_HIDRVB_WIDTH 1
+#define FRF_AB_XX_LODRVB_LBN 10
+#define FRF_AB_XX_LODRVB_WIDTH 1
+#define FRF_AB_XX_HIDRVA_LBN 9
+#define FRF_AB_XX_HIDRVA_WIDTH 1
+#define FRF_AB_XX_LODRVA_LBN 8
+#define FRF_AB_XX_LODRVA_WIDTH 1
+#define FRF_AB_XX_LPBKD_LBN 3
+#define FRF_AB_XX_LPBKD_WIDTH 1
+#define FRF_AB_XX_LPBKC_LBN 2
+#define FRF_AB_XX_LPBKC_WIDTH 1
+#define FRF_AB_XX_LPBKB_LBN 1
+#define FRF_AB_XX_LPBKB_WIDTH 1
+#define FRF_AB_XX_LPBKA_LBN 0
+#define FRF_AB_XX_LPBKA_WIDTH 1
+
+/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
+#define FR_AB_XX_TXDRV_CTL 0x00001320
+#define FRF_AB_XX_DEQD_LBN 28
+#define FRF_AB_XX_DEQD_WIDTH 4
+#define FRF_AB_XX_DEQC_LBN 24
+#define FRF_AB_XX_DEQC_WIDTH 4
+#define FRF_AB_XX_DEQB_LBN 20
+#define FRF_AB_XX_DEQB_WIDTH 4
+#define FRF_AB_XX_DEQA_LBN 16
+#define FRF_AB_XX_DEQA_WIDTH 4
+#define FRF_AB_XX_DTXD_LBN 12
+#define FRF_AB_XX_DTXD_WIDTH 4
+#define FRF_AB_XX_DTXC_LBN 8
+#define FRF_AB_XX_DTXC_WIDTH 4
+#define FRF_AB_XX_DTXB_LBN 4
+#define FRF_AB_XX_DTXB_WIDTH 4
+#define FRF_AB_XX_DTXA_LBN 0
+#define FRF_AB_XX_DTXA_WIDTH 4
+
+/* XX_PRBS_CTL_REG: XAUI SerDes PRBS control register */
+#define FR_AB_XX_PRBS_CTL 0x00001330
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29
+#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25
+#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21
+#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17
+#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13
+#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9
+#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5
+#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1
+#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1
+
+/* XX_PRBS_CHK_REG: XAUI SerDes PRBS checker status register */
+#define FR_AB_XX_PRBS_CHK 0x00001340
+#define FRF_AB_XX_REV_LB_EN_LBN 16
+#define FRF_AB_XX_REV_LB_EN_WIDTH 1
+#define FRF_AB_XX_CH3_DEG_DET_LBN 15
+#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13
+#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH3_ERR_CHK_LBN 12
+#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH2_DEG_DET_LBN 11
+#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9
+#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH2_ERR_CHK_LBN 8
+#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH1_DEG_DET_LBN 7
+#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5
+#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH1_ERR_CHK_LBN 4
+#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH0_DEG_DET_LBN 3
+#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH0_ERR_CHK_LBN 0
+#define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1
+
+/* XX_PRBS_ERR_REG: XAUI SerDes PRBS error counter register */
+#define FR_AB_XX_PRBS_ERR 0x00001350
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8
+
+/* XX_CORE_STAT_REG: XAUI XGXS core status register */
+#define FR_AB_XX_CORE_STAT 0x00001360
+#define FRF_AB_XX_FORCE_SIG3_LBN 31
+#define FRF_AB_XX_FORCE_SIG3_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30
+#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_LBN 29
+#define FRF_AB_XX_FORCE_SIG2_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28
+#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_LBN 27
+#define FRF_AB_XX_FORCE_SIG1_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26
+#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_LBN 25
+#define FRF_AB_XX_FORCE_SIG0_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24
+#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1
+#define FRF_AB_XX_XGXS_LB_EN_LBN 23
+#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1
+#define FRF_AB_XX_XGMII_LB_EN_LBN 22
+#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1
+#define FRF_AB_XX_MATCH_FAULT_LBN 21
+#define FRF_AB_XX_MATCH_FAULT_WIDTH 1
+#define FRF_AB_XX_ALIGN_DONE_LBN 20
+#define FRF_AB_XX_ALIGN_DONE_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT3_LBN 19
+#define FRF_AB_XX_SYNC_STAT3_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT2_LBN 18
+#define FRF_AB_XX_SYNC_STAT2_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT1_LBN 17
+#define FRF_AB_XX_SYNC_STAT1_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT0_LBN 16
+#define FRF_AB_XX_SYNC_STAT0_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH3_LBN 15
+#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH2_LBN 14
+#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH1_LBN 13
+#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH0_LBN 12
+#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11
+#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10
+#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9
+#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8
+#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7
+#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6
+#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5
+#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4
+#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH3_LBN 3
+#define FRF_AB_XX_DISPERR_CH3_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH2_LBN 2
+#define FRF_AB_XX_DISPERR_CH2_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH1_LBN 1
+#define FRF_AB_XX_DISPERR_CH1_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH0_LBN 0
+#define FRF_AB_XX_DISPERR_CH0_WIDTH 1
+
+/* RX_DESC_PTR_TBL_KER: Receive descriptor pointer table */
+#define FR_AA_RX_DESC_PTR_TBL_KER 0x00011800
+#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4
+/* RX_DESC_PTR_TBL: Receive descriptor pointer table */
+#define FR_BZ_RX_DESC_PTR_TBL 0x00f40000
+#define FR_BZ_RX_DESC_PTR_TBL_STEP 16
+#define FR_BB_RX_DESC_PTR_TBL_ROWS 4096
+#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024
+#define FRF_CZ_RX_HDR_SPLIT_LBN 90
+#define FRF_CZ_RX_HDR_SPLIT_WIDTH 1
+#define FRF_AA_RX_RESET_LBN 89
+#define FRF_AA_RX_RESET_WIDTH 1
+#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88
+#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87
+#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86
+#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1
+#define FRF_AZ_RX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12
+#define FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56
+#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36
+#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24
+#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12
+#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10
+#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14
+#define FRF_AZ_RX_DESCQ_LABEL_LBN 5
+#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5
+#define FRF_AZ_RX_DESCQ_SIZE_LBN 3
+#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2
+#define FFE_AZ_RX_DESCQ_SIZE_4K 3
+#define FFE_AZ_RX_DESCQ_SIZE_2K 2
+#define FFE_AZ_RX_DESCQ_SIZE_1K 1
+#define FFE_AZ_RX_DESCQ_SIZE_512 0
+#define FRF_AZ_RX_DESCQ_TYPE_LBN 2
+#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1
+#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1
+#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1
+#define FRF_AZ_RX_DESCQ_EN_LBN 0
+#define FRF_AZ_RX_DESCQ_EN_WIDTH 1
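+
+/*
+ * Illustrative use (sketch only): initialising a receive descriptor
+ * queue follows the field layout above, assuming EFX_POPULATE_OWORD_6()
+ * and efx_writeo_table() helpers; fields not listed are left at zero:
+ *
+ *     efx_oword_t rxq;
+ *     EFX_POPULATE_OWORD_6(rxq,
+ *                          FRF_AZ_RX_DESCQ_BUF_BASE_ID, buf_base_id,
+ *                          FRF_AZ_RX_DESCQ_EVQ_ID, evq_id,
+ *                          FRF_AZ_RX_DESCQ_LABEL, label,
+ *                          FRF_AZ_RX_DESCQ_SIZE, FFE_AZ_RX_DESCQ_SIZE_1K,
+ *                          FRF_AZ_RX_DESCQ_JUMBO, 0,
+ *                          FRF_AZ_RX_DESCQ_EN, 1);
+ *     efx_writeo_table(efx, &rxq, FR_BZ_RX_DESC_PTR_TBL, queue_index);
+ */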
+
+/* TX_DESC_PTR_TBL_KER: Transmit descriptor pointer */
+#define FR_AA_TX_DESC_PTR_TBL_KER 0x00011900
+#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8
+/* TX_DESC_PTR_TBL: Transmit descriptor pointer */
+#define FR_BZ_TX_DESC_PTR_TBL 0x00f50000
+#define FR_BZ_TX_DESC_PTR_TBL_STEP 16
+#define FR_BB_TX_DESC_PTR_TBL_ROWS 4096
+#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024
+#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94
+#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2
+#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93
+#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1
+#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92
+#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1
+#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91
+#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90
+#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1
+#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89
+#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1
+#define FRF_AZ_TX_DESCQ_EN_LBN 88
+#define FRF_AZ_TX_DESCQ_EN_WIDTH 1
+#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87
+#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86
+#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_TX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12
+#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56
+#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36
+#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24
+#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12
+#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10
+#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14
+#define FRF_AZ_TX_DESCQ_LABEL_LBN 5
+#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5
+#define FRF_AZ_TX_DESCQ_SIZE_LBN 3
+#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2
+#define FFE_AZ_TX_DESCQ_SIZE_4K 3
+#define FFE_AZ_TX_DESCQ_SIZE_2K 2
+#define FFE_AZ_TX_DESCQ_SIZE_1K 1
+#define FFE_AZ_TX_DESCQ_SIZE_512 0
+#define FRF_AZ_TX_DESCQ_TYPE_LBN 1
+#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2
+#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0
+#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1
+
+/* EVQ_PTR_TBL_KER: Event queue pointer table */
+#define FR_AA_EVQ_PTR_TBL_KER 0x00011a00
+#define FR_AA_EVQ_PTR_TBL_KER_STEP 16
+#define FR_AA_EVQ_PTR_TBL_KER_ROWS 4
+/* EVQ_PTR_TBL: Event queue pointer table */
+#define FR_BZ_EVQ_PTR_TBL 0x00f60000
+#define FR_BZ_EVQ_PTR_TBL_STEP 16
+#define FR_CZ_EVQ_PTR_TBL_ROWS 1024
+#define FR_BB_EVQ_PTR_TBL_ROWS 4096
+#define FRF_BZ_EVQ_RPTR_IGN_LBN 40
+#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1
+#define FRF_AB_EVQ_WKUP_OR_INT_EN_LBN 39
+#define FRF_AB_EVQ_WKUP_OR_INT_EN_WIDTH 1
+#define FRF_CZ_EVQ_DOS_PROTECT_EN_LBN 39
+#define FRF_CZ_EVQ_DOS_PROTECT_EN_WIDTH 1
+#define FRF_AZ_EVQ_NXT_WPTR_LBN 24
+#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15
+#define FRF_AZ_EVQ_EN_LBN 23
+#define FRF_AZ_EVQ_EN_WIDTH 1
+#define FRF_AZ_EVQ_SIZE_LBN 20
+#define FRF_AZ_EVQ_SIZE_WIDTH 3
+#define FFE_AZ_EVQ_SIZE_32K 6
+#define FFE_AZ_EVQ_SIZE_16K 5
+#define FFE_AZ_EVQ_SIZE_8K 4
+#define FFE_AZ_EVQ_SIZE_4K 3
+#define FFE_AZ_EVQ_SIZE_2K 2
+#define FFE_AZ_EVQ_SIZE_1K 1
+#define FFE_AZ_EVQ_SIZE_512 0
+#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0
+#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20
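+
+/* For reference: each FFE_AZ_*_SIZE_n value above encodes a ring of
+ * 512 << n entries, e.g. FFE_AZ_EVQ_SIZE_32K == 6 gives 512 << 6 == 32768.
+ */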
+
+/* BUF_HALF_TBL_KER: Buffer table in half buffer table mode, direct access by driver */
+#define FR_AA_BUF_HALF_TBL_KER 0x00018000
+#define FR_AA_BUF_HALF_TBL_KER_STEP 8
+#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096
+/* BUF_HALF_TBL: Buffer table in half buffer table mode, direct access by driver */
+#define FR_BZ_BUF_HALF_TBL 0x00800000
+#define FR_BZ_BUF_HALF_TBL_STEP 8
+#define FR_CZ_BUF_HALF_TBL_ROWS 147456
+#define FR_BB_BUF_HALF_TBL_ROWS 524288
+#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44
+#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20
+#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32
+#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12
+#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12
+#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20
+#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0
+#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
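+
+/* Each 64-bit row of the half buffer table holds two entries: the "even"
+ * entry occupies bits 0..31 and the "odd" entry bits 32..63, as the LBNs
+ * above show.
+ */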
+
+/* BUF_FULL_TBL_KER: Buffer table in full buffer table mode, direct access by driver */
+#define FR_AA_BUF_FULL_TBL_KER 0x00018000
+#define FR_AA_BUF_FULL_TBL_KER_STEP 8
+#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096
+/* BUF_FULL_TBL: Buffer table in full buffer table mode, direct access by driver */
+#define FR_BZ_BUF_FULL_TBL 0x00800000
+#define FR_BZ_BUF_FULL_TBL_STEP 8
+#define FR_CZ_BUF_FULL_TBL_ROWS 147456
+#define FR_BB_BUF_FULL_TBL_ROWS 917504
+#define FRF_AZ_BUF_FULL_UNUSED_LBN 51
+#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13
+#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50
+#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1
+#define FRF_AZ_BUF_ADR_REGION_LBN 48
+#define FRF_AZ_BUF_ADR_REGION_WIDTH 2
+#define FFE_AZ_BUF_ADR_REGN3 3
+#define FFE_AZ_BUF_ADR_REGN2 2
+#define FFE_AZ_BUF_ADR_REGN1 1
+#define FFE_AZ_BUF_ADR_REGN0 0
+#define FRF_AZ_BUF_ADR_FBUF_LBN 14
+#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34
+#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0
+#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14
+
+/* RX_FILTER_TBL0: TCP/IPv4 Receive filter table */
+#define FR_BZ_RX_FILTER_TBL0 0x00f00000
+#define FR_BZ_RX_FILTER_TBL0_STEP 32
+#define FR_BZ_RX_FILTER_TBL0_ROWS 8192
+/* RX_FILTER_TBL1: TCP/IPv4 Receive filter table */
+#define FR_BB_RX_FILTER_TBL1 0x00f00010
+#define FR_BB_RX_FILTER_TBL1_STEP 32
+#define FR_BB_RX_FILTER_TBL1_ROWS 8192
+#define FRF_BZ_RSS_EN_LBN 110
+#define FRF_BZ_RSS_EN_WIDTH 1
+#define FRF_BZ_SCATTER_EN_LBN 109
+#define FRF_BZ_SCATTER_EN_WIDTH 1
+#define FRF_BZ_TCP_UDP_LBN 108
+#define FRF_BZ_TCP_UDP_WIDTH 1
+#define FRF_BZ_RXQ_ID_LBN 96
+#define FRF_BZ_RXQ_ID_WIDTH 12
+#define FRF_BZ_DEST_IP_LBN 64
+#define FRF_BZ_DEST_IP_WIDTH 32
+#define FRF_BZ_DEST_PORT_TCP_LBN 48
+#define FRF_BZ_DEST_PORT_TCP_WIDTH 16
+#define FRF_BZ_SRC_IP_LBN 16
+#define FRF_BZ_SRC_IP_WIDTH 32
+#define FRF_BZ_SRC_TCP_DEST_UDP_LBN 0
+#define FRF_BZ_SRC_TCP_DEST_UDP_WIDTH 16
+
+/* RX_MAC_FILTER_TBL0: Receive Ethernet filter table */
+#define FR_CZ_RX_MAC_FILTER_TBL0 0x00f00010
+#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32
+#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512
+#define FRF_CZ_RMFT_RSS_EN_LBN 75
+#define FRF_CZ_RMFT_RSS_EN_WIDTH 1
+#define FRF_CZ_RMFT_SCATTER_EN_LBN 74
+#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1
+#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73
+#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_RMFT_RXQ_ID_LBN 61
+#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12
+#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
+#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
+#define FRF_CZ_RMFT_DEST_MAC_LBN 16
+#define FRF_CZ_RMFT_DEST_MAC_WIDTH 44
+#define FRF_CZ_RMFT_VLAN_ID_LBN 0
+#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12
+
+/* TIMER_TBL: Timer table */
+#define FR_BZ_TIMER_TBL 0x00f70000
+#define FR_BZ_TIMER_TBL_STEP 16
+#define FR_CZ_TIMER_TBL_ROWS 1024
+#define FR_BB_TIMER_TBL_ROWS 4096
+#define FRF_CZ_TIMER_Q_EN_LBN 33
+#define FRF_CZ_TIMER_Q_EN_WIDTH 1
+#define FRF_CZ_INT_ARMD_LBN 32
+#define FRF_CZ_INT_ARMD_WIDTH 1
+#define FRF_CZ_INT_PEND_LBN 31
+#define FRF_CZ_INT_PEND_WIDTH 1
+#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30
+#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1
+#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16
+#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14
+#define FRF_CZ_TIMER_MODE_LBN 14
+#define FRF_CZ_TIMER_MODE_WIDTH 2
+#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3
+#define FFE_CZ_TIMER_MODE_TRIG_START 2
+#define FFE_CZ_TIMER_MODE_IMMED_START 1
+#define FFE_CZ_TIMER_MODE_DIS 0
+#define FRF_BB_TIMER_MODE_LBN 12
+#define FRF_BB_TIMER_MODE_WIDTH 2
+#define FFE_BB_TIMER_MODE_INT_HLDOFF 2
+#define FFE_BB_TIMER_MODE_TRIG_START 2
+#define FFE_BB_TIMER_MODE_IMMED_START 1
+#define FFE_BB_TIMER_MODE_DIS 0
+#define FRF_CZ_TIMER_VAL_LBN 0
+#define FRF_CZ_TIMER_VAL_WIDTH 14
+#define FRF_BB_TIMER_VAL_LBN 0
+#define FRF_BB_TIMER_VAL_WIDTH 12
+
+/* TX_PACE_TBL: Transmit pacing table */
+#define FR_BZ_TX_PACE_TBL 0x00f80000
+#define FR_BZ_TX_PACE_TBL_STEP 16
+#define FR_CZ_TX_PACE_TBL_ROWS 1024
+#define FR_BB_TX_PACE_TBL_ROWS 4096
+#define FRF_BZ_TX_PACE_LBN 0
+#define FRF_BZ_TX_PACE_WIDTH 5
+
+/* RX_INDIRECTION_TBL: RX Indirection Table */
+#define FR_BZ_RX_INDIRECTION_TBL 0x00fb0000
+#define FR_BZ_RX_INDIRECTION_TBL_STEP 16
+#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128
+#define FRF_BZ_IT_QUEUE_LBN 0
+#define FRF_BZ_IT_QUEUE_WIDTH 6
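+
+/* For reference (an assumption from standard RSS indirection, not stated
+ * here): the low-order bits of the RSS hash index the 128 rows, and the
+ * 6-bit IT_QUEUE field in each row selects the destination RX queue.
+ */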
+
+/* TX_FILTER_TBL0: TCP/IPv4 Transmit filter table */
+#define FR_CZ_TX_FILTER_TBL0 0x00fc0000
+#define FR_CZ_TX_FILTER_TBL0_STEP 16
+#define FR_CZ_TX_FILTER_TBL0_ROWS 8192
+#define FRF_CZ_TIFT_TCP_UDP_LBN 108
+#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1
+#define FRF_CZ_TIFT_TXQ_ID_LBN 96
+#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12
+#define FRF_CZ_TIFT_DEST_IP_LBN 64
+#define FRF_CZ_TIFT_DEST_IP_WIDTH 32
+#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48
+#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16
+#define FRF_CZ_TIFT_SRC_IP_LBN 16
+#define FRF_CZ_TIFT_SRC_IP_WIDTH 32
+#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0
+#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16
+
+/* TX_MAC_FILTER_TBL0: Transmit Ethernet filter table */
+#define FR_CZ_TX_MAC_FILTER_TBL0 0x00fe0000
+#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16
+#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512
+#define FRF_CZ_TMFT_TXQ_ID_LBN 61
+#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12
+#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
+#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
+#define FRF_CZ_TMFT_SRC_MAC_LBN 16
+#define FRF_CZ_TMFT_SRC_MAC_WIDTH 44
+#define FRF_CZ_TMFT_VLAN_ID_LBN 0
+#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12
+
+/* MC_TREG_SMEM: MC Shared Memory */
+#define FR_CZ_MC_TREG_SMEM 0x00ff0000
+#define FR_CZ_MC_TREG_SMEM_STEP 4
+#define FR_CZ_MC_TREG_SMEM_ROWS 512
+#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0
+#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32
+
+/* MSIX_VECTOR_TABLE: MSIX Vector Table */
+#define FR_BB_MSIX_VECTOR_TABLE 0x00ff0000
+#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16
+#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64
+/* MSIX_VECTOR_TABLE: MSIX Vector Table */
+#define FR_CZ_MSIX_VECTOR_TABLE 0x00000000
+/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */
+#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024
+#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97
+#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31
+#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96
+#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1
+#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64
+#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32
+
+/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define FR_BB_MSIX_PBA_TABLE 0x00ff2000
+#define FR_BZ_MSIX_PBA_TABLE_STEP 4
+#define FR_BB_MSIX_PBA_TABLE_ROWS 2
+/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define FR_CZ_MSIX_PBA_TABLE 0x00008000
+/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */
+#define FR_CZ_MSIX_PBA_TABLE_ROWS 32
+#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0
+#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32
+
+/* SRM_DBG_REG: SRAM debug access */
+#define FR_BZ_SRM_DBG 0x03000000
+#define FR_BZ_SRM_DBG_STEP 8
+#define FR_CZ_SRM_DBG_ROWS 262144
+#define FR_BB_SRM_DBG_ROWS 2097152
+#define FRF_BZ_SRM_DBG_LBN 0
+#define FRF_BZ_SRM_DBG_WIDTH 64
+
+/* TB_MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define FR_CZ_TB_MSIX_PBA_TABLE 0x00008000
+#define FR_CZ_TB_MSIX_PBA_TABLE_STEP 4
+#define FR_CZ_TB_MSIX_PBA_TABLE_ROWS 1024
+#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_LBN 0
+#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_WIDTH 32
+
+/* DRIVER_EV */
+#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56
+#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4
+#define FSE_BZ_TX_DSC_ERROR_EV 15
+#define FSE_BZ_RX_DSC_ERROR_EV 14
+#define FSE_AA_RX_RECOVER_EV 11
+#define FSE_AZ_TIMER_EV 10
+#define FSE_AZ_TX_PKT_NON_TCP_UDP 9
+#define FSE_AZ_WAKE_UP_EV 6
+#define FSE_AZ_SRM_UPD_DONE_EV 5
+#define FSE_AB_EVQ_NOT_EN_EV 3
+#define FSE_AZ_EVQ_INIT_DONE_EV 2
+#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1
+#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0
+#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0
+#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14
+
+/* EVENT_ENTRY */
+#define FSF_AZ_EV_CODE_LBN 60
+#define FSF_AZ_EV_CODE_WIDTH 4
+#define FSE_CZ_EV_CODE_MCDI_EV 12
+#define FSE_CZ_EV_CODE_USER_EV 8
+#define FSE_AZ_EV_CODE_DRV_GEN_EV 7
+#define FSE_AZ_EV_CODE_GLOBAL_EV 6
+#define FSE_AZ_EV_CODE_DRIVER_EV 5
+#define FSE_AZ_EV_CODE_TX_EV 2
+#define FSE_AZ_EV_CODE_RX_EV 0
+#define FSF_AZ_EV_DATA_LBN 0
+#define FSF_AZ_EV_DATA_WIDTH 60
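+
+/* Illustrative decode (a sketch; EFX_QWORD_FIELD is the driver's
+ * bitfield.h accessor):
+ *	switch (EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE)) {
+ *	case FSE_AZ_EV_CODE_RX_EV:
+ *		... handle using the RX_EV fields below ...
+ *	}
+ */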
+
+/* GLOBAL_EV */
+#define FSF_BB_GLB_EV_RX_RECOVERY_LBN 12
+#define FSF_BB_GLB_EV_RX_RECOVERY_WIDTH 1
+#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 11
+#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1
+#define FSF_BB_GLB_EV_XG_MGT_INTR_LBN 11
+#define FSF_BB_GLB_EV_XG_MGT_INTR_WIDTH 1
+#define FSF_AB_GLB_EV_XFP_PHY0_INTR_LBN 10
+#define FSF_AB_GLB_EV_XFP_PHY0_INTR_WIDTH 1
+#define FSF_AB_GLB_EV_XG_PHY0_INTR_LBN 9
+#define FSF_AB_GLB_EV_XG_PHY0_INTR_WIDTH 1
+#define FSF_AB_GLB_EV_G_PHY0_INTR_LBN 7
+#define FSF_AB_GLB_EV_G_PHY0_INTR_WIDTH 1
+
+/* LEGACY_INT_VEC */
+#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64
+#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1
+#define FSF_AZ_NET_IVEC_INT_Q_LBN 40
+#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4
+#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32
+#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1
+
+/* MC_XGMAC_FLTR_RULE_DEF */
+#define FSF_CZ_MC_XFRC_MODE_LBN 416
+#define FSF_CZ_MC_XFRC_MODE_WIDTH 1
+#define FSE_CZ_MC_XFRC_MODE_LAYERED 1
+#define FSE_CZ_MC_XFRC_MODE_SIMPLE 0
+#define FSF_CZ_MC_XFRC_HASH_LBN 384
+#define FSF_CZ_MC_XFRC_HASH_WIDTH 32
+#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256
+#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128
+#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128
+#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128
+#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0
+#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128
+
+/* RX_EV */
+#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58
+#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1
+#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57
+#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1
+#define FSF_AZ_RX_EV_PKT_OK_LBN 56
+#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1
+#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55
+#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54
+#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53
+#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
+#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
+#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50
+#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49
+#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1
+#define FSF_AA_RX_EV_DRIB_NIB_LBN 49
+#define FSF_AA_RX_EV_DRIB_NIB_WIDTH 1
+#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47
+#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1
+#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44
+#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3
+#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2
+#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1
+#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0
+#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42
+#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2
+#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3
+#define FSE_AB_RX_EV_HDR_TYPE_IPV4_OTHER 2
+#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2
+#define FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP 1
+#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1
+#define FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP 0
+#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0
+#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41
+#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1
+#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40
+#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1
+#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39
+#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1
+#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37
+#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1
+#define FSF_AZ_RX_EV_Q_LABEL_LBN 32
+#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5
+#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31
+#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1
+#define FSF_AZ_RX_EV_PORT_LBN 30
+#define FSF_AZ_RX_EV_PORT_WIDTH 1
+#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16
+#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14
+#define FSF_AZ_RX_EV_SOP_LBN 15
+#define FSF_AZ_RX_EV_SOP_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14
+#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13
+#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_DESC_PTR_LBN 0
+#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12
+
+/* RX_KER_DESC */
+#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48
+#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14
+#define FSF_AZ_RX_KER_BUF_REGION_LBN 46
+#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2
+#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0
+#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46
+
+/* RX_USER_DESC */
+#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20
+#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12
+#define FSF_AZ_RX_USER_BUF_ID_LBN 0
+#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20
+
+/* TX_EV */
+#define FSF_AZ_TX_EV_PKT_ERR_LBN 38
+#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1
+#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37
+#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1
+#define FSF_AZ_TX_EV_Q_LABEL_LBN 32
+#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5
+#define FSF_AZ_TX_EV_PORT_LBN 16
+#define FSF_AZ_TX_EV_PORT_WIDTH 1
+#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15
+#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1
+#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14
+#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define FSF_AZ_TX_EV_COMP_LBN 12
+#define FSF_AZ_TX_EV_COMP_WIDTH 1
+#define FSF_AZ_TX_EV_DESC_PTR_LBN 0
+#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12
+
+/* TX_KER_DESC */
+#define FSF_AZ_TX_KER_CONT_LBN 62
+#define FSF_AZ_TX_KER_CONT_WIDTH 1
+#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48
+#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14
+#define FSF_AZ_TX_KER_BUF_REGION_LBN 46
+#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2
+#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0
+#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46
+
+/* TX_USER_DESC */
+#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48
+#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1
+#define FSF_AZ_TX_USER_CONT_LBN 46
+#define FSF_AZ_TX_USER_CONT_WIDTH 1
+#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33
+#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13
+#define FSF_AZ_TX_USER_BUF_ID_LBN 13
+#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20
+#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0
+#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13
+
+/* USER_EV */
+#define FSF_CZ_USER_QID_LBN 32
+#define FSF_CZ_USER_QID_WIDTH 10
+#define FSF_CZ_USER_EV_REG_VALUE_LBN 0
+#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32
+
+/**************************************************************************
+ *
+ * Falcon B0 PCIe core indirect registers
+ *
+ **************************************************************************
+ */
+
+#define FPCR_BB_PCIE_DEVICE_CTRL_STAT 0x68
+
+#define FPCR_BB_PCIE_LINK_CTRL_STAT 0x70
+
+#define FPCR_BB_ACK_RPL_TIMER 0x700
+#define FPCRF_BB_ACK_TL_LBN 0
+#define FPCRF_BB_ACK_TL_WIDTH 16
+#define FPCRF_BB_RPL_TL_LBN 16
+#define FPCRF_BB_RPL_TL_WIDTH 16
+
+#define FPCR_BB_ACK_FREQ 0x70C
+#define FPCRF_BB_ACK_FREQ_LBN 0
+#define FPCRF_BB_ACK_FREQ_WIDTH 7
+
+/**************************************************************************
+ *
+ * Pseudo-registers and fields
+ *
+ **************************************************************************
+ */
+
+/* Interrupt acknowledge work-around register (A0/A1 only) */
+#define FR_AA_WORK_AROUND_BROKEN_PCI_READS 0x0070
+
+/* EE_SPI_HCMD_REG: SPI host command register */
+/* Values for the EE_SPI_HCMD_SF_SEL register field */
+#define FFE_AB_SPI_DEVICE_EEPROM 0
+#define FFE_AB_SPI_DEVICE_FLASH 1
+
+/* NIC_STAT_REG: NIC status register */
+#define FRF_AB_STRAP_10G_LBN 2
+#define FRF_AB_STRAP_10G_WIDTH 1
+#define FRF_AA_STRAP_PCIE_LBN 0
+#define FRF_AA_STRAP_PCIE_WIDTH 1
+
+/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
+#define FRF_AZ_FATAL_INTR_LBN 0
+#define FRF_AZ_FATAL_INTR_WIDTH 12
+
+/* SRM_CFG_REG: SRAM configuration register */
+/* We treat the number of SRAM banks and bank size as a single field */
+#define FRF_AZ_SRM_NB_SZ_LBN FRF_AZ_SRM_BANK_SIZE_LBN
+#define FRF_AZ_SRM_NB_SZ_WIDTH \
+ (FRF_AZ_SRM_BANK_SIZE_WIDTH + FRF_AZ_SRM_NUM_BANK_WIDTH)
+#define FFE_AB_SRM_NB1_SZ2M 0
+#define FFE_AB_SRM_NB1_SZ4M 1
+#define FFE_AB_SRM_NB1_SZ8M 2
+#define FFE_AB_SRM_NB_SZ_DEF 3
+#define FFE_AB_SRM_NB2_SZ4M 4
+#define FFE_AB_SRM_NB2_SZ8M 5
+#define FFE_AB_SRM_NB2_SZ16M 6
+#define FFE_AB_SRM_NB_SZ_RES 7
+
+/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
+/* We write just the last dword of these registers */
+#define FR_AZ_RX_DESC_UPD_DWORD_P0 \
+ (BUILD_BUG_ON_ZERO(FR_AA_RX_DESC_UPD_KER != FR_BZ_RX_DESC_UPD_P0) + \
+ FR_BZ_RX_DESC_UPD_P0 + 3 * 4)
+#define FRF_AZ_RX_DESC_WPTR_DWORD_LBN (FRF_AZ_RX_DESC_WPTR_LBN - 3 * 32)
+#define FRF_AZ_RX_DESC_WPTR_DWORD_WIDTH FRF_AZ_RX_DESC_WPTR_WIDTH
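+
+/* For illustration: the descriptor update registers are 128 bits wide, so
+ * "+ 3 * 4" is the byte offset of the last dword (bits 96..127) and
+ * "- 3 * 32" rebases the WPTR field's LBN into that dword.  The same
+ * construction is used for the TX variant below.
+ */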
+
+/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
+#define FR_AZ_TX_DESC_UPD_DWORD_P0 \
+ (BUILD_BUG_ON_ZERO(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0) + \
+ FR_BZ_TX_DESC_UPD_P0 + 3 * 4)
+#define FRF_AZ_TX_DESC_WPTR_DWORD_LBN (FRF_AZ_TX_DESC_WPTR_LBN - 3 * 32)
+#define FRF_AZ_TX_DESC_WPTR_DWORD_WIDTH FRF_AZ_TX_DESC_WPTR_WIDTH
+
+/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
+#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_LBN 12
+#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_WIDTH 1
+
+/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
+#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_LBN 12
+#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1
+
+/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LBN FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH + \
+ FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH)
+
+/* XM_RX_PARAM_REG: XGMAC receive parameter register */
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LBN FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH + \
+ FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH)
+
+/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
+/* Default values */
+#define FFE_AB_XX_TXDRV_DEQ_DEF 0xe /* deq=.6 */
+#define FFE_AB_XX_TXDRV_DTX_DEF 0x5 /* 1.25 */
+#define FFE_AB_XX_SD_CTL_DRV_DEF 0 /* 20mA */
+
+/* XX_CORE_STAT_REG: XAUI XGXS core status register */
+/* XGXS all-lanes status fields */
+#define FRF_AB_XX_SYNC_STAT_LBN FRF_AB_XX_SYNC_STAT0_LBN
+#define FRF_AB_XX_SYNC_STAT_WIDTH 4
+#define FRF_AB_XX_COMMA_DET_LBN FRF_AB_XX_COMMA_DET_CH0_LBN
+#define FRF_AB_XX_COMMA_DET_WIDTH 4
+#define FRF_AB_XX_CHAR_ERR_LBN FRF_AB_XX_CHAR_ERR_CH0_LBN
+#define FRF_AB_XX_CHAR_ERR_WIDTH 4
+#define FRF_AB_XX_DISPERR_LBN FRF_AB_XX_DISPERR_CH0_LBN
+#define FRF_AB_XX_DISPERR_WIDTH 4
+#define FFE_AB_XX_STAT_ALL_LANES 0xf
+#define FRF_AB_XX_FORCE_SIG_LBN FRF_AB_XX_FORCE_SIG0_VAL_LBN
+#define FRF_AB_XX_FORCE_SIG_WIDTH 8
+#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff
+
+/* RX_MAC_FILTER_TBL0 */
+/* RMFT_DEST_MAC is wider than 32 bits */
+#define FRF_CZ_RMFT_DEST_MAC_LO_LBN 12
+#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32
+#define FRF_CZ_RMFT_DEST_MAC_HI_LBN 44
+#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH 16
+
+/* TX_MAC_FILTER_TBL0 */
+/* TMFT_SRC_MAC is wider than 32 bits */
+#define FRF_CZ_TMFT_SRC_MAC_LO_LBN 12
+#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32
+#define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
+#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
+
+/* TX_PACE_TBL */
+/* Values >20 are documented as reserved, but will result in a queue going
+ * into the fast bin with a pace value of zero. */
+#define FFE_BZ_TX_PACE_OFF 0
+#define FFE_BZ_TX_PACE_RESERVED 21
+
+/* DRIVER_EV */
+/* Sub-fields of an RX flush completion event */
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12
+
+/* EVENT_ENTRY */
+/* Magic number field for event test */
+#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
+#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
+
+/**************************************************************************
+ *
+ * Falcon MAC stats
+ *
+ **************************************************************************
+ */
+
+#define GRxGoodOct_offset 0x0
+#define GRxGoodOct_WIDTH 48
+#define GRxBadOct_offset 0x8
+#define GRxBadOct_WIDTH 48
+#define GRxMissPkt_offset 0x10
+#define GRxMissPkt_WIDTH 32
+#define GRxFalseCRS_offset 0x14
+#define GRxFalseCRS_WIDTH 32
+#define GRxPausePkt_offset 0x18
+#define GRxPausePkt_WIDTH 32
+#define GRxBadPkt_offset 0x1C
+#define GRxBadPkt_WIDTH 32
+#define GRxUcastPkt_offset 0x20
+#define GRxUcastPkt_WIDTH 32
+#define GRxMcastPkt_offset 0x24
+#define GRxMcastPkt_WIDTH 32
+#define GRxBcastPkt_offset 0x28
+#define GRxBcastPkt_WIDTH 32
+#define GRxGoodLt64Pkt_offset 0x2C
+#define GRxGoodLt64Pkt_WIDTH 32
+#define GRxBadLt64Pkt_offset 0x30
+#define GRxBadLt64Pkt_WIDTH 32
+#define GRx64Pkt_offset 0x34
+#define GRx64Pkt_WIDTH 32
+#define GRx65to127Pkt_offset 0x38
+#define GRx65to127Pkt_WIDTH 32
+#define GRx128to255Pkt_offset 0x3C
+#define GRx128to255Pkt_WIDTH 32
+#define GRx256to511Pkt_offset 0x40
+#define GRx256to511Pkt_WIDTH 32
+#define GRx512to1023Pkt_offset 0x44
+#define GRx512to1023Pkt_WIDTH 32
+#define GRx1024to15xxPkt_offset 0x48
+#define GRx1024to15xxPkt_WIDTH 32
+#define GRx15xxtoJumboPkt_offset 0x4C
+#define GRx15xxtoJumboPkt_WIDTH 32
+#define GRxGtJumboPkt_offset 0x50
+#define GRxGtJumboPkt_WIDTH 32
+#define GRxFcsErr64to15xxPkt_offset 0x54
+#define GRxFcsErr64to15xxPkt_WIDTH 32
+#define GRxFcsErr15xxtoJumboPkt_offset 0x58
+#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
+#define GRxFcsErrGtJumboPkt_offset 0x5C
+#define GRxFcsErrGtJumboPkt_WIDTH 32
+#define GTxGoodBadOct_offset 0x80
+#define GTxGoodBadOct_WIDTH 48
+#define GTxGoodOct_offset 0x88
+#define GTxGoodOct_WIDTH 48
+#define GTxSglColPkt_offset 0x90
+#define GTxSglColPkt_WIDTH 32
+#define GTxMultColPkt_offset 0x94
+#define GTxMultColPkt_WIDTH 32
+#define GTxExColPkt_offset 0x98
+#define GTxExColPkt_WIDTH 32
+#define GTxDefPkt_offset 0x9C
+#define GTxDefPkt_WIDTH 32
+#define GTxLateCol_offset 0xA0
+#define GTxLateCol_WIDTH 32
+#define GTxExDefPkt_offset 0xA4
+#define GTxExDefPkt_WIDTH 32
+#define GTxPausePkt_offset 0xA8
+#define GTxPausePkt_WIDTH 32
+#define GTxBadPkt_offset 0xAC
+#define GTxBadPkt_WIDTH 32
+#define GTxUcastPkt_offset 0xB0
+#define GTxUcastPkt_WIDTH 32
+#define GTxMcastPkt_offset 0xB4
+#define GTxMcastPkt_WIDTH 32
+#define GTxBcastPkt_offset 0xB8
+#define GTxBcastPkt_WIDTH 32
+#define GTxLt64Pkt_offset 0xBC
+#define GTxLt64Pkt_WIDTH 32
+#define GTx64Pkt_offset 0xC0
+#define GTx64Pkt_WIDTH 32
+#define GTx65to127Pkt_offset 0xC4
+#define GTx65to127Pkt_WIDTH 32
+#define GTx128to255Pkt_offset 0xC8
+#define GTx128to255Pkt_WIDTH 32
+#define GTx256to511Pkt_offset 0xCC
+#define GTx256to511Pkt_WIDTH 32
+#define GTx512to1023Pkt_offset 0xD0
+#define GTx512to1023Pkt_WIDTH 32
+#define GTx1024to15xxPkt_offset 0xD4
+#define GTx1024to15xxPkt_WIDTH 32
+#define GTx15xxtoJumboPkt_offset 0xD8
+#define GTx15xxtoJumboPkt_WIDTH 32
+#define GTxGtJumboPkt_offset 0xDC
+#define GTxGtJumboPkt_WIDTH 32
+#define GTxNonTcpUdpPkt_offset 0xE0
+#define GTxNonTcpUdpPkt_WIDTH 16
+#define GTxMacSrcErrPkt_offset 0xE4
+#define GTxMacSrcErrPkt_WIDTH 16
+#define GTxIpSrcErrPkt_offset 0xE8
+#define GTxIpSrcErrPkt_WIDTH 16
+#define GDmaDone_offset 0xEC
+#define GDmaDone_WIDTH 32
+
+#define XgRxOctets_offset 0x0
+#define XgRxOctets_WIDTH 48
+#define XgRxOctetsOK_offset 0x8
+#define XgRxOctetsOK_WIDTH 48
+#define XgRxPkts_offset 0x10
+#define XgRxPkts_WIDTH 32
+#define XgRxPktsOK_offset 0x14
+#define XgRxPktsOK_WIDTH 32
+#define XgRxBroadcastPkts_offset 0x18
+#define XgRxBroadcastPkts_WIDTH 32
+#define XgRxMulticastPkts_offset 0x1C
+#define XgRxMulticastPkts_WIDTH 32
+#define XgRxUnicastPkts_offset 0x20
+#define XgRxUnicastPkts_WIDTH 32
+#define XgRxUndersizePkts_offset 0x24
+#define XgRxUndersizePkts_WIDTH 32
+#define XgRxOversizePkts_offset 0x28
+#define XgRxOversizePkts_WIDTH 32
+#define XgRxJabberPkts_offset 0x2C
+#define XgRxJabberPkts_WIDTH 32
+#define XgRxUndersizeFCSerrorPkts_offset 0x30
+#define XgRxUndersizeFCSerrorPkts_WIDTH 32
+#define XgRxDropEvents_offset 0x34
+#define XgRxDropEvents_WIDTH 32
+#define XgRxFCSerrorPkts_offset 0x38
+#define XgRxFCSerrorPkts_WIDTH 32
+#define XgRxAlignError_offset 0x3C
+#define XgRxAlignError_WIDTH 32
+#define XgRxSymbolError_offset 0x40
+#define XgRxSymbolError_WIDTH 32
+#define XgRxInternalMACError_offset 0x44
+#define XgRxInternalMACError_WIDTH 32
+#define XgRxControlPkts_offset 0x48
+#define XgRxControlPkts_WIDTH 32
+#define XgRxPausePkts_offset 0x4C
+#define XgRxPausePkts_WIDTH 32
+#define XgRxPkts64Octets_offset 0x50
+#define XgRxPkts64Octets_WIDTH 32
+#define XgRxPkts65to127Octets_offset 0x54
+#define XgRxPkts65to127Octets_WIDTH 32
+#define XgRxPkts128to255Octets_offset 0x58
+#define XgRxPkts128to255Octets_WIDTH 32
+#define XgRxPkts256to511Octets_offset 0x5C
+#define XgRxPkts256to511Octets_WIDTH 32
+#define XgRxPkts512to1023Octets_offset 0x60
+#define XgRxPkts512to1023Octets_WIDTH 32
+#define XgRxPkts1024to15xxOctets_offset 0x64
+#define XgRxPkts1024to15xxOctets_WIDTH 32
+#define XgRxPkts15xxtoMaxOctets_offset 0x68
+#define XgRxPkts15xxtoMaxOctets_WIDTH 32
+#define XgRxLengthError_offset 0x6C
+#define XgRxLengthError_WIDTH 32
+#define XgTxPkts_offset 0x80
+#define XgTxPkts_WIDTH 32
+#define XgTxOctets_offset 0x88
+#define XgTxOctets_WIDTH 48
+#define XgTxMulticastPkts_offset 0x90
+#define XgTxMulticastPkts_WIDTH 32
+#define XgTxBroadcastPkts_offset 0x94
+#define XgTxBroadcastPkts_WIDTH 32
+#define XgTxUnicastPkts_offset 0x98
+#define XgTxUnicastPkts_WIDTH 32
+#define XgTxControlPkts_offset 0x9C
+#define XgTxControlPkts_WIDTH 32
+#define XgTxPausePkts_offset 0xA0
+#define XgTxPausePkts_WIDTH 32
+#define XgTxPkts64Octets_offset 0xA4
+#define XgTxPkts64Octets_WIDTH 32
+#define XgTxPkts65to127Octets_offset 0xA8
+#define XgTxPkts65to127Octets_WIDTH 32
+#define XgTxPkts128to255Octets_offset 0xAC
+#define XgTxPkts128to255Octets_WIDTH 32
+#define XgTxPkts256to511Octets_offset 0xB0
+#define XgTxPkts256to511Octets_WIDTH 32
+#define XgTxPkts512to1023Octets_offset 0xB4
+#define XgTxPkts512to1023Octets_WIDTH 32
+#define XgTxPkts1024to15xxOctets_offset 0xB8
+#define XgTxPkts1024to15xxOctets_WIDTH 32
+#define XgTxPkts1519toMaxOctets_offset 0xBC
+#define XgTxPkts1519toMaxOctets_WIDTH 32
+#define XgTxUndersizePkts_offset 0xC0
+#define XgTxUndersizePkts_WIDTH 32
+#define XgTxOversizePkts_offset 0xC4
+#define XgTxOversizePkts_WIDTH 32
+#define XgTxNonTcpUdpPkt_offset 0xC8
+#define XgTxNonTcpUdpPkt_WIDTH 16
+#define XgTxMacSrcErrPkt_offset 0xCC
+#define XgTxMacSrcErrPkt_WIDTH 16
+#define XgTxIpSrcErrPkt_offset 0xD0
+#define XgTxIpSrcErrPkt_WIDTH 16
+#define XgDmaDone_offset 0xD4
+#define XgDmaDone_WIDTH 32
+
+#define FALCON_STATS_NOT_DONE 0x00000000
+#define FALCON_STATS_DONE 0xffffffff
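+
+/* Illustrative only: a hypothetical helper (not the driver's real accessor)
+ * for reading one of the above counters from the DMA-mapped stats buffer;
+ * the offsets show that 48-bit counters occupy 8-byte slots:
+ *
+ *	static u64 falcon_stat_read(const u8 *buf, unsigned int offset,
+ *				    unsigned int width)
+ *	{
+ *		if (width == 16)
+ *			return le16_to_cpup((const __le16 *)(buf + offset));
+ *		if (width == 32)
+ *			return le32_to_cpup((const __le32 *)(buf + offset));
+ *		return le64_to_cpup((const __le64 *)(buf + offset)) &
+ *			(((u64)1 << 48) - 1);
+ *	}
+ */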
+
+/**************************************************************************
+ *
+ * Falcon non-volatile configuration
+ *
+ **************************************************************************
+ */
+
+/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
+struct falcon_nvconfig_board_v2 {
+ __le16 nports;
+ u8 port0_phy_addr;
+ u8 port0_phy_type;
+ u8 port1_phy_addr;
+ u8 port1_phy_type;
+ __le16 asic_sub_revision;
+ __le16 board_revision;
+} __packed;
+
+/* Board configuration v3 extra information */
+struct falcon_nvconfig_board_v3 {
+ __le32 spi_device_type[2];
+} __packed;
+
+/* Bit numbers for spi_device_type */
+#define SPI_DEV_TYPE_SIZE_LBN 0
+#define SPI_DEV_TYPE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
+#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
+#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
+#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
+#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
+#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
+#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_FIELD(type, field) \
+ (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
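+
+/* Example use (a sketch: EFX_LOW_BIT/EFX_MASK32/EFX_WIDTH are the driver's
+ * bitfield.h helpers, and the 5-bit size fields are assumed to hold log2
+ * byte counts):
+ *	block_size = 1 << SPI_DEV_TYPE_FIELD(type, SPI_DEV_TYPE_BLOCK_SIZE);
+ */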
+
+#define FALCON_NVCONFIG_OFFSET 0x300
+
+#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
+struct falcon_nvconfig {
+ efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
+ u8 mac_address[2][8]; /* 0x310 */
+ efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
+ efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
+ efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
+ efx_oword_t hw_init_reg; /* 0x350 */
+ efx_oword_t nic_stat_reg; /* 0x360 */
+ efx_oword_t glb_ctl_reg; /* 0x370 */
+ efx_oword_t srm_cfg_reg; /* 0x380 */
+ efx_oword_t spare_reg; /* 0x390 */
+ __le16 board_magic_num; /* 0x3A0 */
+ __le16 board_struct_ver;
+ __le16 board_checksum;
+ struct falcon_nvconfig_board_v2 board_v2;
+ efx_oword_t ee_base_page_reg; /* 0x3B0 */
+ struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */
+} __packed;
+
+#endif /* EFX_REGS_H */
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
new file mode 100644
index 000000000000..91a6b7123539
--- /dev/null
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -0,0 +1,749 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/prefetch.h>
+#include <net/ip.h>
+#include <net/checksum.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "nic.h"
+#include "selftest.h"
+#include "workarounds.h"
+
+/* Number of RX descriptors pushed at once. */
+#define EFX_RX_BATCH 8
+
+/* Maximum size of a buffer sharing a page */
+#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
+
+/* Size of buffer allocated for skb header area. */
+#define EFX_SKB_HEADERS 64u
+
+/*
+ * rx_alloc_method - RX buffer allocation method
+ *
+ * This driver supports two methods for allocating and using RX buffers:
+ * each RX buffer may be backed by an skb or by an order-n page.
+ *
+ * When GRO is in use, the second method has lower overhead, since we
+ * don't have to allocate and then free skbs on reassembled frames.
+ *
+ * Values:
+ * - RX_ALLOC_METHOD_AUTO = 0
+ * - RX_ALLOC_METHOD_SKB = 1
+ * - RX_ALLOC_METHOD_PAGE = 2
+ *
+ * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
+ * controlled by the parameters below.
+ *
+ * - Pushing and popping descriptors are separated by the rx_queue size,
+ * so the watermarks should be ~rxd_size.
+ * - The performance win from using page-based allocation for GRO is less
+ * than the performance hit of using page-based allocation for non-GRO
+ * traffic, so the watermarks should reflect this.
+ *
+ * We maintain one variable per channel, updated as that channel receives:
+ *
+ * rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
+ * RX_ALLOC_FACTOR_SKB)
+ * Once per NAPI poll interval, we clamp rx_alloc_level to the range
+ * 0..RX_ALLOC_LEVEL_MAX (which bounds the hysteresis) and update the
+ * allocation strategy:
+ *
+ * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
+ * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
+ */
+static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
+
+#define RX_ALLOC_LEVEL_GRO 0x2000
+#define RX_ALLOC_LEVEL_MAX 0x3000
+#define RX_ALLOC_FACTOR_GRO 1
+#define RX_ALLOC_FACTOR_SKB (-2)
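+
+/* Worked example: starting from rx_alloc_level == 0, on the order of
+ * RX_ALLOC_LEVEL_GRO / RX_ALLOC_FACTOR_GRO == 0x2000 net GRO merges must
+ * accumulate before efx_rx_strategy() switches a channel to page-based
+ * allocation; each non-GRO skb pulls the level back down by 2.
+ */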
+
+/* This is the percentage fill level below which new RX descriptors
+ * will be added to the RX descriptor ring.
+ */
+static unsigned int rx_refill_threshold = 90;
+
+/* This is the percentage fill level to which an RX queue will be refilled
+ * when the "RX refill threshold" is reached.
+ */
+static unsigned int rx_refill_limit = 95;
+
+/*
+ * RX maximum head room required.
+ *
+ * This must be at least 1 to prevent overflow and at least 2 to allow
+ * pipelined receives.
+ */
+#define EFX_RXD_HEAD_ROOM 2
+
+/* Offset of ethernet header within page */
+static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
+ struct efx_rx_buffer *buf)
+{
+ /* Offset is always within one page, so we don't need to consider
+ * the page order.
+ */
+ return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) +
+ efx->type->rx_buffer_hash_size);
+}
+static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
+{
+ return PAGE_SIZE << efx->rx_buffer_order;
+}
+
+static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
+{
+ if (buf->is_page)
+ return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
+ else
+ return ((u8 *)buf->u.skb->data +
+ efx->type->rx_buffer_hash_size);
+}
+
+static inline u32 efx_rx_buf_hash(const u8 *eh)
+{
+ /* The ethernet header is always directly after any hash. */
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
+ return __le32_to_cpup((const __le32 *)(eh - 4));
+#else
+ const u8 *data = eh - 4;
+ return ((u32)data[0] |
+ (u32)data[1] << 8 |
+ (u32)data[2] << 16 |
+ (u32)data[3] << 24);
+#endif
+}
+
+/**
+ * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
+ *
+ * @rx_queue: Efx RX queue
+ *
+ * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
+ * struct efx_rx_buffer for each one. Return a negative error code or 0
+ * on success. May fail having only inserted fewer than EFX_RX_BATCH
+ * buffers.
+ */
+static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_rx_buffer *rx_buf;
+ struct sk_buff *skb;
+ int skb_len = efx->rx_buffer_len;
+ unsigned index, count;
+
+ for (count = 0; count < EFX_RX_BATCH; ++count) {
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, index);
+
+ rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ /* Adjust the SKB for padding and checksum */
+ skb_reserve(skb, NET_IP_ALIGN);
+ rx_buf->len = skb_len - NET_IP_ALIGN;
+ rx_buf->is_page = false;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ rx_buf->dma_addr = pci_map_single(efx->pci_dev,
+ skb->data, rx_buf->len,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(efx->pci_dev,
+ rx_buf->dma_addr))) {
+ dev_kfree_skb_any(skb);
+ rx_buf->u.skb = NULL;
+ return -EIO;
+ }
+
+ ++rx_queue->added_count;
+ ++rx_queue->alloc_skb_count;
+ }
+
+ return 0;
+}
+
+/**
+ * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
+ *
+ * @rx_queue: Efx RX queue
+ *
+ * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
+ * and populates a struct efx_rx_buffer for each one. Returns a negative error
+ * code or 0 on success. If a single page can be split between two buffers,
+ * then the page will either be inserted fully, or not at all.
+ */
+static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ struct efx_rx_buffer *rx_buf;
+ struct page *page;
+ void *page_addr;
+ struct efx_rx_page_state *state;
+ dma_addr_t dma_addr;
+ unsigned index, count;
+
+ /* We can split a page between two buffers */
+ BUILD_BUG_ON(EFX_RX_BATCH & 1);
+
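+ /* When a page is shared (rx_buffer_len <= EFX_RX_HALF_PAGE) its
+ * layout is:
+ *	[ efx_rx_page_state | buffer 0 ... | buffer 1 ... ]
+ * with buffer 1 starting PAGE_SIZE/2 bytes after buffer 0; the
+ * EFX_RX_HALF_PAGE limit guarantees both fit.
+ */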
+ for (count = 0; count < EFX_RX_BATCH; ++count) {
+ page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+ efx->rx_buffer_order);
+ if (unlikely(page == NULL))
+ return -ENOMEM;
+ dma_addr = pci_map_page(efx->pci_dev, page, 0,
+ efx_rx_buf_size(efx),
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
+ __free_pages(page, efx->rx_buffer_order);
+ return -EIO;
+ }
+ page_addr = page_address(page);
+ state = page_addr;
+ state->refcnt = 0;
+ state->dma_addr = dma_addr;
+
+ page_addr += sizeof(struct efx_rx_page_state);
+ dma_addr += sizeof(struct efx_rx_page_state);
+
+ split:
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
+ rx_buf->u.page = page;
+ rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+ rx_buf->is_page = true;
+ ++rx_queue->added_count;
+ ++rx_queue->alloc_page_count;
+ ++state->refcnt;
+
+ if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
+ /* Use the second half of the page */
+ get_page(page);
+ dma_addr += (PAGE_SIZE >> 1);
+ page_addr += (PAGE_SIZE >> 1);
+ ++count;
+ goto split;
+ }
+ }
+
+ return 0;
+}
+
+static void efx_unmap_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf)
+{
+ if (rx_buf->is_page && rx_buf->u.page) {
+ struct efx_rx_page_state *state;
+
+ state = page_address(rx_buf->u.page);
+ if (--state->refcnt == 0) {
+ pci_unmap_page(efx->pci_dev,
+ state->dma_addr,
+ efx_rx_buf_size(efx),
+ PCI_DMA_FROMDEVICE);
+ }
+ } else if (!rx_buf->is_page && rx_buf->u.skb) {
+ pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
+ rx_buf->len, PCI_DMA_FROMDEVICE);
+ }
+}
+
+static void efx_free_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf)
+{
+ if (rx_buf->is_page && rx_buf->u.page) {
+ __free_pages(rx_buf->u.page, efx->rx_buffer_order);
+ rx_buf->u.page = NULL;
+ } else if (!rx_buf->is_page && rx_buf->u.skb) {
+ dev_kfree_skb_any(rx_buf->u.skb);
+ rx_buf->u.skb = NULL;
+ }
+}
+
+static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
+{
+ efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+ efx_free_rx_buffer(rx_queue->efx, rx_buf);
+}
+
+/* Attempt to resurrect the other receive buffer that used to share this page,
+ * which had previously been passed up to the kernel and freed. */
+static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
+{
+ struct efx_rx_page_state *state = page_address(rx_buf->u.page);
+ struct efx_rx_buffer *new_buf;
+ unsigned fill_level, index;
+
+ /* +1 because efx_rx_packet() incremented removed_count. +1 because
+ * we'd like to insert an additional descriptor whilst leaving
+ * EFX_RXD_HEAD_ROOM for the non-recycle path */
+ fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
+ if (unlikely(fill_level > rx_queue->max_fill)) {
+ /* We could place "state" on a list, and drain the list in
+ * efx_fast_push_rx_descriptors(). For now, this will do. */
+ return;
+ }
+
+ ++state->refcnt;
+ get_page(rx_buf->u.page);
+
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ new_buf = efx_rx_buffer(rx_queue, index);
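+ /* Sibling buffers in a shared page sit PAGE_SIZE/2 apart, so
+ * flipping that bit in the DMA address yields the other buffer's
+ * address.
+ */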
+ new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
+ new_buf->u.page = rx_buf->u.page;
+ new_buf->len = rx_buf->len;
+ new_buf->is_page = true;
+ ++rx_queue->added_count;
+}
+
+/* Recycle the given rx buffer directly back into the rx_queue. There is
+ * always room to add this buffer, because we've just popped a buffer. */
+static void efx_recycle_rx_buffer(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+ struct efx_rx_buffer *new_buf;
+ unsigned index;
+
+ if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
+ page_count(rx_buf->u.page) == 1)
+ efx_resurrect_rx_buffer(rx_queue, rx_buf);
+
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ new_buf = efx_rx_buffer(rx_queue, index);
+
+ memcpy(new_buf, rx_buf, sizeof(*new_buf));
+ rx_buf->u.page = NULL;
+ ++rx_queue->added_count;
+}
+
+/**
+ * efx_fast_push_rx_descriptors - push new RX descriptors quickly
+ * @rx_queue: RX descriptor queue
+ *
+ * This will aim to fill the RX descriptor queue up to
+ * @rx_queue->fast_fill_limit. If there is insufficient atomic
+ * memory to do so, a slow fill will be scheduled.
+ *
+ * The caller must provide serialisation (none is used here). In practice,
+ * this means this function must run from the NAPI handler, or be called
+ * when NAPI is disabled.
+ */
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
+{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ unsigned fill_level;
+ int space, rc = 0;
+
+ /* Calculate current fill level, and exit if we don't need to fill */
+ fill_level = (rx_queue->added_count - rx_queue->removed_count);
+ EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
+ if (fill_level >= rx_queue->fast_fill_trigger)
+ goto out;
+
+ /* Record minimum fill level */
+ if (unlikely(fill_level < rx_queue->min_fill)) {
+ if (fill_level)
+ rx_queue->min_fill = fill_level;
+ }
+
+ space = rx_queue->fast_fill_limit - fill_level;
+ if (space < EFX_RX_BATCH)
+ goto out;
+
+ netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+ "RX queue %d fast-filling descriptor ring from"
+ " level %d to level %d using %s allocation\n",
+ efx_rx_queue_index(rx_queue), fill_level,
+ rx_queue->fast_fill_limit,
+ channel->rx_alloc_push_pages ? "page" : "skb");
+
+ do {
+ if (channel->rx_alloc_push_pages)
+ rc = efx_init_rx_buffers_page(rx_queue);
+ else
+ rc = efx_init_rx_buffers_skb(rx_queue);
+ if (unlikely(rc)) {
+ /* Ensure that we don't leave the rx queue empty */
+ if (rx_queue->added_count == rx_queue->removed_count)
+ efx_schedule_slow_fill(rx_queue);
+ goto out;
+ }
+ } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
+
+ netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+ "RX queue %d fast-filled descriptor ring "
+ "to level %d\n", efx_rx_queue_index(rx_queue),
+ rx_queue->added_count - rx_queue->removed_count);
+
+ out:
+ if (rx_queue->notified_count != rx_queue->added_count)
+ efx_nic_notify_rx_desc(rx_queue);
+}
+
+void efx_rx_slow_fill(unsigned long context)
+{
+ struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+
+ /* Post an event to cause NAPI to run and refill the queue */
+ efx_nic_generate_fill_event(channel);
+ ++rx_queue->slow_fill_count;
+}
+
+static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf,
+ int len, bool *discard,
+ bool *leak_packet)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
+
+ if (likely(len <= max_len))
+ return;
+
+ /* The packet must be discarded, but this is only a fatal error
+ * if the caller indicated it was
+ */
+ *discard = true;
+
+ if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ "RX queue %d seriously overlength "
+ "RX event (0x%x > 0x%x+0x%x). Leaking\n",
+ efx_rx_queue_index(rx_queue), len, max_len,
+ efx->type->rx_buffer_padding);
+ /* If this buffer was skb-allocated, then the meta
+ * data at the end of the skb will be trashed. So
+ * we have no choice but to leak the fragment.
+ */
+ *leak_packet = !rx_buf->is_page;
+ efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
+ } else {
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ "RX queue %d overlength RX event "
+ "(0x%x > 0x%x)\n",
+ efx_rx_queue_index(rx_queue), len, max_len);
+ }
+
+ efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
+}
+
+/* Pass a received packet up through the generic GRO stack
+ *
+ * Handles driverlink veto, and passes the fragment up via
+ * the appropriate GRO method
+ */
+static void efx_rx_packet_gro(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ const u8 *eh, bool checksummed)
+{
+ struct napi_struct *napi = &channel->napi_str;
+ gro_result_t gro_result;
+
+ /* Pass the skb/page into the GRO engine */
+ if (rx_buf->is_page) {
+ struct efx_nic *efx = channel->efx;
+ struct page *page = rx_buf->u.page;
+ struct sk_buff *skb;
+
+ rx_buf->u.page = NULL;
+
+ skb = napi_get_frags(napi);
+ if (!skb) {
+ put_page(page);
+ return;
+ }
+
+ if (efx->net_dev->features & NETIF_F_RXHASH)
+ skb->rxhash = efx_rx_buf_hash(eh);
+
+ skb_frag_set_page(skb, 0, page);
+ skb_shinfo(skb)->frags[0].page_offset =
+ efx_rx_buf_offset(efx, rx_buf);
+ skb_shinfo(skb)->frags[0].size = rx_buf->len;
+ skb_shinfo(skb)->nr_frags = 1;
+
+ skb->len = rx_buf->len;
+ skb->data_len = rx_buf->len;
+ skb->truesize += rx_buf->len;
+ skb->ip_summed =
+ checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
+
+ skb_record_rx_queue(skb, channel->channel);
+
+ gro_result = napi_gro_frags(napi);
+ } else {
+ struct sk_buff *skb = rx_buf->u.skb;
+
+ EFX_BUG_ON_PARANOID(!checksummed);
+ rx_buf->u.skb = NULL;
+
+ gro_result = napi_gro_receive(napi, skb);
+ }
+
+ if (gro_result == GRO_NORMAL) {
+ channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
+ } else if (gro_result != GRO_DROP) {
+ channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
+ channel->irq_mod_score += 2;
+ }
+}
+
+void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+ unsigned int len, bool checksummed, bool discard)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ struct efx_rx_buffer *rx_buf;
+ bool leak_packet = false;
+
+ rx_buf = efx_rx_buffer(rx_queue, index);
+
+ /* This allows the refill path to post another buffer.
+ * EFX_RXD_HEAD_ROOM ensures that the slot we are using
+ * isn't overwritten yet.
+ */
+ rx_queue->removed_count++;
+
+ /* Validate the length encoded in the event vs the descriptor pushed */
+ efx_rx_packet__check_len(rx_queue, rx_buf, len,
+ &discard, &leak_packet);
+
+ netif_vdbg(efx, rx_status, efx->net_dev,
+ "RX queue %d received id %x at %llx+%x %s%s\n",
+ efx_rx_queue_index(rx_queue), index,
+ (unsigned long long)rx_buf->dma_addr, len,
+ (checksummed ? " [SUMMED]" : ""),
+ (discard ? " [DISCARD]" : ""));
+
+ /* Discard packet, if instructed to do so */
+ if (unlikely(discard)) {
+ if (unlikely(leak_packet))
+ channel->n_skbuff_leaks++;
+ else
+ efx_recycle_rx_buffer(channel, rx_buf);
+
+ /* Don't hold off the previous receive */
+ rx_buf = NULL;
+ goto out;
+ }
+
+ /* Release card resources - assumes all RX buffers consumed in-order
+ * per RX queue
+ */
+ efx_unmap_rx_buffer(efx, rx_buf);
+
+ /* Prefetch nice and early so data will (hopefully) be in cache by
+ * the time we look at it.
+ */
+ prefetch(efx_rx_buf_eh(efx, rx_buf));
+
+ /* Pipeline receives so that we give time for packet headers to be
+ * prefetched into cache.
+ */
+ rx_buf->len = len - efx->type->rx_buffer_hash_size;
+out:
+ if (channel->rx_pkt)
+ __efx_rx_packet(channel,
+ channel->rx_pkt, channel->rx_pkt_csummed);
+ channel->rx_pkt = rx_buf;
+ channel->rx_pkt_csummed = checksummed;
+}
+
+/* Handle a received packet. Second half: Touches packet payload. */
+void __efx_rx_packet(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf, bool checksummed)
+{
+ struct efx_nic *efx = channel->efx;
+ struct sk_buff *skb;
+ u8 *eh = efx_rx_buf_eh(efx, rx_buf);
+
+ /* If we're in loopback test, then pass the packet directly to the
+ * loopback layer, and free the rx_buf here
+ */
+ if (unlikely(efx->loopback_selftest)) {
+ efx_loopback_rx_packet(efx, eh, rx_buf->len);
+ efx_free_rx_buffer(efx, rx_buf);
+ return;
+ }
+
+ if (!rx_buf->is_page) {
+ skb = rx_buf->u.skb;
+
+ prefetch(skb_shinfo(skb));
+
+ skb_reserve(skb, efx->type->rx_buffer_hash_size);
+ skb_put(skb, rx_buf->len);
+
+ if (efx->net_dev->features & NETIF_F_RXHASH)
+ skb->rxhash = efx_rx_buf_hash(eh);
+
+ /* Move past the ethernet header: eth_type_trans() advances
+ * skb->data, while the rx_buf still points at the header */
+ skb->protocol = eth_type_trans(skb, efx->net_dev);
+
+ skb_record_rx_queue(skb, channel->channel);
+ }
+
+ if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
+ checksummed = false;
+
+ if (likely(checksummed || rx_buf->is_page)) {
+ efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
+ return;
+ }
+
+ /* We now own the SKB */
+ skb = rx_buf->u.skb;
+ rx_buf->u.skb = NULL;
+
+ /* Set the SKB flags */
+ skb_checksum_none_assert(skb);
+
+ /* Pass the packet up */
+ netif_receive_skb(skb);
+
+ /* Update allocation strategy method */
+ channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
+}
+
+void efx_rx_strategy(struct efx_channel *channel)
+{
+ enum efx_rx_alloc_method method = rx_alloc_method;
+
+ /* Only makes sense to use page-based allocation if GRO is enabled */
+ if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
+ method = RX_ALLOC_METHOD_SKB;
+ } else if (method == RX_ALLOC_METHOD_AUTO) {
+ /* Constrain the rx_alloc_level */
+ if (channel->rx_alloc_level < 0)
+ channel->rx_alloc_level = 0;
+ else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
+ channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
+
+ /* Decide on the allocation method */
+ method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
+ RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
+ }
+
+ /* Push the option */
+ channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
+}
+
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int entries;
+ int rc;
+
+ /* Create the smallest power-of-two aligned ring */
+ entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
+ EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+ rx_queue->ptr_mask = entries - 1;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "creating RX queue %d size %#x mask %#x\n",
+ efx_rx_queue_index(rx_queue), efx->rxq_entries,
+ rx_queue->ptr_mask);
+
+ /* Allocate RX buffers */
+ rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer),
+ GFP_KERNEL);
+ if (!rx_queue->buffer)
+ return -ENOMEM;
+
+ rc = efx_nic_probe_rx(rx_queue);
+ if (rc) {
+ kfree(rx_queue->buffer);
+ rx_queue->buffer = NULL;
+ }
+ return rc;
+}
+
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int max_fill, trigger, limit;
+
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ /* Initialise ptr fields */
+ rx_queue->added_count = 0;
+ rx_queue->notified_count = 0;
+ rx_queue->removed_count = 0;
+ rx_queue->min_fill = -1U;
+
+ /* Initialise limit fields */
+ max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
+ trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
+ limit = max_fill * min(rx_refill_limit, 100U) / 100U;
+
+ rx_queue->max_fill = max_fill;
+ rx_queue->fast_fill_trigger = trigger;
+ rx_queue->fast_fill_limit = limit;
+
+ /* Set up RX descriptor ring */
+ efx_nic_init_rx(rx_queue);
+}
+
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ int i;
+ struct efx_rx_buffer *rx_buf;
+
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ del_timer_sync(&rx_queue->slow_fill);
+ efx_nic_fini_rx(rx_queue);
+
+ /* Release RX buffers. NB: start at index 0, not the current HW ptr */
+ if (rx_queue->buffer) {
+ for (i = 0; i <= rx_queue->ptr_mask; i++) {
+ rx_buf = efx_rx_buffer(rx_queue, i);
+ efx_fini_rx_buffer(rx_queue, rx_buf);
+ }
+ }
+}
+
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ efx_nic_remove_rx(rx_queue);
+
+ kfree(rx_queue->buffer);
+ rx_queue->buffer = NULL;
+}
+
+
+module_param(rx_alloc_method, int, 0644);
+MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
+
+module_param(rx_refill_threshold, uint, 0444);
+MODULE_PARM_DESC(rx_refill_threshold,
+ "RX descriptor ring fast/slow fill threshold (%)");
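+
+/* Usage note (illustrative): these can be set at module load time, e.g.
+ *	modprobe sfc rx_alloc_method=1 rx_refill_threshold=85
+ * and rx_alloc_method (mode 0644) can also be written at runtime via
+ * /sys/module/sfc/parameters/rx_alloc_method.
+ */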
+
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
new file mode 100644
index 000000000000..822f6c2a6a7c
--- /dev/null
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -0,0 +1,761 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/kernel_stat.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "nic.h"
+#include "selftest.h"
+#include "workarounds.h"
+
+/*
+ * Loopback test packet structure
+ *
+ * The self-test should stress every RSS vector, and unfortunately
+ * Falcon only performs RSS on TCP/UDP packets.
+ */
+struct efx_loopback_payload {
+ struct ethhdr header;
+ struct iphdr ip;
+ struct udphdr udp;
+ __be16 iteration;
+ const char msg[64];
+} __packed;
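+
+/* With __packed, sizeof(struct efx_loopback_payload) == 14 + 20 + 8 + 2 +
+ * 64 == 108 bytes, comfortably below any MTU the test will use.
+ */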
+
+/* Loopback test source MAC address */
+static const unsigned char payload_source[ETH_ALEN] = {
+ 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
+};
+
+static const char payload_msg[] =
+ "Hello world! This is an Efx loopback test in progress!";
+
+/* Interrupt mode names */
+static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
+static const char *efx_interrupt_mode_names[] = {
+ [EFX_INT_MODE_MSIX] = "MSI-X",
+ [EFX_INT_MODE_MSI] = "MSI",
+ [EFX_INT_MODE_LEGACY] = "legacy",
+};
+#define INT_MODE(efx) \
+ STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
+
+/**
+ * efx_loopback_state - persistent state during a loopback selftest
+ * @flush: Drop all packets in efx_loopback_rx_packet
+ * @packet_count: Number of packets being used in this test
+ * @skbs: An array of skbs transmitted
+ * @offload_csum: Checksums are being offloaded
+ * @rx_good: RX good packet count
+ * @rx_bad: RX bad packet count
+ * @payload: Payload used in tests
+ */
+struct efx_loopback_state {
+ bool flush;
+ int packet_count;
+ struct sk_buff **skbs;
+ bool offload_csum;
+ atomic_t rx_good;
+ atomic_t rx_bad;
+ struct efx_loopback_payload payload;
+};
+
+/**************************************************************************
+ *
+ * MII, NVRAM and register tests
+ *
+ **************************************************************************/
+
+static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+ int rc = 0;
+
+ if (efx->phy_op->test_alive) {
+ rc = efx->phy_op->test_alive(efx);
+ tests->phy_alive = rc ? -1 : 1;
+ }
+
+ return rc;
+}
+
+static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+ int rc = 0;
+
+ if (efx->type->test_nvram) {
+ rc = efx->type->test_nvram(efx);
+ tests->nvram = rc ? -1 : 1;
+ }
+
+ return rc;
+}
+
+static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+ int rc = 0;
+
+ /* Test register access */
+ if (efx->type->test_registers) {
+ rc = efx->type->test_registers(efx);
+ tests->registers = rc ? -1 : 1;
+ }
+
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Interrupt and event queue testing
+ *
+ **************************************************************************/
+
+/* Test generation and receipt of interrupts */
+static int efx_test_interrupts(struct efx_nic *efx,
+ struct efx_self_tests *tests)
+{
+ netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
+ tests->interrupt = -1;
+
+ /* Reset interrupt flag */
+ efx->last_irq_cpu = -1;
+ smp_wmb();
+
+ efx_nic_generate_interrupt(efx);
+
+ /* Wait for arrival of test interrupt. */
+ netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
+ schedule_timeout_uninterruptible(HZ / 10);
+ if (efx->last_irq_cpu >= 0)
+ goto success;
+
+ netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
+ return -ETIMEDOUT;
+
+ success:
+ netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
+ INT_MODE(efx),
+ efx->last_irq_cpu);
+ tests->interrupt = 1;
+ return 0;
+}
+
+/* Test generation and receipt of interrupting events */
+static int efx_test_eventq_irq(struct efx_channel *channel,
+ struct efx_self_tests *tests)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned int read_ptr, count;
+
+ tests->eventq_dma[channel->channel] = -1;
+ tests->eventq_int[channel->channel] = -1;
+ tests->eventq_poll[channel->channel] = -1;
+
+ read_ptr = channel->eventq_read_ptr;
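+ /* Snapshot the event queue read pointer; any advance from here on
+ * means the test event was delivered and processed. */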
+ channel->efx->last_irq_cpu = -1;
+ smp_wmb();
+
+ efx_nic_generate_test_event(channel);
+
+ /* Wait for arrival of interrupt */
+ count = 0;
+ do {
+ schedule_timeout_uninterruptible(HZ / 100);
+
+ if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
+ goto eventq_ok;
+ } while (++count < 2);
+
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d timed out waiting for event queue\n",
+ channel->channel);
+
+ /* See if interrupt arrived */
+ if (channel->efx->last_irq_cpu >= 0) {
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d saw interrupt on CPU%d "
+ "during event queue test\n", channel->channel,
+ efx->last_irq_cpu);
+ tests->eventq_int[channel->channel] = 1;
+ }
+
+ /* Check to see if event was received even if interrupt wasn't */
+ if (efx_nic_event_present(channel)) {
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d event was generated, but "
+ "failed to trigger an interrupt\n", channel->channel);
+ tests->eventq_dma[channel->channel] = 1;
+ }
+
+ return -ETIMEDOUT;
+ eventq_ok:
+ netif_dbg(efx, drv, efx->net_dev, "channel %d event queue passed\n",
+ channel->channel);
+ tests->eventq_dma[channel->channel] = 1;
+ tests->eventq_int[channel->channel] = 1;
+ tests->eventq_poll[channel->channel] = 1;
+ return 0;
+}
+
+static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
+ unsigned flags)
+{
+ int rc;
+
+ if (!efx->phy_op->run_tests)
+ return 0;
+
+ mutex_lock(&efx->mac_lock);
+ rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Loopback testing
+ * NB Only one loopback test can run at a time.
+ *
+ **************************************************************************/
+
+/* Loopback test RX callback
+ * This is called for each received packet during loopback testing.
+ */
+void efx_loopback_rx_packet(struct efx_nic *efx,
+ const char *buf_ptr, int pkt_len)
+{
+ struct efx_loopback_state *state = efx->loopback_selftest;
+ struct efx_loopback_payload *received;
+ struct efx_loopback_payload *payload;
+
+ BUG_ON(!buf_ptr);
+
+ /* If we are just flushing, then drop the packet */
+ if ((state == NULL) || state->flush)
+ return;
+
+ payload = &state->payload;
+
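+ /* The source address varies per packet, and the IP checksum is
+ * rewritten by the NIC when offload is enabled, so copy the
+ * expected values over the received ones before comparing. */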
+ received = (struct efx_loopback_payload *) buf_ptr;
+ received->ip.saddr = payload->ip.saddr;
+ if (state->offload_csum)
+ received->ip.check = payload->ip.check;
+
+ /* Check that header exists */
+ if (pkt_len < sizeof(received->header)) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw runt RX packet (length %d) in %s loopback "
+ "test\n", pkt_len, LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that the Ethernet header matches the payload's */
+ if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw non-loopback RX packet in %s loopback test\n",
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check packet length */
+ if (pkt_len != sizeof(*payload)) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw incorrect RX packet length %d (wanted %d) in "
+ "%s loopback test\n", pkt_len, (int)sizeof(*payload),
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that IP header matches */
+ if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw corrupted IP header in %s loopback test\n",
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that the msg and padding match */
+ if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw corrupted RX packet in %s loopback test\n",
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that iteration matches */
+ if (received->iteration != payload->iteration) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw RX packet from iteration %d (wanted %d) in "
+ "%s loopback test\n", ntohs(received->iteration),
+ ntohs(payload->iteration), LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Increment the good RX packet count */
+ netif_vdbg(efx, drv, efx->net_dev,
+ "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));
+
+ atomic_inc(&state->rx_good);
+ return;
+
+ err:
+#ifdef EFX_ENABLE_DEBUG
+ if (atomic_read(&state->rx_bad) == 0) {
+ netif_err(efx, drv, efx->net_dev, "received packet:\n");
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
+ buf_ptr, pkt_len, 0);
+ netif_err(efx, drv, efx->net_dev, "expected packet:\n");
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
+ &state->payload, sizeof(state->payload), 0);
+ }
+#endif
+ atomic_inc(&state->rx_bad);
+}
+
+/* Initialise an efx_selftest_state for a new iteration */
+static void efx_iterate_state(struct efx_nic *efx)
+{
+ struct efx_loopback_state *state = efx->loopback_selftest;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_loopback_payload *payload = &state->payload;
+
+ /* Initialise the layer-2 (Ethernet) header */
+ memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN);
+ memcpy(&payload->header.h_source, &payload_source, ETH_ALEN);
+ payload->header.h_proto = htons(ETH_P_IP);
+
+ /* saddr set later and used as incrementing count */
+ payload->ip.daddr = htonl(INADDR_LOOPBACK);
+ payload->ip.ihl = 5;
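+ /* Arbitrary placeholder checksum; when offload is enabled the NIC
+ * rewrites it, so the RX handler masks it out before comparing */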
+ payload->ip.check = htons(0xdead);
+ payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
+ payload->ip.version = IPVERSION;
+ payload->ip.protocol = IPPROTO_UDP;
+
+ /* Initialise udp header */
+ payload->udp.source = 0;
+ payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
+ sizeof(struct iphdr));
+ payload->udp.check = 0; /* checksum ignored */
+
+ /* Fill out payload */
+ payload->iteration = htons(ntohs(payload->iteration) + 1);
+ memcpy(&payload->msg, payload_msg, sizeof(payload_msg));
+
+ /* Fill out remaining state members */
+ atomic_set(&state->rx_good, 0);
+ atomic_set(&state->rx_bad, 0);
+ smp_wmb();
+}
+
+static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_loopback_state *state = efx->loopback_selftest;
+ struct efx_loopback_payload *payload;
+ struct sk_buff *skb;
+ int i;
+ netdev_tx_t rc;
+
+ /* Transmit N copies of buffer */
+ for (i = 0; i < state->packet_count; i++) {
+ /* Allocate an skb, holding an extra reference for
+ * transmit completion counting */
+ skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ state->skbs[i] = skb;
+ skb_get(skb);
+
+ /* Copy the payload in, incrementing the source address to
+ * exercise the RSS vectors */
+ payload = ((struct efx_loopback_payload *)
+ skb_put(skb, sizeof(state->payload)));
+ memcpy(payload, &state->payload, sizeof(state->payload));
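+ /* ORing (i << 2) into the host part gives every packet a distinct
+ * source address while leaving the low bits of 127.0.0.1 intact */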
+ payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
+
+ /* Ensure everything we've written is visible to the
+ * interrupt handler. */
+ smp_wmb();
+
+ if (efx_dev_registered(efx))
+ netif_tx_lock_bh(efx->net_dev);
+ rc = efx_enqueue_skb(tx_queue, skb);
+ if (efx_dev_registered(efx))
+ netif_tx_unlock_bh(efx->net_dev);
+
+ if (rc != NETDEV_TX_OK) {
+ netif_err(efx, drv, efx->net_dev,
+ "TX queue %d could not transmit packet %d of "
+ "%d in %s loopback test\n", tx_queue->queue,
+ i + 1, state->packet_count,
+ LOOPBACK_MODE(efx));
+
+ /* Defer cleaning up the other skbs for the caller */
+ kfree_skb(skb);
+ return -EPIPE;
+ }
+ }
+
+ return 0;
+}
+
+static int efx_poll_loopback(struct efx_nic *efx)
+{
+ struct efx_loopback_state *state = efx->loopback_selftest;
+ struct efx_channel *channel;
+
+ /* NAPI polling is not enabled, so process channels
+ * synchronously */
+ efx_for_each_channel(channel, efx) {
+ if (channel->work_pending)
+ efx_process_channel_now(channel);
+ }
+ return atomic_read(&state->rx_good) == state->packet_count;
+}
+
+static int efx_end_loopback(struct efx_tx_queue *tx_queue,
+ struct efx_loopback_self_tests *lb_tests)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_loopback_state *state = efx->loopback_selftest;
+ struct sk_buff *skb;
+ int tx_done = 0, rx_good, rx_bad;
+ int i, rc = 0;
+
+ if (efx_dev_registered(efx))
+ netif_tx_lock_bh(efx->net_dev);
+
+ /* Count the number of TX completions and drop our extra reference.
+ * An skb that is no longer shared has had its clone reference
+ * released by TX completion; any skbs not yet completed will be
+ * freed when the queue is flushed */
+ for (i = 0; i < state->packet_count; i++) {
+ skb = state->skbs[i];
+ if (skb && !skb_shared(skb))
+ ++tx_done;
+ dev_kfree_skb_any(skb);
+ }
+
+ if (efx_dev_registered(efx))
+ netif_tx_unlock_bh(efx->net_dev);
+
+ /* Check TX completion and received packet counts */
+ rx_good = atomic_read(&state->rx_good);
+ rx_bad = atomic_read(&state->rx_bad);
+ if (tx_done != state->packet_count) {
+ /* Don't free the skbs; they will be picked up on TX
+ * overflow or channel teardown.
+ */
+ netif_err(efx, drv, efx->net_dev,
+ "TX queue %d saw only %d out of an expected %d "
+ "TX completion events in %s loopback test\n",
+ tx_queue->queue, tx_done, state->packet_count,
+ LOOPBACK_MODE(efx));
+ rc = -ETIMEDOUT;
+ /* Fall through so we see the RX errors as well */
+ }
+
+ /* The RX total may lag the expected count until a final flush, so a
+ * shortfall is only logged at debug level */
+ if (rx_good != state->packet_count) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "TX queue %d saw only %d out of an expected %d "
+ "received packets in %s loopback test\n",
+ tx_queue->queue, rx_good, state->packet_count,
+ LOOPBACK_MODE(efx));
+ rc = -ETIMEDOUT;
+ /* Fall through */
+ }
+
+ /* Update loopback test structure */
+ lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
+ lb_tests->tx_done[tx_queue->queue] += tx_done;
+ lb_tests->rx_good += rx_good;
+ lb_tests->rx_bad += rx_bad;
+
+ return rc;
+}
+
+static int
+efx_test_loopback(struct efx_tx_queue *tx_queue,
+ struct efx_loopback_self_tests *lb_tests)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_loopback_state *state = efx->loopback_selftest;
+ int i, begin_rc, end_rc;
+
+ for (i = 0; i < 3; i++) {
+ /* Determine how many packets to send */
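+ /* The burst length grows 1, 16, 256 across iterations (1 << (i * 4)),
+ * capped at a third of the TX ring so the queue cannot overflow */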
+ state->packet_count = efx->txq_entries / 3;
+ state->packet_count = min(1 << (i << 2), state->packet_count);
+ state->skbs = kzalloc(sizeof(state->skbs[0]) *
+ state->packet_count, GFP_KERNEL);
+ if (!state->skbs)
+ return -ENOMEM;
+ state->flush = false;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "TX queue %d testing %s loopback with %d packets\n",
+ tx_queue->queue, LOOPBACK_MODE(efx),
+ state->packet_count);
+
+ efx_iterate_state(efx);
+ begin_rc = efx_begin_loopback(tx_queue);
+
+ /* This will normally complete very quickly, but be
+ * prepared to wait up to 100 ms. */
+ msleep(1);
+ if (!efx_poll_loopback(efx)) {
+ msleep(100);
+ efx_poll_loopback(efx);
+ }
+
+ end_rc = efx_end_loopback(tx_queue, lb_tests);
+ kfree(state->skbs);
+
+ if (begin_rc || end_rc) {
+ /* Wait a while to ensure there are no packets
+ * floating around after a failure. */
+ schedule_timeout_uninterruptible(HZ / 10);
+ return begin_rc ? begin_rc : end_rc;
+ }
+ }
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "TX queue %d passed %s loopback test with a burst length "
+ "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
+ state->packet_count);
+
+ return 0;
+}
+
+/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but
+ * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it
+ * to delay and retry. Therefore, it's safer to just poll directly. Wait
+ * for link up and any faults to dissipate. */
+static int efx_wait_for_link(struct efx_nic *efx)
+{
+ struct efx_link_state *link_state = &efx->link_state;
+ int count, link_up_count = 0;
+ bool link_up;
+
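+ /* Poll every 100 ms for up to 4 seconds; require the link to stay
+ * up for two consecutive samples before trusting it */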
+ for (count = 0; count < 40; count++) {
+ schedule_timeout_uninterruptible(HZ / 10);
+
+ if (efx->type->monitor != NULL) {
+ mutex_lock(&efx->mac_lock);
+ efx->type->monitor(efx);
+ mutex_unlock(&efx->mac_lock);
+ } else {
+ struct efx_channel *channel = efx_get_channel(efx, 0);
+ if (channel->work_pending)
+ efx_process_channel_now(channel);
+ }
+
+ mutex_lock(&efx->mac_lock);
+ link_up = link_state->up;
+ if (link_up)
+ link_up = !efx->mac_op->check_fault(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ if (link_up) {
+ if (++link_up_count == 2)
+ return 0;
+ } else {
+ link_up_count = 0;
+ }
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
+ unsigned int loopback_modes)
+{
+ enum efx_loopback_mode mode;
+ struct efx_loopback_state *state;
+ struct efx_channel *channel = efx_get_channel(efx, 0);
+ struct efx_tx_queue *tx_queue;
+ int rc = 0;
+
+ /* Set the port loopback_selftest member. From this point on
+ * all received packets will be dropped. Mark the state as
+ * "flushing" so all inflight packets are dropped */
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state == NULL)
+ return -ENOMEM;
+ BUG_ON(efx->loopback_selftest);
+ state->flush = true;
+ efx->loopback_selftest = state;
+
+ /* Test all supported loopback modes */
+ for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
+ if (!(loopback_modes & (1 << mode)))
+ continue;
+
+ /* Move the port into the specified loopback mode. */
+ state->flush = true;
+ mutex_lock(&efx->mac_lock);
+ efx->loopback_mode = mode;
+ rc = __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "unable to move into %s loopback\n",
+ LOOPBACK_MODE(efx));
+ goto out;
+ }
+
+ rc = efx_wait_for_link(efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "loopback %s never came up\n",
+ LOOPBACK_MODE(efx));
+ goto out;
+ }
+
+ /* Test all enabled types of TX queue */
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ state->offload_csum = (tx_queue->queue &
+ EFX_TXQ_TYPE_OFFLOAD);
+ rc = efx_test_loopback(tx_queue,
+ &tests->loopback[mode]);
+ if (rc)
+ goto out;
+ }
+ }
+
+ out:
+ /* Remove the flush. The caller will remove the loopback setting */
+ state->flush = true;
+ efx->loopback_selftest = NULL;
+ wmb();
+ kfree(state);
+
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Entry point
+ *
+ *************************************************************************/
+
+int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
+ unsigned flags)
+{
+ enum efx_loopback_mode loopback_mode = efx->loopback_mode;
+ int phy_mode = efx->phy_mode;
+ enum reset_type reset_method = RESET_TYPE_INVISIBLE;
+ struct efx_channel *channel;
+ int rc_test = 0, rc_reset = 0, rc;
+
+ /* Online (i.e. non-disruptive) testing
+ * This checks interrupt generation, event delivery and PHY presence. */
+
+ rc = efx_test_phy_alive(efx, tests);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ rc = efx_test_nvram(efx, tests);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ rc = efx_test_interrupts(efx, tests);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ efx_for_each_channel(channel, efx) {
+ rc = efx_test_eventq_irq(channel, tests);
+ if (rc && !rc_test)
+ rc_test = rc;
+ }
+
+ if (rc_test)
+ return rc_test;
+
+ if (!(flags & ETH_TEST_FL_OFFLINE))
+ return efx_test_phy(efx, tests, flags);
+
+ /* Offline (i.e. disruptive) testing
+ * This checks MAC and PHY loopback on the specified port. */
+
+ /* Detach the device so the kernel doesn't transmit during the
+ * loopback test and the watchdog timeout doesn't fire.
+ */
+ netif_device_detach(efx->net_dev);
+
+ mutex_lock(&efx->mac_lock);
+ if (efx->loopback_modes) {
+ /* We need the 312.5 MHz clock from the PHY to test the XMAC
+ * registers, so move into XGMII loopback if available */
+ if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
+ efx->loopback_mode = LOOPBACK_XGMII;
+ else
+ efx->loopback_mode = __ffs(efx->loopback_modes);
+ }
+
+ __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ /* free up all consumers of SRAM (including all the queues) */
+ efx_reset_down(efx, reset_method);
+
+ rc = efx_test_chip(efx, tests);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ /* reset the chip to recover from the register test */
+ rc_reset = efx->type->reset(efx, reset_method);
+
+ /* Ensure that the phy is powered and out of loopback
+ * for the bist and loopback tests */
+ efx->phy_mode &= ~PHY_MODE_LOW_POWER;
+ efx->loopback_mode = LOOPBACK_NONE;
+
+ rc = efx_reset_up(efx, reset_method, rc_reset == 0);
+ if (rc && !rc_reset)
+ rc_reset = rc;
+
+ if (rc_reset) {
+ netif_err(efx, drv, efx->net_dev,
+ "Unable to recover from chip test\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ return rc_reset;
+ }
+
+ rc = efx_test_phy(efx, tests, flags);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ /* restore the PHY to the previous state */
+ mutex_lock(&efx->mac_lock);
+ efx->phy_mode = phy_mode;
+ efx->loopback_mode = loopback_mode;
+ __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ netif_device_attach(efx->net_dev);
+
+ return rc_test;
+}
+
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
new file mode 100644
index 000000000000..dba5456e70f3
--- /dev/null
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -0,0 +1,53 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_SELFTEST_H
+#define EFX_SELFTEST_H
+
+#include "net_driver.h"
+
+/*
+ * Self tests
+ */
+
+struct efx_loopback_self_tests {
+ int tx_sent[EFX_TXQ_TYPES];
+ int tx_done[EFX_TXQ_TYPES];
+ int rx_good;
+ int rx_bad;
+};
+
+#define EFX_MAX_PHY_TESTS 20
+
+/* Efx self test results
+ * For fields which are not counters, 1 indicates success and -1
+ * indicates failure.
+ */
+struct efx_self_tests {
+ /* online tests */
+ int phy_alive;
+ int nvram;
+ int interrupt;
+ int eventq_dma[EFX_MAX_CHANNELS];
+ int eventq_int[EFX_MAX_CHANNELS];
+ int eventq_poll[EFX_MAX_CHANNELS];
+ /* offline tests */
+ int registers;
+ int phy_ext[EFX_MAX_PHY_TESTS];
+ struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
+};
+
+extern void efx_loopback_rx_packet(struct efx_nic *efx,
+ const char *buf_ptr, int pkt_len);
+extern int efx_selftest(struct efx_nic *efx,
+ struct efx_self_tests *tests,
+ unsigned flags);
+
+#endif /* EFX_SELFTEST_H */
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
new file mode 100644
index 000000000000..cc2549cb7076
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -0,0 +1,661 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "mac.h"
+#include "spi.h"
+#include "regs.h"
+#include "io.h"
+#include "phy.h"
+#include "workarounds.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+
+/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
+
+static void siena_init_wol(struct efx_nic *efx);
+
+
+static void siena_push_irq_moderation(struct efx_channel *channel)
+{
+ efx_dword_t timer_cmd;
+
+ BUILD_BUG_ON(EFX_IRQ_MOD_MAX > (1 << FRF_CZ_TC_TIMER_VAL_WIDTH));
+
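+ /* A moderation value of zero disables the hold-off timer; otherwise
+ * program the hold-off interval in ticks (the register field holds
+ * the interval minus one) */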
+ if (channel->irq_moderation)
+ EFX_POPULATE_DWORD_2(timer_cmd,
+ FRF_CZ_TC_TIMER_MODE,
+ FFE_CZ_TIMER_MODE_INT_HLDOFF,
+ FRF_CZ_TC_TIMER_VAL,
+ channel->irq_moderation - 1);
+ else
+ EFX_POPULATE_DWORD_2(timer_cmd,
+ FRF_CZ_TC_TIMER_MODE,
+ FFE_CZ_TIMER_MODE_DIS,
+ FRF_CZ_TC_TIMER_VAL, 0);
+ efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
+ channel->channel);
+}
+
+static void siena_push_multicast_hash(struct efx_nic *efx)
+{
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
+ efx->multicast_hash.byte, sizeof(efx->multicast_hash),
+ NULL, 0, NULL);
+}
+
+static int siena_mdio_write(struct net_device *net_dev,
+ int prtad, int devad, u16 addr, u16 value)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ uint32_t status;
+ int rc;
+
+ rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad,
+ addr, value, &status);
+ if (rc)
+ return rc;
+ if (status != MC_CMD_MDIO_STATUS_GOOD)
+ return -EIO;
+
+ return 0;
+}
+
+static int siena_mdio_read(struct net_device *net_dev,
+ int prtad, int devad, u16 addr)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ uint16_t value;
+ uint32_t status;
+ int rc;
+
+ rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad,
+ addr, &value, &status);
+ if (rc)
+ return rc;
+ if (status != MC_CMD_MDIO_STATUS_GOOD)
+ return -EIO;
+
+ return (int)value;
+}
+
+/* This call is responsible for hooking in the MAC and PHY operations */
+static int siena_probe_port(struct efx_nic *efx)
+{
+ int rc;
+
+ /* Hook in PHY operations table */
+ efx->phy_op = &efx_mcdi_phy_ops;
+
+ /* Set up MDIO structure for PHY */
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ efx->mdio.mdio_read = siena_mdio_read;
+ efx->mdio.mdio_write = siena_mdio_write;
+
+ /* Fill out MDIO structure, loopback modes, and initial link state */
+ rc = efx->phy_op->probe(efx);
+ if (rc != 0)
+ return rc;
+
+ /* Allocate buffer for stats */
+ rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
+ MC_CMD_MAC_NSTATS * sizeof(u64));
+ if (rc)
+ return rc;
+ netif_dbg(efx, probe, efx->net_dev,
+ "stats buffer at %llx (virt %p phys %llx)\n",
+ (u64)efx->stats_buffer.dma_addr,
+ efx->stats_buffer.addr,
+ (u64)virt_to_phys(efx->stats_buffer.addr));
+
+ efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
+
+ return 0;
+}
+
+static void siena_remove_port(struct efx_nic *efx)
+{
+ efx->phy_op->remove(efx);
+ efx_nic_free_buffer(efx, &efx->stats_buffer);
+}
+
+static const struct efx_nic_register_test siena_register_tests[] = {
+ { FR_AZ_ADR_REGION,
+ EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
+ { FR_CZ_USR_EV_CFG,
+ EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AZ_RX_CFG,
+ EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) },
+ { FR_AZ_TX_CFG,
+ EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) },
+ { FR_AZ_TX_RESERVED,
+ EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
+ { FR_AZ_SRM_TX_DC_CFG,
+ EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AZ_RX_DC_CFG,
+ EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AZ_RX_DC_PF_WM,
+ EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_BZ_DP_CTRL,
+ EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_BZ_RX_RSS_TKEY,
+ EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
+ { FR_CZ_RX_RSS_IPV6_REG1,
+ EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
+ { FR_CZ_RX_RSS_IPV6_REG2,
+ EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
+ { FR_CZ_RX_RSS_IPV6_REG3,
+ EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) },
+};
+
+static int siena_test_registers(struct efx_nic *efx)
+{
+ return efx_nic_test_registers(efx, siena_register_tests,
+ ARRAY_SIZE(siena_register_tests));
+}
+
+/**************************************************************************
+ *
+ * Device reset
+ *
+ **************************************************************************
+ */
+
+static enum reset_type siena_map_reset_reason(enum reset_type reason)
+{
+ return RESET_TYPE_ALL;
+}
+
+static int siena_map_reset_flags(u32 *flags)
+{
+ enum {
+ SIENA_RESET_PORT = (ETH_RESET_DMA | ETH_RESET_FILTER |
+ ETH_RESET_OFFLOAD | ETH_RESET_MAC |
+ ETH_RESET_PHY),
+ SIENA_RESET_MC = (SIENA_RESET_PORT |
+ ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT),
+ };
+
+ if ((*flags & SIENA_RESET_MC) == SIENA_RESET_MC) {
+ *flags &= ~SIENA_RESET_MC;
+ return RESET_TYPE_WORLD;
+ }
+
+ if ((*flags & SIENA_RESET_PORT) == SIENA_RESET_PORT) {
+ *flags &= ~SIENA_RESET_PORT;
+ return RESET_TYPE_ALL;
+ }
+
+ /* no invisible reset implemented */
+
+ return -EINVAL;
+}
+
+static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
+{
+ int rc;
+
+ /* Recover from a failed assertion pre-reset */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
+ return rc;
+
+ if (method == RESET_TYPE_WORLD)
+ return efx_mcdi_reset_mc(efx);
+ else
+ return efx_mcdi_reset_port(efx);
+}
+
+static int siena_probe_nvconfig(struct efx_nic *efx)
+{
+ return efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL);
+}
+
+static int siena_probe_nic(struct efx_nic *efx)
+{
+ struct siena_nic_data *nic_data;
+ bool already_attached = false;
+ efx_oword_t reg;
+ int rc;
+
+ /* Allocate storage for hardware specific data */
+ nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL);
+ if (!nic_data)
+ return -ENOMEM;
+ efx->nic_data = nic_data;
+
+ if (efx_nic_fpga_ver(efx) != 0) {
+ netif_err(efx, probe, efx->net_dev,
+ "Siena FPGA not supported\n");
+ rc = -ENODEV;
+ goto fail1;
+ }
+
+ efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
+ efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
+
+ efx_mcdi_init(efx);
+
+ /* Recover from a failed assertion before probing */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
+ goto fail1;
+
+ /* Let the BMC know that the driver is now in charge of link and
+ * filter settings. We must do this before we reset the NIC */
+ rc = efx_mcdi_drv_attach(efx, true, &already_attached);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Unable to register driver with MCPU\n");
+ goto fail2;
+ }
+ if (already_attached)
+ /* Not a fatal error */
+ netif_err(efx, probe, efx->net_dev,
+ "Host already registered with MCPU\n");
+
+ /* Now we can reset the NIC */
+ rc = siena_reset_hw(efx, RESET_TYPE_ALL);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
+ goto fail3;
+ }
+
+ siena_init_wol(efx);
+
+ /* Allocate memory for INT_KER */
+ rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
+ if (rc)
+ goto fail4;
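+ /* The NIC DMAs interrupt status into this buffer, so it must be
+ * 16-byte aligned */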
+ BUG_ON(efx->irq_status.dma_addr & 0x0f);
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "INT_KER at %llx (virt %p phys %llx)\n",
+ (unsigned long long)efx->irq_status.dma_addr,
+ efx->irq_status.addr,
+ (unsigned long long)virt_to_phys(efx->irq_status.addr));
+
+ /* Read in the non-volatile configuration */
+ rc = siena_probe_nvconfig(efx);
+ if (rc == -EINVAL) {
+ netif_err(efx, probe, efx->net_dev,
+ "NVRAM is invalid therefore using defaults\n");
+ efx->phy_type = PHY_TYPE_NONE;
+ efx->mdio.prtad = MDIO_PRTAD_NONE;
+ } else if (rc) {
+ goto fail5;
+ }
+
+ return 0;
+
+fail5:
+ efx_nic_free_buffer(efx, &efx->irq_status);
+fail4:
+fail3:
+ efx_mcdi_drv_attach(efx, false, NULL);
+fail2:
+fail1:
+ kfree(efx->nic_data);
+ return rc;
+}
+
+/* This call performs hardware-specific global initialisation, such as
+ * defining the descriptor cache sizes and number of RSS channels.
+ * It does not set up any buffers, descriptor rings or event queues.
+ */
+static int siena_init_nic(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+ int rc;
+
+ /* Recover from a failed assertion post-reset */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
+ return rc;
+
+ /* Squash TX of packets of 16 bytes or less */
+ efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+ efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+ /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
+ * descriptors (which is bad).
+ */
+ efx_reado(efx, &temp, FR_AZ_TX_CFG);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
+ EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1);
+ efx_writeo(efx, &temp, FR_AZ_TX_CFG);
+
+ efx_reado(efx, &temp, FR_AZ_RX_CFG);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
+ /* Enable hash insertion. This is broken for the 'Falcon' hash
+ * if IPv6 hashing is also enabled, so also select Toeplitz
+ * TCP/IPv4 and IPv4 hashes. */
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
+ efx_writeo(efx, &temp, FR_AZ_RX_CFG);
+
+ /* Set hash key for IPv4 */
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
+ /* Enable IPv6 RSS */
+ BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
+ 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
+ memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
+ EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
+ FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
+ memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+
+ /* Enable event logging */
+ rc = efx_mcdi_log_ctrl(efx, true, false, 0);
+ if (rc)
+ return rc;
+
+ /* Set destination of both TX and RX Flush events */
+ EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
+ efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
+
+ EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1);
+ efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG);
+
+ efx_nic_init_common(efx);
+ return 0;
+}
+
+static void siena_remove_nic(struct efx_nic *efx)
+{
+ efx_nic_free_buffer(efx, &efx->irq_status);
+
+ siena_reset_hw(efx, RESET_TYPE_ALL);
+
+ /* Relinquish the device back to the BMC */
+ if (efx_nic_has_mc(efx))
+ efx_mcdi_drv_attach(efx, false, NULL);
+
+ /* Tear down the private nic state */
+ kfree(efx->nic_data);
+ efx->nic_data = NULL;
+}
+
+#define STATS_GENERATION_INVALID ((__force __le64)(-1))
+
+static int siena_try_update_nic_stats(struct efx_nic *efx)
+{
+ __le64 *dma_stats;
+ struct efx_mac_stats *mac_stats;
+ __le64 generation_start, generation_end;
+
+ mac_stats = &efx->mac_stats;
+ dma_stats = efx->stats_buffer.addr;
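+ /* The MC writes a generation count before and after the stats block;
+ * if the two differ once we have read everything, a DMA was in
+ * progress and the read must be retried */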
+
+ generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ if (generation_end == STATS_GENERATION_INVALID)
+ return 0;
+ rmb();
+
+#define MAC_STAT(M, D) \
+ mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D])
+
+ MAC_STAT(tx_bytes, TX_BYTES);
+ MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
+ mac_stats->tx_good_bytes = (mac_stats->tx_bytes -
+ mac_stats->tx_bad_bytes);
+ MAC_STAT(tx_packets, TX_PKTS);
+ MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
+ MAC_STAT(tx_pause, TX_PAUSE_PKTS);
+ MAC_STAT(tx_control, TX_CONTROL_PKTS);
+ MAC_STAT(tx_unicast, TX_UNICAST_PKTS);
+ MAC_STAT(tx_multicast, TX_MULTICAST_PKTS);
+ MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS);
+ MAC_STAT(tx_lt64, TX_LT64_PKTS);
+ MAC_STAT(tx_64, TX_64_PKTS);
+ MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS);
+ MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS);
+ MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS);
+ MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS);
+ MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS);
+ MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS);
+ MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS);
+ mac_stats->tx_collision = 0;
+ MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS);
+ MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS);
+ MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS);
+ MAC_STAT(tx_deferred, TX_DEFERRED_PKTS);
+ MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS);
+ mac_stats->tx_collision = (mac_stats->tx_single_collision +
+ mac_stats->tx_multiple_collision +
+ mac_stats->tx_excessive_collision +
+ mac_stats->tx_late_collision);
+ MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS);
+ MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS);
+ MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS);
+ MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
+ MAC_STAT(rx_bytes, RX_BYTES);
+ MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
+ mac_stats->rx_good_bytes = (mac_stats->rx_bytes -
+ mac_stats->rx_bad_bytes);
+ MAC_STAT(rx_packets, RX_PKTS);
+ MAC_STAT(rx_good, RX_GOOD_PKTS);
+ MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
+ MAC_STAT(rx_pause, RX_PAUSE_PKTS);
+ MAC_STAT(rx_control, RX_CONTROL_PKTS);
+ MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
+ MAC_STAT(rx_multicast, RX_MULTICAST_PKTS);
+ MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS);
+ MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS);
+ MAC_STAT(rx_64, RX_64_PKTS);
+ MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS);
+ MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS);
+ MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS);
+ MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS);
+ MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS);
+ MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS);
+ MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS);
+ mac_stats->rx_bad_lt64 = 0;
+ mac_stats->rx_bad_64_to_15xx = 0;
+ mac_stats->rx_bad_15xx_to_jumbo = 0;
+ MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS);
+ MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS);
+ mac_stats->rx_missed = 0;
+ MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS);
+ MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS);
+ MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS);
+ MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS);
+ MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS);
+ mac_stats->rx_good_lt64 = 0;
+
+ efx->n_rx_nodesc_drop_cnt =
+ le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]);
+
+#undef MAC_STAT
+
+ rmb();
+ generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+ if (generation_end != generation_start)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static void siena_update_nic_stats(struct efx_nic *efx)
+{
+ int retry;
+
+ /* If we're unlucky enough to read statistics during the DMA, wait
+ * up to 10 ms for it to finish (typically takes <500 us) */
+ for (retry = 0; retry < 100; ++retry) {
+ if (siena_try_update_nic_stats(efx) == 0)
+ return;
+ udelay(100);
+ }
+
+ /* Use the old values instead */
+}
+
+static void siena_start_nic_stats(struct efx_nic *efx)
+{
+ __le64 *dma_stats = efx->stats_buffer.addr;
+
+ dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID;
+
+ efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
+ MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
+}
+
+static void siena_stop_nic_stats(struct efx_nic *efx)
+{
+ efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
+}
+
+/**************************************************************************
+ *
+ * Wake on LAN
+ *
+ **************************************************************************
+ */
+
+static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+
+ wol->supported = WAKE_MAGIC;
+ if (nic_data->wol_filter_id != -1)
+ wol->wolopts = WAKE_MAGIC;
+ else
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+
+static int siena_set_wol(struct efx_nic *efx, u32 type)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ if (type & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ if (type & WAKE_MAGIC) {
+ if (nic_data->wol_filter_id != -1)
+ efx_mcdi_wol_filter_remove(efx,
+ nic_data->wol_filter_id);
+ rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr,
+ &nic_data->wol_filter_id);
+ if (rc)
+ goto fail;
+
+ pci_wake_from_d3(efx->pci_dev, true);
+ } else {
+ rc = efx_mcdi_wol_filter_reset(efx);
+ nic_data->wol_filter_id = -1;
+ pci_wake_from_d3(efx->pci_dev, false);
+ if (rc)
+ goto fail;
+ }
+
+ return 0;
+ fail:
+ netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n",
+ __func__, type, rc);
+ return rc;
+}
+
+
+static void siena_init_wol(struct efx_nic *efx)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id);
+
+ if (rc != 0) {
+ /* If it failed, attempt to get into a synchronised
+ * state with the MC by resetting any configured WoL filters */
+ efx_mcdi_wol_filter_reset(efx);
+ nic_data->wol_filter_id = -1;
+ } else if (nic_data->wol_filter_id != -1) {
+ pci_wake_from_d3(efx->pci_dev, true);
+ }
+}
+
+
+/**************************************************************************
+ *
+ * Revision-dependent attributes used by efx.c and nic.c
+ *
+ **************************************************************************
+ */
+
+const struct efx_nic_type siena_a0_nic_type = {
+ .probe = siena_probe_nic,
+ .remove = siena_remove_nic,
+ .init = siena_init_nic,
+ .fini = efx_port_dummy_op_void,
+ .monitor = NULL,
+ .map_reset_reason = siena_map_reset_reason,
+ .map_reset_flags = siena_map_reset_flags,
+ .reset = siena_reset_hw,
+ .probe_port = siena_probe_port,
+ .remove_port = siena_remove_port,
+ .prepare_flush = efx_port_dummy_op_void,
+ .update_stats = siena_update_nic_stats,
+ .start_stats = siena_start_nic_stats,
+ .stop_stats = siena_stop_nic_stats,
+ .set_id_led = efx_mcdi_set_id_led,
+ .push_irq_moderation = siena_push_irq_moderation,
+ .push_multicast_hash = siena_push_multicast_hash,
+ .reconfigure_port = efx_mcdi_phy_reconfigure,
+ .get_wol = siena_get_wol,
+ .set_wol = siena_set_wol,
+ .resume_wol = siena_init_wol,
+ .test_registers = siena_test_registers,
+ .test_nvram = efx_mcdi_nvram_test_all,
+ .default_mac_ops = &efx_mcdi_mac_operations,
+
+ .revision = EFX_REV_SIENA_A0,
+ .mem_map_size = (FR_CZ_MC_TREG_SMEM +
+ FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
+ .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
+ .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
+ .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
+ .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
+ .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
+ .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_buffer_hash_size = 0x10,
+ .rx_buffer_padding = 0,
+ .max_interrupt_mode = EFX_INT_MODE_MSIX,
+ .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
+ * interrupt handler only supports 32
+ * channels */
+ .tx_dc_base = 0x88000,
+ .rx_dc_base = 0x68000,
+ .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXHASH | NETIF_F_NTUPLE),
+};
diff --git a/drivers/net/ethernet/sfc/spi.h b/drivers/net/ethernet/sfc/spi.h
new file mode 100644
index 000000000000..71f2e3ebe1c7
--- /dev/null
+++ b/drivers/net/ethernet/sfc/spi.h
@@ -0,0 +1,99 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005 Fen Systems Ltd.
+ * Copyright 2006-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_SPI_H
+#define EFX_SPI_H
+
+#include "net_driver.h"
+
+/**************************************************************************
+ *
+ * Basic SPI command set and bit definitions
+ *
+ *************************************************************************/
+
+#define SPI_WRSR 0x01 /* Write status register */
+#define SPI_WRITE 0x02 /* Write data to memory array */
+#define SPI_READ 0x03 /* Read data from memory array */
+#define SPI_WRDI 0x04 /* Reset write enable latch */
+#define SPI_RDSR 0x05 /* Read status register */
+#define SPI_WREN 0x06 /* Set write enable latch */
+#define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */
+
+#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */
+#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */
+#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */
+#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */
+#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */
+#define SPI_STATUS_NRDY 0x01 /* Device busy flag */
+
+/**
+ * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device
+ * @device_id: Controller's id for the device
+ * @size: Size (in bytes)
+ * @addr_len: Number of address bytes in read/write commands
+ * @munge_address: Flag whether addresses should be munged.
+ * Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
+ * use bit 3 of the command byte as address bit A8, rather
+ * than having a two-byte address. If this flag is set, then
+ * commands should be munged in this way.
+ * @erase_command: Erase command (or 0 if sector erase not needed).
+ * @erase_size: Erase sector size (in bytes)
+ * Erase commands affect sectors with this size and alignment.
+ * This must be a power of two.
+ * @block_size: Write block size (in bytes).
+ * Write commands are limited to blocks with this size and alignment.
+ */
+struct efx_spi_device {
+ int device_id;
+ unsigned int size;
+ unsigned int addr_len;
+ unsigned int munge_address:1;
+ u8 erase_command;
+ unsigned int erase_size;
+ unsigned int block_size;
+};
+
+static inline bool efx_spi_present(const struct efx_spi_device *spi)
+{
+ return spi->size != 0;
+}
+
+int falcon_spi_cmd(struct efx_nic *efx,
+ const struct efx_spi_device *spi, unsigned int command,
+ int address, const void* in, void *out, size_t len);
+int falcon_spi_wait_write(struct efx_nic *efx,
+ const struct efx_spi_device *spi);
+int falcon_spi_read(struct efx_nic *efx,
+ const struct efx_spi_device *spi, loff_t start,
+ size_t len, size_t *retlen, u8 *buffer);
+int falcon_spi_write(struct efx_nic *efx,
+ const struct efx_spi_device *spi, loff_t start,
+ size_t len, size_t *retlen, const u8 *buffer);
+
+/*
+ * SFC4000 flash is partitioned into:
+ * 0-0x400 chip and board config (see falcon_hwdefs.h)
+ * 0x400-0x8000 unused (or may contain VPD if EEPROM not present)
+ * 0x8000-end boot code (mapped to PCI expansion ROM)
+ * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
+ * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
+ * 0-0x400 chip and board config
+ * configurable VPD
+ * 0x800-0x1800 boot config
+ * Aside from the chip and board config, all of these are optional and may
+ * be absent or truncated depending on the devices used.
+ */
+#define FALCON_NVCONFIG_END 0x400U
+#define FALCON_FLASH_BOOTCODE_START 0x8000U
+#define EFX_EEPROM_BOOTCONFIG_START 0x800U
+#define EFX_EEPROM_BOOTCONFIG_END 0x1800U
+
+#endif /* EFX_SPI_H */
diff --git a/drivers/net/ethernet/sfc/tenxpress.c b/drivers/net/ethernet/sfc/tenxpress.c
new file mode 100644
index 000000000000..7b0fd89e7b85
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tenxpress.c
@@ -0,0 +1,494 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2007-2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/delay.h>
+#include <linux/rtnetlink.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include "efx.h"
+#include "mdio_10g.h"
+#include "nic.h"
+#include "phy.h"
+#include "workarounds.h"
+
+/* We expect these MMDs to be in the package. */
+#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \
+ MDIO_DEVS_PCS | \
+ MDIO_DEVS_PHYXS | \
+ MDIO_DEVS_AN)
+
+#define SFX7101_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \
+ (1 << LOOPBACK_PCS) | \
+ (1 << LOOPBACK_PMAPMD) | \
+ (1 << LOOPBACK_PHYXS_WS))
+
+/* We complain if we fail to see the link partner as 10G capable this many
+ * times in a row (must be > 1 as sampling the autoneg. registers is racy)
+ */
+#define MAX_BAD_LP_TRIES (5)
+
+/* Extended control register */
+#define PMA_PMD_XCONTROL_REG 49152
+#define PMA_PMD_EXT_GMII_EN_LBN 1
+#define PMA_PMD_EXT_GMII_EN_WIDTH 1
+#define PMA_PMD_EXT_CLK_OUT_LBN 2
+#define PMA_PMD_EXT_CLK_OUT_WIDTH 1
+#define PMA_PMD_LNPGA_POWERDOWN_LBN 8
+#define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1
+#define PMA_PMD_EXT_CLK312_WIDTH 1
+#define PMA_PMD_EXT_LPOWER_LBN 12
+#define PMA_PMD_EXT_LPOWER_WIDTH 1
+#define PMA_PMD_EXT_ROBUST_LBN 14
+#define PMA_PMD_EXT_ROBUST_WIDTH 1
+#define PMA_PMD_EXT_SSR_LBN 15
+#define PMA_PMD_EXT_SSR_WIDTH 1
+
+/* extended status register */
+#define PMA_PMD_XSTATUS_REG 49153
+#define PMA_PMD_XSTAT_MDIX_LBN 14
+#define PMA_PMD_XSTAT_FLP_LBN (12)
+
+/* LED control register */
+#define PMA_PMD_LED_CTRL_REG 49159
+#define PMA_PMA_LED_ACTIVITY_LBN (3)
+
+/* LED function override register */
+#define PMA_PMD_LED_OVERR_REG 49161
+/* Bit positions for different LEDs (there are more but not wired on SFE4001)*/
+#define PMA_PMD_LED_LINK_LBN (0)
+#define PMA_PMD_LED_SPEED_LBN (2)
+#define PMA_PMD_LED_TX_LBN (4)
+#define PMA_PMD_LED_RX_LBN (6)
+/* Override settings */
+#define PMA_PMD_LED_AUTO (0) /* H/W control */
+#define PMA_PMD_LED_ON (1)
+#define PMA_PMD_LED_OFF (2)
+#define PMA_PMD_LED_FLASH (3)
+#define PMA_PMD_LED_MASK 3
+/* Default: green and amber LEDs under hardware control, red LED off */
+#define SFX7101_PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
+
+#define PMA_PMD_SPEED_ENABLE_REG 49192
+#define PMA_PMD_100TX_ADV_LBN 1
+#define PMA_PMD_100TX_ADV_WIDTH 1
+#define PMA_PMD_1000T_ADV_LBN 2
+#define PMA_PMD_1000T_ADV_WIDTH 1
+#define PMA_PMD_10000T_ADV_LBN 3
+#define PMA_PMD_10000T_ADV_WIDTH 1
+#define PMA_PMD_SPEED_LBN 4
+#define PMA_PMD_SPEED_WIDTH 4
+
+/* Misc register defines */
+#define PCS_CLOCK_CTRL_REG 55297
+#define PLL312_RST_N_LBN 2
+
+#define PCS_SOFT_RST2_REG 55302
+#define SERDES_RST_N_LBN 13
+#define XGXS_RST_N_LBN 12
+
+#define PCS_TEST_SELECT_REG 55303 /* PRM 10.5.8 */
+#define CLK312_EN_LBN 3
+
+/* PHYXS registers */
+#define PHYXS_XCONTROL_REG 49152
+#define PHYXS_RESET_LBN 15
+#define PHYXS_RESET_WIDTH 1
+
+#define PHYXS_TEST1 (49162)
+#define LOOPBACK_NEAR_LBN (8)
+#define LOOPBACK_NEAR_WIDTH (1)
+
+/* Boot status register */
+#define PCS_BOOT_STATUS_REG 53248
+#define PCS_BOOT_FATAL_ERROR_LBN 0
+#define PCS_BOOT_PROGRESS_LBN 1
+#define PCS_BOOT_PROGRESS_WIDTH 2
+#define PCS_BOOT_PROGRESS_INIT 0
+#define PCS_BOOT_PROGRESS_WAIT_MDIO 1
+#define PCS_BOOT_PROGRESS_CHECKSUM 2
+#define PCS_BOOT_PROGRESS_JUMP 3
+#define PCS_BOOT_DOWNLOAD_WAIT_LBN 3
+#define PCS_BOOT_CODE_STARTED_LBN 4
+
+/* 100M/1G PHY registers */
+#define GPHY_XCONTROL_REG 49152
+#define GPHY_ISOLATE_LBN 10
+#define GPHY_ISOLATE_WIDTH 1
+#define GPHY_DUPLEX_LBN 8
+#define GPHY_DUPLEX_WIDTH 1
+#define GPHY_LOOPBACK_NEAR_LBN 14
+#define GPHY_LOOPBACK_NEAR_WIDTH 1
+
+#define C22EXT_STATUS_REG 49153
+#define C22EXT_STATUS_LINK_LBN 2
+#define C22EXT_STATUS_LINK_WIDTH 1
+
+#define C22EXT_MSTSLV_CTRL 49161
+#define C22EXT_MSTSLV_CTRL_ADV_1000_HD_LBN 8
+#define C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN 9
+
+#define C22EXT_MSTSLV_STATUS 49162
+#define C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN 10
+#define C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN 11
+
+/* Time to wait between powering down the LNPGA and turning off the power
+ * rails */
+#define LNPGA_PDOWN_WAIT (HZ / 5)
+
+struct tenxpress_phy_data {
+ enum efx_loopback_mode loopback_mode;
+ enum efx_phy_mode phy_mode;
+ int bad_lp_tries;
+};
+
+static int tenxpress_init(struct efx_nic *efx)
+{
+ /* Enable 312.5 MHz clock */
+ efx_mdio_write(efx, MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
+ 1 << CLK312_EN_LBN);
+
+ /* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG,
+ 1 << PMA_PMA_LED_ACTIVITY_LBN, true);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG,
+ SFX7101_PMA_PMD_LED_DEFAULT);
+
+ return 0;
+}
+
+static int tenxpress_phy_probe(struct efx_nic *efx)
+{
+ struct tenxpress_phy_data *phy_data;
+
+ /* Allocate phy private storage */
+ phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+ efx->phy_data = phy_data;
+ phy_data->phy_mode = efx->phy_mode;
+
+ efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45;
+
+ efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
+
+ efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full);
+
+ return 0;
+}
+
+static int tenxpress_phy_init(struct efx_nic *efx)
+{
+ int rc;
+
+ falcon_board(efx)->type->init_phy(efx);
+
+ if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
+ rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
+ if (rc < 0)
+ return rc;
+
+ rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS);
+ if (rc < 0)
+ return rc;
+ }
+
+ rc = tenxpress_init(efx);
+ if (rc < 0)
+ return rc;
+
+ /* Reinitialise flow control settings */
+ efx_link_set_wanted_fc(efx, efx->wanted_fc);
+ efx_mdio_an_reconfigure(efx);
+
+ schedule_timeout_uninterruptible(HZ / 5); /* 200ms */
+
+ /* Let XGXS and SerDes out of reset */
+ falcon_reset_xaui(efx);
+
+ return 0;
+}
+
+/* Perform a "special software reset" on the PHY. The caller is
+ * responsible for saving and restoring the PHY hardware registers
+ * properly, and masking/unmasking LASI */
+static int tenxpress_special_reset(struct efx_nic *efx)
+{
+ int rc, reg;
+
+ /* The XGMAC clock is driven from the SFX7101 312MHz clock, so
+ * a special software reset can glitch the XGMAC sufficiently for stats
+ * requests to fail. */
+ falcon_stop_nic_stats(efx);
+
+ /* Initiate reset */
+ reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
+ reg |= (1 << PMA_PMD_EXT_SSR_LBN);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
+
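+ /* Give the special software reset time to take effect before
+ * polling the MMDs */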
+ mdelay(200);
+
+ /* Wait for the blocks to come out of reset */
+ rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
+ if (rc < 0)
+ goto out;
+
+ /* Try and reconfigure the device */
+ rc = tenxpress_init(efx);
+ if (rc < 0)
+ goto out;
+
+ /* Wait for the XGXS state machine to churn */
+ mdelay(10);
+out:
+ falcon_start_nic_stats(efx);
+ return rc;
+}
+
+static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
+{
+ struct tenxpress_phy_data *pd = efx->phy_data;
+ bool bad_lp;
+ int reg;
+
+ if (link_ok) {
+ bad_lp = false;
+ } else {
+ /* Check that AN has started but not completed. */
+ reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_STAT1);
+ if (!(reg & MDIO_AN_STAT1_LPABLE))
+ return; /* LP status is unknown */
+ bad_lp = !(reg & MDIO_AN_STAT1_COMPLETE);
+ if (bad_lp)
+ pd->bad_lp_tries++;
+ }
+
+ /* Nothing to do if all is well and was previously so. */
+ if (!pd->bad_lp_tries)
+ return;
+
+ /* Use the RX (red) LED as an error indicator once we've seen AN
+ * failure several times in a row, and also log a message. */
+ if (!bad_lp || pd->bad_lp_tries == MAX_BAD_LP_TRIES) {
+ reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
+ PMA_PMD_LED_OVERR_REG);
+ reg &= ~(PMA_PMD_LED_MASK << PMA_PMD_LED_RX_LBN);
+ if (!bad_lp) {
+ reg |= PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN;
+ } else {
+ reg |= PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN;
+ netif_err(efx, link, efx->net_dev,
+ "appears to be plugged into a port"
+ " that is not 10GBASE-T capable. The PHY"
+ " supports 10GBASE-T ONLY, so no link can"
+ " be established\n");
+ }
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ PMA_PMD_LED_OVERR_REG, reg);
+ pd->bad_lp_tries = bad_lp;
+ }
+}
+
+static bool sfx7101_link_ok(struct efx_nic *efx)
+{
+ return efx_mdio_links_ok(efx,
+ MDIO_DEVS_PMAPMD |
+ MDIO_DEVS_PCS |
+ MDIO_DEVS_PHYXS);
+}
+
+static void tenxpress_ext_loopback(struct efx_nic *efx)
+{
+ efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, PHYXS_TEST1,
+ 1 << LOOPBACK_NEAR_LBN,
+ efx->loopback_mode == LOOPBACK_PHYXS);
+}
+
+static void tenxpress_low_power(struct efx_nic *efx)
+{
+ efx_mdio_set_mmds_lpower(
+ efx, !!(efx->phy_mode & PHY_MODE_LOW_POWER),
+ TENXPRESS_REQUIRED_DEVS);
+}
+
+static int tenxpress_phy_reconfigure(struct efx_nic *efx)
+{
+ struct tenxpress_phy_data *phy_data = efx->phy_data;
+ bool phy_mode_change, loop_reset;
+
+ if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) {
+ phy_data->phy_mode = efx->phy_mode;
+ return 0;
+ }
+
+ phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL &&
+ phy_data->phy_mode != PHY_MODE_NORMAL);
+ loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, LOOPBACKS_EXTERNAL(efx)) ||
+ LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY));
+
+ if (loop_reset || phy_mode_change) {
+ tenxpress_special_reset(efx);
+ falcon_reset_xaui(efx);
+ }
+
+ tenxpress_low_power(efx);
+ efx_mdio_transmit_disable(efx);
+ efx_mdio_phy_reconfigure(efx);
+ tenxpress_ext_loopback(efx);
+ efx_mdio_an_reconfigure(efx);
+
+ phy_data->loopback_mode = efx->loopback_mode;
+ phy_data->phy_mode = efx->phy_mode;
+
+ return 0;
+}
+
+static void
+tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
+
+/* Poll for link state changes */
+static bool tenxpress_phy_poll(struct efx_nic *efx)
+{
+ struct efx_link_state old_state = efx->link_state;
+
+ efx->link_state.up = sfx7101_link_ok(efx);
+ efx->link_state.speed = 10000;
+ efx->link_state.fd = true;
+ efx->link_state.fc = efx_mdio_get_pause(efx);
+
+ sfx7101_check_bad_lp(efx, efx->link_state.up);
+
+ return !efx_link_state_equal(&efx->link_state, &old_state);
+}
+
+static void sfx7101_phy_fini(struct efx_nic *efx)
+{
+ int reg;
+
+ /* Power down the LNPGA */
+ reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
+
+ /* Waiting here ensures that the board fini, which can turn
+ * off the power to the PHY, won't get run until the LNPGA
+ * powerdown has been given long enough to complete. */
+ schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
+}
+
+static void tenxpress_phy_remove(struct efx_nic *efx)
+{
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
+}
+
+
+/* Override the RX, TX and link LEDs */
+void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+ int reg;
+
+ switch (mode) {
+ case EFX_LED_OFF:
+ reg = (PMA_PMD_LED_OFF << PMA_PMD_LED_TX_LBN) |
+ (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) |
+ (PMA_PMD_LED_OFF << PMA_PMD_LED_LINK_LBN);
+ break;
+ case EFX_LED_ON:
+ reg = (PMA_PMD_LED_ON << PMA_PMD_LED_TX_LBN) |
+ (PMA_PMD_LED_ON << PMA_PMD_LED_RX_LBN) |
+ (PMA_PMD_LED_ON << PMA_PMD_LED_LINK_LBN);
+ break;
+ default:
+ reg = SFX7101_PMA_PMD_LED_DEFAULT;
+ break;
+ }
+
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, reg);
+}
+
+static const char *const sfx7101_test_names[] = {
+ "bist"
+};
+
+static const char *sfx7101_test_name(struct efx_nic *efx, unsigned int index)
+{
+ if (index < ARRAY_SIZE(sfx7101_test_names))
+ return sfx7101_test_names[index];
+ return NULL;
+}
+
+static int
+sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags)
+{
+ int rc;
+
+ if (!(flags & ETH_TEST_FL_OFFLINE))
+ return 0;
+
+ /* BIST is automatically run after a special software reset */
+ rc = tenxpress_special_reset(efx);
+ results[0] = rc ? -1 : 1;
+
+ efx_mdio_an_reconfigure(efx);
+
+ return rc;
+}
+
+static void
+tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+ u32 adv = 0, lpa = 0;
+ int reg;
+
+ reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL);
+ if (reg & MDIO_AN_10GBT_CTRL_ADV10G)
+ adv |= ADVERTISED_10000baseT_Full;
+ reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
+ if (reg & MDIO_AN_10GBT_STAT_LP10G)
+ lpa |= ADVERTISED_10000baseT_Full;
+
+ mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa);
+
+ /* In loopback, the PHY automatically brings up the correct interface,
+ * but doesn't advertise the correct speed. So override it */
+ if (LOOPBACK_EXTERNAL(efx))
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
+}
+
+static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+ if (!ecmd->autoneg)
+ return -EINVAL;
+
+ return efx_mdio_set_settings(efx, ecmd);
+}
+
+static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising)
+{
+ efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV10G,
+ advertising & ADVERTISED_10000baseT_Full);
+}
+
+const struct efx_phy_operations falcon_sfx7101_phy_ops = {
+ .probe = tenxpress_phy_probe,
+ .init = tenxpress_phy_init,
+ .reconfigure = tenxpress_phy_reconfigure,
+ .poll = tenxpress_phy_poll,
+ .fini = sfx7101_phy_fini,
+ .remove = tenxpress_phy_remove,
+ .get_settings = tenxpress_get_settings,
+ .set_settings = tenxpress_set_settings,
+ .set_npage_adv = sfx7101_set_npage_adv,
+ .test_alive = efx_mdio_test_alive,
+ .test_name = sfx7101_test_name,
+ .run_tests = sfx7101_run_tests,
+};
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
new file mode 100644
index 000000000000..3964a62dde8b
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -0,0 +1,1207 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/pci.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/ipv6.h>
+#include <linux/if_ether.h>
+#include <linux/highmem.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "nic.h"
+#include "workarounds.h"
+
+/*
+ * TX descriptor ring full threshold
+ *
+ * The tx_queue descriptor ring fill-level must fall below this value
+ * before we restart the netif queue
+ */
+#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
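+/* For example (illustrative value, not taken from this file): with
+ * txq_entries == 1024 a stopped queue is woken once fewer than 512
+ * descriptors are in use, i.e. once the ring has drained to half full.
+ */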
+
+static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer)
+{
+ if (buffer->unmap_len) {
+ struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+ dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
+ buffer->unmap_len);
+ if (buffer->unmap_single)
+ pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
+ PCI_DMA_TODEVICE);
+ buffer->unmap_len = 0;
+ buffer->unmap_single = false;
+ }
+
+ if (buffer->skb) {
+ dev_kfree_skb_any((struct sk_buff *) buffer->skb);
+ buffer->skb = NULL;
+ netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
+ "TX queue %d transmission id %x complete\n",
+ tx_queue->queue, tx_queue->read_count);
+ }
+}
+
+/**
+ * struct efx_tso_header - a DMA mapped buffer for packet headers
+ * @next: Linked list of free headers.
+ * The list is protected by the TX queue lock.
+ * @unmap_len: Length to unmap for an oversize buffer, or 0.
+ * @dma_addr: The DMA address of the header below.
+ *
+ * This controls the memory used for a TSO header. Use TSOH_BUFFER()
+ * to find the packet header data. Use TSOH_SIZE() to calculate the
+ * total size required for a given packet header length. TSO headers
+ * in the free list are exactly %TSOH_STD_SIZE bytes in size.
+ */
+struct efx_tso_header {
+ union {
+ struct efx_tso_header *next;
+ size_t unmap_len;
+ };
+ dma_addr_t dma_addr;
+};
+
+static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
+ struct sk_buff *skb);
+static void efx_fini_tso(struct efx_tx_queue *tx_queue);
+static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
+ struct efx_tso_header *tsoh);
+
+static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer)
+{
+ if (buffer->tsoh) {
+ if (likely(!buffer->tsoh->unmap_len)) {
+ buffer->tsoh->next = tx_queue->tso_headers_free;
+ tx_queue->tso_headers_free = buffer->tsoh;
+ } else {
+ efx_tsoh_heap_free(tx_queue, buffer->tsoh);
+ }
+ buffer->tsoh = NULL;
+ }
+}
+
+
+static inline unsigned
+efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
+{
+ /* Depending on the NIC revision, we can use descriptor
+ * lengths up to 8K or 8K-1. However, since PCI Express
+ * devices must split read requests at 4K boundaries, there is
+ * little benefit from using descriptors that cross those
+ * boundaries and we keep things simple by not doing so.
+ */
+ unsigned len = (~dma_addr & 0xfff) + 1;
+
+ /* Work around hardware bug for unaligned buffers. */
+ if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
+ len = min_t(unsigned, len, 512 - (dma_addr & 0xf));
+
+ return len;
+}
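+
+/* Worked example (illustrative): for dma_addr == 0x1f00 the next 4K
+ * boundary is at 0x2000, so (~dma_addr & 0xfff) + 1 == 0x100 and at
+ * most 256 bytes go into this descriptor.  If workaround 5391 applies
+ * and dma_addr == 0x1f08, the 512 - 8 == 504 byte cap also applies,
+ * but the boundary limit of 0xf8 (248) bytes is the smaller one.
+ */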
+
+/*
+ * Add a socket buffer to a TX queue
+ *
+ * This maps all fragments of a socket buffer for DMA and adds them to
+ * the TX queue. The queue's insert pointer will be incremented by
+ * the number of fragments in the socket buffer.
+ *
+ * If any DMA mapping fails, any mapped fragments will be unmapped
+ * and the queue's insert pointer will be restored to its original value.
+ *
+ * This function is split out from efx_hard_start_xmit to allow the
+ * loopback test to direct packets via specific TX queues.
+ *
+ * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
+ * You must hold netif_tx_lock() to call this function.
+ */
+netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct pci_dev *pci_dev = efx->pci_dev;
+ struct efx_tx_buffer *buffer;
+ skb_frag_t *fragment;
+ unsigned int len, unmap_len = 0, fill_level, insert_ptr;
+ dma_addr_t dma_addr, unmap_addr = 0;
+ unsigned int dma_len;
+ bool unmap_single;
+ int q_space, i = 0;
+ netdev_tx_t rc = NETDEV_TX_OK;
+
+ EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
+
+ if (skb_shinfo(skb)->gso_size)
+ return efx_enqueue_skb_tso(tx_queue, skb);
+
+ /* Get size of the initial fragment */
+ len = skb_headlen(skb);
+
+ /* Pad if necessary */
+ if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
+ EFX_BUG_ON_PARANOID(skb->data_len);
+ len = 32 + 1;
+ if (skb_pad(skb, len - skb->len))
+ return NETDEV_TX_OK;
+ }
+
+ fill_level = tx_queue->insert_count - tx_queue->old_read_count;
+ q_space = efx->txq_entries - 1 - fill_level;
+
+ /* Map for DMA. Use pci_map_single rather than pci_map_page
+ * since this is more efficient on machines with sparse
+ * memory.
+ */
+ unmap_single = true;
+ dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+
+ /* Process all fragments */
+ while (1) {
+ if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
+ goto pci_err;
+
+ /* Store fields for marking in the per-fragment final
+ * descriptor */
+ unmap_len = len;
+ unmap_addr = dma_addr;
+
+ /* Add to TX queue, splitting across DMA boundaries */
+ do {
+ if (unlikely(q_space-- <= 0)) {
+ /* It might be that completions have
+ * happened since the xmit path last
+ * checked. Update the xmit path's
+ * copy of read_count.
+ */
+ netif_tx_stop_queue(tx_queue->core_txq);
+ /* This memory barrier protects the
+ * change of queue state from the access
+ * of read_count. */
+ smp_mb();
+ tx_queue->old_read_count =
+ ACCESS_ONCE(tx_queue->read_count);
+ fill_level = (tx_queue->insert_count
+ - tx_queue->old_read_count);
+ q_space = efx->txq_entries - 1 - fill_level;
+ if (unlikely(q_space-- <= 0)) {
+ rc = NETDEV_TX_BUSY;
+ goto unwind;
+ }
+ smp_mb();
+ if (likely(!efx->loopback_selftest))
+ netif_tx_start_queue(
+ tx_queue->core_txq);
+ }
+
+ insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
+ buffer = &tx_queue->buffer[insert_ptr];
+ efx_tsoh_free(tx_queue, buffer);
+ EFX_BUG_ON_PARANOID(buffer->tsoh);
+ EFX_BUG_ON_PARANOID(buffer->skb);
+ EFX_BUG_ON_PARANOID(buffer->len);
+ EFX_BUG_ON_PARANOID(!buffer->continuation);
+ EFX_BUG_ON_PARANOID(buffer->unmap_len);
+
+ dma_len = efx_max_tx_len(efx, dma_addr);
+ if (likely(dma_len >= len))
+ dma_len = len;
+
+ /* Fill out per descriptor fields */
+ buffer->len = dma_len;
+ buffer->dma_addr = dma_addr;
+ len -= dma_len;
+ dma_addr += dma_len;
+ ++tx_queue->insert_count;
+ } while (len);
+
+ /* Transfer ownership of the unmapping to the final buffer */
+ buffer->unmap_single = unmap_single;
+ buffer->unmap_len = unmap_len;
+ unmap_len = 0;
+
+ /* Get address and size of next fragment */
+ if (i >= skb_shinfo(skb)->nr_frags)
+ break;
+ fragment = &skb_shinfo(skb)->frags[i];
+ len = fragment->size;
+ i++;
+ /* Map for DMA */
+ unmap_single = false;
+ dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len,
+ DMA_TO_DEVICE);
+ }
+
+ /* Transfer ownership of the skb to the final buffer */
+ buffer->skb = skb;
+ buffer->continuation = false;
+
+ /* Pass off to hardware */
+ efx_nic_push_buffers(tx_queue);
+
+ return NETDEV_TX_OK;
+
+ pci_err:
+ netif_err(efx, tx_err, efx->net_dev,
+ " TX queue %d could not map skb with %d bytes %d "
+ "fragments for DMA\n", tx_queue->queue, skb->len,
+ skb_shinfo(skb)->nr_frags + 1);
+
+ /* Mark the packet as transmitted, and free the SKB ourselves */
+ dev_kfree_skb_any(skb);
+
+ unwind:
+ /* Work backwards until we hit the original insert pointer value */
+ while (tx_queue->insert_count != tx_queue->write_count) {
+ --tx_queue->insert_count;
+ insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
+ buffer = &tx_queue->buffer[insert_ptr];
+ efx_dequeue_buffer(tx_queue, buffer);
+ buffer->len = 0;
+ }
+
+ /* Free the fragment we were mid-way through pushing */
+ if (unmap_len) {
+ if (unmap_single)
+ pci_unmap_single(pci_dev, unmap_addr, unmap_len,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(pci_dev, unmap_addr, unmap_len,
+ PCI_DMA_TODEVICE);
+ }
+
+ return rc;
+}
+
+/* Remove packets from the TX queue
+ *
+ * This removes packets from the TX queue, up to and including the
+ * specified index.
+ */
+static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
+ unsigned int index)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned int stop_index, read_ptr;
+
+ stop_index = (index + 1) & tx_queue->ptr_mask;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+
+ while (read_ptr != stop_index) {
+ struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
+ if (unlikely(buffer->len == 0)) {
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX queue %d spurious TX completion id %x\n",
+ tx_queue->queue, read_ptr);
+ efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+ return;
+ }
+
+ efx_dequeue_buffer(tx_queue, buffer);
+ buffer->continuation = true;
+ buffer->len = 0;
+
+ ++tx_queue->read_count;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+ }
+}
+
+/* Initiate a packet transmission. We use one channel per CPU
+ * (sharing when we have more CPUs than channels). On Falcon, the TX
+ * completion events will be directed back to the CPU that transmitted
+ * the packet, which should be cache-efficient.
+ *
+ * Context: non-blocking.
+ * Note that returning anything other than NETDEV_TX_OK will cause the
+ * OS to free the skb.
+ */
+netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_tx_queue *tx_queue;
+ unsigned index, type;
+
+ EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
+
+ index = skb_get_queue_mapping(skb);
+ type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+ if (index >= efx->n_tx_channels) {
+ index -= efx->n_tx_channels;
+ type |= EFX_TXQ_TYPE_HIGHPRI;
+ }
+ tx_queue = efx_get_tx_queue(efx, index, type);
+
+ return efx_enqueue_skb(tx_queue, skb);
+}
+
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+
+ /* Must be inverse of queue lookup in efx_hard_start_xmit() */
+ tx_queue->core_txq =
+ netdev_get_tx_queue(efx->net_dev,
+ tx_queue->queue / EFX_TXQ_TYPES +
+ ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+ efx->n_tx_channels : 0));
+}
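+
+/* Illustrative mapping (assuming EFX_TXQ_TYPES == 4 and hardware queue
+ * numbers of the form channel * EFX_TXQ_TYPES + type; neither constant
+ * is defined in this file): with n_tx_channels == 4, channel 1's
+ * high-priority offload queue is hardware queue 7, giving core queue
+ * 7 / 4 + 4 == 5, which efx_hard_start_xmit() maps back to index 1
+ * with EFX_TXQ_TYPE_HIGHPRI set.
+ */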
+
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ unsigned tc;
+ int rc;
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
+ return -EINVAL;
+
+ if (num_tc == net_dev->num_tc)
+ return 0;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
+ net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
+ }
+
+ if (num_tc > net_dev->num_tc) {
+ /* Initialise high-priority queues as necessary */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_possible_channel_tx_queue(tx_queue,
+ channel) {
+ if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
+ continue;
+ if (!tx_queue->buffer) {
+ rc = efx_probe_tx_queue(tx_queue);
+ if (rc)
+ return rc;
+ }
+ if (!tx_queue->initialised)
+ efx_init_tx_queue(tx_queue);
+ efx_init_tx_queue_core_txq(tx_queue);
+ }
+ }
+ } else {
+ /* Reduce number of classes before number of queues */
+ net_dev->num_tc = num_tc;
+ }
+
+ rc = netif_set_real_num_tx_queues(net_dev,
+ max_t(int, num_tc, 1) *
+ efx->n_tx_channels);
+ if (rc)
+ return rc;
+
+ /* Do not destroy high-priority queues when they become
+ * unused. We would have to flush them first, and it is
+ * fairly difficult to flush a subset of TX queues. Leave
+ * it to efx_fini_channels().
+ */
+
+ net_dev->num_tc = num_tc;
+ return 0;
+}
+
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+ unsigned fill_level;
+ struct efx_nic *efx = tx_queue->efx;
+
+ EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
+
+ efx_dequeue_buffers(tx_queue, index);
+
+ /* See if we need to restart the netif queue. This barrier
+ * separates the update of read_count from the test of the
+ * queue state. */
+ smp_mb();
+ if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
+ likely(efx->port_enabled) &&
+ likely(netif_device_present(efx->net_dev))) {
+ fill_level = tx_queue->insert_count - tx_queue->read_count;
+ if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
+ EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
+ netif_tx_wake_queue(tx_queue->core_txq);
+ }
+ }
+
+ /* Check whether the hardware queue is now empty */
+ if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
+ tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+ if (tx_queue->read_count == tx_queue->old_write_count) {
+ smp_mb();
+ tx_queue->empty_read_count =
+ tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
+ }
+ }
+}
+
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned int entries;
+ int i, rc;
+
+ /* Create the smallest power-of-two ring large enough for txq_entries */
+ entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
+ EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+ tx_queue->ptr_mask = entries - 1;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "creating TX queue %d size %#x mask %#x\n",
+ tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
+
+ /* Allocate software ring */
+ tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
+ GFP_KERNEL);
+ if (!tx_queue->buffer)
+ return -ENOMEM;
+ for (i = 0; i <= tx_queue->ptr_mask; ++i)
+ tx_queue->buffer[i].continuation = true;
+
+ /* Allocate hardware ring */
+ rc = efx_nic_probe_tx(tx_queue);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+ fail:
+ kfree(tx_queue->buffer);
+ tx_queue->buffer = NULL;
+ return rc;
+}
+
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "initialising TX queue %d\n", tx_queue->queue);
+
+ tx_queue->insert_count = 0;
+ tx_queue->write_count = 0;
+ tx_queue->old_write_count = 0;
+ tx_queue->read_count = 0;
+ tx_queue->old_read_count = 0;
+ tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
+
+ /* Set up TX descriptor ring */
+ efx_nic_init_tx(tx_queue);
+
+ tx_queue->initialised = true;
+}
+
+void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer;
+
+ if (!tx_queue->buffer)
+ return;
+
+ /* Free any buffers left in the ring */
+ while (tx_queue->read_count != tx_queue->write_count) {
+ buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
+ efx_dequeue_buffer(tx_queue, buffer);
+ buffer->continuation = true;
+ buffer->len = 0;
+
+ ++tx_queue->read_count;
+ }
+}
+
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ if (!tx_queue->initialised)
+ return;
+
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "shutting down TX queue %d\n", tx_queue->queue);
+
+ tx_queue->initialised = false;
+
+ /* Flush TX queue, remove descriptor ring */
+ efx_nic_fini_tx(tx_queue);
+
+ efx_release_tx_buffers(tx_queue);
+
+ /* Free up TSO header cache */
+ efx_fini_tso(tx_queue);
+}
+
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ if (!tx_queue->buffer)
+ return;
+
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "destroying TX queue %d\n", tx_queue->queue);
+ efx_nic_remove_tx(tx_queue);
+
+ kfree(tx_queue->buffer);
+ tx_queue->buffer = NULL;
+}
+
+
+/* Efx TCP segmentation acceleration.
+ *
+ * Why? Because by doing it here in the driver we can go significantly
+ * faster than the generic GSO path.
+ *
+ * Requires TX checksum offload support.
+ */
+
+/* Number of bytes inserted at the start of a TSO header buffer,
+ * similar to NET_IP_ALIGN.
+ */
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#define TSOH_OFFSET 0
+#else
+#define TSOH_OFFSET NET_IP_ALIGN
+#endif
+
+#define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET)
+
+/* Total size of struct efx_tso_header, buffer and padding */
+#define TSOH_SIZE(hdr_len) \
+ (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)
+
+/* Size of blocks on free list. Larger blocks must be allocated from
+ * the heap.
+ */
+#define TSOH_STD_SIZE 128
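+/* Illustrative sizing: on a 64-bit build (8-byte pointers and 8-byte
+ * dma_addr_t -- an assumption, not taken from this file) a plain
+ * Ethernet + IPv4 + TCP header of 54 bytes gives
+ * TSOH_SIZE(54) == 16 + TSOH_OFFSET + 54 <= 128, so such headers come
+ * from the free list rather than the heap.
+ */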
+
+#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
+#define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data)
+#define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data)
+#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
+#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
+
+/**
+ * struct tso_state - TSO state for an SKB
+ * @out_len: Remaining length in current segment
+ * @seqnum: Current sequence number
+ * @ipv4_id: Current IPv4 ID, host endian
+ * @packet_space: Remaining space in current packet
+ * @dma_addr: DMA address of current position
+ * @in_len: Remaining length in current SKB fragment
+ * @unmap_len: Length of SKB fragment
+ * @unmap_addr: DMA address of SKB fragment
+ * @unmap_single: DMA single vs page mapping flag
+ * @protocol: Network protocol (after any VLAN header)
+ * @header_len: Number of bytes of header
+ * @full_packet_size: Number of bytes to put in each outgoing segment
+ *
+ * The state used during segmentation. It is put into this data structure
+ * just to make it easy to pass into inline functions.
+ */
+struct tso_state {
+ /* Output position */
+ unsigned out_len;
+ unsigned seqnum;
+ unsigned ipv4_id;
+ unsigned packet_space;
+
+ /* Input position */
+ dma_addr_t dma_addr;
+ unsigned in_len;
+ unsigned unmap_len;
+ dma_addr_t unmap_addr;
+ bool unmap_single;
+
+ __be16 protocol;
+ unsigned header_len;
+ int full_packet_size;
+};
+
+
+/*
+ * Verify that our various assumptions about sk_buffs and the conditions
+ * under which TSO will be attempted hold true. Return the protocol number.
+ */
+static __be16 efx_tso_check_protocol(struct sk_buff *skb)
+{
+ __be16 protocol = skb->protocol;
+
+ EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
+ protocol);
+ if (protocol == htons(ETH_P_8021Q)) {
+ /* Find the encapsulated protocol; reset network header
+ * and transport header based on that. */
+ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+ protocol = veh->h_vlan_encapsulated_proto;
+ skb_set_network_header(skb, sizeof(*veh));
+ if (protocol == htons(ETH_P_IP))
+ skb_set_transport_header(skb, sizeof(*veh) +
+ 4 * ip_hdr(skb)->ihl);
+ else if (protocol == htons(ETH_P_IPV6))
+ skb_set_transport_header(skb, sizeof(*veh) +
+ sizeof(struct ipv6hdr));
+ }
+
+ if (protocol == htons(ETH_P_IP)) {
+ EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
+ } else {
+ EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
+ EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
+ }
+ EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
+ + (tcp_hdr(skb)->doff << 2u)) >
+ skb_headlen(skb));
+
+ return protocol;
+}
+
+
+/*
+ * Allocate a page worth of efx_tso_header structures, and string them
+ * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
+ */
+static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
+{
+ struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+ struct efx_tso_header *tsoh;
+ dma_addr_t dma_addr;
+ u8 *base_kva, *kva;
+
+ base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
+ if (base_kva == NULL) {
+ netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
+ "Unable to allocate page for TSO headers\n");
+ return -ENOMEM;
+ }
+
+ /* pci_alloc_consistent() allocates pages. */
+ EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
+
+ for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
+ tsoh = (struct efx_tso_header *)kva;
+ tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
+ tsoh->next = tx_queue->tso_headers_free;
+ tx_queue->tso_headers_free = tsoh;
+ }
+
+ return 0;
+}
+
+
+/* Free up a TSO header, and all others in the same page. */
+static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
+ struct efx_tso_header *tsoh,
+ struct pci_dev *pci_dev)
+{
+ struct efx_tso_header **p;
+ unsigned long base_kva;
+ dma_addr_t base_dma;
+
+ base_kva = (unsigned long)tsoh & PAGE_MASK;
+ base_dma = tsoh->dma_addr & PAGE_MASK;
+
+ p = &tx_queue->tso_headers_free;
+ while (*p != NULL) {
+ if (((unsigned long)*p & PAGE_MASK) == base_kva)
+ *p = (*p)->next;
+ else
+ p = &(*p)->next;
+ }
+
+ pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
+}
+
+static struct efx_tso_header *
+efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
+{
+ struct efx_tso_header *tsoh;
+
+ tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
+ if (unlikely(!tsoh))
+ return NULL;
+
+ tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
+ TSOH_BUFFER(tsoh), header_len,
+ PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
+ tsoh->dma_addr))) {
+ kfree(tsoh);
+ return NULL;
+ }
+
+ tsoh->unmap_len = header_len;
+ return tsoh;
+}
+
+static void
+efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
+{
+ pci_unmap_single(tx_queue->efx->pci_dev,
+ tsoh->dma_addr, tsoh->unmap_len,
+ PCI_DMA_TODEVICE);
+ kfree(tsoh);
+}
+
+/**
+ * efx_tx_queue_insert - push descriptors onto the TX queue
+ * @tx_queue: Efx TX queue
+ * @dma_addr: DMA address of fragment
+ * @len: Length of fragment
+ * @final_buffer: The final buffer inserted into the queue
+ *
+ * Push descriptors onto the TX queue. Return 0 on success or 1 if
+ * @tx_queue is full.
+ */
+static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, unsigned len,
+ struct efx_tx_buffer **final_buffer)
+{
+ struct efx_tx_buffer *buffer;
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned dma_len, fill_level, insert_ptr;
+ int q_space;
+
+ EFX_BUG_ON_PARANOID(len <= 0);
+
+ fill_level = tx_queue->insert_count - tx_queue->old_read_count;
+ /* -1 as there is no way to represent all descriptors used */
+ q_space = efx->txq_entries - 1 - fill_level;
+
+ while (1) {
+ if (unlikely(q_space-- <= 0)) {
+ /* It might be that completions have happened
+ * since the xmit path last checked. Update
+ * the xmit path's copy of read_count.
+ */
+ netif_tx_stop_queue(tx_queue->core_txq);
+ /* This memory barrier protects the change of
+ * queue state from the access of read_count. */
+ smp_mb();
+ tx_queue->old_read_count =
+ ACCESS_ONCE(tx_queue->read_count);
+ fill_level = (tx_queue->insert_count
+ - tx_queue->old_read_count);
+ q_space = efx->txq_entries - 1 - fill_level;
+ if (unlikely(q_space-- <= 0)) {
+ *final_buffer = NULL;
+ return 1;
+ }
+ smp_mb();
+ netif_tx_start_queue(tx_queue->core_txq);
+ }
+
+ insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
+ buffer = &tx_queue->buffer[insert_ptr];
+ ++tx_queue->insert_count;
+
+ EFX_BUG_ON_PARANOID(tx_queue->insert_count -
+ tx_queue->read_count >=
+ efx->txq_entries);
+
+ efx_tsoh_free(tx_queue, buffer);
+ EFX_BUG_ON_PARANOID(buffer->len);
+ EFX_BUG_ON_PARANOID(buffer->unmap_len);
+ EFX_BUG_ON_PARANOID(buffer->skb);
+ EFX_BUG_ON_PARANOID(!buffer->continuation);
+ EFX_BUG_ON_PARANOID(buffer->tsoh);
+
+ buffer->dma_addr = dma_addr;
+
+ dma_len = efx_max_tx_len(efx, dma_addr);
+
+ /* If there is enough space to send then do so */
+ if (dma_len >= len)
+ break;
+
+ buffer->len = dma_len; /* Don't set the other members */
+ dma_addr += dma_len;
+ len -= dma_len;
+ }
+
+ EFX_BUG_ON_PARANOID(!len);
+ buffer->len = len;
+ *final_buffer = buffer;
+ return 0;
+}
+
+
+/*
+ * Put a TSO header into the TX queue.
+ *
+ * This is special-cased because we know that it is small enough to fit in
+ * a single fragment, and we know it doesn't cross a page boundary. It
+ * also allows us to not worry about end-of-packet etc.
+ */
+static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
+ struct efx_tso_header *tsoh, unsigned len)
+{
+ struct efx_tx_buffer *buffer;
+
+ buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
+ efx_tsoh_free(tx_queue, buffer);
+ EFX_BUG_ON_PARANOID(buffer->len);
+ EFX_BUG_ON_PARANOID(buffer->unmap_len);
+ EFX_BUG_ON_PARANOID(buffer->skb);
+ EFX_BUG_ON_PARANOID(!buffer->continuation);
+ EFX_BUG_ON_PARANOID(buffer->tsoh);
+ buffer->len = len;
+ buffer->dma_addr = tsoh->dma_addr;
+ buffer->tsoh = tsoh;
+
+ ++tx_queue->insert_count;
+}
+
+
+/* Remove descriptors put into a tx_queue. */
+static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer;
+ dma_addr_t unmap_addr;
+
+ /* Work backwards until we hit the original insert pointer value */
+ while (tx_queue->insert_count != tx_queue->write_count) {
+ --tx_queue->insert_count;
+ buffer = &tx_queue->buffer[tx_queue->insert_count &
+ tx_queue->ptr_mask];
+ efx_tsoh_free(tx_queue, buffer);
+ EFX_BUG_ON_PARANOID(buffer->skb);
+ if (buffer->unmap_len) {
+ unmap_addr = (buffer->dma_addr + buffer->len -
+ buffer->unmap_len);
+ if (buffer->unmap_single)
+ pci_unmap_single(tx_queue->efx->pci_dev,
+ unmap_addr, buffer->unmap_len,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(tx_queue->efx->pci_dev,
+ unmap_addr, buffer->unmap_len,
+ PCI_DMA_TODEVICE);
+ buffer->unmap_len = 0;
+ }
+ buffer->len = 0;
+ buffer->continuation = true;
+ }
+}
+
+
+/* Parse the SKB header and initialise state. */
+static void tso_start(struct tso_state *st, const struct sk_buff *skb)
+{
+ /* All ethernet/IP/TCP headers combined size is TCP header size
+ * plus offset of TCP header relative to start of packet.
+ */
+ st->header_len = ((tcp_hdr(skb)->doff << 2u)
+ + PTR_DIFF(tcp_hdr(skb), skb->data));
+ st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;
+
+ if (st->protocol == htons(ETH_P_IP))
+ st->ipv4_id = ntohs(ip_hdr(skb)->id);
+ else
+ st->ipv4_id = 0;
+ st->seqnum = ntohl(tcp_hdr(skb)->seq);
+
+ EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
+ EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
+ EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
+
+ st->packet_space = st->full_packet_size;
+ st->out_len = skb->len - st->header_len;
+ st->unmap_len = 0;
+ st->unmap_single = false;
+}
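+
+/* Illustrative numbers: for an untagged Ethernet + IPv4 + TCP frame
+ * with no options, the TCP header starts at offset 34 and doff == 5,
+ * so header_len == 34 + 20 == 54; with gso_size == 1460 each output
+ * segment is full_packet_size == 54 + 1460 == 1514 bytes.
+ */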
+
+static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
+ skb_frag_t *frag)
+{
+ st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
+ frag->size, DMA_TO_DEVICE);
+ if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
+ st->unmap_single = false;
+ st->unmap_len = frag->size;
+ st->in_len = frag->size;
+ st->dma_addr = st->unmap_addr;
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
+ const struct sk_buff *skb)
+{
+ int hl = st->header_len;
+ int len = skb_headlen(skb) - hl;
+
+ st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
+ len, PCI_DMA_TODEVICE);
+ if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
+ st->unmap_single = true;
+ st->unmap_len = len;
+ st->in_len = len;
+ st->dma_addr = st->unmap_addr;
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+
+/**
+ * tso_fill_packet_with_fragment - form descriptors for the current fragment
+ * @tx_queue: Efx TX queue
+ * @skb: Socket buffer
+ * @st: TSO state
+ *
+ * Form descriptors for the current fragment, until we reach the end
+ * of fragment or end-of-packet. Return 0 on success, 1 if not enough
+ * space in @tx_queue.
+ */
+static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb,
+ struct tso_state *st)
+{
+ struct efx_tx_buffer *buffer;
+ int n, end_of_packet, rc;
+
+ if (st->in_len == 0)
+ return 0;
+ if (st->packet_space == 0)
+ return 0;
+
+ EFX_BUG_ON_PARANOID(st->in_len <= 0);
+ EFX_BUG_ON_PARANOID(st->packet_space <= 0);
+
+ n = min(st->in_len, st->packet_space);
+
+ st->packet_space -= n;
+ st->out_len -= n;
+ st->in_len -= n;
+
+ rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
+ if (likely(rc == 0)) {
+ if (st->out_len == 0)
+ /* Transfer ownership of the skb */
+ buffer->skb = skb;
+
+ end_of_packet = st->out_len == 0 || st->packet_space == 0;
+ buffer->continuation = !end_of_packet;
+
+ if (st->in_len == 0) {
+ /* Transfer ownership of the pci mapping */
+ buffer->unmap_len = st->unmap_len;
+ buffer->unmap_single = st->unmap_single;
+ st->unmap_len = 0;
+ }
+ }
+
+ st->dma_addr += n;
+ return rc;
+}
+
+
+/**
+ * tso_start_new_packet - generate a new header and prepare for the new packet
+ * @tx_queue: Efx TX queue
+ * @skb: Socket buffer
+ * @st: TSO state
+ *
+ * Generate a new header and prepare for the new packet. Return 0 on
+ * success, or -1 if failed to alloc header.
+ */
+static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb,
+ struct tso_state *st)
+{
+ struct efx_tso_header *tsoh;
+ struct tcphdr *tsoh_th;
+ unsigned ip_length;
+ u8 *header;
+
+ /* Allocate a DMA-mapped header buffer. */
+ if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
+ if (tx_queue->tso_headers_free == NULL) {
+ if (efx_tsoh_block_alloc(tx_queue))
+ return -1;
+ }
+ EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
+ tsoh = tx_queue->tso_headers_free;
+ tx_queue->tso_headers_free = tsoh->next;
+ tsoh->unmap_len = 0;
+ } else {
+ tx_queue->tso_long_headers++;
+ tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
+ if (unlikely(!tsoh))
+ return -1;
+ }
+
+ header = TSOH_BUFFER(tsoh);
+ tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
+
+ /* Copy and update the headers. */
+ memcpy(header, skb->data, st->header_len);
+
+ tsoh_th->seq = htonl(st->seqnum);
+ st->seqnum += skb_shinfo(skb)->gso_size;
+ if (st->out_len > skb_shinfo(skb)->gso_size) {
+ /* This packet will not finish the TSO burst. */
+ ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
+ tsoh_th->fin = 0;
+ tsoh_th->psh = 0;
+ } else {
+ /* This packet will be the last in the TSO burst. */
+ ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
+ tsoh_th->fin = tcp_hdr(skb)->fin;
+ tsoh_th->psh = tcp_hdr(skb)->psh;
+ }
+
+ if (st->protocol == htons(ETH_P_IP)) {
+ struct iphdr *tsoh_iph =
+ (struct iphdr *)(header + SKB_IPV4_OFF(skb));
+
+ tsoh_iph->tot_len = htons(ip_length);
+
+ /* Linux leaves suitable gaps in the IP ID space for us to fill. */
+ tsoh_iph->id = htons(st->ipv4_id);
+ st->ipv4_id++;
+ } else {
+ struct ipv6hdr *tsoh_iph =
+ (struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));
+
+ tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
+ }
+
+ st->packet_space = skb_shinfo(skb)->gso_size;
+ ++tx_queue->tso_packets;
+
+ /* Form a descriptor for this header. */
+ efx_tso_put_header(tx_queue, tsoh, st->header_len);
+
+ return 0;
+}
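+
+/* Continuing the illustrative numbers above: for a non-final IPv4
+ * segment, ip_length == 1514 - 14 == 1500, which is exactly the
+ * tot_len of a full-sized frame on a 1500-byte MTU link.
+ */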
+
+
+/**
+ * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
+ * @tx_queue: Efx TX queue
+ * @skb: Socket buffer
+ *
+ * Context: You must hold netif_tx_lock() to call this function.
+ *
+ * Add socket buffer @skb to @tx_queue, doing TSO; return != 0 if
+ * @skb was not enqueued. In all cases @skb is consumed. Return
+ * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
+ */
+static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
+ struct sk_buff *skb)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ int frag_i, rc, rc2 = NETDEV_TX_OK;
+ struct tso_state state;
+
+ /* Find the packet protocol and sanity-check it */
+ state.protocol = efx_tso_check_protocol(skb);
+
+ EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
+
+ tso_start(&state, skb);
+
+ /* Assume that the skb header area contains exactly the headers, and
+ * all payload is in the frag list.
+ */
+ if (skb_headlen(skb) == state.header_len) {
+ /* Grab the first payload fragment. */
+ EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
+ frag_i = 0;
+ rc = tso_get_fragment(&state, efx,
+ skb_shinfo(skb)->frags + frag_i);
+ if (rc)
+ goto mem_err;
+ } else {
+ rc = tso_get_head_fragment(&state, efx, skb);
+ if (rc)
+ goto mem_err;
+ frag_i = -1;
+ }
+
+ if (tso_start_new_packet(tx_queue, skb, &state) < 0)
+ goto mem_err;
+
+ while (1) {
+ rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
+ if (unlikely(rc)) {
+ rc2 = NETDEV_TX_BUSY;
+ goto unwind;
+ }
+
+ /* Move onto the next fragment? */
+ if (state.in_len == 0) {
+ if (++frag_i >= skb_shinfo(skb)->nr_frags)
+ /* End of payload reached. */
+ break;
+ rc = tso_get_fragment(&state, efx,
+ skb_shinfo(skb)->frags + frag_i);
+ if (rc)
+ goto mem_err;
+ }
+
+ /* Start at new packet? */
+ if (state.packet_space == 0 &&
+ tso_start_new_packet(tx_queue, skb, &state) < 0)
+ goto mem_err;
+ }
+
+ /* Pass off to hardware */
+ efx_nic_push_buffers(tx_queue);
+
+ tx_queue->tso_bursts++;
+ return NETDEV_TX_OK;
+
+ mem_err:
+ netif_err(efx, tx_err, efx->net_dev,
+ "Out of memory for TSO headers, or PCI mapping error\n");
+ dev_kfree_skb_any(skb);
+
+ unwind:
+ /* Free the DMA mapping we were in the process of writing out */
+ if (state.unmap_len) {
+ if (state.unmap_single)
+ pci_unmap_single(efx->pci_dev, state.unmap_addr,
+ state.unmap_len, PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(efx->pci_dev, state.unmap_addr,
+ state.unmap_len, PCI_DMA_TODEVICE);
+ }
+
+ efx_enqueue_unwind(tx_queue);
+ return rc2;
+}
+
+
+/*
+ * Free up all TSO data structures associated with tx_queue. This
+ * routine should be called only once the tx_queue is empty and
+ * will no longer be used.
+ */
+static void efx_fini_tso(struct efx_tx_queue *tx_queue)
+{
+ unsigned i;
+
+ if (tx_queue->buffer) {
+ for (i = 0; i <= tx_queue->ptr_mask; ++i)
+ efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
+ }
+
+ while (tx_queue->tso_headers_free != NULL)
+ efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
+ tx_queue->efx->pci_dev);
+}
diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c
new file mode 100644
index 000000000000..7c21b334a75b
--- /dev/null
+++ b/drivers/net/ethernet/sfc/txc43128_phy.c
@@ -0,0 +1,560 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2006-2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/*
+ * Driver for Transwitch/Mysticom CX4 retimer
+ * see www.transwitch.com, part is TXC-43128
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include "efx.h"
+#include "mdio_10g.h"
+#include "phy.h"
+#include "nic.h"
+
+/* We expect these MMDs to be in the package */
+#define TXC_REQUIRED_DEVS (MDIO_DEVS_PCS | \
+ MDIO_DEVS_PMAPMD | \
+ MDIO_DEVS_PHYXS)
+
+#define TXC_LOOPBACKS ((1 << LOOPBACK_PCS) | \
+ (1 << LOOPBACK_PMAPMD) | \
+ (1 << LOOPBACK_PHYXS_WS))
+
+/**************************************************************************
+ *
+ * Compile-time config
+ *
+ **************************************************************************
+ */
+#define TXCNAME "TXC43128"
+/* Total length of time we'll wait for the PHY to come out of reset (ms) */
+#define TXC_MAX_RESET_TIME 500
+/* Interval between checks (ms) */
+#define TXC_RESET_WAIT 10
+/* How long to run BIST (us) */
+#define TXC_BIST_DURATION 50
+
+/**************************************************************************
+ *
+ * Register definitions
+ *
+ **************************************************************************
+ */
+
+/* Command register */
+#define TXC_GLRGS_GLCMD 0xc004
+/* Useful bits in command register */
+/* Lane power-down */
+#define TXC_GLCMD_L01PD_LBN 5
+#define TXC_GLCMD_L23PD_LBN 6
+/* Limited SW reset: preserves configuration but
+ * initiates a logic reset. Self-clearing */
+#define TXC_GLCMD_LMTSWRST_LBN 14
+
+/* Signal Quality Control */
+#define TXC_GLRGS_GSGQLCTL 0xc01a
+/* Enable bit */
+#define TXC_GSGQLCT_SGQLEN_LBN 15
+/* Lane selection */
+#define TXC_GSGQLCT_LNSL_LBN 13
+#define TXC_GSGQLCT_LNSL_WIDTH 2
+
+/* Analog TX control */
+#define TXC_ALRGS_ATXCTL 0xc040
+/* Lane power-down */
+#define TXC_ATXCTL_TXPD3_LBN 15
+#define TXC_ATXCTL_TXPD2_LBN 14
+#define TXC_ATXCTL_TXPD1_LBN 13
+#define TXC_ATXCTL_TXPD0_LBN 12
+
+/* Amplitude on lanes 0, 1 */
+#define TXC_ALRGS_ATXAMP0 0xc041
+/* Amplitude on lanes 2, 3 */
+#define TXC_ALRGS_ATXAMP1 0xc042
+/* Bit position of value for lane 0 (or 2) */
+#define TXC_ATXAMP_LANE02_LBN 3
+/* Bit position of value for lane 1 (or 3) */
+#define TXC_ATXAMP_LANE13_LBN 11
+
+#define TXC_ATXAMP_1280_mV 0
+#define TXC_ATXAMP_1200_mV 8
+#define TXC_ATXAMP_1120_mV 12
+#define TXC_ATXAMP_1060_mV 14
+#define TXC_ATXAMP_0820_mV 25
+#define TXC_ATXAMP_0720_mV 26
+#define TXC_ATXAMP_0580_mV 27
+#define TXC_ATXAMP_0440_mV 28
+
+#define TXC_ATXAMP_0820_BOTH \
+ ((TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE02_LBN) \
+ | (TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE13_LBN))
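+/* i.e. 0xc8c8: 25 (0x19) << 3 for lanes 0/2 and 25 << 11 for lanes 1/3 */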
+
+#define TXC_ATXAMP_DEFAULT 0x6060 /* From databook */
+
+/* Preemphasis on lanes 0, 1 */
+#define TXC_ALRGS_ATXPRE0 0xc043
+/* Preemphasis on lanes 2, 3 */
+#define TXC_ALRGS_ATXPRE1 0xc044
+
+#define TXC_ATXPRE_NONE 0
+#define TXC_ATXPRE_DEFAULT 0x1010 /* From databook */
+
+#define TXC_ALRGS_ARXCTL 0xc045
+/* Lane power-down */
+#define TXC_ARXCTL_RXPD3_LBN 15
+#define TXC_ARXCTL_RXPD2_LBN 14
+#define TXC_ARXCTL_RXPD1_LBN 13
+#define TXC_ARXCTL_RXPD0_LBN 12
+
+/* Main control */
+#define TXC_MRGS_CTL 0xc340
+/* Bits in main control */
+#define TXC_MCTL_RESET_LBN 15 /* Self clear */
+#define TXC_MCTL_TXLED_LBN 14 /* 1 to show align status */
+#define TXC_MCTL_RXLED_LBN 13 /* 1 to show align status */
+
+/* GPIO output */
+#define TXC_GPIO_OUTPUT 0xc346
+#define TXC_GPIO_DIR 0xc348
+
+/* Vendor-specific BIST registers */
+#define TXC_BIST_CTL 0xc280
+#define TXC_BIST_TXFRMCNT 0xc281
+#define TXC_BIST_RX0FRMCNT 0xc282
+#define TXC_BIST_RX1FRMCNT 0xc283
+#define TXC_BIST_RX2FRMCNT 0xc284
+#define TXC_BIST_RX3FRMCNT 0xc285
+#define TXC_BIST_RX0ERRCNT 0xc286
+#define TXC_BIST_RX1ERRCNT 0xc287
+#define TXC_BIST_RX2ERRCNT 0xc288
+#define TXC_BIST_RX3ERRCNT 0xc289
+
+/* BIST type (controls bit pattern in test) */
+#define TXC_BIST_CTRL_TYPE_LBN 10
+#define TXC_BIST_CTRL_TYPE_TSD 0 /* TranSwitch Deterministic */
+#define TXC_BIST_CTRL_TYPE_CRP 1 /* CRPAT standard */
+#define TXC_BIST_CTRL_TYPE_CJP 2 /* CJPAT standard */
+#define TXC_BIST_CTRL_TYPE_TSR 3 /* TranSwitch pseudo-random */
+/* Set this to 1 for 10 bit and 0 for 8 bit */
+#define TXC_BIST_CTRL_B10EN_LBN 12
+/* Enable BIST (write 0 to disable) */
+#define TXC_BIST_CTRL_ENAB_LBN 13
+/* Stop BIST (self-clears when stop complete) */
+#define TXC_BIST_CTRL_STOP_LBN 14
+/* Start BIST (cleared by writing 1 to STOP) */
+#define TXC_BIST_CTRL_STRT_LBN 15
+
+/* Mt. Diablo test configuration */
+#define TXC_MTDIABLO_CTRL 0xc34f
+#define TXC_MTDIABLO_CTRL_PMA_LOOP_LBN 10
+
+struct txc43128_data {
+ unsigned long bug10934_timer;
+ enum efx_phy_mode phy_mode;
+ enum efx_loopback_mode loopback_mode;
+};
+
+/* The PHY sometimes needs a reset to bring the link back up. So long as
+ * it reports link down, we reset it every 5 seconds.
+ */
+#define BUG10934_RESET_INTERVAL (5 * HZ)
+
+/* Perform a reset that doesn't clear configuration changes */
+static void txc_reset_logic(struct efx_nic *efx);
+
+/* Set the output value of a gpio */
+void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int on)
+{
+ efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_OUTPUT, 1 << pin, on);
+}
+
+/* Set up the GPIO direction register */
+void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir)
+{
+ efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_DIR, 1 << pin, dir);
+}
+
+/* Reset the PMA/PMD MMD. The documentation is explicit that this does a
+ * global reset (it's less clear what resetting the other MMDs does). */
+static int txc_reset_phy(struct efx_nic *efx)
+{
+ int rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PMAPMD,
+ TXC_MAX_RESET_TIME / TXC_RESET_WAIT,
+ TXC_RESET_WAIT);
+ if (rc < 0)
+ goto fail;
+
+ /* Check that all the MMDs we expect are present and responding. */
+ rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS);
+ if (rc < 0)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, TXCNAME ": reset timed out!\n");
+ return rc;
+}
+
+/* Run a single BIST on one MMD */
+static int txc_bist_one(struct efx_nic *efx, int mmd, int test)
+{
+ int ctrl, bctl;
+ int lane;
+ int rc = 0;
+
+ /* Set PMA to test into loopback using Mt Diablo reg as per app note */
+ ctrl = efx_mdio_read(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL);
+ ctrl |= (1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
+ efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
+
+ /* The BIST app. note lists these as 3 distinct steps. */
+ /* Set the BIST type */
+ bctl = (test << TXC_BIST_CTRL_TYPE_LBN);
+ efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
+
+ /* Set the BSTEN bit in the BIST Control register to enable */
+ bctl |= (1 << TXC_BIST_CTRL_ENAB_LBN);
+ efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
+
+ /* Set the BSTRT bit in the BIST Control register */
+ efx_mdio_write(efx, mmd, TXC_BIST_CTL,
+ bctl | (1 << TXC_BIST_CTRL_STRT_LBN));
+
+ /* Wait. */
+ udelay(TXC_BIST_DURATION);
+
+ /* Set the BSTOP bit in the BIST Control register */
+ bctl |= (1 << TXC_BIST_CTRL_STOP_LBN);
+ efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
+
+ /* The STOP bit should go off when things have stopped */
+ while (bctl & (1 << TXC_BIST_CTRL_STOP_LBN))
+ bctl = efx_mdio_read(efx, mmd, TXC_BIST_CTL);
+
+ /* Check that all the error counts are 0 and all the frame counts
+ * are non-zero */
+ for (lane = 0; lane < 4; lane++) {
+ int count = efx_mdio_read(efx, mmd, TXC_BIST_RX0ERRCNT + lane);
+ if (count != 0) {
+ netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
+ "Lane %d had %d errs\n", lane, count);
+ rc = -EIO;
+ }
+ count = efx_mdio_read(efx, mmd, TXC_BIST_RX0FRMCNT + lane);
+ if (count == 0) {
+ netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
+ "Lane %d got 0 frames\n", lane);
+ rc = -EIO;
+ }
+ }
+
+ if (rc == 0)
+ netif_info(efx, hw, efx->net_dev, TXCNAME": BIST pass\n");
+
+ /* Disable BIST */
+ efx_mdio_write(efx, mmd, TXC_BIST_CTL, 0);
+
+ /* Turn off loopback */
+ ctrl &= ~(1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
+ efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
+
+ return rc;
+}
+
+static int txc_bist(struct efx_nic *efx)
+{
+ return txc_bist_one(efx, MDIO_MMD_PCS, TXC_BIST_CTRL_TYPE_TSD);
+}
+
+/* Push the non-configurable defaults into the PHY. This must be
+ * done after every full reset */
+static void txc_apply_defaults(struct efx_nic *efx)
+{
+ int mctrl;
+
+ /* Turn amplitude down and preemphasis off on the host side
+ * (PHY<->MAC) as this is believed less likely to upset Falcon
+ * and no adverse effects have been noted. It probably also
+ * saves a picowatt or two */
+
+ /* Turn off preemphasis */
+ efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE0, TXC_ATXPRE_NONE);
+ efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE1, TXC_ATXPRE_NONE);
+
+ /* Turn down the amplitude */
+ efx_mdio_write(efx, MDIO_MMD_PHYXS,
+ TXC_ALRGS_ATXAMP0, TXC_ATXAMP_0820_BOTH);
+ efx_mdio_write(efx, MDIO_MMD_PHYXS,
+ TXC_ALRGS_ATXAMP1, TXC_ATXAMP_0820_BOTH);
+
+ /* Set the line side amplitude and preemphasis to the databook
+ * defaults as an erratum causes them to be 0 on at least some
+ * PHY rev.s */
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ TXC_ALRGS_ATXPRE0, TXC_ATXPRE_DEFAULT);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ TXC_ALRGS_ATXPRE1, TXC_ATXPRE_DEFAULT);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ TXC_ALRGS_ATXAMP0, TXC_ATXAMP_DEFAULT);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ TXC_ALRGS_ATXAMP1, TXC_ATXAMP_DEFAULT);
+
+ /* Set up the LEDs */
+ mctrl = efx_mdio_read(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL);
+
+ /* Set the Green and Red LEDs to their default modes */
+ mctrl &= ~((1 << TXC_MCTL_TXLED_LBN) | (1 << TXC_MCTL_RXLED_LBN));
+ efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL, mctrl);
+
+ /* Databook recommends doing this after configuration changes */
+ txc_reset_logic(efx);
+
+ falcon_board(efx)->type->init_phy(efx);
+}
+
+static int txc43128_phy_probe(struct efx_nic *efx)
+{
+ struct txc43128_data *phy_data;
+
+ /* Allocate phy private storage */
+ phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+ efx->phy_data = phy_data;
+ phy_data->phy_mode = efx->phy_mode;
+
+ efx->mdio.mmds = TXC_REQUIRED_DEVS;
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+
+ efx->loopback_modes = TXC_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
+
+ return 0;
+}
+
+/* Initialisation entry point for this PHY driver */
+static int txc43128_phy_init(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = txc_reset_phy(efx);
+ if (rc < 0)
+ return rc;
+
+ rc = txc_bist(efx);
+ if (rc < 0)
+ return rc;
+
+ txc_apply_defaults(efx);
+
+ return 0;
+}
+
+/* Set the lane power down state in the global registers */
+static void txc_glrgs_lane_power(struct efx_nic *efx, int mmd)
+{
+ int pd = (1 << TXC_GLCMD_L01PD_LBN) | (1 << TXC_GLCMD_L23PD_LBN);
+ int ctl = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
+
+ if (!(efx->phy_mode & PHY_MODE_LOW_POWER))
+ ctl &= ~pd;
+ else
+ ctl |= pd;
+
+ efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, ctl);
+}
+
+/* Set the lane power down state in the analog control registers */
+static void txc_analog_lane_power(struct efx_nic *efx, int mmd)
+{
+ int txpd = (1 << TXC_ATXCTL_TXPD3_LBN) | (1 << TXC_ATXCTL_TXPD2_LBN)
+ | (1 << TXC_ATXCTL_TXPD1_LBN) | (1 << TXC_ATXCTL_TXPD0_LBN);
+ int rxpd = (1 << TXC_ARXCTL_RXPD3_LBN) | (1 << TXC_ARXCTL_RXPD2_LBN)
+ | (1 << TXC_ARXCTL_RXPD1_LBN) | (1 << TXC_ARXCTL_RXPD0_LBN);
+ int txctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ATXCTL);
+ int rxctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ARXCTL);
+
+ if (!(efx->phy_mode & PHY_MODE_LOW_POWER)) {
+ txctl &= ~txpd;
+ rxctl &= ~rxpd;
+ } else {
+ txctl |= txpd;
+ rxctl |= rxpd;
+ }
+
+ efx_mdio_write(efx, mmd, TXC_ALRGS_ATXCTL, txctl);
+ efx_mdio_write(efx, mmd, TXC_ALRGS_ARXCTL, rxctl);
+}
+
+static void txc_set_power(struct efx_nic *efx)
+{
+ /* According to the data book, all the MMDs can do low power */
+ efx_mdio_set_mmds_lpower(efx,
+ !!(efx->phy_mode & PHY_MODE_LOW_POWER),
+ TXC_REQUIRED_DEVS);
+
+ /* Global register bank is in PCS, PHY XS. These control the host
+ * side and line side settings respectively. */
+ txc_glrgs_lane_power(efx, MDIO_MMD_PCS);
+ txc_glrgs_lane_power(efx, MDIO_MMD_PHYXS);
+
+ /* Analog register bank in PMA/PMD, PHY XS */
+ txc_analog_lane_power(efx, MDIO_MMD_PMAPMD);
+ txc_analog_lane_power(efx, MDIO_MMD_PHYXS);
+}
+
+static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd)
+{
+ int val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
+ int tries = 50;
+
+ val |= (1 << TXC_GLCMD_LMTSWRST_LBN);
+ efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val);
+ while (tries--) {
+ val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
+ if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN)))
+ break;
+ udelay(1);
+ }
+ if (!tries)
+ netif_info(efx, hw, efx->net_dev,
+ TXCNAME " Logic reset timed out!\n");
+}
+
+/* Perform a logic reset. This preserves the configuration registers
+ * and is needed for some configuration changes to take effect */
+static void txc_reset_logic(struct efx_nic *efx)
+{
+ /* The data sheet claims we can do the logic reset on either the
+ * PCS or the PHYXS and the result is a reset of both host- and
+ * line-side logic. */
+ txc_reset_logic_mmd(efx, MDIO_MMD_PCS);
+}
+
+static bool txc43128_phy_read_link(struct efx_nic *efx)
+{
+ return efx_mdio_links_ok(efx, TXC_REQUIRED_DEVS);
+}
+
+static int txc43128_phy_reconfigure(struct efx_nic *efx)
+{
+ struct txc43128_data *phy_data = efx->phy_data;
+ enum efx_phy_mode mode_change = efx->phy_mode ^ phy_data->phy_mode;
+ bool loop_change = LOOPBACK_CHANGED(phy_data, efx, TXC_LOOPBACKS);
+
+ if (efx->phy_mode & mode_change & PHY_MODE_TX_DISABLED) {
+ txc_reset_phy(efx);
+ txc_apply_defaults(efx);
+ falcon_reset_xaui(efx);
+ mode_change &= ~PHY_MODE_TX_DISABLED;
+ }
+
+ efx_mdio_transmit_disable(efx);
+ efx_mdio_phy_reconfigure(efx);
+ if (mode_change & PHY_MODE_LOW_POWER)
+ txc_set_power(efx);
+
+ /* The data sheet claims this is required after every reconfiguration
+ * (note at end of 7.1), but we mustn't do it when nothing changes as
+ * it glitches the link, and reconfigure gets called on link change,
+ * so we get an IRQ storm on link up. */
+ if (loop_change || mode_change)
+ txc_reset_logic(efx);
+
+ phy_data->phy_mode = efx->phy_mode;
+ phy_data->loopback_mode = efx->loopback_mode;
+
+ return 0;
+}
+
+static void txc43128_phy_fini(struct efx_nic *efx)
+{
+ /* Disable link events */
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
+}
+
+static void txc43128_phy_remove(struct efx_nic *efx)
+{
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
+}
+
+/* Periodic callback: this exists mainly to poll link status as we
+ * don't use LASI interrupts */
+static bool txc43128_phy_poll(struct efx_nic *efx)
+{
+ struct txc43128_data *data = efx->phy_data;
+ bool was_up = efx->link_state.up;
+
+ efx->link_state.up = txc43128_phy_read_link(efx);
+ efx->link_state.speed = 10000;
+ efx->link_state.fd = true;
+ efx->link_state.fc = efx->wanted_fc;
+
+ if (efx->link_state.up || (efx->loopback_mode != LOOPBACK_NONE)) {
+ data->bug10934_timer = jiffies;
+ } else {
+ if (time_after_eq(jiffies, (data->bug10934_timer +
+ BUG10934_RESET_INTERVAL))) {
+ data->bug10934_timer = jiffies;
+ txc_reset_logic(efx);
+ }
+ }
+
+ return efx->link_state.up != was_up;
+}
+
+static const char *const txc43128_test_names[] = {
+ "bist"
+};
+
+static const char *txc43128_test_name(struct efx_nic *efx, unsigned int index)
+{
+ if (index < ARRAY_SIZE(txc43128_test_names))
+ return txc43128_test_names[index];
+ return NULL;
+}
+
+static int txc43128_run_tests(struct efx_nic *efx, int *results, unsigned flags)
+{
+ int rc;
+
+ if (!(flags & ETH_TEST_FL_OFFLINE))
+ return 0;
+
+ rc = txc_reset_phy(efx);
+ if (rc < 0)
+ return rc;
+
+ rc = txc_bist(efx);
+ txc_apply_defaults(efx);
+ results[0] = rc ? -1 : 1;
+ return rc;
+}
+
+static void txc43128_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+ mdio45_ethtool_gset(&efx->mdio, ecmd);
+}
+
+const struct efx_phy_operations falcon_txc_phy_ops = {
+ .probe = txc43128_phy_probe,
+ .init = txc43128_phy_init,
+ .reconfigure = txc43128_phy_reconfigure,
+ .poll = txc43128_phy_poll,
+ .fini = txc43128_phy_fini,
+ .remove = txc43128_phy_remove,
+ .get_settings = txc43128_get_settings,
+ .set_settings = efx_mdio_set_settings,
+ .test_alive = efx_mdio_test_alive,
+ .run_tests = txc43128_run_tests,
+ .test_name = txc43128_test_name,
+};
diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h
new file mode 100644
index 000000000000..e4dd3a7f304b
--- /dev/null
+++ b/drivers/net/ethernet/sfc/workarounds.h
@@ -0,0 +1,59 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2006-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_WORKAROUNDS_H
+#define EFX_WORKAROUNDS_H
+
+/*
+ * Hardware workarounds.
+ * Bug numbers are from Solarflare's Bugzilla.
+ */
+
+#define EFX_WORKAROUND_ALWAYS(efx) 1
+#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
+#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
+#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
+#define EFX_WORKAROUND_10G(efx) 1
+
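+/* Each EFX_WORKAROUND_* below expands to one of the predicates above
+ * and is tested against the NIC in use, e.g.
+ *	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32)
+ * in efx_enqueue_skb() (tx.c).
+ */
+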
+/* XAUI resets if link not detected */
+#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
+/* RX PCIe double split performance issue */
+#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
+/* Bit-bashed I2C reads cause performance drop */
+#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
+/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
+ * or a PCIe error (bug 11028) */
+#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
+/* Transmit flow control may get disabled */
+#define EFX_WORKAROUND_11482 EFX_WORKAROUND_FALCON_AB
+/* Truncated IPv4 packets can confuse the TX packet parser */
+#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
+/* Legacy ISR read can return zero once */
+#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
+/* Legacy interrupt storm when interrupt fifo fills */
+#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
+
+/* Spurious parity errors in TSORT buffers */
+#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
+/* Unaligned read request >512 bytes after aligning may break TSORT */
+#define EFX_WORKAROUND_5391 EFX_WORKAROUND_FALCON_A
+/* iSCSI parsing errors */
+#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A
+/* RX events go missing */
+#define EFX_WORKAROUND_5676 EFX_WORKAROUND_FALCON_A
+/* RX_RESET on A1 */
+#define EFX_WORKAROUND_6555 EFX_WORKAROUND_FALCON_A
+/* Increase filter depth to avoid RX_RESET */
+#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A
+/* Flushes may never complete */
+#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_AB
+/* Leak overlength packets rather than free */
+#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
+
+#endif /* EFX_WORKAROUNDS_H */
diff --git a/drivers/net/ethernet/sgi/Kconfig b/drivers/net/ethernet/sgi/Kconfig
new file mode 100644
index 000000000000..c1c4bb868a3b
--- /dev/null
+++ b/drivers/net/ethernet/sgi/Kconfig
@@ -0,0 +1,36 @@
+#
+# SGI device configuration
+#
+
+config NET_VENDOR_SGI
+ bool "SGI devices"
+ default y
+ depends on (PCI && SGI_IP27) || SGI_IP32
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about SGI devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_SGI
+
+config SGI_IOC3_ETH
+ bool "SGI IOC3 Ethernet"
+ depends on PCI && SGI_IP27
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+config SGI_O2MACE_ETH
+ tristate "SGI O2 MACE Fast Ethernet support"
+ depends on SGI_IP32=y
+
+endif # NET_VENDOR_SGI
diff --git a/drivers/net/ethernet/sgi/Makefile b/drivers/net/ethernet/sgi/Makefile
new file mode 100644
index 000000000000..e5bedd271e29
--- /dev/null
+++ b/drivers/net/ethernet/sgi/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the SGI device drivers.
+#
+
+obj-$(CONFIG_SGI_O2MACE_ETH) += meth.o
+obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
new file mode 100644
index 000000000000..ac149d99f78f
--- /dev/null
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -0,0 +1,1684 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Driver for SGI's IOC3 based Ethernet cards as found in the PCI card.
+ *
+ * Copyright (C) 1999, 2000, 01, 03, 06 Ralf Baechle
+ * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc.
+ *
+ * References:
+ * o IOC3 ASIC specification 4.51, 1996-04-18
+ * o IEEE 802.3 specification, 2000 edition
+ * o DP38840A Specification, National Semiconductor, March 1997
+ *
+ * To do:
+ *
+ * o Handle allocation failures in ioc3_alloc_skb() more gracefully.
+ * o Handle allocation failures in ioc3_init_rings().
+ * o Use prefetching for large packets. What is a good lower limit for
+ * prefetching?
+ * o We're probably allocating a bit too much memory.
+ * o Use hardware checksums.
+ * o Convert to using an IOC3 meta driver.
+ * o Which PHYs might possibly be attached to the IOC3 in real life,
+ * and which workarounds are required for them? Do we ever have Lucent's?
+ * o For the 2.5 branch kill the mii-tool ioctls.
+ */
+
+#define IOC3_NAME "ioc3-eth"
+#define IOC3_VERSION "2.6.3-4"
+
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+
+#ifdef CONFIG_SERIAL_8250
+#include <linux/serial_core.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_reg.h>
+#endif
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <net/ip.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/sn/types.h>
+#include <asm/sn/ioc3.h>
+#include <asm/pci/bridge.h>
+
+/*
+ * 64 RX buffers. This is tunable in the range of 16 <= x < 512. The
+ * value must be a power of two.
+ */
+#define RX_BUFFS 64
+
+#define ETCSR_FD ((17<<ETCSR_IPGR2_SHIFT) | (11<<ETCSR_IPGR1_SHIFT) | 21)
+#define ETCSR_HD ((21<<ETCSR_IPGR2_SHIFT) | (21<<ETCSR_IPGR1_SHIFT) | 21)
+
+/* Private per NIC data of the driver. */
+struct ioc3_private {
+ struct ioc3 *regs;
+ unsigned long *rxr; /* pointer to receiver ring */
+ struct ioc3_etxd *txr;
+ struct sk_buff *rx_skbs[512];
+ struct sk_buff *tx_skbs[128];
+ int rx_ci; /* RX consumer index */
+ int rx_pi; /* RX producer index */
+ int tx_ci; /* TX consumer index */
+ int tx_pi; /* TX producer index */
+ int txqlen;
+ u32 emcr, ehar_h, ehar_l;
+ spinlock_t ioc3_lock;
+ struct mii_if_info mii;
+
+ struct pci_dev *pdev;
+
+ /* Members used by autonegotiation */
+ struct timer_list ioc3_timer;
+};
+
+static inline struct net_device *priv_netdev(struct ioc3_private *dev)
+{
+ return (void *)dev - ((sizeof(struct net_device) + 31) & ~31);
+}
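+
+/*
+ * This inverts netdev_priv(): alloc_etherdev() places the private area
+ * right after struct net_device, rounded up to a 32-byte boundary, so
+ * stepping back by the rounded size recovers the net_device.
+ */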
+
+static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void ioc3_set_multicast_list(struct net_device *dev);
+static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void ioc3_timeout(struct net_device *dev);
+static inline unsigned int ioc3_hash(const unsigned char *addr);
+static inline void ioc3_stop(struct ioc3_private *ip);
+static void ioc3_init(struct net_device *dev);
+
+static const char ioc3_str[] = "IOC3 Ethernet";
+static const struct ethtool_ops ioc3_ethtool_ops;
+
+/* We use this to acquire receive skb's that we can DMA directly into. */
+
+#define IOC3_CACHELINE 128UL
+
+static inline unsigned long aligned_rx_skb_addr(unsigned long addr)
+{
+ return (~addr + 1) & (IOC3_CACHELINE - 1UL);
+}
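+
+/*
+ * (~addr + 1) is the two's complement -addr, so masking with
+ * (IOC3_CACHELINE - 1) gives the number of bytes needed to round addr
+ * up to the next 128-byte boundary.  For example, an addr ending in
+ * 0x28 yields 0x58, and 0x28 + 0x58 = 0x80; an already aligned addr
+ * yields 0.
+ */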
+
+static inline struct sk_buff * ioc3_alloc_skb(unsigned long length,
+ unsigned int gfp_mask)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(length + IOC3_CACHELINE - 1, gfp_mask);
+ if (likely(skb)) {
+ int offset = aligned_rx_skb_addr((unsigned long) skb->data);
+ if (offset)
+ skb_reserve(skb, offset);
+ }
+
+ return skb;
+}
+
+static inline unsigned long ioc3_map(void *ptr, unsigned long vdev)
+{
+#ifdef CONFIG_SGI_IP27
+ vdev <<= 57; /* Shift to PCI64_ATTR_VIRTUAL */
+
+ return vdev | (0xaUL << PCI64_ATTR_TARG_SHFT) | PCI64_ATTR_PREF |
+ ((unsigned long)ptr & TO_PHYS_MASK);
+#else
+ return virt_to_bus(ptr);
+#endif
+}
+
+/* BEWARE: The IOC3 documentation documents the size of rx buffers as
+ 1644 while it's actually 1664. This one was nasty to track down ... */
+#define RX_OFFSET 10
+#define RX_BUF_ALLOC_SIZE (1664 + RX_OFFSET + IOC3_CACHELINE)
+
+/* DMA barrier to separate cached and uncached accesses. */
+#define BARRIER() \
+ __asm__("sync" ::: "memory")
+
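+/*
+ * Presumably the sync is what keeps the cached descriptor stores from
+ * being reordered past the uncached MMIO store that hands the
+ * descriptor to the chip; see the BARRIER() use in ioc3_start_xmit().
+ */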
+
+#define IOC3_SIZE 0x100000
+
+/*
+ * IOC3 is a big endian device
+ *
+ * Unorthodox but makes the users of these macros more readable - the pointer
+ * to the IOC3's memory mapped registers is expected as struct ioc3 * ioc3
+ * in the environment.
+ */
+#define ioc3_r_mcr() be32_to_cpu(ioc3->mcr)
+#define ioc3_w_mcr(v) do { ioc3->mcr = cpu_to_be32(v); } while (0)
+#define ioc3_w_gpcr_s(v) do { ioc3->gpcr_s = cpu_to_be32(v); } while (0)
+#define ioc3_r_emcr() be32_to_cpu(ioc3->emcr)
+#define ioc3_w_emcr(v) do { ioc3->emcr = cpu_to_be32(v); } while (0)
+#define ioc3_r_eisr() be32_to_cpu(ioc3->eisr)
+#define ioc3_w_eisr(v) do { ioc3->eisr = cpu_to_be32(v); } while (0)
+#define ioc3_r_eier() be32_to_cpu(ioc3->eier)
+#define ioc3_w_eier(v) do { ioc3->eier = cpu_to_be32(v); } while (0)
+#define ioc3_r_ercsr() be32_to_cpu(ioc3->ercsr)
+#define ioc3_w_ercsr(v) do { ioc3->ercsr = cpu_to_be32(v); } while (0)
+#define ioc3_r_erbr_h() be32_to_cpu(ioc3->erbr_h)
+#define ioc3_w_erbr_h(v) do { ioc3->erbr_h = cpu_to_be32(v); } while (0)
+#define ioc3_r_erbr_l() be32_to_cpu(ioc3->erbr_l)
+#define ioc3_w_erbr_l(v) do { ioc3->erbr_l = cpu_to_be32(v); } while (0)
+#define ioc3_r_erbar() be32_to_cpu(ioc3->erbar)
+#define ioc3_w_erbar(v) do { ioc3->erbar = cpu_to_be32(v); } while (0)
+#define ioc3_r_ercir() be32_to_cpu(ioc3->ercir)
+#define ioc3_w_ercir(v) do { ioc3->ercir = cpu_to_be32(v); } while (0)
+#define ioc3_r_erpir() be32_to_cpu(ioc3->erpir)
+#define ioc3_w_erpir(v) do { ioc3->erpir = cpu_to_be32(v); } while (0)
+#define ioc3_r_ertr() be32_to_cpu(ioc3->ertr)
+#define ioc3_w_ertr(v) do { ioc3->ertr = cpu_to_be32(v); } while (0)
+#define ioc3_r_etcsr() be32_to_cpu(ioc3->etcsr)
+#define ioc3_w_etcsr(v) do { ioc3->etcsr = cpu_to_be32(v); } while (0)
+#define ioc3_r_ersr() be32_to_cpu(ioc3->ersr)
+#define ioc3_w_ersr(v) do { ioc3->ersr = cpu_to_be32(v); } while (0)
+#define ioc3_r_etcdc() be32_to_cpu(ioc3->etcdc)
+#define ioc3_w_etcdc(v) do { ioc3->etcdc = cpu_to_be32(v); } while (0)
+#define ioc3_r_ebir() be32_to_cpu(ioc3->ebir)
+#define ioc3_w_ebir(v) do { ioc3->ebir = cpu_to_be32(v); } while (0)
+#define ioc3_r_etbr_h() be32_to_cpu(ioc3->etbr_h)
+#define ioc3_w_etbr_h(v) do { ioc3->etbr_h = cpu_to_be32(v); } while (0)
+#define ioc3_r_etbr_l() be32_to_cpu(ioc3->etbr_l)
+#define ioc3_w_etbr_l(v) do { ioc3->etbr_l = cpu_to_be32(v); } while (0)
+#define ioc3_r_etcir() be32_to_cpu(ioc3->etcir)
+#define ioc3_w_etcir(v) do { ioc3->etcir = cpu_to_be32(v); } while (0)
+#define ioc3_r_etpir() be32_to_cpu(ioc3->etpir)
+#define ioc3_w_etpir(v) do { ioc3->etpir = cpu_to_be32(v); } while (0)
+#define ioc3_r_emar_h() be32_to_cpu(ioc3->emar_h)
+#define ioc3_w_emar_h(v) do { ioc3->emar_h = cpu_to_be32(v); } while (0)
+#define ioc3_r_emar_l() be32_to_cpu(ioc3->emar_l)
+#define ioc3_w_emar_l(v) do { ioc3->emar_l = cpu_to_be32(v); } while (0)
+#define ioc3_r_ehar_h() be32_to_cpu(ioc3->ehar_h)
+#define ioc3_w_ehar_h(v) do { ioc3->ehar_h = cpu_to_be32(v); } while (0)
+#define ioc3_r_ehar_l() be32_to_cpu(ioc3->ehar_l)
+#define ioc3_w_ehar_l(v) do { ioc3->ehar_l = cpu_to_be32(v); } while (0)
+#define ioc3_r_micr() be32_to_cpu(ioc3->micr)
+#define ioc3_w_micr(v) do { ioc3->micr = cpu_to_be32(v); } while (0)
+#define ioc3_r_midr_r() be32_to_cpu(ioc3->midr_r)
+#define ioc3_w_midr_r(v) do { ioc3->midr_r = cpu_to_be32(v); } while (0)
+#define ioc3_r_midr_w() be32_to_cpu(ioc3->midr_w)
+#define ioc3_w_midr_w(v) do { ioc3->midr_w = cpu_to_be32(v); } while (0)
+
+static inline u32 mcr_pack(u32 pulse, u32 sample)
+{
+ return (pulse << 10) | (sample << 2);
+}
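+
+/*
+ * Judging from nic_wait() below, bit 1 of the MCR is a done flag and
+ * bit 0 carries the sampled data bit, so mcr_pack() presumably programs
+ * a one-wire pulse of `pulse' ticks that is sampled after `sample'
+ * ticks, e.g. mcr_pack(500, 65) for the reset/presence cycle.
+ */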
+
+static int nic_wait(struct ioc3 *ioc3)
+{
+ u32 mcr;
+
+ do {
+ mcr = ioc3_r_mcr();
+ } while (!(mcr & 2));
+
+ return mcr & 1;
+}
+
+static int nic_reset(struct ioc3 *ioc3)
+{
+ int presence;
+
+ ioc3_w_mcr(mcr_pack(500, 65));
+ presence = nic_wait(ioc3);
+
+ ioc3_w_mcr(mcr_pack(0, 500));
+ nic_wait(ioc3);
+
+ return presence;
+}
+
+static inline int nic_read_bit(struct ioc3 *ioc3)
+{
+ int result;
+
+ ioc3_w_mcr(mcr_pack(6, 13));
+ result = nic_wait(ioc3);
+ ioc3_w_mcr(mcr_pack(0, 100));
+ nic_wait(ioc3);
+
+ return result;
+}
+
+static inline void nic_write_bit(struct ioc3 *ioc3, int bit)
+{
+ if (bit)
+ ioc3_w_mcr(mcr_pack(6, 110));
+ else
+ ioc3_w_mcr(mcr_pack(80, 30));
+
+ nic_wait(ioc3);
+}
+
+/*
+ * Read a byte from an iButton device
+ */
+static u32 nic_read_byte(struct ioc3 *ioc3)
+{
+ u32 result = 0;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ result = (result >> 1) | (nic_read_bit(ioc3) << 7);
+
+ return result;
+}
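+
+/*
+ * Bits arrive LSB first, as usual for one-wire devices: each new bit is
+ * shifted in from the top, so after eight reads the first bit has ended
+ * up in bit 0.  Reading the bit sequence 1,0,0,0,0,0,0,0 yields 0x01.
+ */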
+
+/*
+ * Write a byte to an iButton device
+ */
+static void nic_write_byte(struct ioc3 *ioc3, int byte)
+{
+ int i, bit;
+
+ for (i = 8; i; i--) {
+ bit = byte & 1;
+ byte >>= 1;
+
+ nic_write_bit(ioc3, bit);
+ }
+}
+
+static u64 nic_find(struct ioc3 *ioc3, int *last)
+{
+ int a, b, index, disc;
+ u64 address = 0;
+
+ nic_reset(ioc3);
+ /* Search ROM. */
+ nic_write_byte(ioc3, 0xf0);
+
+ /* Algorithm from ``Book of iButton Standards''. */
+ for (index = 0, disc = 0; index < 64; index++) {
+ a = nic_read_bit(ioc3);
+ b = nic_read_bit(ioc3);
+
+ if (a && b) {
+ printk("NIC search failed (not fatal).\n");
+ *last = 0;
+ return 0;
+ }
+
+ if (!a && !b) {
+ if (index == *last) {
+ address |= 1UL << index;
+ } else if (index > *last) {
+ address &= ~(1UL << index);
+ disc = index;
+ } else if ((address & (1UL << index)) == 0)
+ disc = index;
+ nic_write_bit(ioc3, address & (1UL << index));
+ continue;
+ } else {
+ if (a)
+ address |= 1UL << index;
+ else
+ address &= ~(1UL << index);
+ nic_write_bit(ioc3, a);
+ continue;
+ }
+ }
+
+ *last = disc;
+
+ return address;
+}
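+
+/*
+ * In the search above, `a' is the wired-AND of the current address bit
+ * over all responding devices and `b' the wired-AND of its complement:
+ * a == b == 1 means nothing responded, a == b == 0 means the devices
+ * disagree (a discrepancy, remembered in `disc' so the next pass can
+ * explore the other branch), and otherwise all devices agree on `a'.
+ */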
+
+static int nic_init(struct ioc3 *ioc3)
+{
+ const char *unknown = "unknown";
+ const char *type = unknown;
+ u8 crc;
+ u8 serial[6];
+ int save = 0, i;
+
+ while (1) {
+ u64 reg;
+ reg = nic_find(ioc3, &save);
+
+ switch (reg & 0xff) {
+ case 0x91:
+ type = "DS1981U";
+ break;
+ default:
+ if (save == 0) {
+ /* Let the caller try again. */
+ return -1;
+ }
+ continue;
+ }
+
+ nic_reset(ioc3);
+
+ /* Match ROM. */
+ nic_write_byte(ioc3, 0x55);
+ for (i = 0; i < 8; i++)
+ nic_write_byte(ioc3, (reg >> (i << 3)) & 0xff);
+
+ reg >>= 8; /* Shift out type. */
+ for (i = 0; i < 6; i++) {
+ serial[i] = reg & 0xff;
+ reg >>= 8;
+ }
+ crc = reg & 0xff;
+ break;
+ }
+
+ printk("Found %s NIC", type);
+ if (type != unknown)
+ printk (" registration number %pM, CRC %02x", serial, crc);
+ printk(".\n");
+
+ return 0;
+}
+
+/*
+ * Read the NIC (Number-In-a-Can) device used to store the MAC address on
+ * SN0 / SN00 nodeboards and PCI cards.
+ */
+static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
+{
+ struct ioc3 *ioc3 = ip->regs;
+ u8 nic[14];
+ int tries = 2; /* There may be some problem with the battery? */
+ int i;
+
+ ioc3_w_gpcr_s(1 << 21);
+
+ while (tries--) {
+ if (!nic_init(ioc3))
+ break;
+ udelay(500);
+ }
+
+ if (tries < 0) {
+ printk("Failed to read MAC address\n");
+ return;
+ }
+
+ /* Read Memory. */
+ nic_write_byte(ioc3, 0xf0);
+ nic_write_byte(ioc3, 0x00);
+ nic_write_byte(ioc3, 0x00);
+
+ for (i = 13; i >= 0; i--)
+ nic[i] = nic_read_byte(ioc3);
+
+ for (i = 2; i < 8; i++)
+ priv_netdev(ip)->dev_addr[i - 2] = nic[i];
+}
+
+/*
+ * Ok, this is hosed by design.  It's necessary to know what machine the
+ * NIC is in, in order to know how to read the NIC address.  We also have
+ * to know whether it's a PCI card or a NIC on the node board ...
+ */
+static void ioc3_get_eaddr(struct ioc3_private *ip)
+{
+ ioc3_get_eaddr_nic(ip);
+
+ printk("Ethernet address is %pM.\n", priv_netdev(ip)->dev_addr);
+}
+
+static void __ioc3_set_mac_address(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3 *ioc3 = ip->regs;
+
+ ioc3_w_emar_h((dev->dev_addr[5] << 8) | dev->dev_addr[4]);
+ ioc3_w_emar_l((dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) |
+ (dev->dev_addr[1] << 8) | dev->dev_addr[0]);
+}
+
+static int ioc3_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct sockaddr *sa = addr;
+
+ memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+
+ spin_lock_irq(&ip->ioc3_lock);
+ __ioc3_set_mac_address(dev);
+ spin_unlock_irq(&ip->ioc3_lock);
+
+ return 0;
+}
+
+/*
+ * Caller must hold the ioc3_lock even for MII readers. This is also
+ * used to protect the transmitter side but it's low contention.
+ */
+static int ioc3_mdio_read(struct net_device *dev, int phy, int reg)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3 *ioc3 = ip->regs;
+
+ while (ioc3_r_micr() & MICR_BUSY);
+ ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG);
+ while (ioc3_r_micr() & MICR_BUSY);
+
+ return ioc3_r_midr_r() & MIDR_DATA_MASK;
+}
+
+static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3 *ioc3 = ip->regs;
+
+ while (ioc3_r_micr() & MICR_BUSY);
+ ioc3_w_midr_w(data);
+ ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg);
+ while (ioc3_r_micr() & MICR_BUSY);
+}
+
+static int ioc3_mii_init(struct ioc3_private *ip);
+
+static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3 *ioc3 = ip->regs;
+
+ dev->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK);
+ return &dev->stats;
+}
+
+static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
+{
+ struct ethhdr *eh = eth_hdr(skb);
+ uint32_t csum, ehsum;
+ unsigned int proto;
+ struct iphdr *ih;
+ uint16_t *ew;
+ unsigned char *cp;
+
+ /*
+ * Did hardware handle the checksum at all? The cases we can handle
+ * are:
+ *
+ * - TCP and UDP checksums of IPv4 only.
+ * - IPv6 would be doable but we keep that for later ...
+ * - Only unfragmented packets. Did somebody already tell you
+ * fragmentation is evil?
+ * - don't care about packet size. Worst case when processing a
+ * malformed packet we'll try to access the packet at ip header +
+ * 64 bytes which is still inside the skb. Even in the unlikely
+ * case where the checksum is right the higher layers will still
+ * drop the packet as appropriate.
+ */
+ if (eh->h_proto != htons(ETH_P_IP))
+ return;
+
+ ih = (struct iphdr *) ((char *)eh + ETH_HLEN);
+ if (ip_is_fragment(ih))
+ return;
+
+ proto = ih->protocol;
+ if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
+ return;
+
+ /* Same as tx - compute csum of pseudo header */
+ csum = hwsum +
+ (ih->tot_len - (ih->ihl << 2)) +
+ htons((uint16_t)ih->protocol) +
+ (ih->saddr >> 16) + (ih->saddr & 0xffff) +
+ (ih->daddr >> 16) + (ih->daddr & 0xffff);
+
+ /* Sum up ethernet dest addr, src addr and protocol */
+ ew = (uint16_t *) eh;
+ ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6];
+
+ ehsum = (ehsum & 0xffff) + (ehsum >> 16);
+ ehsum = (ehsum & 0xffff) + (ehsum >> 16);
+
+ csum += 0xffff ^ ehsum;
+
+ /* In the next step we also subtract the 1's complement
+ checksum of the trailing ethernet CRC. */
+ cp = (char *)eh + len; /* points at trailing CRC */
+ if (len & 1) {
+ csum += 0xffff ^ (uint16_t) ((cp[1] << 8) | cp[0]);
+ csum += 0xffff ^ (uint16_t) ((cp[3] << 8) | cp[2]);
+ } else {
+ csum += 0xffff ^ (uint16_t) ((cp[0] << 8) | cp[1]);
+ csum += 0xffff ^ (uint16_t) ((cp[2] << 8) | cp[3]);
+ }
+
+ csum = (csum & 0xffff) + (csum >> 16);
+ csum = (csum & 0xffff) + (csum >> 16);
+
+ if (csum == 0xffff)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
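+
+/*
+ * Note on the 0xffff ^ x terms above: in 1's complement arithmetic,
+ * adding the complement of a 16-bit word is the same as subtracting the
+ * word.  The hardware sum covers the whole frame, so adding the pseudo
+ * header and subtracting the ethernet header words and the trailing CRC
+ * leaves just the TCP/UDP checksum, which folds to 0xffff exactly when
+ * the packet was received intact.
+ */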
+
+static inline void ioc3_rx(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct sk_buff *skb, *new_skb;
+ struct ioc3 *ioc3 = ip->regs;
+ int rx_entry, n_entry, len;
+ struct ioc3_erxbuf *rxb;
+ unsigned long *rxr;
+ u32 w0, err;
+
+ rxr = (unsigned long *) ip->rxr; /* Ring base */
+ rx_entry = ip->rx_ci; /* RX consume index */
+ n_entry = ip->rx_pi;
+
+ skb = ip->rx_skbs[rx_entry];
+ rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
+ w0 = be32_to_cpu(rxb->w0);
+
+ while (w0 & ERXBUF_V) {
+ err = be32_to_cpu(rxb->err); /* It's valid ... */
+ if (err & ERXBUF_GOODPKT) {
+ len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
+ skb_trim(skb, len);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
+ if (!new_skb) {
+				/* Ouch, drop the packet and just recycle the
+				   old skb to keep the ring filled. */
+ dev->stats.rx_dropped++;
+ new_skb = skb;
+ goto next;
+ }
+
+ if (likely(dev->features & NETIF_F_RXCSUM))
+ ioc3_tcpudp_checksum(skb,
+ w0 & ERXBUF_IPCKSUM_MASK, len);
+
+ netif_rx(skb);
+
+ ip->rx_skbs[rx_entry] = NULL; /* Poison */
+
+ /* Because we reserve afterwards. */
+ skb_put(new_skb, (1664 + RX_OFFSET));
+ rxb = (struct ioc3_erxbuf *) new_skb->data;
+ skb_reserve(new_skb, RX_OFFSET);
+
+ dev->stats.rx_packets++; /* Statistics */
+ dev->stats.rx_bytes += len;
+ } else {
+ /* The frame is invalid and the skb never
+ reached the network layer so we can just
+ recycle it. */
+ new_skb = skb;
+ dev->stats.rx_errors++;
+ }
+ if (err & ERXBUF_CRCERR) /* Statistics */
+ dev->stats.rx_crc_errors++;
+ if (err & ERXBUF_FRAMERR)
+ dev->stats.rx_frame_errors++;
+next:
+ ip->rx_skbs[n_entry] = new_skb;
+ rxr[n_entry] = cpu_to_be64(ioc3_map(rxb, 1));
+ rxb->w0 = 0; /* Clear valid flag */
+ n_entry = (n_entry + 1) & 511; /* Update erpir */
+
+ /* Now go on to the next ring entry. */
+ rx_entry = (rx_entry + 1) & 511;
+ skb = ip->rx_skbs[rx_entry];
+ rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
+ w0 = be32_to_cpu(rxb->w0);
+ }
+ ioc3_w_erpir((n_entry << 3) | ERPIR_ARM);
+ ip->rx_pi = n_entry;
+ ip->rx_ci = rx_entry;
+}
+
+static inline void ioc3_tx(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ unsigned long packets, bytes;
+ struct ioc3 *ioc3 = ip->regs;
+ int tx_entry, o_entry;
+ struct sk_buff *skb;
+ u32 etcir;
+
+ spin_lock(&ip->ioc3_lock);
+ etcir = ioc3_r_etcir();
+
+ tx_entry = (etcir >> 7) & 127;
+ o_entry = ip->tx_ci;
+ packets = 0;
+ bytes = 0;
+
+ while (o_entry != tx_entry) {
+ packets++;
+ skb = ip->tx_skbs[o_entry];
+ bytes += skb->len;
+ dev_kfree_skb_irq(skb);
+ ip->tx_skbs[o_entry] = NULL;
+
+ o_entry = (o_entry + 1) & 127; /* Next */
+
+ etcir = ioc3_r_etcir(); /* More pkts sent? */
+ tx_entry = (etcir >> 7) & 127;
+ }
+
+ dev->stats.tx_packets += packets;
+ dev->stats.tx_bytes += bytes;
+ ip->txqlen -= packets;
+
+ if (ip->txqlen < 128)
+ netif_wake_queue(dev);
+
+ ip->tx_ci = o_entry;
+ spin_unlock(&ip->ioc3_lock);
+}
+
+/*
+ * Deal with fatal IOC3 errors.  This condition might be caused by hardware
+ * or software problems, so we should try to recover more gracefully if this
+ * ever happens.  In theory we might be flooded with such error interrupts if
+ * something really goes wrong, so we might also consider taking the
+ * interface down.
+ */
+static void ioc3_error(struct net_device *dev, u32 eisr)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ unsigned char *iface = dev->name;
+
+ spin_lock(&ip->ioc3_lock);
+
+ if (eisr & EISR_RXOFLO)
+ printk(KERN_ERR "%s: RX overflow.\n", iface);
+ if (eisr & EISR_RXBUFOFLO)
+ printk(KERN_ERR "%s: RX buffer overflow.\n", iface);
+ if (eisr & EISR_RXMEMERR)
+ printk(KERN_ERR "%s: RX PCI error.\n", iface);
+ if (eisr & EISR_RXPARERR)
+ printk(KERN_ERR "%s: RX SSRAM parity error.\n", iface);
+ if (eisr & EISR_TXBUFUFLO)
+ printk(KERN_ERR "%s: TX buffer underflow.\n", iface);
+ if (eisr & EISR_TXMEMERR)
+ printk(KERN_ERR "%s: TX PCI error.\n", iface);
+
+ ioc3_stop(ip);
+ ioc3_init(dev);
+ ioc3_mii_init(ip);
+
+ netif_wake_queue(dev);
+
+ spin_unlock(&ip->ioc3_lock);
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t ioc3_interrupt(int irq, void *_dev)
+{
+ struct net_device *dev = (struct net_device *)_dev;
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3 *ioc3 = ip->regs;
+ const u32 enabled = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
+ EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
+ EISR_TXEXPLICIT | EISR_TXMEMERR;
+ u32 eisr;
+
+ eisr = ioc3_r_eisr() & enabled;
+
+ ioc3_w_eisr(eisr);
+ (void) ioc3_r_eisr(); /* Flush */
+
+ if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
+ EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
+ ioc3_error(dev, eisr);
+ if (eisr & EISR_RXTIMERINT)
+ ioc3_rx(dev);
+ if (eisr & EISR_TXEXPLICIT)
+ ioc3_tx(dev);
+
+ return IRQ_HANDLED;
+}
+
+static inline void ioc3_setup_duplex(struct ioc3_private *ip)
+{
+ struct ioc3 *ioc3 = ip->regs;
+
+ if (ip->mii.full_duplex) {
+ ioc3_w_etcsr(ETCSR_FD);
+ ip->emcr |= EMCR_DUPLEX;
+ } else {
+ ioc3_w_etcsr(ETCSR_HD);
+ ip->emcr &= ~EMCR_DUPLEX;
+ }
+ ioc3_w_emcr(ip->emcr);
+}
+
+static void ioc3_timer(unsigned long data)
+{
+ struct ioc3_private *ip = (struct ioc3_private *) data;
+
+ /* Print the link status if it has changed */
+ mii_check_media(&ip->mii, 1, 0);
+ ioc3_setup_duplex(ip);
+
+ ip->ioc3_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2s */
+ add_timer(&ip->ioc3_timer);
+}
+
+/*
+ * Try to find a PHY. There is no apparent relation between the MII addresses
+ * in the SGI documentation and what we find in reality, so we simply probe
+ * for the PHY. It seems IOC3 PHYs usually live on address 31. One of my
+ * onboard IOC3s has the special oddity that probing doesn't seem to find it,
+ * yet the interface seems to work fine; so for now, if probing fails, we
+ * simply default to PHY 31 instead of bailing out.
+ */
+static int ioc3_mii_init(struct ioc3_private *ip)
+{
+ struct net_device *dev = priv_netdev(ip);
+ int i, found = 0, res = 0;
+ int ioc3_phy_workaround = 1;
+ u16 word;
+
+ for (i = 0; i < 32; i++) {
+ word = ioc3_mdio_read(dev, i, MII_PHYSID1);
+
+ if (word != 0xffff && word != 0x0000) {
+ found = 1;
+ break; /* Found a PHY */
+ }
+ }
+
+ if (!found) {
+ if (ioc3_phy_workaround)
+ i = 31;
+ else {
+ ip->mii.phy_id = -1;
+ res = -ENODEV;
+ goto out;
+ }
+ }
+
+ ip->mii.phy_id = i;
+
+out:
+ return res;
+}
+
+static void ioc3_mii_start(struct ioc3_private *ip)
+{
+ ip->ioc3_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
+ ip->ioc3_timer.data = (unsigned long) ip;
+ ip->ioc3_timer.function = ioc3_timer;
+ add_timer(&ip->ioc3_timer);
+}
+
+static inline void ioc3_clean_rx_ring(struct ioc3_private *ip)
+{
+ struct sk_buff *skb;
+ int i;
+
+ for (i = ip->rx_ci; i & 15; i++) {
+ ip->rx_skbs[ip->rx_pi] = ip->rx_skbs[ip->rx_ci];
+ ip->rxr[ip->rx_pi++] = ip->rxr[ip->rx_ci++];
+ }
+ ip->rx_pi &= 511;
+ ip->rx_ci &= 511;
+
+ for (i = ip->rx_ci; i != ip->rx_pi; i = (i+1) & 511) {
+ struct ioc3_erxbuf *rxb;
+ skb = ip->rx_skbs[i];
+ rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
+ rxb->w0 = 0;
+ }
+}
+
+static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
+{
+ struct sk_buff *skb;
+ int i;
+
+ for (i=0; i < 128; i++) {
+ skb = ip->tx_skbs[i];
+ if (skb) {
+ ip->tx_skbs[i] = NULL;
+ dev_kfree_skb_any(skb);
+ }
+ ip->txr[i].cmd = 0;
+ }
+ ip->tx_pi = 0;
+ ip->tx_ci = 0;
+}
+
+static void ioc3_free_rings(struct ioc3_private *ip)
+{
+ struct sk_buff *skb;
+ int rx_entry, n_entry;
+
+ if (ip->txr) {
+ ioc3_clean_tx_ring(ip);
+ free_pages((unsigned long)ip->txr, 2);
+ ip->txr = NULL;
+ }
+
+ if (ip->rxr) {
+ n_entry = ip->rx_ci;
+ rx_entry = ip->rx_pi;
+
+ while (n_entry != rx_entry) {
+ skb = ip->rx_skbs[n_entry];
+ if (skb)
+ dev_kfree_skb_any(skb);
+
+ n_entry = (n_entry + 1) & 511;
+ }
+ free_page((unsigned long)ip->rxr);
+ ip->rxr = NULL;
+ }
+}
+
+static void ioc3_alloc_rings(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3_erxbuf *rxb;
+ unsigned long *rxr;
+ int i;
+
+ if (ip->rxr == NULL) {
+ /* Allocate and initialize rx ring. 4kb = 512 entries */
+ ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
+ rxr = (unsigned long *) ip->rxr;
+ if (!rxr)
+ printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n");
+
+		/* Now the rx buffers.  The RX ring may be larger, but
+		   we only allocate RX_BUFFS buffers for now.  Need to
+		   tune this for performance and memory later. */
+ for (i = 0; i < RX_BUFFS; i++) {
+ struct sk_buff *skb;
+
+ skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
+ if (!skb) {
+ show_free_areas(0);
+ continue;
+ }
+
+ ip->rx_skbs[i] = skb;
+
+ /* Because we reserve afterwards. */
+ skb_put(skb, (1664 + RX_OFFSET));
+ rxb = (struct ioc3_erxbuf *) skb->data;
+ rxr[i] = cpu_to_be64(ioc3_map(rxb, 1));
+ skb_reserve(skb, RX_OFFSET);
+ }
+ ip->rx_ci = 0;
+ ip->rx_pi = RX_BUFFS;
+ }
+
+ if (ip->txr == NULL) {
+ /* Allocate and initialize tx rings. 16kb = 128 bufs. */
+ ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
+ if (!ip->txr)
+ printk("ioc3_alloc_rings(): __get_free_pages() failed!\n");
+ ip->tx_pi = 0;
+ ip->tx_ci = 0;
+ }
+}
+
+static void ioc3_init_rings(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3 *ioc3 = ip->regs;
+ unsigned long ring;
+
+ ioc3_free_rings(ip);
+ ioc3_alloc_rings(dev);
+
+ ioc3_clean_rx_ring(ip);
+ ioc3_clean_tx_ring(ip);
+
+ /* Now the rx ring base, consume & produce registers. */
+ ring = ioc3_map(ip->rxr, 0);
+ ioc3_w_erbr_h(ring >> 32);
+ ioc3_w_erbr_l(ring & 0xffffffff);
+ ioc3_w_ercir(ip->rx_ci << 3);
+ ioc3_w_erpir((ip->rx_pi << 3) | ERPIR_ARM);
+
+ ring = ioc3_map(ip->txr, 0);
+
+ ip->txqlen = 0; /* nothing queued */
+
+ /* Now the tx ring base, consume & produce registers. */
+ ioc3_w_etbr_h(ring >> 32);
+ ioc3_w_etbr_l(ring & 0xffffffff);
+ ioc3_w_etpir(ip->tx_pi << 7);
+ ioc3_w_etcir(ip->tx_ci << 7);
+ (void) ioc3_r_etcir(); /* Flush */
+}
+
+static inline void ioc3_ssram_disc(struct ioc3_private *ip)
+{
+ struct ioc3 *ioc3 = ip->regs;
+ volatile u32 *ssram0 = &ioc3->ssram[0x0000];
+ volatile u32 *ssram1 = &ioc3->ssram[0x4000];
+ unsigned int pattern = 0x5555;
+
+ /* Assume the larger size SSRAM and enable parity checking */
+ ioc3_w_emcr(ioc3_r_emcr() | (EMCR_BUFSIZ | EMCR_RAMPAR));
+
+ *ssram0 = pattern;
+ *ssram1 = ~pattern & IOC3_SSRAM_DM;
+
+ if ((*ssram0 & IOC3_SSRAM_DM) != pattern ||
+ (*ssram1 & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
+ /* set ssram size to 64 KB */
+ ip->emcr = EMCR_RAMPAR;
+ ioc3_w_emcr(ioc3_r_emcr() & ~EMCR_BUFSIZ);
+ } else
+ ip->emcr = EMCR_BUFSIZ | EMCR_RAMPAR;
+}
+
+static void ioc3_init(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3 *ioc3 = ip->regs;
+
+ del_timer_sync(&ip->ioc3_timer); /* Kill if running */
+
+ ioc3_w_emcr(EMCR_RST); /* Reset */
+ (void) ioc3_r_emcr(); /* Flush WB */
+ udelay(4); /* Give it time ... */
+ ioc3_w_emcr(0);
+ (void) ioc3_r_emcr();
+
+ /* Misc registers */
+#ifdef CONFIG_SGI_IP27
+ ioc3_w_erbar(PCI64_ATTR_BAR >> 32); /* Barrier on last store */
+#else
+ ioc3_w_erbar(0); /* Let PCI API get it right */
+#endif
+ (void) ioc3_r_etcdc(); /* Clear on read */
+ ioc3_w_ercsr(15); /* RX low watermark */
+ ioc3_w_ertr(0); /* Interrupt immediately */
+ __ioc3_set_mac_address(dev);
+ ioc3_w_ehar_h(ip->ehar_h);
+ ioc3_w_ehar_l(ip->ehar_l);
+ ioc3_w_ersr(42); /* XXX should be random */
+
+ ioc3_init_rings(dev);
+
+ ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN |
+ EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN;
+ ioc3_w_emcr(ip->emcr);
+ ioc3_w_eier(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
+ EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
+ EISR_TXEXPLICIT | EISR_TXMEMERR);
+ (void) ioc3_r_eier();
+}
+
+static inline void ioc3_stop(struct ioc3_private *ip)
+{
+ struct ioc3 *ioc3 = ip->regs;
+
+	ioc3_w_emcr(0);			/* Shut up */
+ ioc3_w_eier(0); /* Disable interrupts */
+ (void) ioc3_r_eier(); /* Flush */
+}
+
+static int ioc3_open(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+
+ if (request_irq(dev->irq, ioc3_interrupt, IRQF_SHARED, ioc3_str, dev)) {
+ printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
+
+ return -EAGAIN;
+ }
+
+ ip->ehar_h = 0;
+ ip->ehar_l = 0;
+ ioc3_init(dev);
+ ioc3_mii_start(ip);
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int ioc3_close(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+
+ del_timer_sync(&ip->ioc3_timer);
+
+ netif_stop_queue(dev);
+
+ ioc3_stop(ip);
+ free_irq(dev->irq, dev);
+
+ ioc3_free_rings(ip);
+ return 0;
+}
+
+/*
+ * MENET cards have four IOC3 chips, which are attached to two sets of
+ * PCI slot resources each: the primary connections are on slots
+ * 0..3 and the secondaries are on 4..7
+ *
+ * All four ethernets are brought out to connectors; six serial ports
+ * (a pair from each of the first three IOC3s) are brought out to
+ * MiniDINs; all other subdevices are left swinging in the wind, so leave
+ * them disabled.
+ */
+
+static int ioc3_adjacent_is_ioc3(struct pci_dev *pdev, int slot)
+{
+ struct pci_dev *dev = pci_get_slot(pdev->bus, PCI_DEVFN(slot, 0));
+ int ret = 0;
+
+ if (dev) {
+ if (dev->vendor == PCI_VENDOR_ID_SGI &&
+ dev->device == PCI_DEVICE_ID_SGI_IOC3)
+ ret = 1;
+ pci_dev_put(dev);
+ }
+
+ return ret;
+}
+
+static int ioc3_is_menet(struct pci_dev *pdev)
+{
+ return pdev->bus->parent == NULL &&
+ ioc3_adjacent_is_ioc3(pdev, 0) &&
+ ioc3_adjacent_is_ioc3(pdev, 1) &&
+ ioc3_adjacent_is_ioc3(pdev, 2);
+}
+
+#ifdef CONFIG_SERIAL_8250
+/*
+ * Note about serial ports and consoles:
+ * For console output, everyone uses the IOC3 UARTA (offset 0x178)
+ * connected to the master node (look in ip27_setup_console() and
+ * ip27prom_console_write()).
+ *
+ * For serial (/dev/ttyS0 etc), we cannot have hardcoded serial port
+ * addresses on a partitioned machine. Since we currently use the ioc3
+ * serial ports, we use dynamic serial port discovery that the serial.c
+ * driver uses for pci/pnp ports (there is an entry for the SGI ioc3
+ * boards in pci_boards[]). Unfortunately, UARTA's pio address is greater
+ * than UARTB's, although UARTA on o200s has traditionally been known as
+ * port 0. So, we just use one serial port from each ioc3 (since the
+ * serial driver adds addresses to get to higher ports).
+ *
+ * The first one to do a register_console becomes the preferred console
+ * (if there is no kernel command line console= directive). /dev/console
+ * (ie 5, 1) is then "aliased" into the device number returned by the
+ * "device" routine referred to in this console structure
+ * (ip27prom_console_dev).
+ *
+ * Also look in ip27-pci.c:pci_fixup_ioc3() for some comments on working
+ * around ioc3 oddities in this respect.
+ *
+ * The IOC3 serials use a 22MHz clock rate with an additional divider which
+ * can be programmed in the SCR register if the DLAB bit is set.
+ *
+ * Register to interrupt zero because we share the interrupt with
+ * the serial driver which we don't properly support yet.
+ *
+ * Can't use UPF_IOREMAP as the whole of IOC3 resources have already been
+ * registered.
+ */
+static void __devinit ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
+{
+#define COSMISC_CONSTANT 6
+
+ struct uart_port port = {
+ .irq = 0,
+ .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF,
+ .iotype = UPIO_MEM,
+ .regshift = 0,
+ .uartclk = (22000000 << 1) / COSMISC_CONSTANT,
+
+ .membase = (unsigned char __iomem *) uart,
+ .mapbase = (unsigned long) uart,
+ };
+ unsigned char lcr;
+
+ lcr = uart->iu_lcr;
+ uart->iu_lcr = lcr | UART_LCR_DLAB;
+	uart->iu_scr = COSMISC_CONSTANT;
+	uart->iu_lcr = lcr;
+	uart->iu_lcr;		/* Read back to flush the posted write */
+ serial8250_register_port(&port);
+}
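+
+/*
+ * The uartclk handed to the 8250 core is thus the 22 MHz UART clock,
+ * doubled and pre-divided by the SCR prescaler programmed above:
+ * 44000000 / 6, or roughly 7.33 MHz, from which the 8250 core derives
+ * the usual baud = uartclk / (16 * divisor).
+ */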
+
+static void __devinit ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
+{
+ /*
+	 * We need to recognize the fourth MENET serial port and leave it
+	 * alone, as it does not have a SuperIO chip attached to it;
+	 * attempting to access it will result in bus errors.  We call
+	 * something a MENET if PCI slots 0, 1, 2 and 3 of a master PCI bus
+	 * all have an IOC3 in them.  This is paranoid, but we want to avoid
+	 * blowing up on some odd PCI box that happens to have 4 IOC3 cards
+	 * in it, so it's still not paranoid enough ...
+ */
+ if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3)
+ return;
+
+ /*
+ * Switch IOC3 to PIO mode. It probably already was but let's be
+ * paranoid
+ */
+ ioc3->gpcr_s = GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL;
+ ioc3->gpcr_s;
+ ioc3->gppr_6 = 0;
+ ioc3->gppr_6;
+ ioc3->gppr_7 = 0;
+ ioc3->gppr_7;
+ ioc3->sscr_a = ioc3->sscr_a & ~SSCR_DMA_EN;
+ ioc3->sscr_a;
+ ioc3->sscr_b = ioc3->sscr_b & ~SSCR_DMA_EN;
+ ioc3->sscr_b;
+ /* Disable all SA/B interrupts except for SA/B_INT in SIO_IEC. */
+ ioc3->sio_iec &= ~ (SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL |
+ SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER |
+ SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS |
+ SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR);
+ ioc3->sio_iec |= SIO_IR_SA_INT;
+ ioc3->sscr_a = 0;
+ ioc3->sio_iec &= ~ (SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL |
+ SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER |
+ SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS |
+ SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR);
+ ioc3->sio_iec |= SIO_IR_SB_INT;
+ ioc3->sscr_b = 0;
+
+ ioc3_8250_register(&ioc3->sregs.uarta);
+ ioc3_8250_register(&ioc3->sregs.uartb);
+}
+#endif
+
+static const struct net_device_ops ioc3_netdev_ops = {
+ .ndo_open = ioc3_open,
+ .ndo_stop = ioc3_close,
+ .ndo_start_xmit = ioc3_start_xmit,
+ .ndo_tx_timeout = ioc3_timeout,
+ .ndo_get_stats = ioc3_get_stats,
+ .ndo_set_rx_mode = ioc3_set_multicast_list,
+ .ndo_do_ioctl = ioc3_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = ioc3_set_mac_address,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static int __devinit ioc3_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ unsigned int sw_physid1, sw_physid2;
+ struct net_device *dev = NULL;
+ struct ioc3_private *ip;
+ struct ioc3 *ioc3;
+ unsigned long ioc3_base, ioc3_size;
+ u32 vendor, model, rev;
+ int err, pci_using_dac;
+
+ /* Configure DMA attributes. */
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (!err) {
+ pci_using_dac = 1;
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err < 0) {
+ printk(KERN_ERR "%s: Unable to obtain 64 bit DMA "
+ "for consistent allocations\n", pci_name(pdev));
+ goto out;
+ }
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ printk(KERN_ERR "%s: No usable DMA configuration, "
+ "aborting.\n", pci_name(pdev));
+ goto out;
+ }
+ pci_using_dac = 0;
+ }
+
+ if (pci_enable_device(pdev))
+ return -ENODEV;
+
+ dev = alloc_etherdev(sizeof(struct ioc3_private));
+ if (!dev) {
+ err = -ENOMEM;
+ goto out_disable;
+ }
+
+ if (pci_using_dac)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ err = pci_request_regions(pdev, "ioc3");
+ if (err)
+ goto out_free;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ ip = netdev_priv(dev);
+
+ dev->irq = pdev->irq;
+
+ ioc3_base = pci_resource_start(pdev, 0);
+ ioc3_size = pci_resource_len(pdev, 0);
+ ioc3 = (struct ioc3 *) ioremap(ioc3_base, ioc3_size);
+ if (!ioc3) {
+ printk(KERN_CRIT "ioc3eth(%s): ioremap failed, goodbye.\n",
+ pci_name(pdev));
+ err = -ENOMEM;
+ goto out_res;
+ }
+ ip->regs = ioc3;
+
+#ifdef CONFIG_SERIAL_8250
+ ioc3_serial_probe(pdev, ioc3);
+#endif
+
+ spin_lock_init(&ip->ioc3_lock);
+ init_timer(&ip->ioc3_timer);
+
+ ioc3_stop(ip);
+ ioc3_init(dev);
+
+ ip->pdev = pdev;
+
+ ip->mii.phy_id_mask = 0x1f;
+ ip->mii.reg_num_mask = 0x1f;
+ ip->mii.dev = dev;
+ ip->mii.mdio_read = ioc3_mdio_read;
+ ip->mii.mdio_write = ioc3_mdio_write;
+
+ ioc3_mii_init(ip);
+
+ if (ip->mii.phy_id == -1) {
+ printk(KERN_CRIT "ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
+ pci_name(pdev));
+ err = -ENODEV;
+ goto out_stop;
+ }
+
+ ioc3_mii_start(ip);
+ ioc3_ssram_disc(ip);
+ ioc3_get_eaddr(ip);
+
+ /* The IOC3-specific entries in the device structure. */
+ dev->watchdog_timeo = 5 * HZ;
+ dev->netdev_ops = &ioc3_netdev_ops;
+ dev->ethtool_ops = &ioc3_ethtool_ops;
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+ dev->features = NETIF_F_IP_CSUM;
+
+ sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
+ sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2);
+
+ err = register_netdev(dev);
+ if (err)
+ goto out_stop;
+
+ mii_check_media(&ip->mii, 1, 1);
+ ioc3_setup_duplex(ip);
+
+ vendor = (sw_physid1 << 12) | (sw_physid2 >> 4);
+ model = (sw_physid2 >> 4) & 0x3f;
+ rev = sw_physid2 & 0xf;
+ printk(KERN_INFO "%s: Using PHY %d, vendor 0x%x, model %d, "
+ "rev %d.\n", dev->name, ip->mii.phy_id, vendor, model, rev);
+ printk(KERN_INFO "%s: IOC3 SSRAM has %d kbyte.\n", dev->name,
+ ip->emcr & EMCR_BUFSIZ ? 128 : 64);
+
+ return 0;
+
+out_stop:
+ ioc3_stop(ip);
+ del_timer_sync(&ip->ioc3_timer);
+ ioc3_free_rings(ip);
+out_res:
+ pci_release_regions(pdev);
+out_free:
+ free_netdev(dev);
+out_disable:
+ /*
+ * We should call pci_disable_device(pdev); here if the IOC3 wasn't
+ * such a weird device ...
+ */
+out:
+ return err;
+}
+
+static void __devexit ioc3_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3 *ioc3 = ip->regs;
+
+ unregister_netdev(dev);
+ del_timer_sync(&ip->ioc3_timer);
+
+ iounmap(ioc3);
+ pci_release_regions(pdev);
+ free_netdev(dev);
+ /*
+ * We should call pci_disable_device(pdev); here if the IOC3 wasn't
+ * such a weird device ...
+ */
+}
+
+static DEFINE_PCI_DEVICE_TABLE(ioc3_pci_tbl) = {
+ { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, ioc3_pci_tbl);
+
+static struct pci_driver ioc3_driver = {
+ .name = "ioc3-eth",
+ .id_table = ioc3_pci_tbl,
+ .probe = ioc3_probe,
+ .remove = __devexit_p(ioc3_remove_one),
+};
+
+static int __init ioc3_init_module(void)
+{
+ return pci_register_driver(&ioc3_driver);
+}
+
+static void __exit ioc3_cleanup_module(void)
+{
+ pci_unregister_driver(&ioc3_driver);
+}
+
+static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned long data;
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3 *ioc3 = ip->regs;
+ unsigned int len;
+ struct ioc3_etxd *desc;
+ uint32_t w0 = 0;
+ int produce;
+
+ /*
+	 * The IOC3 has fairly simple-minded checksumming hardware which
+	 * simply adds up the 1's complement checksum for the entire packet
+	 * and inserts it into the transmit packet at an offset specified in
+	 * the descriptor.  This means we have to compensate manually for the
+	 * MAC header, which should not be summed, and for the TCP/UDP
+	 * pseudo-header.
+ */
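+	/*
+	 * The seeding below presumably works like this: the chip sums the
+	 * whole packet with this value already in the checksum field, so we
+	 * preload the folded pseudo-header sum minus the MAC header words
+	 * (adding 0xffff ^ x is a 1's complement subtraction), and the
+	 * hardware's final insertion then yields a correct TCP/UDP checksum.
+	 */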
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const struct iphdr *ih = ip_hdr(skb);
+ const int proto = ntohs(ih->protocol);
+ unsigned int csoff;
+ uint32_t csum, ehsum;
+ uint16_t *eh;
+
+		/* The MAC header.  skb->mac would seem the logical way to
+		   find the MAC header - except it's a NULL pointer here ... */
+ eh = (uint16_t *) skb->data;
+
+ /* Sum up dest addr, src addr and protocol */
+ ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6];
+
+		/* Fold ehsum; we can't use csum_fold(), which also negates ... */
+ ehsum = (ehsum & 0xffff) + (ehsum >> 16);
+ ehsum = (ehsum & 0xffff) + (ehsum >> 16);
+
+		/* Skip the IP header; its sum is always zero and was
+		   already filled in by ip_output.c */
+ csum = csum_tcpudp_nofold(ih->saddr, ih->daddr,
+ ih->tot_len - (ih->ihl << 2),
+ proto, 0xffff ^ ehsum);
+
+ csum = (csum & 0xffff) + (csum >> 16); /* Fold again */
+ csum = (csum & 0xffff) + (csum >> 16);
+
+ csoff = ETH_HLEN + (ih->ihl << 2);
+ if (proto == IPPROTO_UDP) {
+ csoff += offsetof(struct udphdr, check);
+ udp_hdr(skb)->check = csum;
+ }
+ if (proto == IPPROTO_TCP) {
+ csoff += offsetof(struct tcphdr, check);
+ tcp_hdr(skb)->check = csum;
+ }
+
+ w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT);
+ }
+
+ spin_lock_irq(&ip->ioc3_lock);
+
+ data = (unsigned long) skb->data;
+ len = skb->len;
+
+ produce = ip->tx_pi;
+ desc = &ip->txr[produce];
+
+ if (len <= 104) {
+ /* Short packet, let's copy it directly into the ring. */
+ skb_copy_from_linear_data(skb, desc->data, skb->len);
+ if (len < ETH_ZLEN) {
+ /* Very short packet, pad with zeros at the end. */
+ memset(desc->data + len, 0, ETH_ZLEN - len);
+ len = ETH_ZLEN;
+ }
+ desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V | w0);
+ desc->bufcnt = cpu_to_be32(len);
+ } else if ((data ^ (data + len - 1)) & 0x4000) {
+ unsigned long b2 = (data | 0x3fffUL) + 1UL;
+ unsigned long s1 = b2 - data;
+ unsigned long s2 = data + len - b2;
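+
+		/*
+		 * This branch handles buffers whose first and last bytes
+		 * differ in address bit 14, i.e. which cross a 16 kB
+		 * boundary that a single buffer pointer apparently must not
+		 * span.  b2 is data rounded up to that boundary; s1 and s2
+		 * are the lengths of the two resulting fragments.
+		 */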
+
+ desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE |
+ ETXD_B1V | ETXD_B2V | w0);
+ desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) |
+ (s2 << ETXD_B2CNT_SHIFT));
+ desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1));
+ desc->p2 = cpu_to_be64(ioc3_map((void *) b2, 1));
+ } else {
+ /* Normal sized packet that doesn't cross a page boundary. */
+ desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0);
+ desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT);
+ desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1));
+ }
+
+ BARRIER();
+
+ ip->tx_skbs[produce] = skb; /* Remember skb */
+ produce = (produce + 1) & 127;
+ ip->tx_pi = produce;
+ ioc3_w_etpir(produce << 7); /* Fire ... */
+
+ ip->txqlen++;
+
+ if (ip->txqlen >= 127)
+ netif_stop_queue(dev);
+
+ spin_unlock_irq(&ip->ioc3_lock);
+
+ return NETDEV_TX_OK;
+}
+
+static void ioc3_timeout(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+
+ printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+
+ spin_lock_irq(&ip->ioc3_lock);
+
+ ioc3_stop(ip);
+ ioc3_init(dev);
+ ioc3_mii_init(ip);
+ ioc3_mii_start(ip);
+
+ spin_unlock_irq(&ip->ioc3_lock);
+
+ netif_wake_queue(dev);
+}
+
+/*
+ * Given a multicast ethernet address, this routine calculates the
+ * address's bit index in the logical address filter mask
+ */
+
+static inline unsigned int ioc3_hash(const unsigned char *addr)
+{
+ unsigned int temp = 0;
+ u32 crc;
+ int bits;
+
+ crc = ether_crc_le(ETH_ALEN, addr);
+
+ crc &= 0x3f; /* bit reverse lowest 6 bits for hash index */
+ for (bits = 6; --bits >= 0; ) {
+ temp <<= 1;
+ temp |= (crc & 0x1);
+ crc >>= 1;
+ }
+
+ return temp;
+}
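+
+/*
+ * The loop bit-reverses the low six CRC bits, e.g. crc & 0x3f == 0x01
+ * comes out as 0x20, so the returned index lies in 0..63 and selects
+ * one bit of the 64-bit EHAR hash filter.
+ */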
+
+static void ioc3_get_drvinfo (struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+
+ strcpy (info->driver, IOC3_NAME);
+ strcpy (info->version, IOC3_VERSION);
+ strcpy (info->bus_info, pci_name(ip->pdev));
+}
+
+static int ioc3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&ip->ioc3_lock);
+ rc = mii_ethtool_gset(&ip->mii, cmd);
+ spin_unlock_irq(&ip->ioc3_lock);
+
+ return rc;
+}
+
+static int ioc3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&ip->ioc3_lock);
+ rc = mii_ethtool_sset(&ip->mii, cmd);
+ spin_unlock_irq(&ip->ioc3_lock);
+
+ return rc;
+}
+
+static int ioc3_nway_reset(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&ip->ioc3_lock);
+ rc = mii_nway_restart(&ip->mii);
+ spin_unlock_irq(&ip->ioc3_lock);
+
+ return rc;
+}
+
+static u32 ioc3_get_link(struct net_device *dev)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&ip->ioc3_lock);
+ rc = mii_link_ok(&ip->mii);
+ spin_unlock_irq(&ip->ioc3_lock);
+
+ return rc;
+}
+
+static const struct ethtool_ops ioc3_ethtool_ops = {
+ .get_drvinfo = ioc3_get_drvinfo,
+ .get_settings = ioc3_get_settings,
+ .set_settings = ioc3_set_settings,
+ .nway_reset = ioc3_nway_reset,
+ .get_link = ioc3_get_link,
+};
+
+static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct ioc3_private *ip = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&ip->ioc3_lock);
+ rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL);
+ spin_unlock_irq(&ip->ioc3_lock);
+
+ return rc;
+}
+
+static void ioc3_set_multicast_list(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3 *ioc3 = ip->regs;
+ u64 ehar = 0;
+
+ netif_stop_queue(dev); /* Lock out others. */
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ ip->emcr |= EMCR_PROMISC;
+ ioc3_w_emcr(ip->emcr);
+ (void) ioc3_r_emcr();
+ } else {
+ ip->emcr &= ~EMCR_PROMISC;
+ ioc3_w_emcr(ip->emcr); /* Clear promiscuous. */
+ (void) ioc3_r_emcr();
+
+ if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > 64)) {
+ /* Too many for hashing to make sense or we want all
+ multicast packets anyway, so skip computing all the
+ hashes and just accept all packets. */
+ ip->ehar_h = 0xffffffff;
+ ip->ehar_l = 0xffffffff;
+ } else {
+ netdev_for_each_mc_addr(ha, dev) {
+ ehar |= (1UL << ioc3_hash(ha->addr));
+ }
+ ip->ehar_h = ehar >> 32;
+ ip->ehar_l = ehar & 0xffffffff;
+ }
+ ioc3_w_ehar_h(ip->ehar_h);
+ ioc3_w_ehar_l(ip->ehar_l);
+ }
+
+ netif_wake_queue(dev); /* Let us get going again. */
+}
+
+MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
+MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_init(ioc3_init_module);
+module_exit(ioc3_cleanup_module);
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
new file mode 100644
index 000000000000..60135aa55802
--- /dev/null
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -0,0 +1,855 @@
+/*
+ * meth.c -- O2 Builtin 10/100 Ethernet driver
+ *
+ * Copyright (C) 2001-2003 Ilya Volynets
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/device.h> /* struct device, et al */
+#include <linux/netdevice.h> /* struct device, and other headers */
+#include <linux/etherdevice.h> /* eth_type_trans */
+#include <linux/ip.h> /* struct iphdr */
+#include <linux/tcp.h> /* struct tcphdr */
+#include <linux/skbuff.h>
+#include <linux/mii.h> /* MII definitions */
+
+#include <asm/ip32/mace.h>
+#include <asm/ip32/ip32_ints.h>
+
+#include <asm/io.h>
+
+#include "meth.h"
+
+#ifndef MFE_DEBUG
+#define MFE_DEBUG 0
+#endif
+
+#if MFE_DEBUG>=1
+#define DPRINTK(str,args...) printk(KERN_DEBUG "meth: %s: " str, __func__ , ## args)
+#define MFE_RX_DEBUG 2
+#else
+#define DPRINTK(str,args...)
+#define MFE_RX_DEBUG 0
+#endif
+
+
+static const char *meth_str="SGI O2 Fast Ethernet";
+
+/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */
+#define TX_TIMEOUT (400*HZ/1000)
+
+static int timeout = TX_TIMEOUT;
+module_param(timeout, int, 0);
+
+/*
+ * This structure is private to each device.  It is used to pass
+ * packets in and out, so there is a place for packet state here.
+ */
+struct meth_private {
+ /* in-memory copy of MAC Control register */
+ unsigned long mac_ctrl;
+ /* in-memory copy of DMA Control register */
+ unsigned long dma_ctrl;
+ /* address of PHY, used by mdio_* functions, initialized in mdio_probe */
+ unsigned long phy_addr;
+ tx_packet *tx_ring;
+ dma_addr_t tx_ring_dma;
+ struct sk_buff *tx_skbs[TX_RING_ENTRIES];
+ dma_addr_t tx_skb_dmas[TX_RING_ENTRIES];
+ unsigned long tx_read, tx_write, tx_count;
+
+ rx_packet *rx_ring[RX_RING_ENTRIES];
+ dma_addr_t rx_ring_dmas[RX_RING_ENTRIES];
+ struct sk_buff *rx_skbs[RX_RING_ENTRIES];
+ unsigned long rx_write;
+
+ spinlock_t meth_lock;
+};
+
+static void meth_tx_timeout(struct net_device *dev);
+static irqreturn_t meth_interrupt(int irq, void *dev_id);
+
+/* global, initialized in ip32-setup.c */
+char o2meth_eaddr[8]={0,0,0,0,0,0,0,0};
+
+static inline void load_eaddr(struct net_device *dev)
+{
+ int i;
+ u64 macaddr;
+
+ DPRINTK("Loading MAC Address: %pM\n", dev->dev_addr);
+ macaddr = 0;
+ for (i = 0; i < 6; i++)
+ macaddr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
+
+ mace->eth.mac_addr = macaddr;
+}
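+
+/*
+ * The address is packed big-endian into the low 48 bits of the
+ * register: dev_addr[0] lands in bits 47:40 and dev_addr[5] in bits
+ * 7:0, so 08:00:69:12:34:56 becomes 0x0000080069123456.
+ */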
+
+/*
+ * Waits for BUSY status of mdio bus to clear
+ */
+#define WAIT_FOR_PHY(___rval) \
+ while ((___rval = mace->eth.phy_data) & MDIO_BUSY) { \
+ udelay(25); \
+ }
+/*read phy register, return value read */
+static unsigned long mdio_read(struct meth_private *priv, unsigned long phyreg)
+{
+ unsigned long rval;
+ WAIT_FOR_PHY(rval);
+ mace->eth.phy_regs = (priv->phy_addr << 5) | (phyreg & 0x1f);
+ udelay(25);
+ mace->eth.phy_trans_go = 1;
+ udelay(25);
+ WAIT_FOR_PHY(rval);
+ return rval & MDIO_DATA_MASK;
+}
+
+static int mdio_probe(struct meth_private *priv)
+{
+ int i;
+ unsigned long p2, p3, flags;
+ /* check if phy is detected already */
+	if (priv->phy_addr >= 0 && priv->phy_addr < 32)
+		return 0;
+	spin_lock_irqsave(&priv->meth_lock, flags);
+	for (i = 0; i < 32; ++i) {
+		priv->phy_addr = i;
+		p2 = mdio_read(priv, 2);
+		p3 = mdio_read(priv, 3);
+#if MFE_DEBUG>=2
+ switch ((p2<<12)|(p3>>4)){
+ case PHY_QS6612X:
+ DPRINTK("PHY is QS6612X\n");
+ break;
+ case PHY_ICS1889:
+ DPRINTK("PHY is ICS1889\n");
+ break;
+ case PHY_ICS1890:
+ DPRINTK("PHY is ICS1890\n");
+ break;
+ case PHY_DP83840:
+ DPRINTK("PHY is DP83840\n");
+ break;
+ }
+#endif
+		if (p2 != 0xffff && p2 != 0x0000) {
+			DPRINTK("PHY code: %x\n", (p2 << 12) | (p3 >> 4));
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
+ if(priv->phy_addr<32) {
+ return 0;
+ }
+ DPRINTK("Oopsie! PHY is not known!\n");
+ priv->phy_addr=-1;
+ return -ENODEV;
+}
+
+static void meth_check_link(struct net_device *dev)
+{
+ struct meth_private *priv = netdev_priv(dev);
+ unsigned long mii_advertising = mdio_read(priv, 4);
+ unsigned long mii_partner = mdio_read(priv, 5);
+ unsigned long negotiated = mii_advertising & mii_partner;
+ unsigned long duplex, speed;
+
+ if (mii_partner == 0xffff)
+ return;
+
+ speed = (negotiated & 0x0380) ? METH_100MBIT : 0;
+ duplex = ((negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040) ?
+ METH_PHY_FDX : 0;
+
+ if ((priv->mac_ctrl & METH_PHY_FDX) ^ duplex) {
+ DPRINTK("Setting %s-duplex\n", duplex ? "full" : "half");
+ if (duplex)
+ priv->mac_ctrl |= METH_PHY_FDX;
+ else
+ priv->mac_ctrl &= ~METH_PHY_FDX;
+ mace->eth.mac_ctrl = priv->mac_ctrl;
+ }
+
+ if ((priv->mac_ctrl & METH_100MBIT) ^ speed) {
+ DPRINTK("Setting %dMbs mode\n", speed ? 100 : 10);
+		if (speed)
+ priv->mac_ctrl |= METH_100MBIT;
+ else
+ priv->mac_ctrl &= ~METH_100MBIT;
+ mace->eth.mac_ctrl = priv->mac_ctrl;
+ }
+}
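+
+/*
+ * The magic masks above are the standard MII ANLPAR bits: 0x0380 is any
+ * 100 Mbit ability (100BASE-T4/FD/HD), 0x0100 is 100FD, and
+ * (negotiated & 0x01C0) == 0x0040 means 10FD was agreed on with no
+ * 100 Mbit ability present, so full duplex is chosen in either case.
+ */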
+
+
+static int meth_init_tx_ring(struct meth_private *priv)
+{
+ /* Init TX ring */
+ priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
+ &priv->tx_ring_dma, GFP_ATOMIC);
+ if (!priv->tx_ring)
+ return -ENOMEM;
+ memset(priv->tx_ring, 0, TX_RING_BUFFER_SIZE);
+ priv->tx_count = priv->tx_read = priv->tx_write = 0;
+ mace->eth.tx_ring_base = priv->tx_ring_dma;
+ /* Now init skb save area */
+ memset(priv->tx_skbs, 0, sizeof(priv->tx_skbs));
+ memset(priv->tx_skb_dmas, 0, sizeof(priv->tx_skb_dmas));
+ return 0;
+}
+
+static int meth_init_rx_ring(struct meth_private *priv)
+{
+ int i;
+
+ for (i = 0; i < RX_RING_ENTRIES; i++) {
+ priv->rx_skbs[i] = alloc_skb(METH_RX_BUFF_SIZE, 0);
+ /* 8byte status vector + 3quad padding + 2byte padding,
+ * to put data on 64bit aligned boundary */
+ skb_reserve(priv->rx_skbs[i],METH_RX_HEAD);
+ priv->rx_ring[i]=(rx_packet*)(priv->rx_skbs[i]->head);
+ /* I'll need to re-sync it after each RX */
+ priv->rx_ring_dmas[i] =
+ dma_map_single(NULL, priv->rx_ring[i],
+ METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
+ mace->eth.rx_fifo = priv->rx_ring_dmas[i];
+ }
+ priv->rx_write = 0;
+ return 0;
+}
+static void meth_free_tx_ring(struct meth_private *priv)
+{
+ int i;
+
+ /* Remove any pending skb */
+ for (i = 0; i < TX_RING_ENTRIES; i++) {
+ if (priv->tx_skbs[i])
+ dev_kfree_skb(priv->tx_skbs[i]);
+ priv->tx_skbs[i] = NULL;
+ }
+ dma_free_coherent(NULL, TX_RING_BUFFER_SIZE, priv->tx_ring,
+ priv->tx_ring_dma);
+}
+
+/* Presumes RX DMA engine is stopped, and RX fifo ring is reset */
+static void meth_free_rx_ring(struct meth_private *priv)
+{
+ int i;
+
+ for (i = 0; i < RX_RING_ENTRIES; i++) {
+ dma_unmap_single(NULL, priv->rx_ring_dmas[i],
+ METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
+ priv->rx_ring[i] = 0;
+ priv->rx_ring_dmas[i] = 0;
+ kfree_skb(priv->rx_skbs[i]);
+ }
+}
+
+int meth_reset(struct net_device *dev)
+{
+ struct meth_private *priv = netdev_priv(dev);
+
+ /* Reset card */
+ mace->eth.mac_ctrl = SGI_MAC_RESET;
+ udelay(1);
+ mace->eth.mac_ctrl = 0;
+ udelay(25);
+
+ /* Load ethernet address */
+ load_eaddr(dev);
+ /* Should load some "errata", but later */
+
+ /* Check for device */
+ if (mdio_probe(priv) < 0) {
+ DPRINTK("Unable to find PHY\n");
+ return -ENODEV;
+ }
+
+ /* Initial mode: 10 | Half-duplex | Accept normal packets */
+ priv->mac_ctrl = METH_ACCEPT_MCAST | METH_DEFAULT_IPG;
+ if (dev->flags & IFF_PROMISC)
+ priv->mac_ctrl |= METH_PROMISC;
+ mace->eth.mac_ctrl = priv->mac_ctrl;
+
+ /* Autonegotiate speed and duplex mode */
+ meth_check_link(dev);
+
+ /* Now set dma control, but don't enable DMA, yet */
+ priv->dma_ctrl = (4 << METH_RX_OFFSET_SHIFT) |
+ (RX_RING_ENTRIES << METH_RX_DEPTH_SHIFT);
+ mace->eth.dma_ctrl = priv->dma_ctrl;
+
+ return 0;
+}
+
+/*============End Helper Routines=====================*/
+
+/*
+ * Open and close
+ */
+static int meth_open(struct net_device *dev)
+{
+ struct meth_private *priv = netdev_priv(dev);
+ int ret;
+
+ priv->phy_addr = -1; /* No PHY is known yet... */
+
+ /* Initialize the hardware */
+ ret = meth_reset(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Allocate the ring buffers */
+ ret = meth_init_tx_ring(priv);
+ if (ret < 0)
+ return ret;
+ ret = meth_init_rx_ring(priv);
+ if (ret < 0)
+ goto out_free_tx_ring;
+
+ ret = request_irq(dev->irq, meth_interrupt, 0, meth_str, dev);
+ if (ret) {
+ printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
+ goto out_free_rx_ring;
+ }
+
+ /* Start DMA */
+ priv->dma_ctrl |= METH_DMA_TX_EN | /*METH_DMA_TX_INT_EN |*/
+ METH_DMA_RX_EN | METH_DMA_RX_INT_EN;
+ mace->eth.dma_ctrl = priv->dma_ctrl;
+
+ DPRINTK("About to start queue\n");
+ netif_start_queue(dev);
+
+ return 0;
+
+out_free_rx_ring:
+ meth_free_rx_ring(priv);
+out_free_tx_ring:
+ meth_free_tx_ring(priv);
+
+ return ret;
+}
+
+static int meth_release(struct net_device *dev)
+{
+ struct meth_private *priv = netdev_priv(dev);
+
+ DPRINTK("Stopping queue\n");
+ netif_stop_queue(dev); /* can't transmit any more */
+ /* shut down DMA */
+ priv->dma_ctrl &= ~(METH_DMA_TX_EN | METH_DMA_TX_INT_EN |
+ METH_DMA_RX_EN | METH_DMA_RX_INT_EN);
+ mace->eth.dma_ctrl = priv->dma_ctrl;
+ free_irq(dev->irq, dev);
+ meth_free_tx_ring(priv);
+ meth_free_rx_ring(priv);
+
+ return 0;
+}
+
+/*
+ * Receive a packet: retrieve, encapsulate and pass over to upper levels
+ */
+static void meth_rx(struct net_device* dev, unsigned long int_status)
+{
+ struct sk_buff *skb;
+ unsigned long status, flags;
+ struct meth_private *priv = netdev_priv(dev);
+ unsigned long fifo_rptr = (int_status & METH_INT_RX_RPTR_MASK) >> 8;
+
+ spin_lock_irqsave(&priv->meth_lock, flags);
+ priv->dma_ctrl &= ~METH_DMA_RX_INT_EN;
+ mace->eth.dma_ctrl = priv->dma_ctrl;
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
+
+ if (int_status & METH_INT_RX_UNDERFLOW) {
+ fifo_rptr = (fifo_rptr - 1) & 0x0f;
+ }
+ while (priv->rx_write != fifo_rptr) {
+ dma_unmap_single(NULL, priv->rx_ring_dmas[priv->rx_write],
+ METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
+ status = priv->rx_ring[priv->rx_write]->status.raw;
+#if MFE_DEBUG
+ if (!(status & METH_RX_ST_VALID)) {
+ DPRINTK("Not received? status=%016lx\n",status);
+ }
+#endif
+ if ((!(status & METH_RX_STATUS_ERRORS)) && (status & METH_RX_ST_VALID)) {
+ int len = (status & 0xffff) - 4; /* omit CRC */
+ /* length sanity check */
+ if (len < 60 || len > 1518) {
+				printk(KERN_DEBUG "%s: bogus packet size %d in ring slot %ld, status=%#2Lx.\n",
+				       dev->name, len, priv->rx_write,
+				       priv->rx_ring[priv->rx_write]->status.raw);
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
+ skb = priv->rx_skbs[priv->rx_write];
+ } else {
+ skb = alloc_skb(METH_RX_BUFF_SIZE, GFP_ATOMIC);
+ if (!skb) {
+ /* Ouch! No memory! Drop packet on the floor */
+ DPRINTK("No mem: dropping packet\n");
+ dev->stats.rx_dropped++;
+ skb = priv->rx_skbs[priv->rx_write];
+ } else {
+ struct sk_buff *skb_c = priv->rx_skbs[priv->rx_write];
+ /* 8byte status vector + 3quad padding + 2byte padding,
+ * to put data on 64bit aligned boundary */
+ skb_reserve(skb, METH_RX_HEAD);
+ /* Write metadata, and then pass to the receive level */
+ skb_put(skb_c, len);
+ priv->rx_skbs[priv->rx_write] = skb;
+ skb_c->protocol = eth_type_trans(skb_c, dev);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ netif_rx(skb_c);
+ }
+ }
+ } else {
+ dev->stats.rx_errors++;
+ skb=priv->rx_skbs[priv->rx_write];
+#if MFE_DEBUG>0
+ printk(KERN_WARNING "meth: RX error: status=0x%016lx\n",status);
+ if(status&METH_RX_ST_RCV_CODE_VIOLATION)
+ printk(KERN_WARNING "Receive Code Violation\n");
+ if(status&METH_RX_ST_CRC_ERR)
+ printk(KERN_WARNING "CRC error\n");
+ if(status&METH_RX_ST_INV_PREAMBLE_CTX)
+ printk(KERN_WARNING "Invalid Preamble Context\n");
+ if(status&METH_RX_ST_LONG_EVT_SEEN)
+ printk(KERN_WARNING "Long Event Seen...\n");
+ if(status&METH_RX_ST_BAD_PACKET)
+ printk(KERN_WARNING "Bad Packet\n");
+ if(status&METH_RX_ST_CARRIER_EVT_SEEN)
+ printk(KERN_WARNING "Carrier Event Seen\n");
+#endif
+ }
+ priv->rx_ring[priv->rx_write] = (rx_packet*)skb->head;
+ priv->rx_ring[priv->rx_write]->status.raw = 0;
+ priv->rx_ring_dmas[priv->rx_write] =
+ dma_map_single(NULL, priv->rx_ring[priv->rx_write],
+ METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
+ mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write];
+ ADVANCE_RX_PTR(priv->rx_write);
+ }
+ spin_lock_irqsave(&priv->meth_lock, flags);
+ /* In case there was underflow, and Rx DMA was disabled */
+ priv->dma_ctrl |= METH_DMA_RX_INT_EN | METH_DMA_RX_EN;
+ mace->eth.dma_ctrl = priv->dma_ctrl;
+ mace->eth.int_stat = METH_INT_RX_THRESHOLD;
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
+}
+
+static int meth_tx_full(struct net_device *dev)
+{
+ struct meth_private *priv = netdev_priv(dev);
+
+ return priv->tx_count >= TX_RING_ENTRIES - 1;
+}
+
+static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status)
+{
+ struct meth_private *priv = netdev_priv(dev);
+ unsigned long status, flags;
+ struct sk_buff *skb;
+ unsigned long rptr = (int_status&TX_INFO_RPTR) >> 16;
+
+ spin_lock_irqsave(&priv->meth_lock, flags);
+
+ /* Stop DMA notification */
+ priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
+ mace->eth.dma_ctrl = priv->dma_ctrl;
+
+ while (priv->tx_read != rptr) {
+ skb = priv->tx_skbs[priv->tx_read];
+ status = priv->tx_ring[priv->tx_read].header.raw;
+#if MFE_DEBUG>=1
+ if (priv->tx_read == priv->tx_write)
+ DPRINTK("Auchi! tx_read=%d,tx_write=%d,rptr=%d?\n", priv->tx_read, priv->tx_write,rptr);
+#endif
+ if (status & METH_TX_ST_DONE) {
+ if (status & METH_TX_ST_SUCCESS){
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ } else {
+ dev->stats.tx_errors++;
+#if MFE_DEBUG>=1
+ DPRINTK("TX error: status=%016lx <",status);
+ if(status & METH_TX_ST_SUCCESS)
+ printk(" SUCCESS");
+ if(status & METH_TX_ST_TOOLONG)
+ printk(" TOOLONG");
+ if(status & METH_TX_ST_UNDERRUN)
+ printk(" UNDERRUN");
+ if(status & METH_TX_ST_EXCCOLL)
+ printk(" EXCCOLL");
+ if(status & METH_TX_ST_DEFER)
+ printk(" DEFER");
+ if(status & METH_TX_ST_LATECOLL)
+ printk(" LATECOLL");
+ printk(" >\n");
+#endif
+ }
+ } else {
+ DPRINTK("RPTR points us here, but packet not done?\n");
+ break;
+ }
+ dev_kfree_skb_irq(skb);
+ priv->tx_skbs[priv->tx_read] = NULL;
+ priv->tx_ring[priv->tx_read].header.raw = 0;
+ priv->tx_read = (priv->tx_read+1)&(TX_RING_ENTRIES-1);
+ priv->tx_count--;
+ }
+
+ /* wake up queue if it was stopped */
+ if (netif_queue_stopped(dev) && !meth_tx_full(dev)) {
+ netif_wake_queue(dev);
+ }
+
+ mace->eth.int_stat = METH_INT_TX_EMPTY | METH_INT_TX_PKT;
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
+}
+
+static void meth_error(struct net_device* dev, unsigned status)
+{
+ struct meth_private *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ printk(KERN_WARNING "meth: error status: 0x%08x\n",status);
+ /* check for errors too... */
+ if (status & (METH_INT_TX_LINK_FAIL))
+ printk(KERN_WARNING "meth: link failure\n");
+ /* Should I do full reset in this case? */
+ if (status & (METH_INT_MEM_ERROR))
+ printk(KERN_WARNING "meth: memory error\n");
+ if (status & (METH_INT_TX_ABORT))
+ printk(KERN_WARNING "meth: aborted\n");
+ if (status & (METH_INT_RX_OVERFLOW))
+ printk(KERN_WARNING "meth: Rx overflow\n");
+ if (status & (METH_INT_RX_UNDERFLOW)) {
+ printk(KERN_WARNING "meth: Rx underflow\n");
+ spin_lock_irqsave(&priv->meth_lock, flags);
+ mace->eth.int_stat = METH_INT_RX_UNDERFLOW;
+ /* more underflow interrupts will be delivered,
+ * effectively throwing us into an infinite loop.
+ * Thus I stop processing Rx in this case. */
+ priv->dma_ctrl &= ~METH_DMA_RX_EN;
+ mace->eth.dma_ctrl = priv->dma_ctrl;
+ DPRINTK("Disabled meth Rx DMA temporarily\n");
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
+ }
+ mace->eth.int_stat = METH_INT_ERROR;
+}
+
+/*
+ * The typical interrupt entry point
+ */
+static irqreturn_t meth_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct meth_private *priv = netdev_priv(dev);
+ unsigned long status;
+
+ status = mace->eth.int_stat;
+ while (status & 0xff) {
+ /* First handle errors - if we get Rx underflow,
+ * Rx DMA will be disabled, and the Rx handler will
+ * re-enable it. I don't think it's possible to get Rx
+ * underflow without getting an Rx interrupt. */
+ if (status & METH_INT_ERROR) {
+ meth_error(dev, status);
+ }
+ if (status & (METH_INT_TX_EMPTY | METH_INT_TX_PKT)) {
+ /* a transmission is over: free the skb */
+ meth_tx_cleanup(dev, status);
+ }
+ if (status & METH_INT_RX_THRESHOLD) {
+ if (!(priv->dma_ctrl & METH_DMA_RX_INT_EN))
+ break;
+ /* send it to meth_rx for handling */
+ meth_rx(dev, status);
+ }
+ status = mace->eth.int_stat;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Transmit packets that fit into the TX descriptor (<= 120 bytes)
+ */
+static void meth_tx_short_prepare(struct meth_private *priv,
+ struct sk_buff *skb)
+{
+ tx_packet *desc = &priv->tx_ring[priv->tx_write];
+ int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
+
+ desc->header.raw = METH_TX_CMD_INT_EN | (len-1) | ((128-len) << 16);
+ /* maybe I should set whole thing to 0 first... */
+ skb_copy_from_linear_data(skb, desc->data.dt + (120 - len), skb->len);
+ if (skb->len < len)
+ memset(desc->data.dt + 120 - len + skb->len, 0, len-skb->len);
+}
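+/*
+ * Worked example for the offset encoding above (illustrative
+ * numbers): for a padded minimum frame, len == ETH_ZLEN == 60, so
+ * the data is copied to dt + 60 and the offset field holds
+ * 128 - 60 == 68, i.e. the byte offset of the data from the start
+ * of the 128-byte descriptor (8-byte header + 120 data bytes).
+ */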
+#define TX_CATBUF1 BIT(25)
+static void meth_tx_1page_prepare(struct meth_private *priv,
+ struct sk_buff *skb)
+{
+ tx_packet *desc = &priv->tx_ring[priv->tx_write];
+ void *buffer_data = (void *)(((unsigned long)skb->data + 7) & ~7);
+ int unaligned_len = (int)((unsigned long)buffer_data - (unsigned long)skb->data);
+ int buffer_len = skb->len - unaligned_len;
+ dma_addr_t catbuf;
+
+ desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | (skb->len - 1);
+
+ /* unaligned part */
+ if (unaligned_len) {
+ skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len),
+ unaligned_len);
+ desc->header.raw |= (128 - unaligned_len) << 16;
+ }
+
+ /* first page */
+ catbuf = dma_map_single(NULL, buffer_data, buffer_len,
+ DMA_TO_DEVICE);
+ desc->data.cat_buf[0].form.start_addr = catbuf >> 3;
+ desc->data.cat_buf[0].form.len = buffer_len - 1;
+}
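+/*
+ * Worked example for the split above (illustrative address): with
+ * skb->data == 0x1003, buffer_data is rounded up to 0x1008, so the
+ * 5 unaligned head bytes are copied inline (to dt + 115, offset
+ * field 128 - 5 == 123) and only the 8-byte-aligned remainder is
+ * DMA-mapped; start_addr stores the bus address >> 3 to fit the
+ * 29-bit, 8-byte-granular pointer in the catbuf descriptor.
+ */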
+#define TX_CATBUF2 BIT(26)
+static void meth_tx_2page_prepare(struct meth_private *priv,
+ struct sk_buff *skb)
+{
+ tx_packet *desc = &priv->tx_ring[priv->tx_write];
+ void *buffer1_data = (void *)(((unsigned long)skb->data + 7) & ~7);
+ void *buffer2_data = (void *)PAGE_ALIGN((unsigned long)skb->data);
+ int unaligned_len = (int)((unsigned long)buffer1_data - (unsigned long)skb->data);
+ int buffer1_len = (int)((unsigned long)buffer2_data - (unsigned long)buffer1_data);
+ int buffer2_len = skb->len - buffer1_len - unaligned_len;
+ dma_addr_t catbuf1, catbuf2;
+
+ desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2| (skb->len - 1);
+ /* unaligned part */
+ if (unaligned_len){
+ skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len),
+ unaligned_len);
+ desc->header.raw |= (128 - unaligned_len) << 16;
+ }
+
+ /* first page */
+ catbuf1 = dma_map_single(NULL, buffer1_data, buffer1_len,
+ DMA_TO_DEVICE);
+ desc->data.cat_buf[0].form.start_addr = catbuf1 >> 3;
+ desc->data.cat_buf[0].form.len = buffer1_len - 1;
+ /* second page */
+ catbuf2 = dma_map_single(NULL, buffer2_data, buffer2_len,
+ DMA_TO_DEVICE);
+ desc->data.cat_buf[1].form.start_addr = catbuf2 >> 3;
+ desc->data.cat_buf[1].form.len = buffer2_len - 1;
+}
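+/*
+ * Worked example (illustrative, assuming 4 KiB pages): with
+ * skb->data at page offset 0xf00 (already 8-byte aligned) and
+ * skb->len == 0x300, unaligned_len == 0, buffer1 covers the 0x100
+ * bytes up to the page end and buffer2 the 0x200 bytes that spill
+ * onto the next page.
+ */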
+
+static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb)
+{
+ /* Remember the skb, so we can free it at interrupt time */
+ priv->tx_skbs[priv->tx_write] = skb;
+ if (skb->len <= 120) {
+ /* Whole packet fits into descriptor */
+ meth_tx_short_prepare(priv, skb);
+ } else if (PAGE_ALIGN((unsigned long)skb->data) !=
+ PAGE_ALIGN((unsigned long)skb->data + skb->len - 1)) {
+ /* Packet crosses page boundary */
+ meth_tx_2page_prepare(priv, skb);
+ } else {
+ /* Packet is in one page */
+ meth_tx_1page_prepare(priv, skb);
+ }
+ priv->tx_write = (priv->tx_write + 1) & (TX_RING_ENTRIES - 1);
+ mace->eth.tx_info = priv->tx_write;
+ priv->tx_count++;
+}
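+/*
+ * Note on the dispatch above: PAGE_ALIGN() rounds up, so comparing
+ * the rounded-up first and last byte detects a packet spilling onto
+ * a second page. The write pointer wraps with a mask, which relies
+ * on TX_RING_ENTRIES being a power of two (e.g. (63 + 1) & 63 == 0).
+ */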
+
+/*
+ * Transmit a packet (called by the kernel)
+ */
+static int meth_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct meth_private *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->meth_lock, flags);
+ /* Stop DMA notification */
+ priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
+ mace->eth.dma_ctrl = priv->dma_ctrl;
+
+ meth_add_to_tx_ring(priv, skb);
+ dev->trans_start = jiffies; /* save the timestamp */
+
+ /* If TX ring is full, tell the upper layer to stop sending packets */
+ if (meth_tx_full(dev)) {
+ printk(KERN_DEBUG "TX full: stopping\n");
+ netif_stop_queue(dev);
+ }
+
+ /* Restart DMA notification */
+ priv->dma_ctrl |= METH_DMA_TX_INT_EN;
+ mace->eth.dma_ctrl = priv->dma_ctrl;
+
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Deal with a transmit timeout.
+ */
+static void meth_tx_timeout(struct net_device *dev)
+{
+ struct meth_private *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ printk(KERN_WARNING "%s: transmit timed out\n", dev->name);
+
+ /* Protect against concurrent rx interrupts */
+ spin_lock_irqsave(&priv->meth_lock,flags);
+
+ /* Try to reset the interface. */
+ meth_reset(dev);
+
+ dev->stats.tx_errors++;
+
+ /* Clear all rings */
+ meth_free_tx_ring(priv);
+ meth_free_rx_ring(priv);
+ meth_init_tx_ring(priv);
+ meth_init_rx_ring(priv);
+
+ /* Restart dma */
+ priv->dma_ctrl |= METH_DMA_TX_EN | METH_DMA_RX_EN | METH_DMA_RX_INT_EN;
+ mace->eth.dma_ctrl = priv->dma_ctrl;
+
+ /* Enable interrupt */
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+/*
+ * Ioctl commands
+ */
+static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ /* XXX Not yet implemented */
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct net_device_ops meth_netdev_ops = {
+ .ndo_open = meth_open,
+ .ndo_stop = meth_release,
+ .ndo_start_xmit = meth_tx,
+ .ndo_do_ioctl = meth_ioctl,
+ .ndo_tx_timeout = meth_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+/*
+ * The init function.
+ */
+static int __devinit meth_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct meth_private *priv;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct meth_private));
+ if (!dev)
+ return -ENOMEM;
+
+ dev->netdev_ops = &meth_netdev_ops;
+ dev->watchdog_timeo = timeout;
+ dev->irq = MACE_ETHERNET_IRQ;
+ dev->base_addr = (unsigned long)&mace->eth;
+ memcpy(dev->dev_addr, o2meth_eaddr, 6);
+
+ priv = netdev_priv(dev);
+ spin_lock_init(&priv->meth_lock);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = register_netdev(dev);
+ if (err) {
+ free_netdev(dev);
+ return err;
+ }
+
+ printk(KERN_INFO "%s: SGI MACE Ethernet rev. %d\n",
+ dev->name, (unsigned int)(mace->eth.mac_ctrl >> 29));
+ return 0;
+}
+
+static int __exit meth_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+
+ unregister_netdev(dev);
+ free_netdev(dev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver meth_driver = {
+ .probe = meth_probe,
+ .remove = __exit_p(meth_remove),
+ .driver = {
+ .name = "meth",
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init meth_init_module(void)
+{
+ int err;
+
+ err = platform_driver_register(&meth_driver);
+ if (err)
+ printk(KERN_ERR "Driver registration failed\n");
+
+ return err;
+}
+
+static void __exit meth_exit_module(void)
+{
+ platform_driver_unregister(&meth_driver);
+}
+
+module_init(meth_init_module);
+module_exit(meth_exit_module);
+
+MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>");
+MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:meth");
diff --git a/drivers/net/ethernet/sgi/meth.h b/drivers/net/ethernet/sgi/meth.h
new file mode 100644
index 000000000000..5b145c6bad60
--- /dev/null
+++ b/drivers/net/ethernet/sgi/meth.h
@@ -0,0 +1,243 @@
+
+/*
+ * snull.h -- definitions for the network module
+ *
+ * Copyright (C) 2001 Alessandro Rubini and Jonathan Corbet
+ * Copyright (C) 2001 O'Reilly & Associates
+ *
+ * The source code in this file can be freely used, adapted,
+ * and redistributed in source or binary form, so long as an
+ * acknowledgment appears in derived source files. The citation
+ * should list that the code comes from the book "Linux Device
+ * Drivers" by Alessandro Rubini and Jonathan Corbet, published
+ * by O'Reilly & Associates. No warranty is attached;
+ * we cannot take responsibility for errors or fitness for use.
+ */
+
+/* version dependencies have been confined to a separate file */
+
+/* Tunable parameters */
+#define TX_RING_ENTRIES 64 /* 64-512? */
+
+#define RX_RING_ENTRIES 16 /* Do not change */
+/* Internal constants */
+#define TX_RING_BUFFER_SIZE (TX_RING_ENTRIES*sizeof(tx_packet))
+#define RX_BUFFER_SIZE 1546 /* ethernet packet size */
+#define METH_RX_BUFF_SIZE 4096
+#define METH_RX_HEAD 34 /* status + 3 quad garbage-fill + 2 byte zero-pad */
+#define RX_BUFFER_OFFSET (sizeof(rx_status_vector)+2) /* status vector + 2 bytes of padding */
+#define RX_BUCKET_SIZE 256
+
+/* For more detailed explanations of what each field means,
+ see Nick's great comments to #defines below (or the docs, if
+ you are lucky enough to get hold of them :) */
+
+/* tx status vector is written over tx command header upon
+ dma completion. */
+
+typedef struct tx_status_vector {
+ u64 sent:1; /* always set to 1...*/
+ u64 pad0:34;/* always set to 0 */
+ u64 flags:9; /*I'm too lazy to specify each one separately at the moment*/
+ u64 col_retry_cnt:4; /*collision retry count*/
+ u64 len:16; /*Transmit length in bytes*/
+} tx_status_vector;
+
+/*
+ * Each packet is 128 bytes long.
+ * It consists of a header, 0-3 concatenation
+ * buffer pointers and up to 120 data bytes.
+ */
+typedef struct tx_packet_hdr {
+ u64 pad1:36; /*should be filled with 0 */
+ u64 cat_ptr3_valid:1, /*Concatenation pointer valid flags*/
+ cat_ptr2_valid:1,
+ cat_ptr1_valid:1;
+ u64 tx_int_flag:1; /*Generate TX interrupt when packet has been sent*/
+ u64 term_dma_flag:1; /*Terminate transmit DMA on transmit abort conditions*/
+ u64 data_offset:7; /*Starting byte offset in ring data block*/
+ u64 data_len:16; /*Length of valid data in bytes-1*/
+} tx_packet_hdr;
+typedef union tx_cat_ptr {
+ struct {
+ u64 pad2:16; /* should be 0 */
+ u64 len:16; /*length of buffer data - 1*/
+ u64 start_addr:29; /*Physical starting address*/
+ u64 pad1:3; /* should be zero */
+ } form;
+ u64 raw;
+} tx_cat_ptr;
+
+typedef struct tx_packet {
+ union {
+ tx_packet_hdr header;
+ tx_status_vector res;
+ u64 raw;
+ } header;
+ union {
+ tx_cat_ptr cat_buf[3];
+ char dt[120];
+ } data;
+} tx_packet;
+
+typedef union rx_status_vector {
+ volatile struct {
+ u64 pad1:1;/*fill it with ones*/
+ u64 pad2:15;/*fill with 0*/
+ u64 ip_chk_sum:16;
+ u64 seq_num:5;
+ u64 mac_addr_match:1;
+ u64 mcast_addr_match:1;
+ u64 carrier_event_seen:1;
+ u64 bad_packet:1;
+ u64 long_event_seen:1;
+ u64 invalid_preamble:1;
+ u64 broadcast:1;
+ u64 multicast:1;
+ u64 crc_error:1;
+ u64 huh:1;/*???*/
+ u64 rx_code_violation:1;
+ u64 rx_len:16;
+ } parsed;
+ volatile u64 raw;
+} rx_status_vector;
+
+typedef struct rx_packet {
+ rx_status_vector status;
+ u64 pad[3]; /* For whatever reason, the data needs to start at a 4-double-word offset */
+ u16 pad2;
+ char buf[METH_RX_BUFF_SIZE-sizeof(rx_status_vector)-3*sizeof(u64)-sizeof(u16)];/* data */
+} rx_packet;
+
+#define TX_INFO_RPTR 0x00FF0000
+#define TX_INFO_WPTR 0x000000FF
+
+ /* Bits in METH_MAC */
+
+#define SGI_MAC_RESET BIT(0) /* 0: MAC110 active in run mode, 1: Global reset signal to MAC110 core is active */
+#define METH_PHY_FDX BIT(1) /* 0: Disable full duplex, 1: Enable full duplex */
+#define METH_PHY_LOOP BIT(2) /* 0: Normal operation, follows 10/100mbit and M10T/MII select, 1: loops internal MII bus */
+ /* selects ignored */
+#define METH_100MBIT BIT(3) /* 0: 10meg mode, 1: 100meg mode */
+#define METH_PHY_MII BIT(4) /* 0: MII selected, 1: SIA selected */
+ /* Note: when loopback is set this bit becomes collision control. Setting this bit will */
+ /* cause a collision to be reported. */
+
+ /* Bits 5 and 6 are used to determine the Destination address filter mode */
+#define METH_ACCEPT_MY 0 /* 00: Accept PHY address only */
+#define METH_ACCEPT_MCAST 0x20 /* 01: Accept physical, broadcast, and multicast filter matches only */
+#define METH_ACCEPT_AMCAST 0x40 /* 10: Accept physical, broadcast, and all multicast packets */
+#define METH_PROMISC 0x60 /* 11: Promiscuous mode */
+
+#define METH_PHY_LINK_FAIL BIT(7) /* 0: Link failure detection disabled, 1: Hardware scans for link failure in PHY */
+
+#define METH_MAC_IPG 0x1ffff00
+
+#define METH_DEFAULT_IPG ((17<<15) | (11<<22) | (21<<8))
+ /* 0x172e5c00 */ /* 23, 23, 23 */ /* 0x54A9500 */ /* 21, 21, 21 */
+ /* Bits 8 through 14 are used to determine Inter-Packet Gap between "Back to Back" packets */
+ /* The gap depends on the clock speed of the link, 80ns per increment for 100baseT, 800ns */
+ /* per increment for 10BaseT */
+
+ /* Bits 15 through 21 are used to determine IPGR1 */
+
+ /* Bits 22 through 28 are used to determine IPGR2 */
+
+#define METH_REV_SHIFT 29 /* Bits 29 through 31 are used to determine the revision */
+ /* 000: Initial revision */
+ /* 001: First revision, Improved TX concatenation */
+
+
+/* DMA control bits */
+#define METH_RX_OFFSET_SHIFT 12 /* Bits 12:14 of DMA control register indicate starting offset of packet data for RX operation */
+#define METH_RX_DEPTH_SHIFT 4 /* Bits 8:4 define RX FIFO depth -- when # of RX FIFO entries != depth, an interrupt is generated */
+
+#define METH_DMA_TX_EN BIT(1) /* enable TX DMA */
+#define METH_DMA_TX_INT_EN BIT(0) /* enable TX Buffer Empty interrupt */
+#define METH_DMA_RX_EN BIT(15) /* Enable RX */
+#define METH_DMA_RX_INT_EN BIT(9) /* Enable interrupt on RX packet */
+
+/* RX FIFO MCL Info bits */
+#define METH_RX_FIFO_WPTR(x) (((x)>>16)&0xf)
+#define METH_RX_FIFO_RPTR(x) (((x)>>8)&0xf)
+#define METH_RX_FIFO_DEPTH(x) ((x)&0x1f)
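+/* Example decode (illustrative value): x == 0x00030205 yields
+ WPTR == 3, RPTR == 2 and DEPTH == 5. */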
+
+/* RX status bits */
+
+#define METH_RX_ST_VALID BIT(63)
+#define METH_RX_ST_RCV_CODE_VIOLATION BIT(16)
+#define METH_RX_ST_DRBL_NBL BIT(17)
+#define METH_RX_ST_CRC_ERR BIT(18)
+#define METH_RX_ST_MCAST_PKT BIT(19)
+#define METH_RX_ST_BCAST_PKT BIT(20)
+#define METH_RX_ST_INV_PREAMBLE_CTX BIT(21)
+#define METH_RX_ST_LONG_EVT_SEEN BIT(22)
+#define METH_RX_ST_BAD_PACKET BIT(23)
+#define METH_RX_ST_CARRIER_EVT_SEEN BIT(24)
+#define METH_RX_ST_MCAST_FILTER_MATCH BIT(25)
+#define METH_RX_ST_PHYS_ADDR_MATCH BIT(26)
+
+#define METH_RX_STATUS_ERRORS \
+ ( \
+ METH_RX_ST_RCV_CODE_VIOLATION| \
+ METH_RX_ST_CRC_ERR| \
+ METH_RX_ST_INV_PREAMBLE_CTX| \
+ METH_RX_ST_LONG_EVT_SEEN| \
+ METH_RX_ST_BAD_PACKET| \
+ METH_RX_ST_CARRIER_EVT_SEEN \
+ )
+ /* Bits in METH_INT */
+ /* Write _1_ to corresponding bit to clear */
+#define METH_INT_TX_EMPTY BIT(0) /* 0: No interrupt pending, 1: The TX ring buffer is empty */
+#define METH_INT_TX_PKT BIT(1) /* 0: No interrupt pending */
+ /* 1: A TX message had the INT request bit set, the packet has been sent. */
+#define METH_INT_TX_LINK_FAIL BIT(2) /* 0: No interrupt pending, 1: PHY has reported a link failure */
+#define METH_INT_MEM_ERROR BIT(3) /* 0: No interrupt pending */
+ /* 1: A memory error occurred during DMA, DMA stopped, Fatal */
+#define METH_INT_TX_ABORT BIT(4) /* 0: No interrupt pending, 1: The TX aborted operation, DMA stopped, FATAL */
+#define METH_INT_RX_THRESHOLD BIT(5) /* 0: No interrupt pending, 1: Selected receive threshold condition Valid */
+#define METH_INT_RX_UNDERFLOW BIT(6) /* 0: No interrupt pending, 1: FIFO was empty, packet could not be queued */
+#define METH_INT_RX_OVERFLOW BIT(7) /* 0: No interrupt pending, 1: DMA FIFO Overflow, DMA stopped, FATAL */
+
+/*#define METH_INT_RX_RPTR_MASK 0x0001F00*/ /* Bits 8 through 12 alias of RX read-pointer */
+#define METH_INT_RX_RPTR_MASK 0x0000F00 /* Bits 8 through 11 alias of RX read-pointer - so, is Rx FIFO 16 or 32 entry?*/
+
+ /* Bits 13 through 15 are always 0. */
+
+#define METH_INT_TX_RPTR_MASK 0x1FF0000 /* Bits 16 through 24 alias of TX read-pointer */
+
+#define METH_INT_RX_SEQ_MASK 0x2E000000 /* Bits 25 through 29 are the starting seq number */
+ /* for the message at the top of the queue */
+
+#define METH_INT_ERROR (METH_INT_TX_LINK_FAIL| \
+ METH_INT_MEM_ERROR| \
+ METH_INT_TX_ABORT| \
+ METH_INT_RX_OVERFLOW| \
+ METH_INT_RX_UNDERFLOW)
+
+#define METH_INT_MCAST_HASH BIT(30) /* If RX DMA is enabled the hash select logic output is latched here */
+
+/* TX status bits */
+#define METH_TX_ST_DONE BIT(63) /* TX complete */
+#define METH_TX_ST_SUCCESS BIT(23) /* Packet was transmitted successfully */
+#define METH_TX_ST_TOOLONG BIT(24) /* TX abort due to excessive length */
+#define METH_TX_ST_UNDERRUN BIT(25) /* TX abort due to underrun (?) */
+#define METH_TX_ST_EXCCOLL BIT(26) /* TX abort due to excess collisions */
+#define METH_TX_ST_DEFER BIT(27) /* TX abort due to excess deferrals */
+#define METH_TX_ST_LATECOLL BIT(28) /* TX abort due to late collision */
+
+
+/* Tx command header bits */
+#define METH_TX_CMD_INT_EN BIT(24) /* Generate TX interrupt when packet is sent */
+
+/* Phy MDIO interface busy flag */
+#define MDIO_BUSY BIT(16)
+#define MDIO_DATA_MASK 0xFFFF
+/* PHY defines */
+#define PHY_QS6612X 0x0181441 /* Quality TX */
+#define PHY_ICS1889 0x0015F41 /* ICS FX */
+#define PHY_ICS1890 0x0015F42 /* ICS TX */
+#define PHY_DP83840 0x20005C0 /* National TX */
+
+#define ADVANCE_RX_PTR(x) ((x) = ((x) + 1) & (RX_RING_ENTRIES - 1))
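+/* Example: with RX_RING_ENTRIES == 16, ADVANCE_RX_PTR() wraps 15
+ back to 0, i.e. (15 + 1) & 15 == 0 -- again relying on a
+ power-of-two ring size. */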
diff --git a/drivers/net/ethernet/sis/Kconfig b/drivers/net/ethernet/sis/Kconfig
new file mode 100644
index 000000000000..f1135cc1bd48
--- /dev/null
+++ b/drivers/net/ethernet/sis/Kconfig
@@ -0,0 +1,53 @@
+#
+# Silicon Integrated Systems (SiS) device configuration
+#
+
+config NET_VENDOR_SIS
+ bool "Silicon Integrated Systems (SiS) devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about SiS devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_SIS
+
+config SIS900
+ tristate "SiS 900/7016 PCI Fast Ethernet Adapter support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ This is a driver for the Fast Ethernet PCI network cards based on
+ the SiS 900 and SiS 7016 chips. The SiS 900 core is also embedded in
+ SiS 630 and SiS 540 chipsets.
+
+ This driver also supports AMD 79C901 HomePNA so that you can use
+ your phone line as a network cable.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sis900. This is recommended.
+
+config SIS190
+ tristate "SiS190/SiS191 gigabit ethernet support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ Say Y here if you have a SiS 190 PCI Fast Ethernet adapter or
+ a SiS 191 PCI Gigabit Ethernet adapter. Both are expected to
+ appear in LAN-on-motherboard designs based on the SiS 965
+ and SiS 966 south bridges.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sis190. This is recommended.
+
+endif # NET_VENDOR_SIS
diff --git a/drivers/net/ethernet/sis/Makefile b/drivers/net/ethernet/sis/Makefile
new file mode 100644
index 000000000000..58d3ac1985df
--- /dev/null
+++ b/drivers/net/ethernet/sis/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for Silicon Integrated Systems (SiS) network device drivers.
+#
+
+obj-$(CONFIG_SIS190) += sis190.o
+obj-$(CONFIG_SIS900) += sis900.o
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
new file mode 100644
index 000000000000..1b4658c99391
--- /dev/null
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -0,0 +1,1956 @@
+/*
+ sis190.c: Silicon Integrated Systems SiS190 ethernet driver
+
+ Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
+ Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
+ Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
+
+ Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
+ genuine driver.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ See the file COPYING in this distribution for more information.
+
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/mii.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <asm/irq.h>
+
+#define PHY_MAX_ADDR 32
+#define PHY_ID_ANY 0x1f
+#define MII_REG_ANY 0x1f
+
+#define DRV_VERSION "1.4"
+#define DRV_NAME "sis190"
+#define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
+
+#define sis190_rx_skb netif_rx
+#define sis190_rx_quota(count, quota) count
+
+#define MAC_ADDR_LEN 6
+
+#define NUM_TX_DESC 64 /* [8..1024] */
+#define NUM_RX_DESC 64 /* [8..8192] */
+#define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
+#define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
+#define RX_BUF_SIZE 1536
+#define RX_BUF_MASK 0xfff8
+
+#define SIS190_REGS_SIZE 0x80
+#define SIS190_TX_TIMEOUT (6*HZ)
+#define SIS190_PHY_TIMEOUT (10*HZ)
+#define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | NETIF_MSG_IFUP | \
+ NETIF_MSG_IFDOWN)
+
+/* Enhanced PHY access register bit definitions */
+#define EhnMIIread 0x0000
+#define EhnMIIwrite 0x0020
+#define EhnMIIdataShift 16
+#define EhnMIIpmdShift 6 /* 7016 only */
+#define EhnMIIregShift 11
+#define EhnMIIreq 0x0010
+#define EhnMIInotDone 0x0010
+
+/* Write/read MMIO register */
+#define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
+#define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
+#define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
+#define SIS_R8(reg) readb (ioaddr + (reg))
+#define SIS_R16(reg) readw (ioaddr + (reg))
+#define SIS_R32(reg) readl (ioaddr + (reg))
+
+#define SIS_PCI_COMMIT() SIS_R32(IntrControl)
+
+enum sis190_registers {
+ TxControl = 0x00,
+ TxDescStartAddr = 0x04,
+ rsv0 = 0x08, // reserved
+ TxSts = 0x0c, // unused (Control/Status)
+ RxControl = 0x10,
+ RxDescStartAddr = 0x14,
+ rsv1 = 0x18, // reserved
+ RxSts = 0x1c, // unused
+ IntrStatus = 0x20,
+ IntrMask = 0x24,
+ IntrControl = 0x28,
+ IntrTimer = 0x2c, // unused (Interrupt Timer)
+ PMControl = 0x30, // unused (Power Mgmt Control/Status)
+ rsv2 = 0x34, // reserved
+ ROMControl = 0x38,
+ ROMInterface = 0x3c,
+ StationControl = 0x40,
+ GMIIControl = 0x44,
+ GIoCR = 0x48, // unused (GMAC IO Compensation)
+ GIoCtrl = 0x4c, // unused (GMAC IO Control)
+ TxMacControl = 0x50,
+ TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
+ RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
+ rsv3 = 0x5c, // reserved
+ RxMacControl = 0x60,
+ RxMacAddr = 0x62,
+ RxHashTable = 0x68,
+ // Undocumented = 0x6c,
+ RxWolCtrl = 0x70,
+ RxWolData = 0x74, // unused (Rx WOL Data Access)
+ RxMPSControl = 0x78, // unused (Rx MPS Control)
+ rsv4 = 0x7c, // reserved
+};
+
+enum sis190_register_content {
+ /* IntrStatus */
+ SoftInt = 0x40000000, // unused
+ Timeup = 0x20000000, // unused
+ PauseFrame = 0x00080000, // unused
+ MagicPacket = 0x00040000, // unused
+ WakeupFrame = 0x00020000, // unused
+ LinkChange = 0x00010000,
+ RxQEmpty = 0x00000080,
+ RxQInt = 0x00000040,
+ TxQ1Empty = 0x00000020, // unused
+ TxQ1Int = 0x00000010,
+ TxQ0Empty = 0x00000008, // unused
+ TxQ0Int = 0x00000004,
+ RxHalt = 0x00000002,
+ TxHalt = 0x00000001,
+
+ /* {Rx/Tx}CmdBits */
+ CmdReset = 0x10,
+ CmdRxEnb = 0x08, // unused
+ CmdTxEnb = 0x01,
+ RxBufEmpty = 0x01, // unused
+
+ /* Cfg9346Bits */
+ Cfg9346_Lock = 0x00, // unused
+ Cfg9346_Unlock = 0xc0, // unused
+
+ /* RxMacControl */
+ AcceptErr = 0x20, // unused
+ AcceptRunt = 0x10, // unused
+ AcceptBroadcast = 0x0800,
+ AcceptMulticast = 0x0400,
+ AcceptMyPhys = 0x0200,
+ AcceptAllPhys = 0x0100,
+
+ /* RxConfigBits */
+ RxCfgFIFOShift = 13,
+ RxCfgDMAShift = 8, // 0x1a in RxControl ?
+
+ /* TxConfigBits */
+ TxInterFrameGapShift = 24,
+ TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
+
+ LinkStatus = 0x02, // unused
+ FullDup = 0x01, // unused
+
+ /* TBICSRBit */
+ TBILinkOK = 0x02000000, // unused
+};
+
+struct TxDesc {
+ __le32 PSize;
+ __le32 status;
+ __le32 addr;
+ __le32 size;
+};
+
+struct RxDesc {
+ __le32 PSize;
+ __le32 status;
+ __le32 addr;
+ __le32 size;
+};
+
+enum _DescStatusBit {
+ /* _Desc.status */
+ OWNbit = 0x80000000, // RXOWN/TXOWN
+ INTbit = 0x40000000, // RXINT/TXINT
+ CRCbit = 0x00020000, // CRCOFF/CRCEN
+ PADbit = 0x00010000, // PREADD/PADEN
+ /* _Desc.size */
+ RingEnd = 0x80000000,
+ /* TxDesc.status */
+ LSEN = 0x08000000, // TSO ? -- FR
+ IPCS = 0x04000000,
+ TCPCS = 0x02000000,
+ UDPCS = 0x01000000,
+ BSTEN = 0x00800000,
+ EXTEN = 0x00400000,
+ DEFEN = 0x00200000,
+ BKFEN = 0x00100000,
+ CRSEN = 0x00080000,
+ COLEN = 0x00040000,
+ THOL3 = 0x30000000,
+ THOL2 = 0x20000000,
+ THOL1 = 0x10000000,
+ THOL0 = 0x00000000,
+
+ WND = 0x00080000,
+ TABRT = 0x00040000,
+ FIFO = 0x00020000,
+ LINK = 0x00010000,
+ ColCountMask = 0x0000ffff,
+ /* RxDesc.status */
+ IPON = 0x20000000,
+ TCPON = 0x10000000,
+ UDPON = 0x08000000,
+ Wakup = 0x00400000,
+ Magic = 0x00200000,
+ Pause = 0x00100000,
+ DEFbit = 0x00200000,
+ BCAST = 0x000c0000,
+ MCAST = 0x00080000,
+ UCAST = 0x00040000,
+ /* RxDesc.PSize */
+ TAGON = 0x80000000,
+ RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
+ ABORT = 0x00800000,
+ SHORT = 0x00400000,
+ LIMIT = 0x00200000,
+ MIIER = 0x00100000,
+ OVRUN = 0x00080000,
+ NIBON = 0x00040000,
+ COLON = 0x00020000,
+ CRCOK = 0x00010000,
+ RxSizeMask = 0x0000ffff
+ /*
+ * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
+ * provide two (unused with Linux) Tx queues. No publicly
+ * available documentation alas.
+ */
+};
+
+enum sis190_eeprom_access_register_bits {
+ EECS = 0x00000001, // unused
+ EECLK = 0x00000002, // unused
+ EEDO = 0x00000008, // unused
+ EEDI = 0x00000004, // unused
+ EEREQ = 0x00000080,
+ EEROP = 0x00000200,
+ EEWOP = 0x00000100 // unused
+};
+
+/* EEPROM Addresses */
+enum sis190_eeprom_address {
+ EEPROMSignature = 0x00,
+ EEPROMCLK = 0x01, // unused
+ EEPROMInfo = 0x02,
+ EEPROMMACAddr = 0x03
+};
+
+enum sis190_feature {
+ F_HAS_RGMII = 1,
+ F_PHY_88E1111 = 2,
+ F_PHY_BCM5461 = 4
+};
+
+struct sis190_private {
+ void __iomem *mmio_addr;
+ struct pci_dev *pci_dev;
+ struct net_device *dev;
+ spinlock_t lock;
+ u32 rx_buf_sz;
+ u32 cur_rx;
+ u32 cur_tx;
+ u32 dirty_rx;
+ u32 dirty_tx;
+ dma_addr_t rx_dma;
+ dma_addr_t tx_dma;
+ struct RxDesc *RxDescRing;
+ struct TxDesc *TxDescRing;
+ struct sk_buff *Rx_skbuff[NUM_RX_DESC];
+ struct sk_buff *Tx_skbuff[NUM_TX_DESC];
+ struct work_struct phy_task;
+ struct timer_list timer;
+ u32 msg_enable;
+ struct mii_if_info mii_if;
+ struct list_head first_phy;
+ u32 features;
+ u32 negotiated_lpa;
+ enum {
+ LNK_OFF,
+ LNK_ON,
+ LNK_AUTONEG,
+ } link_status;
+};
+
+struct sis190_phy {
+ struct list_head list;
+ int phy_id;
+ u16 id[2];
+ u16 status;
+ u8 type;
+};
+
+enum sis190_phy_type {
+ UNKNOWN = 0x00,
+ HOME = 0x01,
+ LAN = 0x02,
+ MIX = 0x03
+};
+
+static struct mii_chip_info {
+ const char *name;
+ u16 id[2];
+ unsigned int type;
+ u32 feature;
+} mii_chip_table[] = {
+ { "Atheros PHY", { 0x004d, 0xd010 }, LAN, 0 },
+ { "Atheros PHY AR8012", { 0x004d, 0xd020 }, LAN, 0 },
+ { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
+ { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 },
+ { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 },
+ { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
+ { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN, 0 },
+ { NULL, }
+};
+
+static const struct {
+ const char *name;
+} sis_chip_info[] = {
+ { "SiS 190 PCI Fast Ethernet adapter" },
+ { "SiS 191 PCI Gigabit Ethernet adapter" },
+};
+
+static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
+ { 0, },
+};
+
+MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
+
+static int rx_copybreak = 200;
+
+static struct {
+ u32 msg_enable;
+} debug = { -1 };
+
+MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver");
+module_param(rx_copybreak, int, 0);
+MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
+module_param_named(debug, debug.msg_enable, int, 0);
+MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
+MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+
+static const u32 sis190_intr_mask =
+ RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;
+
+/*
+ * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ * The chips use a 64 element hash table based on the Ethernet CRC.
+ */
+static const int multicast_filter_limit = 32;
+
+static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
+{
+ unsigned int i;
+
+ SIS_W32(GMIIControl, ctl);
+
+ msleep(1);
+
+ for (i = 0; i < 100; i++) {
+ if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
+ break;
+ msleep(1);
+ }
+
+ if (i > 99)
+ pr_err("PHY command failed !\n");
+}
+
+static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
+{
+ __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
+ (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
+ (((u32) val) << EhnMIIdataShift));
+}
+
+static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
+{
+ __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
+ (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
+
+ return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
+}
+
+static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+
+ mdio_write(tp->mmio_addr, phy_id, reg, val);
+}
+
+static int __mdio_read(struct net_device *dev, int phy_id, int reg)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+
+ return mdio_read(tp->mmio_addr, phy_id, reg);
+}
+
+static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
+{
+ mdio_read(ioaddr, phy_id, reg);
+ return mdio_read(ioaddr, phy_id, reg);
+}
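+/*
+ * The double read above handles latched MII bits: link status in
+ * BMSR is latched low until read, so the first access returns the
+ * stale latched value and the second the current one. Typical use:
+ * mdio_read_latched(ioaddr, phy_id, MII_BMSR) before testing
+ * BMSR_LSTATUS, as the PHY task and probe below do.
+ */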
+
+static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
+{
+ u16 data = 0xffff;
+ unsigned int i;
+
+ if (!(SIS_R32(ROMControl) & 0x0002))
+ return 0;
+
+ SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
+
+ for (i = 0; i < 200; i++) {
+ if (!(SIS_R32(ROMInterface) & EEREQ)) {
+ data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
+ break;
+ }
+ msleep(1);
+ }
+
+ return data;
+}
+
+static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
+{
+ SIS_W32(IntrMask, 0x00);
+ SIS_W32(IntrStatus, 0xffffffff);
+ SIS_PCI_COMMIT();
+}
+
+static void sis190_asic_down(void __iomem *ioaddr)
+{
+ /* Stop the chip's Tx and Rx DMA processes. */
+
+ SIS_W32(TxControl, 0x1a00);
+ SIS_W32(RxControl, 0x1a00);
+
+ sis190_irq_mask_and_ack(ioaddr);
+}
+
+static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
+{
+ desc->size |= cpu_to_le32(RingEnd);
+}
+
+static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
+{
+ u32 eor = le32_to_cpu(desc->size) & RingEnd;
+
+ desc->PSize = 0x0;
+ desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
+ wmb();
+ desc->status = cpu_to_le32(OWNbit | INTbit);
+}
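+/*
+ * Ordering note for the helper above: size and PSize must reach
+ * memory before OWNbit is set, hence the wmb() -- once the
+ * descriptor is owned by the ASIC it may be consumed at any time.
+ */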
+
+static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
+ u32 rx_buf_sz)
+{
+ desc->addr = cpu_to_le32(mapping);
+ sis190_give_to_asic(desc, rx_buf_sz);
+}
+
+static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
+{
+ desc->PSize = 0x0;
+ desc->addr = cpu_to_le32(0xdeadbeef);
+ desc->size &= cpu_to_le32(RingEnd);
+ wmb();
+ desc->status = 0x0;
+}
+
+static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
+ struct RxDesc *desc)
+{
+ u32 rx_buf_sz = tp->rx_buf_sz;
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+
+ skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
+ if (unlikely(!skb))
+ goto skb_alloc_failed;
+ mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(tp->pci_dev, mapping))
+ goto out;
+ sis190_map_to_asic(desc, mapping, rx_buf_sz);
+
+ return skb;
+
+out:
+ dev_kfree_skb_any(skb);
+skb_alloc_failed:
+ sis190_make_unusable_by_asic(desc);
+ return NULL;
+}
+
+static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
+ u32 start, u32 end)
+{
+ u32 cur;
+
+ for (cur = start; cur < end; cur++) {
+ unsigned int i = cur % NUM_RX_DESC;
+
+ if (tp->Rx_skbuff[i])
+ continue;
+
+ tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);
+
+ if (!tp->Rx_skbuff[i])
+ break;
+ }
+ return cur - start;
+}
+
+static bool sis190_try_rx_copy(struct sis190_private *tp,
+ struct sk_buff **sk_buff, int pkt_size,
+ dma_addr_t addr)
+{
+ struct sk_buff *skb;
+ bool done = false;
+
+ if (pkt_size >= rx_copybreak)
+ goto out;
+
+ skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
+ if (!skb)
+ goto out;
+
+ pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
+ *sk_buff = skb;
+ done = true;
+out:
+ return done;
+}
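+/*
+ * Copybreak sketch: frames shorter than rx_copybreak (200 bytes by
+ * default) are duplicated into a small freshly allocated skb so the
+ * full-sized DMA buffer can be handed straight back to the ring;
+ * larger frames skip the copy and the ring slot is refilled with a
+ * new buffer instead.
+ */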
+
+static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
+{
+#define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
+
+ if ((status & CRCOK) && !(status & ErrMask))
+ return 0;
+
+ if (!(status & CRCOK))
+ stats->rx_crc_errors++;
+ else if (status & OVRUN)
+ stats->rx_over_errors++;
+ else if (status & (SHORT | LIMIT))
+ stats->rx_length_errors++;
+ else if (status & (MIIER | NIBON | COLON))
+ stats->rx_frame_errors++;
+
+ stats->rx_errors++;
+ return -1;
+}
+
+static int sis190_rx_interrupt(struct net_device *dev,
+ struct sis190_private *tp, void __iomem *ioaddr)
+{
+ struct net_device_stats *stats = &dev->stats;
+ u32 rx_left, cur_rx = tp->cur_rx;
+ u32 delta, count;
+
+ rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
+ rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
+
+ for (; rx_left > 0; rx_left--, cur_rx++) {
+ unsigned int entry = cur_rx % NUM_RX_DESC;
+ struct RxDesc *desc = tp->RxDescRing + entry;
+ u32 status;
+
+ if (le32_to_cpu(desc->status) & OWNbit)
+ break;
+
+ status = le32_to_cpu(desc->PSize);
+
+ //netif_info(tp, intr, dev, "Rx PSize = %08x\n", status);
+
+ if (sis190_rx_pkt_err(status, stats) < 0)
+ sis190_give_to_asic(desc, tp->rx_buf_sz);
+ else {
+ struct sk_buff *skb = tp->Rx_skbuff[entry];
+ dma_addr_t addr = le32_to_cpu(desc->addr);
+ int pkt_size = (status & RxSizeMask) - 4;
+ struct pci_dev *pdev = tp->pci_dev;
+
+ if (unlikely(pkt_size > tp->rx_buf_sz)) {
+ netif_info(tp, intr, dev,
+ "(frag) status = %08x\n", status);
+ stats->rx_dropped++;
+ stats->rx_length_errors++;
+ sis190_give_to_asic(desc, tp->rx_buf_sz);
+ continue;
+ }
+
+
+ if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
+ pci_dma_sync_single_for_device(pdev, addr,
+ tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ sis190_give_to_asic(desc, tp->rx_buf_sz);
+ } else {
+ pci_unmap_single(pdev, addr, tp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ tp->Rx_skbuff[entry] = NULL;
+ sis190_make_unusable_by_asic(desc);
+ }
+
+ skb_put(skb, pkt_size);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ sis190_rx_skb(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_size;
+ if ((status & BCAST) == MCAST)
+ stats->multicast++;
+ }
+ }
+ count = cur_rx - tp->cur_rx;
+ tp->cur_rx = cur_rx;
+
+ delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
+ if (!delta && count)
+ netif_info(tp, intr, dev, "no Rx buffer allocated\n");
+ tp->dirty_rx += delta;
+
+ if ((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx)
+ netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");
+
+ return count;
+}
+
+static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
+ struct TxDesc *desc)
+{
+ unsigned int len;
+
+ len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
+
+ pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
+
+ memset(desc, 0x00, sizeof(*desc));
+}
+
+static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
+{
+#define TxErrMask (WND | TABRT | FIFO | LINK)
+
+ if (!unlikely(status & TxErrMask))
+ return 0;
+
+ if (status & WND)
+ stats->tx_window_errors++;
+ if (status & TABRT)
+ stats->tx_aborted_errors++;
+ if (status & FIFO)
+ stats->tx_fifo_errors++;
+ if (status & LINK)
+ stats->tx_carrier_errors++;
+
+ stats->tx_errors++;
+
+ return -1;
+}
+
+static void sis190_tx_interrupt(struct net_device *dev,
+ struct sis190_private *tp, void __iomem *ioaddr)
+{
+ struct net_device_stats *stats = &dev->stats;
+ u32 pending, dirty_tx = tp->dirty_tx;
+ /*
+ * Latched before cleanup so that the queue is only woken if it
+ * was stopped because the ring was full (hint: think preempt
+ * and unclocked smp systems).
+ */
+ unsigned int queue_stopped;
+
+ smp_rmb();
+ pending = tp->cur_tx - dirty_tx;
+ queue_stopped = (pending == NUM_TX_DESC);
+
+ for (; pending; pending--, dirty_tx++) {
+ unsigned int entry = dirty_tx % NUM_TX_DESC;
+ struct TxDesc *txd = tp->TxDescRing + entry;
+ u32 status = le32_to_cpu(txd->status);
+ struct sk_buff *skb;
+
+ if (status & OWNbit)
+ break;
+
+ skb = tp->Tx_skbuff[entry];
+
+ if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+ stats->collisions += ((status & ColCountMask) - 1);
+ }
+
+ sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
+ tp->Tx_skbuff[entry] = NULL;
+ dev_kfree_skb_irq(skb);
+ }
+
+ if (tp->dirty_tx != dirty_tx) {
+ tp->dirty_tx = dirty_tx;
+ smp_wmb();
+ if (queue_stopped)
+ netif_wake_queue(dev);
+ }
+}
+
+/*
+ * The interrupt handler does all of the Rx thread work and cleans up after
+ * the Tx thread.
+ */
+static irqreturn_t sis190_interrupt(int irq, void *__dev)
+{
+ struct net_device *dev = __dev;
+ struct sis190_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned int handled = 0;
+ u32 status;
+
+ status = SIS_R32(IntrStatus);
+
+ if ((status == 0xffffffff) || !status)
+ goto out;
+
+ handled = 1;
+
+ if (unlikely(!netif_running(dev))) {
+ sis190_asic_down(ioaddr);
+ goto out;
+ }
+
+ SIS_W32(IntrStatus, status);
+
+// netif_info(tp, intr, dev, "status = %08x\n", status);
+
+ if (status & LinkChange) {
+ netif_info(tp, intr, dev, "link change\n");
+ del_timer(&tp->timer);
+ schedule_work(&tp->phy_task);
+ }
+
+ if (status & RxQInt)
+ sis190_rx_interrupt(dev, tp, ioaddr);
+
+ if (status & TxQ0Int)
+ sis190_tx_interrupt(dev, tp, ioaddr);
+out:
+ return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sis190_netpoll(struct net_device *dev)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+ struct pci_dev *pdev = tp->pci_dev;
+
+ disable_irq(pdev->irq);
+ sis190_interrupt(pdev->irq, dev);
+ enable_irq(pdev->irq);
+}
+#endif
+
+static void sis190_free_rx_skb(struct sis190_private *tp,
+ struct sk_buff **sk_buff, struct RxDesc *desc)
+{
+ struct pci_dev *pdev = tp->pci_dev;
+
+ pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(*sk_buff);
+ *sk_buff = NULL;
+ sis190_make_unusable_by_asic(desc);
+}
+
+static void sis190_rx_clear(struct sis190_private *tp)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ if (!tp->Rx_skbuff[i])
+ continue;
+ sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
+ }
+}
+
+static void sis190_init_ring_indexes(struct sis190_private *tp)
+{
+ tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
+}
+
+static int sis190_init_ring(struct net_device *dev)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+
+ sis190_init_ring_indexes(tp);
+
+ memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
+ memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
+
+ if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
+ goto err_rx_clear;
+
+ sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
+
+ return 0;
+
+err_rx_clear:
+ sis190_rx_clear(tp);
+ return -ENOMEM;
+}
+
+static void sis190_set_rx_mode(struct net_device *dev)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ u16 rx_mode;
+
+ if (dev->flags & IFF_PROMISC) {
+ rx_mode =
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
+ AcceptAllPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else {
+ struct netdev_hw_addr *ha;
+
+ rx_mode = AcceptBroadcast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr =
+ ether_crc(ETH_ALEN, ha->addr) & 0x3f;
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ rx_mode |= AcceptMulticast;
+ }
+ }
+
+ spin_lock_irqsave(&tp->lock, flags);
+
+ SIS_W16(RxMacControl, rx_mode | 0x2);
+ SIS_W32(RxHashTable, mc_filter[0]);
+ SIS_W32(RxHashTable + 4, mc_filter[1]);
+
+ spin_unlock_irqrestore(&tp->lock, flags);
+}
+
+static void sis190_soft_reset(void __iomem *ioaddr)
+{
+ SIS_W32(IntrControl, 0x8000);
+ SIS_PCI_COMMIT();
+ SIS_W32(IntrControl, 0x0);
+ sis190_asic_down(ioaddr);
+}
+
+static void sis190_hw_start(struct net_device *dev)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ sis190_soft_reset(ioaddr);
+
+ SIS_W32(TxDescStartAddr, tp->tx_dma);
+ SIS_W32(RxDescStartAddr, tp->rx_dma);
+
+ SIS_W32(IntrStatus, 0xffffffff);
+ SIS_W32(IntrMask, 0x0);
+ SIS_W32(GMIIControl, 0x0);
+ SIS_W32(TxMacControl, 0x60);
+ SIS_W16(RxMacControl, 0x02);
+ SIS_W32(RxHashTable, 0x0);
+ SIS_W32(0x6c, 0x0);
+ SIS_W32(RxWolCtrl, 0x0);
+ SIS_W32(RxWolData, 0x0);
+
+ SIS_PCI_COMMIT();
+
+ sis190_set_rx_mode(dev);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+ SIS_W32(IntrMask, sis190_intr_mask);
+
+ SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
+ SIS_W32(RxControl, 0x1a1d);
+
+ netif_start_queue(dev);
+}
+
+static void sis190_phy_task(struct work_struct *work)
+{
+ struct sis190_private *tp =
+ container_of(work, struct sis190_private, phy_task);
+ struct net_device *dev = tp->dev;
+ void __iomem *ioaddr = tp->mmio_addr;
+ int phy_id = tp->mii_if.phy_id;
+ u16 val;
+
+ rtnl_lock();
+
+ if (!netif_running(dev))
+ goto out_unlock;
+
+ val = mdio_read(ioaddr, phy_id, MII_BMCR);
+ if (val & BMCR_RESET) {
+ // FIXME: needlessly high ? -- FR 02/07/2005
+ mod_timer(&tp->timer, jiffies + HZ/10);
+ goto out_unlock;
+ }
+
+ val = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
+ if (!(val & BMSR_ANEGCOMPLETE) && tp->link_status != LNK_AUTONEG) {
+ netif_carrier_off(dev);
+ netif_warn(tp, link, dev, "auto-negotiating...\n");
+ tp->link_status = LNK_AUTONEG;
+ } else if ((val & BMSR_LSTATUS) && tp->link_status != LNK_ON) {
+ /* Rejoice! */
+ struct {
+ int val;
+ u32 ctl;
+ const char *msg;
+ } reg31[] = {
+ { LPA_1000FULL, 0x07000c00 | 0x00001000,
+ "1000 Mbps Full Duplex" },
+ { LPA_1000HALF, 0x07000c00,
+ "1000 Mbps Half Duplex" },
+ { LPA_100FULL, 0x04000800 | 0x00001000,
+ "100 Mbps Full Duplex" },
+ { LPA_100HALF, 0x04000800,
+ "100 Mbps Half Duplex" },
+ { LPA_10FULL, 0x04000400 | 0x00001000,
+ "10 Mbps Full Duplex" },
+ { LPA_10HALF, 0x04000400,
+ "10 Mbps Half Duplex" },
+ { 0, 0x04000400, "unknown" }
+ }, *p = NULL;
+ u16 adv, autoexp, gigadv, gigrec;
+
+ val = mdio_read(ioaddr, phy_id, 0x1f);
+ netif_info(tp, link, dev, "mii ext = %04x\n", val);
+
+ val = mdio_read(ioaddr, phy_id, MII_LPA);
+ adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
+ autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
+ netif_info(tp, link, dev, "mii lpa=%04x adv=%04x exp=%04x\n",
+ val, adv, autoexp);
+
+ if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
+ /* check for gigabit speed */
+ gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000);
+ gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000);
+ val = (gigadv & (gigrec >> 2));
+ if (val & ADVERTISE_1000FULL)
+ p = reg31;
+ else if (val & ADVERTISE_1000HALF)
+ p = reg31 + 1;
+ }
+ if (!p) {
+ val &= adv;
+
+ for (p = reg31; p->val; p++) {
+ if ((val & p->val) == p->val)
+ break;
+ }
+ }
+
+ p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
+
+ if ((tp->features & F_HAS_RGMII) &&
+ (tp->features & F_PHY_BCM5461)) {
+ // Set Tx Delay in RGMII mode.
+ mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
+ udelay(200);
+ mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
+ p->ctl |= 0x03000000;
+ }
+
+ SIS_W32(StationControl, p->ctl);
+
+ if (tp->features & F_HAS_RGMII) {
+ SIS_W32(RGDelay, 0x0441);
+ SIS_W32(RGDelay, 0x0440);
+ }
+
+ tp->negotiated_lpa = p->val;
+
+ netif_info(tp, link, dev, "link on %s mode\n", p->msg);
+ netif_carrier_on(dev);
+ tp->link_status = LNK_ON;
+ } else if (!(val & BMSR_LSTATUS) && tp->link_status != LNK_AUTONEG)
+ tp->link_status = LNK_OFF;
+ mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
+
+out_unlock:
+ rtnl_unlock();
+}
+
+static void sis190_phy_timer(unsigned long __opaque)
+{
+ struct net_device *dev = (struct net_device *)__opaque;
+ struct sis190_private *tp = netdev_priv(dev);
+
+ if (likely(netif_running(dev)))
+ schedule_work(&tp->phy_task);
+}
+
+static inline void sis190_delete_timer(struct net_device *dev)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+
+ del_timer_sync(&tp->timer);
+}
+
+static inline void sis190_request_timer(struct net_device *dev)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+ struct timer_list *timer = &tp->timer;
+
+ init_timer(timer);
+ timer->expires = jiffies + SIS190_PHY_TIMEOUT;
+ timer->data = (unsigned long)dev;
+ timer->function = sis190_phy_timer;
+ add_timer(timer);
+}
+
+static void sis190_set_rxbufsize(struct sis190_private *tp,
+ struct net_device *dev)
+{
+ unsigned int mtu = dev->mtu;
+
+ tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
+ /* RxDesc->size has a licence to kill the lower bits */
+ if (tp->rx_buf_sz & 0x07) {
+ tp->rx_buf_sz += 8;
+ tp->rx_buf_sz &= RX_BUF_MASK;
+ }
+}
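+/*
+ * Example: the default 1500-byte MTU keeps rx_buf_sz at
+ * RX_BUF_SIZE == 1536, already a multiple of 8; a hypothetical
+ * 2000-byte MTU gives 2000 + ETH_HLEN + 8 == 2022, rounded up to
+ * 2024 so RX_BUF_MASK cannot truncate it.
+ */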
+
+static int sis190_open(struct net_device *dev)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+ struct pci_dev *pdev = tp->pci_dev;
+ int rc = -ENOMEM;
+
+ sis190_set_rxbufsize(tp, dev);
+
+ /*
+ * Rx and Tx descriptors need 256 bytes alignment.
+ * pci_alloc_consistent() guarantees a stronger alignment.
+ */
+ tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
+ if (!tp->TxDescRing)
+ goto out;
+
+ tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
+ if (!tp->RxDescRing)
+ goto err_free_tx_0;
+
+ rc = sis190_init_ring(dev);
+ if (rc < 0)
+ goto err_free_rx_1;
+
+ sis190_request_timer(dev);
+
+ rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
+ if (rc < 0)
+ goto err_release_timer_2;
+
+ sis190_hw_start(dev);
+out:
+ return rc;
+
+err_release_timer_2:
+ sis190_delete_timer(dev);
+ sis190_rx_clear(tp);
+err_free_rx_1:
+ pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
+ tp->rx_dma);
+err_free_tx_0:
+ pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
+ tp->tx_dma);
+ goto out;
+}
+
+static void sis190_tx_clear(struct sis190_private *tp)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ struct sk_buff *skb = tp->Tx_skbuff[i];
+
+ if (!skb)
+ continue;
+
+ sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
+ tp->Tx_skbuff[i] = NULL;
+ dev_kfree_skb(skb);
+
+ tp->dev->stats.tx_dropped++;
+ }
+ tp->cur_tx = tp->dirty_tx = 0;
+}
+
+static void sis190_down(struct net_device *dev)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned int poll_locked = 0;
+
+ sis190_delete_timer(dev);
+
+ netif_stop_queue(dev);
+
+ do {
+ spin_lock_irq(&tp->lock);
+
+ sis190_asic_down(ioaddr);
+
+ spin_unlock_irq(&tp->lock);
+
+ synchronize_irq(dev->irq);
+
+ if (!poll_locked)
+ poll_locked++;
+
+ synchronize_sched();
+
+ } while (SIS_R32(IntrMask));
+
+ sis190_tx_clear(tp);
+ sis190_rx_clear(tp);
+}
+
+static int sis190_close(struct net_device *dev)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+ struct pci_dev *pdev = tp->pci_dev;
+
+ sis190_down(dev);
+
+ free_irq(dev->irq, dev);
+
+ pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
+ pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
+
+ tp->TxDescRing = NULL;
+ tp->RxDescRing = NULL;
+
+ return 0;
+}
+
+static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ u32 len, entry, dirty_tx;
+ struct TxDesc *desc;
+ dma_addr_t mapping;
+
+ if (unlikely(skb->len < ETH_ZLEN)) {
+ if (skb_padto(skb, ETH_ZLEN)) {
+ dev->stats.tx_dropped++;
+ goto out;
+ }
+ len = ETH_ZLEN;
+ } else {
+ len = skb->len;
+ }
+
+ entry = tp->cur_tx % NUM_TX_DESC;
+ desc = tp->TxDescRing + entry;
+
+ if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
+ netif_stop_queue(dev);
+ netif_err(tp, tx_err, dev,
+ "BUG! Tx Ring full when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(tp->pci_dev, mapping)) {
+ netif_err(tp, tx_err, dev,
+ "PCI mapping failed, dropping packet");
+ return NETDEV_TX_BUSY;
+ }
+
+ tp->Tx_skbuff[entry] = skb;
+
+ desc->PSize = cpu_to_le32(len);
+ desc->addr = cpu_to_le32(mapping);
+
+ desc->size = cpu_to_le32(len);
+ if (entry == (NUM_TX_DESC - 1))
+ desc->size |= cpu_to_le32(RingEnd);
+
+ wmb();
+
+ desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
+ if (tp->negotiated_lpa & (LPA_1000HALF | LPA_100HALF | LPA_10HALF)) {
+ /* Half Duplex */
+ desc->status |= cpu_to_le32(COLEN | CRSEN | BKFEN);
+ if (tp->negotiated_lpa & (LPA_1000HALF | LPA_1000FULL))
+ desc->status |= cpu_to_le32(EXTEN | BSTEN); /* gigabit HD */
+ }
+
+ tp->cur_tx++;
+
+ smp_wmb();
+
+ SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
+
+ dirty_tx = tp->dirty_tx;
+ if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
+ netif_stop_queue(dev);
+ smp_rmb();
+ if (dirty_tx != tp->dirty_tx)
+ netif_wake_queue(dev);
+ }
+out:
+ return NETDEV_TX_OK;
+}
+
+static void sis190_free_phy(struct list_head *first_phy)
+{
+ struct sis190_phy *cur, *next;
+
+ list_for_each_entry_safe(cur, next, first_phy, list) {
+ kfree(cur);
+ }
+}
+
+/**
+ * sis190_default_phy - Select the default PHY for the sis190 mac.
+ * @dev: the net device to probe for
+ *
+ * Select the first detected PHY with link as the default.
+ * If none has link, select a PHY of type HOME as the default.
+ * If no HOME PHY exists, select a LAN one.
+ */
+static u16 sis190_default_phy(struct net_device *dev)
+{
+ struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
+ struct sis190_private *tp = netdev_priv(dev);
+ struct mii_if_info *mii_if = &tp->mii_if;
+ void __iomem *ioaddr = tp->mmio_addr;
+ u16 status;
+
+ phy_home = phy_default = phy_lan = NULL;
+
+ list_for_each_entry(phy, &tp->first_phy, list) {
+ status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
+
+ // Link ON & Not select default PHY & not ghost PHY.
+ if ((status & BMSR_LSTATUS) &&
+ !phy_default &&
+ (phy->type != UNKNOWN)) {
+ phy_default = phy;
+ } else {
+ status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
+ mdio_write(ioaddr, phy->phy_id, MII_BMCR,
+ status | BMCR_ANENABLE | BMCR_ISOLATE);
+ if (phy->type == HOME)
+ phy_home = phy;
+ else if (phy->type == LAN)
+ phy_lan = phy;
+ }
+ }
+
+ if (!phy_default) {
+ if (phy_home)
+ phy_default = phy_home;
+ else if (phy_lan)
+ phy_default = phy_lan;
+ else
+ phy_default = list_first_entry(&tp->first_phy,
+ struct sis190_phy, list);
+ }
+
+ if (mii_if->phy_id != phy_default->phy_id) {
+ mii_if->phy_id = phy_default->phy_id;
+ if (netif_msg_probe(tp))
+ pr_info("%s: Using transceiver at address %d as default\n",
+ pci_name(tp->pci_dev), mii_if->phy_id);
+ }
+
+ status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
+ status &= (~BMCR_ISOLATE);
+
+ mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
+ status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
+
+ return status;
+}
+
+static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
+ struct sis190_phy *phy, unsigned int phy_id,
+ u16 mii_status)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct mii_chip_info *p;
+
+ INIT_LIST_HEAD(&phy->list);
+ phy->status = mii_status;
+ phy->phy_id = phy_id;
+
+ phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
+ phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
+
+ for (p = mii_chip_table; p->type; p++) {
+ if ((p->id[0] == phy->id[0]) &&
+ (p->id[1] == (phy->id[1] & 0xfff0))) {
+ break;
+ }
+ }
+
+ if (p->id[1]) {
+ phy->type = (p->type == MIX) ?
+ ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
+ LAN : HOME) : p->type;
+ tp->features |= p->feature;
+ if (netif_msg_probe(tp))
+ pr_info("%s: %s transceiver at address %d\n",
+ pci_name(tp->pci_dev), p->name, phy_id);
+ } else {
+ phy->type = UNKNOWN;
+ if (netif_msg_probe(tp))
+ pr_info("%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
+ pci_name(tp->pci_dev),
+ phy->id[0], (phy->id[1] & 0xfff0), phy_id);
+ }
+}
+
+static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
+{
+ if (tp->features & F_PHY_88E1111) {
+ void __iomem *ioaddr = tp->mmio_addr;
+ int phy_id = tp->mii_if.phy_id;
+ u16 reg[2][2] = {
+ { 0x808b, 0x0ce1 },
+ { 0x808f, 0x0c60 }
+ }, *p;
+
+ p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
+
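+		/*
+		 * Magic fixup for the Marvell 88E1111: presumably this
+		 * selects RGMII vs GMII timing via its extended registers
+		 * 0x1b and 0x14.
+		 */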
+ mdio_write(ioaddr, phy_id, 0x1b, p[0]);
+ udelay(200);
+ mdio_write(ioaddr, phy_id, 0x14, p[1]);
+ udelay(200);
+ }
+}
+
+/**
+ * sis190_mii_probe - Probe MII PHY for sis190
+ * @dev: the net device to probe for
+ *
+ * Search all 32 possible MII PHY addresses.
+ * Identify and set the current PHY if one is found;
+ * return an error if none is found.
+ */
+static int __devinit sis190_mii_probe(struct net_device *dev)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+ struct mii_if_info *mii_if = &tp->mii_if;
+ void __iomem *ioaddr = tp->mmio_addr;
+ int phy_id;
+ int rc = 0;
+
+ INIT_LIST_HEAD(&tp->first_phy);
+
+ for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
+ struct sis190_phy *phy;
+ u16 status;
+
+ status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
+
+ // Try the next address if the current PHY is not accessible.
+ if (status == 0xffff || status == 0x0000)
+ continue;
+
+ phy = kmalloc(sizeof(*phy), GFP_KERNEL);
+ if (!phy) {
+ sis190_free_phy(&tp->first_phy);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ sis190_init_phy(dev, tp, phy, phy_id, status);
+
+ list_add(&tp->first_phy, &phy->list);
+ }
+
+ if (list_empty(&tp->first_phy)) {
+ if (netif_msg_probe(tp))
+ pr_info("%s: No MII transceivers found!\n",
+ pci_name(tp->pci_dev));
+ rc = -EIO;
+ goto out;
+ }
+
+ /* Select default PHY for mac */
+ sis190_default_phy(dev);
+
+ sis190_mii_probe_88e1111_fixup(tp);
+
+ mii_if->dev = dev;
+ mii_if->mdio_read = __mdio_read;
+ mii_if->mdio_write = __mdio_write;
+ mii_if->phy_id_mask = PHY_ID_ANY;
+ mii_if->reg_num_mask = MII_REG_ANY;
+out:
+ return rc;
+}
+
+static void sis190_mii_remove(struct net_device *dev)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+
+ sis190_free_phy(&tp->first_phy);
+}
+
+static void sis190_release_board(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct sis190_private *tp = netdev_priv(dev);
+
+ iounmap(tp->mmio_addr);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ free_netdev(dev);
+}
+
+static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
+{
+ struct sis190_private *tp;
+ struct net_device *dev;
+ void __iomem *ioaddr;
+ int rc;
+
+ dev = alloc_etherdev(sizeof(*tp));
+ if (!dev) {
+ if (netif_msg_drv(&debug))
+ pr_err("unable to alloc new ethernet\n");
+ rc = -ENOMEM;
+ goto err_out_0;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ tp = netdev_priv(dev);
+ tp->dev = dev;
+ tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
+
+ rc = pci_enable_device(pdev);
+ if (rc < 0) {
+ if (netif_msg_probe(tp))
+ pr_err("%s: enable failure\n", pci_name(pdev));
+ goto err_free_dev_1;
+ }
+
+ rc = -ENODEV;
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ if (netif_msg_probe(tp))
+ pr_err("%s: region #0 is no MMIO resource\n",
+ pci_name(pdev));
+ goto err_pci_disable_2;
+ }
+ if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
+ if (netif_msg_probe(tp))
+ pr_err("%s: invalid PCI region size(s)\n",
+ pci_name(pdev));
+ goto err_pci_disable_2;
+ }
+
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc < 0) {
+ if (netif_msg_probe(tp))
+ pr_err("%s: could not request regions\n",
+ pci_name(pdev));
+ goto err_pci_disable_2;
+ }
+
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc < 0) {
+ if (netif_msg_probe(tp))
+ pr_err("%s: DMA configuration failed\n",
+ pci_name(pdev));
+ goto err_free_res_3;
+ }
+
+ pci_set_master(pdev);
+
+ ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
+ if (!ioaddr) {
+ if (netif_msg_probe(tp))
+ pr_err("%s: cannot remap MMIO, aborting\n",
+ pci_name(pdev));
+ rc = -EIO;
+ goto err_free_res_3;
+ }
+
+ tp->pci_dev = pdev;
+ tp->mmio_addr = ioaddr;
+ tp->link_status = LNK_OFF;
+
+ sis190_irq_mask_and_ack(ioaddr);
+
+ sis190_soft_reset(ioaddr);
+out:
+ return dev;
+
+err_free_res_3:
+ pci_release_regions(pdev);
+err_pci_disable_2:
+ pci_disable_device(pdev);
+err_free_dev_1:
+ free_netdev(dev);
+err_out_0:
+ dev = ERR_PTR(rc);
+ goto out;
+}
+
+static void sis190_tx_timeout(struct net_device *dev)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ u8 tmp8;
+
+ /* Disable Tx, if not already */
+ tmp8 = SIS_R8(TxControl);
+ if (tmp8 & CmdTxEnb)
+ SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
+
+ netif_info(tp, tx_err, dev, "Transmit timeout, status %08x %08x\n",
+ SIS_R32(TxControl), SIS_R32(TxSts));
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ SIS_W32(IntrMask, 0x0000);
+
+ /* Stop a shared interrupt from scavenging while we are. */
+ spin_lock_irq(&tp->lock);
+ sis190_tx_clear(tp);
+ spin_unlock_irq(&tp->lock);
+
+ /* ...and finally, reset everything. */
+ sis190_hw_start(dev);
+
+ netif_wake_queue(dev);
+}
+
+static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
+{
+ tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
+}
+
+static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
+ struct net_device *dev)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ u16 sig;
+ int i;
+
+ if (netif_msg_probe(tp))
+ pr_info("%s: Read MAC address from EEPROM\n", pci_name(pdev));
+
+ /* Check to see if there is a sane EEPROM */
+ sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
+
+ if ((sig == 0xffff) || (sig == 0x0000)) {
+ if (netif_msg_probe(tp))
+ pr_info("%s: Error EEPROM read %x\n",
+ pci_name(pdev), sig);
+ return -EIO;
+ }
+
+ /* Get MAC address from EEPROM */
+ for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
+ u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
+
+ ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
+ }
+
+ sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
+
+ return 0;
+}
+
+/**
+ * sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model
+ * @pdev: PCI device
+ * @dev: network device to get address for
+ *
+ * The SiS96x models store the MAC address in APC CMOS RAM,
+ * which is accessed through the ISA bridge.
+ * The MAC address is read into @dev->dev_addr.
+ */
+static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
+ struct net_device *dev)
+{
+ static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
+ struct sis190_private *tp = netdev_priv(dev);
+ struct pci_dev *isa_bridge;
+ u8 reg, tmp8;
+ unsigned int i;
+
+ if (netif_msg_probe(tp))
+ pr_info("%s: Read MAC address from APC\n", pci_name(pdev));
+
+ for (i = 0; i < ARRAY_SIZE(ids); i++) {
+ isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
+ if (isa_bridge)
+ break;
+ }
+
+ if (!isa_bridge) {
+ if (netif_msg_probe(tp))
+ pr_info("%s: Can not find ISA bridge\n",
+ pci_name(pdev));
+ return -EIO;
+ }
+
+ /* Enable port 78h & 79h to access APC Registers. */
+ pci_read_config_byte(isa_bridge, 0x48, &tmp8);
+ reg = (tmp8 & ~0x02);
+ pci_write_config_byte(isa_bridge, 0x48, reg);
+ udelay(50);
+ pci_read_config_byte(isa_bridge, 0x48, &reg);
+
+ for (i = 0; i < MAC_ADDR_LEN; i++) {
+ outb(0x9 + i, 0x78);
+ dev->dev_addr[i] = inb(0x79);
+ }
+
+ outb(0x12, 0x78);
+ reg = inb(0x79);
+
+ sis190_set_rgmii(tp, reg);
+
+ /* Restore the value to ISA Bridge */
+ pci_write_config_byte(isa_bridge, 0x48, tmp8);
+ pci_dev_put(isa_bridge);
+
+ return 0;
+}
+
+/**
+ * sis190_init_rxfilter - Initialize the Rx filter
+ * @dev: network device to initialize
+ *
+ * Set receive filter address to our MAC address
+ * and enable packet filtering.
+ */
+static inline void sis190_init_rxfilter(struct net_device *dev)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ u16 ctl;
+ int i;
+
+ ctl = SIS_R16(RxMacControl);
+ /*
+ * Disable packet filtering before setting filter.
+ * Note: SiS's driver writes 32 bits, but RxMacControl is only 16 bits
+ * wide and is followed by RxMacAddr (6 bytes). Strange. -- FR
+ */
+ SIS_W16(RxMacControl, ctl & ~0x0f00);
+
+ for (i = 0; i < MAC_ADDR_LEN; i++)
+ SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
+
+ SIS_W16(RxMacControl, ctl);
+ SIS_PCI_COMMIT();
+}
+
+static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
+ struct net_device *dev)
+{
+ int rc;
+
+ rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
+ if (rc < 0) {
+ u8 reg;
+
+ pci_read_config_byte(pdev, 0x73, &reg);
+
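+	/*
+	 * Bit 0 of PCI config register 0x73 apparently indicates that
+	 * the MAC address lives in the APC CMOS RAM rather than in an
+	 * EEPROM.
+	 */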
+ if (reg & 0x00000001)
+ rc = sis190_get_mac_addr_from_apc(pdev, dev);
+ }
+ return rc;
+}
+
+static void sis190_set_speed_auto(struct net_device *dev)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ int phy_id = tp->mii_if.phy_id;
+ int val;
+
+ netif_info(tp, link, dev, "Enabling Auto-negotiation\n");
+
+ val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
+
+ // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
+ // unchanged.
+ mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
+ ADVERTISE_100FULL | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_10HALF);
+
+ // Enable 1000 Full Mode.
+ mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
+
+ // Enable auto-negotiation and restart auto-negotiation.
+ mdio_write(ioaddr, phy_id, MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
+}
+
+static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+
+ return mii_ethtool_gset(&tp->mii_if, cmd);
+}
+
+static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+
+ return mii_ethtool_sset(&tp->mii_if, cmd);
+}
+
+static void sis190_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(tp->pci_dev));
+}
+
+static int sis190_get_regs_len(struct net_device *dev)
+{
+ return SIS190_REGS_SIZE;
+}
+
+static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+ unsigned long flags;
+
+ if (regs->len > SIS190_REGS_SIZE)
+ regs->len = SIS190_REGS_SIZE;
+
+ spin_lock_irqsave(&tp->lock, flags);
+ memcpy_fromio(p, tp->mmio_addr, regs->len);
+ spin_unlock_irqrestore(&tp->lock, flags);
+}
+
+static int sis190_nway_reset(struct net_device *dev)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+
+ return mii_nway_restart(&tp->mii_if);
+}
+
+static u32 sis190_get_msglevel(struct net_device *dev)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+
+ return tp->msg_enable;
+}
+
+static void sis190_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+
+ tp->msg_enable = value;
+}
+
+static const struct ethtool_ops sis190_ethtool_ops = {
+ .get_settings = sis190_get_settings,
+ .set_settings = sis190_set_settings,
+ .get_drvinfo = sis190_get_drvinfo,
+ .get_regs_len = sis190_get_regs_len,
+ .get_regs = sis190_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = sis190_get_msglevel,
+ .set_msglevel = sis190_set_msglevel,
+ .nway_reset = sis190_nway_reset,
+};
+
+static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct sis190_private *tp = netdev_priv(dev);
+
+ return !netif_running(dev) ? -EINVAL :
+ generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
+}
+
+static int sis190_mac_addr(struct net_device *dev, void *p)
+{
+ int rc;
+
+ rc = eth_mac_addr(dev, p);
+ if (!rc)
+ sis190_init_rxfilter(dev);
+ return rc;
+}
+
+static const struct net_device_ops sis190_netdev_ops = {
+ .ndo_open = sis190_open,
+ .ndo_stop = sis190_close,
+ .ndo_do_ioctl = sis190_ioctl,
+ .ndo_start_xmit = sis190_start_xmit,
+ .ndo_tx_timeout = sis190_tx_timeout,
+ .ndo_set_rx_mode = sis190_set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = sis190_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = sis190_netpoll,
+#endif
+};
+
+static int __devinit sis190_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int printed_version = 0;
+ struct sis190_private *tp;
+ struct net_device *dev;
+ void __iomem *ioaddr;
+ int rc;
+
+ if (!printed_version) {
+ if (netif_msg_drv(&debug))
+ pr_info(SIS190_DRIVER_NAME " loaded\n");
+ printed_version = 1;
+ }
+
+ dev = sis190_init_board(pdev);
+ if (IS_ERR(dev)) {
+ rc = PTR_ERR(dev);
+ goto out;
+ }
+
+ pci_set_drvdata(pdev, dev);
+
+ tp = netdev_priv(dev);
+ ioaddr = tp->mmio_addr;
+
+ rc = sis190_get_mac_addr(pdev, dev);
+ if (rc < 0)
+ goto err_release_board;
+
+ sis190_init_rxfilter(dev);
+
+ INIT_WORK(&tp->phy_task, sis190_phy_task);
+
+ dev->netdev_ops = &sis190_netdev_ops;
+
+ SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
+ dev->irq = pdev->irq;
+ dev->base_addr = (unsigned long) 0xdead;
+ dev->watchdog_timeo = SIS190_TX_TIMEOUT;
+
+ spin_lock_init(&tp->lock);
+
+ rc = sis190_mii_probe(dev);
+ if (rc < 0)
+ goto err_release_board;
+
+ rc = register_netdev(dev);
+ if (rc < 0)
+ goto err_remove_mii;
+
+ if (netif_msg_probe(tp)) {
+ netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
+ pci_name(pdev),
+ sis_chip_info[ent->driver_data].name,
+ ioaddr, dev->irq, dev->dev_addr);
+ netdev_info(dev, "%s mode.\n",
+ (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
+ }
+
+ netif_carrier_off(dev);
+
+ sis190_set_speed_auto(dev);
+out:
+ return rc;
+
+err_remove_mii:
+ sis190_mii_remove(dev);
+err_release_board:
+ sis190_release_board(pdev);
+ goto out;
+}
+
+static void __devexit sis190_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct sis190_private *tp = netdev_priv(dev);
+
+ sis190_mii_remove(dev);
+ cancel_work_sync(&tp->phy_task);
+ unregister_netdev(dev);
+ sis190_release_board(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver sis190_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = sis190_pci_tbl,
+ .probe = sis190_init_one,
+ .remove = __devexit_p(sis190_remove_one),
+};
+
+static int __init sis190_init_module(void)
+{
+ return pci_register_driver(&sis190_pci_driver);
+}
+
+static void __exit sis190_cleanup_module(void)
+{
+ pci_unregister_driver(&sis190_pci_driver);
+}
+
+module_init(sis190_init_module);
+module_exit(sis190_cleanup_module);
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
new file mode 100644
index 000000000000..a184abc5ef11
--- /dev/null
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -0,0 +1,2494 @@
+/* sis900.c: A SiS 900/7016 PCI Fast Ethernet driver for Linux.
+ Copyright 1999 Silicon Integrated System Corporation
+ Revision: 1.08.10 Apr. 2 2006
+
+ Modified from the driver which is originally written by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on this skeleton fall under the GPL and must retain
+ the authorship (implicit copyright) notice.
+
+ References:
+ SiS 7016 Fast Ethernet PCI Bus 10/100 Mbps LAN Controller with OnNow Support,
+ preliminary Rev. 1.0 Jan. 14, 1998
+ SiS 900 Fast Ethernet PCI Bus 10/100 Mbps LAN Single Chip with OnNow Support,
+ preliminary Rev. 1.0 Nov. 10, 1998
+ SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution,
+ preliminary Rev. 1.0 Jan. 18, 1998
+
+ Rev 1.08.10 Apr. 2 2006 Daniele Venzano add vlan (jumbo packets) support
+ Rev 1.08.09 Sep. 19 2005 Daniele Venzano add Wake on LAN support
+ Rev 1.08.08 Jan. 22 2005 Daniele Venzano use netif_msg for debugging messages
+ Rev 1.08.07 Nov. 2 2003 Daniele Venzano <venza@brownhat.org> add suspend/resume support
+ Rev 1.08.06 Sep. 24 2002 Mufasa Yang bug fix for Tx timeout & add SiS963 support
+ Rev 1.08.05 Jun. 6 2002 Mufasa Yang bug fix for read_eeprom & Tx descriptor over-boundary
+ Rev 1.08.04 Apr. 25 2002 Mufasa Yang <mufasa@sis.com.tw> added SiS962 support
+ Rev 1.08.03 Feb. 1 2002 Matt Domsch <Matt_Domsch@dell.com> update to use library crc32 function
+ Rev 1.08.02 Nov. 30 2001 Hui-Fen Hsu workaround for EDB & bug fix for dhcp problem
+ Rev 1.08.01 Aug. 25 2001 Hui-Fen Hsu update for 630ET & workaround for ICS1893 PHY
+ Rev 1.08.00 Jun. 11 2001 Hui-Fen Hsu workaround for RTL8201 PHY and some bug fix
+ Rev 1.07.11 Apr. 2 2001 Hui-Fen Hsu updates PCI drivers to use the new pci_set_dma_mask for kernel 2.4.3
+ Rev 1.07.10 Mar. 1 2001 Hui-Fen Hsu <hfhsu@sis.com.tw> some bug fix & 635M/B support
+ Rev 1.07.09 Feb. 9 2001 Dave Jones <davej@suse.de> PCI enable cleanup
+ Rev 1.07.08 Jan. 8 2001 Lei-Chun Chang added RTL8201 PHY support
+ Rev 1.07.07 Nov. 29 2000 Lei-Chun Chang added kernel-doc extractable documentation and 630 workaround fix
+ Rev 1.07.06 Nov. 7 2000 Jeff Garzik <jgarzik@pobox.com> some bug fix and cleaning
+ Rev 1.07.05 Nov. 6 2000 metapirat<metapirat@gmx.de> contribute media type select by ifconfig
+ Rev 1.07.04 Sep. 6 2000 Lei-Chun Chang added ICS1893 PHY support
+ Rev 1.07.03 Aug. 24 2000 Lei-Chun Chang (lcchang@sis.com.tw) modified 630E equalizer workaround rule
+ Rev 1.07.01 Aug. 08 2000 Ollie Lho minor update for SiS 630E and SiS 630E A1
+ Rev 1.07 Mar. 07 2000 Ollie Lho bug fix in Rx buffer ring
+ Rev 1.06.04 Feb. 11 2000 Jeff Garzik <jgarzik@pobox.com> softnet and init for kernel 2.4
+ Rev 1.06.03 Dec. 23 1999 Ollie Lho Third release
+ Rev 1.06.02 Nov. 23 1999 Ollie Lho bug in mac probing fixed
+ Rev 1.06.01 Nov. 16 1999 Ollie Lho CRC calculation provide by Joseph Zbiciak (im14u2c@primenet.com)
+ Rev 1.06 Nov. 4 1999 Ollie Lho (ollie@sis.com.tw) Second release
+ Rev 1.05.05 Oct. 29 1999 Ollie Lho (ollie@sis.com.tw) Single buffer Tx/Rx
+ Chin-Shan Li (lcs@sis.com.tw) Added AMD Am79c901 HomePNA PHY support
+ Rev 1.05 Aug. 7 1999 Jim Huang (cmhuang@sis.com.tw) Initial release
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <linux/mii.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h> /* User space memory access functions */
+
+#include "sis900.h"
+
+#define SIS900_MODULE_NAME "sis900"
+#define SIS900_DRV_VERSION "v1.08.10 Apr. 2 2006"
+
+static const char version[] __devinitconst =
+ KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n";
+
+static int max_interrupt_work = 40;
+static int multicast_filter_limit = 128;
+
+static int sis900_debug = -1; /* Use SIS900_DEF_MSG as value */
+
+#define SIS900_DEF_MSG \
+ (NETIF_MSG_DRV | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (4*HZ)
+
+enum {
+ SIS_900 = 0,
+ SIS_7016
+};
+static const char * card_names[] = {
+ "SiS 900 PCI Fast Ethernet",
+ "SiS 7016 PCI Fast Ethernet"
+};
+static DEFINE_PCI_DEVICE_TABLE(sis900_pci_tbl) = {
+ {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_900,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_900},
+ {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7016,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_7016},
+ {0,}
+};
+MODULE_DEVICE_TABLE (pci, sis900_pci_tbl);
+
+static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex);
+
+static const struct mii_chip_info {
+ const char * name;
+ u16 phy_id0;
+ u16 phy_id1;
+ u8 phy_types;
+#define HOME 0x0001
+#define LAN 0x0002
+#define MIX 0x0003
+#define UNKNOWN 0x0
+} mii_chip_table[] = {
+ { "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN },
+ { "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN },
+ { "SiS 900 on Foxconn 661 7MI", 0x0143, 0xBC70, LAN },
+ { "Altimata AC101LF PHY", 0x0022, 0x5520, LAN },
+ { "ADM 7001 LAN PHY", 0x002e, 0xcc60, LAN },
+ { "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN },
+ { "AMD 79C901 HomePNA PHY", 0x0000, 0x6B90, HOME},
+ { "ICS LAN PHY", 0x0015, 0xF440, LAN },
+ { "ICS LAN PHY", 0x0143, 0xBC70, LAN },
+ { "NS 83851 PHY", 0x2000, 0x5C20, MIX },
+ { "NS 83847 PHY", 0x2000, 0x5C30, MIX },
+ { "Realtek RTL8201 PHY", 0x0000, 0x8200, LAN },
+ { "VIA 6103 PHY", 0x0101, 0x8f20, LAN },
+ {NULL,},
+};
+
+struct mii_phy {
+ struct mii_phy * next;
+ int phy_addr;
+ u16 phy_id0;
+ u16 phy_id1;
+ u16 status;
+ u8 phy_types;
+};
+
+typedef struct _BufferDesc {
+ u32 link;
+ u32 cmdsts;
+ u32 bufptr;
+} BufferDesc;
+
+struct sis900_private {
+ struct pci_dev * pci_dev;
+
+ spinlock_t lock;
+
+ struct mii_phy * mii;
+ struct mii_phy * first_mii; /* record the first mii structure */
+ unsigned int cur_phy;
+ struct mii_if_info mii_info;
+
+ struct timer_list timer; /* Link status detection timer. */
+ u8 autong_complete; /* 1: auto-negotiate complete */
+
+ u32 msg_enable;
+
+ unsigned int cur_rx, dirty_rx; /* producer/consumer pointers for Tx/Rx ring */
+ unsigned int cur_tx, dirty_tx;
+
+ /* The saved address of a sent/receive-in-place packet buffer */
+ struct sk_buff *tx_skbuff[NUM_TX_DESC];
+ struct sk_buff *rx_skbuff[NUM_RX_DESC];
+ BufferDesc *tx_ring;
+ BufferDesc *rx_ring;
+
+ dma_addr_t tx_ring_dma;
+ dma_addr_t rx_ring_dma;
+
+ unsigned int tx_full; /* The Tx queue is full. */
+ u8 host_bridge_rev;
+ u8 chipset_rev;
+};
+
+MODULE_AUTHOR("Jim Huang <cmhuang@sis.com.tw>, Ollie Lho <ollie@sis.com.tw>");
+MODULE_DESCRIPTION("SiS 900 PCI Fast Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(multicast_filter_limit, int, 0444);
+module_param(max_interrupt_work, int, 0444);
+module_param(sis900_debug, int, 0444);
+MODULE_PARM_DESC(multicast_filter_limit, "SiS 900/7016 maximum number of filtered multicast addresses");
+MODULE_PARM_DESC(max_interrupt_work, "SiS 900/7016 maximum events handled per interrupt");
+MODULE_PARM_DESC(sis900_debug, "SiS 900/7016 bitmapped debugging message level");
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sis900_poll(struct net_device *dev);
+#endif
+static int sis900_open(struct net_device *net_dev);
+static int sis900_mii_probe (struct net_device * net_dev);
+static void sis900_init_rxfilter (struct net_device * net_dev);
+static u16 read_eeprom(long ioaddr, int location);
+static int mdio_read(struct net_device *net_dev, int phy_id, int location);
+static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val);
+static void sis900_timer(unsigned long data);
+static void sis900_check_mode (struct net_device *net_dev, struct mii_phy *mii_phy);
+static void sis900_tx_timeout(struct net_device *net_dev);
+static void sis900_init_tx_ring(struct net_device *net_dev);
+static void sis900_init_rx_ring(struct net_device *net_dev);
+static netdev_tx_t sis900_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev);
+static int sis900_rx(struct net_device *net_dev);
+static void sis900_finish_xmit (struct net_device *net_dev);
+static irqreturn_t sis900_interrupt(int irq, void *dev_instance);
+static int sis900_close(struct net_device *net_dev);
+static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd);
+static u16 sis900_mcast_bitnr(u8 *addr, u8 revision);
+static void set_rx_mode(struct net_device *net_dev);
+static void sis900_reset(struct net_device *net_dev);
+static void sis630_set_eq(struct net_device *net_dev, u8 revision);
+static int sis900_set_config(struct net_device *dev, struct ifmap *map);
+static u16 sis900_default_phy(struct net_device * net_dev);
+static void sis900_set_capability( struct net_device *net_dev ,struct mii_phy *phy);
+static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr);
+static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr);
+static void sis900_set_mode (long ioaddr, int speed, int duplex);
+static const struct ethtool_ops sis900_ethtool_ops;
+
+/**
+ * sis900_get_mac_addr - Get MAC address for stand alone SiS900 model
+ * @pci_dev: the sis900 pci device
+ * @net_dev: the net device to get address for
+ *
+ * Older SiS900 models and friends use an EEPROM to store the MAC address.
+ * The MAC address is read via read_eeprom() into @net_dev->dev_addr and
+ * @net_dev->perm_addr.
+ */
+
+static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev)
+{
+ long ioaddr = pci_resource_start(pci_dev, 0);
+ u16 signature;
+ int i;
+
+ /* check to see if we have sane EEPROM */
+ signature = (u16) read_eeprom(ioaddr, EEPROMSignature);
+ if (signature == 0xffff || signature == 0x0000) {
+ printk (KERN_WARNING "%s: Error EEPROM read %x\n",
+ pci_name(pci_dev), signature);
+ return 0;
+ }
+
+ /* get MAC address from EEPROM */
+ for (i = 0; i < 3; i++)
+ ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+
+ /* Store MAC Address in perm_addr */
+ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
+ return 1;
+}
+
+/**
+ * sis630e_get_mac_addr - Get MAC address for SiS630E model
+ * @pci_dev: the sis900 pci device
+ * @net_dev: the net device to get address for
+ *
+ * The SiS630E model stores the MAC address in APC CMOS RAM,
+ * which is accessed through the ISA bridge.
+ * The MAC address is read into @net_dev->dev_addr and
+ * @net_dev->perm_addr.
+ */
+
+static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
+ struct net_device *net_dev)
+{
+ struct pci_dev *isa_bridge = NULL;
+ u8 reg;
+ int i;
+
+ isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0008, isa_bridge);
+ if (!isa_bridge)
+ isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0018, isa_bridge);
+ if (!isa_bridge) {
+ printk(KERN_WARNING "%s: Can not find ISA bridge\n",
+ pci_name(pci_dev));
+ return 0;
+ }
+ pci_read_config_byte(isa_bridge, 0x48, &reg);
+ pci_write_config_byte(isa_bridge, 0x48, reg | 0x40);
+
+ for (i = 0; i < 6; i++) {
+ outb(0x09 + i, 0x70);
+ ((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
+ }
+
+ /* Store MAC Address in perm_addr */
+ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
+ pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
+ pci_dev_put(isa_bridge);
+
+ return 1;
+}
+
+
+/**
+ * sis635_get_mac_addr - Get MAC address for SIS635 model
+ * @pci_dev: the sis900 pci device
+ * @net_dev: the net device to get address for
+ *
+ * On the SiS635 model, set the MAC Reload Bit to load the MAC address
+ * from the APC into rfdr. rfdr is accessed through rfcr. The MAC address
+ * is read into @net_dev->dev_addr and @net_dev->perm_addr.
+ */
+
+static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
+ struct net_device *net_dev)
+{
+ long ioaddr = net_dev->base_addr;
+ u32 rfcrSave;
+ u32 i;
+
+ rfcrSave = inl(rfcr + ioaddr);
+
+ outl(rfcrSave | RELOAD, ioaddr + cr);
+ outl(0, ioaddr + cr);
+
+ /* disable packet filtering before setting filter */
+ outl(rfcrSave & ~RFEN, rfcr + ioaddr);
+
+ /* load MAC addr to filter data register */
+ for (i = 0 ; i < 3 ; i++) {
+ outl((i << RFADDR_shift), ioaddr + rfcr);
+ *( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr);
+ }
+
+ /* Store MAC Address in perm_addr */
+ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
+ /* enable packet filtering */
+ outl(rfcrSave | RFEN, rfcr + ioaddr);
+
+ return 1;
+}
+
+/**
+ * sis96x_get_mac_addr - Get MAC address for SiS962 or SiS963 model
+ * @pci_dev: the sis900 pci device
+ * @net_dev: the net device to get address for
+ *
+ * The SiS962 and SiS963 models use an EEPROM, shared by the LAN and 1394
+ * functions, to store the MAC address. To access the EEPROM, first send
+ * the EEREQ signal to the hardware and wait for EEGNT. While EEGNT is on,
+ * the LAN function is permitted to access the EEPROM; otherwise it is not.
+ * After the MAC address has been read, send the EEDONE signal to release
+ * the EEPROM again.
+ * The EEPROM map of the SiS962 and SiS963 differs from that of the SiS900,
+ * and their signature field is meaningless.
+ * The MAC address is read into @net_dev->dev_addr and @net_dev->perm_addr.
+ */
+
+static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
+ struct net_device *net_dev)
+{
+ long ioaddr = net_dev->base_addr;
+ long ee_addr = ioaddr + mear;
+ u32 waittime = 0;
+ int i;
+
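+	/*
+	 * Request EEPROM access for the LAN function and poll up to ~2 ms
+	 * for the grant (EEGNT) before reading the MAC address.
+	 */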
+ outl(EEREQ, ee_addr);
+ while(waittime < 2000) {
+ if(inl(ee_addr) & EEGNT) {
+
+ /* get MAC address from EEPROM */
+ for (i = 0; i < 3; i++)
+ ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+
+ /* Store MAC Address in perm_addr */
+ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
+ outl(EEDONE, ee_addr);
+ return 1;
+ } else {
+ udelay(1);
+ waittime ++;
+ }
+ }
+ outl(EEDONE, ee_addr);
+ return 0;
+}
+
+static const struct net_device_ops sis900_netdev_ops = {
+ .ndo_open = sis900_open,
+ .ndo_stop = sis900_close,
+ .ndo_start_xmit = sis900_start_xmit,
+ .ndo_set_config = sis900_set_config,
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_do_ioctl = mii_ioctl,
+ .ndo_tx_timeout = sis900_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = sis900_poll,
+#endif
+};
+
+/**
+ * sis900_probe - Probe for sis900 device
+ * @pci_dev: the sis900 pci device
+ * @pci_id: the pci device ID
+ *
+ * Check and probe the sis900 net device for @pci_dev.
+ * Get the MAC address according to the chip revision,
+ * and assign the SiS900-specific entries in the device structure,
+ * i.e. sis900_open(), sis900_start_xmit(), sis900_close(), etc.
+ */
+
+static int __devinit sis900_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
+{
+ struct sis900_private *sis_priv;
+ struct net_device *net_dev;
+ struct pci_dev *dev;
+ dma_addr_t ring_dma;
+ void *ring_space;
+ long ioaddr;
+ int i, ret;
+ const char *card_name = card_names[pci_id->driver_data];
+ const char *dev_name = pci_name(pci_dev);
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(version);
+#endif
+
+ /* setup various bits in PCI command register */
+ ret = pci_enable_device(pci_dev);
+ if(ret) return ret;
+
+ i = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ if(i){
+ printk(KERN_ERR "sis900.c: architecture does not support "
+ "32bit PCI busmaster DMA\n");
+ return i;
+ }
+
+ pci_set_master(pci_dev);
+
+ net_dev = alloc_etherdev(sizeof(struct sis900_private));
+ if (!net_dev)
+ return -ENOMEM;
+ SET_NETDEV_DEV(net_dev, &pci_dev->dev);
+
+ /* We do a request_region() to register /proc/ioports info. */
+ ioaddr = pci_resource_start(pci_dev, 0);
+ ret = pci_request_regions(pci_dev, "sis900");
+ if (ret)
+ goto err_out;
+
+ sis_priv = netdev_priv(net_dev);
+ net_dev->base_addr = ioaddr;
+ net_dev->irq = pci_dev->irq;
+ sis_priv->pci_dev = pci_dev;
+ spin_lock_init(&sis_priv->lock);
+
+ pci_set_drvdata(pci_dev, net_dev);
+
+ ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space) {
+ ret = -ENOMEM;
+ goto err_out_cleardev;
+ }
+ sis_priv->tx_ring = ring_space;
+ sis_priv->tx_ring_dma = ring_dma;
+
+ ring_space = pci_alloc_consistent(pci_dev, RX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space) {
+ ret = -ENOMEM;
+ goto err_unmap_tx;
+ }
+ sis_priv->rx_ring = ring_space;
+ sis_priv->rx_ring_dma = ring_dma;
+
+ /* The SiS900-specific entries in the device structure. */
+ net_dev->netdev_ops = &sis900_netdev_ops;
+ net_dev->watchdog_timeo = TX_TIMEOUT;
+ net_dev->ethtool_ops = &sis900_ethtool_ops;
+
+ if (sis900_debug > 0)
+ sis_priv->msg_enable = sis900_debug;
+ else
+ sis_priv->msg_enable = SIS900_DEF_MSG;
+
+ sis_priv->mii_info.dev = net_dev;
+ sis_priv->mii_info.mdio_read = mdio_read;
+ sis_priv->mii_info.mdio_write = mdio_write;
+ sis_priv->mii_info.phy_id_mask = 0x1f;
+ sis_priv->mii_info.reg_num_mask = 0x1f;
+
+ /* Get Mac address according to the chip revision */
+ sis_priv->chipset_rev = pci_dev->revision;
+ if(netif_msg_probe(sis_priv))
+ printk(KERN_DEBUG "%s: detected revision %2.2x, "
+ "trying to get MAC address...\n",
+ dev_name, sis_priv->chipset_rev);
+
+ ret = 0;
+ if (sis_priv->chipset_rev == SIS630E_900_REV)
+ ret = sis630e_get_mac_addr(pci_dev, net_dev);
+ else if ((sis_priv->chipset_rev > 0x81) && (sis_priv->chipset_rev <= 0x90) )
+ ret = sis635_get_mac_addr(pci_dev, net_dev);
+ else if (sis_priv->chipset_rev == SIS96x_900_REV)
+ ret = sis96x_get_mac_addr(pci_dev, net_dev);
+ else
+ ret = sis900_get_mac_addr(pci_dev, net_dev);
+
+ if (!ret || !is_valid_ether_addr(net_dev->dev_addr)) {
+ random_ether_addr(net_dev->dev_addr);
+ printk(KERN_WARNING "%s: Unreadable or invalid MAC address, "
+ "using a randomly generated one\n", dev_name);
+ }
+
+ /* 630ET : set the mii access mode as software-mode */
+ if (sis_priv->chipset_rev == SIS630ET_900_REV)
+ outl(ACCESSMODE | inl(ioaddr + cr), ioaddr + cr);
+
+ /* probe for mii transceiver */
+ if (sis900_mii_probe(net_dev) == 0) {
+ printk(KERN_WARNING "%s: Error probing MII device.\n",
+ dev_name);
+ ret = -ENODEV;
+ goto err_unmap_rx;
+ }
+
+ /* save our host bridge revision */
+ dev = pci_get_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_630, NULL);
+ if (dev) {
+ sis_priv->host_bridge_rev = dev->revision;
+ pci_dev_put(dev);
+ }
+
+ ret = register_netdev(net_dev);
+ if (ret)
+ goto err_unmap_rx;
+
+ /* print some information about our NIC */
+ printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n",
+ net_dev->name, card_name, ioaddr, net_dev->irq,
+ net_dev->dev_addr);
+
+ /* Detect Wake on Lan support */
+ ret = (inl(net_dev->base_addr + CFGPMC) & PMESP) >> 27;
+ if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0)
+ printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name);
+
+ return 0;
+
+ err_unmap_rx:
+ pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
+ sis_priv->rx_ring_dma);
+ err_unmap_tx:
+ pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
+ sis_priv->tx_ring_dma);
+ err_out_cleardev:
+ pci_set_drvdata(pci_dev, NULL);
+ pci_release_regions(pci_dev);
+ err_out:
+ free_netdev(net_dev);
+ return ret;
+}
+
+/**
+ * sis900_mii_probe - Probe MII PHY for sis900
+ * @net_dev: the net device to probe for
+ *
+ * Search all 32 possible MII PHY addresses.
+ * Identify and set the current PHY if one is found;
+ * return an error if none is found.
+ */
+
+static int __devinit sis900_mii_probe(struct net_device * net_dev)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ const char *dev_name = pci_name(sis_priv->pci_dev);
+ u16 poll_bit = MII_STAT_LINK, status = 0;
+ unsigned long timeout = jiffies + 5 * HZ;
+ int phy_addr;
+
+ sis_priv->mii = NULL;
+
+ /* search for total of 32 possible mii phy addresses */
+ for (phy_addr = 0; phy_addr < 32; phy_addr++) {
+ struct mii_phy * mii_phy = NULL;
+ u16 mii_status;
+ int i;
+
+ mii_phy = NULL;
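+		/*
+		 * The MII status register latches link failures; read it
+		 * twice so the second read reflects the current state.
+		 */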
+ for(i = 0; i < 2; i++)
+ mii_status = mdio_read(net_dev, phy_addr, MII_STATUS);
+
+ if (mii_status == 0xffff || mii_status == 0x0000) {
+ if (netif_msg_probe(sis_priv))
+ printk(KERN_DEBUG "%s: MII at address %d"
+ " not accessible\n",
+ dev_name, phy_addr);
+ continue;
+ }
+
+ if ((mii_phy = kmalloc(sizeof(struct mii_phy), GFP_KERNEL)) == NULL) {
+ printk(KERN_WARNING "Cannot allocate mem for struct mii_phy\n");
+ mii_phy = sis_priv->first_mii;
+ while (mii_phy) {
+ struct mii_phy *phy;
+ phy = mii_phy;
+ mii_phy = mii_phy->next;
+ kfree(phy);
+ }
+ return 0;
+ }
+
+ mii_phy->phy_id0 = mdio_read(net_dev, phy_addr, MII_PHY_ID0);
+ mii_phy->phy_id1 = mdio_read(net_dev, phy_addr, MII_PHY_ID1);
+ mii_phy->phy_addr = phy_addr;
+ mii_phy->status = mii_status;
+ mii_phy->next = sis_priv->mii;
+ sis_priv->mii = mii_phy;
+ sis_priv->first_mii = mii_phy;
+
+ for (i = 0; mii_chip_table[i].phy_id1; i++)
+ if ((mii_phy->phy_id0 == mii_chip_table[i].phy_id0 ) &&
+ ((mii_phy->phy_id1 & 0xFFF0) == mii_chip_table[i].phy_id1)){
+ mii_phy->phy_types = mii_chip_table[i].phy_types;
+ if (mii_chip_table[i].phy_types == MIX)
+ mii_phy->phy_types =
+ (mii_status & (MII_STAT_CAN_TX_FDX | MII_STAT_CAN_TX)) ? LAN : HOME;
+ printk(KERN_INFO "%s: %s transceiver found "
+ "at address %d.\n",
+ dev_name,
+ mii_chip_table[i].name,
+ phy_addr);
+ break;
+ }
+
+ if( !mii_chip_table[i].phy_id1 ) {
+ printk(KERN_INFO "%s: Unknown PHY transceiver found at address %d.\n",
+ dev_name, phy_addr);
+ mii_phy->phy_types = UNKNOWN;
+ }
+ }
+
+ if (sis_priv->mii == NULL) {
+ printk(KERN_INFO "%s: No MII transceivers found!\n", dev_name);
+ return 0;
+ }
+
+ /* select default PHY for mac */
+ sis_priv->mii = NULL;
+ sis900_default_phy( net_dev );
+
+ /* Reset phy if default phy is internal sis900 */
+ if ((sis_priv->mii->phy_id0 == 0x001D) &&
+ ((sis_priv->mii->phy_id1&0xFFF0) == 0x8000))
+ status = sis900_reset_phy(net_dev, sis_priv->cur_phy);
+
+ /* workaround for ICS1893 PHY */
+ if ((sis_priv->mii->phy_id0 == 0x0015) &&
+ ((sis_priv->mii->phy_id1&0xFFF0) == 0xF440))
+ mdio_write(net_dev, sis_priv->cur_phy, 0x0018, 0xD200);
+
+ if(status & MII_STAT_LINK){
+ while (poll_bit) {
+ yield();
+
+ poll_bit ^= (mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS) & poll_bit);
+ if (time_after_eq(jiffies, timeout)) {
+ printk(KERN_WARNING "%s: reset phy and link down now\n",
+ dev_name);
+ return -ETIME;
+ }
+ }
+ }
+
+ if (sis_priv->chipset_rev == SIS630E_900_REV) {
+ /* SiS 630E has some bugs on default value of PHY registers */
+ mdio_write(net_dev, sis_priv->cur_phy, MII_ANADV, 0x05e1);
+ mdio_write(net_dev, sis_priv->cur_phy, MII_CONFIG1, 0x22);
+ mdio_write(net_dev, sis_priv->cur_phy, MII_CONFIG2, 0xff00);
+ mdio_write(net_dev, sis_priv->cur_phy, MII_MASK, 0xffc0);
+ //mdio_write(net_dev, sis_priv->cur_phy, MII_CONTROL, 0x1000);
+ }
+
+ if (sis_priv->mii->status & MII_STAT_LINK)
+ netif_carrier_on(net_dev);
+ else
+ netif_carrier_off(net_dev);
+
+ return 1;
+}
+
+/**
+ * sis900_default_phy - Select default PHY for sis900 mac.
+ * @net_dev: the net device to probe for
+ *
+ * Select the first detected PHY with link up as the default.
+ * If none has link up, select the PHY whose type is HOME as the default.
+ * If no HOME PHY exists, select a LAN one.
+ */
+
+static u16 sis900_default_phy(struct net_device * net_dev)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ struct mii_phy *phy = NULL, *phy_home = NULL,
+ *default_phy = NULL, *phy_lan = NULL;
+ u16 status;
+
+ for (phy=sis_priv->first_mii; phy; phy=phy->next) {
+ status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+ status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+
+ /* Link ON & Not select default PHY & not ghost PHY */
+ if ((status & MII_STAT_LINK) && !default_phy &&
+ (phy->phy_types != UNKNOWN))
+ default_phy = phy;
+ else {
+ status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL);
+ mdio_write(net_dev, phy->phy_addr, MII_CONTROL,
+ status | MII_CNTL_AUTO | MII_CNTL_ISOLATE);
+ if (phy->phy_types == HOME)
+ phy_home = phy;
+ else if(phy->phy_types == LAN)
+ phy_lan = phy;
+ }
+ }
+
+ if (!default_phy && phy_home)
+ default_phy = phy_home;
+ else if (!default_phy && phy_lan)
+ default_phy = phy_lan;
+ else if (!default_phy)
+ default_phy = sis_priv->first_mii;
+
+ if (sis_priv->mii != default_phy) {
+ sis_priv->mii = default_phy;
+ sis_priv->cur_phy = default_phy->phy_addr;
+ printk(KERN_INFO "%s: Using transceiver found at address %d as default\n",
+ pci_name(sis_priv->pci_dev), sis_priv->cur_phy);
+ }
+
+ sis_priv->mii_info.phy_id = sis_priv->cur_phy;
+
+ status = mdio_read(net_dev, sis_priv->cur_phy, MII_CONTROL);
+ status &= (~MII_CNTL_ISOLATE);
+
+ mdio_write(net_dev, sis_priv->cur_phy, MII_CONTROL, status);
+ status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
+ status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
+
+ return status;
+}
+
+
+/**
+ * sis900_set_capability - set the media capability of network adapter.
+ * @net_dev : the net device to probe for
+ * @phy : default PHY
+ *
+ * Set the media capability of the network adapter according to
+ * the MII status register. This is necessary before auto-negotiation.
+ */
+
+static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *phy)
+{
+ u16 cap;
+ u16 status;
+
+ status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+ status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+
+ cap = MII_NWAY_CSMA_CD |
+ ((phy->status & MII_STAT_CAN_TX_FDX)? MII_NWAY_TX_FDX:0) |
+ ((phy->status & MII_STAT_CAN_TX) ? MII_NWAY_TX:0) |
+ ((phy->status & MII_STAT_CAN_T_FDX) ? MII_NWAY_T_FDX:0)|
+ ((phy->status & MII_STAT_CAN_T) ? MII_NWAY_T:0);
+
+ mdio_write(net_dev, phy->phy_addr, MII_ANADV, cap);
+}
+
+
+/* Delay between EEPROM clock transitions. */
+#define eeprom_delay() inl(ee_addr)
+
+/**
+ * read_eeprom - Read Serial EEPROM
+ * @ioaddr: base i/o address
+ * @location: the EEPROM location to read
+ *
+ * Read the serial EEPROM through the EEPROM Access Register.
+ * Note that @location is in word (16-bit) units.
+ */
+
+static u16 __devinit read_eeprom(long ioaddr, int location)
+{
+ int i;
+ u16 retval = 0;
+ long ee_addr = ioaddr + mear;
+ u32 read_cmd = location | EEread;
+
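+	/*
+	 * Bit-bang the 3-wire serial EEPROM: raise chip select, clock out
+	 * the 9-bit read command (opcode plus word address), then clock in
+	 * the 16 data bits.
+	 */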
+ outl(0, ee_addr);
+ eeprom_delay();
+ outl(EECS, ee_addr);
+ eeprom_delay();
+
+ /* Shift the read command (9) bits out. */
+ for (i = 8; i >= 0; i--) {
+ u32 dataval = (read_cmd & (1 << i)) ? EEDI | EECS : EECS;
+ outl(dataval, ee_addr);
+ eeprom_delay();
+ outl(dataval | EECLK, ee_addr);
+ eeprom_delay();
+ }
+ outl(EECS, ee_addr);
+ eeprom_delay();
+
+ /* read the 16-bits data in */
+ for (i = 16; i > 0; i--) {
+ outl(EECS, ee_addr);
+ eeprom_delay();
+ outl(EECS | EECLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((inl(ee_addr) & EEDO) ? 1 : 0);
+ eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+ outl(0, ee_addr);
+ eeprom_delay();
+
+ return retval;
+}
+
+/* Read and write the MII management registers using a software-generated
+ serial MDIO protocol. Note that the command bits and data bits are
+ sent out separately. */
+#define mdio_delay() inl(mdio_addr)
+
+static void mdio_idle(long mdio_addr)
+{
+ outl(MDIO | MDDIR, mdio_addr);
+ mdio_delay();
+ outl(MDIO | MDDIR | MDC, mdio_addr);
+}
+
+/* Synchronize the MII management interface by shifting 32 one bits out. */
+static void mdio_reset(long mdio_addr)
+{
+ int i;
+
+ for (i = 31; i >= 0; i--) {
+ outl(MDDIR | MDIO, mdio_addr);
+ mdio_delay();
+ outl(MDDIR | MDIO | MDC, mdio_addr);
+ mdio_delay();
+ }
+}
+
+/**
+ * mdio_read - read MII PHY register
+ * @net_dev: the net device to read
+ * @phy_id: the phy address to read
+ * @location: the phy register id to read
+ *
+ * Read MII registers through MDIO and MDC
+ * using the MDIO management frame structure and protocol (defined by ISO/IEC).
+ * Please see the SiS7014 or ICS spec.
+ */
+
+static int mdio_read(struct net_device *net_dev, int phy_id, int location)
+{
+ long mdio_addr = net_dev->base_addr + mear;
+ int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
+ u16 retval = 0;
+ int i;
+
+ mdio_reset(mdio_addr);
+ mdio_idle(mdio_addr);
+
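+	/*
+	 * Shift out the 16-bit command frame (start bits, read opcode,
+	 * PHY address and register address), MSB first.
+	 */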
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
+ outl(dataval, mdio_addr);
+ mdio_delay();
+ outl(dataval | MDC, mdio_addr);
+ mdio_delay();
+ }
+
+ /* Read the 16 data bits. */
+ for (i = 16; i > 0; i--) {
+ outl(0, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) | ((inl(mdio_addr) & MDIO) ? 1 : 0);
+ outl(MDC, mdio_addr);
+ mdio_delay();
+ }
+ outl(0x00, mdio_addr);
+
+ return retval;
+}
+
+/**
+ * mdio_write - write MII PHY register
+ * @net_dev: the net device to write
+ * @phy_id: the phy address to write
+ * @location: the phy register id to write
+ * @value: the register value to write with
+ *
+ * Write MII registers with @value through MDIO and MDC
+ * using the MDIO management frame structure and protocol (defined by ISO/IEC).
+ * Please see the SiS7014 or ICS spec.
+ */
+
+static void mdio_write(struct net_device *net_dev, int phy_id, int location,
+ int value)
+{
+ long mdio_addr = net_dev->base_addr + mear;
+ int mii_cmd = MIIwrite|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
+ int i;
+
+ mdio_reset(mdio_addr);
+ mdio_idle(mdio_addr);
+
+ /* Shift the command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
+ outb(dataval, mdio_addr);
+ mdio_delay();
+ outb(dataval | MDC, mdio_addr);
+ mdio_delay();
+ }
+ mdio_delay();
+
+ /* Shift the value bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (value & (1 << i)) ? MDDIR | MDIO : MDDIR;
+ outl(dataval, mdio_addr);
+ mdio_delay();
+ outl(dataval | MDC, mdio_addr);
+ mdio_delay();
+ }
+ mdio_delay();
+
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ outb(0, mdio_addr);
+ mdio_delay();
+ outb(MDC, mdio_addr);
+ mdio_delay();
+ }
+ outl(0x00, mdio_addr);
+}
+
+
+/**
+ * sis900_reset_phy - reset sis900 mii phy.
+ * @net_dev: the net device to write
+ * @phy_addr: default phy address
+ *
+ * Some PHYs can't work properly without a reset.
+ * This function is called during initialization and
+ * when the link status changes from ON to DOWN.
+ */
+
+static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr)
+{
+ int i;
+ u16 status;
+
+ for (i = 0; i < 2; i++)
+ status = mdio_read(net_dev, phy_addr, MII_STATUS);
+
+ mdio_write( net_dev, phy_addr, MII_CONTROL, MII_CNTL_RESET );
+
+ return status;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+*/
+static void sis900_poll(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ sis900_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+/**
+ * sis900_open - open sis900 device
+ * @net_dev: the net device to open
+ *
+ * Do some initialization and start the net interface,
+ * enable interrupts and set the sis900 timer.
+ */
+
+static int
+sis900_open(struct net_device *net_dev)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ long ioaddr = net_dev->base_addr;
+ int ret;
+
+ /* Soft reset the chip. */
+ sis900_reset(net_dev);
+
+ /* Equalizer workaround Rule */
+ sis630_set_eq(net_dev, sis_priv->chipset_rev);
+
+ ret = request_irq(net_dev->irq, sis900_interrupt, IRQF_SHARED,
+ net_dev->name, net_dev);
+ if (ret)
+ return ret;
+
+ sis900_init_rxfilter(net_dev);
+
+ sis900_init_tx_ring(net_dev);
+ sis900_init_rx_ring(net_dev);
+
+ set_rx_mode(net_dev);
+
+ netif_start_queue(net_dev);
+
+ /* Workaround for EDB */
+ sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+ outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
+ outl(RxENA | inl(ioaddr + cr), ioaddr + cr);
+ outl(IE, ioaddr + ier);
+
+ sis900_check_mode(net_dev, sis_priv->mii);
+
+ /* Set the timer to switch to check for link beat and perhaps switch
+ to an alternate media type. */
+ init_timer(&sis_priv->timer);
+ sis_priv->timer.expires = jiffies + HZ;
+ sis_priv->timer.data = (unsigned long)net_dev;
+ sis_priv->timer.function = sis900_timer;
+ add_timer(&sis_priv->timer);
+
+ return 0;
+}
+
+/**
+ * sis900_init_rxfilter - Initialize the Rx filter
+ * @net_dev: the net device to initialize for
+ *
+ * Set receive filter address to our MAC address
+ * and enable packet filtering.
+ */
+
+static void
+sis900_init_rxfilter (struct net_device * net_dev)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ long ioaddr = net_dev->base_addr;
+ u32 rfcrSave;
+ u32 i;
+
+ rfcrSave = inl(rfcr + ioaddr);
+
+ /* disable packet filtering before setting filter */
+ outl(rfcrSave & ~RFEN, rfcr + ioaddr);
+
+ /* load MAC addr to filter data register */
+ for (i = 0 ; i < 3 ; i++) {
+ u32 w;
+
+ w = (u32) *((u16 *)(net_dev->dev_addr)+i);
+ outl((i << RFADDR_shift), ioaddr + rfcr);
+ outl(w, ioaddr + rfdr);
+
+ if (netif_msg_hw(sis_priv)) {
+ printk(KERN_DEBUG "%s: Receive Filter Address[%d]=%x\n",
+ net_dev->name, i, inl(ioaddr + rfdr));
+ }
+ }
+
+ /* enable packet filtering */
+ outl(rfcrSave | RFEN, rfcr + ioaddr);
+}
+
+/**
+ * sis900_init_tx_ring - Initialize the Tx descriptor ring
+ * @net_dev: the net device to initialize for
+ *
+ * Initialize the Tx descriptor ring.
+ */
+
+static void
+sis900_init_tx_ring(struct net_device *net_dev)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ long ioaddr = net_dev->base_addr;
+ int i;
+
+ sis_priv->tx_full = 0;
+ sis_priv->dirty_tx = sis_priv->cur_tx = 0;
+
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ sis_priv->tx_skbuff[i] = NULL;
+
+ sis_priv->tx_ring[i].link = sis_priv->tx_ring_dma +
+ ((i+1)%NUM_TX_DESC)*sizeof(BufferDesc);
+ sis_priv->tx_ring[i].cmdsts = 0;
+ sis_priv->tx_ring[i].bufptr = 0;
+ }
+
+ /* load Transmit Descriptor Register */
+ outl(sis_priv->tx_ring_dma, ioaddr + txdp);
+ if (netif_msg_hw(sis_priv))
+ printk(KERN_DEBUG "%s: TX descriptor register loaded with: %8.8x\n",
+ net_dev->name, inl(ioaddr + txdp));
+}
+
+/**
+ * sis900_init_rx_ring - Initialize the Rx descriptor ring
+ * @net_dev: the net device to initialize for
+ *
+ * Initialize the Rx descriptor ring
+ * and pre-allocate receive buffers (socket buffers).
+ */
+
+static void
+sis900_init_rx_ring(struct net_device *net_dev)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ long ioaddr = net_dev->base_addr;
+ int i;
+
+ sis_priv->cur_rx = 0;
+ sis_priv->dirty_rx = 0;
+
+ /* init RX descriptor */
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ sis_priv->rx_skbuff[i] = NULL;
+
+ sis_priv->rx_ring[i].link = sis_priv->rx_ring_dma +
+ ((i+1)%NUM_RX_DESC)*sizeof(BufferDesc);
+ sis_priv->rx_ring[i].cmdsts = 0;
+ sis_priv->rx_ring[i].bufptr = 0;
+ }
+
+ /* allocate sock buffers */
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ struct sk_buff *skb;
+
+ if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+ /* not enough memory for the skbuff; this leaves a "hole"
+ in the buffer ring, and it is not clear how the
+ hardware will react to this kind of degenerate
+ buffer */
+ break;
+ }
+ sis_priv->rx_skbuff[i] = skb;
+ sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE;
+ sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev,
+ skb->data, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ }
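+	/*
+	 * dirty_rx is set so that (cur_rx - dirty_rx) counts the descriptors
+	 * still lacking a receive buffer, letting the refill logic retry any
+	 * allocation failures above.
+	 */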
+ sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC);
+
+ /* load Receive Descriptor Register */
+ outl(sis_priv->rx_ring_dma, ioaddr + rxdp);
+ if (netif_msg_hw(sis_priv))
+ printk(KERN_DEBUG "%s: RX descriptor register loaded with: %8.8x\n",
+ net_dev->name, inl(ioaddr + rxdp));
+}
+
+/**
+ * sis630_set_eq - set phy equalizer value for 630 LAN
+ * @net_dev: the net device to set equalizer value
+ * @revision: 630 LAN revision number
+ *
+ * 630E equalizer workaround rule(Cyrus Huang 08/15)
+ * PHY register 14h(Test)
+ * Bit 14: 0 -- Automatically detect (default)
+ * 1 -- Manually set Equalizer filter
+ * Bit 13: 0 -- (Default)
+ * 1 -- Speed up convergence of equalizer setting
+ * Bit 9 : 0 -- (Default)
+ * 1 -- Disable Baseline Wander
+ * Bit 3~7 -- Equalizer filter setting
+ * Link ON: Set Bit 9, 13 to 1, Bit 14 to 0
+ * Then calculate equalizer value
+ * Then set equalizer value, and set Bit 14 to 1, Bit 9 to 0
+ * Link Off:Set Bit 13 to 1, Bit 14 to 0
+ * Calculate Equalizer value:
+ * When Link is ON and Bit 14 is 0, SIS900PHY will auto-detect proper equalizer value.
+ * When the equalizer is stable, this value is not a fixed value. It will be within
+ * a small range(eg. 7~9). Then we get a minimum and a maximum value(eg. min=7, max=9)
+ * 0 <= max <= 4 --> set equalizer to max
+ * 5 <= max <= 14 --> set equalizer to max+1 or set equalizer to max+2 if max == min
+ * max >= 15 --> set equalizer to max+5 or set equalizer to max+6 if max == min
+ */
+
+static void sis630_set_eq(struct net_device *net_dev, u8 revision)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ u16 reg14h, eq_value=0, max_value=0, min_value=0;
+ int i, maxcount=10;
+
+ if ( !(revision == SIS630E_900_REV || revision == SIS630EA1_900_REV ||
+ revision == SIS630A_900_REV || revision == SIS630ET_900_REV) )
+ return;
+
+ if (netif_carrier_ok(net_dev)) {
+ reg14h = mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
+ mdio_write(net_dev, sis_priv->cur_phy, MII_RESV,
+ (0x2200 | reg14h) & 0xBFFF);
+ for (i=0; i < maxcount; i++) {
+ eq_value = (0x00F8 & mdio_read(net_dev,
+ sis_priv->cur_phy, MII_RESV)) >> 3;
+ if (i == 0)
+ max_value=min_value=eq_value;
+ max_value = (eq_value > max_value) ?
+ eq_value : max_value;
+ min_value = (eq_value < min_value) ?
+ eq_value : min_value;
+ }
+ /* 630E rule to determine the equalizer value */
+ if (revision == SIS630E_900_REV || revision == SIS630EA1_900_REV ||
+ revision == SIS630ET_900_REV) {
+ if (max_value < 5)
+ eq_value = max_value;
+ else if (max_value >= 5 && max_value < 15)
+ eq_value = (max_value == min_value) ?
+ max_value+2 : max_value+1;
+ else if (max_value >= 15)
+ eq_value=(max_value == min_value) ?
+ max_value+6 : max_value+5;
+ }
+ /* 630B0&B1 rule to determine the equalizer value */
+ if (revision == SIS630A_900_REV &&
+ (sis_priv->host_bridge_rev == SIS630B0 ||
+ sis_priv->host_bridge_rev == SIS630B1)) {
+ if (max_value == 0)
+ eq_value = 3;
+ else
+ eq_value = (max_value + min_value + 1)/2;
+ }
+ /* write equalizer value and setting */
+ reg14h = mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
+ reg14h = (reg14h & 0xFF07) | ((eq_value << 3) & 0x00F8);
+ reg14h = (reg14h | 0x6000) & 0xFDFF;
+ mdio_write(net_dev, sis_priv->cur_phy, MII_RESV, reg14h);
+ } else {
+ reg14h = mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
+ if (revision == SIS630A_900_REV &&
+ (sis_priv->host_bridge_rev == SIS630B0 ||
+ sis_priv->host_bridge_rev == SIS630B1))
+ mdio_write(net_dev, sis_priv->cur_phy, MII_RESV,
+ (reg14h | 0x2200) & 0xBFFF);
+ else
+ mdio_write(net_dev, sis_priv->cur_phy, MII_RESV,
+ (reg14h | 0x2000) & 0xBFFF);
+ }
+}
+
+/**
+ * sis900_timer - sis900 timer routine
+ * @data: pointer to sis900 net device
+ *
+ * On each timer tick we check two things:
+ * link status (ON/OFF) and link mode (10/100/Full/Half).
+ */
+
+static void sis900_timer(unsigned long data)
+{
+ struct net_device *net_dev = (struct net_device *)data;
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ struct mii_phy *mii_phy = sis_priv->mii;
+ static const int next_tick = 5*HZ;
+ u16 status;
+
+ if (!sis_priv->autong_complete){
+ int uninitialized_var(speed), duplex = 0;
+
+ sis900_read_mode(net_dev, &speed, &duplex);
+ if (duplex){
+ sis900_set_mode(net_dev->base_addr, speed, duplex);
+ sis630_set_eq(net_dev, sis_priv->chipset_rev);
+ netif_start_queue(net_dev);
+ }
+
+ sis_priv->timer.expires = jiffies + HZ;
+ add_timer(&sis_priv->timer);
+ return;
+ }
+
+ status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
+ status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
+
+ /* Link OFF -> ON */
+ if (!netif_carrier_ok(net_dev)) {
+ LookForLink:
+ /* Search for new PHY */
+ status = sis900_default_phy(net_dev);
+ mii_phy = sis_priv->mii;
+
+ if (status & MII_STAT_LINK){
+ sis900_check_mode(net_dev, mii_phy);
+ netif_carrier_on(net_dev);
+ }
+ } else {
+ /* Link ON -> OFF */
+ if (!(status & MII_STAT_LINK)){
+ netif_carrier_off(net_dev);
+ if(netif_msg_link(sis_priv))
+ printk(KERN_INFO "%s: Media Link Off\n", net_dev->name);
+
+ /* Change mode issue */
+ if ((mii_phy->phy_id0 == 0x001D) &&
+ ((mii_phy->phy_id1 & 0xFFF0) == 0x8000))
+ sis900_reset_phy(net_dev, sis_priv->cur_phy);
+
+ sis630_set_eq(net_dev, sis_priv->chipset_rev);
+
+ goto LookForLink;
+ }
+ }
+
+ sis_priv->timer.expires = jiffies + next_tick;
+ add_timer(&sis_priv->timer);
+}
+
+/**
+ * sis900_check_mode - check the media mode for sis900
+ * @net_dev: the net device to be checked
+ * @mii_phy: the mii phy
+ *
+ * The older driver got the media mode from the MII status output
+ * register. Now we set our media capability and auto-negotiate
+ * to get the upper bound of speed and duplex between the two ends.
+ * If the MII PHY type is HOME, it doesn't need to auto-negotiate
+ * and autong_complete should be set to 1.
+ */
+
+static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_phy)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ long ioaddr = net_dev->base_addr;
+ int speed, duplex;
+
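+	/*
+	 * LAN PHYs auto-negotiate; HOME (HomePNA) PHYs are forced to the
+	 * fixed HomePNA speed at half duplex, with the EXD bit set in cfg.
+	 */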
+ if (mii_phy->phy_types == LAN) {
+ outl(~EXD & inl(ioaddr + cfg), ioaddr + cfg);
+ sis900_set_capability(net_dev , mii_phy);
+ sis900_auto_negotiate(net_dev, sis_priv->cur_phy);
+ } else {
+ outl(EXD | inl(ioaddr + cfg), ioaddr + cfg);
+ speed = HW_SPEED_HOME;
+ duplex = FDX_CAPABLE_HALF_SELECTED;
+ sis900_set_mode(ioaddr, speed, duplex);
+ sis_priv->autong_complete = 1;
+ }
+}
+
+/**
+ * sis900_set_mode - Set the media mode of mac register.
+ * @ioaddr: the address of the device
+ * @speed : the transmit speed to be set
+ * @duplex: the duplex mode to be set
+ *
+ * Set the media mode of the MAC registers txcfg/rxcfg according to
+ * the speed and duplex of the PHY. The EDB_MASTER_EN bit indicates that
+ * the EDB bus is used instead of the PCI bus. When this bit is set to 1,
+ * the max DMA burst size for TX/RX DMA should be no larger than 16
+ * double words.
+ */
+
+static void sis900_set_mode (long ioaddr, int speed, int duplex)
+{
+ u32 tx_flags = 0, rx_flags = 0;
+
+ if (inl(ioaddr + cfg) & EDB_MASTER_EN) {
+ tx_flags = TxATP | (DMA_BURST_64 << TxMXDMA_shift) |
+ (TX_FILL_THRESH << TxFILLT_shift);
+ rx_flags = DMA_BURST_64 << RxMXDMA_shift;
+ } else {
+ tx_flags = TxATP | (DMA_BURST_512 << TxMXDMA_shift) |
+ (TX_FILL_THRESH << TxFILLT_shift);
+ rx_flags = DMA_BURST_512 << RxMXDMA_shift;
+ }
+
+ if (speed == HW_SPEED_HOME || speed == HW_SPEED_10_MBPS) {
+ rx_flags |= (RxDRNT_10 << RxDRNT_shift);
+ tx_flags |= (TxDRNT_10 << TxDRNT_shift);
+ } else {
+ rx_flags |= (RxDRNT_100 << RxDRNT_shift);
+ tx_flags |= (TxDRNT_100 << TxDRNT_shift);
+ }
+
+ if (duplex == FDX_CAPABLE_FULL_SELECTED) {
+ tx_flags |= (TxCSI | TxHBI);
+ rx_flags |= RxATX;
+ }
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+ /* Can accept jumbo packets */
+ rx_flags |= RxAJAB;
+#endif
+
+ outl (tx_flags, ioaddr + txcfg);
+ outl (rx_flags, ioaddr + rxcfg);
+}
+
+/**
+ * sis900_auto_negotiate - Set the Auto-Negotiation Enable/Reset bit.
+ * @net_dev: the net device to read mode for
+ * @phy_addr: mii phy address
+ *
+ * If the adapter has link, set the auto-negotiation enable/reset bit.
+ * autong_complete should be set to 0 when starting auto-negotiation.
+ * autong_complete should be set to 1 if we didn't start auto-negotiation.
+ * sis900_timer will wait for the link to come up again while autong_complete = 0.
+ */
+
+static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ int i = 0;
+ u32 status;
+
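+ /* read the status register twice: the link status bit is latched */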
+ for (i = 0; i < 2; i++)
+ status = mdio_read(net_dev, phy_addr, MII_STATUS);
+
+ if (!(status & MII_STAT_LINK)){
+ if(netif_msg_link(sis_priv))
+ printk(KERN_INFO "%s: Media Link Off\n", net_dev->name);
+ sis_priv->autong_complete = 1;
+ netif_carrier_off(net_dev);
+ return;
+ }
+
+ /* (Re)start AutoNegotiate */
+ mdio_write(net_dev, phy_addr, MII_CONTROL,
+ MII_CNTL_AUTO | MII_CNTL_RST_AUTO);
+ sis_priv->autong_complete = 0;
+}
+
+
+/**
+ * sis900_read_mode - read media mode for sis900 internal phy
+ * @net_dev: the net device to read mode for
+ * @speed : the transmit speed to be determined
+ * @duplex : the duplex mode to be determined
+ *
+ * The capability of the remote end is stored in the MII register
+ * autorec (ANLPAR) after auto-negotiation. AND it with our advertisement
+ * to get the upper bound of speed and duplex supported by both ends.
+ */
+
+static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ struct mii_phy *phy = sis_priv->mii;
+ int phy_addr = sis_priv->cur_phy;
+ u32 status;
+ u16 autoadv, autorec;
+ int i;
+
+ for (i = 0; i < 2; i++)
+ status = mdio_read(net_dev, phy_addr, MII_STATUS);
+
+ if (!(status & MII_STAT_LINK))
+ return;
+
+ /* AutoNegotiate completed */
+ autoadv = mdio_read(net_dev, phy_addr, MII_ANADV);
+ autorec = mdio_read(net_dev, phy_addr, MII_ANLPAR);
+ status = autoadv & autorec;
+
+ *speed = HW_SPEED_10_MBPS;
+ *duplex = FDX_CAPABLE_HALF_SELECTED;
+
+ if (status & (MII_NWAY_TX | MII_NWAY_TX_FDX))
+ *speed = HW_SPEED_100_MBPS;
+ if (status & (MII_NWAY_TX_FDX | MII_NWAY_T_FDX))
+ *duplex = FDX_CAPABLE_FULL_SELECTED;
+
+ sis_priv->autong_complete = 1;
+
+ /* Workaround for Realtek RTL8201 PHY issue */
+ if ((phy->phy_id0 == 0x0000) && ((phy->phy_id1 & 0xFFF0) == 0x8200)) {
+ if (mdio_read(net_dev, phy_addr, MII_CONTROL) & MII_CNTL_FDX)
+ *duplex = FDX_CAPABLE_FULL_SELECTED;
+ if (mdio_read(net_dev, phy_addr, 0x0019) & 0x01)
+ *speed = HW_SPEED_100_MBPS;
+ }
+
+ if(netif_msg_link(sis_priv))
+ printk(KERN_INFO "%s: Media Link On %s %s-duplex\n",
+ net_dev->name,
+ *speed == HW_SPEED_100_MBPS ?
+ "100mbps" : "10mbps",
+ *duplex == FDX_CAPABLE_FULL_SELECTED ?
+ "full" : "half");
+}
+
+/**
+ * sis900_tx_timeout - sis900 transmit timeout routine
+ * @net_dev: the net device to transmit
+ *
+ * Print the transmit timeout status,
+ * disable interrupts, discard pending packets and restart the transmitter
+ */
+
+static void sis900_tx_timeout(struct net_device *net_dev)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ long ioaddr = net_dev->base_addr;
+ unsigned long flags;
+ int i;
+
+ if(netif_msg_tx_err(sis_priv))
+ printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n",
+ net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr));
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outl(0x0000, ioaddr + imr);
+
+ /* use spinlock to prevent interrupt handler accessing buffer ring */
+ spin_lock_irqsave(&sis_priv->lock, flags);
+
+ /* discard unsent packets */
+ sis_priv->dirty_tx = sis_priv->cur_tx = 0;
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ struct sk_buff *skb = sis_priv->tx_skbuff[i];
+
+ if (skb) {
+ pci_unmap_single(sis_priv->pci_dev,
+ sis_priv->tx_ring[i].bufptr, skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ sis_priv->tx_skbuff[i] = NULL;
+ sis_priv->tx_ring[i].cmdsts = 0;
+ sis_priv->tx_ring[i].bufptr = 0;
+ net_dev->stats.tx_dropped++;
+ }
+ }
+ sis_priv->tx_full = 0;
+ netif_wake_queue(net_dev);
+
+ spin_unlock_irqrestore(&sis_priv->lock, flags);
+
+ net_dev->trans_start = jiffies; /* prevent tx timeout */
+
+ /* load Transmit Descriptor Register */
+ outl(sis_priv->tx_ring_dma, ioaddr + txdp);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+ outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
+}
+
+/**
+ * sis900_start_xmit - sis900 start transmit routine
+ * @skb: socket buffer pointer to put the data being transmitted
+ * @net_dev: the net device to transmit with
+ *
+ * Set the transmit buffer descriptor,
+ * and write TxENA to enable the transmit state machine.
+ * Tell the upper layer if the ring is full.
+ */
+
+static netdev_tx_t
+sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ long ioaddr = net_dev->base_addr;
+ unsigned int entry;
+ unsigned long flags;
+ unsigned int index_cur_tx, index_dirty_tx;
+ unsigned int count_dirty_tx;
+
+ /* Don't transmit data before auto-negotiation completes */
+ if(!sis_priv->autong_complete){
+ netif_stop_queue(net_dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ spin_lock_irqsave(&sis_priv->lock, flags);
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = sis_priv->cur_tx % NUM_TX_DESC;
+ sis_priv->tx_skbuff[entry] = skb;
+
+ /* set the transmit buffer descriptor and enable Transmit State Machine */
+ sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev,
+ skb->data, skb->len, PCI_DMA_TODEVICE);
+ sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
+ outl(TxENA | inl(ioaddr + cr), ioaddr + cr);
+
+ sis_priv->cur_tx++;
+ index_cur_tx = sis_priv->cur_tx;
+ index_dirty_tx = sis_priv->dirty_tx;
+
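+ /* count_dirty_tx ends up equal to cur_tx - dirty_tx, the number of descriptors in flight */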
+ for (count_dirty_tx = 0; index_cur_tx != index_dirty_tx; index_dirty_tx++)
+ count_dirty_tx++;
+
+ if (index_cur_tx == index_dirty_tx) {
+ /* cur_tx has wrapped around and met dirty_tx: the ring is full */
+ sis_priv->tx_full = 1;
+ netif_stop_queue(net_dev);
+ } else if (count_dirty_tx < NUM_TX_DESC) {
+ /* Typical path, tell upper layer that more transmission is possible */
+ netif_start_queue(net_dev);
+ } else {
+ /* buffer full, tell upper layer no more transmission */
+ sis_priv->tx_full = 1;
+ netif_stop_queue(net_dev);
+ }
+
+ spin_unlock_irqrestore(&sis_priv->lock, flags);
+
+ if (netif_msg_tx_queued(sis_priv))
+ printk(KERN_DEBUG "%s: Queued Tx packet at %p size %d "
+ "to slot %d.\n",
+ net_dev->name, skb->data, (int)skb->len, entry);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * sis900_interrupt - sis900 interrupt handler
+ * @irq: the irq number
+ * @dev_instance: the client data object
+ *
+ * The interrupt handler does all of the Rx thread work,
+ * and cleans up after the Tx thread
+ */
+
+static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
+{
+ struct net_device *net_dev = dev_instance;
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ int boguscnt = max_interrupt_work;
+ long ioaddr = net_dev->base_addr;
+ u32 status;
+ unsigned int handled = 0;
+
+ spin_lock (&sis_priv->lock);
+
+ do {
+ status = inl(ioaddr + isr);
+
+ if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0)
+ /* nothing interesting happened */
+ break;
+ handled = 1;
+
+ /* why don't we break after the Tx/Rx cases? keyword: full-duplex */
+ if (status & (RxORN | RxERR | RxOK))
+ /* Rx interrupt */
+ sis900_rx(net_dev);
+
+ if (status & (TxURN | TxERR | TxIDLE))
+ /* Tx interrupt */
+ sis900_finish_xmit(net_dev);
+
+ /* something strange happened !!! */
+ if (status & HIBERR) {
+ if(netif_msg_intr(sis_priv))
+ printk(KERN_INFO "%s: Abnormal interrupt, "
+ "status %#8.8x.\n", net_dev->name, status);
+ break;
+ }
+ if (--boguscnt < 0) {
+ if(netif_msg_intr(sis_priv))
+ printk(KERN_INFO "%s: Too much work at interrupt, "
+ "interrupt status = %#8.8x.\n",
+ net_dev->name, status);
+ break;
+ }
+ } while (1);
+
+ if(netif_msg_intr(sis_priv))
+ printk(KERN_DEBUG "%s: exiting interrupt, "
+ "interrupt status = 0x%#8.8x.\n",
+ net_dev->name, inl(ioaddr + isr));
+
+ spin_unlock (&sis_priv->lock);
+ return IRQ_RETVAL(handled);
+}
+
+/**
+ * sis900_rx - sis900 receive routine
+ * @net_dev: the net device which receives data
+ *
+ * Process receive interrupt events,
+ * pass buffers to the higher layer and refill the buffer pool.
+ * Note: This function is called by interrupt handler,
+ * don't do "too much" work here
+ */
+
+static int sis900_rx(struct net_device *net_dev)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ long ioaddr = net_dev->base_addr;
+ unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC;
+ u32 rx_status = sis_priv->rx_ring[entry].cmdsts;
+ int rx_work_limit;
+
+ if (netif_msg_rx_status(sis_priv))
+ printk(KERN_DEBUG "sis900_rx, cur_rx:%4.4d, dirty_rx:%4.4d "
+ "status:0x%8.8x\n",
+ sis_priv->cur_rx, sis_priv->dirty_rx, rx_status);
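+ /* process at most the descriptors that still have receive buffers attached */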
+ rx_work_limit = sis_priv->dirty_rx + NUM_RX_DESC - sis_priv->cur_rx;
+
+ while (rx_status & OWN) {
+ unsigned int rx_size;
+ unsigned int data_size;
+
+ if (--rx_work_limit < 0)
+ break;
+
+ data_size = rx_status & DSIZE;
+ rx_size = data_size - CRC_SIZE;
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+ /* ``TOOLONG'' flag means jumbo packet received. */
+ if ((rx_status & TOOLONG) && data_size <= MAX_FRAME_SIZE)
+ rx_status &= (~ ((unsigned int)TOOLONG));
+#endif
+
+ if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) {
+ /* corrupted packet received */
+ if (netif_msg_rx_err(sis_priv))
+ printk(KERN_DEBUG "%s: Corrupted packet "
+ "received, buffer status = 0x%8.8x/%d.\n",
+ net_dev->name, rx_status, data_size);
+ net_dev->stats.rx_errors++;
+ if (rx_status & OVERRUN)
+ net_dev->stats.rx_over_errors++;
+ if (rx_status & (TOOLONG|RUNT))
+ net_dev->stats.rx_length_errors++;
+ if (rx_status & (RXISERR | FAERR))
+ net_dev->stats.rx_frame_errors++;
+ if (rx_status & CRCERR)
+ net_dev->stats.rx_crc_errors++;
+ /* reset buffer descriptor state */
+ sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
+ } else {
+ struct sk_buff * skb;
+ struct sk_buff * rx_skb;
+
+ pci_unmap_single(sis_priv->pci_dev,
+ sis_priv->rx_ring[entry].bufptr, RX_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+
+ /* refill the Rx buffer; if there is not enough memory
+ * for a new socket buffer, recycle the old one below */
+ if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+ /*
+ * Not enough memory to refill the buffer
+ * so we need to recycle the old one so
+ * as to avoid creating a memory hole
+ * in the rx ring
+ */
+ skb = sis_priv->rx_skbuff[entry];
+ net_dev->stats.rx_dropped++;
+ goto refill_rx_ring;
+ }
+
+ /* This situation should never happen, but due to
+ some unknown bugs, it is possible that
+ we are working on a NULL sk_buff :-( */
+ if (sis_priv->rx_skbuff[entry] == NULL) {
+ if (netif_msg_rx_err(sis_priv))
+ printk(KERN_WARNING "%s: NULL pointer "
+ "encountered in Rx ring\n"
+ "cur_rx:%4.4d, dirty_rx:%4.4d\n",
+ net_dev->name, sis_priv->cur_rx,
+ sis_priv->dirty_rx);
+ dev_kfree_skb(skb);
+ break;
+ }
+
+ /* give the socket buffer to upper layers */
+ rx_skb = sis_priv->rx_skbuff[entry];
+ skb_put(rx_skb, rx_size);
+ rx_skb->protocol = eth_type_trans(rx_skb, net_dev);
+ netif_rx(rx_skb);
+
+ /* some network statistics */
+ if ((rx_status & BCAST) == MCAST)
+ net_dev->stats.multicast++;
+ net_dev->stats.rx_bytes += rx_size;
+ net_dev->stats.rx_packets++;
+ sis_priv->dirty_rx++;
+refill_rx_ring:
+ sis_priv->rx_skbuff[entry] = skb;
+ sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
+ sis_priv->rx_ring[entry].bufptr =
+ pci_map_single(sis_priv->pci_dev, skb->data,
+ RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ }
+ sis_priv->cur_rx++;
+ entry = sis_priv->cur_rx % NUM_RX_DESC;
+ rx_status = sis_priv->rx_ring[entry].cmdsts;
+ } // while
+
+ /* refill the Rx buffers; note that refilling may be
+ * slower than consuming */
+ for (; sis_priv->cur_rx != sis_priv->dirty_rx; sis_priv->dirty_rx++) {
+ struct sk_buff *skb;
+
+ entry = sis_priv->dirty_rx % NUM_RX_DESC;
+
+ if (sis_priv->rx_skbuff[entry] == NULL) {
+ if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+ /* not enough memory for an skbuff; this leaves a
+ * "hole" in the buffer ring, and it is not clear
+ * how the hardware will react to this kind
+ * of degenerate buffer */
+ if (netif_msg_rx_err(sis_priv))
+ printk(KERN_INFO "%s: Memory squeeze, "
+ "deferring packet.\n",
+ net_dev->name);
+ net_dev->stats.rx_dropped++;
+ break;
+ }
+ sis_priv->rx_skbuff[entry] = skb;
+ sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
+ sis_priv->rx_ring[entry].bufptr =
+ pci_map_single(sis_priv->pci_dev, skb->data,
+ RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ }
+ }
+ /* re-enable the potentially idle receive state machine */
+ outl(RxENA | inl(ioaddr + cr), ioaddr + cr);
+
+ return 0;
+}
+
+/**
+ * sis900_finish_xmit - finish up transmission of packets
+ * @net_dev: the net device to be transmitted on
+ *
+ * Check for error conditions and free the socket buffers, etc.;
+ * schedule more transmission as needed.
+ * Note: This function is called by interrupt handler,
+ * don't do "too much" work here
+ */
+
+static void sis900_finish_xmit (struct net_device *net_dev)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+
+ for (; sis_priv->dirty_tx != sis_priv->cur_tx; sis_priv->dirty_tx++) {
+ struct sk_buff *skb;
+ unsigned int entry;
+ u32 tx_status;
+
+ entry = sis_priv->dirty_tx % NUM_TX_DESC;
+ tx_status = sis_priv->tx_ring[entry].cmdsts;
+
+ if (tx_status & OWN) {
+ /* The packet is not transmitted yet (owned by hardware) !
+ * Note: the interrupt is generated only when Tx Machine
+ * is idle, so this is an almost impossible case */
+ break;
+ }
+
+ if (tx_status & (ABORT | UNDERRUN | OWCOLL)) {
+ /* packet unsuccessfully transmitted */
+ if (netif_msg_tx_err(sis_priv))
+ printk(KERN_DEBUG "%s: Transmit "
+ "error, Tx status %8.8x.\n",
+ net_dev->name, tx_status);
+ net_dev->stats.tx_errors++;
+ if (tx_status & UNDERRUN)
+ net_dev->stats.tx_fifo_errors++;
+ if (tx_status & ABORT)
+ net_dev->stats.tx_aborted_errors++;
+ if (tx_status & NOCARRIER)
+ net_dev->stats.tx_carrier_errors++;
+ if (tx_status & OWCOLL)
+ net_dev->stats.tx_window_errors++;
+ } else {
+ /* packet successfully transmitted */
+ net_dev->stats.collisions += (tx_status & COLCNT) >> 16;
+ net_dev->stats.tx_bytes += tx_status & DSIZE;
+ net_dev->stats.tx_packets++;
+ }
+ /* Free the original skb. */
+ skb = sis_priv->tx_skbuff[entry];
+ pci_unmap_single(sis_priv->pci_dev,
+ sis_priv->tx_ring[entry].bufptr, skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ sis_priv->tx_skbuff[entry] = NULL;
+ sis_priv->tx_ring[entry].bufptr = 0;
+ sis_priv->tx_ring[entry].cmdsts = 0;
+ }
+
+ if (sis_priv->tx_full && netif_queue_stopped(net_dev) &&
+ sis_priv->cur_tx - sis_priv->dirty_tx < NUM_TX_DESC - 4) {
+ /* The ring is no longer full, clear tx_full and schedule
+ * more transmission by netif_wake_queue(net_dev) */
+ sis_priv->tx_full = 0;
+ netif_wake_queue (net_dev);
+ }
+}
+
+/**
+ * sis900_close - close sis900 device
+ * @net_dev: the net device to be closed
+ *
+ * Disable interrupts, stop the Tx and Rx state machines,
+ * and free the Tx and Rx socket buffers
+ */
+
+static int sis900_close(struct net_device *net_dev)
+{
+ long ioaddr = net_dev->base_addr;
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ struct sk_buff *skb;
+ int i;
+
+ netif_stop_queue(net_dev);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outl(0x0000, ioaddr + imr);
+ outl(0x0000, ioaddr + ier);
+
+ /* Stop the chip's Tx and Rx Status Machine */
+ outl(RxDIS | TxDIS | inl(ioaddr + cr), ioaddr + cr);
+
+ del_timer(&sis_priv->timer);
+
+ free_irq(net_dev->irq, net_dev);
+
+ /* Free Tx and RX skbuff */
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ skb = sis_priv->rx_skbuff[i];
+ if (skb) {
+ pci_unmap_single(sis_priv->pci_dev,
+ sis_priv->rx_ring[i].bufptr,
+ RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(skb);
+ sis_priv->rx_skbuff[i] = NULL;
+ }
+ }
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ skb = sis_priv->tx_skbuff[i];
+ if (skb) {
+ pci_unmap_single(sis_priv->pci_dev,
+ sis_priv->tx_ring[i].bufptr, skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ sis_priv->tx_skbuff[i] = NULL;
+ }
+ }
+
+ /* Green! Put the chip in low-power mode. */
+
+ return 0;
+}
+
+/**
+ * sis900_get_drvinfo - Return information about driver
+ * @net_dev: the net device to probe
+ * @info: container for info returned
+ *
+ * Process ethtool commands such as "ethtool -i" to show information
+ */
+
+static void sis900_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *info)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+
+ strcpy (info->driver, SIS900_MODULE_NAME);
+ strcpy (info->version, SIS900_DRV_VERSION);
+ strcpy (info->bus_info, pci_name(sis_priv->pci_dev));
+}
+
+static u32 sis900_get_msglevel(struct net_device *net_dev)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ return sis_priv->msg_enable;
+}
+
+static void sis900_set_msglevel(struct net_device *net_dev, u32 value)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ sis_priv->msg_enable = value;
+}
+
+static u32 sis900_get_link(struct net_device *net_dev)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ return mii_link_ok(&sis_priv->mii_info);
+}
+
+static int sis900_get_settings(struct net_device *net_dev,
+ struct ethtool_cmd *cmd)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ spin_lock_irq(&sis_priv->lock);
+ mii_ethtool_gset(&sis_priv->mii_info, cmd);
+ spin_unlock_irq(&sis_priv->lock);
+ return 0;
+}
+
+static int sis900_set_settings(struct net_device *net_dev,
+ struct ethtool_cmd *cmd)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ int rt;
+ spin_lock_irq(&sis_priv->lock);
+ rt = mii_ethtool_sset(&sis_priv->mii_info, cmd);
+ spin_unlock_irq(&sis_priv->lock);
+ return rt;
+}
+
+static int sis900_nway_reset(struct net_device *net_dev)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ return mii_nway_restart(&sis_priv->mii_info);
+}
+
+/**
+ * sis900_set_wol - Set up Wake-on-LAN registers
+ * @net_dev: the net device to probe
+ * @wol: container for info passed to the driver
+ *
+ * Process the ethtool command "wol" to set up Wake-on-LAN features.
+ * SiS900 supports sending WoL events if a correct packet is received,
+ * but there is no simple way to filter them to only a subset (broadcast,
+ * multicast, unicast or arp).
+ */
+
+static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ long pmctrl_addr = net_dev->base_addr + pmctrl;
+ u32 cfgpmcsr = 0, pmctrl_bits = 0;
+
+ if (wol->wolopts == 0) {
+ pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
+ cfgpmcsr &= ~PME_EN;
+ pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
+ outl(pmctrl_bits, pmctrl_addr);
+ if (netif_msg_wol(sis_priv))
+ printk(KERN_DEBUG "%s: Wake on LAN disabled\n", net_dev->name);
+ return 0;
+ }
+
+ if (wol->wolopts & (WAKE_MAGICSECURE | WAKE_UCAST | WAKE_MCAST
+ | WAKE_BCAST | WAKE_ARP))
+ return -EINVAL;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ pmctrl_bits |= MAGICPKT;
+ if (wol->wolopts & WAKE_PHY)
+ pmctrl_bits |= LINKON;
+
+ outl(pmctrl_bits, pmctrl_addr);
+
+ pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
+ cfgpmcsr |= PME_EN;
+ pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
+ if (netif_msg_wol(sis_priv))
+ printk(KERN_DEBUG "%s: Wake on LAN enabled\n", net_dev->name);
+
+ return 0;
+}
+
+static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
+{
+ long pmctrl_addr = net_dev->base_addr + pmctrl;
+ u32 pmctrl_bits;
+
+ pmctrl_bits = inl(pmctrl_addr);
+ if (pmctrl_bits & MAGICPKT)
+ wol->wolopts |= WAKE_MAGIC;
+ if (pmctrl_bits & LINKON)
+ wol->wolopts |= WAKE_PHY;
+
+ wol->supported = (WAKE_PHY | WAKE_MAGIC);
+}
+
+static const struct ethtool_ops sis900_ethtool_ops = {
+ .get_drvinfo = sis900_get_drvinfo,
+ .get_msglevel = sis900_get_msglevel,
+ .set_msglevel = sis900_set_msglevel,
+ .get_link = sis900_get_link,
+ .get_settings = sis900_get_settings,
+ .set_settings = sis900_set_settings,
+ .nway_reset = sis900_nway_reset,
+ .get_wol = sis900_get_wol,
+ .set_wol = sis900_set_wol
+};
+
+/**
+ * mii_ioctl - process MII i/o control command
+ * @net_dev: the net device to command for
+ * @rq: parameter for command
+ * @cmd: the i/o command
+ *
+ * Process MII commands such as reading/writing MII registers
+ */
+
+static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ struct mii_ioctl_data *data = if_mii(rq);
+
+ switch(cmd) {
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ data->phy_id = sis_priv->mii->phy_addr;
+ /* Fall Through */
+
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ data->val_out = mdio_read(net_dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
+ return 0;
+
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ mdio_write(net_dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * sis900_set_config - Set media type by net_device.set_config
+ * @dev: the net device for media type change
+ * @map: ifmap passed by ifconfig
+ *
+ * Set the media type to 10baseT, 100baseT or 0 (auto) via ifconfig.
+ * We support only port changes; all other runtime configuration
+ * changes will be ignored.
+ */
+
+static int sis900_set_config(struct net_device *dev, struct ifmap *map)
+{
+ struct sis900_private *sis_priv = netdev_priv(dev);
+ struct mii_phy *mii_phy = sis_priv->mii;
+
+ u16 status;
+
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ /* we switch on the ifmap->port field. I couldn't find anything
+ * like a definition or standard for the values of that field.
+ * I think the meaning of those values is device specific. But
+ * since I would like to change the media type via the ifconfig
+ * command I use the definition from linux/netdevice.h
+ * (which seems to be different from the ifport(pcmcia) definition) */
+ switch(map->port){
+ case IF_PORT_UNKNOWN: /* use auto here */
+ dev->if_port = map->port;
+ /* we are going to change the media type, so the Link
+ * will be temporarily down and we need to reflect that
+ * here. When the Link comes up again, it will be
+ * sensed by the sis_timer procedure, which also does
+ * all the rest for us */
+ netif_carrier_off(dev);
+
+ /* read current state */
+ status = mdio_read(dev, mii_phy->phy_addr, MII_CONTROL);
+
+ /* enable auto-negotiation and reset the negotiation
+ * (I don't really know what the auto-negotiation
+ * reset really means, but it seems right to
+ * do one here) */
+ mdio_write(dev, mii_phy->phy_addr,
+ MII_CONTROL, status | MII_CNTL_AUTO | MII_CNTL_RST_AUTO);
+
+ break;
+
+ case IF_PORT_10BASET: /* 10BaseT */
+ dev->if_port = map->port;
+
+ /* we are going to change the media type, so the Link
+ * will be temporarily down and we need to reflect that
+ * here. When the Link comes up again, it will be
+ * sensed by the sis_timer procedure, which also does
+ * all the rest for us */
+ netif_carrier_off(dev);
+
+ /* set Speed to 10Mbps */
+ /* read current state */
+ status = mdio_read(dev, mii_phy->phy_addr, MII_CONTROL);
+
+ /* disable auto negotiation and force 10MBit mode*/
+ mdio_write(dev, mii_phy->phy_addr,
+ MII_CONTROL, status & ~(MII_CNTL_SPEED |
+ MII_CNTL_AUTO));
+ break;
+
+ case IF_PORT_100BASET: /* 100BaseT */
+ case IF_PORT_100BASETX: /* 100BaseTx */
+ dev->if_port = map->port;
+
+ /* we are going to change the media type, so the Link
+ * will be temporarily down and we need to reflect that
+ * here. When the Link comes up again, it will be
+ * sensed by the sis_timer procedure, which also does
+ * all the rest for us */
+ netif_carrier_off(dev);
+
+ /* set Speed to 100Mbps */
+ /* disable auto negotiation and enable 100MBit Mode */
+ status = mdio_read(dev, mii_phy->phy_addr, MII_CONTROL);
+ mdio_write(dev, mii_phy->phy_addr,
+ MII_CONTROL, (status & ~MII_CNTL_SPEED) |
+ MII_CNTL_SPEED);
+
+ break;
+
+ case IF_PORT_10BASE2: /* 10Base2 */
+ case IF_PORT_AUI: /* AUI */
+ case IF_PORT_100BASEFX: /* 100BaseFx */
+ /* These modes are not supported (are they?) */
+ return -EOPNOTSUPP;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/**
+ * sis900_mcast_bitnr - compute hashtable index
+ * @addr: multicast address
+ * @revision: revision id of chip
+ *
+ * SiS 900 uses the most significant 7 bits to index a 128-bit multicast
+ * hash table, which makes this function a little different from other drivers.
+ * SiS 900 B0 & 635 M/B use the most significant 8 bits to index a 256-bit
+ * multicast hash table.
+ */
+
+static inline u16 sis900_mcast_bitnr(u8 *addr, u8 revision)
+{
+
+ u32 crc = ether_crc(6, addr);
+
+ /* keep the 8 or 7 most significant bits */
+ if ((revision >= SIS635A_900_REV) || (revision == SIS900B_900_REV))
+ return (int)(crc >> 24);
+ else
+ return (int)(crc >> 25);
+}
+
+/**
+ * set_rx_mode - Set SiS900 receive mode
+ * @net_dev: the net device to be set
+ *
+ * Set the SiS900 receive mode for promiscuous, multicast, or broadcast
+ * mode, and set the appropriate multicast filter.
+ * The multicast hash table grows from 128 to 256 bits on 635 M/B & 900B0.
+ */
+
+static void set_rx_mode(struct net_device *net_dev)
+{
+ long ioaddr = net_dev->base_addr;
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ u16 mc_filter[16] = {0}; /* 256/128 bits multicast hash table */
+ int i, table_entries;
+ u32 rx_mode;
+
+ /* 635 hash table entries = 256 bits (16 words of 16 bits) */
+ if((sis_priv->chipset_rev >= SIS635A_900_REV) ||
+ (sis_priv->chipset_rev == SIS900B_900_REV))
+ table_entries = 16;
+ else
+ table_entries = 8;
+
+ if (net_dev->flags & IFF_PROMISC) {
+ /* Accept any kinds of packets */
+ rx_mode = RFPromiscuous;
+ for (i = 0; i < table_entries; i++)
+ mc_filter[i] = 0xffff;
+ } else if ((netdev_mc_count(net_dev) > multicast_filter_limit) ||
+ (net_dev->flags & IFF_ALLMULTI)) {
+ /* too many multicast addresses, or accept all multicast packets */
+ rx_mode = RFAAB | RFAAM;
+ for (i = 0; i < table_entries; i++)
+ mc_filter[i] = 0xffff;
+ } else {
+ /* Accept broadcast packets and packets whose destination
+ * address matches our MAC address; use the Receive Filter to
+ * reject unwanted MCAST packets */
+ struct netdev_hw_addr *ha;
+ rx_mode = RFAAB;
+
+ netdev_for_each_mc_addr(ha, net_dev) {
+ unsigned int bit_nr;
+
+ bit_nr = sis900_mcast_bitnr(ha->addr,
+ sis_priv->chipset_rev);
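+ /* each mc_filter entry is 16 bits wide: bit_nr >> 4 selects the word, bit_nr & 0xf the bit */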
+ mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf));
+ }
+ }
+
+ /* update Multicast Hash Table in Receive Filter */
+ for (i = 0; i < table_entries; i++) {
+ /* why plus 0x04? That makes the correct register offset for the hash table. */
+ outl((u32)(0x00000004+i) << RFADDR_shift, ioaddr + rfcr);
+ outl(mc_filter[i], ioaddr + rfdr);
+ }
+
+ outl(RFEN | rx_mode, ioaddr + rfcr);
+
+ /* sis900 is capable of looping back packets at the MAC level for
+ * debugging purposes */
+ if (net_dev->flags & IFF_LOOPBACK) {
+ u32 cr_saved;
+ /* We must disable Tx/Rx before setting loopback mode */
+ cr_saved = inl(ioaddr + cr);
+ outl(cr_saved | TxDIS | RxDIS, ioaddr + cr);
+ /* enable loopback */
+ outl(inl(ioaddr + txcfg) | TxMLB, ioaddr + txcfg);
+ outl(inl(ioaddr + rxcfg) | RxATX, ioaddr + rxcfg);
+ /* restore cr */
+ outl(cr_saved, ioaddr + cr);
+ }
+}
+
+/**
+ * sis900_reset - Reset sis900 MAC
+ * @net_dev: the net device to reset
+ *
+ * Reset the sis900 MAC through the command register and wait
+ * until the reset has finished.
+ * Change the backoff algorithm for 900B0 & 635 M/B.
+ */
+
+static void sis900_reset(struct net_device *net_dev)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ long ioaddr = net_dev->base_addr;
+ int i = 0;
+ u32 status = TxRCMP | RxRCMP;
+
+ outl(0, ioaddr + ier);
+ outl(0, ioaddr + imr);
+ outl(0, ioaddr + rfcr);
+
+ outl(RxRESET | TxRESET | RESET | inl(ioaddr + cr), ioaddr + cr);
+
+ /* Check that the chip has finished the reset. */
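+ /* each reset-complete event (TxRCMP, RxRCMP) clears its bit in status as it is observed */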
+ while (status && (i++ < 1000)) {
+ status ^= (inl(isr + ioaddr) & status);
+ }
+
+ if( (sis_priv->chipset_rev >= SIS635A_900_REV) ||
+ (sis_priv->chipset_rev == SIS900B_900_REV) )
+ outl(PESEL | RND_CNT, ioaddr + cfg);
+ else
+ outl(PESEL, ioaddr + cfg);
+}
+
+/**
+ * sis900_remove - Remove sis900 device
+ * @pci_dev: the pci device to be removed
+ *
+ * remove and release SiS900 net device
+ */
+
+static void __devexit sis900_remove(struct pci_dev *pci_dev)
+{
+ struct net_device *net_dev = pci_get_drvdata(pci_dev);
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ struct mii_phy *phy = NULL;
+
+ while (sis_priv->first_mii) {
+ phy = sis_priv->first_mii;
+ sis_priv->first_mii = phy->next;
+ kfree(phy);
+ }
+
+ pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
+ sis_priv->rx_ring_dma);
+ pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
+ sis_priv->tx_ring_dma);
+ unregister_netdev(net_dev);
+ free_netdev(net_dev);
+ pci_release_regions(pci_dev);
+ pci_set_drvdata(pci_dev, NULL);
+}
+
+#ifdef CONFIG_PM
+
+static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state)
+{
+ struct net_device *net_dev = pci_get_drvdata(pci_dev);
+ long ioaddr = net_dev->base_addr;
+
+ if(!netif_running(net_dev))
+ return 0;
+
+ netif_stop_queue(net_dev);
+ netif_device_detach(net_dev);
+
+ /* Stop the chip's Tx and Rx Status Machine */
+ outl(RxDIS | TxDIS | inl(ioaddr + cr), ioaddr + cr);
+
+ pci_set_power_state(pci_dev, PCI_D3hot);
+ pci_save_state(pci_dev);
+
+ return 0;
+}
+
+static int sis900_resume(struct pci_dev *pci_dev)
+{
+ struct net_device *net_dev = pci_get_drvdata(pci_dev);
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ long ioaddr = net_dev->base_addr;
+
+ if(!netif_running(net_dev))
+ return 0;
+ pci_restore_state(pci_dev);
+ pci_set_power_state(pci_dev, PCI_D0);
+
+ sis900_init_rxfilter(net_dev);
+
+ sis900_init_tx_ring(net_dev);
+ sis900_init_rx_ring(net_dev);
+
+ set_rx_mode(net_dev);
+
+ netif_device_attach(net_dev);
+ netif_start_queue(net_dev);
+
+ /* Workaround for EDB */
+ sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+ outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
+ outl(RxENA | inl(ioaddr + cr), ioaddr + cr);
+ outl(IE, ioaddr + ier);
+
+ sis900_check_mode(net_dev, sis_priv->mii);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct pci_driver sis900_pci_driver = {
+ .name = SIS900_MODULE_NAME,
+ .id_table = sis900_pci_tbl,
+ .probe = sis900_probe,
+ .remove = __devexit_p(sis900_remove),
+#ifdef CONFIG_PM
+ .suspend = sis900_suspend,
+ .resume = sis900_resume,
+#endif /* CONFIG_PM */
+};
+
+static int __init sis900_init_module(void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk(version);
+#endif
+
+ return pci_register_driver(&sis900_pci_driver);
+}
+
+static void __exit sis900_cleanup_module(void)
+{
+ pci_unregister_driver(&sis900_pci_driver);
+}
+
+module_init(sis900_init_module);
+module_exit(sis900_cleanup_module);
+
diff --git a/drivers/net/ethernet/sis/sis900.h b/drivers/net/ethernet/sis/sis900.h
new file mode 100644
index 000000000000..150511a922ef
--- /dev/null
+++ b/drivers/net/ethernet/sis/sis900.h
@@ -0,0 +1,329 @@
+/* sis900.h Definitions for SiS ethernet controllers including 7014/7016 and 900
+ * Copyright 1999 Silicon Integrated System Corporation
+ * References:
+ * SiS 7016 Fast Ethernet PCI Bus 10/100 Mbps LAN Controller with OnNow Support,
+ * preliminary Rev. 1.0 Jan. 14, 1998
+ * SiS 900 Fast Ethernet PCI Bus 10/100 Mbps LAN Single Chip with OnNow Support,
+ * preliminary Rev. 1.0 Nov. 10, 1998
+ * SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution,
+ * preliminary Rev. 1.0 Jan. 18, 1998
+ * http://www.sis.com.tw/support/databook.htm
+ */
+
+/*
+ * SiS 7016 and SiS 900 ethernet controller registers
+ */
+
+/* The I/O extent; SiS 900 needs 256 bytes of I/O address space */
+#define SIS900_TOTAL_SIZE 0x100
+
+/* Symbolic offsets to registers. */
+enum sis900_registers {
+ cr=0x0, //Command Register
+ cfg=0x4, //Configuration Register
+ mear=0x8, //EEPROM Access Register
+ ptscr=0xc, //PCI Test Control Register
+ isr=0x10, //Interrupt Status Register
+ imr=0x14, //Interrupt Mask Register
+ ier=0x18, //Interrupt Enable Register
+ epar=0x18, //Enhanced PHY Access Register
+ txdp=0x20, //Transmit Descriptor Pointer Register
+ txcfg=0x24, //Transmit Configuration Register
+ rxdp=0x30, //Receive Descriptor Pointer Register
+ rxcfg=0x34, //Receive Configuration Register
+ flctrl=0x38, //Flow Control Register
+ rxlen=0x3c, //Receive Packet Length Register
+ rfcr=0x48, //Receive Filter Control Register
+ rfdr=0x4C, //Receive Filter Data Register
+ pmctrl=0xB0, //Power Management Control Register
+ pmer=0xB4 //Power Management Wake-up Event Register
+};
+
+/* Symbolic names for bits in various registers */
+enum sis900_command_register_bits {
+ RELOAD = 0x00000400, ACCESSMODE = 0x00000200,/* ET */
+ RESET = 0x00000100, SWI = 0x00000080, RxRESET = 0x00000020,
+ TxRESET = 0x00000010, RxDIS = 0x00000008, RxENA = 0x00000004,
+ TxDIS = 0x00000002, TxENA = 0x00000001
+};
+
+enum sis900_configuration_register_bits {
+ DESCRFMT = 0x00000100 /* 7016 specific */, REQALG = 0x00000080,
+ SB = 0x00000040, POW = 0x00000020, EXD = 0x00000010,
+ PESEL = 0x00000008, LPM = 0x00000004, BEM = 0x00000001,
+ /* 635 & 900B Specific */
+ RND_CNT = 0x00000400, FAIR_BACKOFF = 0x00000200,
+ EDB_MASTER_EN = 0x00002000
+};
+
+enum sis900_eeprom_access_register_bits {
+ MDC = 0x00000040, MDDIR = 0x00000020, MDIO = 0x00000010, /* 7016 specific */
+ EECS = 0x00000008, EECLK = 0x00000004, EEDO = 0x00000002,
+ EEDI = 0x00000001
+};
+
+enum sis900_interrupt_register_bits {
+ WKEVT = 0x10000000, TxPAUSEEND = 0x08000000, TxPAUSE = 0x04000000,
+ TxRCMP = 0x02000000, RxRCMP = 0x01000000, DPERR = 0x00800000,
+ SSERR = 0x00400000, RMABT = 0x00200000, RTABT = 0x00100000,
+ RxSOVR = 0x00010000, HIBERR = 0x00008000, SWINT = 0x00001000,
+ MIBINT = 0x00000800, TxURN = 0x00000400, TxIDLE = 0x00000200,
+ TxERR = 0x00000100, TxDESC = 0x00000080, TxOK = 0x00000040,
+ RxORN = 0x00000020, RxIDLE = 0x00000010, RxEARLY = 0x00000008,
+ RxERR = 0x00000004, RxDESC = 0x00000002, RxOK = 0x00000001
+};
+
+enum sis900_interrupt_enable_register_bits {
+ IE = 0x00000001
+};
+
+/* maximum dma burst for transmission and receive */
+#define MAX_DMA_RANGE 7 /* actually 0 means MAXIMUM !! */
+#define TxMXDMA_shift 20
+#define RxMXDMA_shift 20
+
+enum sis900_tx_rx_dma{
+ DMA_BURST_512 = 0, DMA_BURST_64 = 5
+};
+
+/* transmit FIFO thresholds */
+#define TX_FILL_THRESH 16 /* 1/4 FIFO size */
+#define TxFILLT_shift 8
+#define TxDRNT_shift 0
+#define TxDRNT_100 48 /* 3/4 FIFO size */
+#define TxDRNT_10 16 /* 1/2 FIFO size */
+
+enum sis900_transmit_config_register_bits {
+ TxCSI = 0x80000000, TxHBI = 0x40000000, TxMLB = 0x20000000,
+ TxATP = 0x10000000, TxIFG = 0x0C000000, TxFILLT = 0x00003F00,
+ TxDRNT = 0x0000003F
+};
+
+/* receive FIFO thresholds */
+#define RxDRNT_shift 1
+#define RxDRNT_100 16 /* 1/2 FIFO size */
+#define RxDRNT_10 24 /* 3/4 FIFO size */
+
+enum sis900_receive_config_register_bits {
+ RxAEP = 0x80000000, RxARP = 0x40000000, RxATX = 0x10000000,
+ RxAJAB = 0x08000000, RxDRNT = 0x0000007F
+};
+
+#define RFAA_shift 28
+#define RFADDR_shift 16
+
+enum sis900_receive_filter_control_register_bits {
+ RFEN = 0x80000000, RFAAB = 0x40000000, RFAAM = 0x20000000,
+ RFAAP = 0x10000000, RFPromiscuous = (RFAAB|RFAAM|RFAAP)
+};
+
+enum sis900_receive_filter_data_mask {
+ RFDAT = 0x0000FFFF
+};
+
+/* EEPROM Addresses */
+enum sis900_eeprom_address {
+ EEPROMSignature = 0x00, EEPROMVendorID = 0x02, EEPROMDeviceID = 0x03,
+ EEPROMMACAddr = 0x08, EEPROMChecksum = 0x0b
+};
+
+/* The EEPROM commands include the always-set leading bit. Refer to the NM93Cxx datasheet. */
+enum sis900_eeprom_command {
+ EEread = 0x0180, EEwrite = 0x0140, EEerase = 0x01C0,
+ EEwriteEnable = 0x0130, EEwriteDisable = 0x0100,
+ EEeraseAll = 0x0120, EEwriteAll = 0x0110,
+ EEaddrMask = 0x013F, EEcmdShift = 16
+};
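+/* e.g. read_eeprom() issues a read of EEPROM word n as (EEread | n), clocked out bit by bit */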
+
+/* For SiS962 or SiS963, request the eeprom software access */
+enum sis96x_eeprom_command {
+ EEREQ = 0x00000400, EEDONE = 0x00000200, EEGNT = 0x00000100
+};
+
+/* PCI Registers */
+enum sis900_pci_registers {
+ CFGPMC = 0x40,
+ CFGPMCSR = 0x44
+};
+
+/* Power management capabilities bits */
+enum sis900_cfgpmc_register_bits {
+ PMVER = 0x00070000,
+ DSI = 0x00100000,
+ PMESP = 0xf8000000
+};
+
+enum sis900_pmesp_bits {
+ PME_D0 = 0x1,
+ PME_D1 = 0x2,
+ PME_D2 = 0x4,
+ PME_D3H = 0x8,
+ PME_D3C = 0x10
+};
+
+/* Power management control/status bits */
+enum sis900_cfgpmcsr_register_bits {
+ PMESTS = 0x00004000,
+ PME_EN = 0x00000100, // Power management enable
+ PWR_STA = 0x00000003 // Current power state
+};
+
+/* Wake-on-LAN support. */
+enum sis900_power_management_control_register_bits {
+ LINKLOSS = 0x00000001,
+ LINKON = 0x00000002,
+ MAGICPKT = 0x00000400,
+ ALGORITHM = 0x00000800,
+ FRM1EN = 0x00100000,
+ FRM2EN = 0x00200000,
+ FRM3EN = 0x00400000,
+ FRM1ACS = 0x01000000,
+ FRM2ACS = 0x02000000,
+ FRM3ACS = 0x04000000,
+ WAKEALL = 0x40000000,
+ GATECLK = 0x80000000
+};
+
+/* Management Data I/O (mdio) frame */
+#define MIIread 0x6000
+#define MIIwrite 0x5002
+#define MIIpmdShift 7
+#define MIIregShift 2
+#define MIIcmdLen 16
+#define MIIcmdShift 16
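+/* e.g. mdio_read() composes a read frame as MIIread | (phy_id << MIIpmdShift) | (reg << MIIregShift) */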
+
+/* Buffer Descriptor Status */
+enum sis900_buffer_status {
+ OWN = 0x80000000, MORE = 0x40000000, INTR = 0x20000000,
+ SUPCRC = 0x10000000, INCCRC = 0x10000000,
+ OK = 0x08000000, DSIZE = 0x00000FFF
+};
+/* Status for TX Buffers */
+enum sis900_tx_buffer_status {
+ ABORT = 0x04000000, UNDERRUN = 0x02000000, NOCARRIER = 0x01000000,
+ DEFERD = 0x00800000, EXCDEFER = 0x00400000, OWCOLL = 0x00200000,
+ EXCCOLL = 0x00100000, COLCNT = 0x000F0000
+};
+
+enum sis900_rx_buffer_status {
+ OVERRUN = 0x02000000, DEST = 0x00800000, BCAST = 0x01800000,
+ MCAST = 0x01000000, UNIMATCH = 0x00800000, TOOLONG = 0x00400000,
+ RUNT = 0x00200000, RXISERR = 0x00100000, CRCERR = 0x00080000,
+ FAERR = 0x00040000, LOOPBK = 0x00020000, RXCOL = 0x00010000
+};
+
+/* MII register offsets */
+enum mii_registers {
+ MII_CONTROL = 0x0000, MII_STATUS = 0x0001, MII_PHY_ID0 = 0x0002,
+ MII_PHY_ID1 = 0x0003, MII_ANADV = 0x0004, MII_ANLPAR = 0x0005,
+ MII_ANEXT = 0x0006
+};
+
+/* mii registers specific to SiS 900 */
+enum sis_mii_registers {
+ MII_CONFIG1 = 0x0010, MII_CONFIG2 = 0x0011, MII_STSOUT = 0x0012,
+ MII_MASK = 0x0013, MII_RESV = 0x0014
+};
+
+/* mii registers specific to ICS 1893 */
+enum ics_mii_registers {
+ MII_EXTCTRL = 0x0010, MII_QPDSTS = 0x0011, MII_10BTOP = 0x0012,
+ MII_EXTCTRL2 = 0x0013
+};
+
+/* mii registers specific to AMD 79C901 */
+enum amd_mii_registers {
+ MII_STATUS_SUMMARY = 0x0018
+};
+
+/* MII Control register bit definitions. */
+enum mii_control_register_bits {
+ MII_CNTL_FDX = 0x0100, MII_CNTL_RST_AUTO = 0x0200,
+ MII_CNTL_ISOLATE = 0x0400, MII_CNTL_PWRDWN = 0x0800,
+ MII_CNTL_AUTO = 0x1000, MII_CNTL_SPEED = 0x2000,
+ MII_CNTL_LPBK = 0x4000, MII_CNTL_RESET = 0x8000
+};
+
+/* MII Status register bit */
+enum mii_status_register_bits {
+ MII_STAT_EXT = 0x0001, MII_STAT_JAB = 0x0002,
+ MII_STAT_LINK = 0x0004, MII_STAT_CAN_AUTO = 0x0008,
+ MII_STAT_FAULT = 0x0010, MII_STAT_AUTO_DONE = 0x0020,
+ MII_STAT_CAN_T = 0x0800, MII_STAT_CAN_T_FDX = 0x1000,
+ MII_STAT_CAN_TX = 0x2000, MII_STAT_CAN_TX_FDX = 0x4000,
+ MII_STAT_CAN_T4 = 0x8000
+};
+
+#define MII_ID1_OUI_LO 0xFC00 /* low bits of OUI mask */
+#define MII_ID1_MODEL 0x03F0 /* model number */
+#define MII_ID1_REV 0x000F /* revision number */
+
+/* MII NWAY Register Bits ...
+ valid for the ANAR (Auto-Negotiation Advertisement) and
+ ANLPAR (Auto-Negotiation Link Partner) registers */
+enum mii_nway_register_bits {
+ MII_NWAY_NODE_SEL = 0x001f, MII_NWAY_CSMA_CD = 0x0001,
+ MII_NWAY_T = 0x0020, MII_NWAY_T_FDX = 0x0040,
+ MII_NWAY_TX = 0x0080, MII_NWAY_TX_FDX = 0x0100,
+ MII_NWAY_T4 = 0x0200, MII_NWAY_PAUSE = 0x0400,
+ MII_NWAY_RF = 0x2000, MII_NWAY_ACK = 0x4000,
+ MII_NWAY_NP = 0x8000
+};
+
+enum mii_stsout_register_bits {
+ MII_STSOUT_LINK_FAIL = 0x4000,
+ MII_STSOUT_SPD = 0x0080, MII_STSOUT_DPLX = 0x0040
+};
+
+enum mii_stsics_register_bits {
+ MII_STSICS_SPD = 0x8000, MII_STSICS_DPLX = 0x4000,
+ MII_STSICS_LINKSTS = 0x0001
+};
+
+enum mii_stssum_register_bits {
+ MII_STSSUM_LINK = 0x0008, MII_STSSUM_DPLX = 0x0004,
+ MII_STSSUM_AUTO = 0x0002, MII_STSSUM_SPD = 0x0001
+};
+
+enum sis900_revision_id {
+ SIS630A_900_REV = 0x80, SIS630E_900_REV = 0x81,
+ SIS630S_900_REV = 0x82, SIS630EA1_900_REV = 0x83,
+ SIS630ET_900_REV = 0x84, SIS635A_900_REV = 0x90,
+ SIS96x_900_REV = 0x91, SIS900B_900_REV = 0x03
+};
+
+enum sis630_revision_id {
+ SIS630A0 = 0x00, SIS630A1 = 0x01,
+ SIS630B0 = 0x10, SIS630B1 = 0x11
+};
+
+#define FDX_CAPABLE_DUPLEX_UNKNOWN 0
+#define FDX_CAPABLE_HALF_SELECTED 1
+#define FDX_CAPABLE_FULL_SELECTED 2
+
+#define HW_SPEED_UNCONFIG 0
+#define HW_SPEED_HOME 1
+#define HW_SPEED_10_MBPS 10
+#define HW_SPEED_100_MBPS 100
+#define HW_SPEED_DEFAULT (HW_SPEED_100_MBPS)
+
+#define CRC_SIZE 4
+#define MAC_HEADER_SIZE 14
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
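+/* 4 extra bytes allow for the 802.1Q VLAN tag */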
+#define MAX_FRAME_SIZE (1518 + 4)
+#else
+#define MAX_FRAME_SIZE 1518
+#endif /* CONFIG_VLAN_802_1Q */
+
+#define TX_BUF_SIZE (MAX_FRAME_SIZE+18)
+#define RX_BUF_SIZE (MAX_FRAME_SIZE+18)
+
+#define NUM_TX_DESC 16 /* Number of Tx descriptor registers. */
+#define NUM_RX_DESC 16 /* Number of Rx descriptor registers. */
+#define TX_TOTAL_SIZE (NUM_TX_DESC * sizeof(BufferDesc))
+#define RX_TOTAL_SIZE (NUM_RX_DESC * sizeof(BufferDesc))
+
+/* PCI stuff, should be moved to pci.h */
+#define SIS630_VENDOR_ID 0x1039
+#define SIS630_DEVICE_ID 0x0630
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
new file mode 100644
index 000000000000..1854c88dfb92
--- /dev/null
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -0,0 +1,137 @@
+#
+# Western Digital/SMC network device configuration
+#
+
+config NET_VENDOR_SMSC
+ bool "SMC (SMSC)/Western Digital devices"
+ default y
+ depends on ARM || ISA || MAC || MIPS || M32R || SUPERH || \
+ BLACKFIN || MN10300 || COLDFIRE || PCI || PCMCIA
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about SMC/Western Digital cards. If you say Y, you will
+ be asked for your specific card in the following questions.
+
+if NET_VENDOR_SMSC
+
+config SMC9194
+ tristate "SMC 9194 support"
+ depends on (ISA || MAC && BROKEN)
+ select CRC32
+ ---help---
+ This is support for the SMC9xxx based Ethernet cards. Choose this
+ option if you have a DELL laptop with the docking station, or
+ another SMC9192/9194 based chipset. Say Y if you want it compiled
+ into the kernel, and read the file
+ <file:Documentation/networking/smc9.txt> and the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called smc9194.
+
+config SMC91X
+ tristate "SMC 91C9x/91C1xxx support"
+ select CRC32
+ select NET_CORE
+ select MII
+ depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \
+ MN10300 || COLDFIRE)
+ ---help---
+ This is a driver for SMC's 91x series of Ethernet chipsets,
+ including the SMC91C94 and the SMC91C111. Say Y if you want it
+ compiled into the kernel, and read the file
+ <file:Documentation/networking/smc9.txt> and the Ethernet-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ This driver is also available as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want).
+ The module will be called smc91x. If you want to compile it as a
+ module, say M here and read <file:Documentation/kbuild/modules.txt>.
+
+config PCMCIA_SMC91C92
+ tristate "SMC 91Cxx PCMCIA support"
+ depends on PCMCIA
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ Say Y here if you intend to attach an SMC 91Cxx compatible PCMCIA
+ (PC-card) Ethernet or Fast Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called smc91c92_cs. If unsure, say N.
+
+config EPIC100
+ tristate "SMC EtherPower II"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ This driver is for the SMC EtherPower II 9432 PCI Ethernet NIC,
+ which is based on the SMC83c17x (EPIC/100).
+ More specific information and updates are available from
+ <http://www.scyld.com/network/epic100.html>.
+
+config SMC911X
+ tristate "SMSC LAN911[5678] support"
+ select CRC32
+ select NET_CORE
+ select MII
+ depends on (ARM || SUPERH || MN10300)
+ ---help---
+ This is a driver for SMSC's LAN911x series of Ethernet chipsets
+ including the new LAN9115, LAN9116, LAN9117, and LAN9118.
+ Say Y if you want it compiled into the kernel,
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ This driver is also available as a module. The module will be
+ called smc911x. If you want to compile it as a module, say M
+ here and read <file:Documentation/kbuild/modules.txt>
+
+config SMSC911X
+ tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
+ depends on (ARM || SUPERH || BLACKFIN || MIPS || MN10300)
+ select CRC32
+ select NET_CORE
+ select MII
+ select PHYLIB
+ ---help---
+ Say Y here if you want support for SMSC LAN911x and LAN921x families
+ of ethernet controllers.
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called smsc911x.
+
+config SMSC911X_ARCH_HOOKS
+ def_bool n
+ depends on SMSC911X
+ ---help---
+ If the arch enables this, it allows the arch to implement various
+ hooks for more comprehensive interrupt control and also to override
+ the source of the MAC address.
+
+config SMSC9420
+ tristate "SMSC LAN9420 PCI ethernet adapter support"
+ depends on PCI
+ select CRC32
+ select PHYLIB
+ select SMSC_PHY
+ ---help---
+ This is a driver for SMSC's LAN9420 PCI ethernet adapter.
+ Say Y if you want it compiled into the kernel,
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ This driver is also available as a module. The module will be
+ called smsc9420. If you want to compile it as a module, say M
+ here and read <file:Documentation/kbuild/modules.txt>
+
+endif # NET_VENDOR_SMSC
diff --git a/drivers/net/ethernet/smsc/Makefile b/drivers/net/ethernet/smsc/Makefile
new file mode 100644
index 000000000000..f3438dec9d90
--- /dev/null
+++ b/drivers/net/ethernet/smsc/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the SMSC network device drivers.
+#
+
+obj-$(CONFIG_SMC9194) += smc9194.o
+obj-$(CONFIG_SMC91X) += smc91x.o
+obj-$(CONFIG_PCMCIA_SMC91C92) += smc91c92_cs.o
+obj-$(CONFIG_EPIC100) += epic100.o
+obj-$(CONFIG_SMSC9420) += smsc9420.o
+obj-$(CONFIG_SMC911X) += smc911x.o
+obj-$(CONFIG_SMSC911X) += smsc911x.o
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
new file mode 100644
index 000000000000..0a5dfb814157
--- /dev/null
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -0,0 +1,1609 @@
+/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
+/*
+ Written/copyright 1997-2001 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for the SMC83c170/175 "EPIC" series, as used on the
+ SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Information and updates available at
+ http://www.scyld.com/network/epic100.html
+ [this link no longer provides anything useful -jgarzik]
+
+ ---------------------------------------------------------------------
+
+*/
+
+#define DRV_NAME "epic100"
+#define DRV_VERSION "2.1"
+#define DRV_RELDATE "Sept 11, 2006"
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+
+/* Used to pass the full-duplex flag, etc. */
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak;
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for operational efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE 256
+#define TX_QUEUE_LEN 240 /* Limit ring entries actually used. */
+#define RX_RING_SIZE 256
+#define TX_TOTAL_SIZE (TX_RING_SIZE*sizeof(struct epic_tx_desc))
+#define RX_TOTAL_SIZE (RX_RING_SIZE*sizeof(struct epic_rx_desc))
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2*HZ)
+
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+/* Bytes transferred to chip before transmission starts. */
+/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
+#define TX_FIFO_THRESH 256
+#define RX_FIFO_THRESH 1 /* 0-3, 0==32, 64,96, or 3==128 bytes */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+
+/* These identify the driver base version and may not be removed. */
+static char version[] __devinitdata =
+DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
+static char version2[] __devinitdata =
+" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
+MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
+MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the SMC "EPIC/100", the SMC
+single-chip Ethernet controllers for PCI. This chip is used on
+the SMC EtherPower II boards.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS will assign the
+PCI INTA signal to a (preferably otherwise unused) system IRQ line.
+Note: Kernel versions earlier than 1.3.73 do not support shared PCI
+interrupt lines.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+IVb. References
+
+http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf
+http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf
+http://scyld.com/expert/NWay.html
+http://www.national.com/pf/DP/DP83840A.html
+
+IVc. Errata
+
+*/
+
+
+enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
+
+#define EPIC_TOTAL_SIZE 0x100
+#define USE_IO_OPS 1
+
+typedef enum {
+ SMSC_83C170_0,
+ SMSC_83C170,
+ SMSC_83C175,
+} chip_t;
+
+
+struct epic_chip_info {
+ const char *name;
+ int drv_flags; /* Driver use, intended as capability flags. */
+};
+
+
+/* indexed by chip_t */
+static const struct epic_chip_info pci_id_tbl[] = {
+ { "SMSC EPIC/100 83c170", TYPE2_INTR | NO_MII | MII_PWRDWN },
+ { "SMSC EPIC/100 83c170", TYPE2_INTR },
+ { "SMSC EPIC/C 83c175", TYPE2_INTR | MII_PWRDWN },
+};
+
+
+static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = {
+ { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
+ { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
+ { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
+ { 0,}
+};
+MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
+
+
+#ifndef USE_IO_OPS
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb readb
+#define inw readw
+#define inl readl
+#define outb writeb
+#define outw writew
+#define outl writel
+#endif
+
+/* Offsets to registers, using the (ugh) SMC names. */
+enum epic_registers {
+ COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
+ PCIBurstCnt=0x18,
+ TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */
+ MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
+ LAN0=64, /* MAC address. */
+ MC0=80, /* Multicast filter table. */
+ RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
+ PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
+};
+
+/* Interrupt register bits, using my own meaningful names. */
+enum IntrStatus {
+ TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
+ PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
+ RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
+ TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
+ RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
+};
+enum CommandBits {
+ StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
+ StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
+};
+
+#define EpicRemoved 0xffffffff /* Chip failed or removed (CardBus) */
+
+#define EpicNapiEvent (TxEmpty | TxDone | \
+ RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
+#define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent)
+
+static const u16 media2miictl[16] = {
+ 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 };
+
+/*
+ * The EPIC100 Rx and Tx buffer descriptors. Note that these
+ * really ARE host-endian; it's not a misannotation. We tell
+ * the card to byteswap them internally on big-endian hosts -
+ * look for #ifdef __BIG_ENDIAN in epic_open().
+ */
+
+struct epic_tx_desc {
+ u32 txstatus;
+ u32 bufaddr;
+ u32 buflength;
+ u32 next;
+};
+
+struct epic_rx_desc {
+ u32 rxstatus;
+ u32 bufaddr;
+ u32 buflength;
+ u32 next;
+};
+
+enum desc_status_bits {
+ DescOwn=0x8000,
+};
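+/* DescOwn lives in the low 16 bits of rxstatus/txstatus; the frame
+   length occupies the high 16 bits (see epic_start_xmit() and
+   epic_rx() below). */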
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+struct epic_private {
+ struct epic_rx_desc *rx_ring;
+ struct epic_tx_desc *tx_ring;
+ /* The saved address of a sent-in-place packet/buffer, for later freeing. */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+
+ dma_addr_t tx_ring_dma;
+ dma_addr_t rx_ring_dma;
+
+ /* Ring pointers. */
+ spinlock_t lock; /* Group with Tx control cache line. */
+ spinlock_t napi_lock;
+ struct napi_struct napi;
+ unsigned int reschedule_in_poll;
+ unsigned int cur_tx, dirty_tx;
+
+ unsigned int cur_rx, dirty_rx;
+ u32 irq_mask;
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+
+ struct pci_dev *pci_dev; /* PCI bus location. */
+ int chip_id, chip_flags;
+
+ struct timer_list timer; /* Media selection timer. */
+ int tx_threshold;
+ unsigned char mc_filter[8];
+ signed char phys[4]; /* MII device addresses. */
+ u16 advertising; /* NWay media advertisement */
+ int mii_phy_cnt;
+ struct mii_if_info mii;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int default_port:4; /* Last dev->if_port value. */
+};
+
+static int epic_open(struct net_device *dev);
+static int read_eeprom(long ioaddr, int location);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
+static void epic_restart(struct net_device *dev);
+static void epic_timer(unsigned long data);
+static void epic_tx_timeout(struct net_device *dev);
+static void epic_init_ring(struct net_device *dev);
+static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static int epic_rx(struct net_device *dev, int budget);
+static int epic_poll(struct napi_struct *napi, int budget);
+static irqreturn_t epic_interrupt(int irq, void *dev_instance);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static const struct ethtool_ops netdev_ethtool_ops;
+static int epic_close(struct net_device *dev);
+static struct net_device_stats *epic_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+
+static const struct net_device_ops epic_netdev_ops = {
+ .ndo_open = epic_open,
+ .ndo_stop = epic_close,
+ .ndo_start_xmit = epic_start_xmit,
+ .ndo_tx_timeout = epic_tx_timeout,
+ .ndo_get_stats = epic_get_stats,
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_do_ioctl = netdev_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __devinit epic_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int card_idx = -1;
+ long ioaddr;
+ int chip_idx = (int) ent->driver_data;
+ int irq;
+ struct net_device *dev;
+ struct epic_private *ep;
+ int i, ret, option = 0, duplex = 0;
+ void *ring_space;
+ dma_addr_t ring_dma;
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ static int printed_version;
+ if (!printed_version++)
+ printk(KERN_INFO "%s%s", version, version2);
+#endif
+
+ card_idx++;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto out;
+ irq = pdev->irq;
+
+ if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
+ dev_err(&pdev->dev, "no PCI region space\n");
+ ret = -ENODEV;
+ goto err_out_disable;
+ }
+
+ pci_set_master(pdev);
+
+ ret = pci_request_regions(pdev, DRV_NAME);
+ if (ret < 0)
+ goto err_out_disable;
+
+ ret = -ENOMEM;
+
+ dev = alloc_etherdev(sizeof (*ep));
+ if (!dev) {
+ dev_err(&pdev->dev, "no memory for eth device\n");
+ goto err_out_free_res;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+#ifdef USE_IO_OPS
+ ioaddr = pci_resource_start (pdev, 0);
+#else
+ ioaddr = pci_resource_start (pdev, 1);
+ ioaddr = (long) pci_ioremap_bar(pdev, 1);
+ if (!ioaddr) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ goto err_out_free_netdev;
+ }
+#endif
+
+ pci_set_drvdata(pdev, dev);
+ ep = netdev_priv(dev);
+ ep->mii.dev = dev;
+ ep->mii.mdio_read = mdio_read;
+ ep->mii.mdio_write = mdio_write;
+ ep->mii.phy_id_mask = 0x1f;
+ ep->mii.reg_num_mask = 0x1f;
+
+ ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space)
+ goto err_out_iounmap;
+ ep->tx_ring = ring_space;
+ ep->tx_ring_dma = ring_dma;
+
+ ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space)
+ goto err_out_unmap_tx;
+ ep->rx_ring = ring_space;
+ ep->rx_ring_dma = ring_dma;
+
+ if (dev->mem_start) {
+ option = dev->mem_start;
+ duplex = (dev->mem_start & 16) ? 1 : 0;
+ } else if (card_idx >= 0 && card_idx < MAX_UNITS) {
+ if (options[card_idx] >= 0)
+ option = options[card_idx];
+ if (full_duplex[card_idx] >= 0)
+ duplex = full_duplex[card_idx];
+ }
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ spin_lock_init(&ep->lock);
+ spin_lock_init(&ep->napi_lock);
+ ep->reschedule_in_poll = 0;
+
+ /* Bring the chip out of low-power mode. */
+ outl(0x4200, ioaddr + GENCTL);
+ /* Magic?! If we don't set this bit the MII interface won't work. */
+ /* This magic is documented in SMSC app note 7.15 */
+ for (i = 16; i > 0; i--)
+ outl(0x0008, ioaddr + TEST1);
+
+ /* Turn on the MII transceiver. */
+ outl(0x12, ioaddr + MIICfg);
+ if (chip_idx == 1)
+ outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+ outl(0x0200, ioaddr + GENCTL);
+
+ /* Note: the '175 does not have a serial EEPROM. */
+ for (i = 0; i < 3; i++)
+ ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(inw(ioaddr + LAN0 + i*4));
+
+ if (debug > 2) {
+ dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
+ for (i = 0; i < 64; i++)
+ printk(" %4.4x%s", read_eeprom(ioaddr, i),
+ i % 16 == 15 ? "\n" : "");
+ }
+
+ ep->pci_dev = pdev;
+ ep->chip_id = chip_idx;
+ ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
+ ep->irq_mask =
+ (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
+ | CntFull | TxUnderrun | EpicNapiEvent;
+
+ /* Find the connected MII xcvrs.
+    Doing this in open() would allow detecting external xcvrs later, but
+    it takes much time and no cards have an external MII. */
+ {
+ int phy, phy_idx = 0;
+ for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
+ int mii_status = mdio_read(dev, phy, MII_BMSR);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ ep->phys[phy_idx++] = phy;
+ dev_info(&pdev->dev,
+ "MII transceiver #%d control "
+ "%4.4x status %4.4x.\n",
+ phy, mdio_read(dev, phy, 0), mii_status);
+ }
+ }
+ ep->mii_phy_cnt = phy_idx;
+ if (phy_idx != 0) {
+ phy = ep->phys[0];
+ ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
+ dev_info(&pdev->dev,
+ "Autonegotiation advertising %4.4x link "
+ "partner %4.4x.\n",
+ ep->mii.advertising, mdio_read(dev, phy, 5));
+ } else if ( ! (ep->chip_flags & NO_MII)) {
+ dev_warn(&pdev->dev,
+ "***WARNING***: No MII transceiver found!\n");
+ /* Use the known PHY address of the EPII. */
+ ep->phys[0] = 3;
+ }
+ ep->mii.phy_id = ep->phys[0];
+ }
+
+ /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
+ if (ep->chip_flags & MII_PWRDWN)
+ outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
+ outl(0x0008, ioaddr + GENCTL);
+
+ /* The lower four bits are the media type. */
+ if (duplex) {
+ ep->mii.force_media = ep->mii.full_duplex = 1;
+ dev_info(&pdev->dev, "Forced full duplex requested.\n");
+ }
+ dev->if_port = ep->default_port = option;
+
+ /* The Epic-specific entries in the device structure. */
+ dev->netdev_ops = &epic_netdev_ops;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ netif_napi_add(dev, &ep->napi, epic_poll, 64);
+
+ ret = register_netdev(dev);
+ if (ret < 0)
+ goto err_out_unmap_rx;
+
+ printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq,
+ dev->dev_addr);
+
+out:
+ return ret;
+
+err_out_unmap_rx:
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
+err_out_unmap_tx:
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
+err_out_iounmap:
+#ifndef USE_IO_OPS
+ iounmap(ioaddr);
+err_out_free_netdev:
+#endif
+ free_netdev(dev);
+err_out_free_res:
+ pci_release_regions(pdev);
+err_out_disable:
+ pci_disable_device(pdev);
+ goto out;
+}
+
+/* Serial EEPROM section. */
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
+#define EE_CS 0x02 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x08 /* EEPROM chip data in. */
+#define EE_WRITE_0 0x01
+#define EE_WRITE_1 0x09
+#define EE_DATA_READ 0x10 /* EEPROM chip data out. */
+#define EE_ENB (0x0001 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+ This serves to flush the operation to the PCI bus.
+ */
+
+#define eeprom_delay() inl(ee_addr)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD (5 << 6)
+#define EE_READ64_CMD (6 << 6)
+#define EE_READ256_CMD (6 << 8)
+#define EE_ERASE_CMD (7 << 6)
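+/* Chips strapped for a 64x16 EEPROM report bit 0x40 in EECTL and take a
+   6-bit address; otherwise a 256x16 part with an 8-bit address is
+   assumed. read_eeprom() below picks the read command accordingly. */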
+
+static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
+{
+ long ioaddr = dev->base_addr;
+
+ outl(0x00000000, ioaddr + INTMASK);
+}
+
+static inline void __epic_pci_commit(long ioaddr)
+{
+#ifndef USE_IO_OPS
+ inl(ioaddr + INTMASK);
+#endif
+}
+
+static inline void epic_napi_irq_off(struct net_device *dev,
+ struct epic_private *ep)
+{
+ long ioaddr = dev->base_addr;
+
+ outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
+ __epic_pci_commit(ioaddr);
+}
+
+static inline void epic_napi_irq_on(struct net_device *dev,
+ struct epic_private *ep)
+{
+ long ioaddr = dev->base_addr;
+
+ /* No need to commit possible posted write */
+ outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
+}
+
+static int __devinit read_eeprom(long ioaddr, int location)
+{
+ int i;
+ int retval = 0;
+ long ee_addr = ioaddr + EECTL;
+ int read_cmd = location |
+ (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
+
+ outl(EE_ENB & ~EE_CS, ee_addr);
+ outl(EE_ENB, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 12; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
+ outl(EE_ENB | dataval, ee_addr);
+ eeprom_delay();
+ outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ }
+ outl(EE_ENB, ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ outl(EE_ENB, ee_addr);
+ eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+ outl(EE_ENB & ~EE_CS, ee_addr);
+ return retval;
+}
+
+#define MII_READOP 1
+#define MII_WRITEOP 2
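+/* MIICtrl command word layout, as used below: PHY address in bits
+   [13:9], register number in bits [8:4], opcode in the low bits. The
+   chip clears the opcode bit when the transaction completes, which is
+   what the polling loops wait for. */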
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ long ioaddr = dev->base_addr;
+ int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
+ int i;
+
+ outl(read_cmd, ioaddr + MIICtrl);
+ /* Typical operation takes 25 loops. */
+ for (i = 400; i > 0; i--) {
+ barrier();
+ if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
+ /* Work around read failure bug. */
+ if (phy_id == 1 && location < 6 &&
+ inw(ioaddr + MIIData) == 0xffff) {
+ outl(read_cmd, ioaddr + MIICtrl);
+ continue;
+ }
+ return inw(ioaddr + MIIData);
+ }
+ }
+ return 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
+{
+ long ioaddr = dev->base_addr;
+ int i;
+
+ outw(value, ioaddr + MIIData);
+ outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
+ for (i = 10000; i > 0; i--) {
+ barrier();
+ if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
+ break;
+ }
+}
+
+
+static int epic_open(struct net_device *dev)
+{
+ struct epic_private *ep = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ int i;
+ int retval;
+
+ /* Soft reset the chip. */
+ outl(0x4001, ioaddr + GENCTL);
+
+ napi_enable(&ep->napi);
+ if ((retval = request_irq(dev->irq, epic_interrupt, IRQF_SHARED, dev->name, dev))) {
+ napi_disable(&ep->napi);
+ return retval;
+ }
+
+ epic_init_ring(dev);
+
+ outl(0x4000, ioaddr + GENCTL);
+ /* This magic is documented in SMSC app note 7.15 */
+ for (i = 16; i > 0; i--)
+ outl(0x0008, ioaddr + TEST1);
+
+ /* Pull the chip out of low-power mode, enable interrupts, and set for
+ PCI read multiple. The MIIcfg setting and strange write order are
+ required by the details of which bits are reset and the transceiver
+ wiring on the Ositech CardBus card.
+ */
+#if 0
+ outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
+#endif
+ if (ep->chip_flags & MII_PWRDWN)
+ outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+
+ /* Tell the chip to byteswap descriptors on big-endian hosts */
+#ifdef __BIG_ENDIAN
+ outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+ inl(ioaddr + GENCTL);
+ outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+#else
+ outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+ inl(ioaddr + GENCTL);
+ outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+#endif
+
+ udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
+
+ for (i = 0; i < 3; i++)
+ outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
+
+ ep->tx_threshold = TX_FIFO_THRESH;
+ outl(ep->tx_threshold, ioaddr + TxThresh);
+
+ if (media2miictl[dev->if_port & 15]) {
+ if (ep->mii_phy_cnt)
+ mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
+ if (dev->if_port == 1) {
+ if (debug > 1)
+ printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
+ "status %4.4x.\n",
+ dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
+ }
+ } else {
+ int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
+ if (mii_lpa != 0xffff) {
+ if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
+ ep->mii.full_duplex = 1;
+ else if (! (mii_lpa & LPA_LPACK))
+ mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
+ if (debug > 1)
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
+ " register read of %4.4x.\n", dev->name,
+ ep->mii.full_duplex ? "full" : "half",
+ ep->phys[0], mii_lpa);
+ }
+ }
+
+ outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
+ outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
+ outl(ep->tx_ring_dma, ioaddr + PTxCDAR);
+
+ /* Start the chip's Rx process. */
+ set_rx_mode(dev);
+ outl(StartRx | RxQueued, ioaddr + COMMAND);
+
+ netif_start_queue(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
+ | CntFull | TxUnderrun
+ | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
+
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
+ "%s-duplex.\n",
+ dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
+ ep->mii.full_duplex ? "full" : "half");
+
+ /* Set the timer to check for link beat and perhaps switch
+    to an alternate media type. */
+ init_timer(&ep->timer);
+ ep->timer.expires = jiffies + 3*HZ;
+ ep->timer.data = (unsigned long)dev;
+ ep->timer.function = epic_timer; /* timer handler */
+ add_timer(&ep->timer);
+
+ return 0;
+}
+
+/* Reset the chip to recover from a PCI transaction error.
+ This may occur at interrupt time. */
+static void epic_pause(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+
+ netif_stop_queue (dev);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outl(0x00000000, ioaddr + INTMASK);
+ /* Stop the chip's Tx and Rx DMA processes. */
+ outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);
+
+ /* Update the error counts. */
+ if (inw(ioaddr + COMMAND) != 0xffff) {
+ dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
+ dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
+ dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+ }
+
+ /* Remove the packets on the Rx queue. */
+ epic_rx(dev, RX_RING_SIZE);
+}
+
+static void epic_restart(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct epic_private *ep = netdev_priv(dev);
+ int i;
+
+ /* Soft reset the chip. */
+ outl(0x4001, ioaddr + GENCTL);
+
+ printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
+ dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
+ udelay(1);
+
+ /* This magic is documented in SMSC app note 7.15 */
+ for (i = 16; i > 0; i--)
+ outl(0x0008, ioaddr + TEST1);
+
+#ifdef __BIG_ENDIAN
+ outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+#else
+ outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+#endif
+ outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
+ if (ep->chip_flags & MII_PWRDWN)
+ outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+
+ for (i = 0; i < 3; i++)
+ outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
+
+ ep->tx_threshold = TX_FIFO_THRESH;
+ outl(ep->tx_threshold, ioaddr + TxThresh);
+ outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
+ outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
+ sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
+ outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
+ sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);
+
+ /* Start the chip's Rx process. */
+ set_rx_mode(dev);
+ outl(StartRx | RxQueued, ioaddr + COMMAND);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
+ | CntFull | TxUnderrun
+ | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
+
+ printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
+ " interrupt %4.4x.\n",
+ dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
+ (int)inl(ioaddr + INTSTAT));
+}
+
+static void check_media(struct net_device *dev)
+{
+ struct epic_private *ep = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
+ int negotiated = mii_lpa & ep->mii.advertising;
+ int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
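+ /* 0x0100 is LPA_100FULL; (negotiated & 0x01C0) == 0x0040 means the
+    partner offers 10FULL but neither 100HALF nor 100FULL. */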
+
+ if (ep->mii.force_media)
+ return;
+ if (mii_lpa == 0xffff) /* Bogus read */
+ return;
+ if (ep->mii.full_duplex != duplex) {
+ ep->mii.full_duplex = duplex;
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
+ " partner capability of %4.4x.\n", dev->name,
+ ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
+ outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
+ }
+}
+
+static void epic_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct epic_private *ep = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ int next_tick = 5*HZ;
+
+ if (debug > 3) {
+ printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
+ dev->name, (int)inl(ioaddr + TxSTAT));
+ printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
+ "IntStatus %4.4x RxStatus %4.4x.\n",
+ dev->name, (int)inl(ioaddr + INTMASK),
+ (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
+ }
+
+ check_media(dev);
+
+ ep->timer.expires = jiffies + next_tick;
+ add_timer(&ep->timer);
+}
+
+static void epic_tx_timeout(struct net_device *dev)
+{
+ struct epic_private *ep = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+
+ if (debug > 0) {
+ printk(KERN_WARNING "%s: Transmit timeout using MII device, "
+ "Tx status %4.4x.\n",
+ dev->name, (int)inw(ioaddr + TxSTAT));
+ if (debug > 1) {
+ printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
+ dev->name, ep->dirty_tx, ep->cur_tx);
+ }
+ }
+ if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */
+ dev->stats.tx_fifo_errors++;
+ outl(RestartTx, ioaddr + COMMAND);
+ } else {
+ epic_restart(dev);
+ outl(TxQueued, dev->base_addr + COMMAND);
+ }
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ dev->stats.tx_errors++;
+ if (!ep->tx_full)
+ netif_wake_queue(dev);
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void epic_init_ring(struct net_device *dev)
+{
+ struct epic_private *ep = netdev_priv(dev);
+ int i;
+
+ ep->tx_full = 0;
+ ep->dirty_tx = ep->cur_tx = 0;
+ ep->cur_rx = ep->dirty_rx = 0;
+ ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ ep->rx_ring[i].rxstatus = 0;
+ ep->rx_ring[i].buflength = ep->rx_buf_sz;
+ ep->rx_ring[i].next = ep->rx_ring_dma +
+ (i+1)*sizeof(struct epic_rx_desc);
+ ep->rx_skbuff[i] = NULL;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ ep->rx_ring[i-1].next = ep->rx_ring_dma;
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz + 2);
+ ep->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
+ skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ ep->rx_ring[i].rxstatus = DescOwn;
+ }
+ ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ /* The Tx buffer descriptor is filled in as needed, but we
+ do need to clear the ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ ep->tx_skbuff[i] = NULL;
+ ep->tx_ring[i].txstatus = 0x0000;
+ ep->tx_ring[i].next = ep->tx_ring_dma +
+ (i+1)*sizeof(struct epic_tx_desc);
+ }
+ ep->tx_ring[i-1].next = ep->tx_ring_dma;
+}
+
+static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct epic_private *ep = netdev_priv(dev);
+ int entry, free_count;
+ u32 ctrl_word;
+ unsigned long flags;
+
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
+ /* Caution: the write order is important here, set the field with the
+ "ownership" bit last. */
+
+ /* Calculate the next Tx descriptor entry. */
+ spin_lock_irqsave(&ep->lock, flags);
+ free_count = ep->cur_tx - ep->dirty_tx;
+ entry = ep->cur_tx % TX_RING_SIZE;
+
+ ep->tx_skbuff[entry] = skb;
+ ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
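+ /* Interrupt mitigation: request a Tx-done interrupt only at the
+    queue's half-way point and when it is nearly full, so lightly
+    loaded queues drain without per-packet interrupts. */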
+ if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
+ ctrl_word = 0x100000; /* No interrupt */
+ } else if (free_count == TX_QUEUE_LEN/2) {
+ ctrl_word = 0x140000; /* Tx-done intr. */
+ } else if (free_count < TX_QUEUE_LEN - 1) {
+ ctrl_word = 0x100000; /* No Tx-done intr. */
+ } else {
+ /* Leave room for an additional entry. */
+ ctrl_word = 0x140000; /* Tx-done intr. */
+ ep->tx_full = 1;
+ }
+ ep->tx_ring[entry].buflength = ctrl_word | skb->len;
+ ep->tx_ring[entry].txstatus =
+ ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
+ | DescOwn;
+
+ ep->cur_tx++;
+ if (ep->tx_full)
+ netif_stop_queue(dev);
+
+ spin_unlock_irqrestore(&ep->lock, flags);
+ /* Trigger an immediate transmit demand. */
+ outl(TxQueued, dev->base_addr + COMMAND);
+
+ if (debug > 4)
+ printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
+ "flag %2.2x Tx status %8.8x.\n",
+ dev->name, (int)skb->len, entry, ctrl_word,
+ (int)inl(dev->base_addr + TxSTAT));
+
+ return NETDEV_TX_OK;
+}
+
+static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
+ int status)
+{
+ struct net_device_stats *stats = &dev->stats;
+
+#ifndef final_version
+ /* There was a major error; log it. */
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, status);
+#endif
+ stats->tx_errors++;
+ if (status & 0x1050)
+ stats->tx_aborted_errors++;
+ if (status & 0x0008)
+ stats->tx_carrier_errors++;
+ if (status & 0x0040)
+ stats->tx_window_errors++;
+ if (status & 0x0010)
+ stats->tx_fifo_errors++;
+}
+
+static void epic_tx(struct net_device *dev, struct epic_private *ep)
+{
+ unsigned int dirty_tx, cur_tx;
+
+ /*
+ * Note: if this lock becomes a problem we can narrow the locked
+ * region at the cost of occasionally grabbing the lock more times.
+ */
+ cur_tx = ep->cur_tx;
+ for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
+ struct sk_buff *skb;
+ int entry = dirty_tx % TX_RING_SIZE;
+ int txstatus = ep->tx_ring[entry].txstatus;
+
+ if (txstatus & DescOwn)
+ break; /* It still hasn't been Txed */
+
+ if (likely(txstatus & 0x0001)) {
+ dev->stats.collisions += (txstatus >> 8) & 15;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
+ } else
+ epic_tx_error(dev, ep, txstatus);
+
+ /* Free the original skb. */
+ skb = ep->tx_skbuff[entry];
+ pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ ep->tx_skbuff[entry] = NULL;
+ }
+
+#ifndef final_version
+ if (cur_tx - dirty_tx > TX_RING_SIZE) {
+ printk(KERN_WARNING
+ "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dev->name, dirty_tx, cur_tx, ep->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+ ep->dirty_tx = dirty_tx;
+ if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, allow new TX entries. */
+ ep->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t epic_interrupt(int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct epic_private *ep = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ unsigned int handled = 0;
+ int status;
+
+ status = inl(ioaddr + INTSTAT);
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outl(status & EpicNormalEvent, ioaddr + INTSTAT);
+
+ if (debug > 4) {
+ printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
+ "intstat=%#8.8x.\n", dev->name, status,
+ (int)inl(ioaddr + INTSTAT));
+ }
+
+ if ((status & IntrSummary) == 0)
+ goto out;
+
+ handled = 1;
+
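+ /* If a NAPI run is already in progress (napi_schedule_prep() fails),
+    bump reschedule_in_poll so epic_poll() makes another pass instead
+    of re-enabling interrupts. */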
+ if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
+ spin_lock(&ep->napi_lock);
+ if (napi_schedule_prep(&ep->napi)) {
+ epic_napi_irq_off(dev, ep);
+ __napi_schedule(&ep->napi);
+ } else
+ ep->reschedule_in_poll++;
+ spin_unlock(&ep->napi_lock);
+ }
+ status &= ~EpicNapiEvent;
+
+ /* Check uncommon events all at once. */
+ if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
+ if (status == EpicRemoved)
+ goto out;
+
+ /* Always update the error counts to avoid overhead later. */
+ dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
+ dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
+ dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+
+ if (status & TxUnderrun) { /* Tx FIFO underflow. */
+ dev->stats.tx_fifo_errors++;
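+ /* Raise the Tx start threshold by 128 bytes to make further
+    underruns less likely. */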
+ outl(ep->tx_threshold += 128, ioaddr + TxThresh);
+ /* Restart the transmit process. */
+ outl(RestartTx, ioaddr + COMMAND);
+ }
+ if (status & PCIBusErr170) {
+ printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
+ dev->name, status);
+ epic_pause(dev);
+ epic_restart(dev);
+ }
+ /* Clear all error sources. */
+ outl(status & 0x7f18, ioaddr + INTSTAT);
+ }
+
+out:
+ if (debug > 3) {
+ printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
+ dev->name, status);
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+static int epic_rx(struct net_device *dev, int budget)
+{
+ struct epic_private *ep = netdev_priv(dev);
+ int entry = ep->cur_rx % RX_RING_SIZE;
+ int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
+ int work_done = 0;
+
+ if (debug > 4)
+ printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
+ ep->rx_ring[entry].rxstatus);
+
+ if (rx_work_limit > budget)
+ rx_work_limit = budget;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
+ int status = ep->rx_ring[entry].rxstatus;
+
+ if (debug > 4)
+ printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status);
+ if (--rx_work_limit < 0)
+ break;
+ if (status & 0x2006) {
+ if (debug > 2)
+ printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
+ dev->name, status);
+ if (status & 0x2000) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+ "multiple buffers, status %4.4x!\n", dev->name, status);
+ dev->stats.rx_length_errors++;
+ } else if (status & 0x0006)
+ /* Rx Frame errors are counted in hardware. */
+ dev->stats.rx_errors++;
+ } else {
+ /* Malloc up new buffer, compatible with net-2e. */
+ /* Omit the four octet CRC from the length. */
+ short pkt_len = (status >> 16) - 4;
+ struct sk_buff *skb;
+
+ if (pkt_len > PKT_BUF_SZ - 4) {
+ printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
+ "%d bytes.\n",
+ dev->name, status, pkt_len);
+ pkt_len = 1514;
+ }
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
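+ /* Copying small frames leaves the full-sized ring buffer in place;
+    larger frames are handed up and the slot is refilled in the loop
+    below. */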
+ if (pkt_len < rx_copybreak &&
+ (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ pci_dma_sync_single_for_cpu(ep->pci_dev,
+ ep->rx_ring[entry].bufaddr,
+ ep->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
+ skb_put(skb, pkt_len);
+ pci_dma_sync_single_for_device(ep->pci_dev,
+ ep->rx_ring[entry].bufaddr,
+ ep->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ } else {
+ pci_unmap_single(ep->pci_dev,
+ ep->rx_ring[entry].bufaddr,
+ ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ skb = ep->rx_skbuff[entry];
+ skb_put(skb, pkt_len);
+ ep->rx_skbuff[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ work_done++;
+ entry = (++ep->cur_rx) % RX_RING_SIZE;
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
+ entry = ep->dirty_rx % RX_RING_SIZE;
+ if (ep->rx_skbuff[entry] == NULL) {
+ struct sk_buff *skb;
+ skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz + 2);
+ if (skb == NULL)
+ break;
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
+ skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ work_done++;
+ }
+ /* AV: shouldn't we add a barrier here? */
+ ep->rx_ring[entry].rxstatus = DescOwn;
+ }
+ return work_done;
+}
+
+static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
+{
+ long ioaddr = dev->base_addr;
+ int status;
+
+ status = inl(ioaddr + INTSTAT);
+
+ if (status == EpicRemoved)
+ return;
+ if (status & RxOverflow) /* Missed a Rx frame. */
+ dev->stats.rx_errors++;
+ if (status & (RxOverflow | RxFull))
+ outw(RxQueued, ioaddr + COMMAND);
+}
+
+static int epic_poll(struct napi_struct *napi, int budget)
+{
+ struct epic_private *ep = container_of(napi, struct epic_private, napi);
+ struct net_device *dev = ep->mii.dev;
+ int work_done = 0;
+ long ioaddr = dev->base_addr;
+
+rx_action:
+
+ epic_tx(dev, ep);
+
+ work_done += epic_rx(dev, budget);
+
+ epic_rx_err(dev, ep);
+
+ if (work_done < budget) {
+ unsigned long flags;
+ int more;
+
+ /* A bit baroque but it avoids a (space hungry) spin_unlock */
+
+ spin_lock_irqsave(&ep->napi_lock, flags);
+
+ more = ep->reschedule_in_poll;
+ if (!more) {
+ __napi_complete(napi);
+ outl(EpicNapiEvent, ioaddr + INTSTAT);
+ epic_napi_irq_on(dev, ep);
+ } else
+ ep->reschedule_in_poll--;
+
+ spin_unlock_irqrestore(&ep->napi_lock, flags);
+
+ if (more)
+ goto rx_action;
+ }
+
+ return work_done;
+}
+
+static int epic_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct epic_private *ep = netdev_priv(dev);
+ struct sk_buff *skb;
+ int i;
+
+ netif_stop_queue(dev);
+ napi_disable(&ep->napi);
+
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, (int)inl(ioaddr + INTSTAT));
+
+ del_timer_sync(&ep->timer);
+
+ epic_disable_int(dev, ep);
+
+ free_irq(dev->irq, dev);
+
+ epic_pause(dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ skb = ep->rx_skbuff[i];
+ ep->rx_skbuff[i] = NULL;
+ ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
+ ep->rx_ring[i].buflength = 0;
+ if (skb) {
+ pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
+ ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(skb);
+ }
+ ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ skb = ep->tx_skbuff[i];
+ ep->tx_skbuff[i] = NULL;
+ if (!skb)
+ continue;
+ pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ }
+
+ /* Green! Leave the chip in low-power mode. */
+ outl(0x0008, ioaddr + GENCTL);
+
+ return 0;
+}
+
+static struct net_device_stats *epic_get_stats(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+
+ if (netif_running(dev)) {
+ /* Update the error counts. */
+ dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
+ dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
+ dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+ }
+
+ return &dev->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ Note that we only use exclusion around actually queueing the
+ new frame, not around filling ep->setup_frame. This is non-deterministic
+ when re-entered but still correct. */
+
+static void set_rx_mode(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct epic_private *ep = netdev_priv(dev);
+ unsigned char mc_filter[8]; /* Multicast hash filter */
+ int i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ outl(0x002C, ioaddr + RxCtrl);
+ /* Unconditionally log net taps. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
+ /* There is apparently a chip bug, so the multicast filter
+ is never enabled. */
+ /* Too many to filter perfectly -- accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ outl(0x000C, ioaddr + RxCtrl);
+ } else if (netdev_mc_empty(dev)) {
+ outl(0x0004, ioaddr + RxCtrl);
+ return;
+ } else { /* Never executed, for now. */
+ struct netdev_hw_addr *ha;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned int bit_nr =
+ ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
+ mc_filter[bit_nr >> 3] |= (1 << (bit_nr & 7));
+ }
+ }
+ /* ToDo: perhaps we need to stop the Tx and Rx process here? */
+ if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
+ for (i = 0; i < 4; i++)
+ outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
+ memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
+ }
+}
+
+static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct epic_private *np = netdev_priv(dev);
+
+ strcpy (info->driver, DRV_NAME);
+ strcpy (info->version, DRV_VERSION);
+ strcpy (info->bus_info, pci_name(np->pci_dev));
+}
+
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct epic_private *np = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&np->lock);
+ rc = mii_ethtool_gset(&np->mii, cmd);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct epic_private *np = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&np->lock);
+ rc = mii_ethtool_sset(&np->mii, cmd);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+static int netdev_nway_reset(struct net_device *dev)
+{
+ struct epic_private *np = netdev_priv(dev);
+ return mii_nway_restart(&np->mii);
+}
+
+static u32 netdev_get_link(struct net_device *dev)
+{
+ struct epic_private *np = netdev_priv(dev);
+ return mii_link_ok(&np->mii);
+}
+
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 value)
+{
+ debug = value;
+}
+
+static int ethtool_begin(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ /* power-up, if interface is down */
+ if (! netif_running(dev)) {
+ outl(0x0200, ioaddr + GENCTL);
+ outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+ }
+ return 0;
+}
+
+static void ethtool_complete(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ /* power-down, if interface is down */
+ if (! netif_running(dev)) {
+ outl(0x0008, ioaddr + GENCTL);
+ outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
+ }
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_settings = netdev_get_settings,
+ .set_settings = netdev_set_settings,
+ .nway_reset = netdev_nway_reset,
+ .get_link = netdev_get_link,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+ .begin = ethtool_begin,
+ .complete = ethtool_complete
+};
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct epic_private *np = netdev_priv(dev);
+ long ioaddr = dev->base_addr;
+ struct mii_ioctl_data *data = if_mii(rq);
+ int rc;
+
+ /* power-up, if interface is down */
+ if (! netif_running(dev)) {
+ outl(0x0200, ioaddr + GENCTL);
+ outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+ }
+
+ /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
+ spin_lock_irq(&np->lock);
+ rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
+ spin_unlock_irq(&np->lock);
+
+ /* power-down, if interface is down */
+ if (! netif_running(dev)) {
+ outl(0x0008, ioaddr + GENCTL);
+ outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
+ }
+ return rc;
+}
+
+
+static void __devexit epic_remove_one (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct epic_private *ep = netdev_priv(dev);
+
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
+ unregister_netdev(dev);
+#ifndef USE_IO_OPS
+ iounmap((void*) dev->base_addr);
+#endif
+ pci_release_regions(pdev);
+ free_netdev(dev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ /* pci_power_off(pdev, -1); */
+}
+
+
+#ifdef CONFIG_PM
+
+static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ long ioaddr = dev->base_addr;
+
+ if (!netif_running(dev))
+ return 0;
+ epic_pause(dev);
+ /* Put the chip into low-power mode. */
+ outl(0x0008, ioaddr + GENCTL);
+ /* pci_power_off(pdev, -1); */
+ return 0;
+}
+
+
+static int epic_resume (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (!netif_running(dev))
+ return 0;
+ epic_restart(dev);
+ /* pci_power_on(pdev); */
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+
+static struct pci_driver epic_driver = {
+ .name = DRV_NAME,
+ .id_table = epic_pci_tbl,
+ .probe = epic_init_one,
+ .remove = __devexit_p(epic_remove_one),
+#ifdef CONFIG_PM
+ .suspend = epic_suspend,
+ .resume = epic_resume,
+#endif /* CONFIG_PM */
+};
+
+
+static int __init epic_init (void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ printk (KERN_INFO "%s%s",
+ version, version2);
+#endif
+
+ return pci_register_driver(&epic_driver);
+}
+
+
+static void __exit epic_cleanup (void)
+{
+ pci_unregister_driver (&epic_driver);
+}
+
+
+module_init(epic_init);
+module_exit(epic_cleanup);
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
new file mode 100644
index 000000000000..8f61fe9db1d0
--- /dev/null
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -0,0 +1,2210 @@
+/*
+ * smc911x.c
+ * This is a driver for SMSC's LAN911{5,6,7,8} single-chip Ethernet devices.
+ *
+ * Copyright (C) 2005 Sensoria Corp
+ * Derived from the unified SMC91x driver by Nicolas Pitre
+ * and the smsc911x.c reference driver by SMSC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Arguments:
+ * watchdog = TX watchdog timeout
+ * tx_fifo_kb = Size of TX FIFO in KB
+ *
+ * History:
+ * 04/16/05 Dustin McIntire Initial version
+ */
+static const char version[] =
+ "smc911x.c: v1.0 04-16-2005 by Dustin McIntire <dustin@sensoria.com>\n";
+
+/* Debugging options */
+#define ENABLE_SMC_DEBUG_RX 0
+#define ENABLE_SMC_DEBUG_TX 0
+#define ENABLE_SMC_DEBUG_DMA 0
+#define ENABLE_SMC_DEBUG_PKTS 0
+#define ENABLE_SMC_DEBUG_MISC 0
+#define ENABLE_SMC_DEBUG_FUNC 0
+
+#define SMC_DEBUG_RX ((ENABLE_SMC_DEBUG_RX ? 1 : 0) << 0)
+#define SMC_DEBUG_TX ((ENABLE_SMC_DEBUG_TX ? 1 : 0) << 1)
+#define SMC_DEBUG_DMA ((ENABLE_SMC_DEBUG_DMA ? 1 : 0) << 2)
+#define SMC_DEBUG_PKTS ((ENABLE_SMC_DEBUG_PKTS ? 1 : 0) << 3)
+#define SMC_DEBUG_MISC ((ENABLE_SMC_DEBUG_MISC ? 1 : 0) << 4)
+#define SMC_DEBUG_FUNC ((ENABLE_SMC_DEBUG_FUNC ? 1 : 0) << 5)
+
+#ifndef SMC_DEBUG
+#define SMC_DEBUG ( SMC_DEBUG_RX | \
+ SMC_DEBUG_TX | \
+ SMC_DEBUG_DMA | \
+ SMC_DEBUG_PKTS | \
+ SMC_DEBUG_MISC | \
+ SMC_DEBUG_FUNC \
+ )
+#endif
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/workqueue.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+
+#include "smc911x.h"
+
+/*
+ * Transmit timeout, default 5 seconds.
+ */
+static int watchdog = 5000;
+module_param(watchdog, int, 0400);
+MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
+
+static int tx_fifo_kb=8;
+module_param(tx_fifo_kb, int, 0400);
+MODULE_PARM_DESC(tx_fifo_kb,"transmit FIFO size in KB (1<x<15)(default=8)");
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:smc911x");
+
+/*
+ * The internal workings of the driver. If you are changing anything
+ * here with the SMC stuff, you should have the datasheet and know
+ * what you are doing.
+ */
+#define CARDNAME "smc911x"
+
+/*
+ * Use power-down feature of the chip
+ */
+#define POWER_DOWN 1
+
+#if SMC_DEBUG > 0
+#define DBG(n, args...) \
+ do { \
+ if (SMC_DEBUG & (n)) \
+ printk(args); \
+ } while (0)
+
+#define PRINTK(args...) printk(args)
+#else
+#define DBG(n, args...) do { } while (0)
+#define PRINTK(args...) printk(KERN_DEBUG args)
+#endif
+
+#if SMC_DEBUG_PKTS > 0
+static void PRINT_PKT(u_char *buf, int length)
+{
+ int i;
+ int remainder;
+ int lines;
+
+ lines = length / 16;
+ remainder = length % 16;
+
+ for (i = 0; i < lines ; i ++) {
+ int cur;
+ for (cur = 0; cur < 8; cur++) {
+ u_char a, b;
+ a = *buf++;
+ b = *buf++;
+ printk("%02x%02x ", a, b);
+ }
+ printk("\n");
+ }
+ for (i = 0; i < remainder/2 ; i++) {
+ u_char a, b;
+ a = *buf++;
+ b = *buf++;
+ printk("%02x%02x ", a, b);
+ }
+ printk("\n");
+}
+#else
+#define PRINT_PKT(x...) do { } while (0)
+#endif
+
+
+/* this enables an interrupt in the interrupt mask register */
+#define SMC_ENABLE_INT(lp, x) do { \
+ unsigned int __mask; \
+ __mask = SMC_GET_INT_EN((lp)); \
+ __mask |= (x); \
+ SMC_SET_INT_EN((lp), __mask); \
+} while (0)
+
+/* this disables an interrupt from the interrupt mask register */
+#define SMC_DISABLE_INT(lp, x) do { \
+ unsigned int __mask; \
+ __mask = SMC_GET_INT_EN((lp)); \
+ __mask &= ~(x); \
+ SMC_SET_INT_EN((lp), __mask); \
+} while (0)
+
+/*
+ * this does a soft reset on the device
+ */
+static void smc911x_reset(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int reg, timeout=0, resets=1, irq_cfg;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+
+ /* Take out of PM setting first */
+ if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) {
+ /* A write to BYTE_TEST will take the chip out of power-down */
+ SMC_SET_BYTE_TEST(lp, 0);
+ timeout=10;
+ do {
+ udelay(10);
+ reg = SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_;
+ } while (--timeout && !reg);
+ if (timeout == 0) {
+ PRINTK("%s: smc911x_reset timeout waiting for PM restore\n", dev->name);
+ return;
+ }
+ }
+
+ /* Disable all interrupts */
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_SET_INT_EN(lp, 0);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ while (resets--) {
+ SMC_SET_HW_CFG(lp, HW_CFG_SRST_);
+ timeout=10;
+ do {
+ udelay(10);
+ reg = SMC_GET_HW_CFG(lp);
+ /* If chip indicates reset timeout then try again */
+ if (reg & HW_CFG_SRST_TO_) {
+ PRINTK("%s: chip reset timeout, retrying...\n", dev->name);
+ resets++;
+ break;
+ }
+ } while (--timeout && (reg & HW_CFG_SRST_));
+ }
+ if (timeout == 0) {
+ PRINTK("%s: smc911x_reset timeout waiting for reset\n", dev->name);
+ return;
+ }
+
+ /* make sure EEPROM has finished loading before setting GPIO_CFG */
+ timeout=1000;
+ while (--timeout && (SMC_GET_E2P_CMD(lp) & E2P_CMD_EPC_BUSY_))
+ udelay(10);
+
+ if (timeout == 0){
+ PRINTK("%s: smc911x_reset timeout waiting for EEPROM busy\n", dev->name);
+ return;
+ }
+
+ /* Initialize interrupts */
+ SMC_SET_INT_EN(lp, 0);
+ SMC_ACK_INT(lp, -1);
+
+ /* Reset the FIFO level and flow control settings */
+ SMC_SET_HW_CFG(lp, (lp->tx_fifo_kb & 0xF) << 16);
+//TODO: Figure out what appropriate pause time is
+ SMC_SET_FLOW(lp, FLOW_FCPT_ | FLOW_FCEN_);
+ SMC_SET_AFC_CFG(lp, lp->afc_cfg);
+
+
+ /* Set to LED outputs */
+ SMC_SET_GPIO_CFG(lp, 0x70070000);
+
+ /*
+ * Deassert IRQ for 1*10us for edge type interrupts
+ * and drive IRQ pin push-pull
+ */
+ irq_cfg = (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_;
+#ifdef SMC_DYNAMIC_BUS_CONFIG
+ if (lp->cfg.irq_polarity)
+ irq_cfg |= INT_CFG_IRQ_POL_;
+#endif
+ SMC_SET_IRQ_CFG(lp, irq_cfg);
+
+ /* clear anything saved */
+ if (lp->pending_tx_skb != NULL) {
+ dev_kfree_skb (lp->pending_tx_skb);
+ lp->pending_tx_skb = NULL;
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
+ }
+}
+
+/*
+ * Enable Interrupts, Receive, and Transmit
+ */
+static void smc911x_enable(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned mask, cfg, cr;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ SMC_SET_MAC_ADDR(lp, dev->dev_addr);
+
+ /* Enable TX */
+ cfg = SMC_GET_HW_CFG(lp);
+ cfg &= HW_CFG_TX_FIF_SZ_ | 0xFFF;
+ cfg |= HW_CFG_SF_;
+ SMC_SET_HW_CFG(lp, cfg);
+ SMC_SET_FIFO_TDA(lp, 0xFF);
+ /* Update TX stats every 64 packets transmitted or every 1 sec */
+ SMC_SET_FIFO_TSL(lp, 64);
+ SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
+
+ SMC_GET_MAC_CR(lp, cr);
+ cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_;
+ SMC_SET_MAC_CR(lp, cr);
+ SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_);
+
+ /* Add 2 byte padding to start of packets */
+ SMC_SET_RX_CFG(lp, (2<<8) & RX_CFG_RXDOFF_);
+
+ /* Turn on receiver and enable RX */
+ if (cr & MAC_CR_RXEN_)
+ DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name);
+
+ SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_);
+
+ /* Interrupt on every received packet */
+ SMC_SET_FIFO_RSA(lp, 0x01);
+ SMC_SET_FIFO_RSL(lp, 0x00);
+
+ /* now, enable interrupts */
+ mask = INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_ | INT_EN_RSFL_EN_ |
+ INT_EN_GPT_INT_EN_ | INT_EN_RXDFH_INT_EN_ | INT_EN_RXE_EN_ |
+ INT_EN_PHY_INT_EN_;
+ if (IS_REV_A(lp->revision))
+ mask|=INT_EN_RDFL_EN_;
+ else {
+ mask|=INT_EN_RDFO_EN_;
+ }
+ SMC_ENABLE_INT(lp, mask);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+/*
+ * this puts the device in an inactive state
+ */
+static void smc911x_shutdown(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned cr;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __func__);
+
+ /* Disable IRQ's */
+ SMC_SET_INT_EN(lp, 0);
+
+ /* Turn off Rx and Tx */
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_GET_MAC_CR(lp, cr);
+ cr &= ~(MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_);
+ SMC_SET_MAC_CR(lp, cr);
+ SMC_SET_TX_CFG(lp, TX_CFG_STOP_TX_);
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+static inline void smc911x_drop_pkt(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int fifo_count, timeout, reg;
+
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __func__);
+ fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF;
+ if (fifo_count <= 4) {
+ /* Manually dump the packet data */
+ while (fifo_count--)
+ SMC_GET_RX_FIFO(lp);
+ } else {
+ /* Fast forward through the bad packet */
+ SMC_SET_RX_DP_CTRL(lp, RX_DP_CTRL_FFWD_BUSY_);
+ timeout=50;
+ do {
+ udelay(10);
+ reg = SMC_GET_RX_DP_CTRL(lp) & RX_DP_CTRL_FFWD_BUSY_;
+ } while (--timeout && reg);
+ if (timeout == 0) {
+ PRINTK("%s: timeout waiting for RX fast forward\n", dev->name);
+ }
+ }
+}
+
+/*
+ * This is the procedure to handle the receipt of a packet.
+ * It should be called after checking for packet presence in
+ * the RX status FIFO. It must be called with the spin lock
+ * already held.
+ */
+static inline void smc911x_rcv(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int pkt_len, status;
+ struct sk_buff *skb;
+ unsigned char *data;
+
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
+ dev->name, __func__);
+ status = SMC_GET_RX_STS_FIFO(lp);
+ DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x\n",
+ dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
+ pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
+ if (status & RX_STS_ES_) {
+ /* Deal with a bad packet */
+ dev->stats.rx_errors++;
+ if (status & RX_STS_CRC_ERR_)
+ dev->stats.rx_crc_errors++;
+ else {
+ if (status & RX_STS_LEN_ERR_)
+ dev->stats.rx_length_errors++;
+ if (status & RX_STS_MCAST_)
+ dev->stats.multicast++;
+ }
+ /* Remove the bad packet data from the RX FIFO */
+ smc911x_drop_pkt(dev);
+ } else {
+ /* Receive a valid packet */
+ /* Alloc a buffer with extra room for DMA alignment */
+ skb=dev_alloc_skb(pkt_len+32);
+ if (unlikely(skb == NULL)) {
+ PRINTK( "%s: Low memory, rcvd packet dropped.\n",
+ dev->name);
+ dev->stats.rx_dropped++;
+ smc911x_drop_pkt(dev);
+ return;
+ }
+ /* Align the IP header to a 32-bit boundary.
+ * Note that the device is configured to add 2 bytes
+ * of padding to the packet start, so we really
+ * want to write to the original data pointer */
+ data = skb->data;
+ skb_reserve(skb, 2);
+ skb_put(skb,pkt_len-4);
+#ifdef SMC_USE_DMA
+ {
+ unsigned int fifo;
+ /* Lower the FIFO threshold if possible */
+ fifo = SMC_GET_FIFO_INT(lp);
+ if (fifo & 0xFF) fifo--;
+ DBG(SMC_DEBUG_RX, "%s: Setting RX stat FIFO threshold to %d\n",
+ dev->name, fifo & 0xff);
+ SMC_SET_FIFO_INT(lp, fifo);
+ /* Setup RX DMA */
+ SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_));
+ lp->rxdma_active = 1;
+ lp->current_rx_skb = skb;
+ SMC_PULL_DATA(lp, data, (pkt_len+2+15) & ~15);
+ /* Packet processing deferred to DMA RX interrupt */
+ }
+#else
+ SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_));
+ SMC_PULL_DATA(lp, data, pkt_len+2+3);
+
+ DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name);
+ PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len-4;
+#endif
+ }
+}
+
+/*
+ * This is called to actually send a packet to the chip.
+ */
+static void smc911x_hardware_send_pkt(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ struct sk_buff *skb;
+ unsigned int cmdA, cmdB, len;
+ unsigned char *buf;
+
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__);
+ BUG_ON(lp->pending_tx_skb == NULL);
+
+ skb = lp->pending_tx_skb;
+ lp->pending_tx_skb = NULL;
+
+ /* cmdA [25:24] data alignment [20:16] start offset [10:0] buffer length */
+ /* cmdB [31:16] pkt tag [10:0] length */
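+ /* In DMA mode the buffer pointer below is rounded down to a 16-byte
+    boundary and the length rounded up so the transfer covers the whole
+    frame; the start-offset field in cmdA tells the chip how many
+    leading bytes to skip. */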
+#ifdef SMC_USE_DMA
+ /* 16 byte buffer alignment mode */
+ buf = (char*)((u32)(skb->data) & ~0xF);
+ len = (skb->len + 0xF + ((u32)skb->data & 0xF)) & ~0xF;
+ cmdA = (1<<24) | (((u32)skb->data & 0xF)<<16) |
+ TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
+ skb->len;
+#else
+ buf = (char*)((u32)skb->data & ~0x3);
+ len = (skb->len + 3 + ((u32)skb->data & 3)) & ~0x3;
+ cmdA = (((u32)skb->data & 0x3) << 16) |
+ TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
+ skb->len;
+#endif
+ /* tag is packet length so we can use this in stats update later */
+ cmdB = (skb->len << 16) | (skb->len & 0x7FF);
+
+ DBG(SMC_DEBUG_TX, "%s: TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n",
+ dev->name, len, len, buf, cmdA, cmdB);
+ SMC_SET_TX_FIFO(lp, cmdA);
+ SMC_SET_TX_FIFO(lp, cmdB);
+
+ DBG(SMC_DEBUG_PKTS, "%s: Transmitted packet\n", dev->name);
+ PRINT_PKT(buf, len <= 64 ? len : 64);
+
+ /* Send pkt via PIO or DMA */
+#ifdef SMC_USE_DMA
+ lp->current_tx_skb = skb;
+ SMC_PUSH_DATA(lp, buf, len);
+ /* DMA complete IRQ will free buffer and set jiffies */
+#else
+ SMC_PUSH_DATA(lp, buf, len);
+ dev->trans_start = jiffies;
+ dev_kfree_skb_irq(skb);
+#endif
+ if (!lp->tx_throttle) {
+ netif_wake_queue(dev);
+ }
+ SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_);
+}
+
+/*
+ * Since I am not sure there will be enough room in the chip's RAM
+ * to store the packet, I call this routine, which either sends it
+ * now or sets the card to generate an interrupt when it is ready
+ * for the packet.
+ */
+static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int free;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
+ dev->name, __func__);
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ BUG_ON(lp->pending_tx_skb != NULL);
+
+ free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_;
+ DBG(SMC_DEBUG_TX, "%s: TX free space %d\n", dev->name, free);
+
+ /* Turn off the flow when running out of space in FIFO */
+ if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) {
+ DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n",
+ dev->name, free);
+ /* Reenable when at least 1 packet of size MTU present */
+ SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64);
+ lp->tx_throttle = 1;
+ netif_stop_queue(dev);
+ }
+
+ /* Drop packets when we run out of space in TX FIFO
+ * Account for overhead required for:
+ *
+ * Tx command words 8 bytes
+ * Start offset 15 bytes
+ * End padding 15 bytes
+ */
+ if (unlikely(free < (skb->len + 8 + 15 + 15))) {
+ printk("%s: No Tx free space %d < %d\n",
+ dev->name, free, skb->len);
+ lp->pending_tx_skb = NULL;
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+ spin_unlock_irqrestore(&lp->lock, flags);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+#ifdef SMC_USE_DMA
+ {
+ /* If the DMA is already running then defer this packet Tx until
+ * the DMA IRQ starts it
+ */
+ if (lp->txdma_active) {
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name);
+ lp->pending_tx_skb = skb;
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return NETDEV_TX_OK;
+ } else {
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name);
+ lp->txdma_active = 1;
+ }
+ }
+#endif
+ lp->pending_tx_skb = skb;
+ smc911x_hardware_send_pkt(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * This handles a TX status interrupt, which is only called when:
+ * - a TX error occurred, or
+ * - TX of a packet completed.
+ */
+static void smc911x_tx(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int tx_status;
+
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
+ dev->name, __func__);
+
+ /* Collect the TX status */
+ while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) {
+ DBG(SMC_DEBUG_TX, "%s: Tx stat FIFO used 0x%04x\n",
+ dev->name,
+ (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16);
+ tx_status = SMC_GET_TX_STS_FIFO(lp);
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes+=tx_status>>16;
+ DBG(SMC_DEBUG_TX, "%s: Tx FIFO tag 0x%04x status 0x%04x\n",
+ dev->name, (tx_status & 0xffff0000) >> 16,
+ tx_status & 0x0000ffff);
+ /* count Tx errors, but ignore lost carrier errors when in
+ * full-duplex mode */
+ if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx &&
+ !(tx_status & 0x00000306))) {
+ dev->stats.tx_errors++;
+ }
+ if (tx_status & TX_STS_MANY_COLL_) {
+ dev->stats.collisions+=16;
+ dev->stats.tx_aborted_errors++;
+ } else {
+ dev->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3;
+ }
+ /* carrier error only has meaning for half-duplex communication */
+ if ((tx_status & (TX_STS_LOC_ | TX_STS_NO_CARR_)) &&
+ !lp->ctl_rfduplx) {
+ dev->stats.tx_carrier_errors++;
+ }
+ if (tx_status & TX_STS_LATE_COLL_) {
+ dev->stats.collisions++;
+ dev->stats.tx_aborted_errors++;
+ }
+ }
+}
+
+
+/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
+/*
+ * Reads a register from the MII Management serial interface
+ */
+
+static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int phydata;
+
+ SMC_GET_MII(lp, phyreg, phyaddr, phydata);
+
+ DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
+ __func__, phyaddr, phyreg, phydata);
+ return phydata;
+}
+
+
+/*
+ * Writes a register to the MII Management serial interface
+ */
+static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg,
+ int phydata)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+
+ DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+ __func__, phyaddr, phyreg, phydata);
+
+ SMC_SET_MII(lp, phyreg, phyaddr, phydata);
+}
+
+/*
+ * Finds and reports the PHY address (the 9115/9117/9215/9217 can have
+ * an external PHY interface; the 9118 has an internal PHY only).
+ */
+static void smc911x_phy_detect(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int phyaddr;
+ unsigned int cfg, id1, id2;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+
+ lp->phy_type = 0;
+
+ /*
+	 * If an external PHY is present, scan addresses PHY#1 to PHY#31
+	 * for a valid identifier; otherwise fall back to the internal PHY.
+ */
+ switch(lp->version) {
+ case CHIP_9115:
+ case CHIP_9117:
+ case CHIP_9215:
+ case CHIP_9217:
+ cfg = SMC_GET_HW_CFG(lp);
+ if (cfg & HW_CFG_EXT_PHY_DET_) {
+ cfg &= ~HW_CFG_PHY_CLK_SEL_;
+ cfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_;
+ SMC_SET_HW_CFG(lp, cfg);
+ udelay(10); /* Wait for clocks to stop */
+
+ cfg |= HW_CFG_EXT_PHY_EN_;
+ SMC_SET_HW_CFG(lp, cfg);
+ udelay(10); /* Wait for clocks to stop */
+
+ cfg &= ~HW_CFG_PHY_CLK_SEL_;
+ cfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_;
+ SMC_SET_HW_CFG(lp, cfg);
+ udelay(10); /* Wait for clocks to stop */
+
+ cfg |= HW_CFG_SMI_SEL_;
+ SMC_SET_HW_CFG(lp, cfg);
+
+ for (phyaddr = 1; phyaddr < 32; ++phyaddr) {
+
+ /* Read the PHY identifiers */
+ SMC_GET_PHY_ID1(lp, phyaddr & 31, id1);
+ SMC_GET_PHY_ID2(lp, phyaddr & 31, id2);
+
+ /* Make sure it is a valid identifier */
+ if (id1 != 0x0000 && id1 != 0xffff &&
+ id1 != 0x8000 && id2 != 0x0000 &&
+ id2 != 0xffff && id2 != 0x8000) {
+ /* Save the PHY's address */
+ lp->mii.phy_id = phyaddr & 31;
+ lp->phy_type = id1 << 16 | id2;
+ break;
+ }
+ }
+ if (phyaddr < 32)
+ /* Found an external PHY */
+ break;
+		}
+		/* fall through - no external PHY was found */
+	default:
+ /* Internal media only */
+ SMC_GET_PHY_ID1(lp, 1, id1);
+ SMC_GET_PHY_ID2(lp, 1, id2);
+ /* Save the PHY's address */
+ lp->mii.phy_id = 1;
+ lp->phy_type = id1 << 16 | id2;
+ }
+
+ DBG(SMC_DEBUG_MISC, "%s: phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%d\n",
+ dev->name, id1, id2, lp->mii.phy_id);
+}
+
+/*
+ * Sets the PHY to a configuration as determined by the user.
+ * Called with spin_lock held.
+ */
+static int smc911x_phy_fixed(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int phyaddr = lp->mii.phy_id;
+ int bmcr;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+
+ /* Enter Link Disable state */
+ SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
+ bmcr |= BMCR_PDOWN;
+ SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
+
+ /*
+ * Set our fixed capabilities
+ * Disable auto-negotiation
+ */
+ bmcr &= ~BMCR_ANENABLE;
+ if (lp->ctl_rfduplx)
+ bmcr |= BMCR_FULLDPLX;
+
+ if (lp->ctl_rspeed == 100)
+ bmcr |= BMCR_SPEED100;
+
+ /* Write our capabilities to the phy control register */
+ SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
+
+ /* Re-Configure the Receive/Phy Control register */
+ bmcr &= ~BMCR_PDOWN;
+ SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
+
+ return 1;
+}
+
+/*
+ * smc911x_phy_reset - reset the phy
+ * @dev: net device
+ * @phy: phy address
+ *
+ * Issue a software reset for the specified PHY and
+ * wait up to 100ms for the reset to complete. We should
+ * not access the PHY for 50ms after issuing the reset.
+ *
+ * The time to wait appears to be dependent on the PHY.
+ *
+ */
+static int smc911x_phy_reset(struct net_device *dev, int phy)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int timeout;
+ unsigned long flags;
+ unsigned int reg;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ reg = SMC_GET_PMT_CTRL(lp);
+ reg &= ~0xfffff030;
+ reg |= PMT_CTRL_PHY_RST_;
+ SMC_SET_PMT_CTRL(lp, reg);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ for (timeout = 2; timeout; timeout--) {
+ msleep(50);
+ spin_lock_irqsave(&lp->lock, flags);
+ reg = SMC_GET_PMT_CTRL(lp);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ if (!(reg & PMT_CTRL_PHY_RST_)) {
+			/* extra delay required because the phy may
+			 * not have completed its reset when
+			 * PMT_CTRL_PHY_RST_ reads back as cleared.
+			 * 256us should suffice, but use 500us to be safe
+			 */
+ udelay(500);
+ break;
+ }
+ }
+
+ return reg & PMT_CTRL_PHY_RST_;
+}
+
+/*
+ * smc911x_phy_powerdown - powerdown phy
+ * @dev: net device
+ * @phy: phy address
+ *
+ * Power down the specified PHY
+ */
+static void smc911x_phy_powerdown(struct net_device *dev, int phy)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int bmcr;
+
+ /* Enter Link Disable state */
+ SMC_GET_PHY_BMCR(lp, phy, bmcr);
+ bmcr |= BMCR_PDOWN;
+ SMC_SET_PHY_BMCR(lp, phy, bmcr);
+}
+
+/*
+ * smc911x_phy_check_media - check the media status and adjust BMCR
+ * @dev: net device
+ * @init: set true for initialisation
+ *
+ * Select duplex mode depending on negotiation state. This
+ * also updates our carrier state.
+ */
+static void smc911x_phy_check_media(struct net_device *dev, int init)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int phyaddr = lp->mii.phy_id;
+ unsigned int bmcr, cr;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+
+ if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
+ /* duplex state has changed */
+ SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
+ SMC_GET_MAC_CR(lp, cr);
+ if (lp->mii.full_duplex) {
+ DBG(SMC_DEBUG_MISC, "%s: Configuring for full-duplex mode\n", dev->name);
+ bmcr |= BMCR_FULLDPLX;
+ cr |= MAC_CR_RCVOWN_;
+ } else {
+ DBG(SMC_DEBUG_MISC, "%s: Configuring for half-duplex mode\n", dev->name);
+ bmcr &= ~BMCR_FULLDPLX;
+ cr &= ~MAC_CR_RCVOWN_;
+ }
+ SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
+ SMC_SET_MAC_CR(lp, cr);
+ }
+}
+
+/*
+ * Configures the specified PHY through the MII management interface
+ * using Autonegotiation.
+ * Calls smc911x_phy_fixed() if the user has requested a certain config.
+ * If RPC ANEG bit is set, the media selection is dependent purely on
+ * the selection by the MII (either in the MII BMCR reg or the result
+ * of autonegotiation.) If the RPC ANEG bit is cleared, the selection
+ * is controlled by the RPC SPEED and RPC DPLX bits.
+ */
+static void smc911x_phy_configure(struct work_struct *work)
+{
+ struct smc911x_local *lp = container_of(work, struct smc911x_local,
+ phy_configure);
+ struct net_device *dev = lp->netdev;
+ int phyaddr = lp->mii.phy_id;
+ int my_phy_caps; /* My PHY capabilities */
+ int my_ad_caps; /* My Advertised capabilities */
+ int status;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__);
+
+ /*
+ * We should not be called if phy_type is zero.
+ */
+ if (lp->phy_type == 0)
+ return;
+
+ if (smc911x_phy_reset(dev, phyaddr)) {
+ printk("%s: PHY reset timed out\n", dev->name);
+ return;
+ }
+ spin_lock_irqsave(&lp->lock, flags);
+
+ /*
+ * Enable PHY Interrupts (for register 18)
+ * Interrupts listed here are enabled
+ */
+ SMC_SET_PHY_INT_MASK(lp, phyaddr, PHY_INT_MASK_ENERGY_ON_ |
+ PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_REMOTE_FAULT_ |
+ PHY_INT_MASK_LINK_DOWN_);
+
+ /* If the user requested no auto neg, then go set his request */
+ if (lp->mii.force_media) {
+ smc911x_phy_fixed(dev);
+ goto smc911x_phy_configure_exit;
+ }
+
+ /* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
+ SMC_GET_PHY_BMSR(lp, phyaddr, my_phy_caps);
+ if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
+ printk(KERN_INFO "Auto negotiation NOT supported\n");
+ smc911x_phy_fixed(dev);
+ goto smc911x_phy_configure_exit;
+ }
+
+ /* CSMA capable w/ both pauses */
+ my_ad_caps = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+
+ if (my_phy_caps & BMSR_100BASE4)
+ my_ad_caps |= ADVERTISE_100BASE4;
+ if (my_phy_caps & BMSR_100FULL)
+ my_ad_caps |= ADVERTISE_100FULL;
+ if (my_phy_caps & BMSR_100HALF)
+ my_ad_caps |= ADVERTISE_100HALF;
+ if (my_phy_caps & BMSR_10FULL)
+ my_ad_caps |= ADVERTISE_10FULL;
+ if (my_phy_caps & BMSR_10HALF)
+ my_ad_caps |= ADVERTISE_10HALF;
+
+ /* Disable capabilities not selected by our user */
+ if (lp->ctl_rspeed != 100)
+ my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF);
+
+ if (!lp->ctl_rfduplx)
+ my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL);
+
+ /* Update our Auto-Neg Advertisement Register */
+ SMC_SET_PHY_MII_ADV(lp, phyaddr, my_ad_caps);
+ lp->mii.advertising = my_ad_caps;
+
+ /*
+ * Read the register back. Without this, it appears that when
+ * auto-negotiation is restarted, sometimes it isn't ready and
+ * the link does not come up.
+ */
+ udelay(10);
+ SMC_GET_PHY_MII_ADV(lp, phyaddr, status);
+
+ DBG(SMC_DEBUG_MISC, "%s: phy caps=0x%04x\n", dev->name, my_phy_caps);
+ DBG(SMC_DEBUG_MISC, "%s: phy advertised caps=0x%04x\n", dev->name, my_ad_caps);
+
+ /* Restart auto-negotiation process in order to advertise my caps */
+ SMC_SET_PHY_BMCR(lp, phyaddr, BMCR_ANENABLE | BMCR_ANRESTART);
+
+ smc911x_phy_check_media(dev, 1);
+
+smc911x_phy_configure_exit:
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+/*
+ * smc911x_phy_interrupt
+ *
+ * Purpose: Handle interrupts relating to PHY register 18. This is
+ * called from the "hard" interrupt handler under our private spinlock.
+ */
+static void smc911x_phy_interrupt(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int phyaddr = lp->mii.phy_id;
+ int status;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+
+ if (lp->phy_type == 0)
+ return;
+
+ smc911x_phy_check_media(dev, 0);
+ /* read to clear status bits */
+ SMC_GET_PHY_INT_SRC(lp, phyaddr,status);
+ DBG(SMC_DEBUG_MISC, "%s: PHY interrupt status 0x%04x\n",
+ dev->name, status & 0xffff);
+ DBG(SMC_DEBUG_MISC, "%s: AFC_CFG 0x%08x\n",
+ dev->name, SMC_GET_AFC_CFG(lp));
+}
+
+/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/
+
+/*
+ * This is the main routine of the driver, to handle the device when
+ * it needs some attention.
+ */
+static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int status, mask, timeout;
+ unsigned int rx_overrun=0, cr, pkts;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+	/* Spurious interrupt check: the chip's interrupt flag and its enable
+	 * bit must both be set, otherwise the IRQ belongs to another device
+	 * sharing the line */
+ if ((SMC_GET_IRQ_CFG(lp) & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) !=
+ (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) {
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return IRQ_NONE;
+ }
+
+ mask = SMC_GET_INT_EN(lp);
+ SMC_SET_INT_EN(lp, 0);
+
+ /* set a timeout value, so I don't stay here forever */
+ timeout = 8;
+
+
+ do {
+ status = SMC_GET_INT(lp);
+
+ DBG(SMC_DEBUG_MISC, "%s: INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n",
+ dev->name, status, mask, status & ~mask);
+
+ status &= mask;
+ if (!status)
+ break;
+
+ /* Handle SW interrupt condition */
+ if (status & INT_STS_SW_INT_) {
+ SMC_ACK_INT(lp, INT_STS_SW_INT_);
+ mask &= ~INT_EN_SW_INT_EN_;
+ }
+ /* Handle various error conditions */
+ if (status & INT_STS_RXE_) {
+ SMC_ACK_INT(lp, INT_STS_RXE_);
+ dev->stats.rx_errors++;
+ }
+ if (status & INT_STS_RXDFH_INT_) {
+ SMC_ACK_INT(lp, INT_STS_RXDFH_INT_);
+ dev->stats.rx_dropped+=SMC_GET_RX_DROP(lp);
+ }
+		/* Undocumented interrupt - what is the right thing to do here? */
+ if (status & INT_STS_RXDF_INT_) {
+ SMC_ACK_INT(lp, INT_STS_RXDF_INT_);
+ }
+
+ /* Rx Data FIFO exceeds set level */
+ if (status & INT_STS_RDFL_) {
+ if (IS_REV_A(lp->revision)) {
+ rx_overrun=1;
+ SMC_GET_MAC_CR(lp, cr);
+ cr &= ~MAC_CR_RXEN_;
+ SMC_SET_MAC_CR(lp, cr);
+ DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name);
+ dev->stats.rx_errors++;
+ dev->stats.rx_fifo_errors++;
+ }
+ SMC_ACK_INT(lp, INT_STS_RDFL_);
+ }
+ if (status & INT_STS_RDFO_) {
+ if (!IS_REV_A(lp->revision)) {
+ SMC_GET_MAC_CR(lp, cr);
+ cr &= ~MAC_CR_RXEN_;
+ SMC_SET_MAC_CR(lp, cr);
+ rx_overrun=1;
+ DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name);
+ dev->stats.rx_errors++;
+ dev->stats.rx_fifo_errors++;
+ }
+ SMC_ACK_INT(lp, INT_STS_RDFO_);
+ }
+ /* Handle receive condition */
+ if ((status & INT_STS_RSFL_) || rx_overrun) {
+ unsigned int fifo;
+ DBG(SMC_DEBUG_RX, "%s: RX irq\n", dev->name);
+ fifo = SMC_GET_RX_FIFO_INF(lp);
+ pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16;
+ DBG(SMC_DEBUG_RX, "%s: Rx FIFO pkts %d, bytes %d\n",
+ dev->name, pkts, fifo & 0xFFFF );
+ if (pkts != 0) {
+#ifdef SMC_USE_DMA
+			if (lp->rxdma_active) {
+ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA,
+ "%s: RX DMA active\n", dev->name);
+ /* The DMA is already running so up the IRQ threshold */
+ fifo = SMC_GET_FIFO_INT(lp) & ~0xFF;
+ fifo |= pkts & 0xFF;
+ DBG(SMC_DEBUG_RX,
+ "%s: Setting RX stat FIFO threshold to %d\n",
+ dev->name, fifo & 0xff);
+ SMC_SET_FIFO_INT(lp, fifo);
+ } else
+#endif
+ smc911x_rcv(dev);
+ }
+ SMC_ACK_INT(lp, INT_STS_RSFL_);
+ }
+ /* Handle transmit FIFO available */
+ if (status & INT_STS_TDFA_) {
+ DBG(SMC_DEBUG_TX, "%s: TX data FIFO space available irq\n", dev->name);
+ SMC_SET_FIFO_TDA(lp, 0xFF);
+ lp->tx_throttle = 0;
+#ifdef SMC_USE_DMA
+ if (!lp->txdma_active)
+#endif
+ netif_wake_queue(dev);
+ SMC_ACK_INT(lp, INT_STS_TDFA_);
+ }
+ /* Handle transmit done condition */
+#if 1
+ if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) {
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC,
+ "%s: Tx stat FIFO limit (%d) /GPT irq\n",
+ dev->name, (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16);
+ smc911x_tx(dev);
+ SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
+			SMC_ACK_INT(lp, INT_STS_TSFL_ | INT_STS_GPT_INT_);
+ }
+#else
+ if (status & INT_STS_TSFL_) {
+		DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq\n", dev->name,
+			(SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16);
+ smc911x_tx(dev);
+ SMC_ACK_INT(lp, INT_STS_TSFL_);
+ }
+
+ if (status & INT_STS_GPT_INT_) {
+ DBG(SMC_DEBUG_RX, "%s: IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n",
+ dev->name,
+ SMC_GET_IRQ_CFG(lp),
+ SMC_GET_FIFO_INT(lp),
+ SMC_GET_RX_CFG(lp));
+ DBG(SMC_DEBUG_RX, "%s: Rx Stat FIFO Used 0x%02x "
+ "Data FIFO Used 0x%04x Stat FIFO 0x%08x\n",
+ dev->name,
+ (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16,
+ SMC_GET_RX_FIFO_INF(lp) & 0xffff,
+ SMC_GET_RX_STS_FIFO_PEEK(lp));
+ SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
+ SMC_ACK_INT(lp, INT_STS_GPT_INT_);
+ }
+#endif
+
+ /* Handle PHY interrupt condition */
+ if (status & INT_STS_PHY_INT_) {
+ DBG(SMC_DEBUG_MISC, "%s: PHY irq\n", dev->name);
+ smc911x_phy_interrupt(dev);
+ SMC_ACK_INT(lp, INT_STS_PHY_INT_);
+ }
+ } while (--timeout);
+
+ /* restore mask state */
+ SMC_SET_INT_EN(lp, mask);
+
+ DBG(SMC_DEBUG_MISC, "%s: Interrupt done (%d loops)\n",
+ dev->name, 8-timeout);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef SMC_USE_DMA
+static void
+smc911x_tx_dma_irq(int dma, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct smc911x_local *lp = netdev_priv(dev);
+ struct sk_buff *skb = lp->current_tx_skb;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name);
+ /* Clear the DMA interrupt sources */
+ SMC_DMA_ACK_IRQ(dev, dma);
+ BUG_ON(skb == NULL);
+	dma_unmap_single(lp->dev, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
+ dev->trans_start = jiffies;
+ dev_kfree_skb_irq(skb);
+ lp->current_tx_skb = NULL;
+ if (lp->pending_tx_skb != NULL)
+ smc911x_hardware_send_pkt(dev);
+ else {
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA,
+ "%s: No pending Tx packets. DMA disabled\n", dev->name);
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->txdma_active = 0;
+ if (!lp->tx_throttle) {
+ netif_wake_queue(dev);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA,
+ "%s: TX DMA irq completed\n", dev->name);
+}
+static void
+smc911x_rx_dma_irq(int dma, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ unsigned long ioaddr = dev->base_addr;
+ struct smc911x_local *lp = netdev_priv(dev);
+ struct sk_buff *skb = lp->current_rx_skb;
+ unsigned long flags;
+ unsigned int pkts;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name);
+ /* Clear the DMA interrupt sources */
+ SMC_DMA_ACK_IRQ(dev, dma);
+	dma_unmap_single(lp->dev, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
+ BUG_ON(skb == NULL);
+ lp->current_rx_skb = NULL;
+ PRINT_PKT(skb->data, skb->len);
+ skb->protocol = eth_type_trans(skb, dev);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+ netif_rx(skb);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ pkts = (SMC_GET_RX_FIFO_INF(lp) & RX_FIFO_INF_RXSUSED_) >> 16;
+ if (pkts != 0) {
+ smc911x_rcv(dev);
+	} else {
+ lp->rxdma_active = 0;
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA,
+ "%s: RX DMA irq completed. DMA RX FIFO PKTS %d\n",
+ dev->name, pkts);
+}
+#endif /* SMC_USE_DMA */
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void smc911x_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ smc911x_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+/* Our watchdog timed out. Called by the networking layer */
+static void smc911x_timeout(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int status, mask;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ status = SMC_GET_INT(lp);
+ mask = SMC_GET_INT_EN(lp);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x\n",
+ dev->name, status, mask);
+
+ /* Dump the current TX FIFO contents and restart */
+ mask = SMC_GET_TX_CFG(lp);
+ SMC_SET_TX_CFG(lp, mask | TX_CFG_TXS_DUMP_ | TX_CFG_TXD_DUMP_);
+ /*
+ * Reconfiguring the PHY doesn't seem like a bad idea here, but
+ * smc911x_phy_configure() calls msleep() which calls schedule_timeout()
+ * which calls schedule(). Hence we use a work queue.
+ */
+ if (lp->phy_type != 0)
+ schedule_work(&lp->phy_configure);
+
+ /* We can accept TX packets again */
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+/*
+ * This routine will, depending on the values passed to it,
+ * either make it accept multicast packets, go into
+ * promiscuous mode (for TCPDUMP and cousins) or accept
+ * a select set of multicast packets
+ */
+static void smc911x_set_multicast_list(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int multicast_table[2];
+ unsigned int mcr, update_multicast = 0;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_GET_MAC_CR(lp, mcr);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ if (dev->flags & IFF_PROMISC) {
+
+ DBG(SMC_DEBUG_MISC, "%s: RCR_PRMS\n", dev->name);
+ mcr |= MAC_CR_PRMS_;
+ }
+ /*
+ * Here, I am setting this to accept all multicast packets.
+ * I don't need to zero the multicast table, because the flag is
+	 * checked before the table is used
+ */
+ else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
+ DBG(SMC_DEBUG_MISC, "%s: RCR_ALMUL\n", dev->name);
+ mcr |= MAC_CR_MCPAS_;
+ }
+
+ /*
+ * This sets the internal hardware table to filter out unwanted
+ * multicast packets before they take up memory.
+ *
+	 * The SMC chip uses a hash table where the high 6 bits of the CRC of
+	 * the address form the index into the table. If the indexed bit is 1,
+	 * the multicast packet is accepted. Otherwise, it's dropped silently.
+	 *
+	 * To use those 6 bits as an index into the table, the highest bit
+	 * selects which of the two 32 bit registers to use, while the low
+	 * 5 bits select the bit within that register.
+ */
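+	/* Worked example: a CRC whose top 6 bits are 101010b gives
+	 * position 42; 42 >> 5 = 1 selects multicast_table[1] and
+	 * 42 & 0x1f = 10 selects bit 10 within it, as the loop below does.
+	 */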
+ else if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+
+		/* Set the Hash Perfect filtering mode */
+ mcr |= MAC_CR_HPFILT_;
+
+ /* start with a table of all zeros: reject all */
+ memset(multicast_table, 0, sizeof(multicast_table));
+
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 position;
+
+ /* upper 6 bits are used as hash index */
+ position = ether_crc(ETH_ALEN, ha->addr)>>26;
+
+ multicast_table[position>>5] |= 1 << (position&0x1f);
+ }
+
+ /* be sure I get rid of flags I might have set */
+ mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
+
+ /* now, the table can be loaded into the chipset */
+ update_multicast = 1;
+ } else {
+ DBG(SMC_DEBUG_MISC, "%s: ~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n",
+ dev->name);
+ mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
+
+ /*
+ * since I'm disabling all multicast entirely, I need to
+ * clear the multicast list
+ */
+ memset(multicast_table, 0, sizeof(multicast_table));
+ update_multicast = 1;
+ }
+
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_SET_MAC_CR(lp, mcr);
+ if (update_multicast) {
+ DBG(SMC_DEBUG_MISC,
+ "%s: update mcast hash table 0x%08x 0x%08x\n",
+ dev->name, multicast_table[0], multicast_table[1]);
+ SMC_SET_HASHL(lp, multicast_table[0]);
+ SMC_SET_HASHH(lp, multicast_table[1]);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+
+/*
+ * Open and Initialize the board
+ *
+ * Set up everything, reset the card, etc..
+ */
+static int
+smc911x_open(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+
+ /*
+	 * Check that the address is valid. If it's not, refuse
+ * to bring the device up. The user must specify an
+ * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
+ */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ PRINTK("%s: no valid ethernet hw addr\n", __func__);
+ return -EINVAL;
+ }
+
+ /* reset the hardware */
+ smc911x_reset(dev);
+
+ /* Configure the PHY, initialize the link state */
+ smc911x_phy_configure(&lp->phy_configure);
+
+ /* Turn on Tx + Rx */
+ smc911x_enable(dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/*
+ * smc911x_close
+ *
+ * this makes the board clean up everything that it can
+ * and not talk to the outside world. Caused by
+ * an 'ifconfig ethX down'
+ */
+static int smc911x_close(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ /* clear everything */
+ smc911x_shutdown(dev);
+
+ if (lp->phy_type != 0) {
+ /* We need to ensure that no calls to
+ * smc911x_phy_configure are pending.
+ */
+ cancel_work_sync(&lp->phy_configure);
+ smc911x_phy_powerdown(dev, lp->mii.phy_id);
+ }
+
+ if (lp->pending_tx_skb) {
+ dev_kfree_skb(lp->pending_tx_skb);
+ lp->pending_tx_skb = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * Ethtool support
+ */
+static int
+smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int ret, status;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ cmd->maxtxpkt = 1;
+ cmd->maxrxpkt = 1;
+
+ if (lp->phy_type != 0) {
+ spin_lock_irqsave(&lp->lock, flags);
+ ret = mii_ethtool_gset(&lp->mii, cmd);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ } else {
+ cmd->supported = SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_TP | SUPPORTED_AUI;
+
+ if (lp->ctl_rspeed == 10)
+ ethtool_cmd_speed_set(cmd, SPEED_10);
+ else if (lp->ctl_rspeed == 100)
+ ethtool_cmd_speed_set(cmd, SPEED_100);
+
+ cmd->autoneg = AUTONEG_DISABLE;
+ if (lp->mii.phy_id==1)
+ cmd->transceiver = XCVR_INTERNAL;
+ else
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->port = 0;
+ SMC_GET_PHY_SPECIAL(lp, lp->mii.phy_id, status);
+ cmd->duplex =
+ (status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int
+smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int ret;
+ unsigned long flags;
+
+ if (lp->phy_type != 0) {
+ spin_lock_irqsave(&lp->lock, flags);
+ ret = mii_ethtool_sset(&lp->mii, cmd);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ } else {
+ if (cmd->autoneg != AUTONEG_DISABLE ||
+ cmd->speed != SPEED_10 ||
+ (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
+ (cmd->port != PORT_TP && cmd->port != PORT_AUI))
+ return -EINVAL;
+
+ lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
+
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void
+smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, CARDNAME, sizeof(info->driver));
+ strncpy(info->version, version, sizeof(info->version));
+ strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
+}
+
+static int smc911x_ethtool_nwayreset(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int ret = -EINVAL;
+ unsigned long flags;
+
+ if (lp->phy_type != 0) {
+ spin_lock_irqsave(&lp->lock, flags);
+ ret = mii_nway_restart(&lp->mii);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+
+ return ret;
+}
+
+static u32 smc911x_ethtool_getmsglevel(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ return lp->msg_enable;
+}
+
+static void smc911x_ethtool_setmsglevel(struct net_device *dev, u32 level)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ lp->msg_enable = level;
+}
+
+static int smc911x_ethtool_getregslen(struct net_device *dev)
+{
+ /* System regs + MAC regs + PHY regs */
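+	/* Assuming the usual LAN9118 layout this works out to
+	 * (0xB0-0x50)/4+1 = 25 system registers, plus 12 MAC CSRs
+	 * (if MAC_CR..WUCSR are CSR indices 0x01..0x0C), plus 32 PHY
+	 * registers: (25+12+32)*4 = 276 bytes.
+	 */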
+ return (((E2P_CMD - ID_REV)/4 + 1) +
+ (WUCSR - MAC_CR)+1 + 32) * sizeof(u32);
+}
+
+static void smc911x_ethtool_getregs(struct net_device *dev,
+ struct ethtool_regs* regs, void *buf)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long flags;
+ u32 reg,i,j=0;
+ u32 *data = (u32*)buf;
+
+ regs->version = lp->version;
+ for(i=ID_REV;i<=E2P_CMD;i+=4) {
+ data[j++] = SMC_inl(lp, i);
+ }
+ for(i=MAC_CR;i<=WUCSR;i++) {
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_GET_MAC_CSR(lp, i, reg);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ data[j++] = reg;
+ }
+ for(i=0;i<=31;i++) {
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_GET_MII(lp, i, lp->mii.phy_id, reg);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ data[j++] = reg & 0xFFFF;
+ }
+}
+
+static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int timeout;
+ int e2p_cmd;
+
+ e2p_cmd = SMC_GET_E2P_CMD(lp);
+ for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
+ if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
+ PRINTK("%s: %s timeout waiting for EEPROM to respond\n",
+ dev->name, __func__);
+ return -EFAULT;
+ }
+ mdelay(1);
+ e2p_cmd = SMC_GET_E2P_CMD(lp);
+ }
+ if (timeout == 0) {
+ PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n",
+ dev->name, __func__);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev,
+ int cmd, int addr)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int ret;
+
+ if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
+ return ret;
+ SMC_SET_E2P_CMD(lp, E2P_CMD_EPC_BUSY_ |
+ ((cmd) & (0x7<<28)) |
+ ((addr) & 0xFF));
+ return 0;
+}
+
+static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev,
+ u8 *data)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int ret;
+
+ if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
+ return ret;
+ *data = SMC_GET_E2P_DATA(lp);
+ return 0;
+}
+
+static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev,
+ u8 data)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int ret;
+
+ if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
+ return ret;
+ SMC_SET_E2P_DATA(lp, data);
+ return 0;
+}
+
+static int smc911x_ethtool_geteeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ u8 eebuf[SMC911X_EEPROM_LEN];
+ int i, ret;
+
+ for(i=0;i<SMC911X_EEPROM_LEN;i++) {
+ if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_READ_, i ))!=0)
+ return ret;
+ if ((ret=smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i]))!=0)
+ return ret;
+ }
+ memcpy(data, eebuf+eeprom->offset, eeprom->len);
+ return 0;
+}
+
+static int smc911x_ethtool_seteeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ int i, ret;
+
+ /* Enable erase */
+ if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_EWEN_, 0 ))!=0)
+ return ret;
+ for(i=eeprom->offset;i<(eeprom->offset+eeprom->len);i++) {
+ /* erase byte */
+ if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_ERASE_, i ))!=0)
+ return ret;
+ /* write byte */
+		if ((ret=smc911x_ethtool_write_eeprom_byte(dev, *data++))!=0)
+ return ret;
+ if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i ))!=0)
+ return ret;
+ }
+ return 0;
+}
+
+static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
+{
+ return SMC911X_EEPROM_LEN;
+}
+
+static const struct ethtool_ops smc911x_ethtool_ops = {
+ .get_settings = smc911x_ethtool_getsettings,
+ .set_settings = smc911x_ethtool_setsettings,
+ .get_drvinfo = smc911x_ethtool_getdrvinfo,
+ .get_msglevel = smc911x_ethtool_getmsglevel,
+ .set_msglevel = smc911x_ethtool_setmsglevel,
+ .nway_reset = smc911x_ethtool_nwayreset,
+ .get_link = ethtool_op_get_link,
+ .get_regs_len = smc911x_ethtool_getregslen,
+ .get_regs = smc911x_ethtool_getregs,
+ .get_eeprom_len = smc911x_ethtool_geteeprom_len,
+ .get_eeprom = smc911x_ethtool_geteeprom,
+ .set_eeprom = smc911x_ethtool_seteeprom,
+};
+
+/*
+ * smc911x_findirq
+ *
+ * This routine has a simple purpose -- make the SMC chip generate an
+ * interrupt, so an auto-detect routine can detect it and find the IRQ.
+ */
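+/* probe_irq_on() notes which IRQ lines are currently unclaimed; after the
+ * forced software interrupt below fires, probe_irq_off() reports the line
+ * that became active, which is taken to be the chip's IRQ.
+ */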
+static int __devinit smc911x_findirq(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int timeout = 20;
+ unsigned long cookie;
+
+ DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+
+ cookie = probe_irq_on();
+
+ /*
+ * Force a SW interrupt
+ */
+
+ SMC_SET_INT_EN(lp, INT_EN_SW_INT_EN_);
+
+ /*
+ * Wait until positive that the interrupt has been generated
+ */
+ do {
+ int int_status;
+ udelay(10);
+ int_status = SMC_GET_INT_EN(lp);
+ if (int_status & INT_EN_SW_INT_EN_)
+ break; /* got the interrupt */
+ } while (--timeout);
+
+ /*
+ * there is really nothing that I can do here if timeout fails,
+	 * as probe_irq_off() will return 0 anyway, which is what I
+ * want in this case. Plus, the clean up is needed in both
+ * cases.
+ */
+
+ /* and disable all interrupts again */
+ SMC_SET_INT_EN(lp, 0);
+
+ /* and return what I found */
+ return probe_irq_off(cookie);
+}
+
+static const struct net_device_ops smc911x_netdev_ops = {
+ .ndo_open = smc911x_open,
+ .ndo_stop = smc911x_close,
+ .ndo_start_xmit = smc911x_hard_start_xmit,
+ .ndo_tx_timeout = smc911x_timeout,
+ .ndo_set_rx_mode = smc911x_set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = smc911x_poll_controller,
+#endif
+};
+
+/*
+ * Function: smc911x_probe(unsigned long ioaddr)
+ *
+ * Purpose:
+ * Tests to see if a given ioaddr points to an SMC911x chip.
+ * Returns a 0 on success
+ *
+ * Algorithm:
+ * (1) see if the endian word is OK
+ * (2) see if I recognize the chip ID in the appropriate register
+ *
+ * Here I do typical initialization tasks.
+ *
+ * o Initialize the structure if needed
+ * o print out my vanity message if not done so already
+ * o print out what type of hardware is detected
+ * o print out the ethernet address
+ * o find the IRQ
+ * o set up my private data
+ * o configure the dev structure with my subroutines
+ * o actually GRAB the irq.
+ * o GRAB the region
+ */
+static int __devinit smc911x_probe(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int i, retval;
+ unsigned int val, chip_id, revision;
+ const char *version_string;
+ unsigned long irq_flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+
+ /* First, see if the endian word is recognized */
+ val = SMC_GET_BYTE_TEST(lp);
+ DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val);
+ if (val != 0x87654321) {
+ printk(KERN_ERR "Invalid chip endian 0x%08x\n",val);
+ retval = -ENODEV;
+ goto err_out;
+ }
+
+ /*
+ * check if the revision register is something that I
+ * recognize. These might need to be added to later,
+ * as future revisions could be added.
+ */
+ chip_id = SMC_GET_PN(lp);
+ DBG(SMC_DEBUG_MISC, "%s: id probe returned 0x%04x\n", CARDNAME, chip_id);
+ for(i=0;chip_ids[i].id != 0; i++) {
+ if (chip_ids[i].id == chip_id) break;
+ }
+ if (!chip_ids[i].id) {
+ printk(KERN_ERR "Unknown chip ID %04x\n", chip_id);
+ retval = -ENODEV;
+ goto err_out;
+ }
+ version_string = chip_ids[i].name;
+
+ revision = SMC_GET_REV(lp);
+ DBG(SMC_DEBUG_MISC, "%s: revision = 0x%04x\n", CARDNAME, revision);
+
+ /* At this point I'll assume that the chip is an SMC911x. */
+ DBG(SMC_DEBUG_MISC, "%s: Found a %s\n", CARDNAME, chip_ids[i].name);
+
+ /* Validate the TX FIFO size requested */
+ if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) {
+ printk(KERN_ERR "Invalid TX FIFO size requested %d\n", tx_fifo_kb);
+ retval = -EINVAL;
+ goto err_out;
+ }
+
+ /* fill in some of the fields */
+ lp->version = chip_ids[i].id;
+ lp->revision = revision;
+ lp->tx_fifo_kb = tx_fifo_kb;
+ /* Reverse calculate the RX FIFO size from the TX */
+ lp->tx_fifo_size=(lp->tx_fifo_kb<<10) - 512;
+ lp->rx_fifo_size= ((0x4000 - 512 - lp->tx_fifo_size) / 16) * 15;
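+	/* Worked example: for tx_fifo_kb=6, tx_fifo_size = 6144-512 = 5632
+	 * and rx_fifo_size = ((16384-512-5632)/16)*15 = 9600, matching the
+	 * "9600 Rx Data Fifo Size" case below.
+	 */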
+
+ /* Set the automatic flow control values */
+ switch(lp->tx_fifo_kb) {
+ /*
+ * AFC_HI is about ((Rx Data Fifo Size)*2/3)/64
+ * AFC_LO is AFC_HI/2
+ * BACK_DUR is about 5uS*(AFC_LO) rounded down
+ */
+ case 2:/* 13440 Rx Data Fifo Size */
+ lp->afc_cfg=0x008C46AF;break;
+ case 3:/* 12480 Rx Data Fifo Size */
+ lp->afc_cfg=0x0082419F;break;
+ case 4:/* 11520 Rx Data Fifo Size */
+ lp->afc_cfg=0x00783C9F;break;
+ case 5:/* 10560 Rx Data Fifo Size */
+ lp->afc_cfg=0x006E374F;break;
+ case 6:/* 9600 Rx Data Fifo Size */
+ lp->afc_cfg=0x0064328F;break;
+ case 7:/* 8640 Rx Data Fifo Size */
+ lp->afc_cfg=0x005A2D7F;break;
+ case 8:/* 7680 Rx Data Fifo Size */
+ lp->afc_cfg=0x0050287F;break;
+ case 9:/* 6720 Rx Data Fifo Size */
+ lp->afc_cfg=0x0046236F;break;
+ case 10:/* 5760 Rx Data Fifo Size */
+ lp->afc_cfg=0x003C1E6F;break;
+ case 11:/* 4800 Rx Data Fifo Size */
+ lp->afc_cfg=0x0032195F;break;
+ /*
+ * AFC_HI is ~1520 bytes less than RX Data Fifo Size
+ * AFC_LO is AFC_HI/2
+ * BACK_DUR is about 5uS*(AFC_LO) rounded down
+ */
+ case 12:/* 3840 Rx Data Fifo Size */
+ lp->afc_cfg=0x0024124F;break;
+ case 13:/* 2880 Rx Data Fifo Size */
+ lp->afc_cfg=0x0015073F;break;
+ case 14:/* 1920 Rx Data Fifo Size */
+ lp->afc_cfg=0x0006032F;break;
+ default:
+ PRINTK("%s: ERROR -- no AFC_CFG setting found",
+ dev->name);
+ break;
+ }
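+	/* Worked example for one row above: with tx_fifo_kb=8 the Rx data
+	 * FIFO is 7680 bytes, so AFC_HI = (7680*2/3)/64 = 80 = 0x50 and
+	 * AFC_LO = 40 = 0x28, matching the 0x50 and 0x28 bytes of 0x0050287F.
+	 */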
+
+ DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX,
+ "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME,
+ lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg);
+
+ spin_lock_init(&lp->lock);
+
+ /* Get the MAC address */
+ SMC_GET_MAC_ADDR(lp, dev->dev_addr);
+
+ /* now, reset the chip, and put it into a known state */
+ smc911x_reset(dev);
+
+ /*
+ * If dev->irq is 0, then the device has to be banged on to see
+ * what the IRQ is.
+ *
+ * Specifying an IRQ is done with the assumption that the user knows
+ * what (s)he is doing. No checking is done!!!!
+ */
+ if (dev->irq < 1) {
+ int trials;
+
+ trials = 3;
+ while (trials--) {
+ dev->irq = smc911x_findirq(dev);
+ if (dev->irq)
+ break;
+ /* kick the card and try again */
+ smc911x_reset(dev);
+ }
+ }
+ if (dev->irq == 0) {
+ printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n",
+ dev->name);
+ retval = -ENODEV;
+ goto err_out;
+ }
+ dev->irq = irq_canonicalize(dev->irq);
+
+ /* Fill in the fields of the device structure with ethernet values. */
+ ether_setup(dev);
+
+ dev->netdev_ops = &smc911x_netdev_ops;
+ dev->watchdog_timeo = msecs_to_jiffies(watchdog);
+ dev->ethtool_ops = &smc911x_ethtool_ops;
+
+ INIT_WORK(&lp->phy_configure, smc911x_phy_configure);
+ lp->mii.phy_id_mask = 0x1f;
+ lp->mii.reg_num_mask = 0x1f;
+ lp->mii.force_media = 0;
+ lp->mii.full_duplex = 0;
+ lp->mii.dev = dev;
+ lp->mii.mdio_read = smc911x_phy_read;
+ lp->mii.mdio_write = smc911x_phy_write;
+
+ /*
+ * Locate the phy, if any.
+ */
+ smc911x_phy_detect(dev);
+
+ /* Set default parameters */
+ lp->msg_enable = NETIF_MSG_LINK;
+ lp->ctl_rfduplx = 1;
+ lp->ctl_rspeed = 100;
+
+#ifdef SMC_DYNAMIC_BUS_CONFIG
+ irq_flags = lp->cfg.irq_flags;
+#else
+ irq_flags = IRQF_SHARED | SMC_IRQ_SENSE;
+#endif
+
+ /* Grab the IRQ */
+ retval = request_irq(dev->irq, smc911x_interrupt,
+ irq_flags, dev->name, dev);
+ if (retval)
+ goto err_out;
+
+#ifdef SMC_USE_DMA
+ lp->rxdma = SMC_DMA_REQUEST(dev, smc911x_rx_dma_irq);
+ lp->txdma = SMC_DMA_REQUEST(dev, smc911x_tx_dma_irq);
+ lp->rxdma_active = 0;
+ lp->txdma_active = 0;
+ dev->dma = lp->rxdma;
+#endif
+
+ retval = register_netdev(dev);
+ if (retval == 0) {
+ /* now, print out the card info, in a short format.. */
+ printk("%s: %s (rev %d) at %#lx IRQ %d",
+ dev->name, version_string, lp->revision,
+ dev->base_addr, dev->irq);
+
+#ifdef SMC_USE_DMA
+ if (lp->rxdma != -1)
+ printk(" RXDMA %d ", lp->rxdma);
+
+ if (lp->txdma != -1)
+ printk("TXDMA %d", lp->txdma);
+#endif
+ printk("\n");
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ printk("%s: Invalid ethernet MAC address. Please "
+ "set using ifconfig\n", dev->name);
+ } else {
+ /* Print the Ethernet address */
+ printk("%s: Ethernet addr: %pM\n",
+ dev->name, dev->dev_addr);
+ }
+
+ if (lp->phy_type == 0) {
+ PRINTK("%s: No PHY found\n", dev->name);
+ } else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) {
+ PRINTK("%s: LAN911x Internal PHY\n", dev->name);
+ } else {
+ PRINTK("%s: External PHY 0x%08x\n", dev->name, lp->phy_type);
+ }
+ }
+
+err_out:
+#ifdef SMC_USE_DMA
+ if (retval) {
+ if (lp->rxdma != -1) {
+ SMC_DMA_FREE(dev, lp->rxdma);
+ }
+ if (lp->txdma != -1) {
+ SMC_DMA_FREE(dev, lp->txdma);
+ }
+ }
+#endif
+ return retval;
+}
+
+/*
+ * smc911x_init(void)
+ *
+ * Output:
+ * 0 --> there is a device
+ * anything else, error
+ */
+static int __devinit smc911x_drv_probe(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+ struct resource *res;
+ struct smc911x_local *lp;
+ unsigned int *addr;
+ int ret;
+
+ DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /*
+ * Request the regions.
+ */
+ if (!request_mem_region(res->start, SMC911X_IO_EXTENT, CARDNAME)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ndev = alloc_etherdev(sizeof(struct smc911x_local));
+ if (!ndev) {
+ printk("%s: could not allocate device.\n", CARDNAME);
+ ret = -ENOMEM;
+ goto release_1;
+ }
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ndev->dma = (unsigned char)-1;
+ ndev->irq = platform_get_irq(pdev, 0);
+ lp = netdev_priv(ndev);
+ lp->netdev = ndev;
+#ifdef SMC_DYNAMIC_BUS_CONFIG
+ {
+ struct smc911x_platdata *pd = pdev->dev.platform_data;
+ if (!pd) {
+ ret = -EINVAL;
+ goto release_both;
+ }
+ memcpy(&lp->cfg, pd, sizeof(lp->cfg));
+ }
+#endif
+
+ addr = ioremap(res->start, SMC911X_IO_EXTENT);
+ if (!addr) {
+ ret = -ENOMEM;
+ goto release_both;
+ }
+
+ platform_set_drvdata(pdev, ndev);
+ lp->base = addr;
+ ndev->base_addr = res->start;
+ ret = smc911x_probe(ndev);
+ if (ret != 0) {
+ platform_set_drvdata(pdev, NULL);
+ iounmap(addr);
+release_both:
+ free_netdev(ndev);
+release_1:
+ release_mem_region(res->start, SMC911X_IO_EXTENT);
+out:
+ printk("%s: not found (%d).\n", CARDNAME, ret);
+ }
+#ifdef SMC_USE_DMA
+ else {
+ lp->physaddr = res->start;
+ lp->dev = &pdev->dev;
+ }
+#endif
+
+ return ret;
+}
+
+static int __devexit smc911x_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smc911x_local *lp = netdev_priv(ndev);
+ struct resource *res;
+
+ DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+ platform_set_drvdata(pdev, NULL);
+
+ unregister_netdev(ndev);
+
+ free_irq(ndev->irq, ndev);
+
+#ifdef SMC_USE_DMA
+ {
+ if (lp->rxdma != -1) {
+			SMC_DMA_FREE(ndev, lp->rxdma);
+ }
+ if (lp->txdma != -1) {
+			SMC_DMA_FREE(ndev, lp->txdma);
+ }
+ }
+#endif
+ iounmap(lp->base);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, SMC911X_IO_EXTENT);
+
+ free_netdev(ndev);
+ return 0;
+}
+
+static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct net_device *ndev = platform_get_drvdata(dev);
+ struct smc911x_local *lp = netdev_priv(ndev);
+
+ DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+ if (ndev) {
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ smc911x_shutdown(ndev);
+#if POWER_DOWN
+ /* Set D2 - Energy detect only setting */
+ SMC_SET_PMT_CTRL(lp, 2<<12);
+#endif
+ }
+ }
+ return 0;
+}
+
+static int smc911x_drv_resume(struct platform_device *dev)
+{
+ struct net_device *ndev = platform_get_drvdata(dev);
+
+ DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
+ if (ndev) {
+ struct smc911x_local *lp = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ smc911x_reset(ndev);
+ if (lp->phy_type != 0)
+ smc911x_phy_configure(&lp->phy_configure);
+ smc911x_enable(ndev);
+ netif_device_attach(ndev);
+ }
+ }
+ return 0;
+}
+
+static struct platform_driver smc911x_driver = {
+ .probe = smc911x_drv_probe,
+ .remove = __devexit_p(smc911x_drv_remove),
+ .suspend = smc911x_drv_suspend,
+ .resume = smc911x_drv_resume,
+ .driver = {
+ .name = CARDNAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init smc911x_init(void)
+{
+ return platform_driver_register(&smc911x_driver);
+}
+
+static void __exit smc911x_cleanup(void)
+{
+ platform_driver_unregister(&smc911x_driver);
+}
+
+module_init(smc911x_init);
+module_exit(smc911x_cleanup);
diff --git a/drivers/net/ethernet/smsc/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h
new file mode 100644
index 000000000000..3269292efecc
--- /dev/null
+++ b/drivers/net/ethernet/smsc/smc911x.h
@@ -0,0 +1,924 @@
+/*------------------------------------------------------------------------
+ . smc911x.h - macros for SMSC's LAN911{5,6,7,8} single-chip Ethernet device.
+ .
+ . Copyright (C) 2005 Sensoria Corp.
+ . Derived from the unified SMC91x driver by Nicolas Pitre
+ .
+ . This program is free software; you can redistribute it and/or modify
+ . it under the terms of the GNU General Public License as published by
+ . the Free Software Foundation; either version 2 of the License, or
+ . (at your option) any later version.
+ .
+ . This program is distributed in the hope that it will be useful,
+ . but WITHOUT ANY WARRANTY; without even the implied warranty of
+ . MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ . GNU General Public License for more details.
+ .
+ . You should have received a copy of the GNU General Public License
+ . along with this program; if not, write to the Free Software
+ . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ .
+ . Information contained in this file was obtained from the LAN9118
+ . manual from SMC. To get a copy, if you really want one, you can find
+ . information under www.smsc.com.
+ .
+ . Authors
+ . Dustin McIntire <dustin@sensoria.com>
+ .
+ ---------------------------------------------------------------------------*/
+#ifndef _SMC911X_H_
+#define _SMC911X_H_
+
+#include <linux/smc911x.h>
+/*
+ * Use the DMA feature on PXA chips
+ */
+#ifdef CONFIG_ARCH_PXA
+ #define SMC_USE_PXA_DMA 1
+ #define SMC_USE_16BIT 0
+ #define SMC_USE_32BIT 1
+ #define SMC_IRQ_SENSE IRQF_TRIGGER_FALLING
+#elif defined(CONFIG_SH_MAGIC_PANEL_R2)
+ #define SMC_USE_16BIT 0
+ #define SMC_USE_32BIT 1
+ #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
+#elif defined(CONFIG_ARCH_OMAP3)
+ #define SMC_USE_16BIT 0
+ #define SMC_USE_32BIT 1
+ #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
+ #define SMC_MEM_RESERVED 1
+#elif defined(CONFIG_ARCH_OMAP2)
+ #define SMC_USE_16BIT 0
+ #define SMC_USE_32BIT 1
+ #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
+ #define SMC_MEM_RESERVED 1
+#else
+/*
+ * Default configuration
+ */
+
+#define SMC_DYNAMIC_BUS_CONFIG
+#endif
+
+#ifdef SMC_USE_PXA_DMA
+#define SMC_USE_DMA
+#endif
+
+/* store this information for the driver.. */
+struct smc911x_local {
+ /*
+ * If I have to wait until the DMA is finished and ready to reload a
+ * packet, I will store the skbuff here. Then, the DMA will send it
+ * out and free it.
+ */
+ struct sk_buff *pending_tx_skb;
+
+ /* version/revision of the SMC911x chip */
+ u16 version;
+ u16 revision;
+
+ /* FIFO sizes */
+ int tx_fifo_kb;
+ int tx_fifo_size;
+ int rx_fifo_size;
+ int afc_cfg;
+
+ /* Contains the current active receive/phy mode */
+ int ctl_rfduplx;
+ int ctl_rspeed;
+
+ u32 msg_enable;
+ u32 phy_type;
+ struct mii_if_info mii;
+
+ /* work queue */
+ struct work_struct phy_configure;
+
+ int tx_throttle;
+ spinlock_t lock;
+
+ struct net_device *netdev;
+
+#ifdef SMC_USE_DMA
+ /* DMA needs the physical address of the chip */
+ u_long physaddr;
+ int rxdma;
+ int txdma;
+ int rxdma_active;
+ int txdma_active;
+ struct sk_buff *current_rx_skb;
+ struct sk_buff *current_tx_skb;
+ struct device *dev;
+#endif
+ void __iomem *base;
+#ifdef SMC_DYNAMIC_BUS_CONFIG
+ struct smc911x_platdata cfg;
+#endif
+};
+
+/*
+ * Define the bus width specific IO macros
+ */
+
+#ifdef SMC_DYNAMIC_BUS_CONFIG
+static inline unsigned int SMC_inl(struct smc911x_local *lp, int reg)
+{
+ void __iomem *ioaddr = lp->base + reg;
+
+ if (lp->cfg.flags & SMC911X_USE_32BIT)
+ return readl(ioaddr);
+
+ if (lp->cfg.flags & SMC911X_USE_16BIT)
+ return readw(ioaddr) | (readw(ioaddr + 2) << 16);
+
+ BUG();
+}
+
+static inline void SMC_outl(unsigned int value, struct smc911x_local *lp,
+ int reg)
+{
+ void __iomem *ioaddr = lp->base + reg;
+
+ if (lp->cfg.flags & SMC911X_USE_32BIT) {
+ writel(value, ioaddr);
+ return;
+ }
+
+ if (lp->cfg.flags & SMC911X_USE_16BIT) {
+ writew(value & 0xffff, ioaddr);
+ writew(value >> 16, ioaddr + 2);
+ return;
+ }
+
+ BUG();
+}
+
+static inline void SMC_insl(struct smc911x_local *lp, int reg,
+ void *addr, unsigned int count)
+{
+ void __iomem *ioaddr = lp->base + reg;
+
+ if (lp->cfg.flags & SMC911X_USE_32BIT) {
+ readsl(ioaddr, addr, count);
+ return;
+ }
+
+ if (lp->cfg.flags & SMC911X_USE_16BIT) {
+ readsw(ioaddr, addr, count * 2);
+ return;
+ }
+
+ BUG();
+}
+
+static inline void SMC_outsl(struct smc911x_local *lp, int reg,
+ void *addr, unsigned int count)
+{
+ void __iomem *ioaddr = lp->base + reg;
+
+ if (lp->cfg.flags & SMC911X_USE_32BIT) {
+ writesl(ioaddr, addr, count);
+ return;
+ }
+
+ if (lp->cfg.flags & SMC911X_USE_16BIT) {
+ writesw(ioaddr, addr, count * 2);
+ return;
+ }
+
+ BUG();
+}
+#else
+#if SMC_USE_16BIT
+#define SMC_inl(lp, r) ((readw((lp)->base + (r)) & 0xFFFF) + (readw((lp)->base + (r) + 2) << 16))
+#define SMC_outl(v, lp, r) \
+ do{ \
+ writew(v & 0xFFFF, (lp)->base + (r)); \
+ writew(v >> 16, (lp)->base + (r) + 2); \
+ } while (0)
+#define SMC_insl(lp, r, p, l) readsw((short*)((lp)->base + (r)), p, l*2)
+#define SMC_outsl(lp, r, p, l) writesw((short*)((lp)->base + (r)), p, l*2)
+
+#elif SMC_USE_32BIT
+#define SMC_inl(lp, r) readl((lp)->base + (r))
+#define SMC_outl(v, lp, r) writel(v, (lp)->base + (r))
+#define SMC_insl(lp, r, p, l) readsl((int*)((lp)->base + (r)), p, l)
+#define SMC_outsl(lp, r, p, l) writesl((int*)((lp)->base + (r)), p, l)
+
+#endif /* SMC_USE_16BIT */
+#endif /* SMC_DYNAMIC_BUS_CONFIG */
+
+
+#ifdef SMC_USE_PXA_DMA
+
+#include <mach/dma.h>
+
+/*
+ * Define the request and free functions
+ * These are unfortunately architecture specific as no generic allocation
+ * mechanism exists
+ */
+#define SMC_DMA_REQUEST(dev, handler) \
+ pxa_request_dma(dev->name, DMA_PRIO_LOW, handler, dev)
+
+#define SMC_DMA_FREE(dev, dma) \
+ pxa_free_dma(dma)
+
+#define SMC_DMA_ACK_IRQ(dev, dma) \
+{ \
+ if (DCSR(dma) & DCSR_BUSERR) { \
+ printk("%s: DMA %d bus error!\n", dev->name, dma); \
+ } \
+ DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR; \
+}
+
+/*
+ * Use a DMA for RX and TX packets.
+ */
+#include <linux/dma-mapping.h>
+
+static dma_addr_t rx_dmabuf, tx_dmabuf;
+static int rx_dmalen, tx_dmalen;
+
+#ifdef SMC_insl
+#undef SMC_insl
+#define SMC_insl(lp, r, p, l) \
+ smc_pxa_dma_insl(lp, lp->physaddr, r, lp->rxdma, p, l)
+
+static inline void
+smc_pxa_dma_insl(struct smc911x_local *lp, u_long physaddr,
+ int reg, int dma, u_char *buf, int len)
+{
+ /* 64 bit alignment is required for memory to memory DMA */
+ if ((long)buf & 4) {
+ *((u32 *)buf) = SMC_inl(lp, reg);
+ buf += 4;
+ len--;
+ }
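+	/* e.g. a buffer at ...0x1004 (4 mod 8): one 32-bit word is read by
+	 * PIO above so the DMA transfer starts 8-byte aligned, and len
+	 * (counted in dwords at this point) shrinks by one.
+	 */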
+
+ len *= 4;
+ rx_dmabuf = dma_map_single(lp->dev, buf, len, DMA_FROM_DEVICE);
+ rx_dmalen = len;
+ DCSR(dma) = DCSR_NODESC;
+ DTADR(dma) = rx_dmabuf;
+ DSADR(dma) = physaddr + reg;
+ DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
+ DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & rx_dmalen));
+ DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+}
+#endif
+
+#ifdef SMC_outsl
+#undef SMC_outsl
+#define SMC_outsl(lp, r, p, l) \
+ smc_pxa_dma_outsl(lp, lp->physaddr, r, lp->txdma, p, l)
+
+static inline void
+smc_pxa_dma_outsl(struct smc911x_local *lp, u_long physaddr,
+ int reg, int dma, u_char *buf, int len)
+{
+ /* 64 bit alignment is required for memory to memory DMA */
+ if ((long)buf & 4) {
+ SMC_outl(*((u32 *)buf), lp, reg);
+ buf += 4;
+ len--;
+ }
+
+ len *= 4;
+ tx_dmabuf = dma_map_single(lp->dev, buf, len, DMA_TO_DEVICE);
+ tx_dmalen = len;
+ DCSR(dma) = DCSR_NODESC;
+ DSADR(dma) = tx_dmabuf;
+ DTADR(dma) = physaddr + reg;
+ DCMD(dma) = (DCMD_INCSRCADDR | DCMD_BURST32 |
+ DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & tx_dmalen));
+ DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+}
+#endif
+#endif /* SMC_USE_PXA_DMA */
+
+
+/* Chip Parameters and Register Definitions */
+
+#define SMC911X_TX_FIFO_LOW_THRESHOLD (1536*2)
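+/* i.e. 3072 bytes - room for two maximum-size (1536 byte) frame slots */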
+
+#define SMC911X_IO_EXTENT 0x100
+
+#define SMC911X_EEPROM_LEN 7
+
+/* Below are the register offsets and bit definitions
+ * of the Lan911x memory space
+ */
+#define RX_DATA_FIFO (0x00)
+
+#define TX_DATA_FIFO (0x20)
+#define TX_CMD_A_INT_ON_COMP_ (0x80000000)
+#define TX_CMD_A_INT_BUF_END_ALGN_ (0x03000000)
+#define TX_CMD_A_INT_4_BYTE_ALGN_ (0x00000000)
+#define TX_CMD_A_INT_16_BYTE_ALGN_ (0x01000000)
+#define TX_CMD_A_INT_32_BYTE_ALGN_ (0x02000000)
+#define TX_CMD_A_INT_DATA_OFFSET_ (0x001F0000)
+#define TX_CMD_A_INT_FIRST_SEG_ (0x00002000)
+#define TX_CMD_A_INT_LAST_SEG_ (0x00001000)
+#define TX_CMD_A_BUF_SIZE_ (0x000007FF)
+#define TX_CMD_B_PKT_TAG_ (0xFFFF0000)
+#define TX_CMD_B_ADD_CRC_DISABLE_ (0x00002000)
+#define TX_CMD_B_DISABLE_PADDING_ (0x00001000)
+#define TX_CMD_B_PKT_BYTE_LENGTH_ (0x000007FF)
+
+#define RX_STATUS_FIFO (0x40)
+#define RX_STS_PKT_LEN_ (0x3FFF0000)
+#define RX_STS_ES_ (0x00008000)
+#define RX_STS_BCST_ (0x00002000)
+#define RX_STS_LEN_ERR_ (0x00001000)
+#define RX_STS_RUNT_ERR_ (0x00000800)
+#define RX_STS_MCAST_ (0x00000400)
+#define RX_STS_TOO_LONG_ (0x00000080)
+#define RX_STS_COLL_ (0x00000040)
+#define RX_STS_ETH_TYPE_ (0x00000020)
+#define RX_STS_WDOG_TMT_ (0x00000010)
+#define RX_STS_MII_ERR_ (0x00000008)
+#define RX_STS_DRIBBLING_ (0x00000004)
+#define RX_STS_CRC_ERR_ (0x00000002)
+#define RX_STATUS_FIFO_PEEK (0x44)
+#define TX_STATUS_FIFO (0x48)
+#define TX_STS_TAG_ (0xFFFF0000)
+#define TX_STS_ES_ (0x00008000)
+#define TX_STS_LOC_ (0x00000800)
+#define TX_STS_NO_CARR_ (0x00000400)
+#define TX_STS_LATE_COLL_ (0x00000200)
+#define TX_STS_MANY_COLL_ (0x00000100)
+#define TX_STS_COLL_CNT_ (0x00000078)
+#define TX_STS_MANY_DEFER_ (0x00000004)
+#define TX_STS_UNDERRUN_ (0x00000002)
+#define TX_STS_DEFERRED_ (0x00000001)
+#define TX_STATUS_FIFO_PEEK (0x4C)
+#define ID_REV (0x50)
+#define ID_REV_CHIP_ID_ (0xFFFF0000) /* RO */
+#define ID_REV_REV_ID_ (0x0000FFFF) /* RO */
+
+#define INT_CFG (0x54)
+#define INT_CFG_INT_DEAS_ (0xFF000000) /* R/W */
+#define INT_CFG_INT_DEAS_CLR_ (0x00004000)
+#define INT_CFG_INT_DEAS_STS_ (0x00002000)
+#define INT_CFG_IRQ_INT_ (0x00001000) /* RO */
+#define INT_CFG_IRQ_EN_ (0x00000100) /* R/W */
+#define INT_CFG_IRQ_POL_ (0x00000010) /* R/W Not Affected by SW Reset */
+#define INT_CFG_IRQ_TYPE_ (0x00000001) /* R/W Not Affected by SW Reset */
+
+#define INT_STS (0x58)
+#define INT_STS_SW_INT_ (0x80000000) /* R/WC */
+#define INT_STS_TXSTOP_INT_ (0x02000000) /* R/WC */
+#define INT_STS_RXSTOP_INT_ (0x01000000) /* R/WC */
+#define INT_STS_RXDFH_INT_ (0x00800000) /* R/WC */
+#define INT_STS_RXDF_INT_ (0x00400000) /* R/WC */
+#define INT_STS_TX_IOC_ (0x00200000) /* R/WC */
+#define INT_STS_RXD_INT_ (0x00100000) /* R/WC */
+#define INT_STS_GPT_INT_ (0x00080000) /* R/WC */
+#define INT_STS_PHY_INT_ (0x00040000) /* RO */
+#define INT_STS_PME_INT_ (0x00020000) /* R/WC */
+#define INT_STS_TXSO_ (0x00010000) /* R/WC */
+#define INT_STS_RWT_ (0x00008000) /* R/WC */
+#define INT_STS_RXE_ (0x00004000) /* R/WC */
+#define INT_STS_TXE_ (0x00002000) /* R/WC */
+//#define INT_STS_ERX_ (0x00001000) /* R/WC */
+#define INT_STS_TDFU_ (0x00000800) /* R/WC */
+#define INT_STS_TDFO_ (0x00000400) /* R/WC */
+#define INT_STS_TDFA_ (0x00000200) /* R/WC */
+#define INT_STS_TSFF_ (0x00000100) /* R/WC */
+#define INT_STS_TSFL_ (0x00000080) /* R/WC */
+//#define INT_STS_RXDF_ (0x00000040) /* R/WC */
+#define INT_STS_RDFO_ (0x00000040) /* R/WC */
+#define INT_STS_RDFL_ (0x00000020) /* R/WC */
+#define INT_STS_RSFF_ (0x00000010) /* R/WC */
+#define INT_STS_RSFL_ (0x00000008) /* R/WC */
+#define INT_STS_GPIO2_INT_ (0x00000004) /* R/WC */
+#define INT_STS_GPIO1_INT_ (0x00000002) /* R/WC */
+#define INT_STS_GPIO0_INT_ (0x00000001) /* R/WC */
+
+#define INT_EN (0x5C)
+#define INT_EN_SW_INT_EN_ (0x80000000) /* R/W */
+#define INT_EN_TXSTOP_INT_EN_ (0x02000000) /* R/W */
+#define INT_EN_RXSTOP_INT_EN_ (0x01000000) /* R/W */
+#define INT_EN_RXDFH_INT_EN_ (0x00800000) /* R/W */
+//#define INT_EN_RXDF_INT_EN_ (0x00400000) /* R/W */
+#define INT_EN_TIOC_INT_EN_ (0x00200000) /* R/W */
+#define INT_EN_RXD_INT_EN_ (0x00100000) /* R/W */
+#define INT_EN_GPT_INT_EN_ (0x00080000) /* R/W */
+#define INT_EN_PHY_INT_EN_ (0x00040000) /* R/W */
+#define INT_EN_PME_INT_EN_ (0x00020000) /* R/W */
+#define INT_EN_TXSO_EN_ (0x00010000) /* R/W */
+#define INT_EN_RWT_EN_ (0x00008000) /* R/W */
+#define INT_EN_RXE_EN_ (0x00004000) /* R/W */
+#define INT_EN_TXE_EN_ (0x00002000) /* R/W */
+//#define INT_EN_ERX_EN_ (0x00001000) /* R/W */
+#define INT_EN_TDFU_EN_ (0x00000800) /* R/W */
+#define INT_EN_TDFO_EN_ (0x00000400) /* R/W */
+#define INT_EN_TDFA_EN_ (0x00000200) /* R/W */
+#define INT_EN_TSFF_EN_ (0x00000100) /* R/W */
+#define INT_EN_TSFL_EN_ (0x00000080) /* R/W */
+//#define INT_EN_RXDF_EN_ (0x00000040) /* R/W */
+#define INT_EN_RDFO_EN_ (0x00000040) /* R/W */
+#define INT_EN_RDFL_EN_ (0x00000020) /* R/W */
+#define INT_EN_RSFF_EN_ (0x00000010) /* R/W */
+#define INT_EN_RSFL_EN_ (0x00000008) /* R/W */
+#define INT_EN_GPIO2_INT_ (0x00000004) /* R/W */
+#define INT_EN_GPIO1_INT_ (0x00000002) /* R/W */
+#define INT_EN_GPIO0_INT_ (0x00000001) /* R/W */
+
+#define BYTE_TEST (0x64)
+#define FIFO_INT (0x68)
+#define FIFO_INT_TX_AVAIL_LEVEL_ (0xFF000000) /* R/W */
+#define FIFO_INT_TX_STS_LEVEL_ (0x00FF0000) /* R/W */
+#define FIFO_INT_RX_AVAIL_LEVEL_ (0x0000FF00) /* R/W */
+#define FIFO_INT_RX_STS_LEVEL_ (0x000000FF) /* R/W */
+
+#define RX_CFG (0x6C)
+#define RX_CFG_RX_END_ALGN_ (0xC0000000) /* R/W */
+#define RX_CFG_RX_END_ALGN4_ (0x00000000) /* R/W */
+#define RX_CFG_RX_END_ALGN16_ (0x40000000) /* R/W */
+#define RX_CFG_RX_END_ALGN32_ (0x80000000) /* R/W */
+#define RX_CFG_RX_DMA_CNT_ (0x0FFF0000) /* R/W */
+#define RX_CFG_RX_DUMP_ (0x00008000) /* R/W */
+#define RX_CFG_RXDOFF_ (0x00001F00) /* R/W */
+//#define RX_CFG_RXBAD_ (0x00000001) /* R/W */
+
+#define TX_CFG (0x70)
+//#define TX_CFG_TX_DMA_LVL_ (0xE0000000) /* R/W */
+//#define TX_CFG_TX_DMA_CNT_ (0x0FFF0000) /* R/W Self Clearing */
+#define TX_CFG_TXS_DUMP_ (0x00008000) /* Self Clearing */
+#define TX_CFG_TXD_DUMP_ (0x00004000) /* Self Clearing */
+#define TX_CFG_TXSAO_ (0x00000004) /* R/W */
+#define TX_CFG_TX_ON_ (0x00000002) /* R/W */
+#define TX_CFG_STOP_TX_ (0x00000001) /* Self Clearing */
+
+#define HW_CFG (0x74)
+#define HW_CFG_TTM_ (0x00200000) /* R/W */
+#define HW_CFG_SF_ (0x00100000) /* R/W */
+#define HW_CFG_TX_FIF_SZ_ (0x000F0000) /* R/W */
+#define HW_CFG_TR_ (0x00003000) /* R/W */
+#define HW_CFG_PHY_CLK_SEL_ (0x00000060) /* R/W */
+#define HW_CFG_PHY_CLK_SEL_INT_PHY_ (0x00000000) /* R/W */
+#define HW_CFG_PHY_CLK_SEL_EXT_PHY_ (0x00000020) /* R/W */
+#define HW_CFG_PHY_CLK_SEL_CLK_DIS_ (0x00000040) /* R/W */
+#define HW_CFG_SMI_SEL_ (0x00000010) /* R/W */
+#define HW_CFG_EXT_PHY_DET_ (0x00000008) /* RO */
+#define HW_CFG_EXT_PHY_EN_ (0x00000004) /* R/W */
+#define HW_CFG_32_16_BIT_MODE_ (0x00000004) /* RO */
+#define HW_CFG_SRST_TO_ (0x00000002) /* RO */
+#define HW_CFG_SRST_ (0x00000001) /* Self Clearing */
+
+#define RX_DP_CTRL (0x78)
+#define RX_DP_CTRL_RX_FFWD_ (0x80000000) /* R/W */
+#define RX_DP_CTRL_FFWD_BUSY_ (0x80000000) /* RO */
+
+#define RX_FIFO_INF (0x7C)
+#define RX_FIFO_INF_RXSUSED_ (0x00FF0000) /* RO */
+#define RX_FIFO_INF_RXDUSED_ (0x0000FFFF) /* RO */
+
+#define TX_FIFO_INF (0x80)
+#define TX_FIFO_INF_TSUSED_ (0x00FF0000) /* RO */
+#define TX_FIFO_INF_TDFREE_ (0x0000FFFF) /* RO */
+
+#define PMT_CTRL (0x84)
+#define PMT_CTRL_PM_MODE_ (0x00003000) /* Self Clearing */
+#define PMT_CTRL_PHY_RST_ (0x00000400) /* Self Clearing */
+#define PMT_CTRL_WOL_EN_ (0x00000200) /* R/W */
+#define PMT_CTRL_ED_EN_ (0x00000100) /* R/W */
+#define PMT_CTRL_PME_TYPE_ (0x00000040) /* R/W Not Affected by SW Reset */
+#define PMT_CTRL_WUPS_ (0x00000030) /* R/WC */
+#define PMT_CTRL_WUPS_NOWAKE_ (0x00000000) /* R/WC */
+#define PMT_CTRL_WUPS_ED_ (0x00000010) /* R/WC */
+#define PMT_CTRL_WUPS_WOL_ (0x00000020) /* R/WC */
+#define PMT_CTRL_WUPS_MULTI_ (0x00000030) /* R/WC */
+#define PMT_CTRL_PME_IND_ (0x00000008) /* R/W */
+#define PMT_CTRL_PME_POL_ (0x00000004) /* R/W */
+#define PMT_CTRL_PME_EN_ (0x00000002) /* R/W Not Affected by SW Reset */
+#define PMT_CTRL_READY_ (0x00000001) /* RO */
+
+#define GPIO_CFG (0x88)
+#define GPIO_CFG_LED3_EN_ (0x40000000) /* R/W */
+#define GPIO_CFG_LED2_EN_ (0x20000000) /* R/W */
+#define GPIO_CFG_LED1_EN_ (0x10000000) /* R/W */
+#define GPIO_CFG_GPIO2_INT_POL_ (0x04000000) /* R/W */
+#define GPIO_CFG_GPIO1_INT_POL_ (0x02000000) /* R/W */
+#define GPIO_CFG_GPIO0_INT_POL_ (0x01000000) /* R/W */
+#define GPIO_CFG_EEPR_EN_ (0x00700000) /* R/W */
+#define GPIO_CFG_GPIOBUF2_ (0x00040000) /* R/W */
+#define GPIO_CFG_GPIOBUF1_ (0x00020000) /* R/W */
+#define GPIO_CFG_GPIOBUF0_ (0x00010000) /* R/W */
+#define GPIO_CFG_GPIODIR2_ (0x00000400) /* R/W */
+#define GPIO_CFG_GPIODIR1_ (0x00000200) /* R/W */
+#define GPIO_CFG_GPIODIR0_ (0x00000100) /* R/W */
+#define GPIO_CFG_GPIOD4_ (0x00000010) /* R/W */
+#define GPIO_CFG_GPIOD3_ (0x00000008) /* R/W */
+#define GPIO_CFG_GPIOD2_ (0x00000004) /* R/W */
+#define GPIO_CFG_GPIOD1_ (0x00000002) /* R/W */
+#define GPIO_CFG_GPIOD0_ (0x00000001) /* R/W */
+
+#define GPT_CFG (0x8C)
+#define GPT_CFG_TIMER_EN_ (0x20000000) /* R/W */
+#define GPT_CFG_GPT_LOAD_ (0x0000FFFF) /* R/W */
+
+#define GPT_CNT (0x90)
+#define GPT_CNT_GPT_CNT_ (0x0000FFFF) /* RO */
+
+#define ENDIAN (0x98)
+#define FREE_RUN (0x9C)
+#define RX_DROP (0xA0)
+#define MAC_CSR_CMD (0xA4)
+#define MAC_CSR_CMD_CSR_BUSY_ (0x80000000) /* Self Clearing */
+#define MAC_CSR_CMD_R_NOT_W_ (0x40000000) /* R/W */
+#define MAC_CSR_CMD_CSR_ADDR_ (0x000000FF) /* R/W */
+
+#define MAC_CSR_DATA (0xA8)
+#define AFC_CFG (0xAC)
+#define AFC_CFG_AFC_HI_ (0x00FF0000) /* R/W */
+#define AFC_CFG_AFC_LO_ (0x0000FF00) /* R/W */
+#define AFC_CFG_BACK_DUR_ (0x000000F0) /* R/W */
+#define AFC_CFG_FCMULT_ (0x00000008) /* R/W */
+#define AFC_CFG_FCBRD_ (0x00000004) /* R/W */
+#define AFC_CFG_FCADD_ (0x00000002) /* R/W */
+#define AFC_CFG_FCANY_ (0x00000001) /* R/W */
+
+#define E2P_CMD (0xB0)
+#define E2P_CMD_EPC_BUSY_ (0x80000000) /* Self Clearing */
+#define E2P_CMD_EPC_CMD_ (0x70000000) /* R/W */
+#define E2P_CMD_EPC_CMD_READ_ (0x00000000) /* R/W */
+#define E2P_CMD_EPC_CMD_EWDS_ (0x10000000) /* R/W */
+#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000) /* R/W */
+#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000) /* R/W */
+#define E2P_CMD_EPC_CMD_WRAL_ (0x40000000) /* R/W */
+#define E2P_CMD_EPC_CMD_ERASE_ (0x50000000) /* R/W */
+#define E2P_CMD_EPC_CMD_ERAL_ (0x60000000) /* R/W */
+#define E2P_CMD_EPC_CMD_RELOAD_ (0x70000000) /* R/W */
+#define E2P_CMD_EPC_TIMEOUT_ (0x00000200) /* RO */
+#define E2P_CMD_MAC_ADDR_LOADED_ (0x00000100) /* RO */
+#define E2P_CMD_EPC_ADDR_ (0x000000FF) /* R/W */
+
+#define E2P_DATA (0xB4)
+#define E2P_DATA_EEPROM_DATA_ (0x000000FF) /* R/W */
+/* end of LAN register offsets and bit definitions */
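+
+/*
+ * Illustrative sketch (not part of the driver): a polled EEPROM byte
+ * read built only from the E2P_CMD/E2P_DATA definitions above and the
+ * SMC_GET_E2P_*()/SMC_SET_E2P_*() accessors defined later in this
+ * file; the local-data type name is an assumption.
+ *
+ *	static u8 e2p_read_byte(struct smc911x_local *lp, u8 offset)
+ *	{
+ *		while (SMC_GET_E2P_CMD(lp) & E2P_CMD_EPC_BUSY_)
+ *			;			// wait out any prior command
+ *		SMC_SET_E2P_CMD(lp, E2P_CMD_EPC_BUSY_ |
+ *				    E2P_CMD_EPC_CMD_READ_ |
+ *				    (offset & E2P_CMD_EPC_ADDR_));
+ *		while (SMC_GET_E2P_CMD(lp) & E2P_CMD_EPC_BUSY_)
+ *			;			// EPC_BUSY_ self-clears when done
+ *		return SMC_GET_E2P_DATA(lp) & E2P_DATA_EEPROM_DATA_;
+ *	}
+ */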
+
+/*
+ ****************************************************************************
+ ****************************************************************************
+ * MAC Control and Status Register (Indirect Address)
+ * Offset (through the MAC_CSR CMD and DATA port)
+ ****************************************************************************
+ ****************************************************************************
+ *
+ */
+#define MAC_CR (0x01) /* R/W */
+
+/* MAC_CR - MAC Control Register */
+#define MAC_CR_RXALL_ (0x80000000)
+// TODO: delete this bit? It is not described in the data sheet.
+#define MAC_CR_HBDIS_ (0x10000000)
+#define MAC_CR_RCVOWN_ (0x00800000)
+#define MAC_CR_LOOPBK_ (0x00200000)
+#define MAC_CR_FDPX_ (0x00100000)
+#define MAC_CR_MCPAS_ (0x00080000)
+#define MAC_CR_PRMS_ (0x00040000)
+#define MAC_CR_INVFILT_ (0x00020000)
+#define MAC_CR_PASSBAD_ (0x00010000)
+#define MAC_CR_HFILT_ (0x00008000)
+#define MAC_CR_HPFILT_ (0x00002000)
+#define MAC_CR_LCOLL_ (0x00001000)
+#define MAC_CR_BCAST_ (0x00000800)
+#define MAC_CR_DISRTY_ (0x00000400)
+#define MAC_CR_PADSTR_ (0x00000100)
+#define MAC_CR_BOLMT_MASK_ (0x000000C0)
+#define MAC_CR_DFCHK_ (0x00000020)
+#define MAC_CR_TXEN_ (0x00000008)
+#define MAC_CR_RXEN_ (0x00000004)
+
+#define ADDRH (0x02) /* R/W mask 0x0000FFFFUL */
+#define ADDRL (0x03) /* R/W mask 0xFFFFFFFFUL */
+#define HASHH (0x04) /* R/W */
+#define HASHL (0x05) /* R/W */
+
+#define MII_ACC (0x06) /* R/W */
+#define MII_ACC_PHY_ADDR_ (0x0000F800)
+#define MII_ACC_MIIRINDA_ (0x000007C0)
+#define MII_ACC_MII_WRITE_ (0x00000002)
+#define MII_ACC_MII_BUSY_ (0x00000001)
+
+#define MII_DATA (0x07) /* R/W mask 0x0000FFFFUL */
+
+#define FLOW (0x08) /* R/W */
+#define FLOW_FCPT_ (0xFFFF0000)
+#define FLOW_FCPASS_ (0x00000004)
+#define FLOW_FCEN_ (0x00000002)
+#define FLOW_FCBSY_ (0x00000001)
+
+#define VLAN1 (0x09) /* R/W mask 0x0000FFFFUL */
+#define VLAN1_VTI1_ (0x0000ffff)
+
+#define VLAN2 (0x0A) /* R/W mask 0x0000FFFFUL */
+#define VLAN2_VTI2_ (0x0000ffff)
+
+#define WUFF (0x0B) /* WO */
+
+#define WUCSR (0x0C) /* R/W */
+#define WUCSR_GUE_ (0x00000200)
+#define WUCSR_WUFR_ (0x00000040)
+#define WUCSR_MPR_ (0x00000020)
+#define WUCSR_WAKE_EN_ (0x00000004)
+#define WUCSR_MPEN_ (0x00000002)
+
+/*
+ ****************************************************************************
+ * Chip Specific MII Defines
+ ****************************************************************************
+ *
+ * Phy register offsets and bit definitions
+ *
+ */
+
+#define PHY_MODE_CTRL_STS ((u32)17) /* Mode Control/Status Register */
+//#define MODE_CTRL_STS_FASTRIP_ ((u16)0x4000)
+#define MODE_CTRL_STS_EDPWRDOWN_ ((u16)0x2000)
+//#define MODE_CTRL_STS_LOWSQEN_ ((u16)0x0800)
+//#define MODE_CTRL_STS_MDPREBP_ ((u16)0x0400)
+//#define MODE_CTRL_STS_FARLOOPBACK_ ((u16)0x0200)
+//#define MODE_CTRL_STS_FASTEST_ ((u16)0x0100)
+//#define MODE_CTRL_STS_REFCLKEN_ ((u16)0x0010)
+//#define MODE_CTRL_STS_PHYADBP_ ((u16)0x0008)
+//#define MODE_CTRL_STS_FORCE_G_LINK_ ((u16)0x0004)
+#define MODE_CTRL_STS_ENERGYON_ ((u16)0x0002)
+
+#define PHY_INT_SRC ((u32)29)
+#define PHY_INT_SRC_ENERGY_ON_ ((u16)0x0080)
+#define PHY_INT_SRC_ANEG_COMP_ ((u16)0x0040)
+#define PHY_INT_SRC_REMOTE_FAULT_ ((u16)0x0020)
+#define PHY_INT_SRC_LINK_DOWN_ ((u16)0x0010)
+#define PHY_INT_SRC_ANEG_LP_ACK_ ((u16)0x0008)
+#define PHY_INT_SRC_PAR_DET_FAULT_ ((u16)0x0004)
+#define PHY_INT_SRC_ANEG_PGRX_ ((u16)0x0002)
+
+#define PHY_INT_MASK ((u32)30)
+#define PHY_INT_MASK_ENERGY_ON_ ((u16)0x0080)
+#define PHY_INT_MASK_ANEG_COMP_ ((u16)0x0040)
+#define PHY_INT_MASK_REMOTE_FAULT_ ((u16)0x0020)
+#define PHY_INT_MASK_LINK_DOWN_ ((u16)0x0010)
+#define PHY_INT_MASK_ANEG_LP_ACK_ ((u16)0x0008)
+#define PHY_INT_MASK_PAR_DET_FAULT_ ((u16)0x0004)
+#define PHY_INT_MASK_ANEG_PGRX_ ((u16)0x0002)
+
+#define PHY_SPECIAL ((u32)31)
+#define PHY_SPECIAL_ANEG_DONE_ ((u16)0x1000)
+#define PHY_SPECIAL_RES_ ((u16)0x0040)
+#define PHY_SPECIAL_RES_MASK_ ((u16)0x0FE1)
+#define PHY_SPECIAL_SPD_ ((u16)0x001C)
+#define PHY_SPECIAL_SPD_10HALF_ ((u16)0x0004)
+#define PHY_SPECIAL_SPD_10FULL_ ((u16)0x0014)
+#define PHY_SPECIAL_SPD_100HALF_ ((u16)0x0008)
+#define PHY_SPECIAL_SPD_100FULL_ ((u16)0x0018)
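+
+/* e.g. ( illustrative ) decoding the negotiated speed/duplex:
+ * (special & PHY_SPECIAL_SPD_) == PHY_SPECIAL_SPD_100FULL_
+ */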
+
+#define LAN911X_INTERNAL_PHY_ID (0x0007C000)
+
+/* Chip ID values */
+#define CHIP_9115 0x0115
+#define CHIP_9116 0x0116
+#define CHIP_9117 0x0117
+#define CHIP_9118 0x0118
+#define CHIP_9211 0x9211
+#define CHIP_9215 0x115A
+#define CHIP_9217 0x117A
+#define CHIP_9218 0x118A
+
+struct chip_id {
+ u16 id;
+ char *name;
+};
+
+static const struct chip_id chip_ids[] = {
+ { CHIP_9115, "LAN9115" },
+ { CHIP_9116, "LAN9116" },
+ { CHIP_9117, "LAN9117" },
+ { CHIP_9118, "LAN9118" },
+ { CHIP_9211, "LAN9211" },
+ { CHIP_9215, "LAN9215" },
+ { CHIP_9217, "LAN9217" },
+ { CHIP_9218, "LAN9218" },
+ { 0, NULL },
+};
+
+#define IS_REV_A(x)	(((x) & 0xFFFF) == 0)
+
+/*
+ * Macros to abstract register access according to the data bus
+ * capabilities. Please use those and not the in/out primitives.
+ */
+/* FIFO read/write macros */
+#define SMC_PUSH_DATA(lp, p, l) SMC_outsl( lp, TX_DATA_FIFO, p, (l) >> 2 )
+#define SMC_PULL_DATA(lp, p, l) SMC_insl ( lp, RX_DATA_FIFO, p, (l) >> 2 )
+#define SMC_SET_TX_FIFO(lp, x) SMC_outl( x, lp, TX_DATA_FIFO )
+#define SMC_GET_RX_FIFO(lp) SMC_inl( lp, RX_DATA_FIFO )
+
+
+/* I/O mapped register read/write macros */
+#define SMC_GET_TX_STS_FIFO(lp) SMC_inl( lp, TX_STATUS_FIFO )
+#define SMC_GET_RX_STS_FIFO(lp) SMC_inl( lp, RX_STATUS_FIFO )
+#define SMC_GET_RX_STS_FIFO_PEEK(lp) SMC_inl( lp, RX_STATUS_FIFO_PEEK )
+#define SMC_GET_PN(lp) (SMC_inl( lp, ID_REV ) >> 16)
+#define SMC_GET_REV(lp) (SMC_inl( lp, ID_REV ) & 0xFFFF)
+#define SMC_GET_IRQ_CFG(lp) SMC_inl( lp, INT_CFG )
+#define SMC_SET_IRQ_CFG(lp, x) SMC_outl( x, lp, INT_CFG )
+#define SMC_GET_INT(lp) SMC_inl( lp, INT_STS )
+#define SMC_ACK_INT(lp, x) SMC_outl( x, lp, INT_STS )
+#define SMC_GET_INT_EN(lp) SMC_inl( lp, INT_EN )
+#define SMC_SET_INT_EN(lp, x) SMC_outl( x, lp, INT_EN )
+#define SMC_GET_BYTE_TEST(lp) SMC_inl( lp, BYTE_TEST )
+#define SMC_SET_BYTE_TEST(lp, x) SMC_outl( x, lp, BYTE_TEST )
+#define SMC_GET_FIFO_INT(lp) SMC_inl( lp, FIFO_INT )
+#define SMC_SET_FIFO_INT(lp, x) SMC_outl( x, lp, FIFO_INT )
+#define SMC_SET_FIFO_TDA(lp, x) \
+ do { \
+ unsigned long __flags; \
+ int __mask; \
+ local_irq_save(__flags); \
+ __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<24); \
+ SMC_SET_FIFO_INT( (lp), __mask | (x)<<24 ); \
+ local_irq_restore(__flags); \
+ } while (0)
+#define SMC_SET_FIFO_TSL(lp, x) \
+ do { \
+ unsigned long __flags; \
+ int __mask; \
+ local_irq_save(__flags); \
+ __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<16); \
+ SMC_SET_FIFO_INT( (lp), __mask | (((x) & 0xFF)<<16)); \
+ local_irq_restore(__flags); \
+ } while (0)
+#define SMC_SET_FIFO_RSA(lp, x) \
+ do { \
+ unsigned long __flags; \
+ int __mask; \
+ local_irq_save(__flags); \
+ __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<8); \
+ SMC_SET_FIFO_INT( (lp), __mask | (((x) & 0xFF)<<8)); \
+ local_irq_restore(__flags); \
+ } while (0)
+#define SMC_SET_FIFO_RSL(lp, x) \
+ do { \
+ unsigned long __flags; \
+ int __mask; \
+ local_irq_save(__flags); \
+ __mask = SMC_GET_FIFO_INT((lp)) & ~0xFF; \
+ SMC_SET_FIFO_INT( (lp),__mask | ((x) & 0xFF)); \
+ local_irq_restore(__flags); \
+ } while (0)
+#define SMC_GET_RX_CFG(lp) SMC_inl( lp, RX_CFG )
+#define SMC_SET_RX_CFG(lp, x) SMC_outl( x, lp, RX_CFG )
+#define SMC_GET_TX_CFG(lp) SMC_inl( lp, TX_CFG )
+#define SMC_SET_TX_CFG(lp, x) SMC_outl( x, lp, TX_CFG )
+#define SMC_GET_HW_CFG(lp) SMC_inl( lp, HW_CFG )
+#define SMC_SET_HW_CFG(lp, x) SMC_outl( x, lp, HW_CFG )
+#define SMC_GET_RX_DP_CTRL(lp) SMC_inl( lp, RX_DP_CTRL )
+#define SMC_SET_RX_DP_CTRL(lp, x) SMC_outl( x, lp, RX_DP_CTRL )
+#define SMC_GET_PMT_CTRL(lp) SMC_inl( lp, PMT_CTRL )
+#define SMC_SET_PMT_CTRL(lp, x) SMC_outl( x, lp, PMT_CTRL )
+#define SMC_GET_GPIO_CFG(lp) SMC_inl( lp, GPIO_CFG )
+#define SMC_SET_GPIO_CFG(lp, x) SMC_outl( x, lp, GPIO_CFG )
+#define SMC_GET_RX_FIFO_INF(lp) SMC_inl( lp, RX_FIFO_INF )
+#define SMC_SET_RX_FIFO_INF(lp, x) SMC_outl( x, lp, RX_FIFO_INF )
+#define SMC_GET_TX_FIFO_INF(lp) SMC_inl( lp, TX_FIFO_INF )
+#define SMC_SET_TX_FIFO_INF(lp, x) SMC_outl( x, lp, TX_FIFO_INF )
+#define SMC_GET_GPT_CFG(lp) SMC_inl( lp, GPT_CFG )
+#define SMC_SET_GPT_CFG(lp, x) SMC_outl( x, lp, GPT_CFG )
+#define SMC_GET_RX_DROP(lp) SMC_inl( lp, RX_DROP )
+#define SMC_SET_RX_DROP(lp, x) SMC_outl( x, lp, RX_DROP )
+#define SMC_GET_MAC_CMD(lp) SMC_inl( lp, MAC_CSR_CMD )
+#define SMC_SET_MAC_CMD(lp, x) SMC_outl( x, lp, MAC_CSR_CMD )
+#define SMC_GET_MAC_DATA(lp) SMC_inl( lp, MAC_CSR_DATA )
+#define SMC_SET_MAC_DATA(lp, x) SMC_outl( x, lp, MAC_CSR_DATA )
+#define SMC_GET_AFC_CFG(lp) SMC_inl( lp, AFC_CFG )
+#define SMC_SET_AFC_CFG(lp, x) SMC_outl( x, lp, AFC_CFG )
+#define SMC_GET_E2P_CMD(lp) SMC_inl( lp, E2P_CMD )
+#define SMC_SET_E2P_CMD(lp, x) SMC_outl( x, lp, E2P_CMD )
+#define SMC_GET_E2P_DATA(lp) SMC_inl( lp, E2P_DATA )
+#define SMC_SET_E2P_DATA(lp, x) SMC_outl( x, lp, E2P_DATA )
+
+/* MAC register read/write macros */
+#define SMC_GET_MAC_CSR(lp,a,v) \
+ do { \
+ while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \
+ SMC_SET_MAC_CMD((lp),MAC_CSR_CMD_CSR_BUSY_ | \
+ MAC_CSR_CMD_R_NOT_W_ | (a) ); \
+ while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \
+ v = SMC_GET_MAC_DATA((lp)); \
+ } while (0)
+#define SMC_SET_MAC_CSR(lp,a,v) \
+ do { \
+ while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \
+ SMC_SET_MAC_DATA((lp), v); \
+ SMC_SET_MAC_CMD((lp), MAC_CSR_CMD_CSR_BUSY_ | (a) ); \
+ while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \
+ } while (0)
+#define SMC_GET_MAC_CR(lp, x) SMC_GET_MAC_CSR( (lp), MAC_CR, x )
+#define SMC_SET_MAC_CR(lp, x) SMC_SET_MAC_CSR( (lp), MAC_CR, x )
+#define SMC_GET_ADDRH(lp, x) SMC_GET_MAC_CSR( (lp), ADDRH, x )
+#define SMC_SET_ADDRH(lp, x) SMC_SET_MAC_CSR( (lp), ADDRH, x )
+#define SMC_GET_ADDRL(lp, x) SMC_GET_MAC_CSR( (lp), ADDRL, x )
+#define SMC_SET_ADDRL(lp, x) SMC_SET_MAC_CSR( (lp), ADDRL, x )
+#define SMC_GET_HASHH(lp, x) SMC_GET_MAC_CSR( (lp), HASHH, x )
+#define SMC_SET_HASHH(lp, x) SMC_SET_MAC_CSR( (lp), HASHH, x )
+#define SMC_GET_HASHL(lp, x) SMC_GET_MAC_CSR( (lp), HASHL, x )
+#define SMC_SET_HASHL(lp, x) SMC_SET_MAC_CSR( (lp), HASHL, x )
+#define SMC_GET_MII_ACC(lp, x) SMC_GET_MAC_CSR( (lp), MII_ACC, x )
+#define SMC_SET_MII_ACC(lp, x) SMC_SET_MAC_CSR( (lp), MII_ACC, x )
+#define SMC_GET_MII_DATA(lp, x) SMC_GET_MAC_CSR( (lp), MII_DATA, x )
+#define SMC_SET_MII_DATA(lp, x) SMC_SET_MAC_CSR( (lp), MII_DATA, x )
+#define SMC_GET_FLOW(lp, x) SMC_GET_MAC_CSR( (lp), FLOW, x )
+#define SMC_SET_FLOW(lp, x) SMC_SET_MAC_CSR( (lp), FLOW, x )
+#define SMC_GET_VLAN1(lp, x) SMC_GET_MAC_CSR( (lp), VLAN1, x )
+#define SMC_SET_VLAN1(lp, x) SMC_SET_MAC_CSR( (lp), VLAN1, x )
+#define SMC_GET_VLAN2(lp, x) SMC_GET_MAC_CSR( (lp), VLAN2, x )
+#define SMC_SET_VLAN2(lp, x) SMC_SET_MAC_CSR( (lp), VLAN2, x )
+#define SMC_SET_WUFF(lp, x) SMC_SET_MAC_CSR( (lp), WUFF, x )
+#define SMC_GET_WUCSR(lp, x) SMC_GET_MAC_CSR( (lp), WUCSR, x )
+#define SMC_SET_WUCSR(lp, x) SMC_SET_MAC_CSR( (lp), WUCSR, x )
+
+/* PHY register read/write macros */
+#define SMC_GET_MII(lp,a,phy,v) \
+ do { \
+ u32 __v; \
+ do { \
+ SMC_GET_MII_ACC((lp), __v); \
+ } while ( __v & MII_ACC_MII_BUSY_ ); \
+ SMC_SET_MII_ACC( (lp), ((phy)<<11) | ((a)<<6) | \
+ MII_ACC_MII_BUSY_); \
+ do { \
+ SMC_GET_MII_ACC( (lp), __v); \
+ } while ( __v & MII_ACC_MII_BUSY_ ); \
+ SMC_GET_MII_DATA((lp), v); \
+ } while (0)
+#define SMC_SET_MII(lp,a,phy,v) \
+ do { \
+ u32 __v; \
+ do { \
+ SMC_GET_MII_ACC((lp), __v); \
+ } while ( __v & MII_ACC_MII_BUSY_ ); \
+ SMC_SET_MII_DATA((lp), v); \
+ SMC_SET_MII_ACC( (lp), ((phy)<<11) | ((a)<<6) | \
+ MII_ACC_MII_BUSY_ | \
+ MII_ACC_MII_WRITE_ ); \
+ do { \
+ SMC_GET_MII_ACC((lp), __v); \
+ } while ( __v & MII_ACC_MII_BUSY_ ); \
+ } while (0)
+#define SMC_GET_PHY_BMCR(lp,phy,x) SMC_GET_MII( (lp), MII_BMCR, phy, x )
+#define SMC_SET_PHY_BMCR(lp,phy,x) SMC_SET_MII( (lp), MII_BMCR, phy, x )
+#define SMC_GET_PHY_BMSR(lp,phy,x) SMC_GET_MII( (lp), MII_BMSR, phy, x )
+#define SMC_GET_PHY_ID1(lp,phy,x) SMC_GET_MII( (lp), MII_PHYSID1, phy, x )
+#define SMC_GET_PHY_ID2(lp,phy,x) SMC_GET_MII( (lp), MII_PHYSID2, phy, x )
+#define SMC_GET_PHY_MII_ADV(lp,phy,x) SMC_GET_MII( (lp), MII_ADVERTISE, phy, x )
+#define SMC_SET_PHY_MII_ADV(lp,phy,x) SMC_SET_MII( (lp), MII_ADVERTISE, phy, x )
+#define SMC_GET_PHY_MII_LPA(lp,phy,x) SMC_GET_MII( (lp), MII_LPA, phy, x )
+#define SMC_SET_PHY_MII_LPA(lp,phy,x) SMC_SET_MII( (lp), MII_LPA, phy, x )
+#define SMC_GET_PHY_CTRL_STS(lp,phy,x) SMC_GET_MII( (lp), PHY_MODE_CTRL_STS, phy, x )
+#define SMC_SET_PHY_CTRL_STS(lp,phy,x) SMC_SET_MII( (lp), PHY_MODE_CTRL_STS, phy, x )
+#define SMC_GET_PHY_INT_SRC(lp,phy,x) SMC_GET_MII( (lp), PHY_INT_SRC, phy, x )
+#define SMC_SET_PHY_INT_SRC(lp,phy,x) SMC_SET_MII( (lp), PHY_INT_SRC, phy, x )
+#define SMC_GET_PHY_INT_MASK(lp,phy,x) SMC_GET_MII( (lp), PHY_INT_MASK, phy, x )
+#define SMC_SET_PHY_INT_MASK(lp,phy,x) SMC_SET_MII( (lp), PHY_INT_MASK, phy, x )
+#define SMC_GET_PHY_SPECIAL(lp,phy,x) SMC_GET_MII( (lp), PHY_SPECIAL, phy, x )
+
+
+
+/* Misc read/write macros */
+
+#ifndef SMC_GET_MAC_ADDR
+#define SMC_GET_MAC_ADDR(lp, addr) \
+ do { \
+ unsigned int __v; \
+ \
+ SMC_GET_MAC_CSR((lp), ADDRL, __v); \
+ addr[0] = __v; addr[1] = __v >> 8; \
+ addr[2] = __v >> 16; addr[3] = __v >> 24; \
+ SMC_GET_MAC_CSR((lp), ADDRH, __v); \
+ addr[4] = __v; addr[5] = __v >> 8; \
+ } while (0)
+#endif
+
+#define SMC_SET_MAC_ADDR(lp, addr) \
+ do { \
+ SMC_SET_MAC_CSR((lp), ADDRL, \
+ addr[0] | \
+ (addr[1] << 8) | \
+ (addr[2] << 16) | \
+ (addr[3] << 24)); \
+ SMC_SET_MAC_CSR((lp), ADDRH, addr[4]|(addr[5] << 8));\
+ } while (0)
+
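+/*
+ * Byte-order example ( illustrative ): for 00:11:22:33:44:55,
+ * SMC_SET_MAC_ADDR writes ADDRL = 0x33221100 and ADDRH = 0x5544,
+ * i.e. addr[0] lands in the least significant byte of ADDRL.
+ */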
+
+#define SMC_WRITE_EEPROM_CMD(lp, cmd, addr)				\
+	do {								\
+		while (SMC_GET_E2P_CMD((lp)) & E2P_CMD_EPC_BUSY_);	\
+		SMC_SET_E2P_CMD((lp), E2P_CMD_EPC_BUSY_ | (cmd) |	\
+				((addr) & E2P_CMD_EPC_ADDR_));		\
+		while (SMC_GET_E2P_CMD((lp)) & E2P_CMD_EPC_BUSY_);	\
+	} while (0)
+
+#endif /* _SMC911X_H_ */
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
new file mode 100644
index 000000000000..4e45094efb18
--- /dev/null
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -0,0 +1,1589 @@
+/*------------------------------------------------------------------------
+ . smc9194.c
+ . This is a driver for SMC's 9000 series of Ethernet cards.
+ .
+ . Copyright (C) 1996 by Erik Stahlman
+ . This software may be used and distributed according to the terms
+ . of the GNU General Public License, incorporated herein by reference.
+ .
+ . "Features" of the SMC chip:
+ . 4608 byte packet memory. ( for the 91C92. Others have more )
+ . EEPROM for configuration
+ . AUI/TP selection ( mine has 10Base2/10BaseT select )
+ .
+ . Arguments:
+ . io = for the base address
+ . irq = for the IRQ
+ . ifport = 0 for autodetect, 1 for TP, 2 for AUI ( or 10base2 )
+ .
+ . author:
+ . Erik Stahlman ( erik@vt.edu )
+ . contributors:
+ . Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ .
+ . Hardware multicast code from Peter Cammaert ( pc@denkart.be )
+ .
+ . Sources:
+ . o SMC databook
+ . o skeleton.c by Donald Becker ( becker@scyld.com )
+ . o ( a LOT of advice from Becker as well )
+ .
+ . History:
+ . 12/07/95 Erik Stahlman written, got receive/xmit handled
+ . 01/03/96 Erik Stahlman worked out some bugs, actually usable!!! :-)
+ . 01/06/96 Erik Stahlman cleaned up some, better testing, etc
+ . 01/29/96 Erik Stahlman fixed autoirq, added multicast
+ . 02/01/96 Erik Stahlman 1. disabled all interrupts in smc_reset
+ . 2. got rid of post-decrementing bug -- UGH.
+ . 02/13/96 Erik Stahlman Tried to fix autoirq failure. Added more
+ . descriptive error messages.
+ . 02/15/96 Erik Stahlman Fixed typo that caused detection failure
+ . 02/23/96 Erik Stahlman Modified it to fit into kernel tree
+ . Added support to change hardware address
+ . Cleared stats on opens
+ . 02/26/96 Erik Stahlman Trial support for Kernel 1.2.13
+ . Kludge for automatic IRQ detection
+ . 03/04/96 Erik Stahlman Fixed kernel 1.3.70 +
+ . Fixed bug reported by Gardner Buchanan in
+ . smc_enable, with outw instead of outb
+ . 03/06/96 Erik Stahlman Added hardware multicast from Peter Cammaert
+ . 04/14/00 Heiko Pruessing (SMA Regelsysteme) Fixed bug in chip memory
+ . allocation
+ . 08/20/00 Arnaldo Melo fix kfree(skb) in smc_hardware_send_packet
+ . 12/15/00 Christian Jullien fix "Warning: kfree_skb on hard IRQ"
+ . 11/08/01 Matt Domsch Use common crc32 function
+ ----------------------------------------------------------------------------*/
+
+static const char version[] =
+ "smc9194.c:v0.14 12/15/00 by Erik Stahlman (erik@vt.edu)\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+
+#include "smc9194.h"
+
+#define DRV_NAME "smc9194"
+
+/*------------------------------------------------------------------------
+ .
+ . Configuration options, for the experienced user to change.
+ .
+ -------------------------------------------------------------------------*/
+
+/*
+ . Do you want to use 32 bit xfers? This should work on all chips, as
+ . the chipset is designed to accommodate them.
+*/
+#ifdef __sh__
+#undef USE_32_BIT
+#else
+#define USE_32_BIT 1
+#endif
+
+#if defined(__H8300H__) || defined(__H8300S__)
+#define NO_AUTOPROBE
+#undef insl
+#undef outsl
+#define insl(a,b,l) io_insl_noswap(a,b,l)
+#define outsl(a,b,l) io_outsl_noswap(a,b,l)
+#endif
+
+/*
+ . The SMC9194 can be at any of the following port addresses. To support
+ . a slightly different card, you can add its address to the array. Keep
+ . in mind that the array must end in zero.
+*/
+
+struct devlist {
+ unsigned int port;
+ unsigned int irq;
+};
+
+#if defined(CONFIG_H8S_EDOSK2674)
+static struct devlist smc_devlist[] __initdata = {
+ {.port = 0xf80000, .irq = 16},
+ {.port = 0, .irq = 0 },
+};
+#else
+static struct devlist smc_devlist[] __initdata = {
+ {.port = 0x200, .irq = 0},
+ {.port = 0x220, .irq = 0},
+ {.port = 0x240, .irq = 0},
+ {.port = 0x260, .irq = 0},
+ {.port = 0x280, .irq = 0},
+ {.port = 0x2A0, .irq = 0},
+ {.port = 0x2C0, .irq = 0},
+ {.port = 0x2E0, .irq = 0},
+ {.port = 0x300, .irq = 0},
+ {.port = 0x320, .irq = 0},
+ {.port = 0x340, .irq = 0},
+ {.port = 0x360, .irq = 0},
+ {.port = 0x380, .irq = 0},
+ {.port = 0x3A0, .irq = 0},
+ {.port = 0x3C0, .irq = 0},
+ {.port = 0x3E0, .irq = 0},
+ {.port = 0, .irq = 0},
+};
+#endif
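+
+/* e.g. ( illustrative ): to probe a card at another address, add a
+   hypothetical entry such as {.port = 0x210, .irq = 0} ahead of the
+   terminating zero entry above. */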
+/*
+ . Wait time for memory to be free. This probably shouldn't be
+ . tuned that much, as waiting for this means nothing else happens
+ . in the system
+*/
+#define MEMORY_WAIT_TIME 16
+
+/*
+ . DEBUGGING LEVELS
+ .
+ . 0 for normal operation
+ . 1 for slightly more details
+ . 2 for interrupt tracking, status flags
+ . 3 for packet dumps, etc.
+ . >3 for various levels of increasingly useless information
+*/
+#define SMC_DEBUG 0
+
+#if (SMC_DEBUG > 2 )
+#define PRINTK3(x) printk x
+#else
+#define PRINTK3(x)
+#endif
+
+#if SMC_DEBUG > 1
+#define PRINTK2(x) printk x
+#else
+#define PRINTK2(x)
+#endif
+
+#if SMC_DEBUG > 0
+#define PRINTK(x) printk x
+#else
+#define PRINTK(x)
+#endif
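+
+/*
+ . Usage note: call sites wrap the printk arguments in a second set of
+ . parentheses, e.g.
+ .	PRINTK2((CARDNAME": memory allocation deferred.\n"));
+ . so that a full argument list can pass through the single macro
+ . parameter 'x' on preprocessors without variadic macros.
+*/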
+
+
+/*------------------------------------------------------------------------
+ .
+ . The internal workings of the driver. If you are changing anything
+ . here with the SMC stuff, you should have the datasheet and know
+ . what you are doing.
+ .
+ -------------------------------------------------------------------------*/
+#define CARDNAME "SMC9194"
+
+
+/* store this information for the driver.. */
+struct smc_local {
+ /*
+ If I have to wait until memory is available to send
+ a packet, I will store the skbuff here, until I get the
+ desired memory. Then, I'll send it out and free it.
+ */
+ struct sk_buff * saved_skb;
+
+ /*
+	 . This keeps track of how many packets I have
+	 . sent out. When a TX_EMPTY interrupt comes, I know
+ . that all of these have been sent.
+ */
+ int packets_waiting;
+};
+
+
+/*-----------------------------------------------------------------
+ .
+ . The driver can be entered at any of the following entry points.
+ .
+ .------------------------------------------------------------------ */
+
+/*
+ . This is called by register_netdev(). It is responsible for
+ . checking the portlist for the SMC9000 series chipset. If it finds
+ . one, then it will initialize the device, find the hardware information,
+ . and set up the appropriate device parameters.
+ . NOTE: Interrupts are *OFF* when this procedure is called.
+ .
+ . NB: This shouldn't be static since it is referred to externally.
+*/
+struct net_device *smc_init(int unit);
+
+/*
+ . The kernel calls this function when someone wants to use the device,
+ . typically 'ifconfig ethX up'.
+*/
+static int smc_open(struct net_device *dev);
+
+/*
+ . Our watchdog timed out. Called by the networking layer
+*/
+static void smc_timeout(struct net_device *dev);
+
+/*
+ . This is called by the kernel in response to 'ifconfig ethX down'. It
+ . is responsible for cleaning up everything that the open routine
+ . does, and maybe putting the card into a powerdown state.
+*/
+static int smc_close(struct net_device *dev);
+
+/*
+ . Finally, a call to set promiscuous mode ( for TCPDUMP and related
+ . programs ) and multicast modes.
+*/
+static void smc_set_multicast_list(struct net_device *dev);
+
+
+/*---------------------------------------------------------------
+ .
+ . Interrupt level calls..
+ .
+ ----------------------------------------------------------------*/
+
+/*
+ . Handles the actual interrupt
+*/
+static irqreturn_t smc_interrupt(int irq, void *);
+/*
+ . This is a separate procedure to handle the receipt of a packet, to
+ . leave the interrupt code looking slightly cleaner
+*/
+static inline void smc_rcv( struct net_device *dev );
+/*
+ . This handles a TX interrupt, which is only raised when an error
+ . occurred while sending a packet.
+*/
+static inline void smc_tx( struct net_device * dev );
+
+/*
+ ------------------------------------------------------------
+ .
+ . Internal routines
+ .
+ ------------------------------------------------------------
+*/
+
+/*
+ . Test if a given location contains a chip, trying to cause as
+ . little damage as possible if it's not an SMC chip.
+*/
+static int smc_probe(struct net_device *dev, int ioaddr);
+
+/*
+ . A rather simple routine to print out a packet for debugging purposes.
+*/
+#if SMC_DEBUG > 2
+static void print_packet( byte *, int );
+#endif
+
+#define tx_done(dev) 1
+
+/* this is called to actually send the packet to the chip */
+static void smc_hardware_send_packet( struct net_device * dev );
+
+/* Since I am not sure if I will have enough room in the chip's ram
+ . to store the packet, I call this routine, which either sends it
+ . now, or generates an interrupt when the card is ready for the
+ . packet */
+static netdev_tx_t smc_wait_to_send_packet( struct sk_buff * skb,
+ struct net_device *dev );
+
+/* this does a soft reset on the device */
+static void smc_reset( int ioaddr );
+
+/* Enable Interrupts, Receive, and Transmit */
+static void smc_enable( int ioaddr );
+
+/* this puts the device in an inactive state */
+static void smc_shutdown( int ioaddr );
+
+/* This routine will find the IRQ of the driver if one is not
+ . specified in the input to the device. */
+static int smc_findirq( int ioaddr );
+
+/*
+ . Function: smc_reset( int ioaddr )
+ . Purpose:
+ . This sets the SMC91xx chip to its normal state, hopefully from whatever
+ . mess that any other DOS driver has put it in.
+ .
+ . Maybe I should reset more registers to defaults in here? SOFTRESET should
+ . do that for me.
+ .
+ . Method:
+ . 1. send a SOFT RESET
+ . 2. wait for it to finish
+ . 3. enable autorelease mode
+ . 4. reset the memory management unit
+ . 5. clear all interrupts
+ .
+*/
+static void smc_reset( int ioaddr )
+{
+ /* This resets the registers mostly to defaults, but doesn't
+ affect EEPROM. That seems unnecessary */
+ SMC_SELECT_BANK( 0 );
+ outw( RCR_SOFTRESET, ioaddr + RCR );
+
+ /* this should pause enough for the chip to be happy */
+ SMC_DELAY( );
+
+ /* Set the transmit and receive configuration registers to
+ default values */
+ outw( RCR_CLEAR, ioaddr + RCR );
+ outw( TCR_CLEAR, ioaddr + TCR );
+
+ /* set the control register to automatically
+ release successfully transmitted packets, to make the best
+ use out of our limited memory */
+ SMC_SELECT_BANK( 1 );
+ outw( inw( ioaddr + CONTROL ) | CTL_AUTO_RELEASE , ioaddr + CONTROL );
+
+ /* Reset the MMU */
+ SMC_SELECT_BANK( 2 );
+ outw( MC_RESET, ioaddr + MMU_CMD );
+
+ /* Note: It doesn't seem that waiting for the MMU busy is needed here,
+ but this is a place where future chipsets _COULD_ break. Be wary
+ of issuing another MMU command right after this */
+
+ outb( 0, ioaddr + INT_MASK );
+}
+
+/*
+ . Function: smc_enable
+ . Purpose: let the chip talk to the outside world
+ . Method:
+ . 1. Enable the transmitter
+ . 2. Enable the receiver
+ . 3. Enable interrupts
+*/
+static void smc_enable( int ioaddr )
+{
+ SMC_SELECT_BANK( 0 );
+ /* see the header file for options in TCR/RCR NORMAL*/
+ outw( TCR_NORMAL, ioaddr + TCR );
+ outw( RCR_NORMAL, ioaddr + RCR );
+
+ /* now, enable interrupts */
+ SMC_SELECT_BANK( 2 );
+ outb( SMC_INTERRUPT_MASK, ioaddr + INT_MASK );
+}
+
+/*
+ . Function: smc_shutdown
+ . Purpose: closes down the SMC91xxx chip.
+ . Method:
+ . 1. zero the interrupt mask
+ . 2. clear the enable receive flag
+ . 3. clear the enable xmit flags
+ .
+ . TODO:
+ . (1) maybe utilize power down mode.
+ . Why not yet? Because while the chip will go into power down mode,
+ . the manual says that it will wake up in response to any I/O requests
+ . in the register space. Empirical results do not show this working.
+*/
+static void smc_shutdown( int ioaddr )
+{
+ /* no more interrupts for me */
+ SMC_SELECT_BANK( 2 );
+ outb( 0, ioaddr + INT_MASK );
+
+ /* and tell the card to stay away from that nasty outside world */
+ SMC_SELECT_BANK( 0 );
+ outb( RCR_CLEAR, ioaddr + RCR );
+ outb( TCR_CLEAR, ioaddr + TCR );
+#if 0
+ /* finally, shut the chip down */
+ SMC_SELECT_BANK( 1 );
+	outw( inw( ioaddr + CONTROL ) | CTL_POWERDOWN, ioaddr + CONTROL );
+#endif
+}
+
+
+/*
+ . Function: smc_setmulticast( int ioaddr, struct net_device *dev )
+ . Purpose:
+ . This sets the internal hardware table to filter out unwanted multicast
+ . packets before they take up memory.
+ .
+ . The SMC chip uses a hash table where the high 6 bits of the CRC of
+ . the address are the offset into the table. If the table bit at that
+ . offset is 1, the multicast packet is accepted. Otherwise, it's
+ . dropped silently.
+ .
+ . To use the 6 bits as an offset into the table, the high 3 bits are the
+ . number of the 8 bit register, while the low 3 bits are the bit within
+ . that register.
+ .
+ . This routine is based very heavily on the one provided by Peter Cammaert.
+*/
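+
+/*
+ . Worked example ( illustrative ): a CRC giving position = 0x2b
+ . (binary 101 011) has position & 7 = 3, and invert3[3] = 6, so the
+ . target byte is multicast_table[6]; (position >> 3) & 7 = 5, and
+ . invert3[5] = 5, so the routine sets bit 5: multicast_table[6] |= 0x20.
+*/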
+
+
+static void smc_setmulticast(int ioaddr, struct net_device *dev)
+{
+ int i;
+ unsigned char multicast_table[ 8 ];
+ struct netdev_hw_addr *ha;
+ /* table for flipping the order of 3 bits */
+ unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 };
+
+ /* start with a table of all zeros: reject all */
+ memset( multicast_table, 0, sizeof( multicast_table ) );
+
+ netdev_for_each_mc_addr(ha, dev) {
+ int position;
+
+ /* only use the low order bits */
+ position = ether_crc_le(6, ha->addr) & 0x3f;
+
+ /* do some messy swapping to put the bit in the right spot */
+ multicast_table[invert3[position&7]] |=
+ (1<<invert3[(position>>3)&7]);
+
+ }
+ /* now, the table can be loaded into the chipset */
+ SMC_SELECT_BANK( 3 );
+
+ for ( i = 0; i < 8 ; i++ ) {
+ outb( multicast_table[i], ioaddr + MULTICAST1 + i );
+ }
+}
+
+/*
+ . Function: smc_wait_to_send_packet( struct sk_buff * skb, struct net_device * )
+ . Purpose:
+ . Attempt to allocate memory for a packet, if chip-memory is not
+ . available, then tell the card to generate an interrupt when it
+ . is available.
+ .
+ . Algorithm:
+ .
+ . o if the saved_skb is not currently null, then drop this packet
+ . on the floor. This should never happen, because of TBUSY.
+ . o	if the saved_skb is null, then replace it with the current packet.
+ . o	See if I can send it now.
+ . o	(NO): Enable interrupts and let the interrupt handler deal with it.
+ . o	(YES): Send it now.
+*/
+static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ word length;
+ unsigned short numPages;
+ word time_out;
+
+ netif_stop_queue(dev);
+ /* Well, I want to send the packet.. but I don't know
+ if I can send it right now... */
+
+ if ( lp->saved_skb) {
+ /* THIS SHOULD NEVER HAPPEN. */
+ dev->stats.tx_aborted_errors++;
+ printk(CARDNAME": Bad Craziness - sent packet while busy.\n" );
+ return NETDEV_TX_BUSY;
+ }
+ lp->saved_skb = skb;
+
+ length = skb->len;
+
+ if (length < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN)) {
+ netif_wake_queue(dev);
+ return NETDEV_TX_OK;
+ }
+ length = ETH_ZLEN;
+ }
+
+ /*
+	** The MMU wants the number of pages to be the number of 256-byte
+	** 'pages', minus 1 ( since a packet can't ever have 0 pages :) )
+	**
+	** The packet size for allocation is the data length + 6 (for the
+	** status word, the length word and the control byte). If the
+	** length is odd, the last byte is included in this header.
+ */
+ numPages = ((length & 0xfffe) + 6) / 256;
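+	/*
+	 . e.g. ( illustrative ): a minimum 60-byte frame gives
+	 . (60 + 6) / 256 = 0, i.e. one 256-byte page; a full
+	 . 1514-byte frame gives (1514 + 6) / 256 = 5, i.e. six pages.
+	 */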
+
+ if (numPages > 7 ) {
+ printk(CARDNAME": Far too big packet error.\n");
+ /* freeing the packet is a good thing here... but should
+ . any packets of this size get down here? */
+ dev_kfree_skb (skb);
+ lp->saved_skb = NULL;
+ /* this IS an error, but, i don't want the skb saved */
+ netif_wake_queue(dev);
+ return NETDEV_TX_OK;
+ }
+ /* either way, a packet is waiting now */
+ lp->packets_waiting++;
+
+ /* now, try to allocate the memory */
+ SMC_SELECT_BANK( 2 );
+ outw( MC_ALLOC | numPages, ioaddr + MMU_CMD );
+ /*
+ . Performance Hack
+ .
+ . wait a short amount of time.. if I can send a packet now, I send
+ . it now. Otherwise, I enable an interrupt and wait for one to be
+ . available.
+ .
+ . I could have handled this a slightly different way, by checking to
+ . see if any memory was available in the FREE MEMORY register. However,
+ . either way, I need to generate an allocation, and the allocation works
+ . no matter what, so I saw no point in checking free memory.
+ */
+ time_out = MEMORY_WAIT_TIME;
+ do {
+ word status;
+
+ status = inb( ioaddr + INTERRUPT );
+ if ( status & IM_ALLOC_INT ) {
+ /* acknowledge the interrupt */
+ outb( IM_ALLOC_INT, ioaddr + INTERRUPT );
+ break;
+ }
+ } while ( -- time_out );
+
+ if ( !time_out ) {
+ /* oh well, wait until the chip finds memory later */
+ SMC_ENABLE_INT( IM_ALLOC_INT );
+ PRINTK2((CARDNAME": memory allocation deferred.\n"));
+ /* it's deferred, but I'll handle it later */
+ return NETDEV_TX_OK;
+ }
+ /* or YES! I can send the packet now.. */
+ smc_hardware_send_packet(dev);
+ netif_wake_queue(dev);
+ return NETDEV_TX_OK;
+}
+
+/*
+ . Function: smc_hardware_send_packet(struct net_device * )
+ . Purpose:
+ . This sends the actual packet to the SMC9xxx chip.
+ .
+ . Algorithm:
+ . First, see if a saved_skb is available.
+ . ( this should NOT be called if there is no 'saved_skb'
+ . Now, find the packet number that the chip allocated
+ . Point the data pointers at it in memory
+ . Set the length word in the chip's memory
+ . Dump the packet to chip memory
+ . Check if a last byte is needed ( odd length packet )
+ . if so, set the control flag right
+ . Tell the card to send it
+ . Enable the transmit interrupt, so I know if it failed
+ . Free the kernel data if I actually sent it.
+*/
+static void smc_hardware_send_packet( struct net_device * dev )
+{
+ struct smc_local *lp = netdev_priv(dev);
+ byte packet_no;
+ struct sk_buff * skb = lp->saved_skb;
+ word length;
+ unsigned int ioaddr;
+ byte * buf;
+
+ ioaddr = dev->base_addr;
+
+ if ( !skb ) {
+ PRINTK((CARDNAME": In XMIT with no packet to send\n"));
+ return;
+ }
+ length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ buf = skb->data;
+
+ /* If I get here, I _know_ there is a packet slot waiting for me */
+ packet_no = inb( ioaddr + PNR_ARR + 1 );
+ if ( packet_no & 0x80 ) {
+ /* or isn't there? BAD CHIP! */
+ printk(KERN_DEBUG CARDNAME": Memory allocation failed.\n");
+ dev_kfree_skb_any(skb);
+ lp->saved_skb = NULL;
+ netif_wake_queue(dev);
+ return;
+ }
+
+ /* we have a packet address, so tell the card to use it */
+ outb( packet_no, ioaddr + PNR_ARR );
+
+ /* point to the beginning of the packet */
+ outw( PTR_AUTOINC , ioaddr + POINTER );
+
+ PRINTK3((CARDNAME": Trying to xmit packet of length %x\n", length ));
+#if SMC_DEBUG > 2
+ print_packet( buf, length );
+#endif
+
+ /* send the packet length ( +6 for status, length and ctl byte )
+ and the status word ( set to zeros ) */
+#ifdef USE_32_BIT
+ outl( (length +6 ) << 16 , ioaddr + DATA_1 );
+#else
+ outw( 0, ioaddr + DATA_1 );
+ /* send the packet length ( +6 for status words, length, and ctl*/
+ outb( (length+6) & 0xFF,ioaddr + DATA_1 );
+ outb( (length+6) >> 8 , ioaddr + DATA_1 );
+#endif
+
+ /* send the actual data
+ . I _think_ it's faster to send the longs first, and then
+ . mop up by sending the last word. It depends heavily
+ . on alignment, at least on the 486. Maybe it would be
+ . a good idea to check which is optimal? But that could take
+ . almost as much time as is saved?
+ */
+#ifdef USE_32_BIT
+ if ( length & 0x2 ) {
+ outsl(ioaddr + DATA_1, buf, length >> 2 );
+#if !defined(__H8300H__) && !defined(__H8300S__)
+ outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1);
+#else
+ ctrl_outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1);
+#endif
+ }
+ else
+ outsl(ioaddr + DATA_1, buf, length >> 2 );
+#else
+ outsw(ioaddr + DATA_1 , buf, (length ) >> 1);
+#endif
+ /* Send the last byte, if there is one. */
+
+ if ( (length & 1) == 0 ) {
+ outw( 0, ioaddr + DATA_1 );
+ } else {
+ outb( buf[length -1 ], ioaddr + DATA_1 );
+ outb( 0x20, ioaddr + DATA_1);
+ }
+
+ /* enable the interrupts */
+ SMC_ENABLE_INT( (IM_TX_INT | IM_TX_EMPTY_INT) );
+
+ /* and let the chipset deal with it */
+ outw( MC_ENQUEUE , ioaddr + MMU_CMD );
+
+ PRINTK2((CARDNAME": Sent packet of length %d\n", length));
+
+ lp->saved_skb = NULL;
+ dev_kfree_skb_any (skb);
+
+ dev->trans_start = jiffies;
+
+ /* we can send another packet */
+ netif_wake_queue(dev);
+}
+
+/*-------------------------------------------------------------------------
+ |
+ | smc_init(int unit)
+ | Input parameters:
+ | dev->base_addr == 0, try to find all possible locations
+ | dev->base_addr == 1, return failure code
+ | dev->base_addr == 2, always allocate space, and return success
+ | dev->base_addr == <anything else> this is the address to check
+ |
+ | Output:
+ | pointer to net_device or ERR_PTR(error)
+ |
+ ---------------------------------------------------------------------------
+*/
+static int io;
+static int irq;
+static int ifport;
+
+struct net_device * __init smc_init(int unit)
+{
+ struct net_device *dev = alloc_etherdev(sizeof(struct smc_local));
+ struct devlist *smcdev = smc_devlist;
+ int err = 0;
+
+ if (!dev)
+		return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ io = dev->base_addr;
+ irq = dev->irq;
+ }
+
+ if (io > 0x1ff) { /* Check a single specified location. */
+ err = smc_probe(dev, io);
+ } else if (io != 0) { /* Don't probe at all. */
+ err = -ENXIO;
+ } else {
+ for (;smcdev->port; smcdev++) {
+ if (smc_probe(dev, smcdev->port) == 0)
+ break;
+ }
+ if (!smcdev->port)
+ err = -ENODEV;
+ }
+ if (err)
+ goto out;
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, SMC_IO_EXTENT);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+/*----------------------------------------------------------------------
+ . smc_findirq
+ .
+ . This routine has a simple purpose -- make the SMC chip generate an
+ . interrupt, so an auto-detect routine can detect it, and find the IRQ,
+ ------------------------------------------------------------------------
+*/
+static int __init smc_findirq(int ioaddr)
+{
+#ifndef NO_AUTOPROBE
+ int timeout = 20;
+ unsigned long cookie;
+
+
+ cookie = probe_irq_on();
+
+ /*
+ * What I try to do here is trigger an ALLOC_INT. This is done
+ * by allocating a small chunk of memory, which will give an interrupt
+ * when done.
+ */
+
+
+ SMC_SELECT_BANK(2);
+ /* enable ALLOCation interrupts ONLY */
+ outb( IM_ALLOC_INT, ioaddr + INT_MASK );
+
+ /*
+ . Allocate 512 bytes of memory. Note that the chip was just
+ . reset so all the memory is available
+ */
+ outw( MC_ALLOC | 1, ioaddr + MMU_CMD );
+
+ /*
+ . Wait until positive that the interrupt has been generated
+ */
+ while ( timeout ) {
+ byte int_status;
+
+ int_status = inb( ioaddr + INTERRUPT );
+
+ if ( int_status & IM_ALLOC_INT )
+ break; /* got the interrupt */
+ timeout--;
+ }
+ /* there is really nothing that I can do here if timeout fails,
+ as probe_irq_off will return a 0 anyway, which is what I
+ want in this case. Plus, the clean up is needed in both
+ cases. */
+
+ /* DELAY HERE!
+ On a fast machine, the status might change before the interrupt
+ is given to the processor. This means that the interrupt was
+ never detected, and probe_irq_off fails to report anything.
+ This should fix probe_irq_* problems.
+ */
+ SMC_DELAY();
+ SMC_DELAY();
+
+ /* and disable all interrupts again */
+ outb( 0, ioaddr + INT_MASK );
+
+ /* and return what I found */
+ return probe_irq_off(cookie);
+#else /* NO_AUTOPROBE */
+ struct devlist *smcdev;
+ for (smcdev = smc_devlist; smcdev->port; smcdev++) {
+ if (smcdev->port == ioaddr)
+ return smcdev->irq;
+ }
+ return 0;
+#endif
+}
+
+static const struct net_device_ops smc_netdev_ops = {
+ .ndo_open = smc_open,
+ .ndo_stop = smc_close,
+ .ndo_start_xmit = smc_wait_to_send_packet,
+ .ndo_tx_timeout = smc_timeout,
+ .ndo_set_rx_mode = smc_set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/*----------------------------------------------------------------------
+ . Function: smc_probe( int ioaddr )
+ .
+ . Purpose:
+ . Tests to see if a given ioaddr points to an SMC9xxx chip.
+ . Returns a 0 on success
+ .
+ . Algorithm:
+ . (1) see if the high byte of BANK_SELECT is 0x33
+ . (2) compare the ioaddr with the base register's address
+ . (3) see if I recognize the chip ID in the appropriate register
+ .
+ .---------------------------------------------------------------------
+ */
+
+/*---------------------------------------------------------------
+ . Here I do typical initialization tasks.
+ .
+ . o Initialize the structure if needed
+ . o print out my vanity message if not done so already
+ . o print out what type of hardware is detected
+ . o print out the ethernet address
+ . o find the IRQ
+ . o set up my private data
+ . o configure the dev structure with my subroutines
+ . o actually GRAB the irq.
+ . o GRAB the region
+ .-----------------------------------------------------------------
+*/
+static int __init smc_probe(struct net_device *dev, int ioaddr)
+{
+ int i, memory, retval;
+ static unsigned version_printed;
+ unsigned int bank;
+
+ const char *version_string;
+ const char *if_string;
+
+ /* registers */
+ word revision_register;
+ word base_address_register;
+ word configuration_register;
+ word memory_info_register;
+ word memory_cfg_register;
+
+ /* Grab the region so that no one else tries to probe our ioports. */
+ if (!request_region(ioaddr, SMC_IO_EXTENT, DRV_NAME))
+ return -EBUSY;
+
+ dev->irq = irq;
+ dev->if_port = ifport;
+
+ /* First, see if the high byte is 0x33 */
+ bank = inw( ioaddr + BANK_SELECT );
+ if ( (bank & 0xFF00) != 0x3300 ) {
+ retval = -ENODEV;
+ goto err_out;
+ }
+ /* The above MIGHT indicate a device, but I need to write to further
+ test this. */
+ outw( 0x0, ioaddr + BANK_SELECT );
+ bank = inw( ioaddr + BANK_SELECT );
+ if ( (bank & 0xFF00 ) != 0x3300 ) {
+ retval = -ENODEV;
+ goto err_out;
+ }
+#if !defined(CONFIG_H8S_EDOSK2674)
+ /* well, we've already written once, so hopefully another time won't
+ hurt. This time, I need to switch the bank register to bank 1,
+ so I can access the base address register */
+ SMC_SELECT_BANK(1);
+ base_address_register = inw( ioaddr + BASE );
+ if ( ioaddr != ( base_address_register >> 3 & 0x3E0 ) ) {
+ printk(CARDNAME ": IOADDR %x doesn't match configuration (%x). "
+ "Probably not a SMC chip\n",
+ ioaddr, base_address_register >> 3 & 0x3E0 );
+ /* well, the base address register didn't match. Must not have
+ been a SMC chip after all. */
+ retval = -ENODEV;
+ goto err_out;
+ }
+#else
+ (void)base_address_register; /* Warning suppression */
+#endif
+
+
+ /* check if the revision register is something that I recognize.
+ These might need to be added to later, as future revisions
+ could be added. */
+ SMC_SELECT_BANK(3);
+ revision_register = inw( ioaddr + REVISION );
+ if ( !chip_ids[ ( revision_register >> 4 ) & 0xF ] ) {
+ /* I don't recognize this chip, so... */
+ printk(CARDNAME ": IO %x: Unrecognized revision register:"
+ " %x, Contact author.\n", ioaddr, revision_register);
+
+ retval = -ENODEV;
+ goto err_out;
+ }
+
+ /* at this point I'll assume that the chip is an SMC9xxx.
+ It might be prudent to check a listing of MAC addresses
+ against the hardware address, or do some other tests. */
+
+ if (version_printed++ == 0)
+ printk("%s", version);
+
+ /* fill in some of the fields */
+ dev->base_addr = ioaddr;
+
+ /*
+ . Get the MAC address ( bank 1, regs 4 - 9 )
+ */
+ SMC_SELECT_BANK( 1 );
+ for ( i = 0; i < 6; i += 2 ) {
+ word address;
+
+ address = inw( ioaddr + ADDR0 + i );
+ dev->dev_addr[ i + 1] = address >> 8;
+ dev->dev_addr[ i ] = address & 0xFF;
+ }
+
+ /* get the memory information */
+
+ SMC_SELECT_BANK( 0 );
+ memory_info_register = inw( ioaddr + MIR );
+ memory_cfg_register = inw( ioaddr + MCR );
+ memory = ( memory_cfg_register >> 9 ) & 0x7; /* multiplier */
+ memory *= 256 * ( memory_info_register & 0xFF );
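+	/*
+	 . e.g. ( illustrative ): a multiplier of 1 and a memory info
+	 . value of 18 give 256 * 18 = 4608 bytes, the packet memory
+	 . quoted for the 91C92 at the top of this file.
+	 */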
+
+ /*
+ Now, I want to find out more about the chip. This is sort of
+ redundant, but it's cleaner to have it in both, rather than having
+ one VERY long probe procedure.
+ */
+ SMC_SELECT_BANK(3);
+ revision_register = inw( ioaddr + REVISION );
+ version_string = chip_ids[ ( revision_register >> 4 ) & 0xF ];
+ if ( !version_string ) {
+ /* I shouldn't get here because this call was done before.... */
+ retval = -ENODEV;
+ goto err_out;
+ }
+
+ /* is it using AUI or 10BaseT ? */
+ if ( dev->if_port == 0 ) {
+ SMC_SELECT_BANK(1);
+ configuration_register = inw( ioaddr + CONFIG );
+ if ( configuration_register & CFG_AUI_SELECT )
+ dev->if_port = 2;
+ else
+ dev->if_port = 1;
+ }
+ if_string = interfaces[ dev->if_port - 1 ];
+
+ /* now, reset the chip, and put it into a known state */
+ smc_reset( ioaddr );
+
+ /*
+ . If dev->irq is 0, then the device has to be banged on to see
+ . what the IRQ is.
+ .
+ . This banging doesn't always detect the IRQ, for unknown reasons.
+	 . A workaround is to reset the chip and try again.
+ .
+ . Interestingly, the DOS packet driver *SETS* the IRQ on the card to
+ . be what is requested on the command line. I don't do that, mostly
+ . because the card that I have uses a non-standard method of accessing
+ . the IRQs, and because this _should_ work in most configurations.
+ .
+ . Specifying an IRQ is done with the assumption that the user knows
+ . what (s)he is doing. No checking is done!!!!
+ .
+ */
+ if ( dev->irq < 2 ) {
+ int trials;
+
+ trials = 3;
+ while ( trials-- ) {
+ dev->irq = smc_findirq( ioaddr );
+ if ( dev->irq )
+ break;
+ /* kick the card and try again */
+ smc_reset( ioaddr );
+ }
+ }
+ if (dev->irq == 0 ) {
+ printk(CARDNAME": Couldn't autodetect your IRQ. Use irq=xx.\n");
+ retval = -ENODEV;
+ goto err_out;
+ }
+
+ /* now, print out the card info, in a short format.. */
+
+ printk("%s: %s(r:%d) at %#3x IRQ:%d INTF:%s MEM:%db ", dev->name,
+ version_string, revision_register & 0xF, ioaddr, dev->irq,
+ if_string, memory );
+ /*
+ . Print the Ethernet address
+ */
+ printk("ADDR: %pM\n", dev->dev_addr);
+
+ /* Grab the IRQ */
+ retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev);
+ if (retval) {
+ printk("%s: unable to get IRQ %d (irqval=%d).\n", DRV_NAME,
+ dev->irq, retval);
+ goto err_out;
+ }
+
+ dev->netdev_ops = &smc_netdev_ops;
+ dev->watchdog_timeo = HZ/20;
+
+ return 0;
+
+err_out:
+ release_region(ioaddr, SMC_IO_EXTENT);
+ return retval;
+}
+
+#if SMC_DEBUG > 2
+static void print_packet( byte * buf, int length )
+{
+#if 0
+ int i;
+ int remainder;
+ int lines;
+
+ printk("Packet of length %d\n", length);
+ lines = length / 16;
+ remainder = length % 16;
+
+ for ( i = 0; i < lines ; i ++ ) {
+ int cur;
+
+ for ( cur = 0; cur < 8; cur ++ ) {
+ byte a, b;
+
+ a = *(buf ++ );
+ b = *(buf ++ );
+ printk("%02x%02x ", a, b );
+ }
+ printk("\n");
+ }
+ for ( i = 0; i < remainder/2 ; i++ ) {
+ byte a, b;
+
+ a = *(buf ++ );
+ b = *(buf ++ );
+ printk("%02x%02x ", a, b );
+ }
+ printk("\n");
+#endif
+}
+#endif
+
+
+/*
+ * Open and Initialize the board
+ *
+ * Set up everything, reset the card, etc ..
+ *
+ */
+static int smc_open(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ int i; /* used to set hw ethernet address */
+
+ /* clear out all the junk that was put here before... */
+ memset(netdev_priv(dev), 0, sizeof(struct smc_local));
+
+ /* reset the hardware */
+
+ smc_reset( ioaddr );
+ smc_enable( ioaddr );
+
+ /* Select which interface to use */
+
+ SMC_SELECT_BANK( 1 );
+ if ( dev->if_port == 1 ) {
+ outw( inw( ioaddr + CONFIG ) & ~CFG_AUI_SELECT,
+ ioaddr + CONFIG );
+ }
+ else if ( dev->if_port == 2 ) {
+ outw( inw( ioaddr + CONFIG ) | CFG_AUI_SELECT,
+ ioaddr + CONFIG );
+ }
+
+ /*
+ According to Becker, I have to set the hardware address
+ at this point, because the (l)user can set it with an
+ ioctl. Easily done...
+ */
+ SMC_SELECT_BANK( 1 );
+ for ( i = 0; i < 6; i += 2 ) {
+ word address;
+
+ address = dev->dev_addr[ i + 1 ] << 8 ;
+ address |= dev->dev_addr[ i ];
+ outw( address, ioaddr + ADDR0 + i );
+ }
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+/*--------------------------------------------------------
+ . Called by the kernel to send a packet out into the void
+ . of the net. This routine is largely based on
+ . skeleton.c, from Becker.
+ .--------------------------------------------------------
+*/
+
+static void smc_timeout(struct net_device *dev)
+{
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+ printk(KERN_WARNING CARDNAME": transmit timed out, %s?\n",
+ tx_done(dev) ? "IRQ conflict" :
+ "network cable problem");
+ /* "kick" the adaptor */
+ smc_reset( dev->base_addr );
+ smc_enable( dev->base_addr );
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ /* clear anything saved */
+ ((struct smc_local *)netdev_priv(dev))->saved_skb = NULL;
+ netif_wake_queue(dev);
+}
+
+/*-------------------------------------------------------------
+ .
+ . smc_rcv - receive a packet from the card
+ .
+ . There is ( at least ) a packet waiting to be read from
+ . chip-memory.
+ .
+ . o Read the status
+ . o If an error, record it
+ . o otherwise, read in the packet
+ --------------------------------------------------------------
+*/
+static void smc_rcv(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ int packet_number;
+ word status;
+ word packet_length;
+
+ /* assume bank 2 */
+
+ packet_number = inw( ioaddr + FIFO_PORTS );
+
+ if ( packet_number & FP_RXEMPTY ) {
+		/* we got called, but nothing was on the FIFO */
+ PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO.\n"));
+ /* don't need to restore anything */
+ return;
+ }
+
+ /* start reading from the start of the packet */
+ outw( PTR_READ | PTR_RCV | PTR_AUTOINC, ioaddr + POINTER );
+
+ /* First two words are status and packet_length */
+ status = inw( ioaddr + DATA_1 );
+ packet_length = inw( ioaddr + DATA_1 );
+
+ packet_length &= 0x07ff; /* mask off top bits */
+
+ PRINTK2(("RCV: STATUS %4x LENGTH %4x\n", status, packet_length ));
+ /*
+	 . the packet length includes 3 extra words:
+	 . status, length, and a final word carrying the odd byte.
+ */
+ packet_length -= 6;
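+	/*
+	 . e.g. ( illustrative ): a 64-byte frame is reported as
+	 . packet_length = 70; dropping the 6 bytes of status, length
+	 . and control leaves the 64 data bytes.
+	 */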
+
+ if ( !(status & RS_ERRORS ) ){
+ /* do stuff to make a new packet */
+ struct sk_buff * skb;
+ byte * data;
+
+ /* read one extra byte */
+ if ( status & RS_ODDFRAME )
+ packet_length++;
+
+ /* set multicast stats */
+ if ( status & RS_MULTICAST )
+ dev->stats.multicast++;
+
+ skb = dev_alloc_skb( packet_length + 5);
+
+ if ( skb == NULL ) {
+ printk(KERN_NOTICE CARDNAME ": Low memory, packet dropped.\n");
+ dev->stats.rx_dropped++;
+ goto done;
+ }
+
+ /*
+		 ! This should work without alignment, but it could be
+		 ! slow in the worst case
+ */
+
+ skb_reserve( skb, 2 ); /* 16 bit alignment */
+
+ data = skb_put( skb, packet_length);
+
+#ifdef USE_32_BIT
+ /* QUESTION: Like in the TX routine, do I want
+ to send the DWORDs or the bytes first, or some
+ mixture. A mixture might improve already slow PIO
+ performance */
+ PRINTK3((" Reading %d dwords (and %d bytes)\n",
+ packet_length >> 2, packet_length & 3 ));
+ insl(ioaddr + DATA_1 , data, packet_length >> 2 );
+ /* read the left over bytes */
+ insb( ioaddr + DATA_1, data + (packet_length & 0xFFFFFC),
+ packet_length & 0x3 );
+#else
+ PRINTK3((" Reading %d words and %d byte(s)\n",
+ (packet_length >> 1 ), packet_length & 1 ));
+ insw(ioaddr + DATA_1 , data, packet_length >> 1);
+ if ( packet_length & 1 ) {
+ data += packet_length & ~1;
+ *(data++) = inb( ioaddr + DATA_1 );
+ }
+#endif
+#if SMC_DEBUG > 2
+		print_packet( skb->data, packet_length );
+#endif
+
+ skb->protocol = eth_type_trans(skb, dev );
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += packet_length;
+ } else {
+ /* error ... */
+ dev->stats.rx_errors++;
+
+ if ( status & RS_ALGNERR ) dev->stats.rx_frame_errors++;
+ if ( status & (RS_TOOSHORT | RS_TOOLONG ) )
+ dev->stats.rx_length_errors++;
+ if ( status & RS_BADCRC) dev->stats.rx_crc_errors++;
+ }
+
+done:
+ /* error or good, tell the card to get rid of this packet */
+ outw( MC_RELEASE, ioaddr + MMU_CMD );
+}
+
+
+/*************************************************************************
+ . smc_tx
+ .
+ . Purpose: Handle a transmit error message. This will only be called
+ .           when an error occurred, because of the AUTO_RELEASE mode.
+ .
+ . Algorithm:
+ . Save pointer and packet no
+ . Get the packet no from the top of the queue
+ . check if it's valid ( if not, is this an error??? )
+ . read the status word
+ . record the error
+ . ( resend? Not really, since we don't want old packets around )
+ . Restore saved values
+ ************************************************************************/
+static void smc_tx( struct net_device * dev )
+{
+ int ioaddr = dev->base_addr;
+ struct smc_local *lp = netdev_priv(dev);
+ byte saved_packet;
+ byte packet_no;
+ word tx_status;
+
+
+ /* assume bank 2 */
+
+ saved_packet = inb( ioaddr + PNR_ARR );
+ packet_no = inw( ioaddr + FIFO_PORTS );
+ packet_no &= 0x7F;
+
+ /* select this as the packet to read from */
+ outb( packet_no, ioaddr + PNR_ARR );
+
+ /* read the first word from this packet */
+ outw( PTR_AUTOINC | PTR_READ, ioaddr + POINTER );
+
+ tx_status = inw( ioaddr + DATA_1 );
+ PRINTK3((CARDNAME": TX DONE STATUS: %4x\n", tx_status));
+
+ dev->stats.tx_errors++;
+ if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++;
+ if ( tx_status & TS_LATCOL ) {
+ printk(KERN_DEBUG CARDNAME
+ ": Late collision occurred on last xmit.\n");
+ dev->stats.tx_window_errors++;
+ }
+#if 0
+ if ( tx_status & TS_16COL ) { ... }
+#endif
+
+ if ( tx_status & TS_SUCCESS ) {
+ printk(CARDNAME": Successful packet caused interrupt\n");
+ }
+ /* re-enable transmit */
+ SMC_SELECT_BANK( 0 );
+ outw( inw( ioaddr + TCR ) | TCR_ENABLE, ioaddr + TCR );
+
+ /* kill the packet */
+ SMC_SELECT_BANK( 2 );
+ outw( MC_FREEPKT, ioaddr + MMU_CMD );
+
+ /* one less packet waiting for me */
+ lp->packets_waiting--;
+
+ outb( saved_packet, ioaddr + PNR_ARR );
+}
+
+/*--------------------------------------------------------------------
+ .
+ . This is the main routine of the driver, to handle the device when
+ . it needs some attention.
+ .
+ . So:
+ . first, save state of the chipset
+ . branch off into routines to handle each case, and acknowledge
+ . each to the interrupt register
+ . and finally restore state.
+ .
+ ---------------------------------------------------------------------*/
+
+static irqreturn_t smc_interrupt(int irq, void * dev_id)
+{
+ struct net_device *dev = dev_id;
+ int ioaddr = dev->base_addr;
+ struct smc_local *lp = netdev_priv(dev);
+
+ byte status;
+ word card_stats;
+ byte mask;
+ int timeout;
+ /* state registers */
+ word saved_bank;
+ word saved_pointer;
+ int handled = 0;
+
+
+ PRINTK3((CARDNAME": SMC interrupt started\n"));
+
+ saved_bank = inw( ioaddr + BANK_SELECT );
+
+ SMC_SELECT_BANK(2);
+ saved_pointer = inw( ioaddr + POINTER );
+
+ mask = inb( ioaddr + INT_MASK );
+ /* clear all interrupts */
+ outb( 0, ioaddr + INT_MASK );
+
+
+ /* set a timeout value, so I don't stay here forever */
+ timeout = 4;
+
+ PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x\n", mask));
+ do {
+ /* read the status flag, and mask it */
+ status = inb( ioaddr + INTERRUPT ) & mask;
+ if (!status )
+ break;
+
+ handled = 1;
+
+ PRINTK3((KERN_WARNING CARDNAME
+ ": Handling interrupt status %x\n", status));
+
+ if (status & IM_RCV_INT) {
+ /* Got a packet(s). */
+ PRINTK2((KERN_WARNING CARDNAME
+ ": Receive Interrupt\n"));
+ smc_rcv(dev);
+ } else if (status & IM_TX_INT ) {
+ PRINTK2((KERN_WARNING CARDNAME
+ ": TX ERROR handled\n"));
+ smc_tx(dev);
+ outb(IM_TX_INT, ioaddr + INTERRUPT );
+ } else if (status & IM_TX_EMPTY_INT ) {
+ /* update stats */
+ SMC_SELECT_BANK( 0 );
+ card_stats = inw( ioaddr + COUNTER );
+ /* single collisions */
+ dev->stats.collisions += card_stats & 0xF;
+ card_stats >>= 4;
+ /* multiple collisions */
+ dev->stats.collisions += card_stats & 0xF;
+
+ /* these are for when linux supports these statistics */
+
+ SMC_SELECT_BANK( 2 );
+ PRINTK2((KERN_WARNING CARDNAME
+ ": TX_BUFFER_EMPTY handled\n"));
+ outb( IM_TX_EMPTY_INT, ioaddr + INTERRUPT );
+ mask &= ~IM_TX_EMPTY_INT;
+ dev->stats.tx_packets += lp->packets_waiting;
+ lp->packets_waiting = 0;
+
+ } else if (status & IM_ALLOC_INT ) {
+ PRINTK2((KERN_DEBUG CARDNAME
+ ": Allocation interrupt\n"));
+ /* clear this interrupt so it doesn't happen again */
+ mask &= ~IM_ALLOC_INT;
+
+ smc_hardware_send_packet( dev );
+
+ /* enable xmit interrupts based on this */
+ mask |= ( IM_TX_EMPTY_INT | IM_TX_INT );
+
+ /* and let the card send more packets to me */
+ netif_wake_queue(dev);
+
+ PRINTK2((CARDNAME": Handoff done successfully.\n"));
+ } else if (status & IM_RX_OVRN_INT ) {
+ dev->stats.rx_errors++;
+ dev->stats.rx_fifo_errors++;
+ outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT );
+ } else if (status & IM_EPH_INT ) {
+ PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT\n"));
+ } else if (status & IM_ERCV_INT ) {
+ PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT\n"));
+ outb( IM_ERCV_INT, ioaddr + INTERRUPT );
+ }
+ } while ( timeout -- );
+
+
+ /* restore state register */
+ SMC_SELECT_BANK( 2 );
+ outb( mask, ioaddr + INT_MASK );
+
+ PRINTK3((KERN_WARNING CARDNAME ": MASK is now %x\n", mask));
+ outw( saved_pointer, ioaddr + POINTER );
+
+ SMC_SELECT_BANK( saved_bank );
+
+ PRINTK3((CARDNAME ": Interrupt done\n"));
+ return IRQ_RETVAL(handled);
+}
+
+
+/*----------------------------------------------------
+ . smc_close
+ .
+ . this makes the board clean up everything that it can
+ . and not talk to the outside world. Caused by
+ . an 'ifconfig ethX down'
+ .
+ -----------------------------------------------------*/
+static int smc_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ /* clear everything */
+ smc_shutdown( dev->base_addr );
+
+ /* Update the statistics here. */
+ return 0;
+}
+
+/*-----------------------------------------------------------
+ . smc_set_multicast_list
+ .
+ . This routine will, depending on the values passed to it,
+ . either make it accept multicast packets, go into
+ . promiscuous mode ( for TCPDUMP and cousins ) or accept
+ . a select set of multicast packets
+*/
+static void smc_set_multicast_list(struct net_device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ SMC_SELECT_BANK(0);
+ if ( dev->flags & IFF_PROMISC )
+ outw( inw(ioaddr + RCR ) | RCR_PROMISC, ioaddr + RCR );
+
+/* BUG? I never disable promiscuous mode if multicasting was turned on.
+ Now, I turn off promiscuous mode, but I don't do anything to multicasting
+ when promiscuous mode is turned on.
+*/
+
+ /* Here, I am setting this to accept all multicast packets.
+ I don't need to zero the multicast table, because the flag is
+	   checked before the table is used.
+ */
+ else if (dev->flags & IFF_ALLMULTI)
+ outw( inw(ioaddr + RCR ) | RCR_ALMUL, ioaddr + RCR );
+
+ /* We just get all multicast packets even if we only want them
+ . from one source. This will be changed at some future
+ . point. */
+ else if (!netdev_mc_empty(dev)) {
+ /* support hardware multicasting */
+
+ /* be sure I get rid of flags I might have set */
+ outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL),
+ ioaddr + RCR );
+ /* NOTE: this has to set the bank, so make sure it is the
+ last thing called. The bank is set to zero at the top */
+ smc_setmulticast(ioaddr, dev);
+ }
+ else {
+ outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL),
+ ioaddr + RCR );
+
+ /*
+ since I'm disabling all multicast entirely, I need to
+ clear the multicast list
+ */
+ SMC_SELECT_BANK( 3 );
+ outw( 0, ioaddr + MULTICAST1 );
+ outw( 0, ioaddr + MULTICAST2 );
+ outw( 0, ioaddr + MULTICAST3 );
+ outw( 0, ioaddr + MULTICAST4 );
+ }
+}
+
+#ifdef MODULE
+
+static struct net_device *devSMC9194;
+MODULE_LICENSE("GPL");
+
+module_param(io, int, 0);
+module_param(irq, int, 0);
+module_param(ifport, int, 0);
+MODULE_PARM_DESC(io, "SMC 9194 I/O base address");
+MODULE_PARM_DESC(irq, "SMC 9194 IRQ number");
+MODULE_PARM_DESC(ifport, "SMC 9194 interface port (0-default, 1-TP, 2-AUI)");
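+/* e.g. (illustrative values only):
+   "insmod smc9194 io=0x300 irq=10 ifport=1" binds the driver to a card
+   at I/O base 0x300 on IRQ 10 and forces the twisted-pair port. */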
+
+int __init init_module(void)
+{
+ if (io == 0)
+ printk(KERN_WARNING
+ CARDNAME": You shouldn't use auto-probing with insmod!\n" );
+
+ /* copy the parameters from insmod into the device structure */
+ devSMC9194 = smc_init(-1);
+ if (IS_ERR(devSMC9194))
+ return PTR_ERR(devSMC9194);
+ return 0;
+}
+
+void __exit cleanup_module(void)
+{
+ unregister_netdev(devSMC9194);
+ free_irq(devSMC9194->irq, devSMC9194);
+ release_region(devSMC9194->base_addr, SMC_IO_EXTENT);
+ free_netdev(devSMC9194);
+}
+
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/smsc/smc9194.h b/drivers/net/ethernet/smsc/smc9194.h
new file mode 100644
index 000000000000..cf69d0a5a1cb
--- /dev/null
+++ b/drivers/net/ethernet/smsc/smc9194.h
@@ -0,0 +1,241 @@
+/*------------------------------------------------------------------------
+ . smc9194.h
+ . Copyright (C) 1996 by Erik Stahlman
+ .
+ . This software may be used and distributed according to the terms
+ . of the GNU General Public License, incorporated herein by reference.
+ .
+ . This file contains register information and access macros for
+ . the SMC91xxx chipset.
+ .
+ . Information contained in this file was obtained from the SMC91C94
+ . manual from SMC. To get a copy, if you really want one, you can find
+ . information under www.smc.com in the components division.
+ . ( thanks to advice from Donald Becker ).
+ .
+ . Authors
+ . Erik Stahlman ( erik@vt.edu )
+ .
+ . History
+ . 01/06/96 Erik Stahlman moved definitions here from main .c file
+ . 01/19/96 Erik Stahlman polished this up some, and added better
+ . error handling
+ .
+ ---------------------------------------------------------------------------*/
+#ifndef _SMC9194_H_
+#define _SMC9194_H_
+
+/* I want some simple types */
+
+typedef unsigned char byte;
+typedef unsigned short word;
+typedef unsigned long int dword;
+
+
+/* Because of bank switching, the SMC91xxx uses only 16 I/O ports */
+
+#define SMC_IO_EXTENT 16
+
+
+/*---------------------------------------------------------------
+ .
+ . A description of the SMC registers is probably in order here,
+ . although for details, the SMC datasheet is invaluable.
+ .
+ . Basically, the chip has 4 banks of registers ( 0 to 3 ), which
+ . are accessed by writing a number into the BANK_SELECT register
+ . ( I also use a SMC_SELECT_BANK macro for this ).
+ .
+ . The banks are configured so that for most purposes, bank 2 is all
+ . that is needed for simple run time tasks.
+ -----------------------------------------------------------------------*/
+
+/*
+ . Bank Select Register:
+ .
+ . yyyy yyyy 0000 00xx
+ . xx = bank number
+ . yyyy yyyy = 0x33, for identification purposes.
+*/
+#define BANK_SELECT 14
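+/*
+ . A (hypothetical) sketch of how this identification byte can be used:
+ . the high byte of BANK_SELECT always reads back 0x33 on a live chip,
+ . so a probe routine can bail out early when it does not, e.g.
+ .
+ .	if ( (inw( ioaddr + BANK_SELECT ) >> 8) != 0x33 )
+ .		return -ENODEV;
+*/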
+
+/* BANK 0 */
+
+#define TCR 0 /* transmit control register */
+#define TCR_ENABLE 0x0001 /* if this is 1, we can transmit */
+#define TCR_FDUPLX 0x0800 /* receive packets sent out */
+#define TCR_STP_SQET 0x1000 /* stop transmitting if Signal quality error */
+#define TCR_MON_CNS 0x0400 /* monitors the carrier status */
+#define TCR_PAD_ENABLE 0x0080 /* pads short packets to 64 bytes */
+
+#define TCR_CLEAR 0 /* do NOTHING */
+/* the normal settings for the TCR register : */
+/* QUESTION: do I want to enable padding of short packets ? */
+#define TCR_NORMAL TCR_ENABLE
+
+
+#define EPH_STATUS 2
+#define ES_LINK_OK 0x4000 /* is the link integrity ok ? */
+
+#define RCR 4
+#define RCR_SOFTRESET 0x8000 /* resets the chip */
+#define RCR_STRIP_CRC 0x200 /* strips CRC */
+#define RCR_ENABLE 0x100 /* IFF this is set, we can receive packets */
+#define RCR_ALMUL 0x4 /* receive all multicast packets */
+#define RCR_PROMISC 0x2 /* enable promiscuous mode */
+
+/* the normal settings for the RCR register : */
+#define RCR_NORMAL (RCR_STRIP_CRC | RCR_ENABLE)
+#define RCR_CLEAR 0x0 /* set it to a base state */
+
+#define COUNTER 6
+#define MIR 8
+#define MCR 10
+/* 12 is reserved */
+
+/* BANK 1 */
+#define CONFIG 0
+#define CFG_AUI_SELECT 0x100
+#define BASE 2
+#define ADDR0 4
+#define ADDR1 6
+#define ADDR2 8
+#define GENERAL 10
+#define CONTROL 12
+#define CTL_POWERDOWN 0x2000
+#define CTL_LE_ENABLE 0x80
+#define CTL_CR_ENABLE 0x40
+#define CTL_TE_ENABLE 0x0020
+#define CTL_AUTO_RELEASE 0x0800
+#define CTL_EPROM_ACCESS 0x0003 /* high if Eprom is being read */
+
+/* BANK 2 */
+#define MMU_CMD 0
+#define MC_BUSY 1 /* only readable bit in the register */
+#define MC_NOP 0
+#define MC_ALLOC 0x20 /* or with number of 256 byte packets */
+#define MC_RESET 0x40
+#define MC_REMOVE 0x60 /* remove the current rx packet */
+#define MC_RELEASE 0x80 /* remove and release the current rx packet */
+#define MC_FREEPKT 0xA0 /* Release packet in PNR register */
+#define MC_ENQUEUE 0xC0 /* Enqueue the packet for transmit */
+
+#define PNR_ARR 2
+#define FIFO_PORTS 4
+
+#define FP_RXEMPTY 0x8000
+#define FP_TXEMPTY 0x80
+
+#define POINTER 6
+#define PTR_READ 0x2000
+#define PTR_RCV 0x8000
+#define PTR_AUTOINC 0x4000
+#define PTR_AUTO_INC 0x0040
+
+#define DATA_1 8
+#define DATA_2 10
+#define INTERRUPT 12
+
+#define INT_MASK 13
+#define IM_RCV_INT 0x1
+#define IM_TX_INT 0x2
+#define IM_TX_EMPTY_INT 0x4
+#define IM_ALLOC_INT 0x8
+#define IM_RX_OVRN_INT 0x10
+#define IM_EPH_INT 0x20
+#define IM_ERCV_INT 0x40 /* not on SMC9192 */
+
+/* BANK 3 */
+#define MULTICAST1 0
+#define MULTICAST2 2
+#define MULTICAST3 4
+#define MULTICAST4 6
+#define MGMT 8
+#define REVISION 10 /* ( hi: chip id low: rev # ) */
+
+
+/* this is NOT on SMC9192 */
+#define ERCV 12
+
+#define CHIP_9190 3
+#define CHIP_9194 4
+#define CHIP_9195 5
+#define CHIP_91100 7
+
+static const char * chip_ids[ 15 ] = {
+ NULL, NULL, NULL,
+ /* 3 */ "SMC91C90/91C92",
+ /* 4 */ "SMC91C94",
+ /* 5 */ "SMC91C95",
+ NULL,
+ /* 7 */ "SMC91C100",
+ /* 8 */ "SMC91C100FD",
+ NULL, NULL, NULL,
+ NULL, NULL, NULL};
+
+/*
+ . Transmit status bits
+*/
+#define TS_SUCCESS 0x0001
+#define TS_LOSTCAR 0x0400
+#define TS_LATCOL 0x0200
+#define TS_16COL 0x0010
+
+/*
+ . Receive status bits
+*/
+#define RS_ALGNERR 0x8000
+#define RS_BADCRC 0x2000
+#define RS_ODDFRAME 0x1000
+#define RS_TOOLONG 0x0800
+#define RS_TOOSHORT 0x0400
+#define RS_MULTICAST 0x0001
+#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT)
+
+static const char * interfaces[ 2 ] = { "TP", "AUI" };
+
+/*-------------------------------------------------------------------------
+ . I define some macros to make it easier to do somewhat common
+ . or slightly complicated, repeated tasks.
+ --------------------------------------------------------------------------*/
+
+/* select a register bank, 0 to 3 */
+
+#define SMC_SELECT_BANK(x) { outw( x, ioaddr + BANK_SELECT ); }
+
+/* define a small delay for the reset */
+#define SMC_DELAY() { inw( ioaddr + RCR );\
+ inw( ioaddr + RCR );\
+ inw( ioaddr + RCR ); }
+
+/* this enables an interrupt in the interrupt mask register */
+#define SMC_ENABLE_INT(x) {\
+ unsigned char mask;\
+ SMC_SELECT_BANK(2);\
+ mask = inb( ioaddr + INT_MASK );\
+ mask |= (x);\
+ outb( mask, ioaddr + INT_MASK ); \
+}
+
+/* this disables an interrupt from the interrupt mask register */
+
+#define SMC_DISABLE_INT(x) {\
+ unsigned char mask;\
+ SMC_SELECT_BANK(2);\
+ mask = inb( ioaddr + INT_MASK );\
+ mask &= ~(x);\
+ outb( mask, ioaddr + INT_MASK ); \
+}
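+/* Typical (illustrative) usage: mask the receive interrupt around a
+   critical section, then turn it back on afterwards:
+
+	SMC_DISABLE_INT( IM_RCV_INT );
+	... touch the packet memory ...
+	SMC_ENABLE_INT( IM_RCV_INT );
+*/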
+
+/*----------------------------------------------------------------------
+ . Define the interrupts that I want to receive from the card
+ .
+ . I want:
+ . IM_EPH_INT, for nasty errors
+ . IM_RCV_INT, for happy received packets
+ . IM_RX_OVRN_INT, because I have to kick the receiver
+ --------------------------------------------------------------------------*/
+#define SMC_INTERRUPT_MASK (IM_EPH_INT | IM_RX_OVRN_INT | IM_RCV_INT)
+
+#endif  /* _SMC9194_H_ */
+
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
new file mode 100644
index 000000000000..cbfa98187131
--- /dev/null
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -0,0 +1,2070 @@
+/*======================================================================
+
+ A PCMCIA ethernet driver for SMC91c92-based cards.
+
+ This driver supports Megahertz PCMCIA ethernet cards; and
+ Megahertz, Motorola, Ositech, and Psion Dacom ethernet/modem
+ multifunction cards.
+
+ Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
+
+ smc91c92_cs.c 1.122 2002/10/25 06:26:39
+
+ This driver contains code written by Donald Becker
+ (becker@scyld.com), Rowan Hughes (x-csrdh@jcu.edu.au),
+ David Hinds (dahinds@users.sourceforge.net), and Erik Stahlman
+ (erik@vt.edu). Donald wrote the SMC 91c92 code using parts of
+ Erik's SMC 91c94 driver. Rowan wrote a similar driver, and I've
+ incorporated some parts of his driver here. I (Dave) wrote most
+ of the PCMCIA glue code, and the Ositech support code. Kelly
+ Stephens (kstephen@holli.com) added support for the Motorola
+ Mariner, with help from Allen Brost.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License, incorporated herein by reference.
+
+======================================================================*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/jiffies.h>
+#include <linux/firmware.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/ss.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+/*====================================================================*/
+
+static const char *if_names[] = { "auto", "10baseT", "10base2"};
+
+/* Firmware name */
+#define FIRMWARE_NAME "ositech/Xilinx7OD.bin"
+
+/* Module parameters */
+
+MODULE_DESCRIPTION("SMC 91c92 series PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE_NAME);
+
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
+
+/*
+ Transceiver/media type.
+ 0 = auto
+ 1 = 10baseT (and autoselect if #define AUTOSELECT),
+ 2 = AUI/10base2,
+*/
+INT_MODULE_PARM(if_port, 0);
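+/* e.g. (illustrative): "modprobe smc91c92_cs if_port=1" forces 10baseT. */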
+
+
+#define DRV_NAME "smc91c92_cs"
+#define DRV_VERSION "1.123"
+
+/*====================================================================*/
+
+/* Operational parameters that usually are not changed. */
+
+/* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+#define INTR_WORK 4
+
+/* Times to check the chip before concluding that it doesn't
+   currently have room for another Tx packet. */
+#define MEMORY_WAIT_TIME 8
+
+struct smc_private {
+ struct pcmcia_device *p_dev;
+ spinlock_t lock;
+ u_short manfid;
+ u_short cardid;
+
+ struct sk_buff *saved_skb;
+ int packets_waiting;
+ void __iomem *base;
+ u_short cfg;
+ struct timer_list media;
+ int watchdog, tx_err;
+ u_short media_status;
+ u_short fast_poll;
+ u_short link_status;
+ struct mii_if_info mii_if;
+ int duplex;
+ int rx_ovrn;
+};
+
+/* Special definitions for Megahertz multifunction cards */
+#define MEGAHERTZ_ISR 0x0380
+
+/* Special function registers for Motorola Mariner */
+#define MOT_LAN 0x0000
+#define MOT_UART 0x0020
+#define MOT_EEPROM 0x20
+
+#define MOT_NORMAL \
+(COR_LEVEL_REQ | COR_FUNC_ENA | COR_ADDR_DECODE | COR_IREQ_ENA)
+
+/* Special function registers for Ositech cards */
+#define OSITECH_AUI_CTL 0x0c
+#define OSITECH_PWRDOWN 0x0d
+#define OSITECH_RESET 0x0e
+#define OSITECH_ISR 0x0f
+#define OSITECH_AUI_PWR 0x0c
+#define OSITECH_RESET_ISR 0x0e
+
+#define OSI_AUI_PWR 0x40
+#define OSI_LAN_PWRDOWN 0x02
+#define OSI_MODEM_PWRDOWN 0x01
+#define OSI_LAN_RESET 0x02
+#define OSI_MODEM_RESET 0x01
+
+/* Symbolic constants for the SMC91c9* series chips, from Erik Stahlman. */
+#define BANK_SELECT 14 /* Window select register. */
+#define SMC_SELECT_BANK(x) { outw(x, ioaddr + BANK_SELECT); }
+
+/* Bank 0 registers. */
+#define TCR 0 /* transmit control register */
+#define TCR_CLEAR 0 /* do NOTHING */
+#define TCR_ENABLE 0x0001 /* if this is 1, we can transmit */
+#define TCR_PAD_EN 0x0080 /* pads short packets to 64 bytes */
+#define TCR_MONCSN 0x0400 /* Monitor Carrier. */
+#define TCR_FDUPLX 0x0800 /* Full duplex mode. */
+#define TCR_NORMAL	(TCR_ENABLE | TCR_PAD_EN)
+
+#define EPH 2 /* Ethernet Protocol Handler report. */
+#define EPH_TX_SUC 0x0001
+#define EPH_SNGLCOL 0x0002
+#define EPH_MULCOL 0x0004
+#define EPH_LTX_MULT 0x0008
+#define EPH_16COL 0x0010
+#define EPH_SQET 0x0020
+#define EPH_LTX_BRD 0x0040
+#define EPH_TX_DEFR 0x0080
+#define EPH_LAT_COL 0x0200
+#define EPH_LOST_CAR 0x0400
+#define EPH_EXC_DEF 0x0800
+#define EPH_CTR_ROL 0x1000
+#define EPH_RX_OVRN 0x2000
+#define EPH_LINK_OK 0x4000
+#define EPH_TX_UNRN 0x8000
+#define MEMINFO 8 /* Memory Information Register */
+#define MEMCFG 10 /* Memory Configuration Register */
+
+/* Bank 1 registers. */
+#define CONFIG 0
+#define CFG_MII_SELECT 0x8000 /* 91C100 only */
+#define CFG_NO_WAIT 0x1000
+#define CFG_FULL_STEP 0x0400
+#define CFG_SET_SQLCH 0x0200
+#define CFG_AUI_SELECT 0x0100
+#define CFG_16BIT 0x0080
+#define CFG_DIS_LINK 0x0040
+#define CFG_STATIC 0x0030
+#define CFG_IRQ_SEL_1 0x0004
+#define CFG_IRQ_SEL_0 0x0002
+#define BASE_ADDR 2
+#define ADDR0 4
+#define GENERAL 10
+#define CONTROL 12
+#define CTL_STORE 0x0001
+#define CTL_RELOAD 0x0002
+#define CTL_EE_SELECT 0x0004
+#define CTL_TE_ENABLE 0x0020
+#define CTL_CR_ENABLE 0x0040
+#define CTL_LE_ENABLE 0x0080
+#define CTL_AUTO_RELEASE 0x0800
+#define CTL_POWERDOWN 0x2000
+
+/* Bank 2 registers. */
+#define MMU_CMD 0
+#define MC_ALLOC 0x20 /* or with number of 256 byte packets */
+#define MC_RESET 0x40
+#define MC_RELEASE 0x80 /* remove and release the current rx packet */
+#define MC_FREEPKT 0xA0 /* Release packet in PNR register */
+#define MC_ENQUEUE 0xC0 /* Enqueue the packet for transmit */
+#define PNR_ARR 2
+#define FIFO_PORTS 4
+#define FP_RXEMPTY 0x8000
+#define POINTER 6
+#define PTR_AUTO_INC 0x0040
+#define PTR_READ 0x2000
+#define PTR_AUTOINC 0x4000
+#define PTR_RCV 0x8000
+#define DATA_1 8
+#define INTERRUPT 12
+#define IM_RCV_INT 0x1
+#define IM_TX_INT 0x2
+#define IM_TX_EMPTY_INT 0x4
+#define IM_ALLOC_INT 0x8
+#define IM_RX_OVRN_INT 0x10
+#define IM_EPH_INT 0x20
+
+#define RCR 4
+enum RxCfg { RxAllMulti = 0x0004, RxPromisc = 0x0002,
+ RxEnable = 0x0100, RxStripCRC = 0x0200};
+#define RCR_SOFTRESET 0x8000 /* resets the chip */
+#define RCR_STRIP_CRC 0x200 /* strips CRC */
+#define RCR_ENABLE 0x100 /* IFF this is set, we can receive packets */
+#define RCR_ALMUL 0x4 /* receive all multicast packets */
+#define RCR_PROMISC 0x2 /* enable promiscuous mode */
+
+/* the normal settings for the RCR register : */
+#define RCR_NORMAL (RCR_STRIP_CRC | RCR_ENABLE)
+#define RCR_CLEAR 0x0 /* set it to a base state */
+#define COUNTER 6
+
+/* BANK 3 -- not the same values as in smc9194! */
+#define MULTICAST0 0
+#define MULTICAST2 2
+#define MULTICAST4 4
+#define MULTICAST6 6
+#define MGMT 8
+#define REVISION 0x0a
+
+/* Transmit status bits. */
+#define TS_SUCCESS 0x0001
+#define TS_16COL 0x0010
+#define TS_LATCOL 0x0200
+#define TS_LOSTCAR 0x0400
+
+/* Receive status bits. */
+#define RS_ALGNERR 0x8000
+#define RS_BADCRC 0x2000
+#define RS_ODDFRAME 0x1000
+#define RS_TOOLONG 0x0800
+#define RS_TOOSHORT 0x0400
+#define RS_MULTICAST 0x0001
+#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT)
+
+#define set_bits(v, p) outw(inw(p)|(v), (p))
+#define mask_bits(v, p) outw(inw(p)&(v), (p))
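+/* Note: set_bits() OR's the given bits into a port; mask_bits() AND's
+   the port with them, so callers pass the bits they want to *keep*,
+   e.g. mask_bits(0xff00, ioaddr + RCR) clears the low byte. */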
+
+/*====================================================================*/
+
+static void smc91c92_detach(struct pcmcia_device *p_dev);
+static int smc91c92_config(struct pcmcia_device *link);
+static void smc91c92_release(struct pcmcia_device *link);
+
+static int smc_open(struct net_device *dev);
+static int smc_close(struct net_device *dev);
+static int smc_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void smc_tx_timeout(struct net_device *dev);
+static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t smc_interrupt(int irq, void *dev_id);
+static void smc_rx(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static int s9k_config(struct net_device *dev, struct ifmap *map);
+static void smc_set_xcvr(struct net_device *dev, int if_port);
+static void smc_reset(struct net_device *dev);
+static void media_check(u_long arg);
+static void mdio_sync(unsigned int addr);
+static int mdio_read(struct net_device *dev, int phy_id, int loc);
+static void mdio_write(struct net_device *dev, int phy_id, int loc, int value);
+static int smc_link_ok(struct net_device *dev);
+static const struct ethtool_ops ethtool_ops;
+
+static const struct net_device_ops smc_netdev_ops = {
+ .ndo_open = smc_open,
+ .ndo_stop = smc_close,
+ .ndo_start_xmit = smc_start_xmit,
+ .ndo_tx_timeout = smc_tx_timeout,
+ .ndo_set_config = s9k_config,
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_do_ioctl = smc_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int smc91c92_probe(struct pcmcia_device *link)
+{
+ struct smc_private *smc;
+ struct net_device *dev;
+
+ dev_dbg(&link->dev, "smc91c92_attach()\n");
+
+ /* Create new ethernet device */
+ dev = alloc_etherdev(sizeof(struct smc_private));
+ if (!dev)
+ return -ENOMEM;
+ smc = netdev_priv(dev);
+ smc->p_dev = link;
+ link->priv = dev;
+
+ spin_lock_init(&smc->lock);
+
+ /* The SMC91c92-specific entries in the device structure. */
+ dev->netdev_ops = &smc_netdev_ops;
+ SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ smc->mii_if.dev = dev;
+ smc->mii_if.mdio_read = mdio_read;
+ smc->mii_if.mdio_write = mdio_write;
+ smc->mii_if.phy_id_mask = 0x1f;
+ smc->mii_if.reg_num_mask = 0x1f;
+
+ return smc91c92_config(link);
+} /* smc91c92_attach */
+
+static void smc91c92_detach(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ dev_dbg(&link->dev, "smc91c92_detach\n");
+
+ unregister_netdev(dev);
+
+ smc91c92_release(link);
+
+ free_netdev(dev);
+} /* smc91c92_detach */
+
+/*====================================================================*/
+
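+/* Convert a 12-character ASCII hex string (e.g. "0060973A1B2C") into
+   the six bytes of dev->dev_addr.  Returns 0 on success, -1 if the
+   string is not exactly 12 characters long. */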
+static int cvt_ascii_address(struct net_device *dev, char *s)
+{
+ int i, j, da, c;
+
+ if (strlen(s) != 12)
+ return -1;
+ for (i = 0; i < 6; i++) {
+ da = 0;
+ for (j = 0; j < 2; j++) {
+ c = *s++;
+ da <<= 4;
+ da += ((c >= '0') && (c <= '9')) ?
+ (c - '0') : ((c & 0x0f) + 9);
+ }
+ dev->dev_addr[i] = da;
+ }
+ return 0;
+}
+
+/*====================================================================
+
+ Configuration stuff for Megahertz cards
+
+ mhz_3288_power() is used to power up a 3288's ethernet chip.
+ mhz_mfc_config() handles socket setup for multifunction (1144
+ and 3288) cards. mhz_setup() gets a card's hardware ethernet
+ address.
+
+======================================================================*/
+
+static int mhz_3288_power(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ struct smc_private *smc = netdev_priv(dev);
+ u_char tmp;
+
+ /* Read the ISR twice... */
+ readb(smc->base+MEGAHERTZ_ISR);
+ udelay(5);
+ readb(smc->base+MEGAHERTZ_ISR);
+
+ /* Pause 200ms... */
+ mdelay(200);
+
+ /* Now read and write the COR... */
+ tmp = readb(smc->base + link->config_base + CISREG_COR);
+ udelay(5);
+ writeb(tmp, smc->base + link->config_base + CISREG_COR);
+
+ return 0;
+}
+
+static int mhz_mfc_config_check(struct pcmcia_device *p_dev, void *priv_data)
+{
+ int k;
+ p_dev->io_lines = 16;
+ p_dev->resource[1]->start = p_dev->resource[0]->start;
+ p_dev->resource[1]->end = 8;
+ p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
+ p_dev->resource[0]->end = 16;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+ for (k = 0; k < 0x400; k += 0x10) {
+ if (k & 0x80)
+ continue;
+ p_dev->resource[0]->start = k ^ 0x300;
+ if (!pcmcia_request_io(p_dev))
+ return 0;
+ }
+ return -ENODEV;
+}
+
+static int mhz_mfc_config(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ struct smc_private *smc = netdev_priv(dev);
+ unsigned int offset;
+ int i;
+
+ link->config_flags |= CONF_ENABLE_SPKR | CONF_ENABLE_IRQ |
+ CONF_AUTO_SET_IO;
+
+ /* The Megahertz combo cards have modem-like CIS entries, so
+ we have to explicitly try a bunch of port combinations. */
+ if (pcmcia_loop_config(link, mhz_mfc_config_check, NULL))
+ return -ENODEV;
+
+ dev->base_addr = link->resource[0]->start;
+
+    /* Allocate a memory window for accessing the ISR */
+ link->resource[2]->flags = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
+ link->resource[2]->start = link->resource[2]->end = 0;
+ i = pcmcia_request_window(link, link->resource[2], 0);
+ if (i != 0)
+ return -ENODEV;
+
+ smc->base = ioremap(link->resource[2]->start,
+ resource_size(link->resource[2]));
+ offset = (smc->manfid == MANFID_MOTOROLA) ? link->config_base : 0;
+ i = pcmcia_map_mem_page(link, link->resource[2], offset);
+ if ((i == 0) &&
+ (smc->manfid == MANFID_MEGAHERTZ) &&
+ (smc->cardid == PRODID_MEGAHERTZ_EM3288))
+ mhz_3288_power(link);
+
+ return 0;
+}
+
+static int pcmcia_get_versmac(struct pcmcia_device *p_dev,
+ tuple_t *tuple,
+ void *priv)
+{
+ struct net_device *dev = priv;
+ cisparse_t parse;
+ u8 *buf;
+
+ if (pcmcia_parse_tuple(tuple, &parse))
+ return -EINVAL;
+
+ buf = parse.version_1.str + parse.version_1.ofs[3];
+
+ if ((parse.version_1.ns > 3) && (cvt_ascii_address(dev, buf) == 0))
+ return 0;
+
+ return -EINVAL;
+};
+
+static int mhz_setup(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ size_t len;
+ u8 *buf;
+ int rc;
+
+ /* Read the station address from the CIS. It is stored as the last
+ (fourth) string in the Version 1 Version/ID tuple. */
+ if ((link->prod_id[3]) &&
+ (cvt_ascii_address(dev, link->prod_id[3]) == 0))
+ return 0;
+
+ /* Workarounds for broken cards start here. */
+ /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */
+ if (!pcmcia_loop_tuple(link, CISTPL_VERS_1, pcmcia_get_versmac, dev))
+ return 0;
+
+ /* Another possibility: for the EM3288, in a special tuple */
+ rc = -1;
+ len = pcmcia_get_tuple(link, 0x81, &buf);
+ if (buf && len >= 13) {
+ buf[12] = '\0';
+ if (cvt_ascii_address(dev, buf) == 0)
+ rc = 0;
+ }
+ kfree(buf);
+
+ return rc;
+};
+
+/*======================================================================
+
+ Configuration stuff for the Motorola Mariner
+
+ mot_config() writes directly to the Mariner configuration
+ registers because the CIS is just bogus.
+
+======================================================================*/
+
+static void mot_config(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ struct smc_private *smc = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ unsigned int iouart = link->resource[1]->start;
+
+ /* Set UART base address and force map with COR bit 1 */
+ writeb(iouart & 0xff, smc->base + MOT_UART + CISREG_IOBASE_0);
+ writeb((iouart >> 8) & 0xff, smc->base + MOT_UART + CISREG_IOBASE_1);
+ writeb(MOT_NORMAL, smc->base + MOT_UART + CISREG_COR);
+
+ /* Set SMC base address and force map with COR bit 1 */
+ writeb(ioaddr & 0xff, smc->base + MOT_LAN + CISREG_IOBASE_0);
+ writeb((ioaddr >> 8) & 0xff, smc->base + MOT_LAN + CISREG_IOBASE_1);
+ writeb(MOT_NORMAL, smc->base + MOT_LAN + CISREG_COR);
+
+ /* Wait for things to settle down */
+ mdelay(100);
+}
+
+static int mot_setup(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ unsigned int ioaddr = dev->base_addr;
+ int i, wait, loop;
+ u_int addr;
+
+ /* Read Ethernet address from Serial EEPROM */
+
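+    /* Each pass reads one 16-bit EEPROM word: point at it through the
+       bank-2 POINTER register, pulse CTL_RELOAD with the EEPROM
+       selected, poll until the reload completes, then pick the word
+       up from the bank-1 GENERAL register. */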
+ for (i = 0; i < 3; i++) {
+ SMC_SELECT_BANK(2);
+ outw(MOT_EEPROM + i, ioaddr + POINTER);
+ SMC_SELECT_BANK(1);
+ outw((CTL_RELOAD | CTL_EE_SELECT), ioaddr + CONTROL);
+
+ for (loop = wait = 0; loop < 200; loop++) {
+ udelay(10);
+ wait = ((CTL_RELOAD | CTL_STORE) & inw(ioaddr + CONTROL));
+ if (wait == 0) break;
+ }
+
+ if (wait)
+ return -1;
+
+ addr = inw(ioaddr + GENERAL);
+ dev->dev_addr[2*i] = addr & 0xff;
+ dev->dev_addr[2*i+1] = (addr >> 8) & 0xff;
+ }
+
+ return 0;
+}
+
+/*====================================================================*/
+
+static int smc_configcheck(struct pcmcia_device *p_dev, void *priv_data)
+{
+ p_dev->resource[0]->end = 16;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+
+ return pcmcia_request_io(p_dev);
+}
+
+static int smc_config(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ int i;
+
+ link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+
+ i = pcmcia_loop_config(link, smc_configcheck, NULL);
+ if (!i)
+ dev->base_addr = link->resource[0]->start;
+
+ return i;
+}
+
+
+static int smc_setup(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ /* Check for a LAN function extension tuple */
+ if (!pcmcia_get_mac_from_cis(link, dev))
+ return 0;
+
+ /* Try the third string in the Version 1 Version/ID tuple. */
+ if (link->prod_id[2]) {
+ if (cvt_ascii_address(dev, link->prod_id[2]) == 0)
+ return 0;
+ }
+ return -1;
+}
+
+/*====================================================================*/
+
+static int osi_config(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ static const unsigned int com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 };
+ int i, j;
+
+ link->config_flags |= CONF_ENABLE_SPKR | CONF_ENABLE_IRQ;
+ link->resource[0]->end = 64;
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[1]->end = 8;
+
+ /* Enable Hard Decode, LAN, Modem */
+ link->io_lines = 16;
+ link->config_index = 0x23;
+
+ for (i = j = 0; j < 4; j++) {
+ link->resource[1]->start = com[j];
+ i = pcmcia_request_io(link);
+ if (i == 0)
+ break;
+ }
+ if (i != 0) {
+ /* Fallback: turn off hard decode */
+ link->config_index = 0x03;
+ link->resource[1]->end = 0;
+ i = pcmcia_request_io(link);
+ }
+ dev->base_addr = link->resource[0]->start + 0x10;
+ return i;
+}
+
+static int osi_load_firmware(struct pcmcia_device *link)
+{
+ const struct firmware *fw;
+ int i, err;
+
+ err = request_firmware(&fw, FIRMWARE_NAME, &link->dev);
+ if (err) {
+ pr_err("Failed to load firmware \"%s\"\n", FIRMWARE_NAME);
+ return err;
+ }
+
+ /* Download the Seven of Diamonds firmware */
+ for (i = 0; i < fw->size; i++) {
+ outb(fw->data[i], link->resource[0]->start + 2);
+ udelay(50);
+ }
+ release_firmware(fw);
+ return err;
+}
+
+static int pcmcia_osi_mac(struct pcmcia_device *p_dev,
+ tuple_t *tuple,
+ void *priv)
+{
+ struct net_device *dev = priv;
+ int i;
+
+ if (tuple->TupleDataLen < 8)
+ return -EINVAL;
+ if (tuple->TupleData[0] != 0x04)
+ return -EINVAL;
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = tuple->TupleData[i+2];
+ return 0;
+};
+
+
+static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid)
+{
+ struct net_device *dev = link->priv;
+ int rc;
+
+ /* Read the station address from tuple 0x90, subtuple 0x04 */
+ if (pcmcia_loop_tuple(link, 0x90, pcmcia_osi_mac, dev))
+ return -1;
+
+ if (((manfid == MANFID_OSITECH) &&
+ (cardid == PRODID_OSITECH_SEVEN)) ||
+ ((manfid == MANFID_PSION) &&
+ (cardid == PRODID_PSION_NET100))) {
+ rc = osi_load_firmware(link);
+ if (rc)
+ return rc;
+ } else if (manfid == MANFID_OSITECH) {
+ /* Make sure both functions are powered up */
+ set_bits(0x300, link->resource[0]->start + OSITECH_AUI_PWR);
+ /* Now, turn on the interrupt for both card functions */
+ set_bits(0x300, link->resource[0]->start + OSITECH_RESET_ISR);
+ dev_dbg(&link->dev, "AUI/PWR: %4.4x RESET/ISR: %4.4x\n",
+ inw(link->resource[0]->start + OSITECH_AUI_PWR),
+ inw(link->resource[0]->start + OSITECH_RESET_ISR));
+ }
+ return 0;
+}
+
+static int smc91c92_suspend(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ if (link->open)
+ netif_device_detach(dev);
+
+ return 0;
+}
+
+static int smc91c92_resume(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ struct smc_private *smc = netdev_priv(dev);
+ int i;
+
+ if ((smc->manfid == MANFID_MEGAHERTZ) &&
+ (smc->cardid == PRODID_MEGAHERTZ_EM3288))
+ mhz_3288_power(link);
+ if (smc->manfid == MANFID_MOTOROLA)
+ mot_config(link);
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN)) {
+ /* Power up the card and enable interrupts */
+ set_bits(0x0300, dev->base_addr-0x10+OSITECH_AUI_PWR);
+ set_bits(0x0300, dev->base_addr-0x10+OSITECH_RESET_ISR);
+ }
+ if (((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid == PRODID_OSITECH_SEVEN)) ||
+ ((smc->manfid == MANFID_PSION) &&
+ (smc->cardid == PRODID_PSION_NET100))) {
+ i = osi_load_firmware(link);
+ if (i) {
+ pr_err("smc91c92_cs: Failed to load firmware\n");
+ return i;
+ }
+ }
+ if (link->open) {
+ smc_reset(dev);
+ netif_device_attach(dev);
+ }
+
+ return 0;
+}
+
+
+/*======================================================================
+
+ This verifies that the chip is some SMC91cXX variant, and returns
+ the revision code if successful. Otherwise, it returns -ENODEV.
+
+======================================================================*/
+
+static int check_sig(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ unsigned int ioaddr = dev->base_addr;
+ int width;
+ u_short s;
+
+ SMC_SELECT_BANK(1);
+ if (inw(ioaddr + BANK_SELECT) >> 8 != 0x33) {
+ /* Try powering up the chip */
+ outw(0, ioaddr + CONTROL);
+ mdelay(55);
+ }
+
+ /* Try setting bus width */
+ width = (link->resource[0]->flags == IO_DATA_PATH_WIDTH_AUTO);
+ s = inb(ioaddr + CONFIG);
+ if (width)
+ s |= CFG_16BIT;
+ else
+ s &= ~CFG_16BIT;
+ outb(s, ioaddr + CONFIG);
+
+ /* Check Base Address Register to make sure bus width is OK */
+ s = inw(ioaddr + BASE_ADDR);
+ if ((inw(ioaddr + BANK_SELECT) >> 8 == 0x33) &&
+ ((s >> 8) != (s & 0xff))) {
+ SMC_SELECT_BANK(3);
+ s = inw(ioaddr + REVISION);
+ return s & 0xff;
+ }
+
+ if (width) {
+ pr_info("using 8-bit IO window\n");
+
+ smc91c92_suspend(link);
+ pcmcia_fixup_iowidth(link);
+ smc91c92_resume(link);
+ return check_sig(link);
+ }
+ return -ENODEV;
+}
+
+static int smc91c92_config(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ struct smc_private *smc = netdev_priv(dev);
+ char *name;
+ int i, rev, j = 0;
+ unsigned int ioaddr;
+ u_long mir;
+
+ dev_dbg(&link->dev, "smc91c92_config\n");
+
+ smc->manfid = link->manf_id;
+ smc->cardid = link->card_id;
+
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN)) {
+ i = osi_config(link);
+ } else if ((smc->manfid == MANFID_MOTOROLA) ||
+ ((smc->manfid == MANFID_MEGAHERTZ) &&
+ ((smc->cardid == PRODID_MEGAHERTZ_VARIOUS) ||
+ (smc->cardid == PRODID_MEGAHERTZ_EM3288)))) {
+ i = mhz_mfc_config(link);
+ } else {
+ i = smc_config(link);
+ }
+ if (i)
+ goto config_failed;
+
+ i = pcmcia_request_irq(link, smc_interrupt);
+ if (i)
+ goto config_failed;
+ i = pcmcia_enable_device(link);
+ if (i)
+ goto config_failed;
+
+ if (smc->manfid == MANFID_MOTOROLA)
+ mot_config(link);
+
+ dev->irq = link->irq;
+
+ if ((if_port >= 0) && (if_port <= 2))
+ dev->if_port = if_port;
+ else
+ dev_notice(&link->dev, "invalid if_port requested\n");
+
+ switch (smc->manfid) {
+ case MANFID_OSITECH:
+ case MANFID_PSION:
+ i = osi_setup(link, smc->manfid, smc->cardid); break;
+ case MANFID_SMC:
+ case MANFID_NEW_MEDIA:
+ i = smc_setup(link); break;
+ case 0x128: /* For broken Megahertz cards */
+ case MANFID_MEGAHERTZ:
+ i = mhz_setup(link); break;
+ case MANFID_MOTOROLA:
+ default: /* get the hw address from EEPROM */
+ i = mot_setup(link); break;
+ }
+
+ if (i != 0) {
+ dev_notice(&link->dev, "Unable to find hardware address.\n");
+ goto config_failed;
+ }
+
+ smc->duplex = 0;
+ smc->rx_ovrn = 0;
+
+ rev = check_sig(link);
+ name = "???";
+ if (rev > 0)
+ switch (rev >> 4) {
+ case 3: name = "92"; break;
+ case 4: name = ((rev & 15) >= 6) ? "96" : "94"; break;
+ case 5: name = "95"; break;
+ case 7: name = "100"; break;
+ case 8: name = "100-FD"; break;
+ case 9: name = "110"; break;
+ }
+
+ ioaddr = dev->base_addr;
+ if (rev > 0) {
+ u_long mcr;
+ SMC_SELECT_BANK(0);
+ mir = inw(ioaddr + MEMINFO) & 0xff;
+ if (mir == 0xff) mir++;
+ /* Get scale factor for memory size */
+ mcr = ((rev >> 4) > 3) ? inw(ioaddr + MEMCFG) : 0x0200;
+ mir *= 128 * (1<<((mcr >> 9) & 7));
+ SMC_SELECT_BANK(1);
+ smc->cfg = inw(ioaddr + CONFIG) & ~CFG_AUI_SELECT;
+ smc->cfg |= CFG_NO_WAIT | CFG_16BIT | CFG_STATIC;
+ if (smc->manfid == MANFID_OSITECH)
+ smc->cfg |= CFG_IRQ_SEL_1 | CFG_IRQ_SEL_0;
+ if ((rev >> 4) >= 7)
+ smc->cfg |= CFG_MII_SELECT;
+ } else
+ mir = 0;
+
+ if (smc->cfg & CFG_MII_SELECT) {
+ SMC_SELECT_BANK(3);
+
+ for (i = 0; i < 32; i++) {
+ j = mdio_read(dev, i, 1);
+ if ((j != 0) && (j != 0xffff)) break;
+ }
+ smc->mii_if.phy_id = (i < 32) ? i : -1;
+
+ SMC_SELECT_BANK(0);
+ }
+
+ SET_NETDEV_DEV(dev, &link->dev);
+
+ if (register_netdev(dev) != 0) {
+ dev_err(&link->dev, "register_netdev() failed\n");
+ goto config_undo;
+ }
+
+ netdev_info(dev, "smc91c%s rev %d: io %#3lx, irq %d, hw_addr %pM\n",
+ name, (rev & 0x0f), dev->base_addr, dev->irq, dev->dev_addr);
+
+ if (rev > 0) {
+ if (mir & 0x3ff)
+ netdev_info(dev, " %lu byte", mir);
+ else
+ netdev_info(dev, " %lu kb", mir>>10);
+ pr_cont(" buffer, %s xcvr\n",
+ (smc->cfg & CFG_MII_SELECT) ? "MII" : if_names[dev->if_port]);
+ }
+
+ if (smc->cfg & CFG_MII_SELECT) {
+ if (smc->mii_if.phy_id != -1) {
+ netdev_dbg(dev, " MII transceiver at index %d, status %x\n",
+ smc->mii_if.phy_id, j);
+ } else {
+ netdev_notice(dev, " No MII transceivers found!\n");
+ }
+ }
+ return 0;
+
+config_undo:
+ unregister_netdev(dev);
+config_failed:
+ smc91c92_release(link);
+ free_netdev(dev);
+ return -ENODEV;
+} /* smc91c92_config */
+
+static void smc91c92_release(struct pcmcia_device *link)
+{
+ dev_dbg(&link->dev, "smc91c92_release\n");
+ if (link->resource[2]->end) {
+ struct net_device *dev = link->priv;
+ struct smc_private *smc = netdev_priv(dev);
+ iounmap(smc->base);
+ }
+ pcmcia_disable_device(link);
+}
+
+/*======================================================================
+
+ MII interface support for SMC91cXX based cards
+======================================================================*/
+
+#define MDIO_SHIFT_CLK 0x04
+#define MDIO_DATA_OUT 0x01
+#define MDIO_DIR_WRITE 0x08
+#define MDIO_DATA_WRITE0 (MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (MDIO_DIR_WRITE | MDIO_DATA_OUT)
+#define MDIO_DATA_READ 0x02
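+/*
+  The MII management interface is bit-banged through the MGMT register:
+  mdio_sync() clocks out a preamble of 32 ones, mdio_read() then shifts
+  out the 14 command bits (start, read opcode, 5-bit PHY address, 5-bit
+  register number) and clocks in the 16 data bits after the turnaround;
+  mdio_write() shifts the whole 32-bit write frame out instead.
+*/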
+
+static void mdio_sync(unsigned int addr)
+{
+ int bits;
+ for (bits = 0; bits < 32; bits++) {
+ outb(MDIO_DATA_WRITE1, addr);
+ outb(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr);
+ }
+}
+
+static int mdio_read(struct net_device *dev, int phy_id, int loc)
+{
+ unsigned int addr = dev->base_addr + MGMT;
+ u_int cmd = (0x06<<10)|(phy_id<<5)|loc;
+ int i, retval = 0;
+
+ mdio_sync(addr);
+ for (i = 13; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb(dat, addr);
+ outb(dat | MDIO_SHIFT_CLK, addr);
+ }
+ for (i = 19; i > 0; i--) {
+ outb(0, addr);
+ retval = (retval << 1) | ((inb(addr) & MDIO_DATA_READ) != 0);
+ outb(MDIO_SHIFT_CLK, addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
+{
+ unsigned int addr = dev->base_addr + MGMT;
+ u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
+ int i;
+
+ mdio_sync(addr);
+ for (i = 31; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb(dat, addr);
+ outb(dat | MDIO_SHIFT_CLK, addr);
+ }
+ for (i = 1; i >= 0; i--) {
+ outb(0, addr);
+ outb(MDIO_SHIFT_CLK, addr);
+ }
+}
+
+/*======================================================================
+
+ The driver core code, most of which should be common with a
+ non-PCMCIA implementation.
+
+======================================================================*/
+
+#ifdef PCMCIA_DEBUG
+static void smc_dump(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ u_short i, w, save;
+ save = inw(ioaddr + BANK_SELECT);
+ for (w = 0; w < 4; w++) {
+ SMC_SELECT_BANK(w);
+ netdev_printk(KERN_DEBUG, dev, "bank %d: ", w);
+ for (i = 0; i < 14; i += 2)
+ pr_cont(" %04x", inw(ioaddr + i));
+ pr_cont("\n");
+ }
+ outw(save, ioaddr + BANK_SELECT);
+}
+#endif
+
+static int smc_open(struct net_device *dev)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ struct pcmcia_device *link = smc->p_dev;
+
+ dev_dbg(&link->dev, "%s: smc_open(%p), ID/Window %4.4x.\n",
+ dev->name, dev, inw(dev->base_addr + BANK_SELECT));
+#ifdef PCMCIA_DEBUG
+ smc_dump(dev);
+#endif
+
+ /* Check that the PCMCIA card is still here. */
+ if (!pcmcia_dev_present(link))
+ return -ENODEV;
+ /* Physical device present signature. */
+ if (check_sig(link) < 0) {
+ netdev_info(dev, "Yikes! Bad chip signature!\n");
+ return -ENODEV;
+ }
+ link->open++;
+
+ netif_start_queue(dev);
+ smc->saved_skb = NULL;
+ smc->packets_waiting = 0;
+
+ smc_reset(dev);
+ init_timer(&smc->media);
+ smc->media.function = media_check;
+ smc->media.data = (u_long) dev;
+ smc->media.expires = jiffies + HZ;
+ add_timer(&smc->media);
+
+ return 0;
+} /* smc_open */
+
+/*====================================================================*/
+
+static int smc_close(struct net_device *dev)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ struct pcmcia_device *link = smc->p_dev;
+ unsigned int ioaddr = dev->base_addr;
+
+ dev_dbg(&link->dev, "%s: smc_close(), status %4.4x.\n",
+ dev->name, inw(ioaddr + BANK_SELECT));
+
+ netif_stop_queue(dev);
+
+ /* Shut off all interrupts, and turn off the Tx and Rx sections.
+ Don't bother to check for chip present. */
+    SMC_SELECT_BANK(2);	/* Nominally paranoia, but do not assume... */
+ outw(0, ioaddr + INTERRUPT);
+ SMC_SELECT_BANK(0);
+ mask_bits(0xff00, ioaddr + RCR);
+ mask_bits(0xff00, ioaddr + TCR);
+
+ /* Put the chip into power-down mode. */
+ SMC_SELECT_BANK(1);
+ outw(CTL_POWERDOWN, ioaddr + CONTROL );
+
+ link->open--;
+ del_timer_sync(&smc->media);
+
+ return 0;
+} /* smc_close */
+
+/*======================================================================
+
+ Transfer a packet to the hardware and trigger the packet send.
+    This may be called from either the Tx queue code
+ or the interrupt handler.
+
+======================================================================*/
+
+static void smc_hardware_send_packet(struct net_device * dev)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ struct sk_buff *skb = smc->saved_skb;
+ unsigned int ioaddr = dev->base_addr;
+ u_char packet_no;
+
+ if (!skb) {
+ netdev_err(dev, "In XMIT with no packet to send\n");
+ return;
+ }
+
+ /* There should be a packet slot waiting. */
+ packet_no = inw(ioaddr + PNR_ARR) >> 8;
+ if (packet_no & 0x80) {
+ /* If not, there is a hardware problem! Likely an ejected card. */
+ netdev_warn(dev, "hardware Tx buffer allocation failed, status %#2.2x\n",
+ packet_no);
+ dev_kfree_skb_irq(skb);
+ smc->saved_skb = NULL;
+ netif_start_queue(dev);
+ return;
+ }
+
+ dev->stats.tx_bytes += skb->len;
+ /* The card should use the just-allocated buffer. */
+ outw(packet_no, ioaddr + PNR_ARR);
+ /* point to the beginning of the packet */
+ outw(PTR_AUTOINC , ioaddr + POINTER);
+
+ /* Send the packet length (+6 for status, length and ctl byte)
+ and the status word (set to zeros). */
+ {
+ u_char *buf = skb->data;
+ u_int length = skb->len; /* The chip will pad to ethernet min. */
+
+ netdev_dbg(dev, "Trying to xmit packet of length %d\n", length);
+
+ /* send the packet length: +6 for status word, length, and ctl */
+ outw(0, ioaddr + DATA_1);
+ outw(length + 6, ioaddr + DATA_1);
+ outsw(ioaddr + DATA_1, buf, length >> 1);
+
+ /* The odd last byte, if there is one, goes in the control word. */
+ outw((length & 1) ? 0x2000 | buf[length-1] : 0, ioaddr + DATA_1);
+ }
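+    /* The on-chip transmit packet thus looks like: a status word
+       (zeroed here), a length word, the data, and a final control
+       word whose 0x2000 bit flags an odd-length payload carrying
+       the last byte. */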
+
+ /* Enable the Tx interrupts, both Tx (TxErr) and TxEmpty. */
+ outw(((IM_TX_INT|IM_TX_EMPTY_INT)<<8) |
+ (inw(ioaddr + INTERRUPT) & 0xff00),
+ ioaddr + INTERRUPT);
+
+ /* The chip does the rest of the work. */
+ outw(MC_ENQUEUE , ioaddr + MMU_CMD);
+
+ smc->saved_skb = NULL;
+ dev_kfree_skb_irq(skb);
+ dev->trans_start = jiffies;
+ netif_start_queue(dev);
+}
+
+/*====================================================================*/
+
+static void smc_tx_timeout(struct net_device *dev)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+
+ netdev_notice(dev, "transmit timed out, Tx_status %2.2x status %4.4x.\n",
+ inw(ioaddr)&0xff, inw(ioaddr + 2));
+ dev->stats.tx_errors++;
+ smc_reset(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ smc->saved_skb = NULL;
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ u_short num_pages;
+ short time_out, ir;
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+
+ netdev_dbg(dev, "smc_start_xmit(length = %d) called, status %04x\n",
+ skb->len, inw(ioaddr + 2));
+
+ if (smc->saved_skb) {
+ /* THIS SHOULD NEVER HAPPEN. */
+ dev->stats.tx_aborted_errors++;
+ netdev_printk(KERN_DEBUG, dev,
+ "Internal error -- sent packet while busy\n");
+ return NETDEV_TX_BUSY;
+ }
+ smc->saved_skb = skb;
+
+ num_pages = skb->len >> 8;
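+    /* The MMU hands out Tx memory in 256-byte pages, so the page
+       count is simply the packet length shifted down by eight. */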
+
+ if (num_pages > 7) {
+ netdev_err(dev, "Far too big packet error: %d pages\n", num_pages);
+ dev_kfree_skb (skb);
+ smc->saved_skb = NULL;
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK; /* Do not re-queue this packet. */
+ }
+ /* A packet is now waiting. */
+ smc->packets_waiting++;
+
+ spin_lock_irqsave(&smc->lock, flags);
+ SMC_SELECT_BANK(2); /* Paranoia, we should always be in window 2 */
+
+ /* need MC_RESET to keep the memory consistent. errata? */
+ if (smc->rx_ovrn) {
+ outw(MC_RESET, ioaddr + MMU_CMD);
+ smc->rx_ovrn = 0;
+ }
+
+ /* Allocate the memory; send the packet now if we win. */
+ outw(MC_ALLOC | num_pages, ioaddr + MMU_CMD);
+ for (time_out = MEMORY_WAIT_TIME; time_out >= 0; time_out--) {
+ ir = inw(ioaddr+INTERRUPT);
+ if (ir & IM_ALLOC_INT) {
+ /* Acknowledge the interrupt, send the packet. */
+ outw((ir&0xff00) | IM_ALLOC_INT, ioaddr + INTERRUPT);
+ smc_hardware_send_packet(dev); /* Send the packet now.. */
+ spin_unlock_irqrestore(&smc->lock, flags);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ /* Otherwise defer until the Tx-space-allocated interrupt. */
+ pr_debug("%s: memory allocation deferred.\n", dev->name);
+ outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT);
+ spin_unlock_irqrestore(&smc->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/*======================================================================
+
+ Handle a Tx anomalous event. Entered while in Window 2.
+
+======================================================================*/
+
+static void smc_tx_err(struct net_device * dev)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ int saved_packet = inw(ioaddr + PNR_ARR) & 0xff;
+ int packet_no = inw(ioaddr + FIFO_PORTS) & 0x7f;
+ int tx_status;
+
+ /* select this as the packet to read from */
+ outw(packet_no, ioaddr + PNR_ARR);
+
+ /* read the first word from this packet */
+ outw(PTR_AUTOINC | PTR_READ | 0, ioaddr + POINTER);
+
+ tx_status = inw(ioaddr + DATA_1);
+
+ dev->stats.tx_errors++;
+ if (tx_status & TS_LOSTCAR) dev->stats.tx_carrier_errors++;
+ if (tx_status & TS_LATCOL) dev->stats.tx_window_errors++;
+ if (tx_status & TS_16COL) {
+ dev->stats.tx_aborted_errors++;
+ smc->tx_err++;
+ }
+
+ if (tx_status & TS_SUCCESS) {
+ netdev_notice(dev, "Successful packet caused error interrupt?\n");
+ }
+ /* re-enable transmit */
+ SMC_SELECT_BANK(0);
+ outw(inw(ioaddr + TCR) | TCR_ENABLE | smc->duplex, ioaddr + TCR);
+ SMC_SELECT_BANK(2);
+
+ outw(MC_FREEPKT, ioaddr + MMU_CMD); /* Free the packet memory. */
+
+ /* one less packet waiting for me */
+ smc->packets_waiting--;
+
+ outw(saved_packet, ioaddr + PNR_ARR);
+}
+
+/*====================================================================*/
+
+static void smc_eph_irq(struct net_device *dev)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ u_short card_stats, ephs;
+
+ SMC_SELECT_BANK(0);
+ ephs = inw(ioaddr + EPH);
+ pr_debug("%s: Ethernet protocol handler interrupt, status"
+ " %4.4x.\n", dev->name, ephs);
+ /* Could be a counter roll-over warning: update stats. */
+ card_stats = inw(ioaddr + COUNTER);
+ /* single collisions */
+ dev->stats.collisions += card_stats & 0xF;
+ card_stats >>= 4;
+ /* multiple collisions */
+ dev->stats.collisions += card_stats & 0xF;
+#if 0 /* These are for when linux supports these statistics */
+ card_stats >>= 4; /* deferred */
+ card_stats >>= 4; /* excess deferred */
+#endif
+ /* If we had a transmit error we must re-enable the transmitter. */
+ outw(inw(ioaddr + TCR) | TCR_ENABLE | smc->duplex, ioaddr + TCR);
+
+ /* Clear a link error interrupt. */
+ SMC_SELECT_BANK(1);
+ outw(CTL_AUTO_RELEASE | 0x0000, ioaddr + CONTROL);
+ outw(CTL_AUTO_RELEASE | CTL_TE_ENABLE | CTL_CR_ENABLE,
+ ioaddr + CONTROL);
+ SMC_SELECT_BANK(2);
+}
+
+/*====================================================================*/
+
+static irqreturn_t smc_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct smc_private *smc = netdev_priv(dev);
+ unsigned int ioaddr;
+ u_short saved_bank, saved_pointer, mask, status;
+ unsigned int handled = 1;
+ char bogus_cnt = INTR_WORK; /* Work we are willing to do. */
+
+ if (!netif_device_present(dev))
+ return IRQ_NONE;
+
+ ioaddr = dev->base_addr;
+
+ pr_debug("%s: SMC91c92 interrupt %d at %#x.\n", dev->name,
+ irq, ioaddr);
+
+ spin_lock(&smc->lock);
+ smc->watchdog = 0;
+ saved_bank = inw(ioaddr + BANK_SELECT);
+ if ((saved_bank & 0xff00) != 0x3300) {
+ /* The device does not exist -- the card could be off-line, or
+ maybe it has been ejected. */
+ pr_debug("%s: SMC91c92 interrupt %d for non-existent"
+ "/ejected device.\n", dev->name, irq);
+ handled = 0;
+ goto irq_done;
+ }
+
+ SMC_SELECT_BANK(2);
+ saved_pointer = inw(ioaddr + POINTER);
+ mask = inw(ioaddr + INTERRUPT) >> 8;
+ /* clear all interrupts */
+ outw(0, ioaddr + INTERRUPT);
+
+ do { /* read the status flag, and mask it */
+ status = inw(ioaddr + INTERRUPT) & 0xff;
+ pr_debug("%s: Status is %#2.2x (mask %#2.2x).\n", dev->name,
+ status, mask);
+ if ((status & mask) == 0) {
+ if (bogus_cnt == INTR_WORK)
+ handled = 0;
+ break;
+ }
+ if (status & IM_RCV_INT) {
+ /* Got a packet(s). */
+ smc_rx(dev);
+ }
+ if (status & IM_TX_INT) {
+ smc_tx_err(dev);
+ outw(IM_TX_INT, ioaddr + INTERRUPT);
+ }
+ status &= mask;
+ if (status & IM_TX_EMPTY_INT) {
+ outw(IM_TX_EMPTY_INT, ioaddr + INTERRUPT);
+ mask &= ~IM_TX_EMPTY_INT;
+ dev->stats.tx_packets += smc->packets_waiting;
+ smc->packets_waiting = 0;
+ }
+ if (status & IM_ALLOC_INT) {
+ /* Clear this interrupt so it doesn't happen again */
+ mask &= ~IM_ALLOC_INT;
+
+ smc_hardware_send_packet(dev);
+
+ /* enable xmit interrupts based on this */
+ mask |= (IM_TX_EMPTY_INT | IM_TX_INT);
+
+ /* and let the card send more packets to me */
+ netif_wake_queue(dev);
+ }
+ if (status & IM_RX_OVRN_INT) {
+ dev->stats.rx_errors++;
+ dev->stats.rx_fifo_errors++;
+ if (smc->duplex)
+ smc->rx_ovrn = 1; /* need MC_RESET outside smc_interrupt */
+ outw(IM_RX_OVRN_INT, ioaddr + INTERRUPT);
+ }
+ if (status & IM_EPH_INT)
+ smc_eph_irq(dev);
+ } while (--bogus_cnt);
+
+ pr_debug(" Restoring saved registers mask %2.2x bank %4.4x"
+ " pointer %4.4x.\n", mask, saved_bank, saved_pointer);
+
+ /* restore state register */
+ outw((mask<<8), ioaddr + INTERRUPT);
+ outw(saved_pointer, ioaddr + POINTER);
+ SMC_SELECT_BANK(saved_bank);
+
+ pr_debug("%s: Exiting interrupt IRQ%d.\n", dev->name, irq);
+
+irq_done:
+
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN)) {
+ /* Retrigger interrupt if needed */
+ mask_bits(0x00ff, ioaddr-0x10+OSITECH_RESET_ISR);
+ set_bits(0x0300, ioaddr-0x10+OSITECH_RESET_ISR);
+ }
+ if (smc->manfid == MANFID_MOTOROLA) {
+ u_char cor;
+ cor = readb(smc->base + MOT_UART + CISREG_COR);
+ writeb(cor & ~COR_IREQ_ENA, smc->base + MOT_UART + CISREG_COR);
+ writeb(cor, smc->base + MOT_UART + CISREG_COR);
+ cor = readb(smc->base + MOT_LAN + CISREG_COR);
+ writeb(cor & ~COR_IREQ_ENA, smc->base + MOT_LAN + CISREG_COR);
+ writeb(cor, smc->base + MOT_LAN + CISREG_COR);
+ }
+
+ if ((smc->base != NULL) && /* Megahertz MFC's */
+ (smc->manfid == MANFID_MEGAHERTZ) &&
+ (smc->cardid == PRODID_MEGAHERTZ_EM3288)) {
+
+ u_char tmp;
+ tmp = readb(smc->base+MEGAHERTZ_ISR);
+ tmp = readb(smc->base+MEGAHERTZ_ISR);
+
+ /* Retrigger interrupt if needed */
+ writeb(tmp, smc->base + MEGAHERTZ_ISR);
+ writeb(tmp, smc->base + MEGAHERTZ_ISR);
+ }
+
+ spin_unlock(&smc->lock);
+ return IRQ_RETVAL(handled);
+}
+
+/*====================================================================*/
+
+static void smc_rx(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ int rx_status;
+    int packet_length;	/* Caution: not the frame length, but the byte
+			   count to transfer from the chip. */
+
+ /* Assertion: we are in Window 2. */
+
+ if (inw(ioaddr + FIFO_PORTS) & FP_RXEMPTY) {
+ netdev_err(dev, "smc_rx() with nothing on Rx FIFO\n");
+ return;
+ }
+
+ /* Reset the read pointer, and read the status and packet length. */
+ outw(PTR_READ | PTR_RCV | PTR_AUTOINC, ioaddr + POINTER);
+ rx_status = inw(ioaddr + DATA_1);
+ packet_length = inw(ioaddr + DATA_1) & 0x07ff;
+
+ pr_debug("%s: Receive status %4.4x length %d.\n",
+ dev->name, rx_status, packet_length);
+
+ if (!(rx_status & RS_ERRORS)) {
+ /* do stuff to make a new packet */
+ struct sk_buff *skb;
+
+ /* Note: packet_length adds 5 or 6 extra bytes here! */
+ skb = dev_alloc_skb(packet_length+2);
+
+ if (skb == NULL) {
+ pr_debug("%s: Low memory, packet dropped.\n", dev->name);
+ dev->stats.rx_dropped++;
+ outw(MC_RELEASE, ioaddr + MMU_CMD);
+ return;
+ }
+
+ packet_length -= (rx_status & RS_ODDFRAME ? 5 : 6);
+ skb_reserve(skb, 2);
+ insw(ioaddr+DATA_1, skb_put(skb, packet_length),
+ (packet_length+1)>>1);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += packet_length;
+ if (rx_status & RS_MULTICAST)
+ dev->stats.multicast++;
+ } else {
+ /* error ... */
+ dev->stats.rx_errors++;
+
+ if (rx_status & RS_ALGNERR) dev->stats.rx_frame_errors++;
+ if (rx_status & (RS_TOOSHORT | RS_TOOLONG))
+ dev->stats.rx_length_errors++;
+ if (rx_status & RS_BADCRC) dev->stats.rx_crc_errors++;
+ }
+ /* Let the MMU free the memory of this packet. */
+ outw(MC_RELEASE, ioaddr + MMU_CMD);
+}
+
+/*======================================================================
+
+ Set the receive mode.
+
+ This routine is used by both the protocol level to notify us of
+ promiscuous/multicast mode changes, and by the open/reset code to
+ initialize the Rx registers. We always set the multicast list and
+ leave the receiver running.
+
+======================================================================*/
+
+static void set_rx_mode(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ struct smc_private *smc = netdev_priv(dev);
+ unsigned char multicast_table[8];
+ unsigned long flags;
+ u_short rx_cfg_setting;
+ int i;
+
+ memset(multicast_table, 0, sizeof(multicast_table));
+
+    if (dev->flags & IFF_PROMISC) {
+	rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti;
+    } else if (dev->flags & IFF_ALLMULTI) {
+	rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti;
+    } else {
+ if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+
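+	    /*
+	     * The hash filter is 64 bits across eight 8-bit registers.
+	     * ether_crc() returns the 32-bit CRC of the address; its
+	     * top six bits pick the filter bit: bits 31..29 select the
+	     * table byte and bits 28..26 the bit within it.  Worked
+	     * example (illustrative): a CRC of 0xa0000000 sets bit 0
+	     * of multicast_table[5].
+	     */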
+ netdev_for_each_mc_addr(ha, dev) {
+ u_int position = ether_crc(6, ha->addr);
+ multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
+ }
+ }
+ rx_cfg_setting = RxStripCRC | RxEnable;
+ }
+
+ /* Load MC table and Rx setting into the chip without interrupts. */
+ spin_lock_irqsave(&smc->lock, flags);
+ SMC_SELECT_BANK(3);
+ for (i = 0; i < 8; i++)
+ outb(multicast_table[i], ioaddr + MULTICAST0 + i);
+ SMC_SELECT_BANK(0);
+ outw(rx_cfg_setting, ioaddr + RCR);
+ SMC_SELECT_BANK(2);
+ spin_unlock_irqrestore(&smc->lock, flags);
+}
+
+/*======================================================================
+
+    Senses when a card's configuration changes. Here, that means a
+    switch between the coax and TP transceivers.
+
+======================================================================*/
+
+static int s9k_config(struct net_device *dev, struct ifmap *map)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (smc->cfg & CFG_MII_SELECT)
+ return -EOPNOTSUPP;
+ else if (map->port > 2)
+ return -EINVAL;
+ dev->if_port = map->port;
+ netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
+ smc_reset(dev);
+ }
+ return 0;
+}
+
+/*======================================================================
+
+ Reset the chip, reloading every register that might be corrupted.
+
+======================================================================*/
+
+/*
+ Set transceiver type, perhaps to something other than what the user
+ specified in dev->if_port.
+*/
+static void smc_set_xcvr(struct net_device *dev, int if_port)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ u_short saved_bank;
+
+ saved_bank = inw(ioaddr + BANK_SELECT);
+ SMC_SELECT_BANK(1);
+ if (if_port == 2) {
+ outw(smc->cfg | CFG_AUI_SELECT, ioaddr + CONFIG);
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN))
+ set_bits(OSI_AUI_PWR, ioaddr - 0x10 + OSITECH_AUI_PWR);
+ smc->media_status = ((dev->if_port == 0) ? 0x0001 : 0x0002);
+ } else {
+ outw(smc->cfg, ioaddr + CONFIG);
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN))
+ mask_bits(~OSI_AUI_PWR, ioaddr - 0x10 + OSITECH_AUI_PWR);
+ smc->media_status = ((dev->if_port == 0) ? 0x0012 : 0x4001);
+ }
+ SMC_SELECT_BANK(saved_bank);
+}
+
+static void smc_reset(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ struct smc_private *smc = netdev_priv(dev);
+ int i;
+
+ pr_debug("%s: smc91c92 reset called.\n", dev->name);
+
+ /* The first interaction must be a write to bring the chip out
+ of sleep mode. */
+ SMC_SELECT_BANK(0);
+ /* Reset the chip. */
+ outw(RCR_SOFTRESET, ioaddr + RCR);
+ udelay(10);
+
+ /* Clear the transmit and receive configuration registers. */
+ outw(RCR_CLEAR, ioaddr + RCR);
+ outw(TCR_CLEAR, ioaddr + TCR);
+
+ /* Set the Window 1 control, configuration and station addr registers.
+ No point in writing the I/O base register ;-> */
+ SMC_SELECT_BANK(1);
+ /* Automatically release successfully transmitted packets,
+ Accept link errors, counter and Tx error interrupts. */
+ outw(CTL_AUTO_RELEASE | CTL_TE_ENABLE | CTL_CR_ENABLE,
+ ioaddr + CONTROL);
+ smc_set_xcvr(dev, dev->if_port);
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN))
+ outw((dev->if_port == 2 ? OSI_AUI_PWR : 0) |
+ (inw(ioaddr-0x10+OSITECH_AUI_PWR) & 0xff00),
+ ioaddr - 0x10 + OSITECH_AUI_PWR);
+
+ /* Fill in the physical address. The databook is wrong about the order! */
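+    /* Each 16-bit ADDR register takes two bytes, low byte first;
+       e.g. for 00:11:22:33:44:55 the loop below writes 0x1100,
+       0x3322 and 0x5544 to ADDR0, ADDR2 and ADDR4 respectively. */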
+ for (i = 0; i < 6; i += 2)
+ outw((dev->dev_addr[i+1]<<8)+dev->dev_addr[i],
+ ioaddr + ADDR0 + i);
+
+ /* Reset the MMU */
+ SMC_SELECT_BANK(2);
+ outw(MC_RESET, ioaddr + MMU_CMD);
+ outw(0, ioaddr + INTERRUPT);
+
+ /* Re-enable the chip. */
+ SMC_SELECT_BANK(0);
+ outw(((smc->cfg & CFG_MII_SELECT) ? 0 : TCR_MONCSN) |
+ TCR_ENABLE | TCR_PAD_EN | smc->duplex, ioaddr + TCR);
+ set_rx_mode(dev);
+
+ if (smc->cfg & CFG_MII_SELECT) {
+ SMC_SELECT_BANK(3);
+
+ /* Reset MII */
+ mdio_write(dev, smc->mii_if.phy_id, 0, 0x8000);
+
+ /* Advertise 100F, 100H, 10F, 10H */
+ mdio_write(dev, smc->mii_if.phy_id, 4, 0x01e1);
+
+ /* Restart MII autonegotiation */
+ mdio_write(dev, smc->mii_if.phy_id, 0, 0x0000);
+ mdio_write(dev, smc->mii_if.phy_id, 0, 0x1200);
+ }
+
+ /* Enable interrupts. */
+ SMC_SELECT_BANK(2);
+ outw((IM_EPH_INT | IM_RX_OVRN_INT | IM_RCV_INT) << 8,
+ ioaddr + INTERRUPT);
+}
+
+/*======================================================================
+
+ Media selection timer routine
+
+======================================================================*/
+
+static void media_check(u_long arg)
+{
+ struct net_device *dev = (struct net_device *) arg;
+ struct smc_private *smc = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ u_short i, media, saved_bank;
+ u_short link;
+ unsigned long flags;
+
+ spin_lock_irqsave(&smc->lock, flags);
+
+ saved_bank = inw(ioaddr + BANK_SELECT);
+
+ if (!netif_device_present(dev))
+ goto reschedule;
+
+ SMC_SELECT_BANK(2);
+
+ /* need MC_RESET to keep the memory consistent. errata? */
+ if (smc->rx_ovrn) {
+ outw(MC_RESET, ioaddr + MMU_CMD);
+ smc->rx_ovrn = 0;
+ }
+ i = inw(ioaddr + INTERRUPT);
+ SMC_SELECT_BANK(0);
+ media = inw(ioaddr + EPH) & EPH_LINK_OK;
+ SMC_SELECT_BANK(1);
+ media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1;
+
+ SMC_SELECT_BANK(saved_bank);
+ spin_unlock_irqrestore(&smc->lock, flags);
+
+ /* Check for pending interrupt with watchdog flag set: with
+ this, we can limp along even if the interrupt is blocked */
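+    /* The low byte of the INTERRUPT register holds the status bits
+       and the high byte the mask, so ((i>>8) & i) is nonzero exactly
+       when an enabled interrupt is pending. */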
+ if (smc->watchdog++ && ((i>>8) & i)) {
+ if (!smc->fast_poll)
+ netdev_info(dev, "interrupt(s) dropped!\n");
+ local_irq_save(flags);
+ smc_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+ smc->fast_poll = HZ;
+ }
+ if (smc->fast_poll) {
+ smc->fast_poll--;
+ smc->media.expires = jiffies + HZ/100;
+ add_timer(&smc->media);
+ return;
+ }
+
+ spin_lock_irqsave(&smc->lock, flags);
+
+ saved_bank = inw(ioaddr + BANK_SELECT);
+
+ if (smc->cfg & CFG_MII_SELECT) {
+ if (smc->mii_if.phy_id < 0)
+ goto reschedule;
+
+ SMC_SELECT_BANK(3);
+ link = mdio_read(dev, smc->mii_if.phy_id, 1);
+ if (!link || (link == 0xffff)) {
+ netdev_info(dev, "MII is missing!\n");
+ smc->mii_if.phy_id = -1;
+ goto reschedule;
+ }
+
+ link &= 0x0004;
+ if (link != smc->link_status) {
+ u_short p = mdio_read(dev, smc->mii_if.phy_id, 5);
+ netdev_info(dev, "%s link beat\n", link ? "found" : "lost");
+ smc->duplex = (((p & 0x0100) || ((p & 0x1c0) == 0x40))
+ ? TCR_FDUPLX : 0);
+ if (link) {
+ netdev_info(dev, "autonegotiation complete: "
+ "%dbaseT-%cD selected\n",
+ (p & 0x0180) ? 100 : 10, smc->duplex ? 'F' : 'H');
+ }
+ SMC_SELECT_BANK(0);
+ outw(inw(ioaddr + TCR) | smc->duplex, ioaddr + TCR);
+ smc->link_status = link;
+ }
+ goto reschedule;
+ }
+
+ /* Ignore collisions unless we've had no rx's recently */
+ if (time_after(jiffies, dev->last_rx + HZ)) {
+ if (smc->tx_err || (smc->media_status & EPH_16COL))
+ media |= EPH_16COL;
+ }
+ smc->tx_err = 0;
+
+ if (media != smc->media_status) {
+ if ((media & smc->media_status & 1) &&
+ ((smc->media_status ^ media) & EPH_LINK_OK))
+ netdev_info(dev, "%s link beat\n",
+ smc->media_status & EPH_LINK_OK ? "lost" : "found");
+ else if ((media & smc->media_status & 2) &&
+ ((smc->media_status ^ media) & EPH_16COL))
+ netdev_info(dev, "coax cable %s\n",
+ media & EPH_16COL ? "problem" : "ok");
+ if (dev->if_port == 0) {
+ if (media & 1) {
+ if (media & EPH_LINK_OK)
+ netdev_info(dev, "flipped to 10baseT\n");
+ else
+ smc_set_xcvr(dev, 2);
+ } else {
+ if (media & EPH_16COL)
+ smc_set_xcvr(dev, 1);
+ else
+ netdev_info(dev, "flipped to 10base2\n");
+ }
+ }
+ smc->media_status = media;
+ }
+
+reschedule:
+ smc->media.expires = jiffies + HZ;
+ add_timer(&smc->media);
+ SMC_SELECT_BANK(saved_bank);
+ spin_unlock_irqrestore(&smc->lock, flags);
+}
+
+static int smc_link_ok(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ struct smc_private *smc = netdev_priv(dev);
+
+ if (smc->cfg & CFG_MII_SELECT) {
+ return mii_link_ok(&smc->mii_if);
+ } else {
+ SMC_SELECT_BANK(0);
+ return inw(ioaddr + EPH) & EPH_LINK_OK;
+ }
+}
+
+static int smc_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ u16 tmp;
+ unsigned int ioaddr = dev->base_addr;
+
+ ecmd->supported = (SUPPORTED_TP | SUPPORTED_AUI |
+ SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full);
+
+ SMC_SELECT_BANK(1);
+ tmp = inw(ioaddr + CONFIG);
+ ecmd->port = (tmp & CFG_AUI_SELECT) ? PORT_AUI : PORT_TP;
+ ecmd->transceiver = XCVR_INTERNAL;
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
+ ecmd->phy_address = ioaddr + MGMT;
+
+ SMC_SELECT_BANK(0);
+ tmp = inw(ioaddr + TCR);
+ ecmd->duplex = (tmp & TCR_FDUPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+
+ return 0;
+}
+
+static int smc_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ u16 tmp;
+ unsigned int ioaddr = dev->base_addr;
+
+ if (ethtool_cmd_speed(ecmd) != SPEED_10)
+ return -EINVAL;
+ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI)
+ return -EINVAL;
+ if (ecmd->transceiver != XCVR_INTERNAL)
+ return -EINVAL;
+
+ if (ecmd->port == PORT_AUI)
+ smc_set_xcvr(dev, 1);
+ else
+ smc_set_xcvr(dev, 0);
+
+ SMC_SELECT_BANK(0);
+ tmp = inw(ioaddr + TCR);
+ if (ecmd->duplex == DUPLEX_FULL)
+ tmp |= TCR_FDUPLX;
+ else
+ tmp &= ~TCR_FDUPLX;
+ outw(tmp, ioaddr + TCR);
+
+ return 0;
+}
+
+static int check_if_running(struct net_device *dev)
+{
+ if (!netif_running(dev))
+ return -EINVAL;
+ return 0;
+}
+
+static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+}
+
+static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ u16 saved_bank = inw(ioaddr + BANK_SELECT);
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&smc->lock, flags);
+ SMC_SELECT_BANK(3);
+ if (smc->cfg & CFG_MII_SELECT)
+ ret = mii_ethtool_gset(&smc->mii_if, ecmd);
+ else
+ ret = smc_netdev_get_ecmd(dev, ecmd);
+ SMC_SELECT_BANK(saved_bank);
+ spin_unlock_irqrestore(&smc->lock, flags);
+ return ret;
+}
+
+static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ u16 saved_bank = inw(ioaddr + BANK_SELECT);
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&smc->lock, flags);
+ SMC_SELECT_BANK(3);
+ if (smc->cfg & CFG_MII_SELECT)
+ ret = mii_ethtool_sset(&smc->mii_if, ecmd);
+ else
+ ret = smc_netdev_set_ecmd(dev, ecmd);
+ SMC_SELECT_BANK(saved_bank);
+ spin_unlock_irqrestore(&smc->lock, flags);
+ return ret;
+}
+
+static u32 smc_get_link(struct net_device *dev)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ u16 saved_bank = inw(ioaddr + BANK_SELECT);
+ u32 ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&smc->lock, flags);
+ SMC_SELECT_BANK(3);
+ ret = smc_link_ok(dev);
+ SMC_SELECT_BANK(saved_bank);
+ spin_unlock_irqrestore(&smc->lock, flags);
+ return ret;
+}
+
+static int smc_nway_reset(struct net_device *dev)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ if (smc->cfg & CFG_MII_SELECT) {
+ unsigned int ioaddr = dev->base_addr;
+ u16 saved_bank = inw(ioaddr + BANK_SELECT);
+ int res;
+
+ SMC_SELECT_BANK(3);
+ res = mii_nway_restart(&smc->mii_if);
+ SMC_SELECT_BANK(saved_bank);
+
+ return res;
+ } else
+ return -EOPNOTSUPP;
+}
+
+static const struct ethtool_ops ethtool_ops = {
+ .begin = check_if_running,
+ .get_drvinfo = smc_get_drvinfo,
+ .get_settings = smc_get_settings,
+ .set_settings = smc_set_settings,
+ .get_link = smc_get_link,
+ .nway_reset = smc_nway_reset,
+};
+
+static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct smc_private *smc = netdev_priv(dev);
+ struct mii_ioctl_data *mii = if_mii(rq);
+ int rc = 0;
+ u16 saved_bank;
+ unsigned int ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irqsave(&smc->lock, flags);
+ saved_bank = inw(ioaddr + BANK_SELECT);
+ SMC_SELECT_BANK(3);
+ rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL);
+ SMC_SELECT_BANK(saved_bank);
+ spin_unlock_irqrestore(&smc->lock, flags);
+ return rc;
+}
+
+static const struct pcmcia_device_id smc91c92_ids[] = {
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0109, 0x0501),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0140, 0x000a),
+ PCMCIA_PFC_DEVICE_PROD_ID123(0, "MEGAHERTZ", "CC/XJEM3288", "DATA/FAX/CELL ETHERNET MODEM", 0xf510db04, 0x04cd2988, 0x46a52d63),
+ PCMCIA_PFC_DEVICE_PROD_ID123(0, "MEGAHERTZ", "CC/XJEM3336", "DATA/FAX/CELL ETHERNET MODEM", 0xf510db04, 0x0143b773, 0x46a52d63),
+ PCMCIA_PFC_DEVICE_PROD_ID123(0, "MEGAHERTZ", "EM1144T", "PCMCIA MODEM", 0xf510db04, 0x856d66c8, 0xbd6c43ef),
+ PCMCIA_PFC_DEVICE_PROD_ID123(0, "MEGAHERTZ", "XJEM1144/CCEM1144", "PCMCIA MODEM", 0xf510db04, 0x52d21e1e, 0xbd6c43ef),
+ PCMCIA_PFC_DEVICE_PROD_ID12(0, "Gateway 2000", "XJEM3336", 0xdd9989be, 0x662c394c),
+ PCMCIA_PFC_DEVICE_PROD_ID12(0, "MEGAHERTZ", "XJEM1144/CCEM1144", 0xf510db04, 0x52d21e1e),
+ PCMCIA_PFC_DEVICE_PROD_ID12(0, "Ositech", "Trumpcard:Jack of Diamonds Modem+Ethernet", 0xc2f80cd, 0x656947b9),
+ PCMCIA_PFC_DEVICE_PROD_ID12(0, "Ositech", "Trumpcard:Jack of Hearts Modem+Ethernet", 0xc2f80cd, 0xdc9ba5ed),
+ PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x016c, 0x0020),
+ PCMCIA_DEVICE_MANF_CARD(0x016c, 0x0023),
+ PCMCIA_DEVICE_PROD_ID123("BASICS by New Media Corporation", "Ethernet", "SMC91C94", 0x23c78a9d, 0x00b2e941, 0xcef397fb),
+ PCMCIA_DEVICE_PROD_ID12("ARGOSY", "Fast Ethernet PCCard", 0x78f308dc, 0xdcea68bc),
+ PCMCIA_DEVICE_PROD_ID12("dit Co., Ltd.", "PC Card-10/100BTX", 0xe59365c8, 0x6a2161d1),
+ PCMCIA_DEVICE_PROD_ID12("DYNALINK", "L100C", 0x6a26d1cf, 0xc16ce9c5),
+ PCMCIA_DEVICE_PROD_ID12("Farallon", "Farallon Enet", 0x58d93fc4, 0x244734e9),
+ PCMCIA_DEVICE_PROD_ID12("Megahertz", "CC10BT/2", 0x33234748, 0x3c95b953),
+ PCMCIA_DEVICE_PROD_ID12("MELCO/SMC", "LPC-TX", 0xa2cd8e6d, 0x42da662a),
+ PCMCIA_DEVICE_PROD_ID12("Ositech", "Trumpcard:Four of Diamonds Ethernet", 0xc2f80cd, 0xb3466314),
+ PCMCIA_DEVICE_PROD_ID12("Ositech", "Trumpcard:Seven of Diamonds Ethernet", 0xc2f80cd, 0x194b650a),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Fast Ethernet PCCard", 0x281f1c5d, 0xdcea68bc),
+ PCMCIA_DEVICE_PROD_ID12("Psion", "10Mb Ethernet", 0x4ef00b21, 0x844be9e9),
+ PCMCIA_DEVICE_PROD_ID12("SMC", "EtherEZ Ethernet 8020", 0xc4f8b18b, 0x4a0eeb2d),
+ /* These conflict with other cards! */
+ /* PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0100), */
+ /* PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab), */
+ PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, smc91c92_ids);
+
+static struct pcmcia_driver smc91c92_cs_driver = {
+ .owner = THIS_MODULE,
+ .name = "smc91c92_cs",
+ .probe = smc91c92_probe,
+ .remove = smc91c92_detach,
+ .id_table = smc91c92_ids,
+ .suspend = smc91c92_suspend,
+ .resume = smc91c92_resume,
+};
+
+static int __init init_smc91c92_cs(void)
+{
+ return pcmcia_register_driver(&smc91c92_cs_driver);
+}
+
+static void __exit exit_smc91c92_cs(void)
+{
+ pcmcia_unregister_driver(&smc91c92_cs_driver);
+}
+
+module_init(init_smc91c92_cs);
+module_exit(exit_smc91c92_cs);
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
new file mode 100644
index 000000000000..f47f81e25322
--- /dev/null
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -0,0 +1,2431 @@
+/*
+ * smc91x.c
+ * This is a driver for SMSC's 91C9x/91C1xx single-chip Ethernet devices.
+ *
+ * Copyright (C) 1996 by Erik Stahlman
+ * Copyright (C) 2001 Standard Microsystems Corporation
+ * Developed by Simple Network Magic Corporation
+ * Copyright (C) 2003 Monta Vista Software, Inc.
+ * Unified SMC91x driver by Nicolas Pitre
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Arguments:
+ * io = for the base address
+ * irq = for the IRQ
+ * nowait = 0 for normal wait states, 1 eliminates additional wait states
+ *
+ * original author:
+ * Erik Stahlman <erik@vt.edu>
+ *
+ * hardware multicast code:
+ * Peter Cammaert <pc@denkart.be>
+ *
+ * contributors:
+ * Daris A Nevil <dnevil@snmc.com>
+ * Nicolas Pitre <nico@fluxnic.net>
+ * Russell King <rmk@arm.linux.org.uk>
+ *
+ * History:
+ * 08/20/00 Arnaldo Melo fix kfree(skb) in smc_hardware_send_packet
+ * 12/15/00 Christian Jullien fix "Warning: kfree_skb on hard IRQ"
+ * 03/16/01 Daris A Nevil modified smc9194.c for use with LAN91C111
+ * 08/22/01 Scott Anderson merge changes from smc9194 to smc91111
+ * 08/21/01 Pramod B Bhardwaj added support for RevB of LAN91C111
+ * 12/20/01 Jeff Sutherland initial port to Xscale PXA with DMA support
+ * 04/07/03 Nicolas Pitre unified SMC91x driver, killed irq races,
+ * more bus abstraction, big cleanup, etc.
+ * 29/09/03 Russell King - add driver model support
+ * - ethtool support
+ * - convert to use generic MII interface
+ * - add link up/down notification
+ * - don't try to handle full negotiation in
+ * smc_phy_configure
+ * - clean up (and fix stack overrun) in PHY
+ * MII read/write functions
+ * 22/09/04 Nicolas Pitre big update (see commit log for details)
+ */
+static const char version[] =
+ "smc91x.c: v1.1, sep 22 2004 by Nicolas Pitre <nico@fluxnic.net>\n";
+
+/* Debugging level */
+#ifndef SMC_DEBUG
+#define SMC_DEBUG 0
+#endif
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/workqueue.h>
+#include <linux/of.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+
+#include "smc91x.h"
+
+#ifndef SMC_NOWAIT
+# define SMC_NOWAIT 0
+#endif
+static int nowait = SMC_NOWAIT;
+module_param(nowait, int, 0400);
+MODULE_PARM_DESC(nowait, "set to 1 for no wait state");
+
+/*
+ * Transmit timeout, default 1 second (1000 ms).
+ */
+static int watchdog = 1000;
+module_param(watchdog, int, 0400);
+MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:smc91x");
+
+/*
+ * The internal workings of the driver. If you are changing anything
+ * here with the SMC stuff, you should have the datasheet and know
+ * what you are doing.
+ */
+#define CARDNAME "smc91x"
+
+/*
+ * Use power-down feature of the chip
+ */
+#define POWER_DOWN 1
+
+/*
+ * Wait time for memory to be free. This probably shouldn't be
+ * tuned that much, as waiting for this means nothing else happens
+ * in the system
+ */
+#define MEMORY_WAIT_TIME 16
+
+/*
+ * The maximum number of processing loops allowed for each call to the
+ * IRQ handler.
+ */
+#define MAX_IRQ_LOOPS 8
+
+/*
+ * This selects whether TX packets are sent one by one to the SMC91x internal
+ * memory and throttled until transmission completes. This may prevent
+ * RX overruns a little by keeping much of the memory free for RX packets
+ * but at the expense of reduced TX throughput and increased IRQ overhead.
+ * Note this is not a cure for a too slow data bus or too high IRQ latency.
+ */
+#define THROTTLE_TX_PKTS 0
+
+/*
+ * The MII clock high/low times. 2x this number gives the MII clock period
+ * in microseconds. (was 50, but this gives 6.4ms for each MII transaction!)
+ */
+#define MII_DELAY 1
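+
+/*
+ * Rough timing sketch, assuming the udelay() calls dominate: each
+ * clock phase lasts MII_DELAY us, giving a 2us period (500 kHz).
+ * A full read transaction of 64 bits (32 idle + 14 command +
+ * 18 turnaround/data) then takes on the order of 128us.
+ */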
+
+#if SMC_DEBUG > 0
+#define DBG(n, args...) \
+ do { \
+ if (SMC_DEBUG >= (n)) \
+ printk(args); \
+ } while (0)
+
+#define PRINTK(args...) printk(args)
+#else
+#define DBG(n, args...) do { } while(0)
+#define PRINTK(args...) printk(KERN_DEBUG args)
+#endif
+
+#if SMC_DEBUG > 3
+static void PRINT_PKT(u_char *buf, int length)
+{
+ int i;
+ int remainder;
+ int lines;
+
+ lines = length / 16;
+ remainder = length % 16;
+
+ for (i = 0; i < lines ; i ++) {
+ int cur;
+ for (cur = 0; cur < 8; cur++) {
+ u_char a, b;
+ a = *buf++;
+ b = *buf++;
+ printk("%02x%02x ", a, b);
+ }
+ printk("\n");
+ }
+ for (i = 0; i < remainder/2 ; i++) {
+ u_char a, b;
+ a = *buf++;
+ b = *buf++;
+ printk("%02x%02x ", a, b);
+ }
+ printk("\n");
+}
+#else
+#define PRINT_PKT(x...) do { } while(0)
+#endif
+
+
+/* this enables an interrupt in the interrupt mask register */
+#define SMC_ENABLE_INT(lp, x) do { \
+ unsigned char mask; \
+ unsigned long smc_enable_flags; \
+ spin_lock_irqsave(&lp->lock, smc_enable_flags); \
+ mask = SMC_GET_INT_MASK(lp); \
+ mask |= (x); \
+ SMC_SET_INT_MASK(lp, mask); \
+ spin_unlock_irqrestore(&lp->lock, smc_enable_flags); \
+} while (0)
+
+/* this disables an interrupt from the interrupt mask register */
+#define SMC_DISABLE_INT(lp, x) do { \
+ unsigned char mask; \
+ unsigned long smc_disable_flags; \
+ spin_lock_irqsave(&lp->lock, smc_disable_flags); \
+ mask = SMC_GET_INT_MASK(lp); \
+ mask &= ~(x); \
+ SMC_SET_INT_MASK(lp, mask); \
+ spin_unlock_irqrestore(&lp->lock, smc_disable_flags); \
+} while (0)
+
+/*
+ * Wait while MMU is busy.  This is usually on the order of a few nanosecs
+ * if at all, but let's avoid deadlocking the system if the hardware
+ * decides to go south.
+ */
+#define SMC_WAIT_MMU_BUSY(lp) do { \
+ if (unlikely(SMC_GET_MMU_CMD(lp) & MC_BUSY)) { \
+ unsigned long timeout = jiffies + 2; \
+ while (SMC_GET_MMU_CMD(lp) & MC_BUSY) { \
+ if (time_after(jiffies, timeout)) { \
+ printk("%s: timeout %s line %d\n", \
+ dev->name, __FILE__, __LINE__); \
+ break; \
+ } \
+ cpu_relax(); \
+ } \
+ } \
+} while (0)
+
+
+/*
+ * this does a soft reset on the device
+ */
+static void smc_reset(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned int ctl, cfg;
+ struct sk_buff *pending_skb;
+
+ DBG(2, "%s: %s\n", dev->name, __func__);
+
+ /* Disable all interrupts, block TX tasklet */
+ spin_lock_irq(&lp->lock);
+ SMC_SELECT_BANK(lp, 2);
+ SMC_SET_INT_MASK(lp, 0);
+ pending_skb = lp->pending_tx_skb;
+ lp->pending_tx_skb = NULL;
+ spin_unlock_irq(&lp->lock);
+
+ /* free any pending tx skb */
+ if (pending_skb) {
+ dev_kfree_skb(pending_skb);
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
+ }
+
+ /*
+ * This resets the registers mostly to defaults, but doesn't
+ * affect EEPROM. That seems unnecessary
+ */
+ SMC_SELECT_BANK(lp, 0);
+ SMC_SET_RCR(lp, RCR_SOFTRST);
+
+ /*
+ * Setup the Configuration Register
+ * This is necessary because the CONFIG_REG is not affected
+ * by a soft reset
+ */
+ SMC_SELECT_BANK(lp, 1);
+
+ cfg = CONFIG_DEFAULT;
+
+ /*
+ * Setup for fast accesses if requested. If the card/system
+ * can't handle it then there will be no recovery except for
+ * a hard reset or power cycle
+ */
+ if (lp->cfg.flags & SMC91X_NOWAIT)
+ cfg |= CONFIG_NO_WAIT;
+
+ /*
+ * Release from possible power-down state
+ * Configuration register is not affected by Soft Reset
+ */
+ cfg |= CONFIG_EPH_POWER_EN;
+
+ SMC_SET_CONFIG(lp, cfg);
+
+ /* this should pause enough for the chip to be happy */
+ /*
+ * elaborate? What does the chip _need_? --jgarzik
+ *
+ * This seems to be undocumented, but something the original
+ * driver(s) have always done. Suspect undocumented timing
+ * info/determined empirically. --rmk
+ */
+ udelay(1);
+
+ /* Disable transmit and receive functionality */
+ SMC_SELECT_BANK(lp, 0);
+ SMC_SET_RCR(lp, RCR_CLEAR);
+ SMC_SET_TCR(lp, TCR_CLEAR);
+
+ SMC_SELECT_BANK(lp, 1);
+ ctl = SMC_GET_CTL(lp) | CTL_LE_ENABLE;
+
+ /*
+ * Set the control register to automatically release successfully
+ * transmitted packets, to make the best use out of our limited
+ * memory
+ */
+	if (!THROTTLE_TX_PKTS)
+ ctl |= CTL_AUTO_RELEASE;
+ else
+ ctl &= ~CTL_AUTO_RELEASE;
+ SMC_SET_CTL(lp, ctl);
+
+ /* Reset the MMU */
+ SMC_SELECT_BANK(lp, 2);
+ SMC_SET_MMU_CMD(lp, MC_RESET);
+ SMC_WAIT_MMU_BUSY(lp);
+}
+
+/*
+ * Enable Interrupts, Receive, and Transmit
+ */
+static void smc_enable(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ int mask;
+
+ DBG(2, "%s: %s\n", dev->name, __func__);
+
+ /* see the header file for options in TCR/RCR DEFAULT */
+ SMC_SELECT_BANK(lp, 0);
+ SMC_SET_TCR(lp, lp->tcr_cur_mode);
+ SMC_SET_RCR(lp, lp->rcr_cur_mode);
+
+ SMC_SELECT_BANK(lp, 1);
+ SMC_SET_MAC_ADDR(lp, dev->dev_addr);
+
+ /* now, enable interrupts */
+ mask = IM_EPH_INT|IM_RX_OVRN_INT|IM_RCV_INT;
+ if (lp->version >= (CHIP_91100 << 4))
+ mask |= IM_MDINT;
+ SMC_SELECT_BANK(lp, 2);
+ SMC_SET_INT_MASK(lp, mask);
+
+ /*
+ * From this point the register bank must _NOT_ be switched away
+ * to something else than bank 2 without proper locking against
+ * races with any tasklet or interrupt handlers until smc_shutdown()
+ * or smc_reset() is called.
+ */
+}
+
+/*
+ * this puts the device in an inactive state
+ */
+static void smc_shutdown(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ struct sk_buff *pending_skb;
+
+ DBG(2, "%s: %s\n", CARDNAME, __func__);
+
+ /* no more interrupts for me */
+ spin_lock_irq(&lp->lock);
+ SMC_SELECT_BANK(lp, 2);
+ SMC_SET_INT_MASK(lp, 0);
+ pending_skb = lp->pending_tx_skb;
+ lp->pending_tx_skb = NULL;
+ spin_unlock_irq(&lp->lock);
+ if (pending_skb)
+ dev_kfree_skb(pending_skb);
+
+ /* and tell the card to stay away from that nasty outside world */
+ SMC_SELECT_BANK(lp, 0);
+ SMC_SET_RCR(lp, RCR_CLEAR);
+ SMC_SET_TCR(lp, TCR_CLEAR);
+
+#ifdef POWER_DOWN
+ /* finally, shut the chip down */
+ SMC_SELECT_BANK(lp, 1);
+ SMC_SET_CONFIG(lp, SMC_GET_CONFIG(lp) & ~CONFIG_EPH_POWER_EN);
+#endif
+}
+
+/*
+ * This is the procedure to handle the receipt of a packet.
+ */
+static inline void smc_rcv(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned int packet_number, status, packet_len;
+
+ DBG(3, "%s: %s\n", dev->name, __func__);
+
+ packet_number = SMC_GET_RXFIFO(lp);
+ if (unlikely(packet_number & RXFIFO_REMPTY)) {
+ PRINTK("%s: smc_rcv with nothing on FIFO.\n", dev->name);
+ return;
+ }
+
+ /* read from start of packet */
+ SMC_SET_PTR(lp, PTR_READ | PTR_RCV | PTR_AUTOINC);
+
+ /* First two words are status and packet length */
+ SMC_GET_PKT_HDR(lp, status, packet_len);
+ packet_len &= 0x07ff; /* mask off top bits */
+ DBG(2, "%s: RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n",
+ dev->name, packet_number, status,
+ packet_len, packet_len);
+
+ back:
+ if (unlikely(packet_len < 6 || status & RS_ERRORS)) {
+ if (status & RS_TOOLONG && packet_len <= (1514 + 4 + 6)) {
+ /* accept VLAN packets */
+ status &= ~RS_TOOLONG;
+ goto back;
+ }
+ if (packet_len < 6) {
+ /* bloody hardware */
+ printk(KERN_ERR "%s: fubar (rxlen %u status %x\n",
+ dev->name, packet_len, status);
+ status |= RS_TOOSHORT;
+ }
+ SMC_WAIT_MMU_BUSY(lp);
+ SMC_SET_MMU_CMD(lp, MC_RELEASE);
+ dev->stats.rx_errors++;
+ if (status & RS_ALGNERR)
+ dev->stats.rx_frame_errors++;
+ if (status & (RS_TOOSHORT | RS_TOOLONG))
+ dev->stats.rx_length_errors++;
+ if (status & RS_BADCRC)
+ dev->stats.rx_crc_errors++;
+ } else {
+ struct sk_buff *skb;
+ unsigned char *data;
+ unsigned int data_len;
+
+ /* set multicast stats */
+ if (status & RS_MULTICAST)
+ dev->stats.multicast++;
+
+ /*
+ * Actual payload is packet_len - 6 (or 5 if odd byte).
+ * We want skb_reserve(2) and the final ctrl word
+ * (2 bytes, possibly containing the payload odd byte).
+ * Furthermore, we add 2 bytes to allow rounding up to
+ * multiple of 4 bytes on 32 bit buses.
+ * Hence packet_len - 6 + 2 + 2 + 2.
+ */
+ skb = dev_alloc_skb(packet_len);
+ if (unlikely(skb == NULL)) {
+ printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
+ dev->name);
+ SMC_WAIT_MMU_BUSY(lp);
+ SMC_SET_MMU_CMD(lp, MC_RELEASE);
+ dev->stats.rx_dropped++;
+ return;
+ }
+
+ /* Align IP header to 32 bits */
+ skb_reserve(skb, 2);
+
+ /* BUG: the LAN91C111 rev A never sets this bit. Force it. */
+ if (lp->version == 0x90)
+ status |= RS_ODDFRAME;
+
+ /*
+ * If odd length: packet_len - 5,
+ * otherwise packet_len - 6.
+ * With the trailing ctrl byte it's packet_len - 4.
+ */
+ data_len = packet_len - ((status & RS_ODDFRAME) ? 5 : 6);
+ data = skb_put(skb, data_len);
+ SMC_PULL_DATA(lp, data, packet_len - 4);
+
+ SMC_WAIT_MMU_BUSY(lp);
+ SMC_SET_MMU_CMD(lp, MC_RELEASE);
+
+ PRINT_PKT(data, packet_len - 4);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += data_len;
+ }
+}
+
+#ifdef CONFIG_SMP
+/*
+ * On SMP we have the following problem:
+ *
+ * A = smc_hardware_send_pkt()
+ * B = smc_hard_start_xmit()
+ * C = smc_interrupt()
+ *
+ * A and B can never be executed simultaneously. However, at least on UP,
+ * it is possible (and even desirable) for C to interrupt execution of
+ * A or B in order to have better RX reliability and avoid overruns.
+ * C, just like A and B, must have exclusive access to the chip and
+ * each of them must lock against any other concurrent access.
+ * Unfortunately it is not possible to have C suspend execution of A or
+ * B taking place on another CPU. On UP this is not an issue since A and B
+ * are run from softirq context and C from hard IRQ context, and there is
+ * no other CPU where concurrent access can happen.
+ * If ever there is a way to force at least B and C to always be executed
+ * on the same CPU then we could use read/write locks to protect against
+ * any other concurrent access and C would always interrupt B. But life
+ * isn't that easy in an SMP world...
+ */
+#define smc_special_trylock(lock, flags) \
+({ \
+ int __ret; \
+ local_irq_save(flags); \
+ __ret = spin_trylock(lock); \
+ if (!__ret) \
+ local_irq_restore(flags); \
+ __ret; \
+})
+#define smc_special_lock(lock, flags) spin_lock_irqsave(lock, flags)
+#define smc_special_unlock(lock, flags) spin_unlock_irqrestore(lock, flags)
+#else
+#define smc_special_trylock(lock, flags) (flags == flags)
+#define smc_special_lock(lock, flags) do { flags = 0; } while (0)
+#define smc_special_unlock(lock, flags) do { flags = 0; } while (0)
+#endif
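+
+/*
+ * On UP builds the trylock always "succeeds": (flags == flags) is
+ * always true and exists only to reference flags and silence an
+ * unused-variable warning, while lock/unlock merely initialise it.
+ */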
+
+/*
+ * This is called to actually send a packet to the chip.
+ */
+static void smc_hardware_send_pkt(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ struct sk_buff *skb;
+ unsigned int packet_no, len;
+ unsigned char *buf;
+ unsigned long flags;
+
+ DBG(3, "%s: %s\n", dev->name, __func__);
+
+ if (!smc_special_trylock(&lp->lock, flags)) {
+ netif_stop_queue(dev);
+ tasklet_schedule(&lp->tx_task);
+ return;
+ }
+
+ skb = lp->pending_tx_skb;
+ if (unlikely(!skb)) {
+ smc_special_unlock(&lp->lock, flags);
+ return;
+ }
+ lp->pending_tx_skb = NULL;
+
+ packet_no = SMC_GET_AR(lp);
+ if (unlikely(packet_no & AR_FAILED)) {
+ printk("%s: Memory allocation failed.\n", dev->name);
+ dev->stats.tx_errors++;
+ dev->stats.tx_fifo_errors++;
+ smc_special_unlock(&lp->lock, flags);
+ goto done;
+ }
+
+ /* point to the beginning of the packet */
+ SMC_SET_PN(lp, packet_no);
+ SMC_SET_PTR(lp, PTR_AUTOINC);
+
+ buf = skb->data;
+ len = skb->len;
+ DBG(2, "%s: TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n",
+ dev->name, packet_no, len, len, buf);
+ PRINT_PKT(buf, len);
+
+ /*
+	 * Send the packet length (+6 for status words, length, and ctl).
+ * The card will pad to 64 bytes with zeroes if packet is too small.
+ */
+ SMC_PUT_PKT_HDR(lp, 0, len + 6);
+
+ /* send the actual data */
+ SMC_PUSH_DATA(lp, buf, len & ~1);
+
+ /* Send final ctl word with the last byte if there is one */
+ SMC_outw(((len & 1) ? (0x2000 | buf[len-1]) : 0), ioaddr, DATA_REG(lp));
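+	/* Layout of that final word: bit 13 (0x2000) is the ODD flag
+	   and the low byte carries the last data byte, so e.g. a
+	   61-byte packet ends with 0x2000 | buf[60]. */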
+
+ /*
+ * If THROTTLE_TX_PKTS is set, we stop the queue here. This will
+ * have the effect of having at most one packet queued for TX
+ * in the chip's memory at all time.
+ *
+ * If THROTTLE_TX_PKTS is not set then the queue is stopped only
+ * when memory allocation (MC_ALLOC) does not succeed right away.
+ */
+ if (THROTTLE_TX_PKTS)
+ netif_stop_queue(dev);
+
+ /* queue the packet for TX */
+ SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
+ smc_special_unlock(&lp->lock, flags);
+
+ dev->trans_start = jiffies;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += len;
+
+ SMC_ENABLE_INT(lp, IM_TX_INT | IM_TX_EMPTY_INT);
+
+done: if (!THROTTLE_TX_PKTS)
+ netif_wake_queue(dev);
+
+ dev_kfree_skb(skb);
+}
+
+/*
+ * Since I am not sure if I will have enough room in the chip's ram
+ * to store the packet, I call this routine which either sends it
+ * now, or sets the card to generate an interrupt when it is ready
+ * for the packet.
+ */
+static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned int numPages, poll_count, status;
+ unsigned long flags;
+
+ DBG(3, "%s: %s\n", dev->name, __func__);
+
+ BUG_ON(lp->pending_tx_skb != NULL);
+
+ /*
+ * The MMU wants the number of pages to be the number of 256 bytes
+ * 'pages', minus 1 (since a packet can't ever have 0 pages :))
+ *
+ * The 91C111 ignores the size bits, but earlier models don't.
+ *
+ * Pkt size for allocating is data length +6 (for additional status
+ * words, length and ctl)
+ *
+ * If odd size then last byte is included in ctl word.
+ */
+ numPages = ((skb->len & ~1) + (6 - 1)) >> 8;
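+	/* Worked example (illustrative): a 1514-byte frame gives
+	   ((1514 & ~1) + 5) >> 8 = 5, and MC_ALLOC | 5 reserves
+	   numPages + 1 = 6 pages of 256 bytes, i.e. 1536 bytes for
+	   the 1520 bytes of status, length, data and ctl. */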
+ if (unlikely(numPages > 7)) {
+ printk("%s: Far too big packet error.\n", dev->name);
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ smc_special_lock(&lp->lock, flags);
+
+ /* now, try to allocate the memory */
+ SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages);
+
+ /*
+ * Poll the chip for a short amount of time in case the
+ * allocation succeeds quickly.
+ */
+ poll_count = MEMORY_WAIT_TIME;
+ do {
+ status = SMC_GET_INT(lp);
+ if (status & IM_ALLOC_INT) {
+ SMC_ACK_INT(lp, IM_ALLOC_INT);
+ break;
+ }
+ } while (--poll_count);
+
+ smc_special_unlock(&lp->lock, flags);
+
+ lp->pending_tx_skb = skb;
+ if (!poll_count) {
+ /* oh well, wait until the chip finds memory later */
+ netif_stop_queue(dev);
+ DBG(2, "%s: TX memory allocation deferred.\n", dev->name);
+ SMC_ENABLE_INT(lp, IM_ALLOC_INT);
+ } else {
+ /*
+ * Allocation succeeded: push packet to the chip's own memory
+ * immediately.
+ */
+ smc_hardware_send_pkt((unsigned long)dev);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * This handles a TX interrupt, which is only called when:
+ * - a TX error occurred, or
+ * - CTL_AUTO_RELEASE is not set and TX of a packet completed.
+ */
+static void smc_tx(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned int saved_packet, packet_no, tx_status, pkt_len;
+
+ DBG(3, "%s: %s\n", dev->name, __func__);
+
+ /* If the TX FIFO is empty then nothing to do */
+ packet_no = SMC_GET_TXFIFO(lp);
+ if (unlikely(packet_no & TXFIFO_TEMPTY)) {
+ PRINTK("%s: smc_tx with nothing on FIFO.\n", dev->name);
+ return;
+ }
+
+ /* select packet to read from */
+ saved_packet = SMC_GET_PN(lp);
+ SMC_SET_PN(lp, packet_no);
+
+ /* read the first word (status word) from this packet */
+ SMC_SET_PTR(lp, PTR_AUTOINC | PTR_READ);
+ SMC_GET_PKT_HDR(lp, tx_status, pkt_len);
+ DBG(2, "%s: TX STATUS 0x%04x PNR 0x%02x\n",
+ dev->name, tx_status, packet_no);
+
+ if (!(tx_status & ES_TX_SUC))
+ dev->stats.tx_errors++;
+
+ if (tx_status & ES_LOSTCARR)
+ dev->stats.tx_carrier_errors++;
+
+ if (tx_status & (ES_LATCOL | ES_16COL)) {
+ PRINTK("%s: %s occurred on last xmit\n", dev->name,
+ (tx_status & ES_LATCOL) ?
+ "late collision" : "too many collisions");
+ dev->stats.tx_window_errors++;
+ if (!(dev->stats.tx_window_errors & 63) && net_ratelimit()) {
+ printk(KERN_INFO "%s: unexpectedly large number of "
+ "bad collisions. Please check duplex "
+ "setting.\n", dev->name);
+ }
+ }
+
+ /* kill the packet */
+ SMC_WAIT_MMU_BUSY(lp);
+ SMC_SET_MMU_CMD(lp, MC_FREEPKT);
+
+ /* Don't restore Packet Number Reg until busy bit is cleared */
+ SMC_WAIT_MMU_BUSY(lp);
+ SMC_SET_PN(lp, saved_packet);
+
+ /* re-enable transmit */
+ SMC_SELECT_BANK(lp, 0);
+ SMC_SET_TCR(lp, lp->tcr_cur_mode);
+ SMC_SELECT_BANK(lp, 2);
+}
+
+
+/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
+
+static void smc_mii_out(struct net_device *dev, unsigned int val, int bits)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned int mii_reg, mask;
+
+ mii_reg = SMC_GET_MII(lp) & ~(MII_MCLK | MII_MDOE | MII_MDO);
+ mii_reg |= MII_MDOE;
+
+ for (mask = 1 << (bits - 1); mask; mask >>= 1) {
+ if (val & mask)
+ mii_reg |= MII_MDO;
+ else
+ mii_reg &= ~MII_MDO;
+
+ SMC_SET_MII(lp, mii_reg);
+ udelay(MII_DELAY);
+ SMC_SET_MII(lp, mii_reg | MII_MCLK);
+ udelay(MII_DELAY);
+ }
+}
+
+static unsigned int smc_mii_in(struct net_device *dev, int bits)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned int mii_reg, mask, val;
+
+ mii_reg = SMC_GET_MII(lp) & ~(MII_MCLK | MII_MDOE | MII_MDO);
+ SMC_SET_MII(lp, mii_reg);
+
+ for (mask = 1 << (bits - 1), val = 0; mask; mask >>= 1) {
+ if (SMC_GET_MII(lp) & MII_MDI)
+ val |= mask;
+
+ SMC_SET_MII(lp, mii_reg);
+ udelay(MII_DELAY);
+ SMC_SET_MII(lp, mii_reg | MII_MCLK);
+ udelay(MII_DELAY);
+ }
+
+ return val;
+}
+
+/*
+ * Reads a register from the MII Management serial interface
+ */
+static int smc_phy_read(struct net_device *dev, int phyaddr, int phyreg)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned int phydata;
+
+ SMC_SELECT_BANK(lp, 3);
+
+ /* Idle - 32 ones */
+ smc_mii_out(dev, 0xffffffff, 32);
+
+ /* Start code (01) + read (10) + phyaddr + phyreg */
+ smc_mii_out(dev, 6 << 10 | phyaddr << 5 | phyreg, 14);
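+	/* The 14-bit frame decodes as start (01), read opcode (10),
+	   5-bit PHY address, 5-bit register: 6 == 0110b shifted past
+	   the two 5-bit fields. */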
+
+ /* Turnaround (2bits) + phydata */
+ phydata = smc_mii_in(dev, 18);
+
+ /* Return to idle state */
+ SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
+
+ DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+ __func__, phyaddr, phyreg, phydata);
+
+ SMC_SELECT_BANK(lp, 2);
+ return phydata;
+}
+
+/*
+ * Writes a register to the MII Management serial interface
+ */
+static void smc_phy_write(struct net_device *dev, int phyaddr, int phyreg,
+ int phydata)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+
+ SMC_SELECT_BANK(lp, 3);
+
+ /* Idle - 32 ones */
+ smc_mii_out(dev, 0xffffffff, 32);
+
+ /* Start code (01) + write (01) + phyaddr + phyreg + turnaround + phydata */
+ smc_mii_out(dev, 5 << 28 | phyaddr << 23 | phyreg << 18 | 2 << 16 | phydata, 32);
+
+ /* Return to idle state */
+ SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
+
+ DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+ __func__, phyaddr, phyreg, phydata);
+
+ SMC_SELECT_BANK(lp, 2);
+}
+
+/*
+ * Finds and reports the PHY address
+ */
+static void smc_phy_detect(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ int phyaddr;
+
+ DBG(2, "%s: %s\n", dev->name, __func__);
+
+ lp->phy_type = 0;
+
+ /*
+	 * Scan all 32 PHY addresses if necessary, trying
+	 * PHY#1 through PHY#31 first, and then PHY#0 last.
+ */
+ for (phyaddr = 1; phyaddr < 33; ++phyaddr) {
+ unsigned int id1, id2;
+
+ /* Read the PHY identifiers */
+ id1 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID1);
+ id2 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID2);
+
+ DBG(3, "%s: phy_id1=0x%x, phy_id2=0x%x\n",
+ dev->name, id1, id2);
+
+ /* Make sure it is a valid identifier */
+ if (id1 != 0x0000 && id1 != 0xffff && id1 != 0x8000 &&
+ id2 != 0x0000 && id2 != 0xffff && id2 != 0x8000) {
+ /* Save the PHY's address */
+ lp->mii.phy_id = phyaddr & 31;
+ lp->phy_type = id1 << 16 | id2;
+ break;
+ }
+ }
+}
+
+/*
+ * Sets the PHY to a configuration as determined by the user
+ */
+static int smc_phy_fixed(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ int phyaddr = lp->mii.phy_id;
+ int bmcr, cfg1;
+
+ DBG(3, "%s: %s\n", dev->name, __func__);
+
+ /* Enter Link Disable state */
+ cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG);
+ cfg1 |= PHY_CFG1_LNKDIS;
+ smc_phy_write(dev, phyaddr, PHY_CFG1_REG, cfg1);
+
+ /*
+ * Set our fixed capabilities
+ * Disable auto-negotiation
+ */
+ bmcr = 0;
+
+ if (lp->ctl_rfduplx)
+ bmcr |= BMCR_FULLDPLX;
+
+ if (lp->ctl_rspeed == 100)
+ bmcr |= BMCR_SPEED100;
+
+ /* Write our capabilities to the phy control register */
+ smc_phy_write(dev, phyaddr, MII_BMCR, bmcr);
+
+ /* Re-Configure the Receive/Phy Control register */
+ SMC_SELECT_BANK(lp, 0);
+ SMC_SET_RPC(lp, lp->rpc_cur_mode);
+ SMC_SELECT_BANK(lp, 2);
+
+ return 1;
+}
+
+/*
+ * smc_phy_reset - reset the phy
+ * @dev: net device
+ * @phy: phy address
+ *
+ * Issue a software reset for the specified PHY and
+ * wait up to 100ms for the reset to complete. We should
+ * not access the PHY for 50ms after issuing the reset.
+ *
+ * The time to wait appears to be dependent on the PHY.
+ *
+ * Must be called with lp->lock locked.
+ */
+static int smc_phy_reset(struct net_device *dev, int phy)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ unsigned int bmcr;
+ int timeout;
+
+ smc_phy_write(dev, phy, MII_BMCR, BMCR_RESET);
+
+ for (timeout = 2; timeout; timeout--) {
+ spin_unlock_irq(&lp->lock);
+ msleep(50);
+ spin_lock_irq(&lp->lock);
+
+ bmcr = smc_phy_read(dev, phy, MII_BMCR);
+ if (!(bmcr & BMCR_RESET))
+ break;
+ }
+
+ return bmcr & BMCR_RESET;
+}
+
+/*
+ * smc_phy_powerdown - powerdown phy
+ * @dev: net device
+ *
+ * Power down the specified PHY
+ */
+static void smc_phy_powerdown(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ unsigned int bmcr;
+ int phy = lp->mii.phy_id;
+
+ if (lp->phy_type == 0)
+ return;
+
+ /* We need to ensure that no calls to smc_phy_configure are
+ pending.
+ */
+ cancel_work_sync(&lp->phy_configure);
+
+ bmcr = smc_phy_read(dev, phy, MII_BMCR);
+ smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN);
+}
+
+/*
+ * smc_phy_check_media - check the media status and adjust TCR
+ * @dev: net device
+ * @init: set true for initialisation
+ *
+ * Select duplex mode depending on negotiation state. This
+ * also updates our carrier state.
+ */
+static void smc_phy_check_media(struct net_device *dev, int init)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+
+ if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
+ /* duplex state has changed */
+ if (lp->mii.full_duplex) {
+ lp->tcr_cur_mode |= TCR_SWFDUP;
+ } else {
+ lp->tcr_cur_mode &= ~TCR_SWFDUP;
+ }
+
+ SMC_SELECT_BANK(lp, 0);
+ SMC_SET_TCR(lp, lp->tcr_cur_mode);
+ }
+}
+
+/*
+ * Configures the specified PHY through the MII management interface
+ * using Autonegotiation.
+ * Calls smc_phy_fixed() if the user has requested a certain config.
+ * If RPC ANEG bit is set, the media selection is dependent purely on
+ * the selection by the MII (either in the MII BMCR reg or the result
+ * of autonegotiation.) If the RPC ANEG bit is cleared, the selection
+ * is controlled by the RPC SPEED and RPC DPLX bits.
+ */
+static void smc_phy_configure(struct work_struct *work)
+{
+ struct smc_local *lp =
+ container_of(work, struct smc_local, phy_configure);
+ struct net_device *dev = lp->dev;
+ void __iomem *ioaddr = lp->base;
+ int phyaddr = lp->mii.phy_id;
+ int my_phy_caps; /* My PHY capabilities */
+ int my_ad_caps; /* My Advertised capabilities */
+ int status;
+
+ DBG(3, "%s:smc_program_phy()\n", dev->name);
+
+ spin_lock_irq(&lp->lock);
+
+ /*
+ * We should not be called if phy_type is zero.
+ */
+ if (lp->phy_type == 0)
+ goto smc_phy_configure_exit;
+
+ if (smc_phy_reset(dev, phyaddr)) {
+ printk("%s: PHY reset timed out\n", dev->name);
+ goto smc_phy_configure_exit;
+ }
+
+ /*
+ * Enable PHY Interrupts (for register 18)
+ * Interrupts listed here are disabled
+ */
+ smc_phy_write(dev, phyaddr, PHY_MASK_REG,
+ PHY_INT_LOSSSYNC | PHY_INT_CWRD | PHY_INT_SSD |
+ PHY_INT_ESD | PHY_INT_RPOL | PHY_INT_JAB |
+ PHY_INT_SPDDET | PHY_INT_DPLXDET);
+
+ /* Configure the Receive/Phy Control register */
+ SMC_SELECT_BANK(lp, 0);
+ SMC_SET_RPC(lp, lp->rpc_cur_mode);
+
+ /* If the user requested no auto neg, then go set his request */
+ if (lp->mii.force_media) {
+ smc_phy_fixed(dev);
+ goto smc_phy_configure_exit;
+ }
+
+ /* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
+ my_phy_caps = smc_phy_read(dev, phyaddr, MII_BMSR);
+
+ if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
+ printk(KERN_INFO "Auto negotiation NOT supported\n");
+ smc_phy_fixed(dev);
+ goto smc_phy_configure_exit;
+ }
+
+ my_ad_caps = ADVERTISE_CSMA; /* I am CSMA capable */
+
+ if (my_phy_caps & BMSR_100BASE4)
+ my_ad_caps |= ADVERTISE_100BASE4;
+ if (my_phy_caps & BMSR_100FULL)
+ my_ad_caps |= ADVERTISE_100FULL;
+ if (my_phy_caps & BMSR_100HALF)
+ my_ad_caps |= ADVERTISE_100HALF;
+ if (my_phy_caps & BMSR_10FULL)
+ my_ad_caps |= ADVERTISE_10FULL;
+ if (my_phy_caps & BMSR_10HALF)
+ my_ad_caps |= ADVERTISE_10HALF;
+
+ /* Disable capabilities not selected by our user */
+ if (lp->ctl_rspeed != 100)
+ my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF);
+
+ if (!lp->ctl_rfduplx)
+ my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL);
+
+ /* Update our Auto-Neg Advertisement Register */
+ smc_phy_write(dev, phyaddr, MII_ADVERTISE, my_ad_caps);
+ lp->mii.advertising = my_ad_caps;
+
+ /*
+ * Read the register back. Without this, it appears that when
+ * auto-negotiation is restarted, sometimes it isn't ready and
+ * the link does not come up.
+ */
+ status = smc_phy_read(dev, phyaddr, MII_ADVERTISE);
+
+ DBG(2, "%s: phy caps=%x\n", dev->name, my_phy_caps);
+ DBG(2, "%s: phy advertised caps=%x\n", dev->name, my_ad_caps);
+
+ /* Restart auto-negotiation process in order to advertise my caps */
+ smc_phy_write(dev, phyaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
+
+ smc_phy_check_media(dev, 1);
+
+smc_phy_configure_exit:
+ SMC_SELECT_BANK(lp, 2);
+ spin_unlock_irq(&lp->lock);
+}
+
+/*
+ * smc_phy_interrupt
+ *
+ * Purpose: Handle interrupts relating to PHY register 18. This is
+ * called from the "hard" interrupt handler under our private spinlock.
+ */
+static void smc_phy_interrupt(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ int phyaddr = lp->mii.phy_id;
+ int phy18;
+
+ DBG(2, "%s: %s\n", dev->name, __func__);
+
+ if (lp->phy_type == 0)
+ return;
+
+ for(;;) {
+ smc_phy_check_media(dev, 0);
+
+ /* Read PHY Register 18, Status Output */
+ phy18 = smc_phy_read(dev, phyaddr, PHY_INT_REG);
+ if ((phy18 & PHY_INT_INT) == 0)
+ break;
+ }
+}
+
+/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/
+
+static void smc_10bt_check_media(struct net_device *dev, int init)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned int old_carrier, new_carrier;
+
+ old_carrier = netif_carrier_ok(dev) ? 1 : 0;
+
+ SMC_SELECT_BANK(lp, 0);
+ new_carrier = (SMC_GET_EPH_STATUS(lp) & ES_LINK_OK) ? 1 : 0;
+ SMC_SELECT_BANK(lp, 2);
+
+ if (init || (old_carrier != new_carrier)) {
+ if (!new_carrier) {
+ netif_carrier_off(dev);
+ } else {
+ netif_carrier_on(dev);
+ }
+ if (netif_msg_link(lp))
+ printk(KERN_INFO "%s: link %s\n", dev->name,
+ new_carrier ? "up" : "down");
+ }
+}
+
+static void smc_eph_interrupt(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned int ctl;
+
+ smc_10bt_check_media(dev, 0);
+
+ SMC_SELECT_BANK(lp, 1);
+ ctl = SMC_GET_CTL(lp);
+ SMC_SET_CTL(lp, ctl & ~CTL_LE_ENABLE);
+ SMC_SET_CTL(lp, ctl);
+ SMC_SELECT_BANK(lp, 2);
+}
+
+/*
+ * This is the main routine of the driver, to handle the device when
+ * it needs some attention.
+ */
+static irqreturn_t smc_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ int status, mask, timeout, card_stats;
+ int saved_pointer;
+
+ DBG(3, "%s: %s\n", dev->name, __func__);
+
+ spin_lock(&lp->lock);
+
+ /* A preamble may be used when there is a potential race
+ * between the interruptible transmit functions and this
+ * ISR. */
+ SMC_INTERRUPT_PREAMBLE;
+
+ saved_pointer = SMC_GET_PTR(lp);
+ mask = SMC_GET_INT_MASK(lp);
+ SMC_SET_INT_MASK(lp, 0);
+
+ /* set a timeout value, so I don't stay here forever */
+ timeout = MAX_IRQ_LOOPS;
+
+ do {
+ status = SMC_GET_INT(lp);
+
+ DBG(2, "%s: INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n",
+ dev->name, status, mask,
+ ({ int meminfo; SMC_SELECT_BANK(lp, 0);
+ meminfo = SMC_GET_MIR(lp);
+ SMC_SELECT_BANK(lp, 2); meminfo; }),
+ SMC_GET_FIFO(lp));
+
+ status &= mask;
+ if (!status)
+ break;
+
+ if (status & IM_TX_INT) {
+ /* do this before RX as it will free memory quickly */
+ DBG(3, "%s: TX int\n", dev->name);
+ smc_tx(dev);
+ SMC_ACK_INT(lp, IM_TX_INT);
+ if (THROTTLE_TX_PKTS)
+ netif_wake_queue(dev);
+ } else if (status & IM_RCV_INT) {
+ DBG(3, "%s: RX irq\n", dev->name);
+ smc_rcv(dev);
+ } else if (status & IM_ALLOC_INT) {
+ DBG(3, "%s: Allocation irq\n", dev->name);
+ tasklet_hi_schedule(&lp->tx_task);
+ mask &= ~IM_ALLOC_INT;
+ } else if (status & IM_TX_EMPTY_INT) {
+ DBG(3, "%s: TX empty\n", dev->name);
+ mask &= ~IM_TX_EMPTY_INT;
+
+ /* update stats */
+ SMC_SELECT_BANK(lp, 0);
+ card_stats = SMC_GET_COUNTER(lp);
+ SMC_SELECT_BANK(lp, 2);
+
+ /* single collisions */
+ dev->stats.collisions += card_stats & 0xF;
+ card_stats >>= 4;
+
+ /* multiple collisions */
+ dev->stats.collisions += card_stats & 0xF;
+ } else if (status & IM_RX_OVRN_INT) {
+ DBG(1, "%s: RX overrun (EPH_ST 0x%04x)\n", dev->name,
+ ({ int eph_st; SMC_SELECT_BANK(lp, 0);
+ eph_st = SMC_GET_EPH_STATUS(lp);
+ SMC_SELECT_BANK(lp, 2); eph_st; }));
+ SMC_ACK_INT(lp, IM_RX_OVRN_INT);
+ dev->stats.rx_errors++;
+ dev->stats.rx_fifo_errors++;
+ } else if (status & IM_EPH_INT) {
+ smc_eph_interrupt(dev);
+ } else if (status & IM_MDINT) {
+ SMC_ACK_INT(lp, IM_MDINT);
+ smc_phy_interrupt(dev);
+ } else if (status & IM_ERCV_INT) {
+ SMC_ACK_INT(lp, IM_ERCV_INT);
+ PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT\n", dev->name);
+ }
+ } while (--timeout);
+
+ /* restore register states */
+ SMC_SET_PTR(lp, saved_pointer);
+ SMC_SET_INT_MASK(lp, mask);
+ spin_unlock(&lp->lock);
+
+#ifndef CONFIG_NET_POLL_CONTROLLER
+ if (timeout == MAX_IRQ_LOOPS)
+ PRINTK("%s: spurious interrupt (mask = 0x%02x)\n",
+ dev->name, mask);
+#endif
+ DBG(3, "%s: Interrupt done (%d loops)\n",
+ dev->name, MAX_IRQ_LOOPS - timeout);
+
+ /*
+ * We return IRQ_HANDLED unconditionally here even if there was
+ * nothing to do. There is a possibility that a packet might
+ * get enqueued into the chip right after TX_EMPTY_INT is raised
+ * but just before the CPU acknowledges the IRQ.
+	 * Better to take an unneeded IRQ on some occasions than to complicate
+ * the code for all cases.
+ */
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void smc_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ smc_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+/* Our watchdog timed out. Called by the networking layer */
+static void smc_timeout(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ int status, mask, eph_st, meminfo, fifo;
+
+ DBG(2, "%s: %s\n", dev->name, __func__);
+
+ spin_lock_irq(&lp->lock);
+ status = SMC_GET_INT(lp);
+ mask = SMC_GET_INT_MASK(lp);
+ fifo = SMC_GET_FIFO(lp);
+ SMC_SELECT_BANK(lp, 0);
+ eph_st = SMC_GET_EPH_STATUS(lp);
+ meminfo = SMC_GET_MIR(lp);
+ SMC_SELECT_BANK(lp, 2);
+ spin_unlock_irq(&lp->lock);
+ PRINTK( "%s: TX timeout (INT 0x%02x INTMASK 0x%02x "
+ "MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n",
+ dev->name, status, mask, meminfo, fifo, eph_st );
+
+ smc_reset(dev);
+ smc_enable(dev);
+
+ /*
+ * Reconfiguring the PHY doesn't seem like a bad idea here, but
+ * smc_phy_configure() calls msleep() which calls schedule_timeout()
+ * which calls schedule(). Hence we use a work queue.
+ */
+ if (lp->phy_type != 0)
+ schedule_work(&lp->phy_configure);
+
+ /* We can accept TX packets again */
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+/*
+ * This routine will, depending on the values passed to it,
+ * either make it accept multicast packets, go into
+ * promiscuous mode (for TCPDUMP and cousins) or accept
+ * a select set of multicast packets
+ */
+static void smc_set_multicast_list(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned char multicast_table[8];
+ int update_multicast = 0;
+
+ DBG(2, "%s: %s\n", dev->name, __func__);
+
+ if (dev->flags & IFF_PROMISC) {
+ DBG(2, "%s: RCR_PRMS\n", dev->name);
+ lp->rcr_cur_mode |= RCR_PRMS;
+ }
+
+/* BUG? I never disable promiscuous mode if multicasting was turned on.
+ Now, I turn off promiscuous mode, but I don't do anything to multicasting
+ when promiscuous mode is turned on.
+*/
+
+ /*
+ * Here, I am setting this to accept all multicast packets.
+ * I don't need to zero the multicast table, because the flag is
+	 * checked before the table is used.
+ */
+ else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
+ DBG(2, "%s: RCR_ALMUL\n", dev->name);
+ lp->rcr_cur_mode |= RCR_ALMUL;
+ }
+
+ /*
+ * This sets the internal hardware table to filter out unwanted
+ * multicast packets before they take up memory.
+ *
+	 * The SMC chip uses a hash table where the high 6 bits of the CRC of
+	 * the address select a bit in the table.  If that bit is 1, then the
+ * multicast packet is accepted. Otherwise, it's dropped silently.
+ *
+ * To use the 6 bits as an offset into the table, the high 3 bits are
+ * the number of the 8 bit register, while the low 3 bits are the bit
+ * within that register.
+ */
+ else if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+
+ /* table for flipping the order of 3 bits */
+ static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7};
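+		/*
+		 * crc32_le() yields the index bits in the reverse order
+		 * of what the chip's hash expects, so each 3-bit half of
+		 * the 6-bit offset is mirrored via invert3[].  Illustrative
+		 * example: position 0x0b (001011b) sets bit invert3[1] = 4
+		 * of multicast_table[invert3[3]] = multicast_table[6].
+		 */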
+
+ /* start with a table of all zeros: reject all */
+ memset(multicast_table, 0, sizeof(multicast_table));
+
+ netdev_for_each_mc_addr(ha, dev) {
+ int position;
+
+ /* only use the low order bits */
+ position = crc32_le(~0, ha->addr, 6) & 0x3f;
+
+ /* do some messy swapping to put the bit in the right spot */
+ multicast_table[invert3[position&7]] |=
+ (1<<invert3[(position>>3)&7]);
+ }
+
+ /* be sure I get rid of flags I might have set */
+ lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL);
+
+ /* now, the table can be loaded into the chipset */
+ update_multicast = 1;
+ } else {
+ DBG(2, "%s: ~(RCR_PRMS|RCR_ALMUL)\n", dev->name);
+ lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL);
+
+ /*
+ * since I'm disabling all multicast entirely, I need to
+ * clear the multicast list
+ */
+ memset(multicast_table, 0, sizeof(multicast_table));
+ update_multicast = 1;
+ }
+
+ spin_lock_irq(&lp->lock);
+ SMC_SELECT_BANK(lp, 0);
+ SMC_SET_RCR(lp, lp->rcr_cur_mode);
+ if (update_multicast) {
+ SMC_SELECT_BANK(lp, 3);
+ SMC_SET_MCAST(lp, multicast_table);
+ }
+ SMC_SELECT_BANK(lp, 2);
+ spin_unlock_irq(&lp->lock);
+}
+
+
+/*
+ * Open and Initialize the board
+ *
+ * Set up everything, reset the card, etc..
+ */
+static int
+smc_open(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+
+ DBG(2, "%s: %s\n", dev->name, __func__);
+
+ /*
+	 * Check that the address is valid.  If it's not, refuse
+ * to bring the device up. The user must specify an
+ * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
+ */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ PRINTK("%s: no valid ethernet hw addr\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Setup the default Register Modes */
+ lp->tcr_cur_mode = TCR_DEFAULT;
+ lp->rcr_cur_mode = RCR_DEFAULT;
+ lp->rpc_cur_mode = RPC_DEFAULT |
+ lp->cfg.leda << RPC_LSXA_SHFT |
+ lp->cfg.ledb << RPC_LSXB_SHFT;
+
+ /*
+ * If we are not using a MII interface, we need to
+ * monitor our own carrier signal to detect faults.
+ */
+ if (lp->phy_type == 0)
+ lp->tcr_cur_mode |= TCR_MON_CSN;
+
+ /* reset the hardware */
+ smc_reset(dev);
+ smc_enable(dev);
+
+ /* Configure the PHY, initialize the link state */
+ if (lp->phy_type != 0)
+ smc_phy_configure(&lp->phy_configure);
+ else {
+ spin_lock_irq(&lp->lock);
+ smc_10bt_check_media(dev, 1);
+ spin_unlock_irq(&lp->lock);
+ }
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+/*
+ * smc_close
+ *
+ * This makes the board clean up everything that it can and stop
+ * talking to the outside world. Triggered by 'ifconfig ethX down'.
+ */
+static int smc_close(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+
+ DBG(2, "%s: %s\n", dev->name, __func__);
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ /* clear everything */
+ smc_shutdown(dev);
+ tasklet_kill(&lp->tx_task);
+ smc_phy_powerdown(dev);
+ return 0;
+}
+
+/*
+ * Ethtool support
+ */
+static int
+smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ int ret;
+
+ cmd->maxtxpkt = 1;
+ cmd->maxrxpkt = 1;
+
+ if (lp->phy_type != 0) {
+ spin_lock_irq(&lp->lock);
+ ret = mii_ethtool_gset(&lp->mii, cmd);
+ spin_unlock_irq(&lp->lock);
+ } else {
+ cmd->supported = SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_TP | SUPPORTED_AUI;
+
+ if (lp->ctl_rspeed == 10)
+ ethtool_cmd_speed_set(cmd, SPEED_10);
+ else if (lp->ctl_rspeed == 100)
+ ethtool_cmd_speed_set(cmd, SPEED_100);
+
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->port = 0;
+ cmd->duplex = lp->tcr_cur_mode & TCR_SWFDUP ? DUPLEX_FULL : DUPLEX_HALF;
+
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int
+smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ int ret;
+
+ if (lp->phy_type != 0) {
+ spin_lock_irq(&lp->lock);
+ ret = mii_ethtool_sset(&lp->mii, cmd);
+ spin_unlock_irq(&lp->lock);
+ } else {
+ if (cmd->autoneg != AUTONEG_DISABLE ||
+ cmd->speed != SPEED_10 ||
+ (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
+ (cmd->port != PORT_TP && cmd->port != PORT_AUI))
+ return -EINVAL;
+
+// lp->port = cmd->port;
+ lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
+
+// if (netif_running(dev))
+// smc_set_port(dev);
+
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void
+smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	/* strlcpy() guarantees NUL termination, which strncpy() does not */
+	strlcpy(info->driver, CARDNAME, sizeof(info->driver));
+	strlcpy(info->version, version, sizeof(info->version));
+	strlcpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
+}
+
+static int smc_ethtool_nwayreset(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ int ret = -EINVAL;
+
+ if (lp->phy_type != 0) {
+ spin_lock_irq(&lp->lock);
+ ret = mii_nway_restart(&lp->mii);
+ spin_unlock_irq(&lp->lock);
+ }
+
+ return ret;
+}
+
+static u32 smc_ethtool_getmsglevel(struct net_device *dev)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ return lp->msg_enable;
+}
+
+static void smc_ethtool_setmsglevel(struct net_device *dev, u32 level)
+{
+ struct smc_local *lp = netdev_priv(dev);
+ lp->msg_enable = level;
+}
+
+static int smc_write_eeprom_word(struct net_device *dev, u16 addr, u16 word)
+{
+ u16 ctl;
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+
+ spin_lock_irq(&lp->lock);
+ /* load word into GP register */
+ SMC_SELECT_BANK(lp, 1);
+ SMC_SET_GP(lp, word);
+ /* set the address to put the data in EEPROM */
+ SMC_SELECT_BANK(lp, 2);
+ SMC_SET_PTR(lp, addr);
+ /* tell it to write */
+ SMC_SELECT_BANK(lp, 1);
+ ctl = SMC_GET_CTL(lp);
+ SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_STORE));
+ /* wait for it to finish */
+ do {
+ udelay(1);
+ } while (SMC_GET_CTL(lp) & CTL_STORE);
+ /* clean up */
+ SMC_SET_CTL(lp, ctl);
+ SMC_SELECT_BANK(lp, 2);
+ spin_unlock_irq(&lp->lock);
+ return 0;
+}
+
+static int smc_read_eeprom_word(struct net_device *dev, u16 addr, u16 *word)
+{
+ u16 ctl;
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+
+ spin_lock_irq(&lp->lock);
+ /* set the EEPROM address to get the data from */
+ SMC_SELECT_BANK(lp, 2);
+ SMC_SET_PTR(lp, addr | PTR_READ);
+ /* tell it to load */
+ SMC_SELECT_BANK(lp, 1);
+ SMC_SET_GP(lp, 0xffff); /* init to known */
+ ctl = SMC_GET_CTL(lp);
+ SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_RELOAD));
+ /* wait for it to finish */
+ do {
+ udelay(1);
+ } while (SMC_GET_CTL(lp) & CTL_RELOAD);
+ /* read word from GP register */
+ *word = SMC_GET_GP(lp);
+ /* clean up */
+ SMC_SET_CTL(lp, ctl);
+ SMC_SELECT_BANK(lp, 2);
+ spin_unlock_irq(&lp->lock);
+ return 0;
+}
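+
+/*
+ * Usage sketch (illustration only, not part of the driver): the EEPROM
+ * is addressed in 16-bit words, so a byte offset is halved and the byte
+ * extracted from the big-endian word, just as smc_ethtool_geteeprom()
+ * below does for whole ranges. "offset" is a hypothetical byte offset.
+ */
+static int __maybe_unused smc_eeprom_read_byte(struct net_device *dev,
+					       int offset, u8 *byte)
+{
+	u16 word;
+	int ret = smc_read_eeprom_word(dev, offset >> 1, &word);
+
+	if (ret == 0)
+		/* even offsets map to the high byte, odd ones to the low */
+		*byte = (offset & 1) ? (word & 0xff) : (word >> 8);
+	return ret;
+}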
+
+static int smc_ethtool_geteeprom_len(struct net_device *dev)
+{
+ return 0x23 * 2;
+}
+
+static int smc_ethtool_geteeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ int i;
+ int imax;
+
+ DBG(1, "Reading %d bytes at %d(0x%x)\n",
+ eeprom->len, eeprom->offset, eeprom->offset);
+ imax = smc_ethtool_geteeprom_len(dev);
+ for (i = 0; i < eeprom->len; i += 2) {
+ int ret;
+ u16 wbuf;
+ int offset = i + eeprom->offset;
+		if (offset >= imax)
+ break;
+ ret = smc_read_eeprom_word(dev, offset >> 1, &wbuf);
+ if (ret != 0)
+ return ret;
+ DBG(2, "Read 0x%x from 0x%x\n", wbuf, offset >> 1);
+ data[i] = (wbuf >> 8) & 0xff;
+ data[i+1] = wbuf & 0xff;
+ }
+ return 0;
+}
+
+static int smc_ethtool_seteeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ int i;
+ int imax;
+
+ DBG(1, "Writing %d bytes to %d(0x%x)\n",
+ eeprom->len, eeprom->offset, eeprom->offset);
+ imax = smc_ethtool_geteeprom_len(dev);
+ for (i = 0; i < eeprom->len; i += 2) {
+ int ret;
+ u16 wbuf;
+ int offset = i + eeprom->offset;
+		if (offset >= imax)
+ break;
+ wbuf = (data[i] << 8) | data[i + 1];
+ DBG(2, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1);
+ ret = smc_write_eeprom_word(dev, offset >> 1, wbuf);
+ if (ret != 0)
+ return ret;
+ }
+ return 0;
+}
+
+
+static const struct ethtool_ops smc_ethtool_ops = {
+ .get_settings = smc_ethtool_getsettings,
+ .set_settings = smc_ethtool_setsettings,
+ .get_drvinfo = smc_ethtool_getdrvinfo,
+
+ .get_msglevel = smc_ethtool_getmsglevel,
+ .set_msglevel = smc_ethtool_setmsglevel,
+ .nway_reset = smc_ethtool_nwayreset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = smc_ethtool_geteeprom_len,
+ .get_eeprom = smc_ethtool_geteeprom,
+ .set_eeprom = smc_ethtool_seteeprom,
+};
+
+static const struct net_device_ops smc_netdev_ops = {
+ .ndo_open = smc_open,
+ .ndo_stop = smc_close,
+ .ndo_start_xmit = smc_hard_start_xmit,
+ .ndo_tx_timeout = smc_timeout,
+ .ndo_set_rx_mode = smc_set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = smc_poll_controller,
+#endif
+};
+
+/*
+ * smc_findirq
+ *
+ * This routine has a simple purpose -- make the SMC chip generate an
+ * interrupt, so an auto-detect routine can detect it and find the IRQ.
+ */
+/*
+ * does this still work?
+ *
+ * I just deleted auto_irq.c, since it was never built...
+ * --jgarzik
+ */
+static int __devinit smc_findirq(struct smc_local *lp)
+{
+ void __iomem *ioaddr = lp->base;
+ int timeout = 20;
+ unsigned long cookie;
+
+ DBG(2, "%s: %s\n", CARDNAME, __func__);
+
+ cookie = probe_irq_on();
+
+ /*
+ * What I try to do here is trigger an ALLOC_INT. This is done
+ * by allocating a small chunk of memory, which will give an interrupt
+ * when done.
+ */
+ /* enable ALLOCation interrupts ONLY */
+ SMC_SELECT_BANK(lp, 2);
+ SMC_SET_INT_MASK(lp, IM_ALLOC_INT);
+
+ /*
+ * Allocate 512 bytes of memory. Note that the chip was just
+ * reset so all the memory is available
+ */
+ SMC_SET_MMU_CMD(lp, MC_ALLOC | 1);
+
+ /*
+ * Wait until positive that the interrupt has been generated
+ */
+ do {
+ int int_status;
+ udelay(10);
+ int_status = SMC_GET_INT(lp);
+ if (int_status & IM_ALLOC_INT)
+ break; /* got the interrupt */
+ } while (--timeout);
+
+	/*
+	 * There is really nothing that I can do here if the timeout fails,
+	 * as probe_irq_off() will return 0 anyway, which is what I
+	 * want in this case. Plus, the cleanup is needed in both
+	 * cases.
+	 */
+
+ /* and disable all interrupts again */
+ SMC_SET_INT_MASK(lp, 0);
+
+ /* and return what I found */
+ return probe_irq_off(cookie);
+}
+
+/*
+ * Function: smc_probe(struct net_device *dev, void __iomem *ioaddr,
+ *                     unsigned long irq_flags)
+ *
+ * Purpose:
+ * Tests to see if a given ioaddr points to an SMC91x chip.
+ *   Returns 0 on success.
+ *
+ * Algorithm:
+ * (1) see if the high byte of BANK_SELECT is 0x33
+ * (2) compare the ioaddr with the base register's address
+ * (3) see if I recognize the chip ID in the appropriate register
+ *
+ * Here I do typical initialization tasks.
+ *
+ * o Initialize the structure if needed
+ * o  print out my vanity message if not already done
+ * o print out what type of hardware is detected
+ * o print out the ethernet address
+ * o find the IRQ
+ * o set up my private data
+ * o configure the dev structure with my subroutines
+ * o actually GRAB the irq.
+ * o GRAB the region
+ */
+static int __devinit smc_probe(struct net_device *dev, void __iomem *ioaddr,
+ unsigned long irq_flags)
+{
+ struct smc_local *lp = netdev_priv(dev);
+	static int version_printed;
+ int retval;
+ unsigned int val, revision_register;
+ const char *version_string;
+
+ DBG(2, "%s: %s\n", CARDNAME, __func__);
+
+ /* First, see if the high byte is 0x33 */
+ val = SMC_CURRENT_BANK(lp);
+ DBG(2, "%s: bank signature probe returned 0x%04x\n", CARDNAME, val);
+ if ((val & 0xFF00) != 0x3300) {
+ if ((val & 0xFF) == 0x33) {
+ printk(KERN_WARNING
+ "%s: Detected possible byte-swapped interface"
+ " at IOADDR %p\n", CARDNAME, ioaddr);
+ }
+ retval = -ENODEV;
+ goto err_out;
+ }
+
+	/*
+	 * The above MIGHT indicate a device, but I need to write to the
+	 * bank select register to test this further.
+	 */
+ SMC_SELECT_BANK(lp, 0);
+ val = SMC_CURRENT_BANK(lp);
+ if ((val & 0xFF00) != 0x3300) {
+ retval = -ENODEV;
+ goto err_out;
+ }
+
+	/*
+	 * Well, we've already written once, so hopefully another
+	 * write won't hurt. This time, I need to switch the bank
+	 * register to bank 1, so I can access the base address
+	 * register.
+	 */
+ SMC_SELECT_BANK(lp, 1);
+ val = SMC_GET_BASE(lp);
+ val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT;
+ if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) {
+ printk("%s: IOADDR %p doesn't match configuration (%x).\n",
+ CARDNAME, ioaddr, val);
+ }
+
+	/*
+	 * Check if the revision register is something that I
+	 * recognize. New chip revisions may need to be added to
+	 * chip_ids[] later.
+	 */
+ SMC_SELECT_BANK(lp, 3);
+ revision_register = SMC_GET_REV(lp);
+ DBG(2, "%s: revision = 0x%04x\n", CARDNAME, revision_register);
+	version_string = chip_ids[(revision_register >> 4) & 0xF];
+ if (!version_string || (revision_register & 0xff00) != 0x3300) {
+ /* I don't recognize this chip, so... */
+ printk("%s: IO %p: Unrecognized revision register 0x%04x"
+ ", Contact author.\n", CARDNAME,
+ ioaddr, revision_register);
+
+ retval = -ENODEV;
+ goto err_out;
+ }
+
+ /* At this point I'll assume that the chip is an SMC91x. */
+ if (version_printed++ == 0)
+ printk("%s", version);
+
+ /* fill in some of the fields */
+ dev->base_addr = (unsigned long)ioaddr;
+ lp->base = ioaddr;
+ lp->version = revision_register & 0xff;
+ spin_lock_init(&lp->lock);
+
+ /* Get the MAC address */
+ SMC_SELECT_BANK(lp, 1);
+ SMC_GET_MAC_ADDR(lp, dev->dev_addr);
+
+ /* now, reset the chip, and put it into a known state */
+ smc_reset(dev);
+
+ /*
+ * If dev->irq is 0, then the device has to be banged on to see
+ * what the IRQ is.
+ *
+ * This banging doesn't always detect the IRQ, for unknown reasons.
+	 * A workaround is to reset the chip and try again.
+ *
+ * Interestingly, the DOS packet driver *SETS* the IRQ on the card to
+ * be what is requested on the command line. I don't do that, mostly
+ * because the card that I have uses a non-standard method of accessing
+ * the IRQs, and because this _should_ work in most configurations.
+ *
+ * Specifying an IRQ is done with the assumption that the user knows
+	 * what (s)he is doing. No checking is done!
+ */
+ if (dev->irq < 1) {
+ int trials;
+
+ trials = 3;
+ while (trials--) {
+ dev->irq = smc_findirq(lp);
+ if (dev->irq)
+ break;
+ /* kick the card and try again */
+ smc_reset(dev);
+ }
+ }
+ if (dev->irq == 0) {
+ printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n",
+ dev->name);
+ retval = -ENODEV;
+ goto err_out;
+ }
+ dev->irq = irq_canonicalize(dev->irq);
+
+ /* Fill in the fields of the device structure with ethernet values. */
+ ether_setup(dev);
+
+ dev->watchdog_timeo = msecs_to_jiffies(watchdog);
+ dev->netdev_ops = &smc_netdev_ops;
+ dev->ethtool_ops = &smc_ethtool_ops;
+
+ tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev);
+ INIT_WORK(&lp->phy_configure, smc_phy_configure);
+ lp->dev = dev;
+ lp->mii.phy_id_mask = 0x1f;
+ lp->mii.reg_num_mask = 0x1f;
+ lp->mii.force_media = 0;
+ lp->mii.full_duplex = 0;
+ lp->mii.dev = dev;
+ lp->mii.mdio_read = smc_phy_read;
+ lp->mii.mdio_write = smc_phy_write;
+
+ /*
+ * Locate the phy, if any.
+ */
+ if (lp->version >= (CHIP_91100 << 4))
+ smc_phy_detect(dev);
+
+ /* then shut everything down to save power */
+ smc_shutdown(dev);
+ smc_phy_powerdown(dev);
+
+ /* Set default parameters */
+ lp->msg_enable = NETIF_MSG_LINK;
+ lp->ctl_rfduplx = 0;
+ lp->ctl_rspeed = 10;
+
+ if (lp->version >= (CHIP_91100 << 4)) {
+ lp->ctl_rfduplx = 1;
+ lp->ctl_rspeed = 100;
+ }
+
+ /* Grab the IRQ */
+ retval = request_irq(dev->irq, smc_interrupt, irq_flags, dev->name, dev);
+ if (retval)
+ goto err_out;
+
+#ifdef CONFIG_ARCH_PXA
+# ifdef SMC_USE_PXA_DMA
+ lp->cfg.flags |= SMC91X_USE_DMA;
+# endif
+ if (lp->cfg.flags & SMC91X_USE_DMA) {
+ int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW,
+ smc_pxa_dma_irq, NULL);
+ if (dma >= 0)
+ dev->dma = dma;
+ }
+#endif
+
+ retval = register_netdev(dev);
+ if (retval == 0) {
+		/* now print out the card info in a short format */
+ printk("%s: %s (rev %d) at %p IRQ %d",
+ dev->name, version_string, revision_register & 0x0f,
+ lp->base, dev->irq);
+
+ if (dev->dma != (unsigned char)-1)
+ printk(" DMA %d", dev->dma);
+
+ printk("%s%s\n",
+ lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "",
+ THROTTLE_TX_PKTS ? " [throttle_tx]" : "");
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ printk("%s: Invalid ethernet MAC address. Please "
+ "set using ifconfig\n", dev->name);
+ } else {
+ /* Print the Ethernet address */
+ printk("%s: Ethernet addr: %pM\n",
+ dev->name, dev->dev_addr);
+ }
+
+ if (lp->phy_type == 0) {
+ PRINTK("%s: No PHY found\n", dev->name);
+ } else if ((lp->phy_type & 0xfffffff0) == 0x0016f840) {
+ PRINTK("%s: PHY LAN83C183 (LAN91C111 Internal)\n", dev->name);
+ } else if ((lp->phy_type & 0xfffffff0) == 0x02821c50) {
+ PRINTK("%s: PHY LAN83C180\n", dev->name);
+ }
+ }
+
+err_out:
+#ifdef CONFIG_ARCH_PXA
+ if (retval && dev->dma != (unsigned char)-1)
+ pxa_free_dma(dev->dma);
+#endif
+ return retval;
+}
+
+static int smc_enable_device(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smc_local *lp = netdev_priv(ndev);
+ unsigned long flags;
+ unsigned char ecor, ecsr;
+ void __iomem *addr;
+	struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
+ if (!res)
+ return 0;
+
+ /*
+ * Map the attribute space. This is overkill, but clean.
+ */
+ addr = ioremap(res->start, ATTRIB_SIZE);
+ if (!addr)
+ return -ENOMEM;
+
+ /*
+ * Reset the device. We must disable IRQs around this
+ * since a reset causes the IRQ line become active.
+ */
+ local_irq_save(flags);
+ ecor = readb(addr + (ECOR << SMC_IO_SHIFT)) & ~ECOR_RESET;
+ writeb(ecor | ECOR_RESET, addr + (ECOR << SMC_IO_SHIFT));
+ readb(addr + (ECOR << SMC_IO_SHIFT));
+
+ /*
+ * Wait 100us for the chip to reset.
+ */
+ udelay(100);
+
+ /*
+ * The device will ignore all writes to the enable bit while
+ * reset is asserted, even if the reset bit is cleared in the
+ * same write. Must clear reset first, then enable the device.
+ */
+ writeb(ecor, addr + (ECOR << SMC_IO_SHIFT));
+ writeb(ecor | ECOR_ENABLE, addr + (ECOR << SMC_IO_SHIFT));
+
+ /*
+ * Set the appropriate byte/word mode.
+ */
+ ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8;
+ if (!SMC_16BIT(lp))
+ ecsr |= ECSR_IOIS8;
+ writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT));
+ local_irq_restore(flags);
+
+ iounmap(addr);
+
+ /*
+ * Wait for the chip to wake up. We could poll the control
+ * register in the main register space, but that isn't mapped
+ * yet. We know this is going to take 750us.
+ */
+ msleep(1);
+
+ return 0;
+}
+
+static int smc_request_attrib(struct platform_device *pdev,
+ struct net_device *ndev)
+{
+	struct resource *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
+ struct smc_local *lp __maybe_unused = netdev_priv(ndev);
+
+ if (!res)
+ return 0;
+
+ if (!request_mem_region(res->start, ATTRIB_SIZE, CARDNAME))
+ return -EBUSY;
+
+ return 0;
+}
+
+static void smc_release_attrib(struct platform_device *pdev,
+ struct net_device *ndev)
+{
+	struct resource *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
+ struct smc_local *lp __maybe_unused = netdev_priv(ndev);
+
+ if (res)
+ release_mem_region(res->start, ATTRIB_SIZE);
+}
+
+static inline void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev)
+{
+ if (SMC_CAN_USE_DATACS) {
+		struct resource *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
+ struct smc_local *lp = netdev_priv(ndev);
+
+ if (!res)
+ return;
+
+		if (!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) {
+ printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME);
+ return;
+ }
+
+ lp->datacs = ioremap(res->start, SMC_DATA_EXTENT);
+ }
+}
+
+static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev)
+{
+ if (SMC_CAN_USE_DATACS) {
+ struct smc_local *lp = netdev_priv(ndev);
+		struct resource *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
+
+ if (lp->datacs)
+ iounmap(lp->datacs);
+
+ lp->datacs = NULL;
+
+ if (res)
+ release_mem_region(res->start, SMC_DATA_EXTENT);
+ }
+}
+
+/*
+ * smc_drv_probe(struct platform_device *pdev)
+ *
+ * Allocate the net_device, map the chip's register window and IRQ from
+ * the platform resources, and probe for the chip.
+ *
+ *   Output:
+ *	0 --> there is a device
+ *	anything else, error
+ */
+static int __devinit smc_drv_probe(struct platform_device *pdev)
+{
+ struct smc91x_platdata *pd = pdev->dev.platform_data;
+ struct smc_local *lp;
+ struct net_device *ndev;
+ struct resource *res, *ires;
+	void __iomem *addr;
+ unsigned long irq_flags = SMC_IRQ_FLAGS;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(struct smc_local));
+ if (!ndev) {
+ printk("%s: could not allocate device.\n", CARDNAME);
+ ret = -ENOMEM;
+ goto out;
+ }
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ /* get configuration from platform data, only allow use of
+ * bus width if both SMC_CAN_USE_xxx and SMC91X_USE_xxx are set.
+ */
+
+ lp = netdev_priv(ndev);
+
+ if (pd) {
+ memcpy(&lp->cfg, pd, sizeof(lp->cfg));
+ lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
+ } else {
+ lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0;
+ lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0;
+ lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0;
+ lp->cfg.flags |= (nowait) ? SMC91X_NOWAIT : 0;
+ }
+
+ if (!lp->cfg.leda && !lp->cfg.ledb) {
+ lp->cfg.leda = RPC_LSA_DEFAULT;
+ lp->cfg.ledb = RPC_LSB_DEFAULT;
+ }
+
+ ndev->dma = (unsigned char)-1;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto out_free_netdev;
+ }
+
+
+ if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) {
+ ret = -EBUSY;
+ goto out_free_netdev;
+ }
+
+ ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!ires) {
+ ret = -ENODEV;
+ goto out_release_io;
+ }
+
+ ndev->irq = ires->start;
+
+ if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
+ irq_flags = ires->flags & IRQF_TRIGGER_MASK;
+
+ ret = smc_request_attrib(pdev, ndev);
+ if (ret)
+ goto out_release_io;
+#if defined(CONFIG_SA1100_ASSABET)
+ NCR_0 |= NCR_ENET_OSC_EN;
+#endif
+ platform_set_drvdata(pdev, ndev);
+ ret = smc_enable_device(pdev);
+ if (ret)
+ goto out_release_attrib;
+
+ addr = ioremap(res->start, SMC_IO_EXTENT);
+ if (!addr) {
+ ret = -ENOMEM;
+ goto out_release_attrib;
+ }
+
+#ifdef CONFIG_ARCH_PXA
+	/* lp was already fetched from ndev above, no need to shadow it */
+	lp->device = &pdev->dev;
+	lp->physaddr = res->start;
+#endif
+
+ ret = smc_probe(ndev, addr, irq_flags);
+ if (ret != 0)
+ goto out_iounmap;
+
+ smc_request_datacs(pdev, ndev);
+
+ return 0;
+
+ out_iounmap:
+ platform_set_drvdata(pdev, NULL);
+ iounmap(addr);
+ out_release_attrib:
+ smc_release_attrib(pdev, ndev);
+ out_release_io:
+ release_mem_region(res->start, SMC_IO_EXTENT);
+ out_free_netdev:
+ free_netdev(ndev);
+ out:
+ printk("%s: not found (%d).\n", CARDNAME, ret);
+
+ return ret;
+}
+
+static int __devexit smc_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smc_local *lp = netdev_priv(ndev);
+ struct resource *res;
+
+ platform_set_drvdata(pdev, NULL);
+
+ unregister_netdev(ndev);
+
+ free_irq(ndev->irq, ndev);
+
+#ifdef CONFIG_ARCH_PXA
+ if (ndev->dma != (unsigned char)-1)
+ pxa_free_dma(ndev->dma);
+#endif
+ iounmap(lp->base);
+
+	smc_release_datacs(pdev, ndev);
+	smc_release_attrib(pdev, ndev);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, SMC_IO_EXTENT);
+
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static int smc_drv_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ if (ndev) {
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ smc_shutdown(ndev);
+ smc_phy_powerdown(ndev);
+ }
+ }
+ return 0;
+}
+
+static int smc_drv_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ if (ndev) {
+ struct smc_local *lp = netdev_priv(ndev);
+ smc_enable_device(pdev);
+ if (netif_running(ndev)) {
+ smc_reset(ndev);
+ smc_enable(ndev);
+ if (lp->phy_type != 0)
+ smc_phy_configure(&lp->phy_configure);
+ netif_device_attach(ndev);
+ }
+ }
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id smc91x_match[] = {
+ { .compatible = "smsc,lan91c94", },
+ { .compatible = "smsc,lan91c111", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, smc91x_match);
+#else
+#define smc91x_match NULL
+#endif
+
+static const struct dev_pm_ops smc_drv_pm_ops = {
+ .suspend = smc_drv_suspend,
+ .resume = smc_drv_resume,
+};
+
+static struct platform_driver smc_driver = {
+ .probe = smc_drv_probe,
+ .remove = __devexit_p(smc_drv_remove),
+ .driver = {
+ .name = CARDNAME,
+ .owner = THIS_MODULE,
+ .pm = &smc_drv_pm_ops,
+ .of_match_table = smc91x_match,
+ },
+};
+
+static int __init smc_init(void)
+{
+ return platform_driver_register(&smc_driver);
+}
+
+static void __exit smc_cleanup(void)
+{
+ platform_driver_unregister(&smc_driver);
+}
+
+module_init(smc_init);
+module_exit(smc_cleanup);
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
new file mode 100644
index 000000000000..5f53fbbf67be
--- /dev/null
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -0,0 +1,1180 @@
+/*------------------------------------------------------------------------
+ . smc91x.h - macros for SMSC's 91C9x/91C1xx single-chip Ethernet device.
+ .
+ . Copyright (C) 1996 by Erik Stahlman
+ . Copyright (C) 2001 Standard Microsystems Corporation
+ . Developed by Simple Network Magic Corporation
+ . Copyright (C) 2003 Monta Vista Software, Inc.
+ . Unified SMC91x driver by Nicolas Pitre
+ .
+ . This program is free software; you can redistribute it and/or modify
+ . it under the terms of the GNU General Public License as published by
+ . the Free Software Foundation; either version 2 of the License, or
+ . (at your option) any later version.
+ .
+ . This program is distributed in the hope that it will be useful,
+ . but WITHOUT ANY WARRANTY; without even the implied warranty of
+ . MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ . GNU General Public License for more details.
+ .
+ . You should have received a copy of the GNU General Public License
+ . along with this program; if not, write to the Free Software
+ . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ .
+ . Information contained in this file was obtained from the LAN91C111
+ . manual from SMC. To get a copy, if you really want one, you can find
+ . information at www.smsc.com.
+ .
+ . Authors
+ . Erik Stahlman <erik@vt.edu>
+ . Daris A Nevil <dnevil@snmc.com>
+ . Nicolas Pitre <nico@fluxnic.net>
+ .
+ ---------------------------------------------------------------------------*/
+#ifndef _SMC91X_H_
+#define _SMC91X_H_
+
+#include <linux/smc91x.h>
+
+/*
+ * Define your architecture specific bus configuration parameters here.
+ */
+
+#if defined(CONFIG_ARCH_LUBBOCK) ||\
+ defined(CONFIG_MACH_MAINSTONE) ||\
+ defined(CONFIG_MACH_ZYLONITE) ||\
+ defined(CONFIG_MACH_LITTLETON) ||\
+ defined(CONFIG_MACH_ZYLONITE2) ||\
+ defined(CONFIG_ARCH_VIPER) ||\
+ defined(CONFIG_MACH_STARGATE2)
+
+#include <asm/mach-types.h>
+
+/* Now that the bus width is specified in the platform data,
+ * pretend here to support all I/O access types.
+ */
+#define SMC_CAN_USE_8BIT 1
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 1
+#define SMC_NOWAIT 1
+
+#define SMC_IO_SHIFT (lp->io_shift)
+
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_inl(a, r) readl((a) + (r))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outl(v, a, r) writel(v, (a) + (r))
+#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
+#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
+#define SMC_IRQ_FLAGS (-1) /* from resource */
+
+/* We actually can't write halfwords properly if not word aligned */
+static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+{
+ if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) {
+ unsigned int v = val << 16;
+ v |= readl(ioaddr + (reg & ~2)) & 0xffff;
+ writel(v, ioaddr + (reg & ~2));
+ } else {
+ writew(val, ioaddr + reg);
+ }
+}
+
+#elif defined(CONFIG_SA1100_PLEB)
+/* We can only do 16-bit reads and writes in the static memory space. */
+#define SMC_CAN_USE_8BIT 1
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 0
+#define SMC_IO_SHIFT 0
+#define SMC_NOWAIT 1
+
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
+#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
+
+#define SMC_IRQ_FLAGS (-1)
+
+#elif defined(CONFIG_SA1100_ASSABET)
+
+#include <mach/neponset.h>
+
+/* We can only do 8-bit reads and writes in the static memory space. */
+#define SMC_CAN_USE_8BIT 1
+#define SMC_CAN_USE_16BIT 0
+#define SMC_CAN_USE_32BIT 0
+#define SMC_NOWAIT 1
+
+/* The first two address lines aren't connected... */
+#define SMC_IO_SHIFT 2
+
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
+#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
+#define SMC_IRQ_FLAGS (-1) /* from resource */
+
+#elif defined(CONFIG_MACH_LOGICPD_PXA270) || \
+ defined(CONFIG_MACH_NOMADIK_8815NHK)
+
+#define SMC_CAN_USE_8BIT 0
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 0
+#define SMC_IO_SHIFT 0
+#define SMC_NOWAIT 1
+
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
+
+#elif defined(CONFIG_ARCH_INNOKOM) || \
+ defined(CONFIG_ARCH_PXA_IDP) || \
+ defined(CONFIG_ARCH_RAMSES) || \
+ defined(CONFIG_ARCH_PCM027)
+
+#define SMC_CAN_USE_8BIT 1
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 1
+#define SMC_IO_SHIFT 0
+#define SMC_NOWAIT 1
+#define SMC_USE_PXA_DMA 1
+
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_inl(a, r) readl((a) + (r))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outl(v, a, r) writel(v, (a) + (r))
+#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
+#define SMC_IRQ_FLAGS (-1) /* from resource */
+
+/* We actually can't write halfwords properly if not word aligned */
+static inline void
+SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+{
+ if (reg & 2) {
+ unsigned int v = val << 16;
+ v |= readl(ioaddr + (reg & ~2)) & 0xffff;
+ writel(v, ioaddr + (reg & ~2));
+ } else {
+ writew(val, ioaddr + reg);
+ }
+}
+
+#elif defined(CONFIG_SH_SH4202_MICRODEV)
+
+#define SMC_CAN_USE_8BIT 0
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 0
+
+#define SMC_inb(a, r) inb((a) + (r) - 0xa0000000)
+#define SMC_inw(a, r) inw((a) + (r) - 0xa0000000)
+#define SMC_inl(a, r) inl((a) + (r) - 0xa0000000)
+#define SMC_outb(v, a, r) outb(v, (a) + (r) - 0xa0000000)
+#define SMC_outw(v, a, r) outw(v, (a) + (r) - 0xa0000000)
+#define SMC_outl(v, a, r) outl(v, (a) + (r) - 0xa0000000)
+#define SMC_insl(a, r, p, l) insl((a) + (r) - 0xa0000000, p, l)
+#define SMC_outsl(a, r, p, l) outsl((a) + (r) - 0xa0000000, p, l)
+#define SMC_insw(a, r, p, l) insw((a) + (r) - 0xa0000000, p, l)
+#define SMC_outsw(a, r, p, l) outsw((a) + (r) - 0xa0000000, p, l)
+
+#define SMC_IRQ_FLAGS (0)
+
+#elif defined(CONFIG_M32R)
+
+#define SMC_CAN_USE_8BIT 0
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 0
+
+#define SMC_inb(a, r) inb(((u32)a) + (r))
+#define SMC_inw(a, r) inw(((u32)a) + (r))
+#define SMC_outb(v, a, r) outb(v, ((u32)a) + (r))
+#define SMC_outw(v, a, r) outw(v, ((u32)a) + (r))
+#define SMC_insw(a, r, p, l) insw(((u32)a) + (r), p, l)
+#define SMC_outsw(a, r, p, l) outsw(((u32)a) + (r), p, l)
+
+#define SMC_IRQ_FLAGS (0)
+
+#define RPC_LSA_DEFAULT RPC_LED_TX_RX
+#define RPC_LSB_DEFAULT RPC_LED_100_10
+
+#elif defined(CONFIG_ARCH_VERSATILE)
+
+#define SMC_CAN_USE_8BIT 1
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 1
+#define SMC_NOWAIT 1
+
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_inl(a, r) readl((a) + (r))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_outl(v, a, r) writel(v, (a) + (r))
+#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
+#define SMC_IRQ_FLAGS (-1) /* from resource */
+
+#elif defined(CONFIG_MN10300)
+
+/*
+ * MN10300/AM33 configuration
+ */
+
+#include <unit/smc91111.h>
+
+#elif defined(CONFIG_ARCH_MSM)
+
+#define SMC_CAN_USE_8BIT 0
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 0
+#define SMC_NOWAIT 1
+
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
+
+#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH
+
+#elif defined(CONFIG_COLDFIRE)
+
+#define SMC_CAN_USE_8BIT 0
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 0
+#define SMC_NOWAIT 1
+
+static inline void mcf_insw(void *a, unsigned char *p, int l)
+{
+ u16 *wp = (u16 *) p;
+ while (l-- > 0)
+ *wp++ = readw(a);
+}
+
+static inline void mcf_outsw(void *a, unsigned char *p, int l)
+{
+ u16 *wp = (u16 *) p;
+ while (l-- > 0)
+ writew(*wp++, a);
+}
+
+#define SMC_inw(a, r) _swapw(readw((a) + (r)))
+#define SMC_outw(v, a, r) writew(_swapw(v), (a) + (r))
+#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l)
+#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l)
+
+#define SMC_IRQ_FLAGS (IRQF_DISABLED)
+
+#else
+
+/*
+ * Default configuration
+ */
+
+#define SMC_CAN_USE_8BIT 1
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 1
+#define SMC_NOWAIT 1
+
+#define SMC_IO_SHIFT (lp->io_shift)
+
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_inl(a, r) readl((a) + (r))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_outl(v, a, r) writel(v, (a) + (r))
+#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
+#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
+
+#define RPC_LSA_DEFAULT RPC_LED_100_10
+#define RPC_LSB_DEFAULT RPC_LED_TX_RX
+
+#endif
+
+
+/* store this information for the driver.. */
+struct smc_local {
+ /*
+ * If I have to wait until memory is available to send a
+ * packet, I will store the skbuff here, until I get the
+ * desired memory. Then, I'll send it out and free it.
+ */
+ struct sk_buff *pending_tx_skb;
+ struct tasklet_struct tx_task;
+
+ /* version/revision of the SMC91x chip */
+ int version;
+
+ /* Contains the current active transmission mode */
+ int tcr_cur_mode;
+
+ /* Contains the current active receive mode */
+ int rcr_cur_mode;
+
+ /* Contains the current active receive/phy mode */
+ int rpc_cur_mode;
+ int ctl_rfduplx;
+ int ctl_rspeed;
+
+ u32 msg_enable;
+ u32 phy_type;
+ struct mii_if_info mii;
+
+ /* work queue */
+ struct work_struct phy_configure;
+ struct net_device *dev;
+ int work_pending;
+
+ spinlock_t lock;
+
+#ifdef CONFIG_ARCH_PXA
+ /* DMA needs the physical address of the chip */
+ u_long physaddr;
+ struct device *device;
+#endif
+ void __iomem *base;
+ void __iomem *datacs;
+
+ /* the low address lines on some platforms aren't connected... */
+ int io_shift;
+
+ struct smc91x_platdata cfg;
+};
+
+#define SMC_8BIT(p) ((p)->cfg.flags & SMC91X_USE_8BIT)
+#define SMC_16BIT(p) ((p)->cfg.flags & SMC91X_USE_16BIT)
+#define SMC_32BIT(p) ((p)->cfg.flags & SMC91X_USE_32BIT)
+
+#ifdef CONFIG_ARCH_PXA
+/*
+ * Let's use the DMA engine on the XScale PXA2xx for RX packets. This
+ * always happens in irq context, so there is no need to worry about
+ * races. TX is different, probably not worth it for that reason, and
+ * not as critical as RX, which can overrun memory and lose packets.
+ */
+#include <linux/dma-mapping.h>
+#include <mach/dma.h>
+
+#ifdef SMC_insl
+#undef SMC_insl
+#define SMC_insl(a, r, p, l) \
+ smc_pxa_dma_insl(a, lp, r, dev->dma, p, l)
+static inline void
+smc_pxa_dma_insl(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
+ u_char *buf, int len)
+{
+ u_long physaddr = lp->physaddr;
+ dma_addr_t dmabuf;
+
+ /* fallback if no DMA available */
+ if (dma == (unsigned char)-1) {
+ readsl(ioaddr + reg, buf, len);
+ return;
+ }
+
+ /* 64 bit alignment is required for memory to memory DMA */
+ if ((long)buf & 4) {
+ *((u32 *)buf) = SMC_inl(ioaddr, reg);
+ buf += 4;
+ len--;
+ }
+
+ len *= 4;
+ dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE);
+ DCSR(dma) = DCSR_NODESC;
+ DTADR(dma) = dmabuf;
+ DSADR(dma) = physaddr + reg;
+ DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
+ DCMD_WIDTH4 | (DCMD_LENGTH & len));
+ DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+ while (!(DCSR(dma) & DCSR_STOPSTATE))
+ cpu_relax();
+ DCSR(dma) = 0;
+ dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE);
+}
+#endif
+
+#ifdef SMC_insw
+#undef SMC_insw
+#define SMC_insw(a, r, p, l) \
+ smc_pxa_dma_insw(a, lp, r, dev->dma, p, l)
+static inline void
+smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
+ u_char *buf, int len)
+{
+ u_long physaddr = lp->physaddr;
+ dma_addr_t dmabuf;
+
+ /* fallback if no DMA available */
+ if (dma == (unsigned char)-1) {
+ readsw(ioaddr + reg, buf, len);
+ return;
+ }
+
+ /* 64 bit alignment is required for memory to memory DMA */
+ while ((long)buf & 6) {
+ *((u16 *)buf) = SMC_inw(ioaddr, reg);
+ buf += 2;
+ len--;
+ }
+
+ len *= 2;
+ dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE);
+ DCSR(dma) = DCSR_NODESC;
+ DTADR(dma) = dmabuf;
+ DSADR(dma) = physaddr + reg;
+ DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
+ DCMD_WIDTH2 | (DCMD_LENGTH & len));
+ DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+ while (!(DCSR(dma) & DCSR_STOPSTATE))
+ cpu_relax();
+ DCSR(dma) = 0;
+ dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE);
+}
+#endif
+
+static void
+smc_pxa_dma_irq(int dma, void *dummy)
+{
+ DCSR(dma) = 0;
+}
+#endif /* CONFIG_ARCH_PXA */
+
+
+/*
+ * Everything a particular hardware setup needs should have been defined
+ * at this point. Add stubs for the undefined cases, mainly to avoid
+ * compilation warnings since they'll be optimized away, or to prevent buggy
+ * use of them.
+ */
+
+#if ! SMC_CAN_USE_32BIT
+#define SMC_inl(ioaddr, reg) ({ BUG(); 0; })
+#define SMC_outl(x, ioaddr, reg) BUG()
+#define SMC_insl(a, r, p, l) BUG()
+#define SMC_outsl(a, r, p, l) BUG()
+#endif
+
+#if !defined(SMC_insl) || !defined(SMC_outsl)
+#define SMC_insl(a, r, p, l) BUG()
+#define SMC_outsl(a, r, p, l) BUG()
+#endif
+
+#if ! SMC_CAN_USE_16BIT
+
+/*
+ * Any 16-bit access is performed with two 8-bit accesses if the hardware
+ * can't do it directly. Most registers are 16-bit so those are mandatory.
+ */
+#define SMC_outw(x, ioaddr, reg) \
+ do { \
+ unsigned int __val16 = (x); \
+ SMC_outb( __val16, ioaddr, reg ); \
+ SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
+ } while (0)
+#define SMC_inw(ioaddr, reg) \
+ ({ \
+ unsigned int __val16; \
+ __val16 = SMC_inb( ioaddr, reg ); \
+ __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
+ __val16; \
+ })
+
+#define SMC_insw(a, r, p, l) BUG()
+#define SMC_outsw(a, r, p, l) BUG()
+
+#endif
+
+#if !defined(SMC_insw) || !defined(SMC_outsw)
+#define SMC_insw(a, r, p, l) BUG()
+#define SMC_outsw(a, r, p, l) BUG()
+#endif
+
+#if ! SMC_CAN_USE_8BIT
+#define SMC_inb(ioaddr, reg) ({ BUG(); 0; })
+#define SMC_outb(x, ioaddr, reg) BUG()
+#define SMC_insb(a, r, p, l) BUG()
+#define SMC_outsb(a, r, p, l) BUG()
+#endif
+
+#if !defined(SMC_insb) || !defined(SMC_outsb)
+#define SMC_insb(a, r, p, l) BUG()
+#define SMC_outsb(a, r, p, l) BUG()
+#endif
+
+#ifndef SMC_CAN_USE_DATACS
+#define SMC_CAN_USE_DATACS 0
+#endif
+
+#ifndef SMC_IO_SHIFT
+#define SMC_IO_SHIFT 0
+#endif
+
+#ifndef SMC_IRQ_FLAGS
+#define SMC_IRQ_FLAGS IRQF_TRIGGER_RISING
+#endif
+
+#ifndef SMC_INTERRUPT_PREAMBLE
+#define SMC_INTERRUPT_PREAMBLE
+#endif
+
+
+/* Because of bank switching, the LAN91x uses only 16 I/O ports */
+#define SMC_IO_EXTENT (16 << SMC_IO_SHIFT)
+#define SMC_DATA_EXTENT (4)
+
+/*
+ . Bank Select Register:
+ .
+ . yyyy yyyy 0000 00xx
+ . xx = bank number
+ . yyyy yyyy = 0x33, for identification purposes.
+*/
+#define BANK_SELECT (14 << SMC_IO_SHIFT)
+
+
+// Transmit Control Register
+/* BANK 0 */
+#define TCR_REG(lp) SMC_REG(lp, 0x0000, 0)
+#define TCR_ENABLE 0x0001 // When 1 we can transmit
+#define TCR_LOOP 0x0002 // Controls output pin LBK
+#define TCR_FORCOL 0x0004 // When 1 will force a collision
+#define TCR_PAD_EN 0x0080 // When 1 will pad tx frames < 64 bytes w/0
+#define TCR_NOCRC 0x0100 // When 1 will not append CRC to tx frames
+#define TCR_MON_CSN 0x0400 // When 1 tx monitors carrier
+#define TCR_FDUPLX 0x0800 // When 1 enables full duplex operation
+#define TCR_STP_SQET 0x1000 // When 1 stops tx if Signal Quality Error
+#define TCR_EPH_LOOP 0x2000 // When 1 enables EPH block loopback
+#define TCR_SWFDUP 0x8000 // When 1 enables Switched Full Duplex mode
+
+#define TCR_CLEAR 0 /* do NOTHING */
+/* the default settings for the TCR register : */
+#define TCR_DEFAULT (TCR_ENABLE | TCR_PAD_EN)
+
+
+// EPH Status Register
+/* BANK 0 */
+#define EPH_STATUS_REG(lp) SMC_REG(lp, 0x0002, 0)
+#define ES_TX_SUC 0x0001 // Last TX was successful
+#define ES_SNGL_COL 0x0002 // Single collision detected for last tx
+#define ES_MUL_COL 0x0004 // Multiple collisions detected for last tx
+#define ES_LTX_MULT 0x0008 // Last tx was a multicast
+#define ES_16COL 0x0010 // 16 Collisions Reached
+#define ES_SQET 0x0020 // Signal Quality Error Test
+#define ES_LTXBRD 0x0040 // Last tx was a broadcast
+#define ES_TXDEFR 0x0080 // Transmit Deferred
+#define ES_LATCOL 0x0200 // Late collision detected on last tx
+#define ES_LOSTCARR 0x0400 // Lost Carrier Sense
+#define ES_EXC_DEF 0x0800 // Excessive Deferral
+#define ES_CTR_ROL 0x1000 // Counter Roll Over indication
+#define ES_LINK_OK 0x4000 // Driven by inverted value of nLNK pin
+#define ES_TXUNRN 0x8000 // Tx Underrun
+
+
+// Receive Control Register
+/* BANK 0 */
+#define RCR_REG(lp) SMC_REG(lp, 0x0004, 0)
+#define RCR_RX_ABORT 0x0001 // Set if a rx frame was aborted
+#define RCR_PRMS 0x0002 // Enable promiscuous mode
+#define RCR_ALMUL 0x0004 // When set accepts all multicast frames
+#define RCR_RXEN 0x0100 // IFF this is set, we can receive packets
+#define RCR_STRIP_CRC 0x0200 // When set strips CRC from rx packets
+#define RCR_ABORT_ENB 0x0200 // When set will abort rx on collision
+#define RCR_FILT_CAR	0x0400	// When set filters leading 12 bits of carrier
+#define RCR_SOFTRST 0x8000 // resets the chip
+
+/* the normal settings for the RCR register : */
+#define RCR_DEFAULT (RCR_STRIP_CRC | RCR_RXEN)
+#define RCR_CLEAR 0x0 // set it to a base state
+
+
+// Counter Register
+/* BANK 0 */
+#define COUNTER_REG(lp) SMC_REG(lp, 0x0006, 0)
+
+
+// Memory Information Register
+/* BANK 0 */
+#define MIR_REG(lp) SMC_REG(lp, 0x0008, 0)
+
+
+// Receive/Phy Control Register
+/* BANK 0 */
+#define RPC_REG(lp) SMC_REG(lp, 0x000A, 0)
+#define RPC_SPEED 0x2000 // When 1 PHY is in 100Mbps mode.
+#define RPC_DPLX 0x1000 // When 1 PHY is in Full-Duplex Mode
+#define RPC_ANEG 0x0800 // When 1 PHY is in Auto-Negotiate Mode
+#define RPC_LSXA_SHFT 5 // Bits to shift LS2A,LS1A,LS0A to lsb
+#define RPC_LSXB_SHFT 2 // Bits to get LS2B,LS1B,LS0B to lsb
+
+#ifndef RPC_LSA_DEFAULT
+#define RPC_LSA_DEFAULT RPC_LED_100
+#endif
+#ifndef RPC_LSB_DEFAULT
+#define RPC_LSB_DEFAULT RPC_LED_FD
+#endif
+
+#define RPC_DEFAULT (RPC_ANEG | RPC_SPEED | RPC_DPLX)
+
+
+/* Bank 0 0x0C is reserved */
+
+// Bank Select Register
+/* All Banks */
+#define BSR_REG 0x000E
+
+
+// Configuration Reg
+/* BANK 1 */
+#define CONFIG_REG(lp) SMC_REG(lp, 0x0000, 1)
+#define CONFIG_EXT_PHY 0x0200 // 1=external MII, 0=internal Phy
+#define CONFIG_GPCNTRL 0x0400 // Inverse value drives pin nCNTRL
+#define CONFIG_NO_WAIT 0x1000 // When 1 no extra wait states on ISA bus
+#define CONFIG_EPH_POWER_EN 0x8000 // When 0 EPH is placed into low power mode.
+
+// Default is powered-up, Internal Phy, Wait States, and pin nCNTRL=low
+#define CONFIG_DEFAULT (CONFIG_EPH_POWER_EN)
+
+
+// Base Address Register
+/* BANK 1 */
+#define BASE_REG(lp) SMC_REG(lp, 0x0002, 1)
+
+
+// Individual Address Registers
+/* BANK 1 */
+#define ADDR0_REG(lp) SMC_REG(lp, 0x0004, 1)
+#define ADDR1_REG(lp) SMC_REG(lp, 0x0006, 1)
+#define ADDR2_REG(lp) SMC_REG(lp, 0x0008, 1)
+
+
+// General Purpose Register
+/* BANK 1 */
+#define GP_REG(lp) SMC_REG(lp, 0x000A, 1)
+
+
+// Control Register
+/* BANK 1 */
+#define CTL_REG(lp) SMC_REG(lp, 0x000C, 1)
+#define CTL_RCV_BAD 0x4000 // When 1 bad CRC packets are received
+#define CTL_AUTO_RELEASE 0x0800 // When 1 tx pages are released automatically
+#define CTL_LE_ENABLE 0x0080 // When 1 enables Link Error interrupt
+#define CTL_CR_ENABLE 0x0040 // When 1 enables Counter Rollover interrupt
+#define CTL_TE_ENABLE 0x0020 // When 1 enables Transmit Error interrupt
+#define CTL_EEPROM_SELECT 0x0004 // Controls EEPROM reload & store
+#define CTL_RELOAD 0x0002 // When set reads EEPROM into registers
+#define CTL_STORE 0x0001 // When set stores registers into EEPROM
+
+
+// MMU Command Register
+/* BANK 2 */
+#define MMU_CMD_REG(lp) SMC_REG(lp, 0x0000, 2)
+#define MC_BUSY 1 // When 1 the last release has not completed
+#define MC_NOP (0<<5) // No Op
+#define MC_ALLOC (1<<5) // OR with number of 256 byte packets
+#define MC_RESET (2<<5) // Reset MMU to initial state
+#define MC_REMOVE (3<<5) // Remove the current rx packet
+#define MC_RELEASE (4<<5) // Remove and release the current rx packet
+#define MC_FREEPKT (5<<5) // Release packet in PNR register
+#define MC_ENQUEUE (6<<5) // Enqueue the packet for transmit
+#define MC_RSTTXFIFO (7<<5) // Reset the TX FIFOs
+
+
+// Packet Number Register
+/* BANK 2 */
+#define PN_REG(lp) SMC_REG(lp, 0x0002, 2)
+
+
+// Allocation Result Register
+/* BANK 2 */
+#define AR_REG(lp) SMC_REG(lp, 0x0003, 2)
+#define AR_FAILED	0x80	// Allocation Failed
+
+
+// TX FIFO Ports Register
+/* BANK 2 */
+#define TXFIFO_REG(lp) SMC_REG(lp, 0x0004, 2)
+#define TXFIFO_TEMPTY 0x80 // TX FIFO Empty
+
+// RX FIFO Ports Register
+/* BANK 2 */
+#define RXFIFO_REG(lp) SMC_REG(lp, 0x0005, 2)
+#define RXFIFO_REMPTY 0x80 // RX FIFO Empty
+
+#define FIFO_REG(lp) SMC_REG(lp, 0x0004, 2)
+
+// Pointer Register
+/* BANK 2 */
+#define PTR_REG(lp) SMC_REG(lp, 0x0006, 2)
+#define PTR_RCV 0x8000 // 1=Receive area, 0=Transmit area
+#define PTR_AUTOINC 0x4000 // Auto increment the pointer on each access
+#define PTR_READ 0x2000 // When 1 the operation is a read
+
+
+// Data Register
+/* BANK 2 */
+#define DATA_REG(lp) SMC_REG(lp, 0x0008, 2)
+
+
+// Interrupt Status/Acknowledge Register
+/* BANK 2 */
+#define INT_REG(lp) SMC_REG(lp, 0x000C, 2)
+
+
+// Interrupt Mask Register
+/* BANK 2 */
+#define IM_REG(lp) SMC_REG(lp, 0x000D, 2)
+#define IM_MDINT 0x80 // PHY MI Register 18 Interrupt
+#define IM_ERCV_INT 0x40 // Early Receive Interrupt
+#define IM_EPH_INT 0x20 // Set by Ethernet Protocol Handler section
+#define IM_RX_OVRN_INT 0x10 // Set by Receiver Overruns
+#define IM_ALLOC_INT 0x08 // Set when allocation request is completed
+#define IM_TX_EMPTY_INT 0x04 // Set if the TX FIFO goes empty
+#define IM_TX_INT 0x02 // Transmit Interrupt
+#define IM_RCV_INT 0x01 // Receive Interrupt
+
+
+// Multicast Table Registers
+/* BANK 3 */
+#define MCAST_REG1(lp) SMC_REG(lp, 0x0000, 3)
+#define MCAST_REG2(lp) SMC_REG(lp, 0x0002, 3)
+#define MCAST_REG3(lp) SMC_REG(lp, 0x0004, 3)
+#define MCAST_REG4(lp) SMC_REG(lp, 0x0006, 3)
+
+
+// Management Interface Register (MII)
+/* BANK 3 */
+#define MII_REG(lp) SMC_REG(lp, 0x0008, 3)
+#define MII_MSK_CRS100 0x4000 // Disables CRS100 detection during tx half dup
+#define MII_MDOE 0x0008 // MII Output Enable
+#define MII_MCLK 0x0004 // MII Clock, pin MDCLK
+#define MII_MDI 0x0002 // MII Input, pin MDI
+#define MII_MDO 0x0001 // MII Output, pin MDO
+
+
+// Revision Register
+/* BANK 3 */
+/* ( hi: chip id low: rev # ) */
+#define REV_REG(lp) SMC_REG(lp, 0x000A, 3)
+
+
+// Early RCV Register
+/* BANK 3 */
+/* this is NOT on SMC9192 */
+#define ERCV_REG(lp) SMC_REG(lp, 0x000C, 3)
+#define ERCV_RCV_DISCRD 0x0080 // When 1 discards a packet being received
+#define ERCV_THRESHOLD 0x001F // ERCV Threshold Mask
+
+
+// External Register
+/* BANK 7 */
+#define EXT_REG(lp) SMC_REG(lp, 0x0000, 7)
+
+
+#define CHIP_9192 3
+#define CHIP_9194 4
+#define CHIP_9195 5
+#define CHIP_9196 6
+#define CHIP_91100 7
+#define CHIP_91100FD 8
+#define CHIP_91111FD 9
+
+static const char *chip_ids[16] = {
+ NULL, NULL, NULL,
+ /* 3 */ "SMC91C90/91C92",
+ /* 4 */ "SMC91C94",
+ /* 5 */ "SMC91C95",
+ /* 6 */ "SMC91C96",
+ /* 7 */ "SMC91C100",
+ /* 8 */ "SMC91C100FD",
+ /* 9 */ "SMC91C11xFD",
+ NULL, NULL, NULL,
+ NULL, NULL, NULL};
+
+
+/*
+ . Receive status bits
+*/
+#define RS_ALGNERR 0x8000
+#define RS_BRODCAST 0x4000
+#define RS_BADCRC 0x2000
+#define RS_ODDFRAME 0x1000
+#define RS_TOOLONG 0x0800
+#define RS_TOOSHORT 0x0400
+#define RS_MULTICAST 0x0001
+#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT)
+
+
+/*
+ * PHY IDs
+ * LAN83C183 == LAN91C111 Internal PHY
+ */
+#define PHY_LAN83C183 0x0016f840
+#define PHY_LAN83C180 0x02821c50
+
+/*
+ * PHY Register Addresses (LAN91C111 Internal PHY)
+ *
+ * Generic PHY registers can be found in <linux/mii.h>
+ *
+ * These phy registers are specific to our on-board phy.
+ */
+
+// PHY Configuration Register 1
+#define PHY_CFG1_REG 0x10
+#define PHY_CFG1_LNKDIS 0x8000 // 1=Rx Link Detect Function disabled
+#define PHY_CFG1_XMTDIS 0x4000 // 1=TP Transmitter Disabled
+#define PHY_CFG1_XMTPDN 0x2000 // 1=TP Transmitter Powered Down
+#define PHY_CFG1_BYPSCR 0x0400 // 1=Bypass scrambler/descrambler
+#define PHY_CFG1_UNSCDS 0x0200 // 1=Unscramble Idle Reception Disable
+#define PHY_CFG1_EQLZR 0x0100 // 1=Rx Equalizer Disabled
+#define PHY_CFG1_CABLE 0x0080 // 1=STP(150ohm), 0=UTP(100ohm)
+#define PHY_CFG1_RLVL0 0x0040 // 1=Rx Squelch level reduced by 4.5db
+#define PHY_CFG1_TLVL_SHIFT 2 // Transmit Output Level Adjust
+#define PHY_CFG1_TLVL_MASK 0x003C
+#define PHY_CFG1_TRF_MASK 0x0003 // Transmitter Rise/Fall time
+
+
+// PHY Configuration Register 2
+#define PHY_CFG2_REG 0x11
+#define PHY_CFG2_APOLDIS 0x0020 // 1=Auto Polarity Correction disabled
+#define PHY_CFG2_JABDIS 0x0010 // 1=Jabber disabled
+#define PHY_CFG2_MREG 0x0008 // 1=Multiple register access (MII mgt)
+#define PHY_CFG2_INTMDIO	0x0004	// 1=Interrupt signaled with MDIO pulse
+
+// PHY Status Output (and Interrupt status) Register
+#define PHY_INT_REG 0x12 // Status Output (Interrupt Status)
+#define PHY_INT_INT 0x8000 // 1=bits have changed since last read
+#define PHY_INT_LNKFAIL 0x4000 // 1=Link Not detected
+#define PHY_INT_LOSSSYNC 0x2000 // 1=Descrambler has lost sync
+#define PHY_INT_CWRD 0x1000 // 1=Invalid 4B5B code detected on rx
+#define PHY_INT_SSD 0x0800 // 1=No Start Of Stream detected on rx
+#define PHY_INT_ESD 0x0400 // 1=No End Of Stream detected on rx
+#define PHY_INT_RPOL 0x0200 // 1=Reverse Polarity detected
+#define PHY_INT_JAB 0x0100 // 1=Jabber detected
+#define PHY_INT_SPDDET 0x0080 // 1=100Base-TX mode, 0=10Base-T mode
+#define PHY_INT_DPLXDET 0x0040 // 1=Device in Full Duplex
+
+// PHY Interrupt/Status Mask Register
+#define PHY_MASK_REG 0x13 // Interrupt Mask
+// Uses the same bit definitions as PHY_INT_REG
+
+
+/*
+ * SMC91C96 ethernet config and status registers.
+ * These are in the "attribute" space.
+ */
+#define ECOR 0x8000
+#define ECOR_RESET 0x80
+#define ECOR_LEVEL_IRQ 0x40
+#define ECOR_WR_ATTRIB 0x04
+#define ECOR_ENABLE 0x01
+
+#define ECSR 0x8002
+#define ECSR_IOIS8 0x20
+#define ECSR_PWRDWN 0x04
+#define ECSR_INT 0x02
+
+#define ATTRIB_SIZE ((64*1024) << SMC_IO_SHIFT)
+
+
+/*
+ * Macros to abstract register access according to the data bus
+ * capabilities. Please use those and not the in/out primitives.
+ * Note: the following macros do *not* select the bank -- this must
+ * be done separately as needed in the main code. The SMC_REG() macro
+ * only uses the bank argument for debugging purposes (when enabled).
+ *
+ * Note: despite inline functions being safer, everything leading to this
+ * should preferably be macros so that BUG() reports the line number of
+ * the top call site in the core source code, not the location of some
+ * inline function.
+ */
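+
+/*
+ * Hedged usage sketch: the canonical pattern for these accessors, as used
+ * throughout smc91x.c. The RCR value is only an example; any register
+ * access must be bracketed by the right SMC_SELECT_BANK() calls since the
+ * accessors themselves never switch banks.
+ */
+#if 0	/* illustration only, never compiled */
+	spin_lock_irq(&lp->lock);
+	SMC_SELECT_BANK(lp, 0);		/* RCR lives in bank 0 */
+	SMC_SET_RCR(lp, RCR_DEFAULT);
+	SMC_SELECT_BANK(lp, 2);		/* restore the bank the rest expects */
+	spin_unlock_irq(&lp->lock);
+#endif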
+
+#if SMC_DEBUG > 0
+#define SMC_REG(lp, reg, bank) \
+ ({ \
+ int __b = SMC_CURRENT_BANK(lp); \
+ if (unlikely((__b & ~0xf0) != (0x3300 | bank))) { \
+ printk( "%s: bank reg screwed (0x%04x)\n", \
+ CARDNAME, __b ); \
+ BUG(); \
+ } \
+ reg<<SMC_IO_SHIFT; \
+ })
+#else
+#define SMC_REG(lp, reg, bank) (reg<<SMC_IO_SHIFT)
+#endif
+
+/*
+ * Hack Alert: Some setups just can't write 8 or 16 bits reliably when not
+ * aligned to a 32 bit boundary. I tell you that does exist!
+ * Fortunately the affected register accesses can be easily worked around
+ * since we can write zeroes to the preceding 16 bits without adverse
+ * effects and use a 32-bit access.
+ *
+ * Enforce it on any 32-bit capable setup for now.
+ */
+#define SMC_MUST_ALIGN_WRITE(lp) SMC_32BIT(lp)
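+
+/*
+ * Hedged illustration of the workaround: a 16-bit register at an offset
+ * with bit 1 set (PTR_REG at 6, for instance) is written as one 32-bit
+ * store at the aligned offset just below it, with the value in the high
+ * half and the harmless zeroes in the low half.
+ */
+#if 0	/* illustration only -- SMC_SET_PTR() below does exactly this */
+	SMC_outl((x) << 16, ioaddr, SMC_REG(lp, 4, 2));
+#endif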
+
+#define SMC_GET_PN(lp) \
+ (SMC_8BIT(lp) ? (SMC_inb(ioaddr, PN_REG(lp))) \
+ : (SMC_inw(ioaddr, PN_REG(lp)) & 0xFF))
+
+#define SMC_SET_PN(lp, x) \
+ do { \
+ if (SMC_MUST_ALIGN_WRITE(lp)) \
+ SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 0, 2)); \
+ else if (SMC_8BIT(lp)) \
+ SMC_outb(x, ioaddr, PN_REG(lp)); \
+ else \
+ SMC_outw(x, ioaddr, PN_REG(lp)); \
+ } while (0)
+
+#define SMC_GET_AR(lp) \
+ (SMC_8BIT(lp) ? (SMC_inb(ioaddr, AR_REG(lp))) \
+ : (SMC_inw(ioaddr, PN_REG(lp)) >> 8))
+
+#define SMC_GET_TXFIFO(lp) \
+ (SMC_8BIT(lp) ? (SMC_inb(ioaddr, TXFIFO_REG(lp))) \
+ : (SMC_inw(ioaddr, TXFIFO_REG(lp)) & 0xFF))
+
+#define SMC_GET_RXFIFO(lp) \
+ (SMC_8BIT(lp) ? (SMC_inb(ioaddr, RXFIFO_REG(lp))) \
+ : (SMC_inw(ioaddr, TXFIFO_REG(lp)) >> 8))
+
+#define SMC_GET_INT(lp) \
+ (SMC_8BIT(lp) ? (SMC_inb(ioaddr, INT_REG(lp))) \
+ : (SMC_inw(ioaddr, INT_REG(lp)) & 0xFF))
+
+#define SMC_ACK_INT(lp, x) \
+ do { \
+ if (SMC_8BIT(lp)) \
+ SMC_outb(x, ioaddr, INT_REG(lp)); \
+ else { \
+ unsigned long __flags; \
+ int __mask; \
+ local_irq_save(__flags); \
+ __mask = SMC_inw(ioaddr, INT_REG(lp)) & ~0xff; \
+ SMC_outw(__mask | (x), ioaddr, INT_REG(lp)); \
+ local_irq_restore(__flags); \
+ } \
+ } while (0)
+
+#define SMC_GET_INT_MASK(lp) \
+ (SMC_8BIT(lp) ? (SMC_inb(ioaddr, IM_REG(lp))) \
+ : (SMC_inw(ioaddr, INT_REG(lp)) >> 8))
+
+#define SMC_SET_INT_MASK(lp, x) \
+ do { \
+ if (SMC_8BIT(lp)) \
+ SMC_outb(x, ioaddr, IM_REG(lp)); \
+ else \
+ SMC_outw((x) << 8, ioaddr, INT_REG(lp)); \
+ } while (0)
+
+#define SMC_CURRENT_BANK(lp) SMC_inw(ioaddr, BANK_SELECT)
+
+#define SMC_SELECT_BANK(lp, x) \
+ do { \
+ if (SMC_MUST_ALIGN_WRITE(lp)) \
+ SMC_outl((x)<<16, ioaddr, 12<<SMC_IO_SHIFT); \
+ else \
+ SMC_outw(x, ioaddr, BANK_SELECT); \
+ } while (0)
+
+#define SMC_GET_BASE(lp) SMC_inw(ioaddr, BASE_REG(lp))
+
+#define SMC_SET_BASE(lp, x) SMC_outw(x, ioaddr, BASE_REG(lp))
+
+#define SMC_GET_CONFIG(lp) SMC_inw(ioaddr, CONFIG_REG(lp))
+
+#define SMC_SET_CONFIG(lp, x) SMC_outw(x, ioaddr, CONFIG_REG(lp))
+
+#define SMC_GET_COUNTER(lp) SMC_inw(ioaddr, COUNTER_REG(lp))
+
+#define SMC_GET_CTL(lp) SMC_inw(ioaddr, CTL_REG(lp))
+
+#define SMC_SET_CTL(lp, x) SMC_outw(x, ioaddr, CTL_REG(lp))
+
+#define SMC_GET_MII(lp) SMC_inw(ioaddr, MII_REG(lp))
+
+#define SMC_GET_GP(lp) SMC_inw(ioaddr, GP_REG(lp))
+
+#define SMC_SET_GP(lp, x) \
+ do { \
+ if (SMC_MUST_ALIGN_WRITE(lp)) \
+ SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 8, 1)); \
+ else \
+ SMC_outw(x, ioaddr, GP_REG(lp)); \
+ } while (0)
+
+#define SMC_SET_MII(lp, x) SMC_outw(x, ioaddr, MII_REG(lp))
+
+#define SMC_GET_MIR(lp) SMC_inw(ioaddr, MIR_REG(lp))
+
+#define SMC_SET_MIR(lp, x) SMC_outw(x, ioaddr, MIR_REG(lp))
+
+#define SMC_GET_MMU_CMD(lp) SMC_inw(ioaddr, MMU_CMD_REG(lp))
+
+#define SMC_SET_MMU_CMD(lp, x) SMC_outw(x, ioaddr, MMU_CMD_REG(lp))
+
+#define SMC_GET_FIFO(lp) SMC_inw(ioaddr, FIFO_REG(lp))
+
+#define SMC_GET_PTR(lp) SMC_inw(ioaddr, PTR_REG(lp))
+
+#define SMC_SET_PTR(lp, x) \
+ do { \
+ if (SMC_MUST_ALIGN_WRITE(lp)) \
+ SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 4, 2)); \
+ else \
+ SMC_outw(x, ioaddr, PTR_REG(lp)); \
+ } while (0)
+
+#define SMC_GET_EPH_STATUS(lp) SMC_inw(ioaddr, EPH_STATUS_REG(lp))
+
+#define SMC_GET_RCR(lp) SMC_inw(ioaddr, RCR_REG(lp))
+
+#define SMC_SET_RCR(lp, x) SMC_outw(x, ioaddr, RCR_REG(lp))
+
+#define SMC_GET_REV(lp) SMC_inw(ioaddr, REV_REG(lp))
+
+#define SMC_GET_RPC(lp) SMC_inw(ioaddr, RPC_REG(lp))
+
+#define SMC_SET_RPC(lp, x) \
+ do { \
+ if (SMC_MUST_ALIGN_WRITE(lp)) \
+ SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 8, 0)); \
+ else \
+ SMC_outw(x, ioaddr, RPC_REG(lp)); \
+ } while (0)
+
+#define SMC_GET_TCR(lp) SMC_inw(ioaddr, TCR_REG(lp))
+
+#define SMC_SET_TCR(lp, x) SMC_outw(x, ioaddr, TCR_REG(lp))
+
+#ifndef SMC_GET_MAC_ADDR
+#define SMC_GET_MAC_ADDR(lp, addr) \
+ do { \
+ unsigned int __v; \
+ __v = SMC_inw(ioaddr, ADDR0_REG(lp)); \
+ addr[0] = __v; addr[1] = __v >> 8; \
+ __v = SMC_inw(ioaddr, ADDR1_REG(lp)); \
+ addr[2] = __v; addr[3] = __v >> 8; \
+ __v = SMC_inw(ioaddr, ADDR2_REG(lp)); \
+ addr[4] = __v; addr[5] = __v >> 8; \
+ } while (0)
+#endif
+
+#define SMC_SET_MAC_ADDR(lp, addr) \
+ do { \
+ SMC_outw(addr[0]|(addr[1] << 8), ioaddr, ADDR0_REG(lp)); \
+ SMC_outw(addr[2]|(addr[3] << 8), ioaddr, ADDR1_REG(lp)); \
+ SMC_outw(addr[4]|(addr[5] << 8), ioaddr, ADDR2_REG(lp)); \
+ } while (0)
+
+#define SMC_SET_MCAST(lp, x) \
+ do { \
+ const unsigned char *mt = (x); \
+ SMC_outw(mt[0] | (mt[1] << 8), ioaddr, MCAST_REG1(lp)); \
+ SMC_outw(mt[2] | (mt[3] << 8), ioaddr, MCAST_REG2(lp)); \
+ SMC_outw(mt[4] | (mt[5] << 8), ioaddr, MCAST_REG3(lp)); \
+ SMC_outw(mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4(lp)); \
+ } while (0)
+
+#define SMC_PUT_PKT_HDR(lp, status, length) \
+ do { \
+ if (SMC_32BIT(lp)) \
+ SMC_outl((status) | (length)<<16, ioaddr, \
+ DATA_REG(lp)); \
+ else { \
+ SMC_outw(status, ioaddr, DATA_REG(lp)); \
+ SMC_outw(length, ioaddr, DATA_REG(lp)); \
+ } \
+ } while (0)
+
+#define SMC_GET_PKT_HDR(lp, status, length) \
+ do { \
+ if (SMC_32BIT(lp)) { \
+ unsigned int __val = SMC_inl(ioaddr, DATA_REG(lp)); \
+ (status) = __val & 0xffff; \
+ (length) = __val >> 16; \
+ } else { \
+ (status) = SMC_inw(ioaddr, DATA_REG(lp)); \
+ (length) = SMC_inw(ioaddr, DATA_REG(lp)); \
+ } \
+ } while (0)
+
+#define SMC_PUSH_DATA(lp, p, l) \
+ do { \
+ if (SMC_32BIT(lp)) { \
+ void *__ptr = (p); \
+ int __len = (l); \
+ void __iomem *__ioaddr = ioaddr; \
+ if (__len >= 2 && (unsigned long)__ptr & 2) { \
+ __len -= 2; \
+ SMC_outw(*(u16 *)__ptr, ioaddr, \
+ DATA_REG(lp)); \
+ __ptr += 2; \
+ } \
+ if (SMC_CAN_USE_DATACS && lp->datacs) \
+ __ioaddr = lp->datacs; \
+ SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
+ if (__len & 2) { \
+ __ptr += (__len & ~3); \
+ SMC_outw(*((u16 *)__ptr), ioaddr, \
+ DATA_REG(lp)); \
+ } \
+ } else if (SMC_16BIT(lp)) \
+ SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1); \
+ else if (SMC_8BIT(lp)) \
+ SMC_outsb(ioaddr, DATA_REG(lp), p, l); \
+ } while (0)
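+
+/*
+ * Worked example for the 32-bit SMC_PUSH_DATA path above (illustrative
+ * only): pushing l = 62 bytes from a buffer that is half-word but not
+ * word aligned ((unsigned long)p & 2):
+ *   - the leading SMC_outw() sends 2 bytes, leaving __ptr word aligned
+ *     and __len = 60;
+ *   - SMC_outsl() then sends 60 >> 2 = 15 longwords;
+ *   - __len & 2 == 0, so no trailing 16-bit write is needed.
+ * With l = 64 from the same address, __len becomes 62, SMC_outsl() sends
+ * 15 longwords, and the trailing SMC_outw() sends the last 2 bytes at
+ * __ptr + (__len & ~3).
+ */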
+
+#define SMC_PULL_DATA(lp, p, l) \
+ do { \
+ if (SMC_32BIT(lp)) { \
+ void *__ptr = (p); \
+ int __len = (l); \
+ void __iomem *__ioaddr = ioaddr; \
+ if ((unsigned long)__ptr & 2) { \
+ /* \
+ * We want 32bit alignment here. \
+ * Since some buses perform a full \
+ * 32bit fetch even for 16bit data \
+ * we can't use SMC_inw() here. \
+			 * Back up both the source (on-chip) \
+			 * and destination pointers by 2 bytes. \
+			 * This is possible since the call to \
+			 * SMC_GET_PKT_HDR() already advanced \
+			 * the source pointer by 4 bytes, and \
+			 * the skb_reserve(skb, 2) advanced \
+			 * the destination pointer by 2 bytes. \
+ */ \
+ __ptr -= 2; \
+ __len += 2; \
+ SMC_SET_PTR(lp, \
+ 2|PTR_READ|PTR_RCV|PTR_AUTOINC); \
+ } \
+ if (SMC_CAN_USE_DATACS && lp->datacs) \
+ __ioaddr = lp->datacs; \
+ __len += 2; \
+ SMC_insl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
+ } else if (SMC_16BIT(lp)) \
+ SMC_insw(ioaddr, DATA_REG(lp), p, (l) >> 1); \
+ else if (SMC_8BIT(lp)) \
+ SMC_insb(ioaddr, DATA_REG(lp), p, l); \
+ } while (0)
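+
+/*
+ * Illustrative trace of the re-alignment in SMC_PULL_DATA: after
+ * SMC_GET_PKT_HDR() the on-chip read pointer sits at byte 4 of the
+ * packet and, after skb_reserve(skb, 2), the destination sits at a
+ * half-word boundary.  Backing both up by 2 (SMC_SET_PTR to offset 2,
+ * __ptr -= 2) re-reads the second header half-word but leaves both
+ * pointers word aligned, so the whole payload moves with SMC_insl()
+ * alone.
+ */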
+
+#endif /* _SMC91X_H_ */
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
new file mode 100644
index 000000000000..a3aa4c0e87f3
--- /dev/null
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -0,0 +1,2406 @@
+/***************************************************************************
+ *
+ * Copyright (C) 2004-2008 SMSC
+ * Copyright (C) 2005-2008 ARM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ***************************************************************************
+ * Rewritten, heavily based on smsc911x simple driver by SMSC.
+ * Partly uses io macros from smc91x.c by Nicolas Pitre
+ *
+ * Supported devices:
+ * LAN9115, LAN9116, LAN9117, LAN9118
+ * LAN9215, LAN9216, LAN9217, LAN9218
+ * LAN9210, LAN9211
+ * LAN9220, LAN9221
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/bug.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/swab.h>
+#include <linux/phy.h>
+#include <linux/smsc911x.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
+#include "smsc911x.h"
+
+#define SMSC_CHIPNAME "smsc911x"
+#define SMSC_MDIONAME "smsc911x-mdio"
+#define SMSC_DRV_VERSION "2008-10-21"
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(SMSC_DRV_VERSION);
+MODULE_ALIAS("platform:smsc911x");
+
+#if USE_DEBUG > 0
+static int debug = 16;
+#else
+static int debug = 3;
+#endif
+
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+struct smsc911x_data;
+
+struct smsc911x_ops {
+ u32 (*reg_read)(struct smsc911x_data *pdata, u32 reg);
+ void (*reg_write)(struct smsc911x_data *pdata, u32 reg, u32 val);
+ void (*rx_readfifo)(struct smsc911x_data *pdata,
+ unsigned int *buf, unsigned int wordcount);
+ void (*tx_writefifo)(struct smsc911x_data *pdata,
+ unsigned int *buf, unsigned int wordcount);
+};
+
+struct smsc911x_data {
+ void __iomem *ioaddr;
+
+ unsigned int idrev;
+
+ /* used to decide which workarounds apply */
+ unsigned int generation;
+
+ /* device configuration (copied from platform_data during probe) */
+ struct smsc911x_platform_config config;
+
+ /* This needs to be acquired before calling any of below:
+ * smsc911x_mac_read(), smsc911x_mac_write()
+ */
+ spinlock_t mac_lock;
+
+ /* spinlock to ensure register accesses are serialised */
+ spinlock_t dev_lock;
+
+ struct phy_device *phy_dev;
+ struct mii_bus *mii_bus;
+ int phy_irq[PHY_MAX_ADDR];
+ unsigned int using_extphy;
+ int last_duplex;
+ int last_carrier;
+
+ u32 msg_enable;
+ unsigned int gpio_setting;
+ unsigned int gpio_orig_setting;
+ struct net_device *dev;
+ struct napi_struct napi;
+
+ unsigned int software_irq_signal;
+
+#ifdef USE_PHY_WORK_AROUND
+#define MIN_PACKET_SIZE (64)
+ char loopback_tx_pkt[MIN_PACKET_SIZE];
+ char loopback_rx_pkt[MIN_PACKET_SIZE];
+ unsigned int resetcount;
+#endif
+
+ /* Members for Multicast filter workaround */
+ unsigned int multicast_update_pending;
+ unsigned int set_bits_mask;
+ unsigned int clear_bits_mask;
+ unsigned int hashhi;
+ unsigned int hashlo;
+
+ /* register access functions */
+ const struct smsc911x_ops *ops;
+};
+
+/* Easy access to information */
+#define __smsc_shift(pdata, reg) ((reg) << ((pdata)->config.shift))
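+
+/* Illustrative example: a platform whose bus wiring spreads out the
+ * register map sets config.shift accordingly; with shift = 1, register
+ * 0x50 is accessed at byte offset (0x50 << 1) = 0xA0, while shift = 0
+ * leaves the standard layout untouched. */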
+
+static inline u32 __smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
+{
+ if (pdata->config.flags & SMSC911X_USE_32BIT)
+ return readl(pdata->ioaddr + reg);
+
+ if (pdata->config.flags & SMSC911X_USE_16BIT)
+ return ((readw(pdata->ioaddr + reg) & 0xFFFF) |
+ ((readw(pdata->ioaddr + reg + 2) & 0xFFFF) << 16));
+
+ BUG();
+ return 0;
+}
+
+static inline u32
+__smsc911x_reg_read_shift(struct smsc911x_data *pdata, u32 reg)
+{
+ if (pdata->config.flags & SMSC911X_USE_32BIT)
+ return readl(pdata->ioaddr + __smsc_shift(pdata, reg));
+
+ if (pdata->config.flags & SMSC911X_USE_16BIT)
+ return (readw(pdata->ioaddr +
+ __smsc_shift(pdata, reg)) & 0xFFFF) |
+ ((readw(pdata->ioaddr +
+ __smsc_shift(pdata, reg + 2)) & 0xFFFF) << 16);
+
+ BUG();
+ return 0;
+}
+
+static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
+{
+ u32 data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->dev_lock, flags);
+ data = pdata->ops->reg_read(pdata, reg);
+ spin_unlock_irqrestore(&pdata->dev_lock, flags);
+
+ return data;
+}
+
+static inline void __smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
+ u32 val)
+{
+ if (pdata->config.flags & SMSC911X_USE_32BIT) {
+ writel(val, pdata->ioaddr + reg);
+ return;
+ }
+
+ if (pdata->config.flags & SMSC911X_USE_16BIT) {
+ writew(val & 0xFFFF, pdata->ioaddr + reg);
+ writew((val >> 16) & 0xFFFF, pdata->ioaddr + reg + 2);
+ return;
+ }
+
+ BUG();
+}
+
+static inline void
+__smsc911x_reg_write_shift(struct smsc911x_data *pdata, u32 reg, u32 val)
+{
+ if (pdata->config.flags & SMSC911X_USE_32BIT) {
+ writel(val, pdata->ioaddr + __smsc_shift(pdata, reg));
+ return;
+ }
+
+ if (pdata->config.flags & SMSC911X_USE_16BIT) {
+ writew(val & 0xFFFF,
+ pdata->ioaddr + __smsc_shift(pdata, reg));
+ writew((val >> 16) & 0xFFFF,
+ pdata->ioaddr + __smsc_shift(pdata, reg + 2));
+ return;
+ }
+
+ BUG();
+}
+
+static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
+ u32 val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->dev_lock, flags);
+ pdata->ops->reg_write(pdata, reg, val);
+ spin_unlock_irqrestore(&pdata->dev_lock, flags);
+}
+
+/* Writes a packet to the TX_DATA_FIFO */
+static inline void
+smsc911x_tx_writefifo(struct smsc911x_data *pdata, unsigned int *buf,
+ unsigned int wordcount)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->dev_lock, flags);
+
+ if (pdata->config.flags & SMSC911X_SWAP_FIFO) {
+ while (wordcount--)
+ __smsc911x_reg_write(pdata, TX_DATA_FIFO,
+ swab32(*buf++));
+ goto out;
+ }
+
+ if (pdata->config.flags & SMSC911X_USE_32BIT) {
+ writesl(pdata->ioaddr + TX_DATA_FIFO, buf, wordcount);
+ goto out;
+ }
+
+ if (pdata->config.flags & SMSC911X_USE_16BIT) {
+ while (wordcount--)
+ __smsc911x_reg_write(pdata, TX_DATA_FIFO, *buf++);
+ goto out;
+ }
+
+ BUG();
+out:
+ spin_unlock_irqrestore(&pdata->dev_lock, flags);
+}
+
+/* Writes a packet to the TX_DATA_FIFO - shifted version */
+static inline void
+smsc911x_tx_writefifo_shift(struct smsc911x_data *pdata, unsigned int *buf,
+ unsigned int wordcount)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->dev_lock, flags);
+
+ if (pdata->config.flags & SMSC911X_SWAP_FIFO) {
+ while (wordcount--)
+ __smsc911x_reg_write_shift(pdata, TX_DATA_FIFO,
+ swab32(*buf++));
+ goto out;
+ }
+
+ if (pdata->config.flags & SMSC911X_USE_32BIT) {
+ writesl(pdata->ioaddr + __smsc_shift(pdata,
+ TX_DATA_FIFO), buf, wordcount);
+ goto out;
+ }
+
+ if (pdata->config.flags & SMSC911X_USE_16BIT) {
+ while (wordcount--)
+ __smsc911x_reg_write_shift(pdata,
+ TX_DATA_FIFO, *buf++);
+ goto out;
+ }
+
+ BUG();
+out:
+ spin_unlock_irqrestore(&pdata->dev_lock, flags);
+}
+
+/* Reads a packet out of the RX_DATA_FIFO */
+static inline void
+smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf,
+ unsigned int wordcount)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->dev_lock, flags);
+
+ if (pdata->config.flags & SMSC911X_SWAP_FIFO) {
+ while (wordcount--)
+ *buf++ = swab32(__smsc911x_reg_read(pdata,
+ RX_DATA_FIFO));
+ goto out;
+ }
+
+ if (pdata->config.flags & SMSC911X_USE_32BIT) {
+ readsl(pdata->ioaddr + RX_DATA_FIFO, buf, wordcount);
+ goto out;
+ }
+
+ if (pdata->config.flags & SMSC911X_USE_16BIT) {
+ while (wordcount--)
+ *buf++ = __smsc911x_reg_read(pdata, RX_DATA_FIFO);
+ goto out;
+ }
+
+ BUG();
+out:
+ spin_unlock_irqrestore(&pdata->dev_lock, flags);
+}
+
+/* Reads a packet out of the RX_DATA_FIFO - shifted version */
+static inline void
+smsc911x_rx_readfifo_shift(struct smsc911x_data *pdata, unsigned int *buf,
+ unsigned int wordcount)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->dev_lock, flags);
+
+ if (pdata->config.flags & SMSC911X_SWAP_FIFO) {
+ while (wordcount--)
+ *buf++ = swab32(__smsc911x_reg_read_shift(pdata,
+ RX_DATA_FIFO));
+ goto out;
+ }
+
+ if (pdata->config.flags & SMSC911X_USE_32BIT) {
+ readsl(pdata->ioaddr + __smsc_shift(pdata,
+ RX_DATA_FIFO), buf, wordcount);
+ goto out;
+ }
+
+ if (pdata->config.flags & SMSC911X_USE_16BIT) {
+ while (wordcount--)
+ *buf++ = __smsc911x_reg_read_shift(pdata,
+ RX_DATA_FIFO);
+ goto out;
+ }
+
+ BUG();
+out:
+ spin_unlock_irqrestore(&pdata->dev_lock, flags);
+}
+
+/* Waits with a timeout for the MAC busy bit to clear. Only called by
+ * smsc911x_mac_read and smsc911x_mac_write, so it assumes mac_lock is held */
+static int smsc911x_mac_complete(struct smsc911x_data *pdata)
+{
+ int i;
+ u32 val;
+
+ SMSC_ASSERT_MAC_LOCK(pdata);
+
+ for (i = 0; i < 40; i++) {
+ val = smsc911x_reg_read(pdata, MAC_CSR_CMD);
+ if (!(val & MAC_CSR_CMD_CSR_BUSY_))
+ return 0;
+ }
+	SMSC_WARN(pdata, hw, "Timed out waiting for MAC busy bit to clear, "
+		  "MAC_CSR_CMD: 0x%08X", val);
+ return -EIO;
+}
+
+/* Fetches a MAC register value. Assumes mac_lock is acquired */
+static u32 smsc911x_mac_read(struct smsc911x_data *pdata, unsigned int offset)
+{
+ unsigned int temp;
+
+ SMSC_ASSERT_MAC_LOCK(pdata);
+
+ temp = smsc911x_reg_read(pdata, MAC_CSR_CMD);
+ if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) {
+ SMSC_WARN(pdata, hw, "MAC busy at entry");
+ return 0xFFFFFFFF;
+ }
+
+ /* Send the MAC cmd */
+ smsc911x_reg_write(pdata, MAC_CSR_CMD, ((offset & 0xFF) |
+ MAC_CSR_CMD_CSR_BUSY_ | MAC_CSR_CMD_R_NOT_W_));
+
+ /* Workaround for hardware read-after-write restriction */
+ temp = smsc911x_reg_read(pdata, BYTE_TEST);
+
+ /* Wait for the read to complete */
+ if (likely(smsc911x_mac_complete(pdata) == 0))
+ return smsc911x_reg_read(pdata, MAC_CSR_DATA);
+
+ SMSC_WARN(pdata, hw, "MAC busy after read");
+ return 0xFFFFFFFF;
+}
+
+/* Set a mac register, mac_lock must be acquired before calling */
+static void smsc911x_mac_write(struct smsc911x_data *pdata,
+ unsigned int offset, u32 val)
+{
+ unsigned int temp;
+
+ SMSC_ASSERT_MAC_LOCK(pdata);
+
+ temp = smsc911x_reg_read(pdata, MAC_CSR_CMD);
+ if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) {
+ SMSC_WARN(pdata, hw,
+ "smsc911x_mac_write failed, MAC busy at entry");
+ return;
+ }
+
+ /* Send data to write */
+ smsc911x_reg_write(pdata, MAC_CSR_DATA, val);
+
+ /* Write the actual data */
+ smsc911x_reg_write(pdata, MAC_CSR_CMD, ((offset & 0xFF) |
+ MAC_CSR_CMD_CSR_BUSY_));
+
+ /* Workaround for hardware read-after-write restriction */
+ temp = smsc911x_reg_read(pdata, BYTE_TEST);
+
+ /* Wait for the write to complete */
+ if (likely(smsc911x_mac_complete(pdata) == 0))
+ return;
+
+ SMSC_WARN(pdata, hw, "smsc911x_mac_write failed, MAC busy after write");
+}
+
+/* Get a phy register */
+static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
+{
+ struct smsc911x_data *pdata = (struct smsc911x_data *)bus->priv;
+ unsigned long flags;
+ unsigned int addr;
+ int i, reg;
+
+ spin_lock_irqsave(&pdata->mac_lock, flags);
+
+ /* Confirm MII not busy */
+ if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) {
+ SMSC_WARN(pdata, hw, "MII is busy in smsc911x_mii_read???");
+ reg = -EIO;
+ goto out;
+ }
+
+ /* Set the address, index & direction (read from PHY) */
+ addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6);
+ smsc911x_mac_write(pdata, MII_ACC, addr);
+
+ /* Wait for read to complete w/ timeout */
+ for (i = 0; i < 100; i++)
+ if (!(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) {
+ reg = smsc911x_mac_read(pdata, MII_DATA);
+ goto out;
+ }
+
+ SMSC_WARN(pdata, hw, "Timed out waiting for MII read to finish");
+ reg = -EIO;
+
+out:
+ spin_unlock_irqrestore(&pdata->mac_lock, flags);
+ return reg;
+}
+
+/* Set a phy register */
+static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
+ u16 val)
+{
+ struct smsc911x_data *pdata = (struct smsc911x_data *)bus->priv;
+ unsigned long flags;
+ unsigned int addr;
+ int i, reg;
+
+ spin_lock_irqsave(&pdata->mac_lock, flags);
+
+ /* Confirm MII not busy */
+ if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) {
+ SMSC_WARN(pdata, hw, "MII is busy in smsc911x_mii_write???");
+ reg = -EIO;
+ goto out;
+ }
+
+ /* Put the data to write in the MAC */
+ smsc911x_mac_write(pdata, MII_DATA, val);
+
+ /* Set the address, index & direction (write to PHY) */
+ addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) |
+ MII_ACC_MII_WRITE_;
+ smsc911x_mac_write(pdata, MII_ACC, addr);
+
+ /* Wait for write to complete w/ timeout */
+ for (i = 0; i < 100; i++)
+ if (!(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) {
+ reg = 0;
+ goto out;
+ }
+
+ SMSC_WARN(pdata, hw, "Timed out waiting for MII write to finish");
+ reg = -EIO;
+
+out:
+ spin_unlock_irqrestore(&pdata->mac_lock, flags);
+ return reg;
+}
+
+/* Switch to external phy. Assumes tx and rx are stopped. */
+static void smsc911x_phy_enable_external(struct smsc911x_data *pdata)
+{
+ unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG);
+
+ /* Disable phy clocks to the MAC */
+ hwcfg &= (~HW_CFG_PHY_CLK_SEL_);
+ hwcfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_;
+ smsc911x_reg_write(pdata, HW_CFG, hwcfg);
+ udelay(10); /* Enough time for clocks to stop */
+
+ /* Switch to external phy */
+ hwcfg |= HW_CFG_EXT_PHY_EN_;
+ smsc911x_reg_write(pdata, HW_CFG, hwcfg);
+
+ /* Enable phy clocks to the MAC */
+ hwcfg &= (~HW_CFG_PHY_CLK_SEL_);
+ hwcfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_;
+ smsc911x_reg_write(pdata, HW_CFG, hwcfg);
+ udelay(10); /* Enough time for clocks to restart */
+
+ hwcfg |= HW_CFG_SMI_SEL_;
+ smsc911x_reg_write(pdata, HW_CFG, hwcfg);
+}
+
+/* Autodetects and enables an external phy if present on supported chips.
+ * Autodetection can be overridden by specifying SMSC911X_FORCE_INTERNAL_PHY
+ * or SMSC911X_FORCE_EXTERNAL_PHY in the platform_data flags. */
+static void smsc911x_phy_initialise_external(struct smsc911x_data *pdata)
+{
+ unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG);
+
+ if (pdata->config.flags & SMSC911X_FORCE_INTERNAL_PHY) {
+ SMSC_TRACE(pdata, hw, "Forcing internal PHY");
+ pdata->using_extphy = 0;
+ } else if (pdata->config.flags & SMSC911X_FORCE_EXTERNAL_PHY) {
+ SMSC_TRACE(pdata, hw, "Forcing external PHY");
+ smsc911x_phy_enable_external(pdata);
+ pdata->using_extphy = 1;
+ } else if (hwcfg & HW_CFG_EXT_PHY_DET_) {
+ SMSC_TRACE(pdata, hw,
+ "HW_CFG EXT_PHY_DET set, using external PHY");
+ smsc911x_phy_enable_external(pdata);
+ pdata->using_extphy = 1;
+ } else {
+ SMSC_TRACE(pdata, hw,
+ "HW_CFG EXT_PHY_DET clear, using internal PHY");
+ pdata->using_extphy = 0;
+ }
+}
+
+/* Fetches a tx status out of the status fifo */
+static unsigned int smsc911x_tx_get_txstatus(struct smsc911x_data *pdata)
+{
+ unsigned int result =
+ smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TSUSED_;
+
+ if (result != 0)
+ result = smsc911x_reg_read(pdata, TX_STATUS_FIFO);
+
+ return result;
+}
+
+/* Fetches the next rx status */
+static unsigned int smsc911x_rx_get_rxstatus(struct smsc911x_data *pdata)
+{
+ unsigned int result =
+ smsc911x_reg_read(pdata, RX_FIFO_INF) & RX_FIFO_INF_RXSUSED_;
+
+ if (result != 0)
+ result = smsc911x_reg_read(pdata, RX_STATUS_FIFO);
+
+ return result;
+}
+
+#ifdef USE_PHY_WORK_AROUND
+static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
+{
+ unsigned int tries;
+ u32 wrsz;
+ u32 rdsz;
+ ulong bufp;
+
+ for (tries = 0; tries < 10; tries++) {
+ unsigned int txcmd_a;
+ unsigned int txcmd_b;
+ unsigned int status;
+ unsigned int pktlength;
+ unsigned int i;
+
+ /* Zero-out rx packet memory */
+ memset(pdata->loopback_rx_pkt, 0, MIN_PACKET_SIZE);
+
+		/* Write the tx packet into the device's TX data FIFO */
+ txcmd_a = (u32)((ulong)pdata->loopback_tx_pkt & 0x03) << 16;
+ txcmd_a |= TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_;
+ txcmd_a |= MIN_PACKET_SIZE;
+
+ txcmd_b = MIN_PACKET_SIZE << 16 | MIN_PACKET_SIZE;
+
+ smsc911x_reg_write(pdata, TX_DATA_FIFO, txcmd_a);
+ smsc911x_reg_write(pdata, TX_DATA_FIFO, txcmd_b);
+
+ bufp = (ulong)pdata->loopback_tx_pkt & (~0x3);
+ wrsz = MIN_PACKET_SIZE + 3;
+ wrsz += (u32)((ulong)pdata->loopback_tx_pkt & 0x3);
+ wrsz >>= 2;
+
+ pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
+
+ /* Wait till transmit is done */
+ i = 60;
+ do {
+ udelay(5);
+ status = smsc911x_tx_get_txstatus(pdata);
+ } while ((i--) && (!status));
+
+ if (!status) {
+ SMSC_WARN(pdata, hw,
+ "Failed to transmit during loopback test");
+ continue;
+ }
+ if (status & TX_STS_ES_) {
+ SMSC_WARN(pdata, hw,
+ "Transmit encountered errors during loopback test");
+ continue;
+ }
+
+ /* Wait till receive is done */
+ i = 60;
+ do {
+ udelay(5);
+ status = smsc911x_rx_get_rxstatus(pdata);
+ } while ((i--) && (!status));
+
+ if (!status) {
+ SMSC_WARN(pdata, hw,
+ "Failed to receive during loopback test");
+ continue;
+ }
+ if (status & RX_STS_ES_) {
+ SMSC_WARN(pdata, hw,
+ "Receive encountered errors during loopback test");
+ continue;
+ }
+
+ pktlength = ((status & 0x3FFF0000UL) >> 16);
+ bufp = (ulong)pdata->loopback_rx_pkt;
+ rdsz = pktlength + 3;
+ rdsz += (u32)((ulong)pdata->loopback_rx_pkt & 0x3);
+ rdsz >>= 2;
+
+ pdata->ops->rx_readfifo(pdata, (unsigned int *)bufp, rdsz);
+
+ if (pktlength != (MIN_PACKET_SIZE + 4)) {
+			SMSC_WARN(pdata, hw, "Unexpected packet size "
+				  "during loopback test, size=%d, will retry",
+				  pktlength);
+ } else {
+ unsigned int j;
+ int mismatch = 0;
+ for (j = 0; j < MIN_PACKET_SIZE; j++) {
+ if (pdata->loopback_tx_pkt[j]
+ != pdata->loopback_rx_pkt[j]) {
+ mismatch = 1;
+ break;
+ }
+ }
+ if (!mismatch) {
+ SMSC_TRACE(pdata, hw, "Successfully verified "
+ "loopback packet");
+ return 0;
+ } else {
+				SMSC_WARN(pdata, hw, "Data mismatch "
+					  "during loopback test, will retry");
+ }
+ }
+ }
+
+ return -EIO;
+}
+
+static int smsc911x_phy_reset(struct smsc911x_data *pdata)
+{
+ struct phy_device *phy_dev = pdata->phy_dev;
+ unsigned int temp;
+ unsigned int i = 100000;
+
+ BUG_ON(!phy_dev);
+ BUG_ON(!phy_dev->bus);
+
+ SMSC_TRACE(pdata, hw, "Performing PHY BCR Reset");
+ smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET);
+ do {
+ msleep(1);
+ temp = smsc911x_mii_read(phy_dev->bus, phy_dev->addr,
+ MII_BMCR);
+ } while ((i--) && (temp & BMCR_RESET));
+
+ if (temp & BMCR_RESET) {
+ SMSC_WARN(pdata, hw, "PHY reset failed to complete");
+ return -EIO;
+ }
+	/* Extra delay required because the phy may not have completed its
+	 * reset by the time BMCR_RESET reads back as clear. Specs say 256 us
+	 * is enough delay, but use 1 ms here to be safe */
+ msleep(1);
+
+ return 0;
+}
+
+static int smsc911x_phy_loopbacktest(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ struct phy_device *phy_dev = pdata->phy_dev;
+ int result = -EIO;
+ unsigned int i, val;
+ unsigned long flags;
+
+ /* Initialise tx packet using broadcast destination address */
+ memset(pdata->loopback_tx_pkt, 0xff, ETH_ALEN);
+
+ /* Use incrementing source address */
+ for (i = 6; i < 12; i++)
+ pdata->loopback_tx_pkt[i] = (char)i;
+
+ /* Set length type field */
+ pdata->loopback_tx_pkt[12] = 0x00;
+ pdata->loopback_tx_pkt[13] = 0x00;
+
+ for (i = 14; i < MIN_PACKET_SIZE; i++)
+ pdata->loopback_tx_pkt[i] = (char)i;
+
+ val = smsc911x_reg_read(pdata, HW_CFG);
+ val &= HW_CFG_TX_FIF_SZ_;
+ val |= HW_CFG_SF_;
+ smsc911x_reg_write(pdata, HW_CFG, val);
+
+ smsc911x_reg_write(pdata, TX_CFG, TX_CFG_TX_ON_);
+ smsc911x_reg_write(pdata, RX_CFG,
+ (u32)((ulong)pdata->loopback_rx_pkt & 0x03) << 8);
+
+ for (i = 0; i < 10; i++) {
+ /* Set PHY to 10/FD, no ANEG, and loopback mode */
+ smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR,
+ BMCR_LOOPBACK | BMCR_FULLDPLX);
+
+ /* Enable MAC tx/rx, FD */
+ spin_lock_irqsave(&pdata->mac_lock, flags);
+ smsc911x_mac_write(pdata, MAC_CR, MAC_CR_FDPX_
+ | MAC_CR_TXEN_ | MAC_CR_RXEN_);
+ spin_unlock_irqrestore(&pdata->mac_lock, flags);
+
+ if (smsc911x_phy_check_loopbackpkt(pdata) == 0) {
+ result = 0;
+ break;
+ }
+ pdata->resetcount++;
+
+ /* Disable MAC rx */
+ spin_lock_irqsave(&pdata->mac_lock, flags);
+ smsc911x_mac_write(pdata, MAC_CR, 0);
+ spin_unlock_irqrestore(&pdata->mac_lock, flags);
+
+ smsc911x_phy_reset(pdata);
+ }
+
+ /* Disable MAC */
+ spin_lock_irqsave(&pdata->mac_lock, flags);
+ smsc911x_mac_write(pdata, MAC_CR, 0);
+ spin_unlock_irqrestore(&pdata->mac_lock, flags);
+
+ /* Cancel PHY loopback mode */
+ smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, 0);
+
+ smsc911x_reg_write(pdata, TX_CFG, 0);
+ smsc911x_reg_write(pdata, RX_CFG, 0);
+
+ return result;
+}
+#endif /* USE_PHY_WORK_AROUND */
+
+static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata)
+{
+ struct phy_device *phy_dev = pdata->phy_dev;
+ u32 afc = smsc911x_reg_read(pdata, AFC_CFG);
+ u32 flow;
+ unsigned long flags;
+
+ if (phy_dev->duplex == DUPLEX_FULL) {
+ u16 lcladv = phy_read(phy_dev, MII_ADVERTISE);
+ u16 rmtadv = phy_read(phy_dev, MII_LPA);
+ u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+
+ if (cap & FLOW_CTRL_RX)
+ flow = 0xFFFF0002;
+ else
+ flow = 0;
+
+ if (cap & FLOW_CTRL_TX)
+ afc |= 0xF;
+ else
+ afc &= ~0xF;
+
+ SMSC_TRACE(pdata, hw, "rx pause %s, tx pause %s",
+ (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
+ (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
+ } else {
+ SMSC_TRACE(pdata, hw, "half duplex");
+ flow = 0;
+ afc |= 0xF;
+ }
+
+ spin_lock_irqsave(&pdata->mac_lock, flags);
+ smsc911x_mac_write(pdata, FLOW, flow);
+ spin_unlock_irqrestore(&pdata->mac_lock, flags);
+
+ smsc911x_reg_write(pdata, AFC_CFG, afc);
+}
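+
+/* Pause resolution example (illustrative): when both link partners
+ * advertise symmetric pause, mii_resolve_flowctrl_fdx() reports
+ * FLOW_CTRL_RX | FLOW_CTRL_TX, so the function above programs FLOW with
+ * 0xFFFF0002 (maximum pause time plus the enable bit) and sets the low
+ * nibble of AFC_CFG to trigger automatic flow control. */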
+
+/* Update link mode if anything has changed. Called periodically when the
+ * PHY is in polling mode, even if nothing has changed. */
+static void smsc911x_phy_adjust_link(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ struct phy_device *phy_dev = pdata->phy_dev;
+ unsigned long flags;
+ int carrier;
+
+ if (phy_dev->duplex != pdata->last_duplex) {
+ unsigned int mac_cr;
+ SMSC_TRACE(pdata, hw, "duplex state has changed");
+
+ spin_lock_irqsave(&pdata->mac_lock, flags);
+ mac_cr = smsc911x_mac_read(pdata, MAC_CR);
+ if (phy_dev->duplex) {
+ SMSC_TRACE(pdata, hw,
+ "configuring for full duplex mode");
+ mac_cr |= MAC_CR_FDPX_;
+ } else {
+ SMSC_TRACE(pdata, hw,
+ "configuring for half duplex mode");
+ mac_cr &= ~MAC_CR_FDPX_;
+ }
+ smsc911x_mac_write(pdata, MAC_CR, mac_cr);
+ spin_unlock_irqrestore(&pdata->mac_lock, flags);
+
+ smsc911x_phy_update_flowcontrol(pdata);
+ pdata->last_duplex = phy_dev->duplex;
+ }
+
+ carrier = netif_carrier_ok(dev);
+ if (carrier != pdata->last_carrier) {
+ SMSC_TRACE(pdata, hw, "carrier state has changed");
+ if (carrier) {
+ SMSC_TRACE(pdata, hw, "configuring for carrier OK");
+ if ((pdata->gpio_orig_setting & GPIO_CFG_LED1_EN_) &&
+ (!pdata->using_extphy)) {
+ /* Restore original GPIO configuration */
+ pdata->gpio_setting = pdata->gpio_orig_setting;
+ smsc911x_reg_write(pdata, GPIO_CFG,
+ pdata->gpio_setting);
+ }
+ } else {
+ SMSC_TRACE(pdata, hw, "configuring for no carrier");
+ /* Check global setting that LED1
+ * usage is 10/100 indicator */
+ pdata->gpio_setting = smsc911x_reg_read(pdata,
+ GPIO_CFG);
+ if ((pdata->gpio_setting & GPIO_CFG_LED1_EN_) &&
+ (!pdata->using_extphy)) {
+ /* Force 10/100 LED off, after saving
+ * original GPIO configuration */
+ pdata->gpio_orig_setting = pdata->gpio_setting;
+
+ pdata->gpio_setting &= ~GPIO_CFG_LED1_EN_;
+ pdata->gpio_setting |= (GPIO_CFG_GPIOBUF0_
+ | GPIO_CFG_GPIODIR0_
+ | GPIO_CFG_GPIOD0_);
+ smsc911x_reg_write(pdata, GPIO_CFG,
+ pdata->gpio_setting);
+ }
+ }
+ pdata->last_carrier = carrier;
+ }
+}
+
+static int smsc911x_mii_probe(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
+ int ret;
+
+ /* find the first phy */
+ phydev = phy_find_first(pdata->mii_bus);
+ if (!phydev) {
+ netdev_err(dev, "no PHY found\n");
+ return -ENODEV;
+ }
+
+ SMSC_TRACE(pdata, probe, "PHY: addr %d, phy_id 0x%08X",
+ phydev->addr, phydev->phy_id);
+
+ ret = phy_connect_direct(dev, phydev,
+ &smsc911x_phy_adjust_link, 0,
+ pdata->config.phy_interface);
+
+ if (ret) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ return ret;
+ }
+
+ netdev_info(dev,
+ "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
+
+ /* mask with MAC supported features */
+ phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ phydev->advertising = phydev->supported;
+
+ pdata->phy_dev = phydev;
+ pdata->last_duplex = -1;
+ pdata->last_carrier = -1;
+
+#ifdef USE_PHY_WORK_AROUND
+ if (smsc911x_phy_loopbacktest(dev) < 0) {
+		SMSC_WARN(pdata, hw, "Failed loopback test");
+ return -ENODEV;
+ }
+	SMSC_TRACE(pdata, hw, "Passed loopback test");
+#endif /* USE_PHY_WORK_AROUND */
+
+ SMSC_TRACE(pdata, hw, "phy initialised successfully");
+ return 0;
+}
+
+static int __devinit smsc911x_mii_init(struct platform_device *pdev,
+ struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ int err = -ENXIO, i;
+
+ pdata->mii_bus = mdiobus_alloc();
+ if (!pdata->mii_bus) {
+ err = -ENOMEM;
+ goto err_out_1;
+ }
+
+ pdata->mii_bus->name = SMSC_MDIONAME;
+ snprintf(pdata->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+ pdata->mii_bus->priv = pdata;
+ pdata->mii_bus->read = smsc911x_mii_read;
+ pdata->mii_bus->write = smsc911x_mii_write;
+ pdata->mii_bus->irq = pdata->phy_irq;
+ for (i = 0; i < PHY_MAX_ADDR; ++i)
+ pdata->mii_bus->irq[i] = PHY_POLL;
+
+ pdata->mii_bus->parent = &pdev->dev;
+
+ switch (pdata->idrev & 0xFFFF0000) {
+ case 0x01170000:
+ case 0x01150000:
+ case 0x117A0000:
+ case 0x115A0000:
+ /* External PHY supported, try to autodetect */
+ smsc911x_phy_initialise_external(pdata);
+ break;
+ default:
+ SMSC_TRACE(pdata, hw, "External PHY is not supported, "
+ "using internal PHY");
+ pdata->using_extphy = 0;
+ break;
+ }
+
+ if (!pdata->using_extphy) {
+ /* Mask all PHYs except ID 1 (internal) */
+ pdata->mii_bus->phy_mask = ~(1 << 1);
+ }
+
+ if (mdiobus_register(pdata->mii_bus)) {
+ SMSC_WARN(pdata, probe, "Error registering mii bus");
+ goto err_out_free_bus_2;
+ }
+
+ if (smsc911x_mii_probe(dev) < 0) {
+		SMSC_WARN(pdata, probe, "Error probing mii bus");
+ goto err_out_unregister_bus_3;
+ }
+
+ return 0;
+
+err_out_unregister_bus_3:
+ mdiobus_unregister(pdata->mii_bus);
+err_out_free_bus_2:
+ mdiobus_free(pdata->mii_bus);
+err_out_1:
+ return err;
+}
+
+/* Gets the number of tx statuses in the fifo */
+static unsigned int smsc911x_tx_get_txstatcount(struct smsc911x_data *pdata)
+{
+ return (smsc911x_reg_read(pdata, TX_FIFO_INF)
+ & TX_FIFO_INF_TSUSED_) >> 16;
+}
+
+/* Reads tx statuses and increments counters where necessary */
+static void smsc911x_tx_update_txcounters(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ unsigned int tx_stat;
+
+ while ((tx_stat = smsc911x_tx_get_txstatus(pdata)) != 0) {
+ if (unlikely(tx_stat & 0x80000000)) {
+			/* In this driver the packet tag is used as the packet
+			 * length. Since a packet length can never reach
+			 * 0x8000, this bit is reserved. Note that the
+			 * "reserved bit" in the warning below does not
+			 * reference a hardware defined reserved bit but
+			 * rather a driver defined one.
+			 */
+ SMSC_WARN(pdata, hw, "Packet tag reserved bit is high");
+ } else {
+ if (unlikely(tx_stat & TX_STS_ES_)) {
+ dev->stats.tx_errors++;
+ } else {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += (tx_stat >> 16);
+ }
+ if (unlikely(tx_stat & TX_STS_EXCESS_COL_)) {
+ dev->stats.collisions += 16;
+ dev->stats.tx_aborted_errors += 1;
+ } else {
+ dev->stats.collisions +=
+ ((tx_stat >> 3) & 0xF);
+ }
+ if (unlikely(tx_stat & TX_STS_LOST_CARRIER_))
+ dev->stats.tx_carrier_errors += 1;
+ if (unlikely(tx_stat & TX_STS_LATE_COL_)) {
+ dev->stats.collisions++;
+ dev->stats.tx_aborted_errors++;
+ }
+ }
+ }
+}
+
+/* Increments the Rx error counters */
+static void
+smsc911x_rx_counterrors(struct net_device *dev, unsigned int rxstat)
+{
+ int crc_err = 0;
+
+ if (unlikely(rxstat & RX_STS_ES_)) {
+ dev->stats.rx_errors++;
+ if (unlikely(rxstat & RX_STS_CRC_ERR_)) {
+ dev->stats.rx_crc_errors++;
+ crc_err = 1;
+ }
+ }
+ if (likely(!crc_err)) {
+ if (unlikely((rxstat & RX_STS_FRAME_TYPE_) &&
+ (rxstat & RX_STS_LENGTH_ERR_)))
+ dev->stats.rx_length_errors++;
+ if (rxstat & RX_STS_MCAST_)
+ dev->stats.multicast++;
+ }
+}
+
+/* Quickly dumps bad packets */
+static void
+smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktwords)
+{
+	/* Callers pass a count of 32-bit FIFO words (already including the
+	 * NET_IP_ALIGN padding); do not convert from bytes again here */
+
+ if (likely(pktwords >= 4)) {
+ unsigned int timeout = 500;
+ unsigned int val;
+ smsc911x_reg_write(pdata, RX_DP_CTRL, RX_DP_CTRL_RX_FFWD_);
+ do {
+ udelay(1);
+ val = smsc911x_reg_read(pdata, RX_DP_CTRL);
+ } while ((val & RX_DP_CTRL_RX_FFWD_) && --timeout);
+
+ if (unlikely(timeout == 0))
+ SMSC_WARN(pdata, hw, "Timed out waiting for "
+ "RX FFWD to finish, RX_DP_CTRL: 0x%08X", val);
+ } else {
+ unsigned int temp;
+ while (pktwords--)
+ temp = smsc911x_reg_read(pdata, RX_DATA_FIFO);
+ }
+}
+
+/* NAPI poll function */
+static int smsc911x_poll(struct napi_struct *napi, int budget)
+{
+ struct smsc911x_data *pdata =
+ container_of(napi, struct smsc911x_data, napi);
+ struct net_device *dev = pdata->dev;
+ int npackets = 0;
+
+ while (npackets < budget) {
+ unsigned int pktlength;
+ unsigned int pktwords;
+ struct sk_buff *skb;
+ unsigned int rxstat = smsc911x_rx_get_rxstatus(pdata);
+
+ if (!rxstat) {
+ unsigned int temp;
+ /* We processed all packets available. Tell NAPI it can
+ * stop polling then re-enable rx interrupts */
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_RSFL_);
+ napi_complete(napi);
+ temp = smsc911x_reg_read(pdata, INT_EN);
+ temp |= INT_EN_RSFL_EN_;
+ smsc911x_reg_write(pdata, INT_EN, temp);
+ break;
+ }
+
+ /* Count packet for NAPI scheduling, even if it has an error.
+ * Error packets still require cycles to discard */
+ npackets++;
+
+ pktlength = ((rxstat & 0x3FFF0000) >> 16);
+ pktwords = (pktlength + NET_IP_ALIGN + 3) >> 2;
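+		/* e.g. a 60-byte frame with NET_IP_ALIGN = 2 occupies
+		 * (60 + 2 + 3) >> 2 = 16 32-bit FIFO words */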
+ smsc911x_rx_counterrors(dev, rxstat);
+
+ if (unlikely(rxstat & RX_STS_ES_)) {
+ SMSC_WARN(pdata, rx_err,
+ "Discarding packet with error bit set");
+ /* Packet has an error, discard it and continue with
+ * the next */
+ smsc911x_rx_fastforward(pdata, pktwords);
+ dev->stats.rx_dropped++;
+ continue;
+ }
+
+ skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN);
+ if (unlikely(!skb)) {
+ SMSC_WARN(pdata, rx_err,
+ "Unable to allocate skb for rx packet");
+ /* Drop the packet and stop this polling iteration */
+ smsc911x_rx_fastforward(pdata, pktwords);
+ dev->stats.rx_dropped++;
+ break;
+ }
+
+ skb->data = skb->head;
+ skb_reset_tail_pointer(skb);
+
+ /* Align IP on 16B boundary */
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb_put(skb, pktlength - 4);
+ pdata->ops->rx_readfifo(pdata,
+ (unsigned int *)skb->head, pktwords);
+ skb->protocol = eth_type_trans(skb, dev);
+ skb_checksum_none_assert(skb);
+ netif_receive_skb(skb);
+
+ /* Update counters */
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += (pktlength - 4);
+ }
+
+ /* Return total received packets */
+ return npackets;
+}
+
+/* Returns hash bit number for given MAC address
+ * Example:
+ * 01 00 5E 00 00 01 -> returns bit number 31 */
+static unsigned int smsc911x_hash(char addr[ETH_ALEN])
+{
+ return (ether_crc(ETH_ALEN, addr) >> 26) & 0x3f;
+}
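+
+/* Sketch of how the 6-bit hash selects a filter bit, matching the layout
+ * used in smsc911x_set_multicast_list() below: bit 5 of the hash picks
+ * HASHH vs HASHL and bits 4:0 pick the bit within that register.  For
+ * 01:00:5E:00:00:01 the hash is 31, so bit 5 is clear and the mask
+ * 1 << (31 & 0x1F) lands in HASHL bit 31. */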
+
+static void smsc911x_rx_multicast_update(struct smsc911x_data *pdata)
+{
+	/* Performs the multicast & mac_cr update. This is only called when
+	 * it is safe to do so on the current hardware, with mac_lock held */
+ unsigned int mac_cr;
+
+ SMSC_ASSERT_MAC_LOCK(pdata);
+
+ mac_cr = smsc911x_mac_read(pdata, MAC_CR);
+ mac_cr |= pdata->set_bits_mask;
+ mac_cr &= ~(pdata->clear_bits_mask);
+ smsc911x_mac_write(pdata, MAC_CR, mac_cr);
+ smsc911x_mac_write(pdata, HASHH, pdata->hashhi);
+ smsc911x_mac_write(pdata, HASHL, pdata->hashlo);
+ SMSC_TRACE(pdata, hw, "maccr 0x%08X, HASHH 0x%08X, HASHL 0x%08X",
+ mac_cr, pdata->hashhi, pdata->hashlo);
+}
+
+static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
+{
+ unsigned int mac_cr;
+
+ /* This function is only called for older LAN911x devices
+ * (revA or revB), where MAC_CR, HASHH and HASHL should not
+ * be modified during Rx - newer devices immediately update the
+ * registers.
+ *
+ * This is called from interrupt context */
+
+ spin_lock(&pdata->mac_lock);
+
+ /* Check Rx has stopped */
+ if (smsc911x_mac_read(pdata, MAC_CR) & MAC_CR_RXEN_)
+ SMSC_WARN(pdata, drv, "Rx not stopped");
+
+ /* Perform the update - safe to do now Rx has stopped */
+ smsc911x_rx_multicast_update(pdata);
+
+ /* Re-enable Rx */
+ mac_cr = smsc911x_mac_read(pdata, MAC_CR);
+ mac_cr |= MAC_CR_RXEN_;
+ smsc911x_mac_write(pdata, MAC_CR, mac_cr);
+
+ pdata->multicast_update_pending = 0;
+
+ spin_unlock(&pdata->mac_lock);
+}
+
+static int smsc911x_soft_reset(struct smsc911x_data *pdata)
+{
+ unsigned int timeout;
+ unsigned int temp;
+
+ /* Reset the LAN911x */
+ smsc911x_reg_write(pdata, HW_CFG, HW_CFG_SRST_);
+ timeout = 10;
+ do {
+ udelay(10);
+ temp = smsc911x_reg_read(pdata, HW_CFG);
+ } while ((--timeout) && (temp & HW_CFG_SRST_));
+
+ if (unlikely(temp & HW_CFG_SRST_)) {
+ SMSC_WARN(pdata, drv, "Failed to complete reset");
+ return -EIO;
+ }
+ return 0;
+}
+
+/* Sets the device MAC address to dev_addr, called with mac_lock held */
+static void
+smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, u8 dev_addr[6])
+{
+ u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4];
+ u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
+ (dev_addr[1] << 8) | dev_addr[0];
+
+ SMSC_ASSERT_MAC_LOCK(pdata);
+
+ smsc911x_mac_write(pdata, ADDRH, mac_high16);
+ smsc911x_mac_write(pdata, ADDRL, mac_low32);
+}
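+
+/* Byte-order example for the packing above (illustrative): with
+ * dev_addr = 00:11:22:33:44:55, ADDRL is written as 0x33221100 and
+ * ADDRH as 0x00005544, i.e. the first octet on the wire ends up in the
+ * least significant byte of ADDRL. */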
+
+static int smsc911x_open(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ unsigned int timeout;
+ unsigned int temp;
+ unsigned int intcfg;
+
+	/* If the phy is not yet registered, retry later */
+ if (!pdata->phy_dev) {
+ SMSC_WARN(pdata, hw, "phy_dev is NULL");
+ return -EAGAIN;
+ }
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ SMSC_WARN(pdata, hw, "dev_addr is not a valid MAC address");
+ return -EADDRNOTAVAIL;
+ }
+
+ /* Reset the LAN911x */
+ if (smsc911x_soft_reset(pdata)) {
+ SMSC_WARN(pdata, hw, "soft reset failed");
+ return -EIO;
+ }
+
+ smsc911x_reg_write(pdata, HW_CFG, 0x00050000);
+ smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740);
+
+ /* Increase the legal frame size of VLAN tagged frames to 1522 bytes */
+ spin_lock_irq(&pdata->mac_lock);
+ smsc911x_mac_write(pdata, VLAN1, ETH_P_8021Q);
+ spin_unlock_irq(&pdata->mac_lock);
+
+ /* Make sure EEPROM has finished loading before setting GPIO_CFG */
+ timeout = 50;
+ while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) &&
+ --timeout) {
+ udelay(10);
+ }
+
+ if (unlikely(timeout == 0))
+ SMSC_WARN(pdata, ifup,
+ "Timed out waiting for EEPROM busy bit to clear");
+
+ smsc911x_reg_write(pdata, GPIO_CFG, 0x70070000);
+
+ /* The soft reset above cleared the device's MAC address,
+ * restore it from local copy (set in probe) */
+ spin_lock_irq(&pdata->mac_lock);
+ smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
+ spin_unlock_irq(&pdata->mac_lock);
+
+ /* Initialise irqs, but leave all sources disabled */
+ smsc911x_reg_write(pdata, INT_EN, 0);
+ smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
+
+	/* Set interrupt deassertion interval to 100 us */
+	intcfg = ((10 << 24) | INT_CFG_IRQ_EN_);
+
+ if (pdata->config.irq_polarity) {
+ SMSC_TRACE(pdata, ifup, "irq polarity: active high");
+ intcfg |= INT_CFG_IRQ_POL_;
+ } else {
+ SMSC_TRACE(pdata, ifup, "irq polarity: active low");
+ }
+
+ if (pdata->config.irq_type) {
+ SMSC_TRACE(pdata, ifup, "irq type: push-pull");
+ intcfg |= INT_CFG_IRQ_TYPE_;
+ } else {
+ SMSC_TRACE(pdata, ifup, "irq type: open drain");
+ }
+
+ smsc911x_reg_write(pdata, INT_CFG, intcfg);
+
+ SMSC_TRACE(pdata, ifup, "Testing irq handler using IRQ %d", dev->irq);
+ pdata->software_irq_signal = 0;
+ smp_wmb();
+
+ temp = smsc911x_reg_read(pdata, INT_EN);
+ temp |= INT_EN_SW_INT_EN_;
+ smsc911x_reg_write(pdata, INT_EN, temp);
+
+ timeout = 1000;
+ while (timeout--) {
+ if (pdata->software_irq_signal)
+ break;
+ msleep(1);
+ }
+
+ if (!pdata->software_irq_signal) {
+ netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n",
+ dev->irq);
+ return -ENODEV;
+ }
+ SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d",
+ dev->irq);
+
+ netdev_info(dev, "SMSC911x/921x identified at %#08lx, IRQ: %d\n",
+ (unsigned long)pdata->ioaddr, dev->irq);
+
+ /* Reset the last known duplex and carrier */
+ pdata->last_duplex = -1;
+ pdata->last_carrier = -1;
+
+ /* Bring the PHY up */
+ phy_start(pdata->phy_dev);
+
+ temp = smsc911x_reg_read(pdata, HW_CFG);
+ /* Preserve TX FIFO size and external PHY configuration */
+ temp &= (HW_CFG_TX_FIF_SZ_|0x00000FFF);
+ temp |= HW_CFG_SF_;
+ smsc911x_reg_write(pdata, HW_CFG, temp);
+
+ temp = smsc911x_reg_read(pdata, FIFO_INT);
+ temp |= FIFO_INT_TX_AVAIL_LEVEL_;
+ temp &= ~(FIFO_INT_RX_STS_LEVEL_);
+ smsc911x_reg_write(pdata, FIFO_INT, temp);
+
+ /* set RX Data offset to 2 bytes for alignment */
+ smsc911x_reg_write(pdata, RX_CFG, (2 << 8));
+
+ /* enable NAPI polling before enabling RX interrupts */
+ napi_enable(&pdata->napi);
+
+ temp = smsc911x_reg_read(pdata, INT_EN);
+ temp |= (INT_EN_TDFA_EN_ | INT_EN_RSFL_EN_ | INT_EN_RXSTOP_INT_EN_);
+ smsc911x_reg_write(pdata, INT_EN, temp);
+
+ spin_lock_irq(&pdata->mac_lock);
+ temp = smsc911x_mac_read(pdata, MAC_CR);
+ temp |= (MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_);
+ smsc911x_mac_write(pdata, MAC_CR, temp);
+ spin_unlock_irq(&pdata->mac_lock);
+
+ smsc911x_reg_write(pdata, TX_CFG, TX_CFG_TX_ON_);
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+/* Entry point for stopping the interface */
+static int smsc911x_stop(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ unsigned int temp;
+
+ /* Disable all device interrupts */
+ temp = smsc911x_reg_read(pdata, INT_CFG);
+ temp &= ~INT_CFG_IRQ_EN_;
+ smsc911x_reg_write(pdata, INT_CFG, temp);
+
+ /* Stop Tx and Rx polling */
+ netif_stop_queue(dev);
+ napi_disable(&pdata->napi);
+
+ /* At this point all Rx and Tx activity is stopped */
+ dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP);
+ smsc911x_tx_update_txcounters(dev);
+
+ /* Bring the PHY down */
+ if (pdata->phy_dev)
+ phy_stop(pdata->phy_dev);
+
+ SMSC_TRACE(pdata, ifdown, "Interface stopped");
+ return 0;
+}
+
+/* Entry point for transmitting a packet */
+static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ unsigned int freespace;
+ unsigned int tx_cmd_a;
+ unsigned int tx_cmd_b;
+ unsigned int temp;
+ u32 wrsz;
+ ulong bufp;
+
+ freespace = smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TDFREE_;
+
+ if (unlikely(freespace < TX_FIFO_LOW_THRESHOLD))
+ SMSC_WARN(pdata, tx_err,
+ "Tx data fifo low, space available: %d", freespace);
+
+ /* Word alignment adjustment */
+ tx_cmd_a = (u32)((ulong)skb->data & 0x03) << 16;
+ tx_cmd_a |= TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_;
+ tx_cmd_a |= (unsigned int)skb->len;
+
+ tx_cmd_b = ((unsigned int)skb->len) << 16;
+ tx_cmd_b |= (unsigned int)skb->len;
+
+ smsc911x_reg_write(pdata, TX_DATA_FIFO, tx_cmd_a);
+ smsc911x_reg_write(pdata, TX_DATA_FIFO, tx_cmd_b);
+
+ bufp = (ulong)skb->data & (~0x3);
+ wrsz = (u32)skb->len + 3;
+ wrsz += (u32)((ulong)skb->data & 0x3);
+ wrsz >>= 2;
+
+ pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
+ freespace -= (skb->len + 32);
+ skb_tx_timestamp(skb);
+ dev_kfree_skb(skb);
+
+ if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30))
+ smsc911x_tx_update_txcounters(dev);
+
+ if (freespace < TX_FIFO_LOW_THRESHOLD) {
+ netif_stop_queue(dev);
+ temp = smsc911x_reg_read(pdata, FIFO_INT);
+ temp &= 0x00FFFFFF;
+ temp |= 0x32000000;
+ smsc911x_reg_write(pdata, FIFO_INT, temp);
+ }
+
+ return NETDEV_TX_OK;
+}
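+
+/* Worked example for the FIFO write above (illustrative): an skb with
+ * skb->data one byte past a word boundary and skb->len = 100 yields
+ * tx_cmd_a = (1 << 16) | TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_ | 100,
+ * bufp rounded down to the enclosing word, and
+ * wrsz = (100 + 3 + 1) >> 2 = 26 words pushed into TX_DATA_FIFO. */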
+
+/* Entry point for getting status counters */
+static struct net_device_stats *smsc911x_get_stats(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ smsc911x_tx_update_txcounters(dev);
+ dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP);
+ return &dev->stats;
+}
+
+/* Entry point for setting addressing modes */
+static void smsc911x_set_multicast_list(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ unsigned long flags;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Enabling promiscuous mode */
+ pdata->set_bits_mask = MAC_CR_PRMS_;
+ pdata->clear_bits_mask = (MAC_CR_MCPAS_ | MAC_CR_HPFILT_);
+ pdata->hashhi = 0;
+ pdata->hashlo = 0;
+ } else if (dev->flags & IFF_ALLMULTI) {
+ /* Enabling all multicast mode */
+ pdata->set_bits_mask = MAC_CR_MCPAS_;
+ pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_HPFILT_);
+ pdata->hashhi = 0;
+ pdata->hashlo = 0;
+ } else if (!netdev_mc_empty(dev)) {
+ /* Enabling specific multicast addresses */
+ unsigned int hash_high = 0;
+ unsigned int hash_low = 0;
+ struct netdev_hw_addr *ha;
+
+ pdata->set_bits_mask = MAC_CR_HPFILT_;
+ pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned int bitnum = smsc911x_hash(ha->addr);
+ unsigned int mask = 0x01 << (bitnum & 0x1F);
+
+ if (bitnum & 0x20)
+ hash_high |= mask;
+ else
+ hash_low |= mask;
+ }
+
+ pdata->hashhi = hash_high;
+ pdata->hashlo = hash_low;
+ } else {
+ /* Enabling local MAC address only */
+ pdata->set_bits_mask = 0;
+ pdata->clear_bits_mask =
+ (MAC_CR_PRMS_ | MAC_CR_MCPAS_ | MAC_CR_HPFILT_);
+ pdata->hashhi = 0;
+ pdata->hashlo = 0;
+ }
+
+ spin_lock_irqsave(&pdata->mac_lock, flags);
+
+ if (pdata->generation <= 1) {
+ /* Older hardware revision - cannot change these flags while
+ * receiving data */
+ if (!pdata->multicast_update_pending) {
+ unsigned int temp;
+ SMSC_TRACE(pdata, hw, "scheduling mcast update");
+ pdata->multicast_update_pending = 1;
+
+ /* Request the hardware to stop, then perform the
+ * update when we get an RX_STOP interrupt */
+ temp = smsc911x_mac_read(pdata, MAC_CR);
+ temp &= ~(MAC_CR_RXEN_);
+ smsc911x_mac_write(pdata, MAC_CR, temp);
+ } else {
+ /* There is another update pending, this should now
+ * use the newer values */
+ }
+ } else {
+ /* Newer hardware revision - can write immediately */
+ smsc911x_rx_multicast_update(pdata);
+ }
+
+ spin_unlock_irqrestore(&pdata->mac_lock, flags);
+}
+
+static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ u32 intsts = smsc911x_reg_read(pdata, INT_STS);
+ u32 inten = smsc911x_reg_read(pdata, INT_EN);
+ int serviced = IRQ_NONE;
+ u32 temp;
+
+ if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
+ temp = smsc911x_reg_read(pdata, INT_EN);
+ temp &= (~INT_EN_SW_INT_EN_);
+ smsc911x_reg_write(pdata, INT_EN, temp);
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
+ pdata->software_irq_signal = 1;
+ smp_wmb();
+ serviced = IRQ_HANDLED;
+ }
+
+ if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
+ /* Called when there is a multicast update scheduled and
+ * it is now safe to complete the update */
+ SMSC_TRACE(pdata, intr, "RX Stop interrupt");
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
+ if (pdata->multicast_update_pending)
+ smsc911x_rx_multicast_update_workaround(pdata);
+ serviced = IRQ_HANDLED;
+ }
+
+ if (intsts & inten & INT_STS_TDFA_) {
+ temp = smsc911x_reg_read(pdata, FIFO_INT);
+ temp |= FIFO_INT_TX_AVAIL_LEVEL_;
+ smsc911x_reg_write(pdata, FIFO_INT, temp);
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
+ netif_wake_queue(dev);
+ serviced = IRQ_HANDLED;
+ }
+
+ if (unlikely(intsts & inten & INT_STS_RXE_)) {
+ SMSC_TRACE(pdata, intr, "RX Error interrupt");
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
+ serviced = IRQ_HANDLED;
+ }
+
+ if (likely(intsts & inten & INT_STS_RSFL_)) {
+ if (likely(napi_schedule_prep(&pdata->napi))) {
+ /* Disable Rx interrupts */
+ temp = smsc911x_reg_read(pdata, INT_EN);
+ temp &= (~INT_EN_RSFL_EN_);
+ smsc911x_reg_write(pdata, INT_EN, temp);
+ /* Schedule a NAPI poll */
+ __napi_schedule(&pdata->napi);
+ } else {
+ SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
+ }
+ serviced = IRQ_HANDLED;
+ }
+
+ return serviced;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void smsc911x_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ smsc911x_irqhandler(0, dev);
+ enable_irq(dev->irq);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+static int smsc911x_set_mac_address(struct net_device *dev, void *p)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ struct sockaddr *addr = p;
+
+ /* On older hardware revisions we cannot change the mac address
+ * registers while receiving data. Newer devices can safely change
+ * this at any time. */
+ if (pdata->generation <= 1 && netif_running(dev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+
+ spin_lock_irq(&pdata->mac_lock);
+ smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
+ spin_unlock_irq(&pdata->mac_lock);
+
+ netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr);
+
+ return 0;
+}
+
+/* Standard ioctls for mii-tool */
+static int smsc911x_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+
+ if (!netif_running(dev) || !pdata->phy_dev)
+ return -EINVAL;
+
+ return phy_mii_ioctl(pdata->phy_dev, ifr, cmd);
+}
+
+static int
+smsc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+
+ cmd->maxtxpkt = 1;
+ cmd->maxrxpkt = 1;
+ return phy_ethtool_gset(pdata->phy_dev, cmd);
+}
+
+static int
+smsc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+
+ return phy_ethtool_sset(pdata->phy_dev, cmd);
+}
+
+static void smsc911x_ethtool_getdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, SMSC_CHIPNAME, sizeof(info->driver));
+ strlcpy(info->version, SMSC_DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ sizeof(info->bus_info));
+}
+
+static int smsc911x_ethtool_nwayreset(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+
+ return phy_start_aneg(pdata->phy_dev);
+}
+
+static u32 smsc911x_ethtool_getmsglevel(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ return pdata->msg_enable;
+}
+
+static void smsc911x_ethtool_setmsglevel(struct net_device *dev, u32 level)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ pdata->msg_enable = level;
+}
+
+static int smsc911x_ethtool_getregslen(struct net_device *dev)
+{
+ return (((E2P_DATA - ID_REV) / 4 + 1) + (WUCSR - MAC_CR) + 1 + 32) *
+ sizeof(u32);
+}
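+
+/* Sanity check of the count above, assuming the usual LAN911x register
+ * map (ID_REV = 0x50, E2P_DATA = 0xB4, MAC_CR = 1, WUCSR = 12): 26
+ * system registers + 12 MAC registers + 32 PHY registers = 70 words,
+ * so get_regs_len reports 280 bytes. */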
+
+static void
+smsc911x_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
+ void *buf)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ struct phy_device *phy_dev = pdata->phy_dev;
+ unsigned long flags;
+ unsigned int i;
+ unsigned int j = 0;
+ u32 *data = buf;
+
+ regs->version = pdata->idrev;
+ for (i = ID_REV; i <= E2P_DATA; i += (sizeof(u32)))
+ data[j++] = smsc911x_reg_read(pdata, i);
+
+ for (i = MAC_CR; i <= WUCSR; i++) {
+ spin_lock_irqsave(&pdata->mac_lock, flags);
+ data[j++] = smsc911x_mac_read(pdata, i);
+ spin_unlock_irqrestore(&pdata->mac_lock, flags);
+ }
+
+ for (i = 0; i <= 31; i++)
+ data[j++] = smsc911x_mii_read(phy_dev->bus, phy_dev->addr, i);
+}
+
+static void smsc911x_eeprom_enable_access(struct smsc911x_data *pdata)
+{
+ unsigned int temp = smsc911x_reg_read(pdata, GPIO_CFG);
+ temp &= ~GPIO_CFG_EEPR_EN_;
+ smsc911x_reg_write(pdata, GPIO_CFG, temp);
+ msleep(1);
+}
+
+static int smsc911x_eeprom_send_cmd(struct smsc911x_data *pdata, u32 op)
+{
+ int timeout = 100;
+ u32 e2cmd;
+
+ SMSC_TRACE(pdata, drv, "op 0x%08x", op);
+ if (smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) {
+ SMSC_WARN(pdata, drv, "Busy at start");
+ return -EBUSY;
+ }
+
+ e2cmd = op | E2P_CMD_EPC_BUSY_;
+ smsc911x_reg_write(pdata, E2P_CMD, e2cmd);
+
+ do {
+ msleep(1);
+ e2cmd = smsc911x_reg_read(pdata, E2P_CMD);
+ } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout));
+
+ if (!timeout) {
+ SMSC_TRACE(pdata, drv, "TIMED OUT");
+ return -EAGAIN;
+ }
+
+ if (e2cmd & E2P_CMD_EPC_TIMEOUT_) {
+ SMSC_TRACE(pdata, drv, "Error occurred during eeprom operation");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int smsc911x_eeprom_read_location(struct smsc911x_data *pdata,
+ u8 address, u8 *data)
+{
+ u32 op = E2P_CMD_EPC_CMD_READ_ | address;
+ int ret;
+
+ SMSC_TRACE(pdata, drv, "address 0x%x", address);
+ ret = smsc911x_eeprom_send_cmd(pdata, op);
+
+ if (!ret)
+ data[address] = smsc911x_reg_read(pdata, E2P_DATA);
+
+ return ret;
+}
+
+static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata,
+ u8 address, u8 data)
+{
+ u32 op = E2P_CMD_EPC_CMD_ERASE_ | address;
+ u32 temp;
+ int ret;
+
+ SMSC_TRACE(pdata, drv, "address 0x%x, data 0x%x", address, data);
+ ret = smsc911x_eeprom_send_cmd(pdata, op);
+
+ if (!ret) {
+ op = E2P_CMD_EPC_CMD_WRITE_ | address;
+ smsc911x_reg_write(pdata, E2P_DATA, (u32)data);
+
+ /* Workaround for hardware read-after-write restriction */
+ temp = smsc911x_reg_read(pdata, BYTE_TEST);
+
+ ret = smsc911x_eeprom_send_cmd(pdata, op);
+ }
+
+ return ret;
+}
+
+static int smsc911x_ethtool_get_eeprom_len(struct net_device *dev)
+{
+ return SMSC911X_EEPROM_SIZE;
+}
+
+static int smsc911x_ethtool_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ u8 eeprom_data[SMSC911X_EEPROM_SIZE];
+ int len;
+ int i;
+
+ smsc911x_eeprom_enable_access(pdata);
+
+	len = min(eeprom->len, SMSC911X_EEPROM_SIZE);
+	/* Read the whole EEPROM so that the copy below is backed by
+	 * initialised data even when a non-zero offset is requested */
+	for (i = 0; i < SMSC911X_EEPROM_SIZE; i++) {
+ int ret = smsc911x_eeprom_read_location(pdata, i, eeprom_data);
+ if (ret < 0) {
+ eeprom->len = 0;
+ return ret;
+ }
+ }
+
+ memcpy(data, &eeprom_data[eeprom->offset], len);
+ eeprom->len = len;
+ return 0;
+}
+
+static int smsc911x_ethtool_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ int ret;
+ struct smsc911x_data *pdata = netdev_priv(dev);
+
+ smsc911x_eeprom_enable_access(pdata);
+ smsc911x_eeprom_send_cmd(pdata, E2P_CMD_EPC_CMD_EWEN_);
+ ret = smsc911x_eeprom_write_location(pdata, eeprom->offset, *data);
+ smsc911x_eeprom_send_cmd(pdata, E2P_CMD_EPC_CMD_EWDS_);
+
+	/* EEPROM writes are performed one byte per ethtool call */
+ eeprom->len = 1;
+
+ return ret;
+}
+
+static const struct ethtool_ops smsc911x_ethtool_ops = {
+ .get_settings = smsc911x_ethtool_getsettings,
+ .set_settings = smsc911x_ethtool_setsettings,
+ .get_link = ethtool_op_get_link,
+ .get_drvinfo = smsc911x_ethtool_getdrvinfo,
+ .nway_reset = smsc911x_ethtool_nwayreset,
+ .get_msglevel = smsc911x_ethtool_getmsglevel,
+ .set_msglevel = smsc911x_ethtool_setmsglevel,
+ .get_regs_len = smsc911x_ethtool_getregslen,
+ .get_regs = smsc911x_ethtool_getregs,
+ .get_eeprom_len = smsc911x_ethtool_get_eeprom_len,
+ .get_eeprom = smsc911x_ethtool_get_eeprom,
+ .set_eeprom = smsc911x_ethtool_set_eeprom,
+};
+
+static const struct net_device_ops smsc911x_netdev_ops = {
+ .ndo_open = smsc911x_open,
+ .ndo_stop = smsc911x_stop,
+ .ndo_start_xmit = smsc911x_hard_start_xmit,
+ .ndo_get_stats = smsc911x_get_stats,
+ .ndo_set_rx_mode = smsc911x_set_multicast_list,
+ .ndo_do_ioctl = smsc911x_do_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = smsc911x_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = smsc911x_poll_controller,
+#endif
+};
+
+/* copies the current mac address from hardware to dev->dev_addr */
+static void __devinit smsc911x_read_mac_address(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH);
+ u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL);
+
+ dev->dev_addr[0] = (u8)(mac_low32);
+ dev->dev_addr[1] = (u8)(mac_low32 >> 8);
+ dev->dev_addr[2] = (u8)(mac_low32 >> 16);
+ dev->dev_addr[3] = (u8)(mac_low32 >> 24);
+ dev->dev_addr[4] = (u8)(mac_high16);
+ dev->dev_addr[5] = (u8)(mac_high16 >> 8);
+}
+
+/* Initializing private device structures, only called from probe */
+static int __devinit smsc911x_init(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ unsigned int byte_test;
+
+ SMSC_TRACE(pdata, probe, "Driver Parameters:");
+ SMSC_TRACE(pdata, probe, "LAN base: 0x%08lX",
+ (unsigned long)pdata->ioaddr);
+ SMSC_TRACE(pdata, probe, "IRQ: %d", dev->irq);
+ SMSC_TRACE(pdata, probe, "PHY will be autodetected.");
+
+ spin_lock_init(&pdata->dev_lock);
+ spin_lock_init(&pdata->mac_lock);
+
+ if (pdata->ioaddr == 0) {
+ SMSC_WARN(pdata, probe, "pdata->ioaddr: 0x00000000");
+ return -ENODEV;
+ }
+
+ /* Check byte ordering */
+ byte_test = smsc911x_reg_read(pdata, BYTE_TEST);
+ SMSC_TRACE(pdata, probe, "BYTE_TEST: 0x%08X", byte_test);
+ if (byte_test == 0x43218765) {
+ SMSC_TRACE(pdata, probe, "BYTE_TEST looks swapped, "
+ "applying WORD_SWAP");
+ smsc911x_reg_write(pdata, WORD_SWAP, 0xffffffff);
+
+ /* 1 dummy read of BYTE_TEST is needed after a write to
+ * WORD_SWAP before its contents are valid */
+ byte_test = smsc911x_reg_read(pdata, BYTE_TEST);
+
+ byte_test = smsc911x_reg_read(pdata, BYTE_TEST);
+ }
+
+ if (byte_test != 0x87654321) {
+ SMSC_WARN(pdata, drv, "BYTE_TEST: 0x%08X", byte_test);
+ if (((byte_test >> 16) & 0xFFFF) == (byte_test & 0xFFFF)) {
+ SMSC_WARN(pdata, probe,
+ "top 16 bits equal to bottom 16 bits");
+ SMSC_TRACE(pdata, probe,
+ "This may mean the chip is set "
+ "for 32 bit while the bus is reading 16 bit");
+ }
+ return -ENODEV;
+ }
+
+ /* Default generation to zero (all workarounds apply) */
+ pdata->generation = 0;
+
+ pdata->idrev = smsc911x_reg_read(pdata, ID_REV);
+ switch (pdata->idrev & 0xFFFF0000) {
+ case 0x01180000:
+ case 0x01170000:
+ case 0x01160000:
+ case 0x01150000:
+ /* LAN911[5678] family */
+ pdata->generation = pdata->idrev & 0x0000FFFF;
+ break;
+
+ case 0x118A0000:
+ case 0x117A0000:
+ case 0x116A0000:
+ case 0x115A0000:
+ /* LAN921[5678] family */
+ pdata->generation = 3;
+ break;
+
+ case 0x92100000:
+ case 0x92110000:
+ case 0x92200000:
+ case 0x92210000:
+ /* LAN9210/LAN9211/LAN9220/LAN9221 */
+ pdata->generation = 4;
+ break;
+
+ default:
+ SMSC_WARN(pdata, probe, "LAN911x not identified, idrev: 0x%08X",
+ pdata->idrev);
+ return -ENODEV;
+ }
+
+ SMSC_TRACE(pdata, probe,
+ "LAN911x identified, idrev: 0x%08X, generation: %d",
+ pdata->idrev, pdata->generation);
+
+ if (pdata->generation == 0)
+ SMSC_WARN(pdata, probe,
+ "This driver is not intended for this chip revision");
+
+ /* workaround for platforms without an eeprom, where the mac address
+ * is stored elsewhere and set by the bootloader. This saves the
+ * mac address before resetting the device */
+ if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS) {
+ spin_lock_irq(&pdata->mac_lock);
+ smsc911x_read_mac_address(dev);
+ spin_unlock_irq(&pdata->mac_lock);
+ }
+
+ /* Reset the LAN911x */
+ if (smsc911x_soft_reset(pdata))
+ return -ENODEV;
+
+ /* Disable all interrupt sources until we bring the device up */
+ smsc911x_reg_write(pdata, INT_EN, 0);
+
+ ether_setup(dev);
+ dev->flags |= IFF_MULTICAST;
+ netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT);
+ dev->netdev_ops = &smsc911x_netdev_ops;
+ dev->ethtool_ops = &smsc911x_ethtool_ops;
+
+ return 0;
+}
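
Editor's note: the probe above distinguishes three outcomes of the BYTE_TEST read: the expected 0x87654321, the half-swapped 0x43218765 (correctable via WORD_SWAP), and the duplicated-halves case that suggests a 32/16-bit bus-width mismatch. A compact restatement of that decision logic, as a sketch only:

/* Sketch of the BYTE_TEST interpretation used in smsc911x_init() above. */
static int byte_test_classify(u32 v)
{
	if (v == 0x87654321)
		return 0;	/* bus wired correctly */
	if (v == 0x43218765)
		return 1;	/* 16-bit halves swapped; WORD_SWAP corrects it */
	if ((v >> 16) == (v & 0xFFFF))
		return -1;	/* halves identical: likely bus-width mismatch */
	return -2;		/* unknown wiring fault */
}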
+
+static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct smsc911x_data *pdata;
+ struct resource *res;
+
+ dev = platform_get_drvdata(pdev);
+ BUG_ON(!dev);
+ pdata = netdev_priv(dev);
+ BUG_ON(!pdata);
+ BUG_ON(!pdata->ioaddr);
+ BUG_ON(!pdata->phy_dev);
+
+ SMSC_TRACE(pdata, ifdown, "Stopping driver");
+
+ phy_disconnect(pdata->phy_dev);
+ pdata->phy_dev = NULL;
+ mdiobus_unregister(pdata->mii_bus);
+ mdiobus_free(pdata->mii_bus);
+
+ platform_set_drvdata(pdev, NULL);
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "smsc911x-memory");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ release_mem_region(res->start, resource_size(res));
+
+ iounmap(pdata->ioaddr);
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+/* standard register access */
+static const struct smsc911x_ops standard_smsc911x_ops = {
+ .reg_read = __smsc911x_reg_read,
+ .reg_write = __smsc911x_reg_write,
+ .rx_readfifo = smsc911x_rx_readfifo,
+ .tx_writefifo = smsc911x_tx_writefifo,
+};
+
+/* shifted register access */
+static const struct smsc911x_ops shifted_smsc911x_ops = {
+ .reg_read = __smsc911x_reg_read_shift,
+ .reg_write = __smsc911x_reg_write_shift,
+ .rx_readfifo = smsc911x_rx_readfifo_shift,
+ .tx_writefifo = smsc911x_tx_writefifo_shift,
+};
+
+#ifdef CONFIG_OF
+static int __devinit smsc911x_probe_config_dt(
+ struct smsc911x_platform_config *config,
+ struct device_node *np)
+{
+ const char *mac;
+ u32 width = 0;
+
+ if (!np)
+ return -ENODEV;
+
+ config->phy_interface = of_get_phy_mode(np);
+
+ mac = of_get_mac_address(np);
+ if (mac)
+ memcpy(config->mac, mac, ETH_ALEN);
+
+ of_property_read_u32(np, "reg-shift", &config->shift);
+
+ of_property_read_u32(np, "reg-io-width", &width);
+ if (width == 4)
+ config->flags |= SMSC911X_USE_32BIT;
+ else
+ config->flags |= SMSC911X_USE_16BIT;
+
+ if (of_get_property(np, "smsc,irq-active-high", NULL))
+ config->irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH;
+
+ if (of_get_property(np, "smsc,irq-push-pull", NULL))
+ config->irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL;
+
+ if (of_get_property(np, "smsc,force-internal-phy", NULL))
+ config->flags |= SMSC911X_FORCE_INTERNAL_PHY;
+
+ if (of_get_property(np, "smsc,force-external-phy", NULL))
+ config->flags |= SMSC911X_FORCE_EXTERNAL_PHY;
+
+ if (of_get_property(np, "smsc,save-mac-address", NULL))
+ config->flags |= SMSC911X_SAVE_MAC_ADDRESS;
+
+ return 0;
+}
+#else
+static inline int smsc911x_probe_config_dt(
+ struct smsc911x_platform_config *config,
+ struct device_node *np)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_OF */
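
Editor's note: boards that do not use device tree can pass the same settings through platform_data instead; the probe below falls back to it when smsc911x_probe_config_dt() returns an error. A hypothetical board-file configuration using the flags and fields referenced above (the struct itself is declared in <linux/smsc911x.h>):

/* Hypothetical platform_data equivalent of the DT properties parsed above. */
static struct smsc911x_platform_config board_smsc911x_config = {
	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_HIGH,
	.irq_type	= SMSC911X_IRQ_TYPE_PUSH_PULL,
	.flags		= SMSC911X_USE_16BIT | SMSC911X_SAVE_MAC_ADDRESS,
	.shift		= 0,
	.phy_interface	= PHY_INTERFACE_MODE_MII,
};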
+
+static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct net_device *dev;
+ struct smsc911x_data *pdata;
+ struct smsc911x_platform_config *config = pdev->dev.platform_data;
+ struct resource *res, *irq_res;
+ unsigned int intcfg = 0;
+ int res_size, irq_flags;
+ int retval;
+
+ pr_info("Driver version %s\n", SMSC_DRV_VERSION);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "smsc911x-memory");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ pr_warn("Could not allocate resource\n");
+ retval = -ENODEV;
+ goto out_0;
+ }
+ res_size = resource_size(res);
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq_res) {
+ pr_warn("Could not allocate irq resource\n");
+ retval = -ENODEV;
+ goto out_0;
+ }
+
+ if (!request_mem_region(res->start, res_size, SMSC_CHIPNAME)) {
+ retval = -EBUSY;
+ goto out_0;
+ }
+
+ dev = alloc_etherdev(sizeof(struct smsc911x_data));
+ if (!dev) {
+ pr_warn("Could not allocate device\n");
+ retval = -ENOMEM;
+ goto out_release_io_1;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ pdata = netdev_priv(dev);
+
+ dev->irq = irq_res->start;
+ irq_flags = irq_res->flags & IRQF_TRIGGER_MASK;
+ pdata->ioaddr = ioremap_nocache(res->start, res_size);
+
+ pdata->dev = dev;
+ pdata->msg_enable = ((1 << debug) - 1);
+
+ if (pdata->ioaddr == NULL) {
+ SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
+ retval = -ENOMEM;
+ goto out_free_netdev_2;
+ }
+
+ retval = smsc911x_probe_config_dt(&pdata->config, np);
+ if (retval && config) {
+ /* copy config parameters across to pdata */
+ memcpy(&pdata->config, config, sizeof(pdata->config));
+ retval = 0;
+ }
+
+ if (retval) {
+ SMSC_WARN(pdata, probe, "Error smsc911x config not found");
+ goto out_unmap_io_3;
+ }
+
+ /* assume standard, non-shifted, access to HW registers */
+ pdata->ops = &standard_smsc911x_ops;
+ /* apply the right access if shifting is needed */
+ if (pdata->config.shift)
+ pdata->ops = &shifted_smsc911x_ops;
+
+ retval = smsc911x_init(dev);
+ if (retval < 0)
+ goto out_unmap_io_3;
+
+ /* configure irq polarity and type before connecting isr */
+ if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH)
+ intcfg |= INT_CFG_IRQ_POL_;
+
+ if (pdata->config.irq_type == SMSC911X_IRQ_TYPE_PUSH_PULL)
+ intcfg |= INT_CFG_IRQ_TYPE_;
+
+ smsc911x_reg_write(pdata, INT_CFG, intcfg);
+
+ /* Ensure interrupts are globally disabled before connecting ISR */
+ smsc911x_reg_write(pdata, INT_EN, 0);
+ smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
+
+ retval = request_irq(dev->irq, smsc911x_irqhandler,
+ irq_flags | IRQF_SHARED, dev->name, dev);
+ if (retval) {
+ SMSC_WARN(pdata, probe,
+ "Unable to claim requested irq: %d", dev->irq);
+ goto out_unmap_io_3;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ retval = register_netdev(dev);
+ if (retval) {
+ SMSC_WARN(pdata, probe, "Error %i registering device", retval);
+ goto out_unset_drvdata_4;
+ } else {
+ SMSC_TRACE(pdata, probe,
+ "Network interface: \"%s\"", dev->name);
+ }
+
+ retval = smsc911x_mii_init(pdev, dev);
+ if (retval) {
+ SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
+ goto out_unregister_netdev_5;
+ }
+
+ spin_lock_irq(&pdata->mac_lock);
+
+ /* Check if mac address has been specified when bringing interface up */
+ if (is_valid_ether_addr(dev->dev_addr)) {
+ smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
+ SMSC_TRACE(pdata, probe,
+ "MAC Address is specified by configuration");
+ } else if (is_valid_ether_addr(pdata->config.mac)) {
+ memcpy(dev->dev_addr, pdata->config.mac, 6);
+ SMSC_TRACE(pdata, probe,
+ "MAC Address specified by platform data");
+ } else {
+ /* Try reading mac address from device. If EEPROM is present
+ * it will already have been set */
+ smsc_get_mac(dev);
+
+ if (is_valid_ether_addr(dev->dev_addr)) {
+ /* eeprom values are valid so use them */
+ SMSC_TRACE(pdata, probe,
+ "Mac Address is read from LAN911x EEPROM");
+ } else {
+ /* eeprom values are invalid, generate random MAC */
+ random_ether_addr(dev->dev_addr);
+ smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
+ SMSC_TRACE(pdata, probe,
+ "MAC Address is set to random_ether_addr");
+ }
+ }
+
+ spin_unlock_irq(&pdata->mac_lock);
+
+ netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr);
+
+ return 0;
+
+out_unregister_netdev_5:
+ unregister_netdev(dev);
+out_unset_drvdata_4:
+ platform_set_drvdata(pdev, NULL);
+ free_irq(dev->irq, dev);
+out_unmap_io_3:
+ iounmap(pdata->ioaddr);
+out_free_netdev_2:
+ free_netdev(dev);
+out_release_io_1:
+ release_mem_region(res->start, resource_size(res));
+out_0:
+ return retval;
+}
+
+#ifdef CONFIG_PM
+/* This implementation assumes the device remains powered on its VDDVARIO
+ * pins during suspend. */
+
+/* TODO: implement freeze/thaw callbacks for hibernation. */
+
+static int smsc911x_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+
+ /* enable wake on LAN, energy detection and the external PME
+ * signal. */
+ smsc911x_reg_write(pdata, PMT_CTRL,
+ PMT_CTRL_PM_MODE_D1_ | PMT_CTRL_WOL_EN_ |
+ PMT_CTRL_ED_EN_ | PMT_CTRL_PME_EN_);
+
+ return 0;
+}
+
+static int smsc911x_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+ unsigned int to = 100;
+
+ /* Note 3.11 from the datasheet:
+ * "When the LAN9220 is in a power saving state, a write of any
+ * data to the BYTE_TEST register will wake-up the device."
+ */
+ smsc911x_reg_write(pdata, BYTE_TEST, 0);
+
+ /* poll the READY bit in PMT_CTRL. Any other access to the device is
+ * forbidden while this bit isn't set. Try for 100ms and return -EIO
+ * if it fails. */
+ while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
+ udelay(1000);
+
+ return (to == 0) ? -EIO : 0;
+}
+
+static const struct dev_pm_ops smsc911x_pm_ops = {
+ .suspend = smsc911x_suspend,
+ .resume = smsc911x_resume,
+};
+
+#define SMSC911X_PM_OPS (&smsc911x_pm_ops)
+
+#else
+#define SMSC911X_PM_OPS NULL
+#endif
+
+static const struct of_device_id smsc911x_dt_ids[] = {
+ { .compatible = "smsc,lan9115", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, smsc911x_dt_ids);
+
+static struct platform_driver smsc911x_driver = {
+ .probe = smsc911x_drv_probe,
+ .remove = __devexit_p(smsc911x_drv_remove),
+ .driver = {
+ .name = SMSC_CHIPNAME,
+ .owner = THIS_MODULE,
+ .pm = SMSC911X_PM_OPS,
+ .of_match_table = smsc911x_dt_ids,
+ },
+};
+
+/* Entry point for loading the module */
+static int __init smsc911x_init_module(void)
+{
+ SMSC_INITIALIZE();
+ return platform_driver_register(&smsc911x_driver);
+}
+
+/* entry point for unloading the module */
+static void __exit smsc911x_cleanup_module(void)
+{
+ platform_driver_unregister(&smsc911x_driver);
+}
+
+module_init(smsc911x_init_module);
+module_exit(smsc911x_cleanup_module);
diff --git a/drivers/net/ethernet/smsc/smsc911x.h b/drivers/net/ethernet/smsc/smsc911x.h
new file mode 100644
index 000000000000..8d67aacf8867
--- /dev/null
+++ b/drivers/net/ethernet/smsc/smsc911x.h
@@ -0,0 +1,404 @@
+/***************************************************************************
+ *
+ * Copyright (C) 2004-2008 SMSC
+ * Copyright (C) 2005-2008 ARM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ***************************************************************************/
+#ifndef __SMSC911X_H__
+#define __SMSC911X_H__
+
+#define TX_FIFO_LOW_THRESHOLD ((u32)1600)
+#define SMSC911X_EEPROM_SIZE ((u32)128)
+#define USE_DEBUG 0
+
+/* This is the maximum number of packets to be received every
+ * NAPI poll */
+#define SMSC_NAPI_WEIGHT 16
+
+/* implements a PHY loopback test at initialisation time, to ensure a packet
+ * can be successfully looped back */
+#define USE_PHY_WORK_AROUND
+
+#if USE_DEBUG >= 1
+#define SMSC_WARN(pdata, nlevel, fmt, args...) \
+ netif_warn(pdata, nlevel, (pdata)->dev, \
+ "%s: " fmt "\n", __func__, ##args)
+#else
+#define SMSC_WARN(pdata, nlevel, fmt, args...) \
+ no_printk(fmt "\n", ##args)
+#endif
+
+#if USE_DEBUG >= 2
+#define SMSC_TRACE(pdata, nlevel, fmt, args...) \
+ netif_info(pdata, nlevel, pdata->dev, fmt "\n", ##args)
+#else
+#define SMSC_TRACE(pdata, nlevel, fmt, args...) \
+ no_printk(fmt "\n", ##args)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define SMSC_ASSERT_MAC_LOCK(pdata) \
+ WARN_ON(!spin_is_locked(&pdata->mac_lock))
+#else
+#define SMSC_ASSERT_MAC_LOCK(pdata) do {} while (0)
+#endif /* CONFIG_DEBUG_SPINLOCK */
+
+/* SMSC911x registers and bitfields */
+#define RX_DATA_FIFO 0x00
+
+#define TX_DATA_FIFO 0x20
+#define TX_CMD_A_ON_COMP_ 0x80000000
+#define TX_CMD_A_BUF_END_ALGN_ 0x03000000
+#define TX_CMD_A_4_BYTE_ALGN_ 0x00000000
+#define TX_CMD_A_16_BYTE_ALGN_ 0x01000000
+#define TX_CMD_A_32_BYTE_ALGN_ 0x02000000
+#define TX_CMD_A_DATA_OFFSET_ 0x001F0000
+#define TX_CMD_A_FIRST_SEG_ 0x00002000
+#define TX_CMD_A_LAST_SEG_ 0x00001000
+#define TX_CMD_A_BUF_SIZE_ 0x000007FF
+#define TX_CMD_B_PKT_TAG_ 0xFFFF0000
+#define TX_CMD_B_ADD_CRC_DISABLE_ 0x00002000
+#define TX_CMD_B_DISABLE_PADDING_ 0x00001000
+#define TX_CMD_B_PKT_BYTE_LENGTH_ 0x000007FF
+
+#define RX_STATUS_FIFO 0x40
+#define RX_STS_ES_ 0x00008000
+#define RX_STS_LENGTH_ERR_ 0x00001000
+#define RX_STS_MCAST_ 0x00000400
+#define RX_STS_FRAME_TYPE_ 0x00000020
+#define RX_STS_CRC_ERR_ 0x00000002
+
+#define RX_STATUS_FIFO_PEEK 0x44
+
+#define TX_STATUS_FIFO 0x48
+#define TX_STS_ES_ 0x00008000
+#define TX_STS_LOST_CARRIER_ 0x00000800
+#define TX_STS_NO_CARRIER_ 0x00000400
+#define TX_STS_LATE_COL_ 0x00000200
+#define TX_STS_EXCESS_COL_ 0x00000100
+
+#define TX_STATUS_FIFO_PEEK 0x4C
+
+#define ID_REV 0x50
+#define ID_REV_CHIP_ID_ 0xFFFF0000
+#define ID_REV_REV_ID_ 0x0000FFFF
+
+#define INT_CFG 0x54
+#define INT_CFG_INT_DEAS_ 0xFF000000
+#define INT_CFG_INT_DEAS_CLR_ 0x00004000
+#define INT_CFG_INT_DEAS_STS_ 0x00002000
+#define INT_CFG_IRQ_INT_ 0x00001000
+#define INT_CFG_IRQ_EN_ 0x00000100
+#define INT_CFG_IRQ_POL_ 0x00000010
+#define INT_CFG_IRQ_TYPE_ 0x00000001
+
+#define INT_STS 0x58
+#define INT_STS_SW_INT_ 0x80000000
+#define INT_STS_TXSTOP_INT_ 0x02000000
+#define INT_STS_RXSTOP_INT_ 0x01000000
+#define INT_STS_RXDFH_INT_ 0x00800000
+#define INT_STS_RXDF_INT_ 0x00400000
+#define INT_STS_TX_IOC_ 0x00200000
+#define INT_STS_RXD_INT_ 0x00100000
+#define INT_STS_GPT_INT_ 0x00080000
+#define INT_STS_PHY_INT_ 0x00040000
+#define INT_STS_PME_INT_ 0x00020000
+#define INT_STS_TXSO_ 0x00010000
+#define INT_STS_RWT_ 0x00008000
+#define INT_STS_RXE_ 0x00004000
+#define INT_STS_TXE_ 0x00002000
+#define INT_STS_TDFU_ 0x00000800
+#define INT_STS_TDFO_ 0x00000400
+#define INT_STS_TDFA_ 0x00000200
+#define INT_STS_TSFF_ 0x00000100
+#define INT_STS_TSFL_ 0x00000080
+#define INT_STS_RXDF_ 0x00000040
+#define INT_STS_RDFL_ 0x00000020
+#define INT_STS_RSFF_ 0x00000010
+#define INT_STS_RSFL_ 0x00000008
+#define INT_STS_GPIO2_INT_ 0x00000004
+#define INT_STS_GPIO1_INT_ 0x00000002
+#define INT_STS_GPIO0_INT_ 0x00000001
+
+#define INT_EN 0x5C
+#define INT_EN_SW_INT_EN_ 0x80000000
+#define INT_EN_TXSTOP_INT_EN_ 0x02000000
+#define INT_EN_RXSTOP_INT_EN_ 0x01000000
+#define INT_EN_RXDFH_INT_EN_ 0x00800000
+#define INT_EN_TIOC_INT_EN_ 0x00200000
+#define INT_EN_RXD_INT_EN_ 0x00100000
+#define INT_EN_GPT_INT_EN_ 0x00080000
+#define INT_EN_PHY_INT_EN_ 0x00040000
+#define INT_EN_PME_INT_EN_ 0x00020000
+#define INT_EN_TXSO_EN_ 0x00010000
+#define INT_EN_RWT_EN_ 0x00008000
+#define INT_EN_RXE_EN_ 0x00004000
+#define INT_EN_TXE_EN_ 0x00002000
+#define INT_EN_TDFU_EN_ 0x00000800
+#define INT_EN_TDFO_EN_ 0x00000400
+#define INT_EN_TDFA_EN_ 0x00000200
+#define INT_EN_TSFF_EN_ 0x00000100
+#define INT_EN_TSFL_EN_ 0x00000080
+#define INT_EN_RXDF_EN_ 0x00000040
+#define INT_EN_RDFL_EN_ 0x00000020
+#define INT_EN_RSFF_EN_ 0x00000010
+#define INT_EN_RSFL_EN_ 0x00000008
+#define INT_EN_GPIO2_INT_ 0x00000004
+#define INT_EN_GPIO1_INT_ 0x00000002
+#define INT_EN_GPIO0_INT_ 0x00000001
+
+#define BYTE_TEST 0x64
+
+#define FIFO_INT 0x68
+#define FIFO_INT_TX_AVAIL_LEVEL_ 0xFF000000
+#define FIFO_INT_TX_STS_LEVEL_ 0x00FF0000
+#define FIFO_INT_RX_AVAIL_LEVEL_ 0x0000FF00
+#define FIFO_INT_RX_STS_LEVEL_ 0x000000FF
+
+#define RX_CFG 0x6C
+#define RX_CFG_RX_END_ALGN_ 0xC0000000
+#define RX_CFG_RX_END_ALGN4_ 0x00000000
+#define RX_CFG_RX_END_ALGN16_ 0x40000000
+#define RX_CFG_RX_END_ALGN32_ 0x80000000
+#define RX_CFG_RX_DMA_CNT_ 0x0FFF0000
+#define RX_CFG_RX_DUMP_ 0x00008000
+#define RX_CFG_RXDOFF_ 0x00001F00
+
+#define TX_CFG 0x70
+#define TX_CFG_TXS_DUMP_ 0x00008000
+#define TX_CFG_TXD_DUMP_ 0x00004000
+#define TX_CFG_TXSAO_ 0x00000004
+#define TX_CFG_TX_ON_ 0x00000002
+#define TX_CFG_STOP_TX_ 0x00000001
+
+#define HW_CFG 0x74
+#define HW_CFG_TTM_ 0x00200000
+#define HW_CFG_SF_ 0x00100000
+#define HW_CFG_TX_FIF_SZ_ 0x000F0000
+#define HW_CFG_TR_ 0x00003000
+#define HW_CFG_SRST_ 0x00000001
+
+/* only available on 115/117 */
+#define HW_CFG_PHY_CLK_SEL_ 0x00000060
+#define HW_CFG_PHY_CLK_SEL_INT_PHY_ 0x00000000
+#define HW_CFG_PHY_CLK_SEL_EXT_PHY_ 0x00000020
+#define HW_CFG_PHY_CLK_SEL_CLK_DIS_ 0x00000040
+#define HW_CFG_SMI_SEL_ 0x00000010
+#define HW_CFG_EXT_PHY_DET_ 0x00000008
+#define HW_CFG_EXT_PHY_EN_ 0x00000004
+#define HW_CFG_SRST_TO_ 0x00000002
+
+/* only available on 116/118 */
+#define HW_CFG_32_16_BIT_MODE_ 0x00000004
+
+#define RX_DP_CTRL 0x78
+#define RX_DP_CTRL_RX_FFWD_ 0x80000000
+
+#define RX_FIFO_INF 0x7C
+#define RX_FIFO_INF_RXSUSED_ 0x00FF0000
+#define RX_FIFO_INF_RXDUSED_ 0x0000FFFF
+
+#define TX_FIFO_INF 0x80
+#define TX_FIFO_INF_TSUSED_ 0x00FF0000
+#define TX_FIFO_INF_TDFREE_ 0x0000FFFF
+
+#define PMT_CTRL 0x84
+#define PMT_CTRL_PM_MODE_ 0x00003000
+#define PMT_CTRL_PM_MODE_D0_ 0x00000000
+#define PMT_CTRL_PM_MODE_D1_ 0x00001000
+#define PMT_CTRL_PM_MODE_D2_ 0x00002000
+#define PMT_CTRL_PM_MODE_D3_ 0x00003000
+#define PMT_CTRL_PHY_RST_ 0x00000400
+#define PMT_CTRL_WOL_EN_ 0x00000200
+#define PMT_CTRL_ED_EN_ 0x00000100
+#define PMT_CTRL_PME_TYPE_ 0x00000040
+#define PMT_CTRL_WUPS_ 0x00000030
+#define PMT_CTRL_WUPS_NOWAKE_ 0x00000000
+#define PMT_CTRL_WUPS_ED_ 0x00000010
+#define PMT_CTRL_WUPS_WOL_ 0x00000020
+#define PMT_CTRL_WUPS_MULTI_ 0x00000030
+#define PMT_CTRL_PME_IND_ 0x00000008
+#define PMT_CTRL_PME_POL_ 0x00000004
+#define PMT_CTRL_PME_EN_ 0x00000002
+#define PMT_CTRL_READY_ 0x00000001
+
+#define GPIO_CFG 0x88
+#define GPIO_CFG_LED3_EN_ 0x40000000
+#define GPIO_CFG_LED2_EN_ 0x20000000
+#define GPIO_CFG_LED1_EN_ 0x10000000
+#define GPIO_CFG_GPIO2_INT_POL_ 0x04000000
+#define GPIO_CFG_GPIO1_INT_POL_ 0x02000000
+#define GPIO_CFG_GPIO0_INT_POL_ 0x01000000
+#define GPIO_CFG_EEPR_EN_ 0x00700000
+#define GPIO_CFG_GPIOBUF2_ 0x00040000
+#define GPIO_CFG_GPIOBUF1_ 0x00020000
+#define GPIO_CFG_GPIOBUF0_ 0x00010000
+#define GPIO_CFG_GPIODIR2_ 0x00000400
+#define GPIO_CFG_GPIODIR1_ 0x00000200
+#define GPIO_CFG_GPIODIR0_ 0x00000100
+#define GPIO_CFG_GPIOD4_ 0x00000020
+#define GPIO_CFG_GPIOD3_ 0x00000010
+#define GPIO_CFG_GPIOD2_ 0x00000004
+#define GPIO_CFG_GPIOD1_ 0x00000002
+#define GPIO_CFG_GPIOD0_ 0x00000001
+
+#define GPT_CFG 0x8C
+#define GPT_CFG_TIMER_EN_ 0x20000000
+#define GPT_CFG_GPT_LOAD_ 0x0000FFFF
+
+#define GPT_CNT 0x90
+#define GPT_CNT_GPT_CNT_ 0x0000FFFF
+
+#define WORD_SWAP 0x98
+
+#define FREE_RUN 0x9C
+
+#define RX_DROP 0xA0
+
+#define MAC_CSR_CMD 0xA4
+#define MAC_CSR_CMD_CSR_BUSY_ 0x80000000
+#define MAC_CSR_CMD_R_NOT_W_ 0x40000000
+#define MAC_CSR_CMD_CSR_ADDR_ 0x000000FF
+
+#define MAC_CSR_DATA 0xA8
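
Editor's note: the MAC-block registers defined further down (MAC_CR through WUCSR) are reached indirectly: the driver writes the CSR index, direction and busy bits to MAC_CSR_CMD, waits for the busy bit to clear, then transfers the value through MAC_CSR_DATA. A sketch of a read cycle under that protocol, ignoring the mac_lock and timeout handling the real accessors add:

/* Sketch: indirect MAC CSR read through the command/data ports above. */
static u32 mac_csr_read_sketch(struct smsc911x_data *pdata, u8 index)
{
	smsc911x_reg_write(pdata, MAC_CSR_CMD,
			   MAC_CSR_CMD_CSR_BUSY_ | MAC_CSR_CMD_R_NOT_W_ |
			   (index & MAC_CSR_CMD_CSR_ADDR_));
	while (smsc911x_reg_read(pdata, MAC_CSR_CMD) & MAC_CSR_CMD_CSR_BUSY_)
		cpu_relax();
	return smsc911x_reg_read(pdata, MAC_CSR_DATA);
}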
+
+#define AFC_CFG 0xAC
+#define AFC_CFG_AFC_HI_ 0x00FF0000
+#define AFC_CFG_AFC_LO_ 0x0000FF00
+#define AFC_CFG_BACK_DUR_ 0x000000F0
+#define AFC_CFG_FCMULT_ 0x00000008
+#define AFC_CFG_FCBRD_ 0x00000004
+#define AFC_CFG_FCADD_ 0x00000002
+#define AFC_CFG_FCANY_ 0x00000001
+
+#define E2P_CMD 0xB0
+#define E2P_CMD_EPC_BUSY_ 0x80000000
+#define E2P_CMD_EPC_CMD_ 0x70000000
+#define E2P_CMD_EPC_CMD_READ_ 0x00000000
+#define E2P_CMD_EPC_CMD_EWDS_ 0x10000000
+#define E2P_CMD_EPC_CMD_EWEN_ 0x20000000
+#define E2P_CMD_EPC_CMD_WRITE_ 0x30000000
+#define E2P_CMD_EPC_CMD_WRAL_ 0x40000000
+#define E2P_CMD_EPC_CMD_ERASE_ 0x50000000
+#define E2P_CMD_EPC_CMD_ERAL_ 0x60000000
+#define E2P_CMD_EPC_CMD_RELOAD_ 0x70000000
+#define E2P_CMD_EPC_TIMEOUT_ 0x00000200
+#define E2P_CMD_MAC_ADDR_LOADED_ 0x00000100
+#define E2P_CMD_EPC_ADDR_ 0x000000FF
+
+#define E2P_DATA 0xB4
+#define E2P_DATA_EEPROM_DATA_ 0x000000FF
+#define LAN_REGISTER_EXTENT 0x00000100
+
+/*
+ * MAC Control and Status Register (Indirect Address)
+ * Offset (through the MAC_CSR CMD and DATA port)
+ */
+#define MAC_CR 0x01
+#define MAC_CR_RXALL_ 0x80000000
+#define MAC_CR_HBDIS_ 0x10000000
+#define MAC_CR_RCVOWN_ 0x00800000
+#define MAC_CR_LOOPBK_ 0x00200000
+#define MAC_CR_FDPX_ 0x00100000
+#define MAC_CR_MCPAS_ 0x00080000
+#define MAC_CR_PRMS_ 0x00040000
+#define MAC_CR_INVFILT_ 0x00020000
+#define MAC_CR_PASSBAD_ 0x00010000
+#define MAC_CR_HFILT_ 0x00008000
+#define MAC_CR_HPFILT_ 0x00002000
+#define MAC_CR_LCOLL_ 0x00001000
+#define MAC_CR_BCAST_ 0x00000800
+#define MAC_CR_DISRTY_ 0x00000400
+#define MAC_CR_PADSTR_ 0x00000100
+#define MAC_CR_BOLMT_MASK_ 0x000000C0
+#define MAC_CR_DFCHK_ 0x00000020
+#define MAC_CR_TXEN_ 0x00000008
+#define MAC_CR_RXEN_ 0x00000004
+
+#define ADDRH 0x02
+
+#define ADDRL 0x03
+
+#define HASHH 0x04
+
+#define HASHL 0x05
+
+#define MII_ACC 0x06
+#define MII_ACC_PHY_ADDR_ 0x0000F800
+#define MII_ACC_MIIRINDA_ 0x000007C0
+#define MII_ACC_MII_WRITE_ 0x00000002
+#define MII_ACC_MII_BUSY_ 0x00000001
+
+#define MII_DATA 0x07
+
+#define FLOW 0x08
+#define FLOW_FCPT_ 0xFFFF0000
+#define FLOW_FCPASS_ 0x00000004
+#define FLOW_FCEN_ 0x00000002
+#define FLOW_FCBSY_ 0x00000001
+
+#define VLAN1 0x09
+
+#define VLAN2 0x0A
+
+#define WUFF 0x0B
+
+#define WUCSR 0x0C
+#define WUCSR_GUE_ 0x00000200
+#define WUCSR_WUFR_ 0x00000040
+#define WUCSR_MPR_ 0x00000020
+#define WUCSR_WAKE_EN_ 0x00000004
+#define WUCSR_MPEN_ 0x00000002
+
+/*
+ * Phy definitions (vendor-specific)
+ */
+#define LAN9118_PHY_ID 0x00C0001C
+
+#define MII_INTSTS 0x1D
+
+#define MII_INTMSK 0x1E
+#define PHY_INTMSK_AN_RCV_ (1 << 1)
+#define PHY_INTMSK_PDFAULT_ (1 << 2)
+#define PHY_INTMSK_AN_ACK_ (1 << 3)
+#define PHY_INTMSK_LNKDOWN_ (1 << 4)
+#define PHY_INTMSK_RFAULT_ (1 << 5)
+#define PHY_INTMSK_AN_COMP_ (1 << 6)
+#define PHY_INTMSK_ENERGYON_ (1 << 7)
+#define PHY_INTMSK_DEFAULT_ (PHY_INTMSK_ENERGYON_ | \
+ PHY_INTMSK_AN_COMP_ | \
+ PHY_INTMSK_RFAULT_ | \
+ PHY_INTMSK_LNKDOWN_)
+
+#define ADVERTISE_PAUSE_ALL (ADVERTISE_PAUSE_CAP | \
+ ADVERTISE_PAUSE_ASYM)
+
+#define LPA_PAUSE_ALL (LPA_PAUSE_CAP | \
+ LPA_PAUSE_ASYM)
+
+/*
+ * Provide hooks to let the arch add to the initialisation procedure
+ * and to override the source of the MAC address.
+ */
+#define SMSC_INITIALIZE() do {} while (0)
+#define smsc_get_mac(dev) smsc911x_read_mac_address((dev))
+
+#ifdef CONFIG_SMSC911X_ARCH_HOOKS
+#include <asm/smsc911x.h>
+#endif
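
Editor's note: an architecture enabling CONFIG_SMSC911X_ARCH_HOOKS supplies its own <asm/smsc911x.h> to replace the defaults above. A hypothetical hook header that runs board setup at module load and sources the MAC address from firmware (both helper names are invented for illustration):

/* Hypothetical <asm/smsc911x.h> overriding the default hooks above. */
#undef SMSC_INITIALIZE
#define SMSC_INITIALIZE()	board_smsc911x_setup()

#undef smsc_get_mac
#define smsc_get_mac(dev)	board_get_mac_address((dev)->dev_addr)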
+
+#endif /* __SMSC911X_H__ */
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
new file mode 100644
index 000000000000..4f15680849ff
--- /dev/null
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -0,0 +1,1763 @@
+/***************************************************************************
+ *
+ * Copyright (C) 2007,2008 SMSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ***************************************************************************
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+#include "smsc9420.h"
+
+#define DRV_NAME "smsc9420"
+#define PFX DRV_NAME ": "
+#define DRV_MDIONAME "smsc9420-mdio"
+#define DRV_DESCRIPTION "SMSC LAN9420 driver"
+#define DRV_VERSION "1.01"
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+struct smsc9420_dma_desc {
+ u32 status;
+ u32 length;
+ u32 buffer1;
+ u32 buffer2;
+};
+
+struct smsc9420_ring_info {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+};
+
+struct smsc9420_pdata {
+ void __iomem *base_addr;
+ struct pci_dev *pdev;
+ struct net_device *dev;
+
+ struct smsc9420_dma_desc *rx_ring;
+ struct smsc9420_dma_desc *tx_ring;
+ struct smsc9420_ring_info *tx_buffers;
+ struct smsc9420_ring_info *rx_buffers;
+ dma_addr_t rx_dma_addr;
+ dma_addr_t tx_dma_addr;
+ int tx_ring_head, tx_ring_tail;
+ int rx_ring_head, rx_ring_tail;
+
+ spinlock_t int_lock;
+ spinlock_t phy_lock;
+
+ struct napi_struct napi;
+
+ bool software_irq_signal;
+ bool rx_csum;
+ u32 msg_enable;
+
+ struct phy_device *phy_dev;
+ struct mii_bus *mii_bus;
+ int phy_irq[PHY_MAX_ADDR];
+ int last_duplex;
+ int last_carrier;
+};
+
+static DEFINE_PCI_DEVICE_TABLE(smsc9420_id_table) = {
+ { PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, smsc9420_id_table);
+
+#define SMSC_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+
+static uint smsc_debug;
+static uint debug = -1;
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "debug level");
+
+#define smsc_dbg(TYPE, f, a...) \
+do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
+ printk(KERN_DEBUG PFX f "\n", ## a); \
+} while (0)
+
+#define smsc_info(TYPE, f, a...) \
+do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
+ printk(KERN_INFO PFX f "\n", ## a); \
+} while (0)
+
+#define smsc_warn(TYPE, f, a...) \
+do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
+ printk(KERN_WARNING PFX f "\n", ## a); \
+} while (0)
+
+static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset)
+{
+ return ioread32(pd->base_addr + offset);
+}
+
+static inline void
+smsc9420_reg_write(struct smsc9420_pdata *pd, u32 offset, u32 value)
+{
+ iowrite32(value, pd->base_addr + offset);
+}
+
+static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd)
+{
+ /* to ensure PCI write completion, we must perform a PCI read */
+ smsc9420_reg_read(pd, ID_REV);
+}
+
+static int smsc9420_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
+{
+ struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv;
+ unsigned long flags;
+ u32 addr;
+ int i, reg = -EIO;
+
+ spin_lock_irqsave(&pd->phy_lock, flags);
+
+ /* confirm MII not busy */
+ if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) {
+ smsc_warn(DRV, "MII is busy???");
+ goto out;
+ }
+
+ /* set the address, index & direction (read from PHY) */
+ addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) |
+ MII_ACCESS_MII_READ_;
+ smsc9420_reg_write(pd, MII_ACCESS, addr);
+
+ /* wait for read to complete with 50us timeout */
+ for (i = 0; i < 5; i++) {
+ if (!(smsc9420_reg_read(pd, MII_ACCESS) &
+ MII_ACCESS_MII_BUSY_)) {
+ reg = (u16)smsc9420_reg_read(pd, MII_DATA);
+ goto out;
+ }
+ udelay(10);
+ }
+
+ smsc_warn(DRV, "MII busy timeout!");
+
+out:
+ spin_unlock_irqrestore(&pd->phy_lock, flags);
+ return reg;
+}
+
+static int smsc9420_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
+ u16 val)
+{
+ struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv;
+ unsigned long flags;
+ u32 addr;
+ int i, reg = -EIO;
+
+ spin_lock_irqsave(&pd->phy_lock, flags);
+
+ /* confirm MII not busy */
+ if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) {
+ smsc_warn(DRV, "MII is busy???");
+ goto out;
+ }
+
+ /* put the data to write in the MAC */
+ smsc9420_reg_write(pd, MII_DATA, (u32)val);
+
+ /* set the address, index & direction (write to PHY) */
+ addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) |
+ MII_ACCESS_MII_WRITE_;
+ smsc9420_reg_write(pd, MII_ACCESS, addr);
+
+ /* wait for write to complete with 50us timeout */
+ for (i = 0; i < 5; i++) {
+ if (!(smsc9420_reg_read(pd, MII_ACCESS) &
+ MII_ACCESS_MII_BUSY_)) {
+ reg = 0;
+ goto out;
+ }
+ udelay(10);
+ }
+
+ smsc_warn(DRV, "MII busy timeout!");
+
+out:
+ spin_unlock_irqrestore(&pd->phy_lock, flags);
+ return reg;
+}
+
+/* Returns hash bit number for given MAC address
+ * Example:
+ * 01 00 5E 00 00 01 -> returns bit number 31 */
+static u32 smsc9420_hash(u8 addr[ETH_ALEN])
+{
+ return (ether_crc(ETH_ALEN, addr) >> 26) & 0x3f;
+}
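
Editor's note: the six bits returned here index one of 64 filter positions split across the HASHH/HASHL register pair: bit 5 selects the register and bits 4:0 the bit within it, exactly as smsc9420_set_multicast_list() does further down. A compact sketch of that mapping; for the example in the comment, 01:00:5e:00:00:01 hashes to bit 31 and therefore sets bit 31 of HASHL:

/* Sketch: set a multicast filter bit (0..63) in the HASHH/HASHL pair. */
static void hash_filter_set(u32 *hash_hi, u32 *hash_lo, u32 bit_num)
{
	u32 mask = 1 << (bit_num & 0x1F);	/* position within one register */

	if (bit_num & 0x20)			/* bits 32..63 live in HASHH */
		*hash_hi |= mask;
	else
		*hash_lo |= mask;
}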
+
+static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd)
+{
+ int timeout = 100000;
+
+ BUG_ON(!pd);
+
+ if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) {
+ smsc_dbg(DRV, "smsc9420_eeprom_reload: Eeprom busy");
+ return -EIO;
+ }
+
+ smsc9420_reg_write(pd, E2P_CMD,
+ (E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_RELOAD_));
+
+ do {
+ udelay(10);
+ if (!(smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_))
+ return 0;
+ } while (timeout--);
+
+ smsc_warn(DRV, "smsc9420_eeprom_reload: Eeprom timed out");
+ return -EIO;
+}
+
+/* Standard ioctls for mii-tool */
+static int smsc9420_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct smsc9420_pdata *pd = netdev_priv(dev);
+
+ if (!netif_running(dev) || !pd->phy_dev)
+ return -EINVAL;
+
+ return phy_mii_ioctl(pd->phy_dev, ifr, cmd);
+}
+
+static int smsc9420_ethtool_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct smsc9420_pdata *pd = netdev_priv(dev);
+
+ if (!pd->phy_dev)
+ return -ENODEV;
+
+ cmd->maxtxpkt = 1;
+ cmd->maxrxpkt = 1;
+ return phy_ethtool_gset(pd->phy_dev, cmd);
+}
+
+static int smsc9420_ethtool_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct smsc9420_pdata *pd = netdev_priv(dev);
+
+ if (!pd->phy_dev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(pd->phy_dev, cmd);
+}
+
+static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct smsc9420_pdata *pd = netdev_priv(netdev);
+
+ strcpy(drvinfo->driver, DRV_NAME);
+ strcpy(drvinfo->bus_info, pci_name(pd->pdev));
+ strcpy(drvinfo->version, DRV_VERSION);
+}
+
+static u32 smsc9420_ethtool_get_msglevel(struct net_device *netdev)
+{
+ struct smsc9420_pdata *pd = netdev_priv(netdev);
+ return pd->msg_enable;
+}
+
+static void smsc9420_ethtool_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct smsc9420_pdata *pd = netdev_priv(netdev);
+ pd->msg_enable = data;
+}
+
+static int smsc9420_ethtool_nway_reset(struct net_device *netdev)
+{
+ struct smsc9420_pdata *pd = netdev_priv(netdev);
+
+ if (!pd->phy_dev)
+ return -ENODEV;
+
+ return phy_start_aneg(pd->phy_dev);
+}
+
+static int smsc9420_ethtool_getregslen(struct net_device *dev)
+{
+ /* all smsc9420 registers plus all phy registers */
+ return 0x100 + (32 * sizeof(u32));
+}
+
+static void
+smsc9420_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
+ void *buf)
+{
+ struct smsc9420_pdata *pd = netdev_priv(dev);
+ struct phy_device *phy_dev = pd->phy_dev;
+ unsigned int i, j = 0;
+ u32 *data = buf;
+
+ regs->version = smsc9420_reg_read(pd, ID_REV);
+ for (i = 0; i < 0x100; i += (sizeof(u32)))
+ data[j++] = smsc9420_reg_read(pd, i);
+
+	/* cannot read phy registers if the net device is down */
+ if (!phy_dev)
+ return;
+
+ for (i = 0; i <= 31; i++)
+ data[j++] = smsc9420_mii_read(phy_dev->bus, phy_dev->addr, i);
+}
+
+static void smsc9420_eeprom_enable_access(struct smsc9420_pdata *pd)
+{
+ unsigned int temp = smsc9420_reg_read(pd, GPIO_CFG);
+ temp &= ~GPIO_CFG_EEPR_EN_;
+ smsc9420_reg_write(pd, GPIO_CFG, temp);
+ msleep(1);
+}
+
+static int smsc9420_eeprom_send_cmd(struct smsc9420_pdata *pd, u32 op)
+{
+ int timeout = 100;
+ u32 e2cmd;
+
+ smsc_dbg(HW, "op 0x%08x", op);
+ if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) {
+ smsc_warn(HW, "Busy at start");
+ return -EBUSY;
+ }
+
+ e2cmd = op | E2P_CMD_EPC_BUSY_;
+ smsc9420_reg_write(pd, E2P_CMD, e2cmd);
+
+ do {
+ msleep(1);
+ e2cmd = smsc9420_reg_read(pd, E2P_CMD);
+ } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout));
+
+ if (!timeout) {
+ smsc_info(HW, "TIMED OUT");
+ return -EAGAIN;
+ }
+
+ if (e2cmd & E2P_CMD_EPC_TIMEOUT_) {
+ smsc_info(HW, "Error occurred during eeprom operation");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int smsc9420_eeprom_read_location(struct smsc9420_pdata *pd,
+ u8 address, u8 *data)
+{
+ u32 op = E2P_CMD_EPC_CMD_READ_ | address;
+ int ret;
+
+ smsc_dbg(HW, "address 0x%x", address);
+ ret = smsc9420_eeprom_send_cmd(pd, op);
+
+ if (!ret)
+ data[address] = smsc9420_reg_read(pd, E2P_DATA);
+
+ return ret;
+}
+
+static int smsc9420_eeprom_write_location(struct smsc9420_pdata *pd,
+ u8 address, u8 data)
+{
+ u32 op = E2P_CMD_EPC_CMD_ERASE_ | address;
+ int ret;
+
+ smsc_dbg(HW, "address 0x%x, data 0x%x", address, data);
+ ret = smsc9420_eeprom_send_cmd(pd, op);
+
+ if (!ret) {
+ op = E2P_CMD_EPC_CMD_WRITE_ | address;
+ smsc9420_reg_write(pd, E2P_DATA, (u32)data);
+ ret = smsc9420_eeprom_send_cmd(pd, op);
+ }
+
+ return ret;
+}
+
+static int smsc9420_ethtool_get_eeprom_len(struct net_device *dev)
+{
+ return SMSC9420_EEPROM_SIZE;
+}
+
+static int smsc9420_ethtool_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct smsc9420_pdata *pd = netdev_priv(dev);
+ u8 eeprom_data[SMSC9420_EEPROM_SIZE];
+ int len, i;
+
+ smsc9420_eeprom_enable_access(pd);
+
+ len = min(eeprom->len, SMSC9420_EEPROM_SIZE);
+ for (i = 0; i < len; i++) {
+ int ret = smsc9420_eeprom_read_location(pd, i, eeprom_data);
+ if (ret < 0) {
+ eeprom->len = 0;
+ return ret;
+ }
+ }
+
+ memcpy(data, &eeprom_data[eeprom->offset], len);
+ eeprom->magic = SMSC9420_EEPROM_MAGIC;
+ eeprom->len = len;
+ return 0;
+}
+
+static int smsc9420_ethtool_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct smsc9420_pdata *pd = netdev_priv(dev);
+ int ret;
+
+ if (eeprom->magic != SMSC9420_EEPROM_MAGIC)
+ return -EINVAL;
+
+ smsc9420_eeprom_enable_access(pd);
+ smsc9420_eeprom_send_cmd(pd, E2P_CMD_EPC_CMD_EWEN_);
+ ret = smsc9420_eeprom_write_location(pd, eeprom->offset, *data);
+ smsc9420_eeprom_send_cmd(pd, E2P_CMD_EPC_CMD_EWDS_);
+
+	/* Single byte write, according to the ethtool(8) man page */
+ eeprom->len = 1;
+
+ return ret;
+}
+
+static const struct ethtool_ops smsc9420_ethtool_ops = {
+ .get_settings = smsc9420_ethtool_get_settings,
+ .set_settings = smsc9420_ethtool_set_settings,
+ .get_drvinfo = smsc9420_ethtool_get_drvinfo,
+ .get_msglevel = smsc9420_ethtool_get_msglevel,
+ .set_msglevel = smsc9420_ethtool_set_msglevel,
+ .nway_reset = smsc9420_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = smsc9420_ethtool_get_eeprom_len,
+ .get_eeprom = smsc9420_ethtool_get_eeprom,
+ .set_eeprom = smsc9420_ethtool_set_eeprom,
+ .get_regs_len = smsc9420_ethtool_getregslen,
+ .get_regs = smsc9420_ethtool_getregs,
+};
+
+/* Sets the device MAC address to dev_addr */
+static void smsc9420_set_mac_address(struct net_device *dev)
+{
+ struct smsc9420_pdata *pd = netdev_priv(dev);
+ u8 *dev_addr = dev->dev_addr;
+ u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4];
+ u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
+ (dev_addr[1] << 8) | dev_addr[0];
+
+ smsc9420_reg_write(pd, ADDRH, mac_high16);
+ smsc9420_reg_write(pd, ADDRL, mac_low32);
+}
+
+static void smsc9420_check_mac_address(struct net_device *dev)
+{
+ struct smsc9420_pdata *pd = netdev_priv(dev);
+
+ /* Check if mac address has been specified when bringing interface up */
+ if (is_valid_ether_addr(dev->dev_addr)) {
+ smsc9420_set_mac_address(dev);
+ smsc_dbg(PROBE, "MAC Address is specified by configuration");
+ } else {
+ /* Try reading mac address from device. If EEPROM is present
+ * it will already have been set */
+ u32 mac_high16 = smsc9420_reg_read(pd, ADDRH);
+ u32 mac_low32 = smsc9420_reg_read(pd, ADDRL);
+ dev->dev_addr[0] = (u8)(mac_low32);
+ dev->dev_addr[1] = (u8)(mac_low32 >> 8);
+ dev->dev_addr[2] = (u8)(mac_low32 >> 16);
+ dev->dev_addr[3] = (u8)(mac_low32 >> 24);
+ dev->dev_addr[4] = (u8)(mac_high16);
+ dev->dev_addr[5] = (u8)(mac_high16 >> 8);
+
+ if (is_valid_ether_addr(dev->dev_addr)) {
+ /* eeprom values are valid so use them */
+ smsc_dbg(PROBE, "Mac Address is read from EEPROM");
+ } else {
+ /* eeprom values are invalid, generate random MAC */
+ random_ether_addr(dev->dev_addr);
+ smsc9420_set_mac_address(dev);
+ smsc_dbg(PROBE,
+ "MAC Address is set to random_ether_addr");
+ }
+ }
+}
+
+static void smsc9420_stop_tx(struct smsc9420_pdata *pd)
+{
+ u32 dmac_control, mac_cr, dma_intr_ena;
+ int timeout = 1000;
+
+ /* disable TX DMAC */
+ dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL);
+ dmac_control &= (~DMAC_CONTROL_ST_);
+ smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control);
+
+ /* Wait max 10ms for transmit process to stop */
+ while (--timeout) {
+ if (smsc9420_reg_read(pd, DMAC_STATUS) & DMAC_STS_TS_)
+ break;
+ udelay(10);
+ }
+
+ if (!timeout)
+ smsc_warn(IFDOWN, "TX DMAC failed to stop");
+
+ /* ACK Tx DMAC stop bit */
+ smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_TXPS_);
+
+ /* mask TX DMAC interrupts */
+ dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
+ dma_intr_ena &= ~(DMAC_INTR_ENA_TX_);
+ smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena);
+ smsc9420_pci_flush_write(pd);
+
+ /* stop MAC TX */
+ mac_cr = smsc9420_reg_read(pd, MAC_CR) & (~MAC_CR_TXEN_);
+ smsc9420_reg_write(pd, MAC_CR, mac_cr);
+ smsc9420_pci_flush_write(pd);
+}
+
+static void smsc9420_free_tx_ring(struct smsc9420_pdata *pd)
+{
+ int i;
+
+ BUG_ON(!pd->tx_ring);
+
+ if (!pd->tx_buffers)
+ return;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ struct sk_buff *skb = pd->tx_buffers[i].skb;
+
+ if (skb) {
+ BUG_ON(!pd->tx_buffers[i].mapping);
+ pci_unmap_single(pd->pdev, pd->tx_buffers[i].mapping,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+ }
+
+ pd->tx_ring[i].status = 0;
+ pd->tx_ring[i].length = 0;
+ pd->tx_ring[i].buffer1 = 0;
+ pd->tx_ring[i].buffer2 = 0;
+ }
+ wmb();
+
+ kfree(pd->tx_buffers);
+ pd->tx_buffers = NULL;
+
+ pd->tx_ring_head = 0;
+ pd->tx_ring_tail = 0;
+}
+
+static void smsc9420_free_rx_ring(struct smsc9420_pdata *pd)
+{
+ int i;
+
+ BUG_ON(!pd->rx_ring);
+
+ if (!pd->rx_buffers)
+ return;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (pd->rx_buffers[i].skb)
+ dev_kfree_skb_any(pd->rx_buffers[i].skb);
+
+ if (pd->rx_buffers[i].mapping)
+ pci_unmap_single(pd->pdev, pd->rx_buffers[i].mapping,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+
+ pd->rx_ring[i].status = 0;
+ pd->rx_ring[i].length = 0;
+ pd->rx_ring[i].buffer1 = 0;
+ pd->rx_ring[i].buffer2 = 0;
+ }
+ wmb();
+
+ kfree(pd->rx_buffers);
+ pd->rx_buffers = NULL;
+
+ pd->rx_ring_head = 0;
+ pd->rx_ring_tail = 0;
+}
+
+static void smsc9420_stop_rx(struct smsc9420_pdata *pd)
+{
+ int timeout = 1000;
+ u32 mac_cr, dmac_control, dma_intr_ena;
+
+ /* mask RX DMAC interrupts */
+ dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
+ dma_intr_ena &= (~DMAC_INTR_ENA_RX_);
+ smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena);
+ smsc9420_pci_flush_write(pd);
+
+	/* stop RX MAC prior to stopping DMA */
+ mac_cr = smsc9420_reg_read(pd, MAC_CR) & (~MAC_CR_RXEN_);
+ smsc9420_reg_write(pd, MAC_CR, mac_cr);
+ smsc9420_pci_flush_write(pd);
+
+ /* stop RX DMAC */
+ dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL);
+ dmac_control &= (~DMAC_CONTROL_SR_);
+ smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control);
+ smsc9420_pci_flush_write(pd);
+
+ /* wait up to 10ms for receive to stop */
+ while (--timeout) {
+ if (smsc9420_reg_read(pd, DMAC_STATUS) & DMAC_STS_RS_)
+ break;
+ udelay(10);
+ }
+
+ if (!timeout)
+ smsc_warn(IFDOWN, "RX DMAC did not stop! timeout.");
+
+ /* ACK the Rx DMAC stop bit */
+ smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_RXPS_);
+}
+
+static irqreturn_t smsc9420_isr(int irq, void *dev_id)
+{
+ struct smsc9420_pdata *pd = dev_id;
+ u32 int_cfg, int_sts, int_ctl;
+ irqreturn_t ret = IRQ_NONE;
+ ulong flags;
+
+ BUG_ON(!pd);
+ BUG_ON(!pd->base_addr);
+
+ int_cfg = smsc9420_reg_read(pd, INT_CFG);
+
+ /* check if it's our interrupt */
+ if ((int_cfg & (INT_CFG_IRQ_EN_ | INT_CFG_IRQ_INT_)) !=
+ (INT_CFG_IRQ_EN_ | INT_CFG_IRQ_INT_))
+ return IRQ_NONE;
+
+ int_sts = smsc9420_reg_read(pd, INT_STAT);
+
+ if (likely(INT_STAT_DMAC_INT_ & int_sts)) {
+ u32 status = smsc9420_reg_read(pd, DMAC_STATUS);
+ u32 ints_to_clear = 0;
+
+ if (status & DMAC_STS_TX_) {
+ ints_to_clear |= (DMAC_STS_TX_ | DMAC_STS_NIS_);
+ netif_wake_queue(pd->dev);
+ }
+
+ if (status & DMAC_STS_RX_) {
+ /* mask RX DMAC interrupts */
+ u32 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
+ dma_intr_ena &= (~DMAC_INTR_ENA_RX_);
+ smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena);
+ smsc9420_pci_flush_write(pd);
+
+ ints_to_clear |= (DMAC_STS_RX_ | DMAC_STS_NIS_);
+ napi_schedule(&pd->napi);
+ }
+
+ if (ints_to_clear)
+ smsc9420_reg_write(pd, DMAC_STATUS, ints_to_clear);
+
+ ret = IRQ_HANDLED;
+ }
+
+ if (unlikely(INT_STAT_SW_INT_ & int_sts)) {
+ /* mask software interrupt */
+ spin_lock_irqsave(&pd->int_lock, flags);
+ int_ctl = smsc9420_reg_read(pd, INT_CTL);
+ int_ctl &= (~INT_CTL_SW_INT_EN_);
+ smsc9420_reg_write(pd, INT_CTL, int_ctl);
+ spin_unlock_irqrestore(&pd->int_lock, flags);
+
+ smsc9420_reg_write(pd, INT_STAT, INT_STAT_SW_INT_);
+ pd->software_irq_signal = true;
+ smp_wmb();
+
+ ret = IRQ_HANDLED;
+ }
+
+ /* to ensure PCI write completion, we must perform a PCI read */
+ smsc9420_pci_flush_write(pd);
+
+ return ret;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void smsc9420_poll_controller(struct net_device *dev)
+{
+	struct smsc9420_pdata *pd = netdev_priv(dev);
+
+	disable_irq(dev->irq);
+	/* the ISR dereferences dev_id as pdata, as registered via request_irq */
+	smsc9420_isr(0, pd);
+	enable_irq(dev->irq);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+static void smsc9420_dmac_soft_reset(struct smsc9420_pdata *pd)
+{
+ smsc9420_reg_write(pd, BUS_MODE, BUS_MODE_SWR_);
+ smsc9420_reg_read(pd, BUS_MODE);
+ udelay(2);
+ if (smsc9420_reg_read(pd, BUS_MODE) & BUS_MODE_SWR_)
+ smsc_warn(DRV, "Software reset not cleared");
+}
+
+static int smsc9420_stop(struct net_device *dev)
+{
+ struct smsc9420_pdata *pd = netdev_priv(dev);
+ u32 int_cfg;
+ ulong flags;
+
+ BUG_ON(!pd);
+ BUG_ON(!pd->phy_dev);
+
+ /* disable master interrupt */
+ spin_lock_irqsave(&pd->int_lock, flags);
+ int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_);
+ smsc9420_reg_write(pd, INT_CFG, int_cfg);
+ spin_unlock_irqrestore(&pd->int_lock, flags);
+
+ netif_tx_disable(dev);
+ napi_disable(&pd->napi);
+
+ smsc9420_stop_tx(pd);
+ smsc9420_free_tx_ring(pd);
+
+ smsc9420_stop_rx(pd);
+ smsc9420_free_rx_ring(pd);
+
+ free_irq(dev->irq, pd);
+
+ smsc9420_dmac_soft_reset(pd);
+
+ phy_stop(pd->phy_dev);
+
+ phy_disconnect(pd->phy_dev);
+ pd->phy_dev = NULL;
+ mdiobus_unregister(pd->mii_bus);
+ mdiobus_free(pd->mii_bus);
+
+ return 0;
+}
+
+static void smsc9420_rx_count_stats(struct net_device *dev, u32 desc_status)
+{
+ if (unlikely(desc_status & RDES0_ERROR_SUMMARY_)) {
+ dev->stats.rx_errors++;
+ if (desc_status & RDES0_DESCRIPTOR_ERROR_)
+ dev->stats.rx_over_errors++;
+ else if (desc_status & (RDES0_FRAME_TOO_LONG_ |
+ RDES0_RUNT_FRAME_ | RDES0_COLLISION_SEEN_))
+ dev->stats.rx_frame_errors++;
+ else if (desc_status & RDES0_CRC_ERROR_)
+ dev->stats.rx_crc_errors++;
+ }
+
+ if (unlikely(desc_status & RDES0_LENGTH_ERROR_))
+ dev->stats.rx_length_errors++;
+
+ if (unlikely(!((desc_status & RDES0_LAST_DESCRIPTOR_) &&
+ (desc_status & RDES0_FIRST_DESCRIPTOR_))))
+ dev->stats.rx_length_errors++;
+
+ if (desc_status & RDES0_MULTICAST_FRAME_)
+ dev->stats.multicast++;
+}
+
+static void smsc9420_rx_handoff(struct smsc9420_pdata *pd, const int index,
+ const u32 status)
+{
+ struct net_device *dev = pd->dev;
+ struct sk_buff *skb;
+ u16 packet_length = (status & RDES0_FRAME_LENGTH_MASK_)
+ >> RDES0_FRAME_LENGTH_SHFT_;
+
+	/* remove crc from packet length */
+ packet_length -= 4;
+
+ if (pd->rx_csum)
+ packet_length -= 2;
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += packet_length;
+
+ pci_unmap_single(pd->pdev, pd->rx_buffers[index].mapping,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ pd->rx_buffers[index].mapping = 0;
+
+ skb = pd->rx_buffers[index].skb;
+ pd->rx_buffers[index].skb = NULL;
+
+ if (pd->rx_csum) {
+ u16 hw_csum = get_unaligned_le16(skb_tail_pointer(skb) +
+ NET_IP_ALIGN + packet_length + 4);
+ put_unaligned_le16(hw_csum, &skb->csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb_put(skb, packet_length);
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ netif_receive_skb(skb);
+}
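
Editor's note: the offsets used above imply the following DMA buffer layout; this is reconstructed from the arithmetic, not from a datasheet. The length reported in RDES0 covers the frame including its FCS plus, when RX_COE is enabled, a 2-byte checksum appended after the FCS:

/*
 * Implied receive buffer layout:
 *
 *   | NET_IP_ALIGN pad | payload (packet_length) | FCS (4) | csum (2, RX_COE) |
 *
 * Hence packet_length -= 4 (FCS), -= 2 (checksum), and the checksum read
 * at skb_tail_pointer(skb) + NET_IP_ALIGN + packet_length + 4.
 */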
+
+static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index)
+{
+ struct sk_buff *skb = netdev_alloc_skb(pd->dev, PKT_BUF_SZ);
+ dma_addr_t mapping;
+
+ BUG_ON(pd->rx_buffers[index].skb);
+ BUG_ON(pd->rx_buffers[index].mapping);
+
+ if (unlikely(!skb)) {
+ smsc_warn(RX_ERR, "Failed to allocate new skb!");
+ return -ENOMEM;
+ }
+
+ skb->dev = pd->dev;
+
+ mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb),
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(pd->pdev, mapping)) {
+ dev_kfree_skb_any(skb);
+ smsc_warn(RX_ERR, "pci_map_single failed!");
+ return -ENOMEM;
+ }
+
+ pd->rx_buffers[index].skb = skb;
+ pd->rx_buffers[index].mapping = mapping;
+ pd->rx_ring[index].buffer1 = mapping + NET_IP_ALIGN;
+ pd->rx_ring[index].status = RDES0_OWN_;
+ wmb();
+
+ return 0;
+}
+
+static void smsc9420_alloc_new_rx_buffers(struct smsc9420_pdata *pd)
+{
+ while (pd->rx_ring_tail != pd->rx_ring_head) {
+ if (smsc9420_alloc_rx_buffer(pd, pd->rx_ring_tail))
+ break;
+
+ pd->rx_ring_tail = (pd->rx_ring_tail + 1) % RX_RING_SIZE;
+ }
+}
+
+static int smsc9420_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct smsc9420_pdata *pd =
+ container_of(napi, struct smsc9420_pdata, napi);
+ struct net_device *dev = pd->dev;
+ u32 drop_frame_cnt, dma_intr_ena, status;
+ int work_done;
+
+ for (work_done = 0; work_done < budget; work_done++) {
+ rmb();
+ status = pd->rx_ring[pd->rx_ring_head].status;
+
+ /* stop if DMAC owns this dma descriptor */
+ if (status & RDES0_OWN_)
+ break;
+
+ smsc9420_rx_count_stats(dev, status);
+ smsc9420_rx_handoff(pd, pd->rx_ring_head, status);
+ pd->rx_ring_head = (pd->rx_ring_head + 1) % RX_RING_SIZE;
+ smsc9420_alloc_new_rx_buffers(pd);
+ }
+
+ drop_frame_cnt = smsc9420_reg_read(pd, MISS_FRAME_CNTR);
+ dev->stats.rx_dropped +=
+ (drop_frame_cnt & 0xFFFF) + ((drop_frame_cnt >> 17) & 0x3FF);
+
+ /* Kick RXDMA */
+ smsc9420_reg_write(pd, RX_POLL_DEMAND, 1);
+ smsc9420_pci_flush_write(pd);
+
+ if (work_done < budget) {
+ napi_complete(&pd->napi);
+
+ /* re-enable RX DMA interrupts */
+ dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
+ dma_intr_ena |= (DMAC_INTR_ENA_RX_ | DMAC_INTR_ENA_NIS_);
+ smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena);
+ smsc9420_pci_flush_write(pd);
+ }
+ return work_done;
+}
+
+static void
+smsc9420_tx_update_stats(struct net_device *dev, u32 status, u32 length)
+{
+ if (unlikely(status & TDES0_ERROR_SUMMARY_)) {
+ dev->stats.tx_errors++;
+ if (status & (TDES0_EXCESSIVE_DEFERRAL_ |
+ TDES0_EXCESSIVE_COLLISIONS_))
+ dev->stats.tx_aborted_errors++;
+
+ if (status & (TDES0_LOSS_OF_CARRIER_ | TDES0_NO_CARRIER_))
+ dev->stats.tx_carrier_errors++;
+ } else {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += (length & 0x7FF);
+ }
+
+ if (unlikely(status & TDES0_EXCESSIVE_COLLISIONS_)) {
+ dev->stats.collisions += 16;
+ } else {
+ dev->stats.collisions +=
+ (status & TDES0_COLLISION_COUNT_MASK_) >>
+ TDES0_COLLISION_COUNT_SHFT_;
+ }
+
+ if (unlikely(status & TDES0_HEARTBEAT_FAIL_))
+ dev->stats.tx_heartbeat_errors++;
+}
+
+/* Check for completed dma transfers, update stats and free skbs */
+static void smsc9420_complete_tx(struct net_device *dev)
+{
+ struct smsc9420_pdata *pd = netdev_priv(dev);
+
+ while (pd->tx_ring_tail != pd->tx_ring_head) {
+ int index = pd->tx_ring_tail;
+ u32 status, length;
+
+ rmb();
+ status = pd->tx_ring[index].status;
+ length = pd->tx_ring[index].length;
+
+ /* Check if DMA still owns this descriptor */
+ if (unlikely(TDES0_OWN_ & status))
+ break;
+
+ smsc9420_tx_update_stats(dev, status, length);
+
+ BUG_ON(!pd->tx_buffers[index].skb);
+ BUG_ON(!pd->tx_buffers[index].mapping);
+
+ pci_unmap_single(pd->pdev, pd->tx_buffers[index].mapping,
+ pd->tx_buffers[index].skb->len, PCI_DMA_TODEVICE);
+ pd->tx_buffers[index].mapping = 0;
+
+ dev_kfree_skb_any(pd->tx_buffers[index].skb);
+ pd->tx_buffers[index].skb = NULL;
+
+ pd->tx_ring[index].buffer1 = 0;
+ wmb();
+
+ pd->tx_ring_tail = (pd->tx_ring_tail + 1) % TX_RING_SIZE;
+ }
+}
+
+static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct smsc9420_pdata *pd = netdev_priv(dev);
+ dma_addr_t mapping;
+ int index = pd->tx_ring_head;
+ u32 tmp_desc1;
+ bool about_to_take_last_desc =
+ (((pd->tx_ring_head + 2) % TX_RING_SIZE) == pd->tx_ring_tail);
+
+ smsc9420_complete_tx(dev);
+
+ rmb();
+ BUG_ON(pd->tx_ring[index].status & TDES0_OWN_);
+ BUG_ON(pd->tx_buffers[index].skb);
+ BUG_ON(pd->tx_buffers[index].mapping);
+
+ mapping = pci_map_single(pd->pdev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pd->pdev, mapping)) {
+ smsc_warn(TX_ERR, "pci_map_single failed, dropping packet");
+ return NETDEV_TX_BUSY;
+ }
+
+ pd->tx_buffers[index].skb = skb;
+ pd->tx_buffers[index].mapping = mapping;
+
+ tmp_desc1 = (TDES1_LS_ | ((u32)skb->len & 0x7FF));
+ if (unlikely(about_to_take_last_desc)) {
+ tmp_desc1 |= TDES1_IC_;
+ netif_stop_queue(pd->dev);
+ }
+
+ /* check if we are at the last descriptor and need to set EOR */
+ if (unlikely(index == (TX_RING_SIZE - 1)))
+ tmp_desc1 |= TDES1_TER_;
+
+ pd->tx_ring[index].buffer1 = mapping;
+ pd->tx_ring[index].length = tmp_desc1;
+ wmb();
+
+ /* increment head */
+ pd->tx_ring_head = (pd->tx_ring_head + 1) % TX_RING_SIZE;
+
+ /* assign ownership to DMAC */
+ pd->tx_ring[index].status = TDES0_OWN_;
+ wmb();
+
+ skb_tx_timestamp(skb);
+
+ /* kick the DMA */
+ smsc9420_reg_write(pd, TX_POLL_DEMAND, 1);
+ smsc9420_pci_flush_write(pd);
+
+ return NETDEV_TX_OK;
+}
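
Editor's note: tx_ring_head is the producer index and tx_ring_tail the consumer; one slot is kept unused so that head == tail unambiguously means an empty ring. The queue is stopped when this transmit consumes the last usable descriptor, i.e. when (head + 2) % TX_RING_SIZE catches up with tail, and TDES1_IC_ requests a completion interrupt so the queue can be woken from the ISR. A sketch of the free-slot accounting under that convention; about_to_take_last_desc above is equivalent to tx_ring_free_slots(head, tail) == 1:

/* Sketch: free descriptors under the head/tail convention used above. */
static int tx_ring_free_slots(int head, int tail)
{
	/* one slot stays unused, so an empty ring reports TX_RING_SIZE - 1 */
	return (tail - head - 1 + TX_RING_SIZE) % TX_RING_SIZE;
}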
+
+static struct net_device_stats *smsc9420_get_stats(struct net_device *dev)
+{
+ struct smsc9420_pdata *pd = netdev_priv(dev);
+ u32 counter = smsc9420_reg_read(pd, MISS_FRAME_CNTR);
+ dev->stats.rx_dropped +=
+ (counter & 0x0000FFFF) + ((counter >> 17) & 0x000003FF);
+ return &dev->stats;
+}
+
+static void smsc9420_set_multicast_list(struct net_device *dev)
+{
+ struct smsc9420_pdata *pd = netdev_priv(dev);
+ u32 mac_cr = smsc9420_reg_read(pd, MAC_CR);
+
+ if (dev->flags & IFF_PROMISC) {
+ smsc_dbg(HW, "Promiscuous Mode Enabled");
+ mac_cr |= MAC_CR_PRMS_;
+ mac_cr &= (~MAC_CR_MCPAS_);
+ mac_cr &= (~MAC_CR_HPFILT_);
+ } else if (dev->flags & IFF_ALLMULTI) {
+ smsc_dbg(HW, "Receive all Multicast Enabled");
+ mac_cr &= (~MAC_CR_PRMS_);
+ mac_cr |= MAC_CR_MCPAS_;
+ mac_cr &= (~MAC_CR_HPFILT_);
+ } else if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+ u32 hash_lo = 0, hash_hi = 0;
+
+ smsc_dbg(HW, "Multicast filter enabled");
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 bit_num = smsc9420_hash(ha->addr);
+ u32 mask = 1 << (bit_num & 0x1F);
+
+ if (bit_num & 0x20)
+ hash_hi |= mask;
+ else
+ hash_lo |= mask;
+
+ }
+ smsc9420_reg_write(pd, HASHH, hash_hi);
+ smsc9420_reg_write(pd, HASHL, hash_lo);
+
+ mac_cr &= (~MAC_CR_PRMS_);
+ mac_cr &= (~MAC_CR_MCPAS_);
+ mac_cr |= MAC_CR_HPFILT_;
+ } else {
+ smsc_dbg(HW, "Receive own packets only.");
+ smsc9420_reg_write(pd, HASHH, 0);
+ smsc9420_reg_write(pd, HASHL, 0);
+
+ mac_cr &= (~MAC_CR_PRMS_);
+ mac_cr &= (~MAC_CR_MCPAS_);
+ mac_cr &= (~MAC_CR_HPFILT_);
+ }
+
+ smsc9420_reg_write(pd, MAC_CR, mac_cr);
+ smsc9420_pci_flush_write(pd);
+}
+
+static void smsc9420_phy_update_flowcontrol(struct smsc9420_pdata *pd)
+{
+ struct phy_device *phy_dev = pd->phy_dev;
+ u32 flow;
+
+ if (phy_dev->duplex == DUPLEX_FULL) {
+ u16 lcladv = phy_read(phy_dev, MII_ADVERTISE);
+ u16 rmtadv = phy_read(phy_dev, MII_LPA);
+ u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+
+ if (cap & FLOW_CTRL_RX)
+ flow = 0xFFFF0002;
+ else
+ flow = 0;
+
+ smsc_info(LINK, "rx pause %s, tx pause %s",
+ (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
+ (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
+ } else {
+ smsc_info(LINK, "half duplex");
+ flow = 0;
+ }
+
+ smsc9420_reg_write(pd, FLOW, flow);
+}
+
+/* Update link mode if anything has changed. Called periodically when the
+ * PHY is in polling mode, even if nothing has changed. */
+static void smsc9420_phy_adjust_link(struct net_device *dev)
+{
+ struct smsc9420_pdata *pd = netdev_priv(dev);
+ struct phy_device *phy_dev = pd->phy_dev;
+ int carrier;
+
+ if (phy_dev->duplex != pd->last_duplex) {
+ u32 mac_cr = smsc9420_reg_read(pd, MAC_CR);
+ if (phy_dev->duplex) {
+ smsc_dbg(LINK, "full duplex mode");
+ mac_cr |= MAC_CR_FDPX_;
+ } else {
+ smsc_dbg(LINK, "half duplex mode");
+ mac_cr &= ~MAC_CR_FDPX_;
+ }
+ smsc9420_reg_write(pd, MAC_CR, mac_cr);
+
+ smsc9420_phy_update_flowcontrol(pd);
+ pd->last_duplex = phy_dev->duplex;
+ }
+
+ carrier = netif_carrier_ok(dev);
+ if (carrier != pd->last_carrier) {
+ if (carrier)
+ smsc_dbg(LINK, "carrier OK");
+ else
+ smsc_dbg(LINK, "no carrier");
+ pd->last_carrier = carrier;
+ }
+}
+
+static int smsc9420_mii_probe(struct net_device *dev)
+{
+ struct smsc9420_pdata *pd = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
+
+ BUG_ON(pd->phy_dev);
+
+ /* Device only supports internal PHY at address 1 */
+ if (!pd->mii_bus->phy_map[1]) {
+ pr_err("%s: no PHY found at address 1\n", dev->name);
+ return -ENODEV;
+ }
+
+ phydev = pd->mii_bus->phy_map[1];
+ smsc_info(PROBE, "PHY addr %d, phy_id 0x%08X", phydev->addr,
+ phydev->phy_id);
+
+ phydev = phy_connect(dev, dev_name(&phydev->dev),
+ smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(phydev)) {
+ pr_err("%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(phydev);
+ }
+
+ pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ dev->name, phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
+
+ /* mask with MAC supported features */
+ phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ phydev->advertising = phydev->supported;
+
+ pd->phy_dev = phydev;
+ pd->last_duplex = -1;
+ pd->last_carrier = -1;
+
+ return 0;
+}
+
+static int smsc9420_mii_init(struct net_device *dev)
+{
+ struct smsc9420_pdata *pd = netdev_priv(dev);
+ int err = -ENXIO, i;
+
+ pd->mii_bus = mdiobus_alloc();
+ if (!pd->mii_bus) {
+ err = -ENOMEM;
+ goto err_out_1;
+ }
+ pd->mii_bus->name = DRV_MDIONAME;
+ snprintf(pd->mii_bus->id, MII_BUS_ID_SIZE, "%x",
+ (pd->pdev->bus->number << 8) | pd->pdev->devfn);
+ pd->mii_bus->priv = pd;
+ pd->mii_bus->read = smsc9420_mii_read;
+ pd->mii_bus->write = smsc9420_mii_write;
+ pd->mii_bus->irq = pd->phy_irq;
+ for (i = 0; i < PHY_MAX_ADDR; ++i)
+ pd->mii_bus->irq[i] = PHY_POLL;
+
+ /* Mask all PHYs except ID 1 (internal) */
+ pd->mii_bus->phy_mask = ~(1 << 1);
+
+ if (mdiobus_register(pd->mii_bus)) {
+ smsc_warn(PROBE, "Error registering mii bus");
+ goto err_out_free_bus_2;
+ }
+
+ if (smsc9420_mii_probe(dev) < 0) {
+ smsc_warn(PROBE, "Error probing mii bus");
+ goto err_out_unregister_bus_3;
+ }
+
+ return 0;
+
+err_out_unregister_bus_3:
+ mdiobus_unregister(pd->mii_bus);
+err_out_free_bus_2:
+ mdiobus_free(pd->mii_bus);
+err_out_1:
+ return err;
+}
+
+static int smsc9420_alloc_tx_ring(struct smsc9420_pdata *pd)
+{
+ int i;
+
+ BUG_ON(!pd->tx_ring);
+
+ pd->tx_buffers = kmalloc((sizeof(struct smsc9420_ring_info) *
+ TX_RING_SIZE), GFP_KERNEL);
+ if (!pd->tx_buffers) {
+ smsc_warn(IFUP, "Failed to allocated tx_buffers");
+ return -ENOMEM;
+ }
+
+ /* Initialize the TX Ring */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ pd->tx_buffers[i].skb = NULL;
+ pd->tx_buffers[i].mapping = 0;
+ pd->tx_ring[i].status = 0;
+ pd->tx_ring[i].length = 0;
+ pd->tx_ring[i].buffer1 = 0;
+ pd->tx_ring[i].buffer2 = 0;
+ }
+ pd->tx_ring[TX_RING_SIZE - 1].length = TDES1_TER_;
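+	/* TDES1_TER_ flags the end-of-ring descriptor so the DMA engine wraps;
+	 * the barrier orders these writes before the base address is handed over */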
+ wmb();
+
+ pd->tx_ring_head = 0;
+ pd->tx_ring_tail = 0;
+
+ smsc9420_reg_write(pd, TX_BASE_ADDR, pd->tx_dma_addr);
+ smsc9420_pci_flush_write(pd);
+
+ return 0;
+}
+
+static int smsc9420_alloc_rx_ring(struct smsc9420_pdata *pd)
+{
+ int i;
+
+ BUG_ON(!pd->rx_ring);
+
+ pd->rx_buffers = kmalloc((sizeof(struct smsc9420_ring_info) *
+ RX_RING_SIZE), GFP_KERNEL);
+ if (pd->rx_buffers == NULL) {
+ smsc_warn(IFUP, "Failed to allocated rx_buffers");
+ goto out;
+ }
+
+ /* initialize the rx ring */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ pd->rx_ring[i].status = 0;
+ pd->rx_ring[i].length = PKT_BUF_SZ;
+ pd->rx_ring[i].buffer2 = 0;
+ pd->rx_buffers[i].skb = NULL;
+ pd->rx_buffers[i].mapping = 0;
+ }
+ pd->rx_ring[RX_RING_SIZE - 1].length = (PKT_BUF_SZ | RDES1_RER_);
+
+ /* now allocate the entire ring of skbs */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (smsc9420_alloc_rx_buffer(pd, i)) {
+ smsc_warn(IFUP, "failed to allocate rx skb %d", i);
+ goto out_free_rx_skbs;
+ }
+ }
+
+ pd->rx_ring_head = 0;
+ pd->rx_ring_tail = 0;
+
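+	/* load VLAN1 with the 802.1Q ethertype, presumably so tagged frames
+	 * are not counted as oversized */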
+ smsc9420_reg_write(pd, VLAN1, ETH_P_8021Q);
+ smsc_dbg(IFUP, "VLAN1 = 0x%08x", smsc9420_reg_read(pd, VLAN1));
+
+ if (pd->rx_csum) {
+ /* Enable RX COE */
+ u32 coe = smsc9420_reg_read(pd, COE_CR) | RX_COE_EN;
+ smsc9420_reg_write(pd, COE_CR, coe);
+ smsc_dbg(IFUP, "COE_CR = 0x%08x", coe);
+ }
+
+ smsc9420_reg_write(pd, RX_BASE_ADDR, pd->rx_dma_addr);
+ smsc9420_pci_flush_write(pd);
+
+ return 0;
+
+out_free_rx_skbs:
+ smsc9420_free_rx_ring(pd);
+out:
+ return -ENOMEM;
+}
+
+static int smsc9420_open(struct net_device *dev)
+{
+ struct smsc9420_pdata *pd;
+ u32 bus_mode, mac_cr, dmac_control, int_cfg, dma_intr_ena, int_ctl;
+ unsigned long flags;
+ int result = 0, timeout;
+
+ BUG_ON(!dev);
+ pd = netdev_priv(dev);
+ BUG_ON(!pd);
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ smsc_warn(IFUP, "dev_addr is not a valid MAC address");
+ result = -EADDRNOTAVAIL;
+ goto out_0;
+ }
+
+ netif_carrier_off(dev);
+
+ /* disable, mask and acknowledge all interrupts */
+ spin_lock_irqsave(&pd->int_lock, flags);
+ int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_);
+ smsc9420_reg_write(pd, INT_CFG, int_cfg);
+ smsc9420_reg_write(pd, INT_CTL, 0);
+ spin_unlock_irqrestore(&pd->int_lock, flags);
+ smsc9420_reg_write(pd, DMAC_INTR_ENA, 0);
+ smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF);
+ smsc9420_pci_flush_write(pd);
+
+ if (request_irq(dev->irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED,
+ DRV_NAME, pd)) {
+ smsc_warn(IFUP, "Unable to use IRQ = %d", dev->irq);
+ result = -ENODEV;
+ goto out_0;
+ }
+
+ smsc9420_dmac_soft_reset(pd);
+
+ /* make sure MAC_CR is sane */
+ smsc9420_reg_write(pd, MAC_CR, 0);
+
+ smsc9420_set_mac_address(dev);
+
+ /* Configure GPIO pins to drive LEDs */
+ smsc9420_reg_write(pd, GPIO_CFG,
+ (GPIO_CFG_LED_3_ | GPIO_CFG_LED_2_ | GPIO_CFG_LED_1_));
+
+ bus_mode = BUS_MODE_DMA_BURST_LENGTH_16;
+
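+	/* BUS_MODE_DBO_ selects big-endian descriptor byte ordering */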
+#ifdef __BIG_ENDIAN
+ bus_mode |= BUS_MODE_DBO_;
+#endif
+
+ smsc9420_reg_write(pd, BUS_MODE, bus_mode);
+
+ smsc9420_pci_flush_write(pd);
+
+ /* set bus master bridge arbitration priority for Rx and TX DMA */
+ smsc9420_reg_write(pd, BUS_CFG, BUS_CFG_RXTXWEIGHT_4_1);
+
+ smsc9420_reg_write(pd, DMAC_CONTROL,
+ (DMAC_CONTROL_SF_ | DMAC_CONTROL_OSF_));
+
+ smsc9420_pci_flush_write(pd);
+
+ /* test the IRQ connection to the ISR */
+ smsc_dbg(IFUP, "Testing ISR using IRQ %d", dev->irq);
+ pd->software_irq_signal = false;
+
+ spin_lock_irqsave(&pd->int_lock, flags);
+ /* configure interrupt deassertion timer and enable interrupts */
+ int_cfg = smsc9420_reg_read(pd, INT_CFG) | INT_CFG_IRQ_EN_;
+ int_cfg &= ~(INT_CFG_INT_DEAS_MASK);
+ int_cfg |= (INT_DEAS_TIME & INT_CFG_INT_DEAS_MASK);
+ smsc9420_reg_write(pd, INT_CFG, int_cfg);
+
+ /* unmask software interrupt */
+ int_ctl = smsc9420_reg_read(pd, INT_CTL) | INT_CTL_SW_INT_EN_;
+ smsc9420_reg_write(pd, INT_CTL, int_ctl);
+ spin_unlock_irqrestore(&pd->int_lock, flags);
+ smsc9420_pci_flush_write(pd);
+
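+	/* the ISR should see the software interrupt unmasked above and set
+	 * software_irq_signal; poll for up to ~1 second */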
+ timeout = 1000;
+ while (timeout--) {
+ if (pd->software_irq_signal)
+ break;
+ msleep(1);
+ }
+
+ /* disable interrupts */
+ spin_lock_irqsave(&pd->int_lock, flags);
+ int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_);
+ smsc9420_reg_write(pd, INT_CFG, int_cfg);
+ spin_unlock_irqrestore(&pd->int_lock, flags);
+
+ if (!pd->software_irq_signal) {
+ smsc_warn(IFUP, "ISR failed signaling test");
+ result = -ENODEV;
+ goto out_free_irq_1;
+ }
+
+ smsc_dbg(IFUP, "ISR passed test using IRQ %d", dev->irq);
+
+ result = smsc9420_alloc_tx_ring(pd);
+ if (result) {
+ smsc_warn(IFUP, "Failed to Initialize tx dma ring");
+ result = -ENOMEM;
+ goto out_free_irq_1;
+ }
+
+ result = smsc9420_alloc_rx_ring(pd);
+ if (result) {
+ smsc_warn(IFUP, "Failed to Initialize rx dma ring");
+ result = -ENOMEM;
+ goto out_free_tx_ring_2;
+ }
+
+ result = smsc9420_mii_init(dev);
+ if (result) {
+ smsc_warn(IFUP, "Failed to initialize Phy");
+ result = -ENODEV;
+ goto out_free_rx_ring_3;
+ }
+
+ /* Bring the PHY up */
+ phy_start(pd->phy_dev);
+
+ napi_enable(&pd->napi);
+
+ /* start tx and rx */
+ mac_cr = smsc9420_reg_read(pd, MAC_CR) | MAC_CR_TXEN_ | MAC_CR_RXEN_;
+ smsc9420_reg_write(pd, MAC_CR, mac_cr);
+
+ dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL);
+ dmac_control |= DMAC_CONTROL_ST_ | DMAC_CONTROL_SR_;
+ smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control);
+ smsc9420_pci_flush_write(pd);
+
+ dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
+ dma_intr_ena |=
+ (DMAC_INTR_ENA_TX_ | DMAC_INTR_ENA_RX_ | DMAC_INTR_ENA_NIS_);
+ smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena);
+ smsc9420_pci_flush_write(pd);
+
+ netif_wake_queue(dev);
+
+ smsc9420_reg_write(pd, RX_POLL_DEMAND, 1);
+
+ /* enable interrupts */
+ spin_lock_irqsave(&pd->int_lock, flags);
+ int_cfg = smsc9420_reg_read(pd, INT_CFG) | INT_CFG_IRQ_EN_;
+ smsc9420_reg_write(pd, INT_CFG, int_cfg);
+ spin_unlock_irqrestore(&pd->int_lock, flags);
+
+ return 0;
+
+out_free_rx_ring_3:
+ smsc9420_free_rx_ring(pd);
+out_free_tx_ring_2:
+ smsc9420_free_tx_ring(pd);
+out_free_irq_1:
+ free_irq(dev->irq, pd);
+out_0:
+ return result;
+}
+
+#ifdef CONFIG_PM
+
+static int smsc9420_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct smsc9420_pdata *pd = netdev_priv(dev);
+ u32 int_cfg;
+ ulong flags;
+
+ /* disable interrupts */
+ spin_lock_irqsave(&pd->int_lock, flags);
+ int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_);
+ smsc9420_reg_write(pd, INT_CFG, int_cfg);
+ spin_unlock_irqrestore(&pd->int_lock, flags);
+
+ if (netif_running(dev)) {
+ netif_tx_disable(dev);
+ smsc9420_stop_tx(pd);
+ smsc9420_free_tx_ring(pd);
+
+ napi_disable(&pd->napi);
+ smsc9420_stop_rx(pd);
+ smsc9420_free_rx_ring(pd);
+
+ free_irq(dev->irq, pd);
+
+ netif_device_detach(dev);
+ }
+
+ pci_save_state(pdev);
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int smsc9420_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct smsc9420_pdata *pd = netdev_priv(dev);
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_master(pdev);
+
+ err = pci_enable_wake(pdev, 0, 0);
+ if (err)
+ smsc_warn(IFUP, "pci_enable_wake failed: %d", err);
+
+ if (netif_running(dev)) {
+ err = smsc9420_open(dev);
+ netif_device_attach(dev);
+ }
+ return err;
+}
+
+#endif /* CONFIG_PM */
+
+static const struct net_device_ops smsc9420_netdev_ops = {
+ .ndo_open = smsc9420_open,
+ .ndo_stop = smsc9420_stop,
+ .ndo_start_xmit = smsc9420_hard_start_xmit,
+ .ndo_get_stats = smsc9420_get_stats,
+ .ndo_set_rx_mode = smsc9420_set_multicast_list,
+ .ndo_do_ioctl = smsc9420_do_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = smsc9420_poll_controller,
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+};
+
+static int __devinit
+smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct net_device *dev;
+ struct smsc9420_pdata *pd;
+ void __iomem *virt_addr;
+ int result = 0;
+ u32 id_rev;
+
+ printk(KERN_INFO DRV_DESCRIPTION " version " DRV_VERSION "\n");
+
+ /* First do the PCI initialisation */
+ result = pci_enable_device(pdev);
+ if (unlikely(result)) {
+ printk(KERN_ERR "Cannot enable smsc9420\n");
+ goto out_0;
+ }
+
+ pci_set_master(pdev);
+
+ dev = alloc_etherdev(sizeof(*pd));
+ if (!dev) {
+ printk(KERN_ERR "ether device alloc failed\n");
+ goto out_disable_pci_device_1;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (!(pci_resource_flags(pdev, SMSC_BAR) & IORESOURCE_MEM)) {
+ printk(KERN_ERR "Cannot find PCI device base address\n");
+ goto out_free_netdev_2;
+ }
+
+	if (pci_request_regions(pdev, DRV_NAME)) {
+ printk(KERN_ERR "Cannot obtain PCI resources, aborting.\n");
+ goto out_free_netdev_2;
+ }
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ printk(KERN_ERR "No usable DMA configuration, aborting.\n");
+ goto out_free_regions_3;
+ }
+
+ virt_addr = ioremap(pci_resource_start(pdev, SMSC_BAR),
+ pci_resource_len(pdev, SMSC_BAR));
+ if (!virt_addr) {
+ printk(KERN_ERR "Cannot map device registers, aborting.\n");
+ goto out_free_regions_3;
+ }
+
+ /* registers are double mapped with 0 offset for LE and 0x200 for BE */
+ virt_addr += LAN9420_CPSR_ENDIAN_OFFSET;
+
+ dev->base_addr = (ulong)virt_addr;
+
+ pd = netdev_priv(dev);
+
+ /* pci descriptors are created in the PCI consistent area */
+ pd->rx_ring = pci_alloc_consistent(pdev,
+ sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE +
+ sizeof(struct smsc9420_dma_desc) * TX_RING_SIZE,
+ &pd->rx_dma_addr);
+
+ if (!pd->rx_ring)
+ goto out_free_io_4;
+
+ /* descriptors are aligned due to the nature of pci_alloc_consistent */
+ pd->tx_ring = (struct smsc9420_dma_desc *)
+ (pd->rx_ring + RX_RING_SIZE);
+ pd->tx_dma_addr = pd->rx_dma_addr +
+ sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE;
+
+ pd->pdev = pdev;
+ pd->dev = dev;
+ pd->base_addr = virt_addr;
+ pd->msg_enable = smsc_debug;
+ pd->rx_csum = true;
+
+ smsc_dbg(PROBE, "lan_base=0x%08lx", (ulong)virt_addr);
+
+ id_rev = smsc9420_reg_read(pd, ID_REV);
+ switch (id_rev & 0xFFFF0000) {
+ case 0x94200000:
+ smsc_info(PROBE, "LAN9420 identified, ID_REV=0x%08X", id_rev);
+ break;
+ default:
+ smsc_warn(PROBE, "LAN9420 NOT identified");
+ smsc_warn(PROBE, "ID_REV=0x%08X", id_rev);
+ goto out_free_dmadesc_5;
+ }
+
+ smsc9420_dmac_soft_reset(pd);
+ smsc9420_eeprom_reload(pd);
+ smsc9420_check_mac_address(dev);
+
+ dev->netdev_ops = &smsc9420_netdev_ops;
+ dev->ethtool_ops = &smsc9420_ethtool_ops;
+ dev->irq = pdev->irq;
+
+ netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT);
+
+ result = register_netdev(dev);
+ if (result) {
+ smsc_warn(PROBE, "error %i registering device", result);
+ goto out_free_dmadesc_5;
+ }
+
+ pci_set_drvdata(pdev, dev);
+
+ spin_lock_init(&pd->int_lock);
+ spin_lock_init(&pd->phy_lock);
+
+ dev_info(&dev->dev, "MAC Address: %pM\n", dev->dev_addr);
+
+ return 0;
+
+out_free_dmadesc_5:
+ pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) *
+ (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);
+out_free_io_4:
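+	/* back out the endian offset applied after ioremap before unmapping */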
+ iounmap(virt_addr - LAN9420_CPSR_ENDIAN_OFFSET);
+out_free_regions_3:
+ pci_release_regions(pdev);
+out_free_netdev_2:
+ free_netdev(dev);
+out_disable_pci_device_1:
+ pci_disable_device(pdev);
+out_0:
+ return -ENODEV;
+}
+
+static void __devexit smsc9420_remove(struct pci_dev *pdev)
+{
+ struct net_device *dev;
+ struct smsc9420_pdata *pd;
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return;
+
+ pci_set_drvdata(pdev, NULL);
+
+ pd = netdev_priv(dev);
+ unregister_netdev(dev);
+
+ /* tx_buffers and rx_buffers are freed in stop */
+ BUG_ON(pd->tx_buffers);
+ BUG_ON(pd->rx_buffers);
+
+ BUG_ON(!pd->tx_ring);
+ BUG_ON(!pd->rx_ring);
+
+ pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) *
+ (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);
+
+ iounmap(pd->base_addr - LAN9420_CPSR_ENDIAN_OFFSET);
+ pci_release_regions(pdev);
+ free_netdev(dev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver smsc9420_driver = {
+ .name = DRV_NAME,
+ .id_table = smsc9420_id_table,
+ .probe = smsc9420_probe,
+ .remove = __devexit_p(smsc9420_remove),
+#ifdef CONFIG_PM
+ .suspend = smsc9420_suspend,
+ .resume = smsc9420_resume,
+#endif /* CONFIG_PM */
+};
+
+static int __init smsc9420_init_module(void)
+{
+ smsc_debug = netif_msg_init(debug, SMSC_MSG_DEFAULT);
+
+ return pci_register_driver(&smsc9420_driver);
+}
+
+static void __exit smsc9420_exit_module(void)
+{
+ pci_unregister_driver(&smsc9420_driver);
+}
+
+module_init(smsc9420_init_module);
+module_exit(smsc9420_exit_module);
diff --git a/drivers/net/ethernet/smsc/smsc9420.h b/drivers/net/ethernet/smsc/smsc9420.h
new file mode 100644
index 000000000000..e441402f77a2
--- /dev/null
+++ b/drivers/net/ethernet/smsc/smsc9420.h
@@ -0,0 +1,276 @@
+ /***************************************************************************
+ *
+ * Copyright (C) 2007,2008 SMSC
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ***************************************************************************
+ */
+
+#ifndef _SMSC9420_H
+#define _SMSC9420_H
+
+#define TX_RING_SIZE (32)
+#define RX_RING_SIZE (128)
+
+/* interrupt deassertion in multiples of 10us */
+#define INT_DEAS_TIME (50)
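+/* e.g. the default of 50 gives a 500us deassertion interval */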
+
+#define NAPI_WEIGHT (64)
+#define SMSC_BAR (3)
+
+#ifdef __BIG_ENDIAN
+/* Register set is duplicated for BE at an offset of 0x200 */
+#define LAN9420_CPSR_ENDIAN_OFFSET (0x200)
+#else
+#define LAN9420_CPSR_ENDIAN_OFFSET (0)
+#endif
+
+#define PCI_VENDOR_ID_9420 (0x1055)
+#define PCI_DEVICE_ID_9420 (0xE420)
+
+#define LAN_REGISTER_EXTENT (0x400)
+
+#define SMSC9420_EEPROM_SIZE ((u32)11)
+#define SMSC9420_EEPROM_MAGIC (0x9420)
+
+#define PKT_BUF_SZ (VLAN_ETH_FRAME_LEN + NET_IP_ALIGN + 4)
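+/* largest VLAN frame plus alignment padding; the extra 4 bytes presumably
+ * leave room for the FCS */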
+
+/***********************************************/
+/* DMA Controller Control and Status Registers */
+/***********************************************/
+#define BUS_MODE (0x00)
+#define BUS_MODE_SWR_ (BIT(0))
+#define BUS_MODE_DMA_BURST_LENGTH_1 (BIT(8))
+#define BUS_MODE_DMA_BURST_LENGTH_2 (BIT(9))
+#define BUS_MODE_DMA_BURST_LENGTH_4 (BIT(10))
+#define BUS_MODE_DMA_BURST_LENGTH_8 (BIT(11))
+#define BUS_MODE_DMA_BURST_LENGTH_16 (BIT(12))
+#define BUS_MODE_DMA_BURST_LENGTH_32 (BIT(13))
+#define BUS_MODE_DBO_ (BIT(20))
+
+#define TX_POLL_DEMAND (0x04)
+
+#define RX_POLL_DEMAND (0x08)
+
+#define RX_BASE_ADDR (0x0C)
+
+#define TX_BASE_ADDR (0x10)
+
+#define DMAC_STATUS (0x14)
+#define DMAC_STS_TS_ (7 << 20)
+#define DMAC_STS_RS_ (7 << 17)
+#define DMAC_STS_NIS_ (BIT(16))
+#define DMAC_STS_AIS_ (BIT(15))
+#define DMAC_STS_RWT_ (BIT(9))
+#define DMAC_STS_RXPS_ (BIT(8))
+#define DMAC_STS_RXBU_ (BIT(7))
+#define DMAC_STS_RX_ (BIT(6))
+#define DMAC_STS_TXUNF_ (BIT(5))
+#define DMAC_STS_TXBU_ (BIT(2))
+#define DMAC_STS_TXPS_ (BIT(1))
+#define DMAC_STS_TX_ (BIT(0))
+
+#define DMAC_CONTROL (0x18)
+#define DMAC_CONTROL_TTM_ (BIT(22))
+#define DMAC_CONTROL_SF_ (BIT(21))
+#define DMAC_CONTROL_ST_ (BIT(13))
+#define DMAC_CONTROL_OSF_ (BIT(2))
+#define DMAC_CONTROL_SR_ (BIT(1))
+
+#define DMAC_INTR_ENA (0x1C)
+#define DMAC_INTR_ENA_NIS_ (BIT(16))
+#define DMAC_INTR_ENA_AIS_ (BIT(15))
+#define DMAC_INTR_ENA_RWT_ (BIT(9))
+#define DMAC_INTR_ENA_RXPS_ (BIT(8))
+#define DMAC_INTR_ENA_RXBU_ (BIT(7))
+#define DMAC_INTR_ENA_RX_ (BIT(6))
+#define DMAC_INTR_ENA_TXBU_ (BIT(2))
+#define DMAC_INTR_ENA_TXPS_ (BIT(1))
+#define DMAC_INTR_ENA_TX_ (BIT(0))
+
+#define MISS_FRAME_CNTR (0x20)
+
+#define TX_BUFF_ADDR (0x50)
+
+#define RX_BUFF_ADDR (0x54)
+
+/* Transmit Descriptor Bit Defs */
+#define TDES0_OWN_ (0x80000000)
+#define TDES0_ERROR_SUMMARY_ (0x00008000)
+#define TDES0_LOSS_OF_CARRIER_ (0x00000800)
+#define TDES0_NO_CARRIER_ (0x00000400)
+#define TDES0_LATE_COLLISION_ (0x00000200)
+#define TDES0_EXCESSIVE_COLLISIONS_ (0x00000100)
+#define TDES0_HEARTBEAT_FAIL_ (0x00000080)
+#define TDES0_COLLISION_COUNT_MASK_ (0x00000078)
+#define TDES0_COLLISION_COUNT_SHFT_ (3)
+#define TDES0_EXCESSIVE_DEFERRAL_ (0x00000004)
+#define TDES0_DEFERRED_ (0x00000001)
+
+#define TDES1_IC_ 0x80000000
+#define TDES1_LS_ 0x40000000
+#define TDES1_FS_ 0x20000000
+#define TDES1_TXCSEN_ 0x08000000
+#define TDES1_TER_ (BIT(25))
+#define TDES1_TCH_ 0x01000000
+
+/* Receive Descriptor 0 Bit Defs */
+#define RDES0_OWN_ (0x80000000)
+#define RDES0_FRAME_LENGTH_MASK_ (0x07FF0000)
+#define RDES0_FRAME_LENGTH_SHFT_ (16)
+#define RDES0_ERROR_SUMMARY_ (0x00008000)
+#define RDES0_DESCRIPTOR_ERROR_ (0x00004000)
+#define RDES0_LENGTH_ERROR_ (0x00001000)
+#define RDES0_RUNT_FRAME_ (0x00000800)
+#define RDES0_MULTICAST_FRAME_ (0x00000400)
+#define RDES0_FIRST_DESCRIPTOR_ (0x00000200)
+#define RDES0_LAST_DESCRIPTOR_ (0x00000100)
+#define RDES0_FRAME_TOO_LONG_ (0x00000080)
+#define RDES0_COLLISION_SEEN_ (0x00000040)
+#define RDES0_FRAME_TYPE_ (0x00000020)
+#define RDES0_WATCHDOG_TIMEOUT_ (0x00000010)
+#define RDES0_MII_ERROR_ (0x00000008)
+#define RDES0_DRIBBLING_BIT_ (0x00000004)
+#define RDES0_CRC_ERROR_ (0x00000002)
+
+/* Receive Descriptor 1 Bit Defs */
+#define RDES1_RER_ (0x02000000)
+
+/***********************************************/
+/* MAC Control and Status Registers */
+/***********************************************/
+#define MAC_CR (0x80)
+#define MAC_CR_RXALL_ (0x80000000)
+#define MAC_CR_DIS_RXOWN_ (0x00800000)
+#define MAC_CR_LOOPBK_ (0x00200000)
+#define MAC_CR_FDPX_ (0x00100000)
+#define MAC_CR_MCPAS_ (0x00080000)
+#define MAC_CR_PRMS_ (0x00040000)
+#define MAC_CR_INVFILT_ (0x00020000)
+#define MAC_CR_PASSBAD_ (0x00010000)
+#define MAC_CR_HFILT_ (0x00008000)
+#define MAC_CR_HPFILT_ (0x00002000)
+#define MAC_CR_LCOLL_ (0x00001000)
+#define MAC_CR_DIS_BCAST_ (0x00000800)
+#define MAC_CR_DIS_RTRY_ (0x00000400)
+#define MAC_CR_PADSTR_ (0x00000100)
+#define MAC_CR_BOLMT_MSK (0x000000C0)
+#define MAC_CR_MFCHK_ (0x00000020)
+#define MAC_CR_TXEN_ (0x00000008)
+#define MAC_CR_RXEN_ (0x00000004)
+
+#define ADDRH (0x84)
+
+#define ADDRL (0x88)
+
+#define HASHH (0x8C)
+
+#define HASHL (0x90)
+
+#define MII_ACCESS (0x94)
+#define MII_ACCESS_MII_BUSY_ (0x00000001)
+#define MII_ACCESS_MII_WRITE_ (0x00000002)
+#define MII_ACCESS_MII_READ_ (0x00000000)
+#define MII_ACCESS_INDX_MSK_ (0x000007C0)
+#define MII_ACCESS_PHYADDR_MSK_ (0x0000F8C0)
+#define MII_ACCESS_INDX_SHFT_CNT (6)
+#define MII_ACCESS_PHYADDR_SHFT_CNT (11)
+
+#define MII_DATA (0x98)
+
+#define FLOW (0x9C)
+
+#define VLAN1 (0xA0)
+
+#define VLAN2 (0xA4)
+
+#define WUFF (0xA8)
+
+#define WUCSR (0xAC)
+
+#define COE_CR (0xB0)
+#define TX_COE_EN (0x00010000)
+#define RX_COE_MODE (0x00000002)
+#define RX_COE_EN (0x00000001)
+
+/***********************************************/
+/* System Control and Status Registers */
+/***********************************************/
+#define ID_REV (0xC0)
+
+#define INT_CTL (0xC4)
+#define INT_CTL_SW_INT_EN_ (0x00008000)
+#define INT_CTL_SBERR_INT_EN_ (1 << 12)
+#define INT_CTL_MBERR_INT_EN_ (1 << 13)
+#define INT_CTL_GPT_INT_EN_ (0x00000008)
+#define INT_CTL_PHY_INT_EN_ (0x00000004)
+#define INT_CTL_WAKE_INT_EN_ (0x00000002)
+
+#define INT_STAT (0xC8)
+#define INT_STAT_SW_INT_ (1 << 15)
+#define INT_STAT_MBERR_INT_ (1 << 13)
+#define INT_STAT_SBERR_INT_ (1 << 12)
+#define INT_STAT_GPT_INT_ (1 << 3)
+#define INT_STAT_PHY_INT_ (0x00000004)
+#define INT_STAT_WAKE_INT_ (0x00000002)
+#define INT_STAT_DMAC_INT_ (0x00000001)
+
+#define INT_CFG (0xCC)
+#define INT_CFG_IRQ_INT_ (0x00080000)
+#define INT_CFG_IRQ_EN_ (0x00040000)
+#define INT_CFG_INT_DEAS_CLR_ (0x00000200)
+#define INT_CFG_INT_DEAS_MASK (0x000000FF)
+
+#define GPIO_CFG (0xD0)
+#define GPIO_CFG_LED_3_ (0x40000000)
+#define GPIO_CFG_LED_2_ (0x20000000)
+#define GPIO_CFG_LED_1_ (0x10000000)
+#define GPIO_CFG_EEPR_EN_ (0x00700000)
+
+#define GPT_CFG (0xD4)
+#define GPT_CFG_TIMER_EN_ (0x20000000)
+
+#define GPT_CNT (0xD8)
+
+#define BUS_CFG (0xDC)
+#define BUS_CFG_RXTXWEIGHT_1_1 (0 << 25)
+#define BUS_CFG_RXTXWEIGHT_2_1 (1 << 25)
+#define BUS_CFG_RXTXWEIGHT_3_1 (2 << 25)
+#define BUS_CFG_RXTXWEIGHT_4_1 (3 << 25)
+
+#define PMT_CTRL (0xE0)
+
+#define FREE_RUN (0xF4)
+
+#define E2P_CMD (0xF8)
+#define E2P_CMD_EPC_BUSY_ (0x80000000)
+#define E2P_CMD_EPC_CMD_ (0x70000000)
+#define E2P_CMD_EPC_CMD_READ_ (0x00000000)
+#define E2P_CMD_EPC_CMD_EWDS_ (0x10000000)
+#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000)
+#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000)
+#define E2P_CMD_EPC_CMD_WRAL_ (0x40000000)
+#define E2P_CMD_EPC_CMD_ERASE_ (0x50000000)
+#define E2P_CMD_EPC_CMD_ERAL_ (0x60000000)
+#define E2P_CMD_EPC_CMD_RELOAD_ (0x70000000)
+#define E2P_CMD_EPC_TIMEOUT_ (0x00000200)
+#define E2P_CMD_MAC_ADDR_LOADED_ (0x00000100)
+#define E2P_CMD_EPC_ADDR_ (0x000000FF)
+
+#define E2P_DATA (0xFC)
+#define E2P_DATA_EEPROM_DATA_ (0x000000FF)
+
+#endif /* _SMSC9420_H */
diff --git a/drivers/net/ethernet/stmicro/Kconfig b/drivers/net/ethernet/stmicro/Kconfig
new file mode 100644
index 000000000000..f4a80da00650
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/Kconfig
@@ -0,0 +1,23 @@
+#
+# STMicroelectronics device configuration
+#
+
+config NET_VENDOR_STMICRO
+ bool "STMicroelectronics devices"
+ default y
+ depends on HAS_IOMEM
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about STMicroelectronics cards. If you say Y, you will
+ be asked for your specific card in the following questions.
+
+if NET_VENDOR_STMICRO
+
+source "drivers/net/ethernet/stmicro/stmmac/Kconfig"
+
+endif # NET_VENDOR_STMICRO
diff --git a/drivers/net/ethernet/stmicro/Makefile b/drivers/net/ethernet/stmicro/Makefile
new file mode 100644
index 000000000000..9b3bfddda7dd
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the STMicroelectronics device drivers.
+#
+
+obj-$(CONFIG_STMMAC_ETH) += stmmac/
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
new file mode 100644
index 000000000000..8cd9ddec05a0
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -0,0 +1,66 @@
+config STMMAC_ETH
+ tristate "STMicroelectronics 10/100/1000 Ethernet driver"
+ depends on HAS_IOMEM
+ select NET_CORE
+ select MII
+ select PHYLIB
+ select CRC32
+ ---help---
+	  This is the driver for the Ethernet IPs built around a
+	  Synopsys IP Core, tested only on STMicroelectronics
+	  platforms.
+
+if STMMAC_ETH
+
+config STMMAC_DEBUG_FS
+ bool "Enable monitoring via sysFS "
+ default n
+ depends on STMMAC_ETH && DEBUG_FS
+	---help---
+ The stmmac entry in /sys reports DMA TX/RX rings
+ or (if supported) the HW cap register.
+
+config STMMAC_DA
+ bool "STMMAC DMA arbitration scheme"
+ default n
+ ---help---
+	  Selecting this option gives RX priority over TX (only for
+	  Gigabit Ethernet devices).
+	  By default, the DMA arbitration scheme is round-robin
+	  (rx:tx priority is 1:1).
+
+config STMMAC_DUAL_MAC
+ bool "STMMAC: dual mac support (EXPERIMENTAL)"
+ default n
+ depends on EXPERIMENTAL && STMMAC_ETH && !STMMAC_TIMER
+ ---help---
+ Some ST SoCs (for example the stx7141 and stx7200c2) have two
+ Ethernet Controllers. This option turns on the second Ethernet
+ device on this kind of platforms.
+
+config STMMAC_TIMER
+ bool "STMMAC Timer optimisation"
+ default n
+ depends on RTC_HCTOSYS_DEVICE
+ ---help---
+ Use an external timer for mitigating the number of network
+ interrupts. Currently, for SH architectures, it is possible
+ to use the TMU channel 2 and the SH-RTC device.
+
+choice
+ prompt "Select Timer device"
+ depends on STMMAC_TIMER
+
+config STMMAC_TMU_TIMER
+ bool "TMU channel 2"
+ depends on CPU_SH4
+ ---help---
+
+config STMMAC_RTC_TIMER
+ bool "Real time clock"
+ depends on RTC_CLASS
+ ---help---
+
+endchoice
+
+endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
new file mode 100644
index 000000000000..0f23d95746b7
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_STMMAC_ETH) += stmmac.o
+stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
+stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
+ dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
+ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
+ mmc_core.o $(stmmac-y)
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
new file mode 100644
index 000000000000..22c61b2ebfa3
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -0,0 +1,276 @@
+/*******************************************************************************
+ STMMAC Common Header File
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/netdevice.h>
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define STMMAC_VLAN_TAG_USED
+#include <linux/if_vlan.h>
+#endif
+
+#include "descs.h"
+#include "mmc.h"
+
+#undef CHIP_DEBUG_PRINT
+/* Turn-on extra printk debug for MAC core, dma and descriptors */
+/* #define CHIP_DEBUG_PRINT */
+
+#ifdef CHIP_DEBUG_PRINT
+#define CHIP_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define CHIP_DBG(fmt, args...) do { } while (0)
+#endif
+
+#undef FRAME_FILTER_DEBUG
+/* #define FRAME_FILTER_DEBUG */
+
+struct stmmac_extra_stats {
+ /* Transmit errors */
+ unsigned long tx_underflow ____cacheline_aligned;
+ unsigned long tx_carrier;
+ unsigned long tx_losscarrier;
+ unsigned long tx_heartbeat;
+ unsigned long tx_deferred;
+ unsigned long tx_vlan;
+ unsigned long tx_jabber;
+ unsigned long tx_frame_flushed;
+ unsigned long tx_payload_error;
+ unsigned long tx_ip_header_error;
+ /* Receive errors */
+ unsigned long rx_desc;
+ unsigned long rx_partial;
+ unsigned long rx_runt;
+ unsigned long rx_toolong;
+ unsigned long rx_collision;
+ unsigned long rx_crc;
+ unsigned long rx_length;
+ unsigned long rx_mii;
+ unsigned long rx_multicast;
+ unsigned long rx_gmac_overflow;
+ unsigned long rx_watchdog;
+ unsigned long da_rx_filter_fail;
+ unsigned long sa_rx_filter_fail;
+ unsigned long rx_missed_cntr;
+ unsigned long rx_overflow_cntr;
+ unsigned long rx_vlan;
+ /* Tx/Rx IRQ errors */
+ unsigned long tx_undeflow_irq;
+ unsigned long tx_process_stopped_irq;
+ unsigned long tx_jabber_irq;
+ unsigned long rx_overflow_irq;
+ unsigned long rx_buf_unav_irq;
+ unsigned long rx_process_stopped_irq;
+ unsigned long rx_watchdog_irq;
+ unsigned long tx_early_irq;
+ unsigned long fatal_bus_error_irq;
+ /* Extra info */
+ unsigned long threshold;
+ unsigned long tx_pkt_n;
+ unsigned long rx_pkt_n;
+ unsigned long poll_n;
+ unsigned long sched_timer_n;
+ unsigned long normal_irq_n;
+};
+
+#define HASH_TABLE_SIZE 64
+#define PAUSE_TIME 0x200
+
+/* Flow Control defines */
+#define FLOW_OFF 0
+#define FLOW_RX 1
+#define FLOW_TX 2
+#define FLOW_AUTO (FLOW_TX | FLOW_RX)
+
+#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
+
+enum rx_frame_status { /* IPC status */
+ good_frame = 0,
+ discard_frame = 1,
+ csum_none = 2,
+ llc_snap = 4,
+};
+
+enum tx_dma_irq_status {
+ tx_hard_error = 1,
+ tx_hard_error_bump_tc = 2,
+ handle_tx_rx = 3,
+};
+
+/* DMA HW capabilities */
+struct dma_features {
+ unsigned int mbps_10_100;
+ unsigned int mbps_1000;
+ unsigned int half_duplex;
+ unsigned int hash_filter;
+ unsigned int multi_addr;
+ unsigned int pcs;
+ unsigned int sma_mdio;
+ unsigned int pmt_remote_wake_up;
+ unsigned int pmt_magic_frame;
+ unsigned int rmon;
+ /* IEEE 1588-2002*/
+ unsigned int time_stamp;
+ /* IEEE 1588-2008*/
+ unsigned int atime_stamp;
+ /* 802.3az - Energy-Efficient Ethernet (EEE) */
+ unsigned int eee;
+ unsigned int av;
+ /* TX and RX csum */
+ unsigned int tx_coe;
+ unsigned int rx_coe_type1;
+ unsigned int rx_coe_type2;
+ unsigned int rxfifo_over_2048;
+ /* TX and RX number of channels */
+ unsigned int number_rx_channel;
+ unsigned int number_tx_channel;
+ /* Alternate (enhanced) DESC mode*/
+ unsigned int enh_desc;
+};
+
+/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
+#define BUF_SIZE_16KiB 16384
+#define BUF_SIZE_8KiB 8192
+#define BUF_SIZE_4KiB 4096
+#define BUF_SIZE_2KiB 2048
+
+/* Power Down and WOL */
+#define PMT_NOT_SUPPORTED 0
+#define PMT_SUPPORTED 1
+
+/* Common MAC defines */
+#define MAC_CTRL_REG 0x00000000 /* MAC Control */
+#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
+#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
+
+struct stmmac_desc_ops {
+ /* DMA RX descriptor ring initialization */
+ void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
+ int disable_rx_ic);
+ /* DMA TX descriptor ring initialization */
+ void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
+
+ /* Invoked by the xmit function to prepare the tx descriptor */
+ void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
+ int csum_flag);
+ /* Set/get the owner of the descriptor */
+ void (*set_tx_owner) (struct dma_desc *p);
+ int (*get_tx_owner) (struct dma_desc *p);
+ /* Invoked by the xmit function to close the tx descriptor */
+ void (*close_tx_desc) (struct dma_desc *p);
+ /* Clean the tx descriptor as soon as the tx irq is received */
+ void (*release_tx_desc) (struct dma_desc *p);
+ /* Clear interrupt on tx frame completion. When this bit is
+ * set an interrupt happens as soon as the frame is transmitted */
+ void (*clear_tx_ic) (struct dma_desc *p);
+ /* Last tx segment reports the transmit status */
+ int (*get_tx_ls) (struct dma_desc *p);
+ /* Return the transmit status looking at the TDES1 */
+ int (*tx_status) (void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, void __iomem *ioaddr);
+ /* Get the buffer size from the descriptor */
+ int (*get_tx_len) (struct dma_desc *p);
+ /* Handle extra events on specific interrupts hw dependent */
+ int (*get_rx_owner) (struct dma_desc *p);
+ void (*set_rx_owner) (struct dma_desc *p);
+ /* Get the receive frame size */
+ int (*get_rx_frame_len) (struct dma_desc *p);
+ /* Return the reception status looking at the RDES1 */
+ int (*rx_status) (void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p);
+};
+
+struct stmmac_dma_ops {
+ /* DMA core initialization */
+ int (*init) (void __iomem *ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
+ /* Dump DMA registers */
+ void (*dump_regs) (void __iomem *ioaddr);
+ /* Set tx/rx threshold in the csr6 register
+ * An invalid value enables the store-and-forward mode */
+ void (*dma_mode) (void __iomem *ioaddr, int txmode, int rxmode);
+ /* To track extra statistic (if supported) */
+ void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
+ void __iomem *ioaddr);
+ void (*enable_dma_transmission) (void __iomem *ioaddr);
+ void (*enable_dma_irq) (void __iomem *ioaddr);
+ void (*disable_dma_irq) (void __iomem *ioaddr);
+ void (*start_tx) (void __iomem *ioaddr);
+ void (*stop_tx) (void __iomem *ioaddr);
+ void (*start_rx) (void __iomem *ioaddr);
+ void (*stop_rx) (void __iomem *ioaddr);
+ int (*dma_interrupt) (void __iomem *ioaddr,
+ struct stmmac_extra_stats *x);
+ /* If supported then get the optional core features */
+ unsigned int (*get_hw_feature) (void __iomem *ioaddr);
+};
+
+struct stmmac_ops {
+ /* MAC core initialization */
+ void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned;
+ /* Support checksum offload engine */
+ int (*rx_coe) (void __iomem *ioaddr);
+ /* Dump MAC registers */
+ void (*dump_regs) (void __iomem *ioaddr);
+ /* Handle extra events on specific interrupts hw dependent */
+ void (*host_irq_status) (void __iomem *ioaddr);
+ /* Multicast filter setting */
+ void (*set_filter) (struct net_device *dev);
+ /* Flow control setting */
+ void (*flow_ctrl) (void __iomem *ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time);
+ /* Set power management mode (e.g. magic frame) */
+ void (*pmt) (void __iomem *ioaddr, unsigned long mode);
+ /* Set/Get Unicast MAC addresses */
+ void (*set_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n);
+ void (*get_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n);
+};
+
+struct mac_link {
+ int port;
+ int duplex;
+ int speed;
+};
+
+struct mii_regs {
+ unsigned int addr; /* MII Address */
+ unsigned int data; /* MII Data */
+};
+
+struct mac_device_info {
+ const struct stmmac_ops *mac;
+ const struct stmmac_desc_ops *desc;
+ const struct stmmac_dma_ops *dma;
+ struct mii_regs mii; /* MII register Addresses */
+ struct mac_link link;
+ unsigned int synopsys_uid;
+};
+
+struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr);
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
+
+extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low);
+extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low);
+extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
new file mode 100644
index 000000000000..63a03e264694
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -0,0 +1,163 @@
+/*******************************************************************************
+ Header File to describe the DMA descriptors.
+  Enhanced descriptors are used in the case of DWMAC1000 cores.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+struct dma_desc {
+ /* Receive descriptor */
+ union {
+ struct {
+ /* RDES0 */
+ u32 reserved1:1;
+ u32 crc_error:1;
+ u32 dribbling:1;
+ u32 mii_error:1;
+ u32 receive_watchdog:1;
+ u32 frame_type:1;
+ u32 collision:1;
+ u32 frame_too_long:1;
+ u32 last_descriptor:1;
+ u32 first_descriptor:1;
+ u32 multicast_frame:1;
+ u32 run_frame:1;
+ u32 length_error:1;
+ u32 partial_frame_error:1;
+ u32 descriptor_error:1;
+ u32 error_summary:1;
+ u32 frame_length:14;
+ u32 filtering_fail:1;
+ u32 own:1;
+ /* RDES1 */
+ u32 buffer1_size:11;
+ u32 buffer2_size:11;
+ u32 reserved2:2;
+ u32 second_address_chained:1;
+ u32 end_ring:1;
+ u32 reserved3:5;
+ u32 disable_ic:1;
+ } rx;
+ struct {
+ /* RDES0 */
+ u32 payload_csum_error:1;
+ u32 crc_error:1;
+ u32 dribbling:1;
+ u32 error_gmii:1;
+ u32 receive_watchdog:1;
+ u32 frame_type:1;
+ u32 late_collision:1;
+ u32 ipc_csum_error:1;
+ u32 last_descriptor:1;
+ u32 first_descriptor:1;
+ u32 vlan_tag:1;
+ u32 overflow_error:1;
+ u32 length_error:1;
+ u32 sa_filter_fail:1;
+ u32 descriptor_error:1;
+ u32 error_summary:1;
+ u32 frame_length:14;
+ u32 da_filter_fail:1;
+ u32 own:1;
+ /* RDES1 */
+ u32 buffer1_size:13;
+ u32 reserved1:1;
+ u32 second_address_chained:1;
+ u32 end_ring:1;
+ u32 buffer2_size:13;
+ u32 reserved2:2;
+ u32 disable_ic:1;
+ } erx; /* -- enhanced -- */
+
+ /* Transmit descriptor */
+ struct {
+ /* TDES0 */
+ u32 deferred:1;
+ u32 underflow_error:1;
+ u32 excessive_deferral:1;
+ u32 collision_count:4;
+ u32 heartbeat_fail:1;
+ u32 excessive_collisions:1;
+ u32 late_collision:1;
+ u32 no_carrier:1;
+ u32 loss_carrier:1;
+ u32 reserved1:3;
+ u32 error_summary:1;
+ u32 reserved2:15;
+ u32 own:1;
+ /* TDES1 */
+ u32 buffer1_size:11;
+ u32 buffer2_size:11;
+ u32 reserved3:1;
+ u32 disable_padding:1;
+ u32 second_address_chained:1;
+ u32 end_ring:1;
+ u32 crc_disable:1;
+ u32 reserved4:2;
+ u32 first_segment:1;
+ u32 last_segment:1;
+ u32 interrupt:1;
+ } tx;
+ struct {
+ /* TDES0 */
+ u32 deferred:1;
+ u32 underflow_error:1;
+ u32 excessive_deferral:1;
+ u32 collision_count:4;
+ u32 vlan_frame:1;
+ u32 excessive_collisions:1;
+ u32 late_collision:1;
+ u32 no_carrier:1;
+ u32 loss_carrier:1;
+ u32 payload_error:1;
+ u32 frame_flushed:1;
+ u32 jabber_timeout:1;
+ u32 error_summary:1;
+ u32 ip_header_error:1;
+ u32 time_stamp_status:1;
+ u32 reserved1:2;
+ u32 second_address_chained:1;
+ u32 end_ring:1;
+ u32 checksum_insertion:2;
+ u32 reserved2:1;
+ u32 time_stamp_enable:1;
+ u32 disable_padding:1;
+ u32 crc_disable:1;
+ u32 first_segment:1;
+ u32 last_segment:1;
+ u32 interrupt:1;
+ u32 own:1;
+ /* TDES1 */
+ u32 buffer1_size:13;
+ u32 reserved3:3;
+ u32 buffer2_size:13;
+ u32 reserved4:3;
+ } etx; /* -- enhanced -- */
+ } des01;
+ unsigned int des2;
+ unsigned int des3;
+};
+
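+/* Usage sketch: drivers test and pass ownership through the union arm that
+ * matches the core, e.g. p->des01.rx.own for the normal layout or
+ * p->des01.erx.own for the enhanced DWMAC1000 layout.
+ */
+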
+/* Transmit checksum insertion control */
+enum tdes_csum_insertion {
+ cic_disabled = 0, /* Checksum Insertion Control */
+ cic_only_ip = 1, /* Only IP header */
+ cic_no_pseudoheader = 2, /* IP header but pseudoheader
+ * is not calculated */
+ cic_full = 3, /* IP header and pseudoheader */
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
new file mode 100644
index 000000000000..7c6d857a9cc7
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
@@ -0,0 +1,121 @@
+/*******************************************************************************
+ MAC 10/100 Header File
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/phy.h>
+#include "common.h"
+
+/*----------------------------------------------------------------------------
+ * MAC BLOCK defines
+ *---------------------------------------------------------------------------*/
+/* MAC CSR offset */
+#define MAC_CONTROL 0x00000000 /* MAC Control */
+#define MAC_ADDR_HIGH 0x00000004 /* MAC Address High */
+#define MAC_ADDR_LOW 0x00000008 /* MAC Address Low */
+#define MAC_HASH_HIGH 0x0000000c /* Multicast Hash Table High */
+#define MAC_HASH_LOW 0x00000010 /* Multicast Hash Table Low */
+#define MAC_MII_ADDR 0x00000014 /* MII Address */
+#define MAC_MII_DATA 0x00000018 /* MII Data */
+#define MAC_FLOW_CTRL 0x0000001c /* Flow Control */
+#define MAC_VLAN1 0x00000020 /* VLAN1 Tag */
+#define MAC_VLAN2 0x00000024 /* VLAN2 Tag */
+
+/* MAC CTRL defines */
+#define MAC_CONTROL_RA 0x80000000 /* Receive All Mode */
+#define MAC_CONTROL_BLE 0x40000000 /* Endian Mode */
+#define MAC_CONTROL_HBD 0x10000000 /* Heartbeat Disable */
+#define MAC_CONTROL_PS 0x08000000 /* Port Select */
+#define MAC_CONTROL_DRO 0x00800000 /* Disable Receive Own */
+#define MAC_CONTROL_EXT_LOOPBACK 0x00400000 /* Reserved (ext loopback?) */
+#define MAC_CONTROL_OM 0x00200000 /* Loopback Operating Mode */
+#define MAC_CONTROL_F 0x00100000 /* Full Duplex Mode */
+#define MAC_CONTROL_PM 0x00080000 /* Pass All Multicast */
+#define MAC_CONTROL_PR 0x00040000 /* Promiscuous Mode */
+#define MAC_CONTROL_IF 0x00020000 /* Inverse Filtering */
+#define MAC_CONTROL_PB 0x00010000 /* Pass Bad Frames */
+#define MAC_CONTROL_HO 0x00008000 /* Hash Only Filtering Mode */
+#define MAC_CONTROL_HP 0x00002000 /* Hash/Perfect Filtering Mode */
+#define MAC_CONTROL_LCC 0x00001000 /* Late Collision Control */
+#define MAC_CONTROL_DBF 0x00000800 /* Disable Broadcast Frames */
+#define MAC_CONTROL_DRTY 0x00000400 /* Disable Retry */
+#define MAC_CONTROL_ASTP 0x00000100 /* Automatic Pad Stripping */
+#define MAC_CONTROL_BOLMT_10 0x00000000 /* Back Off Limit 10 */
+#define MAC_CONTROL_BOLMT_8 0x00000040 /* Back Off Limit 8 */
+#define MAC_CONTROL_BOLMT_4 0x00000080 /* Back Off Limit 4 */
+#define MAC_CONTROL_BOLMT_1 0x000000c0 /* Back Off Limit 1 */
+#define MAC_CONTROL_DC 0x00000020 /* Deferral Check */
+#define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
+#define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */
+
+#define MAC_CORE_INIT (MAC_CONTROL_HBD | MAC_CONTROL_ASTP)
+
+/* MAC FLOW CTRL defines */
+#define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
+#define MAC_FLOW_CTRL_PT_SHIFT 16
+#define MAC_FLOW_CTRL_PASS 0x00000004 /* Pass Control Frames */
+#define MAC_FLOW_CTRL_ENABLE 0x00000002 /* Flow Control Enable */
+#define MAC_FLOW_CTRL_PAUSE 0x00000001 /* Flow Control Busy ... */
+
+/* MII ADDR defines */
+#define MAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
+#define MAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
+
+/*----------------------------------------------------------------------------
+ * DMA BLOCK defines
+ *---------------------------------------------------------------------------*/
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_DBO 0x00100000 /* Descriptor Byte Ordering */
+#define DMA_BUS_MODE_BLE 0x00000080 /* Big Endian/Little Endian */
+#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT 8
+#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
+#define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */
+#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
+#define DMA_BUS_MODE_DEFAULT 0x00000000
+
+/* DMA Control register defines */
+#define DMA_CONTROL_SF 0x00200000 /* Store And Forward */
+
+/* Transmit Threshold Control */
+enum ttc_control {
+ DMA_CONTROL_TTC_DEFAULT = 0x00000000, /* Threshold is 32 DWORDS */
+ DMA_CONTROL_TTC_64 = 0x00004000, /* Threshold is 64 DWORDS */
+ DMA_CONTROL_TTC_128 = 0x00008000, /* Threshold is 128 DWORDS */
+ DMA_CONTROL_TTC_256 = 0x0000c000, /* Threshold is 256 DWORDS */
+ DMA_CONTROL_TTC_18 = 0x00400000, /* Threshold is 18 DWORDS */
+ DMA_CONTROL_TTC_24 = 0x00404000, /* Threshold is 24 DWORDS */
+ DMA_CONTROL_TTC_32 = 0x00408000, /* Threshold is 32 DWORDS */
+ DMA_CONTROL_TTC_40 = 0x0040c000, /* Threshold is 40 DWORDS */
+ DMA_CONTROL_SE = 0x00000008, /* Stop On Empty */
+ DMA_CONTROL_OSF = 0x00000004, /* Operate On 2nd Frame */
+};
+
+/* STMAC110 DMA Missed Frame Counter register defines */
+#define DMA_MISSED_FRAME_OVE	0x10000000	/* FIFO Overflow Counter Overflow */
+#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */
+#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */
+#define DMA_MISSED_FRAME_M_CNTR	0x0000ffff	/* Missed Frame Counter */
+
+extern const struct stmmac_dma_ops dwmac100_dma_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
new file mode 100644
index 000000000000..cfcef0ea0fa5
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -0,0 +1,208 @@
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/phy.h>
+#include "common.h"
+
+#define GMAC_CONTROL 0x00000000 /* Configuration */
+#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */
+#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
+#define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */
+#define GMAC_MII_ADDR 0x00000010 /* MII Address */
+#define GMAC_MII_DATA 0x00000014 /* MII Data */
+#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */
+#define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */
+#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
+#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
+
+#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
+enum dwmac1000_irq_status {
+ time_stamp_irq = 0x0200,
+ mmc_rx_csum_offload_irq = 0x0080,
+ mmc_tx_irq = 0x0040,
+ mmc_rx_irq = 0x0020,
+ mmc_irq = 0x0010,
+ pmt_irq = 0x0008,
+ pcs_ane_irq = 0x0004,
+ pcs_link_irq = 0x0002,
+ rgmii_irq = 0x0001,
+};
+#define GMAC_INT_MASK 0x0000003c /* interrupt mask register */
+
+/* PMT Control and Status */
+#define GMAC_PMT 0x0000002c
+enum power_event {
+ pointer_reset = 0x80000000,
+ global_unicast = 0x00000200,
+ wake_up_rx_frame = 0x00000040,
+ magic_frame = 0x00000020,
+ wake_up_frame_en = 0x00000004,
+ magic_pkt_en = 0x00000002,
+ power_down = 0x00000001,
+};
+
+/* GMAC HW ADDR regs */
+#define GMAC_ADDR_HIGH(reg) (0x00000040+(reg * 8))
+#define GMAC_ADDR_LOW(reg) (0x00000044+(reg * 8))
+#define GMAC_MAX_UNICAST_ADDRESSES 16
+
+#define GMAC_AN_CTRL 0x000000c0 /* AN control */
+#define GMAC_AN_STATUS 0x000000c4 /* AN status */
+#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */
+#define GMAC_ANE_LINK	0x000000cc	/* Auto-Neg. link partner ability */
+#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */
+#define GMAC_TBI 0x000000d4 /* TBI extend status */
+#define GMAC_GMII_STATUS 0x000000d8 /* S/R-GMII status */
+
+/* GMAC Configuration defines */
+#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
+#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */
+#define GMAC_CONTROL_JD 0x00400000 /* Jabber disable */
+#define GMAC_CONTROL_BE 0x00200000 /* Frame Burst Enable */
+#define GMAC_CONTROL_JE 0x00100000 /* Jumbo frame */
+enum inter_frame_gap {
+ GMAC_CONTROL_IFG_88 = 0x00040000,
+ GMAC_CONTROL_IFG_80 = 0x00020000,
+ GMAC_CONTROL_IFG_40 = 0x000e0000,
+};
+#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense during tx */
+#define GMAC_CONTROL_PS		0x00008000	/* Port Select 0:GMII 1:MII */
+#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */
+#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */
+#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
+#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */
+#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
+#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */
+#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */
+#define GMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Stripping */
+#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */
+#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
+#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
+
+#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
+ GMAC_CONTROL_JE | GMAC_CONTROL_BE)
+
+/* GMAC Frame Filter defines */
+#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
+#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
+#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
+#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
+#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
+#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
+#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
+#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
+#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
+#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
+/* GMII ADDR defines */
+#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
+#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
+/* GMAC FLOW CTRL defines */
+#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
+#define GMAC_FLOW_CTRL_PT_SHIFT 16
+#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
+#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
+#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
+
+/*--- DMA BLOCK defines ---*/
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
+#define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */
+#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
+/* Programmable burst length (passed through platform)*/
+#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT 8
+
+enum rx_tx_priority_ratio {
+ double_ratio = 0x00004000, /*2:1 */
+ triple_ratio = 0x00008000, /*3:1 */
+ quadruple_ratio = 0x0000c000, /*4:1 */
+};
+
+#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
+#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
+#define DMA_BUS_MODE_RPBL_SHIFT 17
+#define DMA_BUS_MODE_USP 0x00800000
+#define DMA_BUS_MODE_4PBL 0x01000000
+#define DMA_BUS_MODE_AAL 0x02000000
+
+/* DMA CRS Control and Status Register Mapping */
+#define DMA_HOST_TX_DESC 0x00001048 /* Current Host Tx descriptor */
+#define DMA_HOST_RX_DESC 0x0000104c /* Current Host Rx descriptor */
+/* DMA Bus Mode register defines */
+#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */
+#define DMA_BUS_PR_RATIO_SHIFT 14
+#define DMA_BUS_FB 0x00010000 /* Fixed Burst */
+
+/* DMA operation mode defines (start/stop tx/rx are placed in common header)*/
+#define DMA_CONTROL_DT 0x04000000 /* Disable Drop TCP/IP csum error */
+#define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */
+#define DMA_CONTROL_DFF	0x01000000	/* Disable flushing */
+/* Threshold for Activating the FC */
+enum rfa {
+ act_full_minus_1 = 0x00800000,
+ act_full_minus_2 = 0x00800200,
+ act_full_minus_3 = 0x00800400,
+ act_full_minus_4 = 0x00800600,
+};
+/* Threshold for Deactivating the FC */
+enum rfd {
+ deac_full_minus_1 = 0x00400000,
+ deac_full_minus_2 = 0x00400800,
+ deac_full_minus_3 = 0x00401000,
+ deac_full_minus_4 = 0x00401800,
+};
+#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */
+
+enum ttc_control {
+ DMA_CONTROL_TTC_64 = 0x00000000,
+ DMA_CONTROL_TTC_128 = 0x00004000,
+ DMA_CONTROL_TTC_192 = 0x00008000,
+ DMA_CONTROL_TTC_256 = 0x0000c000,
+ DMA_CONTROL_TTC_40 = 0x00010000,
+ DMA_CONTROL_TTC_32 = 0x00014000,
+ DMA_CONTROL_TTC_24 = 0x00018000,
+ DMA_CONTROL_TTC_16 = 0x0001c000,
+};
+#define DMA_CONTROL_TC_TX_MASK 0xfffe3fff
+
+#define DMA_CONTROL_EFC 0x00000100
+#define DMA_CONTROL_FEF 0x00000080
+#define DMA_CONTROL_FUF 0x00000040
+
+enum rtc_control {
+ DMA_CONTROL_RTC_64 = 0x00000000,
+ DMA_CONTROL_RTC_32 = 0x00000008,
+ DMA_CONTROL_RTC_96 = 0x00000010,
+ DMA_CONTROL_RTC_128 = 0x00000018,
+};
+#define DMA_CONTROL_TC_RX_MASK 0xffffffe7
+
+#define DMA_CONTROL_OSF 0x00000004 /* Operate on second frame */
+
+/* MMC registers offset */
+#define GMAC_MMC_CTRL 0x100
+#define GMAC_MMC_RX_INTR 0x104
+#define GMAC_MMC_TX_INTR 0x108
+#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
+
+extern const struct stmmac_dma_ops dwmac1000_dma_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
new file mode 100644
index 000000000000..b1c48b975945
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -0,0 +1,244 @@
+/*******************************************************************************
+ This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
+ developing this code.
+
+ This only implements the mac core functions for this chip.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include "dwmac1000.h"
+
+static void dwmac1000_core_init(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + GMAC_CONTROL);
+ value |= GMAC_CORE_INIT;
+ writel(value, ioaddr + GMAC_CONTROL);
+
+ /* Mask GMAC interrupts */
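+	/* 0x207 = time_stamp_irq | pcs_ane_irq | pcs_link_irq | rgmii_irq */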
+ writel(0x207, ioaddr + GMAC_INT_MASK);
+
+#ifdef STMMAC_VLAN_TAG_USED
+ /* Tag detection without filtering */
+ writel(0x0, ioaddr + GMAC_VLAN_TAG);
+#endif
+}
+
+static int dwmac1000_rx_coe_supported(void __iomem *ioaddr)
+{
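+	/* probe for RX checksum offload by trying to set the IPC bit: it
+	 * reads back as set only when the core supports COE */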
+ u32 value = readl(ioaddr + GMAC_CONTROL);
+
+ value |= GMAC_CONTROL_IPC;
+ writel(value, ioaddr + GMAC_CONTROL);
+
+ value = readl(ioaddr + GMAC_CONTROL);
+
+ return !!(value & GMAC_CONTROL_IPC);
+}
+
+static void dwmac1000_dump_regs(void __iomem *ioaddr)
+{
+ int i;
+ pr_info("\tDWMAC1000 regs (base addr = 0x%p)\n", ioaddr);
+
+ for (i = 0; i < 55; i++) {
+ int offset = i * 4;
+ pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
+ offset, readl(ioaddr + offset));
+ }
+}
+
+static void dwmac1000_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac1000_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac1000_set_filter(struct net_device *dev)
+{
+ void __iomem *ioaddr = (void __iomem *) dev->base_addr;
+ unsigned int value = 0;
+
+ CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
+ __func__, netdev_mc_count(dev), netdev_uc_count(dev));
+
+ if (dev->flags & IFF_PROMISC)
+ value = GMAC_FRAME_FILTER_PR;
+ else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
+ || (dev->flags & IFF_ALLMULTI)) {
+ value = GMAC_FRAME_FILTER_PM; /* pass all multi */
+ writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
+ writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
+ } else if (!netdev_mc_empty(dev)) {
+ u32 mc_filter[2];
+ struct netdev_hw_addr *ha;
+
+ /* Hash filter for multicast */
+ value = GMAC_FRAME_FILTER_HMC;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ /* The upper 6 bits of the calculated CRC are used to
+ * index the contents of the hash table */
+ int bit_nr =
+ bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
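+ /* bitrev32(~crc32_le(...)) gives the CRC in the bit
+ * order expected by the GMAC hash hardware */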
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
+ writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
+ }
+
+ /* Handle multiple unicast addresses (perfect filtering)*/
+ if (netdev_uc_count(dev) > GMAC_MAX_UNICAST_ADDRESSES)
+ /* Switch to promiscuous mode if more than 16 unicast
+ * addresses are required */
+ value |= GMAC_FRAME_FILTER_PR;
+ else {
+ int reg = 1;
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ dwmac1000_set_umac_addr(ioaddr, ha->addr, reg);
+ reg++;
+ }
+ }
+
+#ifdef FRAME_FILTER_DEBUG
+ /* Enable Receive all mode (to debug filtering_fail errors) */
+ value |= GMAC_FRAME_FILTER_RA;
+#endif
+ writel(value, ioaddr + GMAC_FRAME_FILTER);
+
+ CHIP_DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
+ "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
+ readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
+}
+
+static void dwmac1000_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time)
+{
+ unsigned int flow = 0;
+
+ CHIP_DBG(KERN_DEBUG "GMAC Flow-Control:\n");
+ if (fc & FLOW_RX) {
+ CHIP_DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
+ flow |= GMAC_FLOW_CTRL_RFE;
+ }
+ if (fc & FLOW_TX) {
+ CHIP_DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
+ flow |= GMAC_FLOW_CTRL_TFE;
+ }
+
+ if (duplex) {
+ CHIP_DBG(KERN_DEBUG "\tduplex mode: PAUSE %d\n", pause_time);
+ flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
+ }
+
+ writel(flow, ioaddr + GMAC_FLOW_CTRL);
+}
+
+static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
+{
+ unsigned int pmt = 0;
+
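+ /* pmt stays zero, i.e. wake-up disabled, unless a WoL mode is set */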
+ if (mode & WAKE_MAGIC) {
+ CHIP_DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
+ pmt |= power_down | magic_pkt_en;
+ }
+ if (mode & WAKE_UCAST) {
+ CHIP_DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
+ pmt |= global_unicast;
+ }
+
+ writel(pmt, ioaddr + GMAC_PMT);
+}
+
+static void dwmac1000_irq_status(void __iomem *ioaddr)
+{
+ u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+
+ /* Unused events (e.g. MMC interrupts) are not handled here. */
+ if (unlikely(intr_status & mmc_tx_irq))
+ CHIP_DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_TX_INTR));
+ if (unlikely(intr_status & mmc_rx_irq))
+ CHIP_DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_RX_INTR));
+ if (unlikely(intr_status & mmc_rx_csum_offload_irq))
+ CHIP_DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
+ if (unlikely(intr_status & pmt_irq)) {
+ CHIP_DBG(KERN_DEBUG "GMAC: received Magic frame\n");
+ /* clear the PMT bits 5 and 6 by reading the PMT
+ * status register. */
+ readl(ioaddr + GMAC_PMT);
+ }
+}
+
+static const struct stmmac_ops dwmac1000_ops = {
+ .core_init = dwmac1000_core_init,
+ .rx_coe = dwmac1000_rx_coe_supported,
+ .dump_regs = dwmac1000_dump_regs,
+ .host_irq_status = dwmac1000_irq_status,
+ .set_filter = dwmac1000_set_filter,
+ .flow_ctrl = dwmac1000_flow_ctrl,
+ .pmt = dwmac1000_pmt,
+ .set_umac_addr = dwmac1000_set_umac_addr,
+ .get_umac_addr = dwmac1000_get_umac_addr,
+};
+
+struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
+{
+ struct mac_device_info *mac;
+ u32 hwid = readl(ioaddr + GMAC_VERSION);
+
+ mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+ if (!mac)
+ return NULL;
+
+ mac->mac = &dwmac1000_ops;
+ mac->dma = &dwmac1000_dma_ops;
+
+ mac->link.port = GMAC_CONTROL_PS;
+ mac->link.duplex = GMAC_CONTROL_DM;
+ mac->link.speed = GMAC_CONTROL_FES;
+ mac->mii.addr = GMAC_MII_ADDR;
+ mac->mii.data = GMAC_MII_DATA;
+ mac->synopsys_uid = hwid;
+
+ return mac;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
new file mode 100644
index 000000000000..da66ac511c4c
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -0,0 +1,153 @@
+/*******************************************************************************
+ This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
+ developing this code.
+
+ This contains the functions to handle the dma.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <asm/io.h>
+#include "dwmac1000.h"
+#include "dwmac_dma.h"
+
+static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
+ u32 dma_rx)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+ int limit;
+
+ /* DMA SW reset */
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
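+ /* The SFT_RESET bit self-clears once the reset completes;
+ * poll it with a bounded loop so a dead block cannot hang us */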
+ limit = 15000;
+ while (limit--) {
+ if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+ break;
+ }
+ if (limit < 0)
+ return -EBUSY;
+
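+ /* Program the same burst length for the TX (PBL) and
+ * RX (RPBL) DMA engines */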
+ value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
+ ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
+ (pbl << DMA_BUS_MODE_RPBL_SHIFT));
+
+#ifdef CONFIG_STMMAC_DA
+ value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */
+#endif
+ writel(value, ioaddr + DMA_BUS_MODE);
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+
+ /* The base address of the RX/TX descriptor lists must be written into
+ * DMA CSR3 and CSR4, respectively. */
+ writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
+ writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+
+ return 0;
+}
+
+static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
+ int rxmode)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+ if (txmode == SF_DMA_MODE) {
+ CHIP_DBG(KERN_DEBUG "GMAC: enable TX store and forward mode\n");
+ /* Transmit COE type 2 cannot be done in cut-through mode. */
+ csr6 |= DMA_CONTROL_TSF;
+ /* Operating on the second frame increases performance,
+ * especially when transmit store-and-forward is used. */
+ csr6 |= DMA_CONTROL_OSF;
+ } else {
+ CHIP_DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
+ " (threshold = %d)\n", txmode);
+ csr6 &= ~DMA_CONTROL_TSF;
+ csr6 &= DMA_CONTROL_TC_TX_MASK;
+ /* Set the transmit threshold */
+ if (txmode <= 32)
+ csr6 |= DMA_CONTROL_TTC_32;
+ else if (txmode <= 64)
+ csr6 |= DMA_CONTROL_TTC_64;
+ else if (txmode <= 128)
+ csr6 |= DMA_CONTROL_TTC_128;
+ else if (txmode <= 192)
+ csr6 |= DMA_CONTROL_TTC_192;
+ else
+ csr6 |= DMA_CONTROL_TTC_256;
+ }
+
+ if (rxmode == SF_DMA_MODE) {
+ CHIP_DBG(KERN_DEBUG "GMAC: enable RX store and forward mode\n");
+ csr6 |= DMA_CONTROL_RSF;
+ } else {
+ CHIP_DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
+ " (threshold = %d)\n", rxmode);
+ csr6 &= ~DMA_CONTROL_RSF;
+ csr6 &= DMA_CONTROL_TC_RX_MASK;
+ if (rxmode <= 32)
+ csr6 |= DMA_CONTROL_RTC_32;
+ else if (rxmode <= 64)
+ csr6 |= DMA_CONTROL_RTC_64;
+ else if (rxmode <= 96)
+ csr6 |= DMA_CONTROL_RTC_96;
+ else
+ csr6 |= DMA_CONTROL_RTC_128;
+ }
+
+ writel(csr6, ioaddr + DMA_CONTROL);
+}
+
+static void dwmac1000_dump_dma_regs(void __iomem *ioaddr)
+{
+ int i;
+ pr_info(" DMA registers\n");
+ for (i = 0; i < 22; i++) {
+ if ((i < 9) || (i > 17)) {
+ int offset = i * 4;
+ pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i,
+ (DMA_BUS_MODE + offset),
+ readl(ioaddr + DMA_BUS_MODE + offset));
+ }
+ }
+}
+
+static unsigned int dwmac1000_get_hw_feature(void __iomem *ioaddr)
+{
+ return readl(ioaddr + DMA_HW_FEATURE);
+}
+
+const struct stmmac_dma_ops dwmac1000_dma_ops = {
+ .init = dwmac1000_dma_init,
+ .dump_regs = dwmac1000_dump_dma_regs,
+ .dma_mode = dwmac1000_dma_operation_mode,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
+ .get_hw_feature = dwmac1000_get_hw_feature,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
new file mode 100644
index 000000000000..138fb8dd1e87
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -0,0 +1,194 @@
+/*******************************************************************************
+ This is the driver for the MAC 10/100 on-chip Ethernet controller
+ currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
+
+ DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
+ this code.
+
+ This only implements the mac core functions for this chip.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/crc32.h>
+#include <asm/io.h>
+#include "dwmac100.h"
+
+static void dwmac100_core_init(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
+
+#ifdef STMMAC_VLAN_TAG_USED
+ writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
+#endif
+}
+
+static int dwmac100_rx_coe_supported(void __iomem *ioaddr)
+{
+ return 0;
+}
+
+static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
+{
+ pr_info("\t----------------------------------------------\n"
+ "\t DWMAC 100 CSR (base addr = 0x%p)\n"
+ "\t----------------------------------------------\n",
+ ioaddr);
+ pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
+ readl(ioaddr + MAC_CONTROL));
+ pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
+ readl(ioaddr + MAC_ADDR_HIGH));
+ pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
+ readl(ioaddr + MAC_ADDR_LOW));
+ pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
+ MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
+ pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
+ MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
+ pr_info("\tflow control (offset 0x%x): 0x%08x\n",
+ MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
+ pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
+ readl(ioaddr + MAC_VLAN1));
+ pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
+ readl(ioaddr + MAC_VLAN2));
+}
+
+static void dwmac100_irq_status(void __iomem *ioaddr)
+{
+ return;
+}
+
+static void dwmac100_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+static void dwmac100_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+static void dwmac100_set_filter(struct net_device *dev)
+{
+ void __iomem *ioaddr = (void __iomem *) dev->base_addr;
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ if (dev->flags & IFF_PROMISC) {
+ value |= MAC_CONTROL_PR;
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
+ MAC_CONTROL_HP);
+ } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
+ || (dev->flags & IFF_ALLMULTI)) {
+ value |= MAC_CONTROL_PM;
+ value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
+ writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
+ writel(0xffffffff, ioaddr + MAC_HASH_LOW);
+ } else if (netdev_mc_empty(dev)) { /* no multicast */
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
+ MAC_CONTROL_HO | MAC_CONTROL_HP);
+ } else {
+ u32 mc_filter[2];
+ struct netdev_hw_addr *ha;
+
+ /* Perfect filter mode for the physical address and hash
+ * filter for multicast addresses */
+ value |= MAC_CONTROL_HP;
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
+ MAC_CONTROL_IF | MAC_CONTROL_HO);
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ /* The upper 6 bits of the calculated CRC are used to
+ * index the contents of the hash table */
+ int bit_nr =
+ ether_crc(ETH_ALEN, ha->addr) >> 26;
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
+ writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
+ }
+
+ writel(value, ioaddr + MAC_CONTROL);
+
+ CHIP_DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
+ "HI 0x%08x, LO 0x%08x\n",
+ __func__, readl(ioaddr + MAC_CONTROL),
+ readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
+}
+
+static void dwmac100_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time)
+{
+ unsigned int flow = MAC_FLOW_CTRL_ENABLE;
+
+ if (duplex)
+ flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
+ writel(flow, ioaddr + MAC_FLOW_CTRL);
+}
+
+/* No PMT module is supported in this Ethernet controller.
+ * Tested on ST platforms only.
+ */
+static void dwmac100_pmt(void __iomem *ioaddr, unsigned long mode)
+{
+ return;
+}
+
+static const struct stmmac_ops dwmac100_ops = {
+ .core_init = dwmac100_core_init,
+ .rx_coe = dwmac100_rx_coe_supported,
+ .dump_regs = dwmac100_dump_mac_regs,
+ .host_irq_status = dwmac100_irq_status,
+ .set_filter = dwmac100_set_filter,
+ .flow_ctrl = dwmac100_flow_ctrl,
+ .pmt = dwmac100_pmt,
+ .set_umac_addr = dwmac100_set_umac_addr,
+ .get_umac_addr = dwmac100_get_umac_addr,
+};
+
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
+{
+ struct mac_device_info *mac;
+
+ mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+ if (!mac)
+ return NULL;
+
+ pr_info("\tDWMAC100\n");
+
+ mac->mac = &dwmac100_ops;
+ mac->dma = &dwmac100_dma_ops;
+
+ mac->link.port = MAC_CONTROL_PS;
+ mac->link.duplex = MAC_CONTROL_F;
+ mac->link.speed = 0;
+ mac->mii.addr = MAC_MII_ADDR;
+ mac->mii.data = MAC_MII_DATA;
+ mac->synopsys_uid = 0;
+
+ return mac;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
new file mode 100644
index 000000000000..627f656b0f3c
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -0,0 +1,143 @@
+/*******************************************************************************
+ This is the driver for the MAC 10/100 on-chip Ethernet controller
+ currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
+
+ DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
+ this code.
+
+ This contains the functions to handle the dma.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <asm/io.h>
+#include "dwmac100.h"
+#include "dwmac_dma.h"
+
+static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
+ u32 dma_rx)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+ int limit;
+
+ /* DMA SW reset */
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+ limit = 15000;
+ while (limit--) {
+ if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+ break;
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ /* Enable Application Access by writing to DMA CSR0 */
+ writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
+ ioaddr + DMA_BUS_MODE);
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+
+ /* The base address of the RX/TX descriptor lists must be written into
+ * DMA CSR3 and CSR4, respectively. */
+ writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
+ writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+
+ return 0;
+}
+
+/* The Store and Forward capability is not used at all.
+ * The transmit threshold can be programmed by
+ * setting the TTC bits in the DMA control register. */
+static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode,
+ int rxmode)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+ if (txmode <= 32)
+ csr6 |= DMA_CONTROL_TTC_32;
+ else if (txmode <= 64)
+ csr6 |= DMA_CONTROL_TTC_64;
+ else
+ csr6 |= DMA_CONTROL_TTC_128;
+
+ writel(csr6, ioaddr + DMA_CONTROL);
+}
+
+static void dwmac100_dump_dma_regs(void __iomem *ioaddr)
+{
+ int i;
+
+ CHIP_DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n");
+ for (i = 0; i < 9; i++)
+ pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
+ (DMA_BUS_MODE + i * 4),
+ readl(ioaddr + DMA_BUS_MODE + i * 4));
+ CHIP_DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
+ DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
+ CHIP_DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
+ DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
+}
+
+/* DMA controller has two counters to track the number of
+ * the receive missed frames. */
+static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
+ void __iomem *ioaddr)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
+
+ if (unlikely(csr8)) {
+ if (csr8 & DMA_MISSED_FRAME_OVE) {
+ stats->rx_over_errors += 0x800;
+ x->rx_overflow_cntr += 0x800;
+ } else {
+ unsigned int ove_cntr;
+ ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
+ stats->rx_over_errors += ove_cntr;
+ x->rx_overflow_cntr += ove_cntr;
+ }
+
+ if (csr8 & DMA_MISSED_FRAME_OVE_M) {
+ stats->rx_missed_errors += 0xffff;
+ x->rx_missed_cntr += 0xffff;
+ } else {
+ unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
+ stats->rx_missed_errors += miss_f;
+ x->rx_missed_cntr += miss_f;
+ }
+ }
+}
+
+const struct stmmac_dma_ops dwmac100_dma_ops = {
+ .init = dwmac100_dma_init,
+ .dump_regs = dwmac100_dump_dma_regs,
+ .dma_mode = dwmac100_dma_operation_mode,
+ .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
new file mode 100644
index 000000000000..437edacd602e
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -0,0 +1,109 @@
+/*******************************************************************************
+ DWMAC DMA Header file.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+/* DMA CRS Control and Status Register Mapping */
+#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
+#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
+#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
+#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
+#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
+#define DMA_STATUS 0x00001014 /* Status Register */
+#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
+#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
+#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
+#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
+#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
+#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */
+
+/* DMA Control register defines */
+#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
+#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
+
+/* DMA Normal interrupt */
+#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
+#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
+#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
+#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
+
+#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+ DMA_INTR_ENA_TIE)
+
+/* DMA Abnormal interrupt */
+#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
+#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
+#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
+#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
+#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
+#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
+#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
+#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
+#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
+
+#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+ DMA_INTR_ENA_UNE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+
+/* DMA Status register defines */
+#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
+#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
+#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
+#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
+#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
+#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
+#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
+#define DMA_STATUS_TS_SHIFT 20
+#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
+#define DMA_STATUS_RS_SHIFT 17
+#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
+#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
+#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
+#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
+#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
+#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
+#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
+#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
+
+extern void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+extern void dwmac_enable_dma_irq(void __iomem *ioaddr);
+extern void dwmac_disable_dma_irq(void __iomem *ioaddr);
+extern void dwmac_dma_start_tx(void __iomem *ioaddr);
+extern void dwmac_dma_stop_tx(void __iomem *ioaddr);
+extern void dwmac_dma_start_rx(void __iomem *ioaddr);
+extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
+extern int dwmac_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
new file mode 100644
index 000000000000..e25093510b0c
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -0,0 +1,258 @@
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/io.h>
+#include "common.h"
+#include "dwmac_dma.h"
+
+#undef DWMAC_DMA_DEBUG
+#ifdef DWMAC_DMA_DEBUG
+#define DWMAC_LIB_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define DWMAC_LIB_DBG(fmt, args...) do { } while (0)
+#endif
+
+/* CSR1 enables the transmit DMA to check for new descriptor */
+void dwmac_enable_dma_transmission(void __iomem *ioaddr)
+{
+ writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
+}
+
+void dwmac_enable_dma_irq(void __iomem *ioaddr)
+{
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+}
+
+void dwmac_disable_dma_irq(void __iomem *ioaddr)
+{
+ writel(0, ioaddr + DMA_INTR_ENA);
+}
+
+void dwmac_dma_start_tx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value |= DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CONTROL);
+}
+
+void dwmac_dma_stop_tx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value &= ~DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CONTROL);
+}
+
+void dwmac_dma_start_rx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value |= DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+}
+
+void dwmac_dma_stop_rx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value &= ~DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+}
+
+#ifdef DWMAC_DMA_DEBUG
+static void show_tx_process_state(unsigned int status)
+{
+ unsigned int state;
+ state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- TX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- TX (Running):Fetching the Tx desc\n");
+ break;
+ case 2:
+ pr_info("- TX (Running): Waiting for end of tx\n");
+ break;
+ case 3:
+ pr_info("- TX (Running): Reading the data "
+ "and queuing the data into the Tx buf\n");
+ break;
+ case 6:
+ pr_info("- TX (Suspended): Tx Buff Underflow "
+ "or an unavailable Transmit descriptor\n");
+ break;
+ case 7:
+ pr_info("- TX (Running): Closing Tx descriptor\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void show_rx_process_state(unsigned int status)
+{
+ unsigned int state;
+ state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- RX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- RX (Running): Fetching the Rx desc\n");
+ break;
+ case 2:
+ pr_info("- RX (Running):Checking for end of pkt\n");
+ break;
+ case 3:
+ pr_info("- RX (Running): Waiting for Rx pkt\n");
+ break;
+ case 4:
+ pr_info("- RX (Suspended): Unavailable Rx buf\n");
+ break;
+ case 5:
+ pr_info("- RX (Running): Closing Rx descriptor\n");
+ break;
+ case 6:
+ pr_info("- RX(Running): Flushing the current frame"
+ " from the Rx buf\n");
+ break;
+ case 7:
+ pr_info("- RX (Running): Queuing the Rx frame"
+ " from the Rx buf into memory\n");
+ break;
+ default:
+ break;
+ }
+}
+#endif
+
+int dwmac_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x)
+{
+ int ret = 0;
+ /* read the status register (CSR5) */
+ u32 intr_status = readl(ioaddr + DMA_STATUS);
+
+ DWMAC_LIB_DBG(KERN_INFO "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
+#ifdef DWMAC_DMA_DEBUG
+ /* It displays the DMA process states (CSR5 register) */
+ show_tx_process_state(intr_status);
+ show_rx_process_state(intr_status);
+#endif
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & DMA_STATUS_AIS)) {
+ DWMAC_LIB_DBG(KERN_INFO "CSR5[15] DMA ABNORMAL IRQ: ");
+ if (unlikely(intr_status & DMA_STATUS_UNF)) {
+ DWMAC_LIB_DBG(KERN_INFO "transmit underflow\n");
+ ret = tx_hard_error_bump_tc;
+ x->tx_undeflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TJT)) {
+ DWMAC_LIB_DBG(KERN_INFO "transmit jabber\n");
+ x->tx_jabber_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_OVF)) {
+ DWMAC_LIB_DBG(KERN_INFO "recv overflow\n");
+ x->rx_overflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RU)) {
+ DWMAC_LIB_DBG(KERN_INFO "receive buffer unavailable\n");
+ x->rx_buf_unav_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RPS)) {
+ DWMAC_LIB_DBG(KERN_INFO "receive process stopped\n");
+ x->rx_process_stopped_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RWT)) {
+ DWMAC_LIB_DBG(KERN_INFO "receive watchdog\n");
+ x->rx_watchdog_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_ETI)) {
+ DWMAC_LIB_DBG(KERN_INFO "transmit early interrupt\n");
+ x->tx_early_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TPS)) {
+ DWMAC_LIB_DBG(KERN_INFO "transmit process stopped\n");
+ x->tx_process_stopped_irq++;
+ ret = tx_hard_error;
+ }
+ if (unlikely(intr_status & DMA_STATUS_FBI)) {
+ DWMAC_LIB_DBG(KERN_INFO "fatal bus error\n");
+ x->fatal_bus_error_irq++;
+ ret = tx_hard_error;
+ }
+ }
+ /* TX/RX NORMAL interrupts */
+ if (intr_status & DMA_STATUS_NIS) {
+ x->normal_irq_n++;
+ if (likely((intr_status & DMA_STATUS_RI) ||
+ (intr_status & (DMA_STATUS_TI))))
+ ret = handle_tx_rx;
+ }
+ /* Optional hardware blocks, interrupts should be disabled */
+ if (unlikely(intr_status &
+ (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
+ pr_info("%s: unexpected status %08x\n", __func__, intr_status);
+ /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
+ writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
+
+ DWMAC_LIB_DBG(KERN_INFO "\n\n");
+ return ret;
+}
+
+void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+ writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
+
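+ /* The FTF bit self-clears once the transmit FIFO has been flushed */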
+ do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
+}
+
+void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low)
+{
+ unsigned long data;
+
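+ /* The address is split across two registers: the high word
+ * holds bytes 5:4 in its low 16 bits, the low word holds
+ * bytes 3:0 */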
+ data = (addr[5] << 8) | addr[4];
+ writel(data, ioaddr + high);
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + low);
+}
+
+void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low)
+{
+ unsigned int hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + high);
+ lo_addr = readl(ioaddr + low);
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
new file mode 100644
index 000000000000..e5dfb6a30182
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -0,0 +1,337 @@
+/*******************************************************************************
+ This contains the functions to handle the enhanced descriptors.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "common.h"
+
+static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, void __iomem *ioaddr)
+{
+ int ret = 0;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.etx.error_summary)) {
+ CHIP_DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
+ if (unlikely(p->des01.etx.jabber_timeout)) {
+ CHIP_DBG(KERN_ERR "\tjabber_timeout error\n");
+ x->tx_jabber++;
+ }
+
+ if (unlikely(p->des01.etx.frame_flushed)) {
+ CHIP_DBG(KERN_ERR "\tframe_flushed error\n");
+ x->tx_frame_flushed++;
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ }
+
+ if (unlikely(p->des01.etx.loss_carrier)) {
+ CHIP_DBG(KERN_ERR "\tloss_carrier error\n");
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.etx.no_carrier)) {
+ CHIP_DBG(KERN_ERR "\tno_carrier error\n");
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.etx.late_collision)) {
+ CHIP_DBG(KERN_ERR "\tlate_collision error\n");
+ stats->collisions += p->des01.etx.collision_count;
+ }
+ if (unlikely(p->des01.etx.excessive_collisions)) {
+ CHIP_DBG(KERN_ERR "\texcessive_collisions\n");
+ stats->collisions += p->des01.etx.collision_count;
+ }
+ if (unlikely(p->des01.etx.excessive_deferral)) {
+ CHIP_DBG(KERN_INFO "\texcessive tx_deferral\n");
+ x->tx_deferred++;
+ }
+
+ if (unlikely(p->des01.etx.underflow_error)) {
+ CHIP_DBG(KERN_ERR "\tunderflow error\n");
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ x->tx_underflow++;
+ }
+
+ if (unlikely(p->des01.etx.ip_header_error)) {
+ CHIP_DBG(KERN_ERR "\tTX IP header csum error\n");
+ x->tx_ip_header_error++;
+ }
+
+ if (unlikely(p->des01.etx.payload_error)) {
+ CHIP_DBG(KERN_ERR "\tAddr/Payload csum error\n");
+ x->tx_payload_error++;
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ }
+
+ ret = -1;
+ }
+
+ if (unlikely(p->des01.etx.deferred)) {
+ CHIP_DBG(KERN_INFO "GMAC TX status: tx deferred\n");
+ x->tx_deferred++;
+ }
+#ifdef STMMAC_VLAN_TAG_USED
+ if (p->des01.etx.vlan_frame) {
+ CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
+ x->tx_vlan++;
+ }
+#endif
+
+ return ret;
+}
+
+static int enh_desc_get_tx_len(struct dma_desc *p)
+{
+ return p->des01.etx.buffer1_size;
+}
+
+static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
+{
+ int ret = good_frame;
+ u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
+
+ /* bits 5 7 0 | Frame status
+ * ----------------------------------------------------------
+ * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
+ * 1 0 0 | IPv4/6 No CSUM errors.
+ * 1 0 1 | IPv4/6 CSUM PAYLOAD error
+ * 1 1 0 | IPv4/6 CSUM IP HR error
+ * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errors
+ * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
+ * 0 1 1 | COE bypassed: no IPv4/6 frame
+ * 0 1 0 | Reserved.
+ */
+ if (status == 0x0) {
+ CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
+ ret = llc_snap;
+ } else if (status == 0x4) {
+ CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
+ ret = good_frame;
+ } else if (status == 0x5) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
+ ret = csum_none;
+ } else if (status == 0x6) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
+ ret = csum_none;
+ } else if (status == 0x7) {
+ CHIP_DBG(KERN_ERR
+ "RX Des0 status: IPv4/6 Header and Payload Error.\n");
+ ret = csum_none;
+ } else if (status == 0x1) {
+ CHIP_DBG(KERN_ERR
+ "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
+ ret = discard_frame;
+ } else if (status == 0x3) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
+ ret = discard_frame;
+ }
+ return ret;
+}
+
+static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ int ret = good_frame;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.erx.error_summary)) {
+ CHIP_DBG(KERN_ERR "GMAC RX Error Summary 0x%08x\n",
+ p->des01.erx);
+ if (unlikely(p->des01.erx.descriptor_error)) {
+ CHIP_DBG(KERN_ERR "\tdescriptor error\n");
+ x->rx_desc++;
+ stats->rx_length_errors++;
+ }
+ if (unlikely(p->des01.erx.overflow_error)) {
+ CHIP_DBG(KERN_ERR "\toverflow error\n");
+ x->rx_gmac_overflow++;
+ }
+
+ if (unlikely(p->des01.erx.ipc_csum_error))
+ CHIP_DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
+
+ if (unlikely(p->des01.erx.late_collision)) {
+ CHIP_DBG(KERN_ERR "\tlate_collision error\n");
+ stats->collisions++;
+ }
+ if (unlikely(p->des01.erx.receive_watchdog)) {
+ CHIP_DBG(KERN_ERR "\treceive_watchdog error\n");
+ x->rx_watchdog++;
+ }
+ if (unlikely(p->des01.erx.error_gmii)) {
+ CHIP_DBG(KERN_ERR "\tReceive Error\n");
+ x->rx_mii++;
+ }
+ if (unlikely(p->des01.erx.crc_error)) {
+ CHIP_DBG(KERN_ERR "\tCRC error\n");
+ x->rx_crc++;
+ stats->rx_crc_errors++;
+ }
+ ret = discard_frame;
+ }
+
+ /* After a payload csum error, the ES bit is set.
+ * This doesn't match the information reported in the databook.
+ * At any rate, we need to understand whether the HW csum computation
+ * is ok and report this info to the upper layers. */
+ ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
+ p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
+
+ if (unlikely(p->des01.erx.dribbling)) {
+ CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.sa_filter_fail)) {
+ CHIP_DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
+ x->sa_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.da_filter_fail)) {
+ CHIP_DBG(KERN_ERR "GMAC RX : Dest Address filter fail\n");
+ x->da_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.length_error)) {
+ CHIP_DBG(KERN_ERR "GMAC RX: length_error error\n");
+ x->rx_length++;
+ ret = discard_frame;
+ }
+#ifdef STMMAC_VLAN_TAG_USED
+ if (p->des01.erx.vlan_tag) {
+ CHIP_DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
+ x->rx_vlan++;
+ }
+#endif
+ return ret;
+}
+
+static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+ int disable_rx_ic)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.erx.own = 1;
+ p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
+ /* To support jumbo frames */
+ p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
+ if (i == ring_size - 1)
+ p->des01.erx.end_ring = 1;
+ if (disable_rx_ic)
+ p->des01.erx.disable_ic = 1;
+ p++;
+ }
+}
+
+static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+{
+ int i;
+
+ for (i = 0; i < ring_size; i++) {
+ p->des01.etx.own = 0;
+ if (i == ring_size - 1)
+ p->des01.etx.end_ring = 1;
+ p++;
+ }
+}
+
+static int enh_desc_get_tx_owner(struct dma_desc *p)
+{
+ return p->des01.etx.own;
+}
+
+static int enh_desc_get_rx_owner(struct dma_desc *p)
+{
+ return p->des01.erx.own;
+}
+
+static void enh_desc_set_tx_owner(struct dma_desc *p)
+{
+ p->des01.etx.own = 1;
+}
+
+static void enh_desc_set_rx_owner(struct dma_desc *p)
+{
+ p->des01.erx.own = 1;
+}
+
+static int enh_desc_get_tx_ls(struct dma_desc *p)
+{
+ return p->des01.etx.last_segment;
+}
+
+static void enh_desc_release_tx_desc(struct dma_desc *p)
+{
+ int ter = p->des01.etx.end_ring;
+
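+ /* Zero the status/control word but preserve the end-of-ring
+ * marker and the buffer pointers (des2 onwards) */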
+ memset(p, 0, offsetof(struct dma_desc, des2));
+ p->des01.etx.end_ring = ter;
+}
+
+static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ int csum_flag)
+{
+ p->des01.etx.first_segment = is_fs;
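+ /* Payloads larger than 4 KiB are split across
+ * buffer1 and buffer2 */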
+ if (unlikely(len > BUF_SIZE_4KiB)) {
+ p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
+ p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
+ } else {
+ p->des01.etx.buffer1_size = len;
+ }
+ if (likely(csum_flag))
+ p->des01.etx.checksum_insertion = cic_full;
+}
+
+static void enh_desc_clear_tx_ic(struct dma_desc *p)
+{
+ p->des01.etx.interrupt = 0;
+}
+
+static void enh_desc_close_tx_desc(struct dma_desc *p)
+{
+ p->des01.etx.last_segment = 1;
+ p->des01.etx.interrupt = 1;
+}
+
+static int enh_desc_get_rx_frame_len(struct dma_desc *p)
+{
+ return p->des01.erx.frame_length;
+}
+
+const struct stmmac_desc_ops enh_desc_ops = {
+ .tx_status = enh_desc_get_tx_status,
+ .rx_status = enh_desc_get_rx_status,
+ .get_tx_len = enh_desc_get_tx_len,
+ .init_rx_desc = enh_desc_init_rx_desc,
+ .init_tx_desc = enh_desc_init_tx_desc,
+ .get_tx_owner = enh_desc_get_tx_owner,
+ .get_rx_owner = enh_desc_get_rx_owner,
+ .release_tx_desc = enh_desc_release_tx_desc,
+ .prepare_tx_desc = enh_desc_prepare_tx_desc,
+ .clear_tx_ic = enh_desc_clear_tx_ic,
+ .close_tx_desc = enh_desc_close_tx_desc,
+ .get_tx_ls = enh_desc_get_tx_ls,
+ .set_tx_owner = enh_desc_set_tx_owner,
+ .set_rx_owner = enh_desc_set_rx_owner,
+ .get_rx_frame_len = enh_desc_get_rx_frame_len,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
new file mode 100644
index 000000000000..a38352024cb8
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -0,0 +1,131 @@
+/*******************************************************************************
+ MMC Header file
+
+ Copyright (C) 2011 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+/* MMC control register */
+/* When set, all counters are reset */
+#define MMC_CNTRL_COUNTER_RESET 0x1
+/* When set, do not roll over to zero
+ * after reaching the max value */
+#define MMC_CNTRL_COUNTER_STOP_ROLLOVER 0x2
+#define MMC_CNTRL_RESET_ON_READ 0x4 /* Reset after reading */
+#define MMC_CNTRL_COUNTER_FREEZER 0x8 /* Freeze the counters at
+ * their current value */
+#define MMC_CNTRL_PRESET 0x10
+#define MMC_CNTRL_FULL_HALF_PRESET 0x20
+struct stmmac_counters {
+ unsigned int mmc_tx_octetcount_gb;
+ unsigned int mmc_tx_framecount_gb;
+ unsigned int mmc_tx_broadcastframe_g;
+ unsigned int mmc_tx_multicastframe_g;
+ unsigned int mmc_tx_64_octets_gb;
+ unsigned int mmc_tx_65_to_127_octets_gb;
+ unsigned int mmc_tx_128_to_255_octets_gb;
+ unsigned int mmc_tx_256_to_511_octets_gb;
+ unsigned int mmc_tx_512_to_1023_octets_gb;
+ unsigned int mmc_tx_1024_to_max_octets_gb;
+ unsigned int mmc_tx_unicast_gb;
+ unsigned int mmc_tx_multicast_gb;
+ unsigned int mmc_tx_broadcast_gb;
+ unsigned int mmc_tx_underflow_error;
+ unsigned int mmc_tx_singlecol_g;
+ unsigned int mmc_tx_multicol_g;
+ unsigned int mmc_tx_deferred;
+ unsigned int mmc_tx_latecol;
+ unsigned int mmc_tx_exesscol;
+ unsigned int mmc_tx_carrier_error;
+ unsigned int mmc_tx_octetcount_g;
+ unsigned int mmc_tx_framecount_g;
+ unsigned int mmc_tx_excessdef;
+ unsigned int mmc_tx_pause_frame;
+ unsigned int mmc_tx_vlan_frame_g;
+
+ /* MMC RX counter registers */
+ unsigned int mmc_rx_framecount_gb;
+ unsigned int mmc_rx_octetcount_gb;
+ unsigned int mmc_rx_octetcount_g;
+ unsigned int mmc_rx_broadcastframe_g;
+ unsigned int mmc_rx_multicastframe_g;
+ unsigned int mmc_rx_crc_errror;
+ unsigned int mmc_rx_align_error;
+ unsigned int mmc_rx_run_error;
+ unsigned int mmc_rx_jabber_error;
+ unsigned int mmc_rx_undersize_g;
+ unsigned int mmc_rx_oversize_g;
+ unsigned int mmc_rx_64_octets_gb;
+ unsigned int mmc_rx_65_to_127_octets_gb;
+ unsigned int mmc_rx_128_to_255_octets_gb;
+ unsigned int mmc_rx_256_to_511_octets_gb;
+ unsigned int mmc_rx_512_to_1023_octets_gb;
+ unsigned int mmc_rx_1024_to_max_octets_gb;
+ unsigned int mmc_rx_unicast_g;
+ unsigned int mmc_rx_length_error;
+ unsigned int mmc_rx_autofrangetype;
+ unsigned int mmc_rx_pause_frames;
+ unsigned int mmc_rx_fifo_overflow;
+ unsigned int mmc_rx_vlan_frames_gb;
+ unsigned int mmc_rx_watchdog_error;
+ /* IPC */
+ unsigned int mmc_rx_ipc_intr_mask;
+ unsigned int mmc_rx_ipc_intr;
+ /* IPv4 */
+ unsigned int mmc_rx_ipv4_gd;
+ unsigned int mmc_rx_ipv4_hderr;
+ unsigned int mmc_rx_ipv4_nopay;
+ unsigned int mmc_rx_ipv4_frag;
+ unsigned int mmc_rx_ipv4_udsbl;
+
+ unsigned int mmc_rx_ipv4_gd_octets;
+ unsigned int mmc_rx_ipv4_hderr_octets;
+ unsigned int mmc_rx_ipv4_nopay_octets;
+ unsigned int mmc_rx_ipv4_frag_octets;
+ unsigned int mmc_rx_ipv4_udsbl_octets;
+
+ /* IPV6 */
+ unsigned int mmc_rx_ipv6_gd_octets;
+ unsigned int mmc_rx_ipv6_hderr_octets;
+ unsigned int mmc_rx_ipv6_nopay_octets;
+
+ unsigned int mmc_rx_ipv6_gd;
+ unsigned int mmc_rx_ipv6_hderr;
+ unsigned int mmc_rx_ipv6_nopay;
+
+ /* Protocols */
+ unsigned int mmc_rx_udp_gd;
+ unsigned int mmc_rx_udp_err;
+ unsigned int mmc_rx_tcp_gd;
+ unsigned int mmc_rx_tcp_err;
+ unsigned int mmc_rx_icmp_gd;
+ unsigned int mmc_rx_icmp_err;
+
+ unsigned int mmc_rx_udp_gd_octets;
+ unsigned int mmc_rx_udp_err_octets;
+ unsigned int mmc_rx_tcp_gd_octets;
+ unsigned int mmc_rx_tcp_err_octets;
+ unsigned int mmc_rx_icmp_gd_octets;
+ unsigned int mmc_rx_icmp_err_octets;
+};
+
+extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
+extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
+extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
new file mode 100644
index 000000000000..41e6b33e1b08
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -0,0 +1,265 @@
+/*******************************************************************************
+ DWMAC Management Counters
+
+ Copyright (C) 2011 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/io.h>
+#include "mmc.h"
+
+/* MAC Management Counters register offset */
+
+#define MMC_CNTRL 0x00000100 /* MMC Control */
+#define MMC_RX_INTR 0x00000104 /* MMC RX Interrupt */
+#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */
+#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */
+#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */
+#define MMC_DEFAULT_MASK 0xffffffff
+
+/* MMC TX counter registers */
+
+/* Note:
+ * _GB register stands for good and bad frames
+ * _G is for good only.
+ */
+#define MMC_TX_OCTETCOUNT_GB 0x00000114
+#define MMC_TX_FRAMECOUNT_GB 0x00000118
+#define MMC_TX_BROADCASTFRAME_G 0x0000011c
+#define MMC_TX_MULTICASTFRAME_G 0x00000120
+#define MMC_TX_64_OCTETS_GB 0x00000124
+#define MMC_TX_65_TO_127_OCTETS_GB 0x00000128
+#define MMC_TX_128_TO_255_OCTETS_GB 0x0000012c
+#define MMC_TX_256_TO_511_OCTETS_GB 0x00000130
+#define MMC_TX_512_TO_1023_OCTETS_GB 0x00000134
+#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x00000138
+#define MMC_TX_UNICAST_GB 0x0000013c
+#define MMC_TX_MULTICAST_GB 0x00000140
+#define MMC_TX_BROADCAST_GB 0x00000144
+#define MMC_TX_UNDERFLOW_ERROR 0x00000148
+#define MMC_TX_SINGLECOL_G 0x0000014c
+#define MMC_TX_MULTICOL_G 0x00000150
+#define MMC_TX_DEFERRED 0x00000154
+#define MMC_TX_LATECOL 0x00000158
+#define MMC_TX_EXESSCOL 0x0000015c
+#define MMC_TX_CARRIER_ERROR 0x00000160
+#define MMC_TX_OCTETCOUNT_G 0x00000164
+#define MMC_TX_FRAMECOUNT_G 0x00000168
+#define MMC_TX_EXCESSDEF 0x0000016c
+#define MMC_TX_PAUSE_FRAME 0x00000170
+#define MMC_TX_VLAN_FRAME_G 0x00000174
+
+/* MMC RX counter registers */
+#define MMC_RX_FRAMECOUNT_GB 0x00000180
+#define MMC_RX_OCTETCOUNT_GB 0x00000184
+#define MMC_RX_OCTETCOUNT_G 0x00000188
+#define MMC_RX_BROADCASTFRAME_G 0x0000018c
+#define MMC_RX_MULTICASTFRAME_G 0x00000190
+#define MMC_RX_CRC_ERRROR 0x00000194
+#define MMC_RX_ALIGN_ERROR 0x00000198
+#define MMC_RX_RUN_ERROR 0x0000019C
+#define MMC_RX_JABBER_ERROR 0x000001A0
+#define MMC_RX_UNDERSIZE_G 0x000001A4
+#define MMC_RX_OVERSIZE_G 0x000001A8
+#define MMC_RX_64_OCTETS_GB 0x000001AC
+#define MMC_RX_65_TO_127_OCTETS_GB 0x000001b0
+#define MMC_RX_128_TO_255_OCTETS_GB 0x000001b4
+#define MMC_RX_256_TO_511_OCTETS_GB 0x000001b8
+#define MMC_RX_512_TO_1023_OCTETS_GB 0x000001bc
+#define MMC_RX_1024_TO_MAX_OCTETS_GB 0x000001c0
+#define MMC_RX_UNICAST_G 0x000001c4
+#define MMC_RX_LENGTH_ERROR 0x000001c8
+#define MMC_RX_AUTOFRANGETYPE 0x000001cc
+#define MMC_RX_PAUSE_FRAMES 0x000001d0
+#define MMC_RX_FIFO_OVERFLOW 0x000001d4
+#define MMC_RX_VLAN_FRAMES_GB 0x000001d8
+#define MMC_RX_WATCHDOG_ERROR 0x000001dc
+/* IPC*/
+#define MMC_RX_IPC_INTR_MASK 0x00000200
+#define MMC_RX_IPC_INTR 0x00000208
+/* IPv4*/
+#define MMC_RX_IPV4_GD 0x00000210
+#define MMC_RX_IPV4_HDERR 0x00000214
+#define MMC_RX_IPV4_NOPAY 0x00000218
+#define MMC_RX_IPV4_FRAG 0x0000021C
+#define MMC_RX_IPV4_UDSBL 0x00000220
+
+#define MMC_RX_IPV4_GD_OCTETS 0x00000250
+#define MMC_RX_IPV4_HDERR_OCTETS 0x00000254
+#define MMC_RX_IPV4_NOPAY_OCTETS 0x00000258
+#define MMC_RX_IPV4_FRAG_OCTETS 0x0000025c
+#define MMC_RX_IPV4_UDSBL_OCTETS 0x00000260
+
+/* IPV6*/
+#define MMC_RX_IPV6_GD_OCTETS 0x00000264
+#define MMC_RX_IPV6_HDERR_OCTETS 0x00000268
+#define MMC_RX_IPV6_NOPAY_OCTETS 0x0000026c
+
+#define MMC_RX_IPV6_GD 0x00000224
+#define MMC_RX_IPV6_HDERR 0x00000228
+#define MMC_RX_IPV6_NOPAY 0x0000022c
+
+/* Protocols*/
+#define MMC_RX_UDP_GD 0x00000230
+#define MMC_RX_UDP_ERR 0x00000234
+#define MMC_RX_TCP_GD 0x00000238
+#define MMC_RX_TCP_ERR 0x0000023c
+#define MMC_RX_ICMP_GD 0x00000240
+#define MMC_RX_ICMP_ERR 0x00000244
+
+#define MMC_RX_UDP_GD_OCTETS 0x00000270
+#define MMC_RX_UDP_ERR_OCTETS 0x00000274
+#define MMC_RX_TCP_GD_OCTETS 0x00000278
+#define MMC_RX_TCP_ERR_OCTETS 0x0000027c
+#define MMC_RX_ICMP_GD_OCTETS 0x00000280
+#define MMC_RX_ICMP_ERR_OCTETS 0x00000284
+
+void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
+{
+ u32 value = readl(ioaddr + MMC_CNTRL);
+
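+ /* Only the low six control bits (reset, stop-rollover,
+ * reset-on-read, freeze and the presets) are taken from the caller */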
+ value |= (mode & 0x3F);
+
+ writel(value, ioaddr + MMC_CNTRL);
+
+ pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
+ MMC_CNTRL, value);
+}
+
+/* Mask all the MMC interrupts. */
+void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
+{
+ writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
+}
+
+/* This reads the MAC core counters (if actually supported).
+ * By default the MMC core is programmed to reset each
+ * counter after a read, so all the fields of the mmc struct
+ * have to be accumulated.
+ */
+void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc)
+{
+ mmc->mmc_tx_octetcount_gb += readl(ioaddr + MMC_TX_OCTETCOUNT_GB);
+ mmc->mmc_tx_framecount_gb += readl(ioaddr + MMC_TX_FRAMECOUNT_GB);
+ mmc->mmc_tx_broadcastframe_g += readl(ioaddr + MMC_TX_BROADCASTFRAME_G);
+ mmc->mmc_tx_multicastframe_g += readl(ioaddr + MMC_TX_MULTICASTFRAME_G);
+ mmc->mmc_tx_64_octets_gb += readl(ioaddr + MMC_TX_64_OCTETS_GB);
+ mmc->mmc_tx_65_to_127_octets_gb +=
+ readl(ioaddr + MMC_TX_65_TO_127_OCTETS_GB);
+ mmc->mmc_tx_128_to_255_octets_gb +=
+ readl(ioaddr + MMC_TX_128_TO_255_OCTETS_GB);
+ mmc->mmc_tx_256_to_511_octets_gb +=
+ readl(ioaddr + MMC_TX_256_TO_511_OCTETS_GB);
+ mmc->mmc_tx_512_to_1023_octets_gb +=
+ readl(ioaddr + MMC_TX_512_TO_1023_OCTETS_GB);
+ mmc->mmc_tx_1024_to_max_octets_gb +=
+ readl(ioaddr + MMC_TX_1024_TO_MAX_OCTETS_GB);
+ mmc->mmc_tx_unicast_gb += readl(ioaddr + MMC_TX_UNICAST_GB);
+ mmc->mmc_tx_multicast_gb += readl(ioaddr + MMC_TX_MULTICAST_GB);
+ mmc->mmc_tx_broadcast_gb += readl(ioaddr + MMC_TX_BROADCAST_GB);
+ mmc->mmc_tx_underflow_error += readl(ioaddr + MMC_TX_UNDERFLOW_ERROR);
+ mmc->mmc_tx_singlecol_g += readl(ioaddr + MMC_TX_SINGLECOL_G);
+ mmc->mmc_tx_multicol_g += readl(ioaddr + MMC_TX_MULTICOL_G);
+ mmc->mmc_tx_deferred += readl(ioaddr + MMC_TX_DEFERRED);
+ mmc->mmc_tx_latecol += readl(ioaddr + MMC_TX_LATECOL);
+ mmc->mmc_tx_exesscol += readl(ioaddr + MMC_TX_EXESSCOL);
+ mmc->mmc_tx_carrier_error += readl(ioaddr + MMC_TX_CARRIER_ERROR);
+ mmc->mmc_tx_octetcount_g += readl(ioaddr + MMC_TX_OCTETCOUNT_G);
+ mmc->mmc_tx_framecount_g += readl(ioaddr + MMC_TX_FRAMECOUNT_G);
+ mmc->mmc_tx_excessdef += readl(ioaddr + MMC_TX_EXCESSDEF);
+ mmc->mmc_tx_pause_frame += readl(ioaddr + MMC_TX_PAUSE_FRAME);
+ mmc->mmc_tx_vlan_frame_g += readl(ioaddr + MMC_TX_VLAN_FRAME_G);
+
+ /* MMC RX counter registers */
+ mmc->mmc_rx_framecount_gb += readl(ioaddr + MMC_RX_FRAMECOUNT_GB);
+ mmc->mmc_rx_octetcount_gb += readl(ioaddr + MMC_RX_OCTETCOUNT_GB);
+ mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G);
+ mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G);
+ mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G);
+ mmc->mmc_rx_crc_errror += readl(ioaddr + MMC_RX_CRC_ERRROR);
+ mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR);
+ mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR);
+ mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR);
+ mmc->mmc_rx_undersize_g += readl(ioaddr + MMC_RX_UNDERSIZE_G);
+ mmc->mmc_rx_oversize_g += readl(ioaddr + MMC_RX_OVERSIZE_G);
+ mmc->mmc_rx_64_octets_gb += readl(ioaddr + MMC_RX_64_OCTETS_GB);
+ mmc->mmc_rx_65_to_127_octets_gb +=
+ readl(ioaddr + MMC_RX_65_TO_127_OCTETS_GB);
+ mmc->mmc_rx_128_to_255_octets_gb +=
+ readl(ioaddr + MMC_RX_128_TO_255_OCTETS_GB);
+ mmc->mmc_rx_256_to_511_octets_gb +=
+ readl(ioaddr + MMC_RX_256_TO_511_OCTETS_GB);
+ mmc->mmc_rx_512_to_1023_octets_gb +=
+ readl(ioaddr + MMC_RX_512_TO_1023_OCTETS_GB);
+ mmc->mmc_rx_1024_to_max_octets_gb +=
+ readl(ioaddr + MMC_RX_1024_TO_MAX_OCTETS_GB);
+ mmc->mmc_rx_unicast_g += readl(ioaddr + MMC_RX_UNICAST_G);
+ mmc->mmc_rx_length_error += readl(ioaddr + MMC_RX_LENGTH_ERROR);
+ mmc->mmc_rx_autofrangetype += readl(ioaddr + MMC_RX_AUTOFRANGETYPE);
+ mmc->mmc_rx_pause_frames += readl(ioaddr + MMC_RX_PAUSE_FRAMES);
+ mmc->mmc_rx_fifo_overflow += readl(ioaddr + MMC_RX_FIFO_OVERFLOW);
+ mmc->mmc_rx_vlan_frames_gb += readl(ioaddr + MMC_RX_VLAN_FRAMES_GB);
+ mmc->mmc_rx_watchdog_error += readl(ioaddr + MMC_RX_WATCHDOG_ERROR);
+ /* IPC */
+ mmc->mmc_rx_ipc_intr_mask += readl(ioaddr + MMC_RX_IPC_INTR_MASK);
+ mmc->mmc_rx_ipc_intr += readl(ioaddr + MMC_RX_IPC_INTR);
+ /* IPv4 */
+ mmc->mmc_rx_ipv4_gd += readl(ioaddr + MMC_RX_IPV4_GD);
+ mmc->mmc_rx_ipv4_hderr += readl(ioaddr + MMC_RX_IPV4_HDERR);
+ mmc->mmc_rx_ipv4_nopay += readl(ioaddr + MMC_RX_IPV4_NOPAY);
+ mmc->mmc_rx_ipv4_frag += readl(ioaddr + MMC_RX_IPV4_FRAG);
+ mmc->mmc_rx_ipv4_udsbl += readl(ioaddr + MMC_RX_IPV4_UDSBL);
+
+ mmc->mmc_rx_ipv4_gd_octets += readl(ioaddr + MMC_RX_IPV4_GD_OCTETS);
+ mmc->mmc_rx_ipv4_hderr_octets +=
+ readl(ioaddr + MMC_RX_IPV4_HDERR_OCTETS);
+ mmc->mmc_rx_ipv4_nopay_octets +=
+ readl(ioaddr + MMC_RX_IPV4_NOPAY_OCTETS);
+ mmc->mmc_rx_ipv4_frag_octets += readl(ioaddr + MMC_RX_IPV4_FRAG_OCTETS);
+ mmc->mmc_rx_ipv4_udsbl_octets +=
+ readl(ioaddr + MMC_RX_IPV4_UDSBL_OCTETS);
+
+ /* IPv6 */
+ mmc->mmc_rx_ipv6_gd_octets += readl(ioaddr + MMC_RX_IPV6_GD_OCTETS);
+ mmc->mmc_rx_ipv6_hderr_octets +=
+ readl(ioaddr + MMC_RX_IPV6_HDERR_OCTETS);
+ mmc->mmc_rx_ipv6_nopay_octets +=
+ readl(ioaddr + MMC_RX_IPV6_NOPAY_OCTETS);
+
+ mmc->mmc_rx_ipv6_gd += readl(ioaddr + MMC_RX_IPV6_GD);
+ mmc->mmc_rx_ipv6_hderr += readl(ioaddr + MMC_RX_IPV6_HDERR);
+ mmc->mmc_rx_ipv6_nopay += readl(ioaddr + MMC_RX_IPV6_NOPAY);
+
+ /* Protocols */
+ mmc->mmc_rx_udp_gd += readl(ioaddr + MMC_RX_UDP_GD);
+ mmc->mmc_rx_udp_err += readl(ioaddr + MMC_RX_UDP_ERR);
+ mmc->mmc_rx_tcp_gd += readl(ioaddr + MMC_RX_TCP_GD);
+ mmc->mmc_rx_tcp_err += readl(ioaddr + MMC_RX_TCP_ERR);
+ mmc->mmc_rx_icmp_gd += readl(ioaddr + MMC_RX_ICMP_GD);
+ mmc->mmc_rx_icmp_err += readl(ioaddr + MMC_RX_ICMP_ERR);
+
+ mmc->mmc_rx_udp_gd_octets += readl(ioaddr + MMC_RX_UDP_GD_OCTETS);
+ mmc->mmc_rx_udp_err_octets += readl(ioaddr + MMC_RX_UDP_ERR_OCTETS);
+ mmc->mmc_rx_tcp_gd_octets += readl(ioaddr + MMC_RX_TCP_GD_OCTETS);
+ mmc->mmc_rx_tcp_err_octets += readl(ioaddr + MMC_RX_TCP_ERR_OCTETS);
+ mmc->mmc_rx_icmp_gd_octets += readl(ioaddr + MMC_RX_ICMP_GD_OCTETS);
+ mmc->mmc_rx_icmp_err_octets += readl(ioaddr + MMC_RX_ICMP_ERR_OCTETS);
+}
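+
+/* Usage sketch (illustrative): because the counters clear on read,
+ * dwmac_mmc_read() must be the only reader, and each call folds the
+ * hardware delta into the software accumulators, e.g.:
+ *
+ *     dwmac_mmc_read(priv->ioaddr, &priv->mmc);
+ *     pr_info("tx frames: %u\n", priv->mmc.mmc_tx_framecount_g);
+ *
+ * Reading the registers anywhere else would silently lose counts.
+ */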
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
new file mode 100644
index 000000000000..029c2a2cf524
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -0,0 +1,221 @@
+/*******************************************************************************
+ This contains the functions to handle the normal descriptors.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "common.h"
+
+static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, void __iomem *ioaddr)
+{
+ int ret = 0;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.tx.error_summary)) {
+ if (unlikely(p->des01.tx.underflow_error)) {
+ x->tx_underflow++;
+ stats->tx_fifo_errors++;
+ }
+ if (unlikely(p->des01.tx.no_carrier)) {
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.tx.loss_carrier)) {
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely((p->des01.tx.excessive_deferral) ||
+ (p->des01.tx.excessive_collisions) ||
+ (p->des01.tx.late_collision)))
+ stats->collisions += p->des01.tx.collision_count;
+ ret = -1;
+ }
+ if (unlikely(p->des01.tx.heartbeat_fail)) {
+ x->tx_heartbeat++;
+ stats->tx_heartbeat_errors++;
+ ret = -1;
+ }
+ if (unlikely(p->des01.tx.deferred))
+ x->tx_deferred++;
+
+ return ret;
+}
+
+static int ndesc_get_tx_len(struct dma_desc *p)
+{
+ return p->des01.tx.buffer1_size;
+}
+
+/* This function verifies whether the incoming frame has any errors
+ * and, if required, updates the multicast statistics.
+ * On success it returns csum_none, because this device cannot
+ * compute the checksum in HW. */
+static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ int ret = csum_none;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.rx.last_descriptor == 0)) {
+ pr_warning("ndesc Error: Oversized Ethernet "
+ "frame spanned multiple buffers\n");
+ stats->rx_length_errors++;
+ return discard_frame;
+ }
+
+ if (unlikely(p->des01.rx.error_summary)) {
+ if (unlikely(p->des01.rx.descriptor_error))
+ x->rx_desc++;
+ if (unlikely(p->des01.rx.partial_frame_error))
+ x->rx_partial++;
+ if (unlikely(p->des01.rx.run_frame))
+ x->rx_runt++;
+ if (unlikely(p->des01.rx.frame_too_long))
+ x->rx_toolong++;
+ if (unlikely(p->des01.rx.collision)) {
+ x->rx_collision++;
+ stats->collisions++;
+ }
+ if (unlikely(p->des01.rx.crc_error)) {
+ x->rx_crc++;
+ stats->rx_crc_errors++;
+ }
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.rx.dribbling))
+ ret = discard_frame;
+
+ if (unlikely(p->des01.rx.length_error)) {
+ x->rx_length++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.rx.mii_error)) {
+ x->rx_mii++;
+ ret = discard_frame;
+ }
+ if (p->des01.rx.multicast_frame) {
+ x->rx_multicast++;
+ stats->multicast++;
+ }
+ return ret;
+}
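+
+/* Sketch of how the return value is consumed by the RX loop (which
+ * lives in stmmac_main.c, not here; the surrounding code is only
+ * illustrative):
+ *
+ *     status = priv->hw->desc->rx_status(&dev->stats, &priv->xstats, p);
+ *     if (unlikely(status == discard_frame))
+ *             dev->stats.rx_errors++;
+ *     else
+ *             skb->ip_summed = CHECKSUM_NONE;
+ *
+ * i.e. discard_frame drops the packet, while csum_none lets the stack
+ * verify the checksum in software.
+ */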
+
+static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+ int disable_rx_ic)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.rx.own = 1;
+ p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+ if (i == ring_size - 1)
+ p->des01.rx.end_ring = 1;
+ if (disable_rx_ic)
+ p->des01.rx.disable_ic = 1;
+ p++;
+ }
+}
+
+static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.tx.own = 0;
+ if (i == ring_size - 1)
+ p->des01.tx.end_ring = 1;
+ p++;
+ }
+}
+
+static int ndesc_get_tx_owner(struct dma_desc *p)
+{
+ return p->des01.tx.own;
+}
+
+static int ndesc_get_rx_owner(struct dma_desc *p)
+{
+ return p->des01.rx.own;
+}
+
+static void ndesc_set_tx_owner(struct dma_desc *p)
+{
+ p->des01.tx.own = 1;
+}
+
+static void ndesc_set_rx_owner(struct dma_desc *p)
+{
+ p->des01.rx.own = 1;
+}
+
+static int ndesc_get_tx_ls(struct dma_desc *p)
+{
+ return p->des01.tx.last_segment;
+}
+
+static void ndesc_release_tx_desc(struct dma_desc *p)
+{
+ int ter = p->des01.tx.end_ring;
+
+ memset(p, 0, offsetof(struct dma_desc, des2));
+ /* set termination field */
+ p->des01.tx.end_ring = ter;
+}
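+
+/* Note: the memset() above wipes only des0/des1 (offsetof(..., des2)
+ * bytes), i.e. the status/control bits; the buffer pointers in
+ * des2/des3 are simply overwritten by the next prepare_tx_desc().
+ * The end-of-ring bit is the one control bit that must survive, since
+ * it marks the physical wrap of the ring, hence the save/restore. */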
+
+static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ int csum_flag)
+{
+ p->des01.tx.first_segment = is_fs;
+ p->des01.tx.buffer1_size = len;
+}
+
+static void ndesc_clear_tx_ic(struct dma_desc *p)
+{
+ p->des01.tx.interrupt = 0;
+}
+
+static void ndesc_close_tx_desc(struct dma_desc *p)
+{
+ p->des01.tx.last_segment = 1;
+ p->des01.tx.interrupt = 1;
+}
+
+static int ndesc_get_rx_frame_len(struct dma_desc *p)
+{
+ return p->des01.rx.frame_length;
+}
+
+const struct stmmac_desc_ops ndesc_ops = {
+ .tx_status = ndesc_get_tx_status,
+ .rx_status = ndesc_get_rx_status,
+ .get_tx_len = ndesc_get_tx_len,
+ .init_rx_desc = ndesc_init_rx_desc,
+ .init_tx_desc = ndesc_init_tx_desc,
+ .get_tx_owner = ndesc_get_tx_owner,
+ .get_rx_owner = ndesc_get_rx_owner,
+ .release_tx_desc = ndesc_release_tx_desc,
+ .prepare_tx_desc = ndesc_prepare_tx_desc,
+ .clear_tx_ic = ndesc_clear_tx_ic,
+ .close_tx_desc = ndesc_close_tx_desc,
+ .get_tx_ls = ndesc_get_tx_ls,
+ .set_tx_owner = ndesc_set_tx_owner,
+ .set_rx_owner = ndesc_set_rx_owner,
+ .get_rx_frame_len = ndesc_get_rx_frame_len,
+};
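+
+/* The driver never calls the ndesc_* helpers directly: all accesses
+ * are dispatched through a struct stmmac_desc_ops pointer, so the
+ * enhanced descriptor flavour can be substituted at setup time.
+ * A sketch of the assumed selection (the real assignment lives in the
+ * MAC setup code, not in this file):
+ *
+ *     if (enhanced_descriptors_supported)
+ *             mac->desc = &enh_desc_ops;
+ *     else
+ *             mac->desc = &ndesc_ops;
+ */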
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
new file mode 100644
index 000000000000..1434bdb390d4
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -0,0 +1,88 @@
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#define DRV_MODULE_VERSION "Aug_2011"
+#include <linux/stmmac.h>
+
+#include "common.h"
+#ifdef CONFIG_STMMAC_TIMER
+#include "stmmac_timer.h"
+#endif
+
+struct stmmac_priv {
+ /* Frequently used values are kept adjacent for cache effect */
+ struct dma_desc *dma_tx ____cacheline_aligned;
+ dma_addr_t dma_tx_phy;
+ struct sk_buff **tx_skbuff;
+ unsigned int cur_tx;
+ unsigned int dirty_tx;
+ unsigned int dma_tx_size;
+ int tx_coalesce;
+
+ struct dma_desc *dma_rx;
+ unsigned int cur_rx;
+ unsigned int dirty_rx;
+ struct sk_buff **rx_skbuff;
+ dma_addr_t *rx_skbuff_dma;
+ struct sk_buff_head rx_recycle;
+
+ struct net_device *dev;
+ dma_addr_t dma_rx_phy;
+ unsigned int dma_rx_size;
+ unsigned int dma_buf_sz;
+ struct device *device;
+ struct mac_device_info *hw;
+ void __iomem *ioaddr;
+
+ struct stmmac_extra_stats xstats;
+ struct napi_struct napi;
+
+ int rx_coe;
+ int no_csum_insertion;
+
+ struct phy_device *phydev;
+ int oldlink;
+ int speed;
+ int oldduplex;
+ unsigned int flow_ctrl;
+ unsigned int pause;
+ struct mii_bus *mii;
+ int mii_irq[PHY_MAX_ADDR];
+
+ u32 msg_enable;
+ spinlock_t lock;
+ int wolopts;
+ int wolenabled;
+ int wol_irq;
+#ifdef CONFIG_STMMAC_TIMER
+ struct stmmac_timer *tm;
+#endif
+ struct plat_stmmacenet_data *plat;
+ struct stmmac_counters mmc;
+ struct dma_features dma_cap;
+};
+
+extern int stmmac_mdio_unregister(struct net_device *ndev);
+extern int stmmac_mdio_register(struct net_device *ndev);
+extern void stmmac_set_ethtool_ops(struct net_device *netdev);
+extern const struct stmmac_desc_ops enh_desc_ops;
+extern const struct stmmac_desc_ops ndesc_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
new file mode 100644
index 000000000000..aedff9a90ebc
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -0,0 +1,473 @@
+/*******************************************************************************
+ STMMAC Ethtool support
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <asm/io.h>
+
+#include "stmmac.h"
+#include "dwmac_dma.h"
+
+#define REG_SPACE_SIZE 0x1054
+#define MAC100_ETHTOOL_NAME "st_mac100"
+#define GMAC_ETHTOOL_NAME "st_gmac"
+
+struct stmmac_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define STMMAC_STAT(m) \
+ { #m, FIELD_SIZEOF(struct stmmac_extra_stats, m), \
+ offsetof(struct stmmac_priv, xstats.m)}
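+
+/* For illustration, STMMAC_STAT(tx_underflow) expands to:
+ *
+ *     { "tx_underflow",
+ *       FIELD_SIZEOF(struct stmmac_extra_stats, tx_underflow),
+ *       offsetof(struct stmmac_priv, xstats.tx_underflow) }
+ *
+ * i.e. a printable name, the field width (u32 vs u64) and where the
+ * value lives inside stmmac_priv; stmmac_get_ethtool_stats() below
+ * walks the tables using exactly these three pieces of information. */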
+
+static const struct stmmac_stats stmmac_gstrings_stats[] = {
+ STMMAC_STAT(tx_underflow),
+ STMMAC_STAT(tx_carrier),
+ STMMAC_STAT(tx_losscarrier),
+ STMMAC_STAT(tx_heartbeat),
+ STMMAC_STAT(tx_deferred),
+ STMMAC_STAT(tx_vlan),
+ STMMAC_STAT(rx_vlan),
+ STMMAC_STAT(tx_jabber),
+ STMMAC_STAT(tx_frame_flushed),
+ STMMAC_STAT(tx_payload_error),
+ STMMAC_STAT(tx_ip_header_error),
+ STMMAC_STAT(rx_desc),
+ STMMAC_STAT(rx_partial),
+ STMMAC_STAT(rx_runt),
+ STMMAC_STAT(rx_toolong),
+ STMMAC_STAT(rx_collision),
+ STMMAC_STAT(rx_crc),
+ STMMAC_STAT(rx_length),
+ STMMAC_STAT(rx_mii),
+ STMMAC_STAT(rx_multicast),
+ STMMAC_STAT(rx_gmac_overflow),
+ STMMAC_STAT(rx_watchdog),
+ STMMAC_STAT(da_rx_filter_fail),
+ STMMAC_STAT(sa_rx_filter_fail),
+ STMMAC_STAT(rx_missed_cntr),
+ STMMAC_STAT(rx_overflow_cntr),
+ STMMAC_STAT(tx_undeflow_irq),
+ STMMAC_STAT(tx_process_stopped_irq),
+ STMMAC_STAT(tx_jabber_irq),
+ STMMAC_STAT(rx_overflow_irq),
+ STMMAC_STAT(rx_buf_unav_irq),
+ STMMAC_STAT(rx_process_stopped_irq),
+ STMMAC_STAT(rx_watchdog_irq),
+ STMMAC_STAT(tx_early_irq),
+ STMMAC_STAT(fatal_bus_error_irq),
+ STMMAC_STAT(threshold),
+ STMMAC_STAT(tx_pkt_n),
+ STMMAC_STAT(rx_pkt_n),
+ STMMAC_STAT(poll_n),
+ STMMAC_STAT(sched_timer_n),
+ STMMAC_STAT(normal_irq_n),
+};
+#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
+
+/* HW MAC Management counters (if supported) */
+#define STMMAC_MMC_STAT(m) \
+ { #m, FIELD_SIZEOF(struct stmmac_counters, m), \
+ offsetof(struct stmmac_priv, mmc.m)}
+
+static const struct stmmac_stats stmmac_gstr_mmc[] = {
+ STMMAC_MMC_STAT(mmc_tx_octetcount_gb),
+ STMMAC_MMC_STAT(mmc_tx_framecount_gb),
+ STMMAC_MMC_STAT(mmc_tx_broadcastframe_g),
+ STMMAC_MMC_STAT(mmc_tx_multicastframe_g),
+ STMMAC_MMC_STAT(mmc_tx_64_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_65_to_127_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_128_to_255_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_256_to_511_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_512_to_1023_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_1024_to_max_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_unicast_gb),
+ STMMAC_MMC_STAT(mmc_tx_multicast_gb),
+ STMMAC_MMC_STAT(mmc_tx_broadcast_gb),
+ STMMAC_MMC_STAT(mmc_tx_underflow_error),
+ STMMAC_MMC_STAT(mmc_tx_singlecol_g),
+ STMMAC_MMC_STAT(mmc_tx_multicol_g),
+ STMMAC_MMC_STAT(mmc_tx_deferred),
+ STMMAC_MMC_STAT(mmc_tx_latecol),
+ STMMAC_MMC_STAT(mmc_tx_exesscol),
+ STMMAC_MMC_STAT(mmc_tx_carrier_error),
+ STMMAC_MMC_STAT(mmc_tx_octetcount_g),
+ STMMAC_MMC_STAT(mmc_tx_framecount_g),
+ STMMAC_MMC_STAT(mmc_tx_excessdef),
+ STMMAC_MMC_STAT(mmc_tx_pause_frame),
+ STMMAC_MMC_STAT(mmc_tx_vlan_frame_g),
+ STMMAC_MMC_STAT(mmc_rx_framecount_gb),
+ STMMAC_MMC_STAT(mmc_rx_octetcount_gb),
+ STMMAC_MMC_STAT(mmc_rx_octetcount_g),
+ STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
+ STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
+ STMMAC_MMC_STAT(mmc_rx_crc_errror),
+ STMMAC_MMC_STAT(mmc_rx_align_error),
+ STMMAC_MMC_STAT(mmc_rx_run_error),
+ STMMAC_MMC_STAT(mmc_rx_jabber_error),
+ STMMAC_MMC_STAT(mmc_rx_undersize_g),
+ STMMAC_MMC_STAT(mmc_rx_oversize_g),
+ STMMAC_MMC_STAT(mmc_rx_64_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_65_to_127_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_128_to_255_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_256_to_511_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_512_to_1023_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_1024_to_max_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_unicast_g),
+ STMMAC_MMC_STAT(mmc_rx_length_error),
+ STMMAC_MMC_STAT(mmc_rx_autofrangetype),
+ STMMAC_MMC_STAT(mmc_rx_pause_frames),
+ STMMAC_MMC_STAT(mmc_rx_fifo_overflow),
+ STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb),
+ STMMAC_MMC_STAT(mmc_rx_watchdog_error),
+ STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask),
+ STMMAC_MMC_STAT(mmc_rx_ipc_intr),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_gd),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_hderr),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_nopay),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_frag),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_gd_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_hderr_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_nopay_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_frag_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_gd_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_hderr_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_nopay_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_gd),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_hderr),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_nopay),
+ STMMAC_MMC_STAT(mmc_rx_udp_gd),
+ STMMAC_MMC_STAT(mmc_rx_udp_err),
+ STMMAC_MMC_STAT(mmc_rx_tcp_gd),
+ STMMAC_MMC_STAT(mmc_rx_tcp_err),
+ STMMAC_MMC_STAT(mmc_rx_icmp_gd),
+ STMMAC_MMC_STAT(mmc_rx_icmp_err),
+ STMMAC_MMC_STAT(mmc_rx_udp_gd_octets),
+ STMMAC_MMC_STAT(mmc_rx_udp_err_octets),
+ STMMAC_MMC_STAT(mmc_rx_tcp_gd_octets),
+ STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
+ STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
+ STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
+};
+#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_gstr_mmc)
+
+static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (priv->plat->has_gmac)
+ strcpy(info->driver, GMAC_ETHTOOL_NAME);
+ else
+ strcpy(info->driver, MAC100_ETHTOOL_NAME);
+
+ strcpy(info->version, DRV_MODULE_VERSION);
+ info->fw_version[0] = '\0';
+}
+
+static int stmmac_ethtool_getsettings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct phy_device *phy = priv->phydev;
+ int rc;
+ if (phy == NULL) {
+ pr_err("%s: %s: PHY is not registered\n",
+ __func__, dev->name);
+ return -ENODEV;
+ }
+ if (!netif_running(dev)) {
+ pr_err("%s: interface is disabled: we cannot track "
+ "link speed / duplex setting\n", dev->name);
+ return -EBUSY;
+ }
+ cmd->transceiver = XCVR_INTERNAL;
+ spin_lock_irq(&priv->lock);
+ rc = phy_ethtool_gset(phy, cmd);
+ spin_unlock_irq(&priv->lock);
+ return rc;
+}
+
+static int stmmac_ethtool_setsettings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct phy_device *phy = priv->phydev;
+ int rc;
+
+ spin_lock(&priv->lock);
+ rc = phy_ethtool_sset(phy, cmd);
+ spin_unlock(&priv->lock);
+
+ return rc;
+}
+
+static u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ return priv->msg_enable;
+}
+
+static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ priv->msg_enable = level;
+}
+
+static int stmmac_check_if_running(struct net_device *dev)
+{
+ if (!netif_running(dev))
+ return -EBUSY;
+ return 0;
+}
+
+static int stmmac_ethtool_get_regs_len(struct net_device *dev)
+{
+ return REG_SPACE_SIZE;
+}
+
+static void stmmac_ethtool_gregs(struct net_device *dev,
+ struct ethtool_regs *regs, void *space)
+{
+ int i;
+ u32 *reg_space = (u32 *) space;
+
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ memset(reg_space, 0x0, REG_SPACE_SIZE);
+
+ if (!priv->plat->has_gmac) {
+ /* MAC registers */
+ for (i = 0; i < 12; i++)
+ reg_space[i] = readl(priv->ioaddr + (i * 4));
+ /* DMA registers */
+ for (i = 0; i < 9; i++)
+ reg_space[i + 12] =
+ readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
+ reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR);
+ reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR);
+ } else {
+ /* MAC registers */
+ for (i = 0; i < 55; i++)
+ reg_space[i] = readl(priv->ioaddr + (i * 4));
+ /* DMA registers */
+ for (i = 0; i < 22; i++)
+ reg_space[i + 55] =
+ readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
+ }
+}
+
+static void
+stmmac_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+
+ spin_lock(&priv->lock);
+
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
+ pause->autoneg = priv->phydev->autoneg;
+
+ if (priv->flow_ctrl & FLOW_RX)
+ pause->rx_pause = 1;
+ if (priv->flow_ctrl & FLOW_TX)
+ pause->tx_pause = 1;
+
+ spin_unlock(&priv->lock);
+}
+
+static int
+stmmac_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+ struct phy_device *phy = priv->phydev;
+ int new_pause = FLOW_OFF;
+ int ret = 0;
+
+ spin_lock(&priv->lock);
+
+ if (pause->rx_pause)
+ new_pause |= FLOW_RX;
+ if (pause->tx_pause)
+ new_pause |= FLOW_TX;
+
+ priv->flow_ctrl = new_pause;
+ phy->autoneg = pause->autoneg;
+
+ if (phy->autoneg) {
+ if (netif_running(netdev))
+ ret = phy_start_aneg(phy);
+ } else
+ priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex,
+ priv->flow_ctrl, priv->pause);
+ spin_unlock(&priv->lock);
+ return ret;
+}
+
+static void stmmac_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *dummy, u64 *data)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int i, j = 0;
+
+ /* Update the DMA HW counters for dwmac10/100 */
+ if (!priv->plat->has_gmac)
+ priv->hw->dma->dma_diagnostic_fr(&dev->stats,
+ (void *) &priv->xstats,
+ priv->ioaddr);
+ else {
+ /* For new GMAC chips, expose the MMC counters if supported */
+ dwmac_mmc_read(priv->ioaddr, &priv->mmc);
+
+ for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
+ char *p = (char *)priv + stmmac_gstr_mmc[i].stat_offset;
+
+ data[j++] = (stmmac_gstr_mmc[i].sizeof_stat ==
+ sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
+ }
+ }
+ for (i = 0; i < STMMAC_STATS_LEN; i++) {
+ char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
+ data[j++] = (stmmac_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
+ }
+}
+
+static int stmmac_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+ int len;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ len = STMMAC_STATS_LEN;
+
+ if (priv->plat->has_gmac)
+ len += STMMAC_MMC_STATS_LEN;
+
+ return len;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+ u8 *p = data;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ if (priv->plat->has_gmac)
+ for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
+ memcpy(p, stmmac_gstr_mmc[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < STMMAC_STATS_LEN; i++) {
+ memcpy(p, stmmac_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
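+
+/* The three callbacks above must stay in lock-step: on a GMAC,
+ * stmmac_get_sset_count() reports MMC + core stats,
+ * stmmac_get_strings() emits the MMC names first and
+ * stmmac_get_ethtool_stats() fills the values in the same order,
+ * so that "ethtool -S ethX" pairs every name with the right value. */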
+
+/* Currently only supports WOL through Magic packet and unicast frames. */
+static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ spin_lock_irq(&priv->lock);
+ if (device_can_wakeup(priv->device)) {
+ wol->supported = WAKE_MAGIC | WAKE_UCAST;
+ wol->wolopts = priv->wolopts;
+ }
+ spin_unlock_irq(&priv->lock);
+}
+
+static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 support = WAKE_MAGIC | WAKE_UCAST;
+
+ if (!device_can_wakeup(priv->device))
+ return -EINVAL;
+
+ if (wol->wolopts & ~support)
+ return -EINVAL;
+
+ if (wol->wolopts) {
+ pr_info("stmmac: wakeup enable\n");
+ device_set_wakeup_enable(priv->device, 1);
+ enable_irq_wake(priv->wol_irq);
+ } else {
+ device_set_wakeup_enable(priv->device, 0);
+ disable_irq_wake(priv->wol_irq);
+ }
+
+ spin_lock_irq(&priv->lock);
+ priv->wolopts = wol->wolopts;
+ spin_unlock_irq(&priv->lock);
+
+ return 0;
+}
+
+static struct ethtool_ops stmmac_ethtool_ops = {
+ .begin = stmmac_check_if_running,
+ .get_drvinfo = stmmac_ethtool_getdrvinfo,
+ .get_settings = stmmac_ethtool_getsettings,
+ .set_settings = stmmac_ethtool_setsettings,
+ .get_msglevel = stmmac_ethtool_getmsglevel,
+ .set_msglevel = stmmac_ethtool_setmsglevel,
+ .get_regs = stmmac_ethtool_gregs,
+ .get_regs_len = stmmac_ethtool_get_regs_len,
+ .get_link = ethtool_op_get_link,
+ .get_pauseparam = stmmac_get_pauseparam,
+ .set_pauseparam = stmmac_set_pauseparam,
+ .get_ethtool_stats = stmmac_get_ethtool_stats,
+ .get_strings = stmmac_get_strings,
+ .get_wol = stmmac_get_wol,
+ .set_wol = stmmac_set_wol,
+ .get_sset_count = stmmac_get_sset_count,
+};
+
+void stmmac_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
new file mode 100644
index 000000000000..c0ee6b6b0198
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -0,0 +1,2172 @@
+/*******************************************************************************
+ This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
+ ST Ethernet IPs are built around a Synopsys IP Core.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+
+ Documentation available at:
+ http://www.stlinux.com
+ Support available at:
+ https://bugzilla.stlinux.com/
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+#include "stmmac.h"
+#ifdef CONFIG_STMMAC_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#endif
+
+#define STMMAC_RESOURCE_NAME "stmmaceth"
+
+#undef STMMAC_DEBUG
+/*#define STMMAC_DEBUG*/
+#ifdef STMMAC_DEBUG
+#define DBG(nlevel, klevel, fmt, args...) \
+ ((void)(netif_msg_##nlevel(priv) && \
+ printk(KERN_##klevel fmt, ## args)))
+#else
+#define DBG(nlevel, klevel, fmt, args...) do { } while (0)
+#endif
+
+#undef STMMAC_RX_DEBUG
+/*#define STMMAC_RX_DEBUG*/
+#ifdef STMMAC_RX_DEBUG
+#define RX_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define RX_DBG(fmt, args...) do { } while (0)
+#endif
+
+#undef STMMAC_XMIT_DEBUG
+/*#define STMMAC_XMIT_DEBUG*/
+#ifdef STMMAC_XMIT_DEBUG
+#define TX_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define TX_DBG(fmt, args...) do { } while (0)
+#endif
+
+#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
+#define JUMBO_LEN 9000
+
+/* Module parameters */
+#define TX_TIMEO 5000 /* default 5 seconds */
+static int watchdog = TX_TIMEO;
+module_param(watchdog, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds");
+
+static int debug = -1; /* -1: default, 0: no output, 16: all */
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)");
+
+static int phyaddr = -1;
+module_param(phyaddr, int, S_IRUGO);
+MODULE_PARM_DESC(phyaddr, "Physical device address");
+
+#define DMA_TX_SIZE 256
+static int dma_txsize = DMA_TX_SIZE;
+module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list");
+
+#define DMA_RX_SIZE 256
+static int dma_rxsize = DMA_RX_SIZE;
+module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list");
+
+static int flow_ctrl = FLOW_OFF;
+module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
+
+static int pause = PAUSE_TIME;
+module_param(pause, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(pause, "Flow Control Pause Time");
+
+#define TC_DEFAULT 64
+static int tc = TC_DEFAULT;
+module_param(tc, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tc, "DMA threshold control value");
+
+/* Take care when tuning this parameter: both the hardware capability
+ * and the impact on network stability/performance have to be
+ * considered. Many tests showed that a latency of ~4ms is good enough. */
+#ifdef CONFIG_STMMAC_TIMER
+#define DEFAULT_PERIODIC_RATE 256
+static int tmrate = DEFAULT_PERIODIC_RATE;
+module_param(tmrate, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tmrate, "External timer freq. (default: 256Hz)");
+#endif
+
+#define DMA_BUFFER_SIZE BUF_SIZE_2KiB
+static int buf_sz = DMA_BUFFER_SIZE;
+module_param(buf_sz, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(buf_sz, "DMA buffer size");
+
+static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
+
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
+
+/**
+ * stmmac_verify_args - verify the driver parameters.
+ * Description: it checks the driver parameters and replaces any
+ * invalid value with its default.
+ */
+static void stmmac_verify_args(void)
+{
+ if (unlikely(watchdog < 0))
+ watchdog = TX_TIMEO;
+ if (unlikely(dma_rxsize < 0))
+ dma_rxsize = DMA_RX_SIZE;
+ if (unlikely(dma_txsize < 0))
+ dma_txsize = DMA_TX_SIZE;
+ if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
+ buf_sz = DMA_BUFFER_SIZE;
+ if (unlikely(flow_ctrl > 1))
+ flow_ctrl = FLOW_AUTO;
+ else if (likely(flow_ctrl < 0))
+ flow_ctrl = FLOW_OFF;
+ if (unlikely((pause < 0) || (pause > 0xffff)))
+ pause = PAUSE_TIME;
+}
+
+#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
+static void print_pkt(unsigned char *buf, int len)
+{
+ int j;
+ pr_info("len = %d byte, buf addr: 0x%p", len, buf);
+ for (j = 0; j < len; j++) {
+ if ((j % 16) == 0)
+ pr_info("\n %03x:", j);
+ pr_info(" %02x", buf[j]);
+ }
+ pr_info("\n");
+}
+#endif
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define STMMAC_TX_THRESH(x) (x->dma_tx_size/4)
+
+static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
+{
+ return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
+}
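+
+/* Worked example (ring size 256): with cur_tx = 300 and dirty_tx = 60
+ * (both are free-running counters, reduced modulo the ring size only
+ * when indexing), stmmac_tx_avail() returns 60 + 256 - 300 - 1 = 15
+ * free descriptors. The "- 1" keeps one descriptor permanently in
+ * reserve, so the TX producer can never completely fill the ring. */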
+
+/* On some ST platforms, some HW system configuration registers have to
+ * be set according to the negotiated link speed.
+ */
+static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
+{
+ struct phy_device *phydev = priv->phydev;
+
+ if (likely(priv->plat->fix_mac_speed))
+ priv->plat->fix_mac_speed(priv->plat->bsp_priv,
+ phydev->speed);
+}
+
+/**
+ * stmmac_adjust_link
+ * @dev: net device structure
+ * Description: it adjusts the link parameters.
+ */
+static void stmmac_adjust_link(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ unsigned long flags;
+ int new_state = 0;
+ unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
+
+ if (phydev == NULL)
+ return;
+
+ DBG(probe, DEBUG, "stmmac_adjust_link: called. address %d link %d\n",
+ phydev->addr, phydev->link);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (phydev->link) {
+ u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+
+ /* Now we make sure that we can be in full duplex mode.
+ * If not, we operate in half-duplex mode. */
+ if (phydev->duplex != priv->oldduplex) {
+ new_state = 1;
+ if (!(phydev->duplex))
+ ctrl &= ~priv->hw->link.duplex;
+ else
+ ctrl |= priv->hw->link.duplex;
+ priv->oldduplex = phydev->duplex;
+ }
+ /* Flow Control operation */
+ if (phydev->pause)
+ priv->hw->mac->flow_ctrl(priv->ioaddr, phydev->duplex,
+ fc, pause_time);
+
+ if (phydev->speed != priv->speed) {
+ new_state = 1;
+ switch (phydev->speed) {
+ case 1000:
+ if (likely(priv->plat->has_gmac))
+ ctrl &= ~priv->hw->link.port;
+ stmmac_hw_fix_mac_speed(priv);
+ break;
+ case 100:
+ case 10:
+ if (priv->plat->has_gmac) {
+ ctrl |= priv->hw->link.port;
+ if (phydev->speed == SPEED_100) {
+ ctrl |= priv->hw->link.speed;
+ } else {
+ ctrl &= ~(priv->hw->link.speed);
+ }
+ } else {
+ ctrl &= ~priv->hw->link.port;
+ }
+ stmmac_hw_fix_mac_speed(priv);
+ break;
+ default:
+ if (netif_msg_link(priv))
+ pr_warning("%s: Speed (%d) is not 10"
+ " or 100!\n", dev->name, phydev->speed);
+ break;
+ }
+
+ priv->speed = phydev->speed;
+ }
+
+ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+
+ if (!priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 1;
+ }
+ } else if (priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 0;
+ priv->speed = 0;
+ priv->oldduplex = -1;
+ }
+
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
+}
+
+/**
+ * stmmac_init_phy - PHY initialization
+ * @dev: net device structure
+ * Description: it initializes the driver's PHY state, and attaches the PHY
+ * to the MAC driver.
+ * Return value:
+ * 0 on success
+ */
+static int stmmac_init_phy(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev;
+ char phy_id[MII_BUS_ID_SIZE + 3];
+ char bus_id[MII_BUS_ID_SIZE];
+
+ priv->oldlink = 0;
+ priv->speed = 0;
+ priv->oldduplex = -1;
+
+ snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
+ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+ priv->plat->phy_addr);
+ pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
+
+ phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
+ priv->plat->interface);
+
+ if (IS_ERR(phydev)) {
+ pr_err("%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(phydev);
+ }
+
+ /*
+ * Broken HW is sometimes missing the pull-up resistor on the
+ * MDIO line, which results in reads to non-existent devices returning
+ * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
+ * device as well.
+ * Note: phydev->phy_id is the result of reading the UID PHY registers.
+ */
+ if (phydev->phy_id == 0) {
+ phy_disconnect(phydev);
+ return -ENODEV;
+ }
+ pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
+ " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
+
+ priv->phydev = phydev;
+
+ return 0;
+}
+
+static inline void stmmac_enable_mac(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + MAC_CTRL_REG);
+
+ value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
+ writel(value, ioaddr + MAC_CTRL_REG);
+}
+
+static inline void stmmac_disable_mac(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + MAC_CTRL_REG);
+
+ value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
+ writel(value, ioaddr + MAC_CTRL_REG);
+}
+
+/**
+ * display_ring
+ * @p: pointer to the ring.
+ * @size: size of the ring.
+ * Description: display all the descriptors within the ring.
+ */
+static void display_ring(struct dma_desc *p, int size)
+{
+ struct tmp_s {
+ u64 a;
+ unsigned int b;
+ unsigned int c;
+ };
+ int i;
+ for (i = 0; i < size; i++) {
+ struct tmp_s *x = (struct tmp_s *)(p + i);
+ pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
+ i, (unsigned int)virt_to_phys(&p[i]),
+ (unsigned int)(x->a), (unsigned int)((x->a) >> 32),
+ x->b, x->c);
+ pr_info("\n");
+ }
+}
+
+/**
+ * init_dma_desc_rings - init the RX/TX descriptor rings
+ * @dev: net device structure
+ * Description: this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers.
+ */
+static void init_dma_desc_rings(struct net_device *dev)
+{
+ int i;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct sk_buff *skb;
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int rxsize = priv->dma_rx_size;
+ unsigned int bfsize = priv->dma_buf_sz;
+ int buff2_needed = 0, dis_ic = 0;
+
+ /* Set the buffer size according to the MTU;
+ * in the jumbo-frame case the buffer sizes have to be bumped up.
+ */
+ if (unlikely(dev->mtu >= BUF_SIZE_8KiB))
+ bfsize = BUF_SIZE_16KiB;
+ else if (unlikely(dev->mtu >= BUF_SIZE_4KiB))
+ bfsize = BUF_SIZE_8KiB;
+ else if (unlikely(dev->mtu >= BUF_SIZE_2KiB))
+ bfsize = BUF_SIZE_4KiB;
+ else if (unlikely(dev->mtu >= DMA_BUFFER_SIZE))
+ bfsize = BUF_SIZE_2KiB;
+ else
+ bfsize = DMA_BUFFER_SIZE;
+
+#ifdef CONFIG_STMMAC_TIMER
+ /* Disable interrupts on completion for the reception if timer is on */
+ if (likely(priv->tm->enable))
+ dis_ic = 1;
+#endif
+ /* If the MTU exceeds 8k, use the second buffer in the chain */
+ if (bfsize >= BUF_SIZE_8KiB)
+ buff2_needed = 1;
+
+ DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
+ txsize, rxsize, bfsize);
+
+ priv->rx_skbuff_dma = kmalloc(rxsize * sizeof(dma_addr_t), GFP_KERNEL);
+ priv->rx_skbuff =
+ kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL);
+ priv->dma_rx =
+ (struct dma_desc *)dma_alloc_coherent(priv->device,
+ rxsize *
+ sizeof(struct dma_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
+ priv->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * txsize,
+ GFP_KERNEL);
+ priv->dma_tx =
+ (struct dma_desc *)dma_alloc_coherent(priv->device,
+ txsize *
+ sizeof(struct dma_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
+
+ if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) {
+ pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__);
+ return;
+ }
+
+ DBG(probe, INFO, "stmmac (%s) DMA desc rings: virt addr (Rx %p, "
+ "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
+ dev->name, priv->dma_rx, priv->dma_tx,
+ (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
+
+ /* RX INITIALIZATION */
+ DBG(probe, INFO, "stmmac: SKB addresses:\n"
+ "skb\t\tskb data\tdma data\n");
+
+ for (i = 0; i < rxsize; i++) {
+ struct dma_desc *p = priv->dma_rx + i;
+
+ skb = netdev_alloc_skb_ip_align(dev, bfsize);
+ if (unlikely(skb == NULL)) {
+ pr_err("%s: Rx init fails; skb is NULL\n", __func__);
+ break;
+ }
+ priv->rx_skbuff[i] = skb;
+ priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
+ bfsize, DMA_FROM_DEVICE);
+
+ p->des2 = priv->rx_skbuff_dma[i];
+ if (unlikely(buff2_needed))
+ p->des3 = p->des2 + BUF_SIZE_8KiB;
+ DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
+ priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
+ }
+ priv->cur_rx = 0;
+ priv->dirty_rx = (unsigned int)(i - rxsize);
+ priv->dma_buf_sz = bfsize;
+ buf_sz = bfsize;
+
+ /* TX INITIALIZATION */
+ for (i = 0; i < txsize; i++) {
+ priv->tx_skbuff[i] = NULL;
+ priv->dma_tx[i].des2 = 0;
+ }
+ priv->dirty_tx = 0;
+ priv->cur_tx = 0;
+
+ /* Clear the Rx/Tx descriptors */
+ priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
+ priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);
+
+ if (netif_msg_hw(priv)) {
+ pr_info("RX descriptor ring:\n");
+ display_ring(priv->dma_rx, rxsize);
+ pr_info("TX descriptor ring:\n");
+ display_ring(priv->dma_tx, txsize);
+ }
+}
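+
+/* Worked examples of the bfsize selection above (DMA_BUFFER_SIZE is
+ * BUF_SIZE_2KiB):
+ *
+ *     MTU 1500 -> bfsize = BUF_SIZE_2KiB  (single buffer)
+ *     MTU 4000 -> bfsize = BUF_SIZE_4KiB  (single buffer)
+ *     MTU 9000 -> bfsize = BUF_SIZE_16KiB (buff2_needed = 1)
+ *
+ * Any bfsize >= BUF_SIZE_8KiB also populates the second buffer
+ * pointer (des3), since a single descriptor buffer cannot cover the
+ * whole frame. */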
+
+static void dma_free_rx_skbufs(struct stmmac_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->dma_rx_size; i++) {
+ if (priv->rx_skbuff[i]) {
+ dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(priv->rx_skbuff[i]);
+ }
+ priv->rx_skbuff[i] = NULL;
+ }
+}
+
+static void dma_free_tx_skbufs(struct stmmac_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->dma_tx_size; i++) {
+ if (priv->tx_skbuff[i] != NULL) {
+ struct dma_desc *p = priv->dma_tx + i;
+ if (p->des2)
+ dma_unmap_single(priv->device, p->des2,
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(priv->tx_skbuff[i]);
+ priv->tx_skbuff[i] = NULL;
+ }
+ }
+}
+
+static void free_dma_desc_resources(struct stmmac_priv *priv)
+{
+ /* Release the DMA TX/RX socket buffers */
+ dma_free_rx_skbufs(priv);
+ dma_free_tx_skbufs(priv);
+
+ /* Free the region of consistent memory previously allocated for
+ * the DMA */
+ dma_free_coherent(priv->device,
+ priv->dma_tx_size * sizeof(struct dma_desc),
+ priv->dma_tx, priv->dma_tx_phy);
+ dma_free_coherent(priv->device,
+ priv->dma_rx_size * sizeof(struct dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+ kfree(priv->rx_skbuff_dma);
+ kfree(priv->rx_skbuff);
+ kfree(priv->tx_skbuff);
+}
+
+/**
+ * stmmac_dma_operation_mode - HW DMA operation mode
+ * @priv : pointer to the private device structure.
+ * Description: it sets the DMA operation mode: tx/rx DMA thresholds
+ * or Store-And-Forward capability.
+ */
+static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
+{
+ if (likely(priv->plat->force_sf_dma_mode ||
+ ((priv->plat->tx_coe) && (!priv->no_csum_insertion)))) {
+ /*
+ * On GMAC, store-and-forward (SF) mode can be enabled so the
+ * TX COE is performed in HW. This requires:
+ * 1) the TX COE actually being supported;
+ * 2) no buggy jumbo-frame support that forces the csum
+ * not to be inserted in the TDES.
+ */
+ priv->hw->dma->dma_mode(priv->ioaddr,
+ SF_DMA_MODE, SF_DMA_MODE);
+ tc = SF_DMA_MODE;
+ } else
+ priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
+}
+
+/**
+ * stmmac_tx:
+ * @priv: private driver structure
+ * Description: it reclaims resources after transmission completes.
+ */
+static void stmmac_tx(struct stmmac_priv *priv)
+{
+ unsigned int txsize = priv->dma_tx_size;
+
+ while (priv->dirty_tx != priv->cur_tx) {
+ int last;
+ unsigned int entry = priv->dirty_tx % txsize;
+ struct sk_buff *skb = priv->tx_skbuff[entry];
+ struct dma_desc *p = priv->dma_tx + entry;
+
+ /* Check if the descriptor is owned by the DMA. */
+ if (priv->hw->desc->get_tx_owner(p))
+ break;
+
+ /* Verify tx error by looking at the last segment */
+ last = priv->hw->desc->get_tx_ls(p);
+ if (likely(last)) {
+ int tx_error =
+ priv->hw->desc->tx_status(&priv->dev->stats,
+ &priv->xstats, p,
+ priv->ioaddr);
+ if (likely(tx_error == 0)) {
+ priv->dev->stats.tx_packets++;
+ priv->xstats.tx_pkt_n++;
+ } else
+ priv->dev->stats.tx_errors++;
+ }
+ TX_DBG("%s: curr %d, dirty %d\n", __func__,
+ priv->cur_tx, priv->dirty_tx);
+
+ if (likely(p->des2))
+ dma_unmap_single(priv->device, p->des2,
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
+ if (unlikely(p->des3))
+ p->des3 = 0;
+
+ if (likely(skb != NULL)) {
+ /*
+ * If there is room in the recycle queue (limited to the
+ * RX ring size) and the skb is the right size, put it
+ * back into the pool; otherwise free it.
+ */
+ if ((skb_queue_len(&priv->rx_recycle) <
+ priv->dma_rx_size) &&
+ skb_recycle_check(skb, priv->dma_buf_sz))
+ __skb_queue_head(&priv->rx_recycle, skb);
+ else
+ dev_kfree_skb(skb);
+
+ priv->tx_skbuff[entry] = NULL;
+ }
+
+ priv->hw->desc->release_tx_desc(p);
+
+ entry = (++priv->dirty_tx) % txsize;
+ }
+ if (unlikely(netif_queue_stopped(priv->dev) &&
+ stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
+ netif_tx_lock(priv->dev);
+ if (netif_queue_stopped(priv->dev) &&
+ stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
+ TX_DBG("%s: restart transmit\n", __func__);
+ netif_wake_queue(priv->dev);
+ }
+ netif_tx_unlock(priv->dev);
+ }
+}
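+
+/* The wake-up at the end of stmmac_tx() follows the usual
+ * check/lock/re-check pattern (sketch, with illustrative names):
+ *
+ *     if (queue_stopped && enough_room) {
+ *             netif_tx_lock(dev);
+ *             if (queue_stopped && enough_room)
+ *                     netif_wake_queue(dev);
+ *             netif_tx_unlock(dev);
+ *     }
+ *
+ * The cheap first test runs unlocked; the re-test under netif_tx_lock()
+ * closes the race with a concurrent stmmac_xmit() that may stop the
+ * queue between the two checks. */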
+
+static inline void stmmac_enable_irq(struct stmmac_priv *priv)
+{
+#ifdef CONFIG_STMMAC_TIMER
+ if (likely(priv->tm->enable))
+ priv->tm->timer_start(tmrate);
+ else
+#endif
+ priv->hw->dma->enable_dma_irq(priv->ioaddr);
+}
+
+static inline void stmmac_disable_irq(struct stmmac_priv *priv)
+{
+#ifdef CONFIG_STMMAC_TIMER
+ if (likely(priv->tm->enable))
+ priv->tm->timer_stop();
+ else
+#endif
+ priv->hw->dma->disable_dma_irq(priv->ioaddr);
+}
+
+static int stmmac_has_work(struct stmmac_priv *priv)
+{
+ unsigned int has_work = 0;
+ int rxret, tx_work = 0;
+
+ rxret = priv->hw->desc->get_rx_owner(priv->dma_rx +
+ (priv->cur_rx % priv->dma_rx_size));
+
+ if (priv->dirty_tx != priv->cur_tx)
+ tx_work = 1;
+
+ if (likely(!rxret || tx_work))
+ has_work = 1;
+
+ return has_work;
+}
+
+static inline void _stmmac_schedule(struct stmmac_priv *priv)
+{
+ if (likely(stmmac_has_work(priv))) {
+ stmmac_disable_irq(priv);
+ napi_schedule(&priv->napi);
+ }
+}
+
+#ifdef CONFIG_STMMAC_TIMER
+void stmmac_schedule(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ priv->xstats.sched_timer_n++;
+
+ _stmmac_schedule(priv);
+}
+
+static void stmmac_no_timer_started(unsigned int x)
+{
+}
+
+static void stmmac_no_timer_stopped(void)
+{
+}
+#endif
+
+/**
+ * stmmac_tx_err:
+ * @priv: pointer to the private device structure
+ * Description: it cleans the descriptors and restarts the transmission
+ * in case of errors.
+ */
+static void stmmac_tx_err(struct stmmac_priv *priv)
+{
+ netif_stop_queue(priv->dev);
+
+ priv->hw->dma->stop_tx(priv->ioaddr);
+ dma_free_tx_skbufs(priv);
+ priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+ priv->dirty_tx = 0;
+ priv->cur_tx = 0;
+ priv->hw->dma->start_tx(priv->ioaddr);
+
+ priv->dev->stats.tx_errors++;
+ netif_wake_queue(priv->dev);
+}
+
+static void stmmac_dma_interrupt(struct stmmac_priv *priv)
+{
+ int status;
+
+ status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
+ if (likely(status == handle_tx_rx))
+ _stmmac_schedule(priv);
+
+ else if (unlikely(status == tx_hard_error_bump_tc)) {
+ /* Try to bump up the dma threshold on this failure */
+ if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
+ tc += 64;
+ priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
+ priv->xstats.threshold = tc;
+ }
+ } else if (unlikely(status == tx_hard_error))
+ stmmac_tx_err(priv);
+}
+
+static void stmmac_mmc_setup(struct stmmac_priv *priv)
+{
+ unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
+ MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
+
+ /* Do not manage MMC IRQ (FIXME) */
+ dwmac_mmc_intr_all_mask(priv->ioaddr);
+ dwmac_mmc_ctrl(priv->ioaddr, mode);
+ memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+}
+
+static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
+{
+ u32 hwid = priv->hw->synopsys_uid;
+
+ /* Only check for a valid Synopsys ID because old MAC chips
+ * have no HW register from which to read it */
+ if (likely(hwid)) {
+ u32 uid = ((hwid & 0x0000ff00) >> 8);
+ u32 synid = (hwid & 0x000000ff);
+
+ pr_info("STMMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
+ uid, synid);
+
+ return synid;
+ }
+ return 0;
+}
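+
+/* Example (value only for illustration): a synopsys_uid register of
+ * 0x00001037 yields uid = (0x1037 & 0xff00) >> 8 = 0x10 and
+ * synid = 0x1037 & 0xff = 0x37. */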
+
+/* New GMAC chips support a register that indicates the
+ * presence of optional features/functions.
+ */
+static int stmmac_get_hw_features(struct stmmac_priv *priv)
+{
+ u32 hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);
+
+ if (likely(hw_cap)) {
+ priv->dma_cap.mbps_10_100 = (hw_cap & 0x1);
+ priv->dma_cap.mbps_1000 = (hw_cap & 0x2) >> 1;
+ priv->dma_cap.half_duplex = (hw_cap & 0x4) >> 2;
+ priv->dma_cap.hash_filter = (hw_cap & 0x10) >> 4;
+ priv->dma_cap.multi_addr = (hw_cap & 0x20) >> 5;
+ priv->dma_cap.pcs = (hw_cap & 0x40) >> 6;
+ priv->dma_cap.sma_mdio = (hw_cap & 0x100) >> 8;
+ priv->dma_cap.pmt_remote_wake_up = (hw_cap & 0x200) >> 9;
+ priv->dma_cap.pmt_magic_frame = (hw_cap & 0x400) >> 10;
+ priv->dma_cap.rmon = (hw_cap & 0x800) >> 11; /* MMC */
+ /* IEEE 1588-2002 */
+ priv->dma_cap.time_stamp = (hw_cap & 0x1000) >> 12;
+ /* IEEE 1588-2008 */
+ priv->dma_cap.atime_stamp = (hw_cap & 0x2000) >> 13;
+ /* 802.3az - Energy-Efficient Ethernet (EEE) */
+ priv->dma_cap.eee = (hw_cap & 0x4000) >> 14;
+ priv->dma_cap.av = (hw_cap & 0x8000) >> 15;
+ /* TX and RX csum */
+ priv->dma_cap.tx_coe = (hw_cap & 0x10000) >> 16;
+ priv->dma_cap.rx_coe_type1 = (hw_cap & 0x20000) >> 17;
+ priv->dma_cap.rx_coe_type2 = (hw_cap & 0x40000) >> 18;
+ priv->dma_cap.rxfifo_over_2048 = (hw_cap & 0x80000) >> 19;
+ /* TX and RX number of channels */
+ priv->dma_cap.number_rx_channel = (hw_cap & 0x300000) >> 20;
+ priv->dma_cap.number_tx_channel = (hw_cap & 0xc00000) >> 22;
+ /* Alternate (enhanced) DESC mode */
+ priv->dma_cap.enh_desc = (hw_cap & 0x1000000) >> 24;
+
+ } else
+ pr_debug("\tNo HW DMA feature register supported");
+
+ return hw_cap;
+}
+
+/**
+ * stmmac_open - open entry point of the driver
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function is the open entry point of the driver.
+ * Return value:
+ * 0 on success and a negative errno value on failure.
+ */
+static int stmmac_open(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
+
+ /* Check that the MAC address is valid. If it is not, fall
+ * back to a random address so the device can still come up;
+ * the user can later set a real one, e.g.:
+ * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ random_ether_addr(dev->dev_addr);
+ pr_warning("%s: generated random MAC address %pM\n", dev->name,
+ dev->dev_addr);
+ }
+
+ stmmac_verify_args();
+
+#ifdef CONFIG_STMMAC_TIMER
+ priv->tm = kzalloc(sizeof(struct stmmac_timer), GFP_KERNEL);
+ if (unlikely(priv->tm == NULL)) {
+ pr_err("%s: ERROR: timer memory alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+ priv->tm->freq = tmrate;
+
+ /* Test whether the external timer can actually be used;
+ * in case of failure, continue without it. */
+ if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) {
+ pr_warning("stmmaceth: cannot attach the external timer.\n");
+ priv->tm->freq = 0;
+ priv->tm->timer_start = stmmac_no_timer_started;
+ priv->tm->timer_stop = stmmac_no_timer_stopped;
+ } else
+ priv->tm->enable = 1;
+#endif
+ ret = stmmac_init_phy(dev);
+ if (unlikely(ret)) {
+ pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
+ goto open_error;
+ }
+
+ /* Create and initialize the TX/RX descriptors chains. */
+ priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
+ priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
+ priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
+ init_dma_desc_rings(dev);
+
+ /* DMA initialization and SW reset */
+ ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
+ priv->dma_tx_phy, priv->dma_rx_phy);
+ if (ret < 0) {
+ pr_err("%s: DMA initialization failed\n", __func__);
+ goto open_error;
+ }
+
+ /* Copy the MAC addr into the HW */
+ priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
+ /* If required, perform hw setup of the bus. */
+ if (priv->plat->bus_setup)
+ priv->plat->bus_setup(priv->ioaddr);
+ /* Initialize the MAC Core */
+ priv->hw->mac->core_init(priv->ioaddr);
+
+ stmmac_get_synopsys_id(priv);
+
+ stmmac_get_hw_features(priv);
+
+ if (priv->rx_coe)
+ pr_info("stmmac: Rx Checksum Offload Engine supported\n");
+ if (priv->plat->tx_coe)
+ pr_info("\tTX Checksum insertion supported\n");
+ netdev_update_features(dev);
+
+ /* Request the IRQ lines */
+ ret = request_irq(dev->irq, stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
+ __func__, dev->irq, ret);
+ goto open_error;
+ }
+
+ /* Enable the MAC Rx/Tx */
+ stmmac_enable_mac(priv->ioaddr);
+
+ /* Set the HW DMA mode and the COE */
+ stmmac_dma_operation_mode(priv);
+
+ /* Extra statistics */
+ memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
+ priv->xstats.threshold = tc;
+
+ stmmac_mmc_setup(priv);
+
+ /* Start the ball rolling... */
+ DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
+ priv->hw->dma->start_tx(priv->ioaddr);
+ priv->hw->dma->start_rx(priv->ioaddr);
+
+#ifdef CONFIG_STMMAC_TIMER
+ priv->tm->timer_start(tmrate);
+#endif
+ /* Dump DMA/MAC registers */
+ if (netif_msg_hw(priv)) {
+ priv->hw->mac->dump_regs(priv->ioaddr);
+ priv->hw->dma->dump_regs(priv->ioaddr);
+ }
+
+ if (priv->phydev)
+ phy_start(priv->phydev);
+
+ napi_enable(&priv->napi);
+ skb_queue_head_init(&priv->rx_recycle);
+ netif_start_queue(dev);
+
+ return 0;
+
+open_error:
+#ifdef CONFIG_STMMAC_TIMER
+ kfree(priv->tm);
+#endif
+ if (priv->phydev)
+ phy_disconnect(priv->phydev);
+
+ return ret;
+}
+
+/**
+ * stmmac_release - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int stmmac_release(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ /* Stop and disconnect the PHY */
+ if (priv->phydev) {
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
+
+ netif_stop_queue(dev);
+
+#ifdef CONFIG_STMMAC_TIMER
+ /* Stop and release the timer */
+ stmmac_close_ext_timer();
+ kfree(priv->tm);
+#endif
+ napi_disable(&priv->napi);
+ skb_queue_purge(&priv->rx_recycle);
+
+ /* Free the IRQ lines */
+ free_irq(dev->irq, dev);
+
+ /* Stop TX/RX DMA and clear the descriptors */
+ priv->hw->dma->stop_tx(priv->ioaddr);
+ priv->hw->dma->stop_rx(priv->ioaddr);
+
+ /* Release and free the Rx/Tx resources */
+ free_dma_desc_resources(priv);
+
+ /* Disable the MAC Rx/Tx */
+ stmmac_disable_mac(priv->ioaddr);
+
+ netif_carrier_off(dev);
+
+ return 0;
+}
+
+static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
+ struct net_device *dev,
+ int csum_insertion)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned int nopaged_len = skb_headlen(skb);
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int entry = priv->cur_tx % txsize;
+ struct dma_desc *desc = priv->dma_tx + entry;
+
+ if (nopaged_len > BUF_SIZE_8KiB) {
+
+ int buf2_size = nopaged_len - BUF_SIZE_8KiB;
+
+ desc->des2 = dma_map_single(priv->device, skb->data,
+ BUF_SIZE_8KiB, DMA_TO_DEVICE);
+ desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+ priv->hw->desc->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
+ csum_insertion);
+
+ entry = (++priv->cur_tx) % txsize;
+ desc = priv->dma_tx + entry;
+
+ desc->des2 = dma_map_single(priv->device,
+ skb->data + BUF_SIZE_8KiB,
+ buf2_size, DMA_TO_DEVICE);
+ desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+ priv->hw->desc->prepare_tx_desc(desc, 0, buf2_size,
+ csum_insertion);
+ priv->hw->desc->set_tx_owner(desc);
+ priv->tx_skbuff[entry] = NULL;
+ } else {
+ desc->des2 = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+ priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
+ csum_insertion);
+ }
+ return entry;
+}
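+
+/*
+ * Worked example (illustrative only): for an 11000-byte linear skb,
+ * the first descriptor above carries BUF_SIZE_8KiB (8192) bytes split
+ * across des2/des3, and the second the remaining 11000 - 8192 = 2808
+ * bytes (buf2_size). Only the second descriptor is released to the HW
+ * here; ownership of the first is set later in stmmac_xmit() to close
+ * the race with the DMA engine.
+ */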
+
+/**
+ * stmmac_xmit:
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description : Tx entry point of the driver.
+ */
+static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int entry;
+ int i, csum_insertion = 0;
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ struct dma_desc *desc, *first;
+
+ if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
+ if (!netif_queue_stopped(dev)) {
+ netif_stop_queue(dev);
+ /* This is a hard error, log it. */
+ pr_err("%s: BUG! Tx Ring full when queue awake\n",
+ __func__);
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = priv->cur_tx % txsize;
+
+#ifdef STMMAC_XMIT_DEBUG
+ if ((skb->len > ETH_FRAME_LEN) || nfrags)
+ pr_info("stmmac xmit:\n"
+ "\tskb addr %p - len: %d - nopaged_len: %d\n"
+ "\tn_frags: %d - ip_summed: %d - %s gso\n",
+ skb, skb->len, skb_headlen(skb), nfrags, skb->ip_summed,
+ !skb_is_gso(skb) ? "isn't" : "is");
+#endif
+
+ csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
+
+ desc = priv->dma_tx + entry;
+ first = desc;
+
+#ifdef STMMAC_XMIT_DEBUG
+ if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
+ pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n"
+ "\t\tn_frags: %d, ip_summed: %d\n",
+ skb->len, skb_headlen(skb), nfrags, skb->ip_summed);
+#endif
+ priv->tx_skbuff[entry] = skb;
+ if (unlikely(skb->len >= BUF_SIZE_4KiB)) {
+ entry = stmmac_handle_jumbo_frames(skb, dev, csum_insertion);
+ desc = priv->dma_tx + entry;
+ } else {
+ unsigned int nopaged_len = skb_headlen(skb);
+ desc->des2 = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
+ csum_insertion);
+ }
+
+ for (i = 0; i < nfrags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ int len = frag->size;
+
+ entry = (++priv->cur_tx) % txsize;
+ desc = priv->dma_tx + entry;
+
+ TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
+ desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
+ DMA_TO_DEVICE);
+ priv->tx_skbuff[entry] = NULL;
+ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
+ wmb();
+ priv->hw->desc->set_tx_owner(desc);
+ }
+
+ /* Interrupt on completion only for the last segment */
+ priv->hw->desc->close_tx_desc(desc);
+
+#ifdef CONFIG_STMMAC_TIMER
+ /* Clean IC while using timer */
+ if (likely(priv->tm->enable))
+ priv->hw->desc->clear_tx_ic(desc);
+#endif
+
+ wmb();
+
+ /* To avoid a race condition */
+ priv->hw->desc->set_tx_owner(first);
+
+ priv->cur_tx++;
+
+#ifdef STMMAC_XMIT_DEBUG
+ if (netif_msg_pktdata(priv)) {
+ pr_info("stmmac xmit: current=%d, dirty=%d, entry=%d, "
+ "first=%p, nfrags=%d\n",
+ (priv->cur_tx % txsize), (priv->dirty_tx % txsize),
+ entry, first, nfrags);
+ display_ring(priv->dma_tx, txsize);
+ pr_info(">>> frame to be transmitted: ");
+ print_pkt(skb->data, skb->len);
+ }
+#endif
+ if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+ TX_DBG("%s: stop transmitting packets\n", __func__);
+ netif_stop_queue(dev);
+ }
+
+ dev->stats.tx_bytes += skb->len;
+
+ skb_tx_timestamp(skb);
+
+ priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+
+ return NETDEV_TX_OK;
+}
+
+static inline void stmmac_rx_refill(struct stmmac_priv *priv)
+{
+ unsigned int rxsize = priv->dma_rx_size;
+ int bfsize = priv->dma_buf_sz;
+ struct dma_desc *p = priv->dma_rx;
+
+ for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
+ unsigned int entry = priv->dirty_rx % rxsize;
+ if (likely(priv->rx_skbuff[entry] == NULL)) {
+ struct sk_buff *skb;
+
+ skb = __skb_dequeue(&priv->rx_recycle);
+ if (skb == NULL)
+ skb = netdev_alloc_skb_ip_align(priv->dev,
+ bfsize);
+
+ if (unlikely(skb == NULL))
+ break;
+
+ priv->rx_skbuff[entry] = skb;
+ priv->rx_skbuff_dma[entry] =
+ dma_map_single(priv->device, skb->data, bfsize,
+ DMA_FROM_DEVICE);
+
+ (p + entry)->des2 = priv->rx_skbuff_dma[entry];
+ if (unlikely(priv->plat->has_gmac)) {
+ if (bfsize >= BUF_SIZE_8KiB)
+ (p + entry)->des3 =
+ (p + entry)->des2 + BUF_SIZE_8KiB;
+ }
+ RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
+ }
+ wmb();
+ priv->hw->desc->set_rx_owner(p + entry);
+ }
+}
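+
+/*
+ * Refill arithmetic, as a sketch: with dma_rx_size = 4, cur_rx = 6 and
+ * dirty_rx = 4, the loop above re-arms entries 4 % 4 = 0 and 5 % 4 = 1
+ * and hands each back to the HW via set_rx_owner().
+ */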
+
+static int stmmac_rx(struct stmmac_priv *priv, int limit)
+{
+ unsigned int rxsize = priv->dma_rx_size;
+ unsigned int entry = priv->cur_rx % rxsize;
+ unsigned int next_entry;
+ unsigned int count = 0;
+ struct dma_desc *p = priv->dma_rx + entry;
+ struct dma_desc *p_next;
+
+#ifdef STMMAC_RX_DEBUG
+ if (netif_msg_hw(priv)) {
+ pr_debug(">>> stmmac_rx: descriptor ring:\n");
+ display_ring(priv->dma_rx, rxsize);
+ }
+#endif
+ while (!priv->hw->desc->get_rx_owner(p)) {
+ int status;
+
+ if (count >= limit)
+ break;
+
+ count++;
+
+ next_entry = (++priv->cur_rx) % rxsize;
+ p_next = priv->dma_rx + next_entry;
+ prefetch(p_next);
+
+ /* read the status of the incoming frame */
+ status = (priv->hw->desc->rx_status(&priv->dev->stats,
+ &priv->xstats, p));
+ if (unlikely(status == discard_frame))
+ priv->dev->stats.rx_errors++;
+ else {
+ struct sk_buff *skb;
+ int frame_len;
+
+ frame_len = priv->hw->desc->get_rx_frame_len(p);
+ /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
+ * Type frames (LLC/LLC-SNAP) */
+ if (unlikely(status != llc_snap))
+ frame_len -= ETH_FCS_LEN;
+#ifdef STMMAC_RX_DEBUG
+ if (frame_len > ETH_FRAME_LEN)
+ pr_debug("\tRX frame size %d, COE status: %d\n",
+ frame_len, status);
+
+ if (netif_msg_hw(priv))
+ pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
+ p, entry, p->des2);
+#endif
+ skb = priv->rx_skbuff[entry];
+ if (unlikely(!skb)) {
+ pr_err("%s: Inconsistent Rx descriptor chain\n",
+ priv->dev->name);
+ priv->dev->stats.rx_dropped++;
+ break;
+ }
+ prefetch(skb->data - NET_IP_ALIGN);
+ priv->rx_skbuff[entry] = NULL;
+
+ skb_put(skb, frame_len);
+ dma_unmap_single(priv->device,
+ priv->rx_skbuff_dma[entry],
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+#ifdef STMMAC_RX_DEBUG
+ if (netif_msg_pktdata(priv)) {
+ pr_info(" frame received (%dbytes)", frame_len);
+ print_pkt(skb->data, frame_len);
+ }
+#endif
+ skb->protocol = eth_type_trans(skb, priv->dev);
+
+ if (unlikely(status == csum_none)) {
+ /* always for the old mac 10/100 */
+ skb_checksum_none_assert(skb);
+ netif_receive_skb(skb);
+ } else {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ napi_gro_receive(&priv->napi, skb);
+ }
+
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += frame_len;
+ }
+ entry = next_entry;
+ p = p_next; /* use prefetched values */
+ }
+
+ stmmac_rx_refill(priv);
+
+ priv->xstats.rx_pkt_n += count;
+
+ return count;
+}
+
+/**
+ * stmmac_poll - stmmac poll method (NAPI)
+ * @napi : pointer to the napi structure.
+ * @budget : maximum number of packets that the current CPU can receive from
+ * all interfaces.
+ * Description :
+ * This function implements the reception process.
+ * It also runs the TX completion path.
+ */
+static int stmmac_poll(struct napi_struct *napi, int budget)
+{
+ struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
+ int work_done = 0;
+
+ priv->xstats.poll_n++;
+ stmmac_tx(priv);
+ work_done = stmmac_rx(priv, budget);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ stmmac_enable_irq(priv);
+ }
+ return work_done;
+}
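+
+/*
+ * NAPI accounting example (hypothetical numbers): with budget = 64 and
+ * ten frames pending, stmmac_rx() returns 10 < 64, so the poll above
+ * calls napi_complete() and re-enables the device interrupts.
+ */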
+
+/**
+ * stmmac_tx_timeout
+ * @dev : Pointer to net device structure
+ * Description: this function is called when a packet transmission fails to
+ * complete within a reasonable time. The driver will mark the error in the
+ * netdev structure and arrange for the device to be reset to a sane state
+ * in order to transmit a new packet.
+ */
+static void stmmac_tx_timeout(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ /* Clear Tx resources and restart transmitting again */
+ stmmac_tx_err(priv);
+}
+
+/* Configuration changes (passed on by ifconfig) */
+static int stmmac_config(struct net_device *dev, struct ifmap *map)
+{
+ if (dev->flags & IFF_UP) /* can't act on a running interface */
+ return -EBUSY;
+
+ /* Don't allow changing the I/O address */
+ if (map->base_addr != dev->base_addr) {
+ pr_warning("%s: can't change I/O address\n", dev->name);
+ return -EOPNOTSUPP;
+ }
+
+ /* Don't allow changing the IRQ */
+ if (map->irq != dev->irq) {
+ pr_warning("%s: can't change IRQ number %d\n",
+ dev->name, dev->irq);
+ return -EOPNOTSUPP;
+ }
+
+ /* ignore other fields */
+ return 0;
+}
+
+/**
+ * stmmac_set_rx_mode - entry point for multicast addressing
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever multicast addresses must be enabled/disabled.
+ * Return value:
+ * void.
+ */
+static void stmmac_set_rx_mode(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ spin_lock(&priv->lock);
+ priv->hw->mac->set_filter(dev);
+ spin_unlock(&priv->lock);
+}
+
+/**
+ * stmmac_change_mtu - entry point to change MTU size for the device.
+ * @dev : device pointer.
+ * @new_mtu : the new MTU size for the device.
+ * Description: the Maximum Transfer Unit (MTU) is used by the network layer
+ * to drive packet transmission. Ethernet has an MTU of 1500 octets
+ * (ETH_DATA_LEN). This value can be changed with ifconfig.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int max_mtu;
+
+ if (netif_running(dev)) {
+ pr_err("%s: must be stopped to change its MTU\n", dev->name);
+ return -EBUSY;
+ }
+
+ if (priv->plat->has_gmac)
+ max_mtu = JUMBO_LEN;
+ else
+ max_mtu = ETH_DATA_LEN;
+
+ if ((new_mtu < 46) || (new_mtu > max_mtu)) {
+ pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
+ return -EINVAL;
+ }
+
+ dev->mtu = new_mtu;
+ netdev_update_features(dev);
+
+ return 0;
+}
+
+static u32 stmmac_fix_features(struct net_device *dev, u32 features)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (!priv->rx_coe)
+ features &= ~NETIF_F_RXCSUM;
+ if (!priv->plat->tx_coe)
+ features &= ~NETIF_F_ALL_CSUM;
+
+ /* Some GMAC devices have a bugged Jumbo frame support that
+ * needs to have the Tx COE disabled for oversized frames
+ * (due to limited buffer sizes). In this case we disable
+ * the TX csum insertion in the TDES and don't use SF. */
+ if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
+ features &= ~NETIF_F_ALL_CSUM;
+
+ return features;
+}
+
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (unlikely(!dev)) {
+ pr_err("%s: invalid dev pointer\n", __func__);
+ return IRQ_NONE;
+ }
+
+ if (priv->plat->has_gmac)
+ /* To handle GMAC own interrupts */
+ priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
+
+ stmmac_dma_interrupt(priv);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling receive - used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled. */
+static void stmmac_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ stmmac_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+/**
+ * stmmac_ioctl - Entry point for the Ioctl
+ * @dev: Device pointer.
+ * @rq: An IOCTL specific structure, that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * @cmd: IOCTL command
+ * Description:
+ * Currently no special functionality is supported in IOCTL; only
+ * phy_mii_ioctl(...) can be invoked.
+ */
+static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!priv->phydev)
+ return -EINVAL;
+
+ spin_lock(&priv->lock);
+ ret = phy_mii_ioctl(priv->phydev, rq, cmd);
+ spin_unlock(&priv->lock);
+
+ return ret;
+}
+
+#ifdef CONFIG_STMMAC_DEBUG_FS
+static struct dentry *stmmac_fs_dir;
+static struct dentry *stmmac_rings_status;
+static struct dentry *stmmac_dma_cap;
+
+static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
+{
+ struct tmp_s {
+ u64 a;
+ unsigned int b;
+ unsigned int c;
+ };
+ int i;
+ struct net_device *dev = seq->private;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ seq_printf(seq, "=======================\n");
+ seq_printf(seq, " RX descriptor ring\n");
+ seq_printf(seq, "=======================\n");
+
+ for (i = 0; i < priv->dma_rx_size; i++) {
+ struct tmp_s *x = (struct tmp_s *)(priv->dma_rx + i);
+ seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
+ i, (unsigned int)(x->a),
+ (unsigned int)((x->a) >> 32), x->b, x->c);
+ seq_printf(seq, "\n");
+ }
+
+ seq_printf(seq, "\n");
+ seq_printf(seq, "=======================\n");
+ seq_printf(seq, " TX descriptor ring\n");
+ seq_printf(seq, "=======================\n");
+
+ for (i = 0; i < priv->dma_tx_size; i++) {
+ struct tmp_s *x = (struct tmp_s *)(priv->dma_tx + i);
+ seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
+ i, (unsigned int)(x->a),
+ (unsigned int)((x->a) >> 32), x->b, x->c);
+ seq_printf(seq, "\n");
+ }
+
+ return 0;
+}
+
+static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
+}
+
+static const struct file_operations stmmac_rings_status_fops = {
+ .owner = THIS_MODULE,
+ .open = stmmac_sysfs_ring_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
+{
+ struct net_device *dev = seq->private;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (!stmmac_get_hw_features(priv)) {
+ seq_printf(seq, "DMA HW features not supported\n");
+ return 0;
+ }
+
+ seq_printf(seq, "==============================\n");
+ seq_printf(seq, "\tDMA HW features\n");
+ seq_printf(seq, "==============================\n");
+
+ seq_printf(seq, "\t10/100 Mbps %s\n",
+ (priv->dma_cap.mbps_10_100) ? "Y" : "N");
+ seq_printf(seq, "\t1000 Mbps %s\n",
+ (priv->dma_cap.mbps_1000) ? "Y" : "N");
+ seq_printf(seq, "\tHalf duple %s\n",
+ (priv->dma_cap.half_duplex) ? "Y" : "N");
+ seq_printf(seq, "\tHash Filter: %s\n",
+ (priv->dma_cap.hash_filter) ? "Y" : "N");
+ seq_printf(seq, "\tMultiple MAC address registers: %s\n",
+ (priv->dma_cap.multi_addr) ? "Y" : "N");
+ seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfatces): %s\n",
+ (priv->dma_cap.pcs) ? "Y" : "N");
+ seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
+ (priv->dma_cap.sma_mdio) ? "Y" : "N");
+ seq_printf(seq, "\tPMT Remote wake up: %s\n",
+ (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
+ seq_printf(seq, "\tPMT Magic Frame: %s\n",
+ (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
+ seq_printf(seq, "\tRMON module: %s\n",
+ (priv->dma_cap.rmon) ? "Y" : "N");
+ seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
+ (priv->dma_cap.time_stamp) ? "Y" : "N");
+ seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp:%s\n",
+ (priv->dma_cap.atime_stamp) ? "Y" : "N");
+ seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n",
+ (priv->dma_cap.eee) ? "Y" : "N");
+ seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
+ seq_printf(seq, "\tChecksum Offload in TX: %s\n",
+ (priv->dma_cap.tx_coe) ? "Y" : "N");
+ seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
+ (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
+ seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
+ (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
+ seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
+ (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
+ seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
+ priv->dma_cap.number_rx_channel);
+ seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
+ priv->dma_cap.number_tx_channel);
+ seq_printf(seq, "\tEnhanced descriptors: %s\n",
+ (priv->dma_cap.enh_desc) ? "Y" : "N");
+
+ return 0;
+}
+
+static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
+}
+
+static const struct file_operations stmmac_dma_cap_fops = {
+ .owner = THIS_MODULE,
+ .open = stmmac_sysfs_dma_cap_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int stmmac_init_fs(struct net_device *dev)
+{
+ /* Create debugfs entries */
+ stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
+
+ if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
+ pr_err("ERROR %s, debugfs create directory failed\n",
+ STMMAC_RESOURCE_NAME);
+
+ return -ENOMEM;
+ }
+
+ /* Entry to report DMA RX/TX rings */
+ stmmac_rings_status = debugfs_create_file("descriptors_status",
+ S_IRUGO, stmmac_fs_dir, dev,
+ &stmmac_rings_status_fops);
+
+ if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) {
+ pr_info("ERROR creating stmmac ring debugfs file\n");
+ debugfs_remove(stmmac_fs_dir);
+
+ return -ENOMEM;
+ }
+
+ /* Entry to report the DMA HW features */
+ stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir,
+ dev, &stmmac_dma_cap_fops);
+
+ if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) {
+ pr_info("ERROR creating stmmac MMC debugfs file\n");
+ debugfs_remove(stmmac_rings_status);
+ debugfs_remove(stmmac_fs_dir);
+
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void stmmac_exit_fs(void)
+{
+ debugfs_remove(stmmac_rings_status);
+ debugfs_remove(stmmac_dma_cap);
+ debugfs_remove(stmmac_fs_dir);
+}
+#endif /* CONFIG_STMMAC_DEBUG_FS */
+
+static const struct net_device_ops stmmac_netdev_ops = {
+ .ndo_open = stmmac_open,
+ .ndo_start_xmit = stmmac_xmit,
+ .ndo_stop = stmmac_release,
+ .ndo_change_mtu = stmmac_change_mtu,
+ .ndo_fix_features = stmmac_fix_features,
+ .ndo_set_rx_mode = stmmac_set_rx_mode,
+ .ndo_tx_timeout = stmmac_tx_timeout,
+ .ndo_do_ioctl = stmmac_ioctl,
+ .ndo_set_config = stmmac_config,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = stmmac_poll_controller,
+#endif
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+/**
+ * stmmac_probe - Initialization of the adapter.
+ * @dev : device pointer
+ * Description: The function initializes the network device structure for
+ * the STMMAC driver. It also calls the low level routines
+ * in order to init the HW (i.e. the DMA engine)
+ */
+static int stmmac_probe(struct net_device *dev)
+{
+ int ret = 0;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ ether_setup(dev);
+
+ dev->netdev_ops = &stmmac_netdev_ops;
+ stmmac_set_ethtool_ops(dev);
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
+ dev->watchdog_timeo = msecs_to_jiffies(watchdog);
+#ifdef STMMAC_VLAN_TAG_USED
+ /* Both mac100 and gmac support receive VLAN tag detection */
+ dev->features |= NETIF_F_HW_VLAN_RX;
+#endif
+ priv->msg_enable = netif_msg_init(debug, default_msg_level);
+
+ if (flow_ctrl)
+ priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
+
+ priv->pause = pause;
+ netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
+
+ /* Get the MAC address */
+ priv->hw->mac->get_umac_addr((void __iomem *) dev->base_addr,
+ dev->dev_addr, 0);
+
+ if (!is_valid_ether_addr(dev->dev_addr))
+ pr_warning("\tno valid MAC address;"
+ "please, use ifconfig or nwhwconfig!\n");
+
+ spin_lock_init(&priv->lock);
+
+ ret = register_netdev(dev);
+ if (ret) {
+ pr_err("%s: ERROR %i registering the device\n",
+ __func__, ret);
+ return -ENODEV;
+ }
+
+ DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
+ dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
+ (dev->features & NETIF_F_IP_CSUM) ? "on" : "off");
+
+ return ret;
+}
+
+/**
+ * stmmac_mac_device_setup
+ * @dev : device pointer
+ * Description: select and initialise the mac device (mac100 or Gmac).
+ */
+static int stmmac_mac_device_setup(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ struct mac_device_info *device;
+
+ if (priv->plat->has_gmac) {
+ dev->priv_flags |= IFF_UNICAST_FLT;
+ device = dwmac1000_setup(priv->ioaddr);
+ } else {
+ device = dwmac100_setup(priv->ioaddr);
+ }
+
+ if (!device)
+ return -ENOMEM;
+
+ if (priv->plat->enh_desc) {
+ device->desc = &enh_desc_ops;
+ pr_info("\tEnhanced descriptor structure\n");
+ } else
+ device->desc = &ndesc_ops;
+
+ priv->hw = device;
+
+ if (device_can_wakeup(priv->device)) {
+ priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+ enable_irq_wake(priv->wol_irq);
+ }
+
+ return 0;
+}
+
+/**
+ * stmmac_dvr_probe
+ * @pdev: platform device pointer
+ * Description: the driver is initialized through platform_device.
+ */
+static int stmmac_dvr_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res;
+ void __iomem *addr = NULL;
+ struct net_device *ndev = NULL;
+ struct stmmac_priv *priv = NULL;
+ struct plat_stmmacenet_data *plat_dat;
+
+ pr_info("STMMAC driver:\n\tplatform registration... ");
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ pr_info("\tdone!\n");
+
+ if (!request_mem_region(res->start, resource_size(res),
+ pdev->name)) {
+ pr_err("%s: ERROR: memory allocation failed"
+ "cannot get the I/O addr 0x%x\n",
+ __func__, (unsigned int)res->start);
+ return -EBUSY;
+ }
+
+ addr = ioremap(res->start, resource_size(res));
+ if (!addr) {
+ pr_err("%s: ERROR: memory mapping failed\n", __func__);
+ ret = -ENOMEM;
+ goto out_release_region;
+ }
+
+ ndev = alloc_etherdev(sizeof(struct stmmac_priv));
+ if (!ndev) {
+ pr_err("%s: ERROR: allocating the device\n", __func__);
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ /* Get the MAC information */
+ ndev->irq = platform_get_irq_byname(pdev, "macirq");
+ if (ndev->irq == -ENXIO) {
+ pr_err("%s: ERROR: MAC IRQ configuration "
+ "information not found\n", __func__);
+ ret = -ENXIO;
+ goto out_free_ndev;
+ }
+
+ priv = netdev_priv(ndev);
+ priv->device = &(pdev->dev);
+ priv->dev = ndev;
+ plat_dat = pdev->dev.platform_data;
+
+ priv->plat = plat_dat;
+
+ priv->ioaddr = addr;
+
+ /* PMT module is not integrated in all the MAC devices. */
+ if (plat_dat->pmt) {
+ pr_info("\tPMT module supported\n");
+ device_set_wakeup_capable(&pdev->dev, 1);
+ }
+ /*
+ * On some platforms, e.g. SPEAr, the wake-up irq differs from the mac
+ * irq. The external wake-up irq can be passed through the platform
+ * code, named "eth_wake_irq".
+ *
+ * If the wake-up interrupt is not passed from the platform,
+ * the driver will continue to use the mac irq (ndev->irq).
+ */
+ priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+ if (priv->wol_irq == -ENXIO)
+ priv->wol_irq = ndev->irq;
+
+ platform_set_drvdata(pdev, ndev);
+
+ /* Set the I/O base addr */
+ ndev->base_addr = (unsigned long)addr;
+
+ /* Custom initialisation */
+ if (priv->plat->init) {
+ ret = priv->plat->init(pdev);
+ if (unlikely(ret))
+ goto out_free_ndev;
+ }
+
+ /* MAC HW device detection */
+ ret = stmmac_mac_device_setup(ndev);
+ if (ret < 0)
+ goto out_plat_exit;
+
+ /* Network Device Registration */
+ ret = stmmac_probe(ndev);
+ if (ret < 0)
+ goto out_plat_exit;
+
+ /* Override with kernel parameters if supplied XXX CRS XXX
+ * this needs to have multiple instances */
+ if ((phyaddr >= 0) && (phyaddr <= 31))
+ priv->plat->phy_addr = phyaddr;
+
+ pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
+ "\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
+ pdev->id, ndev->irq, addr);
+
+ /* MDIO bus Registration */
+ pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id);
+ ret = stmmac_mdio_register(ndev);
+ if (ret < 0)
+ goto out_unregister;
+ pr_debug("registered!\n");
+
+#ifdef CONFIG_STMMAC_DEBUG_FS
+ ret = stmmac_init_fs(ndev);
+ if (ret < 0)
+ pr_warning("\tFailed debugFS registration");
+#endif
+
+ return 0;
+
+out_unregister:
+ unregister_netdev(ndev);
+out_plat_exit:
+ if (priv->plat->exit)
+ priv->plat->exit(pdev);
+out_free_ndev:
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+out_unmap:
+ iounmap(addr);
+out_release_region:
+ release_mem_region(res->start, resource_size(res));
+
+ return ret;
+}
+
+/**
+ * stmmac_dvr_remove
+ * @pdev: platform device pointer
+ * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
+ * changes the link status, releases the DMA descriptor rings,
+ * unregisters the MDIO bus and unmaps the allocated memory.
+ */
+static int stmmac_dvr_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct resource *res;
+
+ pr_info("%s:\n\tremoving driver", __func__);
+
+ priv->hw->dma->stop_rx(priv->ioaddr);
+ priv->hw->dma->stop_tx(priv->ioaddr);
+
+ stmmac_disable_mac(priv->ioaddr);
+
+ netif_carrier_off(ndev);
+
+ stmmac_mdio_unregister(ndev);
+
+ if (priv->plat->exit)
+ priv->plat->exit(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ unregister_netdev(ndev);
+
+ iounmap((void *)priv->ioaddr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+#ifdef CONFIG_STMMAC_DEBUG_FS
+ stmmac_exit_fs();
+#endif
+
+ free_netdev(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int stmmac_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int dis_ic = 0;
+
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+ spin_lock(&priv->lock);
+
+ netif_device_detach(ndev);
+ netif_stop_queue(ndev);
+ if (priv->phydev)
+ phy_stop(priv->phydev);
+
+#ifdef CONFIG_STMMAC_TIMER
+ priv->tm->timer_stop();
+ if (likely(priv->tm->enable))
+ dis_ic = 1;
+#endif
+ napi_disable(&priv->napi);
+
+ /* Stop TX/RX DMA */
+ priv->hw->dma->stop_tx(priv->ioaddr);
+ priv->hw->dma->stop_rx(priv->ioaddr);
+ /* Clear the Rx/Tx descriptors */
+ priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
+ dis_ic);
+ priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+
+ /* Enable Power down mode by programming the PMT regs */
+ if (device_may_wakeup(priv->device))
+ priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
+ else
+ stmmac_disable_mac(priv->ioaddr);
+
+ spin_unlock(&priv->lock);
+ return 0;
+}
+
+static int stmmac_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ if (!netif_running(ndev))
+ return 0;
+
+ spin_lock(&priv->lock);
+
+ /* The Power Down bit in the PMT register is cleared
+ * automatically as soon as a magic packet or a Wake-up frame
+ * is received. Anyway, it's better to manually clear
+ * this bit because it can generate problems while resuming
+ * from other devices (e.g. serial console). */
+ if (device_may_wakeup(priv->device))
+ priv->hw->mac->pmt(priv->ioaddr, 0);
+
+ netif_device_attach(ndev);
+
+ /* Enable the MAC and DMA */
+ stmmac_enable_mac(priv->ioaddr);
+ priv->hw->dma->start_tx(priv->ioaddr);
+ priv->hw->dma->start_rx(priv->ioaddr);
+
+#ifdef CONFIG_STMMAC_TIMER
+ if (likely(priv->tm->enable))
+ priv->tm->timer_start(tmrate);
+#endif
+ napi_enable(&priv->napi);
+
+ if (priv->phydev)
+ phy_start(priv->phydev);
+
+ netif_start_queue(ndev);
+
+ spin_unlock(&priv->lock);
+ return 0;
+}
+
+static int stmmac_freeze(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+ return stmmac_release(ndev);
+}
+
+static int stmmac_restore(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+ return stmmac_open(ndev);
+}
+
+static const struct dev_pm_ops stmmac_pm_ops = {
+ .suspend = stmmac_suspend,
+ .resume = stmmac_resume,
+ .freeze = stmmac_freeze,
+ .thaw = stmmac_restore,
+ .restore = stmmac_restore,
+};
+#else
+static const struct dev_pm_ops stmmac_pm_ops;
+#endif /* CONFIG_PM */
+
+static struct platform_driver stmmac_driver = {
+ .probe = stmmac_dvr_probe,
+ .remove = stmmac_dvr_remove,
+ .driver = {
+ .name = STMMAC_RESOURCE_NAME,
+ .owner = THIS_MODULE,
+ .pm = &stmmac_pm_ops,
+ },
+};
+
+/**
+ * stmmac_init_module - Entry point for the driver
+ * Description: This function is the entry point for the driver.
+ */
+static int __init stmmac_init_module(void)
+{
+ return platform_driver_register(&stmmac_driver);
+}
+
+/**
+ * stmmac_cleanup_module - Cleanup routine for the driver
+ * Description: This function is the cleanup routine for the driver.
+ */
+static void __exit stmmac_cleanup_module(void)
+{
+ platform_driver_unregister(&stmmac_driver);
+}
+
+#ifndef MODULE
+static int __init stmmac_cmdline_opt(char *str)
+{
+ char *opt;
+
+ if (!str || !*str)
+ return -EINVAL;
+ while ((opt = strsep(&str, ",")) != NULL) {
+ if (!strncmp(opt, "debug:", 6)) {
+ if (strict_strtoul(opt + 6, 0, (unsigned long *)&debug))
+ goto err;
+ } else if (!strncmp(opt, "phyaddr:", 8)) {
+ if (strict_strtoul(opt + 8, 0,
+ (unsigned long *)&phyaddr))
+ goto err;
+ } else if (!strncmp(opt, "dma_txsize:", 11)) {
+ if (strict_strtoul(opt + 11, 0,
+ (unsigned long *)&dma_txsize))
+ goto err;
+ } else if (!strncmp(opt, "dma_rxsize:", 11)) {
+ if (strict_strtoul(opt + 11, 0,
+ (unsigned long *)&dma_rxsize))
+ goto err;
+ } else if (!strncmp(opt, "buf_sz:", 7)) {
+ if (strict_strtoul(opt + 7, 0,
+ (unsigned long *)&buf_sz))
+ goto err;
+ } else if (!strncmp(opt, "tc:", 3)) {
+ if (strict_strtoul(opt + 3, 0, (unsigned long *)&tc))
+ goto err;
+ } else if (!strncmp(opt, "watchdog:", 9)) {
+ if (strict_strtoul(opt + 9, 0,
+ (unsigned long *)&watchdog))
+ goto err;
+ } else if (!strncmp(opt, "flow_ctrl:", 10)) {
+ if (strict_strtoul(opt + 10, 0,
+ (unsigned long *)&flow_ctrl))
+ goto err;
+ } else if (!strncmp(opt, "pause:", 6)) {
+ if (strict_strtoul(opt + 6, 0, (unsigned long *)&pause))
+ goto err;
+#ifdef CONFIG_STMMAC_TIMER
+ } else if (!strncmp(opt, "tmrate:", 7)) {
+ if (strict_strtoul(opt + 7, 0,
+ (unsigned long *)&tmrate))
+ goto err;
+#endif
+ }
+ }
+ return 0;
+
+err:
+ pr_err("%s: ERROR broken module parameter conversion", __func__);
+ return -EINVAL;
+}
+
+__setup("stmmaceth=", stmmac_cmdline_opt);
+#endif
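+
+/*
+ * Example boot option (hypothetical values):
+ *
+ *	stmmaceth=debug:16,phyaddr:1,dma_txsize:256,tc:256
+ *
+ * Each "name:value" pair is matched by the parser above and stored in
+ * the corresponding driver parameter.
+ */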
+
+module_init(stmmac_init_module);
+module_exit(stmmac_cleanup_module);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
new file mode 100644
index 000000000000..9c3b9d5c3411
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -0,0 +1,247 @@
+/*******************************************************************************
+ STMMAC Ethernet Driver -- MDIO bus implementation
+ Provides Bus interface for MII registers
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Carl Shaw <carl.shaw@st.com>
+ Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+#include "stmmac.h"
+
+#define MII_BUSY 0x00000001
+#define MII_WRITE 0x00000002
+
+/**
+ * stmmac_mdio_read
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 15-11
+ * @phyreg: MII addr reg bits 10-6
+ * Description: it reads data from the MII register from within the phy device.
+ * For the 7111 GMAC, we must set bit 0 in the MII address register while
+ * accessing the PHY registers.
+ * Fortunately, it seems this has no drawback for the 7109 MAC.
+ */
+static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+
+ int data;
+ u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
+ ((phyreg << 6) & (0x000007C0)));
+ regValue |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
+
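+ /* Wait for any pending MII operation to complete */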
+ do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
+ writel(regValue, priv->ioaddr + mii_address);
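+ /* Wait for the read transaction to complete */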
+ do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
+
+ /* Read the data from the MII data register */
+ data = (int)readl(priv->ioaddr + mii_data);
+
+ return data;
+}
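+
+/*
+ * Address register layout, by example (hypothetical values): phyaddr = 1
+ * and phyreg = 2 (MII_PHYSID1) give (1 << 11) | (2 << 6) = 0x0880,
+ * before the MII_BUSY flag and the clk_csr bits are OR-ed in.
+ */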
+
+/**
+ * stmmac_mdio_write
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 15-11
+ * @phyreg: MII addr reg bits 10-6
+ * @phydata: phy data
+ * Description: it writes the data into the MII register from within the device.
+ */
+static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
+ u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+
+ u16 value =
+ (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
+ | MII_WRITE;
+
+ value |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
+
+ /* Wait until any existing MII operation is complete */
+ do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
+
+ /* Write the data, then set the address register to start the write */
+ writel(phydata, priv->ioaddr + mii_data);
+ writel(value, priv->ioaddr + mii_address);
+
+ /* Wait until any existing MII operation is complete */
+ do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
+
+ return 0;
+}
+
+/**
+ * stmmac_mdio_reset
+ * @bus: points to the mii_bus structure
+ * Description: reset the MII bus
+ */
+static int stmmac_mdio_reset(struct mii_bus *bus)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+
+ if (priv->plat->mdio_bus_data->phy_reset) {
+ pr_debug("stmmac_mdio_reset: calling phy_reset\n");
+ priv->plat->mdio_bus_data->phy_reset(priv->plat->bsp_priv);
+ }
+
+ /* This is a workaround for problems with the STE101P PHY.
+ * It doesn't complete its reset until at least one clock cycle
+ * on MDC, so perform a dummy mdio read.
+ */
+ writel(0, priv->ioaddr + mii_address);
+
+ return 0;
+}
+
+/**
+ * stmmac_mdio_register
+ * @ndev: net device structure
+ * Description: it registers the MII bus
+ */
+int stmmac_mdio_register(struct net_device *ndev)
+{
+ int err = 0;
+ struct mii_bus *new_bus;
+ int *irqlist;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
+ int addr, found;
+
+ if (!mdio_bus_data)
+ return 0;
+
+ new_bus = mdiobus_alloc();
+ if (new_bus == NULL)
+ return -ENOMEM;
+
+ if (mdio_bus_data->irqs)
+ irqlist = mdio_bus_data->irqs;
+ else
+ irqlist = priv->mii_irq;
+
+ new_bus->name = "STMMAC MII Bus";
+ new_bus->read = &stmmac_mdio_read;
+ new_bus->write = &stmmac_mdio_write;
+ new_bus->reset = &stmmac_mdio_reset;
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", mdio_bus_data->bus_id);
+ new_bus->priv = ndev;
+ new_bus->irq = irqlist;
+ new_bus->phy_mask = mdio_bus_data->phy_mask;
+ new_bus->parent = priv->device;
+ err = mdiobus_register(new_bus);
+ if (err != 0) {
+ pr_err("%s: Cannot register as MDIO bus\n", new_bus->name);
+ goto bus_register_fail;
+ }
+
+ priv->mii = new_bus;
+
+ found = 0;
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ struct phy_device *phydev = new_bus->phy_map[addr];
+ if (phydev) {
+ int act = 0;
+ char irq_num[4];
+ char *irq_str;
+
+ /*
+ * If an IRQ was provided to be assigned after
+ * the bus probe, do it here.
+ */
+ if ((mdio_bus_data->irqs == NULL) &&
+ (mdio_bus_data->probed_phy_irq > 0)) {
+ irqlist[addr] = mdio_bus_data->probed_phy_irq;
+ phydev->irq = mdio_bus_data->probed_phy_irq;
+ }
+
+ /*
+ * If we're going to bind the MAC to this PHY bus,
+ * and no PHY number was provided to the MAC,
+ * use the one probed here.
+ */
+ if ((priv->plat->bus_id == mdio_bus_data->bus_id) &&
+ (priv->plat->phy_addr == -1))
+ priv->plat->phy_addr = addr;
+
+ act = (priv->plat->bus_id == mdio_bus_data->bus_id) &&
+ (priv->plat->phy_addr == addr);
+ switch (phydev->irq) {
+ case PHY_POLL:
+ irq_str = "POLL";
+ break;
+ case PHY_IGNORE_INTERRUPT:
+ irq_str = "IGNORE";
+ break;
+ default:
+ sprintf(irq_num, "%d", phydev->irq);
+ irq_str = irq_num;
+ break;
+ }
+ pr_info("%s: PHY ID %08x at %d IRQ %s (%s)%s\n",
+ ndev->name, phydev->phy_id, addr,
+ irq_str, dev_name(&phydev->dev),
+ act ? " active" : "");
+ found = 1;
+ }
+ }
+
+ if (!found)
+ pr_warning("%s: No PHY found\n", ndev->name);
+
+ return 0;
+
+bus_register_fail:
+ mdiobus_free(new_bus);
+ return err;
+}
+
+/**
+ * stmmac_mdio_unregister
+ * @ndev: net device structure
+ * Description: it unregisters the MII bus
+ */
+int stmmac_mdio_unregister(struct net_device *ndev)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ mdiobus_unregister(priv->mii);
+ priv->mii->priv = NULL;
+ mdiobus_free(priv->mii);
+ priv->mii = NULL;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c
new file mode 100644
index 000000000000..2a0e1abde7e7
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c
@@ -0,0 +1,134 @@
+/*******************************************************************************
+ STMMAC external timer support.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+#include "stmmac_timer.h"
+
+static void stmmac_timer_handler(void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+
+ stmmac_schedule(dev);
+}
+
+#define STMMAC_TIMER_MSG(timer, freq) \
+printk(KERN_INFO "stmmac_timer: %s Timer ON (freq %dHz)\n", timer, freq);
+
+#if defined(CONFIG_STMMAC_RTC_TIMER)
+#include <linux/rtc.h>
+static struct rtc_device *stmmac_rtc;
+static rtc_task_t stmmac_task;
+
+static void stmmac_rtc_start(unsigned int new_freq)
+{
+ rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq);
+ rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1);
+}
+
+static void stmmac_rtc_stop(void)
+{
+ rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
+}
+
+int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
+{
+ stmmac_task.private_data = dev;
+ stmmac_task.func = stmmac_timer_handler;
+
+ stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
+ if (stmmac_rtc == NULL) {
+ pr_err("open rtc device failed\n");
+ return -ENODEV;
+ }
+
+ rtc_irq_register(stmmac_rtc, &stmmac_task);
+
+ /* If setting the frequency fails, periodic mode is not supported */
+ if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) {
+ pr_err("set periodic failed\n");
+ rtc_irq_unregister(stmmac_rtc, &stmmac_task);
+ rtc_class_close(stmmac_rtc);
+ return -1;
+ }
+
+ STMMAC_TIMER_MSG(CONFIG_RTC_HCTOSYS_DEVICE, tm->freq);
+
+ tm->timer_start = stmmac_rtc_start;
+ tm->timer_stop = stmmac_rtc_stop;
+
+ return 0;
+}
+
+int stmmac_close_ext_timer(void)
+{
+ rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
+ rtc_irq_unregister(stmmac_rtc, &stmmac_task);
+ rtc_class_close(stmmac_rtc);
+ return 0;
+}
+
+#elif defined(CONFIG_STMMAC_TMU_TIMER)
+#include <linux/clk.h>
+#define TMU_CHANNEL "tmu2_clk"
+static struct clk *timer_clock;
+
+static void stmmac_tmu_start(unsigned int new_freq)
+{
+ clk_set_rate(timer_clock, new_freq);
+ clk_enable(timer_clock);
+}
+
+static void stmmac_tmu_stop(void)
+{
+ clk_disable(timer_clock);
+}
+
+int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
+{
+ timer_clock = clk_get(NULL, TMU_CHANNEL);
+
+ if (timer_clock == NULL)
+ return -1;
+
+ if (tmu2_register_user(stmmac_timer_handler, (void *)dev) < 0) {
+ timer_clock = NULL;
+ return -1;
+ }
+
+ STMMAC_TIMER_MSG("TMU2", tm->freq);
+ tm->timer_start = stmmac_tmu_start;
+ tm->timer_stop = stmmac_tmu_stop;
+
+ return 0;
+}
+
+int stmmac_close_ext_timer(void)
+{
+ clk_disable(timer_clock);
+ tmu2_unregister_user();
+ clk_put(timer_clock);
+ return 0;
+}
+#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
new file mode 100644
index 000000000000..6863590d184b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
@@ -0,0 +1,42 @@
+/*******************************************************************************
+ STMMAC external timer Header File.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+struct stmmac_timer {
+ void (*timer_start) (unsigned int new_freq);
+ void (*timer_stop) (void);
+ unsigned int freq;
+ unsigned int enable;
+};
+
+/* Open the HW timer device and return 0 in case of success */
+int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm);
+/* Stop the timer and release it */
+int stmmac_close_ext_timer(void);
+/* Function used for scheduling task within the stmmac */
+void stmmac_schedule(struct net_device *dev);
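+
+/*
+ * Usage sketch (illustrative, mirroring what stmmac_open/stmmac_release
+ * do with these hooks):
+ *
+ *	struct stmmac_timer tm = { .freq = 256 };
+ *
+ *	if (!stmmac_open_ext_timer(dev, &tm))
+ *		tm.timer_start(tm.freq);
+ *	...
+ *	tm.timer_stop();
+ *	stmmac_close_ext_timer();
+ */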
+
+#if defined(CONFIG_STMMAC_TMU_TIMER)
+extern int tmu2_register_user(void *fnt, void *data);
+extern void tmu2_unregister_user(void);
+#endif
diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig
new file mode 100644
index 000000000000..57bfd8599679
--- /dev/null
+++ b/drivers/net/ethernet/sun/Kconfig
@@ -0,0 +1,88 @@
+#
+# Sun network device configuration
+#
+
+config NET_VENDOR_SUN
+ bool "Sun devices"
+ default y
+ depends on SUN3 || SBUS || PCI || SUN_LDOMS
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say
+ Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Sun network interfaces. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+if NET_VENDOR_SUN
+
+config HAPPYMEAL
+ tristate "Sun Happy Meal 10/100baseT support"
+ depends on (SBUS || PCI)
+ select CRC32
+ ---help---
+ This driver supports the "hme" interface present on most Ultra
+ systems and as an option on older Sbus systems. This driver supports
+ both PCI and Sbus devices. This driver also supports the "qfe" quad
+ 100baseT device available in both PCI and Sbus configurations.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sunhme.
+
+config SUNBMAC
+ tristate "Sun BigMAC 10/100baseT support (EXPERIMENTAL)"
+ depends on SBUS && EXPERIMENTAL
+ select CRC32
+ ---help---
+ This driver supports the "be" interface available as an Sbus option.
+ This is Sun's older 100baseT Ethernet device.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sunbmac.
+
+config SUNQE
+ tristate "Sun QuadEthernet support"
+ depends on SBUS
+ select CRC32
+ ---help---
+ This driver supports the "qe" 10baseT Ethernet device, available as
+ an Sbus option. Note that this is not the same as Quad FastEthernet
+ "qfe" which is supported by the Happy Meal driver instead.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sunqe.
+
+config SUNGEM
+ tristate "Sun GEM support"
+ depends on PCI
+ select CRC32
+ select SUNGEM_PHY
+ ---help---
+ Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0. See also
+ <http://www.sun.com/products-n-solutions/hardware/docs/pdf/806-3985-10.pdf>.
+
+config CASSINI
+ tristate "Sun Cassini support"
+ depends on PCI
+ select CRC32
+ ---help---
+ Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also
+ <http://www.sun.com/products-n-solutions/hardware/docs/pdf/817-4341-10.pdf>
+
+config SUNVNET
+ tristate "Sun Virtual Network support"
+ depends on SUN_LDOMS
+ ---help---
+ Support for virtual network devices under Sun Logical Domains.
+
+config NIU
+ tristate "Sun Neptune 10Gbit Ethernet support"
+ depends on PCI
+ select CRC32
+ ---help---
+ This enables support for cards based upon Sun's
+ Neptune chipset.
+
+endif # NET_VENDOR_SUN
diff --git a/drivers/net/ethernet/sun/Makefile b/drivers/net/ethernet/sun/Makefile
new file mode 100644
index 000000000000..1e620ff88eba
--- /dev/null
+++ b/drivers/net/ethernet/sun/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the Sun network device drivers.
+#
+
+obj-$(CONFIG_HAPPYMEAL) += sunhme.o
+obj-$(CONFIG_SUNQE) += sunqe.o
+obj-$(CONFIG_SUNBMAC) += sunbmac.o
+obj-$(CONFIG_SUNGEM) += sungem.o
+obj-$(CONFIG_CASSINI) += cassini.o
+obj-$(CONFIG_SUNVNET) += sunvnet.o
+obj-$(CONFIG_NIU) += niu.o
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
new file mode 100644
index 000000000000..d9460d81a137
--- /dev/null
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -0,0 +1,5303 @@
+/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
+ *
+ * Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ *
+ * This driver uses the sungem driver (c) David Miller
+ * (davem@redhat.com) as its basis.
+ *
+ * The cassini chip has a number of features that distinguish it from
+ * the gem chip:
+ * 4 transmit descriptor rings that are used for either QoS (VLAN) or
+ * load balancing (non-VLAN mode)
+ * batching of multiple packets
+ * multiple CPU dispatching
+ * page-based RX descriptor engine with separate completion rings
+ * Gigabit support (GMII and PCS interface)
+ * MIF link up/down detection works
+ *
+ * RX is handled by page sized buffers that are attached as fragments to
+ * the skb. here's what's done:
+ * -- driver allocates pages at a time and keeps reference counts
+ * on them.
+ * -- the upper protocol layers assume that the header is in the skb
+ * itself. as a result, cassini will copy a small amount (64 bytes)
+ * to make them happy.
+ * -- driver appends the rest of the data pages as frags to skbuffs
+ * and increments the reference count
+ * -- on page reclamation, the driver swaps the page with a spare page.
+ * if that page is still in use, it frees its reference to that page,
+ * and allocates a new page for use. otherwise, it just recycles
+ * the page.
+ *
+ * NOTE: cassini can parse the header. however, it's not worth it
+ * as long as the network stack requires a header copy.
+ *
+ * TX has 4 queues. currently these queues are used in a round-robin
+ * fashion for load balancing. They can also be used for QoS. for that
+ * to work, however, QoS information needs to be exposed down to the driver
+ * level so that subqueues get targeted to particular transmit rings.
+ * alternatively, the queues can be configured via use of the all-purpose
+ * ioctl.
+ *
+ * RX DATA: the rx completion ring has all the info, but the rx desc
+ * ring has all of the data. RX can conceivably come in under multiple
+ * interrupts, but the INT# assignment needs to be set up properly by
+ * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
+ * that. also, the two descriptor rings are designed to distinguish between
+ * encrypted and non-encrypted packets, but we use them for buffering
+ * instead.
+ *
+ * by default, the selective clear mask is set up to process rx packets.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/vmalloc.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/random.h>
+#include <linux/mii.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/mutex.h>
+#include <linux/firmware.h>
+
+#include <net/checksum.h>
+
+#include <linux/atomic.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#define cas_page_map(x) kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
+#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
+#define CAS_NCPUS num_online_cpus()
+
+#define cas_skb_release(x) netif_rx(x)
+
+/* select which firmware to use */
+#define USE_HP_WORKAROUND
+#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
+#define CAS_HP_ALT_FIRMWARE cas_prog_null /* alternate firmware */
+
+#include "cassini.h"
+
+#define USE_TX_COMPWB /* use completion writeback registers */
+#define USE_CSMA_CD_PROTO /* standard CSMA/CD */
+#define USE_RX_BLANK /* hw interrupt mitigation */
+#undef USE_ENTROPY_DEV /* don't test for entropy device */
+
+/* NOTE: these aren't usable unless PCI interrupts can be assigned.
+ * also, we need to make cp->lock finer-grained.
+ */
+#undef USE_PCI_INTB
+#undef USE_PCI_INTC
+#undef USE_PCI_INTD
+#undef USE_QOS
+
+#undef USE_VPD_DEBUG /* debug vpd information if defined */
+
+/* rx processing options */
+#define USE_PAGE_ORDER /* specify to allocate large rx pages */
+#define RX_DONT_BATCH 0 /* if 1, don't batch flows */
+#define RX_COPY_ALWAYS 0 /* if 0, use frags */
+#define RX_COPY_MIN 64 /* copy a little to make upper layers happy */
+#undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */
+
+#define DRV_MODULE_NAME "cassini"
+#define DRV_MODULE_VERSION "1.6"
+#define DRV_MODULE_RELDATE "21 May 2008"
+
+#define CAS_DEF_MSG_ENABLE \
+ (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+/* length of time before we decide the hardware is borked,
+ * and dev->tx_timeout() should be called to fix the problem
+ */
+#define CAS_TX_TIMEOUT (HZ)
+#define CAS_LINK_TIMEOUT (22*HZ/10)
+#define CAS_LINK_FAST_TIMEOUT (1)
+
+/* timeout values for state changing. these specify the number
+ * of 10us delays to be used before giving up.
+ */
+#define STOP_TRIES_PHY 1000
+#define STOP_TRIES 5000
+
+/* specify a minimum frame size to deal with some fifo issues
+ * max mtu == 2 * page size - ethernet header - 64 - swivel =
+ * 2 * page_size - 0x50
+ */
+#define CAS_MIN_FRAME 97
+#define CAS_1000MB_MIN_FRAME 255
+#define CAS_MIN_MTU 60
+#define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000)
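+/*
+ * e.g. with 4 KiB pages (an illustration, not a fixed value):
+ * (4096 << 1) - 0x50 = 8112, so CAS_MAX_MTU = min(8112, 9000) = 8112.
+ */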
+
+#if 1
+/*
+ * Eliminate these and use separate atomic counters for each, to
+ * avoid a race condition.
+ */
+#else
+#define CAS_RESET_MTU 1
+#define CAS_RESET_ALL 2
+#define CAS_RESET_SPARE 3
+#endif
+
+static char version[] __devinitdata =
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */
+static int link_mode;
+
+MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
+MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("sun/cassini.bin");
+module_param(cassini_debug, int, 0);
+MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
+module_param(link_mode, int, 0);
+MODULE_PARM_DESC(link_mode, "default link mode");
+
+/*
+ * Work around for a PCS bug in which the link goes down due to the chip
+ * being confused and never showing a link status of "up."
+ */
+#define DEFAULT_LINKDOWN_TIMEOUT 5
+/*
+ * Value in seconds, for user input.
+ */
+static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
+module_param(linkdown_timeout, int, 0);
+MODULE_PARM_DESC(linkdown_timeout,
+"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
+
+/*
+ * value in 'ticks' (units used by jiffies). Set when we init the
+ * module because 'HZ' is actually a function call on some flavors of
+ * Linux. This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
+ */
+static int link_transition_timeout;
+
+static u16 link_modes[] __devinitdata = {
+ BMCR_ANENABLE, /* 0 : autoneg */
+ 0, /* 1 : 10bt half duplex */
+ BMCR_SPEED100, /* 2 : 100bt half duplex */
+ BMCR_FULLDPLX, /* 3 : 10bt full duplex */
+ BMCR_SPEED100|BMCR_FULLDPLX, /* 4 : 100bt full duplex */
+ CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
+};
+
+static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
+ { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
+
+static void cas_set_link_modes(struct cas *cp);
+
+static inline void cas_lock_tx(struct cas *cp)
+{
+ int i;
+
+ for (i = 0; i < N_TX_RINGS; i++)
+ spin_lock(&cp->tx_lock[i]);
+}
+
+static inline void cas_lock_all(struct cas *cp)
+{
+ spin_lock_irq(&cp->lock);
+ cas_lock_tx(cp);
+}
+
+/* WTZ: QA was finding deadlock problems with the previous
+ * versions after long test runs with multiple cards per machine.
+ * See if replacing cas_lock_all with safer versions helps. The
+ * symptoms QA is reporting match those we'd expect if interrupts
+ * aren't being properly restored, and we fixed a previous deadlock
+ * with similar symptoms by using save/restore versions in other
+ * places.
+ */
+#define cas_lock_all_save(cp, flags) \
+do { \
+ struct cas *xxxcp = (cp); \
+ spin_lock_irqsave(&xxxcp->lock, flags); \
+ cas_lock_tx(xxxcp); \
+} while (0)
+
+static inline void cas_unlock_tx(struct cas *cp)
+{
+ int i;
+
+ for (i = N_TX_RINGS; i > 0; i--)
+ spin_unlock(&cp->tx_lock[i - 1]);
+}
+
+static inline void cas_unlock_all(struct cas *cp)
+{
+ cas_unlock_tx(cp);
+ spin_unlock_irq(&cp->lock);
+}
+
+#define cas_unlock_all_restore(cp, flags) \
+do { \
+ struct cas *xxxcp = (cp); \
+ cas_unlock_tx(xxxcp); \
+ spin_unlock_irqrestore(&xxxcp->lock, flags); \
+} while (0)
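+/* typical use of the save/restore pair (illustrative sketch):
+ *
+ *	unsigned long flags;
+ *
+ *	cas_lock_all_save(cp, flags);
+ *	... touch state guarded by cp->lock and the tx locks ...
+ *	cas_unlock_all_restore(cp, flags);
+ *
+ * unlike cas_lock_all()/cas_unlock_all(), this preserves the caller's
+ * interrupt state instead of unconditionally re-enabling interrupts,
+ * which is the deadlock concern described above.
+ */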
+
+static void cas_disable_irq(struct cas *cp, const int ring)
+{
+ /* Make sure we won't get any more interrupts */
+ if (ring == 0) {
+ writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
+ return;
+ }
+
+ /* disable completion interrupts and selectively mask */
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ switch (ring) {
+#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
+#ifdef USE_PCI_INTB
+ case 1:
+#endif
+#ifdef USE_PCI_INTC
+ case 2:
+#endif
+#ifdef USE_PCI_INTD
+ case 3:
+#endif
+ writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
+ cp->regs + REG_PLUS_INTRN_MASK(ring));
+ break;
+#endif
+ default:
+ writel(INTRN_MASK_CLEAR_ALL, cp->regs +
+ REG_PLUS_INTRN_MASK(ring));
+ break;
+ }
+ }
+}
+
+static inline void cas_mask_intr(struct cas *cp)
+{
+ int i;
+
+ for (i = 0; i < N_RX_COMP_RINGS; i++)
+ cas_disable_irq(cp, i);
+}
+
+static void cas_enable_irq(struct cas *cp, const int ring)
+{
+ if (ring == 0) { /* all but TX_DONE */
+ writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
+ return;
+ }
+
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ switch (ring) {
+#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
+#ifdef USE_PCI_INTB
+ case 1:
+#endif
+#ifdef USE_PCI_INTC
+ case 2:
+#endif
+#ifdef USE_PCI_INTD
+ case 3:
+#endif
+ writel(INTRN_MASK_RX_EN, cp->regs +
+ REG_PLUS_INTRN_MASK(ring));
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+}
+
+static inline void cas_unmask_intr(struct cas *cp)
+{
+ int i;
+
+ for (i = 0; i < N_RX_COMP_RINGS; i++)
+ cas_enable_irq(cp, i);
+}
+
+static inline void cas_entropy_gather(struct cas *cp)
+{
+#ifdef USE_ENTROPY_DEV
+ if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
+ return;
+
+ batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
+ readl(cp->regs + REG_ENTROPY_IV),
+ sizeof(uint64_t)*8);
+#endif
+}
+
+static inline void cas_entropy_reset(struct cas *cp)
+{
+#ifdef USE_ENTROPY_DEV
+ if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
+ return;
+
+ writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
+ cp->regs + REG_BIM_LOCAL_DEV_EN);
+ writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
+ writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
+
+ /* if we read back 0x0, we don't have an entropy device */
+ if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
+ cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
+#endif
+}
+
+/* access to the phy. the following assumes that we've initialized the MIF to
+ * be in frame rather than bit-bang mode
+ */
+static u16 cas_phy_read(struct cas *cp, int reg)
+{
+ u32 cmd;
+ int limit = STOP_TRIES_PHY;
+
+ cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
+ cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
+ cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
+ cmd |= MIF_FRAME_TURN_AROUND_MSB;
+ writel(cmd, cp->regs + REG_MIF_FRAME);
+
+ /* poll for completion */
+ while (limit-- > 0) {
+ udelay(10);
+ cmd = readl(cp->regs + REG_MIF_FRAME);
+ if (cmd & MIF_FRAME_TURN_AROUND_LSB)
+ return cmd & MIF_FRAME_DATA_MASK;
+ }
+ return 0xFFFF; /* -1 */
+}
+
+static int cas_phy_write(struct cas *cp, int reg, u16 val)
+{
+ int limit = STOP_TRIES_PHY;
+ u32 cmd;
+
+ cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
+ cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
+ cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
+ cmd |= MIF_FRAME_TURN_AROUND_MSB;
+ cmd |= val & MIF_FRAME_DATA_MASK;
+ writel(cmd, cp->regs + REG_MIF_FRAME);
+
+ /* poll for completion */
+ while (limit-- > 0) {
+ udelay(10);
+ cmd = readl(cp->regs + REG_MIF_FRAME);
+ if (cmd & MIF_FRAME_TURN_AROUND_LSB)
+ return 0;
+ }
+ return -1;
+}
+
+static void cas_phy_powerup(struct cas *cp)
+{
+ u16 ctl = cas_phy_read(cp, MII_BMCR);
+
+ if ((ctl & BMCR_PDOWN) == 0)
+ return;
+ ctl &= ~BMCR_PDOWN;
+ cas_phy_write(cp, MII_BMCR, ctl);
+}
+
+static void cas_phy_powerdown(struct cas *cp)
+{
+ u16 ctl = cas_phy_read(cp, MII_BMCR);
+
+ if (ctl & BMCR_PDOWN)
+ return;
+ ctl |= BMCR_PDOWN;
+ cas_phy_write(cp, MII_BMCR, ctl);
+}
+
+/* cp->lock held. note: the last put_page will free the buffer */
+static int cas_page_free(struct cas *cp, cas_page_t *page)
+{
+ pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
+ PCI_DMA_FROMDEVICE);
+ __free_pages(page->buffer, cp->page_order);
+ kfree(page);
+ return 0;
+}
+
+#ifdef RX_COUNT_BUFFERS
+#define RX_USED_ADD(x, y) ((x)->used += (y))
+#define RX_USED_SET(x, y) ((x)->used = (y))
+#else
+#define RX_USED_ADD(x, y)
+#define RX_USED_SET(x, y)
+#endif
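+/* (with RX_COUNT_BUFFERS undefined, the RX_USED_* macros expand to
+ * nothing, so the page-usage accounting costs nothing at runtime)
+ */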
+
+/* local page allocation routines for the receive buffers. jumbo pages
+ * require at least 8K contiguous and 8K aligned buffers.
+ */
+static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
+{
+ cas_page_t *page;
+
+ page = kmalloc(sizeof(cas_page_t), flags);
+ if (!page)
+ return NULL;
+
+ INIT_LIST_HEAD(&page->list);
+ RX_USED_SET(page, 0);
+ page->buffer = alloc_pages(flags, cp->page_order);
+ if (!page->buffer)
+ goto page_err;
+ page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
+ cp->page_size, PCI_DMA_FROMDEVICE);
+ return page;
+
+page_err:
+ kfree(page);
+ return NULL;
+}
+
+/* initialize the spare pool of rx buffers; allocation happens at open */
+static void cas_spare_init(struct cas *cp)
+{
+ spin_lock(&cp->rx_inuse_lock);
+ INIT_LIST_HEAD(&cp->rx_inuse_list);
+ spin_unlock(&cp->rx_inuse_lock);
+
+ spin_lock(&cp->rx_spare_lock);
+ INIT_LIST_HEAD(&cp->rx_spare_list);
+ cp->rx_spares_needed = RX_SPARE_COUNT;
+ spin_unlock(&cp->rx_spare_lock);
+}
+
+/* used on close. free all the spare buffers. */
+static void cas_spare_free(struct cas *cp)
+{
+ struct list_head list, *elem, *tmp;
+
+ /* free spare buffers */
+ INIT_LIST_HEAD(&list);
+ spin_lock(&cp->rx_spare_lock);
+ list_splice_init(&cp->rx_spare_list, &list);
+ spin_unlock(&cp->rx_spare_lock);
+ list_for_each_safe(elem, tmp, &list) {
+ cas_page_free(cp, list_entry(elem, cas_page_t, list));
+ }
+
+ INIT_LIST_HEAD(&list);
+#if 1
+ /*
+ * Looks like Adrian had protected this with a different
+ * lock than used everywhere else to manipulate this list.
+ */
+ spin_lock(&cp->rx_inuse_lock);
+ list_splice_init(&cp->rx_inuse_list, &list);
+ spin_unlock(&cp->rx_inuse_lock);
+#else
+ spin_lock(&cp->rx_spare_lock);
+ list_splice_init(&cp->rx_inuse_list, &list);
+ spin_unlock(&cp->rx_spare_lock);
+#endif
+ list_for_each_safe(elem, tmp, &list) {
+ cas_page_free(cp, list_entry(elem, cas_page_t, list));
+ }
+}
+
+/* replenish spares if needed */
+static void cas_spare_recover(struct cas *cp, const gfp_t flags)
+{
+ struct list_head list, *elem, *tmp;
+ int needed, i;
+
+ /* check the inuse list. if we don't need any more free buffers,
+ * just free them
+ */
+
+ /* make a local copy of the list */
+ INIT_LIST_HEAD(&list);
+ spin_lock(&cp->rx_inuse_lock);
+ list_splice_init(&cp->rx_inuse_list, &list);
+ spin_unlock(&cp->rx_inuse_lock);
+
+ list_for_each_safe(elem, tmp, &list) {
+ cas_page_t *page = list_entry(elem, cas_page_t, list);
+
+ /*
+ * With the lockless pagecache, cassini buffering scheme gets
+ * slightly less accurate: we might find that a page has an
+ * elevated reference count here, due to a speculative ref,
+ * and skip it as in-use. Ideally we would be able to reclaim
+ * it. However this would be such a rare case, it doesn't
+ * matter too much as we should pick it up the next time round.
+ *
+ * Importantly, if we find that the page has a refcount of 1
+ * here (our refcount), then we know it is definitely not in use
+ * so we can reuse it.
+ */
+ if (page_count(page->buffer) > 1)
+ continue;
+
+ list_del(elem);
+ spin_lock(&cp->rx_spare_lock);
+ if (cp->rx_spares_needed > 0) {
+ list_add(elem, &cp->rx_spare_list);
+ cp->rx_spares_needed--;
+ spin_unlock(&cp->rx_spare_lock);
+ } else {
+ spin_unlock(&cp->rx_spare_lock);
+ cas_page_free(cp, page);
+ }
+ }
+
+ /* put any inuse buffers back on the list */
+ if (!list_empty(&list)) {
+ spin_lock(&cp->rx_inuse_lock);
+ list_splice(&list, &cp->rx_inuse_list);
+ spin_unlock(&cp->rx_inuse_lock);
+ }
+
+ spin_lock(&cp->rx_spare_lock);
+ needed = cp->rx_spares_needed;
+ spin_unlock(&cp->rx_spare_lock);
+ if (!needed)
+ return;
+
+ /* we still need spares, so try to allocate some */
+ INIT_LIST_HEAD(&list);
+ i = 0;
+ while (i < needed) {
+ cas_page_t *spare = cas_page_alloc(cp, flags);
+ if (!spare)
+ break;
+ list_add(&spare->list, &list);
+ i++;
+ }
+
+ spin_lock(&cp->rx_spare_lock);
+ list_splice(&list, &cp->rx_spare_list);
+ cp->rx_spares_needed -= i;
+ spin_unlock(&cp->rx_spare_lock);
+}
+
+/* pull a page from the list. */
+static cas_page_t *cas_page_dequeue(struct cas *cp)
+{
+ struct list_head *entry;
+ int recover;
+
+ spin_lock(&cp->rx_spare_lock);
+ if (list_empty(&cp->rx_spare_list)) {
+ /* try to do a quick recovery */
+ spin_unlock(&cp->rx_spare_lock);
+ cas_spare_recover(cp, GFP_ATOMIC);
+ spin_lock(&cp->rx_spare_lock);
+ if (list_empty(&cp->rx_spare_list)) {
+ netif_err(cp, rx_err, cp->dev,
+ "no spare buffers available\n");
+ spin_unlock(&cp->rx_spare_lock);
+ return NULL;
+ }
+ }
+
+ entry = cp->rx_spare_list.next;
+ list_del(entry);
+ recover = ++cp->rx_spares_needed;
+ spin_unlock(&cp->rx_spare_lock);
+
+ /* trigger the timer to do the recovery */
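+ /* (RX_SPARE_RECOVER_VAL is presumably a power of two, so the
+ * mask below fires once every RX_SPARE_RECOVER_VAL dequeues)
+ */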
+ if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
+#if 1
+ atomic_inc(&cp->reset_task_pending);
+ atomic_inc(&cp->reset_task_pending_spare);
+ schedule_work(&cp->reset_task);
+#else
+ atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
+ schedule_work(&cp->reset_task);
+#endif
+ }
+ return list_entry(entry, cas_page_t, list);
+}
+
+
+static void cas_mif_poll(struct cas *cp, const int enable)
+{
+ u32 cfg;
+
+ cfg = readl(cp->regs + REG_MIF_CFG);
+ cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
+
+ if (cp->phy_type & CAS_PHY_MII_MDIO1)
+ cfg |= MIF_CFG_PHY_SELECT;
+
+ /* poll and interrupt on link status change. */
+ if (enable) {
+ cfg |= MIF_CFG_POLL_EN;
+ cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
+ cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
+ }
+ writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
+ cp->regs + REG_MIF_MASK);
+ writel(cfg, cp->regs + REG_MIF_CFG);
+}
+
+/* Must be invoked under cp->lock */
+static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
+{
+ u16 ctl;
+#if 1
+ int lcntl;
+ int changed = 0;
+ int oldstate = cp->lstate;
+ int link_was_not_down = !(oldstate == link_down);
+#endif
+ /* Setup link parameters */
+ if (!ep)
+ goto start_aneg;
+ lcntl = cp->link_cntl;
+ if (ep->autoneg == AUTONEG_ENABLE)
+ cp->link_cntl = BMCR_ANENABLE;
+ else {
+ u32 speed = ethtool_cmd_speed(ep);
+ cp->link_cntl = 0;
+ if (speed == SPEED_100)
+ cp->link_cntl |= BMCR_SPEED100;
+ else if (speed == SPEED_1000)
+ cp->link_cntl |= CAS_BMCR_SPEED1000;
+ if (ep->duplex == DUPLEX_FULL)
+ cp->link_cntl |= BMCR_FULLDPLX;
+ }
+#if 1
+ changed = (lcntl != cp->link_cntl);
+#endif
+start_aneg:
+ if (cp->lstate == link_up) {
+ netdev_info(cp->dev, "PCS link down\n");
+ } else {
+ if (changed) {
+ netdev_info(cp->dev, "link configuration changed\n");
+ }
+ }
+ cp->lstate = link_down;
+ cp->link_transition = LINK_TRANSITION_LINK_DOWN;
+ if (!cp->hw_running)
+ return;
+#if 1
+ /*
+ * WTZ: If the old state was link_up, we turn off the carrier
+ * to replicate everything we do elsewhere on a link-down
+ * event when we were already in a link-up state.
+ */
+ if (oldstate == link_up)
+ netif_carrier_off(cp->dev);
+ if (changed && link_was_not_down) {
+ /*
+ * WTZ: This branch will simply schedule a full reset after
+ * we explicitly changed link modes in an ioctl. See if this
+ * fixes the link problems we were having in forced mode.
+ */
+ atomic_inc(&cp->reset_task_pending);
+ atomic_inc(&cp->reset_task_pending_all);
+ schedule_work(&cp->reset_task);
+ cp->timer_ticks = 0;
+ mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
+ return;
+ }
+#endif
+ if (cp->phy_type & CAS_PHY_SERDES) {
+ u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
+
+ if (cp->link_cntl & BMCR_ANENABLE) {
+ val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
+ cp->lstate = link_aneg;
+ } else {
+ if (cp->link_cntl & BMCR_FULLDPLX)
+ val |= PCS_MII_CTRL_DUPLEX;
+ val &= ~PCS_MII_AUTONEG_EN;
+ cp->lstate = link_force_ok;
+ }
+ cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
+ writel(val, cp->regs + REG_PCS_MII_CTRL);
+
+ } else {
+ cas_mif_poll(cp, 0);
+ ctl = cas_phy_read(cp, MII_BMCR);
+ ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
+ CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
+ ctl |= cp->link_cntl;
+ if (ctl & BMCR_ANENABLE) {
+ ctl |= BMCR_ANRESTART;
+ cp->lstate = link_aneg;
+ } else {
+ cp->lstate = link_force_ok;
+ }
+ cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
+ cas_phy_write(cp, MII_BMCR, ctl);
+ cas_mif_poll(cp, 1);
+ }
+
+ cp->timer_ticks = 0;
+ mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
+}
+
+/* Must be invoked under cp->lock. */
+static int cas_reset_mii_phy(struct cas *cp)
+{
+ int limit = STOP_TRIES_PHY;
+ u16 val;
+
+ cas_phy_write(cp, MII_BMCR, BMCR_RESET);
+ udelay(100);
+ while (--limit) {
+ val = cas_phy_read(cp, MII_BMCR);
+ if ((val & BMCR_RESET) == 0)
+ break;
+ udelay(10);
+ }
+ return limit <= 0;
+}
+
+static int cas_saturn_firmware_init(struct cas *cp)
+{
+ const struct firmware *fw;
+ const char fw_name[] = "sun/cassini.bin";
+ int err;
+
+ if (PHY_NS_DP83065 != cp->phy_id)
+ return 0;
+
+ err = request_firmware(&fw, fw_name, &cp->pdev->dev);
+ if (err) {
+ pr_err("Failed to load firmware \"%s\"\n",
+ fw_name);
+ return err;
+ }
+ if (fw->size < 2) {
+ pr_err("bogus length %zu in \"%s\"\n",
+ fw->size, fw_name);
+ err = -EINVAL;
+ goto out;
+ }
+ cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
+ cp->fw_size = fw->size - 2;
+ cp->fw_data = vmalloc(cp->fw_size);
+ if (!cp->fw_data) {
+ err = -ENOMEM;
+ pr_err("Failed to allocate buffer for \"%s\": %d\n", fw_name, err);
+ goto out;
+ }
+ memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
+out:
+ release_firmware(fw);
+ return err;
+}
+
+static void cas_saturn_firmware_load(struct cas *cp)
+{
+ int i;
+
+ cas_phy_powerdown(cp);
+
+ /* expanded memory access mode */
+ cas_phy_write(cp, DP83065_MII_MEM, 0x0);
+
+ /* pointer configuration for new firmware */
+ cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
+ cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
+ cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
+ cas_phy_write(cp, DP83065_MII_REGD, 0x82);
+ cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
+ cas_phy_write(cp, DP83065_MII_REGD, 0x0);
+ cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
+ cas_phy_write(cp, DP83065_MII_REGD, 0x39);
+
+ /* download new firmware */
+ cas_phy_write(cp, DP83065_MII_MEM, 0x1);
+ cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
+ for (i = 0; i < cp->fw_size; i++)
+ cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
+
+ /* enable firmware */
+ cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
+ cas_phy_write(cp, DP83065_MII_REGD, 0x1);
+}
+
+
+/* phy initialization */
+static void cas_phy_init(struct cas *cp)
+{
+ u16 val;
+
+ /* if we're in MII/GMII mode, set up phy */
+ if (CAS_PHY_MII(cp->phy_type)) {
+ writel(PCS_DATAPATH_MODE_MII,
+ cp->regs + REG_PCS_DATAPATH_MODE);
+
+ cas_mif_poll(cp, 0);
+ cas_reset_mii_phy(cp); /* take out of isolate mode */
+
+ if (PHY_LUCENT_B0 == cp->phy_id) {
+ /* workaround link up/down issue with lucent */
+ cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
+ cas_phy_write(cp, MII_BMCR, 0x00f1);
+ cas_phy_write(cp, LUCENT_MII_REG, 0x0);
+
+ } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
+ /* workarounds for broadcom phy */
+ cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
+ cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
+ cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
+ cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
+ cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
+ cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
+ cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
+ cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
+ cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
+ cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
+ cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
+
+ } else if (PHY_BROADCOM_5411 == cp->phy_id) {
+ val = cas_phy_read(cp, BROADCOM_MII_REG4);
+ val = cas_phy_read(cp, BROADCOM_MII_REG4);
+ if (val & 0x0080) {
+ /* link workaround */
+ cas_phy_write(cp, BROADCOM_MII_REG4,
+ val & ~0x0080);
+ }
+
+ } else if (cp->cas_flags & CAS_FLAG_SATURN) {
+ writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
+ SATURN_PCFG_FSI : 0x0,
+ cp->regs + REG_SATURN_PCFG);
+
+ /* load firmware to address 10Mbps auto-negotiation
+ * issue. NOTE: this will need to be changed if the
+ * default firmware gets fixed.
+ */
+ if (PHY_NS_DP83065 == cp->phy_id) {
+ cas_saturn_firmware_load(cp);
+ }
+ cas_phy_powerup(cp);
+ }
+
+ /* advertise capabilities */
+ val = cas_phy_read(cp, MII_BMCR);
+ val &= ~BMCR_ANENABLE;
+ cas_phy_write(cp, MII_BMCR, val);
+ udelay(10);
+
+ cas_phy_write(cp, MII_ADVERTISE,
+ cas_phy_read(cp, MII_ADVERTISE) |
+ (ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL |
+ CAS_ADVERTISE_PAUSE |
+ CAS_ADVERTISE_ASYM_PAUSE));
+
+ if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
+ /* make sure that we don't advertise half
+ * duplex to avoid a chip issue
+ */
+ val = cas_phy_read(cp, CAS_MII_1000_CTRL);
+ val &= ~CAS_ADVERTISE_1000HALF;
+ val |= CAS_ADVERTISE_1000FULL;
+ cas_phy_write(cp, CAS_MII_1000_CTRL, val);
+ }
+
+ } else {
+ /* reset pcs for serdes */
+ u32 val;
+ int limit;
+
+ writel(PCS_DATAPATH_MODE_SERDES,
+ cp->regs + REG_PCS_DATAPATH_MODE);
+
+ /* enable serdes pins on saturn */
+ if (cp->cas_flags & CAS_FLAG_SATURN)
+ writel(0, cp->regs + REG_SATURN_PCFG);
+
+ /* Reset PCS unit. */
+ val = readl(cp->regs + REG_PCS_MII_CTRL);
+ val |= PCS_MII_RESET;
+ writel(val, cp->regs + REG_PCS_MII_CTRL);
+
+ limit = STOP_TRIES;
+ while (--limit > 0) {
+ udelay(10);
+ if ((readl(cp->regs + REG_PCS_MII_CTRL) &
+ PCS_MII_RESET) == 0)
+ break;
+ }
+ if (limit <= 0)
+ netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
+ readl(cp->regs + REG_PCS_STATE_MACHINE));
+
+ /* Make sure PCS is disabled while changing advertisement
+ * configuration.
+ */
+ writel(0x0, cp->regs + REG_PCS_CFG);
+
+ /* Advertise all capabilities except half-duplex. */
+ val = readl(cp->regs + REG_PCS_MII_ADVERT);
+ val &= ~PCS_MII_ADVERT_HD;
+ val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
+ PCS_MII_ADVERT_ASYM_PAUSE);
+ writel(val, cp->regs + REG_PCS_MII_ADVERT);
+
+ /* enable PCS */
+ writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
+
+ /* pcs workaround: enable sync detect */
+ writel(PCS_SERDES_CTRL_SYNCD_EN,
+ cp->regs + REG_PCS_SERDES_CTRL);
+ }
+}
+
+
+static int cas_pcs_link_check(struct cas *cp)
+{
+ u32 stat, state_machine;
+ int retval = 0;
+
+ /* The link status bit latches on zero, so you must
+ * read it twice in such a case to see a transition
+ * to the link being up.
+ */
+ stat = readl(cp->regs + REG_PCS_MII_STATUS);
+ if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
+ stat = readl(cp->regs + REG_PCS_MII_STATUS);
+
+ /* The remote-fault indication is only valid
+ * when autoneg has completed.
+ */
+ if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
+ PCS_MII_STATUS_REMOTE_FAULT)) ==
+ (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
+ netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
+
+ /* work around link detection issue by querying the PCS state
+ * machine directly.
+ */
+ state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
+ if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
+ stat &= ~PCS_MII_STATUS_LINK_STATUS;
+ } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
+ stat |= PCS_MII_STATUS_LINK_STATUS;
+ }
+
+ if (stat & PCS_MII_STATUS_LINK_STATUS) {
+ if (cp->lstate != link_up) {
+ if (cp->opened) {
+ cp->lstate = link_up;
+ cp->link_transition = LINK_TRANSITION_LINK_UP;
+
+ cas_set_link_modes(cp);
+ netif_carrier_on(cp->dev);
+ }
+ }
+ } else if (cp->lstate == link_up) {
+ cp->lstate = link_down;
+ if (link_transition_timeout != 0 &&
+ cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
+ !cp->link_transition_jiffies_valid) {
+ /*
+ * force a reset, as a workaround for the
+ * link-failure problem. May want to move this to a
+ * point a bit earlier in the sequence. If we had
+ * generated a reset a short time ago, we'll wait for
+ * the link timer to check the status until a
+ * timer expires (link_transition_jiffies_valid is
+ * true when the timer is running.) Instead of using
+ * a system timer, we just do a check whenever the
+ * link timer is running - this clears the flag after
+ * a suitable delay.
+ */
+ retval = 1;
+ cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
+ cp->link_transition_jiffies = jiffies;
+ cp->link_transition_jiffies_valid = 1;
+ } else {
+ cp->link_transition = LINK_TRANSITION_ON_FAILURE;
+ }
+ netif_carrier_off(cp->dev);
+ if (cp->opened)
+ netif_info(cp, link, cp->dev, "PCS link down\n");
+
+ /* Cassini only: if you force a mode, there can be
+ * sync problems on link down. to fix that, the following
+ * things need to be checked:
+ * 1) read serialink state register
+ * 2) read pcs status register to verify link down.
+ * 3) if link down and serial link == 0x03, then you need
+ * to global reset the chip.
+ */
+ if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
+ /* should check to see if we're in a forced mode */
+ stat = readl(cp->regs + REG_PCS_SERDES_STATE);
+ if (stat == 0x03)
+ return 1;
+ }
+ } else if (cp->lstate == link_down) {
+ if (link_transition_timeout != 0 &&
+ cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
+ !cp->link_transition_jiffies_valid) {
+ /* force a reset, as a workaround for the
+ * link-failure problem. May want to move
+ * this to a point a bit earlier in the
+ * sequence.
+ */
+ retval = 1;
+ cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
+ cp->link_transition_jiffies = jiffies;
+ cp->link_transition_jiffies_valid = 1;
+ } else {
+ cp->link_transition = LINK_TRANSITION_STILL_FAILED;
+ }
+ }
+
+ return retval;
+}
+
+static int cas_pcs_interrupt(struct net_device *dev,
+ struct cas *cp, u32 status)
+{
+ u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
+
+ if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
+ return 0;
+ return cas_pcs_link_check(cp);
+}
+
+static int cas_txmac_interrupt(struct net_device *dev,
+ struct cas *cp, u32 status)
+{
+ u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
+
+ if (!txmac_stat)
+ return 0;
+
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
+
+ /* Defer-timer expiration is quite normal;
+ * don't even log the event.
+ */
+ if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
+ !(txmac_stat & ~MAC_TX_DEFER_TIMER))
+ return 0;
+
+ spin_lock(&cp->stat_lock[0]);
+ if (txmac_stat & MAC_TX_UNDERRUN) {
+ netdev_err(dev, "TX MAC xmit underrun\n");
+ cp->net_stats[0].tx_fifo_errors++;
+ }
+
+ if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
+ netdev_err(dev, "TX MAC max packet size error\n");
+ cp->net_stats[0].tx_errors++;
+ }
+
+ /* The rest are all cases of one of the 16-bit TX
+ * counters expiring.
+ */
+ if (txmac_stat & MAC_TX_COLL_NORMAL)
+ cp->net_stats[0].collisions += 0x10000;
+
+ if (txmac_stat & MAC_TX_COLL_EXCESS) {
+ cp->net_stats[0].tx_aborted_errors += 0x10000;
+ cp->net_stats[0].collisions += 0x10000;
+ }
+
+ if (txmac_stat & MAC_TX_COLL_LATE) {
+ cp->net_stats[0].tx_aborted_errors += 0x10000;
+ cp->net_stats[0].collisions += 0x10000;
+ }
+ spin_unlock(&cp->stat_lock[0]);
+
+ /* We do not keep track of MAC_TX_COLL_FIRST and
+ * MAC_TX_PEAK_ATTEMPTS events.
+ */
+ return 0;
+}
+
+static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
+{
+ cas_hp_inst_t *inst;
+ u32 val;
+ int i;
+
+ i = 0;
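+ /* walk the instruction table; firmware is advanced at the bottom
+ * of the loop, and a NULL table pointer or NULL note field ends
+ * the walk.
+ */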
+ while ((inst = firmware) && inst->note) {
+ writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
+
+ val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
+ val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
+ writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
+
+ val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
+ val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
+ val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
+ val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
+ val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
+ val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
+ val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
+ writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
+
+ val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
+ val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
+ val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
+ val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
+ writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
+ ++firmware;
+ ++i;
+ }
+}
+
+static void cas_init_rx_dma(struct cas *cp)
+{
+ u64 desc_dma = cp->block_dvma;
+ u32 val;
+ int i, size;
+
+ /* rx free descriptors */
+ val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
+ val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
+ val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
+ if ((N_RX_DESC_RINGS > 1) &&
+ (cp->cas_flags & CAS_FLAG_REG_PLUS)) /* do desc 2 */
+ val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
+ writel(val, cp->regs + REG_RX_CFG);
+
+ val = (unsigned long) cp->init_rxds[0] -
+ (unsigned long) cp->init_block;
+ writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
+ writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
+ writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
+
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ /* rx desc 2 is for IPSEC packets. however,
+ * we don't use it for that purpose.
+ */
+ val = (unsigned long) cp->init_rxds[1] -
+ (unsigned long) cp->init_block;
+ writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
+ writel((desc_dma + val) & 0xffffffff, cp->regs +
+ REG_PLUS_RX_DB1_LOW);
+ writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
+ REG_PLUS_RX_KICK1);
+ }
+
+ /* rx completion registers */
+ val = (unsigned long) cp->init_rxcs[0] -
+ (unsigned long) cp->init_block;
+ writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
+ writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
+
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ /* rx comp 2-4 */
+ for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
+ val = (unsigned long) cp->init_rxcs[i] -
+ (unsigned long) cp->init_block;
+ writel((desc_dma + val) >> 32, cp->regs +
+ REG_PLUS_RX_CBN_HI(i));
+ writel((desc_dma + val) & 0xffffffff, cp->regs +
+ REG_PLUS_RX_CBN_LOW(i));
+ }
+ }
+
+ /* read the selective clear regs to prevent spurious interrupts
+ * on reset because complete == kick; the selective clears are
+ * set up so that resets don't generate interrupts.
+ */
+ readl(cp->regs + REG_INTR_STATUS_ALIAS);
+ writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ for (i = 1; i < N_RX_COMP_RINGS; i++)
+ readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
+
+ /* 2 is different from 3 and 4 */
+ if (N_RX_COMP_RINGS > 1)
+ writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
+ cp->regs + REG_PLUS_ALIASN_CLEAR(1));
+
+ for (i = 2; i < N_RX_COMP_RINGS; i++)
+ writel(INTR_RX_DONE_ALT,
+ cp->regs + REG_PLUS_ALIASN_CLEAR(i));
+ }
+
+ /* set up pause thresholds */
+ val = CAS_BASE(RX_PAUSE_THRESH_OFF,
+ cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
+ val |= CAS_BASE(RX_PAUSE_THRESH_ON,
+ cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
+ writel(val, cp->regs + REG_RX_PAUSE_THRESH);
+
+ /* zero out dma reassembly buffers */
+ for (i = 0; i < 64; i++) {
+ writel(i, cp->regs + REG_RX_TABLE_ADDR);
+ writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
+ writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
+ writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
+ }
+
+ /* make sure address register is 0 for normal operation */
+ writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
+ writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
+
+ /* interrupt mitigation */
+#ifdef USE_RX_BLANK
+ val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
+ val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
+ writel(val, cp->regs + REG_RX_BLANK);
+#else
+ writel(0x0, cp->regs + REG_RX_BLANK);
+#endif
+
+ /* interrupt generation as a function of low water marks for
+ * free desc and completion entries. these are used to trigger
+ * housekeeping for rx descs. we don't use the free interrupt
+ * as it's not very useful
+ */
+ /* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
+ val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
+ writel(val, cp->regs + REG_RX_AE_THRESH);
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
+ writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
+ }
+
+ /* Random early detect registers. useful for congestion avoidance.
+ * this should be tunable.
+ */
+ writel(0x0, cp->regs + REG_RX_RED);
+
+ /* receive page sizes. default == 2K (0x800) */
+ val = 0;
+ if (cp->page_size == 0x1000)
+ val = 0x1;
+ else if (cp->page_size == 0x2000)
+ val = 0x2;
+ else if (cp->page_size == 0x4000)
+ val = 0x3;
+
+ /* round mtu + offset. constrain to page size. */
+ size = cp->dev->mtu + 64;
+ if (size > cp->page_size)
+ size = cp->page_size;
+
+ if (size <= 0x400)
+ i = 0x0;
+ else if (size <= 0x800)
+ i = 0x1;
+ else if (size <= 0x1000)
+ i = 0x2;
+ else
+ i = 0x3;
+
+ cp->mtu_stride = 1 << (i + 10);
+ val = CAS_BASE(RX_PAGE_SIZE, val);
+ val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
+ val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
+ val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
+ writel(val, cp->regs + REG_RX_PAGE_SIZE);
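+ /* worked example (illustrative): with the default 1500-byte mtu,
+ * size = 1564, so i = 1 and mtu_stride = 1 << 11 = 2048; an 8K
+ * receive page then holds four mtu strides.
+ */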
+
+ /* enable the header parser if desired */
+ if (CAS_HP_FIRMWARE == cas_prog_null)
+ return;
+
+ val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
+ val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
+ val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
+ writel(val, cp->regs + REG_HP_CFG);
+}
+
+static inline void cas_rxc_init(struct cas_rx_comp *rxc)
+{
+ memset(rxc, 0, sizeof(*rxc));
+ rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
+}
+
+/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
+ * flipping is protected by the fact that the chip will not
+ * hand back the same page index while it's being processed.
+ */
+static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
+{
+ cas_page_t *page = cp->rx_pages[1][index];
+ cas_page_t *new;
+
+ if (page_count(page->buffer) == 1)
+ return page;
+
+ new = cas_page_dequeue(cp);
+ if (new) {
+ spin_lock(&cp->rx_inuse_lock);
+ list_add(&page->list, &cp->rx_inuse_list);
+ spin_unlock(&cp->rx_inuse_lock);
+ }
+ return new;
+}
+
+/* this needs to be changed if we actually use the ENC RX DESC ring */
+static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
+ const int index)
+{
+ cas_page_t **page0 = cp->rx_pages[0];
+ cas_page_t **page1 = cp->rx_pages[1];
+
+ /* swap if buffer is in use */
+ if (page_count(page0[index]->buffer) > 1) {
+ cas_page_t *new = cas_page_spare(cp, index);
+ if (new) {
+ page1[index] = page0[index];
+ page0[index] = new;
+ }
+ }
+ RX_USED_SET(page0[index], 0);
+ return page0[index];
+}
+
+static void cas_clean_rxds(struct cas *cp)
+{
+ /* only clean ring 0 as ring 1 is used for spare buffers */
+ struct cas_rx_desc *rxd = cp->init_rxds[0];
+ int i, size;
+
+ /* release all rx flows */
+ for (i = 0; i < N_RX_FLOWS; i++) {
+ struct sk_buff *skb;
+ while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
+ cas_skb_release(skb);
+ }
+ }
+
+ /* initialize descriptors */
+ size = RX_DESC_RINGN_SIZE(0);
+ for (i = 0; i < size; i++) {
+ cas_page_t *page = cas_page_swap(cp, 0, i);
+ rxd[i].buffer = cpu_to_le64(page->dma_addr);
+ rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
+ CAS_BASE(RX_INDEX_RING, 0));
+ }
+
+ cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
+ cp->rx_last[0] = 0;
+ cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
+}
+
+static void cas_clean_rxcs(struct cas *cp)
+{
+ int i, j;
+
+ /* take ownership of rx comp descriptors */
+ memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
+ memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
+ for (i = 0; i < N_RX_COMP_RINGS; i++) {
+ struct cas_rx_comp *rxc = cp->init_rxcs[i];
+ for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
+ cas_rxc_init(rxc + j);
+ }
+ }
+}
+
+#if 0
+/* When we get a RX fifo overflow, the RX unit is probably hung
+ * so we do the following.
+ *
+ * If any part of the reset goes wrong, we return 1 and that causes the
+ * whole chip to be reset.
+ */
+static int cas_rxmac_reset(struct cas *cp)
+{
+ struct net_device *dev = cp->dev;
+ int limit;
+ u32 val;
+
+ /* First, reset MAC RX. */
+ writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
+ for (limit = 0; limit < STOP_TRIES; limit++) {
+ if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
+ break;
+ udelay(10);
+ }
+ if (limit == STOP_TRIES) {
+ netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
+ return 1;
+ }
+
+ /* Second, disable RX DMA. */
+ writel(0, cp->regs + REG_RX_CFG);
+ for (limit = 0; limit < STOP_TRIES; limit++) {
+ if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
+ break;
+ udelay(10);
+ }
+ if (limit == STOP_TRIES) {
+ netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
+ return 1;
+ }
+
+ mdelay(5);
+
+ /* Execute RX reset command. */
+ writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
+ for (limit = 0; limit < STOP_TRIES; limit++) {
+ if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
+ break;
+ udelay(10);
+ }
+ if (limit == STOP_TRIES) {
+ netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
+ return 1;
+ }
+
+ /* reset driver rx state */
+ cas_clean_rxds(cp);
+ cas_clean_rxcs(cp);
+
+ /* Now, reprogram the rest of RX unit. */
+ cas_init_rx_dma(cp);
+
+ /* re-enable */
+ val = readl(cp->regs + REG_RX_CFG);
+ writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
+ writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
+ val = readl(cp->regs + REG_MAC_RX_CFG);
+ writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
+ return 0;
+}
+#endif
+
+static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
+ u32 status)
+{
+ u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
+
+ if (!stat)
+ return 0;
+
+ netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
+
+ /* these are all rollovers */
+ spin_lock(&cp->stat_lock[0]);
+ if (stat & MAC_RX_ALIGN_ERR)
+ cp->net_stats[0].rx_frame_errors += 0x10000;
+
+ if (stat & MAC_RX_CRC_ERR)
+ cp->net_stats[0].rx_crc_errors += 0x10000;
+
+ if (stat & MAC_RX_LEN_ERR)
+ cp->net_stats[0].rx_length_errors += 0x10000;
+
+ if (stat & MAC_RX_OVERFLOW) {
+ cp->net_stats[0].rx_over_errors++;
+ cp->net_stats[0].rx_fifo_errors++;
+ }
+
+ /* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
+ * events.
+ */
+ spin_unlock(&cp->stat_lock[0]);
+ return 0;
+}
+
+static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
+ u32 status)
+{
+ u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
+
+ if (!stat)
+ return 0;
+
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "mac interrupt, stat: 0x%x\n", stat);
+
+ /* This interrupt is just for pause-frame and pause-state
+ * tracking. It is useful for diagnostics and debug,
+ * but by default we will probably mask these events.
+ */
+ if (stat & MAC_CTRL_PAUSE_STATE)
+ cp->pause_entered++;
+
+ if (stat & MAC_CTRL_PAUSE_RECEIVED)
+ cp->pause_last_time_recvd = (stat >> 16);
+
+ return 0;
+}
+
+
+/* Must be invoked under cp->lock. */
+static inline int cas_mdio_link_not_up(struct cas *cp)
+{
+ u16 val;
+
+ switch (cp->lstate) {
+ case link_force_ret:
+ netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
+ cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
+ cp->timer_ticks = 5;
+ cp->lstate = link_force_ok;
+ cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
+ break;
+
+ case link_aneg:
+ val = cas_phy_read(cp, MII_BMCR);
+
+ /* Try forced modes. we try things in the following order:
+ * 1000 full -> 100 full/half -> 10 half
+ */
+ val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
+ val |= BMCR_FULLDPLX;
+ val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
+ CAS_BMCR_SPEED1000 : BMCR_SPEED100;
+ cas_phy_write(cp, MII_BMCR, val);
+ cp->timer_ticks = 5;
+ cp->lstate = link_force_try;
+ cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
+ break;
+
+ case link_force_try:
+ /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
+ val = cas_phy_read(cp, MII_BMCR);
+ cp->timer_ticks = 5;
+ if (val & CAS_BMCR_SPEED1000) { /* gigabit */
+ val &= ~CAS_BMCR_SPEED1000;
+ val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
+ cas_phy_write(cp, MII_BMCR, val);
+ break;
+ }
+
+ if (val & BMCR_SPEED100) {
+ if (val & BMCR_FULLDPLX) /* fd failed */
+ val &= ~BMCR_FULLDPLX;
+ else { /* 100Mbps failed */
+ val &= ~BMCR_SPEED100;
+ }
+ cas_phy_write(cp, MII_BMCR, val);
+ break;
+ }
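+ /* nothing left to downgrade; fall through */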
+ default:
+ break;
+ }
+ return 0;
+}
+
+
+/* must be invoked with cp->lock held */
+static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
+{
+ int restart;
+
+ if (bmsr & BMSR_LSTATUS) {
+ /* Ok, here we got a link. If we had it due to a forced
+ * fallback, and we were configured for autoneg, we
+ * retry a short autoneg pass. If you know your hub is
+ * broken, use ethtool ;)
+ */
+ if ((cp->lstate == link_force_try) &&
+ (cp->link_cntl & BMCR_ANENABLE)) {
+ cp->lstate = link_force_ret;
+ cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
+ cas_mif_poll(cp, 0);
+ cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
+ cp->timer_ticks = 5;
+ if (cp->opened)
+ netif_info(cp, link, cp->dev,
+ "Got link after fallback, retrying autoneg once...\n");
+ cas_phy_write(cp, MII_BMCR,
+ cp->link_fcntl | BMCR_ANENABLE |
+ BMCR_ANRESTART);
+ cas_mif_poll(cp, 1);
+
+ } else if (cp->lstate != link_up) {
+ cp->lstate = link_up;
+ cp->link_transition = LINK_TRANSITION_LINK_UP;
+
+ if (cp->opened) {
+ cas_set_link_modes(cp);
+ netif_carrier_on(cp->dev);
+ }
+ }
+ return 0;
+ }
+
+ /* link not up. if the link was previously up, we restart the
+ * whole process
+ */
+ restart = 0;
+ if (cp->lstate == link_up) {
+ cp->lstate = link_down;
+ cp->link_transition = LINK_TRANSITION_LINK_DOWN;
+
+ netif_carrier_off(cp->dev);
+ if (cp->opened)
+ netif_info(cp, link, cp->dev, "Link down\n");
+ restart = 1;
+
+ } else if (++cp->timer_ticks > 10)
+ cas_mdio_link_not_up(cp);
+
+ return restart;
+}
+
+static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
+ u32 status)
+{
+ u32 stat = readl(cp->regs + REG_MIF_STATUS);
+ u16 bmsr;
+
+ /* check for a link change */
+ if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
+ return 0;
+
+ bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
+ return cas_mii_link_check(cp, bmsr);
+}
+
+static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
+ u32 status)
+{
+ u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
+
+ if (!stat)
+ return 0;
+
+ netdev_err(dev, "PCI error [%04x:%04x]",
+ stat, readl(cp->regs + REG_BIM_DIAG));
+
+ /* cassini+ has this reserved */
+ if ((stat & PCI_ERR_BADACK) &&
+ ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
+ pr_cont(" <No ACK64# during ABS64 cycle>");
+
+ if (stat & PCI_ERR_DTRTO)
+ pr_cont(" <Delayed transaction timeout>");
+ if (stat & PCI_ERR_OTHER)
+ pr_cont(" <other>");
+ if (stat & PCI_ERR_BIM_DMA_WRITE)
+ pr_cont(" <BIM DMA 0 write req>");
+ if (stat & PCI_ERR_BIM_DMA_READ)
+ pr_cont(" <BIM DMA 0 read req>");
+ pr_cont("\n");
+
+ if (stat & PCI_ERR_OTHER) {
+ u16 cfg;
+
+ /* Interrogate PCI config space for the
+ * true cause.
+ */
+ pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
+ netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
+ if (cfg & PCI_STATUS_PARITY)
+ netdev_err(dev, "PCI parity error detected\n");
+ if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
+ netdev_err(dev, "PCI target abort\n");
+ if (cfg & PCI_STATUS_REC_TARGET_ABORT)
+ netdev_err(dev, "PCI master acks target abort\n");
+ if (cfg & PCI_STATUS_REC_MASTER_ABORT)
+ netdev_err(dev, "PCI master abort\n");
+ if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
+ netdev_err(dev, "PCI system error SERR#\n");
+ if (cfg & PCI_STATUS_DETECTED_PARITY)
+ netdev_err(dev, "PCI parity error\n");
+
+ /* Write the error bits back to clear them. */
+ cfg &= (PCI_STATUS_PARITY |
+ PCI_STATUS_SIG_TARGET_ABORT |
+ PCI_STATUS_REC_TARGET_ABORT |
+ PCI_STATUS_REC_MASTER_ABORT |
+ PCI_STATUS_SIG_SYSTEM_ERROR |
+ PCI_STATUS_DETECTED_PARITY);
+ pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
+ }
+
+ /* For all PCI errors, we should reset the chip. */
+ return 1;
+}
+
+/* All non-normal interrupt conditions get serviced here.
+ * Returns non-zero if we should just exit the interrupt
+ * handler right now (i.e. if we reset the card, which invalidates
+ * all of the other original irq status bits).
+ */
+static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
+ u32 status)
+{
+ if (status & INTR_RX_TAG_ERROR) {
+ /* corrupt RX tag framing */
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "corrupt rx tag framing\n");
+ spin_lock(&cp->stat_lock[0]);
+ cp->net_stats[0].rx_errors++;
+ spin_unlock(&cp->stat_lock[0]);
+ goto do_reset;
+ }
+
+ if (status & INTR_RX_LEN_MISMATCH) {
+ /* length mismatch. */
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "length mismatch for rx frame\n");
+ spin_lock(&cp->stat_lock[0]);
+ cp->net_stats[0].rx_errors++;
+ spin_unlock(&cp->stat_lock[0]);
+ goto do_reset;
+ }
+
+ if (status & INTR_PCS_STATUS) {
+ if (cas_pcs_interrupt(dev, cp, status))
+ goto do_reset;
+ }
+
+ if (status & INTR_TX_MAC_STATUS) {
+ if (cas_txmac_interrupt(dev, cp, status))
+ goto do_reset;
+ }
+
+ if (status & INTR_RX_MAC_STATUS) {
+ if (cas_rxmac_interrupt(dev, cp, status))
+ goto do_reset;
+ }
+
+ if (status & INTR_MAC_CTRL_STATUS) {
+ if (cas_mac_interrupt(dev, cp, status))
+ goto do_reset;
+ }
+
+ if (status & INTR_MIF_STATUS) {
+ if (cas_mif_interrupt(dev, cp, status))
+ goto do_reset;
+ }
+
+ if (status & INTR_PCI_ERROR_STATUS) {
+ if (cas_pci_interrupt(dev, cp, status))
+ goto do_reset;
+ }
+ return 0;
+
+do_reset:
+#if 1
+ atomic_inc(&cp->reset_task_pending);
+ atomic_inc(&cp->reset_task_pending_all);
+ netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
+ schedule_work(&cp->reset_task);
+#else
+ atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
+ netdev_err(dev, "reset called in cas_abnormal_irq\n");
+ schedule_work(&cp->reset_task);
+#endif
+ return 1;
+}
+
+/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
+ * determining whether to do a netif_stop/wakeup
+ */
+#define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
+#define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
+static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
+ const int len)
+{
+ unsigned long off = addr + len;
+
+ if (CAS_TABORT(cp) == 1)
+ return 0;
+ if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
+ return 0;
+ return TX_TARGET_ABORT_LEN;
+}
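+/* illustrative reading of the above: on chips with the target-abort
+ * erratum (CAS_TABORT() == 2), a tx buffer whose tail falls within
+ * TX_TARGET_ABORT_LEN bytes of a page boundary has that tail redirected
+ * through a tiny buffer by the caller.
+ */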
+
+static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
+{
+ struct cas_tx_desc *txds;
+ struct sk_buff **skbs;
+ struct net_device *dev = cp->dev;
+ int entry, count;
+
+ spin_lock(&cp->tx_lock[ring]);
+ txds = cp->init_txds[ring];
+ skbs = cp->tx_skbs[ring];
+ entry = cp->tx_old[ring];
+
+ count = TX_BUFF_COUNT(ring, entry, limit);
+ while (entry != limit) {
+ struct sk_buff *skb = skbs[entry];
+ dma_addr_t daddr;
+ u32 dlen;
+ int frag;
+
+ if (!skb) {
+ /* this should never occur */
+ entry = TX_DESC_NEXT(ring, entry);
+ continue;
+ }
+
+ /* however, we might get only a partial skb release. */
+ count -= skb_shinfo(skb)->nr_frags +
+ cp->tx_tiny_use[ring][entry].nbufs + 1;
+ if (count < 0)
+ break;
+
+ netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
+ "tx[%d] done, slot %d\n", ring, entry);
+
+ skbs[entry] = NULL;
+ cp->tx_tiny_use[ring][entry].nbufs = 0;
+
+ for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+ struct cas_tx_desc *txd = txds + entry;
+
+ daddr = le64_to_cpu(txd->buffer);
+ dlen = CAS_VAL(TX_DESC_BUFLEN,
+ le64_to_cpu(txd->control));
+ pci_unmap_page(cp->pdev, daddr, dlen,
+ PCI_DMA_TODEVICE);
+ entry = TX_DESC_NEXT(ring, entry);
+
+ /* tiny buffer may follow */
+ if (cp->tx_tiny_use[ring][entry].used) {
+ cp->tx_tiny_use[ring][entry].used = 0;
+ entry = TX_DESC_NEXT(ring, entry);
+ }
+ }
+
+ spin_lock(&cp->stat_lock[ring]);
+ cp->net_stats[ring].tx_packets++;
+ cp->net_stats[ring].tx_bytes += skb->len;
+ spin_unlock(&cp->stat_lock[ring]);
+ dev_kfree_skb_irq(skb);
+ }
+ cp->tx_old[ring] = entry;
+
+ /* this is wrong for multiple tx rings. the net device needs
+ * multiple queues for this to do the right thing. we wait
+ * for 2*packets to be available when using tiny buffers
+ */
+ if (netif_queue_stopped(dev) &&
+ (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
+ netif_wake_queue(dev);
+ spin_unlock(&cp->tx_lock[ring]);
+}
+
+static void cas_tx(struct net_device *dev, struct cas *cp,
+ u32 status)
+{
+ int limit, ring;
+#ifdef USE_TX_COMPWB
+ u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "tx interrupt, status: 0x%x, %llx\n",
+ status, (unsigned long long)compwb);
+#else
+ /* compwb only exists when completion writeback is in use */
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "tx interrupt, status: 0x%x\n", status);
+#endif
+ /* process all the rings */
+ for (ring = 0; ring < N_TX_RINGS; ring++) {
+#ifdef USE_TX_COMPWB
+ /* use the completion writeback registers */
+ limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
+ CAS_VAL(TX_COMPWB_LSB, compwb);
+ compwb = TX_COMPWB_NEXT(compwb);
+#else
+ limit = readl(cp->regs + REG_TX_COMPN(ring));
+#endif
+ if (cp->tx_old[ring] != limit)
+ cas_tx_ringN(cp, ring, limit);
+ }
+}
+
+
+static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
+ int entry, const u64 *words,
+ struct sk_buff **skbref)
+{
+ int dlen, hlen, len, i, alloclen;
+ int off, swivel = RX_SWIVEL_OFF_VAL;
+ struct cas_page *page;
+ struct sk_buff *skb;
+ void *addr, *crcaddr;
+ __sum16 csum;
+ char *p;
+
+ hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
+ dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
+ len = hlen + dlen;
+
+ if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
+ alloclen = len;
+ else
+ alloclen = max(hlen, RX_COPY_MIN);
+
+ skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
+ if (skb == NULL)
+ return -1;
+
+ *skbref = skb;
+ skb_reserve(skb, swivel);
+
+ p = skb->data;
+ addr = crcaddr = NULL;
+ if (hlen) { /* always copy header pages */
+ i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
+ page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
+ off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
+ swivel;
+
+ i = hlen;
+ if (!dlen) /* attach FCS */
+ i += cp->crc_size;
+ pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
+ PCI_DMA_FROMDEVICE);
+ addr = cas_page_map(page->buffer);
+ memcpy(p, addr + off, i);
+ pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
+ PCI_DMA_FROMDEVICE);
+ cas_page_unmap(addr);
+ RX_USED_ADD(page, 0x100);
+ p += hlen;
+ swivel = 0;
+ }
+
+
+ if (alloclen < (hlen + dlen)) {
+ skb_frag_t *frag = skb_shinfo(skb)->frags;
+
+ /* normal or jumbo packets. we use frags */
+ i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
+ page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
+ off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
+
+ hlen = min(cp->page_size - off, dlen);
+ if (hlen < 0) {
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "rx page overflow: %d\n", hlen);
+ dev_kfree_skb_irq(skb);
+ return -1;
+ }
+ i = hlen;
+ if (i == dlen) /* attach FCS */
+ i += cp->crc_size;
+ pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
+ PCI_DMA_FROMDEVICE);
+
+ /* make sure we always copy a header */
+ swivel = 0;
+ if (p == (char *) skb->data) { /* not split */
+ addr = cas_page_map(page->buffer);
+ memcpy(p, addr + off, RX_COPY_MIN);
+ pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
+ PCI_DMA_FROMDEVICE);
+ cas_page_unmap(addr);
+ off += RX_COPY_MIN;
+ swivel = RX_COPY_MIN;
+ RX_USED_ADD(page, cp->mtu_stride);
+ } else {
+ RX_USED_ADD(page, hlen);
+ }
+ skb_put(skb, alloclen);
+
+ skb_shinfo(skb)->nr_frags++;
+ skb->data_len += hlen - swivel;
+ skb->truesize += hlen - swivel;
+ skb->len += hlen - swivel;
+
+ __skb_frag_set_page(frag, page->buffer);
+ __skb_frag_ref(frag);
+ frag->page_offset = off;
+ frag->size = hlen - swivel;
+
+ /* any more data? */
+ if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
+ hlen = dlen;
+ off = 0;
+
+ i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
+ page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
+ pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
+ hlen + cp->crc_size,
+ PCI_DMA_FROMDEVICE);
+ pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
+ hlen + cp->crc_size,
+ PCI_DMA_FROMDEVICE);
+
+ skb_shinfo(skb)->nr_frags++;
+ skb->data_len += hlen;
+ skb->len += hlen;
+ frag++;
+
+ __skb_frag_set_page(frag, page->buffer);
+ __skb_frag_ref(frag);
+ frag->page_offset = 0;
+ frag->size = hlen;
+ RX_USED_ADD(page, hlen + cp->crc_size);
+ }
+
+ if (cp->crc_size) {
+ addr = cas_page_map(page->buffer);
+ crcaddr = addr + off + hlen;
+ }
+
+ } else {
+ /* copying packet */
+ if (!dlen)
+ goto end_copy_pkt;
+
+ i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
+ page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
+ off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
+ hlen = min(cp->page_size - off, dlen);
+ if (hlen < 0) {
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "rx page overflow: %d\n", hlen);
+ dev_kfree_skb_irq(skb);
+ return -1;
+ }
+ i = hlen;
+ if (i == dlen) /* attach FCS */
+ i += cp->crc_size;
+ pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
+ PCI_DMA_FROMDEVICE);
+ addr = cas_page_map(page->buffer);
+ memcpy(p, addr + off, i);
+ pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
+ PCI_DMA_FROMDEVICE);
+ cas_page_unmap(addr);
+ if (p == (char *) skb->data) /* not split */
+ RX_USED_ADD(page, cp->mtu_stride);
+ else
+ RX_USED_ADD(page, i);
+
+ /* any more data? */
+ if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
+ p += hlen;
+ i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
+ page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
+ pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
+ dlen + cp->crc_size,
+ PCI_DMA_FROMDEVICE);
+ addr = cas_page_map(page->buffer);
+ memcpy(p, addr, dlen + cp->crc_size);
+ pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
+ dlen + cp->crc_size,
+ PCI_DMA_FROMDEVICE);
+ cas_page_unmap(addr);
+ RX_USED_ADD(page, dlen + cp->crc_size);
+ }
+end_copy_pkt:
+ if (cp->crc_size) {
+ addr = NULL;
+ crcaddr = skb->data + alloclen;
+ }
+ skb_put(skb, alloclen);
+ }
+
+ csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
+ if (cp->crc_size) {
+ /* checksum includes FCS. strip it out. */
+ csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
+ csum_unfold(csum)));
+ if (addr)
+ cas_page_unmap(addr);
+ }
+ skb->protocol = eth_type_trans(skb, cp->dev);
+ if (skb->protocol == htons(ETH_P_IP)) {
+ skb->csum = csum_unfold(~csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ } else
+ skb_checksum_none_assert(skb);
+ return len;
+}
+
+
+/* we can handle up to 64 rx flows at a time. we do the same thing
+ * as nonreassm except that we batch up the buffers.
+ * NOTE: we currently just treat each flow as a bunch of packets that
+ * we pass up. a better way would be to coalesce the packets
+ * into a jumbo packet. to do that, we need to do the following:
+ * 1) the first packet will have a clean split between header and
+ * data. save both.
+ * 2) each time the next flow packet comes in, extend the
+ * data length and merge the checksums.
+ * 3) on flow release, fix up the header.
+ * 4) make sure the higher layer doesn't care.
+ * because packets get coalesced, we shouldn't run into fragment count
+ * issues.
+ */
+static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
+ struct sk_buff *skb)
+{
+ int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
+ struct sk_buff_head *flow = &cp->rx_flows[flowid];
+
+ /* this is protected at a higher layer, so no need to
+ * do any additional locking here. stick the buffer
+ * at the end.
+ */
+ __skb_queue_tail(flow, skb);
+ if (words[0] & RX_COMP1_RELEASE_FLOW) {
+ while ((skb = __skb_dequeue(flow))) {
+ cas_skb_release(skb);
+ }
+ }
+}
+
+/* put rx descriptor back on ring. if a buffer is in use by a higher
+ * layer, this will need to put in a replacement.
+ */
+static void cas_post_page(struct cas *cp, const int ring, const int index)
+{
+ cas_page_t *new;
+ int entry;
+
+ entry = cp->rx_old[ring];
+
+ new = cas_page_swap(cp, ring, index);
+ cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
+ cp->init_rxds[ring][entry].index =
+ cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
+ CAS_BASE(RX_INDEX_RING, ring));
+
+ entry = RX_DESC_ENTRY(ring, entry + 1);
+ cp->rx_old[ring] = entry;
+
+ if (entry % 4)
+ return;
+
+ if (ring == 0)
+ writel(entry, cp->regs + REG_RX_KICK);
+ else if ((N_RX_DESC_RINGS > 1) &&
+ (cp->cas_flags & CAS_FLAG_REG_PLUS))
+ writel(entry, cp->regs + REG_PLUS_RX_KICK1);
+}
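+/* note: the kick register is written only on 4-entry boundaries (the
+ * entry % 4 check above), which is why cas_clean_rxds() initializes
+ * rx_old to ring size - 4.
+ */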
+
+
+/* only when things are bad */
+static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
+{
+ unsigned int entry, last, count, released;
+ int cluster;
+ cas_page_t **page = cp->rx_pages[ring];
+
+ entry = cp->rx_old[ring];
+
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "rxd[%d] interrupt, done: %d\n", ring, entry);
+
+ cluster = -1;
+ count = entry & 0x3;
+ last = RX_DESC_ENTRY(ring, num ? entry + num - 4 : entry - 4);
+ released = 0;
+ while (entry != last) {
+ /* make a new buffer if it's still in use */
+ if (page_count(page[entry]->buffer) > 1) {
+ cas_page_t *new = cas_page_dequeue(cp);
+ if (!new) {
+ /* let the timer know that we need to
+ * do this again
+ */
+ cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
+ if (!timer_pending(&cp->link_timer))
+ mod_timer(&cp->link_timer, jiffies +
+ CAS_LINK_FAST_TIMEOUT);
+ cp->rx_old[ring] = entry;
+ cp->rx_last[ring] = num ? num - released : 0;
+ return -ENOMEM;
+ }
+ spin_lock(&cp->rx_inuse_lock);
+ list_add(&page[entry]->list, &cp->rx_inuse_list);
+ spin_unlock(&cp->rx_inuse_lock);
+ cp->init_rxds[ring][entry].buffer =
+ cpu_to_le64(new->dma_addr);
+ page[entry] = new;
+
+ }
+
+ if (++count == 4) {
+ cluster = entry;
+ count = 0;
+ }
+ released++;
+ entry = RX_DESC_ENTRY(ring, entry + 1);
+ }
+ cp->rx_old[ring] = entry;
+
+ if (cluster < 0)
+ return 0;
+
+ if (ring == 0)
+ writel(cluster, cp->regs + REG_RX_KICK);
+ else if ((N_RX_DESC_RINGS > 1) &&
+ (cp->cas_flags & CAS_FLAG_REG_PLUS))
+ writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
+ return 0;
+}
+
+
+/* process a completion ring. packets are set up in three basic ways:
+ * small packets: should be copied header + data in single buffer.
+ * large packets: header and data in a single buffer.
+ * split packets: header in a separate buffer from data.
+ * data may be in multiple pages. data may be > 256
+ * bytes but in a single page.
+ *
+ * NOTE: RX page posting is done in this routine as well. while there's
+ * the capability of using multiple RX completion rings, it isn't
+ * really worthwhile due to the fact that the page posting will
+ * force serialization on the single descriptor ring.
+ */
+static int cas_rx_ringN(struct cas *cp, int ring, int budget)
+{
+ struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
+ int entry, drops;
+ int npackets = 0;
+
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "rx[%d] interrupt, done: %d/%d\n",
+ ring,
+ readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
+
+ entry = cp->rx_new[ring];
+ drops = 0;
+ while (1) {
+ struct cas_rx_comp *rxc = rxcs + entry;
+ struct sk_buff *uninitialized_var(skb);
+ int type, len;
+ u64 words[4];
+ int i, dring;
+
+ words[0] = le64_to_cpu(rxc->word1);
+ words[1] = le64_to_cpu(rxc->word2);
+ words[2] = le64_to_cpu(rxc->word3);
+ words[3] = le64_to_cpu(rxc->word4);
+
+ /* don't touch if still owned by hw */
+ type = CAS_VAL(RX_COMP1_TYPE, words[0]);
+ if (type == 0)
+ break;
+
+ /* hw hasn't cleared the zero bit yet */
+ if (words[3] & RX_COMP4_ZERO) {
+ break;
+ }
+
+ /* get info on the packet */
+ if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
+ spin_lock(&cp->stat_lock[ring]);
+ cp->net_stats[ring].rx_errors++;
+ if (words[3] & RX_COMP4_LEN_MISMATCH)
+ cp->net_stats[ring].rx_length_errors++;
+ if (words[3] & RX_COMP4_BAD)
+ cp->net_stats[ring].rx_crc_errors++;
+ spin_unlock(&cp->stat_lock[ring]);
+
+ /* We'll just return it to Cassini. */
+ drop_it:
+ spin_lock(&cp->stat_lock[ring]);
+ ++cp->net_stats[ring].rx_dropped;
+ spin_unlock(&cp->stat_lock[ring]);
+ goto next;
+ }
+
+ len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
+ if (len < 0) {
+ ++drops;
+ goto drop_it;
+ }
+
+ /* see if it's a flow re-assembly or not. the driver
+ * itself handles release back up.
+ */
+ if (RX_DONT_BATCH || (type == 0x2)) {
+ /* non-reassm: these always get released */
+ cas_skb_release(skb);
+ } else {
+ cas_rx_flow_pkt(cp, words, skb);
+ }
+
+ spin_lock(&cp->stat_lock[ring]);
+ cp->net_stats[ring].rx_packets++;
+ cp->net_stats[ring].rx_bytes += len;
+ spin_unlock(&cp->stat_lock[ring]);
+
+ next:
+ npackets++;
+
+ /* should it be released? */
+ if (words[0] & RX_COMP1_RELEASE_HDR) {
+ i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
+ dring = CAS_VAL(RX_INDEX_RING, i);
+ i = CAS_VAL(RX_INDEX_NUM, i);
+ cas_post_page(cp, dring, i);
+ }
+
+ if (words[0] & RX_COMP1_RELEASE_DATA) {
+ i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
+ dring = CAS_VAL(RX_INDEX_RING, i);
+ i = CAS_VAL(RX_INDEX_NUM, i);
+ cas_post_page(cp, dring, i);
+ }
+
+ if (words[0] & RX_COMP1_RELEASE_NEXT) {
+ i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
+ dring = CAS_VAL(RX_INDEX_RING, i);
+ i = CAS_VAL(RX_INDEX_NUM, i);
+ cas_post_page(cp, dring, i);
+ }
+
+ /* skip to the next entry */
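+ /* RX_COMP1_SKIP is the number of extra completion entries this
+ * packet consumed, hence the "+ 1 +" below.
+ */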
+ entry = RX_COMP_ENTRY(ring, entry + 1 +
+ CAS_VAL(RX_COMP1_SKIP, words[0]));
+#ifdef USE_NAPI
+ if (budget && (npackets >= budget))
+ break;
+#endif
+ }
+ cp->rx_new[ring] = entry;
+
+ if (drops)
+ netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
+ return npackets;
+}
+
+
+/* put completion entries back on the ring */
+static void cas_post_rxcs_ringN(struct net_device *dev,
+ struct cas *cp, int ring)
+{
+ struct cas_rx_comp *rxc = cp->init_rxcs[ring];
+ int last, entry;
+
+ last = cp->rx_cur[ring];
+ entry = cp->rx_new[ring];
+ netif_printk(cp, intr, KERN_DEBUG, dev,
+ "rxc[%d] interrupt, done: %d/%d\n",
+ ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
+
+ /* zero and re-mark descriptors */
+ while (last != entry) {
+ cas_rxc_init(rxc + last);
+ last = RX_COMP_ENTRY(ring, last + 1);
+ }
+ cp->rx_cur[ring] = last;
+
+ if (ring == 0)
+ writel(last, cp->regs + REG_RX_COMP_TAIL);
+ else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
+ writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
+}
+
+
+
+/* cassini can use all four PCI interrupts for the completion ring.
+ * rings 3 and 4 (ring indices 2 and 3) are identical
+ */
+#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
+static inline void cas_handle_irqN(struct net_device *dev,
+ struct cas *cp, const u32 status,
+ const int ring)
+{
+ if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
+ cas_post_rxcs_ringN(dev, cp, ring);
+}
+
+static irqreturn_t cas_interruptN(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct cas *cp = netdev_priv(dev);
+ unsigned long flags;
+ int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
+ u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
+
+ /* check for shared irq */
+ if (status == 0)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
+#ifdef USE_NAPI
+ cas_mask_intr(cp);
+ napi_schedule(&cp->napi);
+#else
+ cas_rx_ringN(cp, ring, 0);
+#endif
+ status &= ~INTR_RX_DONE_ALT;
+ }
+
+ if (status)
+ cas_handle_irqN(dev, cp, status, ring);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return IRQ_HANDLED;
+}
+#endif
+
+#ifdef USE_PCI_INTB
+/* everything but rx packets */
+static inline void cas_handle_irq1(struct cas *cp, const u32 status)
+{
+ if (status & INTR_RX_BUF_UNAVAIL_1) {
+ /* Frame arrived, no free RX buffers available.
+ * NOTE: we can get this on a link transition. */
+ cas_post_rxds_ringN(cp, 1, 0);
+ spin_lock(&cp->stat_lock[1]);
+ cp->net_stats[1].rx_dropped++;
+ spin_unlock(&cp->stat_lock[1]);
+ }
+
+ if (status & INTR_RX_BUF_AE_1)
+ cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
+ RX_AE_FREEN_VAL(1));
+
+ if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
+ cas_post_rxcs_ringN(cp->dev, cp, 1);
+}
+
+/* ring 2 (ring index 1) handles a few more events than rings 3 and 4 */
+static irqreturn_t cas_interrupt1(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct cas *cp = netdev_priv(dev);
+ unsigned long flags;
+ u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
+
+ /* check for shared interrupt */
+ if (status == 0)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
+#ifdef USE_NAPI
+ cas_mask_intr(cp);
+ napi_schedule(&cp->napi);
+#else
+ cas_rx_ringN(cp, 1, 0);
+#endif
+ status &= ~INTR_RX_DONE_ALT;
+ }
+ if (status)
+ cas_handle_irq1(cp, status);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return IRQ_HANDLED;
+}
+#endif
+
+static inline void cas_handle_irq(struct net_device *dev,
+ struct cas *cp, const u32 status)
+{
+ /* housekeeping interrupts */
+ if (status & INTR_ERROR_MASK)
+ cas_abnormal_irq(dev, cp, status);
+
+ if (status & INTR_RX_BUF_UNAVAIL) {
+ /* Frame arrived, no free RX buffers available.
+ * NOTE: we can get this on a link transition.
+ */
+ cas_post_rxds_ringN(cp, 0, 0);
+ spin_lock(&cp->stat_lock[0]);
+ cp->net_stats[0].rx_dropped++;
+ spin_unlock(&cp->stat_lock[0]);
+ } else if (status & INTR_RX_BUF_AE) {
+ cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
+ RX_AE_FREEN_VAL(0));
+ }
+
+ if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
+ cas_post_rxcs_ringN(dev, cp, 0);
+}
+
+static irqreturn_t cas_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct cas *cp = netdev_priv(dev);
+ unsigned long flags;
+ u32 status = readl(cp->regs + REG_INTR_STATUS);
+
+ if (status == 0)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
+ cas_tx(dev, cp, status);
+ status &= ~(INTR_TX_ALL | INTR_TX_INTME);
+ }
+
+ if (status & INTR_RX_DONE) {
+#ifdef USE_NAPI
+ cas_mask_intr(cp);
+ napi_schedule(&cp->napi);
+#else
+ cas_rx_ringN(cp, 0, 0);
+#endif
+ status &= ~INTR_RX_DONE;
+ }
+
+ if (status)
+ cas_handle_irq(dev, cp, status);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return IRQ_HANDLED;
+}
+
+
+#ifdef USE_NAPI
+static int cas_poll(struct napi_struct *napi, int budget)
+{
+ struct cas *cp = container_of(napi, struct cas, napi);
+ struct net_device *dev = cp->dev;
+ int i, enable_intr, credits;
+ u32 status = readl(cp->regs + REG_INTR_STATUS);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ cas_tx(dev, cp, status);
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ /* NAPI rx packets. we spread the credits across all of the
+ * rxc rings
+ *
+ * to make sure we're fair with the work we loop through each
+ * ring N_RX_COMP_RING times with a request of
+ * budget / N_RX_COMP_RINGS
+ */
+ enable_intr = 1;
+ credits = 0;
+ for (i = 0; i < N_RX_COMP_RINGS; i++) {
+ int j;
+ for (j = 0; j < N_RX_COMP_RINGS; j++) {
+ credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
+ if (credits >= budget) {
+ enable_intr = 0;
+ goto rx_comp;
+ }
+ }
+ }
+
+rx_comp:
+ /* final rx completion */
+ spin_lock_irqsave(&cp->lock, flags);
+ if (status)
+ cas_handle_irq(dev, cp, status);
+
+#ifdef USE_PCI_INTB
+ if (N_RX_COMP_RINGS > 1) {
+ status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
+ if (status)
+ cas_handle_irq1(cp, status);
+ }
+#endif
+
+#ifdef USE_PCI_INTC
+ if (N_RX_COMP_RINGS > 2) {
+ status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
+ if (status)
+ cas_handle_irqN(dev, cp, status, 2);
+ }
+#endif
+
+#ifdef USE_PCI_INTD
+ if (N_RX_COMP_RINGS > 3) {
+ status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
+ if (status)
+ cas_handle_irqN(dev, cp, status, 3);
+ }
+#endif
+ spin_unlock_irqrestore(&cp->lock, flags);
+ if (enable_intr) {
+ napi_complete(napi);
+ cas_unmask_intr(cp);
+ }
+ return credits;
+}
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cas_netpoll(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+
+ cas_disable_irq(cp, 0);
+ cas_interrupt(cp->pdev->irq, dev);
+ cas_enable_irq(cp, 0);
+
+#ifdef USE_PCI_INTB
+ if (N_RX_COMP_RINGS > 1) {
+ /* cas_interrupt1(); */
+ }
+#endif
+#ifdef USE_PCI_INTC
+ if (N_RX_COMP_RINGS > 2) {
+ /* cas_interruptN(); */
+ }
+#endif
+#ifdef USE_PCI_INTD
+ if (N_RX_COMP_RINGS > 3) {
+ /* cas_interruptN(); */
+ }
+#endif
+}
+#endif
+
+static void cas_tx_timeout(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+
+ netdev_err(dev, "transmit timed out, resetting\n");
+ if (!cp->hw_running) {
+ netdev_err(dev, "hrm.. hw not running!\n");
+ return;
+ }
+
+ netdev_err(dev, "MIF_STATE[%08x]\n",
+ readl(cp->regs + REG_MIF_STATE_MACHINE));
+
+ netdev_err(dev, "MAC_STATE[%08x]\n",
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
+
+ netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
+ readl(cp->regs + REG_TX_CFG),
+ readl(cp->regs + REG_MAC_TX_STATUS),
+ readl(cp->regs + REG_MAC_TX_CFG),
+ readl(cp->regs + REG_TX_FIFO_PKT_CNT),
+ readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
+ readl(cp->regs + REG_TX_FIFO_READ_PTR),
+ readl(cp->regs + REG_TX_SM_1),
+ readl(cp->regs + REG_TX_SM_2));
+
+ netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
+ readl(cp->regs + REG_RX_CFG),
+ readl(cp->regs + REG_MAC_RX_STATUS),
+ readl(cp->regs + REG_MAC_RX_CFG));
+
+ netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
+ readl(cp->regs + REG_HP_STATE_MACHINE),
+ readl(cp->regs + REG_HP_STATUS0),
+ readl(cp->regs + REG_HP_STATUS1),
+ readl(cp->regs + REG_HP_STATUS2));
+
+#if 1
+ atomic_inc(&cp->reset_task_pending);
+ atomic_inc(&cp->reset_task_pending_all);
+ schedule_work(&cp->reset_task);
+#else
+ atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
+ schedule_work(&cp->reset_task);
+#endif
+}
+
+static inline int cas_intme(int ring, int entry)
+{
+ /* Algorithm: IRQ every 1/2 of descriptors. */
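+ /* e.g. a 128-entry ring requests an interrupt at entries 0 and 64 */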
+ if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
+ return 1;
+ return 0;
+}
+
+
+static void cas_write_txd(struct cas *cp, int ring, int entry,
+ dma_addr_t mapping, int len, u64 ctrl, int last)
+{
+ struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
+
+ ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
+ if (cas_intme(ring, entry))
+ ctrl |= TX_DESC_INTME;
+ if (last)
+ ctrl |= TX_DESC_EOF;
+ txd->control = cpu_to_le64(ctrl);
+ txd->buffer = cpu_to_le64(mapping);
+}
+
+static inline void *tx_tiny_buf(struct cas *cp, const int ring,
+ const int entry)
+{
+ return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
+}
+
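+/* tiny-buffer bookkeeping: "tentry" (the packet's first descriptor)
+ * accumulates the count of tiny buffers the packet consumed, while
+ * "used" marks the individual slot so the reclaim path knows to skip
+ * it rather than unmap it.
+ */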
+static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
+ const int entry, const int tentry)
+{
+ cp->tx_tiny_use[ring][tentry].nbufs++;
+ cp->tx_tiny_use[ring][entry].used = 1;
+ return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
+}
+
+static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
+ struct sk_buff *skb)
+{
+ struct net_device *dev = cp->dev;
+ int entry, nr_frags, frag, tabort, tentry;
+ dma_addr_t mapping;
+ unsigned long flags;
+ u64 ctrl;
+ u32 len;
+
+ spin_lock_irqsave(&cp->tx_lock[ring], flags);
+
+ /* This is a hard error, log it. */
+ if (TX_BUFFS_AVAIL(cp, ring) <=
+ CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ return 1;
+ }
+
+ ctrl = 0;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const u64 csum_start_off = skb_checksum_start_offset(skb);
+ const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
+
+ ctrl = TX_DESC_CSUM_EN |
+ CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
+ CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
+ }
+
+ entry = cp->tx_new[ring];
+ cp->tx_skbs[ring][entry] = skb;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ len = skb_headlen(skb);
+ mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
+ offset_in_page(skb->data), len,
+ PCI_DMA_TODEVICE);
+
+ tentry = entry;
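+ /* cas_calc_tabort() returns the number of trailing bytes that must
+ * be sent from a pre-mapped tiny buffer to avoid the target-abort
+ * erratum on older chips; 0 means the mapping is safe as-is.
+ */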
+ tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
+ if (unlikely(tabort)) {
+ /* NOTE: len is always > tabort */
+ cas_write_txd(cp, ring, entry, mapping, len - tabort,
+ ctrl | TX_DESC_SOF, 0);
+ entry = TX_DESC_NEXT(ring, entry);
+
+ skb_copy_from_linear_data_offset(skb, len - tabort,
+ tx_tiny_buf(cp, ring, entry), tabort);
+ mapping = tx_tiny_map(cp, ring, entry, tentry);
+ cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
+ (nr_frags == 0));
+ } else {
+ cas_write_txd(cp, ring, entry, mapping, len, ctrl |
+ TX_DESC_SOF, (nr_frags == 0));
+ }
+ entry = TX_DESC_NEXT(ring, entry);
+
+ for (frag = 0; frag < nr_frags; frag++) {
+ skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
+
+ len = fragp->size;
+ mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
+ DMA_TO_DEVICE);
+
+ tabort = cas_calc_tabort(cp, fragp->page_offset, len);
+ if (unlikely(tabort)) {
+ void *addr;
+
+ /* NOTE: len is always > tabort */
+ cas_write_txd(cp, ring, entry, mapping, len - tabort,
+ ctrl, 0);
+ entry = TX_DESC_NEXT(ring, entry);
+
+ addr = cas_page_map(skb_frag_page(fragp));
+ memcpy(tx_tiny_buf(cp, ring, entry),
+ addr + fragp->page_offset + len - tabort,
+ tabort);
+ cas_page_unmap(addr);
+ mapping = tx_tiny_map(cp, ring, entry, tentry);
+ len = tabort;
+ }
+
+ cas_write_txd(cp, ring, entry, mapping, len, ctrl,
+ (frag + 1 == nr_frags));
+ entry = TX_DESC_NEXT(ring, entry);
+ }
+
+ cp->tx_new[ring] = entry;
+ if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
+ netif_stop_queue(dev);
+
+ netif_printk(cp, tx_queued, KERN_DEBUG, dev,
+ "tx[%d] queued, slot %d, skblen %d, avail %d\n",
+ ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
+ writel(entry, cp->regs + REG_TX_KICKN(ring));
+ spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
+ return 0;
+}
+
+static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+
+ /* this is only used as a load-balancing hint, so it doesn't
+ * need to be SMP safe
+ */
+ static int ring;
+
+ if (skb_padto(skb, cp->min_frame_size))
+ return NETDEV_TX_OK;
+
+ /* XXX: we need some higher-level QoS hooks to steer packets to
+ * individual queues.
+ */
+ if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
+ return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
+}
+
+static void cas_init_tx_dma(struct cas *cp)
+{
+ u64 desc_dma = cp->block_dvma;
+ unsigned long off;
+ u32 val;
+ int i;
+
+ /* set up tx completion writeback registers. must be 8-byte aligned */
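+ /* the chip DMA-writes its tx completion state into tx_compwb in
+ * the init block, so the reclaim path can poll host memory instead
+ * of reading chip registers.
+ */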
+#ifdef USE_TX_COMPWB
+ off = offsetof(struct cas_init_block, tx_compwb);
+ writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
+ writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
+#endif
+
+ /* enable completion writebacks, enable paced mode,
+ * disable read pipe, and disable pre-interrupt compwbs
+ */
+ val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
+ TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
+ TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
+ TX_CFG_INTR_COMPWB_DIS;
+
+ /* write out tx ring info and tx desc bases */
+ for (i = 0; i < MAX_TX_RINGS; i++) {
+ off = (unsigned long) cp->init_txds[i] -
+ (unsigned long) cp->init_block;
+
+ val |= CAS_TX_RINGN_BASE(i);
+ writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
+ writel((desc_dma + off) & 0xffffffff, cp->regs +
+ REG_TX_DBN_LOW(i));
+ /* don't zero out the kick register here as the system
+ * will wedge
+ */
+ }
+ writel(val, cp->regs + REG_TX_CFG);
+
+ /* program max burst sizes. these numbers should be different
+ * if doing QoS.
+ */
+#ifdef USE_QOS
+ writel(0x800, cp->regs + REG_TX_MAXBURST_0);
+ writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
+ writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
+ writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
+#else
+ writel(0x800, cp->regs + REG_TX_MAXBURST_0);
+ writel(0x800, cp->regs + REG_TX_MAXBURST_1);
+ writel(0x800, cp->regs + REG_TX_MAXBURST_2);
+ writel(0x800, cp->regs + REG_TX_MAXBURST_3);
+#endif
+}
+
+/* Must be invoked under cp->lock. */
+static inline void cas_init_dma(struct cas *cp)
+{
+ cas_init_tx_dma(cp);
+ cas_init_rx_dma(cp);
+}
+
+static void cas_process_mc_list(struct cas *cp)
+{
+ u16 hash_table[16];
+ u32 crc;
+ struct netdev_hw_addr *ha;
+ int i = 1;
+
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(ha, cp->dev) {
+ if (i <= CAS_MC_EXACT_MATCH_SIZE) {
+ /* use the alternate mac address registers for the
+ * first 15 multicast addresses
+ */
+ writel((ha->addr[4] << 8) | ha->addr[5],
+ cp->regs + REG_MAC_ADDRN(i*3 + 0));
+ writel((ha->addr[2] << 8) | ha->addr[3],
+ cp->regs + REG_MAC_ADDRN(i*3 + 1));
+ writel((ha->addr[0] << 8) | ha->addr[1],
+ cp->regs + REG_MAC_ADDRN(i*3 + 2));
+ i++;
+ } else {
+ /* use hw hash table for the next series of
+ * multicast addresses
+ */
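+ /* the top 8 bits of the little-endian CRC select one of 256
+ * filter bits, stored as 16 16-bit hash-table registers
+ */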
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ crc >>= 24;
+ hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+ }
+ }
+ for (i = 0; i < 16; i++)
+ writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
+}
+
+/* Must be invoked under cp->lock. */
+static u32 cas_setup_multicast(struct cas *cp)
+{
+ u32 rxcfg = 0;
+ int i;
+
+ if (cp->dev->flags & IFF_PROMISC) {
+ rxcfg |= MAC_RX_CFG_PROMISC_EN;
+
+ } else if (cp->dev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < 16; i++)
+ writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
+ rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
+
+ } else {
+ cas_process_mc_list(cp);
+ rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
+ }
+
+ return rxcfg;
+}
+
+/* must be invoked under cp->stat_lock[N_TX_RINGS] */
+static void cas_clear_mac_err(struct cas *cp)
+{
+ writel(0, cp->regs + REG_MAC_COLL_NORMAL);
+ writel(0, cp->regs + REG_MAC_COLL_FIRST);
+ writel(0, cp->regs + REG_MAC_COLL_EXCESS);
+ writel(0, cp->regs + REG_MAC_COLL_LATE);
+ writel(0, cp->regs + REG_MAC_TIMER_DEFER);
+ writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
+ writel(0, cp->regs + REG_MAC_RECV_FRAME);
+ writel(0, cp->regs + REG_MAC_LEN_ERR);
+ writel(0, cp->regs + REG_MAC_ALIGN_ERR);
+ writel(0, cp->regs + REG_MAC_FCS_ERR);
+ writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
+}
+
+
+static void cas_mac_reset(struct cas *cp)
+{
+ int i;
+
+ /* do both TX and RX reset */
+ writel(0x1, cp->regs + REG_MAC_TX_RESET);
+ writel(0x1, cp->regs + REG_MAC_RX_RESET);
+
+ /* wait for TX */
+ i = STOP_TRIES;
+ while (i-- > 0) {
+ if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
+ break;
+ udelay(10);
+ }
+
+ /* wait for RX */
+ i = STOP_TRIES;
+ while (i-- > 0) {
+ if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
+ break;
+ udelay(10);
+ }
+
+ if (readl(cp->regs + REG_MAC_TX_RESET) |
+ readl(cp->regs + REG_MAC_RX_RESET))
+ netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
+ readl(cp->regs + REG_MAC_TX_RESET),
+ readl(cp->regs + REG_MAC_RX_RESET),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
+}
+
+
+/* Must be invoked under cp->lock. */
+static void cas_init_mac(struct cas *cp)
+{
+ unsigned char *e = &cp->dev->dev_addr[0];
+ int i;
+ cas_mac_reset(cp);
+
+ /* setup core arbitration weight register */
+ writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
+
+ /* XXX Use pci_dma_burst_advice() */
+#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
+ /* set the infinite burst register for chips that don't have
+ * pci issues.
+ */
+ if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
+ writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
+#endif
+
+ writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
+
+ writel(0x00, cp->regs + REG_MAC_IPG0);
+ writel(0x08, cp->regs + REG_MAC_IPG1);
+ writel(0x04, cp->regs + REG_MAC_IPG2);
+
+ /* change later for 802.3z */
+ writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
+
+ /* min frame + FCS */
+ writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
+
+ /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
+ * specify the maximum frame size to prevent RX tag errors on
+ * oversized frames.
+ */
+ writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
+ CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
+ (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
+ cp->regs + REG_MAC_FRAMESIZE_MAX);
+
+ /* NOTE: crc_size is used as a surrogate for half-duplex.
+ * workaround saturn half-duplex issue by increasing preamble
+ * size to 65 bytes.
+ */
+ if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
+ writel(0x41, cp->regs + REG_MAC_PA_SIZE);
+ else
+ writel(0x07, cp->regs + REG_MAC_PA_SIZE);
+ writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
+ writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
+ writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
+
+ writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
+
+ writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
+ writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
+ writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
+ writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
+ writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
+
+ /* setup mac address in perfect filter array */
+ for (i = 0; i < 45; i++)
+ writel(0x0, cp->regs + REG_MAC_ADDRN(i));
+
+ writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
+ writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
+ writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
+
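+ /* program 01:80:c2:00:00:01, the 802.3x PAUSE multicast address,
+ * into the last perfect-filter slots (low 16 bits first)
+ */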
+ writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
+ writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
+ writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
+
+ cp->mac_rx_cfg = cas_setup_multicast(cp);
+
+ spin_lock(&cp->stat_lock[N_TX_RINGS]);
+ cas_clear_mac_err(cp);
+ spin_unlock(&cp->stat_lock[N_TX_RINGS]);
+
+ /* Setup MAC interrupts. We want to get all of the interesting
+ * counter expiration events, but we do not want to hear about
+ * normal rx/tx as the DMA engine tells us that.
+ */
+ writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
+ writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
+
+ /* Don't enable even the PAUSE interrupts for now, we
+ * make no use of those events other than to record them.
+ */
+ writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
+}
+
+/* Must be invoked under cp->lock. */
+static void cas_init_pause_thresholds(struct cas *cp)
+{
+ /* Calculate pause thresholds. Setting the OFF threshold to the
+ * full RX fifo size effectively disables PAUSE generation
+ */
+ if (cp->rx_fifo_size <= (2 * 1024)) {
+ cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
+ } else {
+ int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
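+ /* an MTU-sized frame rounded up to a multiple of 64 bytes; if
+ * roughly three of these don't fit in the fifo, fall back to
+ * fixed on/off points
+ */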
+ if (max_frame * 3 > cp->rx_fifo_size) {
+ cp->rx_pause_off = 7104;
+ cp->rx_pause_on = 960;
+ } else {
+ int off = (cp->rx_fifo_size - (max_frame * 2));
+ int on = off - max_frame;
+ cp->rx_pause_off = off;
+ cp->rx_pause_on = on;
+ }
+ }
+}
+
+static int cas_vpd_match(const void __iomem *p, const char *str)
+{
+ int len = strlen(str) + 1;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (readb(p + i) != str[i])
+ return 0;
+ }
+ return 1;
+}
+
+
+/* get the mac address by reading the vpd information in the rom.
+ * also get the phy type and determine if there's an entropy generator.
+ * NOTE: this is a bit convoluted for the following reasons:
+ * 1) vpd info has order-dependent mac addresses for multinic cards
+ * 2) the only way to determine the nic order is to use the slot
+ * number.
+ * 3) fiber cards don't have bridges, so their slot numbers don't
+ * mean anything.
+ * 4) we don't actually know we have a fiber card until after
+ * the mac addresses are parsed.
+ */
+static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
+ const int offset)
+{
+ void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
+ void __iomem *base, *kstart;
+ int i, len;
+ int found = 0;
+#define VPD_FOUND_MAC 0x01
+#define VPD_FOUND_PHY 0x02
+
+ int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
+ int mac_off = 0;
+
+#if defined(CONFIG_SPARC)
+ const unsigned char *addr;
+#endif
+
+ /* give us access to the PROM */
+ writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
+ cp->regs + REG_BIM_LOCAL_DEV_EN);
+
+ /* check for an expansion rom */
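+ /* 0x55, 0xaa is the standard PCI expansion ROM signature */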
+ if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
+ goto use_random_mac_addr;
+
+ /* search for beginning of vpd */
+ base = NULL;
+ for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
+ /* check for PCIR */
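+ /* the four bytes spell "PCIR"; bytes 8-9 of the PCI data
+ * structure hold the pointer to the VPD area
+ */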
+ if ((readb(p + i + 0) == 0x50) &&
+ (readb(p + i + 1) == 0x43) &&
+ (readb(p + i + 2) == 0x49) &&
+ (readb(p + i + 3) == 0x52)) {
+ base = p + (readb(p + i + 8) |
+ (readb(p + i + 9) << 8));
+ break;
+ }
+ }
+
+ if (!base || (readb(base) != 0x82))
+ goto use_random_mac_addr;
+
+ i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
+ while (i < EXPANSION_ROM_SIZE) {
+ if (readb(base + i) != 0x90) /* no vpd found */
+ goto use_random_mac_addr;
+
+ /* found a vpd field */
+ len = readb(base + i + 1) | (readb(base + i + 2) << 8);
+
+ /* extract keywords */
+ kstart = base + i + 3;
+ p = kstart;
+ while ((p - kstart) < len) {
+ int klen = readb(p + 2);
+ int j;
+ char type;
+
+ p += 3;
+
+ /* look for the following things:
+ * -- correct length == 29
+ * 3 (type) + 2 (size) +
+ * 18 (strlen("local-mac-address") + 1) +
+ * 6 (mac addr)
+ * -- VPD Instance 'I'
+ * -- VPD Type Bytes 'B'
+ * -- VPD data length == 6
+ * -- property string == local-mac-address
+ *
+ * -- correct length == 24
+ * 3 (type) + 2 (size) +
+ * 12 (strlen("entropy-dev") + 1) +
+ * 7 (strlen("vms110") + 1)
+ * -- VPD Instance 'I'
+ * -- VPD Type String 'S'
+ * -- VPD data length == 7
+ * -- property string == entropy-dev
+ *
+ * -- correct length == 18
+ * 3 (type) + 2 (size) +
+ * 9 (strlen("phy-type") + 1) +
+ * 4 (strlen("pcs") + 1)
+ * -- VPD Instance 'I'
+ * -- VPD Type String 'S'
+ * -- VPD data length == 4
+ * -- property string == phy-type
+ *
+ * -- correct length == 23
+ * 3 (type) + 2 (size) +
+ * 14 (strlen("phy-interface") + 1) +
+ * 4 (strlen("pcs") + 1)
+ * -- VPD Instance 'I'
+ * -- VPD Type String 'S'
+ * -- VPD data length == 4
+ * -- property string == phy-interface
+ */
+ if (readb(p) != 'I')
+ goto next;
+
+ /* finally, check string and length */
+ type = readb(p + 3);
+ if (type == 'B') {
+ if ((klen == 29) && readb(p + 4) == 6 &&
+ cas_vpd_match(p + 5,
+ "local-mac-address")) {
+ if (mac_off++ > offset)
+ goto next;
+
+ /* set mac address */
+ for (j = 0; j < 6; j++)
+ dev_addr[j] =
+ readb(p + 23 + j);
+ goto found_mac;
+ }
+ }
+
+ if (type != 'S')
+ goto next;
+
+#ifdef USE_ENTROPY_DEV
+ if ((klen == 24) &&
+ cas_vpd_match(p + 5, "entropy-dev") &&
+ cas_vpd_match(p + 17, "vms110")) {
+ cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
+ goto next;
+ }
+#endif
+
+ if (found & VPD_FOUND_PHY)
+ goto next;
+
+ if ((klen == 18) && readb(p + 4) == 4 &&
+ cas_vpd_match(p + 5, "phy-type")) {
+ if (cas_vpd_match(p + 14, "pcs")) {
+ phy_type = CAS_PHY_SERDES;
+ goto found_phy;
+ }
+ }
+
+ if ((klen == 23) && readb(p + 4) == 4 &&
+ cas_vpd_match(p + 5, "phy-interface")) {
+ if (cas_vpd_match(p + 19, "pcs")) {
+ phy_type = CAS_PHY_SERDES;
+ goto found_phy;
+ }
+ }
+found_mac:
+ found |= VPD_FOUND_MAC;
+ goto next;
+
+found_phy:
+ found |= VPD_FOUND_PHY;
+
+next:
+ p += klen;
+ }
+ i += len + 3;
+ }
+
+use_random_mac_addr:
+ if (found & VPD_FOUND_MAC)
+ goto done;
+
+#if defined(CONFIG_SPARC)
+ addr = of_get_property(cp->of_node, "local-mac-address", NULL);
+ if (addr != NULL) {
+ memcpy(dev_addr, addr, 6);
+ goto done;
+ }
+#endif
+
+ /* Sun MAC prefix then 3 random bytes. */
+ pr_info("MAC address not found in ROM VPD\n");
+ dev_addr[0] = 0x08;
+ dev_addr[1] = 0x00;
+ dev_addr[2] = 0x20;
+ get_random_bytes(dev_addr + 3, 3);
+
+done:
+ writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
+ return phy_type;
+}
+
+/* check pci invariants */
+static void cas_check_pci_invariants(struct cas *cp)
+{
+ struct pci_dev *pdev = cp->pdev;
+
+ cp->cas_flags = 0;
+ if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
+ (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
+ if (pdev->revision >= CAS_ID_REVPLUS)
+ cp->cas_flags |= CAS_FLAG_REG_PLUS;
+ if (pdev->revision < CAS_ID_REVPLUS02u)
+ cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
+
+ /* Original Cassini supports HW CSUM, but it's not
+ * enabled by default as it can trigger TX hangs.
+ */
+ if (pdev->revision < CAS_ID_REV2)
+ cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
+ } else {
+ /* Only sun has original cassini chips. */
+ cp->cas_flags |= CAS_FLAG_REG_PLUS;
+
+ /* We use a flag because the same phy might be externally
+ * connected.
+ */
+ if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
+ (pdev->device == PCI_DEVICE_ID_NS_SATURN))
+ cp->cas_flags |= CAS_FLAG_SATURN;
+ }
+}
+
+
+static int cas_check_invariants(struct cas *cp)
+{
+ struct pci_dev *pdev = cp->pdev;
+ u32 cfg;
+ int i;
+
+ /* get page size for rx buffers. */
+ cp->page_order = 0;
+#ifdef USE_PAGE_ORDER
+ if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
+ /* see if we can allocate larger pages */
+ struct page *page = alloc_pages(GFP_ATOMIC,
+ CAS_JUMBO_PAGE_SHIFT -
+ PAGE_SHIFT);
+ if (page) {
+ __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
+ cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
+ } else {
+ printk(KERN_INFO "MTU limited to %d bytes\n", CAS_MAX_MTU);
+ }
+ }
+#endif
+ cp->page_size = (PAGE_SIZE << cp->page_order);
+
+ /* Fetch the FIFO configurations. */
+ cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
+ cp->rx_fifo_size = RX_FIFO_SIZE;
+
+ /* finish phy determination. MDIO1 takes precedence over MDIO0 if
+ * they're both connected.
+ */
+ cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
+ PCI_SLOT(pdev->devfn));
+ if (cp->phy_type & CAS_PHY_SERDES) {
+ cp->cas_flags |= CAS_FLAG_1000MB_CAP;
+ return 0; /* no more checking needed */
+ }
+
+ /* MII */
+ cfg = readl(cp->regs + REG_MIF_CFG);
+ if (cfg & MIF_CFG_MDIO_1) {
+ cp->phy_type = CAS_PHY_MII_MDIO1;
+ } else if (cfg & MIF_CFG_MDIO_0) {
+ cp->phy_type = CAS_PHY_MII_MDIO0;
+ }
+
+ cas_mif_poll(cp, 0);
+ writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
+
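+ /* probe all 32 MDIO addresses (retrying each a few times) for a
+ * PHY that reports a sane ID
+ */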
+ for (i = 0; i < 32; i++) {
+ u32 phy_id;
+ int j;
+
+ for (j = 0; j < 3; j++) {
+ cp->phy_addr = i;
+ phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
+ phy_id |= cas_phy_read(cp, MII_PHYSID2);
+ if (phy_id && (phy_id != 0xFFFFFFFF)) {
+ cp->phy_id = phy_id;
+ goto done;
+ }
+ }
+ }
+ pr_err("MII phy did not respond [%08x]\n",
+ readl(cp->regs + REG_MIF_STATE_MACHINE));
+ return -1;
+
+done:
+ /* see if we can do gigabit */
+ cfg = cas_phy_read(cp, MII_BMSR);
+ if ((cfg & CAS_BMSR_1000_EXTEND) &&
+ cas_phy_read(cp, CAS_MII_1000_EXTEND))
+ cp->cas_flags |= CAS_FLAG_1000MB_CAP;
+ return 0;
+}
+
+/* Must be invoked under cp->lock. */
+static inline void cas_start_dma(struct cas *cp)
+{
+ int i;
+ u32 val;
+ int txfailed = 0;
+
+ /* enable dma */
+ val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
+ writel(val, cp->regs + REG_TX_CFG);
+ val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
+ writel(val, cp->regs + REG_RX_CFG);
+
+ /* enable the mac */
+ val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
+ writel(val, cp->regs + REG_MAC_TX_CFG);
+ val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
+ writel(val, cp->regs + REG_MAC_RX_CFG);
+
+ i = STOP_TRIES;
+ while (i-- > 0) {
+ val = readl(cp->regs + REG_MAC_TX_CFG);
+ if ((val & MAC_TX_CFG_EN))
+ break;
+ udelay(10);
+ }
+ if (i < 0)
+ txfailed = 1;
+ i = STOP_TRIES;
+ while (i-- > 0) {
+ val = readl(cp->regs + REG_MAC_RX_CFG);
+ if ((val & MAC_RX_CFG_EN)) {
+ if (txfailed) {
+ netdev_err(cp->dev,
+ "enabling mac failed [tx:%08x:%08x]\n",
+ readl(cp->regs + REG_MIF_STATE_MACHINE),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
+ }
+ goto enable_rx_done;
+ }
+ udelay(10);
+ }
+ netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
+ (txfailed ? "tx,rx" : "rx"),
+ readl(cp->regs + REG_MIF_STATE_MACHINE),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
+
+enable_rx_done:
+ cas_unmask_intr(cp); /* enable interrupts */
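+ /* the rx descriptor kick works in clusters of 4 (see
+ * cas_post_rxds_ringN), so prime each ring with all but the
+ * final group
+ */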
+ writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
+ writel(0, cp->regs + REG_RX_COMP_TAIL);
+
+ if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+ if (N_RX_DESC_RINGS > 1)
+ writel(RX_DESC_RINGN_SIZE(1) - 4,
+ cp->regs + REG_PLUS_RX_KICK1);
+
+ for (i = 1; i < N_RX_COMP_RINGS; i++)
+ writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
+ }
+}
+
+/* Must be invoked under cp->lock. */
+static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
+ int *pause)
+{
+ u32 val = readl(cp->regs + REG_PCS_MII_LPA);
+ *fd = (val & PCS_MII_LPA_FD) ? 1 : 0;
+ *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
+ if (val & PCS_MII_LPA_ASYM_PAUSE)
+ *pause |= 0x10;
+ *spd = 1000;
+}
+
+/* Must be invoked under cp->lock. */
+static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
+ int *pause)
+{
+ u32 val;
+
+ *fd = 0;
+ *spd = 10;
+ *pause = 0;
+
+ /* use GMII registers */
+ val = cas_phy_read(cp, MII_LPA);
+ if (val & CAS_LPA_PAUSE)
+ *pause = 0x01;
+
+ if (val & CAS_LPA_ASYM_PAUSE)
+ *pause |= 0x10;
+
+ if (val & LPA_DUPLEX)
+ *fd = 1;
+ if (val & LPA_100)
+ *spd = 100;
+
+ if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
+ val = cas_phy_read(cp, CAS_MII_1000_STATUS);
+ if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
+ *spd = 1000;
+ if (val & CAS_LPA_1000FULL)
+ *fd = 1;
+ }
+}
+
+/* A link-up condition has occurred, initialize and enable the
+ * rest of the chip.
+ *
+ * Must be invoked under cp->lock.
+ */
+static void cas_set_link_modes(struct cas *cp)
+{
+ u32 val;
+ int full_duplex, speed, pause;
+
+ full_duplex = 0;
+ speed = 10;
+ pause = 0;
+
+ if (CAS_PHY_MII(cp->phy_type)) {
+ cas_mif_poll(cp, 0);
+ val = cas_phy_read(cp, MII_BMCR);
+ if (val & BMCR_ANENABLE) {
+ cas_read_mii_link_mode(cp, &full_duplex, &speed,
+ &pause);
+ } else {
+ if (val & BMCR_FULLDPLX)
+ full_duplex = 1;
+
+ if (val & BMCR_SPEED100)
+ speed = 100;
+ else if (val & CAS_BMCR_SPEED1000)
+ speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
+ 1000 : 100;
+ }
+ cas_mif_poll(cp, 1);
+
+ } else {
+ val = readl(cp->regs + REG_PCS_MII_CTRL);
+ cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
+ if ((val & PCS_MII_AUTONEG_EN) == 0) {
+ if (val & PCS_MII_CTRL_DUPLEX)
+ full_duplex = 1;
+ }
+ }
+
+ netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
+ speed, full_duplex ? "full" : "half");
+
+ val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
+ if (CAS_PHY_MII(cp->phy_type)) {
+ val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
+ if (!full_duplex)
+ val |= MAC_XIF_DISABLE_ECHO;
+ }
+ if (full_duplex)
+ val |= MAC_XIF_FDPLX_LED;
+ if (speed == 1000)
+ val |= MAC_XIF_GMII_MODE;
+ writel(val, cp->regs + REG_MAC_XIF_CFG);
+
+ /* deal with carrier and collision detect. */
+ val = MAC_TX_CFG_IPG_EN;
+ if (full_duplex) {
+ val |= MAC_TX_CFG_IGNORE_CARRIER;
+ val |= MAC_TX_CFG_IGNORE_COLL;
+ } else {
+#ifndef USE_CSMA_CD_PROTO
+ val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
+ val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
+#endif
+ }
+ /* val now set up for REG_MAC_TX_CFG */
+
+ /* If gigabit and half-duplex, enable carrier extension
+ * mode. increase slot time to 512 bytes as well.
+ * else, disable it and make sure slot time is 64 bytes.
+ * also activate checksum bug workaround
+ */
+ if ((speed == 1000) && !full_duplex) {
+ writel(val | MAC_TX_CFG_CARRIER_EXTEND,
+ cp->regs + REG_MAC_TX_CFG);
+
+ val = readl(cp->regs + REG_MAC_RX_CFG);
+ val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
+ writel(val | MAC_RX_CFG_CARRIER_EXTEND,
+ cp->regs + REG_MAC_RX_CFG);
+
+ writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
+
+ cp->crc_size = 4;
+ /* minimum size gigabit frame at half duplex */
+ cp->min_frame_size = CAS_1000MB_MIN_FRAME;
+
+ } else {
+ writel(val, cp->regs + REG_MAC_TX_CFG);
+
+ /* checksum bug workaround. don't strip FCS when in
+ * half-duplex mode
+ */
+ val = readl(cp->regs + REG_MAC_RX_CFG);
+ if (full_duplex) {
+ val |= MAC_RX_CFG_STRIP_FCS;
+ cp->crc_size = 0;
+ cp->min_frame_size = CAS_MIN_MTU;
+ } else {
+ val &= ~MAC_RX_CFG_STRIP_FCS;
+ cp->crc_size = 4;
+ cp->min_frame_size = CAS_MIN_FRAME;
+ }
+ writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
+ cp->regs + REG_MAC_RX_CFG);
+ writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
+ }
+
+ if (netif_msg_link(cp)) {
+ if (pause & 0x01) {
+ netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
+ cp->rx_fifo_size,
+ cp->rx_pause_off,
+ cp->rx_pause_on);
+ } else if (pause & 0x10) {
+ netdev_info(cp->dev, "TX pause enabled\n");
+ } else {
+ netdev_info(cp->dev, "Pause is disabled\n");
+ }
+ }
+
+ val = readl(cp->regs + REG_MAC_CTRL_CFG);
+ val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
+ if (pause) { /* symmetric or asymmetric pause */
+ val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
+ if (pause & 0x01) { /* symmetric pause */
+ val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
+ }
+ }
+ writel(val, cp->regs + REG_MAC_CTRL_CFG);
+ cas_start_dma(cp);
+}
+
+/* Must be invoked under cp->lock. */
+static void cas_init_hw(struct cas *cp, int restart_link)
+{
+ if (restart_link)
+ cas_phy_init(cp);
+
+ cas_init_pause_thresholds(cp);
+ cas_init_mac(cp);
+ cas_init_dma(cp);
+
+ if (restart_link) {
+ /* Default aneg parameters */
+ cp->timer_ticks = 0;
+ cas_begin_auto_negotiation(cp, NULL);
+ } else if (cp->lstate == link_up) {
+ cas_set_link_modes(cp);
+ netif_carrier_on(cp->dev);
+ }
+}
+
+/* Must be invoked under cp->lock. on earlier cassini boards,
+ * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
+ * let it settle out, and then restore pci state.
+ */
+static void cas_hard_reset(struct cas *cp)
+{
+ writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
+ udelay(20);
+ pci_restore_state(cp->pdev);
+}
+
+
+static void cas_global_reset(struct cas *cp, int blkflag)
+{
+ int limit;
+
+ /* issue a global reset. don't use RSTOUT. */
+ if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
+ /* For PCS, when the blkflag is set, we should set the
+ * SW_RESET_BLOCK_PCS_SLINK bit to prevent the results of
+ * the last autonegotiation from being cleared. We'll
+ * need some special handling if the chip is set into a
+ * loopback mode.
+ */
+ writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
+ cp->regs + REG_SW_RESET);
+ } else {
+ writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
+ }
+
+ /* need to wait at least 3ms before polling register */
+ mdelay(3);
+
+ limit = STOP_TRIES;
+ while (limit-- > 0) {
+ u32 val = readl(cp->regs + REG_SW_RESET);
+ if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
+ goto done;
+ udelay(10);
+ }
+ netdev_err(cp->dev, "sw reset failed\n");
+
+done:
+ /* enable various BIM interrupts */
+ writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
+ BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
+
+ /* clear out pci error status mask for handled errors.
+ * we don't deal with DMA counter overflows as they happen
+ * all the time.
+ */
+ writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
+ PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
+ PCI_ERR_BIM_DMA_READ), cp->regs +
+ REG_PCI_ERR_STATUS_MASK);
+
+ /* set up for MII by default to address mac rx reset timeout
+ * issue
+ */
+ writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
+}
+
+static void cas_reset(struct cas *cp, int blkflag)
+{
+ u32 val;
+
+ cas_mask_intr(cp);
+ cas_global_reset(cp, blkflag);
+ cas_mac_reset(cp);
+ cas_entropy_reset(cp);
+
+ /* disable dma engines. */
+ val = readl(cp->regs + REG_TX_CFG);
+ val &= ~TX_CFG_DMA_EN;
+ writel(val, cp->regs + REG_TX_CFG);
+
+ val = readl(cp->regs + REG_RX_CFG);
+ val &= ~RX_CFG_DMA_EN;
+ writel(val, cp->regs + REG_RX_CFG);
+
+ /* program header parser */
+ if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
+ (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
+ cas_load_firmware(cp, CAS_HP_FIRMWARE);
+ } else {
+ cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
+ }
+
+ /* clear out error registers */
+ spin_lock(&cp->stat_lock[N_TX_RINGS]);
+ cas_clear_mac_err(cp);
+ spin_unlock(&cp->stat_lock[N_TX_RINGS]);
+}
+
+/* Shut down the chip, must be called with pm_mutex held. */
+static void cas_shutdown(struct cas *cp)
+{
+ unsigned long flags;
+
+ /* Make us not-running to avoid timers respawning */
+ cp->hw_running = 0;
+
+ del_timer_sync(&cp->link_timer);
+
+ /* Stop the reset task */
+#if 0
+ while (atomic_read(&cp->reset_task_pending_mtu) ||
+ atomic_read(&cp->reset_task_pending_spare) ||
+ atomic_read(&cp->reset_task_pending_all))
+ schedule();
+
+#else
+ while (atomic_read(&cp->reset_task_pending))
+ schedule();
+#endif
+ /* Actually stop the chip */
+ cas_lock_all_save(cp, flags);
+ cas_reset(cp, 0);
+ if (cp->cas_flags & CAS_FLAG_SATURN)
+ cas_phy_powerdown(cp);
+ cas_unlock_all_restore(cp, flags);
+}
+
+static int cas_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct cas *cp = netdev_priv(dev);
+
+ if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ if (!netif_running(dev) || !netif_device_present(dev))
+ return 0;
+
+ /* let the reset task handle it */
+#if 1
+ atomic_inc(&cp->reset_task_pending);
+ if ((cp->phy_type & CAS_PHY_SERDES)) {
+ atomic_inc(&cp->reset_task_pending_all);
+ } else {
+ atomic_inc(&cp->reset_task_pending_mtu);
+ }
+ schedule_work(&cp->reset_task);
+#else
+ atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
+ CAS_RESET_ALL : CAS_RESET_MTU);
+ pr_err("reset called in cas_change_mtu\n");
+ schedule_work(&cp->reset_task);
+#endif
+
+ flush_work_sync(&cp->reset_task);
+ return 0;
+}
+
+static void cas_clean_txd(struct cas *cp, int ring)
+{
+ struct cas_tx_desc *txd = cp->init_txds[ring];
+ struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
+ u64 daddr, dlen;
+ int i, size;
+
+ size = TX_DESC_RINGN_SIZE(ring);
+ for (i = 0; i < size; i++) {
+ int frag;
+
+ if (skbs[i] == NULL)
+ continue;
+
+ skb = skbs[i];
+ skbs[i] = NULL;
+
+ for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+ int ent = i & (size - 1);
+
+ /* first buffer is never a tiny buffer and so
+ * needs to be unmapped.
+ */
+ daddr = le64_to_cpu(txd[ent].buffer);
+ dlen = CAS_VAL(TX_DESC_BUFLEN,
+ le64_to_cpu(txd[ent].control));
+ pci_unmap_page(cp->pdev, daddr, dlen,
+ PCI_DMA_TODEVICE);
+
+ if (frag != skb_shinfo(skb)->nr_frags) {
+ i++;
+
+ /* next buffer might be a tiny buffer.
+ * skip past it.
+ */
+ ent = i & (size - 1);
+ if (cp->tx_tiny_use[ring][ent].used)
+ i++;
+ }
+ }
+ dev_kfree_skb_any(skb);
+ }
+
+ /* zero out tiny buf usage */
+ memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
+}
+
+/* freed on close */
+static inline void cas_free_rx_desc(struct cas *cp, int ring)
+{
+ cas_page_t **page = cp->rx_pages[ring];
+ int i, size;
+
+ size = RX_DESC_RINGN_SIZE(ring);
+ for (i = 0; i < size; i++) {
+ if (page[i]) {
+ cas_page_free(cp, page[i]);
+ page[i] = NULL;
+ }
+ }
+}
+
+static void cas_free_rxds(struct cas *cp)
+{
+ int i;
+
+ for (i = 0; i < N_RX_DESC_RINGS; i++)
+ cas_free_rx_desc(cp, i);
+}
+
+/* Must be invoked under cp->lock. */
+static void cas_clean_rings(struct cas *cp)
+{
+ int i;
+
+ /* need to clean all tx rings */
+ memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
+ memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
+ for (i = 0; i < N_TX_RINGS; i++)
+ cas_clean_txd(cp, i);
+
+ /* zero out init block */
+ memset(cp->init_block, 0, sizeof(struct cas_init_block));
+ cas_clean_rxds(cp);
+ cas_clean_rxcs(cp);
+}
+
+/* allocated on open */
+static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
+{
+ cas_page_t **page = cp->rx_pages[ring];
+ int size, i = 0;
+
+ size = RX_DESC_RINGN_SIZE(ring);
+ for (i = 0; i < size; i++) {
+ if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
+ return -1;
+ }
+ return 0;
+}
+
+static int cas_alloc_rxds(struct cas *cp)
+{
+ int i;
+
+ for (i = 0; i < N_RX_DESC_RINGS; i++) {
+ if (cas_alloc_rx_desc(cp, i) < 0) {
+ cas_free_rxds(cp);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static void cas_reset_task(struct work_struct *work)
+{
+ struct cas *cp = container_of(work, struct cas, reset_task);
+#if 0
+ int pending = atomic_read(&cp->reset_task_pending);
+#else
+ int pending_all = atomic_read(&cp->reset_task_pending_all);
+ int pending_spare = atomic_read(&cp->reset_task_pending_spare);
+ int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
+
+ if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
+ /* We can have more tasks scheduled than actually
+ * needed.
+ */
+ atomic_dec(&cp->reset_task_pending);
+ return;
+ }
+#endif
+ /* The link went down, we reset the ring, but keep
+ * DMA stopped. Use this function for reset
+ * on error as well.
+ */
+ if (cp->hw_running) {
+ unsigned long flags;
+
+ /* Make sure we don't get interrupts or tx packets */
+ netif_device_detach(cp->dev);
+ cas_lock_all_save(cp, flags);
+
+ if (cp->opened) {
+ /* We call cas_spare_recover when we call cas_open.
+ * but we do not initialize the lists cas_spare_recover
+ * uses until cas_open is called.
+ */
+ cas_spare_recover(cp, GFP_ATOMIC);
+ }
+#if 1
+ /* test => only pending_spare set */
+ if (!pending_all && !pending_mtu)
+ goto done;
+#else
+ if (pending == CAS_RESET_SPARE)
+ goto done;
+#endif
+ /* when pending == CAS_RESET_ALL, the following
+ * call to cas_init_hw will restart auto negotiation.
+ * Setting the second argument of cas_reset to
+ * !(pending == CAS_RESET_ALL) will set this argument
+ * to 1 (avoiding reinitializing the PHY for the normal
+ * PCS case) when auto negotiation is not restarted.
+ */
+#if 1
+ cas_reset(cp, !(pending_all > 0));
+ if (cp->opened)
+ cas_clean_rings(cp);
+ cas_init_hw(cp, (pending_all > 0));
+#else
+ cas_reset(cp, !(pending == CAS_RESET_ALL));
+ if (cp->opened)
+ cas_clean_rings(cp);
+ cas_init_hw(cp, pending == CAS_RESET_ALL);
+#endif
+
+done:
+ cas_unlock_all_restore(cp, flags);
+ netif_device_attach(cp->dev);
+ }
+#if 1
+ atomic_sub(pending_all, &cp->reset_task_pending_all);
+ atomic_sub(pending_spare, &cp->reset_task_pending_spare);
+ atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
+ atomic_dec(&cp->reset_task_pending);
+#else
+ atomic_set(&cp->reset_task_pending, 0);
+#endif
+}
+
+static void cas_link_timer(unsigned long data)
+{
+ struct cas *cp = (struct cas *) data;
+ int mask, pending = 0, reset = 0;
+ unsigned long flags;
+
+ if (link_transition_timeout != 0 &&
+ cp->link_transition_jiffies_valid &&
+ ((jiffies - cp->link_transition_jiffies) >
+ (link_transition_timeout))) {
+ /* One-second counter so link-down workaround doesn't
+ * cause resets to occur so fast as to fool the switch
+ * into thinking the link is down.
+ */
+ cp->link_transition_jiffies_valid = 0;
+ }
+
+ if (!cp->hw_running)
+ return;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ cas_lock_tx(cp);
+ cas_entropy_gather(cp);
+
+ /* If the link task is still pending, we just
+ * reschedule the link timer
+ */
+#if 1
+ if (atomic_read(&cp->reset_task_pending_all) ||
+ atomic_read(&cp->reset_task_pending_spare) ||
+ atomic_read(&cp->reset_task_pending_mtu))
+ goto done;
+#else
+ if (atomic_read(&cp->reset_task_pending))
+ goto done;
+#endif
+
+ /* check for rx cleaning */
+ if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
+ int i, rmask;
+
+ for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
+ rmask = CAS_FLAG_RXD_POST(i);
+ if ((mask & rmask) == 0)
+ continue;
+
+ /* post_rxds will do a mod_timer */
+ if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
+ pending = 1;
+ continue;
+ }
+ cp->cas_flags &= ~rmask;
+ }
+ }
+
+ if (CAS_PHY_MII(cp->phy_type)) {
+ u16 bmsr;
+ cas_mif_poll(cp, 0);
+ bmsr = cas_phy_read(cp, MII_BMSR);
+ /* WTZ: Solaris driver reads this twice, but that
+ * may be due to the PCS case and the use of a
+ * common implementation. Read it twice here to be
+ * safe.
+ */
+ bmsr = cas_phy_read(cp, MII_BMSR);
+ cas_mif_poll(cp, 1);
+ readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
+ reset = cas_mii_link_check(cp, bmsr);
+ } else {
+ reset = cas_pcs_link_check(cp);
+ }
+
+ if (reset)
+ goto done;
+
+ /* check for tx state machine confusion */
+ if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
+ u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
+ u32 wptr, rptr;
+ int tlm = CAS_VAL(MAC_SM_TLM, val);
+
+ if (((tlm == 0x5) || (tlm == 0x3)) &&
+ (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
+ netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
+ "tx err: MAC_STATE[%08x]\n", val);
+ reset = 1;
+ goto done;
+ }
+
+ val = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
+ wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
+ rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
+ if ((val == 0) && (wptr != rptr)) {
+ netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
+ "tx err: TX_FIFO[%08x:%08x:%08x]\n",
+ val, wptr, rptr);
+ reset = 1;
+ }
+
+ if (reset)
+ cas_hard_reset(cp);
+ }
+
+done:
+ if (reset) {
+#if 1
+ atomic_inc(&cp->reset_task_pending);
+ atomic_inc(&cp->reset_task_pending_all);
+ schedule_work(&cp->reset_task);
+#else
+ atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
+ pr_err("reset called in cas_link_timer\n");
+ schedule_work(&cp->reset_task);
+#endif
+ }
+
+ if (!pending)
+ mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
+ cas_unlock_tx(cp);
+ spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+/* tiny buffers are used to avoid target abort issues with
+ * older cassini chips
+ */
+static void cas_tx_tiny_free(struct cas *cp)
+{
+ struct pci_dev *pdev = cp->pdev;
+ int i;
+
+ for (i = 0; i < N_TX_RINGS; i++) {
+ if (!cp->tx_tiny_bufs[i])
+ continue;
+
+ pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
+ cp->tx_tiny_bufs[i],
+ cp->tx_tiny_dvma[i]);
+ cp->tx_tiny_bufs[i] = NULL;
+ }
+}
+
+static int cas_tx_tiny_alloc(struct cas *cp)
+{
+ struct pci_dev *pdev = cp->pdev;
+ int i;
+
+ for (i = 0; i < N_TX_RINGS; i++) {
+ cp->tx_tiny_bufs[i] =
+ pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
+ &cp->tx_tiny_dvma[i]);
+ if (!cp->tx_tiny_bufs[i]) {
+ cas_tx_tiny_free(cp);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+
+static int cas_open(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ int hw_was_up, err;
+ unsigned long flags;
+
+ mutex_lock(&cp->pm_mutex);
+
+ hw_was_up = cp->hw_running;
+
+ /* The power-management mutex protects the hw_running
+ * etc. state so it is safe to do this bit without cp->lock
+ */
+ if (!cp->hw_running) {
+ /* Reset the chip */
+ cas_lock_all_save(cp, flags);
+ /* We set the second arg to cas_reset to zero
+ * because cas_init_hw below will have its second
+ * argument set to non-zero, which will force
+ * autonegotiation to start.
+ */
+ cas_reset(cp, 0);
+ cp->hw_running = 1;
+ cas_unlock_all_restore(cp, flags);
+ }
+
+ err = -ENOMEM;
+ if (cas_tx_tiny_alloc(cp) < 0)
+ goto err_unlock;
+
+ /* alloc rx descriptors */
+ if (cas_alloc_rxds(cp) < 0)
+ goto err_tx_tiny;
+
+ /* allocate spares */
+ cas_spare_init(cp);
+ cas_spare_recover(cp, GFP_KERNEL);
+
+ /* We can now request the interrupt as we know it's masked
+ * on the controller. cassini+ has up to 4 interrupts
+ * that can be used, but you need to do explicit pci interrupt
+ * mapping to expose them
+ */
+ if (request_irq(cp->pdev->irq, cas_interrupt,
+ IRQF_SHARED, dev->name, (void *) dev)) {
+ netdev_err(cp->dev, "failed to request irq !\n");
+ err = -EAGAIN;
+ goto err_spare;
+ }
+
+#ifdef USE_NAPI
+ napi_enable(&cp->napi);
+#endif
+ /* init hw */
+ cas_lock_all_save(cp, flags);
+ cas_clean_rings(cp);
+ cas_init_hw(cp, !hw_was_up);
+ cp->opened = 1;
+ cas_unlock_all_restore(cp, flags);
+
+ netif_start_queue(dev);
+ mutex_unlock(&cp->pm_mutex);
+ return 0;
+
+err_spare:
+ cas_spare_free(cp);
+ cas_free_rxds(cp);
+err_tx_tiny:
+ cas_tx_tiny_free(cp);
+err_unlock:
+ mutex_unlock(&cp->pm_mutex);
+ return err;
+}
+
+static int cas_close(struct net_device *dev)
+{
+ unsigned long flags;
+ struct cas *cp = netdev_priv(dev);
+
+#ifdef USE_NAPI
+ napi_disable(&cp->napi);
+#endif
+ /* Make sure we don't get distracted by suspend/resume */
+ mutex_lock(&cp->pm_mutex);
+
+ netif_stop_queue(dev);
+
+ /* Stop traffic, mark us closed */
+ cas_lock_all_save(cp, flags);
+ cp->opened = 0;
+ cas_reset(cp, 0);
+ cas_phy_init(cp);
+ cas_begin_auto_negotiation(cp, NULL);
+ cas_clean_rings(cp);
+ cas_unlock_all_restore(cp, flags);
+
+ free_irq(cp->pdev->irq, (void *) dev);
+ cas_spare_free(cp);
+ cas_free_rxds(cp);
+ cas_tx_tiny_free(cp);
+ mutex_unlock(&cp->pm_mutex);
+ return 0;
+}
+
+static struct {
+ const char name[ETH_GSTRING_LEN];
+} ethtool_cassini_statnames[] = {
+ {"collisions"},
+ {"rx_bytes"},
+ {"rx_crc_errors"},
+ {"rx_dropped"},
+ {"rx_errors"},
+ {"rx_fifo_errors"},
+ {"rx_frame_errors"},
+ {"rx_length_errors"},
+ {"rx_over_errors"},
+ {"rx_packets"},
+ {"tx_aborted_errors"},
+ {"tx_bytes"},
+ {"tx_dropped"},
+ {"tx_errors"},
+ {"tx_fifo_errors"},
+ {"tx_packets"}
+};
+#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
+
+static struct {
+ const int offsets; /* neg. values for 2nd arg to cas_phy_read */
+} ethtool_register_table[] = {
+ {-MII_BMSR},
+ {-MII_BMCR},
+ {REG_CAWR},
+ {REG_INF_BURST},
+ {REG_BIM_CFG},
+ {REG_RX_CFG},
+ {REG_HP_CFG},
+ {REG_MAC_TX_CFG},
+ {REG_MAC_RX_CFG},
+ {REG_MAC_CTRL_CFG},
+ {REG_MAC_XIF_CFG},
+ {REG_MIF_CFG},
+ {REG_PCS_CFG},
+ {REG_SATURN_PCFG},
+ {REG_PCS_MII_STATUS},
+ {REG_PCS_STATE_MACHINE},
+ {REG_MAC_COLL_EXCESS},
+ {REG_MAC_COLL_LATE}
+};
+#define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table)
+#define CAS_MAX_REGS (sizeof(u32) * CAS_REG_LEN)
+
+static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
+{
+ u8 *p;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ for (i = 0, p = ptr; i < len; i++, p += sizeof(u32)) {
+ u16 hval;
+ u32 val;
+ if (ethtool_register_table[i].offsets < 0) {
+ hval = cas_phy_read(cp,
+ -ethtool_register_table[i].offsets);
+ val = hval;
+ } else {
+ val = readl(cp->regs + ethtool_register_table[i].offsets);
+ }
+ memcpy(p, (u8 *)&val, sizeof(u32));
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+static struct net_device_stats *cas_get_stats(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ struct net_device_stats *stats = cp->net_stats;
+ unsigned long flags;
+ int i;
+ unsigned long tmp;
+
+ /* we collate all of the stats into net_stats[N_TX_RINGS] */
+ if (!cp->hw_running)
+ return stats + N_TX_RINGS;
+
+ /* collect outstanding stats */
+ /* WTZ: the Cassini spec gives these as 16 bit counters but
+ * stored in 32-bit words. Added a mask of 0xffff to be safe,
+ * in case the chip somehow puts any garbage in the other bits.
+ * Also, counter usage didn't seem to match what Adrian did
+ * in the parts of the code that set these quantities. Made
+ * that consistent.
+ */
+ spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
+ stats[N_TX_RINGS].rx_crc_errors +=
+ readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
+ stats[N_TX_RINGS].rx_frame_errors +=
+ readl(cp->regs + REG_MAC_ALIGN_ERR) & 0xffff;
+ stats[N_TX_RINGS].rx_length_errors +=
+ readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
+#if 1
+ tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
+ (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
+ stats[N_TX_RINGS].tx_aborted_errors += tmp;
+ stats[N_TX_RINGS].collisions +=
+ tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
+#else
+ stats[N_TX_RINGS].tx_aborted_errors +=
+ readl(cp->regs + REG_MAC_COLL_EXCESS);
+ stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
+ readl(cp->regs + REG_MAC_COLL_LATE);
+#endif
+ cas_clear_mac_err(cp);
+
+ /* saved bits that are unique to ring 0 */
+ spin_lock(&cp->stat_lock[0]);
+ stats[N_TX_RINGS].collisions += stats[0].collisions;
+ stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors;
+ stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors;
+ stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors;
+ stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
+ stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors;
+ spin_unlock(&cp->stat_lock[0]);
+
+ for (i = 0; i < N_TX_RINGS; i++) {
+ spin_lock(&cp->stat_lock[i]);
+ stats[N_TX_RINGS].rx_length_errors +=
+ stats[i].rx_length_errors;
+ stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
+ stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
+ stats[N_TX_RINGS].tx_packets += stats[i].tx_packets;
+ stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes;
+ stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes;
+ stats[N_TX_RINGS].rx_errors += stats[i].rx_errors;
+ stats[N_TX_RINGS].tx_errors += stats[i].tx_errors;
+ stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped;
+ stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped;
+ memset(stats + i, 0, sizeof(struct net_device_stats));
+ spin_unlock(&cp->stat_lock[i]);
+ }
+ spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
+ return stats + N_TX_RINGS;
+}
+
+
+static void cas_set_multicast(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ u32 rxcfg, rxcfg_new;
+ unsigned long flags;
+ int limit = STOP_TRIES;
+
+ if (!cp->hw_running)
+ return;
+
+ spin_lock_irqsave(&cp->lock, flags);
+ rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
+
+ /* disable RX MAC and wait for completion */
+ writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
+ while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
+ if (!limit--)
+ break;
+ udelay(10);
+ }
+
+ /* disable hash filter and wait for completion */
+ limit = STOP_TRIES;
+ rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
+ writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
+ while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
+ if (!limit--)
+ break;
+ udelay(10);
+ }
+
+ /* program hash filters */
+ cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
+ rxcfg |= rxcfg_new;
+ writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
+ spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct cas *cp = netdev_priv(dev);
+ strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
+ strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
+ info->fw_version[0] = '\0';
+ strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
+ info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
+ cp->casreg_len : CAS_MAX_REGS;
+ info->n_stats = CAS_NUM_STAT_KEYS;
+}
+
+static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cas *cp = netdev_priv(dev);
+ u16 bmcr;
+ int full_duplex, speed, pause;
+ unsigned long flags;
+ enum link_state linkstate = link_up;
+
+ cmd->advertising = 0;
+ cmd->supported = SUPPORTED_Autoneg;
+ if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
+ cmd->supported |= SUPPORTED_1000baseT_Full;
+ cmd->advertising |= ADVERTISED_1000baseT_Full;
+ }
+
+ /* Record PHY settings if HW is on. */
+ spin_lock_irqsave(&cp->lock, flags);
+ bmcr = 0;
+ linkstate = cp->lstate;
+ if (CAS_PHY_MII(cp->phy_type)) {
+ cmd->port = PORT_MII;
+ cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
+ XCVR_INTERNAL : XCVR_EXTERNAL;
+ cmd->phy_address = cp->phy_addr;
+ cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full;
+
+ cmd->supported |=
+ (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_TP | SUPPORTED_MII);
+
+ if (cp->hw_running) {
+ cas_mif_poll(cp, 0);
+ bmcr = cas_phy_read(cp, MII_BMCR);
+ cas_read_mii_link_mode(cp, &full_duplex,
+ &speed, &pause);
+ cas_mif_poll(cp, 1);
+ }
+
+ } else {
+ cmd->port = PORT_FIBRE;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->phy_address = 0;
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_FIBRE;
+
+ if (cp->hw_running) {
+ /* pcs uses the same bits as mii */
+ bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
+ cas_read_pcs_link_mode(cp, &full_duplex,
+ &speed, &pause);
+ }
+ }
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ if (bmcr & BMCR_ANENABLE) {
+ cmd->advertising |= ADVERTISED_Autoneg;
+ cmd->autoneg = AUTONEG_ENABLE;
+ ethtool_cmd_speed_set(cmd, ((speed == 10) ?
+ SPEED_10 :
+ ((speed == 1000) ?
+ SPEED_1000 : SPEED_100)));
+ cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ } else {
+ cmd->autoneg = AUTONEG_DISABLE;
+ ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ?
+ SPEED_1000 :
+ ((bmcr & BMCR_SPEED100) ?
+ SPEED_100 : SPEED_10)));
+ cmd->duplex =
+ (bmcr & BMCR_FULLDPLX) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ }
+ if (linkstate != link_up) {
+ /* Force these to "unknown" if the link is not up and
+		 * autonegotiation is enabled. We can set the link
+ * speed to 0, but not cmd->duplex,
+ * because its legal values are 0 and 1. Ethtool will
+ * print the value reported in parentheses after the
+ * word "Unknown" for unrecognized values.
+ *
+ * If in forced mode, we report the speed and duplex
+ * settings that we configured.
+ */
+ if (cp->link_cntl & BMCR_ANENABLE) {
+ ethtool_cmd_speed_set(cmd, 0);
+ cmd->duplex = 0xff;
+ } else {
+ ethtool_cmd_speed_set(cmd, SPEED_10);
+ if (cp->link_cntl & BMCR_SPEED100) {
+ ethtool_cmd_speed_set(cmd, SPEED_100);
+ } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
+ }
+ cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
+ DUPLEX_FULL : DUPLEX_HALF;
+ }
+ }
+ return 0;
+}
+
+static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cas *cp = netdev_priv(dev);
+ unsigned long flags;
+ u32 speed = ethtool_cmd_speed(cmd);
+
+ /* Verify the settings we care about. */
+ if (cmd->autoneg != AUTONEG_ENABLE &&
+ cmd->autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_DISABLE &&
+ ((speed != SPEED_1000 &&
+ speed != SPEED_100 &&
+ speed != SPEED_10) ||
+ (cmd->duplex != DUPLEX_HALF &&
+ cmd->duplex != DUPLEX_FULL)))
+ return -EINVAL;
+
+ /* Apply settings and restart link process. */
+ spin_lock_irqsave(&cp->lock, flags);
+ cas_begin_auto_negotiation(cp, cmd);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ return 0;
+}
+
+static int cas_nway_reset(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ unsigned long flags;
+
+ if ((cp->link_cntl & BMCR_ANENABLE) == 0)
+ return -EINVAL;
+
+ /* Restart link process. */
+ spin_lock_irqsave(&cp->lock, flags);
+ cas_begin_auto_negotiation(cp, NULL);
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ return 0;
+}
+
+static u32 cas_get_link(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ return cp->lstate == link_up;
+}
+
+static u32 cas_get_msglevel(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+ return cp->msg_enable;
+}
+
+static void cas_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct cas *cp = netdev_priv(dev);
+ cp->msg_enable = value;
+}
+
+static int cas_get_regs_len(struct net_device *dev)
+{
+ struct cas *cp = netdev_priv(dev);
+	return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len : CAS_MAX_REGS;
+}
+
+static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct cas *cp = netdev_priv(dev);
+ regs->version = 0;
+ /* cas_read_regs handles locks (cp->lock). */
+ cas_read_regs(cp, p, regs->len / sizeof(u32));
+}
+
+static int cas_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return CAS_NUM_STAT_KEYS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ memcpy(data, &ethtool_cassini_statnames,
+ CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
+}
+
+static void cas_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *estats, u64 *data)
+{
+ struct cas *cp = netdev_priv(dev);
+ struct net_device_stats *stats = cas_get_stats(cp->dev);
+ int i = 0;
+ data[i++] = stats->collisions;
+ data[i++] = stats->rx_bytes;
+ data[i++] = stats->rx_crc_errors;
+ data[i++] = stats->rx_dropped;
+ data[i++] = stats->rx_errors;
+ data[i++] = stats->rx_fifo_errors;
+ data[i++] = stats->rx_frame_errors;
+ data[i++] = stats->rx_length_errors;
+ data[i++] = stats->rx_over_errors;
+ data[i++] = stats->rx_packets;
+ data[i++] = stats->tx_aborted_errors;
+ data[i++] = stats->tx_bytes;
+ data[i++] = stats->tx_dropped;
+ data[i++] = stats->tx_errors;
+ data[i++] = stats->tx_fifo_errors;
+ data[i++] = stats->tx_packets;
+ BUG_ON(i != CAS_NUM_STAT_KEYS);
+}
+
+static const struct ethtool_ops cas_ethtool_ops = {
+ .get_drvinfo = cas_get_drvinfo,
+ .get_settings = cas_get_settings,
+ .set_settings = cas_set_settings,
+ .nway_reset = cas_nway_reset,
+ .get_link = cas_get_link,
+ .get_msglevel = cas_get_msglevel,
+ .set_msglevel = cas_set_msglevel,
+ .get_regs_len = cas_get_regs_len,
+ .get_regs = cas_get_regs,
+ .get_sset_count = cas_get_sset_count,
+ .get_strings = cas_get_strings,
+ .get_ethtool_stats = cas_get_ethtool_stats,
+};
+
+static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct cas *cp = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+ unsigned long flags;
+ int rc = -EOPNOTSUPP;
+
+ /* Hold the PM mutex while doing ioctl's or we may collide
+ * with open/close and power management and oops.
+ */
+ mutex_lock(&cp->pm_mutex);
+ switch (cmd) {
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ data->phy_id = cp->phy_addr;
+ /* Fallthrough... */
+
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ spin_lock_irqsave(&cp->lock, flags);
+ cas_mif_poll(cp, 0);
+ data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
+ cas_mif_poll(cp, 1);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ rc = 0;
+ break;
+
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ spin_lock_irqsave(&cp->lock, flags);
+ cas_mif_poll(cp, 0);
+ rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
+ cas_mif_poll(cp, 1);
+ spin_unlock_irqrestore(&cp->lock, flags);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&cp->pm_mutex);
+ return rc;
+}
+
+/* When this chip sits underneath an Intel 31154 bridge, it is the
+ * only subordinate device and we can tweak the bridge settings to
+ * reflect that fact.
+ */
+static void __devinit cas_program_bridge(struct pci_dev *cas_pdev)
+{
+ struct pci_dev *pdev = cas_pdev->bus->self;
+ u32 val;
+
+ if (!pdev)
+ return;
+
+ if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
+ return;
+
+ /* Clear bit 10 (Bus Parking Control) in the Secondary
+ * Arbiter Control/Status Register which lives at offset
+ * 0x41. Using a 32-bit word read/modify/write at 0x40
+ * is much simpler so that's how we do this.
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+ val &= ~0x00040000;
+ pci_write_config_dword(pdev, 0x40, val);
+
+ /* Max out the Multi-Transaction Timer settings since
+ * Cassini is the only device present.
+ *
+ * The register is 16-bit and lives at 0x50. When the
+ * settings are enabled, it extends the GRANT# signal
+ * for a requestor after a transaction is complete. This
+ * allows the next request to run without first needing
+ * to negotiate the GRANT# signal back.
+ *
+ * Bits 12:10 define the grant duration:
+ *
+ * 1 -- 16 clocks
+ * 2 -- 32 clocks
+ * 3 -- 64 clocks
+ * 4 -- 128 clocks
+ * 5 -- 256 clocks
+ *
+ * All other values are illegal.
+ *
+ * Bits 09:00 define which REQ/GNT signal pairs get the
+ * GRANT# signal treatment. We set them all.
+ */
+ pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
+
+	/* The Read Prefetch Policy register is 16-bit and sits at
+ * offset 0x52. It enables a "smart" pre-fetch policy. We
+ * enable it and max out all of the settings since only one
+ * device is sitting underneath and thus bandwidth sharing is
+ * not an issue.
+ *
+	 * The register has several 3-bit fields, each indicating a
+ * multiplier applied to the base amount of prefetching the
+ * chip would do. These fields are at:
+ *
+ * 15:13 --- ReRead Primary Bus
+ * 12:10 --- FirstRead Primary Bus
+ * 09:07 --- ReRead Secondary Bus
+ * 06:04 --- FirstRead Secondary Bus
+ *
+ * Bits 03:00 control which REQ/GNT pairs the prefetch settings
+ * get enabled on. Bit 3 is a grouped enabler which controls
+ * all of the REQ/GNT pairs from [8:3]. Bits 2 to 0 control
+ * the individual REQ/GNT pairs [2:0].
+ */
+ pci_write_config_word(pdev, 0x52,
+ (0x7 << 13) |
+ (0x7 << 10) |
+ (0x7 << 7) |
+ (0x7 << 4) |
+ (0xf << 0));
+
+ /* Force cacheline size to 0x8 */
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
+
+ /* Force latency timer to maximum setting so Cassini can
+ * sit on the bus as long as it likes.
+ */
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
+}
+
+static const struct net_device_ops cas_netdev_ops = {
+ .ndo_open = cas_open,
+ .ndo_stop = cas_close,
+ .ndo_start_xmit = cas_start_xmit,
+ .ndo_get_stats = cas_get_stats,
+ .ndo_set_rx_mode = cas_set_multicast,
+ .ndo_do_ioctl = cas_ioctl,
+ .ndo_tx_timeout = cas_tx_timeout,
+ .ndo_change_mtu = cas_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cas_netpoll,
+#endif
+};
+
+static int __devinit cas_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int cas_version_printed = 0;
+ unsigned long casreg_len;
+ struct net_device *dev;
+ struct cas *cp;
+ int i, err, pci_using_dac;
+ u16 pci_cmd;
+ u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
+
+ if (cas_version_printed++ == 0)
+ pr_info("%s", version);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+ return err;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev, "Cannot find proper PCI device "
+ "base address, aborting\n");
+ err = -ENODEV;
+ goto err_out_disable_pdev;
+ }
+
+ dev = alloc_etherdev(sizeof(*cp));
+ if (!dev) {
+ dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
+ err = -ENOMEM;
+ goto err_out_disable_pdev;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = pci_request_regions(pdev, dev->name);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+ goto err_out_free_netdev;
+ }
+ pci_set_master(pdev);
+
+ /* we must always turn on parity response or else parity
+ * doesn't get generated properly. disable SERR/PERR as well.
+ * in addition, we want to turn MWI on.
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+ pci_cmd &= ~PCI_COMMAND_SERR;
+ pci_cmd |= PCI_COMMAND_PARITY;
+ pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+ if (pci_try_set_mwi(pdev))
+ pr_warning("Could not enable MWI for %s\n", pci_name(pdev));
+
+ cas_program_bridge(pdev);
+
+ /*
+ * On some architectures, the default cache line size set
+	 * by pci_try_set_mwi reduces performance. We have to
+	 * increase it for this case.
+ */
+#if 1
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+ &orig_cacheline_size);
+ if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
+ cas_cacheline_size =
+ (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
+ CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
+ if (pci_write_config_byte(pdev,
+ PCI_CACHE_LINE_SIZE,
+ cas_cacheline_size)) {
+ dev_err(&pdev->dev, "Could not set PCI cache "
+ "line size\n");
+ goto err_write_cacheline;
+ }
+ }
+#endif
+
+
+ /* Configure DMA attributes. */
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ pci_using_dac = 1;
+ err = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(64));
+ if (err < 0) {
+ dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
+ "for consistent allocations\n");
+ goto err_out_free_res;
+ }
+
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA configuration, "
+ "aborting\n");
+ goto err_out_free_res;
+ }
+ pci_using_dac = 0;
+ }
+
+ casreg_len = pci_resource_len(pdev, 0);
+
+ cp = netdev_priv(dev);
+ cp->pdev = pdev;
+#if 1
+ /* A value of 0 indicates we never explicitly set it */
+	cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0;
+#endif
+ cp->dev = dev;
+ cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
+ cassini_debug;
+
+#if defined(CONFIG_SPARC)
+ cp->of_node = pci_device_to_OF_node(pdev);
+#endif
+
+ cp->link_transition = LINK_TRANSITION_UNKNOWN;
+ cp->link_transition_jiffies_valid = 0;
+
+ spin_lock_init(&cp->lock);
+ spin_lock_init(&cp->rx_inuse_lock);
+ spin_lock_init(&cp->rx_spare_lock);
+ for (i = 0; i < N_TX_RINGS; i++) {
+ spin_lock_init(&cp->stat_lock[i]);
+ spin_lock_init(&cp->tx_lock[i]);
+ }
+ spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
+ mutex_init(&cp->pm_mutex);
+
+ init_timer(&cp->link_timer);
+ cp->link_timer.function = cas_link_timer;
+ cp->link_timer.data = (unsigned long) cp;
+
+#if 1
+ /* Just in case the implementation of atomic operations
+	 * changes so that an explicit initialization is necessary.
+ */
+ atomic_set(&cp->reset_task_pending, 0);
+ atomic_set(&cp->reset_task_pending_all, 0);
+ atomic_set(&cp->reset_task_pending_spare, 0);
+ atomic_set(&cp->reset_task_pending_mtu, 0);
+#endif
+ INIT_WORK(&cp->reset_task, cas_reset_task);
+
+ /* Default link parameters */
+ if (link_mode >= 0 && link_mode < 6)
+ cp->link_cntl = link_modes[link_mode];
+ else
+ cp->link_cntl = BMCR_ANENABLE;
+ cp->lstate = link_down;
+ cp->link_transition = LINK_TRANSITION_LINK_DOWN;
+ netif_carrier_off(cp->dev);
+ cp->timer_ticks = 0;
+
+ /* give us access to cassini registers */
+ cp->regs = pci_iomap(pdev, 0, casreg_len);
+ if (!cp->regs) {
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
+ goto err_out_free_res;
+ }
+ cp->casreg_len = casreg_len;
+
+ pci_save_state(pdev);
+ cas_check_pci_invariants(cp);
+ cas_hard_reset(cp);
+ cas_reset(cp, 0);
+ if (cas_check_invariants(cp))
+ goto err_out_iounmap;
+ if (cp->cas_flags & CAS_FLAG_SATURN)
+ if (cas_saturn_firmware_init(cp))
+ goto err_out_iounmap;
+
+ cp->init_block = (struct cas_init_block *)
+ pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
+ &cp->block_dvma);
+ if (!cp->init_block) {
+ dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
+ goto err_out_iounmap;
+ }
+
+ for (i = 0; i < N_TX_RINGS; i++)
+ cp->init_txds[i] = cp->init_block->txds[i];
+
+ for (i = 0; i < N_RX_DESC_RINGS; i++)
+ cp->init_rxds[i] = cp->init_block->rxds[i];
+
+ for (i = 0; i < N_RX_COMP_RINGS; i++)
+ cp->init_rxcs[i] = cp->init_block->rxcs[i];
+
+ for (i = 0; i < N_RX_FLOWS; i++)
+ skb_queue_head_init(&cp->rx_flows[i]);
+
+ dev->netdev_ops = &cas_netdev_ops;
+ dev->ethtool_ops = &cas_ethtool_ops;
+ dev->watchdog_timeo = CAS_TX_TIMEOUT;
+
+#ifdef USE_NAPI
+ netif_napi_add(dev, &cp->napi, cas_poll, 64);
+#endif
+ dev->irq = pdev->irq;
+ dev->dma = 0;
+
+ /* Cassini features. */
+ if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
+ dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
+
+ if (pci_using_dac)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ if (register_netdev(dev)) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting\n");
+ goto err_out_free_consistent;
+ }
+
+ i = readl(cp->regs + REG_BIM_CFG);
+ netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
+ (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
+ (i & BIM_CFG_32BIT) ? "32" : "64",
+ (i & BIM_CFG_66MHZ) ? "66" : "33",
+ (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
+ dev->dev_addr);
+
+ pci_set_drvdata(pdev, dev);
+ cp->hw_running = 1;
+ cas_entropy_reset(cp);
+ cas_phy_init(cp);
+ cas_begin_auto_negotiation(cp, NULL);
+ return 0;
+
+err_out_free_consistent:
+ pci_free_consistent(pdev, sizeof(struct cas_init_block),
+ cp->init_block, cp->block_dvma);
+
+err_out_iounmap:
+ mutex_lock(&cp->pm_mutex);
+ if (cp->hw_running)
+ cas_shutdown(cp);
+ mutex_unlock(&cp->pm_mutex);
+
+ pci_iounmap(pdev, cp->regs);
+
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_write_cacheline:
+ /* Try to restore it in case the error occurred after we
+ * set it.
+ */
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
+
+err_out_free_netdev:
+ free_netdev(dev);
+
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return -ENODEV;
+}
+
+static void __devexit cas_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct cas *cp;
+ if (!dev)
+ return;
+
+ cp = netdev_priv(dev);
+ unregister_netdev(dev);
+
+ if (cp->fw_data)
+ vfree(cp->fw_data);
+
+ mutex_lock(&cp->pm_mutex);
+ cancel_work_sync(&cp->reset_task);
+ if (cp->hw_running)
+ cas_shutdown(cp);
+ mutex_unlock(&cp->pm_mutex);
+
+#if 1
+ if (cp->orig_cacheline_size) {
+ /* Restore the cache line size if we had modified
+ * it.
+ */
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+ cp->orig_cacheline_size);
+ }
+#endif
+ pci_free_consistent(pdev, sizeof(struct cas_init_block),
+ cp->init_block, cp->block_dvma);
+ pci_iounmap(pdev, cp->regs);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM
+static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct cas *cp = netdev_priv(dev);
+ unsigned long flags;
+
+ mutex_lock(&cp->pm_mutex);
+
+ /* If the driver is opened, we stop the DMA */
+ if (cp->opened) {
+ netif_device_detach(dev);
+
+ cas_lock_all_save(cp, flags);
+
+ /* We can set the second arg of cas_reset to 0
+ * because on resume, we'll call cas_init_hw with
+ * its second arg set so that autonegotiation is
+ * restarted.
+ */
+ cas_reset(cp, 0);
+ cas_clean_rings(cp);
+ cas_unlock_all_restore(cp, flags);
+ }
+
+ if (cp->hw_running)
+ cas_shutdown(cp);
+ mutex_unlock(&cp->pm_mutex);
+
+ return 0;
+}
+
+static int cas_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct cas *cp = netdev_priv(dev);
+
+ netdev_info(dev, "resuming\n");
+
+ mutex_lock(&cp->pm_mutex);
+ cas_hard_reset(cp);
+ if (cp->opened) {
+ unsigned long flags;
+ cas_lock_all_save(cp, flags);
+ cas_reset(cp, 0);
+ cp->hw_running = 1;
+ cas_clean_rings(cp);
+ cas_init_hw(cp, 1);
+ cas_unlock_all_restore(cp, flags);
+
+ netif_device_attach(dev);
+ }
+ mutex_unlock(&cp->pm_mutex);
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct pci_driver cas_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = cas_pci_tbl,
+ .probe = cas_init_one,
+ .remove = __devexit_p(cas_remove_one),
+#ifdef CONFIG_PM
+ .suspend = cas_suspend,
+ .resume = cas_resume
+#endif
+};
+
+static int __init cas_init(void)
+{
+ if (linkdown_timeout > 0)
+ link_transition_timeout = linkdown_timeout * HZ;
+ else
+ link_transition_timeout = 0;
+
+ return pci_register_driver(&cas_driver);
+}
+
+static void __exit cas_cleanup(void)
+{
+ pci_unregister_driver(&cas_driver);
+}
+
+module_init(cas_init);
+module_exit(cas_cleanup);
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
new file mode 100644
index 000000000000..b361424d5f57
--- /dev/null
+++ b/drivers/net/ethernet/sun/cassini.h
@@ -0,0 +1,2914 @@
+/* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $
+ * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver.
+ *
+ * Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ *
+ * vendor id: 0x108E (Sun Microsystems, Inc.)
+ * device id: 0xabba (Cassini)
+ * revision ids: 0x01 = Cassini
+ * 0x02 = Cassini rev 2
+ * 0x10 = Cassini+
+ * 0x11 = Cassini+ 0.2u
+ *
+ * vendor id: 0x100b (National Semiconductor)
+ * device id: 0x0035 (DP83065/Saturn)
+ * revision ids: 0x30 = Saturn B2
+ *
+ * rings are all offset from 0.
+ *
+ * there are two clock domains:
+ * PCI: 33/66MHz clock
+ * chip: 125MHz clock
+ */
+
+#ifndef _CASSINI_H
+#define _CASSINI_H
+
+/* cassini register map: 2M memory mapped in 32-bit memory space accessible as
+ * 32-bit words. there is no i/o port access. REG_ addresses are
+ * shared between cassini and cassini+. REG_PLUS_ addresses only
+ * appear in cassini+. REG_MINUS_ addresses only appear in cassini.
+ */
+#define CAS_ID_REV2 0x02
+#define CAS_ID_REVPLUS 0x10
+#define CAS_ID_REVPLUS02u 0x11
+#define CAS_ID_REVSATURNB2 0x30
+
+/** global resources **/
+
+/* this register sets the weights for the weighted round robin arbiter. e.g.,
+ * if rx weight == 1 and tx weight == 0, rx == 2x tx transfer credit
+ * for its next turn to access the pci bus.
+ * map: 0x0 = x1, 0x1 = x2, 0x2 = x4, 0x3 = x8
+ * DEFAULT: 0x0, SIZE: 5 bits
+ */
+#define REG_CAWR 0x0004 /* core arbitration weight */
+#define CAWR_RX_DMA_WEIGHT_SHIFT 0
+#define CAWR_RX_DMA_WEIGHT_MASK 0x03 /* [0:1] */
+#define CAWR_TX_DMA_WEIGHT_SHIFT 2
+#define CAWR_TX_DMA_WEIGHT_MASK 0x0C /* [3:2] */
+#define CAWR_RR_DIS 0x10 /* [4] */
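+
+/* illustrative only, not part of the driver: a 2x rx / 1x tx weighting
+ * could be encoded with the fields above as follows ("cp" is assumed
+ * to be the driver's private struct, with the register base in
+ * cp->regs):
+ *
+ *	u32 cawr = (0x1 << CAWR_RX_DMA_WEIGHT_SHIFT) |
+ *		   (0x0 << CAWR_TX_DMA_WEIGHT_SHIFT);
+ *	writel(cawr, cp->regs + REG_CAWR);
+ */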
+
+/* if enabled, BIM can send bursts across PCI bus > cacheline size. burst
+ * sizes determined by length of packet or descriptor transfer and the
+ * max length allowed by the target.
+ * DEFAULT: 0x0, SIZE: 1 bit
+ */
+#define REG_INF_BURST 0x0008 /* infinite burst enable reg */
+#define INF_BURST_EN 0x1 /* enable */
+
+/* top level interrupts [0-9] are auto-cleared to 0 when the status
+ * register is read. second level interrupts [13 - 18] are cleared at
+ * the source. tx completion register 3 is replicated in [19 - 31]
+ * DEFAULT: 0x00000000, SIZE: 29 bits
+ */
+#define REG_INTR_STATUS 0x000C /* interrupt status register */
+#define INTR_TX_INTME 0x00000001 /* frame w/ INT ME desc bit set
+ xferred from host queue to
+ TX FIFO */
+#define INTR_TX_ALL 0x00000002 /* all xmit frames xferred into
+ TX FIFO. i.e.,
+ TX Kick == TX complete. if
+ PACED_MODE set, then TX FIFO
+ also empty */
+#define INTR_TX_DONE 0x00000004 /* any frame xferred into tx
+ FIFO */
+#define INTR_TX_TAG_ERROR 0x00000008 /* TX FIFO tag framing
+ corrupted. FATAL ERROR */
+#define INTR_RX_DONE 0x00000010 /* at least 1 frame xferred
+ from RX FIFO to host mem.
+ RX completion reg updated.
+ may be delayed by recv
+ intr blanking. */
+#define INTR_RX_BUF_UNAVAIL 0x00000020 /* no more receive buffers.
+ RX Kick == RX complete */
+#define INTR_RX_TAG_ERROR 0x00000040 /* RX FIFO tag framing
+ corrupted. FATAL ERROR */
+#define INTR_RX_COMP_FULL 0x00000080 /* no more room in completion
+ ring to post descriptors.
+ RX complete head incr to
+ almost reach RX complete
+ tail */
+#define INTR_RX_BUF_AE 0x00000100 /* less than the
+ programmable threshold #
+ of free descr avail for
+ hw use */
+#define INTR_RX_COMP_AF 0x00000200 /* less than the
+ programmable threshold #
+ of descr spaces for hw
+ use in completion descr
+ ring */
+#define INTR_RX_LEN_MISMATCH 0x00000400 /* len field from MAC !=
+ len of non-reassembly pkt
+ from fifo during DMA or
+ header parser provides TCP
+ header and payload size >
+ MAC packet size.
+ FATAL ERROR */
+#define INTR_SUMMARY 0x00001000 /* summary interrupt bit. this
+ bit will be set if an interrupt
+ generated on the pci bus. useful
+ when driver is polling for
+ interrupts */
+#define INTR_PCS_STATUS 0x00002000 /* PCS interrupt status register */
+#define INTR_TX_MAC_STATUS 0x00004000 /* TX MAC status register has at
+ least 1 unmasked interrupt set */
+#define INTR_RX_MAC_STATUS 0x00008000 /* RX MAC status register has at
+ least 1 unmasked interrupt set */
+#define INTR_MAC_CTRL_STATUS 0x00010000 /* MAC control status register has
+ at least 1 unmasked interrupt
+ set */
+#define INTR_MIF_STATUS 0x00020000 /* MIF status register has at least
+ 1 unmasked interrupt set */
+#define INTR_PCI_ERROR_STATUS 0x00040000 /* PCI error status register in the
+ BIF has at least 1 unmasked
+ interrupt set */
+#define INTR_TX_COMP_3_MASK 0xFFF80000 /* mask for TX completion
+ 3 reg data */
+#define INTR_TX_COMP_3_SHIFT 19
+#define INTR_ERROR_MASK (INTR_MIF_STATUS | INTR_PCI_ERROR_STATUS | \
+ INTR_PCS_STATUS | INTR_RX_LEN_MISMATCH | \
+ INTR_TX_MAC_STATUS | INTR_RX_MAC_STATUS | \
+ INTR_TX_TAG_ERROR | INTR_RX_TAG_ERROR | \
+ INTR_MAC_CTRL_STATUS)
+
+/* determines which status events will cause an interrupt. layout same
+ * as REG_INTR_STATUS.
+ * DEFAULT: 0xFFFFFFFF, SIZE: 16 bits
+ */
+#define REG_INTR_MASK 0x0010 /* Interrupt mask */
+
+/* top level interrupt bits that are cleared during read of REG_INTR_STATUS_ALIAS.
+ * useful when driver is polling for interrupts. layout same as REG_INTR_MASK.
+ * DEFAULT: 0x00000000, SIZE: 12 bits
+ */
+#define REG_ALIAS_CLEAR 0x0014 /* alias clear mask
+ (used w/ status alias) */
+/* same as REG_INTR_STATUS except that only bits cleared are those selected by
+ * REG_ALIAS_CLEAR
+ * DEFAULT: 0x00000000, SIZE: 29 bits
+ */
+#define REG_INTR_STATUS_ALIAS 0x001C /* interrupt status alias
+ (selective clear) */
+
+/* DEFAULT: 0x0, SIZE: 3 bits */
+#define REG_PCI_ERR_STATUS 0x1000 /* PCI error status */
+#define PCI_ERR_BADACK 0x01 /* reserved in Cassini+.
+ set if no ACK64# during ABS64 cycle
+ in Cassini. */
+#define PCI_ERR_DTRTO 0x02 /* delayed xaction timeout. set if
+ no read retry after 2^15 clocks */
+#define PCI_ERR_OTHER 0x04 /* other PCI errors */
+#define PCI_ERR_BIM_DMA_WRITE 0x08 /* BIM received 0 count DMA write req.
+ unused in Cassini. */
+#define PCI_ERR_BIM_DMA_READ 0x10 /* BIM received 0 count DMA read req.
+ unused in Cassini. */
+#define PCI_ERR_BIM_DMA_TIMEOUT 0x20 /* BIM received 255 retries during
+ DMA. unused in cassini. */
+
+/* mask for PCI status events that will set PCI_ERR_STATUS. if cleared, event
+ * causes an interrupt to be generated.
+ * DEFAULT: 0x7, SIZE: 3 bits
+ */
+#define REG_PCI_ERR_STATUS_MASK 0x1004 /* PCI Error status mask */
+
+/* used to configure PCI related parameters that are not in PCI config space.
+ * DEFAULT: 0bxx000, SIZE: 5 bits
+ */
+#define REG_BIM_CFG 0x1008 /* BIM Configuration */
+#define BIM_CFG_RESERVED0 0x001 /* reserved */
+#define BIM_CFG_RESERVED1 0x002 /* reserved */
+#define BIM_CFG_64BIT_DISABLE 0x004 /* disable 64-bit mode */
+#define BIM_CFG_66MHZ 0x008 /* (ro) 1 = 66MHz, 0 = < 66MHz */
+#define BIM_CFG_32BIT 0x010 /* (ro) 1 = 32-bit slot, 0 = 64-bit */
+#define BIM_CFG_DPAR_INTR_ENABLE 0x020 /* detected parity err enable */
+#define BIM_CFG_RMA_INTR_ENABLE 0x040 /* master abort intr enable */
+#define BIM_CFG_RTA_INTR_ENABLE 0x080 /* target abort intr enable */
+#define BIM_CFG_RESERVED2 0x100 /* reserved */
+#define BIM_CFG_BIM_DISABLE 0x200 /* stop BIM DMA. use before global
+ reset. reserved in Cassini. */
+#define BIM_CFG_BIM_STATUS 0x400 /* (ro) 1 = BIM DMA suspended.
+ reserved in Cassini. */
+#define BIM_CFG_PERROR_BLOCK 0x800 /* block PERR# to pci bus. def: 0.
+ reserved in Cassini. */
+
+/* DEFAULT: 0x00000000, SIZE: 32 bits */
+#define REG_BIM_DIAG 0x100C /* BIM Diagnostic */
+#define BIM_DIAG_MSTR_SM_MASK 0x3FFFFF00 /* PCI master controller state
+ machine bits [21:0] */
+#define BIM_DIAG_BRST_SM_MASK 0x7F /* PCI burst controller state
+ machine bits [6:0] */
+
+/* writing to SW_RESET_TX and SW_RESET_RX will issue a global
+ * reset. poll until TX and RX read back as 0's for completion.
+ */
+#define REG_SW_RESET 0x1010 /* Software reset */
+#define SW_RESET_TX 0x00000001 /* reset TX DMA engine. poll until
+ cleared to 0. */
+#define SW_RESET_RX 0x00000002 /* reset RX DMA engine. poll until
+ cleared to 0. */
+#define SW_RESET_RSTOUT 0x00000004 /* force RSTOUT# pin active (low).
+ resets PHY and anything else
+ connected to RSTOUT#. RSTOUT#
+ is also activated by local PCI
+ reset when hot-swap is being
+ done. */
+#define SW_RESET_BLOCK_PCS_SLINK 0x00000008 /* if a global reset is done with
+ this bit set, PCS and SLINK
+ modules won't be reset.
+ i.e., link won't drop. */
+#define SW_RESET_BREQ_SM_MASK 0x00007F00 /* breq state machine [6:0] */
+#define SW_RESET_PCIARB_SM_MASK 0x00070000 /* pci arbitration state bits:
+ 0b000: ARB_IDLE1
+ 0b001: ARB_IDLE2
+ 0b010: ARB_WB_ACK
+ 0b011: ARB_WB_WAT
+ 0b100: ARB_RB_ACK
+ 0b101: ARB_RB_WAT
+ 0b110: ARB_RB_END
+ 0b111: ARB_WB_END */
+#define SW_RESET_RDPCI_SM_MASK 0x00300000 /* read pci state bits:
+ 0b00: RD_PCI_WAT
+ 0b01: RD_PCI_RDY
+ 0b11: RD_PCI_ACK */
+#define SW_RESET_RDARB_SM_MASK 0x00C00000 /* read arbitration state bits:
+ 0b00: AD_IDL_RX
+ 0b01: AD_ACK_RX
+ 0b10: AD_ACK_TX
+ 0b11: AD_IDL_TX */
+#define SW_RESET_WRPCI_SM_MASK 0x06000000 /* write pci state bits
+ 0b00: WR_PCI_WAT
+ 0b01: WR_PCI_RDY
+ 0b11: WR_PCI_ACK */
+#define SW_RESET_WRARB_SM_MASK 0x38000000 /* write arbitration state bits:
+ 0b000: ARB_IDLE1
+ 0b001: ARB_IDLE2
+ 0b010: ARB_TX_ACK
+ 0b011: ARB_TX_WAT
+ 0b100: ARB_RX_ACK
+ 0b110: ARB_RX_WAT */
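+
+/* minimal sketch of the reset handshake described above; the driver's
+ * own paths bound such polls with a retry limit (cf. the STOP_TRIES
+ * loops in cassini.c) and may also set SW_RESET_BLOCK_PCS_SLINK:
+ *
+ *	writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
+ *	while (readl(cp->regs + REG_SW_RESET) &
+ *	       (SW_RESET_TX | SW_RESET_RX))
+ *		udelay(10);
+ */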
+
+/* Cassini only. 64-bit register used to check PCI datapath. when read,
+ * value written has both lower and upper 32-bit halves rotated to the right
+ * one bit position. e.g., FFFFFFFF FFFFFFFF -> 7FFFFFFF 7FFFFFFF
+ */
+#define REG_MINUS_BIM_DATAPATH_TEST 0x1018 /* Cassini: BIM datapath test
+ Cassini+: reserved */
+
+/* output enables are provided for each device's chip select and for the rest
+ * of the outputs from cassini to its local bus devices. two sw programmable
+ * bits are connected to general purpose control/status bits.
+ * DEFAULT: 0x7
+ */
+#define REG_BIM_LOCAL_DEV_EN 0x1020 /* BIM local device
+ output EN. default: 0x7 */
+#define BIM_LOCAL_DEV_PAD 0x01 /* address bus, RW signal, and
+ OE signal output enable on the
+ local bus interface. these
+ are shared between both local
+ bus devices. tristate when 0. */
+#define BIM_LOCAL_DEV_PROM 0x02 /* PROM chip select */
+#define BIM_LOCAL_DEV_EXT 0x04 /* secondary local bus device chip
+ select output enable */
+#define BIM_LOCAL_DEV_SOFT_0 0x08 /* sw programmable ctrl bit 0 */
+#define BIM_LOCAL_DEV_SOFT_1 0x10 /* sw programmable ctrl bit 1 */
+#define BIM_LOCAL_DEV_HW_RESET 0x20 /* internal hw reset. Cassini+ only. */
+
+/* access 24 entry BIM read and write buffers. put address in REG_BIM_BUFFER_ADDR
+ * and read/write from/to it via REG_BIM_BUFFER_DATA_LOW and _DATA_HI.
+ * _DATA_HI should be the last access of the sequence.
+ * DEFAULT: undefined
+ */
+#define REG_BIM_BUFFER_ADDR 0x1024 /* BIM buffer address. for
+					   diagnostic purposes. */
+#define BIM_BUFFER_ADDR_MASK 0x3F /* index (0 - 23) of buffer */
+#define BIM_BUFFER_WR_SELECT 0x40 /* write buffer access = 1
+ read buffer access = 0 */
+/* DEFAULT: undefined */
+#define REG_BIM_BUFFER_DATA_LOW 0x1028 /* BIM buffer data low */
+#define REG_BIM_BUFFER_DATA_HI 0x102C /* BIM buffer data high */
+
+/* set BIM_RAM_BIST_START to start built-in self test for BIM read buffer.
+ * bit auto-clears when done with status read from _SUMMARY and _PASS bits.
+ */
+#define REG_BIM_RAM_BIST 0x102C /* BIM RAM (read buffer) BIST
+ control/status */
+#define BIM_RAM_BIST_RD_START 0x01 /* start BIST for BIM read buffer */
+#define BIM_RAM_BIST_WR_START 0x02 /* start BIST for BIM write buffer.
+ Cassini only. reserved in
+ Cassini+. */
+#define BIM_RAM_BIST_RD_PASS 0x04 /* summary BIST pass status for read
+ buffer. */
+#define BIM_RAM_BIST_WR_PASS 0x08 /* summary BIST pass status for write
+ buffer. Cassini only. reserved
+ in Cassini+. */
+#define BIM_RAM_BIST_RD_LOW_PASS 0x10 /* read low bank passes BIST */
+#define BIM_RAM_BIST_RD_HI_PASS 0x20 /* read high bank passes BIST */
+#define BIM_RAM_BIST_WR_LOW_PASS 0x40 /* write low bank passes BIST.
+ Cassini only. reserved in
+ Cassini+. */
+#define BIM_RAM_BIST_WR_HI_PASS 0x80 /* write high bank passes BIST.
+ Cassini only. reserved in
+ Cassini+. */
+
+/* ASUN: i'm not sure what this does as it's not in the spec.
+ * DEFAULT: 0xFC
+ */
+#define REG_BIM_DIAG_MUX 0x1030 /* BIM diagnostic probe mux
+ select register */
+
+/* enable probe monitoring mode and select data appearing on the P_A* bus. bit
+ * values for _SEL_HI_MASK and _SEL_LOW_MASK:
+ * 0x0: internal probe[7:0] (pci arb state, wtc empty w, wtc full w, wtc empty w,
+ * wtc empty r, post pci)
+ * 0x1: internal probe[15:8] (pci wbuf comp, pci wpkt comp, pci rbuf comp,
+ * pci rpkt comp, txdma wr req, txdma wr ack,
+ * txdma wr rdy, txdma wr xfr done)
+ * 0x2: internal probe[23:16] (txdma rd req, txdma rd ack, txdma rd rdy, rxdma rd,
+ * rd arb state, rd pci state)
+ * 0x3: internal probe[31:24] (rxdma req, rxdma ack, rxdma rdy, wrarb state,
+ * wrpci state)
+ * 0x4: pci io probe[7:0] 0x5: pci io probe[15:8]
+ * 0x6: pci io probe[23:16] 0x7: pci io probe[31:24]
+ * 0x8: pci io probe[39:32] 0x9: pci io probe[47:40]
+ * 0xa: pci io probe[55:48] 0xb: pci io probe[63:56]
+ * the following are not available in Cassini:
+ * 0xc: rx probe[7:0] 0xd: tx probe[7:0]
+ * 0xe: hp probe[7:0] 0xf: mac probe[7:0]
+ */
+#define REG_PLUS_PROBE_MUX_SELECT 0x1034 /* Cassini+: PROBE MUX SELECT */
+#define PROBE_MUX_EN 0x80000000 /* allow probe signals to be
+ driven on local bus P_A[15:0]
+ for debugging */
+#define PROBE_MUX_SUB_MUX_MASK 0x0000FF00 /* select sub module probe signals:
+ 0x03 = mac[1:0]
+ 0x0C = rx[1:0]
+ 0x30 = tx[1:0]
+ 0xC0 = hp[1:0] */
+#define PROBE_MUX_SEL_HI_MASK 0x000000F0 /* select which module to appear
+ on P_A[15:8]. see above for
+ values. */
+#define PROBE_MUX_SEL_LOW_MASK 0x0000000F /* select which module to appear
+ on P_A[7:0]. see above for
+ values. */
+
+/* values mean the same thing as REG_INTR_MASK except that it's for INTB.
+ DEFAULT: 0x1F */
+#define REG_PLUS_INTR_MASK_1 0x1038 /* Cassini+: interrupt mask
+ register 2 for INTB */
+#define REG_PLUS_INTRN_MASK(x) (REG_PLUS_INTR_MASK_1 + ((x) - 1)*16)
+/* bits correspond to both _MASK and _STATUS registers. _ALT corresponds to
+ * all of the alternate (2-4) INTR registers while _1 corresponds to only
+ * _MASK_1 and _STATUS_1 registers.
+ * DEFAULT: 0x7 for MASK registers, 0x0 for ALIAS_CLEAR registers
+ */
+#define INTR_RX_DONE_ALT 0x01
+#define INTR_RX_COMP_FULL_ALT 0x02
+#define INTR_RX_COMP_AF_ALT 0x04
+#define INTR_RX_BUF_UNAVAIL_1 0x08
+#define INTR_RX_BUF_AE_1 0x10 /* almost empty */
+#define INTRN_MASK_RX_EN 0x80
+#define INTRN_MASK_CLEAR_ALL (INTR_RX_DONE_ALT | \
+ INTR_RX_COMP_FULL_ALT | \
+ INTR_RX_COMP_AF_ALT | \
+ INTR_RX_BUF_UNAVAIL_1 | \
+ INTR_RX_BUF_AE_1)
+#define REG_PLUS_INTR_STATUS_1 0x103C /* Cassini+: interrupt status
+ register 2 for INTB. default: 0x1F */
+#define REG_PLUS_INTRN_STATUS(x) (REG_PLUS_INTR_STATUS_1 + ((x) - 1)*16)
+#define INTR_STATUS_ALT_INTX_EN 0x80 /* generate INTX when one of the
+ flags are set. enables desc ring. */
+
+#define REG_PLUS_ALIAS_CLEAR_1 0x1040 /* Cassini+: alias clear mask
+ register 2 for INTB */
+#define REG_PLUS_ALIASN_CLEAR(x) (REG_PLUS_ALIAS_CLEAR_1 + ((x) - 1)*16)
+
+#define REG_PLUS_INTR_STATUS_ALIAS_1 0x1044 /* Cassini+: interrupt status
+ register alias 2 for INTB */
+#define REG_PLUS_INTRN_STATUS_ALIAS(x) (REG_PLUS_INTR_STATUS_ALIAS_1 + ((x) - 1)*16)
+
+#define REG_SATURN_PCFG 0x106c /* pin configuration register for
+ integrated macphy */
+
+#define SATURN_PCFG_TLA 0x00000001 /* 1 = phy actled */
+#define SATURN_PCFG_FLA 0x00000002 /* 1 = phy link10led */
+#define SATURN_PCFG_CLA 0x00000004 /* 1 = phy link100led */
+#define SATURN_PCFG_LLA 0x00000008 /* 1 = phy link1000led */
+#define SATURN_PCFG_RLA 0x00000010 /* 1 = phy duplexled */
+#define SATURN_PCFG_PDS 0x00000020 /* phy debug mode.
+ 0 = normal */
+#define SATURN_PCFG_MTP 0x00000080 /* test point select */
+#define SATURN_PCFG_GMO 0x00000100 /* GMII observe. 1 =
+ GMII on SERDES pins for
+ monitoring. */
+#define SATURN_PCFG_FSI 0x00000200 /* 1 = freeze serdes/gmii. all
+ pins configed as outputs.
+ for power saving when using
+ internal phy. */
+#define SATURN_PCFG_LAD 0x00000800 /* 0 = mac core led ctrl
+ polarity from strapping
+ value.
+ 1 = mac core led ctrl
+ polarity active low. */
+
+
+/** transmit dma registers **/
+#define MAX_TX_RINGS_SHIFT 2
+#define MAX_TX_RINGS (1 << MAX_TX_RINGS_SHIFT)
+#define MAX_TX_RINGS_MASK (MAX_TX_RINGS - 1)
+
+/* TX configuration.
+ * descr ring size = 32 * (1 << n), n < 9. e.g., 0x8 = 8k. default: 0x8
+ * DEFAULT: 0x3F000001
+ */
+#define REG_TX_CFG 0x2004 /* TX config */
+#define TX_CFG_DMA_EN 0x00000001 /* enable TX DMA. if cleared, DMA
+ will stop after xfer of current
+ buffer has been completed. */
+#define TX_CFG_FIFO_PIO_SEL 0x00000002 /* TX DMA FIFO can be
+ accessed w/ FIFO addr
+ and data registers.
+ TX DMA should be
+ disabled. */
+#define TX_CFG_DESC_RING0_MASK 0x0000003C /* # desc entries in
+ ring 1. */
+#define TX_CFG_DESC_RING0_SHIFT 2
+#define TX_CFG_DESC_RINGN_MASK(a) (TX_CFG_DESC_RING0_MASK << (a)*4)
+#define TX_CFG_DESC_RINGN_SHIFT(a) (TX_CFG_DESC_RING0_SHIFT + (a)*4)
+#define TX_CFG_PACED_MODE 0x00100000 /* TX_ALL only set after
+ TX FIFO becomes empty.
+ if 0, TX_ALL set
+ if descr queue empty. */
+#define TX_CFG_DMA_RDPIPE_DIS 0x01000000 /* always set to 1 */
+#define TX_CFG_COMPWB_Q1 0x02000000 /* completion writeback happens at
+ the end of every packet kicked
+ through Q1. */
+#define TX_CFG_COMPWB_Q2 0x04000000 /* completion writeback happens at
+ the end of every packet kicked
+ through Q2. */
+#define TX_CFG_COMPWB_Q3 0x08000000 /* completion writeback happens at
+ the end of every packet kicked
+ through Q3 */
+#define TX_CFG_COMPWB_Q4 0x10000000 /* completion writeback happens at
+ the end of every packet kicked
+ through Q4 */
+#define TX_CFG_INTR_COMPWB_DIS 0x20000000 /* disable pre-interrupt completion
+ writeback */
+#define TX_CFG_CTX_SEL_MASK 0xC0000000 /* selects tx test port
+ connection
+ 0b00: tx mac req,
+ tx mac retry req,
+ tx ack and tx tag.
+ 0b01: txdma rd req,
+ txdma rd ack,
+ txdma rd rdy,
+ txdma rd type0
+ 0b11: txdma wr req,
+ txdma wr ack,
+ txdma wr rdy,
+ txdma wr xfr done. */
+#define TX_CFG_CTX_SEL_SHIFT 30
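+
+/* illustrative only: to request 32 * (1 << 6) = 2k descriptor entries
+ * on tx ring 2, the per-ring field could be updated as follows,
+ * assuming "val" holds the current REG_TX_CFG value:
+ *
+ *	val &= ~TX_CFG_DESC_RINGN_MASK(2);
+ *	val |= 0x6 << TX_CFG_DESC_RINGN_SHIFT(2);
+ */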
+
+/* 11-bit counters that point to next location in FIFO to be loaded/retrieved.
+ * used for diagnostics only.
+ */
+#define REG_TX_FIFO_WRITE_PTR 0x2014 /* TX FIFO write pointer */
+#define REG_TX_FIFO_SHADOW_WRITE_PTR 0x2018 /* TX FIFO shadow write
+ pointer. temp hold reg.
+ diagnostics only. */
+#define REG_TX_FIFO_READ_PTR 0x201C /* TX FIFO read pointer */
+#define REG_TX_FIFO_SHADOW_READ_PTR 0x2020 /* TX FIFO shadow read
+ pointer */
+
+/* (ro) 11-bit up/down counter w/ # of frames currently in TX FIFO */
+#define REG_TX_FIFO_PKT_CNT 0x2024 /* TX FIFO packet counter */
+
+/* current state of all state machines in TX */
+#define REG_TX_SM_1 0x2028 /* TX state machine reg #1 */
+#define TX_SM_1_CHAIN_MASK 0x000003FF /* chaining state machine */
+#define TX_SM_1_CSUM_MASK 0x00000C00 /* checksum state machine */
+#define TX_SM_1_FIFO_LOAD_MASK 0x0003F000 /* FIFO load state machine.
+ = 0x01 when TX disabled. */
+#define TX_SM_1_FIFO_UNLOAD_MASK 0x003C0000 /* FIFO unload state machine */
+#define TX_SM_1_CACHE_MASK 0x03C00000 /* desc. prefetch cache controller
+ state machine */
+#define TX_SM_1_CBQ_ARB_MASK 0xF8000000 /* CBQ arbiter state machine */
+
+#define REG_TX_SM_2 0x202C /* TX state machine reg #2 */
+#define TX_SM_2_COMP_WB_MASK 0x07 /* completion writeback sm */
+#define TX_SM_2_SUB_LOAD_MASK 0x38 /* sub load state machine */
+#define TX_SM_2_KICK_MASK 0xC0 /* kick state machine */
+
+/* 64-bit pointer to the transmit data buffer. only the 50 LSB are incremented
+ * while the upper 23 bits are taken from the TX descriptor
+ */
+#define REG_TX_DATA_PTR_LOW 0x2030 /* TX data pointer low */
+#define REG_TX_DATA_PTR_HI 0x2034 /* TX data pointer high */
+
+/* 13 bit registers written by driver w/ descriptor value that follows
+ * last valid xmit descriptor. kick # and complete # values are used by
+ * the xmit dma engine to control tx descr fetching. if > 1 valid
+ * tx descr is available within the cache line being read, cassini will
+ * internally cache up to 4 of them. 0 on reset. _KICK = rw, _COMP = ro.
+ */
+#define REG_TX_KICK0 0x2038 /* TX kick reg #1 */
+#define REG_TX_KICKN(x) (REG_TX_KICK0 + (x)*4)
+#define REG_TX_COMP0 0x2048 /* TX completion reg #1 */
+#define REG_TX_COMPN(x) (REG_TX_COMP0 + (x)*4)
+
+/* values of TX_COMPLETE_1-4 are written. each completion register
+ * is 2 bytes in size and contiguous. 8B allocation w/ 8B alignment.
+ * NOTE: completion reg values are only written back prior to TX_INTME and
+ * TX_ALL interrupts. at all other times, the most up-to-date index values
+ * should be obtained from the REG_TX_COMPLETE_# registers.
+ * here's the layout:
+ * offset from base addr completion # byte
+ * 0 TX_COMPLETE_1_MSB
+ * 1 TX_COMPLETE_1_LSB
+ * 2 TX_COMPLETE_2_MSB
+ * 3 TX_COMPLETE_2_LSB
+ * 4 TX_COMPLETE_3_MSB
+ * 5 TX_COMPLETE_3_LSB
+ * 6 TX_COMPLETE_4_MSB
+ * 7 TX_COMPLETE_4_LSB
+ */
+#define TX_COMPWB_SIZE 8
+#define REG_TX_COMPWB_DB_LOW 0x2058 /* TX completion write back
+ base low */
+#define REG_TX_COMPWB_DB_HI 0x205C /* TX completion write back
+ base high */
+#define TX_COMPWB_MSB_MASK 0x00000000000000FFULL
+#define TX_COMPWB_MSB_SHIFT 0
+#define TX_COMPWB_LSB_MASK 0x000000000000FF00ULL
+#define TX_COMPWB_LSB_SHIFT 8
+#define TX_COMPWB_NEXT(x) ((x) >> 16)
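+
+/* illustrative sketch, not the driver's exact code: walking all four
+ * completion entries in the 8-byte writeback block with the masks
+ * above, assuming "compwb" is the little-endian 64-bit value copied
+ * out of the block:
+ *
+ *	u64 v = le64_to_cpu(compwb);
+ *	int ring;
+ *
+ *	for (ring = 0; ring < 4; ring++) {
+ *		u16 entry = ((v & TX_COMPWB_MSB_MASK) << 8) |
+ *			    ((v & TX_COMPWB_LSB_MASK) >> 8);
+ *		v = TX_COMPWB_NEXT(v);
+ *	}
+ *
+ * each "entry" is the 13-bit completion index for the corresponding
+ * TX_COMPLETE_<ring+1> register.
+ */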
+
+/* 53 MSB used as base address. 11 LSB assumed to be 0. TX desc pointer must
+ * be 2KB-aligned. */
+#define REG_TX_DB0_LOW 0x2060 /* TX descriptor base low #1 */
+#define REG_TX_DB0_HI 0x2064 /* TX descriptor base hi #1 */
+#define REG_TX_DBN_LOW(x) (REG_TX_DB0_LOW + (x)*8)
+#define REG_TX_DBN_HI(x) (REG_TX_DB0_HI + (x)*8)
+
+/* 16-bit registers hold weights for the weighted round-robin of the
+ * four CBQ TX descr rings. weights correspond to # bytes xferred from
+ * host to TXFIFO in a round of WRR arbitration. can be set
+ * dynamically with new weights set upon completion of the current
+ * packet transfer from host memory to TXFIFO. a dummy write to any of
+ * these registers causes a queue1 pre-emption with all historical bw
+ * deficit data reset to 0 (useful when congestion requires a
+ * pre-emption/re-allocation of network bandwidth).
+ */
+#define REG_TX_MAXBURST_0 0x2080 /* TX MaxBurst #1 */
+#define REG_TX_MAXBURST_1 0x2084 /* TX MaxBurst #2 */
+#define REG_TX_MAXBURST_2 0x2088 /* TX MaxBurst #3 */
+#define REG_TX_MAXBURST_3 0x208C /* TX MaxBurst #4 */
+
+/* diagnostics access to any TX FIFO location. every access is 65
+ * bits. _DATA_LOW = 32 LSB, _DATA_HI_T1/T0 = 32 MSB. _TAG = tag bit.
+ * writing _DATA_HI_T0 sets tag bit low, writing _DATA_HI_T1 sets tag
+ * bit high. TX_FIFO_PIO_SEL must be set for TX FIFO PIO access. if
+ * TX FIFO data integrity is desired, TX DMA should be
+ * disabled. _DATA_HI_Tx should be the last access of the sequence.
+ */
+#define REG_TX_FIFO_ADDR 0x2104 /* TX FIFO address */
+#define REG_TX_FIFO_TAG 0x2108 /* TX FIFO tag */
+#define REG_TX_FIFO_DATA_LOW 0x210C /* TX FIFO data low */
+#define REG_TX_FIFO_DATA_HI_T1 0x2110 /* TX FIFO data high t1 */
+#define REG_TX_FIFO_DATA_HI_T0 0x2114 /* TX FIFO data high t0 */
+#define REG_TX_FIFO_SIZE 0x2118 /* (ro) TX FIFO size = 0x090 = 9KB */
+
+/* 9-bit register controls BIST of TX FIFO. bit set indicates that the BIST
+ * passed for the specified memory
+ */
+#define REG_TX_RAMBIST 0x211C /* TX RAMBIST control/status */
+#define TX_RAMBIST_STATE 0x01C0 /* progress state of RAMBIST
+ controller state machine */
+#define TX_RAMBIST_RAM33A_PASS 0x0020 /* RAM33A passed */
+#define TX_RAMBIST_RAM32A_PASS 0x0010 /* RAM32A passed */
+#define TX_RAMBIST_RAM33B_PASS 0x0008 /* RAM33B passed */
+#define TX_RAMBIST_RAM32B_PASS 0x0004 /* RAM32B passed */
+#define TX_RAMBIST_SUMMARY 0x0002 /* all RAM passed */
+#define TX_RAMBIST_START 0x0001 /* write 1 to start BIST. self
+ clears on completion. */
+
+/** receive dma registers **/
+#define MAX_RX_DESC_RINGS 2
+#define MAX_RX_COMP_RINGS 4
+
+/* receive DMA channel configuration. default: 0x80910
+ * free ring size = (1 << n)*32 -> [32 - 8k]
+ * completion ring size = (1 << n)*128 -> [128 - 32k], n < 9
+ * DEFAULT: 0x80910
+ */
+#define REG_RX_CFG 0x4000 /* RX config */
+#define RX_CFG_DMA_EN 0x00000001 /* enable RX DMA. 0 stops
+ channel as soon as current
+ frame xfer has completed.
+ driver should disable MAC
+ for 200ms before disabling
+ RX */
+#define RX_CFG_DESC_RING_MASK 0x0000001E /* # desc entries in RX
+ free desc ring.
+ def: 0x8 = 8k */
+#define RX_CFG_DESC_RING_SHIFT 1
+#define RX_CFG_COMP_RING_MASK 0x000001E0 /* # desc entries in RX complete
+ ring. def: 0x8 = 32k */
+#define RX_CFG_COMP_RING_SHIFT 5
+#define RX_CFG_BATCH_DIS 0x00000200 /* disable receive desc
+ batching. def: 0x0 =
+ enabled */
+#define RX_CFG_SWIVEL_MASK 0x00001C00 /* byte offset of the 1st
+ data byte of the packet
+					   w/in 8 byte boundaries.
+ this swivels the data
+ DMA'ed to header
+ buffers, jumbo buffers
+ when header split is not
+ requested and MTU sized
+ buffers. def: 0x2 */
+#define RX_CFG_SWIVEL_SHIFT 10
+
+/* cassini+ only */
+#define RX_CFG_DESC_RING1_MASK 0x000F0000 /* # of desc entries in
+ RX free desc ring 2.
+ def: 0x8 = 8k */
+#define RX_CFG_DESC_RING1_SHIFT 16
+
+
+/* the page size register allows cassini chips to do the following with
+ * received data:
+ * [--------------------------------------------------------------] page
+ * [off][buf1][pad][off][buf2][pad][off][buf3][pad][off][buf4][pad]
+ * |--------------| = PAGE_SIZE_BUFFER_STRIDE
+ * page = PAGE_SIZE
+ * offset = PAGE_SIZE_MTU_OFF
+ * for the above example, MTU_BUFFER_COUNT = 4.
+ * NOTE: as is apparent, you need to ensure that the following holds:
+ * MTU_BUFFER_COUNT <= PAGE_SIZE/PAGE_SIZE_BUFFER_STRIDE
+ * DEFAULT: 0x48002002 (8k pages)
+ */
+#define REG_RX_PAGE_SIZE 0x4004 /* RX page size */
+#define RX_PAGE_SIZE_MASK 0x00000003 /* size of pages pointed to
+ by receive descriptors.
+ if jumbo buffers are
+ supported the page size
+ should not be < 8k.
+ 0b00 = 2k, 0b01 = 4k
+ 0b10 = 8k, 0b11 = 16k
+ DEFAULT: 8k */
+#define RX_PAGE_SIZE_SHIFT 0
+#define RX_PAGE_SIZE_MTU_COUNT_MASK 0x00007800 /* # of MTU buffers the hw
+ packs into a page.
+ DEFAULT: 4 */
+#define RX_PAGE_SIZE_MTU_COUNT_SHIFT 11
+#define RX_PAGE_SIZE_MTU_STRIDE_MASK 0x18000000 /* # of bytes that separate
+ each MTU buffer +
+ offset from each
+ other.
+ 0b00 = 1k, 0b01 = 2k
+ 0b10 = 4k, 0b11 = 8k
+ DEFAULT: 0x1 */
+#define RX_PAGE_SIZE_MTU_STRIDE_SHIFT 27
+#define RX_PAGE_SIZE_MTU_OFF_MASK 0xC0000000 /* offset in each page that
+ hw writes the MTU buffer
+ into.
+ 0b00 = 0,
+ 0b01 = 64 bytes
+ 0b10 = 96, 0b11 = 128
+ DEFAULT: 0x1 */
+#define RX_PAGE_SIZE_MTU_OFF_SHIFT 30
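+
+/* worked example: the default 0x48002002 decodes with the fields above
+ * as 8k pages (SIZE = 0b10), 4 MTU buffers per page (MTU_COUNT = 4),
+ * a 2k buffer stride (MTU_STRIDE = 0b01), and a 64 byte offset
+ * (MTU_OFF = 0b01). the constraint above holds: 4 <= 8192/2048.
+ */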
+
+/* 11-bit counter points to next location in RX FIFO to be loaded/read.
+ * shadow write pointers enable retries in case of early receive aborts.
+ * DEFAULT: 0x0. generated on 64-bit boundaries.
+ */
+#define REG_RX_FIFO_WRITE_PTR 0x4008 /* RX FIFO write pointer */
+#define REG_RX_FIFO_READ_PTR 0x400C /* RX FIFO read pointer */
+#define REG_RX_IPP_FIFO_SHADOW_WRITE_PTR 0x4010 /* RX IPP FIFO shadow write
+ pointer */
+#define REG_RX_IPP_FIFO_SHADOW_READ_PTR 0x4014 /* RX IPP FIFO shadow read
+ pointer */
+#define REG_RX_IPP_FIFO_READ_PTR 0x400C /* RX IPP FIFO read
+ pointer. (8-bit counter) */
+
+/* current state of RX DMA state engines + other info
+ * DEFAULT: 0x0
+ */
+#define REG_RX_DEBUG 0x401C /* RX debug */
+#define RX_DEBUG_LOAD_STATE_MASK 0x0000000F /* load state machine w/ MAC:
+ 0x0 = idle, 0x1 = load_bop
+ 0x2 = load 1, 0x3 = load 2
+ 0x4 = load 3, 0x5 = load 4
+ 0x6 = last detect
+ 0x7 = wait req
+					      0x8 = wait req status 1st
+ 0x9 = load st
+ 0xa = bubble mac
+ 0xb = error */
+#define RX_DEBUG_LM_STATE_MASK 0x00000070 /* load state machine w/ HP and
+ RX FIFO:
+ 0x0 = idle, 0x1 = hp xfr
+ 0x2 = wait hp ready
+ 0x3 = wait flow code
+ 0x4 = fifo xfer
+ 0x5 = make status
+ 0x6 = csum ready
+ 0x7 = error */
+#define RX_DEBUG_FC_STATE_MASK 0x00000180 /* flow control state machine
+ w/ MAC:
+ 0x0 = idle
+ 0x1 = wait xoff ack
+ 0x2 = wait xon
+ 0x3 = wait xon ack */
+#define RX_DEBUG_DATA_STATE_MASK 0x00001E00 /* unload data state machine
+ states:
+ 0x0 = idle data
+ 0x1 = header begin
+ 0x2 = xfer header
+ 0x3 = xfer header ld
+ 0x4 = mtu begin
+ 0x5 = xfer mtu
+ 0x6 = xfer mtu ld
+ 0x7 = jumbo begin
+ 0x8 = xfer jumbo
+ 0x9 = xfer jumbo ld
+ 0xa = reas begin
+ 0xb = xfer reas
+ 0xc = flush tag
+ 0xd = xfer reas ld
+ 0xe = error
+ 0xf = bubble idle */
+#define RX_DEBUG_DESC_STATE_MASK 0x0001E000 /* unload desc state machine
+ states:
+ 0x0 = idle desc
+ 0x1 = wait ack
+ 0x9 = wait ack 2
+ 0x2 = fetch desc 1
+ 0xa = fetch desc 2
+ 0x3 = load ptrs
+ 0x4 = wait dma
+ 0x5 = wait ack batch
+ 0x6 = post batch
+ 0x7 = xfr done */
+#define RX_DEBUG_INTR_READ_PTR_MASK 0x30000000 /* interrupt read ptr of the
+ interrupt queue */
+#define RX_DEBUG_INTR_WRITE_PTR_MASK 0xC0000000 /* interrupt write pointer
+ of the interrupt queue */
+
+/* flow control frames are emitted using two PAUSE thresholds:
+ * XOFF PAUSE uses pause time value pre-programmed in the Send PAUSE MAC reg
+ * XON PAUSE uses a pause time of 0. granularity of threshold is 64bytes.
+ * PAUSE thresholds defined in terms of FIFO occupancy and may be translated
+ * into FIFO vacancy using RX_FIFO_SIZE. setting ON will trigger XON frames
+ * when FIFO reaches 0. OFF threshold should not be > size of RX FIFO. max
+ * value is 0x6F.
+ * DEFAULT: 0x00078
+ */
+#define REG_RX_PAUSE_THRESH 0x4020 /* RX pause thresholds */
+#define RX_PAUSE_THRESH_QUANTUM 64
+#define RX_PAUSE_THRESH_OFF_MASK 0x000001FF /* XOFF PAUSE emitted when
+ RX FIFO occupancy >
+ value*64B */
+#define RX_PAUSE_THRESH_OFF_SHIFT 0
+#define RX_PAUSE_THRESH_ON_MASK 0x001FF000 /* XON PAUSE emitted after
+ emitting XOFF PAUSE when RX
+ FIFO occupancy falls below
+ this value*64B. must be
+ < XOFF threshold. if =
+					    RX_FIFO_SIZE, XON frames are
+ never emitted. */
+#define RX_PAUSE_THRESH_ON_SHIFT 12
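+
+/* worked example: the default 0x00078 encodes an XOFF threshold of
+ * 0x78 * 64B = 7.5KB of FIFO occupancy and an XON threshold of 0.
+ */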
+
+/* 13-bit register used to control RX desc fetching and intr generation. if 4+
+ * valid RX descriptors are available, Cassini will read 4 at a time.
+ * writing N means that all desc up to *but* excluding N are available. N must
+ * be a multiple of 4 (N % 4 = 0). first desc should be cache-line aligned.
+ * DEFAULT: 0 on reset
+ */
+#define REG_RX_KICK 0x4024 /* RX kick reg */
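+
+/* e.g. (illustrative): with a 128-entry free ring and descriptors
+ * 0 - 63 initialized, writing 64 to REG_RX_KICK hands descriptors
+ * 0 - 63 to the hw; 64 itself is excluded and is a multiple of 4,
+ * as required.
+ */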
+
+/* 8KB aligned 64-bit pointer to the base of the RX free/completion rings.
+ * lower 13 bits of the low register are hard-wired to 0.
+ */
+#define REG_RX_DB_LOW 0x4028 /* RX descriptor ring
+ base low */
+#define REG_RX_DB_HI 0x402C /* RX descriptor ring
+ base hi */
+#define REG_RX_CB_LOW 0x4030 /* RX completion ring
+ base low */
+#define REG_RX_CB_HI 0x4034 /* RX completion ring
+ base hi */
+/* 13-bit register indicate desc used by cassini for receive frames. used
+ * for diagnostic purposes.
+ * DEFAULT: 0 on reset
+ */
+#define REG_RX_COMP 0x4038 /* (ro) RX completion */
+
+/* HEAD and TAIL are used to control RX desc posting and interrupt
+ * generation. hw moves the head register to pass ownership to sw. sw
+ * moves the tail register to pass ownership back to hw. to give all
+ * entries to hw, set TAIL = HEAD. if HEAD and TAIL indicate that no
+ * more entries are available, DMA will pause and an interrupt will be
+ * generated to indicate no more entries are available. sw can use
+ * this interrupt to reduce the # of times it must update the
+ * completion tail register.
+ * DEFAULT: 0 on reset
+ */
+#define REG_RX_COMP_HEAD 0x403C /* RX completion head */
+#define REG_RX_COMP_TAIL 0x4040 /* RX completion tail */
+
+/* values used for receive interrupt blanking. loaded each time the ISR is read
+ * DEFAULT: 0x00000000
+ */
+#define REG_RX_BLANK 0x4044 /* RX blanking register
+ for ISR read */
+#define RX_BLANK_INTR_PKT_MASK 0x000001FF /* RX_DONE intr asserted if
+ this many sets of completion
+ writebacks (up to 2 packets)
+ occur since the last time
+ the ISR was read. 0 = no
+ packet blanking */
+#define RX_BLANK_INTR_PKT_SHIFT 0
+#define RX_BLANK_INTR_TIME_MASK 0x3FFFF000 /* RX_DONE interrupt asserted
+ if that many clocks were
+ counted since last time the
+ ISR was read.
+ each count is 512 core
+ clocks (125MHz). 0 = no
+ time blanking */
+#define RX_BLANK_INTR_TIME_SHIFT 12
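+
+/* illustrative encoding, not a driver default: assert RX_DONE after 4
+ * completion writebacks or ~12us (3 counts of 512 core clocks at
+ * 125MHz, ~4.1us each), whichever comes first:
+ *
+ *	writel((4 << RX_BLANK_INTR_PKT_SHIFT) |
+ *	       (3 << RX_BLANK_INTR_TIME_SHIFT),
+ *	       cp->regs + REG_RX_BLANK);
+ */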
+
+/* values used for interrupt generation based on threshold values of how
+ * many free desc and completion entries are available for hw use.
+ * DEFAULT: 0x00000000
+ */
+#define REG_RX_AE_THRESH 0x4048 /* RX almost empty
+ thresholds */
+#define RX_AE_THRESH_FREE_MASK 0x00001FFF /* RX_BUF_AE will be
+ generated if # desc
+ avail for hw use <=
+ # */
+#define RX_AE_THRESH_FREE_SHIFT 0
+#define RX_AE_THRESH_COMP_MASK 0x0FFFE000 /* RX_COMP_AE will be
+ generated if # of
+ completion entries
+ avail for hw use <=
+ # */
+#define RX_AE_THRESH_COMP_SHIFT 13
+
+/* probabilities for random early drop (RED) thresholds on a FIFO threshold
+ * basis. probability should increase when the FIFO level increases. control
+ * packets are never dropped and not counted in stats. probability programmed
+ * on a 12.5% granularity. e.g., 0x1 = 1/8 packets dropped.
+ * DEFAULT: 0x00000000
+ */
+#define REG_RX_RED 0x404C /* RX random early detect enable */
+#define RX_RED_4K_6K_FIFO_MASK 0x000000FF /* 4KB < FIFO thresh < 6KB */
+#define RX_RED_6K_8K_FIFO_MASK 0x0000FF00 /* 6KB < FIFO thresh < 8KB */
+#define RX_RED_8K_10K_FIFO_MASK 0x00FF0000 /* 8KB < FIFO thresh < 10KB */
+#define RX_RED_10K_12K_FIFO_MASK 0xFF000000 /* 10KB < FIFO thresh < 12KB */
+
+/* FIFO fullness levels for RX FIFO, RX control FIFO, and RX IPP FIFO.
+ * RX control FIFO = # of packets in RX FIFO.
+ * DEFAULT: 0x0
+ */
+#define REG_RX_FIFO_FULLNESS 0x4050 /* (ro) RX FIFO fullness */
+#define RX_FIFO_FULLNESS_RX_FIFO_MASK 0x3FF80000 /* level w/ 8B granularity */
+#define RX_FIFO_FULLNESS_IPP_FIFO_MASK 0x0007FF00 /* level w/ 8B granularity */
+#define RX_FIFO_FULLNESS_RX_PKT_MASK 0x000000FF /* # packets in RX FIFO */
+#define REG_RX_IPP_PACKET_COUNT 0x4054 /* RX IPP packet counter */
+#define REG_RX_WORK_DMA_PTR_LOW 0x4058 /* RX working DMA ptr low */
+#define REG_RX_WORK_DMA_PTR_HI 0x405C /* RX working DMA ptr
+ high */
+
+/* BIST testing of RX FIFO, RX control FIFO, and RX IPP FIFO. only RX BIST
+ * START/COMPLETE is writeable. START will clear when the BIST has completed
+ * checking all 17 RAMS.
+ * DEFAULT: 0bxxxx xxxxx xxxx xxxx xxxx x000 0000 0000 00x0
+ */
+#define REG_RX_BIST 0x4060 /* (ro) RX BIST */
+#define RX_BIST_32A_PASS 0x80000000 /* RX FIFO 32A passed */
+#define RX_BIST_33A_PASS 0x40000000 /* RX FIFO 33A passed */
+#define RX_BIST_32B_PASS 0x20000000 /* RX FIFO 32B passed */
+#define RX_BIST_33B_PASS 0x10000000 /* RX FIFO 33B passed */
+#define RX_BIST_32C_PASS 0x08000000 /* RX FIFO 32C passed */
+#define RX_BIST_33C_PASS 0x04000000 /* RX FIFO 33C passed */
+#define RX_BIST_IPP_32A_PASS 0x02000000 /* RX IPP FIFO 32A passed */
+#define RX_BIST_IPP_33A_PASS 0x01000000 /* RX IPP FIFO 33A passed */
+#define RX_BIST_IPP_32B_PASS 0x00800000 /* RX IPP FIFO 32B passed */
+#define RX_BIST_IPP_33B_PASS 0x00400000 /* RX IPP FIFO 33B passed */
+#define RX_BIST_IPP_32C_PASS 0x00200000 /* RX IPP FIFO 32C passed */
+#define RX_BIST_IPP_33C_PASS 0x00100000 /* RX IPP FIFO 33C passed */
+#define RX_BIST_CTRL_32_PASS 0x00800000 /* RX CTRL FIFO 32 passed */
+#define RX_BIST_CTRL_33_PASS 0x00400000 /* RX CTRL FIFO 33 passed */
+#define RX_BIST_REAS_26A_PASS 0x00200000 /* RX Reas 26A passed */
+#define RX_BIST_REAS_26B_PASS 0x00100000 /* RX Reas 26B passed */
+#define RX_BIST_REAS_27_PASS 0x00080000 /* RX Reas 27 passed */
+#define RX_BIST_STATE_MASK 0x00078000 /* BIST state machine */
+#define RX_BIST_SUMMARY 0x00000002 /* when BIST complete,
+ summary pass bit
+ contains AND of BIST
+ results of all 17
+ RAMS */
+#define RX_BIST_START 0x00000001 /* write 1 to start
+ BIST. self clears
+ on completion. */
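+
+/* editor's note: illustrative sketch, not part of the original file. it
+ * follows the BIST procedure described above: start the test, wait for
+ * START to self-clear, then check the summary bit. udelay() is assumed
+ * from <linux/delay.h> and the timeout bound is arbitrary.
+ */
+static inline int cas_example_run_rx_bist(void __iomem *regs)
+{
+	int i;
+
+	writel(RX_BIST_START, regs + REG_RX_BIST);
+	for (i = 0; i < 1000; i++) {
+		u32 v = readl(regs + REG_RX_BIST);
+
+		if (!(v & RX_BIST_START))
+			return (v & RX_BIST_SUMMARY) ? 0 : -1;
+		udelay(10);
+	}
+	return -1; /* timed out */
+}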
+
+/* next location in RX CTRL FIFO that will be loaded w/ data from RX IPP/read
+ * from to retrieve packet control info.
+ * DEFAULT: 0
+ */
+#define REG_RX_CTRL_FIFO_WRITE_PTR 0x4064 /* (ro) RX control FIFO
+ write ptr */
+#define REG_RX_CTRL_FIFO_READ_PTR 0x4068 /* (ro) RX control FIFO read
+ ptr */
+
+/* receive interrupt blanking. loaded each time interrupt alias register is
+ * read.
+ * DEFAULT: 0x0
+ */
+#define REG_RX_BLANK_ALIAS_READ 0x406C /* RX blanking register for
+ alias read */
+#define RX_BAR_INTR_PACKET_MASK 0x000001FF /* assert RX_DONE if #
+ completion writebacks
+ > # since last ISR
+ read. 0 = no
+ blanking. up to 2
+ packets per
+ completion wb. */
+#define RX_BAR_INTR_TIME_MASK 0x3FFFF000 /* assert RX_DONE if #
+ clocks > # since last
+ ISR read. each count
+ is 512 core clocks
+ (125MHz). 0 = no
+ blanking. */
+
+/* diagnostic access to RX FIFO. 32 LSB accessed via DATA_LOW. 32 MSB accessed
+ * via DATA_HI_T0 or DATA_HI_T1. TAG reads the tag bit. writing HI_T0
+ * will unset the tag bit while writing HI_T1 will set the tag bit. to reset
+ * to normal operation after diagnostics, write to address location 0x0.
+ * RX_DMA_EN bit must be set to 0x0 for RX FIFO PIO access. DATA_HI should
+ * be the last write access of a write sequence.
+ * DEFAULT: undefined
+ */
+#define REG_RX_FIFO_ADDR 0x4080 /* RX FIFO address */
+#define REG_RX_FIFO_TAG 0x4084 /* RX FIFO tag */
+#define REG_RX_FIFO_DATA_LOW 0x4088 /* RX FIFO data low */
+#define REG_RX_FIFO_DATA_HI_T0 0x408C /* RX FIFO data high T0 */
+#define REG_RX_FIFO_DATA_HI_T1 0x4090 /* RX FIFO data high T1 */
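+
+/* editor's note: illustrative sketch, not part of the original file. it
+ * follows the diagnostic write sequence described above: address first,
+ * DATA_HI as the final access (T0 chosen here to leave the tag bit
+ * unset). the caller is assumed to have already cleared RX_DMA_EN.
+ */
+static inline void cas_example_rx_fifo_diag_write(void __iomem *regs,
+						  u32 addr, u32 lo, u32 hi)
+{
+	writel(addr, regs + REG_RX_FIFO_ADDR);
+	writel(lo, regs + REG_RX_FIFO_DATA_LOW);
+	writel(hi, regs + REG_RX_FIFO_DATA_HI_T0);	/* last write */
+}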
+
+/* diagnostic access to RX CTRL FIFO. 8-bit FIFO_ADDR holds address of
+ * 81 bit control entry and 6 bit flow id. LOW and MID are both 32-bit
+ * accesses. HI is 7-bits with 6-bit flow id and 1 bit control
+ * word. RX_DMA_EN must be 0 for RX CTRL FIFO PIO access. DATA_HI
+ * should be last write access of the write sequence.
+ * DEFAULT: undefined
+ */
+#define REG_RX_CTRL_FIFO_ADDR 0x4094 /* RX Control FIFO and
+ Batching FIFO addr */
+#define REG_RX_CTRL_FIFO_DATA_LOW 0x4098 /* RX Control FIFO data
+ low */
+#define REG_RX_CTRL_FIFO_DATA_MID 0x409C /* RX Control FIFO data
+ mid */
+#define REG_RX_CTRL_FIFO_DATA_HI 0x4100 /* RX Control FIFO data
+ hi and flow id */
+#define RX_CTRL_FIFO_DATA_HI_CTRL 0x0001 /* upper bit of ctrl word */
+#define RX_CTRL_FIFO_DATA_HI_FLOW_MASK 0x007E /* flow id */
+
+/* diagnostic access to RX IPP FIFO. same semantics as RX_FIFO.
+ * DEFAULT: undefined
+ */
+#define REG_RX_IPP_FIFO_ADDR 0x4104 /* RX IPP FIFO address */
+#define REG_RX_IPP_FIFO_TAG 0x4108 /* RX IPP FIFO tag */
+#define REG_RX_IPP_FIFO_DATA_LOW 0x410C /* RX IPP FIFO data low */
+#define REG_RX_IPP_FIFO_DATA_HI_T0 0x4110 /* RX IPP FIFO data high
+ T0 */
+#define REG_RX_IPP_FIFO_DATA_HI_T1 0x4114 /* RX IPP FIFO data high
+ T1 */
+
+/* 64-bit pointer to receive data buffer in host memory used for headers and
+ * small packets. MSB in high register. loaded by DMA state machine and
+ * increments as DMA writes receive data. only 50 LSB are incremented. top
+ * 13 bits taken from RX descriptor.
+ * DEFAULT: undefined
+ */
+#define REG_RX_HEADER_PAGE_PTR_LOW 0x4118 /* (ro) RX header page ptr
+ low */
+#define REG_RX_HEADER_PAGE_PTR_HI 0x411C /* (ro) RX header page ptr
+ high */
+#define REG_RX_MTU_PAGE_PTR_LOW 0x4120 /* (ro) RX MTU page pointer
+ low */
+#define REG_RX_MTU_PAGE_PTR_HI 0x4124 /* (ro) RX MTU page pointer
+ high */
+
+/* PIO diagnostic access to RX reassembly DMA Table RAM. 6-bit register holds
+ * one of 64 79-bit locations in the RX Reassembly DMA table and the addr of
+ * one of the 64 byte locations in the Batching table. LOW holds 32 LSB.
+ * MID holds the next 32 LSB. HIGH holds the 15 MSB. RX_DMA_EN must be set
+ * to 0 for PIO access. DATA_HIGH should be last write of write sequence.
+ * layout:
+ * reassembly ptr [78:15] | reassembly index [14:1] | reassembly entry valid [0]
+ * DEFAULT: undefined
+ */
+#define REG_RX_TABLE_ADDR 0x4128 /* RX reassembly DMA table
+ address */
+#define RX_TABLE_ADDR_MASK 0x0000003F /* address mask */
+
+#define REG_RX_TABLE_DATA_LOW 0x412C /* RX reassembly DMA table
+ data low */
+#define REG_RX_TABLE_DATA_MID 0x4130 /* RX reassembly DMA table
+ data mid */
+#define REG_RX_TABLE_DATA_HI 0x4134 /* RX reassembly DMA table
+ data high */
+
+/* cassini+ only */
+/* 8KB aligned 64-bit pointer to base of RX rings. lower 13 bits hardwired to
+ * 0. same semantics as primary desc/complete rings.
+ */
+#define REG_PLUS_RX_DB1_LOW 0x4200 /* RX descriptor ring
+ 2 base low */
+#define REG_PLUS_RX_DB1_HI 0x4204 /* RX descriptor ring
+ 2 base high */
+#define REG_PLUS_RX_CB1_LOW 0x4208 /* RX completion ring
+ 2 base low. 4 total */
+#define REG_PLUS_RX_CB1_HI 0x420C /* RX completion ring
+ 2 base high. 4 total */
+#define REG_PLUS_RX_CBN_LOW(x) (REG_PLUS_RX_CB1_LOW + 8*((x) - 1))
+#define REG_PLUS_RX_CBN_HI(x) (REG_PLUS_RX_CB1_HI + 8*((x) - 1))
+#define REG_PLUS_RX_KICK1 0x4220 /* RX Kick 2 register */
+#define REG_PLUS_RX_COMP1 0x4224 /* (ro) RX completion 2
+ reg */
+#define REG_PLUS_RX_COMP1_HEAD 0x4228 /* (ro) RX completion 2
+ head reg. 4 total. */
+#define REG_PLUS_RX_COMP1_TAIL 0x422C /* RX completion 2
+ tail reg. 4 total. */
+#define REG_PLUS_RX_COMPN_HEAD(x) (REG_PLUS_RX_COMP1_HEAD + 8*((x) - 1))
+#define REG_PLUS_RX_COMPN_TAIL(x) (REG_PLUS_RX_COMP1_TAIL + 8*((x) - 1))
+#define REG_PLUS_RX_AE1_THRESH 0x4240 /* RX almost empty 2
+ thresholds */
+#define RX_AE1_THRESH_FREE_MASK RX_AE_THRESH_FREE_MASK
+#define RX_AE1_THRESH_FREE_SHIFT RX_AE_THRESH_FREE_SHIFT
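+
+/* editor's note: illustrative sketch, not part of the original file. the
+ * _CBN/_COMPN macros above index the cassini+ rings starting at 1, so
+ * ring 1 maps back onto the *_CB1/*_COMP1 registers themselves.
+ */
+static inline u32 cas_example_rx_comp_head(void __iomem *regs, int ring)
+{
+	return readl(regs + REG_PLUS_RX_COMPN_HEAD(ring));	/* ring >= 1 */
+}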
+
+/** header parser registers **/
+
+/* RX parser configuration register.
+ * DEFAULT: 0x1651004
+ */
+#define REG_HP_CFG 0x4140 /* header parser
+ configuration reg */
+#define HP_CFG_PARSE_EN 0x00000001 /* enab header parsing */
+#define HP_CFG_NUM_CPU_MASK 0x000000FC /* # processors
+ 0 = 64. 0x3f = 63 */
+#define HP_CFG_NUM_CPU_SHIFT 2
+#define HP_CFG_SYN_INC_MASK 0x00000100 /* SYN bit won't increment
+ TCP seq # by one when
+ stored in FDBM */
+#define HP_CFG_TCP_THRESH_MASK 0x000FFE00 /* # bytes of TCP data
+ needed to be considered
+ for reassembly */
+#define HP_CFG_TCP_THRESH_SHIFT 9
+
+/* access to RX Instruction RAM. 5-bit register/counter holds the addr of
+ * the entry to be read/written. 32 LSB in _DATA_LOW, the next 32 bits in
+ * _DATA_MID, and the remaining fields in _DATA_HI (see the masks below).
+ * RX_DMA_EN must be 0 for RX instr PIO access. DATA_HI should be last access
+ * of sequence.
+ * DEFAULT: undefined
+ */
+#define REG_HP_INSTR_RAM_ADDR 0x4144 /* HP instruction RAM
+ address */
+#define HP_INSTR_RAM_ADDR_MASK 0x01F /* 5-bit mask */
+#define REG_HP_INSTR_RAM_DATA_LOW 0x4148 /* HP instruction RAM
+ data low */
+#define HP_INSTR_RAM_LOW_OUTMASK_MASK 0x0000FFFF
+#define HP_INSTR_RAM_LOW_OUTMASK_SHIFT 0
+#define HP_INSTR_RAM_LOW_OUTSHIFT_MASK 0x000F0000
+#define HP_INSTR_RAM_LOW_OUTSHIFT_SHIFT 16
+#define HP_INSTR_RAM_LOW_OUTEN_MASK 0x00300000
+#define HP_INSTR_RAM_LOW_OUTEN_SHIFT 20
+#define HP_INSTR_RAM_LOW_OUTARG_MASK 0xFFC00000
+#define HP_INSTR_RAM_LOW_OUTARG_SHIFT 22
+#define REG_HP_INSTR_RAM_DATA_MID 0x414C /* HP instruction RAM
+ data mid */
+#define HP_INSTR_RAM_MID_OUTARG_MASK 0x00000003
+#define HP_INSTR_RAM_MID_OUTARG_SHIFT 0
+#define HP_INSTR_RAM_MID_OUTOP_MASK 0x0000003C
+#define HP_INSTR_RAM_MID_OUTOP_SHIFT 2
+#define HP_INSTR_RAM_MID_FNEXT_MASK 0x000007C0
+#define HP_INSTR_RAM_MID_FNEXT_SHIFT 6
+#define HP_INSTR_RAM_MID_FOFF_MASK 0x0003F800
+#define HP_INSTR_RAM_MID_FOFF_SHIFT 11
+#define HP_INSTR_RAM_MID_SNEXT_MASK 0x007C0000
+#define HP_INSTR_RAM_MID_SNEXT_SHIFT 18
+#define HP_INSTR_RAM_MID_SOFF_MASK 0x3F800000
+#define HP_INSTR_RAM_MID_SOFF_SHIFT 23
+#define HP_INSTR_RAM_MID_OP_MASK 0xC0000000
+#define HP_INSTR_RAM_MID_OP_SHIFT 30
+#define REG_HP_INSTR_RAM_DATA_HI 0x4150 /* HP instruction RAM
+ data high */
+#define HP_INSTR_RAM_HI_VAL_MASK 0x0000FFFF
+#define HP_INSTR_RAM_HI_VAL_SHIFT 0
+#define HP_INSTR_RAM_HI_MASK_MASK 0xFFFF0000
+#define HP_INSTR_RAM_HI_MASK_SHIFT 16
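+
+/* editor's note: illustrative sketch, not part of the original file. it
+ * follows the PIO sequence described above: select the entry, write LOW
+ * and MID, and finish with DATA_HI. RX_DMA_EN is assumed already 0 and
+ * the word values are assumed pre-packed with the field masks above.
+ */
+static inline void cas_example_hp_instr_write(void __iomem *regs, u32 index,
+					      u32 lo, u32 mid, u32 hi)
+{
+	writel(index & HP_INSTR_RAM_ADDR_MASK, regs + REG_HP_INSTR_RAM_ADDR);
+	writel(lo, regs + REG_HP_INSTR_RAM_DATA_LOW);
+	writel(mid, regs + REG_HP_INSTR_RAM_DATA_MID);
+	writel(hi, regs + REG_HP_INSTR_RAM_DATA_HI);	/* must be last */
+}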
+
+/* PIO access into RX Header parser data RAM and flow database.
+ * 11-bit register. Data fills the LSB portion of bus if less than 32 bits.
+ * DATA_RAM: write RAM_FDB_DATA with index to access DATA_RAM.
+ * RAM bytes = 4*(x - 1) + [3:0]. e.g., 0 -> [3:0], 31 -> [123:120]
+ * FLOWDB: write DATA_RAM_FDB register and then read/write FDB1-12 to access
+ * flow database.
+ * RX_DMA_EN must be 0 for RX parser RAM PIO access. RX Parser RAM data reg
+ * should be the last write access of the write sequence.
+ * DEFAULT: undefined
+ */
+#define REG_HP_DATA_RAM_FDB_ADDR 0x4154 /* HP data and FDB
+ RAM address */
+#define HP_DATA_RAM_FDB_DATA_MASK 0x001F /* select 1 of 86 byte
+ locations in header
+ parser data ram to
+ read/write */
+#define HP_DATA_RAM_FDB_FDB_MASK 0x3F00 /* 1 of 64 353-bit locations
+ in the flow database */
+#define REG_HP_DATA_RAM_DATA 0x4158 /* HP data RAM data */
+
+/* HP flow database registers: 1 - 12, 0x415C - 0x4188, 4 8-bit bytes
+ * FLOW_DB(1) = IP_SA[127:96], FLOW_DB(2) = IP_SA[95:64]
+ * FLOW_DB(3) = IP_SA[63:32], FLOW_DB(4) = IP_SA[31:0]
+ * FLOW_DB(5) = IP_DA[127:96], FLOW_DB(6) = IP_DA[95:64]
+ * FLOW_DB(7) = IP_DA[63:32], FLOW_DB(8) = IP_DA[31:0]
+ * FLOW_DB(9) = {TCP_SP[15:0],TCP_DP[15:0]}
+ * FLOW_DB(10) = bit 0 has value for flow valid
+ * FLOW_DB(11) = TCP_SEQ[63:32], FLOW_DB(12) = TCP_SEQ[31:0]
+ */
+#define REG_HP_FLOW_DB0 0x415C /* HP flow database 1 reg */
+#define REG_HP_FLOW_DBN(x) (REG_HP_FLOW_DB0 + (x)*4)
+
+/* diagnostics for RX Header Parser block.
+ * ASUN: the header parser state machine register is used for diagnostics
+ * purposes. however, the spec doesn't have any details on it.
+ */
+#define REG_HP_STATE_MACHINE 0x418C /* (ro) HP state machine */
+#define REG_HP_STATUS0 0x4190 /* (ro) HP status 0 */
+#define HP_STATUS0_SAP_MASK 0xFFFF0000 /* SAP */
+#define HP_STATUS0_L3_OFF_MASK 0x0000FE00 /* L3 offset */
+#define HP_STATUS0_LB_CPUNUM_MASK 0x000001F8 /* load balancing CPU
+ number */
+#define HP_STATUS0_HRP_OPCODE_MASK 0x00000007 /* HRP opcode */
+
+#define REG_HP_STATUS1 0x4194 /* (ro) HP status 1 */
+#define HP_STATUS1_ACCUR2_MASK 0xE0000000 /* accu R2[6:4] */
+#define HP_STATUS1_FLOWID_MASK 0x1F800000 /* flow id */
+#define HP_STATUS1_TCP_OFF_MASK 0x007F0000 /* tcp payload offset */
+#define HP_STATUS1_TCP_SIZE_MASK 0x0000FFFF /* tcp payload size */
+
+#define REG_HP_STATUS2 0x4198 /* (ro) HP status 2 */
+#define HP_STATUS2_ACCUR2_MASK 0xF0000000 /* accu R2[3:0] */
+#define HP_STATUS2_CSUM_OFF_MASK 0x07F00000 /* checksum start
+ offset */
+#define HP_STATUS2_ACCUR1_MASK 0x000FE000 /* accu R1 */
+#define HP_STATUS2_FORCE_DROP 0x00001000 /* force drop */
+#define HP_STATUS2_BWO_REASSM 0x00000800 /* batching w/o
+ reassembly */
+#define HP_STATUS2_JH_SPLIT_EN 0x00000400 /* jumbo header split
+ enable */
+#define HP_STATUS2_FORCE_TCP_NOCHECK 0x00000200 /* force tcp no payload
+ check */
+#define HP_STATUS2_DATA_MASK_ZERO 0x00000100 /* mask of data length
+ equal to zero */
+#define HP_STATUS2_FORCE_TCP_CHECK 0x00000080 /* force tcp payload
+ chk */
+#define HP_STATUS2_MASK_TCP_THRESH 0x00000040 /* mask of payload
+ threshold */
+#define HP_STATUS2_NO_ASSIST 0x00000020 /* no assist */
+#define HP_STATUS2_CTRL_PACKET_FLAG 0x00000010 /* control packet flag */
+#define HP_STATUS2_TCP_FLAG_CHECK 0x00000008 /* tcp flag check */
+#define HP_STATUS2_SYN_FLAG 0x00000004 /* syn flag */
+#define HP_STATUS2_TCP_CHECK 0x00000002 /* tcp payload chk */
+#define HP_STATUS2_TCP_NOCHECK 0x00000001 /* tcp no payload chk */
+
+/* BIST for header parser(HP) and flow database memories (FDBM). set _START
+ * to start BIST. controller clears _START on completion. _START can also
+ * be cleared to force termination of BIST. a bit set indicates that that
+ * memory passed its BIST.
+ */
+#define REG_HP_RAM_BIST 0x419C /* HP RAM BIST reg */
+#define HP_RAM_BIST_HP_DATA_PASS 0x80000000 /* HP data ram */
+#define HP_RAM_BIST_HP_INSTR0_PASS 0x40000000 /* HP instr ram 0 */
+#define HP_RAM_BIST_HP_INSTR1_PASS 0x20000000 /* HP instr ram 1 */
+#define HP_RAM_BIST_HP_INSTR2_PASS 0x10000000 /* HP instr ram 2 */
+#define HP_RAM_BIST_FDBM_AGE0_PASS 0x08000000 /* FDBM aging RAM0 */
+#define HP_RAM_BIST_FDBM_AGE1_PASS 0x04000000 /* FDBM aging RAM1 */
+#define HP_RAM_BIST_FDBM_FLOWID00_PASS 0x02000000 /* FDBM flowid RAM0
+ bank 0 */
+#define HP_RAM_BIST_FDBM_FLOWID10_PASS 0x01000000 /* FDBM flowid RAM1
+ bank 0 */
+#define HP_RAM_BIST_FDBM_FLOWID20_PASS 0x00800000 /* FDBM flowid RAM2
+ bank 0 */
+#define HP_RAM_BIST_FDBM_FLOWID30_PASS 0x00400000 /* FDBM flowid RAM3
+ bank 0 */
+#define HP_RAM_BIST_FDBM_FLOWID01_PASS 0x00200000 /* FDBM flowid RAM0
+ bank 1 */
+#define HP_RAM_BIST_FDBM_FLOWID11_PASS 0x00100000 /* FDBM flowid RAM1
+ bank 1 */
+#define HP_RAM_BIST_FDBM_FLOWID21_PASS 0x00080000 /* FDBM flowid RAM2
+ bank 1 */
+#define HP_RAM_BIST_FDBM_FLOWID31_PASS 0x00040000 /* FDBM flowid RAM3
+ bank 1 */
+#define HP_RAM_BIST_FDBM_TCPSEQ_PASS 0x00020000 /* FDBM tcp sequence
+ RAM */
+#define HP_RAM_BIST_SUMMARY 0x00000002 /* all BIST tests */
+#define HP_RAM_BIST_START 0x00000001 /* start/stop BIST */
+
+
+/** MAC registers. **/
+/* reset bits are set using a PIO write and self-cleared after the command
+ * execution has completed.
+ */
+#define REG_MAC_TX_RESET 0x6000 /* TX MAC software reset
+ command (default: 0x0) */
+#define REG_MAC_RX_RESET 0x6004 /* RX MAC software reset
+ command (default: 0x0) */
+/* execute a pause flow control frame transmission
+ DEFAULT: 0x0XXXX */
+#define REG_MAC_SEND_PAUSE 0x6008 /* send pause command reg */
+#define MAC_SEND_PAUSE_TIME_MASK 0x0000FFFF /* value of pause time
+ to be sent on network
+ in units of slot
+ times */
+#define MAC_SEND_PAUSE_SEND 0x00010000 /* send pause flow ctrl
+ frame on network */
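+
+/* editor's note: illustrative sketch, not part of the original file. it
+ * requests transmission of a pause frame with the given slot-time count;
+ * SEND_PAUSE_EN is assumed to be set in the MAC control config register.
+ */
+static inline void cas_example_send_pause(void __iomem *regs, u16 slots)
+{
+	writel(MAC_SEND_PAUSE_SEND | (slots & MAC_SEND_PAUSE_TIME_MASK),
+	       regs + REG_MAC_SEND_PAUSE);
+}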
+
+/* a set bit indicates that an event occurred. bits are auto-cleared when
+ * the status register is read and each has a corresponding bit in the mask
+ * register. events will trigger an interrupt if the corresponding mask bit
+ * is 0.
+ * status register default: 0x00000000
+ * mask register default = 0xFFFFFFFF on reset
+ */
+#define REG_MAC_TX_STATUS 0x6010 /* TX MAC status reg */
+#define MAC_TX_FRAME_XMIT 0x0001 /* successful frame
+ transmission */
+#define MAC_TX_UNDERRUN 0x0002 /* terminated frame
+ transmission due to
+ data starvation in the
+ xmit data path */
+#define MAC_TX_MAX_PACKET_ERR 0x0004 /* frame exceeds max allowed
+ length passed to TX MAC
+ by the DMA engine */
+#define MAC_TX_COLL_NORMAL 0x0008 /* rollover of the normal
+ collision counter */
+#define MAC_TX_COLL_EXCESS 0x0010 /* rollover of the excessive
+ collision counter */
+#define MAC_TX_COLL_LATE 0x0020 /* rollover of the late
+ collision counter */
+#define MAC_TX_COLL_FIRST 0x0040 /* rollover of the first
+ collision counter */
+#define MAC_TX_DEFER_TIMER 0x0080 /* rollover of the defer
+ timer */
+#define MAC_TX_PEAK_ATTEMPTS 0x0100 /* rollover of the peak
+ attempts counter */
+
+#define REG_MAC_RX_STATUS 0x6014 /* RX MAC status reg */
+#define MAC_RX_FRAME_RECV 0x0001 /* successful receipt of
+ a frame */
+#define MAC_RX_OVERFLOW 0x0002 /* dropped frame due to
+ RX FIFO overflow */
+#define MAC_RX_FRAME_COUNT 0x0004 /* rollover of receive frame
+ counter */
+#define MAC_RX_ALIGN_ERR 0x0008 /* rollover of alignment
+ error counter */
+#define MAC_RX_CRC_ERR 0x0010 /* rollover of crc error
+ counter */
+#define MAC_RX_LEN_ERR 0x0020 /* rollover of length
+ error counter */
+#define MAC_RX_VIOL_ERR 0x0040 /* rollover of code
+ violation error counter */
+
+/* DEFAULT: 0xXXXX0000 on reset */
+#define REG_MAC_CTRL_STATUS 0x6018 /* MAC control status reg */
+#define MAC_CTRL_PAUSE_RECEIVED 0x00000001 /* successful
+ reception of a
+ pause control
+ frame */
+#define MAC_CTRL_PAUSE_STATE 0x00000002 /* MAC has made a
+ transition from
+ "not paused" to
+ "paused" */
+#define MAC_CTRL_NOPAUSE_STATE 0x00000004 /* MAC has made a
+ transition from
+ "paused" to "not
+ paused" */
+#define MAC_CTRL_PAUSE_TIME_MASK 0xFFFF0000 /* value of pause time
+ operand that was
+ received in the last
+ pause flow control
+ frame */
+
+/* layout identical to TX MAC[8:0] */
+#define REG_MAC_TX_MASK 0x6020 /* TX MAC mask reg */
+/* layout identical to RX MAC[6:0] */
+#define REG_MAC_RX_MASK 0x6024 /* RX MAC mask reg */
+/* layout identical to CTRL MAC[2:0] */
+#define REG_MAC_CTRL_MASK 0x6028 /* MAC control mask reg */
+
+/* to ensure proper operation, CFG_EN must be cleared to 0 and a delay
+ * imposed before any write to other bits in the TX_MAC_CFG register or to
+ * any of the MAC parameters is performed. the delay depends on the time
+ * required to
+ * transmit a maximum size frame (= MAC_FRAMESIZE_MAX*8/Mbps). e.g.,
+ * the delay for a 1518-byte frame on a 100Mbps network is 125us.
+ * alternatively, just poll TX_CFG_EN until it reads back as 0.
+ * NOTE: on half-duplex 1Gbps, TX_CFG_CARRIER_EXTEND and
+ * RX_CFG_CARRIER_EXTEND should be set and the SLOT_TIME register should
+ * be 0x200 (slot time of 512 bytes)
+ */
+#define REG_MAC_TX_CFG 0x6030 /* TX MAC config reg */
+#define MAC_TX_CFG_EN 0x0001 /* enable TX MAC. 0 will
+ force TXMAC state
+ machine to remain in
+ idle state or to
+ transition to idle state
+ on completion of an
+ ongoing packet. */
+#define MAC_TX_CFG_IGNORE_CARRIER 0x0002 /* disable CSMA/CD deferral
+ process. set to 1 when
+ full duplex and 0 when
+ half duplex */
+#define MAC_TX_CFG_IGNORE_COLL 0x0004 /* disable CSMA/CD backoff
+ algorithm. set to 1 when
+ full duplex and 0 when
+ half duplex */
+#define MAC_TX_CFG_IPG_EN 0x0008 /* enable extension of the
+ Rx-to-TX IPG. after
+ receiving a frame, TX
+ MAC will reset its
+ deferral process to
+ carrier sense for the
+ amount of time = IPG0 +
+ IPG1 and commit to
+ transmission for time
+ specified in IPG2. when
+ 0 or when xmitting frames
+ back-to-back (Tx-to-Tx
+ IPG), TX MAC ignores
+ IPG0 and will only use
+ IPG1 for deferral time.
+ IPG2 still used. */
+#define MAC_TX_CFG_NEVER_GIVE_UP_EN 0x0010 /* TX MAC will not easily
+ give up on frame
+ xmission. if backoff
+ algorithm reaches the
+ ATTEMPT_LIMIT, it will
+ clear attempts counter
+ and continue trying to
+ send the frame as
+ specified by
+ GIVE_UP_LIM. when 0,
+ TX MAC will execute
+ standard CSMA/CD prot. */
+#define MAC_TX_CFG_NEVER_GIVE_UP_LIM 0x0020 /* when set, TX MAC will
+ continue to try to xmit
+ until successful. when
+ 0, TX MAC will continue
+ to try xmitting until
+ successful or backoff
+ algorithm reaches
+ ATTEMPT_LIMIT*16 */
+#define MAC_TX_CFG_NO_BACKOFF 0x0040 /* modify CSMA/CD to disable
+ backoff algorithm. TX
+ MAC will not back off
+ after a xmission attempt
+ that resulted in a
+ collision. */
+#define MAC_TX_CFG_SLOW_DOWN 0x0080 /* modify CSMA/CD so that
+ deferral process is reset
+ in response to carrier
+ sense during the entire
+ duration of IPG. TX MAC
+ will only commit to frame
+ xmission after frame
+ xmission has actually
+ begun. */
+#define MAC_TX_CFG_NO_FCS 0x0100 /* TX MAC will not generate
+ CRC for all xmitted
+ packets. when clear, CRC
+ generation is dependent
+ upon NO_CRC bit in the
+ xmit control word from
+ TX DMA */
+#define MAC_TX_CFG_CARRIER_EXTEND 0x0200 /* enables xmit part of the
+ carrier extension
+ feature. this allows for
+ longer collision domains
+ by extending the carrier
+ and collision window
+ from the end of FCS until
+ the end of the slot time
+ if necessary. Required
+ for half-duplex at 1Gbps,
+ clear otherwise. */
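+
+/* editor's note: illustrative sketch, not part of the original file. it
+ * implements the quiescing procedure from the comment above: clear
+ * TX_CFG_EN and poll until it reads back 0 before touching other TX MAC
+ * parameters. the timeout bound is arbitrary.
+ */
+static inline int cas_example_tx_mac_quiesce(void __iomem *regs)
+{
+	int i;
+
+	writel(readl(regs + REG_MAC_TX_CFG) & ~MAC_TX_CFG_EN,
+	       regs + REG_MAC_TX_CFG);
+	for (i = 0; i < 1000; i++) {
+		if (!(readl(regs + REG_MAC_TX_CFG) & MAC_TX_CFG_EN))
+			return 0;
+		udelay(100);
+	}
+	return -1;
+}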
+
+/* when CRC is not stripped, reassembly packets will not contain the CRC.
+ * these will be stripped by HRP because it reassembles layer 4 data, and the
+ * CRC is layer 2. however, non-reassembly packets will still contain the CRC
+ * when passed to the host. to ensure proper operation, need to wait 3.2ms
+ * after clearing RX_CFG_EN before writing to any other RX MAC registers
+ * or other MAC parameters. alternatively, poll RX_CFG_EN until it clears
+ * to 0. similarly, HASH_FILTER_EN and ADDR_FILTER_EN have the same
+ * restrictions as CFG_EN.
+ */
+#define REG_MAC_RX_CFG 0x6034 /* RX MAC config reg */
+#define MAC_RX_CFG_EN 0x0001 /* enable RX MAC */
+#define MAC_RX_CFG_STRIP_PAD 0x0002 /* always program to 0.
+ feature not supported */
+#define MAC_RX_CFG_STRIP_FCS 0x0004 /* RX MAC will strip the
+ last 4 bytes of a
+ received frame. */
+#define MAC_RX_CFG_PROMISC_EN 0x0008 /* promiscuous mode */
+#define MAC_RX_CFG_PROMISC_GROUP_EN 0x0010 /* accept all valid
+ multicast frames (group
+ bit in DA field set) */
+#define MAC_RX_CFG_HASH_FILTER_EN 0x0020 /* use hash table to filter
+ multicast addresses */
+#define MAC_RX_CFG_ADDR_FILTER_EN 0x0040 /* cause RX MAC to use
+ address filtering regs
+ to filter both unicast
+ and multicast
+ addresses */
+#define MAC_RX_CFG_DISABLE_DISCARD 0x0080 /* pass errored frames to
+ RX DMA by setting BAD
+ bit but not Abort bit
+ in the status. CRC,
+ framing, and length errs
+ will not increment
+ error counters. frames
+ which don't match dest
+ addr will be passed up
+ w/ BAD bit set. */
+#define MAC_RX_CFG_CARRIER_EXTEND 0x0100 /* enable reception of
+ packet bursts generated
+ by carrier extension
+ with packet bursting
+ senders. only applies
+ to half-duplex 1Gbps */
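+
+/* editor's note: illustrative sketch, not part of the original file. it
+ * toggles promiscuous mode while honoring the RX_CFG_EN rule described
+ * above (disable, poll until clear, modify, re-enable).
+ */
+static inline void cas_example_set_promisc(void __iomem *regs, int on)
+{
+	u32 cfg = readl(regs + REG_MAC_RX_CFG);
+
+	writel(cfg & ~MAC_RX_CFG_EN, regs + REG_MAC_RX_CFG);
+	while (readl(regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN)
+		udelay(10);	/* a real driver would bound this loop */
+	if (on)
+		cfg |= MAC_RX_CFG_PROMISC_EN;
+	else
+		cfg &= ~MAC_RX_CFG_PROMISC_EN;
+	writel(cfg | MAC_RX_CFG_EN, regs + REG_MAC_RX_CFG);
+}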
+
+/* DEFAULT: 0x0 */
+#define REG_MAC_CTRL_CFG 0x6038 /* MAC control config reg */
+#define MAC_CTRL_CFG_SEND_PAUSE_EN 0x0001 /* respond to requests for
+ sending pause flow ctrl
+ frames */
+#define MAC_CTRL_CFG_RECV_PAUSE_EN 0x0002 /* respond to received
+ pause flow ctrl frames */
+#define MAC_CTRL_CFG_PASS_CTRL 0x0004 /* pass valid MAC ctrl
+ packets to RX DMA */
+
+/* to ensure proper operation, a global initialization sequence should be
+ * performed when a loopback config is entered or exited. if programmed after
+ * a hw or global sw reset, RX/TX MAC software reset and initialization
+ * should be done to ensure stable clocking.
+ * DEFAULT: 0x0
+ */
+#define REG_MAC_XIF_CFG 0x603C /* XIF config reg */
+#define MAC_XIF_TX_MII_OUTPUT_EN 0x0001 /* enable output drivers
+ on MII xmit bus */
+#define MAC_XIF_MII_INT_LOOPBACK 0x0002 /* loopback GMII xmit data
+ path to GMII recv data
+ path. phy mode register
+ clock selection must be
+ set to GMII mode and
+ GMII_MODE should be set
+ to 1. in loopback mode,
+ REFCLK will drive the
+ entire mac core. 0 for
+ normal operation. */
+#define MAC_XIF_DISABLE_ECHO 0x0004 /* disables receive data
+ path during packet
+ xmission. clear to 0
+ in any full duplex mode,
+ in any loopback mode,
+ or in half-duplex SERDES
+ or SLINK modes. set when
+ in half-duplex when
+ using external phy. */
+#define MAC_XIF_GMII_MODE 0x0008 /* MAC operates with GMII
+ clocks and datapath */
+#define MAC_XIF_MII_BUFFER_OUTPUT_EN 0x0010 /* MII_BUF_EN pin. enable
+ external tristate buffer
+ on the MII receive
+ bus. */
+#define MAC_XIF_LINK_LED 0x0020 /* LINKLED# active (low) */
+#define MAC_XIF_FDPLX_LED 0x0040 /* FDPLXLED# active (low) */
+
+#define REG_MAC_IPG0 0x6040 /* inter-packet gap0 reg.
+ recommended: 0x00 */
+#define REG_MAC_IPG1 0x6044 /* inter-packet gap1 reg
+ recommended: 0x08 */
+#define REG_MAC_IPG2 0x6048 /* inter-packet gap2 reg
+ recommended: 0x04 */
+#define REG_MAC_SLOT_TIME 0x604C /* slot time reg
+ recommended: 0x40 */
+#define REG_MAC_FRAMESIZE_MIN 0x6050 /* min frame size reg
+ recommended: 0x40 */
+
+/* FRAMESIZE_MAX holds both the max frame size as well as the max burst size.
+ * recommended value: 0x2000.05EE (max burst 0x2000, max frame 0x05EE)
+ */
+#define REG_MAC_FRAMESIZE_MAX 0x6054 /* max frame size reg */
+#define MAC_FRAMESIZE_MAX_BURST_MASK 0x3FFF0000 /* max burst size */
+#define MAC_FRAMESIZE_MAX_BURST_SHIFT 16
+#define MAC_FRAMESIZE_MAX_FRAME_MASK 0x00007FFF /* max frame size */
+#define MAC_FRAMESIZE_MAX_FRAME_SHIFT 0
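+
+/* editor's note: illustrative sketch, not part of the original file. it
+ * packs the recommended value quoted above: max burst 0x2000, max frame
+ * 0x05EE (1518 bytes).
+ */
+static inline u32 cas_example_framesize_max(void)
+{
+	return (0x2000 << MAC_FRAMESIZE_MAX_BURST_SHIFT) |
+	       (0x05EE << MAC_FRAMESIZE_MAX_FRAME_SHIFT);
+}
+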
+#define REG_MAC_PA_SIZE 0x6058 /* PA size reg. number of
+ preamble bytes that the
+ TX MAC will xmit at the
+ beginning of each frame
+ value should be 2 or
+ greater. recommended
+ value: 0x07 */
+#define REG_MAC_JAM_SIZE 0x605C /* jam size reg. duration
+ of jam in units of media
+ byte time. recommended
+ value: 0x04 */
+#define REG_MAC_ATTEMPT_LIMIT 0x6060 /* attempt limit reg. #
+ of attempts TX MAC will
+ make to xmit a frame
+ before it resets its
+ attempts counter. after
+ the limit has been
+ reached, TX MAC may or
+ may not drop the frame
+ dependent upon value
+ in TX_MAC_CFG.
+ recommended
+ value: 0x10 */
+#define REG_MAC_CTRL_TYPE 0x6064 /* MAC control type reg.
+ type field of a MAC
+ ctrl frame. recommended
+ value: 0x8808 */
+
+/* mac address registers: 0 - 44, 0x6080 - 0x6130, 4 8-bit bytes.
+ * register contains comparison
+ * 0 16 MSB of primary MAC addr [47:32] of DA field
+ * 1 16 middle bits "" [31:16] of DA field
+ * 2 16 LSB "" [15:0] of DA field
+ * 3*x 16 MSB of alt MAC addr 1-15 [47:32] of DA field
+ * 4*x 16 middle bits "" [31:16]
+ * 5*x 16 LSB "" [15:0]
+ * 42 16 MSB of MAC CTRL addr [47:32] of DA.
+ * 43 16 middle bits "" [31:16]
+ * 44 16 LSB "" [15:0]
+ * MAC CTRL addr must be the reserved multicast addr for MAC CTRL frames.
+ * if there is a match, MAC will set the bit for alternative address
+ * filter pass [15]
+ *
+ * here is the map of registers given MAC address notation: a:b:c:d:e:f
+ * ab cd ef
+ * primary addr reg 2 reg 1 reg 0
+ * alt addr 1 reg 5 reg 4 reg 3
+ * alt addr x reg 5*x reg 4*x reg 3*x
+ * ctrl addr reg 44 reg 43 reg 42
+ */
+#define REG_MAC_ADDR0 0x6080 /* MAC address 0 reg */
+#define REG_MAC_ADDRN(x) (REG_MAC_ADDR0 + (x)*4)
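+
+/* editor's note: illustrative sketch, not part of the original file. it
+ * follows the register map above for a primary address a:b:c:d:e:f held
+ * in addr[0..5]: reg 0 takes "ef", reg 1 "cd", and reg 2 "ab".
+ */
+static inline void cas_example_set_primary_mac(void __iomem *regs,
+					       const u8 *addr)
+{
+	writel((addr[4] << 8) | addr[5], regs + REG_MAC_ADDRN(0));
+	writel((addr[2] << 8) | addr[3], regs + REG_MAC_ADDRN(1));
+	writel((addr[0] << 8) | addr[1], regs + REG_MAC_ADDRN(2));
+}
+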
+#define REG_MAC_ADDR_FILTER0 0x614C /* address filter 0 reg
+ [47:32] */
+#define REG_MAC_ADDR_FILTER1 0x6150 /* address filter 1 reg
+ [31:16] */
+#define REG_MAC_ADDR_FILTER2 0x6154 /* address filter 2 reg
+ [15:0] */
+#define REG_MAC_ADDR_FILTER2_1_MASK 0x6158 /* address filter 2 and 1
+ mask reg. 8-bit reg
+ contains nibble mask for
+ reg 2 and 1. */
+#define REG_MAC_ADDR_FILTER0_MASK 0x615C /* address filter 0 mask
+ reg */
+
+/* hash table registers: 0 - 15, 0x6160 - 0x619C, 4 8-bit bytes
+ * 16-bit registers contain bits of the hash table.
+ * reg x -> [16*(15 - x) + 15 : 16*(15 - x)].
+ * e.g., 15 -> [15:0], 0 -> [255:240]
+ */
+#define REG_MAC_HASH_TABLE0 0x6160 /* hash table 0 reg */
+#define REG_MAC_HASH_TABLEN(x) (REG_MAC_HASH_TABLE0 + (x)*4)
+
+/* statistics registers. these registers generate an interrupt on
+ * overflow. recommended initialization: 0x0000. most are 16-bits except
+ * for PEAK_ATTEMPTS register which is 8 bits.
+ */
+#define REG_MAC_COLL_NORMAL 0x61A0 /* normal collision
+ counter. */
+#define REG_MAC_COLL_FIRST 0x61A4 /* first attempt
+ successful collision
+ counter */
+#define REG_MAC_COLL_EXCESS 0x61A8 /* excessive collision
+ counter */
+#define REG_MAC_COLL_LATE 0x61AC /* late collision counter */
+#define REG_MAC_TIMER_DEFER 0x61B0 /* defer timer. time base
+ is the media byte
+ clock/256 */
+#define REG_MAC_ATTEMPTS_PEAK 0x61B4 /* peak attempts reg */
+#define REG_MAC_RECV_FRAME 0x61B8 /* receive frame counter */
+#define REG_MAC_LEN_ERR 0x61BC /* length error counter */
+#define REG_MAC_ALIGN_ERR 0x61C0 /* alignment error counter */
+#define REG_MAC_FCS_ERR 0x61C4 /* FCS error counter */
+#define REG_MAC_RX_CODE_ERR 0x61C8 /* RX code violation
+ error counter */
+
+/* misc registers */
+#define REG_MAC_RANDOM_SEED 0x61CC /* random number seed reg.
+ 10-bit register used as a
+ seed for the random number
+ generator for the CSMA/CD
+ backoff algorithm. only
+ programmed after power-on
+ reset and should be a
+ random value which has a
+ high likelihood of being
+ unique for each MAC
+ attached to a network
+ segment (e.g., 10 LSB of
+ MAC address) */
+
+/* ASUN: there's a PAUSE_TIMER (ro) described, but it's not in the address
+ * map
+ */
+
+/* 27-bit register has the current state for key state machines in the MAC */
+#define REG_MAC_STATE_MACHINE 0x61D0 /* (ro) state machine reg */
+#define MAC_SM_RLM_MASK 0x07800000
+#define MAC_SM_RLM_SHIFT 23
+#define MAC_SM_RX_FC_MASK 0x00700000
+#define MAC_SM_RX_FC_SHIFT 20
+#define MAC_SM_TLM_MASK 0x000F0000
+#define MAC_SM_TLM_SHIFT 16
+#define MAC_SM_ENCAP_SM_MASK 0x0000F000
+#define MAC_SM_ENCAP_SM_SHIFT 12
+#define MAC_SM_TX_REQ_MASK 0x00000C00
+#define MAC_SM_TX_REQ_SHIFT 10
+#define MAC_SM_TX_FC_MASK 0x000003C0
+#define MAC_SM_TX_FC_SHIFT 6
+#define MAC_SM_FIFO_WRITE_SEL_MASK 0x00000038
+#define MAC_SM_FIFO_WRITE_SEL_SHIFT 3
+#define MAC_SM_TX_FIFO_EMPTY_MASK 0x00000007
+#define MAC_SM_TX_FIFO_EMPTY_SHIFT 0
+
+/** MIF registers. the MIF can be programmed in either bit-bang or
+ * frame mode.
+ **/
+#define REG_MIF_BIT_BANG_CLOCK 0x6200 /* MIF bit-bang clock.
+ 1 -> 0 will generate a
+ rising edge. 0 -> 1 will
+ generate a falling edge. */
+#define REG_MIF_BIT_BANG_DATA 0x6204 /* MIF bit-bang data. 1-bit
+ register generates data */
+#define REG_MIF_BIT_BANG_OUTPUT_EN 0x6208 /* MIF bit-bang output
+ enable. enable when
+ xmitting data from MIF to
+ transceiver. */
+
+/* 32-bit register serves as an instruction register when the MIF is
+ * programmed in frame mode. load this register w/ a valid instruction
+ * (as per IEEE 802.3u MII spec). poll this register to check for instruction
+ * execution completion. during a read operation, this register will also
+ * contain the 16-bit data returned by the transceiver. unless specified
+ * otherwise, fields are considered "don't care" when polling for
+ * completion.
+ */
+#define REG_MIF_FRAME 0x620C /* MIF frame/output reg */
+#define MIF_FRAME_START_MASK 0xC0000000 /* start of frame.
+ load w/ 01 when
+ issuing an instr */
+#define MIF_FRAME_ST 0x40000000 /* STart of frame */
+#define MIF_FRAME_OPCODE_MASK 0x30000000 /* opcode. 01 for a
+ write. 10 for a
+ read */
+#define MIF_FRAME_OP_READ 0x20000000 /* read OPcode */
+#define MIF_FRAME_OP_WRITE 0x10000000 /* write OPcode */
+#define MIF_FRAME_PHY_ADDR_MASK 0x0F800000 /* phy address. when
+ issuing an instr,
+ this field should be
+ loaded w/ the XCVR
+ addr */
+#define MIF_FRAME_PHY_ADDR_SHIFT 23
+#define MIF_FRAME_REG_ADDR_MASK 0x007C0000 /* register address.
+ when issuing an instr,
+ addr of register
+ to be read/written */
+#define MIF_FRAME_REG_ADDR_SHIFT 18
+#define MIF_FRAME_TURN_AROUND_MSB 0x00020000 /* turn around, MSB.
+ when issuing an instr,
+ set this bit to 1 */
+#define MIF_FRAME_TURN_AROUND_LSB 0x00010000 /* turn around, LSB.
+ when issuing an instr,
+ set this bit to 0.
+ when polling for
+ completion, 1 means
+ that instr execution
+ has been completed */
+#define MIF_FRAME_DATA_MASK 0x0000FFFF /* instruction payload
+ load with 16-bit data
+ to be written in
+ transceiver reg for a
+ write. doesn't matter
+ in a read. when
+ polling for
+ completion, field is
+ "don't care" for write
+ and 16-bit data
+ returned by the
+ transceiver for a
+ read (if valid bit
+ is set) */
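+
+/* editor's note: illustrative sketch, not part of the original file. it
+ * performs a frame-mode read as described above: load ST, the read
+ * opcode, phy/register addresses and the turn-around MSB, then poll the
+ * turn-around LSB for completion. the timeout bound is arbitrary.
+ */
+static inline int cas_example_mif_frame_read(void __iomem *regs, int phy,
+					     int reg)
+{
+	int limit = 1000;
+	u32 cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ |
+		  (phy << MIF_FRAME_PHY_ADDR_SHIFT) |
+		  (reg << MIF_FRAME_REG_ADDR_SHIFT) |
+		  MIF_FRAME_TURN_AROUND_MSB;
+
+	writel(cmd, regs + REG_MIF_FRAME);
+	while (limit-- > 0) {
+		udelay(10);
+		cmd = readl(regs + REG_MIF_FRAME);
+		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
+			return cmd & MIF_FRAME_DATA_MASK;
+	}
+	return -1;	/* timed out */
+}
+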
+#define REG_MIF_CFG 0x6210 /* MIF config reg */
+#define MIF_CFG_PHY_SELECT 0x0001 /* 1 -> select MDIO_1
+ 0 -> select MDIO_0 */
+#define MIF_CFG_POLL_EN 0x0002 /* enable polling
+ mechanism. if set,
+ BB_MODE should be 0 */
+#define MIF_CFG_BB_MODE 0x0004 /* 1 -> bit-bang mode
+ 0 -> frame mode */
+#define MIF_CFG_POLL_REG_MASK 0x00F8 /* register address to be
+ used by polling mode.
+ only meaningful if POLL_EN
+ is set to 1 */
+#define MIF_CFG_POLL_REG_SHIFT 3
+#define MIF_CFG_MDIO_0 0x0100 /* (ro) dual purpose.
+ when MDIO_0 is idle,
+ 1 -> transceiver is
+ connected to MDIO_0.
+ when MIF is communicating
+ w/ MDIO_0 in bit-bang
+ mode, this bit indicates
+ the incoming bit stream
+ during a read op */
+#define MIF_CFG_MDIO_1 0x0200 /* (ro) dual purpose.
+ when MDIO_1 is idle,
+ 1 -> transceiver is
+ connected to MDIO_1.
+ when MIF is communicating
+ w/ MDIO_1 in bit-bang
+ mode, this bit indicates
+ the incoming bit stream
+ during a read op */
+#define MIF_CFG_POLL_PHY_MASK 0x7C00 /* transceiver address to
+ be polled */
+#define MIF_CFG_POLL_PHY_SHIFT 10
+
+/* 16-bit register used to determine which bits in the POLL_STATUS portion of
+ * the MIF_STATUS register will cause an interrupt. if a mask bit is 0,
+ * corresponding bit of the POLL_STATUS will generate a MIF interrupt when
+ * set. DEFAULT: 0xFFFF
+ */
+#define REG_MIF_MASK 0x6214 /* MIF mask reg */
+
+/* 32-bit register used when in poll mode. auto-cleared after being read */
+#define REG_MIF_STATUS 0x6218 /* MIF status reg */
+#define MIF_STATUS_POLL_DATA_MASK 0xFFFF0000 /* poll data contains
+ the "latest image"
+ update of the XCVR
+ reg being read */
+#define MIF_STATUS_POLL_DATA_SHIFT 16
+#define MIF_STATUS_POLL_STATUS_MASK 0x0000FFFF /* poll status indicates
+ which bits in the
+ POLL_DATA field have
+ changed since the
+ MIF_STATUS reg was
+ last read */
+#define MIF_STATUS_POLL_STATUS_SHIFT 0
+
+/* 7-bit register has current state for all state machines in the MIF */
+#define REG_MIF_STATE_MACHINE 0x621C /* MIF state machine reg */
+#define MIF_SM_CONTROL_MASK 0x07 /* control state machine
+ state */
+#define MIF_SM_EXECUTION_MASK 0x60 /* execution state machine
+ state */
+
+/** PCS/Serialink. the following registers are equivalent to the standard
+ * MII management registers except that they're directly mapped in
+ * Cassini's register space.
+ **/
+
+/* the auto-negotiation enable bit should be programmed the same at
+ * the link partner as in the local device to enable auto-negotiation to
+ * complete. when that bit is reprogrammed, auto-neg/manual config is
+ * restarted automatically.
+ * DEFAULT: 0x1040
+ */
+#define REG_PCS_MII_CTRL 0x9000 /* PCS MII control reg */
+#define PCS_MII_CTRL_1000_SEL 0x0040 /* reads 1. ignored on
+ writes */
+#define PCS_MII_CTRL_COLLISION_TEST 0x0080 /* COL signal at the PCS
+ to MAC interface is
+ activated regardless
+ of activity */
+#define PCS_MII_CTRL_DUPLEX 0x0100 /* forced 0x0. PCS
+ behaviour same for
+ half and full dplx */
+#define PCS_MII_RESTART_AUTONEG 0x0200 /* self clearing.
+ restart auto-
+ negotiation */
+#define PCS_MII_ISOLATE 0x0400 /* read as 0. ignored
+ on writes */
+#define PCS_MII_POWER_DOWN 0x0800 /* read as 0. ignored
+ on writes */
+#define PCS_MII_AUTONEG_EN 0x1000 /* default 1. PCS goes
+ through automatic
+ link config before it
+ can be used. when 0,
+ link can be used
+ w/out any link config
+ phase */
+#define PCS_MII_10_100_SEL 0x2000 /* read as 0. ignored on
+ writes */
+#define PCS_MII_RESET 0x8000 /* reset PCS. self-clears
+ when done */
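+
+/* editor's note: illustrative sketch, not part of the original file.
+ * restarts auto-negotiation; RESTART_AUTONEG self-clears per the field
+ * description above.
+ */
+static inline void cas_example_pcs_restart_aneg(void __iomem *regs)
+{
+	u32 v = readl(regs + REG_PCS_MII_CTRL);
+
+	writel(v | PCS_MII_AUTONEG_EN | PCS_MII_RESTART_AUTONEG,
+	       regs + REG_PCS_MII_CTRL);
+}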
+
+/* DEFAULT: 0x0108 */
+#define REG_PCS_MII_STATUS 0x9004 /* PCS MII status reg */
+#define PCS_MII_STATUS_EXTEND_CAP 0x0001 /* reads 0 */
+#define PCS_MII_STATUS_JABBER_DETECT 0x0002 /* reads 0 */
+#define PCS_MII_STATUS_LINK_STATUS 0x0004 /* 1 -> link up.
+ 0 -> link down. 0 is
+ latched so that 0 is
+ kept until read. read
+ 2x to determine if the
+ link has gone up again */
+#define PCS_MII_STATUS_AUTONEG_ABLE 0x0008 /* reads 1 (able to perform
+ auto-neg) */
+#define PCS_MII_STATUS_REMOTE_FAULT 0x0010 /* 1 -> remote fault detected
+ from received link code
+ word. only valid after
+ auto-neg completed */
+#define PCS_MII_STATUS_AUTONEG_COMP 0x0020 /* 1 -> auto-negotiation
+ completed
+ 0 -> auto-negotiation not
+ completed */
+#define PCS_MII_STATUS_EXTEND_STATUS 0x0100 /* reads as 1. used as an
+ indication that this is
+ a 1000 Base-X PHY. writes
+ to it are ignored */
+
+/* used during auto-negotiation.
+ * DEFAULT: 0x00E0
+ */
+#define REG_PCS_MII_ADVERT 0x9008 /* PCS MII advertisement
+ reg */
+#define PCS_MII_ADVERT_FD 0x0020 /* advertise full duplex
+ 1000 Base-X */
+#define PCS_MII_ADVERT_HD 0x0040 /* advertise half-duplex
+ 1000 Base-X */
+#define PCS_MII_ADVERT_SYM_PAUSE 0x0080 /* advertise PAUSE
+ symmetric capability */
+#define PCS_MII_ADVERT_ASYM_PAUSE 0x0100 /* advertises PAUSE
+ asymmetric capability */
+#define PCS_MII_ADVERT_RF_MASK 0x3000 /* remote fault. write bit13
+ to optionally indicate to
+ link partner that chip is
+ going off-line. bit12 will
+ get set when signal
+ detect == FAIL and will
+ remain set until
+ successful negotiation */
+#define PCS_MII_ADVERT_ACK 0x4000 /* (ro) */
+#define PCS_MII_ADVERT_NEXT_PAGE 0x8000 /* (ro) forced 0x0 */
+
+/* contents updated as a result of autonegotiation. layout and definitions
+ * identical to PCS_MII_ADVERT
+ */
+#define REG_PCS_MII_LPA 0x900C /* PCS MII link partner
+ ability reg */
+#define PCS_MII_LPA_FD PCS_MII_ADVERT_FD
+#define PCS_MII_LPA_HD PCS_MII_ADVERT_HD
+#define PCS_MII_LPA_SYM_PAUSE PCS_MII_ADVERT_SYM_PAUSE
+#define PCS_MII_LPA_ASYM_PAUSE PCS_MII_ADVERT_ASYM_PAUSE
+#define PCS_MII_LPA_RF_MASK PCS_MII_ADVERT_RF_MASK
+#define PCS_MII_LPA_ACK PCS_MII_ADVERT_ACK
+#define PCS_MII_LPA_NEXT_PAGE PCS_MII_ADVERT_NEXT_PAGE
+
+/* DEFAULT: 0x0 */
+#define REG_PCS_CFG 0x9010 /* PCS config reg */
+#define PCS_CFG_EN 0x01 /* enable PCS. must be
+ 0 when modifying
+ PCS_MII_ADVERT */
+#define PCS_CFG_SD_OVERRIDE 0x02 /* sets signal detect to
+ OK. bit is
+ non-resettable */
+#define PCS_CFG_SD_ACTIVE_LOW 0x04 /* changes interpretation
+ of optical signal to make
+ signal detect okay when
+ signal is low */
+#define PCS_CFG_JITTER_STUDY_MASK 0x18 /* used to make jitter
+ measurements. a single
+ code group is xmitted
+ regularly.
+ 0x0 = normal operation
+ 0x1 = high freq test
+ pattern, D21.5
+ 0x2 = low freq test
+ pattern, K28.7
+ 0x3 = reserved */
+#define PCS_CFG_10MS_TIMER_OVERRIDE 0x20 /* shortens 10-20ms auto-
+ negotiation timer to
+ a few cycles for test
+ purposes */
+
+/* used for diagnostic purposes. bits 20-22 autoclear on read */
+#define REG_PCS_STATE_MACHINE 0x9014 /* (ro) PCS state machine
+ and diagnostic reg */
+#define PCS_SM_TX_STATE_MASK 0x0000000F /* 0 and 1 indicate
+ xmission of idle.
+ otherwise, xmission of
+ a packet */
+#define PCS_SM_RX_STATE_MASK 0x000000F0 /* 0 indicates reception
+ of idle. otherwise,
+ reception of packet */
+#define PCS_SM_WORD_SYNC_STATE_MASK 0x00000700 /* 0 indicates loss of
+ sync */
+#define PCS_SM_SEQ_DETECT_STATE_MASK 0x00001800 /* cycling through 0-3
+ indicates reception of
+ Config codes. cycling
+ through 0-1 indicates
+ reception of idles */
+#define PCS_SM_LINK_STATE_MASK 0x0001E000
+#define SM_LINK_STATE_UP 0x00016000 /* link state is up */
+
+#define PCS_SM_LOSS_LINK_C 0x00100000 /* loss of link due to
+ receipt of Config
+ codes */
+#define PCS_SM_LOSS_LINK_SYNC 0x00200000 /* loss of link due to
+ loss of sync */
+#define PCS_SM_LOSS_SIGNAL_DETECT 0x00400000 /* signal detect goes
+ from OK to FAIL. bit29
+ will also be set if
+ this is set */
+#define PCS_SM_NO_LINK_BREAKLINK 0x01000000 /* link not up due to
+ receipt of breaklink
+ C codes from partner.
+ C codes w/ 0 content
+ received triggering
+ start/restart of
+ autonegotiation.
+ should be sent for
+ no longer than 20ms */
+#define PCS_SM_NO_LINK_SERDES 0x02000000 /* serdes being
+ initialized. see serdes
+ state reg */
+#define PCS_SM_NO_LINK_C 0x04000000 /* C codes not stable or
+ not received */
+#define PCS_SM_NO_LINK_SYNC 0x08000000 /* word sync not
+ achieved */
+#define PCS_SM_NO_LINK_WAIT_C 0x10000000 /* waiting for C codes
+ w/ ack bit set */
+#define PCS_SM_NO_LINK_NO_IDLE 0x20000000 /* link partner continues
+ to send C codes
+ instead of idle
+ symbols or pkt data */
+
+/* this register indicates interrupt changes in specific PCS MII status bits.
+ * PCS_INT may be masked at the ISR level. only a single bit is implemented
+ * for link status change.
+ */
+#define REG_PCS_INTR_STATUS 0x9018 /* PCS interrupt status */
+#define PCS_INTR_STATUS_LINK_CHANGE 0x04 /* link status has changed
+ since last read */
+
+/* control which network interface is used. no more than one bit should
+ * be set.
+ * DEFAULT: none
+ */
+#define REG_PCS_DATAPATH_MODE 0x9050 /* datapath mode reg */
+#define PCS_DATAPATH_MODE_MII 0x00 /* PCS is not used and
+ MII/GMII is selected.
+ selection between MII and
+ GMII is controlled by
+ XIF_CFG */
+#define PCS_DATAPATH_MODE_SERDES 0x02 /* PCS is used via the
+ 10-bit interface */
+
+/* input to serdes chip or serialink block */
+#define REG_PCS_SERDES_CTRL 0x9054 /* serdes control reg */
+#define PCS_SERDES_CTRL_LOOPBACK 0x01 /* enable loopback on
+ serdes interface */
+#define PCS_SERDES_CTRL_SYNCD_EN 0x02 /* enable sync carrier
+ detection. should be
+ 0x0 for normal
+ operation */
+#define PCS_SERDES_CTRL_LOCKREF 0x04 /* frequency-lock RBC[0:1]
+ to REFCLK when set.
+ when clear, receiver
+ clock locks to incoming
+ serial data */
+
+/* multiplex test outputs into the PROM address (PA_3 through PA_0) pins.
+ * should be 0x0 for normal operations.
+ * 0b000 normal operation, PROM address[3:0] selected
+ * 0b001 rxdma req, rxdma ack, rxdma ready, rxdma read
+ * 0b010 rxmac req, rx ack, rx tag, rx clk shared
+ * 0b011 txmac req, tx ack, tx tag, tx retry req
+ * 0b100 tx tp3, tx tp2, tx tp1, tx tp0
+ * 0b101 R period RX, R period TX, R period HP, R period BIM
+ * DEFAULT: 0x0
+ */
+#define REG_PCS_SHARED_OUTPUT_SEL 0x9058 /* shared output select */
+#define PCS_SOS_PROM_ADDR_MASK 0x0007
+
+/* used for diagnostics. this register indicates progress of the SERDES
+ * boot up.
+ * 0b00 undergoing reset
+ * 0b01 waiting 500us while lockrefn is asserted
+ * 0b10 waiting for comma detect
+ * 0b11 receive data is synchronized
+ * DEFAULT: 0x0
+ */
+#define REG_PCS_SERDES_STATE 0x905C /* (ro) serdes state */
+#define PCS_SERDES_STATE_MASK 0x03
+
+/* used for diagnostics. indicates number of packets transmitted or received.
+ * counters rollover w/out generating an interrupt.
+ * DEFAULT: 0x0
+ */
+#define REG_PCS_PACKET_COUNT 0x9060 /* (ro) PCS packet counter */
+#define PCS_PACKET_COUNT_TX 0x000007FF /* pkts xmitted by PCS */
+#define PCS_PACKET_COUNT_RX 0x07FF0000 /* pkts recvd by PCS
+ whether they
+ encountered an error
+ or not */
+
+/** LocalBus Devices. the following provides run-time access to the
+ * Cassini's PROM
+ **/
+#define REG_EXPANSION_ROM_RUN_START 0x100000 /* expansion rom run time
+ access */
+#define REG_EXPANSION_ROM_RUN_END 0x17FFFF
+
+#define REG_SECOND_LOCALBUS_START 0x180000 /* secondary local bus
+ device */
+#define REG_SECOND_LOCALBUS_END 0x1FFFFF
+
+/* entropy device */
+#define REG_ENTROPY_START REG_SECOND_LOCALBUS_START
+#define REG_ENTROPY_DATA (REG_ENTROPY_START + 0x00)
+#define REG_ENTROPY_STATUS (REG_ENTROPY_START + 0x04)
+#define ENTROPY_STATUS_DRDY 0x01
+#define ENTROPY_STATUS_BUSY 0x02
+#define ENTROPY_STATUS_CIPHER 0x04
+#define ENTROPY_STATUS_BYPASS_MASK 0x18
+#define REG_ENTROPY_MODE (REG_ENTROPY_START + 0x05)
+#define ENTROPY_MODE_KEY_MASK 0x07
+#define ENTROPY_MODE_ENCRYPT 0x40
+#define REG_ENTROPY_RAND_REG (REG_ENTROPY_START + 0x06)
+#define REG_ENTROPY_RESET (REG_ENTROPY_START + 0x07)
+#define ENTROPY_RESET_DES_IO 0x01
+#define ENTROPY_RESET_STC_MODE 0x02
+#define ENTROPY_RESET_KEY_CACHE 0x04
+#define ENTROPY_RESET_IV 0x08
+#define REG_ENTROPY_IV (REG_ENTROPY_START + 0x08)
+#define REG_ENTROPY_KEY0 (REG_ENTROPY_START + 0x10)
+#define REG_ENTROPY_KEYN(x) (REG_ENTROPY_KEY0 + 4*(x))
+
+/* phys of interest w/ their special mii registers */
+#define PHY_LUCENT_B0 0x00437421
+#define LUCENT_MII_REG 0x1F
+
+#define PHY_NS_DP83065 0x20005c78
+#define DP83065_MII_MEM 0x16
+#define DP83065_MII_REGD 0x1D
+#define DP83065_MII_REGE 0x1E
+
+#define PHY_BROADCOM_5411 0x00206071
+#define PHY_BROADCOM_B0 0x00206050
+#define BROADCOM_MII_REG4 0x14
+#define BROADCOM_MII_REG5 0x15
+#define BROADCOM_MII_REG7 0x17
+#define BROADCOM_MII_REG8 0x18
+
+#define CAS_MII_ANNPTR 0x07
+#define CAS_MII_ANNPRR 0x08
+#define CAS_MII_1000_CTRL 0x09
+#define CAS_MII_1000_STATUS 0x0A
+#define CAS_MII_1000_EXTEND 0x0F
+
+#define CAS_BMSR_1000_EXTEND 0x0100 /* supports 1000Base-T extended status */
+/*
+ * if autoneg is disabled, here's the table:
+ * BMCR_SPEED100 = 100Mbps
+ * BMCR_SPEED1000 = 1000Mbps
+ * ~(BMCR_SPEED100 | BMCR_SPEED1000) = 10Mbps
+ */
+#define CAS_BMCR_SPEED1000 0x0040 /* Select 1000Mbps */
+
+#define CAS_ADVERTISE_1000HALF 0x0100
+#define CAS_ADVERTISE_1000FULL 0x0200
+#define CAS_ADVERTISE_PAUSE 0x0400
+#define CAS_ADVERTISE_ASYM_PAUSE 0x0800
+
+/* regular lpa register */
+#define CAS_LPA_PAUSE CAS_ADVERTISE_PAUSE
+#define CAS_LPA_ASYM_PAUSE CAS_ADVERTISE_ASYM_PAUSE
+
+/* 1000_STATUS register */
+#define CAS_LPA_1000HALF 0x0400
+#define CAS_LPA_1000FULL 0x0800
+
+#define CAS_EXTEND_1000XFULL 0x8000
+#define CAS_EXTEND_1000XHALF 0x4000
+#define CAS_EXTEND_1000TFULL 0x2000
+#define CAS_EXTEND_1000THALF 0x1000
+
+/* cassini header parser firmware */
+typedef struct cas_hp_inst {
+ const char *note;
+
+ u16 mask, val;
+
+ u8 op;
+ u8 soff, snext; /* if match succeeds, new offset and match */
+ u8 foff, fnext; /* if match fails, new offset and match */
+ /* output info */
+ u8 outop; /* output opcode */
+
+ u16 outarg; /* output argument */
+ u8 outenab; /* output enable: 0 = not, 1 = if match,
+ 2 = if !match, 3 = always */
+ u8 outshift; /* barrel shift right, 4 bits */
+ u16 outmask;
+} cas_hp_inst_t;
+
+/* comparison */
+#define OP_EQ 0 /* packet == value */
+#define OP_LT 1 /* packet < value */
+#define OP_GT 2 /* packet > value */
+#define OP_NP 3 /* new packet */
+
+/* output opcodes */
+#define CL_REG 0
+#define LD_FID 1
+#define LD_SEQ 2
+#define LD_CTL 3
+#define LD_SAP 4
+#define LD_R1 5
+#define LD_L3 6
+#define LD_SUM 7
+#define LD_HDR 8
+#define IM_FID 9
+#define IM_SEQ 10
+#define IM_SAP 11
+#define IM_R1 12
+#define IM_CTL 13
+#define LD_LEN 14
+#define ST_FLG 15
+
+/* match step #s for IP4TCP4 */
+#define S1_PCKT 0
+#define S1_VLAN 1
+#define S1_CFI 2
+#define S1_8023 3
+#define S1_LLC 4
+#define S1_LLCc 5
+#define S1_IPV4 6
+#define S1_IPV4c 7
+#define S1_IPV4F 8
+#define S1_TCP44 9
+#define S1_IPV6 10
+#define S1_IPV6L 11
+#define S1_IPV6c 12
+#define S1_TCP64 13
+#define S1_TCPSQ 14
+#define S1_TCPFG 15
+#define S1_TCPHL 16
+#define S1_TCPHc 17
+#define S1_CLNP 18
+#define S1_CLNP2 19
+#define S1_DROP 20
+#define S2_HTTP 21
+#define S1_ESP4 22
+#define S1_AH4 23
+#define S1_ESP6 24
+#define S1_AH6 25
+
+#define CAS_PROG_IP46TCP4_PREAMBLE \
+{ "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, S1_PCKT, \
+ CL_REG, 0x3ff, 1, 0x0, 0x0000}, \
+{ "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, \
+ IM_CTL, 0x00a, 3, 0x0, 0xffff}, \
+{ "CFI?", 0x1000, 0x1000, OP_EQ, 0, S1_DROP, 1, S1_8023, \
+ CL_REG, 0x000, 0, 0x0, 0x0000}, \
+{ "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, \
+ CL_REG, 0x000, 0, 0x0, 0x0000}, \
+{ "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP, \
+ CL_REG, 0x000, 0, 0x0, 0x0000}, \
+{ "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP, \
+ CL_REG, 0x000, 0, 0x0, 0x0000}, \
+{ "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, \
+ LD_SAP, 0x100, 3, 0x0, 0xffff}, \
+{ "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP, \
+ LD_SUM, 0x00a, 1, 0x0, 0x0000}, \
+{ "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP, \
+ LD_LEN, 0x03e, 1, 0x0, 0xffff}, \
+{ "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_CLNP, \
+ LD_FID, 0x182, 1, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ \
+{ "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP, \
+ LD_SUM, 0x015, 1, 0x0, 0x0000}, \
+{ "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP, \
+ IM_R1, 0x128, 1, 0x0, 0xffff}, \
+{ "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP, \
+ LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ \
+{ "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP, \
+ LD_LEN, 0x03f, 1, 0x0, 0xffff}
+
+#ifdef USE_HP_IP46TCP4
+static cas_hp_inst_t cas_prog_ip46tcp4tab[] = {
+ CAS_PROG_IP46TCP4_PREAMBLE,
+ { "TCP seq", /* DADDR should point to dest port */
+ 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ,
+ 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
+ { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0,
+ S1_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */
+ { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0,
+ S1_TCPHc, LD_R1, 0x205, 3, 0xB, 0xf000},
+ { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0,
+ S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff},
+ { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2,
+ IM_CTL, 0x001, 3, 0x0, 0x0001},
+ { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ IM_CTL, 0x000, 0, 0x0, 0x0000},
+ { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ IM_CTL, 0x080, 3, 0x0, 0xffff},
+ { NULL },
+};
+#ifdef HP_IP46TCP4_DEFAULT
+#define CAS_HP_FIRMWARE cas_prog_ip46tcp4tab
+#endif
+#endif
+
+/*
+ * Alternate table load which excludes HTTP server traffic from reassembly.
+ * It is substantially similar to the basic table, with one extra state
+ * and a few extra compares. */
+#ifdef USE_HP_IP46TCP4NOHTTP
+static cas_hp_inst_t cas_prog_ip46tcp4nohttptab[] = {
+ CAS_PROG_IP46TCP4_PREAMBLE,
+ { "TCP seq", /* DADDR should point to dest port */
+ 0xFFFF, 0x0080, OP_EQ, 0, S2_HTTP, 0, S1_TCPFG, LD_SEQ,
+ 0x081, 3, 0x0, 0xffff} , /* Load TCP seq # */
+ { "TCP control flags", 0xFFFF, 0x8080, OP_EQ, 0, S2_HTTP, 0,
+ S1_TCPHL, ST_FLG, 0x145, 2, 0x0, 0x002f, }, /* Load TCP flags */
+ { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc,
+ LD_R1, 0x205, 3, 0xB, 0xf000},
+ { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ LD_HDR, 0x0ff, 3, 0x0, 0xffff},
+ { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2,
+ IM_CTL, 0x001, 3, 0x0, 0x0001},
+ { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ CL_REG, 0x002, 3, 0x0, 0x0000},
+ { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ IM_CTL, 0x080, 3, 0x0, 0xffff},
+ { "No HTTP", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ IM_CTL, 0x044, 3, 0x0, 0xffff},
+ { NULL },
+};
+#ifdef HP_IP46TCP4NOHTTP_DEFAULT
+#define CAS_HP_FIRMWARE cas_prog_ip46tcp4nohttptab
+#endif
+#endif
+
+/* match step #s for IP4FRAG */
+#define S3_IPV6c 11
+#define S3_TCP64 12
+#define S3_TCPSQ 13
+#define S3_TCPFG 14
+#define S3_TCPHL 15
+#define S3_TCPHc 16
+#define S3_FRAG 17
+#define S3_FOFF 18
+#define S3_CLNP 19
+
+#ifdef USE_HP_IP4FRAG
+static cas_hp_inst_t cas_prog_ip4fragtab[] = {
+ { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, S1_PCKT,
+ CL_REG, 0x3ff, 1, 0x0, 0x0000},
+ { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023,
+ IM_CTL, 0x00a, 3, 0x0, 0xffff},
+ { "CFI?", 0x1000, 0x1000, OP_EQ, 0, S3_CLNP, 1, S1_8023,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S3_CLNP,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "LLCc?",0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S3_CLNP,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6,
+ LD_SAP, 0x100, 3, 0x0, 0xffff},
+ { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S3_CLNP,
+ LD_SUM, 0x00a, 1, 0x0, 0x0000},
+ { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S3_FRAG,
+ LD_LEN, 0x03e, 3, 0x0, 0xffff},
+ { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S3_TCPSQ, 0, S3_CLNP,
+ LD_FID, 0x182, 3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */
+ { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S3_IPV6c, 0, S3_CLNP,
+ LD_SUM, 0x015, 1, 0x0, 0x0000},
+ { "IPV6 cont?", 0xf000, 0x6000, OP_EQ, 3, S3_TCP64, 0, S3_CLNP,
+ LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */
+ { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S3_TCPSQ, 0, S3_CLNP,
+ LD_LEN, 0x03f, 1, 0x0, 0xffff},
+ { "TCP seq", /* DADDR should point to dest port */
+ 0x0000, 0x0000, OP_EQ, 0, S3_TCPFG, 4, S3_TCPFG, LD_SEQ,
+ 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
+ { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S3_TCPHL, 0,
+ S3_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */
+ { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S3_TCPHc, 0, S3_TCPHc,
+ LD_R1, 0x205, 3, 0xB, 0xf000},
+ { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ LD_HDR, 0x0ff, 3, 0x0, 0xffff},
+ { "IP4 Fragment", 0x0000, 0x0000, OP_EQ, 0, S3_FOFF, 0, S3_FOFF,
+ LD_FID, 0x103, 3, 0x0, 0xffff}, /* FID IP4 src+dst */
+ { "IP4 frag offset", 0x0000, 0x0000, OP_EQ, 0, S3_FOFF, 0, S3_FOFF,
+ LD_SEQ, 0x040, 1, 0xD, 0xfff8},
+ { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ IM_CTL, 0x001, 3, 0x0, 0x0001},
+ { NULL },
+};
+#ifdef HP_IP4FRAG_DEFAULT
+#define CAS_HP_FIRMWARE cas_prog_ip4fragtab
+#endif
+#endif
+
+/*
+ * Alternate table which does batching without reassembly
+ */
+#ifdef USE_HP_IP46TCP4BATCH
+static cas_hp_inst_t cas_prog_ip46tcp4batchtab[] = {
+ CAS_PROG_IP46TCP4_PREAMBLE,
+ { "TCP seq", /* DADDR should point to dest port */
+ 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 0, S1_TCPFG, LD_SEQ,
+ 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
+ { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0,
+ S1_TCPHL, ST_FLG, 0x000, 3, 0x0, 0x0000}, /* Load TCP flags */
+ { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0,
+ S1_TCPHc, LD_R1, 0x205, 3, 0xB, 0xf000},
+ { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0,
+ S1_PCKT, IM_CTL, 0x040, 3, 0x0, 0xffff}, /* set batch bit */
+ { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ IM_CTL, 0x001, 3, 0x0, 0x0001},
+ { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0,
+ S1_PCKT, IM_CTL, 0x080, 3, 0x0, 0xffff},
+ { NULL },
+};
+#ifdef HP_IP46TCP4BATCH_DEFAULT
+#define CAS_HP_FIRMWARE cas_prog_ip46tcp4batchtab
+#endif
+#endif
+
+/* Workaround for Cassini rev2 descriptor corruption problem.
+ * Does batching without reassembly, and sets the SAP to a known
+ * data pattern for all packets.
+ */
+#ifdef USE_HP_WORKAROUND
+static cas_hp_inst_t cas_prog_workaroundtab[] = {
+ { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0,
+ S1_PCKT, CL_REG, 0x3ff, 1, 0x0, 0x0000} ,
+ { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023,
+ IM_CTL, 0x04a, 3, 0x0, 0xffff},
+ { "CFI?", 0x1000, 0x1000, OP_EQ, 0, S1_CLNP, 1, S1_8023,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6,
+ IM_SAP, 0x6AE, 3, 0x0, 0xffff},
+ { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP,
+ LD_SUM, 0x00a, 1, 0x0, 0x0000},
+ { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP,
+ LD_LEN, 0x03e, 1, 0x0, 0xffff},
+ { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_CLNP,
+ LD_FID, 0x182, 3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */
+ { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP,
+ LD_SUM, 0x015, 1, 0x0, 0x0000},
+ { "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP,
+ IM_R1, 0x128, 1, 0x0, 0xffff},
+ { "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP,
+ LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */
+ { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP,
+ LD_LEN, 0x03f, 1, 0x0, 0xffff},
+ { "TCP seq", /* DADDR should point to dest port */
+ 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ,
+ 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
+ { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0,
+ S1_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */
+ { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc,
+ LD_R1, 0x205, 3, 0xB, 0xf000},
+ { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0,
+ S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff},
+ { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2,
+ IM_SAP, 0x6AE, 3, 0x0, 0xffff} ,
+ { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ IM_CTL, 0x001, 3, 0x0, 0x0001},
+ { NULL },
+};
+#ifdef HP_WORKAROUND_DEFAULT
+#define CAS_HP_FIRMWARE cas_prog_workaroundtab
+#endif
+#endif
+
+#ifdef USE_HP_ENCRYPT
+static cas_hp_inst_t cas_prog_encryptiontab[] = {
+ { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0,
+ S1_PCKT, CL_REG, 0x3ff, 1, 0x0, 0x0000},
+ { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023,
+ IM_CTL, 0x00a, 3, 0x0, 0xffff},
+#if 0
+//"CFI?", /* 02 FIND CFI and If FIND go to S1_DROP */
+//0x1000, 0x1000, OP_EQ, 0, S1_DROP, 1, S1_8023, CL_REG, 0x000, 0, 0x0, 0x0000,
+#endif
+ { "CFI?", /* FIND CFI and If FIND go to CleanUP1 (ignore and send to host) */
+ 0x1000, 0x1000, OP_EQ, 0, S1_CLNP, 1, S1_8023,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP,
+ CL_REG, 0x000, 0, 0x0, 0x0000},
+ { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6,
+ LD_SAP, 0x100, 3, 0x0, 0xffff},
+ { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP,
+ LD_SUM, 0x00a, 1, 0x0, 0x0000},
+ { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP,
+ LD_LEN, 0x03e, 1, 0x0, 0xffff},
+ { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_ESP4,
+ LD_FID, 0x182, 1, 0x0, 0xffff}, /* FID IP4&TCP src+dst */
+ { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP,
+ LD_SUM, 0x015, 1, 0x0, 0x0000},
+ { "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP,
+ IM_R1, 0x128, 1, 0x0, 0xffff},
+ { "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP,
+ LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */
+ { "TCP64?",
+#if 0
+//@@@0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_ESP6, LD_LEN, 0x03f, 1, 0x0, 0xffff,
+#endif
+ 0xff00, 0x0600, OP_EQ, 12, S1_TCPSQ, 0, S1_ESP6, LD_LEN,
+ 0x03f, 1, 0x0, 0xffff},
+ { "TCP seq", /* 14:DADDR should point to dest port */
+ 0xFFFF, 0x0080, OP_EQ, 0, S2_HTTP, 0, S1_TCPFG, LD_SEQ,
+ 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
+ { "TCP control flags", 0xFFFF, 0x8080, OP_EQ, 0, S2_HTTP, 0,
+ S1_TCPHL, ST_FLG, 0x145, 2, 0x0, 0x002f}, /* Load TCP flags */
+ { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc,
+ LD_R1, 0x205, 3, 0xB, 0xf000},
+ { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0,
+ S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff},
+ { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2,
+ IM_CTL, 0x001, 3, 0x0, 0x0001},
+ { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ CL_REG, 0x002, 3, 0x0, 0x0000},
+ { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ IM_CTL, 0x080, 3, 0x0, 0xffff},
+ { "No HTTP", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
+ IM_CTL, 0x044, 3, 0x0, 0xffff},
+ { "IPV4 ESP encrypted?", /* S1_ESP4 */
+ 0x00ff, 0x0032, OP_EQ, 0, S1_CLNP2, 0, S1_AH4, IM_CTL,
+ 0x021, 1, 0x0, 0xffff},
+ { "IPV4 AH encrypted?", /* S1_AH4 */
+ 0x00ff, 0x0033, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL,
+ 0x021, 1, 0x0, 0xffff},
+ { "IPV6 ESP encrypted?", /* S1_ESP6 */
+#if 0
+//@@@0x00ff, 0x0032, OP_EQ, 0, S1_CLNP2, 0, S1_AH6, IM_CTL, 0x021, 1, 0x0, 0xffff,
+#endif
+ 0xff00, 0x3200, OP_EQ, 0, S1_CLNP2, 0, S1_AH6, IM_CTL,
+ 0x021, 1, 0x0, 0xffff},
+ { "IPV6 AH encrypted?", /* S1_AH6 */
+#if 0
+//@@@0x00ff, 0x0033, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL, 0x021, 1, 0x0, 0xffff,
+#endif
+ 0xff00, 0x3300, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL,
+ 0x021, 1, 0x0, 0xffff},
+ { NULL },
+};
+#ifdef HP_ENCRYPT_DEFAULT
+#define CAS_HP_FIRMWARE cas_prog_encryptiontab
+#endif
+#endif
+
+static cas_hp_inst_t cas_prog_null[] = { {NULL} };
+#ifdef HP_NULL_DEFAULT
+#define CAS_HP_FIRMWARE cas_prog_null
+#endif
+
+/* phy types */
+#define CAS_PHY_UNKNOWN 0x00
+#define CAS_PHY_SERDES 0x01
+#define CAS_PHY_MII_MDIO0 0x02
+#define CAS_PHY_MII_MDIO1 0x04
+#define CAS_PHY_MII(x) ((x) & (CAS_PHY_MII_MDIO0 | CAS_PHY_MII_MDIO1))
+
+/* _RING_INDEX is the index for the ring sizes to be used. _RING_SIZE
+ * is the actual size. the default index for the various rings is
+ * 8. NOTE: there are a bunch of alignment constraints for the rings. to
+ * deal with that, i just allocate rings to create the desired
+ * alignment. here are the constraints:
+ * RX DESC and COMP rings must be 8KB aligned
+ * TX DESC must be 2KB aligned.
+ * if you change the numbers, be cognizant of how the alignment will change
+ * in INIT_BLOCK as well.
+ */
+
+#define DESC_RING_I_TO_S(x) (32*(1 << (x)))
+#define COMP_RING_I_TO_S(x) (128*(1 << (x)))
+#define TX_DESC_RING_INDEX 4 /* 512 = 8k */
+#define RX_DESC_RING_INDEX 4 /* 512 = 8k */
+#define RX_COMP_RING_INDEX 4 /* 2048 = 64k: should be 4x rx ring size */
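+/* Worked example (a sketch of the arithmetic, not new constraints): with
+ * the default index of 4, DESC_RING_I_TO_S(4) = 32 << 4 = 512 entries.
+ * Each descriptor is two 8-byte words, so a 512-entry desc ring is 8KB.
+ * COMP_RING_I_TO_S(4) = 128 << 4 = 2048 entries of four 8-byte words,
+ * i.e. 64KB for the completion ring, matching the notes above.
+ */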
+
+#if (TX_DESC_RING_INDEX > 8) || (TX_DESC_RING_INDEX < 0)
+#error TX_DESC_RING_INDEX must be between 0 and 8
+#endif
+
+#if (RX_DESC_RING_INDEX > 8) || (RX_DESC_RING_INDEX < 0)
+#error RX_DESC_RING_INDEX must be between 0 and 8
+#endif
+
+#if (RX_COMP_RING_INDEX > 8) || (RX_COMP_RING_INDEX < 0)
+#error RX_COMP_RING_INDEX must be between 0 and 8
+#endif
+
+#define N_TX_RINGS MAX_TX_RINGS /* for QoS */
+#define N_TX_RINGS_MASK MAX_TX_RINGS_MASK
+#define N_RX_DESC_RINGS MAX_RX_DESC_RINGS /* 1 for ipsec */
+#define N_RX_COMP_RINGS 0x1 /* for mult. PCI interrupts */
+
+/* number of flows that can go through re-assembly */
+#define N_RX_FLOWS 64
+
+#define TX_DESC_RING_SIZE DESC_RING_I_TO_S(TX_DESC_RING_INDEX)
+#define RX_DESC_RING_SIZE DESC_RING_I_TO_S(RX_DESC_RING_INDEX)
+#define RX_COMP_RING_SIZE COMP_RING_I_TO_S(RX_COMP_RING_INDEX)
+#define TX_DESC_RINGN_INDEX(x) TX_DESC_RING_INDEX
+#define RX_DESC_RINGN_INDEX(x) RX_DESC_RING_INDEX
+#define RX_COMP_RINGN_INDEX(x) RX_COMP_RING_INDEX
+#define TX_DESC_RINGN_SIZE(x) TX_DESC_RING_SIZE
+#define RX_DESC_RINGN_SIZE(x) RX_DESC_RING_SIZE
+#define RX_COMP_RINGN_SIZE(x) RX_COMP_RING_SIZE
+
+/* convert values */
+#define CAS_BASE(x, y) (((y) << (x ## _SHIFT)) & (x ## _MASK))
+#define CAS_VAL(x, y) (((y) & (x ## _MASK)) >> (x ## _SHIFT))
+#define CAS_TX_RINGN_BASE(y) ((TX_DESC_RINGN_INDEX(y) << \
+ TX_CFG_DESC_RINGN_SHIFT(y)) & \
+ TX_CFG_DESC_RINGN_MASK(y))
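+/* Usage sketch: the macros paste _SHIFT and _MASK onto their first
+ * argument, so, e.g.,
+ *	CAS_VAL(RX_COMP1_DATA_SIZE, word1)
+ * expands to
+ *	(((word1) & RX_COMP1_DATA_SIZE_MASK) >> RX_COMP1_DATA_SIZE_SHIFT)
+ * and CAS_BASE(x, y) does the inverse mask-and-shift to build a field.
+ */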
+
+/* min is 2k, but we can't do jumbo frames unless it's at least 8k */
+#define CAS_MIN_PAGE_SHIFT 11 /* 2048 */
+#define CAS_JUMBO_PAGE_SHIFT 13 /* 8192 */
+#define CAS_MAX_PAGE_SHIFT 14 /* 16384 */
+
+#define TX_DESC_BUFLEN_MASK 0x0000000000003FFFULL /* buffer length in
+ bytes. 0 - 9256 */
+#define TX_DESC_BUFLEN_SHIFT 0
+#define TX_DESC_CSUM_START_MASK 0x00000000001F8000ULL /* checksum start. #
+ of bytes to be
+ skipped before
+ csum calc begins.
+ value must be
+ even */
+#define TX_DESC_CSUM_START_SHIFT 15
+#define TX_DESC_CSUM_STUFF_MASK 0x000000001FE00000ULL /* checksum stuff.
+ byte offset w/in
+ the pkt for the
+ 1st csum byte.
+ must be > 8 */
+#define TX_DESC_CSUM_STUFF_SHIFT 21
+#define TX_DESC_CSUM_EN 0x0000000020000000ULL /* enable checksum */
+#define TX_DESC_EOF 0x0000000040000000ULL /* end of frame */
+#define TX_DESC_SOF 0x0000000080000000ULL /* start of frame */
+#define TX_DESC_INTME 0x0000000100000000ULL /* interrupt me */
+#define TX_DESC_NO_CRC 0x0000000200000000ULL /* debugging only.
+ CRC will not be
+ inserted into
+ outgoing frame. */
+struct cas_tx_desc {
+ __le64 control;
+ __le64 buffer;
+};
+
+/* descriptor ring for free buffers contains page-sized buffers. the index
+ * value is not used by the hw in any way. it's just stored and returned in
+ * the completion ring.
+ */
+struct cas_rx_desc {
+ __le64 index;
+ __le64 buffer;
+};
+
+/* received packets are put on the completion ring. */
+/* word 1 */
+#define RX_COMP1_DATA_SIZE_MASK 0x0000000007FFE000ULL
+#define RX_COMP1_DATA_SIZE_SHIFT 13
+#define RX_COMP1_DATA_OFF_MASK 0x000001FFF8000000ULL
+#define RX_COMP1_DATA_OFF_SHIFT 27
+#define RX_COMP1_DATA_INDEX_MASK 0x007FFE0000000000ULL
+#define RX_COMP1_DATA_INDEX_SHIFT 41
+#define RX_COMP1_SKIP_MASK 0x0180000000000000ULL
+#define RX_COMP1_SKIP_SHIFT 55
+#define RX_COMP1_RELEASE_NEXT 0x0200000000000000ULL
+#define RX_COMP1_SPLIT_PKT 0x0400000000000000ULL
+#define RX_COMP1_RELEASE_FLOW 0x0800000000000000ULL
+#define RX_COMP1_RELEASE_DATA 0x1000000000000000ULL
+#define RX_COMP1_RELEASE_HDR 0x2000000000000000ULL
+#define RX_COMP1_TYPE_MASK 0xC000000000000000ULL
+#define RX_COMP1_TYPE_SHIFT 62
+
+/* word 2 */
+#define RX_COMP2_NEXT_INDEX_MASK 0x00000007FFE00000ULL
+#define RX_COMP2_NEXT_INDEX_SHIFT 21
+#define RX_COMP2_HDR_SIZE_MASK 0x00000FF800000000ULL
+#define RX_COMP2_HDR_SIZE_SHIFT 35
+#define RX_COMP2_HDR_OFF_MASK 0x0003F00000000000ULL
+#define RX_COMP2_HDR_OFF_SHIFT 44
+#define RX_COMP2_HDR_INDEX_MASK 0xFFFC000000000000ULL
+#define RX_COMP2_HDR_INDEX_SHIFT 50
+
+/* word 3 */
+#define RX_COMP3_SMALL_PKT 0x0000000000000001ULL
+#define RX_COMP3_JUMBO_PKT 0x0000000000000002ULL
+#define RX_COMP3_JUMBO_HDR_SPLIT_EN 0x0000000000000004ULL
+#define RX_COMP3_CSUM_START_MASK 0x000000000007F000ULL
+#define RX_COMP3_CSUM_START_SHIFT 12
+#define RX_COMP3_FLOWID_MASK 0x0000000001F80000ULL
+#define RX_COMP3_FLOWID_SHIFT 19
+#define RX_COMP3_OPCODE_MASK 0x000000000E000000ULL
+#define RX_COMP3_OPCODE_SHIFT 25
+#define RX_COMP3_FORCE_FLAG 0x0000000010000000ULL
+#define RX_COMP3_NO_ASSIST 0x0000000020000000ULL
+#define RX_COMP3_LOAD_BAL_MASK 0x000001F800000000ULL
+#define RX_COMP3_LOAD_BAL_SHIFT 35
+#define RX_PLUS_COMP3_ENC_PKT 0x0000020000000000ULL /* cas+ */
+#define RX_COMP3_L3_HEAD_OFF_MASK 0x0000FE0000000000ULL /* cas */
+#define RX_COMP3_L3_HEAD_OFF_SHIFT 41
+#define RX_PLUS_COMP_L3_HEAD_OFF_MASK 0x0000FC0000000000ULL /* cas+ */
+#define RX_PLUS_COMP_L3_HEAD_OFF_SHIFT 42
+#define RX_COMP3_SAP_MASK 0xFFFF000000000000ULL
+#define RX_COMP3_SAP_SHIFT 48
+
+/* word 4 */
+#define RX_COMP4_TCP_CSUM_MASK 0x000000000000FFFFULL
+#define RX_COMP4_TCP_CSUM_SHIFT 0
+#define RX_COMP4_PKT_LEN_MASK 0x000000003FFF0000ULL
+#define RX_COMP4_PKT_LEN_SHIFT 16
+#define RX_COMP4_PERFECT_MATCH_MASK 0x00000003C0000000ULL
+#define RX_COMP4_PERFECT_MATCH_SHIFT 30
+#define RX_COMP4_ZERO 0x0000080000000000ULL
+#define RX_COMP4_HASH_VAL_MASK 0x0FFFF00000000000ULL
+#define RX_COMP4_HASH_VAL_SHIFT 44
+#define RX_COMP4_HASH_PASS 0x1000000000000000ULL
+#define RX_COMP4_BAD 0x4000000000000000ULL
+#define RX_COMP4_LEN_MISMATCH 0x8000000000000000ULL
+
+/* we encode the following: ring/index/release. only 14 bits
+ * are usable.
+ * NOTE: the encoding is dependent upon RX_DESC_RING_SIZE and
+ * MAX_RX_DESC_RINGS. */
+#define RX_INDEX_NUM_MASK 0x0000000000000FFFULL
+#define RX_INDEX_NUM_SHIFT 0
+#define RX_INDEX_RING_MASK 0x0000000000001000ULL
+#define RX_INDEX_RING_SHIFT 12
+#define RX_INDEX_RELEASE 0x0000000000002000ULL
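+/* Example encoding (illustrative only): buffer 0x123 on desc ring 1,
+ * flagged for release, is 0x123 | RX_INDEX_RING_MASK | RX_INDEX_RELEASE
+ * = 0x3123.  The 12-bit NUM field easily covers the default 512-entry
+ * desc ring, and the single RING bit is why at most two desc rings can
+ * be encoded here.
+ */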
+
+struct cas_rx_comp {
+ __le64 word1;
+ __le64 word2;
+ __le64 word3;
+ __le64 word4;
+};
+
+enum link_state {
+ link_down = 0, /* No link, will retry */
+ link_aneg, /* Autoneg in progress */
+ link_force_try, /* Try Forced link speed */
+ link_force_ret, /* Forced mode worked, retrying autoneg */
+ link_force_ok, /* Stay in forced mode */
+ link_up /* Link is up */
+};
+
+typedef struct cas_page {
+ struct list_head list;
+ struct page *buffer;
+ dma_addr_t dma_addr;
+ int used;
+} cas_page_t;
+
+/* some alignment constraints:
+ * TX DESC, RX DESC, and RX COMP must each be 8K aligned.
+ * TX COMPWB must be 8-byte aligned.
+ * to accomplish this, here's what we do:
+ *
+ * INIT_BLOCK_RX_COMP = 64k (already aligned)
+ * INIT_BLOCK_RX_DESC = 8k
+ * INIT_BLOCK_TX = 8k
+ * INIT_BLOCK_RX1_DESC = 8k
+ * TX COMPWB
+ */
+#define INIT_BLOCK_TX (TX_DESC_RING_SIZE)
+#define INIT_BLOCK_RX_DESC (RX_DESC_RING_SIZE)
+#define INIT_BLOCK_RX_COMP (RX_COMP_RING_SIZE)
+
+struct cas_init_block {
+ struct cas_rx_comp rxcs[N_RX_COMP_RINGS][INIT_BLOCK_RX_COMP];
+ struct cas_rx_desc rxds[N_RX_DESC_RINGS][INIT_BLOCK_RX_DESC];
+ struct cas_tx_desc txds[N_TX_RINGS][INIT_BLOCK_TX];
+ __le64 tx_compwb;
+};
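+/* Size sketch with the default ring indices: each rxcs[] ring is
+ * 2048 entries * 32 bytes = 64KB, and each rxds[]/txds[] ring is
+ * 512 entries * 16 bytes = 8KB, so every ring starts on an 8KB
+ * boundary whenever the block itself is 8KB aligned; the 8-byte
+ * tx_compwb word sits at the end, as the note above lays out.
+ */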
+
+/* tiny buffers to deal with target abort issue. we allocate a bit
+ * over so that we don't have target abort issues with these buffers
+ * as well.
+ */
+#define TX_TINY_BUF_LEN 0x100
+#define TX_TINY_BUF_BLOCK ((INIT_BLOCK_TX + 1)*TX_TINY_BUF_LEN)
+
+struct cas_tiny_count {
+ int nbufs;
+ int used;
+};
+
+struct cas {
+ spinlock_t lock; /* for most bits */
+ spinlock_t tx_lock[N_TX_RINGS]; /* tx bits */
+ spinlock_t stat_lock[N_TX_RINGS + 1]; /* for stat gathering */
+ spinlock_t rx_inuse_lock; /* rx inuse list */
+ spinlock_t rx_spare_lock; /* rx spare list */
+
+ void __iomem *regs;
+ int tx_new[N_TX_RINGS], tx_old[N_TX_RINGS];
+ int rx_old[N_RX_DESC_RINGS];
+ int rx_cur[N_RX_COMP_RINGS], rx_new[N_RX_COMP_RINGS];
+ int rx_last[N_RX_DESC_RINGS];
+
+ struct napi_struct napi;
+
+ /* Set when chip is actually in operational state
+ * (ie. not power managed) */
+ int hw_running;
+ int opened;
+ struct mutex pm_mutex; /* open/close/suspend/resume */
+
+ struct cas_init_block *init_block;
+ struct cas_tx_desc *init_txds[MAX_TX_RINGS];
+ struct cas_rx_desc *init_rxds[MAX_RX_DESC_RINGS];
+ struct cas_rx_comp *init_rxcs[MAX_RX_COMP_RINGS];
+
+ /* we use sk_buffs for tx and pages for rx. the rx skbuffs
+ * are there for flow re-assembly. */
+ struct sk_buff *tx_skbs[N_TX_RINGS][TX_DESC_RING_SIZE];
+ struct sk_buff_head rx_flows[N_RX_FLOWS];
+ cas_page_t *rx_pages[N_RX_DESC_RINGS][RX_DESC_RING_SIZE];
+ struct list_head rx_spare_list, rx_inuse_list;
+ int rx_spares_needed;
+
+ /* for small packets when copying would be quicker than
+ mapping */
+ struct cas_tiny_count tx_tiny_use[N_TX_RINGS][TX_DESC_RING_SIZE];
+ u8 *tx_tiny_bufs[N_TX_RINGS];
+
+ u32 msg_enable;
+
+ /* N_TX_RINGS must be >= N_RX_DESC_RINGS */
+ struct net_device_stats net_stats[N_TX_RINGS + 1];
+
+ u32 pci_cfg[64 >> 2];
+ u8 pci_revision;
+
+ int phy_type;
+ int phy_addr;
+ u32 phy_id;
+#define CAS_FLAG_1000MB_CAP 0x00000001
+#define CAS_FLAG_REG_PLUS 0x00000002
+#define CAS_FLAG_TARGET_ABORT 0x00000004
+#define CAS_FLAG_SATURN 0x00000008
+#define CAS_FLAG_RXD_POST_MASK 0x000000F0
+#define CAS_FLAG_RXD_POST_SHIFT 4
+#define CAS_FLAG_RXD_POST(x) ((1 << (CAS_FLAG_RXD_POST_SHIFT + (x))) & \
+ CAS_FLAG_RXD_POST_MASK)
+#define CAS_FLAG_ENTROPY_DEV 0x00000100
+#define CAS_FLAG_NO_HW_CSUM 0x00000200
+ u32 cas_flags;
+ int packet_min; /* minimum packet size */
+ int tx_fifo_size;
+ int rx_fifo_size;
+ int rx_pause_off;
+ int rx_pause_on;
+ int crc_size; /* 4 if half-duplex */
+
+ int pci_irq_INTC;
+ int min_frame_size; /* for tx fifo workaround */
+
+ /* page size allocation */
+ int page_size;
+ int page_order;
+ int mtu_stride;
+
+ u32 mac_rx_cfg;
+
+ /* Autoneg & PHY control */
+ int link_cntl;
+ int link_fcntl;
+ enum link_state lstate;
+ struct timer_list link_timer;
+ int timer_ticks;
+ struct work_struct reset_task;
+#if 0
+ atomic_t reset_task_pending;
+#else
+ atomic_t reset_task_pending;
+ atomic_t reset_task_pending_mtu;
+ atomic_t reset_task_pending_spare;
+ atomic_t reset_task_pending_all;
+#endif
+
+ /* Link-down problem workaround */
+#define LINK_TRANSITION_UNKNOWN 0
+#define LINK_TRANSITION_ON_FAILURE 1
+#define LINK_TRANSITION_STILL_FAILED 2
+#define LINK_TRANSITION_LINK_UP 3
+#define LINK_TRANSITION_LINK_CONFIG 4
+#define LINK_TRANSITION_LINK_DOWN 5
+#define LINK_TRANSITION_REQUESTED_RESET 6
+ int link_transition;
+ int link_transition_jiffies_valid;
+ unsigned long link_transition_jiffies;
+
+ /* Tuning */
+ u8 orig_cacheline_size; /* value when loaded */
+#define CAS_PREF_CACHELINE_SIZE 0x20 /* Minimum desired */
+
+ /* Diagnostic counters and state. */
+ int casreg_len; /* reg-space size for dumping */
+ u64 pause_entered;
+ u16 pause_last_time_recvd;
+
+ dma_addr_t block_dvma, tx_tiny_dvma[N_TX_RINGS];
+ struct pci_dev *pdev;
+ struct net_device *dev;
+#if defined(CONFIG_OF)
+ struct device_node *of_node;
+#endif
+
+ /* Firmware Info */
+ u16 fw_load_addr;
+ u32 fw_size;
+ u8 *fw_data;
+};
+
+#define TX_DESC_NEXT(r, x) (((x) + 1) & (TX_DESC_RINGN_SIZE(r) - 1))
+#define RX_DESC_ENTRY(r, x) ((x) & (RX_DESC_RINGN_SIZE(r) - 1))
+#define RX_COMP_ENTRY(r, x) ((x) & (RX_COMP_RINGN_SIZE(r) - 1))
+
+#define TX_BUFF_COUNT(r, x, y) ((x) <= (y) ? ((y) - (x)) : \
+ (TX_DESC_RINGN_SIZE(r) - (x) + (y)))
+
+#define TX_BUFFS_AVAIL(cp, i) ((cp)->tx_old[(i)] <= (cp)->tx_new[(i)] ? \
+ (cp)->tx_old[(i)] + (TX_DESC_RINGN_SIZE(i) - 1) - (cp)->tx_new[(i)] : \
+ (cp)->tx_old[(i)] - (cp)->tx_new[(i)] - 1)
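+/* Worked example for the wraparound arithmetic above, with a 512-entry
+ * ring: tx_old = 510, tx_new = 2 (new has wrapped) gives
+ * 510 - 2 - 1 = 507 free slots; tx_old = tx_new = 0 gives
+ * 0 + 511 - 0 = 511, i.e. one descriptor is always kept unused so a
+ * full ring is distinguishable from an empty one.
+ */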
+
+#define CAS_ALIGN(addr, align) \
+ (((unsigned long) (addr) + ((align) - 1UL)) & ~((align) - 1))
+
+#define RX_FIFO_SIZE 16384
+#define EXPANSION_ROM_SIZE 65536
+
+#define CAS_MC_EXACT_MATCH_SIZE 15
+#define CAS_MC_HASH_SIZE 256
+#define CAS_MC_HASH_MAX (CAS_MC_EXACT_MATCH_SIZE + \
+ CAS_MC_HASH_SIZE)
+
+#define TX_TARGET_ABORT_LEN 0x20
+#define RX_SWIVEL_OFF_VAL 0x2
+#define RX_AE_FREEN_VAL(x) (RX_DESC_RINGN_SIZE(x) >> 1)
+#define RX_AE_COMP_VAL (RX_COMP_RING_SIZE >> 1)
+#define RX_BLANK_INTR_PKT_VAL 0x05
+#define RX_BLANK_INTR_TIME_VAL 0x0F
+#define HP_TCP_THRESH_VAL 1530 /* reduce to enable reassembly */
+
+#define RX_SPARE_COUNT (RX_DESC_RING_SIZE >> 1)
+#define RX_SPARE_RECOVER_VAL (RX_SPARE_COUNT >> 2)
+
+#endif /* _CASSINI_H */
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
new file mode 100644
index 000000000000..d1338885dc8b
--- /dev/null
+++ b/drivers/net/ethernet/sun/niu.c
@@ -0,0 +1,10265 @@
+/* niu.c: Neptune ethernet driver.
+ *
+ * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/ipv6.h>
+#include <linux/log2.h>
+#include <linux/jiffies.h>
+#include <linux/crc32.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <linux/io.h>
+#include <linux/of_device.h>
+
+#include "niu.h"
+
+#define DRV_MODULE_NAME "niu"
+#define DRV_MODULE_VERSION "1.1"
+#define DRV_MODULE_RELDATE "Apr 22, 2010"
+
+static char version[] __devinitdata =
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_DESCRIPTION("NIU ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#ifndef readq
+static u64 readq(void __iomem *reg)
+{
+ return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
+}
+
+static void writeq(u64 val, void __iomem *reg)
+{
+ writel(val & 0xffffffff, reg);
+ writel(val >> 32, reg + 0x4UL);
+}
+#endif
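+/* On platforms without a native readq/writeq (32-bit hosts), the
+ * fallbacks above split each 64-bit register access into two 32-bit
+ * accesses, low word first; the pair is not atomic with respect to
+ * the device.
+ */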
+
+static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, niu_pci_tbl);
+
+#define NIU_TX_TIMEOUT (5 * HZ)
+
+#define nr64(reg) readq(np->regs + (reg))
+#define nw64(reg, val) writeq((val), np->regs + (reg))
+
+#define nr64_mac(reg) readq(np->mac_regs + (reg))
+#define nw64_mac(reg, val) writeq((val), np->mac_regs + (reg))
+
+#define nr64_ipp(reg) readq(np->regs + np->ipp_off + (reg))
+#define nw64_ipp(reg, val) writeq((val), np->regs + np->ipp_off + (reg))
+
+#define nr64_pcs(reg) readq(np->regs + np->pcs_off + (reg))
+#define nw64_pcs(reg, val) writeq((val), np->regs + np->pcs_off + (reg))
+
+#define nr64_xpcs(reg) readq(np->regs + np->xpcs_off + (reg))
+#define nw64_xpcs(reg, val) writeq((val), np->regs + np->xpcs_off + (reg))
+
+#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+
+static int niu_debug;
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "NIU debug level");
+
+#define niu_lock_parent(np, flags) \
+ spin_lock_irqsave(&np->parent->lock, flags)
+#define niu_unlock_parent(np, flags) \
+ spin_unlock_irqrestore(&np->parent->lock, flags)
+
+static int serdes_init_10g_serdes(struct niu *np);
+
+static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
+ u64 bits, int limit, int delay)
+{
+ while (--limit >= 0) {
+ u64 val = nr64_mac(reg);
+
+ if (!(val & bits))
+ break;
+ udelay(delay);
+ }
+ if (limit < 0)
+ return -ENODEV;
+ return 0;
+}
+
+static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
+ u64 bits, int limit, int delay,
+ const char *reg_name)
+{
+ int err;
+
+ nw64_mac(reg, bits);
+ err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
+ if (err)
+ netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
+ (unsigned long long)bits, reg_name,
+ (unsigned long long)nr64_mac(reg));
+ return err;
+}
+
+#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
+({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
+ __niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
+})
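+/* Usage sketch (SOME_RESET_REG/SOME_RESET_BIT are placeholders, not
+ * real register names):
+ *
+ *	err = niu_set_and_wait_clear_mac(np, SOME_RESET_REG,
+ *					 SOME_RESET_BIT, 1000, 100,
+ *					 "SOME_RESET_REG");
+ *
+ * The BUILD_BUG_ON in the wrapper rejects a non-positive LIMIT or a
+ * negative DELAY at compile time, so only constant, sane poll
+ * parameters get through.
+ */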
+
+static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
+ u64 bits, int limit, int delay)
+{
+ while (--limit >= 0) {
+ u64 val = nr64_ipp(reg);
+
+ if (!(val & bits))
+ break;
+ udelay(delay);
+ }
+ if (limit < 0)
+ return -ENODEV;
+ return 0;
+}
+
+static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
+ u64 bits, int limit, int delay,
+ const char *reg_name)
+{
+ int err;
+ u64 val;
+
+ val = nr64_ipp(reg);
+ val |= bits;
+ nw64_ipp(reg, val);
+
+ err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
+ if (err)
+ netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
+ (unsigned long long)bits, reg_name,
+ (unsigned long long)nr64_ipp(reg));
+ return err;
+}
+
+#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
+({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
+ __niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
+})
+
+static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
+ u64 bits, int limit, int delay)
+{
+ while (--limit >= 0) {
+ u64 val = nr64(reg);
+
+ if (!(val & bits))
+ break;
+ udelay(delay);
+ }
+ if (limit < 0)
+ return -ENODEV;
+ return 0;
+}
+
+#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
+({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
+ __niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
+})
+
+static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
+ u64 bits, int limit, int delay,
+ const char *reg_name)
+{
+ int err;
+
+ nw64(reg, bits);
+ err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
+ if (err)
+ netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
+ (unsigned long long)bits, reg_name,
+ (unsigned long long)nr64(reg));
+ return err;
+}
+
+#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
+({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
+ __niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
+})
+
+static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
+{
+ u64 val = (u64) lp->timer;
+
+ if (on)
+ val |= LDG_IMGMT_ARM;
+
+ nw64(LDG_IMGMT(lp->ldg_num), val);
+}
+
+static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
+{
+ unsigned long mask_reg, bits;
+ u64 val;
+
+ if (ldn < 0 || ldn > LDN_MAX)
+ return -EINVAL;
+
+ if (ldn < 64) {
+ mask_reg = LD_IM0(ldn);
+ bits = LD_IM0_MASK;
+ } else {
+ mask_reg = LD_IM1(ldn - 64);
+ bits = LD_IM1_MASK;
+ }
+
+ val = nr64(mask_reg);
+ if (on)
+ val &= ~bits;
+ else
+ val |= bits;
+ nw64(mask_reg, val);
+
+ return 0;
+}
+
+static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
+{
+ struct niu_parent *parent = np->parent;
+ int i;
+
+ for (i = 0; i <= LDN_MAX; i++) {
+ int err;
+
+ if (parent->ldg_map[i] != lp->ldg_num)
+ continue;
+
+ err = niu_ldn_irq_enable(np, i, on);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int niu_enable_interrupts(struct niu *np, int on)
+{
+ int i;
+
+ for (i = 0; i < np->num_ldg; i++) {
+ struct niu_ldg *lp = &np->ldg[i];
+ int err;
+
+ err = niu_enable_ldn_in_ldg(np, lp, on);
+ if (err)
+ return err;
+ }
+ for (i = 0; i < np->num_ldg; i++)
+ niu_ldg_rearm(np, &np->ldg[i], on);
+
+ return 0;
+}
+
+static u32 phy_encode(u32 type, int port)
+{
+ return type << (port * 2);
+}
+
+static u32 phy_decode(u32 val, int port)
+{
+ return (val >> (port * 2)) & PORT_TYPE_MASK;
+}
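+/* Each port's PHY type occupies a 2-bit field in the packed word, so
+ * phy_decode(phy_encode(type, port), port) == type for any type that
+ * fits in PORT_TYPE_MASK.
+ */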
+
+static int mdio_wait(struct niu *np)
+{
+ int limit = 1000;
+ u64 val;
+
+ while (--limit > 0) {
+ val = nr64(MIF_FRAME_OUTPUT);
+ if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
+ return val & MIF_FRAME_OUTPUT_DATA;
+
+ udelay(10);
+ }
+
+ return -ENODEV;
+}
+
+static int mdio_read(struct niu *np, int port, int dev, int reg)
+{
+ int err;
+
+ nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
+ err = mdio_wait(np);
+ if (err < 0)
+ return err;
+
+ nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
+ return mdio_wait(np);
+}
+
+static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
+{
+ int err;
+
+ nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
+ err = mdio_wait(np);
+ if (err < 0)
+ return err;
+
+ nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
+ err = mdio_wait(np);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
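+/* Return convention used throughout: mdio_read() and mii_read() hand
+ * back the 16-bit register value on success (mdio_wait() returns
+ * val & MIF_FRAME_OUTPUT_DATA) or a negative errno, which is why
+ * callers test "err < 0" and then reuse err as the data.
+ */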
+
+static int mii_read(struct niu *np, int port, int reg)
+{
+ nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
+ return mdio_wait(np);
+}
+
+static int mii_write(struct niu *np, int port, int reg, int data)
+{
+ int err;
+
+ nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
+ err = mdio_wait(np);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
+{
+ int err;
+
+ err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_TX_CFG_L(channel),
+ val & 0xffff);
+ if (!err)
+ err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_TX_CFG_H(channel),
+ val >> 16);
+ return err;
+}
+
+static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
+{
+ int err;
+
+ err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_RX_CFG_L(channel),
+ val & 0xffff);
+ if (!err)
+ err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_RX_CFG_H(channel),
+ val >> 16);
+ return err;
+}
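+/* Both helpers above split the 32-bit value across a low/high pair of
+ * 16-bit MDIO registers: e.g. val = 0x12345678 writes 0x5678 to the
+ * _L register and 0x1234 to the _H register.
+ */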
+
+/* Mode is always 10G fiber. */
+static int serdes_init_niu_10g_fiber(struct niu *np)
+{
+ struct niu_link_config *lp = &np->link_config;
+ u32 tx_cfg, rx_cfg;
+ unsigned long i;
+
+ tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
+ rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
+ PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
+ PLL_RX_CFG_EQ_LP_ADAPTIVE);
+
+ if (lp->loopback_mode == LOOPBACK_PHY) {
+ u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
+
+ mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_TEST_CFG_L, test_cfg);
+
+ tx_cfg |= PLL_TX_CFG_ENTEST;
+ rx_cfg |= PLL_RX_CFG_ENTEST;
+ }
+
+ /* Initialize all 4 lanes of the SERDES. */
+ for (i = 0; i < 4; i++) {
+ int err = esr2_set_tx_cfg(np, i, tx_cfg);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < 4; i++) {
+ int err = esr2_set_rx_cfg(np, i, rx_cfg);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int serdes_init_niu_1g_serdes(struct niu *np)
+{
+ struct niu_link_config *lp = &np->link_config;
+ u16 pll_cfg, pll_sts;
+ int max_retry = 100;
+ u64 uninitialized_var(sig), mask, val;
+ u32 tx_cfg, rx_cfg;
+ unsigned long i;
+ int err;
+
+ tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
+ PLL_TX_CFG_RATE_HALF);
+ rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
+ PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
+ PLL_RX_CFG_RATE_HALF);
+
+ if (np->port == 0)
+ rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;
+
+ if (lp->loopback_mode == LOOPBACK_PHY) {
+ u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
+
+ mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_TEST_CFG_L, test_cfg);
+
+ tx_cfg |= PLL_TX_CFG_ENTEST;
+ rx_cfg |= PLL_RX_CFG_ENTEST;
+ }
+
+ /* Initialize PLL for 1G */
+ pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);
+
+ err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_CFG_L, pll_cfg);
+ if (err) {
+ netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
+ np->port, __func__);
+ return err;
+ }
+
+ pll_sts = PLL_CFG_ENPLL;
+
+ err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_STS_L, pll_sts);
+ if (err) {
+ netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
+ np->port, __func__);
+ return err;
+ }
+
+ udelay(200);
+
+ /* Initialize all 4 lanes of the SERDES. */
+ for (i = 0; i < 4; i++) {
+ err = esr2_set_tx_cfg(np, i, tx_cfg);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < 4; i++) {
+ err = esr2_set_rx_cfg(np, i, rx_cfg);
+ if (err)
+ return err;
+ }
+
+ switch (np->port) {
+ case 0:
+ val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
+ mask = val;
+ break;
+
+ case 1:
+ val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
+ mask = val;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ while (max_retry--) {
+ sig = nr64(ESR_INT_SIGNALS);
+ if ((sig & mask) == val)
+ break;
+
+ mdelay(500);
+ }
+
+ if ((sig & mask) != val) {
+ netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
+ np->port, (int)(sig & mask), (int)val);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int serdes_init_niu_10g_serdes(struct niu *np)
+{
+ struct niu_link_config *lp = &np->link_config;
+ u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
+ int max_retry = 100;
+ u64 uninitialized_var(sig), mask, val;
+ unsigned long i;
+ int err;
+
+ tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
+ rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
+ PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
+ PLL_RX_CFG_EQ_LP_ADAPTIVE);
+
+ if (lp->loopback_mode == LOOPBACK_PHY) {
+ u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
+
+ mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_TEST_CFG_L, test_cfg);
+
+ tx_cfg |= PLL_TX_CFG_ENTEST;
+ rx_cfg |= PLL_RX_CFG_ENTEST;
+ }
+
+ /* Initialize PLL for 10G */
+ pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);
+
+ err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
+ if (err) {
+ netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
+ np->port, __func__);
+ return err;
+ }
+
+ pll_sts = PLL_CFG_ENPLL;
+
+ err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
+ if (err) {
+ netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
+ np->port, __func__);
+ return err;
+ }
+
+ udelay(200);
+
+ /* Initialize all 4 lanes of the SERDES. */
+ for (i = 0; i < 4; i++) {
+ err = esr2_set_tx_cfg(np, i, tx_cfg);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < 4; i++) {
+ err = esr2_set_rx_cfg(np, i, rx_cfg);
+ if (err)
+ return err;
+ }
+
+ /* check if serdes is ready */
+
+ switch (np->port) {
+ case 0:
+ mask = ESR_INT_SIGNALS_P0_BITS;
+ val = (ESR_INT_SRDY0_P0 |
+ ESR_INT_DET0_P0 |
+ ESR_INT_XSRDY_P0 |
+ ESR_INT_XDP_P0_CH3 |
+ ESR_INT_XDP_P0_CH2 |
+ ESR_INT_XDP_P0_CH1 |
+ ESR_INT_XDP_P0_CH0);
+ break;
+
+ case 1:
+ mask = ESR_INT_SIGNALS_P1_BITS;
+ val = (ESR_INT_SRDY0_P1 |
+ ESR_INT_DET0_P1 |
+ ESR_INT_XSRDY_P1 |
+ ESR_INT_XDP_P1_CH3 |
+ ESR_INT_XDP_P1_CH2 |
+ ESR_INT_XDP_P1_CH1 |
+ ESR_INT_XDP_P1_CH0);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ while (max_retry--) {
+ sig = nr64(ESR_INT_SIGNALS);
+ if ((sig & mask) == val)
+ break;
+
+ mdelay(500);
+ }
+
+ if ((sig & mask) != val) {
+ pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
+ np->port, (int)(sig & mask), (int)val);
+
+ /* 10G failed, try initializing at 1G */
+ err = serdes_init_niu_1g_serdes(np);
+ if (!err) {
+ np->flags &= ~NIU_FLAGS_10G;
+ np->mac_xcvr = MAC_XCVR_PCS;
+ } else {
+ netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
+ np->port);
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
+{
+ int err;
+
+ err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
+ if (err >= 0) {
+ *val = (err & 0xffff);
+ err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
+ ESR_RXTX_CTRL_H(chan));
+ if (err >= 0)
+ *val |= ((err & 0xffff) << 16);
+ err = 0;
+ }
+ return err;
+}
+
+static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
+{
+ int err;
+
+ err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
+ ESR_GLUE_CTRL0_L(chan));
+ if (err >= 0) {
+ *val = (err & 0xffff);
+ err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
+ ESR_GLUE_CTRL0_H(chan));
+ if (err >= 0) {
+ *val |= ((err & 0xffff) << 16);
+ err = 0;
+ }
+ }
+ return err;
+}
+
+static int esr_read_reset(struct niu *np, u32 *val)
+{
+ int err;
+
+ err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
+ ESR_RXTX_RESET_CTRL_L);
+ if (err >= 0) {
+ *val = (err & 0xffff);
+ err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
+ ESR_RXTX_RESET_CTRL_H);
+ if (err >= 0) {
+ *val |= ((err & 0xffff) << 16);
+ err = 0;
+ }
+ }
+ return err;
+}
+
+static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
+{
+ int err;
+
+ err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
+ ESR_RXTX_CTRL_L(chan), val & 0xffff);
+ if (!err)
+ err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
+ ESR_RXTX_CTRL_H(chan), (val >> 16));
+ return err;
+}
+
+static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
+{
+ int err;
+
+ err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
+ ESR_GLUE_CTRL0_L(chan), val & 0xffff);
+ if (!err)
+ err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
+ ESR_GLUE_CTRL0_H(chan), (val >> 16));
+ return err;
+}
+
+static int esr_reset(struct niu *np)
+{
+ u32 uninitialized_var(reset);
+ int err;
+
+ err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
+ ESR_RXTX_RESET_CTRL_L, 0x0000);
+ if (err)
+ return err;
+ err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
+ ESR_RXTX_RESET_CTRL_H, 0xffff);
+ if (err)
+ return err;
+ udelay(200);
+
+ err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
+ ESR_RXTX_RESET_CTRL_L, 0xffff);
+ if (err)
+ return err;
+ udelay(200);
+
+ err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
+ ESR_RXTX_RESET_CTRL_H, 0x0000);
+ if (err)
+ return err;
+ udelay(200);
+
+ err = esr_read_reset(np, &reset);
+ if (err)
+ return err;
+ if (reset != 0) {
+ netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
+ np->port, reset);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int serdes_init_10g(struct niu *np)
+{
+ struct niu_link_config *lp = &np->link_config;
+ unsigned long ctrl_reg, test_cfg_reg, i;
+ u64 ctrl_val, test_cfg_val, sig, mask, val;
+ int err;
+
+ switch (np->port) {
+ case 0:
+ ctrl_reg = ENET_SERDES_0_CTRL_CFG;
+ test_cfg_reg = ENET_SERDES_0_TEST_CFG;
+ break;
+ case 1:
+ ctrl_reg = ENET_SERDES_1_CTRL_CFG;
+ test_cfg_reg = ENET_SERDES_1_TEST_CFG;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
+ ENET_SERDES_CTRL_SDET_1 |
+ ENET_SERDES_CTRL_SDET_2 |
+ ENET_SERDES_CTRL_SDET_3 |
+ (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
+ (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
+ (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
+ (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
+ (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
+ (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
+ (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
+ (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
+ test_cfg_val = 0;
+
+ if (lp->loopback_mode == LOOPBACK_PHY) {
+ test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
+ ENET_SERDES_TEST_MD_0_SHIFT) |
+ (ENET_TEST_MD_PAD_LOOPBACK <<
+ ENET_SERDES_TEST_MD_1_SHIFT) |
+ (ENET_TEST_MD_PAD_LOOPBACK <<
+ ENET_SERDES_TEST_MD_2_SHIFT) |
+ (ENET_TEST_MD_PAD_LOOPBACK <<
+ ENET_SERDES_TEST_MD_3_SHIFT));
+ }
+
+ nw64(ctrl_reg, ctrl_val);
+ nw64(test_cfg_reg, test_cfg_val);
+
+ /* Initialize all 4 lanes of the SERDES. */
+ for (i = 0; i < 4; i++) {
+ u32 rxtx_ctrl, glue0;
+
+ err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
+ if (err)
+ return err;
+ err = esr_read_glue0(np, i, &glue0);
+ if (err)
+ return err;
+
+ rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
+ rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
+ (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
+
+ glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
+ ESR_GLUE_CTRL0_THCNT |
+ ESR_GLUE_CTRL0_BLTIME);
+ glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
+ (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
+ (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
+ (BLTIME_300_CYCLES <<
+ ESR_GLUE_CTRL0_BLTIME_SHIFT));
+
+ err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
+ if (err)
+ return err;
+ err = esr_write_glue0(np, i, glue0);
+ if (err)
+ return err;
+ }
+
+ err = esr_reset(np);
+ if (err)
+ return err;
+
+ sig = nr64(ESR_INT_SIGNALS);
+ switch (np->port) {
+ case 0:
+ mask = ESR_INT_SIGNALS_P0_BITS;
+ val = (ESR_INT_SRDY0_P0 |
+ ESR_INT_DET0_P0 |
+ ESR_INT_XSRDY_P0 |
+ ESR_INT_XDP_P0_CH3 |
+ ESR_INT_XDP_P0_CH2 |
+ ESR_INT_XDP_P0_CH1 |
+ ESR_INT_XDP_P0_CH0);
+ break;
+
+ case 1:
+ mask = ESR_INT_SIGNALS_P1_BITS;
+ val = (ESR_INT_SRDY0_P1 |
+ ESR_INT_DET0_P1 |
+ ESR_INT_XSRDY_P1 |
+ ESR_INT_XDP_P1_CH3 |
+ ESR_INT_XDP_P1_CH2 |
+ ESR_INT_XDP_P1_CH1 |
+ ESR_INT_XDP_P1_CH0);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if ((sig & mask) != val) {
+ if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
+ np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
+ return 0;
+ }
+ netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
+ np->port, (int)(sig & mask), (int)val);
+ return -ENODEV;
+ }
+ if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
+ np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
+ return 0;
+}
+
+static int serdes_init_1g(struct niu *np)
+{
+ u64 val;
+
+ val = nr64(ENET_SERDES_1_PLL_CFG);
+ val &= ~ENET_SERDES_PLL_FBDIV2;
+ switch (np->port) {
+ case 0:
+ val |= ENET_SERDES_PLL_HRATE0;
+ break;
+ case 1:
+ val |= ENET_SERDES_PLL_HRATE1;
+ break;
+ case 2:
+ val |= ENET_SERDES_PLL_HRATE2;
+ break;
+ case 3:
+ val |= ENET_SERDES_PLL_HRATE3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ nw64(ENET_SERDES_1_PLL_CFG, val);
+
+ return 0;
+}
+
+static int serdes_init_1g_serdes(struct niu *np)
+{
+ struct niu_link_config *lp = &np->link_config;
+ unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
+ u64 ctrl_val, test_cfg_val, sig, mask, val;
+ int err;
+ u64 reset_val, val_rd;
+
+ val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
+ ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
+ ENET_SERDES_PLL_FBDIV0;
+ switch (np->port) {
+ case 0:
+ reset_val = ENET_SERDES_RESET_0;
+ ctrl_reg = ENET_SERDES_0_CTRL_CFG;
+ test_cfg_reg = ENET_SERDES_0_TEST_CFG;
+ pll_cfg = ENET_SERDES_0_PLL_CFG;
+ break;
+ case 1:
+ reset_val = ENET_SERDES_RESET_1;
+ ctrl_reg = ENET_SERDES_1_CTRL_CFG;
+ test_cfg_reg = ENET_SERDES_1_TEST_CFG;
+ pll_cfg = ENET_SERDES_1_PLL_CFG;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
+ ENET_SERDES_CTRL_SDET_1 |
+ ENET_SERDES_CTRL_SDET_2 |
+ ENET_SERDES_CTRL_SDET_3 |
+ (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
+ (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
+ (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
+ (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
+ (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
+ (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
+ (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
+ (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
+ test_cfg_val = 0;
+
+ if (lp->loopback_mode == LOOPBACK_PHY) {
+ test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
+ ENET_SERDES_TEST_MD_0_SHIFT) |
+ (ENET_TEST_MD_PAD_LOOPBACK <<
+ ENET_SERDES_TEST_MD_1_SHIFT) |
+ (ENET_TEST_MD_PAD_LOOPBACK <<
+ ENET_SERDES_TEST_MD_2_SHIFT) |
+ (ENET_TEST_MD_PAD_LOOPBACK <<
+ ENET_SERDES_TEST_MD_3_SHIFT));
+ }
+
+ nw64(ENET_SERDES_RESET, reset_val);
+ mdelay(20);
+ val_rd = nr64(ENET_SERDES_RESET);
+ val_rd &= ~reset_val;
+ nw64(pll_cfg, val);
+ nw64(ctrl_reg, ctrl_val);
+ nw64(test_cfg_reg, test_cfg_val);
+ nw64(ENET_SERDES_RESET, val_rd);
+ mdelay(2000);
+
+ /* Initialize all 4 lanes of the SERDES. */
+ for (i = 0; i < 4; i++) {
+ u32 rxtx_ctrl, glue0;
+
+ err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
+ if (err)
+ return err;
+ err = esr_read_glue0(np, i, &glue0);
+ if (err)
+ return err;
+
+ rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
+ rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
+ (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
+
+ glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
+ ESR_GLUE_CTRL0_THCNT |
+ ESR_GLUE_CTRL0_BLTIME);
+ glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
+ (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
+ (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
+ (BLTIME_300_CYCLES <<
+ ESR_GLUE_CTRL0_BLTIME_SHIFT));
+
+ err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
+ if (err)
+ return err;
+ err = esr_write_glue0(np, i, glue0);
+ if (err)
+ return err;
+ }
+
+ sig = nr64(ESR_INT_SIGNALS);
+ switch (np->port) {
+ case 0:
+ val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
+ mask = val;
+ break;
+
+ case 1:
+ val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
+ mask = val;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if ((sig & mask) != val) {
+ netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
+ np->port, (int)(sig & mask), (int)val);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int link_status_1g_serdes(struct niu *np, int *link_up_p)
+{
+ struct niu_link_config *lp = &np->link_config;
+ int link_up;
+ u64 val;
+ u16 current_speed;
+ unsigned long flags;
+ u8 current_duplex;
+
+ link_up = 0;
+ current_speed = SPEED_INVALID;
+ current_duplex = DUPLEX_INVALID;
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ val = nr64_pcs(PCS_MII_STAT);
+
+ if (val & PCS_MII_STAT_LINK_STATUS) {
+ link_up = 1;
+ current_speed = SPEED_1000;
+ current_duplex = DUPLEX_FULL;
+ }
+
+ lp->active_speed = current_speed;
+ lp->active_duplex = current_duplex;
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ *link_up_p = link_up;
+ return 0;
+}
+
+static int link_status_10g_serdes(struct niu *np, int *link_up_p)
+{
+ unsigned long flags;
+ struct niu_link_config *lp = &np->link_config;
+ int link_up = 0;
+ int link_ok = 1;
+ u64 val, val2;
+ u16 current_speed;
+ u8 current_duplex;
+
+ if (!(np->flags & NIU_FLAGS_10G))
+ return link_status_1g_serdes(np, link_up_p);
+
+ current_speed = SPEED_INVALID;
+ current_duplex = DUPLEX_INVALID;
+ spin_lock_irqsave(&np->lock, flags);
+
+ val = nr64_xpcs(XPCS_STATUS(0));
+ val2 = nr64_mac(XMAC_INTER2);
+ if (val2 & 0x01000000)
+ link_ok = 0;
+
+ if ((val & 0x1000ULL) && link_ok) {
+ link_up = 1;
+ current_speed = SPEED_10000;
+ current_duplex = DUPLEX_FULL;
+ }
+ lp->active_speed = current_speed;
+ lp->active_duplex = current_duplex;
+ spin_unlock_irqrestore(&np->lock, flags);
+ *link_up_p = link_up;
+ return 0;
+}
+
+static int link_status_mii(struct niu *np, int *link_up_p)
+{
+ struct niu_link_config *lp = &np->link_config;
+ int err;
+ int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
+ int supported, advertising, active_speed, active_duplex;
+
+ err = mii_read(np, np->phy_addr, MII_BMCR);
+ if (unlikely(err < 0))
+ return err;
+ bmcr = err;
+
+ err = mii_read(np, np->phy_addr, MII_BMSR);
+ if (unlikely(err < 0))
+ return err;
+ bmsr = err;
+
+ err = mii_read(np, np->phy_addr, MII_ADVERTISE);
+ if (unlikely(err < 0))
+ return err;
+ advert = err;
+
+ err = mii_read(np, np->phy_addr, MII_LPA);
+ if (unlikely(err < 0))
+ return err;
+ lpa = err;
+
+ if (likely(bmsr & BMSR_ESTATEN)) {
+ err = mii_read(np, np->phy_addr, MII_ESTATUS);
+ if (unlikely(err < 0))
+ return err;
+ estatus = err;
+
+ err = mii_read(np, np->phy_addr, MII_CTRL1000);
+ if (unlikely(err < 0))
+ return err;
+ ctrl1000 = err;
+
+ err = mii_read(np, np->phy_addr, MII_STAT1000);
+ if (unlikely(err < 0))
+ return err;
+ stat1000 = err;
+ } else
+ estatus = ctrl1000 = stat1000 = 0;
+
+ supported = 0;
+ if (bmsr & BMSR_ANEGCAPABLE)
+ supported |= SUPPORTED_Autoneg;
+ if (bmsr & BMSR_10HALF)
+ supported |= SUPPORTED_10baseT_Half;
+ if (bmsr & BMSR_10FULL)
+ supported |= SUPPORTED_10baseT_Full;
+ if (bmsr & BMSR_100HALF)
+ supported |= SUPPORTED_100baseT_Half;
+ if (bmsr & BMSR_100FULL)
+ supported |= SUPPORTED_100baseT_Full;
+ if (estatus & ESTATUS_1000_THALF)
+ supported |= SUPPORTED_1000baseT_Half;
+ if (estatus & ESTATUS_1000_TFULL)
+ supported |= SUPPORTED_1000baseT_Full;
+ lp->supported = supported;
+
+ advertising = 0;
+ if (advert & ADVERTISE_10HALF)
+ advertising |= ADVERTISED_10baseT_Half;
+ if (advert & ADVERTISE_10FULL)
+ advertising |= ADVERTISED_10baseT_Full;
+ if (advert & ADVERTISE_100HALF)
+ advertising |= ADVERTISED_100baseT_Half;
+ if (advert & ADVERTISE_100FULL)
+ advertising |= ADVERTISED_100baseT_Full;
+ if (ctrl1000 & ADVERTISE_1000HALF)
+ advertising |= ADVERTISED_1000baseT_Half;
+ if (ctrl1000 & ADVERTISE_1000FULL)
+ advertising |= ADVERTISED_1000baseT_Full;
+
+ if (bmcr & BMCR_ANENABLE) {
+ int neg, neg1000;
+
+ lp->active_autoneg = 1;
+ advertising |= ADVERTISED_Autoneg;
+
+ neg = advert & lpa;
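+ /* The MII_CTRL1000 advertisement bits sit two positions below
+ * the matching MII_STAT1000 link-partner bits (e.g.
+ * ADVERTISE_1000FULL vs LPA_1000FULL), so shifting left by 2
+ * lines them up for the AND. */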
+ neg1000 = (ctrl1000 << 2) & stat1000;
+
+ if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
+ active_speed = SPEED_1000;
+ else if (neg & LPA_100)
+ active_speed = SPEED_100;
+ else if (neg & (LPA_10HALF | LPA_10FULL))
+ active_speed = SPEED_10;
+ else
+ active_speed = SPEED_INVALID;
+
+ if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
+ active_duplex = DUPLEX_FULL;
+ else if (active_speed != SPEED_INVALID)
+ active_duplex = DUPLEX_HALF;
+ else
+ active_duplex = DUPLEX_INVALID;
+ } else {
+ lp->active_autoneg = 0;
+
+ if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
+ active_speed = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ active_speed = SPEED_100;
+ else
+ active_speed = SPEED_10;
+
+ if (bmcr & BMCR_FULLDPLX)
+ active_duplex = DUPLEX_FULL;
+ else
+ active_duplex = DUPLEX_HALF;
+ }
+
+ lp->active_advertising = advertising;
+ lp->active_speed = active_speed;
+ lp->active_duplex = active_duplex;
+ *link_up_p = !!(bmsr & BMSR_LSTATUS);
+
+ return 0;
+}
+
+static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
+{
+ struct niu_link_config *lp = &np->link_config;
+ u16 current_speed, bmsr;
+ unsigned long flags;
+ u8 current_duplex;
+ int err, link_up;
+
+ link_up = 0;
+ current_speed = SPEED_INVALID;
+ current_duplex = DUPLEX_INVALID;
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ err = mii_read(np, np->phy_addr, MII_BMSR);
+ if (err < 0)
+ goto out;
+
+ bmsr = err;
+ if (bmsr & BMSR_LSTATUS) {
+ u16 adv, lpa;
+
+ err = mii_read(np, np->phy_addr, MII_ADVERTISE);
+ if (err < 0)
+ goto out;
+ adv = err;
+
+ err = mii_read(np, np->phy_addr, MII_LPA);
+ if (err < 0)
+ goto out;
+ lpa = err;
+
+ err = mii_read(np, np->phy_addr, MII_ESTATUS);
+ if (err < 0)
+ goto out;
+ link_up = 1;
+ current_speed = SPEED_1000;
+ current_duplex = DUPLEX_FULL;
+
+ }
+ lp->active_speed = current_speed;
+ lp->active_duplex = current_duplex;
+ err = 0;
+
+out:
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ *link_up_p = link_up;
+ return err;
+}
+
+static int link_status_1g(struct niu *np, int *link_up_p)
+{
+ struct niu_link_config *lp = &np->link_config;
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ err = link_status_mii(np, link_up_p);
+ lp->supported |= SUPPORTED_TP;
+ lp->active_advertising |= ADVERTISED_TP;
+
+ spin_unlock_irqrestore(&np->lock, flags);
+ return err;
+}
+
+static int bcm8704_reset(struct niu *np)
+{
+ int err, limit;
+
+ err = mdio_read(np, np->phy_addr,
+ BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
+ if (err < 0 || err == 0xffff)
+ return err;
+ err |= BMCR_RESET;
+ err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
+ MII_BMCR, err);
+ if (err)
+ return err;
+
+ limit = 1000;
+ while (--limit >= 0) {
+ err = mdio_read(np, np->phy_addr,
+ BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
+ if (err < 0)
+ return err;
+ if (!(err & BMCR_RESET))
+ break;
+ }
+ if (limit < 0) {
+ netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
+ np->port, (err & 0xffff));
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/* When written, certain PHY registers need to be read back twice
+ * in order for the bits to settle properly.
+ */
+static int bcm8704_user_dev3_readback(struct niu *np, int reg)
+{
+ int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
+ if (err < 0)
+ return err;
+ err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
+ if (err < 0)
+ return err;
+ return 0;
+}
+
+static int bcm8706_init_user_dev3(struct niu *np)
+{
+ int err;
+
+ err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+ BCM8704_USER_OPT_DIGITAL_CTRL);
+ if (err < 0)
+ return err;
+ err &= ~USER_ODIG_CTRL_GPIOS;
+ err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
+ err |= USER_ODIG_CTRL_RESV2;
+ err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+ BCM8704_USER_OPT_DIGITAL_CTRL, err);
+ if (err)
+ return err;
+
+ mdelay(1000);
+
+ return 0;
+}
+
+static int bcm8704_init_user_dev3(struct niu *np)
+{
+ int err;
+
+ err = mdio_write(np, np->phy_addr,
+ BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
+ (USER_CONTROL_OPTXRST_LVL |
+ USER_CONTROL_OPBIASFLT_LVL |
+ USER_CONTROL_OBTMPFLT_LVL |
+ USER_CONTROL_OPPRFLT_LVL |
+ USER_CONTROL_OPTXFLT_LVL |
+ USER_CONTROL_OPRXLOS_LVL |
+ USER_CONTROL_OPRXFLT_LVL |
+ USER_CONTROL_OPTXON_LVL |
+ (0x3f << USER_CONTROL_RES1_SHIFT)));
+ if (err)
+ return err;
+
+ err = mdio_write(np, np->phy_addr,
+ BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
+ (USER_PMD_TX_CTL_XFP_CLKEN |
+ (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
+ (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
+ USER_PMD_TX_CTL_TSCK_LPWREN));
+ if (err)
+ return err;
+
+ err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
+ if (err)
+ return err;
+ err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
+ if (err)
+ return err;
+
+ err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+ BCM8704_USER_OPT_DIGITAL_CTRL);
+ if (err < 0)
+ return err;
+ err &= ~USER_ODIG_CTRL_GPIOS;
+ err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
+ err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+ BCM8704_USER_OPT_DIGITAL_CTRL, err);
+ if (err)
+ return err;
+
+ mdelay(1000);
+
+ return 0;
+}
+
+static int mrvl88x2011_act_led(struct niu *np, int val)
+{
+ int err;
+
+ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
+ MRVL88X2011_LED_8_TO_11_CTL);
+ if (err < 0)
+ return err;
+
+ err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT, MRVL88X2011_LED_CTL_MASK);
+ err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT, val);
+
+ return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
+ MRVL88X2011_LED_8_TO_11_CTL, err);
+}
+
+static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
+{
+ int err;
+
+ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
+ MRVL88X2011_LED_BLINK_CTL);
+ if (err >= 0) {
+ err &= ~MRVL88X2011_LED_BLKRATE_MASK;
+ err |= (rate << 4);
+
+ err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
+ MRVL88X2011_LED_BLINK_CTL, err);
+ }
+
+ return err;
+}
+
+static int xcvr_init_10g_mrvl88x2011(struct niu *np)
+{
+ int err;
+
+ /* Set LED functions */
+ err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
+ if (err)
+ return err;
+
+ /* led activity */
+ err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
+ if (err)
+ return err;
+
+ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
+ MRVL88X2011_GENERAL_CTL);
+ if (err < 0)
+ return err;
+
+ err |= MRVL88X2011_ENA_XFPREFCLK;
+
+ err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
+ MRVL88X2011_GENERAL_CTL, err);
+ if (err < 0)
+ return err;
+
+ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
+ MRVL88X2011_PMA_PMD_CTL_1);
+ if (err < 0)
+ return err;
+
+ if (np->link_config.loopback_mode == LOOPBACK_MAC)
+ err |= MRVL88X2011_LOOPBACK;
+ else
+ err &= ~MRVL88X2011_LOOPBACK;
+
+ err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
+ MRVL88X2011_PMA_PMD_CTL_1, err);
+ if (err < 0)
+ return err;
+
+ /* Enable PMD */
+ return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
+ MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
+}
+
+static int xcvr_diag_bcm870x(struct niu *np)
+{
+ u16 analog_stat0, tx_alarm_status;
+ int err = 0;
+
+#if 1
+ err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
+ MII_STAT1000);
+ if (err < 0)
+ return err;
+ pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);
+
+ err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
+ if (err < 0)
+ return err;
+ pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);
+
+ err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
+ MII_NWAYTEST);
+ if (err < 0)
+ return err;
+ pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
+#endif
+
+ /* XXX dig this out it might not be so useful XXX */
+ err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+ BCM8704_USER_ANALOG_STATUS0);
+ if (err < 0)
+ return err;
+ err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+ BCM8704_USER_ANALOG_STATUS0);
+ if (err < 0)
+ return err;
+ analog_stat0 = err;
+
+ err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+ BCM8704_USER_TX_ALARM_STATUS);
+ if (err < 0)
+ return err;
+ err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+ BCM8704_USER_TX_ALARM_STATUS);
+ if (err < 0)
+ return err;
+ tx_alarm_status = err;
+
+ if (analog_stat0 != 0x03fc) {
+ if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
+ pr_info("Port %u cable not connected or bad cable\n",
+ np->port);
+ } else if (analog_stat0 == 0x639c) {
+ pr_info("Port %u optical module is bad or missing\n",
+ np->port);
+ }
+ }
+
+ return 0;
+}
+
+static int xcvr_10g_set_lb_bcm870x(struct niu *np)
+{
+ struct niu_link_config *lp = &np->link_config;
+ int err;
+
+ err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
+ MII_BMCR);
+ if (err < 0)
+ return err;
+
+ err &= ~BMCR_LOOPBACK;
+
+ if (lp->loopback_mode == LOOPBACK_MAC)
+ err |= BMCR_LOOPBACK;
+
+ err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
+ MII_BMCR, err);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int xcvr_init_10g_bcm8706(struct niu *np)
+{
+ int err = 0;
+ u64 val;
+
+ if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
+ (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
+ return err;
+
+ val = nr64_mac(XMAC_CONFIG);
+ val &= ~XMAC_CONFIG_LED_POLARITY;
+ val |= XMAC_CONFIG_FORCE_LED_ON;
+ nw64_mac(XMAC_CONFIG, val);
+
+ val = nr64(MIF_CONFIG);
+ val |= MIF_CONFIG_INDIRECT_MODE;
+ nw64(MIF_CONFIG, val);
+
+ err = bcm8704_reset(np);
+ if (err)
+ return err;
+
+ err = xcvr_10g_set_lb_bcm870x(np);
+ if (err)
+ return err;
+
+ err = bcm8706_init_user_dev3(np);
+ if (err)
+ return err;
+
+ err = xcvr_diag_bcm870x(np);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int xcvr_init_10g_bcm8704(struct niu *np)
+{
+ int err;
+
+ err = bcm8704_reset(np);
+ if (err)
+ return err;
+
+ err = bcm8704_init_user_dev3(np);
+ if (err)
+ return err;
+
+ err = xcvr_10g_set_lb_bcm870x(np);
+ if (err)
+ return err;
+
+ err = xcvr_diag_bcm870x(np);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int xcvr_init_10g(struct niu *np)
+{
+ int phy_id, err;
+ u64 val;
+
+ val = nr64_mac(XMAC_CONFIG);
+ val &= ~XMAC_CONFIG_LED_POLARITY;
+ val |= XMAC_CONFIG_FORCE_LED_ON;
+ nw64_mac(XMAC_CONFIG, val);
+
+ /* XXX shared resource, lock parent XXX */
+ val = nr64(MIF_CONFIG);
+ val |= MIF_CONFIG_INDIRECT_MODE;
+ nw64(MIF_CONFIG, val);
+
+ phy_id = phy_decode(np->parent->port_phy, np->port);
+ phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
+
+ /* handle different phy types */
+ switch (phy_id & NIU_PHY_ID_MASK) {
+ case NIU_PHY_ID_MRVL88X2011:
+ err = xcvr_init_10g_mrvl88x2011(np);
+ break;
+
+ default: /* bcom 8704 */
+ err = xcvr_init_10g_bcm8704(np);
+ break;
+ }
+
+ return err;
+}
+
+static int mii_reset(struct niu *np)
+{
+ int limit, err;
+
+ err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
+ if (err)
+ return err;
+
+ limit = 1000;
+ while (--limit >= 0) {
+ udelay(500);
+ err = mii_read(np, np->phy_addr, MII_BMCR);
+ if (err < 0)
+ return err;
+ if (!(err & BMCR_RESET))
+ break;
+ }
+ if (limit < 0) {
+ netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
+ np->port, err);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int xcvr_init_1g_rgmii(struct niu *np)
+{
+ int err;
+ u64 val;
+ u16 bmcr, bmsr, estat;
+
+ val = nr64(MIF_CONFIG);
+ val &= ~MIF_CONFIG_INDIRECT_MODE;
+ nw64(MIF_CONFIG, val);
+
+ err = mii_reset(np);
+ if (err)
+ return err;
+
+ err = mii_read(np, np->phy_addr, MII_BMSR);
+ if (err < 0)
+ return err;
+ bmsr = err;
+
+ estat = 0;
+ if (bmsr & BMSR_ESTATEN) {
+ err = mii_read(np, np->phy_addr, MII_ESTATUS);
+ if (err < 0)
+ return err;
+ estat = err;
+ }
+
+ bmcr = 0;
+ err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
+ if (err)
+ return err;
+
+ if (bmsr & BMSR_ESTATEN) {
+ u16 ctrl1000 = 0;
+
+ if (estat & ESTATUS_1000_TFULL)
+ ctrl1000 |= ADVERTISE_1000FULL;
+ err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
+ if (err)
+ return err;
+ }
+
+ bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);
+
+ err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
+ if (err)
+ return err;
+
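+ /* Read BMCR back twice; as with the bcm8704 helper above, some
+ * PHY registers appear to need a double read-back after a write
+ * for the bits to settle. */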
+ err = mii_read(np, np->phy_addr, MII_BMCR);
+ if (err < 0)
+ return err;
+ bmcr = mii_read(np, np->phy_addr, MII_BMCR);
+
+ err = mii_read(np, np->phy_addr, MII_BMSR);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int mii_init_common(struct niu *np)
+{
+ struct niu_link_config *lp = &np->link_config;
+ u16 bmcr, bmsr, adv, estat;
+ int err;
+
+ err = mii_reset(np);
+ if (err)
+ return err;
+
+ err = mii_read(np, np->phy_addr, MII_BMSR);
+ if (err < 0)
+ return err;
+ bmsr = err;
+
+ estat = 0;
+ if (bmsr & BMSR_ESTATEN) {
+ err = mii_read(np, np->phy_addr, MII_ESTATUS);
+ if (err < 0)
+ return err;
+ estat = err;
+ }
+
+ bmcr = 0;
+ err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
+ if (err)
+ return err;
+
+ if (lp->loopback_mode == LOOPBACK_MAC) {
+ bmcr |= BMCR_LOOPBACK;
+ if (lp->active_speed == SPEED_1000)
+ bmcr |= BMCR_SPEED1000;
+ if (lp->active_duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
+ }
+
+ if (lp->loopback_mode == LOOPBACK_PHY) {
+ u16 aux;
+
+ aux = (BCM5464R_AUX_CTL_EXT_LB |
+ BCM5464R_AUX_CTL_WRITE_1);
+ err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
+ if (err)
+ return err;
+ }
+
+ if (lp->autoneg) {
+ u16 ctrl1000;
+
+ adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
+ if ((bmsr & BMSR_10HALF) &&
+ (lp->advertising & ADVERTISED_10baseT_Half))
+ adv |= ADVERTISE_10HALF;
+ if ((bmsr & BMSR_10FULL) &&
+ (lp->advertising & ADVERTISED_10baseT_Full))
+ adv |= ADVERTISE_10FULL;
+ if ((bmsr & BMSR_100HALF) &&
+ (lp->advertising & ADVERTISED_100baseT_Half))
+ adv |= ADVERTISE_100HALF;
+ if ((bmsr & BMSR_100FULL) &&
+ (lp->advertising & ADVERTISED_100baseT_Full))
+ adv |= ADVERTISE_100FULL;
+ err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
+ if (err)
+ return err;
+
+ if (likely(bmsr & BMSR_ESTATEN)) {
+ ctrl1000 = 0;
+ if ((estat & ESTATUS_1000_THALF) &&
+ (lp->advertising & ADVERTISED_1000baseT_Half))
+ ctrl1000 |= ADVERTISE_1000HALF;
+ if ((estat & ESTATUS_1000_TFULL) &&
+ (lp->advertising & ADVERTISED_1000baseT_Full))
+ ctrl1000 |= ADVERTISE_1000FULL;
+ err = mii_write(np, np->phy_addr,
+ MII_CTRL1000, ctrl1000);
+ if (err)
+ return err;
+ }
+
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ } else {
+ /* !lp->autoneg */
+ int fulldpx;
+
+ if (lp->duplex == DUPLEX_FULL) {
+ bmcr |= BMCR_FULLDPLX;
+ fulldpx = 1;
+ } else if (lp->duplex == DUPLEX_HALF)
+ fulldpx = 0;
+ else
+ return -EINVAL;
+
+ if (lp->speed == SPEED_1000) {
+ /* if X-full requested while not supported, or
+ X-half requested while not supported... */
+ if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
+ (!fulldpx && !(estat & ESTATUS_1000_THALF)))
+ return -EINVAL;
+ bmcr |= BMCR_SPEED1000;
+ } else if (lp->speed == SPEED_100) {
+ if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
+ (!fulldpx && !(bmsr & BMSR_100HALF)))
+ return -EINVAL;
+ bmcr |= BMCR_SPEED100;
+ } else if (lp->speed == SPEED_10) {
+ if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
+ (!fulldpx && !(bmsr & BMSR_10HALF)))
+ return -EINVAL;
+ } else
+ return -EINVAL;
+ }
+
+ err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
+ if (err)
+ return err;
+
+#if 0
+ err = mii_read(np, np->phy_addr, MII_BMCR);
+ if (err < 0)
+ return err;
+ bmcr = err;
+
+ err = mii_read(np, np->phy_addr, MII_BMSR);
+ if (err < 0)
+ return err;
+ bmsr = err;
+
+ pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
+ np->port, bmcr, bmsr);
+#endif
+
+ return 0;
+}
+
+static int xcvr_init_1g(struct niu *np)
+{
+ u64 val;
+
+ /* XXX shared resource, lock parent XXX */
+ val = nr64(MIF_CONFIG);
+ val &= ~MIF_CONFIG_INDIRECT_MODE;
+ nw64(MIF_CONFIG, val);
+
+ return mii_init_common(np);
+}
+
+static int niu_xcvr_init(struct niu *np)
+{
+ const struct niu_phy_ops *ops = np->phy_ops;
+ int err;
+
+ err = 0;
+ if (ops->xcvr_init)
+ err = ops->xcvr_init(np);
+
+ return err;
+}
+
+static int niu_serdes_init(struct niu *np)
+{
+ const struct niu_phy_ops *ops = np->phy_ops;
+ int err;
+
+ err = 0;
+ if (ops->serdes_init)
+ err = ops->serdes_init(np);
+
+ return err;
+}
+
+static void niu_init_xif(struct niu *);
+static void niu_handle_led(struct niu *, int status);
+
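+/* Reconcile the netdev carrier state with the PHY-reported link
+ * state; on link-up the XIF and LEDs are reprogrammed under the
+ * device lock before the carrier is turned on.
+ */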
+static int niu_link_status_common(struct niu *np, int link_up)
+{
+ struct niu_link_config *lp = &np->link_config;
+ struct net_device *dev = np->dev;
+ unsigned long flags;
+
+ if (!netif_carrier_ok(dev) && link_up) {
+ netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
+ lp->active_speed == SPEED_10000 ? "10Gb/sec" :
+ lp->active_speed == SPEED_1000 ? "1Gb/sec" :
+ lp->active_speed == SPEED_100 ? "100Mbit/sec" :
+ "10Mbit/sec",
+ lp->active_duplex == DUPLEX_FULL ? "full" : "half");
+
+ spin_lock_irqsave(&np->lock, flags);
+ niu_init_xif(np);
+ niu_handle_led(np, 1);
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ netif_carrier_on(dev);
+ } else if (netif_carrier_ok(dev) && !link_up) {
+ netif_warn(np, link, dev, "Link is down\n");
+ spin_lock_irqsave(&np->lock, flags);
+ niu_handle_led(np, 0);
+ spin_unlock_irqrestore(&np->lock, flags);
+ netif_carrier_off(dev);
+ }
+
+ return 0;
+}
+
+static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
+{
+ int err, link_up, pma_status, pcs_status;
+
+ link_up = 0;
+
+ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
+ MRVL88X2011_10G_PMD_STATUS_2);
+ if (err < 0)
+ goto out;
+
+ /* Check PMA/PMD Register: 1.0001.2 == 1 */
+ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
+ MRVL88X2011_PMA_PMD_STATUS_1);
+ if (err < 0)
+ goto out;
+
+ pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
+
+ /* Check PMC Register : 3.0001.2 == 1: read twice */
+ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
+ MRVL88X2011_PMA_PMD_STATUS_1);
+ if (err < 0)
+ goto out;
+
+ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
+ MRVL88X2011_PMA_PMD_STATUS_1);
+ if (err < 0)
+ goto out;
+
+ pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
+
+ /* Check XGXS Register : 4.0018.[0-3,12] */
+ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
+ MRVL88X2011_10G_XGXS_LANE_STAT);
+ if (err < 0)
+ goto out;
+
+ if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
+ PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
+ PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
+ 0x800))
+ link_up = (pma_status && pcs_status) ? 1 : 0;
+
+ np->link_config.active_speed = SPEED_10000;
+ np->link_config.active_duplex = DUPLEX_FULL;
+ err = 0;
+out:
+ mrvl88x2011_act_led(np, (link_up ?
+ MRVL88X2011_LED_CTL_PCS_ACT :
+ MRVL88X2011_LED_CTL_OFF));
+
+ *link_up_p = link_up;
+ return err;
+}
+
+static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
+{
+ int err, link_up;
+ link_up = 0;
+
+ err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
+ BCM8704_PMD_RCV_SIGDET);
+ if (err < 0 || err == 0xffff)
+ goto out;
+ if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
+ err = 0;
+ goto out;
+ }
+
+ err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
+ BCM8704_PCS_10G_R_STATUS);
+ if (err < 0)
+ goto out;
+
+ if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
+ err = 0;
+ goto out;
+ }
+
+ err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
+ BCM8704_PHYXS_XGXS_LANE_STAT);
+ if (err < 0)
+ goto out;
+ if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
+ PHYXS_XGXS_LANE_STAT_MAGIC |
+ PHYXS_XGXS_LANE_STAT_PATTEST |
+ PHYXS_XGXS_LANE_STAT_LANE3 |
+ PHYXS_XGXS_LANE_STAT_LANE2 |
+ PHYXS_XGXS_LANE_STAT_LANE1 |
+ PHYXS_XGXS_LANE_STAT_LANE0)) {
+ err = 0;
+ np->link_config.active_speed = SPEED_INVALID;
+ np->link_config.active_duplex = DUPLEX_INVALID;
+ goto out;
+ }
+
+ link_up = 1;
+ np->link_config.active_speed = SPEED_10000;
+ np->link_config.active_duplex = DUPLEX_FULL;
+ err = 0;
+
+out:
+ *link_up_p = link_up;
+ return err;
+}
+
+static int link_status_10g_bcom(struct niu *np, int *link_up_p)
+{
+ int err, link_up;
+
+ link_up = 0;
+
+ err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
+ BCM8704_PMD_RCV_SIGDET);
+ if (err < 0)
+ goto out;
+ if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
+ err = 0;
+ goto out;
+ }
+
+ err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
+ BCM8704_PCS_10G_R_STATUS);
+ if (err < 0)
+ goto out;
+ if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
+ err = 0;
+ goto out;
+ }
+
+ err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
+ BCM8704_PHYXS_XGXS_LANE_STAT);
+ if (err < 0)
+ goto out;
+
+ if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
+ PHYXS_XGXS_LANE_STAT_MAGIC |
+ PHYXS_XGXS_LANE_STAT_LANE3 |
+ PHYXS_XGXS_LANE_STAT_LANE2 |
+ PHYXS_XGXS_LANE_STAT_LANE1 |
+ PHYXS_XGXS_LANE_STAT_LANE0)) {
+ err = 0;
+ goto out;
+ }
+
+ link_up = 1;
+ np->link_config.active_speed = SPEED_10000;
+ np->link_config.active_duplex = DUPLEX_FULL;
+ err = 0;
+
+out:
+ *link_up_p = link_up;
+ return err;
+}
+
+static int link_status_10g(struct niu *np, int *link_up_p)
+{
+ unsigned long flags;
+ int err = -EINVAL;
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
+ int phy_id;
+
+ phy_id = phy_decode(np->parent->port_phy, np->port);
+ phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
+
+ /* handle different phy types */
+ switch (phy_id & NIU_PHY_ID_MASK) {
+ case NIU_PHY_ID_MRVL88X2011:
+ err = link_status_10g_mrvl(np, link_up_p);
+ break;
+
+ default: /* bcom 8704 */
+ err = link_status_10g_bcom(np, link_up_p);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ return err;
+}
+
+static int niu_10g_phy_present(struct niu *np)
+{
+ u64 sig, mask, val;
+
+ sig = nr64(ESR_INT_SIGNALS);
+ switch (np->port) {
+ case 0:
+ mask = ESR_INT_SIGNALS_P0_BITS;
+ val = (ESR_INT_SRDY0_P0 |
+ ESR_INT_DET0_P0 |
+ ESR_INT_XSRDY_P0 |
+ ESR_INT_XDP_P0_CH3 |
+ ESR_INT_XDP_P0_CH2 |
+ ESR_INT_XDP_P0_CH1 |
+ ESR_INT_XDP_P0_CH0);
+ break;
+
+ case 1:
+ mask = ESR_INT_SIGNALS_P1_BITS;
+ val = (ESR_INT_SRDY0_P1 |
+ ESR_INT_DET0_P1 |
+ ESR_INT_XSRDY_P1 |
+ ESR_INT_XDP_P1_CH3 |
+ ESR_INT_XDP_P1_CH2 |
+ ESR_INT_XDP_P1_CH1 |
+ ESR_INT_XDP_P1_CH0);
+ break;
+
+ default:
+ return 0;
+ }
+
+ if ((sig & mask) != val)
+ return 0;
+ return 1;
+}
+
+static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
+{
+ unsigned long flags;
+ int err = 0;
+ int phy_present;
+ int phy_present_prev;
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
+ phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
+ 1 : 0;
+ phy_present = niu_10g_phy_present(np);
+ if (phy_present != phy_present_prev) {
+ /* state change */
+ if (phy_present) {
+ /* A NEM was just plugged in */
+ np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
+ if (np->phy_ops->xcvr_init)
+ err = np->phy_ops->xcvr_init(np);
+ if (err) {
+ err = mdio_read(np, np->phy_addr,
+ BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
+ if (err == 0xffff) {
+ /* No mdio, back-to-back XAUI */
+ goto out;
+ }
+ /* debounce */
+ np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
+ }
+ } else {
+ np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
+ *link_up_p = 0;
+ netif_warn(np, link, np->dev,
+ "Hotplug PHY Removed\n");
+ }
+ }
+out:
+ if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
+ err = link_status_10g_bcm8706(np, link_up_p);
+ if (err == 0xffff) {
+ /* No mdio, back-to-back XAUI: it is C10NEM */
+ *link_up_p = 1;
+ np->link_config.active_speed = SPEED_10000;
+ np->link_config.active_duplex = DUPLEX_FULL;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ return 0;
+}
+
+static int niu_link_status(struct niu *np, int *link_up_p)
+{
+ const struct niu_phy_ops *ops = np->phy_ops;
+ int err;
+
+ err = 0;
+ if (ops->link_status)
+ err = ops->link_status(np, link_up_p);
+
+ return err;
+}
+
+static void niu_timer(unsigned long __opaque)
+{
+ struct niu *np = (struct niu *) __opaque;
+ unsigned long off;
+ int err, link_up;
+
+ err = niu_link_status(np, &link_up);
+ if (!err)
+ niu_link_status_common(np, link_up);
+
+ if (netif_carrier_ok(np->dev))
+ off = 5 * HZ;
+ else
+ off = 1 * HZ;
+ np->timer.expires = jiffies + off;
+
+ add_timer(&np->timer);
+}
+
+static const struct niu_phy_ops phy_ops_10g_serdes = {
+ .serdes_init = serdes_init_10g_serdes,
+ .link_status = link_status_10g_serdes,
+};
+
+static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
+ .serdes_init = serdes_init_niu_10g_serdes,
+ .link_status = link_status_10g_serdes,
+};
+
+static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
+ .serdes_init = serdes_init_niu_1g_serdes,
+ .link_status = link_status_1g_serdes,
+};
+
+static const struct niu_phy_ops phy_ops_1g_rgmii = {
+ .xcvr_init = xcvr_init_1g_rgmii,
+ .link_status = link_status_1g_rgmii,
+};
+
+static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
+ .serdes_init = serdes_init_niu_10g_fiber,
+ .xcvr_init = xcvr_init_10g,
+ .link_status = link_status_10g,
+};
+
+static const struct niu_phy_ops phy_ops_10g_fiber = {
+ .serdes_init = serdes_init_10g,
+ .xcvr_init = xcvr_init_10g,
+ .link_status = link_status_10g,
+};
+
+static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
+ .serdes_init = serdes_init_10g,
+ .xcvr_init = xcvr_init_10g_bcm8706,
+ .link_status = link_status_10g_hotplug,
+};
+
+static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
+ .serdes_init = serdes_init_niu_10g_fiber,
+ .xcvr_init = xcvr_init_10g_bcm8706,
+ .link_status = link_status_10g_hotplug,
+};
+
+static const struct niu_phy_ops phy_ops_10g_copper = {
+ .serdes_init = serdes_init_10g,
+ .link_status = link_status_10g, /* XXX */
+};
+
+static const struct niu_phy_ops phy_ops_1g_fiber = {
+ .serdes_init = serdes_init_1g,
+ .xcvr_init = xcvr_init_1g,
+ .link_status = link_status_1g,
+};
+
+static const struct niu_phy_ops phy_ops_1g_copper = {
+ .xcvr_init = xcvr_init_1g,
+ .link_status = link_status_1g,
+};
+
+struct niu_phy_template {
+ const struct niu_phy_ops *ops;
+ u32 phy_addr_base;
+};
+
+static const struct niu_phy_template phy_template_niu_10g_fiber = {
+ .ops = &phy_ops_10g_fiber_niu,
+ .phy_addr_base = 16,
+};
+
+static const struct niu_phy_template phy_template_niu_10g_serdes = {
+ .ops = &phy_ops_10g_serdes_niu,
+ .phy_addr_base = 0,
+};
+
+static const struct niu_phy_template phy_template_niu_1g_serdes = {
+ .ops = &phy_ops_1g_serdes_niu,
+ .phy_addr_base = 0,
+};
+
+static const struct niu_phy_template phy_template_10g_fiber = {
+ .ops = &phy_ops_10g_fiber,
+ .phy_addr_base = 8,
+};
+
+static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
+ .ops = &phy_ops_10g_fiber_hotplug,
+ .phy_addr_base = 8,
+};
+
+static const struct niu_phy_template phy_template_niu_10g_hotplug = {
+ .ops = &phy_ops_niu_10g_hotplug,
+ .phy_addr_base = 8,
+};
+
+static const struct niu_phy_template phy_template_10g_copper = {
+ .ops = &phy_ops_10g_copper,
+ .phy_addr_base = 10,
+};
+
+static const struct niu_phy_template phy_template_1g_fiber = {
+ .ops = &phy_ops_1g_fiber,
+ .phy_addr_base = 0,
+};
+
+static const struct niu_phy_template phy_template_1g_copper = {
+ .ops = &phy_ops_1g_copper,
+ .phy_addr_base = 0,
+};
+
+static const struct niu_phy_template phy_template_1g_rgmii = {
+ .ops = &phy_ops_1g_rgmii,
+ .phy_addr_base = 0,
+};
+
+static const struct niu_phy_template phy_template_10g_serdes = {
+ .ops = &phy_ops_10g_serdes,
+ .phy_addr_base = 0,
+};
+
+static int niu_atca_port_num[4] = {
+ 0, 0, 11, 10
+};
+
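+/* Program the per-port SERDES control/test/PLL registers and bring
+ * up all four lanes, then check the signal-ready bits; if the 10G
+ * signals never appear, fall back to 1G SERDES mode.
+ */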
+static int serdes_init_10g_serdes(struct niu *np)
+{
+ struct niu_link_config *lp = &np->link_config;
+ unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
+ u64 ctrl_val, test_cfg_val, sig, mask, val;
+
+ switch (np->port) {
+ case 0:
+ ctrl_reg = ENET_SERDES_0_CTRL_CFG;
+ test_cfg_reg = ENET_SERDES_0_TEST_CFG;
+ pll_cfg = ENET_SERDES_0_PLL_CFG;
+ break;
+ case 1:
+ ctrl_reg = ENET_SERDES_1_CTRL_CFG;
+ test_cfg_reg = ENET_SERDES_1_TEST_CFG;
+ pll_cfg = ENET_SERDES_1_PLL_CFG;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
+ ENET_SERDES_CTRL_SDET_1 |
+ ENET_SERDES_CTRL_SDET_2 |
+ ENET_SERDES_CTRL_SDET_3 |
+ (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
+ (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
+ (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
+ (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
+ (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
+ (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
+ (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
+ (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
+ test_cfg_val = 0;
+
+ if (lp->loopback_mode == LOOPBACK_PHY) {
+ test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
+ ENET_SERDES_TEST_MD_0_SHIFT) |
+ (ENET_TEST_MD_PAD_LOOPBACK <<
+ ENET_SERDES_TEST_MD_1_SHIFT) |
+ (ENET_TEST_MD_PAD_LOOPBACK <<
+ ENET_SERDES_TEST_MD_2_SHIFT) |
+ (ENET_TEST_MD_PAD_LOOPBACK <<
+ ENET_SERDES_TEST_MD_3_SHIFT));
+ }
+
+ esr_reset(np);
+ nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
+ nw64(ctrl_reg, ctrl_val);
+ nw64(test_cfg_reg, test_cfg_val);
+
+ /* Initialize all 4 lanes of the SERDES. */
+ for (i = 0; i < 4; i++) {
+ u32 rxtx_ctrl, glue0;
+ int err;
+
+ err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
+ if (err)
+ return err;
+ err = esr_read_glue0(np, i, &glue0);
+ if (err)
+ return err;
+
+ rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
+ rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
+ (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
+
+ glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
+ ESR_GLUE_CTRL0_THCNT |
+ ESR_GLUE_CTRL0_BLTIME);
+ glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
+ (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
+ (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
+ (BLTIME_300_CYCLES <<
+ ESR_GLUE_CTRL0_BLTIME_SHIFT));
+
+ err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
+ if (err)
+ return err;
+ err = esr_write_glue0(np, i, glue0);
+ if (err)
+ return err;
+ }
+
+ sig = nr64(ESR_INT_SIGNALS);
+ switch (np->port) {
+ case 0:
+ mask = ESR_INT_SIGNALS_P0_BITS;
+ val = (ESR_INT_SRDY0_P0 |
+ ESR_INT_DET0_P0 |
+ ESR_INT_XSRDY_P0 |
+ ESR_INT_XDP_P0_CH3 |
+ ESR_INT_XDP_P0_CH2 |
+ ESR_INT_XDP_P0_CH1 |
+ ESR_INT_XDP_P0_CH0);
+ break;
+
+ case 1:
+ mask = ESR_INT_SIGNALS_P1_BITS;
+ val = (ESR_INT_SRDY0_P1 |
+ ESR_INT_DET0_P1 |
+ ESR_INT_XSRDY_P1 |
+ ESR_INT_XDP_P1_CH3 |
+ ESR_INT_XDP_P1_CH2 |
+ ESR_INT_XDP_P1_CH1 |
+ ESR_INT_XDP_P1_CH0);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if ((sig & mask) != val) {
+ int err;
+ err = serdes_init_1g_serdes(np);
+ if (!err) {
+ np->flags &= ~NIU_FLAGS_10G;
+ np->mac_xcvr = MAC_XCVR_PCS;
+ } else {
+ netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
+ np->port);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
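+/* Select the phy_ops template and MDIO address from the platform
+ * type and the 10G/FIBER/XCVR_SERDES flags; the final phy_addr is
+ * the template's base address plus a per-port offset.
+ */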
+static int niu_determine_phy_disposition(struct niu *np)
+{
+ struct niu_parent *parent = np->parent;
+ u8 plat_type = parent->plat_type;
+ const struct niu_phy_template *tp;
+ u32 phy_addr_off = 0;
+
+ if (plat_type == PLAT_TYPE_NIU) {
+ switch (np->flags &
+ (NIU_FLAGS_10G |
+ NIU_FLAGS_FIBER |
+ NIU_FLAGS_XCVR_SERDES)) {
+ case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
+ /* 10G Serdes */
+ tp = &phy_template_niu_10g_serdes;
+ break;
+ case NIU_FLAGS_XCVR_SERDES:
+ /* 1G Serdes */
+ tp = &phy_template_niu_1g_serdes;
+ break;
+ case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
+ /* 10G Fiber */
+ default:
+ if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
+ tp = &phy_template_niu_10g_hotplug;
+ if (np->port == 0)
+ phy_addr_off = 8;
+ if (np->port == 1)
+ phy_addr_off = 12;
+ } else {
+ tp = &phy_template_niu_10g_fiber;
+ phy_addr_off += np->port;
+ }
+ break;
+ }
+ } else {
+ switch (np->flags &
+ (NIU_FLAGS_10G |
+ NIU_FLAGS_FIBER |
+ NIU_FLAGS_XCVR_SERDES)) {
+ case 0:
+ /* 1G copper */
+ tp = &phy_template_1g_copper;
+ if (plat_type == PLAT_TYPE_VF_P0)
+ phy_addr_off = 10;
+ else if (plat_type == PLAT_TYPE_VF_P1)
+ phy_addr_off = 26;
+
+ phy_addr_off += (np->port ^ 0x3);
+ break;
+
+ case NIU_FLAGS_10G:
+ /* 10G copper */
+ tp = &phy_template_10g_copper;
+ break;
+
+ case NIU_FLAGS_FIBER:
+ /* 1G fiber */
+ tp = &phy_template_1g_fiber;
+ break;
+
+ case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
+ /* 10G fiber */
+ tp = &phy_template_10g_fiber;
+ if (plat_type == PLAT_TYPE_VF_P0 ||
+ plat_type == PLAT_TYPE_VF_P1)
+ phy_addr_off = 8;
+ phy_addr_off += np->port;
+ if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
+ tp = &phy_template_10g_fiber_hotplug;
+ if (np->port == 0)
+ phy_addr_off = 8;
+ if (np->port == 1)
+ phy_addr_off = 12;
+ }
+ break;
+
+ case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
+ case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
+ case NIU_FLAGS_XCVR_SERDES:
+ switch (np->port) {
+ case 0:
+ case 1:
+ tp = &phy_template_10g_serdes;
+ break;
+ case 2:
+ case 3:
+ tp = &phy_template_1g_rgmii;
+ break;
+ default:
+ return -EINVAL;
+ }
+ phy_addr_off = niu_atca_port_num[np->port];
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ np->phy_ops = tp->ops;
+ np->phy_addr = tp->phy_addr_base + phy_addr_off;
+
+ return 0;
+}
+
+static int niu_init_link(struct niu *np)
+{
+ struct niu_parent *parent = np->parent;
+ int err, ignore;
+
+ if (parent->plat_type == PLAT_TYPE_NIU) {
+ err = niu_xcvr_init(np);
+ if (err)
+ return err;
+ msleep(200);
+ }
+ err = niu_serdes_init(np);
+ if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY))
+ return err;
+ msleep(200);
+ err = niu_xcvr_init(np);
+ if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY))
+ niu_link_status(np, &ignore);
+ return 0;
+}
+
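+/* The MAC address is programmed as three 16-bit halves; the two
+ * most significant octets land in the ADDR2 register.
+ */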
+static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
+{
+ u16 reg0 = addr[4] << 8 | addr[5];
+ u16 reg1 = addr[2] << 8 | addr[3];
+ u16 reg2 = addr[0] << 8 | addr[1];
+
+ if (np->flags & NIU_FLAGS_XMAC) {
+ nw64_mac(XMAC_ADDR0, reg0);
+ nw64_mac(XMAC_ADDR1, reg1);
+ nw64_mac(XMAC_ADDR2, reg2);
+ } else {
+ nw64_mac(BMAC_ADDR0, reg0);
+ nw64_mac(BMAC_ADDR1, reg1);
+ nw64_mac(BMAC_ADDR2, reg2);
+ }
+}
+
+static int niu_num_alt_addr(struct niu *np)
+{
+ if (np->flags & NIU_FLAGS_XMAC)
+ return XMAC_NUM_ALT_ADDR;
+ else
+ return BMAC_NUM_ALT_ADDR;
+}
+
+static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
+{
+ u16 reg0 = addr[4] << 8 | addr[5];
+ u16 reg1 = addr[2] << 8 | addr[3];
+ u16 reg2 = addr[0] << 8 | addr[1];
+
+ if (index >= niu_num_alt_addr(np))
+ return -EINVAL;
+
+ if (np->flags & NIU_FLAGS_XMAC) {
+ nw64_mac(XMAC_ALT_ADDR0(index), reg0);
+ nw64_mac(XMAC_ALT_ADDR1(index), reg1);
+ nw64_mac(XMAC_ALT_ADDR2(index), reg2);
+ } else {
+ nw64_mac(BMAC_ALT_ADDR0(index), reg0);
+ nw64_mac(BMAC_ALT_ADDR1(index), reg1);
+ nw64_mac(BMAC_ALT_ADDR2(index), reg2);
+ }
+
+ return 0;
+}
+
+static int niu_enable_alt_mac(struct niu *np, int index, int on)
+{
+ unsigned long reg;
+ u64 val, mask;
+
+ if (index >= niu_num_alt_addr(np))
+ return -EINVAL;
+
+ if (np->flags & NIU_FLAGS_XMAC) {
+ reg = XMAC_ADDR_CMPEN;
+ mask = 1 << index;
+ } else {
+ reg = BMAC_ADDR_CMPEN;
+ mask = 1 << (index + 1);
+ }
+
+ val = nr64_mac(reg);
+ if (on)
+ val |= mask;
+ else
+ val &= ~mask;
+ nw64_mac(reg, val);
+
+ return 0;
+}
+
+static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
+ int num, int mac_pref)
+{
+ u64 val = nr64_mac(reg);
+ val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
+ val |= num;
+ if (mac_pref)
+ val |= HOST_INFO_MPR;
+ nw64_mac(reg, val);
+}
+
+static int __set_rdc_table_num(struct niu *np,
+ int xmac_index, int bmac_index,
+ int rdc_table_num, int mac_pref)
+{
+ unsigned long reg;
+
+ if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
+ return -EINVAL;
+ if (np->flags & NIU_FLAGS_XMAC)
+ reg = XMAC_HOST_INFO(xmac_index);
+ else
+ reg = BMAC_HOST_INFO(bmac_index);
+ __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
+ return 0;
+}
+
+static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
+ int mac_pref)
+{
+ return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
+}
+
+static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
+ int mac_pref)
+{
+ return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
+}
+
+static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
+ int table_num, int mac_pref)
+{
+ if (idx >= niu_num_alt_addr(np))
+ return -EINVAL;
+ return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
+}
+
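+/* Recompute the two parity bits of a VLAN table entry: one covers
+ * the port 0/1 fields in the low data byte, the other the port 2/3
+ * fields in the high byte. A parity bit is set when its byte has
+ * odd population, giving even parity over data plus parity bit.
+ */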
+static u64 vlan_entry_set_parity(u64 reg_val)
+{
+ u64 port01_mask;
+ u64 port23_mask;
+
+ port01_mask = 0x00ff;
+ port23_mask = 0xff00;
+
+ if (hweight64(reg_val & port01_mask) & 1)
+ reg_val |= ENET_VLAN_TBL_PARITY0;
+ else
+ reg_val &= ~ENET_VLAN_TBL_PARITY0;
+
+ if (hweight64(reg_val & port23_mask) & 1)
+ reg_val |= ENET_VLAN_TBL_PARITY1;
+ else
+ reg_val &= ~ENET_VLAN_TBL_PARITY1;
+
+ return reg_val;
+}
+
+static void vlan_tbl_write(struct niu *np, unsigned long index,
+ int port, int vpr, int rdc_table)
+{
+ u64 reg_val = nr64(ENET_VLAN_TBL(index));
+
+ reg_val &= ~((ENET_VLAN_TBL_VPR |
+ ENET_VLAN_TBL_VLANRDCTBLN) <<
+ ENET_VLAN_TBL_SHIFT(port));
+ if (vpr)
+ reg_val |= (ENET_VLAN_TBL_VPR <<
+ ENET_VLAN_TBL_SHIFT(port));
+ reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
+
+ reg_val = vlan_entry_set_parity(reg_val);
+
+ nw64(ENET_VLAN_TBL(index), reg_val);
+}
+
+static void vlan_tbl_clear(struct niu *np)
+{
+ int i;
+
+ for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
+ nw64(ENET_VLAN_TBL(i), 0);
+}
+
+static int tcam_wait_bit(struct niu *np, u64 bit)
+{
+ int limit = 1000;
+
+ while (--limit > 0) {
+ if (nr64(TCAM_CTL) & bit)
+ break;
+ udelay(1);
+ }
+ if (limit <= 0)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int tcam_flush(struct niu *np, int index)
+{
+ nw64(TCAM_KEY_0, 0x00);
+ nw64(TCAM_KEY_MASK_0, 0xff);
+ nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
+
+ return tcam_wait_bit(np, TCAM_CTL_STAT);
+}
+
+#if 0
+static int tcam_read(struct niu *np, int index,
+ u64 *key, u64 *mask)
+{
+ int err;
+
+ nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
+ err = tcam_wait_bit(np, TCAM_CTL_STAT);
+ if (!err) {
+ key[0] = nr64(TCAM_KEY_0);
+ key[1] = nr64(TCAM_KEY_1);
+ key[2] = nr64(TCAM_KEY_2);
+ key[3] = nr64(TCAM_KEY_3);
+ mask[0] = nr64(TCAM_KEY_MASK_0);
+ mask[1] = nr64(TCAM_KEY_MASK_1);
+ mask[2] = nr64(TCAM_KEY_MASK_2);
+ mask[3] = nr64(TCAM_KEY_MASK_3);
+ }
+ return err;
+}
+#endif
+
+static int tcam_write(struct niu *np, int index,
+ u64 *key, u64 *mask)
+{
+ nw64(TCAM_KEY_0, key[0]);
+ nw64(TCAM_KEY_1, key[1]);
+ nw64(TCAM_KEY_2, key[2]);
+ nw64(TCAM_KEY_3, key[3]);
+ nw64(TCAM_KEY_MASK_0, mask[0]);
+ nw64(TCAM_KEY_MASK_1, mask[1]);
+ nw64(TCAM_KEY_MASK_2, mask[2]);
+ nw64(TCAM_KEY_MASK_3, mask[3]);
+ nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
+
+ return tcam_wait_bit(np, TCAM_CTL_STAT);
+}
+
+#if 0
+static int tcam_assoc_read(struct niu *np, int index, u64 *data)
+{
+ int err;
+
+ nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
+ err = tcam_wait_bit(np, TCAM_CTL_STAT);
+ if (!err)
+ *data = nr64(TCAM_KEY_1);
+
+ return err;
+}
+#endif
+
+static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
+{
+ nw64(TCAM_KEY_1, assoc_data);
+ nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));
+
+ return tcam_wait_bit(np, TCAM_CTL_STAT);
+}
+
+static void tcam_enable(struct niu *np, int on)
+{
+ u64 val = nr64(FFLP_CFG_1);
+
+ if (on)
+ val &= ~FFLP_CFG_1_TCAM_DIS;
+ else
+ val |= FFLP_CFG_1_TCAM_DIS;
+ nw64(FFLP_CFG_1, val);
+}
+
+static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
+{
+ u64 val = nr64(FFLP_CFG_1);
+
+ val &= ~(FFLP_CFG_1_FFLPINITDONE |
+ FFLP_CFG_1_CAMLAT |
+ FFLP_CFG_1_CAMRATIO);
+ val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
+ val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
+ nw64(FFLP_CFG_1, val);
+
+ val = nr64(FFLP_CFG_1);
+ val |= FFLP_CFG_1_FFLPINITDONE;
+ nw64(FFLP_CFG_1, val);
+}
+
+static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
+ int on)
+{
+ unsigned long reg;
+ u64 val;
+
+ if (class < CLASS_CODE_ETHERTYPE1 ||
+ class > CLASS_CODE_ETHERTYPE2)
+ return -EINVAL;
+
+ reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
+ val = nr64(reg);
+ if (on)
+ val |= L2_CLS_VLD;
+ else
+ val &= ~L2_CLS_VLD;
+ nw64(reg, val);
+
+ return 0;
+}
+
+#if 0
+static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
+ u64 ether_type)
+{
+ unsigned long reg;
+ u64 val;
+
+ if (class < CLASS_CODE_ETHERTYPE1 ||
+ class > CLASS_CODE_ETHERTYPE2 ||
+ (ether_type & ~(u64)0xffff) != 0)
+ return -EINVAL;
+
+ reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
+ val = nr64(reg);
+ val &= ~L2_CLS_ETYPE;
+ val |= (ether_type << L2_CLS_ETYPE_SHIFT);
+ nw64(reg, val);
+
+ return 0;
+}
+#endif
+
+static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
+ int on)
+{
+ unsigned long reg;
+ u64 val;
+
+ if (class < CLASS_CODE_USER_PROG1 ||
+ class > CLASS_CODE_USER_PROG4)
+ return -EINVAL;
+
+ reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
+ val = nr64(reg);
+ if (on)
+ val |= L3_CLS_VALID;
+ else
+ val &= ~L3_CLS_VALID;
+ nw64(reg, val);
+
+ return 0;
+}
+
+static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
+ int ipv6, u64 protocol_id,
+ u64 tos_mask, u64 tos_val)
+{
+ unsigned long reg;
+ u64 val;
+
+ if (class < CLASS_CODE_USER_PROG1 ||
+ class > CLASS_CODE_USER_PROG4 ||
+ (protocol_id & ~(u64)0xff) != 0 ||
+ (tos_mask & ~(u64)0xff) != 0 ||
+ (tos_val & ~(u64)0xff) != 0)
+ return -EINVAL;
+
+ reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
+ val = nr64(reg);
+ val &= ~(L3_CLS_IPVER | L3_CLS_PID |
+ L3_CLS_TOSMASK | L3_CLS_TOS);
+ if (ipv6)
+ val |= L3_CLS_IPVER;
+ val |= (protocol_id << L3_CLS_PID_SHIFT);
+ val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
+ val |= (tos_val << L3_CLS_TOS_SHIFT);
+ nw64(reg, val);
+
+ return 0;
+}
+
+static int tcam_early_init(struct niu *np)
+{
+ unsigned long i;
+ int err;
+
+ tcam_enable(np, 0);
+ tcam_set_lat_and_ratio(np,
+ DEFAULT_TCAM_LATENCY,
+ DEFAULT_TCAM_ACCESS_RATIO);
+ for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
+ err = tcam_user_eth_class_enable(np, i, 0);
+ if (err)
+ return err;
+ }
+ for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
+ err = tcam_user_ip_class_enable(np, i, 0);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int tcam_flush_all(struct niu *np)
+{
+ unsigned long i;
+
+ for (i = 0; i < np->parent->tcam_num_entries; i++) {
+ int err = tcam_flush(np, i);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
+{
+ return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0);
+}
+
+#if 0
+static int hash_read(struct niu *np, unsigned long partition,
+ unsigned long index, unsigned long num_entries,
+ u64 *data)
+{
+ u64 val = hash_addr_regval(index, num_entries);
+ unsigned long i;
+
+ if (partition >= FCRAM_NUM_PARTITIONS ||
+ index + num_entries > FCRAM_SIZE)
+ return -EINVAL;
+
+ nw64(HASH_TBL_ADDR(partition), val);
+ for (i = 0; i < num_entries; i++)
+ data[i] = nr64(HASH_TBL_DATA(partition));
+
+ return 0;
+}
+#endif
+
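+/* FCRAM hash table access: program the partition's address register
+ * once, then stream the entries through the data register. Each
+ * entry occupies 8 bytes of FCRAM, hence the num_entries * 8 bound
+ * check against FCRAM_SIZE.
+ */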
+static int hash_write(struct niu *np, unsigned long partition,
+ unsigned long index, unsigned long num_entries,
+ u64 *data)
+{
+ u64 val = hash_addr_regval(index, num_entries);
+ unsigned long i;
+
+ if (partition >= FCRAM_NUM_PARTITIONS ||
+ index + (num_entries * 8) > FCRAM_SIZE)
+ return -EINVAL;
+
+ nw64(HASH_TBL_ADDR(partition), val);
+ for (i = 0; i < num_entries; i++)
+ nw64(HASH_TBL_DATA(partition), data[i]);
+
+ return 0;
+}
+
+static void fflp_reset(struct niu *np)
+{
+ u64 val;
+
+ nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
+ udelay(10);
+ nw64(FFLP_CFG_1, 0);
+
+ val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
+ nw64(FFLP_CFG_1, val);
+}
+
+static void fflp_set_timings(struct niu *np)
+{
+ u64 val = nr64(FFLP_CFG_1);
+
+ val &= ~FFLP_CFG_1_FFLPINITDONE;
+ val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
+ nw64(FFLP_CFG_1, val);
+
+ val = nr64(FFLP_CFG_1);
+ val |= FFLP_CFG_1_FFLPINITDONE;
+ nw64(FFLP_CFG_1, val);
+
+ val = nr64(FCRAM_REF_TMR);
+ val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
+ val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
+ val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
+ nw64(FCRAM_REF_TMR, val);
+}
+
+static int fflp_set_partition(struct niu *np, u64 partition,
+ u64 mask, u64 base, int enable)
+{
+ unsigned long reg;
+ u64 val;
+
+ if (partition >= FCRAM_NUM_PARTITIONS ||
+ (mask & ~(u64)0x1f) != 0 ||
+ (base & ~(u64)0x1f) != 0)
+ return -EINVAL;
+
+ reg = FLW_PRT_SEL(partition);
+
+ val = nr64(reg);
+ val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
+ val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
+ val |= (base << FLW_PRT_SEL_BASE_SHIFT);
+ if (enable)
+ val |= FLW_PRT_SEL_EXT;
+ nw64(reg, val);
+
+ return 0;
+}
+
+static int fflp_disable_all_partitions(struct niu *np)
+{
+ unsigned long i;
+
+ for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
+ int err = fflp_set_partition(np, i, 0, 0, 0);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static void fflp_llcsnap_enable(struct niu *np, int on)
+{
+ u64 val = nr64(FFLP_CFG_1);
+
+ if (on)
+ val |= FFLP_CFG_1_LLCSNAP;
+ else
+ val &= ~FFLP_CFG_1_LLCSNAP;
+ nw64(FFLP_CFG_1, val);
+}
+
+static void fflp_errors_enable(struct niu *np, int on)
+{
+ u64 val = nr64(FFLP_CFG_1);
+
+ if (on)
+ val &= ~FFLP_CFG_1_ERRORDIS;
+ else
+ val |= FFLP_CFG_1_ERRORDIS;
+ nw64(FFLP_CFG_1, val);
+}
+
+static int fflp_hash_clear(struct niu *np)
+{
+ struct fcram_hash_ipv4 ent;
+ unsigned long i;
+
+ /* IPV4 hash entry with valid bit clear, rest is don't care. */
+ memset(&ent, 0, sizeof(ent));
+ ent.header = HASH_HEADER_EXT;
+
+ for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
+ int err = hash_write(np, 0, i, 1, (u64 *) &ent);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int fflp_early_init(struct niu *np)
+{
+ struct niu_parent *parent;
+ unsigned long flags;
+ int err;
+
+ niu_lock_parent(np, flags);
+
+ parent = np->parent;
+ err = 0;
+ if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
+ if (np->parent->plat_type != PLAT_TYPE_NIU) {
+ fflp_reset(np);
+ fflp_set_timings(np);
+ err = fflp_disable_all_partitions(np);
+ if (err) {
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "fflp_disable_all_partitions failed, err=%d\n",
+ err);
+ goto out;
+ }
+ }
+
+ err = tcam_early_init(np);
+ if (err) {
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "tcam_early_init failed, err=%d\n", err);
+ goto out;
+ }
+ fflp_llcsnap_enable(np, 1);
+ fflp_errors_enable(np, 0);
+ nw64(H1POLY, 0);
+ nw64(H2POLY, 0);
+
+ err = tcam_flush_all(np);
+ if (err) {
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "tcam_flush_all failed, err=%d\n", err);
+ goto out;
+ }
+ if (np->parent->plat_type != PLAT_TYPE_NIU) {
+ err = fflp_hash_clear(np);
+ if (err) {
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "fflp_hash_clear failed, err=%d\n",
+ err);
+ goto out;
+ }
+ }
+
+ vlan_tbl_clear(np);
+
+ parent->flags |= PARENT_FLGS_CLS_HWINIT;
+ }
+out:
+ niu_unlock_parent(np, flags);
+ return err;
+}
+
+static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
+{
+ if (class_code < CLASS_CODE_USER_PROG1 ||
+ class_code > CLASS_CODE_SCTP_IPV6)
+ return -EINVAL;
+
+ nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
+ return 0;
+}
+
+static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
+{
+ if (class_code < CLASS_CODE_USER_PROG1 ||
+ class_code > CLASS_CODE_SCTP_IPV6)
+ return -EINVAL;
+
+ nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
+ return 0;
+}
+
+/* Entries for the ports are interleaved in the TCAM */
+static u16 tcam_get_index(struct niu *np, u16 idx)
+{
+ /* One entry reserved for IP fragment rule */
+ if (idx >= (np->clas.tcam_sz - 1))
+ idx = 0;
+ return np->clas.tcam_top + ((idx+1) * np->parent->num_ports);
+}
+
+static u16 tcam_get_size(struct niu *np)
+{
+ /* One entry reserved for IP fragment rule */
+ return np->clas.tcam_sz - 1;
+}
+
+static u16 tcam_get_valid_entry_cnt(struct niu *np)
+{
+ /* One entry reserved for IP fragment rule */
+ return np->clas.tcam_valid_entries - 1;
+}
+
+static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
+ u32 offset, u32 size)
+{
+ int i = skb_shinfo(skb)->nr_frags;
+
+ __skb_fill_page_desc(skb, i, page, offset, size);
+
+ skb->len += size;
+ skb->data_len += size;
+ skb->truesize += size;
+
+ skb_shinfo(skb)->nr_frags = i + 1;
+}
+
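+/* Hash a buffer DMA address into the rxhash page table: drop the
+ * in-page offset, fold the upper bits down, and mask to the table
+ * size (MAX_RBR_RING_SIZE buckets).
+ */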
+static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
+{
+ a >>= PAGE_SHIFT;
+ a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
+
+ return a & (MAX_RBR_RING_SIZE - 1);
+}
+
+static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
+ struct page ***link)
+{
+ unsigned int h = niu_hash_rxaddr(rp, addr);
+ struct page *p, **pp;
+
+ addr &= PAGE_MASK;
+ pp = &rp->rxhash[h];
+ for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
+ if (p->index == addr) {
+ *link = pp;
+ goto found;
+ }
+ }
+ BUG();
+
+found:
+ return p;
+}
+
+static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
+{
+ unsigned int h = niu_hash_rxaddr(rp, base);
+
+ page->index = base;
+ page->mapping = (struct address_space *) rp->rxhash[h];
+ rp->rxhash[h] = page;
+}
+
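+/* Carve one freshly mapped page into rbr_blocks_per_page receive
+ * blocks and post them to the RBR starting at start_index. The
+ * extra page references taken here let each block be released
+ * independently later.
+ */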
+static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
+ gfp_t mask, int start_index)
+{
+ struct page *page;
+ u64 addr;
+ int i;
+
+ page = alloc_page(mask);
+ if (!page)
+ return -ENOMEM;
+
+ addr = np->ops->map_page(np->device, page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ niu_hash_page(rp, page, addr);
+ if (rp->rbr_blocks_per_page > 1)
+ atomic_add(rp->rbr_blocks_per_page - 1,
+ &compound_head(page)->_count);
+
+ for (i = 0; i < rp->rbr_blocks_per_page; i++) {
+ __le32 *rbr = &rp->rbr[start_index + i];
+
+ *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
+ addr += rp->rbr_block_size;
+ }
+
+ return 0;
+}
+
+static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
+{
+ int index = rp->rbr_index;
+
+ rp->rbr_pending++;
+ if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
+ int err = niu_rbr_add_page(np, rp, mask, index);
+
+ if (unlikely(err)) {
+ rp->rbr_pending--;
+ return;
+ }
+
+ rp->rbr_index += rp->rbr_blocks_per_page;
+ BUG_ON(rp->rbr_index > rp->rbr_table_size);
+ if (rp->rbr_index == rp->rbr_table_size)
+ rp->rbr_index = 0;
+
+ if (rp->rbr_pending >= rp->rbr_kick_thresh) {
+ nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
+ rp->rbr_pending = 0;
+ }
+ }
+}
+
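+/* Consume all RCR entries of a packet we cannot deliver (no skb
+ * available), releasing each buffer page for which this was the
+ * last block, and return how many RCR entries were eaten.
+ */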
+static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
+{
+ unsigned int index = rp->rcr_index;
+ int num_rcr = 0;
+
+ rp->rx_dropped++;
+ while (1) {
+ struct page *page, **link;
+ u64 addr, val;
+ u32 rcr_size;
+
+ num_rcr++;
+
+ val = le64_to_cpup(&rp->rcr[index]);
+ addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
+ RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
+ page = niu_find_rxpage(rp, addr, &link);
+
+ rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
+ RCR_ENTRY_PKTBUFSZ_SHIFT];
+ if ((page->index + PAGE_SIZE) - rcr_size == addr) {
+ *link = (struct page *) page->mapping;
+ np->ops->unmap_page(np->device, page->index,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ page->index = 0;
+ page->mapping = NULL;
+ __free_page(page);
+ rp->rbr_refill_pending++;
+ }
+
+ index = NEXT_RCR(rp, index);
+ if (!(val & RCR_ENTRY_MULTI))
+ break;
+ }
+ rp->rcr_index = index;
+
+ return num_rcr;
+}
+
+static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
+ struct rx_ring_info *rp)
+{
+ unsigned int index = rp->rcr_index;
+ struct rx_pkt_hdr1 *rh;
+ struct sk_buff *skb;
+ int len, num_rcr;
+
+ skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
+ if (unlikely(!skb))
+ return niu_rx_pkt_ignore(np, rp);
+
+ num_rcr = 0;
+ while (1) {
+ struct page *page, **link;
+ u32 rcr_size, append_size;
+ u64 addr, val, off;
+
+ num_rcr++;
+
+ val = le64_to_cpup(&rp->rcr[index]);
+
+ len = (val & RCR_ENTRY_L2_LEN) >>
+ RCR_ENTRY_L2_LEN_SHIFT;
+ len -= ETH_FCS_LEN;
+
+ addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
+ RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
+ page = niu_find_rxpage(rp, addr, &link);
+
+ rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
+ RCR_ENTRY_PKTBUFSZ_SHIFT];
+
+ off = addr & ~PAGE_MASK;
+ append_size = rcr_size;
+ if (num_rcr == 1) {
+ int ptype;
+
+ ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
+ if ((ptype == RCR_PKT_TYPE_TCP ||
+ ptype == RCR_PKT_TYPE_UDP) &&
+ !(val & (RCR_ENTRY_NOPORT |
+ RCR_ENTRY_ERROR)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+ } else if (!(val & RCR_ENTRY_MULTI))
+ append_size = len - skb->len;
+
+ niu_rx_skb_append(skb, page, off, append_size);
+ if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
+ *link = (struct page *) page->mapping;
+ np->ops->unmap_page(np->device, page->index,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ page->index = 0;
+ page->mapping = NULL;
+ rp->rbr_refill_pending++;
+ } else
+ get_page(page);
+
+ index = NEXT_RCR(rp, index);
+ if (!(val & RCR_ENTRY_MULTI))
+ break;
+ }
+ rp->rcr_index = index;
+
+ len += sizeof(*rh);
+ len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
+ __pskb_pull_tail(skb, len);
+
+ rh = (struct rx_pkt_hdr1 *) skb->data;
+ if (np->dev->features & NETIF_F_RXHASH)
+ skb->rxhash = ((u32)rh->hashval2_0 << 24 |
+ (u32)rh->hashval2_1 << 16 |
+ (u32)rh->hashval1_1 << 8 |
+ (u32)rh->hashval1_2 << 0);
+ skb_pull(skb, sizeof(*rh));
+
+ rp->rx_packets++;
+ rp->rx_bytes += skb->len;
+
+ skb->protocol = eth_type_trans(skb, np->dev);
+ skb_record_rx_queue(skb, rp->rx_channel);
+ napi_gro_receive(napi, skb);
+
+ return num_rcr;
+}
+
+static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
+{
+ int blocks_per_page = rp->rbr_blocks_per_page;
+ int err, index = rp->rbr_index;
+
+ err = 0;
+ while (index < (rp->rbr_table_size - blocks_per_page)) {
+ err = niu_rbr_add_page(np, rp, mask, index);
+ if (err)
+ break;
+
+ index += blocks_per_page;
+ }
+
+ rp->rbr_index = index;
+ return err;
+}
+
+static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
+{
+ int i;
+
+ for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
+ struct page *page;
+
+ page = rp->rxhash[i];
+ while (page) {
+ struct page *next = (struct page *) page->mapping;
+ u64 base = page->index;
+
+ np->ops->unmap_page(np->device, base, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ page->index = 0;
+ page->mapping = NULL;
+
+ __free_page(page);
+
+ page = next;
+ }
+ }
+
+ for (i = 0; i < rp->rbr_table_size; i++)
+ rp->rbr[i] = cpu_to_le32(0);
+ rp->rbr_index = 0;
+}
+
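+/* Reclaim one completed TX packet: unmap the head and all fragment
+ * buffers, advance past the descriptors they used (one descriptor
+ * covers at most MAX_TX_DESC_LEN bytes), free the skb, and return
+ * the new consumer index.
+ */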
+static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
+{
+ struct tx_buff_info *tb = &rp->tx_buffs[idx];
+ struct sk_buff *skb = tb->skb;
+ struct tx_pkt_hdr *tp;
+ u64 tx_flags;
+ int i, len;
+
+ tp = (struct tx_pkt_hdr *) skb->data;
+ tx_flags = le64_to_cpup(&tp->flags);
+
+ rp->tx_packets++;
+ rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
+ ((tx_flags & TXHDR_PAD) / 2));
+
+ len = skb_headlen(skb);
+ np->ops->unmap_single(np->device, tb->mapping,
+ len, DMA_TO_DEVICE);
+
+ if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
+ rp->mark_pending--;
+
+ tb->skb = NULL;
+ do {
+ idx = NEXT_TX(rp, idx);
+ len -= MAX_TX_DESC_LEN;
+ } while (len > 0);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ tb = &rp->tx_buffs[idx];
+ BUG_ON(tb->skb != NULL);
+ np->ops->unmap_page(np->device, tb->mapping,
+ skb_shinfo(skb)->frags[i].size,
+ DMA_TO_DEVICE);
+ idx = NEXT_TX(rp, idx);
+ }
+
+ dev_kfree_skb(skb);
+
+ return idx;
+}
+
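+/* Wake a stopped TX queue once at least a quarter of the ring is
+ * free again; the stop/wake race with the xmit path is resolved by
+ * re-checking under the tx queue lock.
+ */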
+#define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4)
+
+static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
+{
+ struct netdev_queue *txq;
+ u16 pkt_cnt, tmp;
+ int cons, index;
+ u64 cs;
+
+ index = (rp - np->tx_rings);
+ txq = netdev_get_tx_queue(np->dev, index);
+
+ cs = rp->tx_cs;
+ if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
+ goto out;
+
+ tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
+ pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
+ (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
+
+ rp->last_pkt_cnt = tmp;
+
+ cons = rp->cons;
+
+ netif_printk(np, tx_done, KERN_DEBUG, np->dev,
+ "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
+
+ while (pkt_cnt--)
+ cons = release_tx_packet(np, rp, cons);
+
+ rp->cons = cons;
+ smp_mb();
+
+out:
+ if (unlikely(netif_tx_queue_stopped(txq) &&
+ (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
+ __netif_tx_lock(txq, smp_processor_id());
+ if (netif_tx_queue_stopped(txq) &&
+ (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock(txq);
+ }
+}
+
+static inline void niu_sync_rx_discard_stats(struct niu *np,
+ struct rx_ring_info *rp,
+ const int limit)
+{
+ /* This elaborate scheme is needed for reading the RX discard
+ * counters, as they are only 16-bit and can overflow quickly,
+ * and because the overflow indication bit is not usable:
+ * the counter value does not wrap, but saturates at the max
+ * value 0xFFFF.
+ *
+ * In theory and in practice, counters can be lost between
+ * the nr64() read and the nw64() clear. For this reason,
+ * the number of nw64() counter clearings is limited/reduced
+ * through the limit parameter.
+ */
+ int rx_channel = rp->rx_channel;
+ u32 misc, wred;
+
+ /* RXMISC (Receive Miscellaneous Discard Count), covers the
+ * following discard events: IPP (Input Port Process),
+ * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive
+ * Block Ring) prefetch buffer is empty.
+ */
+ misc = nr64(RXMISC(rx_channel));
+ if (unlikely((misc & RXMISC_COUNT) > limit)) {
+ nw64(RXMISC(rx_channel), 0);
+ rp->rx_errors += misc & RXMISC_COUNT;
+
+ if (unlikely(misc & RXMISC_OFLOW))
+ dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
+ rx_channel);
+
+ netif_printk(np, rx_err, KERN_DEBUG, np->dev,
+ "rx-%d: MISC drop=%u over=%u\n",
+ rx_channel, misc, misc-limit);
+ }
+
+ /* WRED (Weighted Random Early Discard) by hardware */
+ wred = nr64(RED_DIS_CNT(rx_channel));
+ if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
+ nw64(RED_DIS_CNT(rx_channel), 0);
+ rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
+
+ if (unlikely(wred & RED_DIS_CNT_OFLOW))
+ dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
+
+ netif_printk(np, rx_err, KERN_DEBUG, np->dev,
+ "rx-%d: WRED drop=%u over=%u\n",
+ rx_channel, wred, wred-limit);
+ }
+}
+
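+/* Service up to budget packets from this channel's RCR, refill the
+ * RBR in page-sized batches, then acknowledge the DMA status with
+ * the packet and pointer counts consumed.
+ */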
+static int niu_rx_work(struct napi_struct *napi, struct niu *np,
+ struct rx_ring_info *rp, int budget)
+{
+ int qlen, rcr_done = 0, work_done = 0;
+ struct rxdma_mailbox *mbox = rp->mbox;
+ u64 stat;
+
+#if 1
+ stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
+ qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
+#else
+ stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
+ qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
+#endif
+ mbox->rx_dma_ctl_stat = 0;
+ mbox->rcrstat_a = 0;
+
+ netif_printk(np, rx_status, KERN_DEBUG, np->dev,
+ "%s(chan[%d]), stat[%llx] qlen=%d\n",
+ __func__, rp->rx_channel, (unsigned long long)stat, qlen);
+
+ rcr_done = work_done = 0;
+ qlen = min(qlen, budget);
+ while (work_done < qlen) {
+ rcr_done += niu_process_rx_pkt(napi, np, rp);
+ work_done++;
+ }
+
+ if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
+ unsigned int i;
+
+ for (i = 0; i < rp->rbr_refill_pending; i++)
+ niu_rbr_refill(np, rp, GFP_ATOMIC);
+ rp->rbr_refill_pending = 0;
+ }
+
+ stat = (RX_DMA_CTL_STAT_MEX |
+ ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
+ ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));
+
+ nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
+
+ /* Only sync discards stats when qlen indicate potential for drops */
+ if (qlen > 10)
+ niu_sync_rx_discard_stats(np, rp, 0x7FFF);
+
+ return work_done;
+}
+
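+/* v0 packs the per-channel interrupt bits: the upper 32 bits flag
+ * TX channels, the lower 32 bits RX channels. Service every flagged
+ * ring, then unmask each ring's logical device.
+ */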
+static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
+{
+ u64 v0 = lp->v0;
+ u32 tx_vec = (v0 >> 32);
+ u32 rx_vec = (v0 & 0xffffffff);
+ int i, work_done = 0;
+
+ netif_printk(np, intr, KERN_DEBUG, np->dev,
+ "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);
+
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &np->tx_rings[i];
+ if (tx_vec & (1 << rp->tx_channel))
+ niu_tx_work(np, rp);
+ nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
+ }
+
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &np->rx_rings[i];
+
+ if (rx_vec & (1 << rp->rx_channel)) {
+ int this_work_done;
+
+ this_work_done = niu_rx_work(&lp->napi, np, rp,
+ budget);
+
+ budget -= this_work_done;
+ work_done += this_work_done;
+ }
+ nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
+ }
+
+ return work_done;
+}
+
+static int niu_poll(struct napi_struct *napi, int budget)
+{
+ struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
+ struct niu *np = lp->np;
+ int work_done;
+
+ work_done = niu_poll_core(np, lp, budget);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ niu_ldg_rearm(np, lp, 1);
+ }
+ return work_done;
+}
+
+static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
+ u64 stat)
+{
+ netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
+
+ if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
+ pr_cont("RBR_TMOUT ");
+ if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
+ pr_cont("RSP_CNT ");
+ if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
+ pr_cont("BYTE_EN_BUS ");
+ if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
+ pr_cont("RSP_DAT ");
+ if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
+ pr_cont("RCR_ACK ");
+ if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
+ pr_cont("RCR_SHA_PAR ");
+ if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
+ pr_cont("RBR_PRE_PAR ");
+ if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
+ pr_cont("CONFIG ");
+ if (stat & RX_DMA_CTL_STAT_RCRINCON)
+ pr_cont("RCRINCON ");
+ if (stat & RX_DMA_CTL_STAT_RCRFULL)
+ pr_cont("RCRFULL ");
+ if (stat & RX_DMA_CTL_STAT_RBRFULL)
+ pr_cont("RBRFULL ");
+ if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
+ pr_cont("RBRLOGPAGE ");
+ if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
+ pr_cont("CFIGLOGPAGE ");
+ if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
+ pr_cont("DC_FIDO ");
+
+ pr_cont(")\n");
+}
+
+static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
+{
+ u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
+ int err = 0;
+
+ if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
+ RX_DMA_CTL_STAT_PORT_FATAL))
+ err = -EINVAL;
+
+ if (err) {
+ netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
+ rp->rx_channel,
+ (unsigned long long) stat);
+
+ niu_log_rxchan_errors(np, rp, stat);
+ }
+
+ nw64(RX_DMA_CTL_STAT(rp->rx_channel),
+ stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
+
+ return err;
+}
+
+static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
+ u64 cs)
+{
+ netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
+
+ if (cs & TX_CS_MBOX_ERR)
+ pr_cont("MBOX ");
+ if (cs & TX_CS_PKT_SIZE_ERR)
+ pr_cont("PKT_SIZE ");
+ if (cs & TX_CS_TX_RING_OFLOW)
+ pr_cont("TX_RING_OFLOW ");
+ if (cs & TX_CS_PREF_BUF_PAR_ERR)
+ pr_cont("PREF_BUF_PAR ");
+ if (cs & TX_CS_NACK_PREF)
+ pr_cont("NACK_PREF ");
+ if (cs & TX_CS_NACK_PKT_RD)
+ pr_cont("NACK_PKT_RD ");
+ if (cs & TX_CS_CONF_PART_ERR)
+ pr_cont("CONF_PART ");
+ if (cs & TX_CS_PKT_PRT_ERR)
+ pr_cont("PKT_PTR ");
+
+ pr_cont(")\n");
+}
+
+static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
+{
+ u64 cs, logh, logl;
+
+ cs = nr64(TX_CS(rp->tx_channel));
+ logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
+ logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
+
+ netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
+ rp->tx_channel,
+ (unsigned long long)cs,
+ (unsigned long long)logh,
+ (unsigned long long)logl);
+
+ niu_log_txchan_errors(np, rp, cs);
+
+ return -ENODEV;
+}
+
+static int niu_mif_interrupt(struct niu *np)
+{
+ u64 mif_status = nr64(MIF_STATUS);
+ int phy_mdint = 0;
+
+ if (np->flags & NIU_FLAGS_XMAC) {
+ u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
+
+ if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
+ phy_mdint = 1;
+ }
+
+ netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
+ (unsigned long long)mif_status, phy_mdint);
+
+ return -ENODEV;
+}
+
+static void niu_xmac_interrupt(struct niu *np)
+{
+ struct niu_xmac_stats *mp = &np->mac_stats.xmac;
+ u64 val;
+
+ val = nr64_mac(XTXMAC_STATUS);
+ if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
+ mp->tx_frames += TXMAC_FRM_CNT_COUNT;
+ if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
+ mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
+ if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
+ mp->tx_fifo_errors++;
+ if (val & XTXMAC_STATUS_TXMAC_OFLOW)
+ mp->tx_overflow_errors++;
+ if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
+ mp->tx_max_pkt_size_errors++;
+ if (val & XTXMAC_STATUS_TXMAC_UFLOW)
+ mp->tx_underflow_errors++;
+
+ val = nr64_mac(XRXMAC_STATUS);
+ if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
+ mp->rx_local_faults++;
+ if (val & XRXMAC_STATUS_RFLT_DET)
+ mp->rx_remote_faults++;
+ if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
+ mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
+ if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
+ mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
+ if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
+ mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
+ if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
+ mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
+ if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
+ mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
+ if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
+ mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
+ if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
+ mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
+ if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
+ mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
+ if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
+ mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
+ if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
+ mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
+ if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
+ mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
+ if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
+ mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
+ if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP)
+ mp->rx_octets += RXMAC_BT_CNT_COUNT;
+ if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
+ mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
+ if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
+ mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
+ if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
+ mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
+ if (val & XRXMAC_STATUS_RXUFLOW)
+ mp->rx_underflows++;
+ if (val & XRXMAC_STATUS_RXOFLOW)
+ mp->rx_overflows++;
+
+ val = nr64_mac(XMAC_FC_STAT);
+ if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
+ mp->pause_off_state++;
+ if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
+ mp->pause_on_state++;
+ if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
+ mp->pause_received++;
+}
+
+static void niu_bmac_interrupt(struct niu *np)
+{
+ struct niu_bmac_stats *mp = &np->mac_stats.bmac;
+ u64 val;
+
+ val = nr64_mac(BTXMAC_STATUS);
+ if (val & BTXMAC_STATUS_UNDERRUN)
+ mp->tx_underflow_errors++;
+ if (val & BTXMAC_STATUS_MAX_PKT_ERR)
+ mp->tx_max_pkt_size_errors++;
+ if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
+ mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
+ if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
+ mp->tx_frames += BTXMAC_FRM_CNT_COUNT;
+
+ val = nr64_mac(BRXMAC_STATUS);
+ if (val & BRXMAC_STATUS_OVERFLOW)
+ mp->rx_overflows++;
+ if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
+ mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
+ if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
+ mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
+ if (val & BRXMAC_STATUS_CRC_ERR_EXP)
+ mp->rx_crc_errors += BRXMAC_CRC_ERR_CNT_COUNT;
+ if (val & BRXMAC_STATUS_LEN_ERR_EXP)
+ mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;
+
+ val = nr64_mac(BMAC_CTRL_STATUS);
+ if (val & BMAC_CTRL_STATUS_NOPAUSE)
+ mp->pause_off_state++;
+ if (val & BMAC_CTRL_STATUS_PAUSE)
+ mp->pause_on_state++;
+ if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
+ mp->pause_received++;
+}
+
+static int niu_mac_interrupt(struct niu *np)
+{
+ if (np->flags & NIU_FLAGS_XMAC)
+ niu_xmac_interrupt(np);
+ else
+ niu_bmac_interrupt(np);
+
+ return 0;
+}
+
+static void niu_log_device_error(struct niu *np, u64 stat)
+{
+ netdev_err(np->dev, "Core device errors ( ");
+
+ if (stat & SYS_ERR_MASK_META2)
+ pr_cont("META2 ");
+ if (stat & SYS_ERR_MASK_META1)
+ pr_cont("META1 ");
+ if (stat & SYS_ERR_MASK_PEU)
+ pr_cont("PEU ");
+ if (stat & SYS_ERR_MASK_TXC)
+ pr_cont("TXC ");
+ if (stat & SYS_ERR_MASK_RDMC)
+ pr_cont("RDMC ");
+ if (stat & SYS_ERR_MASK_TDMC)
+ pr_cont("TDMC ");
+ if (stat & SYS_ERR_MASK_ZCP)
+ pr_cont("ZCP ");
+ if (stat & SYS_ERR_MASK_FFLP)
+ pr_cont("FFLP ");
+ if (stat & SYS_ERR_MASK_IPP)
+ pr_cont("IPP ");
+ if (stat & SYS_ERR_MASK_MAC)
+ pr_cont("MAC ");
+ if (stat & SYS_ERR_MASK_SMX)
+ pr_cont("SMX ");
+
+ pr_cont(")\n");
+}
+
+static int niu_device_error(struct niu *np)
+{
+ u64 stat = nr64(SYS_ERR_STAT);
+
+ netdev_err(np->dev, "Core device error, stat[%llx]\n",
+ (unsigned long long)stat);
+
+ niu_log_device_error(np, stat);
+
+ return -ENODEV;
+}
+
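+/* Slow path: v1 carries the per-channel error bits (low 32 for RX
+ * channels, the next 31 for TX), the top bit of v0/v1 flags a MIF
+ * event, and v2 carries MAC and core device errors. Any hard error
+ * leaves the device with interrupts disabled.
+ */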
+static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
+ u64 v0, u64 v1, u64 v2)
+{
+ int i, err = 0;
+
+ lp->v0 = v0;
+ lp->v1 = v1;
+ lp->v2 = v2;
+
+ if (v1 & 0x00000000ffffffffULL) {
+ u32 rx_vec = (v1 & 0xffffffff);
+
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &np->rx_rings[i];
+
+ if (rx_vec & (1 << rp->rx_channel)) {
+ int r = niu_rx_error(np, rp);
+ if (r) {
+ err = r;
+ } else {
+ if (!v0)
+ nw64(RX_DMA_CTL_STAT(rp->rx_channel),
+ RX_DMA_CTL_STAT_MEX);
+ }
+ }
+ }
+ }
+ if (v1 & 0x7fffffff00000000ULL) {
+ u32 tx_vec = (v1 >> 32) & 0x7fffffff;
+
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &np->tx_rings[i];
+
+ if (tx_vec & (1 << rp->tx_channel)) {
+ int r = niu_tx_error(np, rp);
+ if (r)
+ err = r;
+ }
+ }
+ }
+ if ((v0 | v1) & 0x8000000000000000ULL) {
+ int r = niu_mif_interrupt(np);
+ if (r)
+ err = r;
+ }
+ if (v2) {
+ if (v2 & 0x01ef) {
+ int r = niu_mac_interrupt(np);
+ if (r)
+ err = r;
+ }
+ if (v2 & 0x0210) {
+ int r = niu_device_error(np);
+ if (r)
+ err = r;
+ }
+ }
+
+ if (err)
+ niu_enable_interrupts(np, 0);
+
+ return err;
+}
+
+static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
+ int ldn)
+{
+ struct rxdma_mailbox *mbox = rp->mbox;
+ u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
+
+ stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
+ RX_DMA_CTL_STAT_RCRTO);
+ nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
+
+ netif_printk(np, intr, KERN_DEBUG, np->dev,
+ "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
+}
+
+static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
+ int ldn)
+{
+ rp->tx_cs = nr64(TX_CS(rp->tx_channel));
+
+ netif_printk(np, intr, KERN_DEBUG, np->dev,
+ "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
+}
+
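+/* Fast path: mask the logical devices of every ring owned by this
+ * LDG and latch the per-channel status, so the NAPI poll can run
+ * on that state without the interrupt re-firing.
+ */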
+static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
+{
+ struct niu_parent *parent = np->parent;
+ u32 rx_vec, tx_vec;
+ int i;
+
+ tx_vec = (v0 >> 32);
+ rx_vec = (v0 & 0xffffffff);
+
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &np->rx_rings[i];
+ int ldn = LDN_RXDMA(rp->rx_channel);
+
+ if (parent->ldg_map[ldn] != ldg)
+ continue;
+
+ nw64(LD_IM0(ldn), LD_IM0_MASK);
+ if (rx_vec & (1 << rp->rx_channel))
+ niu_rxchan_intr(np, rp, ldn);
+ }
+
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &np->tx_rings[i];
+ int ldn = LDN_TXDMA(rp->tx_channel);
+
+ if (parent->ldg_map[ldn] != ldg)
+ continue;
+
+ nw64(LD_IM0(ldn), LD_IM0_MASK);
+ if (tx_vec & (1 << rp->tx_channel))
+ niu_txchan_intr(np, rp, ldn);
+ }
+}
+
+static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
+ u64 v0, u64 v1, u64 v2)
+{
+ if (likely(napi_schedule_prep(&lp->napi))) {
+ lp->v0 = v0;
+ lp->v1 = v1;
+ lp->v2 = v2;
+ __niu_fastpath_interrupt(np, lp->ldg_num, v0);
+ __napi_schedule(&lp->napi);
+ }
+}
+
+static irqreturn_t niu_interrupt(int irq, void *dev_id)
+{
+ struct niu_ldg *lp = dev_id;
+ struct niu *np = lp->np;
+ int ldg = lp->ldg_num;
+ unsigned long flags;
+ u64 v0, v1, v2;
+
+ if (netif_msg_intr(np))
+ printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
+ __func__, lp, ldg);
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ v0 = nr64(LDSV0(ldg));
+ v1 = nr64(LDSV1(ldg));
+ v2 = nr64(LDSV2(ldg));
+
+ if (netif_msg_intr(np))
+ pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
+ (unsigned long long) v0,
+ (unsigned long long) v1,
+ (unsigned long long) v2);
+
+ if (unlikely(!v0 && !v1 && !v2)) {
+ spin_unlock_irqrestore(&np->lock, flags);
+ return IRQ_NONE;
+ }
+
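+ /* v1, v2 and the MIF bit of v0 report errors and other
+ * slow-path events; any remaining v0 bits are ordinary RX/TX
+ * work that NAPI will handle.
+ */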
+ if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
+ int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
+ if (err)
+ goto out;
+ }
+ if (likely(v0 & ~((u64)1 << LDN_MIF)))
+ niu_schedule_napi(np, lp, v0, v1, v2);
+ else
+ niu_ldg_rearm(np, lp, 1);
+out:
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
+{
+ if (rp->mbox) {
+ np->ops->free_coherent(np->device,
+ sizeof(struct rxdma_mailbox),
+ rp->mbox, rp->mbox_dma);
+ rp->mbox = NULL;
+ }
+ if (rp->rcr) {
+ np->ops->free_coherent(np->device,
+ MAX_RCR_RING_SIZE * sizeof(__le64),
+ rp->rcr, rp->rcr_dma);
+ rp->rcr = NULL;
+ rp->rcr_table_size = 0;
+ rp->rcr_index = 0;
+ }
+ if (rp->rbr) {
+ niu_rbr_free(np, rp);
+
+ np->ops->free_coherent(np->device,
+ MAX_RBR_RING_SIZE * sizeof(__le32),
+ rp->rbr, rp->rbr_dma);
+ rp->rbr = NULL;
+ rp->rbr_table_size = 0;
+ rp->rbr_index = 0;
+ }
+ kfree(rp->rxhash);
+ rp->rxhash = NULL;
+}
+
+static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
+{
+ if (rp->mbox) {
+ np->ops->free_coherent(np->device,
+ sizeof(struct txdma_mailbox),
+ rp->mbox, rp->mbox_dma);
+ rp->mbox = NULL;
+ }
+ if (rp->descr) {
+ int i;
+
+ for (i = 0; i < MAX_TX_RING_SIZE; i++) {
+ if (rp->tx_buffs[i].skb)
+ (void) release_tx_packet(np, rp, i);
+ }
+
+ np->ops->free_coherent(np->device,
+ MAX_TX_RING_SIZE * sizeof(__le64),
+ rp->descr, rp->descr_dma);
+ rp->descr = NULL;
+ rp->pending = 0;
+ rp->prod = 0;
+ rp->cons = 0;
+ rp->wrap_bit = 0;
+ }
+}
+
+static void niu_free_channels(struct niu *np)
+{
+ int i;
+
+ if (np->rx_rings) {
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &np->rx_rings[i];
+
+ niu_free_rx_ring_info(np, rp);
+ }
+ kfree(np->rx_rings);
+ np->rx_rings = NULL;
+ np->num_rx_rings = 0;
+ }
+
+ if (np->tx_rings) {
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &np->tx_rings[i];
+
+ niu_free_tx_ring_info(np, rp);
+ }
+ kfree(np->tx_rings);
+ np->tx_rings = NULL;
+ np->num_tx_rings = 0;
+ }
+}
+
+static int niu_alloc_rx_ring_info(struct niu *np,
+ struct rx_ring_info *rp)
+{
+ BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
+
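+ /* The mailbox, RCR and RBR must all be 64-byte aligned; the
+ * checks below catch coherent allocators that do not honor
+ * this requirement.
+ */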
+ rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!rp->rxhash)
+ return -ENOMEM;
+
+ rp->mbox = np->ops->alloc_coherent(np->device,
+ sizeof(struct rxdma_mailbox),
+ &rp->mbox_dma, GFP_KERNEL);
+ if (!rp->mbox)
+ return -ENOMEM;
+ if ((unsigned long)rp->mbox & (64UL - 1)) {
+ netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
+ rp->mbox);
+ return -EINVAL;
+ }
+
+ rp->rcr = np->ops->alloc_coherent(np->device,
+ MAX_RCR_RING_SIZE * sizeof(__le64),
+ &rp->rcr_dma, GFP_KERNEL);
+ if (!rp->rcr)
+ return -ENOMEM;
+ if ((unsigned long)rp->rcr & (64UL - 1)) {
+ netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
+ rp->rcr);
+ return -EINVAL;
+ }
+ rp->rcr_table_size = MAX_RCR_RING_SIZE;
+ rp->rcr_index = 0;
+
+ rp->rbr = np->ops->alloc_coherent(np->device,
+ MAX_RBR_RING_SIZE * sizeof(__le32),
+ &rp->rbr_dma, GFP_KERNEL);
+ if (!rp->rbr)
+ return -ENOMEM;
+ if ((unsigned long)rp->rbr & (64UL - 1)) {
+ netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
+ rp->rbr);
+ return -EINVAL;
+ }
+ rp->rbr_table_size = MAX_RBR_RING_SIZE;
+ rp->rbr_index = 0;
+ rp->rbr_pending = 0;
+
+ return 0;
+}
+
+static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
+{
+ int mtu = np->dev->mtu;
+
+ /* These values are recommended by the HW designers for fair
+ * utilization of DRR amongst the rings.
+ */
+ rp->max_burst = mtu + 32;
+ if (rp->max_burst > 4096)
+ rp->max_burst = 4096;
+}
+
+static int niu_alloc_tx_ring_info(struct niu *np,
+ struct tx_ring_info *rp)
+{
+ BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
+
+ rp->mbox = np->ops->alloc_coherent(np->device,
+ sizeof(struct txdma_mailbox),
+ &rp->mbox_dma, GFP_KERNEL);
+ if (!rp->mbox)
+ return -ENOMEM;
+ if ((unsigned long)rp->mbox & (64UL - 1)) {
+ netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
+ rp->mbox);
+ return -EINVAL;
+ }
+
+ rp->descr = np->ops->alloc_coherent(np->device,
+ MAX_TX_RING_SIZE * sizeof(__le64),
+ &rp->descr_dma, GFP_KERNEL);
+ if (!rp->descr)
+ return -ENOMEM;
+ if ((unsigned long)rp->descr & (64UL - 1)) {
+ netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
+ rp->descr);
+ return -EINVAL;
+ }
+
+ rp->pending = MAX_TX_RING_SIZE;
+ rp->prod = 0;
+ rp->cons = 0;
+ rp->wrap_bit = 0;
+
+ /* XXX make these configurable... XXX */
+ rp->mark_freq = rp->pending / 4;
+
+ niu_set_max_burst(np, rp);
+
+ return 0;
+}
+
+static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
+{
+ u16 bss;
+
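+ /* Use one RBR block per page, capped at 32KB, the largest
+ * block size the chip supports.
+ */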
+ bss = min(PAGE_SHIFT, 15);
+
+ rp->rbr_block_size = 1 << bss;
+ rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
+
+ rp->rbr_sizes[0] = 256;
+ rp->rbr_sizes[1] = 1024;
+ if (np->dev->mtu > ETH_DATA_LEN) {
+ switch (PAGE_SIZE) {
+ case 4 * 1024:
+ rp->rbr_sizes[2] = 4096;
+ break;
+
+ default:
+ rp->rbr_sizes[2] = 8192;
+ break;
+ }
+ } else {
+ rp->rbr_sizes[2] = 2048;
+ }
+ rp->rbr_sizes[3] = rp->rbr_block_size;
+}
+
+static int niu_alloc_channels(struct niu *np)
+{
+ struct niu_parent *parent = np->parent;
+ int first_rx_channel, first_tx_channel;
+ int num_rx_rings, num_tx_rings;
+ struct rx_ring_info *rx_rings;
+ struct tx_ring_info *tx_rings;
+ int i, port, err;
+
+ port = np->port;
+ first_rx_channel = first_tx_channel = 0;
+ for (i = 0; i < port; i++) {
+ first_rx_channel += parent->rxchan_per_port[i];
+ first_tx_channel += parent->txchan_per_port[i];
+ }
+
+ num_rx_rings = parent->rxchan_per_port[port];
+ num_tx_rings = parent->txchan_per_port[port];
+
+ rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
+ GFP_KERNEL);
+ err = -ENOMEM;
+ if (!rx_rings)
+ goto out_err;
+
+ np->num_rx_rings = num_rx_rings;
+ smp_wmb();
+ np->rx_rings = rx_rings;
+
+ netif_set_real_num_rx_queues(np->dev, num_rx_rings);
+
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &np->rx_rings[i];
+
+ rp->np = np;
+ rp->rx_channel = first_rx_channel + i;
+
+ err = niu_alloc_rx_ring_info(np, rp);
+ if (err)
+ goto out_err;
+
+ niu_size_rbr(np, rp);
+
+ /* XXX better defaults, configurable, etc... XXX */
+ rp->nonsyn_window = 64;
+ rp->nonsyn_threshold = rp->rcr_table_size - 64;
+ rp->syn_window = 64;
+ rp->syn_threshold = rp->rcr_table_size - 64;
+ rp->rcr_pkt_threshold = 16;
+ rp->rcr_timeout = 8;
+ rp->rbr_kick_thresh = RBR_REFILL_MIN;
+ if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
+ rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
+
+ err = niu_rbr_fill(np, rp, GFP_KERNEL);
+ if (err)
+ goto out_err;
+ }
+
+ tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
+ GFP_KERNEL);
+ err = -ENOMEM;
+ if (!tx_rings)
+ goto out_err;
+
+ np->num_tx_rings = num_tx_rings;
+ smp_wmb();
+ np->tx_rings = tx_rings;
+
+ netif_set_real_num_tx_queues(np->dev, num_tx_rings);
+
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &np->tx_rings[i];
+
+ rp->np = np;
+ rp->tx_channel = first_tx_channel + i;
+
+ err = niu_alloc_tx_ring_info(np, rp);
+ if (err)
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ niu_free_channels(np);
+ return err;
+}
+
+static int niu_tx_cs_sng_poll(struct niu *np, int channel)
+{
+ int limit = 1000;
+
+ while (--limit > 0) {
+ u64 val = nr64(TX_CS(channel));
+ if (val & TX_CS_SNG_STATE)
+ return 0;
+ }
+ return -ENODEV;
+}
+
+static int niu_tx_channel_stop(struct niu *np, int channel)
+{
+ u64 val = nr64(TX_CS(channel));
+
+ val |= TX_CS_STOP_N_GO;
+ nw64(TX_CS(channel), val);
+
+ return niu_tx_cs_sng_poll(np, channel);
+}
+
+static int niu_tx_cs_reset_poll(struct niu *np, int channel)
+{
+ int limit = 1000;
+
+ while (--limit > 0) {
+ u64 val = nr64(TX_CS(channel));
+ if (!(val & TX_CS_RST))
+ return 0;
+ }
+ return -ENODEV;
+}
+
+static int niu_tx_channel_reset(struct niu *np, int channel)
+{
+ u64 val = nr64(TX_CS(channel));
+ int err;
+
+ val |= TX_CS_RST;
+ nw64(TX_CS(channel), val);
+
+ err = niu_tx_cs_reset_poll(np, channel);
+ if (!err)
+ nw64(TX_RING_KICK(channel), 0);
+
+ return err;
+}
+
+static int niu_tx_channel_lpage_init(struct niu *np, int channel)
+{
+ u64 val;
+
+ nw64(TX_LOG_MASK1(channel), 0);
+ nw64(TX_LOG_VAL1(channel), 0);
+ nw64(TX_LOG_MASK2(channel), 0);
+ nw64(TX_LOG_VAL2(channel), 0);
+ nw64(TX_LOG_PAGE_RELO1(channel), 0);
+ nw64(TX_LOG_PAGE_RELO2(channel), 0);
+ nw64(TX_LOG_PAGE_HDL(channel), 0);
+
+ val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
+ val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
+ nw64(TX_LOG_PAGE_VLD(channel), val);
+
+ /* XXX TXDMA 32bit mode? XXX */
+
+ return 0;
+}
+
+static void niu_txc_enable_port(struct niu *np, int on)
+{
+ unsigned long flags;
+ u64 val, mask;
+
+ niu_lock_parent(np, flags);
+ val = nr64(TXC_CONTROL);
+ mask = (u64)1 << np->port;
+ if (on) {
+ val |= TXC_CONTROL_ENABLE | mask;
+ } else {
+ val &= ~mask;
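+ /* Drop the global enable bit only once no per-port
+ * enable bits remain set.
+ */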
+ if ((val & ~TXC_CONTROL_ENABLE) == 0)
+ val &= ~TXC_CONTROL_ENABLE;
+ }
+ nw64(TXC_CONTROL, val);
+ niu_unlock_parent(np, flags);
+}
+
+static void niu_txc_set_imask(struct niu *np, u64 imask)
+{
+ unsigned long flags;
+ u64 val;
+
+ niu_lock_parent(np, flags);
+ val = nr64(TXC_INT_MASK);
+ val &= ~TXC_INT_MASK_VAL(np->port);
+ val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
+ nw64(TXC_INT_MASK, val);
+ niu_unlock_parent(np, flags);
+}
+
+static void niu_txc_port_dma_enable(struct niu *np, int on)
+{
+ u64 val = 0;
+
+ if (on) {
+ int i;
+
+ for (i = 0; i < np->num_tx_rings; i++)
+ val |= (1 << np->tx_rings[i].tx_channel);
+ }
+ nw64(TXC_PORT_DMA(np->port), val);
+}
+
+static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
+{
+ int err, channel = rp->tx_channel;
+ u64 val, ring_len;
+
+ err = niu_tx_channel_stop(np, channel);
+ if (err)
+ return err;
+
+ err = niu_tx_channel_reset(np, channel);
+ if (err)
+ return err;
+
+ err = niu_tx_channel_lpage_init(np, channel);
+ if (err)
+ return err;
+
+ nw64(TXC_DMA_MAX(channel), rp->max_burst);
+ nw64(TX_ENT_MSK(channel), 0);
+
+ if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
+ TX_RNG_CFIG_STADDR)) {
+ netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
+ channel, (unsigned long long)rp->descr_dma);
+ return -EINVAL;
+ }
+
+ /* The length field in TX_RNG_CFIG is measured in 64-byte
+ * blocks. rp->pending is the number of TX descriptors in
+ * our ring, each 8 bytes long, so dividing the descriptor
+ * count by 8 yields the length in 64-byte blocks.
+ */
+ ring_len = (rp->pending / 8);
+
+ val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
+ rp->descr_dma);
+ nw64(TX_RNG_CFIG(channel), val);
+
+ if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
+ ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
+ netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
+ channel, (unsigned long long)rp->mbox_dma);
+ return -EINVAL;
+ }
+ nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
+ nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
+
+ nw64(TX_CS(channel), 0);
+
+ rp->last_pkt_cnt = 0;
+
+ return 0;
+}
+
+static void niu_init_rdc_groups(struct niu *np)
+{
+ struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
+ int i, first_table_num = tp->first_table_num;
+
+ for (i = 0; i < tp->num_tables; i++) {
+ struct rdc_table *tbl = &tp->tables[i];
+ int this_table = first_table_num + i;
+ int slot;
+
+ for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
+ nw64(RDC_TBL(this_table, slot),
+ tbl->rxdma_channel[slot]);
+ }
+
+ nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
+}
+
+static void niu_init_drr_weight(struct niu *np)
+{
+ int type = phy_decode(np->parent->port_phy, np->port);
+ u64 val;
+
+ switch (type) {
+ case PORT_TYPE_10G:
+ val = PT_DRR_WEIGHT_DEFAULT_10G;
+ break;
+
+ case PORT_TYPE_1G:
+ default:
+ val = PT_DRR_WEIGHT_DEFAULT_1G;
+ break;
+ }
+ nw64(PT_DRR_WT(np->port), val);
+}
+
+static int niu_init_hostinfo(struct niu *np)
+{
+ struct niu_parent *parent = np->parent;
+ struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
+ int i, err, num_alt = niu_num_alt_addr(np);
+ int first_rdc_table = tp->first_table_num;
+
+ err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
+ if (err)
+ return err;
+
+ err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
+ if (err)
+ return err;
+
+ for (i = 0; i < num_alt; i++) {
+ err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int niu_rx_channel_reset(struct niu *np, int channel)
+{
+ return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
+ RXDMA_CFIG1_RST, 1000, 10,
+ "RXDMA_CFIG1");
+}
+
+static int niu_rx_channel_lpage_init(struct niu *np, int channel)
+{
+ u64 val;
+
+ nw64(RX_LOG_MASK1(channel), 0);
+ nw64(RX_LOG_VAL1(channel), 0);
+ nw64(RX_LOG_MASK2(channel), 0);
+ nw64(RX_LOG_VAL2(channel), 0);
+ nw64(RX_LOG_PAGE_RELO1(channel), 0);
+ nw64(RX_LOG_PAGE_RELO2(channel), 0);
+ nw64(RX_LOG_PAGE_HDL(channel), 0);
+
+ val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
+ val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
+ nw64(RX_LOG_PAGE_VLD(channel), val);
+
+ return 0;
+}
+
+static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
+{
+ u64 val;
+
+ val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
+ ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
+ ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
+ ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
+ nw64(RDC_RED_PARA(rp->rx_channel), val);
+}
+
+static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
+{
+ u64 val = 0;
+
+ *ret = 0;
+ switch (rp->rbr_block_size) {
+ case 4 * 1024:
+ val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
+ break;
+ case 8 * 1024:
+ val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
+ break;
+ case 16 * 1024:
+ val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
+ break;
+ case 32 * 1024:
+ val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
+ break;
+ default:
+ return -EINVAL;
+ }
+ val |= RBR_CFIG_B_VLD2;
+ switch (rp->rbr_sizes[2]) {
+ case 2 * 1024:
+ val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
+ break;
+ case 4 * 1024:
+ val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
+ break;
+ case 8 * 1024:
+ val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
+ break;
+ case 16 * 1024:
+ val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ val |= RBR_CFIG_B_VLD1;
+ switch (rp->rbr_sizes[1]) {
+ case 1 * 1024:
+ val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
+ break;
+ case 2 * 1024:
+ val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
+ break;
+ case 4 * 1024:
+ val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
+ break;
+ case 8 * 1024:
+ val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ val |= RBR_CFIG_B_VLD0;
+ switch (rp->rbr_sizes[0]) {
+ case 256:
+ val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
+ break;
+ case 512:
+ val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
+ break;
+ case 1 * 1024:
+ val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
+ break;
+ case 2 * 1024:
+ val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ *ret = val;
+ return 0;
+}
+
+static int niu_enable_rx_channel(struct niu *np, int channel, int on)
+{
+ u64 val = nr64(RXDMA_CFIG1(channel));
+ int limit;
+
+ if (on)
+ val |= RXDMA_CFIG1_EN;
+ else
+ val &= ~RXDMA_CFIG1_EN;
+ nw64(RXDMA_CFIG1(channel), val);
+
+ limit = 1000;
+ while (--limit > 0) {
+ if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
+ break;
+ udelay(10);
+ }
+ if (limit <= 0)
+ return -ENODEV;
+ return 0;
+}
+
+static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
+{
+ int err, channel = rp->rx_channel;
+ u64 val;
+
+ err = niu_rx_channel_reset(np, channel);
+ if (err)
+ return err;
+
+ err = niu_rx_channel_lpage_init(np, channel);
+ if (err)
+ return err;
+
+ niu_rx_channel_wred_init(np, rp);
+
+ nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
+ nw64(RX_DMA_CTL_STAT(channel),
+ (RX_DMA_CTL_STAT_MEX |
+ RX_DMA_CTL_STAT_RCRTHRES |
+ RX_DMA_CTL_STAT_RCRTO |
+ RX_DMA_CTL_STAT_RBR_EMPTY));
+ nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
+ nw64(RXDMA_CFIG2(channel),
+ ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
+ RXDMA_CFIG2_FULL_HDR));
+ nw64(RBR_CFIG_A(channel),
+ ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
+ (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
+ err = niu_compute_rbr_cfig_b(rp, &val);
+ if (err)
+ return err;
+ nw64(RBR_CFIG_B(channel), val);
+ nw64(RCRCFIG_A(channel),
+ ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
+ (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
+ nw64(RCRCFIG_B(channel),
+ ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
+ RCRCFIG_B_ENTOUT |
+ ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
+
+ err = niu_enable_rx_channel(np, channel, 1);
+ if (err)
+ return err;
+
+ nw64(RBR_KICK(channel), rp->rbr_index);
+
+ val = nr64(RX_DMA_CTL_STAT(channel));
+ val |= RX_DMA_CTL_STAT_RBR_EMPTY;
+ nw64(RX_DMA_CTL_STAT(channel), val);
+
+ return 0;
+}
+
+static int niu_init_rx_channels(struct niu *np)
+{
+ unsigned long flags;
+ u64 seed = jiffies_64;
+ int err, i;
+
+ niu_lock_parent(np, flags);
+ nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
+ nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
+ niu_unlock_parent(np, flags);
+
+ /* XXX RXDMA 32bit mode? XXX */
+
+ niu_init_rdc_groups(np);
+ niu_init_drr_weight(np);
+
+ err = niu_init_hostinfo(np);
+ if (err)
+ return err;
+
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &np->rx_rings[i];
+
+ err = niu_init_one_rx_channel(np, rp);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int niu_set_ip_frag_rule(struct niu *np)
+{
+ struct niu_parent *parent = np->parent;
+ struct niu_classifier *cp = &np->clas;
+ struct niu_tcam_entry *tp;
+ int index, err;
+
+ index = cp->tcam_top;
+ tp = &parent->tcam[index];
+
+ /* Note that the noport bit is the same in both ipv4 and
+ * ipv6 format TCAM entries.
+ */
+ memset(tp, 0, sizeof(*tp));
+ tp->key[1] = TCAM_V4KEY1_NOPORT;
+ tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
+ tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
+ ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
+ err = tcam_write(np, index, tp->key, tp->key_mask);
+ if (err)
+ return err;
+ err = tcam_assoc_write(np, index, tp->assoc_data);
+ if (err)
+ return err;
+ tp->valid = 1;
+ cp->tcam_valid_entries++;
+
+ return 0;
+}
+
+static int niu_init_classifier_hw(struct niu *np)
+{
+ struct niu_parent *parent = np->parent;
+ struct niu_classifier *cp = &np->clas;
+ int i, err;
+
+ nw64(H1POLY, cp->h1_init);
+ nw64(H2POLY, cp->h2_init);
+
+ err = niu_init_hostinfo(np);
+ if (err)
+ return err;
+
+ for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
+ struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
+
+ vlan_tbl_write(np, i, np->port,
+ vp->vlan_pref, vp->rdc_num);
+ }
+
+ for (i = 0; i < cp->num_alt_mac_mappings; i++) {
+ struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
+
+ err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
+ ap->rdc_num, ap->mac_pref);
+ if (err)
+ return err;
+ }
+
+ for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
+ int index = i - CLASS_CODE_USER_PROG1;
+
+ err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
+ if (err)
+ return err;
+ err = niu_set_flow_key(np, i, parent->flow_key[index]);
+ if (err)
+ return err;
+ }
+
+ err = niu_set_ip_frag_rule(np);
+ if (err)
+ return err;
+
+ tcam_enable(np, 1);
+
+ return 0;
+}
+
+static int niu_zcp_write(struct niu *np, int index, u64 *data)
+{
+ nw64(ZCP_RAM_DATA0, data[0]);
+ nw64(ZCP_RAM_DATA1, data[1]);
+ nw64(ZCP_RAM_DATA2, data[2]);
+ nw64(ZCP_RAM_DATA3, data[3]);
+ nw64(ZCP_RAM_DATA4, data[4]);
+ nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
+ nw64(ZCP_RAM_ACC,
+ (ZCP_RAM_ACC_WRITE |
+ (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
+ (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
+
+ return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
+ 1000, 100);
+}
+
+static int niu_zcp_read(struct niu *np, int index, u64 *data)
+{
+ int err;
+
+ err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
+ 1000, 100);
+ if (err) {
+ netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
+ (unsigned long long)nr64(ZCP_RAM_ACC));
+ return err;
+ }
+
+ nw64(ZCP_RAM_ACC,
+ (ZCP_RAM_ACC_READ |
+ (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
+ (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
+
+ err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
+ 1000, 100);
+ if (err) {
+ netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
+ (unsigned long long)nr64(ZCP_RAM_ACC));
+ return err;
+ }
+
+ data[0] = nr64(ZCP_RAM_DATA0);
+ data[1] = nr64(ZCP_RAM_DATA1);
+ data[2] = nr64(ZCP_RAM_DATA2);
+ data[3] = nr64(ZCP_RAM_DATA3);
+ data[4] = nr64(ZCP_RAM_DATA4);
+
+ return 0;
+}
+
+static void niu_zcp_cfifo_reset(struct niu *np)
+{
+ u64 val = nr64(RESET_CFIFO);
+
+ val |= RESET_CFIFO_RST(np->port);
+ nw64(RESET_CFIFO, val);
+ udelay(10);
+
+ val &= ~RESET_CFIFO_RST(np->port);
+ nw64(RESET_CFIFO, val);
+}
+
+static int niu_init_zcp(struct niu *np)
+{
+ u64 data[5], rbuf[5];
+ int i, max, err;
+
+ if (np->parent->plat_type != PLAT_TYPE_NIU) {
+ if (np->port == 0 || np->port == 1)
+ max = ATLAS_P0_P1_CFIFO_ENTRIES;
+ else
+ max = ATLAS_P2_P3_CFIFO_ENTRIES;
+ } else
+ max = NIU_CFIFO_ENTRIES;
+
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0;
+ data[3] = 0;
+ data[4] = 0;
+
+ for (i = 0; i < max; i++) {
+ err = niu_zcp_write(np, i, data);
+ if (err)
+ return err;
+ err = niu_zcp_read(np, i, rbuf);
+ if (err)
+ return err;
+ }
+
+ niu_zcp_cfifo_reset(np);
+ nw64(CFIFO_ECC(np->port), 0);
+ nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
+ (void) nr64(ZCP_INT_STAT);
+ nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
+
+ return 0;
+}
+
+static void niu_ipp_write(struct niu *np, int index, u64 *data)
+{
+ u64 val = nr64_ipp(IPP_CFIG);
+
+ nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
+ nw64_ipp(IPP_DFIFO_WR_PTR, index);
+ nw64_ipp(IPP_DFIFO_WR0, data[0]);
+ nw64_ipp(IPP_DFIFO_WR1, data[1]);
+ nw64_ipp(IPP_DFIFO_WR2, data[2]);
+ nw64_ipp(IPP_DFIFO_WR3, data[3]);
+ nw64_ipp(IPP_DFIFO_WR4, data[4]);
+ nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
+}
+
+static void niu_ipp_read(struct niu *np, int index, u64 *data)
+{
+ nw64_ipp(IPP_DFIFO_RD_PTR, index);
+ data[0] = nr64_ipp(IPP_DFIFO_RD0);
+ data[1] = nr64_ipp(IPP_DFIFO_RD1);
+ data[2] = nr64_ipp(IPP_DFIFO_RD2);
+ data[3] = nr64_ipp(IPP_DFIFO_RD3);
+ data[4] = nr64_ipp(IPP_DFIFO_RD4);
+}
+
+static int niu_ipp_reset(struct niu *np)
+{
+ return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
+ 1000, 100, "IPP_CFIG");
+}
+
+static int niu_init_ipp(struct niu *np)
+{
+ u64 data[5], rbuf[5], val;
+ int i, max, err;
+
+ if (np->parent->plat_type != PLAT_TYPE_NIU) {
+ if (np->port == 0 || np->port == 1)
+ max = ATLAS_P0_P1_DFIFO_ENTRIES;
+ else
+ max = ATLAS_P2_P3_DFIFO_ENTRIES;
+ } else
+ max = NIU_DFIFO_ENTRIES;
+
+ data[0] = 0;
+ data[1] = 0;
+ data[2] = 0;
+ data[3] = 0;
+ data[4] = 0;
+
+ for (i = 0; i < max; i++) {
+ niu_ipp_write(np, i, data);
+ niu_ipp_read(np, i, rbuf);
+ }
+
+ (void) nr64_ipp(IPP_INT_STAT);
+ (void) nr64_ipp(IPP_INT_STAT);
+
+ err = niu_ipp_reset(np);
+ if (err)
+ return err;
+
+ (void) nr64_ipp(IPP_PKT_DIS);
+ (void) nr64_ipp(IPP_BAD_CS_CNT);
+ (void) nr64_ipp(IPP_ECC);
+
+ (void) nr64_ipp(IPP_INT_STAT);
+
+ nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
+
+ val = nr64_ipp(IPP_CFIG);
+ val &= ~IPP_CFIG_IP_MAX_PKT;
+ val |= (IPP_CFIG_IPP_ENABLE |
+ IPP_CFIG_DFIFO_ECC_EN |
+ IPP_CFIG_DROP_BAD_CRC |
+ IPP_CFIG_CKSUM_EN |
+ (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
+ nw64_ipp(IPP_CFIG, val);
+
+ return 0;
+}
+
+static void niu_handle_led(struct niu *np, int status)
+{
+ u64 val = nr64_mac(XMAC_CONFIG);
+
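+ /* Only 10G fiber ports drive the link LED through XMAC_CONFIG. */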
+ if ((np->flags & NIU_FLAGS_10G) != 0 &&
+ (np->flags & NIU_FLAGS_FIBER) != 0) {
+ if (status) {
+ val |= XMAC_CONFIG_LED_POLARITY;
+ val &= ~XMAC_CONFIG_FORCE_LED_ON;
+ } else {
+ val |= XMAC_CONFIG_FORCE_LED_ON;
+ val &= ~XMAC_CONFIG_LED_POLARITY;
+ }
+ }
+
+ nw64_mac(XMAC_CONFIG, val);
+}
+
+static void niu_init_xif_xmac(struct niu *np)
+{
+ struct niu_link_config *lp = &np->link_config;
+ u64 val;
+
+ if (np->flags & NIU_FLAGS_XCVR_SERDES) {
+ val = nr64(MIF_CONFIG);
+ val |= MIF_CONFIG_ATCA_GE;
+ nw64(MIF_CONFIG, val);
+ }
+
+ val = nr64_mac(XMAC_CONFIG);
+ val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
+
+ val |= XMAC_CONFIG_TX_OUTPUT_EN;
+
+ if (lp->loopback_mode == LOOPBACK_MAC) {
+ val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
+ val |= XMAC_CONFIG_LOOPBACK;
+ } else {
+ val &= ~XMAC_CONFIG_LOOPBACK;
+ }
+
+ if (np->flags & NIU_FLAGS_10G) {
+ val &= ~XMAC_CONFIG_LFS_DISABLE;
+ } else {
+ val |= XMAC_CONFIG_LFS_DISABLE;
+ if (!(np->flags & NIU_FLAGS_FIBER) &&
+ !(np->flags & NIU_FLAGS_XCVR_SERDES))
+ val |= XMAC_CONFIG_1G_PCS_BYPASS;
+ else
+ val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
+ }
+
+ val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
+
+ if (lp->active_speed == SPEED_100)
+ val |= XMAC_CONFIG_SEL_CLK_25MHZ;
+ else
+ val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
+
+ nw64_mac(XMAC_CONFIG, val);
+
+ val = nr64_mac(XMAC_CONFIG);
+ val &= ~XMAC_CONFIG_MODE_MASK;
+ if (np->flags & NIU_FLAGS_10G) {
+ val |= XMAC_CONFIG_MODE_XGMII;
+ } else {
+ if (lp->active_speed == SPEED_1000)
+ val |= XMAC_CONFIG_MODE_GMII;
+ else
+ val |= XMAC_CONFIG_MODE_MII;
+ }
+
+ nw64_mac(XMAC_CONFIG, val);
+}
+
+static void niu_init_xif_bmac(struct niu *np)
+{
+ struct niu_link_config *lp = &np->link_config;
+ u64 val;
+
+ val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
+
+ if (lp->loopback_mode == LOOPBACK_MAC)
+ val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
+ else
+ val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
+
+ if (lp->active_speed == SPEED_1000)
+ val |= BMAC_XIF_CONFIG_GMII_MODE;
+ else
+ val &= ~BMAC_XIF_CONFIG_GMII_MODE;
+
+ val &= ~(BMAC_XIF_CONFIG_LINK_LED |
+ BMAC_XIF_CONFIG_LED_POLARITY);
+
+ if (!(np->flags & NIU_FLAGS_10G) &&
+ !(np->flags & NIU_FLAGS_FIBER) &&
+ lp->active_speed == SPEED_100)
+ val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
+ else
+ val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
+
+ nw64_mac(BMAC_XIF_CONFIG, val);
+}
+
+static void niu_init_xif(struct niu *np)
+{
+ if (np->flags & NIU_FLAGS_XMAC)
+ niu_init_xif_xmac(np);
+ else
+ niu_init_xif_bmac(np);
+}
+
+static void niu_pcs_mii_reset(struct niu *np)
+{
+ int limit = 1000;
+ u64 val = nr64_pcs(PCS_MII_CTL);
+ val |= PCS_MII_CTL_RST;
+ nw64_pcs(PCS_MII_CTL, val);
+ while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
+ udelay(100);
+ val = nr64_pcs(PCS_MII_CTL);
+ }
+}
+
+static void niu_xpcs_reset(struct niu *np)
+{
+ int limit = 1000;
+ u64 val = nr64_xpcs(XPCS_CONTROL1);
+ val |= XPCS_CONTROL1_RESET;
+ nw64_xpcs(XPCS_CONTROL1, val);
+ while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
+ udelay(100);
+ val = nr64_xpcs(XPCS_CONTROL1);
+ }
+}
+
+static int niu_init_pcs(struct niu *np)
+{
+ struct niu_link_config *lp = &np->link_config;
+ u64 val;
+
+ switch (np->flags & (NIU_FLAGS_10G |
+ NIU_FLAGS_FIBER |
+ NIU_FLAGS_XCVR_SERDES)) {
+ case NIU_FLAGS_FIBER:
+ /* 1G fiber */
+ nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
+ nw64_pcs(PCS_DPATH_MODE, 0);
+ niu_pcs_mii_reset(np);
+ break;
+
+ case NIU_FLAGS_10G:
+ case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
+ case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
+ /* 10G SERDES */
+ if (!(np->flags & NIU_FLAGS_XMAC))
+ return -EINVAL;
+
+ /* 10G copper or fiber */
+ val = nr64_mac(XMAC_CONFIG);
+ val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
+ nw64_mac(XMAC_CONFIG, val);
+
+ niu_xpcs_reset(np);
+
+ val = nr64_xpcs(XPCS_CONTROL1);
+ if (lp->loopback_mode == LOOPBACK_PHY)
+ val |= XPCS_CONTROL1_LOOPBACK;
+ else
+ val &= ~XPCS_CONTROL1_LOOPBACK;
+ nw64_xpcs(XPCS_CONTROL1, val);
+
+ nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
+ (void) nr64_xpcs(XPCS_SYMERR_CNT01);
+ (void) nr64_xpcs(XPCS_SYMERR_CNT23);
+ break;
+
+ case NIU_FLAGS_XCVR_SERDES:
+ /* 1G SERDES */
+ niu_pcs_mii_reset(np);
+ nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
+ nw64_pcs(PCS_DPATH_MODE, 0);
+ break;
+
+ case 0:
+ /* 1G copper */
+ case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
+ /* 1G RGMII FIBER */
+ nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
+ niu_pcs_mii_reset(np);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int niu_reset_tx_xmac(struct niu *np)
+{
+ return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
+ (XTXMAC_SW_RST_REG_RS |
+ XTXMAC_SW_RST_SOFT_RST),
+ 1000, 100, "XTXMAC_SW_RST");
+}
+
+static int niu_reset_tx_bmac(struct niu *np)
+{
+ int limit;
+
+ nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
+ limit = 1000;
+ while (--limit >= 0) {
+ if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
+ break;
+ udelay(100);
+ }
+ if (limit < 0) {
+ dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
+ np->port,
+ (unsigned long long) nr64_mac(BTXMAC_SW_RST));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int niu_reset_tx_mac(struct niu *np)
+{
+ if (np->flags & NIU_FLAGS_XMAC)
+ return niu_reset_tx_xmac(np);
+ else
+ return niu_reset_tx_bmac(np);
+}
+
+static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
+{
+ u64 val;
+
+ val = nr64_mac(XMAC_MIN);
+ val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
+ XMAC_MIN_RX_MIN_PKT_SIZE);
+ val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
+ val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
+ nw64_mac(XMAC_MIN, val);
+
+ nw64_mac(XMAC_MAX, max);
+
+ nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);
+
+ val = nr64_mac(XMAC_IPG);
+ if (np->flags & NIU_FLAGS_10G) {
+ val &= ~XMAC_IPG_IPG_XGMII;
+ val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
+ } else {
+ val &= ~XMAC_IPG_IPG_MII_GMII;
+ val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
+ }
+ nw64_mac(XMAC_IPG, val);
+
+ val = nr64_mac(XMAC_CONFIG);
+ val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
+ XMAC_CONFIG_STRETCH_MODE |
+ XMAC_CONFIG_VAR_MIN_IPG_EN |
+ XMAC_CONFIG_TX_ENABLE);
+ nw64_mac(XMAC_CONFIG, val);
+
+ nw64_mac(TXMAC_FRM_CNT, 0);
+ nw64_mac(TXMAC_BYTE_CNT, 0);
+}
+
+static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
+{
+ u64 val;
+
+ nw64_mac(BMAC_MIN_FRAME, min);
+ nw64_mac(BMAC_MAX_FRAME, max);
+
+ nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
+ nw64_mac(BMAC_CTRL_TYPE, 0x8808);
+ nw64_mac(BMAC_PREAMBLE_SIZE, 7);
+
+ val = nr64_mac(BTXMAC_CONFIG);
+ val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
+ BTXMAC_CONFIG_ENABLE);
+ nw64_mac(BTXMAC_CONFIG, val);
+}
+
+static void niu_init_tx_mac(struct niu *np)
+{
+ u64 min, max;
+
+ min = 64;
+ if (np->dev->mtu > ETH_DATA_LEN)
+ max = 9216;
+ else
+ max = 1522;
+
+ /* The XMAC_MIN register only accepts values for TX min which
+ * have the low 3 bits cleared.
+ */
+ BUG_ON(min & 0x7);
+
+ if (np->flags & NIU_FLAGS_XMAC)
+ niu_init_tx_xmac(np, min, max);
+ else
+ niu_init_tx_bmac(np, min, max);
+}
+
+static int niu_reset_rx_xmac(struct niu *np)
+{
+ int limit;
+
+ nw64_mac(XRXMAC_SW_RST,
+ XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
+ limit = 1000;
+ while (--limit >= 0) {
+ if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
+ XRXMAC_SW_RST_SOFT_RST)))
+ break;
+ udelay(100);
+ }
+ if (limit < 0) {
+ dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
+ np->port,
+ (unsigned long long) nr64_mac(XRXMAC_SW_RST));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int niu_reset_rx_bmac(struct niu *np)
+{
+ int limit;
+
+ nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
+ limit = 1000;
+ while (--limit >= 0) {
+ if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
+ break;
+ udelay(100);
+ }
+ if (limit < 0) {
+ dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
+ np->port,
+ (unsigned long long) nr64_mac(BRXMAC_SW_RST));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int niu_reset_rx_mac(struct niu *np)
+{
+ if (np->flags & NIU_FLAGS_XMAC)
+ return niu_reset_rx_xmac(np);
+ else
+ return niu_reset_rx_bmac(np);
+}
+
+static void niu_init_rx_xmac(struct niu *np)
+{
+ struct niu_parent *parent = np->parent;
+ struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
+ int first_rdc_table = tp->first_table_num;
+ unsigned long i;
+ u64 val;
+
+ nw64_mac(XMAC_ADD_FILT0, 0);
+ nw64_mac(XMAC_ADD_FILT1, 0);
+ nw64_mac(XMAC_ADD_FILT2, 0);
+ nw64_mac(XMAC_ADD_FILT12_MASK, 0);
+ nw64_mac(XMAC_ADD_FILT00_MASK, 0);
+ for (i = 0; i < MAC_NUM_HASH; i++)
+ nw64_mac(XMAC_HASH_TBL(i), 0);
+ nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
+ niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
+ niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
+
+ val = nr64_mac(XMAC_CONFIG);
+ val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
+ XMAC_CONFIG_PROMISCUOUS |
+ XMAC_CONFIG_PROMISC_GROUP |
+ XMAC_CONFIG_ERR_CHK_DIS |
+ XMAC_CONFIG_RX_CRC_CHK_DIS |
+ XMAC_CONFIG_RESERVED_MULTICAST |
+ XMAC_CONFIG_RX_CODEV_CHK_DIS |
+ XMAC_CONFIG_ADDR_FILTER_EN |
+ XMAC_CONFIG_RCV_PAUSE_ENABLE |
+ XMAC_CONFIG_STRIP_CRC |
+ XMAC_CONFIG_PASS_FLOW_CTRL |
+ XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
+ val |= (XMAC_CONFIG_HASH_FILTER_EN);
+ nw64_mac(XMAC_CONFIG, val);
+
+ nw64_mac(RXMAC_BT_CNT, 0);
+ nw64_mac(RXMAC_BC_FRM_CNT, 0);
+ nw64_mac(RXMAC_MC_FRM_CNT, 0);
+ nw64_mac(RXMAC_FRAG_CNT, 0);
+ nw64_mac(RXMAC_HIST_CNT1, 0);
+ nw64_mac(RXMAC_HIST_CNT2, 0);
+ nw64_mac(RXMAC_HIST_CNT3, 0);
+ nw64_mac(RXMAC_HIST_CNT4, 0);
+ nw64_mac(RXMAC_HIST_CNT5, 0);
+ nw64_mac(RXMAC_HIST_CNT6, 0);
+ nw64_mac(RXMAC_HIST_CNT7, 0);
+ nw64_mac(RXMAC_MPSZER_CNT, 0);
+ nw64_mac(RXMAC_CRC_ER_CNT, 0);
+ nw64_mac(RXMAC_CD_VIO_CNT, 0);
+ nw64_mac(LINK_FAULT_CNT, 0);
+}
+
+static void niu_init_rx_bmac(struct niu *np)
+{
+ struct niu_parent *parent = np->parent;
+ struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
+ int first_rdc_table = tp->first_table_num;
+ unsigned long i;
+ u64 val;
+
+ nw64_mac(BMAC_ADD_FILT0, 0);
+ nw64_mac(BMAC_ADD_FILT1, 0);
+ nw64_mac(BMAC_ADD_FILT2, 0);
+ nw64_mac(BMAC_ADD_FILT12_MASK, 0);
+ nw64_mac(BMAC_ADD_FILT00_MASK, 0);
+ for (i = 0; i < MAC_NUM_HASH; i++)
+ nw64_mac(BMAC_HASH_TBL(i), 0);
+ niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
+ niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
+ nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);
+
+ val = nr64_mac(BRXMAC_CONFIG);
+ val &= ~(BRXMAC_CONFIG_ENABLE |
+ BRXMAC_CONFIG_STRIP_PAD |
+ BRXMAC_CONFIG_STRIP_FCS |
+ BRXMAC_CONFIG_PROMISC |
+ BRXMAC_CONFIG_PROMISC_GRP |
+ BRXMAC_CONFIG_ADDR_FILT_EN |
+ BRXMAC_CONFIG_DISCARD_DIS);
+ val |= (BRXMAC_CONFIG_HASH_FILT_EN);
+ nw64_mac(BRXMAC_CONFIG, val);
+
+ val = nr64_mac(BMAC_ADDR_CMPEN);
+ val |= BMAC_ADDR_CMPEN_EN0;
+ nw64_mac(BMAC_ADDR_CMPEN, val);
+}
+
+static void niu_init_rx_mac(struct niu *np)
+{
+ niu_set_primary_mac(np, np->dev->dev_addr);
+
+ if (np->flags & NIU_FLAGS_XMAC)
+ niu_init_rx_xmac(np);
+ else
+ niu_init_rx_bmac(np);
+}
+
+static void niu_enable_tx_xmac(struct niu *np, int on)
+{
+ u64 val = nr64_mac(XMAC_CONFIG);
+
+ if (on)
+ val |= XMAC_CONFIG_TX_ENABLE;
+ else
+ val &= ~XMAC_CONFIG_TX_ENABLE;
+ nw64_mac(XMAC_CONFIG, val);
+}
+
+static void niu_enable_tx_bmac(struct niu *np, int on)
+{
+ u64 val = nr64_mac(BTXMAC_CONFIG);
+
+ if (on)
+ val |= BTXMAC_CONFIG_ENABLE;
+ else
+ val &= ~BTXMAC_CONFIG_ENABLE;
+ nw64_mac(BTXMAC_CONFIG, val);
+}
+
+static void niu_enable_tx_mac(struct niu *np, int on)
+{
+ if (np->flags & NIU_FLAGS_XMAC)
+ niu_enable_tx_xmac(np, on);
+ else
+ niu_enable_tx_bmac(np, on);
+}
+
+static void niu_enable_rx_xmac(struct niu *np, int on)
+{
+ u64 val = nr64_mac(XMAC_CONFIG);
+
+ val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
+ XMAC_CONFIG_PROMISCUOUS);
+
+ if (np->flags & NIU_FLAGS_MCAST)
+ val |= XMAC_CONFIG_HASH_FILTER_EN;
+ if (np->flags & NIU_FLAGS_PROMISC)
+ val |= XMAC_CONFIG_PROMISCUOUS;
+
+ if (on)
+ val |= XMAC_CONFIG_RX_MAC_ENABLE;
+ else
+ val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
+ nw64_mac(XMAC_CONFIG, val);
+}
+
+static void niu_enable_rx_bmac(struct niu *np, int on)
+{
+ u64 val = nr64_mac(BRXMAC_CONFIG);
+
+ val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
+ BRXMAC_CONFIG_PROMISC);
+
+ if (np->flags & NIU_FLAGS_MCAST)
+ val |= BRXMAC_CONFIG_HASH_FILT_EN;
+ if (np->flags & NIU_FLAGS_PROMISC)
+ val |= BRXMAC_CONFIG_PROMISC;
+
+ if (on)
+ val |= BRXMAC_CONFIG_ENABLE;
+ else
+ val &= ~BRXMAC_CONFIG_ENABLE;
+ nw64_mac(BRXMAC_CONFIG, val);
+}
+
+static void niu_enable_rx_mac(struct niu *np, int on)
+{
+ if (np->flags & NIU_FLAGS_XMAC)
+ niu_enable_rx_xmac(np, on);
+ else
+ niu_enable_rx_bmac(np, on);
+}
+
+static int niu_init_mac(struct niu *np)
+{
+ int err;
+
+ niu_init_xif(np);
+ err = niu_init_pcs(np);
+ if (err)
+ return err;
+
+ err = niu_reset_tx_mac(np);
+ if (err)
+ return err;
+ niu_init_tx_mac(np);
+ err = niu_reset_rx_mac(np);
+ if (err)
+ return err;
+ niu_init_rx_mac(np);
+
+ /* This looks hokey but the RX MAC reset we just did will
+ * undo some of the state we set up in niu_init_tx_mac() so we
+ * have to call it again. In particular, the RX MAC reset will
+ * set the XMAC_MAX register back to its default value.
+ */
+ niu_init_tx_mac(np);
+ niu_enable_tx_mac(np, 1);
+
+ niu_enable_rx_mac(np, 1);
+
+ return 0;
+}
+
+static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
+{
+ (void) niu_tx_channel_stop(np, rp->tx_channel);
+}
+
+static void niu_stop_tx_channels(struct niu *np)
+{
+ int i;
+
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &np->tx_rings[i];
+
+ niu_stop_one_tx_channel(np, rp);
+ }
+}
+
+static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
+{
+ (void) niu_tx_channel_reset(np, rp->tx_channel);
+}
+
+static void niu_reset_tx_channels(struct niu *np)
+{
+ int i;
+
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &np->tx_rings[i];
+
+ niu_reset_one_tx_channel(np, rp);
+ }
+}
+
+static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
+{
+ (void) niu_enable_rx_channel(np, rp->rx_channel, 0);
+}
+
+static void niu_stop_rx_channels(struct niu *np)
+{
+ int i;
+
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &np->rx_rings[i];
+
+ niu_stop_one_rx_channel(np, rp);
+ }
+}
+
+static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
+{
+ int channel = rp->rx_channel;
+
+ (void) niu_rx_channel_reset(np, channel);
+ nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
+ nw64(RX_DMA_CTL_STAT(channel), 0);
+ (void) niu_enable_rx_channel(np, channel, 0);
+}
+
+static void niu_reset_rx_channels(struct niu *np)
+{
+ int i;
+
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &np->rx_rings[i];
+
+ niu_reset_one_rx_channel(np, rp);
+ }
+}
+
+static void niu_disable_ipp(struct niu *np)
+{
+ u64 rd, wr, val;
+ int limit;
+
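+ /* Poll until the DFIFO read pointer catches up with the write
+ * pointer, i.e. until the FIFO has drained.
+ */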
+ rd = nr64_ipp(IPP_DFIFO_RD_PTR);
+ wr = nr64_ipp(IPP_DFIFO_WR_PTR);
+ limit = 100;
+ while (--limit >= 0 && (rd != wr)) {
+ rd = nr64_ipp(IPP_DFIFO_RD_PTR);
+ wr = nr64_ipp(IPP_DFIFO_WR_PTR);
+ }
+ if (limit < 0 &&
+ (rd != 0 && wr != 1)) {
+ netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
+ (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
+ (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
+ }
+
+ val = nr64_ipp(IPP_CFIG);
+ val &= ~(IPP_CFIG_IPP_ENABLE |
+ IPP_CFIG_DFIFO_ECC_EN |
+ IPP_CFIG_DROP_BAD_CRC |
+ IPP_CFIG_CKSUM_EN);
+ nw64_ipp(IPP_CFIG, val);
+
+ (void) niu_ipp_reset(np);
+}
+
+static int niu_init_hw(struct niu *np)
+{
+ int i, err;
+
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
+ niu_txc_enable_port(np, 1);
+ niu_txc_port_dma_enable(np, 1);
+ niu_txc_set_imask(np, 0);
+
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &np->tx_rings[i];
+
+ err = niu_init_one_tx_channel(np, rp);
+ if (err)
+ return err;
+ }
+
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
+ err = niu_init_rx_channels(np);
+ if (err)
+ goto out_uninit_tx_channels;
+
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
+ err = niu_init_classifier_hw(np);
+ if (err)
+ goto out_uninit_rx_channels;
+
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
+ err = niu_init_zcp(np);
+ if (err)
+ goto out_uninit_rx_channels;
+
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
+ err = niu_init_ipp(np);
+ if (err)
+ goto out_uninit_rx_channels;
+
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
+ err = niu_init_mac(np);
+ if (err)
+ goto out_uninit_ipp;
+
+ return 0;
+
+out_uninit_ipp:
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
+ niu_disable_ipp(np);
+
+out_uninit_rx_channels:
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
+ niu_stop_rx_channels(np);
+ niu_reset_rx_channels(np);
+
+out_uninit_tx_channels:
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
+ niu_stop_tx_channels(np);
+ niu_reset_tx_channels(np);
+
+ return err;
+}
+
+static void niu_stop_hw(struct niu *np)
+{
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
+ niu_enable_interrupts(np, 0);
+
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
+ niu_enable_rx_mac(np, 0);
+
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
+ niu_disable_ipp(np);
+
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
+ niu_stop_tx_channels(np);
+
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
+ niu_stop_rx_channels(np);
+
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
+ niu_reset_tx_channels(np);
+
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
+ niu_reset_rx_channels(np);
+}
+
+static void niu_set_irq_name(struct niu *np)
+{
+ int port = np->port;
+ int i, j = 1;
+
+ sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
+
+ if (port == 0) {
+ sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
+ sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
+ j = 3;
+ }
+
+ for (i = 0; i < np->num_ldg - j; i++) {
+ if (i < np->num_rx_rings)
+ sprintf(np->irq_name[i+j], "%s-rx-%d",
+ np->dev->name, i);
+ else if (i < np->num_tx_rings + np->num_rx_rings)
+ sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
+ i - np->num_rx_rings);
+ }
+}
+
+static int niu_request_irq(struct niu *np)
+{
+ int i, j, err;
+
+ niu_set_irq_name(np);
+
+ err = 0;
+ for (i = 0; i < np->num_ldg; i++) {
+ struct niu_ldg *lp = &np->ldg[i];
+
+ err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
+ np->irq_name[i], lp);
+ if (err)
+ goto out_free_irqs;
+ }
+
+ return 0;
+
+out_free_irqs:
+ for (j = 0; j < i; j++) {
+ struct niu_ldg *lp = &np->ldg[j];
+
+ free_irq(lp->irq, lp);
+ }
+ return err;
+}
+
+static void niu_free_irq(struct niu *np)
+{
+ int i;
+
+ for (i = 0; i < np->num_ldg; i++) {
+ struct niu_ldg *lp = &np->ldg[i];
+
+ free_irq(lp->irq, lp);
+ }
+}
+
+static void niu_enable_napi(struct niu *np)
+{
+ int i;
+
+ for (i = 0; i < np->num_ldg; i++)
+ napi_enable(&np->ldg[i].napi);
+}
+
+static void niu_disable_napi(struct niu *np)
+{
+ int i;
+
+ for (i = 0; i < np->num_ldg; i++)
+ napi_disable(&np->ldg[i].napi);
+}
+
+static int niu_open(struct net_device *dev)
+{
+ struct niu *np = netdev_priv(dev);
+ int err;
+
+ netif_carrier_off(dev);
+
+ err = niu_alloc_channels(np);
+ if (err)
+ goto out_err;
+
+ err = niu_enable_interrupts(np, 0);
+ if (err)
+ goto out_free_channels;
+
+ err = niu_request_irq(np);
+ if (err)
+ goto out_free_channels;
+
+ niu_enable_napi(np);
+
+ spin_lock_irq(&np->lock);
+
+ err = niu_init_hw(np);
+ if (!err) {
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + HZ;
+ np->timer.data = (unsigned long) np;
+ np->timer.function = niu_timer;
+
+ err = niu_enable_interrupts(np, 1);
+ if (err)
+ niu_stop_hw(np);
+ }
+
+ spin_unlock_irq(&np->lock);
+
+ if (err) {
+ niu_disable_napi(np);
+ goto out_free_irq;
+ }
+
+ netif_tx_start_all_queues(dev);
+
+ if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
+ netif_carrier_on(dev);
+
+ add_timer(&np->timer);
+
+ return 0;
+
+out_free_irq:
+ niu_free_irq(np);
+
+out_free_channels:
+ niu_free_channels(np);
+
+out_err:
+ return err;
+}
+
+static void niu_full_shutdown(struct niu *np, struct net_device *dev)
+{
+ cancel_work_sync(&np->reset_task);
+
+ niu_disable_napi(np);
+ netif_tx_stop_all_queues(dev);
+
+ del_timer_sync(&np->timer);
+
+ spin_lock_irq(&np->lock);
+
+ niu_stop_hw(np);
+
+ spin_unlock_irq(&np->lock);
+}
+
+static int niu_close(struct net_device *dev)
+{
+ struct niu *np = netdev_priv(dev);
+
+ niu_full_shutdown(np, dev);
+
+ niu_free_irq(np);
+
+ niu_free_channels(np);
+
+ niu_handle_led(np, 0);
+
+ return 0;
+}
+
+static void niu_sync_xmac_stats(struct niu *np)
+{
+ struct niu_xmac_stats *mp = &np->mac_stats.xmac;
+
+ mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
+ mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);
+
+ mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
+ mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
+ mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
+ mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
+ mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
+ mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
+ mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
+ mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
+ mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
+ mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
+ mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
+ mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
+ mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
+ mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
+ mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
+ mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
+}
+
+static void niu_sync_bmac_stats(struct niu *np)
+{
+ struct niu_bmac_stats *mp = &np->mac_stats.bmac;
+
+ mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
+ mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
+
+ mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
+ mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
+ mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
+ mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
+}
+
+static void niu_sync_mac_stats(struct niu *np)
+{
+ if (np->flags & NIU_FLAGS_XMAC)
+ niu_sync_xmac_stats(np);
+ else
+ niu_sync_bmac_stats(np);
+}
+
+static void niu_get_rx_stats(struct niu *np,
+ struct rtnl_link_stats64 *stats)
+{
+ u64 pkts, dropped, errors, bytes;
+ struct rx_ring_info *rx_rings;
+ int i;
+
+ pkts = dropped = errors = bytes = 0;
+
+ rx_rings = ACCESS_ONCE(np->rx_rings);
+ if (!rx_rings)
+ goto no_rings;
+
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &rx_rings[i];
+
+ niu_sync_rx_discard_stats(np, rp, 0);
+
+ pkts += rp->rx_packets;
+ bytes += rp->rx_bytes;
+ dropped += rp->rx_dropped;
+ errors += rp->rx_errors;
+ }
+
+no_rings:
+ stats->rx_packets = pkts;
+ stats->rx_bytes = bytes;
+ stats->rx_dropped = dropped;
+ stats->rx_errors = errors;
+}
+
+static void niu_get_tx_stats(struct niu *np,
+ struct rtnl_link_stats64 *stats)
+{
+ u64 pkts, errors, bytes;
+ struct tx_ring_info *tx_rings;
+ int i;
+
+ pkts = errors = bytes = 0;
+
+ tx_rings = ACCESS_ONCE(np->tx_rings);
+ if (!tx_rings)
+ goto no_rings;
+
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &tx_rings[i];
+
+ pkts += rp->tx_packets;
+ bytes += rp->tx_bytes;
+ errors += rp->tx_errors;
+ }
+
+no_rings:
+ stats->tx_packets = pkts;
+ stats->tx_bytes = bytes;
+ stats->tx_errors = errors;
+}
+
+static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct niu *np = netdev_priv(dev);
+
+ if (netif_running(dev)) {
+ niu_get_rx_stats(np, stats);
+ niu_get_tx_stats(np, stats);
+ }
+
+ return stats;
+}
+
+static void niu_load_hash_xmac(struct niu *np, u16 *hash)
+{
+ int i;
+
+ for (i = 0; i < 16; i++)
+ nw64_mac(XMAC_HASH_TBL(i), hash[i]);
+}
+
+static void niu_load_hash_bmac(struct niu *np, u16 *hash)
+{
+ int i;
+
+ for (i = 0; i < 16; i++)
+ nw64_mac(BMAC_HASH_TBL(i), hash[i]);
+}
+
+static void niu_load_hash(struct niu *np, u16 *hash)
+{
+ if (np->flags & NIU_FLAGS_XMAC)
+ niu_load_hash_xmac(np, hash);
+ else
+ niu_load_hash_bmac(np, hash);
+}
+
+static void niu_set_rx_mode(struct net_device *dev)
+{
+ struct niu *np = netdev_priv(dev);
+ int i, alt_cnt, err;
+ struct netdev_hw_addr *ha;
+ unsigned long flags;
+ u16 hash[16] = { 0, };
+
+ spin_lock_irqsave(&np->lock, flags);
+ niu_enable_rx_mac(np, 0);
+
+ np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
+ if (dev->flags & IFF_PROMISC)
+ np->flags |= NIU_FLAGS_PROMISC;
+ if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
+ np->flags |= NIU_FLAGS_MCAST;
+
+ alt_cnt = netdev_uc_count(dev);
+ if (alt_cnt > niu_num_alt_addr(np)) {
+ alt_cnt = 0;
+ np->flags |= NIU_FLAGS_PROMISC;
+ }
+
+ if (alt_cnt) {
+ int index = 0;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ err = niu_set_alt_mac(np, index, ha->addr);
+ if (err)
+ netdev_warn(dev, "Error %d adding alt mac %d\n",
+ err, index);
+ err = niu_enable_alt_mac(np, index, 1);
+ if (err)
+ netdev_warn(dev, "Error %d enabling alt mac %d\n",
+ err, index);
+
+ index++;
+ }
+ } else {
+ int alt_start;
+ if (np->flags & NIU_FLAGS_XMAC)
+ alt_start = 0;
+ else
+ alt_start = 1;
+ for (i = alt_start; i < niu_num_alt_addr(np); i++) {
+ err = niu_enable_alt_mac(np, i, 0);
+ if (err)
+ netdev_warn(dev, "Error %d disabling alt mac %d\n",
+ err, i);
+ }
+ }
+ if (dev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < 16; i++)
+ hash[i] = 0xffff;
+ } else if (!netdev_mc_empty(dev)) {
+ netdev_for_each_mc_addr(ha, dev) {
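+ /* Hash on the top 8 bits of the little-endian CRC: bits
+ * 7:4 select one of the 16 hash registers and bits 3:0
+ * select the bit within it.
+ */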
+ u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
+
+ crc >>= 24;
+ hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
+ }
+ }
+
+ if (np->flags & NIU_FLAGS_MCAST)
+ niu_load_hash(np, hash);
+
+ niu_enable_rx_mac(np, 1);
+ spin_unlock_irqrestore(&np->lock, flags);
+}
+
+static int niu_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct niu *np = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ unsigned long flags;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+
+ if (!netif_running(dev))
+ return 0;
+
+ spin_lock_irqsave(&np->lock, flags);
+ niu_enable_rx_mac(np, 0);
+ niu_set_primary_mac(np, dev->dev_addr);
+ niu_enable_rx_mac(np, 1);
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ return 0;
+}
+
+static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ return -EOPNOTSUPP;
+}
+
+static void niu_netif_stop(struct niu *np)
+{
+ np->dev->trans_start = jiffies; /* prevent tx timeout */
+
+ niu_disable_napi(np);
+
+ netif_tx_disable(np->dev);
+}
+
+static void niu_netif_start(struct niu *np)
+{
+ /* NOTE: unconditionally waking the TX queues is only
+ * appropriate so long as all callers are assured to have
+ * free tx slots (such as after niu_init_hw).
+ */
+ netif_tx_wake_all_queues(np->dev);
+
+ niu_enable_napi(np);
+
+ niu_enable_interrupts(np, 1);
+}
+
+static void niu_reset_buffers(struct niu *np)
+{
+ int i, j, k, err;
+
+ if (np->rx_rings) {
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &np->rx_rings[i];
+
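+ /* Re-link every page still held in rxhash back into the RBR;
+ * page->index holds the DMA base address and page->mapping
+ * chains the pages within a hash bucket.
+ */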
+ for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
+ struct page *page;
+
+ page = rp->rxhash[j];
+ while (page) {
+ struct page *next =
+ (struct page *) page->mapping;
+ u64 base = page->index;
+ base = base >> RBR_DESCR_ADDR_SHIFT;
+ rp->rbr[k++] = cpu_to_le32(base);
+ page = next;
+ }
+ }
+ for (; k < MAX_RBR_RING_SIZE; k++) {
+ err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
+ if (unlikely(err))
+ break;
+ }
+
+ rp->rbr_index = rp->rbr_table_size - 1;
+ rp->rcr_index = 0;
+ rp->rbr_pending = 0;
+ rp->rbr_refill_pending = 0;
+ }
+ }
+ if (np->tx_rings) {
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &np->tx_rings[i];
+
+ for (j = 0; j < MAX_TX_RING_SIZE; j++) {
+ if (rp->tx_buffs[j].skb)
+ (void) release_tx_packet(np, rp, j);
+ }
+
+ rp->pending = MAX_TX_RING_SIZE;
+ rp->prod = 0;
+ rp->cons = 0;
+ rp->wrap_bit = 0;
+ }
+ }
+}
+
+static void niu_reset_task(struct work_struct *work)
+{
+ struct niu *np = container_of(work, struct niu, reset_task);
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&np->lock, flags);
+ if (!netif_running(np->dev)) {
+ spin_unlock_irqrestore(&np->lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ del_timer_sync(&np->timer);
+
+ niu_netif_stop(np);
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ niu_stop_hw(np);
+
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ niu_reset_buffers(np);
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ err = niu_init_hw(np);
+ if (!err) {
+ np->timer.expires = jiffies + HZ;
+ add_timer(&np->timer);
+ niu_netif_start(np);
+ }
+
+ spin_unlock_irqrestore(&np->lock, flags);
+}
+
+static void niu_tx_timeout(struct net_device *dev)
+{
+ struct niu *np = netdev_priv(dev);
+
+ dev_err(np->device, "%s: Transmit timed out, resetting\n",
+ dev->name);
+
+ schedule_work(&np->reset_task);
+}
+
+static void niu_set_txd(struct tx_ring_info *rp, int index,
+ u64 mapping, u64 len, u64 mark,
+ u64 n_frags)
+{
+ __le64 *desc = &rp->descr[index];
+
+ *desc = cpu_to_le64(mark |
+ (n_frags << TX_DESC_NUM_PTR_SHIFT) |
+ (len << TX_DESC_TR_LEN_SHIFT) |
+ (mapping & TX_DESC_SAD));
+}
+
+static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
+ u64 pad_bytes, u64 len)
+{
+ u16 eth_proto, eth_proto_inner;
+ u64 csum_bits, l3off, ihl, ret;
+ u8 ip_proto;
+ int ipv6;
+
+ eth_proto = be16_to_cpu(ehdr->h_proto);
+ eth_proto_inner = eth_proto;
+ if (eth_proto == ETH_P_8021Q) {
+ struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
+ __be16 val = vp->h_vlan_encapsulated_proto;
+
+ eth_proto_inner = be16_to_cpu(val);
+ }
+
+ ipv6 = ihl = 0;
+ switch (skb->protocol) {
+ case cpu_to_be16(ETH_P_IP):
+ ip_proto = ip_hdr(skb)->protocol;
+ ihl = ip_hdr(skb)->ihl;
+ break;
+ case cpu_to_be16(ETH_P_IPV6):
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ ihl = (40 >> 2);
+ ipv6 = 1;
+ break;
+ default:
+ ip_proto = ihl = 0;
+ break;
+ }
+
+ csum_bits = TXHDR_CSUM_NONE;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u64 start, stuff;
+
+ csum_bits = (ip_proto == IPPROTO_TCP ?
+ TXHDR_CSUM_TCP :
+ (ip_proto == IPPROTO_UDP ?
+ TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
+
+ start = skb_checksum_start_offset(skb) -
+ (pad_bytes + sizeof(struct tx_pkt_hdr));
+ stuff = start + skb->csum_offset;
+
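+ /* The L4 start and stuff offsets are programmed in 2-byte
+ * units, hence the divide by two.
+ */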
+ csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
+ csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
+ }
+
+ l3off = skb_network_offset(skb) -
+ (pad_bytes + sizeof(struct tx_pkt_hdr));
+
+ ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
+ (len << TXHDR_LEN_SHIFT) |
+ ((l3off / 2) << TXHDR_L3START_SHIFT) |
+ (ihl << TXHDR_IHL_SHIFT) |
+ ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) |
+ ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
+ (ipv6 ? TXHDR_IP_VER : 0) |
+ csum_bits);
+
+ return ret;
+}
+
+static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct niu *np = netdev_priv(dev);
+ unsigned long align, headroom;
+ struct netdev_queue *txq;
+ struct tx_ring_info *rp;
+ struct tx_pkt_hdr *tp;
+ unsigned int len, nfg;
+ struct ethhdr *ehdr;
+ int prod, i, tlen;
+ u64 mapping, mrk;
+
+ i = skb_get_queue_mapping(skb);
+ rp = &np->tx_rings[i];
+ txq = netdev_get_tx_queue(dev, i);
+
+ if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
+ netif_tx_stop_queue(txq);
+ dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
+ rp->tx_errors++;
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb->len < ETH_ZLEN) {
+ unsigned int pad_bytes = ETH_ZLEN - skb->len;
+
+ if (skb_pad(skb, pad_bytes))
+ goto out;
+ skb_put(skb, pad_bytes);
+ }
+
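+ /* Reserve headroom for the packet header plus up to 15 bytes
+ * of padding used to 16-byte align the start of the packet.
+ */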
+ len = sizeof(struct tx_pkt_hdr) + 15;
+ if (skb_headroom(skb) < len) {
+ struct sk_buff *skb_new;
+
+ skb_new = skb_realloc_headroom(skb, len);
+ if (!skb_new) {
+ rp->tx_errors++;
+ goto out_drop;
+ }
+ kfree_skb(skb);
+ skb = skb_new;
+ } else
+ skb_orphan(skb);
+
+ align = ((unsigned long) skb->data & (16 - 1));
+ headroom = align + sizeof(struct tx_pkt_hdr);
+
+ ehdr = (struct ethhdr *) skb->data;
+ tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);
+
+ len = skb->len - sizeof(struct tx_pkt_hdr);
+ tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
+ tp->resv = 0;
+
+ len = skb_headlen(skb);
+ mapping = np->ops->map_single(np->device, skb->data,
+ len, DMA_TO_DEVICE);
+
+ prod = rp->prod;
+
+ rp->tx_buffs[prod].skb = skb;
+ rp->tx_buffs[prod].mapping = mapping;
+
+ mrk = TX_DESC_SOP;
+ if (++rp->mark_counter == rp->mark_freq) {
+ rp->mark_counter = 0;
+ mrk |= TX_DESC_MARK;
+ rp->mark_pending++;
+ }
+
+ tlen = len;
+ nfg = skb_shinfo(skb)->nr_frags;
+ while (tlen > 0) {
+ tlen -= MAX_TX_DESC_LEN;
+ nfg++;
+ }
+
+ while (len > 0) {
+ unsigned int this_len = len;
+
+ if (this_len > MAX_TX_DESC_LEN)
+ this_len = MAX_TX_DESC_LEN;
+
+ niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
+ mrk = nfg = 0;
+
+ prod = NEXT_TX(rp, prod);
+ mapping += this_len;
+ len -= this_len;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ len = frag->size;
+ mapping = np->ops->map_page(np->device, skb_frag_page(frag),
+ frag->page_offset, len,
+ DMA_TO_DEVICE);
+
+ rp->tx_buffs[prod].skb = NULL;
+ rp->tx_buffs[prod].mapping = mapping;
+
+ niu_set_txd(rp, prod, mapping, len, 0, 0);
+
+ prod = NEXT_TX(rp, prod);
+ }
+
+ if (prod < rp->prod)
+ rp->wrap_bit ^= TX_RING_KICK_WRAP;
+ rp->prod = prod;
+
+ nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
+
+ if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
+ netif_tx_stop_queue(txq);
+ if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
+ netif_tx_wake_queue(txq);
+ }
+
+out:
+ return NETDEV_TX_OK;
+
+out_drop:
+ rp->tx_errors++;
+ kfree_skb(skb);
+ goto out;
+}
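+
+ /* Note on the kick above: prod only moves forward modulo the ring
+ * size, so a new prod smaller than the old one means the index
+ * wrapped past slot zero, and TX_RING_KICK_WRAP is toggled to tell
+ * the hardware which lap the index belongs to.  A sketch with
+ * hypothetical numbers (256-entry ring, prod wrapping from 254 to 2):
+ */
+ #if 0
+ static void example_wrap_kick(struct niu *np, struct tx_ring_info *rp)
+ {
+ int prod = 2; /* old rp->prod was 254, so we wrapped */
+
+ rp->wrap_bit ^= TX_RING_KICK_WRAP;
+ rp->prod = prod;
+ nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
+ }
+ #endif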
+
+static int niu_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct niu *np = netdev_priv(dev);
+ int err, orig_jumbo, new_jumbo;
+
+ if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
+ return -EINVAL;
+
+ orig_jumbo = (dev->mtu > ETH_DATA_LEN);
+ new_jumbo = (new_mtu > ETH_DATA_LEN);
+
+ dev->mtu = new_mtu;
+
+ if (!netif_running(dev) ||
+ (orig_jumbo == new_jumbo))
+ return 0;
+
+ niu_full_shutdown(np, dev);
+
+ niu_free_channels(np);
+
+ niu_enable_napi(np);
+
+ err = niu_alloc_channels(np);
+ if (err)
+ return err;
+
+ spin_lock_irq(&np->lock);
+
+ err = niu_init_hw(np);
+ if (!err) {
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + HZ;
+ np->timer.data = (unsigned long) np;
+ np->timer.function = niu_timer;
+
+ err = niu_enable_interrupts(np, 1);
+ if (err)
+ niu_stop_hw(np);
+ }
+
+ spin_unlock_irq(&np->lock);
+
+ if (!err) {
+ netif_tx_start_all_queues(dev);
+ if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
+ netif_carrier_on(dev);
+
+ add_timer(&np->timer);
+ }
+
+ return err;
+}
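+
+ /* Example of the restart condition above: changing the MTU from
+ * 1500 to 9000 crosses ETH_DATA_LEN, so a running device is fully
+ * shut down and re-initialized; dropping from 1500 to 1400 only
+ * updates dev->mtu.
+ */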
+
+static void niu_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct niu *np = netdev_priv(dev);
+ struct niu_vpd *vpd = &np->vpd;
+
+ strcpy(info->driver, DRV_MODULE_NAME);
+ strcpy(info->version, DRV_MODULE_VERSION);
+ sprintf(info->fw_version, "%d.%d",
+ vpd->fcode_major, vpd->fcode_minor);
+ if (np->parent->plat_type != PLAT_TYPE_NIU)
+ strcpy(info->bus_info, pci_name(np->pdev));
+}
+
+static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct niu *np = netdev_priv(dev);
+ struct niu_link_config *lp;
+
+ lp = &np->link_config;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->phy_address = np->phy_addr;
+ cmd->supported = lp->supported;
+ cmd->advertising = lp->active_advertising;
+ cmd->autoneg = lp->active_autoneg;
+ ethtool_cmd_speed_set(cmd, lp->active_speed);
+ cmd->duplex = lp->active_duplex;
+ cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
+ cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
+ XCVR_EXTERNAL : XCVR_INTERNAL;
+
+ return 0;
+}
+
+static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct niu *np = netdev_priv(dev);
+ struct niu_link_config *lp = &np->link_config;
+
+ lp->advertising = cmd->advertising;
+ lp->speed = ethtool_cmd_speed(cmd);
+ lp->duplex = cmd->duplex;
+ lp->autoneg = cmd->autoneg;
+ return niu_init_link(np);
+}
+
+static u32 niu_get_msglevel(struct net_device *dev)
+{
+ struct niu *np = netdev_priv(dev);
+ return np->msg_enable;
+}
+
+static void niu_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct niu *np = netdev_priv(dev);
+ np->msg_enable = value;
+}
+
+static int niu_nway_reset(struct net_device *dev)
+{
+ struct niu *np = netdev_priv(dev);
+
+ if (np->link_config.autoneg)
+ return niu_init_link(np);
+
+ return 0;
+}
+
+static int niu_get_eeprom_len(struct net_device *dev)
+{
+ struct niu *np = netdev_priv(dev);
+
+ return np->eeprom_len;
+}
+
+static int niu_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct niu *np = netdev_priv(dev);
+ u32 offset, len, val;
+
+ offset = eeprom->offset;
+ len = eeprom->len;
+
+ if (offset + len < offset)
+ return -EINVAL;
+ if (offset >= np->eeprom_len)
+ return -EINVAL;
+ if (offset + len > np->eeprom_len)
+ len = eeprom->len = np->eeprom_len - offset;
+
+ if (offset & 3) {
+ u32 b_offset, b_count;
+
+ b_offset = offset & 3;
+ b_count = 4 - b_offset;
+ if (b_count > len)
+ b_count = len;
+
+ val = nr64(ESPC_NCR((offset - b_offset) / 4));
+ memcpy(data, ((char *)&val) + b_offset, b_count);
+ data += b_count;
+ len -= b_count;
+ offset += b_count;
+ }
+ while (len >= 4) {
+ val = nr64(ESPC_NCR(offset / 4));
+ memcpy(data, &val, 4);
+ data += 4;
+ len -= 4;
+ offset += 4;
+ }
+ if (len) {
+ val = nr64(ESPC_NCR(offset / 4));
+ memcpy(data, &val, len);
+ }
+ return 0;
+}
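+
+ /* Example of the alignment handling above: offset = 5, len = 10
+ * copies the last three bytes of 32-bit word 1 (EEPROM bytes 5-7),
+ * all of word 2 (bytes 8-11), then the first three bytes of word 3
+ * (bytes 12-14), where word i is the ESPC_NCR(i) register.
+ */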
+
+static void niu_ethflow_to_l3proto(int flow_type, u8 *pid)
+{
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ *pid = IPPROTO_TCP;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ *pid = IPPROTO_UDP;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ *pid = IPPROTO_SCTP;
+ break;
+ case AH_V4_FLOW:
+ case AH_V6_FLOW:
+ *pid = IPPROTO_AH;
+ break;
+ case ESP_V4_FLOW:
+ case ESP_V6_FLOW:
+ *pid = IPPROTO_ESP;
+ break;
+ default:
+ *pid = 0;
+ break;
+ }
+}
+
+static int niu_class_to_ethflow(u64 class, int *flow_type)
+{
+ switch (class) {
+ case CLASS_CODE_TCP_IPV4:
+ *flow_type = TCP_V4_FLOW;
+ break;
+ case CLASS_CODE_UDP_IPV4:
+ *flow_type = UDP_V4_FLOW;
+ break;
+ case CLASS_CODE_AH_ESP_IPV4:
+ *flow_type = AH_V4_FLOW;
+ break;
+ case CLASS_CODE_SCTP_IPV4:
+ *flow_type = SCTP_V4_FLOW;
+ break;
+ case CLASS_CODE_TCP_IPV6:
+ *flow_type = TCP_V6_FLOW;
+ break;
+ case CLASS_CODE_UDP_IPV6:
+ *flow_type = UDP_V6_FLOW;
+ break;
+ case CLASS_CODE_AH_ESP_IPV6:
+ *flow_type = AH_V6_FLOW;
+ break;
+ case CLASS_CODE_SCTP_IPV6:
+ *flow_type = SCTP_V6_FLOW;
+ break;
+ case CLASS_CODE_USER_PROG1:
+ case CLASS_CODE_USER_PROG2:
+ case CLASS_CODE_USER_PROG3:
+ case CLASS_CODE_USER_PROG4:
+ *flow_type = IP_USER_FLOW;
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+static int niu_ethflow_to_class(int flow_type, u64 *class)
+{
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ *class = CLASS_CODE_TCP_IPV4;
+ break;
+ case UDP_V4_FLOW:
+ *class = CLASS_CODE_UDP_IPV4;
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ *class = CLASS_CODE_AH_ESP_IPV4;
+ break;
+ case SCTP_V4_FLOW:
+ *class = CLASS_CODE_SCTP_IPV4;
+ break;
+ case TCP_V6_FLOW:
+ *class = CLASS_CODE_TCP_IPV6;
+ break;
+ case UDP_V6_FLOW:
+ *class = CLASS_CODE_UDP_IPV6;
+ break;
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ *class = CLASS_CODE_AH_ESP_IPV6;
+ break;
+ case SCTP_V6_FLOW:
+ *class = CLASS_CODE_SCTP_IPV6;
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+static u64 niu_flowkey_to_ethflow(u64 flow_key)
+{
+ u64 ethflow = 0;
+
+ if (flow_key & FLOW_KEY_L2DA)
+ ethflow |= RXH_L2DA;
+ if (flow_key & FLOW_KEY_VLAN)
+ ethflow |= RXH_VLAN;
+ if (flow_key & FLOW_KEY_IPSA)
+ ethflow |= RXH_IP_SRC;
+ if (flow_key & FLOW_KEY_IPDA)
+ ethflow |= RXH_IP_DST;
+ if (flow_key & FLOW_KEY_PROTO)
+ ethflow |= RXH_L3_PROTO;
+ if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
+ ethflow |= RXH_L4_B_0_1;
+ if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
+ ethflow |= RXH_L4_B_2_3;
+
+ return ethflow;
+}
+
+static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
+{
+ u64 key = 0;
+
+ if (ethflow & RXH_L2DA)
+ key |= FLOW_KEY_L2DA;
+ if (ethflow & RXH_VLAN)
+ key |= FLOW_KEY_VLAN;
+ if (ethflow & RXH_IP_SRC)
+ key |= FLOW_KEY_IPSA;
+ if (ethflow & RXH_IP_DST)
+ key |= FLOW_KEY_IPDA;
+ if (ethflow & RXH_L3_PROTO)
+ key |= FLOW_KEY_PROTO;
+ if (ethflow & RXH_L4_B_0_1)
+ key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
+ if (ethflow & RXH_L4_B_2_3)
+ key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);
+
+ *flow_key = key;
+
+ return 1;
+}
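+
+ /* Example of the mapping above: hashing on source/destination IP
+ * plus both L4 port words (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
+ * RXH_L4_B_2_3) becomes FLOW_KEY_IPSA | FLOW_KEY_IPDA with
+ * FLOW_KEY_L4_BYTE12 shifted into both L4 slots, and
+ * niu_flowkey_to_ethflow() reverses the translation:
+ */
+ #if 0
+ /* Round-trip sketch, illustrative only. */
+ static void example_flow_key_roundtrip(void)
+ {
+ u64 ethflow = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ u64 key;
+
+ niu_ethflow_to_flowkey(ethflow, &key);
+ BUG_ON(niu_flowkey_to_ethflow(key) != ethflow);
+ }
+ #endif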
+
+static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
+{
+ u64 class;
+
+ nfc->data = 0;
+
+ if (!niu_ethflow_to_class(nfc->flow_type, &class))
+ return -EINVAL;
+
+ if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
+ TCAM_KEY_DISC)
+ nfc->data = RXH_DISCARD;
+ else
+ nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
+ CLASS_CODE_USER_PROG1]);
+ return 0;
+}
+
+static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ u32 tmp;
+ u16 prt;
+
+ tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
+ fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
+
+ tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
+ fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
+
+ tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
+ fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
+
+ tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
+ fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
+
+ fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
+ TCAM_V4KEY2_TOS_SHIFT;
+ fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >>
+ TCAM_V4KEY2_TOS_SHIFT;
+
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
+ fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
+
+ prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
+ fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
+
+ prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
+ fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
+
+ prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
+ fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT;
+ fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
+
+ tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT;
+ fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
+ break;
+ case IP_USER_FLOW:
+ tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT;
+ fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
+
+ tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+ TCAM_V4KEY2_PORT_SPI_SHIFT;
+ fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
+
+ fsp->h_u.usr_ip4_spec.proto =
+ (tp->key[2] & TCAM_V4KEY2_PROTO) >>
+ TCAM_V4KEY2_PROTO_SHIFT;
+ fsp->m_u.usr_ip4_spec.proto =
+ (tp->key_mask[2] & TCAM_V4KEY2_PROTO) >>
+ TCAM_V4KEY2_PROTO_SHIFT;
+
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ break;
+ default:
+ break;
+ }
+}
+
+static int niu_get_ethtool_tcam_entry(struct niu *np,
+ struct ethtool_rxnfc *nfc)
+{
+ struct niu_parent *parent = np->parent;
+ struct niu_tcam_entry *tp;
+ struct ethtool_rx_flow_spec *fsp = &nfc->fs;
+ u16 idx;
+ u64 class;
+ int ret = 0;
+
+ idx = tcam_get_index(np, (u16)nfc->fs.location);
+
+ tp = &parent->tcam[idx];
+ if (!tp->valid) {
+ netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
+ parent->index, (u16)nfc->fs.location, idx);
+ return -EINVAL;
+ }
+
+ /* fill the flow spec entry */
+ class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
+ TCAM_V4KEY0_CLASS_CODE_SHIFT;
+ /* niu_class_to_ethflow() returns zero on failure, not a negative
+ * errno, so test for that explicitly.
+ */
+ if (!niu_class_to_ethflow(class, &fsp->flow_type)) {
+ netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
+ parent->index);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) {
+ u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >>
+ TCAM_V4KEY2_PROTO_SHIFT;
+ if (proto == IPPROTO_ESP) {
+ if (fsp->flow_type == AH_V4_FLOW)
+ fsp->flow_type = ESP_V4_FLOW;
+ else
+ fsp->flow_type = ESP_V6_FLOW;
+ }
+ }
+
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ niu_get_ip4fs_from_tcam_key(tp, fsp);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ /* Not yet implemented */
+ ret = -EINVAL;
+ break;
+ case IP_USER_FLOW:
+ niu_get_ip4fs_from_tcam_key(tp, fsp);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret < 0)
+ goto out;
+
+ if (tp->assoc_data & TCAM_ASSOCDATA_DISC)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >>
+ TCAM_ASSOCDATA_OFFSET_SHIFT;
+
+ /* report the TCAM size back to the caller */
+ nfc->data = tcam_get_size(np);
+out:
+ return ret;
+}
+
+static int niu_get_ethtool_tcam_all(struct niu *np,
+ struct ethtool_rxnfc *nfc,
+ u32 *rule_locs)
+{
+ struct niu_parent *parent = np->parent;
+ struct niu_tcam_entry *tp;
+ int i, idx, cnt;
+ unsigned long flags;
+ int ret = 0;
+
+ /* report the TCAM size back to the caller */
+ nfc->data = tcam_get_size(np);
+
+ niu_lock_parent(np, flags);
+ for (cnt = 0, i = 0; i < nfc->data; i++) {
+ idx = tcam_get_index(np, i);
+ tp = &parent->tcam[idx];
+ if (!tp->valid)
+ continue;
+ if (cnt == nfc->rule_cnt) {
+ ret = -EMSGSIZE;
+ break;
+ }
+ rule_locs[cnt] = i;
+ cnt++;
+ }
+ niu_unlock_parent(np, flags);
+
+ nfc->rule_cnt = cnt;
+
+ return ret;
+}
+
+static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct niu *np = netdev_priv(dev);
+ int ret = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXFH:
+ ret = niu_get_hash_opts(np, cmd);
+ break;
+ case ETHTOOL_GRXRINGS:
+ cmd->data = np->num_rx_rings;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = niu_get_ethtool_tcam_entry(np, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = niu_get_ethtool_tcam_all(np, cmd, rule_locs);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
+{
+ u64 class;
+ u64 flow_key = 0;
+ unsigned long flags;
+
+ if (!niu_ethflow_to_class(nfc->flow_type, &class))
+ return -EINVAL;
+
+ if (class < CLASS_CODE_USER_PROG1 ||
+ class > CLASS_CODE_SCTP_IPV6)
+ return -EINVAL;
+
+ if (nfc->data & RXH_DISCARD) {
+ niu_lock_parent(np, flags);
+ flow_key = np->parent->tcam_key[class -
+ CLASS_CODE_USER_PROG1];
+ flow_key |= TCAM_KEY_DISC;
+ nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
+ np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
+ niu_unlock_parent(np, flags);
+ return 0;
+ } else {
+ /* Discard was set before, but is not set now */
+ if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
+ TCAM_KEY_DISC) {
+ niu_lock_parent(np, flags);
+ flow_key = np->parent->tcam_key[class -
+ CLASS_CODE_USER_PROG1];
+ flow_key &= ~TCAM_KEY_DISC;
+ nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
+ flow_key);
+ np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
+ flow_key;
+ niu_unlock_parent(np, flags);
+ }
+ }
+
+ if (!niu_ethflow_to_flowkey(nfc->data, &flow_key))
+ return -EINVAL;
+
+ niu_lock_parent(np, flags);
+ nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
+ np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
+ niu_unlock_parent(np, flags);
+
+ return 0;
+}
+
+static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp,
+ struct niu_tcam_entry *tp,
+ int l2_rdc_tab, u64 class)
+{
+ u8 pid = 0;
+ u32 sip, dip, sipm, dipm, spi, spim;
+ u16 sport, dport, spm, dpm;
+
+ sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src);
+ sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src);
+ dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst);
+ dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst);
+
+ tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT;
+ tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE;
+ tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT;
+ tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM;
+
+ tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT;
+ tp->key[3] |= dip;
+
+ tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT;
+ tp->key_mask[3] |= dipm;
+
+ tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos <<
+ TCAM_V4KEY2_TOS_SHIFT);
+ tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos <<
+ TCAM_V4KEY2_TOS_SHIFT);
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
+ spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
+ dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
+ dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
+
+ tp->key[2] |= (((u64)sport << 16) | dport);
+ tp->key_mask[2] |= (((u64)spm << 16) | dpm);
+ niu_ethflow_to_l3proto(fsp->flow_type, &pid);
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi);
+ spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi);
+
+ tp->key[2] |= spi;
+ tp->key_mask[2] |= spim;
+ niu_ethflow_to_l3proto(fsp->flow_type, &pid);
+ break;
+ case IP_USER_FLOW:
+ spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes);
+ spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes);
+
+ tp->key[2] |= spi;
+ tp->key_mask[2] |= spim;
+ pid = fsp->h_u.usr_ip4_spec.proto;
+ break;
+ default:
+ break;
+ }
+
+ tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT);
+ if (pid)
+ tp->key_mask[2] |= TCAM_V4KEY2_PROTO;
+}
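+
+ /* Worked example of the packing above (illustrative; it assumes
+ * TCAM_V4KEY3_SADDR_SHIFT places the source address in the upper
+ * half of key[3]): a TCP rule matching 10.0.0.1:80 -> 10.0.0.2:8080
+ * yields key[3] = (0x0a000001ULL << TCAM_V4KEY3_SADDR_SHIFT) |
+ * 0x0a000002, with the ports packed into key[2] as
+ * ((u64)80 << 16) | 8080 below the TOS and protocol fields.
+ */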
+
+static int niu_add_ethtool_tcam_entry(struct niu *np,
+ struct ethtool_rxnfc *nfc)
+{
+ struct niu_parent *parent = np->parent;
+ struct niu_tcam_entry *tp;
+ struct ethtool_rx_flow_spec *fsp = &nfc->fs;
+ struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
+ int l2_rdc_table = rdc_table->first_table_num;
+ u16 idx;
+ u64 class;
+ unsigned long flags;
+ int err, ret;
+
+ ret = 0;
+
+ idx = nfc->fs.location;
+ if (idx >= tcam_get_size(np))
+ return -EINVAL;
+
+ if (fsp->flow_type == IP_USER_FLOW) {
+ int i;
+ int add_usr_cls = 0;
+ struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;
+
+ if (uspec->ip_ver != ETH_RX_NFC_IP4)
+ return -EINVAL;
+
+ niu_lock_parent(np, flags);
+
+ for (i = 0; i < NIU_L3_PROG_CLS; i++) {
+ if (parent->l3_cls[i]) {
+ if (uspec->proto == parent->l3_cls_pid[i]) {
+ class = parent->l3_cls[i];
+ parent->l3_cls_refcnt[i]++;
+ add_usr_cls = 1;
+ break;
+ }
+ } else {
+ /* Program new user IP class */
+ switch (i) {
+ case 0:
+ class = CLASS_CODE_USER_PROG1;
+ break;
+ case 1:
+ class = CLASS_CODE_USER_PROG2;
+ break;
+ case 2:
+ class = CLASS_CODE_USER_PROG3;
+ break;
+ case 3:
+ class = CLASS_CODE_USER_PROG4;
+ break;
+ default:
+ break;
+ }
+ ret = tcam_user_ip_class_set(np, class, 0,
+ uspec->proto,
+ uspec->tos,
+ umask->tos);
+ if (ret)
+ goto out;
+
+ ret = tcam_user_ip_class_enable(np, class, 1);
+ if (ret)
+ goto out;
+ parent->l3_cls[i] = class;
+ parent->l3_cls_pid[i] = uspec->proto;
+ parent->l3_cls_refcnt[i]++;
+ add_usr_cls = 1;
+ break;
+ }
+ }
+ if (!add_usr_cls) {
+ netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
+ parent->index, __func__, uspec->proto);
+ ret = -EINVAL;
+ goto out;
+ }
+ niu_unlock_parent(np, flags);
+ } else {
+ if (!niu_ethflow_to_class(fsp->flow_type, &class))
+ return -EINVAL;
+ }
+
+ niu_lock_parent(np, flags);
+
+ idx = tcam_get_index(np, idx);
+ tp = &parent->tcam[idx];
+
+ memset(tp, 0, sizeof(*tp));
+
+ /* fill in the tcam key and mask */
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ /* Not yet implemented */
+ netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
+ parent->index, __func__, fsp->flow_type);
+ ret = -EINVAL;
+ goto out;
+ case IP_USER_FLOW:
+ niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
+ break;
+ default:
+ netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
+ parent->index, __func__, fsp->flow_type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* fill in the assoc data */
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ tp->assoc_data = TCAM_ASSOCDATA_DISC;
+ } else {
+ if (fsp->ring_cookie >= np->num_rx_rings) {
+ netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
+ parent->index, __func__,
+ (long long)fsp->ring_cookie);
+ ret = -EINVAL;
+ goto out;
+ }
+ tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
+ (fsp->ring_cookie <<
+ TCAM_ASSOCDATA_OFFSET_SHIFT));
+ }
+
+ err = tcam_write(np, idx, tp->key, tp->key_mask);
+ if (err) {
+ ret = -EINVAL;
+ goto out;
+ }
+ err = tcam_assoc_write(np, idx, tp->assoc_data);
+ if (err) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* mark the entry valid */
+ tp->valid = 1;
+ np->clas.tcam_valid_entries++;
+out:
+ niu_unlock_parent(np, flags);
+
+ return ret;
+}
+
+static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
+{
+ struct niu_parent *parent = np->parent;
+ struct niu_tcam_entry *tp;
+ u16 idx;
+ unsigned long flags;
+ u64 class;
+ int ret = 0;
+
+ if (loc >= tcam_get_size(np))
+ return -EINVAL;
+
+ niu_lock_parent(np, flags);
+
+ idx = tcam_get_index(np, loc);
+ tp = &parent->tcam[idx];
+
+ /* If the entry is in a user-defined class, drop the class reference */
+ class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
+ TCAM_V4KEY0_CLASS_CODE_SHIFT;
+
+ if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) {
+ int i;
+ for (i = 0; i < NIU_L3_PROG_CLS; i++) {
+ if (parent->l3_cls[i] == class) {
+ parent->l3_cls_refcnt[i]--;
+ if (!parent->l3_cls_refcnt[i]) {
+ /* disable class */
+ ret = tcam_user_ip_class_enable(np,
+ class,
+ 0);
+ if (ret)
+ goto out;
+ parent->l3_cls[i] = 0;
+ parent->l3_cls_pid[i] = 0;
+ }
+ break;
+ }
+ }
+ if (i == NIU_L3_PROG_CLS) {
+ netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
+ parent->index, __func__,
+ (unsigned long long)class);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ ret = tcam_flush(np, idx);
+ if (ret)
+ goto out;
+
+ /* invalidate the entry */
+ tp->valid = 0;
+ np->clas.tcam_valid_entries--;
+out:
+ niu_unlock_parent(np, flags);
+
+ return ret;
+}
+
+static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct niu *np = netdev_priv(dev);
+ int ret = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = niu_set_hash_opts(np, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ ret = niu_add_ethtool_tcam_entry(np, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct {
+ const char string[ETH_GSTRING_LEN];
+} niu_xmac_stat_keys[] = {
+ { "tx_frames" },
+ { "tx_bytes" },
+ { "tx_fifo_errors" },
+ { "tx_overflow_errors" },
+ { "tx_max_pkt_size_errors" },
+ { "tx_underflow_errors" },
+ { "rx_local_faults" },
+ { "rx_remote_faults" },
+ { "rx_link_faults" },
+ { "rx_align_errors" },
+ { "rx_frags" },
+ { "rx_mcasts" },
+ { "rx_bcasts" },
+ { "rx_hist_cnt1" },
+ { "rx_hist_cnt2" },
+ { "rx_hist_cnt3" },
+ { "rx_hist_cnt4" },
+ { "rx_hist_cnt5" },
+ { "rx_hist_cnt6" },
+ { "rx_hist_cnt7" },
+ { "rx_octets" },
+ { "rx_code_violations" },
+ { "rx_len_errors" },
+ { "rx_crc_errors" },
+ { "rx_underflows" },
+ { "rx_overflows" },
+ { "pause_off_state" },
+ { "pause_on_state" },
+ { "pause_received" },
+};
+
+#define NUM_XMAC_STAT_KEYS ARRAY_SIZE(niu_xmac_stat_keys)
+
+static const struct {
+ const char string[ETH_GSTRING_LEN];
+} niu_bmac_stat_keys[] = {
+ { "tx_underflow_errors" },
+ { "tx_max_pkt_size_errors" },
+ { "tx_bytes" },
+ { "tx_frames" },
+ { "rx_overflows" },
+ { "rx_frames" },
+ { "rx_align_errors" },
+ { "rx_crc_errors" },
+ { "rx_len_errors" },
+ { "pause_off_state" },
+ { "pause_on_state" },
+ { "pause_received" },
+};
+
+#define NUM_BMAC_STAT_KEYS ARRAY_SIZE(niu_bmac_stat_keys)
+
+static const struct {
+ const char string[ETH_GSTRING_LEN];
+} niu_rxchan_stat_keys[] = {
+ { "rx_channel" },
+ { "rx_packets" },
+ { "rx_bytes" },
+ { "rx_dropped" },
+ { "rx_errors" },
+};
+
+#define NUM_RXCHAN_STAT_KEYS ARRAY_SIZE(niu_rxchan_stat_keys)
+
+static const struct {
+ const char string[ETH_GSTRING_LEN];
+} niu_txchan_stat_keys[] = {
+ { "tx_channel" },
+ { "tx_packets" },
+ { "tx_bytes" },
+ { "tx_errors" },
+};
+
+#define NUM_TXCHAN_STAT_KEYS ARRAY_SIZE(niu_txchan_stat_keys)
+
+static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ struct niu *np = netdev_priv(dev);
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ if (np->flags & NIU_FLAGS_XMAC) {
+ memcpy(data, niu_xmac_stat_keys,
+ sizeof(niu_xmac_stat_keys));
+ data += sizeof(niu_xmac_stat_keys);
+ } else {
+ memcpy(data, niu_bmac_stat_keys,
+ sizeof(niu_bmac_stat_keys));
+ data += sizeof(niu_bmac_stat_keys);
+ }
+ for (i = 0; i < np->num_rx_rings; i++) {
+ memcpy(data, niu_rxchan_stat_keys,
+ sizeof(niu_rxchan_stat_keys));
+ data += sizeof(niu_rxchan_stat_keys);
+ }
+ for (i = 0; i < np->num_tx_rings; i++) {
+ memcpy(data, niu_txchan_stat_keys,
+ sizeof(niu_txchan_stat_keys));
+ data += sizeof(niu_txchan_stat_keys);
+ }
+}
+
+static int niu_get_sset_count(struct net_device *dev, int stringset)
+{
+ struct niu *np = netdev_priv(dev);
+
+ if (stringset != ETH_SS_STATS)
+ return -EINVAL;
+
+ return (np->flags & NIU_FLAGS_XMAC ?
+ NUM_XMAC_STAT_KEYS :
+ NUM_BMAC_STAT_KEYS) +
+ (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
+ (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS);
+}
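+
+ /* Example: an XMAC port with 8 RX and 8 TX rings reports
+ * 29 + 8 * 5 + 8 * 4 = 101 statistics, matching the string tables
+ * emitted by niu_get_strings() above.
+ */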
+
+static void niu_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct niu *np = netdev_priv(dev);
+ int i;
+
+ niu_sync_mac_stats(np);
+ if (np->flags & NIU_FLAGS_XMAC) {
+ memcpy(data, &np->mac_stats.xmac,
+ sizeof(struct niu_xmac_stats));
+ data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
+ } else {
+ memcpy(data, &np->mac_stats.bmac,
+ sizeof(struct niu_bmac_stats));
+ data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
+ }
+ for (i = 0; i < np->num_rx_rings; i++) {
+ struct rx_ring_info *rp = &np->rx_rings[i];
+
+ niu_sync_rx_discard_stats(np, rp, 0);
+
+ data[0] = rp->rx_channel;
+ data[1] = rp->rx_packets;
+ data[2] = rp->rx_bytes;
+ data[3] = rp->rx_dropped;
+ data[4] = rp->rx_errors;
+ data += 5;
+ }
+ for (i = 0; i < np->num_tx_rings; i++) {
+ struct tx_ring_info *rp = &np->tx_rings[i];
+
+ data[0] = rp->tx_channel;
+ data[1] = rp->tx_packets;
+ data[2] = rp->tx_bytes;
+ data[3] = rp->tx_errors;
+ data += 4;
+ }
+}
+
+static u64 niu_led_state_save(struct niu *np)
+{
+ if (np->flags & NIU_FLAGS_XMAC)
+ return nr64_mac(XMAC_CONFIG);
+ else
+ return nr64_mac(BMAC_XIF_CONFIG);
+}
+
+static void niu_led_state_restore(struct niu *np, u64 val)
+{
+ if (np->flags & NIU_FLAGS_XMAC)
+ nw64_mac(XMAC_CONFIG, val);
+ else
+ nw64_mac(BMAC_XIF_CONFIG, val);
+}
+
+static void niu_force_led(struct niu *np, int on)
+{
+ u64 val, reg, bit;
+
+ if (np->flags & NIU_FLAGS_XMAC) {
+ reg = XMAC_CONFIG;
+ bit = XMAC_CONFIG_FORCE_LED_ON;
+ } else {
+ reg = BMAC_XIF_CONFIG;
+ bit = BMAC_XIF_CONFIG_LINK_LED;
+ }
+
+ val = nr64_mac(reg);
+ if (on)
+ val |= bit;
+ else
+ val &= ~bit;
+ nw64_mac(reg, val);
+}
+
+static int niu_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct niu *np = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ np->orig_led_state = niu_led_state_save(np);
+ return 1; /* cycle on/off once per second */
+
+ case ETHTOOL_ID_ON:
+ niu_force_led(np, 1);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ niu_force_led(np, 0);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ niu_led_state_restore(np, np->orig_led_state);
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops niu_ethtool_ops = {
+ .get_drvinfo = niu_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = niu_get_msglevel,
+ .set_msglevel = niu_set_msglevel,
+ .nway_reset = niu_nway_reset,
+ .get_eeprom_len = niu_get_eeprom_len,
+ .get_eeprom = niu_get_eeprom,
+ .get_settings = niu_get_settings,
+ .set_settings = niu_set_settings,
+ .get_strings = niu_get_strings,
+ .get_sset_count = niu_get_sset_count,
+ .get_ethtool_stats = niu_get_ethtool_stats,
+ .set_phys_id = niu_set_phys_id,
+ .get_rxnfc = niu_get_nfc,
+ .set_rxnfc = niu_set_nfc,
+};
+
+static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
+ int ldg, int ldn)
+{
+ if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
+ return -EINVAL;
+ if (ldn < 0 || ldn > LDN_MAX)
+ return -EINVAL;
+
+ parent->ldg_map[ldn] = ldg;
+
+ if (np->parent->plat_type == PLAT_TYPE_NIU) {
+ /* On N2 NIU, the ldn-->ldg assignments are set up and fixed by
+ * the firmware, and we're not supposed to change them.
+ * Validate the mapping, because if it's wrong we probably
+ * won't get any interrupts and that's painful to debug.
+ */
+ if (nr64(LDG_NUM(ldn)) != ldg) {
+ dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
+ np->port, ldn, ldg,
+ (unsigned long long) nr64(LDG_NUM(ldn)));
+ return -EINVAL;
+ }
+ } else
+ nw64(LDG_NUM(ldn), ldg);
+
+ return 0;
+}
+
+static int niu_set_ldg_timer_res(struct niu *np, int res)
+{
+ if (res < 0 || res > LDG_TIMER_RES_VAL)
+ return -EINVAL;
+
+ nw64(LDG_TIMER_RES, res);
+
+ return 0;
+}
+
+static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
+{
+ if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
+ (func < 0 || func > 3) ||
+ (vector < 0 || vector > 0x1f))
+ return -EINVAL;
+
+ nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);
+
+ return 0;
+}
+
+static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
+{
+ u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
+ (addr << ESPC_PIO_STAT_ADDR_SHIFT));
+ int limit;
+
+ if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
+ return -EINVAL;
+
+ frame = frame_base;
+ nw64(ESPC_PIO_STAT, frame);
+ limit = 64;
+ do {
+ udelay(5);
+ frame = nr64(ESPC_PIO_STAT);
+ if (frame & ESPC_PIO_STAT_READ_END)
+ break;
+ } while (limit--);
+ if (!(frame & ESPC_PIO_STAT_READ_END)) {
+ dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
+ (unsigned long long) frame);
+ return -ENODEV;
+ }
+
+ frame = frame_base;
+ nw64(ESPC_PIO_STAT, frame);
+ limit = 64;
+ do {
+ udelay(5);
+ frame = nr64(ESPC_PIO_STAT);
+ if (frame & ESPC_PIO_STAT_READ_END)
+ break;
+ } while (limit--);
+ if (!(frame & ESPC_PIO_STAT_READ_END)) {
+ dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
+ (unsigned long long) frame);
+ return -ENODEV;
+ }
+
+ frame = nr64(ESPC_PIO_STAT);
+ return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
+}
+
+static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off)
+{
+ int err = niu_pci_eeprom_read(np, off);
+ u16 val;
+
+ if (err < 0)
+ return err;
+ val = (err << 8);
+ err = niu_pci_eeprom_read(np, off + 1);
+ if (err < 0)
+ return err;
+ val |= (err & 0xff);
+
+ return val;
+}
+
+static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
+{
+ int err = niu_pci_eeprom_read(np, off);
+ u16 val;
+
+ if (err < 0)
+ return err;
+
+ val = (err & 0xff);
+ err = niu_pci_eeprom_read(np, off + 1);
+ if (err < 0)
+ return err;
+
+ val |= (err & 0xff) << 8;
+
+ return val;
+}
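+
+ /* The two 16-bit helpers differ only in byte order: if the EEPROM
+ * holds 0x12 at off and 0x34 at off + 1, niu_pci_eeprom_read16()
+ * returns 0x1234 (big-endian) while niu_pci_eeprom_read16_swp()
+ * returns 0x3412 (little-endian), the latter presumably matching
+ * the little-endian length fields in the PCI VPD structures read
+ * below.
+ */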
+
+static int __devinit niu_pci_vpd_get_propname(struct niu *np,
+ u32 off,
+ char *namebuf,
+ int namebuf_len)
+{
+ int i;
+
+ for (i = 0; i < namebuf_len; i++) {
+ int err = niu_pci_eeprom_read(np, off + i);
+ if (err < 0)
+ return err;
+ *namebuf++ = err;
+ if (!err)
+ break;
+ }
+ if (i >= namebuf_len)
+ return -EINVAL;
+
+ return i + 1;
+}
+
+static void __devinit niu_vpd_parse_version(struct niu *np)
+{
+ struct niu_vpd *vpd = &np->vpd;
+ int len = strlen(vpd->version) + 1;
+ const char *s = vpd->version;
+ int i;
+
+ for (i = 0; i < len - 5; i++) {
+ if (!strncmp(s + i, "FCode ", 6))
+ break;
+ }
+ if (i >= len - 5)
+ return;
+
+ s += i + 5;
+ sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
+
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "VPD_SCAN: FCODE major(%d) minor(%d)\n",
+ vpd->fcode_major, vpd->fcode_minor);
+ if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
+ (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
+ vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
+ np->flags |= NIU_FLAGS_VPD_VALID;
+}
+
+/* ESPC_PIO_EN_ENABLE must be set */
+static int __devinit niu_pci_vpd_scan_props(struct niu *np,
+ u32 start, u32 end)
+{
+ unsigned int found_mask = 0;
+#define FOUND_MASK_MODEL 0x00000001
+#define FOUND_MASK_BMODEL 0x00000002
+#define FOUND_MASK_VERS 0x00000004
+#define FOUND_MASK_MAC 0x00000008
+#define FOUND_MASK_NMAC 0x00000010
+#define FOUND_MASK_PHY 0x00000020
+#define FOUND_MASK_ALL 0x0000003f
+
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "VPD_SCAN: start[%x] end[%x]\n", start, end);
+ while (start < end) {
+ int len, err, prop_len;
+ char namebuf[64];
+ u8 *prop_buf;
+ int max_len;
+
+ if (found_mask == FOUND_MASK_ALL) {
+ niu_vpd_parse_version(np);
+ return 1;
+ }
+
+ err = niu_pci_eeprom_read(np, start + 2);
+ if (err < 0)
+ return err;
+ len = err;
+ start += 3;
+
+ prop_len = niu_pci_eeprom_read(np, start + 4);
+ if (prop_len < 0)
+ return prop_len;
+ err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
+ if (err < 0)
+ return err;
+
+ prop_buf = NULL;
+ max_len = 0;
+ if (!strcmp(namebuf, "model")) {
+ prop_buf = np->vpd.model;
+ max_len = NIU_VPD_MODEL_MAX;
+ found_mask |= FOUND_MASK_MODEL;
+ } else if (!strcmp(namebuf, "board-model")) {
+ prop_buf = np->vpd.board_model;
+ max_len = NIU_VPD_BD_MODEL_MAX;
+ found_mask |= FOUND_MASK_BMODEL;
+ } else if (!strcmp(namebuf, "version")) {
+ prop_buf = np->vpd.version;
+ max_len = NIU_VPD_VERSION_MAX;
+ found_mask |= FOUND_MASK_VERS;
+ } else if (!strcmp(namebuf, "local-mac-address")) {
+ prop_buf = np->vpd.local_mac;
+ max_len = ETH_ALEN;
+ found_mask |= FOUND_MASK_MAC;
+ } else if (!strcmp(namebuf, "num-mac-addresses")) {
+ prop_buf = &np->vpd.mac_num;
+ max_len = 1;
+ found_mask |= FOUND_MASK_NMAC;
+ } else if (!strcmp(namebuf, "phy-type")) {
+ prop_buf = np->vpd.phy_type;
+ max_len = NIU_VPD_PHY_TYPE_MAX;
+ found_mask |= FOUND_MASK_PHY;
+ }
+
+ if (max_len && prop_len > max_len) {
+ dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
+ return -EINVAL;
+ }
+
+ if (prop_buf) {
+ u32 off = start + 5 + err;
+ int i;
+
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "VPD_SCAN: Reading in property [%s] len[%d]\n",
+ namebuf, prop_len);
+ for (i = 0; i < prop_len; i++)
+ *prop_buf++ = niu_pci_eeprom_read(np, off + i);
+ }
+
+ start += len;
+ }
+
+ return 0;
+}
+
+/* ESPC_PIO_EN_ENABLE must be set */
+static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start)
+{
+ u32 offset;
+ int err;
+
+ err = niu_pci_eeprom_read16_swp(np, start + 1);
+ if (err < 0)
+ return;
+
+ offset = err + 3;
+
+ while (start + offset < ESPC_EEPROM_SIZE) {
+ u32 here = start + offset;
+ u32 end;
+
+ err = niu_pci_eeprom_read(np, here);
+ if (err != 0x90)
+ return;
+
+ err = niu_pci_eeprom_read16_swp(np, here + 1);
+ if (err < 0)
+ return;
+
+ here = start + offset + 3;
+ end = start + offset + err;
+
+ offset += err;
+
+ err = niu_pci_vpd_scan_props(np, here, end);
+ if (err < 0 || err == 1)
+ return;
+ }
+}
+
+/* ESPC_PIO_EN_ENABLE must be set */
+static u32 __devinit niu_pci_vpd_offset(struct niu *np)
+{
+ u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
+ int err;
+
+ while (start < end) {
+ ret = start;
+
+ /* Check for the expansion ROM header signature. */
+ err = niu_pci_eeprom_read16(np, start + 0);
+ if (err != 0x55aa)
+ return 0;
+
+ /* Apply offset to PCI data structure. */
+ err = niu_pci_eeprom_read16(np, start + 23);
+ if (err < 0)
+ return 0;
+ start += err;
+
+ /* Check for "PCIR" signature. */
+ err = niu_pci_eeprom_read16(np, start + 0);
+ if (err != 0x5043)
+ return 0;
+ err = niu_pci_eeprom_read16(np, start + 2);
+ if (err != 0x4952)
+ return 0;
+
+ /* Check for OBP image type. */
+ err = niu_pci_eeprom_read(np, start + 20);
+ if (err < 0)
+ return 0;
+ if (err != 0x01) {
+ err = niu_pci_eeprom_read(np, ret + 2);
+ if (err < 0)
+ return 0;
+
+ start = ret + (err * 512);
+ continue;
+ }
+
+ err = niu_pci_eeprom_read16_swp(np, start + 8);
+ /* This function returns a u32 offset; report failure as 0
+ * like the other error paths, not as a negative errno.
+ */
+ if (err < 0)
+ return 0;
+ ret += err;
+
+ err = niu_pci_eeprom_read(np, ret + 0);
+ if (err != 0x82)
+ return 0;
+
+ return ret;
+ }
+
+ return 0;
+}
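+
+ /* The loop above walks the PCI expansion ROM image chain: each
+ * image must start with the 0x55aa signature and carry a "PCIR"
+ * data structure; images whose code type (PCIR byte 20) is not
+ * 0x01 (OBP) are skipped using the 512-byte block count at byte 2
+ * of the image header, and for the OBP image the little-endian
+ * 16-bit pointer at PCIR offset 8 (presumably the VPD pointer) must
+ * lead to a 0x82 large-resource tag.
+ */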
+
+static int __devinit niu_phy_type_prop_decode(struct niu *np,
+ const char *phy_prop)
+{
+ if (!strcmp(phy_prop, "mif")) {
+ /* 1G copper, MII */
+ np->flags &= ~(NIU_FLAGS_FIBER |
+ NIU_FLAGS_10G);
+ np->mac_xcvr = MAC_XCVR_MII;
+ } else if (!strcmp(phy_prop, "xgf")) {
+ /* 10G fiber, XPCS */
+ np->flags |= (NIU_FLAGS_10G |
+ NIU_FLAGS_FIBER);
+ np->mac_xcvr = MAC_XCVR_XPCS;
+ } else if (!strcmp(phy_prop, "pcs")) {
+ /* 1G fiber, PCS */
+ np->flags &= ~NIU_FLAGS_10G;
+ np->flags |= NIU_FLAGS_FIBER;
+ np->mac_xcvr = MAC_XCVR_PCS;
+ } else if (!strcmp(phy_prop, "xgc")) {
+ /* 10G copper, XPCS */
+ np->flags |= NIU_FLAGS_10G;
+ np->flags &= ~NIU_FLAGS_FIBER;
+ np->mac_xcvr = MAC_XCVR_XPCS;
+ } else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
+ /* 10G Serdes or 1G Serdes, default to 10G */
+ np->flags |= NIU_FLAGS_10G;
+ np->flags &= ~NIU_FLAGS_FIBER;
+ np->flags |= NIU_FLAGS_XCVR_SERDES;
+ np->mac_xcvr = MAC_XCVR_XPCS;
+ } else {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int niu_pci_vpd_get_nports(struct niu *np)
+{
+ int ports = 0;
+
+ if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
+ ports = 4;
+ } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
+ ports = 2;
+ }
+
+ return ports;
+}
+
+static void __devinit niu_pci_vpd_validate(struct niu *np)
+{
+ struct net_device *dev = np->dev;
+ struct niu_vpd *vpd = &np->vpd;
+ u8 val8;
+
+ if (!is_valid_ether_addr(&vpd->local_mac[0])) {
+ dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
+
+ np->flags &= ~NIU_FLAGS_VPD_VALID;
+ return;
+ }
+
+ if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
+ !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
+ np->flags |= NIU_FLAGS_10G;
+ np->flags &= ~NIU_FLAGS_FIBER;
+ np->flags |= NIU_FLAGS_XCVR_SERDES;
+ np->mac_xcvr = MAC_XCVR_PCS;
+ if (np->port > 1) {
+ np->flags |= NIU_FLAGS_FIBER;
+ np->flags &= ~NIU_FLAGS_10G;
+ }
+ if (np->flags & NIU_FLAGS_10G)
+ np->mac_xcvr = MAC_XCVR_XPCS;
+ } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
+ np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
+ NIU_FLAGS_HOTPLUG_PHY);
+ } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
+ dev_err(np->device, "Illegal phy string [%s]\n",
+ np->vpd.phy_type);
+ dev_err(np->device, "Falling back to SPROM\n");
+ np->flags &= ~NIU_FLAGS_VPD_VALID;
+ return;
+ }
+
+ memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN);
+
+ val8 = dev->perm_addr[5];
+ dev->perm_addr[5] += np->port;
+ if (dev->perm_addr[5] < val8)
+ dev->perm_addr[4]++;
+
+ memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
+}
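+
+ /* The per-port MAC address is the VPD base address plus the port
+ * number, with a manual carry: e.g. a base address ending in 00:ff
+ * on port 1 becomes ...:01:00 rather than wrapping to ...:00:00.
+ */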
+
+static int __devinit niu_pci_probe_sprom(struct niu *np)
+{
+ struct net_device *dev = np->dev;
+ int len, i;
+ u64 val, sum;
+ u8 val8;
+
+ val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
+ val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
+ len = val / 4;
+
+ np->eeprom_len = len;
+
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: Image size %llu\n", (unsigned long long)val);
+
+ sum = 0;
+ for (i = 0; i < len; i++) {
+ val = nr64(ESPC_NCR(i));
+ sum += (val >> 0) & 0xff;
+ sum += (val >> 8) & 0xff;
+ sum += (val >> 16) & 0xff;
+ sum += (val >> 24) & 0xff;
+ }
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: Checksum %x\n", (int)(sum & 0xff));
+ if ((sum & 0xff) != 0xab) {
+ dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
+ return -EINVAL;
+ }
+
+ val = nr64(ESPC_PHY_TYPE);
+ switch (np->port) {
+ case 0:
+ val8 = (val & ESPC_PHY_TYPE_PORT0) >>
+ ESPC_PHY_TYPE_PORT0_SHIFT;
+ break;
+ case 1:
+ val8 = (val & ESPC_PHY_TYPE_PORT1) >>
+ ESPC_PHY_TYPE_PORT1_SHIFT;
+ break;
+ case 2:
+ val8 = (val & ESPC_PHY_TYPE_PORT2) >>
+ ESPC_PHY_TYPE_PORT2_SHIFT;
+ break;
+ case 3:
+ val8 = (val & ESPC_PHY_TYPE_PORT3) >>
+ ESPC_PHY_TYPE_PORT3_SHIFT;
+ break;
+ default:
+ dev_err(np->device, "Bogus port number %u\n",
+ np->port);
+ return -EINVAL;
+ }
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: PHY type %x\n", val8);
+
+ switch (val8) {
+ case ESPC_PHY_TYPE_1G_COPPER:
+ /* 1G copper, MII */
+ np->flags &= ~(NIU_FLAGS_FIBER |
+ NIU_FLAGS_10G);
+ np->mac_xcvr = MAC_XCVR_MII;
+ break;
+
+ case ESPC_PHY_TYPE_1G_FIBER:
+ /* 1G fiber, PCS */
+ np->flags &= ~NIU_FLAGS_10G;
+ np->flags |= NIU_FLAGS_FIBER;
+ np->mac_xcvr = MAC_XCVR_PCS;
+ break;
+
+ case ESPC_PHY_TYPE_10G_COPPER:
+ /* 10G copper, XPCS */
+ np->flags |= NIU_FLAGS_10G;
+ np->flags &= ~NIU_FLAGS_FIBER;
+ np->mac_xcvr = MAC_XCVR_XPCS;
+ break;
+
+ case ESPC_PHY_TYPE_10G_FIBER:
+ /* 10G fiber, XPCS */
+ np->flags |= (NIU_FLAGS_10G |
+ NIU_FLAGS_FIBER);
+ np->mac_xcvr = MAC_XCVR_XPCS;
+ break;
+
+ default:
+ dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
+ return -EINVAL;
+ }
+
+ val = nr64(ESPC_MAC_ADDR0);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
+ dev->perm_addr[0] = (val >> 0) & 0xff;
+ dev->perm_addr[1] = (val >> 8) & 0xff;
+ dev->perm_addr[2] = (val >> 16) & 0xff;
+ dev->perm_addr[3] = (val >> 24) & 0xff;
+
+ val = nr64(ESPC_MAC_ADDR1);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
+ dev->perm_addr[4] = (val >> 0) & 0xff;
+ dev->perm_addr[5] = (val >> 8) & 0xff;
+
+ if (!is_valid_ether_addr(&dev->perm_addr[0])) {
+ dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
+ dev->perm_addr);
+ return -EINVAL;
+ }
+
+ val8 = dev->perm_addr[5];
+ dev->perm_addr[5] += np->port;
+ if (dev->perm_addr[5] < val8)
+ dev->perm_addr[4]++;
+
+ memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
+
+ val = nr64(ESPC_MOD_STR_LEN);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
+ if (val >= 8 * 4)
+ return -EINVAL;
+
+ for (i = 0; i < val; i += 4) {
+ u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));
+
+ np->vpd.model[i + 3] = (tmp >> 0) & 0xff;
+ np->vpd.model[i + 2] = (tmp >> 8) & 0xff;
+ np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
+ np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
+ }
+ np->vpd.model[val] = '\0';
+
+ val = nr64(ESPC_BD_MOD_STR_LEN);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
+ if (val >= 4 * 4)
+ return -EINVAL;
+
+ for (i = 0; i < val; i += 4) {
+ u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));
+
+ np->vpd.board_model[i + 3] = (tmp >> 0) & 0xff;
+ np->vpd.board_model[i + 2] = (tmp >> 8) & 0xff;
+ np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
+ np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
+ }
+ np->vpd.board_model[val] = '\0';
+
+ np->vpd.mac_num =
+ nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);
+
+ return 0;
+}
+
+static int __devinit niu_get_and_validate_port(struct niu *np)
+{
+ struct niu_parent *parent = np->parent;
+
+ if (np->port <= 1)
+ np->flags |= NIU_FLAGS_XMAC;
+
+ if (!parent->num_ports) {
+ if (parent->plat_type == PLAT_TYPE_NIU) {
+ parent->num_ports = 2;
+ } else {
+ parent->num_ports = niu_pci_vpd_get_nports(np);
+ if (!parent->num_ports) {
+ /* Fall back to SPROM as last resort.
+ * This will fail on most cards.
+ */
+ parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
+ ESPC_NUM_PORTS_MACS_VAL;
+
+ /* All of the current probing methods fail on
+ * Maramba on-board parts.
+ */
+ if (!parent->num_ports)
+ parent->num_ports = 4;
+ }
+ }
+ }
+
+ if (np->port >= parent->num_ports)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int __devinit phy_record(struct niu_parent *parent,
+ struct phy_probe_info *p,
+ int dev_id_1, int dev_id_2, u8 phy_port,
+ int type)
+{
+ u32 id = (dev_id_1 << 16) | dev_id_2;
+ u8 idx;
+
+ if (dev_id_1 < 0 || dev_id_2 < 0)
+ return 0;
+ if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
+ if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
+ ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
+ ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
+ return 0;
+ } else {
+ if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
+ return 0;
+ }
+
+ pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
+ parent->index, id,
+ type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
+ type == PHY_TYPE_PCS ? "PCS" : "MII",
+ phy_port);
+
+ if (p->cur[type] >= NIU_MAX_PORTS) {
+ pr_err("Too many PHY ports\n");
+ return -EINVAL;
+ }
+ idx = p->cur[type];
+ p->phy_id[type][idx] = id;
+ p->phy_port[type][idx] = phy_port;
+ p->cur[type] = idx + 1;
+ return 0;
+}
+
+static int __devinit port_has_10g(struct phy_probe_info *p, int port)
+{
+ int i;
+
+ for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
+ if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
+ return 1;
+ }
+ for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
+ if (p->phy_port[PHY_TYPE_PCS][i] == port)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest)
+{
+ int port, cnt;
+
+ cnt = 0;
+ *lowest = 32;
+ for (port = 8; port < 32; port++) {
+ if (port_has_10g(p, port)) {
+ if (!cnt)
+ *lowest = port;
+ cnt++;
+ }
+ }
+
+ return cnt;
+}
+
+static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest)
+{
+ *lowest = 32;
+ if (p->cur[PHY_TYPE_MII])
+ *lowest = p->phy_port[PHY_TYPE_MII][0];
+
+ return p->cur[PHY_TYPE_MII];
+}
+
+static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
+{
+ int num_ports = parent->num_ports;
+ int i;
+
+ for (i = 0; i < num_ports; i++) {
+ parent->rxchan_per_port[i] = (16 / num_ports);
+ parent->txchan_per_port[i] = (16 / num_ports);
+
+ pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
+ parent->index, i,
+ parent->rxchan_per_port[i],
+ parent->txchan_per_port[i]);
+ }
+}
+
+static void __devinit niu_divide_channels(struct niu_parent *parent,
+ int num_10g, int num_1g)
+{
+ int num_ports = parent->num_ports;
+ int rx_chans_per_10g, rx_chans_per_1g;
+ int tx_chans_per_10g, tx_chans_per_1g;
+ int i, tot_rx, tot_tx;
+
+ if (!num_10g || !num_1g) {
+ rx_chans_per_10g = rx_chans_per_1g =
+ (NIU_NUM_RXCHAN / num_ports);
+ tx_chans_per_10g = tx_chans_per_1g =
+ (NIU_NUM_TXCHAN / num_ports);
+ } else {
+ rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
+ rx_chans_per_10g = (NIU_NUM_RXCHAN -
+ (rx_chans_per_1g * num_1g)) /
+ num_10g;
+
+ tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
+ tx_chans_per_10g = (NIU_NUM_TXCHAN -
+ (tx_chans_per_1g * num_1g)) /
+ num_10g;
+ }
+
+ tot_rx = tot_tx = 0;
+ for (i = 0; i < num_ports; i++) {
+ int type = phy_decode(parent->port_phy, i);
+
+ if (type == PORT_TYPE_10G) {
+ parent->rxchan_per_port[i] = rx_chans_per_10g;
+ parent->txchan_per_port[i] = tx_chans_per_10g;
+ } else {
+ parent->rxchan_per_port[i] = rx_chans_per_1g;
+ parent->txchan_per_port[i] = tx_chans_per_1g;
+ }
+ pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
+ parent->index, i,
+ parent->rxchan_per_port[i],
+ parent->txchan_per_port[i]);
+ tot_rx += parent->rxchan_per_port[i];
+ tot_tx += parent->txchan_per_port[i];
+ }
+
+ if (tot_rx > NIU_NUM_RXCHAN) {
+ pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
+ parent->index, tot_rx);
+ for (i = 0; i < num_ports; i++)
+ parent->rxchan_per_port[i] = 1;
+ }
+ if (tot_tx > NIU_NUM_TXCHAN) {
+ pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
+ parent->index, tot_tx);
+ for (i = 0; i < num_ports; i++)
+ parent->txchan_per_port[i] = 1;
+ }
+ if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
+ pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
+ parent->index, tot_rx, tot_tx);
+ }
+}
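+
+ /* Worked example of the split above, assuming the usual Neptune
+ * totals of NIU_NUM_RXCHAN = 16 and NIU_NUM_TXCHAN = 24: with two
+ * 10G and two 1G ports, each 1G port gets 16 / 8 = 2 RX and
+ * 24 / 6 = 4 TX channels, leaving (16 - 4) / 2 = 6 RX and
+ * (24 - 8) / 2 = 8 TX channels per 10G port, which sums exactly to
+ * the channel totals.
+ */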
+
+static void __devinit niu_divide_rdc_groups(struct niu_parent *parent,
+ int num_10g, int num_1g)
+{
+ int i, num_ports = parent->num_ports;
+ int rdc_group, rdc_groups_per_port;
+ int rdc_channel_base;
+
+ rdc_group = 0;
+ rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;
+
+ rdc_channel_base = 0;
+
+ for (i = 0; i < num_ports; i++) {
+ struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
+ int grp, num_channels = parent->rxchan_per_port[i];
+ int this_channel_offset;
+
+ tp->first_table_num = rdc_group;
+ tp->num_tables = rdc_groups_per_port;
+ this_channel_offset = 0;
+ for (grp = 0; grp < tp->num_tables; grp++) {
+ struct rdc_table *rt = &tp->tables[grp];
+ int slot;
+
+ pr_info("niu%d: Port %d RDC tbl(%d) [ ",
+ parent->index, i, tp->first_table_num + grp);
+ for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
+ rt->rxdma_channel[slot] =
+ rdc_channel_base + this_channel_offset;
+
+ pr_cont("%d ", rt->rxdma_channel[slot]);
+
+ if (++this_channel_offset == num_channels)
+ this_channel_offset = 0;
+ }
+ pr_cont("]\n");
+ }
+
+ parent->rdc_default[i] = rdc_channel_base;
+
+ rdc_channel_base += num_channels;
+ rdc_group += rdc_groups_per_port;
+ }
+}
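+
+ /* Example of the RDC layout above: with four ports and port 0
+ * owning two RX channels starting at channel 0, every slot of port
+ * 0's tables is filled with channels 0, 1, 0, 1, ...,
+ * rdc_default[0] is channel 0, and port 1's tables begin at
+ * channel 2.
+ */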
+
+static int __devinit fill_phy_probe_info(struct niu *np,
+ struct niu_parent *parent,
+ struct phy_probe_info *info)
+{
+ unsigned long flags;
+ int port, err;
+
+ memset(info, 0, sizeof(*info));
+
+ /* Port 0 to 7 are reserved for onboard Serdes, probe the rest. */
+ niu_lock_parent(np, flags);
+ err = 0;
+ for (port = 8; port < 32; port++) {
+ int dev_id_1, dev_id_2;
+
+ dev_id_1 = mdio_read(np, port,
+ NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
+ dev_id_2 = mdio_read(np, port,
+ NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
+ err = phy_record(parent, info, dev_id_1, dev_id_2, port,
+ PHY_TYPE_PMA_PMD);
+ if (err)
+ break;
+ dev_id_1 = mdio_read(np, port,
+ NIU_PCS_DEV_ADDR, MII_PHYSID1);
+ dev_id_2 = mdio_read(np, port,
+ NIU_PCS_DEV_ADDR, MII_PHYSID2);
+ err = phy_record(parent, info, dev_id_1, dev_id_2, port,
+ PHY_TYPE_PCS);
+ if (err)
+ break;
+ dev_id_1 = mii_read(np, port, MII_PHYSID1);
+ dev_id_2 = mii_read(np, port, MII_PHYSID2);
+ err = phy_record(parent, info, dev_id_1, dev_id_2, port,
+ PHY_TYPE_MII);
+ if (err)
+ break;
+ }
+ niu_unlock_parent(np, flags);
+
+ return err;
+}
+
+static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
+{
+ struct phy_probe_info *info = &parent->phy_probe_info;
+ int lowest_10g, lowest_1g;
+ int num_10g, num_1g;
+ u32 val;
+ int err;
+
+ num_10g = num_1g = 0;
+
+ if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
+ !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
+ num_10g = 0;
+ num_1g = 2;
+ parent->plat_type = PLAT_TYPE_ATCA_CP3220;
+ parent->num_ports = 4;
+ val = (phy_encode(PORT_TYPE_1G, 0) |
+ phy_encode(PORT_TYPE_1G, 1) |
+ phy_encode(PORT_TYPE_1G, 2) |
+ phy_encode(PORT_TYPE_1G, 3));
+ } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
+ num_10g = 2;
+ num_1g = 0;
+ parent->num_ports = 2;
+ val = (phy_encode(PORT_TYPE_10G, 0) |
+ phy_encode(PORT_TYPE_10G, 1));
+ } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
+ (parent->plat_type == PLAT_TYPE_NIU)) {
+ /* this is the Monza case */
+ if (np->flags & NIU_FLAGS_10G) {
+ val = (phy_encode(PORT_TYPE_10G, 0) |
+ phy_encode(PORT_TYPE_10G, 1));
+ } else {
+ val = (phy_encode(PORT_TYPE_1G, 0) |
+ phy_encode(PORT_TYPE_1G, 1));
+ }
+ } else {
+ err = fill_phy_probe_info(np, parent, info);
+ if (err)
+ return err;
+
+ num_10g = count_10g_ports(info, &lowest_10g);
+ num_1g = count_1g_ports(info, &lowest_1g);
+
+ switch ((num_10g << 4) | num_1g) {
+ case 0x24:
+ if (lowest_1g == 10)
+ parent->plat_type = PLAT_TYPE_VF_P0;
+ else if (lowest_1g == 26)
+ parent->plat_type = PLAT_TYPE_VF_P1;
+ else
+ goto unknown_vg_1g_port;
+
+ /* fallthru */
+ case 0x22:
+ val = (phy_encode(PORT_TYPE_10G, 0) |
+ phy_encode(PORT_TYPE_10G, 1) |
+ phy_encode(PORT_TYPE_1G, 2) |
+ phy_encode(PORT_TYPE_1G, 3));
+ break;
+
+ case 0x20:
+ val = (phy_encode(PORT_TYPE_10G, 0) |
+ phy_encode(PORT_TYPE_10G, 1));
+ break;
+
+ case 0x10:
+ val = phy_encode(PORT_TYPE_10G, np->port);
+ break;
+
+ case 0x14:
+ if (lowest_1g == 10)
+ parent->plat_type = PLAT_TYPE_VF_P0;
+ else if (lowest_1g == 26)
+ parent->plat_type = PLAT_TYPE_VF_P1;
+ else
+ goto unknown_vg_1g_port;
+
+ /* fallthru */
+ case 0x13:
+ if ((lowest_10g & 0x7) == 0)
+ val = (phy_encode(PORT_TYPE_10G, 0) |
+ phy_encode(PORT_TYPE_1G, 1) |
+ phy_encode(PORT_TYPE_1G, 2) |
+ phy_encode(PORT_TYPE_1G, 3));
+ else
+ val = (phy_encode(PORT_TYPE_1G, 0) |
+ phy_encode(PORT_TYPE_10G, 1) |
+ phy_encode(PORT_TYPE_1G, 2) |
+ phy_encode(PORT_TYPE_1G, 3));
+ break;
+
+ case 0x04:
+ if (lowest_1g == 10)
+ parent->plat_type = PLAT_TYPE_VF_P0;
+ else if (lowest_1g == 26)
+ parent->plat_type = PLAT_TYPE_VF_P1;
+ else
+ goto unknown_vg_1g_port;
+
+ val = (phy_encode(PORT_TYPE_1G, 0) |
+ phy_encode(PORT_TYPE_1G, 1) |
+ phy_encode(PORT_TYPE_1G, 2) |
+ phy_encode(PORT_TYPE_1G, 3));
+ break;
+
+ default:
+ pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
+ num_10g, num_1g);
+ return -EINVAL;
+ }
+ }
+
+ parent->port_phy = val;
+
+ if (parent->plat_type == PLAT_TYPE_NIU)
+ niu_n2_divide_channels(parent);
+ else
+ niu_divide_channels(parent, num_10g, num_1g);
+
+ niu_divide_rdc_groups(parent, num_10g, num_1g);
+
+ return 0;
+
+unknown_vg_1g_port:
+ pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
+ return -EINVAL;
+}
+
+static int __devinit niu_probe_ports(struct niu *np)
+{
+ struct niu_parent *parent = np->parent;
+ int err, i;
+
+ if (parent->port_phy == PORT_PHY_UNKNOWN) {
+ err = walk_phys(np, parent);
+ if (err)
+ return err;
+
+ niu_set_ldg_timer_res(np, 2);
+ for (i = 0; i <= LDN_MAX; i++)
+ niu_ldn_irq_enable(np, i, 0);
+ }
+
+ if (parent->port_phy == PORT_PHY_INVALID)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __devinit niu_classifier_swstate_init(struct niu *np)
+{
+ struct niu_classifier *cp = &np->clas;
+
+ cp->tcam_top = (u16) np->port;
+ cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
+ cp->h1_init = 0xffffffff;
+ cp->h2_init = 0xffff;
+
+ return fflp_early_init(np);
+}
+
+static void __devinit niu_link_config_init(struct niu *np)
+{
+ struct niu_link_config *lp = &np->link_config;
+
+ lp->advertising = (ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_10000baseT_Full |
+ ADVERTISED_Autoneg);
+ lp->speed = lp->active_speed = SPEED_INVALID;
+ lp->duplex = DUPLEX_FULL;
+ lp->active_duplex = DUPLEX_INVALID;
+ lp->autoneg = 1;
+#if 0
+ lp->loopback_mode = LOOPBACK_MAC;
+ lp->active_speed = SPEED_10000;
+ lp->active_duplex = DUPLEX_FULL;
+#else
+ lp->loopback_mode = LOOPBACK_DISABLED;
+#endif
+}
+
+static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
+{
+ switch (np->port) {
+ case 0:
+ np->mac_regs = np->regs + XMAC_PORT0_OFF;
+ np->ipp_off = 0x00000;
+ np->pcs_off = 0x04000;
+ np->xpcs_off = 0x02000;
+ break;
+
+ case 1:
+ np->mac_regs = np->regs + XMAC_PORT1_OFF;
+ np->ipp_off = 0x08000;
+ np->pcs_off = 0x0a000;
+ np->xpcs_off = 0x08000;
+ break;
+
+ case 2:
+ np->mac_regs = np->regs + BMAC_PORT2_OFF;
+ np->ipp_off = 0x04000;
+ np->pcs_off = 0x0e000;
+ np->xpcs_off = ~0UL;
+ break;
+
+ case 3:
+ np->mac_regs = np->regs + BMAC_PORT3_OFF;
+ np->ipp_off = 0x0c000;
+ np->pcs_off = 0x12000;
+ np->xpcs_off = ~0UL;
+ break;
+
+ default:
+ dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
+{
+ struct msix_entry msi_vec[NIU_NUM_LDG];
+ struct niu_parent *parent = np->parent;
+ struct pci_dev *pdev = np->pdev;
+ int i, num_irqs, err;
+ u8 first_ldg;
+
+ first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
+ for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
+ ldg_num_map[i] = first_ldg + i;
+
+ num_irqs = (parent->rxchan_per_port[np->port] +
+ parent->txchan_per_port[np->port] +
+ (np->port == 0 ? 3 : 1));
+ BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
+
+retry:
+ for (i = 0; i < num_irqs; i++) {
+ msi_vec[i].vector = 0;
+ msi_vec[i].entry = i;
+ }
+
+ err = pci_enable_msix(pdev, msi_vec, num_irqs);
+ if (err < 0) {
+ np->flags &= ~NIU_FLAGS_MSIX;
+ return;
+ }
+ if (err > 0) {
+ num_irqs = err;
+ goto retry;
+ }
+
+ np->flags |= NIU_FLAGS_MSIX;
+ for (i = 0; i < num_irqs; i++)
+ np->ldg[i].irq = msi_vec[i].vector;
+ np->num_ldg = num_irqs;
+}
+
+static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
+{
+#ifdef CONFIG_SPARC64
+ struct platform_device *op = np->op;
+ const u32 *int_prop;
+ int i;
+
+ int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
+ if (!int_prop)
+ return -ENODEV;
+
+ for (i = 0; i < op->archdata.num_irqs; i++) {
+ ldg_num_map[i] = int_prop[i];
+ np->ldg[i].irq = op->archdata.irqs[i];
+ }
+
+ np->num_ldg = op->archdata.num_irqs;
+
+ return 0;
+#else
+ return -EINVAL;
+#endif
+}
+
+static int __devinit niu_ldg_init(struct niu *np)
+{
+ struct niu_parent *parent = np->parent;
+ u8 ldg_num_map[NIU_NUM_LDG];
+ int first_chan, num_chan;
+ int i, err, ldg_rotor;
+ u8 port;
+
+ np->num_ldg = 1;
+ np->ldg[0].irq = np->dev->irq;
+ if (parent->plat_type == PLAT_TYPE_NIU) {
+ err = niu_n2_irq_init(np, ldg_num_map);
+ if (err)
+ return err;
+ } else
+ niu_try_msix(np, ldg_num_map);
+
+ port = np->port;
+ for (i = 0; i < np->num_ldg; i++) {
+ struct niu_ldg *lp = &np->ldg[i];
+
+ netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
+
+ lp->np = np;
+ lp->ldg_num = ldg_num_map[i];
+ lp->timer = 2; /* XXX */
+
+		/* On the N2 NIU the firmware has already set up the SID
+		 * mappings so that each LDG is routed to the proper
+		 * interrupt in the NCU interrupt table.
+		 */
+ if (np->parent->plat_type != PLAT_TYPE_NIU) {
+ err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
+ if (err)
+ return err;
+ }
+ }
+
+ /* We adopt the LDG assignment ordering used by the N2 NIU
+ * 'interrupt' properties because that simplifies a lot of
+ * things. This ordering is:
+ *
+ * MAC
+ * MIF (if port zero)
+ * SYSERR (if port zero)
+ * RX channels
+ * TX channels
+ */
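+	/* ldg_rotor walks ldg_num_map[] round-robin, wrapping at
+	 * np->num_ldg, so with fewer LDGs than interrupt sources
+	 * several sources end up sharing a group.
+	 */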
+
+ ldg_rotor = 0;
+
+ err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
+ LDN_MAC(port));
+ if (err)
+ return err;
+
+ ldg_rotor++;
+ if (ldg_rotor == np->num_ldg)
+ ldg_rotor = 0;
+
+ if (port == 0) {
+ err = niu_ldg_assign_ldn(np, parent,
+ ldg_num_map[ldg_rotor],
+ LDN_MIF);
+ if (err)
+ return err;
+
+ ldg_rotor++;
+ if (ldg_rotor == np->num_ldg)
+ ldg_rotor = 0;
+
+ err = niu_ldg_assign_ldn(np, parent,
+ ldg_num_map[ldg_rotor],
+ LDN_DEVICE_ERROR);
+ if (err)
+ return err;
+
+ ldg_rotor++;
+ if (ldg_rotor == np->num_ldg)
+ ldg_rotor = 0;
+
+ }
+
+ first_chan = 0;
+ for (i = 0; i < port; i++)
+ first_chan += parent->rxchan_per_port[i];
+ num_chan = parent->rxchan_per_port[port];
+
+ for (i = first_chan; i < (first_chan + num_chan); i++) {
+ err = niu_ldg_assign_ldn(np, parent,
+ ldg_num_map[ldg_rotor],
+ LDN_RXDMA(i));
+ if (err)
+ return err;
+ ldg_rotor++;
+ if (ldg_rotor == np->num_ldg)
+ ldg_rotor = 0;
+ }
+
+ first_chan = 0;
+ for (i = 0; i < port; i++)
+ first_chan += parent->txchan_per_port[i];
+ num_chan = parent->txchan_per_port[port];
+ for (i = first_chan; i < (first_chan + num_chan); i++) {
+ err = niu_ldg_assign_ldn(np, parent,
+ ldg_num_map[ldg_rotor],
+ LDN_TXDMA(i));
+ if (err)
+ return err;
+ ldg_rotor++;
+ if (ldg_rotor == np->num_ldg)
+ ldg_rotor = 0;
+ }
+
+ return 0;
+}
+
+static void __devexit niu_ldg_free(struct niu *np)
+{
+ if (np->flags & NIU_FLAGS_MSIX)
+ pci_disable_msix(np->pdev);
+}
+
+static int __devinit niu_get_of_props(struct niu *np)
+{
+#ifdef CONFIG_SPARC64
+ struct net_device *dev = np->dev;
+ struct device_node *dp;
+ const char *phy_type;
+ const u8 *mac_addr;
+ const char *model;
+ int prop_len;
+
+ if (np->parent->plat_type == PLAT_TYPE_NIU)
+ dp = np->op->dev.of_node;
+ else
+ dp = pci_device_to_OF_node(np->pdev);
+
+ phy_type = of_get_property(dp, "phy-type", &prop_len);
+ if (!phy_type) {
+ netdev_err(dev, "%s: OF node lacks phy-type property\n",
+ dp->full_name);
+ return -EINVAL;
+ }
+
+ if (!strcmp(phy_type, "none"))
+ return -ENODEV;
+
+ strcpy(np->vpd.phy_type, phy_type);
+
+ if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
+ netdev_err(dev, "%s: Illegal phy string [%s]\n",
+ dp->full_name, np->vpd.phy_type);
+ return -EINVAL;
+ }
+
+ mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
+ if (!mac_addr) {
+ netdev_err(dev, "%s: OF node lacks local-mac-address property\n",
+ dp->full_name);
+ return -EINVAL;
+ }
+ if (prop_len != dev->addr_len) {
+ netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
+ dp->full_name, prop_len);
+ }
+ memcpy(dev->perm_addr, mac_addr, dev->addr_len);
+ if (!is_valid_ether_addr(&dev->perm_addr[0])) {
+ netdev_err(dev, "%s: OF MAC address is invalid\n",
+ dp->full_name);
+ netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr);
+ return -EINVAL;
+ }
+
+ memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
+
+ model = of_get_property(dp, "model", &prop_len);
+
+ if (model)
+ strcpy(np->vpd.model, model);
+
+ if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
+ np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
+ NIU_FLAGS_HOTPLUG_PHY);
+ }
+
+ return 0;
+#else
+ return -EINVAL;
+#endif
+}
+
+static int __devinit niu_get_invariants(struct niu *np)
+{
+ int err, have_props;
+ u32 offset;
+
+ err = niu_get_of_props(np);
+ if (err == -ENODEV)
+ return err;
+
+ have_props = !err;
+
+ err = niu_init_mac_ipp_pcs_base(np);
+ if (err)
+ return err;
+
+ if (have_props) {
+ err = niu_get_and_validate_port(np);
+ if (err)
+ return err;
+
+ } else {
+ if (np->parent->plat_type == PLAT_TYPE_NIU)
+ return -EINVAL;
+
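+		/* No OF properties (the PCI/Atlas case): fetch the port
+		 * identity from the VPD EEPROM through the ESPC PIO
+		 * window, falling back to an SPROM probe if the VPD
+		 * turns out to be invalid.
+		 */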
+ nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
+ offset = niu_pci_vpd_offset(np);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "%s() VPD offset [%08x]\n", __func__, offset);
+ if (offset)
+ niu_pci_vpd_fetch(np, offset);
+ nw64(ESPC_PIO_EN, 0);
+
+ if (np->flags & NIU_FLAGS_VPD_VALID) {
+ niu_pci_vpd_validate(np);
+ err = niu_get_and_validate_port(np);
+ if (err)
+ return err;
+ }
+
+ if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
+ err = niu_get_and_validate_port(np);
+ if (err)
+ return err;
+ err = niu_pci_probe_sprom(np);
+ if (err)
+ return err;
+ }
+ }
+
+ err = niu_probe_ports(np);
+ if (err)
+ return err;
+
+	err = niu_ldg_init(np);
+	if (err)
+		return err;
+
+	err = niu_classifier_swstate_init(np);
+	if (err)
+		return err;
+
+ niu_link_config_init(np);
+
+ err = niu_determine_phy_disposition(np);
+ if (!err)
+ err = niu_init_link(np);
+
+ return err;
+}
+
+static LIST_HEAD(niu_parent_list);
+static DEFINE_MUTEX(niu_parent_lock);
+static int niu_parent_index;
+
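+/* sysfs attribute on the parent platform device: prints one
+ * space-separated link type per port, e.g. "10G 10G 1G 1G".
+ */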
+static ssize_t show_port_phy(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *plat_dev = to_platform_device(dev);
+ struct niu_parent *p = plat_dev->dev.platform_data;
+ u32 port_phy = p->port_phy;
+ char *orig_buf = buf;
+ int i;
+
+ if (port_phy == PORT_PHY_UNKNOWN ||
+ port_phy == PORT_PHY_INVALID)
+ return 0;
+
+ for (i = 0; i < p->num_ports; i++) {
+ const char *type_str;
+ int type;
+
+ type = phy_decode(port_phy, i);
+ if (type == PORT_TYPE_10G)
+ type_str = "10G";
+ else
+ type_str = "1G";
+ buf += sprintf(buf,
+ (i == 0) ? "%s" : " %s",
+ type_str);
+ }
+ buf += sprintf(buf, "\n");
+ return buf - orig_buf;
+}
+
+static ssize_t show_plat_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *plat_dev = to_platform_device(dev);
+ struct niu_parent *p = plat_dev->dev.platform_data;
+ const char *type_str;
+
+ switch (p->plat_type) {
+ case PLAT_TYPE_ATLAS:
+ type_str = "atlas";
+ break;
+ case PLAT_TYPE_NIU:
+ type_str = "niu";
+ break;
+ case PLAT_TYPE_VF_P0:
+ type_str = "vf_p0";
+ break;
+ case PLAT_TYPE_VF_P1:
+ type_str = "vf_p1";
+ break;
+ default:
+ type_str = "unknown";
+ break;
+ }
+
+ return sprintf(buf, "%s\n", type_str);
+}
+
+static ssize_t __show_chan_per_port(struct device *dev,
+ struct device_attribute *attr, char *buf,
+ int rx)
+{
+ struct platform_device *plat_dev = to_platform_device(dev);
+ struct niu_parent *p = plat_dev->dev.platform_data;
+ char *orig_buf = buf;
+ u8 *arr;
+ int i;
+
+ arr = (rx ? p->rxchan_per_port : p->txchan_per_port);
+
+ for (i = 0; i < p->num_ports; i++) {
+ buf += sprintf(buf,
+ (i == 0) ? "%d" : " %d",
+ arr[i]);
+ }
+ buf += sprintf(buf, "\n");
+
+ return buf - orig_buf;
+}
+
+static ssize_t show_rxchan_per_port(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return __show_chan_per_port(dev, attr, buf, 1);
+}
+
+static ssize_t show_txchan_per_port(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+	return __show_chan_per_port(dev, attr, buf, 0);
+}
+
+static ssize_t show_num_ports(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *plat_dev = to_platform_device(dev);
+ struct niu_parent *p = plat_dev->dev.platform_data;
+
+ return sprintf(buf, "%d\n", p->num_ports);
+}
+
+static struct device_attribute niu_parent_attributes[] = {
+ __ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
+ __ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
+ __ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
+ __ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
+ __ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
+ {}
+};
+
+static struct niu_parent * __devinit niu_new_parent(struct niu *np,
+ union niu_parent_id *id,
+ u8 ptype)
+{
+ struct platform_device *plat_dev;
+ struct niu_parent *p;
+ int i;
+
+ plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
+ NULL, 0);
+ if (IS_ERR(plat_dev))
+ return NULL;
+
+ for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
+ int err = device_create_file(&plat_dev->dev,
+ &niu_parent_attributes[i]);
+ if (err)
+ goto fail_unregister;
+ }
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ goto fail_unregister;
+
+ p->index = niu_parent_index++;
+
+ plat_dev->dev.platform_data = p;
+ p->plat_dev = plat_dev;
+
+ memcpy(&p->id, id, sizeof(*id));
+ p->plat_type = ptype;
+ INIT_LIST_HEAD(&p->list);
+ atomic_set(&p->refcnt, 0);
+ list_add(&p->list, &niu_parent_list);
+ spin_lock_init(&p->lock);
+
+ p->rxdma_clock_divider = 7500;
+
+ p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
+ if (p->plat_type == PLAT_TYPE_NIU)
+ p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;
+
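+	/* Default classifier state: select TCAM lookup for every class,
+	 * and build flow keys from the IP source/destination addresses,
+	 * the protocol, and the leading L4 bytes in both halves (the
+	 * port pair for TCP/UDP).
+	 */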
+ for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
+ int index = i - CLASS_CODE_USER_PROG1;
+
+ p->tcam_key[index] = TCAM_KEY_TSEL;
+ p->flow_key[index] = (FLOW_KEY_IPSA |
+ FLOW_KEY_IPDA |
+ FLOW_KEY_PROTO |
+ (FLOW_KEY_L4_BYTE12 <<
+ FLOW_KEY_L4_0_SHIFT) |
+ (FLOW_KEY_L4_BYTE12 <<
+ FLOW_KEY_L4_1_SHIFT));
+ }
+
+ for (i = 0; i < LDN_MAX + 1; i++)
+ p->ldg_map[i] = LDG_INVALID;
+
+ return p;
+
+fail_unregister:
+ platform_device_unregister(plat_dev);
+ return NULL;
+}
+
+static struct niu_parent * __devinit niu_get_parent(struct niu *np,
+ union niu_parent_id *id,
+ u8 ptype)
+{
+ struct niu_parent *p, *tmp;
+ int port = np->port;
+
+ mutex_lock(&niu_parent_lock);
+ p = NULL;
+ list_for_each_entry(tmp, &niu_parent_list, list) {
+ if (!memcmp(id, &tmp->id, sizeof(*id))) {
+ p = tmp;
+ break;
+ }
+ }
+ if (!p)
+ p = niu_new_parent(np, id, ptype);
+
+ if (p) {
+ char port_name[6];
+ int err;
+
+ sprintf(port_name, "port%d", port);
+ err = sysfs_create_link(&p->plat_dev->dev.kobj,
+ &np->device->kobj,
+ port_name);
+ if (!err) {
+ p->ports[port] = np;
+ atomic_inc(&p->refcnt);
+ }
+ }
+ mutex_unlock(&niu_parent_lock);
+
+ return p;
+}
+
+static void niu_put_parent(struct niu *np)
+{
+ struct niu_parent *p = np->parent;
+ u8 port = np->port;
+ char port_name[6];
+
+ BUG_ON(!p || p->ports[port] != np);
+
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "%s() port[%u]\n", __func__, port);
+
+ sprintf(port_name, "port%d", port);
+
+ mutex_lock(&niu_parent_lock);
+
+ sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);
+
+ p->ports[port] = NULL;
+ np->parent = NULL;
+
+ if (atomic_dec_and_test(&p->refcnt)) {
+ list_del(&p->list);
+ platform_device_unregister(p->plat_dev);
+ }
+
+ mutex_unlock(&niu_parent_lock);
+}
+
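+/* DMA ops for the PCI (Atlas) variant of the chip.  The N2 NIU build
+ * below substitutes niu_phys_ops, which deals in raw physical
+ * addresses; struct niu_ops keeps the rest of the driver agnostic to
+ * the difference.
+ */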
+static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
+ u64 *handle, gfp_t flag)
+{
+ dma_addr_t dh;
+ void *ret;
+
+ ret = dma_alloc_coherent(dev, size, &dh, flag);
+ if (ret)
+ *handle = dh;
+ return ret;
+}
+
+static void niu_pci_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, u64 handle)
+{
+ dma_free_coherent(dev, size, cpu_addr, handle);
+}
+
+static u64 niu_pci_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ return dma_map_page(dev, page, offset, size, direction);
+}
+
+static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
+ size_t size, enum dma_data_direction direction)
+{
+ dma_unmap_page(dev, dma_address, size, direction);
+}
+
+static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ return dma_map_single(dev, cpu_addr, size, direction);
+}
+
+static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ dma_unmap_single(dev, dma_address, size, direction);
+}
+
+static const struct niu_ops niu_pci_ops = {
+ .alloc_coherent = niu_pci_alloc_coherent,
+ .free_coherent = niu_pci_free_coherent,
+ .map_page = niu_pci_map_page,
+ .unmap_page = niu_pci_unmap_page,
+ .map_single = niu_pci_map_single,
+ .unmap_single = niu_pci_unmap_single,
+};
+
+static void __devinit niu_driver_version(void)
+{
+ static int niu_version_printed;
+
+ if (niu_version_printed++ == 0)
+ pr_info("%s", version);
+}
+
+static struct net_device * __devinit niu_alloc_and_init(
+ struct device *gen_dev, struct pci_dev *pdev,
+ struct platform_device *op, const struct niu_ops *ops,
+ u8 port)
+{
+ struct net_device *dev;
+ struct niu *np;
+
+ dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
+ if (!dev) {
+ dev_err(gen_dev, "Etherdev alloc failed, aborting\n");
+ return NULL;
+ }
+
+ SET_NETDEV_DEV(dev, gen_dev);
+
+ np = netdev_priv(dev);
+ np->dev = dev;
+ np->pdev = pdev;
+ np->op = op;
+ np->device = gen_dev;
+ np->ops = ops;
+
+ np->msg_enable = niu_debug;
+
+ spin_lock_init(&np->lock);
+ INIT_WORK(&np->reset_task, niu_reset_task);
+
+ np->port = port;
+
+ return dev;
+}
+
+static const struct net_device_ops niu_netdev_ops = {
+ .ndo_open = niu_open,
+ .ndo_stop = niu_close,
+ .ndo_start_xmit = niu_start_xmit,
+ .ndo_get_stats64 = niu_get_stats,
+ .ndo_set_rx_mode = niu_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = niu_set_mac_addr,
+ .ndo_do_ioctl = niu_ioctl,
+ .ndo_tx_timeout = niu_tx_timeout,
+ .ndo_change_mtu = niu_change_mtu,
+};
+
+static void __devinit niu_assign_netdev_ops(struct net_device *dev)
+{
+ dev->netdev_ops = &niu_netdev_ops;
+ dev->ethtool_ops = &niu_ethtool_ops;
+ dev->watchdog_timeo = NIU_TX_TIMEOUT;
+}
+
+static void __devinit niu_device_announce(struct niu *np)
+{
+ struct net_device *dev = np->dev;
+
+ pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);
+
+ if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
+ pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
+ dev->name,
+ (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
+ (np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
+ (np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
+ (np->mac_xcvr == MAC_XCVR_MII ? "MII" :
+ (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
+ np->vpd.phy_type);
+ } else {
+ pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
+ dev->name,
+ (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
+ (np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
+ (np->flags & NIU_FLAGS_FIBER ? "FIBER" :
+ (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
+ "COPPER")),
+ (np->mac_xcvr == MAC_XCVR_MII ? "MII" :
+ (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
+ np->vpd.phy_type);
+ }
+}
+
+static void __devinit niu_set_basic_features(struct net_device *dev)
+{
+ dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
+ dev->features |= dev->hw_features | NETIF_F_RXCSUM;
+}
+
+static int __devinit niu_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ union niu_parent_id parent_id;
+ struct net_device *dev;
+ struct niu *np;
+ int err, pos;
+ u64 dma_mask;
+ u16 val16;
+
+ niu_driver_version();
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+ return err;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
+ !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
+ err = -ENODEV;
+ goto err_out_disable_pdev;
+ }
+
+ err = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+ goto err_out_disable_pdev;
+ }
+
+ pos = pci_pcie_cap(pdev);
+	if (pos <= 0) {
+		dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
+		err = -ENODEV;
+		goto err_out_free_res;
+	}
+
+ dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
+ &niu_pci_ops, PCI_FUNC(pdev->devfn));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_out_free_res;
+ }
+ np = netdev_priv(dev);
+
+ memset(&parent_id, 0, sizeof(parent_id));
+ parent_id.pci.domain = pci_domain_nr(pdev->bus);
+ parent_id.pci.bus = pdev->bus->number;
+ parent_id.pci.device = PCI_SLOT(pdev->devfn);
+
+ np->parent = niu_get_parent(np, &parent_id,
+ PLAT_TYPE_ATLAS);
+ if (!np->parent) {
+ err = -ENOMEM;
+ goto err_out_free_dev;
+ }
+
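+	/* Enable reporting of all PCIe error classes and relaxed
+	 * ordering, and clear No Snoop so device DMA stays coherent
+	 * with the CPU caches.
+	 */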
+ pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
+ val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
+ val16 |= (PCI_EXP_DEVCTL_CERE |
+ PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_URRE |
+ PCI_EXP_DEVCTL_RELAX_EN);
+ pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
+
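+	/* The chip can address 44 bits of DMA space; try that first and
+	 * fall back to a 32-bit mask if the platform cannot satisfy it.
+	 */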
+ dma_mask = DMA_BIT_MASK(44);
+ err = pci_set_dma_mask(pdev, dma_mask);
+ if (!err) {
+ dev->features |= NETIF_F_HIGHDMA;
+ err = pci_set_consistent_dma_mask(pdev, dma_mask);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
+ goto err_out_release_parent;
+ }
+ }
+ if (err || dma_mask == DMA_BIT_MASK(32)) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
+ goto err_out_release_parent;
+ }
+ }
+
+ niu_set_basic_features(dev);
+
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
+ np->regs = pci_ioremap_bar(pdev, 0);
+ if (!np->regs) {
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
+ err = -ENOMEM;
+ goto err_out_release_parent;
+ }
+
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+
+ dev->irq = pdev->irq;
+
+ niu_assign_netdev_ops(dev);
+
+ err = niu_get_invariants(np);
+ if (err) {
+ if (err != -ENODEV)
+ dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
+ goto err_out_iounmap;
+ }
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting\n");
+ goto err_out_iounmap;
+ }
+
+ pci_set_drvdata(pdev, dev);
+
+ niu_device_announce(np);
+
+ return 0;
+
+err_out_iounmap:
+ if (np->regs) {
+ iounmap(np->regs);
+ np->regs = NULL;
+ }
+
+err_out_release_parent:
+ niu_put_parent(np);
+
+err_out_free_dev:
+ free_netdev(dev);
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ return err;
+}
+
+static void __devexit niu_pci_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct niu *np = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ if (np->regs) {
+ iounmap(np->regs);
+ np->regs = NULL;
+ }
+
+ niu_ldg_free(np);
+
+ niu_put_parent(np);
+
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct niu *np = netdev_priv(dev);
+ unsigned long flags;
+
+ if (!netif_running(dev))
+ return 0;
+
+ flush_work_sync(&np->reset_task);
+ niu_netif_stop(np);
+
+ del_timer_sync(&np->timer);
+
+ spin_lock_irqsave(&np->lock, flags);
+ niu_enable_interrupts(np, 0);
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ netif_device_detach(dev);
+
+ spin_lock_irqsave(&np->lock, flags);
+ niu_stop_hw(np);
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ pci_save_state(pdev);
+
+ return 0;
+}
+
+static int niu_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct niu *np = netdev_priv(dev);
+ unsigned long flags;
+ int err;
+
+ if (!netif_running(dev))
+ return 0;
+
+ pci_restore_state(pdev);
+
+ netif_device_attach(dev);
+
+ spin_lock_irqsave(&np->lock, flags);
+
+ err = niu_init_hw(np);
+ if (!err) {
+ np->timer.expires = jiffies + HZ;
+ add_timer(&np->timer);
+ niu_netif_start(np);
+ }
+
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ return err;
+}
+
+static struct pci_driver niu_pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = niu_pci_tbl,
+ .probe = niu_pci_init_one,
+ .remove = __devexit_p(niu_pci_remove_one),
+ .suspend = niu_suspend,
+ .resume = niu_resume,
+};
+
+#ifdef CONFIG_SPARC64
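+/* On the N2 NIU the device references memory by physical address, so
+ * the "DMA mapping" operations reduce to __pa()/page_to_phys() and the
+ * unmap hooks are no-ops.
+ */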
+static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
+ u64 *dma_addr, gfp_t flag)
+{
+ unsigned long order = get_order(size);
+ unsigned long page = __get_free_pages(flag, order);
+
+ if (page == 0UL)
+ return NULL;
+ memset((char *)page, 0, PAGE_SIZE << order);
+ *dma_addr = __pa(page);
+
+ return (void *) page;
+}
+
+static void niu_phys_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, u64 handle)
+{
+ unsigned long order = get_order(size);
+
+ free_pages((unsigned long) cpu_addr, order);
+}
+
+static u64 niu_phys_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ return page_to_phys(page) + offset;
+}
+
+static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
+ size_t size, enum dma_data_direction direction)
+{
+ /* Nothing to do. */
+}
+
+static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ return __pa(cpu_addr);
+}
+
+static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ /* Nothing to do. */
+}
+
+static const struct niu_ops niu_phys_ops = {
+ .alloc_coherent = niu_phys_alloc_coherent,
+ .free_coherent = niu_phys_free_coherent,
+ .map_page = niu_phys_map_page,
+ .unmap_page = niu_phys_unmap_page,
+ .map_single = niu_phys_map_single,
+ .unmap_single = niu_phys_unmap_single,
+};
+
+static int __devinit niu_of_probe(struct platform_device *op)
+{
+ union niu_parent_id parent_id;
+ struct net_device *dev;
+ struct niu *np;
+ const u32 *reg;
+ int err;
+
+ niu_driver_version();
+
+ reg = of_get_property(op->dev.of_node, "reg", NULL);
+ if (!reg) {
+ dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
+ op->dev.of_node->full_name);
+ return -ENODEV;
+ }
+
+ dev = niu_alloc_and_init(&op->dev, NULL, op,
+ &niu_phys_ops, reg[0] & 0x1);
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ np = netdev_priv(dev);
+
+ memset(&parent_id, 0, sizeof(parent_id));
+ parent_id.of = of_get_parent(op->dev.of_node);
+
+ np->parent = niu_get_parent(np, &parent_id,
+ PLAT_TYPE_NIU);
+ if (!np->parent) {
+ err = -ENOMEM;
+ goto err_out_free_dev;
+ }
+
+ niu_set_basic_features(dev);
+
+ np->regs = of_ioremap(&op->resource[1], 0,
+ resource_size(&op->resource[1]),
+ "niu regs");
+ if (!np->regs) {
+ dev_err(&op->dev, "Cannot map device registers, aborting\n");
+ err = -ENOMEM;
+ goto err_out_release_parent;
+ }
+
+ np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
+ resource_size(&op->resource[2]),
+ "niu vregs-1");
+ if (!np->vir_regs_1) {
+ dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
+ err = -ENOMEM;
+ goto err_out_iounmap;
+ }
+
+ np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
+ resource_size(&op->resource[3]),
+ "niu vregs-2");
+ if (!np->vir_regs_2) {
+ dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
+ err = -ENOMEM;
+ goto err_out_iounmap;
+ }
+
+ niu_assign_netdev_ops(dev);
+
+ err = niu_get_invariants(np);
+ if (err) {
+ if (err != -ENODEV)
+ dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
+ goto err_out_iounmap;
+ }
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&op->dev, "Cannot register net device, aborting\n");
+ goto err_out_iounmap;
+ }
+
+ dev_set_drvdata(&op->dev, dev);
+
+ niu_device_announce(np);
+
+ return 0;
+
+err_out_iounmap:
+ if (np->vir_regs_1) {
+ of_iounmap(&op->resource[2], np->vir_regs_1,
+ resource_size(&op->resource[2]));
+ np->vir_regs_1 = NULL;
+ }
+
+ if (np->vir_regs_2) {
+ of_iounmap(&op->resource[3], np->vir_regs_2,
+ resource_size(&op->resource[3]));
+ np->vir_regs_2 = NULL;
+ }
+
+ if (np->regs) {
+ of_iounmap(&op->resource[1], np->regs,
+ resource_size(&op->resource[1]));
+ np->regs = NULL;
+ }
+
+err_out_release_parent:
+ niu_put_parent(np);
+
+err_out_free_dev:
+ free_netdev(dev);
+
+err_out:
+ return err;
+}
+
+static int __devexit niu_of_remove(struct platform_device *op)
+{
+ struct net_device *dev = dev_get_drvdata(&op->dev);
+
+ if (dev) {
+ struct niu *np = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ if (np->vir_regs_1) {
+ of_iounmap(&op->resource[2], np->vir_regs_1,
+ resource_size(&op->resource[2]));
+ np->vir_regs_1 = NULL;
+ }
+
+ if (np->vir_regs_2) {
+ of_iounmap(&op->resource[3], np->vir_regs_2,
+ resource_size(&op->resource[3]));
+ np->vir_regs_2 = NULL;
+ }
+
+ if (np->regs) {
+ of_iounmap(&op->resource[1], np->regs,
+ resource_size(&op->resource[1]));
+ np->regs = NULL;
+ }
+
+ niu_ldg_free(np);
+
+ niu_put_parent(np);
+
+ free_netdev(dev);
+ dev_set_drvdata(&op->dev, NULL);
+ }
+ return 0;
+}
+
+static const struct of_device_id niu_match[] = {
+ {
+ .name = "network",
+ .compatible = "SUNW,niusl",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, niu_match);
+
+static struct platform_driver niu_of_driver = {
+ .driver = {
+ .name = "niu",
+ .owner = THIS_MODULE,
+ .of_match_table = niu_match,
+ },
+ .probe = niu_of_probe,
+ .remove = __devexit_p(niu_of_remove),
+};
+
+#endif /* CONFIG_SPARC64 */
+
+static int __init niu_init(void)
+{
+ int err = 0;
+
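+	/* The driver's buffer math assumes a page size of at least 4KB. */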
+ BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
+
+ niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
+
+#ifdef CONFIG_SPARC64
+ err = platform_driver_register(&niu_of_driver);
+#endif
+
+ if (!err) {
+ err = pci_register_driver(&niu_pci_driver);
+#ifdef CONFIG_SPARC64
+ if (err)
+ platform_driver_unregister(&niu_of_driver);
+#endif
+ }
+
+ return err;
+}
+
+static void __exit niu_exit(void)
+{
+ pci_unregister_driver(&niu_pci_driver);
+#ifdef CONFIG_SPARC64
+ platform_driver_unregister(&niu_of_driver);
+#endif
+}
+
+module_init(niu_init);
+module_exit(niu_exit);
diff --git a/drivers/net/ethernet/sun/niu.h b/drivers/net/ethernet/sun/niu.h
new file mode 100644
index 000000000000..51e177e1860d
--- /dev/null
+++ b/drivers/net/ethernet/sun/niu.h
@@ -0,0 +1,3306 @@
+/* niu.h: Definitions for Neptune ethernet driver.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#ifndef _NIU_H
+#define _NIU_H
+
+#define PIO 0x000000UL
+#define FZC_PIO 0x080000UL
+#define FZC_MAC 0x180000UL
+#define FZC_IPP 0x280000UL
+#define FFLP 0x300000UL
+#define FZC_FFLP 0x380000UL
+#define PIO_VADDR 0x400000UL
+#define ZCP 0x500000UL
+#define FZC_ZCP 0x580000UL
+#define DMC 0x600000UL
+#define FZC_DMC 0x680000UL
+#define TXC 0x700000UL
+#define FZC_TXC 0x780000UL
+#define PIO_LDSV 0x800000UL
+#define PIO_PIO_LDGIM 0x900000UL
+#define PIO_IMASK0 0xa00000UL
+#define PIO_IMASK1 0xb00000UL
+#define FZC_PROM 0xc80000UL
+#define FZC_PIM 0xd80000UL
+
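+/* Logical device group (LDG) interrupt state and management registers. */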
+#define LDSV0(LDG) (PIO_LDSV + 0x00000UL + (LDG) * 0x2000UL)
+#define LDSV1(LDG) (PIO_LDSV + 0x00008UL + (LDG) * 0x2000UL)
+#define LDSV2(LDG) (PIO_LDSV + 0x00010UL + (LDG) * 0x2000UL)
+
+#define LDG_IMGMT(LDG) (PIO_LDSV + 0x00018UL + (LDG) * 0x2000UL)
+#define LDG_IMGMT_ARM 0x0000000080000000ULL
+#define LDG_IMGMT_TIMER 0x000000000000003fULL
+
+#define LD_IM0(IDX) (PIO_IMASK0 + 0x00000UL + (IDX) * 0x2000UL)
+#define LD_IM0_MASK 0x0000000000000003ULL
+
+#define LD_IM1(IDX) (PIO_IMASK1 + 0x00000UL + (IDX) * 0x2000UL)
+#define LD_IM1_MASK 0x0000000000000003ULL
+
+#define LDG_TIMER_RES (FZC_PIO + 0x00008UL)
+#define LDG_TIMER_RES_VAL 0x00000000000fffffULL
+
+#define DIRTY_TID_CTL (FZC_PIO + 0x00010UL)
+#define DIRTY_TID_CTL_NPTHRED 0x00000000003f0000ULL
+#define DIRTY_TID_CTL_RDTHRED 0x00000000000003f0ULL
+#define DIRTY_TID_CTL_DTIDCLR 0x0000000000000002ULL
+#define DIRTY_TID_CTL_DTIDENAB 0x0000000000000001ULL
+
+#define DIRTY_TID_STAT (FZC_PIO + 0x00018UL)
+#define DIRTY_TID_STAT_NPWSTAT 0x0000000000003f00ULL
+#define DIRTY_TID_STAT_RDSTAT 0x000000000000003fULL
+
+#define RST_CTL (FZC_PIO + 0x00038UL)
+#define RST_CTL_MAC_RST3 0x0000000000400000ULL
+#define RST_CTL_MAC_RST2 0x0000000000200000ULL
+#define RST_CTL_MAC_RST1 0x0000000000100000ULL
+#define RST_CTL_MAC_RST0 0x0000000000080000ULL
+#define RST_CTL_ACK_TO_EN 0x0000000000000800ULL
+#define RST_CTL_ACK_TO_VAL 0x00000000000007feULL
+
+#define SMX_CFIG_DAT (FZC_PIO + 0x00040UL)
+#define SMX_CFIG_DAT_RAS_DET 0x0000000080000000ULL
+#define SMX_CFIG_DAT_RAS_INJ 0x0000000040000000ULL
+#define SMX_CFIG_DAT_XACT_TO 0x000000000fffffffULL
+
+#define SMX_INT_STAT (FZC_PIO + 0x00048UL)
+#define SMX_INT_STAT_STAT 0x00000000ffffffffULL
+
+#define SMX_CTL (FZC_PIO + 0x00050UL)
+#define SMX_CTL_CTL 0x00000000ffffffffULL
+
+#define SMX_DBG_VEC (FZC_PIO + 0x00058UL)
+#define SMX_DBG_VEC_VEC 0x00000000ffffffffULL
+
+#define PIO_DBG_SEL (FZC_PIO + 0x00060UL)
+#define PIO_DBG_SEL_SEL 0x000000000000003fULL
+
+#define PIO_TRAIN_VEC (FZC_PIO + 0x00068UL)
+#define PIO_TRAIN_VEC_VEC 0x00000000ffffffffULL
+
+#define PIO_ARB_CTL (FZC_PIO + 0x00070UL)
+#define PIO_ARB_CTL_CTL 0x00000000ffffffffULL
+
+#define PIO_ARB_DBG_VEC (FZC_PIO + 0x00078UL)
+#define PIO_ARB_DBG_VEC_VEC 0x00000000ffffffffULL
+
+#define SYS_ERR_MASK (FZC_PIO + 0x00090UL)
+#define SYS_ERR_MASK_META2 0x0000000000000400ULL
+#define SYS_ERR_MASK_META1 0x0000000000000200ULL
+#define SYS_ERR_MASK_PEU 0x0000000000000100ULL
+#define SYS_ERR_MASK_TXC 0x0000000000000080ULL
+#define SYS_ERR_MASK_RDMC 0x0000000000000040ULL
+#define SYS_ERR_MASK_TDMC 0x0000000000000020ULL
+#define SYS_ERR_MASK_ZCP 0x0000000000000010ULL
+#define SYS_ERR_MASK_FFLP 0x0000000000000008ULL
+#define SYS_ERR_MASK_IPP 0x0000000000000004ULL
+#define SYS_ERR_MASK_MAC 0x0000000000000002ULL
+#define SYS_ERR_MASK_SMX 0x0000000000000001ULL
+
+#define SYS_ERR_STAT (FZC_PIO + 0x00098UL)
+#define SYS_ERR_STAT_META2 0x0000000000000400ULL
+#define SYS_ERR_STAT_META1 0x0000000000000200ULL
+#define SYS_ERR_STAT_PEU 0x0000000000000100ULL
+#define SYS_ERR_STAT_TXC 0x0000000000000080ULL
+#define SYS_ERR_STAT_RDMC 0x0000000000000040ULL
+#define SYS_ERR_STAT_TDMC 0x0000000000000020ULL
+#define SYS_ERR_STAT_ZCP 0x0000000000000010ULL
+#define SYS_ERR_STAT_FFLP 0x0000000000000008ULL
+#define SYS_ERR_STAT_IPP 0x0000000000000004ULL
+#define SYS_ERR_STAT_MAC 0x0000000000000002ULL
+#define SYS_ERR_STAT_SMX 0x0000000000000001ULL
+
+#define SID(LDG) (FZC_PIO + 0x10200UL + (LDG) * 8UL)
+#define SID_FUNC 0x0000000000000060ULL
+#define SID_FUNC_SHIFT 5
+#define SID_VECTOR 0x000000000000001fULL
+#define SID_VECTOR_SHIFT 0
+
+#define LDG_NUM(LDN) (FZC_PIO + 0x20000UL + (LDN) * 8UL)
+
+#define XMAC_PORT0_OFF (FZC_MAC + 0x000000)
+#define XMAC_PORT1_OFF (FZC_MAC + 0x006000)
+#define BMAC_PORT2_OFF (FZC_MAC + 0x00c000)
+#define BMAC_PORT3_OFF (FZC_MAC + 0x010000)
+
+/* XMAC registers, offset from np->mac_regs */
+
+#define XTXMAC_SW_RST 0x00000UL
+#define XTXMAC_SW_RST_REG_RS 0x0000000000000002ULL
+#define XTXMAC_SW_RST_SOFT_RST 0x0000000000000001ULL
+
+#define XRXMAC_SW_RST 0x00008UL
+#define XRXMAC_SW_RST_REG_RS 0x0000000000000002ULL
+#define XRXMAC_SW_RST_SOFT_RST 0x0000000000000001ULL
+
+#define XTXMAC_STATUS 0x00020UL
+#define XTXMAC_STATUS_FRAME_CNT_EXP 0x0000000000000800ULL
+#define XTXMAC_STATUS_BYTE_CNT_EXP 0x0000000000000400ULL
+#define XTXMAC_STATUS_TXFIFO_XFR_ERR 0x0000000000000010ULL
+#define XTXMAC_STATUS_TXMAC_OFLOW 0x0000000000000008ULL
+#define XTXMAC_STATUS_MAX_PSIZE_ERR 0x0000000000000004ULL
+#define XTXMAC_STATUS_TXMAC_UFLOW 0x0000000000000002ULL
+#define XTXMAC_STATUS_FRAME_XMITED 0x0000000000000001ULL
+
+#define XRXMAC_STATUS 0x00028UL
+#define XRXMAC_STATUS_RXHIST7_CNT_EXP 0x0000000000100000ULL
+#define XRXMAC_STATUS_LCL_FLT_STATUS 0x0000000000080000ULL
+#define XRXMAC_STATUS_RFLT_DET 0x0000000000040000ULL
+#define XRXMAC_STATUS_LFLT_CNT_EXP 0x0000000000020000ULL
+#define XRXMAC_STATUS_PHY_MDINT 0x0000000000010000ULL
+#define XRXMAC_STATUS_ALIGNERR_CNT_EXP 0x0000000000010000ULL
+#define XRXMAC_STATUS_RXFRAG_CNT_EXP 0x0000000000008000ULL
+#define XRXMAC_STATUS_RXMULTF_CNT_EXP 0x0000000000004000ULL
+#define XRXMAC_STATUS_RXBCAST_CNT_EXP 0x0000000000002000ULL
+#define XRXMAC_STATUS_RXHIST6_CNT_EXP 0x0000000000001000ULL
+#define XRXMAC_STATUS_RXHIST5_CNT_EXP 0x0000000000000800ULL
+#define XRXMAC_STATUS_RXHIST4_CNT_EXP 0x0000000000000400ULL
+#define XRXMAC_STATUS_RXHIST3_CNT_EXP 0x0000000000000200ULL
+#define XRXMAC_STATUS_RXHIST2_CNT_EXP 0x0000000000000100ULL
+#define XRXMAC_STATUS_RXHIST1_CNT_EXP 0x0000000000000080ULL
+#define XRXMAC_STATUS_RXOCTET_CNT_EXP 0x0000000000000040ULL
+#define XRXMAC_STATUS_CVIOLERR_CNT_EXP 0x0000000000000020ULL
+#define XRXMAC_STATUS_LENERR_CNT_EXP 0x0000000000000010ULL
+#define XRXMAC_STATUS_CRCERR_CNT_EXP 0x0000000000000008ULL
+#define XRXMAC_STATUS_RXUFLOW 0x0000000000000004ULL
+#define XRXMAC_STATUS_RXOFLOW 0x0000000000000002ULL
+#define XRXMAC_STATUS_FRAME_RCVD 0x0000000000000001ULL
+
+#define XMAC_FC_STAT 0x00030UL
+#define XMAC_FC_STAT_RX_RCV_PAUSE_TIME 0x00000000ffff0000ULL
+#define XMAC_FC_STAT_TX_MAC_NPAUSE 0x0000000000000004ULL
+#define XMAC_FC_STAT_TX_MAC_PAUSE 0x0000000000000002ULL
+#define XMAC_FC_STAT_RX_MAC_RPAUSE 0x0000000000000001ULL
+
+#define XTXMAC_STAT_MSK 0x00040UL
+#define XTXMAC_STAT_MSK_FRAME_CNT_EXP 0x0000000000000800ULL
+#define XTXMAC_STAT_MSK_BYTE_CNT_EXP 0x0000000000000400ULL
+#define XTXMAC_STAT_MSK_TXFIFO_XFR_ERR 0x0000000000000010ULL
+#define XTXMAC_STAT_MSK_TXMAC_OFLOW 0x0000000000000008ULL
+#define XTXMAC_STAT_MSK_MAX_PSIZE_ERR 0x0000000000000004ULL
+#define XTXMAC_STAT_MSK_TXMAC_UFLOW 0x0000000000000002ULL
+#define XTXMAC_STAT_MSK_FRAME_XMITED 0x0000000000000001ULL
+
+#define XRXMAC_STAT_MSK 0x00048UL
+#define XRXMAC_STAT_MSK_LCL_FLT_STAT_MSK 0x0000000000080000ULL
+#define XRXMAC_STAT_MSK_RFLT_DET 0x0000000000040000ULL
+#define XRXMAC_STAT_MSK_LFLT_CNT_EXP 0x0000000000020000ULL
+#define XRXMAC_STAT_MSK_PHY_MDINT 0x0000000000010000ULL
+#define XRXMAC_STAT_MSK_RXFRAG_CNT_EXP 0x0000000000008000ULL
+#define XRXMAC_STAT_MSK_RXMULTF_CNT_EXP 0x0000000000004000ULL
+#define XRXMAC_STAT_MSK_RXBCAST_CNT_EXP 0x0000000000002000ULL
+#define XRXMAC_STAT_MSK_RXHIST6_CNT_EXP 0x0000000000001000ULL
+#define XRXMAC_STAT_MSK_RXHIST5_CNT_EXP 0x0000000000000800ULL
+#define XRXMAC_STAT_MSK_RXHIST4_CNT_EXP 0x0000000000000400ULL
+#define XRXMAC_STAT_MSK_RXHIST3_CNT_EXP 0x0000000000000200ULL
+#define XRXMAC_STAT_MSK_RXHIST2_CNT_EXP 0x0000000000000100ULL
+#define XRXMAC_STAT_MSK_RXHIST1_CNT_EXP 0x0000000000000080ULL
+#define XRXMAC_STAT_MSK_RXOCTET_CNT_EXP 0x0000000000000040ULL
+#define XRXMAC_STAT_MSK_CVIOLERR_CNT_EXP 0x0000000000000020ULL
+#define XRXMAC_STAT_MSK_LENERR_CNT_EXP 0x0000000000000010ULL
+#define XRXMAC_STAT_MSK_CRCERR_CNT_EXP 0x0000000000000008ULL
+#define XRXMAC_STAT_MSK_RXUFLOW_CNT_EXP 0x0000000000000004ULL
+#define XRXMAC_STAT_MSK_RXOFLOW_CNT_EXP 0x0000000000000002ULL
+#define XRXMAC_STAT_MSK_FRAME_RCVD 0x0000000000000001ULL
+
+#define XMAC_FC_MSK 0x00050UL
+#define XMAC_FC_MSK_TX_MAC_NPAUSE 0x0000000000000004ULL
+#define XMAC_FC_MSK_TX_MAC_PAUSE 0x0000000000000002ULL
+#define XMAC_FC_MSK_RX_MAC_RPAUSE 0x0000000000000001ULL
+
+#define XMAC_CONFIG 0x00060UL
+#define XMAC_CONFIG_SEL_CLK_25MHZ 0x0000000080000000ULL
+#define XMAC_CONFIG_1G_PCS_BYPASS 0x0000000040000000ULL
+#define XMAC_CONFIG_10G_XPCS_BYPASS 0x0000000020000000ULL
+#define XMAC_CONFIG_MODE_MASK 0x0000000018000000ULL
+#define XMAC_CONFIG_MODE_XGMII 0x0000000000000000ULL
+#define XMAC_CONFIG_MODE_GMII 0x0000000008000000ULL
+#define XMAC_CONFIG_MODE_MII 0x0000000010000000ULL
+#define XMAC_CONFIG_LFS_DISABLE 0x0000000004000000ULL
+#define XMAC_CONFIG_LOOPBACK 0x0000000002000000ULL
+#define XMAC_CONFIG_TX_OUTPUT_EN 0x0000000001000000ULL
+#define XMAC_CONFIG_SEL_POR_CLK_SRC 0x0000000000800000ULL
+#define XMAC_CONFIG_LED_POLARITY 0x0000000000400000ULL
+#define XMAC_CONFIG_FORCE_LED_ON 0x0000000000200000ULL
+#define XMAC_CONFIG_PASS_FLOW_CTRL 0x0000000000100000ULL
+#define XMAC_CONFIG_RCV_PAUSE_ENABLE 0x0000000000080000ULL
+#define XMAC_CONFIG_MAC2IPP_PKT_CNT_EN 0x0000000000040000ULL
+#define XMAC_CONFIG_STRIP_CRC 0x0000000000020000ULL
+#define XMAC_CONFIG_ADDR_FILTER_EN 0x0000000000010000ULL
+#define XMAC_CONFIG_HASH_FILTER_EN 0x0000000000008000ULL
+#define XMAC_CONFIG_RX_CODEV_CHK_DIS 0x0000000000004000ULL
+#define XMAC_CONFIG_RESERVED_MULTICAST 0x0000000000002000ULL
+#define XMAC_CONFIG_RX_CRC_CHK_DIS 0x0000000000001000ULL
+#define XMAC_CONFIG_ERR_CHK_DIS 0x0000000000000800ULL
+#define XMAC_CONFIG_PROMISC_GROUP 0x0000000000000400ULL
+#define XMAC_CONFIG_PROMISCUOUS 0x0000000000000200ULL
+#define XMAC_CONFIG_RX_MAC_ENABLE 0x0000000000000100ULL
+#define XMAC_CONFIG_WARNING_MSG_EN 0x0000000000000080ULL
+#define XMAC_CONFIG_ALWAYS_NO_CRC 0x0000000000000008ULL
+#define XMAC_CONFIG_VAR_MIN_IPG_EN 0x0000000000000004ULL
+#define XMAC_CONFIG_STRETCH_MODE 0x0000000000000002ULL
+#define XMAC_CONFIG_TX_ENABLE 0x0000000000000001ULL
+
+#define XMAC_IPG 0x00080UL
+#define XMAC_IPG_STRETCH_CONST 0x0000000000e00000ULL
+#define XMAC_IPG_STRETCH_CONST_SHIFT 21
+#define XMAC_IPG_STRETCH_RATIO 0x00000000001f0000ULL
+#define XMAC_IPG_STRETCH_RATIO_SHIFT 16
+#define XMAC_IPG_IPG_MII_GMII 0x000000000000ff00ULL
+#define XMAC_IPG_IPG_MII_GMII_SHIFT 8
+#define XMAC_IPG_IPG_XGMII 0x0000000000000007ULL
+#define XMAC_IPG_IPG_XGMII_SHIFT 0
+
+#define IPG_12_15_XGMII 3
+#define IPG_16_19_XGMII 4
+#define IPG_20_23_XGMII 5
+#define IPG_12_MII_GMII 10
+#define IPG_13_MII_GMII 11
+#define IPG_14_MII_GMII 12
+#define IPG_15_MII_GMII 13
+#define IPG_16_MII_GMII 14
+
+#define XMAC_MIN 0x00088UL
+#define XMAC_MIN_RX_MIN_PKT_SIZE 0x000000003ff00000ULL
+#define XMAC_MIN_RX_MIN_PKT_SIZE_SHFT 20
+#define XMAC_MIN_SLOT_TIME 0x000000000003fc00ULL
+#define XMAC_MIN_SLOT_TIME_SHFT 10
+#define XMAC_MIN_TX_MIN_PKT_SIZE 0x00000000000003ffULL
+#define XMAC_MIN_TX_MIN_PKT_SIZE_SHFT 0
+
+#define XMAC_MAX 0x00090UL
+#define XMAC_MAX_FRAME_SIZE 0x0000000000003fffULL
+#define XMAC_MAX_FRAME_SIZE_SHFT 0
+
+#define XMAC_ADDR0 0x000a0UL
+#define XMAC_ADDR0_ADDR0 0x000000000000ffffULL
+
+#define XMAC_ADDR1 0x000a8UL
+#define XMAC_ADDR1_ADDR1 0x000000000000ffffULL
+
+#define XMAC_ADDR2 0x000b0UL
+#define XMAC_ADDR2_ADDR2 0x000000000000ffffULL
+
+#define XMAC_ADDR_CMPEN 0x00208UL
+#define XMAC_ADDR_CMPEN_EN15 0x0000000000008000ULL
+#define XMAC_ADDR_CMPEN_EN14 0x0000000000004000ULL
+#define XMAC_ADDR_CMPEN_EN13 0x0000000000002000ULL
+#define XMAC_ADDR_CMPEN_EN12 0x0000000000001000ULL
+#define XMAC_ADDR_CMPEN_EN11 0x0000000000000800ULL
+#define XMAC_ADDR_CMPEN_EN10 0x0000000000000400ULL
+#define XMAC_ADDR_CMPEN_EN9 0x0000000000000200ULL
+#define XMAC_ADDR_CMPEN_EN8 0x0000000000000100ULL
+#define XMAC_ADDR_CMPEN_EN7 0x0000000000000080ULL
+#define XMAC_ADDR_CMPEN_EN6 0x0000000000000040ULL
+#define XMAC_ADDR_CMPEN_EN5 0x0000000000000020ULL
+#define XMAC_ADDR_CMPEN_EN4 0x0000000000000010ULL
+#define XMAC_ADDR_CMPEN_EN3 0x0000000000000008ULL
+#define XMAC_ADDR_CMPEN_EN2 0x0000000000000004ULL
+#define XMAC_ADDR_CMPEN_EN1 0x0000000000000002ULL
+#define XMAC_ADDR_CMPEN_EN0 0x0000000000000001ULL
+
+#define XMAC_NUM_ALT_ADDR 16
+
+#define XMAC_ALT_ADDR0(NUM) (0x00218UL + (NUM)*0x18UL)
+#define XMAC_ALT_ADDR0_ADDR0 0x000000000000ffffULL
+
+#define XMAC_ALT_ADDR1(NUM) (0x00220UL + (NUM)*0x18UL)
+#define XMAC_ALT_ADDR1_ADDR1 0x000000000000ffffULL
+
+#define XMAC_ALT_ADDR2(NUM) (0x00228UL + (NUM)*0x18UL)
+#define XMAC_ALT_ADDR2_ADDR2 0x000000000000ffffULL
+
+#define XMAC_ADD_FILT0 0x00818UL
+#define XMAC_ADD_FILT0_FILT0 0x000000000000ffffULL
+
+#define XMAC_ADD_FILT1 0x00820UL
+#define XMAC_ADD_FILT1_FILT1 0x000000000000ffffULL
+
+#define XMAC_ADD_FILT2 0x00828UL
+#define XMAC_ADD_FILT2_FILT2 0x000000000000ffffULL
+
+#define XMAC_ADD_FILT12_MASK 0x00830UL
+#define XMAC_ADD_FILT12_MASK_VAL 0x00000000000000ffULL
+
+#define XMAC_ADD_FILT00_MASK 0x00838UL
+#define XMAC_ADD_FILT00_MASK_VAL 0x000000000000ffffULL
+
+#define XMAC_HASH_TBL(NUM) (0x00840UL + (NUM) * 0x8UL)
+#define XMAC_HASH_TBL_VAL 0x000000000000ffffULL
+
+#define XMAC_NUM_HOST_INFO 20
+
+#define XMAC_HOST_INFO(NUM) (0x00900UL + (NUM) * 0x8UL)
+
+#define XMAC_PA_DATA0 0x00b80UL
+#define XMAC_PA_DATA0_VAL 0x00000000ffffffffULL
+
+#define XMAC_PA_DATA1 0x00b88UL
+#define XMAC_PA_DATA1_VAL 0x00000000ffffffffULL
+
+#define XMAC_DEBUG_SEL 0x00b90UL
+#define XMAC_DEBUG_SEL_XMAC 0x0000000000000078ULL
+#define XMAC_DEBUG_SEL_MAC 0x0000000000000007ULL
+
+#define XMAC_TRAIN_VEC 0x00b98UL
+#define XMAC_TRAIN_VEC_VAL 0x00000000ffffffffULL
+
+#define RXMAC_BT_CNT 0x00100UL
+#define RXMAC_BT_CNT_COUNT 0x00000000ffffffffULL
+
+#define RXMAC_BC_FRM_CNT 0x00108UL
+#define RXMAC_BC_FRM_CNT_COUNT 0x00000000001fffffULL
+
+#define RXMAC_MC_FRM_CNT 0x00110UL
+#define RXMAC_MC_FRM_CNT_COUNT 0x00000000001fffffULL
+
+#define RXMAC_FRAG_CNT 0x00118UL
+#define RXMAC_FRAG_CNT_COUNT 0x00000000001fffffULL
+
+#define RXMAC_HIST_CNT1 0x00120UL
+#define RXMAC_HIST_CNT1_COUNT 0x00000000001fffffULL
+
+#define RXMAC_HIST_CNT2 0x00128UL
+#define RXMAC_HIST_CNT2_COUNT 0x00000000001fffffULL
+
+#define RXMAC_HIST_CNT3 0x00130UL
+#define RXMAC_HIST_CNT3_COUNT 0x00000000000fffffULL
+
+#define RXMAC_HIST_CNT4 0x00138UL
+#define RXMAC_HIST_CNT4_COUNT 0x000000000007ffffULL
+
+#define RXMAC_HIST_CNT5 0x00140UL
+#define RXMAC_HIST_CNT5_COUNT 0x000000000003ffffULL
+
+#define RXMAC_HIST_CNT6 0x00148UL
+#define RXMAC_HIST_CNT6_COUNT 0x000000000000ffffULL
+
+#define RXMAC_MPSZER_CNT 0x00150UL
+#define RXMAC_MPSZER_CNT_COUNT 0x00000000000000ffULL
+
+#define RXMAC_CRC_ER_CNT 0x00158UL
+#define RXMAC_CRC_ER_CNT_COUNT 0x00000000000000ffULL
+
+#define RXMAC_CD_VIO_CNT 0x00160UL
+#define RXMAC_CD_VIO_CNT_COUNT 0x00000000000000ffULL
+
+#define RXMAC_ALIGN_ERR_CNT 0x00168UL
+#define RXMAC_ALIGN_ERR_CNT_COUNT 0x00000000000000ffULL
+
+#define TXMAC_FRM_CNT 0x00170UL
+#define TXMAC_FRM_CNT_COUNT 0x00000000ffffffffULL
+
+#define TXMAC_BYTE_CNT 0x00178UL
+#define TXMAC_BYTE_CNT_COUNT 0x00000000ffffffffULL
+
+#define LINK_FAULT_CNT 0x00180UL
+#define LINK_FAULT_CNT_COUNT 0x00000000000000ffULL
+
+#define RXMAC_HIST_CNT7 0x00188UL
+#define RXMAC_HIST_CNT7_COUNT 0x0000000007ffffffULL
+
+#define XMAC_SM_REG 0x001a8UL
+#define XMAC_SM_REG_STATE 0x00000000ffffffffULL
+
+#define XMAC_INTER1 0x001b0UL
+#define XMAC_INTERN1_SIGNALS1 0x00000000ffffffffULL
+
+#define XMAC_INTER2 0x001b8UL
+#define XMAC_INTERN2_SIGNALS2 0x00000000ffffffffULL
+
+/* BMAC registers, offset from np->mac_regs */
+
+#define BTXMAC_SW_RST 0x00000UL
+#define BTXMAC_SW_RST_RESET 0x0000000000000001ULL
+
+#define BRXMAC_SW_RST 0x00008UL
+#define BRXMAC_SW_RST_RESET 0x0000000000000001ULL
+
+#define BMAC_SEND_PAUSE 0x00010UL
+#define BMAC_SEND_PAUSE_SEND 0x0000000000010000ULL
+#define BMAC_SEND_PAUSE_TIME 0x000000000000ffffULL
+
+#define BTXMAC_STATUS 0x00020UL
+#define BTXMAC_STATUS_XMIT 0x0000000000000001ULL
+#define BTXMAC_STATUS_UNDERRUN 0x0000000000000002ULL
+#define BTXMAC_STATUS_MAX_PKT_ERR 0x0000000000000004ULL
+#define BTXMAC_STATUS_BYTE_CNT_EXP 0x0000000000000400ULL
+#define BTXMAC_STATUS_FRAME_CNT_EXP 0x0000000000000800ULL
+
+#define BRXMAC_STATUS 0x00028UL
+#define BRXMAC_STATUS_RX_PKT 0x0000000000000001ULL
+#define BRXMAC_STATUS_OVERFLOW 0x0000000000000002ULL
+#define BRXMAC_STATUS_FRAME_CNT_EXP 0x0000000000000004ULL
+#define BRXMAC_STATUS_ALIGN_ERR_EXP 0x0000000000000008ULL
+#define BRXMAC_STATUS_CRC_ERR_EXP 0x0000000000000010ULL
+#define BRXMAC_STATUS_LEN_ERR_EXP 0x0000000000000020ULL
+
+#define BMAC_CTRL_STATUS 0x00030UL
+#define BMAC_CTRL_STATUS_PAUSE_RECV 0x0000000000000001ULL
+#define BMAC_CTRL_STATUS_PAUSE 0x0000000000000002ULL
+#define BMAC_CTRL_STATUS_NOPAUSE 0x0000000000000004ULL
+#define BMAC_CTRL_STATUS_TIME 0x00000000ffff0000ULL
+#define BMAC_CTRL_STATUS_TIME_SHIFT 16
+
+#define BTXMAC_STATUS_MASK 0x00040UL
+#define BRXMAC_STATUS_MASK 0x00048UL
+#define BMAC_CTRL_STATUS_MASK 0x00050UL
+
+#define BTXMAC_CONFIG 0x00060UL
+#define BTXMAC_CONFIG_ENABLE 0x0000000000000001ULL
+#define BTXMAC_CONFIG_FCS_DISABLE 0x0000000000000002ULL
+
+#define BRXMAC_CONFIG 0x00068UL
+#define BRXMAC_CONFIG_DISCARD_DIS 0x0000000000000080ULL
+#define BRXMAC_CONFIG_ADDR_FILT_EN 0x0000000000000040ULL
+#define BRXMAC_CONFIG_HASH_FILT_EN 0x0000000000000020ULL
+#define BRXMAC_CONFIG_PROMISC_GRP 0x0000000000000010ULL
+#define BRXMAC_CONFIG_PROMISC 0x0000000000000008ULL
+#define BRXMAC_CONFIG_STRIP_FCS 0x0000000000000004ULL
+#define BRXMAC_CONFIG_STRIP_PAD 0x0000000000000002ULL
+#define BRXMAC_CONFIG_ENABLE 0x0000000000000001ULL
+
+#define BMAC_CTRL_CONFIG 0x00070UL
+#define BMAC_CTRL_CONFIG_TX_PAUSE_EN 0x0000000000000001ULL
+#define BMAC_CTRL_CONFIG_RX_PAUSE_EN 0x0000000000000002ULL
+#define BMAC_CTRL_CONFIG_PASS_CTRL 0x0000000000000004ULL
+
+#define BMAC_XIF_CONFIG 0x00078UL
+#define BMAC_XIF_CONFIG_TX_OUTPUT_EN 0x0000000000000001ULL
+#define BMAC_XIF_CONFIG_MII_LOOPBACK 0x0000000000000002ULL
+#define BMAC_XIF_CONFIG_GMII_MODE 0x0000000000000008ULL
+#define BMAC_XIF_CONFIG_LINK_LED 0x0000000000000020ULL
+#define BMAC_XIF_CONFIG_LED_POLARITY 0x0000000000000040ULL
+#define BMAC_XIF_CONFIG_25MHZ_CLOCK 0x0000000000000080ULL
+
+#define BMAC_MIN_FRAME 0x000a0UL
+#define BMAC_MIN_FRAME_VAL 0x00000000000003ffULL
+
+#define BMAC_MAX_FRAME 0x000a8UL
+#define BMAC_MAX_FRAME_MAX_BURST 0x000000003fff0000ULL
+#define BMAC_MAX_FRAME_MAX_BURST_SHIFT 16
+#define BMAC_MAX_FRAME_MAX_FRAME 0x0000000000003fffULL
+#define BMAC_MAX_FRAME_MAX_FRAME_SHIFT 0
+
+#define BMAC_PREAMBLE_SIZE 0x000b0UL
+#define BMAC_PREAMBLE_SIZE_VAL 0x00000000000003ffULL
+
+#define BMAC_CTRL_TYPE 0x000c8UL
+
+#define BMAC_ADDR0 0x00100UL
+#define BMAC_ADDR0_ADDR0 0x000000000000ffffULL
+
+#define BMAC_ADDR1 0x00108UL
+#define BMAC_ADDR1_ADDR1 0x000000000000ffffULL
+
+#define BMAC_ADDR2 0x00110UL
+#define BMAC_ADDR2_ADDR2 0x000000000000ffffULL
+
+#define BMAC_NUM_ALT_ADDR 6
+
+#define BMAC_ALT_ADDR0(NUM) (0x00118UL + (NUM)*0x18UL)
+#define BMAC_ALT_ADDR0_ADDR0 0x000000000000ffffULL
+
+#define BMAC_ALT_ADDR1(NUM) (0x00120UL + (NUM)*0x18UL)
+#define BMAC_ALT_ADDR1_ADDR1 0x000000000000ffffULL
+
+#define BMAC_ALT_ADDR2(NUM) (0x00128UL + (NUM)*0x18UL)
+#define BMAC_ALT_ADDR2_ADDR2 0x000000000000ffffULL
+
+#define BMAC_FC_ADDR0 0x00268UL
+#define BMAC_FC_ADDR0_ADDR0 0x000000000000ffffULL
+
+#define BMAC_FC_ADDR1 0x00270UL
+#define BMAC_FC_ADDR1_ADDR1 0x000000000000ffffULL
+
+#define BMAC_FC_ADDR2 0x00278UL
+#define BMAC_FC_ADDR2_ADDR2 0x000000000000ffffULL
+
+#define BMAC_ADD_FILT0 0x00298UL
+#define BMAC_ADD_FILT0_FILT0 0x000000000000ffffULL
+
+#define BMAC_ADD_FILT1 0x002a0UL
+#define BMAC_ADD_FILT1_FILT1 0x000000000000ffffULL
+
+#define BMAC_ADD_FILT2 0x002a8UL
+#define BMAC_ADD_FILT2_FILT2 0x000000000000ffffULL
+
+#define BMAC_ADD_FILT12_MASK 0x002b0UL
+#define BMAC_ADD_FILT12_MASK_VAL 0x00000000000000ffULL
+
+#define BMAC_ADD_FILT00_MASK 0x002b8UL
+#define BMAC_ADD_FILT00_MASK_VAL 0x000000000000ffffULL
+
+#define BMAC_HASH_TBL(NUM) (0x002c0UL + (NUM) * 0x8UL)
+#define BMAC_HASH_TBL_VAL 0x000000000000ffffULL
+
+#define BRXMAC_FRAME_CNT 0x00370
+#define BRXMAC_FRAME_CNT_COUNT 0x000000000000ffffULL
+
+#define BRXMAC_MAX_LEN_ERR_CNT 0x00378
+
+#define BRXMAC_ALIGN_ERR_CNT 0x00380
+#define BRXMAC_ALIGN_ERR_CNT_COUNT 0x000000000000ffffULL
+
+#define BRXMAC_CRC_ERR_CNT 0x00388
+#define  BRXMAC_CRC_ERR_CNT_COUNT	0x000000000000ffffULL
+
+#define BRXMAC_CODE_VIOL_ERR_CNT 0x00390
+#define BRXMAC_CODE_VIOL_ERR_CNT_COUNT 0x000000000000ffffULL
+
+#define BMAC_STATE_MACHINE 0x003a0
+
+#define BMAC_ADDR_CMPEN 0x003f8UL
+#define BMAC_ADDR_CMPEN_EN15 0x0000000000008000ULL
+#define BMAC_ADDR_CMPEN_EN14 0x0000000000004000ULL
+#define BMAC_ADDR_CMPEN_EN13 0x0000000000002000ULL
+#define BMAC_ADDR_CMPEN_EN12 0x0000000000001000ULL
+#define BMAC_ADDR_CMPEN_EN11 0x0000000000000800ULL
+#define BMAC_ADDR_CMPEN_EN10 0x0000000000000400ULL
+#define BMAC_ADDR_CMPEN_EN9 0x0000000000000200ULL
+#define BMAC_ADDR_CMPEN_EN8 0x0000000000000100ULL
+#define BMAC_ADDR_CMPEN_EN7 0x0000000000000080ULL
+#define BMAC_ADDR_CMPEN_EN6 0x0000000000000040ULL
+#define BMAC_ADDR_CMPEN_EN5 0x0000000000000020ULL
+#define BMAC_ADDR_CMPEN_EN4 0x0000000000000010ULL
+#define BMAC_ADDR_CMPEN_EN3 0x0000000000000008ULL
+#define BMAC_ADDR_CMPEN_EN2 0x0000000000000004ULL
+#define BMAC_ADDR_CMPEN_EN1 0x0000000000000002ULL
+#define BMAC_ADDR_CMPEN_EN0 0x0000000000000001ULL
+
+#define BMAC_NUM_HOST_INFO 9
+
+#define BMAC_HOST_INFO(NUM) (0x00400UL + (NUM) * 0x8UL)
+
+#define BTXMAC_BYTE_CNT 0x00448UL
+#define BTXMAC_BYTE_CNT_COUNT 0x00000000ffffffffULL
+
+#define BTXMAC_FRM_CNT 0x00450UL
+#define BTXMAC_FRM_CNT_COUNT 0x00000000ffffffffULL
+
+#define BRXMAC_BYTE_CNT 0x00458UL
+#define BRXMAC_BYTE_CNT_COUNT 0x00000000ffffffffULL
+
+#define HOST_INFO_MPR 0x0000000000000100ULL
+#define HOST_INFO_MACRDCTBLN 0x0000000000000007ULL
+
+/* XPCS registers, offset from np->regs + np->xpcs_off */
+
+#define XPCS_CONTROL1 (FZC_MAC + 0x00000UL)
+#define XPCS_CONTROL1_RESET 0x0000000000008000ULL
+#define XPCS_CONTROL1_LOOPBACK 0x0000000000004000ULL
+#define XPCS_CONTROL1_SPEED_SELECT3 0x0000000000002000ULL
+#define XPCS_CONTROL1_CSR_LOW_PWR 0x0000000000000800ULL
+#define XPCS_CONTROL1_CSR_SPEED1 0x0000000000000040ULL
+#define XPCS_CONTROL1_CSR_SPEED0 0x000000000000003cULL
+
+#define XPCS_STATUS1 (FZC_MAC + 0x00008UL)
+#define XPCS_STATUS1_CSR_FAULT 0x0000000000000080ULL
+#define XPCS_STATUS1_CSR_RXLNK_STAT 0x0000000000000004ULL
+#define XPCS_STATUS1_CSR_LPWR_ABLE 0x0000000000000002ULL
+
+#define XPCS_DEVICE_IDENTIFIER (FZC_MAC + 0x00010UL)
+#define XPCS_DEVICE_IDENTIFIER_VAL 0x00000000ffffffffULL
+
+#define XPCS_SPEED_ABILITY (FZC_MAC + 0x00018UL)
+#define XPCS_SPEED_ABILITY_10GIG 0x0000000000000001ULL
+
+#define XPCS_DEV_IN_PKG (FZC_MAC + 0x00020UL)
+#define XPCS_DEV_IN_PKG_CSR_VEND2 0x0000000080000000ULL
+#define XPCS_DEV_IN_PKG_CSR_VEND1 0x0000000040000000ULL
+#define XPCS_DEV_IN_PKG_DTE_XS 0x0000000000000020ULL
+#define XPCS_DEV_IN_PKG_PHY_XS 0x0000000000000010ULL
+#define XPCS_DEV_IN_PKG_PCS 0x0000000000000008ULL
+#define XPCS_DEV_IN_PKG_WIS 0x0000000000000004ULL
+#define XPCS_DEV_IN_PKG_PMD_PMA 0x0000000000000002ULL
+#define XPCS_DEV_IN_PKG_CLS22 0x0000000000000001ULL
+
+#define XPCS_CONTROL2 (FZC_MAC + 0x00028UL)
+#define XPCS_CONTROL2_CSR_PSC_SEL 0x0000000000000003ULL
+
+#define XPCS_STATUS2 (FZC_MAC + 0x00030UL)
+#define XPCS_STATUS2_CSR_DEV_PRES 0x000000000000c000ULL
+#define XPCS_STATUS2_CSR_TX_FAULT 0x0000000000000800ULL
+#define XPCS_STATUS2_CSR_RCV_FAULT 0x0000000000000400ULL
+#define XPCS_STATUS2_TEN_GBASE_W 0x0000000000000004ULL
+#define XPCS_STATUS2_TEN_GBASE_X 0x0000000000000002ULL
+#define XPCS_STATUS2_TEN_GBASE_R 0x0000000000000001ULL
+
+#define XPCS_PKG_ID (FZC_MAC + 0x00038UL)
+#define XPCS_PKG_ID_VAL 0x00000000ffffffffULL
+
+#define XPCS_STATUS(IDX) (FZC_MAC + 0x00040UL)
+#define XPCS_STATUS_CSR_LANE_ALIGN 0x0000000000001000ULL
+#define XPCS_STATUS_CSR_PATTEST_CAP 0x0000000000000800ULL
+#define XPCS_STATUS_CSR_LANE3_SYNC 0x0000000000000008ULL
+#define XPCS_STATUS_CSR_LANE2_SYNC 0x0000000000000004ULL
+#define XPCS_STATUS_CSR_LANE1_SYNC 0x0000000000000002ULL
+#define XPCS_STATUS_CSR_LANE0_SYNC 0x0000000000000001ULL
+
+#define XPCS_TEST_CONTROL (FZC_MAC + 0x00048UL)
+#define XPCS_TEST_CONTROL_TXTST_EN 0x0000000000000004ULL
+#define XPCS_TEST_CONTROL_TPAT_SEL 0x0000000000000003ULL
+
+#define XPCS_CFG_VENDOR1 (FZC_MAC + 0x00050UL)
+#define XPCS_CFG_VENDOR1_DBG_IOTST 0x0000000000000080ULL
+#define XPCS_CFG_VENDOR1_DBG_SEL 0x0000000000000078ULL
+#define XPCS_CFG_VENDOR1_BYPASS_DET 0x0000000000000004ULL
+#define XPCS_CFG_VENDOR1_TXBUF_EN 0x0000000000000002ULL
+#define XPCS_CFG_VENDOR1_XPCS_EN 0x0000000000000001ULL
+
+#define XPCS_DIAG_VENDOR2 (FZC_MAC + 0x00058UL)
+#define XPCS_DIAG_VENDOR2_SSM_LANE3 0x0000000001e00000ULL
+#define XPCS_DIAG_VENDOR2_SSM_LANE2 0x00000000001e0000ULL
+#define XPCS_DIAG_VENDOR2_SSM_LANE1 0x000000000001e000ULL
+#define XPCS_DIAG_VENDOR2_SSM_LANE0 0x0000000000001e00ULL
+#define XPCS_DIAG_VENDOR2_EBUF_SM 0x00000000000001feULL
+#define XPCS_DIAG_VENDOR2_RCV_SM 0x0000000000000001ULL
+
+#define XPCS_MASK1 (FZC_MAC + 0x00060UL)
+#define XPCS_MASK1_FAULT_MASK 0x0000000000000080ULL
+#define XPCS_MASK1_RXALIGN_STAT_MSK 0x0000000000000004ULL
+
+#define XPCS_PKT_COUNT (FZC_MAC + 0x00068UL)
+#define XPCS_PKT_COUNT_TX 0x00000000ffff0000ULL
+#define XPCS_PKT_COUNT_RX 0x000000000000ffffULL
+
+#define XPCS_TX_SM (FZC_MAC + 0x00070UL)
+#define XPCS_TX_SM_VAL 0x000000000000000fULL
+
+#define XPCS_DESKEW_ERR_CNT (FZC_MAC + 0x00078UL)
+#define XPCS_DESKEW_ERR_CNT_VAL 0x00000000000000ffULL
+
+#define XPCS_SYMERR_CNT01 (FZC_MAC + 0x00080UL)
+#define XPCS_SYMERR_CNT01_LANE1 0x00000000ffff0000ULL
+#define XPCS_SYMERR_CNT01_LANE0 0x000000000000ffffULL
+
+#define XPCS_SYMERR_CNT23 (FZC_MAC + 0x00088UL)
+#define XPCS_SYMERR_CNT23_LANE3 0x00000000ffff0000ULL
+#define XPCS_SYMERR_CNT23_LANE2 0x000000000000ffffULL
+
+#define XPCS_TRAINING_VECTOR (FZC_MAC + 0x00090UL)
+#define XPCS_TRAINING_VECTOR_VAL 0x00000000ffffffffULL
+
+/* PCS registers, offset from np->regs + np->pcs_off */
+
+#define PCS_MII_CTL (FZC_MAC + 0x00000UL)
+#define PCS_MII_CTL_RST 0x0000000000008000ULL
+#define PCS_MII_CTL_10_100_SPEED 0x0000000000002000ULL
+#define PCS_MII_AUTONEG_EN 0x0000000000001000ULL
+#define PCS_MII_PWR_DOWN 0x0000000000000800ULL
+#define PCS_MII_ISOLATE 0x0000000000000400ULL
+#define PCS_MII_AUTONEG_RESTART 0x0000000000000200ULL
+#define PCS_MII_DUPLEX 0x0000000000000100ULL
+#define PCS_MII_COLL_TEST 0x0000000000000080ULL
+#define PCS_MII_1000MB_SPEED 0x0000000000000040ULL
+
+#define PCS_MII_STAT (FZC_MAC + 0x00008UL)
+#define PCS_MII_STAT_EXT_STATUS 0x0000000000000100ULL
+#define PCS_MII_STAT_AUTONEG_DONE 0x0000000000000020ULL
+#define PCS_MII_STAT_REMOTE_FAULT 0x0000000000000010ULL
+#define PCS_MII_STAT_AUTONEG_ABLE 0x0000000000000008ULL
+#define PCS_MII_STAT_LINK_STATUS 0x0000000000000004ULL
+#define PCS_MII_STAT_JABBER_DET 0x0000000000000002ULL
+#define PCS_MII_STAT_EXT_CAP 0x0000000000000001ULL
+
+#define PCS_MII_ADV (FZC_MAC + 0x00010UL)
+#define PCS_MII_ADV_NEXT_PAGE 0x0000000000008000ULL
+#define PCS_MII_ADV_ACK 0x0000000000004000ULL
+#define PCS_MII_ADV_REMOTE_FAULT 0x0000000000003000ULL
+#define PCS_MII_ADV_ASM_DIR 0x0000000000000100ULL
+#define PCS_MII_ADV_PAUSE 0x0000000000000080ULL
+#define PCS_MII_ADV_HALF_DUPLEX 0x0000000000000040ULL
+#define PCS_MII_ADV_FULL_DUPLEX 0x0000000000000020ULL
+
+#define PCS_MII_PARTNER (FZC_MAC + 0x00018UL)
+#define PCS_MII_PARTNER_NEXT_PAGE 0x0000000000008000ULL
+#define PCS_MII_PARTNER_ACK 0x0000000000004000ULL
+#define PCS_MII_PARTNER_REMOTE_FAULT 0x0000000000002000ULL
+#define PCS_MII_PARTNER_PAUSE 0x0000000000000180ULL
+#define PCS_MII_PARTNER_HALF_DUPLEX 0x0000000000000040ULL
+#define PCS_MII_PARTNER_FULL_DUPLEX 0x0000000000000020ULL
+
+#define PCS_CONF (FZC_MAC + 0x00020UL)
+#define PCS_CONF_MASK 0x0000000000000040ULL
+#define PCS_CONF_10MS_TMR_OVERRIDE 0x0000000000000020ULL
+#define PCS_CONF_JITTER_STUDY 0x0000000000000018ULL
+#define PCS_CONF_SIGDET_ACTIVE_LOW 0x0000000000000004ULL
+#define PCS_CONF_SIGDET_OVERRIDE 0x0000000000000002ULL
+#define PCS_CONF_ENABLE 0x0000000000000001ULL
+
+#define PCS_STATE (FZC_MAC + 0x00028UL)
+#define PCS_STATE_D_PARTNER_FAIL 0x0000000020000000ULL
+#define PCS_STATE_D_WAIT_C_CODES_ACK 0x0000000010000000ULL
+#define PCS_STATE_D_SYNC_LOSS 0x0000000008000000ULL
+#define PCS_STATE_D_NO_GOOD_C_CODES 0x0000000004000000ULL
+#define PCS_STATE_D_SERDES 0x0000000002000000ULL
+#define PCS_STATE_D_BREAKLINK_C_CODES 0x0000000001000000ULL
+#define PCS_STATE_L_SIGDET 0x0000000000400000ULL
+#define PCS_STATE_L_SYNC_LOSS 0x0000000000200000ULL
+#define PCS_STATE_L_C_CODES 0x0000000000100000ULL
+#define PCS_STATE_LINK_CFG_STATE 0x000000000001e000ULL
+#define PCS_STATE_SEQ_DET_STATE 0x0000000000001800ULL
+#define PCS_STATE_WORD_SYNC_STATE 0x0000000000000700ULL
+#define PCS_STATE_NO_IDLE 0x000000000000000fULL
+
+#define PCS_INTERRUPT (FZC_MAC + 0x00030UL)
+#define PCS_INTERRUPT_LSTATUS 0x0000000000000004ULL
+
+#define PCS_DPATH_MODE (FZC_MAC + 0x000a0UL)
+#define PCS_DPATH_MODE_PCS 0x0000000000000000ULL
+#define PCS_DPATH_MODE_MII 0x0000000000000002ULL
+#define PCS_DPATH_MODE_LINKUP_F_ENAB 0x0000000000000001ULL
+
+#define PCS_PKT_CNT (FZC_MAC + 0x000c0UL)
+#define PCS_PKT_CNT_RX 0x0000000007ff0000ULL
+#define PCS_PKT_CNT_TX 0x00000000000007ffULL
+
+#define MIF_BB_MDC (FZC_MAC + 0x16000UL)
+#define MIF_BB_MDC_CLK 0x0000000000000001ULL
+
+#define MIF_BB_MDO (FZC_MAC + 0x16008UL)
+#define MIF_BB_MDO_DAT 0x0000000000000001ULL
+
+#define MIF_BB_MDO_EN (FZC_MAC + 0x16010UL)
+#define MIF_BB_MDO_EN_VAL 0x0000000000000001ULL
+
+#define MIF_FRAME_OUTPUT (FZC_MAC + 0x16018UL)
+#define MIF_FRAME_OUTPUT_ST 0x00000000c0000000ULL
+#define MIF_FRAME_OUTPUT_ST_SHIFT 30
+#define MIF_FRAME_OUTPUT_OP_ADDR 0x0000000000000000ULL
+#define MIF_FRAME_OUTPUT_OP_WRITE 0x0000000010000000ULL
+#define MIF_FRAME_OUTPUT_OP_READ_INC 0x0000000020000000ULL
+#define MIF_FRAME_OUTPUT_OP_READ 0x0000000030000000ULL
+#define MIF_FRAME_OUTPUT_OP_SHIFT 28
+#define MIF_FRAME_OUTPUT_PORT 0x000000000f800000ULL
+#define MIF_FRAME_OUTPUT_PORT_SHIFT 23
+#define MIF_FRAME_OUTPUT_REG 0x00000000007c0000ULL
+#define MIF_FRAME_OUTPUT_REG_SHIFT 18
+#define MIF_FRAME_OUTPUT_TA 0x0000000000030000ULL
+#define MIF_FRAME_OUTPUT_TA_SHIFT 16
+#define MIF_FRAME_OUTPUT_DATA 0x000000000000ffffULL
+#define MIF_FRAME_OUTPUT_DATA_SHIFT 0
+
+#define MDIO_ADDR_OP(port, dev, reg) \
+ ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \
+ MIF_FRAME_OUTPUT_OP_ADDR | \
+ (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \
+ (dev << MIF_FRAME_OUTPUT_REG_SHIFT) | \
+ (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \
+ (reg << MIF_FRAME_OUTPUT_DATA_SHIFT))
+
+#define MDIO_READ_OP(port, dev) \
+ ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \
+ MIF_FRAME_OUTPUT_OP_READ | \
+ (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \
+ (dev << MIF_FRAME_OUTPUT_REG_SHIFT) | \
+ (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT))
+
+#define MDIO_WRITE_OP(port, dev, data) \
+ ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \
+ MIF_FRAME_OUTPUT_OP_WRITE | \
+ (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \
+ (dev << MIF_FRAME_OUTPUT_REG_SHIFT) | \
+ (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \
+ (data << MIF_FRAME_OUTPUT_DATA_SHIFT))
+
+#define MII_READ_OP(port, reg) \
+ ((1 << MIF_FRAME_OUTPUT_ST_SHIFT) | \
+ (2 << MIF_FRAME_OUTPUT_OP_SHIFT) | \
+ (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \
+ (reg << MIF_FRAME_OUTPUT_REG_SHIFT) | \
+ (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT))
+
+#define MII_WRITE_OP(port, reg, data) \
+ ((1 << MIF_FRAME_OUTPUT_ST_SHIFT) | \
+ (1 << MIF_FRAME_OUTPUT_OP_SHIFT) | \
+ (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \
+ (reg << MIF_FRAME_OUTPUT_REG_SHIFT) | \
+ (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \
+ (data << MIF_FRAME_OUTPUT_DATA_SHIFT))
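+
+/* Illustrative helpers, not part of the original patch: after a frame
+ * composed with one of the OP macros above is written to
+ * MIF_FRAME_OUTPUT, the register is polled until the low bit of the
+ * turnaround (TA) field reads back as one, and the result of a read
+ * sits in the DATA field.  These decoders only inspect a value that
+ * has already been read back; the register I/O itself is left to the
+ * driver's accessors.
+ */
+static inline int mif_frame_output_done(u64 frame)
+{
+        /* Low TA bit set means the serial transaction has completed. */
+        return (frame >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1;
+}
+
+static inline u16 mif_frame_output_data(u64 frame)
+{
+        /* Read data is returned in the low 16 bits of the frame. */
+        return frame & MIF_FRAME_OUTPUT_DATA;
+}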
+
+#define MIF_CONFIG (FZC_MAC + 0x16020UL)
+#define MIF_CONFIG_ATCA_GE 0x0000000000010000ULL
+#define MIF_CONFIG_INDIRECT_MODE 0x0000000000008000ULL
+#define MIF_CONFIG_POLL_PRT_PHYADDR 0x0000000000003c00ULL
+#define MIF_CONFIG_POLL_DEV_REG_ADDR 0x00000000000003e0ULL
+#define MIF_CONFIG_BB_MODE 0x0000000000000010ULL
+#define MIF_CONFIG_POLL_EN 0x0000000000000008ULL
+#define MIF_CONFIG_BB_SER_SEL 0x0000000000000006ULL
+#define MIF_CONFIG_MANUAL_MODE 0x0000000000000001ULL
+
+#define MIF_POLL_STATUS (FZC_MAC + 0x16028UL)
+#define MIF_POLL_STATUS_DATA 0x00000000ffff0000ULL
+#define MIF_POLL_STATUS_STAT 0x000000000000ffffULL
+
+#define MIF_POLL_MASK (FZC_MAC + 0x16030UL)
+#define MIF_POLL_MASK_VAL 0x000000000000ffffULL
+
+#define MIF_SM (FZC_MAC + 0x16038UL)
+#define MIF_SM_PORT_ADDR 0x00000000001f0000ULL
+#define MIF_SM_MDI_1 0x0000000000004000ULL
+#define MIF_SM_MDI_0 0x0000000000002400ULL
+#define MIF_SM_MDCLK 0x0000000000001000ULL
+#define MIF_SM_MDO_EN 0x0000000000000800ULL
+#define MIF_SM_MDO 0x0000000000000400ULL
+#define MIF_SM_MDI 0x0000000000000200ULL
+#define MIF_SM_CTL 0x00000000000001c0ULL
+#define MIF_SM_EX 0x000000000000003fULL
+
+#define MIF_STATUS (FZC_MAC + 0x16040UL)
+#define MIF_STATUS_MDINT1 0x0000000000000020ULL
+#define MIF_STATUS_MDINT0 0x0000000000000010ULL
+
+#define MIF_MASK (FZC_MAC + 0x16048UL)
+#define MIF_MASK_MDINT1 0x0000000000000020ULL
+#define MIF_MASK_MDINT0 0x0000000000000010ULL
+#define MIF_MASK_PEU_ERR 0x0000000000000008ULL
+#define MIF_MASK_YC 0x0000000000000004ULL
+#define MIF_MASK_XGE_ERR0 0x0000000000000002ULL
+#define MIF_MASK_MIF_INIT_DONE 0x0000000000000001ULL
+
+#define ENET_SERDES_RESET (FZC_MAC + 0x14000UL)
+#define ENET_SERDES_RESET_1 0x0000000000000002ULL
+#define ENET_SERDES_RESET_0 0x0000000000000001ULL
+
+#define ENET_SERDES_CFG (FZC_MAC + 0x14008UL)
+#define ENET_SERDES_BE_LOOPBACK 0x0000000000000002ULL
+#define ENET_SERDES_CFG_FORCE_RDY 0x0000000000000001ULL
+
+#define ENET_SERDES_0_PLL_CFG (FZC_MAC + 0x14010UL)
+#define ENET_SERDES_PLL_FBDIV0 0x0000000000000001ULL
+#define ENET_SERDES_PLL_FBDIV1 0x0000000000000002ULL
+#define ENET_SERDES_PLL_FBDIV2 0x0000000000000004ULL
+#define ENET_SERDES_PLL_HRATE0 0x0000000000000008ULL
+#define ENET_SERDES_PLL_HRATE1 0x0000000000000010ULL
+#define ENET_SERDES_PLL_HRATE2 0x0000000000000020ULL
+#define ENET_SERDES_PLL_HRATE3 0x0000000000000040ULL
+
+#define ENET_SERDES_0_CTRL_CFG (FZC_MAC + 0x14018UL)
+#define ENET_SERDES_CTRL_SDET_0 0x0000000000000001ULL
+#define ENET_SERDES_CTRL_SDET_1 0x0000000000000002ULL
+#define ENET_SERDES_CTRL_SDET_2 0x0000000000000004ULL
+#define ENET_SERDES_CTRL_SDET_3 0x0000000000000008ULL
+#define ENET_SERDES_CTRL_EMPH_0 0x0000000000000070ULL
+#define ENET_SERDES_CTRL_EMPH_0_SHIFT 4
+#define ENET_SERDES_CTRL_EMPH_1 0x0000000000000380ULL
+#define ENET_SERDES_CTRL_EMPH_1_SHIFT 7
+#define ENET_SERDES_CTRL_EMPH_2 0x0000000000001c00ULL
+#define ENET_SERDES_CTRL_EMPH_2_SHIFT 10
+#define ENET_SERDES_CTRL_EMPH_3 0x000000000000e000ULL
+#define ENET_SERDES_CTRL_EMPH_3_SHIFT 13
+#define ENET_SERDES_CTRL_LADJ_0 0x0000000000070000ULL
+#define ENET_SERDES_CTRL_LADJ_0_SHIFT 16
+#define ENET_SERDES_CTRL_LADJ_1 0x0000000000380000ULL
+#define ENET_SERDES_CTRL_LADJ_1_SHIFT 19
+#define ENET_SERDES_CTRL_LADJ_2 0x0000000001c00000ULL
+#define ENET_SERDES_CTRL_LADJ_2_SHIFT 22
+#define ENET_SERDES_CTRL_LADJ_3 0x000000000e000000ULL
+#define ENET_SERDES_CTRL_LADJ_3_SHIFT 25
+#define ENET_SERDES_CTRL_RXITERM_0 0x0000000010000000ULL
+#define ENET_SERDES_CTRL_RXITERM_1 0x0000000020000000ULL
+#define ENET_SERDES_CTRL_RXITERM_2 0x0000000040000000ULL
+#define ENET_SERDES_CTRL_RXITERM_3 0x0000000080000000ULL
+
+#define ENET_SERDES_0_TEST_CFG (FZC_MAC + 0x14020UL)
+#define ENET_SERDES_TEST_MD_0 0x0000000000000003ULL
+#define ENET_SERDES_TEST_MD_0_SHIFT 0
+#define ENET_SERDES_TEST_MD_1 0x000000000000000cULL
+#define ENET_SERDES_TEST_MD_1_SHIFT 2
+#define ENET_SERDES_TEST_MD_2 0x0000000000000030ULL
+#define ENET_SERDES_TEST_MD_2_SHIFT 4
+#define ENET_SERDES_TEST_MD_3 0x00000000000000c0ULL
+#define ENET_SERDES_TEST_MD_3_SHIFT 6
+
+#define ENET_TEST_MD_NO_LOOPBACK 0x0
+#define ENET_TEST_MD_EWRAP 0x1
+#define ENET_TEST_MD_PAD_LOOPBACK 0x2
+#define ENET_TEST_MD_REV_LOOPBACK 0x3
+
+#define ENET_SERDES_1_PLL_CFG (FZC_MAC + 0x14028UL)
+#define ENET_SERDES_1_CTRL_CFG (FZC_MAC + 0x14030UL)
+#define ENET_SERDES_1_TEST_CFG (FZC_MAC + 0x14038UL)
+
+#define ENET_RGMII_CFG_REG (FZC_MAC + 0x14040UL)
+
+#define ESR_INT_SIGNALS (FZC_MAC + 0x14800UL)
+#define ESR_INT_SIGNALS_ALL 0x00000000ffffffffULL
+#define ESR_INT_SIGNALS_P0_BITS 0x0000000033e0000fULL
+#define ESR_INT_SIGNALS_P1_BITS 0x000000000c1f00f0ULL
+#define ESR_INT_SRDY0_P0 0x0000000020000000ULL
+#define ESR_INT_DET0_P0 0x0000000010000000ULL
+#define ESR_INT_SRDY0_P1 0x0000000008000000ULL
+#define ESR_INT_DET0_P1 0x0000000004000000ULL
+#define ESR_INT_XSRDY_P0 0x0000000002000000ULL
+#define ESR_INT_XDP_P0_CH3 0x0000000001000000ULL
+#define ESR_INT_XDP_P0_CH2 0x0000000000800000ULL
+#define ESR_INT_XDP_P0_CH1 0x0000000000400000ULL
+#define ESR_INT_XDP_P0_CH0 0x0000000000200000ULL
+#define ESR_INT_XSRDY_P1 0x0000000000100000ULL
+#define ESR_INT_XDP_P1_CH3 0x0000000000080000ULL
+#define ESR_INT_XDP_P1_CH2 0x0000000000040000ULL
+#define ESR_INT_XDP_P1_CH1 0x0000000000020000ULL
+#define ESR_INT_XDP_P1_CH0 0x0000000000010000ULL
+#define ESR_INT_SLOSS_P1_CH3 0x0000000000000080ULL
+#define ESR_INT_SLOSS_P1_CH2 0x0000000000000040ULL
+#define ESR_INT_SLOSS_P1_CH1 0x0000000000000020ULL
+#define ESR_INT_SLOSS_P1_CH0 0x0000000000000010ULL
+#define ESR_INT_SLOSS_P0_CH3 0x0000000000000008ULL
+#define ESR_INT_SLOSS_P0_CH2 0x0000000000000004ULL
+#define ESR_INT_SLOSS_P0_CH1 0x0000000000000002ULL
+#define ESR_INT_SLOSS_P0_CH0 0x0000000000000001ULL
+
+#define ESR_DEBUG_SEL (FZC_MAC + 0x14808UL)
+#define ESR_DEBUG_SEL_VAL 0x000000000000003fULL
+
+/* SerDes registers behind MIF */
+#define NIU_ESR_DEV_ADDR 0x1e
+#define ESR_BASE 0x0000
+
+#define ESR_RXTX_COMM_CTRL_L (ESR_BASE + 0x0000)
+#define ESR_RXTX_COMM_CTRL_H (ESR_BASE + 0x0001)
+
+#define ESR_RXTX_RESET_CTRL_L (ESR_BASE + 0x0002)
+#define ESR_RXTX_RESET_CTRL_H (ESR_BASE + 0x0003)
+
+#define ESR_RX_POWER_CTRL_L (ESR_BASE + 0x0004)
+#define ESR_RX_POWER_CTRL_H (ESR_BASE + 0x0005)
+
+#define ESR_TX_POWER_CTRL_L (ESR_BASE + 0x0006)
+#define ESR_TX_POWER_CTRL_H (ESR_BASE + 0x0007)
+
+#define ESR_MISC_POWER_CTRL_L (ESR_BASE + 0x0008)
+#define ESR_MISC_POWER_CTRL_H (ESR_BASE + 0x0009)
+
+#define ESR_RXTX_CTRL_L(CHAN) (ESR_BASE + 0x0080 + (CHAN) * 0x10)
+#define ESR_RXTX_CTRL_H(CHAN) (ESR_BASE + 0x0081 + (CHAN) * 0x10)
+#define ESR_RXTX_CTRL_BIASCNTL 0x80000000
+#define ESR_RXTX_CTRL_RESV1 0x7c000000
+#define ESR_RXTX_CTRL_TDENFIFO 0x02000000
+#define ESR_RXTX_CTRL_TDWS20 0x01000000
+#define ESR_RXTX_CTRL_VMUXLO 0x00c00000
+#define ESR_RXTX_CTRL_VMUXLO_SHIFT 22
+#define ESR_RXTX_CTRL_VPULSELO 0x00300000
+#define ESR_RXTX_CTRL_VPULSELO_SHIFT 20
+#define ESR_RXTX_CTRL_RESV2 0x000f0000
+#define ESR_RXTX_CTRL_RESV3 0x0000c000
+#define ESR_RXTX_CTRL_RXPRESWIN 0x00003000
+#define ESR_RXTX_CTRL_RXPRESWIN_SHIFT 12
+#define ESR_RXTX_CTRL_RESV4 0x00000800
+#define ESR_RXTX_CTRL_RISEFALL 0x00000700
+#define ESR_RXTX_CTRL_RISEFALL_SHIFT 8
+#define ESR_RXTX_CTRL_RESV5 0x000000fe
+#define ESR_RXTX_CTRL_ENSTRETCH 0x00000001
+
+#define ESR_RXTX_TUNING_L(CHAN) (ESR_BASE + 0x0082 + (CHAN) * 0x10)
+#define ESR_RXTX_TUNING_H(CHAN) (ESR_BASE + 0x0083 + (CHAN) * 0x10)
+
+#define ESR_RX_SYNCCHAR_L(CHAN) (ESR_BASE + 0x0084 + (CHAN) * 0x10)
+#define ESR_RX_SYNCCHAR_H(CHAN) (ESR_BASE + 0x0085 + (CHAN) * 0x10)
+
+#define ESR_RXTX_TEST_L(CHAN) (ESR_BASE + 0x0086 + (CHAN) * 0x10)
+#define ESR_RXTX_TEST_H(CHAN) (ESR_BASE + 0x0087 + (CHAN) * 0x10)
+
+#define ESR_GLUE_CTRL0_L(CHAN) (ESR_BASE + 0x0088 + (CHAN) * 0x10)
+#define ESR_GLUE_CTRL0_H(CHAN) (ESR_BASE + 0x0089 + (CHAN) * 0x10)
+#define ESR_GLUE_CTRL0_RESV1 0xf8000000
+#define ESR_GLUE_CTRL0_BLTIME 0x07000000
+#define ESR_GLUE_CTRL0_BLTIME_SHIFT 24
+#define ESR_GLUE_CTRL0_RESV2 0x00ff0000
+#define ESR_GLUE_CTRL0_RXLOS_TEST 0x00008000
+#define ESR_GLUE_CTRL0_RESV3 0x00004000
+#define ESR_GLUE_CTRL0_RXLOSENAB 0x00002000
+#define ESR_GLUE_CTRL0_FASTRESYNC 0x00001000
+#define ESR_GLUE_CTRL0_SRATE 0x00000f00
+#define ESR_GLUE_CTRL0_SRATE_SHIFT 8
+#define ESR_GLUE_CTRL0_THCNT 0x000000ff
+#define ESR_GLUE_CTRL0_THCNT_SHIFT 0
+
+#define BLTIME_64_CYCLES 0
+#define BLTIME_128_CYCLES 1
+#define BLTIME_256_CYCLES 2
+#define BLTIME_300_CYCLES 3
+#define BLTIME_384_CYCLES 4
+#define BLTIME_512_CYCLES 5
+#define BLTIME_1024_CYCLES 6
+#define BLTIME_2048_CYCLES 7
+
+#define ESR_GLUE_CTRL1_L(CHAN) (ESR_BASE + 0x008a + (CHAN) * 0x10)
+#define ESR_GLUE_CTRL1_H(CHAN) (ESR_BASE + 0x008b + (CHAN) * 0x10)
+#define ESR_RXTX_TUNING1_L(CHAN) (ESR_BASE + 0x00c2 + (CHAN) * 0x10)
+#define ESR_RXTX_TUNING1_H(CHAN) (ESR_BASE + 0x00c3 + (CHAN) * 0x10)
+#define ESR_RXTX_TUNING2_L(CHAN) (ESR_BASE + 0x0102 + (CHAN) * 0x10)
+#define ESR_RXTX_TUNING2_H(CHAN) (ESR_BASE + 0x0103 + (CHAN) * 0x10)
+#define ESR_RXTX_TUNING3_L(CHAN) (ESR_BASE + 0x0142 + (CHAN) * 0x10)
+#define ESR_RXTX_TUNING3_H(CHAN) (ESR_BASE + 0x0143 + (CHAN) * 0x10)
+
+#define NIU_ESR2_DEV_ADDR 0x1e
+#define ESR2_BASE 0x8000
+
+#define ESR2_TI_PLL_CFG_L (ESR2_BASE + 0x000)
+#define ESR2_TI_PLL_CFG_H (ESR2_BASE + 0x001)
+#define PLL_CFG_STD 0x00000c00
+#define PLL_CFG_STD_SHIFT 10
+#define PLL_CFG_LD 0x00000300
+#define PLL_CFG_LD_SHIFT 8
+#define PLL_CFG_MPY 0x0000001e
+#define PLL_CFG_MPY_SHIFT 1
+#define PLL_CFG_MPY_4X 0x0
+#define PLL_CFG_MPY_5X 0x00000002
+#define PLL_CFG_MPY_6X 0x00000004
+#define PLL_CFG_MPY_8X 0x00000008
+#define PLL_CFG_MPY_10X 0x0000000a
+#define PLL_CFG_MPY_12X 0x0000000c
+#define PLL_CFG_MPY_12P5X 0x0000000e
+#define PLL_CFG_ENPLL 0x00000001
+
+#define ESR2_TI_PLL_STS_L (ESR2_BASE + 0x002)
+#define ESR2_TI_PLL_STS_H (ESR2_BASE + 0x003)
+#define PLL_STS_LOCK 0x00000001
+
+#define ESR2_TI_PLL_TEST_CFG_L (ESR2_BASE + 0x004)
+#define ESR2_TI_PLL_TEST_CFG_H (ESR2_BASE + 0x005)
+#define PLL_TEST_INVPATT 0x00004000
+#define PLL_TEST_RATE 0x00003000
+#define PLL_TEST_RATE_SHIFT 12
+#define PLL_TEST_CFG_ENBSAC 0x00000400
+#define PLL_TEST_CFG_ENBSRX 0x00000200
+#define PLL_TEST_CFG_ENBSTX 0x00000100
+#define PLL_TEST_CFG_LOOPBACK_PAD 0x00000040
+#define PLL_TEST_CFG_LOOPBACK_CML_DIS 0x00000080
+#define PLL_TEST_CFG_LOOPBACK_CML_EN 0x000000c0
+#define PLL_TEST_CFG_CLKBYP 0x00000030
+#define PLL_TEST_CFG_CLKBYP_SHIFT 4
+#define PLL_TEST_CFG_EN_RXPATT 0x00000008
+#define PLL_TEST_CFG_EN_TXPATT 0x00000004
+#define PLL_TEST_CFG_TPATT 0x00000003
+#define PLL_TEST_CFG_TPATT_SHIFT 0
+
+#define ESR2_TI_PLL_TX_CFG_L(CHAN) (ESR2_BASE + 0x100 + (CHAN) * 4)
+#define ESR2_TI_PLL_TX_CFG_H(CHAN) (ESR2_BASE + 0x101 + (CHAN) * 4)
+#define PLL_TX_CFG_RDTCT 0x00600000
+#define PLL_TX_CFG_RDTCT_SHIFT 21
+#define PLL_TX_CFG_ENIDL 0x00100000
+#define PLL_TX_CFG_BSTX 0x00020000
+#define PLL_TX_CFG_ENFTP 0x00010000
+#define PLL_TX_CFG_DE 0x0000f000
+#define PLL_TX_CFG_DE_SHIFT 12
+#define PLL_TX_CFG_SWING_125MV 0x00000000
+#define PLL_TX_CFG_SWING_250MV 0x00000200
+#define PLL_TX_CFG_SWING_500MV 0x00000400
+#define PLL_TX_CFG_SWING_625MV 0x00000600
+#define PLL_TX_CFG_SWING_750MV 0x00000800
+#define PLL_TX_CFG_SWING_1000MV 0x00000a00
+#define PLL_TX_CFG_SWING_1250MV 0x00000c00
+#define PLL_TX_CFG_SWING_1375MV 0x00000e00
+#define PLL_TX_CFG_CM 0x00000100
+#define PLL_TX_CFG_INVPAIR 0x00000080
+#define PLL_TX_CFG_RATE 0x00000060
+#define PLL_TX_CFG_RATE_SHIFT 5
+#define PLL_TX_CFG_RATE_FULL 0x0
+#define PLL_TX_CFG_RATE_HALF 0x20
+#define PLL_TX_CFG_RATE_QUAD 0x40
+#define PLL_TX_CFG_BUSWIDTH 0x0000001c
+#define PLL_TX_CFG_BUSWIDTH_SHIFT 2
+#define PLL_TX_CFG_ENTEST 0x00000002
+#define PLL_TX_CFG_ENTX 0x00000001
+
+#define ESR2_TI_PLL_TX_STS_L(CHAN) (ESR2_BASE + 0x102 + (CHAN) * 4)
+#define ESR2_TI_PLL_TX_STS_H(CHAN) (ESR2_BASE + 0x103 + (CHAN) * 4)
+#define PLL_TX_STS_RDTCTIP 0x00000002
+#define PLL_TX_STS_TESTFAIL 0x00000001
+
+#define ESR2_TI_PLL_RX_CFG_L(CHAN) (ESR2_BASE + 0x120 + (CHAN) * 4)
+#define ESR2_TI_PLL_RX_CFG_H(CHAN) (ESR2_BASE + 0x121 + (CHAN) * 4)
+#define PLL_RX_CFG_BSINRXN 0x02000000
+#define PLL_RX_CFG_BSINRXP 0x01000000
+#define PLL_RX_CFG_EQ_MAX_LF 0x00000000
+#define PLL_RX_CFG_EQ_LP_ADAPTIVE 0x00080000
+#define PLL_RX_CFG_EQ_LP_1084MHZ 0x00400000
+#define PLL_RX_CFG_EQ_LP_805MHZ 0x00480000
+#define PLL_RX_CFG_EQ_LP_573MHZ 0x00500000
+#define PLL_RX_CFG_EQ_LP_402MHZ 0x00580000
+#define PLL_RX_CFG_EQ_LP_304MHZ 0x00600000
+#define PLL_RX_CFG_EQ_LP_216MHZ 0x00680000
+#define PLL_RX_CFG_EQ_LP_156MHZ 0x00700000
+#define PLL_RX_CFG_EQ_LP_135MHZ 0x00780000
+#define PLL_RX_CFG_EQ_SHIFT 19
+#define PLL_RX_CFG_CDR 0x00070000
+#define PLL_RX_CFG_CDR_SHIFT 16
+#define PLL_RX_CFG_LOS_DIS 0x00000000
+#define PLL_RX_CFG_LOS_HTHRESH 0x00004000
+#define PLL_RX_CFG_LOS_LTHRESH 0x00008000
+#define PLL_RX_CFG_ALIGN_DIS 0x00000000
+#define PLL_RX_CFG_ALIGN_ENA 0x00001000
+#define PLL_RX_CFG_ALIGN_JOG 0x00002000
+#define PLL_RX_CFG_TERM_VDDT 0x00000000
+#define PLL_RX_CFG_TERM_0P8VDDT 0x00000100
+#define PLL_RX_CFG_TERM_FLOAT 0x00000300
+#define PLL_RX_CFG_INVPAIR 0x00000080
+#define PLL_RX_CFG_RATE 0x00000060
+#define PLL_RX_CFG_RATE_SHIFT 5
+#define PLL_RX_CFG_RATE_FULL 0x0
+#define PLL_RX_CFG_RATE_HALF 0x20
+#define PLL_RX_CFG_RATE_QUAD 0x40
+#define PLL_RX_CFG_BUSWIDTH 0x0000001c
+#define PLL_RX_CFG_BUSWIDTH_SHIFT 2
+#define PLL_RX_CFG_ENTEST 0x00000002
+#define PLL_RX_CFG_ENRX 0x00000001
+
+#define ESR2_TI_PLL_RX_STS_L(CHAN) (ESR2_BASE + 0x122 + (CHAN) * 4)
+#define ESR2_TI_PLL_RX_STS_H(CHAN) (ESR2_BASE + 0x123 + (CHAN) * 4)
+#define PLL_RX_STS_CRCIDTCT 0x00000200
+#define PLL_RX_STS_CWDTCT 0x00000100
+#define PLL_RX_STS_BSRXN 0x00000020
+#define PLL_RX_STS_BSRXP 0x00000010
+#define PLL_RX_STS_LOSDTCT 0x00000008
+#define PLL_RX_STS_ODDCG 0x00000004
+#define PLL_RX_STS_SYNC 0x00000002
+#define PLL_RX_STS_TESTFAIL 0x00000001
+
+#define ENET_VLAN_TBL(IDX) (FZC_FFLP + 0x00000UL + (IDX) * 8UL)
+#define ENET_VLAN_TBL_PARITY1 0x0000000000020000ULL
+#define ENET_VLAN_TBL_PARITY0 0x0000000000010000ULL
+#define ENET_VLAN_TBL_VPR 0x0000000000000008ULL
+#define ENET_VLAN_TBL_VLANRDCTBLN 0x0000000000000007ULL
+#define ENET_VLAN_TBL_SHIFT(PORT) ((PORT) * 4)
+
+#define ENET_VLAN_TBL_NUM_ENTRIES 4096
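+
+/* Sketch, not part of the original patch: each 64-bit VLAN table entry
+ * packs one 4-bit { VPR, VLANRDCTBLN } nibble per port, so a port's
+ * fields are recovered by shifting the entry right by
+ * ENET_VLAN_TBL_SHIFT(port) before masking.
+ */
+static inline unsigned int enet_vlan_tbl_rdctbl(u64 entry, int port)
+{
+        u64 nibble = entry >> ENET_VLAN_TBL_SHIFT(port);
+
+        return nibble & ENET_VLAN_TBL_VLANRDCTBLN;
+}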
+
+#define FFLP_VLAN_PAR_ERR (FZC_FFLP + 0x0800UL)
+#define FFLP_VLAN_PAR_ERR_ERR 0x0000000080000000ULL
+#define FFLP_VLAN_PAR_ERR_M_ERR 0x0000000040000000ULL
+#define FFLP_VLAN_PAR_ERR_ADDR 0x000000003ffc0000ULL
+#define FFLP_VLAN_PAR_ERR_DATA 0x000000000003ffffULL
+
+#define L2_CLS(IDX) (FZC_FFLP + 0x20000UL + (IDX) * 8UL)
+#define L2_CLS_VLD 0x0000000000010000ULL
+#define L2_CLS_ETYPE 0x000000000000ffffULL
+#define L2_CLS_ETYPE_SHIFT 0
+
+#define L3_CLS(IDX) (FZC_FFLP + 0x20010UL + (IDX) * 8UL)
+#define L3_CLS_VALID 0x0000000002000000ULL
+#define L3_CLS_IPVER 0x0000000001000000ULL
+#define L3_CLS_PID 0x0000000000ff0000ULL
+#define L3_CLS_PID_SHIFT 16
+#define L3_CLS_TOSMASK 0x000000000000ff00ULL
+#define L3_CLS_TOSMASK_SHIFT 8
+#define L3_CLS_TOS 0x00000000000000ffULL
+#define L3_CLS_TOS_SHIFT 0
+
+#define TCAM_KEY(IDX) (FZC_FFLP + 0x20030UL + (IDX) * 8UL)
+#define TCAM_KEY_DISC 0x0000000000000008ULL
+#define TCAM_KEY_TSEL 0x0000000000000004ULL
+#define TCAM_KEY_IPADDR 0x0000000000000001ULL
+
+#define TCAM_KEY_0 (FZC_FFLP + 0x20090UL)
+#define TCAM_KEY_0_KEY 0x00000000000000ffULL /* bits 192-199 */
+
+#define TCAM_KEY_1 (FZC_FFLP + 0x20098UL)
+#define TCAM_KEY_1_KEY 0xffffffffffffffffULL /* bits 128-191 */
+
+#define TCAM_KEY_2 (FZC_FFLP + 0x200a0UL)
+#define TCAM_KEY_2_KEY 0xffffffffffffffffULL /* bits 64-127 */
+
+#define TCAM_KEY_3 (FZC_FFLP + 0x200a8UL)
+#define TCAM_KEY_3_KEY 0xffffffffffffffffULL /* bits 0-63 */
+
+#define TCAM_KEY_MASK_0 (FZC_FFLP + 0x200b0UL)
+#define TCAM_KEY_MASK_0_KEY_SEL 0x00000000000000ffULL /* bits 192-199 */
+
+#define TCAM_KEY_MASK_1 (FZC_FFLP + 0x200b8UL)
+#define TCAM_KEY_MASK_1_KEY_SEL 0xffffffffffffffffULL /* bits 128-191 */
+
+#define TCAM_KEY_MASK_2 (FZC_FFLP + 0x200c0UL)
+#define TCAM_KEY_MASK_2_KEY_SEL 0xffffffffffffffffULL /* bits 64-127 */
+
+#define TCAM_KEY_MASK_3 (FZC_FFLP + 0x200c8UL)
+#define TCAM_KEY_MASK_3_KEY_SEL 0xffffffffffffffffULL /* bits 0-63 */
+
+#define TCAM_CTL (FZC_FFLP + 0x200d0UL)
+#define TCAM_CTL_RWC 0x00000000001c0000ULL
+#define TCAM_CTL_RWC_TCAM_WRITE 0x0000000000000000ULL
+#define TCAM_CTL_RWC_TCAM_READ 0x0000000000040000ULL
+#define TCAM_CTL_RWC_TCAM_COMPARE 0x0000000000080000ULL
+#define TCAM_CTL_RWC_RAM_WRITE 0x0000000000100000ULL
+#define TCAM_CTL_RWC_RAM_READ 0x0000000000140000ULL
+#define TCAM_CTL_STAT 0x0000000000020000ULL
+#define TCAM_CTL_MATCH 0x0000000000010000ULL
+#define TCAM_CTL_LOC 0x00000000000003ffULL
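+
+/* Sketch, not part of the original patch: TCAM accesses are kicked off
+ * by writing an RWC opcode plus an entry index to TCAM_CTL, then
+ * polling until the hardware raises TCAM_CTL_STAT.  The helper below
+ * only composes the command word; issuing it and polling is left to
+ * the driver's register accessors.
+ */
+static inline u64 tcam_ctl_read_cmd(unsigned int index)
+{
+        return TCAM_CTL_RWC_TCAM_READ | (index & TCAM_CTL_LOC);
+}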
+
+#define TCAM_ERR (FZC_FFLP + 0x200d8UL)
+#define TCAM_ERR_ERR 0x0000000080000000ULL
+#define TCAM_ERR_P_ECC 0x0000000040000000ULL
+#define TCAM_ERR_MULT 0x0000000020000000ULL
+#define TCAM_ERR_ADDR 0x0000000000ff0000ULL
+#define TCAM_ERR_SYNDROME 0x000000000000ffffULL
+
+#define HASH_LOOKUP_ERR_LOG1 (FZC_FFLP + 0x200e0UL)
+#define HASH_LOOKUP_ERR_LOG1_ERR 0x0000000000000008ULL
+#define HASH_LOOKUP_ERR_LOG1_MULT_LK 0x0000000000000004ULL
+#define HASH_LOOKUP_ERR_LOG1_CU 0x0000000000000002ULL
+#define HASH_LOOKUP_ERR_LOG1_MULT_BIT 0x0000000000000001ULL
+
+#define HASH_LOOKUP_ERR_LOG2 (FZC_FFLP + 0x200e8UL)
+#define HASH_LOOKUP_ERR_LOG2_H1 0x000000007ffff800ULL
+#define HASH_LOOKUP_ERR_LOG2_SUBAREA 0x0000000000000700ULL
+#define HASH_LOOKUP_ERR_LOG2_SYNDROME 0x00000000000000ffULL
+
+#define FFLP_CFG_1 (FZC_FFLP + 0x20100UL)
+#define FFLP_CFG_1_TCAM_DIS 0x0000000004000000ULL
+#define FFLP_CFG_1_PIO_DBG_SEL 0x0000000003800000ULL
+#define FFLP_CFG_1_PIO_FIO_RST 0x0000000000400000ULL
+#define FFLP_CFG_1_PIO_FIO_LAT 0x0000000000300000ULL
+#define FFLP_CFG_1_CAMLAT 0x00000000000f0000ULL
+#define FFLP_CFG_1_CAMLAT_SHIFT 16
+#define FFLP_CFG_1_CAMRATIO 0x000000000000f000ULL
+#define FFLP_CFG_1_CAMRATIO_SHIFT 12
+#define FFLP_CFG_1_FCRAMRATIO 0x0000000000000f00ULL
+#define FFLP_CFG_1_FCRAMRATIO_SHIFT 8
+#define FFLP_CFG_1_FCRAMOUTDR_MASK 0x00000000000000f0ULL
+#define FFLP_CFG_1_FCRAMOUTDR_NORMAL 0x0000000000000000ULL
+#define FFLP_CFG_1_FCRAMOUTDR_STRONG 0x0000000000000050ULL
+#define FFLP_CFG_1_FCRAMOUTDR_WEAK 0x00000000000000a0ULL
+#define FFLP_CFG_1_FCRAMQS 0x0000000000000008ULL
+#define FFLP_CFG_1_ERRORDIS 0x0000000000000004ULL
+#define FFLP_CFG_1_FFLPINITDONE 0x0000000000000002ULL
+#define FFLP_CFG_1_LLCSNAP 0x0000000000000001ULL
+
+#define DEFAULT_FCRAMRATIO 10
+
+#define DEFAULT_TCAM_LATENCY 4
+#define DEFAULT_TCAM_ACCESS_RATIO 10
+
+#define TCP_CFLAG_MSK (FZC_FFLP + 0x20108UL)
+#define TCP_CFLAG_MSK_MASK 0x0000000000000fffULL
+
+#define FCRAM_REF_TMR (FZC_FFLP + 0x20110UL)
+#define FCRAM_REF_TMR_MAX 0x00000000ffff0000ULL
+#define FCRAM_REF_TMR_MAX_SHIFT 16
+#define FCRAM_REF_TMR_MIN 0x000000000000ffffULL
+#define FCRAM_REF_TMR_MIN_SHIFT 0
+
+#define DEFAULT_FCRAM_REFRESH_MAX 512
+#define DEFAULT_FCRAM_REFRESH_MIN 512
+
+#define FCRAM_FIO_ADDR (FZC_FFLP + 0x20118UL)
+#define FCRAM_FIO_ADDR_ADDR 0x00000000000000ffULL
+
+#define FCRAM_FIO_DAT (FZC_FFLP + 0x20120UL)
+#define FCRAM_FIO_DAT_DATA 0x000000000000ffffULL
+
+#define FCRAM_ERR_TST0 (FZC_FFLP + 0x20128UL)
+#define FCRAM_ERR_TST0_SYND 0x00000000000000ffULL
+
+#define FCRAM_ERR_TST1 (FZC_FFLP + 0x20130UL)
+#define FCRAM_ERR_TST1_DAT 0x00000000ffffffffULL
+
+#define FCRAM_ERR_TST2 (FZC_FFLP + 0x20138UL)
+#define FCRAM_ERR_TST2_DAT 0x00000000ffffffffULL
+
+#define FFLP_ERR_MASK (FZC_FFLP + 0x20140UL)
+#define FFLP_ERR_MASK_HSH_TBL_DAT 0x00000000000007f8ULL
+#define FFLP_ERR_MASK_HSH_TBL_LKUP 0x0000000000000004ULL
+#define FFLP_ERR_MASK_TCAM 0x0000000000000002ULL
+#define FFLP_ERR_MASK_VLAN 0x0000000000000001ULL
+
+#define FFLP_DBG_TRAIN_VCT (FZC_FFLP + 0x20148UL)
+#define FFLP_DBG_TRAIN_VCT_VECTOR 0x00000000ffffffffULL
+
+#define FCRAM_PHY_RD_LAT (FZC_FFLP + 0x20150UL)
+#define FCRAM_PHY_RD_LAT_LAT 0x00000000000000ffULL
+
+/* Ethernet TCAM format */
+#define TCAM_ETHKEY0_RESV1 0xffffffffffffff00ULL
+#define TCAM_ETHKEY0_CLASS_CODE 0x00000000000000f8ULL
+#define TCAM_ETHKEY0_CLASS_CODE_SHIFT 3
+#define TCAM_ETHKEY0_RESV2 0x0000000000000007ULL
+#define TCAM_ETHKEY1_FRAME_BYTE0_7(NUM) (0xffULL << ((7 - (NUM)) * 8))
+#define TCAM_ETHKEY2_FRAME_BYTE8 0xff00000000000000ULL
+#define TCAM_ETHKEY2_FRAME_BYTE8_SHIFT 56
+#define TCAM_ETHKEY2_FRAME_BYTE9 0x00ff000000000000ULL
+#define TCAM_ETHKEY2_FRAME_BYTE9_SHIFT 48
+#define TCAM_ETHKEY2_FRAME_BYTE10 0x0000ff0000000000ULL
+#define TCAM_ETHKEY2_FRAME_BYTE10_SHIFT 40
+#define TCAM_ETHKEY2_FRAME_RESV 0x000000ffffffffffULL
+#define TCAM_ETHKEY3_FRAME_RESV 0xffffffffffffffffULL
+
+/* IPV4 TCAM format */
+#define TCAM_V4KEY0_RESV1 0xffffffffffffff00ULL
+#define TCAM_V4KEY0_CLASS_CODE 0x00000000000000f8ULL
+#define TCAM_V4KEY0_CLASS_CODE_SHIFT 3
+#define TCAM_V4KEY0_RESV2 0x0000000000000007ULL
+#define TCAM_V4KEY1_L2RDCNUM 0xf800000000000000ULL
+#define TCAM_V4KEY1_L2RDCNUM_SHIFT 59
+#define TCAM_V4KEY1_NOPORT 0x0400000000000000ULL
+#define TCAM_V4KEY1_RESV 0x03ffffffffffffffULL
+#define TCAM_V4KEY2_RESV 0xffff000000000000ULL
+#define TCAM_V4KEY2_TOS 0x0000ff0000000000ULL
+#define TCAM_V4KEY2_TOS_SHIFT 40
+#define TCAM_V4KEY2_PROTO 0x000000ff00000000ULL
+#define TCAM_V4KEY2_PROTO_SHIFT 32
+#define TCAM_V4KEY2_PORT_SPI 0x00000000ffffffffULL
+#define TCAM_V4KEY2_PORT_SPI_SHIFT 0
+#define TCAM_V4KEY3_SADDR 0xffffffff00000000ULL
+#define TCAM_V4KEY3_SADDR_SHIFT 32
+#define TCAM_V4KEY3_DADDR 0x00000000ffffffffULL
+#define TCAM_V4KEY3_DADDR_SHIFT 0
+
+/* IPV6 TCAM format */
+#define TCAM_V6KEY0_RESV1 0xffffffffffffff00ULL
+#define TCAM_V6KEY0_CLASS_CODE 0x00000000000000f8ULL
+#define TCAM_V6KEY0_CLASS_CODE_SHIFT 3
+#define TCAM_V6KEY0_RESV2 0x0000000000000007ULL
+#define TCAM_V6KEY1_L2RDCNUM 0xf800000000000000ULL
+#define TCAM_V6KEY1_L2RDCNUM_SHIFT 59
+#define TCAM_V6KEY1_NOPORT 0x0400000000000000ULL
+#define TCAM_V6KEY1_RESV 0x03ff000000000000ULL
+#define TCAM_V6KEY1_TOS 0x0000ff0000000000ULL
+#define TCAM_V6KEY1_TOS_SHIFT 40
+#define TCAM_V6KEY1_NEXT_HDR 0x000000ff00000000ULL
+#define TCAM_V6KEY1_NEXT_HDR_SHIFT 32
+#define TCAM_V6KEY1_PORT_SPI 0x00000000ffffffffULL
+#define TCAM_V6KEY1_PORT_SPI_SHIFT 0
+#define TCAM_V6KEY2_ADDR_HIGH 0xffffffffffffffffULL
+#define TCAM_V6KEY3_ADDR_LOW 0xffffffffffffffffULL
+
+#define TCAM_ASSOCDATA_SYNDROME 0x000003fffc000000ULL
+#define TCAM_ASSOCDATA_SYNDROME_SHIFT 26
+#define TCAM_ASSOCDATA_ZFID 0x0000000003ffc000ULL
+#define TCAM_ASSOCDATA_ZFID_SHIFT 14
+#define TCAM_ASSOCDATA_V4_ECC_OK 0x0000000000002000ULL
+#define TCAM_ASSOCDATA_DISC 0x0000000000001000ULL
+#define TCAM_ASSOCDATA_TRES_MASK 0x0000000000000c00ULL
+#define TCAM_ASSOCDATA_TRES_USE_L2RDC 0x0000000000000000ULL
+#define TCAM_ASSOCDATA_TRES_USE_OFFSET 0x0000000000000400ULL
+#define TCAM_ASSOCDATA_TRES_OVR_RDC 0x0000000000000800ULL
+#define TCAM_ASSOCDATA_TRES_OVR_RDC_OFF 0x0000000000000c00ULL
+#define TCAM_ASSOCDATA_RDCTBL 0x0000000000000380ULL
+#define TCAM_ASSOCDATA_RDCTBL_SHIFT 7
+#define TCAM_ASSOCDATA_OFFSET 0x000000000000007cULL
+#define TCAM_ASSOCDATA_OFFSET_SHIFT 2
+#define TCAM_ASSOCDATA_ZFVLD 0x0000000000000002ULL
+#define TCAM_ASSOCDATA_AGE 0x0000000000000001ULL
+
+#define FLOW_KEY(IDX) (FZC_FFLP + 0x40000UL + (IDX) * 8UL)
+#define FLOW_KEY_PORT 0x0000000000000200ULL
+#define FLOW_KEY_L2DA 0x0000000000000100ULL
+#define FLOW_KEY_VLAN 0x0000000000000080ULL
+#define FLOW_KEY_IPSA 0x0000000000000040ULL
+#define FLOW_KEY_IPDA 0x0000000000000020ULL
+#define FLOW_KEY_PROTO 0x0000000000000010ULL
+#define FLOW_KEY_L4_0 0x000000000000000cULL
+#define FLOW_KEY_L4_0_SHIFT 2
+#define FLOW_KEY_L4_1 0x0000000000000003ULL
+#define FLOW_KEY_L4_1_SHIFT 0
+
+#define FLOW_KEY_L4_NONE 0x0
+#define FLOW_KEY_L4_RESV 0x1
+#define FLOW_KEY_L4_BYTE12 0x2
+#define FLOW_KEY_L4_BYTE56 0x3
+
+#define H1POLY (FZC_FFLP + 0x40060UL)
+#define H1POLY_INITVAL 0x00000000ffffffffULL
+
+#define H2POLY (FZC_FFLP + 0x40068UL)
+#define H2POLY_INITVAL 0x000000000000ffffULL
+
+#define FLW_PRT_SEL(IDX) (FZC_FFLP + 0x40070UL + (IDX) * 8UL)
+#define FLW_PRT_SEL_EXT 0x0000000000010000ULL
+#define FLW_PRT_SEL_MASK 0x0000000000001f00ULL
+#define FLW_PRT_SEL_MASK_SHIFT 8
+#define FLW_PRT_SEL_BASE 0x000000000000001fULL
+#define FLW_PRT_SEL_BASE_SHIFT 0
+
+#define HASH_TBL_ADDR(IDX) (FFLP + 0x00000UL + (IDX) * 8192UL)
+#define HASH_TBL_ADDR_AUTOINC 0x0000000000800000ULL
+#define HASH_TBL_ADDR_ADDR 0x00000000007fffffULL
+
+#define HASH_TBL_DATA(IDX) (FFLP + 0x00008UL + (IDX) * 8192UL)
+#define HASH_TBL_DATA_DATA 0xffffffffffffffffULL
+
+/* FCRAM hash table entries are up to 8 64-bit words in size.
+ * The layout of each entry is determined by the settings in the
+ * first word, which is the header.
+ *
+ * The indexing is controllable per partition (there is one partition
+ * per RDC group, thus a total of eight) using the BASE and MASK fields
+ * of FLW_PRT_SEL above.
+ */
+#define FCRAM_SIZE 0x800000
+#define FCRAM_NUM_PARTITIONS 8
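+
+/* Sketch, not part of the original patch: composing a partition's
+ * FLW_PRT_SEL value from a 5-bit base, a 5-bit mask and the extension
+ * bit, using the field definitions above.
+ */
+static inline u64 flw_prt_sel_compose(unsigned int base, unsigned int mask,
+                                      int ext)
+{
+        u64 val;
+
+        val  = ((u64)mask << FLW_PRT_SEL_MASK_SHIFT) & FLW_PRT_SEL_MASK;
+        val |= ((u64)base << FLW_PRT_SEL_BASE_SHIFT) & FLW_PRT_SEL_BASE;
+        if (ext)
+                val |= FLW_PRT_SEL_EXT;
+        return val;
+}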
+
+/* Generic HASH entry header, used for all non-optimized formats. */
+#define HASH_HEADER_FMT 0x8000000000000000ULL
+#define HASH_HEADER_EXT 0x4000000000000000ULL
+#define HASH_HEADER_VALID 0x2000000000000000ULL
+#define HASH_HEADER_RESVD 0x1000000000000000ULL
+#define HASH_HEADER_L2_DADDR 0x0ffffffffffff000ULL
+#define HASH_HEADER_L2_DADDR_SHIFT 12
+#define HASH_HEADER_VLAN 0x0000000000000fffULL
+#define HASH_HEADER_VLAN_SHIFT 0
+
+/* Optimized format, just a header with a special layout defined below.
+ * Set FMT and EXT both to zero to indicate this layout is being used.
+ */
+#define HASH_OPT_HEADER_FMT 0x8000000000000000ULL
+#define HASH_OPT_HEADER_EXT 0x4000000000000000ULL
+#define HASH_OPT_HEADER_VALID 0x2000000000000000ULL
+#define HASH_OPT_HEADER_RDCOFF 0x1f00000000000000ULL
+#define HASH_OPT_HEADER_RDCOFF_SHIFT 56
+#define HASH_OPT_HEADER_HASH2 0x00ffff0000000000ULL
+#define HASH_OPT_HEADER_HASH2_SHIFT 40
+#define HASH_OPT_HEADER_RESVD 0x000000ff00000000ULL
+#define HASH_OPT_HEADER_USERINFO 0x00000000ffffffffULL
+#define HASH_OPT_HEADER_USERINFO_SHIFT 0
+
+/* Port and protocol word used for ipv4 and ipv6 layouts. */
+#define HASH_PORT_DPORT 0xffff000000000000ULL
+#define HASH_PORT_DPORT_SHIFT 48
+#define HASH_PORT_SPORT 0x0000ffff00000000ULL
+#define HASH_PORT_SPORT_SHIFT 32
+#define HASH_PORT_PROTO 0x00000000ff000000ULL
+#define HASH_PORT_PROTO_SHIFT 24
+#define HASH_PORT_PORT_OFF 0x0000000000c00000ULL
+#define HASH_PORT_PORT_OFF_SHIFT 22
+#define HASH_PORT_PORT_RESV 0x00000000003fffffULL
+
+/* Action word used for ipv4 and ipv6 layouts. */
+#define HASH_ACTION_RESV1 0xe000000000000000ULL
+#define HASH_ACTION_RDCOFF 0x1f00000000000000ULL
+#define HASH_ACTION_RDCOFF_SHIFT 56
+#define HASH_ACTION_ZFVALID 0x0080000000000000ULL
+#define HASH_ACTION_RESV2 0x0070000000000000ULL
+#define HASH_ACTION_ZFID 0x000fff0000000000ULL
+#define HASH_ACTION_ZFID_SHIFT 40
+#define HASH_ACTION_RESV3 0x000000ff00000000ULL
+#define HASH_ACTION_USERINFO 0x00000000ffffffffULL
+#define HASH_ACTION_USERINFO_SHIFT 0
+
+/* IPV4 address word. Addresses are in network byte order. */
+#define HASH_IP4ADDR_SADDR 0xffffffff00000000ULL
+#define HASH_IP4ADDR_SADDR_SHIFT 32
+#define HASH_IP4ADDR_DADDR 0x00000000ffffffffULL
+#define HASH_IP4ADDR_DADDR_SHIFT 0
+
+/* IPV6 address layout is 4 words; the first two are saddr, the next
+ * two are daddr. Addresses are in network byte order.
+ */
+
+struct fcram_hash_opt {
+ u64 header;
+};
+
+/* EXT=1, FMT=0 */
+struct fcram_hash_ipv4 {
+ u64 header;
+ u64 addrs;
+ u64 ports;
+ u64 action;
+};
+
+/* EXT=1, FMT=1 */
+struct fcram_hash_ipv6 {
+ u64 header;
+ u64 addrs[4];
+ u64 ports;
+ u64 action;
+};
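+
+/* Sketch, not part of the original patch: filling in an IPv4 entry
+ * (EXT=1, FMT=0 per the comment above) from network-byte-order
+ * addresses and host-order ports, using the field masks defined above.
+ * Only the commonly used fields are set here; the remaining header and
+ * action bits stay zero.
+ */
+static inline void fcram_hash_ipv4_fill(struct fcram_hash_ipv4 *ent,
+                                        u32 saddr, u32 daddr,
+                                        u16 sport, u16 dport,
+                                        u8 proto, u32 userinfo)
+{
+        ent->header = HASH_HEADER_EXT | HASH_HEADER_VALID;
+        ent->addrs  = ((u64)saddr << HASH_IP4ADDR_SADDR_SHIFT) |
+                      ((u64)daddr << HASH_IP4ADDR_DADDR_SHIFT);
+        ent->ports  = ((u64)dport << HASH_PORT_DPORT_SHIFT) |
+                      ((u64)sport << HASH_PORT_SPORT_SHIFT) |
+                      ((u64)proto << HASH_PORT_PROTO_SHIFT);
+        ent->action = (u64)userinfo << HASH_ACTION_USERINFO_SHIFT;
+}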
+
+#define HASH_TBL_DATA_LOG(IDX) (FFLP + 0x00010UL + (IDX) * 8192UL)
+#define HASH_TBL_DATA_LOG_ERR 0x0000000080000000ULL
+#define HASH_TBL_DATA_LOG_ADDR 0x000000007fffff00ULL
+#define HASH_TBL_DATA_LOG_SYNDROME 0x00000000000000ffULL
+
+#define RX_DMA_CK_DIV (FZC_DMC + 0x00000UL)
+#define RX_DMA_CK_DIV_CNT 0x000000000000ffffULL
+
+#define DEF_RDC(IDX) (FZC_DMC + 0x00008UL + (IDX) * 0x8UL)
+#define DEF_RDC_VAL 0x000000000000001fULL
+
+#define PT_DRR_WT(IDX) (FZC_DMC + 0x00028UL + (IDX) * 0x8UL)
+#define PT_DRR_WT_VAL 0x000000000000ffffULL
+
+#define PT_DRR_WEIGHT_DEFAULT_10G 0x0400
+#define PT_DRR_WEIGHT_DEFAULT_1G 0x0066
+
+#define PT_USE(IDX) (FZC_DMC + 0x00048UL + (IDX) * 0x8UL)
+#define PT_USE_CNT 0x00000000000fffffULL
+
+#define RED_RAN_INIT (FZC_DMC + 0x00068UL)
+#define RED_RAN_INIT_OPMODE 0x0000000000010000ULL
+#define RED_RAN_INIT_VAL 0x000000000000ffffULL
+
+#define RX_ADDR_MD (FZC_DMC + 0x00070UL)
+#define RX_ADDR_MD_DBG_PT_MUX_SEL 0x000000000000000cULL
+#define RX_ADDR_MD_RAM_ACC 0x0000000000000002ULL
+#define RX_ADDR_MD_MODE32 0x0000000000000001ULL
+
+#define RDMC_PRE_PAR_ERR (FZC_DMC + 0x00078UL)
+#define RDMC_PRE_PAR_ERR_ERR 0x0000000000008000ULL
+#define RDMC_PRE_PAR_ERR_MERR 0x0000000000004000ULL
+#define RDMC_PRE_PAR_ERR_ADDR 0x00000000000000ffULL
+
+#define RDMC_SHA_PAR_ERR (FZC_DMC + 0x00080UL)
+#define RDMC_SHA_PAR_ERR_ERR 0x0000000000008000ULL
+#define RDMC_SHA_PAR_ERR_MERR 0x0000000000004000ULL
+#define RDMC_SHA_PAR_ERR_ADDR 0x00000000000000ffULL
+
+#define RDMC_MEM_ADDR (FZC_DMC + 0x00088UL)
+#define RDMC_MEM_ADDR_PRE_SHAD 0x0000000000000100ULL
+#define RDMC_MEM_ADDR_ADDR 0x00000000000000ffULL
+
+#define RDMC_MEM_DAT0 (FZC_DMC + 0x00090UL)
+#define RDMC_MEM_DAT0_DATA 0x00000000ffffffffULL /* bits 31:0 */
+
+#define RDMC_MEM_DAT1 (FZC_DMC + 0x00098UL)
+#define RDMC_MEM_DAT1_DATA 0x00000000ffffffffULL /* bits 63:32 */
+
+#define RDMC_MEM_DAT2 (FZC_DMC + 0x000a0UL)
+#define RDMC_MEM_DAT2_DATA 0x00000000ffffffffULL /* bits 95:64 */
+
+#define RDMC_MEM_DAT3 (FZC_DMC + 0x000a8UL)
+#define RDMC_MEM_DAT3_DATA 0x00000000ffffffffULL /* bits 127:96 */
+
+#define RDMC_MEM_DAT4 (FZC_DMC + 0x000b0UL)
+#define RDMC_MEM_DAT4_DATA 0x00000000000fffffULL /* bits 147:128 */
+
+#define RX_CTL_DAT_FIFO_STAT (FZC_DMC + 0x000b8UL)
+#define RX_CTL_DAT_FIFO_STAT_ID_MISMATCH 0x0000000000000100ULL
+#define RX_CTL_DAT_FIFO_STAT_ZCP_EOP_ERR 0x00000000000000f0ULL
+#define RX_CTL_DAT_FIFO_STAT_IPP_EOP_ERR 0x000000000000000fULL
+
+#define RX_CTL_DAT_FIFO_MASK (FZC_DMC + 0x000c0UL)
+#define RX_CTL_DAT_FIFO_MASK_ID_MISMATCH 0x0000000000000100ULL
+#define RX_CTL_DAT_FIFO_MASK_ZCP_EOP_ERR 0x00000000000000f0ULL
+#define RX_CTL_DAT_FIFO_MASK_IPP_EOP_ERR 0x000000000000000fULL
+
+#define RDMC_TRAINING_VECTOR (FZC_DMC + 0x000c8UL)
+#define RDMC_TRAINING_VECTOR_TRAINING_VECTOR 0x00000000ffffffffULL
+
+#define RX_CTL_DAT_FIFO_STAT_DBG (FZC_DMC + 0x000d0UL)
+#define RX_CTL_DAT_FIFO_STAT_DBG_ID_MISMATCH 0x0000000000000100ULL
+#define RX_CTL_DAT_FIFO_STAT_DBG_ZCP_EOP_ERR 0x00000000000000f0ULL
+#define RX_CTL_DAT_FIFO_STAT_DBG_IPP_EOP_ERR 0x000000000000000fULL
+
+#define RDC_TBL(TBL,SLOT) (FZC_ZCP + 0x10000UL + \
+ (TBL) * (8UL * 16UL) + \
+ (SLOT) * 8UL)
+#define RDC_TBL_RDC 0x000000000000000fULL
+
+#define RX_LOG_PAGE_VLD(IDX) (FZC_DMC + 0x20000UL + (IDX) * 0x40UL)
+#define RX_LOG_PAGE_VLD_FUNC 0x000000000000000cULL
+#define RX_LOG_PAGE_VLD_FUNC_SHIFT 2
+#define RX_LOG_PAGE_VLD_PAGE1 0x0000000000000002ULL
+#define RX_LOG_PAGE_VLD_PAGE0 0x0000000000000001ULL
+
+#define RX_LOG_MASK1(IDX) (FZC_DMC + 0x20008UL + (IDX) * 0x40UL)
+#define RX_LOG_MASK1_MASK 0x00000000ffffffffULL
+
+#define RX_LOG_VAL1(IDX) (FZC_DMC + 0x20010UL + (IDX) * 0x40UL)
+#define RX_LOG_VAL1_VALUE 0x00000000ffffffffULL
+
+#define RX_LOG_MASK2(IDX) (FZC_DMC + 0x20018UL + (IDX) * 0x40UL)
+#define RX_LOG_MASK2_MASK 0x00000000ffffffffULL
+
+#define RX_LOG_VAL2(IDX) (FZC_DMC + 0x20020UL + (IDX) * 0x40UL)
+#define RX_LOG_VAL2_VALUE 0x00000000ffffffffULL
+
+#define RX_LOG_PAGE_RELO1(IDX) (FZC_DMC + 0x20028UL + (IDX) * 0x40UL)
+#define RX_LOG_PAGE_RELO1_RELO 0x00000000ffffffffULL
+
+#define RX_LOG_PAGE_RELO2(IDX) (FZC_DMC + 0x20030UL + (IDX) * 0x40UL)
+#define RX_LOG_PAGE_RELO2_RELO 0x00000000ffffffffULL
+
+#define RX_LOG_PAGE_HDL(IDX) (FZC_DMC + 0x20038UL + (IDX) * 0x40UL)
+#define RX_LOG_PAGE_HDL_HANDLE 0x00000000000fffffULL
+
+#define TX_LOG_PAGE_VLD(IDX) (FZC_DMC + 0x40000UL + (IDX) * 0x200UL)
+#define TX_LOG_PAGE_VLD_FUNC 0x000000000000000cULL
+#define TX_LOG_PAGE_VLD_FUNC_SHIFT 2
+#define TX_LOG_PAGE_VLD_PAGE1 0x0000000000000002ULL
+#define TX_LOG_PAGE_VLD_PAGE0 0x0000000000000001ULL
+
+#define TX_LOG_MASK1(IDX) (FZC_DMC + 0x40008UL + (IDX) * 0x200UL)
+#define TX_LOG_MASK1_MASK 0x00000000ffffffffULL
+
+#define TX_LOG_VAL1(IDX) (FZC_DMC + 0x40010UL + (IDX) * 0x200UL)
+#define TX_LOG_VAL1_VALUE 0x00000000ffffffffULL
+
+#define TX_LOG_MASK2(IDX) (FZC_DMC + 0x40018UL + (IDX) * 0x200UL)
+#define TX_LOG_MASK2_MASK 0x00000000ffffffffULL
+
+#define TX_LOG_VAL2(IDX) (FZC_DMC + 0x40020UL + (IDX) * 0x200UL)
+#define TX_LOG_VAL2_VALUE 0x00000000ffffffffULL
+
+#define TX_LOG_PAGE_RELO1(IDX) (FZC_DMC + 0x40028UL + (IDX) * 0x200UL)
+#define TX_LOG_PAGE_RELO1_RELO 0x00000000ffffffffULL
+
+#define TX_LOG_PAGE_RELO2(IDX) (FZC_DMC + 0x40030UL + (IDX) * 0x200UL)
+#define TX_LOG_PAGE_RELO2_RELO 0x00000000ffffffffULL
+
+#define TX_LOG_PAGE_HDL(IDX) (FZC_DMC + 0x40038UL + (IDX) * 0x200UL)
+#define TX_LOG_PAGE_HDL_HANDLE 0x00000000000fffffULL
+
+#define TX_ADDR_MD (FZC_DMC + 0x45000UL)
+#define TX_ADDR_MD_MODE32 0x0000000000000001ULL
+
+#define RDC_RED_PARA(IDX) (FZC_DMC + 0x30000UL + (IDX) * 0x40UL)
+#define RDC_RED_PARA_THRE_SYN 0x00000000fff00000ULL
+#define RDC_RED_PARA_THRE_SYN_SHIFT 20
+#define RDC_RED_PARA_WIN_SYN 0x00000000000f0000ULL
+#define RDC_RED_PARA_WIN_SYN_SHIFT 16
+#define RDC_RED_PARA_THRE 0x000000000000fff0ULL
+#define RDC_RED_PARA_THRE_SHIFT 4
+#define RDC_RED_PARA_WIN 0x000000000000000fULL
+#define RDC_RED_PARA_WIN_SHIFT 0
+
+#define RED_DIS_CNT(IDX) (FZC_DMC + 0x30008UL + (IDX) * 0x40UL)
+#define RED_DIS_CNT_OFLOW 0x0000000000010000ULL
+#define RED_DIS_CNT_COUNT 0x000000000000ffffULL
+
+#define IPP_CFIG (FZC_IPP + 0x00000UL)
+#define IPP_CFIG_SOFT_RST 0x0000000080000000ULL
+#define IPP_CFIG_IP_MAX_PKT 0x0000000001ffff00ULL
+#define IPP_CFIG_IP_MAX_PKT_SHIFT 8
+#define IPP_CFIG_FFLP_CS_PIO_W 0x0000000000000080ULL
+#define IPP_CFIG_PFIFO_PIO_W 0x0000000000000040ULL
+#define IPP_CFIG_DFIFO_PIO_W 0x0000000000000020ULL
+#define IPP_CFIG_CKSUM_EN 0x0000000000000010ULL
+#define IPP_CFIG_DROP_BAD_CRC 0x0000000000000008ULL
+#define IPP_CFIG_DFIFO_ECC_EN 0x0000000000000004ULL
+#define IPP_CFIG_DEBUG_BUS_OUT_EN 0x0000000000000002ULL
+#define IPP_CFIG_IPP_ENABLE 0x0000000000000001ULL
+
+#define IPP_PKT_DIS (FZC_IPP + 0x00020UL)
+#define IPP_PKT_DIS_COUNT 0x0000000000003fffULL
+
+#define IPP_BAD_CS_CNT (FZC_IPP + 0x00028UL)
+#define IPP_BAD_CS_CNT_COUNT 0x0000000000003fffULL
+
+#define IPP_ECC (FZC_IPP + 0x00030UL)
+#define IPP_ECC_COUNT 0x00000000000000ffULL
+
+#define IPP_INT_STAT (FZC_IPP + 0x00040UL)
+#define IPP_INT_STAT_SOP_MISS 0x0000000080000000ULL
+#define IPP_INT_STAT_EOP_MISS 0x0000000040000000ULL
+#define IPP_INT_STAT_DFIFO_UE 0x0000000030000000ULL
+#define IPP_INT_STAT_DFIFO_CE 0x000000000c000000ULL
+#define IPP_INT_STAT_DFIFO_ECC 0x0000000003000000ULL
+#define IPP_INT_STAT_DFIFO_ECC_IDX 0x00000000007ff000ULL
+#define IPP_INT_STAT_PFIFO_PERR 0x0000000000000800ULL
+#define IPP_INT_STAT_ECC_ERR_MAX 0x0000000000000400ULL
+#define IPP_INT_STAT_PFIFO_ERR_IDX 0x00000000000003f0ULL
+#define IPP_INT_STAT_PFIFO_OVER 0x0000000000000008ULL
+#define IPP_INT_STAT_PFIFO_UND 0x0000000000000004ULL
+#define IPP_INT_STAT_BAD_CS_MX 0x0000000000000002ULL
+#define IPP_INT_STAT_PKT_DIS_MX 0x0000000000000001ULL
+#define IPP_INT_STAT_ALL 0x00000000ff7fffffULL
+
+#define IPP_MSK (FZC_IPP + 0x00048UL)
+#define IPP_MSK_ECC_ERR_MX 0x0000000000000080ULL
+#define IPP_MSK_DFIFO_EOP_SOP 0x0000000000000040ULL
+#define IPP_MSK_DFIFO_UC 0x0000000000000020ULL
+#define IPP_MSK_PFIFO_PAR 0x0000000000000010ULL
+#define IPP_MSK_PFIFO_OVER 0x0000000000000008ULL
+#define IPP_MSK_PFIFO_UND 0x0000000000000004ULL
+#define IPP_MSK_BAD_CS 0x0000000000000002ULL
+#define IPP_MSK_PKT_DIS_CNT 0x0000000000000001ULL
+#define IPP_MSK_ALL 0x00000000000000ffULL
+
+#define IPP_PFIFO_RD0 (FZC_IPP + 0x00060UL)
+#define IPP_PFIFO_RD0_DATA 0x00000000ffffffffULL /* bits 31:0 */
+
+#define IPP_PFIFO_RD1 (FZC_IPP + 0x00068UL)
+#define IPP_PFIFO_RD1_DATA 0x00000000ffffffffULL /* bits 63:32 */
+
+#define IPP_PFIFO_RD2 (FZC_IPP + 0x00070UL)
+#define IPP_PFIFO_RD2_DATA 0x00000000ffffffffULL /* bits 95:64 */
+
+#define IPP_PFIFO_RD3 (FZC_IPP + 0x00078UL)
+#define IPP_PFIFO_RD3_DATA 0x00000000ffffffffULL /* bits 127:96 */
+
+#define IPP_PFIFO_RD4 (FZC_IPP + 0x00080UL)
+#define IPP_PFIFO_RD4_DATA 0x00000000ffffffffULL /* bits 145:128 */
+
+#define IPP_PFIFO_WR0 (FZC_IPP + 0x00088UL)
+#define IPP_PFIFO_WR0_DATA 0x00000000ffffffffULL /* bits 31:0 */
+
+#define IPP_PFIFO_WR1 (FZC_IPP + 0x00090UL)
+#define IPP_PFIFO_WR1_DATA 0x00000000ffffffffULL /* bits 63:32 */
+
+#define IPP_PFIFO_WR2 (FZC_IPP + 0x00098UL)
+#define IPP_PFIFO_WR2_DATA 0x00000000ffffffffULL /* bits 95:64 */
+
+#define IPP_PFIFO_WR3 (FZC_IPP + 0x000a0UL)
+#define IPP_PFIFO_WR3_DATA 0x00000000ffffffffULL /* bits 127:96 */
+
+#define IPP_PFIFO_WR4 (FZC_IPP + 0x000a8UL)
+#define IPP_PFIFO_WR4_DATA 0x00000000ffffffffULL /* bits 145:128 */
+
+#define IPP_PFIFO_RD_PTR (FZC_IPP + 0x000b0UL)
+#define IPP_PFIFO_RD_PTR_PTR 0x000000000000003fULL
+
+#define IPP_PFIFO_WR_PTR (FZC_IPP + 0x000b8UL)
+#define IPP_PFIFO_WR_PTR_PTR 0x000000000000007fULL
+
+#define IPP_DFIFO_RD0 (FZC_IPP + 0x000c0UL)
+#define IPP_DFIFO_RD0_DATA 0x00000000ffffffffULL /* bits 31:0 */
+
+#define IPP_DFIFO_RD1 (FZC_IPP + 0x000c8UL)
+#define IPP_DFIFO_RD1_DATA 0x00000000ffffffffULL /* bits 63:32 */
+
+#define IPP_DFIFO_RD2 (FZC_IPP + 0x000d0UL)
+#define IPP_DFIFO_RD2_DATA 0x00000000ffffffffULL /* bits 95:64 */
+
+#define IPP_DFIFO_RD3 (FZC_IPP + 0x000d8UL)
+#define IPP_DFIFO_RD3_DATA 0x00000000ffffffffULL /* bits 127:96 */
+
+#define IPP_DFIFO_RD4 (FZC_IPP + 0x000e0UL)
+#define IPP_DFIFO_RD4_DATA 0x00000000ffffffffULL /* bits 145:128 */
+
+#define IPP_DFIFO_WR0 (FZC_IPP + 0x000e8UL)
+#define IPP_DFIFO_WR0_DATA 0x00000000ffffffffULL /* bits 31:0 */
+
+#define IPP_DFIFO_WR1 (FZC_IPP + 0x000f0UL)
+#define IPP_DFIFO_WR1_DATA 0x00000000ffffffffULL /* bits 63:32 */
+
+#define IPP_DFIFO_WR2 (FZC_IPP + 0x000f8UL)
+#define IPP_DFIFO_WR2_DATA 0x00000000ffffffffULL /* bits 95:64 */
+
+#define IPP_DFIFO_WR3 (FZC_IPP + 0x00100UL)
+#define IPP_DFIFO_WR3_DATA 0x00000000ffffffffULL /* bits 127:96 */
+
+#define IPP_DFIFO_WR4 (FZC_IPP + 0x00108UL)
+#define IPP_DFIFO_WR4_DATA 0x00000000ffffffffULL /* bits 145:128 */
+
+#define IPP_DFIFO_RD_PTR (FZC_IPP + 0x00110UL)
+#define IPP_DFIFO_RD_PTR_PTR 0x0000000000000fffULL
+
+#define IPP_DFIFO_WR_PTR (FZC_IPP + 0x00118UL)
+#define IPP_DFIFO_WR_PTR_PTR 0x0000000000000fffULL
+
+#define IPP_SM (FZC_IPP + 0x00120UL)
+#define IPP_SM_SM 0x00000000ffffffffULL
+
+#define IPP_CS_STAT (FZC_IPP + 0x00128UL)
+#define IPP_CS_STAT_BCYC_CNT 0x00000000ff000000ULL
+#define IPP_CS_STAT_IP_LEN 0x0000000000fff000ULL
+#define IPP_CS_STAT_CS_FAIL 0x0000000000000800ULL
+#define IPP_CS_STAT_TERM 0x0000000000000400ULL
+#define IPP_CS_STAT_BAD_NUM 0x0000000000000200ULL
+#define IPP_CS_STAT_CS_STATE 0x00000000000001ffULL
+
+#define IPP_FFLP_CS_INFO (FZC_IPP + 0x00130UL)
+#define IPP_FFLP_CS_INFO_PKT_ID 0x0000000000003c00ULL
+#define IPP_FFLP_CS_INFO_L4_PROTO 0x0000000000000300ULL
+#define IPP_FFLP_CS_INFO_V4_HD_LEN 0x00000000000000f0ULL
+#define IPP_FFLP_CS_INFO_L3_VER 0x000000000000000cULL
+#define IPP_FFLP_CS_INFO_L2_OP 0x0000000000000003ULL
+
+#define IPP_DBG_SEL (FZC_IPP + 0x00138UL)
+#define IPP_DBG_SEL_SEL 0x000000000000000fULL
+
+#define IPP_DFIFO_ECC_SYND (FZC_IPP + 0x00140UL)
+#define IPP_DFIFO_ECC_SYND_SYND 0x000000000000ffffULL
+
+#define IPP_DFIFO_EOP_RD_PTR (FZC_IPP + 0x00148UL)
+#define IPP_DFIFO_EOP_RD_PTR_PTR 0x0000000000000fffULL
+
+#define IPP_ECC_CTL (FZC_IPP + 0x00150UL)
+#define IPP_ECC_CTL_DIS_DBL 0x0000000080000000ULL
+#define IPP_ECC_CTL_COR_DBL 0x0000000000020000ULL
+#define IPP_ECC_CTL_COR_SNG 0x0000000000010000ULL
+#define IPP_ECC_CTL_COR_ALL 0x0000000000000400ULL
+#define IPP_ECC_CTL_COR_1 0x0000000000000100ULL
+#define IPP_ECC_CTL_COR_LST 0x0000000000000004ULL
+#define IPP_ECC_CTL_COR_SND 0x0000000000000002ULL
+#define IPP_ECC_CTL_COR_FSR 0x0000000000000001ULL
+
+#define NIU_DFIFO_ENTRIES 1024
+#define ATLAS_P0_P1_DFIFO_ENTRIES 2048
+#define ATLAS_P2_P3_DFIFO_ENTRIES 1024
+
+#define ZCP_CFIG (FZC_ZCP + 0x00000UL)
+#define ZCP_CFIG_ZCP_32BIT_MODE 0x0000000001000000ULL
+#define ZCP_CFIG_ZCP_DEBUG_SEL 0x0000000000ff0000ULL
+#define ZCP_CFIG_DMA_TH 0x000000000000ffe0ULL
+#define ZCP_CFIG_ECC_CHK_DIS 0x0000000000000010ULL
+#define ZCP_CFIG_PAR_CHK_DIS 0x0000000000000008ULL
+#define ZCP_CFIG_DIS_BUFF_RSP_IF 0x0000000000000004ULL
+#define ZCP_CFIG_DIS_BUFF_REQ_IF 0x0000000000000002ULL
+#define ZCP_CFIG_ZC_ENABLE 0x0000000000000001ULL
+
+#define ZCP_INT_STAT (FZC_ZCP + 0x00008UL)
+#define ZCP_INT_STAT_RRFIFO_UNDERRUN 0x0000000000008000ULL
+#define ZCP_INT_STAT_RRFIFO_OVERRUN 0x0000000000004000ULL
+#define ZCP_INT_STAT_RSPFIFO_UNCOR_ERR 0x0000000000001000ULL
+#define ZCP_INT_STAT_BUFFER_OVERFLOW 0x0000000000000800ULL
+#define ZCP_INT_STAT_STAT_TBL_PERR 0x0000000000000400ULL
+#define ZCP_INT_STAT_DYN_TBL_PERR 0x0000000000000200ULL
+#define ZCP_INT_STAT_BUF_TBL_PERR 0x0000000000000100ULL
+#define ZCP_INT_STAT_TT_PROGRAM_ERR 0x0000000000000080ULL
+#define ZCP_INT_STAT_RSP_TT_INDEX_ERR 0x0000000000000040ULL
+#define ZCP_INT_STAT_SLV_TT_INDEX_ERR 0x0000000000000020ULL
+#define ZCP_INT_STAT_ZCP_TT_INDEX_ERR 0x0000000000000010ULL
+#define ZCP_INT_STAT_CFIFO_ECC3 0x0000000000000008ULL
+#define ZCP_INT_STAT_CFIFO_ECC2 0x0000000000000004ULL
+#define ZCP_INT_STAT_CFIFO_ECC1 0x0000000000000002ULL
+#define ZCP_INT_STAT_CFIFO_ECC0 0x0000000000000001ULL
+#define ZCP_INT_STAT_ALL 0x000000000000ffffULL
+
+#define ZCP_INT_MASK (FZC_ZCP + 0x00010UL)
+#define ZCP_INT_MASK_RRFIFO_UNDERRUN 0x0000000000008000ULL
+#define ZCP_INT_MASK_RRFIFO_OVERRUN 0x0000000000004000ULL
+#define ZCP_INT_MASK_LOJ 0x0000000000002000ULL
+#define ZCP_INT_MASK_RSPFIFO_UNCOR_ERR 0x0000000000001000ULL
+#define ZCP_INT_MASK_BUFFER_OVERFLOW 0x0000000000000800ULL
+#define ZCP_INT_MASK_STAT_TBL_PERR 0x0000000000000400ULL
+#define ZCP_INT_MASK_DYN_TBL_PERR 0x0000000000000200ULL
+#define ZCP_INT_MASK_BUF_TBL_PERR 0x0000000000000100ULL
+#define ZCP_INT_MASK_TT_PROGRAM_ERR 0x0000000000000080ULL
+#define ZCP_INT_MASK_RSP_TT_INDEX_ERR 0x0000000000000040ULL
+#define ZCP_INT_MASK_SLV_TT_INDEX_ERR 0x0000000000000020ULL
+#define ZCP_INT_MASK_ZCP_TT_INDEX_ERR 0x0000000000000010ULL
+#define ZCP_INT_MASK_CFIFO_ECC3 0x0000000000000008ULL
+#define ZCP_INT_MASK_CFIFO_ECC2 0x0000000000000004ULL
+#define ZCP_INT_MASK_CFIFO_ECC1 0x0000000000000002ULL
+#define ZCP_INT_MASK_CFIFO_ECC0 0x0000000000000001ULL
+#define ZCP_INT_MASK_ALL 0x000000000000ffffULL
+
+#define BAM4BUF (FZC_ZCP + 0x00018UL)
+#define BAM4BUF_LOJ 0x0000000080000000ULL
+#define BAM4BUF_EN_CK 0x0000000040000000ULL
+#define BAM4BUF_IDX_END0 0x000000003ff00000ULL
+#define BAM4BUF_IDX_ST0 0x00000000000ffc00ULL
+#define BAM4BUF_OFFSET0 0x00000000000003ffULL
+
+#define BAM8BUF (FZC_ZCP + 0x00020UL)
+#define BAM8BUF_LOJ 0x0000000080000000ULL
+#define BAM8BUF_EN_CK 0x0000000040000000ULL
+#define BAM8BUF_IDX_END1 0x000000003ff00000ULL
+#define BAM8BUF_IDX_ST1 0x00000000000ffc00ULL
+#define BAM8BUF_OFFSET1 0x00000000000003ffULL
+
+#define BAM16BUF (FZC_ZCP + 0x00028UL)
+#define BAM16BUF_LOJ 0x0000000080000000ULL
+#define BAM16BUF_EN_CK 0x0000000040000000ULL
+#define BAM16BUF_IDX_END2 0x000000003ff00000ULL
+#define BAM16BUF_IDX_ST2 0x00000000000ffc00ULL
+#define BAM16BUF_OFFSET2 0x00000000000003ffULL
+
+#define BAM32BUF (FZC_ZCP + 0x00030UL)
+#define BAM32BUF_LOJ 0x0000000080000000ULL
+#define BAM32BUF_EN_CK 0x0000000040000000ULL
+#define BAM32BUF_IDX_END3 0x000000003ff00000ULL
+#define BAM32BUF_IDX_ST3 0x00000000000ffc00ULL
+#define BAM32BUF_OFFSET3 0x00000000000003ffULL
+
+#define DST4BUF (FZC_ZCP + 0x00038UL)
+#define DST4BUF_DS_OFFSET0 0x00000000000003ffULL
+
+#define DST8BUF (FZC_ZCP + 0x00040UL)
+#define DST8BUF_DS_OFFSET1 0x00000000000003ffULL
+
+#define DST16BUF (FZC_ZCP + 0x00048UL)
+#define DST16BUF_DS_OFFSET2 0x00000000000003ffULL
+
+#define DST32BUF (FZC_ZCP + 0x00050UL)
+#define DST32BUF_DS_OFFSET3 0x00000000000003ffULL
+
+#define ZCP_RAM_DATA0 (FZC_ZCP + 0x00058UL)
+#define ZCP_RAM_DATA0_DAT0 0x00000000ffffffffULL
+
+#define ZCP_RAM_DATA1 (FZC_ZCP + 0x00060UL)
+#define ZCP_RAM_DATA1_DAT1 0x00000000ffffffffULL
+
+#define ZCP_RAM_DATA2 (FZC_ZCP + 0x00068UL)
+#define ZCP_RAM_DATA2_DAT2 0x00000000ffffffffULL
+
+#define ZCP_RAM_DATA3 (FZC_ZCP + 0x00070UL)
+#define ZCP_RAM_DATA3_DAT3 0x00000000ffffffffULL
+
+#define ZCP_RAM_DATA4 (FZC_ZCP + 0x00078UL)
+#define ZCP_RAM_DATA4_DAT4 0x00000000000000ffULL
+
+#define ZCP_RAM_BE (FZC_ZCP + 0x00080UL)
+#define ZCP_RAM_BE_VAL 0x000000000001ffffULL
+
+#define ZCP_RAM_ACC (FZC_ZCP + 0x00088UL)
+#define ZCP_RAM_ACC_BUSY 0x0000000080000000ULL
+#define ZCP_RAM_ACC_READ 0x0000000040000000ULL
+#define ZCP_RAM_ACC_WRITE 0x0000000000000000ULL
+#define ZCP_RAM_ACC_LOJ 0x0000000020000000ULL
+#define ZCP_RAM_ACC_ZFCID 0x000000001ffe0000ULL
+#define ZCP_RAM_ACC_ZFCID_SHIFT 17
+#define ZCP_RAM_ACC_RAM_SEL 0x000000000001f000ULL
+#define ZCP_RAM_ACC_RAM_SEL_SHIFT 12
+#define ZCP_RAM_ACC_CFIFOADDR 0x0000000000000fffULL
+#define ZCP_RAM_ACC_CFIFOADDR_SHIFT 0
+
+#define ZCP_RAM_SEL_BAM(INDEX) (0x00 + (INDEX))
+#define ZCP_RAM_SEL_TT_STATIC 0x08
+#define ZCP_RAM_SEL_TT_DYNAMIC 0x09
+#define ZCP_RAM_SEL_CFIFO(PORT) (0x10 + (PORT))
+
+#define NIU_CFIFO_ENTRIES 1024
+#define ATLAS_P0_P1_CFIFO_ENTRIES 2048
+#define ATLAS_P2_P3_CFIFO_ENTRIES 1024
+
+#define CHK_BIT_DATA (FZC_ZCP + 0x00090UL)
+#define CHK_BIT_DATA_DATA 0x000000000000ffffULL
+
+#define RESET_CFIFO (FZC_ZCP + 0x00098UL)
+#define RESET_CFIFO_RST(PORT) (0x1 << (PORT))
+
+#define CFIFO_ECC(PORT) (FZC_ZCP + 0x000a0UL + (PORT) * 8UL)
+#define CFIFO_ECC_DIS_DBLBIT_ERR 0x0000000080000000ULL
+#define CFIFO_ECC_DBLBIT_ERR 0x0000000000020000ULL
+#define CFIFO_ECC_SINGLEBIT_ERR 0x0000000000010000ULL
+#define CFIFO_ECC_ALL_PKT 0x0000000000000400ULL
+#define CFIFO_ECC_LAST_LINE 0x0000000000000004ULL
+#define CFIFO_ECC_2ND_LINE 0x0000000000000002ULL
+#define CFIFO_ECC_1ST_LINE 0x0000000000000001ULL
+
+#define ZCP_TRAINING_VECTOR (FZC_ZCP + 0x000c0UL)
+#define ZCP_TRAINING_VECTOR_VECTOR 0x00000000ffffffffULL
+
+#define ZCP_STATE_MACHINE (FZC_ZCP + 0x000c8UL)
+#define ZCP_STATE_MACHINE_SM 0x00000000ffffffffULL
+
+/* Same bits as ZCP_INT_STAT */
+#define ZCP_INT_STAT_TEST (FZC_ZCP + 0x00108UL)
+
+#define RXDMA_CFIG1(IDX) (DMC + 0x00000UL + (IDX) * 0x200UL)
+#define RXDMA_CFIG1_EN 0x0000000080000000ULL
+#define RXDMA_CFIG1_RST 0x0000000040000000ULL
+#define RXDMA_CFIG1_QST 0x0000000020000000ULL
+#define RXDMA_CFIG1_MBADDR_H 0x0000000000000fffULL /* mboxaddr 43:32 */
+
+#define RXDMA_CFIG2(IDX) (DMC + 0x00008UL + (IDX) * 0x200UL)
+#define RXDMA_CFIG2_MBADDR_L 0x00000000ffffffc0ULL /* mboxaddr 31:6 */
+#define RXDMA_CFIG2_OFFSET 0x0000000000000006ULL
+#define RXDMA_CFIG2_OFFSET_SHIFT 1
+#define RXDMA_CFIG2_FULL_HDR 0x0000000000000001ULL
+
+#define RBR_CFIG_A(IDX) (DMC + 0x00010UL + (IDX) * 0x200UL)
+#define RBR_CFIG_A_LEN 0xffff000000000000ULL
+#define RBR_CFIG_A_LEN_SHIFT 48
+#define RBR_CFIG_A_STADDR_BASE 0x00000ffffffc0000ULL
+#define RBR_CFIG_A_STADDR 0x000000000003ffc0ULL
+
+#define RBR_CFIG_B(IDX) (DMC + 0x00018UL + (IDX) * 0x200UL)
+#define RBR_CFIG_B_BLKSIZE 0x0000000003000000ULL
+#define RBR_CFIG_B_BLKSIZE_SHIFT 24
+#define RBR_CFIG_B_VLD2 0x0000000000800000ULL
+#define RBR_CFIG_B_BUFSZ2 0x0000000000030000ULL
+#define RBR_CFIG_B_BUFSZ2_SHIFT 16
+#define RBR_CFIG_B_VLD1 0x0000000000008000ULL
+#define RBR_CFIG_B_BUFSZ1 0x0000000000000300ULL
+#define RBR_CFIG_B_BUFSZ1_SHIFT 8
+#define RBR_CFIG_B_VLD0 0x0000000000000080ULL
+#define RBR_CFIG_B_BUFSZ0 0x0000000000000003ULL
+#define RBR_CFIG_B_BUFSZ0_SHIFT 0
+
+#define RBR_BLKSIZE_4K 0x0
+#define RBR_BLKSIZE_8K 0x1
+#define RBR_BLKSIZE_16K 0x2
+#define RBR_BLKSIZE_32K 0x3
+#define RBR_BUFSZ2_2K 0x0
+#define RBR_BUFSZ2_4K 0x1
+#define RBR_BUFSZ2_8K 0x2
+#define RBR_BUFSZ2_16K 0x3
+#define RBR_BUFSZ1_1K 0x0
+#define RBR_BUFSZ1_2K 0x1
+#define RBR_BUFSZ1_4K 0x2
+#define RBR_BUFSZ1_8K 0x3
+#define RBR_BUFSZ0_256 0x0
+#define RBR_BUFSZ0_512 0x1
+#define RBR_BUFSZ0_1K 0x2
+#define RBR_BUFSZ0_2K 0x3
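+
+/* Sketch, not part of the original patch: composing an RBR_CFIG_B value
+ * from the encoded block and buffer sizes above, e.g. 8K blocks carved
+ * into valid 256-byte, 1K and 2K buffers.
+ */
+static inline u64 rbr_cfig_b_compose(void)
+{
+        return ((u64)RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT) |
+                RBR_CFIG_B_VLD0 |
+                ((u64)RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT) |
+                RBR_CFIG_B_VLD1 |
+                ((u64)RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT) |
+                RBR_CFIG_B_VLD2 |
+                ((u64)RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
+}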
+
+#define RBR_KICK(IDX) (DMC + 0x00020UL + (IDX) * 0x200UL)
+#define RBR_KICK_BKADD 0x000000000000ffffULL
+
+#define RBR_STAT(IDX) (DMC + 0x00028UL + (IDX) * 0x200UL)
+#define RBR_STAT_QLEN 0x000000000000ffffULL
+
+#define RBR_HDH(IDX) (DMC + 0x00030UL + (IDX) * 0x200UL)
+#define RBR_HDH_HEAD_H 0x0000000000000fffULL
+
+#define RBR_HDL(IDX) (DMC + 0x00038UL + (IDX) * 0x200UL)
+#define RBR_HDL_HEAD_L 0x00000000fffffffcULL
+
+#define RCRCFIG_A(IDX) (DMC + 0x00040UL + (IDX) * 0x200UL)
+#define RCRCFIG_A_LEN 0xffff000000000000ULL
+#define RCRCFIG_A_LEN_SHIFT 48
+#define RCRCFIG_A_STADDR_BASE 0x00000ffffff80000ULL
+#define RCRCFIG_A_STADDR 0x000000000007ffc0ULL
+
+#define RCRCFIG_B(IDX) (DMC + 0x00048UL + (IDX) * 0x200UL)
+#define RCRCFIG_B_PTHRES 0x00000000ffff0000ULL
+#define RCRCFIG_B_PTHRES_SHIFT 16
+#define RCRCFIG_B_ENTOUT 0x0000000000008000ULL
+#define RCRCFIG_B_TIMEOUT 0x000000000000003fULL
+#define RCRCFIG_B_TIMEOUT_SHIFT 0
+
+#define RCRSTAT_A(IDX) (DMC + 0x00050UL + (IDX) * 0x200UL)
+#define RCRSTAT_A_QLEN 0x000000000000ffffULL
+
+#define RCRSTAT_B(IDX) (DMC + 0x00058UL + (IDX) * 0x200UL)
+#define RCRSTAT_B_TIPTR_H 0x0000000000000fffULL
+
+#define RCRSTAT_C(IDX) (DMC + 0x00060UL + (IDX) * 0x200UL)
+#define RCRSTAT_C_TIPTR_L 0x00000000fffffff8ULL
+
+#define RX_DMA_CTL_STAT(IDX) (DMC + 0x00070UL + (IDX) * 0x200UL)
+#define RX_DMA_CTL_STAT_RBR_TMOUT 0x0020000000000000ULL
+#define RX_DMA_CTL_STAT_RSP_CNT_ERR 0x0010000000000000ULL
+#define RX_DMA_CTL_STAT_BYTE_EN_BUS 0x0008000000000000ULL
+#define RX_DMA_CTL_STAT_RSP_DAT_ERR 0x0004000000000000ULL
+#define RX_DMA_CTL_STAT_RCR_ACK_ERR 0x0002000000000000ULL
+#define RX_DMA_CTL_STAT_DC_FIFO_ERR 0x0001000000000000ULL
+#define RX_DMA_CTL_STAT_MEX 0x0000800000000000ULL
+#define RX_DMA_CTL_STAT_RCRTHRES 0x0000400000000000ULL
+#define RX_DMA_CTL_STAT_RCRTO 0x0000200000000000ULL
+#define RX_DMA_CTL_STAT_RCR_SHA_PAR 0x0000100000000000ULL
+#define RX_DMA_CTL_STAT_RBR_PRE_PAR 0x0000080000000000ULL
+#define RX_DMA_CTL_STAT_PORT_DROP_PKT 0x0000040000000000ULL
+#define RX_DMA_CTL_STAT_WRED_DROP 0x0000020000000000ULL
+#define RX_DMA_CTL_STAT_RBR_PRE_EMTY 0x0000010000000000ULL
+#define RX_DMA_CTL_STAT_RCRSHADOW_FULL 0x0000008000000000ULL
+#define RX_DMA_CTL_STAT_CONFIG_ERR 0x0000004000000000ULL
+#define RX_DMA_CTL_STAT_RCRINCON 0x0000002000000000ULL
+#define RX_DMA_CTL_STAT_RCRFULL 0x0000001000000000ULL
+#define RX_DMA_CTL_STAT_RBR_EMPTY 0x0000000800000000ULL
+#define RX_DMA_CTL_STAT_RBRFULL 0x0000000400000000ULL
+#define RX_DMA_CTL_STAT_RBRLOGPAGE 0x0000000200000000ULL
+#define RX_DMA_CTL_STAT_CFIGLOGPAGE 0x0000000100000000ULL
+#define RX_DMA_CTL_STAT_PTRREAD 0x00000000ffff0000ULL
+#define RX_DMA_CTL_STAT_PTRREAD_SHIFT 16
+#define RX_DMA_CTL_STAT_PKTREAD 0x000000000000ffffULL
+#define RX_DMA_CTL_STAT_PKTREAD_SHIFT 0
+
+#define RX_DMA_CTL_STAT_CHAN_FATAL (RX_DMA_CTL_STAT_RBR_TMOUT | \
+ RX_DMA_CTL_STAT_RSP_CNT_ERR | \
+ RX_DMA_CTL_STAT_BYTE_EN_BUS | \
+ RX_DMA_CTL_STAT_RSP_DAT_ERR | \
+ RX_DMA_CTL_STAT_RCR_ACK_ERR | \
+ RX_DMA_CTL_STAT_RCR_SHA_PAR | \
+ RX_DMA_CTL_STAT_RBR_PRE_PAR | \
+ RX_DMA_CTL_STAT_CONFIG_ERR | \
+ RX_DMA_CTL_STAT_RCRINCON | \
+ RX_DMA_CTL_STAT_RCRFULL | \
+ RX_DMA_CTL_STAT_RBRFULL | \
+ RX_DMA_CTL_STAT_RBRLOGPAGE | \
+ RX_DMA_CTL_STAT_CFIGLOGPAGE)
+
+#define RX_DMA_CTL_STAT_PORT_FATAL (RX_DMA_CTL_STAT_DC_FIFO_ERR)
+
+#define RX_DMA_CTL_WRITE_CLEAR_ERRS (RX_DMA_CTL_STAT_RBR_EMPTY | \
+ RX_DMA_CTL_STAT_RCRSHADOW_FULL | \
+ RX_DMA_CTL_STAT_RBR_PRE_EMTY | \
+ RX_DMA_CTL_STAT_WRED_DROP | \
+ RX_DMA_CTL_STAT_PORT_DROP_PKT | \
+ RX_DMA_CTL_STAT_RCRTO | \
+ RX_DMA_CTL_STAT_RCRTHRES | \
+ RX_DMA_CTL_STAT_DC_FIFO_ERR)
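+
+/* Sketch, not part of the original patch: a receive interrupt handler
+ * would typically split the channel status into the fatal conditions
+ * above and the write-one-to-clear events, acking the latter back into
+ * RX_DMA_CTL_STAT.  This helper just computes the ack value from a
+ * status word that has already been read from the register.
+ */
+static inline u64 rx_dma_ctl_stat_ack(u64 stat)
+{
+        return stat & RX_DMA_CTL_WRITE_CLEAR_ERRS;
+}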
+
+#define RCR_FLSH(IDX) (DMC + 0x00078UL + (IDX) * 0x200UL)
+#define RCR_FLSH_FLSH 0x0000000000000001ULL
+
+#define RXMISC(IDX) (DMC + 0x00090UL + (IDX) * 0x200UL)
+#define RXMISC_OFLOW 0x0000000000010000ULL
+#define RXMISC_COUNT 0x000000000000ffffULL
+
+#define RX_DMA_CTL_STAT_DBG(IDX) (DMC + 0x00098UL + (IDX) * 0x200UL)
+#define RX_DMA_CTL_STAT_DBG_RBR_TMOUT 0x0020000000000000ULL
+#define RX_DMA_CTL_STAT_DBG_RSP_CNT_ERR 0x0010000000000000ULL
+#define RX_DMA_CTL_STAT_DBG_BYTE_EN_BUS 0x0008000000000000ULL
+#define RX_DMA_CTL_STAT_DBG_RSP_DAT_ERR 0x0004000000000000ULL
+#define RX_DMA_CTL_STAT_DBG_RCR_ACK_ERR 0x0002000000000000ULL
+#define RX_DMA_CTL_STAT_DBG_DC_FIFO_ERR 0x0001000000000000ULL
+#define RX_DMA_CTL_STAT_DBG_MEX 0x0000800000000000ULL
+#define RX_DMA_CTL_STAT_DBG_RCRTHRES 0x0000400000000000ULL
+#define RX_DMA_CTL_STAT_DBG_RCRTO 0x0000200000000000ULL
+#define RX_DMA_CTL_STAT_DBG_RCR_SHA_PAR 0x0000100000000000ULL
+#define RX_DMA_CTL_STAT_DBG_RBR_PRE_PAR 0x0000080000000000ULL
+#define RX_DMA_CTL_STAT_DBG_PORT_DROP_PKT 0x0000040000000000ULL
+#define RX_DMA_CTL_STAT_DBG_WRED_DROP 0x0000020000000000ULL
+#define RX_DMA_CTL_STAT_DBG_RBR_PRE_EMTY 0x0000010000000000ULL
+#define RX_DMA_CTL_STAT_DBG_RCRSHADOW_FULL 0x0000008000000000ULL
+#define RX_DMA_CTL_STAT_DBG_CONFIG_ERR 0x0000004000000000ULL
+#define RX_DMA_CTL_STAT_DBG_RCRINCON 0x0000002000000000ULL
+#define RX_DMA_CTL_STAT_DBG_RCRFULL 0x0000001000000000ULL
+#define RX_DMA_CTL_STAT_DBG_RBR_EMPTY 0x0000000800000000ULL
+#define RX_DMA_CTL_STAT_DBG_RBRFULL 0x0000000400000000ULL
+#define RX_DMA_CTL_STAT_DBG_RBRLOGPAGE 0x0000000200000000ULL
+#define RX_DMA_CTL_STAT_DBG_CFIGLOGPAGE 0x0000000100000000ULL
+#define RX_DMA_CTL_STAT_DBG_PTRREAD 0x00000000ffff0000ULL
+#define RX_DMA_CTL_STAT_DBG_PKTREAD 0x000000000000ffffULL
+
+#define RX_DMA_ENT_MSK(IDX) (DMC + 0x00068UL + (IDX) * 0x200UL)
+#define RX_DMA_ENT_MSK_RBR_TMOUT 0x0000000000200000ULL
+#define RX_DMA_ENT_MSK_RSP_CNT_ERR 0x0000000000100000ULL
+#define RX_DMA_ENT_MSK_BYTE_EN_BUS 0x0000000000080000ULL
+#define RX_DMA_ENT_MSK_RSP_DAT_ERR 0x0000000000040000ULL
+#define RX_DMA_ENT_MSK_RCR_ACK_ERR 0x0000000000020000ULL
+#define RX_DMA_ENT_MSK_DC_FIFO_ERR 0x0000000000010000ULL
+#define RX_DMA_ENT_MSK_RCRTHRES 0x0000000000004000ULL
+#define RX_DMA_ENT_MSK_RCRTO 0x0000000000002000ULL
+#define RX_DMA_ENT_MSK_RCR_SHA_PAR 0x0000000000001000ULL
+#define RX_DMA_ENT_MSK_RBR_PRE_PAR 0x0000000000000800ULL
+#define RX_DMA_ENT_MSK_PORT_DROP_PKT 0x0000000000000400ULL
+#define RX_DMA_ENT_MSK_WRED_DROP 0x0000000000000200ULL
+#define RX_DMA_ENT_MSK_RBR_PRE_EMTY 0x0000000000000100ULL
+#define RX_DMA_ENT_MSK_RCR_SHADOW_FULL 0x0000000000000080ULL
+#define RX_DMA_ENT_MSK_CONFIG_ERR 0x0000000000000040ULL
+#define RX_DMA_ENT_MSK_RCRINCON 0x0000000000000020ULL
+#define RX_DMA_ENT_MSK_RCRFULL 0x0000000000000010ULL
+#define RX_DMA_ENT_MSK_RBR_EMPTY 0x0000000000000008ULL
+#define RX_DMA_ENT_MSK_RBRFULL 0x0000000000000004ULL
+#define RX_DMA_ENT_MSK_RBRLOGPAGE 0x0000000000000002ULL
+#define RX_DMA_ENT_MSK_CFIGLOGPAGE 0x0000000000000001ULL
+#define RX_DMA_ENT_MSK_ALL 0x00000000003f7fffULL
+
+#define TX_RNG_CFIG(IDX) (DMC + 0x40000UL + (IDX) * 0x200UL)
+#define TX_RNG_CFIG_LEN 0x1fff000000000000ULL
+#define TX_RNG_CFIG_LEN_SHIFT 48
+#define TX_RNG_CFIG_STADDR_BASE 0x00000ffffff80000ULL
+#define TX_RNG_CFIG_STADDR 0x000000000007ffc0ULL
+
+#define TX_RING_HDL(IDX) (DMC + 0x40010UL + (IDX) * 0x200UL)
+#define TX_RING_HDL_WRAP 0x0000000000080000ULL
+#define TX_RING_HDL_HEAD 0x000000000007fff8ULL
+#define TX_RING_HDL_HEAD_SHIFT 3
+
+#define TX_RING_KICK(IDX) (DMC + 0x40018UL + (IDX) * 0x200UL)
+#define TX_RING_KICK_WRAP 0x0000000000080000ULL
+#define TX_RING_KICK_TAIL 0x000000000007fff8ULL
+
+#define TX_ENT_MSK(IDX) (DMC + 0x40020UL + (IDX) * 0x200UL)
+#define TX_ENT_MSK_MK 0x0000000000008000ULL
+#define TX_ENT_MSK_MBOX_ERR 0x0000000000000080ULL
+#define TX_ENT_MSK_PKT_SIZE_ERR 0x0000000000000040ULL
+#define TX_ENT_MSK_TX_RING_OFLOW 0x0000000000000020ULL
+#define TX_ENT_MSK_PREF_BUF_ECC_ERR 0x0000000000000010ULL
+#define TX_ENT_MSK_NACK_PREF 0x0000000000000008ULL
+#define TX_ENT_MSK_NACK_PKT_RD 0x0000000000000004ULL
+#define TX_ENT_MSK_CONF_PART_ERR 0x0000000000000002ULL
+#define TX_ENT_MSK_PKT_PRT_ERR 0x0000000000000001ULL
+
+#define TX_CS(IDX) (DMC + 0x40028UL + (IDX)*0x200UL)
+#define TX_CS_PKT_CNT 0x0fff000000000000ULL
+#define TX_CS_PKT_CNT_SHIFT 48
+#define TX_CS_LASTMARK 0x00000fff00000000ULL
+#define TX_CS_LASTMARK_SHIFT 32
+#define TX_CS_RST 0x0000000080000000ULL
+#define TX_CS_RST_STATE 0x0000000040000000ULL
+#define TX_CS_MB 0x0000000020000000ULL
+#define TX_CS_STOP_N_GO 0x0000000010000000ULL
+#define TX_CS_SNG_STATE 0x0000000008000000ULL
+#define TX_CS_MK 0x0000000000008000ULL
+#define TX_CS_MMK 0x0000000000004000ULL
+#define TX_CS_MBOX_ERR 0x0000000000000080ULL
+#define TX_CS_PKT_SIZE_ERR 0x0000000000000040ULL
+#define TX_CS_TX_RING_OFLOW 0x0000000000000020ULL
+#define TX_CS_PREF_BUF_PAR_ERR 0x0000000000000010ULL
+#define TX_CS_NACK_PREF 0x0000000000000008ULL
+#define TX_CS_NACK_PKT_RD 0x0000000000000004ULL
+#define TX_CS_CONF_PART_ERR 0x0000000000000002ULL
+#define TX_CS_PKT_PRT_ERR 0x0000000000000001ULL
+
+#define TXDMA_MBH(IDX) (DMC + 0x40030UL + (IDX) * 0x200UL)
+#define TXDMA_MBH_MBADDR 0x0000000000000fffULL
+
+#define TXDMA_MBL(IDX) (DMC + 0x40038UL + (IDX) * 0x200UL)
+#define TXDMA_MBL_MBADDR 0x00000000ffffffc0ULL
+
+#define TX_DMA_PRE_ST(IDX) (DMC + 0x40040UL + (IDX) * 0x200UL)
+#define TX_DMA_PRE_ST_SHADOW_HD 0x000000000007ffffULL
+
+#define TX_RNG_ERR_LOGH(IDX) (DMC + 0x40048UL + (IDX) * 0x200UL)
+#define TX_RNG_ERR_LOGH_ERR 0x0000000080000000ULL
+#define TX_RNG_ERR_LOGH_MERR 0x0000000040000000ULL
+#define TX_RNG_ERR_LOGH_ERRCODE 0x0000000038000000ULL
+#define TX_RNG_ERR_LOGH_ERRADDR 0x0000000000000fffULL
+
+#define TX_RNG_ERR_LOGL(IDX) (DMC + 0x40050UL + (IDX) * 0x200UL)
+#define TX_RNG_ERR_LOGL_ERRADDR 0x00000000ffffffffULL
+
+#define TDMC_INTR_DBG(IDX) (DMC + 0x40060UL + (IDX) * 0x200UL)
+#define TDMC_INTR_DBG_MK 0x0000000000008000ULL
+#define TDMC_INTR_DBG_MBOX_ERR 0x0000000000000080ULL
+#define TDMC_INTR_DBG_PKT_SIZE_ERR 0x0000000000000040ULL
+#define TDMC_INTR_DBG_TX_RING_OFLOW 0x0000000000000020ULL
+#define TDMC_INTR_DBG_PREF_BUF_PAR_ERR 0x0000000000000010ULL
+#define TDMC_INTR_DBG_NACK_PREF 0x0000000000000008ULL
+#define TDMC_INTR_DBG_NACK_PKT_RD 0x0000000000000004ULL
+#define TDMC_INTR_DBG_CONF_PART_ERR 0x0000000000000002ULL
+#define TDMC_INTR_DBG_PKT_PART_ERR 0x0000000000000001ULL
+
+#define TX_CS_DBG(IDX) (DMC + 0x40068UL + (IDX) * 0x200UL)
+#define TX_CS_DBG_PKT_CNT 0x0fff000000000000ULL
+
+#define TDMC_INJ_PAR_ERR(IDX) (DMC + 0x45040UL + (IDX) * 0x200UL)
+#define TDMC_INJ_PAR_ERR_VAL 0x000000000000ffffULL
+
+#define TDMC_DBG_SEL(IDX) (DMC + 0x45080UL + (IDX) * 0x200UL)
+#define TDMC_DBG_SEL_DBG_SEL 0x000000000000003fULL
+
+#define TDMC_TRAINING_VECTOR(IDX) (DMC + 0x45088UL + (IDX) * 0x200UL)
+#define TDMC_TRAINING_VECTOR_VEC 0x00000000ffffffffULL
+
+#define TXC_DMA_MAX(CHAN) (FZC_TXC + 0x00000UL + (CHAN)*0x1000UL)
+#define TXC_DMA_MAX_LEN(CHAN) (FZC_TXC + 0x00008UL + (CHAN)*0x1000UL)
+
+#define TXC_CONTROL (FZC_TXC + 0x20000UL)
+#define TXC_CONTROL_ENABLE 0x0000000000000010ULL
+#define TXC_CONTROL_PORT_ENABLE(X) (1 << (X))
+
+#define TXC_TRAINING_VEC (FZC_TXC + 0x20008UL)
+#define TXC_TRAINING_VEC_MASK 0x00000000ffffffffULL
+
+#define TXC_DEBUG (FZC_TXC + 0x20010UL)
+#define TXC_DEBUG_SELECT 0x000000000000003fULL
+
+#define TXC_MAX_REORDER (FZC_TXC + 0x20018UL)
+#define TXC_MAX_REORDER_PORT3 0x000000000f000000ULL
+#define TXC_MAX_REORDER_PORT2 0x00000000000f0000ULL
+#define TXC_MAX_REORDER_PORT1 0x0000000000000f00ULL
+#define TXC_MAX_REORDER_PORT0 0x000000000000000fULL
+
+#define TXC_PORT_CTL(PORT) (FZC_TXC + 0x20020UL + (PORT)*0x100UL)
+#define TXC_PORT_CTL_CLR_ALL_STAT 0x0000000000000001ULL
+
+#define TXC_PKT_STUFFED(PORT) (FZC_TXC + 0x20030UL + (PORT)*0x100UL)
+#define TXC_PKT_STUFFED_PP_REORDER 0x00000000ffff0000ULL
+#define TXC_PKT_STUFFED_PP_PACKETASSY 0x000000000000ffffULL
+
+#define TXC_PKT_XMIT(PORT) (FZC_TXC + 0x20038UL + (PORT)*0x100UL)
+#define TXC_PKT_XMIT_BYTES 0x00000000ffff0000ULL
+#define TXC_PKT_XMIT_PKTS 0x000000000000ffffULL
+
+#define TXC_ROECC_CTL(PORT) (FZC_TXC + 0x20040UL + (PORT)*0x100UL)
+#define TXC_ROECC_CTL_DISABLE_UE 0x0000000080000000ULL
+#define TXC_ROECC_CTL_DBL_BIT_ERR 0x0000000000020000ULL
+#define TXC_ROECC_CTL_SNGL_BIT_ERR 0x0000000000010000ULL
+#define TXC_ROECC_CTL_ALL_PKTS 0x0000000000000400ULL
+#define TXC_ROECC_CTL_ALT_PKTS 0x0000000000000200ULL
+#define TXC_ROECC_CTL_ONE_PKT_ONLY 0x0000000000000100ULL
+#define TXC_ROECC_CTL_LST_PKT_LINE 0x0000000000000004ULL
+#define TXC_ROECC_CTL_2ND_PKT_LINE 0x0000000000000002ULL
+#define TXC_ROECC_CTL_1ST_PKT_LINE 0x0000000000000001ULL
+
+#define TXC_ROECC_ST(PORT) (FZC_TXC + 0x20048UL + (PORT)*0x100UL)
+#define TXC_ROECC_CLR_ST 0x0000000080000000ULL
+#define TXC_ROECC_CE 0x0000000000020000ULL
+#define TXC_ROECC_UE 0x0000000000010000ULL
+#define TXC_ROECC_ST_ECC_ADDR 0x00000000000003ffULL
+
+#define TXC_RO_DATA0(PORT) (FZC_TXC + 0x20050UL + (PORT)*0x100UL)
+#define TXC_RO_DATA0_DATA0 0x00000000ffffffffULL /* bits 31:0 */
+
+#define TXC_RO_DATA1(PORT) (FZC_TXC + 0x20058UL + (PORT)*0x100UL)
+#define TXC_RO_DATA1_DATA1 0x00000000ffffffffULL /* bits 63:32 */
+
+#define TXC_RO_DATA2(PORT) (FZC_TXC + 0x20060UL + (PORT)*0x100UL)
+#define TXC_RO_DATA2_DATA2 0x00000000ffffffffULL /* bits 95:64 */
+
+#define TXC_RO_DATA3(PORT) (FZC_TXC + 0x20068UL + (PORT)*0x100UL)
+#define TXC_RO_DATA3_DATA3 0x00000000ffffffffULL /* bits 127:96 */
+
+#define TXC_RO_DATA4(PORT) (FZC_TXC + 0x20070UL + (PORT)*0x100UL)
+#define TXC_RO_DATA4_DATA4 0x0000000000ffffffULL /* bits 151:128 */
+
+#define TXC_SFECC_CTL(PORT) (FZC_TXC + 0x20078UL + (PORT)*0x100UL)
+#define TXC_SFECC_CTL_DISABLE_UE 0x0000000080000000ULL
+#define TXC_SFECC_CTL_DBL_BIT_ERR 0x0000000000020000ULL
+#define TXC_SFECC_CTL_SNGL_BIT_ERR 0x0000000000010000ULL
+#define TXC_SFECC_CTL_ALL_PKTS 0x0000000000000400ULL
+#define TXC_SFECC_CTL_ALT_PKTS 0x0000000000000200ULL
+#define TXC_SFECC_CTL_ONE_PKT_ONLY 0x0000000000000100ULL
+#define TXC_SFECC_CTL_LST_PKT_LINE 0x0000000000000004ULL
+#define TXC_SFECC_CTL_2ND_PKT_LINE 0x0000000000000002ULL
+#define TXC_SFECC_CTL_1ST_PKT_LINE 0x0000000000000001ULL
+
+#define TXC_SFECC_ST(PORT) (FZC_TXC + 0x20080UL + (PORT)*0x100UL)
+#define TXC_SFECC_ST_CLR_ST 0x0000000080000000ULL
+#define TXC_SFECC_ST_CE 0x0000000000020000ULL
+#define TXC_SFECC_ST_UE 0x0000000000010000ULL
+#define TXC_SFECC_ST_ECC_ADDR 0x00000000000003ffULL
+
+#define TXC_SF_DATA0(PORT) (FZC_TXC + 0x20088UL + (PORT)*0x100UL)
+#define TXC_SF_DATA0_DATA0 0x00000000ffffffffULL /* bits 31:0 */
+
+#define TXC_SF_DATA1(PORT) (FZC_TXC + 0x20090UL + (PORT)*0x100UL)
+#define TXC_SF_DATA1_DATA1 0x00000000ffffffffULL /* bits 63:32 */
+
+#define TXC_SF_DATA2(PORT) (FZC_TXC + 0x20098UL + (PORT)*0x100UL)
+#define TXC_SF_DATA2_DATA2 0x00000000ffffffffULL /* bits 95:64 */
+
+#define TXC_SF_DATA3(PORT) (FZC_TXC + 0x200a0UL + (PORT)*0x100UL)
+#define TXC_SF_DATA3_DATA3 0x00000000ffffffffULL /* bits 127:96 */
+
+#define TXC_SF_DATA4(PORT) (FZC_TXC + 0x200a8UL + (PORT)*0x100UL)
+#define TXC_SF_DATA4_DATA4 0x0000000000ffffffULL /* bits 151:128 */
+
+#define TXC_RO_TIDS(PORT) (FZC_TXC + 0x200b0UL + (PORT)*0x100UL)
+#define TXC_RO_TIDS_IN_USE 0x00000000ffffffffULL
+
+#define TXC_RO_STATE0(PORT) (FZC_TXC + 0x200b8UL + (PORT)*0x100UL)
+#define TXC_RO_STATE0_DUPLICATE_TID 0x00000000ffffffffULL
+
+#define TXC_RO_STATE1(PORT) (FZC_TXC + 0x200c0UL + (PORT)*0x100UL)
+#define TXC_RO_STATE1_UNUSED_TID 0x00000000ffffffffULL
+
+#define TXC_RO_STATE2(PORT) (FZC_TXC + 0x200c8UL + (PORT)*0x100UL)
+#define TXC_RO_STATE2_TRANS_TIMEOUT 0x00000000ffffffffULL
+
+#define TXC_RO_STATE3(PORT) (FZC_TXC + 0x200d0UL + (PORT)*0x100UL)
+#define TXC_RO_STATE3_ENAB_SPC_WMARK 0x0000000080000000ULL
+#define TXC_RO_STATE3_RO_SPC_WMARK 0x000000007fe00000ULL
+#define TXC_RO_STATE3_ROFIFO_SPC_AVAIL 0x00000000001ff800ULL
+#define TXC_RO_STATE3_ENAB_RO_WMARK 0x0000000000000100ULL
+#define TXC_RO_STATE3_HIGH_RO_USED 0x00000000000000f0ULL
+#define TXC_RO_STATE3_NUM_RO_USED 0x000000000000000fULL
+
+#define TXC_RO_CTL(PORT) (FZC_TXC + 0x200d8UL + (PORT)*0x100UL)
+#define TXC_RO_CTL_CLR_FAIL_STATE 0x0000000080000000ULL
+#define TXC_RO_CTL_RO_ADDR 0x000000000f000000ULL
+#define TXC_RO_CTL_ADDR_FAILED 0x0000000000400000ULL
+#define TXC_RO_CTL_DMA_FAILED 0x0000000000200000ULL
+#define TXC_RO_CTL_LEN_FAILED 0x0000000000100000ULL
+#define TXC_RO_CTL_CAPT_ADDR_FAILED 0x0000000000040000ULL
+#define TXC_RO_CTL_CAPT_DMA_FAILED 0x0000000000020000ULL
+#define TXC_RO_CTL_CAPT_LEN_FAILED 0x0000000000010000ULL
+#define TXC_RO_CTL_RO_STATE_RD_DONE 0x0000000000000080ULL
+#define TXC_RO_CTL_RO_STATE_WR_DONE 0x0000000000000040ULL
+#define TXC_RO_CTL_RO_STATE_RD 0x0000000000000020ULL
+#define TXC_RO_CTL_RO_STATE_WR 0x0000000000000010ULL
+#define TXC_RO_CTL_RO_STATE_ADDR 0x000000000000000fULL
+
+#define TXC_RO_ST_DATA0(PORT) (FZC_TXC + 0x200e0UL + (PORT)*0x100UL)
+#define TXC_RO_ST_DATA0_DATA0 0x00000000ffffffffULL
+
+#define TXC_RO_ST_DATA1(PORT) (FZC_TXC + 0x200e8UL + (PORT)*0x100UL)
+#define TXC_RO_ST_DATA1_DATA1 0x00000000ffffffffULL
+
+#define TXC_RO_ST_DATA2(PORT) (FZC_TXC + 0x200f0UL + (PORT)*0x100UL)
+#define TXC_RO_ST_DATA2_DATA2 0x00000000ffffffffULL
+
+#define TXC_RO_ST_DATA3(PORT) (FZC_TXC + 0x200f8UL + (PORT)*0x100UL)
+#define TXC_RO_ST_DATA3_DATA3 0x00000000ffffffffULL
+
+#define TXC_PORT_PACKET_REQ(PORT) (FZC_TXC + 0x20100UL + (PORT)*0x100UL)
+#define TXC_PORT_PACKET_REQ_GATHER_REQ 0x00000000f0000000ULL
+#define TXC_PORT_PACKET_REQ_PKT_REQ 0x000000000fff0000ULL
+#define TXC_PORT_PACKET_REQ_PERR_ABRT 0x000000000000ffffULL
+
+/* Bits are the same as in TXC_INT_STAT. */
+#define TXC_INT_STAT_DBG (FZC_TXC + 0x20420UL)
+
+#define TXC_INT_STAT (FZC_TXC + 0x20428UL)
+#define TXC_INT_STAT_VAL_SHIFT(PORT) ((PORT) * 8)
+#define TXC_INT_STAT_VAL(PORT) (0x3f << TXC_INT_STAT_VAL_SHIFT(PORT))
+#define TXC_INT_STAT_SF_CE(PORT) (0x01 << TXC_INT_STAT_VAL_SHIFT(PORT))
+#define TXC_INT_STAT_SF_UE(PORT) (0x02 << TXC_INT_STAT_VAL_SHIFT(PORT))
+#define TXC_INT_STAT_RO_CE(PORT) (0x04 << TXC_INT_STAT_VAL_SHIFT(PORT))
+#define TXC_INT_STAT_RO_UE(PORT) (0x08 << TXC_INT_STAT_VAL_SHIFT(PORT))
+#define TXC_INT_STAT_REORDER_ERR(PORT) (0x10 << TXC_INT_STAT_VAL_SHIFT(PORT))
+#define TXC_INT_STAT_PKTASM_DEAD(PORT) (0x20 << TXC_INT_STAT_VAL_SHIFT(PORT))
+
+#define TXC_INT_MASK (FZC_TXC + 0x20430UL)
+#define TXC_INT_MASK_VAL_SHIFT(PORT) ((PORT) * 8)
+#define TXC_INT_MASK_VAL(PORT) (0x3f << TXC_INT_MASK_VAL_SHIFT(PORT))
+
+#define TXC_INT_MASK_SF_CE 0x01
+#define TXC_INT_MASK_SF_UE 0x02
+#define TXC_INT_MASK_RO_CE 0x04
+#define TXC_INT_MASK_RO_UE 0x08
+#define TXC_INT_MASK_REORDER_ERR 0x10
+#define TXC_INT_MASK_PKTASM_DEAD 0x20
+#define TXC_INT_MASK_ALL 0x3f
+
+#define TXC_PORT_DMA(IDX) (FZC_TXC + 0x20028UL + (IDX)*0x100UL)
+
+#define ESPC_PIO_EN (FZC_PROM + 0x40000UL)
+#define ESPC_PIO_EN_ENABLE 0x0000000000000001ULL
+
+#define ESPC_PIO_STAT (FZC_PROM + 0x40008UL)
+#define ESPC_PIO_STAT_READ_START 0x0000000080000000ULL
+#define ESPC_PIO_STAT_READ_END 0x0000000040000000ULL
+#define ESPC_PIO_STAT_WRITE_INIT 0x0000000020000000ULL
+#define ESPC_PIO_STAT_WRITE_END 0x0000000010000000ULL
+#define ESPC_PIO_STAT_ADDR 0x0000000003ffff00ULL
+#define ESPC_PIO_STAT_ADDR_SHIFT 8
+#define ESPC_PIO_STAT_DATA 0x00000000000000ffULL
+#define ESPC_PIO_STAT_DATA_SHIFT 0
+
+#define ESPC_NCR(IDX) (FZC_PROM + 0x40020UL + (IDX)*0x8UL)
+#define ESPC_NCR_VAL 0x00000000ffffffffULL
+
+#define ESPC_MAC_ADDR0 ESPC_NCR(0)
+#define ESPC_MAC_ADDR1 ESPC_NCR(1)
+#define ESPC_NUM_PORTS_MACS ESPC_NCR(2)
+#define ESPC_NUM_PORTS_MACS_VAL 0x00000000000000ffULL
+#define ESPC_MOD_STR_LEN ESPC_NCR(4)
+#define ESPC_MOD_STR_1 ESPC_NCR(5)
+#define ESPC_MOD_STR_2 ESPC_NCR(6)
+#define ESPC_MOD_STR_3 ESPC_NCR(7)
+#define ESPC_MOD_STR_4 ESPC_NCR(8)
+#define ESPC_MOD_STR_5 ESPC_NCR(9)
+#define ESPC_MOD_STR_6 ESPC_NCR(10)
+#define ESPC_MOD_STR_7 ESPC_NCR(11)
+#define ESPC_MOD_STR_8 ESPC_NCR(12)
+#define ESPC_BD_MOD_STR_LEN ESPC_NCR(13)
+#define ESPC_BD_MOD_STR_1 ESPC_NCR(14)
+#define ESPC_BD_MOD_STR_2 ESPC_NCR(15)
+#define ESPC_BD_MOD_STR_3 ESPC_NCR(16)
+#define ESPC_BD_MOD_STR_4 ESPC_NCR(17)
+
+#define ESPC_PHY_TYPE ESPC_NCR(18)
+#define ESPC_PHY_TYPE_PORT0 0x00000000ff000000ULL
+#define ESPC_PHY_TYPE_PORT0_SHIFT 24
+#define ESPC_PHY_TYPE_PORT1 0x0000000000ff0000ULL
+#define ESPC_PHY_TYPE_PORT1_SHIFT 16
+#define ESPC_PHY_TYPE_PORT2 0x000000000000ff00ULL
+#define ESPC_PHY_TYPE_PORT2_SHIFT 8
+#define ESPC_PHY_TYPE_PORT3 0x00000000000000ffULL
+#define ESPC_PHY_TYPE_PORT3_SHIFT 0
+
+#define ESPC_PHY_TYPE_1G_COPPER 3
+#define ESPC_PHY_TYPE_1G_FIBER 2
+#define ESPC_PHY_TYPE_10G_COPPER 1
+#define ESPC_PHY_TYPE_10G_FIBER 0
+
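+/* Illustrative helper (a sketch, not used by the driver): extract one
+ * port's PHY type from the packed ESPC_PHY_TYPE word; port 0 lives in
+ * the top byte and port 3 in the bottom byte.
+ */
+static inline u8 espc_phy_type_of_port(u64 val, int port)
+{
+	return (val >> (8 * (3 - port))) & 0xff;
+}
+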
+#define ESPC_MAX_FM_SZ ESPC_NCR(19)
+
+#define ESPC_INTR_NUM ESPC_NCR(20)
+#define ESPC_INTR_NUM_PORT0 0x00000000ff000000ULL
+#define ESPC_INTR_NUM_PORT1 0x0000000000ff0000ULL
+#define ESPC_INTR_NUM_PORT2 0x000000000000ff00ULL
+#define ESPC_INTR_NUM_PORT3 0x00000000000000ffULL
+
+#define ESPC_VER_IMGSZ ESPC_NCR(21)
+#define ESPC_VER_IMGSZ_IMGSZ 0x00000000ffff0000ULL
+#define ESPC_VER_IMGSZ_IMGSZ_SHIFT 16
+#define ESPC_VER_IMGSZ_VER 0x000000000000ffffULL
+#define ESPC_VER_IMGSZ_VER_SHIFT 0
+
+#define ESPC_CHKSUM ESPC_NCR(22)
+#define ESPC_CHKSUM_SUM 0x00000000000000ffULL
+
+#define ESPC_EEPROM_SIZE 0x100000
+
+#define CLASS_CODE_UNRECOG 0x00
+#define CLASS_CODE_DUMMY1 0x01
+#define CLASS_CODE_ETHERTYPE1 0x02
+#define CLASS_CODE_ETHERTYPE2 0x03
+#define CLASS_CODE_USER_PROG1 0x04
+#define CLASS_CODE_USER_PROG2 0x05
+#define CLASS_CODE_USER_PROG3 0x06
+#define CLASS_CODE_USER_PROG4 0x07
+#define CLASS_CODE_TCP_IPV4 0x08
+#define CLASS_CODE_UDP_IPV4 0x09
+#define CLASS_CODE_AH_ESP_IPV4 0x0a
+#define CLASS_CODE_SCTP_IPV4 0x0b
+#define CLASS_CODE_TCP_IPV6 0x0c
+#define CLASS_CODE_UDP_IPV6 0x0d
+#define CLASS_CODE_AH_ESP_IPV6 0x0e
+#define CLASS_CODE_SCTP_IPV6 0x0f
+#define CLASS_CODE_ARP 0x10
+#define CLASS_CODE_RARP 0x11
+#define CLASS_CODE_DUMMY2 0x12
+#define CLASS_CODE_DUMMY3 0x13
+#define CLASS_CODE_DUMMY4 0x14
+#define CLASS_CODE_DUMMY5 0x15
+#define CLASS_CODE_DUMMY6 0x16
+#define CLASS_CODE_DUMMY7 0x17
+#define CLASS_CODE_DUMMY8 0x18
+#define CLASS_CODE_DUMMY9 0x19
+#define CLASS_CODE_DUMMY10 0x1a
+#define CLASS_CODE_DUMMY11 0x1b
+#define CLASS_CODE_DUMMY12 0x1c
+#define CLASS_CODE_DUMMY13 0x1d
+#define CLASS_CODE_DUMMY14 0x1e
+#define CLASS_CODE_DUMMY15 0x1f
+
+/* Logical devices and device groups */
+#define LDN_RXDMA(CHAN) (0 + (CHAN))
+#define LDN_RESV1(OFF) (16 + (OFF))
+#define LDN_TXDMA(CHAN) (32 + (CHAN))
+#define LDN_RESV2(OFF) (56 + (OFF))
+#define LDN_MIF 63
+#define LDN_MAC(PORT) (64 + (PORT))
+#define LDN_DEVICE_ERROR 68
+#define LDN_MAX LDN_DEVICE_ERROR
+
+#define NIU_LDG_MIN 0
+#define NIU_LDG_MAX 63
+#define NIU_NUM_LDG 64
+#define LDG_INVALID 0xff
+
+/* PHY stuff */
+#define NIU_PMA_PMD_DEV_ADDR 1
+#define NIU_PCS_DEV_ADDR 3
+
+#define NIU_PHY_ID_MASK 0xfffff0f0
+#define NIU_PHY_ID_BCM8704 0x00206030
+#define NIU_PHY_ID_BCM8706 0x00206035
+#define NIU_PHY_ID_BCM5464R 0x002060b0
+#define NIU_PHY_ID_MRVL88X2011 0x01410020
+
+/* MRVL88X2011 register addresses */
+#define MRVL88X2011_USER_DEV1_ADDR 1
+#define MRVL88X2011_USER_DEV2_ADDR 2
+#define MRVL88X2011_USER_DEV3_ADDR 3
+#define MRVL88X2011_USER_DEV4_ADDR 4
+#define MRVL88X2011_PMA_PMD_CTL_1 0x0000
+#define MRVL88X2011_PMA_PMD_STATUS_1 0x0001
+#define MRVL88X2011_10G_PMD_STATUS_2 0x0008
+#define MRVL88X2011_10G_PMD_TX_DIS 0x0009
+#define MRVL88X2011_10G_XGXS_LANE_STAT 0x0018
+#define MRVL88X2011_GENERAL_CTL 0x8300
+#define MRVL88X2011_LED_BLINK_CTL 0x8303
+#define MRVL88X2011_LED_8_TO_11_CTL 0x8306
+
+/* MRVL88X2011 register control */
+#define MRVL88X2011_ENA_XFPREFCLK 0x0001
+#define MRVL88X2011_ENA_PMDTX 0x0000
+#define MRVL88X2011_LOOPBACK 0x1
+#define MRVL88X2011_LED_ACT 0x1
+#define MRVL88X2011_LNK_STATUS_OK 0x4
+#define MRVL88X2011_LED_BLKRATE_MASK 0x70
+#define MRVL88X2011_LED_BLKRATE_034MS 0x0
+#define MRVL88X2011_LED_BLKRATE_067MS 0x1
+#define MRVL88X2011_LED_BLKRATE_134MS 0x2
+#define MRVL88X2011_LED_BLKRATE_269MS 0x3
+#define MRVL88X2011_LED_BLKRATE_538MS 0x4
+#define MRVL88X2011_LED_CTL_OFF 0x0
+#define MRVL88X2011_LED_CTL_PCS_ACT 0x5
+#define MRVL88X2011_LED_CTL_MASK 0x7
+#define MRVL88X2011_LED(n,v) ((v)<<((n)*4))
+#define MRVL88X2011_LED_STAT(n,v) ((v)>>((n)*4))
+
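+/* Illustrative use (a sketch): to put LED 'n' into PCS-activity mode,
+ * clear its 4-bit control field and OR in the new value:
+ *
+ *	val &= ~MRVL88X2011_LED(n, MRVL88X2011_LED_CTL_MASK);
+ *	val |=  MRVL88X2011_LED(n, MRVL88X2011_LED_CTL_PCS_ACT);
+ */
+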
+#define BCM8704_PMA_PMD_DEV_ADDR 1
+#define BCM8704_PCS_DEV_ADDR 2
+#define BCM8704_USER_DEV3_ADDR 3
+#define BCM8704_PHYXS_DEV_ADDR 4
+#define BCM8704_USER_DEV4_ADDR 4
+
+#define BCM8704_PMD_RCV_SIGDET 0x000a
+#define PMD_RCV_SIGDET_LANE3 0x0010
+#define PMD_RCV_SIGDET_LANE2 0x0008
+#define PMD_RCV_SIGDET_LANE1 0x0004
+#define PMD_RCV_SIGDET_LANE0 0x0002
+#define PMD_RCV_SIGDET_GLOBAL 0x0001
+
+#define BCM8704_PCS_10G_R_STATUS 0x0020
+#define PCS_10G_R_STATUS_LINKSTAT 0x1000
+#define PCS_10G_R_STATUS_PRBS31_ABLE 0x0004
+#define PCS_10G_R_STATUS_HI_BER 0x0002
+#define PCS_10G_R_STATUS_BLK_LOCK 0x0001
+
+#define BCM8704_USER_CONTROL 0xc800
+#define USER_CONTROL_OPTXENB_LVL 0x8000
+#define USER_CONTROL_OPTXRST_LVL 0x4000
+#define USER_CONTROL_OPBIASFLT_LVL 0x2000
+#define USER_CONTROL_OBTMPFLT_LVL 0x1000
+#define USER_CONTROL_OPPRFLT_LVL 0x0800
+#define USER_CONTROL_OPTXFLT_LVL 0x0400
+#define USER_CONTROL_OPRXLOS_LVL 0x0200
+#define USER_CONTROL_OPRXFLT_LVL 0x0100
+#define USER_CONTROL_OPTXON_LVL 0x0080
+#define USER_CONTROL_RES1 0x007f
+#define USER_CONTROL_RES1_SHIFT 0
+
+#define BCM8704_USER_ANALOG_CLK 0xc801
+#define BCM8704_USER_PMD_RX_CONTROL 0xc802
+
+#define BCM8704_USER_PMD_TX_CONTROL 0xc803
+#define USER_PMD_TX_CTL_RES1 0xfe00
+#define USER_PMD_TX_CTL_XFP_CLKEN 0x0100
+#define USER_PMD_TX_CTL_TX_DAC_TXD 0x00c0
+#define USER_PMD_TX_CTL_TX_DAC_TXD_SH 6
+#define USER_PMD_TX_CTL_TX_DAC_TXCK 0x0030
+#define USER_PMD_TX_CTL_TX_DAC_TXCK_SH 4
+#define USER_PMD_TX_CTL_TSD_LPWREN 0x0008
+#define USER_PMD_TX_CTL_TSCK_LPWREN 0x0004
+#define USER_PMD_TX_CTL_CMU_LPWREN 0x0002
+#define USER_PMD_TX_CTL_SFIFORST 0x0001
+
+#define BCM8704_USER_ANALOG_STATUS0 0xc804
+#define BCM8704_USER_OPT_DIGITAL_CTRL 0xc808
+#define BCM8704_USER_TX_ALARM_STATUS 0x9004
+
+#define USER_ODIG_CTRL_FMODE 0x8000
+#define USER_ODIG_CTRL_TX_PDOWN 0x4000
+#define USER_ODIG_CTRL_RX_PDOWN 0x2000
+#define USER_ODIG_CTRL_EFILT_EN 0x1000
+#define USER_ODIG_CTRL_OPT_RST 0x0800
+#define USER_ODIG_CTRL_PCS_TIB 0x0400
+#define USER_ODIG_CTRL_PCS_RI 0x0200
+#define USER_ODIG_CTRL_RESV1 0x0180
+#define USER_ODIG_CTRL_GPIOS 0x0060
+#define USER_ODIG_CTRL_GPIOS_SHIFT 5
+#define USER_ODIG_CTRL_RESV2 0x0010
+#define USER_ODIG_CTRL_LB_ERR_DIS 0x0008
+#define USER_ODIG_CTRL_RESV3 0x0006
+#define USER_ODIG_CTRL_TXONOFF_PD_DIS 0x0001
+
+#define BCM8704_PHYXS_XGXS_LANE_STAT 0x0018
+#define PHYXS_XGXS_LANE_STAT_ALINGED 0x1000
+#define PHYXS_XGXS_LANE_STAT_PATTEST 0x0800
+#define PHYXS_XGXS_LANE_STAT_MAGIC 0x0400
+#define PHYXS_XGXS_LANE_STAT_LANE3 0x0008
+#define PHYXS_XGXS_LANE_STAT_LANE2 0x0004
+#define PHYXS_XGXS_LANE_STAT_LANE1 0x0002
+#define PHYXS_XGXS_LANE_STAT_LANE0 0x0001
+
+#define BCM5464R_AUX_CTL 24
+#define BCM5464R_AUX_CTL_EXT_LB 0x8000
+#define BCM5464R_AUX_CTL_EXT_PLEN 0x4000
+#define BCM5464R_AUX_CTL_ER1000 0x3000
+#define BCM5464R_AUX_CTL_ER1000_SHIFT 12
+#define BCM5464R_AUX_CTL_RESV1 0x0800
+#define BCM5464R_AUX_CTL_WRITE_1 0x0400
+#define BCM5464R_AUX_CTL_RESV2 0x0300
+#define BCM5464R_AUX_CTL_PRESP_DIS 0x0080
+#define BCM5464R_AUX_CTL_RESV3 0x0040
+#define BCM5464R_AUX_CTL_ER100 0x0030
+#define BCM5464R_AUX_CTL_ER100_SHIFT 4
+#define BCM5464R_AUX_CTL_DIAG_MODE 0x0008
+#define BCM5464R_AUX_CTL_SR_SEL 0x0007
+#define BCM5464R_AUX_CTL_SR_SEL_SHIFT 0
+
+#define BCM5464R_CTRL1000_AS_MASTER 0x0800
+#define BCM5464R_CTRL1000_ENABLE_AS_MASTER 0x1000
+
+#define RCR_ENTRY_MULTI 0x8000000000000000ULL
+#define RCR_ENTRY_PKT_TYPE 0x6000000000000000ULL
+#define RCR_ENTRY_PKT_TYPE_SHIFT 61
+#define RCR_ENTRY_ZERO_COPY 0x1000000000000000ULL
+#define RCR_ENTRY_NOPORT 0x0800000000000000ULL
+#define RCR_ENTRY_PROMISC 0x0400000000000000ULL
+#define RCR_ENTRY_ERROR 0x0380000000000000ULL
+#define RCR_ENTRY_DCF_ERR 0x0040000000000000ULL
+#define RCR_ENTRY_L2_LEN 0x003fff0000000000ULL
+#define RCR_ENTRY_L2_LEN_SHIFT 40
+#define RCR_ENTRY_PKTBUFSZ 0x000000c000000000ULL
+#define RCR_ENTRY_PKTBUFSZ_SHIFT 38
+#define RCR_ENTRY_PKT_BUF_ADDR 0x0000003fffffffffULL /* bits 43:6 */
+#define RCR_ENTRY_PKT_BUF_ADDR_SHIFT 6
+
+#define RCR_PKT_TYPE_OTHER 0x0
+#define RCR_PKT_TYPE_TCP 0x1
+#define RCR_PKT_TYPE_UDP 0x2
+#define RCR_PKT_TYPE_SCTP 0x3
+
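+/* Illustrative helpers (sketches, not the driver's own): decode the
+ * L2 length and DMA buffer address from a completed RCR entry after
+ * le64_to_cpu() conversion.
+ */
+static inline u32 rcr_entry_l2_len(u64 val)
+{
+	return (val & RCR_ENTRY_L2_LEN) >> RCR_ENTRY_L2_LEN_SHIFT;
+}
+
+static inline u64 rcr_entry_buf_addr(u64 val)
+{
+	/* The hardware stores the address pre-shifted down by 6 bits. */
+	return (val & RCR_ENTRY_PKT_BUF_ADDR) << RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
+}
+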
+#define NIU_RXPULL_MAX ETH_HLEN
+
+struct rx_pkt_hdr0 {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 inputport:2,
+ maccheck:1,
+ class:5;
+ u8 vlan:1,
+ llcsnap:1,
+ noport:1,
+ badip:1,
+ tcamhit:1,
+ tres:2,
+ tzfvld:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 class:5,
+ maccheck:1,
+ inputport:2;
+ u8 tzfvld:1,
+ tres:2,
+ tcamhit:1,
+ badip:1,
+ noport:1,
+ llcsnap:1,
+ vlan:1;
+#endif
+};
+
+struct rx_pkt_hdr1 {
+ u8 hwrsvd1;
+ u8 tcammatch;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 hwrsvd2:2,
+ hashit:1,
+ exact:1,
+ hzfvld:1,
+ hashsidx:3;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 hashsidx:3,
+ hzfvld:1,
+ exact:1,
+ hashit:1,
+ hwrsvd2:2;
+#endif
+ u8 zcrsvd;
+
+ /* Bits 11:8 of zero copy flow ID. */
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 hwrsvd3:4, zflowid0:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 zflowid0:4, hwrsvd3:4;
+#endif
+
+ /* Bits 7:0 of zero copy flow ID. */
+ u8 zflowid1;
+
+ /* Bits 15:8 of hash value, H2. */
+ u8 hashval2_0;
+
+ /* Bits 7:0 of hash value, H2. */
+ u8 hashval2_1;
+
+ /* Bits 19:16 of hash value, H1. */
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 hwrsvd4:4, hashval1_0:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 hashval1_0:4, hwrsvd4:4;
+#endif
+
+ /* Bits 15:8 of hash value, H1. */
+ u8 hashval1_1;
+
+ /* Bits 7:0 of hash value, H1. */
+ u8 hashval1_2;
+
+ u8 hwrsvd5;
+ u8 hwrsvd6;
+
+ u8 usrdata_0; /* Bits 39:32 of user data. */
+ u8 usrdata_1; /* Bits 31:24 of user data. */
+ u8 usrdata_2; /* Bits 23:16 of user data. */
+ u8 usrdata_3; /* Bits 15:8 of user data. */
+ u8 usrdata_4; /* Bits 7:0 of user data. */
+};
+
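+/* Illustrative sketch: the 20-bit H1 hash arrives split across three
+ * byte fields and could be reassembled (for a struct rx_pkt_hdr1 *hp) as
+ *
+ *	u32 h1 = ((u32)hp->hashval1_0 << 16) |
+ *		 ((u32)hp->hashval1_1 <<  8) |
+ *		  hp->hashval1_2;
+ */
+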
+struct tx_dma_mbox {
+ u64 tx_dma_pre_st;
+ u64 tx_cs;
+ u64 tx_ring_kick;
+ u64 tx_ring_hdl;
+ u64 resv1;
+ u32 tx_rng_err_logl;
+ u32 tx_rng_err_logh;
+ u64 resv2;
+ u64 resv3;
+};
+
+struct tx_pkt_hdr {
+ __le64 flags;
+#define TXHDR_PAD 0x0000000000000007ULL
+#define TXHDR_PAD_SHIFT 0
+#define TXHDR_LEN 0x000000003fff0000ULL
+#define TXHDR_LEN_SHIFT 16
+#define TXHDR_L4STUFF 0x0000003f00000000ULL
+#define TXHDR_L4STUFF_SHIFT 32
+#define TXHDR_L4START 0x00003f0000000000ULL
+#define TXHDR_L4START_SHIFT 40
+#define TXHDR_L3START 0x000f000000000000ULL
+#define TXHDR_L3START_SHIFT 48
+#define TXHDR_IHL 0x00f0000000000000ULL
+#define TXHDR_IHL_SHIFT 52
+#define TXHDR_VLAN 0x0100000000000000ULL
+#define TXHDR_LLC 0x0200000000000000ULL
+#define TXHDR_IP_VER 0x2000000000000000ULL
+#define TXHDR_CSUM_NONE 0x0000000000000000ULL
+#define TXHDR_CSUM_TCP 0x4000000000000000ULL
+#define TXHDR_CSUM_UDP 0x8000000000000000ULL
+#define TXHDR_CSUM_SCTP 0xc000000000000000ULL
+ __le64 resv;
+};
+
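+/* Illustrative sketch (assuming len, ihl, l3start, l4start and l4stuff
+ * are already in the units the hardware expects): composing the flags
+ * word for a checksummed TCP frame:
+ *
+ *	u64 f = TXHDR_CSUM_TCP |
+ *		((u64)len << TXHDR_LEN_SHIFT) |
+ *		((u64)ihl << TXHDR_IHL_SHIFT) |
+ *		((u64)l3start << TXHDR_L3START_SHIFT) |
+ *		((u64)l4start << TXHDR_L4START_SHIFT) |
+ *		((u64)l4stuff << TXHDR_L4STUFF_SHIFT);
+ *	hdr->flags = cpu_to_le64(f);
+ */
+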
+#define TX_DESC_SOP 0x8000000000000000ULL
+#define TX_DESC_MARK 0x4000000000000000ULL
+#define TX_DESC_NUM_PTR 0x3c00000000000000ULL
+#define TX_DESC_NUM_PTR_SHIFT 58
+#define TX_DESC_TR_LEN 0x01fff00000000000ULL
+#define TX_DESC_TR_LEN_SHIFT 44
+#define TX_DESC_SAD 0x00000fffffffffffULL
+#define TX_DESC_SAD_SHIFT 0
+
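+/* Illustrative helper (a sketch, not the driver's own): compose one
+ * transmit descriptor word; 'sop' marks the first fragment of a frame.
+ */
+static inline u64 tx_desc_compose(int sop, int mark, u64 n_ptrs,
+				  u64 tr_len, u64 dma_addr)
+{
+	return (sop ? TX_DESC_SOP : 0) |
+	       (mark ? TX_DESC_MARK : 0) |
+	       (n_ptrs << TX_DESC_NUM_PTR_SHIFT) |
+	       (tr_len << TX_DESC_TR_LEN_SHIFT) |
+	       (dma_addr & TX_DESC_SAD);
+}
+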
+struct tx_buff_info {
+ struct sk_buff *skb;
+ u64 mapping;
+};
+
+struct txdma_mailbox {
+ __le64 tx_dma_pre_st;
+ __le64 tx_cs;
+ __le64 tx_ring_kick;
+ __le64 tx_ring_hdl;
+ __le64 resv1;
+ __le32 tx_rng_err_logl;
+ __le32 tx_rng_err_logh;
+ __le64 resv2[2];
+} __attribute__((aligned(64)));
+
+#define MAX_TX_RING_SIZE 256
+#define MAX_TX_DESC_LEN 4076
+
+struct tx_ring_info {
+ struct tx_buff_info tx_buffs[MAX_TX_RING_SIZE];
+ struct niu *np;
+ u64 tx_cs;
+ int pending;
+ int prod;
+ int cons;
+ int wrap_bit;
+ u16 last_pkt_cnt;
+ u16 tx_channel;
+ u16 mark_counter;
+ u16 mark_freq;
+ u16 mark_pending;
+ u16 __pad;
+ struct txdma_mailbox *mbox;
+ __le64 *descr;
+
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_errors;
+
+ u64 mbox_dma;
+ u64 descr_dma;
+ int max_burst;
+};
+
+#define NEXT_TX(tp, index) \
+ (((index) + 1) < (tp)->pending ? ((index) + 1) : 0)
+
+static inline u32 niu_tx_avail(struct tx_ring_info *tp)
+{
+ return (tp->pending -
+ ((tp->prod - tp->cons) & (MAX_TX_RING_SIZE - 1)));
+}
+
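+/* Illustrative use (a sketch): a transmit path would typically stop
+ * the queue once too few descriptors remain for a worst-case frame:
+ *
+ *	if (niu_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))
+ *		netif_stop_queue(dev);
+ */
+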
+struct rxdma_mailbox {
+ __le64 rx_dma_ctl_stat;
+ __le64 rbr_stat;
+ __le32 rbr_hdl;
+ __le32 rbr_hdh;
+ __le64 resv1;
+ __le32 rcrstat_c;
+ __le32 rcrstat_b;
+ __le64 rcrstat_a;
+ __le64 resv2[2];
+} __attribute__((aligned(64)));
+
+#define MAX_RBR_RING_SIZE 128
+#define MAX_RCR_RING_SIZE (MAX_RBR_RING_SIZE * 2)
+
+#define RBR_REFILL_MIN 16
+
+#define RX_SKB_ALLOC_SIZE (128 + NET_IP_ALIGN)
+
+struct rx_ring_info {
+ struct niu *np;
+ int rx_channel;
+ u16 rbr_block_size;
+ u16 rbr_blocks_per_page;
+ u16 rbr_sizes[4];
+ unsigned int rcr_index;
+ unsigned int rcr_table_size;
+ unsigned int rbr_index;
+ unsigned int rbr_pending;
+ unsigned int rbr_refill_pending;
+ unsigned int rbr_kick_thresh;
+ unsigned int rbr_table_size;
+ struct page **rxhash;
+ struct rxdma_mailbox *mbox;
+ __le64 *rcr;
+ __le32 *rbr;
+#define RBR_DESCR_ADDR_SHIFT 12
+
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_errors;
+
+ u64 mbox_dma;
+ u64 rcr_dma;
+ u64 rbr_dma;
+
+ /* WRED */
+ int nonsyn_window;
+ int nonsyn_threshold;
+ int syn_window;
+ int syn_threshold;
+
+ /* interrupt mitigation */
+ int rcr_pkt_threshold;
+ int rcr_timeout;
+};
+
+#define NEXT_RCR(rp, index) \
+ (((index) + 1) < (rp)->rcr_table_size ? ((index) + 1) : 0)
+#define NEXT_RBR(rp, index) \
+ (((index) + 1) < (rp)->rbr_table_size ? ((index) + 1) : 0)
+
+#define NIU_MAX_PORTS 4
+#define NIU_NUM_RXCHAN 16
+#define NIU_NUM_TXCHAN 24
+#define MAC_NUM_HASH 16
+
+#define NIU_MAX_MTU 9216
+
+/* VPD strings */
+#define NIU_QGC_LP_BM_STR "501-7606"
+#define NIU_2XGF_LP_BM_STR "501-7283"
+#define NIU_QGC_PEM_BM_STR "501-7765"
+#define NIU_2XGF_PEM_BM_STR "501-7626"
+#define NIU_ALONSO_BM_STR "373-0202"
+#define NIU_FOXXY_BM_STR "501-7961"
+#define NIU_2XGF_MRVL_BM_STR "SK-6E82"
+#define NIU_QGC_LP_MDL_STR "SUNW,pcie-qgc"
+#define NIU_2XGF_LP_MDL_STR "SUNW,pcie-2xgf"
+#define NIU_QGC_PEM_MDL_STR "SUNW,pcie-qgc-pem"
+#define NIU_2XGF_PEM_MDL_STR "SUNW,pcie-2xgf-pem"
+#define NIU_ALONSO_MDL_STR "SUNW,CP3220"
+#define NIU_KIMI_MDL_STR "SUNW,CP3260"
+#define NIU_MARAMBA_MDL_STR "SUNW,pcie-neptune"
+#define NIU_FOXXY_MDL_STR "SUNW,pcie-rfem"
+#define NIU_2XGF_MRVL_MDL_STR "SysKonnect,pcie-2xgf"
+
+#define NIU_VPD_MIN_MAJOR 3
+#define NIU_VPD_MIN_MINOR 4
+
+#define NIU_VPD_MODEL_MAX 32
+#define NIU_VPD_BD_MODEL_MAX 16
+#define NIU_VPD_VERSION_MAX 64
+#define NIU_VPD_PHY_TYPE_MAX 8
+
+struct niu_vpd {
+ char model[NIU_VPD_MODEL_MAX];
+ char board_model[NIU_VPD_BD_MODEL_MAX];
+ char version[NIU_VPD_VERSION_MAX];
+ char phy_type[NIU_VPD_PHY_TYPE_MAX];
+ u8 mac_num;
+ u8 __pad;
+ u8 local_mac[6];
+ int fcode_major;
+ int fcode_minor;
+};
+
+struct niu_altmac_rdc {
+ u8 alt_mac_num;
+ u8 rdc_num;
+ u8 mac_pref;
+};
+
+struct niu_vlan_rdc {
+ u8 rdc_num;
+ u8 vlan_pref;
+};
+
+struct niu_classifier {
+ struct niu_altmac_rdc alt_mac_mappings[16];
+ struct niu_vlan_rdc vlan_mappings[ENET_VLAN_TBL_NUM_ENTRIES];
+
+ u16 tcam_top;
+ u16 tcam_sz;
+ u16 tcam_valid_entries;
+ u16 num_alt_mac_mappings;
+
+ u32 h1_init;
+ u16 h2_init;
+};
+
+#define NIU_NUM_RDC_TABLES 8
+#define NIU_RDC_TABLE_SLOTS 16
+
+struct rdc_table {
+ u8 rxdma_channel[NIU_RDC_TABLE_SLOTS];
+};
+
+struct niu_rdc_tables {
+ struct rdc_table tables[NIU_NUM_RDC_TABLES];
+ int first_table_num;
+ int num_tables;
+};
+
+#define PHY_TYPE_PMA_PMD 0
+#define PHY_TYPE_PCS 1
+#define PHY_TYPE_MII 2
+#define PHY_TYPE_MAX 3
+
+struct phy_probe_info {
+ u32 phy_id[PHY_TYPE_MAX][NIU_MAX_PORTS];
+ u8 phy_port[PHY_TYPE_MAX][NIU_MAX_PORTS];
+ u8 cur[PHY_TYPE_MAX];
+
+ struct device_attribute phy_port_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS];
+ struct device_attribute phy_type_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS];
+ struct device_attribute phy_id_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS];
+};
+
+struct niu_tcam_entry {
+ u8 valid;
+ u64 key[4];
+ u64 key_mask[4];
+ u64 assoc_data;
+};
+
+struct device_node;
+union niu_parent_id {
+ struct {
+ int domain;
+ int bus;
+ int device;
+ } pci;
+ struct device_node *of;
+};
+
+struct niu;
+struct niu_parent {
+ struct platform_device *plat_dev;
+ int index;
+
+ union niu_parent_id id;
+
+ struct niu *ports[NIU_MAX_PORTS];
+
+ atomic_t refcnt;
+ struct list_head list;
+
+ spinlock_t lock;
+
+ u32 flags;
+#define PARENT_FLGS_CLS_HWINIT 0x00000001
+
+ u32 port_phy;
+#define PORT_PHY_UNKNOWN 0x00000000
+#define PORT_PHY_INVALID 0xffffffff
+#define PORT_TYPE_10G 0x01
+#define PORT_TYPE_1G 0x02
+#define PORT_TYPE_MASK 0x03
+
+ u8 rxchan_per_port[NIU_MAX_PORTS];
+ u8 txchan_per_port[NIU_MAX_PORTS];
+
+ struct niu_rdc_tables rdc_group_cfg[NIU_MAX_PORTS];
+ u8 rdc_default[NIU_MAX_PORTS];
+
+ u8 ldg_map[LDN_MAX + 1];
+
+ u8 plat_type;
+#define PLAT_TYPE_INVALID 0x00
+#define PLAT_TYPE_ATLAS 0x01
+#define PLAT_TYPE_NIU 0x02
+#define PLAT_TYPE_VF_P0 0x03
+#define PLAT_TYPE_VF_P1 0x04
+#define PLAT_TYPE_ATCA_CP3220 0x08
+
+ u8 num_ports;
+
+ u16 tcam_num_entries;
+#define NIU_PCI_TCAM_ENTRIES 256
+#define NIU_NONPCI_TCAM_ENTRIES 128
+#define NIU_TCAM_ENTRIES_MAX 256
+
+ int rxdma_clock_divider;
+
+ struct phy_probe_info phy_probe_info;
+
+ struct niu_tcam_entry tcam[NIU_TCAM_ENTRIES_MAX];
+
+#define NIU_L2_PROG_CLS 2
+#define NIU_L3_PROG_CLS 4
+ u64 l2_cls[NIU_L2_PROG_CLS];
+ u64 l3_cls[NIU_L3_PROG_CLS];
+ u64 tcam_key[12];
+ u64 flow_key[12];
+ u16 l3_cls_refcnt[NIU_L3_PROG_CLS];
+ u8 l3_cls_pid[NIU_L3_PROG_CLS];
+};
+
+struct niu_ops {
+ void *(*alloc_coherent)(struct device *dev, size_t size,
+ u64 *handle, gfp_t flag);
+ void (*free_coherent)(struct device *dev, size_t size,
+ void *cpu_addr, u64 handle);
+ u64 (*map_page)(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction);
+ void (*unmap_page)(struct device *dev, u64 dma_address,
+ size_t size, enum dma_data_direction direction);
+ u64 (*map_single)(struct device *dev, void *cpu_addr,
+ size_t size,
+ enum dma_data_direction direction);
+ void (*unmap_single)(struct device *dev, u64 dma_address,
+ size_t size, enum dma_data_direction direction);
+};
+
+struct niu_link_config {
+ u32 supported;
+
+ /* Describes what we're trying to get. */
+ u32 advertising;
+ u16 speed;
+ u8 duplex;
+ u8 autoneg;
+
+ /* Describes what we actually have. */
+ u32 active_advertising;
+ u16 active_speed;
+ u8 active_duplex;
+ u8 active_autoneg;
+#define SPEED_INVALID 0xffff
+#define DUPLEX_INVALID 0xff
+#define AUTONEG_INVALID 0xff
+
+ u8 loopback_mode;
+#define LOOPBACK_DISABLED 0x00
+#define LOOPBACK_PHY 0x01
+#define LOOPBACK_MAC 0x02
+};
+
+struct niu_ldg {
+ struct napi_struct napi;
+ struct niu *np;
+ u8 ldg_num;
+ u8 timer;
+ u64 v0, v1, v2;
+ unsigned int irq;
+};
+
+struct niu_xmac_stats {
+ u64 tx_frames;
+ u64 tx_bytes;
+ u64 tx_fifo_errors;
+ u64 tx_overflow_errors;
+ u64 tx_max_pkt_size_errors;
+ u64 tx_underflow_errors;
+
+ u64 rx_local_faults;
+ u64 rx_remote_faults;
+ u64 rx_link_faults;
+ u64 rx_align_errors;
+ u64 rx_frags;
+ u64 rx_mcasts;
+ u64 rx_bcasts;
+ u64 rx_hist_cnt1;
+ u64 rx_hist_cnt2;
+ u64 rx_hist_cnt3;
+ u64 rx_hist_cnt4;
+ u64 rx_hist_cnt5;
+ u64 rx_hist_cnt6;
+ u64 rx_hist_cnt7;
+ u64 rx_octets;
+ u64 rx_code_violations;
+ u64 rx_len_errors;
+ u64 rx_crc_errors;
+ u64 rx_underflows;
+ u64 rx_overflows;
+
+ u64 pause_off_state;
+ u64 pause_on_state;
+ u64 pause_received;
+};
+
+struct niu_bmac_stats {
+ u64 tx_underflow_errors;
+ u64 tx_max_pkt_size_errors;
+ u64 tx_bytes;
+ u64 tx_frames;
+
+ u64 rx_overflows;
+ u64 rx_frames;
+ u64 rx_align_errors;
+ u64 rx_crc_errors;
+ u64 rx_len_errors;
+
+ u64 pause_off_state;
+ u64 pause_on_state;
+ u64 pause_received;
+};
+
+union niu_mac_stats {
+ struct niu_xmac_stats xmac;
+ struct niu_bmac_stats bmac;
+};
+
+struct niu_phy_ops {
+ int (*serdes_init)(struct niu *np);
+ int (*xcvr_init)(struct niu *np);
+ int (*link_status)(struct niu *np, int *);
+};
+
+struct platform_device;
+struct niu {
+ void __iomem *regs;
+ struct net_device *dev;
+ struct pci_dev *pdev;
+ struct device *device;
+ struct niu_parent *parent;
+
+ u32 flags;
+#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removable PHY detected */
+#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removable PHY */
+#define NIU_FLAGS_VPD_VALID 0x00800000 /* VPD has valid version */
+#define NIU_FLAGS_MSIX 0x00400000 /* MSI-X in use */
+#define NIU_FLAGS_MCAST 0x00200000 /* multicast filter enabled */
+#define NIU_FLAGS_PROMISC 0x00100000 /* PROMISC enabled */
+#define NIU_FLAGS_XCVR_SERDES 0x00080000 /* 0=PHY 1=SERDES */
+#define NIU_FLAGS_10G 0x00040000 /* 0=1G 1=10G */
+#define NIU_FLAGS_FIBER 0x00020000 /* 0=COPPER 1=FIBER */
+#define NIU_FLAGS_XMAC 0x00010000 /* 0=BMAC 1=XMAC */
+
+ u32 msg_enable;
+ char irq_name[NIU_NUM_RXCHAN+NIU_NUM_TXCHAN+3][IFNAMSIZ + 6];
+
+ /* Protects hw programming, and ring state. */
+ spinlock_t lock;
+
+ const struct niu_ops *ops;
+ union niu_mac_stats mac_stats;
+
+ struct rx_ring_info *rx_rings;
+ struct tx_ring_info *tx_rings;
+ int num_rx_rings;
+ int num_tx_rings;
+
+ struct niu_ldg ldg[NIU_NUM_LDG];
+ int num_ldg;
+
+ void __iomem *mac_regs;
+ unsigned long ipp_off;
+ unsigned long pcs_off;
+ unsigned long xpcs_off;
+
+ struct timer_list timer;
+ u64 orig_led_state;
+ const struct niu_phy_ops *phy_ops;
+ int phy_addr;
+
+ struct niu_link_config link_config;
+
+ struct work_struct reset_task;
+
+ u8 port;
+ u8 mac_xcvr;
+#define MAC_XCVR_MII 1
+#define MAC_XCVR_PCS 2
+#define MAC_XCVR_XPCS 3
+
+ struct niu_classifier clas;
+
+ struct niu_vpd vpd;
+ u32 eeprom_len;
+
+ struct platform_device *op;
+ void __iomem *vir_regs_1;
+ void __iomem *vir_regs_2;
+};
+
+#endif /* _NIU_H */
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
new file mode 100644
index 000000000000..0d8cfd9ea053
--- /dev/null
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -0,0 +1,1307 @@
+/* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters.
+ *
+ * Copyright (C) 1997, 1998, 1999, 2003, 2008 David S. Miller (davem@davemloft.net)
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/errno.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/gfp.h>
+
+#include <asm/auxio.h>
+#include <asm/byteorder.h>
+#include <asm/dma.h>
+#include <asm/idprom.h>
+#include <asm/io.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+
+#include "sunbmac.h"
+
+#define DRV_NAME "sunbmac"
+#define DRV_VERSION "2.1"
+#define DRV_RELDATE "August 26, 2008"
+#define DRV_AUTHOR "David S. Miller (davem@davemloft.net)"
+
+static char version[] =
+ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION("Sun BigMAC 100baseT ethernet driver");
+MODULE_LICENSE("GPL");
+
+#undef DEBUG_PROBE
+#undef DEBUG_TX
+#undef DEBUG_IRQ
+
+#ifdef DEBUG_PROBE
+#define DP(x) printk x
+#else
+#define DP(x)
+#endif
+
+#ifdef DEBUG_TX
+#define DTX(x) printk x
+#else
+#define DTX(x)
+#endif
+
+#ifdef DEBUG_IRQ
+#define DIRQ(x) printk x
+#else
+#define DIRQ(x)
+#endif
+
+#define DEFAULT_JAMSIZE 4 /* Toe jam */
+
+#define QEC_RESET_TRIES 200
+
+static int qec_global_reset(void __iomem *gregs)
+{
+ int tries = QEC_RESET_TRIES;
+
+ sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
+ while (--tries) {
+ if (sbus_readl(gregs + GLOB_CTRL) & GLOB_CTRL_RESET) {
+ udelay(20);
+ continue;
+ }
+ break;
+ }
+ if (tries)
+ return 0;
+ printk(KERN_ERR "BigMAC: Cannot reset the QEC.\n");
+ return -1;
+}
+
+static void qec_init(struct bigmac *bp)
+{
+ struct platform_device *qec_op = bp->qec_op;
+ void __iomem *gregs = bp->gregs;
+ u8 bsizes = bp->bigmac_bursts;
+ u32 regval;
+
+ /* 64-byte bursts do not work at the moment, do
+ * not even try to enable them. -DaveM
+ */
+ if (bsizes & DMA_BURST32)
+ regval = GLOB_CTRL_B32;
+ else
+ regval = GLOB_CTRL_B16;
+ sbus_writel(regval | GLOB_CTRL_BMODE, gregs + GLOB_CTRL);
+ sbus_writel(GLOB_PSIZE_2048, gregs + GLOB_PSIZE);
+
+ /* All of memsize is given to bigmac. */
+ sbus_writel(resource_size(&qec_op->resource[1]),
+ gregs + GLOB_MSIZE);
+
+ /* Half to the transmitter, half to the receiver. */
+ sbus_writel(resource_size(&qec_op->resource[1]) >> 1,
+ gregs + GLOB_TSIZE);
+ sbus_writel(resource_size(&qec_op->resource[1]) >> 1,
+ gregs + GLOB_RSIZE);
+}
+
+#define TX_RESET_TRIES 32
+#define RX_RESET_TRIES 32
+
+static void bigmac_tx_reset(void __iomem *bregs)
+{
+ int tries = TX_RESET_TRIES;
+
+ sbus_writel(0, bregs + BMAC_TXCFG);
+
+ /* The fifo threshold bit is read-only and does
+ * not clear. -DaveM
+ */
+ while ((sbus_readl(bregs + BMAC_TXCFG) & ~(BIGMAC_TXCFG_FIFO)) != 0 &&
+ --tries != 0)
+ udelay(20);
+
+ if (!tries) {
+ printk(KERN_ERR "BIGMAC: Transmitter will not reset.\n");
+ printk(KERN_ERR "BIGMAC: tx_cfg is %08x\n",
+ sbus_readl(bregs + BMAC_TXCFG));
+ }
+}
+
+static void bigmac_rx_reset(void __iomem *bregs)
+{
+ int tries = RX_RESET_TRIES;
+
+ sbus_writel(0, bregs + BMAC_RXCFG);
+ while (sbus_readl(bregs + BMAC_RXCFG) && --tries)
+ udelay(20);
+
+ if (!tries) {
+ printk(KERN_ERR "BIGMAC: Receiver will not reset.\n");
+ printk(KERN_ERR "BIGMAC: rx_cfg is %08x\n",
+ sbus_readl(bregs + BMAC_RXCFG));
+ }
+}
+
+/* Reset the transmitter and receiver. */
+static void bigmac_stop(struct bigmac *bp)
+{
+ bigmac_tx_reset(bp->bregs);
+ bigmac_rx_reset(bp->bregs);
+}
+
+static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs)
+{
+ struct net_device_stats *stats = &bp->enet_stats;
+
+ stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR);
+ sbus_writel(0, bregs + BMAC_RCRCECTR);
+
+ stats->rx_frame_errors += sbus_readl(bregs + BMAC_UNALECTR);
+ sbus_writel(0, bregs + BMAC_UNALECTR);
+
+ stats->rx_length_errors += sbus_readl(bregs + BMAC_GLECTR);
+ sbus_writel(0, bregs + BMAC_GLECTR);
+
+ stats->tx_aborted_errors += sbus_readl(bregs + BMAC_EXCTR);
+
+ stats->collisions +=
+ (sbus_readl(bregs + BMAC_EXCTR) +
+ sbus_readl(bregs + BMAC_LTCTR));
+ sbus_writel(0, bregs + BMAC_EXCTR);
+ sbus_writel(0, bregs + BMAC_LTCTR);
+}
+
+static void bigmac_clean_rings(struct bigmac *bp)
+{
+ int i;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (bp->rx_skbs[i] != NULL) {
+ dev_kfree_skb_any(bp->rx_skbs[i]);
+ bp->rx_skbs[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (bp->tx_skbs[i] != NULL) {
+ dev_kfree_skb_any(bp->tx_skbs[i]);
+ bp->tx_skbs[i] = NULL;
+ }
+ }
+}
+
+static void bigmac_init_rings(struct bigmac *bp, int from_irq)
+{
+ struct bmac_init_block *bb = bp->bmac_block;
+ struct net_device *dev = bp->dev;
+ int i;
+ gfp_t gfp_flags = GFP_KERNEL;
+
+ if (from_irq || in_interrupt())
+ gfp_flags = GFP_ATOMIC;
+
+ bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0;
+
+ /* Free any skippy bufs left around in the rings. */
+ bigmac_clean_rings(bp);
+
+ /* Now get new skbufs for the receive ring. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+
+ skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags);
+ if (!skb)
+ continue;
+
+ bp->rx_skbs[i] = skb;
+ skb->dev = dev;
+
+ /* skb_put() the full length first because we skb_reserve() afterwards. */
+ skb_put(skb, ETH_FRAME_LEN);
+ skb_reserve(skb, 34);
+
+ bb->be_rxd[i].rx_addr =
+ dma_map_single(&bp->bigmac_op->dev,
+ skb->data,
+ RX_BUF_ALLOC_SIZE - 34,
+ DMA_FROM_DEVICE);
+ bb->be_rxd[i].rx_flags =
+ (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++)
+ bb->be_txd[i].tx_flags = bb->be_txd[i].tx_addr = 0;
+}
+
+#define MGMT_CLKON (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB|MGMT_PAL_DCLOCK)
+#define MGMT_CLKOFF (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB)
+
+static void idle_transceiver(void __iomem *tregs)
+{
+ int i = 20;
+
+ while (i--) {
+ sbus_writel(MGMT_CLKOFF, tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ sbus_writel(MGMT_CLKON, tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ }
+}
+
+static void write_tcvr_bit(struct bigmac *bp, void __iomem *tregs, int bit)
+{
+ if (bp->tcvr_type == internal) {
+ bit = (bit & 1) << 3;
+ sbus_writel(bit | (MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO),
+ tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ sbus_writel(bit | MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
+ tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ } else if (bp->tcvr_type == external) {
+ bit = (bit & 1) << 2;
+ sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB,
+ tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB | MGMT_PAL_DCLOCK,
+ tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ } else {
+ printk(KERN_ERR "write_tcvr_bit: No transceiver type known!\n");
+ }
+}
+
+static int read_tcvr_bit(struct bigmac *bp, void __iomem *tregs)
+{
+ int retval = 0;
+
+ if (bp->tcvr_type == internal) {
+ sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
+ tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
+ } else if (bp->tcvr_type == external) {
+ sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
+ } else {
+ printk(KERN_ERR "read_tcvr_bit: No transceiver type known!\n");
+ }
+ return retval;
+}
+
+static int read_tcvr_bit2(struct bigmac *bp, void __iomem *tregs)
+{
+ int retval = 0;
+
+ if (bp->tcvr_type == internal) {
+ sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
+ sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ } else if (bp->tcvr_type == external) {
+ sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
+ sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ } else {
+ printk(KERN_ERR "read_tcvr_bit2: No transceiver type known!\n");
+ }
+ return retval;
+}
+
+static void put_tcvr_byte(struct bigmac *bp,
+ void __iomem *tregs,
+ unsigned int byte)
+{
+ int shift = 4;
+
+ do {
+ write_tcvr_bit(bp, tregs, ((byte >> shift) & 1));
+ shift -= 1;
+ } while (shift >= 0);
+}
+
+static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs,
+ int reg, unsigned short val)
+{
+ int shift;
+
+ reg &= 0xff;
+ val &= 0xffff;
+ switch(bp->tcvr_type) {
+ case internal:
+ case external:
+ break;
+
+ default:
+ printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
+ return;
+ }
+
+ idle_transceiver(tregs);
+ write_tcvr_bit(bp, tregs, 0);
+ write_tcvr_bit(bp, tregs, 1);
+ write_tcvr_bit(bp, tregs, 0);
+ write_tcvr_bit(bp, tregs, 1);
+
+ put_tcvr_byte(bp, tregs,
+ ((bp->tcvr_type == internal) ?
+ BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));
+
+ put_tcvr_byte(bp, tregs, reg);
+
+ write_tcvr_bit(bp, tregs, 1);
+ write_tcvr_bit(bp, tregs, 0);
+
+ shift = 15;
+ do {
+ write_tcvr_bit(bp, tregs, (val >> shift) & 1);
+ shift -= 1;
+ } while (shift >= 0);
+}
+
+static unsigned short bigmac_tcvr_read(struct bigmac *bp,
+ void __iomem *tregs,
+ int reg)
+{
+ unsigned short retval = 0;
+
+ reg &= 0xff;
+ switch(bp->tcvr_type) {
+ case internal:
+ case external:
+ break;
+
+ default:
+ printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
+ return 0xffff;
+ }
+
+ idle_transceiver(tregs);
+ write_tcvr_bit(bp, tregs, 0);
+ write_tcvr_bit(bp, tregs, 1);
+ write_tcvr_bit(bp, tregs, 1);
+ write_tcvr_bit(bp, tregs, 0);
+
+ put_tcvr_byte(bp, tregs,
+ ((bp->tcvr_type == internal) ?
+ BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));
+
+ put_tcvr_byte(bp, tregs, reg);
+
+ if (bp->tcvr_type == external) {
+ int shift = 15;
+
+ (void) read_tcvr_bit2(bp, tregs);
+ (void) read_tcvr_bit2(bp, tregs);
+
+ do {
+ int tmp;
+
+ tmp = read_tcvr_bit2(bp, tregs);
+ retval |= ((tmp & 1) << shift);
+ shift -= 1;
+ } while (shift >= 0);
+
+ (void) read_tcvr_bit2(bp, tregs);
+ (void) read_tcvr_bit2(bp, tregs);
+ (void) read_tcvr_bit2(bp, tregs);
+ } else {
+ int shift = 15;
+
+ (void) read_tcvr_bit(bp, tregs);
+ (void) read_tcvr_bit(bp, tregs);
+
+ do {
+ int tmp;
+
+ tmp = read_tcvr_bit(bp, tregs);
+ retval |= ((tmp & 1) << shift);
+ shift -= 1;
+ } while (shift >= 0);
+
+ (void) read_tcvr_bit(bp, tregs);
+ (void) read_tcvr_bit(bp, tregs);
+ (void) read_tcvr_bit(bp, tregs);
+ }
+ return retval;
+}
+
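+/* Illustrative use (a sketch): with the bit-banging helpers above, MII
+ * registers are read and written like
+ *
+ *	bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, MII_BMSR);
+ *	bigmac_tcvr_write(bp, bp->tregs, MII_BMCR, bp->sw_bmcr);
+ */
+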
+static void bigmac_tcvr_init(struct bigmac *bp)
+{
+ void __iomem *tregs = bp->tregs;
+ u32 mpal;
+
+ idle_transceiver(tregs);
+ sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
+ tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+
+ /* Only the bit for the present transceiver (internal or
+ * external) will stick, set them both and see what stays.
+ */
+ sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
+ sbus_readl(tregs + TCVR_MPAL);
+ udelay(20);
+
+ mpal = sbus_readl(tregs + TCVR_MPAL);
+ if (mpal & MGMT_PAL_EXT_MDIO) {
+ bp->tcvr_type = external;
+ sbus_writel(~(TCVR_PAL_EXTLBACK | TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
+ tregs + TCVR_TPAL);
+ sbus_readl(tregs + TCVR_TPAL);
+ } else if (mpal & MGMT_PAL_INT_MDIO) {
+ bp->tcvr_type = internal;
+ sbus_writel(~(TCVR_PAL_SERIAL | TCVR_PAL_EXTLBACK |
+ TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
+ tregs + TCVR_TPAL);
+ sbus_readl(tregs + TCVR_TPAL);
+ } else {
+ printk(KERN_ERR "BIGMAC: AIEEE, neither internal nor "
+ "external MDIO available!\n");
+ printk(KERN_ERR "BIGMAC: mgmt_pal[%08x] tcvr_pal[%08x]\n",
+ sbus_readl(tregs + TCVR_MPAL),
+ sbus_readl(tregs + TCVR_TPAL));
+ }
+}
+
+static int bigmac_init_hw(struct bigmac *, int);
+
+static int try_next_permutation(struct bigmac *bp, void __iomem *tregs)
+{
+ if (bp->sw_bmcr & BMCR_SPEED100) {
+ int timeout;
+
+ /* Reset the PHY. */
+ bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
+ bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
+ bp->sw_bmcr = (BMCR_RESET);
+ bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
+
+ timeout = 64;
+ while (--timeout) {
+ bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
+ if ((bp->sw_bmcr & BMCR_RESET) == 0)
+ break;
+ udelay(20);
+ }
+ if (timeout == 0)
+ printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);
+
+ bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
+
+ /* Now we try 10baseT. */
+ bp->sw_bmcr &= ~(BMCR_SPEED100);
+ bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
+ return 0;
+ }
+
+ /* We've tried them all. */
+ return -1;
+}
+
+static void bigmac_timer(unsigned long data)
+{
+ struct bigmac *bp = (struct bigmac *) data;
+ void __iomem *tregs = bp->tregs;
+ int restart_timer = 0;
+
+ bp->timer_ticks++;
+ if (bp->timer_state == ltrywait) {
+ bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, MII_BMSR);
+ bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
+ if (bp->sw_bmsr & BMSR_LSTATUS) {
+ printk(KERN_INFO "%s: Link is now up at %s.\n",
+ bp->dev->name,
+ (bp->sw_bmcr & BMCR_SPEED100) ?
+ "100baseT" : "10baseT");
+ bp->timer_state = asleep;
+ restart_timer = 0;
+ } else {
+ if (bp->timer_ticks >= 4) {
+ int ret;
+
+ ret = try_next_permutation(bp, tregs);
+ if (ret == -1) {
+ printk(KERN_ERR "%s: Link down, cable problem?\n",
+ bp->dev->name);
+ ret = bigmac_init_hw(bp, 0);
+ if (ret) {
+ printk(KERN_ERR "%s: Error, cannot re-init the "
+ "BigMAC.\n", bp->dev->name);
+ }
+ return;
+ }
+ bp->timer_ticks = 0;
+ restart_timer = 1;
+ } else {
+ restart_timer = 1;
+ }
+ }
+ } else {
+ /* Can't happen... */
+ printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyway!\n",
+ bp->dev->name);
+ restart_timer = 0;
+ bp->timer_ticks = 0;
+ bp->timer_state = asleep; /* foo on you */
+ }
+
+ if (restart_timer != 0) {
+ bp->bigmac_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
+ add_timer(&bp->bigmac_timer);
+ }
+}
+
+/* Well, really we just force the chip into 100baseT then
+ * 10baseT, each time checking for link status.
+ */
+static void bigmac_begin_auto_negotiation(struct bigmac *bp)
+{
+ void __iomem *tregs = bp->tregs;
+ int timeout;
+
+ /* Grab new software copies of PHY registers. */
+ bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, MII_BMSR);
+ bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
+
+ /* Reset the PHY. */
+ bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
+ bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
+ bp->sw_bmcr = (BMCR_RESET);
+ bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
+
+ timeout = 64;
+ while (--timeout) {
+ bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
+ if ((bp->sw_bmcr & BMCR_RESET) == 0)
+ break;
+ udelay(20);
+ }
+ if (timeout == 0)
+ printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);
+
+ bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
+
+ /* First we try 100baseT. */
+ bp->sw_bmcr |= BMCR_SPEED100;
+ bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
+
+ bp->timer_state = ltrywait;
+ bp->timer_ticks = 0;
+ bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10;
+ bp->bigmac_timer.data = (unsigned long) bp;
+ bp->bigmac_timer.function = bigmac_timer;
+ add_timer(&bp->bigmac_timer);
+}
+
+static int bigmac_init_hw(struct bigmac *bp, int from_irq)
+{
+ void __iomem *gregs = bp->gregs;
+ void __iomem *cregs = bp->creg;
+ void __iomem *bregs = bp->bregs;
+ unsigned char *e = &bp->dev->dev_addr[0];
+
+ /* Latch current counters into statistics. */
+ bigmac_get_counters(bp, bregs);
+
+ /* Reset QEC. */
+ qec_global_reset(gregs);
+
+ /* Init QEC. */
+ qec_init(bp);
+
+ /* Alloc and reset the tx/rx descriptor chains. */
+ bigmac_init_rings(bp, from_irq);
+
+ /* Initialize the PHY. */
+ bigmac_tcvr_init(bp);
+
+ /* Stop transmitter and receiver. */
+ bigmac_stop(bp);
+
+ /* Set hardware ethernet address. */
+ sbus_writel(((e[4] << 8) | e[5]), bregs + BMAC_MACADDR2);
+ sbus_writel(((e[2] << 8) | e[3]), bregs + BMAC_MACADDR1);
+ sbus_writel(((e[0] << 8) | e[1]), bregs + BMAC_MACADDR0);
+
+ /* Clear the hash table until mc upload occurs. */
+ sbus_writel(0, bregs + BMAC_HTABLE3);
+ sbus_writel(0, bregs + BMAC_HTABLE2);
+ sbus_writel(0, bregs + BMAC_HTABLE1);
+ sbus_writel(0, bregs + BMAC_HTABLE0);
+
+ /* Enable Big Mac hash table filter. */
+ sbus_writel(BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_FIFO,
+ bregs + BMAC_RXCFG);
+ udelay(20);
+
+ /* Ok, configure the Big Mac transmitter. */
+ sbus_writel(BIGMAC_TXCFG_FIFO, bregs + BMAC_TXCFG);
+
+ /* The HME docs recommend using the 10 LSBs of our MAC here. */
+ sbus_writel(((e[5] | e[4] << 8) & 0x3ff),
+ bregs + BMAC_RSEED);
+
+ /* Enable the output drivers no matter what. */
+ sbus_writel(BIGMAC_XCFG_ODENABLE | BIGMAC_XCFG_RESV,
+ bregs + BMAC_XIFCFG);
+
+ /* Tell the QEC where the ring descriptors are. */
+ sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0),
+ cregs + CREG_RXDS);
+ sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0),
+ cregs + CREG_TXDS);
+
+ /* Setup the FIFO pointers into QEC local memory. */
+ sbus_writel(0, cregs + CREG_RXRBUFPTR);
+ sbus_writel(0, cregs + CREG_RXWBUFPTR);
+ sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
+ cregs + CREG_TXRBUFPTR);
+ sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
+ cregs + CREG_TXWBUFPTR);
+
+ /* Tell bigmac what interrupts we don't want to hear about. */
+ sbus_writel(BIGMAC_IMASK_GOTFRAME | BIGMAC_IMASK_SENTFRAME,
+ bregs + BMAC_IMASK);
+
+ /* Enable the various other irq's. */
+ sbus_writel(0, cregs + CREG_RIMASK);
+ sbus_writel(0, cregs + CREG_TIMASK);
+ sbus_writel(0, cregs + CREG_QMASK);
+ sbus_writel(0, cregs + CREG_BMASK);
+
+ /* Set jam size to a reasonable default. */
+ sbus_writel(DEFAULT_JAMSIZE, bregs + BMAC_JSIZE);
+
+ /* Clear collision counter. */
+ sbus_writel(0, cregs + CREG_CCNT);
+
+ /* Enable transmitter and receiver. */
+ sbus_writel(sbus_readl(bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE,
+ bregs + BMAC_TXCFG);
+ sbus_writel(sbus_readl(bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE,
+ bregs + BMAC_RXCFG);
+
+ /* Ok, start detecting link speed/duplex. */
+ bigmac_begin_auto_negotiation(bp);
+
+ /* Success. */
+ return 0;
+}
+
+/* Error interrupts get sent here. */
+static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_status)
+{
+ printk(KERN_ERR "bigmac_is_medium_rare: ");
+ if (qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) {
+ if (qec_status & GLOB_STAT_ER)
+ printk("QEC_ERROR, ");
+ if (qec_status & GLOB_STAT_BM)
+ printk("QEC_BMAC_ERROR, ");
+ }
+ if (bmac_status & CREG_STAT_ERRORS) {
+ if (bmac_status & CREG_STAT_BERROR)
+ printk("BMAC_ERROR, ");
+ if (bmac_status & CREG_STAT_TXDERROR)
+ printk("TXD_ERROR, ");
+ if (bmac_status & CREG_STAT_TXLERR)
+ printk("TX_LATE_ERROR, ");
+ if (bmac_status & CREG_STAT_TXPERR)
+ printk("TX_PARITY_ERROR, ");
+ if (bmac_status & CREG_STAT_TXSERR)
+ printk("TX_SBUS_ERROR, ");
+
+ if (bmac_status & CREG_STAT_RXDROP)
+ printk("RX_DROP_ERROR, ");
+
+ if (bmac_status & CREG_STAT_RXSMALL)
+ printk("RX_SMALL_ERROR, ");
+ if (bmac_status & CREG_STAT_RXLERR)
+ printk("RX_LATE_ERROR, ");
+ if (bmac_status & CREG_STAT_RXPERR)
+ printk("RX_PARITY_ERROR, ");
+ if (bmac_status & CREG_STAT_RXSERR)
+ printk("RX_SBUS_ERROR, ");
+ }
+
+ printk(" RESET\n");
+ bigmac_init_hw(bp, 1);
+}
+
+/* BigMAC transmit complete service routines. */
+static void bigmac_tx(struct bigmac *bp)
+{
+ struct be_txd *txbase = &bp->bmac_block->be_txd[0];
+ struct net_device *dev = bp->dev;
+ int elem;
+
+ spin_lock(&bp->lock);
+
+ elem = bp->tx_old;
+ DTX(("bigmac_tx: tx_old[%d] ", elem));
+ while (elem != bp->tx_new) {
+ struct sk_buff *skb;
+ struct be_txd *this = &txbase[elem];
+
+ DTX(("this(%p) [flags(%08x)addr(%08x)]",
+ this, this->tx_flags, this->tx_addr));
+
+ if (this->tx_flags & TXD_OWN)
+ break;
+ skb = bp->tx_skbs[elem];
+ bp->enet_stats.tx_packets++;
+ bp->enet_stats.tx_bytes += skb->len;
+ dma_unmap_single(&bp->bigmac_op->dev,
+ this->tx_addr, skb->len,
+ DMA_TO_DEVICE);
+
+ DTX(("skb(%p) ", skb));
+ bp->tx_skbs[elem] = NULL;
+ dev_kfree_skb_irq(skb);
+
+ elem = NEXT_TX(elem);
+ }
+ DTX((" DONE, tx_old=%d\n", elem));
+ bp->tx_old = elem;
+
+ if (netif_queue_stopped(dev) &&
+ TX_BUFFS_AVAIL(bp) > 0)
+ netif_wake_queue(bp->dev);
+
+ spin_unlock(&bp->lock);
+}
+
+/* BigMAC receive complete service routines. */
+static void bigmac_rx(struct bigmac *bp)
+{
+ struct be_rxd *rxbase = &bp->bmac_block->be_rxd[0];
+ struct be_rxd *this;
+ int elem = bp->rx_new, drops = 0;
+ u32 flags;
+
+ this = &rxbase[elem];
+ while (!((flags = this->rx_flags) & RXD_OWN)) {
+ struct sk_buff *skb;
+ int len = (flags & RXD_LENGTH); /* FCS not included */
+
+ /* Check for errors. */
+ if (len < ETH_ZLEN) {
+ bp->enet_stats.rx_errors++;
+ bp->enet_stats.rx_length_errors++;
+
+ drop_it:
+ /* Return it to the BigMAC. */
+ bp->enet_stats.rx_dropped++;
+ this->rx_flags =
+ (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
+ goto next;
+ }
+ skb = bp->rx_skbs[elem];
+ if (len > RX_COPY_THRESHOLD) {
+ struct sk_buff *new_skb;
+
+ /* Now refill the entry, if we can. */
+ new_skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
+ if (new_skb == NULL) {
+ drops++;
+ goto drop_it;
+ }
+ dma_unmap_single(&bp->bigmac_op->dev,
+ this->rx_addr,
+ RX_BUF_ALLOC_SIZE - 34,
+ DMA_FROM_DEVICE);
+ bp->rx_skbs[elem] = new_skb;
+ new_skb->dev = bp->dev;
+ skb_put(new_skb, ETH_FRAME_LEN);
+ skb_reserve(new_skb, 34);
+ this->rx_addr =
+ dma_map_single(&bp->bigmac_op->dev,
+ new_skb->data,
+ RX_BUF_ALLOC_SIZE - 34,
+ DMA_FROM_DEVICE);
+ this->rx_flags =
+ (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
+
+ /* Trim the original skb for the netif. */
+ skb_trim(skb, len);
+ } else {
+ struct sk_buff *copy_skb = dev_alloc_skb(len + 2);
+
+ if (copy_skb == NULL) {
+ drops++;
+ goto drop_it;
+ }
+ skb_reserve(copy_skb, 2);
+ skb_put(copy_skb, len);
+ dma_sync_single_for_cpu(&bp->bigmac_op->dev,
+ this->rx_addr, len,
+ DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len);
+ dma_sync_single_for_device(&bp->bigmac_op->dev,
+ this->rx_addr, len,
+ DMA_FROM_DEVICE);
+
+ /* Reuse original ring buffer. */
+ this->rx_flags =
+ (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
+
+ skb = copy_skb;
+ }
+
+ /* No checksums done by the BigMAC ;-( */
+ skb->protocol = eth_type_trans(skb, bp->dev);
+ netif_rx(skb);
+ bp->enet_stats.rx_packets++;
+ bp->enet_stats.rx_bytes += len;
+ next:
+ elem = NEXT_RX(elem);
+ this = &rxbase[elem];
+ }
+ bp->rx_new = elem;
+ if (drops)
+ printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", bp->dev->name);
+}
+
+static irqreturn_t bigmac_interrupt(int irq, void *dev_id)
+{
+ struct bigmac *bp = (struct bigmac *) dev_id;
+ u32 qec_status, bmac_status;
+
+ DIRQ(("bigmac_interrupt: "));
+
+ /* Latch status registers now. */
+ bmac_status = sbus_readl(bp->creg + CREG_STAT);
+ qec_status = sbus_readl(bp->gregs + GLOB_STAT);
+
+ DIRQ(("qec_status=%08x bmac_status=%08x\n", qec_status, bmac_status));
+ if ((qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) ||
+ (bmac_status & CREG_STAT_ERRORS))
+ bigmac_is_medium_rare(bp, qec_status, bmac_status);
+
+ if (bmac_status & CREG_STAT_TXIRQ)
+ bigmac_tx(bp);
+
+ if (bmac_status & CREG_STAT_RXIRQ)
+ bigmac_rx(bp);
+
+ return IRQ_HANDLED;
+}
+
+static int bigmac_open(struct net_device *dev)
+{
+ struct bigmac *bp = netdev_priv(dev);
+ int ret;
+
+ ret = request_irq(dev->irq, bigmac_interrupt, IRQF_SHARED, dev->name, bp);
+ if (ret) {
+ printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq);
+ return ret;
+ }
+ init_timer(&bp->bigmac_timer);
+ ret = bigmac_init_hw(bp, 0);
+ if (ret)
+ free_irq(dev->irq, bp);
+ return ret;
+}
+
+static int bigmac_close(struct net_device *dev)
+{
+ struct bigmac *bp = netdev_priv(dev);
+
+ del_timer(&bp->bigmac_timer);
+ bp->timer_state = asleep;
+ bp->timer_ticks = 0;
+
+ bigmac_stop(bp);
+ bigmac_clean_rings(bp);
+ free_irq(dev->irq, bp);
+ return 0;
+}
+
+static void bigmac_tx_timeout(struct net_device *dev)
+{
+ struct bigmac *bp = netdev_priv(dev);
+
+ bigmac_init_hw(bp, 0);
+ netif_wake_queue(dev);
+}
+
+/* Put a packet on the wire. */
+static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bigmac *bp = netdev_priv(dev);
+ int len, entry;
+ u32 mapping;
+
+ len = skb->len;
+ mapping = dma_map_single(&bp->bigmac_op->dev, skb->data,
+ len, DMA_TO_DEVICE);
+
+ /* Avoid a race... */
+ spin_lock_irq(&bp->lock);
+ entry = bp->tx_new;
+ DTX(("bigmac_start_xmit: len(%d) entry(%d)\n", len, entry));
+ bp->bmac_block->be_txd[entry].tx_flags = TXD_UPDATE;
+ bp->tx_skbs[entry] = skb;
+ bp->bmac_block->be_txd[entry].tx_addr = mapping;
+ bp->bmac_block->be_txd[entry].tx_flags =
+ (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
+ bp->tx_new = NEXT_TX(entry);
+ if (TX_BUFFS_AVAIL(bp) <= 0)
+ netif_stop_queue(dev);
+ spin_unlock_irq(&bp->lock);
+
+ /* Get it going. */
+ sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL);
+
+ return NETDEV_TX_OK;
+}
+
+static struct net_device_stats *bigmac_get_stats(struct net_device *dev)
+{
+ struct bigmac *bp = netdev_priv(dev);
+
+ bigmac_get_counters(bp, bp->bregs);
+ return &bp->enet_stats;
+}
+
+static void bigmac_set_multicast(struct net_device *dev)
+{
+ struct bigmac *bp = netdev_priv(dev);
+ void __iomem *bregs = bp->bregs;
+ struct netdev_hw_addr *ha;
+ int i;
+ u32 tmp, crc;
+
+ /* Disable the receiver. The bit self-clears when
+ * the operation is complete.
+ */
+ tmp = sbus_readl(bregs + BMAC_RXCFG);
+ tmp &= ~(BIGMAC_RXCFG_ENABLE);
+ sbus_writel(tmp, bregs + BMAC_RXCFG);
+ while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0)
+ udelay(20);
+
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
+ sbus_writel(0xffff, bregs + BMAC_HTABLE0);
+ sbus_writel(0xffff, bregs + BMAC_HTABLE1);
+ sbus_writel(0xffff, bregs + BMAC_HTABLE2);
+ sbus_writel(0xffff, bregs + BMAC_HTABLE3);
+ } else if (dev->flags & IFF_PROMISC) {
+ tmp = sbus_readl(bregs + BMAC_RXCFG);
+ tmp |= BIGMAC_RXCFG_PMISC;
+ sbus_writel(tmp, bregs + BMAC_RXCFG);
+ } else {
+ u16 hash_table[4];
+
+ for (i = 0; i < 4; i++)
+ hash_table[i] = 0;
+
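+ /* ether_crc_le() yields a 32-bit CRC; the top six bits
+ * (crc >> 26) pick one of the 64 hash-filter bits. Bits
+ * [5:4] of that index select one of the four 16-bit
+ * HTABLE words, bits [3:0] the bit within the word, so
+ * e.g. an index of 43 (0b101011) sets bit 11 of
+ * hash_table[2].
+ */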
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
+ crc >>= 26;
+ hash_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
+ sbus_writel(hash_table[0], bregs + BMAC_HTABLE0);
+ sbus_writel(hash_table[1], bregs + BMAC_HTABLE1);
+ sbus_writel(hash_table[2], bregs + BMAC_HTABLE2);
+ sbus_writel(hash_table[3], bregs + BMAC_HTABLE3);
+ }
+
+ /* Re-enable the receiver. */
+ tmp = sbus_readl(bregs + BMAC_RXCFG);
+ tmp |= BIGMAC_RXCFG_ENABLE;
+ sbus_writel(tmp, bregs + BMAC_RXCFG);
+}
+
+/* Ethtool support... */
+static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, "sunbmac");
+ strcpy(info->version, "2.0");
+}
+
+static u32 bigmac_get_link(struct net_device *dev)
+{
+ struct bigmac *bp = netdev_priv(dev);
+
+ spin_lock_irq(&bp->lock);
+ bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, MII_BMSR);
+ spin_unlock_irq(&bp->lock);
+
+ return (bp->sw_bmsr & BMSR_LSTATUS);
+}
+
+static const struct ethtool_ops bigmac_ethtool_ops = {
+ .get_drvinfo = bigmac_get_drvinfo,
+ .get_link = bigmac_get_link,
+};
+
+static const struct net_device_ops bigmac_ops = {
+ .ndo_open = bigmac_open,
+ .ndo_stop = bigmac_close,
+ .ndo_start_xmit = bigmac_start_xmit,
+ .ndo_get_stats = bigmac_get_stats,
+ .ndo_set_rx_mode = bigmac_set_multicast,
+ .ndo_tx_timeout = bigmac_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __devinit bigmac_ether_init(struct platform_device *op,
+ struct platform_device *qec_op)
+{
+ static int version_printed;
+ struct net_device *dev;
+ u8 bsizes, bsizes_more;
+ struct bigmac *bp;
+ int i;
+
+ /* Get a new device struct for this interface. */
+ dev = alloc_etherdev(sizeof(struct bigmac));
+ if (!dev)
+ return -ENOMEM;
+
+ if (version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = idprom->id_ethaddr[i];
+
+ /* Setup softc, with backpointers to QEC and BigMAC SBUS device structs. */
+ bp = netdev_priv(dev);
+ bp->qec_op = qec_op;
+ bp->bigmac_op = op;
+
+ SET_NETDEV_DEV(dev, &op->dev);
+
+ spin_lock_init(&bp->lock);
+
+ /* Map in QEC global control registers. */
+ bp->gregs = of_ioremap(&qec_op->resource[0], 0,
+ GLOB_REG_SIZE, "BigMAC QEC Global Regs");
+ if (!bp->gregs) {
+ printk(KERN_ERR "BIGMAC: Cannot map QEC global registers.\n");
+ goto fail_and_cleanup;
+ }
+
+ /* Make sure QEC is in BigMAC mode. */
+ if ((sbus_readl(bp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_BMODE) {
+ printk(KERN_ERR "BigMAC: AIEEE, QEC is not in BigMAC mode!\n");
+ goto fail_and_cleanup;
+ }
+
+ /* Reset the QEC. */
+ if (qec_global_reset(bp->gregs))
+ goto fail_and_cleanup;
+
+ /* Get supported SBUS burst sizes. */
+ bsizes = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);
+ bsizes_more = of_getintprop_default(qec_op->dev.of_node->parent, "burst-sizes", 0xff);
+
+ bsizes &= 0xff;
+ if (bsizes_more != 0xff)
+ bsizes &= bsizes_more;
+ if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
+ (bsizes & DMA_BURST32) == 0)
+ bsizes = (DMA_BURST32 - 1);
+ bp->bigmac_bursts = bsizes;
+
+ /* Perform QEC initialization. */
+ qec_init(bp);
+
+ /* Map in the BigMAC channel registers. */
+ bp->creg = of_ioremap(&op->resource[0], 0,
+ CREG_REG_SIZE, "BigMAC QEC Channel Regs");
+ if (!bp->creg) {
+ printk(KERN_ERR "BIGMAC: Cannot map QEC channel registers.\n");
+ goto fail_and_cleanup;
+ }
+
+ /* Map in the BigMAC control registers. */
+ bp->bregs = of_ioremap(&op->resource[1], 0,
+ BMAC_REG_SIZE, "BigMAC Primary Regs");
+ if (!bp->bregs) {
+ printk(KERN_ERR "BIGMAC: Cannot map BigMAC primary registers.\n");
+ goto fail_and_cleanup;
+ }
+
+ /* Map in the BigMAC transceiver registers; this is how you poke at
+ * the BigMAC's PHY.
+ */
+ bp->tregs = of_ioremap(&op->resource[2], 0,
+ TCVR_REG_SIZE, "BigMAC Transceiver Regs");
+ if (!bp->tregs) {
+ printk(KERN_ERR "BIGMAC: Cannot map BigMAC transceiver registers.\n");
+ goto fail_and_cleanup;
+ }
+
+ /* Stop the BigMAC. */
+ bigmac_stop(bp);
+
+ /* Allocate transmit/receive descriptor DVMA block. */
+ bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev,
+ PAGE_SIZE,
+ &bp->bblock_dvma, GFP_ATOMIC);
+ if (bp->bmac_block == NULL || bp->bblock_dvma == 0) {
+ printk(KERN_ERR "BIGMAC: Cannot allocate consistent DMA.\n");
+ goto fail_and_cleanup;
+ }
+
+ /* Get the board revision of this BigMAC. */
+ bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node,
+ "board-version", 1);
+
+ /* Init auto-negotiation timer state. */
+ init_timer(&bp->bigmac_timer);
+ bp->timer_state = asleep;
+ bp->timer_ticks = 0;
+
+ /* Backlink to generic net device struct. */
+ bp->dev = dev;
+
+ /* Set links to our BigMAC open and close routines. */
+ dev->ethtool_ops = &bigmac_ethtool_ops;
+ dev->netdev_ops = &bigmac_ops;
+ dev->watchdog_timeo = 5*HZ;
+
+ /* Finish net device registration. */
+ dev->irq = bp->bigmac_op->archdata.irqs[0];
+ dev->dma = 0;
+
+ if (register_netdev(dev)) {
+ printk(KERN_ERR "BIGMAC: Cannot register device.\n");
+ goto fail_and_cleanup;
+ }
+
+ dev_set_drvdata(&bp->bigmac_op->dev, bp);
+
+ printk(KERN_INFO "%s: BigMAC 100baseT Ethernet %pM\n",
+ dev->name, dev->dev_addr);
+
+ return 0;
+
+fail_and_cleanup:
+ /* Something went wrong, undo whatever we did so far. */
+ /* Free register mappings if any. */
+ if (bp->gregs)
+ of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE);
+ if (bp->creg)
+ of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE);
+ if (bp->bregs)
+ of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE);
+ if (bp->tregs)
+ of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE);
+
+ if (bp->bmac_block)
+ dma_free_coherent(&bp->bigmac_op->dev,
+ PAGE_SIZE,
+ bp->bmac_block,
+ bp->bblock_dvma);
+
+ /* This also frees the co-located private data */
+ free_netdev(dev);
+ return -ENODEV;
+}
+
+/* QEC can be the parent of either QuadEthernet or a BigMAC. We want
+ * the latter.
+ */
+static int __devinit bigmac_sbus_probe(struct platform_device *op)
+{
+ struct device *parent = op->dev.parent;
+ struct platform_device *qec_op;
+
+ qec_op = to_platform_device(parent);
+
+ return bigmac_ether_init(op, qec_op);
+}
+
+static int __devexit bigmac_sbus_remove(struct platform_device *op)
+{
+ struct bigmac *bp = dev_get_drvdata(&op->dev);
+ struct device *parent = op->dev.parent;
+ struct net_device *net_dev = bp->dev;
+ struct platform_device *qec_op;
+
+ qec_op = to_platform_device(parent);
+
+ unregister_netdev(net_dev);
+
+ of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE);
+ of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE);
+ of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE);
+ of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE);
+ dma_free_coherent(&op->dev,
+ PAGE_SIZE,
+ bp->bmac_block,
+ bp->bblock_dvma);
+
+ free_netdev(net_dev);
+
+ dev_set_drvdata(&op->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id bigmac_sbus_match[] = {
+ {
+ .name = "be",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, bigmac_sbus_match);
+
+static struct platform_driver bigmac_sbus_driver = {
+ .driver = {
+ .name = "sunbmac",
+ .owner = THIS_MODULE,
+ .of_match_table = bigmac_sbus_match,
+ },
+ .probe = bigmac_sbus_probe,
+ .remove = __devexit_p(bigmac_sbus_remove),
+};
+
+static int __init bigmac_init(void)
+{
+ return platform_driver_register(&bigmac_sbus_driver);
+}
+
+static void __exit bigmac_exit(void)
+{
+ platform_driver_unregister(&bigmac_sbus_driver);
+}
+
+module_init(bigmac_init);
+module_exit(bigmac_exit);
diff --git a/drivers/net/ethernet/sun/sunbmac.h b/drivers/net/ethernet/sun/sunbmac.h
new file mode 100644
index 000000000000..06dd21707353
--- /dev/null
+++ b/drivers/net/ethernet/sun/sunbmac.h
@@ -0,0 +1,338 @@
+/* $Id: sunbmac.h,v 1.7 2000/07/11 22:35:22 davem Exp $
+ * sunbmac.h: Defines for the Sun "Big MAC" 100baseT ethernet cards.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#ifndef _SUNBMAC_H
+#define _SUNBMAC_H
+
+/* QEC global registers. */
+#define GLOB_CTRL 0x00UL /* Control */
+#define GLOB_STAT 0x04UL /* Status */
+#define GLOB_PSIZE 0x08UL /* Packet Size */
+#define GLOB_MSIZE 0x0cUL /* Local-mem size (64K) */
+#define GLOB_RSIZE 0x10UL /* Receive partition size */
+#define GLOB_TSIZE 0x14UL /* Transmit partition size */
+#define GLOB_REG_SIZE 0x18UL
+
+#define GLOB_CTRL_MMODE 0x40000000 /* MACE qec mode */
+#define GLOB_CTRL_BMODE 0x10000000 /* BigMAC qec mode */
+#define GLOB_CTRL_EPAR 0x00000020 /* Enable parity */
+#define GLOB_CTRL_ACNTRL 0x00000018 /* SBUS arbitration control */
+#define GLOB_CTRL_B64 0x00000004 /* 64 byte dvma bursts */
+#define GLOB_CTRL_B32 0x00000002 /* 32 byte dvma bursts */
+#define GLOB_CTRL_B16 0x00000000 /* 16 byte dvma bursts */
+#define GLOB_CTRL_RESET 0x00000001 /* Reset the QEC */
+
+#define GLOB_STAT_TX 0x00000008 /* BigMAC Transmit IRQ */
+#define GLOB_STAT_RX 0x00000004 /* BigMAC Receive IRQ */
+#define GLOB_STAT_BM 0x00000002 /* BigMAC Global IRQ */
+#define GLOB_STAT_ER 0x00000001 /* BigMAC Error IRQ */
+
+#define GLOB_PSIZE_2048 0x00 /* 2k packet size */
+#define GLOB_PSIZE_4096 0x01 /* 4k packet size */
+#define GLOB_PSIZE_6144 0x10 /* 6k packet size */
+#define GLOB_PSIZE_8192 0x11 /* 8k packet size */
+
+/* QEC BigMAC channel registers. */
+#define CREG_CTRL 0x00UL /* Control */
+#define CREG_STAT 0x04UL /* Status */
+#define CREG_RXDS 0x08UL /* RX descriptor ring ptr */
+#define CREG_TXDS 0x0cUL /* TX descriptor ring ptr */
+#define CREG_RIMASK 0x10UL /* RX Interrupt Mask */
+#define CREG_TIMASK 0x14UL /* TX Interrupt Mask */
+#define CREG_QMASK 0x18UL /* QEC Error Interrupt Mask */
+#define CREG_BMASK 0x1cUL /* BigMAC Error Interrupt Mask*/
+#define CREG_RXWBUFPTR 0x20UL /* Local memory rx write ptr */
+#define CREG_RXRBUFPTR 0x24UL /* Local memory rx read ptr */
+#define CREG_TXWBUFPTR 0x28UL /* Local memory tx write ptr */
+#define CREG_TXRBUFPTR 0x2cUL /* Local memory tx read ptr */
+#define CREG_CCNT 0x30UL /* Collision Counter */
+#define CREG_REG_SIZE 0x34UL
+
+#define CREG_CTRL_TWAKEUP 0x00000001 /* Transmitter Wakeup, 'go'. */
+
+#define CREG_STAT_BERROR 0x80000000 /* BigMAC error */
+#define CREG_STAT_TXIRQ 0x00200000 /* Transmit Interrupt */
+#define CREG_STAT_TXDERROR 0x00080000 /* TX Descriptor is bogus */
+#define CREG_STAT_TXLERR 0x00040000 /* Late Transmit Error */
+#define CREG_STAT_TXPERR 0x00020000 /* Transmit Parity Error */
+#define CREG_STAT_TXSERR 0x00010000 /* Transmit SBUS error ack */
+#define CREG_STAT_RXIRQ 0x00000020 /* Receive Interrupt */
+#define CREG_STAT_RXDROP 0x00000010 /* Dropped a RX'd packet */
+#define CREG_STAT_RXSMALL 0x00000008 /* Receive buffer too small */
+#define CREG_STAT_RXLERR 0x00000004 /* Receive Late Error */
+#define CREG_STAT_RXPERR 0x00000002 /* Receive Parity Error */
+#define CREG_STAT_RXSERR 0x00000001 /* Receive SBUS Error ACK */
+
+#define CREG_STAT_ERRORS (CREG_STAT_BERROR|CREG_STAT_TXDERROR|CREG_STAT_TXLERR| \
+ CREG_STAT_TXPERR|CREG_STAT_TXSERR|CREG_STAT_RXDROP| \
+ CREG_STAT_RXSMALL|CREG_STAT_RXLERR|CREG_STAT_RXPERR| \
+ CREG_STAT_RXSERR)
+
+#define CREG_QMASK_TXDERROR 0x00080000 /* TXD error */
+#define CREG_QMASK_TXLERR 0x00040000 /* TX late error */
+#define CREG_QMASK_TXPERR 0x00020000 /* TX parity error */
+#define CREG_QMASK_TXSERR 0x00010000 /* TX sbus error ack */
+#define CREG_QMASK_RXDROP 0x00000010 /* RX drop */
+#define CREG_QMASK_RXBERROR 0x00000008 /* RX buffer error */
+#define CREG_QMASK_RXLEERR 0x00000004 /* RX late error */
+#define CREG_QMASK_RXPERR 0x00000002 /* RX parity error */
+#define CREG_QMASK_RXSERR 0x00000001 /* RX sbus error ack */
+
+/* BIGMAC core registers */
+#define BMAC_XIFCFG 0x000UL /* XIF config register */
+ /* 0x004-->0x0fc, reserved */
+#define BMAC_STATUS 0x100UL /* Status register, clear on read */
+#define BMAC_IMASK 0x104UL /* Interrupt mask register */
+ /* 0x108-->0x204, reserved */
+#define BMAC_TXSWRESET 0x208UL /* Transmitter software reset */
+#define BMAC_TXCFG 0x20cUL /* Transmitter config register */
+#define BMAC_IGAP1 0x210UL /* Inter-packet gap 1 */
+#define BMAC_IGAP2 0x214UL /* Inter-packet gap 2 */
+#define BMAC_ALIMIT 0x218UL /* Transmit attempt limit */
+#define BMAC_STIME 0x21cUL /* Transmit slot time */
+#define BMAC_PLEN 0x220UL /* Size of transmit preamble */
+#define BMAC_PPAT 0x224UL /* Pattern for transmit preamble */
+#define BMAC_TXDELIM 0x228UL /* Transmit delimiter */
+#define BMAC_JSIZE 0x22cUL /* Transmit jam size */
+#define BMAC_TXPMAX 0x230UL /* Transmit max pkt size */
+#define BMAC_TXPMIN 0x234UL /* Transmit min pkt size */
+#define BMAC_PATTEMPT 0x238UL /* Count of transmit peak attempts */
+#define BMAC_DTCTR 0x23cUL /* Transmit defer timer */
+#define BMAC_NCCTR 0x240UL /* Transmit normal-collision counter */
+#define BMAC_FCCTR 0x244UL /* Transmit first-collision counter */
+#define BMAC_EXCTR 0x248UL /* Transmit excess-collision counter */
+#define BMAC_LTCTR 0x24cUL /* Transmit late-collision counter */
+#define BMAC_RSEED 0x250UL /* Transmit random number seed */
+#define BMAC_TXSMACHINE 0x254UL /* Transmit state machine */
+ /* 0x258-->0x304, reserved */
+#define BMAC_RXSWRESET 0x308UL /* Receiver software reset */
+#define BMAC_RXCFG 0x30cUL /* Receiver config register */
+#define BMAC_RXPMAX 0x310UL /* Receive max pkt size */
+#define BMAC_RXPMIN 0x314UL /* Receive min pkt size */
+#define BMAC_MACADDR2 0x318UL /* Ether address register 2 */
+#define BMAC_MACADDR1 0x31cUL /* Ether address register 1 */
+#define BMAC_MACADDR0 0x320UL /* Ether address register 0 */
+#define BMAC_FRCTR 0x324UL /* Receive frame receive counter */
+#define BMAC_GLECTR 0x328UL /* Receive giant-length error counter */
+#define BMAC_UNALECTR 0x32cUL /* Receive unaligned error counter */
+#define BMAC_RCRCECTR 0x330UL /* Receive CRC error counter */
+#define BMAC_RXSMACHINE 0x334UL /* Receiver state machine */
+#define BMAC_RXCVALID 0x338UL /* Receiver code violation */
+ /* 0x33c, reserved */
+#define BMAC_HTABLE3 0x340UL /* Hash table 3 */
+#define BMAC_HTABLE2 0x344UL /* Hash table 2 */
+#define BMAC_HTABLE1 0x348UL /* Hash table 1 */
+#define BMAC_HTABLE0 0x34cUL /* Hash table 0 */
+#define BMAC_AFILTER2 0x350UL /* Address filter 2 */
+#define BMAC_AFILTER1 0x354UL /* Address filter 1 */
+#define BMAC_AFILTER0 0x358UL /* Address filter 0 */
+#define BMAC_AFMASK 0x35cUL /* Address filter mask */
+#define BMAC_REG_SIZE 0x360UL
+
+/* BigMac XIF config register. */
+#define BIGMAC_XCFG_ODENABLE 0x00000001 /* Output driver enable */
+#define BIGMAC_XCFG_RESV 0x00000002 /* Reserved, write always as 1 */
+#define BIGMAC_XCFG_MLBACK 0x00000004 /* Loopback-mode MII enable */
+#define BIGMAC_XCFG_SMODE 0x00000008 /* Enable serial mode */
+
+/* BigMAC status register. */
+#define BIGMAC_STAT_GOTFRAME 0x00000001 /* Received a frame */
+#define BIGMAC_STAT_RCNTEXP 0x00000002 /* Receive frame counter expired */
+#define BIGMAC_STAT_ACNTEXP 0x00000004 /* Align-error counter expired */
+#define BIGMAC_STAT_CCNTEXP 0x00000008 /* CRC-error counter expired */
+#define BIGMAC_STAT_LCNTEXP 0x00000010 /* Length-error counter expired */
+#define BIGMAC_STAT_RFIFOVF 0x00000020 /* Receive FIFO overflow */
+#define BIGMAC_STAT_CVCNTEXP 0x00000040 /* Code-violation counter expired */
+#define BIGMAC_STAT_SENTFRAME 0x00000100 /* Transmitted a frame */
+#define BIGMAC_STAT_TFIFO_UND 0x00000200 /* Transmit FIFO underrun */
+#define BIGMAC_STAT_MAXPKTERR 0x00000400 /* Max-packet size error */
+#define BIGMAC_STAT_NCNTEXP 0x00000800 /* Normal-collision counter expired */
+#define BIGMAC_STAT_ECNTEXP 0x00001000 /* Excess-collision counter expired */
+#define BIGMAC_STAT_LCCNTEXP 0x00002000 /* Late-collision counter expired */
+#define BIGMAC_STAT_FCNTEXP 0x00004000 /* First-collision counter expired */
+#define BIGMAC_STAT_DTIMEXP 0x00008000 /* Defer-timer expired */
+
+/* BigMAC interrupt mask register. */
+#define BIGMAC_IMASK_GOTFRAME 0x00000001 /* Received a frame */
+#define BIGMAC_IMASK_RCNTEXP 0x00000002 /* Receive frame counter expired */
+#define BIGMAC_IMASK_ACNTEXP 0x00000004 /* Align-error counter expired */
+#define BIGMAC_IMASK_CCNTEXP 0x00000008 /* CRC-error counter expired */
+#define BIGMAC_IMASK_LCNTEXP 0x00000010 /* Length-error counter expired */
+#define BIGMAC_IMASK_RFIFOVF 0x00000020 /* Receive FIFO overflow */
+#define BIGMAC_IMASK_CVCNTEXP 0x00000040 /* Code-violation counter expired */
+#define BIGMAC_IMASK_SENTFRAME 0x00000100 /* Transmitted a frame */
+#define BIGMAC_IMASK_TFIFO_UND 0x00000200 /* Transmit FIFO underrun */
+#define BIGMAC_IMASK_MAXPKTERR 0x00000400 /* Max-packet size error */
+#define BIGMAC_IMASK_NCNTEXP 0x00000800 /* Normal-collision counter expired */
+#define BIGMAC_IMASK_ECNTEXP 0x00001000 /* Excess-collision counter expired */
+#define BIGMAC_IMASK_LCCNTEXP 0x00002000 /* Late-collision counter expired */
+#define BIGMAC_IMASK_FCNTEXP 0x00004000 /* First-collision counter expired */
+#define BIGMAC_IMASK_DTIMEXP 0x00008000 /* Defer-timer expired */
+
+/* BigMac transmit config register. */
+#define BIGMAC_TXCFG_ENABLE 0x00000001 /* Enable the transmitter */
+#define BIGMAC_TXCFG_FIFO 0x00000010 /* Default tx fthresh... */
+#define BIGMAC_TXCFG_SMODE 0x00000020 /* Enable slow transmit mode */
+#define BIGMAC_TXCFG_CIGN 0x00000040 /* Ignore transmit collisions */
+#define BIGMAC_TXCFG_FCSOFF 0x00000080 /* Do not emit FCS */
+#define BIGMAC_TXCFG_DBACKOFF 0x00000100 /* Disable backoff */
+#define BIGMAC_TXCFG_FULLDPLX 0x00000200 /* Enable full-duplex */
+
+/* BigMac receive config register. */
+#define BIGMAC_RXCFG_ENABLE 0x00000001 /* Enable the receiver */
+#define BIGMAC_RXCFG_FIFO 0x0000000e /* Default rx fthresh... */
+#define BIGMAC_RXCFG_PSTRIP 0x00000020 /* Pad byte strip enable */
+#define BIGMAC_RXCFG_PMISC 0x00000040 /* Enable promiscuous mode */
+#define BIGMAC_RXCFG_DERR 0x00000080 /* Disable error checking */
+#define BIGMAC_RXCFG_DCRCS 0x00000100 /* Disable CRC stripping */
+#define BIGMAC_RXCFG_ME 0x00000200 /* Receive packets addressed to me */
+#define BIGMAC_RXCFG_PGRP 0x00000400 /* Enable promisc group mode */
+#define BIGMAC_RXCFG_HENABLE 0x00000800 /* Enable the hash filter */
+#define BIGMAC_RXCFG_AENABLE 0x00001000 /* Enable the address filter */
+
+/* The BigMAC PHY transceiver. Not nearly as sophisticated as the happy meal
+ * one. But it does have the "bit banger", oh baby.
+ */
+#define TCVR_TPAL 0x00UL
+#define TCVR_MPAL 0x04UL
+#define TCVR_REG_SIZE 0x08UL
+
+/* Frame commands. */
+#define FRAME_WRITE 0x50020000
+#define FRAME_READ 0x60020000
+
+/* Transceiver registers. */
+#define TCVR_PAL_SERIAL 0x00000001 /* Enable serial mode */
+#define TCVR_PAL_EXTLBACK 0x00000002 /* Enable external loopback */
+#define TCVR_PAL_MSENSE 0x00000004 /* Media sense */
+#define TCVR_PAL_LTENABLE 0x00000008 /* Link test enable */
+#define TCVR_PAL_LTSTATUS 0x00000010 /* Link test status (P1 only) */
+
+/* Management PAL. */
+#define MGMT_PAL_DCLOCK 0x00000001 /* Data clock */
+#define MGMT_PAL_OENAB 0x00000002 /* Output enabler */
+#define MGMT_PAL_MDIO 0x00000004 /* MDIO Data/attached */
+#define MGMT_PAL_TIMEO 0x00000008 /* Transmit enable timeout error */
+#define MGMT_PAL_EXT_MDIO MGMT_PAL_MDIO
+#define MGMT_PAL_INT_MDIO MGMT_PAL_TIMEO
+
+/* Here are some PHY addresses. */
+#define BIGMAC_PHY_EXTERNAL 0 /* External transceiver */
+#define BIGMAC_PHY_INTERNAL 1 /* Internal transceiver */
+
+/* Ring descriptors and such, same as Quad Ethernet. */
+struct be_rxd {
+ u32 rx_flags;
+ u32 rx_addr;
+};
+
+#define RXD_OWN 0x80000000 /* Ownership. */
+#define RXD_UPDATE 0x10000000 /* Being Updated? */
+#define RXD_LENGTH 0x000007ff /* Packet Length. */
+
+struct be_txd {
+ u32 tx_flags;
+ u32 tx_addr;
+};
+
+#define TXD_OWN 0x80000000 /* Ownership. */
+#define TXD_SOP 0x40000000 /* Start Of Packet */
+#define TXD_EOP 0x20000000 /* End Of Packet */
+#define TXD_UPDATE 0x10000000 /* Being Updated? */
+#define TXD_LENGTH 0x000007ff /* Packet Length. */
+
+#define TX_RING_MAXSIZE 256
+#define RX_RING_MAXSIZE 256
+
+#define TX_RING_SIZE 256
+#define RX_RING_SIZE 256
+
+#define NEXT_RX(num) (((num) + 1) & (RX_RING_SIZE - 1))
+#define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
+#define PREV_RX(num) (((num) - 1) & (RX_RING_SIZE - 1))
+#define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))
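+
+/* The ring sizes are powers of two, so the mask arithmetic above
+ * wraps for free: e.g. NEXT_TX(255) == (256 & 255) == 0 and
+ * PREV_RX(0) == (-1 & 255) == 255.
+ */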
+
+#define TX_BUFFS_AVAIL(bp) \
+ (((bp)->tx_old <= (bp)->tx_new) ? \
+ (bp)->tx_old + (TX_RING_SIZE - 1) - (bp)->tx_new : \
+ (bp)->tx_old - (bp)->tx_new - 1)
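+
+/* Worked example: with tx_old == 10 and tx_new == 250 the first
+ * branch gives 10 + 255 - 250 == 15 free slots; with tx_old == 100
+ * and tx_new == 40 the second gives 100 - 40 - 1 == 59. One
+ * descriptor is always held back, so an empty ring reports
+ * TX_RING_SIZE - 1 and a full ring never aliases an empty one.
+ */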
+
+
+#define RX_COPY_THRESHOLD 256
+#define RX_BUF_ALLOC_SIZE (ETH_FRAME_LEN + (64 * 3))
+
+struct bmac_init_block {
+ struct be_rxd be_rxd[RX_RING_MAXSIZE];
+ struct be_txd be_txd[TX_RING_MAXSIZE];
+};
+
+#define bib_offset(mem, elem) \
+((__u32)((unsigned long)(&(((struct bmac_init_block *)0)->mem[elem]))))
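+
+/* This is the classic offsetof() idiom: the byte offset of mem[elem]
+ * within struct bmac_init_block. The driver presumably adds it to
+ * bblock_dvma when programming the CREG_RXDS/CREG_TXDS ring pointers.
+ */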
+
+/* Now software state stuff. */
+enum bigmac_transceiver {
+ external = 0,
+ internal = 1,
+ none = 2,
+};
+
+/* Timer state engine. */
+enum bigmac_timer_state {
+ ltrywait = 1, /* Forcing try of all modes, from fastest to slowest. */
+ asleep = 2, /* Timer inactive. */
+};
+
+struct bigmac {
+ void __iomem *gregs; /* QEC Global Registers */
+ void __iomem *creg; /* QEC BigMAC Channel Registers */
+ void __iomem *bregs; /* BigMAC Registers */
+ void __iomem *tregs; /* BigMAC Transceiver */
+ struct bmac_init_block *bmac_block; /* RX and TX descriptors */
+ __u32 bblock_dvma; /* RX and TX descriptors */
+
+ spinlock_t lock;
+
+ struct sk_buff *rx_skbs[RX_RING_SIZE];
+ struct sk_buff *tx_skbs[TX_RING_SIZE];
+
+ int rx_new, tx_new, rx_old, tx_old;
+
+ int board_rev; /* BigMAC board revision. */
+
+ enum bigmac_transceiver tcvr_type;
+ unsigned int bigmac_bursts;
+ unsigned int paddr;
+ unsigned short sw_bmsr; /* SW copy of PHY BMSR */
+ unsigned short sw_bmcr; /* SW copy of PHY BMCR */
+ struct timer_list bigmac_timer;
+ enum bigmac_timer_state timer_state;
+ unsigned int timer_ticks;
+
+ struct net_device_stats enet_stats;
+ struct platform_device *qec_op;
+ struct platform_device *bigmac_op;
+ struct net_device *dev;
+};
+
+/* We use this to acquire receive skb's that we can DMA directly into. */
+#define ALIGNED_RX_SKB_ADDR(addr) \
+ ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr))
+
+static inline struct sk_buff *big_mac_alloc_skb(unsigned int length, gfp_t gfp_flags)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(length + 64, gfp_flags);
+ if(skb) {
+ int offset = ALIGNED_RX_SKB_ADDR(skb->data);
+
+ if(offset)
+ skb_reserve(skb, offset);
+ }
+ return skb;
+}
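+
+/* Worked example: if alloc_skb() returns data ending in 0xc5, then
+ * ALIGNED_RX_SKB_ADDR gives ((0xc5 + 63) & ~63) - 0xc5 == 0x3b, and
+ * skb_reserve(skb, 0x3b) advances data to the next 64-byte boundary.
+ * The extra 64 bytes requested above guarantee room for this shift.
+ */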
+
+#endif /* !(_SUNBMAC_H) */
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
new file mode 100644
index 000000000000..6b62a73227c2
--- /dev/null
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -0,0 +1,3047 @@
+/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
+ * sungem.c: Sun GEM ethernet driver.
+ *
+ * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com)
+ *
+ * Support for Apple GMAC and assorted PHYs, WOL, Power Management
+ * (C) 2001,2002,2003 Benjamin Herrenscmidt (benh@kernel.crashing.org)
+ * (C) 2004,2005 Benjamin Herrenscmidt, IBM Corp.
+ *
+ * NAPI and NETPOLL support
+ * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/random.h>
+#include <linux/workqueue.h>
+#include <linux/if_vlan.h>
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <linux/gfp.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+
+#ifdef CONFIG_SPARC
+#include <asm/idprom.h>
+#include <asm/prom.h>
+#endif
+
+#ifdef CONFIG_PPC_PMAC
+#include <asm/pci-bridge.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#endif
+
+#include <linux/sungem_phy.h>
+#include "sungem.h"
+
+/* Stripping FCS is causing problems, disabled for now */
+#undef STRIP_FCS
+
+#define DEFAULT_MSG (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK)
+
+#define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
+ SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \
+ SUPPORTED_Pause | SUPPORTED_Autoneg)
+
+#define DRV_NAME "sungem"
+#define DRV_VERSION "1.0"
+#define DRV_AUTHOR "David S. Miller <davem@redhat.com>"
+
+static char version[] __devinitdata =
+ DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n";
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define GEM_MODULE_NAME "gem"
+
+static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = {
+ { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+
+ /* These models only differ from the original GEM in
+ * that their tx/rx fifos are of a different size and
+ * they only support 10/100 speeds. -DaveM
+ *
+ * Apple's GMAC does support gigabit on machines with
+ * the BCM54xx PHYs. -BenH
+ */
+ { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, gem_pci_tbl);
+
+static u16 __phy_read(struct gem *gp, int phy_addr, int reg)
+{
+ u32 cmd;
+ int limit = 10000;
+
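+ /* Assemble an IEEE 802.3 clause-22 MDIO frame in the MIF
+ * frame register: ST = 01, OP = 10 (read), then the PHY and
+ * register addresses. The turnaround MSB is written as 1;
+ * the MIF sets MIF_FRAME_TALSB once the bus transaction
+ * completes, which is what the poll below waits for.
+ */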
+ cmd = (1 << 30);
+ cmd |= (2 << 28);
+ cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
+ cmd |= (reg << 18) & MIF_FRAME_REGAD;
+ cmd |= (MIF_FRAME_TAMSB);
+ writel(cmd, gp->regs + MIF_FRAME);
+
+ while (--limit) {
+ cmd = readl(gp->regs + MIF_FRAME);
+ if (cmd & MIF_FRAME_TALSB)
+ break;
+
+ udelay(10);
+ }
+
+ if (!limit)
+ cmd = 0xffff;
+
+ return cmd & MIF_FRAME_DATA;
+}
+
+static inline int _phy_read(struct net_device *dev, int mii_id, int reg)
+{
+ struct gem *gp = netdev_priv(dev);
+ return __phy_read(gp, mii_id, reg);
+}
+
+static inline u16 phy_read(struct gem *gp, int reg)
+{
+ return __phy_read(gp, gp->mii_phy_addr, reg);
+}
+
+static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
+{
+ u32 cmd;
+ int limit = 10000;
+
+ cmd = (1 << 30);
+ cmd |= (1 << 28);
+ cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
+ cmd |= (reg << 18) & MIF_FRAME_REGAD;
+ cmd |= (MIF_FRAME_TAMSB);
+ cmd |= (val & MIF_FRAME_DATA);
+ writel(cmd, gp->regs + MIF_FRAME);
+
+ while (limit--) {
+ cmd = readl(gp->regs + MIF_FRAME);
+ if (cmd & MIF_FRAME_TALSB)
+ break;
+
+ udelay(10);
+ }
+}
+
+static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val)
+{
+ struct gem *gp = netdev_priv(dev);
+ __phy_write(gp, mii_id, reg, val & 0xffff);
+}
+
+static inline void phy_write(struct gem *gp, int reg, u16 val)
+{
+ __phy_write(gp, gp->mii_phy_addr, reg, val);
+}
+
+static inline void gem_enable_ints(struct gem *gp)
+{
+ /* Enable all interrupts but TXDONE */
+ writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
+}
+
+static inline void gem_disable_ints(struct gem *gp)
+{
+ /* Disable all interrupts, including TXDONE */
+ writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
+ (void)readl(gp->regs + GREG_IMASK); /* write posting */
+}
+
+static void gem_get_cell(struct gem *gp)
+{
+ BUG_ON(gp->cell_enabled < 0);
+ gp->cell_enabled++;
+#ifdef CONFIG_PPC_PMAC
+ if (gp->cell_enabled == 1) {
+ mb();
+ pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
+ udelay(10);
+ }
+#endif /* CONFIG_PPC_PMAC */
+}
+
+/* Turn off the chip's clock */
+static void gem_put_cell(struct gem *gp)
+{
+ BUG_ON(gp->cell_enabled <= 0);
+ gp->cell_enabled--;
+#ifdef CONFIG_PPC_PMAC
+ if (gp->cell_enabled == 0) {
+ mb();
+ pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
+ udelay(10);
+ }
+#endif /* CONFIG_PPC_PMAC */
+}
+
+static inline void gem_netif_stop(struct gem *gp)
+{
+ gp->dev->trans_start = jiffies; /* prevent tx timeout */
+ napi_disable(&gp->napi);
+ netif_tx_disable(gp->dev);
+}
+
+static inline void gem_netif_start(struct gem *gp)
+{
+ /* NOTE: unconditional netif_wake_queue is only
+ * appropriate so long as all callers are assured to
+ * have free tx slots.
+ */
+ netif_wake_queue(gp->dev);
+ napi_enable(&gp->napi);
+}
+
+static void gem_schedule_reset(struct gem *gp)
+{
+ gp->reset_task_pending = 1;
+ schedule_work(&gp->reset_task);
+}
+
+static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
+{
+ if (netif_msg_intr(gp))
+ printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
+}
+
+static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+ u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
+ u32 pcs_miistat;
+
+ if (netif_msg_intr(gp))
+ printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
+ gp->dev->name, pcs_istat);
+
+ if (!(pcs_istat & PCS_ISTAT_LSC)) {
+ netdev_err(dev, "PCS irq but no link status change???\n");
+ return 0;
+ }
+
+ /* The link status bit latches on zero, so you must
+ * read it twice in such a case to see a transition
+ * to the link being up.
+ */
+ pcs_miistat = readl(gp->regs + PCS_MIISTAT);
+ if (!(pcs_miistat & PCS_MIISTAT_LS))
+ pcs_miistat |=
+ (readl(gp->regs + PCS_MIISTAT) &
+ PCS_MIISTAT_LS);
+
+ if (pcs_miistat & PCS_MIISTAT_ANC) {
+ /* The remote-fault indication is only valid
+ * when autoneg has completed.
+ */
+ if (pcs_miistat & PCS_MIISTAT_RF)
+ netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n");
+ else
+ netdev_info(dev, "PCS AutoNEG complete\n");
+ }
+
+ if (pcs_miistat & PCS_MIISTAT_LS) {
+ netdev_info(dev, "PCS link is now up\n");
+ netif_carrier_on(gp->dev);
+ } else {
+ netdev_info(dev, "PCS link is now down\n");
+ netif_carrier_off(gp->dev);
+ /* If this happens and the link timer is not running,
+ * reset so we re-negotiate.
+ */
+ if (!timer_pending(&gp->link_timer))
+ return 1;
+ }
+
+ return 0;
+}
+
+static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+ u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);
+
+ if (netif_msg_intr(gp))
+ printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
+ gp->dev->name, txmac_stat);
+
+ /* Defer timer expiration is quite normal,
+ * don't even log the event.
+ */
+ if ((txmac_stat & MAC_TXSTAT_DTE) &&
+ !(txmac_stat & ~MAC_TXSTAT_DTE))
+ return 0;
+
+ if (txmac_stat & MAC_TXSTAT_URUN) {
+ netdev_err(dev, "TX MAC xmit underrun\n");
+ dev->stats.tx_fifo_errors++;
+ }
+
+ if (txmac_stat & MAC_TXSTAT_MPE) {
+ netdev_err(dev, "TX MAC max packet size error\n");
+ dev->stats.tx_errors++;
+ }
+
+ /* The rest are all cases of one of the 16-bit TX
+ * counters expiring.
+ */
+ if (txmac_stat & MAC_TXSTAT_NCE)
+ dev->stats.collisions += 0x10000;
+
+ if (txmac_stat & MAC_TXSTAT_ECE) {
+ dev->stats.tx_aborted_errors += 0x10000;
+ dev->stats.collisions += 0x10000;
+ }
+
+ if (txmac_stat & MAC_TXSTAT_LCE) {
+ dev->stats.tx_aborted_errors += 0x10000;
+ dev->stats.collisions += 0x10000;
+ }
+
+ /* We do not keep track of MAC_TXSTAT_FCE and
+ * MAC_TXSTAT_PCE events.
+ */
+ return 0;
+}
+
+/* When we get an RX fifo overflow, the RX unit in GEM is probably hung,
+ * so we do the following.
+ *
+ * If any part of the reset goes wrong, we return 1 and that causes the
+ * whole chip to be reset.
+ */
+static int gem_rxmac_reset(struct gem *gp)
+{
+ struct net_device *dev = gp->dev;
+ int limit, i;
+ u64 desc_dma;
+ u32 val;
+
+ /* First, reset & disable MAC RX. */
+ writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
+ for (limit = 0; limit < 5000; limit++) {
+ if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
+ break;
+ udelay(10);
+ }
+ if (limit == 5000) {
+ netdev_err(dev, "RX MAC will not reset, resetting whole chip\n");
+ return 1;
+ }
+
+ writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
+ gp->regs + MAC_RXCFG);
+ for (limit = 0; limit < 5000; limit++) {
+ if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
+ break;
+ udelay(10);
+ }
+ if (limit == 5000) {
+ netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
+ return 1;
+ }
+
+ /* Second, disable RX DMA. */
+ writel(0, gp->regs + RXDMA_CFG);
+ for (limit = 0; limit < 5000; limit++) {
+ if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
+ break;
+ udelay(10);
+ }
+ if (limit == 5000) {
+ netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
+ return 1;
+ }
+
+ udelay(5000);
+
+ /* Execute RX reset command. */
+ writel(gp->swrst_base | GREG_SWRST_RXRST,
+ gp->regs + GREG_SWRST);
+ for (limit = 0; limit < 5000; limit++) {
+ if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
+ break;
+ udelay(10);
+ }
+ if (limit == 5000) {
+ netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
+ return 1;
+ }
+
+ /* Refresh the RX ring. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct gem_rxd *rxd = &gp->init_block->rxd[i];
+
+ if (gp->rx_skbs[i] == NULL) {
+ netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n");
+ return 1;
+ }
+
+ rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
+ }
+ gp->rx_new = gp->rx_old = 0;
+
+ /* Now we must reprogram the rest of RX unit. */
+ desc_dma = (u64) gp->gblock_dvma;
+ desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
+ writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
+ writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
+ writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
+ val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
+ ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+ writel(val, gp->regs + RXDMA_CFG);
+ if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
+ writel(((5 & RXDMA_BLANK_IPKTS) |
+ ((8 << 12) & RXDMA_BLANK_ITIME)),
+ gp->regs + RXDMA_BLANK);
+ else
+ writel(((5 & RXDMA_BLANK_IPKTS) |
+ ((4 << 12) & RXDMA_BLANK_ITIME)),
+ gp->regs + RXDMA_BLANK);
+ val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
+ val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
+ writel(val, gp->regs + RXDMA_PTHRESH);
+ val = readl(gp->regs + RXDMA_CFG);
+ writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
+ writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
+ val = readl(gp->regs + MAC_RXCFG);
+ writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
+
+ return 0;
+}
+
+static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+ u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
+ int ret = 0;
+
+ if (netif_msg_intr(gp))
+ printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
+ gp->dev->name, rxmac_stat);
+
+ if (rxmac_stat & MAC_RXSTAT_OFLW) {
+ u32 smac = readl(gp->regs + MAC_SMACHINE);
+
+ netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_fifo_errors++;
+
+ ret = gem_rxmac_reset(gp);
+ }
+
+ if (rxmac_stat & MAC_RXSTAT_ACE)
+ dev->stats.rx_frame_errors += 0x10000;
+
+ if (rxmac_stat & MAC_RXSTAT_CCE)
+ dev->stats.rx_crc_errors += 0x10000;
+
+ if (rxmac_stat & MAC_RXSTAT_LCE)
+ dev->stats.rx_length_errors += 0x10000;
+
+ /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
+ * events.
+ */
+ return ret;
+}
+
+static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+ u32 mac_cstat = readl(gp->regs + MAC_CSTAT);
+
+ if (netif_msg_intr(gp))
+ printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
+ gp->dev->name, mac_cstat);
+
+ /* This interrupt is just for pause frame and pause
+ * tracking. It is useful for diagnostics and debug
+ * but probably by default we will mask these events.
+ */
+ if (mac_cstat & MAC_CSTAT_PS)
+ gp->pause_entered++;
+
+ if (mac_cstat & MAC_CSTAT_PRCV)
+ gp->pause_last_time_recvd = (mac_cstat >> 16);
+
+ return 0;
+}
+
+static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+ u32 mif_status = readl(gp->regs + MIF_STATUS);
+ u32 reg_val, changed_bits;
+
+ reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
+ changed_bits = (mif_status & MIF_STATUS_STAT);
+
+ gem_handle_mif_event(gp, reg_val, changed_bits);
+
+ return 0;
+}
+
+static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+ u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);
+
+ if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
+ gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
+ netdev_err(dev, "PCI error [%04x]", pci_estat);
+
+ if (pci_estat & GREG_PCIESTAT_BADACK)
+ pr_cont(" <No ACK64# during ABS64 cycle>");
+ if (pci_estat & GREG_PCIESTAT_DTRTO)
+ pr_cont(" <Delayed transaction timeout>");
+ if (pci_estat & GREG_PCIESTAT_OTHER)
+ pr_cont(" <other>");
+ pr_cont("\n");
+ } else {
+ pci_estat |= GREG_PCIESTAT_OTHER;
+ netdev_err(dev, "PCI error\n");
+ }
+
+ if (pci_estat & GREG_PCIESTAT_OTHER) {
+ u16 pci_cfg_stat;
+
+ /* Interrogate PCI config space for the
+ * true cause.
+ */
+ pci_read_config_word(gp->pdev, PCI_STATUS,
+ &pci_cfg_stat);
+ netdev_err(dev, "Read PCI cfg space status [%04x]\n",
+ pci_cfg_stat);
+ if (pci_cfg_stat & PCI_STATUS_PARITY)
+ netdev_err(dev, "PCI parity error detected\n");
+ if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
+ netdev_err(dev, "PCI target abort\n");
+ if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
+ netdev_err(dev, "PCI master acks target abort\n");
+ if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
+ netdev_err(dev, "PCI master abort\n");
+ if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
+ netdev_err(dev, "PCI system error SERR#\n");
+ if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
+ netdev_err(dev, "PCI parity error\n");
+
+ /* Write the error bits back to clear them. */
+ pci_cfg_stat &= (PCI_STATUS_PARITY |
+ PCI_STATUS_SIG_TARGET_ABORT |
+ PCI_STATUS_REC_TARGET_ABORT |
+ PCI_STATUS_REC_MASTER_ABORT |
+ PCI_STATUS_SIG_SYSTEM_ERROR |
+ PCI_STATUS_DETECTED_PARITY);
+ pci_write_config_word(gp->pdev,
+ PCI_STATUS, pci_cfg_stat);
+ }
+
+ /* For all PCI errors, we should reset the chip. */
+ return 1;
+}
+
+/* All non-normal interrupt conditions get serviced here.
+ * Returns non-zero if we should just exit the interrupt
+ * handler right now (ie. if we reset the card which invalidates
+ * all of the other original irq status bits).
+ */
+static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+ if (gem_status & GREG_STAT_RXNOBUF) {
+ /* Frame arrived, no free RX buffers available. */
+ if (netif_msg_rx_err(gp))
+ printk(KERN_DEBUG "%s: no buffer for rx frame\n",
+ gp->dev->name);
+ dev->stats.rx_dropped++;
+ }
+
+ if (gem_status & GREG_STAT_RXTAGERR) {
+ /* corrupt RX tag framing */
+ if (netif_msg_rx_err(gp))
+ printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
+ gp->dev->name);
+ dev->stats.rx_errors++;
+
+ return 1;
+ }
+
+ if (gem_status & GREG_STAT_PCS) {
+ if (gem_pcs_interrupt(dev, gp, gem_status))
+ return 1;
+ }
+
+ if (gem_status & GREG_STAT_TXMAC) {
+ if (gem_txmac_interrupt(dev, gp, gem_status))
+ return 1;
+ }
+
+ if (gem_status & GREG_STAT_RXMAC) {
+ if (gem_rxmac_interrupt(dev, gp, gem_status))
+ return 1;
+ }
+
+ if (gem_status & GREG_STAT_MAC) {
+ if (gem_mac_interrupt(dev, gp, gem_status))
+ return 1;
+ }
+
+ if (gem_status & GREG_STAT_MIF) {
+ if (gem_mif_interrupt(dev, gp, gem_status))
+ return 1;
+ }
+
+ if (gem_status & GREG_STAT_PCIERR) {
+ if (gem_pci_interrupt(dev, gp, gem_status))
+ return 1;
+ }
+
+ return 0;
+}
+
+static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+ int entry, limit;
+
+ entry = gp->tx_old;
+ limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
+ while (entry != limit) {
+ struct sk_buff *skb;
+ struct gem_txd *txd;
+ dma_addr_t dma_addr;
+ u32 dma_len;
+ int frag;
+
+ if (netif_msg_tx_done(gp))
+ printk(KERN_DEBUG "%s: tx done, slot %d\n",
+ gp->dev->name, entry);
+ skb = gp->tx_skbs[entry];
+ if (skb_shinfo(skb)->nr_frags) {
+ int last = entry + skb_shinfo(skb)->nr_frags;
+ int walk = entry;
+ int incomplete = 0;
+
+ last &= (TX_RING_SIZE - 1);
+ for (;;) {
+ walk = NEXT_TX(walk);
+ if (walk == limit)
+ incomplete = 1;
+ if (walk == last)
+ break;
+ }
+ if (incomplete)
+ break;
+ }
+ gp->tx_skbs[entry] = NULL;
+ dev->stats.tx_bytes += skb->len;
+
+ for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+ txd = &gp->init_block->txd[entry];
+
+ dma_addr = le64_to_cpu(txd->buffer);
+ dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;
+
+ pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
+ entry = NEXT_TX(entry);
+ }
+
+ dev->stats.tx_packets++;
+ dev_kfree_skb(skb);
+ }
+ gp->tx_old = entry;
+
+ /* Need to make the tx_old update visible to gem_start_xmit()
+ * before checking for netif_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that gem_start_xmit()
+ * will miss it and cause the queue to be stopped forever.
+ */
+ smp_mb();
+
+ if (unlikely(netif_queue_stopped(dev) &&
+ TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+
+ __netif_tx_lock(txq, smp_processor_id());
+ if (netif_queue_stopped(dev) &&
+ TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
+ netif_wake_queue(dev);
+ __netif_tx_unlock(txq);
+ }
+}
+
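+/* Return descriptors to the chip in aligned clusters of four: the
+ * status words of a complete cluster are rewritten as FRESH, and
+ * RXDMA_KICK is only ever written with the index following the last
+ * complete cluster, which apparently is the granularity the RX DMA
+ * engine consumes them at (the kick in gem_rxmac_reset() above is
+ * likewise a multiple of four).
+ */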
+static __inline__ void gem_post_rxds(struct gem *gp, int limit)
+{
+ int cluster_start, curr, count, kick;
+
+ cluster_start = curr = (gp->rx_new & ~(4 - 1));
+ count = 0;
+ kick = -1;
+ wmb();
+ while (curr != limit) {
+ curr = NEXT_RX(curr);
+ if (++count == 4) {
+ struct gem_rxd *rxd =
+ &gp->init_block->rxd[cluster_start];
+ for (;;) {
+ rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
+ rxd++;
+ cluster_start = NEXT_RX(cluster_start);
+ if (cluster_start == curr)
+ break;
+ }
+ kick = curr;
+ count = 0;
+ }
+ }
+ if (kick >= 0) {
+ mb();
+ writel(kick, gp->regs + RXDMA_KICK);
+ }
+}
+
+#define ALIGNED_RX_SKB_ADDR(addr) \
+ ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
+static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size,
+ gfp_t gfp_flags)
+{
+ struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);
+
+ if (likely(skb)) {
+ unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data);
+ skb_reserve(skb, offset);
+ skb->dev = dev;
+ }
+ return skb;
+}
+
+static int gem_rx(struct gem *gp, int work_to_do)
+{
+ struct net_device *dev = gp->dev;
+ int entry, drops, work_done = 0;
+ u32 done;
+ __sum16 csum;
+
+ if (netif_msg_rx_status(gp))
+ printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
+ gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);
+
+ entry = gp->rx_new;
+ drops = 0;
+ done = readl(gp->regs + RXDMA_DONE);
+ for (;;) {
+ struct gem_rxd *rxd = &gp->init_block->rxd[entry];
+ struct sk_buff *skb;
+ u64 status = le64_to_cpu(rxd->status_word);
+ dma_addr_t dma_addr;
+ int len;
+
+ if ((status & RXDCTRL_OWN) != 0)
+ break;
+
+ if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
+ break;
+
+ /* When writing back RX descriptor, GEM writes status
+ * then buffer address, possibly in separate transactions.
+ * If we don't wait for the chip to write both, we could
+ * post a new buffer to this descriptor then have GEM spam
+ * on the buffer address. We sync on the RX completion
+ * register to prevent this from happening.
+ */
+ if (entry == done) {
+ done = readl(gp->regs + RXDMA_DONE);
+ if (entry == done)
+ break;
+ }
+
+ /* We can now account for the work we're about to do */
+ work_done++;
+
+ skb = gp->rx_skbs[entry];
+
+ len = (status & RXDCTRL_BUFSZ) >> 16;
+ if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
+ dev->stats.rx_errors++;
+ if (len < ETH_ZLEN)
+ dev->stats.rx_length_errors++;
+ if (status & RXDCTRL_BAD)
+ dev->stats.rx_crc_errors++;
+
+ /* We'll just return it to GEM. */
+ drop_it:
+ dev->stats.rx_dropped++;
+ goto next;
+ }
+
+ dma_addr = le64_to_cpu(rxd->buffer);
+ if (len > RX_COPY_THRESHOLD) {
+ struct sk_buff *new_skb;
+
+ new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
+ if (new_skb == NULL) {
+ drops++;
+ goto drop_it;
+ }
+ pci_unmap_page(gp->pdev, dma_addr,
+ RX_BUF_ALLOC_SIZE(gp),
+ PCI_DMA_FROMDEVICE);
+ gp->rx_skbs[entry] = new_skb;
+ skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
+ rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
+ virt_to_page(new_skb->data),
+ offset_in_page(new_skb->data),
+ RX_BUF_ALLOC_SIZE(gp),
+ PCI_DMA_FROMDEVICE));
+ skb_reserve(new_skb, RX_OFFSET);
+
+ /* Trim the original skb for the netif. */
+ skb_trim(skb, len);
+ } else {
+ struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);
+
+ if (copy_skb == NULL) {
+ drops++;
+ goto drop_it;
+ }
+
+ skb_reserve(copy_skb, 2);
+ skb_put(copy_skb, len);
+ pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+ skb_copy_from_linear_data(skb, copy_skb->data, len);
+ pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+
+ /* We'll reuse the original ring buffer. */
+ skb = copy_skb;
+ }
+
+ csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
+ skb->csum = csum_unfold(csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->protocol = eth_type_trans(skb, gp->dev);
+
+ napi_gro_receive(&gp->napi, skb);
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+
+ next:
+ entry = NEXT_RX(entry);
+ }
+
+ gem_post_rxds(gp, entry);
+
+ gp->rx_new = entry;
+
+ if (drops)
+ netdev_info(gp->dev, "Memory squeeze, deferring packet\n");
+
+ return work_done;
+}
+
+static int gem_poll(struct napi_struct *napi, int budget)
+{
+ struct gem *gp = container_of(napi, struct gem, napi);
+ struct net_device *dev = gp->dev;
+ int work_done;
+
+ work_done = 0;
+ do {
+ /* Handle anomalies */
+ if (unlikely(gp->status & GREG_STAT_ABNORMAL)) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+ int reset;
+
+ /* We run the abnormal interrupt handling code with
+ * the Tx lock. It only resets the Rx portion of the
+ * chip, but we need to guard it against DMA being
+ * restarted by the link poll timer
+ */
+ __netif_tx_lock(txq, smp_processor_id());
+ reset = gem_abnormal_irq(dev, gp, gp->status);
+ __netif_tx_unlock(txq);
+ if (reset) {
+ gem_schedule_reset(gp);
+ napi_complete(napi);
+ return work_done;
+ }
+ }
+
+ /* Run TX completion thread */
+ gem_tx(dev, gp, gp->status);
+
+ /* Run RX thread. We don't use any locking here,
+ * code willing to do bad things - like cleaning the
+ * rx ring - must call napi_disable(), which
+ * schedule_timeout()'s if polling is already disabled.
+ */
+ work_done += gem_rx(gp, budget - work_done);
+
+ if (work_done >= budget)
+ return work_done;
+
+ gp->status = readl(gp->regs + GREG_STAT);
+ } while (gp->status & GREG_STAT_NAPI);
+
+ napi_complete(napi);
+ gem_enable_ints(gp);
+
+ return work_done;
+}
+
+static irqreturn_t gem_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct gem *gp = netdev_priv(dev);
+
+ if (napi_schedule_prep(&gp->napi)) {
+ u32 gem_status = readl(gp->regs + GREG_STAT);
+
+ if (unlikely(gem_status == 0)) {
+ napi_enable(&gp->napi);
+ return IRQ_NONE;
+ }
+ if (netif_msg_intr(gp))
+ printk(KERN_DEBUG "%s: gem_interrupt() gem_status: 0x%x\n",
+ gp->dev->name, gem_status);
+
+ gp->status = gem_status;
+ gem_disable_ints(gp);
+ __napi_schedule(&gp->napi);
+ }
+
+ /* If polling was disabled at the time we received that
+ * interrupt, we may return IRQ_HANDLED here while we
+ * should return IRQ_NONE. No big deal...
+ */
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void gem_poll_controller(struct net_device *dev)
+{
+ struct gem *gp = netdev_priv(dev);
+
+ disable_irq(gp->pdev->irq);
+ gem_interrupt(gp->pdev->irq, dev);
+ enable_irq(gp->pdev->irq);
+}
+#endif
+
+static void gem_tx_timeout(struct net_device *dev)
+{
+ struct gem *gp = netdev_priv(dev);
+
+ netdev_err(dev, "transmit timed out, resetting\n");
+
+ netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n",
+ readl(gp->regs + TXDMA_CFG),
+ readl(gp->regs + MAC_TXSTAT),
+ readl(gp->regs + MAC_TXCFG));
+ netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
+ readl(gp->regs + RXDMA_CFG),
+ readl(gp->regs + MAC_RXSTAT),
+ readl(gp->regs + MAC_RXCFG));
+
+ gem_schedule_reset(gp);
+}
+
+static __inline__ int gem_intme(int entry)
+{
+ /* Algorithm: IRQ every 1/2 of descriptors. */
+ if (!(entry & ((TX_RING_SIZE>>1)-1)))
+ return 1;
+
+ return 0;
+}
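+
+/* E.g. with a 128-entry TX ring the mask is 63, so only entries 0
+ * and 64 request a completion interrupt; everything in between is
+ * reaped lazily by the next interrupt or by gem_poll().
+ */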
+
+static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct gem *gp = netdev_priv(dev);
+ int entry;
+ u64 ctrl;
+
+ ctrl = 0;
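+ /* For CHECKSUM_PARTIAL skbs the descriptor control word carries
+ * the byte offset at which hardware checksumming starts and the
+ * offset at which the computed sum is stuffed back into the
+ * packet, in the bit fields at 15 and 21 respectively.
+ */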
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const u64 csum_start_off = skb_checksum_start_offset(skb);
+ const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
+
+ ctrl = (TXDCTRL_CENAB |
+ (csum_start_off << 15) |
+ (csum_stuff_off << 21));
+ }
+
+ if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+ /* This is a hard error, log it. */
+ if (!netif_queue_stopped(dev)) {
+ netif_stop_queue(dev);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = gp->tx_new;
+ gp->tx_skbs[entry] = skb;
+
+ if (skb_shinfo(skb)->nr_frags == 0) {
+ struct gem_txd *txd = &gp->init_block->txd[entry];
+ dma_addr_t mapping;
+ u32 len;
+
+ len = skb->len;
+ mapping = pci_map_page(gp->pdev,
+ virt_to_page(skb->data),
+ offset_in_page(skb->data),
+ len, PCI_DMA_TODEVICE);
+ ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
+ if (gem_intme(entry))
+ ctrl |= TXDCTRL_INTME;
+ txd->buffer = cpu_to_le64(mapping);
+ wmb();
+ txd->control_word = cpu_to_le64(ctrl);
+ entry = NEXT_TX(entry);
+ } else {
+ struct gem_txd *txd;
+ u32 first_len;
+ u64 intme;
+ dma_addr_t first_mapping;
+ int frag, first_entry = entry;
+
+ intme = 0;
+ if (gem_intme(entry))
+ intme |= TXDCTRL_INTME;
+
+ /* We must give this initial chunk to the device last.
+ * Otherwise we could race with the device.
+ */
+ first_len = skb_headlen(skb);
+ first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
+ offset_in_page(skb->data),
+ first_len, PCI_DMA_TODEVICE);
+ entry = NEXT_TX(entry);
+
+ for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+ u32 len;
+ dma_addr_t mapping;
+ u64 this_ctrl;
+
+ len = this_frag->size;
+ mapping = skb_frag_dma_map(&gp->pdev->dev, this_frag,
+ 0, len, DMA_TO_DEVICE);
+ this_ctrl = ctrl;
+ if (frag == skb_shinfo(skb)->nr_frags - 1)
+ this_ctrl |= TXDCTRL_EOF;
+
+ txd = &gp->init_block->txd[entry];
+ txd->buffer = cpu_to_le64(mapping);
+ wmb();
+ txd->control_word = cpu_to_le64(this_ctrl | len);
+
+ if (gem_intme(entry))
+ intme |= TXDCTRL_INTME;
+
+ entry = NEXT_TX(entry);
+ }
+ txd = &gp->init_block->txd[first_entry];
+ txd->buffer = cpu_to_le64(first_mapping);
+ wmb();
+ txd->control_word =
+ cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
+ }
+
+ gp->tx_new = entry;
+ if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) {
+ netif_stop_queue(dev);
+
+ /* netif_stop_queue() must be done before checking
+ * tx index in TX_BUFFS_AVAIL() below, because
+ * in gem_tx(), we update tx_old before checking for
+ * netif_queue_stopped().
+ */
+ smp_mb();
+ if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
+ netif_wake_queue(dev);
+ }
+ if (netif_msg_tx_queued(gp))
+ printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
+ dev->name, entry, skb->len);
+ mb();
+ writel(gp->tx_new, gp->regs + TXDMA_KICK);
+
+ return NETDEV_TX_OK;
+}
+
+static void gem_pcs_reset(struct gem *gp)
+{
+ int limit;
+ u32 val;
+
+ /* Reset PCS unit. */
+ val = readl(gp->regs + PCS_MIICTRL);
+ val |= PCS_MIICTRL_RST;
+ writel(val, gp->regs + PCS_MIICTRL);
+
+ limit = 32;
+ while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
+ udelay(100);
+ if (limit-- <= 0)
+ break;
+ }
+ if (limit < 0)
+ netdev_warn(gp->dev, "PCS reset bit would not clear\n");
+}
+
+static void gem_pcs_reinit_adv(struct gem *gp)
+{
+ u32 val;
+
+ /* Make sure PCS is disabled while changing advertisement
+ * configuration.
+ */
+ val = readl(gp->regs + PCS_CFG);
+ val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
+ writel(val, gp->regs + PCS_CFG);
+
+ /* Advertise all capabilities except asymmetric
+ * pause.
+ */
+ val = readl(gp->regs + PCS_MIIADV);
+ val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
+ PCS_MIIADV_SP | PCS_MIIADV_AP);
+ writel(val, gp->regs + PCS_MIIADV);
+
+ /* Enable and restart auto-negotiation, disable wrapback/loopback,
+ * and re-enable PCS.
+ */
+ val = readl(gp->regs + PCS_MIICTRL);
+ val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
+ val &= ~PCS_MIICTRL_WB;
+ writel(val, gp->regs + PCS_MIICTRL);
+
+ val = readl(gp->regs + PCS_CFG);
+ val |= PCS_CFG_ENABLE;
+ writel(val, gp->regs + PCS_CFG);
+
+ /* Make sure serialink loopback is off. The meaning
+ * of this bit is logically inverted based upon whether
+ * you are in Serialink or SERDES mode.
+ */
+ val = readl(gp->regs + PCS_SCTRL);
+ if (gp->phy_type == phy_serialink)
+ val &= ~PCS_SCTRL_LOOP;
+ else
+ val |= PCS_SCTRL_LOOP;
+ writel(val, gp->regs + PCS_SCTRL);
+}
+
+#define STOP_TRIES 32
+
+static void gem_reset(struct gem *gp)
+{
+ int limit;
+ u32 val;
+
+ /* Make sure we won't get any more interrupts */
+ writel(0xffffffff, gp->regs + GREG_IMASK);
+
+ /* Reset the chip */
+ writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
+ gp->regs + GREG_SWRST);
+
+ limit = STOP_TRIES;
+
+ do {
+ udelay(20);
+ val = readl(gp->regs + GREG_SWRST);
+ if (limit-- <= 0)
+ break;
+ } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));
+
+ if (limit < 0)
+ netdev_err(gp->dev, "SW reset failed to complete\n");
+
+ if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
+ gem_pcs_reinit_adv(gp);
+}
+
+static void gem_start_dma(struct gem *gp)
+{
+ u32 val;
+
+ /* We are ready to rock, turn everything on. */
+ val = readl(gp->regs + TXDMA_CFG);
+ writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
+ val = readl(gp->regs + RXDMA_CFG);
+ writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
+ val = readl(gp->regs + MAC_TXCFG);
+ writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
+ val = readl(gp->regs + MAC_RXCFG);
+ writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
+
+ (void) readl(gp->regs + MAC_RXCFG);
+ udelay(100);
+
+ gem_enable_ints(gp);
+
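+ /* Kick the RX ring. Like TXDMA_KICK on the transmit side, this
+ * presumably holds the index following the last valid descriptor;
+ * the 4-entry gap keeps the hardware from catching up with slots
+ * the driver has not refilled yet.
+ */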
+ writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
+}
+
+/* Note that DMA won't actually stop for about another 4ms,
+ * though ...
+ */
+static void gem_stop_dma(struct gem *gp)
+{
+ u32 val;
+
+ /* We are done rocking, turn everything off. */
+ val = readl(gp->regs + TXDMA_CFG);
+ writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
+ val = readl(gp->regs + RXDMA_CFG);
+ writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
+ val = readl(gp->regs + MAC_TXCFG);
+ writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
+ val = readl(gp->regs + MAC_RXCFG);
+ writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
+
+ (void) readl(gp->regs + MAC_RXCFG);
+
+ /* Need to wait a bit ... done by the caller */
+}
+
+
+// XXX double-check what this function should do when called on a PCS PHY
+static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
+{
+ u32 advertise, features;
+ int autoneg;
+ int speed;
+ int duplex;
+
+ if (gp->phy_type != phy_mii_mdio0 &&
+ gp->phy_type != phy_mii_mdio1)
+ goto non_mii;
+
+ /* Setup advertise */
+ if (found_mii_phy(gp))
+ features = gp->phy_mii.def->features;
+ else
+ features = 0;
+
+ advertise = features & ADVERTISE_MASK;
+ if (gp->phy_mii.advertising != 0)
+ advertise &= gp->phy_mii.advertising;
+
+ autoneg = gp->want_autoneg;
+ speed = gp->phy_mii.speed;
+ duplex = gp->phy_mii.duplex;
+
+ /* Setup link parameters */
+ if (!ep)
+ goto start_aneg;
+ if (ep->autoneg == AUTONEG_ENABLE) {
+ advertise = ep->advertising;
+ autoneg = 1;
+ } else {
+ autoneg = 0;
+ speed = ethtool_cmd_speed(ep);
+ duplex = ep->duplex;
+ }
+
+start_aneg:
+ /* Sanitize settings based on PHY capabilities */
+ if ((features & SUPPORTED_Autoneg) == 0)
+ autoneg = 0;
+ if (speed == SPEED_1000 &&
+ !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)))
+ speed = SPEED_100;
+ if (speed == SPEED_100 &&
+ !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full)))
+ speed = SPEED_10;
+ if (duplex == DUPLEX_FULL &&
+ !(features & (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_10baseT_Full)))
+ duplex = DUPLEX_HALF;
+ if (speed == 0)
+ speed = SPEED_10;
+
+ /* If we are asleep, we don't try to actually setup the PHY, we
+ * just store the settings
+ */
+ if (!netif_device_present(gp->dev)) {
+ gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
+ gp->phy_mii.speed = speed;
+ gp->phy_mii.duplex = duplex;
+ return;
+ }
+
+ /* Configure PHY & start aneg */
+ gp->want_autoneg = autoneg;
+ if (autoneg) {
+ if (found_mii_phy(gp))
+ gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
+ gp->lstate = link_aneg;
+ } else {
+ if (found_mii_phy(gp))
+ gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
+ gp->lstate = link_force_ok;
+ }
+
+non_mii:
+ gp->timer_ticks = 0;
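+ /* (12 * HZ) / 10 jiffies, i.e. poll the link roughly every 1.2s */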
+ mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
+}
+
+/* A link-up condition has occurred, initialize and enable the
+ * rest of the chip.
+ */
+static int gem_set_link_modes(struct gem *gp)
+{
+ struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0);
+ int full_duplex, speed, pause;
+ u32 val;
+
+ full_duplex = 0;
+ speed = SPEED_10;
+ pause = 0;
+
+ if (found_mii_phy(gp)) {
+ if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
+ return 1;
+ full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
+ speed = gp->phy_mii.speed;
+ pause = gp->phy_mii.pause;
+ } else if (gp->phy_type == phy_serialink ||
+ gp->phy_type == phy_serdes) {
+ u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
+
+ if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
+ full_duplex = 1;
+ speed = SPEED_1000;
+ }
+
+ netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n",
+ speed, (full_duplex ? "full" : "half"));
+
+
+ /* We take the tx queue lock to avoid collisions between
+ * this code, the tx path and the NAPI-driven error path
+ */
+ __netif_tx_lock(txq, smp_processor_id());
+
+ val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
+ if (full_duplex) {
+ val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
+ } else {
+ /* MAC_TXCFG_NBO must be zero. */
+ }
+ writel(val, gp->regs + MAC_TXCFG);
+
+ val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
+ if (!full_duplex &&
+ (gp->phy_type == phy_mii_mdio0 ||
+ gp->phy_type == phy_mii_mdio1)) {
+ val |= MAC_XIFCFG_DISE;
+ } else if (full_duplex) {
+ val |= MAC_XIFCFG_FLED;
+ }
+
+ if (speed == SPEED_1000)
+ val |= (MAC_XIFCFG_GMII);
+
+ writel(val, gp->regs + MAC_XIFCFG);
+
+ /* If gigabit and half-duplex, enable carrier extension
+ * mode. Else, disable it.
+ */
+ if (speed == SPEED_1000 && !full_duplex) {
+ val = readl(gp->regs + MAC_TXCFG);
+ writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);
+
+ val = readl(gp->regs + MAC_RXCFG);
+ writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
+ } else {
+ val = readl(gp->regs + MAC_TXCFG);
+ writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);
+
+ val = readl(gp->regs + MAC_RXCFG);
+ writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
+ }
+
+ if (gp->phy_type == phy_serialink ||
+ gp->phy_type == phy_serdes) {
+ u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
+
+ if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
+ pause = 1;
+ }
+
+ if (!full_duplex)
+ writel(512, gp->regs + MAC_STIME);
+ else
+ writel(64, gp->regs + MAC_STIME);
+ val = readl(gp->regs + MAC_MCCFG);
+ if (pause)
+ val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
+ else
+ val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
+ writel(val, gp->regs + MAC_MCCFG);
+
+ gem_start_dma(gp);
+
+ __netif_tx_unlock(txq);
+
+ if (netif_msg_link(gp)) {
+ if (pause) {
+ netdev_info(gp->dev,
+ "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
+ gp->rx_fifo_sz,
+ gp->rx_pause_off,
+ gp->rx_pause_on);
+ } else {
+ netdev_info(gp->dev, "Pause is disabled\n");
+ }
+ }
+
+ return 0;
+}
+
+static int gem_mdio_link_not_up(struct gem *gp)
+{
+ switch (gp->lstate) {
+ case link_force_ret:
+ netif_info(gp, link, gp->dev,
+ "Autoneg failed again, keeping forced mode\n");
+ gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
+ gp->last_forced_speed, DUPLEX_HALF);
+ gp->timer_ticks = 5;
+ gp->lstate = link_force_ok;
+ return 0;
+ case link_aneg:
+ /* We try forced modes after a failed aneg only on PHYs that don't
+ * have the "magic_aneg" bit set, which means they internally handle
+ * the whole forced-mode fallback themselves. On those, we just
+ * restart aneg.
+ */
+ if (gp->phy_mii.def->magic_aneg)
+ return 1;
+ netif_info(gp, link, gp->dev, "switching to forced 100bt\n");
+ /* Try forced modes. */
+ gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
+ DUPLEX_HALF);
+ gp->timer_ticks = 5;
+ gp->lstate = link_force_try;
+ return 0;
+ case link_force_try:
+ /* Downgrade from 100 to 10 Mbps if necessary.
+ * If already at 10Mbps, warn user about the
+ * situation every 10 ticks.
+ */
+ if (gp->phy_mii.speed == SPEED_100) {
+ gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
+ DUPLEX_HALF);
+ gp->timer_ticks = 5;
+ netif_info(gp, link, gp->dev,
+ "switching to forced 10bt\n");
+ return 0;
+ } else
+ return 1;
+ default:
+ return 0;
+ }
+}
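+
+/* Summary of the fallback ladder above, for readers following the
+ * link state machine:
+ *
+ * link_aneg -- aneg timed out, PHY lacks magic_aneg -->
+ * forced 100/half (link_force_try)
+ * link_force_try -- still no link at 100 --> forced 10/half
+ * link_force_try -- link seen while autoneg wanted --> link_force_ret,
+ * which retries aneg once (see gem_link_timer) and drops back to
+ * the last forced speed if that fails again.
+ */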
+
+static void gem_link_timer(unsigned long data)
+{
+ struct gem *gp = (struct gem *) data;
+ struct net_device *dev = gp->dev;
+ int restart_aneg = 0;
+
+ /* There's no point doing anything if we're going to be reset */
+ if (gp->reset_task_pending)
+ return;
+
+ if (gp->phy_type == phy_serialink ||
+ gp->phy_type == phy_serdes) {
+ u32 val = readl(gp->regs + PCS_MIISTAT);
+
+ if (!(val & PCS_MIISTAT_LS))
+ val = readl(gp->regs + PCS_MIISTAT);
+
+ if ((val & PCS_MIISTAT_LS) != 0) {
+ if (gp->lstate == link_up)
+ goto restart;
+
+ gp->lstate = link_up;
+ netif_carrier_on(dev);
+ (void)gem_set_link_modes(gp);
+ }
+ goto restart;
+ }
+ if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
+ /* Ok, here we got a link. If we had it due to a forced
+ * fallback, and we were configured for autoneg, we do
+ * retry a short autoneg pass. If you know your hub is
+ * broken, use ethtool ;)
+ */
+ if (gp->lstate == link_force_try && gp->want_autoneg) {
+ gp->lstate = link_force_ret;
+ gp->last_forced_speed = gp->phy_mii.speed;
+ gp->timer_ticks = 5;
+ if (netif_msg_link(gp))
+ netdev_info(dev,
+ "Got link after fallback, retrying autoneg once...\n");
+ gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
+ } else if (gp->lstate != link_up) {
+ gp->lstate = link_up;
+ netif_carrier_on(dev);
+ if (gem_set_link_modes(gp))
+ restart_aneg = 1;
+ }
+ } else {
+ /* If the link was previously up, we restart the
+ * whole process
+ */
+ if (gp->lstate == link_up) {
+ gp->lstate = link_down;
+ netif_info(gp, link, dev, "Link down\n");
+ netif_carrier_off(dev);
+ gem_schedule_reset(gp);
+ /* The reset task will restart the timer */
+ return;
+ } else if (++gp->timer_ticks > 10) {
+ if (found_mii_phy(gp))
+ restart_aneg = gem_mdio_link_not_up(gp);
+ else
+ restart_aneg = 1;
+ }
+ }
+ if (restart_aneg) {
+ gem_begin_auto_negotiation(gp, NULL);
+ return;
+ }
+restart:
+ mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
+}
+
+static void gem_clean_rings(struct gem *gp)
+{
+ struct gem_init_block *gb = gp->init_block;
+ struct sk_buff *skb;
+ int i;
+ dma_addr_t dma_addr;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct gem_rxd *rxd;
+
+ rxd = &gb->rxd[i];
+ if (gp->rx_skbs[i] != NULL) {
+ skb = gp->rx_skbs[i];
+ dma_addr = le64_to_cpu(rxd->buffer);
+ pci_unmap_page(gp->pdev, dma_addr,
+ RX_BUF_ALLOC_SIZE(gp),
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(skb);
+ gp->rx_skbs[i] = NULL;
+ }
+ rxd->status_word = 0;
+ wmb();
+ rxd->buffer = 0;
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (gp->tx_skbs[i] != NULL) {
+ struct gem_txd *txd;
+ int frag;
+
+ skb = gp->tx_skbs[i];
+ gp->tx_skbs[i] = NULL;
+
+ for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+ int ent = i & (TX_RING_SIZE - 1);
+
+ txd = &gb->txd[ent];
+ dma_addr = le64_to_cpu(txd->buffer);
+ pci_unmap_page(gp->pdev, dma_addr,
+ le64_to_cpu(txd->control_word) &
+ TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);
+
+ if (frag != skb_shinfo(skb)->nr_frags)
+ i++;
+ }
+ dev_kfree_skb_any(skb);
+ }
+ }
+}
+
+static void gem_init_rings(struct gem *gp)
+{
+ struct gem_init_block *gb = gp->init_block;
+ struct net_device *dev = gp->dev;
+ int i;
+ dma_addr_t dma_addr;
+
+ gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;
+
+ gem_clean_rings(gp);
+
+ gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN,
+ (unsigned)VLAN_ETH_FRAME_LEN);
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+ struct gem_rxd *rxd = &gb->rxd[i];
+
+ skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL);
+ if (!skb) {
+ rxd->buffer = 0;
+ rxd->status_word = 0;
+ continue;
+ }
+
+ gp->rx_skbs[i] = skb;
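+ /* Expose the whole allocation to the chip: skb_put() below grows
+ * the data area to the full buffer before it is DMA-mapped, and
+ * skb_reserve() then moves skb->data up by RX_OFFSET to match the
+ * offset programmed into RXDMA_CFG (presumably so the IP header
+ * ends up word-aligned).
+ */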
+ skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
+ dma_addr = pci_map_page(gp->pdev,
+ virt_to_page(skb->data),
+ offset_in_page(skb->data),
+ RX_BUF_ALLOC_SIZE(gp),
+ PCI_DMA_FROMDEVICE);
+ rxd->buffer = cpu_to_le64(dma_addr);
+ wmb();
+ rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
+ skb_reserve(skb, RX_OFFSET);
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ struct gem_txd *txd = &gb->txd[i];
+
+ txd->control_word = 0;
+ wmb();
+ txd->buffer = 0;
+ }
+ wmb();
+}
+
+/* Init PHY interface and start link poll state machine */
+static void gem_init_phy(struct gem *gp)
+{
+ u32 mifcfg;
+
+ /* Revert MIF CFG setting done on stop_phy */
+ mifcfg = readl(gp->regs + MIF_CFG);
+ mifcfg &= ~MIF_CFG_BBMODE;
+ writel(mifcfg, gp->regs + MIF_CFG);
+
+ if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
+ int i;
+
+ /* These delays are ugly, but the HW seems to need them.
+ * I'll seriously consider breaking some locks here to be
+ * able to schedule instead.
+ */
+ for (i = 0; i < 3; i++) {
+#ifdef CONFIG_PPC_PMAC
+ pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
+ msleep(20);
+#endif
+ /* Some PHYs used by Apple have a problem getting back to us,
+ * so we do an additional reset here
+ */
+ phy_write(gp, MII_BMCR, BMCR_RESET);
+ msleep(20);
+ if (phy_read(gp, MII_BMCR) != 0xffff)
+ break;
+ if (i == 2)
+ netdev_warn(gp->dev, "GMAC PHY not responding !\n");
+ }
+ }
+
+ if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
+ gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
+ u32 val;
+
+ /* Init datapath mode register. */
+ if (gp->phy_type == phy_mii_mdio0 ||
+ gp->phy_type == phy_mii_mdio1) {
+ val = PCS_DMODE_MGM;
+ } else if (gp->phy_type == phy_serialink) {
+ val = PCS_DMODE_SM | PCS_DMODE_GMOE;
+ } else {
+ val = PCS_DMODE_ESM;
+ }
+
+ writel(val, gp->regs + PCS_DMODE);
+ }
+
+ if (gp->phy_type == phy_mii_mdio0 ||
+ gp->phy_type == phy_mii_mdio1) {
+ /* Reset and detect MII PHY */
+ sungem_phy_probe(&gp->phy_mii, gp->mii_phy_addr);
+
+ /* Init PHY */
+ if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
+ gp->phy_mii.def->ops->init(&gp->phy_mii);
+ } else {
+ gem_pcs_reset(gp);
+ gem_pcs_reinit_adv(gp);
+ }
+
+ /* Default aneg parameters */
+ gp->timer_ticks = 0;
+ gp->lstate = link_down;
+ netif_carrier_off(gp->dev);
+
+ /* Print things out */
+ if (gp->phy_type == phy_mii_mdio0 ||
+ gp->phy_type == phy_mii_mdio1)
+ netdev_info(gp->dev, "Found %s PHY\n",
+ gp->phy_mii.def ? gp->phy_mii.def->name : "no");
+
+ gem_begin_auto_negotiation(gp, NULL);
+}
+
+static void gem_init_dma(struct gem *gp)
+{
+ u64 desc_dma = (u64) gp->gblock_dvma;
+ u32 val;
+
+ val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE);
+ writel(val, gp->regs + TXDMA_CFG);
+
+ writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
+ writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
+ desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
+
+ writel(0, gp->regs + TXDMA_KICK);
+
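+ /* The (14 / 2) << 13 term presumably programs the RX checksum
+ * start offset in half-words: skip the 14-byte Ethernet header so
+ * the chip's partial checksum begins at the IP header.
+ */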
+ val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
+ ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+ writel(val, gp->regs + RXDMA_CFG);
+
+ writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
+ writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
+
+ writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
+
+ val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
+ val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
+ writel(val, gp->regs + RXDMA_PTHRESH);
+
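+ /* RX interrupt blanking: coalesce until 5 packets have arrived or
+ * the blanking time expires. The time field is doubled on a 66MHz
+ * PCI segment (8 vs 4), which keeps the wall-clock delay roughly
+ * constant if the counter ticks at the PCI clock rate -- an
+ * assumption on our part, not something stated by the chip docs.
+ */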
+ if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
+ writel(((5 & RXDMA_BLANK_IPKTS) |
+ ((8 << 12) & RXDMA_BLANK_ITIME)),
+ gp->regs + RXDMA_BLANK);
+ else
+ writel(((5 & RXDMA_BLANK_IPKTS) |
+ ((4 << 12) & RXDMA_BLANK_ITIME)),
+ gp->regs + RXDMA_BLANK);
+}
+
+static u32 gem_setup_multicast(struct gem *gp)
+{
+ u32 rxcfg = 0;
+ int i;
+
+ if ((gp->dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(gp->dev) > 256)) {
+ for (i=0; i<16; i++)
+ writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
+ rxcfg |= MAC_RXCFG_HFE;
+ } else if (gp->dev->flags & IFF_PROMISC) {
+ rxcfg |= MAC_RXCFG_PROM;
+ } else {
+ u16 hash_table[16];
+ u32 crc;
+ struct netdev_hw_addr *ha;
+ int i;
+
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(ha, gp->dev) {
+ crc = ether_crc_le(6, ha->addr);
+ crc >>= 24;
+ hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+ }
+ for (i=0; i<16; i++)
+ writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
+ rxcfg |= MAC_RXCFG_HFE;
+ }
+
+ return rxcfg;
+}
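+
+/* Hash filter example for the loop above: ether_crc_le() of the
+ * 6-byte address is shifted right by 24 so only the top 8 CRC bits
+ * remain. If those bits were 0xa7 (an illustrative value, not a
+ * computed CRC), the address would select word 0xa7 >> 4 = 10 of
+ * the 16 hash registers and bit 15 - (0xa7 & 0xf) = 8 within it.
+ */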
+
+static void gem_init_mac(struct gem *gp)
+{
+ unsigned char *e = &gp->dev->dev_addr[0];
+
+ writel(0x1bf0, gp->regs + MAC_SNDPAUSE);
+
+ writel(0x00, gp->regs + MAC_IPG0);
+ writel(0x08, gp->regs + MAC_IPG1);
+ writel(0x04, gp->regs + MAC_IPG2);
+ writel(0x40, gp->regs + MAC_STIME);
+ writel(0x40, gp->regs + MAC_MINFSZ);
+
+ /* Ethernet payload + header + FCS + optional VLAN tag. */
+ writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ);
+
+ writel(0x07, gp->regs + MAC_PASIZE);
+ writel(0x04, gp->regs + MAC_JAMSIZE);
+ writel(0x10, gp->regs + MAC_ATTLIM);
+ writel(0x8808, gp->regs + MAC_MCTYPE);
+
+ writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);
+
+ writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
+ writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
+ writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
+
+ writel(0, gp->regs + MAC_ADDR3);
+ writel(0, gp->regs + MAC_ADDR4);
+ writel(0, gp->regs + MAC_ADDR5);
+
+ writel(0x0001, gp->regs + MAC_ADDR6);
+ writel(0xc200, gp->regs + MAC_ADDR7);
+ writel(0x0180, gp->regs + MAC_ADDR8);
+
+ writel(0, gp->regs + MAC_AFILT0);
+ writel(0, gp->regs + MAC_AFILT1);
+ writel(0, gp->regs + MAC_AFILT2);
+ writel(0, gp->regs + MAC_AF21MSK);
+ writel(0, gp->regs + MAC_AF0MSK);
+
+ gp->mac_rx_cfg = gem_setup_multicast(gp);
+#ifdef STRIP_FCS
+ gp->mac_rx_cfg |= MAC_RXCFG_SFCS;
+#endif
+ writel(0, gp->regs + MAC_NCOLL);
+ writel(0, gp->regs + MAC_FASUCC);
+ writel(0, gp->regs + MAC_ECOLL);
+ writel(0, gp->regs + MAC_LCOLL);
+ writel(0, gp->regs + MAC_DTIMER);
+ writel(0, gp->regs + MAC_PATMPS);
+ writel(0, gp->regs + MAC_RFCTR);
+ writel(0, gp->regs + MAC_LERR);
+ writel(0, gp->regs + MAC_AERR);
+ writel(0, gp->regs + MAC_FCSERR);
+ writel(0, gp->regs + MAC_RXCVERR);
+
+ /* Clear RX/TX/MAC/XIF config, we will set these up and enable
+ * them once a link is established.
+ */
+ writel(0, gp->regs + MAC_TXCFG);
+ writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
+ writel(0, gp->regs + MAC_MCCFG);
+ writel(0, gp->regs + MAC_XIFCFG);
+
+ /* Setup MAC interrupts. We want to get all of the interesting
+ * counter expiration events, but we do not want to hear about
+ * normal rx/tx as the DMA engine tells us that.
+ */
+ writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
+ writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
+
+ /* Don't enable even the PAUSE interrupts for now, we
+ * make no use of those events other than to record them.
+ */
+ writel(0xffffffff, gp->regs + MAC_MCMASK);
+
+ /* Don't enable GEM's WOL in normal operations
+ */
+ if (gp->has_wol)
+ writel(0, gp->regs + WOL_WAKECSR);
+}
+
+static void gem_init_pause_thresholds(struct gem *gp)
+{
+ u32 cfg;
+
+ /* Calculate pause thresholds. Setting the OFF threshold to the
+ * full RX fifo size effectively disables PAUSE generation which
+ * is what we do for 10/100 only GEMs which have FIFOs too small
+ * to make real gains from PAUSE.
+ */
+ if (gp->rx_fifo_sz <= (2 * 1024)) {
+ gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
+ } else {
+ int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63;
+ int off = (gp->rx_fifo_sz - (max_frame * 2));
+ int on = off - max_frame;
+
+ gp->rx_pause_off = off;
+ gp->rx_pause_on = on;
+ }
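+
+ /* Worked example, assuming the standard GEM 20KB RX FIFO and the
+ * default 1522-byte rx_buf_sz: max_frame = (1522 + 4 + 64) & ~63
+ * = 1536, so pause-off = 20480 - 2 * 1536 = 17408 bytes of FIFO
+ * occupancy and pause-on = 17408 - 1536 = 15872 bytes.
+ */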
+
+
+ /* Configure the chip "burst" DMA mode & enable some
+ * HW bug fixes on Apple version
+ */
+ cfg = 0;
+ if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
+ cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX;
+#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
+ cfg |= GREG_CFG_IBURST;
+#endif
+ cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
+ cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
+ writel(cfg, gp->regs + GREG_CFG);
+
+ /* If Infinite Burst didn't stick, then use different
+ * thresholds (and Apple bug fixes don't exist)
+ */
+ if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) {
+ cfg = ((2 << 1) & GREG_CFG_TXDMALIM);
+ cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
+ writel(cfg, gp->regs + GREG_CFG);
+ }
+}
+
+static int gem_check_invariants(struct gem *gp)
+{
+ struct pci_dev *pdev = gp->pdev;
+ u32 mif_cfg;
+
+ /* On Apple's sungem, we can't rely on registers as the chip
+ * has been powered down by the firmware. The PHY is looked
+ * up later on.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
+ gp->phy_type = phy_mii_mdio0;
+ gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
+ gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
+ gp->swrst_base = 0;
+
+ mif_cfg = readl(gp->regs + MIF_CFG);
+ mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
+ mif_cfg |= MIF_CFG_MDI0;
+ writel(mif_cfg, gp->regs + MIF_CFG);
+ writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
+ writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);
+
+ /* We hard-code the PHY address so we can properly bring it out
+ * of reset later on. We can't really probe it at this point,
+ * though that isn't an issue.
+ */
+ if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC)
+ gp->mii_phy_addr = 1;
+ else
+ gp->mii_phy_addr = 0;
+
+ return 0;
+ }
+
+ mif_cfg = readl(gp->regs + MIF_CFG);
+
+ if (pdev->vendor == PCI_VENDOR_ID_SUN &&
+ pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
+ /* One of the MII PHYs _must_ be present
+ * as this chip has no gigabit PHY.
+ */
+ if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
+ pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n",
+ mif_cfg);
+ return -1;
+ }
+ }
+
+ /* Determine initial PHY interface type guess. MDIO1 is the
+ * external PHY and thus takes precedence over MDIO0.
+ */
+
+ if (mif_cfg & MIF_CFG_MDI1) {
+ gp->phy_type = phy_mii_mdio1;
+ mif_cfg |= MIF_CFG_PSELECT;
+ writel(mif_cfg, gp->regs + MIF_CFG);
+ } else if (mif_cfg & MIF_CFG_MDI0) {
+ gp->phy_type = phy_mii_mdio0;
+ mif_cfg &= ~MIF_CFG_PSELECT;
+ writel(mif_cfg, gp->regs + MIF_CFG);
+ } else {
+#ifdef CONFIG_SPARC
+ const char *p;
+
+ p = of_get_property(gp->of_node, "shared-pins", NULL);
+ if (p && !strcmp(p, "serdes"))
+ gp->phy_type = phy_serdes;
+ else
+#endif
+ gp->phy_type = phy_serialink;
+ }
+ if (gp->phy_type == phy_mii_mdio1 ||
+ gp->phy_type == phy_mii_mdio0) {
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ gp->mii_phy_addr = i;
+ if (phy_read(gp, MII_BMCR) != 0xffff)
+ break;
+ }
+ if (i == 32) {
+ if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
+ pr_err("RIO MII phy will not respond\n");
+ return -1;
+ }
+ gp->phy_type = phy_serdes;
+ }
+ }
+
+ /* Fetch the FIFO configurations now too. */
+ gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
+ gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
+
+ if (pdev->vendor == PCI_VENDOR_ID_SUN) {
+ if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
+ if (gp->tx_fifo_sz != (9 * 1024) ||
+ gp->rx_fifo_sz != (20 * 1024)) {
+ pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n",
+ gp->tx_fifo_sz, gp->rx_fifo_sz);
+ return -1;
+ }
+ gp->swrst_base = 0;
+ } else {
+ if (gp->tx_fifo_sz != (2 * 1024) ||
+ gp->rx_fifo_sz != (2 * 1024)) {
+ pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
+ gp->tx_fifo_sz, gp->rx_fifo_sz);
+ return -1;
+ }
+ gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT;
+ }
+ }
+
+ return 0;
+}
+
+static void gem_reinit_chip(struct gem *gp)
+{
+ /* Reset the chip */
+ gem_reset(gp);
+
+ /* Make sure ints are disabled */
+ gem_disable_ints(gp);
+
+ /* Allocate & setup ring buffers */
+ gem_init_rings(gp);
+
+ /* Configure pause thresholds */
+ gem_init_pause_thresholds(gp);
+
+ /* Init DMA & MAC engines */
+ gem_init_dma(gp);
+ gem_init_mac(gp);
+}
+
+
+static void gem_stop_phy(struct gem *gp, int wol)
+{
+ u32 mifcfg;
+
+ /* Let the chip settle down a bit; that seems to help
+ * with sleep mode on some models
+ */
+ msleep(10);
+
+ /* Make sure we aren't polling PHY status change. We
+ * don't currently use that feature though
+ */
+ mifcfg = readl(gp->regs + MIF_CFG);
+ mifcfg &= ~MIF_CFG_POLL;
+ writel(mifcfg, gp->regs + MIF_CFG);
+
+ if (wol && gp->has_wol) {
+ unsigned char *e = &gp->dev->dev_addr[0];
+ u32 csr;
+
+ /* Setup wake-on-lan for MAGIC packet */
+ writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
+ gp->regs + MAC_RXCFG);
+ writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
+ writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
+ writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);
+
+ writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT);
+ csr = WOL_WAKECSR_ENABLE;
+ if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0)
+ csr |= WOL_WAKECSR_MII;
+ writel(csr, gp->regs + WOL_WAKECSR);
+ } else {
+ writel(0, gp->regs + MAC_RXCFG);
+ (void)readl(gp->regs + MAC_RXCFG);
+ /* Machine sleep will die in strange ways if we
+ * don't wait a bit here; it looks like the chip
+ * takes some time to really shut down
+ */
+ msleep(10);
+ }
+
+ writel(0, gp->regs + MAC_TXCFG);
+ writel(0, gp->regs + MAC_XIFCFG);
+ writel(0, gp->regs + TXDMA_CFG);
+ writel(0, gp->regs + RXDMA_CFG);
+
+ if (!wol) {
+ gem_reset(gp);
+ writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
+ writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
+
+ if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
+ gp->phy_mii.def->ops->suspend(&gp->phy_mii);
+
+ /* According to Apple, we must set the MDIO pins to this benign
+ * state or we may 1) draw more current, 2) damage some PHYs
+ */
+ writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
+ writel(0, gp->regs + MIF_BBCLK);
+ writel(0, gp->regs + MIF_BBDATA);
+ writel(0, gp->regs + MIF_BBOENAB);
+ writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
+ (void) readl(gp->regs + MAC_XIFCFG);
+ }
+}
+
+static int gem_do_start(struct net_device *dev)
+{
+ struct gem *gp = netdev_priv(dev);
+ int rc;
+
+ /* Enable the cell */
+ gem_get_cell(gp);
+
+ /* Make sure PCI access and bus master are enabled */
+ rc = pci_enable_device(gp->pdev);
+ if (rc) {
+ netdev_err(dev, "Failed to enable chip on PCI bus !\n");
+
+ /* Put the cell back and forget it for now; it will be
+ * considered still asleep, and a new sleep cycle may
+ * bring it back
+ */
+ gem_put_cell(gp);
+ return -ENXIO;
+ }
+ pci_set_master(gp->pdev);
+
+ /* Init & setup chip hardware */
+ gem_reinit_chip(gp);
+
+ /* An interrupt might come in handy */
+ rc = request_irq(gp->pdev->irq, gem_interrupt,
+ IRQF_SHARED, dev->name, (void *)dev);
+ if (rc) {
+ netdev_err(dev, "failed to request irq !\n");
+
+ gem_reset(gp);
+ gem_clean_rings(gp);
+ gem_put_cell(gp);
+ return rc;
+ }
+
+ /* Mark us as attached again if we come from resume(); this has
+ * no effect if we weren't detached and needs to be done now.
+ */
+ netif_device_attach(dev);
+
+ /* Restart NAPI & queues */
+ gem_netif_start(gp);
+
+ /* Detect & init PHY, start autoneg etc... this will
+ * eventually result in starting DMA operations when
+ * the link is up
+ */
+ gem_init_phy(gp);
+
+ return 0;
+}
+
+static void gem_do_stop(struct net_device *dev, int wol)
+{
+ struct gem *gp = netdev_priv(dev);
+
+ /* Stop NAPI and stop tx queue */
+ gem_netif_stop(gp);
+
+ /* Make sure ints are disabled. We don't care about
+ * synchronizing as NAPI is disabled, thus a stray
+ * interrupt will do nothing bad (our irq handler
+ * just schedules NAPI)
+ */
+ gem_disable_ints(gp);
+
+ /* Stop the link timer */
+ del_timer_sync(&gp->link_timer);
+
+ /* We cannot cancel the reset task while holding the
+ * rtnl lock, we'd get an A->B / B->A deadlock situation
+ * if we did. This is not an issue however as the reset
+ * task is synchronized vs. us (rtnl_lock) and will do
+ * nothing if the device is down or suspended. We do
+ * still clear reset_task_pending to avoid a spurious
+ * reset later on in case we do resume before it gets
+ * scheduled.
+ */
+ gp->reset_task_pending = 0;
+
+ /* If we are going to sleep with WOL */
+ gem_stop_dma(gp);
+ msleep(10);
+ if (!wol)
+ gem_reset(gp);
+ msleep(10);
+
+ /* Get rid of rings */
+ gem_clean_rings(gp);
+
+ /* No irq needed anymore */
+ free_irq(gp->pdev->irq, (void *) dev);
+
+ /* Shut the PHY down eventually and setup WOL */
+ gem_stop_phy(gp, wol);
+
+ /* Make sure bus master is disabled */
+ pci_disable_device(gp->pdev);
+
+ /* The cell isn't needed either if WOL is off */
+ if (!wol)
+ gem_put_cell(gp);
+}
+
+static void gem_reset_task(struct work_struct *work)
+{
+ struct gem *gp = container_of(work, struct gem, reset_task);
+
+ /* Lock out the network stack (essentially shield ourselves
+ * against a racing open, close, control call, or suspend)
+ */
+ rtnl_lock();
+
+ /* Skip the reset task if suspended or closed, or if it's
+ * been cancelled by gem_do_stop (see comment there)
+ */
+ if (!netif_device_present(gp->dev) ||
+ !netif_running(gp->dev) ||
+ !gp->reset_task_pending) {
+ rtnl_unlock();
+ return;
+ }
+
+ /* Stop the link timer */
+ del_timer_sync(&gp->link_timer);
+
+ /* Stop NAPI and tx */
+ gem_netif_stop(gp);
+
+ /* Reset the chip & rings */
+ gem_reinit_chip(gp);
+ if (gp->lstate == link_up)
+ gem_set_link_modes(gp);
+
+ /* Restart NAPI and Tx */
+ gem_netif_start(gp);
+
+ /* We are back ! */
+ gp->reset_task_pending = 0;
+
+ /* If the link is not up, restart autoneg, else restart the
+ * polling timer
+ */
+ if (gp->lstate != link_up)
+ gem_begin_auto_negotiation(gp, NULL);
+ else
+ mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
+
+ rtnl_unlock();
+}
+
+static int gem_open(struct net_device *dev)
+{
+ /* We allow open while suspended; we just do nothing and
+ * the chip will be initialized in resume()
+ */
+ if (netif_device_present(dev))
+ return gem_do_start(dev);
+ return 0;
+}
+
+static int gem_close(struct net_device *dev)
+{
+ if (netif_device_present(dev))
+ gem_do_stop(dev, 0);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct gem *gp = netdev_priv(dev);
+
+ /* Lock the network stack first to avoid racing with open/close,
+ * reset task and setting calls
+ */
+ rtnl_lock();
+
+ /* Not running, mark ourselves non-present, no need for
+ * a lock here
+ */
+ if (!netif_running(dev)) {
+ netif_device_detach(dev);
+ rtnl_unlock();
+ return 0;
+ }
+ netdev_info(dev, "suspending, WakeOnLan %s\n",
+ (gp->wake_on_lan && netif_running(dev)) ?
+ "enabled" : "disabled");
+
+ /* Tell the network stack we're gone. gem_do_stop() below will
+ * synchronize with TX, stop NAPI etc...
+ */
+ netif_device_detach(dev);
+
+ /* Switch off chip, remember WOL setting */
+ gp->asleep_wol = gp->wake_on_lan;
+ gem_do_stop(dev, gp->asleep_wol);
+
+ /* Unlock the network stack */
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int gem_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct gem *gp = netdev_priv(dev);
+
+ /* See locking comment in gem_suspend */
+ rtnl_lock();
+
+ /* Not running, mark ourselves present, no need for
+ * a lock here
+ */
+ if (!netif_running(dev)) {
+ netif_device_attach(dev);
+ rtnl_unlock();
+ return 0;
+ }
+
+ /* Restart chip. If that fails there isn't much we can do, we
+ * leave things stopped.
+ */
+ gem_do_start(dev);
+
+ /* If we had WOL enabled, the cell clock was never turned off
+ * during sleep, so we end up being unbalanced. Fix that here
+ */
+ if (gp->asleep_wol)
+ gem_put_cell(gp);
+
+ /* Unlock the network stack */
+ rtnl_unlock();
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct net_device_stats *gem_get_stats(struct net_device *dev)
+{
+ struct gem *gp = netdev_priv(dev);
+
+ /* I have seen this being called while the PM was in progress,
+ * so we shield against this. Let's also not poke at registers
+ * while the reset task is going on.
+ *
+ * TODO: Move stats collection elsewhere (link timer ?) and
+ * make this a nop to avoid all those synchro issues
+ */
+ if (!netif_device_present(dev) || !netif_running(dev))
+ goto bail;
+
+ /* Better safe than sorry... */
+ if (WARN_ON(!gp->cell_enabled))
+ goto bail;
+
+ dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
+ writel(0, gp->regs + MAC_FCSERR);
+
+ dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
+ writel(0, gp->regs + MAC_AERR);
+
+ dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
+ writel(0, gp->regs + MAC_LERR);
+
+ dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
+ dev->stats.collisions +=
+ (readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL));
+ writel(0, gp->regs + MAC_ECOLL);
+ writel(0, gp->regs + MAC_LCOLL);
+ bail:
+ return &dev->stats;
+}
+
+static int gem_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *macaddr = (struct sockaddr *) addr;
+ struct gem *gp = netdev_priv(dev);
+ unsigned char *e = &dev->dev_addr[0];
+
+ if (!is_valid_ether_addr(macaddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
+
+ /* We'll just catch it later when the device is up'd or resumed */
+ if (!netif_running(dev) || !netif_device_present(dev))
+ return 0;
+
+ /* Better safe than sorry... */
+ if (WARN_ON(!gp->cell_enabled))
+ return 0;
+
+ writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
+ writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
+ writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
+
+ return 0;
+}
+
+static void gem_set_multicast(struct net_device *dev)
+{
+ struct gem *gp = netdev_priv(dev);
+ u32 rxcfg, rxcfg_new;
+ int limit = 10000;
+
+ if (!netif_running(dev) || !netif_device_present(dev))
+ return;
+
+ /* Better safe than sorry... */
+ if (gp->reset_task_pending || WARN_ON(!gp->cell_enabled))
+ return;
+
+ rxcfg = readl(gp->regs + MAC_RXCFG);
+ rxcfg_new = gem_setup_multicast(gp);
+#ifdef STRIP_FCS
+ rxcfg_new |= MAC_RXCFG_SFCS;
+#endif
+ gp->mac_rx_cfg = rxcfg_new;
+
+ writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
+ while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
+ if (!limit--)
+ break;
+ udelay(10);
+ }
+
+ rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
+ rxcfg |= rxcfg_new;
+
+ writel(rxcfg, gp->regs + MAC_RXCFG);
+}
+
+/* Jumbo-grams don't seem to work :-( */
+#define GEM_MIN_MTU 68
+#if 1
+#define GEM_MAX_MTU 1500
+#else
+#define GEM_MAX_MTU 9000
+#endif
+
+static int gem_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct gem *gp = netdev_priv(dev);
+
+ if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+
+ /* We'll just catch it later when the device is up'd or resumed */
+ if (!netif_running(dev) || !netif_device_present(dev))
+ return 0;
+
+ /* Better safe than sorry... */
+ if (WARN_ON(!gp->cell_enabled))
+ return 0;
+
+ gem_netif_stop(gp);
+ gem_reinit_chip(gp);
+ if (gp->lstate == link_up)
+ gem_set_link_modes(gp);
+ gem_netif_start(gp);
+
+ return 0;
+}
+
+static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct gem *gp = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(gp->pdev));
+}
+
+static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct gem *gp = netdev_priv(dev);
+
+ if (gp->phy_type == phy_mii_mdio0 ||
+ gp->phy_type == phy_mii_mdio1) {
+ if (gp->phy_mii.def)
+ cmd->supported = gp->phy_mii.def->features;
+ else
+ cmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full);
+
+ /* XXX hardcoded stuff for now */
+ cmd->port = PORT_MII;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->phy_address = 0; /* XXX fixed PHYAD */
+
+ /* Return current PHY settings */
+ cmd->autoneg = gp->want_autoneg;
+ ethtool_cmd_speed_set(cmd, gp->phy_mii.speed);
+ cmd->duplex = gp->phy_mii.duplex;
+ cmd->advertising = gp->phy_mii.advertising;
+
+ /* If we started with a forced mode, we don't have a default
+ * advertise set, so we need to return something sensible for
+ * userland to re-enable autoneg properly.
+ */
+ if (cmd->advertising == 0)
+ cmd->advertising = cmd->supported;
+ } else { // XXX PCS ?
+ cmd->supported =
+ (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg);
+ cmd->advertising = cmd->supported;
+ ethtool_cmd_speed_set(cmd, 0);
+ cmd->duplex = cmd->port = cmd->phy_address =
+ cmd->transceiver = cmd->autoneg = 0;
+
+ /* serdes usually means a fibre connector, with most settings fixed */
+ if (gp->phy_type == phy_serdes) {
+ cmd->port = PORT_FIBRE;
+ cmd->supported = (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE | SUPPORTED_Autoneg |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ cmd->advertising = cmd->supported;
+ cmd->transceiver = XCVR_INTERNAL;
+ if (gp->lstate == link_up)
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
+ cmd->duplex = DUPLEX_FULL;
+ cmd->autoneg = 1;
+ }
+ }
+ cmd->maxtxpkt = cmd->maxrxpkt = 0;
+
+ return 0;
+}
+
+static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct gem *gp = netdev_priv(dev);
+ u32 speed = ethtool_cmd_speed(cmd);
+
+ /* Verify the settings we care about. */
+ if (cmd->autoneg != AUTONEG_ENABLE &&
+ cmd->autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_ENABLE &&
+ cmd->advertising == 0)
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_DISABLE &&
+ ((speed != SPEED_1000 &&
+ speed != SPEED_100 &&
+ speed != SPEED_10) ||
+ (cmd->duplex != DUPLEX_HALF &&
+ cmd->duplex != DUPLEX_FULL)))
+ return -EINVAL;
+
+ /* Apply settings and restart link process. */
+ if (netif_device_present(gp->dev)) {
+ del_timer_sync(&gp->link_timer);
+ gem_begin_auto_negotiation(gp, cmd);
+ }
+
+ return 0;
+}
+
+static int gem_nway_reset(struct net_device *dev)
+{
+ struct gem *gp = netdev_priv(dev);
+
+ if (!gp->want_autoneg)
+ return -EINVAL;
+
+ /* Restart link process */
+ if (netif_device_present(gp->dev)) {
+ del_timer_sync(&gp->link_timer);
+ gem_begin_auto_negotiation(gp, NULL);
+ }
+
+ return 0;
+}
+
+static u32 gem_get_msglevel(struct net_device *dev)
+{
+ struct gem *gp = netdev_priv(dev);
+ return gp->msg_enable;
+}
+
+static void gem_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct gem *gp = netdev_priv(dev);
+ gp->msg_enable = value;
+}
+
+
+/* Add more when I understand how to program the chip */
+/* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */
+
+#define WOL_SUPPORTED_MASK (WAKE_MAGIC)
+
+static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct gem *gp = netdev_priv(dev);
+
+ /* Add more when I understand how to program the chip */
+ if (gp->has_wol) {
+ wol->supported = WOL_SUPPORTED_MASK;
+ wol->wolopts = gp->wake_on_lan;
+ } else {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ }
+}
+
+static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct gem *gp = netdev_priv(dev);
+
+ if (!gp->has_wol)
+ return -EOPNOTSUPP;
+ gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK;
+ return 0;
+}
+
+static const struct ethtool_ops gem_ethtool_ops = {
+ .get_drvinfo = gem_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_settings = gem_get_settings,
+ .set_settings = gem_set_settings,
+ .nway_reset = gem_nway_reset,
+ .get_msglevel = gem_get_msglevel,
+ .set_msglevel = gem_set_msglevel,
+ .get_wol = gem_get_wol,
+ .set_wol = gem_set_wol,
+};
+
+static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct gem *gp = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+ int rc = -EOPNOTSUPP;
+
+ /* For SIOCGMIIREG and SIOCSMIIREG the core checks for us that
+ * netif_device_present() is true and holds rtnl_lock for us
+ * so we have nothing to worry about
+ */
+
+ switch (cmd) {
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ data->phy_id = gp->mii_phy_addr;
+ /* Fallthrough... */
+
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ data->val_out = __phy_read(gp, data->phy_id & 0x1f,
+ data->reg_num & 0x1f);
+ rc = 0;
+ break;
+
+ case SIOCSMIIREG: /* Write MII PHY register. */
+ __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
+ data->val_in);
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+#if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC))
+/* Fetch MAC address from vital product data of PCI ROM. */
+static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr)
+{
+ int this_offset;
+
+ for (this_offset = 0x20; this_offset < len; this_offset++) {
+ void __iomem *p = rom_base + this_offset;
+ int i;
+
+ if (readb(p + 0) != 0x90 ||
+ readb(p + 1) != 0x00 ||
+ readb(p + 2) != 0x09 ||
+ readb(p + 3) != 0x4e ||
+ readb(p + 4) != 0x41 ||
+ readb(p + 5) != 0x06)
+ continue;
+
+ this_offset += 6;
+ p += 6;
+
+ for (i = 0; i < 6; i++)
+ dev_addr[i] = readb(p + i);
+ return 1;
+ }
+ return 0;
+}
+
+static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
+{
+ size_t size;
+ void __iomem *p = pci_map_rom(pdev, &size);
+
+ if (p) {
+ int found;
+
+ found = readb(p) == 0x55 &&
+ readb(p + 1) == 0xaa &&
+ find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
+ pci_unmap_rom(pdev, p);
+ if (found)
+ return;
+ }
+
+ /* Sun MAC prefix then 3 random bytes. */
+ dev_addr[0] = 0x08;
+ dev_addr[1] = 0x00;
+ dev_addr[2] = 0x20;
+ get_random_bytes(dev_addr + 3, 3);
+}
+#endif /* not Sparc and not PPC */
+
+static int __devinit gem_get_device_address(struct gem *gp)
+{
+#if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC)
+ struct net_device *dev = gp->dev;
+ const unsigned char *addr;
+
+ addr = of_get_property(gp->of_node, "local-mac-address", NULL);
+ if (addr == NULL) {
+#ifdef CONFIG_SPARC
+ addr = idprom->id_ethaddr;
+#else
+ printk("\n");
+ pr_err("%s: can't get mac-address\n", dev->name);
+ return -1;
+#endif
+ }
+ memcpy(dev->dev_addr, addr, 6);
+#else
+ get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
+#endif
+ return 0;
+}
+
+static void gem_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct gem *gp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ /* Ensure the reset task is truly gone */
+ cancel_work_sync(&gp->reset_task);
+
+ /* Free resources */
+ pci_free_consistent(pdev,
+ sizeof(struct gem_init_block),
+ gp->init_block,
+ gp->gblock_dvma);
+ iounmap(gp->regs);
+ pci_release_regions(pdev);
+ free_netdev(dev);
+
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+static const struct net_device_ops gem_netdev_ops = {
+ .ndo_open = gem_open,
+ .ndo_stop = gem_close,
+ .ndo_start_xmit = gem_start_xmit,
+ .ndo_get_stats = gem_get_stats,
+ .ndo_set_rx_mode = gem_set_multicast,
+ .ndo_do_ioctl = gem_ioctl,
+ .ndo_tx_timeout = gem_tx_timeout,
+ .ndo_change_mtu = gem_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = gem_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = gem_poll_controller,
+#endif
+};
+
+static int __devinit gem_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ unsigned long gemreg_base, gemreg_len;
+ struct net_device *dev;
+ struct gem *gp;
+ int err, pci_using_dac;
+
+ printk_once(KERN_INFO "%s", version);
+
+ /* Apple gmac note: during probe, the chip is powered up by
+ * the arch code to allow the code below to work (and to let
+ * the chip be probed via the config space). It won't stay
+ * powered up until the interface is brought up however, so
+ * we can't rely on register configuration done at this point.
+ */
+ err = pci_enable_device(pdev);
+ if (err) {
+ pr_err("Cannot enable MMIO operation, aborting\n");
+ return err;
+ }
+ pci_set_master(pdev);
+
+ /* Configure DMA attributes. */
+
+ /* All of the GEM documentation states that 64-bit DMA addressing
+ * is fully supported and should work just fine. However the
+ * front end for RIO based GEMs is different and only supports
+ * 32-bit addressing.
+ *
+ * For now we assume the various PPC GEMs are 32-bit only as well.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_SUN &&
+ pdev->device == PCI_DEVICE_ID_SUN_GEM &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ pci_using_dac = 1;
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ pr_err("No usable DMA configuration, aborting\n");
+ goto err_disable_device;
+ }
+ pci_using_dac = 0;
+ }
+
+ gemreg_base = pci_resource_start(pdev, 0);
+ gemreg_len = pci_resource_len(pdev, 0);
+
+ if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
+ pr_err("Cannot find proper PCI device base address, aborting\n");
+ err = -ENODEV;
+ goto err_disable_device;
+ }
+
+ dev = alloc_etherdev(sizeof(*gp));
+ if (!dev) {
+ pr_err("Etherdev alloc failed, aborting\n");
+ err = -ENOMEM;
+ goto err_disable_device;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ gp = netdev_priv(dev);
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ pr_err("Cannot obtain PCI resources, aborting\n");
+ goto err_out_free_netdev;
+ }
+
+ gp->pdev = pdev;
+ dev->base_addr = (long) pdev;
+ gp->dev = dev;
+
+ gp->msg_enable = DEFAULT_MSG;
+
+ init_timer(&gp->link_timer);
+ gp->link_timer.function = gem_link_timer;
+ gp->link_timer.data = (unsigned long) gp;
+
+ INIT_WORK(&gp->reset_task, gem_reset_task);
+
+ gp->lstate = link_down;
+ gp->timer_ticks = 0;
+ netif_carrier_off(dev);
+
+ gp->regs = ioremap(gemreg_base, gemreg_len);
+ if (!gp->regs) {
+ pr_err("Cannot map device registers, aborting\n");
+ err = -EIO;
+ goto err_out_free_res;
+ }
+
+ /* On Apple, we want a reference to the Open Firmware device-tree
+ * node. We use it for clock control.
+ */
+#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
+ gp->of_node = pci_device_to_OF_node(pdev);
+#endif
+
+ /* Only Apple version supports WOL afaik */
+ if (pdev->vendor == PCI_VENDOR_ID_APPLE)
+ gp->has_wol = 1;
+
+ /* Make sure cell is enabled */
+ gem_get_cell(gp);
+
+ /* Make sure everything is stopped and in init state */
+ gem_reset(gp);
+
+ /* Fill up the mii_phy structure (even if we won't use it) */
+ gp->phy_mii.dev = dev;
+ gp->phy_mii.mdio_read = _phy_read;
+ gp->phy_mii.mdio_write = _phy_write;
+#ifdef CONFIG_PPC_PMAC
+ gp->phy_mii.platform_data = gp->of_node;
+#endif
+ /* By default, we start with autoneg */
+ gp->want_autoneg = 1;
+
+ /* Check fifo sizes, PHY type, etc... */
+ if (gem_check_invariants(gp)) {
+ err = -ENODEV;
+ goto err_out_iounmap;
+ }
+
+ /* It is guaranteed that the returned buffer will be at least
+ * PAGE_SIZE aligned.
+ */
+ gp->init_block = (struct gem_init_block *)
+ pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
+ &gp->gblock_dvma);
+ if (!gp->init_block) {
+ pr_err("Cannot allocate init block, aborting\n");
+ err = -ENOMEM;
+ goto err_out_iounmap;
+ }
+
+ if (gem_get_device_address(gp))
+ goto err_out_free_consistent;
+
+ dev->netdev_ops = &gem_netdev_ops;
+ netif_napi_add(dev, &gp->napi, gem_poll, 64);
+ dev->ethtool_ops = &gem_ethtool_ops;
+ dev->watchdog_timeo = 5 * HZ;
+ dev->irq = pdev->irq;
+ dev->dma = 0;
+
+ /* Set that now, in case PM kicks in now */
+ pci_set_drvdata(pdev, dev);
+
+ /* We can do scatter/gather and HW checksum */
+ dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= dev->hw_features | NETIF_F_RXCSUM;
+ if (pci_using_dac)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ /* Register with kernel */
+ if (register_netdev(dev)) {
+ pr_err("Cannot register net device, aborting\n");
+ err = -ENOMEM;
+ goto err_out_free_consistent;
+ }
+
+ /* Undo the get_cell with appropriate locking (we could use
+ * ndo_init/uninit but that would be even more clumsy imho)
+ */
+ rtnl_lock();
+ gem_put_cell(gp);
+ rtnl_unlock();
+
+ netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
+ dev->dev_addr);
+ return 0;
+
+err_out_free_consistent:
+ gem_remove_one(pdev);
+err_out_iounmap:
+ gem_put_cell(gp);
+ iounmap(gp->regs);
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_free_netdev:
+ free_netdev(dev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return err;
+
+}
+
+
+static struct pci_driver gem_driver = {
+ .name = GEM_MODULE_NAME,
+ .id_table = gem_pci_tbl,
+ .probe = gem_init_one,
+ .remove = gem_remove_one,
+#ifdef CONFIG_PM
+ .suspend = gem_suspend,
+ .resume = gem_resume,
+#endif /* CONFIG_PM */
+};
+
+static int __init gem_init(void)
+{
+ return pci_register_driver(&gem_driver);
+}
+
+static void __exit gem_cleanup(void)
+{
+ pci_unregister_driver(&gem_driver);
+}
+
+module_init(gem_init);
+module_exit(gem_cleanup);
diff --git a/drivers/net/ethernet/sun/sungem.h b/drivers/net/ethernet/sun/sungem.h
new file mode 100644
index 000000000000..835ce1b3cb9f
--- /dev/null
+++ b/drivers/net/ethernet/sun/sungem.h
@@ -0,0 +1,1027 @@
+/* $Id: sungem.h,v 1.10.2.4 2002/03/11 08:54:48 davem Exp $
+ * sungem.h: Definitions for Sun GEM ethernet driver.
+ *
+ * Copyright (C) 2000 David S. Miller (davem@redhat.com)
+ */
+
+#ifndef _SUNGEM_H
+#define _SUNGEM_H
+
+/* Global Registers */
+#define GREG_SEBSTATE 0x0000UL /* SEB State Register */
+#define GREG_CFG 0x0004UL /* Configuration Register */
+#define GREG_STAT 0x000CUL /* Status Register */
+#define GREG_IMASK 0x0010UL /* Interrupt Mask Register */
+#define GREG_IACK 0x0014UL /* Interrupt ACK Register */
+#define GREG_STAT2 0x001CUL /* Alias of GREG_STAT */
+#define GREG_PCIESTAT 0x1000UL /* PCI Error Status Register */
+#define GREG_PCIEMASK 0x1004UL /* PCI Error Mask Register */
+#define GREG_BIFCFG 0x1008UL /* BIF Configuration Register */
+#define GREG_BIFDIAG 0x100CUL /* BIF Diagnostics Register */
+#define GREG_SWRST 0x1010UL /* Software Reset Register */
+
+/* Global SEB State Register */
+#define GREG_SEBSTATE_ARB 0x00000003 /* State of Arbiter */
+#define GREG_SEBSTATE_RXWON 0x00000004 /* RX won internal arbitration */
+
+/* Global Configuration Register */
+#define GREG_CFG_IBURST 0x00000001 /* Infinite Burst */
+#define GREG_CFG_TXDMALIM 0x0000003e /* TX DMA grant limit */
+#define GREG_CFG_RXDMALIM 0x000007c0 /* RX DMA grant limit */
+#define GREG_CFG_RONPAULBIT 0x00000800 /* Use mem read multiple for PCI read
+ * after infinite burst (Apple) */
+#define GREG_CFG_ENBUG2FIX 0x00001000 /* Fix Rx hang after overflow */
+
+/* Global Interrupt Status Register.
+ *
+ * Reading this register automatically clears bits 0 through 6.
+ * This auto-clearing does not occur when the alias at GREG_STAT2
+ * is read instead. The rest of the interrupt bits only clear when
+ * the secondary interrupt status register corresponding to that
+ * bit is read (ie. if GREG_STAT_PCS is set, it will be cleared by
+ * reading PCS_ISTAT).
+ */
+#define GREG_STAT_TXINTME 0x00000001 /* TX INTME frame transferred */
+#define GREG_STAT_TXALL 0x00000002 /* All TX frames transferred */
+#define GREG_STAT_TXDONE 0x00000004 /* One TX frame transferred */
+#define GREG_STAT_RXDONE 0x00000010 /* One RX frame arrived */
+#define GREG_STAT_RXNOBUF 0x00000020 /* No free RX buffers available */
+#define GREG_STAT_RXTAGERR 0x00000040 /* RX tag framing is corrupt */
+#define GREG_STAT_PCS 0x00002000 /* PCS signalled interrupt */
+#define GREG_STAT_TXMAC 0x00004000 /* TX MAC signalled interrupt */
+#define GREG_STAT_RXMAC 0x00008000 /* RX MAC signalled interrupt */
+#define GREG_STAT_MAC 0x00010000 /* MAC Control signalled irq */
+#define GREG_STAT_MIF 0x00020000 /* MIF signalled interrupt */
+#define GREG_STAT_PCIERR 0x00040000 /* PCI Error interrupt */
+#define GREG_STAT_TXNR 0xfff80000 /* == TXDMA_TXDONE reg val */
+#define GREG_STAT_TXNR_SHIFT 19
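+
+/* The TX completion index can thus be pulled straight out of the
+ * status word, e.g.
+ *
+ * tx_done = (stat & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT;
+ *
+ * where stat came from the interrupt path (or from the GREG_STAT2
+ * alias, which avoids the bit 0-6 auto-clear side effect). This is
+ * why the driver rarely needs to read TXDMA_TXDONE itself.
+ */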
+
+#define GREG_STAT_ABNORMAL (GREG_STAT_RXNOBUF | GREG_STAT_RXTAGERR | \
+ GREG_STAT_PCS | GREG_STAT_TXMAC | GREG_STAT_RXMAC | \
+ GREG_STAT_MAC | GREG_STAT_MIF | GREG_STAT_PCIERR)
+
+#define GREG_STAT_NAPI (GREG_STAT_TXALL | GREG_STAT_TXINTME | \
+ GREG_STAT_RXDONE | GREG_STAT_ABNORMAL)
+
+/* The layout of GREG_IMASK and GREG_IACK is identical to GREG_STAT.
+ * Bits set in GREG_IMASK will prevent that interrupt type from being
+ * signalled to the cpu. GREG_IACK can be used to clear specific top-level
+ * interrupt conditions in GREG_STAT, ie. it only works for bits 0 through 6.
+ * Setting a bit will clear that interrupt; clear bits have no effect
+ * on GREG_STAT.
+ */
+
+/* Global PCI Error Status Register */
+#define GREG_PCIESTAT_BADACK 0x00000001 /* No ACK64# during ABS64 cycle */
+#define GREG_PCIESTAT_DTRTO 0x00000002 /* Delayed transaction timeout */
+#define GREG_PCIESTAT_OTHER 0x00000004 /* Other PCI error, check cfg space */
+
+/* The layout of the GREG_PCIEMASK is identical to that of GREG_PCIESTAT.
+ * Bits set in GREG_PCIEMASK will prevent that interrupt type from being
+ * signalled to the cpu.
+ */
+
+/* Global BIF Configuration Register */
+#define GREG_BIFCFG_SLOWCLK 0x00000001 /* Set if PCI runs < 25Mhz */
+#define GREG_BIFCFG_B64DIS 0x00000002 /* Disable 64bit wide data cycle*/
+#define GREG_BIFCFG_M66EN 0x00000004 /* Set if on 66Mhz PCI segment */
+
+/* Global BIF Diagnostics Register */
+#define GREG_BIFDIAG_BURSTSM 0x007f0000 /* PCI Burst state machine */
+#define GREG_BIFDIAG_BIFSM 0xff000000 /* BIF state machine */
+
+/* Global Software Reset Register.
+ *
+ * This register is used to perform a global reset of the RX and TX portions
+ * of the GEM asic. Setting the RX or TX reset bit will start the reset.
+ * The driver _MUST_ poll these bits until they clear. One may not attempt
+ * to program any other part of GEM until the bits clear.
+ */
+#define GREG_SWRST_TXRST 0x00000001 /* TX Software Reset */
+#define GREG_SWRST_RXRST 0x00000002 /* RX Software Reset */
+#define GREG_SWRST_RSTOUT 0x00000004 /* Force RST# pin active */
+#define GREG_SWRST_CACHESIZE 0x00ff0000 /* RIO only: cache line size */
+#define GREG_SWRST_CACHE_SHIFT 16
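+
+/* Example: for a 64-byte cache line, gem_check_invariants() programs
+ * (64 / 4) << GREG_SWRST_CACHE_SHIFT, i.e. the line size in 32-bit
+ * words, as the swrst_base ORed into every reset on RIO.
+ */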
+
+/* TX DMA Registers */
+#define TXDMA_KICK 0x2000UL /* TX Kick Register */
+#define TXDMA_CFG 0x2004UL /* TX Configuration Register */
+#define TXDMA_DBLOW 0x2008UL /* TX Desc. Base Low */
+#define TXDMA_DBHI 0x200CUL /* TX Desc. Base High */
+#define TXDMA_FWPTR 0x2014UL /* TX FIFO Write Pointer */
+#define TXDMA_FSWPTR 0x2018UL /* TX FIFO Shadow Write Pointer */
+#define TXDMA_FRPTR 0x201CUL /* TX FIFO Read Pointer */
+#define TXDMA_FSRPTR 0x2020UL /* TX FIFO Shadow Read Pointer */
+#define TXDMA_PCNT 0x2024UL /* TX FIFO Packet Counter */
+#define TXDMA_SMACHINE 0x2028UL /* TX State Machine Register */
+#define TXDMA_DPLOW 0x2030UL /* TX Data Pointer Low */
+#define TXDMA_DPHI 0x2034UL /* TX Data Pointer High */
+#define TXDMA_TXDONE 0x2100UL /* TX Completion Register */
+#define TXDMA_FADDR 0x2104UL /* TX FIFO Address */
+#define TXDMA_FTAG 0x2108UL /* TX FIFO Tag */
+#define TXDMA_DLOW 0x210CUL /* TX FIFO Data Low */
+#define TXDMA_DHIT1 0x2110UL /* TX FIFO Data HighT1 */
+#define TXDMA_DHIT0 0x2114UL /* TX FIFO Data HighT0 */
+#define TXDMA_FSZ 0x2118UL /* TX FIFO Size */
+
+/* TX Kick Register.
+ *
+ * This 13-bit register is programmed by the driver to hold the descriptor
+ * entry index which follows the last valid transmit descriptor.
+ */
+
+/* TX Completion Register.
+ *
+ * This 13-bit register is updated by GEM to hold the descriptor entry index
+ * which follows the last descriptor already processed by GEM. Note that
+ * this value is mirrored in GREG_STAT which eliminates the need to even
+ * access this register in the driver during interrupt processing.
+ */
+
+/* TX Configuration Register.
+ *
+ * Note that TXDMA_CFG_FTHRESH, the TX FIFO Threshold, is an obsolete feature
+ * that was meant to be used with jumbo packets. It should be set to the
+ * maximum value of 0x4ff, else one risks getting TX MAC Underrun errors.
+ */
+#define TXDMA_CFG_ENABLE 0x00000001 /* Enable TX DMA channel */
+#define TXDMA_CFG_RINGSZ 0x0000001e /* TX descriptor ring size */
+#define TXDMA_CFG_RINGSZ_32 0x00000000 /* 32 TX descriptors */
+#define TXDMA_CFG_RINGSZ_64 0x00000002 /* 64 TX descriptors */
+#define TXDMA_CFG_RINGSZ_128 0x00000004 /* 128 TX descriptors */
+#define TXDMA_CFG_RINGSZ_256 0x00000006 /* 256 TX descriptors */
+#define TXDMA_CFG_RINGSZ_512 0x00000008 /* 512 TX descriptors */
+#define TXDMA_CFG_RINGSZ_1K 0x0000000a /* 1024 TX descriptors */
+#define TXDMA_CFG_RINGSZ_2K 0x0000000c /* 2048 TX descriptors */
+#define TXDMA_CFG_RINGSZ_4K 0x0000000e /* 4096 TX descriptors */
+#define TXDMA_CFG_RINGSZ_8K 0x00000010 /* 8192 TX descriptors */
+#define TXDMA_CFG_PIOSEL 0x00000020 /* Enable TX FIFO PIO from cpu */
+#define TXDMA_CFG_FTHRESH 0x001ffc00 /* TX FIFO Threshold, obsolete */
+#define TXDMA_CFG_PMODE 0x00200000 /* TXALL irq means TX FIFO empty*/
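+
+/* Editorial sketch: composing a TX configuration value per the note above,
+ * with FTHRESH forced to its 0x4ff maximum (the field starts at bit 10).
+ * The 128-entry ring size is just an example; see TXDMA_CFG_BASE further
+ * below for how the driver picks it.
+ */
+#if 0
+ writel(TXDMA_CFG_RINGSZ_128 |
+ ((0x4ff << 10) & TXDMA_CFG_FTHRESH) |
+ TXDMA_CFG_ENABLE,
+ regs + TXDMA_CFG);
+#endif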
+
+/* TX Descriptor Base Low/High.
+ *
+ * These two registers store the 53 most significant bits of the base address
+ * of the TX descriptor table. The 11 least significant bits are always
+ * zero. As a result, the TX descriptor table must be 2K aligned.
+ */
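+
+/* Editorial sketch: splitting a 64-bit, 2K-aligned descriptor table
+ * address across the two base registers; `desc_dma` is assumed to be the
+ * dma_addr_t of the driver's coherent allocation.
+ */
+#if 0
+ writel(desc_dma >> 32, regs + TXDMA_DBHI);
+ writel(desc_dma & 0xffffffff, regs + TXDMA_DBLOW);
+#endif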
+
+/* The rest of the TXDMA_* registers are for diagnostics and debug; I will
+ * document them later. -DaveM
+ */
+
+/* WakeOnLan Registers */
+#define WOL_MATCH0 0x3000UL
+#define WOL_MATCH1 0x3004UL
+#define WOL_MATCH2 0x3008UL
+#define WOL_MCOUNT 0x300CUL
+#define WOL_WAKECSR 0x3010UL
+
+/* WOL Match count register
+ */
+#define WOL_MCOUNT_N 0x00000010
+#define WOL_MCOUNT_M 0x00000000 /* 0 << 8 */
+
+#define WOL_WAKECSR_ENABLE 0x00000001
+#define WOL_WAKECSR_MII 0x00000002
+#define WOL_WAKECSR_SEEN 0x00000004
+#define WOL_WAKECSR_FILT_UCAST 0x00000008
+#define WOL_WAKECSR_FILT_MCAST 0x00000010
+#define WOL_WAKECSR_FILT_BCAST 0x00000020
+#define WOL_WAKECSR_FILT_SEEN 0x00000040
+
+
+/* Receive DMA Registers */
+#define RXDMA_CFG 0x4000UL /* RX Configuration Register */
+#define RXDMA_DBLOW 0x4004UL /* RX Descriptor Base Low */
+#define RXDMA_DBHI 0x4008UL /* RX Descriptor Base High */
+#define RXDMA_FWPTR 0x400CUL /* RX FIFO Write Pointer */
+#define RXDMA_FSWPTR 0x4010UL /* RX FIFO Shadow Write Pointer */
+#define RXDMA_FRPTR 0x4014UL /* RX FIFO Read Pointer */
+#define RXDMA_PCNT 0x4018UL /* RX FIFO Packet Counter */
+#define RXDMA_SMACHINE 0x401CUL /* RX State Machine Register */
+#define RXDMA_PTHRESH 0x4020UL /* Pause Thresholds */
+#define RXDMA_DPLOW 0x4024UL /* RX Data Pointer Low */
+#define RXDMA_DPHI 0x4028UL /* RX Data Pointer High */
+#define RXDMA_KICK 0x4100UL /* RX Kick Register */
+#define RXDMA_DONE 0x4104UL /* RX Completion Register */
+#define RXDMA_BLANK 0x4108UL /* RX Blanking Register */
+#define RXDMA_FADDR 0x410CUL /* RX FIFO Address */
+#define RXDMA_FTAG 0x4110UL /* RX FIFO Tag */
+#define RXDMA_DLOW 0x4114UL /* RX FIFO Data Low */
+#define RXDMA_DHIT1 0x4118UL /* RX FIFO Data HighT1 */
+#define RXDMA_DHIT0 0x411CUL /* RX FIFO Data HighT0 */
+#define RXDMA_FSZ 0x4120UL /* RX FIFO Size */
+
+/* RX Configuration Register. */
+#define RXDMA_CFG_ENABLE 0x00000001 /* Enable RX DMA channel */
+#define RXDMA_CFG_RINGSZ 0x0000001e /* RX descriptor ring size */
+#define RXDMA_CFG_RINGSZ_32 0x00000000 /* - 32 entries */
+#define RXDMA_CFG_RINGSZ_64 0x00000002 /* - 64 entries */
+#define RXDMA_CFG_RINGSZ_128 0x00000004 /* - 128 entries */
+#define RXDMA_CFG_RINGSZ_256 0x00000006 /* - 256 entries */
+#define RXDMA_CFG_RINGSZ_512 0x00000008 /* - 512 entries */
+#define RXDMA_CFG_RINGSZ_1K 0x0000000a /* - 1024 entries */
+#define RXDMA_CFG_RINGSZ_2K 0x0000000c /* - 2048 entries */
+#define RXDMA_CFG_RINGSZ_4K 0x0000000e /* - 4096 entries */
+#define RXDMA_CFG_RINGSZ_8K 0x00000010 /* - 8192 entries */
+#define RXDMA_CFG_RINGSZ_BDISAB 0x00000020 /* Disable RX desc batching */
+#define RXDMA_CFG_FBOFF 0x00001c00 /* Offset of first data byte */
+#define RXDMA_CFG_CSUMOFF 0x000fe000 /* Skip bytes before csum calc */
+#define RXDMA_CFG_FTHRESH 0x07000000 /* RX FIFO dma start threshold */
+#define RXDMA_CFG_FTHRESH_64 0x00000000 /* - 64 bytes */
+#define RXDMA_CFG_FTHRESH_128 0x01000000 /* - 128 bytes */
+#define RXDMA_CFG_FTHRESH_256 0x02000000 /* - 256 bytes */
+#define RXDMA_CFG_FTHRESH_512 0x03000000 /* - 512 bytes */
+#define RXDMA_CFG_FTHRESH_1K 0x04000000 /* - 1024 bytes */
+#define RXDMA_CFG_FTHRESH_2K 0x05000000 /* - 2048 bytes */
+
+/* RX Descriptor Base Low/High.
+ *
+ * These two registers store the 53 most significant bits of the base address
+ * of the RX descriptor table. The 11 least significant bits are always
+ * zero. As a result, the RX descriptor table must be 2K aligned.
+ */
+
+/* RX PAUSE Thresholds.
+ *
+ * These values determine when XOFF and XON PAUSE frames are emitted by
+ * GEM. The thresholds measure RX FIFO occupancy in units of 64 bytes.
+ */
+#define RXDMA_PTHRESH_OFF 0x000001ff /* XOFF emitted w/FIFO > this */
+#define RXDMA_PTHRESH_ON 0x001ff000 /* XON emitted w/FIFO < this */
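+
+/* Editorial sketch: programming the pause thresholds from byte counts.
+ * Both fields are in units of 64 bytes and the XON field starts at bit 12;
+ * `rx_pause_off` and `rx_pause_on` are assumed byte-denominated values
+ * kept by the driver.
+ */
+#if 0
+ writel((((rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF) |
+ (((rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON),
+ regs + RXDMA_PTHRESH);
+#endif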
+
+/* RX Kick Register.
+ *
+ * This 13-bit register is written by the host CPU and holds the last
+ * valid RX descriptor number plus one. That is, if 'N' is written to
+ * this register, it means that all RX descriptors up to but excluding
+ * 'N' are valid.
+ *
+ * The hardware requires that RX descriptors are posted in increments
+ * of 4. This means 'N' must be a multiple of four. For the best
+ * performance, the first new descriptor being posted should be (PCI)
+ * cache line aligned.
+ */
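+
+/* Editorial sketch: honoring the multiple-of-4 posting rule when kicking
+ * newly refilled descriptors; `rx_new` is assumed to be the driver's
+ * next-free ring index.
+ */
+#if 0
+ writel(rx_new & ~3, regs + RXDMA_KICK); /* round down to multiple of 4 */
+#endif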
+
+/* RX Completion Register.
+ *
+ * This 13-bit register is updated by GEM to indicate which RX descriptors
+ * have already been used for receive frames. All descriptors up to but
+ * excluding the value in this register are ready to be processed. GEM
+ * updates this register value after the RX FIFO empties completely into
+ * the RX descriptor's buffer, but before the RX_DONE bit is set in the
+ * interrupt status register.
+ */
+
+/* RX Blanking Register. */
+#define RXDMA_BLANK_IPKTS 0x000001ff /* RX_DONE asserted after this
+ * many packets received since
+ * previous RX_DONE.
+ */
+#define RXDMA_BLANK_ITIME 0x000ff000 /* RX_DONE asserted after this
+ * many intervals (each 2048
+ * PCI clocks) have elapsed
+ * since the previous RX_DONE.
+ */
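+
+/* Editorial sketch: composing a blanking value. The packet count lives
+ * in bits [8:0] and the time field starts at bit 12; the 5-packet and
+ * 8-interval numbers are illustrative, not recommendations from this
+ * header.
+ */
+#if 0
+ writel((5 & RXDMA_BLANK_IPKTS) |
+ ((8 << 12) & RXDMA_BLANK_ITIME),
+ regs + RXDMA_BLANK);
+#endif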
+
+/* RX FIFO Size.
+ *
+ * This 11-bit read-only register indicates how large, in units of 64 bytes,
+ * the RX FIFO is. The driver uses this to properly configure the RX PAUSE
+ * thresholds.
+ */
+
+/* The rest of the RXDMA_* registers are for diagnostics and debug; I will
+ * document them later. -DaveM
+ */
+
+/* MAC Registers */
+#define MAC_TXRST 0x6000UL /* TX MAC Software Reset Command*/
+#define MAC_RXRST 0x6004UL /* RX MAC Software Reset Command*/
+#define MAC_SNDPAUSE 0x6008UL /* Send Pause Command Register */
+#define MAC_TXSTAT 0x6010UL /* TX MAC Status Register */
+#define MAC_RXSTAT 0x6014UL /* RX MAC Status Register */
+#define MAC_CSTAT 0x6018UL /* MAC Control Status Register */
+#define MAC_TXMASK 0x6020UL /* TX MAC Mask Register */
+#define MAC_RXMASK 0x6024UL /* RX MAC Mask Register */
+#define MAC_MCMASK 0x6028UL /* MAC Control Mask Register */
+#define MAC_TXCFG 0x6030UL /* TX MAC Configuration Register*/
+#define MAC_RXCFG 0x6034UL /* RX MAC Configuration Register*/
+#define MAC_MCCFG 0x6038UL /* MAC Control Config Register */
+#define MAC_XIFCFG 0x603CUL /* XIF Configuration Register */
+#define MAC_IPG0 0x6040UL /* InterPacketGap0 Register */
+#define MAC_IPG1 0x6044UL /* InterPacketGap1 Register */
+#define MAC_IPG2 0x6048UL /* InterPacketGap2 Register */
+#define MAC_STIME 0x604CUL /* SlotTime Register */
+#define MAC_MINFSZ 0x6050UL /* MinFrameSize Register */
+#define MAC_MAXFSZ 0x6054UL /* MaxFrameSize Register */
+#define MAC_PASIZE 0x6058UL /* PA Size Register */
+#define MAC_JAMSIZE 0x605CUL /* JamSize Register */
+#define MAC_ATTLIM 0x6060UL /* Attempt Limit Register */
+#define MAC_MCTYPE 0x6064UL /* MAC Control Type Register */
+#define MAC_ADDR0 0x6080UL /* MAC Address 0 Register */
+#define MAC_ADDR1 0x6084UL /* MAC Address 1 Register */
+#define MAC_ADDR2 0x6088UL /* MAC Address 2 Register */
+#define MAC_ADDR3 0x608CUL /* MAC Address 3 Register */
+#define MAC_ADDR4 0x6090UL /* MAC Address 4 Register */
+#define MAC_ADDR5 0x6094UL /* MAC Address 5 Register */
+#define MAC_ADDR6 0x6098UL /* MAC Address 6 Register */
+#define MAC_ADDR7 0x609CUL /* MAC Address 7 Register */
+#define MAC_ADDR8 0x60A0UL /* MAC Address 8 Register */
+#define MAC_AFILT0 0x60A4UL /* Address Filter 0 Register */
+#define MAC_AFILT1 0x60A8UL /* Address Filter 1 Register */
+#define MAC_AFILT2 0x60ACUL /* Address Filter 2 Register */
+#define MAC_AF21MSK 0x60B0UL /* Address Filter 2&1 Mask Reg */
+#define MAC_AF0MSK 0x60B4UL /* Address Filter 0 Mask Reg */
+#define MAC_HASH0 0x60C0UL /* Hash Table 0 Register */
+#define MAC_HASH1 0x60C4UL /* Hash Table 1 Register */
+#define MAC_HASH2 0x60C8UL /* Hash Table 2 Register */
+#define MAC_HASH3 0x60CCUL /* Hash Table 3 Register */
+#define MAC_HASH4 0x60D0UL /* Hash Table 4 Register */
+#define MAC_HASH5 0x60D4UL /* Hash Table 5 Register */
+#define MAC_HASH6 0x60D8UL /* Hash Table 6 Register */
+#define MAC_HASH7 0x60DCUL /* Hash Table 7 Register */
+#define MAC_HASH8 0x60E0UL /* Hash Table 8 Register */
+#define MAC_HASH9 0x60E4UL /* Hash Table 9 Register */
+#define MAC_HASH10 0x60E8UL /* Hash Table 10 Register */
+#define MAC_HASH11 0x60ECUL /* Hash Table 11 Register */
+#define MAC_HASH12 0x60F0UL /* Hash Table 12 Register */
+#define MAC_HASH13 0x60F4UL /* Hash Table 13 Register */
+#define MAC_HASH14 0x60F8UL /* Hash Table 14 Register */
+#define MAC_HASH15 0x60FCUL /* Hash Table 15 Register */
+#define MAC_NCOLL 0x6100UL /* Normal Collision Counter */
+#define MAC_FASUCC 0x6104UL /* First Attempt Succ. Coll Ctr.*/
+#define MAC_ECOLL 0x6108UL /* Excessive Collision Counter */
+#define MAC_LCOLL 0x610CUL /* Late Collision Counter */
+#define MAC_DTIMER 0x6110UL /* Defer Timer */
+#define MAC_PATMPS 0x6114UL /* Peak Attempts Register */
+#define MAC_RFCTR 0x6118UL /* Receive Frame Counter */
+#define MAC_LERR 0x611CUL /* Length Error Counter */
+#define MAC_AERR 0x6120UL /* Alignment Error Counter */
+#define MAC_FCSERR 0x6124UL /* FCS Error Counter */
+#define MAC_RXCVERR 0x6128UL /* RX code Violation Error Ctr */
+#define MAC_RANDSEED 0x6130UL /* Random Number Seed Register */
+#define MAC_SMACHINE 0x6134UL /* State Machine Register */
+
+/* TX MAC Software Reset Command. */
+#define MAC_TXRST_CMD 0x00000001 /* Start sw reset, self-clears */
+
+/* RX MAC Software Reset Command. */
+#define MAC_RXRST_CMD 0x00000001 /* Start sw reset, self-clears */
+
+/* Send Pause Command. */
+#define MAC_SNDPAUSE_TS 0x0000ffff /* The pause_time operand used in
+ * Send_Pause and flow-control
+ * handshakes.
+ */
+#define MAC_SNDPAUSE_SP 0x00010000 /* Setting this bit instructs the MAC
+ * to send a Pause Flow Control
+ * frame onto the network.
+ */
+
+/* TX MAC Status Register. */
+#define MAC_TXSTAT_XMIT 0x00000001 /* Frame Transmitted */
+#define MAC_TXSTAT_URUN 0x00000002 /* TX Underrun */
+#define MAC_TXSTAT_MPE 0x00000004 /* Max Packet Size Error */
+#define MAC_TXSTAT_NCE 0x00000008 /* Normal Collision Cntr Expire */
+#define MAC_TXSTAT_ECE 0x00000010 /* Excess Collision Cntr Expire */
+#define MAC_TXSTAT_LCE 0x00000020 /* Late Collision Cntr Expire */
+#define MAC_TXSTAT_FCE 0x00000040 /* First Collision Cntr Expire */
+#define MAC_TXSTAT_DTE 0x00000080 /* Defer Timer Expire */
+#define MAC_TXSTAT_PCE 0x00000100 /* Peak Attempts Cntr Expire */
+
+/* RX MAC Status Register. */
+#define MAC_RXSTAT_RCV 0x00000001 /* Frame Received */
+#define MAC_RXSTAT_OFLW 0x00000002 /* Receive Overflow */
+#define MAC_RXSTAT_FCE 0x00000004 /* Frame Cntr Expire */
+#define MAC_RXSTAT_ACE 0x00000008 /* Align Error Cntr Expire */
+#define MAC_RXSTAT_CCE 0x00000010 /* CRC Error Cntr Expire */
+#define MAC_RXSTAT_LCE 0x00000020 /* Length Error Cntr Expire */
+#define MAC_RXSTAT_VCE 0x00000040 /* Code Violation Cntr Expire */
+
+/* MAC Control Status Register. */
+#define MAC_CSTAT_PRCV 0x00000001 /* Pause Received */
+#define MAC_CSTAT_PS 0x00000002 /* Paused State */
+#define MAC_CSTAT_NPS 0x00000004 /* Not Paused State */
+#define MAC_CSTAT_PTR 0xffff0000 /* Pause Time Received */
+
+/* The layout of the MAC_{TX,RX,C}MASK registers is identical to that
+ * of MAC_{TX,RX,C}STAT. Bits set in MAC_{TX,RX,C}MASK will prevent
+ * that interrupt type from being signalled to the front end of GEM. For
+ * the interrupt to actually get sent to the cpu, it is necessary to
+ * properly set the appropriate GREG_IMASK_{TX,RX,}MAC bits as well.
+ */
+
+/* TX MAC Configuration Register.
+ *
+ * NOTE: The TX MAC Enable bit must be cleared and polled until
+ * zero before any other bits in this register are changed.
+ *
+ * Also, enabling the Carrier Extension feature of GEM is
+ * a 3 step process: 1) Set TX Carrier Extension, 2) Set
+ * RX Carrier Extension, 3) Set Slot Time to 0x200. This
+ * mode must be enabled when in half-duplex at 1Gbps, else
+ * it must be disabled.
+ */
+#define MAC_TXCFG_ENAB 0x00000001 /* TX MAC Enable */
+#define MAC_TXCFG_ICS 0x00000002 /* Ignore Carrier Sense */
+#define MAC_TXCFG_ICOLL 0x00000004 /* Ignore Collisions */
+#define MAC_TXCFG_EIPG0 0x00000008 /* Enable IPG0 */
+#define MAC_TXCFG_NGU 0x00000010 /* Never Give Up */
+#define MAC_TXCFG_NGUL 0x00000020 /* Never Give Up Limit */
+#define MAC_TXCFG_NBO 0x00000040 /* No Backoff */
+#define MAC_TXCFG_SD 0x00000080 /* Slow Down */
+#define MAC_TXCFG_NFCS 0x00000100 /* No FCS */
+#define MAC_TXCFG_TCE 0x00000200 /* TX Carrier Extension */
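+
+/* Editorial sketch: the disable-and-poll sequence required by the NOTE
+ * above before touching other MAC_TXCFG bits. The poll is shown unbounded
+ * for brevity; a real driver would bound it.
+ */
+#if 0
+ writel(readl(regs + MAC_TXCFG) & ~MAC_TXCFG_ENAB, regs + MAC_TXCFG);
+ while (readl(regs + MAC_TXCFG) & MAC_TXCFG_ENAB)
+ udelay(100);
+ /* ... change other MAC_TXCFG bits, then set MAC_TXCFG_ENAB again ... */
+#endif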
+
+/* RX MAC Configuration Register.
+ *
+ * NOTE: The RX MAC Enable bit must be cleared and polled until
+ * zero before any other bits in this register are changed.
+ *
+ * Similar rules apply to the Hash Filter Enable bit when
+ * programming the hash table registers, and the Address Filter
+ * Enable bit when programming the address filter registers.
+ */
+#define MAC_RXCFG_ENAB 0x00000001 /* RX MAC Enable */
+#define MAC_RXCFG_SPAD 0x00000002 /* Strip Pad */
+#define MAC_RXCFG_SFCS 0x00000004 /* Strip FCS */
+#define MAC_RXCFG_PROM 0x00000008 /* Promiscuous Mode */
+#define MAC_RXCFG_PGRP 0x00000010 /* Promiscuous Group */
+#define MAC_RXCFG_HFE 0x00000020 /* Hash Filter Enable */
+#define MAC_RXCFG_AFE 0x00000040 /* Address Filter Enable */
+#define MAC_RXCFG_DDE 0x00000080 /* Disable Discard on Error */
+#define MAC_RXCFG_RCE 0x00000100 /* RX Carrier Extension */
+
+/* MAC Control Config Register. */
+#define MAC_MCCFG_SPE 0x00000001 /* Send Pause Enable */
+#define MAC_MCCFG_RPE 0x00000002 /* Receive Pause Enable */
+#define MAC_MCCFG_PMC 0x00000004 /* Pass MAC Control */
+
+/* XIF Configuration Register.
+ *
+ * NOTE: When leaving or entering loopback mode, a global hardware
+ * init of GEM should be performed.
+ */
+#define MAC_XIFCFG_OE 0x00000001 /* MII TX Output Driver Enable */
+#define MAC_XIFCFG_LBCK 0x00000002 /* Loopback TX to RX */
+#define MAC_XIFCFG_DISE 0x00000004 /* Disable RX path during TX */
+#define MAC_XIFCFG_GMII 0x00000008 /* Use GMII clocks + datapath */
+#define MAC_XIFCFG_MBOE 0x00000010 /* Controls MII_BUF_EN pin */
+#define MAC_XIFCFG_LLED 0x00000020 /* Force LINKLED# active (low) */
+#define MAC_XIFCFG_FLED 0x00000040 /* Force FDPLXLED# active (low) */
+
+/* InterPacketGap0 Register. This 8-bit value is used as an extension
+ * to the InterPacketGap1 Register. Specifically it contributes to the
+ * timing of the RX-to-TX IPG. This value is ignored and presumed to
+ * be zero for TX-to-TX IPG calculations and/or when the Enable IPG0 bit
+ * is cleared in the TX MAC Configuration Register.
+ *
+ * The value in this register is in terms of media byte time.
+ *
+ * Recommended value: 0x00
+ */
+
+/* InterPacketGap1 Register. This 8-bit value defines the first 2/3
+ * portion of the Inter Packet Gap.
+ *
+ * The value in this register is in terms of media byte time.
+ *
+ * Recommended value: 0x08
+ */
+
+/* InterPacketGap2 Register. This 8-bit value defines the second 1/3
+ * portion of the Inter Packet Gap.
+ *
+ * The value in this register is in terms of media byte time.
+ *
+ * Recommended value: 0x04
+ */
+
+/* Slot Time Register. This 10-bit value specifies the slot time
+ * parameter in units of media byte time. It determines the physical
+ * span of the network.
+ *
+ * Recommended value: 0x40
+ */
+
+/* Minimum Frame Size Register. This 10-bit register specifies the
+ * smallest sized frame the TXMAC will send onto the medium, and the
+ * RXMAC will receive from the medium.
+ *
+ * Recommended value: 0x40
+ */
+
+/* Maximum Frame and Burst Size Register.
+ *
+ * This register specifies two things. First it specifies the maximum
+ * sized frame the TXMAC will send and the RXMAC will recognize as
+ * valid. Second, it specifies the maximum run length of a burst of
+ * packets sent in half-duplex gigabit modes.
+ *
+ * Recommended value: 0x200005ee
+ */
+#define MAC_MAXFSZ_MFS 0x00007fff /* Max Frame Size */
+#define MAC_MAXFSZ_MBS 0x7fff0000 /* Max Burst Size */
+
+/* PA Size Register. This 10-bit register specifies the number of preamble
+ * bytes which will be transmitted at the beginning of each frame. A
+ * value of two or greater should be programmed here.
+ *
+ * Recommended value: 0x07
+ */
+
+/* Jam Size Register. This 4-bit register specifies the duration of
+ * the jam in units of media byte time.
+ *
+ * Recommended value: 0x04
+ */
+
+/* Attempts Limit Register. This 8-bit register specifies the number
+ * of attempts that the TXMAC will make to transmit a frame, before it
+ * resets its Attempts Counter. After reaching the Attempts Limit the
+ * TXMAC may or may not drop the frame, as determined by the NGU
+ * (Never Give Up) and NGUL (Never Give Up Limit) bits in the TXMAC
+ * Configuration Register.
+ *
+ * Recommended value: 0x10
+ */
+
+/* MAC Control Type Register. This 16-bit register specifies the
+ * "type" field of a MAC Control frame. The TXMAC uses this field to
+ * encapsulate the MAC Control frame for transmission, and the RXMAC
+ * uses it for decoding valid MAC Control frames received from the
+ * network.
+ *
+ * Recommended value: 0x8808
+ */
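+
+/* Editorial sketch: loading the recommended values listed above during
+ * MAC initialization (`regs` assumed as before).
+ */
+#if 0
+ writel(0x00, regs + MAC_IPG0);
+ writel(0x08, regs + MAC_IPG1);
+ writel(0x04, regs + MAC_IPG2);
+ writel(0x40, regs + MAC_STIME);
+ writel(0x40, regs + MAC_MINFSZ);
+ writel(0x200005ee, regs + MAC_MAXFSZ);
+ writel(0x07, regs + MAC_PASIZE);
+ writel(0x04, regs + MAC_JAMSIZE);
+ writel(0x10, regs + MAC_ATTLIM);
+ writel(0x8808, regs + MAC_MCTYPE);
+#endif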
+
+/* MAC Address Registers. Each of these registers specifies the
+ * ethernet MAC address of the interface, 16 bits at a time. Register
+ * 0 specifies bits [47:32], register 1 bits [31:16], and register
+ * 2 bits [15:0].
+ *
+ * Registers 3 through and including 5 specify an alternate
+ * MAC address for the interface.
+ *
+ * Registers 6 through and including 8 specify the MAC Control
+ * Address, which must be the reserved multicast address for MAC
+ * Control frames.
+ *
+ * Example: To program primary station address a:b:c:d:e:f into
+ * the chip.
+ * MAC_Address_2 = (a << 8) | b
+ * MAC_Address_1 = (c << 8) | d
+ * MAC_Address_0 = (e << 8) | f
+ */
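+
+/* Editorial sketch: the example above expressed as register writes, with
+ * dev_addr[] assumed to hold bytes a..f in order.
+ */
+#if 0
+ writel((dev_addr[4] << 8) | dev_addr[5], regs + MAC_ADDR0);
+ writel((dev_addr[2] << 8) | dev_addr[3], regs + MAC_ADDR1);
+ writel((dev_addr[0] << 8) | dev_addr[1], regs + MAC_ADDR2);
+#endif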
+
+/* Address Filter Registers. Registers 0 through 2 specify bit
+ * fields [47:32] through [15:0], respectively, of the address
+ * filter. The Address Filter 2&1 Mask Register denotes the 8-bit
+ * nibble mask for Address Filter Registers 2 and 1. The Address
+ * Filter 0 Mask Register denotes the 16-bit mask for the Address
+ * Filter Register 0.
+ */
+
+/* Hash Table Registers. Registers 0 through 15 specify bit fields
+ * [255:240] through [15:0], respectively, of the hash table.
+ */
+
+/* Statistics Registers. All of these registers are 16 bits wide and
+ * track occurrences of a specific event. GEM can be configured
+ * to interrupt the host cpu when any of these counters overflow.
+ * They should all be explicitly initialized to zero when the interface
+ * is brought up.
+ */
+
+/* Random Number Seed Register. This 10-bit value is used as the
+ * RNG seed inside GEM for the CSMA/CD backoff algorithm. It is
+ * recommended to program this register to the 10 LSBs of the
+ * interface's MAC address.
+ */
+
+/* Pause Timer, read-only. This 16-bit timer is used to time the pause
+ * interval as indicated by a received pause flow control frame.
+ * A non-zero value in this timer indicates that the MAC is currently in
+ * the paused state.
+ */
+
+/* MIF Registers */
+#define MIF_BBCLK 0x6200UL /* MIF Bit-Bang Clock */
+#define MIF_BBDATA 0x6204UL /* MIF Bit-Bang Data */
+#define MIF_BBOENAB 0x6208UL /* MIF Bit-Bang Output Enable */
+#define MIF_FRAME 0x620CUL /* MIF Frame/Output Register */
+#define MIF_CFG 0x6210UL /* MIF Configuration Register */
+#define MIF_MASK 0x6214UL /* MIF Mask Register */
+#define MIF_STATUS 0x6218UL /* MIF Status Register */
+#define MIF_SMACHINE 0x621CUL /* MIF State Machine Register */
+
+/* MIF Bit-Bang Clock. This 1-bit register is used to generate the
+ * MDC clock waveform on the MII Management Interface when the MIF is
+ * programmed in the "Bit-Bang" mode. Writing a '1' after a '0' into
+ * this register will create a rising edge on the MDC, while writing
+ * a '0' after a '1' will create a falling edge. For every bit that
+ * is transferred on the management interface, both edges have to be
+ * generated.
+ */
+
+/* MIF Bit-Bang Data. This 1-bit register is used to generate the
+ * outgoing data (MDO) on the MII Management Interface when the MIF
+ * is programmed in the "Bit-Bang" mode. The daa will be steered to the
+ * appropriate MDIO based on the state of the PHY_Select bit in the MIF
+ * Configuration Register.
+ */
+
+/* MIF Bit-Bang Output Enable. This 1-bit register is used to enable
+ * ('1') or disable ('0') the bi-directional driver on the MII when the
+ * MIF is programmed in the "Bit-Bang" mode. The MDIO should be enabled
+ * when data bits are transferred from the MIF to the transceiver, and it
+ * should be disabled when the interface is idle or when data bits are
+ * transferred from the transceiver to the MIF (data portion of a read
+ * instruction). Only one MDIO will be enabled at a given time, depending
+ * on the state of the PHY_Select bit in the MIF Configuration Register.
+ */
+
+/* MIF Configuration Register. This 15-bit register controls the operation
+ * of the MIF.
+ */
+#define MIF_CFG_PSELECT 0x00000001 /* Xcvr slct: 0=mdio0 1=mdio1 */
+#define MIF_CFG_POLL 0x00000002 /* Enable polling mechanism */
+#define MIF_CFG_BBMODE 0x00000004 /* 1=bit-bang 0=frame mode */
+#define MIF_CFG_PRADDR 0x000000f8 /* Xcvr poll register address */
+#define MIF_CFG_MDI0 0x00000100 /* MDIO_0 present or read-bit */
+#define MIF_CFG_MDI1 0x00000200 /* MDIO_1 present or read-bit */
+#define MIF_CFG_PPADDR 0x00007c00 /* Xcvr poll PHY address */
+
+/* MIF Frame/Output Register. This 32-bit register allows the host to
+ * communicate with a transceiver in frame mode (as opposed to bit-bang
+ * mode). Writes by the host specify an instruction. After one is issued,
+ * the host must poll this register for completion. Also, after
+ * completion this register holds the data returned by the transceiver
+ * if applicable.
+ */
+#define MIF_FRAME_ST 0xc0000000 /* STart of frame */
+#define MIF_FRAME_OP 0x30000000 /* OPcode */
+#define MIF_FRAME_PHYAD 0x0f800000 /* PHY ADdress */
+#define MIF_FRAME_REGAD 0x007c0000 /* REGister ADdress */
+#define MIF_FRAME_TAMSB 0x00020000 /* Turn Around MSB */
+#define MIF_FRAME_TALSB 0x00010000 /* Turn Around LSB */
+#define MIF_FRAME_DATA 0x0000ffff /* Instruction Payload */
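+
+/* Editorial sketch: a frame-mode PHY register read built from the fields
+ * above. ST=01 and read OP=10 follow standard MII framing; completion is
+ * detected via the turn-around LSB. The retry count and delay are
+ * illustrative.
+ */
+#if 0
+static int mif_frame_read(void __iomem *regs, int phy_addr, int reg)
+{
+ u32 cmd = (1 << 30) | (2 << 28) |
+ ((phy_addr << 23) & MIF_FRAME_PHYAD) |
+ ((reg << 18) & MIF_FRAME_REGAD) |
+ MIF_FRAME_TAMSB;
+ int limit = 10000;
+
+ writel(cmd, regs + MIF_FRAME);
+ while (--limit) {
+ cmd = readl(regs + MIF_FRAME);
+ if (cmd & MIF_FRAME_TALSB)
+ return cmd & MIF_FRAME_DATA;
+ udelay(10);
+ }
+ return -1; /* transceiver never completed the frame */
+}
+#endif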
+
+/* MIF Status Register. This register reports status when the MIF is
+ * operating in the poll mode. The poll status field is auto-clearing
+ * on read.
+ */
+#define MIF_STATUS_DATA 0xffff0000 /* Live image of XCVR reg */
+#define MIF_STATUS_STAT 0x0000ffff /* Which bits have changed */
+
+/* MIF Mask Register. This 16-bit register is used when in poll mode
+ * to say which bits of the polled register will cause an interrupt
+ * when changed.
+ */
+
+/* PCS/Serialink Registers */
+#define PCS_MIICTRL 0x9000UL /* PCS MII Control Register */
+#define PCS_MIISTAT 0x9004UL /* PCS MII Status Register */
+#define PCS_MIIADV 0x9008UL /* PCS MII Advertisement Reg */
+#define PCS_MIILP 0x900CUL /* PCS MII Link Partner Ability */
+#define PCS_CFG 0x9010UL /* PCS Configuration Register */
+#define PCS_SMACHINE 0x9014UL /* PCS State Machine Register */
+#define PCS_ISTAT 0x9018UL /* PCS Interrupt Status Reg */
+#define PCS_DMODE 0x9050UL /* Datapath Mode Register */
+#define PCS_SCTRL 0x9054UL /* Serialink Control Register */
+#define PCS_SOS 0x9058UL /* Shared Output Select Reg */
+#define PCS_SSTATE 0x905CUL /* Serialink State Register */
+
+/* PCS MII Control Register. */
+#define PCS_MIICTRL_SPD 0x00000040 /* Read as one, writes ignored */
+#define PCS_MIICTRL_CT 0x00000080 /* Force COL signal active */
+#define PCS_MIICTRL_DM 0x00000100 /* Duplex mode, forced low */
+#define PCS_MIICTRL_RAN 0x00000200 /* Restart auto-neg, self clear */
+#define PCS_MIICTRL_ISO 0x00000400 /* Read as zero, writes ignored */
+#define PCS_MIICTRL_PD 0x00000800 /* Read as zero, writes ignored */
+#define PCS_MIICTRL_ANE 0x00001000 /* Auto-neg enable */
+#define PCS_MIICTRL_SS 0x00002000 /* Read as zero, writes ignored */
+#define PCS_MIICTRL_WB 0x00004000 /* Wrapback, loopback at 10-bit
+ * input side of Serialink
+ */
+#define PCS_MIICTRL_RST 0x00008000 /* Resets PCS, self clearing */
+
+/* PCS MII Status Register. */
+#define PCS_MIISTAT_EC 0x00000001 /* Ext Capability: Read as zero */
+#define PCS_MIISTAT_JD 0x00000002 /* Jabber Detect: Read as zero */
+#define PCS_MIISTAT_LS 0x00000004 /* Link Status: 1=up 0=down */
+#define PCS_MIISTAT_ANA 0x00000008 /* Auto-neg Ability, always 1 */
+#define PCS_MIISTAT_RF 0x00000010 /* Remote Fault */
+#define PCS_MIISTAT_ANC 0x00000020 /* Auto-neg complete */
+#define PCS_MIISTAT_ES 0x00000100 /* Extended Status, always 1 */
+
+/* PCS MII Advertisement Register. */
+#define PCS_MIIADV_FD 0x00000020 /* Advertise Full Duplex */
+#define PCS_MIIADV_HD 0x00000040 /* Advertise Half Duplex */
+#define PCS_MIIADV_SP 0x00000080 /* Advertise Symmetric Pause */
+#define PCS_MIIADV_AP 0x00000100 /* Advertise Asymmetric Pause */
+#define PCS_MIIADV_RF 0x00003000 /* Remote Fault */
+#define PCS_MIIADV_ACK 0x00004000 /* Read-only */
+#define PCS_MIIADV_NP 0x00008000 /* Next-page, forced low */
+
+/* PCS MII Link Partner Ability Register. This register is equivalent
+ * to the Link Partner Ability Register of the standard MII register set.
+ * Its layout corresponds to the PCS MII Advertisement Register.
+ */
+
+/* PCS Configuration Register. */
+#define PCS_CFG_ENABLE 0x00000001 /* Must be zero while changing
+ * PCS MII advertisement reg.
+ */
+#define PCS_CFG_SDO 0x00000002 /* Signal detect override */
+#define PCS_CFG_SDL 0x00000004 /* Signal detect active low */
+#define PCS_CFG_JS 0x00000018 /* Jitter-study:
+ * 0 = normal operation
+ * 1 = high-frequency test pattern
+ * 2 = low-frequency test pattern
+ * 3 = reserved
+ */
+#define PCS_CFG_TO 0x00000020 /* 10ms auto-neg timer override */
+
+/* PCS Interrupt Status Register. This register is self-clearing
+ * when read.
+ */
+#define PCS_ISTAT_LSC 0x00000004 /* Link Status Change */
+
+/* Datapath Mode Register. */
+#define PCS_DMODE_SM 0x00000001 /* 1 = use internal Serialink */
+#define PCS_DMODE_ESM 0x00000002 /* External SERDES mode */
+#define PCS_DMODE_MGM 0x00000004 /* MII/GMII mode */
+#define PCS_DMODE_GMOE 0x00000008 /* GMII Output Enable */
+
+/* Serialink Control Register.
+ *
+ * NOTE: When in SERDES mode, the loopback bit has inverse logic.
+ */
+#define PCS_SCTRL_LOOP 0x00000001 /* Loopback enable */
+#define PCS_SCTRL_ESCD 0x00000002 /* Enable sync char detection */
+#define PCS_SCTRL_LOCK 0x00000004 /* Lock to reference clock */
+#define PCS_SCTRL_EMP 0x00000018 /* Output driver emphasis */
+#define PCS_SCTRL_STEST 0x000001c0 /* Self test patterns */
+#define PCS_SCTRL_PDWN 0x00000200 /* Software power-down */
+#define PCS_SCTRL_RXZ 0x00000c00 /* PLL input to Serialink */
+#define PCS_SCTRL_RXP 0x00003000 /* PLL input to Serialink */
+#define PCS_SCTRL_TXZ 0x0000c000 /* PLL input to Serialink */
+#define PCS_SCTRL_TXP 0x00030000 /* PLL input to Serialink */
+
+/* Shared Output Select Register. For test and debug, allows multiplexing
+ * test outputs into the PROM address pins. Set to zero for normal
+ * operation.
+ */
+#define PCS_SOS_PADDR 0x00000003 /* PROM Address */
+
+/* PROM Image Space */
+#define PROM_START 0x100000UL /* Expansion ROM run time access*/
+#define PROM_SIZE 0x0fffffUL /* Size of ROM */
+#define PROM_END 0x200000UL /* End of ROM */
+
+/* MII definitions missing from mii.h */
+
+#define BMCR_SPD2 0x0040 /* Gigabit enable? (bcm5411) */
+#define LPA_PAUSE 0x0400
+
+/* More PHY registers (specific to Broadcom models) */
+
+/* MII BCM5201 MULTIPHY interrupt register */
+#define MII_BCM5201_INTERRUPT 0x1A
+#define MII_BCM5201_INTERRUPT_INTENABLE 0x4000
+
+#define MII_BCM5201_AUXMODE2 0x1B
+#define MII_BCM5201_AUXMODE2_LOWPOWER 0x0008
+
+#define MII_BCM5201_MULTIPHY 0x1E
+
+/* MII BCM5201 MULTIPHY register bits */
+#define MII_BCM5201_MULTIPHY_SERIALMODE 0x0002
+#define MII_BCM5201_MULTIPHY_SUPERISOLATE 0x0008
+
+/* MII BCM5400 1000-BASET Control register */
+#define MII_BCM5400_GB_CONTROL 0x09
+#define MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP 0x0200
+
+/* MII BCM5400 AUXCONTROL register */
+#define MII_BCM5400_AUXCONTROL 0x18
+#define MII_BCM5400_AUXCONTROL_PWR10BASET 0x0004
+
+/* MII BCM5400 AUXSTATUS register */
+#define MII_BCM5400_AUXSTATUS 0x19
+#define MII_BCM5400_AUXSTATUS_LINKMODE_MASK 0x0700
+#define MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT 8
+
+/* When it can, GEM internally caches 4 aligned TX descriptors
+ * at a time, so that it can use full cacheline DMA reads.
+ *
+ * Note that unlike HME, there is no ownership bit in the descriptor
+ * control word. The same functionality is obtained via the TX-Kick
+ * and TX-Complete registers. As a result, GEM need not write back
+ * updated values to the TX descriptor ring, it only performs reads.
+ *
+ * Since TX descriptors are never modified by GEM, the driver can
+ * use the buffer DMA address as a place to keep track of allocated
+ * DMA mappings for a transmitted packet.
+ */
+struct gem_txd {
+ __le64 control_word;
+ __le64 buffer;
+};
+
+#define TXDCTRL_BUFSZ 0x0000000000007fffULL /* Buffer Size */
+#define TXDCTRL_CSTART 0x00000000001f8000ULL /* CSUM Start Offset */
+#define TXDCTRL_COFF 0x000000001fe00000ULL /* CSUM Stuff Offset */
+#define TXDCTRL_CENAB 0x0000000020000000ULL /* CSUM Enable */
+#define TXDCTRL_EOF 0x0000000040000000ULL /* End of Frame */
+#define TXDCTRL_SOF 0x0000000080000000ULL /* Start of Frame */
+#define TXDCTRL_INTME 0x0000000100000000ULL /* "Interrupt Me" */
+#define TXDCTRL_NOCRC 0x0000000200000000ULL /* No CRC Present */
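+
+/* Editorial sketch: building the control word for a single-fragment
+ * frame. CSTART begins at bit 15 and COFF at bit 21 per the masks above;
+ * `len`, `csum_start` and `csum_stuff` are assumed driver-side values.
+ * Since GEM only reads descriptors once kicked, the TXDMA_KICK write
+ * (not shown) is what publishes this entry.
+ */
+#if 0
+ u64 ctrl = (len & TXDCTRL_BUFSZ) | TXDCTRL_SOF | TXDCTRL_EOF;
+
+ if (do_csum)
+ ctrl |= TXDCTRL_CENAB |
+ ((u64)csum_start << 15) | /* TXDCTRL_CSTART */
+ ((u64)csum_stuff << 21); /* TXDCTRL_COFF */
+ txd->buffer = cpu_to_le64(mapping);
+ txd->control_word = cpu_to_le64(ctrl);
+#endif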
+
+/* GEM requires that RX descriptors are provided four at a time,
+ * aligned. Also, the RX ring may not wrap around. This means that
+ * there will be at least 4 unused descriptor entries in the middle
+ * of the RX ring at all times.
+ *
+ * Similar to HME, GEM assumes that it can write garbage bytes before
+ * the beginning of the buffer and right after the end in order to DMA
+ * whole cachelines.
+ *
+ * Unlike for TX, GEM does update the status word in the RX descriptors
+ * when packets arrive. Therefore an ownership bit does exist in the
+ * RX descriptors. It is advisory; GEM clears it but does not check
+ * it in any way. So when buffers are posted to the RX ring (via the
+ * RX Kick register) by the driver it must make sure the buffers are
+ * truly ready and that the ownership bits are set properly.
+ *
+ * Even though GEM modifies the RX descriptors, it guarantees that the
+ * buffer DMA address field will stay the same when it performs these
+ * updates. Therefore it can be used to keep track of DMA mappings
+ * by the host driver just as in the TX descriptor case above.
+ */
+struct gem_rxd {
+ __le64 status_word;
+ __le64 buffer;
+};
+
+#define RXDCTRL_TCPCSUM 0x000000000000ffffULL /* TCP Pseudo-CSUM */
+#define RXDCTRL_BUFSZ 0x000000007fff0000ULL /* Buffer Size */
+#define RXDCTRL_OWN 0x0000000080000000ULL /* GEM owns this entry */
+#define RXDCTRL_HASHVAL 0x0ffff00000000000ULL /* Hash Value */
+#define RXDCTRL_HPASS 0x1000000000000000ULL /* Passed Hash Filter */
+#define RXDCTRL_ALTMAC 0x2000000000000000ULL /* Matched ALT MAC */
+#define RXDCTRL_BAD 0x4000000000000000ULL /* Frame has bad CRC */
+
+#define RXDCTRL_FRESH(gp) \
+ ((((RX_BUF_ALLOC_SIZE(gp) - RX_OFFSET) << 16) & RXDCTRL_BUFSZ) | \
+ RXDCTRL_OWN)
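+
+/* Editorial sketch: refreshing an RX descriptor after posting a new
+ * buffer. The DMA address is written first, then the status word returns
+ * ownership to GEM via RXDCTRL_FRESH(); `gp` is the driver state (struct
+ * gem below) and `dma_addr` the mapped buffer, both assumed.
+ */
+#if 0
+ rxd->buffer = cpu_to_le64(dma_addr);
+ wmb();
+ rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
+#endif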
+
+#define TX_RING_SIZE 128
+#define RX_RING_SIZE 128
+
+#if TX_RING_SIZE == 32
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_32
+#elif TX_RING_SIZE == 64
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_64
+#elif TX_RING_SIZE == 128
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_128
+#elif TX_RING_SIZE == 256
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_256
+#elif TX_RING_SIZE == 512
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_512
+#elif TX_RING_SIZE == 1024
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_1K
+#elif TX_RING_SIZE == 2048
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_2K
+#elif TX_RING_SIZE == 4096
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_4K
+#elif TX_RING_SIZE == 8192
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_8K
+#else
+#error TX_RING_SIZE value is illegal...
+#endif
+
+#if RX_RING_SIZE == 32
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_32
+#elif RX_RING_SIZE == 64
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_64
+#elif RX_RING_SIZE == 128
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_128
+#elif RX_RING_SIZE == 256
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_256
+#elif RX_RING_SIZE == 512
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_512
+#elif RX_RING_SIZE == 1024
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_1K
+#elif RX_RING_SIZE == 2048
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_2K
+#elif RX_RING_SIZE == 4096
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_4K
+#elif RX_RING_SIZE == 8192
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_8K
+#else
+#error RX_RING_SIZE is illegal...
+#endif
+
+#define NEXT_TX(N) (((N) + 1) & (TX_RING_SIZE - 1))
+#define NEXT_RX(N) (((N) + 1) & (RX_RING_SIZE - 1))
+
+#define TX_BUFFS_AVAIL(GP) \
+ (((GP)->tx_old <= (GP)->tx_new) ? \
+ (GP)->tx_old + (TX_RING_SIZE - 1) - (GP)->tx_new : \
+ (GP)->tx_old - (GP)->tx_new - 1)
+
+#define RX_OFFSET 2
+#define RX_BUF_ALLOC_SIZE(gp) ((gp)->rx_buf_sz + 28 + RX_OFFSET + 64)
+
+#define RX_COPY_THRESHOLD 256
+
+#if TX_RING_SIZE < 128
+#define INIT_BLOCK_TX_RING_SIZE 128
+#else
+#define INIT_BLOCK_TX_RING_SIZE TX_RING_SIZE
+#endif
+
+#if RX_RING_SIZE < 128
+#define INIT_BLOCK_RX_RING_SIZE 128
+#else
+#define INIT_BLOCK_RX_RING_SIZE RX_RING_SIZE
+#endif
+
+struct gem_init_block {
+ struct gem_txd txd[INIT_BLOCK_TX_RING_SIZE];
+ struct gem_rxd rxd[INIT_BLOCK_RX_RING_SIZE];
+};
+
+enum gem_phy_type {
+ phy_mii_mdio0,
+ phy_mii_mdio1,
+ phy_serialink,
+ phy_serdes,
+};
+
+enum link_state {
+ link_down = 0, /* No link, will retry */
+ link_aneg, /* Autoneg in progress */
+ link_force_try, /* Try Forced link speed */
+ link_force_ret, /* Forced mode worked, retrying autoneg */
+ link_force_ok, /* Stay in forced mode */
+ link_up /* Link is up */
+};
+
+struct gem {
+ void __iomem *regs;
+ int rx_new, rx_old;
+ int tx_new, tx_old;
+
+ unsigned int has_wol : 1; /* chip supports wake-on-lan */
+ unsigned int asleep_wol : 1; /* was asleep with WOL enabled */
+
+ int cell_enabled;
+ u32 msg_enable;
+ u32 status;
+
+ struct napi_struct napi;
+
+ int tx_fifo_sz;
+ int rx_fifo_sz;
+ int rx_pause_off;
+ int rx_pause_on;
+ int rx_buf_sz;
+ u64 pause_entered;
+ u16 pause_last_time_recvd;
+ u32 mac_rx_cfg;
+ u32 swrst_base;
+
+ int want_autoneg;
+ int last_forced_speed;
+ enum link_state lstate;
+ struct timer_list link_timer;
+ int timer_ticks;
+ int wake_on_lan;
+ struct work_struct reset_task;
+ volatile int reset_task_pending;
+
+ enum gem_phy_type phy_type;
+ struct mii_phy phy_mii;
+ int mii_phy_addr;
+
+ struct gem_init_block *init_block;
+ struct sk_buff *rx_skbs[RX_RING_SIZE];
+ struct sk_buff *tx_skbs[TX_RING_SIZE];
+ dma_addr_t gblock_dvma;
+
+ struct pci_dev *pdev;
+ struct net_device *dev;
+#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
+ struct device_node *of_node;
+#endif
+};
+
+#define found_mii_phy(gp) ((gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) && \
+ gp->phy_mii.def && gp->phy_mii.def->ops)
+
+#endif /* _SUNGEM_H */
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
new file mode 100644
index 000000000000..869d47be54b4
--- /dev/null
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -0,0 +1,3359 @@
+/* sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching,
+ * auto carrier detecting ethernet driver. Also known as the
+ * "Happy Meal Ethernet" found on SunSwift SBUS cards.
+ *
+ * Copyright (C) 1996, 1998, 1999, 2002, 2003,
+ * 2006, 2008 David S. Miller (davem@davemloft.net)
+ *
+ * Changes :
+ * 2000/11/11 Willy Tarreau <willy AT meta-x.org>
+ * - port to non-sparc architectures. Tested only on x86 and
+ * only currently works with QFE PCI cards.
+ * - ability to specify the MAC address at module load time by passing this
+ * argument: macaddr=0x00,0x10,0x20,0x30,0x40,0x50
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/random.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#ifdef CONFIG_SPARC
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <asm/idprom.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/prom.h>
+#include <asm/auxio.h>
+#endif
+#include <asm/uaccess.h>
+
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+
+#ifdef CONFIG_PCI
+#include <linux/pci.h>
+#endif
+
+#include "sunhme.h"
+
+#define DRV_NAME "sunhme"
+#define DRV_VERSION "3.10"
+#define DRV_RELDATE "August 26, 2008"
+#define DRV_AUTHOR "David S. Miller (davem@davemloft.net)"
+
+static char version[] =
+ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION("Sun HappyMealEthernet(HME) 10/100baseT ethernet driver");
+MODULE_LICENSE("GPL");
+
+static int macaddr[6];
+
+/* accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
+module_param_array(macaddr, int, NULL, 0);
+MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set");
+
+#ifdef CONFIG_SBUS
+static struct quattro *qfe_sbus_list;
+#endif
+
+#ifdef CONFIG_PCI
+static struct quattro *qfe_pci_list;
+#endif
+
+#undef HMEDEBUG
+#undef SXDEBUG
+#undef RXDEBUG
+#undef TXDEBUG
+#undef TXLOGGING
+
+#ifdef TXLOGGING
+struct hme_tx_logent {
+ unsigned int tstamp;
+ int tx_new, tx_old;
+ unsigned int action;
+#define TXLOG_ACTION_IRQ 0x01
+#define TXLOG_ACTION_TXMIT 0x02
+#define TXLOG_ACTION_TBUSY 0x04
+#define TXLOG_ACTION_NBUFS 0x08
+ unsigned int status;
+};
+#define TX_LOG_LEN 128
+static struct hme_tx_logent tx_log[TX_LOG_LEN];
+static int txlog_cur_entry;
+static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigned int s)
+{
+ struct hme_tx_logent *tlp;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ tlp = &tx_log[txlog_cur_entry];
+ tlp->tstamp = (unsigned int)jiffies;
+ tlp->tx_new = hp->tx_new;
+ tlp->tx_old = hp->tx_old;
+ tlp->action = a;
+ tlp->status = s;
+ txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1);
+ local_irq_restore(flags);
+}
+static __inline__ void tx_dump_log(void)
+{
+ int i, this;
+
+ this = txlog_cur_entry;
+ for (i = 0; i < TX_LOG_LEN; i++) {
+ printk("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
+ tx_log[this].tstamp,
+ tx_log[this].tx_new, tx_log[this].tx_old,
+ tx_log[this].action, tx_log[this].status);
+ this = (this + 1) & (TX_LOG_LEN - 1);
+ }
+}
+static __inline__ void tx_dump_ring(struct happy_meal *hp)
+{
+ struct hmeal_init_block *hb = hp->happy_block;
+ struct happy_meal_txd *tp = &hb->happy_meal_txd[0];
+ int i;
+
+ for (i = 0; i < TX_RING_SIZE; i+=4) {
+ printk("TXD[%d..%d]: [%08x:%08x] [%08x:%08x] [%08x:%08x] [%08x:%08x]\n",
+ i, i + 4,
+ le32_to_cpu(tp[i].tx_flags), le32_to_cpu(tp[i].tx_addr),
+ le32_to_cpu(tp[i + 1].tx_flags), le32_to_cpu(tp[i + 1].tx_addr),
+ le32_to_cpu(tp[i + 2].tx_flags), le32_to_cpu(tp[i + 2].tx_addr),
+ le32_to_cpu(tp[i + 3].tx_flags), le32_to_cpu(tp[i + 3].tx_addr));
+ }
+}
+#else
+#define tx_add_log(hp, a, s) do { } while(0)
+#define tx_dump_log() do { } while(0)
+#define tx_dump_ring(hp) do { } while(0)
+#endif
+
+#ifdef HMEDEBUG
+#define HMD(x) printk x
+#else
+#define HMD(x)
+#endif
+
+/* #define AUTO_SWITCH_DEBUG */
+
+#ifdef AUTO_SWITCH_DEBUG
+#define ASD(x) printk x
+#else
+#define ASD(x)
+#endif
+
+#define DEFAULT_IPG0 16 /* For lance-mode only */
+#define DEFAULT_IPG1 8 /* For all modes */
+#define DEFAULT_IPG2 4 /* For all modes */
+#define DEFAULT_JAMSIZE 4 /* Toe jam */
+
+/* NOTE: In the descriptor writes one _must_ write the address
+ * member _first_. The card must not be allowed to see
+ * the updated descriptor flags until the address is
+ * correct. I've added a write memory barrier between
+ * the two stores so that I can sleep well at night... -DaveM
+ */
+
+#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
+static void sbus_hme_write32(void __iomem *reg, u32 val)
+{
+ sbus_writel(val, reg);
+}
+
+static u32 sbus_hme_read32(void __iomem *reg)
+{
+ return sbus_readl(reg);
+}
+
+static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
+{
+ rxd->rx_addr = (__force hme32)addr;
+ wmb();
+ rxd->rx_flags = (__force hme32)flags;
+}
+
+static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
+{
+ txd->tx_addr = (__force hme32)addr;
+ wmb();
+ txd->tx_flags = (__force hme32)flags;
+}
+
+static u32 sbus_hme_read_desc32(hme32 *p)
+{
+ return (__force u32)*p;
+}
+
+static void pci_hme_write32(void __iomem *reg, u32 val)
+{
+ writel(val, reg);
+}
+
+static u32 pci_hme_read32(void __iomem *reg)
+{
+ return readl(reg);
+}
+
+static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
+{
+ rxd->rx_addr = (__force hme32)cpu_to_le32(addr);
+ wmb();
+ rxd->rx_flags = (__force hme32)cpu_to_le32(flags);
+}
+
+static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
+{
+ txd->tx_addr = (__force hme32)cpu_to_le32(addr);
+ wmb();
+ txd->tx_flags = (__force hme32)cpu_to_le32(flags);
+}
+
+static u32 pci_hme_read_desc32(hme32 *p)
+{
+ return le32_to_cpup((__le32 *)p);
+}
+
+#define hme_write32(__hp, __reg, __val) \
+ ((__hp)->write32((__reg), (__val)))
+#define hme_read32(__hp, __reg) \
+ ((__hp)->read32(__reg))
+#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
+ ((__hp)->write_rxd((__rxd), (__flags), (__addr)))
+#define hme_write_txd(__hp, __txd, __flags, __addr) \
+ ((__hp)->write_txd((__txd), (__flags), (__addr)))
+#define hme_read_desc32(__hp, __p) \
+ ((__hp)->read_desc32(__p))
+#define hme_dma_map(__hp, __ptr, __size, __dir) \
+ ((__hp)->dma_map((__hp)->dma_dev, (__ptr), (__size), (__dir)))
+#define hme_dma_unmap(__hp, __addr, __size, __dir) \
+ ((__hp)->dma_unmap((__hp)->dma_dev, (__addr), (__size), (__dir)))
+#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
+ ((__hp)->dma_sync_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)))
+#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
+ ((__hp)->dma_sync_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)))
+#else
+#ifdef CONFIG_SBUS
+/* SBUS only compilation */
+#define hme_write32(__hp, __reg, __val) \
+ sbus_writel((__val), (__reg))
+#define hme_read32(__hp, __reg) \
+ sbus_readl(__reg)
+#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
+do { (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \
+ wmb(); \
+ (__rxd)->rx_flags = (__force hme32)(u32)(__flags); \
+} while(0)
+#define hme_write_txd(__hp, __txd, __flags, __addr) \
+do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \
+ wmb(); \
+ (__txd)->tx_flags = (__force hme32)(u32)(__flags); \
+} while(0)
+#define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p))
+#define hme_dma_map(__hp, __ptr, __size, __dir) \
+ dma_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
+#define hme_dma_unmap(__hp, __addr, __size, __dir) \
+ dma_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
+#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
+ dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
+#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
+ dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
+#else
+/* PCI only compilation */
+#define hme_write32(__hp, __reg, __val) \
+ writel((__val), (__reg))
+#define hme_read32(__hp, __reg) \
+ readl(__reg)
+#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
+do { (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \
+ wmb(); \
+ (__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \
+} while(0)
+#define hme_write_txd(__hp, __txd, __flags, __addr) \
+do { (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \
+ wmb(); \
+ (__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \
+} while(0)
+static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
+{
+ return le32_to_cpup((__le32 *)p);
+}
+#define hme_dma_map(__hp, __ptr, __size, __dir) \
+ pci_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
+#define hme_dma_unmap(__hp, __addr, __size, __dir) \
+ pci_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
+#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
+ pci_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
+#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
+ pci_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
+#endif
+#endif
+
+
+/* Oh yes, the MIF BitBang is mighty fun to program. BitBucket is more like it. */
+static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit)
+{
+ hme_write32(hp, tregs + TCVR_BBDATA, bit);
+ hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
+ hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
+}
+
+#if 0
+static u32 BB_GET_BIT(struct happy_meal *hp, void __iomem *tregs, int internal)
+{
+ u32 ret;
+
+ hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
+ hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
+ ret = hme_read32(hp, tregs + TCVR_CFG);
+ if (internal)
+ ret &= TCV_CFG_MDIO0;
+ else
+ ret &= TCV_CFG_MDIO1;
+
+ return ret;
+}
+#endif
+
+static u32 BB_GET_BIT2(struct happy_meal *hp, void __iomem *tregs, int internal)
+{
+ u32 retval;
+
+ hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
+ udelay(1);
+ retval = hme_read32(hp, tregs + TCVR_CFG);
+ if (internal)
+ retval &= TCV_CFG_MDIO0;
+ else
+ retval &= TCV_CFG_MDIO1;
+ hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
+
+ return retval;
+}
+
+#define TCVR_FAILURE 0x80000000 /* Impossible MIF read value */
+
+static int happy_meal_bb_read(struct happy_meal *hp,
+ void __iomem *tregs, int reg)
+{
+ u32 tmp;
+ int retval = 0;
+ int i;
+
+ ASD(("happy_meal_bb_read: reg=%d ", reg));
+
+ /* Enable the MIF BitBang outputs. */
+ hme_write32(hp, tregs + TCVR_BBOENAB, 1);
+
+ /* Force BitBang into the idle state. */
+ for (i = 0; i < 32; i++)
+ BB_PUT_BIT(hp, tregs, 1);
+
+ /* Give it the read sequence. */
+ BB_PUT_BIT(hp, tregs, 0);
+ BB_PUT_BIT(hp, tregs, 1);
+ BB_PUT_BIT(hp, tregs, 1);
+ BB_PUT_BIT(hp, tregs, 0);
+
+ /* Give it the PHY address. */
+ tmp = hp->paddr & 0xff;
+ for (i = 4; i >= 0; i--)
+ BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
+
+ /* Tell it what register we want to read. */
+ tmp = (reg & 0xff);
+ for (i = 4; i >= 0; i--)
+ BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
+
+ /* Close down the MIF BitBang outputs. */
+ hme_write32(hp, tregs + TCVR_BBOENAB, 0);
+
+ /* Now read in the value. */
+ (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
+ for (i = 15; i >= 0; i--)
+ retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
+ (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
+ (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
+ (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
+ ASD(("value=%x\n", retval));
+ return retval;
+}
+
+static void happy_meal_bb_write(struct happy_meal *hp,
+ void __iomem *tregs, int reg,
+ unsigned short value)
+{
+ u32 tmp;
+ int i;
+
+ ASD(("happy_meal_bb_write: reg=%d value=%x\n", reg, value));
+
+ /* Enable the MIF BitBang outputs. */
+ hme_write32(hp, tregs + TCVR_BBOENAB, 1);
+
+ /* Force BitBang into the idle state. */
+ for (i = 0; i < 32; i++)
+ BB_PUT_BIT(hp, tregs, 1);
+
+ /* Give it write sequence. */
+ BB_PUT_BIT(hp, tregs, 0);
+ BB_PUT_BIT(hp, tregs, 1);
+ BB_PUT_BIT(hp, tregs, 0);
+ BB_PUT_BIT(hp, tregs, 1);
+
+ /* Give it the PHY address. */
+ tmp = (hp->paddr & 0xff);
+ for (i = 4; i >= 0; i--)
+ BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
+
+ /* Tell it what register we will be writing. */
+ tmp = (reg & 0xff);
+ for (i = 4; i >= 0; i--)
+ BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
+
+ /* Tell it to become ready for the bits. */
+ BB_PUT_BIT(hp, tregs, 1);
+ BB_PUT_BIT(hp, tregs, 0);
+
+ for (i = 15; i >= 0; i--)
+ BB_PUT_BIT(hp, tregs, ((value >> i) & 1));
+
+ /* Close down the MIF BitBang outputs. */
+ hme_write32(hp, tregs + TCVR_BBOENAB, 0);
+}
+
+#define TCVR_READ_TRIES 16
+
+static int happy_meal_tcvr_read(struct happy_meal *hp,
+ void __iomem *tregs, int reg)
+{
+ int tries = TCVR_READ_TRIES;
+ int retval;
+
+ ASD(("happy_meal_tcvr_read: reg=0x%02x ", reg));
+ if (hp->tcvr_type == none) {
+ ASD(("no transceiver, value=TCVR_FAILURE\n"));
+ return TCVR_FAILURE;
+ }
+
+ if (!(hp->happy_flags & HFLAG_FENABLE)) {
+ ASD(("doing bit bang\n"));
+ return happy_meal_bb_read(hp, tregs, reg);
+ }
+
+ hme_write32(hp, tregs + TCVR_FRAME,
+ (FRAME_READ | (hp->paddr << 23) | ((reg & 0xff) << 18)));
+ while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
+ udelay(20);
+ if (!tries) {
+ printk(KERN_ERR "happy meal: Aieee, transceiver MIF read bolixed\n");
+ return TCVR_FAILURE;
+ }
+ retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff;
+ ASD(("value=%04x\n", retval));
+ return retval;
+}
+
+#define TCVR_WRITE_TRIES 16
+
+static void happy_meal_tcvr_write(struct happy_meal *hp,
+ void __iomem *tregs, int reg,
+ unsigned short value)
+{
+ int tries = TCVR_WRITE_TRIES;
+
+ ASD(("happy_meal_tcvr_write: reg=0x%02x value=%04x\n", reg, value));
+
+ /* Welcome to Sun Microsystems, can I take your order please? */
+ if (!(hp->happy_flags & HFLAG_FENABLE)) {
+ happy_meal_bb_write(hp, tregs, reg, value);
+ return;
+ }
+
+ /* Would you like fries with that? */
+ hme_write32(hp, tregs + TCVR_FRAME,
+ (FRAME_WRITE | (hp->paddr << 23) |
+ ((reg & 0xff) << 18) | (value & 0xffff)));
+ while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
+ udelay(20);
+
+ /* Anything else? */
+ if (!tries)
+ printk(KERN_ERR "happy meal: Aieee, transceiver MIF write bolixed\n");
+
+ /* Fifty-two cents is your change, have a nice day. */
+}
+
+/* Auto negotiation. The scheme is very simple. We have a timer routine
+ * that keeps watching the auto negotiation process as it progresses.
+ * The DP83840 is first told to start doing its thing; we set up the timer
+ * and place the timer state machine in its initial state.
+ *
+ * Here the timer peeks at the DP83840 status registers at each tick to see
+ * if the auto negotiation has completed; we assume here that the DP83840 PHY
+ * will time out at some point and just tell us what (didn't) happen. For
+ * complete coverage we only allow so many of the ticks at this level to run;
+ * when this has expired we print a warning message and try another strategy.
+ * This "other" strategy is to force the interface into various speed/duplex
+ * configurations and we stop when we see a link-up condition before the
+ * maximum number of "peek" ticks have occurred.
+ *
+ * Once a valid link status has been detected we configure the BigMAC and
+ * the rest of the Happy Meal to speak the most efficient protocol we could
+ * get a clean link for. The priority for link configurations, highest first
+ * is:
+ * 100 Base-T Full Duplex
+ * 100 Base-T Half Duplex
+ * 10 Base-T Full Duplex
+ * 10 Base-T Half Duplex
+ *
+ * We start a new timer now, after a successful auto negotiation status has
+ * been detected. This timer just waits for the link-up bit to get set in
+ * the BMCR of the DP83840. When this occurs we print a kernel log message
+ * describing the link type in use and the fact that it is up.
+ *
+ * If a fatal error of some sort is signalled and detected in the interrupt
+ * service routine, and the chip is reset, or the link is ifconfig'd down
+ * and then back up, this entire process repeats itself all over again.
+ */
+static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)
+{
+ hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+
+ /* Downgrade from full to half duplex. Only possible
+ * via ethtool.
+ */
+ if (hp->sw_bmcr & BMCR_FULLDPLX) {
+ hp->sw_bmcr &= ~(BMCR_FULLDPLX);
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+ return 0;
+ }
+
+ /* Downgrade from 100 to 10. */
+ if (hp->sw_bmcr & BMCR_SPEED100) {
+ hp->sw_bmcr &= ~(BMCR_SPEED100);
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+ return 0;
+ }
+
+ /* We've tried everything. */
+ return -1;
+}
+
+static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)
+{
+ printk(KERN_INFO "%s: Link is up using ", hp->dev->name);
+ if (hp->tcvr_type == external)
+ printk("external ");
+ else
+ printk("internal ");
+ printk("transceiver at ");
+ hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
+ if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) {
+ if (hp->sw_lpa & LPA_100FULL)
+ printk("100Mb/s, Full Duplex.\n");
+ else
+ printk("100Mb/s, Half Duplex.\n");
+ } else {
+ if (hp->sw_lpa & LPA_10FULL)
+ printk("10Mb/s, Full Duplex.\n");
+ else
+ printk("10Mb/s, Half Duplex.\n");
+ }
+}
+
+static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)
+{
+ printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name);
+ if (hp->tcvr_type == external)
+ printk("external ");
+ else
+ printk("internal ");
+ printk("transceiver at ");
+ hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+ if (hp->sw_bmcr & BMCR_SPEED100)
+ printk("100Mb/s, ");
+ else
+ printk("10Mb/s, ");
+ if (hp->sw_bmcr & BMCR_FULLDPLX)
+ printk("Full Duplex.\n");
+ else
+ printk("Half Duplex.\n");
+}
+
+static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs)
+{
+ int full;
+
+ /* All we care about is making sure the bigmac tx_cfg has a
+ * proper duplex setting.
+ */
+ if (hp->timer_state == arbwait) {
+ hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
+ if (!(hp->sw_lpa & (LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL)))
+ goto no_response;
+ if (hp->sw_lpa & LPA_100FULL)
+ full = 1;
+ else if (hp->sw_lpa & LPA_100HALF)
+ full = 0;
+ else if (hp->sw_lpa & LPA_10FULL)
+ full = 1;
+ else
+ full = 0;
+ } else {
+ /* Forcing a link mode. */
+ hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+ if (hp->sw_bmcr & BMCR_FULLDPLX)
+ full = 1;
+ else
+ full = 0;
+ }
+
+ /* Before changing other bits in the tx_cfg register, and in
+ * general any of the other TX config registers too, you
+ * must:
+ * 1) Clear Enable
+ * 2) Poll with reads until that bit reads back as zero
+ * 3) Make TX configuration changes
+ * 4) Set Enable once more
+ */
+ hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
+ hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
+ ~(BIGMAC_TXCFG_ENABLE));
+ while (hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & BIGMAC_TXCFG_ENABLE)
+ barrier();
+ if (full) {
+ hp->happy_flags |= HFLAG_FULL;
+ hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
+ hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
+ BIGMAC_TXCFG_FULLDPLX);
+ } else {
+ hp->happy_flags &= ~(HFLAG_FULL);
+ hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
+ hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
+ ~(BIGMAC_TXCFG_FULLDPLX));
+ }
+ hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
+ hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
+ BIGMAC_TXCFG_ENABLE);
+ return 0;
+no_response:
+ return 1;
+}
+
+static int happy_meal_init(struct happy_meal *hp);
+
+static int is_lucent_phy(struct happy_meal *hp)
+{
+ void __iomem *tregs = hp->tcvregs;
+ unsigned short mr2, mr3;
+ int ret = 0;
+
+ mr2 = happy_meal_tcvr_read(hp, tregs, 2);
+ mr3 = happy_meal_tcvr_read(hp, tregs, 3);
+ if ((mr2 & 0xffff) == 0x0180 &&
+ ((mr3 & 0xffff) >> 10) == 0x1d)
+ ret = 1;
+
+ return ret;
+}
+
+static void happy_meal_timer(unsigned long data)
+{
+ struct happy_meal *hp = (struct happy_meal *) data;
+ void __iomem *tregs = hp->tcvregs;
+ int restart_timer = 0;
+
+ spin_lock_irq(&hp->happy_lock);
+
+ hp->timer_ticks++;
+ switch(hp->timer_state) {
+ case arbwait:
+ /* Only allow for 10 ticks (roughly 12 seconds); that is
+ * already much too long to wait for arbitration to complete.
+ */
+ if (hp->timer_ticks >= 10) {
+ /* Enter force mode. */
+ do_force_mode:
+ hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+ printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful, trying forced link mode\n",
+ hp->dev->name);
+ hp->sw_bmcr = BMCR_SPEED100;
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+
+ if (!is_lucent_phy(hp)) {
+ /* OK, it seems we need to disable the transceiver for the
+ * first tick to make sure we get an accurate link state at
+ * the second tick.
+ */
+ hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
+ hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
+ happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig);
+ }
+ hp->timer_state = ltrywait;
+ hp->timer_ticks = 0;
+ restart_timer = 1;
+ } else {
+ /* Anything interesting happen? */
+ hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
+ if (hp->sw_bmsr & BMSR_ANEGCOMPLETE) {
+ int ret;
+
+ /* Just what we've been waiting for... */
+ ret = set_happy_link_modes(hp, tregs);
+ if (ret) {
+ /* Ooops, something bad happened, go to force
+ * mode.
+ *
+ * XXX Broken hubs which don't support 802.3u
+ * XXX auto-negotiation make this happen as well.
+ */
+ goto do_force_mode;
+ }
+
+ /* Success, at least so far, advance our state engine. */
+ hp->timer_state = lupwait;
+ restart_timer = 1;
+ } else {
+ restart_timer = 1;
+ }
+ }
+ break;
+
+ case lupwait:
+ /* Auto negotiation was successful and we are awaiting a
+ * link up status. I have decided to let this timer run
+ * forever until some sort of error is signalled, reporting
+ * a message to the user at 10 second intervals.
+ */
+ hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
+ if (hp->sw_bmsr & BMSR_LSTATUS) {
+ /* Wheee, it's up, display the link mode in use and put
+ * the timer to sleep.
+ */
+ display_link_mode(hp, tregs);
+ hp->timer_state = asleep;
+ restart_timer = 0;
+ } else {
+ if (hp->timer_ticks >= 10) {
+ printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
+ "not completely up.\n", hp->dev->name);
+ hp->timer_ticks = 0;
+ restart_timer = 1;
+ } else {
+ restart_timer = 1;
+ }
+ }
+ break;
+
+ case ltrywait:
+ /* Making the timeout here too long can make it take
+ * annoyingly long to attempt all of the link mode
+ * permutations, but then again this is essentially
+ * error recovery code for the most part.
+ */
+ hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
+ hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
+ if (hp->timer_ticks == 1) {
+ if (!is_lucent_phy(hp)) {
+ /* Disable the transceiver for this tick; we'll re-enable it
+ * next tick, then check link state on the following tick.
+ */
+ hp->sw_csconfig |= CSCONFIG_TCVDISAB;
+ happy_meal_tcvr_write(hp, tregs,
+ DP83840_CSCONFIG, hp->sw_csconfig);
+ }
+ restart_timer = 1;
+ break;
+ }
+ if (hp->timer_ticks == 2) {
+ if (!is_lucent_phy(hp)) {
+ hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
+ happy_meal_tcvr_write(hp, tregs,
+ DP83840_CSCONFIG, hp->sw_csconfig);
+ }
+ restart_timer = 1;
+ break;
+ }
+ if (hp->sw_bmsr & BMSR_LSTATUS) {
+ /* Force mode selection success. */
+ display_forced_link_mode(hp, tregs);
+ set_happy_link_modes(hp, tregs); /* XXX error? then what? */
+ hp->timer_state = asleep;
+ restart_timer = 0;
+ } else {
+ if (hp->timer_ticks >= 4) { /* 6 seconds or so... */
+ int ret;
+
+ ret = try_next_permutation(hp, tregs);
+ if (ret == -1) {
+ /* Aieee, tried them all, reset the
+ * chip and try all over again.
+ */
+
+ /* Let the user know... */
+ printk(KERN_NOTICE "%s: Link down, cable problem?\n",
+ hp->dev->name);
+
+ ret = happy_meal_init(hp);
+ if (ret) {
+ /* ho hum... */
+ printk(KERN_ERR "%s: Error, cannot re-init the "
+ "Happy Meal.\n", hp->dev->name);
+ }
+ goto out;
+ }
+ if (!is_lucent_phy(hp)) {
+ hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
+ DP83840_CSCONFIG);
+ hp->sw_csconfig |= CSCONFIG_TCVDISAB;
+ happy_meal_tcvr_write(hp, tregs,
+ DP83840_CSCONFIG, hp->sw_csconfig);
+ }
+ hp->timer_ticks = 0;
+ restart_timer = 1;
+ } else {
+ restart_timer = 1;
+ }
+ }
+ break;
+
+ case asleep:
+ default:
+ /* Can't happen... */
+ printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
+ hp->dev->name);
+ restart_timer = 0;
+ hp->timer_ticks = 0;
+ hp->timer_state = asleep; /* foo on you */
+ break;
+ }
+
+ if (restart_timer) {
+ hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
+ add_timer(&hp->happy_timer);
+ }
+
+out:
+ spin_unlock_irq(&hp->happy_lock);
+}
+
+#define TX_RESET_TRIES 32
+#define RX_RESET_TRIES 32
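+/* Note: with the 20us poll below, each engine gets up to
+ * 32 * 20us (about 640us) to come out of software reset before
+ * we report it wedged.
+ */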
+
+/* hp->happy_lock must be held */
+static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
+{
+ int tries = TX_RESET_TRIES;
+
+ HMD(("happy_meal_tx_reset: reset, "));
+
+ /* Would you like to try our SMCC Delux? */
+ hme_write32(hp, bregs + BMAC_TXSWRESET, 0);
+ while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --tries)
+ udelay(20);
+
+ /* Lettuce, tomato, buggy hardware (no extra charge)? */
+ if (!tries)
+ printk(KERN_ERR "happy meal: Transceiver BigMac ATTACK!\n");
+
+ /* Take care. */
+ HMD(("done\n"));
+}
+
+/* hp->happy_lock must be held */
+static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
+{
+ int tries = RX_RESET_TRIES;
+
+ HMD(("happy_meal_rx_reset: reset, "));
+
+ /* We have a special on GNU/Viking hardware bugs today. */
+ hme_write32(hp, bregs + BMAC_RXSWRESET, 0);
+ while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --tries)
+ udelay(20);
+
+ /* Will that be all? */
+ if (!tries)
+ printk(KERN_ERR "happy meal: Receiver BigMac ATTACK!\n");
+
+ /* Don't forget your vik_1137125_wa. Have a nice day. */
+ HMD(("done\n"));
+}
+
+#define STOP_TRIES 16
+
+/* hp->happy_lock must be held */
+static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
+{
+ int tries = STOP_TRIES;
+
+ HMD(("happy_meal_stop: reset, "));
+
+ /* We're consolidating our STB products, it's your lucky day. */
+ hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL);
+ while (hme_read32(hp, gregs + GREG_SWRESET) && --tries)
+ udelay(20);
+
+ /* Come back next week when we are "Sun Microelectronics". */
+ if (!tries)
+ printk(KERN_ERR "happy meal: Fry guys.\n");
+
+ /* Remember: "Different name, same old buggy as shit hardware." */
+ HMD(("done\n"));
+}
+
+/* hp->happy_lock must be held */
+static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
+{
+ struct net_device_stats *stats = &hp->net_stats;
+
+ stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
+ hme_write32(hp, bregs + BMAC_RCRCECTR, 0);
+
+ stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR);
+ hme_write32(hp, bregs + BMAC_UNALECTR, 0);
+
+ stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR);
+ hme_write32(hp, bregs + BMAC_GLECTR, 0);
+
+ stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR);
+
+ stats->collisions +=
+ (hme_read32(hp, bregs + BMAC_EXCTR) +
+ hme_read32(hp, bregs + BMAC_LTCTR));
+ hme_write32(hp, bregs + BMAC_EXCTR, 0);
+ hme_write32(hp, bregs + BMAC_LTCTR, 0);
+}
+
+/* hp->happy_lock must be held */
+static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
+{
+ ASD(("happy_meal_poll_stop: "));
+
+ /* If polling disabled or not polling already, nothing to do. */
+ if ((hp->happy_flags & (HFLAG_POLLENABLE | HFLAG_POLL)) !=
+ (HFLAG_POLLENABLE | HFLAG_POLL)) {
+ HMD(("not polling, return\n"));
+ return;
+ }
+
+ /* Shut up the MIF. */
+ ASD(("were polling, mif ints off, "));
+ hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
+
+ /* Turn off polling. */
+ ASD(("polling off, "));
+ hme_write32(hp, tregs + TCVR_CFG,
+ hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_PENABLE));
+
+ /* We are no longer polling. */
+ hp->happy_flags &= ~(HFLAG_POLL);
+
+ /* Let the bits settle. */
+ udelay(200);
+ ASD(("done\n"));
+}
+
+/* Only Sun can take such nice parts and fuck up the programming interface
+ * like this. Good job guys...
+ */
+#define TCVR_RESET_TRIES 16 /* It should reset quickly */
+#define TCVR_UNISOLATE_TRIES 32 /* Dis-isolation can take longer. */
+
+/* hp->happy_lock must be held */
+static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
+{
+ u32 tconfig;
+ int result, tries = TCVR_RESET_TRIES;
+
+ tconfig = hme_read32(hp, tregs + TCVR_CFG);
+ ASD(("happy_meal_tcvr_reset: tcfg<%08x> ", tconfig));
+ if (hp->tcvr_type == external) {
+ ASD(("external<"));
+ hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT));
+ hp->tcvr_type = internal;
+ hp->paddr = TCV_PADDR_ITX;
+ ASD(("ISOLATE,"));
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR,
+ (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
+ result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+ if (result == TCVR_FAILURE) {
+ ASD(("phyread_fail>\n"));
+ return -1;
+ }
+ ASD(("phyread_ok,PSELECT>"));
+ hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
+ hp->tcvr_type = external;
+ hp->paddr = TCV_PADDR_ETX;
+ } else {
+ if (tconfig & TCV_CFG_MDIO1) {
+ ASD(("internal<PSELECT,"));
+ hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT));
+ ASD(("ISOLATE,"));
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR,
+ (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
+ result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+ if (result == TCVR_FAILURE) {
+ ASD(("phyread_fail>\n"));
+ return -1;
+ }
+ ASD(("phyread_ok,~PSELECT>"));
+ hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT)));
+ hp->tcvr_type = internal;
+ hp->paddr = TCV_PADDR_ITX;
+ }
+ }
+
+ ASD(("BMCR_RESET "));
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET);
+
+ while (--tries) {
+ result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+ if (result == TCVR_FAILURE)
+ return -1;
+ hp->sw_bmcr = result;
+ if (!(result & BMCR_RESET))
+ break;
+ udelay(20);
+ }
+ if (!tries) {
+ ASD(("BMCR RESET FAILED!\n"));
+ return -1;
+ }
+ ASD(("RESET_OK\n"));
+
+ /* Get fresh copies of the PHY registers. */
+ hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
+ hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
+ hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
+ hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
+
+ ASD(("UNISOLATE"));
+ hp->sw_bmcr &= ~(BMCR_ISOLATE);
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+
+ tries = TCVR_UNISOLATE_TRIES;
+ while (--tries) {
+ result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+ if (result == TCVR_FAILURE)
+ return -1;
+ if (!(result & BMCR_ISOLATE))
+ break;
+ udelay(20);
+ }
+ if (!tries) {
+ ASD((" FAILED!\n"));
+ return -1;
+ }
+ ASD((" SUCCESS and CSCONFIG_DFBYPASS\n"));
+ if (!is_lucent_phy(hp)) {
+ result = happy_meal_tcvr_read(hp, tregs,
+ DP83840_CSCONFIG);
+ happy_meal_tcvr_write(hp, tregs,
+ DP83840_CSCONFIG, (result | CSCONFIG_DFBYPASS));
+ }
+ return 0;
+}
+
+/* Figure out whether we have an internal or external transceiver.
+ *
+ * hp->happy_lock must be held
+ */
+static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tregs)
+{
+ unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG);
+
+ ASD(("happy_meal_transceiver_check: tcfg=%08lx ", tconfig));
+ if (hp->happy_flags & HFLAG_POLL) {
+ /* If we are polling, we must stop to get the transceiver type. */
+ ASD(("<polling> "));
+ if (hp->tcvr_type == internal) {
+ if (tconfig & TCV_CFG_MDIO1) {
+ ASD(("<internal> <poll stop> "));
+ happy_meal_poll_stop(hp, tregs);
+ hp->paddr = TCV_PADDR_ETX;
+ hp->tcvr_type = external;
+ ASD(("<external>\n"));
+ tconfig &= ~(TCV_CFG_PENABLE);
+ tconfig |= TCV_CFG_PSELECT;
+ hme_write32(hp, tregs + TCVR_CFG, tconfig);
+ }
+ } else {
+ if (hp->tcvr_type == external) {
+ ASD(("<external> "));
+ if (!(hme_read32(hp, tregs + TCVR_STATUS) >> 16)) {
+ ASD(("<poll stop> "));
+ happy_meal_poll_stop(hp, tregs);
+ hp->paddr = TCV_PADDR_ITX;
+ hp->tcvr_type = internal;
+ ASD(("<internal>\n"));
+ hme_write32(hp, tregs + TCVR_CFG,
+ hme_read32(hp, tregs + TCVR_CFG) &
+ ~(TCV_CFG_PSELECT));
+ }
+ ASD(("\n"));
+ } else {
+ ASD(("<none>\n"));
+ }
+ }
+ } else {
+ u32 reread = hme_read32(hp, tregs + TCVR_CFG);
+
+ /* Else we can just work off of the MDIO bits. */
+ ASD(("<not polling> "));
+ if (reread & TCV_CFG_MDIO1) {
+ hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
+ hp->paddr = TCV_PADDR_ETX;
+ hp->tcvr_type = external;
+ ASD(("<external>\n"));
+ } else {
+ if (reread & TCV_CFG_MDIO0) {
+ hme_write32(hp, tregs + TCVR_CFG,
+ tconfig & ~(TCV_CFG_PSELECT));
+ hp->paddr = TCV_PADDR_ITX;
+ hp->tcvr_type = internal;
+ ASD(("<internal>\n"));
+ } else {
+ printk(KERN_ERR "happy meal: Transceiver and a coke please.\n");
+ hp->tcvr_type = none; /* Grrr... */
+ ASD(("<none>\n"));
+ }
+ }
+ }
+}
+
+/* The receive ring buffers are a bit tricky to get right. Here goes...
+ *
+ * The buffers we dma into must be 64 byte aligned. So we use a special
+ * alloc_skb() routine for the happy meal to allocate 64 bytes more than
+ * we really need.
+ *
+ * We use skb_reserve() to align the data block we get in the skb. We
+ * also program the etxregs->cfg register to use an offset of 2. This
+ * empirical constant plus the ethernet header size will always leave
+ * us with a nicely aligned ip header once we pass things up to the
+ * protocol layers.
+ *
+ * The numbers work out to:
+ *
+ * Max ethernet frame size 1518
+ * Ethernet header size 14
+ * Happy Meal base offset 2
+ *
+ * Say a skb data area is at 0xf001b010, and its size alloced is
+ * (ETH_FRAME_LEN + 64 + 2) = (1514 + 64 + 2) = 1580 bytes.
+ *
+ * First our alloc_skb() routine aligns the data base to a 64 byte
+ * boundary. We now have 0xf001b040 as our skb data address. We
+ * plug this into the receive descriptor address.
+ *
+ * Next, we skb_reserve() 2 bytes to account for the Happy Meal offset.
+ * So now the data we will end up looking at starts at 0xf001b042. When
+ * the packet arrives, we will check out the size received and subtract
+ * this from the skb->length. Then we just pass the packet up to the
+ * protocols as is, and allocate a new skb to replace this slot we have
+ * just received from.
+ *
+ * The ethernet layer will strip the ether header from the front of the
+ * skb we just sent to it, this leaves us with the ip header sitting
+ * nicely aligned at 0xf001b050. Also, for tcp and udp packets the
+ * Happy Meal has even checksummed the tcp/udp data for us. The 16
+ * bit checksum is obtained from the low bits of the receive descriptor
+ * flags; the raw value is converted to network order and
+ * complemented before use, thus:
+ *
+ * csum = rxd->rx_flags & 0xffff;
+ * skb->csum = csum_unfold(~(__force __sum16)htons(csum));
+ * skb->ip_summed = CHECKSUM_COMPLETE;
+ *
+ * before sending off the skb to the protocols, and we are good as gold.
+ */
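+/* A minimal sketch of the alignment step described above (the real
+ * happy_meal_alloc_skb() appears earlier in this file; the names here
+ * are illustrative only, not the actual implementation):
+ *
+ *   skb = alloc_skb(length + 64 + 2, gfp_flags);
+ *   off = (unsigned long) skb->data & 63;
+ *   if (off)
+ *       skb_reserve(skb, 64 - off);   (0xf001b010 -> 0xf001b040)
+ */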
+static void happy_meal_clean_rings(struct happy_meal *hp)
+{
+ int i;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (hp->rx_skbs[i] != NULL) {
+ struct sk_buff *skb = hp->rx_skbs[i];
+ struct happy_meal_rxd *rxd;
+ u32 dma_addr;
+
+ rxd = &hp->happy_block->happy_meal_rxd[i];
+ dma_addr = hme_read_desc32(hp, &rxd->rx_addr);
+ dma_unmap_single(hp->dma_dev, dma_addr,
+ RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ hp->rx_skbs[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (hp->tx_skbs[i] != NULL) {
+ struct sk_buff *skb = hp->tx_skbs[i];
+ struct happy_meal_txd *txd;
+ u32 dma_addr;
+ int frag;
+
+ hp->tx_skbs[i] = NULL;
+
+ for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+ txd = &hp->happy_block->happy_meal_txd[i];
+ dma_addr = hme_read_desc32(hp, &txd->tx_addr);
+ if (!frag)
+ dma_unmap_single(hp->dma_dev, dma_addr,
+ (hme_read_desc32(hp, &txd->tx_flags)
+ & TXFLAG_SIZE),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(hp->dma_dev, dma_addr,
+ (hme_read_desc32(hp, &txd->tx_flags)
+ & TXFLAG_SIZE),
+ DMA_TO_DEVICE);
+
+ if (frag != skb_shinfo(skb)->nr_frags)
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+ }
+}
+
+/* hp->happy_lock must be held */
+static void happy_meal_init_rings(struct happy_meal *hp)
+{
+ struct hmeal_init_block *hb = hp->happy_block;
+ struct net_device *dev = hp->dev;
+ int i;
+
+ HMD(("happy_meal_init_rings: counters to zero, "));
+ hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0;
+
+ /* Free any skippy bufs left around in the rings. */
+ HMD(("clean, "));
+ happy_meal_clean_rings(hp);
+
+ /* Now get new skippy bufs for the receive ring. */
+ HMD(("init rxring, "));
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+
+ skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
+ if (!skb) {
+ hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
+ continue;
+ }
+ hp->rx_skbs[i] = skb;
+ skb->dev = dev;
+
+ /* Because we reserve afterwards. */
+ skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+ hme_write_rxd(hp, &hb->happy_meal_rxd[i],
+ (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
+ dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
+ DMA_FROM_DEVICE));
+ skb_reserve(skb, RX_OFFSET);
+ }
+
+ HMD(("init txring, "));
+ for (i = 0; i < TX_RING_SIZE; i++)
+ hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);
+
+ HMD(("done\n"));
+}
+
+/* hp->happy_lock must be held */
+static void happy_meal_begin_auto_negotiation(struct happy_meal *hp,
+ void __iomem *tregs,
+ struct ethtool_cmd *ep)
+{
+ int timeout;
+
+ /* Read all of the registers we are interested in now. */
+ hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
+ hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+ hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
+ hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
+
+ /* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */
+
+ hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
+ if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
+ /* Advertise everything we can support. */
+ if (hp->sw_bmsr & BMSR_10HALF)
+ hp->sw_advertise |= (ADVERTISE_10HALF);
+ else
+ hp->sw_advertise &= ~(ADVERTISE_10HALF);
+
+ if (hp->sw_bmsr & BMSR_10FULL)
+ hp->sw_advertise |= (ADVERTISE_10FULL);
+ else
+ hp->sw_advertise &= ~(ADVERTISE_10FULL);
+ if (hp->sw_bmsr & BMSR_100HALF)
+ hp->sw_advertise |= (ADVERTISE_100HALF);
+ else
+ hp->sw_advertise &= ~(ADVERTISE_100HALF);
+ if (hp->sw_bmsr & BMSR_100FULL)
+ hp->sw_advertise |= (ADVERTISE_100FULL);
+ else
+ hp->sw_advertise &= ~(ADVERTISE_100FULL);
+ happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
+
+ /* XXX Currently no Happy Meal cards I know of support 100BaseT4,
+ * XXX and this is because the DP83840 does not support it, changes
+ * XXX would need to be made to the tx/rx logic in the driver as well
+ * XXX so I completely skip checking for it in the BMSR for now.
+ */
+
+#ifdef AUTO_SWITCH_DEBUG
+ ASD(("%s: Advertising [ ", hp->dev->name));
+ if (hp->sw_advertise & ADVERTISE_10HALF)
+ ASD(("10H "));
+ if (hp->sw_advertise & ADVERTISE_10FULL)
+ ASD(("10F "));
+ if (hp->sw_advertise & ADVERTISE_100HALF)
+ ASD(("100H "));
+ if (hp->sw_advertise & ADVERTISE_100FULL)
+ ASD(("100F "));
+#endif
+
+ /* Enable Auto-Negotiation, this is usually on already... */
+ hp->sw_bmcr |= BMCR_ANENABLE;
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+
+ /* Restart it to make sure it is going. */
+ hp->sw_bmcr |= BMCR_ANRESTART;
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+
+ /* BMCR_ANRESTART self clears when the process has begun. */
+
+ timeout = 64; /* More than enough. */
+ while (--timeout) {
+ hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+ if (!(hp->sw_bmcr & BMCR_ANRESTART))
+ break; /* got it. */
+ udelay(10);
+ }
+ if (!timeout) {
+ printk(KERN_ERR "%s: Happy Meal would not start auto negotiation "
+ "BMCR=0x%04x\n", hp->dev->name, hp->sw_bmcr);
+ printk(KERN_NOTICE "%s: Performing force link detection.\n",
+ hp->dev->name);
+ goto force_link;
+ } else {
+ hp->timer_state = arbwait;
+ }
+ } else {
+force_link:
+ /* Force the link up, trying first a particular mode.
+ * Either we are here at the request of ethtool or
+ * because the Happy Meal would not start to autoneg.
+ */
+
+ /* Disable auto-negotiation in BMCR, enable the duplex and
+ * speed setting, init the timer state machine, and fire it off.
+ */
+ if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
+ hp->sw_bmcr = BMCR_SPEED100;
+ } else {
+ if (ethtool_cmd_speed(ep) == SPEED_100)
+ hp->sw_bmcr = BMCR_SPEED100;
+ else
+ hp->sw_bmcr = 0;
+ if (ep->duplex == DUPLEX_FULL)
+ hp->sw_bmcr |= BMCR_FULLDPLX;
+ }
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+
+ if (!is_lucent_phy(hp)) {
+ /* OK, it seems we need to disable the transceiver for the
+ * first tick to make sure we get an accurate link state at
+ * the second tick.
+ */
+ hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
+ DP83840_CSCONFIG);
+ hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
+ happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG,
+ hp->sw_csconfig);
+ }
+ hp->timer_state = ltrywait;
+ }
+
+ hp->timer_ticks = 0;
+ hp->happy_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
+ hp->happy_timer.data = (unsigned long) hp;
+ hp->happy_timer.function = happy_meal_timer;
+ add_timer(&hp->happy_timer);
+}
+
+/* hp->happy_lock must be held */
+static int happy_meal_init(struct happy_meal *hp)
+{
+ void __iomem *gregs = hp->gregs;
+ void __iomem *etxregs = hp->etxregs;
+ void __iomem *erxregs = hp->erxregs;
+ void __iomem *bregs = hp->bigmacregs;
+ void __iomem *tregs = hp->tcvregs;
+ u32 regtmp, rxcfg;
+ unsigned char *e = &hp->dev->dev_addr[0];
+
+ /* If auto-negotiation timer is running, kill it. */
+ del_timer(&hp->happy_timer);
+
+ HMD(("happy_meal_init: happy_flags[%08x] ",
+ hp->happy_flags));
+ if (!(hp->happy_flags & HFLAG_INIT)) {
+ HMD(("set HFLAG_INIT, "));
+ hp->happy_flags |= HFLAG_INIT;
+ happy_meal_get_counters(hp, bregs);
+ }
+
+ /* Stop polling. */
+ HMD(("to happy_meal_poll_stop\n"));
+ happy_meal_poll_stop(hp, tregs);
+
+ /* Stop transmitter and receiver. */
+ HMD(("happy_meal_init: to happy_meal_stop\n"));
+ happy_meal_stop(hp, gregs);
+
+ /* Alloc and reset the tx/rx descriptor chains. */
+ HMD(("happy_meal_init: to happy_meal_init_rings\n"));
+ happy_meal_init_rings(hp);
+
+ /* Shut up the MIF. */
+ HMD(("happy_meal_init: Disable all MIF irqs (old[%08x]), ",
+ hme_read32(hp, tregs + TCVR_IMASK)));
+ hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
+
+ /* See if we can enable the MIF frame on this card to speak to the DP83840. */
+ if (hp->happy_flags & HFLAG_FENABLE) {
+ HMD(("use frame old[%08x], ",
+ hme_read32(hp, tregs + TCVR_CFG)));
+ hme_write32(hp, tregs + TCVR_CFG,
+ hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
+ } else {
+ HMD(("use bitbang old[%08x], ",
+ hme_read32(hp, tregs + TCVR_CFG)));
+ hme_write32(hp, tregs + TCVR_CFG,
+ hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
+ }
+
+ /* Check the state of the transceiver. */
+ HMD(("to happy_meal_transceiver_check\n"));
+ happy_meal_transceiver_check(hp, tregs);
+
+ /* Put the Big Mac into a sane state. */
+ HMD(("happy_meal_init: "));
+ switch(hp->tcvr_type) {
+ case none:
+ /* Cannot operate if we don't know the transceiver type! */
+ HMD(("AAIEEE no transceiver type, EAGAIN"));
+ return -EAGAIN;
+
+ case internal:
+ /* Using the MII buffers. */
+ HMD(("internal, using MII, "));
+ hme_write32(hp, bregs + BMAC_XIFCFG, 0);
+ break;
+
+ case external:
+ /* Not using the MII, disable it. */
+ HMD(("external, disable MII, "));
+ hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
+ break;
+ }
+
+ if (happy_meal_tcvr_reset(hp, tregs))
+ return -EAGAIN;
+
+ /* Reset the Happy Meal Big Mac transceiver and the receiver. */
+ HMD(("tx/rx reset, "));
+ happy_meal_tx_reset(hp, bregs);
+ happy_meal_rx_reset(hp, bregs);
+
+ /* Set jam size and inter-packet gaps to reasonable defaults. */
+ HMD(("jsize/ipg1/ipg2, "));
+ hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE);
+ hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1);
+ hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2);
+
+ /* Load up the MAC address and random seed. */
+ HMD(("rseed/macaddr, "));
+
+ /* The docs recommend to use the 10LSB of our MAC here. */
+ hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));
+
+ hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5]));
+ hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
+ hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));
+
+ HMD(("htable, "));
+ if ((hp->dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(hp->dev) > 64)) {
+ hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
+ hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
+ hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
+ hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
+ } else if ((hp->dev->flags & IFF_PROMISC) == 0) {
+ u16 hash_table[4];
+ struct netdev_hw_addr *ha;
+ u32 crc;
+
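+ /* The BigMAC multicast filter is a 64-bit hash table: the top
+ * six bits of the little-endian CRC of each address select a
+ * bit, with the upper two of those picking one of the four
+ * 16-bit HTABLE registers and the lower four the bit within it.
+ */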
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(ha, hp->dev) {
+ crc = ether_crc_le(6, ha->addr);
+ crc >>= 26;
+ hash_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
+ hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
+ hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
+ hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
+ hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
+ } else {
+ hme_write32(hp, bregs + BMAC_HTABLE3, 0);
+ hme_write32(hp, bregs + BMAC_HTABLE2, 0);
+ hme_write32(hp, bregs + BMAC_HTABLE1, 0);
+ hme_write32(hp, bregs + BMAC_HTABLE0, 0);
+ }
+
+ /* Set the RX and TX ring ptrs. */
+ HMD(("ring ptrs rxr[%08x] txr[%08x]\n",
+ ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
+ ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))));
+ hme_write32(hp, erxregs + ERX_RING,
+ ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
+ hme_write32(hp, etxregs + ETX_RING,
+ ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
+
+ /* Parity issues in the ERX unit of some HME revisions can cause some
+ * registers to not be written unless their parity is even. Detect such
+ * lost writes and simply rewrite with a low bit set (which will be ignored
+ * since the rxring needs to be 2K aligned).
+ */
+ if (hme_read32(hp, erxregs + ERX_RING) !=
+ ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)))
+ hme_write32(hp, erxregs + ERX_RING,
+ ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))
+ | 0x4);
+
+ /* Set the supported burst sizes. */
+ HMD(("happy_meal_init: old[%08x] bursts<",
+ hme_read32(hp, gregs + GREG_CFG)));
+
+#ifndef CONFIG_SPARC
+ /* It is always PCI and can handle 64byte bursts. */
+ hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64);
+#else
+ if ((hp->happy_bursts & DMA_BURST64) &&
+ ((hp->happy_flags & HFLAG_PCI) != 0
+#ifdef CONFIG_SBUS
+ || sbus_can_burst64()
+#endif
+ || 0)) {
+ u32 gcfg = GREG_CFG_BURST64;
+
+ /* I have no idea if I should set the extended
+ * transfer mode bit for Cheerio, so for now I
+ * do not. -DaveM
+ */
+#ifdef CONFIG_SBUS
+ if ((hp->happy_flags & HFLAG_PCI) == 0) {
+ struct platform_device *op = hp->happy_dev;
+ if (sbus_can_dma_64bit()) {
+ sbus_set_sbus64(&op->dev,
+ hp->happy_bursts);
+ gcfg |= GREG_CFG_64BIT;
+ }
+ }
+#endif
+
+ HMD(("64>"));
+ hme_write32(hp, gregs + GREG_CFG, gcfg);
+ } else if (hp->happy_bursts & DMA_BURST32) {
+ HMD(("32>"));
+ hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32);
+ } else if (hp->happy_bursts & DMA_BURST16) {
+ HMD(("16>"));
+ hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16);
+ } else {
+ HMD(("XXX>"));
+ hme_write32(hp, gregs + GREG_CFG, 0);
+ }
+#endif /* CONFIG_SPARC */
+
+ /* Turn off interrupts we do not want to hear. */
+ HMD((", enable global interrupts, "));
+ hme_write32(hp, gregs + GREG_IMASK,
+ (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP |
+ GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR));
+
+ /* Set the transmit ring buffer size. */
+ HMD(("tx rsize=%d oreg[%08x], ", (int)TX_RING_SIZE,
+ hme_read32(hp, etxregs + ETX_RSIZE)));
+ hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);
+
+ /* Enable transmitter DVMA. */
+ HMD(("tx dma enable old[%08x], ",
+ hme_read32(hp, etxregs + ETX_CFG)));
+ hme_write32(hp, etxregs + ETX_CFG,
+ hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE);
+
+ /* This chip really rots, for the receiver sometimes when you
+ * write to its control registers not all the bits get there
+ * properly. I cannot think of a sane way to provide complete
+ * coverage for this hardware bug yet.
+ */
+ HMD(("erx regs bug old[%08x]\n",
+ hme_read32(hp, erxregs + ERX_CFG)));
+ hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
+ regtmp = hme_read32(hp, erxregs + ERX_CFG);
+ hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
+ if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) {
+ printk(KERN_ERR "happy meal: Eieee, rx config register gets greasy fries.\n");
+ printk(KERN_ERR "happy meal: Trying to set %08x, reread gives %08x\n",
+ ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
+ /* XXX Should return failure here... */
+ }
+
+ /* Enable Big Mac hash table filter. */
+ HMD(("happy_meal_init: enable hash rx_cfg_old[%08x], ",
+ hme_read32(hp, bregs + BMAC_RXCFG)));
+ rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME;
+ if (hp->dev->flags & IFF_PROMISC)
+ rxcfg |= BIGMAC_RXCFG_PMISC;
+ hme_write32(hp, bregs + BMAC_RXCFG, rxcfg);
+
+ /* Let the bits settle in the chip. */
+ udelay(10);
+
+ /* Ok, configure the Big Mac transmitter. */
+ HMD(("BIGMAC init, "));
+ regtmp = 0;
+ if (hp->happy_flags & HFLAG_FULL)
+ regtmp |= BIGMAC_TXCFG_FULLDPLX;
+
+ /* Don't turn on the "don't give up" bit for now. It could cause hme
+ * to deadlock with the PHY if a Jabber occurs.
+ */
+ hme_write32(hp, bregs + BMAC_TXCFG, regtmp /*| BIGMAC_TXCFG_DGIVEUP*/);
+
+ /* Give up after 16 TX attempts. */
+ hme_write32(hp, bregs + BMAC_ALIMIT, 16);
+
+ /* Enable the output drivers no matter what. */
+ regtmp = BIGMAC_XCFG_ODENABLE;
+
+ /* If card can do lance mode, enable it. */
+ if (hp->happy_flags & HFLAG_LANCE)
+ regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE;
+
+ /* Disable the MII buffers if using external transceiver. */
+ if (hp->tcvr_type == external)
+ regtmp |= BIGMAC_XCFG_MIIDISAB;
+
+ HMD(("XIF config old[%08x], ",
+ hme_read32(hp, bregs + BMAC_XIFCFG)));
+ hme_write32(hp, bregs + BMAC_XIFCFG, regtmp);
+
+ /* Start things up. */
+ HMD(("tx old[%08x] and rx [%08x] ON!\n",
+ hme_read32(hp, bregs + BMAC_TXCFG),
+ hme_read32(hp, bregs + BMAC_RXCFG)));
+
+ /* Set larger TX/RX size to allow for 802.1Q */
+ hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8);
+ hme_write32(hp, bregs + BMAC_RXMAX, ETH_FRAME_LEN + 8);
+
+ hme_write32(hp, bregs + BMAC_TXCFG,
+ hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE);
+ hme_write32(hp, bregs + BMAC_RXCFG,
+ hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE);
+
+ /* Get the autonegotiation started, and the watch timer ticking. */
+ happy_meal_begin_auto_negotiation(hp, tregs, NULL);
+
+ /* Success. */
+ return 0;
+}
+
+/* hp->happy_lock must be held */
+static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
+{
+ void __iomem *tregs = hp->tcvregs;
+ void __iomem *bregs = hp->bigmacregs;
+ void __iomem *gregs = hp->gregs;
+
+ happy_meal_stop(hp, gregs);
+ hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
+ if (hp->happy_flags & HFLAG_FENABLE)
+ hme_write32(hp, tregs + TCVR_CFG,
+ hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
+ else
+ hme_write32(hp, tregs + TCVR_CFG,
+ hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
+ happy_meal_transceiver_check(hp, tregs);
+ switch(hp->tcvr_type) {
+ case none:
+ return;
+ case internal:
+ hme_write32(hp, bregs + BMAC_XIFCFG, 0);
+ break;
+ case external:
+ hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
+ break;
+ }
+ if (happy_meal_tcvr_reset(hp, tregs))
+ return;
+
+ /* Latch PHY registers as of now. */
+ hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
+ hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
+
+ /* Advertise everything we can support. */
+ if (hp->sw_bmsr & BMSR_10HALF)
+ hp->sw_advertise |= (ADVERTISE_10HALF);
+ else
+ hp->sw_advertise &= ~(ADVERTISE_10HALF);
+
+ if (hp->sw_bmsr & BMSR_10FULL)
+ hp->sw_advertise |= (ADVERTISE_10FULL);
+ else
+ hp->sw_advertise &= ~(ADVERTISE_10FULL);
+ if (hp->sw_bmsr & BMSR_100HALF)
+ hp->sw_advertise |= (ADVERTISE_100HALF);
+ else
+ hp->sw_advertise &= ~(ADVERTISE_100HALF);
+ if (hp->sw_bmsr & BMSR_100FULL)
+ hp->sw_advertise |= (ADVERTISE_100FULL);
+ else
+ hp->sw_advertise &= ~(ADVERTISE_100FULL);
+
+ /* Update the PHY advertisement register. */
+ happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
+}
+
+/* Once status is latched (by happy_meal_interrupt) it is cleared by
+ * the hardware, so we cannot re-read it and get a correct value.
+ *
+ * hp->happy_lock must be held
+ */
+static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
+{
+ int reset = 0;
+
+ /* Only print messages for non-counter related interrupts. */
+ if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND |
+ GREG_STAT_MAXPKTERR | GREG_STAT_RXERR |
+ GREG_STAT_RXPERR | GREG_STAT_RXTERR | GREG_STAT_EOPERR |
+ GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR |
+ GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR |
+ GREG_STAT_SLVPERR))
+ printk(KERN_ERR "%s: Error interrupt for happy meal, status = %08x\n",
+ hp->dev->name, status);
+
+ if (status & GREG_STAT_RFIFOVF) {
+ /* Receive FIFO overflow is harmless and the hardware will take
+ care of it, just some packets are lost. Who cares. */
+ printk(KERN_DEBUG "%s: Happy Meal receive FIFO overflow.\n", hp->dev->name);
+ }
+
+ if (status & GREG_STAT_STSTERR) {
+ /* BigMAC SQE link test failed. */
+ printk(KERN_ERR "%s: Happy Meal BigMAC SQE test failed.\n", hp->dev->name);
+ reset = 1;
+ }
+
+ if (status & GREG_STAT_TFIFO_UND) {
+ /* Transmit FIFO underrun, again DMA error likely. */
+ printk(KERN_ERR "%s: Happy Meal transmitter FIFO underrun, DMA error.\n",
+ hp->dev->name);
+ reset = 1;
+ }
+
+ if (status & GREG_STAT_MAXPKTERR) {
+ /* Driver error, tried to transmit something larger
+ * than the ethernet max MTU.
+ */
+ printk(KERN_ERR "%s: Happy Meal MAX Packet size error.\n", hp->dev->name);
+ reset = 1;
+ }
+
+ if (status & GREG_STAT_NORXD) {
+ /* This is harmless, it just means the system is
+ * quite loaded and the incoming packet rate was
+ * faster than the interrupt handler could keep up
+ * with.
+ */
+ printk(KERN_INFO "%s: Happy Meal out of receive "
+ "descriptors, packet dropped.\n",
+ hp->dev->name);
+ }
+
+ if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) {
+ /* All sorts of DMA receive errors. */
+ printk(KERN_ERR "%s: Happy Meal rx DMA errors [ ", hp->dev->name);
+ if (status & GREG_STAT_RXERR)
+ printk("GenericError ");
+ if (status & GREG_STAT_RXPERR)
+ printk("ParityError ");
+ if (status & GREG_STAT_RXTERR)
+ printk("RxTagBotch ");
+ printk("]\n");
+ reset = 1;
+ }
+
+ if (status & GREG_STAT_EOPERR) {
+ /* Driver bug, didn't set EOP bit in tx descriptor given
+ * to the happy meal.
+ */
+ printk(KERN_ERR "%s: EOP not set in happy meal transmit descriptor!\n",
+ hp->dev->name);
+ reset = 1;
+ }
+
+ if (status & GREG_STAT_MIFIRQ) {
+ /* MIF signalled an interrupt, were we polling it? */
+ printk(KERN_ERR "%s: Happy Meal MIF interrupt.\n", hp->dev->name);
+ }
+
+ if (status &
+ (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) {
+ /* All sorts of transmit DMA errors. */
+ printk(KERN_ERR "%s: Happy Meal tx DMA errors [ ", hp->dev->name);
+ if (status & GREG_STAT_TXEACK)
+ printk("GenericError ");
+ if (status & GREG_STAT_TXLERR)
+ printk("LateError ");
+ if (status & GREG_STAT_TXPERR)
+ printk("ParityError ");
+ if (status & GREG_STAT_TXTERR)
+ printk("TagBotch ");
+ printk("]\n");
+ reset = 1;
+ }
+
+ if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) {
+ /* Bus or parity error when cpu accessed happy meal registers
+ * or its internal FIFOs. Should never see this.
+ */
+ printk(KERN_ERR "%s: Happy Meal register access SBUS slave (%s) error.\n",
+ hp->dev->name,
+ (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
+ reset = 1;
+ }
+
+ if (reset) {
+ printk(KERN_NOTICE "%s: Resetting...\n", hp->dev->name);
+ happy_meal_init(hp);
+ return 1;
+ }
+ return 0;
+}
+
+/* hp->happy_lock must be held */
+static void happy_meal_mif_interrupt(struct happy_meal *hp)
+{
+ void __iomem *tregs = hp->tcvregs;
+
+ printk(KERN_INFO "%s: Link status change.\n", hp->dev->name);
+ hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+ hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
+
+ /* Use the fastest transmission protocol possible. */
+ if (hp->sw_lpa & LPA_100FULL) {
+ printk(KERN_INFO "%s: Switching to 100Mbps at full duplex.\n", hp->dev->name);
+ hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100);
+ } else if (hp->sw_lpa & LPA_100HALF) {
+ printk(KERN_INFO "%s: Switching to 100Mbps at half duplex.\n", hp->dev->name);
+ hp->sw_bmcr |= BMCR_SPEED100;
+ } else if (hp->sw_lpa & LPA_10FULL) {
+ printk(KERN_INFO "%s: Switching to 10Mbps at full duplex.\n", hp->dev->name);
+ hp->sw_bmcr |= BMCR_FULLDPLX;
+ } else {
+ printk(KERN_INFO "%s: Using 10Mbps at half duplex.\n", hp->dev->name);
+ }
+ happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+
+ /* Finally stop polling and shut up the MIF. */
+ happy_meal_poll_stop(hp, tregs);
+}
+
+#ifdef TXDEBUG
+#define TXD(x) printk x
+#else
+#define TXD(x)
+#endif
+
+/* hp->happy_lock must be held */
+static void happy_meal_tx(struct happy_meal *hp)
+{
+ struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
+ struct happy_meal_txd *this;
+ struct net_device *dev = hp->dev;
+ int elem;
+
+ elem = hp->tx_old;
+ TXD(("TX<"));
+ while (elem != hp->tx_new) {
+ struct sk_buff *skb;
+ u32 flags, dma_addr, dma_len;
+ int frag;
+
+ TXD(("[%d]", elem));
+ this = &txbase[elem];
+ flags = hme_read_desc32(hp, &this->tx_flags);
+ if (flags & TXFLAG_OWN)
+ break;
+ skb = hp->tx_skbs[elem];
+ if (skb_shinfo(skb)->nr_frags) {
+ int last;
+
+ last = elem + skb_shinfo(skb)->nr_frags;
+ last &= (TX_RING_SIZE - 1);
+ flags = hme_read_desc32(hp, &txbase[last].tx_flags);
+ if (flags & TXFLAG_OWN)
+ break;
+ }
+ hp->tx_skbs[elem] = NULL;
+ hp->net_stats.tx_bytes += skb->len;
+
+ for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+ dma_addr = hme_read_desc32(hp, &this->tx_addr);
+ dma_len = hme_read_desc32(hp, &this->tx_flags);
+
+ dma_len &= TXFLAG_SIZE;
+ if (!frag)
+ dma_unmap_single(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
+ else
+ dma_unmap_page(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
+
+ elem = NEXT_TX(elem);
+ this = &txbase[elem];
+ }
+
+ dev_kfree_skb_irq(skb);
+ hp->net_stats.tx_packets++;
+ }
+ hp->tx_old = elem;
+ TXD((">"));
+
+ if (netif_queue_stopped(dev) &&
+ TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1))
+ netif_wake_queue(dev);
+}
+
+#ifdef RXDEBUG
+#define RXD(x) printk x
+#else
+#define RXD(x)
+#endif
+
+/* Originally I used to handle the allocation failure by just giving back
+ * that one ring buffer to the happy meal. Problem is that usually when that
+ * condition is triggered, the happy meal expects you to do something reasonable
+ * with all of the packets it has DMA'd in. So now I just drop the entire
+ * ring when we cannot get a new skb and give them all back to the happy meal,
+ * maybe things will be "happier" now.
+ *
+ * hp->happy_lock must be held
+ */
+static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
+{
+ struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0];
+ struct happy_meal_rxd *this;
+ int elem = hp->rx_new, drops = 0;
+ u32 flags;
+
+ RXD(("RX<"));
+ this = &rxbase[elem];
+ while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) {
+ struct sk_buff *skb;
+ int len = flags >> 16;
+ u16 csum = flags & RXFLAG_CSUM;
+ u32 dma_addr = hme_read_desc32(hp, &this->rx_addr);
+
+ RXD(("[%d ", elem));
+
+ /* Check for errors. */
+ if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
+ RXD(("ERR(%08x)]", flags));
+ hp->net_stats.rx_errors++;
+ if (len < ETH_ZLEN)
+ hp->net_stats.rx_length_errors++;
+ if (len & (RXFLAG_OVERFLOW >> 16)) {
+ hp->net_stats.rx_over_errors++;
+ hp->net_stats.rx_fifo_errors++;
+ }
+
+ /* Return it to the Happy meal. */
+ drop_it:
+ hp->net_stats.rx_dropped++;
+ hme_write_rxd(hp, this,
+ (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
+ dma_addr);
+ goto next;
+ }
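+ /* Two paths from here: big packets surrender the whole ring
+ * buffer to the stack and the slot is refilled with a fresh
+ * skb; small packets are copied into a right-sized skb so the
+ * ring buffer can be recycled in place.
+ */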
+ skb = hp->rx_skbs[elem];
+ if (len > RX_COPY_THRESHOLD) {
+ struct sk_buff *new_skb;
+
+ /* Now refill the entry, if we can. */
+ new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
+ if (new_skb == NULL) {
+ drops++;
+ goto drop_it;
+ }
+ dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
+ hp->rx_skbs[elem] = new_skb;
+ new_skb->dev = dev;
+ skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+ hme_write_rxd(hp, this,
+ (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
+ dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE,
+ DMA_FROM_DEVICE));
+ skb_reserve(new_skb, RX_OFFSET);
+
+ /* Trim the original skb for the netif. */
+ skb_trim(skb, len);
+ } else {
+ struct sk_buff *copy_skb = dev_alloc_skb(len + 2);
+
+ if (copy_skb == NULL) {
+ drops++;
+ goto drop_it;
+ }
+
+ skb_reserve(copy_skb, 2);
+ skb_put(copy_skb, len);
+ dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
+ skb_copy_from_linear_data(skb, copy_skb->data, len);
+ dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
+ /* Reuse original ring buffer. */
+ hme_write_rxd(hp, this,
+ (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
+ dma_addr);
+
+ skb = copy_skb;
+ }
+
+ /* This card is _fucking_ hot... */
+ skb->csum = csum_unfold(~(__force __sum16)htons(csum));
+ skb->ip_summed = CHECKSUM_COMPLETE;
+
+ RXD(("len=%d csum=%4x]", len, csum));
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+
+ hp->net_stats.rx_packets++;
+ hp->net_stats.rx_bytes += len;
+ next:
+ elem = NEXT_RX(elem);
+ this = &rxbase[elem];
+ }
+ hp->rx_new = elem;
+ if (drops)
+ printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", hp->dev->name);
+ RXD((">"));
+}
+
+static irqreturn_t happy_meal_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct happy_meal *hp = netdev_priv(dev);
+ u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
+
+ HMD(("happy_meal_interrupt: status=%08x ", happy_status));
+
+ spin_lock(&hp->happy_lock);
+
+ if (happy_status & GREG_STAT_ERRORS) {
+ HMD(("ERRORS "));
+ if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status))
+ goto out;
+ }
+
+ if (happy_status & GREG_STAT_MIFIRQ) {
+ HMD(("MIFIRQ "));
+ happy_meal_mif_interrupt(hp);
+ }
+
+ if (happy_status & GREG_STAT_TXALL) {
+ HMD(("TXALL "));
+ happy_meal_tx(hp);
+ }
+
+ if (happy_status & GREG_STAT_RXTOHOST) {
+ HMD(("RXTOHOST "));
+ happy_meal_rx(hp, dev);
+ }
+
+ HMD(("done\n"));
+out:
+ spin_unlock(&hp->happy_lock);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_SBUS
+static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie)
+{
+ struct quattro *qp = (struct quattro *) cookie;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ struct net_device *dev = qp->happy_meals[i];
+ struct happy_meal *hp = netdev_priv(dev);
+ u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
+
+ HMD(("quattro_interrupt: status=%08x ", happy_status));
+
+ if (!(happy_status & (GREG_STAT_ERRORS |
+ GREG_STAT_MIFIRQ |
+ GREG_STAT_TXALL |
+ GREG_STAT_RXTOHOST)))
+ continue;
+
+ spin_lock(&hp->happy_lock);
+
+ if (happy_status & GREG_STAT_ERRORS) {
+ HMD(("ERRORS "));
+ if (happy_meal_is_not_so_happy(hp, happy_status))
+ goto next;
+ }
+
+ if (happy_status & GREG_STAT_MIFIRQ) {
+ HMD(("MIFIRQ "));
+ happy_meal_mif_interrupt(hp);
+ }
+
+ if (happy_status & GREG_STAT_TXALL) {
+ HMD(("TXALL "));
+ happy_meal_tx(hp);
+ }
+
+ if (happy_status & GREG_STAT_RXTOHOST) {
+ HMD(("RXTOHOST "));
+ happy_meal_rx(hp, dev);
+ }
+
+ next:
+ spin_unlock(&hp->happy_lock);
+ }
+ HMD(("done\n"));
+
+ return IRQ_HANDLED;
+}
+#endif
+
+static int happy_meal_open(struct net_device *dev)
+{
+ struct happy_meal *hp = netdev_priv(dev);
+ int res;
+
+ HMD(("happy_meal_open: "));
+
+ /* On SBUS Quattro QFE cards, all hme interrupts are concentrated
+ * into a single source which we register handling at probe time.
+ */
+ if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
+ if (request_irq(dev->irq, happy_meal_interrupt,
+ IRQF_SHARED, dev->name, (void *)dev)) {
+ HMD(("EAGAIN\n"));
+ printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
+ dev->irq);
+
+ return -EAGAIN;
+ }
+ }
+
+ HMD(("to happy_meal_init\n"));
+
+ spin_lock_irq(&hp->happy_lock);
+ res = happy_meal_init(hp);
+ spin_unlock_irq(&hp->happy_lock);
+
+ if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO))
+ free_irq(dev->irq, dev);
+ return res;
+}
+
+static int happy_meal_close(struct net_device *dev)
+{
+ struct happy_meal *hp = netdev_priv(dev);
+
+ spin_lock_irq(&hp->happy_lock);
+ happy_meal_stop(hp, hp->gregs);
+ happy_meal_clean_rings(hp);
+
+ /* If auto-negotiation timer is running, kill it. */
+ del_timer(&hp->happy_timer);
+
+ spin_unlock_irq(&hp->happy_lock);
+
+ /* On Quattro QFE cards, all hme interrupts are concentrated
+ * into a single source which we register handling at probe
+ * time and never unregister.
+ */
+ if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)
+ free_irq(dev->irq, dev);
+
+ return 0;
+}
+
+#ifdef SXDEBUG
+#define SXD(x) printk x
+#else
+#define SXD(x)
+#endif
+
+static void happy_meal_tx_timeout(struct net_device *dev)
+{
+ struct happy_meal *hp = netdev_priv(dev);
+
+ printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+ tx_dump_log();
+ printk (KERN_ERR "%s: Happy Status %08x TX[%08x:%08x]\n", dev->name,
+ hme_read32(hp, hp->gregs + GREG_STAT),
+ hme_read32(hp, hp->etxregs + ETX_CFG),
+ hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));
+
+ spin_lock_irq(&hp->happy_lock);
+ happy_meal_init(hp);
+ spin_unlock_irq(&hp->happy_lock);
+
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct happy_meal *hp = netdev_priv(dev);
+ int entry;
+ u32 tx_flags;
+
+ tx_flags = TXFLAG_OWN;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const u32 csum_start_off = skb_checksum_start_offset(skb);
+ const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
+
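+ /* Hand the chip the byte offsets where checksumming starts and
+ * where the result gets stuffed, via the CSBUFBEGIN and
+ * CSLOCATION fields of the descriptor flags.
+ */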
+ tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
+ ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) |
+ ((csum_stuff_off << 20) & TXFLAG_CSLOCATION));
+ }
+
+ spin_lock_irq(&hp->happy_lock);
+
+ if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
+ netif_stop_queue(dev);
+ spin_unlock_irq(&hp->happy_lock);
+ printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
+ dev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = hp->tx_new;
+ SXD(("SX<l[%d]e[%d]>", skb->len, entry));
+ hp->tx_skbs[entry] = skb;
+
+ if (skb_shinfo(skb)->nr_frags == 0) {
+ u32 mapping, len;
+
+ len = skb->len;
+ mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
+ tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
+ hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
+ (tx_flags | (len & TXFLAG_SIZE)),
+ mapping);
+ entry = NEXT_TX(entry);
+ } else {
+ u32 first_len, first_mapping;
+ int frag, first_entry = entry;
+
+ /* We must give this initial chunk to the device last.
+ * Otherwise we could race with the device.
+ */
+ first_len = skb_headlen(skb);
+ first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
+ DMA_TO_DEVICE);
+ entry = NEXT_TX(entry);
+
+ for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+ u32 len, mapping, this_txflags;
+
+ len = this_frag->size;
+ mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
+ 0, len, DMA_TO_DEVICE);
+ this_txflags = tx_flags;
+ if (frag == skb_shinfo(skb)->nr_frags - 1)
+ this_txflags |= TXFLAG_EOP;
+ hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
+ (this_txflags | (len & TXFLAG_SIZE)),
+ mapping);
+ entry = NEXT_TX(entry);
+ }
+ hme_write_txd(hp, &hp->happy_block->happy_meal_txd[first_entry],
+ (tx_flags | TXFLAG_SOP | (first_len & TXFLAG_SIZE)),
+ first_mapping);
+ }
+
+ hp->tx_new = entry;
+
+ if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1))
+ netif_stop_queue(dev);
+
+ /* Get it going. */
+ hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP);
+
+ spin_unlock_irq(&hp->happy_lock);
+
+ tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
+ return NETDEV_TX_OK;
+}
+
+static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
+{
+ struct happy_meal *hp = netdev_priv(dev);
+
+ spin_lock_irq(&hp->happy_lock);
+ happy_meal_get_counters(hp, hp->bigmacregs);
+ spin_unlock_irq(&hp->happy_lock);
+
+ return &hp->net_stats;
+}
+
+static void happy_meal_set_multicast(struct net_device *dev)
+{
+ struct happy_meal *hp = netdev_priv(dev);
+ void __iomem *bregs = hp->bigmacregs;
+ struct netdev_hw_addr *ha;
+ u32 crc;
+
+ spin_lock_irq(&hp->happy_lock);
+
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
+ hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
+ hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
+ hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
+ hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
+ } else if (dev->flags & IFF_PROMISC) {
+ hme_write32(hp, bregs + BMAC_RXCFG,
+ hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC);
+ } else {
+ u16 hash_table[4];
+
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
+ crc >>= 26;
+ hash_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
+ hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
+ hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
+ hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
+ hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
+ }
+
+ spin_unlock_irq(&hp->happy_lock);
+}
+
+/* Ethtool support... */
+static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct happy_meal *hp = netdev_priv(dev);
+ u32 speed;
+
+ cmd->supported =
+ (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
+
+ /* XXX hardcoded stuff for now */
+ cmd->port = PORT_TP; /* XXX no MII support */
+ cmd->transceiver = XCVR_INTERNAL; /* XXX no external xcvr support */
+ cmd->phy_address = 0; /* XXX fixed PHYAD */
+
+ /* Record PHY settings. */
+ spin_lock_irq(&hp->happy_lock);
+ hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
+ hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA);
+ spin_unlock_irq(&hp->happy_lock);
+
+ if (hp->sw_bmcr & BMCR_ANENABLE) {
+ cmd->autoneg = AUTONEG_ENABLE;
+ speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
+ SPEED_100 : SPEED_10);
+ if (speed == SPEED_100)
+ cmd->duplex =
+ (hp->sw_lpa & (LPA_100FULL)) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ else
+ cmd->duplex =
+ (hp->sw_lpa & (LPA_10FULL)) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ } else {
+ cmd->autoneg = AUTONEG_DISABLE;
+ speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
+ cmd->duplex =
+ (hp->sw_bmcr & BMCR_FULLDPLX) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ }
+ ethtool_cmd_speed_set(cmd, speed);
+ return 0;
+}
+
+static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct happy_meal *hp = netdev_priv(dev);
+
+ /* Verify the settings we care about. */
+ if (cmd->autoneg != AUTONEG_ENABLE &&
+ cmd->autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+ if (cmd->autoneg == AUTONEG_DISABLE &&
+ ((ethtool_cmd_speed(cmd) != SPEED_100 &&
+ ethtool_cmd_speed(cmd) != SPEED_10) ||
+ (cmd->duplex != DUPLEX_HALF &&
+ cmd->duplex != DUPLEX_FULL)))
+ return -EINVAL;
+
+ /* Ok, do it to it. */
+ spin_lock_irq(&hp->happy_lock);
+ del_timer(&hp->happy_timer);
+ happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd);
+ spin_unlock_irq(&hp->happy_lock);
+
+ return 0;
+}
+
+static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct happy_meal *hp = netdev_priv(dev);
+
+ strcpy(info->driver, "sunhme");
+ strcpy(info->version, "2.02");
+ if (hp->happy_flags & HFLAG_PCI) {
+ struct pci_dev *pdev = hp->happy_dev;
+ strcpy(info->bus_info, pci_name(pdev));
+ }
+#ifdef CONFIG_SBUS
+ else {
+ const struct linux_prom_registers *regs;
+ struct platform_device *op = hp->happy_dev;
+ regs = of_get_property(op->dev.of_node, "regs", NULL);
+ if (regs)
+ sprintf(info->bus_info, "SBUS:%d",
+ regs->which_io);
+ }
+#endif
+}
+
+static u32 hme_get_link(struct net_device *dev)
+{
+ struct happy_meal *hp = netdev_priv(dev);
+
+ spin_lock_irq(&hp->happy_lock);
+ hp->sw_bmsr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMSR);
+ spin_unlock_irq(&hp->happy_lock);
+
+ return hp->sw_bmsr & BMSR_LSTATUS;
+}
+
+static const struct ethtool_ops hme_ethtool_ops = {
+ .get_settings = hme_get_settings,
+ .set_settings = hme_set_settings,
+ .get_drvinfo = hme_get_drvinfo,
+ .get_link = hme_get_link,
+};
+
+static int hme_version_printed;
+
+#ifdef CONFIG_SBUS
+/* Given a happy meal sbus device, find its quattro parent.
+ * If none exist, allocate and return a new one.
+ *
+ * Return NULL on failure.
+ */
+static struct quattro * __devinit quattro_sbus_find(struct platform_device *child)
+{
+ struct device *parent = child->dev.parent;
+ struct platform_device *op;
+ struct quattro *qp;
+
+ op = to_platform_device(parent);
+ qp = dev_get_drvdata(&op->dev);
+ if (qp)
+ return qp;
+
+ qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
+ if (qp != NULL) {
+ int i;
+
+ for (i = 0; i < 4; i++)
+ qp->happy_meals[i] = NULL;
+
+ qp->quattro_dev = child;
+ qp->next = qfe_sbus_list;
+ qfe_sbus_list = qp;
+
+ dev_set_drvdata(&op->dev, qp);
+ }
+ return qp;
+}
+
+/* After all quattro cards have been probed, we call these functions
+ * to register the IRQ handlers for the cards that have been
+ * successfully probed, and skip the cards that failed to initialize.
+ */
+static int __init quattro_sbus_register_irqs(void)
+{
+ struct quattro *qp;
+
+ for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
+ struct platform_device *op = qp->quattro_dev;
+ int err, qfe_slot, skip = 0;
+
+ for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
+ if (!qp->happy_meals[qfe_slot])
+ skip = 1;
+ }
+ if (skip)
+ continue;
+
+ err = request_irq(op->archdata.irqs[0],
+ quattro_sbus_interrupt,
+ IRQF_SHARED, "Quattro",
+ qp);
+ if (err != 0) {
+ printk(KERN_ERR "Quattro HME: IRQ registration "
+ "error %d.\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void quattro_sbus_free_irqs(void)
+{
+ struct quattro *qp;
+
+ for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
+ struct platform_device *op = qp->quattro_dev;
+ int qfe_slot, skip = 0;
+
+ for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
+ if (!qp->happy_meals[qfe_slot])
+ skip = 1;
+ }
+ if (skip)
+ continue;
+
+ free_irq(op->archdata.irqs[0], qp);
+ }
+}
+#endif /* CONFIG_SBUS */
+
+#ifdef CONFIG_PCI
+static struct quattro * __devinit quattro_pci_find(struct pci_dev *pdev)
+{
+ struct pci_dev *bdev = pdev->bus->self;
+ struct quattro *qp;
+
+ if (!bdev)
+ return NULL;
+ for (qp = qfe_pci_list; qp != NULL; qp = qp->next) {
+ struct pci_dev *qpdev = qp->quattro_dev;
+
+ if (qpdev == bdev)
+ return qp;
+ }
+ qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
+ if (qp != NULL) {
+ int i;
+
+ for (i = 0; i < 4; i++)
+ qp->happy_meals[i] = NULL;
+
+ qp->quattro_dev = bdev;
+ qp->next = qfe_pci_list;
+ qfe_pci_list = qp;
+
+ /* No range tricks necessary on PCI. */
+ qp->nranges = 0;
+ }
+ return qp;
+}
+#endif /* CONFIG_PCI */
+
+static const struct net_device_ops hme_netdev_ops = {
+ .ndo_open = happy_meal_open,
+ .ndo_stop = happy_meal_close,
+ .ndo_start_xmit = happy_meal_start_xmit,
+ .ndo_tx_timeout = happy_meal_tx_timeout,
+ .ndo_get_stats = happy_meal_get_stats,
+ .ndo_set_rx_mode = happy_meal_set_multicast,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+#ifdef CONFIG_SBUS
+static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
+{
+ struct device_node *dp = op->dev.of_node, *sbus_dp;
+ struct quattro *qp = NULL;
+ struct happy_meal *hp;
+ struct net_device *dev;
+ int i, qfe_slot = -1;
+ int err = -ENODEV;
+
+ sbus_dp = op->dev.parent->of_node;
+
+ /* We can match PCI devices too, do not accept those here. */
+ if (strcmp(sbus_dp->name, "sbus"))
+ return err;
+
+ if (is_qfe) {
+ qp = quattro_sbus_find(op);
+ if (qp == NULL)
+ goto err_out;
+ for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
+ if (qp->happy_meals[qfe_slot] == NULL)
+ break;
+ if (qfe_slot == 4)
+ goto err_out;
+ }
+
+ err = -ENOMEM;
+ dev = alloc_etherdev(sizeof(struct happy_meal));
+ if (!dev)
+ goto err_out;
+ SET_NETDEV_DEV(dev, &op->dev);
+
+ if (hme_version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+
+ /* If the user did not specify a MAC address, use
+ * the Quattro local-mac-address property...
+ */
+ for (i = 0; i < 6; i++) {
+ if (macaddr[i] != 0)
+ break;
+ }
+ if (i < 6) { /* a mac address was given */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = macaddr[i];
+ macaddr[5]++;
+ } else {
+ const unsigned char *addr;
+ int len;
+
+ addr = of_get_property(dp, "local-mac-address", &len);
+
+ if (qfe_slot != -1 && addr && len == 6)
+ memcpy(dev->dev_addr, addr, 6);
+ else
+ memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+ }
+
+ hp = netdev_priv(dev);
+
+ hp->happy_dev = op;
+ hp->dma_dev = &op->dev;
+
+ spin_lock_init(&hp->happy_lock);
+
+ err = -ENODEV;
+ if (qp != NULL) {
+ hp->qfe_parent = qp;
+ hp->qfe_ent = qfe_slot;
+ qp->happy_meals[qfe_slot] = dev;
+ }
+
+ hp->gregs = of_ioremap(&op->resource[0], 0,
+ GREG_REG_SIZE, "HME Global Regs");
+ if (!hp->gregs) {
+ printk(KERN_ERR "happymeal: Cannot map global registers.\n");
+ goto err_out_free_netdev;
+ }
+
+ hp->etxregs = of_ioremap(&op->resource[1], 0,
+ ETX_REG_SIZE, "HME TX Regs");
+ if (!hp->etxregs) {
+ printk(KERN_ERR "happymeal: Cannot map MAC TX registers.\n");
+ goto err_out_iounmap;
+ }
+
+ hp->erxregs = of_ioremap(&op->resource[2], 0,
+ ERX_REG_SIZE, "HME RX Regs");
+ if (!hp->erxregs) {
+ printk(KERN_ERR "happymeal: Cannot map MAC RX registers.\n");
+ goto err_out_iounmap;
+ }
+
+ hp->bigmacregs = of_ioremap(&op->resource[3], 0,
+ BMAC_REG_SIZE, "HME BIGMAC Regs");
+ if (!hp->bigmacregs) {
+ printk(KERN_ERR "happymeal: Cannot map BIGMAC registers.\n");
+ goto err_out_iounmap;
+ }
+
+ hp->tcvregs = of_ioremap(&op->resource[4], 0,
+ TCVR_REG_SIZE, "HME Tranceiver Regs");
+ if (!hp->tcvregs) {
+ printk(KERN_ERR "happymeal: Cannot map TCVR registers.\n");
+ goto err_out_iounmap;
+ }
+
+ hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
+ if (hp->hm_revision == 0xff)
+ hp->hm_revision = 0xa0;
+
+ /* Now enable the feature flags we can. */
+ if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
+ hp->happy_flags = HFLAG_20_21;
+ else if (hp->hm_revision != 0xa0)
+ hp->happy_flags = HFLAG_NOT_A0;
+
+ if (qp != NULL)
+ hp->happy_flags |= HFLAG_QUATTRO;
+
+ /* Get the supported DVMA burst sizes from our Happy SBUS. */
+ hp->happy_bursts = of_getintprop_default(sbus_dp,
+ "burst-sizes", 0x00);
+
+ hp->happy_block = dma_alloc_coherent(hp->dma_dev,
+ PAGE_SIZE,
+ &hp->hblock_dvma,
+ GFP_ATOMIC);
+ err = -ENOMEM;
+ if (!hp->happy_block) {
+ printk(KERN_ERR "happymeal: Cannot allocate descriptors.\n");
+ goto err_out_iounmap;
+ }
+
+ /* Force check of the link first time we are brought up. */
+ hp->linkcheck = 0;
+
+ /* Force timer state to 'asleep' with count of zero. */
+ hp->timer_state = asleep;
+ hp->timer_ticks = 0;
+
+ init_timer(&hp->happy_timer);
+
+ hp->dev = dev;
+ dev->netdev_ops = &hme_netdev_ops;
+ dev->watchdog_timeo = 5*HZ;
+ dev->ethtool_ops = &hme_ethtool_ops;
+
+ /* Happy Meal can do it all... */
+ dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= dev->hw_features | NETIF_F_RXCSUM;
+
+ dev->irq = op->archdata.irqs[0];
+
+#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
+ /* Hook up SBUS register/descriptor accessors. */
+ hp->read_desc32 = sbus_hme_read_desc32;
+ hp->write_txd = sbus_hme_write_txd;
+ hp->write_rxd = sbus_hme_write_rxd;
+ hp->read32 = sbus_hme_read32;
+ hp->write32 = sbus_hme_write32;
+#endif
+
+ /* Grrr, Happy Meal comes up by default not advertising
+ * full duplex 100baseT capabilities, fix this.
+ */
+ spin_lock_irq(&hp->happy_lock);
+ happy_meal_set_initial_advertisement(hp);
+ spin_unlock_irq(&hp->happy_lock);
+
+ err = register_netdev(hp->dev);
+ if (err) {
+ printk(KERN_ERR "happymeal: Cannot register net device, "
+ "aborting.\n");
+ goto err_out_free_coherent;
+ }
+
+ dev_set_drvdata(&op->dev, hp);
+
+ if (qfe_slot != -1)
+ printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ",
+ dev->name, qfe_slot);
+ else
+ printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ",
+ dev->name);
+
+ printk("%pM\n", dev->dev_addr);
+
+ return 0;
+
+err_out_free_coherent:
+ dma_free_coherent(hp->dma_dev,
+ PAGE_SIZE,
+ hp->happy_block,
+ hp->hblock_dvma);
+
+err_out_iounmap:
+ if (hp->gregs)
+ of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
+ if (hp->etxregs)
+ of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
+ if (hp->erxregs)
+ of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
+ if (hp->bigmacregs)
+ of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
+ if (hp->tcvregs)
+ of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
+
+ if (qp)
+ qp->happy_meals[qfe_slot] = NULL;
+
+err_out_free_netdev:
+ free_netdev(dev);
+
+err_out:
+ return err;
+}
+#endif
+
+#ifdef CONFIG_PCI
+#ifndef CONFIG_SPARC
+static int is_quattro_p(struct pci_dev *pdev)
+{
+ struct pci_dev *busdev = pdev->bus->self;
+ struct list_head *tmp;
+ int n_hmes;
+
+ if (busdev == NULL ||
+ busdev->vendor != PCI_VENDOR_ID_DEC ||
+ busdev->device != PCI_DEVICE_ID_DEC_21153)
+ return 0;
+
+ n_hmes = 0;
+ tmp = pdev->bus->devices.next;
+ while (tmp != &pdev->bus->devices) {
+ struct pci_dev *this_pdev = pci_dev_b(tmp);
+
+ if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
+ this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
+ n_hmes++;
+
+ tmp = tmp->next;
+ }
+
+ if (n_hmes != 4)
+ return 0;
+
+ return 1;
+}
+
+/* Fetch MAC address from vital product data of PCI ROM. */
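+/* The scan below matches a byte pattern in the ROM's VPD: 0x90 opens a
+ * VPD-R (read-only data) resource, and 0x4e 0x41 is the ASCII keyword
+ * "NA" (network address) with a 6-byte payload holding the MAC.  A
+ * matching run of bytes thus looks like:
+ *
+ *	90 00 09 4e 41 06 <m0> <m1> <m2> <m3> <m4> <m5>
+ */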
+static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr)
+{
+ int this_offset;
+
+ for (this_offset = 0x20; this_offset < len; this_offset++) {
+ void __iomem *p = rom_base + this_offset;
+
+ if (readb(p + 0) != 0x90 ||
+ readb(p + 1) != 0x00 ||
+ readb(p + 2) != 0x09 ||
+ readb(p + 3) != 0x4e ||
+ readb(p + 4) != 0x41 ||
+ readb(p + 5) != 0x06)
+ continue;
+
+ this_offset += 6;
+ p += 6;
+
+ if (index == 0) {
+ int i;
+
+ for (i = 0; i < 6; i++)
+ dev_addr[i] = readb(p + i);
+ return 1;
+ }
+ index--;
+ }
+ return 0;
+}
+
+static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr)
+{
+ size_t size;
+ void __iomem *p = pci_map_rom(pdev, &size);
+
+ if (p) {
+ int index = 0;
+ int found;
+
+ if (is_quattro_p(pdev))
+ index = PCI_SLOT(pdev->devfn);
+
+ found = readb(p) == 0x55 &&
+ readb(p + 1) == 0xaa &&
+ find_eth_addr_in_vpd(p, (64 * 1024), index, dev_addr);
+ pci_unmap_rom(pdev, p);
+ if (found)
+ return;
+ }
+
+ /* Sun MAC prefix then 3 random bytes. */
+ dev_addr[0] = 0x08;
+ dev_addr[1] = 0x00;
+ dev_addr[2] = 0x20;
+ get_random_bytes(&dev_addr[3], 3);
+}
+#endif /* !(CONFIG_SPARC) */
+
+static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct quattro *qp = NULL;
+#ifdef CONFIG_SPARC
+ struct device_node *dp;
+#endif
+ struct happy_meal *hp;
+ struct net_device *dev;
+ void __iomem *hpreg_base;
+ unsigned long hpreg_res;
+ int i, qfe_slot = -1;
+ char prom_name[64];
+ int err;
+
+ /* Now make sure pci_dev cookie is there. */
+#ifdef CONFIG_SPARC
+ dp = pci_device_to_OF_node(pdev);
+ strcpy(prom_name, dp->name);
+#else
+ if (is_quattro_p(pdev))
+ strcpy(prom_name, "SUNW,qfe");
+ else
+ strcpy(prom_name, "SUNW,hme");
+#endif
+
+ err = -ENODEV;
+
+ if (pci_enable_device(pdev))
+ goto err_out;
+ pci_set_master(pdev);
+
+ if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
+ qp = quattro_pci_find(pdev);
+ if (qp == NULL)
+ goto err_out;
+ for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
+ if (qp->happy_meals[qfe_slot] == NULL)
+ break;
+ if (qfe_slot == 4)
+ goto err_out;
+ }
+
+ dev = alloc_etherdev(sizeof(struct happy_meal));
+ err = -ENOMEM;
+ if (!dev)
+ goto err_out;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (hme_version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+
+ dev->base_addr = (long) pdev;
+
+ hp = netdev_priv(dev);
+
+ hp->happy_dev = pdev;
+ hp->dma_dev = &pdev->dev;
+
+ spin_lock_init(&hp->happy_lock);
+
+ if (qp != NULL) {
+ hp->qfe_parent = qp;
+ hp->qfe_ent = qfe_slot;
+ qp->happy_meals[qfe_slot] = dev;
+ }
+
+ hpreg_res = pci_resource_start(pdev, 0);
+ err = -ENODEV;
+ if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
+ printk(KERN_ERR "happymeal(PCI): Cannot find proper PCI device base address.\n");
+ goto err_out_clear_quattro;
+ }
+ if (pci_request_regions(pdev, DRV_NAME)) {
+ printk(KERN_ERR "happymeal(PCI): Cannot obtain PCI resources, "
+ "aborting.\n");
+ goto err_out_clear_quattro;
+ }
+
+ hpreg_base = ioremap(hpreg_res, 0x8000);
+ if (hpreg_base == NULL) {
+ printk(KERN_ERR "happymeal(PCI): Unable to remap card memory.\n");
+ goto err_out_free_res;
+ }
+
+ for (i = 0; i < 6; i++) {
+ if (macaddr[i] != 0)
+ break;
+ }
+ if (i < 6) { /* a mac address was given */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = macaddr[i];
+ macaddr[5]++;
+ } else {
+#ifdef CONFIG_SPARC
+ const unsigned char *addr;
+ int len;
+
+ if (qfe_slot != -1 &&
+ (addr = of_get_property(dp, "local-mac-address", &len))
+ != NULL &&
+ len == 6) {
+ memcpy(dev->dev_addr, addr, 6);
+ } else {
+ memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+ }
+#else
+ get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
+#endif
+ }
+
+ /* Layout registers. */
+ hp->gregs = (hpreg_base + 0x0000UL);
+ hp->etxregs = (hpreg_base + 0x2000UL);
+ hp->erxregs = (hpreg_base + 0x4000UL);
+ hp->bigmacregs = (hpreg_base + 0x6000UL);
+ hp->tcvregs = (hpreg_base + 0x7000UL);
+
+#ifdef CONFIG_SPARC
+ hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
+ if (hp->hm_revision == 0xff)
+ hp->hm_revision = 0xc0 | (pdev->revision & 0x0f);
+#else
+ /* Assume revision 0x20, which works on non-sparc hosts. */
+ hp->hm_revision = 0x20;
+#endif
+
+ /* Now enable the feature flags we can. */
+ if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
+ hp->happy_flags = HFLAG_20_21;
+ else if (hp->hm_revision != 0xa0 && hp->hm_revision != 0xc0)
+ hp->happy_flags = HFLAG_NOT_A0;
+
+ if (qp != NULL)
+ hp->happy_flags |= HFLAG_QUATTRO;
+
+ /* And of course, indicate this is PCI. */
+ hp->happy_flags |= HFLAG_PCI;
+
+#ifdef CONFIG_SPARC
+ /* Assume PCI happy meals can handle all burst sizes. */
+ hp->happy_bursts = DMA_BURSTBITS;
+#endif
+
+ hp->happy_block = (struct hmeal_init_block *)
+ dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &hp->hblock_dvma, GFP_KERNEL);
+
+ err = -ENODEV;
+ if (!hp->happy_block) {
+ printk(KERN_ERR "happymeal(PCI): Cannot get hme init block.\n");
+ goto err_out_iounmap;
+ }
+
+ hp->linkcheck = 0;
+ hp->timer_state = asleep;
+ hp->timer_ticks = 0;
+
+ init_timer(&hp->happy_timer);
+
+ hp->dev = dev;
+ dev->netdev_ops = &hme_netdev_ops;
+ dev->watchdog_timeo = 5*HZ;
+ dev->ethtool_ops = &hme_ethtool_ops;
+ dev->irq = pdev->irq;
+ dev->dma = 0;
+
+ /* Happy Meal can do it all... */
+ dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= dev->hw_features | NETIF_F_RXCSUM;
+
+#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
+ /* Hook up PCI register/descriptor accessors. */
+ hp->read_desc32 = pci_hme_read_desc32;
+ hp->write_txd = pci_hme_write_txd;
+ hp->write_rxd = pci_hme_write_rxd;
+ hp->read32 = pci_hme_read32;
+ hp->write32 = pci_hme_write32;
+#endif
+
+ /* Grrr, Happy Meal comes up by default not advertising
+ * full duplex 100baseT capabilities, fix this.
+ */
+ spin_lock_irq(&hp->happy_lock);
+ happy_meal_set_initial_advertisement(hp);
+ spin_unlock_irq(&hp->happy_lock);
+
+ err = register_netdev(hp->dev);
+ if (err) {
+ printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
+ "aborting.\n");
+ goto err_out_iounmap;
+ }
+
+ dev_set_drvdata(&pdev->dev, hp);
+
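+ /* qfe_slot is 0 only for the first port of a Quattro card (and
+ * -1 for a plain HME), so the bridge is reported once per card.
+ */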
+ if (!qfe_slot) {
+ struct pci_dev *qpdev = qp->quattro_dev;
+
+ prom_name[0] = 0;
+ if (!strncmp(dev->name, "eth", 3)) {
+ int i = simple_strtoul(dev->name + 3, NULL, 10);
+ sprintf(prom_name, "-%d", i + 3);
+ }
+ printk(KERN_INFO "%s%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet ", dev->name, prom_name);
+ if (qpdev->vendor == PCI_VENDOR_ID_DEC &&
+ qpdev->device == PCI_DEVICE_ID_DEC_21153)
+ printk("DEC 21153 PCI Bridge\n");
+ else
+ printk("unknown bridge %04x.%04x\n",
+ qpdev->vendor, qpdev->device);
+ }
+
+ if (qfe_slot != -1)
+ printk(KERN_INFO "%s: Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet ",
+ dev->name, qfe_slot);
+ else
+ printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet ",
+ dev->name);
+
+ printk("%pM\n", dev->dev_addr);
+
+ return 0;
+
+err_out_iounmap:
+ iounmap(hp->gregs);
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_clear_quattro:
+ if (qp != NULL)
+ qp->happy_meals[qfe_slot] = NULL;
+
+ free_netdev(dev);
+
+err_out:
+ return err;
+}
+
+static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
+{
+ struct happy_meal *hp = dev_get_drvdata(&pdev->dev);
+ struct net_device *net_dev = hp->dev;
+
+ unregister_netdev(net_dev);
+
+ dma_free_coherent(hp->dma_dev, PAGE_SIZE,
+ hp->happy_block, hp->hblock_dvma);
+ iounmap(hp->gregs);
+ pci_release_regions(hp->happy_dev);
+
+ free_netdev(net_dev);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(pci, happymeal_pci_ids);
+
+static struct pci_driver hme_pci_driver = {
+ .name = "hme",
+ .id_table = happymeal_pci_ids,
+ .probe = happy_meal_pci_probe,
+ .remove = __devexit_p(happy_meal_pci_remove),
+};
+
+static int __init happy_meal_pci_init(void)
+{
+ return pci_register_driver(&hme_pci_driver);
+}
+
+static void happy_meal_pci_exit(void)
+{
+ pci_unregister_driver(&hme_pci_driver);
+
+ while (qfe_pci_list) {
+ struct quattro *qfe = qfe_pci_list;
+ struct quattro *next = qfe->next;
+
+ kfree(qfe);
+
+ qfe_pci_list = next;
+ }
+}
+
+#endif
+
+#ifdef CONFIG_SBUS
+static const struct of_device_id hme_sbus_match[];
+static int __devinit hme_sbus_probe(struct platform_device *op)
+{
+ const struct of_device_id *match;
+ struct device_node *dp = op->dev.of_node;
+ const char *model = of_get_property(dp, "model", NULL);
+ int is_qfe;
+
+ match = of_match_device(hme_sbus_match, &op->dev);
+ if (!match)
+ return -EINVAL;
+ is_qfe = (match->data != NULL);
+
+ if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
+ is_qfe = 1;
+
+ return happy_meal_sbus_probe_one(op, is_qfe);
+}
+
+static int __devexit hme_sbus_remove(struct platform_device *op)
+{
+ struct happy_meal *hp = dev_get_drvdata(&op->dev);
+ struct net_device *net_dev = hp->dev;
+
+ unregister_netdev(net_dev);
+
+ /* XXX qfe parent interrupt... */
+
+ of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
+ of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
+ of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
+ of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
+ of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
+ dma_free_coherent(hp->dma_dev,
+ PAGE_SIZE,
+ hp->happy_block,
+ hp->hblock_dvma);
+
+ free_netdev(net_dev);
+
+ dev_set_drvdata(&op->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id hme_sbus_match[] = {
+ {
+ .name = "SUNW,hme",
+ },
+ {
+ .name = "SUNW,qfe",
+ .data = (void *) 1,
+ },
+ {
+ .name = "qfe",
+ .data = (void *) 1,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hme_sbus_match);
+
+static struct platform_driver hme_sbus_driver = {
+ .driver = {
+ .name = "hme",
+ .owner = THIS_MODULE,
+ .of_match_table = hme_sbus_match,
+ },
+ .probe = hme_sbus_probe,
+ .remove = __devexit_p(hme_sbus_remove),
+};
+
+static int __init happy_meal_sbus_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&hme_sbus_driver);
+ if (!err)
+ err = quattro_sbus_register_irqs();
+
+ return err;
+}
+
+static void happy_meal_sbus_exit(void)
+{
+ platform_driver_unregister(&hme_sbus_driver);
+ quattro_sbus_free_irqs();
+
+ while (qfe_sbus_list) {
+ struct quattro *qfe = qfe_sbus_list;
+ struct quattro *next = qfe->next;
+
+ kfree(qfe);
+
+ qfe_sbus_list = next;
+ }
+}
+#endif
+
+static int __init happy_meal_probe(void)
+{
+ int err = 0;
+
+#ifdef CONFIG_SBUS
+ err = happy_meal_sbus_init();
+#endif
+#ifdef CONFIG_PCI
+ if (!err) {
+ err = happy_meal_pci_init();
+#ifdef CONFIG_SBUS
+ if (err)
+ happy_meal_sbus_exit();
+#endif
+ }
+#endif
+
+ return err;
+}
+
+
+static void __exit happy_meal_exit(void)
+{
+#ifdef CONFIG_SBUS
+ happy_meal_sbus_exit();
+#endif
+#ifdef CONFIG_PCI
+ happy_meal_pci_exit();
+#endif
+}
+
+module_init(happy_meal_probe);
+module_exit(happy_meal_exit);
diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h
new file mode 100644
index 000000000000..64f278360d89
--- /dev/null
+++ b/drivers/net/ethernet/sun/sunhme.h
@@ -0,0 +1,512 @@
+/* $Id: sunhme.h,v 1.33 2001/08/03 06:23:04 davem Exp $
+ * sunhme.h: Definitions for Sparc HME/BigMac 10/100baseT ethernet driver.
+ * Also known as the "Happy Meal".
+ *
+ * Copyright (C) 1996, 1999 David S. Miller (davem@redhat.com)
+ */
+
+#ifndef _SUNHME_H
+#define _SUNHME_H
+
+#include <linux/pci.h>
+
+/* Happy Meal global registers. */
+#define GREG_SWRESET 0x000UL /* Software Reset */
+#define GREG_CFG 0x004UL /* Config Register */
+#define GREG_STAT 0x108UL /* Status */
+#define GREG_IMASK 0x10cUL /* Interrupt Mask */
+#define GREG_REG_SIZE 0x110UL
+
+/* Global reset register. */
+#define GREG_RESET_ETX 0x01
+#define GREG_RESET_ERX 0x02
+#define GREG_RESET_ALL 0x03
+
+/* Global config register. */
+#define GREG_CFG_BURSTMSK 0x03
+#define GREG_CFG_BURST16 0x00
+#define GREG_CFG_BURST32 0x01
+#define GREG_CFG_BURST64 0x02
+#define GREG_CFG_64BIT 0x04
+#define GREG_CFG_PARITY 0x08
+#define GREG_CFG_RESV 0x10
+
+/* Global status register. */
+#define GREG_STAT_GOTFRAME 0x00000001 /* Received a frame */
+#define GREG_STAT_RCNTEXP 0x00000002 /* Receive frame counter expired */
+#define GREG_STAT_ACNTEXP 0x00000004 /* Align-error counter expired */
+#define GREG_STAT_CCNTEXP 0x00000008 /* CRC-error counter expired */
+#define GREG_STAT_LCNTEXP 0x00000010 /* Length-error counter expired */
+#define GREG_STAT_RFIFOVF 0x00000020 /* Receive FIFO overflow */
+#define GREG_STAT_CVCNTEXP 0x00000040 /* Code-violation counter expired */
+#define GREG_STAT_STSTERR 0x00000080 /* Test error in XIF for SQE */
+#define GREG_STAT_SENTFRAME 0x00000100 /* Transmitted a frame */
+#define GREG_STAT_TFIFO_UND 0x00000200 /* Transmit FIFO underrun */
+#define GREG_STAT_MAXPKTERR 0x00000400 /* Max-packet size error */
+#define GREG_STAT_NCNTEXP 0x00000800 /* Normal-collision counter expired */
+#define GREG_STAT_ECNTEXP 0x00001000 /* Excess-collision counter expired */
+#define GREG_STAT_LCCNTEXP 0x00002000 /* Late-collision counter expired */
+#define GREG_STAT_FCNTEXP 0x00004000 /* First-collision counter expired */
+#define GREG_STAT_DTIMEXP 0x00008000 /* Defer-timer expired */
+#define GREG_STAT_RXTOHOST 0x00010000 /* Moved from receive-FIFO to host memory */
+#define GREG_STAT_NORXD 0x00020000 /* No more receive descriptors */
+#define GREG_STAT_RXERR 0x00040000 /* Error during receive dma */
+#define GREG_STAT_RXLATERR 0x00080000 /* Late error during receive dma */
+#define GREG_STAT_RXPERR 0x00100000 /* Parity error during receive dma */
+#define GREG_STAT_RXTERR 0x00200000 /* Tag error during receive dma */
+#define GREG_STAT_EOPERR 0x00400000 /* Transmit descriptor did not have EOP set */
+#define GREG_STAT_MIFIRQ 0x00800000 /* MIF is signaling an interrupt condition */
+#define GREG_STAT_HOSTTOTX 0x01000000 /* Moved from host memory to transmit-FIFO */
+#define GREG_STAT_TXALL 0x02000000 /* Transmitted all packets in the tx-fifo */
+#define GREG_STAT_TXEACK 0x04000000 /* Error during transmit dma */
+#define GREG_STAT_TXLERR 0x08000000 /* Late error during transmit dma */
+#define GREG_STAT_TXPERR 0x10000000 /* Parity error during transmit dma */
+#define GREG_STAT_TXTERR 0x20000000 /* Tag error during transmit dma */
+#define GREG_STAT_SLVERR 0x40000000 /* PIO access got an error */
+#define GREG_STAT_SLVPERR 0x80000000 /* PIO access got a parity error */
+
+/* All interesting error conditions. */
+#define GREG_STAT_ERRORS 0xfc7efefc
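+/* That is, every status bit except the ones seen in normal operation:
+ * GOTFRAME, RCNTEXP, SENTFRAME, RXTOHOST, MIFIRQ, HOSTTOTX and TXALL.
+ */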
+
+/* Global interrupt mask register. */
+#define GREG_IMASK_GOTFRAME 0x00000001 /* Received a frame */
+#define GREG_IMASK_RCNTEXP 0x00000002 /* Receive frame counter expired */
+#define GREG_IMASK_ACNTEXP 0x00000004 /* Align-error counter expired */
+#define GREG_IMASK_CCNTEXP 0x00000008 /* CRC-error counter expired */
+#define GREG_IMASK_LCNTEXP 0x00000010 /* Length-error counter expired */
+#define GREG_IMASK_RFIFOVF 0x00000020 /* Receive FIFO overflow */
+#define GREG_IMASK_CVCNTEXP 0x00000040 /* Code-violation counter expired */
+#define GREG_IMASK_STSTERR 0x00000080 /* Test error in XIF for SQE */
+#define GREG_IMASK_SENTFRAME 0x00000100 /* Transmitted a frame */
+#define GREG_IMASK_TFIFO_UND 0x00000200 /* Transmit FIFO underrun */
+#define GREG_IMASK_MAXPKTERR 0x00000400 /* Max-packet size error */
+#define GREG_IMASK_NCNTEXP 0x00000800 /* Normal-collision counter expired */
+#define GREG_IMASK_ECNTEXP 0x00001000 /* Excess-collision counter expired */
+#define GREG_IMASK_LCCNTEXP 0x00002000 /* Late-collision counter expired */
+#define GREG_IMASK_FCNTEXP 0x00004000 /* First-collision counter expired */
+#define GREG_IMASK_DTIMEXP 0x00008000 /* Defer-timer expired */
+#define GREG_IMASK_RXTOHOST 0x00010000 /* Moved from receive-FIFO to host memory */
+#define GREG_IMASK_NORXD 0x00020000 /* No more receive descriptors */
+#define GREG_IMASK_RXERR 0x00040000 /* Error during receive dma */
+#define GREG_IMASK_RXLATERR 0x00080000 /* Late error during receive dma */
+#define GREG_IMASK_RXPERR 0x00100000 /* Parity error during receive dma */
+#define GREG_IMASK_RXTERR 0x00200000 /* Tag error during receive dma */
+#define GREG_IMASK_EOPERR 0x00400000 /* Transmit descriptor did not have EOP set */
+#define GREG_IMASK_MIFIRQ 0x00800000 /* MIF is signaling an interrupt condition */
+#define GREG_IMASK_HOSTTOTX 0x01000000 /* Moved from host memory to transmit-FIFO */
+#define GREG_IMASK_TXALL 0x02000000 /* Transmitted all packets in the tx-fifo */
+#define GREG_IMASK_TXEACK 0x04000000 /* Error during transmit dma */
+#define GREG_IMASK_TXLERR 0x08000000 /* Late error during transmit dma */
+#define GREG_IMASK_TXPERR 0x10000000 /* Parity error during transmit dma */
+#define GREG_IMASK_TXTERR 0x20000000 /* Tag error during transmit dma */
+#define GREG_IMASK_SLVERR 0x40000000 /* PIO access got an error */
+#define GREG_IMASK_SLVPERR 0x80000000 /* PIO access got a parity error */
+
+/* Happy Meal external transmitter registers. */
+#define ETX_PENDING 0x00UL /* Transmit pending/wakeup register */
+#define ETX_CFG 0x04UL /* Transmit config register */
+#define ETX_RING 0x08UL /* Transmit ring pointer */
+#define ETX_BBASE 0x0cUL /* Transmit buffer base */
+#define ETX_BDISP 0x10UL /* Transmit buffer displacement */
+#define ETX_FIFOWPTR 0x14UL /* FIFO write ptr */
+#define ETX_FIFOSWPTR 0x18UL /* FIFO write ptr (shadow register) */
+#define ETX_FIFORPTR 0x1cUL /* FIFO read ptr */
+#define ETX_FIFOSRPTR 0x20UL /* FIFO read ptr (shadow register) */
+#define ETX_FIFOPCNT 0x24UL /* FIFO packet counter */
+#define ETX_SMACHINE 0x28UL /* Transmitter state machine */
+#define ETX_RSIZE 0x2cUL /* Ring descriptor size */
+#define ETX_BPTR 0x30UL /* Transmit data buffer ptr */
+#define ETX_REG_SIZE 0x34UL
+
+/* ETX transmit pending register. */
+#define ETX_TP_DMAWAKEUP 0x00000001 /* Restart transmit dma */
+
+/* ETX config register. */
+#define ETX_CFG_DMAENABLE 0x00000001 /* Enable transmit dma */
+#define ETX_CFG_FIFOTHRESH 0x000003fe /* Transmit FIFO threshold */
+#define ETX_CFG_IRQDAFTER 0x00000400 /* Interrupt after TX-FIFO drained */
+#define ETX_CFG_IRQDBEFORE 0x00000000 /* Interrupt before TX-FIFO drained */
+
+#define ETX_RSIZE_SHIFT 4
+
+/* Happy Meal external receiver registers. */
+#define ERX_CFG 0x00UL /* Receiver config register */
+#define ERX_RING 0x04UL /* Receiver ring ptr */
+#define ERX_BPTR 0x08UL /* Receiver buffer ptr */
+#define ERX_FIFOWPTR 0x0cUL /* FIFO write ptr */
+#define ERX_FIFOSWPTR 0x10UL /* FIFO write ptr (shadow register) */
+#define ERX_FIFORPTR 0x14UL /* FIFO read ptr */
+#define ERX_FIFOSRPTR 0x18UL /* FIFO read ptr (shadow register) */
+#define ERX_SMACHINE 0x1cUL /* Receiver state machine */
+#define ERX_REG_SIZE 0x20UL
+
+/* ERX config register. */
+#define ERX_CFG_DMAENABLE 0x00000001 /* Enable receive DMA */
+#define ERX_CFG_RESV1 0x00000006 /* Unused... */
+#define ERX_CFG_BYTEOFFSET 0x00000038 /* Receive first byte offset */
+#define ERX_CFG_RESV2 0x000001c0 /* Unused... */
+#define ERX_CFG_SIZE32 0x00000000 /* Receive ring size == 32 */
+#define ERX_CFG_SIZE64 0x00000200 /* Receive ring size == 64 */
+#define ERX_CFG_SIZE128 0x00000400 /* Receive ring size == 128 */
+#define ERX_CFG_SIZE256 0x00000600 /* Receive ring size == 256 */
+#define ERX_CFG_RESV3 0x0000f800 /* Unused... */
+#define ERX_CFG_CSUMSTART 0x007f0000 /* Offset of checksum start,
+ * in halfwords. */
+
+/* I'd like a Big Mac, small fries, small coke, and SparcLinux please. */
+#define BMAC_XIFCFG 0x0000UL /* XIF config register */
+ /* 0x4-->0x204, reserved */
+#define BMAC_TXSWRESET 0x208UL /* Transmitter software reset */
+#define BMAC_TXCFG 0x20cUL /* Transmitter config register */
+#define BMAC_IGAP1 0x210UL /* Inter-packet gap 1 */
+#define BMAC_IGAP2 0x214UL /* Inter-packet gap 2 */
+#define BMAC_ALIMIT 0x218UL /* Transmit attempt limit */
+#define BMAC_STIME 0x21cUL /* Transmit slot time */
+#define BMAC_PLEN 0x220UL /* Size of transmit preamble */
+#define BMAC_PPAT 0x224UL /* Pattern for transmit preamble */
+#define BMAC_TXSDELIM 0x228UL /* Transmit delimiter */
+#define BMAC_JSIZE 0x22cUL /* Jam size */
+#define BMAC_TXMAX 0x230UL /* Transmit max pkt size */
+#define BMAC_TXMIN 0x234UL /* Transmit min pkt size */
+#define BMAC_PATTEMPT 0x238UL /* Count of transmit peak attempts */
+#define BMAC_DTCTR 0x23cUL /* Transmit defer timer */
+#define BMAC_NCCTR 0x240UL /* Transmit normal-collision counter */
+#define BMAC_FCCTR 0x244UL /* Transmit first-collision counter */
+#define BMAC_EXCTR 0x248UL /* Transmit excess-collision counter */
+#define BMAC_LTCTR 0x24cUL /* Transmit late-collision counter */
+#define BMAC_RSEED 0x250UL /* Transmit random number seed */
+#define BMAC_TXSMACHINE 0x254UL /* Transmit state machine */
+ /* 0x258-->0x304, reserved */
+#define BMAC_RXSWRESET 0x308UL /* Receiver software reset */
+#define BMAC_RXCFG 0x30cUL /* Receiver config register */
+#define BMAC_RXMAX 0x310UL /* Receive max pkt size */
+#define BMAC_RXMIN 0x314UL /* Receive min pkt size */
+#define BMAC_MACADDR2 0x318UL /* Ether address register 2 */
+#define BMAC_MACADDR1 0x31cUL /* Ether address register 1 */
+#define BMAC_MACADDR0 0x320UL /* Ether address register 0 */
+#define BMAC_FRCTR 0x324UL /* Receive frame receive counter */
+#define BMAC_GLECTR 0x328UL /* Receive giant-length error counter */
+#define BMAC_UNALECTR 0x32cUL /* Receive unaligned error counter */
+#define BMAC_RCRCECTR 0x330UL /* Receive CRC error counter */
+#define BMAC_RXSMACHINE 0x334UL /* Receiver state machine */
+#define BMAC_RXCVALID 0x338UL /* Receiver code violation */
+ /* 0x33c, reserved */
+#define BMAC_HTABLE3 0x340UL /* Hash table 3 */
+#define BMAC_HTABLE2 0x344UL /* Hash table 2 */
+#define BMAC_HTABLE1 0x348UL /* Hash table 1 */
+#define BMAC_HTABLE0 0x34cUL /* Hash table 0 */
+#define BMAC_AFILTER2 0x350UL /* Address filter 2 */
+#define BMAC_AFILTER1 0x354UL /* Address filter 1 */
+#define BMAC_AFILTER0 0x358UL /* Address filter 0 */
+#define BMAC_AFMASK 0x35cUL /* Address filter mask */
+#define BMAC_REG_SIZE 0x360UL
+
+/* BigMac XIF config register. */
+#define BIGMAC_XCFG_ODENABLE 0x00000001 /* Output driver enable */
+#define BIGMAC_XCFG_XLBACK 0x00000002 /* Loopback-mode XIF enable */
+#define BIGMAC_XCFG_MLBACK 0x00000004 /* Loopback-mode MII enable */
+#define BIGMAC_XCFG_MIIDISAB 0x00000008 /* MII receive buffer disable */
+#define BIGMAC_XCFG_SQENABLE 0x00000010 /* SQE test enable */
+#define BIGMAC_XCFG_SQETWIN 0x000003e0 /* SQE time window */
+#define BIGMAC_XCFG_LANCE 0x00000010 /* Lance mode enable */
+#define BIGMAC_XCFG_LIPG0 0x000003e0 /* Lance mode IPG0 */
+
+/* BigMac transmit config register. */
+#define BIGMAC_TXCFG_ENABLE 0x00000001 /* Enable the transmitter */
+#define BIGMAC_TXCFG_SMODE 0x00000020 /* Enable slow transmit mode */
+#define BIGMAC_TXCFG_CIGN 0x00000040 /* Ignore transmit collisions */
+#define BIGMAC_TXCFG_FCSOFF 0x00000080 /* Do not emit FCS */
+#define BIGMAC_TXCFG_DBACKOFF 0x00000100 /* Disable backoff */
+#define BIGMAC_TXCFG_FULLDPLX 0x00000200 /* Enable full-duplex */
+#define BIGMAC_TXCFG_DGIVEUP 0x00000400 /* Don't give up on transmits */
+
+/* BigMac receive config register. */
+#define BIGMAC_RXCFG_ENABLE 0x00000001 /* Enable the receiver */
+#define BIGMAC_RXCFG_PSTRIP 0x00000020 /* Pad byte strip enable */
+#define BIGMAC_RXCFG_PMISC 0x00000040 /* Enable promiscuous mode */
+#define BIGMAC_RXCFG_DERR 0x00000080 /* Disable error checking */
+#define BIGMAC_RXCFG_DCRCS 0x00000100 /* Disable CRC stripping */
+#define BIGMAC_RXCFG_REJME 0x00000200 /* Reject packets addressed to me */
+#define BIGMAC_RXCFG_PGRP 0x00000400 /* Enable promisc group mode */
+#define BIGMAC_RXCFG_HENABLE 0x00000800 /* Enable the hash filter */
+#define BIGMAC_RXCFG_AENABLE 0x00001000 /* Enable the address filter */
+
+/* These are the "Management Interface" (ie. MIF) registers of the transceiver. */
+#define TCVR_BBCLOCK 0x00UL /* Bit bang clock register */
+#define TCVR_BBDATA 0x04UL /* Bit bang data register */
+#define TCVR_BBOENAB 0x08UL /* Bit bang output enable */
+#define TCVR_FRAME 0x0cUL /* Frame control/data register */
+#define TCVR_CFG 0x10UL /* MIF config register */
+#define TCVR_IMASK 0x14UL /* MIF interrupt mask */
+#define TCVR_STATUS 0x18UL /* MIF status */
+#define TCVR_SMACHINE 0x1cUL /* MIF state machine */
+#define TCVR_REG_SIZE 0x20UL
+
+/* Frame commands. */
+#define FRAME_WRITE 0x50020000
+#define FRAME_READ 0x60020000
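+/* These encode the fixed fields of an IEEE 802.3 MII management frame:
+ * bits 31:30 hold the 01 start pattern, bits 29:28 the opcode (01 is
+ * write, 10 is read) and bits 17:16 the 10 turnaround, which is how
+ * 0x50020000 spells "write" and 0x60020000 spells "read".  The PHY
+ * address (bits 27:23), register (bits 22:18) and data (bits 15:0)
+ * get OR'd in at the call sites.
+ */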
+
+/* Transceiver config register */
+#define TCV_CFG_PSELECT 0x00000001 /* Select PHY */
+#define TCV_CFG_PENABLE 0x00000002 /* Enable MIF polling */
+#define TCV_CFG_BENABLE 0x00000004 /* Enable the "bit banger" oh baby */
+#define TCV_CFG_PREGADDR 0x000000f8 /* Address of poll register */
+#define TCV_CFG_MDIO0 0x00000100 /* MDIO zero, data/attached */
+#define TCV_CFG_MDIO1 0x00000200 /* MDIO one, data/attached */
+#define TCV_CFG_PDADDR 0x00007c00 /* Device PHY address polling */
+
+/* Here are some PHY addresses. */
+#define TCV_PADDR_ETX 0 /* Internal transceiver */
+#define TCV_PADDR_ITX 1 /* External transceiver */
+
+/* Transceiver status register */
+#define TCV_STAT_BASIC 0xffff0000 /* The "basic" part */
+#define TCV_STAT_NORMAL 0x0000ffff /* The "non-basic" part */
+
+/* Inside the Happy Meal transceiver is the physical layer; it uses an
+ * implementation from National Semiconductor, part number DP83840VCE.
+ * You can retrieve the data sheets and programming docs for this beast
+ * from http://www.national.com/
+ *
+ * The DP83840 is capable of both 10 and 100Mbps ethernet, in both
+ * half and full duplex mode. It also supports auto negotiation.
+ *
+ * But.... THIS THING IS A PAIN IN THE ASS TO PROGRAM!
+ * Debugging eeprom burnt code is more fun than programming this chip!
+ */
+
+/* Generic MII registers defined in linux/mii.h, these below
+ * are DP83840 specific.
+ */
+#define DP83840_CSCONFIG 0x17 /* CS configuration */
+
+/* The Carrier Sense config register. */
+#define CSCONFIG_RESV1 0x0001 /* Unused... */
+#define CSCONFIG_LED4 0x0002 /* Pin for full-dplx LED4 */
+#define CSCONFIG_LED1 0x0004 /* Pin for conn-status LED1 */
+#define CSCONFIG_RESV2 0x0008 /* Unused... */
+#define CSCONFIG_TCVDISAB 0x0010 /* Turns off the transceiver */
+#define CSCONFIG_DFBYPASS 0x0020 /* Bypass disconnect function */
+#define CSCONFIG_GLFORCE 0x0040 /* Good link force for 100mbps */
+#define CSCONFIG_CLKTRISTATE 0x0080 /* Tristate 25m clock */
+#define CSCONFIG_RESV3 0x0700 /* Unused... */
+#define CSCONFIG_ENCODE 0x0800 /* 1=MLT-3, 0=binary */
+#define CSCONFIG_RENABLE 0x1000 /* Repeater mode enable */
+#define CSCONFIG_TCDISABLE 0x2000 /* Disable timeout counter */
+#define CSCONFIG_RESV4 0x4000 /* Unused... */
+#define CSCONFIG_NDISABLE 0x8000 /* Disable NRZI */
+
+/* Happy Meal descriptor rings and such.
+ * All descriptor rings must be aligned on a 2K boundary.
+ * All receive buffers must be 64 byte aligned.
+ * Always write the address first before setting the ownership
+ * bits to avoid races with the hardware scanning the ring.
+ */
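+/* As a concrete sketch of that ordering rule, refilling a receive
+ * descriptor for the chip should look like:
+ *
+ *	rxd->rx_addr  = addr;				// address first
+ *	wmb();						// order the stores
+ *	rxd->rx_flags = RXFLAG_OWN | size_bits;		// ownership last
+ *
+ * where wmb() stands in for whatever store ordering the bus in use
+ * requires, and size_bits is the RXFLAG_SIZE-encoded buffer length.
+ */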
+typedef u32 __bitwise__ hme32;
+
+struct happy_meal_rxd {
+ hme32 rx_flags;
+ hme32 rx_addr;
+};
+
+#define RXFLAG_OWN 0x80000000 /* 1 = hardware, 0 = software */
+#define RXFLAG_OVERFLOW 0x40000000 /* 1 = buffer overflow */
+#define RXFLAG_SIZE 0x3fff0000 /* Size of the buffer */
+#define RXFLAG_CSUM 0x0000ffff /* HW computed checksum */
+
+struct happy_meal_txd {
+ hme32 tx_flags;
+ hme32 tx_addr;
+};
+
+#define TXFLAG_OWN 0x80000000 /* 1 = hardware, 0 = software */
+#define TXFLAG_SOP 0x40000000 /* 1 = start of packet */
+#define TXFLAG_EOP 0x20000000 /* 1 = end of packet */
+#define TXFLAG_CSENABLE 0x10000000 /* 1 = enable hw-checksums */
+#define TXFLAG_CSLOCATION 0x0ff00000 /* Where to stick the csum */
+#define TXFLAG_CSBUFBEGIN 0x000fc000 /* Where to begin checksum */
+#define TXFLAG_SIZE 0x00003fff /* Size of the packet */
+
+#define TX_RING_SIZE 32 /* Must be >=16 and <=256, a multiple of 16 */
+#define RX_RING_SIZE 32 /* see ERX_CFG_SIZE* for possible values */
+
+#if (TX_RING_SIZE < 16 || TX_RING_SIZE > 256 || (TX_RING_SIZE % 16) != 0)
+#error TX_RING_SIZE holds illegal value
+#endif
+
+#define TX_RING_MAXSIZE 256
+#define RX_RING_MAXSIZE 256
+
+/* We use a 14 byte offset for checksum computation. */
+#if (RX_RING_SIZE == 32)
+#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE32|((14/2)<<16))
+#elif (RX_RING_SIZE == 64)
+#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE64|((14/2)<<16))
+#elif (RX_RING_SIZE == 128)
+#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE128|((14/2)<<16))
+#elif (RX_RING_SIZE == 256)
+#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE256|((14/2)<<16))
+#else
+#error RX_RING_SIZE holds illegal value
+#endif
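+/* With the default RX_RING_SIZE of 32 and the RX_OFFSET of 2 defined
+ * below, this works out to ERX_CFG_DEFAULT(2) = 0x00000001 | (2 << 3)
+ * | ERX_CFG_SIZE32 | (7 << 16) = 0x00070011: DMA enabled, a 2 byte
+ * offset, a 32-entry ring, and checksumming starting 14 bytes (7
+ * halfwords) into the frame.
+ */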
+
+#define NEXT_RX(num) (((num) + 1) & (RX_RING_SIZE - 1))
+#define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
+#define PREV_RX(num) (((num) - 1) & (RX_RING_SIZE - 1))
+#define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))
+
+#define TX_BUFFS_AVAIL(hp) \
+ (((hp)->tx_old <= (hp)->tx_new) ? \
+ (hp)->tx_old + (TX_RING_SIZE - 1) - (hp)->tx_new : \
+ (hp)->tx_old - (hp)->tx_new - 1)
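+/* Since the ring sizes are powers of two, NEXT_*()/PREV_*() wrap by
+ * masking: with TX_RING_SIZE == 32, NEXT_TX(31) == 0 and PREV_TX(0)
+ * == 31.  TX_BUFFS_AVAIL() keeps one slot unused so tx_old == tx_new
+ * unambiguously means "empty": an idle ring with tx_old == tx_new
+ * reports 31 free buffers, a full one with tx_new just behind tx_old
+ * reports 0.
+ */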
+
+#define RX_OFFSET 2
+#define RX_BUF_ALLOC_SIZE (1546 + RX_OFFSET + 64)
+
+#define RX_COPY_THRESHOLD 256
+
+struct hmeal_init_block {
+ struct happy_meal_rxd happy_meal_rxd[RX_RING_MAXSIZE];
+ struct happy_meal_txd happy_meal_txd[TX_RING_MAXSIZE];
+};
+
+#define hblock_offset(mem, elem) \
+((__u32)((unsigned long)(&(((struct hmeal_init_block *)0)->mem[elem]))))
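+/* This is offsetof() spelled out by hand.  For example,
+ * hblock_offset(happy_meal_txd, 0) is the byte offset of the first TX
+ * descriptor in the init block: 256 RX descriptors of 8 bytes each,
+ * i.e. 2048.
+ */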
+
+/* Now software state stuff. */
+enum happy_transceiver {
+ external = 0,
+ internal = 1,
+ none = 2,
+};
+
+/* Timer state engine. */
+enum happy_timer_state {
+ arbwait = 0, /* Waiting for auto negotiation to complete. */
+ lupwait = 1, /* Auto-neg complete, awaiting link-up status. */
+ ltrywait = 2, /* Forcing try of all modes, from fastest to slowest. */
+ asleep = 3, /* Time inactive. */
+};
+
+struct quattro;
+
+/* Happy happy, joy joy! */
+struct happy_meal {
+ void __iomem *gregs; /* Happy meal global registers */
+ struct hmeal_init_block *happy_block; /* RX and TX descriptors (CPU addr) */
+
+#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
+ u32 (*read_desc32)(hme32 *);
+ void (*write_txd)(struct happy_meal_txd *, u32, u32);
+ void (*write_rxd)(struct happy_meal_rxd *, u32, u32);
+#endif
+
+ /* This is either a platform_device or a pci_dev. */
+ void *happy_dev;
+ struct device *dma_dev;
+
+ spinlock_t happy_lock;
+
+ struct sk_buff *rx_skbs[RX_RING_SIZE];
+ struct sk_buff *tx_skbs[TX_RING_SIZE];
+
+ int rx_new, tx_new, rx_old, tx_old;
+
+ struct net_device_stats net_stats; /* Statistical counters */
+
+#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
+ u32 (*read32)(void __iomem *);
+ void (*write32)(void __iomem *, u32);
+#endif
+
+ void __iomem *etxregs; /* External transmitter regs */
+ void __iomem *erxregs; /* External receiver regs */
+ void __iomem *bigmacregs; /* BIGMAC core regs */
+ void __iomem *tcvregs; /* MIF transceiver regs */
+
+ dma_addr_t hblock_dvma; /* DVMA visible address happy block */
+ unsigned int happy_flags; /* Driver state flags */
+ enum happy_transceiver tcvr_type; /* Kind of transceiver in use */
+ unsigned int happy_bursts; /* Get your mind out of the gutter */
+ unsigned int paddr; /* PHY address for transceiver */
+ unsigned short hm_revision; /* Happy meal revision */
+ unsigned short sw_bmcr; /* SW copy of BMCR */
+ unsigned short sw_bmsr; /* SW copy of BMSR */
+ unsigned short sw_physid1; /* SW copy of PHYSID1 */
+ unsigned short sw_physid2; /* SW copy of PHYSID2 */
+ unsigned short sw_advertise; /* SW copy of ADVERTISE */
+ unsigned short sw_lpa; /* SW copy of LPA */
+ unsigned short sw_expansion; /* SW copy of EXPANSION */
+ unsigned short sw_csconfig; /* SW copy of CSCONFIG */
+ unsigned int auto_speed; /* Auto-nego link speed */
+ unsigned int forced_speed; /* Force mode link speed */
+ unsigned int poll_data; /* MIF poll data */
+ unsigned int poll_flag; /* MIF poll flag */
+ unsigned int linkcheck; /* Have we checked the link yet? */
+ unsigned int lnkup; /* Is the link up as far as we know? */
+ unsigned int lnkdown; /* Trying to force the link down? */
+ unsigned int lnkcnt; /* Counter for link-up attempts. */
+ struct timer_list happy_timer; /* To watch the link when coming up. */
+ enum happy_timer_state timer_state; /* State of the auto-neg timer. */
+ unsigned int timer_ticks; /* Number of clicks at each state. */
+
+ struct net_device *dev; /* Backpointer */
+ struct quattro *qfe_parent; /* For Quattro cards */
+ int qfe_ent; /* Which instance on quattro */
+};
+
+/* Here are the happy flags. */
+#define HFLAG_POLL 0x00000001 /* We are doing MIF polling */
+#define HFLAG_FENABLE 0x00000002 /* The MII frame is enabled */
+#define HFLAG_LANCE 0x00000004 /* We are using lance-mode */
+#define HFLAG_RXENABLE 0x00000008 /* Receiver is enabled */
+#define HFLAG_AUTO 0x00000010 /* Using auto-negotiation, 0 = force */
+#define HFLAG_FULL 0x00000020 /* Full duplex enable */
+#define HFLAG_MACFULL 0x00000040 /* Using full duplex in the MAC */
+#define HFLAG_POLLENABLE 0x00000080 /* Actually try MIF polling */
+#define HFLAG_RXCV 0x00000100 /* XXX RXCV ENABLE */
+#define HFLAG_INIT 0x00000200 /* Init called at least once */
+#define HFLAG_LINKUP 0x00000400 /* 1 = Link is up */
+#define HFLAG_PCI 0x00000800 /* PCI based Happy Meal */
+#define HFLAG_QUATTRO 0x00001000 /* On QFE/Quattro card */
+
+#define HFLAG_20_21 (HFLAG_POLLENABLE | HFLAG_FENABLE)
+#define HFLAG_NOT_A0 (HFLAG_POLLENABLE | HFLAG_FENABLE | HFLAG_LANCE | HFLAG_RXCV)
+
+/* Support for QFE/Quattro cards. */
+struct quattro {
+ struct net_device *happy_meals[4];
+
+ /* This is either a sbus_dev or a pci_dev. */
+ void *quattro_dev;
+
+ struct quattro *next;
+
+ /* PROM ranges, if any. */
+#ifdef CONFIG_SBUS
+ struct linux_prom_ranges ranges[8];
+#endif
+ int nranges;
+};
+
+/* We use this to acquire receive skb's that we can DMA directly into. */
+#define ALIGNED_RX_SKB_ADDR(addr) \
+ ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
+#define happy_meal_alloc_skb(__length, __gfp_flags) \
+({ struct sk_buff *__skb; \
+ __skb = alloc_skb((__length) + 64, (__gfp_flags)); \
+ if (__skb) { \
+ int __offset = (int) ALIGNED_RX_SKB_ADDR(__skb->data); \
+ if (__offset) \
+ skb_reserve(__skb, __offset); \
+ } \
+ __skb; \
+})
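+/* A worked example of the alignment math: if alloc_skb() hands back
+ * data at 0x1010, ALIGNED_RX_SKB_ADDR gives ((0x1010 + 63) & ~63) -
+ * 0x1010 = 0x1040 - 0x1010 = 48, and skb_reserve(skb, 48) leaves
+ * skb->data 64-byte aligned.  The extra 64 bytes in the allocation
+ * cover the worst-case reserve.
+ */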
+
+#endif /* !(_SUNHME_H) */
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
new file mode 100644
index 000000000000..b28f74367ebe
--- /dev/null
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -0,0 +1,1007 @@
+/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
+ * Once again I am out to prove that every ethernet
+ * controller out there can be most efficiently programmed
+ * if you make it look like a LANCE.
+ *
+ * Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+#include <asm/idprom.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/auxio.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+
+#include "sunqe.h"
+
+#define DRV_NAME "sunqe"
+#define DRV_VERSION "4.1"
+#define DRV_RELDATE "August 27, 2008"
+#define DRV_AUTHOR "David S. Miller (davem@davemloft.net)"
+
+static char version[] =
+ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
+MODULE_LICENSE("GPL");
+
+static struct sunqec *root_qec_dev;
+
+static void qe_set_multicast(struct net_device *dev);
+
+#define QEC_RESET_TRIES 200
+
+static inline int qec_global_reset(void __iomem *gregs)
+{
+ int tries = QEC_RESET_TRIES;
+
+ sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
+ while (--tries) {
+ u32 tmp = sbus_readl(gregs + GLOB_CTRL);
+ if (tmp & GLOB_CTRL_RESET) {
+ udelay(20);
+ continue;
+ }
+ break;
+ }
+ if (tries)
+ return 0;
+ printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
+ return -1;
+}
+
+#define MACE_RESET_RETRIES 200
+#define QE_RESET_RETRIES 200
+
+static inline int qe_stop(struct sunqe *qep)
+{
+ void __iomem *cregs = qep->qcregs;
+ void __iomem *mregs = qep->mregs;
+ int tries;
+
+ /* Reset the MACE, then the QEC channel. */
+ sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
+ tries = MACE_RESET_RETRIES;
+ while (--tries) {
+ u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
+ if (tmp & MREGS_BCONFIG_RESET) {
+ udelay(20);
+ continue;
+ }
+ break;
+ }
+ if (!tries) {
+ printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
+ return -1;
+ }
+
+ sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
+ tries = QE_RESET_RETRIES;
+ while (--tries) {
+ u32 tmp = sbus_readl(cregs + CREG_CTRL);
+ if (tmp & CREG_CTRL_RESET) {
+ udelay(20);
+ continue;
+ }
+ break;
+ }
+ if (!tries) {
+ printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
+ return -1;
+ }
+ return 0;
+}
+
+static void qe_init_rings(struct sunqe *qep)
+{
+ struct qe_init_block *qb = qep->qe_block;
+ struct sunqe_buffers *qbufs = qep->buffers;
+ __u32 qbufs_dvma = qep->buffers_dvma;
+ int i;
+
+ qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
+ memset(qb, 0, sizeof(struct qe_init_block));
+ memset(qbufs, 0, sizeof(struct sunqe_buffers));
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
+ qb->qe_rxd[i].rx_flags =
+ (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
+ }
+}
+
+static int qe_init(struct sunqe *qep, int from_irq)
+{
+ struct sunqec *qecp = qep->parent;
+ void __iomem *cregs = qep->qcregs;
+ void __iomem *mregs = qep->mregs;
+ void __iomem *gregs = qecp->gregs;
+ unsigned char *e = &qep->dev->dev_addr[0];
+ u32 tmp;
+ int i;
+
+ /* Shut it up. */
+ if (qe_stop(qep))
+ return -EAGAIN;
+
+ /* Setup initial rx/tx init block pointers. */
+ sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
+ sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
+
+ /* Enable/mask the various irq's. */
+ sbus_writel(0, cregs + CREG_RIMASK);
+ sbus_writel(1, cregs + CREG_TIMASK);
+
+ sbus_writel(0, cregs + CREG_QMASK);
+ sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);
+
+ /* Setup the FIFO pointers into QEC local memory. */
+ tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
+ sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
+ sbus_writel(tmp, cregs + CREG_RXWBUFPTR);
+
+ tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
+ sbus_readl(gregs + GLOB_RSIZE);
+ sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
+ sbus_writel(tmp, cregs + CREG_TXWBUFPTR);
+
+ /* Clear the channel collision counter. */
+ sbus_writel(0, cregs + CREG_CCNT);
+
+ /* For 10baseT, neither inter-frame spacing nor throttling seems to be necessary. */
+ sbus_writel(0, cregs + CREG_PIPG);
+
+ /* Now dork with the AMD MACE. */
+ sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
+ sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
+ sbus_writeb(0, mregs + MREGS_RXFCNTL);
+
+ /* The QEC dma's the rx'd packets from local memory out to main memory,
+ * and therefore it interrupts when the packet reception is "complete".
+ * So don't listen for the MACE talking about it.
+ */
+ sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
+ sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
+ sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
+ MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
+ mregs + MREGS_FCONFIG);
+
+ /* Only usable interface on QuadEther is twisted pair. */
+ sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);
+
+ /* Tell MACE we are changing the ether address. */
+ sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
+ mregs + MREGS_IACONFIG);
+ while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
+ barrier();
+ sbus_writeb(e[0], mregs + MREGS_ETHADDR);
+ sbus_writeb(e[1], mregs + MREGS_ETHADDR);
+ sbus_writeb(e[2], mregs + MREGS_ETHADDR);
+ sbus_writeb(e[3], mregs + MREGS_ETHADDR);
+ sbus_writeb(e[4], mregs + MREGS_ETHADDR);
+ sbus_writeb(e[5], mregs + MREGS_ETHADDR);
+
+ /* Clear out the address filter. */
+ sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
+ mregs + MREGS_IACONFIG);
+ while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
+ barrier();
+ for (i = 0; i < 8; i++)
+ sbus_writeb(0, mregs + MREGS_FILTER);
+
+ /* Address changes are now complete. */
+ sbus_writeb(0, mregs + MREGS_IACONFIG);
+
+ qe_init_rings(qep);
+
+ /* Wait a little bit for the link to come up... */
+ mdelay(5);
+ if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
+ int tries = 50;
+
+ while (--tries) {
+ u8 tmp;
+
+ mdelay(5);
+ barrier();
+ tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
+ if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
+ break;
+ }
+ if (tries == 0)
+ printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
+ }
+
+ /* Missed packet counter is cleared on a read. */
+ sbus_readb(mregs + MREGS_MPCNT);
+
+ /* Reload multicast information, this will enable the receiver
+ * and transmitter.
+ */
+ qe_set_multicast(qep->dev);
+
+ /* QEC should now start to show interrupts. */
+ return 0;
+}
+
+/* Grrr, certain error conditions completely lock up the AMD MACE,
+ * so when we get these we _must_ reset the chip.
+ */
+static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
+{
+ struct net_device *dev = qep->dev;
+ int mace_hwbug_workaround = 0;
+
+ if (qe_status & CREG_STAT_EDEFER) {
+ printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
+ dev->stats.tx_errors++;
+ }
+
+ if (qe_status & CREG_STAT_CLOSS) {
+ printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
+ dev->stats.tx_errors++;
+ dev->stats.tx_carrier_errors++;
+ }
+
+ if (qe_status & CREG_STAT_ERETRIES) {
+ printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
+ dev->stats.tx_errors++;
+ mace_hwbug_workaround = 1;
+ }
+
+ if (qe_status & CREG_STAT_LCOLL) {
+ printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
+ dev->stats.tx_errors++;
+ dev->stats.collisions++;
+ mace_hwbug_workaround = 1;
+ }
+
+ if (qe_status & CREG_STAT_FUFLOW) {
+ printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
+ dev->stats.tx_errors++;
+ mace_hwbug_workaround = 1;
+ }
+
+ if (qe_status & CREG_STAT_JERROR) {
+ printk(KERN_ERR "%s: Jabber error.\n", dev->name);
+ }
+
+ if (qe_status & CREG_STAT_BERROR) {
+ printk(KERN_ERR "%s: Babble error.\n", dev->name);
+ }
+
+ if (qe_status & CREG_STAT_CCOFLOW) {
+ dev->stats.tx_errors += 256;
+ dev->stats.collisions += 256;
+ }
+
+ if (qe_status & CREG_STAT_TXDERROR) {
+ printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
+ mace_hwbug_workaround = 1;
+ }
+
+ if (qe_status & CREG_STAT_TXLERR) {
+ printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
+ dev->stats.tx_errors++;
+ mace_hwbug_workaround = 1;
+ }
+
+ if (qe_status & CREG_STAT_TXPERR) {
+ printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
+ mace_hwbug_workaround = 1;
+ }
+
+ if (qe_status & CREG_STAT_TXSERR) {
+ printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
+ mace_hwbug_workaround = 1;
+ }
+
+ if (qe_status & CREG_STAT_RCCOFLOW) {
+ dev->stats.rx_errors += 256;
+ dev->stats.collisions += 256;
+ }
+
+ if (qe_status & CREG_STAT_RUOFLOW) {
+ dev->stats.rx_errors += 256;
+ dev->stats.rx_over_errors += 256;
+ }
+
+ if (qe_status & CREG_STAT_MCOFLOW) {
+ dev->stats.rx_errors += 256;
+ dev->stats.rx_missed_errors += 256;
+ }
+
+ if (qe_status & CREG_STAT_RXFOFLOW) {
+ printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
+ dev->stats.rx_errors++;
+ dev->stats.rx_over_errors++;
+ }
+
+ if (qe_status & CREG_STAT_RLCOLL) {
+ printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
+ dev->stats.rx_errors++;
+ dev->stats.collisions++;
+ }
+
+ if (qe_status & CREG_STAT_FCOFLOW) {
+ dev->stats.rx_errors += 256;
+ dev->stats.rx_frame_errors += 256;
+ }
+
+ if (qe_status & CREG_STAT_CECOFLOW) {
+ dev->stats.rx_errors += 256;
+ dev->stats.rx_crc_errors += 256;
+ }
+
+ if (qe_status & CREG_STAT_RXDROP) {
+ printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
+ dev->stats.rx_errors++;
+ dev->stats.rx_dropped++;
+ dev->stats.rx_missed_errors++;
+ }
+
+ if (qe_status & CREG_STAT_RXSMALL) {
+ printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
+ }
+
+ if (qe_status & CREG_STAT_RXLERR) {
+ printk(KERN_ERR "%s: Receive late error.\n", dev->name);
+ dev->stats.rx_errors++;
+ mace_hwbug_workaround = 1;
+ }
+
+ if (qe_status & CREG_STAT_RXPERR) {
+ printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
+ dev->stats.rx_errors++;
+ dev->stats.rx_missed_errors++;
+ mace_hwbug_workaround = 1;
+ }
+
+ if (qe_status & CREG_STAT_RXSERR) {
+ printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
+ dev->stats.rx_errors++;
+ dev->stats.rx_missed_errors++;
+ mace_hwbug_workaround = 1;
+ }
+
+ if (mace_hwbug_workaround)
+ qe_init(qep, 1);
+ return mace_hwbug_workaround;
+}
+
+/* Per-QE receive interrupt service routine. Just like on the happy meal
+ * we receive directly into skb's with a small packet copy water mark.
+ */
+static void qe_rx(struct sunqe *qep)
+{
+ struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
+ struct net_device *dev = qep->dev;
+ struct qe_rxd *this;
+ struct sunqe_buffers *qbufs = qep->buffers;
+ __u32 qbufs_dvma = qep->buffers_dvma;
+ int elem = qep->rx_new, drops = 0;
+ u32 flags;
+
+ this = &rxbase[elem];
+ while (!((flags = this->rx_flags) & RXD_OWN)) {
+ struct sk_buff *skb;
+ unsigned char *this_qbuf =
+ &qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
+ __u32 this_qbuf_dvma = qbufs_dvma +
+ qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
+ struct qe_rxd *end_rxd =
+ &rxbase[(elem + RX_RING_SIZE) & (RX_RING_MAXSIZE - 1)];
+ int len = (flags & RXD_LENGTH) - 4; /* QE adds ether FCS size to len */
+
+ /* Check for errors. */
+ if (len < ETH_ZLEN) {
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
+ dev->stats.rx_dropped++;
+ } else {
+ skb = dev_alloc_skb(len + 2);
+ if (skb == NULL) {
+ drops++;
+ dev->stats.rx_dropped++;
+ } else {
+ skb_reserve(skb, 2);
+ skb_put(skb, len);
+ skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf,
+ len);
+ skb->protocol = eth_type_trans(skb, qep->dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ }
+ }
+ end_rxd->rx_addr = this_qbuf_dvma;
+ end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
+
+ elem = NEXT_RX(elem);
+ this = &rxbase[elem];
+ }
+ qep->rx_new = elem;
+ if (drops)
+ printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name);
+}
+
+static void qe_tx_reclaim(struct sunqe *qep);
+
+/* Interrupts for all QE's get filtered out via the QEC master controller,
+ * so we just run through each qe and check to see who is signaling
+ * and thus needs to be serviced.
+ */
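+/* GLOB_STAT packs four status bits per channel, channel 0 in the low
+ * nibble, which is why the loop below tests (qec_status & 0xf) and
+ * shifts the latched value right by four for each of the 4 channels.
+ */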
+static irqreturn_t qec_interrupt(int irq, void *dev_id)
+{
+ struct sunqec *qecp = dev_id;
+ u32 qec_status;
+ int channel = 0;
+
+ /* Latch the status now. */
+ qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
+ while (channel < 4) {
+ if (qec_status & 0xf) {
+ struct sunqe *qep = qecp->qes[channel];
+ u32 qe_status;
+
+ qe_status = sbus_readl(qep->qcregs + CREG_STAT);
+ if (qe_status & CREG_STAT_ERRORS) {
+ if (qe_is_bolixed(qep, qe_status))
+ goto next;
+ }
+ if (qe_status & CREG_STAT_RXIRQ)
+ qe_rx(qep);
+ if (netif_queue_stopped(qep->dev) &&
+ (qe_status & CREG_STAT_TXIRQ)) {
+ spin_lock(&qep->lock);
+ qe_tx_reclaim(qep);
+ if (TX_BUFFS_AVAIL(qep) > 0) {
+ /* Wake net queue and return to
+ * lazy tx reclaim.
+ */
+ netif_wake_queue(qep->dev);
+ sbus_writel(1, qep->qcregs + CREG_TIMASK);
+ }
+ spin_unlock(&qep->lock);
+ }
+ next:
+ ;
+ }
+ qec_status >>= 4;
+ channel++;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int qe_open(struct net_device *dev)
+{
+ struct sunqe *qep = netdev_priv(dev);
+
+ qep->mconfig = (MREGS_MCONFIG_TXENAB |
+ MREGS_MCONFIG_RXENAB |
+ MREGS_MCONFIG_MBAENAB);
+ return qe_init(qep, 0);
+}
+
+static int qe_close(struct net_device *dev)
+{
+ struct sunqe *qep = netdev_priv(dev);
+
+ qe_stop(qep);
+ return 0;
+}
+
+/* Reclaim TX'd frames from the ring. This must always run under
+ * the IRQ protected qep->lock.
+ */
+static void qe_tx_reclaim(struct sunqe *qep)
+{
+ struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
+ int elem = qep->tx_old;
+
+ while (elem != qep->tx_new) {
+ u32 flags = txbase[elem].tx_flags;
+
+ if (flags & TXD_OWN)
+ break;
+ elem = NEXT_TX(elem);
+ }
+ qep->tx_old = elem;
+}
+
+static void qe_tx_timeout(struct net_device *dev)
+{
+ struct sunqe *qep = netdev_priv(dev);
+ int tx_full;
+
+ spin_lock_irq(&qep->lock);
+
+ /* Try to reclaim, if that frees up some tx
+ * entries, we're fine.
+ */
+ qe_tx_reclaim(qep);
+ tx_full = TX_BUFFS_AVAIL(qep) <= 0;
+
+ spin_unlock_irq(&qep->lock);
+
+ if (!tx_full)
+ goto out;
+
+ printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+ qe_init(qep, 1);
+
+out:
+ netif_wake_queue(dev);
+}
+
+/* Get a packet queued to go onto the wire. */
+static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct sunqe *qep = netdev_priv(dev);
+ struct sunqe_buffers *qbufs = qep->buffers;
+ __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
+ unsigned char *txbuf;
+ int len, entry;
+
+ spin_lock_irq(&qep->lock);
+
+ qe_tx_reclaim(qep);
+
+ len = skb->len;
+ entry = qep->tx_new;
+
+ txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
+ txbuf_dvma = qbufs_dvma +
+ qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));
+
+ /* Avoid a race... */
+ qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;
+
+ skb_copy_from_linear_data(skb, txbuf, len);
+
+ qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
+ qep->qe_block->qe_txd[entry].tx_flags =
+ (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
+ qep->tx_new = NEXT_TX(entry);
+
+ /* Get it going. */
+ sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += len;
+
+ if (TX_BUFFS_AVAIL(qep) <= 0) {
+ /* Halt the net queue and enable tx interrupts.
+ * When the tx queue empties the tx irq handler
+ * will wake up the queue and return us back to
+ * the lazy tx reclaim scheme.
+ */
+ netif_stop_queue(dev);
+ sbus_writel(0, qep->qcregs + CREG_TIMASK);
+ }
+ spin_unlock_irq(&qep->lock);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static void qe_set_multicast(struct net_device *dev)
+{
+ struct sunqe *qep = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ u8 new_mconfig = qep->mconfig;
+ int i;
+ u32 crc;
+
+ /* Lock out others. */
+ netif_stop_queue(dev);
+
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
+ sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
+ qep->mregs + MREGS_IACONFIG);
+ while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
+ barrier();
+ for (i = 0; i < 8; i++)
+ sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
+ sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
+ } else if (dev->flags & IFF_PROMISC) {
+ new_mconfig |= MREGS_MCONFIG_PROMISC;
+ } else {
+ u16 hash_table[4];
+ u8 *hbytes = (u8 *) &hash_table[0];
+
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
+ crc >>= 26;
+ hash_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
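+ /* The top 6 CRC bits select one of 64 filter bits: bits 5:4
+ * pick one of the four 16-bit words, bits 3:0 the bit within
+ * that word.
+ */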
+ /* Program the qe with the new filter value. */
+ sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
+ qep->mregs + MREGS_IACONFIG);
+ while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
+ barrier();
+ for (i = 0; i < 8; i++) {
+ u8 tmp = *hbytes++;
+ sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
+ }
+ sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
+ }
+
+ /* Any change of the logical address filter, the physical address,
+ * or enabling/disabling promiscuous mode causes the MACE to disable
+ * the receiver. So we must re-enable them here or else the MACE
+ * refuses to listen to anything on the network. Sheesh, took
+ * me a day or two to find this bug.
+ */
+ qep->mconfig = new_mconfig;
+ sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);
+
+ /* Let us get going again. */
+ netif_wake_queue(dev);
+}
+
+/* Ethtool support... */
+static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ const struct linux_prom_registers *regs;
+ struct sunqe *qep = netdev_priv(dev);
+ struct platform_device *op;
+
+ strcpy(info->driver, "sunqe");
+ strcpy(info->version, "3.0");
+
+ op = qep->op;
+ regs = of_get_property(op->dev.of_node, "reg", NULL);
+ if (regs)
+ sprintf(info->bus_info, "SBUS:%d", regs->which_io);
+}
+
+static u32 qe_get_link(struct net_device *dev)
+{
+ struct sunqe *qep = netdev_priv(dev);
+ void __iomem *mregs = qep->mregs;
+ u8 phyconfig;
+
+ spin_lock_irq(&qep->lock);
+ phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
+ spin_unlock_irq(&qep->lock);
+
+ return phyconfig & MREGS_PHYCONFIG_LSTAT;
+}
+
+static const struct ethtool_ops qe_ethtool_ops = {
+ .get_drvinfo = qe_get_drvinfo,
+ .get_link = qe_get_link,
+};
+
+/* This is only called once at boot time for each card probed. */
+static void qec_init_once(struct sunqec *qecp, struct platform_device *op)
+{
+ u8 bsizes = qecp->qec_bursts;
+
+ if (sbus_can_burst64() && (bsizes & DMA_BURST64)) {
+ sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
+ } else if (bsizes & DMA_BURST32) {
+ sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
+ } else {
+ sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
+ }
+
+ /* Packetsize only used in 100baseT BigMAC configurations,
+ * set it to zero just to be on the safe side.
+ */
+ sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);
+
+ /* Set the local memsize register, divided up to one piece per QE channel. */
+ sbus_writel((resource_size(&op->resource[1]) >> 2),
+ qecp->gregs + GLOB_MSIZE);
+
+ /* Divide up the local QEC memory amongst the 4 QE receiver and
+ * transmitter FIFOs. Basically it is (total / 2 / num_channels).
+ */
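+ /* For example, if the local memory resource were 256K (the actual
+ * size is board dependent), each channel would get 64K (>> 2),
+ * split into a 32K TX partition and a 32K RX partition (a
+ * further >> 1).
+ */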
+ sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
+ qecp->gregs + GLOB_TSIZE);
+ sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
+ qecp->gregs + GLOB_RSIZE);
+}
+
+static u8 __devinit qec_get_burst(struct device_node *dp)
+{
+ u8 bsizes, bsizes_more;
+
+ /* Find and set the burst sizes for the QEC, since it
+ * does the actual dma for all 4 channels.
+ */
+ bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
+ bsizes &= 0xff;
+ bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);
+
+ if (bsizes_more != 0xff)
+ bsizes &= bsizes_more;
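+ /* The DMA_BURST* values are single-bit flags, so the
+ * (DMA_BURST32 - 1) fallback below selects every burst size
+ * smaller than 32 bytes.
+ */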
+ if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
+ (bsizes & DMA_BURST32) == 0)
+ bsizes = (DMA_BURST32 - 1);
+
+ return bsizes;
+}
+
+static struct sunqec * __devinit get_qec(struct platform_device *child)
+{
+ struct platform_device *op = to_platform_device(child->dev.parent);
+ struct sunqec *qecp;
+
+ qecp = dev_get_drvdata(&op->dev);
+ if (!qecp) {
+ qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
+ if (qecp) {
+ u32 ctrl;
+
+ qecp->op = op;
+ qecp->gregs = of_ioremap(&op->resource[0], 0,
+ GLOB_REG_SIZE,
+ "QEC Global Registers");
+ if (!qecp->gregs)
+ goto fail;
+
+ /* Make sure the QEC is in MACE mode. */
+ ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
+ ctrl &= 0xf0000000;
+ if (ctrl != GLOB_CTRL_MMODE) {
+ printk(KERN_ERR "qec: Not in MACE mode!\n");
+ goto fail;
+ }
+
+ if (qec_global_reset(qecp->gregs))
+ goto fail;
+
+ qecp->qec_bursts = qec_get_burst(op->dev.of_node);
+
+ qec_init_once(qecp, op);
+
+ if (request_irq(op->archdata.irqs[0], qec_interrupt,
+ IRQF_SHARED, "qec", (void *) qecp)) {
+ printk(KERN_ERR "qec: Can't register irq.\n");
+ goto fail;
+ }
+
+ dev_set_drvdata(&op->dev, qecp);
+
+ qecp->next_module = root_qec_dev;
+ root_qec_dev = qecp;
+ }
+ }
+
+ return qecp;
+
+fail:
+ if (qecp->gregs)
+ of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE);
+ kfree(qecp);
+ return NULL;
+}
+
+static const struct net_device_ops qec_ops = {
+ .ndo_open = qe_open,
+ .ndo_stop = qe_close,
+ .ndo_start_xmit = qe_start_xmit,
+ .ndo_set_rx_mode = qe_set_multicast,
+ .ndo_tx_timeout = qe_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __devinit qec_ether_init(struct platform_device *op)
+{
+ static unsigned version_printed;
+ struct net_device *dev;
+ struct sunqec *qecp;
+ struct sunqe *qe;
+ int i, res;
+
+ if (version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+
+ dev = alloc_etherdev(sizeof(struct sunqe));
+ if (!dev)
+ return -ENOMEM;
+
+ memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+
+ qe = netdev_priv(dev);
+
+ res = -ENODEV;
+
+ i = of_getintprop_default(op->dev.of_node, "channel#", -1);
+ if (i == -1)
+ goto fail;
+ qe->channel = i;
+ spin_lock_init(&qe->lock);
+
+ qecp = get_qec(op);
+ if (!qecp)
+ goto fail;
+
+ qecp->qes[qe->channel] = qe;
+ qe->dev = dev;
+ qe->parent = qecp;
+ qe->op = op;
+
+ res = -ENOMEM;
+ qe->qcregs = of_ioremap(&op->resource[0], 0,
+ CREG_REG_SIZE, "QEC Channel Registers");
+ if (!qe->qcregs) {
+ printk(KERN_ERR "qe: Cannot map channel registers.\n");
+ goto fail;
+ }
+
+ qe->mregs = of_ioremap(&op->resource[1], 0,
+ MREGS_REG_SIZE, "QE MACE Registers");
+ if (!qe->mregs) {
+ printk(KERN_ERR "qe: Cannot map MACE registers.\n");
+ goto fail;
+ }
+
+ qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE,
+ &qe->qblock_dvma, GFP_ATOMIC);
+ qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers),
+ &qe->buffers_dvma, GFP_ATOMIC);
+ if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
+ qe->buffers == NULL || qe->buffers_dvma == 0)
+ goto fail;
+
+ /* Stop this QE. */
+ qe_stop(qe);
+
+ SET_NETDEV_DEV(dev, &op->dev);
+
+ dev->watchdog_timeo = 5*HZ;
+ dev->irq = op->archdata.irqs[0];
+ dev->dma = 0;
+ dev->ethtool_ops = &qe_ethtool_ops;
+ dev->netdev_ops = &qec_ops;
+
+ res = register_netdev(dev);
+ if (res)
+ goto fail;
+
+ dev_set_drvdata(&op->dev, qe);
+
+ printk(KERN_INFO "%s: qe channel[%d] ", dev->name, qe->channel);
+ for (i = 0; i < 6; i++)
+ printk ("%2.2x%c",
+ dev->dev_addr[i],
+ i == 5 ? ' ': ':');
+ printk("\n");
+
+
+ return 0;
+
+fail:
+ if (qe->qcregs)
+ of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE);
+ if (qe->mregs)
+ of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE);
+ if (qe->qe_block)
+ dma_free_coherent(&op->dev, PAGE_SIZE,
+ qe->qe_block, qe->qblock_dvma);
+ if (qe->buffers)
+ dma_free_coherent(&op->dev,
+ sizeof(struct sunqe_buffers),
+ qe->buffers,
+ qe->buffers_dvma);
+
+ free_netdev(dev);
+
+ return res;
+}
+
+static int __devinit qec_sbus_probe(struct platform_device *op)
+{
+ return qec_ether_init(op);
+}
+
+static int __devexit qec_sbus_remove(struct platform_device *op)
+{
+ struct sunqe *qp = dev_get_drvdata(&op->dev);
+ struct net_device *net_dev = qp->dev;
+
+ unregister_netdev(net_dev);
+
+ of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE);
+ of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE);
+ dma_free_coherent(&op->dev, PAGE_SIZE,
+ qp->qe_block, qp->qblock_dvma);
+ dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers),
+ qp->buffers, qp->buffers_dvma);
+
+ free_netdev(net_dev);
+
+ dev_set_drvdata(&op->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id qec_sbus_match[] = {
+ {
+ .name = "qe",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, qec_sbus_match);
+
+static struct platform_driver qec_sbus_driver = {
+ .driver = {
+ .name = "qec",
+ .owner = THIS_MODULE,
+ .of_match_table = qec_sbus_match,
+ },
+ .probe = qec_sbus_probe,
+ .remove = __devexit_p(qec_sbus_remove),
+};
+
+static int __init qec_init(void)
+{
+ return platform_driver_register(&qec_sbus_driver);
+}
+
+static void __exit qec_exit(void)
+{
+ platform_driver_unregister(&qec_sbus_driver);
+
+ while (root_qec_dev) {
+ struct sunqec *next = root_qec_dev->next_module;
+ struct platform_device *op = root_qec_dev->op;
+
+ free_irq(op->archdata.irqs[0], (void *) root_qec_dev);
+ of_iounmap(&op->resource[0], root_qec_dev->gregs,
+ GLOB_REG_SIZE);
+ kfree(root_qec_dev);
+
+ root_qec_dev = next;
+ }
+}
+
+module_init(qec_init);
+module_exit(qec_exit);
diff --git a/drivers/net/ethernet/sun/sunqe.h b/drivers/net/ethernet/sun/sunqe.h
new file mode 100644
index 000000000000..581781b6b2fa
--- /dev/null
+++ b/drivers/net/ethernet/sun/sunqe.h
@@ -0,0 +1,350 @@
+/* $Id: sunqe.h,v 1.13 2000/02/09 11:15:42 davem Exp $
+ * sunqe.h: Definitions for the Sun QuadEthernet driver.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#ifndef _SUNQE_H
+#define _SUNQE_H
+
+/* QEC global registers. */
+#define GLOB_CTRL 0x00UL /* Control */
+#define GLOB_STAT 0x04UL /* Status */
+#define GLOB_PSIZE 0x08UL /* Packet Size */
+#define GLOB_MSIZE 0x0cUL /* Local-memory Size */
+#define GLOB_RSIZE 0x10UL /* Receive partition size */
+#define GLOB_TSIZE 0x14UL /* Transmit partition size */
+#define GLOB_REG_SIZE 0x18UL
+
+#define GLOB_CTRL_MMODE 0x40000000 /* MACE qec mode */
+#define GLOB_CTRL_BMODE 0x10000000 /* BigMAC qec mode */
+#define GLOB_CTRL_EPAR 0x00000020 /* Enable parity */
+#define GLOB_CTRL_ACNTRL 0x00000018 /* SBUS arbitration control */
+#define GLOB_CTRL_B64 0x00000004 /* 64 byte dvma bursts */
+#define GLOB_CTRL_B32 0x00000002 /* 32 byte dvma bursts */
+#define GLOB_CTRL_B16 0x00000000 /* 16 byte dvma bursts */
+#define GLOB_CTRL_RESET 0x00000001 /* Reset the QEC */
+
+#define GLOB_STAT_TX 0x00000008 /* BigMAC Transmit IRQ */
+#define GLOB_STAT_RX 0x00000004 /* BigMAC Receive IRQ */
+#define GLOB_STAT_BM 0x00000002 /* BigMAC Global IRQ */
+#define GLOB_STAT_ER 0x00000001 /* BigMAC Error IRQ */
+
+#define GLOB_PSIZE_2048 0x00 /* 2k packet size */
+#define GLOB_PSIZE_4096 0x01 /* 4k packet size */
+#define GLOB_PSIZE_6144 0x10 /* 6k packet size */
+#define GLOB_PSIZE_8192 0x11 /* 8k packet size */
+
+/* In MACE mode, there are four qe channels. Each channel has its own
+ * status bits in the QEC status register. This macro picks out the
+ * ones you want.
+ */
+#define GLOB_STAT_PER_QE(status, channel) (((status) >> ((channel) * 4)) & 0xf)
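+/* For example, GLOB_STAT_PER_QE(status, 2) extracts bits 11:8, the
+ * status nibble for channel 2.
+ */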
+
+/* The following registers are for per-qe channel information/status. */
+#define CREG_CTRL 0x00UL /* Control */
+#define CREG_STAT 0x04UL /* Status */
+#define CREG_RXDS 0x08UL /* RX descriptor ring ptr */
+#define CREG_TXDS 0x0cUL /* TX descriptor ring ptr */
+#define CREG_RIMASK 0x10UL /* RX Interrupt Mask */
+#define CREG_TIMASK 0x14UL /* TX Interrupt Mask */
+#define CREG_QMASK 0x18UL /* QEC Error Interrupt Mask */
+#define CREG_MMASK 0x1cUL /* MACE Error Interrupt Mask */
+#define CREG_RXWBUFPTR 0x20UL /* Local memory rx write ptr */
+#define CREG_RXRBUFPTR 0x24UL /* Local memory rx read ptr */
+#define CREG_TXWBUFPTR 0x28UL /* Local memory tx write ptr */
+#define CREG_TXRBUFPTR 0x2cUL /* Local memory tx read ptr */
+#define CREG_CCNT 0x30UL /* Collision Counter */
+#define CREG_PIPG 0x34UL /* Inter-Frame Gap */
+#define CREG_REG_SIZE 0x38UL
+
+#define CREG_CTRL_RXOFF 0x00000004 /* Disable this qe's receiver*/
+#define CREG_CTRL_RESET 0x00000002 /* Reset this qe channel */
+#define CREG_CTRL_TWAKEUP 0x00000001 /* Transmitter Wakeup, 'go'. */
+
+#define CREG_STAT_EDEFER 0x10000000 /* Excessive Defers */
+#define CREG_STAT_CLOSS 0x08000000 /* Carrier Loss */
+#define CREG_STAT_ERETRIES 0x04000000 /* More than 16 retries */
+#define CREG_STAT_LCOLL 0x02000000 /* Late TX Collision */
+#define CREG_STAT_FUFLOW 0x01000000 /* FIFO Underflow */
+#define CREG_STAT_JERROR 0x00800000 /* Jabber Error */
+#define CREG_STAT_BERROR 0x00400000 /* Babble Error */
+#define CREG_STAT_TXIRQ 0x00200000 /* Transmit Interrupt */
+#define CREG_STAT_CCOFLOW 0x00100000 /* TX Coll-counter Overflow */
+#define CREG_STAT_TXDERROR 0x00080000 /* TX Descriptor is bogus */
+#define CREG_STAT_TXLERR 0x00040000 /* Late Transmit Error */
+#define CREG_STAT_TXPERR 0x00020000 /* Transmit Parity Error */
+#define CREG_STAT_TXSERR 0x00010000 /* Transmit SBUS error ack */
+#define CREG_STAT_RCCOFLOW 0x00001000 /* RX Coll-counter Overflow */
+#define CREG_STAT_RUOFLOW 0x00000800 /* Runt Counter Overflow */
+#define CREG_STAT_MCOFLOW 0x00000400 /* Missed Counter Overflow */
+#define CREG_STAT_RXFOFLOW 0x00000200 /* RX FIFO Overflow */
+#define CREG_STAT_RLCOLL 0x00000100 /* RX Late Collision */
+#define CREG_STAT_FCOFLOW 0x00000080 /* Frame Counter Overflow */
+#define CREG_STAT_CECOFLOW 0x00000040 /* CRC Error-counter Overflow*/
+#define CREG_STAT_RXIRQ 0x00000020 /* Receive Interrupt */
+#define CREG_STAT_RXDROP 0x00000010 /* Dropped a RX'd packet */
+#define CREG_STAT_RXSMALL 0x00000008 /* Receive buffer too small */
+#define CREG_STAT_RXLERR 0x00000004 /* Receive Late Error */
+#define CREG_STAT_RXPERR 0x00000002 /* Receive Parity Error */
+#define CREG_STAT_RXSERR 0x00000001 /* Receive SBUS Error ACK */
+
+#define CREG_STAT_ERRORS (CREG_STAT_EDEFER|CREG_STAT_CLOSS|CREG_STAT_ERETRIES| \
+ CREG_STAT_LCOLL|CREG_STAT_FUFLOW|CREG_STAT_JERROR| \
+ CREG_STAT_BERROR|CREG_STAT_CCOFLOW|CREG_STAT_TXDERROR| \
+ CREG_STAT_TXLERR|CREG_STAT_TXPERR|CREG_STAT_TXSERR| \
+ CREG_STAT_RCCOFLOW|CREG_STAT_RUOFLOW|CREG_STAT_MCOFLOW| \
+ CREG_STAT_RXFOFLOW|CREG_STAT_RLCOLL|CREG_STAT_FCOFLOW| \
+ CREG_STAT_CECOFLOW|CREG_STAT_RXDROP|CREG_STAT_RXSMALL| \
+ CREG_STAT_RXLERR|CREG_STAT_RXPERR|CREG_STAT_RXSERR)
+
+#define CREG_QMASK_COFLOW 0x00100000 /* CollCntr overflow */
+#define CREG_QMASK_TXDERROR 0x00080000 /* TXD error */
+#define CREG_QMASK_TXLERR 0x00040000 /* TX late error */
+#define CREG_QMASK_TXPERR 0x00020000 /* TX parity error */
+#define CREG_QMASK_TXSERR 0x00010000 /* TX sbus error ack */
+#define CREG_QMASK_RXDROP 0x00000010 /* RX drop */
+#define CREG_QMASK_RXBERROR 0x00000008 /* RX buffer error */
+#define CREG_QMASK_RXLEERR 0x00000004 /* RX late error */
+#define CREG_QMASK_RXPERR 0x00000002 /* RX parity error */
+#define CREG_QMASK_RXSERR 0x00000001 /* RX sbus error ack */
+
+#define CREG_MMASK_EDEFER 0x10000000 /* Excess defer */
+#define CREG_MMASK_CLOSS 0x08000000 /* Carrier loss */
+#define CREG_MMASK_ERETRY 0x04000000 /* Excess retry */
+#define CREG_MMASK_LCOLL 0x02000000 /* Late collision error */
+#define CREG_MMASK_UFLOW 0x01000000 /* Underflow */
+#define CREG_MMASK_JABBER 0x00800000 /* Jabber error */
+#define CREG_MMASK_BABBLE 0x00400000 /* Babble error */
+#define CREG_MMASK_OFLOW 0x00000800 /* Overflow */
+#define CREG_MMASK_RXCOLL 0x00000400 /* RX Coll-Cntr overflow */
+#define CREG_MMASK_RPKT 0x00000200 /* Runt pkt overflow */
+#define CREG_MMASK_MPKT 0x00000100 /* Missed pkt overflow */
+
+#define CREG_PIPG_TENAB 0x00000020 /* Enable Throttle */
+#define CREG_PIPG_MMODE 0x00000010 /* Manual Mode */
+#define CREG_PIPG_WMASK 0x0000000f /* SBUS Wait Mask */
+
+/* Per-channel AMD 79C940 MACE registers. */
+#define MREGS_RXFIFO 0x00UL /* Receive FIFO */
+#define MREGS_TXFIFO 0x01UL /* Transmit FIFO */
+#define MREGS_TXFCNTL 0x02UL /* Transmit Frame Control */
+#define MREGS_TXFSTAT 0x03UL /* Transmit Frame Status */
+#define MREGS_TXRCNT 0x04UL /* Transmit Retry Count */
+#define MREGS_RXFCNTL 0x05UL /* Receive Frame Control */
+#define MREGS_RXFSTAT 0x06UL /* Receive Frame Status */
+#define MREGS_FFCNT 0x07UL /* FIFO Frame Count */
+#define MREGS_IREG 0x08UL /* Interrupt Register */
+#define MREGS_IMASK 0x09UL /* Interrupt Mask */
+#define MREGS_POLL 0x0aUL /* POLL Register */
+#define MREGS_BCONFIG 0x0bUL /* BIU Config */
+#define MREGS_FCONFIG 0x0cUL /* FIFO Config */
+#define MREGS_MCONFIG 0x0dUL /* MAC Config */
+#define MREGS_PLSCONFIG 0x0eUL /* PLS Config */
+#define MREGS_PHYCONFIG 0x0fUL /* PHY Config */
+#define MREGS_CHIPID1 0x10UL /* Chip-ID, low bits */
+#define MREGS_CHIPID2 0x11UL /* Chip-ID, high bits */
+#define MREGS_IACONFIG 0x12UL /* Internal Address Config */
+ /* 0x13UL, reserved */
+#define MREGS_FILTER 0x14UL /* Logical Address Filter */
+#define MREGS_ETHADDR 0x15UL /* Our Ethernet Address */
+ /* 0x16UL, reserved */
+ /* 0x17UL, reserved */
+#define MREGS_MPCNT 0x18UL /* Missed Packet Count */
+ /* 0x19UL, reserved */
+#define MREGS_RPCNT 0x1aUL /* Runt Packet Count */
+#define MREGS_RCCNT 0x1bUL /* RX Collision Count */
+ /* 0x1cUL, reserved */
+#define MREGS_UTEST 0x1dUL /* User Test */
+#define MREGS_RTEST1 0x1eUL /* Reserved Test 1 */
+#define MREGS_RTEST2 0x1fUL /* Reserved Test 2 */
+#define MREGS_REG_SIZE 0x20UL
+
+#define MREGS_TXFCNTL_DRETRY 0x80 /* Retry disable */
+#define MREGS_TXFCNTL_DFCS 0x08 /* Disable TX FCS */
+#define MREGS_TXFCNTL_AUTOPAD 0x01 /* TX auto pad */
+
+#define MREGS_TXFSTAT_VALID 0x80 /* TX valid */
+#define MREGS_TXFSTAT_UNDERFLOW 0x40 /* TX underflow */
+#define MREGS_TXFSTAT_LCOLL 0x20 /* TX late collision */
+#define MREGS_TXFSTAT_MRETRY 0x10 /* TX > 1 retries */
+#define MREGS_TXFSTAT_ORETRY 0x08 /* TX 1 retry */
+#define MREGS_TXFSTAT_PDEFER 0x04 /* TX pkt deferred */
+#define MREGS_TXFSTAT_CLOSS 0x02 /* TX carrier lost */
+#define MREGS_TXFSTAT_RERROR 0x01 /* TX retry error */
+
+#define MREGS_TXRCNT_EDEFER 0x80 /* TX Excess defers */
+#define MREGS_TXRCNT_CMASK 0x0f /* TX retry count */
+
+#define MREGS_RXFCNTL_LOWLAT 0x08 /* RX low latency */
+#define MREGS_RXFCNTL_AREJECT 0x04 /* RX addr match rej */
+#define MREGS_RXFCNTL_AUTOSTRIP 0x01 /* RX auto strip */
+
+#define MREGS_RXFSTAT_OVERFLOW 0x80 /* RX overflow */
+#define MREGS_RXFSTAT_LCOLL 0x40 /* RX late collision */
+#define MREGS_RXFSTAT_FERROR 0x20 /* RX framing error */
+#define MREGS_RXFSTAT_FCSERROR 0x10 /* RX FCS error */
+#define MREGS_RXFSTAT_RBCNT 0x0f /* RX msg byte count */
+
+#define MREGS_FFCNT_RX 0xf0 /* RX FIFO frame cnt */
+#define MREGS_FFCNT_TX 0x0f /* TX FIFO frame cnt */
+
+#define MREGS_IREG_JABBER 0x80 /* IRQ Jabber error */
+#define MREGS_IREG_BABBLE 0x40 /* IRQ Babble error */
+#define MREGS_IREG_COLL 0x20 /* IRQ Collision error */
+#define MREGS_IREG_RCCO 0x10 /* IRQ Collision cnt overflow */
+#define MREGS_IREG_RPKTCO 0x08 /* IRQ Runt packet count overflow */
+#define MREGS_IREG_MPKTCO 0x04 /* IRQ missed packet cnt overflow */
+#define MREGS_IREG_RXIRQ 0x02 /* IRQ RX'd a packet */
+#define MREGS_IREG_TXIRQ 0x01 /* IRQ TX'd a packet */
+
+#define MREGS_IMASK_BABBLE 0x40 /* IMASK Babble errors */
+#define MREGS_IMASK_COLL 0x20 /* IMASK Collision errors */
+#define MREGS_IMASK_MPKTCO 0x04 /* IMASK Missed pkt cnt overflow */
+#define MREGS_IMASK_RXIRQ 0x02 /* IMASK RX interrupts */
+#define MREGS_IMASK_TXIRQ 0x01 /* IMASK TX interrupts */
+
+#define MREGS_POLL_TXVALID 0x80 /* TX is valid */
+#define MREGS_POLL_TDTR 0x40 /* TX data transfer request */
+#define MREGS_POLL_RDTR 0x20 /* RX data transfer request */
+
+#define MREGS_BCONFIG_BSWAP 0x40 /* Byte Swap */
+#define MREGS_BCONFIG_4TS 0x00 /* 4byte transmit start point */
+#define MREGS_BCONFIG_16TS 0x10 /* 16byte transmit start point */
+#define MREGS_BCONFIG_64TS 0x20 /* 64byte transmit start point */
+#define MREGS_BCONFIG_112TS 0x30 /* 112byte transmit start point */
+#define MREGS_BCONFIG_RESET 0x01 /* SW-Reset the MACE */
+
+#define MREGS_FCONFIG_TXF8 0x00 /* TX fifo 8 write cycles */
+#define MREGS_FCONFIG_TXF32 0x80 /* TX fifo 32 write cycles */
+#define MREGS_FCONFIG_TXF16 0x40 /* TX fifo 16 write cycles */
+#define MREGS_FCONFIG_RXF64 0x20 /* RX fifo 64 write cycles */
+#define MREGS_FCONFIG_RXF32 0x10 /* RX fifo 32 write cycles */
+#define MREGS_FCONFIG_RXF16 0x00 /* RX fifo 16 write cycles */
+#define MREGS_FCONFIG_TFWU 0x08 /* TX fifo watermark update */
+#define MREGS_FCONFIG_RFWU 0x04 /* RX fifo watermark update */
+#define MREGS_FCONFIG_TBENAB 0x02 /* TX burst enable */
+#define MREGS_FCONFIG_RBENAB 0x01 /* RX burst enable */
+
+#define MREGS_MCONFIG_PROMISC 0x80 /* Promiscuous mode enable */
+#define MREGS_MCONFIG_TPDDISAB 0x40 /* TX 2part deferral enable */
+#define MREGS_MCONFIG_MBAENAB 0x20 /* Modified backoff enable */
+#define MREGS_MCONFIG_RPADISAB 0x08 /* RX physical addr disable */
+#define MREGS_MCONFIG_RBDISAB 0x04 /* RX broadcast disable */
+#define MREGS_MCONFIG_TXENAB 0x02 /* Enable transmitter */
+#define MREGS_MCONFIG_RXENAB 0x01 /* Enable receiver */
+
+#define MREGS_PLSCONFIG_TXMS 0x08 /* TX mode select */
+#define MREGS_PLSCONFIG_GPSI 0x06 /* Use GPSI connector */
+#define MREGS_PLSCONFIG_DAI 0x04 /* Use DAI connector */
+#define MREGS_PLSCONFIG_TP 0x02 /* Use TwistedPair connector */
+#define MREGS_PLSCONFIG_AUI 0x00 /* Use AUI connector */
+#define MREGS_PLSCONFIG_IOENAB 0x01 /* PLS I/O enable */
+
+#define MREGS_PHYCONFIG_LSTAT 0x80 /* Link status */
+#define MREGS_PHYCONFIG_LTESTDIS 0x40 /* Disable link test logic */
+#define MREGS_PHYCONFIG_RXPOLARITY 0x20 /* RX polarity */
+#define MREGS_PHYCONFIG_APCDISAB 0x10 /* AutoPolarityCorrect disab */
+#define MREGS_PHYCONFIG_LTENAB 0x08 /* Select low threshold */
+#define MREGS_PHYCONFIG_AUTO 0x04 /* Connector port auto-sel */
+#define MREGS_PHYCONFIG_RWU 0x02 /* Remote WakeUp */
+#define MREGS_PHYCONFIG_AW 0x01 /* Auto Wakeup */
+
+#define MREGS_IACONFIG_ACHNGE 0x80 /* Do address change */
+#define MREGS_IACONFIG_PARESET 0x04 /* Physical address reset */
+#define MREGS_IACONFIG_LARESET 0x02 /* Logical address reset */
+
+#define MREGS_UTEST_RTRENAB 0x80 /* Enable resv test register */
+#define MREGS_UTEST_RTRDISAB 0x40 /* Disab resv test register */
+#define MREGS_UTEST_RPACCEPT 0x20 /* Accept runt packets */
+#define MREGS_UTEST_FCOLL 0x10 /* Force collision status */
+#define MREGS_UTEST_FCSENAB 0x08 /* Enable FCS on RX */
+#define MREGS_UTEST_INTLOOPM 0x06 /* Intern lpback w/MENDEC */
+#define MREGS_UTEST_INTLOOP 0x04 /* Intern lpback */
+#define MREGS_UTEST_EXTLOOP 0x02 /* Extern lpback */
+#define MREGS_UTEST_NOLOOP 0x00 /* No loopback */
+
+struct qe_rxd {
+ u32 rx_flags;
+ u32 rx_addr;
+};
+
+#define RXD_OWN 0x80000000 /* Ownership. */
+#define RXD_UPDATE 0x10000000 /* Being Updated? */
+#define RXD_LENGTH 0x000007ff /* Packet Length. */
+
+struct qe_txd {
+ u32 tx_flags;
+ u32 tx_addr;
+};
+
+#define TXD_OWN 0x80000000 /* Ownership. */
+#define TXD_SOP 0x40000000 /* Start Of Packet */
+#define TXD_EOP 0x20000000 /* End Of Packet */
+#define TXD_UPDATE 0x10000000 /* Being Updated? */
+#define TXD_LENGTH 0x000007ff /* Packet Length. */
+
+#define TX_RING_MAXSIZE 256
+#define RX_RING_MAXSIZE 256
+
+#define TX_RING_SIZE 16
+#define RX_RING_SIZE 16
+
+#define NEXT_RX(num) (((num) + 1) & (RX_RING_MAXSIZE - 1))
+#define NEXT_TX(num) (((num) + 1) & (TX_RING_MAXSIZE - 1))
+#define PREV_RX(num) (((num) - 1) & (RX_RING_MAXSIZE - 1))
+#define PREV_TX(num) (((num) - 1) & (TX_RING_MAXSIZE - 1))
+
+#define TX_BUFFS_AVAIL(qp) \
+ (((qp)->tx_old <= (qp)->tx_new) ? \
+ (qp)->tx_old + (TX_RING_SIZE - 1) - (qp)->tx_new : \
+ (qp)->tx_old - (qp)->tx_new - 1)
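+
+/* One slot is always left unused so a full ring and an empty ring are
+ * distinguishable: an idle ring (tx_old == tx_new) reports
+ * TX_RING_SIZE - 1 == 15 free slots, a completely full one reports 0.
+ */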
+
+struct qe_init_block {
+ struct qe_rxd qe_rxd[RX_RING_MAXSIZE];
+ struct qe_txd qe_txd[TX_RING_MAXSIZE];
+};
+
+#define qib_offset(mem, elem) \
+((__u32)((unsigned long)(&(((struct qe_init_block *)0)->mem[elem]))))
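+
+/* This is the usual offsetof() idiom: the byte offset of mem[elem]
+ * within a struct qe_init_block based at address 0; qebuf_offset
+ * below does the same for struct sunqe_buffers.
+ */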
+
+struct sunqe;
+
+struct sunqec {
+ void __iomem *gregs; /* QEC Global Registers */
+ struct sunqe *qes[4]; /* Each child MACE */
+ unsigned int qec_bursts; /* Support burst sizes */
+ struct platform_device *op; /* QEC's OF device */
+ struct sunqec *next_module; /* List of all QECs in system */
+};
+
+#define PKT_BUF_SZ 1664
+#define RXD_PKT_SZ 1664
+
+struct sunqe_buffers {
+ u8 tx_buf[TX_RING_SIZE][PKT_BUF_SZ];
+ u8 __pad[2];
+ u8 rx_buf[RX_RING_SIZE][PKT_BUF_SZ];
+};
+
+#define qebuf_offset(mem, elem) \
+((__u32)((unsigned long)(&(((struct sunqe_buffers *)0)->mem[elem][0]))))
+
+struct sunqe {
+ void __iomem *qcregs; /* QEC per-channel Registers */
+ void __iomem *mregs; /* Per-channel MACE Registers */
+ struct qe_init_block *qe_block; /* RX and TX descriptors */
+ __u32 qblock_dvma; /* RX and TX descriptors */
+ spinlock_t lock; /* Protects txfull state */
+ int rx_new, rx_old; /* RX ring extents */
+ int tx_new, tx_old; /* TX ring extents */
+ struct sunqe_buffers *buffers; /* CPU visible address. */
+ __u32 buffers_dvma; /* DVMA visible address. */
+ struct sunqec *parent;
+ u8 mconfig; /* Base MACE mconfig value */
+ struct platform_device *op; /* QE's OF device struct */
+ struct net_device *dev; /* QE's netdevice struct */
+ int channel; /* Who am I? */
+};
+
+#endif /* !(_SUNQE_H) */
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
new file mode 100644
index 000000000000..8c6c059f3489
--- /dev/null
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -0,0 +1,1284 @@
+/* sunvnet.c: Sun LDOM Virtual Network Driver.
+ *
+ * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/mutex.h>
+
+#include <asm/vio.h>
+#include <asm/ldc.h>
+
+#include "sunvnet.h"
+
+#define DRV_MODULE_NAME "sunvnet"
+#define DRV_MODULE_VERSION "1.0"
+#define DRV_MODULE_RELDATE "June 25, 2007"
+
+static char version[] __devinitdata =
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_DESCRIPTION("Sun LDOM virtual network driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+/* Ordered from largest major to lowest */
+static struct vio_version vnet_versions[] = {
+ { .major = 1, .minor = 0 },
+};
+
+static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
+{
+ return vio_dring_avail(dr, VNET_TX_RING_SIZE);
+}
+
+static int vnet_handle_unknown(struct vnet_port *port, void *arg)
+{
+ struct vio_msg_tag *pkt = arg;
+
+ pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
+ pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
+ pr_err("Resetting connection\n");
+
+ ldc_disconnect(port->vio.lp);
+
+ return -ECONNRESET;
+}
+
+static int vnet_send_attr(struct vio_driver_state *vio)
+{
+ struct vnet_port *port = to_vnet_port(vio);
+ struct net_device *dev = port->vp->dev;
+ struct vio_net_attr_info pkt;
+ int i;
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.tag.type = VIO_TYPE_CTRL;
+ pkt.tag.stype = VIO_SUBTYPE_INFO;
+ pkt.tag.stype_env = VIO_ATTR_INFO;
+ pkt.tag.sid = vio_send_sid(vio);
+ pkt.xfer_mode = VIO_DRING_MODE;
+ pkt.addr_type = VNET_ADDR_ETHERMAC;
+ pkt.ack_freq = 0;
+ for (i = 0; i < 6; i++)
+ pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
+ pkt.mtu = ETH_FRAME_LEN;
+
+ viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
+ "ackfreq[%u] mtu[%llu]\n",
+ pkt.xfer_mode, pkt.addr_type,
+ (unsigned long long) pkt.addr,
+ pkt.ack_freq,
+ (unsigned long long) pkt.mtu);
+
+ return vio_ldc_send(vio, &pkt, sizeof(pkt));
+}
+
+static int handle_attr_info(struct vio_driver_state *vio,
+ struct vio_net_attr_info *pkt)
+{
+ viodbg(HS, "GOT NET ATTR INFO xmode[0x%x] atype[0x%x] addr[%llx] "
+ "ackfreq[%u] mtu[%llu]\n",
+ pkt->xfer_mode, pkt->addr_type,
+ (unsigned long long) pkt->addr,
+ pkt->ack_freq,
+ (unsigned long long) pkt->mtu);
+
+ pkt->tag.sid = vio_send_sid(vio);
+
+ if (pkt->xfer_mode != VIO_DRING_MODE ||
+ pkt->addr_type != VNET_ADDR_ETHERMAC ||
+ pkt->mtu != ETH_FRAME_LEN) {
+ viodbg(HS, "SEND NET ATTR NACK\n");
+
+ pkt->tag.stype = VIO_SUBTYPE_NACK;
+
+ (void) vio_ldc_send(vio, pkt, sizeof(*pkt));
+
+ return -ECONNRESET;
+ } else {
+ viodbg(HS, "SEND NET ATTR ACK\n");
+
+ pkt->tag.stype = VIO_SUBTYPE_ACK;
+
+ return vio_ldc_send(vio, pkt, sizeof(*pkt));
+ }
+}
+
+static int handle_attr_ack(struct vio_driver_state *vio,
+ struct vio_net_attr_info *pkt)
+{
+ viodbg(HS, "GOT NET ATTR ACK\n");
+
+ return 0;
+}
+
+static int handle_attr_nack(struct vio_driver_state *vio,
+ struct vio_net_attr_info *pkt)
+{
+ viodbg(HS, "GOT NET ATTR NACK\n");
+
+ return -ECONNRESET;
+}
+
+static int vnet_handle_attr(struct vio_driver_state *vio, void *arg)
+{
+ struct vio_net_attr_info *pkt = arg;
+
+ switch (pkt->tag.stype) {
+ case VIO_SUBTYPE_INFO:
+ return handle_attr_info(vio, pkt);
+
+ case VIO_SUBTYPE_ACK:
+ return handle_attr_ack(vio, pkt);
+
+ case VIO_SUBTYPE_NACK:
+ return handle_attr_nack(vio, pkt);
+
+ default:
+ return -ECONNRESET;
+ }
+}
+
+static void vnet_handshake_complete(struct vio_driver_state *vio)
+{
+ struct vio_dring_state *dr;
+
+ dr = &vio->drings[VIO_DRIVER_RX_RING];
+ dr->snd_nxt = dr->rcv_nxt = 1;
+
+ dr = &vio->drings[VIO_DRIVER_TX_RING];
+ dr->snd_nxt = dr->rcv_nxt = 1;
+}
+
+/* The hypervisor interface that implements copying to/from imported
+ * memory from another domain requires that copies are done to 8-byte
+ * aligned buffers, and that the lengths of such copies are also 8-byte
+ * multiples.
+ *
+ * So we align skb->data to an 8-byte multiple and pad out the data
+ * area so we can round the copy length up to the next multiple of
+ * 8 for the copy.
+ *
+ * The transmitter puts the actual start of the packet 6 bytes into
+ * the buffer it sends over, so that the IP headers after the ethernet
+ * header are aligned properly. These 6 bytes are not in the descriptor
+ * length; they are simply implied. This offset is represented using
+ * the VNET_PACKET_SKIP macro.
+ */
+static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
+ unsigned int len)
+{
+ struct sk_buff *skb = netdev_alloc_skb(dev, len + VNET_PACKET_SKIP + 8 + 8);
+ unsigned long addr, off;
+
+ if (unlikely(!skb))
+ return NULL;
+
+ addr = (unsigned long) skb->data;
+ off = ((addr + 7UL) & ~7UL) - addr;
+ if (off)
+ skb_reserve(skb, off);
+
+ return skb;
+}
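+
+/* For example (addresses purely illustrative): if netdev_alloc_skb()
+ * returned data at 0x1005, then off = ((0x1005 + 7) & ~7) - 0x1005 = 3
+ * and we reserve 3 bytes to land on the 8-byte boundary 0x1008.
+ */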
+
+static int vnet_rx_one(struct vnet_port *port, unsigned int len,
+ struct ldc_trans_cookie *cookies, int ncookies)
+{
+ struct net_device *dev = port->vp->dev;
+ unsigned int copy_len;
+ struct sk_buff *skb;
+ int err;
+
+ err = -EMSGSIZE;
+ if (unlikely(len < ETH_ZLEN || len > ETH_FRAME_LEN)) {
+ dev->stats.rx_length_errors++;
+ goto out_dropped;
+ }
+
+ skb = alloc_and_align_skb(dev, len);
+ err = -ENOMEM;
+ if (unlikely(!skb)) {
+ dev->stats.rx_missed_errors++;
+ goto out_dropped;
+ }
+
+ copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
+ skb_put(skb, copy_len);
+ err = ldc_copy(port->vio.lp, LDC_COPY_IN,
+ skb->data, copy_len, 0,
+ cookies, ncookies);
+ if (unlikely(err < 0)) {
+ dev->stats.rx_frame_errors++;
+ goto out_free_skb;
+ }
+
+ skb_pull(skb, VNET_PACKET_SKIP);
+ skb_trim(skb, len);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+
+ netif_rx(skb);
+
+ return 0;
+
+out_free_skb:
+ kfree_skb(skb);
+
+out_dropped:
+ dev->stats.rx_dropped++;
+ return err;
+}
+
+static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
+ u32 start, u32 end, u8 vio_dring_state)
+{
+ struct vio_dring_data hdr = {
+ .tag = {
+ .type = VIO_TYPE_DATA,
+ .stype = VIO_SUBTYPE_ACK,
+ .stype_env = VIO_DRING_DATA,
+ .sid = vio_send_sid(&port->vio),
+ },
+ .dring_ident = dr->ident,
+ .start_idx = start,
+ .end_idx = end,
+ .state = vio_dring_state,
+ };
+ int err, delay;
+
+ hdr.seq = dr->snd_nxt;
+ delay = 1;
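+ /* Retry while the LDC channel keeps returning -EAGAIN, backing
+ * off exponentially: 1, 2, 4, ... microseconds, capped at 128.
+ */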
+ do {
+ err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
+ if (err > 0) {
+ dr->snd_nxt++;
+ break;
+ }
+ udelay(delay);
+ if ((delay <<= 1) > 128)
+ delay = 128;
+ } while (err == -EAGAIN);
+
+ return err;
+}
+
+static u32 next_idx(u32 idx, struct vio_dring_state *dr)
+{
+ if (++idx == dr->num_entries)
+ idx = 0;
+ return idx;
+}
+
+static u32 prev_idx(u32 idx, struct vio_dring_state *dr)
+{
+ if (idx == 0)
+ idx = dr->num_entries - 1;
+ else
+ idx--;
+
+ return idx;
+}
+
+static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
+ struct vio_dring_state *dr,
+ u32 index)
+{
+ struct vio_net_desc *desc = port->vio.desc_buf;
+ int err;
+
+ err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
+ (index * dr->entry_size),
+ dr->cookies, dr->ncookies);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return desc;
+}
+
+static int put_rx_desc(struct vnet_port *port,
+ struct vio_dring_state *dr,
+ struct vio_net_desc *desc,
+ u32 index)
+{
+ int err;
+
+ err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
+ (index * dr->entry_size),
+ dr->cookies, dr->ncookies);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int vnet_walk_rx_one(struct vnet_port *port,
+ struct vio_dring_state *dr,
+ u32 index, int *needs_ack)
+{
+ struct vio_net_desc *desc = get_rx_desc(port, dr, index);
+ struct vio_driver_state *vio = &port->vio;
+ int err;
+
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
+ desc->hdr.state, desc->hdr.ack,
+ desc->size, desc->ncookies,
+ desc->cookies[0].cookie_addr,
+ desc->cookies[0].cookie_size);
+
+ if (desc->hdr.state != VIO_DESC_READY)
+ return 1;
+ err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
+ if (err == -ECONNRESET)
+ return err;
+ desc->hdr.state = VIO_DESC_DONE;
+ err = put_rx_desc(port, dr, desc, index);
+ if (err < 0)
+ return err;
+ *needs_ack = desc->hdr.ack;
+ return 0;
+}
+
+static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
+ u32 start, u32 end)
+{
+ struct vio_driver_state *vio = &port->vio;
+ int ack_start = -1, ack_end = -1;
+
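+ /* An end index of (u32) -1 means "no end given": sweep the whole
+ * ring. Otherwise turn the inclusive end into an exclusive bound.
+ */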
+ end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);
+
+ viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);
+
+ while (start != end) {
+ int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);
+ if (err == -ECONNRESET)
+ return err;
+ if (err != 0)
+ break;
+ if (ack_start == -1)
+ ack_start = start;
+ ack_end = start;
+ start = next_idx(start, dr);
+ if (ack && start != end) {
+ err = vnet_send_ack(port, dr, ack_start, ack_end,
+ VIO_DRING_ACTIVE);
+ if (err == -ECONNRESET)
+ return err;
+ ack_start = -1;
+ }
+ }
+ if (unlikely(ack_start == -1))
+ ack_start = ack_end = prev_idx(start, dr);
+ return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED);
+}
+
+static int vnet_rx(struct vnet_port *port, void *msgbuf)
+{
+ struct vio_dring_data *pkt = msgbuf;
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
+ struct vio_driver_state *vio = &port->vio;
+
+ viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
+ pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);
+
+ if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
+ return 0;
+ if (unlikely(pkt->seq != dr->rcv_nxt)) {
+ pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
+ pkt->seq, dr->rcv_nxt);
+ return 0;
+ }
+
+ dr->rcv_nxt++;
+
+ /* XXX Validate pkt->start_idx and pkt->end_idx XXX */
+
+ return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx);
+}
+
+static int idx_is_pending(struct vio_dring_state *dr, u32 end)
+{
+ u32 idx = dr->cons;
+ int found = 0;
+
+ while (idx != dr->prod) {
+ if (idx == end) {
+ found = 1;
+ break;
+ }
+ idx = next_idx(idx, dr);
+ }
+ return found;
+}
+
+static int vnet_ack(struct vnet_port *port, void *msgbuf)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ struct vio_dring_data *pkt = msgbuf;
+ struct net_device *dev;
+ struct vnet *vp;
+ u32 end;
+
+ if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
+ return 0;
+
+ end = pkt->end_idx;
+ if (unlikely(!idx_is_pending(dr, end)))
+ return 0;
+
+ dr->cons = next_idx(end, dr);
+
+ vp = port->vp;
+ dev = vp->dev;
+ if (unlikely(netif_queue_stopped(dev) &&
+ vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
+ return 1;
+
+ return 0;
+}
+
+static int vnet_nack(struct vnet_port *port, void *msgbuf)
+{
+ /* XXX just reset or similar XXX */
+ return 0;
+}
+
+static int handle_mcast(struct vnet_port *port, void *msgbuf)
+{
+ struct vio_net_mcast_info *pkt = msgbuf;
+
+ if (pkt->tag.stype != VIO_SUBTYPE_ACK)
+ pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
+ port->vp->dev->name,
+ pkt->tag.type,
+ pkt->tag.stype,
+ pkt->tag.stype_env,
+ pkt->tag.sid);
+
+ return 0;
+}
+
+static void maybe_tx_wakeup(struct vnet *vp)
+{
+ struct net_device *dev = vp->dev;
+
+ netif_tx_lock(dev);
+ if (likely(netif_queue_stopped(dev))) {
+ struct vnet_port *port;
+ int wake = 1;
+
+ list_for_each_entry(port, &vp->port_list, list) {
+ struct vio_dring_state *dr;
+
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ if (vnet_tx_dring_avail(dr) <
+ VNET_TX_WAKEUP_THRESH(dr)) {
+ wake = 0;
+ break;
+ }
+ }
+ if (wake)
+ netif_wake_queue(dev);
+ }
+ netif_tx_unlock(dev);
+}
+
+static void vnet_event(void *arg, int event)
+{
+ struct vnet_port *port = arg;
+ struct vio_driver_state *vio = &port->vio;
+ unsigned long flags;
+ int tx_wakeup, err;
+
+ spin_lock_irqsave(&vio->lock, flags);
+
+ if (unlikely(event == LDC_EVENT_RESET ||
+ event == LDC_EVENT_UP)) {
+ vio_link_state_change(vio, event);
+ spin_unlock_irqrestore(&vio->lock, flags);
+
+ if (event == LDC_EVENT_RESET)
+ vio_port_up(vio);
+ return;
+ }
+
+ if (unlikely(event != LDC_EVENT_DATA_READY)) {
+ pr_warning("Unexpected LDC event %d\n", event);
+ spin_unlock_irqrestore(&vio->lock, flags);
+ return;
+ }
+
+ tx_wakeup = err = 0;
+ while (1) {
+ union {
+ struct vio_msg_tag tag;
+ u64 raw[8];
+ } msgbuf;
+
+ err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
+ if (unlikely(err < 0)) {
+ if (err == -ECONNRESET)
+ vio_conn_reset(vio);
+ break;
+ }
+ if (err == 0)
+ break;
+ viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
+ msgbuf.tag.type,
+ msgbuf.tag.stype,
+ msgbuf.tag.stype_env,
+ msgbuf.tag.sid);
+ err = vio_validate_sid(vio, &msgbuf.tag);
+ if (err < 0)
+ break;
+
+ if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
+ if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
+ err = vnet_rx(port, &msgbuf);
+ } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
+ err = vnet_ack(port, &msgbuf);
+ if (err > 0)
+ tx_wakeup |= err;
+ } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
+ err = vnet_nack(port, &msgbuf);
+ }
+ } else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
+ if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
+ err = handle_mcast(port, &msgbuf);
+ else
+ err = vio_control_pkt_engine(vio, &msgbuf);
+ if (err)
+ break;
+ } else {
+ err = vnet_handle_unknown(port, &msgbuf);
+ }
+ if (err == -ECONNRESET)
+ break;
+ }
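+ /* Drop the vio lock before maybe_tx_wakeup(), which takes
+ * netif_tx_lock(), but keep interrupts disabled until we are
+ * completely done: hence the unlock/irq-restore split below.
+ */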
+ spin_unlock(&vio->lock);
+ if (unlikely(tx_wakeup && err != -ECONNRESET))
+ maybe_tx_wakeup(port->vp);
+ local_irq_restore(flags);
+}
+
+static int __vnet_tx_trigger(struct vnet_port *port)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ struct vio_dring_data hdr = {
+ .tag = {
+ .type = VIO_TYPE_DATA,
+ .stype = VIO_SUBTYPE_INFO,
+ .stype_env = VIO_DRING_DATA,
+ .sid = vio_send_sid(&port->vio),
+ },
+ .dring_ident = dr->ident,
+ .start_idx = dr->prod,
+ .end_idx = (u32) -1,
+ };
+ int err, delay;
+
+ hdr.seq = dr->snd_nxt;
+ delay = 1;
+ do {
+ err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
+ if (err > 0) {
+ dr->snd_nxt++;
+ break;
+ }
+ udelay(delay);
+ if ((delay <<= 1) > 128)
+ delay = 128;
+ } while (err == -EAGAIN);
+
+ return err;
+}
+
+static struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
+{
+ unsigned int hash = vnet_hashfn(skb->data);
+ struct hlist_head *hp = &vp->port_hash[hash];
+ struct hlist_node *n;
+ struct vnet_port *port;
+
+ hlist_for_each_entry(port, n, hp, hash) {
+ if (!compare_ether_addr(port->raddr, skb->data))
+ return port;
+ }
+ port = NULL;
+ if (!list_empty(&vp->port_list))
+ port = list_entry(vp->port_list.next, struct vnet_port, list);
+
+ return port;
+}
+
+static struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
+{
+ struct vnet_port *ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vp->lock, flags);
+ ret = __tx_port_find(vp, skb);
+ spin_unlock_irqrestore(&vp->lock, flags);
+
+ return ret;
+}
+
+static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct vnet *vp = netdev_priv(dev);
+ struct vnet_port *port = tx_port_find(vp, skb);
+ struct vio_dring_state *dr;
+ struct vio_net_desc *d;
+ unsigned long flags;
+ unsigned int len;
+ void *tx_buf;
+ int i, err;
+
+ if (unlikely(!port))
+ goto out_dropped;
+
+ spin_lock_irqsave(&port->vio.lock, flags);
+
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
+ if (!netif_queue_stopped(dev)) {
+ netif_stop_queue(dev);
+
+ /* This is a hard error, log it. */
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ dev->stats.tx_errors++;
+ }
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ d = vio_dring_cur(dr);
+
+ tx_buf = port->tx_bufs[dr->prod].buf;
+ skb_copy_from_linear_data(skb, tx_buf + VNET_PACKET_SKIP, skb->len);
+
+ len = skb->len;
+ if (len < ETH_ZLEN) {
+ len = ETH_ZLEN;
+ memset(tx_buf + VNET_PACKET_SKIP + skb->len, 0, len - skb->len);
+ }
+
+ d->hdr.ack = VIO_ACK_ENABLE;
+ d->size = len;
+ d->ncookies = port->tx_bufs[dr->prod].ncookies;
+ for (i = 0; i < d->ncookies; i++)
+ d->cookies[i] = port->tx_bufs[dr->prod].cookies[i];
+
+ /* This has to be a non-SMP write barrier because we are writing
+ * to memory which is shared with the peer LDOM.
+ */
+ wmb();
+
+ d->hdr.state = VIO_DESC_READY;
+
+ err = __vnet_tx_trigger(port);
+ if (unlikely(err < 0)) {
+ netdev_info(dev, "TX trigger error %d\n", err);
+ d->hdr.state = VIO_DESC_FREE;
+ dev->stats.tx_carrier_errors++;
+ goto out_dropped_unlock;
+ }
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
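+ /* Stop the queue when the ring is nearly full, then re-check: an
+ * ACK may have freed entries between the availability test and
+ * the stop, in which case we wake the queue right back up.
+ */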
+ if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
+ netif_stop_queue(dev);
+ if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
+ netif_wake_queue(dev);
+ }
+
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+
+out_dropped_unlock:
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+
+out_dropped:
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+static void vnet_tx_timeout(struct net_device *dev)
+{
+ /* XXX Implement me XXX */
+}
+
+static int vnet_open(struct net_device *dev)
+{
+ netif_carrier_on(dev);
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int vnet_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ return 0;
+}
+
+static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
+{
+ struct vnet_mcast_entry *m;
+
+ for (m = vp->mcast_list; m; m = m->next) {
+ if (!memcmp(m->addr, addr, ETH_ALEN))
+ return m;
+ }
+ return NULL;
+}
+
+static void __update_mc_list(struct vnet *vp, struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ struct vnet_mcast_entry *m;
+
+ m = __vnet_mc_find(vp, ha->addr);
+ if (m) {
+ m->hit = 1;
+ continue;
+ }
+
+ m = kzalloc(sizeof(*m), GFP_ATOMIC);
+ if (!m)
+ continue;
+ memcpy(m->addr, ha->addr, ETH_ALEN);
+ m->hit = 1;
+
+ m->next = vp->mcast_list;
+ vp->mcast_list = m;
+ }
+}
+
+static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
+{
+ struct vio_net_mcast_info info;
+ struct vnet_mcast_entry *m, **pp;
+ int n_addrs;
+
+ memset(&info, 0, sizeof(info));
+
+ info.tag.type = VIO_TYPE_CTRL;
+ info.tag.stype = VIO_SUBTYPE_INFO;
+ info.tag.stype_env = VNET_MCAST_INFO;
+ info.tag.sid = vio_send_sid(&port->vio);
+ info.set = 1;
+
+ n_addrs = 0;
+ for (m = vp->mcast_list; m; m = m->next) {
+ if (m->sent)
+ continue;
+ m->sent = 1;
+ memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
+ m->addr, ETH_ALEN);
+ if (++n_addrs == VNET_NUM_MCAST) {
+ info.count = n_addrs;
+
+ (void) vio_ldc_send(&port->vio, &info,
+ sizeof(info));
+ n_addrs = 0;
+ }
+ }
+ if (n_addrs) {
+ info.count = n_addrs;
+ (void) vio_ldc_send(&port->vio, &info, sizeof(info));
+ }
+
+ info.set = 0;
+
+ n_addrs = 0;
+ pp = &vp->mcast_list;
+ while ((m = *pp) != NULL) {
+ if (m->hit) {
+ m->hit = 0;
+ pp = &m->next;
+ continue;
+ }
+
+ memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
+ m->addr, ETH_ALEN);
+ if (++n_addrs == VNET_NUM_MCAST) {
+ info.count = n_addrs;
+ (void) vio_ldc_send(&port->vio, &info,
+ sizeof(info));
+ n_addrs = 0;
+ }
+
+ *pp = m->next;
+ kfree(m);
+ }
+ if (n_addrs) {
+ info.count = n_addrs;
+ (void) vio_ldc_send(&port->vio, &info, sizeof(info));
+ }
+}
+
+static void vnet_set_rx_mode(struct net_device *dev)
+{
+ struct vnet *vp = netdev_priv(dev);
+ struct vnet_port *port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vp->lock, flags);
+ if (!list_empty(&vp->port_list)) {
+ port = list_entry(vp->port_list.next, struct vnet_port, list);
+
+ if (port->switch_port) {
+ __update_mc_list(vp, dev);
+ __send_mc_list(vp, port);
+ }
+ }
+ spin_unlock_irqrestore(&vp->lock, flags);
+}
+
+static int vnet_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (new_mtu != ETH_DATA_LEN)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static int vnet_set_mac_addr(struct net_device *dev, void *p)
+{
+ return -EINVAL;
+}
+
+static void vnet_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_MODULE_NAME);
+ strcpy(info->version, DRV_MODULE_VERSION);
+}
+
+static u32 vnet_get_msglevel(struct net_device *dev)
+{
+ struct vnet *vp = netdev_priv(dev);
+ return vp->msg_enable;
+}
+
+static void vnet_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct vnet *vp = netdev_priv(dev);
+ vp->msg_enable = value;
+}
+
+static const struct ethtool_ops vnet_ethtool_ops = {
+ .get_drvinfo = vnet_get_drvinfo,
+ .get_msglevel = vnet_get_msglevel,
+ .set_msglevel = vnet_set_msglevel,
+ .get_link = ethtool_op_get_link,
+};
+
+static void vnet_port_free_tx_bufs(struct vnet_port *port)
+{
+ struct vio_dring_state *dr;
+ int i;
+
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ if (dr->base) {
+ ldc_free_exp_dring(port->vio.lp, dr->base,
+ (dr->entry_size * dr->num_entries),
+ dr->cookies, dr->ncookies);
+ dr->base = NULL;
+ dr->entry_size = 0;
+ dr->num_entries = 0;
+ dr->pending = 0;
+ dr->ncookies = 0;
+ }
+
+ for (i = 0; i < VNET_TX_RING_SIZE; i++) {
+ void *buf = port->tx_bufs[i].buf;
+
+ if (!buf)
+ continue;
+
+ ldc_unmap(port->vio.lp,
+ port->tx_bufs[i].cookies,
+ port->tx_bufs[i].ncookies);
+
+ kfree(buf);
+ port->tx_bufs[i].buf = NULL;
+ }
+}
+
+static int __devinit vnet_port_alloc_tx_bufs(struct vnet_port *port)
+{
+ struct vio_dring_state *dr;
+ unsigned long len;
+ int i, err, ncookies;
+ void *dring;
+
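+ /* Per the LDC copy rules (see the comment above
+ * alloc_and_align_skb), TX buffers must be 8-byte aligned and
+ * are copied in 8-byte multiples: hence the 8 bytes of slack in
+ * the allocation and the rounded-up map_len.
+ */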
+ for (i = 0; i < VNET_TX_RING_SIZE; i++) {
+ void *buf = kzalloc(ETH_FRAME_LEN + 8, GFP_KERNEL);
+ int map_len = (ETH_FRAME_LEN + 7) & ~7;
+
+ err = -ENOMEM;
+ if (!buf) {
+ pr_err("TX buffer allocation failure\n");
+ goto err_out;
+ }
+ err = -EFAULT;
+ if ((unsigned long)buf & (8UL - 1)) {
+ pr_err("TX buffer misaligned\n");
+ kfree(buf);
+ goto err_out;
+ }
+
+ err = ldc_map_single(port->vio.lp, buf, map_len,
+ port->tx_bufs[i].cookies, 2,
+ (LDC_MAP_SHADOW |
+ LDC_MAP_DIRECT |
+ LDC_MAP_RW));
+ if (err < 0) {
+ kfree(buf);
+ goto err_out;
+ }
+ port->tx_bufs[i].buf = buf;
+ port->tx_bufs[i].ncookies = err;
+ }
+
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+
+ len = (VNET_TX_RING_SIZE *
+ (sizeof(struct vio_net_desc) +
+ (sizeof(struct ldc_trans_cookie) * 2)));
+
+ ncookies = VIO_MAX_RING_COOKIES;
+ dring = ldc_alloc_exp_dring(port->vio.lp, len,
+ dr->cookies, &ncookies,
+ (LDC_MAP_SHADOW |
+ LDC_MAP_DIRECT |
+ LDC_MAP_RW));
+ if (IS_ERR(dring)) {
+ err = PTR_ERR(dring);
+ goto err_out;
+ }
+
+ dr->base = dring;
+ dr->entry_size = (sizeof(struct vio_net_desc) +
+ (sizeof(struct ldc_trans_cookie) * 2));
+ dr->num_entries = VNET_TX_RING_SIZE;
+ dr->prod = dr->cons = 0;
+ dr->pending = VNET_TX_RING_SIZE;
+ dr->ncookies = ncookies;
+
+ return 0;
+
+err_out:
+ vnet_port_free_tx_bufs(port);
+
+ return err;
+}
+
+static LIST_HEAD(vnet_list);
+static DEFINE_MUTEX(vnet_list_mutex);
+
+static const struct net_device_ops vnet_ops = {
+ .ndo_open = vnet_open,
+ .ndo_stop = vnet_close,
+ .ndo_set_rx_mode = vnet_set_rx_mode,
+ .ndo_set_mac_address = vnet_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = vnet_tx_timeout,
+ .ndo_change_mtu = vnet_change_mtu,
+ .ndo_start_xmit = vnet_start_xmit,
+};
+
+static struct vnet * __devinit vnet_new(const u64 *local_mac)
+{
+ struct net_device *dev;
+ struct vnet *vp;
+ int err, i;
+
+ dev = alloc_etherdev(sizeof(*vp));
+ if (!dev) {
+ pr_err("Etherdev alloc failed, aborting\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
+
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ vp = netdev_priv(dev);
+
+ spin_lock_init(&vp->lock);
+ vp->dev = dev;
+
+ INIT_LIST_HEAD(&vp->port_list);
+ for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&vp->port_hash[i]);
+ INIT_LIST_HEAD(&vp->list);
+ vp->local_mac = *local_mac;
+
+ dev->netdev_ops = &vnet_ops;
+ dev->ethtool_ops = &vnet_ethtool_ops;
+ dev->watchdog_timeo = VNET_TX_TIMEOUT;
+
+ err = register_netdev(dev);
+ if (err) {
+ pr_err("Cannot register net device, aborting\n");
+ goto err_out_free_dev;
+ }
+
+ netdev_info(dev, "Sun LDOM vnet %pM\n", dev->dev_addr);
+
+ list_add(&vp->list, &vnet_list);
+
+ return vp;
+
+err_out_free_dev:
+ free_netdev(dev);
+
+ return ERR_PTR(err);
+}
+
+static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac)
+{
+ struct vnet *iter, *vp;
+
+ mutex_lock(&vnet_list_mutex);
+ vp = NULL;
+ list_for_each_entry(iter, &vnet_list, list) {
+ if (iter->local_mac == *local_mac) {
+ vp = iter;
+ break;
+ }
+ }
+ if (!vp)
+ vp = vnet_new(local_mac);
+ mutex_unlock(&vnet_list_mutex);
+
+ return vp;
+}
+
+static const char *local_mac_prop = "local-mac-address";
+
+static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp,
+ u64 port_node)
+{
+ const u64 *local_mac = NULL;
+ u64 a;
+
+ mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) {
+ u64 target = mdesc_arc_target(hp, a);
+ const char *name;
+
+ name = mdesc_get_property(hp, target, "name", NULL);
+ if (!name || strcmp(name, "network"))
+ continue;
+
+ local_mac = mdesc_get_property(hp, target,
+ local_mac_prop, NULL);
+ if (local_mac)
+ break;
+ }
+ if (!local_mac)
+ return ERR_PTR(-ENODEV);
+
+ return vnet_find_or_create(local_mac);
+}
+
+static struct ldc_channel_config vnet_ldc_cfg = {
+ .event = vnet_event,
+ .mtu = 64,
+ .mode = LDC_MODE_UNRELIABLE,
+};
+
+static struct vio_driver_ops vnet_vio_ops = {
+ .send_attr = vnet_send_attr,
+ .handle_attr = vnet_handle_attr,
+ .handshake_complete = vnet_handshake_complete,
+};
+
+static void __devinit print_version(void)
+{
+ printk_once(KERN_INFO "%s", version);
+}
+
+static const char *remote_macaddr_prop = "remote-mac-address";
+
+static int __devinit vnet_port_probe(struct vio_dev *vdev,
+ const struct vio_device_id *id)
+{
+ struct mdesc_handle *hp;
+ struct vnet_port *port;
+ unsigned long flags;
+ struct vnet *vp;
+ const u64 *rmac;
+ int len, i, err, switch_port;
+
+ print_version();
+
+ hp = mdesc_grab();
+
+ vp = vnet_find_parent(hp, vdev->mp);
+ if (IS_ERR(vp)) {
+ pr_err("Cannot find port parent vnet\n");
+ err = PTR_ERR(vp);
+ goto err_out_put_mdesc;
+ }
+
+ rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
+ err = -ENODEV;
+ if (!rmac) {
+ pr_err("Port lacks %s property\n", remote_macaddr_prop);
+ goto err_out_put_mdesc;
+ }
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!port) {
+ pr_err("Cannot allocate vnet_port\n");
+ goto err_out_put_mdesc;
+ }
+
+ for (i = 0; i < ETH_ALEN; i++)
+ port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff;
+
+ port->vp = vp;
+
+ err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK,
+ vnet_versions, ARRAY_SIZE(vnet_versions),
+ &vnet_vio_ops, vp->dev->name);
+ if (err)
+ goto err_out_free_port;
+
+ err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port);
+ if (err)
+ goto err_out_free_port;
+
+ err = vnet_port_alloc_tx_bufs(port);
+ if (err)
+ goto err_out_free_ldc;
+
+ INIT_HLIST_NODE(&port->hash);
+ INIT_LIST_HEAD(&port->list);
+
+ switch_port = 0;
+ if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
+ switch_port = 1;
+ port->switch_port = switch_port;
+
+ spin_lock_irqsave(&vp->lock, flags);
+ if (switch_port)
+ list_add(&port->list, &vp->port_list);
+ else
+ list_add_tail(&port->list, &vp->port_list);
+ hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]);
+ spin_unlock_irqrestore(&vp->lock, flags);
+
+ dev_set_drvdata(&vdev->dev, port);
+
+ pr_info("%s: PORT ( remote-mac %pM%s )\n",
+ vp->dev->name, port->raddr, switch_port ? " switch-port" : "");
+
+ vio_port_up(&port->vio);
+
+ mdesc_release(hp);
+
+ return 0;
+
+err_out_free_ldc:
+ vio_ldc_free(&port->vio);
+
+err_out_free_port:
+ kfree(port);
+
+err_out_put_mdesc:
+ mdesc_release(hp);
+ return err;
+}
+
+static int vnet_port_remove(struct vio_dev *vdev)
+{
+ struct vnet_port *port = dev_get_drvdata(&vdev->dev);
+
+ if (port) {
+ struct vnet *vp = port->vp;
+ unsigned long flags;
+
+ del_timer_sync(&port->vio.timer);
+
+ spin_lock_irqsave(&vp->lock, flags);
+ list_del(&port->list);
+ hlist_del(&port->hash);
+ spin_unlock_irqrestore(&vp->lock, flags);
+
+ vnet_port_free_tx_bufs(port);
+ vio_ldc_free(&port->vio);
+
+ dev_set_drvdata(&vdev->dev, NULL);
+
+ kfree(port);
+ }
+ return 0;
+}
+
+static const struct vio_device_id vnet_port_match[] = {
+ {
+ .type = "vnet-port",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(vio, vnet_port_match);
+
+static struct vio_driver vnet_port_driver = {
+ .id_table = vnet_port_match,
+ .probe = vnet_port_probe,
+ .remove = vnet_port_remove,
+ .driver = {
+ .name = "vnet_port",
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init vnet_init(void)
+{
+ return vio_register_driver(&vnet_port_driver);
+}
+
+static void __exit vnet_exit(void)
+{
+ vio_unregister_driver(&vnet_port_driver);
+}
+
+module_init(vnet_init);
+module_exit(vnet_exit);
diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h
new file mode 100644
index 000000000000..d347a5bf24b0
--- /dev/null
+++ b/drivers/net/ethernet/sun/sunvnet.h
@@ -0,0 +1,83 @@
+#ifndef _SUNVNET_H
+#define _SUNVNET_H
+
+#define DESC_NCOOKIES(entry_size) \
+ ((entry_size) - sizeof(struct vio_net_desc))
+
+/* length of time before we decide the hardware is borked,
+ * and dev->tx_timeout() should be called to fix the problem
+ */
+#define VNET_TX_TIMEOUT (5 * HZ)
+
+#define VNET_TX_RING_SIZE 512
+#define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4)
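+/* i.e. the TX queue is woken once at least a quarter of the ring's
+ * pending entries (128 with the default ring size) are free again.
+ */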
+
+/* VNET packets are sent in buffers with the first 6 bytes skipped
+ * so that after the ethernet header the IPv4/IPv6 headers are aligned
+ * properly.
+ */
+#define VNET_PACKET_SKIP 6
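+/* With the buffer itself 8-byte aligned, the packet starts at offset 6,
+ * the 14-byte ethernet header ends at offset 20, and the IPv4/IPv6
+ * header therefore starts on a 4-byte boundary.
+ */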
+
+struct vnet_tx_entry {
+ void *buf;
+ unsigned int ncookies;
+ struct ldc_trans_cookie cookies[2];
+};
+
+struct vnet;
+struct vnet_port {
+ struct vio_driver_state vio;
+
+ struct hlist_node hash;
+ u8 raddr[ETH_ALEN];
+ u8 switch_port;
+ u8 __pad;
+
+ struct vnet *vp;
+
+ struct vnet_tx_entry tx_bufs[VNET_TX_RING_SIZE];
+
+ struct list_head list;
+};
+
+static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio)
+{
+ return container_of(vio, struct vnet_port, vio);
+}
+
+#define VNET_PORT_HASH_SIZE 16
+#define VNET_PORT_HASH_MASK (VNET_PORT_HASH_SIZE - 1)
+
+static inline unsigned int vnet_hashfn(u8 *mac)
+{
+ unsigned int val = mac[4] ^ mac[5];
+
+ return val & (VNET_PORT_HASH_MASK);
+}
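+
+/* For example (address purely illustrative), a MAC ending in ...:bc:de
+ * hashes to (0xbc ^ 0xde) & 0xf == 0x62 & 0xf == 2.
+ */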
+
+struct vnet_mcast_entry {
+ u8 addr[ETH_ALEN];
+ u8 sent;
+ u8 hit;
+ struct vnet_mcast_entry *next;
+};
+
+struct vnet {
+ /* Protects port_list and port_hash. */
+ spinlock_t lock;
+
+ struct net_device *dev;
+
+ u32 msg_enable;
+
+ struct list_head port_list;
+
+ struct hlist_head port_hash[VNET_PORT_HASH_SIZE];
+
+ struct vnet_mcast_entry *mcast_list;
+
+ struct list_head list;
+ u64 local_mac;
+};
+
+#endif /* _SUNVNET_H */
diff --git a/drivers/net/ethernet/tehuti/Kconfig b/drivers/net/ethernet/tehuti/Kconfig
new file mode 100644
index 000000000000..1fc027eda33e
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/Kconfig
@@ -0,0 +1,27 @@
+#
+# Tehuti network device configuration
+#
+
+config NET_VENDOR_TEHUTI
+ bool "Tehuti devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Tehuti cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_TEHUTI
+
+config TEHUTI
+ tristate "Tehuti Networks 10G Ethernet"
+ depends on PCI
+ ---help---
+ Tehuti Networks 10G Ethernet NIC
+
+endif # NET_VENDOR_TEHUTI
diff --git a/drivers/net/ethernet/tehuti/Makefile b/drivers/net/ethernet/tehuti/Makefile
new file mode 100644
index 000000000000..f995421ddbc8
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Tehuti network device drivers.
+#
+
+obj-$(CONFIG_TEHUTI) += tehuti.o
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
new file mode 100644
index 000000000000..c77e3bf4750a
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -0,0 +1,2470 @@
+/*
+ * Tehuti Networks(R) Network Driver
+ * ethtool interface implementation
+ * Copyright (C) 2007 Tehuti Networks Ltd. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/*
+ * RX HW/SW interaction overview
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * There are 2 types of RX communication channels between driver and NIC.
+ * 1) RX Free Fifo - RXF - holds descriptors of empty buffers to accept
+ * incoming traffic. This Fifo is filled by SW and is read by HW. Each
+ * descriptor holds info about a buffer's location, size and ID. The ID field
+ * is used to identify the buffer when it's returned with data via the RXD
+ * Fifo (see below).
+ * 2) RX Data Fifo - RXD - holds descriptors of full buffers. This Fifo is
+ * filled by HW and is read by SW. Each descriptor holds status and ID.
+ * HW pops a descriptor from the RXF Fifo, stores its ID, fills the buffer
+ * with incoming data, moves it into host memory via DMA, builds a new RXD
+ * descriptor with the same ID, pushes it into the RXD Fifo and raises an
+ * interrupt to indicate new RX data.
+ *
+ * The current NIC configuration (registers + firmware) makes the NIC use two
+ * RXF Fifos. One holds 1.5K packets and the other 26K packets. Depending on
+ * the incoming packet size, HW decides which RXF Fifo to pop a buffer from.
+ * When the packet is filled with data, HW builds a new RXD descriptor for it
+ * and pushes it into the single RXD Fifo.
+ *
+ * RX SW Data Structures
+ * ~~~~~~~~~~~~~~~~~~~~~
+ * skb db - used to keep track of all skbs owned by SW and their dma addresses.
+ * For the RX case, ownership lasts from allocating a new empty skb for RXF
+ * until accepting the full skb from RXD and passing it to the OS. Each RXF
+ * Fifo has its own skb db. Implemented as an array with a bitmask.
+ * fifo - keeps info about the fifo's size and location, relevant HW
+ * registers, usage and the skb db. Each RXD and RXF Fifo has its own fifo
+ * structure. Implemented as a simple struct.
+ *
+ * RX SW Execution Flow
+ * ~~~~~~~~~~~~~~~~~~~~
+ * Upon initialization (ifconfig up) the driver creates RX fifos and
+ * initializes relevant registers. At the end of the init phase it enables
+ * interrupts. The NIC sees that there are no RXF buffers and raises the
+ * RD_INTR interrupt; the isr fills skbs and Rx begins.
+ * The driver has two receive operation modes:
+ * NAPI - interrupt-driven mixed with polling
+ * interrupt-driven only
+ *
+ * The interrupt-driven-only flow is as follows. When a buffer is ready, HW
+ * raises an interrupt and the isr is called. The isr collects all available
+ * packets (bdx_rx_receive), refills skbs (bdx_rx_alloc_skbs) and exits.
+ *
+ * Rx buffer allocation note
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~
+ * The driver takes care to feed only as many RxF descriptors as will keep
+ * the resulting RxD descriptors from filling the entire RxD fifo. The main
+ * reason is the lack of an overflow check in Bordeaux for the RxD fifo
+ * free/used size.
+ * FIXME: this is NOT fully implemented, more work should be done
+ *
+ */
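To make the ID round-trip concrete, here is a toy user-space model of the RXF/RXD handshake described above (names and structure are illustrative, not the driver's):

    #include <stdio.h>

    #define NBUF 4

    static char *buffers[NBUF]; /* SW-side skb db: buffer looked up by ID */

    /* SW -> RXF: post an empty buffer; the descriptor carries its ID */
    static int rxf_post(int id, char *buf)
    {
        buffers[id] = buf;
        return id;
    }

    /* HW -> RXD: hardware fills the buffer and echoes the same ID back */
    static int rxd_pop(int rxf_id)
    {
        return rxf_id;
    }

    int main(void)
    {
        char payload[] = "incoming packet";
        int id = rxd_pop(rxf_post(2, payload));

        /* the driver maps the returned ID back to the buffer it handed out */
        printf("RXD returned id %d -> buffer \"%s\"\n", id, buffers[id]);
        return 0;
    }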
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "tehuti.h"
+
+static DEFINE_PCI_DEVICE_TABLE(bdx_pci_tbl) = {
+ { PCI_VDEVICE(TEHUTI, 0x3009), },
+ { PCI_VDEVICE(TEHUTI, 0x3010), },
+ { PCI_VDEVICE(TEHUTI, 0x3014), },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, bdx_pci_tbl);
+
+/* Definitions needed by ISR or NAPI functions */
+static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f);
+static void bdx_tx_cleanup(struct bdx_priv *priv);
+static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget);
+
+/* Definitions needed by FW loading */
+static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size);
+
+/* Definitions needed by hw_start */
+static int bdx_tx_init(struct bdx_priv *priv);
+static int bdx_rx_init(struct bdx_priv *priv);
+
+/* Definitions needed by bdx_close */
+static void bdx_rx_free(struct bdx_priv *priv);
+static void bdx_tx_free(struct bdx_priv *priv);
+
+/* Definitions needed by bdx_probe */
+static void bdx_set_ethtool_ops(struct net_device *netdev);
+
+/*************************************************************************
+ * Print Info *
+ *************************************************************************/
+
+static void print_hw_id(struct pci_dev *pdev)
+{
+ struct pci_nic *nic = pci_get_drvdata(pdev);
+ u16 pci_link_status = 0;
+ u16 pci_ctrl = 0;
+
+ pci_read_config_word(pdev, PCI_LINK_STATUS_REG, &pci_link_status);
+ pci_read_config_word(pdev, PCI_DEV_CTRL_REG, &pci_ctrl);
+
+ pr_info("%s%s\n", BDX_NIC_NAME,
+ nic->port_num == 1 ? "" : ", 2-Port");
+ pr_info("srom 0x%x fpga %d build %u lane# %d max_pl 0x%x mrrs 0x%x\n",
+ readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF,
+ readl(nic->regs + FPGA_SEED),
+ GET_LINK_STATUS_LANES(pci_link_status),
+ GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl));
+}
+
+static void print_fw_id(struct pci_nic *nic)
+{
+ pr_info("fw 0x%x\n", readl(nic->regs + FW_VER));
+}
+
+static void print_eth_id(struct net_device *ndev)
+{
+ netdev_info(ndev, "%s, Port %c\n",
+ BDX_NIC_NAME, (ndev->if_port == 0) ? 'A' : 'B');
+
+}
+
+/*************************************************************************
+ * Code *
+ *************************************************************************/
+
+#define bdx_enable_interrupts(priv) \
+ do { WRITE_REG(priv, regIMR, IR_RUN); } while (0)
+#define bdx_disable_interrupts(priv) \
+ do { WRITE_REG(priv, regIMR, 0); } while (0)
+
+/* bdx_fifo_init
+ * create TX/RX descriptor fifo for host-NIC communication.
+ * 1K extra space is allocated at the end of the fifo to simplify
+ * processing of descriptors that wrap around the fifo's end
+ * @priv - NIC private structure
+ * @f - fifo to initialize
+ * @fsz_type - fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB
+ * @reg_XXX - offsets of registers relative to base address
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ */
+static int
+bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
+ u16 reg_CFG0, u16 reg_CFG1, u16 reg_RPTR, u16 reg_WPTR)
+{
+ u16 memsz = FIFO_SIZE * (1 << fsz_type);
+
+ memset(f, 0, sizeof(struct fifo));
+ /* pci_alloc_consistent gives us 4k-aligned memory */
+ f->va = pci_alloc_consistent(priv->pdev,
+ memsz + FIFO_EXTRA_SPACE, &f->da);
+ if (!f->va) {
+ pr_err("pci_alloc_consistent failed\n");
+ RET(-ENOMEM);
+ }
+ f->reg_CFG0 = reg_CFG0;
+ f->reg_CFG1 = reg_CFG1;
+ f->reg_RPTR = reg_RPTR;
+ f->reg_WPTR = reg_WPTR;
+ f->rptr = 0;
+ f->wptr = 0;
+ f->memsz = memsz;
+ f->size_mask = memsz - 1;
+ WRITE_REG(priv, reg_CFG0, (u32) ((f->da & TX_RX_CFG0_BASE) | fsz_type));
+ WRITE_REG(priv, reg_CFG1, H32_64(f->da));
+
+ RET(0);
+}
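For reference, the sizes the fsz_type encoding produces, assuming FIFO_SIZE is 4K as the 0-4KB/1-8KB/2-16KB/3-32KB comment implies (a stand-alone sketch):

    #include <stdio.h>

    #define FIFO_SIZE 4096 /* assumed base size, per the fsz_type comment */

    int main(void)
    {
        for (int fsz_type = 0; fsz_type <= 3; fsz_type++)
            printf("fsz_type %d -> %d KB\n",
                   fsz_type, FIFO_SIZE * (1 << fsz_type) / 1024);
        return 0;
    }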
+
+/* bdx_fifo_free - free all resources used by fifo
+ * @priv - NIC private structure
+ * @f - fifo to release
+ */
+static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
+{
+ ENTER;
+ if (f->va) {
+ pci_free_consistent(priv->pdev,
+ f->memsz + FIFO_EXTRA_SPACE, f->va, f->da);
+ f->va = NULL;
+ }
+ RET();
+}
+
+/*
+ * bdx_link_changed - notifies OS about hw link state.
+ * @bdx_priv - hw adapter structure
+ */
+static void bdx_link_changed(struct bdx_priv *priv)
+{
+ u32 link = READ_REG(priv, regMAC_LNK_STAT) & MAC_LINK_STAT;
+
+ if (!link) {
+ if (netif_carrier_ok(priv->ndev)) {
+ netif_stop_queue(priv->ndev);
+ netif_carrier_off(priv->ndev);
+ netdev_err(priv->ndev, "Link Down\n");
+ }
+ } else {
+ if (!netif_carrier_ok(priv->ndev)) {
+ netif_wake_queue(priv->ndev);
+ netif_carrier_on(priv->ndev);
+ netdev_err(priv->ndev, "Link Up\n");
+ }
+ }
+}
+
+static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
+{
+ if (isr & IR_RX_FREE_0) {
+ bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
+ DBG("RX_FREE_0\n");
+ }
+
+ if (isr & IR_LNKCHG0)
+ bdx_link_changed(priv);
+
+ if (isr & IR_PCIE_LINK)
+ netdev_err(priv->ndev, "PCI-E Link Fault\n");
+
+ if (isr & IR_PCIE_TOUT)
+ netdev_err(priv->ndev, "PCI-E Time Out\n");
+
+}
+
+/* bdx_isr - Interrupt Service Routine for Bordeaux NIC
+ * @irq - interrupt number
+ * @ndev - network device
+ * @regs - CPU registers
+ *
+ * Return IRQ_NONE if it was not our interrupt, IRQ_HANDLED otherwise.
+ *
+ * It reads the ISR register to learn the interrupt reasons and processes
+ * them one by one. Reasons of interest are:
+ * RX_DESC - new packet has arrived and RXD fifo holds its descriptor
+ * RX_FREE - number of free Rx buffers in RXF fifo gets low
+ * TX_FREE - packet was transmitted and TXF fifo holds its descriptor
+ */
+
+static irqreturn_t bdx_isr_napi(int irq, void *dev)
+{
+ struct net_device *ndev = dev;
+ struct bdx_priv *priv = netdev_priv(ndev);
+ u32 isr;
+
+ ENTER;
+ isr = (READ_REG(priv, regISR) & IR_RUN);
+ if (unlikely(!isr)) {
+ bdx_enable_interrupts(priv);
+ return IRQ_NONE; /* Not our interrupt */
+ }
+
+ if (isr & IR_EXTRA)
+ bdx_isr_extra(priv, isr);
+
+ if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) {
+ if (likely(napi_schedule_prep(&priv->napi))) {
+ __napi_schedule(&priv->napi);
+ RET(IRQ_HANDLED);
+ } else {
+ /* NOTE: we get here if an intr has slipped into the window
+ * between these lines in bdx_poll:
+ * bdx_enable_interrupts(priv);
+ * return 0;
+ * Interrupts are currently disabled (since we read the ISR),
+ * and we have failed to register the next poll,
+ * so we read the regs to trigger the chip
+ * and allow further interrupts. */
+ READ_REG(priv, regTXF_WPTR_0);
+ READ_REG(priv, regRXD_WPTR_0);
+ }
+ }
+
+ bdx_enable_interrupts(priv);
+ RET(IRQ_HANDLED);
+}
+
+static int bdx_poll(struct napi_struct *napi, int budget)
+{
+ struct bdx_priv *priv = container_of(napi, struct bdx_priv, napi);
+ int work_done;
+
+ ENTER;
+ bdx_tx_cleanup(priv);
+ work_done = bdx_rx_receive(priv, &priv->rxd_fifo0, budget);
+ if ((work_done < budget) ||
+ (priv->napi_stop++ >= 30)) {
+ DBG("rx poll is done. backing to isr-driven\n");
+
+ /* from time to time we exit to let NAPI layer release
+ * device lock and allow waiting tasks (eg rmmod) to advance) */
+ priv->napi_stop = 0;
+
+ napi_complete(napi);
+ bdx_enable_interrupts(priv);
+ }
+ return work_done;
+}
+
+/* bdx_fw_load - loads firmware to NIC
+ * @priv - NIC private structure
+ * Firmware is loaded via the TXD fifo, so that fifo must be initialized
+ * first. Firmware must be loaded once per NIC, not per PCI device the NIC
+ * provides (a NIC can have several of them), so all driver instances use a
+ * semaphore register to choose the one that will actually load the FW.
+ */
+
+static int bdx_fw_load(struct bdx_priv *priv)
+{
+ const struct firmware *fw = NULL;
+ int master, i;
+ int rc;
+
+ ENTER;
+ master = READ_REG(priv, regINIT_SEMAPHORE);
+ if (!READ_REG(priv, regINIT_STATUS) && master) {
+ rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev);
+ if (rc)
+ goto out;
+ bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
+ mdelay(100);
+ }
+ for (i = 0; i < 200; i++) {
+ if (READ_REG(priv, regINIT_STATUS)) {
+ rc = 0;
+ goto out;
+ }
+ mdelay(2);
+ }
+ rc = -EIO;
+out:
+ if (master)
+ WRITE_REG(priv, regINIT_SEMAPHORE, 1);
+ if (fw)
+ release_firmware(fw);
+
+ if (rc) {
+ netdev_err(priv->ndev, "firmware loading failed\n");
+ if (rc == -EIO)
+ DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n",
+ READ_REG(priv, regVPC),
+ READ_REG(priv, regVIC),
+ READ_REG(priv, regINIT_STATUS), i);
+ RET(rc);
+ } else {
+ DBG("%s: firmware loading success\n", priv->ndev->name);
+ RET(0);
+ }
+}
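The single-loader election can be pictured with a small user-space simulation; it assumes, as the driver's usage suggests, that reading regINIT_SEMAPHORE acquires it (only the first reader sees non-zero) and that writing 1 releases it:

    #include <stdio.h>

    static int semaphore = 1; /* models regINIT_SEMAPHORE: read-to-acquire */
    static int init_status;   /* models regINIT_STATUS: set once FW is up */

    /* assumption: the first read returns non-zero and claims the semaphore;
     * later reads return 0 until it is released by writing 1 back */
    static int read_semaphore(void)
    {
        int v = semaphore;

        semaphore = 0;
        return v;
    }

    static void fw_load(int instance)
    {
        int master = read_semaphore();

        if (!init_status && master) {
            printf("instance %d loads the firmware\n", instance);
            init_status = 1; /* FW came up */
        } else {
            printf("instance %d just waits for INIT_STATUS\n", instance);
        }
        if (master)
            semaphore = 1; /* release, like WRITE_REG(..., 1) */
    }

    int main(void)
    {
        fw_load(0); /* becomes master and loads */
        fw_load(1); /* sees INIT_STATUS set, skips loading */
        return 0;
    }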
+
+static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv)
+{
+ u32 val;
+
+ ENTER;
+ DBG("mac0=%x mac1=%x mac2=%x\n",
+ READ_REG(priv, regUNC_MAC0_A),
+ READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A));
+
+ val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]);
+ WRITE_REG(priv, regUNC_MAC2_A, val);
+ val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]);
+ WRITE_REG(priv, regUNC_MAC1_A, val);
+ val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]);
+ WRITE_REG(priv, regUNC_MAC0_A, val);
+
+ DBG("mac0=%x mac1=%x mac2=%x\n",
+ READ_REG(priv, regUNC_MAC0_A),
+ READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A));
+ RET();
+}
+
+/* bdx_hw_start - inits registers and starts HW's Rx and Tx engines
+ * @priv - NIC private structure
+ */
+static int bdx_hw_start(struct bdx_priv *priv)
+{
+ int rc = -EIO;
+ struct net_device *ndev = priv->ndev;
+
+ ENTER;
+ bdx_link_changed(priv);
+
+ /* 10G overall max length (vlan, eth&ip header, ip payload, crc) */
+ WRITE_REG(priv, regFRM_LENGTH, 0x3FE0);
+ WRITE_REG(priv, regPAUSE_QUANT, 0x96);
+ WRITE_REG(priv, regRX_FIFO_SECTION, 0x800010);
+ WRITE_REG(priv, regTX_FIFO_SECTION, 0xE00010);
+ WRITE_REG(priv, regRX_FULLNESS, 0);
+ WRITE_REG(priv, regTX_FULLNESS, 0);
+ WRITE_REG(priv, regCTRLST,
+ regCTRLST_BASE | regCTRLST_RX_ENA | regCTRLST_TX_ENA);
+
+ WRITE_REG(priv, regVGLB, 0);
+ WRITE_REG(priv, regMAX_FRAME_A,
+ priv->rxf_fifo0.m.pktsz & MAX_FRAME_AB_VAL);
+
+ DBG("RDINTCM=%08x\n", priv->rdintcm); /*NOTE: test script uses this */
+ WRITE_REG(priv, regRDINTCM0, priv->rdintcm);
+ WRITE_REG(priv, regRDINTCM2, 0); /*cpu_to_le32(rcm.val)); */
+
+ DBG("TDINTCM=%08x\n", priv->tdintcm); /*NOTE: test script uses this */
+ WRITE_REG(priv, regTDINTCM0, priv->tdintcm); /* old val = 0x300064 */
+
+ /* Enable timer interrupt once in 2 secs. */
+ /*WRITE_REG(priv, regGTMR0, ((GTMR_SEC * 2) & GTMR_DATA)); */
+ bdx_restore_mac(priv->ndev, priv);
+
+ WRITE_REG(priv, regGMAC_RXF_A, GMAC_RX_FILTER_OSEN |
+ GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB);
+
+#define BDX_IRQ_TYPE ((priv->nic->irq_type == IRQ_MSI) ? 0 : IRQF_SHARED)
+
+ rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
+ ndev->name, ndev);
+ if (rc)
+ goto err_irq;
+ bdx_enable_interrupts(priv);
+
+ RET(0);
+
+err_irq:
+ RET(rc);
+}
+
+static void bdx_hw_stop(struct bdx_priv *priv)
+{
+ ENTER;
+ bdx_disable_interrupts(priv);
+ free_irq(priv->pdev->irq, priv->ndev);
+
+ netif_carrier_off(priv->ndev);
+ netif_stop_queue(priv->ndev);
+
+ RET();
+}
+
+static int bdx_hw_reset_direct(void __iomem *regs)
+{
+ u32 val, i;
+ ENTER;
+
+ /* reset sequences: read, write 1, read, write 0 */
+ val = readl(regs + regCLKPLL);
+ writel((val | CLKPLL_SFTRST) + 0x8, regs + regCLKPLL);
+ udelay(50);
+ val = readl(regs + regCLKPLL);
+ writel(val & ~CLKPLL_SFTRST, regs + regCLKPLL);
+
+ /* check that the PLLs are locked and reset ended */
+ for (i = 0; i < 70; i++, mdelay(10))
+ if ((readl(regs + regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
+ /* do any PCI-E read transaction */
+ readl(regs + regRXD_CFG0_0);
+ return 0;
+ }
+ pr_err("HW reset failed\n");
+ return 1; /* failure */
+}
+
+static int bdx_hw_reset(struct bdx_priv *priv)
+{
+ u32 val, i;
+ ENTER;
+
+ if (priv->port == 0) {
+ /* reset sequences: read, write 1, read, write 0 */
+ val = READ_REG(priv, regCLKPLL);
+ WRITE_REG(priv, regCLKPLL, (val | CLKPLL_SFTRST) + 0x8);
+ udelay(50);
+ val = READ_REG(priv, regCLKPLL);
+ WRITE_REG(priv, regCLKPLL, val & ~CLKPLL_SFTRST);
+ }
+ /* check that the PLLs are locked and reset ended */
+ for (i = 0; i < 70; i++, mdelay(10))
+ if ((READ_REG(priv, regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
+ /* do any PCI-E read transaction */
+ READ_REG(priv, regRXD_CFG0_0);
+ return 0;
+ }
+ pr_err("HW reset failed\n");
+ return 1; /* failure */
+}
+
+static int bdx_sw_reset(struct bdx_priv *priv)
+{
+ int i;
+
+ ENTER;
+ /* 1. load MAC (obsolete) */
+ /* 2. disable Rx (and Tx) */
+ WRITE_REG(priv, regGMAC_RXF_A, 0);
+ mdelay(100);
+ /* 3. disable port */
+ WRITE_REG(priv, regDIS_PORT, 1);
+ /* 4. disable queue */
+ WRITE_REG(priv, regDIS_QU, 1);
+ /* 5. wait until hw is disabled */
+ for (i = 0; i < 50; i++) {
+ if (READ_REG(priv, regRST_PORT) & 1)
+ break;
+ mdelay(10);
+ }
+ if (i == 50)
+ netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n");
+
+ /* 6. disable intrs */
+ WRITE_REG(priv, regRDINTCM0, 0);
+ WRITE_REG(priv, regTDINTCM0, 0);
+ WRITE_REG(priv, regIMR, 0);
+ READ_REG(priv, regISR);
+
+ /* 7. reset queue */
+ WRITE_REG(priv, regRST_QU, 1);
+ /* 8. reset port */
+ WRITE_REG(priv, regRST_PORT, 1);
+ /* 9. zero all read and write pointers */
+ for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
+ DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);
+ for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
+ WRITE_REG(priv, i, 0);
+ /* 10. unset port disable */
+ WRITE_REG(priv, regDIS_PORT, 0);
+ /* 11. unset queue disable */
+ WRITE_REG(priv, regDIS_QU, 0);
+ /* 12. unset queue reset */
+ WRITE_REG(priv, regRST_QU, 0);
+ /* 13. unset port reset */
+ WRITE_REG(priv, regRST_PORT, 0);
+ /* 14. enable Rx */
+ /* skipped; will be done later */
+ /* 15. save MAC (obsolete) */
+ for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
+ DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);
+
+ RET(0);
+}
+
+/* bdx_reset - performs right type of reset depending on hw type */
+static int bdx_reset(struct bdx_priv *priv)
+{
+ ENTER;
+ RET((priv->pdev->device == 0x3009)
+ ? bdx_hw_reset(priv)
+ : bdx_sw_reset(priv));
+}
+
+/**
+ * bdx_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int bdx_close(struct net_device *ndev)
+{
+ struct bdx_priv *priv = NULL;
+
+ ENTER;
+ priv = netdev_priv(ndev);
+
+ napi_disable(&priv->napi);
+
+ bdx_reset(priv);
+ bdx_hw_stop(priv);
+ bdx_rx_free(priv);
+ bdx_tx_free(priv);
+ RET(0);
+}
+
+/**
+ * bdx_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int bdx_open(struct net_device *ndev)
+{
+ struct bdx_priv *priv;
+ int rc;
+
+ ENTER;
+ priv = netdev_priv(ndev);
+ bdx_reset(priv);
+ if (netif_running(ndev))
+ netif_stop_queue(priv->ndev);
+
+ if ((rc = bdx_tx_init(priv)) ||
+ (rc = bdx_rx_init(priv)) ||
+ (rc = bdx_fw_load(priv)))
+ goto err;
+
+ bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
+
+ rc = bdx_hw_start(priv);
+ if (rc)
+ goto err;
+
+ napi_enable(&priv->napi);
+
+ print_fw_id(priv->nic);
+
+ RET(0);
+
+err:
+ bdx_close(ndev);
+ RET(rc);
+}
+
+static int bdx_range_check(struct bdx_priv *priv, u32 offset)
+{
+ return (offset > (u32) (BDX_REGS_SIZE / priv->nic->port_num)) ?
+ -EINVAL : 0;
+}
+
+static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ struct bdx_priv *priv = netdev_priv(ndev);
+ u32 data[3];
+ int error;
+
+ ENTER;
+
+ DBG("jiffies=%ld cmd=%d\n", jiffies, cmd);
+ if (cmd != SIOCDEVPRIVATE) {
+ error = copy_from_user(data, ifr->ifr_data, sizeof(data));
+ if (error) {
+ pr_err("can't copy from user\n");
+ RET(-EFAULT);
+ }
+ DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
+ }
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ switch (data[0]) {
+
+ case BDX_OP_READ:
+ error = bdx_range_check(priv, data[1]);
+ if (error < 0)
+ return error;
+ data[2] = READ_REG(priv, data[1]);
+ DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2],
+ data[2]);
+ error = copy_to_user(ifr->ifr_data, data, sizeof(data));
+ if (error)
+ RET(-EFAULT);
+ break;
+
+ case BDX_OP_WRITE:
+ error = bdx_range_check(priv, data[1]);
+ if (error < 0)
+ return error;
+ WRITE_REG(priv, data[1], data[2]);
+ DBG("write_reg(0x%x, 0x%x)\n", data[1], data[2]);
+ break;
+
+ default:
+ RET(-EOPNOTSUPP);
+ }
+ return 0;
+}
+
+static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ ENTER;
+ if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))
+ RET(bdx_ioctl_priv(ndev, ifr, cmd));
+ else
+ RET(-EOPNOTSUPP);
+}
+
+/*
+ * __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid
+ * by passing VLAN filter table to hardware
+ * @ndev network device
+ * @vid VLAN vid
+ * @enable 1 to add the vid, 0 to kill it
+ */
+static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
+{
+ struct bdx_priv *priv = netdev_priv(ndev);
+ u32 reg, bit, val;
+
+ ENTER;
+ DBG2("vid=%d value=%d\n", (int)vid, enable);
+ if (unlikely(vid >= 4096)) {
+ pr_err("invalid VID: %u (> 4096)\n", vid);
+ RET();
+ }
+ reg = regVLAN_0 + (vid / 32) * 4;
+ bit = 1 << vid % 32;
+ val = READ_REG(priv, reg);
+ DBG2("reg=%x, val=%x, bit=%d\n", reg, val, bit);
+ if (enable)
+ val |= bit;
+ else
+ val &= ~bit;
+ DBG2("new val %x\n", val);
+ WRITE_REG(priv, reg, val);
+ RET();
+}
+
+/*
+ * bdx_vlan_rx_add_vid - kernel hook for adding VLAN vid to hw filtering table
+ * @ndev network device
+ * @vid VLAN vid to add
+ */
+static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
+{
+ __bdx_vlan_rx_vid(ndev, vid, 1);
+}
+
+/*
+ * bdx_vlan_rx_kill_vid - kernel hook for killing VLAN vid in hw filtering table
+ * @ndev network device
+ * @vid VLAN vid to kill
+ */
+static void bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
+{
+ __bdx_vlan_rx_vid(ndev, vid, 0);
+}
+
+/**
+ * bdx_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ ENTER;
+
+ if (new_mtu == ndev->mtu)
+ RET(0);
+
+ /* enforce minimum frame size */
+ if (new_mtu < ETH_ZLEN) {
+ netdev_err(ndev, "mtu %d is less then minimal %d\n",
+ new_mtu, ETH_ZLEN);
+ RET(-EINVAL);
+ }
+
+ ndev->mtu = new_mtu;
+ if (netif_running(ndev)) {
+ bdx_close(ndev);
+ bdx_open(ndev);
+ }
+ RET(0);
+}
+
+static void bdx_setmulti(struct net_device *ndev)
+{
+ struct bdx_priv *priv = netdev_priv(ndev);
+
+ u32 rxf_val =
+ GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB | GMAC_RX_FILTER_OSEN;
+ int i;
+
+ ENTER;
+ /* IMF - imperfect (hash) rx multicast filter */
+ /* PMF - perfect rx multicast filter */
+
+ /* FIXME: RXE(OFF) */
+ if (ndev->flags & IFF_PROMISC) {
+ rxf_val |= GMAC_RX_FILTER_PRM;
+ } else if (ndev->flags & IFF_ALLMULTI) {
+ /* set IMF to accept all multicast frames */
+ for (i = 0; i < MAC_MCST_HASH_NUM; i++)
+ WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
+ } else if (!netdev_mc_empty(ndev)) {
+ u8 hash;
+ struct netdev_hw_addr *ha;
+ u32 reg, val;
+
+ /* set IMF to deny all multicast frames */
+ for (i = 0; i < MAC_MCST_HASH_NUM; i++)
+ WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, 0);
+ /* set PMF to deny all multicast frames */
+ for (i = 0; i < MAC_MCST_NUM; i++) {
+ WRITE_REG(priv, regRX_MAC_MCST0 + i * 8, 0);
+ WRITE_REG(priv, regRX_MAC_MCST1 + i * 8, 0);
+ }
+
+ /* use PMF to accept first MAC_MCST_NUM (15) addresses */
+ /* TBD: sort addresses and write them in ascending order
+ * into RX_MAC_MCST regs. we skip this phase now and accept ALL
+ * multicast frames through the IMF */
+ /* accept the rest of the addresses through the IMF */
+ netdev_for_each_mc_addr(ha, ndev) {
+ hash = 0;
+ for (i = 0; i < ETH_ALEN; i++)
+ hash ^= ha->addr[i];
+ reg = regRX_MCST_HASH0 + ((hash >> 5) << 2);
+ val = READ_REG(priv, reg);
+ val |= (1 << (hash % 32));
+ WRITE_REG(priv, reg, val);
+ }
+
+ } else {
+ DBG("only own mac %d\n", netdev_mc_count(ndev));
+ rxf_val |= GMAC_RX_FILTER_AB;
+ }
+ WRITE_REG(priv, regGMAC_RXF_A, rxf_val);
+ /* enable RX */
+ /* FIXME: RXE(ON) */
+ RET();
+}
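The imperfect-filter indexing above boils down to: XOR-fold the six MAC bytes into one byte, then treat it as a bit index into a 256-bit table (the u8 hash implies eight 32-bit hash registers). A stand-alone sketch of the index math:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb }; /* sample group */
        uint8_t hash = 0;
        int i;

        for (i = 0; i < 6; i++)
            hash ^= mac[i]; /* XOR-fold, as in bdx_setmulti() */

        /* hash >> 5 selects one of eight 32-bit registers, hash % 32 the bit */
        printf("hash 0x%02x -> register %d, bit %d\n",
               hash, hash >> 5, hash % 32);
        return 0;
    }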
+
+static int bdx_set_mac(struct net_device *ndev, void *p)
+{
+ struct bdx_priv *priv = netdev_priv(ndev);
+ struct sockaddr *addr = p;
+
+ ENTER;
+ /*
+ if (netif_running(dev))
+ return -EBUSY
+ */
+ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ bdx_restore_mac(ndev, priv);
+ RET(0);
+}
+
+static int bdx_read_mac(struct bdx_priv *priv)
+{
+ u16 macAddress[3], i;
+ ENTER;
+
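+ /* each register is read twice; only the second read's value is kept */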
+ macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
+ macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
+ macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
+ macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
+ macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
+ macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
+ for (i = 0; i < 3; i++) {
+ priv->ndev->dev_addr[i * 2 + 1] = macAddress[i];
+ priv->ndev->dev_addr[i * 2] = macAddress[i] >> 8;
+ }
+ RET(0);
+}
+
+static u64 bdx_read_l2stat(struct bdx_priv *priv, int reg)
+{
+ u64 val;
+
+ val = READ_REG(priv, reg);
+ val |= ((u64) READ_REG(priv, reg + 8)) << 32;
+ return val;
+}
+
+/* Do the statistics-update work */
+static void bdx_update_stats(struct bdx_priv *priv)
+{
+ struct bdx_stats *stats = &priv->hw_stats;
+ u64 *stats_vector = (u64 *) stats;
+ int i;
+ int addr;
+
+ /*Fill HW structure */
+ addr = 0x7200;
+ /*First 12 statistics - 0x7200 - 0x72B0 */
+ for (i = 0; i < 12; i++) {
+ stats_vector[i] = bdx_read_l2stat(priv, addr);
+ addr += 0x10;
+ }
+ BDX_ASSERT(addr != 0x72C0);
+ /* 0x72C0-0x72E0 RSRV */
+ addr = 0x72F0;
+ for (; i < 16; i++) {
+ stats_vector[i] = bdx_read_l2stat(priv, addr);
+ addr += 0x10;
+ }
+ BDX_ASSERT(addr != 0x7330);
+ /* 0x7330-0x7360 RSRV */
+ addr = 0x7370;
+ for (; i < 19; i++) {
+ stats_vector[i] = bdx_read_l2stat(priv, addr);
+ addr += 0x10;
+ }
+ BDX_ASSERT(addr != 0x73A0);
+ /* 0x73A0-0x73B0 RSRV */
+ addr = 0x73C0;
+ for (; i < 23; i++) {
+ stats_vector[i] = bdx_read_l2stat(priv, addr);
+ addr += 0x10;
+ }
+ BDX_ASSERT(addr != 0x7400);
+ BDX_ASSERT((sizeof(struct bdx_stats) / sizeof(u64)) != i);
+}
+
+static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
+ u16 rxd_vlan);
+static void print_rxfd(struct rxf_desc *rxfd);
+
+/*************************************************************************
+ * Rx DB *
+ *************************************************************************/
+
+static void bdx_rxdb_destroy(struct rxdb *db)
+{
+ vfree(db);
+}
+
+static struct rxdb *bdx_rxdb_create(int nelem)
+{
+ struct rxdb *db;
+ int i;
+
+ db = vmalloc(sizeof(struct rxdb)
+ + (nelem * sizeof(int))
+ + (nelem * sizeof(struct rx_map)));
+ if (likely(db != NULL)) {
+ db->stack = (int *)(db + 1);
+ db->elems = (void *)(db->stack + nelem);
+ db->nelem = nelem;
+ db->top = nelem;
+ for (i = 0; i < nelem; i++)
+ db->stack[i] = nelem - i - 1; /* to make first allocs
+ close to db struct*/
+ }
+
+ return db;
+}
+
+static inline int bdx_rxdb_alloc_elem(struct rxdb *db)
+{
+ BDX_ASSERT(db->top <= 0);
+ return db->stack[--(db->top)];
+}
+
+static inline void *bdx_rxdb_addr_elem(struct rxdb *db, int n)
+{
+ BDX_ASSERT((n < 0) || (n >= db->nelem));
+ return db->elems + n;
+}
+
+static inline int bdx_rxdb_available(struct rxdb *db)
+{
+ return db->top;
+}
+
+static inline void bdx_rxdb_free_elem(struct rxdb *db, int n)
+{
+ BDX_ASSERT((n >= db->nelem) || (n < 0));
+ db->stack[(db->top)++] = n;
+}
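Taken together, the rxdb is a stack-based free list: free element indices live on a stack, alloc pops, free pushes, and "top" doubles as the free count. A minimal user-space equivalent (illustrative only):

    #include <stdio.h>
    #include <stdlib.h>

    struct db {
        int *stack; /* indices of free elements */
        int nelem;
        int top;    /* number of free elements; also the stack pointer */
    };

    static struct db *db_create(int nelem)
    {
        struct db *db = malloc(sizeof(*db) + nelem * sizeof(int));

        db->stack = (int *)(db + 1);
        db->nelem = nelem;
        db->top = nelem;
        for (int i = 0; i < nelem; i++)
            db->stack[i] = nelem - i - 1; /* first allocs get low indices */
        return db;
    }

    static int db_alloc(struct db *db) { return db->stack[--db->top]; }
    static void db_free(struct db *db, int n) { db->stack[db->top++] = n; }

    int main(void)
    {
        struct db *db = db_create(4);
        int a = db_alloc(db), b = db_alloc(db);

        printf("allocated %d and %d, %d still free\n", a, b, db->top);
        db_free(db, a);
        db_free(db, b);
        free(db);
        return 0;
    }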
+
+/*************************************************************************
+ * Rx Init *
+ *************************************************************************/
+
+/* bdx_rx_init - initialize RX all related HW and SW resources
+ * @priv - NIC private structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * It creates the rxf and rxd fifos, updates the relevant HW registers and
+ * preallocates skbs for rx. It assumes that Rx is disabled in HW.
+ * Functions are grouped for better cache usage.
+ *
+ * The RxD fifo is smaller than the RxF fifo by design. Upon high load, RxD
+ * will be filled and packets will be dropped by the nic without getting into
+ * the host or causing an interrupt. In that condition the host has no chance
+ * to process all the packets anyway, and dropping in the nic is cheaper,
+ * since it takes 0 cpu cycles
+ */
+
+/* TBD: ensure proper packet size */
+
+static int bdx_rx_init(struct bdx_priv *priv)
+{
+ ENTER;
+
+ if (bdx_fifo_init(priv, &priv->rxd_fifo0.m, priv->rxd_size,
+ regRXD_CFG0_0, regRXD_CFG1_0,
+ regRXD_RPTR_0, regRXD_WPTR_0))
+ goto err_mem;
+ if (bdx_fifo_init(priv, &priv->rxf_fifo0.m, priv->rxf_size,
+ regRXF_CFG0_0, regRXF_CFG1_0,
+ regRXF_RPTR_0, regRXF_WPTR_0))
+ goto err_mem;
+ priv->rxdb = bdx_rxdb_create(priv->rxf_fifo0.m.memsz /
+ sizeof(struct rxf_desc));
+ if (!priv->rxdb)
+ goto err_mem;
+
+ priv->rxf_fifo0.m.pktsz = priv->ndev->mtu + VLAN_ETH_HLEN;
+ return 0;
+
+err_mem:
+ netdev_err(priv->ndev, "Rx init failed\n");
+ return -ENOMEM;
+}
+
+/* bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo
+ * @priv - NIC private structure
+ * @f - RXF fifo
+ */
+static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
+{
+ struct rx_map *dm;
+ struct rxdb *db = priv->rxdb;
+ u16 i;
+
+ ENTER;
+ DBG("total=%d free=%d busy=%d\n", db->nelem, bdx_rxdb_available(db),
+ db->nelem - bdx_rxdb_available(db));
+ while (bdx_rxdb_available(db) > 0) {
+ i = bdx_rxdb_alloc_elem(db);
+ dm = bdx_rxdb_addr_elem(db, i);
+ dm->dma = 0;
+ }
+ for (i = 0; i < db->nelem; i++) {
+ dm = bdx_rxdb_addr_elem(db, i);
+ if (dm->dma) {
+ pci_unmap_single(priv->pdev,
+ dm->dma, f->m.pktsz,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(dm->skb);
+ }
+ }
+}
+
+/* bdx_rx_free - release all Rx resources
+ * @priv - NIC private structure
+ * It assumes that Rx is disabled in HW
+ */
+static void bdx_rx_free(struct bdx_priv *priv)
+{
+ ENTER;
+ if (priv->rxdb) {
+ bdx_rx_free_skbs(priv, &priv->rxf_fifo0);
+ bdx_rxdb_destroy(priv->rxdb);
+ priv->rxdb = NULL;
+ }
+ bdx_fifo_free(priv, &priv->rxf_fifo0.m);
+ bdx_fifo_free(priv, &priv->rxd_fifo0.m);
+
+ RET();
+}
+
+/*************************************************************************
+ * Rx Engine *
+ *************************************************************************/
+
+/* bdx_rx_alloc_skbs - fill rxf fifo with new skbs
+ * @priv - nic's private structure
+ * @f - RXF fifo that needs skbs
+ * It allocates skbs, builds rxf descriptors and pushes them into the rxf
+ * fifo. The skbs' virtual and physical addresses are stored in the skb db.
+ * To calculate free space, the function uses cached values of RPTR and
+ * WPTR. When needed, it also updates RPTR and WPTR.
+ */
+
+/* TBD: do not update WPTR if no desc were written */
+
+static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
+{
+ struct sk_buff *skb;
+ struct rxf_desc *rxfd;
+ struct rx_map *dm;
+ int dno, delta, idx;
+ struct rxdb *db = priv->rxdb;
+
+ ENTER;
+ dno = bdx_rxdb_available(db) - 1;
+ while (dno > 0) {
+ skb = dev_alloc_skb(f->m.pktsz + NET_IP_ALIGN);
+ if (!skb) {
+ pr_err("NO MEM: dev_alloc_skb failed\n");
+ break;
+ }
+ skb->dev = priv->ndev;
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ idx = bdx_rxdb_alloc_elem(db);
+ dm = bdx_rxdb_addr_elem(db, idx);
+ dm->dma = pci_map_single(priv->pdev,
+ skb->data, f->m.pktsz,
+ PCI_DMA_FROMDEVICE);
+ dm->skb = skb;
+ rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
+ rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */
+ rxfd->va_lo = idx;
+ rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
+ rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
+ rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
+ print_rxfd(rxfd);
+
+ f->m.wptr += sizeof(struct rxf_desc);
+ delta = f->m.wptr - f->m.memsz;
+ if (unlikely(delta >= 0)) {
+ f->m.wptr = delta;
+ if (delta > 0) {
+ memcpy(f->m.va, f->m.va + f->m.memsz, delta);
+ DBG("wrapped descriptor\n");
+ }
+ }
+ dno--;
+ }
+ /*TBD: to do - delayed rxf wptr like in txd */
+ WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
+ RET();
+}
+
+static inline void
+NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1, u16 rxd_vlan,
+ struct sk_buff *skb)
+{
+ ENTER;
+ DBG("rxdd->flags.bits.vtag=%d\n", GET_RXD_VTAG(rxd_val1));
+ if (GET_RXD_VTAG(rxd_val1)) {
+ DBG("%s: vlan rcv vlan '%x' vtag '%x'\n",
+ priv->ndev->name,
+ GET_RXD_VLAN_ID(rxd_vlan),
+ GET_RXD_VTAG(rxd_val1));
+ __vlan_hwaccel_put_tag(skb, GET_RXD_VLAN_TCI(rxd_vlan));
+ }
+ netif_receive_skb(skb);
+}
+
+static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
+{
+ struct rxf_desc *rxfd;
+ struct rx_map *dm;
+ struct rxf_fifo *f;
+ struct rxdb *db;
+ struct sk_buff *skb;
+ int delta;
+
+ ENTER;
+ DBG("priv=%p rxdd=%p\n", priv, rxdd);
+ f = &priv->rxf_fifo0;
+ db = priv->rxdb;
+ DBG("db=%p f=%p\n", db, f);
+ dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
+ DBG("dm=%p\n", dm);
+ skb = dm->skb;
+ rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
+ rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */
+ rxfd->va_lo = rxdd->va_lo;
+ rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
+ rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
+ rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
+ print_rxfd(rxfd);
+
+ f->m.wptr += sizeof(struct rxf_desc);
+ delta = f->m.wptr - f->m.memsz;
+ if (unlikely(delta >= 0)) {
+ f->m.wptr = delta;
+ if (delta > 0) {
+ memcpy(f->m.va, f->m.va + f->m.memsz, delta);
+ DBG("wrapped descriptor\n");
+ }
+ }
+ RET();
+}
+
+/* bdx_rx_receive - receives full packets from RXD fifo and pass them to OS
+ * NOTE: special treatment is given to non-contiguous descriptors that start
+ * near the end, wrap around and continue at the beginning. The second part
+ * is copied right after the first, and then the descriptor is interpreted
+ * as normal. The fifo has extra space to allow such operations.
+ * @priv - nic's private structure
+ * @f - RXF fifo that needs skbs
+ */
+
+/* TBD: replace the memcpy call with explicit inline asm */
+
+static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
+{
+ struct net_device *ndev = priv->ndev;
+ struct sk_buff *skb, *skb2;
+ struct rxd_desc *rxdd;
+ struct rx_map *dm;
+ struct rxf_fifo *rxf_fifo;
+ int tmp_len, size;
+ int done = 0;
+ int max_done = BDX_MAX_RX_DONE;
+ struct rxdb *db = NULL;
+ /* Unmarshalled descriptor - copy of descriptor in host order */
+ u32 rxd_val1;
+ u16 len;
+ u16 rxd_vlan;
+
+ ENTER;
+ max_done = budget;
+
+ f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_WR_PTR;
+
+ size = f->m.wptr - f->m.rptr;
+ if (size < 0)
+ size = f->m.memsz + size; /* size is negative :-) */
+
+ while (size > 0) {
+
+ rxdd = (struct rxd_desc *)(f->m.va + f->m.rptr);
+ rxd_val1 = CPU_CHIP_SWAP32(rxdd->rxd_val1);
+
+ len = CPU_CHIP_SWAP16(rxdd->len);
+
+ rxd_vlan = CPU_CHIP_SWAP16(rxdd->rxd_vlan);
+
+ print_rxdd(rxdd, rxd_val1, len, rxd_vlan);
+
+ tmp_len = GET_RXD_BC(rxd_val1) << 3;
+ BDX_ASSERT(tmp_len <= 0);
+ size -= tmp_len;
+ if (size < 0) /* test for partially arrived descriptor */
+ break;
+
+ f->m.rptr += tmp_len;
+
+ tmp_len = f->m.rptr - f->m.memsz;
+ if (unlikely(tmp_len >= 0)) {
+ f->m.rptr = tmp_len;
+ if (tmp_len > 0) {
+ DBG("wrapped desc rptr=%d tmp_len=%d\n",
+ f->m.rptr, tmp_len);
+ memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len);
+ }
+ }
+
+ if (unlikely(GET_RXD_ERR(rxd_val1))) {
+ DBG("rxd_err = 0x%x\n", GET_RXD_ERR(rxd_val1));
+ ndev->stats.rx_errors++;
+ bdx_recycle_skb(priv, rxdd);
+ continue;
+ }
+
+ rxf_fifo = &priv->rxf_fifo0;
+ db = priv->rxdb;
+ dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
+ skb = dm->skb;
+
+ if (len < BDX_COPYBREAK &&
+ (skb2 = dev_alloc_skb(len + NET_IP_ALIGN))) {
+ skb_reserve(skb2, NET_IP_ALIGN);
+ /*skb_put(skb2, len); */
+ pci_dma_sync_single_for_cpu(priv->pdev,
+ dm->dma, rxf_fifo->m.pktsz,
+ PCI_DMA_FROMDEVICE);
+ memcpy(skb2->data, skb->data, len);
+ bdx_recycle_skb(priv, rxdd);
+ skb = skb2;
+ } else {
+ pci_unmap_single(priv->pdev,
+ dm->dma, rxf_fifo->m.pktsz,
+ PCI_DMA_FROMDEVICE);
+ bdx_rxdb_free_elem(db, rxdd->va_lo);
+ }
+
+ ndev->stats.rx_bytes += len;
+
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ /* Non-IP packets aren't checksum-offloaded */
+ if (GET_RXD_PKT_ID(rxd_val1) == 0)
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb);
+
+ if (++done >= max_done)
+ break;
+ }
+
+ ndev->stats.rx_packets += done;
+
+ /* FIXME: do something to minimize pci accesses */
+ WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);
+
+ bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
+
+ RET(done);
+}
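The wrap-around handling relies on the extra tail space allocated in bdx_fifo_init(): when a record's first bytes sit at the end of the ring, the continuation is copied from the ring start into the tail so the record can be parsed contiguously. A reduced user-space sketch of that read path (sizes are illustrative):

    #include <stdio.h>
    #include <string.h>

    #define MEMSZ 16
    #define EXTRA 8 /* mirrors FIFO_EXTRA_SPACE */

    int main(void)
    {
        char ring[MEMSZ + EXTRA];
        int rptr = 12, len = 8; /* record wraps: 4 bytes at end, 4 at start */

        memcpy(ring + 12, "ABCD", 4); /* first part, at the end of the ring */
        memcpy(ring, "EFGH", 4);      /* second part, at the start */

        rptr += len;
        int over = rptr - MEMSZ; /* bytes that wrapped (here 4) */
        if (over >= 0) {
            rptr = over;
            if (over > 0) /* copy the wrapped part into the tail space */
                memcpy(ring + MEMSZ, ring, over);
        }
        printf("record reads contiguously: %.8s, new rptr=%d\n",
               ring + 12, rptr);
        return 0;
    }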
+
+/*************************************************************************
+ * Debug / Temporary Code *
+ *************************************************************************/
+static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
+ u16 rxd_vlan)
+{
+ DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d va_lo %d va_hi %d\n",
+ GET_RXD_BC(rxd_val1), GET_RXD_RXFQ(rxd_val1), GET_RXD_TO(rxd_val1),
+ GET_RXD_TYPE(rxd_val1), GET_RXD_ERR(rxd_val1),
+ GET_RXD_RXP(rxd_val1), GET_RXD_PKT_ID(rxd_val1),
+ GET_RXD_VTAG(rxd_val1), len, GET_RXD_VLAN_ID(rxd_vlan),
+ GET_RXD_CFI(rxd_vlan), GET_RXD_PRIO(rxd_vlan), rxdd->va_lo,
+ rxdd->va_hi);
+}
+
+static void print_rxfd(struct rxf_desc *rxfd)
+{
+ DBG("=== RxF desc CHIP ORDER/ENDIANESS =============\n"
+ "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n",
+ rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len);
+}
+
+/*
+ * TX HW/SW interaction overview
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * There are 2 types of TX communication channels between driver and NIC.
+ * 1) TX Free Fifo - TXF - holds ack descriptors for sent packets
+ * 2) TX Data Fifo - TXD - holds descriptors of full buffers.
+ *
+ * Currently the NIC supports TSO, checksumming and gather DMA.
+ * UFO and IP fragmentation are on the way.
+ *
+ * TX SW Data Structures
+ * ~~~~~~~~~~~~~~~~~~~~~
+ * txdb - used to keep track of all skbs owned by SW and their dma addresses.
+ * For the TX case, ownership lasts from getting the packet via hard_xmit
+ * until HW acknowledges the send via TXF descriptors.
+ * Implemented as a cyclic buffer.
+ * fifo - keeps info about the fifo's size and location, relevant HW
+ * registers, usage and the skb db. Each TXD and TXF Fifo has its own fifo
+ * structure. Implemented as a simple struct.
+ *
+ * TX SW Execution Flow
+ * ~~~~~~~~~~~~~~~~~~~~
+ * The OS calls the driver's hard_xmit method with a packet to send.
+ * The driver creates DMA mappings, builds TXD descriptors and kicks HW
+ * by updating TXD WPTR.
+ * When the packet is sent, HW writes a TXF descriptor back and SW frees
+ * the original skb.
+ * To prevent TXD fifo overflow without reading HW registers every time,
+ * SW deploys the "tx level" technique.
+ * Upon start-up, the tx level is initialized to the TXD fifo length.
+ * For every sent packet, SW gets its TXD descriptor size
+ * (from a precalculated array) and subtracts it from the tx level.
+ * The size is also stored in the txdb. When a TXF ack arrives, SW fetches
+ * the size of the original TXD descriptor from the txdb and adds it back
+ * to the tx level.
+ * When the tx level drops below a predefined threshold, the driver stops
+ * the TX queue; when it rises back above that threshold, the tx queue is
+ * enabled again.
+ *
+ * This technique avoids excessive reading of the RPTR and WPTR registers.
+ * As our benchmarks show, it adds 1.5 Gbit/sec to the NIC's throughput.
+ */
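A compressed user-space model of the tx-level bookkeeping described above (the thresholds and descriptor size are illustrative, not the driver's):

    #include <stdio.h>

    #define MAX_LEVEL 4096 /* illustrative stand-in for BDX_MAX_TX_LEVEL */
    #define MIN_LEVEL 256  /* illustrative stop/wake threshold */
    #define DESC_BYTES 64  /* illustrative per-packet descriptor size */

    static int tx_level = MAX_LEVEL;
    static int queue_stopped;

    static void xmit_one(void)
    {
        tx_level -= DESC_BYTES; /* reserve space without touching RPTR */
        if (tx_level < MIN_LEVEL)
            queue_stopped = 1;
    }

    static void txf_ack(int bytes)
    {
        tx_level += bytes; /* HW confirmed the send: return the space */
        if (queue_stopped && tx_level >= MIN_LEVEL)
            queue_stopped = 0;
    }

    int main(void)
    {
        int i;

        for (i = 0; i < 61; i++)
            xmit_one();
        printf("after xmit: level %d stopped %d\n", tx_level, queue_stopped);
        txf_ack(61 * DESC_BYTES);
        printf("after acks: level %d stopped %d\n", tx_level, queue_stopped);
        return 0;
    }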
+
+/*************************************************************************
+ * Tx DB *
+ *************************************************************************/
+static inline int bdx_tx_db_size(struct txdb *db)
+{
+ int taken = db->wptr - db->rptr;
+ if (taken < 0)
+ taken = db->size + 1 + taken; /* (size + 1) equals memsz */
+
+ return db->size - taken;
+}
+
+/* __bdx_tx_db_ptr_next - helper function, increment read/write pointer + wrap
+ * @d - tx data base
+ * @ptr - read or write pointer
+ */
+static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
+{
+ BDX_ASSERT(db == NULL || pptr == NULL); /* sanity */
+
+ BDX_ASSERT(*pptr != db->rptr && /* expect either read */
+ *pptr != db->wptr); /* or write pointer */
+
+ BDX_ASSERT(*pptr < db->start || /* pointer has to be */
+ *pptr >= db->end); /* in range */
+
+ ++*pptr;
+ if (unlikely(*pptr == db->end))
+ *pptr = db->start;
+}
+
+/* bdx_tx_db_inc_rptr - increment read pointer
+ * @d - tx data base
+ */
+static inline void bdx_tx_db_inc_rptr(struct txdb *db)
+{
+ BDX_ASSERT(db->rptr == db->wptr); /* can't read from empty db */
+ __bdx_tx_db_ptr_next(db, &db->rptr);
+}
+
+/* bdx_tx_db_inc_wptr - increment write pointer
+ * @d - tx data base
+ */
+static inline void bdx_tx_db_inc_wptr(struct txdb *db)
+{
+ __bdx_tx_db_ptr_next(db, &db->wptr);
+ BDX_ASSERT(db->rptr == db->wptr); /* we can not get empty db as
+ a result of write */
+}
+
+/* bdx_tx_db_init - creates and initializes tx db
+ * @d - tx data base
+ * @sz_type - size of tx fifo
+ * Returns 0 on success, error code otherwise
+ */
+static int bdx_tx_db_init(struct txdb *d, int sz_type)
+{
+ int memsz = FIFO_SIZE * (1 << (sz_type + 1));
+
+ d->start = vmalloc(memsz);
+ if (!d->start)
+ return -ENOMEM;
+
+ /*
+ * In order to differentiate between db is empty and db is full
+ * states at least one element should always be empty in order to
+ * avoid rptr == wptr which means db is empty
+ */
+ d->size = memsz / sizeof(struct tx_map) - 1;
+ d->end = d->start + d->size + 1; /* just after last element */
+
+ /* all dbs are created equally empty */
+ d->rptr = d->start;
+ d->wptr = d->start;
+
+ return 0;
+}
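The one-element-always-empty rule gives the usual disambiguation: rptr == wptr means empty, and advancing wptr onto rptr would mean full, so an N-slot ring stores at most N - 1 elements. A quick stand-alone check:

    #include <stdio.h>

    #define NSLOT 8 /* a ring of 8 slots stores at most 7 elements */

    static int rptr, wptr;

    static int empty(void) { return rptr == wptr; }
    static int full(void) { return (wptr + 1) % NSLOT == rptr; }

    int main(void)
    {
        int stored = 0;

        while (!full()) { /* fill up until the full condition trips */
            wptr = (wptr + 1) % NSLOT;
            stored++;
        }
        printf("stored %d of %d slots, empty=%d full=%d\n",
               stored, NSLOT, empty(), full());
        return 0;
    }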
+
+/* bdx_tx_db_close - closes tx db and frees all memory
+ * @d - tx data base
+ */
+static void bdx_tx_db_close(struct txdb *d)
+{
+ BDX_ASSERT(d == NULL);
+
+ vfree(d->start);
+ d->start = NULL;
+}
+
+/*************************************************************************
+ * Tx Engine *
+ *************************************************************************/
+
+/* sizes of tx desc (including padding if needed) as function
+ * of skb's frag number */
+static struct {
+ u16 bytes;
+ u16 qwords; /* qword = 64 bit */
+} txd_sizes[MAX_SKB_FRAGS + 1];
+
+/* bdx_tx_map_skb - creates and stores dma mappings for skb's data blocks
+ * @priv - NIC private structure
+ * @skb - socket buffer to map
+ *
+ * It makes dma mappings for the skb's data blocks and writes them to the
+ * PBL of a new tx descriptor. It also stores them in the tx db so they can
+ * be unmapped after the data has been sent. It is the caller's
+ * responsibility to make sure there is enough space in the tx db. The last
+ * element holds a pointer to the skb itself and is marked with a negative
+ * length.
+ */
+static inline void
+bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
+ struct txd_desc *txdd)
+{
+ struct txdb *db = &priv->txdb;
+ struct pbl *pbl = &txdd->pbl[0];
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int i;
+
+ db->wptr->len = skb_headlen(skb);
+ db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
+ db->wptr->len, PCI_DMA_TODEVICE);
+ pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
+ pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
+ pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
+ DBG("=== pbl len: 0x%x ================\n", pbl->len);
+ DBG("=== pbl pa_lo: 0x%x ================\n", pbl->pa_lo);
+ DBG("=== pbl pa_hi: 0x%x ================\n", pbl->pa_hi);
+ bdx_tx_db_inc_wptr(db);
+
+ for (i = 0; i < nr_frags; i++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[i];
+ db->wptr->len = frag->size;
+ db->wptr->addr.dma = skb_frag_dma_map(&priv->pdev->dev, frag,
+ 0, frag->size,
+ DMA_TO_DEVICE);
+
+ pbl++;
+ pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
+ pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
+ pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
+ bdx_tx_db_inc_wptr(db);
+ }
+
+ /* add skb clean up info. */
+ db->wptr->len = -txd_sizes[nr_frags].bytes;
+ db->wptr->addr.skb = skb;
+ bdx_tx_db_inc_wptr(db);
+}
+
+/* init_txd_sizes - precalculates the sizes of descriptors for skbs with up
+ * to MAX_SKB_FRAGS frags. The number of frags is used as an index to fetch
+ * the correct descriptor size instead of calculating it each time. */
+static void __init init_txd_sizes(void)
+{
+ int i, lwords;
+
+ /* 7 is the number of lwords in a txd with one phys buffer,
+ * 3 is the number of lwords used for every additional phys buffer */
+ for (i = 0; i < MAX_SKB_FRAGS + 1; i++) {
+ lwords = 7 + (i * 3);
+ if (lwords & 1)
+ lwords++; /* pad it with 1 lword */
+ txd_sizes[i].qwords = lwords >> 1;
+ txd_sizes[i].bytes = lwords << 2;
+ }
+}
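For illustration, the first few entries this produces (same arithmetic, runnable stand-alone):

    #include <stdio.h>

    int main(void)
    {
        for (int frags = 0; frags <= 3; frags++) {
            int lwords = 7 + frags * 3; /* 7 base lwords + 3 per extra buffer */

            if (lwords & 1)
                lwords++; /* pad to a whole qword */
            printf("%d frags -> %d qwords, %d bytes\n",
                   frags, lwords >> 1, lwords << 2);
        }
        return 0;
    }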
+
+/* bdx_tx_init - initialize all Tx related stuff.
+ * Namely, TXD and TXF fifos, database etc */
+static int bdx_tx_init(struct bdx_priv *priv)
+{
+ if (bdx_fifo_init(priv, &priv->txd_fifo0.m, priv->txd_size,
+ regTXD_CFG0_0,
+ regTXD_CFG1_0, regTXD_RPTR_0, regTXD_WPTR_0))
+ goto err_mem;
+ if (bdx_fifo_init(priv, &priv->txf_fifo0.m, priv->txf_size,
+ regTXF_CFG0_0,
+ regTXF_CFG1_0, regTXF_RPTR_0, regTXF_WPTR_0))
+ goto err_mem;
+
+ /* The TX db has to keep mappings for all packets sent (on TxD)
+ * and not yet reclaimed (on TxF) */
+ if (bdx_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size)))
+ goto err_mem;
+
+ priv->tx_level = BDX_MAX_TX_LEVEL;
+#ifdef BDX_DELAY_WPTR
+ priv->tx_update_mark = priv->tx_level - 1024;
+#endif
+ return 0;
+
+err_mem:
+ netdev_err(priv->ndev, "Tx init failed\n");
+ return -ENOMEM;
+}
+
+/*
+ * bdx_tx_space - calculates available space in TX fifo
+ * @priv - NIC private structure
+ * Returns available space in TX fifo in bytes
+ */
+static inline int bdx_tx_space(struct bdx_priv *priv)
+{
+ struct txd_fifo *f = &priv->txd_fifo0;
+ int fsize;
+
+ f->m.rptr = READ_REG(priv, f->m.reg_RPTR) & TXF_WPTR_WR_PTR;
+ fsize = f->m.rptr - f->m.wptr;
+ if (fsize <= 0)
+ fsize = f->m.memsz + fsize;
+ return fsize;
+}
+
+/* bdx_tx_transmit - send packet to NIC
+ * @skb - packet to send
+ * ndev - network device assigned to NIC
+ * Return codes:
+ * o NETDEV_TX_OK everything ok.
+ * o NETDEV_TX_BUSY Cannot transmit packet, try later
+ * Usually a bug, means queue start/stop flow control is broken in
+ * the driver. Note: the driver must NOT put the skb in its DMA ring.
+ * o NETDEV_TX_LOCKED Locking failed, please retry quickly.
+ */
+static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct bdx_priv *priv = netdev_priv(ndev);
+ struct txd_fifo *f = &priv->txd_fifo0;
+ int txd_checksum = 7; /* full checksum */
+ int txd_lgsnd = 0;
+ int txd_vlan_id = 0;
+ int txd_vtag = 0;
+ int txd_mss = 0;
+
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct txd_desc *txdd;
+ int len;
+ unsigned long flags;
+
+ ENTER;
+ local_irq_save(flags);
+ if (!spin_trylock(&priv->tx_lock)) {
+ local_irq_restore(flags);
+ DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
+ BDX_DRV_NAME, ndev->name);
+ return NETDEV_TX_LOCKED;
+ }
+
+ /* build tx descriptor */
+ BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */
+ txdd = (struct txd_desc *)(f->m.va + f->m.wptr);
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
+ txd_checksum = 0;
+
+ if (skb_shinfo(skb)->gso_size) {
+ txd_mss = skb_shinfo(skb)->gso_size;
+ txd_lgsnd = 1;
+ DBG("skb %p skb len %d gso size = %d\n", skb, skb->len,
+ txd_mss);
+ }
+
+ if (vlan_tx_tag_present(skb)) {
+ /*Cut VLAN ID to 12 bits */
+ txd_vlan_id = vlan_tx_tag_get(skb) & BITS_MASK(12);
+ txd_vtag = 1;
+ }
+
+ txdd->length = CPU_CHIP_SWAP16(skb->len);
+ txdd->mss = CPU_CHIP_SWAP16(txd_mss);
+ txdd->txd_val1 =
+ CPU_CHIP_SWAP32(TXD_W1_VAL
+ (txd_sizes[nr_frags].qwords, txd_checksum, txd_vtag,
+ txd_lgsnd, txd_vlan_id));
+ DBG("=== TxD desc =====================\n");
+ DBG("=== w1: 0x%x ================\n", txdd->txd_val1);
+ DBG("=== w2: mss 0x%x len 0x%x\n", txdd->mss, txdd->length);
+
+ bdx_tx_map_skb(priv, skb, txdd);
+
+ /* increment the TXD write pointer. In case of
+ fifo wrapping, copy the remainder of the descriptor
+ to the beginning */
+ f->m.wptr += txd_sizes[nr_frags].bytes;
+ len = f->m.wptr - f->m.memsz;
+ if (unlikely(len >= 0)) {
+ f->m.wptr = len;
+ if (len > 0) {
+ BDX_ASSERT(len > f->m.memsz);
+ memcpy(f->m.va, f->m.va + f->m.memsz, len);
+ }
+ }
+ BDX_ASSERT(f->m.wptr >= f->m.memsz); /* finished with valid wptr */
+
+ priv->tx_level -= txd_sizes[nr_frags].bytes;
+ BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
+#ifdef BDX_DELAY_WPTR
+ if (priv->tx_level > priv->tx_update_mark) {
+ /* Force memory writes to complete before letting h/w
+ know there are new descriptors to fetch.
+ (might be needed on platforms like IA64)
+ wmb(); */
+ WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
+ } else {
+ if (priv->tx_noupd++ > BDX_NO_UPD_PACKETS) {
+ priv->tx_noupd = 0;
+ WRITE_REG(priv, f->m.reg_WPTR,
+ f->m.wptr & TXF_WPTR_WR_PTR);
+ }
+ }
+#else
+ /* Force memory writes to complete before letting h/w
+ know there are new descriptors to fetch.
+ (might be needed on platforms like IA64)
+ wmb(); */
+ WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
+
+#endif
+#ifdef BDX_LLTX
+ ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+#endif
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+
+ if (priv->tx_level < BDX_MIN_TX_LEVEL) {
+ DBG("%s: %s: TX Q STOP level %d\n",
+ BDX_DRV_NAME, ndev->name, priv->tx_level);
+ netif_stop_queue(ndev);
+ }
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ return NETDEV_TX_OK;
+}
+
+/* bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ.
+ * @priv - bdx adapter
+ * It scans TXF fifo for descriptors, frees DMA mappings and reports to OS
+ * that those packets were sent
+ */
+static void bdx_tx_cleanup(struct bdx_priv *priv)
+{
+ struct txf_fifo *f = &priv->txf_fifo0;
+ struct txdb *db = &priv->txdb;
+ int tx_level = 0;
+
+ ENTER;
+ f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_MASK;
+ BDX_ASSERT(f->m.rptr >= f->m.memsz); /* started with valid rptr */
+
+ while (f->m.wptr != f->m.rptr) {
+ f->m.rptr += BDX_TXF_DESC_SZ;
+ f->m.rptr &= f->m.size_mask;
+
+ /* unmap all the fragments */
+ /* the tx_map entries holding dma mappings come first */
+ BDX_ASSERT(db->rptr->len == 0);
+ do {
+ BDX_ASSERT(db->rptr->addr.dma == 0);
+ pci_unmap_page(priv->pdev, db->rptr->addr.dma,
+ db->rptr->len, PCI_DMA_TODEVICE);
+ bdx_tx_db_inc_rptr(db);
+ } while (db->rptr->len > 0);
+ tx_level -= db->rptr->len; /* '-' because len is negative */
+
+ /* now should come skb pointer - free it */
+ dev_kfree_skb_irq(db->rptr->addr.skb);
+ bdx_tx_db_inc_rptr(db);
+ }
+
+ /* let h/w know which TXF descriptors were cleaned */
+ BDX_ASSERT((f->m.wptr & TXF_WPTR_WR_PTR) >= f->m.memsz);
+ WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);
+
+ /* We reclaimed resources, so in case the Q is stopped by the xmit
+ * callback, we resume transmission and use tx_lock to synchronize
+ * with xmit. */
+ spin_lock(&priv->tx_lock);
+ priv->tx_level += tx_level;
+ BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
+#ifdef BDX_DELAY_WPTR
+ if (priv->tx_noupd) {
+ priv->tx_noupd = 0;
+ WRITE_REG(priv, priv->txd_fifo0.m.reg_WPTR,
+ priv->txd_fifo0.m.wptr & TXF_WPTR_WR_PTR);
+ }
+#endif
+
+ if (unlikely(netif_queue_stopped(priv->ndev) &&
+ netif_carrier_ok(priv->ndev) &&
+ (priv->tx_level >= BDX_MIN_TX_LEVEL))) {
+ DBG("%s: %s: TX Q WAKE level %d\n",
+ BDX_DRV_NAME, priv->ndev->name, priv->tx_level);
+ netif_wake_queue(priv->ndev);
+ }
+ spin_unlock(&priv->tx_lock);
+}
+
+/* bdx_tx_free_skbs - frees all skbs from TXD fifo.
+ * It gets called when the OS stops this dev, e.g. upon "ifconfig down" or
+ * rmmod
+ */
+static void bdx_tx_free_skbs(struct bdx_priv *priv)
+{
+ struct txdb *db = &priv->txdb;
+
+ ENTER;
+ while (db->rptr != db->wptr) {
+ if (likely(db->rptr->len))
+ pci_unmap_page(priv->pdev, db->rptr->addr.dma,
+ db->rptr->len, PCI_DMA_TODEVICE);
+ else
+ dev_kfree_skb(db->rptr->addr.skb);
+ bdx_tx_db_inc_rptr(db);
+ }
+ RET();
+}
+
+/* bdx_tx_free - frees all Tx resources */
+static void bdx_tx_free(struct bdx_priv *priv)
+{
+ ENTER;
+ bdx_tx_free_skbs(priv);
+ bdx_fifo_free(priv, &priv->txd_fifo0.m);
+ bdx_fifo_free(priv, &priv->txf_fifo0.m);
+ bdx_tx_db_close(&priv->txdb);
+}
+
+/* bdx_tx_push_desc - push descriptor to TxD fifo
+ * @priv - NIC private structure
+ * @data - desc's data
+ * @size - desc's size
+ *
+ * Pushes the desc to the TxD fifo, wrapping it around the fifo end if
+ * needed.
+ * NOTE: this func does not check for available space; that is the
+ * responsibility of the caller. Neither does it check that the data size
+ * is smaller than the fifo size.
+ */
+static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
+{
+ struct txd_fifo *f = &priv->txd_fifo0;
+ int i = f->m.memsz - f->m.wptr;
+
+ if (size == 0)
+ return;
+
+ if (i > size) {
+ memcpy(f->m.va + f->m.wptr, data, size);
+ f->m.wptr += size;
+ } else {
+ memcpy(f->m.va + f->m.wptr, data, i);
+ f->m.wptr = size - i;
+ memcpy(f->m.va, data + i, f->m.wptr);
+ }
+ WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
+}
+
+/* bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way
+ * @priv - NIC private structure
+ * @data - desc's data
+ * @size - desc's size
+ *
+ * NOTE: this func does check for available space and, if necessary, waits for
+ * NIC to read existing data before writing new one.
+ */
+static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
+{
+ int timer = 0;
+ ENTER;
+
+ while (size > 0) {
+ /* we subtract 8 because when the fifo is full, rptr == wptr,
+ which also means the fifo is empty; we can tell the
+ difference, but could the hw do the same??? :) */
+ int avail = bdx_tx_space(priv) - 8;
+ if (avail <= 0) {
+ if (timer++ > 300) { /* prevent endless loop */
+ DBG("timeout while writing desc to TxD fifo\n");
+ break;
+ }
+ udelay(50); /* give hw a chance to clean fifo */
+ continue;
+ }
+ avail = min(avail, size);
+ DBG("about to push %d bytes starting %p size %d\n", avail,
+ data, size);
+ bdx_tx_push_desc(priv, data, avail);
+ size -= avail;
+ data += avail;
+ }
+ RET();
+}
+
+static const struct net_device_ops bdx_netdev_ops = {
+ .ndo_open = bdx_open,
+ .ndo_stop = bdx_close,
+ .ndo_start_xmit = bdx_tx_transmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = bdx_ioctl,
+ .ndo_set_rx_mode = bdx_setmulti,
+ .ndo_change_mtu = bdx_change_mtu,
+ .ndo_set_mac_address = bdx_set_mac,
+ .ndo_vlan_rx_add_vid = bdx_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = bdx_vlan_rx_kill_vid,
+};
+
+/**
+ * bdx_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in bdx_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * bdx_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ *
+ * functions and their order used as explained in
+ * /usr/src/linux/Documentation/DMA-{API,mapping}.txt
+ *
+ */
+
+/* TBD: netif_msg should be checked and implemented. I disable it for now */
+static int __devinit
+bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *ndev;
+ struct bdx_priv *priv;
+ int err, pci_using_dac, port;
+ unsigned long pciaddr;
+ u32 regionSize;
+ struct pci_nic *nic;
+
+ ENTER;
+
+ nic = vmalloc(sizeof(*nic));
+ if (!nic)
+ RET(-ENOMEM);
+
+ /************** pci *****************/
+ err = pci_enable_device(pdev);
+ if (err) /* it triggers an interrupt, not sure why; */
+ goto err_pci; /* it's not a problem though */
+
+ if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
+ !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
+ pci_using_dac = 1;
+ } else {
+ if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
+ (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ pr_err("No usable DMA configuration, aborting\n");
+ goto err_dma;
+ }
+ pci_using_dac = 0;
+ }
+
+ err = pci_request_regions(pdev, BDX_DRV_NAME);
+ if (err)
+ goto err_dma;
+
+ pci_set_master(pdev);
+
+ pciaddr = pci_resource_start(pdev, 0);
+ if (!pciaddr) {
+ err = -EIO;
+ pr_err("no MMIO resource\n");
+ goto err_out_res;
+ }
+ regionSize = pci_resource_len(pdev, 0);
+ if (regionSize < BDX_REGS_SIZE) {
+ err = -EIO;
+ pr_err("MMIO resource (%x) too small\n", regionSize);
+ goto err_out_res;
+ }
+
+ nic->regs = ioremap(pciaddr, regionSize);
+ if (!nic->regs) {
+ err = -EIO;
+ pr_err("ioremap failed\n");
+ goto err_out_res;
+ }
+
+ if (pdev->irq < 2) {
+ err = -EIO;
+ pr_err("invalid irq (%d)\n", pdev->irq);
+ goto err_out_iomap;
+ }
+ pci_set_drvdata(pdev, nic);
+
+ if (pdev->device == 0x3014)
+ nic->port_num = 2;
+ else
+ nic->port_num = 1;
+
+ print_hw_id(pdev);
+
+ bdx_hw_reset_direct(nic->regs);
+
+ nic->irq_type = IRQ_INTX;
+#ifdef BDX_MSI
+ if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
+ err = pci_enable_msi(pdev);
+ if (err)
+ pr_err("Can't eneble msi. error is %d\n", err);
+ else
+ nic->irq_type = IRQ_MSI;
+ } else
+ DBG("HW does not support MSI\n");
+#endif
+
+ /************** netdev **************/
+ for (port = 0; port < nic->port_num; port++) {
+ ndev = alloc_etherdev(sizeof(struct bdx_priv));
+ if (!ndev) {
+ err = -ENOMEM;
+ pr_err("alloc_etherdev failed\n");
+ goto err_out_iomap;
+ }
+
+ ndev->netdev_ops = &bdx_netdev_ops;
+ ndev->tx_queue_len = BDX_NDEV_TXQ_LEN;
+
+ bdx_set_ethtool_ops(ndev); /* ethtool interface */
+
+ /* these fields are informational only, so they can be
+ * the same for all ports of the board */
+ ndev->if_port = port;
+ ndev->base_addr = pciaddr;
+ ndev->mem_start = pciaddr;
+ ndev->mem_end = pciaddr + regionSize;
+ ndev->irq = pdev->irq;
+ ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
+ | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM
+ /*| NETIF_F_FRAGLIST */
+ ;
+ ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_HW_VLAN_TX;
+
+ if (pci_using_dac)
+ ndev->features |= NETIF_F_HIGHDMA;
+
+ /************** priv ****************/
+ priv = nic->priv[port] = netdev_priv(ndev);
+
+ priv->pBdxRegs = nic->regs + port * 0x8000;
+ priv->port = port;
+ priv->pdev = pdev;
+ priv->ndev = ndev;
+ priv->nic = nic;
+ priv->msg_enable = BDX_DEF_MSG_ENABLE;
+
+ netif_napi_add(ndev, &priv->napi, bdx_poll, 64);
+
+ if ((readl(nic->regs + FPGA_VER) & 0xFFF) == 308) {
+ DBG("HW statistics not supported\n");
+ priv->stats_flag = 0;
+ } else {
+ priv->stats_flag = 1;
+ }
+
+ /* Initialize fifo sizes. */
+ priv->txd_size = 2;
+ priv->txf_size = 2;
+ priv->rxd_size = 2;
+ priv->rxf_size = 3;
+
+ /* Set the initial interrupt coalescing registers. */
+ priv->rdintcm = INT_REG_VAL(0x20, 1, 4, 12);
+ priv->tdintcm = INT_REG_VAL(0x20, 1, 0, 12);
+
+ /* ndev->xmit_lock spinlock is not used.
+ * Private priv->tx_lock is used for synchronization
+ * between transmit and TX irq cleanup. In addition
+ * set multicast list callback has to use priv->tx_lock.
+ */
+#ifdef BDX_LLTX
+ ndev->features |= NETIF_F_LLTX;
+#endif
+ spin_lock_init(&priv->tx_lock);
+
+ /*bdx_hw_reset(priv); */
+ if (bdx_read_mac(priv)) {
+ pr_err("load MAC address failed\n");
+ err = -EFAULT; /* without this, the error path would return 0 */
+ goto err_out_iomap;
+ }
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ err = register_netdev(ndev);
+ if (err) {
+ pr_err("register_netdev failed\n");
+ goto err_out_free;
+ }
+ netif_carrier_off(ndev);
+ netif_stop_queue(ndev);
+
+ print_eth_id(ndev);
+ }
+ RET(0);
+
+err_out_free:
+ free_netdev(ndev);
+err_out_iomap:
+ iounmap(nic->regs);
+err_out_res:
+ pci_release_regions(pdev);
+err_dma:
+ pci_disable_device(pdev);
+err_pci:
+ vfree(nic);
+
+ RET(err);
+}
+
+/****************** Ethtool interface *********************/
+/* get strings for statistics counters */
+static const char
+ bdx_stat_names[][ETH_GSTRING_LEN] = {
+ "InUCast", /* 0x7200 */
+ "InMCast", /* 0x7210 */
+ "InBCast", /* 0x7220 */
+ "InPkts", /* 0x7230 */
+ "InErrors", /* 0x7240 */
+ "InDropped", /* 0x7250 */
+ "FrameTooLong", /* 0x7260 */
+ "FrameSequenceErrors", /* 0x7270 */
+ "InVLAN", /* 0x7280 */
+ "InDroppedDFE", /* 0x7290 */
+ "InDroppedIntFull", /* 0x72A0 */
+ "InFrameAlignErrors", /* 0x72B0 */
+
+ /* 0x72C0-0x72E0 RSRV */
+
+ "OutUCast", /* 0x72F0 */
+ "OutMCast", /* 0x7300 */
+ "OutBCast", /* 0x7310 */
+ "OutPkts", /* 0x7320 */
+
+ /* 0x7330-0x7360 RSRV */
+
+ "OutVLAN", /* 0x7370 */
+ "InUCastOctects", /* 0x7380 */
+ "OutUCastOctects", /* 0x7390 */
+
+ /* 0x73A0-0x73B0 RSRV */
+
+ "InBCastOctects", /* 0x73C0 */
+ "OutBCastOctects", /* 0x73D0 */
+ "InOctects", /* 0x73E0 */
+ "OutOctects", /* 0x73F0 */
+};
+
+/*
+ * bdx_get_settings - get device-specific settings
+ * @netdev
+ * @ecmd
+ */
+static int bdx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ u32 rdintcm;
+ u32 tdintcm;
+ struct bdx_priv *priv = netdev_priv(netdev);
+
+ rdintcm = priv->rdintcm;
+ tdintcm = priv->tdintcm;
+
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ ecmd->duplex = DUPLEX_FULL;
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL; /* what does it mean? */
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ /* PCK_TH is measured in multiples of FIFO bytes;
+ we translate it to packets */
+ ecmd->maxtxpkt =
+ ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ);
+ ecmd->maxrxpkt =
+ ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc));
+
+ return 0;
+}
+
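+/* Worked example (illustrative only): with the driver's default tdintcm of
+ * INT_REG_VAL(0x20, 1, 0, 12), GET_PCK_TH() returns 12, so
+ * maxtxpkt = 12 * 128 / 16 = 96 packets; the default rdintcm carries the
+ * same packet threshold, and with sizeof(struct rxf_desc) = 24 bytes
+ * maxrxpkt = 12 * 128 / 24 = 64 packets.
+ */
+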
+/*
+ * bdx_get_drvinfo - report driver information
+ * @netdev
+ * @drvinfo
+ */
+static void
+bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct bdx_priv *priv = netdev_priv(netdev);
+
+ strlcat(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
+ strlcat(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
+ strlcat(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcat(drvinfo->bus_info, pci_name(priv->pdev),
+ sizeof(drvinfo->bus_info));
+
+ drvinfo->n_stats = ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0);
+ drvinfo->testinfo_len = 0;
+ drvinfo->regdump_len = 0;
+ drvinfo->eedump_len = 0;
+}
+
+/*
+ * bdx_get_coalesce - get interrupt coalescing parameters
+ * @netdev
+ * @ecoal
+ */
+static int
+bdx_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
+{
+ u32 rdintcm;
+ u32 tdintcm;
+ struct bdx_priv *priv = netdev_priv(netdev);
+
+ rdintcm = priv->rdintcm;
+ tdintcm = priv->tdintcm;
+
+ /* PCK_TH is measured in multiples of FIFO bytes;
+ we translate it to packets */
+ ecoal->rx_coalesce_usecs = GET_INT_COAL(rdintcm) * INT_COAL_MULT;
+ ecoal->rx_max_coalesced_frames =
+ ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc));
+
+ ecoal->tx_coalesce_usecs = GET_INT_COAL(tdintcm) * INT_COAL_MULT;
+ ecoal->tx_max_coalesced_frames =
+ ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ);
+
+ /* adaptive parameters ignored */
+ return 0;
+}
+
+/*
+ * bdx_set_coalesce - set interrupt coalescing parameters
+ * @netdev
+ * @ecoal
+ */
+static int
+bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
+{
+ u32 rdintcm;
+ u32 tdintcm;
+ struct bdx_priv *priv = netdev_priv(netdev);
+ int rx_coal;
+ int tx_coal;
+ int rx_max_coal;
+ int tx_max_coal;
+
+ /* Check for valid input */
+ rx_coal = ecoal->rx_coalesce_usecs / INT_COAL_MULT;
+ tx_coal = ecoal->tx_coalesce_usecs / INT_COAL_MULT;
+ rx_max_coal = ecoal->rx_max_coalesced_frames;
+ tx_max_coal = ecoal->tx_max_coalesced_frames;
+
+ /* Translate from packets to multiples of FIFO bytes */
+ rx_max_coal =
+ (((rx_max_coal * sizeof(struct rxf_desc)) + PCK_TH_MULT - 1)
+ / PCK_TH_MULT);
+ tx_max_coal =
+ (((tx_max_coal * BDX_TXF_DESC_SZ) + PCK_TH_MULT - 1)
+ / PCK_TH_MULT);
+
+ if ((rx_coal > 0x7FFF) || (tx_coal > 0x7FFF) ||
+ (rx_max_coal > 0xF) || (tx_max_coal > 0xF))
+ return -EINVAL;
+
+ rdintcm = INT_REG_VAL(rx_coal, GET_INT_COAL_RC(priv->rdintcm),
+ GET_RXF_TH(priv->rdintcm), rx_max_coal);
+ tdintcm = INT_REG_VAL(tx_coal, GET_INT_COAL_RC(priv->tdintcm), 0,
+ tx_max_coal);
+
+ priv->rdintcm = rdintcm;
+ priv->tdintcm = tdintcm;
+
+ WRITE_REG(priv, regRDINTCM0, rdintcm);
+ WRITE_REG(priv, regTDINTCM0, tdintcm);
+
+ return 0;
+}
+
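+/* Worked example (illustrative only): requesting rx_coalesce_usecs = 64 and
+ * rx_max_coalesced_frames = 50 gives rx_coal = 64 / 2 = 32 and
+ * rx_max_coal = (50 * 24 + 127) / 128 = 10; both pass the 0x7FFF/0xF range
+ * checks and are packed back into rdintcm with INT_REG_VAL().
+ */
+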
+/* Convert RX fifo size to number of pending packets */
+static inline int bdx_rx_fifo_size_to_packets(int rx_size)
+{
+ return (FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc);
+}
+
+/* Convert TX fifo size to number of pending packets */
+static inline int bdx_tx_fifo_size_to_packets(int tx_size)
+{
+ return (FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ;
+}
+
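+/* Worked example (illustrative only): with FIFO_SIZE = 4096, a size code of
+ * 3 selects a 4096 << 3 = 32768-byte fifo, i.e. room for 32768 / 24 = 1365
+ * pending RX packets or 32768 / 16 = 2048 pending TX packets.
+ */
+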
+/*
+ * bdx_get_ringparam - report ring sizes
+ * @netdev
+ * @ring
+ */
+static void
+bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+{
+ struct bdx_priv *priv = netdev_priv(netdev);
+
+ /* max_pending - the largest FIFO size we allow */
+ ring->rx_max_pending = bdx_rx_fifo_size_to_packets(3);
+ ring->tx_max_pending = bdx_tx_fifo_size_to_packets(3);
+ ring->rx_pending = bdx_rx_fifo_size_to_packets(priv->rxf_size);
+ ring->tx_pending = bdx_tx_fifo_size_to_packets(priv->txd_size);
+}
+
+/*
+ * bdx_set_ringparam - set ring sizes
+ * @netdev
+ * @ring
+ */
+static int
+bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+{
+ struct bdx_priv *priv = netdev_priv(netdev);
+ int rx_size = 0;
+ int tx_size = 0;
+
+ for (; rx_size < 4; rx_size++) {
+ if (bdx_rx_fifo_size_to_packets(rx_size) >= ring->rx_pending)
+ break;
+ }
+ if (rx_size == 4)
+ rx_size = 3;
+
+ for (; tx_size < 4; tx_size++) {
+ if (bdx_tx_fifo_size_to_packets(tx_size) >= ring->tx_pending)
+ break;
+ }
+ if (tx_size == 4)
+ tx_size = 3;
+
+ /* Is there anything to do? */
+ if ((rx_size == priv->rxf_size) &&
+ (tx_size == priv->txd_size))
+ return 0;
+
+ priv->rxf_size = rx_size;
+ if (rx_size > 1)
+ priv->rxd_size = rx_size - 1;
+ else
+ priv->rxd_size = rx_size;
+
+ priv->txf_size = priv->txd_size = tx_size;
+
+ if (netif_running(netdev)) {
+ bdx_close(netdev);
+ bdx_open(netdev);
+ }
+ return 0;
+}
+
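+/* Worked example (illustrative only): a request of rx_pending = 1000 scans
+ * the size codes 0..3 (170, 341, 682 and 1365 packets respectively) and
+ * picks rx_size = 3, the first code holding at least 1000 packets;
+ * rxd_size is then set one step smaller, to 2.
+ */
+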
+/*
+ * bdx_get_strings - return a set of strings that describe the requested objects
+ * @netdev
+ * @data
+ */
+static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, *bdx_stat_names, sizeof(bdx_stat_names));
+ break;
+ }
+}
+
+/*
+ * bdx_get_sset_count - return number of statistics or tests
+ * @netdev
+ */
+static int bdx_get_sset_count(struct net_device *netdev, int stringset)
+{
+ struct bdx_priv *priv = netdev_priv(netdev);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ BDX_ASSERT(ARRAY_SIZE(bdx_stat_names)
+ != sizeof(struct bdx_stats) / sizeof(u64));
+ return (priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * bdx_get_ethtool_stats - return device's hardware L2 statistics
+ * @netdev
+ * @stats
+ * @data
+ */
+static void bdx_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct bdx_priv *priv = netdev_priv(netdev);
+
+ if (priv->stats_flag) {
+
+ /* Update stats from HW */
+ bdx_update_stats(priv);
+
+ /* Copy data to user buffer */
+ memcpy(data, &priv->hw_stats, sizeof(priv->hw_stats));
+ }
+}
+
+/*
+ * bdx_set_ethtool_ops - ethtool interface implementation
+ * @netdev
+ */
+static void bdx_set_ethtool_ops(struct net_device *netdev)
+{
+ static const struct ethtool_ops bdx_ethtool_ops = {
+ .get_settings = bdx_get_settings,
+ .get_drvinfo = bdx_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = bdx_get_coalesce,
+ .set_coalesce = bdx_set_coalesce,
+ .get_ringparam = bdx_get_ringparam,
+ .set_ringparam = bdx_set_ringparam,
+ .get_strings = bdx_get_strings,
+ .get_sset_count = bdx_get_sset_count,
+ .get_ethtool_stats = bdx_get_ethtool_stats,
+ };
+
+ SET_ETHTOOL_OPS(netdev, &bdx_ethtool_ops);
+}
+
+/**
+ * bdx_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * bdx_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void __devexit bdx_remove(struct pci_dev *pdev)
+{
+ struct pci_nic *nic = pci_get_drvdata(pdev);
+ struct net_device *ndev;
+ int port;
+
+ for (port = 0; port < nic->port_num; port++) {
+ ndev = nic->priv[port]->ndev;
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+ }
+
+ /*bdx_hw_reset_direct(nic->regs); */
+#ifdef BDX_MSI
+ if (nic->irq_type == IRQ_MSI)
+ pci_disable_msi(pdev);
+#endif
+
+ iounmap(nic->regs);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ vfree(nic);
+
+ RET();
+}
+
+static struct pci_driver bdx_pci_driver = {
+ .name = BDX_DRV_NAME,
+ .id_table = bdx_pci_tbl,
+ .probe = bdx_probe,
+ .remove = __devexit_p(bdx_remove),
+};
+
+/*
+ * print_driver_id - print parameters of the driver build
+ */
+static void __init print_driver_id(void)
+{
+ pr_info("%s, %s\n", BDX_DRV_DESC, BDX_DRV_VERSION);
+ pr_info("Options: hw_csum %s\n", BDX_MSI_STRING);
+}
+
+static int __init bdx_module_init(void)
+{
+ ENTER;
+ init_txd_sizes();
+ print_driver_id();
+ RET(pci_register_driver(&bdx_pci_driver));
+}
+
+module_init(bdx_module_init);
+
+static void __exit bdx_module_exit(void)
+{
+ ENTER;
+ pci_unregister_driver(&bdx_pci_driver);
+ RET();
+}
+
+module_exit(bdx_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(BDX_DRV_DESC);
+MODULE_FIRMWARE("tehuti/bdx.bin");
diff --git a/drivers/net/ethernet/tehuti/tehuti.h b/drivers/net/ethernet/tehuti/tehuti.h
new file mode 100644
index 000000000000..709ebd6e28b4
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/tehuti.h
@@ -0,0 +1,561 @@
+/*
+ * Tehuti Networks(R) Network Driver
+ * Copyright (C) 2007 Tehuti Networks Ltd. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _TEHUTI_H
+#define _TEHUTI_H
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/uaccess.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sched.h>
+#include <linux/tty.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/vmalloc.h>
+#include <linux/firmware.h>
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+/* Compile Time Switches */
+/* start */
+#define BDX_TSO
+#define BDX_LLTX
+#define BDX_DELAY_WPTR
+/* #define BDX_MSI */
+/* end */
+
+#if !defined CONFIG_PCI_MSI
+# undef BDX_MSI
+#endif
+
+#define BDX_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK)
+
+/* ioctl ops */
+#define BDX_OP_READ 1
+#define BDX_OP_WRITE 2
+
+/* RX copy break size */
+#define BDX_COPYBREAK 257
+
+#define DRIVER_AUTHOR "Tehuti Networks(R)"
+#define BDX_DRV_DESC "Tehuti Networks(R) Network Driver"
+#define BDX_DRV_NAME "tehuti"
+#define BDX_NIC_NAME "Tehuti 10 Giga TOE SmartNIC"
+#define BDX_NIC2PORT_NAME "Tehuti 2-Port 10 Giga TOE SmartNIC"
+#define BDX_DRV_VERSION "7.29.3"
+
+#ifdef BDX_MSI
+# define BDX_MSI_STRING "msi "
+#else
+# define BDX_MSI_STRING ""
+#endif
+
+/* netdev tx queue len for Luxor; the kernel default is 1000.
+ * Use "ifconfig eth1 txqueuelen 3000" to change it at runtime */
+#define BDX_NDEV_TXQ_LEN 3000
+
+#define FIFO_SIZE 4096
+#define FIFO_EXTRA_SPACE 1024
+
+#if BITS_PER_LONG == 64
+# define H32_64(x) (u32) ((u64)(x) >> 32)
+# define L32_64(x) (u32) ((u64)(x) & 0xffffffff)
+#elif BITS_PER_LONG == 32
+# define H32_64(x) 0
+# define L32_64(x) ((u32) (x))
+#else /* BITS_PER_LONG == ?? */
+# error BITS_PER_LONG is undefined. Must be 64 or 32
+#endif /* BITS_PER_LONG */
+
+#ifdef __BIG_ENDIAN
+# define CPU_CHIP_SWAP32(x) swab32(x)
+# define CPU_CHIP_SWAP16(x) swab16(x)
+#else
+# define CPU_CHIP_SWAP32(x) (x)
+# define CPU_CHIP_SWAP16(x) (x)
+#endif
+
+#define READ_REG(pp, reg) readl(pp->pBdxRegs + reg)
+#define WRITE_REG(pp, reg, val) writel(val, pp->pBdxRegs + reg)
+
+#ifndef NET_IP_ALIGN
+# define NET_IP_ALIGN 2
+#endif
+
+#ifndef NETDEV_TX_OK
+# define NETDEV_TX_OK 0
+#endif
+
+#define LUXOR_MAX_PORT 2
+#define BDX_MAX_RX_DONE 150
+#define BDX_TXF_DESC_SZ 16
+#define BDX_MAX_TX_LEVEL (priv->txd_fifo0.m.memsz - 16)
+#define BDX_MIN_TX_LEVEL 256
+#define BDX_NO_UPD_PACKETS 40
+
+struct pci_nic {
+ int port_num;
+ void __iomem *regs;
+ int irq_type;
+ struct bdx_priv *priv[LUXOR_MAX_PORT];
+};
+
+enum { IRQ_INTX, IRQ_MSI, IRQ_MSIX };
+
+#define PCK_TH_MULT 128
+#define INT_COAL_MULT 2
+
+/* bit-field helpers; arguments fully parenthesized for macro safety */
+#define BITS_MASK(nbits) ((1 << (nbits)) - 1)
+#define GET_BITS_SHIFT(x, nbits, nshift) (((x) >> (nshift)) & BITS_MASK(nbits))
+#define BITS_SHIFT_MASK(nbits, nshift) (BITS_MASK(nbits) << (nshift))
+#define BITS_SHIFT_VAL(x, nbits, nshift) (((x) & BITS_MASK(nbits)) << (nshift))
+#define BITS_SHIFT_CLEAR(x, nbits, nshift) \
+ ((x) & (~BITS_SHIFT_MASK(nbits, nshift)))
+
+#define GET_INT_COAL(x) GET_BITS_SHIFT(x, 15, 0)
+#define GET_INT_COAL_RC(x) GET_BITS_SHIFT(x, 1, 15)
+#define GET_RXF_TH(x) GET_BITS_SHIFT(x, 4, 16)
+#define GET_PCK_TH(x) GET_BITS_SHIFT(x, 4, 20)
+
+#define INT_REG_VAL(coal, coal_rc, rxf_th, pck_th) \
+ ((coal)|((coal_rc)<<15)|((rxf_th)<<16)|((pck_th)<<20))
+
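+/* Worked example (illustrative only): the default rdintcm used by the
+ * driver, INT_REG_VAL(0x20, 1, 4, 12), packs to
+ * 0x20 | (1 << 15) | (4 << 16) | (12 << 20) = 0x00C48020, and
+ * GET_PCK_TH(0x00C48020) = (0x00C48020 >> 20) & 0xF = 12 recovers the
+ * packet threshold.
+ */
+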
+struct fifo {
+ dma_addr_t da; /* physical address of fifo (used by HW) */
+ char *va; /* virtual address of fifo (used by SW) */
+ u32 rptr, wptr; /* cached values of RPTR and WPTR registers,
+ they're 32 bits on both 32 and 64 archs */
+ u16 reg_CFG0, reg_CFG1;
+ u16 reg_RPTR, reg_WPTR;
+ u16 memsz; /* memory size allocated for fifo */
+ u16 size_mask;
+ u16 pktsz; /* skb packet size to allocate */
+ u16 rcvno; /* number of buffers that come from this RXF */
+};
+
+struct txf_fifo {
+ struct fifo m; /* minimal set of variables used by all fifos */
+};
+
+struct txd_fifo {
+ struct fifo m; /* minimal set of variables used by all fifos */
+};
+
+struct rxf_fifo {
+ struct fifo m; /* minimal set of variables used by all fifos */
+};
+
+struct rxd_fifo {
+ struct fifo m; /* minimal set of variables used by all fifos */
+};
+
+struct rx_map {
+ u64 dma;
+ struct sk_buff *skb;
+};
+
+struct rxdb {
+ int *stack;
+ struct rx_map *elems;
+ int nelem;
+ int top;
+};
+
+union bdx_dma_addr {
+ dma_addr_t dma;
+ struct sk_buff *skb;
+};
+
+/* Entry in the db.
+ * if len == 0, addr holds a dma address
+ * if len != 0, addr holds an skb pointer */
+struct tx_map {
+ union bdx_dma_addr addr;
+ int len;
+};
+
+/* tx database - implemented as a circular fifo buffer */
+struct txdb {
+ struct tx_map *start; /* points to the first element */
+ struct tx_map *end; /* points just AFTER the last element */
+ struct tx_map *rptr; /* points to the next element to read */
+ struct tx_map *wptr; /* points to the next element to write */
+ int size; /* number of elements in the db */
+};
+
+/* Internal stats structure */
+struct bdx_stats {
+ u64 InUCast; /* 0x7200 */
+ u64 InMCast; /* 0x7210 */
+ u64 InBCast; /* 0x7220 */
+ u64 InPkts; /* 0x7230 */
+ u64 InErrors; /* 0x7240 */
+ u64 InDropped; /* 0x7250 */
+ u64 FrameTooLong; /* 0x7260 */
+ u64 FrameSequenceErrors; /* 0x7270 */
+ u64 InVLAN; /* 0x7280 */
+ u64 InDroppedDFE; /* 0x7290 */
+ u64 InDroppedIntFull; /* 0x72A0 */
+ u64 InFrameAlignErrors; /* 0x72B0 */
+
+ /* 0x72C0-0x72E0 RSRV */
+
+ u64 OutUCast; /* 0x72F0 */
+ u64 OutMCast; /* 0x7300 */
+ u64 OutBCast; /* 0x7310 */
+ u64 OutPkts; /* 0x7320 */
+
+ /* 0x7330-0x7360 RSRV */
+
+ u64 OutVLAN; /* 0x7370 */
+ u64 InUCastOctects; /* 0x7380 */
+ u64 OutUCastOctects; /* 0x7390 */
+
+ /* 0x73A0-0x73B0 RSRV */
+
+ u64 InBCastOctects; /* 0x73C0 */
+ u64 OutBCastOctects; /* 0x73D0 */
+ u64 InOctects; /* 0x73E0 */
+ u64 OutOctects; /* 0x73F0 */
+};
+
+struct bdx_priv {
+ void __iomem *pBdxRegs;
+ struct net_device *ndev;
+
+ struct napi_struct napi;
+
+ /* RX FIFOs: 1 for data (full) descs, and 2 for free descs */
+ struct rxd_fifo rxd_fifo0;
+ struct rxf_fifo rxf_fifo0;
+ struct rxdb *rxdb; /* rx dbs to store skb pointers */
+ int napi_stop;
+
+ /* Tx FIFOs: 1 for data desc, 1 for empty (acks) desc */
+ struct txd_fifo txd_fifo0;
+ struct txf_fifo txf_fifo0;
+
+ struct txdb txdb;
+ int tx_level;
+#ifdef BDX_DELAY_WPTR
+ int tx_update_mark;
+ int tx_noupd;
+#endif
+ spinlock_t tx_lock; /* NETIF_F_LLTX mode */
+
+ /* rarely used */
+ u8 port;
+ u32 msg_enable;
+ int stats_flag;
+ struct bdx_stats hw_stats;
+ struct pci_dev *pdev;
+
+ struct pci_nic *nic;
+
+ u8 txd_size;
+ u8 txf_size;
+ u8 rxd_size;
+ u8 rxf_size;
+ u32 rdintcm;
+ u32 tdintcm;
+};
+
+/* RX FREE descriptor - 64bit */
+struct rxf_desc {
+ u32 info; /* Buffer Count + Info - described below */
+ u32 va_lo; /* VAdr[31:0] */
+ u32 va_hi; /* VAdr[63:32] */
+ u32 pa_lo; /* PAdr[31:0] */
+ u32 pa_hi; /* PAdr[63:32] */
+ u32 len; /* Buffer Length */
+};
+
+#define GET_RXD_BC(x) GET_BITS_SHIFT((x), 5, 0)
+#define GET_RXD_RXFQ(x) GET_BITS_SHIFT((x), 2, 8)
+#define GET_RXD_TO(x) GET_BITS_SHIFT((x), 1, 15)
+#define GET_RXD_TYPE(x) GET_BITS_SHIFT((x), 4, 16)
+#define GET_RXD_ERR(x) GET_BITS_SHIFT((x), 6, 21)
+#define GET_RXD_RXP(x) GET_BITS_SHIFT((x), 1, 27)
+#define GET_RXD_PKT_ID(x) GET_BITS_SHIFT((x), 3, 28)
+#define GET_RXD_VTAG(x) GET_BITS_SHIFT((x), 1, 31)
+#define GET_RXD_VLAN_ID(x) GET_BITS_SHIFT((x), 12, 0)
+#define GET_RXD_VLAN_TCI(x) GET_BITS_SHIFT((x), 16, 0)
+#define GET_RXD_CFI(x) GET_BITS_SHIFT((x), 1, 12)
+#define GET_RXD_PRIO(x) GET_BITS_SHIFT((x), 3, 13)
+
+struct rxd_desc {
+ u32 rxd_val1;
+ u16 len;
+ u16 rxd_vlan;
+ u32 va_lo;
+ u32 va_hi;
+};
+
+/* PBL describes each virtual buffer to be
+ * transmitted from the host. */
+struct pbl {
+ u32 pa_lo;
+ u32 pa_hi;
+ u32 len;
+};
+
+/* First word of the TXD descriptor; the constant 0x30000 sets type = 3
+ * (regular Tx packet), and checksum = 7 requests ip+udp+tcp hw checksums */
+#define TXD_W1_VAL(bc, checksum, vtag, lgsnd, vlan_id) \
+ ((bc) | ((checksum)<<5) | ((vtag)<<8) | \
+ ((lgsnd)<<9) | (0x30000) | ((vlan_id)<<20))
+
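+/* Worked example (illustrative only): TXD_W1_VAL(3, 7, 0, 0, 0) - a
+ * descriptor with bc = 3, full ip+udp+tcp checksum offload and no VLAN
+ * tag - packs to 3 | (7 << 5) | 0x30000 = 0x000300E3.
+ */
+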
+struct txd_desc {
+ u32 txd_val1;
+ u16 mss;
+ u16 length;
+ u32 va_lo;
+ u32 va_hi;
+ struct pbl pbl[0]; /* Fragments */
+} __packed;
+
+/* Register region size */
+#define BDX_REGS_SIZE 0x1000
+
+/* Registers from 0x0000-0x00fc were remapped to 0x4000-0x40fc */
+#define regTXD_CFG1_0 0x4000
+#define regRXF_CFG1_0 0x4010
+#define regRXD_CFG1_0 0x4020
+#define regTXF_CFG1_0 0x4030
+#define regTXD_CFG0_0 0x4040
+#define regRXF_CFG0_0 0x4050
+#define regRXD_CFG0_0 0x4060
+#define regTXF_CFG0_0 0x4070
+#define regTXD_WPTR_0 0x4080
+#define regRXF_WPTR_0 0x4090
+#define regRXD_WPTR_0 0x40A0
+#define regTXF_WPTR_0 0x40B0
+#define regTXD_RPTR_0 0x40C0
+#define regRXF_RPTR_0 0x40D0
+#define regRXD_RPTR_0 0x40E0
+#define regTXF_RPTR_0 0x40F0
+#define regTXF_RPTR_3 0x40FC
+
+/* hardware versioning */
+#define FW_VER 0x5010
+#define SROM_VER 0x5020
+#define FPGA_VER 0x5030
+#define FPGA_SEED 0x5040
+
+/* Registers from 0x0100-0x0150 were remapped to 0x5100-0x5150 */
+#define regISR regISR0
+#define regISR0 0x5100
+
+#define regIMR regIMR0
+#define regIMR0 0x5110
+
+#define regRDINTCM0 0x5120
+#define regRDINTCM2 0x5128
+
+#define regTDINTCM0 0x5130
+
+#define regISR_MSK0 0x5140
+
+#define regINIT_SEMAPHORE 0x5170
+#define regINIT_STATUS 0x5180
+
+#define regMAC_LNK_STAT 0x0200
+#define MAC_LINK_STAT 0x4 /* Link state */
+
+#define regGMAC_RXF_A 0x1240
+
+#define regUNC_MAC0_A 0x1250
+#define regUNC_MAC1_A 0x1260
+#define regUNC_MAC2_A 0x1270
+
+#define regVLAN_0 0x1800
+
+#define regMAX_FRAME_A 0x12C0
+
+#define regRX_MAC_MCST0 0x1A80
+#define regRX_MAC_MCST1 0x1A84
+#define MAC_MCST_NUM 15
+#define regRX_MCST_HASH0 0x1A00
+#define MAC_MCST_HASH_NUM 8
+
+#define regVPC 0x2300
+#define regVIC 0x2320
+#define regVGLB 0x2340
+
+#define regCLKPLL 0x5000
+
+/* for 10G only */
+#define regREVISION 0x6000
+#define regSCRATCH 0x6004
+#define regCTRLST 0x6008
+#define regMAC_ADDR_0 0x600C
+#define regMAC_ADDR_1 0x6010
+#define regFRM_LENGTH 0x6014
+#define regPAUSE_QUANT 0x6018
+#define regRX_FIFO_SECTION 0x601C
+#define regTX_FIFO_SECTION 0x6020
+#define regRX_FULLNESS 0x6024
+#define regTX_FULLNESS 0x6028
+#define regHASHTABLE 0x602C
+#define regMDIO_ST 0x6030
+#define regMDIO_CTL 0x6034
+#define regMDIO_DATA 0x6038
+#define regMDIO_ADDR 0x603C
+
+#define regRST_PORT 0x7000
+#define regDIS_PORT 0x7010
+#define regRST_QU 0x7020
+#define regDIS_QU 0x7030
+
+#define regCTRLST_TX_ENA 0x0001
+#define regCTRLST_RX_ENA 0x0002
+#define regCTRLST_PRM_ENA 0x0010
+#define regCTRLST_PAD_ENA 0x0020
+
+#define regCTRLST_BASE (regCTRLST_PAD_ENA|regCTRLST_PRM_ENA)
+
+#define regRX_FLT 0x1400
+
+/* TXD TXF RXF RXD CONFIG 0x0000 --- 0x007c */
+#define TX_RX_CFG1_BASE 0xffffffff /*0-31 */
+#define TX_RX_CFG0_BASE 0xfffff000 /*31:12 */
+#define TX_RX_CFG0_RSVD 0x0ffc /*11:2 */
+#define TX_RX_CFG0_SIZE 0x0003 /*1:0 */
+
+/* TXD TXF RXF RXD WRITE 0x0080 --- 0x00BC */
+#define TXF_WPTR_WR_PTR 0x7ff8 /*14:3 */
+
+/* TXD TXF RXF RXD READ 0x00C0 --- 0x00FC */
+#define TXF_RPTR_RD_PTR 0x7ff8 /*14:3 */
+
+#define TXF_WPTR_MASK 0x7ff0 /* last 4 bits are dropped
+ * size is rounded to 16 */
+
+/* regISR 0x0100 */
+/* regIMR 0x0110 */
+#define IMR_INPROG 0x80000000 /*31 */
+#define IR_LNKCHG1 0x10000000 /*28 */
+#define IR_LNKCHG0 0x08000000 /*27 */
+#define IR_GPIO 0x04000000 /*26 */
+#define IR_RFRSH 0x02000000 /*25 */
+#define IR_RSVD 0x01000000 /*24 */
+#define IR_SWI 0x00800000 /*23 */
+#define IR_RX_FREE_3 0x00400000 /*22 */
+#define IR_RX_FREE_2 0x00200000 /*21 */
+#define IR_RX_FREE_1 0x00100000 /*20 */
+#define IR_RX_FREE_0 0x00080000 /*19 */
+#define IR_TX_FREE_3 0x00040000 /*18 */
+#define IR_TX_FREE_2 0x00020000 /*17 */
+#define IR_TX_FREE_1 0x00010000 /*16 */
+#define IR_TX_FREE_0 0x00008000 /*15 */
+#define IR_RX_DESC_3 0x00004000 /*14 */
+#define IR_RX_DESC_2 0x00002000 /*13 */
+#define IR_RX_DESC_1 0x00001000 /*12 */
+#define IR_RX_DESC_0 0x00000800 /*11 */
+#define IR_PSE 0x00000400 /*10 */
+#define IR_TMR3 0x00000200 /*9 */
+#define IR_TMR2 0x00000100 /*8 */
+#define IR_TMR1 0x00000080 /*7 */
+#define IR_TMR0 0x00000040 /*6 */
+#define IR_VNT 0x00000020 /*5 */
+#define IR_RxFL 0x00000010 /*4 */
+#define IR_SDPERR 0x00000008 /*3 */
+#define IR_TR 0x00000004 /*2 */
+#define IR_PCIE_LINK 0x00000002 /*1 */
+#define IR_PCIE_TOUT 0x00000001 /*0 */
+
+#define IR_EXTRA (IR_RX_FREE_0 | IR_LNKCHG0 | IR_PSE | \
+ IR_TMR0 | IR_PCIE_LINK | IR_PCIE_TOUT)
+#define IR_RUN (IR_EXTRA | IR_RX_DESC_0 | IR_TX_FREE_0)
+#define IR_ALL 0xfdfffff7
+
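+/* Worked example (illustrative only): OR-ing the IR_EXTRA members yields
+ * 0x08080443, so IR_RUN = 0x08080443 | 0x00000800 | 0x00008000
+ * = 0x08088C43, which is presumably the mask used while the interface
+ * is running.
+ */
+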
+#define IR_LNKCHG0_ofst 27
+
+#define GMAC_RX_FILTER_OSEN 0x1000 /* shared OS enable */
+#define GMAC_RX_FILTER_TXFC 0x0400 /* Tx flow control */
+#define GMAC_RX_FILTER_RSV0 0x0200 /* reserved */
+#define GMAC_RX_FILTER_FDA 0x0100 /* filter out direct address */
+#define GMAC_RX_FILTER_AOF 0x0080 /* accept over run */
+#define GMAC_RX_FILTER_ACF 0x0040 /* accept control frames */
+#define GMAC_RX_FILTER_ARUNT 0x0020 /* accept under run */
+#define GMAC_RX_FILTER_ACRC 0x0010 /* accept crc error */
+#define GMAC_RX_FILTER_AM 0x0008 /* accept multicast */
+#define GMAC_RX_FILTER_AB 0x0004 /* accept broadcast */
+#define GMAC_RX_FILTER_PRM 0x0001 /* [0:1] promiscuous mode */
+
+#define MAX_FRAME_AB_VAL 0x3fff /* 13:0 */
+
+#define CLKPLL_PLLLKD 0x0200 /*9 */
+#define CLKPLL_RSTEND 0x0100 /*8 */
+#define CLKPLL_SFTRST 0x0001 /*0 */
+
+#define CLKPLL_LKD (CLKPLL_PLLLKD|CLKPLL_RSTEND)
+
+/*
+ * PCI-E Device Control Register (Offset 0x88)
+ * Source: Luxor Data Sheet, 7.1.3.3.3
+ */
+#define PCI_DEV_CTRL_REG 0x88
+#define GET_DEV_CTRL_MAXPL(x) GET_BITS_SHIFT(x, 3, 5)
+#define GET_DEV_CTRL_MRRS(x) GET_BITS_SHIFT(x, 3, 12)
+
+/*
+ * PCI-E Link Status Register (Offset 0x92)
+ * Source: Luxor Data Sheet, 7.1.3.3.7
+ */
+#define PCI_LINK_STATUS_REG 0x92
+#define GET_LINK_STATUS_LANES(x) GET_BITS_SHIFT(x, 6, 4)
+
+/* Debugging Macros */
+
+#define DBG2(fmt, args...) \
+ pr_err("%s:%-5d: " fmt, __func__, __LINE__, ## args)
+
+#define BDX_ASSERT(x) BUG_ON(x)
+
+#ifdef DEBUG
+
+#define ENTER \
+do { \
+ pr_err("%s:%-5d: ENTER\n", __func__, __LINE__); \
+} while (0)
+
+#define RET(args...) \
+do { \
+ pr_err("%s:%-5d: RETURN\n", __func__, __LINE__); \
+ return args; \
+} while (0)
+
+#define DBG(fmt, args...) \
+ pr_err("%s:%-5d: " fmt, __func__, __LINE__, ## args)
+#else
+#define ENTER do { } while (0)
+#define RET(args...) return args
+#define DBG(fmt, args...) \
+do { \
+ if (0) \
+ pr_err(fmt, ##args); \
+} while (0)
+#endif
+
+#endif /* _TEHUTI_H */
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
new file mode 100644
index 000000000000..de76c70ec8fb
--- /dev/null
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -0,0 +1,77 @@
+#
+# TI device configuration
+#
+
+config NET_VENDOR_TI
+ bool "Texas Instruments (TI) devices"
+ default y
+ depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3))
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about TI devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_TI
+
+config TI_DAVINCI_EMAC
+ tristate "TI DaVinci EMAC Support"
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ select TI_DAVINCI_MDIO
+ select TI_DAVINCI_CPDMA
+ select PHYLIB
+ ---help---
+ This driver supports TI's DaVinci Ethernet controller.
+
+ To compile this driver as a module, choose M here: the module
+ will be called davinci_emac. This is recommended.
+
+config TI_DAVINCI_MDIO
+ tristate "TI DaVinci MDIO Support"
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ select PHYLIB
+ ---help---
+ This driver supports TI's DaVinci MDIO module.
+
+ To compile this driver as a module, choose M here: the module
+ will be called davinci_mdio. This is recommended.
+
+config TI_DAVINCI_CPDMA
+ tristate "TI DaVinci CPDMA Support"
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ ---help---
+ This driver supports TI's DaVinci CPDMA DMA engine.
+
+ To compile this driver as a module, choose M here: the module
+ will be called davinci_cpdma. This is recommended.
+
+config TLAN
+ tristate "TI ThunderLAN support"
+ depends on (PCI || EISA)
+ ---help---
+ If you have a PCI Ethernet network card based on the ThunderLAN chip
+ which is supported by this driver, say Y and read the
+ Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Devices currently supported by this driver are Compaq Netelligent,
+ Compaq NetFlex and Olicom cards. Please read the file
+ <file:Documentation/networking/tlan.txt> for more details.
+
+ To compile this driver as a module, choose M here. The module
+ will be called tlan.
+
+ Please email feedback to <torben.mathiasen@compaq.com>.
+
+config CPMAC
+ tristate "TI AR7 CPMAC Ethernet support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && AR7
+ select PHYLIB
+ ---help---
+ TI AR7 CPMAC Ethernet support
+
+endif # NET_VENDOR_TI
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
new file mode 100644
index 000000000000..aedb3af74e5a
--- /dev/null
+++ b/drivers/net/ethernet/ti/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the TI network device drivers.
+#
+
+obj-$(CONFIG_TLAN) += tlan.o
+obj-$(CONFIG_CPMAC) += cpmac.o
+obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
+obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
+obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
new file mode 100644
index 000000000000..aaac0c7ad111
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -0,0 +1,1305 @@
+/*
+ * Copyright (C) 2006, 2007 Eugene Konev
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/atomic.h>
+
+MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
+MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:cpmac");
+
+static int debug_level = 8;
+static int dumb_switch;
+
+/* The next two parameters are only read in cpmac_probe, so changing them at runtime has no effect */
+module_param(debug_level, int, 0444);
+module_param(dumb_switch, int, 0444);
+
+MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
+MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
+
+#define CPMAC_VERSION "0.5.2"
+/* frame size + 802.1q tag + FCS size */
+#define CPMAC_SKB_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define CPMAC_QUEUES 8
+
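+/* Illustrative: with ETH_FRAME_LEN = 1514, ETH_FCS_LEN = 4 and
+ * VLAN_HLEN = 4, CPMAC_SKB_SIZE works out to 1522 bytes per RX buffer. */
+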
+/* Ethernet registers */
+#define CPMAC_TX_CONTROL 0x0004
+#define CPMAC_TX_TEARDOWN 0x0008
+#define CPMAC_RX_CONTROL 0x0014
+#define CPMAC_RX_TEARDOWN 0x0018
+#define CPMAC_MBP 0x0100
+# define MBP_RXPASSCRC 0x40000000
+# define MBP_RXQOS 0x20000000
+# define MBP_RXNOCHAIN 0x10000000
+# define MBP_RXCMF 0x01000000
+# define MBP_RXSHORT 0x00800000
+# define MBP_RXCEF 0x00400000
+# define MBP_RXPROMISC 0x00200000
+# define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16)
+# define MBP_RXBCAST 0x00002000
+# define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8)
+# define MBP_RXMCAST 0x00000020
+# define MBP_MCASTCHAN(channel) ((channel) & 0x7)
+#define CPMAC_UNICAST_ENABLE 0x0104
+#define CPMAC_UNICAST_CLEAR 0x0108
+#define CPMAC_MAX_LENGTH 0x010c
+#define CPMAC_BUFFER_OFFSET 0x0110
+#define CPMAC_MAC_CONTROL 0x0160
+# define MAC_TXPTYPE 0x00000200
+# define MAC_TXPACE 0x00000040
+# define MAC_MII 0x00000020
+# define MAC_TXFLOW 0x00000010
+# define MAC_RXFLOW 0x00000008
+# define MAC_MTEST 0x00000004
+# define MAC_LOOPBACK 0x00000002
+# define MAC_FDX 0x00000001
+#define CPMAC_MAC_STATUS 0x0164
+# define MAC_STATUS_QOS 0x00000004
+# define MAC_STATUS_RXFLOW 0x00000002
+# define MAC_STATUS_TXFLOW 0x00000001
+#define CPMAC_TX_INT_ENABLE 0x0178
+#define CPMAC_TX_INT_CLEAR 0x017c
+#define CPMAC_MAC_INT_VECTOR 0x0180
+# define MAC_INT_STATUS 0x00080000
+# define MAC_INT_HOST 0x00040000
+# define MAC_INT_RX 0x00020000
+# define MAC_INT_TX 0x00010000
+#define CPMAC_MAC_EOI_VECTOR 0x0184
+#define CPMAC_RX_INT_ENABLE 0x0198
+#define CPMAC_RX_INT_CLEAR 0x019c
+#define CPMAC_MAC_INT_ENABLE 0x01a8
+#define CPMAC_MAC_INT_CLEAR 0x01ac
+#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4)
+#define CPMAC_MAC_ADDR_MID 0x01d0
+#define CPMAC_MAC_ADDR_HI 0x01d4
+#define CPMAC_MAC_HASH_LO 0x01d8
+#define CPMAC_MAC_HASH_HI 0x01dc
+#define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4)
+#define CPMAC_RX_PTR(channel) (0x0620 + (channel) * 4)
+#define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4)
+#define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4)
+#define CPMAC_REG_END 0x0680
+/*
+ * Rx/Tx statistics
+ * TODO: use some of them to fill stats in cpmac_stats()
+ */
+#define CPMAC_STATS_RX_GOOD 0x0200
+#define CPMAC_STATS_RX_BCAST 0x0204
+#define CPMAC_STATS_RX_MCAST 0x0208
+#define CPMAC_STATS_RX_PAUSE 0x020c
+#define CPMAC_STATS_RX_CRC 0x0210
+#define CPMAC_STATS_RX_ALIGN 0x0214
+#define CPMAC_STATS_RX_OVER 0x0218
+#define CPMAC_STATS_RX_JABBER 0x021c
+#define CPMAC_STATS_RX_UNDER 0x0220
+#define CPMAC_STATS_RX_FRAG 0x0224
+#define CPMAC_STATS_RX_FILTER 0x0228
+#define CPMAC_STATS_RX_QOSFILTER 0x022c
+#define CPMAC_STATS_RX_OCTETS 0x0230
+
+#define CPMAC_STATS_TX_GOOD 0x0234
+#define CPMAC_STATS_TX_BCAST 0x0238
+#define CPMAC_STATS_TX_MCAST 0x023c
+#define CPMAC_STATS_TX_PAUSE 0x0240
+#define CPMAC_STATS_TX_DEFER 0x0244
+#define CPMAC_STATS_TX_COLLISION 0x0248
+#define CPMAC_STATS_TX_SINGLECOLL 0x024c
+#define CPMAC_STATS_TX_MULTICOLL 0x0250
+#define CPMAC_STATS_TX_EXCESSCOLL 0x0254
+#define CPMAC_STATS_TX_LATECOLL 0x0258
+#define CPMAC_STATS_TX_UNDERRUN 0x025c
+#define CPMAC_STATS_TX_CARRIERSENSE 0x0260
+#define CPMAC_STATS_TX_OCTETS 0x0264
+
+#define cpmac_read(base, reg) (readl((void __iomem *)(base) + (reg)))
+#define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \
+ (reg)))
+
+/* MDIO bus */
+#define CPMAC_MDIO_VERSION 0x0000
+#define CPMAC_MDIO_CONTROL 0x0004
+# define MDIOC_IDLE 0x80000000
+# define MDIOC_ENABLE 0x40000000
+# define MDIOC_PREAMBLE 0x00100000
+# define MDIOC_FAULT 0x00080000
+# define MDIOC_FAULTDETECT 0x00040000
+# define MDIOC_INTTEST 0x00020000
+# define MDIOC_CLKDIV(div) ((div) & 0xff)
+#define CPMAC_MDIO_ALIVE 0x0008
+#define CPMAC_MDIO_LINK 0x000c
+#define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8)
+# define MDIO_BUSY 0x80000000
+# define MDIO_WRITE 0x40000000
+# define MDIO_REG(reg) (((reg) & 0x1f) << 21)
+# define MDIO_PHY(phy) (((phy) & 0x1f) << 16)
+# define MDIO_DATA(data) ((data) & 0xffff)
+#define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8)
+# define PHYSEL_LINKSEL 0x00000040
+# define PHYSEL_LINKINT 0x00000020
+
+struct cpmac_desc {
+ u32 hw_next;
+ u32 hw_data;
+ u16 buflen;
+ u16 bufflags;
+ u16 datalen;
+ u16 dataflags;
+#define CPMAC_SOP 0x8000
+#define CPMAC_EOP 0x4000
+#define CPMAC_OWN 0x2000
+#define CPMAC_EOQ 0x1000
+ struct sk_buff *skb;
+ struct cpmac_desc *next;
+ struct cpmac_desc *prev;
+ dma_addr_t mapping;
+ dma_addr_t data_mapping;
+};
+
+struct cpmac_priv {
+ spinlock_t lock;
+ spinlock_t rx_lock;
+ struct cpmac_desc *rx_head;
+ int ring_size;
+ struct cpmac_desc *desc_ring;
+ dma_addr_t dma_ring;
+ void __iomem *regs;
+ struct mii_bus *mii_bus;
+ struct phy_device *phy;
+ char phy_name[MII_BUS_ID_SIZE + 3];
+ int oldlink, oldspeed, oldduplex;
+ u32 msg_enable;
+ struct net_device *dev;
+ struct work_struct reset_work;
+ struct platform_device *pdev;
+ struct napi_struct napi;
+ atomic_t reset_pending;
+};
+
+static irqreturn_t cpmac_irq(int, void *);
+static void cpmac_hw_start(struct net_device *dev);
+static void cpmac_hw_stop(struct net_device *dev);
+static int cpmac_stop(struct net_device *dev);
+static int cpmac_open(struct net_device *dev);
+
+static void cpmac_dump_regs(struct net_device *dev)
+{
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ for (i = 0; i < CPMAC_REG_END; i += 4) {
+ if (i % 16 == 0) {
+ if (i)
+ pr_cont("\n");
+ printk(KERN_DEBUG "%s: reg[%p]:", dev->name,
+ priv->regs + i);
+ }
+ printk(" %08x", cpmac_read(priv->regs, i));
+ }
+ printk("\n");
+}
+
+static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
+{
+ int i;
+ printk(KERN_DEBUG "%s: desc[%p]:", dev->name, desc);
+ for (i = 0; i < sizeof(*desc) / 4; i++)
+ printk(" %08x", ((u32 *)desc)[i]);
+ printk("\n");
+}
+
+static void cpmac_dump_all_desc(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct cpmac_desc *dump = priv->rx_head;
+ do {
+ cpmac_dump_desc(dev, dump);
+ dump = dump->next;
+ } while (dump != priv->rx_head);
+}
+
+static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ int i;
+ printk(KERN_DEBUG "%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
+ for (i = 0; i < skb->len; i++) {
+ if (i % 16 == 0) {
+ if (i)
+ pr_cont("\n");
+ printk(KERN_DEBUG "%s: data[%p]:", dev->name,
+ skb->data + i);
+ }
+ printk(" %02x", ((u8 *)skb->data)[i]);
+ }
+ printk("\n");
+}
+
+static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ u32 val;
+
+ while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
+ cpu_relax();
+ cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
+ MDIO_PHY(phy_id));
+ while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
+ cpu_relax();
+ return MDIO_DATA(val);
+}
+
+static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
+ int reg, u16 val)
+{
+ while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
+ cpu_relax();
+ cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
+ MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));
+ return 0;
+}
+
+static int cpmac_mdio_reset(struct mii_bus *bus)
+{
+ struct clk *cpmac_clk;
+
+ cpmac_clk = clk_get(&bus->dev, "cpmac");
+ if (IS_ERR(cpmac_clk)) {
+ printk(KERN_ERR "unable to get cpmac clock\n");
+ return -1;
+ }
+ ar7_device_reset(AR7_RESET_BIT_MDIO);
+ cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
+ MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));
+ return 0;
+}
+
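+/* Illustrative worked example (clock rate and divider behaviour assumed):
+ * with a 125 MHz "cpmac" clock, 125000000 / 2200000 - 1 = 55, so
+ * MDIOC_CLKDIV(55) is programmed; assuming the hardware divides by
+ * clkdiv + 1, the MDIO clock comes out near 125 MHz / 56 ~= 2.2 MHz.
+ */
+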
+static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };
+
+static struct mii_bus *cpmac_mii;
+
+static int cpmac_config(struct net_device *dev, struct ifmap *map)
+{
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Don't allow changing the I/O address */
+ if (map->base_addr != dev->base_addr)
+ return -EOPNOTSUPP;
+
+ /* ignore other fields */
+ return 0;
+}
+
+static void cpmac_set_multicast_list(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ u8 tmp;
+ u32 mbp, bit, hash[2] = { 0, };
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ mbp = cpmac_read(priv->regs, CPMAC_MBP);
+ if (dev->flags & IFF_PROMISC) {
+ cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
+ MBP_RXPROMISC);
+ } else {
+ cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
+ if (dev->flags & IFF_ALLMULTI) {
+ /* enable all multicast mode */
+ cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
+ cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
+ } else {
+ /*
+ * cpmac uses some strange mac address hashing
+ * (not crc32)
+ */
+ netdev_for_each_mc_addr(ha, dev) {
+ bit = 0;
+ tmp = ha->addr[0];
+ bit ^= (tmp >> 2) ^ (tmp << 4);
+ tmp = ha->addr[1];
+ bit ^= (tmp >> 4) ^ (tmp << 2);
+ tmp = ha->addr[2];
+ bit ^= (tmp >> 6) ^ tmp;
+ tmp = ha->addr[3];
+ bit ^= (tmp >> 2) ^ (tmp << 4);
+ tmp = ha->addr[4];
+ bit ^= (tmp >> 4) ^ (tmp << 2);
+ tmp = ha->addr[5];
+ bit ^= (tmp >> 6) ^ tmp;
+ bit &= 0x3f;
+ hash[bit / 32] |= 1 << (bit % 32);
+ }
+
+ cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
+ cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
+ }
+ }
+}
+
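+/* Worked example of the hash above (illustrative only): for the multicast
+ * address 01:00:5e:00:00:01 the XOR folding gives 0x10 ^ 0x5f ^ 0x01 = 0x4e,
+ * masked to 6 bits -> bit 14, so bit 14 of CPMAC_MAC_HASH_LO is set for
+ * that address.
+ */
+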
+static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
+ struct cpmac_desc *desc)
+{
+ struct sk_buff *skb, *result = NULL;
+
+ if (unlikely(netif_msg_hw(priv)))
+ cpmac_dump_desc(priv->dev, desc);
+ cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
+ if (unlikely(!desc->datalen)) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING "%s: rx: spurious interrupt\n",
+ priv->dev->name);
+ return NULL;
+ }
+
+ skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
+ if (likely(skb)) {
+ skb_put(desc->skb, desc->datalen);
+ desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
+ skb_checksum_none_assert(desc->skb);
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += desc->datalen;
+ result = desc->skb;
+ dma_unmap_single(&priv->dev->dev, desc->data_mapping,
+ CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
+ desc->skb = skb;
+ desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
+ CPMAC_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ desc->hw_data = (u32)desc->data_mapping;
+ if (unlikely(netif_msg_pktdata(priv))) {
+ printk(KERN_DEBUG "%s: received packet:\n",
+ priv->dev->name);
+ cpmac_dump_skb(priv->dev, result);
+ }
+ } else {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING
+ "%s: low on skbs, dropping packet\n",
+ priv->dev->name);
+ priv->dev->stats.rx_dropped++;
+ }
+
+ desc->buflen = CPMAC_SKB_SIZE;
+ desc->dataflags = CPMAC_OWN;
+
+ return result;
+}
+
+static int cpmac_poll(struct napi_struct *napi, int budget)
+{
+ struct sk_buff *skb;
+ struct cpmac_desc *desc, *restart;
+ struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
+ int received = 0, processed = 0;
+
+ spin_lock(&priv->rx_lock);
+ if (unlikely(!priv->rx_head)) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING "%s: rx: polling, but no queue\n",
+ priv->dev->name);
+ spin_unlock(&priv->rx_lock);
+ napi_complete(napi);
+ return 0;
+ }
+
+ desc = priv->rx_head;
+ restart = NULL;
+ while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
+ processed++;
+
+ if ((desc->dataflags & CPMAC_EOQ) != 0) {
+ /* The last update to eoq->hw_next didn't happen
+ * soon enough, and the receiver stopped here.
+ * Remember this descriptor so we can restart
+ * the receiver after freeing some space.
+ */
+ if (unlikely(restart)) {
+ if (netif_msg_rx_err(priv))
+ printk(KERN_ERR "%s: poll found a"
+ " duplicate EOQ: %p and %p\n",
+ priv->dev->name, restart, desc);
+ goto fatal_error;
+ }
+
+ restart = desc->next;
+ }
+
+ skb = cpmac_rx_one(priv, desc);
+ if (likely(skb)) {
+ netif_receive_skb(skb);
+ received++;
+ }
+ desc = desc->next;
+ }
+
+ if (desc != priv->rx_head) {
+ /* We freed some buffers, but not the whole ring;
+ * requeue what we did free onto the rx list */
+ desc->prev->hw_next = (u32)0;
+ priv->rx_head->prev->hw_next = priv->rx_head->mapping;
+ }
+
+ /* Optimization: If we did not actually process an EOQ (perhaps because
+ * of quota limits), check to see if the tail of the queue has EOQ set.
+ * We should immediately restart in that case so that the receiver can
+ * restart and run in parallel with more packet processing.
+ * This lets us handle slightly larger bursts before running
+ * out of ring space (assuming dev->weight < ring_size) */
+
+ if (!restart &&
+ (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
+ == CPMAC_EOQ &&
+ (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
+ /* reset EOQ so the poll loop (above) doesn't try to
+ * restart this when it eventually gets to this descriptor.
+ */
+ priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
+ restart = priv->rx_head;
+ }
+
+ if (restart) {
+ priv->dev->stats.rx_errors++;
+ priv->dev->stats.rx_fifo_errors++;
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING "%s: rx dma ring overrun\n",
+ priv->dev->name);
+
+ if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
+ if (netif_msg_drv(priv))
+ printk(KERN_ERR "%s: cpmac_poll is trying to "
+ "restart rx from a descriptor that's "
+ "not free: %p\n",
+ priv->dev->name, restart);
+ goto fatal_error;
+ }
+
+ cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
+ }
+
+ priv->rx_head = desc;
+ spin_unlock(&priv->rx_lock);
+ if (unlikely(netif_msg_rx_status(priv)))
+ printk(KERN_DEBUG "%s: poll processed %d packets\n",
+ priv->dev->name, received);
+ if (processed == 0) {
+ /* we ran out of packets to read,
+ * revert to interrupt-driven mode */
+ napi_complete(napi);
+ cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
+ return 0;
+ }
+
+ return 1;
+
+fatal_error:
+ /* Something went horribly wrong.
+ * Reset hardware to try to recover rather than wedging. */
+
+ if (netif_msg_drv(priv)) {
+ printk(KERN_ERR "%s: cpmac_poll is confused. "
+ "Resetting hardware\n", priv->dev->name);
+ cpmac_dump_all_desc(priv->dev);
+ printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
+ priv->dev->name,
+ cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
+ cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
+ }
+
+ spin_unlock(&priv->rx_lock);
+ napi_complete(napi);
+ netif_tx_stop_all_queues(priv->dev);
+ napi_disable(&priv->napi);
+
+ atomic_inc(&priv->reset_pending);
+ cpmac_hw_stop(priv->dev);
+ if (!schedule_work(&priv->reset_work))
+ atomic_dec(&priv->reset_pending);
+ return 0;
+}
+
+static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ int queue, len;
+ struct cpmac_desc *desc;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (unlikely(atomic_read(&priv->reset_pending)))
+ return NETDEV_TX_BUSY;
+
+ if (unlikely(skb_padto(skb, ETH_ZLEN)))
+ return NETDEV_TX_OK;
+
+ len = max(skb->len, ETH_ZLEN);
+ queue = skb_get_queue_mapping(skb);
+ netif_stop_subqueue(dev, queue);
+
+ desc = &priv->desc_ring[queue];
+ if (unlikely(desc->dataflags & CPMAC_OWN)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING "%s: tx dma ring full\n",
+ dev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ spin_lock(&priv->lock);
+ spin_unlock(&priv->lock);
+ desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
+ desc->skb = skb;
+ desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
+ DMA_TO_DEVICE);
+ desc->hw_data = (u32)desc->data_mapping;
+ desc->datalen = len;
+ desc->buflen = len;
+ if (unlikely(netif_msg_tx_queued(priv)))
+ printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
+ skb->len);
+ if (unlikely(netif_msg_hw(priv)))
+ cpmac_dump_desc(dev, desc);
+ if (unlikely(netif_msg_pktdata(priv)))
+ cpmac_dump_skb(dev, skb);
+ cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);
+
+ return NETDEV_TX_OK;
+}
+
+static void cpmac_end_xmit(struct net_device *dev, int queue)
+{
+ struct cpmac_desc *desc;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ desc = &priv->desc_ring[queue];
+ cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
+ if (likely(desc->skb)) {
+ spin_lock(&priv->lock);
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += desc->skb->len;
+ spin_unlock(&priv->lock);
+ dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(netif_msg_tx_done(priv)))
+ printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name,
+ desc->skb, desc->skb->len);
+
+ dev_kfree_skb_irq(desc->skb);
+ desc->skb = NULL;
+ if (__netif_subqueue_stopped(dev, queue))
+ netif_wake_subqueue(dev, queue);
+ } else {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING
+ "%s: end_xmit: spurious interrupt\n", dev->name);
+ if (__netif_subqueue_stopped(dev, queue))
+ netif_wake_subqueue(dev, queue);
+ }
+}
+
+static void cpmac_hw_stop(struct net_device *dev)
+{
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;
+
+ ar7_device_reset(pdata->reset_bit);
+ cpmac_write(priv->regs, CPMAC_RX_CONTROL,
+ cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
+ cpmac_write(priv->regs, CPMAC_TX_CONTROL,
+ cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
+ for (i = 0; i < 8; i++) {
+ cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
+ cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
+ }
+ cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
+ cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
+}
+
+static void cpmac_hw_start(struct net_device *dev)
+{
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;
+
+ ar7_device_reset(pdata->reset_bit);
+ for (i = 0; i < 8; i++) {
+ cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
+ cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
+ }
+ cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);
+
+ cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST |
+ MBP_RXMCAST);
+ cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);
+ for (i = 0; i < 8; i++)
+ cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
+ cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
+ cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
+ (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
+ (dev->dev_addr[3] << 24));
+ cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
+ cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
+ cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
+ cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
+ cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
+
+ cpmac_write(priv->regs, CPMAC_RX_CONTROL,
+ cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
+ cpmac_write(priv->regs, CPMAC_TX_CONTROL,
+ cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
+ cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
+ cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII |
+ MAC_FDX);
+}
+
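+/* Illustrative: for a MAC address of 00:11:22:33:44:55 the writes above set
+ * CPMAC_MAC_ADDR_HI = 0x33221100, CPMAC_MAC_ADDR_MID = 0x44 and
+ * CPMAC_MAC_ADDR_LO(i) = 0x55 for every channel.
+ */
+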
+static void cpmac_clear_rx(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct cpmac_desc *desc;
+ int i;
+ if (unlikely(!priv->rx_head))
+ return;
+ desc = priv->rx_head;
+ for (i = 0; i < priv->ring_size; i++) {
+ if ((desc->dataflags & CPMAC_OWN) == 0) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING "%s: packet dropped\n",
+ dev->name);
+ if (unlikely(netif_msg_hw(priv)))
+ cpmac_dump_desc(dev, desc);
+ desc->dataflags = CPMAC_OWN;
+ dev->stats.rx_dropped++;
+ }
+ desc->hw_next = desc->next->mapping;
+ desc = desc->next;
+ }
+ priv->rx_head->prev->hw_next = 0;
+}
+
+static void cpmac_clear_tx(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ int i;
+ if (unlikely(!priv->desc_ring))
+ return;
+ for (i = 0; i < CPMAC_QUEUES; i++) {
+ priv->desc_ring[i].dataflags = 0;
+ if (priv->desc_ring[i].skb) {
+ dev_kfree_skb_any(priv->desc_ring[i].skb);
+ priv->desc_ring[i].skb = NULL;
+ }
+ }
+}
+
+static void cpmac_hw_error(struct work_struct *work)
+{
+ struct cpmac_priv *priv =
+ container_of(work, struct cpmac_priv, reset_work);
+
+ spin_lock(&priv->rx_lock);
+ cpmac_clear_rx(priv->dev);
+ spin_unlock(&priv->rx_lock);
+ cpmac_clear_tx(priv->dev);
+ cpmac_hw_start(priv->dev);
+ barrier();
+ atomic_dec(&priv->reset_pending);
+
+ netif_tx_wake_all_queues(priv->dev);
+ cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
+}
+
+static void cpmac_check_status(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
+ int rx_channel = (macstatus >> 8) & 7;
+ int rx_code = (macstatus >> 12) & 15;
+ int tx_channel = (macstatus >> 16) & 7;
+ int tx_code = (macstatus >> 20) & 15;
+
+ if (rx_code || tx_code) {
+ if (netif_msg_drv(priv) && net_ratelimit()) {
+ /* Can't find any documentation on what these
+ * error codes actually are, so just log them and hope...
+ */
+ if (rx_code)
+ printk(KERN_WARNING "%s: host error %d on rx "
+ "channel %d (macstatus %08x), resetting\n",
+ dev->name, rx_code, rx_channel, macstatus);
+ if (tx_code)
+ printk(KERN_WARNING "%s: host error %d on tx "
+ "channel %d (macstatus %08x), resetting\n",
+ dev->name, tx_code, tx_channel, macstatus);
+ }
+
+ netif_tx_stop_all_queues(dev);
+ cpmac_hw_stop(dev);
+ if (schedule_work(&priv->reset_work))
+ atomic_inc(&priv->reset_pending);
+ if (unlikely(netif_msg_hw(priv)))
+ cpmac_dump_regs(dev);
+ }
+ cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
+}
+
+static irqreturn_t cpmac_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct cpmac_priv *priv;
+ int queue;
+ u32 status;
+
+ priv = netdev_priv(dev);
+
+ status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);
+
+ if (unlikely(netif_msg_intr(priv)))
+ printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n", dev->name,
+ status);
+
+ if (status & MAC_INT_TX)
+ cpmac_end_xmit(dev, (status & 7));
+
+ if (status & MAC_INT_RX) {
+ queue = (status >> 8) & 7;
+ if (napi_schedule_prep(&priv->napi)) {
+ cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
+ __napi_schedule(&priv->napi);
+ }
+ }
+
+ cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);
+
+ if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
+ cpmac_check_status(dev);
+
+ return IRQ_HANDLED;
+}
+
+static void cpmac_tx_timeout(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ spin_lock(&priv->lock);
+ dev->stats.tx_errors++;
+ spin_unlock(&priv->lock);
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING "%s: transmit timeout\n", dev->name);
+
+ atomic_inc(&priv->reset_pending);
+ barrier();
+ cpmac_clear_tx(dev);
+ barrier();
+ atomic_dec(&priv->reset_pending);
+
+ netif_tx_wake_all_queues(priv->dev);
+}
+
+static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ if (!(netif_running(dev)))
+ return -EINVAL;
+ if (!priv->phy)
+ return -EINVAL;
+
+ return phy_mii_ioctl(priv->phy, ifr, cmd);
+}
+
+static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (priv->phy)
+ return phy_ethtool_gset(priv->phy, cmd);
+
+ return -EINVAL;
+}
+
+static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (priv->phy)
+ return phy_ethtool_sset(priv->phy, cmd);
+
+ return -EINVAL;
+}
+
+static void cpmac_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ ring->rx_max_pending = 1024;
+ ring->rx_mini_max_pending = 1;
+ ring->rx_jumbo_max_pending = 1;
+ ring->tx_max_pending = 1;
+
+ ring->rx_pending = priv->ring_size;
+ ring->rx_mini_pending = 1;
+ ring->rx_jumbo_pending = 1;
+ ring->tx_pending = 1;
+}
+
+static int cpmac_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (netif_running(dev))
+ return -EBUSY;
+ priv->ring_size = ring->rx_pending;
+ return 0;
+}
+
+static void cpmac_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, "cpmac");
+ strcpy(info->version, CPMAC_VERSION);
+ info->fw_version[0] = '\0';
+ strcpy(info->bus_info, "cpmac");
+ info->regdump_len = 0;
+}
+
+static const struct ethtool_ops cpmac_ethtool_ops = {
+ .get_settings = cpmac_get_settings,
+ .set_settings = cpmac_set_settings,
+ .get_drvinfo = cpmac_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = cpmac_get_ringparam,
+ .set_ringparam = cpmac_set_ringparam,
+};
+
+static void cpmac_adjust_link(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ int new_state = 0;
+
+ spin_lock(&priv->lock);
+ if (priv->phy->link) {
+ netif_tx_start_all_queues(dev);
+ if (priv->phy->duplex != priv->oldduplex) {
+ new_state = 1;
+ priv->oldduplex = priv->phy->duplex;
+ }
+
+ if (priv->phy->speed != priv->oldspeed) {
+ new_state = 1;
+ priv->oldspeed = priv->phy->speed;
+ }
+
+ if (!priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 1;
+ }
+ } else if (priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+ }
+
+ if (new_state && netif_msg_link(priv) && net_ratelimit())
+ phy_print_status(priv->phy);
+
+ spin_unlock(&priv->lock);
+}
+
+static int cpmac_open(struct net_device *dev)
+{
+ int i, size, res;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct resource *mem;
+ struct cpmac_desc *desc;
+ struct sk_buff *skb;
+
+ mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
+ if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
+ if (netif_msg_drv(priv))
+ printk(KERN_ERR "%s: failed to request registers\n",
+ dev->name);
+ res = -ENXIO;
+ goto fail_reserve;
+ }
+
+ priv->regs = ioremap(mem->start, resource_size(mem));
+ if (!priv->regs) {
+ if (netif_msg_drv(priv))
+ printk(KERN_ERR "%s: failed to remap registers\n",
+ dev->name);
+ res = -ENXIO;
+ goto fail_remap;
+ }
+
+ size = priv->ring_size + CPMAC_QUEUES;
+ priv->desc_ring = dma_alloc_coherent(&dev->dev,
+ sizeof(struct cpmac_desc) * size,
+ &priv->dma_ring,
+ GFP_KERNEL);
+ if (!priv->desc_ring) {
+ res = -ENOMEM;
+ goto fail_alloc;
+ }
+
+ for (i = 0; i < size; i++)
+ priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i;
+
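+ /*
+ * Ring layout: the first CPMAC_QUEUES descriptors are the
+ * per-queue tx slots; the ring_size descriptors that follow form
+ * the rx ring. The rx ring is linked circularly in software,
+ * while the hardware chain is terminated at the tail (hw_next
+ * cleared below).
+ */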
+ priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
+ for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
+ skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
+ if (unlikely(!skb)) {
+ res = -ENOMEM;
+ goto fail_desc;
+ }
+ desc->skb = skb;
+ desc->data_mapping = dma_map_single(&dev->dev, skb->data,
+ CPMAC_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ desc->hw_data = (u32)desc->data_mapping;
+ desc->buflen = CPMAC_SKB_SIZE;
+ desc->dataflags = CPMAC_OWN;
+ desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
+ desc->next->prev = desc;
+ desc->hw_next = (u32)desc->next->mapping;
+ }
+
+ priv->rx_head->prev->hw_next = (u32)0;
+
+ res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
+ if (res) {
+ if (netif_msg_drv(priv))
+ printk(KERN_ERR "%s: failed to obtain irq\n",
+ dev->name);
+ goto fail_irq;
+ }
+
+ atomic_set(&priv->reset_pending, 0);
+ INIT_WORK(&priv->reset_work, cpmac_hw_error);
+ cpmac_hw_start(dev);
+
+ napi_enable(&priv->napi);
+ priv->phy->state = PHY_CHANGELINK;
+ phy_start(priv->phy);
+
+ return 0;
+
+fail_irq:
+fail_desc:
+ for (i = 0; i < priv->ring_size; i++) {
+ if (priv->rx_head[i].skb) {
+ dma_unmap_single(&dev->dev,
+ priv->rx_head[i].data_mapping,
+ CPMAC_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ kfree_skb(priv->rx_head[i].skb);
+ }
+ }
+ /* the ring came from dma_alloc_coherent(), so kfree() would be
+ * wrong here; release it with dma_free_coherent() instead
+ */
+ dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size,
+ priv->desc_ring, priv->dma_ring);
+fail_alloc:
+ iounmap(priv->regs);
+
+fail_remap:
+ release_mem_region(mem->start, resource_size(mem));
+
+fail_reserve:
+ return res;
+}
+
+static int cpmac_stop(struct net_device *dev)
+{
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct resource *mem;
+
+ netif_tx_stop_all_queues(dev);
+
+ cancel_work_sync(&priv->reset_work);
+ napi_disable(&priv->napi);
+ phy_stop(priv->phy);
+
+ cpmac_hw_stop(dev);
+
+ for (i = 0; i < 8; i++)
+ cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
+ cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0);
+ cpmac_write(priv->regs, CPMAC_MBP, 0);
+
+ free_irq(dev->irq, dev);
+ iounmap(priv->regs);
+ mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
+ release_mem_region(mem->start, resource_size(mem));
+ priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
+ for (i = 0; i < priv->ring_size; i++) {
+ if (priv->rx_head[i].skb) {
+ dma_unmap_single(&dev->dev,
+ priv->rx_head[i].data_mapping,
+ CPMAC_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ kfree_skb(priv->rx_head[i].skb);
+ }
+ }
+
+ dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
+ (CPMAC_QUEUES + priv->ring_size),
+ priv->desc_ring, priv->dma_ring);
+ return 0;
+}
+
+static const struct net_device_ops cpmac_netdev_ops = {
+ .ndo_open = cpmac_open,
+ .ndo_stop = cpmac_stop,
+ .ndo_start_xmit = cpmac_start_xmit,
+ .ndo_tx_timeout = cpmac_tx_timeout,
+ .ndo_set_rx_mode = cpmac_set_multicast_list,
+ .ndo_do_ioctl = cpmac_ioctl,
+ .ndo_set_config = cpmac_config,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int external_switch;
+
+static int __devinit cpmac_probe(struct platform_device *pdev)
+{
+ int rc, phy_id;
+ char mdio_bus_id[MII_BUS_ID_SIZE];
+ struct resource *mem;
+ struct cpmac_priv *priv;
+ struct net_device *dev;
+ struct plat_cpmac_data *pdata;
+
+ pdata = pdev->dev.platform_data;
+
+ if (external_switch || dumb_switch) {
+ strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
+ phy_id = pdev->id;
+ } else {
+ for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
+ if (!(pdata->phy_mask & (1 << phy_id)))
+ continue;
+ if (!cpmac_mii->phy_map[phy_id])
+ continue;
+ strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE);
+ break;
+ }
+ }
+
+ if (phy_id == PHY_MAX_ADDR) {
+ dev_err(&pdev->dev, "no PHY present, falling back "
+ "to switch on MDIO bus 0\n");
+ strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
+ phy_id = pdev->id;
+ }
+
+ dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);
+
+ if (!dev) {
+ printk(KERN_ERR "cpmac: Unable to allocate net_device\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, dev);
+ priv = netdev_priv(dev);
+
+ priv->pdev = pdev;
+ mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ if (!mem) {
+ rc = -ENODEV;
+ goto fail;
+ }
+
+ dev->irq = platform_get_irq_byname(pdev, "irq");
+
+ dev->netdev_ops = &cpmac_netdev_ops;
+ dev->ethtool_ops = &cpmac_ethtool_ops;
+
+ netif_napi_add(dev, &priv->napi, cpmac_poll, 64);
+
+ spin_lock_init(&priv->lock);
+ spin_lock_init(&priv->rx_lock);
+ priv->dev = dev;
+ priv->ring_size = 64;
+ priv->msg_enable = netif_msg_init(debug_level, 0xff);
+ memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));
+
+ snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
+ mdio_bus_id, phy_id);
+
+ priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(priv->phy)) {
+ if (netif_msg_drv(priv))
+ printk(KERN_ERR "%s: Could not attach to PHY\n",
+ dev->name);
+ rc = PTR_ERR(priv->phy);
+ goto fail;
+ }
+
+ rc = register_netdev(dev);
+ if (rc) {
+ printk(KERN_ERR "cpmac: error %i registering device %s\n", rc,
+ dev->name);
+ goto fail;
+ }
+
+ if (netif_msg_probe(priv)) {
+ printk(KERN_INFO
+ "cpmac: device %s (regs: %p, irq: %d, phy: %s, "
+ "mac: %pM)\n", dev->name, (void *)mem->start, dev->irq,
+ priv->phy_name, dev->dev_addr);
+ }
+ return 0;
+
+fail:
+ free_netdev(dev);
+ return rc;
+}
+
+static int __devexit cpmac_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ unregister_netdev(dev);
+ free_netdev(dev);
+ return 0;
+}
+
+static struct platform_driver cpmac_driver = {
+ .driver.name = "cpmac",
+ .driver.owner = THIS_MODULE,
+ .probe = cpmac_probe,
+ .remove = __devexit_p(cpmac_remove),
+};
+
+static int __init cpmac_init(void)
+{
+ u32 mask;
+ int i, res;
+
+ cpmac_mii = mdiobus_alloc();
+ if (cpmac_mii == NULL)
+ return -ENOMEM;
+
+ cpmac_mii->name = "cpmac-mii";
+ cpmac_mii->read = cpmac_mdio_read;
+ cpmac_mii->write = cpmac_mdio_write;
+ cpmac_mii->reset = cpmac_mdio_reset;
+ cpmac_mii->irq = mii_irqs;
+
+ cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);
+
+ if (!cpmac_mii->priv) {
+ printk(KERN_ERR "Can't ioremap mdio registers\n");
+ res = -ENXIO;
+ goto fail_alloc;
+ }
+
+#warning FIXME: unhardcode gpio&reset bits
+ ar7_gpio_disable(26);
+ ar7_gpio_disable(27);
+ ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
+ ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
+ ar7_device_reset(AR7_RESET_BIT_EPHY);
+
+ cpmac_mii->reset(cpmac_mii);
+
+ for (i = 0; i < 300; i++) {
+ mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE);
+ if (mask)
+ break;
+ else
+ msleep(10);
+ }
+
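+ /*
+ * mask now has one bit set per PHY address that reported alive;
+ * (mask & (mask - 1)) is non-zero iff more than one bit is set,
+ * i.e. several "PHYs" answered, which points at an external switch.
+ */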
+ mask &= 0x7fffffff;
+ if (mask & (mask - 1)) {
+ external_switch = 1;
+ mask = 0;
+ }
+
+ cpmac_mii->phy_mask = ~(mask | 0x80000000);
+ snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "1");
+
+ res = mdiobus_register(cpmac_mii);
+ if (res)
+ goto fail_mii;
+
+ res = platform_driver_register(&cpmac_driver);
+ if (res)
+ goto fail_cpmac;
+
+ return 0;
+
+fail_cpmac:
+ mdiobus_unregister(cpmac_mii);
+
+fail_mii:
+ iounmap(cpmac_mii->priv);
+
+fail_alloc:
+ mdiobus_free(cpmac_mii);
+
+ return res;
+}
+
+static void __exit cpmac_exit(void)
+{
+ platform_driver_unregister(&cpmac_driver);
+ mdiobus_unregister(cpmac_mii);
+ iounmap(cpmac_mii->priv);
+ mdiobus_free(cpmac_mii);
+}
+
+module_init(cpmac_init);
+module_exit(cpmac_exit);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
new file mode 100644
index 000000000000..dca9d3369cdd
--- /dev/null
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -0,0 +1,970 @@
+/*
+ * Texas Instruments CPDMA Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+#include "davinci_cpdma.h"
+
+/* DMA Registers */
+#define CPDMA_TXIDVER 0x00
+#define CPDMA_TXCONTROL 0x04
+#define CPDMA_TXTEARDOWN 0x08
+#define CPDMA_RXIDVER 0x10
+#define CPDMA_RXCONTROL 0x14
+#define CPDMA_SOFTRESET 0x1c
+#define CPDMA_RXTEARDOWN 0x18
+#define CPDMA_TXINTSTATRAW 0x80
+#define CPDMA_TXINTSTATMASKED 0x84
+#define CPDMA_TXINTMASKSET 0x88
+#define CPDMA_TXINTMASKCLEAR 0x8c
+#define CPDMA_MACINVECTOR 0x90
+#define CPDMA_MACEOIVECTOR 0x94
+#define CPDMA_RXINTSTATRAW 0xa0
+#define CPDMA_RXINTSTATMASKED 0xa4
+#define CPDMA_RXINTMASKSET 0xa8
+#define CPDMA_RXINTMASKCLEAR 0xac
+#define CPDMA_DMAINTSTATRAW 0xb0
+#define CPDMA_DMAINTSTATMASKED 0xb4
+#define CPDMA_DMAINTMASKSET 0xb8
+#define CPDMA_DMAINTMASKCLEAR 0xbc
+#define CPDMA_DMAINT_HOSTERR BIT(1)
+
+/* the following exist only if has_ext_regs is set */
+#define CPDMA_DMACONTROL 0x20
+#define CPDMA_DMASTATUS 0x24
+#define CPDMA_RXBUFFOFS 0x28
+#define CPDMA_EM_CONTROL 0x2c
+
+/* Descriptor mode bits */
+#define CPDMA_DESC_SOP BIT(31)
+#define CPDMA_DESC_EOP BIT(30)
+#define CPDMA_DESC_OWNER BIT(29)
+#define CPDMA_DESC_EOQ BIT(28)
+#define CPDMA_DESC_TD_COMPLETE BIT(27)
+#define CPDMA_DESC_PASS_CRC BIT(26)
+
+#define CPDMA_TEARDOWN_VALUE 0xfffffffc
+
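+/*
+ * The first four words below are read and written by the DMA engine and
+ * must stay in this exact order; the sw_* fields are driver bookkeeping
+ * only (completion token, plus buffer/length kept around for the
+ * dma_unmap_single() call at completion time).
+ */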
+struct cpdma_desc {
+ /* hardware fields */
+ u32 hw_next;
+ u32 hw_buffer;
+ u32 hw_len;
+ u32 hw_mode;
+ /* software fields */
+ void *sw_token;
+ u32 sw_buffer;
+ u32 sw_len;
+};
+
+struct cpdma_desc_pool {
+ u32 phys;
+ u32 hw_addr;
+ void __iomem *iomap; /* ioremap map */
+ void *cpumap; /* dma_alloc map */
+ int desc_size, mem_size;
+ int num_desc, used_desc;
+ unsigned long *bitmap;
+ struct device *dev;
+ spinlock_t lock;
+};
+
+enum cpdma_state {
+ CPDMA_STATE_IDLE,
+ CPDMA_STATE_ACTIVE,
+ CPDMA_STATE_TEARDOWN,
+};
+
+static const char *cpdma_state_str[] = { "idle", "active", "teardown" };
+
+struct cpdma_ctlr {
+ enum cpdma_state state;
+ struct cpdma_params params;
+ struct device *dev;
+ struct cpdma_desc_pool *pool;
+ spinlock_t lock;
+ struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS];
+};
+
+struct cpdma_chan {
+ enum cpdma_state state;
+ struct cpdma_ctlr *ctlr;
+ int chan_num;
+ spinlock_t lock;
+ struct cpdma_desc __iomem *head, *tail;
+ int count;
+ void __iomem *hdp, *cp, *rxfree;
+ u32 mask;
+ cpdma_handler_fn handler;
+ enum dma_data_direction dir;
+ struct cpdma_chan_stats stats;
+ /* offsets into dmaregs */
+ int int_set, int_clear, td;
+};
+
+/* The following make access to common cpdma_ctlr params more readable */
+#define dmaregs params.dmaregs
+#define num_chan params.num_chan
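+/* e.g. ctlr->num_chan below expands to ctlr->params.num_chan */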
+
+/* various accessors */
+#define dma_reg_read(ctlr, ofs) __raw_readl((ctlr)->dmaregs + (ofs))
+#define chan_read(chan, fld) __raw_readl((chan)->fld)
+#define desc_read(desc, fld) __raw_readl(&(desc)->fld)
+#define dma_reg_write(ctlr, ofs, v) __raw_writel(v, (ctlr)->dmaregs + (ofs))
+#define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld)
+#define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld)
+
+/*
+ * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
+ * emac) have dedicated on-chip memory for these descriptors. Some other
+ * devices (e.g. cpsw switches) use plain old memory. Descriptor pools
+ * abstract out these details.
+ */
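+/*
+ * Illustrative only (addresses and sizes below are made up): a pool in
+ * dedicated on-chip RAM passes its physical address, while a pool in
+ * ordinary memory passes phys == 0 and lets dma_alloc_coherent() back it:
+ *
+ *	pool = cpdma_desc_pool_create(dev, 0x01e20000, 0x01e20000, SZ_8K, 16);
+ *	pool = cpdma_desc_pool_create(dev, 0, 0, SZ_8K, 16);
+ */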
+static struct cpdma_desc_pool *
+cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
+ int size, int align)
+{
+ int bitmap_size;
+ struct cpdma_desc_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ spin_lock_init(&pool->lock);
+
+ pool->dev = dev;
+ pool->mem_size = size;
+ pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
+ pool->num_desc = size / pool->desc_size;
+
+ bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
+ pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!pool->bitmap)
+ goto fail;
+
+ if (phys) {
+ pool->phys = phys;
+ pool->iomap = ioremap(phys, size);
+ pool->hw_addr = hw_addr;
+ } else {
+ pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
+ GFP_KERNEL);
+ pool->iomap = pool->cpumap;
+ pool->hw_addr = pool->phys;
+ }
+
+ if (pool->iomap)
+ return pool;
+
+fail:
+ kfree(pool->bitmap);
+ kfree(pool);
+ return NULL;
+}
+
+static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
+{
+ unsigned long flags;
+
+ if (!pool)
+ return;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ WARN_ON(pool->used_desc);
+ kfree(pool->bitmap);
+ if (pool->cpumap) {
+ dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
+ pool->phys);
+ } else {
+ iounmap(pool->iomap);
+ }
+ spin_unlock_irqrestore(&pool->lock, flags);
+ kfree(pool);
+}
+
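+/*
+ * Descriptors live in pool->iomap; the two helpers below rebase between a
+ * CPU-side descriptor pointer and the address the DMA engine sees, using
+ * pool->hw_addr as the hardware-visible base of the same region.
+ */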
+static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
+ struct cpdma_desc __iomem *desc)
+{
+ if (!desc)
+ return 0;
+ return pool->hw_addr + (__force dma_addr_t)desc -
+ (__force dma_addr_t)pool->iomap;
+}
+
+static inline struct cpdma_desc __iomem *
+desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
+{
+ return dma ? pool->iomap + dma - pool->hw_addr : NULL;
+}
+
+static struct cpdma_desc __iomem *
+cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
+{
+ unsigned long flags;
+ int index;
+ struct cpdma_desc __iomem *desc = NULL;
+
+ spin_lock_irqsave(&pool->lock, flags);
+
+ index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
+ num_desc, 0);
+ if (index < pool->num_desc) {
+ bitmap_set(pool->bitmap, index, num_desc);
+ desc = pool->iomap + pool->desc_size * index;
+ pool->used_desc++;
+ }
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+ return desc;
+}
+
+static void cpdma_desc_free(struct cpdma_desc_pool *pool,
+ struct cpdma_desc __iomem *desc, int num_desc)
+{
+ unsigned long flags, index;
+
+ index = ((unsigned long)desc - (unsigned long)pool->iomap) /
+ pool->desc_size;
+ spin_lock_irqsave(&pool->lock, flags);
+ bitmap_clear(pool->bitmap, index, num_desc);
+ pool->used_desc--;
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
+{
+ struct cpdma_ctlr *ctlr;
+
+ ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
+ if (!ctlr)
+ return NULL;
+
+ ctlr->state = CPDMA_STATE_IDLE;
+ ctlr->params = *params;
+ ctlr->dev = params->dev;
+ spin_lock_init(&ctlr->lock);
+
+ ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
+ ctlr->params.desc_mem_phys,
+ ctlr->params.desc_hw_addr,
+ ctlr->params.desc_mem_size,
+ ctlr->params.desc_align);
+ if (!ctlr->pool) {
+ kfree(ctlr);
+ return NULL;
+ }
+
+ if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
+ ctlr->num_chan = CPDMA_MAX_CHANNELS;
+ return ctlr;
+}
+
+int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_IDLE) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return -EBUSY;
+ }
+
+ if (ctlr->params.has_soft_reset) {
+ unsigned long timeout = jiffies + HZ/10;
+
+ dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
+ while (time_before(jiffies, timeout)) {
+ if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
+ break;
+ }
+ WARN_ON(!time_before(jiffies, timeout));
+ }
+
+ for (i = 0; i < ctlr->num_chan; i++) {
+ __raw_writel(0, ctlr->params.txhdp + 4 * i);
+ __raw_writel(0, ctlr->params.rxhdp + 4 * i);
+ __raw_writel(0, ctlr->params.txcp + 4 * i);
+ __raw_writel(0, ctlr->params.rxcp + 4 * i);
+ }
+
+ dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
+ dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
+
+ dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
+ dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);
+
+ ctlr->state = CPDMA_STATE_ACTIVE;
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_start(ctlr->channels[i]);
+ }
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return -EINVAL;
+ }
+
+ ctlr->state = CPDMA_STATE_TEARDOWN;
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_stop(ctlr->channels[i]);
+ }
+
+ dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
+ dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
+
+ dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
+ dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);
+
+ ctlr->state = CPDMA_STATE_IDLE;
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
+{
+ struct device *dev = ctlr->dev;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);
+
+ dev_info(dev, "CPDMA: txidver: %x",
+ dma_reg_read(ctlr, CPDMA_TXIDVER));
+ dev_info(dev, "CPDMA: txcontrol: %x",
+ dma_reg_read(ctlr, CPDMA_TXCONTROL));
+ dev_info(dev, "CPDMA: txteardown: %x",
+ dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
+ dev_info(dev, "CPDMA: rxidver: %x",
+ dma_reg_read(ctlr, CPDMA_RXIDVER));
+ dev_info(dev, "CPDMA: rxcontrol: %x",
+ dma_reg_read(ctlr, CPDMA_RXCONTROL));
+ dev_info(dev, "CPDMA: softreset: %x",
+ dma_reg_read(ctlr, CPDMA_SOFTRESET));
+ dev_info(dev, "CPDMA: rxteardown: %x",
+ dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
+ dev_info(dev, "CPDMA: txintstatraw: %x",
+ dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
+ dev_info(dev, "CPDMA: txintstatmasked: %x",
+ dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
+ dev_info(dev, "CPDMA: txintmaskset: %x",
+ dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
+ dev_info(dev, "CPDMA: txintmaskclear: %x",
+ dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
+ dev_info(dev, "CPDMA: macinvector: %x",
+ dma_reg_read(ctlr, CPDMA_MACINVECTOR));
+ dev_info(dev, "CPDMA: maceoivector: %x",
+ dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
+ dev_info(dev, "CPDMA: rxintstatraw: %x",
+ dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
+ dev_info(dev, "CPDMA: rxintstatmasked: %x",
+ dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
+ dev_info(dev, "CPDMA: rxintmaskset: %x",
+ dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
+ dev_info(dev, "CPDMA: rxintmaskclear: %x",
+ dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
+ dev_info(dev, "CPDMA: dmaintstatraw: %x",
+ dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
+ dev_info(dev, "CPDMA: dmaintstatmasked: %x",
+ dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
+ dev_info(dev, "CPDMA: dmaintmaskset: %x",
+ dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
+ dev_info(dev, "CPDMA: dmaintmaskclear: %x",
+ dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));
+
+ if (ctlr->params.has_ext_regs) {
+ dev_info(dev, "CPDMA: dmacontrol: %x",
+ dma_reg_read(ctlr, CPDMA_DMACONTROL));
+ dev_info(dev, "CPDMA: dmastatus: %x",
+ dma_reg_read(ctlr, CPDMA_DMASTATUS));
+ dev_info(dev, "CPDMA: rxbuffofs: %x",
+ dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
+ if (ctlr->channels[i])
+ cpdma_chan_dump(ctlr->channels[i]);
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
+{
+ unsigned long flags;
+ int ret = 0, i;
+
+ if (!ctlr)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_IDLE)
+ cpdma_ctlr_stop(ctlr);
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_destroy(ctlr->channels[i]);
+ }
+
+ cpdma_desc_pool_destroy(ctlr->pool);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ kfree(ctlr);
+ return ret;
+}
+
+int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
+{
+ unsigned long flags;
+ int i, reg;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return -EINVAL;
+ }
+
+ reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
+ dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_int_ctrl(ctlr->channels[i], enable);
+ }
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
+{
+ dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
+}
+
+struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
+ cpdma_handler_fn handler)
+{
+ struct cpdma_chan *chan;
+ int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
+ unsigned long flags;
+
+ if (__chan_linear(chan_num) >= ctlr->num_chan)
+ return NULL;
+
+ ret = -ENOMEM;
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ goto err_chan_alloc;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ ret = -EBUSY;
+ if (ctlr->channels[chan_num])
+ goto err_chan_busy;
+
+ chan->ctlr = ctlr;
+ chan->state = CPDMA_STATE_IDLE;
+ chan->chan_num = chan_num;
+ chan->handler = handler;
+
+ if (is_rx_chan(chan)) {
+ chan->hdp = ctlr->params.rxhdp + offset;
+ chan->cp = ctlr->params.rxcp + offset;
+ chan->rxfree = ctlr->params.rxfree + offset;
+ chan->int_set = CPDMA_RXINTMASKSET;
+ chan->int_clear = CPDMA_RXINTMASKCLEAR;
+ chan->td = CPDMA_RXTEARDOWN;
+ chan->dir = DMA_FROM_DEVICE;
+ } else {
+ chan->hdp = ctlr->params.txhdp + offset;
+ chan->cp = ctlr->params.txcp + offset;
+ chan->int_set = CPDMA_TXINTMASKSET;
+ chan->int_clear = CPDMA_TXINTMASKCLEAR;
+ chan->td = CPDMA_TXTEARDOWN;
+ chan->dir = DMA_TO_DEVICE;
+ }
+ chan->mask = BIT(chan_linear(chan));
+
+ spin_lock_init(&chan->lock);
+
+ ctlr->channels[chan_num] = chan;
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return chan;
+
+err_chan_busy:
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ kfree(chan);
+err_chan_alloc:
+ return ERR_PTR(ret);
+}
+
+int cpdma_chan_destroy(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr;
+ unsigned long flags;
+
+ if (!chan)
+ return -EINVAL;
+ ctlr = chan->ctlr;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (chan->state != CPDMA_STATE_IDLE)
+ cpdma_chan_stop(chan);
+ ctlr->channels[chan->chan_num] = NULL;
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ kfree(chan);
+ return 0;
+}
+
+int cpdma_chan_get_stats(struct cpdma_chan *chan,
+ struct cpdma_chan_stats *stats)
+{
+ unsigned long flags;
+
+ if (!chan)
+ return -EINVAL;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ memcpy(stats, &chan->stats, sizeof(*stats));
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+int cpdma_chan_dump(struct cpdma_chan *chan)
+{
+ unsigned long flags;
+ struct device *dev = chan->ctlr->dev;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ dev_info(dev, "channel %d (%s %d) state %s\n",
+ chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
+ chan_linear(chan), cpdma_state_str[chan->state]);
+ dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
+ dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
+ if (chan->rxfree) {
+ dev_info(dev, "\trxfree: %x\n",
+ chan_read(chan, rxfree));
+ }
+
+ dev_info(dev, "\tstats head_enqueue: %d\n",
+ chan->stats.head_enqueue);
+ dev_info(dev, "\tstats tail_enqueue: %d\n",
+ chan->stats.tail_enqueue);
+ dev_info(dev, "\tstats pad_enqueue: %d\n",
+ chan->stats.pad_enqueue);
+ dev_info(dev, "\tstats misqueued: %d\n",
+ chan->stats.misqueued);
+ dev_info(dev, "\tstats desc_alloc_fail: %d\n",
+ chan->stats.desc_alloc_fail);
+ dev_info(dev, "\tstats pad_alloc_fail: %d\n",
+ chan->stats.pad_alloc_fail);
+ dev_info(dev, "\tstats runt_receive_buff: %d\n",
+ chan->stats.runt_receive_buff);
+ dev_info(dev, "\tstats runt_transmit_buff: %d\n",
+ chan->stats.runt_transmit_buff);
+ dev_info(dev, "\tstats empty_dequeue: %d\n",
+ chan->stats.empty_dequeue);
+ dev_info(dev, "\tstats busy_dequeue: %d\n",
+ chan->stats.busy_dequeue);
+ dev_info(dev, "\tstats good_dequeue: %d\n",
+ chan->stats.good_dequeue);
+ dev_info(dev, "\tstats requeue: %d\n",
+ chan->stats.requeue);
+ dev_info(dev, "\tstats teardown_dequeue: %d\n",
+ chan->stats.teardown_dequeue);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+static void __cpdma_chan_submit(struct cpdma_chan *chan,
+ struct cpdma_desc __iomem *desc)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc __iomem *prev = chan->tail;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ dma_addr_t desc_dma;
+ u32 mode;
+
+ desc_dma = desc_phys(pool, desc);
+
+ /* simple case - idle channel */
+ if (!chan->head) {
+ chan->stats.head_enqueue++;
+ chan->head = desc;
+ chan->tail = desc;
+ if (chan->state == CPDMA_STATE_ACTIVE)
+ chan_write(chan, hdp, desc_dma);
+ return;
+ }
+
+ /* first chain the descriptor at the tail of the list */
+ desc_write(prev, hw_next, desc_dma);
+ chan->tail = desc;
+ chan->stats.tail_enqueue++;
+
+ /* next check if EOQ has been triggered already */
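+ /*
+ * If the engine consumed the old tail and raised EOQ before the
+ * hw_next write above landed, this descriptor was "misqueued":
+ * the channel has already stopped, so restart it at desc_dma.
+ */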
+ mode = desc_read(prev, hw_mode);
+ if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
+ (chan->state == CPDMA_STATE_ACTIVE)) {
+ desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
+ chan_write(chan, hdp, desc_dma);
+ chan->stats.misqueued++;
+ }
+}
+
+int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+ int len, gfp_t gfp_mask)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc __iomem *desc;
+ dma_addr_t buffer;
+ unsigned long flags;
+ u32 mode;
+ int ret = 0;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (chan->state == CPDMA_STATE_TEARDOWN) {
+ ret = -EINVAL;
+ goto unlock_ret;
+ }
+
+ desc = cpdma_desc_alloc(ctlr->pool, 1);
+ if (!desc) {
+ chan->stats.desc_alloc_fail++;
+ ret = -ENOMEM;
+ goto unlock_ret;
+ }
+
+ if (len < ctlr->params.min_packet_size) {
+ len = ctlr->params.min_packet_size;
+ chan->stats.runt_transmit_buff++;
+ }
+
+ buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
+ mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
+
+ desc_write(desc, hw_next, 0);
+ desc_write(desc, hw_buffer, buffer);
+ desc_write(desc, hw_len, len);
+ desc_write(desc, hw_mode, mode | len);
+ desc_write(desc, sw_token, token);
+ desc_write(desc, sw_buffer, buffer);
+ desc_write(desc, sw_len, len);
+
+ __cpdma_chan_submit(chan, desc);
+
+ if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
+ chan_write(chan, rxfree, 1);
+
+ chan->count++;
+
+unlock_ret:
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return ret;
+}
+
+static void __cpdma_chan_free(struct cpdma_chan *chan,
+ struct cpdma_desc __iomem *desc,
+ int outlen, int status)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ dma_addr_t buff_dma;
+ int origlen;
+ void *token;
+
+ token = (void *)desc_read(desc, sw_token);
+ buff_dma = desc_read(desc, sw_buffer);
+ origlen = desc_read(desc, sw_len);
+
+ dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
+ cpdma_desc_free(pool, desc, 1);
+ (*chan->handler)(token, outlen, status);
+}
+
+static int __cpdma_chan_process(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc __iomem *desc;
+ int status, outlen;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ dma_addr_t desc_dma;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ desc = chan->head;
+ if (!desc) {
+ chan->stats.empty_dequeue++;
+ status = -ENOENT;
+ goto unlock_ret;
+ }
+ desc_dma = desc_phys(pool, desc);
+
+ status = __raw_readl(&desc->hw_mode);
+ outlen = status & 0x7ff;
+ if (status & CPDMA_DESC_OWNER) {
+ chan->stats.busy_dequeue++;
+ status = -EBUSY;
+ goto unlock_ret;
+ }
+ status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);
+
+ chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
+ chan_write(chan, cp, desc_dma);
+ chan->count--;
+ chan->stats.good_dequeue++;
+
+ if (status & CPDMA_DESC_EOQ) {
+ chan->stats.requeue++;
+ chan_write(chan, hdp, desc_phys(pool, chan->head));
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ __cpdma_chan_free(chan, desc, outlen, status);
+ return status;
+
+unlock_ret:
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return status;
+}
+
+int cpdma_chan_process(struct cpdma_chan *chan, int quota)
+{
+ int used = 0, ret = 0;
+
+ if (chan->state != CPDMA_STATE_ACTIVE)
+ return -EINVAL;
+
+ while (used < quota) {
+ ret = __cpdma_chan_process(chan);
+ if (ret < 0)
+ break;
+ used++;
+ }
+ return used;
+}
+
+int cpdma_chan_start(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_IDLE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EBUSY;
+ }
+ if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+ dma_reg_write(ctlr, chan->int_set, chan->mask);
+ chan->state = CPDMA_STATE_ACTIVE;
+ if (chan->head) {
+ chan_write(chan, hdp, desc_phys(pool, chan->head));
+ if (chan->rxfree)
+ chan_write(chan, rxfree, chan->count);
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+int cpdma_chan_stop(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ unsigned long flags;
+ int ret;
+ unsigned long timeout;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ chan->state = CPDMA_STATE_TEARDOWN;
+ dma_reg_write(ctlr, chan->int_clear, chan->mask);
+
+ /* trigger teardown */
+ dma_reg_write(ctlr, chan->td, chan->chan_num);
+
+ /* wait for teardown complete */
+ timeout = jiffies + HZ/10; /* 100 msec */
+ while (time_before(jiffies, timeout)) {
+ u32 cp = chan_read(chan, cp);
+ if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
+ break;
+ cpu_relax();
+ }
+ WARN_ON(!time_before(jiffies, timeout));
+ chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
+
+ /* handle completed packets */
+ do {
+ ret = __cpdma_chan_process(chan);
+ if (ret < 0)
+ break;
+ } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+
+ /* remaining packets haven't been tx/rx'ed, clean them up */
+ while (chan->head) {
+ struct cpdma_desc __iomem *desc = chan->head;
+ dma_addr_t next_dma;
+
+ next_dma = desc_read(desc, hw_next);
+ chan->head = desc_from_phys(pool, next_dma);
+ chan->stats.teardown_dequeue++;
+
+ /* issue callback without locks held */
+ spin_unlock_irqrestore(&chan->lock, flags);
+ __cpdma_chan_free(chan, desc, 0, -ENOSYS);
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+
+ chan->state = CPDMA_STATE_IDLE;
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
+ chan->mask);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return 0;
+}
+
+struct cpdma_control_info {
+ u32 reg;
+ u32 shift, mask;
+ int access;
+#define ACCESS_RO BIT(0)
+#define ACCESS_WO BIT(1)
+#define ACCESS_RW (ACCESS_RO | ACCESS_WO)
+};
+
+static struct cpdma_control_info controls[] = {
+ [CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO},
+ [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW},
+ [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW},
+ [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL, 1, 1, ACCESS_RW},
+ [CPDMA_TX_PRIO_FIXED] = {CPDMA_DMACONTROL, 0, 1, ACCESS_RW},
+ [CPDMA_STAT_IDLE] = {CPDMA_DMASTATUS, 31, 1, ACCESS_RO},
+ [CPDMA_STAT_TX_ERR_CODE] = {CPDMA_DMASTATUS, 20, 0xf, ACCESS_RW},
+ [CPDMA_STAT_TX_ERR_CHAN] = {CPDMA_DMASTATUS, 16, 0x7, ACCESS_RW},
+ [CPDMA_STAT_RX_ERR_CODE] = {CPDMA_DMASTATUS, 12, 0xf, ACCESS_RW},
+ [CPDMA_STAT_RX_ERR_CHAN] = {CPDMA_DMASTATUS, 8, 0x7, ACCESS_RW},
+ [CPDMA_RX_BUFFER_OFFSET] = {CPDMA_RXBUFFOFS, 0, 0xffff, ACCESS_RW},
+};
+
+int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
+{
+ unsigned long flags;
+ struct cpdma_control_info *info = &controls[control];
+ int ret;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ ret = -ENOTSUPP;
+ if (!ctlr->params.has_ext_regs)
+ goto unlock_ret;
+
+ ret = -EINVAL;
+ if (ctlr->state != CPDMA_STATE_ACTIVE)
+ goto unlock_ret;
+
+ ret = -ENOENT;
+ if (control < 0 || control >= ARRAY_SIZE(controls))
+ goto unlock_ret;
+
+ ret = -EPERM;
+ if ((info->access & ACCESS_RO) != ACCESS_RO)
+ goto unlock_ret;
+
+ ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
+
+unlock_ret:
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return ret;
+}
+
+int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
+{
+ unsigned long flags;
+ struct cpdma_control_info *info = &controls[control];
+ int ret;
+ u32 val;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ ret = -ENOTSUPP;
+ if (!ctlr->params.has_ext_regs)
+ goto unlock_ret;
+
+ ret = -EINVAL;
+ if (ctlr->state != CPDMA_STATE_ACTIVE)
+ goto unlock_ret;
+
+ ret = -ENOENT;
+ if (control < 0 || control >= ARRAY_SIZE(controls))
+ goto unlock_ret;
+
+ ret = -EPERM;
+ if ((info->access & ACCESS_WO) != ACCESS_WO)
+ goto unlock_ret;
+
+ val = dma_reg_read(ctlr, info->reg);
+ val &= ~(info->mask << info->shift);
+ val |= (value & info->mask) << info->shift;
+ dma_reg_write(ctlr, info->reg, val);
+ ret = 0;
+
+unlock_ret:
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return ret;
+}
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
new file mode 100644
index 000000000000..afa19a0c0d81
--- /dev/null
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -0,0 +1,109 @@
+/*
+ * Texas Instruments CPDMA Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DAVINCI_CPDMA_H__
+#define __DAVINCI_CPDMA_H__
+
+#define CPDMA_MAX_CHANNELS BITS_PER_LONG
+
+#define tx_chan_num(chan) (chan)
+#define rx_chan_num(chan) ((chan) + CPDMA_MAX_CHANNELS)
+#define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
+#define is_tx_chan(chan) (!is_rx_chan(chan))
+#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
+#define chan_linear(chan) __chan_linear((chan)->chan_num)
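+/*
+ * Example (assuming a 32-bit build, so CPDMA_MAX_CHANNELS == 32):
+ * tx_chan_num(3) == 3, rx_chan_num(3) == 35, and __chan_linear()
+ * recovers the hardware channel index 3 from either value.
+ */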
+
+struct cpdma_params {
+ struct device *dev;
+ void __iomem *dmaregs;
+ void __iomem *txhdp, *rxhdp, *txcp, *rxcp;
+ void __iomem *rxthresh, *rxfree;
+ int num_chan;
+ bool has_soft_reset;
+ int min_packet_size;
+ u32 desc_mem_phys;
+ u32 desc_hw_addr;
+ int desc_mem_size;
+ int desc_align;
+
+ /*
+ * Some instances of embedded cpdma controllers have extra control and
+ * status registers. The following flag enables access to these
+ * "extended" registers.
+ */
+ bool has_ext_regs;
+};
+
+struct cpdma_chan_stats {
+ u32 head_enqueue;
+ u32 tail_enqueue;
+ u32 pad_enqueue;
+ u32 misqueued;
+ u32 desc_alloc_fail;
+ u32 pad_alloc_fail;
+ u32 runt_receive_buff;
+ u32 runt_transmit_buff;
+ u32 empty_dequeue;
+ u32 busy_dequeue;
+ u32 good_dequeue;
+ u32 requeue;
+ u32 teardown_dequeue;
+};
+
+struct cpdma_ctlr;
+struct cpdma_chan;
+
+typedef void (*cpdma_handler_fn)(void *token, int len, int status);
+
+struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params);
+int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr);
+int cpdma_ctlr_start(struct cpdma_ctlr *ctlr);
+int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr);
+int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr);
+
+struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
+ cpdma_handler_fn handler);
+int cpdma_chan_destroy(struct cpdma_chan *chan);
+int cpdma_chan_start(struct cpdma_chan *chan);
+int cpdma_chan_stop(struct cpdma_chan *chan);
+int cpdma_chan_dump(struct cpdma_chan *chan);
+
+int cpdma_chan_get_stats(struct cpdma_chan *chan,
+ struct cpdma_chan_stats *stats);
+int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+ int len, gfp_t gfp_mask);
+int cpdma_chan_process(struct cpdma_chan *chan, int quota);
+
+int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr);
+int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
+
+enum cpdma_control {
+ CPDMA_CMD_IDLE, /* write-only */
+ CPDMA_COPY_ERROR_FRAMES, /* read-write */
+ CPDMA_RX_OFF_LEN_UPDATE, /* read-write */
+ CPDMA_RX_OWNERSHIP_FLIP, /* read-write */
+ CPDMA_TX_PRIO_FIXED, /* read-write */
+ CPDMA_STAT_IDLE, /* read-only */
+ CPDMA_STAT_TX_ERR_CHAN, /* read-only */
+ CPDMA_STAT_TX_ERR_CODE, /* read-only */
+ CPDMA_STAT_RX_ERR_CHAN, /* read-only */
+ CPDMA_STAT_RX_ERR_CODE, /* read-only */
+ CPDMA_RX_BUFFER_OFFSET, /* read-write */
+};
+
+int cpdma_control_get(struct cpdma_ctlr *ctlr, int control);
+int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value);
+
+#endif
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
new file mode 100644
index 000000000000..815c7970261b
--- /dev/null
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -0,0 +1,2047 @@
+/*
+ * DaVinci Ethernet Medium Access Controller
+ *
+ * DaVinci EMAC is based on TI's CPPI 3.0 DMA engine
+ *
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ---------------------------------------------------------------------------
+ * History:
+ * 0-5 A number of folks worked on this driver in bits and pieces but the major
+ * contribution came from Suraj Iyer and Anant Gole
+ * 6.0 Anant Gole - rewrote the driver as per Linux conventions
+ * 6.1 Chaithrika U S - added support for Gigabit and RMII features,
+ * PHY layer usage
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/highmem.h>
+#include <linux/proc_fs.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+#include <linux/phy.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/davinci_emac.h>
+
+#include <asm/irq.h>
+#include <asm/page.h>
+
+#include "davinci_cpdma.h"
+
+static int debug_level;
+module_param(debug_level, int, 0);
+MODULE_PARM_DESC(debug_level, "DaVinci EMAC debug level (NETIF_MSG bits)");
+
+/* Netif debug messages possible */
+#define DAVINCI_EMAC_DEBUG (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR | \
+ NETIF_MSG_TX_QUEUED | \
+ NETIF_MSG_INTR | \
+ NETIF_MSG_TX_DONE | \
+ NETIF_MSG_RX_STATUS | \
+ NETIF_MSG_PKTDATA | \
+ NETIF_MSG_HW | \
+ NETIF_MSG_WOL)
+
+/* version info */
+#define EMAC_MAJOR_VERSION 6
+#define EMAC_MINOR_VERSION 1
+#define EMAC_MODULE_VERSION "6.1"
+MODULE_VERSION(EMAC_MODULE_VERSION);
+static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
+
+/* Configuration items */
+#define EMAC_DEF_PASS_CRC (0) /* Do not pass CRC up with received frames */
+#define EMAC_DEF_QOS_EN (0) /* EMAC proprietary QoS disabled */
+#define EMAC_DEF_NO_BUFF_CHAIN (0) /* No buffer chain */
+#define EMAC_DEF_MACCTRL_FRAME_EN (0) /* Discard Maccontrol frames */
+#define EMAC_DEF_SHORT_FRAME_EN (0) /* Discard short frames */
+#define EMAC_DEF_ERROR_FRAME_EN (0) /* Discard error frames */
+#define EMAC_DEF_PROM_EN (0) /* Promiscuous disabled */
+#define EMAC_DEF_PROM_CH (0) /* Promiscuous channel is 0 */
+#define EMAC_DEF_BCAST_EN (1) /* Broadcast enabled */
+#define EMAC_DEF_BCAST_CH (0) /* Broadcast channel is 0 */
+#define EMAC_DEF_MCAST_EN (1) /* Multicast enabled */
+#define EMAC_DEF_MCAST_CH (0) /* Multicast channel is 0 */
+
+#define EMAC_DEF_TXPRIO_FIXED (1) /* TX priority is fixed */
+#define EMAC_DEF_TXPACING_EN (0) /* TX pacing not supported */
+
+#define EMAC_DEF_BUFFER_OFFSET (0) /* Buffer offset to DMA (future) */
+#define EMAC_DEF_MIN_ETHPKTSIZE (60) /* Minimum ethernet pkt size */
+#define EMAC_DEF_MAX_FRAME_SIZE (1500 + 14 + 4 + 4)
+#define EMAC_DEF_TX_CH (0) /* Default 0th channel */
+#define EMAC_DEF_RX_CH (0) /* Default 0th channel */
+#define EMAC_DEF_RX_NUM_DESC (128)
+#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
+#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
+#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
+
+/* Buffer descriptor parameters */
+#define EMAC_DEF_TX_MAX_SERVICE (32) /* TX max service BD's */
+#define EMAC_DEF_RX_MAX_SERVICE (64) /* should = netdev->weight */
+
+/* EMAC register related defines */
+#define EMAC_ALL_MULTI_REG_VALUE (0xFFFFFFFF)
+#define EMAC_NUM_MULTICAST_BITS (64)
+#define EMAC_TX_CONTROL_TX_ENABLE_VAL (0x1)
+#define EMAC_RX_CONTROL_RX_ENABLE_VAL (0x1)
+#define EMAC_MAC_HOST_ERR_INTMASK_VAL (0x2)
+#define EMAC_RX_UNICAST_CLEAR_ALL (0xFF)
+#define EMAC_INT_MASK_CLEAR (0xFF)
+
+/* RX MBP register bit positions */
+#define EMAC_RXMBP_PASSCRC_MASK BIT(30)
+#define EMAC_RXMBP_QOSEN_MASK BIT(29)
+#define EMAC_RXMBP_NOCHAIN_MASK BIT(28)
+#define EMAC_RXMBP_CMFEN_MASK BIT(24)
+#define EMAC_RXMBP_CSFEN_MASK BIT(23)
+#define EMAC_RXMBP_CEFEN_MASK BIT(22)
+#define EMAC_RXMBP_CAFEN_MASK BIT(21)
+#define EMAC_RXMBP_PROMCH_SHIFT (16)
+#define EMAC_RXMBP_PROMCH_MASK (0x7 << 16)
+#define EMAC_RXMBP_BROADEN_MASK BIT(13)
+#define EMAC_RXMBP_BROADCH_SHIFT (8)
+#define EMAC_RXMBP_BROADCH_MASK (0x7 << 8)
+#define EMAC_RXMBP_MULTIEN_MASK BIT(5)
+#define EMAC_RXMBP_MULTICH_SHIFT (0)
+#define EMAC_RXMBP_MULTICH_MASK (0x7)
+#define EMAC_RXMBP_CHMASK (0x7)
+
+/* EMAC register definitions/bit maps used */
+# define EMAC_MBP_RXPROMISC (0x00200000)
+# define EMAC_MBP_PROMISCCH(ch) (((ch) & 0x7) << 16)
+# define EMAC_MBP_RXBCAST (0x00002000)
+# define EMAC_MBP_BCASTCHAN(ch) (((ch) & 0x7) << 8)
+# define EMAC_MBP_RXMCAST (0x00000020)
+# define EMAC_MBP_MCASTCHAN(ch) ((ch) & 0x7)
+
+/* EMAC mac_control register */
+#define EMAC_MACCONTROL_TXPTYPE BIT(9)
+#define EMAC_MACCONTROL_TXPACEEN BIT(6)
+#define EMAC_MACCONTROL_GMIIEN BIT(5)
+#define EMAC_MACCONTROL_GIGABITEN BIT(7)
+#define EMAC_MACCONTROL_FULLDUPLEXEN BIT(0)
+#define EMAC_MACCONTROL_RMIISPEED_MASK BIT(15)
+
+/* GIGABIT MODE related bits */
+#define EMAC_DM646X_MACCONTORL_GIG BIT(7)
+#define EMAC_DM646X_MACCONTORL_GIGFORCE BIT(17)
+
+/* EMAC mac_status register */
+#define EMAC_MACSTATUS_TXERRCODE_MASK (0xF00000)
+#define EMAC_MACSTATUS_TXERRCODE_SHIFT (20)
+#define EMAC_MACSTATUS_TXERRCH_MASK (0x7)
+#define EMAC_MACSTATUS_TXERRCH_SHIFT (16)
+#define EMAC_MACSTATUS_RXERRCODE_MASK (0xF000)
+#define EMAC_MACSTATUS_RXERRCODE_SHIFT (12)
+#define EMAC_MACSTATUS_RXERRCH_MASK (0x7)
+#define EMAC_MACSTATUS_RXERRCH_SHIFT (8)
+
+/* EMAC RX register masks */
+#define EMAC_RX_MAX_LEN_MASK (0xFFFF)
+#define EMAC_RX_BUFFER_OFFSET_MASK (0xFFFF)
+
+/* MAC_IN_VECTOR (0x180) register bit fields */
+#define EMAC_DM644X_MAC_IN_VECTOR_HOST_INT BIT(17)
+#define EMAC_DM644X_MAC_IN_VECTOR_STATPEND_INT BIT(16)
+#define EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC BIT(8)
+#define EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC BIT(0)
+
+/* NOTE: for DM646x the IN_VECTOR layout has changed */
+#define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC BIT(EMAC_DEF_RX_CH)
+#define EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC BIT(16 + EMAC_DEF_TX_CH)
+#define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT BIT(26)
+#define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT BIT(27)
+
+/* CPPI bit positions */
+#define EMAC_CPPI_SOP_BIT BIT(31)
+#define EMAC_CPPI_EOP_BIT BIT(30)
+#define EMAC_CPPI_OWNERSHIP_BIT BIT(29)
+#define EMAC_CPPI_EOQ_BIT BIT(28)
+#define EMAC_CPPI_TEARDOWN_COMPLETE_BIT BIT(27)
+#define EMAC_CPPI_PASS_CRC_BIT BIT(26)
+#define EMAC_RX_BD_BUF_SIZE (0xFFFF)
+#define EMAC_BD_LENGTH_FOR_CACHE (16) /* only CPPI bytes */
+#define EMAC_RX_BD_PKT_LENGTH_MASK (0xFFFF)
+
+/* Max hardware defines */
+#define EMAC_MAX_TXRX_CHANNELS (8) /* Max hardware channels */
+#define EMAC_DEF_MAX_MULTICAST_ADDRESSES (64) /* Max mcast addr's */
+
+/* EMAC Peripheral Device Register Memory Layout structure */
+#define EMAC_MACINVECTOR 0x90
+
+#define EMAC_DM646X_MACEOIVECTOR 0x94
+
+#define EMAC_MACINTSTATRAW 0xB0
+#define EMAC_MACINTSTATMASKED 0xB4
+#define EMAC_MACINTMASKSET 0xB8
+#define EMAC_MACINTMASKCLEAR 0xBC
+
+#define EMAC_RXMBPENABLE 0x100
+#define EMAC_RXUNICASTSET 0x104
+#define EMAC_RXUNICASTCLEAR 0x108
+#define EMAC_RXMAXLEN 0x10C
+#define EMAC_RXBUFFEROFFSET 0x110
+#define EMAC_RXFILTERLOWTHRESH 0x114
+
+#define EMAC_MACCONTROL 0x160
+#define EMAC_MACSTATUS 0x164
+#define EMAC_EMCONTROL 0x168
+#define EMAC_FIFOCONTROL 0x16C
+#define EMAC_MACCONFIG 0x170
+#define EMAC_SOFTRESET 0x174
+#define EMAC_MACSRCADDRLO 0x1D0
+#define EMAC_MACSRCADDRHI 0x1D4
+#define EMAC_MACHASH1 0x1D8
+#define EMAC_MACHASH2 0x1DC
+#define EMAC_MACADDRLO 0x500
+#define EMAC_MACADDRHI 0x504
+#define EMAC_MACINDEX 0x508
+
+/* EMAC statistics registers */
+#define EMAC_RXGOODFRAMES 0x200
+#define EMAC_RXBCASTFRAMES 0x204
+#define EMAC_RXMCASTFRAMES 0x208
+#define EMAC_RXPAUSEFRAMES 0x20C
+#define EMAC_RXCRCERRORS 0x210
+#define EMAC_RXALIGNCODEERRORS 0x214
+#define EMAC_RXOVERSIZED 0x218
+#define EMAC_RXJABBER 0x21C
+#define EMAC_RXUNDERSIZED 0x220
+#define EMAC_RXFRAGMENTS 0x224
+#define EMAC_RXFILTERED 0x228
+#define EMAC_RXQOSFILTERED 0x22C
+#define EMAC_RXOCTETS 0x230
+#define EMAC_TXGOODFRAMES 0x234
+#define EMAC_TXBCASTFRAMES 0x238
+#define EMAC_TXMCASTFRAMES 0x23C
+#define EMAC_TXPAUSEFRAMES 0x240
+#define EMAC_TXDEFERRED 0x244
+#define EMAC_TXCOLLISION 0x248
+#define EMAC_TXSINGLECOLL 0x24C
+#define EMAC_TXMULTICOLL 0x250
+#define EMAC_TXEXCESSIVECOLL 0x254
+#define EMAC_TXLATECOLL 0x258
+#define EMAC_TXUNDERRUN 0x25C
+#define EMAC_TXCARRIERSENSE 0x260
+#define EMAC_TXOCTETS 0x264
+#define EMAC_NETOCTETS 0x280
+#define EMAC_RXSOFOVERRUNS 0x284
+#define EMAC_RXMOFOVERRUNS 0x288
+#define EMAC_RXDMAOVERRUNS 0x28C
+
+/* EMAC DM644x control registers */
+#define EMAC_CTRL_EWCTL (0x4)
+#define EMAC_CTRL_EWINTTCNT (0x8)
+
+/* EMAC DM644x control module masks */
+#define EMAC_DM644X_EWINTCNT_MASK 0x1FFFF
+#define EMAC_DM644X_INTMIN_INTVL 0x1
+#define EMAC_DM644X_INTMAX_INTVL (EMAC_DM644X_EWINTCNT_MASK)
+
+/* EMAC DM646X control module registers */
+#define EMAC_DM646X_CMINTCTRL 0x0C
+#define EMAC_DM646X_CMRXINTEN 0x14
+#define EMAC_DM646X_CMTXINTEN 0x18
+#define EMAC_DM646X_CMRXINTMAX 0x70
+#define EMAC_DM646X_CMTXINTMAX 0x74
+
+/* EMAC DM646X control module masks */
+#define EMAC_DM646X_INTPACEEN (0x3 << 16)
+#define EMAC_DM646X_INTPRESCALE_MASK (0x7FF << 0)
+#define EMAC_DM646X_CMINTMAX_CNT 63
+#define EMAC_DM646X_CMINTMIN_CNT 2
+#define EMAC_DM646X_CMINTMAX_INTVL (1000 / EMAC_DM646X_CMINTMIN_CNT)
+#define EMAC_DM646X_CMINTMIN_INTVL ((1000 / EMAC_DM646X_CMINTMAX_CNT) + 1)
+
+
+/* EMAC EOI codes for C0 */
+#define EMAC_DM646X_MAC_EOI_C0_RXEN (0x01)
+#define EMAC_DM646X_MAC_EOI_C0_TXEN (0x02)
+
+/* EMAC Stats Clear Mask */
+#define EMAC_STATS_CLR_MASK (0xFFFFFFFF)
+
+/* emac_priv: EMAC private data structure
+ *
+ * EMAC adapter private data structure
+ */
+struct emac_priv {
+ u32 msg_enable;
+ struct net_device *ndev;
+ struct platform_device *pdev;
+ struct napi_struct napi;
+ char mac_addr[6];
+ void __iomem *remap_addr;
+ u32 emac_base_phys;
+ void __iomem *emac_base;
+ void __iomem *ctrl_base;
+ struct cpdma_ctlr *dma;
+ struct cpdma_chan *txchan;
+ struct cpdma_chan *rxchan;
+ u32 link; /* 1 = link up, 0 = link down */
+ u32 speed; /* 0 = autoneg, 1 = no PHY; otherwise speed in Mbps (10/100/1000) */
+ u32 duplex; /* link duplex: 0 = half, 1 = full */
+ u32 rx_buf_size;
+ u32 isr_count;
+ u32 coal_intvl;
+ u32 bus_freq_mhz;
+ u8 rmii_en;
+ u8 version;
+ u32 mac_hash1;
+ u32 mac_hash2;
+ u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
+ u32 rx_addr_type;
+ const char *phy_id;
+ struct phy_device *phydev;
+ spinlock_t lock;
+ /*platform specific members*/
+ void (*int_enable) (void);
+ void (*int_disable) (void);
+};
+
+/* clock frequency for EMAC */
+static struct clk *emac_clk;
+static unsigned long emac_bus_frequency;
+
+/* EMAC TX Host Error description strings */
+static char *emac_txhost_errcodes[16] = {
+ "No error", "SOP error", "Ownership bit not set in SOP buffer",
+ "Zero Next Buffer Descriptor Pointer Without EOP",
+ "Zero Buffer Pointer", "Zero Buffer Length", "Packet Length Error",
+ "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
+ "Reserved", "Reserved", "Reserved", "Reserved"
+};
+
+/* EMAC RX Host Error description strings */
+static char *emac_rxhost_errcodes[16] = {
+ "No error", "Reserved", "Ownership bit not set in input buffer",
+ "Reserved", "Zero Buffer Pointer", "Reserved", "Reserved",
+ "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
+ "Reserved", "Reserved", "Reserved", "Reserved"
+};
+
+/* Helper macros */
+#define emac_read(reg) ioread32(priv->emac_base + (reg))
+#define emac_write(reg, val) iowrite32(val, priv->emac_base + (reg))
+
+#define emac_ctrl_read(reg) ioread32((priv->ctrl_base + (reg)))
+#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg)))
+
+/**
+ * emac_dump_regs: Dump important EMAC registers to debug terminal
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Dumps important EMAC registers and statistics counters to the kernel log
+ *
+ */
+static void emac_dump_regs(struct emac_priv *priv)
+{
+ struct device *emac_dev = &priv->ndev->dev;
+
+ /* Print important registers in EMAC */
+ dev_info(emac_dev, "EMAC Basic registers\n");
+ if (priv->version == EMAC_VERSION_1) {
+ dev_info(emac_dev, "EMAC: EWCTL: %08X, EWINTTCNT: %08X\n",
+ emac_ctrl_read(EMAC_CTRL_EWCTL),
+ emac_ctrl_read(EMAC_CTRL_EWINTTCNT));
+ }
+ dev_info(emac_dev, "EMAC: EmuControl:%08X, FifoControl: %08X\n",
+ emac_read(EMAC_EMCONTROL), emac_read(EMAC_FIFOCONTROL));
+ dev_info(emac_dev, "EMAC: MBPEnable:%08X, RXUnicastSet: %08X, "\
+ "RXMaxLen=%08X\n", emac_read(EMAC_RXMBPENABLE),
+ emac_read(EMAC_RXUNICASTSET), emac_read(EMAC_RXMAXLEN));
+ dev_info(emac_dev, "EMAC: MacControl:%08X, MacStatus: %08X, "\
+ "MacConfig=%08X\n", emac_read(EMAC_MACCONTROL),
+ emac_read(EMAC_MACSTATUS), emac_read(EMAC_MACCONFIG));
+ dev_info(emac_dev, "EMAC Statistics\n");
+ dev_info(emac_dev, "EMAC: rx_good_frames:%d\n",
+ emac_read(EMAC_RXGOODFRAMES));
+ dev_info(emac_dev, "EMAC: rx_broadcast_frames:%d\n",
+ emac_read(EMAC_RXBCASTFRAMES));
+ dev_info(emac_dev, "EMAC: rx_multicast_frames:%d\n",
+ emac_read(EMAC_RXMCASTFRAMES));
+ dev_info(emac_dev, "EMAC: rx_pause_frames:%d\n",
+ emac_read(EMAC_RXPAUSEFRAMES));
+ dev_info(emac_dev, "EMAC: rx_crcerrors:%d\n",
+ emac_read(EMAC_RXCRCERRORS));
+ dev_info(emac_dev, "EMAC: rx_align_code_errors:%d\n",
+ emac_read(EMAC_RXALIGNCODEERRORS));
+ dev_info(emac_dev, "EMAC: rx_oversized_frames:%d\n",
+ emac_read(EMAC_RXOVERSIZED));
+ dev_info(emac_dev, "EMAC: rx_jabber_frames:%d\n",
+ emac_read(EMAC_RXJABBER));
+ dev_info(emac_dev, "EMAC: rx_undersized_frames:%d\n",
+ emac_read(EMAC_RXUNDERSIZED));
+ dev_info(emac_dev, "EMAC: rx_fragments:%d\n",
+ emac_read(EMAC_RXFRAGMENTS));
+ dev_info(emac_dev, "EMAC: rx_filtered_frames:%d\n",
+ emac_read(EMAC_RXFILTERED));
+ dev_info(emac_dev, "EMAC: rx_qos_filtered_frames:%d\n",
+ emac_read(EMAC_RXQOSFILTERED));
+ dev_info(emac_dev, "EMAC: rx_octets:%d\n",
+ emac_read(EMAC_RXOCTETS));
+ dev_info(emac_dev, "EMAC: tx_goodframes:%d\n",
+ emac_read(EMAC_TXGOODFRAMES));
+ dev_info(emac_dev, "EMAC: tx_bcastframes:%d\n",
+ emac_read(EMAC_TXBCASTFRAMES));
+ dev_info(emac_dev, "EMAC: tx_mcastframes:%d\n",
+ emac_read(EMAC_TXMCASTFRAMES));
+ dev_info(emac_dev, "EMAC: tx_pause_frames:%d\n",
+ emac_read(EMAC_TXPAUSEFRAMES));
+ dev_info(emac_dev, "EMAC: tx_deferred_frames:%d\n",
+ emac_read(EMAC_TXDEFERRED));
+ dev_info(emac_dev, "EMAC: tx_collision_frames:%d\n",
+ emac_read(EMAC_TXCOLLISION));
+ dev_info(emac_dev, "EMAC: tx_single_coll_frames:%d\n",
+ emac_read(EMAC_TXSINGLECOLL));
+ dev_info(emac_dev, "EMAC: tx_mult_coll_frames:%d\n",
+ emac_read(EMAC_TXMULTICOLL));
+ dev_info(emac_dev, "EMAC: tx_excessive_collisions:%d\n",
+ emac_read(EMAC_TXEXCESSIVECOLL));
+ dev_info(emac_dev, "EMAC: tx_late_collisions:%d\n",
+ emac_read(EMAC_TXLATECOLL));
+ dev_info(emac_dev, "EMAC: tx_underrun:%d\n",
+ emac_read(EMAC_TXUNDERRUN));
+ dev_info(emac_dev, "EMAC: tx_carrier_sense_errors:%d\n",
+ emac_read(EMAC_TXCARRIERSENSE));
+ dev_info(emac_dev, "EMAC: tx_octets:%d\n",
+ emac_read(EMAC_TXOCTETS));
+ dev_info(emac_dev, "EMAC: net_octets:%d\n",
+ emac_read(EMAC_NETOCTETS));
+ dev_info(emac_dev, "EMAC: rx_sof_overruns:%d\n",
+ emac_read(EMAC_RXSOFOVERRUNS));
+ dev_info(emac_dev, "EMAC: rx_mof_overruns:%d\n",
+ emac_read(EMAC_RXMOFOVERRUNS));
+ dev_info(emac_dev, "EMAC: rx_dma_overruns:%d\n",
+ emac_read(EMAC_RXDMAOVERRUNS));
+
+ cpdma_ctlr_dump(priv->dma);
+}
+
+/**
+ * emac_get_drvinfo: Get EMAC driver information
+ * @ndev: The DaVinci EMAC network adapter
+ * @info: ethtool info structure containing name and version
+ *
+ * Returns EMAC driver information (name and version)
+ *
+ */
+static void emac_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, emac_version_string);
+ strcpy(info->version, EMAC_MODULE_VERSION);
+}
+
+/**
+ * emac_get_settings: Get EMAC settings
+ * @ndev: The DaVinci EMAC network adapter
+ * @ecmd: ethtool command
+ *
+ * Executes the ethtool get settings command
+ *
+ */
+static int emac_get_settings(struct net_device *ndev,
+ struct ethtool_cmd *ecmd)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ if (priv->phydev)
+ return phy_ethtool_gset(priv->phydev, ecmd);
+ else
+ return -EOPNOTSUPP;
+}
+
+/**
+ * emac_set_settings: Set EMAC settings
+ * @ndev: The DaVinci EMAC network adapter
+ * @ecmd: ethtool command
+ *
+ * Executes the ethtool set settings command
+ *
+ */
+static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ if (priv->phydev)
+ return phy_ethtool_sset(priv->phydev, ecmd);
+ else
+ return -EOPNOTSUPP;
+}
+
+/**
+ * emac_get_coalesce: Get interrupt coalesce settings for this device
+ * @ndev: The DaVinci EMAC network adapter
+ * @coal: ethtool coalesce settings structure
+ *
+ * Fetch the current interrupt coalesce settings
+ *
+ */
+static int emac_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ coal->rx_coalesce_usecs = priv->coal_intvl;
+ return 0;
+}
+
+/**
+ * emac_set_coalesce: Set interrupt coalesce settings for this device
+ * @ndev: The DaVinci EMAC network adapter
+ * @coal: ethtool coalesce settings structure
+ *
+ * Set interrupt coalesce parameters
+ *
+ */
+static int emac_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ u32 int_ctrl, num_interrupts = 0;
+ u32 prescale = 0, addnl_dvdr = 1, coal_intvl = 0;
+
+ if (!coal->rx_coalesce_usecs)
+ return -EINVAL;
+
+ coal_intvl = coal->rx_coalesce_usecs;
+
+ switch (priv->version) {
+ case EMAC_VERSION_2:
+ int_ctrl = emac_ctrl_read(EMAC_DM646X_CMINTCTRL);
+ prescale = priv->bus_freq_mhz * 4;
+
+ if (coal_intvl < EMAC_DM646X_CMINTMIN_INTVL)
+ coal_intvl = EMAC_DM646X_CMINTMIN_INTVL;
+
+ if (coal_intvl > EMAC_DM646X_CMINTMAX_INTVL) {
+ /*
+ * Interrupt pacer works with 4us Pulse, we can
+ * throttle further by dilating the 4us pulse.
+ */
+ addnl_dvdr = EMAC_DM646X_INTPRESCALE_MASK / prescale;
+
+ if (addnl_dvdr > 1) {
+ prescale *= addnl_dvdr;
+ if (coal_intvl > (EMAC_DM646X_CMINTMAX_INTVL
+ * addnl_dvdr))
+ coal_intvl = (EMAC_DM646X_CMINTMAX_INTVL
+ * addnl_dvdr);
+ } else {
+ addnl_dvdr = 1;
+ coal_intvl = EMAC_DM646X_CMINTMAX_INTVL;
+ }
+ }
+
+ num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
+
+ int_ctrl |= EMAC_DM646X_INTPACEEN;
+ int_ctrl &= (~EMAC_DM646X_INTPRESCALE_MASK);
+ int_ctrl |= (prescale & EMAC_DM646X_INTPRESCALE_MASK);
+ emac_ctrl_write(EMAC_DM646X_CMINTCTRL, int_ctrl);
+
+ emac_ctrl_write(EMAC_DM646X_CMRXINTMAX, num_interrupts);
+ emac_ctrl_write(EMAC_DM646X_CMTXINTMAX, num_interrupts);
+
+ break;
+ default:
+ int_ctrl = emac_ctrl_read(EMAC_CTRL_EWINTTCNT);
+ int_ctrl &= (~EMAC_DM644X_EWINTCNT_MASK);
+ prescale = coal_intvl * priv->bus_freq_mhz;
+ if (prescale > EMAC_DM644X_EWINTCNT_MASK) {
+ prescale = EMAC_DM644X_EWINTCNT_MASK;
+ coal_intvl = prescale / priv->bus_freq_mhz;
+ }
+ emac_ctrl_write(EMAC_CTRL_EWINTTCNT, (int_ctrl | prescale));
+
+ break;
+ }
+
+ printk(KERN_INFO"Set coalesce to %d usecs.\n", coal_intvl);
+ priv->coal_intvl = coal_intvl;
+
+ return 0;
+}
+
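+/*
+ * To illustrate the pacing arithmetic above (a sketch, not taken from the
+ * TRM): assuming a 100 MHz bus clock on an EMAC_VERSION_2 device and a
+ * requested rx_coalesce_usecs of 250 that falls inside the CMINT min/max
+ * window:
+ *
+ *	prescale       = 100 * 4    = 400  (ticks per 4us pacer pulse)
+ *	num_interrupts = 1000 / 250 = 4    (per millisecond, addnl_dvdr == 1)
+ *
+ * i.e. CMRXINTMAX/CMTXINTMAX are programmed to allow at most 4 RX and 4 TX
+ * interrupts per millisecond.
+ */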
+
+/**
+ * ethtool_ops: DaVinci EMAC Ethtool structure
+ *
+ * Ethtool support for EMAC adapter
+ *
+ */
+static const struct ethtool_ops ethtool_ops = {
+ .get_drvinfo = emac_get_drvinfo,
+ .get_settings = emac_get_settings,
+ .set_settings = emac_set_settings,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = emac_get_coalesce,
+ .set_coalesce = emac_set_coalesce,
+};
+
+/**
+ * emac_update_phystatus: Update Phy status
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Updates phy status and takes action for network queue if required
+ * based upon link status
+ *
+ */
+static void emac_update_phystatus(struct emac_priv *priv)
+{
+ u32 mac_control;
+ u32 new_duplex;
+ u32 cur_duplex;
+ struct net_device *ndev = priv->ndev;
+
+ mac_control = emac_read(EMAC_MACCONTROL);
+ cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ if (priv->phydev)
+ new_duplex = priv->phydev->duplex;
+ else
+ new_duplex = DUPLEX_FULL;
+
+ /* We get called only if link has changed (speed/duplex/status) */
+ if ((priv->link) && (new_duplex != cur_duplex)) {
+ priv->duplex = new_duplex;
+ if (DUPLEX_FULL == priv->duplex)
+ mac_control |= (EMAC_MACCONTROL_FULLDUPLEXEN);
+ else
+ mac_control &= ~(EMAC_MACCONTROL_FULLDUPLEXEN);
+ }
+
+ if (priv->speed == SPEED_1000 && (priv->version == EMAC_VERSION_2)) {
+ mac_control = emac_read(EMAC_MACCONTROL);
+ mac_control |= (EMAC_DM646X_MACCONTORL_GIG |
+ EMAC_DM646X_MACCONTORL_GIGFORCE);
+ } else {
+ /* Clear the GIG bit and GIGFORCE bit */
+ mac_control &= ~(EMAC_DM646X_MACCONTORL_GIGFORCE |
+ EMAC_DM646X_MACCONTORL_GIG);
+
+ if (priv->rmii_en && (priv->speed == SPEED_100))
+ mac_control |= EMAC_MACCONTROL_RMIISPEED_MASK;
+ else
+ mac_control &= ~EMAC_MACCONTROL_RMIISPEED_MASK;
+ }
+
+ /* Update mac_control if changed */
+ emac_write(EMAC_MACCONTROL, mac_control);
+
+ if (priv->link) {
+ /* link ON */
+ if (!netif_carrier_ok(ndev))
+ netif_carrier_on(ndev);
+ /* reactivate the transmit queue if it is stopped */
+ if (netif_running(ndev) && netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+ } else {
+ /* link OFF */
+ if (netif_carrier_ok(ndev))
+ netif_carrier_off(ndev);
+ if (!netif_queue_stopped(ndev))
+ netif_stop_queue(ndev);
+ }
+}
+
+/**
+ * hash_get: Calculate hash value from mac address
+ * @addr: mac address to calculate the hash for
+ *
+ * Calculates hash value from mac address
+ *
+ */
+static u32 hash_get(u8 *addr)
+{
+ u32 hash;
+ u8 tmpval;
+ int cnt;
+ hash = 0;
+
+ for (cnt = 0; cnt < 2; cnt++) {
+ tmpval = *addr++;
+ hash ^= (tmpval >> 2) ^ (tmpval << 4);
+ tmpval = *addr++;
+ hash ^= (tmpval >> 4) ^ (tmpval << 2);
+ tmpval = *addr++;
+ hash ^= (tmpval >> 6) ^ (tmpval);
+ }
+
+ return hash & 0x3F;
+}
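+/*
+ * For illustration, hashing the (example) multicast address
+ * 01:00:5e:00:00:fb with the scheme above gives:
+ *
+ *	hash = (0x00^0x10) ^ 0x00 ^ (0x01^0x5e) ^ 0x00 ^ 0x00 ^ (0x03^0xfb)
+ *	     = 0x10 ^ 0x5f ^ 0xf8 = 0xb7
+ *
+ * and 0xb7 & 0x3f = 0x37 = 55, which hash_add() maps to bit 23 of
+ * MACHASH2.
+ */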
+
+/**
+ * hash_add: Hash function to add mac addr to hash table
+ * @priv: The DaVinci EMAC private adapter structure
+ * @mac_addr: mac address to add to hash table
+ *
+ * Adds mac address to the internal hash table
+ *
+ */
+static int hash_add(struct emac_priv *priv, u8 *mac_addr)
+{
+ struct device *emac_dev = &priv->ndev->dev;
+ u32 rc = 0;
+ u32 hash_bit;
+ u32 hash_value = hash_get(mac_addr);
+
+ if (hash_value >= EMAC_NUM_MULTICAST_BITS) {
+ if (netif_msg_drv(priv)) {
+ dev_err(emac_dev, "DaVinci EMAC: hash_add(): Invalid "\
+ "Hash %08x, should not be greater than %08x",
+ hash_value, (EMAC_NUM_MULTICAST_BITS - 1));
+ }
+ return -1;
+ }
+
+ /* set the hash bit only if not previously set */
+ if (priv->multicast_hash_cnt[hash_value] == 0) {
+ rc = 1; /* hash value changed */
+ if (hash_value < 32) {
+ hash_bit = BIT(hash_value);
+ priv->mac_hash1 |= hash_bit;
+ } else {
+ hash_bit = BIT((hash_value - 32));
+ priv->mac_hash2 |= hash_bit;
+ }
+ }
+
+ /* incr counter for num of mcast addr's mapped to "this" hash bit */
+ ++priv->multicast_hash_cnt[hash_value];
+
+ return rc;
+}
+
+/**
+ * hash_del: Hash function to delete mac addr from hash table
+ * @priv: The DaVinci EMAC private adapter structure
+ * @mac_addr: mac address to delete from hash table
+ *
+ * Removes mac address from the internal hash table
+ *
+ */
+static int hash_del(struct emac_priv *priv, u8 *mac_addr)
+{
+ u32 hash_value;
+ u32 hash_bit;
+
+ hash_value = hash_get(mac_addr);
+ if (priv->multicast_hash_cnt[hash_value] > 0) {
+ /* dec cntr for num of mcast addr's mapped to this hash bit */
+ --priv->multicast_hash_cnt[hash_value];
+ }
+
+ /* if counter still > 0, at least one multicast address refers
+ * to this hash bit. so return 0 */
+ if (priv->multicast_hash_cnt[hash_value] > 0)
+ return 0;
+
+ if (hash_value < 32) {
+ hash_bit = BIT(hash_value);
+ priv->mac_hash1 &= ~hash_bit;
+ } else {
+ hash_bit = BIT((hash_value - 32));
+ priv->mac_hash2 &= ~hash_bit;
+ }
+
+ /* return 1 to indicate change in mac_hash registers reqd */
+ return 1;
+}
+
+/* EMAC multicast operation */
+#define EMAC_MULTICAST_ADD 0
+#define EMAC_MULTICAST_DEL 1
+#define EMAC_ALL_MULTI_SET 2
+#define EMAC_ALL_MULTI_CLR 3
+
+/**
+ * emac_add_mcast: Set multicast address in the EMAC adapter (Internal)
+ * @priv: The DaVinci EMAC private adapter structure
+ * @action: multicast operation to perform
+ * @mac_addr: mac address to add or delete (unused for all-multi operations)
+ *
+ * Set multicast addresses in EMAC adapter - internal function
+ *
+ */
+static void emac_add_mcast(struct emac_priv *priv, u32 action, u8 *mac_addr)
+{
+ struct device *emac_dev = &priv->ndev->dev;
+ int update = -1;
+
+ switch (action) {
+ case EMAC_MULTICAST_ADD:
+ update = hash_add(priv, mac_addr);
+ break;
+ case EMAC_MULTICAST_DEL:
+ update = hash_del(priv, mac_addr);
+ break;
+ case EMAC_ALL_MULTI_SET:
+ update = 1;
+ priv->mac_hash1 = EMAC_ALL_MULTI_REG_VALUE;
+ priv->mac_hash2 = EMAC_ALL_MULTI_REG_VALUE;
+ break;
+ case EMAC_ALL_MULTI_CLR:
+ update = 1;
+ priv->mac_hash1 = 0;
+ priv->mac_hash2 = 0;
+ memset(&(priv->multicast_hash_cnt[0]), 0,
+ sizeof(priv->multicast_hash_cnt[0]) *
+ EMAC_NUM_MULTICAST_BITS);
+ break;
+ default:
+ if (netif_msg_drv(priv))
+ dev_err(emac_dev, "DaVinci EMAC: add_mcast"\
+ ": bad operation %d", action);
+ break;
+ }
+
+ /* write to the hardware only if the register status changes */
+ if (update > 0) {
+ emac_write(EMAC_MACHASH1, priv->mac_hash1);
+ emac_write(EMAC_MACHASH2, priv->mac_hash2);
+ }
+}
+
+/**
+ * emac_dev_mcast_set: Set multicast address in the EMAC adapter
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Set multicast addresses in EMAC adapter
+ *
+ */
+static void emac_dev_mcast_set(struct net_device *ndev)
+{
+ u32 mbp_enable;
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ mbp_enable = emac_read(EMAC_RXMBPENABLE);
+ if (ndev->flags & IFF_PROMISC) {
+ mbp_enable &= (~EMAC_MBP_PROMISCCH(EMAC_DEF_PROM_CH));
+ mbp_enable |= (EMAC_MBP_RXPROMISC);
+ } else {
+ mbp_enable = (mbp_enable & ~EMAC_MBP_RXPROMISC);
+ if ((ndev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
+ mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
+ emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
+ }
+ if (!netdev_mc_empty(ndev)) {
+ struct netdev_hw_addr *ha;
+
+ mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
+ emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
+ /* program multicast address list into EMAC hardware */
+ netdev_for_each_mc_addr(ha, ndev) {
+ emac_add_mcast(priv, EMAC_MULTICAST_ADD,
+ (u8 *) ha->addr);
+ }
+ } else {
+ mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST);
+ emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
+ }
+ }
+ /* Set mbp config register */
+ emac_write(EMAC_RXMBPENABLE, mbp_enable);
+}
+
+/*************************************************************************
+ * EMAC Hardware manipulation
+ *************************************************************************/
+
+/**
+ * emac_int_disable: Disable EMAC module interrupt (from adapter)
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Disable EMAC interrupt on the adapter
+ *
+ */
+static void emac_int_disable(struct emac_priv *priv)
+{
+ if (priv->version == EMAC_VERSION_2) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /* Program C0_Int_En to zero to turn off
+ * interrupts to the CPU */
+ emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0x0);
+ emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0x0);
+ /* NOTE: Rx Threshold and Misc interrupts are not disabled */
+ if (priv->int_disable)
+ priv->int_disable();
+
+ local_irq_restore(flags);
+
+ } else {
+ /* Set DM644x control registers for interrupt control */
+ emac_ctrl_write(EMAC_CTRL_EWCTL, 0x0);
+ }
+}
+
+/**
+ * emac_int_enable: Enable EMAC module interrupt (from adapter)
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Enable EMAC interrupt on the adapter
+ *
+ */
+static void emac_int_enable(struct emac_priv *priv)
+{
+ if (priv->version == EMAC_VERSION_2) {
+ if (priv->int_enable)
+ priv->int_enable();
+
+ emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0xff);
+ emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0xff);
+
+ /* In addition to turning on interrupt Enable, we need
+ * ack by writing appropriate values to the EOI
+ * register */
+
+ /* NOTE: Rx Threshold and Misc interrupts are not enabled */
+
+ /* ack rxen only then a new pulse will be generated */
+ emac_write(EMAC_DM646X_MACEOIVECTOR,
+ EMAC_DM646X_MAC_EOI_C0_RXEN);
+
+ /* ack txen- only then a new pulse will be generated */
+ emac_write(EMAC_DM646X_MACEOIVECTOR,
+ EMAC_DM646X_MAC_EOI_C0_TXEN);
+
+ } else {
+ /* Set DM644x control registers for interrupt control */
+ emac_ctrl_write(EMAC_CTRL_EWCTL, 0x1);
+ }
+}
+
+/**
+ * emac_irq: EMAC interrupt handler
+ * @irq: interrupt number
+ * @dev_id: EMAC network adapter data structure ptr
+ *
+ * EMAC interrupt handler - we only schedule the NAPI poll here and do not
+ * process any packets; even the interrupt status (TX/RX/Err) is checked in
+ * the NAPI poll function
+ *
+ * Returns interrupt handled condition
+ */
+static irqreturn_t emac_irq(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ ++priv->isr_count;
+ if (likely(netif_running(priv->ndev))) {
+ emac_int_disable(priv);
+ napi_schedule(&priv->napi);
+ } else {
+ /* we are closing down, so don't process anything */
+ }
+ return IRQ_HANDLED;
+}
+
+static struct sk_buff *emac_rx_alloc(struct emac_priv *priv)
+{
+ struct sk_buff *skb = dev_alloc_skb(priv->rx_buf_size);
+ if (WARN_ON(!skb))
+ return NULL;
+ skb->dev = priv->ndev;
+ skb_reserve(skb, NET_IP_ALIGN);
+ return skb;
+}
+
+static void emac_rx_handler(void *token, int len, int status)
+{
+ struct sk_buff *skb = token;
+ struct net_device *ndev = skb->dev;
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *emac_dev = &ndev->dev;
+ int ret;
+
+ /* free and bail if we are shutting down */
+ if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /* recycle on receive error */
+ if (status < 0) {
+ ndev->stats.rx_errors++;
+ goto recycle;
+ }
+
+ /* feed received packet up the stack */
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb(skb);
+ ndev->stats.rx_bytes += len;
+ ndev->stats.rx_packets++;
+
+ /* alloc a new packet for receive */
+ skb = emac_rx_alloc(priv);
+ if (!skb) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "failed rx buffer alloc\n");
+ return;
+ }
+
+recycle:
+ ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
+ skb_tailroom(skb), GFP_KERNEL);
+ if (WARN_ON(ret < 0))
+ dev_kfree_skb_any(skb);
+}
+
+static void emac_tx_handler(void *token, int len, int status)
+{
+ struct sk_buff *skb = token;
+ struct net_device *ndev = skb->dev;
+
+ if (unlikely(netif_queue_stopped(ndev)))
+ netif_start_queue(ndev);
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += len;
+ dev_kfree_skb_any(skb);
+}
+
+/**
+ * emac_dev_xmit: EMAC Transmit function
+ * @skb: SKB pointer
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called by the system to transmit a packet - we queue the packet in
+ * EMAC hardware transmit queue
+ *
+ * Returns success (NETDEV_TX_OK) or an error code (typically out of
+ * descriptors)
+ */
+static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct device *emac_dev = &ndev->dev;
+ int ret_code;
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ /* If no link, return */
+ if (unlikely(!priv->link)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "DaVinci EMAC: No link to transmit");
+ goto fail_tx;
+ }
+
+ ret_code = skb_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE);
+ if (unlikely(ret_code < 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "DaVinci EMAC: packet pad failed");
+ goto fail_tx;
+ }
+
+ skb_tx_timestamp(skb);
+
+ ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
+ GFP_KERNEL);
+ if (unlikely(ret_code != 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "DaVinci EMAC: desc submit failed");
+ goto fail_tx;
+ }
+
+ return NETDEV_TX_OK;
+
+fail_tx:
+ ndev->stats.tx_dropped++;
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+}
+
+/**
+ * emac_dev_tx_timeout: EMAC Transmit timeout function
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called when the system detects that an skb transmit timeout has expired,
+ * potentially due to a fault in the adapter preventing it from sending
+ * packets out on the wire. We tear down the TX channel, assuming a hardware
+ * error, and re-initialize it for hardware operation
+ *
+ */
+static void emac_dev_tx_timeout(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *emac_dev = &ndev->dev;
+
+ if (netif_msg_tx_err(priv))
+ dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX");
+
+ emac_dump_regs(priv);
+
+ ndev->stats.tx_errors++;
+ emac_int_disable(priv);
+ cpdma_chan_stop(priv->txchan);
+ cpdma_chan_start(priv->txchan);
+ emac_int_enable(priv);
+}
+
+/**
+ * emac_set_type0addr: Set EMAC Type0 mac address
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ *
+ * Called internally to set Type0 mac address of the adapter (Device)
+ *
+ */
+static void emac_set_type0addr(struct emac_priv *priv, u32 ch, char *mac_addr)
+{
+ u32 val;
+ val = ((mac_addr[5] << 8) | (mac_addr[4]));
+ emac_write(EMAC_MACSRCADDRLO, val);
+
+ val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \
+ (mac_addr[1] << 8) | (mac_addr[0]));
+ emac_write(EMAC_MACSRCADDRHI, val);
+ val = emac_read(EMAC_RXUNICASTSET);
+ val |= BIT(ch);
+ emac_write(EMAC_RXUNICASTSET, val);
+ val = emac_read(EMAC_RXUNICASTCLEAR);
+ val &= ~BIT(ch);
+ emac_write(EMAC_RXUNICASTCLEAR, val);
+}
+
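+/*
+ * For illustration, a (hypothetical) station address 00:11:22:33:44:55,
+ * i.e. mac_addr[0] == 0x00 ... mac_addr[5] == 0x55, is programmed as:
+ *
+ *	MACSRCADDRHI = 0x33221100	(bytes 0-3, byte 0 in the LSB)
+ *	MACSRCADDRLO = 0x00005544	(bytes 4-5)
+ */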
+/**
+ * emac_set_type1addr: Set EMAC Type1 mac address
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ *
+ * Called internally to set Type1 mac address of the adapter (Device)
+ *
+ */
+static void emac_set_type1addr(struct emac_priv *priv, u32 ch, char *mac_addr)
+{
+ u32 val;
+ emac_write(EMAC_MACINDEX, ch);
+ val = ((mac_addr[5] << 8) | mac_addr[4]);
+ emac_write(EMAC_MACADDRLO, val);
+ val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \
+ (mac_addr[1] << 8) | (mac_addr[0]));
+ emac_write(EMAC_MACADDRHI, val);
+ emac_set_type0addr(priv, ch, mac_addr);
+}
+
+/**
+ * emac_set_type2addr: Set EMAC Type2 mac address
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ * @index: index into RX address entries
+ * @match: match parameter for RX address matching logic
+ *
+ * Called internally to set Type2 mac address of the adapter (Device)
+ *
+ */
+static void emac_set_type2addr(struct emac_priv *priv, u32 ch,
+ char *mac_addr, int index, int match)
+{
+ u32 val;
+ emac_write(EMAC_MACINDEX, index);
+ val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \
+ (mac_addr[1] << 8) | (mac_addr[0]));
+ emac_write(EMAC_MACADDRHI, val);
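+ /* low word: address bytes 4-5, plus the RX channel in bits 18-16,
+ * the match-filter enable in bit 19 and the address-valid flag in
+ * bit 20 */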
+ val = ((mac_addr[5] << 8) | mac_addr[4] | ((ch & 0x7) << 16) | \
+ (match << 19) | BIT(20));
+ emac_write(EMAC_MACADDRLO, val);
+ emac_set_type0addr(priv, ch, mac_addr);
+}
+
+/**
+ * emac_setmac: Set mac address in the adapter (internal function)
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ *
+ * Called internally to set the mac address of the adapter (Device)
+ *
+ */
+static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr)
+{
+ struct device *emac_dev = &priv->ndev->dev;
+
+ if (priv->rx_addr_type == 0) {
+ emac_set_type0addr(priv, ch, mac_addr);
+ } else if (priv->rx_addr_type == 1) {
+ u32 cnt;
+ for (cnt = 0; cnt < EMAC_MAX_TXRX_CHANNELS; cnt++)
+ emac_set_type1addr(priv, ch, mac_addr);
+ } else if (priv->rx_addr_type == 2) {
+ emac_set_type2addr(priv, ch, mac_addr, ch, 1);
+ emac_set_type0addr(priv, ch, mac_addr);
+ } else {
+ if (netif_msg_drv(priv))
+ dev_err(emac_dev, "DaVinci EMAC: Wrong addressing\n");
+ }
+}
+
+/**
+ * emac_dev_setmac_addr: Set mac address in the adapter
+ * @ndev: The DaVinci EMAC network adapter
+ * @addr: MAC address to set in device
+ *
+ * Called by the system to set the mac address of the adapter (Device)
+ *
+ * Returns success (0) or an appropriate error code
+ */
+static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *emac_dev = &priv->ndev->dev;
+ struct sockaddr *sa = addr;
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EINVAL;
+
+ /* Store mac addr in priv and rx channel and set it in EMAC hw */
+ memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
+ memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
+
+ /* MAC address is configured only after the interface is enabled. */
+ if (netif_running(ndev)) {
+ memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
+ emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
+ }
+
+ if (netif_msg_drv(priv))
+ dev_notice(emac_dev, "DaVinci EMAC: emac_dev_setmac_addr %pM\n",
+ priv->mac_addr);
+
+ return 0;
+}
+
+/**
+ * emac_hw_enable: Enable EMAC hardware for packet transmission/reception
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Enables EMAC hardware for packet processing - resets the module, programs
+ * the MAC control, RX filter and MAC address settings, and then enables
+ * NAPI and device interrupts
+ *
+ * Returns success (0) or appropriate error code (none right now)
+ */
+static int emac_hw_enable(struct emac_priv *priv)
+{
+ u32 val, mbp_enable, mac_control;
+
+ /* Soft reset */
+ emac_write(EMAC_SOFTRESET, 1);
+ while (emac_read(EMAC_SOFTRESET))
+ cpu_relax();
+
+ /* Disable interrupt & Set pacing for more interrupts initially */
+ emac_int_disable(priv);
+
+ /* Full duplex enable bit set when auto negotiation happens */
+ mac_control =
+ (((EMAC_DEF_TXPRIO_FIXED) ? (EMAC_MACCONTROL_TXPTYPE) : 0x0) |
+ ((priv->speed == 1000) ? EMAC_MACCONTROL_GIGABITEN : 0x0) |
+ ((EMAC_DEF_TXPACING_EN) ? (EMAC_MACCONTROL_TXPACEEN) : 0x0) |
+ ((priv->duplex == DUPLEX_FULL) ? 0x1 : 0));
+ emac_write(EMAC_MACCONTROL, mac_control);
+
+ mbp_enable =
+ (((EMAC_DEF_PASS_CRC) ? (EMAC_RXMBP_PASSCRC_MASK) : 0x0) |
+ ((EMAC_DEF_QOS_EN) ? (EMAC_RXMBP_QOSEN_MASK) : 0x0) |
+ ((EMAC_DEF_NO_BUFF_CHAIN) ? (EMAC_RXMBP_NOCHAIN_MASK) : 0x0) |
+ ((EMAC_DEF_MACCTRL_FRAME_EN) ? (EMAC_RXMBP_CMFEN_MASK) : 0x0) |
+ ((EMAC_DEF_SHORT_FRAME_EN) ? (EMAC_RXMBP_CSFEN_MASK) : 0x0) |
+ ((EMAC_DEF_ERROR_FRAME_EN) ? (EMAC_RXMBP_CEFEN_MASK) : 0x0) |
+ ((EMAC_DEF_PROM_EN) ? (EMAC_RXMBP_CAFEN_MASK) : 0x0) |
+ ((EMAC_DEF_PROM_CH & EMAC_RXMBP_CHMASK) << \
+ EMAC_RXMBP_PROMCH_SHIFT) |
+ ((EMAC_DEF_BCAST_EN) ? (EMAC_RXMBP_BROADEN_MASK) : 0x0) |
+ ((EMAC_DEF_BCAST_CH & EMAC_RXMBP_CHMASK) << \
+ EMAC_RXMBP_BROADCH_SHIFT) |
+ ((EMAC_DEF_MCAST_EN) ? (EMAC_RXMBP_MULTIEN_MASK) : 0x0) |
+ ((EMAC_DEF_MCAST_CH & EMAC_RXMBP_CHMASK) << \
+ EMAC_RXMBP_MULTICH_SHIFT));
+ emac_write(EMAC_RXMBPENABLE, mbp_enable);
+ emac_write(EMAC_RXMAXLEN, (EMAC_DEF_MAX_FRAME_SIZE &
+ EMAC_RX_MAX_LEN_MASK));
+ emac_write(EMAC_RXBUFFEROFFSET, (EMAC_DEF_BUFFER_OFFSET &
+ EMAC_RX_BUFFER_OFFSET_MASK));
+ emac_write(EMAC_RXFILTERLOWTHRESH, 0);
+ emac_write(EMAC_RXUNICASTCLEAR, EMAC_RX_UNICAST_CLEAR_ALL);
+ priv->rx_addr_type = (emac_read(EMAC_MACCONFIG) >> 8) & 0xFF;
+
+ emac_write(EMAC_MACINTMASKSET, EMAC_MAC_HOST_ERR_INTMASK_VAL);
+
+ emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
+
+ /* Enable MII */
+ val = emac_read(EMAC_MACCONTROL);
+ val |= (EMAC_MACCONTROL_GMIIEN);
+ emac_write(EMAC_MACCONTROL, val);
+
+ /* Enable NAPI and interrupts */
+ napi_enable(&priv->napi);
+ emac_int_enable(priv);
+ return 0;
+}
+
+/**
+ * emac_poll: EMAC NAPI Poll function
+ * @napi: NAPI structure, embedded in the EMAC private adapter structure
+ * @budget: Number of receive packets to process (as told by NAPI layer)
+ *
+ * NAPI Poll function implemented to process packets as per budget. We check
+ * the type of interrupt on the device and accordingly call the TX or RX
+ * packet processing functions. We follow the budget for RX processing and
+ * also cap the number of TX packets processed via a config param. NAPI is
+ * completed (re-enabling interrupts) only when the RX budget is not
+ * exhausted; otherwise the core reschedules the poll.
+ *
+ * Returns the number of RX packets processed
+ */
+static int emac_poll(struct napi_struct *napi, int budget)
+{
+ unsigned int mask;
+ struct emac_priv *priv = container_of(napi, struct emac_priv, napi);
+ struct net_device *ndev = priv->ndev;
+ struct device *emac_dev = &ndev->dev;
+ u32 status = 0;
+ u32 num_tx_pkts = 0, num_rx_pkts = 0;
+
+ /* Check interrupt vectors and call packet processing */
+ status = emac_read(EMAC_MACINVECTOR);
+
+ mask = EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC;
+
+ if (priv->version == EMAC_VERSION_2)
+ mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC;
+
+ if (status & mask) {
+ num_tx_pkts = cpdma_chan_process(priv->txchan,
+ EMAC_DEF_TX_MAX_SERVICE);
+ } /* TX processing */
+
+ mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC;
+
+ if (priv->version == EMAC_VERSION_2)
+ mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC;
+
+ if (status & mask) {
+ num_rx_pkts = cpdma_chan_process(priv->rxchan, budget);
+ } /* RX processing */
+
+ mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
+ if (priv->version == EMAC_VERSION_2)
+ mask = EMAC_DM646X_MAC_IN_VECTOR_HOST_INT;
+
+ if (unlikely(status & mask)) {
+ u32 ch, cause;
+ dev_err(emac_dev, "DaVinci EMAC: Fatal Hardware Error\n");
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+
+ status = emac_read(EMAC_MACSTATUS);
+ cause = ((status & EMAC_MACSTATUS_TXERRCODE_MASK) >>
+ EMAC_MACSTATUS_TXERRCODE_SHIFT);
+ if (cause) {
+ ch = ((status & EMAC_MACSTATUS_TXERRCH_MASK) >>
+ EMAC_MACSTATUS_TXERRCH_SHIFT);
+ if (net_ratelimit()) {
+ dev_err(emac_dev, "TX Host error %s on ch=%d\n",
+ &emac_txhost_errcodes[cause][0], ch);
+ }
+ }
+ cause = ((status & EMAC_MACSTATUS_RXERRCODE_MASK) >>
+ EMAC_MACSTATUS_RXERRCODE_SHIFT);
+ if (cause) {
+ ch = ((status & EMAC_MACSTATUS_RXERRCH_MASK) >>
+ EMAC_MACSTATUS_RXERRCH_SHIFT);
+ if (netif_msg_hw(priv) && net_ratelimit())
+ dev_err(emac_dev, "RX Host error %s on ch=%d\n",
+ &emac_rxhost_errcodes[cause][0], ch);
+ }
+ } else if (num_rx_pkts < budget) {
+ napi_complete(napi);
+ emac_int_enable(priv);
+ }
+
+ return num_rx_pkts;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * emac_poll_controller: EMAC Poll controller function
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Polled functionality used by netconsole and others in non interrupt mode
+ *
+ */
+void emac_poll_controller(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ emac_int_disable(priv);
+ emac_irq(ndev->irq, ndev);
+ emac_int_enable(priv);
+}
+#endif
+
+static void emac_adjust_link(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct phy_device *phydev = priv->phydev;
+ unsigned long flags;
+ int new_state = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (phydev->link) {
+ /* check the mode of operation - full/half duplex */
+ if (phydev->duplex != priv->duplex) {
+ new_state = 1;
+ priv->duplex = phydev->duplex;
+ }
+ if (phydev->speed != priv->speed) {
+ new_state = 1;
+ priv->speed = phydev->speed;
+ }
+ if (!priv->link) {
+ new_state = 1;
+ priv->link = 1;
+ }
+
+ } else if (priv->link) {
+ new_state = 1;
+ priv->link = 0;
+ priv->speed = 0;
+ priv->duplex = ~0;
+ }
+ if (new_state) {
+ emac_update_phystatus(priv);
+ phy_print_status(priv->phydev);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/*************************************************************************
+ * Linux Driver Model
+ *************************************************************************/
+
+/**
+ * emac_devioctl: EMAC adapter ioctl
+ * @ndev: The DaVinci EMAC network adapter
+ * @ifrq: request parameter
+ * @cmd: command parameter
+ *
+ * EMAC driver ioctl function
+ *
+ * Returns success(0) or appropriate error code
+ */
+static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ if (!(netif_running(ndev)))
+ return -EINVAL;
+
+ /* TODO: Add phy read and write and private statistics get feature */
+
+ return phy_mii_ioctl(priv->phydev, ifrq, cmd);
+}
+
+static int match_first_device(struct device *dev, void *data)
+{
+ return 1;
+}
+
+/**
+ * emac_dev_open: EMAC device open
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called when system wants to start the interface. We init TX/RX channels
+ * and enable the hardware for packet reception/transmission and start the
+ * network queue.
+ *
+ * Returns 0 for a successful open, or appropriate error code
+ */
+static int emac_dev_open(struct net_device *ndev)
+{
+ struct device *emac_dev = &ndev->dev;
+ u32 cnt;
+ struct resource *res;
+ int m, ret;
+ int i = 0;
+ int k = 0;
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ netif_carrier_off(ndev);
+ for (cnt = 0; cnt < ETH_ALEN; cnt++)
+ ndev->dev_addr[cnt] = priv->mac_addr[cnt];
+
+ /* Configuration items */
+ priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN;
+
+ priv->mac_hash1 = 0;
+ priv->mac_hash2 = 0;
+ emac_write(EMAC_MACHASH1, 0);
+ emac_write(EMAC_MACHASH2, 0);
+
+ for (i = 0; i < EMAC_DEF_RX_NUM_DESC; i++) {
+ struct sk_buff *skb = emac_rx_alloc(priv);
+
+ if (!skb)
+ break;
+
+ ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
+ skb_tailroom(skb), GFP_KERNEL);
+ if (WARN_ON(ret < 0))
+ break;
+ }
+
+ /* Request IRQ */
+
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
+ for (i = res->start; i <= res->end; i++) {
+ if (request_irq(i, emac_irq, IRQF_DISABLED,
+ ndev->name, ndev))
+ goto rollback;
+ }
+ k++;
+ }
+
+ /* Start/Enable EMAC hardware */
+ emac_hw_enable(priv);
+
+ /* Enable Interrupt pacing if configured */
+ if (priv->coal_intvl != 0) {
+ struct ethtool_coalesce coal;
+
+ coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+ emac_set_coalesce(ndev, &coal);
+ }
+
+ cpdma_ctlr_start(priv->dma);
+
+ priv->phydev = NULL;
+ /* use the first phy on the bus if pdata did not give us a phy id */
+ if (!priv->phy_id) {
+ struct device *phy;
+
+ phy = bus_find_device(&mdio_bus_type, NULL, NULL,
+ match_first_device);
+ if (phy)
+ priv->phy_id = dev_name(phy);
+ }
+
+ if (priv->phy_id && *priv->phy_id) {
+ priv->phydev = phy_connect(ndev, priv->phy_id,
+ &emac_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(priv->phydev)) {
+ int err = PTR_ERR(priv->phydev);
+
+ dev_err(emac_dev, "could not connect to phy %s\n",
+ priv->phy_id);
+ priv->phydev = NULL;
+ return err;
+ }
+ }
+
+ priv->link = 0;
+ priv->speed = 0;
+ priv->duplex = ~0;
+
+ dev_info(emac_dev, "attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, id=%x)\n",
+ priv->phydev->drv->name, dev_name(&priv->phydev->dev),
+ priv->phydev->phy_id);
+ } else {
+ /* No PHY, fix the link, speed and duplex settings */
+ dev_notice(emac_dev, "no phy, defaulting to 100/full\n");
+ priv->link = 1;
+ priv->speed = SPEED_100;
+ priv->duplex = DUPLEX_FULL;
+ emac_update_phystatus(priv);
+ }
+
+ if (!netif_running(ndev)) /* debug only - to avoid compiler warning */
+ emac_dump_regs(priv);
+
+ if (netif_msg_drv(priv))
+ dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name);
+
+ if (priv->phydev)
+ phy_start(priv->phydev);
+
+ return 0;
+
+rollback:
+
+ dev_err(emac_dev, "DaVinci EMAC: request_irq() failed");
+
+ /* free the IRQs acquired so far: 'i' itself failed, so start at
+ * i - 1 for the current resource, then walk the preceding
+ * resources from their last IRQ down */
+ for (m = i - 1; k >= 0; k--) {
+ for (; m >= res->start; m--)
+ free_irq(m, ndev);
+ if (k > 0) {
+ res = platform_get_resource(priv->pdev,
+ IORESOURCE_IRQ, k - 1);
+ m = res->end;
+ }
+ }
+ return -EBUSY;
+}
+
+/**
+ * emac_dev_stop: EMAC device stop
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called when system wants to stop or down the interface. We stop the network
+ * queue, disable interrupts and cleanup TX/RX channels.
+ *
+ * Returns success (0)
+ */
+static int emac_dev_stop(struct net_device *ndev)
+{
+ struct resource *res;
+ int i = 0;
+ int irq_num;
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *emac_dev = &ndev->dev;
+
+ /* inform the upper layers. */
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+
+ netif_carrier_off(ndev);
+ emac_int_disable(priv);
+ cpdma_ctlr_stop(priv->dma);
+ emac_write(EMAC_SOFTRESET, 1);
+
+ if (priv->phydev)
+ phy_disconnect(priv->phydev);
+
+ /* Free IRQ */
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
+ for (irq_num = res->start; irq_num <= res->end; irq_num++)
+ free_irq(irq_num, priv->ndev);
+ i++;
+ }
+
+ if (netif_msg_drv(priv))
+ dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
+
+ return 0;
+}
+
+/**
+ * emac_dev_getnetstats: EMAC get statistics function
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called when system wants to get statistics from the device.
+ *
+ * We return the statistics in net_device_stats structure pulled from emac
+ */
+static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ u32 mac_control;
+ u32 stats_clear_mask;
+
+ /* update emac hardware stats and reset the registers*/
+
+ mac_control = emac_read(EMAC_MACCONTROL);
+
+ if (mac_control & EMAC_MACCONTROL_GMIIEN)
+ stats_clear_mask = EMAC_STATS_CLR_MASK;
+ else
+ stats_clear_mask = 0;
+
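+ /*
+ * Each counter below is read and accumulated into net_device_stats,
+ * then stats_clear_mask is written back: EMAC_STATS_CLR_MASK when the
+ * MAC is enabled (GMIIEN set) so the hardware counter restarts, or
+ * zero (a no-op write) when it is disabled.
+ */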
+ ndev->stats.multicast += emac_read(EMAC_RXMCASTFRAMES);
+ emac_write(EMAC_RXMCASTFRAMES, stats_clear_mask);
+
+ ndev->stats.collisions += (emac_read(EMAC_TXCOLLISION) +
+ emac_read(EMAC_TXSINGLECOLL) +
+ emac_read(EMAC_TXMULTICOLL));
+ emac_write(EMAC_TXCOLLISION, stats_clear_mask);
+ emac_write(EMAC_TXSINGLECOLL, stats_clear_mask);
+ emac_write(EMAC_TXMULTICOLL, stats_clear_mask);
+
+ ndev->stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) +
+ emac_read(EMAC_RXJABBER) +
+ emac_read(EMAC_RXUNDERSIZED));
+ emac_write(EMAC_RXOVERSIZED, stats_clear_mask);
+ emac_write(EMAC_RXJABBER, stats_clear_mask);
+ emac_write(EMAC_RXUNDERSIZED, stats_clear_mask);
+
+ ndev->stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) +
+ emac_read(EMAC_RXMOFOVERRUNS));
+ emac_write(EMAC_RXSOFOVERRUNS, stats_clear_mask);
+ emac_write(EMAC_RXMOFOVERRUNS, stats_clear_mask);
+
+ ndev->stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS);
+ emac_write(EMAC_RXDMAOVERRUNS, stats_clear_mask);
+
+ ndev->stats.tx_carrier_errors +=
+ emac_read(EMAC_TXCARRIERSENSE);
+ emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask);
+
+ ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN);
+ emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
+
+ return &ndev->stats;
+}
+
+static const struct net_device_ops emac_netdev_ops = {
+ .ndo_open = emac_dev_open,
+ .ndo_stop = emac_dev_stop,
+ .ndo_start_xmit = emac_dev_xmit,
+ .ndo_set_rx_mode = emac_dev_mcast_set,
+ .ndo_set_mac_address = emac_dev_setmac_addr,
+ .ndo_do_ioctl = emac_devioctl,
+ .ndo_tx_timeout = emac_dev_tx_timeout,
+ .ndo_get_stats = emac_dev_getnetstats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = emac_poll_controller,
+#endif
+};
+
+/**
+ * davinci_emac_probe: EMAC device probe
+ * @pdev: The DaVinci EMAC device that we are probing
+ *
+ * Called when probing for emac devices. We get details of instances and
+ * resource information from platform init and register a network device
+ * and allocate resources necessary for the driver to operate
+ */
+static int __devinit davinci_emac_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct resource *res;
+ struct net_device *ndev;
+ struct emac_priv *priv;
+ unsigned long size, hw_ram_addr;
+ struct emac_platform_data *pdata;
+ struct device *emac_dev;
+ struct cpdma_params dma_params;
+
+ /* obtain emac clock from kernel */
+ emac_clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(emac_clk)) {
+ dev_err(&pdev->dev, "failed to get EMAC clock\n");
+ return -EBUSY;
+ }
+ emac_bus_frequency = clk_get_rate(emac_clk);
+ /* TODO: Probe PHY here if possible */
+
+ ndev = alloc_etherdev(sizeof(struct emac_priv));
+ if (!ndev) {
+ dev_err(&pdev->dev, "error allocating net_device\n");
+ rc = -ENOMEM;
+ goto free_clk;
+ }
+
+ platform_set_drvdata(pdev, ndev);
+ priv = netdev_priv(ndev);
+ priv->pdev = pdev;
+ priv->ndev = ndev;
+ priv->msg_enable = netif_msg_init(debug_level, DAVINCI_EMAC_DEBUG);
+
+ spin_lock_init(&priv->lock);
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data\n");
+ rc = -ENODEV;
+ goto probe_quit;
+ }
+
+ /* MAC addr, PHY mask and RMII enable info from platform_data */
+ memcpy(priv->mac_addr, pdata->mac_addr, 6);
+ priv->phy_id = pdata->phy_id;
+ priv->rmii_en = pdata->rmii_en;
+ priv->version = pdata->version;
+ priv->int_enable = pdata->interrupt_enable;
+ priv->int_disable = pdata->interrupt_disable;
+
+ priv->coal_intvl = 0;
+ priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000);
+
+ emac_dev = &ndev->dev;
+ /* Get EMAC platform data */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev,"error getting res\n");
+ rc = -ENOENT;
+ goto probe_quit;
+ }
+
+ priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
+ size = resource_size(res);
+ if (!request_mem_region(res->start, size, ndev->name)) {
+ dev_err(&pdev->dev, "failed request_mem_region() for regs\n");
+ rc = -ENXIO;
+ goto probe_quit;
+ }
+
+ priv->remap_addr = ioremap(res->start, size);
+ if (!priv->remap_addr) {
+ dev_err(&pdev->dev, "unable to map IO\n");
+ rc = -ENOMEM;
+ release_mem_region(res->start, size);
+ goto probe_quit;
+ }
+ priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset;
+ ndev->base_addr = (unsigned long)priv->remap_addr;
+
+ priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
+
+ hw_ram_addr = pdata->hw_ram_addr;
+ if (!hw_ram_addr)
+ hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
+
+ memset(&dma_params, 0, sizeof(dma_params));
+ dma_params.dev = emac_dev;
+ dma_params.dmaregs = priv->emac_base;
+ dma_params.rxthresh = priv->emac_base + 0x120;
+ dma_params.rxfree = priv->emac_base + 0x140;
+ dma_params.txhdp = priv->emac_base + 0x600;
+ dma_params.rxhdp = priv->emac_base + 0x620;
+ dma_params.txcp = priv->emac_base + 0x640;
+ dma_params.rxcp = priv->emac_base + 0x660;
+ dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS;
+ dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE;
+ dma_params.desc_hw_addr = hw_ram_addr;
+ dma_params.desc_mem_size = pdata->ctrl_ram_size;
+ dma_params.desc_align = 16;
+
+ dma_params.desc_mem_phys = pdata->no_bd_ram ? 0 :
+ (u32 __force)res->start + pdata->ctrl_ram_offset;
+
+ priv->dma = cpdma_ctlr_create(&dma_params);
+ if (!priv->dma) {
+ dev_err(&pdev->dev, "error initializing DMA\n");
+ rc = -ENOMEM;
+ goto no_dma;
+ }
+
+ priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH),
+ emac_tx_handler);
+ priv->rxchan = cpdma_chan_create(priv->dma, rx_chan_num(EMAC_DEF_RX_CH),
+ emac_rx_handler);
+ if (WARN_ON(!priv->txchan || !priv->rxchan)) {
+ rc = -ENOMEM;
+ goto no_irq_res;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "error getting irq res\n");
+ rc = -ENOENT;
+ goto no_irq_res;
+ }
+ ndev->irq = res->start;
+
+ if (!is_valid_ether_addr(priv->mac_addr)) {
+ /* Use random MAC if none passed */
+ random_ether_addr(priv->mac_addr);
+ dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
+ priv->mac_addr);
+ }
+
+ ndev->netdev_ops = &emac_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &ethtool_ops);
+ netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
+
+ clk_enable(emac_clk);
+
+ /* register the network device */
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ rc = register_netdev(ndev);
+ if (rc) {
+ dev_err(&pdev->dev, "error in register_netdev\n");
+ rc = -ENODEV;
+ goto netdev_reg_err;
+ }
+
+ if (netif_msg_probe(priv)) {
+ dev_notice(emac_dev, "DaVinci EMAC Probe found device "\
+ "(regs: %p, irq: %d)\n",
+ (void *)priv->emac_base_phys, ndev->irq);
+ }
+ return 0;
+
+netdev_reg_err:
+ clk_disable(emac_clk);
+no_irq_res:
+ if (priv->txchan)
+ cpdma_chan_destroy(priv->txchan);
+ if (priv->rxchan)
+ cpdma_chan_destroy(priv->rxchan);
+ cpdma_ctlr_destroy(priv->dma);
+no_dma:
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+ iounmap(priv->remap_addr);
+
+probe_quit:
+ free_netdev(ndev);
+free_clk:
+ clk_put(emac_clk);
+ return rc;
+}
+
+/**
+ * davinci_emac_remove: EMAC device remove
+ * @pdev: The DaVinci EMAC device that we are removing
+ *
+ * Called when removing the device driver. We disable clock usage and release
+ * the resources taken up by the driver and unregister network device
+ */
+static int __devexit davinci_emac_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n");
+
+ platform_set_drvdata(pdev, NULL);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (priv->txchan)
+ cpdma_chan_destroy(priv->txchan);
+ if (priv->rxchan)
+ cpdma_chan_destroy(priv->rxchan);
+ cpdma_ctlr_destroy(priv->dma);
+
+ release_mem_region(res->start, resource_size(res));
+
+ unregister_netdev(ndev);
+ iounmap(priv->remap_addr);
+ free_netdev(ndev);
+
+ clk_disable(emac_clk);
+ clk_put(emac_clk);
+
+ return 0;
+}
+
+static int davinci_emac_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ if (netif_running(ndev))
+ emac_dev_stop(ndev);
+
+ clk_disable(emac_clk);
+
+ return 0;
+}
+
+static int davinci_emac_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ clk_enable(emac_clk);
+
+ if (netif_running(ndev))
+ emac_dev_open(ndev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops davinci_emac_pm_ops = {
+ .suspend = davinci_emac_suspend,
+ .resume = davinci_emac_resume,
+};
+
+/**
+ * davinci_emac_driver: EMAC platform driver structure
+ */
+static struct platform_driver davinci_emac_driver = {
+ .driver = {
+ .name = "davinci_emac",
+ .owner = THIS_MODULE,
+ .pm = &davinci_emac_pm_ops,
+ },
+ .probe = davinci_emac_probe,
+ .remove = __devexit_p(davinci_emac_remove),
+};
+
+/**
+ * davinci_emac_init: EMAC driver module init
+ *
+ * Called when initializing the driver. We register the driver with
+ * the platform.
+ */
+static int __init davinci_emac_init(void)
+{
+ return platform_driver_register(&davinci_emac_driver);
+}
+late_initcall(davinci_emac_init);
+
+/**
+ * davinci_emac_exit: EMAC driver module exit
+ *
+ * Called when exiting the driver completely. We unregister the driver with
+ * the platform and exit
+ */
+static void __exit davinci_emac_exit(void)
+{
+ platform_driver_unregister(&davinci_emac_driver);
+}
+module_exit(davinci_emac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("DaVinci EMAC Maintainer: Anant Gole <anantgole@ti.com>");
+MODULE_AUTHOR("DaVinci EMAC Maintainer: Chaithrika U S <chaithrika@ti.com>");
+MODULE_DESCRIPTION("DaVinci EMAC Ethernet driver");
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
new file mode 100644
index 000000000000..7615040df756
--- /dev/null
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -0,0 +1,475 @@
+/*
+ * DaVinci MDIO Module driver
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * Shamelessly ripped out of davinci_emac.c, original copyrights follow:
+ *
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ---------------------------------------------------------------------------
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/davinci_emac.h>
+
+/*
+ * This timeout definition is a worst-case ultra defensive measure against
+ * unexpected controller lock ups. Ideally, we should never ever hit this
+ * scenario in practice.
+ */
+#define MDIO_TIMEOUT 100 /* msecs */
+
+#define PHY_REG_MASK 0x1f
+#define PHY_ID_MASK 0x1f
+
+#define DEF_OUT_FREQ 2200000 /* 2.2 MHz */
+
+struct davinci_mdio_regs {
+ u32 version;
+ u32 control;
+#define CONTROL_IDLE BIT(31)
+#define CONTROL_ENABLE BIT(30)
+#define CONTROL_MAX_DIV (0xff)
+
+ u32 alive;
+ u32 link;
+ u32 linkintraw;
+ u32 linkintmasked;
+ u32 __reserved_0[2];
+ u32 userintraw;
+ u32 userintmasked;
+ u32 userintmaskset;
+ u32 userintmaskclr;
+ u32 __reserved_1[20];
+
+ struct {
+ u32 access;
+#define USERACCESS_GO BIT(31)
+#define USERACCESS_WRITE BIT(30)
+#define USERACCESS_ACK BIT(29)
+#define USERACCESS_READ (0)
+#define USERACCESS_DATA (0xffff)
+
+ u32 physel;
+ } user[0];
+};
+
+static struct mdio_platform_data default_pdata = {
+ .bus_freq = DEF_OUT_FREQ,
+};
+
+struct davinci_mdio_data {
+ struct mdio_platform_data pdata;
+ struct davinci_mdio_regs __iomem *regs;
+ spinlock_t lock;
+ struct clk *clk;
+ struct device *dev;
+ struct mii_bus *bus;
+ bool suspended;
+ unsigned long access_time; /* jiffies */
+};
+
+static void __davinci_mdio_reset(struct davinci_mdio_data *data)
+{
+ u32 mdio_in, div, mdio_out_khz, access_time;
+
+ mdio_in = clk_get_rate(data->clk);
+ div = (mdio_in / data->pdata.bus_freq) - 1;
+ if (div > CONTROL_MAX_DIV)
+ div = CONTROL_MAX_DIV;
+
+ /* set enable and clock divider */
+ __raw_writel(div | CONTROL_ENABLE, &data->regs->control);
+
+ /*
+ * One mdio transaction consists of:
+ * 32 bits of preamble
+ * 32 bits of transferred data
+ * 24 bits of bus yield (not needed unless shared?)
+ */
+ mdio_out_khz = mdio_in / (1000 * (div + 1));
+ access_time = (88 * 1000) / mdio_out_khz;
+
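+ /*
+ * For illustration, with a (hypothetical) 75 MHz input clock and
+ * the default 2.2 MHz bus frequency:
+ *	div          = 75000000 / 2200000 - 1  = 33
+ *	mdio_out_khz = 75000000 / (1000 * 34) ~= 2205
+ *	access_time  = 88000 / 2205           ~= 39 usecs
+ */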
+ /*
+ * In the worst case, we could be kicking off a user-access immediately
+ * after the mdio bus scan state-machine triggered its own read. If
+ * so, our request could get deferred by one access cycle. We
+ * defensively allow for 4 access cycles.
+ */
+ data->access_time = usecs_to_jiffies(access_time * 4);
+ if (!data->access_time)
+ data->access_time = 1;
+}
+
+static int davinci_mdio_reset(struct mii_bus *bus)
+{
+ struct davinci_mdio_data *data = bus->priv;
+ u32 phy_mask, ver;
+
+ __davinci_mdio_reset(data);
+
+ /* wait for scan logic to settle */
+ msleep(PHY_MAX_ADDR * data->access_time);
+
+ /* dump hardware version info */
+ ver = __raw_readl(&data->regs->version);
+ dev_info(data->dev, "davinci mdio revision %d.%d\n",
+ (ver >> 8) & 0xff, ver & 0xff);
+
+ /* get phy mask from the alive register */
+ phy_mask = __raw_readl(&data->regs->alive);
+ if (phy_mask) {
+ /* restrict mdio bus to live phys only */
+ dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
+ phy_mask = ~phy_mask;
+ } else {
+ /* desperately scan all phys */
+ dev_warn(data->dev, "no live phy, scanning all\n");
+ phy_mask = 0;
+ }
+ data->bus->phy_mask = phy_mask;
+
+ return 0;
+}
+
+/* wait until hardware is ready for another user access */
+static inline int wait_for_user_access(struct davinci_mdio_data *data)
+{
+ struct davinci_mdio_regs __iomem *regs = data->regs;
+ unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
+ u32 reg;
+
+ while (time_after(timeout, jiffies)) {
+ reg = __raw_readl(&regs->user[0].access);
+ if ((reg & USERACCESS_GO) == 0)
+ return 0;
+
+ reg = __raw_readl(&regs->control);
+ if ((reg & CONTROL_IDLE) == 0)
+ continue;
+
+ /*
+ * An emac soft_reset may have clobbered the mdio controller's
+ * state machine. We need to reset and retry the current
+ * operation
+ */
+ dev_warn(data->dev, "resetting idled controller\n");
+ __davinci_mdio_reset(data);
+ return -EAGAIN;
+ }
+ dev_err(data->dev, "timed out waiting for user access\n");
+ return -ETIMEDOUT;
+}
+
+/* wait until hardware state machine is idle */
+static inline int wait_for_idle(struct davinci_mdio_data *data)
+{
+ struct davinci_mdio_regs __iomem *regs = data->regs;
+ unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
+
+ while (time_after(timeout, jiffies)) {
+ if (__raw_readl(&regs->control) & CONTROL_IDLE)
+ return 0;
+ }
+ dev_err(data->dev, "timed out waiting for idle\n");
+ return -ETIMEDOUT;
+}
+
+static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
+{
+ struct davinci_mdio_data *data = bus->priv;
+ u32 reg;
+ int ret;
+
+ if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
+ return -EINVAL;
+
+ spin_lock(&data->lock);
+
+ if (data->suspended) {
+ spin_unlock(&data->lock);
+ return -ENODEV;
+ }
+
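+ /* build the USERACCESS word: GO in bit 31, read/write select in
+ * bit 30, the register number in bits 25-21 and the phy address in
+ * bits 20-16 (for writes, data occupies bits 15-0) */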
+ reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
+ (phy_id << 16));
+
+ while (1) {
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ break;
+
+ __raw_writel(reg, &data->regs->user[0].access);
+
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ break;
+
+ reg = __raw_readl(&data->regs->user[0].access);
+ ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
+ break;
+ }
+
+ spin_unlock(&data->lock);
+
+ return ret;
+}
+
+static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
+ int phy_reg, u16 phy_data)
+{
+ struct davinci_mdio_data *data = bus->priv;
+ u32 reg;
+ int ret;
+
+ if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
+ return -EINVAL;
+
+ spin_lock(&data->lock);
+
+ if (data->suspended) {
+ spin_unlock(&data->lock);
+ return -ENODEV;
+ }
+
+ reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
+ (phy_id << 16) | (phy_data & USERACCESS_DATA));
+
+ while (1) {
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ break;
+
+ __raw_writel(reg, &data->regs->user[0].access);
+
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ break;
+ }
+
+ spin_unlock(&data->lock);
+
+ return ret;
+}
+
+static int __devinit davinci_mdio_probe(struct platform_device *pdev)
+{
+ struct mdio_platform_data *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct davinci_mdio_data *data;
+ struct resource *res;
+ struct phy_device *phy;
+ int ret, addr;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(dev, "failed to alloc device data\n");
+ return -ENOMEM;
+ }
+
+ data->pdata = pdata ? (*pdata) : default_pdata;
+
+ data->bus = mdiobus_alloc();
+ if (!data->bus) {
+ dev_err(dev, "failed to alloc mii bus\n");
+ ret = -ENOMEM;
+ goto bail_out;
+ }
+
+ data->bus->name = dev_name(dev);
+ data->bus->read = davinci_mdio_read;
+ data->bus->write = davinci_mdio_write;
+ data->bus->reset = davinci_mdio_reset;
+ data->bus->parent = dev;
+ data->bus->priv = data;
+ snprintf(data->bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+
+ data->clk = clk_get(dev, NULL);
+ if (IS_ERR(data->clk)) {
+ dev_err(dev, "failed to get device clock\n");
+ ret = PTR_ERR(data->clk);
+ data->clk = NULL;
+ goto bail_out;
+ }
+
+ clk_enable(data->clk);
+
+ dev_set_drvdata(dev, data);
+ data->dev = dev;
+ spin_lock_init(&data->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "could not find register map resource\n");
+ ret = -ENOENT;
+ goto bail_out;
+ }
+
+ res = devm_request_mem_region(dev, res->start, resource_size(res),
+ dev_name(dev));
+ if (!res) {
+ dev_err(dev, "could not allocate register map resource\n");
+ ret = -ENXIO;
+ goto bail_out;
+ }
+
+ data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ if (!data->regs) {
+ dev_err(dev, "could not map mdio registers\n");
+ ret = -ENOMEM;
+ goto bail_out;
+ }
+
+ /* register the mii bus */
+ ret = mdiobus_register(data->bus);
+ if (ret)
+ goto bail_out;
+
+ /* scan and dump the bus */
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ phy = data->bus->phy_map[addr];
+ if (phy) {
+ dev_info(dev, "phy[%d]: device %s, driver %s\n",
+ phy->addr, dev_name(&phy->dev),
+ phy->drv ? phy->drv->name : "unknown");
+ }
+ }
+
+ return 0;
+
+bail_out:
+ if (data->bus)
+ mdiobus_free(data->bus);
+
+ if (data->clk) {
+ clk_disable(data->clk);
+ clk_put(data->clk);
+ }
+
+ kfree(data);
+
+ return ret;
+}
+
+static int __devexit davinci_mdio_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+
+ if (data->bus)
+ mdiobus_free(data->bus);
+
+ if (data->clk) {
+ clk_disable(data->clk);
+ clk_put(data->clk);
+ }
+
+ dev_set_drvdata(dev, NULL);
+
+ kfree(data);
+
+ return 0;
+}
+
+static int davinci_mdio_suspend(struct device *dev)
+{
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+ u32 ctrl;
+
+ spin_lock(&data->lock);
+
+ /* shutdown the scan state machine */
+ ctrl = __raw_readl(&data->regs->control);
+ ctrl &= ~CONTROL_ENABLE;
+ __raw_writel(ctrl, &data->regs->control);
+ wait_for_idle(data);
+
+ if (data->clk)
+ clk_disable(data->clk);
+
+ data->suspended = true;
+ spin_unlock(&data->lock);
+
+ return 0;
+}
+
+static int davinci_mdio_resume(struct device *dev)
+{
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+ u32 ctrl;
+
+ spin_lock(&data->lock);
+ if (data->clk)
+ clk_enable(data->clk);
+
+ /* restart the scan state machine */
+ ctrl = __raw_readl(&data->regs->control);
+ ctrl |= CONTROL_ENABLE;
+ __raw_writel(ctrl, &data->regs->control);
+
+ data->suspended = false;
+ spin_unlock(&data->lock);
+
+ return 0;
+}
+
+static const struct dev_pm_ops davinci_mdio_pm_ops = {
+ .suspend = davinci_mdio_suspend,
+ .resume = davinci_mdio_resume,
+};
+
+static struct platform_driver davinci_mdio_driver = {
+ .driver = {
+ .name = "davinci_mdio",
+ .owner = THIS_MODULE,
+ .pm = &davinci_mdio_pm_ops,
+ },
+ .probe = davinci_mdio_probe,
+ .remove = __devexit_p(davinci_mdio_remove),
+};
+
+static int __init davinci_mdio_init(void)
+{
+ return platform_driver_register(&davinci_mdio_driver);
+}
+device_initcall(davinci_mdio_init);
+
+static void __exit davinci_mdio_exit(void)
+{
+ platform_driver_unregister(&davinci_mdio_driver);
+}
+module_exit(davinci_mdio_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DaVinci MDIO driver");
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
new file mode 100644
index 000000000000..9c0dd6b8d6c9
--- /dev/null
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -0,0 +1,3258 @@
+/*******************************************************************************
+ *
+ * Linux ThunderLAN Driver
+ *
+ * tlan.c
+ * by James Banks
+ *
+ * (C) 1997-1998 Caldera, Inc.
+ * (C) 1998 James Banks
+ * (C) 1999-2001 Torben Mathiasen
+ * (C) 2002 Samuel Chessman
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ ** Useful (if not required) reading:
+ *
+ * Texas Instruments, ThunderLAN Programmer's Guide,
+ * TI Literature Number SPWU013A
+ * available in PDF format from www.ti.com
+ * Level One, LXT901 and LXT970 Data Sheets
+ * available in PDF format from www.level1.com
+ * National Semiconductor, DP83840A Data Sheet
+ * available in PDF format from www.national.com
+ * Microchip Technology, 24C01A/02A/04A Data Sheet
+ * available in PDF format from www.microchip.com
+ *
+ ******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/eisa.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+
+#include "tlan.h"
+
+
+/* For removing EISA devices */
+static struct net_device *tlan_eisa_devices;
+
+static int tlan_devices_installed;
+
+/* Set speed, duplex and aui settings */
+static int aui[MAX_TLAN_BOARDS];
+static int duplex[MAX_TLAN_BOARDS];
+static int speed[MAX_TLAN_BOARDS];
+static int boards_found;
+module_param_array(aui, int, NULL, 0);
+module_param_array(duplex, int, NULL, 0);
+module_param_array(speed, int, NULL, 0);
+MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
+MODULE_PARM_DESC(duplex,
+ "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
+MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");
+
+MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
+MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
+MODULE_LICENSE("GPL");
+
+
+/* Define this to enable Link beat monitoring */
+#undef MONITOR
+
+/* Turn on debugging. See Documentation/networking/tlan.txt for details */
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
+
+static const char tlan_signature[] = "TLAN";
+static const char tlan_banner[] = "ThunderLAN driver v1.17\n";
+static int tlan_have_pci;
+static int tlan_have_eisa;
+
+static const char * const media[] = {
+ "10BaseT-HD", "10BaseT-FD", "100baseTx-HD",
+ "100BaseTx-FD", "100BaseT4", NULL
+};
+
+static struct board {
+ const char *device_label;
+ u32 flags;
+ u16 addr_ofs;
+} board_info[] = {
+ { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+ { "Compaq Netelligent 10/100 TX PCI UTP",
+ TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+ { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq NetFlex-3/P",
+ TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
+ { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq Netelligent Integrated 10/100 TX UTP",
+ TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+ { "Compaq Netelligent Dual 10/100 TX PCI UTP",
+ TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq Netelligent 10/100 TX Embedded UTP",
+ TLAN_ADAPTER_NONE, 0x83 },
+ { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
+ { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
+ { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
+ { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+ { "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq NetFlex-3/E",
+ TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */
+ TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
+ { "Compaq NetFlex-3/E",
+ TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
+};
+
+static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
+ { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
+ { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
+ { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
+ { 0,}
+};
+MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);
+
+static void tlan_eisa_probe(void);
+static void tlan_eisa_cleanup(void);
+static int tlan_init(struct net_device *);
+static int tlan_open(struct net_device *dev);
+static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
+static irqreturn_t tlan_handle_interrupt(int, void *);
+static int tlan_close(struct net_device *);
+static struct net_device_stats *tlan_get_stats(struct net_device *);
+static void tlan_set_multicast_list(struct net_device *);
+static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
+ int irq, int rev, const struct pci_device_id *ent);
+static void tlan_tx_timeout(struct net_device *dev);
+static void tlan_tx_timeout_work(struct work_struct *work);
+static int tlan_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+
+static u32 tlan_handle_tx_eof(struct net_device *, u16);
+static u32 tlan_handle_stat_overflow(struct net_device *, u16);
+static u32 tlan_handle_rx_eof(struct net_device *, u16);
+static u32 tlan_handle_dummy(struct net_device *, u16);
+static u32 tlan_handle_tx_eoc(struct net_device *, u16);
+static u32 tlan_handle_status_check(struct net_device *, u16);
+static u32 tlan_handle_rx_eoc(struct net_device *, u16);
+
+static void tlan_timer(unsigned long);
+
+static void tlan_reset_lists(struct net_device *);
+static void tlan_free_lists(struct net_device *);
+static void tlan_print_dio(u16);
+static void tlan_print_list(struct tlan_list *, char *, int);
+static void tlan_read_and_clear_stats(struct net_device *, int);
+static void tlan_reset_adapter(struct net_device *);
+static void tlan_finish_reset(struct net_device *);
+static void tlan_set_mac(struct net_device *, int areg, char *mac);
+
+static void tlan_phy_print(struct net_device *);
+static void tlan_phy_detect(struct net_device *);
+static void tlan_phy_power_down(struct net_device *);
+static void tlan_phy_power_up(struct net_device *);
+static void tlan_phy_reset(struct net_device *);
+static void tlan_phy_start_link(struct net_device *);
+static void tlan_phy_finish_auto_neg(struct net_device *);
+#ifdef MONITOR
+static void tlan_phy_monitor(struct net_device *);
+#endif
+
+/*
+ static int tlan_phy_nop(struct net_device *);
+ static int tlan_phy_internal_check(struct net_device *);
+ static int tlan_phy_internal_service(struct net_device *);
+ static int tlan_phy_dp83840a_check(struct net_device *);
+*/
+
+static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
+static void tlan_mii_send_data(u16, u32, unsigned);
+static void tlan_mii_sync(u16);
+static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);
+
+static void tlan_ee_send_start(u16);
+static int tlan_ee_send_byte(u16, u8, int);
+static void tlan_ee_receive_byte(u16, u8 *, int);
+static int tlan_ee_read_byte(struct net_device *, u8, u8 *);
+
+
+static inline void
+tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
+{
+ unsigned long addr = (unsigned long)skb;
+ tag->buffer[9].address = addr;
+ tag->buffer[8].address = upper_32_bits(addr);
+}
+
+static inline struct sk_buff *
+tlan_get_skb(const struct tlan_list *tag)
+{
+ unsigned long addr;
+
+ addr = tag->buffer[9].address;
+ addr |= (tag->buffer[8].address << 16) << 16;
+ return (struct sk_buff *) addr;
+}
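+
+/*
+ * Editor's sketch (not part of the original driver): the skb pointer is
+ * stashed in the otherwise-unused buffer slots 8 and 9 of a list entry
+ * so it can be recovered at completion time without a shadow array.
+ * On a 32-bit kernel, skb == (struct sk_buff *)0x8812a000 stores as
+ *
+ *	buffer[9].address = 0x8812a000;	 low 32 bits
+ *	buffer[8].address = 0x00000000;	 upper_32_bits() of a 32-bit
+ *					 value is always 0
+ *
+ * and the (x << 16) << 16 in tlan_get_skb above keeps the expression
+ * well-defined where a single shift by 32 would be undefined
+ * behaviour in C.
+ */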
+
+static u32
+(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
+ NULL,
+ tlan_handle_tx_eof,
+ tlan_handle_stat_overflow,
+ tlan_handle_rx_eof,
+ tlan_handle_dummy,
+ tlan_handle_tx_eoc,
+ tlan_handle_status_check,
+ tlan_handle_rx_eoc
+};
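+
+/*
+ * Editor's note (sketch, not in the original sources): the interrupt
+ * handler indexes this table with the type field extracted from the
+ * HOST_INT word, roughly
+ *
+ *	type = (host_int & TLAN_HI_IT_MASK) >> 2;
+ *	ack  = tlan_int_vector[type](dev, host_int);
+ *
+ * Entry 0 means "no interrupt" and is never dispatched; see
+ * tlan_handle_interrupt() below.
+ */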
+
+static inline void
+tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ unsigned long flags = 0;
+
+ if (!in_irq())
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->timer.function != NULL &&
+ priv->timer_type != TLAN_TIMER_ACTIVITY) {
+ if (!in_irq())
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return;
+ }
+ priv->timer.function = tlan_timer;
+ if (!in_irq())
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ priv->timer.data = (unsigned long) dev;
+ priv->timer_set_at = jiffies;
+ priv->timer_type = type;
+ mod_timer(&priv->timer, jiffies + ticks);
+
+}
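+
+/*
+ * Editor's usage sketch (assuming the TLAN_TIMER_* constants from
+ * tlan.h): arming a one-shot PHY power-up event half a second out
+ * would look like
+ *
+ *	tlan_set_timer(dev, msecs_to_jiffies(500), TLAN_TIMER_PHY_PUP);
+ *
+ * Note that a pending timer of any non-activity type wins: the new
+ * request is silently dropped in that case.
+ */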
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver primary functions
+
+These functions are more or less common to all Linux network drivers.
+
+******************************************************************************
+*****************************************************************************/
+
+
+
+
+
+/***************************************************************
+ * tlan_remove_one
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		pdev	The PCI device struct of the
+ *			adapter being removed.
+ *
+ *	Frees the device struct and the memory associated
+ *	with the adapter (lists and buffers). It also
+ *	unreserves the IO port regions associated with
+ *	this device.
+ *
+ **************************************************************/
+
+
+static void __devexit tlan_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tlan_priv *priv = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ if (priv->dma_storage) {
+ pci_free_consistent(priv->pci_dev,
+ priv->dma_size, priv->dma_storage,
+ priv->dma_storage_dma);
+ }
+
+#ifdef CONFIG_PCI
+ pci_release_regions(pdev);
+#endif
+
+ free_netdev(dev);
+
+ pci_set_drvdata(pdev, NULL);
+}
+
+static void tlan_start(struct net_device *dev)
+{
+ tlan_reset_lists(dev);
+ /* NOTE: It might not be necessary to read the stats before a
+ reset if you don't care what the values are.
+ */
+ tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+ tlan_reset_adapter(dev);
+ netif_wake_queue(dev);
+}
+
+static void tlan_stop(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
+ outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
+ /* Reset and power down phy */
+ tlan_reset_adapter(dev);
+ if (priv->timer.function != NULL) {
+ del_timer_sync(&priv->timer);
+ priv->timer.function = NULL;
+ }
+}
+
+#ifdef CONFIG_PM
+
+static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (netif_running(dev))
+ tlan_stop(dev);
+
+ netif_device_detach(dev);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int tlan_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_wake(pdev, 0, 0);
+ netif_device_attach(dev);
+
+ if (netif_running(dev))
+ tlan_start(dev);
+
+ return 0;
+}
+
+#else /* CONFIG_PM */
+
+#define tlan_suspend NULL
+#define tlan_resume NULL
+
+#endif /* CONFIG_PM */
+
+
+static struct pci_driver tlan_driver = {
+ .name = "tlan",
+ .id_table = tlan_pci_tbl,
+ .probe = tlan_init_one,
+ .remove = __devexit_p(tlan_remove_one),
+ .suspend = tlan_suspend,
+ .resume = tlan_resume,
+};
+
+static int __init tlan_probe(void)
+{
+ int rc = -ENODEV;
+
+ pr_info("%s", tlan_banner);
+
+ TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");
+
+ /* Use new style PCI probing. Now the kernel will
+ do most of this for us */
+ rc = pci_register_driver(&tlan_driver);
+
+ if (rc != 0) {
+ pr_err("Could not register pci driver\n");
+ goto err_out_pci_free;
+ }
+
+ TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
+ tlan_eisa_probe();
+
+ pr_info("%d device%s installed, PCI: %d EISA: %d\n",
+ tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
+ tlan_have_pci, tlan_have_eisa);
+
+ if (tlan_devices_installed == 0) {
+ rc = -ENODEV;
+ goto err_out_pci_unreg;
+ }
+ return 0;
+
+err_out_pci_unreg:
+ pci_unregister_driver(&tlan_driver);
+err_out_pci_free:
+ return rc;
+}
+
+
+static int __devinit tlan_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ return tlan_probe1(pdev, -1, -1, 0, ent);
+}
+
+
+/*
+***************************************************************
+* tlan_probe1
+*
+* Returns:
+* 0 on success, error code on error
+* Parms:
+*	pdev	PCI device struct, or NULL for EISA adapters.
+*	ioaddr	IO base address (EISA only; -1 for PCI).
+*	irq	IRQ line (EISA only; -1 for PCI).
+*	rev	Adapter revision (EISA only).
+*	ent	PCI device table entry, or NULL for EISA.
+*
+* The name is lower case to fit in with all the rest of
+* the netcard_probe names. This function looks for
+* another TLan based adapter, setting it up with the
+* allocated device struct if one is found.
+* tlan_probe has been ported to the new net API and
+* now allocates its own device structure. This function
+* is also used by modules.
+*
+**************************************************************/
+
+static int __devinit tlan_probe1(struct pci_dev *pdev,
+ long ioaddr, int irq, int rev,
+ const struct pci_device_id *ent)
+{
+
+ struct net_device *dev;
+ struct tlan_priv *priv;
+ u16 device_id;
+ int reg, rc = -ENODEV;
+
+#ifdef CONFIG_PCI
+ if (pdev) {
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pci_request_regions(pdev, tlan_signature);
+ if (rc) {
+ pr_err("Could not reserve IO regions\n");
+ goto err_out;
+ }
+ }
+#endif /* CONFIG_PCI */
+
+ dev = alloc_etherdev(sizeof(struct tlan_priv));
+ if (dev == NULL) {
+ pr_err("Could not allocate memory for device\n");
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ priv = netdev_priv(dev);
+
+ priv->pci_dev = pdev;
+ priv->dev = dev;
+
+ /* Is this a PCI device? */
+ if (pdev) {
+ u32 pci_io_base = 0;
+
+ priv->adapter = &board_info[ent->driver_data];
+
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ pr_err("No suitable PCI mapping available\n");
+ goto err_out_free_dev;
+ }
+
+ for (reg = 0; reg <= 5; reg++) {
+ if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
+ pci_io_base = pci_resource_start(pdev, reg);
+ TLAN_DBG(TLAN_DEBUG_GNRL,
+ "IO mapping is available at %x.\n",
+ pci_io_base);
+ break;
+ }
+ }
+ if (!pci_io_base) {
+ pr_err("No IO mappings available\n");
+ rc = -EIO;
+ goto err_out_free_dev;
+ }
+
+ dev->base_addr = pci_io_base;
+ dev->irq = pdev->irq;
+ priv->adapter_rev = pdev->revision;
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, dev);
+
+ } else { /* EISA card */
+ /* This is a hack. We need to know which board structure
+ * is suited for this adapter */
+ device_id = inw(ioaddr + EISA_ID2);
+ priv->is_eisa = 1;
+ if (device_id == 0x20F1) {
+ priv->adapter = &board_info[13]; /* NetFlex-3/E */
+ priv->adapter_rev = 23; /* TLAN 2.3 */
+ } else {
+ priv->adapter = &board_info[14];
+ priv->adapter_rev = 10; /* TLAN 1.0 */
+ }
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ }
+
+ /* Kernel parameters */
+ if (dev->mem_start) {
+ priv->aui = dev->mem_start & 0x01;
+ priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0
+ : (dev->mem_start & 0x06) >> 1;
+ priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
+ : (dev->mem_start & 0x18) >> 3;
+
+ if (priv->speed == 0x1)
+ priv->speed = TLAN_SPEED_10;
+ else if (priv->speed == 0x2)
+ priv->speed = TLAN_SPEED_100;
+
+ debug = priv->debug = dev->mem_end;
+ } else {
+ priv->aui = aui[boards_found];
+ priv->speed = speed[boards_found];
+ priv->duplex = duplex[boards_found];
+ priv->debug = debug;
+ }
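+
+	/*
+	 * Editor's example (not in the original sources): with the bit
+	 * encoding above, dev->mem_start = 0x12 (binary 10010) decodes
+	 * as aui = 0, duplex = 1 (half) and speed = 2, which the fixup
+	 * above then maps to TLAN_SPEED_100.
+	 */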
+
+ /* This will be used when we get an adapter error from
+ * within our irq handler */
+ INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);
+
+ spin_lock_init(&priv->lock);
+
+ rc = tlan_init(dev);
+ if (rc) {
+ pr_err("Could not set up device\n");
+ goto err_out_free_dev;
+ }
+
+ rc = register_netdev(dev);
+ if (rc) {
+ pr_err("Could not register device\n");
+ goto err_out_uninit;
+ }
+
+
+ tlan_devices_installed++;
+ boards_found++;
+
+ /* pdev is NULL if this is an EISA device */
+ if (pdev)
+ tlan_have_pci++;
+ else {
+ priv->next_device = tlan_eisa_devices;
+ tlan_eisa_devices = dev;
+ tlan_have_eisa++;
+ }
+
+ netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n",
+ (int)dev->irq,
+ (int)dev->base_addr,
+ priv->adapter->device_label,
+ priv->adapter_rev);
+ return 0;
+
+err_out_uninit:
+ pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
+ priv->dma_storage_dma);
+err_out_free_dev:
+ free_netdev(dev);
+err_out_regions:
+#ifdef CONFIG_PCI
+ if (pdev)
+ pci_release_regions(pdev);
+#endif
+err_out:
+ if (pdev)
+ pci_disable_device(pdev);
+ return rc;
+}
+
+
+static void tlan_eisa_cleanup(void)
+{
+ struct net_device *dev;
+ struct tlan_priv *priv;
+
+ while (tlan_have_eisa) {
+ dev = tlan_eisa_devices;
+ priv = netdev_priv(dev);
+ if (priv->dma_storage) {
+ pci_free_consistent(priv->pci_dev, priv->dma_size,
+ priv->dma_storage,
+ priv->dma_storage_dma);
+ }
+ release_region(dev->base_addr, 0x10);
+ unregister_netdev(dev);
+ tlan_eisa_devices = priv->next_device;
+ free_netdev(dev);
+ tlan_have_eisa--;
+ }
+}
+
+
+static void __exit tlan_exit(void)
+{
+ pci_unregister_driver(&tlan_driver);
+
+ if (tlan_have_eisa)
+ tlan_eisa_cleanup();
+
+}
+
+
+/* Module loading/unloading */
+module_init(tlan_probe);
+module_exit(tlan_exit);
+
+
+
+/**************************************************************
+ * tlan_eisa_probe
+ *
+ * Returns: Nothing
+ *
+ * Parms: None
+ *
+ * This function probes for EISA devices and calls
+ * tlan_probe1 when one is found.
+ *
+ *************************************************************/
+
+static void __init tlan_eisa_probe(void)
+{
+ long ioaddr;
+ int rc = -ENODEV;
+ int irq;
+ u16 device_id;
+
+ if (!EISA_bus) {
+ TLAN_DBG(TLAN_DEBUG_PROBE, "No EISA bus present\n");
+ return;
+ }
+
+ /* Loop through all slots of the EISA bus */
+ for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
+
+ TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+ (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
+ TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+ (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));
+
+
+ TLAN_DBG(TLAN_DEBUG_PROBE,
+ "Probing for EISA adapter at IO: 0x%4x : ",
+ (int) ioaddr);
+ if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
+ goto out;
+
+ if (inw(ioaddr + EISA_ID) != 0x110E) {
+ release_region(ioaddr, 0x10);
+ goto out;
+ }
+
+ device_id = inw(ioaddr + EISA_ID2);
+ if (device_id != 0x20F1 && device_id != 0x40F1) {
+ release_region(ioaddr, 0x10);
+ goto out;
+ }
+
+ /* check if adapter is enabled */
+ if (inb(ioaddr + EISA_CR) != 0x1) {
+ release_region(ioaddr, 0x10);
+ goto out2;
+ }
+
+ if (debug == 0x10)
+ pr_info("Found one\n");
+
+
+ /* Get irq from board */
+ switch (inb(ioaddr + 0xcc0)) {
+ case(0x10):
+ irq = 5;
+ break;
+ case(0x20):
+ irq = 9;
+ break;
+ case(0x40):
+ irq = 10;
+ break;
+ case(0x80):
+ irq = 11;
+ break;
+ default:
+ goto out;
+ }
+
+
+ /* Setup the newly found eisa adapter */
+ rc = tlan_probe1(NULL, ioaddr, irq,
+ 12, NULL);
+ continue;
+
+out:
+ if (debug == 0x10)
+ pr_info("None found\n");
+ continue;
+
+out2:
+ if (debug == 0x10)
+ pr_info("Card found but it is not enabled, skipping\n");
+ continue;
+
+ }
+
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void tlan_poll(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ tlan_handle_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static const struct net_device_ops tlan_netdev_ops = {
+ .ndo_open = tlan_open,
+ .ndo_stop = tlan_close,
+ .ndo_start_xmit = tlan_start_tx,
+ .ndo_tx_timeout = tlan_tx_timeout,
+ .ndo_get_stats = tlan_get_stats,
+ .ndo_set_rx_mode = tlan_set_multicast_list,
+ .ndo_do_ioctl = tlan_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = tlan_poll,
+#endif
+};
+
+
+
+/***************************************************************
+ * tlan_init
+ *
+ * Returns:
+ * 0 on success, error code otherwise.
+ * Parms:
+ * dev The structure of the device to be
+ * init'ed.
+ *
+ * This function completes the initialization of the
+ * device structure and driver. It reserves the IO
+ * addresses, allocates memory for the lists and bounce
+ * buffers, retrieves the MAC address from the eeprom
+ * and assigns the device's methods.
+ *
+ **************************************************************/
+
+static int tlan_init(struct net_device *dev)
+{
+ int dma_size;
+ int err;
+ int i;
+ struct tlan_priv *priv;
+
+ priv = netdev_priv(dev);
+
+ dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
+ * (sizeof(struct tlan_list));
+ priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
+ dma_size,
+ &priv->dma_storage_dma);
+ priv->dma_size = dma_size;
+
+ if (priv->dma_storage == NULL) {
+ pr_err("Could not allocate lists and buffers for %s\n",
+ dev->name);
+ return -ENOMEM;
+ }
+ memset(priv->dma_storage, 0, dma_size);
+ priv->rx_list = (struct tlan_list *)
+ ALIGN((unsigned long)priv->dma_storage, 8);
+ priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
+ priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
+ priv->tx_list_dma =
+ priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;
+
+ err = 0;
+ for (i = 0; i < 6 ; i++)
+ err |= tlan_ee_read_byte(dev,
+ (u8) priv->adapter->addr_ofs + i,
+ (u8 *) &dev->dev_addr[i]);
+ if (err) {
+ pr_err("%s: Error reading MAC from eeprom: %d\n",
+ dev->name, err);
+ }
+ dev->addr_len = 6;
+
+ netif_carrier_off(dev);
+
+ /* Device methods */
+ dev->netdev_ops = &tlan_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ return 0;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_open
+ *
+ * Returns:
+ * 0 on success, error code otherwise.
+ * Parms:
+ * dev Structure of device to be opened.
+ *
+ * This routine puts the driver and TLAN adapter in a
+ * state where it is ready to send and receive packets.
+ * It allocates the IRQ, resets and brings the adapter
+ * out of reset, and allows interrupts. It also delays
+ * the startup for autonegotiation or sends a Rx GO
+ * command to the adapter, as appropriate.
+ *
+ **************************************************************/
+
+static int tlan_open(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ int err;
+
+ priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
+ err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
+ dev->name, dev);
+
+ if (err) {
+ netdev_err(dev, "Cannot open because IRQ %d is already in use\n",
+ dev->irq);
+ return err;
+ }
+
+ init_timer(&priv->timer);
+
+ tlan_start(dev);
+
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
+ dev->name, priv->tlan_rev);
+
+ return 0;
+
+}
+
+
+
+/**************************************************************
+ * tlan_ioctl
+ *
+ * Returns:
+ * 0 on success, error code otherwise
+ * Params:
+ * dev structure of device to receive ioctl.
+ *
+ * rq ifreq structure to hold userspace data.
+ *
+ * cmd ioctl command.
+ *
+ *
+ *************************************************************/
+
+static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(rq);
+ u32 phy = priv->phy[priv->phy_num];
+
+ if (!priv->phy_online)
+ return -EAGAIN;
+
+ switch (cmd) {
+ case SIOCGMIIPHY: /* get address of MII PHY in use. */
+ data->phy_id = phy;
+		/* fall through: also perform the register read below */
+ case SIOCGMIIREG: /* read MII PHY register. */
+ tlan_mii_read_reg(dev, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, &data->val_out);
+ return 0;
+
+
+ case SIOCSMIIREG: /* write MII PHY register. */
+ tlan_mii_write_reg(dev, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, data->val_in);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+
+/***************************************************************
+ * tlan_tx_timeout
+ *
+ * Returns: nothing
+ *
+ * Params:
+ * dev structure of device which timed out
+ * during transmit.
+ *
+ **************************************************************/
+
+static void tlan_tx_timeout(struct net_device *dev)
+{
+
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
+
+	/* OK, so we timed out; let's see what we can do about it... */
+ tlan_free_lists(dev);
+ tlan_reset_lists(dev);
+ tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+ tlan_reset_adapter(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+
+}
+
+
+/***************************************************************
+ * tlan_tx_timeout_work
+ *
+ * Returns: nothing
+ *
+ * Params:
+ * work work item of device which timed out
+ *
+ **************************************************************/
+
+static void tlan_tx_timeout_work(struct work_struct *work)
+{
+ struct tlan_priv *priv =
+ container_of(work, struct tlan_priv, tlan_tqueue);
+
+ tlan_tx_timeout(priv->dev);
+}
+
+
+
+/***************************************************************
+ * tlan_start_tx
+ *
+ * Returns:
+ * 0 on success, non-zero on failure.
+ * Parms:
+ * skb A pointer to the sk_buff containing the
+ * frame to be sent.
+ * dev The device to send the data on.
+ *
+ * This function adds a frame to the Tx list to be sent
+ * ASAP. First it verifies that the adapter is ready and
+ * there is room in the queue. Then it sets up the next
+ * available list, copies the frame to the corresponding
+ * buffer. If the adapter Tx channel is idle, it gives
+ * the adapter a Tx Go command on the list, otherwise it
+ * sets the forward address of the previous list to point
+ * to this one. Then it frees the sk_buff.
+ *
+ **************************************************************/
+
+static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ dma_addr_t tail_list_phys;
+ struct tlan_list *tail_list;
+ unsigned long flags;
+ unsigned int txlen;
+
+ if (!priv->phy_online) {
+ TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
+ dev->name);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
+ return NETDEV_TX_OK;
+ txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
+
+ tail_list = priv->tx_list + priv->tx_tail;
+ tail_list_phys =
+ priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;
+
+ if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
+ dev->name, priv->tx_head, priv->tx_tail);
+ netif_stop_queue(dev);
+ priv->tx_busy_count++;
+ return NETDEV_TX_BUSY;
+ }
+
+ tail_list->forward = 0;
+
+ tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
+ skb->data, txlen,
+ PCI_DMA_TODEVICE);
+ tlan_store_skb(tail_list, skb);
+
+ tail_list->frame_size = (u16) txlen;
+ tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
+ tail_list->buffer[1].count = 0;
+ tail_list->buffer[1].address = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ tail_list->c_stat = TLAN_CSTAT_READY;
+ if (!priv->tx_in_progress) {
+ priv->tx_in_progress = 1;
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Starting TX on buffer %d\n",
+ priv->tx_tail);
+ outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
+ outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
+ } else {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Adding buffer %d to TX channel\n",
+ priv->tx_tail);
+ if (priv->tx_tail == 0) {
+ (priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
+ = tail_list_phys;
+ } else {
+ (priv->tx_list + (priv->tx_tail - 1))->forward
+ = tail_list_phys;
+ }
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);
+
+ return NETDEV_TX_OK;
+
+}
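+
+/*
+ * Editor's note (not in the original sources): the Tx channel is chained
+ * lazily. The first frame after idle gets an explicit GO doorbell, while
+ * later frames are linked in by pointing the previous list's forward
+ * field at the new entry, e.g. for tx_tail == 3:
+ *
+ *	(priv->tx_list + 2)->forward = tail_list_phys;
+ *
+ * so the adapter walks the chain without another HOST_CMD write.
+ */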
+
+
+
+
+/***************************************************************
+ * tlan_handle_interrupt
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * irq The line on which the interrupt
+ * occurred.
+ * dev_id A pointer to the device assigned to
+ * this irq line.
+ *
+ * This function handles an interrupt generated by its
+ * assigned TLAN adapter. The function deactivates
+ * interrupts on its adapter, records the type of
+ * interrupt, executes the appropriate subhandler, and
+ * acknowledges the interrupt to the adapter (thus
+ * re-enabling adapter interrupts).
+ *
+ **************************************************************/
+
+static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 host_int;
+ u16 type;
+
+ spin_lock(&priv->lock);
+
+ host_int = inw(dev->base_addr + TLAN_HOST_INT);
+ type = (host_int & TLAN_HI_IT_MASK) >> 2;
+ if (type) {
+ u32 ack;
+ u32 host_cmd;
+
+ outw(host_int, dev->base_addr + TLAN_HOST_INT);
+ ack = tlan_int_vector[type](dev, host_int);
+
+ if (ack) {
+ host_cmd = TLAN_HC_ACK | ack | (type << 18);
+ outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
+ }
+ }
+
+ spin_unlock(&priv->lock);
+
+ return IRQ_RETVAL(type);
+}
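+
+/*
+ * Editor's note (sketch): the acknowledge written back to HOST_CMD packs
+ * the handler's return value (frames serviced, plus optional TLAN_HC_GO
+ * or TLAN_HC_RT bits) together with the interrupt type. For one
+ * completed frame of type 1 (Tx EOF) this amounts to
+ *
+ *	host_cmd = TLAN_HC_ACK | 1 | (1 << 18);
+ */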
+
+
+
+
+/***************************************************************
+ * tlan_close
+ *
+ * Returns:
+ *		0; the close itself cannot fail.
+ * Parms:
+ * dev The device structure of the device to
+ * close.
+ *
+ * This function shuts down the adapter. It records any
+ * stats, puts the adapter into reset state, deactivates
+ * its timer as needed, and frees the irq it is using.
+ *
+ **************************************************************/
+
+static int tlan_close(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+
+ priv->neg_be_verbose = 0;
+ tlan_stop(dev);
+
+ free_irq(dev->irq, dev);
+ tlan_free_lists(dev);
+ TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);
+
+ return 0;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_get_stats
+ *
+ * Returns:
+ * A pointer to the device's statistics structure.
+ * Parms:
+ * dev The device structure to return the
+ * stats for.
+ *
+ * This function updates the devices statistics by reading
+ * the TLAN chip's onboard registers. Then it returns the
+ * address of the statistics structure.
+ *
+ **************************************************************/
+
+static struct net_device_stats *tlan_get_stats(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ int i;
+
+ /* Should only read stats if open ? */
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
+
+ TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
+ priv->rx_eoc_count);
+ TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
+ priv->tx_busy_count);
+ if (debug & TLAN_DEBUG_GNRL) {
+ tlan_print_dio(dev->base_addr);
+ tlan_phy_print(dev);
+ }
+ if (debug & TLAN_DEBUG_LIST) {
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
+ tlan_print_list(priv->rx_list + i, "RX", i);
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
+ tlan_print_list(priv->tx_list + i, "TX", i);
+ }
+
+ return &dev->stats;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_set_multicast_list
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure to set the
+ * multicast list for.
+ *
+ * This function sets the TLAN adapter to various receive
+ * modes. If the IFF_PROMISC flag is set, promiscuous
+ * mode is activated. Otherwise, promiscuous mode is
+ * turned off. If the IFF_ALLMULTI flag is set, then
+ * the hash table is set to receive all group addresses.
+ * Otherwise, the first three multicast addresses are
+ * stored in AREG_1-3, and the rest are selected via the
+ * hash table, as necessary.
+ *
+ **************************************************************/
+
+static void tlan_set_multicast_list(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ u32 hash1 = 0;
+ u32 hash2 = 0;
+ int i;
+ u32 offset;
+ u8 tmp;
+
+ if (dev->flags & IFF_PROMISC) {
+ tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+ tlan_dio_write8(dev->base_addr,
+ TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
+ } else {
+ tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+ tlan_dio_write8(dev->base_addr,
+ TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
+ if (dev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < 3; i++)
+ tlan_set_mac(dev, i + 1, NULL);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
+ 0xffffffff);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
+ 0xffffffff);
+ } else {
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ if (i < 3) {
+ tlan_set_mac(dev, i + 1,
+ (char *) &ha->addr);
+ } else {
+ offset =
+ tlan_hash_func((u8 *)&ha->addr);
+ if (offset < 32)
+ hash1 |= (1 << offset);
+ else
+ hash2 |= (1 << (offset - 32));
+ }
+ i++;
+ }
+ for ( ; i < 3; i++)
+ tlan_set_mac(dev, i + 1, NULL);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
+ }
+ }
+
+}
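+
+/*
+ * Editor's example (sketch, assuming tlan_hash_func() from tlan.h yields
+ * a 6-bit bucket): a multicast address hashing to offset 37 sets bit 5
+ * of TLAN_HASH_2, while offset 9 sets bit 9 of TLAN_HASH_1:
+ *
+ *	offset < 32:	hash1 |= 1 << offset;
+ *	offset >= 32:	hash2 |= 1 << (offset - 32);
+ */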
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver interrupt vectors and table
+
+Please see chap. 4, "Interrupt Handling" of the "ThunderLAN
+Programmer's Guide" for more information on handling interrupts
+generated by TLAN based adapters.
+
+******************************************************************************
+*****************************************************************************/
+
+
+
+
+/***************************************************************
+ * tlan_handle_tx_eof
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles Tx EOF interrupts which are raised
+ * by the adapter when it has completed sending the
+ * contents of a buffer. It determines which list/buffer
+ * was completed and resets it. If the buffer was the last
+ * in the channel (EOC), then the function checks to see if
+ * another buffer is ready to send, and if so, sends a Tx
+ * Go command. Finally, the driver activates/continues the
+ * activity LED.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ int eoc = 0;
+ struct tlan_list *head_list;
+ dma_addr_t head_list_phys;
+ u32 ack = 0;
+ u16 tmp_c_stat;
+
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
+
+ while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+ && (ack < 255)) {
+ struct sk_buff *skb = tlan_get_skb(head_list);
+
+ ack++;
+ pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
+ max(skb->len,
+ (unsigned int)TLAN_MIN_FRAME_SIZE),
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+ head_list->buffer[8].address = 0;
+ head_list->buffer[9].address = 0;
+
+ if (tmp_c_stat & TLAN_CSTAT_EOC)
+ eoc = 1;
+
+ dev->stats.tx_bytes += head_list->frame_size;
+
+ head_list->c_stat = TLAN_CSTAT_UNUSED;
+ netif_start_queue(dev);
+ CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
+ head_list = priv->tx_list + priv->tx_head;
+ }
+
+ if (!ack)
+ netdev_info(dev,
+ "Received interrupt for uncompleted TX frame\n");
+
+ if (eoc) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
+ head_list_phys = priv->tx_list_dma
+ + sizeof(struct tlan_list)*priv->tx_head;
+ if ((head_list->c_stat & TLAN_CSTAT_READY)
+ == TLAN_CSTAT_READY) {
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
+ ack |= TLAN_HC_GO;
+ } else {
+ priv->tx_in_progress = 0;
+ }
+ }
+
+ if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+ if (priv->timer.function == NULL) {
+ priv->timer.function = tlan_timer;
+ priv->timer.data = (unsigned long) dev;
+ priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
+ priv->timer_set_at = jiffies;
+ priv->timer_type = TLAN_TIMER_ACTIVITY;
+ add_timer(&priv->timer);
+ } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+ priv->timer_set_at = jiffies;
+ }
+ }
+
+ return ack;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_handle_stat_overflow
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Statistics Overflow interrupt
+ * which means that one or more of the TLAN statistics
+ * registers has reached 1/2 capacity and needs to be read.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
+{
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
+
+ return 1;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_handle_rx_eof
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Rx EOF interrupt which
+ * indicates a frame has been received by the adapter from
+ * the net and the frame has been transferred to memory.
+ * The function hands the filled sk_buff to the protocol
+ * stack, maps a freshly allocated sk_buff into the
+ * completed list entry, then resets the used buffer
+ * and appends it to the end
+ * of the list. If the frame was the last in the Rx
+ * channel (EOC), the function restarts the receive channel
+ * by sending an Rx Go command to the adapter. Then it
+ * activates/continues the activity LED.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u32 ack = 0;
+ int eoc = 0;
+ struct tlan_list *head_list;
+ struct sk_buff *skb;
+ struct tlan_list *tail_list;
+ u16 tmp_c_stat;
+ dma_addr_t head_list_phys;
+
+ TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n",
+ priv->rx_head, priv->rx_tail);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys =
+ priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;
+
+ while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+ && (ack < 255)) {
+ dma_addr_t frame_dma = head_list->buffer[0].address;
+ u32 frame_size = head_list->frame_size;
+ struct sk_buff *new_skb;
+
+ ack++;
+ if (tmp_c_stat & TLAN_CSTAT_EOC)
+ eoc = 1;
+
+ new_skb = netdev_alloc_skb_ip_align(dev,
+ TLAN_MAX_FRAME_SIZE + 5);
+ if (!new_skb)
+ goto drop_and_reuse;
+
+ skb = tlan_get_skb(head_list);
+ pci_unmap_single(priv->pci_dev, frame_dma,
+ TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
+ skb_put(skb, frame_size);
+
+ dev->stats.rx_bytes += frame_size;
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+
+ head_list->buffer[0].address =
+ pci_map_single(priv->pci_dev, new_skb->data,
+ TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
+
+ tlan_store_skb(head_list, new_skb);
+drop_and_reuse:
+ head_list->forward = 0;
+ head_list->c_stat = 0;
+ tail_list = priv->rx_list + priv->rx_tail;
+ tail_list->forward = head_list_phys;
+
+ CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
+ CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
+ }
+
+ if (!ack)
+ netdev_info(dev,
+ "Received interrupt for uncompleted RX frame\n");
+
+
+ if (eoc) {
+ TLAN_DBG(TLAN_DEBUG_RX,
+ "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n",
+ priv->rx_head, priv->rx_tail);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
+ ack |= TLAN_HC_GO | TLAN_HC_RT;
+ priv->rx_eoc_count++;
+ }
+
+ if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+ if (priv->timer.function == NULL) {
+ priv->timer.function = tlan_timer;
+ priv->timer.data = (unsigned long) dev;
+ priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
+ priv->timer_set_at = jiffies;
+ priv->timer_type = TLAN_TIMER_ACTIVITY;
+ add_timer(&priv->timer);
+ } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+ priv->timer_set_at = jiffies;
+ }
+ }
+
+ return ack;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_handle_dummy
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Dummy interrupt, which is
+ * raised whenever a test interrupt is generated by setting
+ * the Req_Int bit of HOST_CMD to 1.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
+{
+ netdev_info(dev, "Test interrupt\n");
+ return 1;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_handle_tx_eoc
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This driver is structured to determine EOC occurrences by
+ * reading the CSTAT member of the list structure. Tx EOC
+ * interrupts are disabled via the DIO INTDIS register.
+ * However, TLAN chips before revision 3.0 didn't have this
+ * functionality, so process EOC events if this is the
+ * case.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ struct tlan_list *head_list;
+ dma_addr_t head_list_phys;
+ u32 ack = 1;
+
+ host_int = 0;
+ if (priv->tlan_rev < 0x30) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
+ head_list_phys = priv->tx_list_dma
+ + sizeof(struct tlan_list)*priv->tx_head;
+ if ((head_list->c_stat & TLAN_CSTAT_READY)
+ == TLAN_CSTAT_READY) {
+ netif_stop_queue(dev);
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
+ ack |= TLAN_HC_GO;
+ } else {
+ priv->tx_in_progress = 0;
+ }
+ }
+
+ return ack;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_handle_status_check
+ *
+ * Returns:
+ * 0 if Adapter check, 1 if Network Status check.
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles Adapter Check/Network Status
+ * interrupts generated by the adapter. It checks the
+ * vector in the HOST_INT register to determine if it is
+ * an Adapter Check interrupt. If so, it resets the
+ * adapter. Otherwise it clears the status registers
+ * and services the PHY.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u32 ack;
+ u32 error;
+ u8 net_sts;
+ u32 phy;
+ u16 tlphy_ctl;
+ u16 tlphy_sts;
+
+ ack = 1;
+ if (host_int & TLAN_HI_IV_MASK) {
+ netif_stop_queue(dev);
+ error = inl(dev->base_addr + TLAN_CH_PARM);
+ netdev_info(dev, "Adaptor Error = 0x%x\n", error);
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
+ outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
+
+ schedule_work(&priv->tlan_tqueue);
+
+ netif_wake_queue(dev);
+ ack = 0;
+ } else {
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
+ phy = priv->phy[priv->phy_num];
+
+ net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
+ if (net_sts) {
+ tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
+ dev->name, (unsigned) net_sts);
+ }
+ if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+ if (!(tlphy_sts & TLAN_TS_POLOK) &&
+ !(tlphy_ctl & TLAN_TC_SWAPOL)) {
+ tlphy_ctl |= TLAN_TC_SWAPOL;
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+ tlphy_ctl);
+ } else if ((tlphy_sts & TLAN_TS_POLOK) &&
+ (tlphy_ctl & TLAN_TC_SWAPOL)) {
+ tlphy_ctl &= ~TLAN_TC_SWAPOL;
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+ tlphy_ctl);
+ }
+
+ if (debug)
+ tlan_phy_print(dev);
+ }
+ }
+
+ return ack;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_handle_rx_eoc
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This driver is structured to determine EOC occurrences by
+ * reading the CSTAT member of the list structure. Rx EOC
+ * interrupts are disabled via the DIO INTDIS register.
+ * However, TLAN chips before revision 3.0 didn't have this
+ * CSTAT member or an INTDIS register, so if this chip is
+ * pre-3.0, process EOC interrupts normally.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ dma_addr_t head_list_phys;
+ u32 ack = 1;
+
+ if (priv->tlan_rev < 0x30) {
+ TLAN_DBG(TLAN_DEBUG_RX,
+ "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n",
+ priv->rx_head, priv->rx_tail);
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
+ ack |= TLAN_HC_GO | TLAN_HC_RT;
+ priv->rx_eoc_count++;
+ }
+
+ return ack;
+
+}
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver timer function
+
+******************************************************************************
+*****************************************************************************/
+
+
+/***************************************************************
+ * tlan_timer
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * data A value given to add timer when
+ * add_timer was called.
+ *
+ * This function handles timed functionality for the
+ * TLAN driver. The two current timer uses are for
+ * delaying for autonegotiation and driving the ACT LED.
+ * - Autonegotiation requires being allowed about
+ * 2 1/2 seconds before attempting to transmit a
+ * packet. It would be a very bad thing to hang
+ * the kernel this long, so the driver doesn't
+ * allow transmission 'til after this time, for
+ * certain PHYs. It would be much nicer if all
+ * PHYs were interrupt-capable like the internal
+ * PHY.
+ * - The ACT LED, which shows adapter activity, is
+ * driven by the driver, and so must be left on
+ * for a short period to power up the LED so it
+ * can be seen. This delay can be changed by
+ * changing the TLAN_TIMER_ACT_DELAY in tlan.h,
+ * if desired. 100 ms produces a slightly
+ * sluggish response.
+ *
+ **************************************************************/
+
+static void tlan_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct tlan_priv *priv = netdev_priv(dev);
+ u32 elapsed;
+ unsigned long flags = 0;
+
+ priv->timer.function = NULL;
+
+ switch (priv->timer_type) {
+#ifdef MONITOR
+ case TLAN_TIMER_LINK_BEAT:
+ tlan_phy_monitor(dev);
+ break;
+#endif
+ case TLAN_TIMER_PHY_PDOWN:
+ tlan_phy_power_down(dev);
+ break;
+ case TLAN_TIMER_PHY_PUP:
+ tlan_phy_power_up(dev);
+ break;
+ case TLAN_TIMER_PHY_RESET:
+ tlan_phy_reset(dev);
+ break;
+ case TLAN_TIMER_PHY_START_LINK:
+ tlan_phy_start_link(dev);
+ break;
+ case TLAN_TIMER_PHY_FINISH_AN:
+ tlan_phy_finish_auto_neg(dev);
+ break;
+ case TLAN_TIMER_FINISH_RESET:
+ tlan_finish_reset(dev);
+ break;
+ case TLAN_TIMER_ACTIVITY:
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->timer.function == NULL) {
+ elapsed = jiffies - priv->timer_set_at;
+ if (elapsed >= TLAN_TIMER_ACT_DELAY) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK);
+ } else {
+ priv->timer.function = tlan_timer;
+ priv->timer.expires = priv->timer_set_at
+ + TLAN_TIMER_ACT_DELAY;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ add_timer(&priv->timer);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+ break;
+ default:
+ break;
+ }
+
+}
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver adapter related routines
+
+******************************************************************************
+*****************************************************************************/
+
+
+/***************************************************************
+ * tlan_reset_lists
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure with the list
+ *			structures to be reset.
+ *
+ * This routine sets the variables associated with managing
+ * the TLAN lists to their initial values.
+ *
+ **************************************************************/
+
+static void tlan_reset_lists(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ int i;
+ struct tlan_list *list;
+ dma_addr_t list_phys;
+ struct sk_buff *skb;
+
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+ list = priv->tx_list + i;
+ list->c_stat = TLAN_CSTAT_UNUSED;
+ list->buffer[0].address = 0;
+ list->buffer[2].count = 0;
+ list->buffer[2].address = 0;
+ list->buffer[8].address = 0;
+ list->buffer[9].address = 0;
+ }
+
+ priv->rx_head = 0;
+ priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+ list = priv->rx_list + i;
+ list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
+ list->c_stat = TLAN_CSTAT_READY;
+ list->frame_size = TLAN_MAX_FRAME_SIZE;
+ list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
+ skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
+ if (!skb) {
+ netdev_err(dev, "Out of memory for received data\n");
+ break;
+ }
+
+ list->buffer[0].address = pci_map_single(priv->pci_dev,
+ skb->data,
+ TLAN_MAX_FRAME_SIZE,
+ PCI_DMA_FROMDEVICE);
+ tlan_store_skb(list, skb);
+ list->buffer[1].count = 0;
+ list->buffer[1].address = 0;
+ list->forward = list_phys + sizeof(struct tlan_list);
+ }
+
+	/* in case we ran out of memory early, clear the remaining skb slots */
+ while (i < TLAN_NUM_RX_LISTS) {
+ tlan_store_skb(priv->rx_list + i, NULL);
+ ++i;
+ }
+ list->forward = 0;
+
+}
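+
+/*
+ * Editor's note (not in the original sources): on return the Rx entries
+ * form a forward-linked chain terminated at the final list entry
+ * (forward = 0), with rx_tail left pointing at that entry so
+ * tlan_handle_rx_eof() can splice recycled buffers back onto the end.
+ */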
+
+
+static void tlan_free_lists(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ int i;
+ struct tlan_list *list;
+ struct sk_buff *skb;
+
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+ list = priv->tx_list + i;
+ skb = tlan_get_skb(list);
+ if (skb) {
+ pci_unmap_single(
+ priv->pci_dev,
+ list->buffer[0].address,
+ max(skb->len,
+ (unsigned int)TLAN_MIN_FRAME_SIZE),
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+ list->buffer[8].address = 0;
+ list->buffer[9].address = 0;
+ }
+ }
+
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+ list = priv->rx_list + i;
+ skb = tlan_get_skb(list);
+ if (skb) {
+ pci_unmap_single(priv->pci_dev,
+ list->buffer[0].address,
+ TLAN_MAX_FRAME_SIZE,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(skb);
+ list->buffer[8].address = 0;
+ list->buffer[9].address = 0;
+ }
+ }
+}
+
+
+
+
+/***************************************************************
+ * tlan_print_dio
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base Base IO port of the device of
+ * which to print DIO registers.
+ *
+ * This function prints out all the internal (DIO)
+ * registers of a TLAN chip.
+ *
+ **************************************************************/
+
+static void tlan_print_dio(u16 io_base)
+{
+ u32 data0, data1;
+ int i;
+
+ pr_info("Contents of internal registers for io base 0x%04hx\n",
+ io_base);
+ pr_info("Off. +0 +4\n");
+ for (i = 0; i < 0x4C; i += 8) {
+ data0 = tlan_dio_read32(io_base, i);
+ data1 = tlan_dio_read32(io_base, i + 0x4);
+ pr_info("0x%02x 0x%08x 0x%08x\n", i, data0, data1);
+ }
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_print_list
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * list A pointer to the struct tlan_list structure to
+ * be printed.
+ * type A string to designate type of list,
+ * "Rx" or "Tx".
+ * num The index of the list.
+ *
+ * This function prints out the contents of the list
+ * pointed to by the list parameter.
+ *
+ **************************************************************/
+
+static void tlan_print_list(struct tlan_list *list, char *type, int num)
+{
+ int i;
+
+ pr_info("%s List %d at %p\n", type, num, list);
+ pr_info(" Forward = 0x%08x\n", list->forward);
+ pr_info(" CSTAT = 0x%04hx\n", list->c_stat);
+ pr_info(" Frame Size = 0x%04hx\n", list->frame_size);
+ /* for (i = 0; i < 10; i++) { */
+ for (i = 0; i < 2; i++) {
+ pr_info(" Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
+ i, list->buffer[i].count, list->buffer[i].address);
+ }
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_read_and_clear_stats
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+ *			from which to read stats.
+ *	record	Flag indicating whether to record the
+ *			values read (TLAN_RECORD) or to
+ *			discard them (TLAN_IGNORE).
+ *
+ * This function reads all the internal status registers
+ * of the TLAN chip, which clears them as a side effect.
+ * It then either adds the values to the device's status
+ * struct, or discards them, depending on whether record
+ * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0).
+ *
+ **************************************************************/
+
+static void tlan_read_and_clear_stats(struct net_device *dev, int record)
+{
+ u32 tx_good, tx_under;
+ u32 rx_good, rx_over;
+ u32 def_tx, crc, code;
+ u32 multi_col, single_col;
+ u32 excess_col, late_col, loss;
+
+ outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ tx_good = inb(dev->base_addr + TLAN_DIO_DATA);
+ tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+ tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ rx_good = inb(dev->base_addr + TLAN_DIO_DATA);
+ rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+ rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
+ def_tx = inb(dev->base_addr + TLAN_DIO_DATA);
+ def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ crc = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+ code = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ multi_col = inb(dev->base_addr + TLAN_DIO_DATA);
+ multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+ single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;
+
+ outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
+ late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1);
+ loss = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+
+ if (record) {
+ dev->stats.rx_packets += rx_good;
+ dev->stats.rx_errors += rx_over + crc + code;
+ dev->stats.tx_packets += tx_good;
+ dev->stats.tx_errors += tx_under + loss;
+ dev->stats.collisions += multi_col
+ + single_col + excess_col + late_col;
+
+ dev->stats.rx_over_errors += rx_over;
+ dev->stats.rx_crc_errors += crc;
+ dev->stats.rx_frame_errors += code;
+
+ dev->stats.tx_aborted_errors += tx_under;
+ dev->stats.tx_carrier_errors += loss;
+ }
+
+}
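+
+/*
+ * Editor's note (sketch): each statistics window is a 32-bit DIO
+ * register read out one byte at a time. TLAN_GOOD_TX_FRMS, for
+ * example, packs a 24-bit good-frame count in bytes 0-2 and the Tx
+ * underrun count in byte 3, hence the assembly above:
+ *
+ *	tx_good = b0 | (b1 << 8) | (b2 << 16);	tx_under = b3;
+ */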
+
+
+
+
+/***************************************************************
+ * tlan_reset_adapter
+ *
+ * Returns:
+ * 0
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * to be reset.
+ *
+ * This function resets the adapter and its physical
+ * device. See Chap. 3, pp. 9-10 of the "ThunderLAN
+ * Programmer's Guide" for details. The routine tries to
+ * implement what is detailed there, though adjustments
+ * have been made.
+ *
+ **************************************************************/
+
+static void
+tlan_reset_adapter(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ int i;
+ u32 addr;
+ u32 data;
+ u8 data8;
+
+ priv->tlan_full_duplex = false;
+ priv->phy_online = 0;
+ netif_carrier_off(dev);
+
+/* 1. Assert reset bit. */
+
+ data = inl(dev->base_addr + TLAN_HOST_CMD);
+ data |= TLAN_HC_AD_RST;
+ outl(data, dev->base_addr + TLAN_HOST_CMD);
+
+ udelay(1000);
+
+/* 2. Turn off interrupts. (Probably isn't necessary) */
+
+ data = inl(dev->base_addr + TLAN_HOST_CMD);
+ data |= TLAN_HC_INT_OFF;
+ outl(data, dev->base_addr + TLAN_HOST_CMD);
+
+/* 3. Clear AREGs and HASHs. */
+
+ for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
+ tlan_dio_write32(dev->base_addr, (u16) i, 0);
+
+/* 4. Setup NetConfig register. */
+
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
+
+/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */
+
+ outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
+ outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);
+
+/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */
+
+ outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
+ addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+ tlan_set_bit(TLAN_NET_SIO_NMRST, addr);
+
+/* 7. Setup the remaining registers. */
+
+ if (priv->tlan_rev >= 0x30) {
+ data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
+ tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
+ }
+ tlan_phy_detect(dev);
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;
+
+ if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
+ data |= TLAN_NET_CFG_BIT;
+ if (priv->aui == 1) {
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
+ } else if (priv->duplex == TLAN_DUPLEX_FULL) {
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
+ priv->tlan_full_duplex = true;
+ } else {
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
+ }
+ }
+
+ if (priv->phy_num == 0)
+ data |= TLAN_NET_CFG_PHY_EN;
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
+
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
+ tlan_finish_reset(dev);
+ else
+ tlan_phy_power_down(dev);
+
+}
+
+
+
+
+static void
+tlan_finish_reset(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u8 data;
+ u32 phy;
+ u8 sio;
+ u16 status;
+ u16 partner;
+ u16 tlphy_ctl;
+ u16 tlphy_par;
+ u16 tlphy_id1, tlphy_id2;
+ int i;
+
+ phy = priv->phy[priv->phy_num];
+
+ data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
+ if (priv->tlan_full_duplex)
+ data |= TLAN_NET_CMD_DUPLEX;
+ tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
+ data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
+ if (priv->phy_num == 0)
+ data |= TLAN_NET_MASK_MASK7;
+ tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
+ tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);
+
+ if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
+ (priv->aui)) {
+ status = MII_GS_LINK;
+ netdev_info(dev, "Link forced\n");
+ } else {
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ udelay(1000);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ if ((status & MII_GS_LINK) &&
+ /* We only support link info on Nat.Sem. PHY's */
+ (tlphy_id1 == NAT_SEM_ID1) &&
+ (tlphy_id2 == NAT_SEM_ID2)) {
+ tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner);
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par);
+
+ netdev_info(dev,
+ "Link active with %s %uMbps %s-Duplex\n",
+ !(tlphy_par & TLAN_PHY_AN_EN_STAT)
+ ? "forced" : "Autonegotiation enabled,",
+ tlphy_par & TLAN_PHY_SPEED_100
+ ? 100 : 10,
+ tlphy_par & TLAN_PHY_DUPLEX_FULL
+ ? "Full" : "Half");
+
+ if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
+ netdev_info(dev, "Partner capability:");
+ for (i = 5; i < 10; i++)
+ if (partner & (1 << i))
+ pr_cont(" %s", media[i-5]);
+ pr_cont("\n");
+ }
+
+ tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
+ TLAN_LED_LINK);
+#ifdef MONITOR
+ /* We have link beat... for now, anyway */
+ priv->link = 1;
+ /* Enable link beat monitoring */
+ tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT);
+#endif
+ } else if (status & MII_GS_LINK) {
+ netdev_info(dev, "Link active\n");
+ tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
+ TLAN_LED_LINK);
+ }
+ }
+
+ if (priv->phy_num == 0) {
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+ tlphy_ctl |= TLAN_TC_INTEN;
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
+ sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
+ sio |= TLAN_NET_SIO_MINTEN;
+ tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
+ }
+
+ if (status & MII_GS_LINK) {
+ tlan_set_mac(dev, 0, dev->dev_addr);
+ priv->phy_online = 1;
+ outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
+ if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
+ outb((TLAN_HC_REQ_INT >> 8),
+ dev->base_addr + TLAN_HOST_CMD + 1);
+ outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
+ outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
+ netif_carrier_on(dev);
+ } else {
+ netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
+ tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
+ return;
+ }
+ tlan_set_multicast_list(dev);
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_set_mac
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * on which to change the AREG.
+ * areg The AREG to set the address in (0 - 3).
+ * mac A pointer to an array of chars. Each
+ * element stores one byte of the address.
+ * I.e., it is not in ASCII.
+ *
+ * This function transfers a MAC address to one of the
+ * TLAN AREGs (address registers). The TLAN chip locks
+ * the register on writing to offset 0 and unlocks the
+ * register after writing to offset 5. If NULL is passed
+ * in mac, then the AREG is filled with 0's.
+ *
+ **************************************************************/
+
+static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
+{
+ int i;
+
+ areg *= 6;
+
+ if (mac != NULL) {
+ for (i = 0; i < 6; i++)
+ tlan_dio_write8(dev->base_addr,
+ TLAN_AREG_0 + areg + i, mac[i]);
+ } else {
+ for (i = 0; i < 6; i++)
+ tlan_dio_write8(dev->base_addr,
+ TLAN_AREG_0 + areg + i, 0);
+ }
+
+}
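+
+/* Usage sketch (illustrative): tlan_set_mac(dev, 0, dev->dev_addr)
+ * loads the station address into AREG 0, as tlan_finish_reset does
+ * above, while tlan_set_mac(dev, 1, NULL) clears AREG 1.
+ */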
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver PHY layer routines
+
+******************************************************************************
+*****************************************************************************/
+
+
+
+/*********************************************************************
+ * tlan_phy_print
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev A pointer to the device structure of the
+ * TLAN device having the PHYs to be detailed.
+ *
+ * This function prints the registers of a PHY (aka transceiver).
+ *
+ ********************************************************************/
+
+static void tlan_phy_print(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 i, data0, data1, data2, data3, phy;
+
+ phy = priv->phy[priv->phy_num];
+
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+ netdev_info(dev, "Unmanaged PHY\n");
+ } else if (phy <= TLAN_PHY_MAX_ADDR) {
+ netdev_info(dev, "PHY 0x%02x\n", phy);
+ pr_info(" Off. +0 +1 +2 +3\n");
+ for (i = 0; i < 0x20; i += 4) {
+ tlan_mii_read_reg(dev, phy, i, &data0);
+ tlan_mii_read_reg(dev, phy, i + 1, &data1);
+ tlan_mii_read_reg(dev, phy, i + 2, &data2);
+ tlan_mii_read_reg(dev, phy, i + 3, &data3);
+ pr_info(" 0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
+ i, data0, data1, data2, data3);
+ }
+ } else {
+ netdev_info(dev, "Invalid PHY\n");
+ }
+
+}
+
+
+
+
+/*********************************************************************
+ * tlan_phy_detect
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev A pointer to the device structure of the adapter
+ * for which the PHY needs to be determined.
+ *
+ * So far I've found that adapters which have external PHYs
+ * may also use the internal PHY for part of the functionality
+ * (e.g., AUI/Thinnet). This function finds out if this TLAN
+ * chip has an internal PHY, and then finds the first external
+ * PHY (starting from address 0), if one exists.
+ *
+ ********************************************************************/
+
+static void tlan_phy_detect(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 control;
+ u16 hi;
+ u16 lo;
+ u32 phy;
+
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+ priv->phy_num = 0xffff;
+ return;
+ }
+
+ tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);
+
+ if (hi != 0xffff)
+ priv->phy[0] = TLAN_PHY_MAX_ADDR;
+ else
+ priv->phy[0] = TLAN_PHY_NONE;
+
+ priv->phy[1] = TLAN_PHY_NONE;
+ for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
+ tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
+ if ((control != 0xffff) ||
+ (hi != 0xffff) || (lo != 0xffff)) {
+ TLAN_DBG(TLAN_DEBUG_GNRL,
+ "PHY found at %02x %04x %04x %04x\n",
+ phy, control, hi, lo);
+ if ((priv->phy[1] == TLAN_PHY_NONE) &&
+ (phy != TLAN_PHY_MAX_ADDR)) {
+ priv->phy[1] = phy;
+ }
+ }
+ }
+
+ if (priv->phy[1] != TLAN_PHY_NONE)
+ priv->phy_num = 1;
+ else if (priv->phy[0] != TLAN_PHY_NONE)
+ priv->phy_num = 0;
+ else
+ netdev_info(dev, "Cannot initialize device, no PHY was found!\n");
+
+}
+
+
+
+
+static void tlan_phy_power_down(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 value;
+
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
+ value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
+ tlan_mii_sync(dev->base_addr);
+ tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+ if ((priv->phy_num == 0) &&
+ (priv->phy[1] != TLAN_PHY_NONE) &&
+ (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) {
+ tlan_mii_sync(dev->base_addr);
+ tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
+ }
+
+ /* Wait for 50 ms, then power up.
+ * The delay is arbitrary; it is intended to make sure the
+ * transceiver settles.
+ */
+ tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP);
+
+}
+
+
+
+
+static void tlan_phy_power_up(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 value;
+
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
+ tlan_mii_sync(dev->base_addr);
+ value = MII_GC_LOOPBK;
+ tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+ tlan_mii_sync(dev->base_addr);
+ /* Wait for 50 ms and reset the
+ * transceiver. The TLAN docs mention both 50 ms and
+ * 500 ms; the shorter delay is used here and appears
+ * to be sufficient.
+ */
+ tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET);
+
+}
+
+
+
+
+static void tlan_phy_reset(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 phy;
+ u16 value;
+
+ phy = priv->phy[priv->phy_num];
+
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
+ tlan_mii_sync(dev->base_addr);
+ value = MII_GC_LOOPBK | MII_GC_RESET;
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
+ tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
+ while (value & MII_GC_RESET)
+ tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
+
+ /* Wait for 50 ms and initialize.
+ * I don't remember why the original wait was 500 ms;
+ * I've changed it to 50 ms, as that seems long enough.
+ */
+ tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK);
+
+}
+
+
+
+
+static void tlan_phy_start_link(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 ability;
+ u16 control;
+ u16 data;
+ u16 phy;
+ u16 status;
+ u16 tctl;
+
+ phy = priv->phy[priv->phy_num];
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);
+
+ if ((status & MII_GS_AUTONEG) &&
+ (!priv->aui)) {
+ ability = status >> 11;
+ if (priv->speed == TLAN_SPEED_10 &&
+ priv->duplex == TLAN_DUPLEX_HALF) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
+ } else if (priv->speed == TLAN_SPEED_10 &&
+ priv->duplex == TLAN_DUPLEX_FULL) {
+ priv->tlan_full_duplex = true;
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
+ } else if (priv->speed == TLAN_SPEED_100 &&
+ priv->duplex == TLAN_DUPLEX_HALF) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
+ } else if (priv->speed == TLAN_SPEED_100 &&
+ priv->duplex == TLAN_DUPLEX_FULL) {
+ priv->tlan_full_duplex = true;
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
+ } else {
+
+ /* Set Auto-Neg advertisement */
+ tlan_mii_write_reg(dev, phy, MII_AN_ADV,
+ (ability << 5) | 1);
+ /* Enable Auto-Neg */
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
+ /* Restart Auto-Neg */
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);
+ /* Wait 2 sec for autonegotiation
+ * to complete. The max spec time is less than this,
+ * but the card needs additional time to start AN;
+ * 0.5 sec should be plenty extra.
+ */
+ netdev_info(dev, "Starting autonegotiation\n");
+ tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
+ return;
+ }
+
+ }
+
+ if ((priv->aui) && (priv->phy_num != 0)) {
+ priv->phy_num = 0;
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
+ | TLAN_NET_CFG_PHY_EN;
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
+ tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN);
+ return;
+ } else if (priv->phy_num == 0) {
+ control = 0;
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
+ if (priv->aui) {
+ tctl |= TLAN_TC_AUISEL;
+ } else {
+ tctl &= ~TLAN_TC_AUISEL;
+ if (priv->duplex == TLAN_DUPLEX_FULL) {
+ control |= MII_GC_DUPLEX;
+ priv->tlan_full_duplex = true;
+ }
+ if (priv->speed == TLAN_SPEED_100)
+ control |= MII_GC_SPEEDSEL;
+ }
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
+ }
+
+ /* Wait 4 sec to give the transceiver time
+ * to establish link.
+ */
+ tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);
+
+}
+
+
+
+
+static void tlan_phy_finish_auto_neg(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 an_adv;
+ u16 an_lpa;
+ u16 data;
+ u16 mode;
+ u16 phy;
+ u16 status;
+
+ phy = priv->phy[priv->phy_num];
+
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ udelay(1000);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+
+ if (!(status & MII_GS_AUTOCMPLT)) {
+ /* Wait for 8 sec to give the process
+ * more time. Perhaps we should fail after a while.
+ */
+ if (!priv->neg_be_verbose++) {
+ pr_info("Giving autonegotiation more time.\n");
+ pr_info("Please check that your adapter has\n");
+ pr_info("been properly connected to a HUB or Switch.\n");
+ pr_info("Trying to establish link in the background...\n");
+ }
+ tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN);
+ return;
+ }
+
+ netdev_info(dev, "Autonegotiation complete\n");
+ tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
+ tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
+ mode = an_adv & an_lpa & 0x03E0;
+ if (mode & 0x0100)
+ priv->tlan_full_duplex = true;
+ else if (!(mode & 0x0080) && (mode & 0x0040))
+ priv->tlan_full_duplex = true;
+
+ if ((!(mode & 0x0180)) &&
+ (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
+ (priv->phy_num != 0)) {
+ priv->phy_num = 0;
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
+ | TLAN_NET_CFG_PHY_EN;
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
+ tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
+ return;
+ }
+
+ if (priv->phy_num == 0) {
+ if ((priv->duplex == TLAN_DUPLEX_FULL) ||
+ (an_adv & an_lpa & 0x0040)) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+ MII_GC_AUTOENB | MII_GC_DUPLEX);
+ netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n");
+ } else {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+ MII_GC_AUTOENB);
+ netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n");
+ }
+ }
+
+ /* Wait for 100 ms. No reason in particular. */
+ tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET);
+
+}
+
+#ifdef MONITOR
+
+/*********************************************************************
+ *
+ * tlan_phy_monitor
+ *
+ * Returns:
+ * Nothing
+ *
+ * Parms:
+ * dev The device structure of this device.
+ *
+ *
+ * This function monitors PHY condition by reading the status
+ * register via the MII bus. This can be used to give info
+ * about link changes (up/down), and possible switch to alternate
+ * media.
+ *
+ *******************************************************************/
+
+void tlan_phy_monitor(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 phy;
+ u16 phy_status;
+
+ phy = priv->phy[priv->phy_num];
+
+ /* Get PHY status register */
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);
+
+ /* Check if link has been lost */
+ if (!(phy_status & MII_GS_LINK)) {
+ if (priv->link) {
+ priv->link = 0;
+ printk(KERN_DEBUG "TLAN: %s has lost link\n",
+ dev->name);
+ netif_carrier_off(dev);
+ tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
+ return;
+ }
+ }
+
+ /* Link re-established? */
+ if ((phy_status & MII_GS_LINK) && !priv->link) {
+ priv->link = 1;
+ printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
+ dev->name);
+ netif_carrier_on(dev);
+ }
+
+ /* Setup a new monitor */
+ tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
+}
+
+#endif /* MONITOR */
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver MII routines
+
+these routines are based on the information in chap. 2 of the
+"ThunderLAN Programmer's Guide", pp. 15-24.
+
+******************************************************************************
+*****************************************************************************/
+
+
+/***************************************************************
+ * tlan_mii_read_reg
+ *
+ * Returns:
+ * false if ack received ok
+ * true if no ack received or other error
+ *
+ * Parms:
+ * dev The device structure containing
+ * the IO address and interrupt count
+ * for this device.
+ * phy The address of the PHY to be queried.
+ * reg The register whose contents are to be
+ * retrieved.
+ * val A pointer to a variable to store the
+ * retrieved value.
+ *
+ * This function uses the TLAN's MII bus to retrieve the contents
+ * of a given register on a PHY. It sends the appropriate info
+ * and then reads the 16-bit register value from the MII bus via
+ * the TLAN SIO register.
+ *
+ **************************************************************/
+
+static bool
+tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
+{
+ u8 nack;
+ u16 sio, tmp;
+ u32 i;
+ bool err;
+ int minten;
+ struct tlan_priv *priv = netdev_priv(dev);
+ unsigned long flags = 0;
+
+ err = false;
+ outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
+ sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ if (!in_irq())
+ spin_lock_irqsave(&priv->lock, flags);
+
+ tlan_mii_sync(dev->base_addr);
+
+ minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+ if (minten)
+ tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
+
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */
+ tlan_mii_send_data(dev->base_addr, 0x2, 2); /* read (10b) */
+ tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */
+ tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */
+
+
+ tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio); /* change direction */
+
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* clock idle bit */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* wait 300ns */
+
+ nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio); /* check for ACK */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio); /* finish ACK */
+ if (nack) { /* no ACK, so fake it */
+ for (i = 0; i < 16; i++) {
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ }
+ tmp = 0xffff;
+ err = true;
+ } else { /* ACK, so read data */
+ for (tmp = 0, i = 0x8000; i; i >>= 1) {
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
+ tmp |= i;
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ }
+ }
+
+
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+
+ if (minten)
+ tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
+
+ *val = tmp;
+
+ if (!in_irq())
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return err;
+
+}
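+
+/* Usage sketch (illustrative, mirroring tlan_finish_reset above):
+ * poll the generic status register and test for link:
+ *
+ *	u16 status;
+ *
+ *	if (!tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status) &&
+ *	    (status & MII_GS_LINK))
+ *		netif_carrier_on(dev);
+ */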
+
+
+
+
+/***************************************************************
+ * tlan_mii_send_data
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * base_port The base IO port of the adapter in
+ * question.
+ * data The value to be placed on the MII bus.
+ * num_bits The number of bits in data that are to
+ * be placed on the MII bus.
+ *
+ * This function sends a sequence of bits over the MII
+ * configuration bus.
+ *
+ **************************************************************/
+
+static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
+{
+ u16 sio;
+ u32 i;
+
+ if (num_bits == 0)
+ return;
+
+ outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
+ sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
+ tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);
+
+ for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
+ if (data & i)
+ tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
+ else
+ tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
+ }
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_mii_sync
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * base_port The base IO port of the adapter in
+ * question.
+ *
+ * This function syncs all PHYs on the MII configuration
+ * bus.
+ *
+ **************************************************************/
+
+static void tlan_mii_sync(u16 base_port)
+{
+ int i;
+ u16 sio;
+
+ outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
+ sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
+ for (i = 0; i < 32; i++) {
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ }
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_mii_write_reg
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure for the device
+ * to write to.
+ * phy The address of the PHY to be written to.
+ * reg The register whose contents are to be
+ * written.
+ * val The value to be written to the register.
+ *
+ * This function uses the TLAN's MII bus to write the contents of a
+ * given register on a PHY. It sends the appropriate info and then
+ * writes the 16-bit register value over the MII configuration bus
+ * via the TLAN SIO register.
+ *
+ **************************************************************/
+
+static void
+tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
+{
+ u16 sio;
+ int minten;
+ unsigned long flags = 0;
+ struct tlan_priv *priv = netdev_priv(dev);
+
+ outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
+ sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ if (!in_irq())
+ spin_lock_irqsave(&priv->lock, flags);
+
+ tlan_mii_sync(dev->base_addr);
+
+ minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+ if (minten)
+ tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
+
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* write (01b) */
+ tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */
+ tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */
+
+ tlan_mii_send_data(dev->base_addr, 0x2, 2); /* send ACK */
+ tlan_mii_send_data(dev->base_addr, val, 16); /* send data */
+
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+
+ if (minten)
+ tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
+
+ if (!in_irq())
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+}
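+
+/* Usage sketch (illustrative): restarting autonegotiation on a PHY
+ * by writing the generic control register; this is the symbolic form
+ * of the 0x1200 used in tlan_phy_start_link above:
+ *
+ *	tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+ *			   MII_GC_AUTOENB | MII_GC_AUTORSRT);
+ */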
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver eeprom routines
+
+the Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
+EEPROM. These functions are based on information in Microchip's
+data sheet. I don't know how well these functions will work with
+other EEPROMs.
+
+******************************************************************************
+*****************************************************************************/
+
+
+/***************************************************************
+ * tlan_ee_send_start
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ *
+ * This function sends a start cycle to an EEPROM attached
+ * to a TLAN chip.
+ *
+ **************************************************************/
+
+static void tlan_ee_send_start(u16 io_base)
+{
+ u16 sio;
+
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
+ sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_ee_send_byte
+ *
+ * Returns:
+ * If the correct ack was received, 0, otherwise 1
+ * Parms: io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * data The 8 bits of information to
+ * send to the EEPROM.
+ * stop If TLAN_EEPROM_STOP is passed, a
+ * stop cycle is sent after the
+ * byte is sent and the ack is
+ * read.
+ *
+ * This function sends a byte on the serial EEPROM line,
+ * driving the clock to send each bit. The function then
+ * reverses transmission direction and reads an acknowledge
+ * bit.
+ *
+ **************************************************************/
+
+static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
+{
+ int err;
+ u8 place;
+ u16 sio;
+
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
+ sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ /* Assume clock is low and tx is enabled. */
+ for (place = 0x80; place != 0; place >>= 1) {
+ if (place & data)
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+ else
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+ }
+ tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+
+ if ((!err) && stop) {
+ /* STOP, raise data while clock is high */
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+ }
+
+ return err;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_ee_receive_byte
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * data An address to a char to hold the
+ * data sent from the EEPROM.
+ * stop If TLAN_EEPROM_STOP is passed, a
+ * stop cycle is sent after the
+ * byte is received, and no ack is
+ * sent.
+ *
+ * This function receives 8 bits of data from the EEPROM
+ * over the serial link. It then sends an ack bit, or no
+ * ack and a stop bit. This function is used to retrieve
+ * data after the address of a byte in the EEPROM has been
+ * sent.
+ *
+ **************************************************************/
+
+static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
+{
+ u8 place;
+ u16 sio;
+
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
+ sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+ *data = 0;
+
+ /* Assume clock is low and tx is enabled. */
+ tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+ for (place = 0x80; place; place >>= 1) {
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
+ *data |= place;
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+ }
+
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+ if (!stop) {
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); /* ack = 0 */
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+ } else {
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio); /* no ack = 1 (?) */
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+ /* STOP, raise data while clock is high */
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+ }
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_ee_read_byte
+ *
+ * Returns:
+ * No error = 0, else, the stage at which the error
+ * occurred.
+ * Parms:
+ * dev The device structure of the
+ * TLAN device with the EEPROM to
+ * use.
+ * ee_addr The address of the byte in the
+ * EEPROM whose contents are to be
+ * retrieved.
+ * data An address to a char to hold the
+ * data obtained from the EEPROM.
+ *
+ * This function reads a byte of information from a byte
+ * cell in the EEPROM.
+ *
+ **************************************************************/
+
+static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
+{
+ int err;
+ struct tlan_priv *priv = netdev_priv(dev);
+ unsigned long flags = 0;
+ int ret = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ tlan_ee_send_start(dev->base_addr);
+ err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 1;
+ goto fail;
+ }
+ err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 2;
+ goto fail;
+ }
+ tlan_ee_send_start(dev->base_addr);
+ err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 3;
+ goto fail;
+ }
+ tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
+fail:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+
+}
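+
+/* Usage sketch (illustrative; "addr_ofs" stands for the adapter-specific
+ * EEPROM offset from the adapter table, and "addr" is a caller-supplied
+ * buffer): reading the 6-byte station address one cell at a time:
+ *
+ *	u8 addr[6];
+ *	int err = 0, i;
+ *
+ *	for (i = 0; i < 6; i++)
+ *		err |= tlan_ee_read_byte(dev, addr_ofs + i, &addr[i]);
+ */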
+
+
+
diff --git a/drivers/net/ethernet/ti/tlan.h b/drivers/net/ethernet/ti/tlan.h
new file mode 100644
index 000000000000..5fc98a8e4889
--- /dev/null
+++ b/drivers/net/ethernet/ti/tlan.h
@@ -0,0 +1,546 @@
+#ifndef TLAN_H
+#define TLAN_H
+/********************************************************************
+ *
+ * Linux ThunderLAN Driver
+ *
+ * tlan.h
+ * by James Banks
+ *
+ * (C) 1997-1998 Caldera, Inc.
+ * (C) 1999-2001 Torben Mathiasen
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ *
+ * Dec 10, 1999 Torben Mathiasen <torben.mathiasen@compaq.com>
+ * New Maintainer
+ *
+ ********************************************************************/
+
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+
+
+ /*****************************************************************
+ * TLan Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_MIN_FRAME_SIZE 64
+#define TLAN_MAX_FRAME_SIZE 1600
+
+#define TLAN_NUM_RX_LISTS 32
+#define TLAN_NUM_TX_LISTS 64
+
+#define TLAN_IGNORE 0
+#define TLAN_RECORD 1
+
+#define TLAN_DBG(lvl, format, args...) \
+ do { \
+ if (debug&lvl) \
+ printk(KERN_DEBUG "TLAN: " format, ##args); \
+ } while (0)
+
+#define TLAN_DEBUG_GNRL 0x0001
+#define TLAN_DEBUG_TX 0x0002
+#define TLAN_DEBUG_RX 0x0004
+#define TLAN_DEBUG_LIST 0x0008
+#define TLAN_DEBUG_PROBE 0x0010
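+
+/* Example (illustrative): TLAN_DBG(TLAN_DEBUG_TX, "%s: queuing TX\n",
+ * dev->name) prints only when the TLAN_DEBUG_TX bit is set in "debug".
+ */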
+
+#define TX_TIMEOUT (10*HZ) /* We need time for auto-neg */
+#define MAX_TLAN_BOARDS 8 /* Max number of boards installed
+ at a time */
+
+
+ /*****************************************************************
+ * Device Identification Definitions
+ *
+ ****************************************************************/
+
+#define PCI_DEVICE_ID_NETELLIGENT_10_T2 0xB012
+#define PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100 0xB030
+#ifndef PCI_DEVICE_ID_OLICOM_OC2183
+#define PCI_DEVICE_ID_OLICOM_OC2183 0x0013
+#endif
+#ifndef PCI_DEVICE_ID_OLICOM_OC2325
+#define PCI_DEVICE_ID_OLICOM_OC2325 0x0012
+#endif
+#ifndef PCI_DEVICE_ID_OLICOM_OC2326
+#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
+#endif
+
+struct tlan_adapter_entry {
+ u16 vendor_id;
+ u16 device_id;
+ char *device_label;
+ u32 flags;
+ u16 addr_ofs;
+};
+
+#define TLAN_ADAPTER_NONE 0x00000000
+#define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001
+#define TLAN_ADAPTER_BIT_RATE_PHY 0x00000002
+#define TLAN_ADAPTER_USE_INTERN_10 0x00000004
+#define TLAN_ADAPTER_ACTIVITY_LED 0x00000008
+
+#define TLAN_SPEED_DEFAULT 0
+#define TLAN_SPEED_10 10
+#define TLAN_SPEED_100 100
+
+#define TLAN_DUPLEX_DEFAULT 0
+#define TLAN_DUPLEX_HALF 1
+#define TLAN_DUPLEX_FULL 2
+
+
+
+ /*****************************************************************
+ * EISA Definitions
+ *
+ ****************************************************************/
+
+#define EISA_ID 0xc80 /* EISA ID Registers */
+#define EISA_ID0 0xc80 /* EISA ID Register 0 */
+#define EISA_ID1 0xc81 /* EISA ID Register 1 */
+#define EISA_ID2 0xc82 /* EISA ID Register 2 */
+#define EISA_ID3 0xc83 /* EISA ID Register 3 */
+#define EISA_CR 0xc84 /* EISA Control Register */
+#define EISA_REG0 0xc88 /* EISA Configuration Register 0 */
+#define EISA_REG1 0xc89 /* EISA Configuration Register 1 */
+#define EISA_REG2 0xc8a /* EISA Configuration Register 2 */
+#define EISA_REG3 0xc8f /* EISA Configuration Register 3 */
+#define EISA_APROM 0xc90 /* Ethernet Address PROM */
+
+
+
+ /*****************************************************************
+ * Rx/Tx List Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_BUFFERS_PER_LIST 10
+#define TLAN_LAST_BUFFER 0x80000000
+#define TLAN_CSTAT_UNUSED 0x8000
+#define TLAN_CSTAT_FRM_CMP 0x4000
+#define TLAN_CSTAT_READY 0x3000
+#define TLAN_CSTAT_EOC 0x0800
+#define TLAN_CSTAT_RX_ERROR 0x0400
+#define TLAN_CSTAT_PASS_CRC 0x0200
+#define TLAN_CSTAT_DP_PR 0x0100
+
+
+struct tlan_buffer {
+ u32 count;
+ u32 address;
+};
+
+
+struct tlan_list {
+ u32 forward;
+ u16 c_stat;
+ u16 frame_size;
+ struct tlan_buffer buffer[TLAN_BUFFERS_PER_LIST];
+};
+
+
+typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
+
+
+
+
+ /*****************************************************************
+ * PHY definitions
+ *
+ ****************************************************************/
+
+#define TLAN_PHY_MAX_ADDR 0x1F
+#define TLAN_PHY_NONE 0x20
+
+
+
+
+ /*****************************************************************
+ * TLAN Private Information Structure
+ *
+ ****************************************************************/
+
+struct tlan_priv {
+ struct net_device *next_device;
+ struct pci_dev *pci_dev;
+ struct net_device *dev;
+ void *dma_storage;
+ dma_addr_t dma_storage_dma;
+ unsigned int dma_size;
+ u8 *pad_buffer;
+ struct tlan_list *rx_list;
+ dma_addr_t rx_list_dma;
+ u8 *rx_buffer;
+ dma_addr_t rx_buffer_dma;
+ u32 rx_head;
+ u32 rx_tail;
+ u32 rx_eoc_count;
+ struct tlan_list *tx_list;
+ dma_addr_t tx_list_dma;
+ u8 *tx_buffer;
+ dma_addr_t tx_buffer_dma;
+ u32 tx_head;
+ u32 tx_in_progress;
+ u32 tx_tail;
+ u32 tx_busy_count;
+ u32 phy_online;
+ u32 timer_set_at;
+ u32 timer_type;
+ struct timer_list timer;
+ struct board *adapter;
+ u32 adapter_rev;
+ u32 aui;
+ u32 debug;
+ u32 duplex;
+ u32 phy[2];
+ u32 phy_num;
+ u32 speed;
+ u8 tlan_rev;
+ u8 tlan_full_duplex;
+ spinlock_t lock;
+ u8 link;
+ u8 is_eisa;
+ struct work_struct tlan_tqueue;
+ u8 neg_be_verbose;
+};
+
+
+
+
+ /*****************************************************************
+ * TLan Driver Timer Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_TIMER_LINK_BEAT 1
+#define TLAN_TIMER_ACTIVITY 2
+#define TLAN_TIMER_PHY_PDOWN 3
+#define TLAN_TIMER_PHY_PUP 4
+#define TLAN_TIMER_PHY_RESET 5
+#define TLAN_TIMER_PHY_START_LINK 6
+#define TLAN_TIMER_PHY_FINISH_AN 7
+#define TLAN_TIMER_FINISH_RESET 8
+
+#define TLAN_TIMER_ACT_DELAY (HZ/10)
+
+
+
+
+ /*****************************************************************
+ * TLan Driver Eeprom Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_EEPROM_ACK 0
+#define TLAN_EEPROM_STOP 1
+
+
+
+
+ /*****************************************************************
+ * Host Register Offsets and Contents
+ *
+ ****************************************************************/
+
+#define TLAN_HOST_CMD 0x00
+#define TLAN_HC_GO 0x80000000
+#define TLAN_HC_STOP 0x40000000
+#define TLAN_HC_ACK 0x20000000
+#define TLAN_HC_CS_MASK 0x1FE00000
+#define TLAN_HC_EOC 0x00100000
+#define TLAN_HC_RT 0x00080000
+#define TLAN_HC_NES 0x00040000
+#define TLAN_HC_AD_RST 0x00008000
+#define TLAN_HC_LD_TMR 0x00004000
+#define TLAN_HC_LD_THR 0x00002000
+#define TLAN_HC_REQ_INT 0x00001000
+#define TLAN_HC_INT_OFF 0x00000800
+#define TLAN_HC_INT_ON 0x00000400
+#define TLAN_HC_AC_MASK 0x000000FF
+#define TLAN_CH_PARM 0x04
+#define TLAN_DIO_ADR 0x08
+#define TLAN_DA_ADR_INC 0x8000
+#define TLAN_DA_RAM_ADR 0x4000
+#define TLAN_HOST_INT 0x0A
+#define TLAN_HI_IV_MASK 0x1FE0
+#define TLAN_HI_IT_MASK 0x001C
+#define TLAN_DIO_DATA 0x0C
+
+
+/* ThunderLAN Internal Register DIO Offsets */
+
+#define TLAN_NET_CMD 0x00
+#define TLAN_NET_CMD_NRESET 0x80
+#define TLAN_NET_CMD_NWRAP 0x40
+#define TLAN_NET_CMD_CSF 0x20
+#define TLAN_NET_CMD_CAF 0x10
+#define TLAN_NET_CMD_NOBRX 0x08
+#define TLAN_NET_CMD_DUPLEX 0x04
+#define TLAN_NET_CMD_TRFRAM 0x02
+#define TLAN_NET_CMD_TXPACE 0x01
+#define TLAN_NET_SIO 0x01
+#define TLAN_NET_SIO_MINTEN 0x80
+#define TLAN_NET_SIO_ECLOK 0x40
+#define TLAN_NET_SIO_ETXEN 0x20
+#define TLAN_NET_SIO_EDATA 0x10
+#define TLAN_NET_SIO_NMRST 0x08
+#define TLAN_NET_SIO_MCLK 0x04
+#define TLAN_NET_SIO_MTXEN 0x02
+#define TLAN_NET_SIO_MDATA 0x01
+#define TLAN_NET_STS 0x02
+#define TLAN_NET_STS_MIRQ 0x80
+#define TLAN_NET_STS_HBEAT 0x40
+#define TLAN_NET_STS_TXSTOP 0x20
+#define TLAN_NET_STS_RXSTOP 0x10
+#define TLAN_NET_STS_RSRVD 0x0F
+#define TLAN_NET_MASK 0x03
+#define TLAN_NET_MASK_MASK7 0x80
+#define TLAN_NET_MASK_MASK6 0x40
+#define TLAN_NET_MASK_MASK5 0x20
+#define TLAN_NET_MASK_MASK4 0x10
+#define TLAN_NET_MASK_RSRVD 0x0F
+#define TLAN_NET_CONFIG 0x04
+#define TLAN_NET_CFG_RCLK 0x8000
+#define TLAN_NET_CFG_TCLK 0x4000
+#define TLAN_NET_CFG_BIT 0x2000
+#define TLAN_NET_CFG_RXCRC 0x1000
+#define TLAN_NET_CFG_PEF 0x0800
+#define TLAN_NET_CFG_1FRAG 0x0400
+#define TLAN_NET_CFG_1CHAN 0x0200
+#define TLAN_NET_CFG_MTEST 0x0100
+#define TLAN_NET_CFG_PHY_EN 0x0080
+#define TLAN_NET_CFG_MSMASK 0x007F
+#define TLAN_MAN_TEST 0x06
+#define TLAN_DEF_VENDOR_ID 0x08
+#define TLAN_DEF_DEVICE_ID 0x0A
+#define TLAN_DEF_REVISION 0x0C
+#define TLAN_DEF_SUBCLASS 0x0D
+#define TLAN_DEF_MIN_LAT 0x0E
+#define TLAN_DEF_MAX_LAT 0x0F
+#define TLAN_AREG_0 0x10
+#define TLAN_AREG_1 0x16
+#define TLAN_AREG_2 0x1C
+#define TLAN_AREG_3 0x22
+#define TLAN_HASH_1 0x28
+#define TLAN_HASH_2 0x2C
+#define TLAN_GOOD_TX_FRMS 0x30
+#define TLAN_TX_UNDERUNS 0x33
+#define TLAN_GOOD_RX_FRMS 0x34
+#define TLAN_RX_OVERRUNS 0x37
+#define TLAN_DEFERRED_TX 0x38
+#define TLAN_CRC_ERRORS 0x3A
+#define TLAN_CODE_ERRORS 0x3B
+#define TLAN_MULTICOL_FRMS 0x3C
+#define TLAN_SINGLECOL_FRMS 0x3E
+#define TLAN_EXCESSCOL_FRMS 0x40
+#define TLAN_LATE_COLS 0x41
+#define TLAN_CARRIER_LOSS 0x42
+#define TLAN_ACOMMIT 0x43
+#define TLAN_LED_REG 0x44
+#define TLAN_LED_ACT 0x10
+#define TLAN_LED_LINK 0x01
+#define TLAN_BSIZE_REG 0x45
+#define TLAN_MAX_RX 0x46
+#define TLAN_INT_DIS 0x48
+#define TLAN_ID_TX_EOC 0x04
+#define TLAN_ID_RX_EOF 0x02
+#define TLAN_ID_RX_EOC 0x01
+
+
+
+/* ThunderLAN Interrupt Codes */
+
+#define TLAN_INT_NUMBER_OF_INTS 8
+
+#define TLAN_INT_NONE 0x0000
+#define TLAN_INT_TX_EOF 0x0001
+#define TLAN_INT_STAT_OVERFLOW 0x0002
+#define TLAN_INT_RX_EOF 0x0003
+#define TLAN_INT_DUMMY 0x0004
+#define TLAN_INT_TX_EOC 0x0005
+#define TLAN_INT_STATUS_CHECK 0x0006
+#define TLAN_INT_RX_EOC 0x0007
+
+
+
+/* ThunderLAN MII Registers */
+
+/* Generic MII/PHY Registers */
+
+#define MII_GEN_CTL 0x00
+#define MII_GC_RESET 0x8000
+#define MII_GC_LOOPBK 0x4000
+#define MII_GC_SPEEDSEL 0x2000
+#define MII_GC_AUTOENB 0x1000
+#define MII_GC_PDOWN 0x0800
+#define MII_GC_ISOLATE 0x0400
+#define MII_GC_AUTORSRT 0x0200
+#define MII_GC_DUPLEX 0x0100
+#define MII_GC_COLTEST 0x0080
+#define MII_GC_RESERVED 0x007F
+#define MII_GEN_STS 0x01
+#define MII_GS_100BT4 0x8000
+#define MII_GS_100BTXFD 0x4000
+#define MII_GS_100BTXHD 0x2000
+#define MII_GS_10BTFD 0x1000
+#define MII_GS_10BTHD 0x0800
+#define MII_GS_RESERVED 0x07C0
+#define MII_GS_AUTOCMPLT 0x0020
+#define MII_GS_RFLT 0x0010
+#define MII_GS_AUTONEG 0x0008
+#define MII_GS_LINK 0x0004
+#define MII_GS_JABBER 0x0002
+#define MII_GS_EXTCAP 0x0001
+#define MII_GEN_ID_HI 0x02
+#define MII_GEN_ID_LO 0x03
+#define MII_GIL_OUI 0xFC00
+#define MII_GIL_MODEL 0x03F0
+#define MII_GIL_REVISION 0x000F
+#define MII_AN_ADV 0x04
+#define MII_AN_LPA 0x05
+#define MII_AN_EXP 0x06
+
+/* ThunderLAN Specific MII/PHY Registers */
+
+#define TLAN_TLPHY_ID 0x10
+#define TLAN_TLPHY_CTL 0x11
+#define TLAN_TC_IGLINK 0x8000
+#define TLAN_TC_SWAPOL 0x4000
+#define TLAN_TC_AUISEL 0x2000
+#define TLAN_TC_SQEEN 0x1000
+#define TLAN_TC_MTEST 0x0800
+#define TLAN_TC_RESERVED 0x07F8
+#define TLAN_TC_NFEW 0x0004
+#define TLAN_TC_INTEN 0x0002
+#define TLAN_TC_TINT 0x0001
+#define TLAN_TLPHY_STS 0x12
+#define TLAN_TS_MINT 0x8000
+#define TLAN_TS_PHOK 0x4000
+#define TLAN_TS_POLOK 0x2000
+#define TLAN_TS_TPENERGY 0x1000
+#define TLAN_TS_RESERVED 0x0FFF
+#define TLAN_TLPHY_PAR 0x19
+#define TLAN_PHY_CIM_STAT 0x0020
+#define TLAN_PHY_SPEED_100 0x0040
+#define TLAN_PHY_DUPLEX_FULL 0x0080
+#define TLAN_PHY_AN_EN_STAT 0x0400
+
+/* National Sem. & Level1 PHY id's */
+#define NAT_SEM_ID1 0x2000
+#define NAT_SEM_ID2 0x5C01
+#define LEVEL1_ID1 0x7810
+#define LEVEL1_ID2 0x0000
+
+#define CIRC_INC(a, b) if (++a >= b) a = 0
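+
+/* For example (illustrative), CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS)
+ * advances the RX ring index with wraparound.
+ */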
+
+/* Routines to access internal registers. */
+
+static inline u8 tlan_dio_read8(u16 base_addr, u16 internal_addr)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3));
+
+}
+
+
+
+
+static inline u16 tlan_dio_read16(u16 base_addr, u16 internal_addr)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2));
+
+}
+
+
+
+
+static inline u32 tlan_dio_read32(u16 base_addr, u16 internal_addr)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ return inl(base_addr + TLAN_DIO_DATA);
+
+}
+
+
+
+
+static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr, u8 data)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3));
+
+}
+
+
+
+
+static inline void tlan_dio_write16(u16 base_addr, u16 internal_addr, u16 data)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
+
+}
+
+
+
+
+static inline void tlan_dio_write32(u16 base_addr, u16 internal_addr, u32 data)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
+
+}
+
+#define tlan_clear_bit(bit, port) outb_p(inb_p(port) & ~bit, port)
+#define tlan_get_bit(bit, port) ((int) (inb_p(port) & bit))
+#define tlan_set_bit(bit, port) outb_p(inb_p(port) | bit, port)
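+
+/* Usage sketch (illustrative): a read-modify-write of an internal
+ * register through the DIO window, e.g. turning on the link LED:
+ *
+ *	u8 led = tlan_dio_read8(dev->base_addr, TLAN_LED_REG);
+ *	tlan_dio_write8(dev->base_addr, TLAN_LED_REG, led | TLAN_LED_LINK);
+ */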
+
+/*
+ * given 6 bytes, view them as 8 6-bit numbers and return the XOR of those
+ * the code below is about seven times as fast as the original code
+ *
+ * The original code was:
+ *
+ * u32 xor(u32 a, u32 b) { return ((a && !b ) || (! a && b )); }
+ *
+ * #define XOR8(a, b, c, d, e, f, g, h) \
+ * xor(a, xor(b, xor(c, xor(d, xor(e, xor(f, xor(g, h)) ) ) ) ) )
+ * #define DA(a, bit) (( (u8) a[bit/8] ) & ( (u8) (1 << bit%8)) )
+ *
+ * hash = XOR8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
+ * DA(a,30), DA(a,36), DA(a,42));
+ * hash |= XOR8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
+ * DA(a,31), DA(a,37), DA(a,43)) << 1;
+ * hash |= XOR8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
+ * DA(a,32), DA(a,38), DA(a,44)) << 2;
+ * hash |= XOR8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
+ * DA(a,33), DA(a,39), DA(a,45)) << 3;
+ * hash |= XOR8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
+ * DA(a,34), DA(a,40), DA(a,46)) << 4;
+ * hash |= XOR8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
+ * DA(a,35), DA(a,41), DA(a,47)) << 5;
+ *
+ */
+static inline u32 tlan_hash_func(const u8 *a)
+{
+ u8 hash;
+
+ hash = (a[0]^a[3]); /* & 077 */
+ hash ^= ((a[0]^a[3])>>6); /* & 003 */
+ hash ^= ((a[1]^a[4])<<2); /* & 074 */
+ hash ^= ((a[1]^a[4])>>4); /* & 017 */
+ hash ^= ((a[2]^a[5])<<4); /* & 060 */
+ hash ^= ((a[2]^a[5])>>2); /* & 077 */
+
+ return hash & 077;
+}
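+
+/* Example (illustrative): when filtering multicast addresses, the hash
+ * (0..63) selects one bit across the HASH_1/HASH_2 registers; "hash1"
+ * and "hash2" here are hypothetical shadow copies of those registers:
+ *
+ *	u32 offset = tlan_hash_func(addr);
+ *
+ *	if (offset < 32)
+ *		hash1 |= (1 << offset);
+ *	else
+ *		hash2 |= (1 << (offset - 32));
+ */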
+#endif
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
new file mode 100644
index 000000000000..2d9218f86bca
--- /dev/null
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -0,0 +1,15 @@
+#
+# Tilera network device configuration
+#
+
+config TILE_NET
+ tristate "Tilera GBE/XGBE network driver support"
+ depends on TILE
+ default y
+ select CRC32
+ ---help---
+ This is a standard Linux network device driver for the
+ on-chip Tilera Gigabit Ethernet and XAUI interfaces.
+
+ To compile this driver as a module, choose M here: the module
+ will be called tile_net.
diff --git a/drivers/net/ethernet/tile/Makefile b/drivers/net/ethernet/tile/Makefile
new file mode 100644
index 000000000000..f634f142cab4
--- /dev/null
+++ b/drivers/net/ethernet/tile/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the TILE on-chip networking support.
+#
+
+obj-$(CONFIG_TILE_NET) += tile_net.o
+ifdef CONFIG_TILEGX
+tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o
+else
+tile_net-objs := tilepro.o
+endif
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
new file mode 100644
index 000000000000..1e2af96fc29c
--- /dev/null
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -0,0 +1,2465 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/slab.h> /* kmalloc() */
+#include <linux/errno.h> /* error codes */
+#include <linux/types.h> /* size_t */
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/netdevice.h> /* struct device, and other headers */
+#include <linux/etherdevice.h> /* eth_type_trans */
+#include <linux/skbuff.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/hugetlb.h>
+#include <linux/in6.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <asm/checksum.h>
+#include <asm/homecache.h>
+
+#include <hv/drv_xgbe_intf.h>
+#include <hv/drv_xgbe_impl.h>
+#include <hv/hypervisor.h>
+#include <hv/netio_intf.h>
+
+/* For TSO */
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+
+/*
+ * First, "tile_net_init_module()" initializes all four "devices" which
+ * can be used by linux.
+ *
+ * Then, "ifconfig DEVICE up" calls "tile_net_open()", which analyzes
+ * the network cpus, then uses "tile_net_open_aux()" to initialize
+ * LIPP/LEPP, and then uses "tile_net_open_inner()" to register all
+ * the tiles, provide buffers to LIPP, allow ingress to start, and
+ * turn on hypervisor interrupt handling (and NAPI) on all tiles.
+ *
+ * If registration fails due to the link being down, then "retry_work"
+ * is used to keep calling "tile_net_open_inner()" until it succeeds.
+ *
+ * If "ifconfig DEVICE down" is called, it uses "tile_net_stop()" to
+ * stop egress, drain the LIPP buffers, unregister all the tiles, stop
+ * LIPP/LEPP, and wipe the LEPP queue.
+ *
+ * We start out with the ingress interrupt enabled on each CPU. When
+ * this interrupt fires, we disable it, and call "napi_schedule()".
+ * This will cause "tile_net_poll()" to be called, which will pull
+ * packets from the netio queue, filtering them out, or passing them
+ * to "netif_receive_skb()". If our budget is exhausted, we will
+ * return, knowing we will be called again later. Otherwise, we
+ * reenable the ingress interrupt, and call "napi_complete()".
+ *
+ * HACK: Since disabling the ingress interrupt is not reliable, we
+ * ignore the interrupt if the global "active" flag is false.
+ *
+ *
+ * NOTE: The use of "native_driver" ensures that EPP exists, and that
+ * we are using "LIPP" and "LEPP".
+ *
+ * NOTE: Failing to free completions for an arbitrarily long time
+ * (which is defined to be illegal) does in fact cause bizarre
+ * problems. The "egress_timer" helps prevent this from happening.
+ */
+
+
+/* HACK: Allow use of "jumbo" packets. */
+/* This should be 1500 if "jumbo" is not set in LIPP. */
+/* This should be at most 10226 (10240 - 14) if "jumbo" is set in LIPP. */
+/* ISSUE: This has not been thoroughly tested (except at 1500). */
+#define TILE_NET_MTU 1500
+
+/* HACK: Define to support GSO. */
+/* ISSUE: This may actually hurt performance of the TCP blaster. */
+/* #define TILE_NET_GSO */
+
+/* Define this to collapse "duplicate" acks. */
+/* #define IGNORE_DUP_ACKS */
+
+/* HACK: Define this to verify incoming packets. */
+/* #define TILE_NET_VERIFY_INGRESS */
+
+/* Use 3000 to enable the Linux Traffic Control (QoS) layer, else 0. */
+#define TILE_NET_TX_QUEUE_LEN 0
+
+/* Define to dump packets (prints out the whole packet on tx and rx). */
+/* #define TILE_NET_DUMP_PACKETS */
+
+/* Define to enable debug spew (all PDEBUG's are enabled). */
+/* #define TILE_NET_DEBUG */
+
+
+/* Define to activate paranoia checks. */
+/* #define TILE_NET_PARANOIA */
+
+/* Default transmit lockup timeout period, in jiffies. */
+#define TILE_NET_TIMEOUT (5 * HZ)
+
+/* Default retry interval for bringing up the NetIO interface, in jiffies. */
+#define TILE_NET_RETRY_INTERVAL (5 * HZ)
+
+/* Number of ports (xgbe0, xgbe1, gbe0, gbe1). */
+#define TILE_NET_DEVS 4
+
+
+
+/* Paranoia. */
+#if NET_IP_ALIGN != LIPP_PACKET_PADDING
+#error "NET_IP_ALIGN must match LIPP_PACKET_PADDING."
+#endif
+
+
+/* Debug print. */
+#ifdef TILE_NET_DEBUG
+#define PDEBUG(fmt, args...) net_printk(fmt, ## args)
+#else
+#define PDEBUG(fmt, args...)
+#endif
+
+
+MODULE_AUTHOR("Tilera");
+MODULE_LICENSE("GPL");
+
+
+/*
+ * Queue of incoming packets for a specific cpu and device.
+ *
+ * Includes a pointer to the "system" data, and the actual "user" data.
+ */
+struct tile_netio_queue {
+ netio_queue_impl_t *__system_part;
+ netio_queue_user_impl_t __user_part;
+
+};
+
+
+/*
+ * Statistics counters for a specific cpu and device.
+ */
+struct tile_net_stats_t {
+ u32 rx_packets;
+ u32 rx_bytes;
+ u32 tx_packets;
+ u32 tx_bytes;
+};
+
+
+/*
+ * Info for a specific cpu and device.
+ *
+ * ISSUE: There is a "dev" pointer in "napi" as well.
+ */
+struct tile_net_cpu {
+ /* The NAPI struct. */
+ struct napi_struct napi;
+ /* Packet queue. */
+ struct tile_netio_queue queue;
+ /* Statistics. */
+ struct tile_net_stats_t stats;
+ /* True iff NAPI is enabled. */
+ bool napi_enabled;
+ /* True if this tile has successfully registered with the IPP. */
+ bool registered;
+ /* True if the link was down last time we tried to register. */
+ bool link_down;
+ /* True if "egress_timer" is scheduled. */
+ bool egress_timer_scheduled;
+ /* Number of small sk_buffs which must still be provided. */
+ unsigned int num_needed_small_buffers;
+ /* Number of large sk_buffs which must still be provided. */
+ unsigned int num_needed_large_buffers;
+ /* A timer for handling egress completions. */
+ struct timer_list egress_timer;
+};
+
+
+/*
+ * Info for a specific device.
+ */
+struct tile_net_priv {
+ /* Our network device. */
+ struct net_device *dev;
+ /* Pages making up the egress queue. */
+ struct page *eq_pages;
+ /* Address of the actual egress queue. */
+ lepp_queue_t *eq;
+ /* Protects "eq". */
+ spinlock_t eq_lock;
+ /* The hypervisor handle for this interface. */
+ int hv_devhdl;
+ /* The intr bit mask that IDs this device. */
+ u32 intr_id;
+ /* True iff "tile_net_open_aux()" has succeeded. */
+ bool partly_opened;
+ /* True iff the device is "active". */
+ bool active;
+ /* Effective network cpus. */
+ struct cpumask network_cpus_map;
+ /* Number of network cpus. */
+ int network_cpus_count;
+ /* Credits per network cpu. */
+ int network_cpus_credits;
+ /* Network stats. */
+ struct net_device_stats stats;
+ /* For NetIO bringup retries. */
+ struct delayed_work retry_work;
+ /* Quick access to per cpu data. */
+ struct tile_net_cpu *cpu[NR_CPUS];
+};
+
+/* Log2 of the number of small pages needed for the egress queue. */
+#define EQ_ORDER get_order(sizeof(lepp_queue_t))
+/* Size of the egress queue's pages. */
+#define EQ_SIZE (1 << (PAGE_SHIFT + EQ_ORDER))
+
+/*
+ * The actual devices (xgbe0, xgbe1, gbe0, gbe1).
+ */
+static struct net_device *tile_net_devs[TILE_NET_DEVS];
+
+/*
+ * The "tile_net_cpu" structures for each device.
+ */
+static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe0);
+static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe1);
+static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe0);
+static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe1);
+
+
+/*
+ * True if "network_cpus" was specified.
+ */
+static bool network_cpus_used;
+
+/*
+ * The actual cpus in "network_cpus".
+ */
+static struct cpumask network_cpus_map;
+
+
+
+#ifdef TILE_NET_DEBUG
+/*
+ * printk with extra stuff.
+ *
+ * We print the CPU we're running in brackets.
+ */
+static void net_printk(char *fmt, ...)
+{
+ int i;
+ int len;
+ va_list args;
+ static char buf[256];
+
+ len = sprintf(buf, "tile_net[%2.2d]: ", smp_processor_id());
+ va_start(args, fmt);
+ i = vscnprintf(buf + len, sizeof(buf) - len - 1, fmt, args);
+ va_end(args);
+ buf[255] = '\0';
+ pr_notice(buf);
+}
+#endif
+
+
+#ifdef TILE_NET_DUMP_PACKETS
+/*
+ * Dump a packet.
+ */
+static void dump_packet(unsigned char *data, unsigned long length, char *s)
+{
+ int my_cpu = smp_processor_id();
+
+ unsigned long i;
+ char buf[128];
+
+ static unsigned int count;
+
+ pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n",
+ data, length, s, count++);
+
+ pr_info("\n");
+
+ for (i = 0; i < length; i++) {
+ if ((i & 0xf) == 0)
+ sprintf(buf, "[%02d] %8.8lx:", my_cpu, i);
+ sprintf(buf + strlen(buf), " %2.2x", data[i]);
+ if ((i & 0xf) == 0xf || i == length - 1) {
+ strcat(buf, "\n");
+ pr_info("%s", buf);
+ }
+ }
+}
+#endif
+
+
+/*
+ * Provide support for the __netio_fastio1() swint
+ * (see <hv/drv_xgbe_intf.h> for how it is used).
+ *
+ * The fastio swint2 call may clobber all the caller-saved registers.
+ * It rarely clobbers memory, but we allow for the possibility in
+ * the signature just to be on the safe side.
+ *
+ * Also, gcc doesn't seem to allow an input operand to be
+ * clobbered, so we fake it with dummy outputs.
+ *
+ * This function can't be static because of the way it is declared
+ * in the netio header.
+ */
+inline int __netio_fastio1(u32 fastio_index, u32 arg0)
+{
+ long result, clobber_r1, clobber_r10;
+ asm volatile("swint2"
+ : "=R00" (result),
+ "=R01" (clobber_r1), "=R10" (clobber_r10)
+ : "R10" (fastio_index), "R01" (arg0)
+ : "memory", "r2", "r3", "r4",
+ "r5", "r6", "r7", "r8", "r9",
+ "r11", "r12", "r13", "r14",
+ "r15", "r16", "r17", "r18", "r19",
+ "r20", "r21", "r22", "r23", "r24",
+ "r25", "r26", "r27", "r28", "r29");
+ return result;
+}
+
+
+/*
+ * Provide a linux buffer to LIPP.
+ */
+static void tile_net_provide_linux_buffer(struct tile_net_cpu *info,
+ void *va, bool small)
+{
+ struct tile_netio_queue *queue = &info->queue;
+
+ /* Convert "va" and "small" to "linux_buffer_t". */
+ unsigned int buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small;
+
+ __netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer);
+}
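+
+/* For example (illustrative): a 128-byte-aligned buffer at physical
+ * address 0x1000 with small=1 would encode as
+ * ((0x1000 >> 7) << 1) + 1 = 0x41.
+ */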
+
+
+/*
+ * Provide a linux buffer for LIPP.
+ *
+ * Note that the ACTUAL allocation for each buffer is a "struct sk_buff",
+ * plus a chunk of memory that includes not only the requested bytes, but
+ * also NET_SKB_PAD bytes of initial padding, and a "struct skb_shared_info".
+ *
+ * Note that "struct skb_shared_info" is 88 bytes with 64K pages and
+ * 268 bytes with 4K pages (since the frags[] array needs 18 entries).
+ *
+ * Without jumbo packets, the maximum packet size will be 1536 bytes,
+ * and we use 2 bytes (NET_IP_ALIGN) of padding. ISSUE: If we told
+ * the hardware to clip at 1518 bytes instead of 1536 bytes, then we
+ * could save an entire cache line, but in practice, we don't need it.
+ *
+ * Since CPAs are 38 bits, and we can only encode the high 31 bits in
+ * a "linux_buffer_t", the low 7 bits must be zero, and thus, we must
+ * align the actual "va" mod 128.
+ *
+ * We assume that the underlying "head" will be aligned mod 64. Note
+ * that in practice, we have seen "head" NOT aligned mod 128 even when
+ * using 2048 byte allocations, which is surprising.
+ *
+ * If "head" WAS always aligned mod 128, we could change LIPP to
+ * assume that the low SIX bits are zero, and the 7th bit is one, that
+ * is, align the actual "va" mod 128 plus 64, which would be "free".
+ *
+ * For now, the actual "head" pointer points at NET_SKB_PAD bytes of
+ * padding, plus 28 or 92 bytes of extra padding, plus the sk_buff
+ * pointer, plus the NET_IP_ALIGN padding, plus 126 or 1536 bytes for
+ * the actual packet, plus 62 bytes of empty padding, plus some
+ * padding and the "struct skb_shared_info".
+ *
+ * With 64K pages, a large buffer thus needs 32+92+4+2+1536+62+88
+ * bytes, or 1816 bytes, which fits comfortably into 2048 bytes.
+ *
+ * With 64K pages, a small buffer thus needs 32+92+4+2+126+88
+ * bytes, or 344 bytes, which means we are wasting 64+ bytes, and
+ * could presumably increase the size of small buffers.
+ *
+ * With 4K pages, a large buffer thus needs 32+92+4+2+1536+62+268
+ * bytes, or 1996 bytes, which fits comfortably into 2048 bytes.
+ *
+ * With 4K pages, a small buffer thus needs 32+92+4+2+126+268
+ * bytes, or 524 bytes, which is annoyingly wasteful.
+ *
+ * Maybe we should increase LIPP_SMALL_PACKET_SIZE to 192?
+ *
+ * ISSUE: Maybe we should increase "NET_SKB_PAD" to 64?
+ */
+static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
+ bool small)
+{
+#if TILE_NET_MTU <= 1536
+ /* Without "jumbo", 2 + 1536 should be sufficient. */
+ unsigned int large_size = NET_IP_ALIGN + 1536;
+#else
+ /* ISSUE: This has not been tested. */
+ unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100;
+#endif
+
+ /* Avoid "false sharing" with last cache line. */
+ /* ISSUE: This is already done by "dev_alloc_skb()". */
+ unsigned int len =
+ (((small ? LIPP_SMALL_PACKET_SIZE : large_size) +
+ CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE());
+
+ unsigned int padding = 128 - NET_SKB_PAD;
+ unsigned int align;
+
+ struct sk_buff *skb;
+ void *va;
+
+ struct sk_buff **skb_ptr;
+
+ /* Request 128 - NET_SKB_PAD (typically 96) extra bytes for alignment. */
+ skb = dev_alloc_skb(len + padding);
+ if (skb == NULL)
+ return false;
+
+ /* Skip 32 or 96 bytes to align "data" mod 128. */
+ align = -(long)skb->data & (128 - 1);
+ BUG_ON(align > padding);
+ skb_reserve(skb, align);
+
+ /* This address is given to IPP. */
+ va = skb->data;
+
+ /* Buffers must not span a huge page. */
+ BUG_ON(((((long)va & ~HPAGE_MASK) + len) & HPAGE_MASK) != 0);
+
+#ifdef TILE_NET_PARANOIA
+#if CHIP_HAS_CBOX_HOME_MAP()
+ if (hash_default) {
+ HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va);
+ if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
+ panic("Non-HFH ingress buffer! VA=%p Mode=%d PTE=%llx",
+ va, hv_pte_get_mode(pte), hv_pte_val(pte));
+ }
+#endif
+#endif
+
+ /* Invalidate the packet buffer. */
+ if (!hash_default)
+ __inv_buffer(va, len);
+
+ /* Skip two bytes to satisfy LIPP assumptions. */
+ /* Note that this aligns IP on a 16 byte boundary. */
+ /* ISSUE: Do this when the packet arrives? */
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ /* Save a back-pointer to 'skb'. */
+ skb_ptr = va - sizeof(*skb_ptr);
+ *skb_ptr = skb;
+
+ /* Make sure "skb_ptr" has been flushed. */
+ __insn_mf();
+
+ /* Provide the new buffer. */
+ tile_net_provide_linux_buffer(info, va, small);
+
+ return true;
+}
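+
+/*
+ * The rounding idiom used above, restated (a sketch; the driver
+ * open-codes it): round "size" up to the next multiple of the L2
+ * cache line size, which is assumed to be a power of two.
+ */
+static inline unsigned int tile_net_round_to_line(unsigned int size)
+{
+ return (size + CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE();
+}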
+
+
+/*
+ * Provide linux buffers for LIPP.
+ */
+static void tile_net_provide_needed_buffers(struct tile_net_cpu *info)
+{
+ while (info->num_needed_small_buffers != 0) {
+ if (!tile_net_provide_needed_buffer(info, true))
+ goto oops;
+ info->num_needed_small_buffers--;
+ }
+
+ while (info->num_needed_large_buffers != 0) {
+ if (!tile_net_provide_needed_buffer(info, false))
+ goto oops;
+ info->num_needed_large_buffers--;
+ }
+
+ return;
+
+oops:
+
+ /* Add a description to the page allocation failure dump. */
+ pr_notice("Could not provide a linux buffer to LIPP.\n");
+}
+
+
+/*
+ * Grab some LEPP completions, and store them in "comps", of size
+ * "comps_size", and return the number of completions which were
+ * stored, so the caller can free them.
+ */
+static unsigned int tile_net_lepp_grab_comps(lepp_queue_t *eq,
+ struct sk_buff *comps[],
+ unsigned int comps_size,
+ unsigned int min_size)
+{
+ unsigned int n = 0;
+
+ unsigned int comp_head = eq->comp_head;
+ unsigned int comp_busy = eq->comp_busy;
+
+ while (comp_head != comp_busy && n < comps_size) {
+ comps[n++] = eq->comps[comp_head];
+ LEPP_QINC(comp_head);
+ }
+
+ if (n < min_size)
+ return 0;
+
+ eq->comp_head = comp_head;
+
+ return n;
+}
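+
+/*
+ * Usage note: callers pass a small on-stack array and kfree_skb()
+ * whatever is returned (see tile_net_lepp_free_comps() below); a
+ * nonzero "min_size" lets the transmit path skip the frees entirely
+ * unless a worthwhile batch has accumulated.
+ */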
+
+
+/*
+ * Free some comps, and return true iff there are still some pending.
+ */
+static bool tile_net_lepp_free_comps(struct net_device *dev, bool all)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ lepp_queue_t *eq = priv->eq;
+
+ struct sk_buff *olds[64];
+ unsigned int wanted = 64;
+ unsigned int i, n;
+ bool pending;
+
+ spin_lock(&priv->eq_lock);
+
+ if (all)
+ eq->comp_busy = eq->comp_tail;
+
+ n = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
+
+ pending = (eq->comp_head != eq->comp_tail);
+
+ spin_unlock(&priv->eq_lock);
+
+ for (i = 0; i < n; i++)
+ kfree_skb(olds[i]);
+
+ return pending;
+}
+
+
+/*
+ * Make sure the egress timer is scheduled.
+ *
+ * Note that we use "schedule if not scheduled" logic instead of the more
+ * obvious "reschedule" logic, because "reschedule" is fairly expensive.
+ */
+static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
+{
+ if (!info->egress_timer_scheduled) {
+ mod_timer_pinned(&info->egress_timer, jiffies + 1);
+ info->egress_timer_scheduled = true;
+ }
+}
+
+
+/*
+ * The "function" for "info->egress_timer".
+ *
+ * This timer will reschedule itself as long as there are any pending
+ * completions expected (on behalf of any tile).
+ *
+ * ISSUE: Realistically, will the timer ever stop scheduling itself?
+ *
+ * ISSUE: This timer is almost never actually needed, so just use a global
+ * timer that can run on any tile.
+ *
+ * ISSUE: Maybe instead track number of expected completions, and free
+ * only that many, resetting to zero if "pending" is ever false.
+ */
+static void tile_net_handle_egress_timer(unsigned long arg)
+{
+ struct tile_net_cpu *info = (struct tile_net_cpu *)arg;
+ struct net_device *dev = info->napi.dev;
+
+ /* The timer is no longer scheduled. */
+ info->egress_timer_scheduled = false;
+
+ /* Free comps, and reschedule timer if more are pending. */
+ if (tile_net_lepp_free_comps(dev, false))
+ tile_net_schedule_egress_timer(info);
+}
+
+
+#ifdef IGNORE_DUP_ACKS
+
+/*
+ * Help detect "duplicate" ACKs. These are sequential packets (for a
+ * given flow) which are exactly 66 bytes long, sharing everything but
+ * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32,
+ * Tstamps=10@0x38. The ID's are +1, the Hsum's are -1, the Ack's are
+ * +N, and the Tstamps are usually identical.
+ *
+ * NOTE: Apparently truly duplicate acks (with identical "ack" values),
+ * should not be collapsed, as they are used for some kind of flow control.
+ */
+static bool is_dup_ack(char *s1, char *s2, unsigned int len)
+{
+ int i;
+
+ unsigned long long ignorable = 0;
+
+ /* Identification. */
+ ignorable |= (1ULL << 0x12);
+ ignorable |= (1ULL << 0x13);
+
+ /* Header checksum. */
+ ignorable |= (1ULL << 0x18);
+ ignorable |= (1ULL << 0x19);
+
+ /* ACK. */
+ ignorable |= (1ULL << 0x2a);
+ ignorable |= (1ULL << 0x2b);
+ ignorable |= (1ULL << 0x2c);
+ ignorable |= (1ULL << 0x2d);
+
+ /* WinSize. */
+ ignorable |= (1ULL << 0x30);
+ ignorable |= (1ULL << 0x31);
+
+ /* Checksum. */
+ ignorable |= (1ULL << 0x32);
+ ignorable |= (1ULL << 0x33);
+
+ for (i = 0; i < len; i++, ignorable >>= 1) {
+
+ if ((ignorable & 1) || (s1[i] == s2[i]))
+ continue;
+
+#ifdef TILE_NET_DEBUG
+ /* HACK: Mention non-timestamp diffs. */
+ if (i < 0x38 && i != 0x2f &&
+ net_ratelimit())
+ pr_info("Diff at 0x%x\n", i);
+#endif
+
+ return false;
+ }
+
+#ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS
+ /* HACK: Do not suppress truly duplicate ACKs. */
+ /* ISSUE: Is this actually necessary or helpful? */
+ if (s1[0x2a] == s2[0x2a] &&
+ s1[0x2b] == s2[0x2b] &&
+ s1[0x2c] == s2[0x2c] &&
+ s1[0x2d] == s2[0x2d]) {
+ return false;
+ }
+#endif
+
+ return true;
+}
+
+#endif
+
+
+
+static void tile_net_discard_aux(struct tile_net_cpu *info, int index)
+{
+ struct tile_netio_queue *queue = &info->queue;
+ netio_queue_impl_t *qsp = queue->__system_part;
+ netio_queue_user_impl_t *qup = &queue->__user_part;
+
+ int index2_aux = index + sizeof(netio_pkt_t);
+ int index2 =
+ ((index2_aux ==
+ qsp->__packet_receive_queue.__last_packet_plus_one) ?
+ 0 : index2_aux);
+
+ netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);
+
+ /* Extract the "linux_buffer_t". */
+ unsigned int buffer = pkt->__packet.word;
+
+ /* Convert "linux_buffer_t" to "va". */
+ void *va = __va((phys_addr_t)(buffer >> 1) << 7);
+
+ /* Acquire the associated "skb". */
+ struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
+ struct sk_buff *skb = *skb_ptr;
+
+ kfree_skb(skb);
+
+ /* Consume this packet. */
+ qup->__packet_receive_read = index2;
+}
+
+
+/*
+ * Like "tile_net_poll()", but just discard packets.
+ */
+static void tile_net_discard_packets(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+ struct tile_netio_queue *queue = &info->queue;
+ netio_queue_impl_t *qsp = queue->__system_part;
+ netio_queue_user_impl_t *qup = &queue->__user_part;
+
+ while (qup->__packet_receive_read !=
+ qsp->__packet_receive_queue.__packet_write) {
+ int index = qup->__packet_receive_read;
+ tile_net_discard_aux(info, index);
+ }
+}
+
+
+/*
+ * Handle the next packet. Return true if "processed", false if "filtered".
+ */
+static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
+{
+ struct net_device *dev = info->napi.dev;
+
+ struct tile_netio_queue *queue = &info->queue;
+ netio_queue_impl_t *qsp = queue->__system_part;
+ netio_queue_user_impl_t *qup = &queue->__user_part;
+ struct tile_net_stats_t *stats = &info->stats;
+
+ int filter;
+
+ int index2_aux = index + sizeof(netio_pkt_t);
+ int index2 =
+ ((index2_aux ==
+ qsp->__packet_receive_queue.__last_packet_plus_one) ?
+ 0 : index2_aux);
+
+ netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);
+
+ netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);
+
+ /* Extract the packet size. FIXME: Shouldn't the second line */
+ /* get subtracted? Mostly moot, since it should be "zero". */
+ unsigned long len =
+ (NETIO_PKT_CUSTOM_LENGTH(pkt) +
+ NET_IP_ALIGN - NETIO_PACKET_PADDING);
+
+ /* Extract the "linux_buffer_t". */
+ unsigned int buffer = pkt->__packet.word;
+
+ /* Extract "small" (vs "large"). */
+ bool small = ((buffer & 1) != 0);
+
+ /* Convert "linux_buffer_t" to "va". */
+ void *va = __va((phys_addr_t)(buffer >> 1) << 7);
+
+ /* Extract the packet data pointer. */
+ /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
+ unsigned char *buf = va + NET_IP_ALIGN;
+
+ /* Invalidate the packet buffer. */
+ if (!hash_default)
+ __inv_buffer(buf, len);
+
+ /* ISSUE: Is this needed? */
+ dev->last_rx = jiffies;
+
+#ifdef TILE_NET_DUMP_PACKETS
+ dump_packet(buf, len, "rx");
+#endif /* TILE_NET_DUMP_PACKETS */
+
+#ifdef TILE_NET_VERIFY_INGRESS
+ if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) &&
+ NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) {
+ /* Bug 6624: Includes UDP packets with a "zero" checksum. */
+ pr_warning("Bad L4 checksum on %d byte packet.\n", len);
+ }
+ if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) &&
+ NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) {
+ dump_packet(buf, len, "rx");
+ panic("Bad L3 checksum.");
+ }
+ switch (NETIO_PKT_STATUS_M(metadata, pkt)) {
+ case NETIO_PKT_STATUS_OVERSIZE:
+ if (len >= 64) {
+ dump_packet(buf, len, "rx");
+ panic("Unexpected OVERSIZE.");
+ }
+ break;
+ case NETIO_PKT_STATUS_BAD:
+ pr_warning("Unexpected BAD %ld byte packet.\n", len);
+ }
+#endif
+
+ filter = 0;
+
+ /* ISSUE: Filter TCP packets with "bad" checksums? */
+
+ if (!(dev->flags & IFF_UP)) {
+ /* Filter packets received before we're up. */
+ filter = 1;
+ } else if (NETIO_PKT_STATUS_M(metadata, pkt) == NETIO_PKT_STATUS_BAD) {
+ /* Filter "truncated" packets. */
+ filter = 1;
+ } else if (!(dev->flags & IFF_PROMISC)) {
+ /* FIXME: Implement HW multicast filter. */
+ if (!is_multicast_ether_addr(buf)) {
+ /* Filter packets not for our address. */
+ const u8 *mine = dev->dev_addr;
+ filter = compare_ether_addr(mine, buf);
+ }
+ }
+
+ if (filter) {
+
+ /* ISSUE: Update "drop" statistics? */
+
+ tile_net_provide_linux_buffer(info, va, small);
+
+ } else {
+
+ /* Acquire the associated "skb". */
+ struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
+ struct sk_buff *skb = *skb_ptr;
+
+ /* Paranoia. */
+ if (skb->data != buf)
+ panic("Corrupt linux buffer from LIPP! "
+ "VA=%p, skb=%p, skb->data=%p\n",
+ va, skb, skb->data);
+
+ /* Encode the actual packet length. */
+ skb_put(skb, len);
+
+ /* NOTE: This call also sets "skb->dev = dev". */
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* Avoid recomputing "good" TCP/UDP checksums. */
+ if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ netif_receive_skb(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+
+ if (small)
+ info->num_needed_small_buffers++;
+ else
+ info->num_needed_large_buffers++;
+ }
+
+ /* Return "interval" credits once every "interval" packets. */
+ if (--qup->__receive_credit_remaining == 0) {
+ u32 interval = qup->__receive_credit_interval;
+ qup->__receive_credit_remaining = interval;
+ __netio_fastio_return_credits(qup->__fastio_index, interval);
+ }
+
+ /* Consume this packet. */
+ qup->__packet_receive_read = index2;
+
+ return !filter;
+}
+
+
+/*
+ * Handle some packets for the given device on the current CPU.
+ *
+ * If "tile_net_stop()" is called on some other tile while this
+ * function is running, we will return, hopefully before that
+ * other tile asks us to call "napi_disable()".
+ *
+ * The "rotting packet" race condition occurs if a packet arrives
+ * during the extremely narrow window between the queue appearing to
+ * be empty, and the ingress interrupt being re-enabled. This happens
+ * a LOT under heavy network load.
+ */
+static int tile_net_poll(struct napi_struct *napi, int budget)
+{
+ struct net_device *dev = napi->dev;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+ struct tile_netio_queue *queue = &info->queue;
+ netio_queue_impl_t *qsp = queue->__system_part;
+ netio_queue_user_impl_t *qup = &queue->__user_part;
+
+ unsigned int work = 0;
+
+ while (priv->active) {
+ int index = qup->__packet_receive_read;
+ if (index == qsp->__packet_receive_queue.__packet_write)
+ break;
+
+ if (tile_net_poll_aux(info, index)) {
+ if (++work >= budget)
+ goto done;
+ }
+ }
+
+ napi_complete(&info->napi);
+
+ if (!priv->active)
+ goto done;
+
+ /* Re-enable the ingress interrupt. */
+ enable_percpu_irq(priv->intr_id);
+
+ /* HACK: Avoid the "rotting packet" problem (see above). */
+ if (qup->__packet_receive_read !=
+ qsp->__packet_receive_queue.__packet_write) {
+ /* ISSUE: Sometimes this returns zero, presumably */
+ /* because an interrupt was handled for this tile. */
+ (void)napi_reschedule(&info->napi);
+ }
+
+done:
+
+ if (priv->active)
+ tile_net_provide_needed_buffers(info);
+
+ return work;
+}
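+
+/*
+ * The re-enable sequence above follows this generic NAPI pattern
+ * (a sketch, with a hypothetical queue_empty() predicate):
+ *
+ *	napi_complete(napi);
+ *	enable_percpu_irq(irq);
+ *	if (!queue_empty())
+ *		napi_reschedule(napi);
+ *
+ * The final re-check closes the window in which a packet can arrive
+ * after the queue was seen to be empty but before the interrupt was
+ * re-enabled; without it, such a packet would sit unprocessed until
+ * the next unrelated interrupt.
+ */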
+
+
+/*
+ * Handle an ingress interrupt for the given device on the current cpu.
+ *
+ * ISSUE: Sometimes this gets called after "disable_percpu_irq()" has
+ * been called! This is probably due to "pending hypervisor downcalls".
+ *
+ * ISSUE: Is there any race condition between the "napi_schedule()" here
+ * and the "napi_complete()" call above?
+ */
+static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+
+ /* Disable the ingress interrupt. */
+ disable_percpu_irq(priv->intr_id);
+
+ /* Ignore unwanted interrupts. */
+ if (!priv->active)
+ return IRQ_HANDLED;
+
+ /* ISSUE: Sometimes "info->napi_enabled" is false here. */
+
+ napi_schedule(&info->napi);
+
+ return IRQ_HANDLED;
+}
+
+
+/*
+ * One time initialization per interface.
+ */
+static int tile_net_open_aux(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ int ret;
+ int dummy;
+ unsigned int epp_lotar;
+
+ /*
+ * Find out where EPP memory should be homed.
+ */
+ ret = hv_dev_pread(priv->hv_devhdl, 0,
+ (HV_VirtAddr)&epp_lotar, sizeof(epp_lotar),
+ NETIO_EPP_SHM_OFF);
+ if (ret < 0) {
+ pr_err("could not read epp_shm_queue lotar.\n");
+ return -EIO;
+ }
+
+ /*
+ * Home the page on the EPP.
+ */
+ {
+ int epp_home = hv_lotar_to_cpu(epp_lotar);
+ homecache_change_page_home(priv->eq_pages, EQ_ORDER, epp_home);
+ }
+
+ /*
+ * Register the EPP shared memory queue.
+ */
+ {
+ netio_ipp_address_t ea = {
+ .va = 0,
+ .pa = __pa(priv->eq),
+ .pte = hv_pte(0),
+ .size = EQ_SIZE,
+ };
+ ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar);
+ ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3);
+ ret = hv_dev_pwrite(priv->hv_devhdl, 0,
+ (HV_VirtAddr)&ea,
+ sizeof(ea),
+ NETIO_EPP_SHM_OFF);
+ if (ret < 0)
+ return -EIO;
+ }
+
+ /*
+ * Start LIPP/LEPP.
+ */
+ if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
+ sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) {
+ pr_warning("Failed to start LIPP/LEPP.\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+
+/*
+ * Register with hypervisor on the current CPU.
+ *
+ * Strangely, this function does important things even if it "fails",
+ * which is especially common if the link is not up yet. Hopefully
+ * these things are all "harmless" if done twice!
+ */
+static void tile_net_register(void *dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info;
+
+ struct tile_netio_queue *queue;
+
+ /* Only network cpus can receive packets. */
+ int queue_id =
+ cpumask_test_cpu(my_cpu, &priv->network_cpus_map) ? 0 : 255;
+
+ netio_input_config_t config = {
+ .flags = 0,
+ .num_receive_packets = priv->network_cpus_credits,
+ .queue_id = queue_id
+ };
+
+ int ret = 0;
+ netio_queue_impl_t *queuep;
+
+ PDEBUG("tile_net_register(queue_id %d)\n", queue_id);
+
+ if (!strcmp(dev->name, "xgbe0"))
+ info = &__get_cpu_var(hv_xgbe0);
+ else if (!strcmp(dev->name, "xgbe1"))
+ info = &__get_cpu_var(hv_xgbe1);
+ else if (!strcmp(dev->name, "gbe0"))
+ info = &__get_cpu_var(hv_gbe0);
+ else if (!strcmp(dev->name, "gbe1"))
+ info = &__get_cpu_var(hv_gbe1);
+ else
+ BUG();
+
+ /* Initialize the egress timer. */
+ init_timer(&info->egress_timer);
+ info->egress_timer.data = (long)info;
+ info->egress_timer.function = tile_net_handle_egress_timer;
+
+ priv->cpu[my_cpu] = info;
+
+ /*
+ * Register ourselves with LIPP. This does a lot of stuff,
+ * including invoking the LIPP registration code.
+ */
+ ret = hv_dev_pwrite(priv->hv_devhdl, 0,
+ (HV_VirtAddr)&config,
+ sizeof(netio_input_config_t),
+ NETIO_IPP_INPUT_REGISTER_OFF);
+ PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
+ ret);
+ if (ret < 0) {
+ if (ret != NETIO_LINK_DOWN) {
+ printk(KERN_DEBUG "hv_dev_pwrite "
+ "NETIO_IPP_INPUT_REGISTER_OFF failure %d\n",
+ ret);
+ }
+ info->link_down = (ret == NETIO_LINK_DOWN);
+ return;
+ }
+
+ /*
+ * Get the pointer to our queue's system part.
+ */
+
+ ret = hv_dev_pread(priv->hv_devhdl, 0,
+ (HV_VirtAddr)&queuep,
+ sizeof(netio_queue_impl_t *),
+ NETIO_IPP_INPUT_REGISTER_OFF);
+ PDEBUG("hv_dev_pread(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
+ ret);
+ PDEBUG("queuep %p\n", queuep);
+ if (ret <= 0) {
+ /* ISSUE: Shouldn't this be a fatal error? */
+ pr_err("hv_dev_pread NETIO_IPP_INPUT_REGISTER_OFF failure\n");
+ return;
+ }
+
+ queue = &info->queue;
+
+ queue->__system_part = queuep;
+
+ memset(&queue->__user_part, 0, sizeof(netio_queue_user_impl_t));
+
+ /* This is traditionally "config.num_receive_packets / 2". */
+ queue->__user_part.__receive_credit_interval = 4;
+ queue->__user_part.__receive_credit_remaining =
+ queue->__user_part.__receive_credit_interval;
+
+ /*
+ * Get a fastio index from the hypervisor.
+ * ISSUE: Shouldn't this check the result?
+ */
+ ret = hv_dev_pread(priv->hv_devhdl, 0,
+ (HV_VirtAddr)&queue->__user_part.__fastio_index,
+ sizeof(queue->__user_part.__fastio_index),
+ NETIO_IPP_GET_FASTIO_OFF);
+ PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret);
+
+ /* Now we are registered. */
+ info->registered = true;
+}
+
+
+/*
+ * Deregister with hypervisor on the current CPU.
+ *
+ * This simply discards all our credits, so no more packets will be
+ * delivered to this tile. There may still be packets in our queue.
+ *
+ * Also, disable the ingress interrupt.
+ */
+static void tile_net_deregister(void *dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+
+ /* Disable the ingress interrupt. */
+ disable_percpu_irq(priv->intr_id);
+
+ /* Do nothing else if not registered. */
+ if (info == NULL || !info->registered)
+ return;
+
+ {
+ struct tile_netio_queue *queue = &info->queue;
+ netio_queue_user_impl_t *qup = &queue->__user_part;
+
+ /* Discard all our credits. */
+ __netio_fastio_return_credits(qup->__fastio_index, -1);
+ }
+}
+
+
+/*
+ * Unregister with hypervisor on the current CPU.
+ *
+ * Also, disable the ingress interrupt.
+ */
+static void tile_net_unregister(void *dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+
+ int ret;
+ int dummy = 0;
+
+ /* Disable the ingress interrupt. */
+ disable_percpu_irq(priv->intr_id);
+
+ /* Do nothing else if not registered. */
+ if (info == NULL || !info->registered)
+ return;
+
+ /* Unregister ourselves with LIPP/LEPP. */
+ ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
+ sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF);
+ if (ret < 0)
+ panic("Failed to unregister with LIPP/LEPP!\n");
+
+ /* Discard all packets still in our NetIO queue. */
+ tile_net_discard_packets(dev);
+
+ /* Reset state. */
+ info->num_needed_small_buffers = 0;
+ info->num_needed_large_buffers = 0;
+
+ /* Cancel egress timer. */
+ del_timer(&info->egress_timer);
+ info->egress_timer_scheduled = false;
+}
+
+
+/*
+ * Helper function for "tile_net_stop()".
+ *
+ * Also used to handle registration failure in "tile_net_open_inner()",
+ * when the various extra steps in "tile_net_stop()" are not necessary.
+ */
+static void tile_net_stop_aux(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int i;
+
+ int dummy = 0;
+
+ /*
+ * Unregister all tiles, so LIPP will stop delivering packets.
+ * Also, delete all the "napi" objects (sequentially, to protect
+ * "dev->napi_list").
+ */
+ on_each_cpu(tile_net_unregister, (void *)dev, 1);
+ for_each_online_cpu(i) {
+ struct tile_net_cpu *info = priv->cpu[i];
+ if (info != NULL && info->registered) {
+ netif_napi_del(&info->napi);
+ info->registered = false;
+ }
+ }
+
+ /* Stop LIPP/LEPP. */
+ if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
+ sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0)
+ panic("Failed to stop LIPP/LEPP!\n");
+
+ priv->partly_opened = 0;
+}
+
+
+/*
+ * Disable NAPI for the given device on the current cpu.
+ */
+static void tile_net_stop_disable(void *dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+
+ /* Disable NAPI if needed. */
+ if (info != NULL && info->napi_enabled) {
+ napi_disable(&info->napi);
+ info->napi_enabled = false;
+ }
+}
+
+
+/*
+ * Enable NAPI and the ingress interrupt for the given device
+ * on the current cpu.
+ *
+ * ISSUE: Only do this for "network cpus"?
+ */
+static void tile_net_open_enable(void *dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+
+ /* Enable NAPI. */
+ napi_enable(&info->napi);
+ info->napi_enabled = true;
+
+ /* Enable the ingress interrupt. */
+ enable_percpu_irq(priv->intr_id);
+}
+
+
+/*
+ * tile_net_open_inner does most of the work of bringing up the interface.
+ * It's called from tile_net_open(), and also from tile_net_retry_open().
+ * The return value is 0 if the interface was brought up, < 0 if
+ * tile_net_open() should return the return value as an error, and > 0 if
+ * tile_net_open() should return success and schedule a work item to
+ * periodically retry the bringup.
+ */
+static int tile_net_open_inner(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info;
+ struct tile_netio_queue *queue;
+ int result = 0;
+ int i;
+ int dummy = 0;
+
+ /*
+ * First try to register just on the local CPU, and handle any
+ * semi-expected "link down" failure specially. Note that we
+ * do NOT call "tile_net_stop_aux()", unlike below.
+ */
+ tile_net_register(dev);
+ info = priv->cpu[my_cpu];
+ if (!info->registered) {
+ if (info->link_down)
+ return 1;
+ return -EAGAIN;
+ }
+
+ /*
+ * Now register everywhere else. If any registration fails,
+ * even for "link down" (which might not be possible), we
+ * clean up using "tile_net_stop_aux()". Also, add all the
+ * "napi" objects (sequentially, to protect "dev->napi_list").
+ * ISSUE: Only use "netif_napi_add()" for "network cpus"?
+ */
+ smp_call_function(tile_net_register, (void *)dev, 1);
+ for_each_online_cpu(i) {
+ struct tile_net_cpu *info = priv->cpu[i];
+ if (info->registered)
+ netif_napi_add(dev, &info->napi, tile_net_poll, 64);
+ else
+ result = -EAGAIN;
+ }
+ if (result != 0) {
+ tile_net_stop_aux(dev);
+ return result;
+ }
+
+ queue = &info->queue;
+
+ if (priv->intr_id == 0) {
+ unsigned int irq;
+
+ /*
+ * Acquire the irq allocated by the hypervisor. Every
+ * queue gets the same irq. The "__intr_id" field is
+ * "1 << irq", so we use "__ffs()" to extract "irq".
+ */
+ priv->intr_id = queue->__system_part->__intr_id;
+ BUG_ON(priv->intr_id == 0);
+ irq = __ffs(priv->intr_id);
+
+ /*
+ * Register the ingress interrupt handler for this
+ * device, permanently.
+ *
+ * We used to call "free_irq()" in "tile_net_stop()",
+ * and then re-register the handler here every time,
+ * but that caused DNP errors in "handle_IRQ_event()"
+ * because "desc->action" was NULL. See bug 9143.
+ */
+ tile_irq_activate(irq, TILE_IRQ_PERCPU);
+ BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt,
+ 0, dev->name, (void *)dev) != 0);
+ }
+
+ {
+ /* Allocate initial buffers. */
+
+ int max_buffers =
+ priv->network_cpus_count * priv->network_cpus_credits;
+
+ info->num_needed_small_buffers =
+ min(LIPP_SMALL_BUFFERS, max_buffers);
+
+ info->num_needed_large_buffers =
+ min(LIPP_LARGE_BUFFERS, max_buffers);
+
+ tile_net_provide_needed_buffers(info);
+
+ if (info->num_needed_small_buffers != 0 ||
+ info->num_needed_large_buffers != 0)
+ panic("Insufficient memory for buffer stack!");
+ }
+
+ /* We are about to be active. */
+ priv->active = true;
+
+ /* Make sure "active" is visible to all tiles. */
+ mb();
+
+ /* On each tile, enable NAPI and the ingress interrupt. */
+ on_each_cpu(tile_net_open_enable, (void *)dev, 1);
+
+ /* Start LIPP/LEPP and activate "ingress" at the shim. */
+ if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
+ sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0)
+ panic("Failed to activate the LIPP Shim!\n");
+
+ /* Start our transmit queue. */
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+
+/*
+ * Called periodically to retry bringing up the NetIO interface,
+ * if it doesn't come up cleanly during tile_net_open().
+ */
+static void tile_net_open_retry(struct work_struct *w)
+{
+ struct delayed_work *dw =
+ container_of(w, struct delayed_work, work);
+
+ struct tile_net_priv *priv =
+ container_of(dw, struct tile_net_priv, retry_work);
+
+ /*
+ * Try to bring the NetIO interface up. If it fails, reschedule
+ * ourselves to try again later; otherwise, tell Linux we now have
+ * a working link. ISSUE: What if the return value is negative?
+ */
+ if (tile_net_open_inner(priv->dev) != 0)
+ schedule_delayed_work(&priv->retry_work,
+ TILE_NET_RETRY_INTERVAL);
+ else
+ netif_carrier_on(priv->dev);
+}
+
+
+/*
+ * Called when a network interface is made active.
+ *
+ * Returns 0 on success, negative value on failure.
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS (if needed), the watchdog timer
+ * is started, and the stack is notified that the interface is ready.
+ *
+ * If the actual link is not available yet, then we tell Linux that
+ * we have no carrier, and we keep checking until the link comes up.
+ */
+static int tile_net_open(struct net_device *dev)
+{
+ int ret = 0;
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ /*
+ * We rely on priv->partly_opened to tell us if this is the
+ * first time this interface is being brought up. If it is
+ * set, the IPP was already initialized and should not be
+ * initialized again.
+ */
+ if (!priv->partly_opened) {
+
+ int count;
+ int credits;
+
+ /* Initialize LIPP/LEPP, and start the Shim. */
+ ret = tile_net_open_aux(dev);
+ if (ret < 0) {
+ pr_err("tile_net_open_aux failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Analyze the network cpus. */
+
+ if (network_cpus_used)
+ cpumask_copy(&priv->network_cpus_map,
+ &network_cpus_map);
+ else
+ cpumask_copy(&priv->network_cpus_map, cpu_online_mask);
+
+
+ count = cpumask_weight(&priv->network_cpus_map);
+
+ /* Divide the buffers among the cpus, rounding down to an */
+ /* even count, and enforce a floor of 16 credits. */
+ credits = max(16, (LIPP_LARGE_BUFFERS / count) & ~1);
+
+ /* Apply "GBE" max limit. */
+ /* ISSUE: Use higher limit for XGBE? */
+ credits = min(NETIO_MAX_RECEIVE_PKTS, credits);
+
+ priv->network_cpus_count = count;
+ priv->network_cpus_credits = credits;
+
+#ifdef TILE_NET_DEBUG
+ pr_info("Using %d network cpus, with %d credits each\n",
+ priv->network_cpus_count, priv->network_cpus_credits);
+#endif
+
+ priv->partly_opened = 1;
+
+ } else {
+ /* FIXME: Is this possible? */
+ /* printk("Already partly opened.\n"); */
+ }
+
+ /*
+ * Attempt to bring up the link.
+ */
+ ret = tile_net_open_inner(dev);
+ if (ret <= 0) {
+ if (ret == 0)
+ netif_carrier_on(dev);
+ return ret;
+ }
+
+ /*
+ * We were unable to bring up the NetIO interface, but we want to
+ * try again in a little bit. Tell Linux that we have no carrier
+ * so it doesn't try to use the interface before the link comes up
+ * and then remember to try again later.
+ */
+ netif_carrier_off(dev);
+ schedule_delayed_work(&priv->retry_work, TILE_NET_RETRY_INTERVAL);
+
+ return 0;
+}
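+
+/*
+ * Worked example of the credit computation in tile_net_open() (the
+ * numbers are illustrative, not the real LIPP/NETIO constants):
+ * with 1000 large buffers and 12 network cpus, 1000 / 12 = 83,
+ * which rounds down to 82; that is then clamped to a floor of 16
+ * and to the NETIO_MAX_RECEIVE_PKTS ceiling.
+ */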
+
+
+static int tile_net_drain_lipp_buffers(struct tile_net_priv *priv)
+{
+ int n = 0;
+
+ /* Drain all the LIPP buffers. */
+ while (true) {
+ int buffer;
+
+ /* NOTE: This should never fail. */
+ if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer,
+ sizeof(buffer), NETIO_IPP_DRAIN_OFF) < 0)
+ break;
+
+ /* Stop when done. */
+ if (buffer == 0)
+ break;
+
+ {
+ /* Convert "linux_buffer_t" to "va". */
+ void *va = __va((phys_addr_t)(buffer >> 1) << 7);
+
+ /* Acquire the associated "skb". */
+ struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
+ struct sk_buff *skb = *skb_ptr;
+
+ kfree_skb(skb);
+ }
+
+ n++;
+ }
+
+ return n;
+}
+
+
+/*
+ * Disables a network interface.
+ *
+ * Returns 0, this is not allowed to fail.
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ *
+ * ISSUE: How closely does "netif_running(dev)" mirror "priv->active"?
+ *
+ * Before we are called by "__dev_close()", "netif_running()" will
+ * have been cleared, so no NEW calls to "tile_net_poll()" will be
+ * made by "netpoll_poll_dev()".
+ *
+ * Often, this can cause some tiles to still have packets in their
+ * queues, so we must call "tile_net_discard_packets()" later.
+ *
+ * Note that some other tile may still be INSIDE "tile_net_poll()",
+ * and in fact, many will be, if there is heavy network load.
+ *
+ * Calling "on_each_cpu(tile_net_stop_disable, (void *)dev, 1)" when
+ * any tile is still "napi_schedule()"'d will induce a horrible crash
+ * when "msleep()" is called. This includes tiles which are inside
+ * "tile_net_poll()" which have not yet called "napi_complete()".
+ *
+ * So, we must first try to wait long enough for other tiles to finish
+ * with any current "tile_net_poll()" call, and, hopefully, to clear
+ * the "scheduled" flag. ISSUE: It is unclear what happens to tiles
+ * which have called "napi_schedule()" but which had not yet tried to
+ * call "tile_net_poll()", or which exhausted their budget inside
+ * "tile_net_poll()" just before this function was called.
+ */
+static int tile_net_stop(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ PDEBUG("tile_net_stop()\n");
+
+ /* Start discarding packets. */
+ priv->active = false;
+
+ /* Make sure "active" is visible to all tiles. */
+ mb();
+
+ /*
+ * On each tile, make sure no NEW packets get delivered, and
+ * disable the ingress interrupt.
+ *
+ * Note that the ingress interrupt can fire AFTER this,
+ * presumably due to packets which were recently delivered,
+ * but it will have no effect.
+ */
+ on_each_cpu(tile_net_deregister, (void *)dev, 1);
+
+ /* Optimistically drain LIPP buffers. */
+ (void)tile_net_drain_lipp_buffers(priv);
+
+ /* ISSUE: Only needed if not yet fully open. */
+ cancel_delayed_work_sync(&priv->retry_work);
+
+ /* Can't transmit any more. */
+ netif_stop_queue(dev);
+
+ /* Disable NAPI on each tile. */
+ on_each_cpu(tile_net_stop_disable, (void *)dev, 1);
+
+ /*
+ * Drain any remaining LIPP buffers. NOTE: This "printk()"
+ * has never been observed, but in theory it could happen.
+ */
+ if (tile_net_drain_lipp_buffers(priv) != 0)
+ printk("Had to drain some extra LIPP buffers!\n");
+
+ /* Stop LIPP/LEPP. */
+ tile_net_stop_aux(dev);
+
+ /*
+ * ISSUE: It appears that, in practice anyway, by the time we
+ * get here, there are no pending completions, but just in case,
+ * we free (all of) them anyway.
+ */
+ while (tile_net_lepp_free_comps(dev, true))
+ /* loop */;
+
+ /* Wipe the EPP queue, and wait till the stores hit the EPP. */
+ memset(priv->eq, 0, sizeof(lepp_queue_t));
+ mb();
+
+ return 0;
+}
+
+
+/*
+ * Prepare the "frags" info for the resulting LEPP command.
+ *
+ * If needed, flush the memory used by the frags.
+ */
+static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
+ struct sk_buff *skb,
+ void *b_data, unsigned int b_len)
+{
+ unsigned int i, n = 0;
+
+ struct skb_shared_info *sh = skb_shinfo(skb);
+
+ phys_addr_t cpa;
+
+ if (b_len != 0) {
+
+ if (!hash_default)
+ finv_buffer_remote(b_data, b_len, 0);
+
+ cpa = __pa(b_data);
+ frags[n].cpa_lo = cpa;
+ frags[n].cpa_hi = cpa >> 32;
+ frags[n].length = b_len;
+ frags[n].hash_for_home = hash_default;
+ n++;
+ }
+
+ for (i = 0; i < sh->nr_frags; i++) {
+
+ skb_frag_t *f = &sh->frags[i];
+ unsigned long pfn = page_to_pfn(f->page);
+
+ /* FIXME: Compute "hash_for_home" properly. */
+ /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
+ int hash_for_home = hash_default;
+
+ /* FIXME: Hmmm. */
+ if (!hash_default) {
+ void *va = pfn_to_kaddr(pfn) + f->page_offset;
+ BUG_ON(PageHighMem(f->page));
+ finv_buffer_remote(va, f->size, 0);
+ }
+
+ cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
+ frags[n].cpa_lo = cpa;
+ frags[n].cpa_hi = cpa >> 32;
+ frags[n].length = f->size;
+ frags[n].hash_for_home = hash_for_home;
+ n++;
+ }
+
+ return n;
+}
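+
+/*
+ * Example of the lo/hi split above (with a hypothetical address):
+ * a CPA of 0x380000040 yields cpa_lo = 0x80000040 and cpa_hi = 0x3.
+ */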
+
+
+/*
+ * This function takes "skb", consisting of a header template and a
+ * payload, and hands it to LEPP, to emit as one or more segments,
+ * each consisting of a possibly modified header, plus a piece of the
+ * payload, via a process known as "tcp segmentation offload".
+ *
+ * Usually, "data" will contain the header template, of size "sh_len",
+ * and "sh->frags" will contain "skb->data_len" bytes of payload, and
+ * there will be "sh->gso_segs" segments.
+ *
+ * Sometimes, if "sendfile()" requires copying, we will be called with
+ * "data" containing the header and payload, with "frags" being empty.
+ *
+ * In theory, "sh->nr_frags" could be 3, but in practice, it seems
+ * that this will never actually happen.
+ *
+ * See "emulate_large_send_offload()" for some reference code, which
+ * does not handle checksumming.
+ *
+ * ISSUE: How do we make sure that high memory DMA does not migrate?
+ */
+static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+ struct tile_net_stats_t *stats = &info->stats;
+
+ struct skb_shared_info *sh = skb_shinfo(skb);
+
+ unsigned char *data = skb->data;
+
+ /* The ip header follows the ethernet header. */
+ struct iphdr *ih = ip_hdr(skb);
+ unsigned int ih_len = ih->ihl * 4;
+
+ /* Note that "nh == ih", by definition. */
+ unsigned char *nh = skb_network_header(skb);
+ unsigned int eh_len = nh - data;
+
+ /* The tcp header follows the ip header. */
+ struct tcphdr *th = (struct tcphdr *)(nh + ih_len);
+ unsigned int th_len = th->doff * 4;
+
+ /* The total number of header bytes. */
+ /* NOTE: This may be less than skb_headlen(skb). */
+ unsigned int sh_len = eh_len + ih_len + th_len;
+
+ /* The number of payload bytes at "skb->data + sh_len". */
+ /* This is non-zero for sendfile() without HIGHDMA. */
+ unsigned int b_len = skb_headlen(skb) - sh_len;
+
+ /* The total number of payload bytes. */
+ unsigned int d_len = b_len + skb->data_len;
+
+ /* The maximum payload size. */
+ unsigned int p_len = sh->gso_size;
+
+ /* The total number of segments. */
+ unsigned int num_segs = sh->gso_segs;
+
+ /* The temporary copy of the command. */
+ u32 cmd_body[(LEPP_MAX_CMD_SIZE + 3) / 4];
+ lepp_tso_cmd_t *cmd = (lepp_tso_cmd_t *)cmd_body;
+
+ /* Analyze the "frags". */
+ unsigned int num_frags =
+ tile_net_tx_frags(cmd->frags, skb, data + sh_len, b_len);
+
+ /* The size of the command, including frags and header. */
+ size_t cmd_size = LEPP_TSO_CMD_SIZE(num_frags, sh_len);
+
+ /* The command header. */
+ lepp_tso_cmd_t cmd_init = {
+ .tso = true,
+ .header_size = sh_len,
+ .ip_offset = eh_len,
+ .tcp_offset = eh_len + ih_len,
+ .payload_size = p_len,
+ .num_frags = num_frags,
+ };
+
+ unsigned long irqflags;
+
+ lepp_queue_t *eq = priv->eq;
+
+ struct sk_buff *olds[8];
+ unsigned int wanted = 8;
+ unsigned int i, nolds = 0;
+
+ unsigned int cmd_head, cmd_tail, cmd_next;
+ unsigned int comp_tail;
+
+
+ /* Paranoia. */
+ BUG_ON(skb->protocol != htons(ETH_P_IP));
+ BUG_ON(ih->protocol != IPPROTO_TCP);
+ BUG_ON(skb->ip_summed != CHECKSUM_PARTIAL);
+ BUG_ON(num_frags > LEPP_MAX_FRAGS);
+ /* BUG_ON(num_segs != (d_len + (p_len - 1)) / p_len); */
+ BUG_ON(num_segs <= 1);
+
+
+ /* Finish preparing the command. */
+
+ /* Copy the command header. */
+ *cmd = cmd_init;
+
+ /* Copy the "header". */
+ memcpy(&cmd->frags[num_frags], data, sh_len);
+
+
+ /* Prefetch and wait, to minimize time spent holding the spinlock. */
+ prefetch_L1(&eq->comp_tail);
+ prefetch_L1(&eq->cmd_tail);
+ mb();
+
+
+ /* Enqueue the command. */
+
+ spin_lock_irqsave(&priv->eq_lock, irqflags);
+
+ /*
+ * Handle completions if needed to make room.
+ * HACK: Spin until there is sufficient room.
+ */
+ if (lepp_num_free_comp_slots(eq) == 0) {
+ nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
+ if (nolds == 0) {
+busy:
+ spin_unlock_irqrestore(&priv->eq_lock, irqflags);
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ cmd_head = eq->cmd_head;
+ cmd_tail = eq->cmd_tail;
+
+ /* Prepare to advance, detecting full queue. */
+ cmd_next = cmd_tail + cmd_size;
+ if (cmd_tail < cmd_head && cmd_next >= cmd_head)
+ goto busy;
+ if (cmd_next > LEPP_CMD_LIMIT) {
+ cmd_next = 0;
+ if (cmd_next == cmd_head)
+ goto busy;
+ }
+
+ /* Copy the command. */
+ memcpy(&eq->cmds[cmd_tail], cmd, cmd_size);
+
+ /* Advance. */
+ cmd_tail = cmd_next;
+
+ /* Record "skb" for eventual freeing. */
+ comp_tail = eq->comp_tail;
+ eq->comps[comp_tail] = skb;
+ LEPP_QINC(comp_tail);
+ eq->comp_tail = comp_tail;
+
+ /* Flush before allowing LEPP to handle the command. */
+ /* ISSUE: Is this the optimal location for the flush? */
+ __insn_mf();
+
+ eq->cmd_tail = cmd_tail;
+
+ /* NOTE: Using "4" here is more efficient than "0" or "2", */
+ /* and, strangely, more efficient than pre-checking the number */
+ /* of available completions, and comparing it to 4. */
+ if (nolds == 0)
+ nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4);
+
+ spin_unlock_irqrestore(&priv->eq_lock, irqflags);
+
+ /* Handle completions. */
+ for (i = 0; i < nolds; i++)
+ kfree_skb(olds[i]);
+
+ /* Update stats. */
+ stats->tx_packets += num_segs;
+ stats->tx_bytes += (num_segs * sh_len) + d_len;
+
+ /* Make sure the egress timer is scheduled. */
+ tile_net_schedule_egress_timer(info);
+
+ return NETDEV_TX_OK;
+}
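+
+/*
+ * Worked example of the sizes used above (illustrative values): for
+ * a TCP segment with no IP or TCP options and no VLAN tag, eh_len =
+ * 14, ih_len = 20 and th_len = 20, so sh_len = 54; a 64KB payload
+ * with gso_size 1448 then yields 46 segments.
+ */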
+
+
+/*
+ * Transmit a packet (called by the kernel via "hard_start_xmit" hook).
+ */
+static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+ struct tile_net_stats_t *stats = &info->stats;
+
+ unsigned long irqflags;
+
+ struct skb_shared_info *sh = skb_shinfo(skb);
+
+ unsigned int len = skb->len;
+ unsigned char *data = skb->data;
+
+ unsigned int csum_start = skb_checksum_start_offset(skb);
+
+ lepp_frag_t frags[LEPP_MAX_FRAGS];
+
+ unsigned int num_frags;
+
+ lepp_queue_t *eq = priv->eq;
+
+ struct sk_buff *olds[8];
+ unsigned int wanted = 8;
+ unsigned int i, nolds = 0;
+
+ unsigned int cmd_size = sizeof(lepp_cmd_t);
+
+ unsigned int cmd_head, cmd_tail, cmd_next;
+ unsigned int comp_tail;
+
+ lepp_cmd_t cmds[LEPP_MAX_FRAGS];
+
+
+ /*
+ * This is paranoia, since we think that if the link doesn't come
+ * up, telling Linux we have no carrier will keep it from trying
+ * to transmit. If it does, though, we can't execute this routine,
+ * since data structures we depend on aren't set up yet.
+ */
+ if (!info->registered)
+ return NETDEV_TX_BUSY;
+
+
+ /* Save the timestamp. */
+ dev->trans_start = jiffies;
+
+
+#ifdef TILE_NET_PARANOIA
+#if CHIP_HAS_CBOX_HOME_MAP()
+ if (hash_default) {
+ HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data);
+ if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
+ panic("Non-HFH egress buffer! VA=%p Mode=%d PTE=%llx",
+ data, hv_pte_get_mode(pte), hv_pte_val(pte));
+ }
+#endif
+#endif
+
+
+#ifdef TILE_NET_DUMP_PACKETS
+ /* ISSUE: Does not dump the "frags". */
+ dump_packet(data, skb_headlen(skb), "tx");
+#endif /* TILE_NET_DUMP_PACKETS */
+
+
+ if (sh->gso_size != 0)
+ return tile_net_tx_tso(skb, dev);
+
+
+ /* Prepare the commands. */
+
+ num_frags = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
+
+ for (i = 0; i < num_frags; i++) {
+
+ bool final = (i == num_frags - 1);
+
+ lepp_cmd_t cmd = {
+ .cpa_lo = frags[i].cpa_lo,
+ .cpa_hi = frags[i].cpa_hi,
+ .length = frags[i].length,
+ .hash_for_home = frags[i].hash_for_home,
+ .send_completion = final,
+ .end_of_packet = final
+ };
+
+ if (i == 0 && skb->ip_summed == CHECKSUM_PARTIAL) {
+ cmd.compute_checksum = 1;
+ cmd.checksum_data.bits.start_byte = csum_start;
+ cmd.checksum_data.bits.count = len - csum_start;
+ cmd.checksum_data.bits.destination_byte =
+ csum_start + skb->csum_offset;
+ }
+
+ cmds[i] = cmd;
+ }
+
+
+ /* Prefetch and wait, to minimize time spent holding the spinlock. */
+ prefetch_L1(&eq->comp_tail);
+ prefetch_L1(&eq->cmd_tail);
+ mb();
+
+
+ /* Enqueue the commands. */
+
+ spin_lock_irqsave(&priv->eq_lock, irqflags);
+
+ /*
+ * Handle completions if needed to make room.
+ * HACK: Spin until there is sufficient room.
+ */
+ if (lepp_num_free_comp_slots(eq) == 0) {
+ nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
+ if (nolds == 0) {
+busy:
+ spin_unlock_irqrestore(&priv->eq_lock, irqflags);
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ cmd_head = eq->cmd_head;
+ cmd_tail = eq->cmd_tail;
+
+ /* Copy the commands, or fail. */
+ for (i = 0; i < num_frags; i++) {
+
+ /* Prepare to advance, detecting full queue. */
+ cmd_next = cmd_tail + cmd_size;
+ if (cmd_tail < cmd_head && cmd_next >= cmd_head)
+ goto busy;
+ if (cmd_next > LEPP_CMD_LIMIT) {
+ cmd_next = 0;
+ if (cmd_next == cmd_head)
+ goto busy;
+ }
+
+ /* Copy the command. */
+ *(lepp_cmd_t *)&eq->cmds[cmd_tail] = cmds[i];
+
+ /* Advance. */
+ cmd_tail = cmd_next;
+ }
+
+ /* Record "skb" for eventual freeing. */
+ comp_tail = eq->comp_tail;
+ eq->comps[comp_tail] = skb;
+ LEPP_QINC(comp_tail);
+ eq->comp_tail = comp_tail;
+
+ /* Flush before allowing LEPP to handle the command. */
+ /* ISSUE: Is this the optimal location for the flush? */
+ __insn_mf();
+
+ eq->cmd_tail = cmd_tail;
+
+ /* NOTE: Using "4" here is more efficient than "0" or "2", */
+ /* and, strangely, more efficient than pre-checking the number */
+ /* of available completions, and comparing it to 4. */
+ if (nolds == 0)
+ nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4);
+
+ spin_unlock_irqrestore(&priv->eq_lock, irqflags);
+
+ /* Handle completions. */
+ for (i = 0; i < nolds; i++)
+ kfree_skb(olds[i]);
+
+ /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */
+ stats->tx_packets++;
+ stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN);
+
+ /* Make sure the egress timer is scheduled. */
+ tile_net_schedule_egress_timer(info);
+
+ return NETDEV_TX_OK;
+}
+
+
+/*
+ * Deal with a transmit timeout.
+ */
+static void tile_net_tx_timeout(struct net_device *dev)
+{
+ PDEBUG("tile_net_tx_timeout()\n");
+ PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies,
+ jiffies - dev->trans_start);
+
+ /* XXX: ISSUE: This doesn't seem useful for us. */
+ netif_wake_queue(dev);
+}
+
+
+/*
+ * Ioctl commands.
+ */
+static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ return -EOPNOTSUPP;
+}
+
+
+/*
+ * Get System Network Statistics.
+ *
+ * Returns the address of the device statistics structure.
+ */
+static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ u32 rx_packets = 0;
+ u32 tx_packets = 0;
+ u32 rx_bytes = 0;
+ u32 tx_bytes = 0;
+ int i;
+
+ for_each_online_cpu(i) {
+ if (priv->cpu[i]) {
+ rx_packets += priv->cpu[i]->stats.rx_packets;
+ rx_bytes += priv->cpu[i]->stats.rx_bytes;
+ tx_packets += priv->cpu[i]->stats.tx_packets;
+ tx_bytes += priv->cpu[i]->stats.tx_bytes;
+ }
+ }
+
+ priv->stats.rx_packets = rx_packets;
+ priv->stats.rx_bytes = rx_bytes;
+ priv->stats.tx_packets = tx_packets;
+ priv->stats.tx_bytes = tx_bytes;
+
+ return &priv->stats;
+}
+
+
+/*
+ * Change the "mtu".
+ *
+ * The "change_mtu" method is usually not needed.
+ * If you need it, it must be like this.
+ */
+static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
+{
+ PDEBUG("tile_net_change_mtu()\n");
+
+ /* Check ranges. */
+ if ((new_mtu < 68) || (new_mtu > 1500))
+ return -EINVAL;
+
+ /* Accept the value. */
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+
+/*
+ * Change the Ethernet Address of the NIC.
+ *
+ * The hypervisor driver does not support changing MAC address. However,
+ * the IPP does not do anything with the MAC address, so the address which
+ * gets used on outgoing packets, and which is accepted on incoming packets,
+ * is completely up to the NetIO program or kernel driver which is actually
+ * handling them.
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int tile_net_set_mac_address(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ /* ISSUE: Note that "dev_addr" is now a pointer. */
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ return 0;
+}
+
+
+/*
+ * Obtain the MAC address from the hypervisor.
+ * This must be done before opening the device.
+ */
+static int tile_net_get_mac(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ char hv_dev_name[32];
+ int len;
+
+ __netio_getset_offset_t offset = { .word = NETIO_IPP_PARAM_OFF };
+
+ int ret;
+
+ /* For example, "xgbe0". */
+ strcpy(hv_dev_name, dev->name);
+ len = strlen(hv_dev_name);
+
+ /* For example, "xgbe/0". */
+ hv_dev_name[len] = hv_dev_name[len - 1];
+ hv_dev_name[len - 1] = '/';
+ len++;
+
+ /* For example, "xgbe/0/native_hash". */
+ strcpy(hv_dev_name + len, hash_default ? "/native_hash" : "/native");
+
+ /* Get the hypervisor handle for this device. */
+ priv->hv_devhdl = hv_dev_open((HV_VirtAddr)hv_dev_name, 0);
+ PDEBUG("hv_dev_open(%s) returned %d %p\n",
+ hv_dev_name, priv->hv_devhdl, &priv->hv_devhdl);
+ if (priv->hv_devhdl < 0) {
+ if (priv->hv_devhdl == HV_ENODEV)
+ printk(KERN_DEBUG "Ignoring unconfigured device %s\n",
+ hv_dev_name);
+ else
+ printk(KERN_DEBUG "hv_dev_open(%s) returned %d\n",
+ hv_dev_name, priv->hv_devhdl);
+ return -1;
+ }
+
+ /*
+ * Read the hardware address from the hypervisor.
+ * ISSUE: Note that "dev_addr" is now a pointer.
+ */
+ offset.bits.class = NETIO_PARAM;
+ offset.bits.addr = NETIO_PARAM_MAC;
+ ret = hv_dev_pread(priv->hv_devhdl, 0,
+ (HV_VirtAddr)dev->dev_addr, dev->addr_len,
+ offset.word);
+ PDEBUG("hv_dev_pread(NETIO_PARAM_MAC) returned %d\n", ret);
+ if (ret <= 0) {
+ printk(KERN_DEBUG "hv_dev_pread(NETIO_PARAM_MAC) %s failed\n",
+ dev->name);
+ /*
+ * Since the device is configured by the hypervisor but we
+ * can't get its MAC address, we are most likely running
+ * the simulator, so let's generate a random MAC address.
+ */
+ random_ether_addr(dev->dev_addr);
+ }
+
+ return 0;
+}
+
+
+static struct net_device_ops tile_net_ops = {
+ .ndo_open = tile_net_open,
+ .ndo_stop = tile_net_stop,
+ .ndo_start_xmit = tile_net_tx,
+ .ndo_do_ioctl = tile_net_ioctl,
+ .ndo_get_stats = tile_net_get_stats,
+ .ndo_change_mtu = tile_net_change_mtu,
+ .ndo_tx_timeout = tile_net_tx_timeout,
+ .ndo_set_mac_address = tile_net_set_mac_address
+};
+
+
+/*
+ * The setup function.
+ *
+ * This uses ether_setup() to assign various fields in dev, including
+ * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
+ */
+static void tile_net_setup(struct net_device *dev)
+{
+ PDEBUG("tile_net_setup()\n");
+
+ ether_setup(dev);
+
+ dev->netdev_ops = &tile_net_ops;
+
+ dev->watchdog_timeo = TILE_NET_TIMEOUT;
+
+ /* We want lockless xmit. */
+ dev->features |= NETIF_F_LLTX;
+
+ /* We support hardware tx checksums. */
+ dev->features |= NETIF_F_HW_CSUM;
+
+ /* We support scatter/gather. */
+ dev->features |= NETIF_F_SG;
+
+ /* We support TSO. */
+ dev->features |= NETIF_F_TSO;
+
+#ifdef TILE_NET_GSO
+ /* We support GSO. */
+ dev->features |= NETIF_F_GSO;
+#endif
+
+ if (hash_default)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ /* ISSUE: We should support NETIF_F_UFO. */
+
+ dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
+
+ dev->mtu = TILE_NET_MTU;
+}
+
+
+/*
+ * Allocate the device structure, register the device, and obtain the
+ * MAC address from the hypervisor.
+ */
+static struct net_device *tile_net_dev_init(const char *name)
+{
+ int ret;
+ struct net_device *dev;
+ struct tile_net_priv *priv;
+
+ /*
+ * Allocate the device structure. This allocates "priv", calls
+ * tile_net_setup(), and saves "name". Normally, "name" is a
+ * template, instantiated by register_netdev(), but not for us.
+ */
+ dev = alloc_netdev(sizeof(*priv), name, tile_net_setup);
+ if (!dev) {
+ pr_err("alloc_netdev(%s) failed\n", name);
+ return NULL;
+ }
+
+ priv = netdev_priv(dev);
+
+ /* Initialize "priv". */
+
+ memset(priv, 0, sizeof(*priv));
+
+ /* Save "dev" for "tile_net_open_retry()". */
+ priv->dev = dev;
+
+ INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry);
+
+ spin_lock_init(&priv->eq_lock);
+
+ /* Allocate "eq". */
+ priv->eq_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, EQ_ORDER);
+ if (!priv->eq_pages) {
+ free_netdev(dev);
+ return NULL;
+ }
+ priv->eq = page_address(priv->eq_pages);
+
+ /* Register the network device. */
+ ret = register_netdev(dev);
+ if (ret) {
+ pr_err("register_netdev %s failed %d\n", dev->name, ret);
+ __free_pages(priv->eq_pages, EQ_ORDER);
+ free_netdev(dev);
+ return NULL;
+ }
+
+ /* Get the MAC address. */
+ ret = tile_net_get_mac(dev);
+ if (ret < 0) {
+ unregister_netdev(dev);
+ __free_pages(priv->eq_pages, EQ_ORDER);
+ free_netdev(dev);
+ return NULL;
+ }
+
+ return dev;
+}
+
+
+/*
+ * Module cleanup.
+ *
+ * FIXME: If compiled as a module, this module cannot be "unloaded",
+ * because the "ingress interrupt handler" is registered permanently.
+ */
+static void tile_net_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < TILE_NET_DEVS; i++) {
+ if (tile_net_devs[i]) {
+ struct net_device *dev = tile_net_devs[i];
+ struct tile_net_priv *priv = netdev_priv(dev);
+ unregister_netdev(dev);
+ finv_buffer_remote(priv->eq, EQ_SIZE, 0);
+ __free_pages(priv->eq_pages, EQ_ORDER);
+ free_netdev(dev);
+ }
+ }
+}
+
+
+/*
+ * Module initialization.
+ */
+static int tile_net_init_module(void)
+{
+ pr_info("Tilera IPP Net Driver\n");
+
+ tile_net_devs[0] = tile_net_dev_init("xgbe0");
+ tile_net_devs[1] = tile_net_dev_init("xgbe1");
+ tile_net_devs[2] = tile_net_dev_init("gbe0");
+ tile_net_devs[3] = tile_net_dev_init("gbe1");
+
+ return 0;
+}
+
+
+module_init(tile_net_init_module);
+module_exit(tile_net_cleanup);
+
+
+#ifndef MODULE
+
+/*
+ * The "network_cpus" boot argument specifies the cpus that are dedicated
+ * to handle ingress packets.
+ *
+ * The parameter should be in the form "network_cpus=m-n[,x-y]", where
+ * m-n and x-y are inclusive ranges of cpu numbers, for example
+ * "network_cpus=1-3,8-11"; the named cpus must not be dedicated
+ * or dataplane cpus.
+ */
+static int __init network_cpus_setup(char *str)
+{
+ int rc = cpulist_parse_crop(str, &network_cpus_map);
+ if (rc != 0) {
+ pr_warning("network_cpus=%s: malformed cpu list\n",
+ str);
+ } else {
+
+ /* Limit the mask to cpus that actually exist. */
+ cpumask_and(&network_cpus_map, &network_cpus_map,
+ cpu_possible_mask);
+
+
+ if (cpumask_empty(&network_cpus_map)) {
+ pr_warning("Ignoring network_cpus='%s'.\n",
+ str);
+ } else {
+ char buf[1024];
+ cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
+ pr_info("Linux network CPUs: %s\n", buf);
+ network_cpus_used = true;
+ }
+ }
+
+ return 0;
+}
+__setup("network_cpus=", network_cpus_setup);
+
+#endif
diff --git a/drivers/net/ethernet/toshiba/Kconfig b/drivers/net/ethernet/toshiba/Kconfig
new file mode 100644
index 000000000000..051764704559
--- /dev/null
+++ b/drivers/net/ethernet/toshiba/Kconfig
@@ -0,0 +1,57 @@
+#
+# Toshiba network device configuration
+#
+
+config NET_VENDOR_TOSHIBA
+ bool "Toshiba devices"
+ default y
+ depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB) || PPC_PS3
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Toshiba cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_TOSHIBA
+
+config GELIC_NET
+ tristate "PS3 Gigabit Ethernet driver"
+ depends on PPC_PS3
+ select PS3_SYS_MANAGER
+ ---help---
+ This driver supports the network device on the PS3 game
+ console. This driver has built-in support for Ethernet.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ps3_gelic.
+
+config GELIC_WIRELESS
+ bool "PS3 Wireless support"
+ depends on GELIC_NET && WLAN
+ select WIRELESS_EXT
+ ---help---
+ This option adds support for the wireless feature of the PS3.
+ If you have a wireless-less model of the PS3, or have no plan to
+ use the wireless feature, disabling this option saves memory. As
+ the driver automatically distinguishes the models, you can
+ safely enable this option even if you have a wireless-less model.
+
+config SPIDER_NET
+ tristate "Spider Gigabit Ethernet driver"
+ depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)
+ select FW_LOADER
+ select SUNGEM_PHY
+ ---help---
+ This driver supports the Gigabit Ethernet chips present on the
+ Cell Processor-Based Blades from IBM.
+
+config TC35815
+ tristate "TOSHIBA TC35815 Ethernet support"
+ depends on PCI && MIPS
+ select PHYLIB
+
+endif # NET_VENDOR_TOSHIBA
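+
+# For illustration (not taken from an existing defconfig), a PS3 build
+# that enables the Gelic driver as a module with wireless support,
+# assuming WLAN is already enabled, would set:
+#
+#   CONFIG_NET_VENDOR_TOSHIBA=y
+#   CONFIG_GELIC_NET=m
+#   CONFIG_GELIC_WIRELESS=y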
diff --git a/drivers/net/ethernet/toshiba/Makefile b/drivers/net/ethernet/toshiba/Makefile
new file mode 100644
index 000000000000..a5069008435b
--- /dev/null
+++ b/drivers/net/ethernet/toshiba/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the Toshiba network device drivers.
+#
+
+obj-$(CONFIG_GELIC_NET) += ps3_gelic.o
+gelic_wireless-$(CONFIG_GELIC_WIRELESS) += ps3_gelic_wireless.o
+ps3_gelic-objs += ps3_gelic_net.o $(gelic_wireless-y)
+spidernet-y += spider_net.o spider_net_ethtool.o
+obj-$(CONFIG_SPIDER_NET) += spidernet.o
+obj-$(CONFIG_TC35815) += tc35815.o
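+
+# Note: ps3_gelic.o is a composite object; ps3_gelic_net.o is always
+# linked in, while ps3_gelic_wireless.o is added only when
+# CONFIG_GELIC_WIRELESS=y, via the gelic_wireless-$(...) list above.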
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
new file mode 100644
index 000000000000..ddb33cfd3543
--- /dev/null
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -0,0 +1,1896 @@
+/*
+ * PS3 gelic network driver.
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2006, 2007 Sony Corporation
+ *
+ * This file is based on: spider_net.c
+ *
+ * (C) Copyright IBM Corp. 2005
+ *
+ * Authors : Utz Bacher <utz.bacher@de.ibm.com>
+ * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#undef DEBUG
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+#include <linux/dma-mapping.h>
+#include <net/checksum.h>
+#include <asm/firmware.h>
+#include <asm/ps3.h>
+#include <asm/lv1call.h>
+
+#include "ps3_gelic_net.h"
+#include "ps3_gelic_wireless.h"
+
+#define DRV_NAME "Gelic Network Driver"
+#define DRV_VERSION "2.0"
+
+MODULE_AUTHOR("SCE Inc.");
+MODULE_DESCRIPTION("Gelic Network driver");
+MODULE_LICENSE("GPL");
+
+
+static inline void gelic_card_enable_rxdmac(struct gelic_card *card);
+static inline void gelic_card_disable_rxdmac(struct gelic_card *card);
+static inline void gelic_card_disable_txdmac(struct gelic_card *card);
+static inline void gelic_card_reset_chain(struct gelic_card *card,
+ struct gelic_descr_chain *chain,
+ struct gelic_descr *start_descr);
+
+/* set irq_mask */
+int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask)
+{
+ int status;
+
+ status = lv1_net_set_interrupt_mask(bus_id(card), dev_id(card),
+ mask, 0);
+ if (status)
+ dev_info(ctodev(card),
+ "%s failed %d\n", __func__, status);
+ return status;
+}
+
+static inline void gelic_card_rx_irq_on(struct gelic_card *card)
+{
+ card->irq_mask |= GELIC_CARD_RXINT;
+ gelic_card_set_irq_mask(card, card->irq_mask);
+}
+static inline void gelic_card_rx_irq_off(struct gelic_card *card)
+{
+ card->irq_mask &= ~GELIC_CARD_RXINT;
+ gelic_card_set_irq_mask(card, card->irq_mask);
+}
+
+static void gelic_card_get_ether_port_status(struct gelic_card *card,
+ int inform)
+{
+ u64 v2;
+ struct net_device *ether_netdev;
+
+ lv1_net_control(bus_id(card), dev_id(card),
+ GELIC_LV1_GET_ETH_PORT_STATUS,
+ GELIC_LV1_VLAN_TX_ETHERNET_0, 0, 0,
+ &card->ether_port_status, &v2);
+
+ if (inform) {
+ ether_netdev = card->netdev[GELIC_PORT_ETHERNET_0];
+ if (card->ether_port_status & GELIC_LV1_ETHER_LINK_UP)
+ netif_carrier_on(ether_netdev);
+ else
+ netif_carrier_off(ether_netdev);
+ }
+}
+
+static int gelic_card_set_link_mode(struct gelic_card *card, int mode)
+{
+ int status;
+ u64 v1, v2;
+
+ status = lv1_net_control(bus_id(card), dev_id(card),
+ GELIC_LV1_SET_NEGOTIATION_MODE,
+ GELIC_LV1_PHY_ETHERNET_0, mode, 0, &v1, &v2);
+ if (status) {
+ pr_info("%s: failed setting negotiation mode %d\n", __func__,
+ status);
+ return -EBUSY;
+ }
+
+ card->link_mode = mode;
+ return 0;
+}
+
+void gelic_card_up(struct gelic_card *card)
+{
+ pr_debug("%s: called\n", __func__);
+ mutex_lock(&card->updown_lock);
+ if (atomic_inc_return(&card->users) == 1) {
+ pr_debug("%s: real do\n", __func__);
+ /* enable irq */
+ gelic_card_set_irq_mask(card, card->irq_mask);
+ /* start rx */
+ gelic_card_enable_rxdmac(card);
+
+ napi_enable(&card->napi);
+ }
+ mutex_unlock(&card->updown_lock);
+ pr_debug("%s: done\n", __func__);
+}
+
+void gelic_card_down(struct gelic_card *card)
+{
+ u64 mask;
+ pr_debug("%s: called\n", __func__);
+ mutex_lock(&card->updown_lock);
+ if (atomic_dec_if_positive(&card->users) == 0) {
+ pr_debug("%s: real do\n", __func__);
+ napi_disable(&card->napi);
+ /*
+ * Disable irq. Wireless interrupts will
+ * be disabled later if any
+ */
+ mask = card->irq_mask & (GELIC_CARD_WLAN_EVENT_RECEIVED |
+ GELIC_CARD_WLAN_COMMAND_COMPLETED);
+ gelic_card_set_irq_mask(card, mask);
+ /* stop rx */
+ gelic_card_disable_rxdmac(card);
+ gelic_card_reset_chain(card, &card->rx_chain,
+ card->descr + GELIC_NET_TX_DESCRIPTORS);
+ /* stop tx */
+ gelic_card_disable_txdmac(card);
+ }
+ mutex_unlock(&card->updown_lock);
+ pr_debug("%s: done\n", __func__);
+}
+
+/**
+ * gelic_descr_get_status -- returns the status of a descriptor
+ * @descr: descriptor to look at
+ *
+ * returns the status as in the dmac_cmd_status field of the descriptor
+ */
+static enum gelic_descr_dma_status
+gelic_descr_get_status(struct gelic_descr *descr)
+{
+ return be32_to_cpu(descr->dmac_cmd_status) & GELIC_DESCR_DMA_STAT_MASK;
+}
+
+/**
+ * gelic_descr_set_status -- sets the status of a descriptor
+ * @descr: descriptor to change
+ * @status: status to set in the descriptor
+ *
+ * changes the status to the specified value. Doesn't change other bits
+ * in the status
+ */
+static void gelic_descr_set_status(struct gelic_descr *descr,
+ enum gelic_descr_dma_status status)
+{
+ descr->dmac_cmd_status = cpu_to_be32(status |
+ (be32_to_cpu(descr->dmac_cmd_status) &
+ ~GELIC_DESCR_DMA_STAT_MASK));
+ /*
+ * dma_cmd_status field is used to indicate whether the descriptor
+ * is valid or not.
+ * Usually caller of this function wants to inform that to the
+ * hardware, so we assure here the hardware sees the change.
+ */
+ wmb();
+}
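+
+/*
+ * A worked example of the read-modify-write above (the value is
+ * illustrative): with GELIC_DESCR_DMA_STAT_MASK == 0xf0000000,
+ * setting a descriptor whose dmac_cmd_status holds 0xa0040123 to
+ * GELIC_DESCR_DMA_NOT_IN_USE (0xb0000000) yields 0xb0040123; only
+ * the status nibble changes and the command bits are preserved.
+ */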
+
+/**
+ * gelic_card_free_chain - free descriptor chain
+ * @card: card structure
+ * @descr_in: address of desc
+ */
+static void gelic_card_free_chain(struct gelic_card *card,
+ struct gelic_descr *descr_in)
+{
+ struct gelic_descr *descr;
+
+ for (descr = descr_in; descr && descr->bus_addr; descr = descr->next) {
+ dma_unmap_single(ctodev(card), descr->bus_addr,
+ GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL);
+ descr->bus_addr = 0;
+ }
+}
+
+/**
+ * gelic_card_init_chain - links descriptor chain
+ * @card: card structure
+ * @chain: address of chain
+ * @start_descr: address of descriptor array
+ * @no: number of descriptors
+ *
+ * we manage a circular list that mirrors the hardware structure,
+ * except that the hardware uses bus addresses.
+ *
+ * returns 0 on success, <0 on failure
+ */
+static int __devinit gelic_card_init_chain(struct gelic_card *card,
+ struct gelic_descr_chain *chain,
+ struct gelic_descr *start_descr,
+ int no)
+{
+ int i;
+ struct gelic_descr *descr;
+
+ descr = start_descr;
+ memset(descr, 0, sizeof(*descr) * no);
+
+ /* set up the hardware pointers in each descriptor */
+ for (i = 0; i < no; i++, descr++) {
+ gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
+ descr->bus_addr =
+ dma_map_single(ctodev(card), descr,
+ GELIC_DESCR_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ if (!descr->bus_addr)
+ goto iommu_error;
+
+ descr->next = descr + 1;
+ descr->prev = descr - 1;
+ }
+ /* link them into a ring */
+ (descr - 1)->next = start_descr;
+ start_descr->prev = (descr - 1);
+
+ /* chain bus addr of hw descriptor */
+ descr = start_descr;
+ for (i = 0; i < no; i++, descr++)
+ descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr);
+
+ chain->head = start_descr;
+ chain->tail = start_descr;
+
+ /* do not chain last hw descriptor */
+ (descr - 1)->next_descr_addr = 0;
+
+ return 0;
+
+iommu_error:
+ for (i--, descr--; 0 <= i; i--, descr--)
+ if (descr->bus_addr)
+ dma_unmap_single(ctodev(card), descr->bus_addr,
+ GELIC_DESCR_SIZE,
+ DMA_BIDIRECTIONAL);
+ return -ENOMEM;
+}
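+
+/*
+ * A sketch of the result, assuming no == 4: the software ring is
+ * circular via the next/prev pointers, while the hardware sees
+ *
+ *   descr[0] -> descr[1] -> descr[2] -> descr[3] -> (0, chain end)
+ *
+ * because each next_descr_addr holds the bus address of its successor
+ * and the last one is zeroed so the DMAC stops at the end.
+ */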
+
+/**
+ * gelic_card_reset_chain - reset status of a descriptor chain
+ * @card: card structure
+ * @chain: address of chain
+ * @start_descr: address of descriptor array
+ *
+ * Reset the status of dma descriptors to ready state
+ * and re-initialize the hardware chain for later use
+ */
+static void gelic_card_reset_chain(struct gelic_card *card,
+ struct gelic_descr_chain *chain,
+ struct gelic_descr *start_descr)
+{
+ struct gelic_descr *descr;
+
+ for (descr = start_descr; start_descr != descr->next; descr++) {
+ gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED);
+ descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr);
+ }
+
+ chain->head = start_descr;
+ chain->tail = (descr - 1);
+
+ (descr - 1)->next_descr_addr = 0;
+}
+
+/**
+ * gelic_descr_prepare_rx - reinitializes a rx descriptor
+ * @card: card structure
+ * @descr: descriptor to re-init
+ *
+ * return 0 on success, <0 on failure
+ *
+ * allocates a new rx skb, iommu-maps it and attaches it to the descriptor.
+ * Activate the descriptor state-wise
+ */
+static int gelic_descr_prepare_rx(struct gelic_card *card,
+ struct gelic_descr *descr)
+{
+ int offset;
+ unsigned int bufsize;
+
+ if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
+ dev_info(ctodev(card), "%s: ERROR status\n", __func__);
+ /* we need to round up the buffer size to a multiple of 128 */
+ bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);
+
+ /* and we need to have it 128 byte aligned, therefore we allocate a
+ * bit more */
+ descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1);
+ if (!descr->skb) {
+ descr->buf_addr = 0; /* tell DMAC don't touch memory */
+ dev_info(ctodev(card),
+ "%s:allocate skb failed !!\n", __func__);
+ return -ENOMEM;
+ }
+ descr->buf_size = cpu_to_be32(bufsize);
+ descr->dmac_cmd_status = 0;
+ descr->result_size = 0;
+ descr->valid_size = 0;
+ descr->data_error = 0;
+
+ offset = ((unsigned long)descr->skb->data) &
+ (GELIC_NET_RXBUF_ALIGN - 1);
+ if (offset)
+ skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset);
+ /* io-mmu-map the skb */
+ descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card),
+ descr->skb->data,
+ GELIC_NET_MAX_MTU,
+ DMA_FROM_DEVICE));
+ if (!descr->buf_addr) {
+ dev_kfree_skb_any(descr->skb);
+ descr->skb = NULL;
+ dev_info(ctodev(card),
+ "%s:Could not iommu-map rx buffer\n", __func__);
+ gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
+ return -ENOMEM;
+ } else {
+ gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED);
+ return 0;
+ }
+}
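+
+/*
+ * A worked example of the alignment logic above (the address is
+ * illustrative): with GELIC_NET_RXBUF_ALIGN == 128, an skb whose data
+ * pointer ends in 0x48 has offset 0x48, so skb_reserve() advances it
+ * by 128 - 0x48 == 0x38 bytes onto the 128-byte boundary the hardware
+ * expects; the GELIC_NET_RXBUF_ALIGN - 1 extra bytes allocated above
+ * guarantee room for this shift.
+ */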
+
+/**
+ * gelic_card_release_rx_chain - free all skb of rx descr
+ * @card: card structure
+ *
+ */
+static void gelic_card_release_rx_chain(struct gelic_card *card)
+{
+ struct gelic_descr *descr = card->rx_chain.head;
+
+ do {
+ if (descr->skb) {
+ dma_unmap_single(ctodev(card),
+ be32_to_cpu(descr->buf_addr),
+ descr->skb->len,
+ DMA_FROM_DEVICE);
+ descr->buf_addr = 0;
+ dev_kfree_skb_any(descr->skb);
+ descr->skb = NULL;
+ gelic_descr_set_status(descr,
+ GELIC_DESCR_DMA_NOT_IN_USE);
+ }
+ descr = descr->next;
+ } while (descr != card->rx_chain.head);
+}
+
+/**
+ * gelic_card_fill_rx_chain - fills descriptors/skbs in the rx chains
+ * @card: card structure
+ *
+ * fills all descriptors in the rx chain: allocates skbs
+ * and iommu-maps them.
+ * returns 0 on success, < 0 on failure
+ */
+static int gelic_card_fill_rx_chain(struct gelic_card *card)
+{
+ struct gelic_descr *descr = card->rx_chain.head;
+ int ret;
+
+ do {
+ if (!descr->skb) {
+ ret = gelic_descr_prepare_rx(card, descr);
+ if (ret)
+ goto rewind;
+ }
+ descr = descr->next;
+ } while (descr != card->rx_chain.head);
+
+ return 0;
+rewind:
+ gelic_card_release_rx_chain(card);
+ return ret;
+}
+
+/**
+ * gelic_card_alloc_rx_skbs - allocates rx skbs in rx descriptor chains
+ * @card: card structure
+ *
+ * returns 0 on success, < 0 on failure
+ */
+static int __devinit gelic_card_alloc_rx_skbs(struct gelic_card *card)
+{
+ struct gelic_descr_chain *chain;
+ int ret;
+ chain = &card->rx_chain;
+ ret = gelic_card_fill_rx_chain(card);
+ chain->tail = card->rx_top->prev; /* point to the last */
+ return ret;
+}
+
+/**
+ * gelic_descr_release_tx - processes a used tx descriptor
+ * @card: card structure
+ * @descr: descriptor to release
+ *
+ * releases a used tx descriptor (unmapping, freeing of skb)
+ */
+static void gelic_descr_release_tx(struct gelic_card *card,
+ struct gelic_descr *descr)
+{
+ struct sk_buff *skb = descr->skb;
+
+ BUG_ON(!(be32_to_cpu(descr->data_status) & GELIC_DESCR_TX_TAIL));
+
+ dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr), skb->len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+
+ descr->buf_addr = 0;
+ descr->buf_size = 0;
+ descr->next_descr_addr = 0;
+ descr->result_size = 0;
+ descr->valid_size = 0;
+ descr->data_status = 0;
+ descr->data_error = 0;
+ descr->skb = NULL;
+
+ /* set descr status */
+ gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
+}
+
+static void gelic_card_stop_queues(struct gelic_card *card)
+{
+ netif_stop_queue(card->netdev[GELIC_PORT_ETHERNET_0]);
+
+ if (card->netdev[GELIC_PORT_WIRELESS])
+ netif_stop_queue(card->netdev[GELIC_PORT_WIRELESS]);
+}
+
+static void gelic_card_wake_queues(struct gelic_card *card)
+{
+ netif_wake_queue(card->netdev[GELIC_PORT_ETHERNET_0]);
+
+ if (card->netdev[GELIC_PORT_WIRELESS])
+ netif_wake_queue(card->netdev[GELIC_PORT_WIRELESS]);
+}
+
+/**
+ * gelic_card_release_tx_chain - processes sent tx descriptors
+ * @card: adapter structure
+ * @stop: nonzero when called from the stop/teardown path
+ *
+ * releases the tx descriptors that gelic has finished with
+ */
+static void gelic_card_release_tx_chain(struct gelic_card *card, int stop)
+{
+ struct gelic_descr_chain *tx_chain;
+ enum gelic_descr_dma_status status;
+ struct net_device *netdev;
+ int release = 0;
+
+ for (tx_chain = &card->tx_chain;
+ tx_chain->head != tx_chain->tail && tx_chain->tail;
+ tx_chain->tail = tx_chain->tail->next) {
+ status = gelic_descr_get_status(tx_chain->tail);
+ netdev = tx_chain->tail->skb->dev;
+ switch (status) {
+ case GELIC_DESCR_DMA_RESPONSE_ERROR:
+ case GELIC_DESCR_DMA_PROTECTION_ERROR:
+ case GELIC_DESCR_DMA_FORCE_END:
+ if (printk_ratelimit())
+ dev_info(ctodev(card),
+ "%s: forcing end of tx descriptor " \
+ "with status %x\n",
+ __func__, status);
+ netdev->stats.tx_dropped++;
+ break;
+
+ case GELIC_DESCR_DMA_COMPLETE:
+ if (tx_chain->tail->skb) {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes +=
+ tx_chain->tail->skb->len;
+ }
+ break;
+
+ case GELIC_DESCR_DMA_CARDOWNED:
+ /* pending tx request */
+ default:
+ /* any other value (== GELIC_DESCR_DMA_NOT_IN_USE) */
+ if (!stop)
+ goto out;
+ }
+ gelic_descr_release_tx(card, tx_chain->tail);
+ release++;
+ }
+out:
+ if (!stop && release)
+ gelic_card_wake_queues(card);
+}
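+
+/*
+ * Note on the walk above: descriptors complete in ring order, so on
+ * the hot path (stop == 0) the loop may stop at the first descriptor
+ * the card still owns; at teardown (stop != 0) every descriptor is
+ * reclaimed regardless of its status.
+ */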
+
+/**
+ * gelic_net_set_multi - sets multicast addresses and promisc flags
+ * @netdev: interface device structure
+ *
+ * gelic_net_set_multi configures multicast addresses as needed for the
+ * netdev interface. It also sets up multicast, allmulti and promisc
+ * flags appropriately
+ */
+void gelic_net_set_multi(struct net_device *netdev)
+{
+ struct gelic_card *card = netdev_card(netdev);
+ struct netdev_hw_addr *ha;
+ unsigned int i;
+ uint8_t *p;
+ u64 addr;
+ int status;
+
+ /* clear all multicast address */
+ status = lv1_net_remove_multicast_address(bus_id(card), dev_id(card),
+ 0, 1);
+ if (status)
+ dev_err(ctodev(card),
+ "lv1_net_remove_multicast_address failed %d\n",
+ status);
+ /* set broadcast address */
+ status = lv1_net_add_multicast_address(bus_id(card), dev_id(card),
+ GELIC_NET_BROADCAST_ADDR, 0);
+ if (status)
+ dev_err(ctodev(card),
+ "lv1_net_add_multicast_address failed, %d\n",
+ status);
+
+ if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > GELIC_NET_MC_COUNT_MAX)) {
+ status = lv1_net_add_multicast_address(bus_id(card),
+ dev_id(card),
+ 0, 1);
+ if (status)
+ dev_err(ctodev(card),
+ "lv1_net_add_multicast_address failed, %d\n",
+ status);
+ return;
+ }
+
+ /* set multicast addresses */
+ netdev_for_each_mc_addr(ha, netdev) {
+ addr = 0;
+ p = ha->addr;
+ for (i = 0; i < ETH_ALEN; i++) {
+ addr <<= 8;
+ addr |= *p++;
+ }
+ status = lv1_net_add_multicast_address(bus_id(card),
+ dev_id(card),
+ addr, 0);
+ if (status)
+ dev_err(ctodev(card),
+ "lv1_net_add_multicast_address failed, %d\n",
+ status);
+ }
+}
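+
+/*
+ * Illustration of the address packing above: the multicast address
+ * 01:00:5e:00:00:01 is folded byte by byte into the u64 value
+ * 0x01005e000001, the same layout GELIC_NET_BROADCAST_ADDR
+ * (0xffffffffffffL) uses for the broadcast entry.
+ */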
+
+/**
+ * gelic_card_enable_rxdmac - enables the receive DMA controller
+ * @card: card structure
+ *
+ * gelic_card_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
+ * in the GDADMACCNTR register
+ */
+static inline void gelic_card_enable_rxdmac(struct gelic_card *card)
+{
+ int status;
+
+#ifdef DEBUG
+ if (gelic_descr_get_status(card->rx_chain.head) !=
+ GELIC_DESCR_DMA_CARDOWNED) {
+ printk(KERN_ERR "%s: status=%x\n", __func__,
+ be32_to_cpu(card->rx_chain.head->dmac_cmd_status));
+ printk(KERN_ERR "%s: nextphy=%x\n", __func__,
+ be32_to_cpu(card->rx_chain.head->next_descr_addr));
+ printk(KERN_ERR "%s: head=%p\n", __func__,
+ card->rx_chain.head);
+ }
+#endif
+ status = lv1_net_start_rx_dma(bus_id(card), dev_id(card),
+ card->rx_chain.head->bus_addr, 0);
+ if (status)
+ dev_info(ctodev(card),
+ "lv1_net_start_rx_dma failed, status=%d\n", status);
+}
+
+/**
+ * gelic_card_disable_rxdmac - disables the receive DMA controller
+ * @card: card structure
+ *
+ * gelic_card_disable_rxdmac terminates processing on the DMA controller by
+ * turning off DMA and issuing a force end
+ */
+static inline void gelic_card_disable_rxdmac(struct gelic_card *card)
+{
+ int status;
+
+ /* this hvc blocks until the DMA in progress really stopped */
+ status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card), 0);
+ if (status)
+ dev_err(ctodev(card),
+ "lv1_net_stop_rx_dma failed, %d\n", status);
+}
+
+/**
+ * gelic_card_disable_txdmac - disables the transmit DMA controller
+ * @card: card structure
+ *
+ * gelic_card_disable_txdmac terminates processing on the DMA controller by
+ * turning off DMA and issuing a force end
+ */
+static inline void gelic_card_disable_txdmac(struct gelic_card *card)
+{
+ int status;
+
+ /* this hvc blocks until the DMA in progress really stopped */
+ status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card), 0);
+ if (status)
+ dev_err(ctodev(card),
+ "lv1_net_stop_tx_dma failed, status=%d\n", status);
+}
+
+/**
+ * gelic_net_stop - called upon ifconfig down
+ * @netdev: interface device structure
+ *
+ * always returns 0
+ */
+int gelic_net_stop(struct net_device *netdev)
+{
+ struct gelic_card *card;
+
+ pr_debug("%s: start\n", __func__);
+
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+
+ card = netdev_card(netdev);
+ gelic_card_down(card);
+
+ pr_debug("%s: done\n", __func__);
+ return 0;
+}
+
+/**
+ * gelic_card_get_next_tx_descr - returns the next available tx descriptor
+ * @card: device structure to get descriptor from
+ *
+ * returns the address of the next descriptor, or NULL if not available.
+ */
+static struct gelic_descr *
+gelic_card_get_next_tx_descr(struct gelic_card *card)
+{
+ if (!card->tx_chain.head)
+ return NULL;
+ /* see if the next descriptor is free */
+ if (card->tx_chain.tail != card->tx_chain.head->next &&
+ gelic_descr_get_status(card->tx_chain.head) ==
+ GELIC_DESCR_DMA_NOT_IN_USE)
+ return card->tx_chain.head;
+ else
+ return NULL;
+}
+
+/**
+ * gelic_descr_set_tx_cmdstat - sets the tx descriptor command field
+ * @descr: descriptor structure to fill out
+ * @skb: packet to consider
+ *
+ * fills out the command and status field of the descriptor structure,
+ * depending on hardware checksum settings. This function assumes a wmb()
+ * has executed before.
+ */
+static void gelic_descr_set_tx_cmdstat(struct gelic_descr *descr,
+ struct sk_buff *skb)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ descr->dmac_cmd_status =
+ cpu_to_be32(GELIC_DESCR_DMA_CMD_NO_CHKSUM |
+ GELIC_DESCR_TX_DMA_FRAME_TAIL);
+ else {
+ /* is packet ip?
+ * if yes: tcp? udp? */
+ if (skb->protocol == htons(ETH_P_IP)) {
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ descr->dmac_cmd_status =
+ cpu_to_be32(GELIC_DESCR_DMA_CMD_TCP_CHKSUM |
+ GELIC_DESCR_TX_DMA_FRAME_TAIL);
+
+ else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+ descr->dmac_cmd_status =
+ cpu_to_be32(GELIC_DESCR_DMA_CMD_UDP_CHKSUM |
+ GELIC_DESCR_TX_DMA_FRAME_TAIL);
+ else /*
+ * the stack should checksum non-tcp and non-udp
+ * packets on its own: NETIF_F_IP_CSUM
+ */
+ descr->dmac_cmd_status =
+ cpu_to_be32(GELIC_DESCR_DMA_CMD_NO_CHKSUM |
+ GELIC_DESCR_TX_DMA_FRAME_TAIL);
+ }
+ }
+}
+
+static inline struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb,
+ unsigned short tag)
+{
+ struct vlan_ethhdr *veth;
+ static unsigned int c;
+
+ if (skb_headroom(skb) < VLAN_HLEN) {
+ struct sk_buff *sk_tmp = skb;
+ pr_debug("%s: hd=%d c=%ud\n", __func__, skb_headroom(skb), c);
+ skb = skb_realloc_headroom(sk_tmp, VLAN_HLEN);
+ if (!skb)
+ return NULL;
+ dev_kfree_skb_any(sk_tmp);
+ }
+ veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
+
+ /* Move the mac addresses to the top of buffer */
+ memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
+
+ veth->h_vlan_proto = cpu_to_be16(ETH_P_8021Q);
+ veth->h_vlan_TCI = htons(tag);
+
+ return skb;
+}
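+
+/*
+ * A sketch of the rewrite done above: the 14-byte header
+ * [dst(6) | src(6) | type(2)] becomes
+ * [dst(6) | src(6) | 0x8100(2) | tag(2) | type(2)]; skb_push() opens
+ * VLAN_HLEN bytes of headroom, memmove() slides the two MAC addresses
+ * up, and the 802.1Q ethertype plus the TCI are written in between.
+ */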
+
+/**
+ * gelic_descr_prepare_tx - setup a descriptor for sending packets
+ * @card: card structure
+ * @descr: descriptor structure
+ * @skb: packet to use
+ *
+ * returns 0 on success, <0 on failure.
+ *
+ */
+static int gelic_descr_prepare_tx(struct gelic_card *card,
+ struct gelic_descr *descr,
+ struct sk_buff *skb)
+{
+ dma_addr_t buf;
+
+ if (card->vlan_required) {
+ struct sk_buff *skb_tmp;
+ enum gelic_port_type type;
+
+ type = netdev_port(skb->dev)->type;
+ skb_tmp = gelic_put_vlan_tag(skb,
+ card->vlan[type].tx);
+ if (!skb_tmp)
+ return -ENOMEM;
+ skb = skb_tmp;
+ }
+
+ buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE);
+
+ if (!buf) {
+ dev_err(ctodev(card),
+ "dma map 2 failed (%p, %i). Dropping packet\n",
+ skb->data, skb->len);
+ return -ENOMEM;
+ }
+
+ descr->buf_addr = cpu_to_be32(buf);
+ descr->buf_size = cpu_to_be32(skb->len);
+ descr->skb = skb;
+ descr->data_status = 0;
+ descr->next_descr_addr = 0; /* terminate hw descr */
+ gelic_descr_set_tx_cmdstat(descr, skb);
+
+ /* bump free descriptor pointer */
+ card->tx_chain.head = descr->next;
+ return 0;
+}
+
+/**
+ * gelic_card_kick_txdma - enables TX DMA processing
+ * @card: card structure
+ * @descr: descriptor address to enable TX processing at
+ *
+ */
+static int gelic_card_kick_txdma(struct gelic_card *card,
+ struct gelic_descr *descr)
+{
+ int status = 0;
+
+ if (card->tx_dma_progress)
+ return 0;
+
+ if (gelic_descr_get_status(descr) == GELIC_DESCR_DMA_CARDOWNED) {
+ card->tx_dma_progress = 1;
+ status = lv1_net_start_tx_dma(bus_id(card), dev_id(card),
+ descr->bus_addr, 0);
+ if (status) {
+ card->tx_dma_progress = 0;
+ dev_info(ctodev(card), "lv1_net_start_txdma failed," \
+ "status=%d\n", status);
+ }
+ }
+ return status;
+}
+
+/**
+ * gelic_net_xmit - transmits a frame over the device
+ * @skb: packet to send out
+ * @netdev: interface device structure
+ *
+ * returns 0 on success, <0 on failure
+ */
+int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct gelic_card *card = netdev_card(netdev);
+ struct gelic_descr *descr;
+ int result;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->tx_lock, flags);
+
+ gelic_card_release_tx_chain(card, 0);
+
+ descr = gelic_card_get_next_tx_descr(card);
+ if (!descr) {
+ /*
+ * no more descriptors free
+ */
+ gelic_card_stop_queues(card);
+ spin_unlock_irqrestore(&card->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ result = gelic_descr_prepare_tx(card, descr, skb);
+ if (result) {
+ /*
+ * DMA map failed. As chances are that failure
+ * would continue, just release skb and return
+ */
+ netdev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&card->tx_lock, flags);
+ return NETDEV_TX_OK;
+ }
+ /*
+ * link this prepared descriptor to previous one
+ * to achieve high performance
+ */
+ descr->prev->next_descr_addr = cpu_to_be32(descr->bus_addr);
+ /*
+ * as hardware descriptor is modified in the above lines,
+ * ensure that the hardware sees it
+ */
+ wmb();
+ if (gelic_card_kick_txdma(card, descr)) {
+ /*
+ * kick failed.
+ * release descriptor which was just prepared
+ */
+ netdev->stats.tx_dropped++;
+ /* don't trigger BUG_ON() in gelic_descr_release_tx */
+ descr->data_status = cpu_to_be32(GELIC_DESCR_TX_TAIL);
+ gelic_descr_release_tx(card, descr);
+ /* reset head */
+ card->tx_chain.head = descr;
+ /* reset hw termination */
+ descr->prev->next_descr_addr = 0;
+ dev_info(ctodev(card), "%s: kick failure\n", __func__);
+ }
+
+ spin_unlock_irqrestore(&card->tx_lock, flags);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * gelic_net_pass_skb_up - takes an skb from a descriptor and passes it on
+ * @descr: descriptor to process
+ * @card: card structure
+ * @netdev: net_device structure to be passed packet
+ *
+ * iommu-unmaps the skb, fills out skb structure and passes the data to the
+ * stack. The descriptor state is not changed.
+ */
+static void gelic_net_pass_skb_up(struct gelic_descr *descr,
+ struct gelic_card *card,
+ struct net_device *netdev)
+{
+ struct sk_buff *skb = descr->skb;
+ u32 data_status, data_error;
+
+ data_status = be32_to_cpu(descr->data_status);
+ data_error = be32_to_cpu(descr->data_error);
+ /* unmap skb buffer */
+ dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr),
+ GELIC_NET_MAX_MTU,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, be32_to_cpu(descr->valid_size)?
+ be32_to_cpu(descr->valid_size) :
+ be32_to_cpu(descr->result_size));
+ if (!descr->valid_size)
+ dev_info(ctodev(card), "buffer full %x %x %x\n",
+ be32_to_cpu(descr->result_size),
+ be32_to_cpu(descr->buf_size),
+ be32_to_cpu(descr->dmac_cmd_status));
+
+ descr->skb = NULL;
+ /*
+ * the card puts a 2-byte vlan tag in front
+ * of the ethernet frame
+ */
+ skb_pull(skb, 2);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ /* checksum offload */
+ if (netdev->features & NETIF_F_RXCSUM) {
+ if ((data_status & GELIC_DESCR_DATA_STATUS_CHK_MASK) &&
+ (!(data_error & GELIC_DESCR_DATA_ERROR_CHK_MASK)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+ } else
+ skb_checksum_none_assert(skb);
+
+ /* update netdevice statistics */
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += skb->len;
+
+ /* pass skb up to stack */
+ netif_receive_skb(skb);
+}
+
+/**
+ * gelic_card_decode_one_descr - processes an rx descriptor
+ * @card: card structure
+ *
+ * returns 1 if a packet has been sent to the stack, otherwise 0
+ *
+ * processes an rx descriptor by iommu-unmapping the data buffer and passing
+ * the packet up to the stack
+ */
+static int gelic_card_decode_one_descr(struct gelic_card *card)
+{
+ enum gelic_descr_dma_status status;
+ struct gelic_descr_chain *chain = &card->rx_chain;
+ struct gelic_descr *descr = chain->head;
+ struct net_device *netdev = NULL;
+ int dmac_chain_ended;
+
+ status = gelic_descr_get_status(descr);
+
+ if (status == GELIC_DESCR_DMA_CARDOWNED)
+ return 0;
+
+ if (status == GELIC_DESCR_DMA_NOT_IN_USE) {
+ dev_dbg(ctodev(card), "dormant descr? %p\n", descr);
+ return 0;
+ }
+
+ /* netdevice select */
+ if (card->vlan_required) {
+ unsigned int i;
+ u16 vid;
+ vid = *(u16 *)(descr->skb->data) & VLAN_VID_MASK;
+ for (i = 0; i < GELIC_PORT_MAX; i++) {
+ if (card->vlan[i].rx == vid) {
+ netdev = card->netdev[i];
+ break;
+ }
+ }
+ if (GELIC_PORT_MAX <= i) {
+ pr_info("%s: unknown packet vid=%x\n", __func__, vid);
+ goto refill;
+ }
+ } else
+ netdev = card->netdev[GELIC_PORT_ETHERNET_0];
+
+ if ((status == GELIC_DESCR_DMA_RESPONSE_ERROR) ||
+ (status == GELIC_DESCR_DMA_PROTECTION_ERROR) ||
+ (status == GELIC_DESCR_DMA_FORCE_END)) {
+ dev_info(ctodev(card), "dropping RX descriptor with state %x\n",
+ status);
+ netdev->stats.rx_dropped++;
+ goto refill;
+ }
+
+ if (status == GELIC_DESCR_DMA_BUFFER_FULL) {
+ /*
+ * Buffer full occurs if and only if the frame was
+ * longer than this descriptor's buffer; had it been
+ * equal or shorter, a FRAME_END condition would have
+ * occurred instead. Either way the frame was longer
+ * than the MTU, so just drop it.
+ */
+ dev_info(ctodev(card), "overlength frame\n");
+ goto refill;
+ }
+ /*
+ * any descriptor status other than FRAME_END here
+ * should be treated as an error.
+ */
+ if (status != GELIC_DESCR_DMA_FRAME_END) {
+ dev_dbg(ctodev(card), "RX descriptor with state %x\n",
+ status);
+ goto refill;
+ }
+
+ /* ok, we've got a packet in descr */
+ gelic_net_pass_skb_up(descr, card, netdev);
+refill:
+
+ /* is the current descriptor terminated with next_descr == NULL? */
+ dmac_chain_ended =
+ be32_to_cpu(descr->dmac_cmd_status) &
+ GELIC_DESCR_RX_DMA_CHAIN_END;
+ /*
+ * Make sure the DMAC always sees the end of the
+ * descriptor chain, to avoid an unwanted DMAC
+ * overrun.
+ */
+ descr->next_descr_addr = 0;
+
+ /* change the descriptor state: */
+ gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
+
+ /*
+ * this call can fail, but for now, just leave this
+ * descriptor without skb
+ */
+ gelic_descr_prepare_rx(card, descr);
+
+ chain->tail = descr;
+ chain->head = descr->next;
+
+ /*
+ * Make this descriptor the new end of the chain.
+ */
+ descr->prev->next_descr_addr = cpu_to_be32(descr->bus_addr);
+
+ /*
+ * If the DMAC hit the end of the chain it has
+ * stopped, so re-enable it.
+ */
+
+ if (dmac_chain_ended)
+ gelic_card_enable_rxdmac(card);
+
+ return 1;
+}
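+
+/*
+ * Ordering note for the refill path above: the recycled descriptor is
+ * first terminated (next_descr_addr == 0) and only then linked in by
+ * its predecessor, so the DMAC can never chase a half-initialized
+ * descriptor off the end of the chain.
+ */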
+
+/**
+ * gelic_net_poll - NAPI poll function called by the stack to return packets
+ * @napi: napi structure
+ * @budget: number of packets we can pass to the stack at most
+ *
+ * returns the number of packets processed
+ *
+ */
+static int gelic_net_poll(struct napi_struct *napi, int budget)
+{
+ struct gelic_card *card = container_of(napi, struct gelic_card, napi);
+ int packets_done = 0;
+
+ while (packets_done < budget) {
+ if (!gelic_card_decode_one_descr(card))
+ break;
+
+ packets_done++;
+ }
+
+ if (packets_done < budget) {
+ napi_complete(napi);
+ gelic_card_rx_irq_on(card);
+ }
+ return packets_done;
+}
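+
+/*
+ * This follows the usual NAPI contract: consuming fewer than budget
+ * packets means the poll is complete, so the RX interrupt is re-armed
+ * via gelic_card_rx_irq_on(); otherwise the stack calls
+ * gelic_net_poll() again with the interrupt still masked.
+ */
+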
+/**
+ * gelic_net_change_mtu - changes the MTU of an interface
+ * @netdev: interface device structure
+ * @new_mtu: new MTU value
+ *
+ * returns 0 on success, <0 on failure
+ */
+int gelic_net_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ /* no need to re-allocate skbs -- the max mtu is about 2.3k
+ * and the mtu affects outbound traffic only anyway */
+ if ((new_mtu < GELIC_NET_MIN_MTU) ||
+ (new_mtu > GELIC_NET_MAX_MTU)) {
+ return -EINVAL;
+ }
+ netdev->mtu = new_mtu;
+ return 0;
+}
+
+/**
+ * gelic_card_interrupt - event handler for gelic_net
+ */
+static irqreturn_t gelic_card_interrupt(int irq, void *ptr)
+{
+ unsigned long flags;
+ struct gelic_card *card = ptr;
+ u64 status;
+
+ status = card->irq_status;
+
+ if (!status)
+ return IRQ_NONE;
+
+ status &= card->irq_mask;
+
+ if (status & GELIC_CARD_RXINT) {
+ gelic_card_rx_irq_off(card);
+ napi_schedule(&card->napi);
+ }
+
+ if (status & GELIC_CARD_TXINT) {
+ spin_lock_irqsave(&card->tx_lock, flags);
+ card->tx_dma_progress = 0;
+ gelic_card_release_tx_chain(card, 0);
+ /* kick outstanding tx descriptor if any */
+ gelic_card_kick_txdma(card, card->tx_chain.tail);
+ spin_unlock_irqrestore(&card->tx_lock, flags);
+ }
+
+ /* ether port status changed */
+ if (status & GELIC_CARD_PORT_STATUS_CHANGED)
+ gelic_card_get_ether_port_status(card, 1);
+
+#ifdef CONFIG_GELIC_WIRELESS
+ if (status & (GELIC_CARD_WLAN_EVENT_RECEIVED |
+ GELIC_CARD_WLAN_COMMAND_COMPLETED))
+ gelic_wl_interrupt(card->netdev[GELIC_PORT_WIRELESS], status);
+#endif
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * gelic_net_poll_controller - artificial interrupt for netconsole etc.
+ * @netdev: interface device structure
+ *
+ * see Documentation/networking/netconsole.txt
+ */
+void gelic_net_poll_controller(struct net_device *netdev)
+{
+ struct gelic_card *card = netdev_card(netdev);
+
+ gelic_card_set_irq_mask(card, 0);
+ gelic_card_interrupt(netdev->irq, netdev);
+ gelic_card_set_irq_mask(card, card->irq_mask);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+/**
+ * gelic_net_open - called upon ifconfig up
+ * @netdev: interface device structure
+ *
+ * returns 0 on success, <0 on failure
+ *
+ * gelic_net_open allocates all the descriptors and memory needed for
+ * operation, sets up multicast list and enables interrupts
+ */
+int gelic_net_open(struct net_device *netdev)
+{
+ struct gelic_card *card = netdev_card(netdev);
+
+ dev_dbg(ctodev(card), " -> %s %p\n", __func__, netdev);
+
+ gelic_card_up(card);
+
+ netif_start_queue(netdev);
+ gelic_card_get_ether_port_status(card, 1);
+
+ dev_dbg(ctodev(card), " <- %s\n", __func__);
+ return 0;
+}
+
+void gelic_net_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, DRV_NAME, sizeof(info->driver) - 1);
+ strncpy(info->version, DRV_VERSION, sizeof(info->version) - 1);
+}
+
+static int gelic_ether_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct gelic_card *card = netdev_card(netdev);
+
+ gelic_card_get_ether_port_status(card, 0);
+
+ if (card->ether_port_status & GELIC_LV1_ETHER_FULL_DUPLEX)
+ cmd->duplex = DUPLEX_FULL;
+ else
+ cmd->duplex = DUPLEX_HALF;
+
+ switch (card->ether_port_status & GELIC_LV1_ETHER_SPEED_MASK) {
+ case GELIC_LV1_ETHER_SPEED_10:
+ ethtool_cmd_speed_set(cmd, SPEED_10);
+ break;
+ case GELIC_LV1_ETHER_SPEED_100:
+ ethtool_cmd_speed_set(cmd, SPEED_100);
+ break;
+ case GELIC_LV1_ETHER_SPEED_1000:
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
+ break;
+ default:
+ pr_info("%s: speed unknown\n", __func__);
+ ethtool_cmd_speed_set(cmd, SPEED_10);
+ break;
+ }
+
+ cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg |
+ SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full;
+ cmd->advertising = cmd->supported;
+ if (card->link_mode & GELIC_LV1_ETHER_AUTO_NEG) {
+ cmd->autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->advertising &= ~ADVERTISED_Autoneg;
+ }
+ cmd->port = PORT_TP;
+
+ return 0;
+}
+
+static int gelic_ether_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct gelic_card *card = netdev_card(netdev);
+ u64 mode;
+ int ret;
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ mode = GELIC_LV1_ETHER_AUTO_NEG;
+ } else {
+ switch (cmd->speed) {
+ case SPEED_10:
+ mode = GELIC_LV1_ETHER_SPEED_10;
+ break;
+ case SPEED_100:
+ mode = GELIC_LV1_ETHER_SPEED_100;
+ break;
+ case SPEED_1000:
+ mode = GELIC_LV1_ETHER_SPEED_1000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (cmd->duplex == DUPLEX_FULL)
+ mode |= GELIC_LV1_ETHER_FULL_DUPLEX;
+ else if (cmd->speed == SPEED_1000) {
+ pr_info("1000 half duplex is not supported.\n");
+ return -EINVAL;
+ }
+ }
+
+ ret = gelic_card_set_link_mode(card, mode);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void gelic_net_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ if (0 <= ps3_compare_firmware_version(2, 2, 0))
+ wol->supported = WAKE_MAGIC;
+ else
+ wol->supported = 0;
+
+ wol->wolopts = ps3_sys_manager_get_wol() ? wol->supported : 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int gelic_net_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ int status;
+ struct gelic_card *card;
+ u64 v1, v2;
+
+ if (ps3_compare_firmware_version(2, 2, 0) < 0 ||
+ !capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ card = netdev_card(netdev);
+ if (wol->wolopts & WAKE_MAGIC) {
+ status = lv1_net_control(bus_id(card), dev_id(card),
+ GELIC_LV1_SET_WOL,
+ GELIC_LV1_WOL_MAGIC_PACKET,
+ 0, GELIC_LV1_WOL_MP_ENABLE,
+ &v1, &v2);
+ if (status) {
+ pr_info("%s: enabling WOL failed %d\n", __func__,
+ status);
+ status = -EIO;
+ goto done;
+ }
+ status = lv1_net_control(bus_id(card), dev_id(card),
+ GELIC_LV1_SET_WOL,
+ GELIC_LV1_WOL_ADD_MATCH_ADDR,
+ 0, GELIC_LV1_WOL_MATCH_ALL,
+ &v1, &v2);
+ if (!status)
+ ps3_sys_manager_set_wol(1);
+ else {
+ pr_info("%s: enabling WOL filter failed %d\n",
+ __func__, status);
+ status = -EIO;
+ }
+ } else {
+ status = lv1_net_control(bus_id(card), dev_id(card),
+ GELIC_LV1_SET_WOL,
+ GELIC_LV1_WOL_MAGIC_PACKET,
+ 0, GELIC_LV1_WOL_MP_DISABLE,
+ &v1, &v2);
+ if (status) {
+ pr_info("%s: disabling WOL failed %d\n", __func__,
+ status);
+ status = -EIO;
+ goto done;
+ }
+ status = lv1_net_control(bus_id(card), dev_id(card),
+ GELIC_LV1_SET_WOL,
+ GELIC_LV1_WOL_DELETE_MATCH_ADDR,
+ 0, GELIC_LV1_WOL_MATCH_ALL,
+ &v1, &v2);
+ if (!status)
+ ps3_sys_manager_set_wol(0);
+ else {
+ pr_info("%s: removing WOL filter failed %d\n",
+ __func__, status);
+ status = -EIO;
+ }
+ }
+done:
+ return status;
+}
+
+static const struct ethtool_ops gelic_ether_ethtool_ops = {
+ .get_drvinfo = gelic_net_get_drvinfo,
+ .get_settings = gelic_ether_get_settings,
+ .set_settings = gelic_ether_set_settings,
+ .get_link = ethtool_op_get_link,
+ .get_wol = gelic_net_get_wol,
+ .set_wol = gelic_net_set_wol,
+};
+
+/**
+ * gelic_net_tx_timeout_task - task scheduled by the watchdog timeout
+ * function (runs outside interrupt context)
+ * @work: work context of the tx timeout task
+ *
+ * called as task when tx hangs, resets interface (if interface is up)
+ */
+static void gelic_net_tx_timeout_task(struct work_struct *work)
+{
+ struct gelic_card *card =
+ container_of(work, struct gelic_card, tx_timeout_task);
+ struct net_device *netdev = card->netdev[GELIC_PORT_ETHERNET_0];
+
+ dev_info(ctodev(card), "%s:Timed out. Restarting...\n", __func__);
+
+ if (!(netdev->flags & IFF_UP))
+ goto out;
+
+ netif_device_detach(netdev);
+ gelic_net_stop(netdev);
+
+ gelic_net_open(netdev);
+ netif_device_attach(netdev);
+
+out:
+ atomic_dec(&card->tx_timeout_task_counter);
+}
+
+/**
+ * gelic_net_tx_timeout - called when the tx timeout watchdog kicks in.
+ * @netdev: interface device structure
+ *
+ * called, if tx hangs. Schedules a task that resets the interface
+ */
+void gelic_net_tx_timeout(struct net_device *netdev)
+{
+ struct gelic_card *card;
+
+ card = netdev_card(netdev);
+ atomic_inc(&card->tx_timeout_task_counter);
+ if (netdev->flags & IFF_UP)
+ schedule_work(&card->tx_timeout_task);
+ else
+ atomic_dec(&card->tx_timeout_task_counter);
+}
+
+static const struct net_device_ops gelic_netdevice_ops = {
+ .ndo_open = gelic_net_open,
+ .ndo_stop = gelic_net_stop,
+ .ndo_start_xmit = gelic_net_xmit,
+ .ndo_set_rx_mode = gelic_net_set_multi,
+ .ndo_change_mtu = gelic_net_change_mtu,
+ .ndo_tx_timeout = gelic_net_tx_timeout,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = gelic_net_poll_controller,
+#endif
+};
+
+/**
+ * gelic_ether_setup_netdev_ops - initialization of net_device operations
+ * @netdev: net_device structure
+ *
+ * fills out function pointers in the net_device structure
+ */
+static void __devinit gelic_ether_setup_netdev_ops(struct net_device *netdev,
+ struct napi_struct *napi)
+{
+ netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
+ /* NAPI */
+ netif_napi_add(netdev, napi,
+ gelic_net_poll, GELIC_NET_NAPI_WEIGHT);
+ netdev->ethtool_ops = &gelic_ether_ethtool_ops;
+ netdev->netdev_ops = &gelic_netdevice_ops;
+}
+
+/**
+ * gelic_net_setup_netdev - initialization of net_device
+ * @netdev: net_device structure
+ * @card: card structure
+ *
+ * Returns 0 on success or <0 on failure
+ *
+ * gelic_net_setup_netdev initializes the net_device structure
+ * and registers it.
+ **/
+int __devinit gelic_net_setup_netdev(struct net_device *netdev,
+ struct gelic_card *card)
+{
+ int status;
+ u64 v1, v2;
+
+ netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+
+ netdev->features = NETIF_F_IP_CSUM;
+ if (GELIC_CARD_RX_CSUM_DEFAULT)
+ netdev->features |= NETIF_F_RXCSUM;
+
+ status = lv1_net_control(bus_id(card), dev_id(card),
+ GELIC_LV1_GET_MAC_ADDRESS,
+ 0, 0, 0, &v1, &v2);
+ v1 <<= 16;
+ if (status || !is_valid_ether_addr((u8 *)&v1)) {
+ dev_info(ctodev(card),
+ "%s:lv1_net_control GET_MAC_ADDR failed %d\n",
+ __func__, status);
+ return -EINVAL;
+ }
+ memcpy(netdev->dev_addr, &v1, ETH_ALEN);
+
+ if (card->vlan_required) {
+ netdev->hard_header_len += VLAN_HLEN;
+ /*
+ * As vlan is internally used,
+ * we can not receive vlan packets
+ */
+ netdev->features |= NETIF_F_VLAN_CHALLENGED;
+ }
+
+ status = register_netdev(netdev);
+ if (status) {
+ dev_err(ctodev(card), "%s:Couldn't register %s %d\n",
+ __func__, netdev->name, status);
+ return status;
+ }
+ dev_info(ctodev(card), "%s: MAC addr %pM\n",
+ netdev->name, netdev->dev_addr);
+
+ return 0;
+}
+
+/**
+ * gelic_alloc_card_net - allocates net_device and card structure
+ *
+ * returns the card structure or NULL in case of errors
+ *
+ * the card and net_device structures are linked to each other
+ */
+#define GELIC_ALIGN (32)
+static struct gelic_card * __devinit gelic_alloc_card_net(struct net_device **netdev)
+{
+ struct gelic_card *card;
+ struct gelic_port *port;
+ void *p;
+ size_t alloc_size;
+ /*
+ * gelic requires the dma descriptors to be 32-byte aligned and
+ * the hypervisor requires irq_status to be 8-byte aligned.
+ */
+ BUILD_BUG_ON(offsetof(struct gelic_card, irq_status) % 8);
+ BUILD_BUG_ON(offsetof(struct gelic_card, descr) % 32);
+ alloc_size =
+ sizeof(struct gelic_card) +
+ sizeof(struct gelic_descr) * GELIC_NET_RX_DESCRIPTORS +
+ sizeof(struct gelic_descr) * GELIC_NET_TX_DESCRIPTORS +
+ GELIC_ALIGN - 1;
+
+ p = kzalloc(alloc_size, GFP_KERNEL);
+ if (!p)
+ return NULL;
+ card = PTR_ALIGN(p, GELIC_ALIGN);
+ card->unalign = p;
+
+ /*
+ * alloc netdev
+ */
+ *netdev = alloc_etherdev(sizeof(struct gelic_port));
+ if (!*netdev) {
+ kfree(card->unalign);
+ return NULL;
+ }
+ port = netdev_priv(*netdev);
+
+ /* gelic_port */
+ port->netdev = *netdev;
+ port->card = card;
+ port->type = GELIC_PORT_ETHERNET_0;
+
+ /* gelic_card */
+ card->netdev[GELIC_PORT_ETHERNET_0] = *netdev;
+
+ INIT_WORK(&card->tx_timeout_task, gelic_net_tx_timeout_task);
+ init_waitqueue_head(&card->waitq);
+ atomic_set(&card->tx_timeout_task_counter, 0);
+ mutex_init(&card->updown_lock);
+ atomic_set(&card->users, 0);
+
+ return card;
+}
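+
+/*
+ * Illustration of the aligned allocation above (the pointer value is
+ * hypothetical): if kzalloc() returns ...0x0168, PTR_ALIGN(p, 32)
+ * rounds it up to ...0x0180; the raw pointer is kept in card->unalign
+ * for the eventual kfree(), and the GELIC_ALIGN - 1 slack bytes
+ * guarantee that the aligned card still fits in the allocation.
+ */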
+
+static void __devinit gelic_card_get_vlan_info(struct gelic_card *card)
+{
+ u64 v1, v2;
+ int status;
+ unsigned int i;
+ struct {
+ int tx;
+ int rx;
+ } vlan_id_ix[2] = {
+ [GELIC_PORT_ETHERNET_0] = {
+ .tx = GELIC_LV1_VLAN_TX_ETHERNET_0,
+ .rx = GELIC_LV1_VLAN_RX_ETHERNET_0
+ },
+ [GELIC_PORT_WIRELESS] = {
+ .tx = GELIC_LV1_VLAN_TX_WIRELESS,
+ .rx = GELIC_LV1_VLAN_RX_WIRELESS
+ }
+ };
+
+ for (i = 0; i < ARRAY_SIZE(vlan_id_ix); i++) {
+ /* tx tag */
+ status = lv1_net_control(bus_id(card), dev_id(card),
+ GELIC_LV1_GET_VLAN_ID,
+ vlan_id_ix[i].tx,
+ 0, 0, &v1, &v2);
+ if (status || !v1) {
+ if (status != LV1_NO_ENTRY)
+ dev_dbg(ctodev(card),
+ "get vlan id for tx(%d) failed(%d)\n",
+ vlan_id_ix[i].tx, status);
+ card->vlan[i].tx = 0;
+ card->vlan[i].rx = 0;
+ continue;
+ }
+ card->vlan[i].tx = (u16)v1;
+
+ /* rx tag */
+ status = lv1_net_control(bus_id(card), dev_id(card),
+ GELIC_LV1_GET_VLAN_ID,
+ vlan_id_ix[i].rx,
+ 0, 0, &v1, &v2);
+ if (status || !v1) {
+ if (status != LV1_NO_ENTRY)
+ dev_info(ctodev(card),
+ "get vlan id for rx(%d) failed(%d)\n",
+ vlan_id_ix[i].rx, status);
+ card->vlan[i].tx = 0;
+ card->vlan[i].rx = 0;
+ continue;
+ }
+ card->vlan[i].rx = (u16)v1;
+
+ dev_dbg(ctodev(card), "vlan_id[%d] tx=%02x rx=%02x\n",
+ i, card->vlan[i].tx, card->vlan[i].rx);
+ }
+
+ if (card->vlan[GELIC_PORT_ETHERNET_0].tx) {
+ BUG_ON(!card->vlan[GELIC_PORT_WIRELESS].tx);
+ card->vlan_required = 1;
+ } else
+ card->vlan_required = 0;
+
+ /* check for wireless-capable firmware */
+ if (ps3_compare_firmware_version(1, 6, 0) < 0) {
+ card->vlan[GELIC_PORT_WIRELESS].tx = 0;
+ card->vlan[GELIC_PORT_WIRELESS].rx = 0;
+ }
+
+ dev_info(ctodev(card), "internal vlan %s\n",
+ card->vlan_required? "enabled" : "disabled");
+}
+
+/**
+ * ps3_gelic_driver_probe - add a device to the control of this driver
+ */
+static int __devinit ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
+{
+ struct gelic_card *card;
+ struct net_device *netdev;
+ int result;
+
+ pr_debug("%s: called\n", __func__);
+ result = ps3_open_hv_device(dev);
+
+ if (result) {
+ dev_dbg(&dev->core, "%s:ps3_open_hv_device failed\n",
+ __func__);
+ goto fail_open;
+ }
+
+ result = ps3_dma_region_create(dev->d_region);
+
+ if (result) {
+ dev_dbg(&dev->core, "%s:ps3_dma_region_create failed(%d)\n",
+ __func__, result);
+ BUG_ON("check region type");
+ goto fail_dma_region;
+ }
+
+ /* alloc card/netdevice */
+ card = gelic_alloc_card_net(&netdev);
+ if (!card) {
+ dev_info(&dev->core, "%s:gelic_net_alloc_card failed\n",
+ __func__);
+ result = -ENOMEM;
+ goto fail_alloc_card;
+ }
+ ps3_system_bus_set_drvdata(dev, card);
+ card->dev = dev;
+
+ /* get internal vlan info */
+ gelic_card_get_vlan_info(card);
+
+ card->link_mode = GELIC_LV1_ETHER_AUTO_NEG;
+
+ /* setup interrupt */
+ result = lv1_net_set_interrupt_status_indicator(bus_id(card),
+ dev_id(card),
+ ps3_mm_phys_to_lpar(__pa(&card->irq_status)),
+ 0);
+
+ if (result) {
+ dev_dbg(&dev->core,
+ "%s:set_interrupt_status_indicator failed: %s\n",
+ __func__, ps3_result(result));
+ result = -EIO;
+ goto fail_status_indicator;
+ }
+
+ result = ps3_sb_event_receive_port_setup(dev, PS3_BINDING_CPU_ANY,
+ &card->irq);
+
+ if (result) {
+ dev_info(ctodev(card),
+ "%s:gelic_net_open_device failed (%d)\n",
+ __func__, result);
+ result = -EPERM;
+ goto fail_alloc_irq;
+ }
+ result = request_irq(card->irq, gelic_card_interrupt,
+ IRQF_DISABLED, netdev->name, card);
+
+ if (result) {
+ dev_info(ctodev(card), "%s:request_irq failed (%d)\n",
+ __func__, result);
+ goto fail_request_irq;
+ }
+
+ /* setup card structure */
+ card->irq_mask = GELIC_CARD_RXINT | GELIC_CARD_TXINT |
+ GELIC_CARD_PORT_STATUS_CHANGED;
+
+
+ if (gelic_card_init_chain(card, &card->tx_chain,
+ card->descr, GELIC_NET_TX_DESCRIPTORS))
+ goto fail_alloc_tx;
+ if (gelic_card_init_chain(card, &card->rx_chain,
+ card->descr + GELIC_NET_TX_DESCRIPTORS,
+ GELIC_NET_RX_DESCRIPTORS))
+ goto fail_alloc_rx;
+
+ /* head of chain */
+ card->tx_top = card->tx_chain.head;
+ card->rx_top = card->rx_chain.head;
+ dev_dbg(ctodev(card), "descr rx %p, tx %p, size %#lx, num %#x\n",
+ card->rx_top, card->tx_top, sizeof(struct gelic_descr),
+ GELIC_NET_RX_DESCRIPTORS);
+ /* allocate rx skbs */
+ if (gelic_card_alloc_rx_skbs(card))
+ goto fail_alloc_skbs;
+
+ spin_lock_init(&card->tx_lock);
+ card->tx_dma_progress = 0;
+
+ /* setup net_device structure */
+ netdev->irq = card->irq;
+ SET_NETDEV_DEV(netdev, &card->dev->core);
+ gelic_ether_setup_netdev_ops(netdev, &card->napi);
+ result = gelic_net_setup_netdev(netdev, card);
+ if (result) {
+ dev_dbg(&dev->core, "%s: setup_netdev failed %d",
+ __func__, result);
+ goto fail_setup_netdev;
+ }
+
+#ifdef CONFIG_GELIC_WIRELESS
+ if (gelic_wl_driver_probe(card)) {
+ dev_dbg(&dev->core, "%s: WL init failed\n", __func__);
+ goto fail_setup_netdev;
+ }
+#endif
+ pr_debug("%s: done\n", __func__);
+ return 0;
+
+fail_setup_netdev:
+fail_alloc_skbs:
+ gelic_card_free_chain(card, card->rx_chain.head);
+fail_alloc_rx:
+ gelic_card_free_chain(card, card->tx_chain.head);
+fail_alloc_tx:
+ free_irq(card->irq, card);
+ netdev->irq = NO_IRQ;
+fail_request_irq:
+ ps3_sb_event_receive_port_destroy(dev, card->irq);
+fail_alloc_irq:
+ lv1_net_set_interrupt_status_indicator(bus_id(card),
+ dev_id(card),
+ 0, 0);
+fail_status_indicator:
+ ps3_system_bus_set_drvdata(dev, NULL);
+ kfree(netdev_card(netdev)->unalign);
+ free_netdev(netdev);
+fail_alloc_card:
+ ps3_dma_region_free(dev->d_region);
+fail_dma_region:
+ ps3_close_hv_device(dev);
+fail_open:
+ return result;
+}
+
+/**
+ * ps3_gelic_driver_remove - remove a device from the control of this driver
+ */
+static int ps3_gelic_driver_remove(struct ps3_system_bus_device *dev)
+{
+ struct gelic_card *card = ps3_system_bus_get_drvdata(dev);
+ struct net_device *netdev0;
+ pr_debug("%s: called\n", __func__);
+
+ /* set auto-negotiation */
+ gelic_card_set_link_mode(card, GELIC_LV1_ETHER_AUTO_NEG);
+
+#ifdef CONFIG_GELIC_WIRELESS
+ gelic_wl_driver_remove(card);
+#endif
+ /* stop interrupt */
+ gelic_card_set_irq_mask(card, 0);
+
+ /* turn off DMA, force end */
+ gelic_card_disable_rxdmac(card);
+ gelic_card_disable_txdmac(card);
+
+ /* release chains */
+ gelic_card_release_tx_chain(card, 1);
+ gelic_card_release_rx_chain(card);
+
+ gelic_card_free_chain(card, card->tx_top);
+ gelic_card_free_chain(card, card->rx_top);
+
+ netdev0 = card->netdev[GELIC_PORT_ETHERNET_0];
+ /* disconnect event port */
+ free_irq(card->irq, card);
+ netdev0->irq = NO_IRQ;
+ ps3_sb_event_receive_port_destroy(card->dev, card->irq);
+
+ wait_event(card->waitq,
+ atomic_read(&card->tx_timeout_task_counter) == 0);
+
+ lv1_net_set_interrupt_status_indicator(bus_id(card), dev_id(card),
+ 0, 0);
+
+ unregister_netdev(netdev0);
+ kfree(netdev_card(netdev0)->unalign);
+ free_netdev(netdev0);
+
+ ps3_system_bus_set_drvdata(dev, NULL);
+
+ ps3_dma_region_free(dev->d_region);
+
+ ps3_close_hv_device(dev);
+
+ pr_debug("%s: done\n", __func__);
+ return 0;
+}
+
+static struct ps3_system_bus_driver ps3_gelic_driver = {
+ .match_id = PS3_MATCH_ID_GELIC,
+ .probe = ps3_gelic_driver_probe,
+ .remove = ps3_gelic_driver_remove,
+ .shutdown = ps3_gelic_driver_remove,
+ .core.name = "ps3_gelic_driver",
+ .core.owner = THIS_MODULE,
+};
+
+static int __init ps3_gelic_driver_init(void)
+{
+ return firmware_has_feature(FW_FEATURE_PS3_LV1)
+ ? ps3_system_bus_driver_register(&ps3_gelic_driver)
+ : -ENODEV;
+}
+
+static void __exit ps3_gelic_driver_exit(void)
+{
+ ps3_system_bus_driver_unregister(&ps3_gelic_driver);
+}
+
+module_init(ps3_gelic_driver_init);
+module_exit(ps3_gelic_driver_exit);
+
+MODULE_ALIAS(PS3_MODULE_ALIAS_GELIC);
+
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
new file mode 100644
index 000000000000..d3fadfbc3bcc
--- /dev/null
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -0,0 +1,380 @@
+/*
+ * PS3 Platform gelic network driver.
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2006, 2007 Sony Corporation.
+ *
+ * This file is based on: spider_net.h
+ *
+ * (C) Copyright IBM Corp. 2005
+ *
+ * Authors : Utz Bacher <utz.bacher@de.ibm.com>
+ * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef _GELIC_NET_H
+#define _GELIC_NET_H
+
+/* descriptors */
+#define GELIC_NET_RX_DESCRIPTORS 128 /* num of descriptors */
+#define GELIC_NET_TX_DESCRIPTORS 128 /* num of descriptors */
+
+#define GELIC_NET_MAX_MTU VLAN_ETH_FRAME_LEN
+#define GELIC_NET_MIN_MTU VLAN_ETH_ZLEN
+#define GELIC_NET_RXBUF_ALIGN 128
+#define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */
+#define GELIC_NET_WATCHDOG_TIMEOUT (5 * HZ)
+#define GELIC_NET_NAPI_WEIGHT (GELIC_NET_RX_DESCRIPTORS)
+#define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL
+
+#define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */
+
+/* virtual interrupt status register bits */
+ /* INT1 */
+#define GELIC_CARD_TX_RAM_FULL_ERR 0x0000000000000001L
+#define GELIC_CARD_RX_RAM_FULL_ERR 0x0000000000000002L
+#define GELIC_CARD_TX_SHORT_FRAME_ERR 0x0000000000000004L
+#define GELIC_CARD_TX_INVALID_DESCR_ERR 0x0000000000000008L
+#define GELIC_CARD_RX_FIFO_FULL_ERR 0x0000000000002000L
+#define GELIC_CARD_RX_DESCR_CHAIN_END 0x0000000000004000L
+#define GELIC_CARD_RX_INVALID_DESCR_ERR 0x0000000000008000L
+#define GELIC_CARD_TX_RESPONCE_ERR 0x0000000000010000L
+#define GELIC_CARD_RX_RESPONCE_ERR 0x0000000000100000L
+#define GELIC_CARD_TX_PROTECTION_ERR 0x0000000000400000L
+#define GELIC_CARD_RX_PROTECTION_ERR 0x0000000004000000L
+#define GELIC_CARD_TX_TCP_UDP_CHECKSUM_ERR 0x0000000008000000L
+#define GELIC_CARD_PORT_STATUS_CHANGED 0x0000000020000000L
+#define GELIC_CARD_WLAN_EVENT_RECEIVED 0x0000000040000000L
+#define GELIC_CARD_WLAN_COMMAND_COMPLETED 0x0000000080000000L
+ /* INT 0 */
+#define GELIC_CARD_TX_FLAGGED_DESCR 0x0004000000000000L
+#define GELIC_CARD_RX_FLAGGED_DESCR 0x0040000000000000L
+#define GELIC_CARD_TX_TRANSFER_END 0x0080000000000000L
+#define GELIC_CARD_TX_DESCR_CHAIN_END 0x0100000000000000L
+#define GELIC_CARD_NUMBER_OF_RX_FRAME 0x1000000000000000L
+#define GELIC_CARD_ONE_TIME_COUNT_TIMER 0x4000000000000000L
+#define GELIC_CARD_FREE_RUN_COUNT_TIMER 0x8000000000000000L
+
+/* initial interrupt mask */
+#define GELIC_CARD_TXINT GELIC_CARD_TX_DESCR_CHAIN_END
+
+#define GELIC_CARD_RXINT (GELIC_CARD_RX_DESCR_CHAIN_END | \
+ GELIC_CARD_NUMBER_OF_RX_FRAME)
+
+ /* RX descriptor data_status bits */
+enum gelic_descr_rx_status {
+ GELIC_DESCR_RXDMADU = 0x80000000, /* destination MAC addr unknown */
+ GELIC_DESCR_RXLSTFBF = 0x40000000, /* last frame buffer */
+ GELIC_DESCR_RXIPCHK = 0x20000000, /* IP checksum performed */
+ GELIC_DESCR_RXTCPCHK = 0x10000000, /* TCP/UDP checksum performed */
+ GELIC_DESCR_RXWTPKT = 0x00C00000, /*
+ * wakeup trigger packet
+ * 01: Magic Packet (TM)
+ * 10: ARP packet
+ * 11: Multicast MAC addr
+ */
+ GELIC_DESCR_RXVLNPKT = 0x00200000, /* VLAN packet */
+ /* bit 20..16 reserved */
+ GELIC_DESCR_RXRRECNUM = 0x0000ff00, /* reception receipt number */
+ /* bit 7..0 reserved */
+};
+
+#define GELIC_DESCR_DATA_STATUS_CHK_MASK \
+ (GELIC_DESCR_RXIPCHK | GELIC_DESCR_RXTCPCHK)
+
+ /* TX descriptor data_status bits */
+enum gelic_descr_tx_status {
+ GELIC_DESCR_TX_TAIL = 0x00000001, /* gelic treated this
+ * descriptor as the end
+ * of a tx frame
+ */
+};
+
+/* RX descriptor data error bits */
+enum gelic_descr_rx_error {
+ /* bit 31 reserved */
+ GELIC_DESCR_RXALNERR = 0x40000000, /* alignment error 10/100M */
+ GELIC_DESCR_RXOVERERR = 0x20000000, /* oversize error */
+ GELIC_DESCR_RXRNTERR = 0x10000000, /* Runt error */
+ GELIC_DESCR_RXIPCHKERR = 0x08000000, /* IP checksum error */
+ GELIC_DESCR_RXTCPCHKERR = 0x04000000, /* TCP/UDP checksum error */
+ GELIC_DESCR_RXDRPPKT = 0x00100000, /* drop packet */
+ GELIC_DESCR_RXIPFMTERR = 0x00080000, /* IP packet format error */
+ /* bit 18 reserved */
+ GELIC_DESCR_RXDATAERR = 0x00020000, /* IP packet format error */
+ GELIC_DESCR_RXCALERR = 0x00010000, /* carrier extension length
+ * error */
+ GELIC_DESCR_RXCREXERR = 0x00008000, /* carrier extension error */
+ GELIC_DESCR_RXMLTCST = 0x00004000, /* multicast address frame */
+ /* bit 13..0 reserved */
+};
+#define GELIC_DESCR_DATA_ERROR_CHK_MASK \
+ (GELIC_DESCR_RXIPCHKERR | GELIC_DESCR_RXTCPCHKERR)
+
+/* DMA command and status (RX and TX)*/
+enum gelic_descr_dma_status {
+ GELIC_DESCR_DMA_COMPLETE = 0x00000000, /* used in tx */
+ GELIC_DESCR_DMA_BUFFER_FULL = 0x00000000, /* used in rx */
+ GELIC_DESCR_DMA_RESPONSE_ERROR = 0x10000000, /* used in rx, tx */
+ GELIC_DESCR_DMA_PROTECTION_ERROR = 0x20000000, /* used in rx, tx */
+ GELIC_DESCR_DMA_FRAME_END = 0x40000000, /* used in rx */
+ GELIC_DESCR_DMA_FORCE_END = 0x50000000, /* used in rx, tx */
+ GELIC_DESCR_DMA_CARDOWNED = 0xa0000000, /* used in rx, tx */
+ GELIC_DESCR_DMA_NOT_IN_USE = 0xb0000000, /* any other value */
+};
+
+#define GELIC_DESCR_DMA_STAT_MASK (0xf0000000)
+
+/* tx descriptor command and status */
+enum gelic_descr_tx_dma_status {
+ /* [19] */
+ GELIC_DESCR_TX_DMA_IKE = 0x00080000, /* IPSEC off */
+ /* [18] */
+ GELIC_DESCR_TX_DMA_FRAME_TAIL = 0x00040000, /* last descriptor of
+ * the packet
+ */
+ /* [17..16] */
+ GELIC_DESCR_TX_DMA_TCP_CHKSUM = 0x00020000, /* TCP packet */
+ GELIC_DESCR_TX_DMA_UDP_CHKSUM = 0x00030000, /* UDP packet */
+ GELIC_DESCR_TX_DMA_NO_CHKSUM = 0x00000000, /* no checksum */
+
+ /* [1] */
+ GELIC_DESCR_TX_DMA_CHAIN_END = 0x00000002, /* DMA terminated
+ * due to chain end
+ */
+};
+
+#define GELIC_DESCR_DMA_CMD_NO_CHKSUM \
+ (GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | \
+ GELIC_DESCR_TX_DMA_NO_CHKSUM)
+
+#define GELIC_DESCR_DMA_CMD_TCP_CHKSUM \
+ (GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | \
+ GELIC_DESCR_TX_DMA_TCP_CHKSUM)
+
+#define GELIC_DESCR_DMA_CMD_UDP_CHKSUM \
+ (GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | \
+ GELIC_DESCR_TX_DMA_UDP_CHKSUM)
+
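+/*
+ * Illustrative sketch (tx_cmd_for_skb is a hypothetical helper, not
+ * part of this driver): a TX path would pick one of the composite
+ * command words above from the skb checksum-offload state before
+ * handing the descriptor to the card.
+ *
+ *    static u32 tx_cmd_for_skb(const struct sk_buff *skb)
+ *    {
+ *            if (skb->ip_summed != CHECKSUM_PARTIAL)
+ *                    return GELIC_DESCR_DMA_CMD_NO_CHKSUM;
+ *            if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ *                    return GELIC_DESCR_DMA_CMD_TCP_CHKSUM;
+ *            if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+ *                    return GELIC_DESCR_DMA_CMD_UDP_CHKSUM;
+ *            return GELIC_DESCR_DMA_CMD_NO_CHKSUM;
+ *    }
+ */
+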
+enum gelic_descr_rx_dma_status {
+ /* [ 1 ] */
+ GELIC_DESCR_RX_DMA_CHAIN_END = 0x00000002, /* DMA terminated
+ * due to chain end
+ */
+};
+
+/* for lv1_net_control */
+enum gelic_lv1_net_control_code {
+ GELIC_LV1_GET_MAC_ADDRESS = 1,
+ GELIC_LV1_GET_ETH_PORT_STATUS = 2,
+ GELIC_LV1_SET_NEGOTIATION_MODE = 3,
+ GELIC_LV1_GET_VLAN_ID = 4,
+ GELIC_LV1_SET_WOL = 5,
+ GELIC_LV1_GET_CHANNEL = 6,
+ GELIC_LV1_POST_WLAN_CMD = 9,
+ GELIC_LV1_GET_WLAN_CMD_RESULT = 10,
+ GELIC_LV1_GET_WLAN_EVENT = 11,
+};
+
+/* for GELIC_LV1_SET_WOL */
+enum gelic_lv1_wol_command {
+ GELIC_LV1_WOL_MAGIC_PACKET = 1,
+ GELIC_LV1_WOL_ADD_MATCH_ADDR = 6,
+ GELIC_LV1_WOL_DELETE_MATCH_ADDR = 7,
+};
+
+/* for GELIC_LV1_WOL_MAGIC_PACKET */
+enum gelic_lv1_wol_mp_arg {
+ GELIC_LV1_WOL_MP_DISABLE = 0,
+ GELIC_LV1_WOL_MP_ENABLE = 1,
+};
+
+/* for GELIC_LV1_WOL_{ADD,DELETE}_MATCH_ADDR */
+enum gelic_lv1_wol_match_arg {
+ GELIC_LV1_WOL_MATCH_INDIVIDUAL = 0,
+ GELIC_LV1_WOL_MATCH_ALL = 1,
+};
+
+/* status returned from GET_ETH_PORT_STATUS */
+enum gelic_lv1_ether_port_status {
+ GELIC_LV1_ETHER_LINK_UP = 0x0000000000000001L,
+ GELIC_LV1_ETHER_FULL_DUPLEX = 0x0000000000000002L,
+ GELIC_LV1_ETHER_AUTO_NEG = 0x0000000000000004L,
+
+ GELIC_LV1_ETHER_SPEED_10 = 0x0000000000000010L,
+ GELIC_LV1_ETHER_SPEED_100 = 0x0000000000000020L,
+ GELIC_LV1_ETHER_SPEED_1000 = 0x0000000000000040L,
+ GELIC_LV1_ETHER_SPEED_MASK = 0x0000000000000070L,
+};
+
+enum gelic_lv1_vlan_index {
+ /* for outgoing packets */
+ GELIC_LV1_VLAN_TX_ETHERNET_0 = 0x0000000000000002L,
+ GELIC_LV1_VLAN_TX_WIRELESS = 0x0000000000000003L,
+
+ /* for incoming packets */
+ GELIC_LV1_VLAN_RX_ETHERNET_0 = 0x0000000000000012L,
+ GELIC_LV1_VLAN_RX_WIRELESS = 0x0000000000000013L,
+};
+
+enum gelic_lv1_phy {
+ GELIC_LV1_PHY_ETHERNET_0 = 0x0000000000000002L,
+};
+
+/* size of hardware part of gelic descriptor */
+#define GELIC_DESCR_SIZE (32)
+
+enum gelic_port_type {
+ GELIC_PORT_ETHERNET_0 = 0,
+ GELIC_PORT_WIRELESS = 1,
+ GELIC_PORT_MAX
+};
+
+struct gelic_descr {
+ /* as defined by the hardware */
+ __be32 buf_addr;
+ __be32 buf_size;
+ __be32 next_descr_addr;
+ __be32 dmac_cmd_status;
+ __be32 result_size;
+ __be32 valid_size; /* all zeroes for tx */
+ __be32 data_status;
+ __be32 data_error; /* all zeroes for tx */
+
+ /* used in the driver */
+ struct sk_buff *skb;
+ dma_addr_t bus_addr;
+ struct gelic_descr *next;
+ struct gelic_descr *prev;
+} __attribute__((aligned(32)));
+
+struct gelic_descr_chain {
+ /* we walk from tail to head */
+ struct gelic_descr *head;
+ struct gelic_descr *tail;
+};
+
+struct gelic_vlan_id {
+ u16 tx;
+ u16 rx;
+};
+
+struct gelic_card {
+ struct napi_struct napi;
+ struct net_device *netdev[GELIC_PORT_MAX];
+ /*
+ * the hypervisor requires irq_status to be
+ * 8-byte aligned; a u64 member is always
+ * laid out in that manner
+ */
+ u64 irq_status;
+ u64 irq_mask;
+
+ struct ps3_system_bus_device *dev;
+ struct gelic_vlan_id vlan[GELIC_PORT_MAX];
+ int vlan_required;
+
+ struct gelic_descr_chain tx_chain;
+ struct gelic_descr_chain rx_chain;
+ /*
+ * tx_lock guards tx descriptor list and
+ * tx_dma_progress.
+ */
+ spinlock_t tx_lock;
+ int tx_dma_progress;
+
+ struct work_struct tx_timeout_task;
+ atomic_t tx_timeout_task_counter;
+ wait_queue_head_t waitq;
+
+ /* only first user should up the card */
+ struct mutex updown_lock;
+ atomic_t users;
+
+ u64 ether_port_status;
+ int link_mode;
+
+ /* original address returned by kzalloc */
+ void *unalign;
+
+ /*
+ * each netdevice has a copy of the irq
+ */
+ unsigned int irq;
+ struct gelic_descr *tx_top, *rx_top;
+ struct gelic_descr descr[0]; /* must be the last */
+};
+
+struct gelic_port {
+ struct gelic_card *card;
+ struct net_device *netdev;
+ enum gelic_port_type type;
+ long priv[0]; /* long for alignment */
+};
+
+static inline struct gelic_card *port_to_card(struct gelic_port *p)
+{
+ return p->card;
+}
+static inline struct net_device *port_to_netdev(struct gelic_port *p)
+{
+ return p->netdev;
+}
+static inline struct gelic_card *netdev_card(struct net_device *d)
+{
+ return ((struct gelic_port *)netdev_priv(d))->card;
+}
+static inline struct gelic_port *netdev_port(struct net_device *d)
+{
+ return (struct gelic_port *)netdev_priv(d);
+}
+static inline struct device *ctodev(struct gelic_card *card)
+{
+ return &card->dev->core;
+}
+static inline u64 bus_id(struct gelic_card *card)
+{
+ return card->dev->bus_id;
+}
+static inline u64 dev_id(struct gelic_card *card)
+{
+ return card->dev->dev_id;
+}
+
+static inline void *port_priv(struct gelic_port *port)
+{
+ return port->priv;
+}
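+
+/*
+ * Example (illustrative; "my_priv" is a hypothetical type): a
+ * port-specific driver such as the wireless part reaches its private
+ * area and the shared card through these accessors.
+ *
+ *    struct gelic_port *port = netdev_port(netdev);
+ *    struct my_priv *priv = port_priv(port);
+ *    struct gelic_card *card = port_to_card(port);
+ */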
+
+extern int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
+/* shared netdev ops */
+extern void gelic_card_up(struct gelic_card *card);
+extern void gelic_card_down(struct gelic_card *card);
+extern int gelic_net_open(struct net_device *netdev);
+extern int gelic_net_stop(struct net_device *netdev);
+extern int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
+extern void gelic_net_set_multi(struct net_device *netdev);
+extern void gelic_net_tx_timeout(struct net_device *netdev);
+extern int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
+extern int gelic_net_setup_netdev(struct net_device *netdev,
+ struct gelic_card *card);
+
+/* shared ethtool ops */
+extern void gelic_net_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info);
+extern void gelic_net_poll_controller(struct net_device *netdev);
+
+#endif /* _GELIC_NET_H */
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
new file mode 100644
index 000000000000..fd4ed7f8cfa1
--- /dev/null
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -0,0 +1,2681 @@
+/*
+ * PS3 gelic network driver.
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2007 Sony Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/if_arp.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <net/iw_handler.h>
+
+#include <linux/dma-mapping.h>
+#include <net/checksum.h>
+#include <asm/firmware.h>
+#include <asm/ps3.h>
+#include <asm/lv1call.h>
+
+#include "ps3_gelic_net.h"
+#include "ps3_gelic_wireless.h"
+
+
+static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan,
+ u8 *essid, size_t essid_len);
+static int gelic_wl_try_associate(struct net_device *netdev);
+
+/*
+ * tables
+ */
+
+/* 802.11b/g channel to freq in MHz */
+static const int channel_freq[] = {
+ 2412, 2417, 2422, 2427, 2432,
+ 2437, 2442, 2447, 2452, 2457,
+ 2462, 2467, 2472, 2484
+};
+#define NUM_CHANNELS ARRAY_SIZE(channel_freq)
+
+/* in bps */
+static const int bitrate_list[] = {
+ 1000000,
+ 2000000,
+ 5500000,
+ 11000000,
+ 6000000,
+ 9000000,
+ 12000000,
+ 18000000,
+ 24000000,
+ 36000000,
+ 48000000,
+ 54000000
+};
+#define NUM_BITRATES ARRAY_SIZE(bitrate_list)
+
+/*
+ * WPA2 support requires hypervisor version 2.0 or later
+ */
+static inline int wpa2_capable(void)
+{
+ return 0 <= ps3_compare_firmware_version(2, 0, 0);
+}
+
+static inline int precise_ie(void)
+{
+ return 0 <= ps3_compare_firmware_version(2, 2, 0);
+}
+/*
+ * post_eurus_cmd helpers
+ */
+struct eurus_cmd_arg_info {
+ int pre_arg; /* command requires arg1, arg2 at POST COMMAND */
+ int post_arg; /* command requires arg1, arg2 at GET_RESULT */
+};
+
+static const struct eurus_cmd_arg_info cmd_info[GELIC_EURUS_CMD_MAX_INDEX] = {
+ [GELIC_EURUS_CMD_SET_COMMON_CFG] = { .pre_arg = 1},
+ [GELIC_EURUS_CMD_SET_WEP_CFG] = { .pre_arg = 1},
+ [GELIC_EURUS_CMD_SET_WPA_CFG] = { .pre_arg = 1},
+ [GELIC_EURUS_CMD_GET_COMMON_CFG] = { .post_arg = 1},
+ [GELIC_EURUS_CMD_GET_WEP_CFG] = { .post_arg = 1},
+ [GELIC_EURUS_CMD_GET_WPA_CFG] = { .post_arg = 1},
+ [GELIC_EURUS_CMD_GET_RSSI_CFG] = { .post_arg = 1},
+ [GELIC_EURUS_CMD_START_SCAN] = { .pre_arg = 1},
+ [GELIC_EURUS_CMD_GET_SCAN] = { .post_arg = 1},
+};
+
+#ifdef DEBUG
+static const char *cmdstr(enum gelic_eurus_command ix)
+{
+ switch (ix) {
+ case GELIC_EURUS_CMD_ASSOC:
+ return "ASSOC";
+ case GELIC_EURUS_CMD_DISASSOC:
+ return "DISASSOC";
+ case GELIC_EURUS_CMD_START_SCAN:
+ return "SCAN";
+ case GELIC_EURUS_CMD_GET_SCAN:
+ return "GET SCAN";
+ case GELIC_EURUS_CMD_SET_COMMON_CFG:
+ return "SET_COMMON_CFG";
+ case GELIC_EURUS_CMD_GET_COMMON_CFG:
+ return "GET_COMMON_CFG";
+ case GELIC_EURUS_CMD_SET_WEP_CFG:
+ return "SET_WEP_CFG";
+ case GELIC_EURUS_CMD_GET_WEP_CFG:
+ return "GET_WEP_CFG";
+ case GELIC_EURUS_CMD_SET_WPA_CFG:
+ return "SET_WPA_CFG";
+ case GELIC_EURUS_CMD_GET_WPA_CFG:
+ return "GET_WPA_CFG";
+ case GELIC_EURUS_CMD_GET_RSSI_CFG:
+ return "GET_RSSI";
+ default:
+ break;
+ }
+ return "";
+}
+#else
+static inline const char *cmdstr(enum gelic_eurus_command ix)
+{
+ return "";
+}
+#endif
+
+/* synchronously do eurus commands */
+static void gelic_eurus_sync_cmd_worker(struct work_struct *work)
+{
+ struct gelic_eurus_cmd *cmd;
+ struct gelic_card *card;
+ struct gelic_wl_info *wl;
+
+ u64 arg1, arg2;
+
+ pr_debug("%s: <-\n", __func__);
+ cmd = container_of(work, struct gelic_eurus_cmd, work);
+ BUG_ON(cmd_info[cmd->cmd].pre_arg &&
+ cmd_info[cmd->cmd].post_arg);
+ wl = cmd->wl;
+ card = port_to_card(wl_port(wl));
+
+ if (cmd_info[cmd->cmd].pre_arg) {
+ arg1 = (cmd->buffer) ?
+ ps3_mm_phys_to_lpar(__pa(cmd->buffer)) :
+ 0;
+ arg2 = cmd->buf_size;
+ } else {
+ arg1 = 0;
+ arg2 = 0;
+ }
+ init_completion(&wl->cmd_done_intr);
+ pr_debug("%s: cmd='%s' start\n", __func__, cmdstr(cmd->cmd));
+ cmd->status = lv1_net_control(bus_id(card), dev_id(card),
+ GELIC_LV1_POST_WLAN_CMD,
+ cmd->cmd, arg1, arg2,
+ &cmd->tag, &cmd->size);
+ if (cmd->status) {
+ complete(&cmd->done);
+ pr_info("%s: cmd issue failed\n", __func__);
+ return;
+ }
+
+ wait_for_completion(&wl->cmd_done_intr);
+
+ if (cmd_info[cmd->cmd].post_arg) {
+ arg1 = ps3_mm_phys_to_lpar(__pa(cmd->buffer));
+ arg2 = cmd->buf_size;
+ } else {
+ arg1 = 0;
+ arg2 = 0;
+ }
+
+ cmd->status = lv1_net_control(bus_id(card), dev_id(card),
+ GELIC_LV1_GET_WLAN_CMD_RESULT,
+ cmd->tag, arg1, arg2,
+ &cmd->cmd_status, &cmd->size);
+#ifdef DEBUG
+ if (cmd->status || cmd->cmd_status) {
+ pr_debug("%s: cmd done tag=%#lx arg1=%#lx, arg2=%#lx\n", __func__,
+ cmd->tag, arg1, arg2);
+ pr_debug("%s: cmd done status=%#x cmd_status=%#lx size=%#lx\n",
+ __func__, cmd->status, cmd->cmd_status, cmd->size);
+ }
+#endif
+ complete(&cmd->done);
+ pr_debug("%s: cmd='%s' done\n", __func__, cmdstr(cmd->cmd));
+}
+
+static struct gelic_eurus_cmd *gelic_eurus_sync_cmd(struct gelic_wl_info *wl,
+ unsigned int eurus_cmd,
+ void *buffer,
+ unsigned int buf_size)
+{
+ struct gelic_eurus_cmd *cmd;
+
+ /* allocate cmd */
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ /* initialize members */
+ cmd->cmd = eurus_cmd;
+ cmd->buffer = buffer;
+ cmd->buf_size = buf_size;
+ cmd->wl = wl;
+ INIT_WORK(&cmd->work, gelic_eurus_sync_cmd_worker);
+ init_completion(&cmd->done);
+ queue_work(wl->eurus_cmd_queue, &cmd->work);
+
+ /* wait for command completion */
+ wait_for_completion(&cmd->done);
+
+ return cmd;
+}
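+
+/*
+ * Typical call pattern, as used by the handlers below: both status
+ * fields must be checked and the returned command freed by the caller.
+ *
+ *    cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC, NULL, 0);
+ *    if (!cmd || cmd->status || cmd->cmd_status)
+ *            ret = -ENXIO;
+ *    kfree(cmd);
+ */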
+
+static u32 gelic_wl_get_link(struct net_device *netdev)
+{
+ struct gelic_wl_info *wl = port_wl(netdev_port(netdev));
+ u32 ret;
+
+ pr_debug("%s: <-\n", __func__);
+ mutex_lock(&wl->assoc_stat_lock);
+ if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED)
+ ret = 1;
+ else
+ ret = 0;
+ mutex_unlock(&wl->assoc_stat_lock);
+ pr_debug("%s: ->\n", __func__);
+ return ret;
+}
+
+static void gelic_wl_send_iwap_event(struct gelic_wl_info *wl, u8 *bssid)
+{
+ union iwreq_data data;
+
+ memset(&data, 0, sizeof(data));
+ if (bssid)
+ memcpy(data.ap_addr.sa_data, bssid, ETH_ALEN);
+ data.ap_addr.sa_family = ARPHRD_ETHER;
+ wireless_send_event(port_to_netdev(wl_port(wl)), SIOCGIWAP,
+ &data, NULL);
+}
+
+/*
+ * wireless extension handlers and helpers
+ */
+
+/* SIOGIWNAME */
+static int gelic_wl_get_name(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *iwreq, char *extra)
+{
+ strcpy(iwreq->name, "IEEE 802.11bg");
+ return 0;
+}
+
+static void gelic_wl_get_ch_info(struct gelic_wl_info *wl)
+{
+ struct gelic_card *card = port_to_card(wl_port(wl));
+ u64 ch_info_raw, tmp;
+ int status;
+
+ if (!test_and_set_bit(GELIC_WL_STAT_CH_INFO, &wl->stat)) {
+ status = lv1_net_control(bus_id(card), dev_id(card),
+ GELIC_LV1_GET_CHANNEL, 0, 0, 0,
+ &ch_info_raw,
+ &tmp);
+ /* some fw versions may return error */
+ if (status) {
+ if (status != LV1_NO_ENTRY)
+ pr_info("%s: available ch unknown\n", __func__);
+ wl->ch_info = 0x07ff; /* channels 1-11 */
+ } else
+ /* the 16 most significant bits hold the available channels */
+ wl->ch_info = ch_info_raw >> 48;
+ }
+}
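+
+/*
+ * Worked example derived from the code above: ch_info 0x07ff has bits
+ * 0-10 set, i.e. channels 1-11 are available; bit i corresponds to
+ * channel i + 1, as consumed by gelic_wl_get_range() below.
+ */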
+
+/* SIOGIWRANGE */
+static int gelic_wl_get_range(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *iwreq, char *extra)
+{
+ struct iw_point *point = &iwreq->data;
+ struct iw_range *range = (struct iw_range *)extra;
+ struct gelic_wl_info *wl = port_wl(netdev_port(netdev));
+ unsigned int i, chs;
+
+ pr_debug("%s: <-\n", __func__);
+ point->length = sizeof(struct iw_range);
+ memset(range, 0, sizeof(struct iw_range));
+
+ range->we_version_compiled = WIRELESS_EXT;
+ range->we_version_source = 22;
+
+ /* available channels and frequencies */
+ gelic_wl_get_ch_info(wl);
+
+ for (i = 0, chs = 0;
+ i < NUM_CHANNELS && chs < IW_MAX_FREQUENCIES; i++)
+ if (wl->ch_info & (1 << i)) {
+ range->freq[chs].i = i + 1;
+ range->freq[chs].m = channel_freq[i];
+ range->freq[chs].e = 6;
+ chs++;
+ }
+ range->num_frequency = chs;
+ range->old_num_frequency = chs;
+ range->num_channels = chs;
+ range->old_num_channels = chs;
+
+ /* bitrates */
+ for (i = 0; i < NUM_BITRATES; i++)
+ range->bitrate[i] = bitrate_list[i];
+ range->num_bitrates = i;
+
+ /* signal levels */
+ range->max_qual.qual = 100; /* relative value */
+ range->max_qual.level = 100;
+ range->avg_qual.qual = 50;
+ range->avg_qual.level = 50;
+ range->sensitivity = 0;
+
+ /* Event capability */
+ IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
+ IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
+ IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
+
+ /* encryption capability */
+ range->enc_capa = IW_ENC_CAPA_WPA |
+ IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP |
+ IW_ENC_CAPA_4WAY_HANDSHAKE;
+ if (wpa2_capable())
+ range->enc_capa |= IW_ENC_CAPA_WPA2;
+ range->encoding_size[0] = 5; /* 40bit WEP */
+ range->encoding_size[1] = 13; /* 104bit WEP */
+ range->encoding_size[2] = 32; /* WPA-PSK */
+ range->num_encoding_sizes = 3;
+ range->max_encoding_tokens = GELIC_WEP_KEYS;
+
+ /* scan capability */
+ range->scan_capa = IW_SCAN_CAPA_ESSID;
+
+ pr_debug("%s: ->\n", __func__);
+ return 0;
+}
+
+/* SIOC{G,S}IWSCAN */
+static int gelic_wl_set_scan(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
+ struct iw_scan_req *req;
+ u8 *essid = NULL;
+ size_t essid_len = 0;
+
+ if (wrqu->data.length == sizeof(struct iw_scan_req) &&
+ wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+ req = (struct iw_scan_req*)extra;
+ essid = req->essid;
+ essid_len = req->essid_len;
+ pr_debug("%s: ESSID scan =%s\n", __func__, essid);
+ }
+ return gelic_wl_start_scan(wl, 1, essid, essid_len);
+}
+
+#define OUI_LEN 3
+static const u8 rsn_oui[OUI_LEN] = { 0x00, 0x0f, 0xac };
+static const u8 wpa_oui[OUI_LEN] = { 0x00, 0x50, 0xf2 };
+
+/*
+ * synthesize WPA/RSN IE data
+ * See WiFi WPA specification and IEEE 802.11-2007 7.3.2.25
+ * for the format
+ */
+static size_t gelic_wl_synthesize_ie(u8 *buf,
+ struct gelic_eurus_scan_info *scan)
+{
+ const u8 *oui_header;
+ u8 *start = buf;
+ int rsn;
+ int ccmp;
+
+ pr_debug("%s: <- sec=%16x\n", __func__, scan->security);
+ switch (be16_to_cpu(scan->security) & GELIC_EURUS_SCAN_SEC_MASK) {
+ case GELIC_EURUS_SCAN_SEC_WPA:
+ rsn = 0;
+ break;
+ case GELIC_EURUS_SCAN_SEC_WPA2:
+ rsn = 1;
+ break;
+ default:
+ /* WEP or none. No IE returned */
+ return 0;
+ }
+
+ switch (be16_to_cpu(scan->security) & GELIC_EURUS_SCAN_SEC_WPA_MASK) {
+ case GELIC_EURUS_SCAN_SEC_WPA_TKIP:
+ ccmp = 0;
+ break;
+ case GELIC_EURUS_SCAN_SEC_WPA_AES:
+ ccmp = 1;
+ break;
+ default:
+ if (rsn) {
+ ccmp = 1;
+ pr_info("%s: no cipher info. defaulted to CCMP\n",
+ __func__);
+ } else {
+ ccmp = 0;
+ pr_info("%s: no cipher info. defaulted to TKIP\n",
+ __func__);
+ }
+ }
+
+ if (rsn)
+ oui_header = rsn_oui;
+ else
+ oui_header = wpa_oui;
+
+ /* element id */
+ if (rsn)
+ *buf++ = WLAN_EID_RSN;
+ else
+ *buf++ = WLAN_EID_GENERIC;
+
+ /* length field; set later */
+ buf++;
+
+ /* wpa special header */
+ if (!rsn) {
+ memcpy(buf, wpa_oui, OUI_LEN);
+ buf += OUI_LEN;
+ *buf++ = 0x01;
+ }
+
+ /* version */
+ *buf++ = 0x01; /* version 1.0 */
+ *buf++ = 0x00;
+
+ /* group cipher */
+ memcpy(buf, oui_header, OUI_LEN);
+ buf += OUI_LEN;
+
+ if (ccmp)
+ *buf++ = 0x04; /* CCMP */
+ else
+ *buf++ = 0x02; /* TKIP */
+
+ /* pairwise key count always 1 */
+ *buf++ = 0x01;
+ *buf++ = 0x00;
+
+ /* pairwise key suite */
+ memcpy(buf, oui_header, OUI_LEN);
+ buf += OUI_LEN;
+ if (ccmp)
+ *buf++ = 0x04; /* CCMP */
+ else
+ *buf++ = 0x02; /* TKIP */
+
+ /* AKM count is 1 */
+ *buf++ = 0x01;
+ *buf++ = 0x00;
+
+ /* AKM suite is assumed to be PSK */
+ memcpy(buf, oui_header, OUI_LEN);
+ buf += OUI_LEN;
+ *buf++ = 0x02; /* PSK */
+
+ /* RSN capabilities are 0 */
+ *buf++ = 0x00;
+ *buf++ = 0x00;
+
+ /* set length field */
+ start[1] = (buf - start - 2);
+
+ pr_debug("%s: ->\n", __func__);
+ return buf - start;
+}
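+
+/*
+ * Worked example derived from the code above (illustrative, not a
+ * captured frame): for a WPA AP using TKIP the synthesized IE is
+ *
+ *    dd 18 00 50 f2 01    element id, length, WPA OUI, type 1
+ *    01 00                version 1.0
+ *    00 50 f2 02          group cipher TKIP
+ *    01 00 00 50 f2 02    pairwise count 1, suite TKIP
+ *    01 00 00 50 f2 02    AKM count 1, suite PSK
+ *    00 00                capabilities
+ *
+ * 26 bytes in total, so the length field is 0x18 (24 bytes after the
+ * two-byte header).
+ */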
+
+struct ie_item {
+ u8 *data;
+ u8 len;
+};
+
+struct ie_info {
+ struct ie_item wpa;
+ struct ie_item rsn;
+};
+
+static void gelic_wl_parse_ie(u8 *data, size_t len,
+ struct ie_info *ie_info)
+{
+ size_t data_left = len;
+ u8 *pos = data;
+ u8 item_len;
+ u8 item_id;
+
+ pr_debug("%s: data=%p len=%ld\n", __func__,
+ data, len);
+ memset(ie_info, 0, sizeof(struct ie_info));
+
+ while (2 <= data_left) {
+ item_id = *pos++;
+ item_len = *pos++;
+ data_left -= 2;
+
+ if (data_left < item_len)
+ break;
+
+ switch (item_id) {
+ case WLAN_EID_GENERIC:
+ if ((OUI_LEN + 1 <= item_len) &&
+ !memcmp(pos, wpa_oui, OUI_LEN) &&
+ pos[OUI_LEN] == 0x01) {
+ ie_info->wpa.data = pos - 2;
+ ie_info->wpa.len = item_len + 2;
+ }
+ break;
+ case WLAN_EID_RSN:
+ ie_info->rsn.data = pos - 2;
+ /* length includes the header */
+ ie_info->rsn.len = item_len + 2;
+ break;
+ default:
+ pr_debug("%s: ignore %#x,%d\n", __func__,
+ item_id, item_len);
+ break;
+ }
+ pos += item_len;
+ data_left -= item_len;
+ }
+ pr_debug("%s: wpa=%p,%d wpa2=%p,%d\n", __func__,
+ ie_info->wpa.data, ie_info->wpa.len,
+ ie_info->rsn.data, ie_info->rsn.len);
+}
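+
+/*
+ * Example (illustrative): fed the 26-byte WPA IE shown after
+ * gelic_wl_synthesize_ie() above, the parser sets ie_info->wpa.data to
+ * the position of the 0xdd byte and ie_info->wpa.len to 26, and leaves
+ * ie_info->rsn empty.
+ */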
+
+
+/*
+ * translate the scan information from the hypervisor into an
+ * independent format
+ */
+static char *gelic_wl_translate_scan(struct net_device *netdev,
+ struct iw_request_info *info,
+ char *ev,
+ char *stop,
+ struct gelic_wl_scan_info *network)
+{
+ struct iw_event iwe;
+ struct gelic_eurus_scan_info *scan = network->hwinfo;
+ char *tmp;
+ u8 rate;
+ unsigned int i, j, len;
+ u8 buf[64]; /* arbitrary size large enough */
+
+ pr_debug("%s: <-\n", __func__);
+
+ /* first entry should be AP's mac address */
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe.u.ap_addr.sa_data, &scan->bssid[2], ETH_ALEN);
+ ev = iwe_stream_add_event(info, ev, stop, &iwe, IW_EV_ADDR_LEN);
+
+ /* ESSID */
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ iwe.u.data.length = strnlen(scan->essid, 32);
+ ev = iwe_stream_add_point(info, ev, stop, &iwe, scan->essid);
+
+ /* FREQUENCY */
+ iwe.cmd = SIOCGIWFREQ;
+ iwe.u.freq.m = be16_to_cpu(scan->channel);
+ iwe.u.freq.e = 0; /* table value in MHz */
+ iwe.u.freq.i = 0;
+ ev = iwe_stream_add_event(info, ev, stop, &iwe, IW_EV_FREQ_LEN);
+
+ /* RATES */
+ iwe.cmd = SIOCGIWRATE;
+ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+ /* to stuff multiple values in one event */
+ tmp = ev + iwe_stream_lcp_len(info);
+ /* put them in ascending order (slowest first) */
+ i = 0;
+ j = 0;
+ pr_debug("%s: rates=%d rate=%d\n", __func__,
+ network->rate_len, network->rate_ext_len);
+ while (i < network->rate_len) {
+ if (j < network->rate_ext_len &&
+ ((scan->ext_rate[j] & 0x7f) < (scan->rate[i] & 0x7f)))
+ rate = scan->ext_rate[j++] & 0x7f;
+ else
+ rate = scan->rate[i++] & 0x7f;
+ iwe.u.bitrate.value = rate * 500000; /* 500kbps unit */
+ tmp = iwe_stream_add_value(info, ev, tmp, stop, &iwe,
+ IW_EV_PARAM_LEN);
+ }
+ while (j < network->rate_ext_len) {
+ iwe.u.bitrate.value = (scan->ext_rate[j++] & 0x7f) * 500000;
+ tmp = iwe_stream_add_value(info, ev, tmp, stop, &iwe,
+ IW_EV_PARAM_LEN);
+ }
+ /* Check if we added any rate */
+ if (iwe_stream_lcp_len(info) < (tmp - ev))
+ ev = tmp;
+
+ /* ENCODE */
+ iwe.cmd = SIOCGIWENCODE;
+ if (be16_to_cpu(scan->capability) & WLAN_CAPABILITY_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ ev = iwe_stream_add_point(info, ev, stop, &iwe, scan->essid);
+
+ /* MODE */
+ iwe.cmd = SIOCGIWMODE;
+ if (be16_to_cpu(scan->capability) &
+ (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
+ if (be16_to_cpu(scan->capability) & WLAN_CAPABILITY_ESS)
+ iwe.u.mode = IW_MODE_MASTER;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+ ev = iwe_stream_add_event(info, ev, stop, &iwe, IW_EV_UINT_LEN);
+ }
+
+ /* QUAL */
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.updated = IW_QUAL_ALL_UPDATED |
+ IW_QUAL_QUAL_INVALID | IW_QUAL_NOISE_INVALID;
+ iwe.u.qual.level = be16_to_cpu(scan->rssi);
+ iwe.u.qual.qual = be16_to_cpu(scan->rssi);
+ iwe.u.qual.noise = 0;
+ ev = iwe_stream_add_event(info, ev, stop, &iwe, IW_EV_QUAL_LEN);
+
+ /* RSN */
+ memset(&iwe, 0, sizeof(iwe));
+ if (be16_to_cpu(scan->size) <= sizeof(*scan)) {
+ /* if the AP is WPA/WPA2 capable, synthesize an IE and add it */
+ len = gelic_wl_synthesize_ie(buf, scan);
+ if (len) {
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = len;
+ ev = iwe_stream_add_point(info, ev, stop, &iwe, buf);
+ }
+ } else {
+ /* this scan info has IE data */
+ struct ie_info ie_info;
+ size_t data_len;
+
+ data_len = be16_to_cpu(scan->size) - sizeof(*scan);
+
+ gelic_wl_parse_ie(scan->elements, data_len, &ie_info);
+
+ if (ie_info.wpa.len && (ie_info.wpa.len <= sizeof(buf))) {
+ memcpy(buf, ie_info.wpa.data, ie_info.wpa.len);
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie_info.wpa.len;
+ ev = iwe_stream_add_point(info, ev, stop, &iwe, buf);
+ }
+
+ if (ie_info.rsn.len && (ie_info.rsn.len <= sizeof(buf))) {
+ memset(&iwe, 0, sizeof(iwe));
+ memcpy(buf, ie_info.rsn.data, ie_info.rsn.len);
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie_info.rsn.len;
+ ev = iwe_stream_add_point(info, ev, stop, &iwe, buf);
+ }
+ }
+
+ pr_debug("%s: ->\n", __func__);
+ return ev;
+}
+
+
+static int gelic_wl_get_scan(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
+ struct gelic_wl_scan_info *scan_info;
+ char *ev = extra;
+ char *stop = ev + wrqu->data.length;
+ int ret = 0;
+ unsigned long this_time = jiffies;
+
+ pr_debug("%s: <-\n", __func__);
+ if (mutex_lock_interruptible(&wl->scan_lock))
+ return -EAGAIN;
+
+ switch (wl->scan_stat) {
+ case GELIC_WL_SCAN_STAT_SCANNING:
+ /* if a scan is in progress, the caller should retry later */
+ ret = -EAGAIN;
+ goto out;
+
+ case GELIC_WL_SCAN_STAT_INIT:
+ /* last scan request failed or never issued */
+ ret = -ENODEV;
+ goto out;
+ case GELIC_WL_SCAN_STAT_GOT_LIST:
+ /* ok, use current list */
+ break;
+ }
+
+ list_for_each_entry(scan_info, &wl->network_list, list) {
+ if (wl->scan_age == 0 ||
+ time_after(scan_info->last_scanned + wl->scan_age,
+ this_time))
+ ev = gelic_wl_translate_scan(netdev, info,
+ ev, stop,
+ scan_info);
+ else
+ pr_debug("%s:entry too old\n", __func__);
+
+ if (stop - ev <= IW_EV_ADDR_LEN) {
+ ret = -E2BIG;
+ goto out;
+ }
+ }
+
+ wrqu->data.length = ev - extra;
+ wrqu->data.flags = 0;
+out:
+ mutex_unlock(&wl->scan_lock);
+ pr_debug("%s: -> %d %d\n", __func__, ret, wrqu->data.length);
+ return ret;
+}
+
+#ifdef DEBUG
+static void scan_list_dump(struct gelic_wl_info *wl)
+{
+ struct gelic_wl_scan_info *scan_info;
+ int i;
+
+ i = 0;
+ list_for_each_entry(scan_info, &wl->network_list, list) {
+ pr_debug("%s: item %d\n", __func__, i++);
+ pr_debug("valid=%d eurusindex=%d last=%lx\n",
+ scan_info->valid, scan_info->eurus_index,
+ scan_info->last_scanned);
+ pr_debug("r_len=%d r_ext_len=%d essid_len=%d\n",
+ scan_info->rate_len, scan_info->rate_ext_len,
+ scan_info->essid_len);
+ /* -- */
+ pr_debug("bssid=%pM\n", &scan_info->hwinfo->bssid[2]);
+ pr_debug("essid=%s\n", scan_info->hwinfo->essid);
+ }
+}
+#endif
+
+static int gelic_wl_set_auth(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *data, char *extra)
+{
+ struct iw_param *param = &data->param;
+ struct gelic_wl_info *wl = port_wl(netdev_port(netdev));
+ unsigned long irqflag;
+ int ret = 0;
+
+ pr_debug("%s: <- %d\n", __func__, param->flags & IW_AUTH_INDEX);
+ spin_lock_irqsave(&wl->lock, irqflag);
+ switch (param->flags & IW_AUTH_INDEX) {
+ case IW_AUTH_WPA_VERSION:
+ if (param->value & IW_AUTH_WPA_VERSION_DISABLED) {
+ pr_debug("%s: NO WPA selected\n", __func__);
+ wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE;
+ wl->group_cipher_method = GELIC_WL_CIPHER_WEP;
+ wl->pairwise_cipher_method = GELIC_WL_CIPHER_WEP;
+ }
+ if (param->value & IW_AUTH_WPA_VERSION_WPA) {
+ pr_debug("%s: WPA version 1 selected\n", __func__);
+ wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA;
+ wl->group_cipher_method = GELIC_WL_CIPHER_TKIP;
+ wl->pairwise_cipher_method = GELIC_WL_CIPHER_TKIP;
+ wl->auth_method = GELIC_EURUS_AUTH_OPEN;
+ }
+ if (param->value & IW_AUTH_WPA_VERSION_WPA2) {
+ /*
+ * The hypervisor may not report the
+ * cipher information of an AP that
+ * uses WPA2, so a suitable cipher
+ * cannot be derived from its beacon.
+ * The AP's cipher must be known by
+ * some other means prior to the
+ * association.
+ */
+ if (!precise_ie())
+ pr_info("%s: WPA2 may not work\n", __func__);
+ if (wpa2_capable()) {
+ wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA2;
+ wl->group_cipher_method = GELIC_WL_CIPHER_AES;
+ wl->pairwise_cipher_method =
+ GELIC_WL_CIPHER_AES;
+ wl->auth_method = GELIC_EURUS_AUTH_OPEN;
+ } else
+ ret = -EINVAL;
+ }
+ break;
+
+ case IW_AUTH_CIPHER_PAIRWISE:
+ if (param->value &
+ (IW_AUTH_CIPHER_WEP104 | IW_AUTH_CIPHER_WEP40)) {
+ pr_debug("%s: WEP selected\n", __func__);
+ wl->pairwise_cipher_method = GELIC_WL_CIPHER_WEP;
+ }
+ if (param->value & IW_AUTH_CIPHER_TKIP) {
+ pr_debug("%s: TKIP selected\n", __func__);
+ wl->pairwise_cipher_method = GELIC_WL_CIPHER_TKIP;
+ }
+ if (param->value & IW_AUTH_CIPHER_CCMP) {
+ pr_debug("%s: CCMP selected\n", __func__);
+ wl->pairwise_cipher_method = GELIC_WL_CIPHER_AES;
+ }
+ if (param->value & IW_AUTH_CIPHER_NONE) {
+ pr_debug("%s: no auth selected\n", __func__);
+ wl->pairwise_cipher_method = GELIC_WL_CIPHER_NONE;
+ }
+ break;
+ case IW_AUTH_CIPHER_GROUP:
+ if (param->value &
+ (IW_AUTH_CIPHER_WEP104 | IW_AUTH_CIPHER_WEP40)) {
+ pr_debug("%s: WEP selected\n", __func__);
+ wl->group_cipher_method = GELIC_WL_CIPHER_WEP;
+ }
+ if (param->value & IW_AUTH_CIPHER_TKIP) {
+ pr_debug("%s: TKIP selected\n", __func__);
+ wl->group_cipher_method = GELIC_WL_CIPHER_TKIP;
+ }
+ if (param->value & IW_AUTH_CIPHER_CCMP) {
+ pr_debug("%s: CCMP selected\n", __func__);
+ wl->group_cipher_method = GELIC_WL_CIPHER_AES;
+ }
+ if (param->value & IW_AUTH_CIPHER_NONE) {
+ pr_debug("%s: no auth selected\n", __func__);
+ wl->group_cipher_method = GELIC_WL_CIPHER_NONE;
+ }
+ break;
+ case IW_AUTH_80211_AUTH_ALG:
+ if (param->value & IW_AUTH_ALG_SHARED_KEY) {
+ pr_debug("%s: shared key specified\n", __func__);
+ wl->auth_method = GELIC_EURUS_AUTH_SHARED;
+ } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) {
+ pr_debug("%s: open system specified\n", __func__);
+ wl->auth_method = GELIC_EURUS_AUTH_OPEN;
+ } else
+ ret = -EINVAL;
+ break;
+
+ case IW_AUTH_WPA_ENABLED:
+ if (param->value) {
+ pr_debug("%s: WPA enabled\n", __func__);
+ wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA;
+ } else {
+ pr_debug("%s: WPA disabled\n", __func__);
+ wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE;
+ }
+ break;
+
+ case IW_AUTH_KEY_MGMT:
+ if (param->value & IW_AUTH_KEY_MGMT_PSK)
+ break;
+ /* intentionally fall through */
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (!ret)
+ set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
+
+ spin_unlock_irqrestore(&wl->lock, irqflag);
+ pr_debug("%s: -> %d\n", __func__, ret);
+ return ret;
+}
+
+static int gelic_wl_get_auth(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *iwreq, char *extra)
+{
+ struct iw_param *param = &iwreq->param;
+ struct gelic_wl_info *wl = port_wl(netdev_port(netdev));
+ unsigned long irqflag;
+ int ret = 0;
+
+ pr_debug("%s: <- %d\n", __func__, param->flags & IW_AUTH_INDEX);
+ spin_lock_irqsave(&wl->lock, irqflag);
+ switch (param->flags & IW_AUTH_INDEX) {
+ case IW_AUTH_WPA_VERSION:
+ switch (wl->wpa_level) {
+ case GELIC_WL_WPA_LEVEL_WPA:
+ param->value |= IW_AUTH_WPA_VERSION_WPA;
+ break;
+ case GELIC_WL_WPA_LEVEL_WPA2:
+ param->value |= IW_AUTH_WPA_VERSION_WPA2;
+ break;
+ default:
+ param->value |= IW_AUTH_WPA_VERSION_DISABLED;
+ }
+ break;
+
+ case IW_AUTH_80211_AUTH_ALG:
+ if (wl->auth_method == GELIC_EURUS_AUTH_SHARED)
+ param->value = IW_AUTH_ALG_SHARED_KEY;
+ else if (wl->auth_method == GELIC_EURUS_AUTH_OPEN)
+ param->value = IW_AUTH_ALG_OPEN_SYSTEM;
+ break;
+
+ case IW_AUTH_WPA_ENABLED:
+ switch (wl->wpa_level) {
+ case GELIC_WL_WPA_LEVEL_WPA:
+ case GELIC_WL_WPA_LEVEL_WPA2:
+ param->value = 1;
+ break;
+ default:
+ param->value = 0;
+ break;
+ }
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ spin_unlock_irqrestore(&wl->lock, irqflag);
+ pr_debug("%s: -> %d\n", __func__, ret);
+ return ret;
+}
+
+/* SIOC{S,G}IWESSID */
+static int gelic_wl_set_essid(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *data, char *extra)
+{
+ struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
+ unsigned long irqflag;
+
+ pr_debug("%s: <- l=%d f=%d\n", __func__,
+ data->essid.length, data->essid.flags);
+ if (IW_ESSID_MAX_SIZE < data->essid.length)
+ return -EINVAL;
+
+ spin_lock_irqsave(&wl->lock, irqflag);
+ if (data->essid.flags) {
+ wl->essid_len = data->essid.length;
+ memcpy(wl->essid, extra, wl->essid_len);
+ pr_debug("%s: essid = '%s'\n", __func__, extra);
+ set_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
+ } else {
+ pr_debug("%s: ESSID any\n", __func__);
+ clear_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
+ }
+ set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
+ spin_unlock_irqrestore(&wl->lock, irqflag);
+
+
+ gelic_wl_try_associate(netdev); /* FIXME */
+ pr_debug("%s: ->\n", __func__);
+ return 0;
+}
+
+static int gelic_wl_get_essid(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *data, char *extra)
+{
+ struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
+ unsigned long irqflag;
+
+ pr_debug("%s: <-\n", __func__);
+ mutex_lock(&wl->assoc_stat_lock);
+ spin_lock_irqsave(&wl->lock, irqflag);
+ if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat) ||
+ wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) {
+ memcpy(extra, wl->essid, wl->essid_len);
+ data->essid.length = wl->essid_len;
+ data->essid.flags = 1;
+ } else
+ data->essid.flags = 0;
+
+ mutex_unlock(&wl->assoc_stat_lock);
+ spin_unlock_irqrestore(&wl->lock, irqflag);
+ pr_debug("%s: -> len=%d\n", __func__, data->essid.length);
+
+ return 0;
+}
+
+/* SIO{S,G}IWENCODE */
+static int gelic_wl_set_encode(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *data, char *extra)
+{
+ struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
+ struct iw_point *enc = &data->encoding;
+ __u16 flags;
+ unsigned long irqflag;
+ int key_index, index_specified;
+ int ret = 0;
+
+ pr_debug("%s: <-\n", __func__);
+ flags = enc->flags & IW_ENCODE_FLAGS;
+ key_index = enc->flags & IW_ENCODE_INDEX;
+
+ pr_debug("%s: key_index = %d\n", __func__, key_index);
+ pr_debug("%s: key_len = %d\n", __func__, enc->length);
+ pr_debug("%s: flag=%x\n", __func__, enc->flags & IW_ENCODE_FLAGS);
+
+ if (GELIC_WEP_KEYS < key_index)
+ return -EINVAL;
+
+ spin_lock_irqsave(&wl->lock, irqflag);
+ if (key_index) {
+ index_specified = 1;
+ key_index--;
+ } else {
+ index_specified = 0;
+ key_index = wl->current_key;
+ }
+
+ if (flags & IW_ENCODE_NOKEY) {
+ /* if just IW_ENCODE_NOKEY, change current key index */
+ if (!flags && index_specified) {
+ wl->current_key = key_index;
+ goto done;
+ }
+
+ if (flags & IW_ENCODE_DISABLED) {
+ if (!index_specified) {
+ /* disable encryption */
+ wl->group_cipher_method = GELIC_WL_CIPHER_NONE;
+ wl->pairwise_cipher_method =
+ GELIC_WL_CIPHER_NONE;
+ /* invalidate all keys */
+ wl->key_enabled = 0;
+ } else
+ clear_bit(key_index, &wl->key_enabled);
+ }
+
+ if (flags & IW_ENCODE_OPEN)
+ wl->auth_method = GELIC_EURUS_AUTH_OPEN;
+ if (flags & IW_ENCODE_RESTRICTED) {
+ pr_info("%s: shared key mode enabled\n", __func__);
+ wl->auth_method = GELIC_EURUS_AUTH_SHARED;
+ }
+ } else {
+ if (IW_ENCODING_TOKEN_MAX < enc->length) {
+ ret = -EINVAL;
+ goto done;
+ }
+ wl->key_len[key_index] = enc->length;
+ memcpy(wl->key[key_index], extra, enc->length);
+ set_bit(key_index, &wl->key_enabled);
+ wl->pairwise_cipher_method = GELIC_WL_CIPHER_WEP;
+ wl->group_cipher_method = GELIC_WL_CIPHER_WEP;
+ }
+ set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
+done:
+ spin_unlock_irqrestore(&wl->lock, irqflag);
+ pr_debug("%s: ->\n", __func__);
+ return ret;
+}
+
+static int gelic_wl_get_encode(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *data, char *extra)
+{
+ struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
+ struct iw_point *enc = &data->encoding;
+ unsigned long irqflag;
+ unsigned int key_index, index_specified;
+ int ret = 0;
+
+ pr_debug("%s: <-\n", __func__);
+ key_index = enc->flags & IW_ENCODE_INDEX;
+ pr_debug("%s: flag=%#x point=%p len=%d extra=%p\n", __func__,
+ enc->flags, enc->pointer, enc->length, extra);
+ if (GELIC_WEP_KEYS < key_index)
+ return -EINVAL;
+
+ spin_lock_irqsave(&wl->lock, irqflag);
+ if (key_index) {
+ index_specified = 1;
+ key_index--;
+ } else {
+ index_specified = 0;
+ key_index = wl->current_key;
+ }
+
+ if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) {
+ switch (wl->auth_method) {
+ case GELIC_EURUS_AUTH_OPEN:
+ enc->flags = IW_ENCODE_OPEN;
+ break;
+ case GELIC_EURUS_AUTH_SHARED:
+ enc->flags = IW_ENCODE_RESTRICTED;
+ break;
+ }
+ } else
+ enc->flags = IW_ENCODE_DISABLED;
+
+ if (test_bit(key_index, &wl->key_enabled)) {
+ if (enc->length < wl->key_len[key_index]) {
+ ret = -EINVAL;
+ goto done;
+ }
+ enc->length = wl->key_len[key_index];
+ memcpy(extra, wl->key[key_index], wl->key_len[key_index]);
+ } else {
+ enc->length = 0;
+ enc->flags |= IW_ENCODE_NOKEY;
+ }
+ enc->flags |= key_index + 1;
+ pr_debug("%s: -> flag=%x len=%d\n", __func__,
+ enc->flags, enc->length);
+
+done:
+ spin_unlock_irqrestore(&wl->lock, irqflag);
+ return ret;
+}
+
+/* SIOC{S,G}IWAP */
+static int gelic_wl_set_ap(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *data, char *extra)
+{
+ struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
+ unsigned long irqflag;
+
+ pr_debug("%s: <-\n", __func__);
+ if (data->ap_addr.sa_family != ARPHRD_ETHER)
+ return -EINVAL;
+
+ spin_lock_irqsave(&wl->lock, irqflag);
+ if (is_valid_ether_addr(data->ap_addr.sa_data)) {
+ memcpy(wl->bssid, data->ap_addr.sa_data,
+ ETH_ALEN);
+ set_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat);
+ set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
+ pr_debug("%s: bss=%pM\n", __func__, wl->bssid);
+ } else {
+ pr_debug("%s: clear bssid\n", __func__);
+ clear_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat);
+ memset(wl->bssid, 0, ETH_ALEN);
+ }
+ spin_unlock_irqrestore(&wl->lock, irqflag);
+ pr_debug("%s: ->\n", __func__);
+ return 0;
+}
+
+static int gelic_wl_get_ap(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *data, char *extra)
+{
+ struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
+ unsigned long irqflag;
+
+ pr_debug("%s: <-\n", __func__);
+ mutex_lock(&wl->assoc_stat_lock);
+ spin_lock_irqsave(&wl->lock, irqflag);
+ if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) {
+ data->ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(data->ap_addr.sa_data, wl->active_bssid,
+ ETH_ALEN);
+ } else
+ memset(data->ap_addr.sa_data, 0, ETH_ALEN);
+
+ spin_unlock_irqrestore(&wl->lock, irqflag);
+ mutex_unlock(&wl->assoc_stat_lock);
+ pr_debug("%s: ->\n", __func__);
+ return 0;
+}
+
+/* SIOC{S,G}IWENCODEEXT */
+static int gelic_wl_set_encodeext(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *data, char *extra)
+{
+ struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
+ struct iw_point *enc = &data->encoding;
+ struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+ __u16 alg;
+ __u16 flags;
+ unsigned long irqflag;
+ int key_index;
+ int ret = 0;
+
+ pr_debug("%s: <-\n", __func__);
+ flags = enc->flags & IW_ENCODE_FLAGS;
+ alg = ext->alg;
+ key_index = enc->flags & IW_ENCODE_INDEX;
+
+ pr_debug("%s: key_index = %d\n", __func__, key_index);
+ pr_debug("%s: key_len = %d\n", __func__, enc->length);
+ pr_debug("%s: flag=%x\n", __func__, enc->flags & IW_ENCODE_FLAGS);
+ pr_debug("%s: ext_flag=%x\n", __func__, ext->ext_flags);
+ pr_debug("%s: ext_key_len=%x\n", __func__, ext->key_len);
+
+ if (GELIC_WEP_KEYS < key_index)
+ return -EINVAL;
+
+ spin_lock_irqsave(&wl->lock, irqflag);
+ if (key_index)
+ key_index--;
+ else
+ key_index = wl->current_key;
+
+ if (!enc->length && (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) {
+ /* request to change the default key index */
+ pr_debug("%s: request to change default key to %d\n",
+ __func__, key_index);
+ wl->current_key = key_index;
+ goto done;
+ }
+
+ if (alg == IW_ENCODE_ALG_NONE || (flags & IW_ENCODE_DISABLED)) {
+ pr_debug("%s: alg disabled\n", __func__);
+ wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE;
+ wl->group_cipher_method = GELIC_WL_CIPHER_NONE;
+ wl->pairwise_cipher_method = GELIC_WL_CIPHER_NONE;
+ wl->auth_method = GELIC_EURUS_AUTH_OPEN; /* should be open */
+ } else if (alg == IW_ENCODE_ALG_WEP) {
+ pr_debug("%s: WEP requested\n", __func__);
+ if (flags & IW_ENCODE_OPEN) {
+ pr_debug("%s: open key mode\n", __func__);
+ wl->auth_method = GELIC_EURUS_AUTH_OPEN;
+ }
+ if (flags & IW_ENCODE_RESTRICTED) {
+ pr_debug("%s: shared key mode\n", __func__);
+ wl->auth_method = GELIC_EURUS_AUTH_SHARED;
+ }
+ if (IW_ENCODING_TOKEN_MAX < ext->key_len) {
+ pr_info("%s: key is too long %d\n", __func__,
+ ext->key_len);
+ ret = -EINVAL;
+ goto done;
+ }
+ /* OK, update the key */
+ wl->key_len[key_index] = ext->key_len;
+ memset(wl->key[key_index], 0, IW_ENCODING_TOKEN_MAX);
+ memcpy(wl->key[key_index], ext->key, ext->key_len);
+ set_bit(key_index, &wl->key_enabled);
+ /* remember wep info changed */
+ set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
+ } else if (alg == IW_ENCODE_ALG_PMK) {
+ if (ext->key_len != WPA_PSK_LEN) {
+ pr_err("%s: PSK length wrong %d\n", __func__,
+ ext->key_len);
+ ret = -EINVAL;
+ goto done;
+ }
+ memset(wl->psk, 0, sizeof(wl->psk));
+ memcpy(wl->psk, ext->key, ext->key_len);
+ wl->psk_len = ext->key_len;
+ wl->psk_type = GELIC_EURUS_WPA_PSK_BIN;
+ /* remember PSK configured */
+ set_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat);
+ }
+done:
+ spin_unlock_irqrestore(&wl->lock, irqflag);
+ pr_debug("%s: ->\n", __func__);
+ return ret;
+}
+
+static int gelic_wl_get_encodeext(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *data, char *extra)
+{
+ struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
+ struct iw_point *enc = &data->encoding;
+ struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+ unsigned long irqflag;
+ int key_index;
+ int ret = 0;
+ int max_key_len;
+
+ pr_debug("%s: <-\n", __func__);
+
+ max_key_len = enc->length - sizeof(struct iw_encode_ext);
+ if (max_key_len < 0)
+ return -EINVAL;
+ key_index = enc->flags & IW_ENCODE_INDEX;
+
+ pr_debug("%s: key_index = %d\n", __func__, key_index);
+ pr_debug("%s: key_len = %d\n", __func__, enc->length);
+ pr_debug("%s: flag=%x\n", __func__, enc->flags & IW_ENCODE_FLAGS);
+
+ if (GELIC_WEP_KEYS < key_index)
+ return -EINVAL;
+
+ spin_lock_irqsave(&wl->lock, irqflag);
+ if (key_index)
+ key_index--;
+ else
+ key_index = wl->current_key;
+
+ memset(ext, 0, sizeof(struct iw_encode_ext));
+ switch (wl->group_cipher_method) {
+ case GELIC_WL_CIPHER_WEP:
+ ext->alg = IW_ENCODE_ALG_WEP;
+ enc->flags |= IW_ENCODE_ENABLED;
+ break;
+ case GELIC_WL_CIPHER_TKIP:
+ ext->alg = IW_ENCODE_ALG_TKIP;
+ enc->flags |= IW_ENCODE_ENABLED;
+ break;
+ case GELIC_WL_CIPHER_AES:
+ ext->alg = IW_ENCODE_ALG_CCMP;
+ enc->flags |= IW_ENCODE_ENABLED;
+ break;
+ case GELIC_WL_CIPHER_NONE:
+ default:
+ ext->alg = IW_ENCODE_ALG_NONE;
+ enc->flags |= IW_ENCODE_NOKEY;
+ break;
+ }
+
+ if (!(enc->flags & IW_ENCODE_NOKEY)) {
+ if (max_key_len < wl->key_len[key_index]) {
+ ret = -E2BIG;
+ goto out;
+ }
+ if (test_bit(key_index, &wl->key_enabled))
+ memcpy(ext->key, wl->key[key_index],
+ wl->key_len[key_index]);
+ else
+ pr_debug("%s: disabled key requested ix=%d\n",
+ __func__, key_index);
+ }
+out:
+ spin_unlock_irqrestore(&wl->lock, irqflag);
+ pr_debug("%s: ->\n", __func__);
+ return ret;
+}
+/* SIOC{S,G}IWMODE */
+static int gelic_wl_set_mode(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *data, char *extra)
+{
+ __u32 mode = data->mode;
+ int ret;
+
+ pr_debug("%s: <-\n", __func__);
+ if (mode == IW_MODE_INFRA)
+ ret = 0;
+ else
+ ret = -EOPNOTSUPP;
+ pr_debug("%s: -> %d\n", __func__, ret);
+ return ret;
+}
+
+static int gelic_wl_get_mode(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *data, char *extra)
+{
+ __u32 *mode = &data->mode;
+ pr_debug("%s: <-\n", __func__);
+ *mode = IW_MODE_INFRA;
+ pr_debug("%s: ->\n", __func__);
+ return 0;
+}
+
+/* SIOCGIWNICKN */
+static int gelic_wl_get_nick(struct net_device *net_dev,
+ struct iw_request_info *info,
+ union iwreq_data *data, char *extra)
+{
+ strcpy(extra, "gelic_wl");
+ data->data.length = strlen(extra);
+ data->data.flags = 1;
+ return 0;
+}
+
+
+/* --- */
+
+static struct iw_statistics *gelic_wl_get_wireless_stats(
+ struct net_device *netdev)
+{
+ struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
+ struct gelic_eurus_cmd *cmd;
+ struct iw_statistics *is;
+ struct gelic_eurus_rssi_info *rssi;
+ void *buf;
+
+ pr_debug("%s: <-\n", __func__);
+
+ buf = (void *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ is = &wl->iwstat;
+ memset(is, 0, sizeof(*is));
+ cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_GET_RSSI_CFG,
+ buf, sizeof(*rssi));
+ if (cmd && !cmd->status && !cmd->cmd_status) {
+ rssi = buf;
+ is->qual.level = be16_to_cpu(rssi->rssi);
+ is->qual.updated = IW_QUAL_LEVEL_UPDATED |
+ IW_QUAL_QUAL_INVALID | IW_QUAL_NOISE_INVALID;
+ } else
+ /* not associated */
+ is->qual.updated = IW_QUAL_ALL_INVALID;
+
+ kfree(cmd);
+ free_page((unsigned long)buf);
+ pr_debug("%s: ->\n", __func__);
+ return is;
+}
+
+/*
+ * scanning helpers
+ */
+static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan,
+ u8 *essid, size_t essid_len)
+{
+ struct gelic_eurus_cmd *cmd;
+ int ret = 0;
+ void *buf = NULL;
+ size_t len;
+
+ pr_debug("%s: <- always=%d\n", __func__, always_scan);
+ if (mutex_lock_interruptible(&wl->scan_lock))
+ return -ERESTARTSYS;
+
+ /*
+ * If a scan is already in progress, do not trigger another
+ */
+ if (wl->scan_stat == GELIC_WL_SCAN_STAT_SCANNING) {
+ pr_debug("%s: scanning now\n", __func__);
+ goto out;
+ }
+
+ init_completion(&wl->scan_done);
+ /*
+ * If we already have a BSS list, don't fetch a new one
+ * unless we are doing an ESSID scan
+ */
+ if ((!essid_len && !always_scan)
+ && wl->scan_stat == GELIC_WL_SCAN_STAT_GOT_LIST) {
+ pr_debug("%s: already has the list\n", __func__);
+ complete(&wl->scan_done);
+ goto out;
+ }
+
+ /* ESSID scan ? */
+ if (essid_len && essid) {
+ buf = (void *)__get_free_page(GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ len = IW_ESSID_MAX_SIZE; /* hypervisor always requires 32 */
+ memset(buf, 0, len);
+ memcpy(buf, essid, essid_len);
+ pr_debug("%s: essid scan='%s'\n", __func__, (char *)buf);
+ } else
+ len = 0;
+
+ /*
+ * issue start scan request
+ */
+ wl->scan_stat = GELIC_WL_SCAN_STAT_SCANNING;
+ cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_START_SCAN,
+ buf, len);
+ if (!cmd || cmd->status || cmd->cmd_status) {
+ wl->scan_stat = GELIC_WL_SCAN_STAT_INIT;
+ complete(&wl->scan_done);
+ ret = -ENOMEM;
+ goto out;
+ }
+ kfree(cmd);
+out:
+ free_page((unsigned long)buf);
+ mutex_unlock(&wl->scan_lock);
+ pr_debug("%s: ->\n", __func__);
+ return ret;
+}
+
+/*
+ * retrieve scan result from the chip (hypervisor)
+ * this function is invoked from scheduled work.
+ */
+static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
+{
+ struct gelic_eurus_cmd *cmd = NULL;
+ struct gelic_wl_scan_info *target, *tmp;
+ struct gelic_wl_scan_info *oldest = NULL;
+ struct gelic_eurus_scan_info *scan_info;
+ unsigned int scan_info_size;
+ union iwreq_data data;
+ unsigned long this_time = jiffies;
+ unsigned int data_len, i, found, r;
+ void *buf;
+
+ pr_debug("%s:start\n", __func__);
+ mutex_lock(&wl->scan_lock);
+
+ buf = (void *)__get_free_page(GFP_KERNEL);
+ if (!buf) {
+ pr_info("%s: scan buffer alloc failed\n", __func__);
+ goto out;
+ }
+
+ if (wl->scan_stat != GELIC_WL_SCAN_STAT_SCANNING) {
+ /*
+ * stop() may be called while scanning, ignore result
+ */
+ pr_debug("%s: scan complete when stat != scanning(%d)\n",
+ __func__, wl->scan_stat);
+ goto out;
+ }
+
+ cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_GET_SCAN,
+ buf, PAGE_SIZE);
+ if (!cmd || cmd->status || cmd->cmd_status) {
+ wl->scan_stat = GELIC_WL_SCAN_STAT_INIT;
+ pr_info("%s:cmd failed\n", __func__);
+ kfree(cmd);
+ goto out;
+ }
+ data_len = cmd->size;
+ pr_debug("%s: data_len = %d\n", __func__, data_len);
+ kfree(cmd);
+
+ /* OK, bss list retrieved */
+ wl->scan_stat = GELIC_WL_SCAN_STAT_GOT_LIST;
+
+ /* mark all entries as old */
+ list_for_each_entry_safe(target, tmp, &wl->network_list, list) {
+ target->valid = 0;
+ /* expire too old entries */
+ if (time_before(target->last_scanned + wl->scan_age,
+ this_time)) {
+ kfree(target->hwinfo);
+ target->hwinfo = NULL;
+ list_move_tail(&target->list, &wl->network_free_list);
+ }
+ }
+
+ /* put them in the network_list */
+ for (i = 0, scan_info_size = 0, scan_info = buf;
+ scan_info_size < data_len;
+ i++, scan_info_size += be16_to_cpu(scan_info->size),
+ scan_info = (void *)scan_info + be16_to_cpu(scan_info->size)) {
+ pr_debug("%s:size=%d bssid=%pM scan_info=%p\n", __func__,
+ be16_to_cpu(scan_info->size),
+ &scan_info->bssid[2], scan_info);
+
+ /*
+ * The wireless firmware may return an invalid channel 0 and/or
+ * an invalid rate if the AP emits a zero-length SSID IE. As this
+ * scan information is useless, ignore it
+ */
+ if (!be16_to_cpu(scan_info->channel) || !scan_info->rate[0]) {
+ pr_debug("%s: invalid scan info\n", __func__);
+ continue;
+ }
+
+ found = 0;
+ oldest = NULL;
+ list_for_each_entry(target, &wl->network_list, list) {
+ if (!compare_ether_addr(&target->hwinfo->bssid[2],
+ &scan_info->bssid[2])) {
+ found = 1;
+ pr_debug("%s: same BBS found scanned list\n",
+ __func__);
+ break;
+ }
+ if (!oldest ||
+ (target->last_scanned < oldest->last_scanned))
+ oldest = target;
+ }
+
+ if (!found) {
+ /* not found in the list */
+ if (list_empty(&wl->network_free_list)) {
+ /* expire oldest */
+ target = oldest;
+ } else {
+ target = list_entry(wl->network_free_list.next,
+ struct gelic_wl_scan_info,
+ list);
+ }
+ }
+
+ /* update the item */
+ target->last_scanned = this_time;
+ target->valid = 1;
+ target->eurus_index = i;
+ kfree(target->hwinfo);
+ target->hwinfo = kzalloc(be16_to_cpu(scan_info->size),
+ GFP_KERNEL);
+ if (!target->hwinfo) {
+ pr_info("%s: kzalloc failed\n", __func__);
+ continue;
+ }
+ /* copy hw scan info */
+ memcpy(target->hwinfo, scan_info, be16_to_cpu(scan_info->size));
+ target->essid_len = strnlen(scan_info->essid,
+ sizeof(scan_info->essid));
+ target->rate_len = 0;
+ for (r = 0; r < 12; r++)
+ if (scan_info->rate[r])
+ target->rate_len++;
+ if (8 < target->rate_len)
+ pr_info("%s: AP returns %d rates\n", __func__,
+ target->rate_len);
+ target->rate_ext_len = 0;
+ for (r = 0; r < 16; r++)
+ if (scan_info->ext_rate[r])
+ target->rate_ext_len++;
+ list_move_tail(&target->list, &wl->network_list);
+ }
+ memset(&data, 0, sizeof(data));
+ wireless_send_event(port_to_netdev(wl_port(wl)), SIOCGIWSCAN, &data,
+ NULL);
+out:
+ free_page((unsigned long)buf);
+ complete(&wl->scan_done);
+ mutex_unlock(&wl->scan_lock);
+ pr_debug("%s:end\n", __func__);
+}
+
+/*
+ * Select an appropriate BSS from the current scan list according
+ * to the current settings from userspace.
+ * The caller must hold wl->scan_lock,
+ * and wl->scan_stat must be GELIC_WL_SCAN_STAT_GOT_LIST
+ */
+static void update_best(struct gelic_wl_scan_info **best,
+ struct gelic_wl_scan_info *candid,
+ int *best_weight,
+ int *weight)
+{
+ if (*best_weight < ++(*weight)) {
+ *best_weight = *weight;
+ *best = candid;
+ }
+}
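+
+/*
+ * Worked example of the weighting above (illustrative): with no BSSID
+ * configured, a WPA2 candidate that also matches the configured ESSID
+ * is counted by both filters and reaches weight 2, beating a candidate
+ * that only matched the security level (weight 1).
+ */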
+
+static
+struct gelic_wl_scan_info *gelic_wl_find_best_bss(struct gelic_wl_info *wl)
+{
+ struct gelic_wl_scan_info *scan_info;
+ struct gelic_wl_scan_info *best_bss;
+ int weight, best_weight;
+ u16 security;
+
+ pr_debug("%s: <-\n", __func__);
+
+ best_bss = NULL;
+ best_weight = 0;
+
+ list_for_each_entry(scan_info, &wl->network_list, list) {
+ pr_debug("%s: station %p\n", __func__, scan_info);
+
+ if (!scan_info->valid) {
+ pr_debug("%s: station invalid\n", __func__);
+ continue;
+ }
+
+ /* If bss specified, check it only */
+ if (test_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat)) {
+ if (!compare_ether_addr(&scan_info->hwinfo->bssid[2],
+ wl->bssid)) {
+ best_bss = scan_info;
+ pr_debug("%s: bssid matched\n", __func__);
+ break;
+ } else {
+ pr_debug("%s: bssid unmached\n", __func__);
+ continue;
+ }
+ }
+
+ weight = 0;
+
+ /* security */
+ security = be16_to_cpu(scan_info->hwinfo->security) &
+ GELIC_EURUS_SCAN_SEC_MASK;
+ if (wl->wpa_level == GELIC_WL_WPA_LEVEL_WPA2) {
+ if (security == GELIC_EURUS_SCAN_SEC_WPA2)
+ update_best(&best_bss, scan_info,
+ &best_weight, &weight);
+ else
+ continue;
+ } else if (wl->wpa_level == GELIC_WL_WPA_LEVEL_WPA) {
+ if (security == GELIC_EURUS_SCAN_SEC_WPA)
+ update_best(&best_bss, scan_info,
+ &best_weight, &weight);
+ else
+ continue;
+ } else if (wl->wpa_level == GELIC_WL_WPA_LEVEL_NONE &&
+ wl->group_cipher_method == GELIC_WL_CIPHER_WEP) {
+ if (security == GELIC_EURUS_SCAN_SEC_WEP)
+ update_best(&best_bss, scan_info,
+ &best_weight, &weight);
+ else
+ continue;
+ }
+
+ /* If ESSID is set, check it */
+ if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat)) {
+ if ((scan_info->essid_len == wl->essid_len) &&
+ !strncmp(wl->essid,
+ scan_info->hwinfo->essid,
+ scan_info->essid_len))
+ update_best(&best_bss, scan_info,
+ &best_weight, &weight);
+ else
+ continue;
+ }
+ }
+
+#ifdef DEBUG
+ pr_debug("%s: -> bss=%p\n", __func__, best_bss);
+ if (best_bss) {
+ pr_debug("%s:addr=%pM\n", __func__,
+ &best_bss->hwinfo->bssid[2]);
+ }
+#endif
+ return best_bss;
+}
+
+/*
+ * Set up the WEP configuration in the chip.
+ * The caller must hold wl->scan_lock,
+ * and wl->scan_stat must be GELIC_WL_SCAN_STAT_GOT_LIST
+ */
+static int gelic_wl_do_wep_setup(struct gelic_wl_info *wl)
+{
+ unsigned int i;
+ struct gelic_eurus_wep_cfg *wep;
+ struct gelic_eurus_cmd *cmd;
+ int wep104 = 0;
+ int have_key = 0;
+ int ret = 0;
+
+ pr_debug("%s: <-\n", __func__);
+ /* we can assume no one else uses this buffer */
+ wep = (struct gelic_eurus_wep_cfg *)__get_free_page(GFP_KERNEL);
+ if (!wep)
+ return -ENOMEM;
+
+ memset(wep, 0, sizeof(*wep));
+
+ if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) {
+ pr_debug("%s: WEP mode\n", __func__);
+ for (i = 0; i < GELIC_WEP_KEYS; i++) {
+ if (!test_bit(i, &wl->key_enabled))
+ continue;
+
+ pr_debug("%s: key#%d enabled\n", __func__, i);
+ have_key = 1;
+ if (wl->key_len[i] == 13)
+ wep104 = 1;
+ else if (wl->key_len[i] != 5) {
+ pr_info("%s: wrong wep key[%d]=%d\n",
+ __func__, i, wl->key_len[i]);
+ ret = -EINVAL;
+ goto out;
+ }
+ memcpy(wep->key[i], wl->key[i], wl->key_len[i]);
+ }
+
+ if (!have_key) {
+ pr_info("%s: all wep key disabled\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (wep104) {
+ pr_debug("%s: 104bit key\n", __func__);
+ wep->security = cpu_to_be16(GELIC_EURUS_WEP_SEC_104BIT);
+ } else {
+ pr_debug("%s: 40bit key\n", __func__);
+ wep->security = cpu_to_be16(GELIC_EURUS_WEP_SEC_40BIT);
+ }
+ } else {
+ pr_debug("%s: NO encryption\n", __func__);
+ wep->security = cpu_to_be16(GELIC_EURUS_WEP_SEC_NONE);
+ }
+
+ /* issue wep setup */
+ cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_SET_WEP_CFG,
+ wep, sizeof(*wep));
+ if (!cmd)
+ ret = -ENOMEM;
+ else if (cmd->status || cmd->cmd_status)
+ ret = -ENXIO;
+
+ kfree(cmd);
+out:
+ free_page((unsigned long)wep);
+ pr_debug("%s: ->\n", __func__);
+ return ret;
+}
+
+#ifdef DEBUG
+static const char *wpasecstr(enum gelic_eurus_wpa_security sec)
+{
+ switch (sec) {
+ case GELIC_EURUS_WPA_SEC_NONE:
+ return "NONE";
+ case GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP:
+ return "WPA_TKIP_TKIP";
+ case GELIC_EURUS_WPA_SEC_WPA_TKIP_AES:
+ return "WPA_TKIP_AES";
+ case GELIC_EURUS_WPA_SEC_WPA_AES_AES:
+ return "WPA_AES_AES";
+ case GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP:
+ return "WPA2_TKIP_TKIP";
+ case GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES:
+ return "WPA2_TKIP_AES";
+ case GELIC_EURUS_WPA_SEC_WPA2_AES_AES:
+ return "WPA2_AES_AES";
+ }
+ return "";
+}
+#endif
+
+static int gelic_wl_do_wpa_setup(struct gelic_wl_info *wl)
+{
+ struct gelic_eurus_wpa_cfg *wpa;
+ struct gelic_eurus_cmd *cmd;
+ u16 security;
+ int ret = 0;
+
+ pr_debug("%s: <-\n", __func__);
+ /* we can assume no one else uses this buffer */
+ wpa = (struct gelic_eurus_wpa_cfg *)__get_free_page(GFP_KERNEL);
+ if (!wpa)
+ return -ENOMEM;
+
+ memset(wpa, 0, sizeof(*wpa));
+
+ if (!test_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat))
+ pr_info("%s: PSK not configured yet\n", __func__);
+
+ /* copy key */
+ memcpy(wpa->psk, wl->psk, wl->psk_len);
+
+ /* set security level */
+ if (wl->wpa_level == GELIC_WL_WPA_LEVEL_WPA2) {
+ if (wl->group_cipher_method == GELIC_WL_CIPHER_AES) {
+ security = GELIC_EURUS_WPA_SEC_WPA2_AES_AES;
+ } else {
+ if (wl->pairwise_cipher_method == GELIC_WL_CIPHER_AES &&
+ precise_ie())
+ security = GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES;
+ else
+ security = GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP;
+ }
+ } else {
+ if (wl->group_cipher_method == GELIC_WL_CIPHER_AES) {
+ security = GELIC_EURUS_WPA_SEC_WPA_AES_AES;
+ } else {
+ if (wl->pairwise_cipher_method == GELIC_WL_CIPHER_AES &&
+ precise_ie())
+ security = GELIC_EURUS_WPA_SEC_WPA_TKIP_AES;
+ else
+ security = GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP;
+ }
+ }
+ wpa->security = cpu_to_be16(security);
+
+ /* PSK type */
+ wpa->psk_type = cpu_to_be16(wl->psk_type);
+#ifdef DEBUG
+ pr_debug("%s: sec=%s psktype=%s\n", __func__,
+ wpasecstr(wpa->security),
+ (wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ?
+ "BIN" : "passphrase");
+#if 0
+ /*
+ * don't enable here if you plan to submit
+ * the debug log because this dumps your precious
+ * passphrase/key.
+ */
+ pr_debug("%s: psk=%s\n", __func__,
+ (wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ?
+ "N/A" : wpa->psk);
+#endif
+#endif
+ /* issue wpa setup */
+ cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_SET_WPA_CFG,
+ wpa, sizeof(*wpa));
+ if (!cmd)
+ ret = -ENOMEM;
+ else if (cmd->status || cmd->cmd_status)
+ ret = -ENXIO;
+ kfree(cmd);
+ free_page((unsigned long)wpa);
+ pr_debug("%s: --> %d\n", __func__, ret);
+ return ret;
+}
+
+/*
+ * Start association. The caller must hold assoc_stat_lock.
+ */
+static int gelic_wl_associate_bss(struct gelic_wl_info *wl,
+ struct gelic_wl_scan_info *bss)
+{
+ struct gelic_eurus_cmd *cmd;
+ struct gelic_eurus_common_cfg *common;
+ int ret = 0;
+ unsigned long rc;
+
+ pr_debug("%s: <-\n", __func__);
+
+ /* do common config */
+ common = (struct gelic_eurus_common_cfg *)__get_free_page(GFP_KERNEL);
+ if (!common)
+ return -ENOMEM;
+
+ memset(common, 0, sizeof(*common));
+ common->bss_type = cpu_to_be16(GELIC_EURUS_BSS_INFRA);
+ common->op_mode = cpu_to_be16(GELIC_EURUS_OPMODE_11BG);
+
+ common->scan_index = cpu_to_be16(bss->eurus_index);
+ switch (wl->auth_method) {
+ case GELIC_EURUS_AUTH_OPEN:
+ common->auth_method = cpu_to_be16(GELIC_EURUS_AUTH_OPEN);
+ break;
+ case GELIC_EURUS_AUTH_SHARED:
+ common->auth_method = cpu_to_be16(GELIC_EURUS_AUTH_SHARED);
+ break;
+ }
+
+#ifdef DEBUG
+ scan_list_dump(wl);
+#endif
+ pr_debug("%s: common cfg index=%d bsstype=%d auth=%d\n", __func__,
+ be16_to_cpu(common->scan_index),
+ be16_to_cpu(common->bss_type),
+ be16_to_cpu(common->auth_method));
+
+ cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_SET_COMMON_CFG,
+ common, sizeof(*common));
+ if (!cmd || cmd->status || cmd->cmd_status) {
+ ret = -ENOMEM;
+ kfree(cmd);
+ goto out;
+ }
+ kfree(cmd);
+
+ /* WEP/WPA */
+ switch (wl->wpa_level) {
+ case GELIC_WL_WPA_LEVEL_NONE:
+	case GELIC_WL_WPA_LEVEL_NONE:
+		/* If WEP or no security, set up the WEP config */
+ ret = gelic_wl_do_wep_setup(wl);
+ break;
+ case GELIC_WL_WPA_LEVEL_WPA:
+ case GELIC_WL_WPA_LEVEL_WPA2:
+ ret = gelic_wl_do_wpa_setup(wl);
+ break;
+ }
+
+ if (ret) {
+ pr_debug("%s: WEP/WPA setup failed %d\n", __func__,
+ ret);
+ ret = -EPERM;
+ gelic_wl_send_iwap_event(wl, NULL);
+ goto out;
+ }
+
+ /* start association */
+ init_completion(&wl->assoc_done);
+ wl->assoc_stat = GELIC_WL_ASSOC_STAT_ASSOCIATING;
+ cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_ASSOC,
+ NULL, 0);
+ if (!cmd || cmd->status || cmd->cmd_status) {
+ pr_debug("%s: assoc request failed\n", __func__);
+ wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN;
+ kfree(cmd);
+ ret = -ENOMEM;
+ gelic_wl_send_iwap_event(wl, NULL);
+ goto out;
+ }
+ kfree(cmd);
+
+ /* wait for connected event */
+ rc = wait_for_completion_timeout(&wl->assoc_done, HZ * 4);/*FIXME*/
+
+ if (!rc) {
+		/* timed out. Maybe the key or crypt mode is wrong */
+ pr_info("%s: connect timeout\n", __func__);
+ cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC,
+ NULL, 0);
+ kfree(cmd);
+ wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN;
+ gelic_wl_send_iwap_event(wl, NULL);
+ ret = -ENXIO;
+ } else {
+ wl->assoc_stat = GELIC_WL_ASSOC_STAT_ASSOCIATED;
+ /* copy bssid */
+ memcpy(wl->active_bssid, &bss->hwinfo->bssid[2], ETH_ALEN);
+
+ /* send connect event */
+ gelic_wl_send_iwap_event(wl, wl->active_bssid);
+ pr_info("%s: connected\n", __func__);
+ }
+out:
+ free_page((unsigned long)common);
+ pr_debug("%s: ->\n", __func__);
+ return ret;
+}
+
+/*
+ * connected event
+ */
+static void gelic_wl_connected_event(struct gelic_wl_info *wl,
+ u64 event)
+{
+ u64 desired_event = 0;
+
+ switch (wl->wpa_level) {
+ case GELIC_WL_WPA_LEVEL_NONE:
+ desired_event = GELIC_LV1_WL_EVENT_CONNECTED;
+ break;
+ case GELIC_WL_WPA_LEVEL_WPA:
+ case GELIC_WL_WPA_LEVEL_WPA2:
+ desired_event = GELIC_LV1_WL_EVENT_WPA_CONNECTED;
+ break;
+ }
+
+ if (desired_event == event) {
+ pr_debug("%s: completed\n", __func__);
+ complete(&wl->assoc_done);
+ netif_carrier_on(port_to_netdev(wl_port(wl)));
+ } else
+ pr_debug("%s: event %#llx under wpa\n",
+ __func__, event);
+}
+
+/*
+ * disconnect event
+ */
+static void gelic_wl_disconnect_event(struct gelic_wl_info *wl,
+ u64 event)
+{
+ struct gelic_eurus_cmd *cmd;
+ int lock;
+
+ /*
+	 * If we get here in the middle of association,
+	 * gelic_wl_associate_bss() should be waiting for completion of
+	 * wl->assoc_done.
+	 * As it waits with a timeout, just leave assoc_done
+	 * uncompleted; it will then terminate on that timeout.
+ */
+ if (!mutex_trylock(&wl->assoc_stat_lock)) {
+ pr_debug("%s: already locked\n", __func__);
+ lock = 0;
+ } else {
+ pr_debug("%s: obtain lock\n", __func__);
+ lock = 1;
+ }
+
+ cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC, NULL, 0);
+ kfree(cmd);
+
+ /* send disconnected event to the supplicant */
+ if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED)
+ gelic_wl_send_iwap_event(wl, NULL);
+
+ wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN;
+ netif_carrier_off(port_to_netdev(wl_port(wl)));
+
+ if (lock)
+ mutex_unlock(&wl->assoc_stat_lock);
+}
+/*
+ * event worker
+ */
+#ifdef DEBUG
+static const char *eventstr(enum gelic_lv1_wl_event event)
+{
+ static char buf[32];
+ char *ret;
+ if (event & GELIC_LV1_WL_EVENT_DEVICE_READY)
+ ret = "EURUS_READY";
+ else if (event & GELIC_LV1_WL_EVENT_SCAN_COMPLETED)
+ ret = "SCAN_COMPLETED";
+ else if (event & GELIC_LV1_WL_EVENT_DEAUTH)
+ ret = "DEAUTH";
+ else if (event & GELIC_LV1_WL_EVENT_BEACON_LOST)
+ ret = "BEACON_LOST";
+ else if (event & GELIC_LV1_WL_EVENT_CONNECTED)
+ ret = "CONNECTED";
+ else if (event & GELIC_LV1_WL_EVENT_WPA_CONNECTED)
+ ret = "WPA_CONNECTED";
+ else if (event & GELIC_LV1_WL_EVENT_WPA_ERROR)
+ ret = "WPA_ERROR";
+ else {
+ sprintf(buf, "Unknown(%#x)", event);
+ ret = buf;
+ }
+ return ret;
+}
+#else
+static const char *eventstr(enum gelic_lv1_wl_event event)
+{
+ return NULL;
+}
+#endif
+static void gelic_wl_event_worker(struct work_struct *work)
+{
+ struct gelic_wl_info *wl;
+ struct gelic_port *port;
+ u64 event, tmp;
+ int status;
+
+ pr_debug("%s:start\n", __func__);
+ wl = container_of(work, struct gelic_wl_info, event_work.work);
+ port = wl_port(wl);
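+	/*
+	 * drain all queued events; lv1_net_control() returns
+	 * LV1_NO_ENTRY once the queue is empty
+	 */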
+ while (1) {
+ status = lv1_net_control(bus_id(port->card), dev_id(port->card),
+ GELIC_LV1_GET_WLAN_EVENT, 0, 0, 0,
+ &event, &tmp);
+ if (status) {
+ if (status != LV1_NO_ENTRY)
+ pr_debug("%s:wlan event failed %d\n",
+ __func__, status);
+ /* got all events */
+ pr_debug("%s:end\n", __func__);
+ return;
+ }
+ pr_debug("%s: event=%s\n", __func__, eventstr(event));
+ switch (event) {
+ case GELIC_LV1_WL_EVENT_SCAN_COMPLETED:
+ gelic_wl_scan_complete_event(wl);
+ break;
+ case GELIC_LV1_WL_EVENT_BEACON_LOST:
+ case GELIC_LV1_WL_EVENT_DEAUTH:
+ gelic_wl_disconnect_event(wl, event);
+ break;
+ case GELIC_LV1_WL_EVENT_CONNECTED:
+ case GELIC_LV1_WL_EVENT_WPA_CONNECTED:
+ gelic_wl_connected_event(wl, event);
+ break;
+ default:
+ break;
+ }
+ } /* while */
+}
+/*
+ * association worker
+ */
+static void gelic_wl_assoc_worker(struct work_struct *work)
+{
+ struct gelic_wl_info *wl;
+
+ struct gelic_wl_scan_info *best_bss;
+ int ret;
+ unsigned long irqflag;
+ u8 *essid;
+ size_t essid_len;
+
+ wl = container_of(work, struct gelic_wl_info, assoc_work.work);
+
+ mutex_lock(&wl->assoc_stat_lock);
+
+ if (wl->assoc_stat != GELIC_WL_ASSOC_STAT_DISCONN)
+ goto out;
+
+ spin_lock_irqsave(&wl->lock, irqflag);
+ if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat)) {
+ pr_debug("%s: assoc ESSID configured %s\n", __func__,
+ wl->essid);
+ essid = wl->essid;
+ essid_len = wl->essid_len;
+ } else {
+ essid = NULL;
+ essid_len = 0;
+ }
+ spin_unlock_irqrestore(&wl->lock, irqflag);
+
+ ret = gelic_wl_start_scan(wl, 0, essid, essid_len);
+ if (ret == -ERESTARTSYS) {
+ pr_debug("%s: scan start failed association\n", __func__);
+ schedule_delayed_work(&wl->assoc_work, HZ/10); /*FIXME*/
+ goto out;
+ } else if (ret) {
+ pr_info("%s: scan prerequisite failed\n", __func__);
+ goto out;
+ }
+
+ /*
+	 * Wait for the bss scan to complete.
+	 * If we already have a scan list, gelic_wl_start_scan()
+	 * returns OK and completes scan_done, so it is safe
+	 * to wait unconditionally here.
+ */
+ wait_for_completion(&wl->scan_done);
+
+ pr_debug("%s: scan done\n", __func__);
+ mutex_lock(&wl->scan_lock);
+ if (wl->scan_stat != GELIC_WL_SCAN_STAT_GOT_LIST) {
+ gelic_wl_send_iwap_event(wl, NULL);
+ pr_info("%s: no scan list. association failed\n", __func__);
+ goto scan_lock_out;
+ }
+
+ /* find best matching bss */
+ best_bss = gelic_wl_find_best_bss(wl);
+ if (!best_bss) {
+ gelic_wl_send_iwap_event(wl, NULL);
+ pr_info("%s: no bss matched. association failed\n", __func__);
+ goto scan_lock_out;
+ }
+
+ /* ok, do association */
+ ret = gelic_wl_associate_bss(wl, best_bss);
+ if (ret)
+ pr_info("%s: association failed %d\n", __func__, ret);
+scan_lock_out:
+ mutex_unlock(&wl->scan_lock);
+out:
+ mutex_unlock(&wl->assoc_stat_lock);
+}
+/*
+ * Interrupt handler
+ * Called from the ethernet interrupt handler
+ * Processes wireless specific virtual interrupts only
+ */
+void gelic_wl_interrupt(struct net_device *netdev, u64 status)
+{
+ struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
+
+ if (status & GELIC_CARD_WLAN_COMMAND_COMPLETED) {
+ pr_debug("%s:cmd complete\n", __func__);
+ complete(&wl->cmd_done_intr);
+ }
+
+ if (status & GELIC_CARD_WLAN_EVENT_RECEIVED) {
+ pr_debug("%s:event received\n", __func__);
+ queue_delayed_work(wl->event_queue, &wl->event_work, 0);
+ }
+}
+
+/*
+ * driver helpers
+ */
+static const iw_handler gelic_wl_wext_handler[] =
+{
+ IW_HANDLER(SIOCGIWNAME, gelic_wl_get_name),
+ IW_HANDLER(SIOCGIWRANGE, gelic_wl_get_range),
+ IW_HANDLER(SIOCSIWSCAN, gelic_wl_set_scan),
+ IW_HANDLER(SIOCGIWSCAN, gelic_wl_get_scan),
+ IW_HANDLER(SIOCSIWAUTH, gelic_wl_set_auth),
+ IW_HANDLER(SIOCGIWAUTH, gelic_wl_get_auth),
+ IW_HANDLER(SIOCSIWESSID, gelic_wl_set_essid),
+ IW_HANDLER(SIOCGIWESSID, gelic_wl_get_essid),
+ IW_HANDLER(SIOCSIWENCODE, gelic_wl_set_encode),
+ IW_HANDLER(SIOCGIWENCODE, gelic_wl_get_encode),
+ IW_HANDLER(SIOCSIWAP, gelic_wl_set_ap),
+ IW_HANDLER(SIOCGIWAP, gelic_wl_get_ap),
+ IW_HANDLER(SIOCSIWENCODEEXT, gelic_wl_set_encodeext),
+ IW_HANDLER(SIOCGIWENCODEEXT, gelic_wl_get_encodeext),
+ IW_HANDLER(SIOCSIWMODE, gelic_wl_set_mode),
+ IW_HANDLER(SIOCGIWMODE, gelic_wl_get_mode),
+ IW_HANDLER(SIOCGIWNICKN, gelic_wl_get_nick),
+};
+
+static const struct iw_handler_def gelic_wl_wext_handler_def = {
+ .num_standard = ARRAY_SIZE(gelic_wl_wext_handler),
+ .standard = gelic_wl_wext_handler,
+ .get_wireless_stats = gelic_wl_get_wireless_stats,
+};
+
+static struct net_device * __devinit gelic_wl_alloc(struct gelic_card *card)
+{
+ struct net_device *netdev;
+ struct gelic_port *port;
+ struct gelic_wl_info *wl;
+ unsigned int i;
+
+ pr_debug("%s:start\n", __func__);
+ netdev = alloc_etherdev(sizeof(struct gelic_port) +
+ sizeof(struct gelic_wl_info));
+ pr_debug("%s: netdev =%p card=%p\n", __func__, netdev, card);
+ if (!netdev)
+ return NULL;
+
+ strcpy(netdev->name, "wlan%d");
+
+ port = netdev_priv(netdev);
+ port->netdev = netdev;
+ port->card = card;
+ port->type = GELIC_PORT_WIRELESS;
+
+ wl = port_wl(port);
+ pr_debug("%s: wl=%p port=%p\n", __func__, wl, port);
+
+ /* allocate scan list */
+ wl->networks = kzalloc(sizeof(struct gelic_wl_scan_info) *
+ GELIC_WL_BSS_MAX_ENT, GFP_KERNEL);
+
+ if (!wl->networks)
+ goto fail_bss;
+
+ wl->eurus_cmd_queue = create_singlethread_workqueue("gelic_cmd");
+ if (!wl->eurus_cmd_queue)
+ goto fail_cmd_workqueue;
+
+ wl->event_queue = create_singlethread_workqueue("gelic_event");
+ if (!wl->event_queue)
+ goto fail_event_workqueue;
+
+ INIT_LIST_HEAD(&wl->network_free_list);
+ INIT_LIST_HEAD(&wl->network_list);
+ for (i = 0; i < GELIC_WL_BSS_MAX_ENT; i++)
+ list_add_tail(&wl->networks[i].list,
+ &wl->network_free_list);
+ init_completion(&wl->cmd_done_intr);
+
+ INIT_DELAYED_WORK(&wl->event_work, gelic_wl_event_worker);
+ INIT_DELAYED_WORK(&wl->assoc_work, gelic_wl_assoc_worker);
+ mutex_init(&wl->scan_lock);
+ mutex_init(&wl->assoc_stat_lock);
+
+ init_completion(&wl->scan_done);
+	/* cover the case where stop() is called without any scan request having been issued */
+ complete(&wl->scan_done);
+
+ spin_lock_init(&wl->lock);
+
+ wl->scan_age = 5*HZ; /* FIXME */
+
+ /* buffer for receiving scanned list etc */
+ BUILD_BUG_ON(PAGE_SIZE <
+ sizeof(struct gelic_eurus_scan_info) *
+ GELIC_EURUS_MAX_SCAN);
+ pr_debug("%s:end\n", __func__);
+ return netdev;
+
+fail_event_workqueue:
+ destroy_workqueue(wl->eurus_cmd_queue);
+fail_cmd_workqueue:
+ kfree(wl->networks);
+fail_bss:
+ free_netdev(netdev);
+ pr_debug("%s:end error\n", __func__);
+ return NULL;
+
+}
+
+static void gelic_wl_free(struct gelic_wl_info *wl)
+{
+ struct gelic_wl_scan_info *scan_info;
+ unsigned int i;
+
+ pr_debug("%s: <-\n", __func__);
+
+ pr_debug("%s: destroy queues\n", __func__);
+ destroy_workqueue(wl->eurus_cmd_queue);
+ destroy_workqueue(wl->event_queue);
+
+ scan_info = wl->networks;
+ for (i = 0; i < GELIC_WL_BSS_MAX_ENT; i++, scan_info++)
+ kfree(scan_info->hwinfo);
+ kfree(wl->networks);
+
+ free_netdev(port_to_netdev(wl_port(wl)));
+
+ pr_debug("%s: ->\n", __func__);
+}
+
+static int gelic_wl_try_associate(struct net_device *netdev)
+{
+ struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
+ int ret = -1;
+ unsigned int i;
+
+ pr_debug("%s: <-\n", __func__);
+
+	/* check constraints before starting association */
+	/* for an AP with no access restriction */
+ if (wl->group_cipher_method == GELIC_WL_CIPHER_NONE) {
+ if (test_bit(GELIC_WL_STAT_CONFIGURED,
+ &wl->stat))
+ goto do_associate;
+ else {
+ pr_debug("%s: no wep, not configured\n", __func__);
+ return ret;
+ }
+ }
+
+ /* for WEP, one of four keys should be set */
+ if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) {
+ /* one of keys set */
+ for (i = 0; i < GELIC_WEP_KEYS; i++) {
+ if (test_bit(i, &wl->key_enabled))
+ goto do_associate;
+ }
+ pr_debug("%s: WEP, but no key specified\n", __func__);
+ return ret;
+ }
+
+ /* for WPA[2], psk should be set */
+ if ((wl->group_cipher_method == GELIC_WL_CIPHER_TKIP) ||
+ (wl->group_cipher_method == GELIC_WL_CIPHER_AES)) {
+ if (test_bit(GELIC_WL_STAT_WPA_PSK_SET,
+ &wl->stat))
+ goto do_associate;
+ else {
+ pr_debug("%s: AES/TKIP, but PSK not configured\n",
+ __func__);
+ return ret;
+ }
+ }
+
+do_associate:
+ ret = schedule_delayed_work(&wl->assoc_work, 0);
+ pr_debug("%s: start association work %d\n", __func__, ret);
+ return ret;
+}
+
+/*
+ * netdev handlers
+ */
+static int gelic_wl_open(struct net_device *netdev)
+{
+ struct gelic_card *card = netdev_card(netdev);
+
+ pr_debug("%s:->%p\n", __func__, netdev);
+
+ gelic_card_up(card);
+
+ /* try to associate */
+ gelic_wl_try_associate(netdev);
+
+ netif_start_queue(netdev);
+
+ pr_debug("%s:<-\n", __func__);
+ return 0;
+}
+
+/*
+ * reset state machine
+ */
+static int gelic_wl_reset_state(struct gelic_wl_info *wl)
+{
+ struct gelic_wl_scan_info *target;
+ struct gelic_wl_scan_info *tmp;
+
+ /* empty scan list */
+ list_for_each_entry_safe(target, tmp, &wl->network_list, list) {
+ list_move_tail(&target->list, &wl->network_free_list);
+ }
+ wl->scan_stat = GELIC_WL_SCAN_STAT_INIT;
+
+ /* clear configuration */
+ wl->auth_method = GELIC_EURUS_AUTH_OPEN;
+ wl->group_cipher_method = GELIC_WL_CIPHER_NONE;
+ wl->pairwise_cipher_method = GELIC_WL_CIPHER_NONE;
+ wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE;
+
+ wl->key_enabled = 0;
+ wl->current_key = 0;
+
+ wl->psk_type = GELIC_EURUS_WPA_PSK_PASSPHRASE;
+ wl->psk_len = 0;
+
+ wl->essid_len = 0;
+ memset(wl->essid, 0, sizeof(wl->essid));
+ memset(wl->bssid, 0, sizeof(wl->bssid));
+ memset(wl->active_bssid, 0, sizeof(wl->active_bssid));
+
+ wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN;
+
+ memset(&wl->iwstat, 0, sizeof(wl->iwstat));
+ /* all status bit clear */
+ wl->stat = 0;
+ return 0;
+}
+
+/*
+ * Tell eurus to terminate association
+ */
+static void gelic_wl_disconnect(struct net_device *netdev)
+{
+ struct gelic_port *port = netdev_priv(netdev);
+ struct gelic_wl_info *wl = port_wl(port);
+ struct gelic_eurus_cmd *cmd;
+
+ /*
+	 * If a scan is in progress on the chip,
+	 * further requests will be rejected
+ */
+ if (wl->scan_stat == GELIC_WL_SCAN_STAT_SCANNING)
+ wait_for_completion_timeout(&wl->scan_done, HZ);
+
+ cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC, NULL, 0);
+ kfree(cmd);
+ gelic_wl_send_iwap_event(wl, NULL);
+}
+
+static int gelic_wl_stop(struct net_device *netdev)
+{
+ struct gelic_port *port = netdev_priv(netdev);
+ struct gelic_wl_info *wl = port_wl(port);
+ struct gelic_card *card = netdev_card(netdev);
+
+ pr_debug("%s:<-\n", __func__);
+
+ /*
+ * Cancel pending association work.
+	 * The event work may still run after the netdev is down.
+ */
+ cancel_delayed_work(&wl->assoc_work);
+
+ if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED)
+ gelic_wl_disconnect(netdev);
+
+ /* reset our state machine */
+ gelic_wl_reset_state(wl);
+
+ netif_stop_queue(netdev);
+
+ gelic_card_down(card);
+
+ pr_debug("%s:->\n", __func__);
+ return 0;
+}
+
+/* -- */
+
+static const struct net_device_ops gelic_wl_netdevice_ops = {
+ .ndo_open = gelic_wl_open,
+ .ndo_stop = gelic_wl_stop,
+ .ndo_start_xmit = gelic_net_xmit,
+ .ndo_set_rx_mode = gelic_net_set_multi,
+ .ndo_change_mtu = gelic_net_change_mtu,
+ .ndo_tx_timeout = gelic_net_tx_timeout,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = gelic_net_poll_controller,
+#endif
+};
+
+static const struct ethtool_ops gelic_wl_ethtool_ops = {
+ .get_drvinfo = gelic_net_get_drvinfo,
+ .get_link = gelic_wl_get_link,
+};
+
+static void __devinit gelic_wl_setup_netdev_ops(struct net_device *netdev)
+{
+ struct gelic_wl_info *wl;
+ wl = port_wl(netdev_priv(netdev));
+ BUG_ON(!wl);
+ netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
+
+ netdev->ethtool_ops = &gelic_wl_ethtool_ops;
+ netdev->netdev_ops = &gelic_wl_netdevice_ops;
+ netdev->wireless_data = &wl->wireless_data;
+ netdev->wireless_handlers = &gelic_wl_wext_handler_def;
+}
+
+/*
+ * driver probe/remove
+ */
+int __devinit gelic_wl_driver_probe(struct gelic_card *card)
+{
+ int ret;
+ struct net_device *netdev;
+
+ pr_debug("%s:start\n", __func__);
+
+ if (ps3_compare_firmware_version(1, 6, 0) < 0)
+ return 0;
+ if (!card->vlan[GELIC_PORT_WIRELESS].tx)
+ return 0;
+
+ /* alloc netdevice for wireless */
+ netdev = gelic_wl_alloc(card);
+ if (!netdev)
+ return -ENOMEM;
+
+ /* setup net_device structure */
+ SET_NETDEV_DEV(netdev, &card->dev->core);
+ gelic_wl_setup_netdev_ops(netdev);
+
+ /* setup some of net_device and register it */
+ ret = gelic_net_setup_netdev(netdev, card);
+ if (ret)
+ goto fail_setup;
+ card->netdev[GELIC_PORT_WIRELESS] = netdev;
+
+	/* add the wireless interrupt bits to the irq mask */
+ card->irq_mask |= GELIC_CARD_WLAN_EVENT_RECEIVED |
+ GELIC_CARD_WLAN_COMMAND_COMPLETED;
+ /* to allow wireless commands while both interfaces are down */
+ gelic_card_set_irq_mask(card, GELIC_CARD_WLAN_EVENT_RECEIVED |
+ GELIC_CARD_WLAN_COMMAND_COMPLETED);
+ pr_debug("%s:end\n", __func__);
+ return 0;
+
+fail_setup:
+ gelic_wl_free(port_wl(netdev_port(netdev)));
+
+ return ret;
+}
+
+int gelic_wl_driver_remove(struct gelic_card *card)
+{
+ struct gelic_wl_info *wl;
+ struct net_device *netdev;
+
+ pr_debug("%s:start\n", __func__);
+
+ if (ps3_compare_firmware_version(1, 6, 0) < 0)
+ return 0;
+ if (!card->vlan[GELIC_PORT_WIRELESS].tx)
+ return 0;
+
+ netdev = card->netdev[GELIC_PORT_WIRELESS];
+ wl = port_wl(netdev_priv(netdev));
+
+ /* if the interface was not up, but associated */
+ if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED)
+ gelic_wl_disconnect(netdev);
+
+ complete(&wl->cmd_done_intr);
+
+	/* cancel all queued work */
+ cancel_delayed_work(&wl->assoc_work);
+ cancel_delayed_work(&wl->event_work);
+ flush_workqueue(wl->eurus_cmd_queue);
+ flush_workqueue(wl->event_queue);
+
+ unregister_netdev(netdev);
+
+ /* disable wireless interrupt */
+ pr_debug("%s: disable intr\n", __func__);
+ card->irq_mask &= ~(GELIC_CARD_WLAN_EVENT_RECEIVED |
+ GELIC_CARD_WLAN_COMMAND_COMPLETED);
+	/* free bss list and netdev */
+ gelic_wl_free(wl);
+ pr_debug("%s:end\n", __func__);
+ return 0;
+}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
new file mode 100644
index 000000000000..f7e51b7d7049
--- /dev/null
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
@@ -0,0 +1,326 @@
+/*
+ * PS3 gelic network driver.
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2007 Sony Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef _GELIC_WIRELESS_H
+#define _GELIC_WIRELESS_H
+
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+
+
+/* return value from GELIC_LV1_GET_WLAN_EVENT netcontrol */
+enum gelic_lv1_wl_event {
+ GELIC_LV1_WL_EVENT_DEVICE_READY = 0x01, /* Eurus ready */
+ GELIC_LV1_WL_EVENT_SCAN_COMPLETED = 0x02, /* Scan has completed */
+ GELIC_LV1_WL_EVENT_DEAUTH = 0x04, /* Deauthed by the AP */
+ GELIC_LV1_WL_EVENT_BEACON_LOST = 0x08, /* Beacon lost detected */
+ GELIC_LV1_WL_EVENT_CONNECTED = 0x10, /* Connected to AP */
+ GELIC_LV1_WL_EVENT_WPA_CONNECTED = 0x20, /* WPA connection */
+ GELIC_LV1_WL_EVENT_WPA_ERROR = 0x40, /* MIC error */
+};
+
+/* arguments for GELIC_LV1_POST_WLAN_COMMAND netcontrol */
+enum gelic_eurus_command {
+ GELIC_EURUS_CMD_ASSOC = 1, /* association start */
+ GELIC_EURUS_CMD_DISASSOC = 2, /* disassociate */
+ GELIC_EURUS_CMD_START_SCAN = 3, /* scan start */
+ GELIC_EURUS_CMD_GET_SCAN = 4, /* get scan result */
+ GELIC_EURUS_CMD_SET_COMMON_CFG = 5, /* set common config */
+	GELIC_EURUS_CMD_GET_COMMON_CFG = 6,	/* get common config */
+ GELIC_EURUS_CMD_SET_WEP_CFG = 7, /* set WEP config */
+ GELIC_EURUS_CMD_GET_WEP_CFG = 8, /* get WEP config */
+ GELIC_EURUS_CMD_SET_WPA_CFG = 9, /* set WPA config */
+ GELIC_EURUS_CMD_GET_WPA_CFG = 10, /* get WPA config */
+ GELIC_EURUS_CMD_GET_RSSI_CFG = 11, /* get RSSI info. */
+ GELIC_EURUS_CMD_MAX_INDEX
+};
+
+/* for GELIC_EURUS_CMD_{SET,GET}_COMMON_CFG */
+enum gelic_eurus_bss_type {
+ GELIC_EURUS_BSS_INFRA = 0,
+ GELIC_EURUS_BSS_ADHOC = 1, /* not supported */
+};
+
+enum gelic_eurus_auth_method {
+ GELIC_EURUS_AUTH_OPEN = 0, /* FIXME: WLAN_AUTH_OPEN */
+ GELIC_EURUS_AUTH_SHARED = 1, /* not supported */
+};
+
+enum gelic_eurus_opmode {
+ GELIC_EURUS_OPMODE_11BG = 0, /* 802.11b/g */
+ GELIC_EURUS_OPMODE_11B = 1, /* 802.11b only */
+ GELIC_EURUS_OPMODE_11G = 2, /* 802.11g only */
+};
+
+struct gelic_eurus_common_cfg {
+ /* all fields are big endian */
+ u16 scan_index;
+ u16 bss_type; /* infra or adhoc */
+ u16 auth_method; /* shared key or open */
+ u16 op_mode; /* B/G */
+} __packed;
+
+
+/* for GELIC_EURUS_CMD_{SET,GET}_WEP_CFG */
+enum gelic_eurus_wep_security {
+ GELIC_EURUS_WEP_SEC_NONE = 0,
+ GELIC_EURUS_WEP_SEC_40BIT = 1,
+ GELIC_EURUS_WEP_SEC_104BIT = 2,
+};
+
+struct gelic_eurus_wep_cfg {
+ /* all fields are big endian */
+ u16 security;
+ u8 key[4][16];
+} __packed;
+
+/* for GELIC_EURUS_CMD_{SET,GET}_WPA_CFG */
+enum gelic_eurus_wpa_security {
+ GELIC_EURUS_WPA_SEC_NONE = 0x0000,
+ /* group=TKIP, pairwise=TKIP */
+ GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP = 0x0001,
+ /* group=AES, pairwise=AES */
+ GELIC_EURUS_WPA_SEC_WPA_AES_AES = 0x0002,
+ /* group=TKIP, pairwise=TKIP */
+ GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP = 0x0004,
+ /* group=AES, pairwise=AES */
+ GELIC_EURUS_WPA_SEC_WPA2_AES_AES = 0x0008,
+ /* group=TKIP, pairwise=AES */
+ GELIC_EURUS_WPA_SEC_WPA_TKIP_AES = 0x0010,
+ /* group=TKIP, pairwise=AES */
+ GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES = 0x0020,
+};
+
+enum gelic_eurus_wpa_psk_type {
+ GELIC_EURUS_WPA_PSK_PASSPHRASE = 0, /* passphrase string */
+ GELIC_EURUS_WPA_PSK_BIN = 1, /* 32 bytes binary key */
+};
+
+#define GELIC_WL_EURUS_PSK_MAX_LEN 64
+#define WPA_PSK_LEN 32 /* WPA spec says 256bit */
+
+struct gelic_eurus_wpa_cfg {
+ /* all fields are big endian */
+ u16 security;
+ u16 psk_type; /* psk key encoding type */
+ u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN]; /* psk key; hex or passphrase */
+} __packed;
+
+/* for GELIC_EURUS_CMD_{START,GET}_SCAN */
+enum gelic_eurus_scan_capability {
+ GELIC_EURUS_SCAN_CAP_ADHOC = 0x0000,
+ GELIC_EURUS_SCAN_CAP_INFRA = 0x0001,
+ GELIC_EURUS_SCAN_CAP_MASK = 0x0001,
+};
+
+enum gelic_eurus_scan_sec_type {
+ GELIC_EURUS_SCAN_SEC_NONE = 0x0000,
+ GELIC_EURUS_SCAN_SEC_WEP = 0x0100,
+ GELIC_EURUS_SCAN_SEC_WPA = 0x0200,
+ GELIC_EURUS_SCAN_SEC_WPA2 = 0x0400,
+ GELIC_EURUS_SCAN_SEC_MASK = 0x0f00,
+};
+
+enum gelic_eurus_scan_sec_wep_type {
+ GELIC_EURUS_SCAN_SEC_WEP_UNKNOWN = 0x0000,
+ GELIC_EURUS_SCAN_SEC_WEP_40 = 0x0001,
+ GELIC_EURUS_SCAN_SEC_WEP_104 = 0x0002,
+ GELIC_EURUS_SCAN_SEC_WEP_MASK = 0x0003,
+};
+
+enum gelic_eurus_scan_sec_wpa_type {
+ GELIC_EURUS_SCAN_SEC_WPA_UNKNOWN = 0x0000,
+ GELIC_EURUS_SCAN_SEC_WPA_TKIP = 0x0001,
+ GELIC_EURUS_SCAN_SEC_WPA_AES = 0x0002,
+ GELIC_EURUS_SCAN_SEC_WPA_MASK = 0x0003,
+};
+
+/*
+ * hw BSS information structure returned from GELIC_EURUS_CMD_GET_SCAN
+ */
+struct gelic_eurus_scan_info {
+ /* all fields are big endian */
+ __be16 size;
+ __be16 rssi; /* percentage */
+ __be16 channel; /* channel number */
+ __be16 beacon_period; /* FIXME: in msec unit */
+ __be16 capability;
+ __be16 security;
+	u8  bssid[8]; /* last ETH_ALEN bytes are valid; bssid[0],[1] are unused */
+ u8 essid[32]; /* IW_ESSID_MAX_SIZE */
+ u8 rate[16]; /* first 12 are valid */
+ u8 ext_rate[16]; /* first 16 are valid */
+ __be32 reserved1;
+ __be32 reserved2;
+ __be32 reserved3;
+ __be32 reserved4;
+ u8 elements[0]; /* ie */
+} __packed;
+
+/* the hypervisor returns up to 16 BSS entries */
+#define GELIC_EURUS_MAX_SCAN (16)
+struct gelic_wl_scan_info {
+ struct list_head list;
+ struct gelic_eurus_scan_info *hwinfo;
+
+	int valid; /* set to 1 if this entry was in the latest
+		    * scan list from Eurus */
+ unsigned int eurus_index; /* index in the Eurus list */
+ unsigned long last_scanned; /* acquired time */
+
+ unsigned int rate_len;
+ unsigned int rate_ext_len;
+ unsigned int essid_len;
+};
+
+/* for GELIC_EURUS_CMD_GET_RSSI_CFG */
+struct gelic_eurus_rssi_info {
+ /* big endian */
+ __be16 rssi;
+} __packed;
+
+
+/* for 'stat' member of gelic_wl_info */
+enum gelic_wl_info_status_bit {
+ GELIC_WL_STAT_CONFIGURED,
+ GELIC_WL_STAT_CH_INFO, /* ch info acquired */
+ GELIC_WL_STAT_ESSID_SET, /* ESSID specified by userspace */
+ GELIC_WL_STAT_BSSID_SET, /* BSSID specified by userspace */
+ GELIC_WL_STAT_WPA_PSK_SET, /* PMK specified by userspace */
+ GELIC_WL_STAT_WPA_LEVEL_SET, /* WEP or WPA[2] selected */
+};
+
+/* for 'scan_stat' member of gelic_wl_info */
+enum gelic_wl_scan_state {
+ /* just initialized or get last scan result failed */
+ GELIC_WL_SCAN_STAT_INIT,
+ /* scan request issued, accepted or chip is scanning */
+ GELIC_WL_SCAN_STAT_SCANNING,
+ /* scan results retrieved */
+ GELIC_WL_SCAN_STAT_GOT_LIST,
+};
+
+/* for 'cipher_method' */
+enum gelic_wl_cipher_method {
+ GELIC_WL_CIPHER_NONE,
+ GELIC_WL_CIPHER_WEP,
+ GELIC_WL_CIPHER_TKIP,
+ GELIC_WL_CIPHER_AES,
+};
+
+/* for 'wpa_level' */
+enum gelic_wl_wpa_level {
+ GELIC_WL_WPA_LEVEL_NONE,
+ GELIC_WL_WPA_LEVEL_WPA,
+ GELIC_WL_WPA_LEVEL_WPA2,
+};
+
+/* for 'assoc_stat' */
+enum gelic_wl_assoc_state {
+ GELIC_WL_ASSOC_STAT_DISCONN,
+ GELIC_WL_ASSOC_STAT_ASSOCIATING,
+ GELIC_WL_ASSOC_STAT_ASSOCIATED,
+};
+/* part of the private data allocated by alloc_etherdev() */
+#define GELIC_WEP_KEYS 4
+struct gelic_wl_info {
+ /* bss list */
+ struct mutex scan_lock;
+ struct list_head network_list;
+ struct list_head network_free_list;
+ struct gelic_wl_scan_info *networks;
+
+	unsigned long scan_age; /* max age of scan results */
+ enum gelic_wl_scan_state scan_stat;
+ struct completion scan_done;
+
+ /* eurus command queue */
+ struct workqueue_struct *eurus_cmd_queue;
+ struct completion cmd_done_intr;
+
+ /* eurus event handling */
+ struct workqueue_struct *event_queue;
+ struct delayed_work event_work;
+
+ /* wl status bits */
+ unsigned long stat;
+ enum gelic_eurus_auth_method auth_method; /* open/shared */
+ enum gelic_wl_cipher_method group_cipher_method;
+ enum gelic_wl_cipher_method pairwise_cipher_method;
+ enum gelic_wl_wpa_level wpa_level; /* wpa/wpa2 */
+
+ /* association handling */
+ struct mutex assoc_stat_lock;
+ struct delayed_work assoc_work;
+ enum gelic_wl_assoc_state assoc_stat;
+ struct completion assoc_done;
+
+ spinlock_t lock;
+ u16 ch_info; /* available channels. bit0 = ch1 */
+ /* WEP keys */
+ u8 key[GELIC_WEP_KEYS][IW_ENCODING_TOKEN_MAX];
+ unsigned long key_enabled;
+ unsigned int key_len[GELIC_WEP_KEYS];
+ unsigned int current_key;
+	/* WPA PSK */
+ u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN];
+ enum gelic_eurus_wpa_psk_type psk_type;
+ unsigned int psk_len;
+
+ u8 essid[IW_ESSID_MAX_SIZE];
+ u8 bssid[ETH_ALEN]; /* userland requested */
+ u8 active_bssid[ETH_ALEN]; /* associated bssid */
+ unsigned int essid_len;
+
+ struct iw_public_data wireless_data;
+ struct iw_statistics iwstat;
+};
+
+#define GELIC_WL_BSS_MAX_ENT 32
+#define GELIC_WL_ASSOC_RETRY 50
+static inline struct gelic_port *wl_port(struct gelic_wl_info *wl)
+{
+ return container_of((void *)wl, struct gelic_port, priv);
+}
+static inline struct gelic_wl_info *port_wl(struct gelic_port *port)
+{
+ return port_priv(port);
+}
+
+struct gelic_eurus_cmd {
+ struct work_struct work;
+ struct gelic_wl_info *wl;
+ unsigned int cmd; /* command code */
+ u64 tag;
+ u64 size;
+ void *buffer;
+ unsigned int buf_size;
+ struct completion done;
+ int status;
+ u64 cmd_status;
+};
+
+/* private ioctls to pass PSK */
+#define GELIC_WL_PRIV_SET_PSK (SIOCIWFIRSTPRIV + 0)
+#define GELIC_WL_PRIV_GET_PSK (SIOCIWFIRSTPRIV + 1)
+
+extern int gelic_wl_driver_probe(struct gelic_card *card);
+extern int gelic_wl_driver_remove(struct gelic_card *card);
+extern void gelic_wl_interrupt(struct net_device *netdev, u64 status);
+#endif /* _GELIC_WIRELESS_H */
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
new file mode 100644
index 000000000000..6199f6b387b6
--- /dev/null
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -0,0 +1,2603 @@
+/*
+ * Network device driver for Cell Processor-Based Blade and Celleb platform
+ *
+ * (C) Copyright IBM Corp. 2005
+ * (C) Copyright 2006 TOSHIBA CORPORATION
+ *
+ * Authors : Utz Bacher <utz.bacher@de.ibm.com>
+ * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/compiler.h>
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/firmware.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/gfp.h>
+#include <linux/ioport.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <asm/pci-bridge.h>
+#include <net/checksum.h>
+
+#include "spider_net.h"
+
+MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
+ "<Jens.Osterkamp@de.ibm.com>");
+MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VERSION);
+MODULE_FIRMWARE(SPIDER_NET_FIRMWARE_NAME);
+
+static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
+static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
+
+module_param(rx_descriptors, int, 0444);
+module_param(tx_descriptors, int, 0444);
+
+MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
+ "in rx chains");
+MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
+ "in tx chain");
+
+char spider_net_driver_name[] = "spidernet";
+
+static DEFINE_PCI_DEVICE_TABLE(spider_net_pci_tbl) = {
+ { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
+
+/**
+ * spider_net_read_reg - reads an SMMIO register of a card
+ * @card: device structure
+ * @reg: register to read from
+ *
+ * returns the content of the specified SMMIO register.
+ */
+static inline u32
+spider_net_read_reg(struct spider_net_card *card, u32 reg)
+{
+ /* We use the powerpc specific variants instead of readl_be() because
+ * we know spidernet is not a real PCI device and we can thus avoid the
+ * performance hit caused by the PCI workarounds.
+ */
+ return in_be32(card->regs + reg);
+}
+
+/**
+ * spider_net_write_reg - writes to an SMMIO register of a card
+ * @card: device structure
+ * @reg: register to write to
+ * @value: value to write into the specified SMMIO register
+ */
+static inline void
+spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
+{
+ /* We use the powerpc specific variants instead of writel_be() because
+ * we know spidernet is not a real PCI device and we can thus avoid the
+ * performance hit caused by the PCI workarounds.
+ */
+ out_be32(card->regs + reg, value);
+}
+
+/**
+ * spider_net_write_phy - write to phy register
+ * @netdev: adapter to be written to
+ * @mii_id: id of MII
+ * @reg: PHY register
+ * @val: value to be written to phy register
+ *
+ * spider_net_write_phy writes to an arbitrary PHY
+ * register via the spider GPCWOPCMD register. We assume the queue does
+ * not run full (not more than 15 commands outstanding).
+ **/
+static void
+spider_net_write_phy(struct net_device *netdev, int mii_id,
+ int reg, int val)
+{
+ struct spider_net_card *card = netdev_priv(netdev);
+ u32 writevalue;
+
+ writevalue = ((u32)mii_id << 21) |
+ ((u32)reg << 16) | ((u32)val);
+
+ spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
+}
+
+/**
+ * spider_net_read_phy - read from phy register
+ * @netdev: network device to be read from
+ * @mii_id: id of MII
+ * @reg: PHY register
+ *
+ * Returns the value read from the PHY register
+ *
+ * spider_net_read_phy reads from an arbitrary PHY
+ * register via the spider GPCROPCMD register
+ **/
+static int
+spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
+{
+ struct spider_net_card *card = netdev_priv(netdev);
+ u32 readvalue;
+
+ readvalue = ((u32)mii_id << 21) | ((u32)reg << 16);
+ spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue);
+
+ /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
+ * interrupt, as we poll for the completion of the read operation
+ * in spider_net_read_phy. Should take about 50 us */
+ do {
+ readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
+ } while (readvalue & SPIDER_NET_GPREXEC);
+
+ readvalue &= SPIDER_NET_GPRDAT_MASK;
+
+ return readvalue;
+}
+
+/**
+ * spider_net_setup_aneg - initial auto-negotiation setup
+ * @card: device structure
+ **/
+static void
+spider_net_setup_aneg(struct spider_net_card *card)
+{
+ struct mii_phy *phy = &card->phy;
+ u32 advertise = 0;
+ u16 bmsr, estat;
+
+ bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
+ estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS);
+
+ if (bmsr & BMSR_10HALF)
+ advertise |= ADVERTISED_10baseT_Half;
+ if (bmsr & BMSR_10FULL)
+ advertise |= ADVERTISED_10baseT_Full;
+ if (bmsr & BMSR_100HALF)
+ advertise |= ADVERTISED_100baseT_Half;
+ if (bmsr & BMSR_100FULL)
+ advertise |= ADVERTISED_100baseT_Full;
+
+	if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL))
+		advertise |= ADVERTISED_1000baseT_Full;
+	if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
+		advertise |= ADVERTISED_1000baseT_Half;
+
+ sungem_phy_probe(phy, phy->mii_id);
+ phy->def->ops->setup_aneg(phy, advertise);
+}
+
+/**
+ * spider_net_rx_irq_off - switch off rx irq on this spider card
+ * @card: device structure
+ *
+ * switches off rx irq by masking them out in the GHIINTnMSK register
+ */
+static void
+spider_net_rx_irq_off(struct spider_net_card *card)
+{
+ u32 regvalue;
+
+ regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
+ spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
+}
+
+/**
+ * spider_net_rx_irq_on - switch on rx irq on this spider card
+ * @card: device structure
+ *
+ * switches on rx irq by enabling them in the GHIINTnMSK register
+ */
+static void
+spider_net_rx_irq_on(struct spider_net_card *card)
+{
+ u32 regvalue;
+
+ regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
+ spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
+}
+
+/**
+ * spider_net_set_promisc - sets the unicast address or the promiscuous mode
+ * @card: card structure
+ *
+ * spider_net_set_promisc sets the unicast destination address filter and
+ * thus either allows for non-promisc mode or promisc mode
+ */
+static void
+spider_net_set_promisc(struct spider_net_card *card)
+{
+ u32 macu, macl;
+ struct net_device *netdev = card->netdev;
+
+ if (netdev->flags & IFF_PROMISC) {
+ /* clear destination entry 0 */
+ spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0);
+ spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0);
+ spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
+ SPIDER_NET_PROMISC_VALUE);
+ } else {
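+		/* the six address bytes are split across a 2-byte upper
+		 * and a 4-byte lower filter register half */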
+ macu = netdev->dev_addr[0];
+ macu <<= 8;
+ macu |= netdev->dev_addr[1];
+ memcpy(&macl, &netdev->dev_addr[2], sizeof(macl));
+
+ macu |= SPIDER_NET_UA_DESCR_VALUE;
+ spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu);
+ spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl);
+ spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
+ SPIDER_NET_NONPROMISC_VALUE);
+ }
+}
+
+/**
+ * spider_net_get_mac_address - read mac address from spider card
+ * @netdev: interface device structure
+ *
+ * reads MAC address from GMACUNIMACU and GMACUNIMACL registers
+ */
+static int
+spider_net_get_mac_address(struct net_device *netdev)
+{
+ struct spider_net_card *card = netdev_priv(netdev);
+ u32 macl, macu;
+
+ macl = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACL);
+ macu = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACU);
+
+ netdev->dev_addr[0] = (macu >> 24) & 0xff;
+ netdev->dev_addr[1] = (macu >> 16) & 0xff;
+ netdev->dev_addr[2] = (macu >> 8) & 0xff;
+ netdev->dev_addr[3] = macu & 0xff;
+ netdev->dev_addr[4] = (macl >> 8) & 0xff;
+ netdev->dev_addr[5] = macl & 0xff;
+
+ if (!is_valid_ether_addr(&netdev->dev_addr[0]))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * spider_net_get_descr_status - returns the status of a descriptor
+ * @hwdescr: hardware descriptor to look at
+ *
+ * returns the status as in the dmac_cmd_status field of the descriptor
+ */
+static inline int
+spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
+{
+ return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
+}
+
+/**
+ * spider_net_free_chain - free descriptor chain
+ * @card: card structure
+ * @chain: address of chain
+ *
+ */
+static void
+spider_net_free_chain(struct spider_net_card *card,
+ struct spider_net_descr_chain *chain)
+{
+ struct spider_net_descr *descr;
+
+ descr = chain->ring;
+ do {
+ descr->bus_addr = 0;
+ descr->hwdescr->next_descr_addr = 0;
+ descr = descr->next;
+ } while (descr != chain->ring);
+
+	dma_free_coherent(&card->pdev->dev,
+			  chain->num_desc * sizeof(struct spider_net_hw_descr),
+			  chain->hwring, chain->dma_addr);
+}
+
+/**
+ * spider_net_init_chain - alloc and link descriptor chain
+ * @card: card structure
+ * @chain: address of chain
+ *
+ * We manage a circular list that mirrors the hardware structure,
+ * except that the hardware uses bus addresses.
+ *
+ * Returns 0 on success, <0 on failure
+ */
+static int
+spider_net_init_chain(struct spider_net_card *card,
+ struct spider_net_descr_chain *chain)
+{
+ int i;
+ struct spider_net_descr *descr;
+ struct spider_net_hw_descr *hwdescr;
+ dma_addr_t buf;
+ size_t alloc_size;
+
+ alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
+
+ chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
+ &chain->dma_addr, GFP_KERNEL);
+
+ if (!chain->hwring)
+ return -ENOMEM;
+
+ memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr));
+
+ /* Set up the hardware pointers in each descriptor */
+ descr = chain->ring;
+ hwdescr = chain->hwring;
+ buf = chain->dma_addr;
+ for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) {
+ hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
+ hwdescr->next_descr_addr = 0;
+
+ descr->hwdescr = hwdescr;
+ descr->bus_addr = buf;
+ descr->next = descr + 1;
+ descr->prev = descr - 1;
+
+ buf += sizeof(struct spider_net_hw_descr);
+ }
+ /* do actual circular list */
+ (descr-1)->next = chain->ring;
+ chain->ring->prev = descr-1;
+
+ spin_lock_init(&chain->lock);
+ chain->head = chain->ring;
+ chain->tail = chain->ring;
+ return 0;
+}
+
+/**
+ * spider_net_free_rx_chain_contents - frees descr contents in rx chain
+ * @card: card structure
+ */
+static void
+spider_net_free_rx_chain_contents(struct spider_net_card *card)
+{
+ struct spider_net_descr *descr;
+
+ descr = card->rx_chain.head;
+ do {
+ if (descr->skb) {
+ pci_unmap_single(card->pdev, descr->hwdescr->buf_addr,
+ SPIDER_NET_MAX_FRAME,
+ PCI_DMA_BIDIRECTIONAL);
+ dev_kfree_skb(descr->skb);
+ descr->skb = NULL;
+ }
+ descr = descr->next;
+ } while (descr != card->rx_chain.head);
+}
+
+/**
+ * spider_net_prepare_rx_descr - Reinitialize RX descriptor
+ * @card: card structure
+ * @descr: descriptor to re-init
+ *
+ * Return 0 on success, <0 on failure.
+ *
+ * Allocates a new rx skb, iommu-maps it and attaches it to the
+ * descriptor. Mark the descriptor as activated, ready-to-use.
+ */
+static int
+spider_net_prepare_rx_descr(struct spider_net_card *card,
+ struct spider_net_descr *descr)
+{
+ struct spider_net_hw_descr *hwdescr = descr->hwdescr;
+ dma_addr_t buf;
+ int offset;
+ int bufsize;
+
+ /* we need to round up the buffer size to a multiple of 128 */
+ bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
+ (~(SPIDER_NET_RXBUF_ALIGN - 1));
+
+	/* the data also needs to be 128-byte aligned, so allocate a bit
+	 * more and align the skb data pointer below */
+ descr->skb = netdev_alloc_skb(card->netdev,
+ bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
+ if (!descr->skb) {
+ if (netif_msg_rx_err(card) && net_ratelimit())
+ dev_err(&card->netdev->dev,
+ "Not enough memory to allocate rx buffer\n");
+ card->spider_stats.alloc_rx_skb_error++;
+ return -ENOMEM;
+ }
+ hwdescr->buf_size = bufsize;
+ hwdescr->result_size = 0;
+ hwdescr->valid_size = 0;
+ hwdescr->data_status = 0;
+ hwdescr->data_error = 0;
+
+ offset = ((unsigned long)descr->skb->data) &
+ (SPIDER_NET_RXBUF_ALIGN - 1);
+ if (offset)
+ skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
+ /* iommu-map the skb */
+ buf = pci_map_single(card->pdev, descr->skb->data,
+ SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(card->pdev, buf)) {
+ dev_kfree_skb_any(descr->skb);
+ descr->skb = NULL;
+ if (netif_msg_rx_err(card) && net_ratelimit())
+ dev_err(&card->netdev->dev, "Could not iommu-map rx buffer\n");
+ card->spider_stats.rx_iommu_map_error++;
+ hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
+ } else {
+ hwdescr->buf_addr = buf;
+ wmb();
+ hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
+ SPIDER_NET_DMAC_NOINTR_COMPLETE;
+ }
+
+ return 0;
+}
+
+/**
+ * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
+ * @card: card structure
+ *
+ * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
+ * chip by writing to the appropriate register. DMA is enabled in
+ * spider_net_enable_rxdmac.
+ */
+static inline void
+spider_net_enable_rxchtails(struct spider_net_card *card)
+{
+ /* assume chain is aligned correctly */
+ spider_net_write_reg(card, SPIDER_NET_GDADCHA ,
+ card->rx_chain.tail->bus_addr);
+}
+
+/**
+ * spider_net_enable_rxdmac - enables a receive DMA controller
+ * @card: card structure
+ *
+ * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
+ * in the GDADMACCNTR register
+ */
+static inline void
+spider_net_enable_rxdmac(struct spider_net_card *card)
+{
+ wmb();
+ spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
+ SPIDER_NET_DMA_RX_VALUE);
+}
+
+/**
+ * spider_net_disable_rxdmac - disables the receive DMA controller
+ * @card: card structure
+ *
+ * spider_net_disable_rxdmac terminates processing on the DMA controller
+ * by turning off the DMA controller, with the force-end flag set.
+ */
+static inline void
+spider_net_disable_rxdmac(struct spider_net_card *card)
+{
+ spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
+ SPIDER_NET_DMA_RX_FEND_VALUE);
+}
+
+/**
+ * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
+ * @card: card structure
+ *
+ * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
+ */
+static void
+spider_net_refill_rx_chain(struct spider_net_card *card)
+{
+ struct spider_net_descr_chain *chain = &card->rx_chain;
+ unsigned long flags;
+
+ /* one context doing the refill (and a second context seeing that
+ * and omitting it) is ok. If called by NAPI, we'll be called again
+ * as spider_net_decode_one_descr is called several times. If some
+ * interrupt calls us, the NAPI is about to clean up anyway. */
+ if (!spin_trylock_irqsave(&chain->lock, flags))
+ return;
+
+ while (spider_net_get_descr_status(chain->head->hwdescr) ==
+ SPIDER_NET_DESCR_NOT_IN_USE) {
+ if (spider_net_prepare_rx_descr(card, chain->head))
+ break;
+ chain->head = chain->head->next;
+ }
+
+ spin_unlock_irqrestore(&chain->lock, flags);
+}
+
+/**
+ * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
+ * @card: card structure
+ *
+ * Returns 0 on success, <0 on failure.
+ */
+static int
+spider_net_alloc_rx_skbs(struct spider_net_card *card)
+{
+ struct spider_net_descr_chain *chain = &card->rx_chain;
+ struct spider_net_descr *start = chain->tail;
+ struct spider_net_descr *descr = start;
+
+ /* Link up the hardware chain pointers */
+ do {
+ descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
+ descr = descr->next;
+ } while (descr != start);
+
+ /* Put at least one buffer into the chain. if this fails,
+ * we've got a problem. If not, spider_net_refill_rx_chain
+ * will do the rest at the end of this function. */
+ if (spider_net_prepare_rx_descr(card, chain->head))
+ goto error;
+ else
+ chain->head = chain->head->next;
+
+ /* This will allocate the rest of the rx buffers;
+ * if not, it's business as usual later on. */
+ spider_net_refill_rx_chain(card);
+ spider_net_enable_rxdmac(card);
+ return 0;
+
+error:
+ spider_net_free_rx_chain_contents(card);
+ return -ENOMEM;
+}
+
+/**
+ * spider_net_get_multicast_hash - generates hash for multicast filter table
+ * @netdev: interface device structure
+ * @addr: multicast address
+ *
+ * returns the hash value.
+ *
+ * spider_net_get_multicast_hash calculates a hash value for a given multicast
+ * address, that is used to set the multicast filter tables
+ */
+static u8
+spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
+{
+ u32 crc;
+ u8 hash;
+ char addr_for_crc[ETH_ALEN] = { 0, };
+ int i, bit;
+
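+	/*
+	 * build a copy of the address with the byte order and the bit
+	 * order within each byte reversed; the CRC is computed over that
+	 */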
+ for (i = 0; i < ETH_ALEN * 8; i++) {
+ bit = (addr[i / 8] >> (i % 8)) & 1;
+ addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
+ }
+
+ crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
+
+ hash = (crc >> 27);
+ hash <<= 3;
+ hash |= crc & 7;
+ hash &= 0xff;
+
+ return hash;
+}
+
+/**
+ * spider_net_set_multi - sets multicast addresses and promisc flags
+ * @netdev: interface device structure
+ *
+ * spider_net_set_multi configures multicast addresses as needed for the
+ * netdev interface. It also sets up multicast, allmulti and promisc
+ * flags appropriately
+ */
+static void
+spider_net_set_multi(struct net_device *netdev)
+{
+ struct netdev_hw_addr *ha;
+ u8 hash;
+ int i;
+ u32 reg;
+ struct spider_net_card *card = netdev_priv(netdev);
+ unsigned long bitmask[SPIDER_NET_MULTICAST_HASHES / BITS_PER_LONG] =
+ {0, };
+
+ spider_net_set_promisc(card);
+
+ if (netdev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < SPIDER_NET_MULTICAST_HASHES; i++) {
+ set_bit(i, bitmask);
+ }
+ goto write_hash;
+ }
+
+	/* well, we know what the broadcast hash value is: it's 0xfd
+ hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
+ set_bit(0xfd, bitmask);
+
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash = spider_net_get_multicast_hash(netdev, ha->addr);
+ set_bit(hash, bitmask);
+ }
+
+write_hash:
+ for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) {
+ reg = 0;
+ if (test_bit(i * 4, bitmask))
+ reg += 0x08;
+ reg <<= 8;
+ if (test_bit(i * 4 + 1, bitmask))
+ reg += 0x08;
+ reg <<= 8;
+ if (test_bit(i * 4 + 2, bitmask))
+ reg += 0x08;
+ reg <<= 8;
+ if (test_bit(i * 4 + 3, bitmask))
+ reg += 0x08;
+
+ spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg);
+ }
+}
+
+/**
+ * spider_net_prepare_tx_descr - fill tx descriptor with skb data
+ * @card: card structure
+ * @skb: packet to use
+ *
+ * returns 0 on success, <0 on failure.
+ *
+ * fills out the descriptor structure with skb data and len. Copies data,
+ * if needed (32bit DMA!)
+ */
+static int
+spider_net_prepare_tx_descr(struct spider_net_card *card,
+ struct sk_buff *skb)
+{
+ struct spider_net_descr_chain *chain = &card->tx_chain;
+ struct spider_net_descr *descr;
+ struct spider_net_hw_descr *hwdescr;
+ dma_addr_t buf;
+ unsigned long flags;
+
+ buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(card->pdev, buf)) {
+ if (netif_msg_tx_err(card) && net_ratelimit())
+ dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
+ "Dropping packet\n", skb->data, skb->len);
+ card->spider_stats.tx_iommu_map_error++;
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&chain->lock, flags);
+ descr = card->tx_chain.head;
+ if (descr->next == chain->tail->prev) {
+ spin_unlock_irqrestore(&chain->lock, flags);
+ pci_unmap_single(card->pdev, buf, skb->len, PCI_DMA_TODEVICE);
+ return -ENOMEM;
+ }
+ hwdescr = descr->hwdescr;
+ chain->head = descr->next;
+
+ descr->skb = skb;
+ hwdescr->buf_addr = buf;
+ hwdescr->buf_size = skb->len;
+ hwdescr->next_descr_addr = 0;
+ hwdescr->data_status = 0;
+
+ hwdescr->dmac_cmd_status =
+ SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_TXFRMTL;
+ spin_unlock_irqrestore(&chain->lock, flags);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_TCP:
+ hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
+ break;
+ case IPPROTO_UDP:
+ hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
+ break;
+ }
+
+ /* Chain the bus address, so that the DMA engine finds this descr. */
+ wmb();
+ descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
+
+ card->netdev->trans_start = jiffies; /* set netdev watchdog timer */
+ return 0;
+}
+
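+/**
+ * spider_net_set_low_watermark - set the TX low-watermark descriptor
+ * @card: card structure
+ *
+ * Walks the queued tx descriptors and moves the TXDESFLG interrupt
+ * flag to the descriptor about three quarters of the way to the head,
+ * so that an interrupt fires once most of the queue has drained.
+ * Returns the (possibly scaled-down) count of queued descriptors.
+ */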
+static int
+spider_net_set_low_watermark(struct spider_net_card *card)
+{
+ struct spider_net_descr *descr = card->tx_chain.tail;
+ struct spider_net_hw_descr *hwdescr;
+ unsigned long flags;
+ int status;
+ int cnt=0;
+ int i;
+
+ /* Measure the length of the queue. Measurement does not
+ * need to be precise -- does not need a lock. */
+ while (descr != card->tx_chain.head) {
+ status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
+ if (status == SPIDER_NET_DESCR_NOT_IN_USE)
+ break;
+ descr = descr->next;
+ cnt++;
+ }
+
+ /* If TX queue is short, don't even bother with interrupts */
+ if (cnt < card->tx_chain.num_desc/4)
+ return cnt;
+
+	/* Set the low-watermark three quarters of the way into the queue. */
+ descr = card->tx_chain.tail;
+ cnt = (cnt*3)/4;
+ for (i=0;i<cnt; i++)
+ descr = descr->next;
+
+ /* Set the new watermark, clear the old watermark */
+ spin_lock_irqsave(&card->tx_chain.lock, flags);
+ descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
+ if (card->low_watermark && card->low_watermark != descr) {
+ hwdescr = card->low_watermark->hwdescr;
+ hwdescr->dmac_cmd_status =
+ hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
+ }
+ card->low_watermark = descr;
+ spin_unlock_irqrestore(&card->tx_chain.lock, flags);
+ return cnt;
+}
+
+/**
+ * spider_net_release_tx_chain - processes sent tx descriptors
+ * @card: adapter structure
+ * @brutal: if set, don't care about whether descriptor seems to be in use
+ *
+ * returns 0 if the tx ring is empty, otherwise 1.
+ *
+ * spider_net_release_tx_chain releases the tx descriptors that spider has
+ * finished with (if non-brutal) or simply releases all tx descriptors (if brutal).
+ * If some other context is calling this function, we return 1 so that we're
+ * scheduled again (if we were scheduled) and will not lose initiative.
+ */
+static int
+spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
+{
+ struct net_device *dev = card->netdev;
+ struct spider_net_descr_chain *chain = &card->tx_chain;
+ struct spider_net_descr *descr;
+ struct spider_net_hw_descr *hwdescr;
+ struct sk_buff *skb;
+ u32 buf_addr;
+ unsigned long flags;
+ int status;
+
+ while (1) {
+ spin_lock_irqsave(&chain->lock, flags);
+ if (chain->tail == chain->head) {
+ spin_unlock_irqrestore(&chain->lock, flags);
+ return 0;
+ }
+ descr = chain->tail;
+ hwdescr = descr->hwdescr;
+
+ status = spider_net_get_descr_status(hwdescr);
+ switch (status) {
+ case SPIDER_NET_DESCR_COMPLETE:
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += descr->skb->len;
+ break;
+
+ case SPIDER_NET_DESCR_CARDOWNED:
+ if (!brutal) {
+ spin_unlock_irqrestore(&chain->lock, flags);
+ return 1;
+ }
+
+ /* fallthrough, if we release the descriptors
+ * brutally (then we don't care about
+ * SPIDER_NET_DESCR_CARDOWNED) */
+
+ case SPIDER_NET_DESCR_RESPONSE_ERROR:
+ case SPIDER_NET_DESCR_PROTECTION_ERROR:
+ case SPIDER_NET_DESCR_FORCE_END:
+ if (netif_msg_tx_err(card))
+ dev_err(&card->netdev->dev, "forcing end of tx descriptor "
+ "with status x%02x\n", status);
+ dev->stats.tx_errors++;
+ break;
+
+ default:
+ dev->stats.tx_dropped++;
+ if (!brutal) {
+ spin_unlock_irqrestore(&chain->lock, flags);
+ return 1;
+ }
+ }
+
+ chain->tail = descr->next;
+ hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
+ skb = descr->skb;
+ descr->skb = NULL;
+ buf_addr = hwdescr->buf_addr;
+ spin_unlock_irqrestore(&chain->lock, flags);
+
+ /* unmap the skb */
+ if (skb) {
+ pci_unmap_single(card->pdev, buf_addr, skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ }
+ }
+ return 0;
+}
+
+/**
+ * spider_net_kick_tx_dma - enables TX DMA processing
+ * @card: card structure
+ *
+ * This routine will start the transmit DMA running if
+ * it is not already running. This routine need only be
+ * called when queueing a new packet to an empty tx queue.
+ * Writes the current tx chain head as start address
+ * of the tx descriptor chain and enables the transmission
+ * DMA engine.
+ */
+static inline void
+spider_net_kick_tx_dma(struct spider_net_card *card)
+{
+ struct spider_net_descr *descr;
+
+ if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
+ SPIDER_NET_TX_DMA_EN)
+ goto out;
+
+ descr = card->tx_chain.tail;
+ for (;;) {
+ if (spider_net_get_descr_status(descr->hwdescr) ==
+ SPIDER_NET_DESCR_CARDOWNED) {
+ spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
+ descr->bus_addr);
+ spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
+ SPIDER_NET_DMA_TX_VALUE);
+ break;
+ }
+ if (descr == card->tx_chain.head)
+ break;
+ descr = descr->next;
+ }
+
+out:
+ mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
+}
+
+/**
+ * spider_net_xmit - transmits a frame over the device
+ * @skb: packet to send out
+ * @netdev: interface device structure
+ *
+ * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure
+ */
+static int
+spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ int cnt;
+ struct spider_net_card *card = netdev_priv(netdev);
+
+ spider_net_release_tx_chain(card, 0);
+
+ if (spider_net_prepare_tx_descr(card, skb) != 0) {
+ netdev->stats.tx_dropped++;
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
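+ /* Set the low watermark, and kick the TX DMA engine directly
+ * only while few descriptors are queued; for longer queues the
+ * low-watermark descriptor's completion interrupt presumably
+ * restarts the engine instead. */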
+ cnt = spider_net_set_low_watermark(card);
+ if (cnt < 5)
+ spider_net_kick_tx_dma(card);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * spider_net_cleanup_tx_ring - cleans up the TX ring
+ * @card: card structure
+ *
+ * spider_net_cleanup_tx_ring is called by either the tx_timer
+ * or from the NAPI polling routine.
+ * This routine releases resources associated with transmitted
+ * packets, including updating the queue tail pointer.
+ */
+static void
+spider_net_cleanup_tx_ring(struct spider_net_card *card)
+{
+ if ((spider_net_release_tx_chain(card, 0) != 0) &&
+ (card->netdev->flags & IFF_UP)) {
+ spider_net_kick_tx_dma(card);
+ netif_wake_queue(card->netdev);
+ }
+}
+
+/**
+ * spider_net_do_ioctl - called for device ioctls
+ * @netdev: interface device structure
+ * @ifr: request parameter structure for ioctl
+ * @cmd: command code for ioctl
+ *
+ * returns 0 on success, <0 on failure. Currently, we have no special ioctls;
+ * -EOPNOTSUPP is returned if an unknown ioctl is requested
+ */
+static int
+spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
+ * @descr: descriptor to process
+ * @card: card structure
+ *
+ * Fills out skb structure and passes the data to the stack.
+ * The descriptor state is not changed.
+ */
+static void
+spider_net_pass_skb_up(struct spider_net_descr *descr,
+ struct spider_net_card *card)
+{
+ struct spider_net_hw_descr *hwdescr = descr->hwdescr;
+ struct sk_buff *skb = descr->skb;
+ struct net_device *netdev = card->netdev;
+ u32 data_status = hwdescr->data_status;
+ u32 data_error = hwdescr->data_error;
+
+ skb_put(skb, hwdescr->valid_size);
+
+ /* the card seems to add 2 bytes of junk in front
+ * of the ethernet frame */
+#define SPIDER_MISALIGN 2
+ skb_pull(skb, SPIDER_MISALIGN);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ /* checksum offload */
+ skb_checksum_none_assert(skb);
+ if (netdev->features & NETIF_F_RXCSUM) {
+ if (((data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
+ SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
+ !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+
+ if (data_status & (1 << SPIDER_NET_VLAN_PACKET)) {
+ /* further enhancements: HW-accel VLAN */
+ }
+
+ /* update netdevice statistics */
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += skb->len;
+
+ /* pass skb up to stack */
+ netif_receive_skb(skb);
+}
+
+static void show_rx_chain(struct spider_net_card *card)
+{
+ struct spider_net_descr_chain *chain = &card->rx_chain;
+ struct spider_net_descr *start = chain->tail;
+ struct spider_net_descr *descr = start;
+ struct spider_net_hw_descr *hwd = start->hwdescr;
+ struct device *dev = &card->netdev->dev;
+ u32 curr_desc, next_desc;
+ int status;
+
+ int tot = 0;
+ int cnt = 0;
+ int off = start - chain->ring;
+ int cstat = hwd->dmac_cmd_status;
+
+ dev_info(dev, "Total number of descrs=%d\n",
+ chain->num_desc);
+ dev_info(dev, "Chain tail located at descr=%d, status=0x%x\n",
+ off, cstat);
+
+ curr_desc = spider_net_read_reg(card, SPIDER_NET_GDACTDPA);
+ next_desc = spider_net_read_reg(card, SPIDER_NET_GDACNEXTDA);
+
+ status = cstat;
+ do {
+ hwd = descr->hwdescr;
+ off = descr - chain->ring;
+ status = hwd->dmac_cmd_status;
+
+ if (descr == chain->head)
+ dev_info(dev, "Chain head is at %d, head status=0x%x\n",
+ off, status);
+
+ if (curr_desc == descr->bus_addr)
+ dev_info(dev, "HW curr desc (GDACTDPA) is at %d, status=0x%x\n",
+ off, status);
+
+ if (next_desc == descr->bus_addr)
+ dev_info(dev, "HW next desc (GDACNEXTDA) is at %d, status=0x%x\n",
+ off, status);
+
+ if (hwd->next_descr_addr == 0)
+ dev_info(dev, "chain is cut at %d\n", off);
+
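+ /* status changed: report the run of descrs that shared the
+ * previous status; ring indices wrap modulo num_desc */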
+ if (cstat != status) {
+ int from = (chain->num_desc + off - cnt) % chain->num_desc;
+ int to = (chain->num_desc + off - 1) % chain->num_desc;
+ dev_info(dev, "Have %d (from %d to %d) descrs "
+ "with stat=0x%08x\n", cnt, from, to, cstat);
+ cstat = status;
+ cnt = 0;
+ }
+
+ cnt++;
+ tot++;
+ descr = descr->next;
+ } while (descr != start);
+
+ dev_info(dev, "Last %d descrs with stat=0x%08x "
+ "for a total of %d descrs\n", cnt, cstat, tot);
+
+#ifdef DEBUG
+ /* Now dump the whole ring */
+ descr = start;
+ do {
+ struct spider_net_hw_descr *hwd = descr->hwdescr;
+ status = spider_net_get_descr_status(hwd);
+ cnt = descr - chain->ring;
+ dev_info(dev, "Descr %d stat=0x%08x skb=%p\n",
+ cnt, status, descr->skb);
+ dev_info(dev, "bus addr=%08x buf addr=%08x sz=%d\n",
+ descr->bus_addr, hwd->buf_addr, hwd->buf_size);
+ dev_info(dev, "next=%08x result sz=%d valid sz=%d\n",
+ hwd->next_descr_addr, hwd->result_size,
+ hwd->valid_size);
+ dev_info(dev, "dmac=%08x data stat=%08x data err=%08x\n",
+ hwd->dmac_cmd_status, hwd->data_status,
+ hwd->data_error);
+ dev_info(dev, "\n");
+
+ descr = descr->next;
+ } while (descr != start);
+#endif
+
+}
+
+/**
+ * spider_net_resync_head_ptr - advance head ptr past empty descrs
+ * @card: card structure
+ *
+ * If the driver fails to keep up and empty the queue, then the
+ * hardware will run out of room to put incoming packets. This
+ * will cause the hardware to skip descrs that are full (instead
+ * of halting/retrying). Thus, once the driver runs again, it will
+ * need to "catch up" to where the hardware chain pointer is.
+ */
+static void spider_net_resync_head_ptr(struct spider_net_card *card)
+{
+ unsigned long flags;
+ struct spider_net_descr_chain *chain = &card->rx_chain;
+ struct spider_net_descr *descr;
+ int i, status;
+
+ /* Advance head pointer past any empty descrs */
+ descr = chain->head;
+ status = spider_net_get_descr_status(descr->hwdescr);
+
+ if (status == SPIDER_NET_DESCR_NOT_IN_USE)
+ return;
+
+ spin_lock_irqsave(&chain->lock, flags);
+
+ descr = chain->head;
+ status = spider_net_get_descr_status(descr->hwdescr);
+ for (i = 0; i < chain->num_desc; i++) {
+ if (status != SPIDER_NET_DESCR_CARDOWNED)
+ break;
+ descr = descr->next;
+ status = spider_net_get_descr_status(descr->hwdescr);
+ }
+ chain->head = descr;
+
+ spin_unlock_irqrestore(&chain->lock, flags);
+}
+
+static int spider_net_resync_tail_ptr(struct spider_net_card *card)
+{
+ struct spider_net_descr_chain *chain = &card->rx_chain;
+ struct spider_net_descr *descr;
+ int i, status;
+
+ /* Advance tail pointer past any empty and reaped descrs */
+ descr = chain->tail;
+ status = spider_net_get_descr_status(descr->hwdescr);
+
+ for (i = 0; i < chain->num_desc; i++) {
+ if ((status != SPIDER_NET_DESCR_CARDOWNED) &&
+ (status != SPIDER_NET_DESCR_NOT_IN_USE))
+ break;
+ descr = descr->next;
+ status = spider_net_get_descr_status(descr->hwdescr);
+ }
+ chain->tail = descr;
+
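+ /* Return 1 if no ready descr was found (the walk made no
+ * progress or wrapped the whole ring); spider_net_poll then
+ * completes NAPI instead of forcing another poll round. */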
+ if ((i == chain->num_desc) || (i == 0))
+ return 1;
+ return 0;
+}
+
+/**
+ * spider_net_decode_one_descr - processes an RX descriptor
+ * @card: card structure
+ *
+ * Returns 1 if a packet has been sent to the stack, otherwise 0.
+ *
+ * Processes an RX descriptor by iommu-unmapping the data buffer
+ * and passing the packet up to the stack. This function is called
+ * in softirq context, e.g. either bottom half from interrupt or
+ * NAPI polling context.
+ */
+static int
+spider_net_decode_one_descr(struct spider_net_card *card)
+{
+ struct net_device *dev = card->netdev;
+ struct spider_net_descr_chain *chain = &card->rx_chain;
+ struct spider_net_descr *descr = chain->tail;
+ struct spider_net_hw_descr *hwdescr = descr->hwdescr;
+ u32 hw_buf_addr;
+ int status;
+
+ status = spider_net_get_descr_status(hwdescr);
+
+ /* Nothing in the descriptor, or ring must be empty */
+ if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
+ (status == SPIDER_NET_DESCR_NOT_IN_USE))
+ return 0;
+
+ /* descriptor definitively used -- move on tail */
+ chain->tail = descr->next;
+
+ /* unmap descriptor */
+ hw_buf_addr = hwdescr->buf_addr;
+ hwdescr->buf_addr = 0xffffffff;
+ pci_unmap_single(card->pdev, hw_buf_addr,
+ SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
+
+ if ((status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
+ (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
+ (status == SPIDER_NET_DESCR_FORCE_END)) {
+ if (netif_msg_rx_err(card))
+ dev_err(&dev->dev,
+ "dropping RX descriptor with state %d\n", status);
+ dev->stats.rx_dropped++;
+ goto bad_desc;
+ }
+
+ if ((status != SPIDER_NET_DESCR_COMPLETE) &&
+ (status != SPIDER_NET_DESCR_FRAME_END)) {
+ if (netif_msg_rx_err(card))
+ dev_err(&card->netdev->dev,
+ "RX descriptor with unknown state %d\n", status);
+ card->spider_stats.rx_desc_unk_state++;
+ goto bad_desc;
+ }
+
+ /* The cases we'll throw away the packet immediately */
+ if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
+ if (netif_msg_rx_err(card))
+ dev_err(&card->netdev->dev,
+ "error in received descriptor found, "
+ "data_status=x%08x, data_error=x%08x\n",
+ hwdescr->data_status, hwdescr->data_error);
+ goto bad_desc;
+ }
+
+ if (hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_BAD_STATUS) {
+ dev_err(&card->netdev->dev, "bad status, cmd_status=x%08x\n",
+ hwdescr->dmac_cmd_status);
+ pr_err("buf_addr=x%08x\n", hw_buf_addr);
+ pr_err("buf_size=x%08x\n", hwdescr->buf_size);
+ pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
+ pr_err("result_size=x%08x\n", hwdescr->result_size);
+ pr_err("valid_size=x%08x\n", hwdescr->valid_size);
+ pr_err("data_status=x%08x\n", hwdescr->data_status);
+ pr_err("data_error=x%08x\n", hwdescr->data_error);
+ pr_err("which=%ld\n", descr - card->rx_chain.ring);
+
+ card->spider_stats.rx_desc_error++;
+ goto bad_desc;
+ }
+
+ /* Ok, we've got a packet in descr */
+ spider_net_pass_skb_up(descr, card);
+ descr->skb = NULL;
+ hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
+ return 1;
+
+bad_desc:
+ if (netif_msg_rx_err(card))
+ show_rx_chain(card);
+ dev_kfree_skb_irq(descr->skb);
+ descr->skb = NULL;
+ hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
+ return 0;
+}
+
+/**
+ * spider_net_poll - NAPI poll function called by the stack to return packets
+ * @napi: napi structure, embedded in the card structure
+ * @budget: number of packets we can pass to the stack at most
+ *
+ * Returns the number of packets processed, at most @budget.
+ *
+ * spider_net_poll passes received packets from the rx descriptors to the
+ * stack (using netif_receive_skb). If fewer than @budget packets were
+ * processed, the driver completes NAPI, re-enables interrupts and
+ * returns the count.
+ */
+static int spider_net_poll(struct napi_struct *napi, int budget)
+{
+ struct spider_net_card *card = container_of(napi, struct spider_net_card, napi);
+ int packets_done = 0;
+
+ while (packets_done < budget) {
+ if (!spider_net_decode_one_descr(card))
+ break;
+
+ packets_done++;
+ }
+
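+ /* No packets were decoded, yet RX interrupts fired: the
+ * hardware may have run past our tail (see
+ * spider_net_resync_head_ptr), so re-sync both chain
+ * pointers before giving up. */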
+ if ((packets_done == 0) && (card->num_rx_ints != 0)) {
+ if (!spider_net_resync_tail_ptr(card))
+ packets_done = budget;
+ spider_net_resync_head_ptr(card);
+ }
+ card->num_rx_ints = 0;
+
+ spider_net_refill_rx_chain(card);
+ spider_net_enable_rxdmac(card);
+
+ spider_net_cleanup_tx_ring(card);
+
+ /* if all packets are in the stack, enable interrupts and return 0 */
+ /* if not, return 1 */
+ if (packets_done < budget) {
+ napi_complete(napi);
+ spider_net_rx_irq_on(card);
+ card->ignore_rx_ramfull = 0;
+ }
+
+ return packets_done;
+}
+
+/**
+ * spider_net_change_mtu - changes the MTU of an interface
+ * @netdev: interface device structure
+ * @new_mtu: new MTU value
+ *
+ * returns 0 on success, <0 on failure
+ */
+static int
+spider_net_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ /* no need to re-allocate skbs -- the max mtu is about 2.3k
+ * and the mtu only affects outbound traffic anyway */
+ if ((new_mtu < SPIDER_NET_MIN_MTU) ||
+ (new_mtu > SPIDER_NET_MAX_MTU))
+ return -EINVAL;
+ netdev->mtu = new_mtu;
+ return 0;
+}
+
+/**
+ * spider_net_set_mac - sets the MAC of an interface
+ * @netdev: interface device structure
+ * @p: pointer to a sockaddr holding the new MAC address
+ *
+ * Returns 0 on success, <0 on failure. -EADDRNOTAVAIL is returned if the
+ * address is invalid or could not be verified after being written
+ */
+static int
+spider_net_set_mac(struct net_device *netdev, void *p)
+{
+ struct spider_net_card *card = netdev_priv(netdev);
+ u32 macl, macu, regvalue;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ /* switch off GMACTPE and GMACRPE */
+ regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
+ regvalue &= ~((1 << 5) | (1 << 6));
+ spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
+
+ /* write mac */
+ macu = (addr->sa_data[0]<<24) + (addr->sa_data[1]<<16) +
+ (addr->sa_data[2]<<8) + (addr->sa_data[3]);
+ macl = (addr->sa_data[4]<<8) + (addr->sa_data[5]);
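+ /* illustrative example: for MAC 00:11:22:33:44:55 this packs
+ * macu = 0x00112233 and macl = 0x4455 */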
+ spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
+ spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);
+
+ /* switch GMACTPE and GMACRPE back on */
+ regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
+ regvalue |= ((1 << 5) | (1 << 6));
+ spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
+
+ spider_net_set_promisc(card);
+
+ /* verify that the card accepted the new address */
+ if (spider_net_get_mac_address(netdev))
+ return -EADDRNOTAVAIL;
+ if (memcmp(netdev->dev_addr, addr->sa_data, netdev->addr_len))
+ return -EADDRNOTAVAIL;
+
+ return 0;
+}
+
+/**
+ * spider_net_link_reset - called on a PHY link interrupt
+ * @netdev: net device structure
+ *
+ * This is called when the PHY_LINK signal is asserted. For the blade this is
+ * not connected so we should never get here.
+ *
+ */
+static void
+spider_net_link_reset(struct net_device *netdev)
+{
+
+ struct spider_net_card *card = netdev_priv(netdev);
+
+ del_timer_sync(&card->aneg_timer);
+
+ /* clear interrupt, block further interrupts */
+ spider_net_write_reg(card, SPIDER_NET_GMACST,
+ spider_net_read_reg(card, SPIDER_NET_GMACST));
+ spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
+
+ /* reset phy and setup aneg */
+ card->aneg_count = 0;
+ card->medium = BCM54XX_COPPER;
+ spider_net_setup_aneg(card);
+ mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
+
+}
+
+/**
+ * spider_net_handle_error_irq - handles errors raised by an interrupt
+ * @card: card structure
+ * @status_reg: interrupt status register 0 (GHIINT0STS)
+ * @error_reg1: interrupt error register 1 (GHIINT1STS)
+ * @error_reg2: interrupt error register 2 (GHIINT2STS)
+ *
+ * spider_net_handle_error_irq treats or ignores all error conditions
+ * found when an interrupt is presented
+ */
+static void
+spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
+ u32 error_reg1, u32 error_reg2)
+{
+ u32 i;
+ int show_error = 1;
+
+ /* check GHIINT0STS ************************************/
+ if (status_reg)
+ for (i = 0; i < 32; i++)
+ if (status_reg & (1<<i))
+ switch (i)
+ {
+ /* let error_reg1 and error_reg2 evaluation decide what to do
+ case SPIDER_NET_PHYINT:
+ case SPIDER_NET_GMAC2INT:
+ case SPIDER_NET_GMAC1INT:
+ case SPIDER_NET_GFIFOINT:
+ case SPIDER_NET_DMACINT:
+ case SPIDER_NET_GSYSINT:
+ break; */
+
+ case SPIDER_NET_GIPSINT:
+ show_error = 0;
+ break;
+
+ case SPIDER_NET_GPWOPCMPINT:
+ /* PHY write operation completed */
+ show_error = 0;
+ break;
+ case SPIDER_NET_GPROPCMPINT:
+ /* PHY read operation completed */
+ /* we don't use semaphores, as we poll for the completion
+ * of the read operation in spider_net_read_phy. Should take
+ * about 50 us */
+ show_error = 0;
+ break;
+ case SPIDER_NET_GPWFFINT:
+ /* PHY command queue full */
+ if (netif_msg_intr(card))
+ dev_err(&card->netdev->dev, "PHY write queue full\n");
+ show_error = 0;
+ break;
+
+ /* case SPIDER_NET_GRMDADRINT: not used. print a message */
+ /* case SPIDER_NET_GRMARPINT: not used. print a message */
+ /* case SPIDER_NET_GRMMPINT: not used. print a message */
+
+ case SPIDER_NET_GDTDEN0INT:
+ /* someone has set TX_DMA_EN to 0 */
+ show_error = 0;
+ break;
+
+ case SPIDER_NET_GDDDEN0INT: /* fallthrough */
+ case SPIDER_NET_GDCDEN0INT: /* fallthrough */
+ case SPIDER_NET_GDBDEN0INT: /* fallthrough */
+ case SPIDER_NET_GDADEN0INT:
+ /* someone has set RX_DMA_EN to 0 */
+ show_error = 0;
+ break;
+
+ /* RX interrupts */
+ case SPIDER_NET_GDDFDCINT:
+ case SPIDER_NET_GDCFDCINT:
+ case SPIDER_NET_GDBFDCINT:
+ case SPIDER_NET_GDAFDCINT:
+ /* case SPIDER_NET_GDNMINT: not used. print a message */
+ /* case SPIDER_NET_GCNMINT: not used. print a message */
+ /* case SPIDER_NET_GBNMINT: not used. print a message */
+ /* case SPIDER_NET_GANMINT: not used. print a message */
+ /* case SPIDER_NET_GRFNMINT: not used. print a message */
+ show_error = 0;
+ break;
+
+ /* TX interrupts */
+ case SPIDER_NET_GDTFDCINT:
+ show_error = 0;
+ break;
+ case SPIDER_NET_GTTEDINT:
+ show_error = 0;
+ break;
+ case SPIDER_NET_GDTDCEINT:
+ /* chain end. If a descriptor should be sent, kick off
+ * tx dma
+ if (card->tx_chain.tail != card->tx_chain.head)
+ spider_net_kick_tx_dma(card);
+ */
+ show_error = 0;
+ break;
+
+ /* case SPIDER_NET_G1TMCNTINT: not used. print a message */
+ /* case SPIDER_NET_GFREECNTINT: not used. print a message */
+ }
+
+ /* check GHIINT1STS ************************************/
+ if (error_reg1)
+ for (i = 0; i < 32; i++)
+ if (error_reg1 & (1<<i))
+ switch (i)
+ {
+ case SPIDER_NET_GTMFLLINT:
+ /* TX RAM full may happen in normal operation;
+ * logging is not needed. */
+ show_error = 0;
+ break;
+ case SPIDER_NET_GRFDFLLINT: /* fallthrough */
+ case SPIDER_NET_GRFCFLLINT: /* fallthrough */
+ case SPIDER_NET_GRFBFLLINT: /* fallthrough */
+ case SPIDER_NET_GRFAFLLINT: /* fallthrough */
+ case SPIDER_NET_GRMFLLINT:
+ /* Could happen when rx chain is full */
+ if (card->ignore_rx_ramfull == 0) {
+ card->ignore_rx_ramfull = 1;
+ spider_net_resync_head_ptr(card);
+ spider_net_refill_rx_chain(card);
+ spider_net_enable_rxdmac(card);
+ card->num_rx_ints++;
+ napi_schedule(&card->napi);
+ }
+ show_error = 0;
+ break;
+
+ /* case SPIDER_NET_GTMSHTINT: problem, print a message */
+ case SPIDER_NET_GDTINVDINT:
+ /* all right, tx from previous descr ok */
+ show_error = 0;
+ break;
+
+ /* chain end */
+ case SPIDER_NET_GDDDCEINT: /* fallthrough */
+ case SPIDER_NET_GDCDCEINT: /* fallthrough */
+ case SPIDER_NET_GDBDCEINT: /* fallthrough */
+ case SPIDER_NET_GDADCEINT:
+ spider_net_resync_head_ptr(card);
+ spider_net_refill_rx_chain(card);
+ spider_net_enable_rxdmac(card);
+ card->num_rx_ints++;
+ napi_schedule(&card->napi);
+ show_error = 0;
+ break;
+
+ /* invalid descriptor */
+ case SPIDER_NET_GDDINVDINT: /* fallthrough */
+ case SPIDER_NET_GDCINVDINT: /* fallthrough */
+ case SPIDER_NET_GDBINVDINT: /* fallthrough */
+ case SPIDER_NET_GDAINVDINT:
+ /* Could happen when rx chain is full */
+ spider_net_resync_head_ptr(card);
+ spider_net_refill_rx_chain(card);
+ spider_net_enable_rxdmac(card);
+ card->num_rx_ints++;
+ napi_schedule(&card->napi);
+ show_error = 0;
+ break;
+
+ /* case SPIDER_NET_GDTRSERINT: problem, print a message */
+ /* case SPIDER_NET_GDDRSERINT: problem, print a message */
+ /* case SPIDER_NET_GDCRSERINT: problem, print a message */
+ /* case SPIDER_NET_GDBRSERINT: problem, print a message */
+ /* case SPIDER_NET_GDARSERINT: problem, print a message */
+ /* case SPIDER_NET_GDSERINT: problem, print a message */
+ /* case SPIDER_NET_GDTPTERINT: problem, print a message */
+ /* case SPIDER_NET_GDDPTERINT: problem, print a message */
+ /* case SPIDER_NET_GDCPTERINT: problem, print a message */
+ /* case SPIDER_NET_GDBPTERINT: problem, print a message */
+ /* case SPIDER_NET_GDAPTERINT: problem, print a message */
+ default:
+ show_error = 1;
+ break;
+ }
+
+ /* check GHIINT2STS ************************************/
+ if (error_reg2)
+ for (i = 0; i < 32; i++)
+ if (error_reg2 & (1<<i))
+ switch (i)
+ {
+ /* there is nothing we can (want to) do at this time. Log a
+ * message, we can switch on and off the specific values later on
+ case SPIDER_NET_GPROPERINT:
+ case SPIDER_NET_GMCTCRSNGINT:
+ case SPIDER_NET_GMCTLCOLINT:
+ case SPIDER_NET_GMCTTMOTINT:
+ case SPIDER_NET_GMCRCAERINT:
+ case SPIDER_NET_GMCRCALERINT:
+ case SPIDER_NET_GMCRALNERINT:
+ case SPIDER_NET_GMCROVRINT:
+ case SPIDER_NET_GMCRRNTINT:
+ case SPIDER_NET_GMCRRXERINT:
+ case SPIDER_NET_GTITCSERINT:
+ case SPIDER_NET_GTIFMTERINT:
+ case SPIDER_NET_GTIPKTRVKINT:
+ case SPIDER_NET_GTISPINGINT:
+ case SPIDER_NET_GTISADNGINT:
+ case SPIDER_NET_GTISPDNGINT:
+ case SPIDER_NET_GRIFMTERINT:
+ case SPIDER_NET_GRIPKTRVKINT:
+ case SPIDER_NET_GRISPINGINT:
+ case SPIDER_NET_GRISADNGINT:
+ case SPIDER_NET_GRISPDNGINT:
+ break;
+ */
+ default:
+ break;
+ }
+
+ if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
+ dev_err(&card->netdev->dev, "Error interrupt, GHIINT0STS = 0x%08x, "
+ "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
+ status_reg, error_reg1, error_reg2);
+
+ /* clear interrupt sources */
+ spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
+ spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
+}
+
+/**
+ * spider_net_interrupt - interrupt handler for spider_net
+ * @irq: interrupt number
+ * @ptr: pointer to net_device
+ *
+ * returns IRQ_HANDLED, if interrupt was for driver, or IRQ_NONE, if no
+ * interrupt found raised by card.
+ *
+ * This is the interrupt handler, that turns off
+ * interrupts for this device and makes the stack poll the driver
+ */
+static irqreturn_t
+spider_net_interrupt(int irq, void *ptr)
+{
+ struct net_device *netdev = ptr;
+ struct spider_net_card *card = netdev_priv(netdev);
+ u32 status_reg, error_reg1, error_reg2;
+
+ status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
+ error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
+ error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);
+
+ if (!(status_reg & SPIDER_NET_INT0_MASK_VALUE) &&
+ !(error_reg1 & SPIDER_NET_INT1_MASK_VALUE) &&
+ !(error_reg2 & SPIDER_NET_INT2_MASK_VALUE))
+ return IRQ_NONE;
+
+ if (status_reg & SPIDER_NET_RXINT) {
+ spider_net_rx_irq_off(card);
+ napi_schedule(&card->napi);
+ card->num_rx_ints++;
+ }
+ if (status_reg & SPIDER_NET_TXINT)
+ napi_schedule(&card->napi);
+
+ if (status_reg & SPIDER_NET_LINKINT)
+ spider_net_link_reset(netdev);
+
+ if (status_reg & SPIDER_NET_ERRINT)
+ spider_net_handle_error_irq(card, status_reg,
+ error_reg1, error_reg2);
+
+ /* clear interrupt sources */
+ spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * spider_net_poll_controller - artificial interrupt for netconsole etc.
+ * @netdev: interface device structure
+ *
+ * see Documentation/networking/netconsole.txt
+ */
+static void
+spider_net_poll_controller(struct net_device *netdev)
+{
+ disable_irq(netdev->irq);
+ spider_net_interrupt(netdev->irq, netdev);
+ enable_irq(netdev->irq);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+/**
+ * spider_net_enable_interrupts - enable interrupts
+ * @card: card structure
+ *
+ * spider_net_enable_interrupts enables several interrupts
+ */
+static void
+spider_net_enable_interrupts(struct spider_net_card *card)
+{
+ spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK,
+ SPIDER_NET_INT0_MASK_VALUE);
+ spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK,
+ SPIDER_NET_INT1_MASK_VALUE);
+ spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
+ SPIDER_NET_INT2_MASK_VALUE);
+}
+
+/**
+ * spider_net_disable_interrupts - disable interrupts
+ * @card: card structure
+ *
+ * spider_net_disable_interrupts disables all the interrupts
+ */
+static void
+spider_net_disable_interrupts(struct spider_net_card *card)
+{
+ spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
+ spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
+ spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
+ spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
+}
+
+/**
+ * spider_net_init_card - initializes the card
+ * @card: card structure
+ *
+ * spider_net_init_card initializes the card so that other registers can
+ * be used
+ */
+static void
+spider_net_init_card(struct spider_net_card *card)
+{
+ spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
+ SPIDER_NET_CKRCTRL_STOP_VALUE);
+
+ spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
+ SPIDER_NET_CKRCTRL_RUN_VALUE);
+
+ /* trigger ETOMOD signal */
+ spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
+ spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4);
+
+ spider_net_disable_interrupts(card);
+}
+
+/**
+ * spider_net_enable_card - enables the card by setting all kinds of regs
+ * @card: card structure
+ *
+ * spider_net_enable_card sets a lot of SMMIO registers to enable the device
+ */
+static void
+spider_net_enable_card(struct spider_net_card *card)
+{
+ int i;
+ /* the following array consists of (register),(value) pairs
+ * that are set in this function. A register of 0 ends the list */
+ u32 regs[][2] = {
+ { SPIDER_NET_GRESUMINTNUM, 0 },
+ { SPIDER_NET_GREINTNUM, 0 },
+
+ /* set interrupt frame number registers */
+ /* clear the single DMA engine registers first */
+ { SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
+ { SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
+ { SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
+ { SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
+ /* then set what we really need */
+ { SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE },
+
+ /* timer counter registers and stuff */
+ { SPIDER_NET_GFREECNNUM, 0 },
+ { SPIDER_NET_GONETIMENUM, 0 },
+ { SPIDER_NET_GTOUTFRMNUM, 0 },
+
+ /* RX mode setting */
+ { SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE },
+ /* TX mode setting */
+ { SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE },
+ /* IPSEC mode setting */
+ { SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE },
+
+ { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
+
+ { SPIDER_NET_GMRWOLCTRL, 0 },
+ { SPIDER_NET_GTESTMD, 0x10000000 },
+ { SPIDER_NET_GTTQMSK, 0x00400040 },
+
+ { SPIDER_NET_GMACINTEN, 0 },
+
+ /* flow control stuff */
+ { SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE },
+ { SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE },
+
+ { SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE },
+ { 0, 0}
+ };
+
+ i = 0;
+ while (regs[i][0]) {
+ spider_net_write_reg(card, regs[i][0], regs[i][1]);
+ i++;
+ }
+
+ /* clear unicast filter table entries 1 to 14 */
+ for (i = 1; i <= 14; i++) {
+ spider_net_write_reg(card,
+ SPIDER_NET_GMRUAFILnR + i * 8,
+ 0x00080000);
+ spider_net_write_reg(card,
+ SPIDER_NET_GMRUAFILnR + i * 8 + 4,
+ 0x00000000);
+ }
+
+ spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000);
+
+ spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
+
+ /* set chain tail address for RX chains and
+ * enable DMA */
+ spider_net_enable_rxchtails(card);
+ spider_net_enable_rxdmac(card);
+
+ spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
+
+ spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
+ SPIDER_NET_LENLMT_VALUE);
+ spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
+ SPIDER_NET_OPMODE_VALUE);
+
+ spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
+ SPIDER_NET_GDTBSTA);
+}
+
+/**
+ * spider_net_download_firmware - loads firmware into the adapter
+ * @card: card structure
+ * @firmware_ptr: pointer to firmware data
+ *
+ * spider_net_download_firmware loads the firmware data into the
+ * adapter. It assumes the length etc. have already been validated.
+ */
+static int
+spider_net_download_firmware(struct spider_net_card *card,
+ const void *firmware_ptr)
+{
+ int sequencer, i;
+ const u32 *fw_ptr = firmware_ptr;
+
+ /* stop sequencers */
+ spider_net_write_reg(card, SPIDER_NET_GSINIT,
+ SPIDER_NET_STOP_SEQ_VALUE);
+
+ for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
+ sequencer++) {
+ spider_net_write_reg(card,
+ SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
+ for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
+ spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
+ sequencer * 8, *fw_ptr);
+ fw_ptr++;
+ }
+ }
+
+ if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
+ return -EIO;
+
+ spider_net_write_reg(card, SPIDER_NET_GSINIT,
+ SPIDER_NET_RUN_SEQ_VALUE);
+
+ return 0;
+}
+
+/**
+ * spider_net_init_firmware - reads in firmware parts
+ * @card: card structure
+ *
+ * Returns 0 on success, <0 on failure
+ *
+ * spider_net_init_firmware opens the sequencer firmware and does some basic
+ * checks. This function opens and releases the firmware structure. A call
+ * to download the firmware is performed before the release.
+ *
+ * Firmware format
+ * ===============
+ * spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being
+ * the program for each sequencer. Use the command
+ * tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \
+ * Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \
+ * Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin
+ *
+ * to generate spider_fw.bin, if you have sequencer programs with something
+ * like the following contents for each sequencer:
+ * <ONE LINE COMMENT>
+ * <FIRST 4-BYTES-WORD FOR SEQUENCER>
+ * <SECOND 4-BYTES-WORD FOR SEQUENCER>
+ * ...
+ * <1024th 4-BYTES-WORD FOR SEQUENCER>
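+ *
+ * The resulting file must be exactly SPIDER_NET_FIRMWARE_LEN bytes,
+ * i.e. 6 * 1024 * 4 = 24576 bytes; any other size is rejected here.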
+ */
+static int
+spider_net_init_firmware(struct spider_net_card *card)
+{
+ struct firmware *firmware = NULL;
+ struct device_node *dn;
+ const u8 *fw_prop = NULL;
+ int err = -ENOENT;
+ int fw_size;
+
+ if (request_firmware((const struct firmware **)&firmware,
+ SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
+ if (firmware->size != SPIDER_NET_FIRMWARE_LEN) {
+ if (netif_msg_probe(card))
+ dev_err(&card->netdev->dev,
+ "Incorrect size of spidernet firmware in "
+ "filesystem. Looking in host firmware...\n");
+ release_firmware(firmware);
+ goto try_host_fw;
+ }
+ err = spider_net_download_firmware(card, firmware->data);
+
+ release_firmware(firmware);
+ if (err)
+ goto try_host_fw;
+
+ goto done;
+ }
+
+try_host_fw:
+ dn = pci_device_to_OF_node(card->pdev);
+ if (!dn)
+ goto out_err;
+
+ fw_prop = of_get_property(dn, "firmware", &fw_size);
+ if (!fw_prop)
+ goto out_err;
+
+ if (fw_size != SPIDER_NET_FIRMWARE_LEN) {
+ if (netif_msg_probe(card))
+ dev_err(&card->netdev->dev,
+ "Incorrect size of spidernet firmware in host firmware\n");
+ goto done;
+ }
+
+ err = spider_net_download_firmware(card, fw_prop);
+
+done:
+ return err;
+out_err:
+ if (netif_msg_probe(card))
+ dev_err(&card->netdev->dev,
+ "Couldn't find spidernet firmware in filesystem " \
+ "or host firmware\n");
+ return err;
+}
+
+/**
+ * spider_net_open - called upon ifconfig up
+ * @netdev: interface device structure
+ *
+ * returns 0 on success, <0 on failure
+ *
+ * spider_net_open allocates all the descriptors and memory needed for
+ * operation, sets up multicast list and enables interrupts
+ */
+int
+spider_net_open(struct net_device *netdev)
+{
+ struct spider_net_card *card = netdev_priv(netdev);
+ int result;
+
+ result = spider_net_init_firmware(card);
+ if (result)
+ goto init_firmware_failed;
+
+ /* start probing with copper */
+ card->aneg_count = 0;
+ card->medium = BCM54XX_COPPER;
+ spider_net_setup_aneg(card);
+ if (card->phy.def->phy_id)
+ mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
+
+ result = spider_net_init_chain(card, &card->tx_chain);
+ if (result)
+ goto alloc_tx_failed;
+ card->low_watermark = NULL;
+
+ result = spider_net_init_chain(card, &card->rx_chain);
+ if (result)
+ goto alloc_rx_failed;
+
+ /* Allocate rx skbs */
+ if (spider_net_alloc_rx_skbs(card))
+ goto alloc_skbs_failed;
+
+ spider_net_set_multi(netdev);
+
+ /* further enhancement: setup hw vlan, if needed */
+
+ result = -EBUSY;
+ if (request_irq(netdev->irq, spider_net_interrupt,
+ IRQF_SHARED, netdev->name, netdev))
+ goto register_int_failed;
+
+ spider_net_enable_card(card);
+
+ netif_start_queue(netdev);
+ netif_carrier_on(netdev);
+ napi_enable(&card->napi);
+
+ spider_net_enable_interrupts(card);
+
+ return 0;
+
+register_int_failed:
+ spider_net_free_rx_chain_contents(card);
+alloc_skbs_failed:
+ spider_net_free_chain(card, &card->rx_chain);
+alloc_rx_failed:
+ spider_net_free_chain(card, &card->tx_chain);
+alloc_tx_failed:
+ del_timer_sync(&card->aneg_timer);
+init_firmware_failed:
+ return result;
+}
+
+/**
+ * spider_net_link_phy - timer callback for PHY auto-negotiation
+ * @data: used for pointer to card structure
+ *
+ * Polls the link state; if the link has not come up after
+ * SPIDER_NET_ANEG_TIMEOUT tries, the medium is cycled between
+ * copper and fiber.
+ */
+static void spider_net_link_phy(unsigned long data)
+{
+ struct spider_net_card *card = (struct spider_net_card *)data;
+ struct mii_phy *phy = &card->phy;
+
+ /* if the link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, set up the phy again */
+ if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
+
+ pr_debug("%s: link is down trying to bring it up\n",
+ card->netdev->name);
+
+ switch (card->medium) {
+ case BCM54XX_COPPER:
+ /* enable fiber with autonegotiation first */
+ if (phy->def->ops->enable_fiber)
+ phy->def->ops->enable_fiber(phy, 1);
+ card->medium = BCM54XX_FIBER;
+ break;
+
+ case BCM54XX_FIBER:
+ /* fiber didn't come up, try to disable fiber autoneg */
+ if (phy->def->ops->enable_fiber)
+ phy->def->ops->enable_fiber(phy, 0);
+ card->medium = BCM54XX_UNKNOWN;
+ break;
+
+ case BCM54XX_UNKNOWN:
+ /* copper and fiber (with and without autoneg)
+ * failed, so retry from the beginning */
+ spider_net_setup_aneg(card);
+ card->medium = BCM54XX_COPPER;
+ break;
+ }
+
+ card->aneg_count = 0;
+ mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
+ return;
+ }
+
+ /* link still not up, try again later */
+ if (!(phy->def->ops->poll_link(phy))) {
+ card->aneg_count++;
+ mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
+ return;
+ }
+
+ /* link came up, get abilities */
+ phy->def->ops->read_link(phy);
+
+ spider_net_write_reg(card, SPIDER_NET_GMACST,
+ spider_net_read_reg(card, SPIDER_NET_GMACST));
+ spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4);
+
+ if (phy->speed == 1000)
+ spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001);
+ else
+ spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0);
+
+ card->aneg_count = 0;
+
+ pr_info("%s: link up, %i Mbps, %s-duplex %sautoneg.\n",
+ card->netdev->name, phy->speed,
+ phy->duplex == 1 ? "Full" : "Half",
+ phy->autoneg == 1 ? "" : "no ");
+}
+
+/**
+ * spider_net_setup_phy - setup PHY
+ * @card: card structure
+ *
+ * returns 0 on success, <0 on failure
+ *
+ * spider_net_setup_phy is used as part of spider_net_probe.
+ **/
+static int
+spider_net_setup_phy(struct spider_net_card *card)
+{
+ struct mii_phy *phy = &card->phy;
+
+ spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
+ SPIDER_NET_DMASEL_VALUE);
+ spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
+ SPIDER_NET_PHY_CTRL_VALUE);
+
+ phy->dev = card->netdev;
+ phy->mdio_read = spider_net_read_phy;
+ phy->mdio_write = spider_net_write_phy;
+
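+ /* probe MII addresses 1..31 for a responding PHY; BMSR reads
+ * as all-zeroes or all-ones when nothing answers */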
+ for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) {
+ unsigned short id;
+ id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
+ if (id != 0x0000 && id != 0xffff) {
+ if (!sungem_phy_probe(phy, phy->mii_id)) {
+ pr_info("Found %s.\n", phy->def->name);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * spider_net_workaround_rxramfull - work around firmware bug
+ * @card: card structure
+ *
+ * no return value
+ **/
+static void
+spider_net_workaround_rxramfull(struct spider_net_card *card)
+{
+ int i, sequencer = 0;
+
+ /* cancel reset */
+ spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
+ SPIDER_NET_CKRCTRL_RUN_VALUE);
+
+ /* empty sequencer data */
+ for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
+ sequencer++) {
+ spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
+ sequencer * 8, 0x0);
+ for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
+ spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
+ sequencer * 8, 0x0);
+ }
+ }
+
+ /* set sequencer operation */
+ spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe);
+
+ /* reset */
+ spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
+ SPIDER_NET_CKRCTRL_STOP_VALUE);
+}
+
+/**
+ * spider_net_stop - called upon ifconfig down
+ * @netdev: interface device structure
+ *
+ * always returns 0
+ */
+int
+spider_net_stop(struct net_device *netdev)
+{
+ struct spider_net_card *card = netdev_priv(netdev);
+
+ napi_disable(&card->napi);
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ del_timer_sync(&card->tx_timer);
+ del_timer_sync(&card->aneg_timer);
+
+ spider_net_disable_interrupts(card);
+
+ free_irq(netdev->irq, netdev);
+
+ spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
+ SPIDER_NET_DMA_TX_FEND_VALUE);
+
+ /* turn off DMA, force end */
+ spider_net_disable_rxdmac(card);
+
+ /* release chains */
+ spider_net_release_tx_chain(card, 1);
+ spider_net_free_rx_chain_contents(card);
+
+ spider_net_free_chain(card, &card->tx_chain);
+ spider_net_free_chain(card, &card->rx_chain);
+
+ return 0;
+}
+
+/**
+ * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
+ * function (runs in process context, not in interrupt context)
+ * @work: embedded work structure, used to look up the card structure
+ *
+ * called as task when tx hangs, resets interface (if interface is up)
+ */
+static void
+spider_net_tx_timeout_task(struct work_struct *work)
+{
+ struct spider_net_card *card =
+ container_of(work, struct spider_net_card, tx_timeout_task);
+ struct net_device *netdev = card->netdev;
+
+ if (!(netdev->flags & IFF_UP))
+ goto out;
+
+ netif_device_detach(netdev);
+ spider_net_stop(netdev);
+
+ spider_net_workaround_rxramfull(card);
+ spider_net_init_card(card);
+
+ if (spider_net_setup_phy(card))
+ goto out;
+
+ spider_net_open(netdev);
+ spider_net_kick_tx_dma(card);
+ netif_device_attach(netdev);
+
+out:
+ atomic_dec(&card->tx_timeout_task_counter);
+}
+
+/**
+ * spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
+ * @netdev: interface device structure
+ *
+ * called if tx hangs; schedules a task that resets the interface
+ */
+static void
+spider_net_tx_timeout(struct net_device *netdev)
+{
+ struct spider_net_card *card;
+
+ card = netdev_priv(netdev);
+ atomic_inc(&card->tx_timeout_task_counter);
+ if (netdev->flags & IFF_UP)
+ schedule_work(&card->tx_timeout_task);
+ else
+ atomic_dec(&card->tx_timeout_task_counter);
+ card->spider_stats.tx_timeouts++;
+}
+
+static const struct net_device_ops spider_net_ops = {
+ .ndo_open = spider_net_open,
+ .ndo_stop = spider_net_stop,
+ .ndo_start_xmit = spider_net_xmit,
+ .ndo_set_rx_mode = spider_net_set_multi,
+ .ndo_set_mac_address = spider_net_set_mac,
+ .ndo_change_mtu = spider_net_change_mtu,
+ .ndo_do_ioctl = spider_net_do_ioctl,
+ .ndo_tx_timeout = spider_net_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ /* HW VLAN */
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ /* poll controller */
+ .ndo_poll_controller = spider_net_poll_controller,
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+};
+
+/**
+ * spider_net_setup_netdev_ops - initialization of net_device operations
+ * @netdev: net_device structure
+ *
+ * fills out function pointers in the net_device structure
+ */
+static void
+spider_net_setup_netdev_ops(struct net_device *netdev)
+{
+ netdev->netdev_ops = &spider_net_ops;
+ netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT;
+ /* ethtool ops */
+ netdev->ethtool_ops = &spider_net_ethtool_ops;
+}
+
+/**
+ * spider_net_setup_netdev - initialization of net_device
+ * @card: card structure
+ *
+ * Returns 0 on success or <0 on failure
+ *
+ * spider_net_setup_netdev initializes the net_device structure
+ **/
+static int
+spider_net_setup_netdev(struct spider_net_card *card)
+{
+ int result;
+ struct net_device *netdev = card->netdev;
+ struct device_node *dn;
+ struct sockaddr addr;
+ const u8 *mac;
+
+ SET_NETDEV_DEV(netdev, &card->pdev->dev);
+
+ pci_set_drvdata(card->pdev, netdev);
+
+ init_timer(&card->tx_timer);
+ card->tx_timer.function =
+ (void (*)(unsigned long)) spider_net_cleanup_tx_ring;
+ card->tx_timer.data = (unsigned long) card;
+ netdev->irq = card->pdev->irq;
+
+ card->aneg_count = 0;
+ init_timer(&card->aneg_timer);
+ card->aneg_timer.function = spider_net_link_phy;
+ card->aneg_timer.data = (unsigned long) card;
+
+ netif_napi_add(netdev, &card->napi,
+ spider_net_poll, SPIDER_NET_NAPI_WEIGHT);
+
+ spider_net_setup_netdev_ops(netdev);
+
+ netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
+ if (SPIDER_NET_RX_CSUM_DEFAULT)
+ netdev->features |= NETIF_F_RXCSUM;
+ netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX;
+ /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+ * NETIF_F_HW_VLAN_FILTER */
+
+ netdev->irq = card->pdev->irq;
+ card->num_rx_ints = 0;
+ card->ignore_rx_ramfull = 0;
+
+ dn = pci_device_to_OF_node(card->pdev);
+ if (!dn)
+ return -EIO;
+
+ mac = of_get_property(dn, "local-mac-address", NULL);
+ if (!mac)
+ return -EIO;
+ memcpy(addr.sa_data, mac, ETH_ALEN);
+
+ result = spider_net_set_mac(netdev, &addr);
+ if ((result) && (netif_msg_probe(card)))
+ dev_err(&card->netdev->dev,
+ "Failed to set MAC address: %i\n", result);
+
+ result = register_netdev(netdev);
+ if (result) {
+ if (netif_msg_probe(card))
+ dev_err(&card->netdev->dev,
+ "Couldn't register net_device: %i\n", result);
+ return result;
+ }
+
+ if (netif_msg_probe(card))
+ pr_info("Initialized device %s.\n", netdev->name);
+
+ return 0;
+}
+
+/**
+ * spider_net_alloc_card - allocates net_device and card structure
+ *
+ * returns the card structure or NULL in case of errors
+ *
+ * the card and net_device structures are linked to each other
+ */
+static struct spider_net_card *
+spider_net_alloc_card(void)
+{
+ struct net_device *netdev;
+ struct spider_net_card *card;
+ size_t alloc_size;
+
+ alloc_size = sizeof(struct spider_net_card) +
+ (tx_descriptors + rx_descriptors) * sizeof(struct spider_net_descr);
+ netdev = alloc_etherdev(alloc_size);
+ if (!netdev)
+ return NULL;
+
+ card = netdev_priv(netdev);
+ card->netdev = netdev;
+ card->msg_enable = SPIDER_NET_DEFAULT_MSG;
+ INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
+ init_waitqueue_head(&card->waitq);
+ atomic_set(&card->tx_timeout_task_counter, 0);
+
+ card->rx_chain.num_desc = rx_descriptors;
+ card->rx_chain.ring = card->darray;
+ card->tx_chain.num_desc = tx_descriptors;
+ card->tx_chain.ring = card->darray + rx_descriptors;
+
+ return card;
+}
+
+/**
+ * spider_net_undo_pci_setup - releases PCI resources
+ * @card: card structure
+ *
+ * spider_net_undo_pci_setup releases the mapped regions
+ */
+static void
+spider_net_undo_pci_setup(struct spider_net_card *card)
+{
+ iounmap(card->regs);
+ pci_release_regions(card->pdev);
+}
+
+/**
+ * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
+ * @pdev: PCI device
+ *
+ * Returns the card structure or NULL if any errors occur
+ *
+ * spider_net_setup_pci_dev initializes pdev and, together with the
+ * functions called in spider_net_open, configures the device so that
+ * data can be transferred over it.
+ * The net_device structure is attached to the card structure, if the
+ * function returns without error.
+ **/
+static struct spider_net_card *
+spider_net_setup_pci_dev(struct pci_dev *pdev)
+{
+ struct spider_net_card *card;
+ unsigned long mmio_start, mmio_len;
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "Couldn't enable PCI device\n");
+ return NULL;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev,
+ "Couldn't find proper PCI device base address.\n");
+ goto out_disable_dev;
+ }
+
+ if (pci_request_regions(pdev, spider_net_driver_name)) {
+ dev_err(&pdev->dev,
+ "Couldn't obtain PCI resources, aborting.\n");
+ goto out_disable_dev;
+ }
+
+ pci_set_master(pdev);
+
+ card = spider_net_alloc_card();
+ if (!card) {
+ dev_err(&pdev->dev,
+ "Couldn't allocate net_device structure, aborting.\n");
+ goto out_release_regions;
+ }
+ card->pdev = pdev;
+
+ /* fetch base address and length of first resource */
+ mmio_start = pci_resource_start(pdev, 0);
+ mmio_len = pci_resource_len(pdev, 0);
+
+ card->netdev->mem_start = mmio_start;
+ card->netdev->mem_end = mmio_start + mmio_len;
+ card->regs = ioremap(mmio_start, mmio_len);
+
+ if (!card->regs) {
+ dev_err(&pdev->dev,
+ "Couldn't ioremap MMIO registers, aborting.\n");
+ goto out_release_regions;
+ }
+
+ return card;
+
+out_release_regions:
+ pci_release_regions(pdev);
+out_disable_dev:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return NULL;
+}
+
+/**
+ * spider_net_probe - initialization of a device
+ * @pdev: PCI device
+ * @ent: entry in the device id list
+ *
+ * Returns 0 on success, <0 on failure
+ *
+ * spider_net_probe initializes pdev and registers a net_device
+ * structure for it. After that, the device can be ifconfig'ed up
+ **/
+static int __devinit
+spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err = -EIO;
+ struct spider_net_card *card;
+
+ card = spider_net_setup_pci_dev(pdev);
+ if (!card)
+ goto out;
+
+ spider_net_workaround_rxramfull(card);
+ spider_net_init_card(card);
+
+ err = spider_net_setup_phy(card);
+ if (err)
+ goto out_undo_pci;
+
+ err = spider_net_setup_netdev(card);
+ if (err)
+ goto out_undo_pci;
+
+ return 0;
+
+out_undo_pci:
+ spider_net_undo_pci_setup(card);
+ free_netdev(card->netdev);
+out:
+ return err;
+}
+
+/**
+ * spider_net_remove - removal of a device
+ * @pdev: PCI device
+ *
+ * spider_net_remove is called to remove the device and unregisters the
+ * net_device
+ **/
+static void __devexit
+spider_net_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev;
+ struct spider_net_card *card;
+
+ netdev = pci_get_drvdata(pdev);
+ card = netdev_priv(netdev);
+
+ wait_event(card->waitq,
+ atomic_read(&card->tx_timeout_task_counter) == 0);
+
+ unregister_netdev(netdev);
+
+ /* switch off card */
+ spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
+ SPIDER_NET_CKRCTRL_STOP_VALUE);
+ spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
+ SPIDER_NET_CKRCTRL_RUN_VALUE);
+
+ spider_net_undo_pci_setup(card);
+ free_netdev(netdev);
+}
+
+static struct pci_driver spider_net_driver = {
+ .name = spider_net_driver_name,
+ .id_table = spider_net_pci_tbl,
+ .probe = spider_net_probe,
+ .remove = __devexit_p(spider_net_remove)
+};
+
+/**
+ * spider_net_init - init function when the driver is loaded
+ *
+ * spider_net_init registers the device driver
+ */
+static int __init spider_net_init(void)
+{
+ printk(KERN_INFO "Spidernet version %s.\n", VERSION);
+
+ if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
+ rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
+ pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
+ }
+ if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) {
+ rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX;
+ pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
+ }
+ if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) {
+ tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN;
+ pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
+ }
+ if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) {
+ tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX;
+ pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
+ }
+
+ return pci_register_driver(&spider_net_driver);
+}
+
+/**
+ * spider_net_cleanup - exit function when driver is unloaded
+ *
+ * spider_net_cleanup unregisters the device driver
+ */
+static void __exit spider_net_cleanup(void)
+{
+ pci_unregister_driver(&spider_net_driver);
+}
+
+module_init(spider_net_init);
+module_exit(spider_net_cleanup);
diff --git a/drivers/net/ethernet/toshiba/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h
new file mode 100644
index 000000000000..4ba2135474d1
--- /dev/null
+++ b/drivers/net/ethernet/toshiba/spider_net.h
@@ -0,0 +1,489 @@
+/*
+ * Network device driver for Cell Processor-Based Blade and Celleb platform
+ *
+ * (C) Copyright IBM Corp. 2005
+ * (C) Copyright 2006 TOSHIBA CORPORATION
+ *
+ * Authors : Utz Bacher <utz.bacher@de.ibm.com>
+ * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _SPIDER_NET_H
+#define _SPIDER_NET_H
+
+#define VERSION "2.0 B"
+
+#include <linux/sungem_phy.h>
+
+extern int spider_net_stop(struct net_device *netdev);
+extern int spider_net_open(struct net_device *netdev);
+
+extern const struct ethtool_ops spider_net_ethtool_ops;
+
+extern char spider_net_driver_name[];
+
+#define SPIDER_NET_MAX_FRAME 2312
+#define SPIDER_NET_MAX_MTU 2294
+#define SPIDER_NET_MIN_MTU 64
+
+#define SPIDER_NET_RXBUF_ALIGN 128
+
+#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 256
+#define SPIDER_NET_RX_DESCRIPTORS_MIN 16
+#define SPIDER_NET_RX_DESCRIPTORS_MAX 512
+
+#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 256
+#define SPIDER_NET_TX_DESCRIPTORS_MIN 16
+#define SPIDER_NET_TX_DESCRIPTORS_MAX 512
+
+#define SPIDER_NET_TX_TIMER (HZ/5)
+#define SPIDER_NET_ANEG_TIMER (HZ)
+#define SPIDER_NET_ANEG_TIMEOUT 5
+
+#define SPIDER_NET_RX_CSUM_DEFAULT 1
+
+#define SPIDER_NET_WATCHDOG_TIMEOUT (50 * HZ)
+#define SPIDER_NET_NAPI_WEIGHT 64
+
+#define SPIDER_NET_FIRMWARE_SEQS 6
+#define SPIDER_NET_FIRMWARE_SEQWORDS 1024
+#define SPIDER_NET_FIRMWARE_LEN (SPIDER_NET_FIRMWARE_SEQS * \
+ SPIDER_NET_FIRMWARE_SEQWORDS * \
+ sizeof(u32))
+#define SPIDER_NET_FIRMWARE_NAME "spider_fw.bin"
+
+/** spider_net SMMIO registers */
+#define SPIDER_NET_GHIINT0STS 0x00000000
+#define SPIDER_NET_GHIINT1STS 0x00000004
+#define SPIDER_NET_GHIINT2STS 0x00000008
+#define SPIDER_NET_GHIINT0MSK 0x00000010
+#define SPIDER_NET_GHIINT1MSK 0x00000014
+#define SPIDER_NET_GHIINT2MSK 0x00000018
+
+#define SPIDER_NET_GRESUMINTNUM 0x00000020
+#define SPIDER_NET_GREINTNUM 0x00000024
+
+#define SPIDER_NET_GFFRMNUM 0x00000028
+#define SPIDER_NET_GFAFRMNUM 0x0000002c
+#define SPIDER_NET_GFBFRMNUM 0x00000030
+#define SPIDER_NET_GFCFRMNUM 0x00000034
+#define SPIDER_NET_GFDFRMNUM 0x00000038
+
+/* clear them (don't use them) */
+#define SPIDER_NET_GFREECNNUM 0x0000003c
+#define SPIDER_NET_GONETIMENUM 0x00000040
+
+#define SPIDER_NET_GTOUTFRMNUM 0x00000044
+
+#define SPIDER_NET_GTXMDSET 0x00000050
+#define SPIDER_NET_GPCCTRL 0x00000054
+#define SPIDER_NET_GRXMDSET 0x00000058
+#define SPIDER_NET_GIPSECINIT 0x0000005c
+#define SPIDER_NET_GFTRESTRT 0x00000060
+#define SPIDER_NET_GRXDMAEN 0x00000064
+#define SPIDER_NET_GMRWOLCTRL 0x00000068
+#define SPIDER_NET_GPCWOPCMD 0x0000006c
+#define SPIDER_NET_GPCROPCMD 0x00000070
+#define SPIDER_NET_GTTFRMCNT 0x00000078
+#define SPIDER_NET_GTESTMD 0x0000007c
+
+#define SPIDER_NET_GSINIT 0x00000080
+#define SPIDER_NET_GSnPRGADR 0x00000084
+#define SPIDER_NET_GSnPRGDAT 0x00000088
+
+#define SPIDER_NET_GMACOPEMD 0x00000100
+#define SPIDER_NET_GMACLENLMT 0x00000108
+#define SPIDER_NET_GMACST 0x00000110
+#define SPIDER_NET_GMACINTEN 0x00000118
+#define SPIDER_NET_GMACPHYCTRL 0x00000120
+
+#define SPIDER_NET_GMACAPAUSE 0x00000154
+#define SPIDER_NET_GMACTXPAUSE 0x00000164
+
+#define SPIDER_NET_GMACMODE 0x000001b0
+#define SPIDER_NET_GMACBSTLMT 0x000001b4
+
+#define SPIDER_NET_GMACUNIMACU 0x000001c0
+#define SPIDER_NET_GMACUNIMACL 0x000001c8
+
+#define SPIDER_NET_GMRMHFILnR 0x00000400
+#define SPIDER_NET_MULTICAST_HASHES 256
+
+#define SPIDER_NET_GMRUAFILnR 0x00000500
+#define SPIDER_NET_GMRUA0FIL15R 0x00000578
+
+#define SPIDER_NET_GTTQMSK 0x00000934
+
+/* RX DMA controller registers, all 0x00000a.. are for DMA controller A,
+ * 0x00000b.. for DMA controller B, etc. */
+#define SPIDER_NET_GDADCHA 0x00000a00
+#define SPIDER_NET_GDADMACCNTR 0x00000a04
+#define SPIDER_NET_GDACTDPA 0x00000a08
+#define SPIDER_NET_GDACTDCNT 0x00000a0c
+#define SPIDER_NET_GDACDBADDR 0x00000a20
+#define SPIDER_NET_GDACDBSIZE 0x00000a24
+#define SPIDER_NET_GDACNEXTDA 0x00000a28
+#define SPIDER_NET_GDACCOMST 0x00000a2c
+#define SPIDER_NET_GDAWBCOMST 0x00000a30
+#define SPIDER_NET_GDAWBRSIZE 0x00000a34
+#define SPIDER_NET_GDAWBVSIZE 0x00000a38
+#define SPIDER_NET_GDAWBTRST 0x00000a3c
+#define SPIDER_NET_GDAWBTRERR 0x00000a40
+
+/* TX DMA controller registers */
+#define SPIDER_NET_GDTDCHA 0x00000e00
+#define SPIDER_NET_GDTDMACCNTR 0x00000e04
+#define SPIDER_NET_GDTCDPA 0x00000e08
+#define SPIDER_NET_GDTDMASEL 0x00000e14
+
+#define SPIDER_NET_ECMODE 0x00000f00
+/* clock and reset control register */
+#define SPIDER_NET_CKRCTRL 0x00000ff0
+
+/** SCONFIG registers */
+#define SPIDER_NET_SCONFIG_IOACTE 0x00002810
+
+/** interrupt mask registers */
+#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7
+#define SPIDER_NET_INT1_MASK_VALUE 0x0000fff2
+#define SPIDER_NET_INT2_MASK_VALUE 0x000003f1
+
+/* we rely on flagged descriptor interrupts */
+#define SPIDER_NET_FRAMENUM_VALUE 0x00000000
+/* set this first, then the FRAMENUM_VALUE */
+#define SPIDER_NET_GFXFRAMES_VALUE 0x00000000
+
+#define SPIDER_NET_STOP_SEQ_VALUE 0x00000000
+#define SPIDER_NET_RUN_SEQ_VALUE 0x0000007e
+
+#define SPIDER_NET_PHY_CTRL_VALUE 0x00040040
+/* #define SPIDER_NET_PHY_CTRL_VALUE 0x01070080*/
+#define SPIDER_NET_RXMODE_VALUE 0x00000011
+/* auto retransmission in case of MAC aborts */
+#define SPIDER_NET_TXMODE_VALUE 0x00010000
+#define SPIDER_NET_RESTART_VALUE 0x00000000
+#define SPIDER_NET_WOL_VALUE 0x00001111
+#if 0
+#define SPIDER_NET_WOL_VALUE 0x00000000
+#endif
+#define SPIDER_NET_IPSECINIT_VALUE 0x6f716f71
+
+/* pause frames: automatic, no upper retransmission count */
+/* outside loopback mode: ETOMOD signal doesn't matter, not connected */
+/* ETOMOD signal is brought to PHY reset. bit 2 must be 1 in Celleb */
+#define SPIDER_NET_OPMODE_VALUE 0x00000067
+/*#define SPIDER_NET_OPMODE_VALUE 0x001b0062*/
+#define SPIDER_NET_LENLMT_VALUE 0x00000908
+
+#define SPIDER_NET_MACAPAUSE_VALUE 0x00000800 /* about 1 ms */
+#define SPIDER_NET_TXPAUSE_VALUE 0x00000000
+
+#define SPIDER_NET_MACMODE_VALUE 0x00000001
+#define SPIDER_NET_BURSTLMT_VALUE 0x00000200 /* about 16 us */
+
+/* DMAC control register GDMACCNTR
+ *
+ * 1(0) enable r/tx dma
+ * 0000000 fixed to 0
+ *
+ * 000000 fixed to 0
+ * 0(1) en/disable descr writeback on force end
+ * 0(1) force end
+ *
+ * 000000 fixed to 0
+ * 00 burst alignment: 128 bytes
+ * 11 burst alignment: 1024 bytes
+ *
+ * 00000 fixed to 0
+ * 0 descr writeback size 32 bytes
+ * 0(1) descr chain end interrupt enable
+ * 0(1) descr status writeback enable */
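+
+/* Worked example of the layout above: SPIDER_NET_DMA_TX_VALUE below is
+ * SPIDER_NET_TX_DMA_EN (0x80000000) | SPIDER_NET_GDTBSTA (0x00000300) |
+ * SPIDER_NET_GDTDCEIDIS (0x00000002) = 0x80000302, i.e. DMA enabled,
+ * 1024-byte burst alignment, and the GDTDCEIDIS bit set (its IDIS
+ * suffix suggests it disables the chain-end interrupt). */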
+
+/* to set RX_DMA_EN */
+#define SPIDER_NET_DMA_RX_VALUE 0x80000000
+#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003
+/* to set TX_DMA_EN */
+#define SPIDER_NET_TX_DMA_EN 0x80000000
+#define SPIDER_NET_GDTBSTA 0x00000300
+#define SPIDER_NET_GDTDCEIDIS 0x00000002
+#define SPIDER_NET_DMA_TX_VALUE (SPIDER_NET_TX_DMA_EN | \
+ SPIDER_NET_GDTDCEIDIS | \
+ SPIDER_NET_GDTBSTA)
+
+#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003
+
+/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */
+#define SPIDER_NET_UA_DESCR_VALUE 0x00080000
+#define SPIDER_NET_PROMISC_VALUE 0x00080000
+#define SPIDER_NET_NONPROMISC_VALUE 0x00000000
+
+#define SPIDER_NET_DMASEL_VALUE 0x00000001
+
+#define SPIDER_NET_ECMODE_VALUE 0x00000000
+
+#define SPIDER_NET_CKRCTRL_RUN_VALUE 0x1fff010f
+#define SPIDER_NET_CKRCTRL_STOP_VALUE 0x0000010f
+
+#define SPIDER_NET_SBIMSTATE_VALUE 0x00000000
+#define SPIDER_NET_SBTMSTATE_VALUE 0x00000000
+
+/* SPIDER_NET_GHIINT0STS bits, in reverse order so that they can be used
+ * with 1 << SPIDER_NET_... */
+enum spider_net_int0_status {
+ SPIDER_NET_GPHYINT = 0,
+ SPIDER_NET_GMAC2INT,
+ SPIDER_NET_GMAC1INT,
+ SPIDER_NET_GIPSINT,
+ SPIDER_NET_GFIFOINT,
+ SPIDER_NET_GDMACINT,
+ SPIDER_NET_GSYSINT,
+ SPIDER_NET_GPWOPCMPINT,
+ SPIDER_NET_GPROPCMPINT,
+ SPIDER_NET_GPWFFINT,
+ SPIDER_NET_GRMDADRINT,
+ SPIDER_NET_GRMARPINT,
+ SPIDER_NET_GRMMPINT,
+ SPIDER_NET_GDTDEN0INT,
+ SPIDER_NET_GDDDEN0INT,
+ SPIDER_NET_GDCDEN0INT,
+ SPIDER_NET_GDBDEN0INT,
+ SPIDER_NET_GDADEN0INT,
+ SPIDER_NET_GDTFDCINT,
+ SPIDER_NET_GDDFDCINT,
+ SPIDER_NET_GDCFDCINT,
+ SPIDER_NET_GDBFDCINT,
+ SPIDER_NET_GDAFDCINT,
+ SPIDER_NET_GTTEDINT,
+ SPIDER_NET_GDTDCEINT,
+ SPIDER_NET_GRFDNMINT,
+ SPIDER_NET_GRFCNMINT,
+ SPIDER_NET_GRFBNMINT,
+ SPIDER_NET_GRFANMINT,
+ SPIDER_NET_GRFNMINT,
+ SPIDER_NET_G1TMCNTINT,
+ SPIDER_NET_GFREECNTINT
+};
+/* GHIINT1STS bits */
+enum spider_net_int1_status {
+ SPIDER_NET_GTMFLLINT = 0,
+ SPIDER_NET_GRMFLLINT,
+ SPIDER_NET_GTMSHTINT,
+ SPIDER_NET_GDTINVDINT,
+ SPIDER_NET_GRFDFLLINT,
+ SPIDER_NET_GDDDCEINT,
+ SPIDER_NET_GDDINVDINT,
+ SPIDER_NET_GRFCFLLINT,
+ SPIDER_NET_GDCDCEINT,
+ SPIDER_NET_GDCINVDINT,
+ SPIDER_NET_GRFBFLLINT,
+ SPIDER_NET_GDBDCEINT,
+ SPIDER_NET_GDBINVDINT,
+ SPIDER_NET_GRFAFLLINT,
+ SPIDER_NET_GDADCEINT,
+ SPIDER_NET_GDAINVDINT,
+ SPIDER_NET_GDTRSERINT,
+ SPIDER_NET_GDDRSERINT,
+ SPIDER_NET_GDCRSERINT,
+ SPIDER_NET_GDBRSERINT,
+ SPIDER_NET_GDARSERINT,
+ SPIDER_NET_GDSERINT,
+ SPIDER_NET_GDTPTERINT,
+ SPIDER_NET_GDDPTERINT,
+ SPIDER_NET_GDCPTERINT,
+ SPIDER_NET_GDBPTERINT,
+ SPIDER_NET_GDAPTERINT
+};
+/* GHIINT2STS bits */
+enum spider_net_int2_status {
+ SPIDER_NET_GPROPERINT = 0,
+ SPIDER_NET_GMCTCRSNGINT,
+ SPIDER_NET_GMCTLCOLINT,
+ SPIDER_NET_GMCTTMOTINT,
+ SPIDER_NET_GMCRCAERINT,
+ SPIDER_NET_GMCRCALERINT,
+ SPIDER_NET_GMCRALNERINT,
+ SPIDER_NET_GMCROVRINT,
+ SPIDER_NET_GMCRRNTINT,
+ SPIDER_NET_GMCRRXERINT,
+ SPIDER_NET_GTITCSERINT,
+ SPIDER_NET_GTIFMTERINT,
+ SPIDER_NET_GTIPKTRVKINT,
+ SPIDER_NET_GTISPINGINT,
+ SPIDER_NET_GTISADNGINT,
+ SPIDER_NET_GTISPDNGINT,
+ SPIDER_NET_GRIFMTERINT,
+ SPIDER_NET_GRIPKTRVKINT,
+ SPIDER_NET_GRISPINGINT,
+ SPIDER_NET_GRISADNGINT,
+ SPIDER_NET_GRISPDNGINT
+};
+
+#define SPIDER_NET_TXINT (1 << SPIDER_NET_GDTFDCINT)
+
+/* We rely on flagged descriptor interrupts */
+#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) )
+
+#define SPIDER_NET_LINKINT ( 1 << SPIDER_NET_GMAC2INT )
+
+#define SPIDER_NET_ERRINT ( 0xffffffff & \
+ (~SPIDER_NET_TXINT) & \
+ (~SPIDER_NET_RXINT) & \
+ (~SPIDER_NET_LINKINT) )
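+
+/* Usage sketch (with a hypothetical 'status' word read from the chip):
+ * the enum values are bit positions, so a handler tests a source with
+ *      if (status & (1 << SPIDER_NET_GDAFDCINT))
+ * and SPIDER_NET_ERRINT above is simply everything except TX/RX/link. */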
+
+#define SPIDER_NET_GPREXEC 0x80000000
+#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
+
+#define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000
+#define SPIDER_NET_DMAC_TXFRMTL 0x00040000
+#define SPIDER_NET_DMAC_TCP 0x00020000
+#define SPIDER_NET_DMAC_UDP 0x00030000
+#define SPIDER_NET_TXDCEST 0x08000000
+
+#define SPIDER_NET_DESCR_RXFDIS 0x00000001
+#define SPIDER_NET_DESCR_RXDCEIS 0x00000002
+#define SPIDER_NET_DESCR_RXDEN0IS 0x00000004
+#define SPIDER_NET_DESCR_RXINVDIS 0x00000008
+#define SPIDER_NET_DESCR_RXRERRIS 0x00000010
+#define SPIDER_NET_DESCR_RXFDCIMS 0x00000100
+#define SPIDER_NET_DESCR_RXDCEIMS 0x00000200
+#define SPIDER_NET_DESCR_RXDEN0IMS 0x00000400
+#define SPIDER_NET_DESCR_RXINVDIMS 0x00000800
+#define SPIDER_NET_DESCR_RXRERRMIS 0x00001000
+#define SPIDER_NET_DESCR_UNUSED 0x077fe0e0
+
+#define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000
+#define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */
+#define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */
+#define SPIDER_NET_DESCR_PROTECTION_ERROR 0x20000000 /* used in rx and tx */
+#define SPIDER_NET_DESCR_FRAME_END 0x40000000 /* used in rx */
+#define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */
+#define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */
+#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000
+#define SPIDER_NET_DESCR_TXDESFLG 0x00800000
+
+#define SPIDER_NET_DESCR_BAD_STATUS (SPIDER_NET_DESCR_RXDEN0IS | \
+ SPIDER_NET_DESCR_RXRERRIS | \
+ SPIDER_NET_DESCR_RXDEN0IMS | \
+ SPIDER_NET_DESCR_RXINVDIMS | \
+ SPIDER_NET_DESCR_RXRERRMIS | \
+ SPIDER_NET_DESCR_UNUSED)
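+
+/* Sketch: the processing state lives in the top nibble of dmac_cmd_status,
+ * so an ownership test on a fetched 'status' word (hypothetical name) is
+ *      (status & SPIDER_NET_DESCR_IND_PROC_MASK) == SPIDER_NET_DESCR_CARDOWNED
+ */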
+
+/* Descriptor, as defined by the hardware */
+struct spider_net_hw_descr {
+ u32 buf_addr;
+ u32 buf_size;
+ u32 next_descr_addr;
+ u32 dmac_cmd_status;
+ u32 result_size;
+ u32 valid_size; /* all zeroes for tx */
+ u32 data_status;
+ u32 data_error; /* all zeroes for tx */
+} __attribute__((aligned(32)));
+
+struct spider_net_descr {
+ struct spider_net_hw_descr *hwdescr;
+ struct sk_buff *skb;
+ u32 bus_addr;
+ struct spider_net_descr *next;
+ struct spider_net_descr *prev;
+};
+
+struct spider_net_descr_chain {
+ spinlock_t lock;
+ struct spider_net_descr *head;
+ struct spider_net_descr *tail;
+ struct spider_net_descr *ring;
+ int num_desc;
+ struct spider_net_hw_descr *hwring;
+ dma_addr_t dma_addr;
+};
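+
+/* Each hardware descriptor in hwring is shadowed by one spider_net_descr
+ * in ring, which carries the in-flight skb and its DMA bus address; head
+ * and tail walk the circular next/prev chain. */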
+
+/* descriptor data_status bits */
+#define SPIDER_NET_RX_IPCHK 29
+#define SPIDER_NET_RX_TCPCHK 28
+#define SPIDER_NET_VLAN_PACKET 21
+#define SPIDER_NET_DATA_STATUS_CKSUM_MASK ( (1 << SPIDER_NET_RX_IPCHK) | \
+ (1 << SPIDER_NET_RX_TCPCHK) )
+
+/* descriptor data_error bits */
+#define SPIDER_NET_RX_IPCHKERR 27
+#define SPIDER_NET_RX_RXTCPCHKERR 28
+
+#define SPIDER_NET_DATA_ERR_CKSUM_MASK (1 << SPIDER_NET_RX_IPCHKERR)
+
+/* the cases where we do not pass the packet up to the stack.
+ * 0x701b8000 would be correct, but every packet gets that flag */
+#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000
+
+#define SPIDER_NET_DEFAULT_MSG ( NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR | \
+ NETIF_MSG_TX_QUEUED | \
+ NETIF_MSG_INTR | \
+ NETIF_MSG_TX_DONE | \
+ NETIF_MSG_RX_STATUS | \
+ NETIF_MSG_PKTDATA | \
+ NETIF_MSG_HW | \
+ NETIF_MSG_WOL )
+
+struct spider_net_extra_stats {
+ unsigned long rx_desc_error;
+ unsigned long tx_timeouts;
+ unsigned long alloc_rx_skb_error;
+ unsigned long rx_iommu_map_error;
+ unsigned long tx_iommu_map_error;
+ unsigned long rx_desc_unk_state;
+};
+
+struct spider_net_card {
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct mii_phy phy;
+
+ struct napi_struct napi;
+
+ int medium;
+
+ void __iomem *regs;
+
+ struct spider_net_descr_chain tx_chain;
+ struct spider_net_descr_chain rx_chain;
+ struct spider_net_descr *low_watermark;
+
+ int aneg_count;
+ struct timer_list aneg_timer;
+ struct timer_list tx_timer;
+ struct work_struct tx_timeout_task;
+ atomic_t tx_timeout_task_counter;
+ wait_queue_head_t waitq;
+ int num_rx_ints;
+ int ignore_rx_ramfull;
+
+ /* for ethtool */
+ int msg_enable;
+ struct spider_net_extra_stats spider_stats;
+
+ /* Must be last item in struct */
+ struct spider_net_descr darray[0];
+};
+
+#endif
diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
new file mode 100644
index 000000000000..9c288cd7d171
--- /dev/null
+++ b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
@@ -0,0 +1,179 @@
+/*
+ * Network device driver for Cell Processor-Based Blade
+ *
+ * (C) Copyright IBM Corp. 2005
+ *
+ * Authors : Utz Bacher <utz.bacher@de.ibm.com>
+ * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+
+#include "spider_net.h"
+
+
+static struct {
+ const char str[ETH_GSTRING_LEN];
+} ethtool_stats_keys[] = {
+ { "tx_packets" },
+ { "tx_bytes" },
+ { "rx_packets" },
+ { "rx_bytes" },
+ { "tx_errors" },
+ { "tx_dropped" },
+ { "rx_dropped" },
+ { "rx_descriptor_error" },
+ { "tx_timeouts" },
+ { "alloc_rx_skb_error" },
+ { "rx_iommu_map_error" },
+ { "tx_iommu_map_error" },
+ { "rx_desc_unk_state" },
+};
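+
+/* Note: the order of these keys must match the data[] indices filled in
+ * by spider_net_get_ethtool_stats() below. */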
+
+static int
+spider_net_ethtool_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct spider_net_card *card;
+ card = netdev_priv(netdev);
+
+ cmd->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE);
+ cmd->advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE);
+ cmd->port = PORT_FIBRE;
+ ethtool_cmd_speed_set(cmd, card->phy.speed);
+ cmd->duplex = DUPLEX_FULL;
+
+ return 0;
+}
+
+static void
+spider_net_ethtool_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct spider_net_card *card;
+ card = netdev_priv(netdev);
+
+ /* clear and fill out info */
+ memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
+ strncpy(drvinfo->driver, spider_net_driver_name, 32);
+ strncpy(drvinfo->version, VERSION, 32);
+ strcpy(drvinfo->fw_version, "no information");
+ strncpy(drvinfo->bus_info, pci_name(card->pdev), 32);
+}
+
+static void
+spider_net_ethtool_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wolinfo)
+{
+ /* no support for wol */
+ wolinfo->supported = 0;
+ wolinfo->wolopts = 0;
+}
+
+static u32
+spider_net_ethtool_get_msglevel(struct net_device *netdev)
+{
+ struct spider_net_card *card;
+ card = netdev_priv(netdev);
+ return card->msg_enable;
+}
+
+static void
+spider_net_ethtool_set_msglevel(struct net_device *netdev,
+ u32 level)
+{
+ struct spider_net_card *card;
+ card = netdev_priv(netdev);
+ card->msg_enable = level;
+}
+
+static int
+spider_net_ethtool_nway_reset(struct net_device *netdev)
+{
+ if (netif_running(netdev)) {
+ spider_net_stop(netdev);
+ spider_net_open(netdev);
+ }
+ return 0;
+}
+
+static void
+spider_net_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering)
+{
+ struct spider_net_card *card = netdev_priv(netdev);
+
+ ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
+ ering->tx_pending = card->tx_chain.num_desc;
+ ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
+ ering->rx_pending = card->rx_chain.num_desc;
+}
+
+static int spider_net_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(ethtool_stats_keys);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void spider_net_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct spider_net_card *card = netdev_priv(netdev);
+
+ data[0] = netdev->stats.tx_packets;
+ data[1] = netdev->stats.tx_bytes;
+ data[2] = netdev->stats.rx_packets;
+ data[3] = netdev->stats.rx_bytes;
+ data[4] = netdev->stats.tx_errors;
+ data[5] = netdev->stats.tx_dropped;
+ data[6] = netdev->stats.rx_dropped;
+ data[7] = card->spider_stats.rx_desc_error;
+ data[8] = card->spider_stats.tx_timeouts;
+ data[9] = card->spider_stats.alloc_rx_skb_error;
+ data[10] = card->spider_stats.rx_iommu_map_error;
+ data[11] = card->spider_stats.tx_iommu_map_error;
+ data[12] = card->spider_stats.rx_desc_unk_state;
+}
+
+static void spider_net_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
+}
+
+const struct ethtool_ops spider_net_ethtool_ops = {
+ .get_settings = spider_net_ethtool_get_settings,
+ .get_drvinfo = spider_net_ethtool_get_drvinfo,
+ .get_wol = spider_net_ethtool_get_wol,
+ .get_msglevel = spider_net_ethtool_get_msglevel,
+ .set_msglevel = spider_net_ethtool_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .nway_reset = spider_net_ethtool_nway_reset,
+ .get_ringparam = spider_net_ethtool_get_ringparam,
+ .get_strings = spider_net_get_strings,
+ .get_sset_count = spider_net_get_sset_count,
+ .get_ethtool_stats = spider_net_get_ethtool_stats,
+};
+
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
new file mode 100644
index 000000000000..71b785cd7563
--- /dev/null
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -0,0 +1,2228 @@
+/*
+ * tc35815.c: A TOSHIBA TC35815CF PCI 10/100Mbps ethernet driver for linux.
+ *
+ * Based on skeleton.c by Donald Becker.
+ *
+ * This driver is a replacement for an older, less maintained version.
+ * This is the header of the older version:
+ * -----<snip>-----
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * ahennessy@mvista.com
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ * static const char *version =
+ * "tc35815.c:v0.00 26/07/2000 by Toshiba Corporation\n";
+ * -----<snip>-----
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * (C) Copyright TOSHIBA CORPORATION 2004-2005
+ * All Rights Reserved.
+ */
+
+#define DRV_VERSION "1.39"
+static const char *version = "tc35815.c:v" DRV_VERSION "\n";
+#define MODNAME "tc35815"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/if_vlan.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/prefetch.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+enum tc35815_chiptype {
+ TC35815CF = 0,
+ TC35815_NWU,
+ TC35815_TX4939,
+};
+
+/* indexed by tc35815_chiptype, above */
+static const struct {
+ const char *name;
+} chip_info[] __devinitdata = {
+ { "TOSHIBA TC35815CF 10/100BaseTX" },
+ { "TOSHIBA TC35815 with Wake on LAN" },
+ { "TOSHIBA TC35815/TX4939" },
+};
+
+static DEFINE_PCI_DEVICE_TABLE(tc35815_pci_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF },
+ {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU },
+ {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 },
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, tc35815_pci_tbl);
+
+/* see MODULE_PARM_DESC */
+static struct tc35815_options {
+ int speed;
+ int duplex;
+} options;
+
+/*
+ * Registers
+ */
+struct tc35815_regs {
+ __u32 DMA_Ctl; /* 0x00 */
+ __u32 TxFrmPtr;
+ __u32 TxThrsh;
+ __u32 TxPollCtr;
+ __u32 BLFrmPtr;
+ __u32 RxFragSize;
+ __u32 Int_En;
+ __u32 FDA_Bas;
+ __u32 FDA_Lim; /* 0x20 */
+ __u32 Int_Src;
+ __u32 unused0[2];
+ __u32 PauseCnt;
+ __u32 RemPauCnt;
+ __u32 TxCtlFrmStat;
+ __u32 unused1;
+ __u32 MAC_Ctl; /* 0x40 */
+ __u32 CAM_Ctl;
+ __u32 Tx_Ctl;
+ __u32 Tx_Stat;
+ __u32 Rx_Ctl;
+ __u32 Rx_Stat;
+ __u32 MD_Data;
+ __u32 MD_CA;
+ __u32 CAM_Adr; /* 0x60 */
+ __u32 CAM_Data;
+ __u32 CAM_Ena;
+ __u32 PROM_Ctl;
+ __u32 PROM_Data;
+ __u32 Algn_Cnt;
+ __u32 CRC_Cnt;
+ __u32 Miss_Cnt;
+};
+
+/*
+ * Bit assignments
+ */
+/* DMA_Ctl bit assign ------------------------------------------------------- */
+#define DMA_RxAlign 0x00c00000 /* 1:Reception Alignment */
+#define DMA_RxAlign_1 0x00400000
+#define DMA_RxAlign_2 0x00800000
+#define DMA_RxAlign_3 0x00c00000
+#define DMA_M66EnStat 0x00080000 /* 1:66MHz Enable State */
+#define DMA_IntMask 0x00040000 /* 1:Interrupt mask */
+#define DMA_SWIntReq 0x00020000 /* 1:Software Interrupt request */
+#define DMA_TxWakeUp 0x00010000 /* 1:Transmit Wake Up */
+#define DMA_RxBigE 0x00008000 /* 1:Receive Big Endian */
+#define DMA_TxBigE 0x00004000 /* 1:Transmit Big Endian */
+#define DMA_TestMode 0x00002000 /* 1:Test Mode */
+#define DMA_PowrMgmnt 0x00001000 /* 1:Power Management */
+#define DMA_DmBurst_Mask 0x000001fc /* DMA Burst size */
+
+/* RxFragSize bit assign ---------------------------------------------------- */
+#define RxFrag_EnPack 0x00008000 /* 1:Enable Packing */
+#define RxFrag_MinFragMask 0x00000ffc /* Minimum Fragment */
+
+/* MAC_Ctl bit assign ------------------------------------------------------- */
+#define MAC_Link10 0x00008000 /* 1:Link Status 10Mbits */
+#define MAC_EnMissRoll 0x00002000 /* 1:Enable Missed Roll */
+#define MAC_MissRoll 0x00000400 /* 1:Missed Roll */
+#define MAC_Loop10 0x00000080 /* 1:Loop 10 Mbps */
+#define MAC_Conn_Auto 0x00000000 /*00:Connection mode (Automatic) */
+#define MAC_Conn_10M 0x00000020 /*01: (10Mbps endec)*/
+#define MAC_Conn_Mll 0x00000040 /*10: (MII clock) */
+#define MAC_MacLoop 0x00000010 /* 1:MAC Loopback */
+#define MAC_FullDup 0x00000008 /* 1:Full Duplex 0:Half Duplex */
+#define MAC_Reset 0x00000004 /* 1:Software Reset */
+#define MAC_HaltImm 0x00000002 /* 1:Halt Immediate */
+#define MAC_HaltReq 0x00000001 /* 1:Halt request */
+
+/* PROM_Ctl bit assign ------------------------------------------------------ */
+#define PROM_Busy 0x00008000 /* 1:Busy (Start Operation) */
+#define PROM_Read 0x00004000 /*10:Read operation */
+#define PROM_Write 0x00002000 /*01:Write operation */
+#define PROM_Erase 0x00006000 /*11:Erase operation */
+ /*00:Enable or Disable Writing, */
+ /* as specified in PROM_Addr. */
+#define PROM_Addr_Ena 0x00000030 /*11xxxx:PROM Write enable */
+ /*00xxxx: disable */
+
+/* CAM_Ctl bit assign ------------------------------------------------------- */
+#define CAM_CompEn 0x00000010 /* 1:CAM Compare Enable */
+#define CAM_NegCAM 0x00000008 /* 1:Reject packets CAM recognizes,*/
+ /* accept other */
+#define CAM_BroadAcc 0x00000004 /* 1:Broadcast accept */
+#define CAM_GroupAcc 0x00000002 /* 1:Multicast accept */
+#define CAM_StationAcc 0x00000001 /* 1:Unicast accept */
+
+/* CAM_Ena bit assign ------------------------------------------------------- */
+#define CAM_ENTRY_MAX 21 /* CAM Data entry max count */
+#define CAM_Ena_Mask ((1<<CAM_ENTRY_MAX)-1) /* CAM Enable bits (Max 21bits) */
+#define CAM_Ena_Bit(index) (1 << (index))
+#define CAM_ENTRY_DESTINATION 0
+#define CAM_ENTRY_SOURCE 1
+#define CAM_ENTRY_MACCTL 20
+
+/* Tx_Ctl bit assign -------------------------------------------------------- */
+#define Tx_En 0x00000001 /* 1:Transmit enable */
+#define Tx_TxHalt 0x00000002 /* 1:Transmit Halt Request */
+#define Tx_NoPad 0x00000004 /* 1:Suppress Padding */
+#define Tx_NoCRC 0x00000008 /* 1:Suppress CRC */
+#define Tx_FBack 0x00000010 /* 1:Fast Back-off */
+#define Tx_EnUnder 0x00000100 /* 1:Enable Underrun */
+#define Tx_EnExDefer 0x00000200 /* 1:Enable Excessive Deferral */
+#define Tx_EnLCarr 0x00000400 /* 1:Enable Lost Carrier */
+#define Tx_EnExColl 0x00000800 /* 1:Enable Excessive Collision */
+#define Tx_EnLateColl 0x00001000 /* 1:Enable Late Collision */
+#define Tx_EnTxPar 0x00002000 /* 1:Enable Transmit Parity */
+#define Tx_EnComp 0x00004000 /* 1:Enable Completion */
+
+/* Tx_Stat bit assign ------------------------------------------------------- */
+#define Tx_TxColl_MASK 0x0000000F /* Tx Collision Count */
+#define Tx_ExColl 0x00000010 /* Excessive Collision */
+#define Tx_TXDefer 0x00000020 /* Transmit Deferred */
+#define Tx_Paused 0x00000040 /* Transmit Paused */
+#define Tx_IntTx 0x00000080 /* Interrupt on Tx */
+#define Tx_Under 0x00000100 /* Underrun */
+#define Tx_Defer 0x00000200 /* Deferral */
+#define Tx_NCarr 0x00000400 /* No Carrier */
+#define Tx_10Stat 0x00000800 /* 10Mbps Status */
+#define Tx_LateColl 0x00001000 /* Late Collision */
+#define Tx_TxPar 0x00002000 /* Tx Parity Error */
+#define Tx_Comp 0x00004000 /* Completion */
+#define Tx_Halted 0x00008000 /* Tx Halted */
+#define Tx_SQErr 0x00010000 /* Signal Quality Error(SQE) */
+
+/* Rx_Ctl bit assign -------------------------------------------------------- */
+#define Rx_EnGood 0x00004000 /* 1:Enable Good */
+#define Rx_EnRxPar 0x00002000 /* 1:Enable Receive Parity */
+#define Rx_EnLongErr 0x00000800 /* 1:Enable Long Error */
+#define Rx_EnOver 0x00000400 /* 1:Enable OverFlow */
+#define Rx_EnCRCErr 0x00000200 /* 1:Enable CRC Error */
+#define Rx_EnAlign 0x00000100 /* 1:Enable Alignment */
+#define Rx_IgnoreCRC 0x00000040 /* 1:Ignore CRC Value */
+#define Rx_StripCRC 0x00000010 /* 1:Strip CRC Value */
+#define Rx_ShortEn 0x00000008 /* 1:Short Enable */
+#define Rx_LongEn 0x00000004 /* 1:Long Enable */
+#define Rx_RxHalt 0x00000002 /* 1:Receive Halt Request */
+#define Rx_RxEn 0x00000001 /* 1:Receive Enable */
+
+/* Rx_Stat bit assign ------------------------------------------------------- */
+#define Rx_Halted 0x00008000 /* Rx Halted */
+#define Rx_Good 0x00004000 /* Rx Good */
+#define Rx_RxPar 0x00002000 /* Rx Parity Error */
+#define Rx_TypePkt 0x00001000 /* Rx Type Packet */
+#define Rx_LongErr 0x00000800 /* Rx Long Error */
+#define Rx_Over 0x00000400 /* Rx Overflow */
+#define Rx_CRCErr 0x00000200 /* Rx CRC Error */
+#define Rx_Align 0x00000100 /* Rx Alignment Error */
+#define Rx_10Stat 0x00000080 /* Rx 10Mbps Status */
+#define Rx_IntRx 0x00000040 /* Rx Interrupt */
+#define Rx_CtlRecd 0x00000020 /* Rx Control Receive */
+#define Rx_InLenErr 0x00000010 /* Rx In Range Frame Length Error */
+
+#define Rx_Stat_Mask 0x0000FFF0 /* Rx All Status Mask */
+
+/* Int_En bit assign -------------------------------------------------------- */
+#define Int_NRAbtEn 0x00000800 /* 1:Non-recoverable Abort Enable */
+#define Int_TxCtlCmpEn 0x00000400 /* 1:Transmit Ctl Complete Enable */
+#define Int_DmParErrEn 0x00000200 /* 1:DMA Parity Error Enable */
+#define Int_DParDEn 0x00000100 /* 1:Data Parity Error Enable */
+#define Int_EarNotEn 0x00000080 /* 1:Early Notify Enable */
+#define Int_DParErrEn 0x00000040 /* 1:Detected Parity Error Enable */
+#define Int_SSysErrEn 0x00000020 /* 1:Signalled System Error Enable */
+#define Int_RMasAbtEn 0x00000010 /* 1:Received Master Abort Enable */
+#define Int_RTargAbtEn 0x00000008 /* 1:Received Target Abort Enable */
+#define Int_STargAbtEn 0x00000004 /* 1:Signalled Target Abort Enable */
+#define Int_BLExEn 0x00000002 /* 1:Buffer List Exhausted Enable */
+#define Int_FDAExEn 0x00000001 /* 1:Free Descriptor Area */
+ /* Exhausted Enable */
+
+/* Int_Src bit assign ------------------------------------------------------- */
+#define Int_NRabt 0x00004000 /* 1:Non Recoverable error */
+#define Int_DmParErrStat 0x00002000 /* 1:DMA Parity Error & Clear */
+#define Int_BLEx 0x00001000 /* 1:Buffer List Empty & Clear */
+#define Int_FDAEx 0x00000800 /* 1:FDA Empty & Clear */
+#define Int_IntNRAbt 0x00000400 /* 1:Non Recoverable Abort */
+#define Int_IntCmp 0x00000200 /* 1:MAC control packet complete */
+#define Int_IntExBD 0x00000100 /* 1:Interrupt Extra BD & Clear */
+#define Int_DmParErr 0x00000080 /* 1:DMA Parity Error & Clear */
+#define Int_IntEarNot 0x00000040 /* 1:Receive Data write & Clear */
+#define Int_SWInt 0x00000020 /* 1:Software request & Clear */
+#define Int_IntBLEx 0x00000010 /* 1:Buffer List Empty & Clear */
+#define Int_IntFDAEx 0x00000008 /* 1:FDA Empty & Clear */
+#define Int_IntPCI 0x00000004 /* 1:PCI controller & Clear */
+#define Int_IntMacRx 0x00000002 /* 1:Rx controller & Clear */
+#define Int_IntMacTx 0x00000001 /* 1:Tx controller & Clear */
+
+/* MD_CA bit assign --------------------------------------------------------- */
+#define MD_CA_PreSup 0x00001000 /* 1:Preamble Suppress */
+#define MD_CA_Busy 0x00000800 /* 1:Busy (Start Operation) */
+#define MD_CA_Wr 0x00000400 /* 1:Write 0:Read */
+
+
+/*
+ * Descriptors
+ */
+
+/* Frame descriptor */
+struct FDesc {
+ volatile __u32 FDNext;
+ volatile __u32 FDSystem;
+ volatile __u32 FDStat;
+ volatile __u32 FDCtl;
+};
+
+/* Buffer descriptor */
+struct BDesc {
+ volatile __u32 BuffData;
+ volatile __u32 BDCtl;
+};
+
+#define FD_ALIGN 16
+
+/* Frame Descriptor bit assign ---------------------------------------------- */
+#define FD_FDLength_MASK 0x0000FFFF /* Length MASK */
+#define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */
+#define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */
+#define FD_FrmOpt_BigEndian 0x40000000 /* Tx/Rx */
+#define FD_FrmOpt_IntTx 0x20000000 /* Tx only */
+#define FD_FrmOpt_NoCRC 0x10000000 /* Tx only */
+#define FD_FrmOpt_NoPadding 0x08000000 /* Tx only */
+#define FD_FrmOpt_Packing 0x04000000 /* Rx only */
+#define FD_CownsFD 0x80000000 /* FD Controller owner bit */
+#define FD_Next_EOL 0x00000001 /* FD EOL indicator */
+#define FD_BDCnt_SHIFT 16
+
+/* Buffer Descriptor bit assign --------------------------------------------- */
+#define BD_BuffLength_MASK 0x0000FFFF /* Receive Data Size */
+#define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */
+#define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */
+#define BD_CownsBD 0x80000000 /* BD Controller owner bit */
+#define BD_RxBDID_SHIFT 16
+#define BD_RxBDSeqN_SHIFT 24
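+
+/* Sketch: a receive BD handed (back) to the controller is therefore
+ * built as
+ *      BD_CownsBD | (id << BD_RxBDID_SHIFT) | RX_BUF_SIZE
+ * which is the pattern tc35815_init_queues() and tc35815_rx() use below. */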
+
+
+/* Some useful constants. */
+
+#define TX_CTL_CMD (Tx_EnTxPar | Tx_EnLateColl | \
+ Tx_EnExColl | Tx_EnLCarr | Tx_EnExDefer | Tx_EnUnder | \
+ Tx_En) /* i.e. 0x3f01 */
+/* Do not use Rx_StripCRC -- it causes trouble on BLEx/FDAEx condition */
+#define RX_CTL_CMD (Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \
+ | Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn) /* maybe 0x6f01 */
+#define INT_EN_CMD (Int_NRAbtEn | \
+ Int_DmParErrEn | Int_DParDEn | Int_DParErrEn | \
+ Int_SSysErrEn | Int_RMasAbtEn | Int_RTargAbtEn | \
+ Int_STargAbtEn | \
+ Int_BLExEn | Int_FDAExEn) /* maybe 0xb7f*/
+#define DMA_CTL_CMD DMA_BURST_SIZE
+#define HAVE_DMA_RXALIGN(lp) likely((lp)->chiptype != TC35815CF)
+
+/* Tuning parameters */
+#define DMA_BURST_SIZE 32
+#define TX_THRESHOLD 1024
+/* raise the threshold up to the maximum packet size for hosts with low PCI transfer ability. */
+#define TX_THRESHOLD_MAX 1536
+/* use the maximum threshold permanently after this many underrun errors. */
+#define TX_THRESHOLD_KEEP_LIMIT 10
+
+/* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */
+#define FD_PAGE_NUM 4
+#define RX_BUF_NUM 128 /* < 256 */
+#define RX_FD_NUM 256 /* >= 32 */
+#define TX_FD_NUM 128
+#if RX_CTL_CMD & Rx_LongEn
+#define RX_BUF_SIZE PAGE_SIZE
+#elif RX_CTL_CMD & Rx_StripCRC
+#define RX_BUF_SIZE \
+ L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + NET_IP_ALIGN)
+#else
+#define RX_BUF_SIZE \
+ L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN + NET_IP_ALIGN)
+#endif
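+/* With the default RX_CTL_CMD (no Rx_LongEn, no Rx_StripCRC) this evaluates
+ * to L1_CACHE_ALIGN(1514 + 4 + 4 + NET_IP_ALIGN): one cache-aligned maximum
+ * VLAN frame, FCS included, per receive buffer. */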
+#define RX_FD_RESERVE (2 / 2) /* max 2 BD per RxFD */
+#define NAPI_WEIGHT 16
+
+struct TxFD {
+ struct FDesc fd;
+ struct BDesc bd;
+ struct BDesc unused;
+};
+
+struct RxFD {
+ struct FDesc fd;
+ struct BDesc bd[0]; /* variable length */
+};
+
+struct FrFD {
+ struct FDesc fd;
+ struct BDesc bd[RX_BUF_NUM];
+};
+
+
+#define tc_readl(addr) ioread32(addr)
+#define tc_writel(d, addr) iowrite32(d, addr)
+
+#define TC35815_TX_TIMEOUT msecs_to_jiffies(400)
+
+/* Information that need to be kept for each controller. */
+struct tc35815_local {
+ struct pci_dev *pci_dev;
+
+ struct net_device *dev;
+ struct napi_struct napi;
+
+ /* statistics */
+ struct {
+ int max_tx_qlen;
+ int tx_ints;
+ int rx_ints;
+ int tx_underrun;
+ } lstats;
+
+ /* Tx control lock. This protects the transmit buffer ring
+ * state along with the "tx full" state of the driver. This
+ * means all netif_queue flow control actions are protected
+ * by this lock as well.
+ */
+ spinlock_t lock;
+ spinlock_t rx_lock;
+
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ int duplex;
+ int speed;
+ int link;
+ struct work_struct restart_work;
+
+ /*
+ * Transmitting: Batch Mode.
+ * 1 BD in 1 TxFD.
+ * Receiving: Non-Packing Mode.
+ * 1 circular FD for Free Buffer List.
+ * RX_BUF_NUM BD in Free Buffer FD.
+ * One Free Buffer BD has ETH_FRAME_LEN data buffer.
+ */
+ void *fd_buf; /* for TxFD, RxFD, FrFD */
+ dma_addr_t fd_buf_dma;
+ struct TxFD *tfd_base;
+ unsigned int tfd_start;
+ unsigned int tfd_end;
+ struct RxFD *rfd_base;
+ struct RxFD *rfd_limit;
+ struct RxFD *rfd_cur;
+ struct FrFD *fbl_ptr;
+ unsigned int fbl_count;
+ struct {
+ struct sk_buff *skb;
+ dma_addr_t skb_dma;
+ } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM];
+ u32 msg_enable;
+ enum tc35815_chiptype chiptype;
+};
+
+static inline dma_addr_t fd_virt_to_bus(struct tc35815_local *lp, void *virt)
+{
+ return lp->fd_buf_dma + ((u8 *)virt - (u8 *)lp->fd_buf);
+}
+#ifdef DEBUG
+static inline void *fd_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
+{
+ return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma));
+}
+#endif
+static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
+ struct pci_dev *hwdev,
+ dma_addr_t *dma_handle)
+{
+ struct sk_buff *skb;
+ skb = dev_alloc_skb(RX_BUF_SIZE);
+ if (!skb)
+ return NULL;
+ *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(hwdev, *dma_handle)) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+ skb_reserve(skb, 2); /* make the IP header 4-byte aligned */
+ return skb;
+}
+
+static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_t dma_handle)
+{
+ pci_unmap_single(hwdev, dma_handle, RX_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(skb);
+}
+
+/* Function prototypes. */
+
+static int tc35815_open(struct net_device *dev);
+static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev);
+static irqreturn_t tc35815_interrupt(int irq, void *dev_id);
+static int tc35815_rx(struct net_device *dev, int limit);
+static int tc35815_poll(struct napi_struct *napi, int budget);
+static void tc35815_txdone(struct net_device *dev);
+static int tc35815_close(struct net_device *dev);
+static struct net_device_stats *tc35815_get_stats(struct net_device *dev);
+static void tc35815_set_multicast_list(struct net_device *dev);
+static void tc35815_tx_timeout(struct net_device *dev);
+static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void tc35815_poll_controller(struct net_device *dev);
+#endif
+static const struct ethtool_ops tc35815_ethtool_ops;
+
+/* Chip reset and initialization routines. */
+static void tc35815_chip_reset(struct net_device *dev);
+static void tc35815_chip_init(struct net_device *dev);
+
+#ifdef DEBUG
+static void panic_queues(struct net_device *dev);
+#endif
+
+static void tc35815_restart_work(struct work_struct *work);
+
+static int tc_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct net_device *dev = bus->priv;
+ struct tc35815_regs __iomem *tr =
+ (struct tc35815_regs __iomem *)dev->base_addr;
+ unsigned long timeout = jiffies + HZ;
+
+ tc_writel(MD_CA_Busy | (mii_id << 5) | (regnum & 0x1f), &tr->MD_CA);
+ udelay(12); /* it takes 32 x 400ns at least */
+ while (tc_readl(&tr->MD_CA) & MD_CA_Busy) {
+ if (time_after(jiffies, timeout))
+ return -EIO;
+ cpu_relax();
+ }
+ return tc_readl(&tr->MD_Data) & 0xffff;
+}
+
+static int tc_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 val)
+{
+ struct net_device *dev = bus->priv;
+ struct tc35815_regs __iomem *tr =
+ (struct tc35815_regs __iomem *)dev->base_addr;
+ unsigned long timeout = jiffies + HZ;
+
+ tc_writel(val, &tr->MD_Data);
+ tc_writel(MD_CA_Busy | MD_CA_Wr | (mii_id << 5) | (regnum & 0x1f),
+ &tr->MD_CA);
+ udelay(12); /* it takes 32 x 400ns at least */
+ while (tc_readl(&tr->MD_CA) & MD_CA_Busy) {
+ if (time_after(jiffies, timeout))
+ return -EIO;
+ cpu_relax();
+ }
+ return 0;
+}
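+
+/* Usage sketch: once this bus is registered, the PHY layer issues e.g.
+ *      tc_mdio_read(lp->mii_bus, phy_addr, MII_BMSR)
+ * through phy_read()/phy_write(); both helpers poll MD_CA_Busy and give
+ * up after roughly one second (jiffies + HZ). */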
+
+static void tc_handle_link_change(struct net_device *dev)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ struct phy_device *phydev = lp->phy_dev;
+ unsigned long flags;
+ int status_change = 0;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ if (phydev->link &&
+ (lp->speed != phydev->speed || lp->duplex != phydev->duplex)) {
+ struct tc35815_regs __iomem *tr =
+ (struct tc35815_regs __iomem *)dev->base_addr;
+ u32 reg;
+
+ reg = tc_readl(&tr->MAC_Ctl);
+ reg |= MAC_HaltReq;
+ tc_writel(reg, &tr->MAC_Ctl);
+ if (phydev->duplex == DUPLEX_FULL)
+ reg |= MAC_FullDup;
+ else
+ reg &= ~MAC_FullDup;
+ tc_writel(reg, &tr->MAC_Ctl);
+ reg &= ~MAC_HaltReq;
+ tc_writel(reg, &tr->MAC_Ctl);
+
+ /*
+ * TX4939 PCFG.SPEEDn bit will be changed on
+ * NETDEV_CHANGE event.
+ */
+ /*
+ * WORKAROUND: enable LostCrS only if half duplex
+ * operation.
+ * (TX4939 does not have EnLCarr)
+ */
+ if (phydev->duplex == DUPLEX_HALF &&
+ lp->chiptype != TC35815_TX4939)
+ tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr,
+ &tr->Tx_Ctl);
+
+ lp->speed = phydev->speed;
+ lp->duplex = phydev->duplex;
+ status_change = 1;
+ }
+
+ if (phydev->link != lp->link) {
+ if (phydev->link) {
+ /* delayed promiscuous enabling */
+ if (dev->flags & IFF_PROMISC)
+ tc35815_set_multicast_list(dev);
+ } else {
+ lp->speed = 0;
+ lp->duplex = -1;
+ }
+ lp->link = phydev->link;
+
+ status_change = 1;
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ if (status_change && netif_msg_link(lp)) {
+ phy_print_status(phydev);
+ pr_debug("%s: MII BMCR %04x BMSR %04x LPA %04x\n",
+ dev->name,
+ phy_read(phydev, MII_BMCR),
+ phy_read(phydev, MII_BMSR),
+ phy_read(phydev, MII_LPA));
+ }
+}
+
+static int tc_mii_probe(struct net_device *dev)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
+ int phy_addr;
+ u32 dropmask;
+
+ /* find the first phy */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+ if (lp->mii_bus->phy_map[phy_addr]) {
+ if (phydev) {
+ printk(KERN_ERR "%s: multiple PHYs found\n",
+ dev->name);
+ return -EINVAL;
+ }
+ phydev = lp->mii_bus->phy_map[phy_addr];
+ break;
+ }
+ }
+
+ if (!phydev) {
+ printk(KERN_ERR "%s: no PHY found\n", dev->name);
+ return -ENODEV;
+ }
+
+ /* attach the mac to the phy */
+ phydev = phy_connect(dev, dev_name(&phydev->dev),
+ &tc_handle_link_change, 0,
+ lp->chiptype == TC35815_TX4939 ?
+ PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phydev)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(phydev);
+ }
+ printk(KERN_INFO "%s: attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, id=%x)\n",
+ dev->name, phydev->drv->name, dev_name(&phydev->dev),
+ phydev->phy_id);
+
+ /* mask with MAC supported features */
+ phydev->supported &= PHY_BASIC_FEATURES;
+ dropmask = 0;
+ if (options.speed == 10)
+ dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
+ else if (options.speed == 100)
+ dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
+ if (options.duplex == 1)
+ dropmask |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full;
+ else if (options.duplex == 2)
+ dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half;
+ phydev->supported &= ~dropmask;
+ phydev->advertising = phydev->supported;
+
+ lp->link = 0;
+ lp->speed = 0;
+ lp->duplex = -1;
+ lp->phy_dev = phydev;
+
+ return 0;
+}
+
+static int tc_mii_init(struct net_device *dev)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ int err;
+ int i;
+
+ lp->mii_bus = mdiobus_alloc();
+ if (lp->mii_bus == NULL) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ lp->mii_bus->name = "tc35815_mii_bus";
+ lp->mii_bus->read = tc_mdio_read;
+ lp->mii_bus->write = tc_mdio_write;
+ snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x",
+ (lp->pci_dev->bus->number << 8) | lp->pci_dev->devfn);
+ lp->mii_bus->priv = dev;
+ lp->mii_bus->parent = &lp->pci_dev->dev;
+ lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!lp->mii_bus->irq) {
+ err = -ENOMEM;
+ goto err_out_free_mii_bus;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ lp->mii_bus->irq[i] = PHY_POLL;
+
+ err = mdiobus_register(lp->mii_bus);
+ if (err)
+ goto err_out_free_mdio_irq;
+ err = tc_mii_probe(dev);
+ if (err)
+ goto err_out_unregister_bus;
+ return 0;
+
+err_out_unregister_bus:
+ mdiobus_unregister(lp->mii_bus);
+err_out_free_mdio_irq:
+ kfree(lp->mii_bus->irq);
+err_out_free_mii_bus:
+ mdiobus_free(lp->mii_bus);
+err_out:
+ return err;
+}
+
+#ifdef CONFIG_CPU_TX49XX
+/*
+ * Find a platform_device providing a MAC address. The platform code
+ * should provide a "tc35815-mac" device with a MAC address in its
+ * platform_data.
+ */
+static int __devinit tc35815_mac_match(struct device *dev, void *data)
+{
+ struct platform_device *plat_dev = to_platform_device(dev);
+ struct pci_dev *pci_dev = data;
+ unsigned int id = pci_dev->irq;
+ return !strcmp(plat_dev->name, "tc35815-mac") && plat_dev->id == id;
+}
+
+static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ struct device *pd = bus_find_device(&platform_bus_type, NULL,
+ lp->pci_dev, tc35815_mac_match);
+ if (pd) {
+ if (pd->platform_data)
+ memcpy(dev->dev_addr, pd->platform_data, ETH_ALEN);
+ put_device(pd);
+ return is_valid_ether_addr(dev->dev_addr) ? 0 : -ENODEV;
+ }
+ return -ENODEV;
+}
+#else
+static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+static int __devinit tc35815_init_dev_addr(struct net_device *dev)
+{
+ struct tc35815_regs __iomem *tr =
+ (struct tc35815_regs __iomem *)dev->base_addr;
+ int i;
+
+ while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
+ ;
+ for (i = 0; i < 6; i += 2) {
+ unsigned short data;
+ tc_writel(PROM_Busy | PROM_Read | (i / 2 + 2), &tr->PROM_Ctl);
+ while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
+ ;
+ data = tc_readl(&tr->PROM_Data);
+ dev->dev_addr[i] = data & 0xff;
+ dev->dev_addr[i+1] = data >> 8;
+ }
+ if (!is_valid_ether_addr(dev->dev_addr))
+ return tc35815_read_plat_dev_addr(dev);
+ return 0;
+}
+
+static const struct net_device_ops tc35815_netdev_ops = {
+ .ndo_open = tc35815_open,
+ .ndo_stop = tc35815_close,
+ .ndo_start_xmit = tc35815_send_packet,
+ .ndo_get_stats = tc35815_get_stats,
+ .ndo_set_rx_mode = tc35815_set_multicast_list,
+ .ndo_tx_timeout = tc35815_tx_timeout,
+ .ndo_do_ioctl = tc35815_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = tc35815_poll_controller,
+#endif
+};
+
+static int __devinit tc35815_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ void __iomem *ioaddr = NULL;
+ struct net_device *dev;
+ struct tc35815_local *lp;
+ int rc;
+
+ static int printed_version;
+ if (!printed_version++) {
+ printk(version);
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "speed:%d duplex:%d\n",
+ options.speed, options.duplex);
+ }
+
+ if (!pdev->irq) {
+ dev_warn(&pdev->dev, "no IRQ assigned.\n");
+ return -ENODEV;
+ }
+
+ /* dev zeroed in alloc_etherdev */
+ dev = alloc_etherdev(sizeof(*lp));
+ if (dev == NULL) {
+ dev_err(&pdev->dev, "unable to alloc new ethernet\n");
+ return -ENOMEM;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ lp = netdev_priv(dev);
+ lp->dev = dev;
+
+ /* enable device (incl. PCI PM wakeup), and bus-mastering */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ goto err_out;
+ rc = pcim_iomap_regions(pdev, 1 << 1, MODNAME);
+ if (rc)
+ goto err_out;
+ pci_set_master(pdev);
+ ioaddr = pcim_iomap_table(pdev)[1];
+
+ /* Initialize the device structure. */
+ dev->netdev_ops = &tc35815_netdev_ops;
+ dev->ethtool_ops = &tc35815_ethtool_ops;
+ dev->watchdog_timeo = TC35815_TX_TIMEOUT;
+ netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT);
+
+ dev->irq = pdev->irq;
+ dev->base_addr = (unsigned long)ioaddr;
+
+ INIT_WORK(&lp->restart_work, tc35815_restart_work);
+ spin_lock_init(&lp->lock);
+ spin_lock_init(&lp->rx_lock);
+ lp->pci_dev = pdev;
+ lp->chiptype = ent->driver_data;
+
+ lp->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK;
+ pci_set_drvdata(pdev, dev);
+
+ /* Soft reset the chip. */
+ tc35815_chip_reset(dev);
+
+ /* Retrieve the ethernet address. */
+ if (tc35815_init_dev_addr(dev)) {
+ dev_warn(&pdev->dev, "not valid ether addr\n");
+ random_ether_addr(dev->dev_addr);
+ }
+
+ rc = register_netdev(dev);
+ if (rc)
+ goto err_out;
+
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+ printk(KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n",
+ dev->name,
+ chip_info[ent->driver_data].name,
+ dev->base_addr,
+ dev->dev_addr,
+ dev->irq);
+
+ rc = tc_mii_init(dev);
+ if (rc)
+ goto err_out_unregister;
+
+ return 0;
+
+err_out_unregister:
+ unregister_netdev(dev);
+err_out:
+ free_netdev(dev);
+ return rc;
+}
+
+
+static void __devexit tc35815_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tc35815_local *lp = netdev_priv(dev);
+
+ phy_disconnect(lp->phy_dev);
+ mdiobus_unregister(lp->mii_bus);
+ kfree(lp->mii_bus->irq);
+ mdiobus_free(lp->mii_bus);
+ unregister_netdev(dev);
+ free_netdev(dev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static int
+tc35815_init_queues(struct net_device *dev)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ int i;
+ unsigned long fd_addr;
+
+ if (!lp->fd_buf) {
+ BUG_ON(sizeof(struct FDesc) +
+ sizeof(struct BDesc) * RX_BUF_NUM +
+ sizeof(struct FDesc) * RX_FD_NUM +
+ sizeof(struct TxFD) * TX_FD_NUM >
+ PAGE_SIZE * FD_PAGE_NUM);
+
+ lp->fd_buf = pci_alloc_consistent(lp->pci_dev,
+ PAGE_SIZE * FD_PAGE_NUM,
+ &lp->fd_buf_dma);
+ if (!lp->fd_buf)
+ return -ENOMEM;
+ for (i = 0; i < RX_BUF_NUM; i++) {
+ lp->rx_skbs[i].skb =
+ alloc_rxbuf_skb(dev, lp->pci_dev,
+ &lp->rx_skbs[i].skb_dma);
+ if (!lp->rx_skbs[i].skb) {
+ while (--i >= 0) {
+ free_rxbuf_skb(lp->pci_dev,
+ lp->rx_skbs[i].skb,
+ lp->rx_skbs[i].skb_dma);
+ lp->rx_skbs[i].skb = NULL;
+ }
+ pci_free_consistent(lp->pci_dev,
+ PAGE_SIZE * FD_PAGE_NUM,
+ lp->fd_buf,
+ lp->fd_buf_dma);
+ lp->fd_buf = NULL;
+ return -ENOMEM;
+ }
+ }
+ printk(KERN_DEBUG "%s: FD buf %p DataBuf",
+ dev->name, lp->fd_buf);
+ printk("\n");
+ } else {
+ for (i = 0; i < FD_PAGE_NUM; i++)
+ clear_page((void *)((unsigned long)lp->fd_buf +
+ i * PAGE_SIZE));
+ }
+ fd_addr = (unsigned long)lp->fd_buf;
+
+ /* Free Descriptors (for Receive) */
+ lp->rfd_base = (struct RxFD *)fd_addr;
+ fd_addr += sizeof(struct RxFD) * RX_FD_NUM;
+ for (i = 0; i < RX_FD_NUM; i++)
+ lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD);
+ lp->rfd_cur = lp->rfd_base;
+ lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1);
+
+ /* Transmit Descriptors */
+ lp->tfd_base = (struct TxFD *)fd_addr;
+ fd_addr += sizeof(struct TxFD) * TX_FD_NUM;
+ for (i = 0; i < TX_FD_NUM; i++) {
+ lp->tfd_base[i].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[i+1]));
+ lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
+ lp->tfd_base[i].fd.FDCtl = cpu_to_le32(0);
+ }
+ lp->tfd_base[TX_FD_NUM-1].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[0]));
+ lp->tfd_start = 0;
+ lp->tfd_end = 0;
+
+ /* Buffer List (for Receive) */
+ lp->fbl_ptr = (struct FrFD *)fd_addr;
+ lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr));
+ lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD);
+ /*
+ * move all allocated skbs to head of rx_skbs[] array.
+ * fbl_count might not be RX_BUF_NUM if alloc_rxbuf_skb() in
+ * tc35815_rx() had failed.
+ */
+ lp->fbl_count = 0;
+ for (i = 0; i < RX_BUF_NUM; i++) {
+ if (lp->rx_skbs[i].skb) {
+ if (i != lp->fbl_count) {
+ lp->rx_skbs[lp->fbl_count].skb =
+ lp->rx_skbs[i].skb;
+ lp->rx_skbs[lp->fbl_count].skb_dma =
+ lp->rx_skbs[i].skb_dma;
+ }
+ lp->fbl_count++;
+ }
+ }
+ for (i = 0; i < RX_BUF_NUM; i++) {
+ if (i >= lp->fbl_count) {
+ lp->fbl_ptr->bd[i].BuffData = 0;
+ lp->fbl_ptr->bd[i].BDCtl = 0;
+ continue;
+ }
+ lp->fbl_ptr->bd[i].BuffData =
+ cpu_to_le32(lp->rx_skbs[i].skb_dma);
+ /* BDID is index of FrFD.bd[] */
+ lp->fbl_ptr->bd[i].BDCtl =
+ cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) |
+ RX_BUF_SIZE);
+ }
+
+ printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n",
+ dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr);
+ return 0;
+}
+
+static void
+tc35815_clear_queues(struct net_device *dev)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < TX_FD_NUM; i++) {
+ u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem);
+ struct sk_buff *skb =
+ fdsystem != 0xffffffff ?
+ lp->tx_skbs[fdsystem].skb : NULL;
+#ifdef DEBUG
+ if (lp->tx_skbs[i].skb != skb) {
+ printk("%s: tx_skbs mismatch(%d).\n", dev->name, i);
+ panic_queues(dev);
+ }
+#else
+ BUG_ON(lp->tx_skbs[i].skb != skb);
+#endif
+ if (skb) {
+ pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
+ lp->tx_skbs[i].skb = NULL;
+ lp->tx_skbs[i].skb_dma = 0;
+ dev_kfree_skb_any(skb);
+ }
+ lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
+ }
+
+ tc35815_init_queues(dev);
+}
+
+static void
+tc35815_free_queues(struct net_device *dev)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ int i;
+
+ if (lp->tfd_base) {
+ for (i = 0; i < TX_FD_NUM; i++) {
+ u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem);
+ struct sk_buff *skb =
+ fdsystem != 0xffffffff ?
+ lp->tx_skbs[fdsystem].skb : NULL;
+#ifdef DEBUG
+ if (lp->tx_skbs[i].skb != skb) {
+ printk("%s: tx_skbs mismatch(%d).\n", dev->name, i);
+ panic_queues(dev);
+ }
+#else
+ BUG_ON(lp->tx_skbs[i].skb != skb);
+#endif
+ if (skb) {
+ pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ lp->tx_skbs[i].skb = NULL;
+ lp->tx_skbs[i].skb_dma = 0;
+ }
+ lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
+ }
+ }
+
+ lp->rfd_base = NULL;
+ lp->rfd_limit = NULL;
+ lp->rfd_cur = NULL;
+ lp->fbl_ptr = NULL;
+
+ for (i = 0; i < RX_BUF_NUM; i++) {
+ if (lp->rx_skbs[i].skb) {
+ free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb,
+ lp->rx_skbs[i].skb_dma);
+ lp->rx_skbs[i].skb = NULL;
+ }
+ }
+ if (lp->fd_buf) {
+ pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM,
+ lp->fd_buf, lp->fd_buf_dma);
+ lp->fd_buf = NULL;
+ }
+}
+
+static void
+dump_txfd(struct TxFD *fd)
+{
+ printk("TxFD(%p): %08x %08x %08x %08x\n", fd,
+ le32_to_cpu(fd->fd.FDNext),
+ le32_to_cpu(fd->fd.FDSystem),
+ le32_to_cpu(fd->fd.FDStat),
+ le32_to_cpu(fd->fd.FDCtl));
+ printk("BD: ");
+ printk(" %08x %08x",
+ le32_to_cpu(fd->bd.BuffData),
+ le32_to_cpu(fd->bd.BDCtl));
+ printk("\n");
+}
+
+static int
+dump_rxfd(struct RxFD *fd)
+{
+ int i, bd_count = (le32_to_cpu(fd->fd.FDCtl) & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT;
+ if (bd_count > 8)
+ bd_count = 8;
+ printk("RxFD(%p): %08x %08x %08x %08x\n", fd,
+ le32_to_cpu(fd->fd.FDNext),
+ le32_to_cpu(fd->fd.FDSystem),
+ le32_to_cpu(fd->fd.FDStat),
+ le32_to_cpu(fd->fd.FDCtl));
+ if (le32_to_cpu(fd->fd.FDCtl) & FD_CownsFD)
+ return 0;
+ printk("BD: ");
+ for (i = 0; i < bd_count; i++)
+ printk(" %08x %08x",
+ le32_to_cpu(fd->bd[i].BuffData),
+ le32_to_cpu(fd->bd[i].BDCtl));
+ printk("\n");
+ return bd_count;
+}
+
+#ifdef DEBUG
+static void
+dump_frfd(struct FrFD *fd)
+{
+ int i;
+ printk("FrFD(%p): %08x %08x %08x %08x\n", fd,
+ le32_to_cpu(fd->fd.FDNext),
+ le32_to_cpu(fd->fd.FDSystem),
+ le32_to_cpu(fd->fd.FDStat),
+ le32_to_cpu(fd->fd.FDCtl));
+ printk("BD: ");
+ for (i = 0; i < RX_BUF_NUM; i++)
+ printk(" %08x %08x",
+ le32_to_cpu(fd->bd[i].BuffData),
+ le32_to_cpu(fd->bd[i].BDCtl));
+ printk("\n");
+}
+
+static void
+panic_queues(struct net_device *dev)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ int i;
+
+ printk("TxFD base %p, start %u, end %u\n",
+ lp->tfd_base, lp->tfd_start, lp->tfd_end);
+ printk("RxFD base %p limit %p cur %p\n",
+ lp->rfd_base, lp->rfd_limit, lp->rfd_cur);
+ printk("FrFD %p\n", lp->fbl_ptr);
+ for (i = 0; i < TX_FD_NUM; i++)
+ dump_txfd(&lp->tfd_base[i]);
+ for (i = 0; i < RX_FD_NUM; i++) {
+ int bd_count = dump_rxfd(&lp->rfd_base[i]);
+ i += (bd_count + 1) / 2; /* skip BDs */
+ }
+ dump_frfd(lp->fbl_ptr);
+ panic("%s: Illegal queue state.", dev->name);
+}
+#endif
+
+static void print_eth(const u8 *add)
+{
+ printk(KERN_DEBUG "print_eth(%p)\n", add);
+ printk(KERN_DEBUG " %pM => %pM : %02x%02x\n",
+ add + 6, add, add[12], add[13]);
+}
+
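+/* One TxFD is deliberately kept unused, so a full ring (advancing
+ * tfd_start would hit tfd_end) can be told apart from an empty one
+ * (tfd_start == tfd_end). */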
+static int tc35815_tx_full(struct net_device *dev)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ return (lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end;
+}
+
+static void tc35815_restart(struct net_device *dev)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+
+ if (lp->phy_dev) {
+ int timeout;
+
+ phy_write(lp->phy_dev, MII_BMCR, BMCR_RESET);
+ timeout = 100;
+ while (--timeout) {
+ if (!(phy_read(lp->phy_dev, MII_BMCR) & BMCR_RESET))
+ break;
+ udelay(1);
+ }
+ if (!timeout)
+ printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name);
+ }
+
+ spin_lock_bh(&lp->rx_lock);
+ spin_lock_irq(&lp->lock);
+ tc35815_chip_reset(dev);
+ tc35815_clear_queues(dev);
+ tc35815_chip_init(dev);
+ /* Reconfigure the CAM again since tc35815_chip_init() initializes it. */
+ tc35815_set_multicast_list(dev);
+ spin_unlock_irq(&lp->lock);
+ spin_unlock_bh(&lp->rx_lock);
+
+ netif_wake_queue(dev);
+}
+
+static void tc35815_restart_work(struct work_struct *work)
+{
+ struct tc35815_local *lp =
+ container_of(work, struct tc35815_local, restart_work);
+ struct net_device *dev = lp->dev;
+
+ tc35815_restart(dev);
+}
+
+static void tc35815_schedule_restart(struct net_device *dev)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ struct tc35815_regs __iomem *tr =
+ (struct tc35815_regs __iomem *)dev->base_addr;
+ unsigned long flags;
+
+ /* disable interrupts */
+ spin_lock_irqsave(&lp->lock, flags);
+ tc_writel(0, &tr->Int_En);
+ tc_writel(tc_readl(&tr->DMA_Ctl) | DMA_IntMask, &tr->DMA_Ctl);
+ schedule_work(&lp->restart_work);
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+static void tc35815_tx_timeout(struct net_device *dev)
+{
+ struct tc35815_regs __iomem *tr =
+ (struct tc35815_regs __iomem *)dev->base_addr;
+
+ printk(KERN_WARNING "%s: transmit timed out, status %#x\n",
+ dev->name, tc_readl(&tr->Tx_Stat));
+
+ /* Try to restart the adaptor. */
+ tc35815_schedule_restart(dev);
+ dev->stats.tx_errors++;
+}
+
+/*
+ * Open/initialize the controller. This is called (in the current kernel)
+ * sometime after booting when the 'ifconfig' program is run.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+static int
+tc35815_open(struct net_device *dev)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+
+ /*
+ * This is used if the interrupt line can be turned off (shared).
+ * See 3c503.c for an example of selecting the IRQ at config-time.
+ */
+ if (request_irq(dev->irq, tc35815_interrupt, IRQF_SHARED,
+ dev->name, dev))
+ return -EAGAIN;
+
+ tc35815_chip_reset(dev);
+
+ if (tc35815_init_queues(dev) != 0) {
+ free_irq(dev->irq, dev);
+ return -EAGAIN;
+ }
+
+ napi_enable(&lp->napi);
+
+ /* Reset the hardware here. Don't forget to set the station address. */
+ spin_lock_irq(&lp->lock);
+ tc35815_chip_init(dev);
+ spin_unlock_irq(&lp->lock);
+
+ netif_carrier_off(dev);
+ /* schedule a link state check */
+ phy_start(lp->phy_dev);
+
+ /* We are now ready to accept transmit requests from
+ * the queueing layer of the networking stack.
+ */
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/* This will only be invoked if your driver is _not_ in XOFF state.
+ * What this means is that you need not check it, and that this
+ * invariant will hold if you make sure that the netif_*_queue()
+ * calls are done at the proper times.
+ */
+static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ struct TxFD *txfd;
+ unsigned long flags;
+
+ /* If some error occurs while trying to transmit this
+ * packet, you should return NETDEV_TX_BUSY from this function.
+ * In such a case you _may not_ do anything to the
+ * SKB, it is still owned by the network queueing
+ * layer when an error is returned. This means you
+ * may not modify any SKB fields, you may not free
+ * the SKB, etc.
+ */
+
+ /* This is the most common case for modern hardware.
+ * The spinlock protects this code from the TX complete
+ * hardware interrupt handler. Queue flow control is
+ * thus managed under this lock as well.
+ */
+ spin_lock_irqsave(&lp->lock, flags);
+
+ /* failsafe... (handle txdone now if more than half of the FDs are used) */
+ if ((lp->tfd_start + TX_FD_NUM - lp->tfd_end) % TX_FD_NUM >
+ TX_FD_NUM / 2)
+ tc35815_txdone(dev);
+
+ if (netif_msg_pktdata(lp))
+ print_eth(skb->data);
+#ifdef DEBUG
+ if (lp->tx_skbs[lp->tfd_start].skb) {
+ printk("%s: tx_skbs conflict.\n", dev->name);
+ panic_queues(dev);
+ }
+#else
+ BUG_ON(lp->tx_skbs[lp->tfd_start].skb);
+#endif
+ lp->tx_skbs[lp->tfd_start].skb = skb;
+ lp->tx_skbs[lp->tfd_start].skb_dma = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+
+ /*add to ring */
+ txfd = &lp->tfd_base[lp->tfd_start];
+ txfd->bd.BuffData = cpu_to_le32(lp->tx_skbs[lp->tfd_start].skb_dma);
+ txfd->bd.BDCtl = cpu_to_le32(skb->len);
+ txfd->fd.FDSystem = cpu_to_le32(lp->tfd_start);
+ txfd->fd.FDCtl = cpu_to_le32(FD_CownsFD | (1 << FD_BDCnt_SHIFT));
+
+ if (lp->tfd_start == lp->tfd_end) {
+ struct tc35815_regs __iomem *tr =
+ (struct tc35815_regs __iomem *)dev->base_addr;
+ /* Start DMA Transmitter. */
+ txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
+ txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
+ if (netif_msg_tx_queued(lp)) {
+ printk("%s: starting TxFD.\n", dev->name);
+ dump_txfd(txfd);
+ }
+ tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr);
+ } else {
+ txfd->fd.FDNext &= cpu_to_le32(~FD_Next_EOL);
+ if (netif_msg_tx_queued(lp)) {
+ printk("%s: queueing TxFD.\n", dev->name);
+ dump_txfd(txfd);
+ }
+ }
+ lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM;
+
+ /* If we just used up the very last entry in the
+ * TX ring on this device, tell the queueing
+ * layer to send no more.
+ */
+ if (tc35815_tx_full(dev)) {
+ if (netif_msg_tx_queued(lp))
+ printk(KERN_WARNING "%s: TxFD Exhausted.\n", dev->name);
+ netif_stop_queue(dev);
+ }
+
+ /* When the TX completion hw interrupt arrives, this
+ * is when the transmit statistics are updated.
+ */
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return NETDEV_TX_OK;
+}
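+
+/* Design note, as read from the code above: the first frame submitted to
+ * an idle ring carries FD_Next_EOL and FD_FrmOpt_IntTx and kicks DMA via
+ * TxFrmPtr; frames queued behind it are linked in with EOL cleared so the
+ * chip can chain onward. */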
+
+#define FATAL_ERROR_INT \
+ (Int_IntPCI | Int_DmParErr | Int_IntNRAbt)
+static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status)
+{
+ static int count;
+ printk(KERN_WARNING "%s: Fatal Error Intterrupt (%#x):",
+ dev->name, status);
+ if (status & Int_IntPCI)
+ printk(" IntPCI");
+ if (status & Int_DmParErr)
+ printk(" DmParErr");
+ if (status & Int_IntNRAbt)
+ printk(" IntNRAbt");
+ printk("\n");
+ if (count++ > 100)
+ panic("%s: Too many fatal errors.", dev->name);
+ printk(KERN_WARNING "%s: Resetting ...\n", dev->name);
+ /* Try to restart the adaptor. */
+ tc35815_schedule_restart(dev);
+}
+
+static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ int ret = -1;
+
+ /* Fatal errors... */
+ if (status & FATAL_ERROR_INT) {
+ tc35815_fatal_error_interrupt(dev, status);
+ return 0;
+ }
+ /* recoverable errors */
+ if (status & Int_IntFDAEx) {
+ if (netif_msg_rx_err(lp))
+ dev_warn(&dev->dev,
+ "Free Descriptor Area Exhausted (%#x).\n",
+ status);
+ dev->stats.rx_dropped++;
+ ret = 0;
+ }
+ if (status & Int_IntBLEx) {
+ if (netif_msg_rx_err(lp))
+ dev_warn(&dev->dev,
+ "Buffer List Exhausted (%#x).\n",
+ status);
+ dev->stats.rx_dropped++;
+ ret = 0;
+ }
+ if (status & Int_IntExBD) {
+ if (netif_msg_rx_err(lp))
+ dev_warn(&dev->dev,
+ "Excessive Buffer Descriptiors (%#x).\n",
+ status);
+ dev->stats.rx_length_errors++;
+ ret = 0;
+ }
+
+ /* normal notification */
+ if (status & Int_IntMacRx) {
+ /* Got a packet(s). */
+ ret = tc35815_rx(dev, limit);
+ lp->lstats.rx_ints++;
+ }
+ if (status & Int_IntMacTx) {
+ /* Transmit complete. */
+ lp->lstats.tx_ints++;
+ spin_lock_irq(&lp->lock);
+ tc35815_txdone(dev);
+ spin_unlock_irq(&lp->lock);
+ if (ret < 0)
+ ret = 0;
+ }
+ return ret;
+}
+
+/*
+ * The typical workload of the driver:
+ * Handle the network interface interrupts.
+ */
+static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct tc35815_local *lp = netdev_priv(dev);
+ struct tc35815_regs __iomem *tr =
+ (struct tc35815_regs __iomem *)dev->base_addr;
+ u32 dmactl = tc_readl(&tr->DMA_Ctl);
+
+ if (!(dmactl & DMA_IntMask)) {
+ /* disable interrupts */
+ tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl);
+ if (napi_schedule_prep(&lp->napi))
+ __napi_schedule(&lp->napi);
+ else {
+ printk(KERN_ERR "%s: interrupt taken in poll\n",
+ dev->name);
+ BUG();
+ }
+ (void)tc_readl(&tr->Int_Src); /* flush */
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
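+
+/* Design note: DMA_IntMask doubles as a "poll in progress" flag; the hard
+ * interrupt sets it and schedules NAPI, and tc35815_poll() clears it again
+ * once the budget is no longer exhausted. */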
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void tc35815_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ tc35815_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+/* We have good packet(s); get them out of the buffers. */
+static int
+tc35815_rx(struct net_device *dev, int limit)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ unsigned int fdctl;
+ int i;
+ int received = 0;
+
+ while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) {
+ int status = le32_to_cpu(lp->rfd_cur->fd.FDStat);
+ int pkt_len = fdctl & FD_FDLength_MASK;
+ int bd_count = (fdctl & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT;
+#ifdef DEBUG
+ struct RxFD *next_rfd;
+#endif
+#if (RX_CTL_CMD & Rx_StripCRC) == 0
+ pkt_len -= ETH_FCS_LEN;
+#endif
+
+ if (netif_msg_rx_status(lp))
+ dump_rxfd(lp->rfd_cur);
+ if (status & Rx_Good) {
+ struct sk_buff *skb;
+ unsigned char *data;
+ int cur_bd;
+
+ if (--limit < 0)
+ break;
+ BUG_ON(bd_count > 1);
+ cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl)
+ & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
+#ifdef DEBUG
+ if (cur_bd >= RX_BUF_NUM) {
+ printk("%s: invalid BDID.\n", dev->name);
+ panic_queues(dev);
+ }
+ BUG_ON(lp->rx_skbs[cur_bd].skb_dma !=
+ (le32_to_cpu(lp->rfd_cur->bd[0].BuffData) & ~3));
+ if (!lp->rx_skbs[cur_bd].skb) {
+ printk("%s: NULL skb.\n", dev->name);
+ panic_queues(dev);
+ }
+#else
+ BUG_ON(cur_bd >= RX_BUF_NUM);
+#endif
+ skb = lp->rx_skbs[cur_bd].skb;
+ prefetch(skb->data);
+ lp->rx_skbs[cur_bd].skb = NULL;
+ pci_unmap_single(lp->pci_dev,
+ lp->rx_skbs[cur_bd].skb_dma,
+ RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
+ memmove(skb->data, skb->data - NET_IP_ALIGN,
+ pkt_len);
+ data = skb_put(skb, pkt_len);
+ if (netif_msg_pktdata(lp))
+ print_eth(data);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ received++;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ } else {
+ dev->stats.rx_errors++;
+ if (netif_msg_rx_err(lp))
+ dev_info(&dev->dev, "Rx error (status %x)\n",
+ status & Rx_Stat_Mask);
+ /* WORKAROUND: LongErr and CRCErr together mean Overflow. */
+ if ((status & Rx_LongErr) && (status & Rx_CRCErr)) {
+ status &= ~(Rx_LongErr|Rx_CRCErr);
+ status |= Rx_Over;
+ }
+ if (status & Rx_LongErr)
+ dev->stats.rx_length_errors++;
+ if (status & Rx_Over)
+ dev->stats.rx_fifo_errors++;
+ if (status & Rx_CRCErr)
+ dev->stats.rx_crc_errors++;
+ if (status & Rx_Align)
+ dev->stats.rx_frame_errors++;
+ }
+
+ if (bd_count > 0) {
+ /* put Free Buffer back to controller */
+ int bdctl = le32_to_cpu(lp->rfd_cur->bd[bd_count - 1].BDCtl);
+ unsigned char id =
+ (bdctl & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
+#ifdef DEBUG
+ if (id >= RX_BUF_NUM) {
+ printk("%s: invalid BDID.\n", dev->name);
+ panic_queues(dev);
+ }
+#else
+ BUG_ON(id >= RX_BUF_NUM);
+#endif
+ /* free old buffers */
+ lp->fbl_count--;
+ while (lp->fbl_count < RX_BUF_NUM) {
+ unsigned char curid =
+ (id + 1 + lp->fbl_count) % RX_BUF_NUM;
+ struct BDesc *bd = &lp->fbl_ptr->bd[curid];
+#ifdef DEBUG
+ bdctl = le32_to_cpu(bd->BDCtl);
+ if (bdctl & BD_CownsBD) {
+ printk("%s: Freeing invalid BD.\n",
+ dev->name);
+ panic_queues(dev);
+ }
+#endif
+ /* pass BD to controller */
+ if (!lp->rx_skbs[curid].skb) {
+ lp->rx_skbs[curid].skb =
+ alloc_rxbuf_skb(dev,
+ lp->pci_dev,
+ &lp->rx_skbs[curid].skb_dma);
+ if (!lp->rx_skbs[curid].skb)
+ break; /* try on next reception */
+ bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma);
+ }
+ /* Note: BDLength was modified by chip. */
+ bd->BDCtl = cpu_to_le32(BD_CownsBD |
+ (curid << BD_RxBDID_SHIFT) |
+ RX_BUF_SIZE);
+ lp->fbl_count++;
+ }
+ }
+
+ /* put RxFD back to controller */
+#ifdef DEBUG
+ next_rfd = fd_bus_to_virt(lp,
+ le32_to_cpu(lp->rfd_cur->fd.FDNext));
+ if (next_rfd < lp->rfd_base || next_rfd > lp->rfd_limit) {
+ printk("%s: RxFD FDNext invalid.\n", dev->name);
+ panic_queues(dev);
+ }
+#endif
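+ /* A frame occupies (bd_count + 1) / 2 + 1 fixed-size FD slots
+ * in the ring; the slot size appears to cover the FD header
+ * plus two BDs (an assumption drawn from this arithmetic, not
+ * from the datasheet). Hand each slot back to the controller.
+ */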
+ for (i = 0; i < (bd_count + 1) / 2 + 1; i++) {
+ /* pass FD to controller */
+#ifdef DEBUG
+ lp->rfd_cur->fd.FDNext = cpu_to_le32(0xdeaddead);
+#else
+ lp->rfd_cur->fd.FDNext = cpu_to_le32(FD_Next_EOL);
+#endif
+ lp->rfd_cur->fd.FDCtl = cpu_to_le32(FD_CownsFD);
+ lp->rfd_cur++;
+ }
+ if (lp->rfd_cur > lp->rfd_limit)
+ lp->rfd_cur = lp->rfd_base;
+#ifdef DEBUG
+ if (lp->rfd_cur != next_rfd)
+ printk("rfd_cur = %p, next_rfd %p\n",
+ lp->rfd_cur, next_rfd);
+#endif
+ }
+
+ return received;
+}
+
+static int tc35815_poll(struct napi_struct *napi, int budget)
+{
+ struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi);
+ struct net_device *dev = lp->dev;
+ struct tc35815_regs __iomem *tr =
+ (struct tc35815_regs __iomem *)dev->base_addr;
+ int received = 0, handled;
+ u32 status;
+
+ spin_lock(&lp->rx_lock);
+ status = tc_readl(&tr->Int_Src);
+ do {
+ /* BLEx, FDAEx will be cleared later */
+ tc_writel(status & ~(Int_BLEx | Int_FDAEx),
+ &tr->Int_Src); /* write to clear */
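+ /* Clearing BLEx/FDAEx only after tc35815_do_interrupt() has
+ * had a chance to replenish the buffers keeps those exhaustion
+ * conditions from re-asserting immediately (rationale inferred
+ * from this ordering).
+ */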
+
+ handled = tc35815_do_interrupt(dev, status, budget - received);
+ if (status & (Int_BLEx | Int_FDAEx))
+ tc_writel(status & (Int_BLEx | Int_FDAEx),
+ &tr->Int_Src);
+ if (handled >= 0) {
+ received += handled;
+ if (received >= budget)
+ break;
+ }
+ status = tc_readl(&tr->Int_Src);
+ } while (status);
+ spin_unlock(&lp->rx_lock);
+
+ if (received < budget) {
+ napi_complete(napi);
+ /* enable interrupts */
+ tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl);
+ }
+ return received;
+}
+
+#define TX_STA_ERR (Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr)
+
+static void
+tc35815_check_tx_stat(struct net_device *dev, int status)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ const char *msg = NULL;
+
+ /* count collisions */
+ if (status & Tx_ExColl)
+ dev->stats.collisions += 16;
+ if (status & Tx_TxColl_MASK)
+ dev->stats.collisions += status & Tx_TxColl_MASK;
+
+ /* TX4939 does not have NCarr */
+ if (lp->chiptype == TC35815_TX4939)
+ status &= ~Tx_NCarr;
+ /* WORKAROUND: ignore LostCrS in full duplex operation */
+ if (!lp->link || lp->duplex == DUPLEX_FULL)
+ status &= ~Tx_NCarr;
+
+ if (!(status & TX_STA_ERR)) {
+ /* no error. */
+ dev->stats.tx_packets++;
+ return;
+ }
+
+ dev->stats.tx_errors++;
+ if (status & Tx_ExColl) {
+ dev->stats.tx_aborted_errors++;
+ msg = "Excessive Collision.";
+ }
+ if (status & Tx_Under) {
+ dev->stats.tx_fifo_errors++;
+ msg = "Tx FIFO Underrun.";
+ if (lp->lstats.tx_underrun < TX_THRESHOLD_KEEP_LIMIT) {
+ lp->lstats.tx_underrun++;
+ if (lp->lstats.tx_underrun >= TX_THRESHOLD_KEEP_LIMIT) {
+ struct tc35815_regs __iomem *tr =
+ (struct tc35815_regs __iomem *)dev->base_addr;
+ tc_writel(TX_THRESHOLD_MAX, &tr->TxThrsh);
+ msg = "Tx FIFO Underrun.Change Tx threshold to max.";
+ }
+ }
+ }
+ if (status & Tx_Defer) {
+ dev->stats.tx_fifo_errors++;
+ msg = "Excessive Deferral.";
+ }
+ if (status & Tx_NCarr) {
+ dev->stats.tx_carrier_errors++;
+ msg = "Lost Carrier Sense.";
+ }
+ if (status & Tx_LateColl) {
+ dev->stats.tx_aborted_errors++;
+ msg = "Late Collision.";
+ }
+ if (status & Tx_TxPar) {
+ dev->stats.tx_fifo_errors++;
+ msg = "Transmit Parity Error.";
+ }
+ if (status & Tx_SQErr) {
+ dev->stats.tx_heartbeat_errors++;
+ msg = "Signal Quality Error.";
+ }
+ if (msg && netif_msg_tx_err(lp))
+ printk(KERN_WARNING "%s: %s (%#x)\n", dev->name, msg, status);
+}
+
+/* This handles TX complete events posted by the device
+ * via interrupts.
+ */
+static void
+tc35815_txdone(struct net_device *dev)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ struct TxFD *txfd;
+ unsigned int fdctl;
+
+ txfd = &lp->tfd_base[lp->tfd_end];
+ while (lp->tfd_start != lp->tfd_end &&
+ !((fdctl = le32_to_cpu(txfd->fd.FDCtl)) & FD_CownsFD)) {
+ int status = le32_to_cpu(txfd->fd.FDStat);
+ struct sk_buff *skb;
+ unsigned long fdnext = le32_to_cpu(txfd->fd.FDNext);
+ u32 fdsystem = le32_to_cpu(txfd->fd.FDSystem);
+
+ if (netif_msg_tx_done(lp)) {
+ printk("%s: complete TxFD.\n", dev->name);
+ dump_txfd(txfd);
+ }
+ tc35815_check_tx_stat(dev, status);
+
+ skb = fdsystem != 0xffffffff ?
+ lp->tx_skbs[fdsystem].skb : NULL;
+#ifdef DEBUG
+ if (lp->tx_skbs[lp->tfd_end].skb != skb) {
+ printk("%s: tx_skbs mismatch.\n", dev->name);
+ panic_queues(dev);
+ }
+#else
+ BUG_ON(lp->tx_skbs[lp->tfd_end].skb != skb);
+#endif
+ if (skb) {
+ dev->stats.tx_bytes += skb->len;
+ pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE);
+ lp->tx_skbs[lp->tfd_end].skb = NULL;
+ lp->tx_skbs[lp->tfd_end].skb_dma = 0;
+ dev_kfree_skb_any(skb);
+ }
+ txfd->fd.FDSystem = cpu_to_le32(0xffffffff);
+
+ lp->tfd_end = (lp->tfd_end + 1) % TX_FD_NUM;
+ txfd = &lp->tfd_base[lp->tfd_end];
+#ifdef DEBUG
+ if ((fdnext & ~FD_Next_EOL) != fd_virt_to_bus(lp, txfd)) {
+ printk("%s: TxFD FDNext invalid.\n", dev->name);
+ panic_queues(dev);
+ }
+#endif
+ if (fdnext & FD_Next_EOL) {
+ /* The DMA transmitter stops at this EOL-marked FD;
+ * restart it below if more FDs were queued meanwhile. */
+ if (lp->tfd_end != lp->tfd_start) {
+ struct tc35815_regs __iomem *tr =
+ (struct tc35815_regs __iomem *)dev->base_addr;
+ int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM;
+ struct TxFD *txhead = &lp->tfd_base[head];
+ int qlen = (lp->tfd_start + TX_FD_NUM
+ - lp->tfd_end) % TX_FD_NUM;
+
+#ifdef DEBUG
+ if (!(le32_to_cpu(txfd->fd.FDCtl) & FD_CownsFD)) {
+ printk("%s: TxFD FDCtl invalid.\n", dev->name);
+ panic_queues(dev);
+ }
+#endif
+ /* log max queue length */
+ if (lp->lstats.max_tx_qlen < qlen)
+ lp->lstats.max_tx_qlen = qlen;
+
+ /* start DMA Transmitter again */
+ txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
+ txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
+ if (netif_msg_tx_queued(lp)) {
+ printk("%s: start TxFD on queue.\n",
+ dev->name);
+ dump_txfd(txfd);
+ }
+ tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr);
+ }
+ break;
+ }
+ }
+
+ /* If we had stopped the queue due to a "tx full"
+ * condition, and space has now been made available,
+ * wake up the queue.
+ */
+ if (netif_queue_stopped(dev) && !tc35815_tx_full(dev))
+ netif_wake_queue(dev);
+}
+
+/* The inverse routine to tc35815_open(). */
+static int
+tc35815_close(struct net_device *dev)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ napi_disable(&lp->napi);
+ if (lp->phy_dev)
+ phy_stop(lp->phy_dev);
+ cancel_work_sync(&lp->restart_work);
+
+ /* Flush the Tx and disable Rx here. */
+ tc35815_chip_reset(dev);
+ free_irq(dev->irq, dev);
+
+ tc35815_free_queues(dev);
+
+ return 0;
+}
+
+/*
+ * Get the current statistics.
+ * This may be called with the card open or closed.
+ */
+static struct net_device_stats *tc35815_get_stats(struct net_device *dev)
+{
+ struct tc35815_regs __iomem *tr =
+ (struct tc35815_regs __iomem *)dev->base_addr;
+ if (netif_running(dev))
+ /* Update the statistics from the device registers. */
+ dev->stats.rx_missed_errors += tc_readl(&tr->Miss_Cnt);
+
+ return &dev->stats;
+}
+
+static void tc35815_set_cam_entry(struct net_device *dev, int index, unsigned char *addr)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ struct tc35815_regs __iomem *tr =
+ (struct tc35815_regs __iomem *)dev->base_addr;
+ int cam_index = index * 6;
+ u32 cam_data;
+ u32 saved_addr;
+
+ saved_addr = tc_readl(&tr->CAM_Adr);
+
+ if (netif_msg_hw(lp))
+ printk(KERN_DEBUG "%s: CAM %d: %pM\n",
+ dev->name, index, addr);
+ if (index & 1) {
+ /* read modify write */
+ tc_writel(cam_index - 2, &tr->CAM_Adr);
+ cam_data = tc_readl(&tr->CAM_Data) & 0xffff0000;
+ cam_data |= addr[0] << 8 | addr[1];
+ tc_writel(cam_data, &tr->CAM_Data);
+ /* write whole word */
+ tc_writel(cam_index + 2, &tr->CAM_Adr);
+ cam_data = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
+ tc_writel(cam_data, &tr->CAM_Data);
+ } else {
+ /* write whole word */
+ tc_writel(cam_index, &tr->CAM_Adr);
+ cam_data = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
+ tc_writel(cam_data, &tr->CAM_Data);
+ /* read modify write */
+ tc_writel(cam_index + 4, &tr->CAM_Adr);
+ cam_data = tc_readl(&tr->CAM_Data) & 0x0000ffff;
+ cam_data |= addr[4] << 24 | (addr[5] << 16);
+ tc_writel(cam_data, &tr->CAM_Data);
+ }
+
+ tc_writel(saved_addr, &tr->CAM_Adr);
+}
+
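+/* CAM entries are 6 bytes packed into 32-bit words, so a pair of
+ * entries spans three words and each entry shares one word with its
+ * neighbour -- hence the mix of whole-word writes and
+ * read-modify-write accesses in tc35815_set_cam_entry() above.
+ */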
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ * num_addrs == -1 Promiscuous mode, receive all packets
+ * num_addrs == 0 Normal mode, clear multicast list
+ * num_addrs > 0 Multicast mode, receive normal and MC packets,
+ * and do best-effort filtering.
+ */
+static void
+tc35815_set_multicast_list(struct net_device *dev)
+{
+ struct tc35815_regs __iomem *tr =
+ (struct tc35815_regs __iomem *)dev->base_addr;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* With some (all?) 100M half-duplex hubs, the controller hangs
+ * if promiscuous mode is enabled before link-up... */
+ struct tc35815_local *lp = netdev_priv(dev);
+
+ if (!lp->link)
+ return;
+ /* Enable promiscuous mode */
+ tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(dev) > CAM_ENTRY_MAX - 3) {
+ /* CAM 0, 1, 20 are reserved. */
+ /* Disable promiscuous mode, use normal mode. */
+ tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
+ } else if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+ int i;
+ int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);
+
+ tc_writel(0, &tr->CAM_Ctl);
+ /* Walk the address list, and load the filter */
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ /* entries 0 and 1 are reserved. */
+ tc35815_set_cam_entry(dev, i + 2, ha->addr);
+ ena_bits |= CAM_Ena_Bit(i + 2);
+ i++;
+ }
+ tc_writel(ena_bits, &tr->CAM_Ena);
+ tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
+ } else {
+ tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
+ tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
+ }
+}
+
+static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ strcpy(info->driver, MODNAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(lp->pci_dev));
+}
+
+static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+
+ if (!lp->phy_dev)
+ return -ENODEV;
+ return phy_ethtool_gset(lp->phy_dev, cmd);
+}
+
+static int tc35815_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+
+ if (!lp->phy_dev)
+ return -ENODEV;
+ return phy_ethtool_sset(lp->phy_dev, cmd);
+}
+
+static u32 tc35815_get_msglevel(struct net_device *dev)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ return lp->msg_enable;
+}
+
+static void tc35815_set_msglevel(struct net_device *dev, u32 datum)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ lp->msg_enable = datum;
+}
+
+static int tc35815_get_sset_count(struct net_device *dev, int sset)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return sizeof(lp->lstats) / sizeof(int);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void tc35815_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ data[0] = lp->lstats.max_tx_qlen;
+ data[1] = lp->lstats.tx_ints;
+ data[2] = lp->lstats.rx_ints;
+ data[3] = lp->lstats.tx_underrun;
+}
+
+static struct {
+ const char str[ETH_GSTRING_LEN];
+} ethtool_stats_keys[] = {
+ { "max_tx_qlen" },
+ { "tx_ints" },
+ { "rx_ints" },
+ { "tx_underrun" },
+};
+
+static void tc35815_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
+}
+
+static const struct ethtool_ops tc35815_ethtool_ops = {
+ .get_drvinfo = tc35815_get_drvinfo,
+ .get_settings = tc35815_get_settings,
+ .set_settings = tc35815_set_settings,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = tc35815_get_msglevel,
+ .set_msglevel = tc35815_set_msglevel,
+ .get_strings = tc35815_get_strings,
+ .get_sset_count = tc35815_get_sset_count,
+ .get_ethtool_stats = tc35815_get_ethtool_stats,
+};
+
+static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+ if (!lp->phy_dev)
+ return -ENODEV;
+ return phy_mii_ioctl(lp->phy_dev, rq, cmd);
+}
+
+static void tc35815_chip_reset(struct net_device *dev)
+{
+ struct tc35815_regs __iomem *tr =
+ (struct tc35815_regs __iomem *)dev->base_addr;
+ int i;
+ /* reset the controller */
+ tc_writel(MAC_Reset, &tr->MAC_Ctl);
+ udelay(4); /* 3200ns */
+ i = 0;
+ while (tc_readl(&tr->MAC_Ctl) & MAC_Reset) {
+ if (i++ > 100) {
+ printk(KERN_ERR "%s: MAC reset failed.\n", dev->name);
+ break;
+ }
+ mdelay(1);
+ }
+ tc_writel(0, &tr->MAC_Ctl);
+
+ /* initialize registers to default value */
+ tc_writel(0, &tr->DMA_Ctl);
+ tc_writel(0, &tr->TxThrsh);
+ tc_writel(0, &tr->TxPollCtr);
+ tc_writel(0, &tr->RxFragSize);
+ tc_writel(0, &tr->Int_En);
+ tc_writel(0, &tr->FDA_Bas);
+ tc_writel(0, &tr->FDA_Lim);
+ tc_writel(0xffffffff, &tr->Int_Src); /* Write 1 to clear */
+ tc_writel(0, &tr->CAM_Ctl);
+ tc_writel(0, &tr->Tx_Ctl);
+ tc_writel(0, &tr->Rx_Ctl);
+ tc_writel(0, &tr->CAM_Ena);
+ (void)tc_readl(&tr->Miss_Cnt); /* Read to clear */
+
+ /* initialize internal SRAM */
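+ /* (DMA_TestMode appears to route the CAM address/data ports to
+ * the controller's internal 4KB buffer SRAM -- an assumption
+ * drawn from the 0x1000-byte sweep below -- so zero every word.)
+ */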
+ tc_writel(DMA_TestMode, &tr->DMA_Ctl);
+ for (i = 0; i < 0x1000; i += 4) {
+ tc_writel(i, &tr->CAM_Adr);
+ tc_writel(0, &tr->CAM_Data);
+ }
+ tc_writel(0, &tr->DMA_Ctl);
+}
+
+static void tc35815_chip_init(struct net_device *dev)
+{
+ struct tc35815_local *lp = netdev_priv(dev);
+ struct tc35815_regs __iomem *tr =
+ (struct tc35815_regs __iomem *)dev->base_addr;
+ unsigned long txctl = TX_CTL_CMD;
+
+ /* load station address to CAM */
+ tc35815_set_cam_entry(dev, CAM_ENTRY_SOURCE, dev->dev_addr);
+
+ /* Enable CAM (broadcast and unicast) */
+ tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
+ tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
+
+ /* Use DMA_RxAlign_2 to make IP header 4-byte aligned. */
+ if (HAVE_DMA_RXALIGN(lp))
+ tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl);
+ else
+ tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl);
+ tc_writel(0, &tr->TxPollCtr); /* Batch mode */
+ tc_writel(TX_THRESHOLD, &tr->TxThrsh);
+ tc_writel(INT_EN_CMD, &tr->Int_En);
+
+ /* set queues */
+ tc_writel(fd_virt_to_bus(lp, lp->rfd_base), &tr->FDA_Bas);
+ tc_writel((unsigned long)lp->rfd_limit - (unsigned long)lp->rfd_base,
+ &tr->FDA_Lim);
+ /*
+ * Activation method:
+ * First, enable the MAC Transmitter and the DMA Receive circuits.
+ * Then enable the DMA Transmitter and the MAC Receive circuits.
+ */
+ tc_writel(fd_virt_to_bus(lp, lp->fbl_ptr), &tr->BLFrmPtr); /* start DMA receiver */
+ tc_writel(RX_CTL_CMD, &tr->Rx_Ctl); /* start MAC receiver */
+
+ /* start MAC transmitter */
+ /* TX4939 does not have EnLCarr */
+ if (lp->chiptype == TC35815_TX4939)
+ txctl &= ~Tx_EnLCarr;
+ /* WORKAROUND: ignore LostCrS in full duplex operation */
+ if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL)
+ txctl &= ~Tx_EnLCarr;
+ tc_writel(txctl, &tr->Tx_Ctl);
+}
+
+#ifdef CONFIG_PM
+static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tc35815_local *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ pci_save_state(pdev);
+ if (!netif_running(dev))
+ return 0;
+ netif_device_detach(dev);
+ if (lp->phy_dev)
+ phy_stop(lp->phy_dev);
+ spin_lock_irqsave(&lp->lock, flags);
+ tc35815_chip_reset(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ pci_set_power_state(pdev, PCI_D3hot);
+ return 0;
+}
+
+static int tc35815_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tc35815_local *lp = netdev_priv(dev);
+
+ pci_restore_state(pdev);
+ if (!netif_running(dev))
+ return 0;
+ pci_set_power_state(pdev, PCI_D0);
+ tc35815_restart(dev);
+ netif_carrier_off(dev);
+ if (lp->phy_dev)
+ phy_start(lp->phy_dev);
+ netif_device_attach(dev);
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct pci_driver tc35815_pci_driver = {
+ .name = MODNAME,
+ .id_table = tc35815_pci_tbl,
+ .probe = tc35815_init_one,
+ .remove = __devexit_p(tc35815_remove_one),
+#ifdef CONFIG_PM
+ .suspend = tc35815_suspend,
+ .resume = tc35815_resume,
+#endif
+};
+
+module_param_named(speed, options.speed, int, 0);
+MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps");
+module_param_named(duplex, options.duplex, int, 0);
+MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full");
+
+static int __init tc35815_init_module(void)
+{
+ return pci_register_driver(&tc35815_pci_driver);
+}
+
+static void __exit tc35815_cleanup_module(void)
+{
+ pci_unregister_driver(&tc35815_pci_driver);
+}
+
+module_init(tc35815_init_module);
+module_exit(tc35815_cleanup_module);
+
+MODULE_DESCRIPTION("TOSHIBA TC35815 PCI 10M/100M Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/tundra/Kconfig b/drivers/net/ethernet/tundra/Kconfig
new file mode 100644
index 000000000000..cf7d69b62c42
--- /dev/null
+++ b/drivers/net/ethernet/tundra/Kconfig
@@ -0,0 +1,29 @@
+#
+# Tundra network device configuration
+#
+
+config NET_VENDOR_TUNDRA
+ bool "Tundra devices"
+ default y
+ depends on TSI108_BRIDGE
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Tundra cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_TUNDRA
+
+config TSI108_ETH
+ tristate "Tundra TSI108 gigabit Ethernet support"
+ depends on TSI108_BRIDGE
+ ---help---
+ This driver supports Tundra TSI108 gigabit Ethernet ports.
+ To compile this driver as a module, choose M here: the module
+ will be called tsi108_eth.
+
+endif # NET_VENDOR_TUNDRA
diff --git a/drivers/net/ethernet/tundra/Makefile b/drivers/net/ethernet/tundra/Makefile
new file mode 100644
index 000000000000..439f6930235b
--- /dev/null
+++ b/drivers/net/ethernet/tundra/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Tundra network device drivers.
+#
+
+obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
new file mode 100644
index 000000000000..a03996cf88ed
--- /dev/null
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -0,0 +1,1728 @@
+/*******************************************************************************
+
+ Copyright(c) 2006 Tundra Semiconductor Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc., 59
+ Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+*******************************************************************************/
+
+/* This driver is based on the driver code originally developed
+ * for the Intel IOC80314 (ForestLake) Gigabit Ethernet by
+ * scott.wood@timesys.com
+ * Copyright (C) 2003 TimeSys Corporation
+ *
+ * Currently changes from original version are:
+ * - porting to Tsi108-based platform and kernel 2.6 (kong.lai@tundra.com)
+ * - modifications to handle two ports independently and support for
+ * additional PHY devices (alexandre.bounine@tundra.com)
+ * - Get hardware information from platform device. (tie-fei.zang@freescale.com)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/gfp.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/tsi108.h>
+
+#include "tsi108_eth.h"
+
+#define MII_READ_DELAY 10000 /* max link wait time in msec */
+
+#define TSI108_RXRING_LEN 256
+
+/* NOTE: The driver currently does not support receiving packets
+ * larger than the buffer size, so don't decrease this (unless you
+ * want to add such support).
+ */
+#define TSI108_RXBUF_SIZE 1536
+
+#define TSI108_TXRING_LEN 256
+
+#define TSI108_TX_INT_FREQ 64
+
+/* Check the phy status every half a second. */
+#define CHECK_PHY_INTERVAL (HZ/2)
+
+static int tsi108_init_one(struct platform_device *pdev);
+static int tsi108_ether_remove(struct platform_device *pdev);
+
+struct tsi108_prv_data {
+ void __iomem *regs; /* Base of normal regs */
+ void __iomem *phyregs; /* Base of register bank used for PHY access */
+
+ struct net_device *dev;
+ struct napi_struct napi;
+
+ unsigned int phy; /* Index of PHY for this interface */
+ unsigned int irq_num;
+ unsigned int id;
+ unsigned int phy_type;
+
+ struct timer_list timer;/* Timer that triggers the check phy function */
+ unsigned int rxtail; /* Next entry in rxring to read */
+ unsigned int rxhead; /* Next entry in rxring to give a new buffer */
+ unsigned int rxfree; /* Number of free, allocated RX buffers */
+
+ unsigned int rxpending; /* Non-zero if there are still descriptors
+ * to be processed from a previous descriptor
+ * interrupt condition that has been cleared */
+
+ unsigned int txtail; /* Next TX descriptor to check status on */
+ unsigned int txhead; /* Next TX descriptor to use */
+
+ /* Number of free TX descriptors. This could be calculated from
+ * txhead and txtail if one descriptor were left unused to disambiguate
+ * full and empty conditions, but it's simpler to just keep track
+ * explicitly. */
+
+ unsigned int txfree;
+
+ unsigned int phy_ok; /* The PHY is currently powered on. */
+
+ /* PHY status (duplex is 1 for half, 2 for full,
+ * so that the default 0 indicates that neither has
+ * yet been configured). */
+
+ unsigned int link_up;
+ unsigned int speed;
+ unsigned int duplex;
+
+ tx_desc *txring;
+ rx_desc *rxring;
+ struct sk_buff *txskbs[TSI108_TXRING_LEN];
+ struct sk_buff *rxskbs[TSI108_RXRING_LEN];
+
+ dma_addr_t txdma, rxdma;
+
+ /* txlock nests in misclock and phy_lock */
+
+ spinlock_t txlock, misclock;
+
+ /* stats is used to hold the upper bits of each hardware counter,
+ * and tmpstats is used to hold the full values for returning
+ * to the caller of get_stats(). They must be separate in case
+ * an overflow interrupt occurs before the stats are consumed.
+ */
+
+ struct net_device_stats stats;
+ struct net_device_stats tmpstats;
+
+ /* These stats are kept separate in hardware, thus require individual
+ * fields for handling carry. They are combined in get_stats.
+ */
+
+ unsigned long rx_fcs; /* Add to rx_frame_errors */
+ unsigned long rx_short_fcs; /* Add to rx_frame_errors */
+ unsigned long rx_long_fcs; /* Add to rx_frame_errors */
+ unsigned long rx_underruns; /* Add to rx_length_errors */
+ unsigned long rx_overruns; /* Add to rx_length_errors */
+
+ unsigned long tx_coll_abort; /* Add to tx_aborted_errors/collisions */
+ unsigned long tx_pause_drop; /* Add to tx_aborted_errors */
+
+ unsigned long mc_hash[16];
+ u32 msg_enable; /* debug message level */
+ struct mii_if_info mii_if;
+ unsigned int init_media;
+};
+
+/* Structure for a device driver */
+
+static struct platform_driver tsi_eth_driver = {
+ .probe = tsi108_init_one,
+ .remove = tsi108_ether_remove,
+ .driver = {
+ .name = "tsi-ethernet",
+ .owner = THIS_MODULE,
+ },
+};
+
+static void tsi108_timed_checker(unsigned long dev_ptr);
+
+static void dump_eth_one(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+
+ printk("Dumping %s...\n", dev->name);
+ printk("intstat %x intmask %x phy_ok %d"
+ " link %d speed %d duplex %d\n",
+ TSI_READ(TSI108_EC_INTSTAT),
+ TSI_READ(TSI108_EC_INTMASK), data->phy_ok,
+ data->link_up, data->speed, data->duplex);
+
+ printk("TX: head %d, tail %d, free %d, stat %x, estat %x, err %x\n",
+ data->txhead, data->txtail, data->txfree,
+ TSI_READ(TSI108_EC_TXSTAT),
+ TSI_READ(TSI108_EC_TXESTAT),
+ TSI_READ(TSI108_EC_TXERR));
+
+ printk("RX: head %d, tail %d, free %d, stat %x,"
+ " estat %x, err %x, pending %d\n\n",
+ data->rxhead, data->rxtail, data->rxfree,
+ TSI_READ(TSI108_EC_RXSTAT),
+ TSI_READ(TSI108_EC_RXESTAT),
+ TSI_READ(TSI108_EC_RXERR), data->rxpending);
+}
+
+/* Synchronization is needed between the thread and up/down events.
+ * Note that the PHY is accessed through the same registers for both
+ * interfaces, so this can't be made interface-specific.
+ */
+
+static DEFINE_SPINLOCK(phy_lock);
+
+static int tsi108_read_mii(struct tsi108_prv_data *data, int reg)
+{
+ unsigned i;
+
+ TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
+ (data->phy << TSI108_MAC_MII_ADDR_PHY) |
+ (reg << TSI108_MAC_MII_ADDR_REG));
+ TSI_WRITE_PHY(TSI108_MAC_MII_CMD, 0);
+ TSI_WRITE_PHY(TSI108_MAC_MII_CMD, TSI108_MAC_MII_CMD_READ);
+ for (i = 0; i < 100; i++) {
+ if (!(TSI_READ_PHY(TSI108_MAC_MII_IND) &
+ (TSI108_MAC_MII_IND_NOTVALID | TSI108_MAC_MII_IND_BUSY)))
+ break;
+ udelay(10);
+ }
+
+ if (i == 100)
+ return 0xffff;
+ else
+ return TSI_READ_PHY(TSI108_MAC_MII_DATAIN);
+}
+
+static void tsi108_write_mii(struct tsi108_prv_data *data,
+ int reg, u16 val)
+{
+ unsigned i = 100;
+ TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
+ (data->phy << TSI108_MAC_MII_ADDR_PHY) |
+ (reg << TSI108_MAC_MII_ADDR_REG));
+ TSI_WRITE_PHY(TSI108_MAC_MII_DATAOUT, val);
+ while (i--) {
+ if(!(TSI_READ_PHY(TSI108_MAC_MII_IND) &
+ TSI108_MAC_MII_IND_BUSY))
+ break;
+ udelay(10);
+ }
+}
+
+static int tsi108_mdio_read(struct net_device *dev, int addr, int reg)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ return tsi108_read_mii(data, reg);
+}
+
+static void tsi108_mdio_write(struct net_device *dev, int addr, int reg, int val)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ tsi108_write_mii(data, reg, val);
+}
+
+static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
+ int reg, u16 val)
+{
+ unsigned i = 1000;
+ TSI_WRITE(TSI108_MAC_MII_ADDR,
+ (0x1e << TSI108_MAC_MII_ADDR_PHY)
+ | (reg << TSI108_MAC_MII_ADDR_REG));
+ TSI_WRITE(TSI108_MAC_MII_DATAOUT, val);
+ while(i--) {
+ if(!(TSI_READ(TSI108_MAC_MII_IND) & TSI108_MAC_MII_IND_BUSY))
+ return;
+ udelay(10);
+ }
+ printk(KERN_ERR "%s function time out\n", __func__);
+}
+
+static int mii_speed(struct mii_if_info *mii)
+{
+ int advert, lpa, val, media;
+ int lpa2 = 0;
+ int speed;
+
+ if (!mii_link_ok(mii))
+ return 0;
+
+ val = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_BMSR);
+ if ((val & BMSR_ANEGCOMPLETE) == 0)
+ return 0;
+
+ advert = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_ADVERTISE);
+ lpa = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_LPA);
+ media = mii_nway_result(advert & lpa);
+
+ if (mii->supports_gmii)
+ lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000);
+
+ speed = lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 :
+ (media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? 100 : 10);
+ return speed;
+}
+
+static void tsi108_check_phy(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ u32 mac_cfg2_reg, portctrl_reg;
+ u32 duplex;
+ u32 speed;
+ unsigned long flags;
+
+ spin_lock_irqsave(&phy_lock, flags);
+
+ if (!data->phy_ok)
+ goto out;
+
+ duplex = mii_check_media(&data->mii_if, netif_msg_link(data), data->init_media);
+ data->init_media = 0;
+
+ if (netif_carrier_ok(dev)) {
+
+ speed = mii_speed(&data->mii_if);
+
+ if ((speed != data->speed) || duplex) {
+
+ mac_cfg2_reg = TSI_READ(TSI108_MAC_CFG2);
+ portctrl_reg = TSI_READ(TSI108_EC_PORTCTRL);
+
+ mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK;
+
+ if (speed == 1000) {
+ mac_cfg2_reg |= TSI108_MAC_CFG2_GIG;
+ portctrl_reg &= ~TSI108_EC_PORTCTRL_NOGIG;
+ } else {
+ mac_cfg2_reg |= TSI108_MAC_CFG2_NOGIG;
+ portctrl_reg |= TSI108_EC_PORTCTRL_NOGIG;
+ }
+
+ data->speed = speed;
+
+ if (data->mii_if.full_duplex) {
+ mac_cfg2_reg |= TSI108_MAC_CFG2_FULLDUPLEX;
+ portctrl_reg &= ~TSI108_EC_PORTCTRL_HALFDUPLEX;
+ data->duplex = 2;
+ } else {
+ mac_cfg2_reg &= ~TSI108_MAC_CFG2_FULLDUPLEX;
+ portctrl_reg |= TSI108_EC_PORTCTRL_HALFDUPLEX;
+ data->duplex = 1;
+ }
+
+ TSI_WRITE(TSI108_MAC_CFG2, mac_cfg2_reg);
+ TSI_WRITE(TSI108_EC_PORTCTRL, portctrl_reg);
+ }
+
+ if (data->link_up == 0) {
+ /* The manual says it can take 3-4 usecs for the speed change
+ * to take effect.
+ */
+ udelay(5);
+
+ spin_lock(&data->txlock);
+ if (is_valid_ether_addr(dev->dev_addr) && data->txfree)
+ netif_wake_queue(dev);
+
+ data->link_up = 1;
+ spin_unlock(&data->txlock);
+ }
+ } else {
+ if (data->link_up == 1) {
+ netif_stop_queue(dev);
+ data->link_up = 0;
+ printk(KERN_NOTICE "%s : link is down\n", dev->name);
+ }
+
+ goto out;
+ }
+
+
+out:
+ spin_unlock_irqrestore(&phy_lock, flags);
+}
+
+static inline void
+tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
+ unsigned long *upper)
+{
+ if (carry & carry_bit)
+ *upper += carry_shift;
+}
+
+static void tsi108_stat_carry(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ u32 carry1, carry2;
+
+ spin_lock_irq(&data->misclock);
+
+ carry1 = TSI_READ(TSI108_STAT_CARRY1);
+ carry2 = TSI_READ(TSI108_STAT_CARRY2);
+
+ TSI_WRITE(TSI108_STAT_CARRY1, carry1);
+ TSI_WRITE(TSI108_STAT_CARRY2, carry2);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXBYTES,
+ TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXPKTS,
+ TSI108_STAT_RXPKTS_CARRY,
+ &data->stats.rx_packets);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFCS,
+ TSI108_STAT_RXFCS_CARRY, &data->rx_fcs);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXMCAST,
+ TSI108_STAT_RXMCAST_CARRY,
+ &data->stats.multicast);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXALIGN,
+ TSI108_STAT_RXALIGN_CARRY,
+ &data->stats.rx_frame_errors);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXLENGTH,
+ TSI108_STAT_RXLENGTH_CARRY,
+ &data->stats.rx_length_errors);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXRUNT,
+ TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJUMBO,
+ TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFRAG,
+ TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJABBER,
+ TSI108_STAT_RXJABBER_CARRY, &data->rx_long_fcs);
+
+ tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXDROP,
+ TSI108_STAT_RXDROP_CARRY,
+ &data->stats.rx_missed_errors);
+
+ tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXBYTES,
+ TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
+
+ tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPKTS,
+ TSI108_STAT_TXPKTS_CARRY,
+ &data->stats.tx_packets);
+
+ tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXDEF,
+ TSI108_STAT_TXEXDEF_CARRY,
+ &data->stats.tx_aborted_errors);
+
+ tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXCOL,
+ TSI108_STAT_TXEXCOL_CARRY, &data->tx_coll_abort);
+
+ tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXTCOL,
+ TSI108_STAT_TXTCOL_CARRY,
+ &data->stats.collisions);
+
+ tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPAUSE,
+ TSI108_STAT_TXPAUSEDROP_CARRY,
+ &data->tx_pause_drop);
+
+ spin_unlock_irq(&data->misclock);
+}
+
+/* Read a stat counter atomically with respect to carries.
+ * data->misclock must be held.
+ */
+static inline unsigned long
+tsi108_read_stat(struct tsi108_prv_data * data, int reg, int carry_bit,
+ int carry_shift, unsigned long *upper)
+{
+ int carryreg;
+ unsigned long val;
+
+ if (reg < 0xb0)
+ carryreg = TSI108_STAT_CARRY1;
+ else
+ carryreg = TSI108_STAT_CARRY2;
+
+ again:
+ val = TSI_READ(reg) | *upper;
+
+ /* Check to see if it overflowed, but the interrupt hasn't
+ * been serviced yet. If so, handle the carry here, and
+ * try again.
+ */
+
+ if (unlikely(TSI_READ(carryreg) & carry_bit)) {
+ *upper += carry_shift;
+ TSI_WRITE(carryreg, carry_bit);
+ goto again;
+ }
+
+ return val;
+}
+
+static struct net_device_stats *tsi108_get_stats(struct net_device *dev)
+{
+ unsigned long excol;
+
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ spin_lock_irq(&data->misclock);
+
+ data->tmpstats.rx_packets =
+ tsi108_read_stat(data, TSI108_STAT_RXPKTS,
+ TSI108_STAT_CARRY1_RXPKTS,
+ TSI108_STAT_RXPKTS_CARRY, &data->stats.rx_packets);
+
+ data->tmpstats.tx_packets =
+ tsi108_read_stat(data, TSI108_STAT_TXPKTS,
+ TSI108_STAT_CARRY2_TXPKTS,
+ TSI108_STAT_TXPKTS_CARRY, &data->stats.tx_packets);
+
+ data->tmpstats.rx_bytes =
+ tsi108_read_stat(data, TSI108_STAT_RXBYTES,
+ TSI108_STAT_CARRY1_RXBYTES,
+ TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
+
+ data->tmpstats.tx_bytes =
+ tsi108_read_stat(data, TSI108_STAT_TXBYTES,
+ TSI108_STAT_CARRY2_TXBYTES,
+ TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
+
+ data->tmpstats.multicast =
+ tsi108_read_stat(data, TSI108_STAT_RXMCAST,
+ TSI108_STAT_CARRY1_RXMCAST,
+ TSI108_STAT_RXMCAST_CARRY, &data->stats.multicast);
+
+ excol = tsi108_read_stat(data, TSI108_STAT_TXEXCOL,
+ TSI108_STAT_CARRY2_TXEXCOL,
+ TSI108_STAT_TXEXCOL_CARRY,
+ &data->tx_coll_abort);
+
+ data->tmpstats.collisions =
+ tsi108_read_stat(data, TSI108_STAT_TXTCOL,
+ TSI108_STAT_CARRY2_TXTCOL,
+ TSI108_STAT_TXTCOL_CARRY, &data->stats.collisions);
+
+ data->tmpstats.collisions += excol;
+
+ data->tmpstats.rx_length_errors =
+ tsi108_read_stat(data, TSI108_STAT_RXLENGTH,
+ TSI108_STAT_CARRY1_RXLENGTH,
+ TSI108_STAT_RXLENGTH_CARRY,
+ &data->stats.rx_length_errors);
+
+ data->tmpstats.rx_length_errors +=
+ tsi108_read_stat(data, TSI108_STAT_RXRUNT,
+ TSI108_STAT_CARRY1_RXRUNT,
+ TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
+
+ data->tmpstats.rx_length_errors +=
+ tsi108_read_stat(data, TSI108_STAT_RXJUMBO,
+ TSI108_STAT_CARRY1_RXJUMBO,
+ TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
+
+ data->tmpstats.rx_frame_errors =
+ tsi108_read_stat(data, TSI108_STAT_RXALIGN,
+ TSI108_STAT_CARRY1_RXALIGN,
+ TSI108_STAT_RXALIGN_CARRY,
+ &data->stats.rx_frame_errors);
+
+ data->tmpstats.rx_frame_errors +=
+ tsi108_read_stat(data, TSI108_STAT_RXFCS,
+ TSI108_STAT_CARRY1_RXFCS, TSI108_STAT_RXFCS_CARRY,
+ &data->rx_fcs);
+
+ data->tmpstats.rx_frame_errors +=
+ tsi108_read_stat(data, TSI108_STAT_RXFRAG,
+ TSI108_STAT_CARRY1_RXFRAG,
+ TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
+
+ data->tmpstats.rx_missed_errors =
+ tsi108_read_stat(data, TSI108_STAT_RXDROP,
+ TSI108_STAT_CARRY1_RXDROP,
+ TSI108_STAT_RXDROP_CARRY,
+ &data->stats.rx_missed_errors);
+
+ /* These three are maintained by software. */
+ data->tmpstats.rx_fifo_errors = data->stats.rx_fifo_errors;
+ data->tmpstats.rx_crc_errors = data->stats.rx_crc_errors;
+
+ data->tmpstats.tx_aborted_errors =
+ tsi108_read_stat(data, TSI108_STAT_TXEXDEF,
+ TSI108_STAT_CARRY2_TXEXDEF,
+ TSI108_STAT_TXEXDEF_CARRY,
+ &data->stats.tx_aborted_errors);
+
+ data->tmpstats.tx_aborted_errors +=
+ tsi108_read_stat(data, TSI108_STAT_TXPAUSEDROP,
+ TSI108_STAT_CARRY2_TXPAUSE,
+ TSI108_STAT_TXPAUSEDROP_CARRY,
+ &data->tx_pause_drop);
+
+ data->tmpstats.tx_aborted_errors += excol;
+
+ data->tmpstats.tx_errors = data->tmpstats.tx_aborted_errors;
+ data->tmpstats.rx_errors = data->tmpstats.rx_length_errors +
+ data->tmpstats.rx_crc_errors +
+ data->tmpstats.rx_frame_errors +
+ data->tmpstats.rx_fifo_errors + data->tmpstats.rx_missed_errors;
+
+ spin_unlock_irq(&data->misclock);
+ return &data->tmpstats;
+}
+
+static void tsi108_restart_rx(struct tsi108_prv_data * data, struct net_device *dev)
+{
+ TSI_WRITE(TSI108_EC_RXQ_PTRHIGH,
+ TSI108_EC_RXQ_PTRHIGH_VALID);
+
+ TSI_WRITE(TSI108_EC_RXCTRL, TSI108_EC_RXCTRL_GO
+ | TSI108_EC_RXCTRL_QUEUE0);
+}
+
+static void tsi108_restart_tx(struct tsi108_prv_data * data)
+{
+ TSI_WRITE(TSI108_EC_TXQ_PTRHIGH,
+ TSI108_EC_TXQ_PTRHIGH_VALID);
+
+ TSI_WRITE(TSI108_EC_TXCTRL, TSI108_EC_TXCTRL_IDLEINT |
+ TSI108_EC_TXCTRL_GO | TSI108_EC_TXCTRL_QUEUE0);
+}
+
+/* txlock must be held by caller, with IRQs disabled, and
+ * with permission to re-enable them when the lock is dropped.
+ */
+static void tsi108_complete_tx(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ int tx;
+ struct sk_buff *skb;
+ int release = 0;
+
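+ /* txhead == txtail is ambiguous between a full and an empty ring,
+ * so !txfree is checked as well to keep draining a full ring. */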
+ while (!data->txfree || data->txhead != data->txtail) {
+ tx = data->txtail;
+
+ if (data->txring[tx].misc & TSI108_TX_OWN)
+ break;
+
+ skb = data->txskbs[tx];
+
+ if (!(data->txring[tx].misc & TSI108_TX_OK))
+ printk("%s: bad tx packet, misc %x\n",
+ dev->name, data->txring[tx].misc);
+
+ data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
+ data->txfree++;
+
+ if (data->txring[tx].misc & TSI108_TX_EOF) {
+ dev_kfree_skb_any(skb);
+ release++;
+ }
+ }
+
+ if (release) {
+ if (is_valid_ether_addr(dev->dev_addr) && data->link_up)
+ netif_wake_queue(dev);
+ }
+}
+
+static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ int frags = skb_shinfo(skb)->nr_frags + 1;
+ int i;
+
+ if (!data->phy_ok && net_ratelimit())
+ printk(KERN_ERR "%s: Transmit while PHY is down!\n", dev->name);
+
+ if (!data->link_up) {
+ printk(KERN_ERR "%s: Transmit while link is down!\n",
+ dev->name);
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (data->txfree < MAX_SKB_FRAGS + 1) {
+ netif_stop_queue(dev);
+
+ if (net_ratelimit())
+ printk(KERN_ERR "%s: Transmit with full tx ring!\n",
+ dev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (data->txfree - frags < MAX_SKB_FRAGS + 1) {
+ netif_stop_queue(dev);
+ }
+
+ spin_lock_irq(&data->txlock);
+
+ for (i = 0; i < frags; i++) {
+ int misc = 0;
+ int tx = data->txhead;
+
+ /* This is done to mark every TSI108_TX_INT_FREQ tx buffers with
+ * the interrupt bit. TX descriptor-complete interrupts are
+ * enabled when the queue fills up, and masked when there is
+ * still free space. This way, when saturating the outbound
+ * link, the tx interrupts are kept to a reasonable level.
+ * When the queue is not full, reclamation of skbs still occurs
+ * as new packets are transmitted, or on a queue-empty
+ * interrupt.
+ */
+
+ if ((tx % TSI108_TX_INT_FREQ == 0) &&
+ ((TSI108_TXRING_LEN - data->txfree) >= TSI108_TX_INT_FREQ))
+ misc = TSI108_TX_INT;
+
+ data->txskbs[tx] = skb;
+
+ if (i == 0) {
+ data->txring[tx].buf0 = dma_map_single(NULL, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ data->txring[tx].len = skb_headlen(skb);
+ misc |= TSI108_TX_SOF;
+ } else {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+
+ data->txring[tx].buf0 = skb_frag_dma_map(NULL, frag,
+ 0,
+ frag->size,
+ DMA_TO_DEVICE);
+ data->txring[tx].len = frag->size;
+ }
+
+ if (i == frags - 1)
+ misc |= TSI108_TX_EOF;
+
+ if (netif_msg_pktdata(data)) {
+ int i;
+ printk("%s: Tx Frame contents (%d)\n", dev->name,
+ skb->len);
+ for (i = 0; i < skb->len; i++)
+ printk(" %2.2x", skb->data[i]);
+ printk(".\n");
+ }
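+ /* Set misc (with TSI108_TX_OWN) last, so the controller only
+ * takes ownership of a fully initialized descriptor. */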
+ data->txring[tx].misc = misc | TSI108_TX_OWN;
+
+ data->txhead = (data->txhead + 1) % TSI108_TXRING_LEN;
+ data->txfree--;
+ }
+
+ tsi108_complete_tx(dev);
+
+ /* This must be done after the check for completed tx descriptors,
+ * so that the tail pointer is correct.
+ */
+
+ if (!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_QUEUE0))
+ tsi108_restart_tx(data);
+
+ spin_unlock_irq(&data->txlock);
+ return NETDEV_TX_OK;
+}
+
+static int tsi108_complete_rx(struct net_device *dev, int budget)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ int done = 0;
+
+ while (data->rxfree && done != budget) {
+ int rx = data->rxtail;
+ struct sk_buff *skb;
+
+ if (data->rxring[rx].misc & TSI108_RX_OWN)
+ break;
+
+ skb = data->rxskbs[rx];
+ data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
+ data->rxfree--;
+ done++;
+
+ if (data->rxring[rx].misc & TSI108_RX_BAD) {
+ spin_lock_irq(&data->misclock);
+
+ if (data->rxring[rx].misc & TSI108_RX_CRC)
+ data->stats.rx_crc_errors++;
+ if (data->rxring[rx].misc & TSI108_RX_OVER)
+ data->stats.rx_fifo_errors++;
+
+ spin_unlock_irq(&data->misclock);
+
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+ if (netif_msg_pktdata(data)) {
+ int i;
+ printk("%s: Rx Frame contents (%d)\n",
+ dev->name, data->rxring[rx].len);
+ for (i = 0; i < data->rxring[rx].len; i++)
+ printk(" %2.2x", skb->data[i]);
+ printk(".\n");
+ }
+
+ skb_put(skb, data->rxring[rx].len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ }
+
+ return done;
+}
+
+static int tsi108_refill_rx(struct net_device *dev, int budget)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ int done = 0;
+
+ while (data->rxfree != TSI108_RXRING_LEN && done != budget) {
+ int rx = data->rxhead;
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
+ data->rxskbs[rx] = skb;
+ if (!skb)
+ break;
+
+ data->rxring[rx].buf0 = dma_map_single(NULL, skb->data,
+ TSI108_RX_SKB_SIZE,
+ DMA_FROM_DEVICE);
+
+ /* Sometimes the hardware sets blen to zero after packet
+ * reception, even though the manual says that it's only ever
+ * modified by the driver.
+ */
+
+ data->rxring[rx].blen = TSI108_RX_SKB_SIZE;
+ data->rxring[rx].misc = TSI108_RX_OWN | TSI108_RX_INT;
+
+ data->rxhead = (data->rxhead + 1) % TSI108_RXRING_LEN;
+ data->rxfree++;
+ done++;
+ }
+
+ if (done != 0 && !(TSI_READ(TSI108_EC_RXSTAT) &
+ TSI108_EC_RXSTAT_QUEUE0))
+ tsi108_restart_rx(data, dev);
+
+ return done;
+}
+
+static int tsi108_poll(struct napi_struct *napi, int budget)
+{
+ struct tsi108_prv_data *data = container_of(napi, struct tsi108_prv_data, napi);
+ struct net_device *dev = data->dev;
+ u32 estat = TSI_READ(TSI108_EC_RXESTAT);
+ u32 intstat = TSI_READ(TSI108_EC_INTSTAT);
+ int num_received = 0, num_filled = 0;
+
+ intstat &= TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
+ TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT;
+
+ TSI_WRITE(TSI108_EC_RXESTAT, estat);
+ TSI_WRITE(TSI108_EC_INTSTAT, intstat);
+
+ if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT))
+ num_received = tsi108_complete_rx(dev, budget);
+
+ /* This should normally fill no more slots than the number of
+ * packets received in tsi108_complete_rx(). The exception
+ * is when we previously ran out of memory for RX SKBs. In that
+ * case, it's helpful to obey the budget, not only so that the
+ * CPU isn't hogged, but so that memory (which may still be low)
+ * is not hogged by one device.
+ *
+ * A work unit is considered to be two SKBs to allow us to catch
+ * up when the ring has shrunk due to out-of-memory but we're
+ * still removing the full budget's worth of packets each time.
+ */
+
+ if (data->rxfree < TSI108_RXRING_LEN)
+ num_filled = tsi108_refill_rx(dev, budget * 2);
+
+ if (intstat & TSI108_INT_RXERROR) {
+ u32 err = TSI_READ(TSI108_EC_RXERR);
+ TSI_WRITE(TSI108_EC_RXERR, err);
+
+ if (err) {
+ if (net_ratelimit())
+ printk(KERN_DEBUG "%s: RX error %x\n",
+ dev->name, err);
+
+ if (!(TSI_READ(TSI108_EC_RXSTAT) &
+ TSI108_EC_RXSTAT_QUEUE0))
+ tsi108_restart_rx(data, dev);
+ }
+ }
+
+ if (intstat & TSI108_INT_RXOVERRUN) {
+ spin_lock_irq(&data->misclock);
+ data->stats.rx_fifo_errors++;
+ spin_unlock_irq(&data->misclock);
+ }
+
+ if (num_received < budget) {
+ data->rxpending = 0;
+ napi_complete(napi);
+
+ TSI_WRITE(TSI108_EC_INTMASK,
+ TSI_READ(TSI108_EC_INTMASK)
+ & ~(TSI108_INT_RXQUEUE0
+ | TSI108_INT_RXTHRESH |
+ TSI108_INT_RXOVERRUN |
+ TSI108_INT_RXERROR |
+ TSI108_INT_RXWAIT));
+ } else {
+ data->rxpending = 1;
+ }
+
+ return num_received;
+}
+
+static void tsi108_rx_int(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+
+ /* A race could cause dev to already be scheduled, so it's not an
+ * error if that happens (and interrupts shouldn't be re-masked,
+ * because that can cause harmful races, if poll has already
+ * unmasked them but not cleared LINK_STATE_SCHED).
+ *
+ * This can happen if this code races with tsi108_poll(), which masks
+ * the interrupts after tsi108_irq_one() read the mask, but before
+ * napi_schedule is called. It could also happen due to calls
+ * from tsi108_check_rxring().
+ */
+
+ if (napi_schedule_prep(&data->napi)) {
+ /* Mask, rather than ack, the receive interrupts. The ack
+ * will happen in tsi108_poll().
+ */
+
+ TSI_WRITE(TSI108_EC_INTMASK,
+ TSI_READ(TSI108_EC_INTMASK) |
+ TSI108_INT_RXQUEUE0
+ | TSI108_INT_RXTHRESH |
+ TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR |
+ TSI108_INT_RXWAIT);
+ __napi_schedule(&data->napi);
+ } else {
+ if (!netif_running(dev)) {
+ /* This can happen if an interrupt occurs while the
+ * interface is being brought down, as the START
+ * bit is cleared before the stop function is called.
+ *
+ * In this case, the interrupts must be masked, or
+ * they will continue indefinitely.
+ *
+ * There's a race here if the interface is brought down
+ * and then up in rapid succession, as the device could
+ * be made running after the above check and before
+ * the masking below. This will only happen if the IRQ
+ * thread has a lower priority than the task bringing
+ * up the interface. Fixing this race would likely
+ * require changes in generic code.
+ */
+
+ TSI_WRITE(TSI108_EC_INTMASK,
+ TSI_READ
+ (TSI108_EC_INTMASK) |
+ TSI108_INT_RXQUEUE0 |
+ TSI108_INT_RXTHRESH |
+ TSI108_INT_RXOVERRUN |
+ TSI108_INT_RXERROR |
+ TSI108_INT_RXWAIT);
+ }
+ }
+}
+
+/* If the RX ring has run out of memory, try periodically
+ * to allocate some more, as otherwise poll would never
+ * get called (apart from the initial end-of-queue condition).
+ *
+ * This is called once per second (by default) from the thread.
+ */
+
+static void tsi108_check_rxring(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+
+ /* A poll is scheduled, as opposed to calling tsi108_refill_rx
+ * directly, so as to keep the receive path single-threaded
+ * (and thus not needing a lock).
+ */
+
+ if (netif_running(dev) && data->rxfree < TSI108_RXRING_LEN / 4)
+ tsi108_rx_int(dev);
+}
+
+static void tsi108_tx_int(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ u32 estat = TSI_READ(TSI108_EC_TXESTAT);
+
+ TSI_WRITE(TSI108_EC_TXESTAT, estat);
+ TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_TXQUEUE0 |
+ TSI108_INT_TXIDLE | TSI108_INT_TXERROR);
+ if (estat & TSI108_EC_TXESTAT_Q0_ERR) {
+ u32 err = TSI_READ(TSI108_EC_TXERR);
+ TSI_WRITE(TSI108_EC_TXERR, err);
+
+ if (err && net_ratelimit())
+ printk(KERN_ERR "%s: TX error %x\n", dev->name, err);
+ }
+
+ if (estat & (TSI108_EC_TXESTAT_Q0_DESCINT | TSI108_EC_TXESTAT_Q0_EOQ)) {
+ spin_lock(&data->txlock);
+ tsi108_complete_tx(dev);
+ spin_unlock(&data->txlock);
+ }
+}
+
+
+static irqreturn_t tsi108_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ u32 stat = TSI_READ(TSI108_EC_INTSTAT);
+
+ if (!(stat & TSI108_INT_ANY))
+ return IRQ_NONE; /* Not our interrupt */
+
+ stat &= ~TSI_READ(TSI108_EC_INTMASK);
+
+ if (stat & (TSI108_INT_TXQUEUE0 | TSI108_INT_TXIDLE |
+ TSI108_INT_TXERROR))
+ tsi108_tx_int(dev);
+ if (stat & (TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
+ TSI108_INT_RXWAIT | TSI108_INT_RXOVERRUN |
+ TSI108_INT_RXERROR))
+ tsi108_rx_int(dev);
+
+ if (stat & TSI108_INT_SFN) {
+ if (net_ratelimit())
+ printk(KERN_DEBUG "%s: SFN error\n", dev->name);
+ TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_SFN);
+ }
+
+ if (stat & TSI108_INT_STATCARRY) {
+ tsi108_stat_carry(dev);
+ TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_STATCARRY);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void tsi108_stop_ethernet(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ int i = 1000;
+ /* Disable all TX and RX queues ... */
+ TSI_WRITE(TSI108_EC_TXCTRL, 0);
+ TSI_WRITE(TSI108_EC_RXCTRL, 0);
+
+ /* ...and wait for them to become idle */
+ while(i--) {
+ if(!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_ACTIVE))
+ break;
+ udelay(10);
+ }
+ i = 1000;
+ while(i--){
+ if(!(TSI_READ(TSI108_EC_RXSTAT) & TSI108_EC_RXSTAT_ACTIVE))
+ return;
+ udelay(10);
+ }
+ printk(KERN_ERR "%s function time out\n", __func__);
+}
+
+static void tsi108_reset_ether(struct tsi108_prv_data * data)
+{
+ TSI_WRITE(TSI108_MAC_CFG1, TSI108_MAC_CFG1_SOFTRST);
+ udelay(100);
+ TSI_WRITE(TSI108_MAC_CFG1, 0);
+
+ TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATRST);
+ udelay(100);
+ TSI_WRITE(TSI108_EC_PORTCTRL,
+ TSI_READ(TSI108_EC_PORTCTRL) &
+ ~TSI108_EC_PORTCTRL_STATRST);
+
+ TSI_WRITE(TSI108_EC_TXCFG, TSI108_EC_TXCFG_RST);
+ udelay(100);
+ TSI_WRITE(TSI108_EC_TXCFG,
+ TSI_READ(TSI108_EC_TXCFG) &
+ ~TSI108_EC_TXCFG_RST);
+
+ TSI_WRITE(TSI108_EC_RXCFG, TSI108_EC_RXCFG_RST);
+ udelay(100);
+ TSI_WRITE(TSI108_EC_RXCFG,
+ TSI_READ(TSI108_EC_RXCFG) &
+ ~TSI108_EC_RXCFG_RST);
+
+ TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
+ TSI_READ(TSI108_MAC_MII_MGMT_CFG) |
+ TSI108_MAC_MII_MGMT_RST);
+ udelay(100);
+ TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
+ (TSI_READ(TSI108_MAC_MII_MGMT_CFG) &
+ ~(TSI108_MAC_MII_MGMT_RST |
+ TSI108_MAC_MII_MGMT_CLK)) | 0x07);
+}
+
+static int tsi108_get_mac(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ u32 word1 = TSI_READ(TSI108_MAC_ADDR1);
+ u32 word2 = TSI_READ(TSI108_MAC_ADDR2);
+
+ /* Note that the octets are reversed from what the manual says,
+ * producing an even weirder ordering...
+ */
+ if (word2 == 0 && word1 == 0) {
+ dev->dev_addr[0] = 0x00;
+ dev->dev_addr[1] = 0x06;
+ dev->dev_addr[2] = 0xd2;
+ dev->dev_addr[3] = 0x00;
+ dev->dev_addr[4] = 0x00;
+ if (data->phy == 0x8)
+ dev->dev_addr[5] = 0x01;
+ else
+ dev->dev_addr[5] = 0x02;
+
+ word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
+
+ word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
+ (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
+
+ TSI_WRITE(TSI108_MAC_ADDR1, word1);
+ TSI_WRITE(TSI108_MAC_ADDR2, word2);
+ } else {
+ dev->dev_addr[0] = (word2 >> 16) & 0xff;
+ dev->dev_addr[1] = (word2 >> 24) & 0xff;
+ dev->dev_addr[2] = (word1 >> 0) & 0xff;
+ dev->dev_addr[3] = (word1 >> 8) & 0xff;
+ dev->dev_addr[4] = (word1 >> 16) & 0xff;
+ dev->dev_addr[5] = (word1 >> 24) & 0xff;
+ }
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ printk(KERN_ERR
+ "%s: Invalid MAC address. word1: %08x, word2: %08x\n",
+ dev->name, word1, word2);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tsi108_set_mac(struct net_device *dev, void *addr)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ u32 word1, word2;
+ int i;
+
+ if (!is_valid_ether_addr(addr))
+ return -EINVAL;
+
+ for (i = 0; i < 6; i++)
+ /* +2 is for the offset of the HW addr type */
+ dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
+
+ word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
+
+ word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
+ (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
+
+ spin_lock_irq(&data->misclock);
+ TSI_WRITE(TSI108_MAC_ADDR1, word1);
+ TSI_WRITE(TSI108_MAC_ADDR2, word2);
+ spin_lock(&data->txlock);
+
+ if (data->txfree && data->link_up)
+ netif_wake_queue(dev);
+
+ spin_unlock(&data->txlock);
+ spin_unlock_irq(&data->misclock);
+ return 0;
+}
+
+/* Protected by dev->xmit_lock. */
+static void tsi108_set_rx_mode(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ u32 rxcfg = TSI_READ(TSI108_EC_RXCFG);
+
+ if (dev->flags & IFF_PROMISC) {
+ rxcfg &= ~(TSI108_EC_RXCFG_UC_HASH | TSI108_EC_RXCFG_MC_HASH);
+ rxcfg |= TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE;
+ goto out;
+ }
+
+ rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE);
+
+ if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
+ int i;
+ struct netdev_hw_addr *ha;
+ rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
+
+ memset(data->mc_hash, 0, sizeof(data->mc_hash));
+
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 hash, crc;
+
+ crc = ether_crc(6, ha->addr);
+ hash = crc >> 23;
+ __set_bit(hash, &data->mc_hash[0]);
+ }
+
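+ /* ether_crc() returns a 32-bit CRC; its top nine bits
+ * (crc >> 23) select one of the 512 filter bits held in the
+ * 16 x 32-bit hash registers programmed below.
+ */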
+ TSI_WRITE(TSI108_EC_HASHADDR,
+ TSI108_EC_HASHADDR_AUTOINC |
+ TSI108_EC_HASHADDR_MCAST);
+
+ for (i = 0; i < 16; i++) {
+ /* The manual says that the hardware may drop
+ * back-to-back writes to the data register.
+ */
+ udelay(1);
+ TSI_WRITE(TSI108_EC_HASHDATA,
+ data->mc_hash[i]);
+ }
+ }
+
+ out:
+ TSI_WRITE(TSI108_EC_RXCFG, rxcfg);
+}
+
+static void tsi108_init_phy(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ u32 i = 0;
+ u16 phyval = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&phy_lock, flags);
+
+ tsi108_write_mii(data, MII_BMCR, BMCR_RESET);
+ while (--i) {
+ if(!(tsi108_read_mii(data, MII_BMCR) & BMCR_RESET))
+ break;
+ udelay(10);
+ }
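+ /* Note: i enters the loop as 0, so the pre-decrement wraps to
+ * 0xffffffff, giving an effectively unbounded retry budget for
+ * the PHY reset above. */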
+ if (i == 0)
+ printk(KERN_ERR "%s function time out\n", __func__);
+
+ if (data->phy_type == TSI108_PHY_BCM54XX) {
+ tsi108_write_mii(data, 0x09, 0x0300);
+ tsi108_write_mii(data, 0x10, 0x1020);
+ tsi108_write_mii(data, 0x1c, 0x8c00);
+ }
+
+ tsi108_write_mii(data,
+ MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+ while (tsi108_read_mii(data, MII_BMCR) & BMCR_ANRESTART)
+ cpu_relax();
+
+ /* Set G/MII mode and receive clock select in TBI control #2. The
+ * second port won't work if this isn't done, even though we don't
+ * use TBI mode.
+ */
+
+ tsi108_write_tbi(data, 0x11, 0x30);
+
+ /* FIXME: It seems to take more than 2 back-to-back reads to the
+ * PHY_STAT register before the link up status bit is set.
+ */
+
+ data->link_up = 0;
+
+ while (!((phyval = tsi108_read_mii(data, MII_BMSR)) &
+ BMSR_LSTATUS)) {
+ if (i++ > (MII_READ_DELAY / 10)) {
+ break;
+ }
+ spin_unlock_irqrestore(&phy_lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&phy_lock, flags);
+ }
+
+ data->mii_if.supports_gmii = mii_check_gmii_support(&data->mii_if);
+ printk(KERN_DEBUG "PHY_STAT reg contains %08x\n", phyval);
+ data->phy_ok = 1;
+ data->init_media = 1;
+ spin_unlock_irqrestore(&phy_lock, flags);
+}
+
+static void tsi108_kill_phy(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&phy_lock, flags);
+ tsi108_write_mii(data, MII_BMCR, BMCR_PDOWN);
+ data->phy_ok = 0;
+ spin_unlock_irqrestore(&phy_lock, flags);
+}
+
+static int tsi108_open(struct net_device *dev)
+{
+ int i;
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ unsigned int rxring_size = TSI108_RXRING_LEN * sizeof(rx_desc);
+ unsigned int txring_size = TSI108_TXRING_LEN * sizeof(tx_desc);
+
+ i = request_irq(data->irq_num, tsi108_irq, 0, dev->name, dev);
+ if (i != 0) {
+ printk(KERN_ERR "tsi108_eth%d: Could not allocate IRQ%d.\n",
+ data->id, data->irq_num);
+ return i;
+ } else {
+ dev->irq = data->irq_num;
+ printk(KERN_NOTICE
+ "tsi108_open : Port %d Assigned IRQ %d to %s\n",
+ data->id, dev->irq, dev->name);
+ }
+
+ data->rxring = dma_alloc_coherent(NULL, rxring_size,
+ &data->rxdma, GFP_KERNEL);
+
+	if (!data->rxring) {
+		printk(KERN_ERR
+		       "TSI108_ETH: failed to allocate memory for rxring!\n");
+		return -ENOMEM;
+ } else {
+ memset(data->rxring, 0, rxring_size);
+ }
+
+ data->txring = dma_alloc_coherent(NULL, txring_size,
+ &data->txdma, GFP_KERNEL);
+
+	if (!data->txring) {
+		printk(KERN_ERR
+		       "TSI108_ETH: failed to allocate memory for txring!\n");
+		dma_free_coherent(NULL, rxring_size, data->rxring, data->rxdma);
+		return -ENOMEM;
+ } else {
+ memset(data->txring, 0, txring_size);
+ }
+
+ for (i = 0; i < TSI108_RXRING_LEN; i++) {
+ data->rxring[i].next0 = data->rxdma + (i + 1) * sizeof(rx_desc);
+ data->rxring[i].blen = TSI108_RXBUF_SIZE;
+ data->rxring[i].vlan = 0;
+ }
+
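+	/* Close the ring: the last descriptor points back to the first. */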
+ data->rxring[TSI108_RXRING_LEN - 1].next0 = data->rxdma;
+
+ data->rxtail = 0;
+ data->rxhead = 0;
+
+ for (i = 0; i < TSI108_RXRING_LEN; i++) {
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
+ if (!skb) {
+ /* Bah. No memory for now, but maybe we'll get
+ * some more later.
+ * For now, we'll live with the smaller ring.
+ */
+ printk(KERN_WARNING
+ "%s: Could only allocate %d receive skb(s).\n",
+ dev->name, i);
+ data->rxhead = i;
+ break;
+ }
+
+		data->rxskbs[i] = skb;
+ data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
+ data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
+ }
+
+ data->rxfree = i;
+ TSI_WRITE(TSI108_EC_RXQ_PTRLOW, data->rxdma);
+
+ for (i = 0; i < TSI108_TXRING_LEN; i++) {
+ data->txring[i].next0 = data->txdma + (i + 1) * sizeof(tx_desc);
+ data->txring[i].misc = 0;
+ }
+
+ data->txring[TSI108_TXRING_LEN - 1].next0 = data->txdma;
+ data->txtail = 0;
+ data->txhead = 0;
+ data->txfree = TSI108_TXRING_LEN;
+ TSI_WRITE(TSI108_EC_TXQ_PTRLOW, data->txdma);
+ tsi108_init_phy(dev);
+
+ napi_enable(&data->napi);
+
+ setup_timer(&data->timer, tsi108_timed_checker, (unsigned long)dev);
+ mod_timer(&data->timer, jiffies + 1);
+
+ tsi108_restart_rx(data, dev);
+
+ TSI_WRITE(TSI108_EC_INTSTAT, ~0);
+
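+	/* A set bit in INTMASK masks that interrupt source, so writing the
+	 * complement unmasks exactly the events handled by tsi108_irq().
+	 */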
+ TSI_WRITE(TSI108_EC_INTMASK,
+ ~(TSI108_INT_TXQUEUE0 | TSI108_INT_RXERROR |
+ TSI108_INT_RXTHRESH | TSI108_INT_RXQUEUE0 |
+ TSI108_INT_RXOVERRUN | TSI108_INT_RXWAIT |
+ TSI108_INT_SFN | TSI108_INT_STATCARRY));
+
+ TSI_WRITE(TSI108_MAC_CFG1,
+ TSI108_MAC_CFG1_RXEN | TSI108_MAC_CFG1_TXEN);
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int tsi108_close(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ napi_disable(&data->napi);
+
+ del_timer_sync(&data->timer);
+
+ tsi108_stop_ethernet(dev);
+ tsi108_kill_phy(dev);
+ TSI_WRITE(TSI108_EC_INTMASK, ~0);
+ TSI_WRITE(TSI108_MAC_CFG1, 0);
+
+ /* Check for any pending TX packets, and drop them. */
+
+ while (!data->txfree || data->txhead != data->txtail) {
+ int tx = data->txtail;
+ struct sk_buff *skb;
+ skb = data->txskbs[tx];
+ data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
+ data->txfree++;
+ dev_kfree_skb(skb);
+ }
+
+ free_irq(data->irq_num, dev);
+
+ /* Discard the RX ring. */
+
+ while (data->rxfree) {
+ int rx = data->rxtail;
+ struct sk_buff *skb;
+
+ skb = data->rxskbs[rx];
+ data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
+ data->rxfree--;
+ dev_kfree_skb(skb);
+ }
+
+	dma_free_coherent(NULL,
+			  TSI108_RXRING_LEN * sizeof(rx_desc),
+			  data->rxring, data->rxdma);
+	dma_free_coherent(NULL,
+			  TSI108_TXRING_LEN * sizeof(tx_desc),
+			  data->txring, data->txdma);
+
+ return 0;
+}
+
+static void tsi108_init_mac(struct net_device *dev)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+
+ TSI_WRITE(TSI108_MAC_CFG2, TSI108_MAC_CFG2_DFLT_PREAMBLE |
+ TSI108_MAC_CFG2_PADCRC);
+
+ TSI_WRITE(TSI108_EC_TXTHRESH,
+ (192 << TSI108_EC_TXTHRESH_STARTFILL) |
+ (192 << TSI108_EC_TXTHRESH_STOPFILL));
+
+ TSI_WRITE(TSI108_STAT_CARRYMASK1,
+ ~(TSI108_STAT_CARRY1_RXBYTES |
+ TSI108_STAT_CARRY1_RXPKTS |
+ TSI108_STAT_CARRY1_RXFCS |
+ TSI108_STAT_CARRY1_RXMCAST |
+ TSI108_STAT_CARRY1_RXALIGN |
+ TSI108_STAT_CARRY1_RXLENGTH |
+ TSI108_STAT_CARRY1_RXRUNT |
+ TSI108_STAT_CARRY1_RXJUMBO |
+ TSI108_STAT_CARRY1_RXFRAG |
+ TSI108_STAT_CARRY1_RXJABBER |
+ TSI108_STAT_CARRY1_RXDROP));
+
+ TSI_WRITE(TSI108_STAT_CARRYMASK2,
+ ~(TSI108_STAT_CARRY2_TXBYTES |
+ TSI108_STAT_CARRY2_TXPKTS |
+ TSI108_STAT_CARRY2_TXEXDEF |
+ TSI108_STAT_CARRY2_TXEXCOL |
+ TSI108_STAT_CARRY2_TXTCOL |
+ TSI108_STAT_CARRY2_TXPAUSE));
+
+ TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATEN);
+ TSI_WRITE(TSI108_MAC_CFG1, 0);
+
+ TSI_WRITE(TSI108_EC_RXCFG,
+ TSI108_EC_RXCFG_SE | TSI108_EC_RXCFG_BFE);
+
+ TSI_WRITE(TSI108_EC_TXQ_CFG, TSI108_EC_TXQ_CFG_DESC_INT |
+ TSI108_EC_TXQ_CFG_EOQ_OWN_INT |
+ TSI108_EC_TXQ_CFG_WSWP | (TSI108_PBM_PORT <<
+ TSI108_EC_TXQ_CFG_SFNPORT));
+
+ TSI_WRITE(TSI108_EC_RXQ_CFG, TSI108_EC_RXQ_CFG_DESC_INT |
+ TSI108_EC_RXQ_CFG_EOQ_OWN_INT |
+ TSI108_EC_RXQ_CFG_WSWP | (TSI108_PBM_PORT <<
+ TSI108_EC_RXQ_CFG_SFNPORT));
+
+ TSI_WRITE(TSI108_EC_TXQ_BUFCFG,
+ TSI108_EC_TXQ_BUFCFG_BURST256 |
+ TSI108_EC_TXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
+ TSI108_EC_TXQ_BUFCFG_SFNPORT));
+
+ TSI_WRITE(TSI108_EC_RXQ_BUFCFG,
+ TSI108_EC_RXQ_BUFCFG_BURST256 |
+ TSI108_EC_RXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
+ TSI108_EC_RXQ_BUFCFG_SFNPORT));
+
+ TSI_WRITE(TSI108_EC_INTMASK, ~0);
+}
+
+static int tsi108_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&data->txlock, flags);
+ rc = mii_ethtool_gset(&data->mii_if, cmd);
+ spin_unlock_irqrestore(&data->txlock, flags);
+
+ return rc;
+}
+
+static int tsi108_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&data->txlock, flags);
+ rc = mii_ethtool_sset(&data->mii_if, cmd);
+ spin_unlock_irqrestore(&data->txlock, flags);
+
+ return rc;
+}
+
+static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct tsi108_prv_data *data = netdev_priv(dev);
+ if (!netif_running(dev))
+ return -EINVAL;
+ return generic_mii_ioctl(&data->mii_if, if_mii(rq), cmd, NULL);
+}
+
+static const struct ethtool_ops tsi108_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_settings = tsi108_get_settings,
+ .set_settings = tsi108_set_settings,
+};
+
+static const struct net_device_ops tsi108_netdev_ops = {
+ .ndo_open = tsi108_open,
+ .ndo_stop = tsi108_close,
+ .ndo_start_xmit = tsi108_send_packet,
+ .ndo_set_rx_mode = tsi108_set_rx_mode,
+ .ndo_get_stats = tsi108_get_stats,
+ .ndo_do_ioctl = tsi108_do_ioctl,
+ .ndo_set_mac_address = tsi108_set_mac,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static int
+tsi108_init_one(struct platform_device *pdev)
+{
+ struct net_device *dev = NULL;
+ struct tsi108_prv_data *data = NULL;
+ hw_info *einfo;
+ int err = 0;
+
+ einfo = pdev->dev.platform_data;
+
+	if (!einfo) {
+ printk(KERN_ERR "tsi-eth %d: Missing additional data!\n",
+ pdev->id);
+ return -ENODEV;
+ }
+
+ /* Create an ethernet device instance */
+
+	dev = alloc_etherdev(sizeof(struct tsi108_prv_data));
+	if (!dev) {
+		printk(KERN_ERR "tsi108_eth: Could not allocate a device structure\n");
+		return -ENOMEM;
+	}
+
+	printk(KERN_INFO "tsi108_eth%d: probe...\n", pdev->id);
+ data = netdev_priv(dev);
+ data->dev = dev;
+
+	pr_debug("tsi108_eth%d:regs:phyregs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n",
+ pdev->id, einfo->regs, einfo->phyregs,
+ einfo->phy, einfo->irq_num);
+
+ data->regs = ioremap(einfo->regs, 0x400);
+	if (!data->regs) {
+ err = -ENOMEM;
+ goto regs_fail;
+ }
+
+ data->phyregs = ioremap(einfo->phyregs, 0x400);
+	if (!data->phyregs) {
+ err = -ENOMEM;
+ goto regs_fail;
+ }
+
+	/* MII setup */
+ data->mii_if.dev = dev;
+ data->mii_if.mdio_read = tsi108_mdio_read;
+ data->mii_if.mdio_write = tsi108_mdio_write;
+ data->mii_if.phy_id = einfo->phy;
+ data->mii_if.phy_id_mask = 0x1f;
+ data->mii_if.reg_num_mask = 0x1f;
+
+ data->phy = einfo->phy;
+ data->phy_type = einfo->phy_type;
+ data->irq_num = einfo->irq_num;
+ data->id = pdev->id;
+ netif_napi_add(dev, &data->napi, tsi108_poll, 64);
+ dev->netdev_ops = &tsi108_netdev_ops;
+ dev->ethtool_ops = &tsi108_ethtool_ops;
+
+ /* Apparently, the Linux networking code won't use scatter-gather
+ * if the hardware doesn't do checksums. However, it's faster
+ * to checksum in place and use SG, as (among other reasons)
+ * the cache won't be dirtied (which then has to be flushed
+ * before DMA). The checksumming is done by the driver (via
+ * a new function skb_csum_dev() in net/core/skbuff.c).
+ */
+
+ dev->features = NETIF_F_HIGHDMA;
+
+ spin_lock_init(&data->txlock);
+ spin_lock_init(&data->misclock);
+
+ tsi108_reset_ether(data);
+ tsi108_kill_phy(dev);
+
+	err = tsi108_get_mac(dev);
+	if (err) {
+ printk(KERN_ERR "%s: Invalid MAC address. Please correct.\n",
+ dev->name);
+ goto register_fail;
+ }
+
+ tsi108_init_mac(dev);
+ err = register_netdev(dev);
+ if (err) {
+ printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
+ dev->name);
+ goto register_fail;
+ }
+
+ platform_set_drvdata(pdev, dev);
+ printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: %pM\n",
+ dev->name, dev->dev_addr);
+#ifdef DEBUG
+ data->msg_enable = DEBUG;
+ dump_eth_one(dev);
+#endif
+
+ return 0;
+
+register_fail:
+ iounmap(data->regs);
+ iounmap(data->phyregs);
+
+regs_fail:
+ free_netdev(dev);
+ return err;
+}
+
+/* There's no way to either get interrupts from the PHY when
+ * something changes, or to have the Tsi108 automatically communicate
+ * with the PHY to reconfigure itself.
+ *
+ * Thus, we have to do it using a timer.
+ */
+
+static void tsi108_timed_checker(unsigned long dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ struct tsi108_prv_data *data = netdev_priv(dev);
+
+ tsi108_check_phy(dev);
+ tsi108_check_rxring(dev);
+ mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
+}
+
+static int tsi108_ether_init(void)
+{
+ int ret;
+	ret = platform_driver_register(&tsi_eth_driver);
+	if (ret < 0) {
+		printk(KERN_ERR "tsi108_ether_init: error initializing ethernet device\n");
+		return ret;
+	}
+ return 0;
+}
+
+static int tsi108_ether_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct tsi108_prv_data *priv = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ tsi108_stop_ethernet(dev);
+ platform_set_drvdata(pdev, NULL);
+ iounmap(priv->regs);
+ iounmap(priv->phyregs);
+ free_netdev(dev);
+
+ return 0;
+}
+
+static void tsi108_ether_exit(void)
+{
+ platform_driver_unregister(&tsi_eth_driver);
+}
+
+module_init(tsi108_ether_init);
+module_exit(tsi108_ether_exit);
+
+MODULE_AUTHOR("Tundra Semiconductor Corporation");
+MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:tsi-ethernet");
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.h b/drivers/net/ethernet/tundra/tsi108_eth.h
new file mode 100644
index 000000000000..5fee7d78dc6d
--- /dev/null
+++ b/drivers/net/ethernet/tundra/tsi108_eth.h
@@ -0,0 +1,356 @@
+/*
+ * (C) Copyright 2005 Tundra Semiconductor Corp.
+ * Kong Lai <kong.lai@tundra.com>.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * net/tsi108_eth.h - definitions for Tsi108 GIGE network controller.
+ */
+
+#ifndef __TSI108_ETH_H
+#define __TSI108_ETH_H
+
+#include <linux/types.h>
+
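+/* Register accessors. Note that these macros assume a local variable
+ * named "data" (the port's struct tsi108_prv_data) in scope at the
+ * call site.
+ */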
+#define TSI_WRITE(offset, val) \
+ out_be32((data->regs + (offset)), val)
+
+#define TSI_READ(offset) \
+ in_be32((data->regs + (offset)))
+
+#define TSI_WRITE_PHY(offset, val) \
+ out_be32((data->phyregs + (offset)), val)
+
+#define TSI_READ_PHY(offset) \
+ in_be32((data->phyregs + (offset)))
+
+/*
+ * TSI108 GIGE port registers
+ */
+
+#define TSI108_ETH_PORT_NUM 2
+#define TSI108_PBM_PORT 2
+#define TSI108_SDRAM_PORT 4
+
+#define TSI108_MAC_CFG1 (0x000)
+#define TSI108_MAC_CFG1_SOFTRST (1 << 31)
+#define TSI108_MAC_CFG1_LOOPBACK (1 << 8)
+#define TSI108_MAC_CFG1_RXEN (1 << 2)
+#define TSI108_MAC_CFG1_TXEN (1 << 0)
+
+#define TSI108_MAC_CFG2 (0x004)
+#define TSI108_MAC_CFG2_DFLT_PREAMBLE (7 << 12)
+#define TSI108_MAC_CFG2_IFACE_MASK (3 << 8)
+#define TSI108_MAC_CFG2_NOGIG (1 << 8)
+#define TSI108_MAC_CFG2_GIG (2 << 8)
+#define TSI108_MAC_CFG2_PADCRC (1 << 2)
+#define TSI108_MAC_CFG2_FULLDUPLEX (1 << 0)
+
+#define TSI108_MAC_MII_MGMT_CFG (0x020)
+#define TSI108_MAC_MII_MGMT_CLK (7 << 0)
+#define TSI108_MAC_MII_MGMT_RST (1 << 31)
+
+#define TSI108_MAC_MII_CMD (0x024)
+#define TSI108_MAC_MII_CMD_READ (1 << 0)
+
+#define TSI108_MAC_MII_ADDR (0x028)
+#define TSI108_MAC_MII_ADDR_REG 0
+#define TSI108_MAC_MII_ADDR_PHY 8
+
+#define TSI108_MAC_MII_DATAOUT (0x02c)
+#define TSI108_MAC_MII_DATAIN (0x030)
+
+#define TSI108_MAC_MII_IND (0x034)
+#define TSI108_MAC_MII_IND_NOTVALID (1 << 2)
+#define TSI108_MAC_MII_IND_SCANNING (1 << 1)
+#define TSI108_MAC_MII_IND_BUSY (1 << 0)
+
+#define TSI108_MAC_IFCTRL (0x038)
+#define TSI108_MAC_IFCTRL_PHYMODE (1 << 24)
+
+#define TSI108_MAC_ADDR1 (0x040)
+#define TSI108_MAC_ADDR2 (0x044)
+
+#define TSI108_STAT_RXBYTES (0x06c)
+#define TSI108_STAT_RXBYTES_CARRY (1 << 24)
+
+#define TSI108_STAT_RXPKTS (0x070)
+#define TSI108_STAT_RXPKTS_CARRY (1 << 18)
+
+#define TSI108_STAT_RXFCS (0x074)
+#define TSI108_STAT_RXFCS_CARRY (1 << 12)
+
+#define TSI108_STAT_RXMCAST (0x078)
+#define TSI108_STAT_RXMCAST_CARRY (1 << 18)
+
+#define TSI108_STAT_RXALIGN (0x08c)
+#define TSI108_STAT_RXALIGN_CARRY (1 << 12)
+
+#define TSI108_STAT_RXLENGTH (0x090)
+#define TSI108_STAT_RXLENGTH_CARRY (1 << 12)
+
+#define TSI108_STAT_RXRUNT (0x09c)
+#define TSI108_STAT_RXRUNT_CARRY (1 << 12)
+
+#define TSI108_STAT_RXJUMBO (0x0a0)
+#define TSI108_STAT_RXJUMBO_CARRY (1 << 12)
+
+#define TSI108_STAT_RXFRAG (0x0a4)
+#define TSI108_STAT_RXFRAG_CARRY (1 << 12)
+
+#define TSI108_STAT_RXJABBER (0x0a8)
+#define TSI108_STAT_RXJABBER_CARRY (1 << 12)
+
+#define TSI108_STAT_RXDROP (0x0ac)
+#define TSI108_STAT_RXDROP_CARRY (1 << 12)
+
+#define TSI108_STAT_TXBYTES (0x0b0)
+#define TSI108_STAT_TXBYTES_CARRY (1 << 24)
+
+#define TSI108_STAT_TXPKTS (0x0b4)
+#define TSI108_STAT_TXPKTS_CARRY (1 << 18)
+
+#define TSI108_STAT_TXEXDEF (0x0c8)
+#define TSI108_STAT_TXEXDEF_CARRY (1 << 12)
+
+#define TSI108_STAT_TXEXCOL (0x0d8)
+#define TSI108_STAT_TXEXCOL_CARRY (1 << 12)
+
+#define TSI108_STAT_TXTCOL (0x0dc)
+#define TSI108_STAT_TXTCOL_CARRY (1 << 13)
+
+#define TSI108_STAT_TXPAUSEDROP (0x0e4)
+#define TSI108_STAT_TXPAUSEDROP_CARRY (1 << 12)
+
+#define TSI108_STAT_CARRY1 (0x100)
+#define TSI108_STAT_CARRY1_RXBYTES (1 << 16)
+#define TSI108_STAT_CARRY1_RXPKTS (1 << 15)
+#define TSI108_STAT_CARRY1_RXFCS (1 << 14)
+#define TSI108_STAT_CARRY1_RXMCAST (1 << 13)
+#define TSI108_STAT_CARRY1_RXALIGN (1 << 8)
+#define TSI108_STAT_CARRY1_RXLENGTH (1 << 7)
+#define TSI108_STAT_CARRY1_RXRUNT (1 << 4)
+#define TSI108_STAT_CARRY1_RXJUMBO (1 << 3)
+#define TSI108_STAT_CARRY1_RXFRAG (1 << 2)
+#define TSI108_STAT_CARRY1_RXJABBER (1 << 1)
+#define TSI108_STAT_CARRY1_RXDROP (1 << 0)
+
+#define TSI108_STAT_CARRY2 (0x104)
+#define TSI108_STAT_CARRY2_TXBYTES (1 << 13)
+#define TSI108_STAT_CARRY2_TXPKTS (1 << 12)
+#define TSI108_STAT_CARRY2_TXEXDEF (1 << 7)
+#define TSI108_STAT_CARRY2_TXEXCOL (1 << 3)
+#define TSI108_STAT_CARRY2_TXTCOL (1 << 2)
+#define TSI108_STAT_CARRY2_TXPAUSE (1 << 0)
+
+#define TSI108_STAT_CARRYMASK1 (0x108)
+#define TSI108_STAT_CARRYMASK2 (0x10c)
+
+#define TSI108_EC_PORTCTRL (0x200)
+#define TSI108_EC_PORTCTRL_STATRST (1 << 31)
+#define TSI108_EC_PORTCTRL_STATEN (1 << 28)
+#define TSI108_EC_PORTCTRL_NOGIG (1 << 18)
+#define TSI108_EC_PORTCTRL_HALFDUPLEX (1 << 16)
+
+#define TSI108_EC_INTSTAT (0x204)
+#define TSI108_EC_INTMASK (0x208)
+
+#define TSI108_INT_ANY (1 << 31)
+#define TSI108_INT_SFN (1 << 30)
+#define TSI108_INT_RXIDLE (1 << 29)
+#define TSI108_INT_RXABORT (1 << 28)
+#define TSI108_INT_RXERROR (1 << 27)
+#define TSI108_INT_RXOVERRUN (1 << 26)
+#define TSI108_INT_RXTHRESH (1 << 25)
+#define TSI108_INT_RXWAIT (1 << 24)
+#define TSI108_INT_RXQUEUE0 (1 << 16)
+#define TSI108_INT_STATCARRY (1 << 15)
+#define TSI108_INT_TXIDLE (1 << 13)
+#define TSI108_INT_TXABORT (1 << 12)
+#define TSI108_INT_TXERROR (1 << 11)
+#define TSI108_INT_TXUNDERRUN (1 << 10)
+#define TSI108_INT_TXTHRESH (1 << 9)
+#define TSI108_INT_TXWAIT (1 << 8)
+#define TSI108_INT_TXQUEUE0 (1 << 0)
+
+#define TSI108_EC_TXCFG (0x220)
+#define TSI108_EC_TXCFG_RST (1 << 31)
+
+#define TSI108_EC_TXCTRL (0x224)
+#define TSI108_EC_TXCTRL_IDLEINT (1 << 31)
+#define TSI108_EC_TXCTRL_ABORT (1 << 30)
+#define TSI108_EC_TXCTRL_GO (1 << 15)
+#define TSI108_EC_TXCTRL_QUEUE0 (1 << 0)
+
+#define TSI108_EC_TXSTAT (0x228)
+#define TSI108_EC_TXSTAT_ACTIVE (1 << 15)
+#define TSI108_EC_TXSTAT_QUEUE0 (1 << 0)
+
+#define TSI108_EC_TXESTAT (0x22c)
+#define TSI108_EC_TXESTAT_Q0_ERR (1 << 24)
+#define TSI108_EC_TXESTAT_Q0_DESCINT (1 << 16)
+#define TSI108_EC_TXESTAT_Q0_EOF (1 << 8)
+#define TSI108_EC_TXESTAT_Q0_EOQ (1 << 0)
+
+#define TSI108_EC_TXERR (0x278)
+
+#define TSI108_EC_TXQ_CFG (0x280)
+#define TSI108_EC_TXQ_CFG_DESC_INT (1 << 20)
+#define TSI108_EC_TXQ_CFG_EOQ_OWN_INT (1 << 19)
+#define TSI108_EC_TXQ_CFG_WSWP (1 << 11)
+#define TSI108_EC_TXQ_CFG_BSWP (1 << 10)
+#define TSI108_EC_TXQ_CFG_SFNPORT 0
+
+#define TSI108_EC_TXQ_BUFCFG (0x284)
+#define TSI108_EC_TXQ_BUFCFG_BURST8 (0 << 8)
+#define TSI108_EC_TXQ_BUFCFG_BURST32 (1 << 8)
+#define TSI108_EC_TXQ_BUFCFG_BURST128 (2 << 8)
+#define TSI108_EC_TXQ_BUFCFG_BURST256 (3 << 8)
+#define TSI108_EC_TXQ_BUFCFG_WSWP (1 << 11)
+#define TSI108_EC_TXQ_BUFCFG_BSWP (1 << 10)
+#define TSI108_EC_TXQ_BUFCFG_SFNPORT 0
+
+#define TSI108_EC_TXQ_PTRLOW (0x288)
+
+#define TSI108_EC_TXQ_PTRHIGH (0x28c)
+#define TSI108_EC_TXQ_PTRHIGH_VALID (1 << 31)
+
+#define TSI108_EC_TXTHRESH (0x230)
+#define TSI108_EC_TXTHRESH_STARTFILL 0
+#define TSI108_EC_TXTHRESH_STOPFILL 16
+
+#define TSI108_EC_RXCFG (0x320)
+#define TSI108_EC_RXCFG_RST (1 << 31)
+
+#define TSI108_EC_RXSTAT (0x328)
+#define TSI108_EC_RXSTAT_ACTIVE (1 << 15)
+#define TSI108_EC_RXSTAT_QUEUE0 (1 << 0)
+
+#define TSI108_EC_RXESTAT (0x32c)
+#define TSI108_EC_RXESTAT_Q0_ERR (1 << 24)
+#define TSI108_EC_RXESTAT_Q0_DESCINT (1 << 16)
+#define TSI108_EC_RXESTAT_Q0_EOF (1 << 8)
+#define TSI108_EC_RXESTAT_Q0_EOQ (1 << 0)
+
+#define TSI108_EC_HASHADDR (0x360)
+#define TSI108_EC_HASHADDR_AUTOINC (1 << 31)
+#define TSI108_EC_HASHADDR_DO1STREAD (1 << 30)
+#define TSI108_EC_HASHADDR_UNICAST (0 << 4)
+#define TSI108_EC_HASHADDR_MCAST (1 << 4)
+
+#define TSI108_EC_HASHDATA (0x364)
+
+#define TSI108_EC_RXQ_PTRLOW (0x388)
+
+#define TSI108_EC_RXQ_PTRHIGH (0x38c)
+#define TSI108_EC_RXQ_PTRHIGH_VALID (1 << 31)
+
+/* Station Enable -- accept packets destined for us */
+#define TSI108_EC_RXCFG_SE (1 << 13)
+/* Unicast Frame Enable -- for packets not destined for us */
+#define TSI108_EC_RXCFG_UFE (1 << 12)
+/* Multicast Frame Enable */
+#define TSI108_EC_RXCFG_MFE (1 << 11)
+/* Broadcast Frame Enable */
+#define TSI108_EC_RXCFG_BFE (1 << 10)
+#define TSI108_EC_RXCFG_UC_HASH (1 << 9)
+#define TSI108_EC_RXCFG_MC_HASH (1 << 8)
+
+#define TSI108_EC_RXQ_CFG (0x380)
+#define TSI108_EC_RXQ_CFG_DESC_INT (1 << 20)
+#define TSI108_EC_RXQ_CFG_EOQ_OWN_INT (1 << 19)
+#define TSI108_EC_RXQ_CFG_WSWP (1 << 11)
+#define TSI108_EC_RXQ_CFG_BSWP (1 << 10)
+#define TSI108_EC_RXQ_CFG_SFNPORT 0
+
+#define TSI108_EC_RXQ_BUFCFG (0x384)
+#define TSI108_EC_RXQ_BUFCFG_BURST8 (0 << 8)
+#define TSI108_EC_RXQ_BUFCFG_BURST32 (1 << 8)
+#define TSI108_EC_RXQ_BUFCFG_BURST128 (2 << 8)
+#define TSI108_EC_RXQ_BUFCFG_BURST256 (3 << 8)
+#define TSI108_EC_RXQ_BUFCFG_WSWP (1 << 11)
+#define TSI108_EC_RXQ_BUFCFG_BSWP (1 << 10)
+#define TSI108_EC_RXQ_BUFCFG_SFNPORT 0
+
+#define TSI108_EC_RXCTRL (0x324)
+#define TSI108_EC_RXCTRL_ABORT (1 << 30)
+#define TSI108_EC_RXCTRL_GO (1 << 15)
+#define TSI108_EC_RXCTRL_QUEUE0 (1 << 0)
+
+#define TSI108_EC_RXERR (0x378)
+
+#define TSI108_TX_EOF (1 << 0) /* End of frame; last fragment of packet */
+#define TSI108_TX_SOF (1 << 1) /* Start of frame; first frag. of packet */
+#define TSI108_TX_VLAN (1 << 2) /* Per-frame VLAN: enables VLAN override */
+#define TSI108_TX_HUGE (1 << 3) /* Huge frame enable */
+#define TSI108_TX_PAD (1 << 4) /* Pad the packet if too short */
+#define TSI108_TX_CRC (1 << 5) /* Generate CRC for this packet */
+#define TSI108_TX_INT (1 << 14) /* Generate an IRQ after frag. processed */
+#define TSI108_TX_RETRY (0xf << 16) /* 4 bit field indicating num. of retries */
+#define TSI108_TX_COL (1 << 20) /* Set if a collision occurred */
+#define TSI108_TX_LCOL (1 << 24) /* Set if a late collision occurred */
+#define TSI108_TX_UNDER (1 << 25) /* Set if a FIFO underrun occurred */
+#define TSI108_TX_RLIM (1 << 26) /* Set if the retry limit was reached */
+#define TSI108_TX_OK (1 << 30) /* Set if the frame TX was successful */
+#define TSI108_TX_OWN (1 << 31) /* Set if the device owns the descriptor */
+
+/* Note: the descriptor layouts assume big-endian byte order. */
+typedef struct {
+ u32 buf0;
+ u32 buf1; /* Base address of buffer */
+ u32 next0; /* Address of next descriptor, if any */
+ u32 next1;
+ u16 vlan; /* VLAN, if override enabled for this packet */
+ u16 len; /* Length of buffer in bytes */
+ u32 misc; /* See TSI108_TX_* above */
+ u32 reserved0; /*reserved0 and reserved1 are added to make the desc */
+ u32 reserved1; /* 32-byte aligned */
+} __attribute__ ((aligned(32))) tx_desc;
+
+#define TSI108_RX_EOF (1 << 0) /* End of frame; last fragment of packet */
+#define TSI108_RX_SOF (1 << 1) /* Start of frame; first frag. of packet */
+#define TSI108_RX_VLAN (1 << 2) /* Set on SOF if packet has a VLAN */
+#define TSI108_RX_FTYPE (1 << 3) /* Length/Type field is type, not length */
+#define TSI108_RX_RUNT (1 << 4) /* Packet is less than minimum size */
+#define TSI108_RX_HASH (1 << 7) /* Hash table match */
+#define TSI108_RX_BAD (1 << 8) /* Bad frame */
+#define TSI108_RX_OVER (1 << 9) /* FIFO overrun occurred */
+#define TSI108_RX_TRUNC (1 << 11) /* Packet truncated due to excess length */
+#define TSI108_RX_CRC (1 << 12) /* Packet had a CRC error */
+#define TSI108_RX_INT (1 << 13) /* Generate an IRQ after frag. processed */
+#define TSI108_RX_OWN (1 << 15) /* Set if the device owns the descriptor */
+
+#define TSI108_RX_SKB_SIZE 1536 /* The RX skb length */
+
+typedef struct {
+ u32 buf0; /* Base address of buffer */
+ u32 buf1; /* Base address of buffer */
+ u32 next0; /* Address of next descriptor, if any */
+ u32 next1; /* Address of next descriptor, if any */
+ u16 vlan; /* VLAN of received packet, first frag only */
+ u16 len; /* Length of received fragment in bytes */
+ u16 blen; /* Length of buffer in bytes */
+ u16 misc; /* See TSI108_RX_* above */
+ u32 reserved0; /* reserved0 and reserved1 are added to make the desc */
+ u32 reserved1; /* 32-byte aligned */
+} __attribute__ ((aligned(32))) rx_desc;
+
+#endif /* __TSI108_ETH_H */
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
new file mode 100644
index 000000000000..68a9ba66feba
--- /dev/null
+++ b/drivers/net/ethernet/via/Kconfig
@@ -0,0 +1,59 @@
+#
+# VIA device configuration
+#
+
+config NET_VENDOR_VIA
+ bool "VIA devices"
+ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about VIA devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_VIA
+
+config VIA_RHINE
+ tristate "VIA Rhine support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MII
+ ---help---
+ If you have a VIA "Rhine" based network card (Rhine-I (VT86C100A),
+ Rhine-II (VT6102), or Rhine-III (VT6105)), say Y here. Rhine-type
+ Ethernet functions can also be found integrated on South Bridges
+ (e.g. VT8235).
+
+ To compile this driver as a module, choose M here. The module
+ will be called via-rhine.
+
+config VIA_RHINE_MMIO
+ bool "Use MMIO instead of PIO"
+ depends on VIA_RHINE
+ ---help---
+ This instructs the driver to use PCI shared memory (MMIO) instead of
+ programmed I/O ports (PIO). Enabling this gives an improvement in
+ processing time in parts of the driver.
+
+ If unsure, say Y.
+
+config VIA_VELOCITY
+ tristate "VIA Velocity support"
+ depends on PCI
+ select CRC32
+ select CRC_CCITT
+ select NET_CORE
+ select MII
+ ---help---
+ If you have a VIA "Velocity" based network card say Y here.
+
+ To compile this driver as a module, choose M here. The module
+ will be called via-velocity.
+
+endif # NET_VENDOR_VIA
diff --git a/drivers/net/ethernet/via/Makefile b/drivers/net/ethernet/via/Makefile
new file mode 100644
index 000000000000..46c5d4a3d8f1
--- /dev/null
+++ b/drivers/net/ethernet/via/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the VIA device drivers.
+#
+
+obj-$(CONFIG_VIA_RHINE) += via-rhine.o
+obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
new file mode 100644
index 000000000000..f34dd99fe579
--- /dev/null
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -0,0 +1,2340 @@
+/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
+/*
+ Written 1998-2001 by Donald Becker.
+
+ Current Maintainer: Roger Luethi <rl@hellgate.ch>
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is designed for the VIA VT86C100A Rhine-I.
+ It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
+ and management NIC 6105M).
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+
+ This driver contains some changes from the original Donald Becker
+ version. He may or may not be interested in bug reports on this
+ code. You can find his versions at:
+ http://www.scyld.com/network/via-rhine.html
+ [link no longer provides useful info -jgarzik]
+
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME "via-rhine"
+#define DRV_VERSION "1.5.0"
+#define DRV_RELDATE "2010-10-09"
+
+
+/* A few user-configurable values.
+ These may be modified when a driver module is loaded. */
+
+#define DEBUG
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+static int max_interrupt_work = 20;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
+ defined(CONFIG_SPARC) || defined(__ia64__) || \
+ defined(__sh__) || defined(__mips__)
+static int rx_copybreak = 1518;
+#else
+static int rx_copybreak;
+#endif
+
+/* Work-around for broken BIOSes: they are unable to get the chip back out of
+ power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
+static bool avoid_D3;
+
+/*
+ * In case you are looking for 'options[]' or 'full_duplex[]', they
+ * are gone. Use ethtool(8) instead.
+ */
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ The Rhine has a 64 element 8390-like hash table. */
+static const int multicast_filter_limit = 32;
+
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
+#define RX_RING_SIZE 64
+
+/* Operational parameters that usually are not changed. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (2*HZ)
+
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/if_vlan.h>
+#include <linux/bitops.h>
+#include <linux/workqueue.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <linux/dmi.h>
+
+/* These identify the driver base version and may not be removed. */
+static const char version[] __devinitconst =
+ "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
+
+/* This driver was written to use PCI memory space. Some early versions
+ of the Rhine may only work correctly with I/O space accesses. */
+#ifdef CONFIG_VIA_RHINE_MMIO
+#define USE_MMIO
+#else
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(max_interrupt_work, int, 0);
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param(avoid_D3, bool, 0);
+MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
+MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
+MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
+
+#define MCAM_SIZE 32
+#define VCAM_SIZE 32
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for the VIA VT86C100A (Rhine-I) and the compatible
+Rhine-II/Rhine-III PCI Fast Ethernet controllers.
+
+II. Board-specific settings
+
+Boards with this chip are functional only in a bus-master PCI slot.
+
+Many operational settings are loaded from the EEPROM to the Config word at
+offset 0x78. For most of these settings, this driver assumes that they are
+correct.
+If this driver is compiled to use PCI memory space operations the EEPROM
+must be configured to enable memory ops.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver attempts to use a zero-copy receive and transmit scheme.
+
+Alas, all data buffers are required to start on a 32 bit boundary, so
+the driver must often copy transmit packets into bounce buffers.
+
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in the last phase of rhine_rx().
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
+
+Since the VIA chips are only able to transfer data to buffers on 32 bit
+boundaries, the IP header at offset 14 in an ethernet frame isn't
+longword aligned for further processing. Copying these unaligned buffers
+has the beneficial effect of 16-byte aligning the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
+which is single threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring. It locks the
+netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
+the ring is not available it stops the transmit queue by
+calling netif_stop_queue.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. If at least half of the entries in
+the Rx ring are available the transmit queue is woken up if it was stopped.
+
+IV. Notes
+
+IVb. References
+
+Preliminary VT86C100A manual from http://www.via.com.tw/
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
+ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
+
+
+IVc. Errata
+
+The VT86C100A manual is not a reliable source of information.
+The 3043 chip does not handle unaligned transmit or receive buffers, resulting
+in significant performance degradation for bounce buffer copies on transmit
+and unaligned IP headers on receive.
+The chip does not pad to minimum transmit length.
+
+*/
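+
+/* An illustrative sketch of the copybreak policy from section IIIb/c above
+ * (not driver code; rhine_rx() further down is the authoritative version):
+ *
+ *	if (pkt_len < rx_copybreak) {
+ *		copy = netdev_alloc_skb_ip_align(dev, pkt_len);
+ *		skb_copy_to_linear_data(copy, ring_skb->data, pkt_len);
+ *		...ring_skb stays mapped and is reused in place...
+ *	} else {
+ *		...ring_skb is passed up the stack and its slot is
+ *		refilled later with a freshly allocated skb...
+ *	}
+ */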
+
+
+/* This table drives the PCI probe routines. It's mostly boilerplate in all
+ of the drivers, and will likely be provided by some future kernel.
+   Note the matching code -- the first table entry matches all 56** cards but
+   the second matches only the 1234 card.
+*/
+
+enum rhine_revs {
+ VT86C100A = 0x00,
+ VTunknown0 = 0x20,
+ VT6102 = 0x40,
+ VT8231 = 0x50, /* Integrated MAC */
+ VT8233 = 0x60, /* Integrated MAC */
+ VT8235 = 0x74, /* Integrated MAC */
+ VT8237 = 0x78, /* Integrated MAC */
+ VTunknown1 = 0x7C,
+ VT6105 = 0x80,
+ VT6105_B0 = 0x83,
+ VT6105L = 0x8A,
+ VT6107 = 0x8C,
+ VTunknown2 = 0x8E,
+ VT6105M = 0x90, /* Management adapter */
+};
+
+enum rhine_quirks {
+ rqWOL = 0x0001, /* Wake-On-LAN support */
+ rqForceReset = 0x0002,
+ rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
+ rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
+ rqRhineI = 0x0100, /* See comment below */
+};
+/*
+ * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
+ * MMIO as well as for the collision counter and the Tx FIFO underflow
+ * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
+ */
+
+/* Beware of PCI posted writes */
+#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
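+/* IOSYNC works because a read from any chip register drains writes still
+ * sitting in the PCI post buffers; StationAddr is merely a register that
+ * is always safe (side-effect free) to read.
+ */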
+
+static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
+ { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
+ { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
+ { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
+ { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */
+ { } /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
+
+
+/* Offsets to the device registers. */
+enum register_offsets {
+ StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
+ ChipCmd1=0x09, TQWake=0x0A,
+ IntrStatus=0x0C, IntrEnable=0x0E,
+ MulticastFilter0=0x10, MulticastFilter1=0x14,
+ RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
+ MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
+ MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
+ ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
+ RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
+ StickyHW=0x83, IntrStatus2=0x84,
+ CamMask=0x88, CamCon=0x92, CamAddr=0x93,
+ WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
+ WOLcrClr1=0xA6, WOLcgClr=0xA7,
+ PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
+};
+
+/* Bits in ConfigD */
+enum backoff_bits {
+ BackOptional=0x01, BackModify=0x02,
+ BackCaptureEffect=0x04, BackRandom=0x08
+};
+
+/* Bits in the TxConfig (TCR) register */
+enum tcr_bits {
+ TCR_PQEN=0x01,
+ TCR_LB0=0x02, /* loopback[0] */
+ TCR_LB1=0x04, /* loopback[1] */
+ TCR_OFSET=0x08,
+ TCR_RTGOPT=0x10,
+ TCR_RTFT0=0x20,
+ TCR_RTFT1=0x40,
+ TCR_RTSF=0x80,
+};
+
+/* Bits in the CamCon (CAMC) register */
+enum camcon_bits {
+ CAMC_CAMEN=0x01,
+ CAMC_VCAMSL=0x02,
+ CAMC_CAMWR=0x04,
+ CAMC_CAMRD=0x08,
+};
+
+/* Bits in the PCIBusConfig1 (BCR1) register */
+enum bcr1_bits {
+ BCR1_POT0=0x01,
+ BCR1_POT1=0x02,
+ BCR1_POT2=0x04,
+ BCR1_CTFT0=0x08,
+ BCR1_CTFT1=0x10,
+ BCR1_CTSF=0x20,
+ BCR1_TXQNOBK=0x40, /* for VT6105 */
+ BCR1_VIDFR=0x80, /* for VT6105 */
+ BCR1_MED0=0x40, /* for VT6102 */
+ BCR1_MED1=0x80, /* for VT6102 */
+};
+
+#ifdef USE_MMIO
+/* Registers we check that mmio and reg are the same. */
+static const int mmio_verify_registers[] = {
+ RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
+ 0
+};
+#endif
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
+ IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
+ IntrPCIErr=0x0040,
+ IntrStatsMax=0x0080, IntrRxEarly=0x0100,
+ IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
+ IntrTxAborted=0x2000, IntrLinkChange=0x4000,
+ IntrRxWakeUp=0x8000,
+ IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
+ IntrTxDescRace=0x080000, /* mapped from IntrStatus2 */
+ IntrTxErrSummary=0x082218,
+};
+
+/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
+enum wol_bits {
+ WOLucast = 0x10,
+ WOLmagic = 0x20,
+ WOLbmcast = 0x30,
+ WOLlnkon = 0x40,
+ WOLlnkoff = 0x80,
+};
+
+/* The Rx and Tx buffer descriptors. */
+struct rx_desc {
+ __le32 rx_status;
+ __le32 desc_length; /* Chain flag, Buffer/frame length */
+ __le32 addr;
+ __le32 next_desc;
+};
+struct tx_desc {
+ __le32 tx_status;
+ __le32 desc_length; /* Chain flag, Tx Config, Frame length */
+ __le32 addr;
+ __le32 next_desc;
+};
+
+/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
+#define TXDESC 0x00e08000
+
+enum rx_status_bits {
+ RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
+};
+
+/* Bits in *_desc.*_status */
+enum desc_status_bits {
+ DescOwn=0x80000000
+};
+
+/* Bits in *_desc.*_length */
+enum desc_length_bits {
+ DescTag=0x00010000
+};
+
+/* Bits in ChipCmd. */
+enum chip_cmd_bits {
+ CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
+ CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
+ Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
+ Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
+};
+
+struct rhine_private {
+ /* Bit mask for configured VLAN ids */
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+ /* Descriptor rings */
+ struct rx_desc *rx_ring;
+ struct tx_desc *tx_ring;
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
+
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
+
+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+ dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
+
+ /* Tx bounce buffers (Rhine-I only) */
+ unsigned char *tx_buf[TX_RING_SIZE];
+ unsigned char *tx_bufs;
+ dma_addr_t tx_bufs_dma;
+
+ struct pci_dev *pdev;
+ long pioaddr;
+ struct net_device *dev;
+ struct napi_struct napi;
+ spinlock_t lock;
+ struct work_struct reset_task;
+
+ /* Frequently used values: keep some adjacent for cache effect. */
+ u32 quirks;
+ struct rx_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int cur_tx, dirty_tx;
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ u8 wolopts;
+
+ u8 tx_thresh, rx_thresh;
+
+ struct mii_if_info mii_if;
+ void __iomem *base;
+};
+
+#define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
+#define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
+#define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
+
+#define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x))
+#define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x))
+#define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x))
+
+#define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
+#define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
+#define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
+
+#define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
+#define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
+#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
+
+
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int rhine_open(struct net_device *dev);
+static void rhine_reset_task(struct work_struct *work);
+static void rhine_tx_timeout(struct net_device *dev);
+static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
+static void rhine_tx(struct net_device *dev);
+static int rhine_rx(struct net_device *dev, int limit);
+static void rhine_error(struct net_device *dev, int intr_status);
+static void rhine_set_rx_mode(struct net_device *dev);
+static struct net_device_stats *rhine_get_stats(struct net_device *dev);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static const struct ethtool_ops netdev_ethtool_ops;
+static int rhine_close(struct net_device *dev);
+static void rhine_shutdown (struct pci_dev *pdev);
+static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
+static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
+static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
+static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
+static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
+static void rhine_init_cam_filter(struct net_device *dev);
+static void rhine_update_vcam(struct net_device *dev);
+
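+/* Busy-wait for "condition" for up to 1024 polls; used below to wait for
+ * chip reset and EEPROM reload to complete.
+ */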
+#define RHINE_WAIT_FOR(condition) \
+do { \
+ int i = 1024; \
+ while (!(condition) && --i) \
+ ; \
+ if (debug > 1 && i < 512) \
+ pr_info("%4d cycles used @ %s:%d\n", \
+ 1024 - i, __func__, __LINE__); \
+} while (0)
+
+static inline u32 get_intr_status(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ u32 intr_status;
+
+ intr_status = ioread16(ioaddr + IntrStatus);
+ /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
+ if (rp->quirks & rqStatusWBRace)
+ intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
+ return intr_status;
+}
+
+/*
+ * Get power related registers into sane state.
+ * Notify user about past WOL event.
+ */
+static void rhine_power_init(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ u16 wolstat;
+
+ if (rp->quirks & rqWOL) {
+ /* Make sure chip is in power state D0 */
+ iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
+
+ /* Disable "force PME-enable" */
+ iowrite8(0x80, ioaddr + WOLcgClr);
+
+ /* Clear power-event config bits (WOL) */
+ iowrite8(0xFF, ioaddr + WOLcrClr);
+ /* More recent cards can manage two additional patterns */
+ if (rp->quirks & rq6patterns)
+ iowrite8(0x03, ioaddr + WOLcrClr1);
+
+ /* Save power-event status bits */
+ wolstat = ioread8(ioaddr + PwrcsrSet);
+ if (rp->quirks & rq6patterns)
+ wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
+
+ /* Clear power-event status bits */
+ iowrite8(0xFF, ioaddr + PwrcsrClr);
+ if (rp->quirks & rq6patterns)
+ iowrite8(0x03, ioaddr + PwrcsrClr1);
+
+ if (wolstat) {
+ char *reason;
+ switch (wolstat) {
+ case WOLmagic:
+ reason = "Magic packet";
+ break;
+ case WOLlnkon:
+ reason = "Link went up";
+ break;
+ case WOLlnkoff:
+ reason = "Link went down";
+ break;
+ case WOLucast:
+ reason = "Unicast packet";
+ break;
+ case WOLbmcast:
+ reason = "Multicast/broadcast packet";
+ break;
+ default:
+ reason = "Unknown";
+ }
+ netdev_info(dev, "Woke system up. Reason: %s\n",
+ reason);
+ }
+ }
+}
+
+static void rhine_chip_reset(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
+ IOSYNC;
+
+ if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
+ netdev_info(dev, "Reset not complete yet. Trying harder.\n");
+
+ /* Force reset */
+ if (rp->quirks & rqForceReset)
+ iowrite8(0x40, ioaddr + MiscCmd);
+
+ /* Reset can take somewhat longer (rare) */
+ RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
+ }
+
+ if (debug > 1)
+ netdev_info(dev, "Reset %s\n",
+ (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
+ "failed" : "succeeded");
+}
+
+#ifdef USE_MMIO
+static void enable_mmio(long pioaddr, u32 quirks)
+{
+ int n;
+ if (quirks & rqRhineI) {
+ /* More recent docs say that this bit is reserved ... */
+ n = inb(pioaddr + ConfigA) | 0x20;
+ outb(n, pioaddr + ConfigA);
+ } else {
+ n = inb(pioaddr + ConfigD) | 0x80;
+ outb(n, pioaddr + ConfigD);
+ }
+}
+#endif
+
+/*
+ * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
+ * (plus 0x6C for Rhine-I/II)
+ */
+static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ outb(0x20, pioaddr + MACRegEEcsr);
+ RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));
+
+#ifdef USE_MMIO
+ /*
+ * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
+ * MMIO. If reloading EEPROM was done first this could be avoided, but
+ * it is not known if that still works with the "win98-reboot" problem.
+ */
+ enable_mmio(pioaddr, rp->quirks);
+#endif
+
+ /* Turn off EEPROM-controlled wake-up (magic packet) */
+ if (rp->quirks & rqWOL)
+ iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void rhine_poll(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ rhine_interrupt(dev->irq, (void *)dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static int rhine_napipoll(struct napi_struct *napi, int budget)
+{
+ struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
+ struct net_device *dev = rp->dev;
+ void __iomem *ioaddr = rp->base;
+ int work_done;
+
+ work_done = rhine_rx(dev, budget);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+
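+		/* Rx work fit in the budget: re-arm the interrupt sources,
+		 * which the interrupt handler presumably masked when it
+		 * scheduled this NAPI poll.
+		 */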
+		iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
+ IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
+ IntrTxDone | IntrTxError | IntrTxUnderrun |
+ IntrPCIErr | IntrStatsMax | IntrLinkChange,
+ ioaddr + IntrEnable);
+ }
+ return work_done;
+}
+
+static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+
+ /* Reset the chip to erase previous misconfiguration. */
+ rhine_chip_reset(dev);
+
+ /* Rhine-I needs extra time to recuperate before EEPROM reload */
+ if (rp->quirks & rqRhineI)
+ msleep(5);
+
+ /* Reload EEPROM controlled bytes cleared by soft reset */
+ rhine_reload_eeprom(pioaddr, dev);
+}
+
+static const struct net_device_ops rhine_netdev_ops = {
+ .ndo_open = rhine_open,
+ .ndo_stop = rhine_close,
+ .ndo_start_xmit = rhine_start_tx,
+ .ndo_get_stats = rhine_get_stats,
+ .ndo_set_rx_mode = rhine_set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_do_ioctl = netdev_ioctl,
+ .ndo_tx_timeout = rhine_tx_timeout,
+ .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = rhine_poll,
+#endif
+};
+
+static int __devinit rhine_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct rhine_private *rp;
+ int i, rc;
+ u32 quirks;
+ long pioaddr;
+ long memaddr;
+ void __iomem *ioaddr;
+ int io_size, phy_id;
+ const char *name;
+#ifdef USE_MMIO
+ int bar = 1;
+#else
+ int bar = 0;
+#endif
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ pr_info_once("%s\n", version);
+#endif
+
+ io_size = 256;
+ phy_id = 0;
+ quirks = 0;
+ name = "Rhine";
+	if (pdev->revision < VTunknown0) {
+		quirks = rqRhineI;
+		io_size = 128;
+	} else if (pdev->revision >= VT6102) {
+		quirks = rqWOL | rqForceReset;
+		if (pdev->revision < VT6105) {
+			name = "Rhine II";
+			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
+		} else {
+			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
+			if (pdev->revision >= VT6105_B0)
+				quirks |= rq6patterns;
+			if (pdev->revision < VT6105M)
+				name = "Rhine III";
+			else
+				name = "Rhine III (Management Adapter)";
+		}
+	}
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ goto err_out;
+
+ /* this should always be supported */
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(&pdev->dev,
+ "32-bit PCI DMA addresses not supported by the card!?\n");
+ goto err_out;
+ }
+
+ /* sanity check */
+ if ((pci_resource_len(pdev, 0) < io_size) ||
+ (pci_resource_len(pdev, 1) < io_size)) {
+ rc = -EIO;
+ dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
+ goto err_out;
+ }
+
+ pioaddr = pci_resource_start(pdev, 0);
+ memaddr = pci_resource_start(pdev, 1);
+
+ pci_set_master(pdev);
+
+ dev = alloc_etherdev(sizeof(struct rhine_private));
+ if (!dev) {
+ rc = -ENOMEM;
+ dev_err(&pdev->dev, "alloc_etherdev failed\n");
+ goto err_out;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ rp = netdev_priv(dev);
+ rp->dev = dev;
+ rp->quirks = quirks;
+ rp->pioaddr = pioaddr;
+ rp->pdev = pdev;
+
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out_free_netdev;
+
+ ioaddr = pci_iomap(pdev, bar, io_size);
+ if (!ioaddr) {
+ rc = -EIO;
+ dev_err(&pdev->dev,
+ "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
+ pci_name(pdev), io_size, memaddr);
+ goto err_out_free_res;
+ }
+
+#ifdef USE_MMIO
+ enable_mmio(pioaddr, quirks);
+
+ /* Check that selected MMIO registers match the PIO ones */
+ i = 0;
+ while (mmio_verify_registers[i]) {
+ int reg = mmio_verify_registers[i++];
+ unsigned char a = inb(pioaddr+reg);
+ unsigned char b = readb(ioaddr+reg);
+ if (a != b) {
+ rc = -EIO;
+ dev_err(&pdev->dev,
+ "MMIO do not match PIO [%02x] (%02x != %02x)\n",
+ reg, a, b);
+ goto err_out_unmap;
+ }
+ }
+#endif /* USE_MMIO */
+
+ dev->base_addr = (unsigned long)ioaddr;
+ rp->base = ioaddr;
+
+ /* Get chip registers into a sane state */
+ rhine_power_init(dev);
+ rhine_hw_init(dev, pioaddr);
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ /* Report it and use a random ethernet address instead */
+ netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
+ random_ether_addr(dev->dev_addr);
+ netdev_info(dev, "Using random MAC address: %pM\n",
+ dev->dev_addr);
+ }
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ /* For Rhine-I/II, phy_id is loaded from EEPROM */
+ if (!phy_id)
+ phy_id = ioread8(ioaddr + 0x6C);
+
+ dev->irq = pdev->irq;
+
+ spin_lock_init(&rp->lock);
+ INIT_WORK(&rp->reset_task, rhine_reset_task);
+
+ rp->mii_if.dev = dev;
+ rp->mii_if.mdio_read = mdio_read;
+ rp->mii_if.mdio_write = mdio_write;
+ rp->mii_if.phy_id_mask = 0x1f;
+ rp->mii_if.reg_num_mask = 0x1f;
+
+ /* The chip-specific entries in the device structure. */
+ dev->netdev_ops = &rhine_netdev_ops;
+	dev->ethtool_ops = &netdev_ethtool_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
+
+ if (rp->quirks & rqRhineI)
+ dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
+
+ if (pdev->revision >= VT6105M)
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
+ /* dev->name not defined before register_netdev()! */
+ rc = register_netdev(dev);
+ if (rc)
+ goto err_out_unmap;
+
+ netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
+ name,
+#ifdef USE_MMIO
+ memaddr,
+#else
+ (long)ioaddr,
+#endif
+ dev->dev_addr, pdev->irq);
+
+ pci_set_drvdata(pdev, dev);
+
+ {
+ u16 mii_cmd;
+ int mii_status = mdio_read(dev, phy_id, 1);
+ mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
+ mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
+ netdev_info(dev,
+ "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
+ phy_id,
+ mii_status, rp->mii_if.advertising,
+ mdio_read(dev, phy_id, 5));
+
+ /* set IFF_RUNNING */
+ if (mii_status & BMSR_LSTATUS)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ }
+ }
+ rp->mii_if.phy_id = phy_id;
+ if (debug > 1 && avoid_D3)
+ netdev_info(dev, "No D3 power state at shutdown\n");
+
+ return 0;
+
+err_out_unmap:
+ pci_iounmap(pdev, ioaddr);
+err_out_free_res:
+ pci_release_regions(pdev);
+err_out_free_netdev:
+ free_netdev(dev);
+err_out:
+ return rc;
+}
+
+static int alloc_ring(struct net_device* dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void *ring;
+ dma_addr_t ring_dma;
+
+ ring = pci_alloc_consistent(rp->pdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ &ring_dma);
+ if (!ring) {
+ netdev_err(dev, "Could not allocate DMA memory\n");
+ return -ENOMEM;
+ }
+ if (rp->quirks & rqRhineI) {
+ rp->tx_bufs = pci_alloc_consistent(rp->pdev,
+ PKT_BUF_SZ * TX_RING_SIZE,
+ &rp->tx_bufs_dma);
+ if (rp->tx_bufs == NULL) {
+ pci_free_consistent(rp->pdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ ring, ring_dma);
+ return -ENOMEM;
+ }
+ }
+
+ rp->rx_ring = ring;
+ rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
+ rp->rx_ring_dma = ring_dma;
+ rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
+
+ return 0;
+}
+
+static void free_ring(struct net_device* dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+
+ pci_free_consistent(rp->pdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ rp->rx_ring, rp->rx_ring_dma);
+ rp->tx_ring = NULL;
+
+ if (rp->tx_bufs)
+ pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
+ rp->tx_bufs, rp->tx_bufs_dma);
+
+ rp->tx_bufs = NULL;
+}
+
+static void alloc_rbufs(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ dma_addr_t next;
+ int i;
+
+ rp->dirty_rx = rp->cur_rx = 0;
+
+ rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+ rp->rx_head_desc = &rp->rx_ring[0];
+ next = rp->rx_ring_dma;
+
+ /* Init the ring entries */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ rp->rx_ring[i].rx_status = 0;
+ rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
+ next += sizeof(struct rx_desc);
+ rp->rx_ring[i].next_desc = cpu_to_le32(next);
+ rp->rx_skbuff[i] = NULL;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
+ rp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+
+ rp->rx_skbuff_dma[i] =
+ pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+
+ rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
+ rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
+ }
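+	/* dirty_rx is set so that cur_rx - dirty_rx counts the slots still
+	 * lacking a buffer; if the loop above stopped early, the refill
+	 * logic in rhine_rx() sees the deficit and retries the allocation.
+	 */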
+ rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+}
+
+static void free_rbufs(struct net_device* dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ int i;
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ rp->rx_ring[i].rx_status = 0;
+ rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
+ if (rp->rx_skbuff[i]) {
+ pci_unmap_single(rp->pdev,
+ rp->rx_skbuff_dma[i],
+ rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(rp->rx_skbuff[i]);
+ }
+ rp->rx_skbuff[i] = NULL;
+ }
+}
+
+static void alloc_tbufs(struct net_device* dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ dma_addr_t next;
+ int i;
+
+ rp->dirty_tx = rp->cur_tx = 0;
+ next = rp->tx_ring_dma;
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ rp->tx_skbuff[i] = NULL;
+ rp->tx_ring[i].tx_status = 0;
+ rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
+ next += sizeof(struct tx_desc);
+ rp->tx_ring[i].next_desc = cpu_to_le32(next);
+ if (rp->quirks & rqRhineI)
+ rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
+ }
+ rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
+}
+
+static void free_tbufs(struct net_device* dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ rp->tx_ring[i].tx_status = 0;
+ rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
+ rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
+ if (rp->tx_skbuff[i]) {
+ if (rp->tx_skbuff_dma[i]) {
+ pci_unmap_single(rp->pdev,
+ rp->tx_skbuff_dma[i],
+ rp->tx_skbuff[i]->len,
+ PCI_DMA_TODEVICE);
+ }
+ dev_kfree_skb(rp->tx_skbuff[i]);
+ }
+ rp->tx_skbuff[i] = NULL;
+ rp->tx_buf[i] = NULL;
+ }
+}
+
+static void rhine_check_media(struct net_device *dev, unsigned int init_media)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ mii_check_media(&rp->mii_if, debug, init_media);
+
+ if (rp->mii_if.full_duplex)
+ iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
+ ioaddr + ChipCmd1);
+ else
+ iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
+ ioaddr + ChipCmd1);
+ if (debug > 1)
+ netdev_info(dev, "force_media %d, carrier %d\n",
+ rp->mii_if.force_media, netif_carrier_ok(dev));
+}
+
+/* Called after status of force_media possibly changed */
+static void rhine_set_carrier(struct mii_if_info *mii)
+{
+ if (mii->force_media) {
+ /* autoneg is off: Link is always assumed to be up */
+ if (!netif_carrier_ok(mii->dev))
+ netif_carrier_on(mii->dev);
+	} else {
+		/* Let the MII library update carrier status */
+		rhine_check_media(mii->dev, 0);
+	}
+ if (debug > 1)
+ netdev_info(mii->dev, "force_media %d, carrier %d\n",
+ mii->force_media, netif_carrier_ok(mii->dev));
+}
+
+/**
+ * rhine_set_cam - set CAM multicast filters
+ * @ioaddr: register block of this Rhine
+ * @idx: multicast CAM index [0..MCAM_SIZE-1]
+ * @addr: multicast address (6 bytes)
+ *
+ * Load addresses into multicast filters.
+ */
+static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
+{
+ int i;
+
+ iowrite8(CAMC_CAMEN, ioaddr + CamCon);
+ wmb();
+
+ /* Paranoid -- idx out of range should never happen */
+ idx &= (MCAM_SIZE - 1);
+
+ iowrite8((u8) idx, ioaddr + CamAddr);
+
+ for (i = 0; i < 6; i++, addr++)
+ iowrite8(*addr, ioaddr + MulticastFilter0 + i);
+ udelay(10);
+ wmb();
+
+ iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
+ udelay(10);
+
+ iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_vlan_cam - set CAM VLAN filters
+ * @ioaddr: register block of this Rhine
+ * @idx: VLAN CAM index [0..VCAM_SIZE-1]
+ * @addr: VLAN ID (2 bytes)
+ *
+ * Load addresses into VLAN filters.
+ */
+static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
+{
+ iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
+ wmb();
+
+ /* Paranoid -- idx out of range should never happen */
+ idx &= (VCAM_SIZE - 1);
+
+ iowrite8((u8) idx, ioaddr + CamAddr);
+
+ iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
+ udelay(10);
+ wmb();
+
+ iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
+ udelay(10);
+
+ iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_cam_mask - set multicast CAM mask
+ * @ioaddr: register block of this Rhine
+ * @mask: multicast CAM mask
+ *
+ * Mask sets multicast filters active/inactive.
+ */
+static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
+{
+ iowrite8(CAMC_CAMEN, ioaddr + CamCon);
+ wmb();
+
+ /* write mask */
+ iowrite32(mask, ioaddr + CamMask);
+
+ /* disable CAMEN */
+ iowrite8(0, ioaddr + CamCon);
+}
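+/*
+ * Each bit of the mask enables the CAM entry with the matching index:
+ * rhine_set_rx_mode() below builds the mask as "mCAMmask |= 1 << i" for
+ * every address it programs, so e.g. a mask of 0x5 would leave only CAM
+ * entries 0 and 2 active.
+ */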
+
+/**
+ * rhine_set_vlan_cam_mask - set VLAN CAM mask
+ * @ioaddr: register block of this Rhine
+ * @mask: VLAN CAM mask
+ *
+ * Mask sets VLAN filters active/inactive.
+ */
+static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
+{
+ iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
+ wmb();
+
+ /* write mask */
+ iowrite32(mask, ioaddr + CamMask);
+
+ /* disable CAMEN */
+ iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_init_cam_filter - initialize CAM filters
+ * @dev: network device
+ *
+ * Initialize (disable) hardware VLAN and multicast support on this
+ * Rhine.
+ */
+static void rhine_init_cam_filter(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ /* Disable all CAMs */
+ rhine_set_vlan_cam_mask(ioaddr, 0);
+ rhine_set_cam_mask(ioaddr, 0);
+
+ /* disable hardware VLAN support */
+ BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
+ BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+}
+
+/**
+ * rhine_update_vcam - update VLAN CAM filters
+ * @dev: network device whose VLAN CAM filters are updated
+ *
+ * Update VLAN CAM filters to match configuration change.
+ */
+static void rhine_update_vcam(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ u16 vid;
+ u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */
+ unsigned int i = 0;
+
+ for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
+ rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
+ vCAMmask |= 1 << i;
+ if (++i >= VCAM_SIZE)
+ break;
+ }
+ rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
+}
+
+static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+
+ spin_lock_irq(&rp->lock);
+ set_bit(vid, rp->active_vlans);
+ rhine_update_vcam(dev);
+ spin_unlock_irq(&rp->lock);
+}
+
+static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+
+ spin_lock_irq(&rp->lock);
+ clear_bit(vid, rp->active_vlans);
+ rhine_update_vcam(dev);
+ spin_unlock_irq(&rp->lock);
+}
+
+static void init_registers(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
+
+ /* Initialize other registers. */
+ iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
+ /* Configure initial FIFO thresholds. */
+ iowrite8(0x20, ioaddr + TxConfig);
+ rp->tx_thresh = 0x20;
+ rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
+
+ iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
+ iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
+
+ rhine_set_rx_mode(dev);
+
+ if (rp->pdev->revision >= VT6105M)
+ rhine_init_cam_filter(dev);
+
+ napi_enable(&rp->napi);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
+ IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
+ IntrTxDone | IntrTxError | IntrTxUnderrun |
+ IntrPCIErr | IntrStatsMax | IntrLinkChange,
+ ioaddr + IntrEnable);
+
+ iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
+ ioaddr + ChipCmd);
+ rhine_check_media(dev, 1);
+}
+
+/* Enable MII link status auto-polling (required for IntrLinkChange) */
+static void rhine_enable_linkmon(void __iomem *ioaddr)
+{
+ iowrite8(0, ioaddr + MIICmd);
+ iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
+ iowrite8(0x80, ioaddr + MIICmd);
+
+ RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));
+
+ iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
+}
+
+/* Disable MII link status auto-polling (required for MDIO access) */
+static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
+{
+ iowrite8(0, ioaddr + MIICmd);
+
+ if (quirks & rqRhineI) {
+ iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
+
+ /* Can be called from ISR. Evil. */
+ mdelay(1);
+
+ /* 0x80 must be set immediately before turning it off */
+ iowrite8(0x80, ioaddr + MIICmd);
+
+ RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);
+
+ /* Heh. Now clear 0x80 again. */
+ iowrite8(0, ioaddr + MIICmd);
+	} else
+ RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
+}
+
+/* Read and write over the MII Management Data I/O (MDIO) interface. */
+
+static int mdio_read(struct net_device *dev, int phy_id, int regnum)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ int result;
+
+ rhine_disable_linkmon(ioaddr, rp->quirks);
+
+ /* rhine_disable_linkmon already cleared MIICmd */
+ iowrite8(phy_id, ioaddr + MIIPhyAddr);
+ iowrite8(regnum, ioaddr + MIIRegAddr);
+ iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */
+ RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
+ result = ioread16(ioaddr + MIIData);
+
+ rhine_enable_linkmon(ioaddr);
+ return result;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ rhine_disable_linkmon(ioaddr, rp->quirks);
+
+ /* rhine_disable_linkmon already cleared MIICmd */
+ iowrite8(phy_id, ioaddr + MIIPhyAddr);
+ iowrite8(regnum, ioaddr + MIIRegAddr);
+ iowrite16(value, ioaddr + MIIData);
+ iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */
+ RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));
+
+ rhine_enable_linkmon(ioaddr);
+}
+
+static int rhine_open(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ int rc;
+
+ rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
+ dev);
+ if (rc)
+ return rc;
+
+ if (debug > 1)
+ netdev_dbg(dev, "%s() irq %d\n", __func__, rp->pdev->irq);
+
+ rc = alloc_ring(dev);
+ if (rc) {
+ free_irq(rp->pdev->irq, dev);
+ return rc;
+ }
+ alloc_rbufs(dev);
+ alloc_tbufs(dev);
+ rhine_chip_reset(dev);
+ init_registers(dev);
+ if (debug > 2)
+ netdev_dbg(dev, "%s() Done - status %04x MII status: %04x\n",
+ __func__, ioread16(ioaddr + ChipCmd),
+ mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static void rhine_reset_task(struct work_struct *work)
+{
+ struct rhine_private *rp = container_of(work, struct rhine_private,
+ reset_task);
+ struct net_device *dev = rp->dev;
+
+ /* protect against concurrent rx interrupts */
+ disable_irq(rp->pdev->irq);
+
+ napi_disable(&rp->napi);
+
+ spin_lock_bh(&rp->lock);
+
+ /* clear all descriptors */
+ free_tbufs(dev);
+ free_rbufs(dev);
+ alloc_tbufs(dev);
+ alloc_rbufs(dev);
+
+ /* Reinitialize the hardware. */
+ rhine_chip_reset(dev);
+ init_registers(dev);
+
+ spin_unlock_bh(&rp->lock);
+ enable_irq(rp->pdev->irq);
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ dev->stats.tx_errors++;
+ netif_wake_queue(dev);
+}
+
+static void rhine_tx_timeout(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
+ ioread16(ioaddr + IntrStatus),
+ mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+
+ schedule_work(&rp->reset_task);
+}
+
+static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ unsigned entry;
+ unsigned long flags;
+
+ /* Caution: the write order is important here, set the field
+ with the "ownership" bits last. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = rp->cur_tx % TX_RING_SIZE;
+
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
+ rp->tx_skbuff[entry] = skb;
+
+ if ((rp->quirks & rqRhineI) &&
+ (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
+ /* Must use alignment buffer. */
+ if (skb->len > PKT_BUF_SZ) {
+ /* packet too long, drop it */
+ dev_kfree_skb(skb);
+ rp->tx_skbuff[entry] = NULL;
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ /* Padding is not copied and so must be redone. */
+ skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
+ if (skb->len < ETH_ZLEN)
+ memset(rp->tx_buf[entry] + skb->len, 0,
+ ETH_ZLEN - skb->len);
+ rp->tx_skbuff_dma[entry] = 0;
+ rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
+ (rp->tx_buf[entry] -
+ rp->tx_bufs));
+ } else {
+ rp->tx_skbuff_dma[entry] =
+ pci_map_single(rp->pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
+ }
+
+ rp->tx_ring[entry].desc_length =
+ cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
+
+ if (unlikely(vlan_tx_tag_present(skb))) {
+ rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
+ /* request tagging */
+ rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
+	} else
+ rp->tx_ring[entry].tx_status = 0;
+
+ /* lock eth irq */
+ spin_lock_irqsave(&rp->lock, flags);
+ wmb();
+ rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
+ wmb();
+
+ rp->cur_tx++;
+
+ /* Non-x86 Todo: explicitly flush cache lines here. */
+
+ if (vlan_tx_tag_present(skb))
+ /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
+ BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
+
+ /* Wake the potentially-idle transmit channel */
+ iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
+ ioaddr + ChipCmd1);
+ IOSYNC;
+
+ if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
+ netif_stop_queue(dev);
+
+ spin_unlock_irqrestore(&rp->lock, flags);
+
+ if (debug > 4) {
+ netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
+ rp->cur_tx-1, entry);
+ }
+ return NETDEV_TX_OK;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ u32 intr_status;
+ int boguscnt = max_interrupt_work;
+ int handled = 0;
+
+ while ((intr_status = get_intr_status(dev))) {
+ handled = 1;
+
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ if (intr_status & IntrTxDescRace)
+ iowrite8(0x08, ioaddr + IntrStatus2);
+ iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
+ IOSYNC;
+
+ if (debug > 4)
+ netdev_dbg(dev, "Interrupt, status %08x\n",
+ intr_status);
+
+ if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
+ IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
+ iowrite16(IntrTxAborted |
+ IntrTxDone | IntrTxError | IntrTxUnderrun |
+ IntrPCIErr | IntrStatsMax | IntrLinkChange,
+ ioaddr + IntrEnable);
+
+ napi_schedule(&rp->napi);
+ }
+
+ if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
+ if (intr_status & IntrTxErrSummary) {
+ /* Avoid scavenging before Tx engine turned off */
+ RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
+ if (debug > 2 &&
+ ioread8(ioaddr+ChipCmd) & CmdTxOn)
+ netdev_warn(dev,
+ "%s: Tx engine still on\n",
+ __func__);
+ }
+ rhine_tx(dev);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & (IntrPCIErr | IntrLinkChange |
+ IntrStatsMax | IntrTxError | IntrTxAborted |
+ IntrTxUnderrun | IntrTxDescRace))
+ rhine_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ netdev_warn(dev, "Too much work at interrupt, status=%#08x\n",
+ intr_status);
+ break;
+ }
+ }
+
+ if (debug > 3)
+ netdev_dbg(dev, "exiting interrupt, status=%08x\n",
+ ioread16(ioaddr + IntrStatus));
+ return IRQ_RETVAL(handled);
+}
+
+/* This routine is logically part of the interrupt handler, but isolated
+ for clarity. */
+static void rhine_tx(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
+
+ spin_lock(&rp->lock);
+
+ /* find and cleanup dirty tx descriptors */
+ while (rp->dirty_tx != rp->cur_tx) {
+ txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
+ if (debug > 6)
+ netdev_dbg(dev, "Tx scavenge %d status %08x\n",
+ entry, txstatus);
+ if (txstatus & DescOwn)
+ break;
+ if (txstatus & 0x8000) {
+ if (debug > 1)
+ netdev_dbg(dev, "Transmit error, Tx status %08x\n",
+ txstatus);
+ dev->stats.tx_errors++;
+ if (txstatus & 0x0400)
+ dev->stats.tx_carrier_errors++;
+ if (txstatus & 0x0200)
+ dev->stats.tx_window_errors++;
+ if (txstatus & 0x0100)
+ dev->stats.tx_aborted_errors++;
+ if (txstatus & 0x0080)
+ dev->stats.tx_heartbeat_errors++;
+ if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
+ (txstatus & 0x0800) || (txstatus & 0x1000)) {
+ dev->stats.tx_fifo_errors++;
+ rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
+ break; /* Keep the skb - we try again */
+ }
+ /* Transmitter restarted in 'abnormal' handler. */
+ } else {
+ if (rp->quirks & rqRhineI)
+ dev->stats.collisions += (txstatus >> 3) & 0x0F;
+ else
+ dev->stats.collisions += txstatus & 0x0F;
+ if (debug > 6)
+ netdev_dbg(dev, "collisions: %1.1x:%1.1x\n",
+ (txstatus >> 3) & 0xF,
+ txstatus & 0xF);
+ dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
+ dev->stats.tx_packets++;
+ }
+ /* Free the original skb. */
+ if (rp->tx_skbuff_dma[entry]) {
+ pci_unmap_single(rp->pdev,
+ rp->tx_skbuff_dma[entry],
+ rp->tx_skbuff[entry]->len,
+ PCI_DMA_TODEVICE);
+ }
+ dev_kfree_skb_irq(rp->tx_skbuff[entry]);
+ rp->tx_skbuff[entry] = NULL;
+ entry = (++rp->dirty_tx) % TX_RING_SIZE;
+ }
+ if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
+ netif_wake_queue(dev);
+
+ spin_unlock(&rp->lock);
+}
+
+/**
+ * rhine_get_vlan_tci - extract TCI from Rx data buffer
+ * @skb: pointer to sk_buff
+ * @data_size: used data area of the buffer including CRC
+ *
+ * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
+ * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
+ * aligned following the CRC.
+ */
+static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
+{
+ u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
+ return be16_to_cpup((__be16 *)trailer);
+}
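+/*
+ * Worked example: for a tagged frame with data_size = 67 (payload plus
+ * 4-byte CRC), the extracted 802.1Q header starts at the 4-byte-aligned
+ * offset (67 + 3) & ~3 = 68; skipping the 2-byte TPID leaves the
+ * big-endian TCI at skb->data + 70, which is what the helper returns.
+ */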
+
+/* Process up to limit frames from receive ring */
+static int rhine_rx(struct net_device *dev, int limit)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ int count;
+ int entry = rp->cur_rx % RX_RING_SIZE;
+
+ if (debug > 4) {
+ netdev_dbg(dev, "%s(), entry %d status %08x\n",
+ __func__, entry,
+ le32_to_cpu(rp->rx_head_desc->rx_status));
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ for (count = 0; count < limit; ++count) {
+ struct rx_desc *desc = rp->rx_head_desc;
+ u32 desc_status = le32_to_cpu(desc->rx_status);
+ u32 desc_length = le32_to_cpu(desc->desc_length);
+ int data_size = desc_status >> 16;
+
+ if (desc_status & DescOwn)
+ break;
+
+ if (debug > 4)
+ netdev_dbg(dev, "%s() status is %08x\n",
+ __func__, desc_status);
+
+ if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
+ if ((desc_status & RxWholePkt) != RxWholePkt) {
+ netdev_warn(dev,
+ "Oversized Ethernet frame spanned multiple buffers, "
+ "entry %#x length %d status %08x!\n",
+ entry, data_size,
+ desc_status);
+ netdev_warn(dev,
+ "Oversized Ethernet frame %p vs %p\n",
+ rp->rx_head_desc,
+ &rp->rx_ring[entry]);
+ dev->stats.rx_length_errors++;
+ } else if (desc_status & RxErr) {
+				/* There was an error. */
+ if (debug > 2)
+ netdev_dbg(dev, "%s() Rx error was %08x\n",
+ __func__, desc_status);
+ dev->stats.rx_errors++;
+ if (desc_status & 0x0030)
+ dev->stats.rx_length_errors++;
+ if (desc_status & 0x0048)
+ dev->stats.rx_fifo_errors++;
+ if (desc_status & 0x0004)
+ dev->stats.rx_frame_errors++;
+ if (desc_status & 0x0002) {
+ /* this can also be updated outside the interrupt handler */
+ spin_lock(&rp->lock);
+ dev->stats.rx_crc_errors++;
+ spin_unlock(&rp->lock);
+ }
+ }
+ } else {
+ struct sk_buff *skb = NULL;
+ /* Length should omit the CRC */
+ int pkt_len = data_size - 4;
+ u16 vlan_tci = 0;
+
+ /* Check if the packet is long enough to accept without
+ copying to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak)
+ skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+ if (skb) {
+ pci_dma_sync_single_for_cpu(rp->pdev,
+ rp->rx_skbuff_dma[entry],
+ rp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+
+ skb_copy_to_linear_data(skb,
+ rp->rx_skbuff[entry]->data,
+ pkt_len);
+ skb_put(skb, pkt_len);
+ pci_dma_sync_single_for_device(rp->pdev,
+ rp->rx_skbuff_dma[entry],
+ rp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ } else {
+ skb = rp->rx_skbuff[entry];
+ if (skb == NULL) {
+ netdev_err(dev, "Inconsistent Rx descriptor chain\n");
+ break;
+ }
+ rp->rx_skbuff[entry] = NULL;
+ skb_put(skb, pkt_len);
+ pci_unmap_single(rp->pdev,
+ rp->rx_skbuff_dma[entry],
+ rp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ }
+
+ if (unlikely(desc_length & DescTag))
+ vlan_tci = rhine_get_vlan_tci(skb, data_size);
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (unlikely(desc_length & DescTag))
+ __vlan_hwaccel_put_tag(skb, vlan_tci);
+ netif_receive_skb(skb);
+ dev->stats.rx_bytes += pkt_len;
+ dev->stats.rx_packets++;
+ }
+ entry = (++rp->cur_rx) % RX_RING_SIZE;
+ rp->rx_head_desc = &rp->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = rp->dirty_rx % RX_RING_SIZE;
+ if (rp->rx_skbuff[entry] == NULL) {
+ skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
+ rp->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ rp->rx_skbuff_dma[entry] =
+ pci_map_single(rp->pdev, skb->data,
+ rp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
+ }
+ rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
+ }
+
+ return count;
+}
+
+/*
+ * Clears the "tally counters" for CRC errors and missed frames(?).
+ * It has been reported that some chips need a write of 0 to clear
+ * these, for others the counters are set to 1 when written to and
+ * instead cleared when read. So we clear them both ways ...
+ */
+static inline void clear_tally_counters(void __iomem *ioaddr)
+{
+ iowrite32(0, ioaddr + RxMissed);
+ ioread16(ioaddr + RxCRCErrs);
+ ioread16(ioaddr + RxMissed);
+}
+
+static void rhine_restart_tx(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ int entry = rp->dirty_tx % TX_RING_SIZE;
+ u32 intr_status;
+
+ /*
+ * If new errors occurred, we need to sort them out before doing Tx.
+ * In that case the ISR will be back here RSN anyway.
+ */
+ intr_status = get_intr_status(dev);
+
+ if ((intr_status & IntrTxErrSummary) == 0) {
+
+ /* We know better than the chip where it should continue. */
+ iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
+ ioaddr + TxRingPtr);
+
+ iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
+ ioaddr + ChipCmd);
+
+ if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
+ /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
+ BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
+
+ iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
+ ioaddr + ChipCmd1);
+ IOSYNC;
+	} else {
+ /* This should never happen */
+ if (debug > 1)
+ netdev_warn(dev, "%s() Another error occurred %08x\n",
+ __func__, intr_status);
+ }
+}
+
+static void rhine_error(struct net_device *dev, int intr_status)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ spin_lock(&rp->lock);
+
+ if (intr_status & IntrLinkChange)
+ rhine_check_media(dev, 0);
+ if (intr_status & IntrStatsMax) {
+ dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
+ dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
+ clear_tally_counters(ioaddr);
+ }
+ if (intr_status & IntrTxAborted) {
+ if (debug > 1)
+ netdev_info(dev, "Abort %08x, frame dropped\n",
+ intr_status);
+ }
+ if (intr_status & IntrTxUnderrun) {
+ if (rp->tx_thresh < 0xE0)
+ BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
+ if (debug > 1)
+ netdev_info(dev, "Transmitter underrun, Tx threshold now %02x\n",
+ rp->tx_thresh);
+ }
+ if (intr_status & IntrTxDescRace) {
+ if (debug > 2)
+ netdev_info(dev, "Tx descriptor write-back race\n");
+ }
+ if ((intr_status & IntrTxError) &&
+ (intr_status & (IntrTxAborted |
+ IntrTxUnderrun | IntrTxDescRace)) == 0) {
+ if (rp->tx_thresh < 0xE0) {
+ BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
+ }
+ if (debug > 1)
+ netdev_info(dev, "Unspecified error. Tx threshold now %02x\n",
+ rp->tx_thresh);
+ }
+ if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
+ IntrTxError))
+ rhine_restart_tx(dev);
+
+ if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
+ IntrTxError | IntrTxAborted | IntrNormalSummary |
+ IntrTxDescRace)) {
+ if (debug > 1)
+ netdev_err(dev, "Something Wicked happened! %08x\n",
+ intr_status);
+ }
+
+ spin_unlock(&rp->lock);
+}
+
+static struct net_device_stats *rhine_get_stats(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rp->lock, flags);
+ dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
+ dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
+ clear_tally_counters(ioaddr);
+ spin_unlock_irqrestore(&rp->lock, flags);
+
+ return &dev->stats;
+}
+
+static void rhine_set_rx_mode(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ u8 rx_mode = 0x0C; /* Note: 0x02=accept runt, 0x01=accept errs */
+ struct netdev_hw_addr *ha;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ rx_mode = 0x1C;
+ iowrite32(0xffffffff, ioaddr + MulticastFilter0);
+ iowrite32(0xffffffff, ioaddr + MulticastFilter1);
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ iowrite32(0xffffffff, ioaddr + MulticastFilter0);
+ iowrite32(0xffffffff, ioaddr + MulticastFilter1);
+ } else if (rp->pdev->revision >= VT6105M) {
+ int i = 0;
+ u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */
+ netdev_for_each_mc_addr(ha, dev) {
+ if (i == MCAM_SIZE)
+ break;
+ rhine_set_cam(ioaddr, i, ha->addr);
+ mCAMmask |= 1 << i;
+ i++;
+ }
+ rhine_set_cam_mask(ioaddr, mCAMmask);
+ } else {
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
+ iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
+ }
+ /* enable/disable VLAN receive filtering */
+ if (rp->pdev->revision >= VT6105M) {
+ if (dev->flags & IFF_PROMISC)
+ BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+ else
+ BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+ }
+ BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
+}
+
+static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(rp->pdev));
+}
+
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&rp->lock);
+ rc = mii_ethtool_gset(&rp->mii_if, cmd);
+ spin_unlock_irq(&rp->lock);
+
+ return rc;
+}
+
+static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ int rc;
+
+ spin_lock_irq(&rp->lock);
+ rc = mii_ethtool_sset(&rp->mii_if, cmd);
+ spin_unlock_irq(&rp->lock);
+ rhine_set_carrier(&rp->mii_if);
+
+ return rc;
+}
+
+static int netdev_nway_reset(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+
+ return mii_nway_restart(&rp->mii_if);
+}
+
+static u32 netdev_get_link(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+
+ return mii_link_ok(&rp->mii_if);
+}
+
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ return debug;
+}
+
+static void netdev_set_msglevel(struct net_device *dev, u32 value)
+{
+ debug = value;
+}
+
+static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+
+ if (!(rp->quirks & rqWOL))
+ return;
+
+ spin_lock_irq(&rp->lock);
+ wol->supported = WAKE_PHY | WAKE_MAGIC |
+ WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
+ wol->wolopts = rp->wolopts;
+ spin_unlock_irq(&rp->lock);
+}
+
+static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ u32 support = WAKE_PHY | WAKE_MAGIC |
+ WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
+
+ if (!(rp->quirks & rqWOL))
+ return -EINVAL;
+
+ if (wol->wolopts & ~support)
+ return -EINVAL;
+
+ spin_lock_irq(&rp->lock);
+ rp->wolopts = wol->wolopts;
+ spin_unlock_irq(&rp->lock);
+
+ return 0;
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_settings = netdev_get_settings,
+ .set_settings = netdev_set_settings,
+ .nway_reset = netdev_nway_reset,
+ .get_link = netdev_get_link,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+ .get_wol = rhine_get_wol,
+ .set_wol = rhine_set_wol,
+};
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ int rc;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irq(&rp->lock);
+ rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
+ spin_unlock_irq(&rp->lock);
+ rhine_set_carrier(&rp->mii_if);
+
+ return rc;
+}
+
+static int rhine_close(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ napi_disable(&rp->napi);
+ cancel_work_sync(&rp->reset_task);
+ netif_stop_queue(dev);
+
+ spin_lock_irq(&rp->lock);
+
+ if (debug > 1)
+ netdev_dbg(dev, "Shutting down ethercard, status was %04x\n",
+ ioread16(ioaddr + ChipCmd));
+
+ /* Switch to loopback mode to avoid hardware races. */
+ iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ iowrite16(0x0000, ioaddr + IntrEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
+ iowrite16(CmdStop, ioaddr + ChipCmd);
+
+ spin_unlock_irq(&rp->lock);
+
+ free_irq(rp->pdev->irq, dev);
+ free_rbufs(dev);
+ free_tbufs(dev);
+ free_ring(dev);
+
+ return 0;
+}
+
+
+static void __devexit rhine_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rhine_private *rp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ pci_iounmap(pdev, rp->base);
+ pci_release_regions(pdev);
+
+ free_netdev(dev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static void rhine_shutdown (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ if (!(rp->quirks & rqWOL))
+ return; /* Nothing to do for non-WOL adapters */
+
+ rhine_power_init(dev);
+
+ /* Make sure we use pattern 0, 1 and not 4, 5 */
+ if (rp->quirks & rq6patterns)
+ iowrite8(0x04, ioaddr + WOLcgClr);
+
+ if (rp->wolopts & WAKE_MAGIC) {
+ iowrite8(WOLmagic, ioaddr + WOLcrSet);
+ /*
+ * Turn EEPROM-controlled wake-up back on -- some hardware may
+ * not cooperate otherwise.
+ */
+ iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
+ }
+
+ if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
+ iowrite8(WOLbmcast, ioaddr + WOLcgSet);
+
+ if (rp->wolopts & WAKE_PHY)
+ iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
+
+ if (rp->wolopts & WAKE_UCAST)
+ iowrite8(WOLucast, ioaddr + WOLcrSet);
+
+ if (rp->wolopts) {
+ /* Enable legacy WOL (for old motherboards) */
+ iowrite8(0x01, ioaddr + PwcfgSet);
+ iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
+ }
+
+ /* Hit power state D3 (sleep) */
+ if (!avoid_D3)
+ iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
+
+ /* TODO: Check use of pci_enable_wake() */
+}
+
+#ifdef CONFIG_PM
+static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rhine_private *rp = netdev_priv(dev);
+ unsigned long flags;
+
+ if (!netif_running(dev))
+ return 0;
+
+ napi_disable(&rp->napi);
+
+ netif_device_detach(dev);
+ pci_save_state(pdev);
+
+ spin_lock_irqsave(&rp->lock, flags);
+ rhine_shutdown(pdev);
+ spin_unlock_irqrestore(&rp->lock, flags);
+
+ free_irq(dev->irq, dev);
+ return 0;
+}
+
+static int rhine_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rhine_private *rp = netdev_priv(dev);
+ unsigned long flags;
+ int ret;
+
+ if (!netif_running(dev))
+ return 0;
+
+ if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
+ netdev_err(dev, "request_irq failed\n");
+
+ ret = pci_set_power_state(pdev, PCI_D0);
+ if (debug > 1)
+ netdev_info(dev, "Entering power state D0 %s (%d)\n",
+ ret ? "failed" : "succeeded", ret);
+
+ pci_restore_state(pdev);
+
+ spin_lock_irqsave(&rp->lock, flags);
+#ifdef USE_MMIO
+ enable_mmio(rp->pioaddr, rp->quirks);
+#endif
+ rhine_power_init(dev);
+ free_tbufs(dev);
+ free_rbufs(dev);
+ alloc_tbufs(dev);
+ alloc_rbufs(dev);
+ init_registers(dev);
+ spin_unlock_irqrestore(&rp->lock, flags);
+
+ netif_device_attach(dev);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct pci_driver rhine_driver = {
+ .name = DRV_NAME,
+ .id_table = rhine_pci_tbl,
+ .probe = rhine_init_one,
+ .remove = __devexit_p(rhine_remove_one),
+#ifdef CONFIG_PM
+ .suspend = rhine_suspend,
+ .resume = rhine_resume,
+#endif /* CONFIG_PM */
+ .shutdown = rhine_shutdown,
+};
+
+static struct dmi_system_id __initdata rhine_dmi_table[] = {
+ {
+ .ident = "EPIA-M",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
+ DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
+ },
+ },
+ {
+ .ident = "KV7",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
+ DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
+ },
+ },
+ { NULL }
+};
+
+static int __init rhine_init(void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+ pr_info("%s\n", version);
+#endif
+ if (dmi_check_system(rhine_dmi_table)) {
+ /* these BIOSes fail at PXE boot if chip is in D3 */
+ avoid_D3 = 1;
+ pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
+	} else if (avoid_D3)
+ pr_info("avoid_D3 set\n");
+
+ return pci_register_driver(&rhine_driver);
+}
+
+
+static void __exit rhine_cleanup(void)
+{
+ pci_unregister_driver(&rhine_driver);
+}
+
+
+module_init(rhine_init);
+module_exit(rhine_cleanup);
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
new file mode 100644
index 000000000000..b47bce1a2e2a
--- /dev/null
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -0,0 +1,3589 @@
+/*
+ * This code is derived from the VIA reference driver (copyright message
+ * below) provided to Red Hat by VIA Networking Technologies, Inc. for
+ * addition to the Linux kernel.
+ *
+ * The code has been merged into one source file, cleaned up to follow
+ * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned
+ * for 64bit hardware platforms.
+ *
+ * TODO
+ * rx_copybreak/alignment
+ * More testing
+ *
+ * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
+ * Additional fixes and clean up: Francois Romieu
+ *
+ * This source has not been verified for use in safety critical systems.
+ *
+ * Please direct queries about the revamped driver to the linux-kernel
+ * list not VIA.
+ *
+ * Original code:
+ *
+ * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
+ * All rights reserved.
+ *
+ * This software may be redistributed and/or modified under
+ * the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * Author: Chuang Liang-Shing, AJ Jiang
+ *
+ * Date: Jan 24, 2003
+ *
+ * MODULE_LICENSE("GPL");
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/io.h>
+#include <linux/if.h>
+#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
+#include <linux/inetdevice.h>
+#include <linux/reboot.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/crc-ccitt.h>
+#include <linux/crc32.h>
+
+#include "via-velocity.h"
+
+
+static int velocity_nics;
+static int msglevel = MSG_LEVEL_INFO;
+
+/**
+ * mac_get_cam_mask - Read a CAM mask
+ * @regs: register block for this velocity
+ * @mask: buffer to store mask
+ *
+ * Fetch the mask bits of the selected CAM and store them into the
+ * provided mask buffer.
+ */
+static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
+{
+ int i;
+
+ /* Select CAM mask */
+ BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+
+ writeb(0, &regs->CAMADDR);
+
+ /* read mask */
+ for (i = 0; i < 8; i++)
+ *mask++ = readb(&(regs->MARCAM[i]));
+
+ /* disable CAMEN */
+ writeb(0, &regs->CAMADDR);
+
+ /* Select mar */
+ BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+}
+
+/**
+ * mac_set_cam_mask - Set a CAM mask
+ * @regs: register block for this velocity
+ * @mask: CAM mask to load
+ *
+ * Store a new mask into a CAM
+ */
+static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
+{
+ int i;
+ /* Select CAM mask */
+ BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+
+ writeb(CAMADDR_CAMEN, &regs->CAMADDR);
+
+ for (i = 0; i < 8; i++)
+ writeb(*mask++, &(regs->MARCAM[i]));
+
+ /* disable CAMEN */
+ writeb(0, &regs->CAMADDR);
+
+ /* Select mar */
+ BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+}
+
+static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
+{
+ int i;
+ /* Select CAM mask */
+ BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+
+ writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);
+
+ for (i = 0; i < 8; i++)
+ writeb(*mask++, &(regs->MARCAM[i]));
+
+ /* disable CAMEN */
+ writeb(0, &regs->CAMADDR);
+
+ /* Select mar */
+ BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+}
+
+/**
+ * mac_set_cam - set CAM data
+ * @regs: register block of this velocity
+ * @idx: Cam index
+ * @addr: 2 or 6 bytes of CAM data
+ *
+ * Load an address or vlan tag into a CAM
+ */
+static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
+{
+ int i;
+
+ /* Select CAM mask */
+ BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+
+ idx &= (64 - 1);
+
+ writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);
+
+ for (i = 0; i < 6; i++)
+ writeb(*addr++, &(regs->MARCAM[i]));
+
+ BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
+
+ udelay(10);
+
+ writeb(0, &regs->CAMADDR);
+
+ /* Select mar */
+ BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+}
+
+static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
+ const u8 *addr)
+{
+
+ /* Select CAM mask */
+ BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+
+ idx &= (64 - 1);
+
+ writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
+ writew(*((u16 *) addr), &regs->MARCAM[0]);
+
+ BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
+
+ udelay(10);
+
+ writeb(0, &regs->CAMADDR);
+
+ /* Select mar */
+ BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+}
+
+
+/**
+ * mac_wol_reset - reset WOL after exiting low power
+ * @regs: register block of this velocity
+ *
+ * Called after we drop out of wake on lan mode in order to
+ * reset the Wake on lan features. This function doesn't restore
+ * the rest of the logic from the result of sleep/wakeup
+ */
+static void mac_wol_reset(struct mac_regs __iomem *regs)
+{
+
+ /* Turn off SWPTAG right after leaving power mode */
+ BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
+ /* clear sticky bits */
+ BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
+
+ BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
+ BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
+ /* disable force PME-enable */
+ writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
+ /* disable power-event config bit */
+ writew(0xFFFF, &regs->WOLCRClr);
+ /* clear power status */
+ writew(0xFFFF, &regs->WOLSRClr);
+}
+
+static const struct ethtool_ops velocity_ethtool_ops;
+
+/*
+ Define module options
+*/
+
+MODULE_AUTHOR("VIA Networking Technologies, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");
+
+#define VELOCITY_PARAM(N, D) \
+ static int N[MAX_UNITS] = OPTION_DEFAULT;\
+ module_param_array(N, int, NULL, 0); \
+ MODULE_PARM_DESC(N, D);
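+/*
+ * Illustrative expansion: VELOCITY_PARAM(RxDescriptors, "...") yields a
+ * per-unit option array wired up as a module parameter, roughly:
+ *
+ *	static int RxDescriptors[MAX_UNITS] = OPTION_DEFAULT;
+ *	module_param_array(RxDescriptors, int, NULL, 0);
+ *	MODULE_PARM_DESC(RxDescriptors, "...");
+ */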
+
+#define RX_DESC_MIN 64
+#define RX_DESC_MAX 255
+#define RX_DESC_DEF 64
+VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");
+
+#define TX_DESC_MIN 16
+#define TX_DESC_MAX 256
+#define TX_DESC_DEF 64
+VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");
+
+#define RX_THRESH_MIN 0
+#define RX_THRESH_MAX 3
+#define RX_THRESH_DEF 0
+/* rx_thresh[] is used for controlling the receive fifo threshold.
+   0: indicates the rxfifo threshold is 128 bytes.
+   1: indicates the rxfifo threshold is 512 bytes.
+   2: indicates the rxfifo threshold is 1024 bytes.
+   3: indicates the rxfifo threshold is store & forward.
+*/
+VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
+
+#define DMA_LENGTH_MIN 0
+#define DMA_LENGTH_MAX 7
+#define DMA_LENGTH_DEF 6
+
+/* DMA_length[] is used for controlling the DMA length
+ 0: 8 DWORDs
+ 1: 16 DWORDs
+ 2: 32 DWORDs
+ 3: 64 DWORDs
+ 4: 128 DWORDs
+ 5: 256 DWORDs
+   6: SF(flush till empty)
+   7: SF(flush till empty)
+*/
+VELOCITY_PARAM(DMA_length, "DMA length");
+
+#define IP_ALIG_DEF 0
+/* IP_byte_align[] is used for controlling IP header DWORD byte alignment
+   0: indicates the IP header won't be DWORD byte aligned. (Default)
+   1: indicates the IP header will be DWORD byte aligned.
+   In some environments the IP header must be DWORD byte aligned,
+   or the packet will be dropped when we receive it. (eg: IPVS)
+*/
+VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
+
+#define FLOW_CNTL_DEF 1
+#define FLOW_CNTL_MIN 1
+#define FLOW_CNTL_MAX 5
+
+/* flow_control[] is used for setting the flow control ability of the NIC.
+   1: hardware default - AUTO (default). Use the hardware default value in ANAR.
+   2: enable TX flow control.
+   3: enable RX flow control.
+   4: enable RX/TX flow control.
+   5: disable
+*/
+VELOCITY_PARAM(flow_control, "Enable flow control ability");
+
+#define MED_LNK_DEF 0
+#define MED_LNK_MIN 0
+#define MED_LNK_MAX 5
+/* speed_duplex[] is used for setting the speed and duplex mode of the NIC.
+   0: indicates autonegotiation for both speed and duplex mode
+   1: indicates 100Mbps half duplex mode
+   2: indicates 100Mbps full duplex mode
+   3: indicates 10Mbps half duplex mode
+   4: indicates 10Mbps full duplex mode
+   5: indicates 1000Mbps full duplex mode
+
+   Note:
+   if the EEPROM has been set to a forced mode, this option is ignored
+   by the driver.
+*/
+VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
+
+#define VAL_PKT_LEN_DEF 0
+/* ValPktLen[] is used for setting the checksum offload ability of the NIC.
+   0: Receive frames with invalid layer 2 length (Default)
+   1: Drop frames with invalid layer 2 length
+*/
+VELOCITY_PARAM(ValPktLen, "Receive or drop invalid 802.3 frames");
+
+#define WOL_OPT_DEF 0
+#define WOL_OPT_MIN 0
+#define WOL_OPT_MAX 7
+/* wol_opts[] is used for controlling wake-on-LAN behavior.
+   0: Wake up if a magic packet is received. (Default)
+   1: Wake up if link status changes on/off.
+   2: Wake up if an ARP packet is received.
+   4: Wake up if any unicast packet is received.
+   These values can be summed to enable more than one option.
+*/
+VELOCITY_PARAM(wol_opts, "Wake On Lan options");
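+/*
+ * Example (hypothetical invocation): "modprobe via-velocity wol_opts=6,4"
+ * would ask the first NIC to wake on ARP or unicast packets (2 + 4) and
+ * the second to wake on unicast packets only.
+ */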
+
+static int rx_copybreak = 200;
+module_param(rx_copybreak, int, 0644);
+MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
+
+/*
+ * Internal board variants. At the moment we have only one
+ */
+static struct velocity_info_tbl chip_info_table[] = {
+ {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
+ { }
+};
+
+/*
+ * Describe the PCI device identifiers that we support in this
+ * device driver. Used for hotplug autoloading.
+ */
+static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pci, velocity_id_table);
+
+/**
+ * get_chip_name - identifier to name
+ * @chip_id: chip identifier
+ *
+ * Given a chip identifier return a suitable description. Returns
+ * a pointer to a static string valid while the driver is loaded.
+ */
+static const char __devinit *get_chip_name(enum chip_type chip_id)
+{
+ int i;
+ for (i = 0; chip_info_table[i].name != NULL; i++)
+ if (chip_info_table[i].chip_id == chip_id)
+ break;
+ return chip_info_table[i].name;
+}
+
+/**
+ * velocity_remove1 - device unplug
+ * @pdev: PCI device being removed
+ *
+ * Device unload callback. Called on an unplug or on module
+ * unload for each active device that is present. Disconnects
+ * the device from the network layer and frees all the resources
+ */
+static void __devexit velocity_remove1(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct velocity_info *vptr = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ iounmap(vptr->mac_regs);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(dev);
+
+ velocity_nics--;
+}
+
+/**
+ * velocity_set_int_opt - parser for integer options
+ * @opt: pointer to option value
+ * @val: value the user requested (or -1 for default)
+ * @min: lowest value allowed
+ * @max: highest value allowed
+ * @def: default value
+ * @name: property name
+ * @devname: device name
+ *
+ * Set an integer property in the module options. This function does
+ * all the verification and checking as well as reporting so that
+ * we don't duplicate code for each option.
+ */
+static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, const char *devname)
+{
+ if (val == -1)
+ *opt = def;
+ else if (val < min || val > max) {
+ VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
+ devname, name, min, max);
+ *opt = def;
+ } else {
+ VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
+ devname, name, val);
+ *opt = val;
+ }
+}
+
+/**
+ * velocity_set_bool_opt - parser for boolean options
+ * @opt: pointer to option value
+ * @val: value the user requested (or -1 for default)
+ * @def: default value (yes/no)
+ * @flag: numeric value to set for true.
+ * @name: property name
+ * @devname: device name
+ *
+ * Set a boolean property in the module options. This function does
+ * all the verification and checking as well as reporting so that
+ * we don't duplicate code for each option.
+ */
+static void __devinit velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag, char *name, const char *devname)
+{
+ (*opt) &= (~flag);
+ if (val == -1)
+ *opt |= (def ? flag : 0);
+ else if (val < 0 || val > 1) {
+ printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
+ devname, name);
+ *opt |= (def ? flag : 0);
+ } else {
+ printk(KERN_INFO "%s: set parameter %s to %s\n",
+ devname, name, val ? "TRUE" : "FALSE");
+ *opt |= (val ? flag : 0);
+ }
+}
+
+/**
+ * velocity_get_options - set options on device
+ * @opts: option structure for the device
+ * @index: index of option to use in module options array
+ * @devname: device name
+ *
+ * Turn the module and command options into a single structure
+ * for the current device
+ */
+static void __devinit velocity_get_options(struct velocity_opt *opts, int index, const char *devname)
+{
+
+ velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
+ velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
+ velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
+ velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);
+
+ velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
+ velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
+ velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
+ velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
+ velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
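+	/*
+	 * Rounding numrx down to a multiple of four below presumably matches
+	 * the hardware, which hands Rx descriptors back in blocks of four.
+	 */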
+ opts->numrx = (opts->numrx & ~3);
+}
+
+/**
+ * velocity_init_cam_filter - initialise CAM
+ * @vptr: velocity to program
+ *
+ * Initialize the content addressable memory used for filters. Load
+ * appropriately according to the presence of VLAN
+ */
+static void velocity_init_cam_filter(struct velocity_info *vptr)
+{
+ struct mac_regs __iomem *regs = vptr->mac_regs;
+ unsigned int vid, i = 0;
+
+ /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
+ WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
+ WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);
+
+ /* Disable all CAMs */
+ memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
+ memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
+ mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
+ mac_set_cam_mask(regs, vptr->mCAMmask);
+
+ /* Enable VCAMs */
+ for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
+ mac_set_vlan_cam(regs, i, (u8 *) &vid);
+ vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
+ if (++i >= VCAM_SIZE)
+ break;
+ }
+ mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
+}
+
+static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+
+ spin_lock_irq(&vptr->lock);
+ set_bit(vid, vptr->active_vlans);
+ velocity_init_cam_filter(vptr);
+ spin_unlock_irq(&vptr->lock);
+}
+
+static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+
+ spin_lock_irq(&vptr->lock);
+ clear_bit(vid, vptr->active_vlans);
+ velocity_init_cam_filter(vptr);
+ spin_unlock_irq(&vptr->lock);
+}
+
+static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
+{
+ vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
+}
+
+/**
+ * velocity_rx_reset - handle a receive reset
+ * @vptr: velocity we are resetting
+ *
+ * Reset the ownership and status for the receive ring side.
+ * Hand all the receive queue to the NIC.
+ */
+static void velocity_rx_reset(struct velocity_info *vptr)
+{
+
+ struct mac_regs __iomem *regs = vptr->mac_regs;
+ int i;
+
+ velocity_init_rx_ring_indexes(vptr);
+
+ /*
+ * Init state, all RD entries belong to the NIC
+ */
+ for (i = 0; i < vptr->options.numrx; ++i)
+ vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
+
+ writew(vptr->options.numrx, &regs->RBRDU);
+ writel(vptr->rx.pool_dma, &regs->RDBaseLo);
+ writew(0, &regs->RDIdx);
+ writew(vptr->options.numrx - 1, &regs->RDCSize);
+}
+
+/**
+ * velocity_get_opt_media_mode - get media selection
+ * @vptr: velocity adapter
+ *
+ * Get the media mode stored in EEPROM or module options and load
+ * mii_status accordingly. The requested link state information
+ * is also returned.
+ */
+static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
+{
+ u32 status = 0;
+
+ switch (vptr->options.spd_dpx) {
+ case SPD_DPX_AUTO:
+ status = VELOCITY_AUTONEG_ENABLE;
+ break;
+ case SPD_DPX_100_FULL:
+ status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
+ break;
+ case SPD_DPX_10_FULL:
+ status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
+ break;
+ case SPD_DPX_100_HALF:
+ status = VELOCITY_SPEED_100;
+ break;
+ case SPD_DPX_10_HALF:
+ status = VELOCITY_SPEED_10;
+ break;
+ case SPD_DPX_1000_FULL:
+ status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
+ break;
+ }
+ vptr->mii_status = status;
+ return status;
+}
+
+/**
+ * safe_disable_mii_autopoll - autopoll off
+ * @regs: velocity registers
+ *
+ * Turn off the autopoll and wait for it to disable on the chip
+ */
+static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
+{
+ u16 ww;
+
+ /* turn off MAUTO */
+ writeb(0, &regs->MIICR);
+ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
+ udelay(1);
+ if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
+ break;
+ }
+}
+
+/**
+ * enable_mii_autopoll - turn on autopolling
+ * @regs: velocity registers
+ *
+ * Enable the MII link status autopoll feature on the Velocity
+ * hardware. Wait for it to enable.
+ */
+static void enable_mii_autopoll(struct mac_regs __iomem *regs)
+{
+ int ii;
+
+ writeb(0, &(regs->MIICR));
+ writeb(MIIADR_SWMPL, &regs->MIIADR);
+
+ for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
+ udelay(1);
+ if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
+ break;
+ }
+
+ writeb(MIICR_MAUTO, &regs->MIICR);
+
+ for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
+ udelay(1);
+ if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
+ break;
+ }
+}
+
+/**
+ * velocity_mii_read - read MII data
+ * @regs: velocity registers
+ * @index: MII register index
+ * @data: buffer for received data
+ *
+ * Perform a single read of an MII 16bit register. Returns zero
+ * on success or -ETIMEDOUT if the PHY did not respond.
+ */
+static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
+{
+ u16 ww;
+
+ /*
+ * Disable MIICR_MAUTO, so that mii addr can be set normally
+ */
+ safe_disable_mii_autopoll(regs);
+
+ writeb(index, &regs->MIIADR);
+
+ BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);
+
+ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
+ if (!(readb(&regs->MIICR) & MIICR_RCMD))
+ break;
+ }
+
+ *data = readw(&regs->MIIDATA);
+
+ enable_mii_autopoll(regs);
+ if (ww == W_MAX_TIMEOUT)
+ return -ETIMEDOUT;
+ return 0;
+}
+
+/**
+ * mii_check_media_mode - check media state
+ * @regs: velocity registers
+ *
+ * Check the current MII status and determine the link status
+ * accordingly
+ */
+static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
+{
+ u32 status = 0;
+ u16 ANAR;
+
+ if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
+ status |= VELOCITY_LINK_FAIL;
+
+ if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
+ status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
+ else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
+ status |= (VELOCITY_SPEED_1000);
+ else {
+ velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
+ if (ANAR & ADVERTISE_100FULL)
+ status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
+ else if (ANAR & ADVERTISE_100HALF)
+ status |= VELOCITY_SPEED_100;
+ else if (ANAR & ADVERTISE_10FULL)
+ status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
+ else
+ status |= (VELOCITY_SPEED_10);
+ }
+
+ if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
+ velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
+ if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
+ == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
+ if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
+ status |= VELOCITY_AUTONEG_ENABLE;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * velocity_mii_write - write MII data
+ * @regs: velocity registers
+ * @mii_addr: MII register index
+ * @data: 16bit data for the MII register
+ *
+ * Perform a single write to an MII 16bit register. Returns zero
+ * on success or -ETIMEDOUT if the PHY did not respond.
+ */
+static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
+{
+ u16 ww;
+
+ /*
+ * Disable MIICR_MAUTO, so that mii addr can be set normally
+ */
+ safe_disable_mii_autopoll(regs);
+
+ /* MII reg offset */
+ writeb(mii_addr, &regs->MIIADR);
+ /* set MII data */
+ writew(data, &regs->MIIDATA);
+
+ /* turn on MIICR_WCMD */
+ BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);
+
+ /* W_MAX_TIMEOUT is the timeout period */
+ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
+ udelay(5);
+ if (!(readb(&regs->MIICR) & MIICR_WCMD))
+ break;
+ }
+ enable_mii_autopoll(regs);
+
+ if (ww == W_MAX_TIMEOUT)
+ return -ETIMEDOUT;
+ return 0;
+}
+
+/**
+ * set_mii_flow_control - flow control setup
+ * @vptr: velocity interface
+ *
+ * Set up the flow control on this interface according to
+ * the supplied user/eeprom options.
+ */
+static void set_mii_flow_control(struct velocity_info *vptr)
+{
+ /*Enable or Disable PAUSE in ANAR */
+ switch (vptr->options.flow_cntl) {
+ case FLOW_CNTL_TX:
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
+ break;
+
+ case FLOW_CNTL_RX:
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
+ break;
+
+ case FLOW_CNTL_TX_RX:
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
+ break;
+
+ case FLOW_CNTL_DISABLE:
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * mii_set_auto_on - autonegotiate on
+ * @vptr: velocity
+ *
+ * Enable autonegotiation on this interface
+ */
+static void mii_set_auto_on(struct velocity_info *vptr)
+{
+ if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
+ MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
+ else
+ MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
+}
+
+static u32 check_connection_type(struct mac_regs __iomem *regs)
+{
+ u32 status = 0;
+ u8 PHYSR0;
+ u16 ANAR;
+ PHYSR0 = readb(&regs->PHYSR0);
+
+ /*
+ if (!(PHYSR0 & PHYSR0_LINKGD))
+ status|=VELOCITY_LINK_FAIL;
+ */
+
+ if (PHYSR0 & PHYSR0_FDPX)
+ status |= VELOCITY_DUPLEX_FULL;
+
+ if (PHYSR0 & PHYSR0_SPDG)
+ status |= VELOCITY_SPEED_1000;
+ else if (PHYSR0 & PHYSR0_SPD10)
+ status |= VELOCITY_SPEED_10;
+ else
+ status |= VELOCITY_SPEED_100;
+
+ if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
+ velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
+ if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
+ == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
+ if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
+ status |= VELOCITY_AUTONEG_ENABLE;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * velocity_set_media_mode - set media mode
+ * @vptr: velocity interface
+ * @mii_status: old MII link state
+ *
+ * Check the media link state and configure the flow control,
+ * PHY and velocity hardware setup accordingly. In particular
+ * we need to set up CD polling and frame bursting.
+ */
+static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
+{
+ u32 curr_status;
+ struct mac_regs __iomem *regs = vptr->mac_regs;
+
+ vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
+ curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);
+
+ /* Set MII flow control */
+ set_mii_flow_control(vptr);
+
+ /*
+ Check if new status is consistent with current status
+ if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
+ (mii_status==curr_status)) {
+ vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
+ vptr->mii_status=check_connection_type(vptr->mac_regs);
+ VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
+ return 0;
+ }
+ */
+
+ if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
+ MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
+
+ /*
+ * If connection type is AUTO
+ */
+ if (mii_status & VELOCITY_AUTONEG_ENABLE) {
+ VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
+ /* clear force MAC mode bit */
+ BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
+ /* set duplex mode of MAC according to duplex mode of MII */
+ MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
+ MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
+
+ /* enable AUTO-NEGO mode */
+ mii_set_auto_on(vptr);
+ } else {
+ u16 CTRL1000;
+ u16 ANAR;
+ u8 CHIPGCR;
+
+ /*
+ * 1. if it's 3119, disable frame bursting in halfduplex mode
+ * and enable it in fullduplex mode
+ * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
+ * 3. only enable CD heart beat counter in 10HD mode
+ */
+
+ /* set force MAC mode bit */
+ BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
+
+ CHIPGCR = readb(&regs->CHIPGCR);
+
+ if (mii_status & VELOCITY_SPEED_1000)
+ CHIPGCR |= CHIPGCR_FCGMII;
+ else
+ CHIPGCR &= ~CHIPGCR_FCGMII;
+
+ if (mii_status & VELOCITY_DUPLEX_FULL) {
+ CHIPGCR |= CHIPGCR_FCFDX;
+ writeb(CHIPGCR, &regs->CHIPGCR);
+ VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
+ if (vptr->rev_id < REV_ID_VT3216_A0)
+ BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
+ } else {
+ CHIPGCR &= ~CHIPGCR_FCFDX;
+ VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
+ writeb(CHIPGCR, &regs->CHIPGCR);
+ if (vptr->rev_id < REV_ID_VT3216_A0)
+ BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
+ }
+
+ velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
+ CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+ if ((mii_status & VELOCITY_SPEED_1000) &&
+ (mii_status & VELOCITY_DUPLEX_FULL)) {
+ CTRL1000 |= ADVERTISE_1000FULL;
+ }
+ velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);
+
+ if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
+ BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
+ else
+ BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
+
+ /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
+ velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
+ ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
+ if (mii_status & VELOCITY_SPEED_100) {
+ if (mii_status & VELOCITY_DUPLEX_FULL)
+ ANAR |= ADVERTISE_100FULL;
+ else
+ ANAR |= ADVERTISE_100HALF;
+ } else if (mii_status & VELOCITY_SPEED_10) {
+ if (mii_status & VELOCITY_DUPLEX_FULL)
+ ANAR |= ADVERTISE_10FULL;
+ else
+ ANAR |= ADVERTISE_10HALF;
+ }
+ velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
+ /* enable AUTO-NEGO mode */
+ mii_set_auto_on(vptr);
+ /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
+ }
+ /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
+ /* vptr->mii_status=check_connection_type(vptr->mac_regs); */
+ return VELOCITY_LINK_CHANGE;
+}
+
+/**
+ * velocity_print_link_status - link status reporting
+ * @vptr: velocity to report on
+ *
+ * Turn the link status of the velocity card into a kernel log
+ * description of the new link state, detailing speed and duplex
+ * status
+ */
+static void velocity_print_link_status(struct velocity_info *vptr)
+{
+ if (vptr->mii_status & VELOCITY_LINK_FAIL) {
+ VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
+ } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
+ VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);
+
+ if (vptr->mii_status & VELOCITY_SPEED_1000)
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
+ else if (vptr->mii_status & VELOCITY_SPEED_100)
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
+ else
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");
+
+ if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
+ VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
+ else
+ VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
+ } else {
+ VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
+ switch (vptr->options.spd_dpx) {
+ case SPD_DPX_1000_FULL:
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n");
+ break;
+ case SPD_DPX_100_HALF:
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
+ break;
+ case SPD_DPX_100_FULL:
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
+ break;
+ case SPD_DPX_10_HALF:
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
+ break;
+ case SPD_DPX_10_FULL:
+ VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/**
+ * enable_flow_control_ability - flow control
+ * @vptr: velocity to configure
+ *
+ * Set up flow control according to the flow control options
+ * determined by the eeprom/configuration.
+ */
+static void enable_flow_control_ability(struct velocity_info *vptr)
+{
+ struct mac_regs __iomem *regs = vptr->mac_regs;
+
+ switch (vptr->options.flow_cntl) {
+ case FLOW_CNTL_DEFAULT:
+ if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
+ writel(CR0_FDXRFCEN, &regs->CR0Set);
+ else
+ writel(CR0_FDXRFCEN, &regs->CR0Clr);
+
+ if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
+ writel(CR0_FDXTFCEN, &regs->CR0Set);
+ else
+ writel(CR0_FDXTFCEN, &regs->CR0Clr);
+ break;
+
+ case FLOW_CNTL_TX:
+ writel(CR0_FDXTFCEN, &regs->CR0Set);
+ writel(CR0_FDXRFCEN, &regs->CR0Clr);
+ break;
+
+ case FLOW_CNTL_RX:
+ writel(CR0_FDXRFCEN, &regs->CR0Set);
+ writel(CR0_FDXTFCEN, &regs->CR0Clr);
+ break;
+
+ case FLOW_CNTL_TX_RX:
+ writel(CR0_FDXTFCEN, &regs->CR0Set);
+ writel(CR0_FDXRFCEN, &regs->CR0Set);
+ break;
+
+ case FLOW_CNTL_DISABLE:
+ writel(CR0_FDXRFCEN, &regs->CR0Clr);
+ writel(CR0_FDXTFCEN, &regs->CR0Clr);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * velocity_soft_reset - soft reset
+ * @vptr: velocity to reset
+ *
+ * Kick off a soft reset of the velocity adapter and then poll
+ * until the reset sequence has completed before returning.
+ */
+static int velocity_soft_reset(struct velocity_info *vptr)
+{
+ struct mac_regs __iomem *regs = vptr->mac_regs;
+ int i = 0;
+
+ writel(CR0_SFRST, &regs->CR0Set);
+
+ for (i = 0; i < W_MAX_TIMEOUT; i++) {
+ udelay(5);
+ if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
+ break;
+ }
+
+ if (i == W_MAX_TIMEOUT) {
+ writel(CR0_FORSRST, &regs->CR0Set);
+ /* FIXME: PCI POSTING */
+ /* delay 2ms */
+ mdelay(2);
+ }
+ return 0;
+}
+
+/**
+ * velocity_set_multi - filter list change callback
+ * @dev: network device
+ *
+ * Called by the network layer when the filter lists need to change
+ * for a velocity adapter. Reload the CAMs with the new address
+ * filter ruleset.
+ */
+static void velocity_set_multi(struct net_device *dev)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+ struct mac_regs __iomem *regs = vptr->mac_regs;
+ u8 rx_mode;
+ int i;
+ struct netdev_hw_addr *ha;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ writel(0xffffffff, &regs->MARCAM[0]);
+ writel(0xffffffff, &regs->MARCAM[4]);
+ rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
+ } else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ writel(0xffffffff, &regs->MARCAM[0]);
+ writel(0xffffffff, &regs->MARCAM[4]);
+ rx_mode = (RCR_AM | RCR_AB);
+ } else {
+ int offset = MCAM_SIZE - vptr->multicast_limit;
+ mac_get_cam_mask(regs, vptr->mCAMmask);
+
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ mac_set_cam(regs, i + offset, ha->addr);
+ vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
+ i++;
+ }
+
+ mac_set_cam_mask(regs, vptr->mCAMmask);
+ rx_mode = RCR_AM | RCR_AB | RCR_AP;
+ }
+ if (dev->mtu > 1500)
+ rx_mode |= RCR_AL;
+
+ BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
+}
+
+/*
+ * MII access , media link mode setting functions
+ */
+
+/**
+ * mii_init - set up MII
+ * @vptr: velocity adapter
+ * @mii_status: link status
+ *
+ * Set up the PHY for the current link state.
+ */
+static void mii_init(struct velocity_info *vptr, u32 mii_status)
+{
+ u16 BMCR;
+
+ switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
+ case PHYID_CICADA_CS8201:
+ /*
+ * Reset to hardware default
+ */
+ MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
+ /*
+ * Turn on the ECHODIS bit in NWay-forced full mode and turn
+ * it off in NWay-forced half mode to work around the
+ * NWay-forced vs. legacy-forced issue.
+ */
+ if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
+ MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
+ else
+ MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
+ /*
+ * Turn on Link/Activity LED enable bit for CIS8201
+ */
+ MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
+ break;
+ case PHYID_VT3216_32BIT:
+ case PHYID_VT3216_64BIT:
+ /*
+ * Reset to hardware default
+ */
+ MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
+ /*
+ * Turn on the ECHODIS bit in NWay-forced full mode and turn
+ * it off in NWay-forced half mode to work around the
+ * NWay-forced vs. legacy-forced issue.
+ */
+ if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
+ MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
+ else
+ MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
+ break;
+
+ case PHYID_MARVELL_1000:
+ case PHYID_MARVELL_1000S:
+ /*
+ * Assert CRS on Transmit
+ */
+ MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
+ /*
+ * Reset to hardware default
+ */
+ MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
+ break;
+ default:
+ ;
+ }
+ velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
+ if (BMCR & BMCR_ISOLATE) {
+ BMCR &= ~BMCR_ISOLATE;
+ velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
+ }
+}
+
+/**
+ * setup_queue_timers - Setup interrupt timers
+ * @vptr: velocity adapter
+ *
+ * Set the interrupt frequency used during suppression (the timeout
+ * that fires if the frame count threshold is not reached).
+ */
+static void setup_queue_timers(struct velocity_info *vptr)
+{
+ /* Only for newer revisions */
+ if (vptr->rev_id >= REV_ID_VT3216_A0) {
+ u8 txqueue_timer = 0;
+ u8 rxqueue_timer = 0;
+
+ if (vptr->mii_status & (VELOCITY_SPEED_1000 |
+ VELOCITY_SPEED_100)) {
+ txqueue_timer = vptr->options.txqueue_timer;
+ rxqueue_timer = vptr->options.rxqueue_timer;
+ }
+
+ writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
+ writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
+ }
+}
+
+/**
+ * setup_adaptive_interrupts - Setup interrupt suppression
+ * @vptr: velocity adapter
+ *
+ * The velocity is able to suppress interrupts during high
+ * interrupt load. This function turns on that feature.
+ */
+static void setup_adaptive_interrupts(struct velocity_info *vptr)
+{
+ struct mac_regs __iomem *regs = vptr->mac_regs;
+ u16 tx_intsup = vptr->options.tx_intsup;
+ u16 rx_intsup = vptr->options.rx_intsup;
+
+ /* Setup default interrupt mask (will be changed below) */
+ vptr->int_mask = INT_MASK_DEF;
+
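+ /*
+ * ISRCTL is paged: the CAMCR write selects whether the
+ * threshold written below lands in the Tx (CAMCR_PS0) or
+ * Rx (CAMCR_PS1) suppression register.
+ */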
+ /* Set Tx Interrupt Suppression Threshold */
+ writeb(CAMCR_PS0, &regs->CAMCR);
+ if (tx_intsup != 0) {
+ vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
+ ISR_PTX2I | ISR_PTX3I);
+ writew(tx_intsup, &regs->ISRCTL);
+ } else
+ writew(ISRCTL_TSUPDIS, &regs->ISRCTL);
+
+ /* Set Rx Interrupt Suppression Threshold */
+ writeb(CAMCR_PS1, &regs->CAMCR);
+ if (rx_intsup != 0) {
+ vptr->int_mask &= ~ISR_PRXI;
+ writew(rx_intsup, &regs->ISRCTL);
+ } else
+ writew(ISRCTL_RSUPDIS, &regs->ISRCTL);
+
+ /* Select page to interrupt hold timer */
+ writeb(0, &regs->CAMCR);
+}
+
+/**
+ * velocity_init_registers - initialise MAC registers
+ * @vptr: velocity to init
+ * @type: type of initialisation (hot or cold)
+ *
+ * Initialise the MAC on a reset or on first set up on the
+ * hardware.
+ */
+static void velocity_init_registers(struct velocity_info *vptr,
+ enum velocity_init_type type)
+{
+ struct mac_regs __iomem *regs = vptr->mac_regs;
+ int i, mii_status;
+
+ mac_wol_reset(regs);
+
+ switch (type) {
+ case VELOCITY_INIT_RESET:
+ case VELOCITY_INIT_WOL:
+
+ netif_stop_queue(vptr->dev);
+
+ /*
+ * Reset RX to prevent the RX pointer being left off a 4X-aligned location
+ */
+ velocity_rx_reset(vptr);
+ mac_rx_queue_run(regs);
+ mac_rx_queue_wake(regs);
+
+ mii_status = velocity_get_opt_media_mode(vptr);
+ if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
+ velocity_print_link_status(vptr);
+ if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
+ netif_wake_queue(vptr->dev);
+ }
+
+ enable_flow_control_ability(vptr);
+
+ mac_clear_isr(regs);
+ writel(CR0_STOP, &regs->CR0Clr);
+ writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
+ &regs->CR0Set);
+
+ break;
+
+ case VELOCITY_INIT_COLD:
+ default:
+ /*
+ * Do reset
+ */
+ velocity_soft_reset(vptr);
+ mdelay(5);
+
+ mac_eeprom_reload(regs);
+ for (i = 0; i < 6; i++)
+ writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));
+
+ /*
+ * clear Pre_ACPI bit.
+ */
+ BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
+ mac_set_rx_thresh(regs, vptr->options.rx_thresh);
+ mac_set_dma_length(regs, vptr->options.DMA_length);
+
+ writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
+ /*
+ * Back off algorithm use original IEEE standard
+ */
+ BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);
+
+ /*
+ * Init CAM filter
+ */
+ velocity_init_cam_filter(vptr);
+
+ /*
+ * Set packet filter: Receive directed and broadcast address
+ */
+ velocity_set_multi(vptr->dev);
+
+ /*
+ * Enable MII auto-polling
+ */
+ enable_mii_autopoll(regs);
+
+ setup_adaptive_interrupts(vptr);
+
+ writel(vptr->rx.pool_dma, &regs->RDBaseLo);
+ writew(vptr->options.numrx - 1, &regs->RDCSize);
+ mac_rx_queue_run(regs);
+ mac_rx_queue_wake(regs);
+
+ writew(vptr->options.numtx - 1, &regs->TDCSize);
+
+ for (i = 0; i < vptr->tx.numq; i++) {
+ writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
+ mac_tx_queue_run(regs, i);
+ }
+
+ init_flow_control_register(vptr);
+
+ writel(CR0_STOP, &regs->CR0Clr);
+ writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);
+
+ mii_status = velocity_get_opt_media_mode(vptr);
+ netif_stop_queue(vptr->dev);
+
+ mii_init(vptr, mii_status);
+
+ if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
+ velocity_print_link_status(vptr);
+ if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
+ netif_wake_queue(vptr->dev);
+ }
+
+ enable_flow_control_ability(vptr);
+ mac_hw_mibs_init(regs);
+ mac_write_int_mask(vptr->int_mask, regs);
+ mac_clear_isr(regs);
+
+ }
+}
+
+static void velocity_give_many_rx_descs(struct velocity_info *vptr)
+{
+ struct mac_regs __iomem *regs = vptr->mac_regs;
+ int avail, dirty, unusable;
+
+ /*
+ * The RD count handed back to the chip must be a multiple of 4
+ * per the hardware spec (programming guide rev 1.20, p.13).
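+ *
+ * Illustration: with rx.dirty = 10 and rx.filled = 7, 'unusable'
+ * is 3, so descriptors 3..6 are handed over (RBRDU is credited
+ * with 4) and descriptors 7..9 stay counted in rx.filled for the
+ * next pass.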
+ */
+ if (vptr->rx.filled < 4)
+ return;
+
+ wmb();
+
+ unusable = vptr->rx.filled & 0x0003;
+ dirty = vptr->rx.dirty - unusable;
+ for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
+ dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
+ vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
+ }
+
+ writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
+ vptr->rx.filled = unusable;
+}
+
+/**
+ * velocity_init_dma_rings - set up DMA rings
+ * @vptr: Velocity to set up
+ *
+ * Allocate PCI mapped DMA rings for the receive and transmit layer
+ * to use.
+ */
+static int velocity_init_dma_rings(struct velocity_info *vptr)
+{
+ struct velocity_opt *opt = &vptr->options;
+ const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
+ const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
+ struct pci_dev *pdev = vptr->pdev;
+ dma_addr_t pool_dma;
+ void *pool;
+ unsigned int i;
+
+ /*
+ * Allocate all RD/TD rings as a single pool.
+ *
+ * pci_alloc_consistent() fulfills the requirement for
+ * 64-byte alignment.
+ */
+ pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
+ rx_ring_size, &pool_dma);
+ if (!pool) {
+ dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
+ vptr->dev->name);
+ return -ENOMEM;
+ }
+
+ vptr->rx.ring = pool;
+ vptr->rx.pool_dma = pool_dma;
+
+ pool += rx_ring_size;
+ pool_dma += rx_ring_size;
+
+ for (i = 0; i < vptr->tx.numq; i++) {
+ vptr->tx.rings[i] = pool;
+ vptr->tx.pool_dma[i] = pool_dma;
+ pool += tx_ring_size;
+ pool_dma += tx_ring_size;
+ }
+
+ return 0;
+}
+
+static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
+{
+ vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
+}
+
+/**
+ * velocity_alloc_rx_buf - allocate aligned receive buffer
+ * @vptr: velocity
+ * @idx: ring index
+ *
+ * Allocate a new full sized buffer for the reception of a frame and
+ * map it into PCI space for the hardware to use. The hardware
+ * requires *64* byte alignment of the buffer which makes life
+ * less fun than would be ideal.
+ */
+static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
+{
+ struct rx_desc *rd = &(vptr->rx.ring[idx]);
+ struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
+
+ rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
+ if (rd_info->skb == NULL)
+ return -ENOMEM;
+
+ /*
+ * Do the gymnastics to get the buffer head for data at
+ * 64byte alignment.
+ */
+ skb_reserve(rd_info->skb,
+ 64 - ((unsigned long) rd_info->skb->data & 63));
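+ /*
+ * e.g. if skb->data & 63 == 8, 56 bytes are reserved and the
+ * data pointer lands on the next 64-byte boundary; an already
+ * aligned buffer skips a full 64 bytes, which the +64 in the
+ * allocation above allows for.
+ */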
+ rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
+ vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
+
+ /*
+ * Fill in the descriptor to match
+ */
+
+ *((u32 *)&rd->rdesc0) = 0;
+ rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
+ rd->pa_low = cpu_to_le32(rd_info->skb_dma);
+ rd->pa_high = 0;
+ return 0;
+}
+
+
+static int velocity_rx_refill(struct velocity_info *vptr)
+{
+ int dirty = vptr->rx.dirty, done = 0;
+
+ do {
+ struct rx_desc *rd = vptr->rx.ring + dirty;
+
+ /* Fine for an all zero Rx desc at init time as well */
+ if (rd->rdesc0.len & OWNED_BY_NIC)
+ break;
+
+ if (!vptr->rx.info[dirty].skb) {
+ if (velocity_alloc_rx_buf(vptr, dirty) < 0)
+ break;
+ }
+ done++;
+ dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
+ } while (dirty != vptr->rx.curr);
+
+ if (done) {
+ vptr->rx.dirty = dirty;
+ vptr->rx.filled += done;
+ }
+
+ return done;
+}
+
+/**
+ * velocity_free_rd_ring - free receive ring
+ * @vptr: velocity to clean up
+ *
+ * Free the receive buffers for each ring slot and any
+ * attached socket buffers that need to go away.
+ */
+static void velocity_free_rd_ring(struct velocity_info *vptr)
+{
+ int i;
+
+ if (vptr->rx.info == NULL)
+ return;
+
+ for (i = 0; i < vptr->options.numrx; i++) {
+ struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
+ struct rx_desc *rd = vptr->rx.ring + i;
+
+ memset(rd, 0, sizeof(*rd));
+
+ if (!rd_info->skb)
+ continue;
+ pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
+ PCI_DMA_FROMDEVICE);
+ rd_info->skb_dma = 0;
+
+ dev_kfree_skb(rd_info->skb);
+ rd_info->skb = NULL;
+ }
+
+ kfree(vptr->rx.info);
+ vptr->rx.info = NULL;
+}
+
+/**
+ * velocity_init_rd_ring - set up receive ring
+ * @vptr: velocity to configure
+ *
+ * Allocate and set up the receive buffers for each ring slot and
+ * assign them to the network adapter.
+ */
+static int velocity_init_rd_ring(struct velocity_info *vptr)
+{
+ int ret = -ENOMEM;
+
+ vptr->rx.info = kcalloc(vptr->options.numrx,
+ sizeof(struct velocity_rd_info), GFP_KERNEL);
+ if (!vptr->rx.info)
+ goto out;
+
+ velocity_init_rx_ring_indexes(vptr);
+
+ if (velocity_rx_refill(vptr) != vptr->options.numrx) {
+ VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
+ "%s: failed to allocate RX buffer.\n", vptr->dev->name);
+ velocity_free_rd_ring(vptr);
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+/**
+ * velocity_init_td_ring - set up transmit ring
+ * @vptr: velocity
+ *
+ * Set up the transmit ring and chain the ring pointers together.
+ * Returns zero on success or a negative posix errno code for
+ * failure.
+ */
+static int velocity_init_td_ring(struct velocity_info *vptr)
+{
+ int j;
+
+ /* Init the TD ring entries */
+ for (j = 0; j < vptr->tx.numq; j++) {
+
+ vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
+ sizeof(struct velocity_td_info),
+ GFP_KERNEL);
+ if (!vptr->tx.infos[j]) {
+ while (--j >= 0)
+ kfree(vptr->tx.infos[j]);
+ return -ENOMEM;
+ }
+
+ vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
+ }
+ return 0;
+}
+
+/**
+ * velocity_free_dma_rings - free PCI ring pointers
+ * @vptr: Velocity to free from
+ *
+ * Clean up the PCI ring buffers allocated to this velocity.
+ */
+static void velocity_free_dma_rings(struct velocity_info *vptr)
+{
+ const int size = vptr->options.numrx * sizeof(struct rx_desc) +
+ vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
+
+ pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
+}
+
+static int velocity_init_rings(struct velocity_info *vptr, int mtu)
+{
+ int ret;
+
+ velocity_set_rxbufsize(vptr, mtu);
+
+ ret = velocity_init_dma_rings(vptr);
+ if (ret < 0)
+ goto out;
+
+ ret = velocity_init_rd_ring(vptr);
+ if (ret < 0)
+ goto err_free_dma_rings_0;
+
+ ret = velocity_init_td_ring(vptr);
+ if (ret < 0)
+ goto err_free_rd_ring_1;
+out:
+ return ret;
+
+err_free_rd_ring_1:
+ velocity_free_rd_ring(vptr);
+err_free_dma_rings_0:
+ velocity_free_dma_rings(vptr);
+ goto out;
+}
+
+/**
+ * velocity_free_tx_buf - free transmit buffer
+ * @vptr: velocity
+ * @tdinfo: buffer
+ * @td: transmit descriptor
+ *
+ * Release a transmit buffer. If the buffer was preallocated then
+ * recycle it, if not then unmap the buffer.
+ */
+static void velocity_free_tx_buf(struct velocity_info *vptr,
+ struct velocity_td_info *tdinfo, struct tx_desc *td)
+{
+ struct sk_buff *skb = tdinfo->skb;
+
+ /*
+ * Don't unmap the pre-allocated tx_bufs
+ */
+ if (tdinfo->skb_dma) {
+ int i;
+
+ for (i = 0; i < tdinfo->nskb_dma; i++) {
+ size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
+
+ /* For scatter-gather */
+ if (skb_shinfo(skb)->nr_frags > 0)
+ pktlen = max_t(size_t, pktlen,
+ td->td_buf[i].size & ~TD_QUEUE);
+
+ pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
+ le16_to_cpu(pktlen), PCI_DMA_TODEVICE);
+ }
+ }
+ dev_kfree_skb_irq(skb);
+ tdinfo->skb = NULL;
+}
+
+/*
+ * FIXME: could we merge this with velocity_free_tx_buf ?
+ */
+static void velocity_free_td_ring_entry(struct velocity_info *vptr,
+ int q, int n)
+{
+ struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
+ int i;
+
+ if (td_info == NULL)
+ return;
+
+ if (td_info->skb) {
+ for (i = 0; i < td_info->nskb_dma; i++) {
+ if (td_info->skb_dma[i]) {
+ pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
+ td_info->skb->len, PCI_DMA_TODEVICE);
+ td_info->skb_dma[i] = 0;
+ }
+ }
+ dev_kfree_skb(td_info->skb);
+ td_info->skb = NULL;
+ }
+}
+
+/**
+ * velocity_free_td_ring - free td ring
+ * @vptr: velocity
+ *
+ * Free up the transmit ring for this particular velocity adapter.
+ * We free the ring contents but not the ring itself.
+ */
+static void velocity_free_td_ring(struct velocity_info *vptr)
+{
+ int i, j;
+
+ for (j = 0; j < vptr->tx.numq; j++) {
+ if (vptr->tx.infos[j] == NULL)
+ continue;
+ for (i = 0; i < vptr->options.numtx; i++)
+ velocity_free_td_ring_entry(vptr, j, i);
+
+ kfree(vptr->tx.infos[j]);
+ vptr->tx.infos[j] = NULL;
+ }
+}
+
+static void velocity_free_rings(struct velocity_info *vptr)
+{
+ velocity_free_td_ring(vptr);
+ velocity_free_rd_ring(vptr);
+ velocity_free_dma_rings(vptr);
+}
+
+/**
+ * velocity_error - handle error from controller
+ * @vptr: velocity
+ * @status: card status
+ *
+ * Process an error report from the hardware and attempt to recover
+ * the card itself. At the moment we cannot recover from some
+ * theoretically impossible errors but this could be fixed using
+ * the pci_device_failed logic to bounce the hardware
+ *
+ */
+static void velocity_error(struct velocity_info *vptr, int status)
+{
+ if (status & ISR_TXSTLI) {
+ struct mac_regs __iomem *regs = vptr->mac_regs;
+
+ printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
+ BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
+ writew(TRDCSR_RUN, &regs->TDCSRClr);
+ netif_stop_queue(vptr->dev);
+
+ /* FIXME: port over the pci_device_failed code and use it
+ here */
+ }
+
+ if (status & ISR_SRCI) {
+ struct mac_regs __iomem *regs = vptr->mac_regs;
+ int linked;
+
+ if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
+ vptr->mii_status = check_connection_type(regs);
+
+ /*
+ * If it is a 3119, disable frame bursting in
+ * halfduplex mode and enable it in fullduplex
+ * mode
+ */
+ if (vptr->rev_id < REV_ID_VT3216_A0) {
+ if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
+ BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
+ else
+ BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
+ }
+ /*
+ * Only enable CD heart beat counter in 10HD mode
+ */
+ if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
+ BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
+ else
+ BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
+
+ setup_queue_timers(vptr);
+ }
+ /*
+ * Get link status from PHYSR0
+ */
+ linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
+
+ if (linked) {
+ vptr->mii_status &= ~VELOCITY_LINK_FAIL;
+ netif_carrier_on(vptr->dev);
+ } else {
+ vptr->mii_status |= VELOCITY_LINK_FAIL;
+ netif_carrier_off(vptr->dev);
+ }
+
+ velocity_print_link_status(vptr);
+ enable_flow_control_ability(vptr);
+
+ /*
+ * Re-enable auto-polling because SRCI will disable
+ * auto-polling
+ */
+
+ enable_mii_autopoll(regs);
+
+ if (vptr->mii_status & VELOCITY_LINK_FAIL)
+ netif_stop_queue(vptr->dev);
+ else
+ netif_wake_queue(vptr->dev);
+
+ }
+ if (status & ISR_MIBFI)
+ velocity_update_hw_mibs(vptr);
+ if (status & ISR_LSTEI)
+ mac_rx_queue_wake(vptr->mac_regs);
+}
+
+/**
+ * velocity_tx_srv - transmit interrupt service
+ * @vptr: Velocity
+ *
+ * Scan the queues looking for transmitted packets that
+ * we can complete and clean up. Update any statistics as
+ * necessary.
+ */
+static int velocity_tx_srv(struct velocity_info *vptr)
+{
+ struct tx_desc *td;
+ int qnum;
+ int full = 0;
+ int idx;
+ int works = 0;
+ struct velocity_td_info *tdinfo;
+ struct net_device_stats *stats = &vptr->dev->stats;
+
+ for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
+ for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
+ idx = (idx + 1) % vptr->options.numtx) {
+
+ /*
+ * Get Tx Descriptor
+ */
+ td = &(vptr->tx.rings[qnum][idx]);
+ tdinfo = &(vptr->tx.infos[qnum][idx]);
+
+ if (td->tdesc0.len & OWNED_BY_NIC)
+ break;
+
+ if (works++ > 15)
+ break;
+
+ if (td->tdesc0.TSR & TSR0_TERR) {
+ stats->tx_errors++;
+ stats->tx_dropped++;
+ if (td->tdesc0.TSR & TSR0_CDH)
+ stats->tx_heartbeat_errors++;
+ if (td->tdesc0.TSR & TSR0_CRS)
+ stats->tx_carrier_errors++;
+ if (td->tdesc0.TSR & TSR0_ABT)
+ stats->tx_aborted_errors++;
+ if (td->tdesc0.TSR & TSR0_OWC)
+ stats->tx_window_errors++;
+ } else {
+ stats->tx_packets++;
+ stats->tx_bytes += tdinfo->skb->len;
+ }
+ velocity_free_tx_buf(vptr, tdinfo, td);
+ vptr->tx.used[qnum]--;
+ }
+ vptr->tx.tail[qnum] = idx;
+
+ if (AVAIL_TD(vptr, qnum) < 1)
+ full = 1;
+ }
+ /*
+ * Look to see if we should kick the transmit network
+ * layer for more work.
+ */
+ if (netif_queue_stopped(vptr->dev) && (full == 0) &&
+ (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
+ netif_wake_queue(vptr->dev);
+ }
+ return works;
+}
+
+/**
+ * velocity_rx_csum - checksum process
+ * @rd: receive packet descriptor
+ * @skb: network layer packet buffer
+ *
+ * Process the status bits for the received packet and determine
+ * if the checksum was computed and verified by the hardware
+ */
+static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+ if (rd->rdesc1.CSM & CSM_IPKT) {
+ if (rd->rdesc1.CSM & CSM_IPOK) {
+ if ((rd->rdesc1.CSM & CSM_TCPKT) ||
+ (rd->rdesc1.CSM & CSM_UDPKT)) {
+ if (!(rd->rdesc1.CSM & CSM_TUPOK))
+ return;
+ }
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ }
+}
+
+/**
+ * velocity_rx_copy - in place Rx copy for small packets
+ * @rx_skb: network layer packet buffer candidate
+ * @pkt_size: received data size
+ * @vptr: velocity adapter
+ *
+ * Replace the current skb that is scheduled for Rx processing by a
+ * shorter, immediately allocated skb, if the received packet is small
+ * enough. This function returns a negative value if the received
+ * packet is too big or if memory is exhausted.
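+ *
+ * Copying small packets lets the original full-sized, 64-byte
+ * aligned receive buffer stay mapped and be reused, at the cost
+ * of a short copy.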
+ */
+static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
+ struct velocity_info *vptr)
+{
+ int ret = -1;
+ if (pkt_size < rx_copybreak) {
+ struct sk_buff *new_skb;
+
+ new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
+ if (new_skb) {
+ new_skb->ip_summed = rx_skb[0]->ip_summed;
+ skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
+ *rx_skb = new_skb;
+ ret = 0;
+ }
+
+ }
+ return ret;
+}
+
+/**
+ * velocity_iph_realign - IP header alignment
+ * @vptr: velocity we are handling
+ * @skb: network layer packet buffer
+ * @pkt_size: received data size
+ *
+ * Align the IP header on a 2-byte boundary. This behavior can be
+ * configured by the user.
+ */
+static inline void velocity_iph_realign(struct velocity_info *vptr,
+ struct sk_buff *skb, int pkt_size)
+{
+ if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
+ memmove(skb->data + 2, skb->data, pkt_size);
+ skb_reserve(skb, 2);
+ }
+}
+
+/**
+ * velocity_receive_frame - received packet processor
+ * @vptr: velocity we are handling
+ * @idx: ring index
+ *
+ * A packet has arrived. We process the packet and if appropriate
+ * pass the frame up the network stack
+ */
+static int velocity_receive_frame(struct velocity_info *vptr, int idx)
+{
+ void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
+ struct net_device_stats *stats = &vptr->dev->stats;
+ struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
+ struct rx_desc *rd = &(vptr->rx.ring[idx]);
+ int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
+ struct sk_buff *skb;
+
+ if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
+ VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev->name);
+ stats->rx_length_errors++;
+ return -EINVAL;
+ }
+
+ if (rd->rdesc0.RSR & RSR_MAR)
+ stats->multicast++;
+
+ skb = rd_info->skb;
+
+ pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
+ vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
+
+ /*
+ * Drop frame not meeting IEEE 802.3
+ */
+
+ if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
+ if (rd->rdesc0.RSR & RSR_RL) {
+ stats->rx_length_errors++;
+ return -EINVAL;
+ }
+ }
+
+ pci_action = pci_dma_sync_single_for_device;
+
+ velocity_rx_csum(rd, skb);
+
+ if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
+ velocity_iph_realign(vptr, skb, pkt_len);
+ pci_action = pci_unmap_single;
+ rd_info->skb = NULL;
+ }
+
+ pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
+ PCI_DMA_FROMDEVICE);
+
+ skb_put(skb, pkt_len - 4);
+ skb->protocol = eth_type_trans(skb, vptr->dev);
+
+ if (rd->rdesc0.RSR & RSR_DETAG) {
+ u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
+
+ __vlan_hwaccel_put_tag(skb, vid);
+ }
+ netif_rx(skb);
+
+ stats->rx_bytes += pkt_len;
+ stats->rx_packets++;
+
+ return 0;
+}
+
+/**
+ * velocity_rx_srv - service RX interrupt
+ * @vptr: velocity
+ * @budget_left: remaining NAPI budget
+ *
+ * Walk the receive ring of the velocity adapter and remove
+ * any received packets from the receive queue. Hand the ring
+ * slots back to the adapter for reuse.
+ */
+static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
+{
+ struct net_device_stats *stats = &vptr->dev->stats;
+ int rd_curr = vptr->rx.curr;
+ int works = 0;
+
+ while (works < budget_left) {
+ struct rx_desc *rd = vptr->rx.ring + rd_curr;
+
+ if (!vptr->rx.info[rd_curr].skb)
+ break;
+
+ if (rd->rdesc0.len & OWNED_BY_NIC)
+ break;
+
+ rmb();
+
+ /*
+ * Don't drop CE or RL error frame although RXOK is off
+ */
+ if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
+ if (velocity_receive_frame(vptr, rd_curr) < 0)
+ stats->rx_dropped++;
+ } else {
+ if (rd->rdesc0.RSR & RSR_CRC)
+ stats->rx_crc_errors++;
+ if (rd->rdesc0.RSR & RSR_FAE)
+ stats->rx_frame_errors++;
+
+ stats->rx_dropped++;
+ }
+
+ rd->size |= RX_INTEN;
+
+ rd_curr++;
+ if (rd_curr >= vptr->options.numrx)
+ rd_curr = 0;
+ works++;
+ }
+
+ vptr->rx.curr = rd_curr;
+
+ if ((works > 0) && (velocity_rx_refill(vptr) > 0))
+ velocity_give_many_rx_descs(vptr);
+
+ VAR_USED(stats);
+ return works;
+}
+
+static int velocity_poll(struct napi_struct *napi, int budget)
+{
+ struct velocity_info *vptr = container_of(napi,
+ struct velocity_info, napi);
+ unsigned int rx_done;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vptr->lock, flags);
+ /*
+ * Do rx and tx twice for performance (taken from the VIA
+ * out-of-tree driver).
+ */
+ rx_done = velocity_rx_srv(vptr, budget / 2);
+ velocity_tx_srv(vptr);
+ rx_done += velocity_rx_srv(vptr, budget - rx_done);
+ velocity_tx_srv(vptr);
+
+ /* If budget not fully consumed, exit the polling mode */
+ if (rx_done < budget) {
+ napi_complete(napi);
+ mac_enable_int(vptr->mac_regs);
+ }
+ spin_unlock_irqrestore(&vptr->lock, flags);
+
+ return rx_done;
+}
+
+/**
+ * velocity_intr - interrupt callback
+ * @irq: interrupt number
+ * @dev_instance: interrupting device
+ *
+ * Called whenever an interrupt is generated by the velocity
+ * adapter IRQ line. We may not be the source of the interrupt
+ * and need to identify initially if we are, and if not exit as
+ * efficiently as possible.
+ */
+static irqreturn_t velocity_intr(int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct velocity_info *vptr = netdev_priv(dev);
+ u32 isr_status;
+
+ spin_lock(&vptr->lock);
+ isr_status = mac_read_isr(vptr->mac_regs);
+
+ /* Not us ? */
+ if (isr_status == 0) {
+ spin_unlock(&vptr->lock);
+ return IRQ_NONE;
+ }
+
+ /* Ack the interrupt */
+ mac_write_isr(vptr->mac_regs, isr_status);
+
+ if (likely(napi_schedule_prep(&vptr->napi))) {
+ mac_disable_int(vptr->mac_regs);
+ __napi_schedule(&vptr->napi);
+ }
+
+ if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
+ velocity_error(vptr, isr_status);
+
+ spin_unlock(&vptr->lock);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * velocity_open - interface activation callback
+ * @dev: network layer device to open
+ *
+ * Called when the network layer brings the interface up. Returns
+ * a negative posix error code on failure, or zero on success.
+ *
+ * All the ring allocation and set up is done on open for this
+ * adapter to minimise memory usage when inactive
+ */
+static int velocity_open(struct net_device *dev)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+ int ret;
+
+ ret = velocity_init_rings(vptr, dev->mtu);
+ if (ret < 0)
+ goto out;
+
+ /* Ensure chip is running */
+ pci_set_power_state(vptr->pdev, PCI_D0);
+
+ velocity_init_registers(vptr, VELOCITY_INIT_COLD);
+
+ ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
+ dev->name, dev);
+ if (ret < 0) {
+ /* Power down the chip */
+ pci_set_power_state(vptr->pdev, PCI_D3hot);
+ velocity_free_rings(vptr);
+ goto out;
+ }
+
+ velocity_give_many_rx_descs(vptr);
+
+ mac_enable_int(vptr->mac_regs);
+ netif_start_queue(dev);
+ napi_enable(&vptr->napi);
+ vptr->flags |= VELOCITY_FLAGS_OPENED;
+out:
+ return ret;
+}
+
+/**
+ * velocity_shutdown - shut down the chip
+ * @vptr: velocity to deactivate
+ *
+ * Shuts down the internal operations of the velocity and
+ * disables interrupts, autopolling, transmit and receive
+ */
+static void velocity_shutdown(struct velocity_info *vptr)
+{
+ struct mac_regs __iomem *regs = vptr->mac_regs;
+ mac_disable_int(regs);
+ writel(CR0_STOP, &regs->CR0Set);
+ writew(0xFFFF, &regs->TDCSRClr);
+ writeb(0xFF, &regs->RDCSRClr);
+ safe_disable_mii_autopoll(regs);
+ mac_clear_isr(regs);
+}
+
+/**
+ * velocity_change_mtu - MTU change callback
+ * @dev: network device
+ * @new_mtu: desired MTU
+ *
+ * Handle requests from the networking layer for MTU change on
+ * this interface. It gets called on a change by the network layer.
+ * Return zero for success or negative posix error code.
+ */
+static int velocity_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+ int ret = 0;
+
+ if ((new_mtu < VELOCITY_MIN_MTU) || (new_mtu > VELOCITY_MAX_MTU)) {
+ VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
+ vptr->dev->name);
+ ret = -EINVAL;
+ goto out_0;
+ }
+
+ if (!netif_running(dev)) {
+ dev->mtu = new_mtu;
+ goto out_0;
+ }
+
+ if (dev->mtu != new_mtu) {
+ struct velocity_info *tmp_vptr;
+ unsigned long flags;
+ struct rx_info rx;
+ struct tx_info tx;
+
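+ /*
+ * Build replacement rings in a scratch velocity_info, swap
+ * them in under the lock, and free the old rings afterwards,
+ * so the device is only quiesced for the swap itself.
+ */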
+ tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
+ if (!tmp_vptr) {
+ ret = -ENOMEM;
+ goto out_0;
+ }
+
+ tmp_vptr->dev = dev;
+ tmp_vptr->pdev = vptr->pdev;
+ tmp_vptr->options = vptr->options;
+ tmp_vptr->tx.numq = vptr->tx.numq;
+
+ ret = velocity_init_rings(tmp_vptr, new_mtu);
+ if (ret < 0)
+ goto out_free_tmp_vptr_1;
+
+ spin_lock_irqsave(&vptr->lock, flags);
+
+ netif_stop_queue(dev);
+ velocity_shutdown(vptr);
+
+ rx = vptr->rx;
+ tx = vptr->tx;
+
+ vptr->rx = tmp_vptr->rx;
+ vptr->tx = tmp_vptr->tx;
+
+ tmp_vptr->rx = rx;
+ tmp_vptr->tx = tx;
+
+ dev->mtu = new_mtu;
+
+ velocity_init_registers(vptr, VELOCITY_INIT_COLD);
+
+ velocity_give_many_rx_descs(vptr);
+
+ mac_enable_int(vptr->mac_regs);
+ netif_start_queue(dev);
+
+ spin_unlock_irqrestore(&vptr->lock, flags);
+
+ velocity_free_rings(tmp_vptr);
+
+out_free_tmp_vptr_1:
+ kfree(tmp_vptr);
+ }
+out_0:
+ return ret;
+}
+
+/**
+ * velocity_mii_ioctl - MII ioctl handler
+ * @dev: network device
+ * @ifr: the ifreq block for the ioctl
+ * @cmd: the command
+ *
+ * Process MII requests made via ioctl from the network layer. These
+ * are used by tools like kudzu to interrogate the link state of the
+ * hardware
+ */
+static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+ struct mac_regs __iomem *regs = vptr->mac_regs;
+ unsigned long flags;
+ struct mii_ioctl_data *miidata = if_mii(ifr);
+ int err;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
+ break;
+ case SIOCGMIIREG:
+ if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
+ return -ETIMEDOUT;
+ break;
+ case SIOCSMIIREG:
+ spin_lock_irqsave(&vptr->lock, flags);
+ err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
+ spin_unlock_irqrestore(&vptr->lock, flags);
+ check_connection_type(vptr->mac_regs);
+ if (err)
+ return err;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+/**
+ * velocity_ioctl - ioctl entry point
+ * @dev: network device
+ * @rq: interface request ioctl
+ * @cmd: command code
+ *
+ * Called when the user issues an ioctl request to the network
+ * device in question. The velocity interface supports MII.
+ */
+static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+ int ret;
+
+ /* If we are asked for information and the device is power
+ saving then we need to bring the device back up to talk to it */
+
+ if (!netif_running(dev))
+ pci_set_power_state(vptr->pdev, PCI_D0);
+
+ switch (cmd) {
+ case SIOCGMIIPHY: /* Get address of MII PHY in use. */
+ case SIOCGMIIREG: /* Read MII PHY register. */
+ case SIOCSMIIREG: /* Write to MII PHY register. */
+ ret = velocity_mii_ioctl(dev, rq, cmd);
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+ if (!netif_running(dev))
+ pci_set_power_state(vptr->pdev, PCI_D3hot);
+
+ return ret;
+}
+
+/**
+ * velocity_get_stats - statistics callback
+ * @dev: network device
+ *
+ * Callback from the network layer to allow driver statistics
+ * to be resynchronized with hardware collected state. In the
+ * case of the velocity we need to pull the MIB counters from
+ * the hardware into the counters before letting the network
+ * layer display them.
+ */
+static struct net_device_stats *velocity_get_stats(struct net_device *dev)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+
+ /* If the hardware is down, don't touch MII */
+ if (!netif_running(dev))
+ return &dev->stats;
+
+ spin_lock_irq(&vptr->lock);
+ velocity_update_hw_mibs(vptr);
+ spin_unlock_irq(&vptr->lock);
+
+ dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
+ dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
+ dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
+
+// unsigned long rx_dropped; /* no space in linux buffers */
+ dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
+ /* detailed rx_errors: */
+// unsigned long rx_length_errors;
+// unsigned long rx_over_errors; /* receiver ring buff overflow */
+ dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
+// unsigned long rx_frame_errors; /* recv'd frame alignment error */
+// unsigned long rx_fifo_errors; /* recv'r fifo overrun */
+// unsigned long rx_missed_errors; /* receiver missed packet */
+
+ /* detailed tx_errors */
+// unsigned long tx_fifo_errors;
+
+ return &dev->stats;
+}
+
+/**
+ * velocity_close - close adapter callback
+ * @dev: network device
+ *
+ * Callback from the network layer when the velocity is being
+ * deactivated by the network layer
+ */
+static int velocity_close(struct net_device *dev)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+
+ napi_disable(&vptr->napi);
+ netif_stop_queue(dev);
+ velocity_shutdown(vptr);
+
+ if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
+ velocity_get_ip(vptr);
+ if (dev->irq != 0)
+ free_irq(dev->irq, dev);
+
+ /* Power down the chip */
+ pci_set_power_state(vptr->pdev, PCI_D3hot);
+
+ velocity_free_rings(vptr);
+
+ vptr->flags &= (~VELOCITY_FLAGS_OPENED);
+ return 0;
+}
+
+/**
+ * velocity_xmit - transmit packet callback
+ * @skb: buffer to transmit
+ * @dev: network device
+ *
+ * Called by the network layer to request that a packet be queued
+ * to the velocity. Returns zero on success.
+ */
+static netdev_tx_t velocity_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+ int qnum = 0;
+ struct tx_desc *td_ptr;
+ struct velocity_td_info *tdinfo;
+ unsigned long flags;
+ int pktlen;
+ int index, prev;
+ int i = 0;
+
+ if (skb_padto(skb, ETH_ZLEN))
+ goto out;
+
+ /* The hardware can handle at most 7 memory segments, so merge
+ * the skb if there are more */
+ if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ pktlen = skb_shinfo(skb)->nr_frags == 0 ?
+ max_t(unsigned int, skb->len, ETH_ZLEN) :
+ skb_headlen(skb);
+
+ spin_lock_irqsave(&vptr->lock, flags);
+
+ index = vptr->tx.curr[qnum];
+ td_ptr = &(vptr->tx.rings[qnum][index]);
+ tdinfo = &(vptr->tx.infos[qnum][index]);
+
+ td_ptr->tdesc1.TCR = TCR0_TIC;
+ td_ptr->td_buf[0].size &= ~TD_QUEUE;
+
+ /*
+ * Map the linear network buffer into PCI space and
+ * add it to the transmit ring.
+ */
+ tdinfo->skb = skb;
+ tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
+ td_ptr->tdesc0.len = cpu_to_le16(pktlen);
+ td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
+ td_ptr->td_buf[0].pa_high = 0;
+ td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
+
+ /* Handle fragments */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ tdinfo->skb_dma[i + 1] = skb_frag_dma_map(&vptr->pdev->dev,
+ frag, 0,
+ frag->size,
+ DMA_TO_DEVICE);
+
+ td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
+ td_ptr->td_buf[i + 1].pa_high = 0;
+ td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
+ }
+ tdinfo->nskb_dma = i + 1;
+
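+ /* the buffer segment count is encoded in the upper nibble of
+ * cmd, hence the multiply by 16 */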
+ td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
+
+ if (vlan_tx_tag_present(skb)) {
+ td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
+ td_ptr->tdesc1.TCR |= TCR0_VETAG;
+ }
+
+ /*
+ * Handle hardware checksum
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const struct iphdr *ip = ip_hdr(skb);
+ if (ip->protocol == IPPROTO_TCP)
+ td_ptr->tdesc1.TCR |= TCR0_TCPCK;
+ else if (ip->protocol == IPPROTO_UDP)
+ td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
+ td_ptr->tdesc1.TCR |= TCR0_IPCK;
+ }
+
+ prev = index - 1;
+ if (prev < 0)
+ prev = vptr->options.numtx - 1;
+ td_ptr->tdesc0.len |= OWNED_BY_NIC;
+ vptr->tx.used[qnum]++;
+ vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
+
+ if (AVAIL_TD(vptr, qnum) < 1)
+ netif_stop_queue(dev);
+
+ td_ptr = &(vptr->tx.rings[qnum][prev]);
+ td_ptr->td_buf[0].size |= TD_QUEUE;
+ mac_tx_queue_wake(vptr->mac_regs, qnum);
+
+ spin_unlock_irqrestore(&vptr->lock, flags);
+out:
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops velocity_netdev_ops = {
+ .ndo_open = velocity_open,
+ .ndo_stop = velocity_close,
+ .ndo_start_xmit = velocity_xmit,
+ .ndo_get_stats = velocity_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_rx_mode = velocity_set_multi,
+ .ndo_change_mtu = velocity_change_mtu,
+ .ndo_do_ioctl = velocity_ioctl,
+ .ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid,
+};
+
+/**
+ * velocity_init_info - init private data
+ * @pdev: PCI device
+ * @vptr: Velocity info
+ * @info: Board type
+ *
+ * Set up the initial velocity_info struct for the device that has been
+ * discovered.
+ */
+static void __devinit velocity_init_info(struct pci_dev *pdev,
+ struct velocity_info *vptr,
+ const struct velocity_info_tbl *info)
+{
+ memset(vptr, 0, sizeof(struct velocity_info));
+
+ vptr->pdev = pdev;
+ vptr->chip_id = info->chip_id;
+ vptr->tx.numq = info->txqueue;
+ vptr->multicast_limit = MCAM_SIZE;
+ spin_lock_init(&vptr->lock);
+}
+
+/**
+ * velocity_get_pci_info - retrieve PCI info for device
+ * @vptr: velocity device
+ * @pdev: PCI device it matches
+ *
+ * Retrieve the PCI configuration space data that interests us from
+ * the kernel PCI layer
+ */
+static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
+{
+ vptr->rev_id = pdev->revision;
+
+ pci_set_master(pdev);
+
+ vptr->ioaddr = pci_resource_start(pdev, 0);
+ vptr->memaddr = pci_resource_start(pdev, 1);
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
+ dev_err(&pdev->dev,
+ "region #0 is not an I/O resource, aborting.\n");
+ return -EINVAL;
+ }
+
+ if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
+ dev_err(&pdev->dev,
+ "region #1 is an I/O resource, aborting.\n");
+ return -EINVAL;
+ }
+
+ if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
+ dev_err(&pdev->dev, "region #1 is too small.\n");
+ return -EINVAL;
+ }
+ vptr->pdev = pdev;
+
+ return 0;
+}
+
+/**
+ * velocity_print_info - per driver data
+ * @vptr: velocity
+ *
+ * Print per driver data as the kernel driver finds Velocity
+ * hardware
+ */
+static void __devinit velocity_print_info(struct velocity_info *vptr)
+{
+ struct net_device *dev = vptr->dev;
+
+ printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
+ printk(KERN_INFO "%s: Ethernet Address: %pM\n",
+ dev->name, dev->dev_addr);
+}
+
+static u32 velocity_get_link(struct net_device *dev)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+ struct mac_regs __iomem *regs = vptr->mac_regs;
+ return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
+}
+
+/**
+ * velocity_found1 - set up discovered velocity card
+ * @pdev: PCI device
+ * @ent: PCI device table entry that matched
+ *
+ * Configure a discovered adapter from scratch. Return a negative
+ * errno error code on failure paths.
+ */
+static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int first = 1;
+ struct net_device *dev;
+ int i;
+ const char *drv_string;
+ const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
+ struct velocity_info *vptr;
+ struct mac_regs __iomem *regs;
+ int ret = -ENOMEM;
+
+ /* FIXME: this driver, like almost all other ethernet drivers,
+ * can support more than MAX_UNITS.
+ */
+ if (velocity_nics >= MAX_UNITS) {
+ dev_notice(&pdev->dev, "already found %d NICs.\n",
+ velocity_nics);
+ return -ENODEV;
+ }
+
+ dev = alloc_etherdev(sizeof(struct velocity_info));
+ if (!dev) {
+ dev_err(&pdev->dev, "allocate net device failed.\n");
+ goto out;
+ }
+
+ /* Chain it all together */
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ vptr = netdev_priv(dev);
+
+ if (first) {
+ printk(KERN_INFO "%s Ver. %s\n",
+ VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
+ printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
+ printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
+ first = 0;
+ }
+
+ velocity_init_info(pdev, vptr, info);
+
+ vptr->dev = dev;
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0)
+ goto err_free_dev;
+
+ dev->irq = pdev->irq;
+
+ ret = velocity_get_pci_info(vptr, pdev);
+ if (ret < 0) {
+ /* error message already printed */
+ goto err_disable;
+ }
+
+ ret = pci_request_regions(pdev, VELOCITY_NAME);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "No PCI resources.\n");
+ goto err_disable;
+ }
+
+ regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
+ if (regs == NULL) {
+ ret = -EIO;
+ goto err_release_res;
+ }
+
+ vptr->mac_regs = regs;
+
+ mac_wol_reset(regs);
+
+ dev->base_addr = vptr->ioaddr;
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = readb(&regs->PAR[i]);
+
+ drv_string = dev_driver_string(&pdev->dev);
+
+ velocity_get_options(&vptr->options, velocity_nics, drv_string);
+
+ /*
+ * Mask out the options that cannot be set on the chip
+ */
+
+ vptr->options.flags &= info->flags;
+
+ /*
+ * Enable the chip-specified capabilities
+ */
+
+ vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
+
+ vptr->wol_opts = vptr->options.wol_opts;
+ vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
+
+ vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
+
+ dev->irq = pdev->irq;
+ dev->netdev_ops = &velocity_netdev_ops;
+ dev->ethtool_ops = &velocity_ethtool_ops;
+ netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
+
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HW_VLAN_TX;
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
+ NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
+
+ ret = register_netdev(dev);
+ if (ret < 0)
+ goto err_iounmap;
+
+ if (!velocity_get_link(dev)) {
+ netif_carrier_off(dev);
+ vptr->mii_status |= VELOCITY_LINK_FAIL;
+ }
+
+ velocity_print_info(vptr);
+ pci_set_drvdata(pdev, dev);
+
+ /* and leave the chip powered down */
+
+ pci_set_power_state(pdev, PCI_D3hot);
+ velocity_nics++;
+out:
+ return ret;
+
+err_iounmap:
+ iounmap(regs);
+err_release_res:
+ pci_release_regions(pdev);
+err_disable:
+ pci_disable_device(pdev);
+err_free_dev:
+ free_netdev(dev);
+ goto out;
+}
+
+#ifdef CONFIG_PM
+/**
+ * wol_calc_crc - WOL CRC
+ * @size: size of the pattern, in 8-byte chunks
+ * @pattern: data pattern
+ * @mask_pattern: mask
+ *
+ * Compute the wake-on-LAN CRC hashes for the packet header
+ * we are interested in.
+ */
+static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
+{
+ u16 crc = 0xFFFF;
+ u8 mask;
+ int i, j;
+
+ for (i = 0; i < size; i++) {
+ mask = mask_pattern[i];
+
+ /* Skip this loop if the mask equals to zero */
+ if (mask == 0x00)
+ continue;
+
+ for (j = 0; j < 8; j++) {
+ if ((mask & 0x01) == 0) {
+ mask >>= 1;
+ continue;
+ }
+ mask >>= 1;
+ crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
+ }
+ }
+ /* Finally, invert the result once to get the correct data */
+ crc = ~crc;
+ return bitrev32(crc) >> 16;
+}
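+
+/*
+ * Mask layout (illustrative): each mask bit covers one pattern byte,
+ * so mask byte i, bit j selects pattern byte i * 8 + j; a mask byte
+ * of 0x03 would feed only the first two bytes of that 8-byte group
+ * into the CCITT CRC.
+ */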
+
+/**
+ * velocity_set_wol - set up for wake on lan
+ * @vptr: velocity to set WOL status on
+ *
+ * Set a card up for wake on lan either by unicast or by
+ * ARP packet.
+ *
+ * FIXME: check static buffer is safe here
+ */
+static int velocity_set_wol(struct velocity_info *vptr)
+{
+ struct mac_regs __iomem *regs = vptr->mac_regs;
+ enum speed_opt spd_dpx = vptr->options.spd_dpx;
+ static u8 buf[256];
+ int i;
+
+ static u32 mask_pattern[2][4] = {
+ {0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */
+ {0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff} /* Magic Packet */
+ };
+
+ writew(0xFFFF, &regs->WOLCRClr);
+ writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
+ writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
+
+ /*
+ if (vptr->wol_opts & VELOCITY_WOL_PHY)
+ writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
+ */
+
+ if (vptr->wol_opts & VELOCITY_WOL_UCAST)
+ writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
+
+ if (vptr->wol_opts & VELOCITY_WOL_ARP) {
+ struct arp_packet *arp = (struct arp_packet *) buf;
+ u16 crc;
+ memset(buf, 0, sizeof(struct arp_packet) + 7);
+
+ for (i = 0; i < 4; i++)
+ writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
+
+ arp->type = htons(ETH_P_ARP);
+ arp->ar_op = htons(1);
+
+ memcpy(arp->ar_tip, vptr->ip_addr, 4);
+
+ crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
+ (u8 *)&mask_pattern[0][0]);
+
+ writew(crc, &regs->PatternCRC[0]);
+ writew(WOLCR_ARP_EN, &regs->WOLCRSet);
+ }
+
+ BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
+ BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
+
+ writew(0x0FFF, &regs->WOLSRClr);
+
+ if (spd_dpx == SPD_DPX_1000_FULL)
+ goto mac_done;
+
+ if (spd_dpx != SPD_DPX_AUTO)
+ goto advertise_done;
+
+ if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
+ if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
+ MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
+
+ MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
+ }
+
+ if (vptr->mii_status & VELOCITY_SPEED_1000)
+ MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
+
+advertise_done:
+ BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
+
+ {
+ u8 GCR;
+ GCR = readb(&regs->CHIPGCR);
+ GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
+ writeb(GCR, &regs->CHIPGCR);
+ }
+
+mac_done:
+ BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
+ /* Turn on SWPTAG just before entering power mode */
+ BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
+ /* Go to bed ..... */
+ BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
+
+ return 0;
+}
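+
+/*
+ * Editorial note (inferred from the STICKHW bit definitions in the
+ * header, not from a datasheet): setting STICKHW_DS1|STICKHW_DS0 above
+ * hands the suspend-well logic its D-state, so the MAC powers down as
+ * soon as this returns; velocity_suspend() then moves the PCI function
+ * itself to D3hot.
+ */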
+
+/**
+ * velocity_save_context - save registers
+ * @vptr: velocity
+ * @context: buffer for stored context
+ *
+ * Retrieve the current configuration from the velocity hardware
+ * and stash it in the context structure, for use by the context
+ * restore functions. This allows us to save things we need across
+ * power down states
+ */
+static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
+{
+ struct mac_regs __iomem *regs = vptr->mac_regs;
+ u16 i;
+ u8 __iomem *ptr = (u8 __iomem *)regs;
+
+ for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
+ *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
+
+ for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
+ *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
+
+ for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
+ *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
+
+}
+
+static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct velocity_info *vptr = netdev_priv(dev);
+ unsigned long flags;
+
+ if (!netif_running(vptr->dev))
+ return 0;
+
+ netif_device_detach(vptr->dev);
+
+ spin_lock_irqsave(&vptr->lock, flags);
+ pci_save_state(pdev);
+
+ if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
+ velocity_get_ip(vptr);
+ velocity_save_context(vptr, &vptr->context);
+ velocity_shutdown(vptr);
+ velocity_set_wol(vptr);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ pci_set_power_state(pdev, PCI_D3hot);
+ } else {
+ velocity_save_context(vptr, &vptr->context);
+ velocity_shutdown(vptr);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ }
+
+ spin_unlock_irqrestore(&vptr->lock, flags);
+ return 0;
+}
+
+/**
+ * velocity_restore_context - restore registers
+ * @vptr: velocity
+ * @context: buffer for stored context
+ *
+ * Reload the register configuration from the velocity context
+ * created by velocity_save_context.
+ */
+static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
+{
+ struct mac_regs __iomem *regs = vptr->mac_regs;
+ int i;
+ u8 __iomem *ptr = (u8 __iomem *)regs;
+
+ for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
+ writel(*((u32 *) (context->mac_reg + i)), ptr + i);
+
+ /* Just skip cr0 */
+ for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
+ /* Clear */
+ writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
+ /* Set */
+ writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
+ }
+
+ for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
+ writel(*((u32 *) (context->mac_reg + i)), ptr + i);
+
+ for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
+ writel(*((u32 *) (context->mac_reg + i)), ptr + i);
+
+ for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
+ writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
+}
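+
+/*
+ * Editorial note on the CR1..CR3 loop above: each CRn register has a SET
+ * port and a CLR port 4 bytes later (see MAC_REG_CR*_SET/_CLR), so
+ * writing the complement of the saved byte to the CLR port and then the
+ * byte itself to the SET port reconstructs the saved value exactly,
+ * without a read-modify-write of the live register.
+ */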
+
+static int velocity_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct velocity_info *vptr = netdev_priv(dev);
+ unsigned long flags;
+ int i;
+
+ if (!netif_running(vptr->dev))
+ return 0;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, 0, 0);
+ pci_restore_state(pdev);
+
+ mac_wol_reset(vptr->mac_regs);
+
+ spin_lock_irqsave(&vptr->lock, flags);
+ velocity_restore_context(vptr, &vptr->context);
+ velocity_init_registers(vptr, VELOCITY_INIT_WOL);
+ mac_disable_int(vptr->mac_regs);
+
+ velocity_tx_srv(vptr);
+
+ for (i = 0; i < vptr->tx.numq; i++) {
+ if (vptr->tx.used[i])
+ mac_tx_queue_wake(vptr->mac_regs, i);
+ }
+
+ mac_enable_int(vptr->mac_regs);
+ spin_unlock_irqrestore(&vptr->lock, flags);
+ netif_device_attach(vptr->dev);
+
+ return 0;
+}
+#endif
+
+/*
+ * Definition for our device driver. The PCI layer interface
+ * uses this to handle all our card discovery and plugging
+ */
+static struct pci_driver velocity_driver = {
+ .name = VELOCITY_NAME,
+ .id_table = velocity_id_table,
+ .probe = velocity_found1,
+ .remove = __devexit_p(velocity_remove1),
+#ifdef CONFIG_PM
+ .suspend = velocity_suspend,
+ .resume = velocity_resume,
+#endif
+};
+
+
+/**
+ * velocity_ethtool_up - pre hook for ethtool
+ * @dev: network device
+ *
+ * Called before an ethtool operation. We need to make sure the
+ * chip is out of D3 state before we poke at it.
+ */
+static int velocity_ethtool_up(struct net_device *dev)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+ if (!netif_running(dev))
+ pci_set_power_state(vptr->pdev, PCI_D0);
+ return 0;
+}
+
+/**
+ * velocity_ethtool_down - post hook for ethtool
+ * @dev: network device
+ *
+ * Called after an ethtool operation. Restore the chip back to D3
+ * state if it isn't running.
+ */
+static void velocity_ethtool_down(struct net_device *dev)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+ if (!netif_running(dev))
+ pci_set_power_state(vptr->pdev, PCI_D3hot);
+}
+
+static int velocity_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+ struct mac_regs __iomem *regs = vptr->mac_regs;
+ u32 status;
+ status = check_connection_type(vptr->mac_regs);
+
+ cmd->supported = SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full;
+
+ cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
+ if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
+ cmd->advertising |=
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full;
+ } else {
+ switch (vptr->options.spd_dpx) {
+ case SPD_DPX_1000_FULL:
+ cmd->advertising |= ADVERTISED_1000baseT_Full;
+ break;
+ case SPD_DPX_100_HALF:
+ cmd->advertising |= ADVERTISED_100baseT_Half;
+ break;
+ case SPD_DPX_100_FULL:
+ cmd->advertising |= ADVERTISED_100baseT_Full;
+ break;
+ case SPD_DPX_10_HALF:
+ cmd->advertising |= ADVERTISED_10baseT_Half;
+ break;
+ case SPD_DPX_10_FULL:
+ cmd->advertising |= ADVERTISED_10baseT_Full;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (status & VELOCITY_SPEED_1000)
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
+ else if (status & VELOCITY_SPEED_100)
+ ethtool_cmd_speed_set(cmd, SPEED_100);
+ else
+ ethtool_cmd_speed_set(cmd, SPEED_10);
+
+ cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ cmd->port = PORT_TP;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->phy_address = readb(&regs->MIIADR) & 0x1F;
+
+ if (status & VELOCITY_DUPLEX_FULL)
+ cmd->duplex = DUPLEX_FULL;
+ else
+ cmd->duplex = DUPLEX_HALF;
+
+ return 0;
+}
+
+static int velocity_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+ u32 speed = ethtool_cmd_speed(cmd);
+ u32 curr_status;
+ u32 new_status = 0;
+ int ret = 0;
+
+ curr_status = check_connection_type(vptr->mac_regs);
+ curr_status &= (~VELOCITY_LINK_FAIL);
+
+ new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
+ new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
+ new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
+ new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
+ new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
+
+ if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
+ (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
+ ret = -EINVAL;
+ } else {
+ enum speed_opt spd_dpx;
+
+ if (new_status & VELOCITY_AUTONEG_ENABLE)
+ spd_dpx = SPD_DPX_AUTO;
+ else if ((new_status & VELOCITY_SPEED_1000) &&
+ (new_status & VELOCITY_DUPLEX_FULL)) {
+ spd_dpx = SPD_DPX_1000_FULL;
+ } else if (new_status & VELOCITY_SPEED_100)
+ spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
+ SPD_DPX_100_FULL : SPD_DPX_100_HALF;
+ else if (new_status & VELOCITY_SPEED_10)
+ spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
+ SPD_DPX_10_FULL : SPD_DPX_10_HALF;
+ else
+ return -EOPNOTSUPP;
+
+ vptr->options.spd_dpx = spd_dpx;
+
+ velocity_set_media_mode(vptr, new_status);
+ }
+
+ return ret;
+}
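+
+/*
+ * Editorial note: the -EINVAL branch above rejects a request that asks
+ * for autonegotiation while also changing the speed/duplex bits away
+ * from what the hardware currently reports; forced modes are handled by
+ * the spd_dpx mapping instead.
+ */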
+
+static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+ strcpy(info->driver, VELOCITY_NAME);
+ strcpy(info->version, VELOCITY_VERSION);
+ strcpy(info->bus_info, pci_name(vptr->pdev));
+}
+
+static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+ wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
+ wol->wolopts |= WAKE_MAGIC;
+ /*
+ if (vptr->wol_opts & VELOCITY_WOL_PHY)
+ wol.wolopts|=WAKE_PHY;
+ */
+ if (vptr->wol_opts & VELOCITY_WOL_UCAST)
+ wol->wolopts |= WAKE_UCAST;
+ if (vptr->wol_opts & VELOCITY_WOL_ARP)
+ wol->wolopts |= WAKE_ARP;
+ memcpy(&wol->sopass, vptr->wol_passwd, 6);
+}
+
+static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+
+ if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
+ return -EFAULT;
+ vptr->wol_opts = VELOCITY_WOL_MAGIC;
+
+ /*
+ if (wol.wolopts & WAKE_PHY) {
+ vptr->wol_opts|=VELOCITY_WOL_PHY;
+ vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
+ }
+ */
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ vptr->wol_opts |= VELOCITY_WOL_MAGIC;
+ vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
+ }
+ if (wol->wolopts & WAKE_UCAST) {
+ vptr->wol_opts |= VELOCITY_WOL_UCAST;
+ vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
+ }
+ if (wol->wolopts & WAKE_ARP) {
+ vptr->wol_opts |= VELOCITY_WOL_ARP;
+ vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
+ }
+ memcpy(vptr->wol_passwd, wol->sopass, 6);
+ return 0;
+}
+
+static u32 velocity_get_msglevel(struct net_device *dev)
+{
+ return msglevel;
+}
+
+static void velocity_set_msglevel(struct net_device *dev, u32 value)
+{
+ msglevel = value;
+}
+
+static int get_pending_timer_val(int val)
+{
+ int mult_bits = val >> 6;
+ int mult = 1;
+
+ switch (mult_bits) {
+ case 1:
+ mult = 4; break;
+ case 2:
+ mult = 16; break;
+ case 3:
+ mult = 64; break;
+ case 0:
+ default:
+ break;
+ }
+
+ return (val & 0x3f) * mult;
+}
+
+static void set_pending_timer_val(int *val, u32 us)
+{
+ u8 mult = 0;
+ u8 shift = 0;
+
+ if (us >= 0x3f) {
+ mult = 1; /* mult with 4 */
+ shift = 2;
+ }
+ if (us >= 0x3f * 4) {
+ mult = 2; /* mult with 16 */
+ shift = 4;
+ }
+ if (us >= 0x3f * 16) {
+ mult = 3; /* mult with 64 */
+ shift = 6;
+ }
+
+ *val = (mult << 6) | ((us >> shift) & 0x3f);
+}
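+
+/*
+ * Worked example (editorial; the numbers follow from the code above): a
+ * request of 100us exceeds 0x3f (63) but not 0x3f * 4 (252), so mult = 1
+ * and shift = 2, encoding (1 << 6) | (100 >> 2) = 0x59. Decoding yields
+ * 25 * 4 = 100us here, though in general the encoding quantizes the
+ * request down to a multiple of 4, 16 or 64 microseconds.
+ */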
+
+
+static int velocity_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+
+ ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
+ ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
+
+ ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
+ ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
+
+ return 0;
+}
+
+static int velocity_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+ int max_us = 0x3f * 64;
+ unsigned long flags;
+
+ /* The timer value fields are 6 bits wide, scaled by a multiplier of up to 64 */
+ if (ecmd->tx_coalesce_usecs > max_us)
+ return -EINVAL;
+ if (ecmd->rx_coalesce_usecs > max_us)
+ return -EINVAL;
+
+ if (ecmd->tx_max_coalesced_frames > 0xff)
+ return -EINVAL;
+ if (ecmd->rx_max_coalesced_frames > 0xff)
+ return -EINVAL;
+
+ vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
+ vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
+
+ set_pending_timer_val(&vptr->options.rxqueue_timer,
+ ecmd->rx_coalesce_usecs);
+ set_pending_timer_val(&vptr->options.txqueue_timer,
+ ecmd->tx_coalesce_usecs);
+
+ /* Setup the interrupt suppression and queue timers */
+ spin_lock_irqsave(&vptr->lock, flags);
+ mac_disable_int(vptr->mac_regs);
+ setup_adaptive_interrupts(vptr);
+ setup_queue_timers(vptr);
+
+ mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
+ mac_clear_isr(vptr->mac_regs);
+ mac_enable_int(vptr->mac_regs);
+ spin_unlock_irqrestore(&vptr->lock, flags);
+
+ return 0;
+}
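+
+/*
+ * Editorial usage note: the two hooks above back the standard ethtool
+ * coalescing interface, so e.g. "ethtool -C eth0 rx-usecs 100
+ * rx-frames 16" (device name assumed) lands in options.rxqueue_timer
+ * and options.rx_intsup respectively.
+ */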
+
+static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
+ "rx_all",
+ "rx_ok",
+ "tx_ok",
+ "rx_error",
+ "rx_runt_ok",
+ "rx_runt_err",
+ "rx_64",
+ "tx_64",
+ "rx_65_to_127",
+ "tx_65_to_127",
+ "rx_128_to_255",
+ "tx_128_to_255",
+ "rx_256_to_511",
+ "tx_256_to_511",
+ "rx_512_to_1023",
+ "tx_512_to_1023",
+ "rx_1024_to_1518",
+ "tx_1024_to_1518",
+ "tx_ether_collisions",
+ "rx_crc_errors",
+ "rx_jumbo",
+ "tx_jumbo",
+ "rx_mac_control_frames",
+ "tx_mac_control_frames",
+ "rx_frame_alignement_errors",
+ "rx_long_ok",
+ "rx_long_err",
+ "tx_sqe_errors",
+ "rx_no_buf",
+ "rx_symbol_errors",
+ "in_range_length_errors",
+ "late_collisions"
+};
+
+static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings));
+ break;
+ }
+}
+
+static int velocity_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(velocity_gstrings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void velocity_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ if (netif_running(dev)) {
+ struct velocity_info *vptr = netdev_priv(dev);
+ u32 *p = vptr->mib_counter;
+ int i;
+
+ spin_lock_irq(&vptr->lock);
+ velocity_update_hw_mibs(vptr);
+ spin_unlock_irq(&vptr->lock);
+
+ for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++)
+ *data++ = *p++;
+ }
+}
+
+static const struct ethtool_ops velocity_ethtool_ops = {
+ .get_settings = velocity_get_settings,
+ .set_settings = velocity_set_settings,
+ .get_drvinfo = velocity_get_drvinfo,
+ .get_wol = velocity_ethtool_get_wol,
+ .set_wol = velocity_ethtool_set_wol,
+ .get_msglevel = velocity_get_msglevel,
+ .set_msglevel = velocity_set_msglevel,
+ .get_link = velocity_get_link,
+ .get_strings = velocity_get_strings,
+ .get_sset_count = velocity_get_sset_count,
+ .get_ethtool_stats = velocity_get_ethtool_stats,
+ .get_coalesce = velocity_get_coalesce,
+ .set_coalesce = velocity_set_coalesce,
+ .begin = velocity_ethtool_up,
+ .complete = velocity_ethtool_down
+};
+
+#if defined(CONFIG_PM) && defined(CONFIG_INET)
+static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
+{
+ struct in_ifaddr *ifa = ptr;
+ struct net_device *dev = ifa->ifa_dev->dev;
+
+ if (dev_net(dev) == &init_net &&
+ dev->netdev_ops == &velocity_netdev_ops)
+ velocity_get_ip(netdev_priv(dev));
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block velocity_inetaddr_notifier = {
+ .notifier_call = velocity_netdev_event,
+};
+
+static void velocity_register_notifier(void)
+{
+ register_inetaddr_notifier(&velocity_inetaddr_notifier);
+}
+
+static void velocity_unregister_notifier(void)
+{
+ unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
+}
+
+#else
+
+#define velocity_register_notifier() do {} while (0)
+#define velocity_unregister_notifier() do {} while (0)
+
+#endif /* defined(CONFIG_PM) && defined(CONFIG_INET) */
+
+/**
+ * velocity_init_module - load time function
+ *
+ * Called when the velocity module is loaded. The PCI driver
+ * is registered with the PCI layer, and in turn will call
+ * the probe functions for each velocity adapter installed
+ * in the system.
+ */
+static int __init velocity_init_module(void)
+{
+ int ret;
+
+ velocity_register_notifier();
+ ret = pci_register_driver(&velocity_driver);
+ if (ret < 0)
+ velocity_unregister_notifier();
+ return ret;
+}
+
+/**
+ * velocity_cleanup - module unload
+ *
+ * Called when the velocity module is unloaded. It unregisters
+ * the notifiers and the PCI driver interface for this hardware.
+ * This in turn cleans up all discovered interfaces before
+ * returning from the function.
+ */
+static void __exit velocity_cleanup_module(void)
+{
+ velocity_unregister_notifier();
+ pci_unregister_driver(&velocity_driver);
+}
+
+module_init(velocity_init_module);
+module_exit(velocity_cleanup_module);
diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h
new file mode 100644
index 000000000000..4cb9f13485e9
--- /dev/null
+++ b/drivers/net/ethernet/via/via-velocity.h
@@ -0,0 +1,1579 @@
+/*
+ * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
+ * All rights reserved.
+ *
+ * This software may be redistributed and/or modified under
+ * the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * File: via-velocity.h
+ *
+ * Purpose: Header file to define driver's private structures.
+ *
+ * Author: Chuang Liang-Shing, AJ Jiang
+ *
+ * Date: Jan 24, 2003
+ */
+
+
+#ifndef VELOCITY_H
+#define VELOCITY_H
+
+#define VELOCITY_TX_CSUM_SUPPORT
+
+#define VELOCITY_NAME "via-velocity"
+#define VELOCITY_FULL_DRV_NAM "VIA Networking Velocity Family Gigabit Ethernet Adapter Driver"
+#define VELOCITY_VERSION "1.15"
+
+#define VELOCITY_IO_SIZE 256
+#define VELOCITY_NAPI_WEIGHT 64
+
+#define PKT_BUF_SZ 1540
+
+#define MAX_UNITS 8
+#define OPTION_DEFAULT { [0 ... MAX_UNITS-1] = -1}
+
+#define REV_ID_VT6110 (0)
+
+#define BYTE_REG_BITS_ON(x,p) do { writeb(readb((p))|(x),(p));} while (0)
+#define WORD_REG_BITS_ON(x,p) do { writew(readw((p))|(x),(p));} while (0)
+#define DWORD_REG_BITS_ON(x,p) do { writel(readl((p))|(x),(p));} while (0)
+
+#define BYTE_REG_BITS_IS_ON(x,p) (readb((p)) & (x))
+#define WORD_REG_BITS_IS_ON(x,p) (readw((p)) & (x))
+#define DWORD_REG_BITS_IS_ON(x,p) (readl((p)) & (x))
+
+#define BYTE_REG_BITS_OFF(x,p) do { writeb(readb((p)) & (~(x)),(p));} while (0)
+#define WORD_REG_BITS_OFF(x,p) do { writew(readw((p)) & (~(x)),(p));} while (0)
+#define DWORD_REG_BITS_OFF(x,p) do { writel(readl((p)) & (~(x)),(p));} while (0)
+
+#define BYTE_REG_BITS_SET(x,m,p) do { writeb( (readb((p)) & (~(m))) |(x),(p));} while (0)
+#define WORD_REG_BITS_SET(x,m,p) do { writew( (readw((p)) & (~(m))) |(x),(p));} while (0)
+#define DWORD_REG_BITS_SET(x,m,p) do { writel( (readl((p)) & (~(m)))|(x),(p));} while (0)
+
+#define VAR_USED(p) do {(p)=(p);} while (0)
+
+/*
+ * Purpose: Structures for MAX RX/TX descriptors.
+ */
+
+
+#define B_OWNED_BY_CHIP 1
+#define B_OWNED_BY_HOST 0
+
+/*
+ * Bits in the RSR0 register
+ */
+
+#define RSR_DETAG cpu_to_le16(0x0080)
+#define RSR_SNTAG cpu_to_le16(0x0040)
+#define RSR_RXER cpu_to_le16(0x0020)
+#define RSR_RL cpu_to_le16(0x0010)
+#define RSR_CE cpu_to_le16(0x0008)
+#define RSR_FAE cpu_to_le16(0x0004)
+#define RSR_CRC cpu_to_le16(0x0002)
+#define RSR_VIDM cpu_to_le16(0x0001)
+
+/*
+ * Bits in the RSR1 register
+ */
+
+#define RSR_RXOK cpu_to_le16(0x8000) // rx OK
+#define RSR_PFT cpu_to_le16(0x4000) // Perfect filtering address match
+#define RSR_MAR cpu_to_le16(0x2000) // MAC accept multicast address packet
+#define RSR_BAR cpu_to_le16(0x1000) // MAC accept broadcast address packet
+#define RSR_PHY cpu_to_le16(0x0800) // MAC accept physical address packet
+#define RSR_VTAG cpu_to_le16(0x0400) // 802.1p/1q tagging packet indicator
+#define RSR_STP cpu_to_le16(0x0200) // start of packet
+#define RSR_EDP cpu_to_le16(0x0100) // end of packet
+
+/*
+ * Bits in the CSM register
+ */
+
+#define CSM_IPOK 0x40 //IP Checksum validation ok
+#define CSM_TUPOK 0x20 //TCP/UDP Checksum validation ok
+#define CSM_FRAG 0x10 //Fragment IP datagram
+#define CSM_IPKT 0x04 //Received an IP packet
+#define CSM_TCPKT 0x02 //Received a TCP packet
+#define CSM_UDPKT 0x01 //Received a UDP packet
+
+/*
+ * Bits in the TSR0 register
+ */
+
+#define TSR0_ABT cpu_to_le16(0x0080) // Tx abort because of excessive collision
+#define TSR0_OWT cpu_to_le16(0x0040) // Jumbo frame Tx abort
+#define TSR0_OWC cpu_to_le16(0x0020) // Out of window collision
+#define TSR0_COLS cpu_to_le16(0x0010) // experience collision in this transmit event
+#define TSR0_NCR3 cpu_to_le16(0x0008) // collision retry counter[3]
+#define TSR0_NCR2 cpu_to_le16(0x0004) // collision retry counter[2]
+#define TSR0_NCR1 cpu_to_le16(0x0002) // collision retry counter[1]
+#define TSR0_NCR0 cpu_to_le16(0x0001) // collision retry counter[0]
+#define TSR0_TERR cpu_to_le16(0x8000) //
+#define TSR0_FDX cpu_to_le16(0x4000) // current transaction is serviced by full duplex mode
+#define TSR0_GMII cpu_to_le16(0x2000) // current transaction is serviced by GMII mode
+#define TSR0_LNKFL cpu_to_le16(0x1000) // packet serviced during link down
+#define TSR0_SHDN cpu_to_le16(0x0400) // shutdown case
+#define TSR0_CRS cpu_to_le16(0x0200) // carrier sense lost
+#define TSR0_CDH cpu_to_le16(0x0100) // AQE test fail (CD heartbeat)
+
+//
+// Bits in the TCR0 register
+//
+#define TCR0_TIC 0x80 // assert interrupt immediately while descriptor has been send complete
+#define TCR0_PIC 0x40 // priority interrupt request, INA# is issued over adaptive interrupt scheme
+#define TCR0_VETAG 0x20 // enable VLAN tag
+#define TCR0_IPCK 0x10 // request IP checksum calculation.
+#define TCR0_UDPCK 0x08 // request UDP checksum calculation.
+#define TCR0_TCPCK 0x04 // request TCP checksum calculation.
+#define TCR0_JMBO 0x02 // indicate a jumbo packet in GMAC side
+#define TCR0_CRC 0x01 // disable CRC generation
+
+#define TCPLS_NORMAL 3
+#define TCPLS_START 2
+#define TCPLS_END 1
+#define TCPLS_MED 0
+
+
+// max transmit or receive buffer size
+#define CB_RX_BUF_SIZE 2048UL // max buffer size
+ // NOTE: must be multiple of 4
+
+#define CB_MAX_RD_NUM 512 // MAX # of RD
+#define CB_MAX_TD_NUM 256 // MAX # of TD
+
+#define CB_INIT_RD_NUM_3119 128 // init # of RD, for setup VT3119
+#define CB_INIT_TD_NUM_3119 64 // init # of TD, for setup VT3119
+
+#define CB_INIT_RD_NUM 128 // init # of RD, for setup default
+#define CB_INIT_TD_NUM 64 // init # of TD, for setup default
+
+// for 3119
+#define CB_TD_RING_NUM 4 // # of TD rings.
+#define CB_MAX_SEG_PER_PKT 7 // max data seg per packet (Tx)
+
+
+/*
+ * If collisions exceed 15 times, tx will abort, and
+ * if the tx fifo underflows, tx will fail;
+ * we should try to resend the packet
+ */
+
+#define CB_MAX_TX_ABORT_RETRY 3
+
+/*
+ * Receive descriptor
+ */
+
+struct rdesc0 {
+ __le16 RSR; /* Receive status */
+ __le16 len; /* bits 0--13; bit 15 - owner */
+};
+
+struct rdesc1 {
+ __le16 PQTAG;
+ u8 CSM;
+ u8 IPKT;
+};
+
+enum {
+ RX_INTEN = cpu_to_le16(0x8000)
+};
+
+struct rx_desc {
+ struct rdesc0 rdesc0;
+ struct rdesc1 rdesc1;
+ __le32 pa_low; /* Low 32 bit PCI address */
+ __le16 pa_high; /* Next 16 bit PCI address (48 total) */
+ __le16 size; /* bits 0--14 - frame size, bit 15 - enable int. */
+} __packed;
+
+/*
+ * Transmit descriptor
+ */
+
+struct tdesc0 {
+ __le16 TSR; /* Transmit status register */
+ __le16 len; /* bits 0--13 - size of frame, bit 15 - owner */
+};
+
+struct tdesc1 {
+ __le16 vlan;
+ u8 TCR;
+ u8 cmd; /* bits 0--1 - TCPLS, bits 4--7 - CMDZ */
+} __packed;
+
+enum {
+ TD_QUEUE = cpu_to_le16(0x8000)
+};
+
+struct td_buf {
+ __le32 pa_low;
+ __le16 pa_high;
+ __le16 size; /* bits 0--13 - size, bit 15 - queue */
+} __packed;
+
+struct tx_desc {
+ struct tdesc0 tdesc0;
+ struct tdesc1 tdesc1;
+ struct td_buf td_buf[7];
+};
+
+struct velocity_rd_info {
+ struct sk_buff *skb;
+ dma_addr_t skb_dma;
+};
+
+/*
+ * Used to track transmit side buffers.
+ */
+
+struct velocity_td_info {
+ struct sk_buff *skb;
+ int nskb_dma;
+ dma_addr_t skb_dma[7];
+};
+
+enum velocity_owner {
+ OWNED_BY_HOST = 0,
+ OWNED_BY_NIC = cpu_to_le16(0x8000)
+};
+
+
+/*
+ * MAC registers and macros.
+ */
+
+
+#define MCAM_SIZE 64
+#define VCAM_SIZE 64
+#define TX_QUEUE_NO 4
+
+#define MAX_HW_MIB_COUNTER 32
+#define VELOCITY_MIN_MTU (64)
+#define VELOCITY_MAX_MTU (9000)
+
+/*
+ * Registers in the MAC
+ */
+
+#define MAC_REG_PAR 0x00 // physical address
+#define MAC_REG_RCR 0x06
+#define MAC_REG_TCR 0x07
+#define MAC_REG_CR0_SET 0x08
+#define MAC_REG_CR1_SET 0x09
+#define MAC_REG_CR2_SET 0x0A
+#define MAC_REG_CR3_SET 0x0B
+#define MAC_REG_CR0_CLR 0x0C
+#define MAC_REG_CR1_CLR 0x0D
+#define MAC_REG_CR2_CLR 0x0E
+#define MAC_REG_CR3_CLR 0x0F
+#define MAC_REG_MAR 0x10
+#define MAC_REG_CAM 0x10
+#define MAC_REG_DEC_BASE_HI 0x18
+#define MAC_REG_DBF_BASE_HI 0x1C
+#define MAC_REG_ISR_CTL 0x20
+#define MAC_REG_ISR_HOTMR 0x20
+#define MAC_REG_ISR_TSUPTHR 0x20
+#define MAC_REG_ISR_RSUPTHR 0x20
+#define MAC_REG_ISR_CTL1 0x21
+#define MAC_REG_TXE_SR 0x22
+#define MAC_REG_RXE_SR 0x23
+#define MAC_REG_ISR 0x24
+#define MAC_REG_ISR0 0x24
+#define MAC_REG_ISR1 0x25
+#define MAC_REG_ISR2 0x26
+#define MAC_REG_ISR3 0x27
+#define MAC_REG_IMR 0x28
+#define MAC_REG_IMR0 0x28
+#define MAC_REG_IMR1 0x29
+#define MAC_REG_IMR2 0x2A
+#define MAC_REG_IMR3 0x2B
+#define MAC_REG_TDCSR_SET 0x30
+#define MAC_REG_RDCSR_SET 0x32
+#define MAC_REG_TDCSR_CLR 0x34
+#define MAC_REG_RDCSR_CLR 0x36
+#define MAC_REG_RDBASE_LO 0x38
+#define MAC_REG_RDINDX 0x3C
+#define MAC_REG_TDBASE_LO 0x40
+#define MAC_REG_RDCSIZE 0x50
+#define MAC_REG_TDCSIZE 0x52
+#define MAC_REG_TDINDX 0x54
+#define MAC_REG_TDIDX0 0x54
+#define MAC_REG_TDIDX1 0x56
+#define MAC_REG_TDIDX2 0x58
+#define MAC_REG_TDIDX3 0x5A
+#define MAC_REG_PAUSE_TIMER 0x5C
+#define MAC_REG_RBRDU 0x5E
+#define MAC_REG_FIFO_TEST0 0x60
+#define MAC_REG_FIFO_TEST1 0x64
+#define MAC_REG_CAMADDR 0x68
+#define MAC_REG_CAMCR 0x69
+#define MAC_REG_GFTEST 0x6A
+#define MAC_REG_FTSTCMD 0x6B
+#define MAC_REG_MIICFG 0x6C
+#define MAC_REG_MIISR 0x6D
+#define MAC_REG_PHYSR0 0x6E
+#define MAC_REG_PHYSR1 0x6F
+#define MAC_REG_MIICR 0x70
+#define MAC_REG_MIIADR 0x71
+#define MAC_REG_MIIDATA 0x72
+#define MAC_REG_SOFT_TIMER0 0x74
+#define MAC_REG_SOFT_TIMER1 0x76
+#define MAC_REG_CFGA 0x78
+#define MAC_REG_CFGB 0x79
+#define MAC_REG_CFGC 0x7A
+#define MAC_REG_CFGD 0x7B
+#define MAC_REG_DCFG0 0x7C
+#define MAC_REG_DCFG1 0x7D
+#define MAC_REG_MCFG0 0x7E
+#define MAC_REG_MCFG1 0x7F
+
+#define MAC_REG_TBIST 0x80
+#define MAC_REG_RBIST 0x81
+#define MAC_REG_PMCC 0x82
+#define MAC_REG_STICKHW 0x83
+#define MAC_REG_MIBCR 0x84
+#define MAC_REG_EERSV 0x85
+#define MAC_REG_REVID 0x86
+#define MAC_REG_MIBREAD 0x88
+#define MAC_REG_BPMA 0x8C
+#define MAC_REG_EEWR_DATA 0x8C
+#define MAC_REG_BPMD_WR 0x8F
+#define MAC_REG_BPCMD 0x90
+#define MAC_REG_BPMD_RD 0x91
+#define MAC_REG_EECHKSUM 0x92
+#define MAC_REG_EECSR 0x93
+#define MAC_REG_EERD_DATA 0x94
+#define MAC_REG_EADDR 0x96
+#define MAC_REG_EMBCMD 0x97
+#define MAC_REG_JMPSR0 0x98
+#define MAC_REG_JMPSR1 0x99
+#define MAC_REG_JMPSR2 0x9A
+#define MAC_REG_JMPSR3 0x9B
+#define MAC_REG_CHIPGSR 0x9C
+#define MAC_REG_TESTCFG 0x9D
+#define MAC_REG_DEBUG 0x9E
+#define MAC_REG_CHIPGCR 0x9F /* Chip Operation and Diagnostic Control */
+#define MAC_REG_WOLCR0_SET 0xA0
+#define MAC_REG_WOLCR1_SET 0xA1
+#define MAC_REG_PWCFG_SET 0xA2
+#define MAC_REG_WOLCFG_SET 0xA3
+#define MAC_REG_WOLCR0_CLR 0xA4
+#define MAC_REG_WOLCR1_CLR 0xA5
+#define MAC_REG_PWCFG_CLR 0xA6
+#define MAC_REG_WOLCFG_CLR 0xA7
+#define MAC_REG_WOLSR0_SET 0xA8
+#define MAC_REG_WOLSR1_SET 0xA9
+#define MAC_REG_WOLSR0_CLR 0xAC
+#define MAC_REG_WOLSR1_CLR 0xAD
+#define MAC_REG_PATRN_CRC0 0xB0
+#define MAC_REG_PATRN_CRC1 0xB2
+#define MAC_REG_PATRN_CRC2 0xB4
+#define MAC_REG_PATRN_CRC3 0xB6
+#define MAC_REG_PATRN_CRC4 0xB8
+#define MAC_REG_PATRN_CRC5 0xBA
+#define MAC_REG_PATRN_CRC6 0xBC
+#define MAC_REG_PATRN_CRC7 0xBE
+#define MAC_REG_BYTEMSK0_0 0xC0
+#define MAC_REG_BYTEMSK0_1 0xC4
+#define MAC_REG_BYTEMSK0_2 0xC8
+#define MAC_REG_BYTEMSK0_3 0xCC
+#define MAC_REG_BYTEMSK1_0 0xD0
+#define MAC_REG_BYTEMSK1_1 0xD4
+#define MAC_REG_BYTEMSK1_2 0xD8
+#define MAC_REG_BYTEMSK1_3 0xDC
+#define MAC_REG_BYTEMSK2_0 0xE0
+#define MAC_REG_BYTEMSK2_1 0xE4
+#define MAC_REG_BYTEMSK2_2 0xE8
+#define MAC_REG_BYTEMSK2_3 0xEC
+#define MAC_REG_BYTEMSK3_0 0xF0
+#define MAC_REG_BYTEMSK3_1 0xF4
+#define MAC_REG_BYTEMSK3_2 0xF8
+#define MAC_REG_BYTEMSK3_3 0xFC
+
+/*
+ * Bits in the RCR register
+ */
+
+#define RCR_AS 0x80
+#define RCR_AP 0x40
+#define RCR_AL 0x20
+#define RCR_PROM 0x10
+#define RCR_AB 0x08
+#define RCR_AM 0x04
+#define RCR_AR 0x02
+#define RCR_SEP 0x01
+
+/*
+ * Bits in the TCR register
+ */
+
+#define TCR_TB2BDIS 0x80
+#define TCR_COLTMC1 0x08
+#define TCR_COLTMC0 0x04
+#define TCR_LB1 0x02 /* loopback[1] */
+#define TCR_LB0 0x01 /* loopback[0] */
+
+/*
+ * Bits in the CR0 register
+ */
+
+#define CR0_TXON 0x00000008UL
+#define CR0_RXON 0x00000004UL
+#define CR0_STOP 0x00000002UL /* stop MAC, default = 1 */
+#define CR0_STRT 0x00000001UL /* start MAC */
+#define CR0_SFRST 0x00008000UL /* software reset */
+#define CR0_TM1EN 0x00004000UL
+#define CR0_TM0EN 0x00002000UL
+#define CR0_DPOLL 0x00000800UL /* disable rx/tx auto polling */
+#define CR0_DISAU 0x00000100UL
+#define CR0_XONEN 0x00800000UL
+#define CR0_FDXTFCEN 0x00400000UL /* full-duplex TX flow control enable */
+#define CR0_FDXRFCEN 0x00200000UL /* full-duplex RX flow control enable */
+#define CR0_HDXFCEN 0x00100000UL /* half-duplex flow control enable */
+#define CR0_XHITH1 0x00080000UL /* TX XON high threshold 1 */
+#define CR0_XHITH0 0x00040000UL /* TX XON high threshold 0 */
+#define CR0_XLTH1 0x00020000UL /* TX pause frame low threshold 1 */
+#define CR0_XLTH0 0x00010000UL /* TX pause frame low threshold 0 */
+#define CR0_GSPRST 0x80000000UL
+#define CR0_FORSRST 0x40000000UL
+#define CR0_FPHYRST 0x20000000UL
+#define CR0_DIAG 0x10000000UL
+#define CR0_INTPCTL 0x04000000UL
+#define CR0_GINTMSK1 0x02000000UL
+#define CR0_GINTMSK0 0x01000000UL
+
+/*
+ * Bits in the CR1 register
+ */
+
+#define CR1_SFRST 0x80 /* software reset */
+#define CR1_TM1EN 0x40
+#define CR1_TM0EN 0x20
+#define CR1_DPOLL 0x08 /* disable rx/tx auto polling */
+#define CR1_DISAU 0x01
+
+/*
+ * Bits in the CR2 register
+ */
+
+#define CR2_XONEN 0x80
+#define CR2_FDXTFCEN 0x40 /* full-duplex TX flow control enable */
+#define CR2_FDXRFCEN 0x20 /* full-duplex RX flow control enable */
+#define CR2_HDXFCEN 0x10 /* half-duplex flow control enable */
+#define CR2_XHITH1 0x08 /* TX XON high threshold 1 */
+#define CR2_XHITH0 0x04 /* TX XON high threshold 0 */
+#define CR2_XLTH1 0x02 /* TX pause frame low threshold 1 */
+#define CR2_XLTH0 0x01 /* TX pause frame low threshold 0 */
+
+/*
+ * Bits in the CR3 register
+ */
+
+#define CR3_GSPRST 0x80
+#define CR3_FORSRST 0x40
+#define CR3_FPHYRST 0x20
+#define CR3_DIAG 0x10
+#define CR3_INTPCTL 0x04
+#define CR3_GINTMSK1 0x02
+#define CR3_GINTMSK0 0x01
+
+#define ISRCTL_UDPINT 0x8000
+#define ISRCTL_TSUPDIS 0x4000
+#define ISRCTL_RSUPDIS 0x2000
+#define ISRCTL_PMSK1 0x1000
+#define ISRCTL_PMSK0 0x0800
+#define ISRCTL_INTPD 0x0400
+#define ISRCTL_HCRLD 0x0200
+#define ISRCTL_SCRLD 0x0100
+
+/*
+ * Bits in the ISR_CTL1 register
+ */
+
+#define ISRCTL1_UDPINT 0x80
+#define ISRCTL1_TSUPDIS 0x40
+#define ISRCTL1_RSUPDIS 0x20
+#define ISRCTL1_PMSK1 0x10
+#define ISRCTL1_PMSK0 0x08
+#define ISRCTL1_INTPD 0x04
+#define ISRCTL1_HCRLD 0x02
+#define ISRCTL1_SCRLD 0x01
+
+/*
+ * Bits in the TXE_SR register
+ */
+
+#define TXESR_TFDBS 0x08
+#define TXESR_TDWBS 0x04
+#define TXESR_TDRBS 0x02
+#define TXESR_TDSTR 0x01
+
+/*
+ * Bits in the RXE_SR register
+ */
+
+#define RXESR_RFDBS 0x08
+#define RXESR_RDWBS 0x04
+#define RXESR_RDRBS 0x02
+#define RXESR_RDSTR 0x01
+
+/*
+ * Bits in the ISR register
+ */
+
+#define ISR_ISR3 0x80000000UL
+#define ISR_ISR2 0x40000000UL
+#define ISR_ISR1 0x20000000UL
+#define ISR_ISR0 0x10000000UL
+#define ISR_TXSTLI 0x02000000UL
+#define ISR_RXSTLI 0x01000000UL
+#define ISR_HFLD 0x00800000UL
+#define ISR_UDPI 0x00400000UL
+#define ISR_MIBFI 0x00200000UL
+#define ISR_SHDNI 0x00100000UL
+#define ISR_PHYI 0x00080000UL
+#define ISR_PWEI 0x00040000UL
+#define ISR_TMR1I 0x00020000UL
+#define ISR_TMR0I 0x00010000UL
+#define ISR_SRCI 0x00008000UL
+#define ISR_LSTPEI 0x00004000UL
+#define ISR_LSTEI 0x00002000UL
+#define ISR_OVFI 0x00001000UL
+#define ISR_FLONI 0x00000800UL
+#define ISR_RACEI 0x00000400UL
+#define ISR_TXWB1I 0x00000200UL
+#define ISR_TXWB0I 0x00000100UL
+#define ISR_PTX3I 0x00000080UL
+#define ISR_PTX2I 0x00000040UL
+#define ISR_PTX1I 0x00000020UL
+#define ISR_PTX0I 0x00000010UL
+#define ISR_PTXI 0x00000008UL
+#define ISR_PRXI 0x00000004UL
+#define ISR_PPTXI 0x00000002UL
+#define ISR_PPRXI 0x00000001UL
+
+/*
+ * Bits in the IMR register
+ */
+
+#define IMR_TXSTLM 0x02000000UL
+#define IMR_UDPIM 0x00400000UL
+#define IMR_MIBFIM 0x00200000UL
+#define IMR_SHDNIM 0x00100000UL
+#define IMR_PHYIM 0x00080000UL
+#define IMR_PWEIM 0x00040000UL
+#define IMR_TMR1IM 0x00020000UL
+#define IMR_TMR0IM 0x00010000UL
+
+#define IMR_SRCIM 0x00008000UL
+#define IMR_LSTPEIM 0x00004000UL
+#define IMR_LSTEIM 0x00002000UL
+#define IMR_OVFIM 0x00001000UL
+#define IMR_FLONIM 0x00000800UL
+#define IMR_RACEIM 0x00000400UL
+#define IMR_TXWB1IM 0x00000200UL
+#define IMR_TXWB0IM 0x00000100UL
+
+#define IMR_PTX3IM 0x00000080UL
+#define IMR_PTX2IM 0x00000040UL
+#define IMR_PTX1IM 0x00000020UL
+#define IMR_PTX0IM 0x00000010UL
+#define IMR_PTXIM 0x00000008UL
+#define IMR_PRXIM 0x00000004UL
+#define IMR_PPTXIM 0x00000002UL
+#define IMR_PPRXIM 0x00000001UL
+
+/* 0x0013FB0FUL = initial value of IMR */
+
+#define INT_MASK_DEF (IMR_PPTXIM|IMR_PPRXIM|IMR_PTXIM|IMR_PRXIM|\
+ IMR_PWEIM|IMR_TXWB0IM|IMR_TXWB1IM|IMR_FLONIM|\
+ IMR_OVFIM|IMR_LSTEIM|IMR_LSTPEIM|IMR_SRCIM|IMR_MIBFIM|\
+ IMR_SHDNIM|IMR_TMR1IM|IMR_TMR0IM|IMR_TXSTLM)
+
+/*
+ * Bits in the TDCSR0/1, RDCSR0 register
+ */
+
+#define TRDCSR_DEAD 0x0008
+#define TRDCSR_WAK 0x0004
+#define TRDCSR_ACT 0x0002
+#define TRDCSR_RUN 0x0001
+
+/*
+ * Bits in the CAMADDR register
+ */
+
+#define CAMADDR_CAMEN 0x80
+#define CAMADDR_VCAMSL 0x40
+
+/*
+ * Bits in the CAMCR register
+ */
+
+#define CAMCR_PS1 0x80
+#define CAMCR_PS0 0x40
+#define CAMCR_AITRPKT 0x20
+#define CAMCR_AITR16 0x10
+#define CAMCR_CAMRD 0x08
+#define CAMCR_CAMWR 0x04
+#define CAMCR_PS_CAM_MASK 0x40
+#define CAMCR_PS_CAM_DATA 0x80
+#define CAMCR_PS_MAR 0x00
+
+/*
+ * Bits in the MIICFG register
+ */
+
+#define MIICFG_MPO1 0x80
+#define MIICFG_MPO0 0x40
+#define MIICFG_MFDC 0x20
+
+/*
+ * Bits in the MIISR register
+ */
+
+#define MIISR_MIDLE 0x80
+
+/*
+ * Bits in the PHYSR0 register
+ */
+
+#define PHYSR0_PHYRST 0x80
+#define PHYSR0_LINKGD 0x40
+#define PHYSR0_FDPX 0x10
+#define PHYSR0_SPDG 0x08
+#define PHYSR0_SPD10 0x04
+#define PHYSR0_RXFLC 0x02
+#define PHYSR0_TXFLC 0x01
+
+/*
+ * Bits in the PHYSR1 register
+ */
+
+#define PHYSR1_PHYTBI 0x01
+
+/*
+ * Bits in the MIICR register
+ */
+
+#define MIICR_MAUTO 0x80
+#define MIICR_RCMD 0x40
+#define MIICR_WCMD 0x20
+#define MIICR_MDPM 0x10
+#define MIICR_MOUT 0x08
+#define MIICR_MDO 0x04
+#define MIICR_MDI 0x02
+#define MIICR_MDC 0x01
+
+/*
+ * Bits in the MIIADR register
+ */
+
+#define MIIADR_SWMPL 0x80
+
+/*
+ * Bits in the CFGA register
+ */
+
+#define CFGA_PMHCTG 0x08
+#define CFGA_GPIO1PD 0x04
+#define CFGA_ABSHDN 0x02
+#define CFGA_PACPI 0x01
+
+/*
+ * Bits in the CFGB register
+ */
+
+#define CFGB_GTCKOPT 0x80
+#define CFGB_MIIOPT 0x40
+#define CFGB_CRSEOPT 0x20
+#define CFGB_OFSET 0x10
+#define CFGB_CRANDOM 0x08
+#define CFGB_CAP 0x04
+#define CFGB_MBA 0x02
+#define CFGB_BAKOPT 0x01
+
+/*
+ * Bits in the CFGC register
+ */
+
+#define CFGC_EELOAD 0x80
+#define CFGC_BROPT 0x40
+#define CFGC_DLYEN 0x20
+#define CFGC_DTSEL 0x10
+#define CFGC_BTSEL 0x08
+#define CFGC_BPS2 0x04 /* bootrom select[2] */
+#define CFGC_BPS1 0x02 /* bootrom select[1] */
+#define CFGC_BPS0 0x01 /* bootrom select[0] */
+
+/*
+ * Bits in the CFGD register
+ */
+
+#define CFGD_IODIS 0x80
+#define CFGD_MSLVDACEN 0x40
+#define CFGD_CFGDACEN 0x20
+#define CFGD_PCI64EN 0x10
+#define CFGD_HTMRL4 0x08
+
+/*
+ * Bits in the DCFG1 register
+ */
+
+#define DCFG_XMWI 0x8000
+#define DCFG_XMRM 0x4000
+#define DCFG_XMRL 0x2000
+#define DCFG_PERDIS 0x1000
+#define DCFG_MRWAIT 0x0400
+#define DCFG_MWWAIT 0x0200
+#define DCFG_LATMEN 0x0100
+
+/*
+ * Bits in the MCFG0 register
+ */
+
+#define MCFG_RXARB 0x0080
+#define MCFG_RFT1 0x0020
+#define MCFG_RFT0 0x0010
+#define MCFG_LOWTHOPT 0x0008
+#define MCFG_PQEN 0x0004
+#define MCFG_RTGOPT 0x0002
+#define MCFG_VIDFR 0x0001
+
+/*
+ * Bits in the MCFG1 register
+ */
+
+#define MCFG_TXARB 0x8000
+#define MCFG_TXQBK1 0x0800
+#define MCFG_TXQBK0 0x0400
+#define MCFG_TXQNOBK 0x0200
+#define MCFG_SNAPOPT 0x0100
+
+/*
+ * Bits in the PMCC register
+ */
+
+#define PMCC_DSI 0x80
+#define PMCC_D2_DIS 0x40
+#define PMCC_D1_DIS 0x20
+#define PMCC_D3C_EN 0x10
+#define PMCC_D3H_EN 0x08
+#define PMCC_D2_EN 0x04
+#define PMCC_D1_EN 0x02
+#define PMCC_D0_EN 0x01
+
+/*
+ * Bits in STICKHW
+ */
+
+#define STICKHW_SWPTAG 0x10
+#define STICKHW_WOLSR 0x08
+#define STICKHW_WOLEN 0x04
+#define STICKHW_DS1 0x02 /* R/W by software/cfg cycle */
+#define STICKHW_DS0 0x01 /* suspend well DS write port */
+
+/*
+ * Bits in the MIBCR register
+ */
+
+#define MIBCR_MIBISTOK 0x80
+#define MIBCR_MIBISTGO 0x40
+#define MIBCR_MIBINC 0x20
+#define MIBCR_MIBHI 0x10
+#define MIBCR_MIBFRZ 0x08
+#define MIBCR_MIBFLSH 0x04
+#define MIBCR_MPTRINI 0x02
+#define MIBCR_MIBCLR 0x01
+
+/*
+ * Bits in the EERSV register
+ */
+
+#define EERSV_BOOT_RPL ((u8) 0x01) /* Boot method selection for VT6110 */
+
+#define EERSV_BOOT_MASK ((u8) 0x06)
+#define EERSV_BOOT_INT19 ((u8) 0x00)
+#define EERSV_BOOT_INT18 ((u8) 0x02)
+#define EERSV_BOOT_LOCAL ((u8) 0x04)
+#define EERSV_BOOT_BEV ((u8) 0x06)
+
+
+/*
+ * Bits in BPCMD
+ */
+
+#define BPCMD_BPDNE 0x80
+#define BPCMD_EBPWR 0x02
+#define BPCMD_EBPRD 0x01
+
+/*
+ * Bits in the EECSR register
+ */
+
+#define EECSR_EMBP 0x40 /* eeprom embedded programming */
+#define EECSR_RELOAD 0x20 /* eeprom content reload */
+#define EECSR_DPM 0x10 /* eeprom direct programming */
+#define EECSR_ECS 0x08 /* eeprom CS pin */
+#define EECSR_ECK 0x04 /* eeprom CK pin */
+#define EECSR_EDI 0x02 /* eeprom DI pin */
+#define EECSR_EDO 0x01 /* eeprom DO pin */
+
+/*
+ * Bits in the EMBCMD register
+ */
+
+#define EMBCMD_EDONE 0x80
+#define EMBCMD_EWDIS 0x08
+#define EMBCMD_EWEN 0x04
+#define EMBCMD_EWR 0x02
+#define EMBCMD_ERD 0x01
+
+/*
+ * Bits in TESTCFG register
+ */
+
+#define TESTCFG_HBDIS 0x80
+
+/*
+ * Bits in CHIPGCR register
+ */
+
+#define CHIPGCR_FCGMII 0x80 /* force GMII (else MII only) */
+#define CHIPGCR_FCFDX 0x40 /* force full duplex */
+#define CHIPGCR_FCRESV 0x20
+#define CHIPGCR_FCMODE 0x10 /* enable MAC forced mode */
+#define CHIPGCR_LPSOPT 0x08
+#define CHIPGCR_TM1US 0x04
+#define CHIPGCR_TM0US 0x02
+#define CHIPGCR_PHYINTEN 0x01
+
+/*
+ * Bits in WOLCR0
+ */
+
+#define WOLCR_MSWOLEN7 0x0080 /* enable pattern match filtering */
+#define WOLCR_MSWOLEN6 0x0040
+#define WOLCR_MSWOLEN5 0x0020
+#define WOLCR_MSWOLEN4 0x0010
+#define WOLCR_MSWOLEN3 0x0008
+#define WOLCR_MSWOLEN2 0x0004
+#define WOLCR_MSWOLEN1 0x0002
+#define WOLCR_MSWOLEN0 0x0001
+#define WOLCR_ARP_EN 0x0001
+
+/*
+ * Bits in WOLCR1
+ */
+
+#define WOLCR_LINKOFF_EN 0x0800 /* link off detected enable */
+#define WOLCR_LINKON_EN 0x0400 /* link on detected enable */
+#define WOLCR_MAGIC_EN 0x0200 /* magic packet filter enable */
+#define WOLCR_UNICAST_EN 0x0100 /* unicast filter enable */
+
+
+/*
+ * Bits in PWCFG
+ */
+
+#define PWCFG_PHYPWOPT 0x80 /* internal MII I/F timing */
+#define PWCFG_PCISTICK 0x40 /* PCI sticky R/W enable */
+#define PWCFG_WOLTYPE 0x20 /* pulse(1) or button (0) */
+#define PWCFG_LEGCY_WOL 0x10
+#define PWCFG_PMCSR_PME_SR 0x08
+#define PWCFG_PMCSR_PME_EN 0x04 /* control by PCISTICK */
+#define PWCFG_LEGACY_WOLSR 0x02 /* Legacy WOL_SR shadow */
+#define PWCFG_LEGACY_WOLEN 0x01 /* Legacy WOL_EN shadow */
+
+/*
+ * Bits in WOLCFG
+ */
+
+#define WOLCFG_PMEOVR 0x80 /* for legacy use, force PMEEN always */
+#define WOLCFG_SAM 0x20 /* accept multicast case reset, default=0 */
+#define WOLCFG_SAB 0x10 /* accept broadcast case reset, default=0 */
+#define WOLCFG_SMIIACC 0x08 /* ?? */
+#define WOLCFG_SGENWH 0x02
+#define WOLCFG_PHYINTEN 0x01 /* 0:PHYINT trigger enable, 1:use internal MII
+ to report status change */
+/*
+ * Bits in WOLSR1
+ */
+
+#define WOLSR_LINKOFF_INT 0x0800
+#define WOLSR_LINKON_INT 0x0400
+#define WOLSR_MAGIC_INT 0x0200
+#define WOLSR_UNICAST_INT 0x0100
+
+/*
+ * Ethernet address filter type
+ */
+
+#define PKT_TYPE_NONE 0x0000 /* Turn off receiver */
+#define PKT_TYPE_DIRECTED 0x0001 /* obsolete, directed address is always accepted */
+#define PKT_TYPE_MULTICAST 0x0002
+#define PKT_TYPE_ALL_MULTICAST 0x0004
+#define PKT_TYPE_BROADCAST 0x0008
+#define PKT_TYPE_PROMISCUOUS 0x0020
+#define PKT_TYPE_LONG 0x2000 /* NOTE.... the definition of LONG is >2048 bytes in our chip */
+#define PKT_TYPE_RUNT 0x4000
+#define PKT_TYPE_ERROR 0x8000 /* Accept error packets, e.g. CRC error */
+
+/*
+ * Loopback mode
+ */
+
+#define MAC_LB_NONE 0x00
+#define MAC_LB_INTERNAL 0x01
+#define MAC_LB_EXTERNAL 0x02
+
+/*
+ * Enabled mask value of irq
+ */
+
+#if defined(_SIM)
+#define IMR_MASK_VALUE 0x0033FF0FUL /* initial value of IMR
+ set IMR0 to 0x0F according to spec */
+
+#else
+#define IMR_MASK_VALUE 0x0013FB0FUL /* initial value of IMR
+ ignore MIBFI,RACEI to
+ reduce intr. frequency
+ NOTE.... do not enable the NoBuf int mask in the driver
+ when (1) NoBuf -> RxThreshold = SF
+ (2) OK -> RxThreshold = original value
+ */
+#endif
+
+/*
+ * Revision id
+ */
+
+#define REV_ID_VT3119_A0 0x00
+#define REV_ID_VT3119_A1 0x01
+#define REV_ID_VT3216_A0 0x10
+
+/*
+ * Max time out delay time
+ */
+
+#define W_MAX_TIMEOUT 0x0FFFU
+
+
+/*
+ * MAC registers as a structure. Cannot be directly accessed this
+ * way but generates offsets for readl/writel() calls
+ */
+
+struct mac_regs {
+ volatile u8 PAR[6]; /* 0x00 */
+ volatile u8 RCR;
+ volatile u8 TCR;
+
+ volatile __le32 CR0Set; /* 0x08 */
+ volatile __le32 CR0Clr; /* 0x0C */
+
+ volatile u8 MARCAM[8]; /* 0x10 */
+
+ volatile __le32 DecBaseHi; /* 0x18 */
+ volatile __le16 DbfBaseHi; /* 0x1C */
+ volatile __le16 reserved_1E;
+
+ volatile __le16 ISRCTL; /* 0x20 */
+ volatile u8 TXESR;
+ volatile u8 RXESR;
+
+ volatile __le32 ISR; /* 0x24 */
+ volatile __le32 IMR;
+
+ volatile __le32 TDStatusPort; /* 0x2C */
+
+ volatile __le16 TDCSRSet; /* 0x30 */
+ volatile u8 RDCSRSet;
+ volatile u8 reserved_33;
+ volatile __le16 TDCSRClr;
+ volatile u8 RDCSRClr;
+ volatile u8 reserved_37;
+
+ volatile __le32 RDBaseLo; /* 0x38 */
+ volatile __le16 RDIdx; /* 0x3C */
+ volatile u8 TQETMR; /* 0x3E, VT3216 and above only */
+ volatile u8 RQETMR; /* 0x3F, VT3216 and above only */
+
+ volatile __le32 TDBaseLo[4]; /* 0x40 */
+
+ volatile __le16 RDCSize; /* 0x50 */
+ volatile __le16 TDCSize; /* 0x52 */
+ volatile __le16 TDIdx[4]; /* 0x54 */
+ volatile __le16 tx_pause_timer; /* 0x5C */
+ volatile __le16 RBRDU; /* 0x5E */
+
+ volatile __le32 FIFOTest0; /* 0x60 */
+ volatile __le32 FIFOTest1; /* 0x64 */
+
+ volatile u8 CAMADDR; /* 0x68 */
+ volatile u8 CAMCR; /* 0x69 */
+ volatile u8 GFTEST; /* 0x6A */
+ volatile u8 FTSTCMD; /* 0x6B */
+
+ volatile u8 MIICFG; /* 0x6C */
+ volatile u8 MIISR;
+ volatile u8 PHYSR0;
+ volatile u8 PHYSR1;
+ volatile u8 MIICR;
+ volatile u8 MIIADR;
+ volatile __le16 MIIDATA;
+
+ volatile __le16 SoftTimer0; /* 0x74 */
+ volatile __le16 SoftTimer1;
+
+ volatile u8 CFGA; /* 0x78 */
+ volatile u8 CFGB;
+ volatile u8 CFGC;
+ volatile u8 CFGD;
+
+ volatile __le16 DCFG; /* 0x7C */
+ volatile __le16 MCFG;
+
+ volatile u8 TBIST; /* 0x80 */
+ volatile u8 RBIST;
+ volatile u8 PMCPORT;
+ volatile u8 STICKHW;
+
+ volatile u8 MIBCR; /* 0x84 */
+ volatile u8 reserved_85;
+ volatile u8 rev_id;
+ volatile u8 PORSTS;
+
+ volatile __le32 MIBData; /* 0x88 */
+
+ volatile __le16 EEWrData;
+
+ volatile u8 reserved_8E;
+ volatile u8 BPMDWr;
+ volatile u8 BPCMD;
+ volatile u8 BPMDRd;
+
+ volatile u8 EECHKSUM; /* 0x92 */
+ volatile u8 EECSR;
+
+ volatile __le16 EERdData; /* 0x94 */
+ volatile u8 EADDR;
+ volatile u8 EMBCMD;
+
+
+ volatile u8 JMPSR0; /* 0x98 */
+ volatile u8 JMPSR1;
+ volatile u8 JMPSR2;
+ volatile u8 JMPSR3;
+ volatile u8 CHIPGSR; /* 0x9C */
+ volatile u8 TESTCFG;
+ volatile u8 DEBUG;
+ volatile u8 CHIPGCR;
+
+ volatile __le16 WOLCRSet; /* 0xA0 */
+ volatile u8 PWCFGSet;
+ volatile u8 WOLCFGSet;
+
+ volatile __le16 WOLCRClr; /* 0xA4 */
+ volatile u8 PWCFGCLR;
+ volatile u8 WOLCFGClr;
+
+ volatile __le16 WOLSRSet; /* 0xA8 */
+ volatile __le16 reserved_AA;
+
+ volatile __le16 WOLSRClr; /* 0xAC */
+ volatile __le16 reserved_AE;
+
+ volatile __le16 PatternCRC[8]; /* 0xB0 */
+ volatile __le32 ByteMask[4][4]; /* 0xC0 */
+};
+
+
+enum hw_mib {
+ HW_MIB_ifRxAllPkts = 0,
+ HW_MIB_ifRxOkPkts,
+ HW_MIB_ifTxOkPkts,
+ HW_MIB_ifRxErrorPkts,
+ HW_MIB_ifRxRuntOkPkt,
+ HW_MIB_ifRxRuntErrPkt,
+ HW_MIB_ifRx64Pkts,
+ HW_MIB_ifTx64Pkts,
+ HW_MIB_ifRx65To127Pkts,
+ HW_MIB_ifTx65To127Pkts,
+ HW_MIB_ifRx128To255Pkts,
+ HW_MIB_ifTx128To255Pkts,
+ HW_MIB_ifRx256To511Pkts,
+ HW_MIB_ifTx256To511Pkts,
+ HW_MIB_ifRx512To1023Pkts,
+ HW_MIB_ifTx512To1023Pkts,
+ HW_MIB_ifRx1024To1518Pkts,
+ HW_MIB_ifTx1024To1518Pkts,
+ HW_MIB_ifTxEtherCollisions,
+ HW_MIB_ifRxPktCRCE,
+ HW_MIB_ifRxJumboPkts,
+ HW_MIB_ifTxJumboPkts,
+ HW_MIB_ifRxMacControlFrames,
+ HW_MIB_ifTxMacControlFrames,
+ HW_MIB_ifRxPktFAE,
+ HW_MIB_ifRxLongOkPkt,
+ HW_MIB_ifRxLongPktErrPkt,
+ HW_MIB_ifTXSQEErrors,
+ HW_MIB_ifRxNobuf,
+ HW_MIB_ifRxSymbolErrors,
+ HW_MIB_ifInRangeLengthErrors,
+ HW_MIB_ifLateCollisions,
+ HW_MIB_SIZE
+};
+
+enum chip_type {
+ CHIP_TYPE_VT6110 = 1,
+};
+
+struct velocity_info_tbl {
+ enum chip_type chip_id;
+ const char *name;
+ int txqueue;
+ u32 flags;
+};
+
+#define mac_hw_mibs_init(regs) {\
+ BYTE_REG_BITS_ON(MIBCR_MIBFRZ,&((regs)->MIBCR));\
+ BYTE_REG_BITS_ON(MIBCR_MIBCLR,&((regs)->MIBCR));\
+ do {}\
+ while (BYTE_REG_BITS_IS_ON(MIBCR_MIBCLR,&((regs)->MIBCR)));\
+ BYTE_REG_BITS_OFF(MIBCR_MIBFRZ,&((regs)->MIBCR));\
+}
+
+#define mac_read_isr(regs) readl(&((regs)->ISR))
+#define mac_write_isr(regs, x) writel((x),&((regs)->ISR))
+#define mac_clear_isr(regs) writel(0xffffffffL,&((regs)->ISR))
+
+#define mac_write_int_mask(mask, regs) writel((mask),&((regs)->IMR));
+#define mac_disable_int(regs) writel(CR0_GINTMSK1,&((regs)->CR0Clr))
+#define mac_enable_int(regs) writel(CR0_GINTMSK1,&((regs)->CR0Set))
+
+#define mac_set_dma_length(regs, n) {\
+ BYTE_REG_BITS_SET((n),0x07,&((regs)->DCFG));\
+}
+
+#define mac_set_rx_thresh(regs, n) {\
+ BYTE_REG_BITS_SET((n),(MCFG_RFT0|MCFG_RFT1),&((regs)->MCFG));\
+}
+
+#define mac_rx_queue_run(regs) {\
+ writeb(TRDCSR_RUN, &((regs)->RDCSRSet));\
+}
+
+#define mac_rx_queue_wake(regs) {\
+ writeb(TRDCSR_WAK, &((regs)->RDCSRSet));\
+}
+
+#define mac_tx_queue_run(regs, n) {\
+ writew(TRDCSR_RUN<<((n)*4),&((regs)->TDCSRSet));\
+}
+
+#define mac_tx_queue_wake(regs, n) {\
+ writew(TRDCSR_WAK<<(n*4),&((regs)->TDCSRSet));\
+}
+
+static inline void mac_eeprom_reload(struct mac_regs __iomem * regs) {
+ int i=0;
+
+ BYTE_REG_BITS_ON(EECSR_RELOAD,&(regs->EECSR));
+ do {
+ udelay(10);
+ if (i++>0x1000)
+ break;
+ } while (BYTE_REG_BITS_IS_ON(EECSR_RELOAD,&(regs->EECSR)));
+}
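+
+/*
+ * Editorial note: the loop above bounds the EEPROM reload busy-wait to
+ * roughly 0x1000 * 10us (about 41ms) before giving up silently.
+ */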
+
+/*
+ * Header for WOL definitions. Used to compute hashes
+ */
+
+typedef u8 MCAM_ADDR[ETH_ALEN];
+
+struct arp_packet {
+ u8 dest_mac[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
+ __be16 type;
+ __be16 ar_hrd;
+ __be16 ar_pro;
+ u8 ar_hln;
+ u8 ar_pln;
+ __be16 ar_op;
+ u8 ar_sha[ETH_ALEN];
+ u8 ar_sip[4];
+ u8 ar_tha[ETH_ALEN];
+ u8 ar_tip[4];
+} __packed;
+
+struct _magic_packet {
+ u8 dest_mac[6];
+ u8 src_mac[6];
+ __be16 type;
+ u8 MAC[16][6];
+ u8 password[6];
+} __packed;
+
+/*
+ * Store for chip context when saving and restoring status. Not
+ * all fields are saved/restored currently.
+ */
+
+struct velocity_context {
+ u8 mac_reg[256];
+ MCAM_ADDR cam_addr[MCAM_SIZE];
+ u16 vcam[VCAM_SIZE];
+ u32 cammask[2];
+ u32 patcrc[2];
+ u32 pattern[8];
+};
+
+/*
+ * Registers in the MII (offset unit is WORD)
+ */
+
+// Marvell 88E1000/88E1000S
+#define MII_REG_PSCR 0x10 // PHY specific control register
+
+//
+// Bits in the Silicon revision register
+//
+
+#define TCSR_ECHODIS 0x2000 //
+#define AUXCR_MDPPS 0x0004 //
+
+// Bits in the PLED register
+#define PLED_LALBE 0x0004 //
+
+// Marvell 88E1000/88E1000S Bits in the PHY specific control register (10h)
+#define PSCR_ACRSTX 0x0800 // Assert CRS on Transmit
+
+#define PHYID_CICADA_CS8201 0x000FC410UL
+#define PHYID_VT3216_32BIT 0x000FC610UL
+#define PHYID_VT3216_64BIT 0x000FC600UL
+#define PHYID_MARVELL_1000 0x01410C50UL
+#define PHYID_MARVELL_1000S 0x01410C40UL
+
+#define PHYID_REV_ID_MASK 0x0000000FUL
+
+#define PHYID_GET_PHY_ID(i) ((i) & ~PHYID_REV_ID_MASK)
+
+#define MII_REG_BITS_ON(x,i,p) do {\
+ u16 w;\
+ velocity_mii_read((p),(i),&(w));\
+ (w)|=(x);\
+ velocity_mii_write((p),(i),(w));\
+} while (0)
+
+#define MII_REG_BITS_OFF(x,i,p) do {\
+ u16 w;\
+ velocity_mii_read((p),(i),&(w));\
+ (w)&=(~(x));\
+ velocity_mii_write((p),(i),(w));\
+} while (0)
+
+#define MII_REG_BITS_IS_ON(x,i,p) ({\
+ u16 w;\
+ velocity_mii_read((p),(i),&(w));\
+ ((int) ((w) & (x)));})
+
+#define MII_GET_PHY_ID(p) ({\
+ u32 id;\
+ velocity_mii_read((p),MII_PHYSID2,(u16 *) &id);\
+ velocity_mii_read((p),MII_PHYSID1,((u16 *) &id)+1);\
+ (id);})
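+
+/*
+ * Editorial note: MII_GET_PHY_ID fills the low half of 'id' with
+ * MII_PHYSID2 and the high half with MII_PHYSID1 via u16 pointer
+ * arithmetic, which assumes a little-endian u32 layout (true on the
+ * hosts this PCI device targets, but an assumption nonetheless); the
+ * PHYID_* constants above match that PHYSID1:PHYSID2 ordering.
+ */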
+
+/*
+ * Inline debug routine
+ */
+
+
+enum velocity_msg_level {
+ MSG_LEVEL_ERR = 0, //Errors that will cause abnormal operation.
+ MSG_LEVEL_NOTICE = 1, //Some errors need users to be notified.
+ MSG_LEVEL_INFO = 2, //Normal message.
+ MSG_LEVEL_VERBOSE = 3, //Will report all trivial errors.
+ MSG_LEVEL_DEBUG = 4 //Only for debug purpose.
+};
+
+#ifdef VELOCITY_DEBUG
+#define ASSERT(x) { \
+ if (!(x)) { \
+ printk(KERN_ERR "assertion %s failed: file %s line %d\n", #x,\
+ __func__, __LINE__);\
+ BUG(); \
+ }\
+}
+#define VELOCITY_DBG(p,args...) printk(p, ##args)
+#else
+#define ASSERT(x)
+#define VELOCITY_DBG(x)
+#endif
+
+#define VELOCITY_PRT(l, p, args...) do {if (l<=msglevel) printk( p ,##args);} while (0)
+
+#define VELOCITY_PRT_CAMMASK(p,t) {\
+ int i;\
+ if ((t)==VELOCITY_MULTICAST_CAM) {\
+ for (i=0;i<(MCAM_SIZE/8);i++)\
+ printk("%02X",(p)->mCAMmask[i]);\
+ }\
+ else {\
+ for (i=0;i<(VCAM_SIZE/8);i++)\
+ printk("%02X",(p)->vCAMmask[i]);\
+ }\
+ printk("\n");\
+}
+
+
+
+#define VELOCITY_WOL_MAGIC 0x00000000UL
+#define VELOCITY_WOL_PHY 0x00000001UL
+#define VELOCITY_WOL_ARP 0x00000002UL
+#define VELOCITY_WOL_UCAST 0x00000004UL
+#define VELOCITY_WOL_BCAST 0x00000010UL
+#define VELOCITY_WOL_MCAST 0x00000020UL
+#define VELOCITY_WOL_MAGIC_SEC 0x00000040UL
+
+/*
+ * Flags for options
+ */
+
+#define VELOCITY_FLAGS_TAGGING 0x00000001UL
+#define VELOCITY_FLAGS_RX_CSUM 0x00000004UL
+#define VELOCITY_FLAGS_IP_ALIGN 0x00000008UL
+#define VELOCITY_FLAGS_VAL_PKT_LEN 0x00000010UL
+
+#define VELOCITY_FLAGS_FLOW_CTRL 0x01000000UL
+
+/*
+ * Flags for driver status
+ */
+
+#define VELOCITY_FLAGS_OPENED 0x00010000UL
+#define VELOCITY_FLAGS_VMNS_CONNECTED 0x00020000UL
+#define VELOCITY_FLAGS_VMNS_COMMITTED 0x00040000UL
+#define VELOCITY_FLAGS_WOL_ENABLED 0x00080000UL
+
+/*
+ * Flags for MII status
+ */
+
+#define VELOCITY_LINK_FAIL 0x00000001UL
+#define VELOCITY_SPEED_10 0x00000002UL
+#define VELOCITY_SPEED_100 0x00000004UL
+#define VELOCITY_SPEED_1000 0x00000008UL
+#define VELOCITY_DUPLEX_FULL 0x00000010UL
+#define VELOCITY_AUTONEG_ENABLE 0x00000020UL
+#define VELOCITY_FORCED_BY_EEPROM 0x00000040UL
+
+/*
+ * For velocity_set_media_duplex
+ */
+
+#define VELOCITY_LINK_CHANGE 0x00000001UL
+
+enum speed_opt {
+ SPD_DPX_AUTO = 0,
+ SPD_DPX_100_HALF = 1,
+ SPD_DPX_100_FULL = 2,
+ SPD_DPX_10_HALF = 3,
+ SPD_DPX_10_FULL = 4,
+ SPD_DPX_1000_FULL = 5
+};
+
+enum velocity_init_type {
+ VELOCITY_INIT_COLD = 0,
+ VELOCITY_INIT_RESET,
+ VELOCITY_INIT_WOL
+};
+
+enum velocity_flow_cntl_type {
+ FLOW_CNTL_DEFAULT = 1,
+ FLOW_CNTL_TX,
+ FLOW_CNTL_RX,
+ FLOW_CNTL_TX_RX,
+ FLOW_CNTL_DISABLE,
+};
+
+struct velocity_opt {
+ int numrx; /* Number of RX descriptors */
+ int numtx; /* Number of TX descriptors */
+ enum speed_opt spd_dpx; /* Media link mode */
+
+ int DMA_length; /* DMA length */
+ int rx_thresh; /* RX_THRESH */
+ int flow_cntl;
+ int wol_opts; /* Wake on lan options */
+ int td_int_count;
+ int int_works;
+ int rx_bandwidth_hi;
+ int rx_bandwidth_lo;
+ int rx_bandwidth_en;
+ int rxqueue_timer;
+ int txqueue_timer;
+ int tx_intsup;
+ int rx_intsup;
+ u32 flags;
+};
+
+#define AVAIL_TD(p,q) ((p)->options.numtx-((p)->tx.used[(q)]))
+
+#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx])
+
+struct velocity_info {
+ struct pci_dev *pdev;
+ struct net_device *dev;
+
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u8 ip_addr[4];
+ enum chip_type chip_id;
+
+ struct mac_regs __iomem * mac_regs;
+ unsigned long memaddr;
+ unsigned long ioaddr;
+
+ struct tx_info {
+ int numq;
+
+ /* FIXME: the locality of the data seems rather poor. */
+ int used[TX_QUEUE_NO];
+ int curr[TX_QUEUE_NO];
+ int tail[TX_QUEUE_NO];
+ struct tx_desc *rings[TX_QUEUE_NO];
+ struct velocity_td_info *infos[TX_QUEUE_NO];
+ dma_addr_t pool_dma[TX_QUEUE_NO];
+ } tx;
+
+ struct rx_info {
+ int buf_sz;
+
+ int dirty;
+ int curr;
+ u32 filled;
+ struct rx_desc *ring;
+ struct velocity_rd_info *info; /* It's an array */
+ dma_addr_t pool_dma;
+ } rx;
+
+ u32 mib_counter[MAX_HW_MIB_COUNTER];
+ struct velocity_opt options;
+
+ u32 int_mask;
+
+ u32 flags;
+
+ u32 mii_status;
+ u32 phy_id;
+ int multicast_limit;
+
+ u8 vCAMmask[(VCAM_SIZE / 8)];
+ u8 mCAMmask[(MCAM_SIZE / 8)];
+
+ spinlock_t lock;
+
+ int wol_opts;
+ u8 wol_passwd[6];
+
+ struct velocity_context context;
+
+ u32 ticks;
+
+ u8 rev_id;
+
+ struct napi_struct napi;
+};
+
+/**
+ * velocity_get_ip - find an IP address for the device
+ * @vptr: Velocity to query
+ *
+ * Dig out an IP address for this interface so that we can
+ * configure wakeup with WOL for ARP. If there are multiple IP
+ * addresses on this chain then we use the first - multi-IP WOL is not
+ * supported.
+ *
+ */
+
+static inline int velocity_get_ip(struct velocity_info *vptr)
+{
+ struct in_device *in_dev;
+ struct in_ifaddr *ifa;
+ int res = -ENOENT;
+
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(vptr->dev);
+ if (in_dev != NULL) {
+ ifa = (struct in_ifaddr *) in_dev->ifa_list;
+ if (ifa != NULL) {
+ memcpy(vptr->ip_addr, &ifa->ifa_address, 4);
+ res = 0;
+ }
+ }
+ rcu_read_unlock();
+ return res;
+}
+
+/**
+ * velocity_update_hw_mibs - fetch MIB counters from chip
+ * @vptr: velocity to update
+ *
+ * The velocity hardware keeps certain counters on the hardware
+ * side. We need to read these when the user asks for statistics
+ * or when they overflow (causing an interrupt). Reading a
+ * statistic clears it, so we keep running master counters in
+ * software.
+ */
+
+static inline void velocity_update_hw_mibs(struct velocity_info *vptr)
+{
+ u32 tmp;
+ int i;
+ BYTE_REG_BITS_ON(MIBCR_MIBFLSH, &(vptr->mac_regs->MIBCR));
+
+ while (BYTE_REG_BITS_IS_ON(MIBCR_MIBFLSH, &(vptr->mac_regs->MIBCR)));
+
+ BYTE_REG_BITS_ON(MIBCR_MPTRINI, &(vptr->mac_regs->MIBCR));
+ for (i = 0; i < HW_MIB_SIZE; i++) {
+ tmp = readl(&(vptr->mac_regs->MIBData)) & 0x00FFFFFFUL;
+ vptr->mib_counter[i] += tmp;
+ }
+}
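+
+/*
+ * Editorial note: the hardware MIB counters are only 24 bits wide (hence
+ * the 0x00FFFFFFUL mask above), so they must be flushed and accumulated
+ * into the 32-bit software counters before they can wrap.
+ */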
+
+/**
+ * init_flow_control_register - set up flow control
+ * @vptr: velocity to configure
+ *
+ * Configure the flow control registers for this velocity device.
+ */
+
+static inline void init_flow_control_register(struct velocity_info *vptr)
+{
+ struct mac_regs __iomem * regs = vptr->mac_regs;
+
+ /* Set {XHITH1, XHITH0, XLTH1, XLTH0} in FlowCR1 to {1, 0, 1, 1},
+ which depends on RD=64, and turn on XONEN in FlowCR1 */
+ writel((CR0_XONEN | CR0_XHITH1 | CR0_XLTH1 | CR0_XLTH0), &regs->CR0Set);
+ writel((CR0_FDXTFCEN | CR0_FDXRFCEN | CR0_HDXFCEN | CR0_XHITH0), &regs->CR0Clr);
+
+ /* Set TxPauseTimer to 0xFFFF */
+ writew(0xFFFF, &regs->tx_pause_timer);
+
+ /* Initialize RBRDU to Rx buffer count. */
+ writew(vptr->options.numrx, &regs->RBRDU);
+}
+
+
+#endif
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
new file mode 100644
index 000000000000..d5a826063a82
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -0,0 +1,36 @@
+#
+# Xilinx device configuration
+#
+
+config NET_VENDOR_XILINX
+ bool "Xilinx devices"
+ default y
+ depends on PPC || PPC32 || MICROBLAZE
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Xilinx devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_XILINX
+
+config XILINX_EMACLITE
+ tristate "Xilinx 10/100 Ethernet Lite support"
+ depends on (PPC32 || MICROBLAZE)
+ select PHYLIB
+ ---help---
+ This driver supports the 10/100 Ethernet Lite from Xilinx.
+
+config XILINX_LL_TEMAC
+ tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
+ depends on (PPC || MICROBLAZE)
+ select PHYLIB
+ ---help---
+ This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
+ core used in Xilinx Spartan and Virtex FPGAs.
+
+endif # NET_VENDOR_XILINX
diff --git a/drivers/net/ethernet/xilinx/Makefile b/drivers/net/ethernet/xilinx/Makefile
new file mode 100644
index 000000000000..5feac734ea45
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Xilinx network device drivers.
+#
+
+ll_temac-objs := ll_temac_main.o ll_temac_mdio.o
+obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o
+obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
new file mode 100644
index 000000000000..522abe2ff25a
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -0,0 +1,385 @@
+
+#ifndef XILINX_LL_TEMAC_H
+#define XILINX_LL_TEMAC_H
+
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_PPC_DCR
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+#endif
+
+/* packet size info */
+#define XTE_HDR_SIZE 14 /* size of Ethernet header */
+#define XTE_TRL_SIZE 4 /* size of Ethernet trailer (FCS) */
+#define XTE_JUMBO_MTU 9000
+#define XTE_MAX_JUMBO_FRAME_SIZE (XTE_JUMBO_MTU + XTE_HDR_SIZE + XTE_TRL_SIZE)
+
+/* Configuration options */
+
+/* Accept all incoming packets.
+ * This option defaults to disabled (cleared) */
+#define XTE_OPTION_PROMISC (1 << 0)
+/* Jumbo frame support for Tx & Rx.
+ * This option defaults to disabled (cleared) */
+#define XTE_OPTION_JUMBO (1 << 1)
+/* VLAN Rx & Tx frame support.
+ * This option defaults to disabled (cleared) */
+#define XTE_OPTION_VLAN (1 << 2)
+/* Enable recognition of flow control frames on Rx
+ * This option defaults to enabled (set) */
+#define XTE_OPTION_FLOW_CONTROL (1 << 4)
+/* Strip FCS and PAD from incoming frames.
+ * Note: PAD from VLAN frames is not stripped.
+ * This option defaults to disabled (cleared) */
+#define XTE_OPTION_FCS_STRIP (1 << 5)
+/* Generate FCS field and add PAD automatically for outgoing frames.
+ * This option defaults to enabled (set) */
+#define XTE_OPTION_FCS_INSERT (1 << 6)
+/* Enable Length/Type error checking for incoming frames. When this option
+ * is set, the MAC will filter frames that have a mismatched type/length
+ * field and, if XTE_OPTION_REPORT_RXERR is set, the user is notified when
+ * these types of frames are encountered. When this option is cleared, the
+ * MAC will allow these types of frames to be received.
+ * This option defaults to enabled (set) */
+#define XTE_OPTION_LENTYPE_ERR (1 << 7)
+/* Enable the transmitter.
+ * This option defaults to enabled (set) */
+#define XTE_OPTION_TXEN (1 << 11)
+/* Enable the receiver.
+ * This option defaults to enabled (set) */
+#define XTE_OPTION_RXEN (1 << 12)
+
+/* Default options set when device is initialized or reset */
+#define XTE_OPTION_DEFAULTS \
+ (XTE_OPTION_TXEN | \
+ XTE_OPTION_FLOW_CONTROL | \
+ XTE_OPTION_RXEN)
+
+/* XPS_LL_TEMAC SDMA registers definition */
+
+#define TX_NXTDESC_PTR 0x00 /* r */
+#define TX_CURBUF_ADDR 0x01 /* r */
+#define TX_CURBUF_LENGTH 0x02 /* r */
+#define TX_CURDESC_PTR 0x03 /* rw */
+#define TX_TAILDESC_PTR 0x04 /* rw */
+#define TX_CHNL_CTRL 0x05 /* rw */
+/*
+ 0:7 24:31 IRQTimeout
+ 8:15 16:23 IRQCount
+ 16:20 11:15 Reserved
+ 21 10 0
+ 22 9 UseIntOnEnd
+ 23 8 LdIRQCnt
+ 24 7 IRQEn
+ 25:28 3:6 Reserved
+ 29 2 IrqErrEn
+ 30 1 IrqDlyEn
+ 31 0 IrqCoalEn
+*/
+#define CHNL_CTRL_IRQ_IOE (1 << 9)
+#define CHNL_CTRL_IRQ_EN (1 << 7)
+#define CHNL_CTRL_IRQ_ERR_EN (1 << 2)
+#define CHNL_CTRL_IRQ_DLY_EN (1 << 1)
+#define CHNL_CTRL_IRQ_COAL_EN (1 << 0)
+#define TX_IRQ_REG 0x06 /* rw */
+/*
+ 0:7 24:31 DltTmrValue
+ 8:15 16:23 ClscCntrValue
+ 16:17 14:15 Reserved
+ 18:21 10:13 ClscCnt
+ 22:23 8:9 DlyCnt
+ 24:28 3:7 Reserved
+ 29 2 ErrIrq
+ 30 1 DlyIrq
+ 31 0 CoalIrq
+ */
+#define TX_CHNL_STS 0x07 /* r */
+/*
+ 0:9 22:31 Reserved
+ 10 21 TailPErr
+ 11 20 CmpErr
+ 12 19 AddrErr
+ 13 18 NxtPErr
+ 14 17 CurPErr
+ 15 16 BsyWr
+ 16:23 8:15 Reserved
+ 24 7 Error
+ 25 6 IOE
+ 26 5 SOE
+ 27 4 Cmplt
+ 28 3 SOP
+ 29 2 EOP
+ 30 1 EngBusy
+ 31 0 Reserved
+*/
+
+#define RX_NXTDESC_PTR 0x08 /* r */
+#define RX_CURBUF_ADDR 0x09 /* r */
+#define RX_CURBUF_LENGTH 0x0a /* r */
+#define RX_CURDESC_PTR 0x0b /* rw */
+#define RX_TAILDESC_PTR 0x0c /* rw */
+#define RX_CHNL_CTRL 0x0d /* rw */
+/*
+ 0:7 24:31 IRQTimeout
+ 8:15 16:23 IRQCount
+ 16:20 11:15 Reserved
+ 21 10 0
+ 22 9 UseIntOnEnd
+ 23 8 LdIRQCnt
+ 24 7 IRQEn
+ 25:28 3:6 Reserved
+ 29 2 IrqErrEn
+ 30 1 IrqDlyEn
+ 31 0 IrqCoalEn
+ */
+#define RX_IRQ_REG 0x0e /* rw */
+#define IRQ_COAL (1 << 0)
+#define IRQ_DLY (1 << 1)
+#define IRQ_ERR (1 << 2)
+#define IRQ_DMAERR (1 << 7) /* apparently not documented in the HW spec */
+/*
+ 0:7 24:31 DltTmrValue
+ 8:15 16:23 ClscCntrValue
+ 16:17 14:15 Reserved
+ 18:21 10:13 ClscCnt
+ 22:23 8:9 DlyCnt
+ 24:28 3:7 Reserved
+*/
+#define RX_CHNL_STS 0x0f /* r */
+#define CHNL_STS_ENGBUSY (1 << 1)
+#define CHNL_STS_EOP (1 << 2)
+#define CHNL_STS_SOP (1 << 3)
+#define CHNL_STS_CMPLT (1 << 4)
+#define CHNL_STS_SOE (1 << 5)
+#define CHNL_STS_IOE (1 << 6)
+#define CHNL_STS_ERR (1 << 7)
+
+#define CHNL_STS_BSYWR (1 << 16)
+#define CHNL_STS_CURPERR (1 << 17)
+#define CHNL_STS_NXTPERR (1 << 18)
+#define CHNL_STS_ADDRERR (1 << 19)
+#define CHNL_STS_CMPERR (1 << 20)
+#define CHNL_STS_TAILERR (1 << 21)
+/*
+ 0:9 22:31 Reserved
+ 10 21 TailPErr
+ 11 20 CmpErr
+ 12 19 AddrErr
+ 13 18 NxtPErr
+ 14 17 CurPErr
+ 15 16 BsyWr
+ 16:23 8:15 Reserved
+ 24 7 Error
+ 25 6 IOE
+ 26 5 SOE
+ 27 4 Cmplt
+ 28 3 SOP
+ 29 2 EOP
+ 30 1 EngBusy
+ 31 0 Reserved
+*/
+
+#define DMA_CONTROL_REG 0x10 /* rw */
+#define DMA_CONTROL_RST (1 << 0)
+#define DMA_TAIL_ENABLE (1 << 2)
+
+/* XPS_LL_TEMAC direct registers definition */
+
+#define XTE_RAF0_OFFSET 0x00
+#define RAF0_RST (1 << 0)
+#define RAF0_MCSTREJ (1 << 1)
+#define RAF0_BCSTREJ (1 << 2)
+#define XTE_TPF0_OFFSET 0x04
+#define XTE_IFGP0_OFFSET 0x08
+#define XTE_ISR0_OFFSET 0x0c
+#define ISR0_HARDACSCMPLT (1 << 0)
+#define ISR0_AUTONEG (1 << 1)
+#define ISR0_RXCMPLT (1 << 2)
+#define ISR0_RXREJ (1 << 3)
+#define ISR0_RXFIFOOVR (1 << 4)
+#define ISR0_TXCMPLT (1 << 5)
+#define ISR0_RXDCMLCK (1 << 6)
+
+#define XTE_IPR0_OFFSET 0x10
+#define XTE_IER0_OFFSET 0x14
+
+#define XTE_MSW0_OFFSET 0x20
+#define XTE_LSW0_OFFSET 0x24
+#define XTE_CTL0_OFFSET 0x28
+#define XTE_RDY0_OFFSET 0x2c
+
+#define XTE_RSE_MIIM_RR_MASK 0x0002
+#define XTE_RSE_MIIM_WR_MASK 0x0004
+#define XTE_RSE_CFG_RR_MASK 0x0020
+#define XTE_RSE_CFG_WR_MASK 0x0040
+#define XTE_RDY0_HARD_ACS_RDY_MASK (0x10000)
+
+/* XPS_LL_TEMAC indirect registers offset definition */
+
+#define XTE_RXC0_OFFSET 0x00000200 /* Rx configuration word 0 */
+#define XTE_RXC1_OFFSET 0x00000240 /* Rx configuration word 1 */
+#define XTE_RXC1_RXRST_MASK (1 << 31) /* Receiver reset */
+#define XTE_RXC1_RXJMBO_MASK (1 << 30) /* Jumbo frame enable */
+#define XTE_RXC1_RXFCS_MASK (1 << 29) /* FCS not stripped */
+#define XTE_RXC1_RXEN_MASK (1 << 28) /* Receiver enable */
+#define XTE_RXC1_RXVLAN_MASK (1 << 27) /* VLAN enable */
+#define XTE_RXC1_RXHD_MASK (1 << 26) /* Half duplex */
+#define XTE_RXC1_RXLT_MASK (1 << 25) /* Length/type check disable */
+
+#define XTE_TXC_OFFSET 0x00000280 /* Tx configuration */
+#define XTE_TXC_TXRST_MASK (1 << 31) /* Transmitter reset */
+#define XTE_TXC_TXJMBO_MASK (1 << 30) /* Jumbo frame enable */
+#define XTE_TXC_TXFCS_MASK (1 << 29) /* Generate FCS */
+#define XTE_TXC_TXEN_MASK (1 << 28) /* Transmitter enable */
+#define XTE_TXC_TXVLAN_MASK (1 << 27) /* VLAN enable */
+#define XTE_TXC_TXHD_MASK (1 << 26) /* Half duplex */
+
+#define XTE_FCC_OFFSET 0x000002C0 /* Flow control config */
+#define XTE_FCC_RXFLO_MASK (1 << 29) /* Rx flow control enable */
+#define XTE_FCC_TXFLO_MASK (1 << 30) /* Tx flow control enable */
+
+#define XTE_EMCFG_OFFSET 0x00000300 /* EMAC configuration */
+#define XTE_EMCFG_LINKSPD_MASK 0xC0000000 /* Link speed */
+#define XTE_EMCFG_HOSTEN_MASK (1 << 26) /* Host interface enable */
+#define XTE_EMCFG_LINKSPD_10 0x00000000 /* 10 Mbit LINKSPD_MASK */
+#define XTE_EMCFG_LINKSPD_100 (1 << 30) /* 100 Mbit LINKSPD_MASK */
+#define XTE_EMCFG_LINKSPD_1000 (1 << 31) /* 1000 Mbit LINKSPD_MASK */
+
+#define XTE_GMIC_OFFSET 0x00000320 /* RGMII/SGMII config */
+#define XTE_MC_OFFSET 0x00000340 /* MDIO configuration */
+#define XTE_UAW0_OFFSET 0x00000380 /* Unicast address word 0 */
+#define XTE_UAW1_OFFSET 0x00000384 /* Unicast address word 1 */
+
+#define XTE_MAW0_OFFSET 0x00000388 /* Multicast addr word 0 */
+#define XTE_MAW1_OFFSET 0x0000038C /* Multicast addr word 1 */
+#define XTE_AFM_OFFSET 0x00000390 /* Promiscuous mode */
+#define XTE_AFM_EPPRM_MASK (1 << 31) /* Promiscuous mode enable */
+
+/* Interrupt Request status */
+#define XTE_TIS_OFFSET 0x000003A0
+#define TIS_FRIS (1 << 0)
+#define TIS_MRIS (1 << 1)
+#define TIS_MWIS (1 << 2)
+#define TIS_ARIS (1 << 3)
+#define TIS_AWIS (1 << 4)
+#define TIS_CRIS (1 << 5)
+#define TIS_CWIS (1 << 6)
+
+#define XTE_TIE_OFFSET 0x000003A4 /* Interrupt enable */
+
+/* MII Management Control register (MGTCR) */
+#define XTE_MGTDR_OFFSET 0x000003B0 /* MII data */
+#define XTE_MIIMAI_OFFSET 0x000003B4 /* MII control */
+
+#define CNTLREG_WRITE_ENABLE_MASK 0x8000
+#define CNTLREG_EMAC1SEL_MASK 0x0400
+#define CNTLREG_ADDRESSCODE_MASK 0x03ff
+
+/* CDMAC descriptor status bit definitions */
+
+#define STS_CTRL_APP0_ERR (1 << 31)
+#define STS_CTRL_APP0_IRQONEND (1 << 30)
+/* undocumented */
+#define STS_CTRL_APP0_STOPONEND (1 << 29)
+#define STS_CTRL_APP0_CMPLT (1 << 28)
+#define STS_CTRL_APP0_SOP (1 << 27)
+#define STS_CTRL_APP0_EOP (1 << 26)
+#define STS_CTRL_APP0_ENGBUSY (1 << 25)
+/* undocumented */
+#define STS_CTRL_APP0_ENGRST (1 << 24)
+
+#define TX_CONTROL_CALC_CSUM_MASK 1
+
+#define MULTICAST_CAM_TABLE_NUM 4
+
+/* TEMAC Synthesis features */
+#define TEMAC_FEATURE_RX_CSUM (1 << 0)
+#define TEMAC_FEATURE_TX_CSUM (1 << 1)
+
+/* TX/RX CURDESC_PTR points to first descriptor */
+/* TX/RX TAILDESC_PTR points to last descriptor in linked list */
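+
+/*
+ * A usage sketch (mirroring how ll_temac_main.c drives these registers):
+ * a transfer is started, or resumed, by advancing the tail pointer,
+ * e.g. on transmit:
+ *
+ *     tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
+ *     lp->dma_out(lp, TX_TAILDESC_PTR, tail_p);  -- DMA start
+ */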
+
+/**
+ * struct cdmac_bd - LocalLink buffer descriptor format
+ *
+ * app0 bits:
+ * 0 Error
+ * 1 IrqOnEnd generate an interrupt at completion of DMA op
+ * 2 reserved
+ * 3 completed Current descriptor completed
+ * 4 SOP TX marks first desc / RX marks first desc
+ * 5 EOP TX marks last desc / RX marks last desc
+ * 6 EngBusy DMA is processing
+ * 7 reserved
+ * 8:31 application specific
+ */
+struct cdmac_bd {
+ u32 next; /* Physical address of next buffer descriptor */
+ u32 phys;
+ u32 len;
+ u32 app0;
+ u32 app1; /* TX start << 16 | insert */
+ u32 app2; /* TX csum */
+ u32 app3;
+ u32 app4; /* skb for TX length for RX */
+};
+
+struct temac_local {
+ struct net_device *ndev;
+ struct device *dev;
+
+ /* Connection to PHY device */
+ struct phy_device *phy_dev; /* Pointer to PHY device */
+ struct device_node *phy_node;
+
+ /* MDIO bus data */
+ struct mii_bus *mii_bus; /* MII bus reference */
+ int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */
+
+ /* IO registers, dma functions and IRQs */
+ void __iomem *regs;
+ void __iomem *sdma_regs;
+#ifdef CONFIG_PPC_DCR
+ dcr_host_t sdma_dcrs;
+#endif
+ u32 (*dma_in)(struct temac_local *, int);
+ void (*dma_out)(struct temac_local *, int, u32);
+
+ int tx_irq;
+ int rx_irq;
+ int emac_num;
+
+ struct sk_buff **rx_skb;
+ spinlock_t rx_lock;
+ struct mutex indirect_mutex;
+ u32 options; /* Current options word */
+ int last_link;
+ unsigned int temac_features;
+
+ /* Buffer descriptors */
+ struct cdmac_bd *tx_bd_v;
+ dma_addr_t tx_bd_p;
+ struct cdmac_bd *rx_bd_v;
+ dma_addr_t rx_bd_p;
+ int tx_bd_ci;
+ int tx_bd_next;
+ int tx_bd_tail;
+ int rx_bd_ci;
+};
+
+/* ll_temac_main.c */
+u32 temac_ior(struct temac_local *lp, int offset);
+void temac_iow(struct temac_local *lp, int offset, u32 value);
+int temac_indirect_busywait(struct temac_local *lp);
+u32 temac_indirect_in32(struct temac_local *lp, int reg);
+void temac_indirect_out32(struct temac_local *lp, int reg, u32 value);
+
+/* ll_temac_mdio.c */
+int temac_mdio_setup(struct temac_local *lp, struct device_node *np);
+void temac_mdio_teardown(struct temac_local *lp);
+
+#endif /* XILINX_LL_TEMAC_H */
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
new file mode 100644
index 000000000000..570776edc01b
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -0,0 +1,1153 @@
+/*
+ * Driver for Xilinx TEMAC Ethernet device
+ *
+ * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
+ * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
+ * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
+ *
+ * This is a driver for the Xilinx ll_temac ipcore which is often used
+ * in the Virtex and Spartan series of chips.
+ *
+ * Notes:
+ * - The ll_temac hardware uses indirect access for many of the TEMAC
+ * registers, including the MDIO bus. However, indirect access to the
+ * MDIO registers takes considerably more clock cycles than access to
+ * the TEMAC registers.
+ * MDIO accesses are long, so threads doing them should probably sleep
+ * rather than busywait. However, since only one indirect access can be
+ * in progress at any given time, that means that *all* indirect accesses
+ * could end up sleeping (to wait for an MDIO access to complete).
+ * Fortunately none of the indirect accesses are on the 'hot' path for tx
+ * or rx, so this should be okay.
+ *
+ * TODO:
+ * - Factor out locallink DMA code into separate driver
+ * - Fix multicast assignment.
+ * - Fix support for hardware checksumming.
+ * - Testing. Lots and lots of testing.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/tcp.h> /* needed for sizeof(tcphdr) */
+#include <linux/udp.h> /* needed for sizeof(udphdr) */
+#include <linux/phy.h>
+#include <linux/in.h>
+#include <linux/io.h>
+#include <linux/ip.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+
+#include "ll_temac.h"
+
+#define TX_BD_NUM 64
+#define RX_BD_NUM 128
+
+/* ---------------------------------------------------------------------
+ * Low level register access functions
+ */
+
+u32 temac_ior(struct temac_local *lp, int offset)
+{
+ return in_be32((u32 __iomem *)(lp->regs + offset));
+}
+
+void temac_iow(struct temac_local *lp, int offset, u32 value)
+{
+ out_be32((u32 __iomem *)(lp->regs + offset), value);
+}
+
+int temac_indirect_busywait(struct temac_local *lp)
+{
+ unsigned long end = jiffies + 2;
+
+ while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
+ if (time_after_eq(jiffies, end)) {
+ WARN_ON(1);
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+ return 0;
+}
+
+/**
+ * temac_indirect_in32 - read a TEMAC register via the indirect interface
+ * @lp: TEMAC driver private data
+ * @reg: offset of the register to read
+ *
+ * lp->indirect_mutex must be held when calling this function
+ */
+u32 temac_indirect_in32(struct temac_local *lp, int reg)
+{
+ u32 val;
+
+ if (temac_indirect_busywait(lp))
+ return -ETIMEDOUT;
+ temac_iow(lp, XTE_CTL0_OFFSET, reg);
+ if (temac_indirect_busywait(lp))
+ return -ETIMEDOUT;
+ val = temac_ior(lp, XTE_LSW0_OFFSET);
+
+ return val;
+}
+
+/**
+ * temac_indirect_out32 - write a TEMAC register via the indirect interface
+ * @lp: TEMAC driver private data
+ * @reg: offset of the register to write
+ * @value: value to write
+ *
+ * lp->indirect_mutex must be held when calling this function
+ */
+void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
+{
+ if (temac_indirect_busywait(lp))
+ return;
+ temac_iow(lp, XTE_LSW0_OFFSET, value);
+ temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
+}
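+
+/*
+ * Locking sketch (mirrors the callers in this file, e.g.
+ * temac_set_multicast_list()):
+ *
+ *     mutex_lock(&lp->indirect_mutex);
+ *     val = temac_indirect_in32(lp, XTE_AFM_OFFSET);
+ *     temac_indirect_out32(lp, XTE_AFM_OFFSET, val | XTE_AFM_EPPRM_MASK);
+ *     mutex_unlock(&lp->indirect_mutex);
+ */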
+
+/**
+ * temac_dma_in32 - Memory mapped DMA read. The register input is a DCR
+ * word address, which is converted here to a memory mapped byte address.
+ */
+static u32 temac_dma_in32(struct temac_local *lp, int reg)
+{
+ return in_be32((u32 __iomem *)(lp->sdma_regs + (reg << 2)));
+}
+
+/**
+ * temac_dma_out32 - Memory mapped DMA write. The register input is a DCR
+ * word address, which is converted here to a memory mapped byte address.
+ */
+static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
+{
+ out_be32((u32 __iomem *)(lp->sdma_regs + (reg << 2)), value);
+}
+
+/* DMA register access functions can be DCR based or memory mapped.
+ * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
+ * memory mapped.
+ */
+#ifdef CONFIG_PPC_DCR
+
+/**
+ * temac_dma_dcr_in - DCR based DMA read
+ */
+static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
+{
+ return dcr_read(lp->sdma_dcrs, reg);
+}
+
+/**
+ * temac_dma_dcr_out - DCR based DMA write
+ */
+static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
+{
+ dcr_write(lp->sdma_dcrs, reg, value);
+}
+
+/**
+ * temac_dcr_setup - If the DMA is DCR based, then setup the address and
+ * I/O functions
+ */
+static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
+ struct device_node *np)
+{
+ unsigned int dcrs;
+
+ /* setup the dcr address mapping if it's in the device tree */
+
+ dcrs = dcr_resource_start(np, 0);
+ if (dcrs != 0) {
+ lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
+ lp->dma_in = temac_dma_dcr_in;
+ lp->dma_out = temac_dma_dcr_out;
+ dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
+ return 0;
+ }
+ /* no DCR in the device tree, indicate a failure */
+ return -1;
+}
+
+#else
+
+/*
+ * temac_dcr_setup - This is a stub for when DCR is not supported,
+ * such as with MicroBlaze
+ */
+static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
+ struct device_node *np)
+{
+ return -1;
+}
+
+#endif
+
+/**
+ * temac_dma_bd_release - Release buffer descriptor rings
+ */
+static void temac_dma_bd_release(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ int i;
+
+ /* lp->rx_skb may not have been allocated if the init path failed early */
+ if (lp->rx_skb) {
+ for (i = 0; i < RX_BD_NUM; i++) {
+ if (!lp->rx_skb[i])
+ break;
+ dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
+ XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb(lp->rx_skb[i]);
+ }
+ }
+ if (lp->rx_bd_v)
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ lp->rx_bd_v, lp->rx_bd_p);
+ if (lp->tx_bd_v)
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ lp->tx_bd_v, lp->tx_bd_p);
+ kfree(lp->rx_skb); /* kfree(NULL) is a no-op */
+}
+
+/**
+ * temac_dma_bd_init - Setup buffer descriptor rings
+ */
+static int temac_dma_bd_init(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ struct sk_buff *skb;
+ int i;
+
+ lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
+ if (!lp->rx_skb) {
+ dev_err(&ndev->dev,
+ "can't allocate memory for DMA RX buffer\n");
+ goto out;
+ }
+ /* allocate the tx and rx ring buffer descriptors. */
+ /* returns a virtual address and a physical address. */
+ lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ &lp->tx_bd_p, GFP_KERNEL);
+ if (!lp->tx_bd_v) {
+ dev_err(&ndev->dev,
+ "unable to allocate DMA TX buffer descriptors");
+ goto out;
+ }
+ lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ &lp->rx_bd_p, GFP_KERNEL);
+ if (!lp->rx_bd_v) {
+ dev_err(&ndev->dev,
+ "unable to allocate DMA RX buffer descriptors");
+ goto out;
+ }
+
+ memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
+ for (i = 0; i < TX_BD_NUM; i++) {
+ lp->tx_bd_v[i].next = lp->tx_bd_p +
+ sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
+ }
+
+ memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
+ for (i = 0; i < RX_BD_NUM; i++) {
+ lp->rx_bd_v[i].next = lp->rx_bd_p +
+ sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
+
+ skb = netdev_alloc_skb_ip_align(ndev,
+ XTE_MAX_JUMBO_FRAME_SIZE);
+
+ if (!skb) {
+ dev_err(&ndev->dev, "alloc_skb error %d\n", i);
+ goto out;
+ }
+ lp->rx_skb[i] = skb;
+ /* returns physical address of skb->data */
+ lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
+ skb->data,
+ XTE_MAX_JUMBO_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
+ lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
+ }
+
+ lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
+ CHNL_CTRL_IRQ_EN |
+ CHNL_CTRL_IRQ_DLY_EN |
+ CHNL_CTRL_IRQ_COAL_EN);
+ /* 0x10220483 */
+ /* 0x00100483 */
+ lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
+ CHNL_CTRL_IRQ_EN |
+ CHNL_CTRL_IRQ_DLY_EN |
+ CHNL_CTRL_IRQ_COAL_EN |
+ CHNL_CTRL_IRQ_IOE);
+ /* 0xff010283 */
+
+ lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
+ lp->dma_out(lp, RX_TAILDESC_PTR,
+ lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+ lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
+
+ return 0;
+
+out:
+ temac_dma_bd_release(ndev);
+ return -ENOMEM;
+}
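+
+/*
+ * Ring index convention (a reading aid, inferred from the code below):
+ * tx_bd_tail is the producer index, advanced by temac_start_xmit();
+ * tx_bd_ci is the consumer index, advanced by temac_start_xmit_done();
+ * rx_bd_ci is the next receive descriptor that ll_temac_recv() will
+ * examine. All of them wrap at TX_BD_NUM or RX_BD_NUM respectively.
+ */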
+
+/* ---------------------------------------------------------------------
+ * net_device_ops
+ */
+
+static int temac_set_mac_address(struct net_device *ndev, void *address)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ if (address)
+ memcpy(ndev->dev_addr, address, ETH_ALEN);
+
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ random_ether_addr(ndev->dev_addr);
+
+ /* Set up the unicast MAC address filter with the given MAC address */
+ mutex_lock(&lp->indirect_mutex);
+ temac_indirect_out32(lp, XTE_UAW0_OFFSET,
+ (ndev->dev_addr[0]) |
+ (ndev->dev_addr[1] << 8) |
+ (ndev->dev_addr[2] << 16) |
+ (ndev->dev_addr[3] << 24));
+ /* There are reserved bits in EUAW1 so don't affect them.
+ * Set MAC bits [47:32] in EUAW1. */
+ temac_indirect_out32(lp, XTE_UAW1_OFFSET,
+ (ndev->dev_addr[4] & 0x000000ff) |
+ (ndev->dev_addr[5] << 8));
+ mutex_unlock(&lp->indirect_mutex);
+
+ return 0;
+}
+
+static int netdev_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ return temac_set_mac_address(ndev, addr->sa_data);
+}
+
+static void temac_set_multicast_list(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ u32 multi_addr_msw, multi_addr_lsw, val;
+ int i;
+
+ mutex_lock(&lp->indirect_mutex);
+ if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
+ netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
+ /*
+ * We must make the kernel realise we had to move
+ * into promisc mode or we start all out war on
+ * the cable. If it was a promisc request the
+ * flag is already set. If not we assert it.
+ */
+ ndev->flags |= IFF_PROMISC;
+ temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
+ dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
+ } else if (!netdev_mc_empty(ndev)) {
+ struct netdev_hw_addr *ha;
+
+ i = 0;
+ netdev_for_each_mc_addr(ha, ndev) {
+ if (i >= MULTICAST_CAM_TABLE_NUM)
+ break;
+ multi_addr_msw = ((ha->addr[3] << 24) |
+ (ha->addr[2] << 16) |
+ (ha->addr[1] << 8) |
+ (ha->addr[0]));
+ temac_indirect_out32(lp, XTE_MAW0_OFFSET,
+ multi_addr_msw);
+ multi_addr_lsw = ((ha->addr[5] << 8) |
+ (ha->addr[4]) | (i << 16));
+ temac_indirect_out32(lp, XTE_MAW1_OFFSET,
+ multi_addr_lsw);
+ i++;
+ }
+ } else {
+ val = temac_indirect_in32(lp, XTE_AFM_OFFSET);
+ temac_indirect_out32(lp, XTE_AFM_OFFSET,
+ val & ~XTE_AFM_EPPRM_MASK);
+ temac_indirect_out32(lp, XTE_MAW0_OFFSET, 0);
+ temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0);
+ dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
+ }
+ mutex_unlock(&lp->indirect_mutex);
+}
+
+struct temac_option {
+ int flg;
+ u32 opt;
+ u32 reg;
+ u32 m_or;
+ u32 m_and;
+} temac_options[] = {
+ /* Turn on jumbo packet support for both Rx and Tx */
+ {
+ .opt = XTE_OPTION_JUMBO,
+ .reg = XTE_TXC_OFFSET,
+ .m_or = XTE_TXC_TXJMBO_MASK,
+ },
+ {
+ .opt = XTE_OPTION_JUMBO,
+ .reg = XTE_RXC1_OFFSET,
+ .m_or = XTE_RXC1_RXJMBO_MASK,
+ },
+ /* Turn on VLAN packet support for both Rx and Tx */
+ {
+ .opt = XTE_OPTION_VLAN,
+ .reg = XTE_TXC_OFFSET,
+ .m_or = XTE_TXC_TXVLAN_MASK,
+ },
+ {
+ .opt = XTE_OPTION_VLAN,
+ .reg = XTE_RXC1_OFFSET,
+ .m_or = XTE_RXC1_RXVLAN_MASK,
+ },
+ /* Turn on FCS stripping on receive packets */
+ {
+ .opt = XTE_OPTION_FCS_STRIP,
+ .reg = XTE_RXC1_OFFSET,
+ .m_or = XTE_RXC1_RXFCS_MASK,
+ },
+ /* Turn on FCS insertion on transmit packets */
+ {
+ .opt = XTE_OPTION_FCS_INSERT,
+ .reg = XTE_TXC_OFFSET,
+ .m_or = XTE_TXC_TXFCS_MASK,
+ },
+ /* Turn on length/type field checking on receive packets */
+ {
+ .opt = XTE_OPTION_LENTYPE_ERR,
+ .reg = XTE_RXC1_OFFSET,
+ .m_or = XTE_RXC1_RXLT_MASK,
+ },
+ /* Turn on flow control */
+ {
+ .opt = XTE_OPTION_FLOW_CONTROL,
+ .reg = XTE_FCC_OFFSET,
+ .m_or = XTE_FCC_RXFLO_MASK,
+ },
+ /* Turn on flow control */
+ {
+ .opt = XTE_OPTION_FLOW_CONTROL,
+ .reg = XTE_FCC_OFFSET,
+ .m_or = XTE_FCC_TXFLO_MASK,
+ },
+ /* Turn on promiscuous frame filtering (all frames are received ) */
+ {
+ .opt = XTE_OPTION_PROMISC,
+ .reg = XTE_AFM_OFFSET,
+ .m_or = XTE_AFM_EPPRM_MASK,
+ },
+ /* Enable transmitter if not already enabled */
+ {
+ .opt = XTE_OPTION_TXEN,
+ .reg = XTE_TXC_OFFSET,
+ .m_or = XTE_TXC_TXEN_MASK,
+ },
+ /* Enable receiver if not already enabled */
+ {
+ .opt = XTE_OPTION_RXEN,
+ .reg = XTE_RXC1_OFFSET,
+ .m_or = XTE_RXC1_RXEN_MASK,
+ },
+ {}
+};
+
+/**
+ * temac_setoptions - apply an option word to the hardware registers
+ */
+static u32 temac_setoptions(struct net_device *ndev, u32 options)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ struct temac_option *tp = &temac_options[0];
+ int reg;
+
+ mutex_lock(&lp->indirect_mutex);
+ while (tp->opt) {
+ reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or;
+ if (options & tp->opt)
+ reg |= tp->m_or;
+ temac_indirect_out32(lp, tp->reg, reg);
+ tp++;
+ }
+ lp->options |= options;
+ mutex_unlock(&lp->indirect_mutex);
+
+ return 0;
+}
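+
+/*
+ * Usage sketch: callers pass the complete desired option word; e.g. the
+ * reset path below applies the defaults with the MAC halted first:
+ *
+ *     temac_setoptions(ndev,
+ *                      lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));
+ */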
+
+/* Initialize temac */
+static void temac_device_reset(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ u32 timeout;
+ u32 val;
+
+ /* Perform a software reset */
+
+ /* 0x300 host enable bit ? */
+ /* reset PHY through control register ?:1 */
+
+ dev_dbg(&ndev->dev, "%s()\n", __func__);
+
+ mutex_lock(&lp->indirect_mutex);
+ /* Reset the receiver and wait for it to finish reset */
+ temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
+ timeout = 1000;
+ while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
+ udelay(1);
+ if (--timeout == 0) {
+ dev_err(&ndev->dev,
+ "temac_device_reset RX reset timeout!!\n");
+ break;
+ }
+ }
+
+ /* Reset the transmitter and wait for it to finish reset */
+ temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
+ timeout = 1000;
+ while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
+ udelay(1);
+ if (--timeout == 0) {
+ dev_err(&ndev->dev,
+ "temac_device_reset TX reset timeout!!\n");
+ break;
+ }
+ }
+
+ /* Disable the receiver */
+ val = temac_indirect_in32(lp, XTE_RXC1_OFFSET);
+ temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);
+
+ /* Reset Local Link (DMA) */
+ lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
+ timeout = 1000;
+ while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
+ udelay(1);
+ if (--timeout == 0) {
+ dev_err(&ndev->dev,
+ "temac_device_reset DMA reset timeout!!\n");
+ break;
+ }
+ }
+ lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
+
+ if (temac_dma_bd_init(ndev)) {
+ dev_err(&ndev->dev,
+ "temac_device_reset descriptor allocation failed\n");
+ }
+
+ temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0);
+ temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0);
+ temac_indirect_out32(lp, XTE_TXC_OFFSET, 0);
+ temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
+
+ mutex_unlock(&lp->indirect_mutex);
+
+ /* Sync default options with HW
+ * but leave receiver and transmitter disabled. */
+ temac_setoptions(ndev,
+ lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));
+
+ temac_set_mac_address(ndev, NULL);
+
+ /* Set address filter table */
+ temac_set_multicast_list(ndev);
+ if (temac_setoptions(ndev, lp->options))
+ dev_err(&ndev->dev, "Error setting TEMAC options\n");
+
+ /* Init Driver variable */
+ ndev->trans_start = jiffies; /* prevent tx timeout */
+}
+
+static void temac_adjust_link(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ struct phy_device *phy = lp->phy_dev;
+ u32 mii_speed;
+ int link_state;
+
+ /* hash together the state values to decide if something has changed */
+ link_state = phy->speed | (phy->duplex << 1) | phy->link;
+
+ mutex_lock(&lp->indirect_mutex);
+ if (lp->last_link != link_state) {
+ mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET);
+ mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;
+
+ switch (phy->speed) {
+ case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
+ case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
+ case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
+ }
+
+ /* Write new speed setting out to TEMAC */
+ temac_indirect_out32(lp, XTE_EMCFG_OFFSET, mii_speed);
+ lp->last_link = link_state;
+ phy_print_status(phy);
+ }
+ mutex_unlock(&lp->indirect_mutex);
+}
+
+static void temac_start_xmit_done(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ struct cdmac_bd *cur_p;
+ unsigned int stat = 0;
+
+ cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
+ stat = cur_p->app0;
+
+ while (stat & STS_CTRL_APP0_CMPLT) {
+ dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len,
+ DMA_TO_DEVICE);
+ if (cur_p->app4)
+ dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
+
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += cur_p->len;
+
+ lp->tx_bd_ci++;
+ if (lp->tx_bd_ci >= TX_BD_NUM)
+ lp->tx_bd_ci = 0;
+
+ cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
+ stat = cur_p->app0;
+ }
+
+ netif_wake_queue(ndev);
+}
+
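+/* Check that num_frag + 1 descriptors are free, starting at the current
+ * tail: a non-zero app0 word means that descriptor still belongs to an
+ * in-flight frame (a reading aid; the ownership convention is inferred
+ * from temac_start_xmit_done() above). Returns NETDEV_TX_BUSY when the
+ * ring is too full and 0 otherwise.
+ */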
+static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
+{
+ struct cdmac_bd *cur_p;
+ int tail;
+
+ tail = lp->tx_bd_tail;
+ cur_p = &lp->tx_bd_v[tail];
+
+ do {
+ if (cur_p->app0)
+ return NETDEV_TX_BUSY;
+
+ tail++;
+ if (tail >= TX_BD_NUM)
+ tail = 0;
+
+ cur_p = &lp->tx_bd_v[tail];
+ num_frag--;
+ } while (num_frag >= 0);
+
+ return 0;
+}
+
+static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ struct cdmac_bd *cur_p;
+ dma_addr_t start_p, tail_p;
+ int ii;
+ unsigned long num_frag;
+ skb_frag_t *frag;
+
+ num_frag = skb_shinfo(skb)->nr_frags;
+ frag = &skb_shinfo(skb)->frags[0];
+ start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
+ cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+
+ if (temac_check_tx_bd_space(lp, num_frag)) {
+ if (!netif_queue_stopped(ndev))
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
+ cur_p->app0 = 0;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ unsigned int csum_start_off = skb_checksum_start_offset(skb);
+ unsigned int csum_index_off = csum_start_off + skb->csum_offset;
+
+ cur_p->app0 |= 1; /* TX Checksum Enabled */
+ cur_p->app1 = (csum_start_off << 16) | csum_index_off;
+ cur_p->app2 = 0; /* initial checksum seed */
+ }
+
+ cur_p->app0 |= STS_CTRL_APP0_SOP;
+ cur_p->len = skb_headlen(skb);
+ cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ cur_p->app4 = (unsigned long)skb;
+
+ for (ii = 0; ii < num_frag; ii++) {
+ lp->tx_bd_tail++;
+ if (lp->tx_bd_tail >= TX_BD_NUM)
+ lp->tx_bd_tail = 0;
+
+ cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ cur_p->phys = dma_map_single(ndev->dev.parent,
+ (void *)page_address(frag->page) +
+ frag->page_offset,
+ frag->size, DMA_TO_DEVICE);
+ cur_p->len = frag->size;
+ cur_p->app0 = 0;
+ frag++;
+ }
+ cur_p->app0 |= STS_CTRL_APP0_EOP;
+
+ tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
+ lp->tx_bd_tail++;
+ if (lp->tx_bd_tail >= TX_BD_NUM)
+ lp->tx_bd_tail = 0;
+
+ skb_tx_timestamp(skb);
+
+ /* Kick off the transfer */
+ lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
+
+ return NETDEV_TX_OK;
+}
+
+static void ll_temac_recv(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ struct sk_buff *skb, *new_skb;
+ unsigned int bdstat;
+ struct cdmac_bd *cur_p;
+ dma_addr_t tail_p;
+ int length;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->rx_lock, flags);
+
+ tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
+ cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
+
+ bdstat = cur_p->app0;
+ while (bdstat & STS_CTRL_APP0_CMPLT) {
+
+ skb = lp->rx_skb[lp->rx_bd_ci];
+ length = cur_p->app4 & 0x3FFF;
+
+ dma_unmap_single(ndev->dev.parent, cur_p->phys, length,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, length);
+ skb->dev = ndev;
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb_checksum_none_assert(skb);
+
+ /* if we're doing rx csum offload, set it up */
+ if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
+ (skb->protocol == __constant_htons(ETH_P_IP)) &&
+ (skb->len > 64)) {
+
+ skb->csum = cur_p->app3 & 0xFFFF;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+
+ if (!skb_defer_rx_timestamp(skb))
+ netif_rx(skb);
+
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += length;
+
+ new_skb = netdev_alloc_skb_ip_align(ndev,
+ XTE_MAX_JUMBO_FRAME_SIZE);
+
+ if (!new_skb) {
+ dev_err(&ndev->dev, "no memory for new sk_buff\n");
+ spin_unlock_irqrestore(&lp->rx_lock, flags);
+ return;
+ }
+
+ cur_p->app0 = STS_CTRL_APP0_IRQONEND;
+ cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
+ XTE_MAX_JUMBO_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ cur_p->len = XTE_MAX_JUMBO_FRAME_SIZE;
+ lp->rx_skb[lp->rx_bd_ci] = new_skb;
+
+ lp->rx_bd_ci++;
+ if (lp->rx_bd_ci >= RX_BD_NUM)
+ lp->rx_bd_ci = 0;
+
+ cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
+ bdstat = cur_p->app0;
+ }
+ lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);
+
+ spin_unlock_irqrestore(&lp->rx_lock, flags);
+}
+
+static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
+{
+ struct net_device *ndev = _ndev;
+ struct temac_local *lp = netdev_priv(ndev);
+ unsigned int status;
+
+ status = lp->dma_in(lp, TX_IRQ_REG);
+ lp->dma_out(lp, TX_IRQ_REG, status);
+
+ if (status & (IRQ_COAL | IRQ_DLY))
+ temac_start_xmit_done(lp->ndev);
+ if (status & IRQ_DMAERR)
+ dev_err(&ndev->dev, "DMA error 0x%x\n", status);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
+{
+ struct net_device *ndev = _ndev;
+ struct temac_local *lp = netdev_priv(ndev);
+ unsigned int status;
+
+ /* Read and clear the status registers */
+ status = lp->dma_in(lp, RX_IRQ_REG);
+ lp->dma_out(lp, RX_IRQ_REG, status);
+
+ if (status & (IRQ_COAL | IRQ_DLY))
+ ll_temac_recv(lp->ndev);
+
+ return IRQ_HANDLED;
+}
+
+static int temac_open(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ int rc;
+
+ dev_dbg(&ndev->dev, "temac_open()\n");
+
+ if (lp->phy_node) {
+ lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+ temac_adjust_link, 0, 0);
+ if (!lp->phy_dev) {
+ dev_err(lp->dev, "of_phy_connect() failed\n");
+ return -ENODEV;
+ }
+
+ phy_start(lp->phy_dev);
+ }
+
+ rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
+ if (rc)
+ goto err_tx_irq;
+ rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
+ if (rc)
+ goto err_rx_irq;
+
+ temac_device_reset(ndev);
+ return 0;
+
+ err_rx_irq:
+ free_irq(lp->tx_irq, ndev);
+ err_tx_irq:
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ lp->phy_dev = NULL;
+ dev_err(lp->dev, "request_irq() failed\n");
+ return rc;
+}
+
+static int temac_stop(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ dev_dbg(&ndev->dev, "temac_close()\n");
+
+ free_irq(lp->tx_irq, ndev);
+ free_irq(lp->rx_irq, ndev);
+
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ lp->phy_dev = NULL;
+
+ temac_dma_bd_release(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void
+temac_poll_controller(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ disable_irq(lp->tx_irq);
+ disable_irq(lp->rx_irq);
+
+ ll_temac_rx_irq(lp->rx_irq, ndev);
+ ll_temac_tx_irq(lp->tx_irq, ndev);
+
+ enable_irq(lp->tx_irq);
+ enable_irq(lp->rx_irq);
+}
+#endif
+
+static const struct net_device_ops temac_netdev_ops = {
+ .ndo_open = temac_open,
+ .ndo_stop = temac_stop,
+ .ndo_start_xmit = temac_start_xmit,
+ .ndo_set_mac_address = netdev_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = temac_poll_controller,
+#endif
+};
+
+/* ---------------------------------------------------------------------
+ * SYSFS device attributes
+ */
+static ssize_t temac_show_llink_regs(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct temac_local *lp = netdev_priv(ndev);
+ int i, len = 0;
+
+ for (i = 0; i < 0x11; i++)
+ len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
+ (i % 8) == 7 ? "\n" : " ");
+ len += sprintf(buf + len, "\n");
+
+ return len;
+}
+
+static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);
+
+static struct attribute *temac_device_attrs[] = {
+ &dev_attr_llink_regs.attr,
+ NULL,
+};
+
+static const struct attribute_group temac_attr_group = {
+ .attrs = temac_device_attrs,
+};
+
+static int __devinit temac_of_probe(struct platform_device *op)
+{
+ struct device_node *np;
+ struct temac_local *lp;
+ struct net_device *ndev;
+ const void *addr;
+ __be32 *p;
+ int size, rc = 0;
+
+ /* Init network device structure */
+ ndev = alloc_etherdev(sizeof(*lp));
+ if (!ndev) {
+ dev_err(&op->dev, "could not allocate device.\n");
+ return -ENOMEM;
+ }
+ ether_setup(ndev);
+ dev_set_drvdata(&op->dev, ndev);
+ SET_NETDEV_DEV(ndev, &op->dev);
+ ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
+ ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
+ ndev->netdev_ops = &temac_netdev_ops;
+#if 0
+ ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
+ ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
+ ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
+ ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
+ ndev->features |= NETIF_F_HW_VLAN_TX; /* Transmit VLAN hw accel */
+ ndev->features |= NETIF_F_HW_VLAN_RX; /* Receive VLAN hw acceleration */
+ ndev->features |= NETIF_F_HW_VLAN_FILTER; /* Receive VLAN filtering */
+ ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
+ ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
+ ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
+ ndev->features |= NETIF_F_LRO; /* large receive offload */
+#endif
+
+ /* setup temac private info structure */
+ lp = netdev_priv(ndev);
+ lp->ndev = ndev;
+ lp->dev = &op->dev;
+ lp->options = XTE_OPTION_DEFAULTS;
+ spin_lock_init(&lp->rx_lock);
+ mutex_init(&lp->indirect_mutex);
+
+ /* map device registers */
+ lp->regs = of_iomap(op->dev.of_node, 0);
+ if (!lp->regs) {
+ dev_err(&op->dev, "could not map temac regs.\n");
+ rc = -ENOMEM;
+ goto nodev;
+ }
+
+ /* Setup checksum offload, but default to off if not specified */
+ lp->temac_features = 0;
+ p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
+ if (p && be32_to_cpu(*p)) {
+ lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
+ /* Can checksum TCP/UDP over IPv4. */
+ ndev->features |= NETIF_F_IP_CSUM;
+ }
+ p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
+ if (p && be32_to_cpu(*p))
+ lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
+
+ /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
+ np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
+ if (!np) {
+ dev_err(&op->dev, "could not find DMA node\n");
+ rc = -ENODEV;
+ goto err_iounmap;
+ }
+
+ /* Setup the DMA register accesses, could be DCR or memory mapped */
+ if (temac_dcr_setup(lp, op, np)) {
+
+ /* no DCR in the device tree, try non-DCR */
+ lp->sdma_regs = of_iomap(np, 0);
+ if (lp->sdma_regs) {
+ lp->dma_in = temac_dma_in32;
+ lp->dma_out = temac_dma_out32;
+ dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
+ } else {
+ dev_err(&op->dev, "unable to map DMA registers\n");
+ of_node_put(np);
+ rc = -ENOMEM;
+ goto err_iounmap;
+ }
+ }
+
+ lp->rx_irq = irq_of_parse_and_map(np, 0);
+ lp->tx_irq = irq_of_parse_and_map(np, 1);
+
+ of_node_put(np); /* Finished with the DMA node; drop the reference */
+
+ if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
+ dev_err(&op->dev, "could not determine irqs\n");
+ rc = -ENODEV;
+ goto err_iounmap_2;
+ }
+
+ /* Retrieve the MAC address */
+ addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
+ if (!addr || (size != 6)) {
+ dev_err(&op->dev, "could not find MAC address\n");
+ rc = -ENODEV;
+ goto err_iounmap_2;
+ }
+ temac_set_mac_address(ndev, (void *)addr);
+
+ rc = temac_mdio_setup(lp, op->dev.of_node);
+ if (rc)
+ dev_warn(&op->dev, "error registering MDIO bus\n");
+
+ lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
+ if (lp->phy_node)
+ dev_dbg(lp->dev, "using PHY node %s (%p)\n", np->full_name, np);
+
+ /* Add the device attributes */
+ rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
+ if (rc) {
+ dev_err(lp->dev, "Error creating sysfs files\n");
+ goto err_iounmap_2;
+ }
+
+ rc = register_netdev(lp->ndev);
+ if (rc) {
+ dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
+ goto err_register_ndev;
+ }
+
+ return 0;
+
+ err_register_ndev:
+ sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
+ err_iounmap_2:
+ if (lp->sdma_regs)
+ iounmap(lp->sdma_regs);
+ err_iounmap:
+ iounmap(lp->regs);
+ nodev:
+ free_netdev(ndev);
+ ndev = NULL;
+ return rc;
+}
+
+static int __devexit temac_of_remove(struct platform_device *op)
+{
+ struct net_device *ndev = dev_get_drvdata(&op->dev);
+ struct temac_local *lp = netdev_priv(ndev);
+
+ temac_mdio_teardown(lp);
+ unregister_netdev(ndev);
+ sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
+ if (lp->phy_node)
+ of_node_put(lp->phy_node);
+ lp->phy_node = NULL;
+ dev_set_drvdata(&op->dev, NULL);
+ iounmap(lp->regs);
+ if (lp->sdma_regs)
+ iounmap(lp->sdma_regs);
+ free_netdev(ndev);
+ return 0;
+}
+
+static struct of_device_id temac_of_match[] __devinitdata = {
+ { .compatible = "xlnx,xps-ll-temac-1.01.b", },
+ { .compatible = "xlnx,xps-ll-temac-2.00.a", },
+ { .compatible = "xlnx,xps-ll-temac-2.02.a", },
+ { .compatible = "xlnx,xps-ll-temac-2.03.a", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, temac_of_match);
+
+static struct platform_driver temac_of_driver = {
+ .probe = temac_of_probe,
+ .remove = __devexit_p(temac_of_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "xilinx_temac",
+ .of_match_table = temac_of_match,
+ },
+};
+
+static int __init temac_init(void)
+{
+ return platform_driver_register(&temac_of_driver);
+}
+module_init(temac_init);
+
+static void __exit temac_exit(void)
+{
+ platform_driver_unregister(&temac_of_driver);
+}
+module_exit(temac_exit);
+
+MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
+MODULE_AUTHOR("Yoshio Kashiwagi");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
new file mode 100644
index 000000000000..8cf9d4f56bb2
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
@@ -0,0 +1,122 @@
+/*
+ * MDIO bus driver for the Xilinx TEMAC device
+ *
+ * Copyright (c) 2009 Secret Lab Technologies, Ltd.
+ */
+
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/mutex.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/of_mdio.h>
+
+#include "ll_temac.h"
+
+/* ---------------------------------------------------------------------
+ * MDIO Bus functions
+ */
+static int temac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct temac_local *lp = bus->priv;
+ u32 rc;
+
+ /* Write the PHY address to the MIIM Access Initiator register.
+ * When the transfer completes, the PHY register value will appear
+ * in the LSW0 register */
+ mutex_lock(&lp->indirect_mutex);
+ temac_iow(lp, XTE_LSW0_OFFSET, (phy_id << 5) | reg);
+ rc = temac_indirect_in32(lp, XTE_MIIMAI_OFFSET);
+ mutex_unlock(&lp->indirect_mutex);
+
+ dev_dbg(lp->dev, "temac_mdio_read(phy_id=%i, reg=%x) == %x\n",
+ phy_id, reg, rc);
+
+ return rc;
+}
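+
+/*
+ * The MIIM address word written to LSW0 above packs the PHY and register
+ * numbers as (phy_id << 5) | reg; for example phy_id 1 with reg 2
+ * (MII_PHYSID1) encodes to 0x22.
+ */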
+
+static int temac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
+{
+ struct temac_local *lp = bus->priv;
+
+ dev_dbg(lp->dev, "temac_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
+ phy_id, reg, val);
+
+ /* First write the desired value into the write data register
+ * and then write the address into the access initiator register
+ */
+ mutex_lock(&lp->indirect_mutex);
+ temac_indirect_out32(lp, XTE_MGTDR_OFFSET, val);
+ temac_indirect_out32(lp, XTE_MIIMAI_OFFSET, (phy_id << 5) | reg);
+ mutex_unlock(&lp->indirect_mutex);
+
+ return 0;
+}
+
+int temac_mdio_setup(struct temac_local *lp, struct device_node *np)
+{
+ struct mii_bus *bus;
+ const u32 *bus_hz;
+ int clk_div;
+ int rc, size;
+ struct resource res;
+
+ /* Calculate a reasonable divisor for the clock rate */
+ clk_div = 0x3f; /* worst-case default setting */
+ bus_hz = of_get_property(np, "clock-frequency", &size);
+ if (bus_hz && size >= sizeof(*bus_hz)) {
+ clk_div = (*bus_hz) / (2500 * 1000 * 2) - 1;
+ if (clk_div < 1)
+ clk_div = 1;
+ if (clk_div > 0x3f)
+ clk_div = 0x3f;
+ }
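+
+ /* For example, a 100 MHz bus clock gives clk_div = 100000000 /
+ * 5000000 - 1 = 19, i.e. an MDC rate of 100 MHz / (2 * (19 + 1))
+ * = 2.5 MHz (an assumption about the divider formula, read off
+ * the calculation above). */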
+
+ /* Enable the MDIO bus by asserting the enable bit and writing
+ * in the clock config */
+ mutex_lock(&lp->indirect_mutex);
+ temac_indirect_out32(lp, XTE_MC_OFFSET, 1 << 6 | clk_div);
+ mutex_unlock(&lp->indirect_mutex);
+
+ bus = mdiobus_alloc();
+ if (!bus)
+ return -ENOMEM;
+
+ of_address_to_resource(np, 0, &res);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
+ (unsigned long long)res.start);
+ bus->priv = lp;
+ bus->name = "Xilinx TEMAC MDIO";
+ bus->read = temac_mdio_read;
+ bus->write = temac_mdio_write;
+ bus->parent = lp->dev;
+ bus->irq = lp->mdio_irqs; /* preallocated IRQ table */
+
+ lp->mii_bus = bus;
+
+ rc = of_mdiobus_register(bus, np);
+ if (rc)
+ goto err_register;
+
+ mutex_lock(&lp->indirect_mutex);
+ dev_dbg(lp->dev, "MDIO bus registered; MC:%x\n",
+ temac_indirect_in32(lp, XTE_MC_OFFSET));
+ mutex_unlock(&lp->indirect_mutex);
+ return 0;
+
+ err_register:
+ mdiobus_free(bus);
+ return rc;
+}
+
+void temac_mdio_teardown(struct temac_local *lp)
+{
+ mdiobus_unregister(lp->mii_bus);
+ /* bus->irq points at the preallocated mdio_irqs[] table,
+ * so there is nothing to kfree() here */
+ mdiobus_free(lp->mii_bus);
+ lp->mii_bus = NULL;
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
new file mode 100644
index 000000000000..8018d7d045b0
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -0,0 +1,1330 @@
+/*
+ * Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device.
+ *
+ * This is a new flat driver which is based on the original emac_lite
+ * driver from John Williams <john.williams@petalogix.com>.
+ *
+ * 2007-2009 (c) Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/interrupt.h>
+
+#define DRIVER_NAME "xilinx_emaclite"
+
+/* Register offsets for the EmacLite Core */
+#define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */
+#define XEL_MDIOADDR_OFFSET 0x07E4 /* MDIO Address Register */
+#define XEL_MDIOWR_OFFSET 0x07E8 /* MDIO Write Data Register */
+#define XEL_MDIORD_OFFSET 0x07EC /* MDIO Read Data Register */
+#define XEL_MDIOCTRL_OFFSET 0x07F0 /* MDIO Control Register */
+#define XEL_TPLR_OFFSET 0x07F4 /* Tx packet length */
+#define XEL_GIER_OFFSET 0x07F8 /* GIE Register */
+#define XEL_TSR_OFFSET 0x07FC /* Tx status */
+
+#define XEL_RXBUFF_OFFSET 0x1000 /* Receive Buffer */
+#define XEL_RPLR_OFFSET 0x100C /* Rx packet length */
+#define XEL_RSR_OFFSET 0x17FC /* Rx status */
+
+#define XEL_BUFFER_OFFSET 0x0800 /* Next Tx/Rx buffer's offset */
+
+/* MDIO Address Register Bit Masks */
+#define XEL_MDIOADDR_REGADR_MASK 0x0000001F /* Register Address */
+#define XEL_MDIOADDR_PHYADR_MASK 0x000003E0 /* PHY Address */
+#define XEL_MDIOADDR_PHYADR_SHIFT 5
+#define XEL_MDIOADDR_OP_MASK 0x00000400 /* RD/WR Operation */
+
+/* MDIO Write Data Register Bit Masks */
+#define XEL_MDIOWR_WRDATA_MASK 0x0000FFFF /* Data to be Written */
+
+/* MDIO Read Data Register Bit Masks */
+#define XEL_MDIORD_RDDATA_MASK 0x0000FFFF /* Data to be Read */
+
+/* MDIO Control Register Bit Masks */
+#define XEL_MDIOCTRL_MDIOSTS_MASK 0x00000001 /* MDIO Status Mask */
+#define XEL_MDIOCTRL_MDIOEN_MASK 0x00000008 /* MDIO Enable */
+
+/* Global Interrupt Enable Register (GIER) Bit Masks */
+#define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */
+
+/* Transmit Status Register (TSR) Bit Masks */
+#define XEL_TSR_XMIT_BUSY_MASK 0x00000001 /* Tx complete */
+#define XEL_TSR_PROGRAM_MASK 0x00000002 /* Program the MAC address */
+#define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */
+#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit
+ * only. This is not documented
+ * in the HW spec */
+
+/* Define for programming the MAC address into the EmacLite */
+#define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK)
+
+/* Receive Status Register (RSR) */
+#define XEL_RSR_RECV_DONE_MASK 0x00000001 /* Rx complete */
+#define XEL_RSR_RECV_IE_MASK 0x00000008 /* Rx interrupt enable bit */
+
+/* Transmit Packet Length Register (TPLR) */
+#define XEL_TPLR_LENGTH_MASK 0x0000FFFF /* Tx packet length */
+
+/* Receive Packet Length Register (RPLR) */
+#define XEL_RPLR_LENGTH_MASK 0x0000FFFF /* Rx packet length */
+
+#define XEL_HEADER_OFFSET 12 /* Offset to length field */
+#define XEL_HEADER_SHIFT 16 /* Shift value for length */
+
+/* General Ethernet Definitions */
+#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */
+#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */
+
+#define TX_TIMEOUT (60*HZ) /* Tx timeout is 60 seconds. */
+#define ALIGNMENT 4
+
+/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
+#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
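+/* For example, BUFFER_ALIGN(adr) is 2 for an address ending in ...2 and
+ * 0 for an address that is already 32-bit aligned. */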
+
+/**
+ * struct net_local - Our private per device data
+ * @ndev: instance of the network device
+ * @tx_ping_pong: indicates whether Tx Pong buffer is configured in HW
+ * @rx_ping_pong: indicates whether Rx Pong buffer is configured in HW
+ * @next_tx_buf_to_use: next Tx buffer to write to
+ * @next_rx_buf_to_use: next Rx buffer to read from
+ * @base_addr: base address of the Emaclite device
+ * @reset_lock: lock used for synchronization
+ * @deferred_skb: holds an skb (for transmission at a later time) when the
+ * Tx buffer is not free
+ * @phy_dev: pointer to the PHY device
+ * @phy_node: pointer to the PHY device node
+ * @mii_bus: pointer to the MII bus
+ * @mdio_irqs: IRQs table for MDIO bus
+ * @last_link: last link status
+ * @has_mdio: indicates whether MDIO is included in the HW
+ */
+struct net_local {
+
+ struct net_device *ndev;
+
+ bool tx_ping_pong;
+ bool rx_ping_pong;
+ u32 next_tx_buf_to_use;
+ u32 next_rx_buf_to_use;
+ void __iomem *base_addr;
+
+ spinlock_t reset_lock;
+ struct sk_buff *deferred_skb;
+
+ struct phy_device *phy_dev;
+ struct device_node *phy_node;
+
+ struct mii_bus *mii_bus;
+ int mdio_irqs[PHY_MAX_ADDR];
+
+ int last_link;
+ bool has_mdio;
+};
+
+
+/*************************/
+/* EmacLite driver calls */
+/*************************/
+
+/**
+ * xemaclite_enable_interrupts - Enable the interrupts for the EmacLite device
+ * @drvdata: Pointer to the Emaclite device private data
+ *
+ * This function enables the Tx and Rx interrupts for the Emaclite device along
+ * with the Global Interrupt Enable.
+ */
+static void xemaclite_enable_interrupts(struct net_local *drvdata)
+{
+ u32 reg_data;
+
+ /* Enable the Tx interrupts for the first Buffer */
+ reg_data = in_be32(drvdata->base_addr + XEL_TSR_OFFSET);
+ out_be32(drvdata->base_addr + XEL_TSR_OFFSET,
+ reg_data | XEL_TSR_XMIT_IE_MASK);
+
+ /* Enable the Tx interrupts for the second Buffer if
+ * configured in HW */
+ if (drvdata->tx_ping_pong != 0) {
+ reg_data = in_be32(drvdata->base_addr +
+ XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+ out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
+ XEL_TSR_OFFSET,
+ reg_data | XEL_TSR_XMIT_IE_MASK);
+ }
+
+ /* Enable the Rx interrupts for the first buffer */
+ out_be32(drvdata->base_addr + XEL_RSR_OFFSET,
+ XEL_RSR_RECV_IE_MASK);
+
+ /* Enable the Rx interrupts for the second Buffer if
+ * configured in HW */
+ if (drvdata->rx_ping_pong != 0) {
+ out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
+ XEL_RSR_OFFSET,
+ XEL_RSR_RECV_IE_MASK);
+ }
+
+ /* Enable the Global Interrupt Enable */
+ out_be32(drvdata->base_addr + XEL_GIER_OFFSET, XEL_GIER_GIE_MASK);
+}
+
+/**
+ * xemaclite_disable_interrupts - Disable the interrupts for the EmacLite device
+ * @drvdata: Pointer to the Emaclite device private data
+ *
+ * This function disables the Tx and Rx interrupts for the Emaclite device,
+ * along with the Global Interrupt Enable.
+ */
+static void xemaclite_disable_interrupts(struct net_local *drvdata)
+{
+ u32 reg_data;
+
+ /* Clear the Global Interrupt Enable */
+ out_be32(drvdata->base_addr + XEL_GIER_OFFSET, 0);
+
+ /* Disable the Tx interrupts for the first buffer */
+ reg_data = in_be32(drvdata->base_addr + XEL_TSR_OFFSET);
+ out_be32(drvdata->base_addr + XEL_TSR_OFFSET,
+ reg_data & (~XEL_TSR_XMIT_IE_MASK));
+
+ /* Disable the Tx interrupts for the second Buffer
+ * if configured in HW */
+ if (drvdata->tx_ping_pong != 0) {
+ reg_data = in_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
+ XEL_TSR_OFFSET);
+ out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
+ XEL_TSR_OFFSET,
+ reg_data & (~XEL_TSR_XMIT_IE_MASK));
+ }
+
+ /* Disable the Rx interrupts for the first buffer */
+ reg_data = in_be32(drvdata->base_addr + XEL_RSR_OFFSET);
+ out_be32(drvdata->base_addr + XEL_RSR_OFFSET,
+ reg_data & (~XEL_RSR_RECV_IE_MASK));
+
+ /* Disable the Rx interrupts for the second buffer
+ * if configured in HW */
+ if (drvdata->rx_ping_pong != 0) {
+
+ reg_data = in_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
+ XEL_RSR_OFFSET);
+ out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
+ XEL_RSR_OFFSET,
+ reg_data & (~XEL_RSR_RECV_IE_MASK));
+ }
+}
+
+/**
+ * xemaclite_aligned_write - Write from 16-bit aligned to 32-bit aligned address
+ * @src_ptr: Void pointer to the 16-bit aligned source address
+ * @dest_ptr: Pointer to the 32-bit aligned destination address
+ * @length: Number of bytes to write from source to destination
+ *
+ * This function writes data from a 16-bit aligned buffer to a 32-bit aligned
+ * address in the EmacLite device.
+ */
+static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
+ unsigned length)
+{
+ u32 align_buffer;
+ u32 *to_u32_ptr;
+ u16 *from_u16_ptr, *to_u16_ptr;
+
+ to_u32_ptr = dest_ptr;
+ from_u16_ptr = src_ptr;
+ align_buffer = 0;
+
+ for (; length > 3; length -= 4) {
+ to_u16_ptr = (u16 *)&align_buffer;
+ *to_u16_ptr++ = *from_u16_ptr++;
+ *to_u16_ptr++ = *from_u16_ptr++;
+
+ /* Output a word */
+ *to_u32_ptr++ = align_buffer;
+ }
+ if (length) {
+ u8 *from_u8_ptr, *to_u8_ptr;
+
+ /* Set up to output the remaining data */
+ align_buffer = 0;
+ to_u8_ptr = (u8 *) &align_buffer;
+ from_u8_ptr = (u8 *) from_u16_ptr;
+
+ /* Output the remaining data */
+ for (; length > 0; length--)
+ *to_u8_ptr++ = *from_u8_ptr++;
+
+ *to_u32_ptr = align_buffer;
+ }
+}
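+
+/*
+ * Usage sketch (mirroring xemaclite_send_data() below): the EmacLite
+ * packet buffers only take 32-bit accesses while skb data is merely
+ * 16-bit aligned, hence:
+ *
+ *     xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
+ */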
+
+/**
+ * xemaclite_aligned_read - Read from 32-bit aligned to 16-bit aligned buffer
+ * @src_ptr: Pointer to the 32-bit aligned source address
+ * @dest_ptr: Pointer to the 16-bit aligned destination address
+ * @length: Number of bytes to read from source to destination
+ *
+ * This function reads data from a 32-bit aligned address in the EmacLite device
+ * to a 16-bit aligned buffer.
+ */
+static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
+ unsigned length)
+{
+ u16 *to_u16_ptr, *from_u16_ptr;
+ u32 *from_u32_ptr;
+ u32 align_buffer;
+
+ from_u32_ptr = src_ptr;
+ to_u16_ptr = (u16 *) dest_ptr;
+
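+	/* Mirror of xemaclite_aligned_write(): fetch each word with a
+	 * single 32-bit read, then unpack it into the 16-bit aligned
+	 * destination buffer. */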
+ for (; length > 3; length -= 4) {
+ /* Copy each word into the temporary buffer */
+ align_buffer = *from_u32_ptr++;
+ from_u16_ptr = (u16 *)&align_buffer;
+
+ /* Read data from source */
+ *to_u16_ptr++ = *from_u16_ptr++;
+ *to_u16_ptr++ = *from_u16_ptr++;
+ }
+
+ if (length) {
+ u8 *to_u8_ptr, *from_u8_ptr;
+
+ /* Set up to read the remaining data */
+ to_u8_ptr = (u8 *) to_u16_ptr;
+ align_buffer = *from_u32_ptr++;
+ from_u8_ptr = (u8 *) &align_buffer;
+
+ /* Read the remaining data */
+ for (; length > 0; length--)
+			*to_u8_ptr++ = *from_u8_ptr++;
+ }
+}
+
+/**
+ * xemaclite_send_data - Send an Ethernet frame
+ * @drvdata: Pointer to the Emaclite device private data
+ * @data: Pointer to the data to be sent
+ * @byte_count: Total frame size, including header
+ *
+ * This function checks if the Tx buffer of the Emaclite device is free to send
+ * data. If so, it fills the Tx buffer with data for transmission. Otherwise, it
+ * returns an error.
+ *
+ * Return: 0 upon success or -1 if the buffer(s) are full.
+ *
+ * Note: The maximum Tx packet size cannot exceed the Ethernet header
+ * (14 bytes) plus the maximum MTU (1500 bytes), excluding FCS.
+ */
+static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
+ unsigned int byte_count)
+{
+ u32 reg_data;
+ void __iomem *addr;
+
+ /* Determine the expected Tx buffer address */
+ addr = drvdata->base_addr + drvdata->next_tx_buf_to_use;
+
+ /* If the length is too large, truncate it */
+ if (byte_count > ETH_FRAME_LEN)
+ byte_count = ETH_FRAME_LEN;
+
+ /* Check if the expected buffer is available */
+ reg_data = in_be32(addr + XEL_TSR_OFFSET);
+ if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
+ XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
+
+ /* Switch to next buffer if configured */
+ if (drvdata->tx_ping_pong != 0)
+ drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET;
+ } else if (drvdata->tx_ping_pong != 0) {
+ /* If the expected buffer is full, try the other buffer,
+ * if it is configured in HW */
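+		/* The ping and pong buffers sit XEL_BUFFER_OFFSET bytes
+		 * apart, so XOR-ing the address with the offset flips to
+		 * the other buffer. */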
+
+ addr = (void __iomem __force *)((u32 __force)addr ^
+ XEL_BUFFER_OFFSET);
+ reg_data = in_be32(addr + XEL_TSR_OFFSET);
+
+ if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
+ XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
+ return -1; /* Buffers were full, return failure */
+ } else
+ return -1; /* Buffer was full, return failure */
+
+ /* Write the frame to the buffer */
+ xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
+
+ out_be32(addr + XEL_TPLR_OFFSET, (byte_count & XEL_TPLR_LENGTH_MASK));
+
+ /* Update the Tx Status Register to indicate that there is a
+ * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
+ * is used by the interrupt handler to check whether a frame
+ * has been transmitted */
+ reg_data = in_be32(addr + XEL_TSR_OFFSET);
+ reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
+ out_be32(addr + XEL_TSR_OFFSET, reg_data);
+
+ return 0;
+}
+
+/**
+ * xemaclite_recv_data - Receive a frame
+ * @drvdata: Pointer to the Emaclite device private data
+ * @data: Address where the data is to be received
+ *
+ * This function is intended to be called from the interrupt context or
+ * with a wrapper which waits for the receive frame to be available.
+ *
+ * Return: Total number of bytes received
+ */
+static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
+{
+ void __iomem *addr;
+ u16 length, proto_type;
+ u32 reg_data;
+
+ /* Determine the expected buffer address */
+ addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);
+
+ /* Verify which buffer has valid data */
+ reg_data = in_be32(addr + XEL_RSR_OFFSET);
+
+ if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
+ if (drvdata->rx_ping_pong != 0)
+ drvdata->next_rx_buf_to_use ^= XEL_BUFFER_OFFSET;
+ } else {
+		/* The instance is out of sync: try the other buffer, if it
+		 * is configured, and return 0 otherwise. Do not update
+		 * 'next_rx_buf_to_use', since it will correct itself on
+		 * subsequent calls */
+ if (drvdata->rx_ping_pong != 0)
+ addr = (void __iomem __force *)((u32 __force)addr ^
+ XEL_BUFFER_OFFSET);
+ else
+ return 0; /* No data was available */
+
+ /* Verify that buffer has valid data */
+ reg_data = in_be32(addr + XEL_RSR_OFFSET);
+ if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
+ XEL_RSR_RECV_DONE_MASK)
+ return 0; /* No data was available */
+ }
+
+ /* Get the protocol type of the ethernet frame that arrived */
+ proto_type = ((ntohl(in_be32(addr + XEL_HEADER_OFFSET +
+ XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
+ XEL_RPLR_LENGTH_MASK);
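+	/* A value larger than the maximum frame size cannot be an 802.3
+	 * length field, so treat it as an EtherType and derive the length
+	 * from the protocol; otherwise trust it as the payload length. */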
+
+ /* Check if received ethernet frame is a raw ethernet frame
+ * or an IP packet or an ARP packet */
+ if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
+
+ if (proto_type == ETH_P_IP) {
+ length = ((ntohl(in_be32(addr +
+ XEL_HEADER_IP_LENGTH_OFFSET +
+ XEL_RXBUFF_OFFSET)) >>
+ XEL_HEADER_SHIFT) &
+ XEL_RPLR_LENGTH_MASK);
+ length += ETH_HLEN + ETH_FCS_LEN;
+
+ } else if (proto_type == ETH_P_ARP)
+ length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN;
+ else
+ /* Field contains type other than IP or ARP, use max
+ * frame size and let user parse it */
+ length = ETH_FRAME_LEN + ETH_FCS_LEN;
+ } else
+ /* Use the length in the frame, plus the header and trailer */
+ length = proto_type + ETH_HLEN + ETH_FCS_LEN;
+
+ /* Read from the EmacLite device */
+ xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
+ data, length);
+
+ /* Acknowledge the frame */
+ reg_data = in_be32(addr + XEL_RSR_OFFSET);
+ reg_data &= ~XEL_RSR_RECV_DONE_MASK;
+ out_be32(addr + XEL_RSR_OFFSET, reg_data);
+
+ return length;
+}
+
+/**
+ * xemaclite_update_address - Update the MAC address in the device
+ * @drvdata: Pointer to the Emaclite device private data
+ * @address_ptr: Pointer to the MAC address (MAC address is a 48-bit value)
+ *
+ * Tx must be idle and Rx should be idle for deterministic results.
+ * It is recommended that this function should be called after the
+ * initialization and before transmission of any packets from the device.
+ * The MAC address can be programmed using any of the two transmit
+ * buffers (if configured).
+ */
+static void xemaclite_update_address(struct net_local *drvdata,
+ u8 *address_ptr)
+{
+ void __iomem *addr;
+ u32 reg_data;
+
+ /* Determine the expected Tx buffer address */
+ addr = drvdata->base_addr + drvdata->next_tx_buf_to_use;
+
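+	/* Program the address like a short transmit: copy the six bytes
+	 * into the Tx buffer, set the length to ETH_ALEN, then flag the
+	 * transfer as a MAC-address load rather than a frame send. */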
+ xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
+
+ out_be32(addr + XEL_TPLR_OFFSET, ETH_ALEN);
+
+ /* Update the MAC address in the EmacLite */
+ reg_data = in_be32(addr + XEL_TSR_OFFSET);
+ out_be32(addr + XEL_TSR_OFFSET, reg_data | XEL_TSR_PROG_MAC_ADDR);
+
+ /* Wait for EmacLite to finish with the MAC address update */
+ while ((in_be32(addr + XEL_TSR_OFFSET) &
+ XEL_TSR_PROG_MAC_ADDR) != 0)
+ ;
+}
+
+/**
+ * xemaclite_set_mac_address - Set the MAC address for this device
+ * @dev: Pointer to the network device instance
+ * @address: Void pointer to the sockaddr structure
+ *
+ * This function copies the HW address from the sockaddr structure to the
+ * net_device structure and updates the address in HW.
+ *
+ * Return: Error if the net device is busy or 0 if the addr is set
+ * successfully
+ */
+static int xemaclite_set_mac_address(struct net_device *dev, void *address)
+{
+ struct net_local *lp = netdev_priv(dev);
+ struct sockaddr *addr = address;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ xemaclite_update_address(lp, dev->dev_addr);
+ return 0;
+}
+
+/**
+ * xemaclite_tx_timeout - Callback for Tx Timeout
+ * @dev: Pointer to the network device
+ *
+ * This function is called when Tx time out occurs for Emaclite device.
+ */
+static void xemaclite_tx_timeout(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ dev_err(&lp->ndev->dev, "Exceeded transmit timeout of %lu ms\n",
+ TX_TIMEOUT * 1000UL / HZ);
+
+ dev->stats.tx_errors++;
+
+ /* Reset the device */
+ spin_lock_irqsave(&lp->reset_lock, flags);
+
+ /* Shouldn't really be necessary, but shouldn't hurt */
+ netif_stop_queue(dev);
+
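+	/* The core has no dedicated reset register; re-arming the
+	 * interrupts and dropping any deferred skb below is how we restore
+	 * a sane state. */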
+ xemaclite_disable_interrupts(lp);
+ xemaclite_enable_interrupts(lp);
+
+ if (lp->deferred_skb) {
+ dev_kfree_skb(lp->deferred_skb);
+ lp->deferred_skb = NULL;
+ dev->stats.tx_errors++;
+ }
+
+	dev->trans_start = jiffies; /* prevent tx timeout */
+
+ /* We're all ready to go. Start the queue */
+ netif_wake_queue(dev);
+ spin_unlock_irqrestore(&lp->reset_lock, flags);
+}
+
+/**********************/
+/* Interrupt Handlers */
+/**********************/
+
+/**
+ * xemaclite_tx_handler - Interrupt handler for frames sent
+ * @dev: Pointer to the network device
+ *
+ * This function updates the number of packets transmitted and handles the
+ * deferred skb, if there is one.
+ */
+static void xemaclite_tx_handler(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ dev->stats.tx_packets++;
+ if (lp->deferred_skb) {
+		if (xemaclite_send_data(lp,
+					(u8 *) lp->deferred_skb->data,
+					lp->deferred_skb->len) != 0)
+			return;
+
+		dev->stats.tx_bytes += lp->deferred_skb->len;
+		dev_kfree_skb_irq(lp->deferred_skb);
+		lp->deferred_skb = NULL;
+		dev->trans_start = jiffies; /* prevent tx timeout */
+		netif_wake_queue(dev);
+ }
+}
+
+/**
+ * xemaclite_rx_handler- Interrupt handler for frames received
+ * @dev: Pointer to the network device
+ *
+ * This function allocates memory for a socket buffer, fills it with data
+ * received and hands it over to the TCP/IP stack.
+ */
+static void xemaclite_rx_handler(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ struct sk_buff *skb;
+ unsigned int align;
+ u32 len;
+
+ len = ETH_FRAME_LEN + ETH_FCS_LEN;
+ skb = dev_alloc_skb(len + ALIGNMENT);
+ if (!skb) {
+ /* Couldn't get memory. */
+ dev->stats.rx_dropped++;
+ dev_err(&lp->ndev->dev, "Could not allocate receive buffer\n");
+ return;
+ }
+
+	/* A new skb should have the data halfword aligned, but this code is
+	 * here just in case that isn't true. Calculate how many bytes we
+	 * should reserve to get the data to start on a word boundary */
+ align = BUFFER_ALIGN(skb->data);
+ if (align)
+ skb_reserve(skb, align);
+
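+	/* Reserve two more bytes so that, after the 14-byte Ethernet
+	 * header, the IP header starts on a 32-bit boundary. */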
+ skb_reserve(skb, 2);
+
+ len = xemaclite_recv_data(lp, (u8 *) skb->data);
+
+ if (!len) {
+ dev->stats.rx_errors++;
+ dev_kfree_skb_irq(skb);
+ return;
+ }
+
+ skb_put(skb, len); /* Tell the skb how much data we got */
+
+ skb->protocol = eth_type_trans(skb, dev);
+ skb_checksum_none_assert(skb);
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+
+ if (!skb_defer_rx_timestamp(skb))
+ netif_rx(skb); /* Send the packet upstream */
+}
+
+/**
+ * xemaclite_interrupt - Interrupt handler for this driver
+ * @irq:	IRQ number of the Emaclite device
+ * @dev_id: Void pointer to the network device instance used as callback
+ * reference
+ *
+ * This function handles the Tx and Rx interrupts of the EmacLite device.
+ *
+ * Return: IRQ_HANDLED, always
+ */
+static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
+{
+	bool tx_complete = false;
+ struct net_device *dev = dev_id;
+ struct net_local *lp = netdev_priv(dev);
+ void __iomem *base_addr = lp->base_addr;
+ u32 tx_status;
+
+ /* Check if there is Rx Data available */
+ if ((in_be32(base_addr + XEL_RSR_OFFSET) & XEL_RSR_RECV_DONE_MASK) ||
+ (in_be32(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
+ & XEL_RSR_RECV_DONE_MASK))
+
+ xemaclite_rx_handler(dev);
+
+ /* Check if the Transmission for the first buffer is completed */
+ tx_status = in_be32(base_addr + XEL_TSR_OFFSET);
+ if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
+ (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
+
+ tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
+ out_be32(base_addr + XEL_TSR_OFFSET, tx_status);
+
+		tx_complete = true;
+ }
+
+ /* Check if the Transmission for the second buffer is completed */
+ tx_status = in_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+ if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
+ (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
+
+ tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
+ out_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET,
+ tx_status);
+
+		tx_complete = true;
+ }
+
+ /* If there was a Tx interrupt, call the Tx Handler */
+	if (tx_complete)
+ xemaclite_tx_handler(dev);
+
+ return IRQ_HANDLED;
+}
+
+/**********************/
+/* MDIO Bus functions */
+/**********************/
+
+/**
+ * xemaclite_mdio_wait - Wait for the MDIO to be ready to use
+ * @lp: Pointer to the Emaclite device private data
+ *
+ * This function waits until the device is ready to accept a new MDIO
+ * request.
+ *
+ * Return: 0 for success, or -ETIMEDOUT on timeout
+ */
+static int xemaclite_mdio_wait(struct net_local *lp)
+{
+	unsigned long end = jiffies + 2;
+
+	/* Wait for the MDIO interface to stop being busy, or time out
+	 * after a couple of jiffies.
+	 */
+	while (in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
+			XEL_MDIOCTRL_MDIOSTS_MASK) {
+		if (time_after(jiffies, end)) {
+ WARN_ON(1);
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+ return 0;
+}
+
+/**
+ * xemaclite_mdio_read - Read from a given MII management register
+ * @bus: the mii_bus struct
+ * @phy_id: the phy address
+ * @reg: register number to read from
+ *
+ * This function waits until the device is ready to accept a new MDIO
+ * request and then writes the phy address to the MDIO Address register
+ * and reads data from the MDIO Read Data register, when it is available.
+ *
+ * Return: Value read from the MII management register, or -ETIMEDOUT on
+ * timeout
+ */
+static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct net_local *lp = bus->priv;
+ u32 ctrl_reg;
+ u32 rc;
+
+ if (xemaclite_mdio_wait(lp))
+ return -ETIMEDOUT;
+
+ /* Write the PHY address, register number and set the OP bit in the
+ * MDIO Address register. Set the Status bit in the MDIO Control
+ * register to start a MDIO read transaction.
+ */
+ ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET,
+ XEL_MDIOADDR_OP_MASK |
+ ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg));
+ out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
+ ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK);
+
+ if (xemaclite_mdio_wait(lp))
+ return -ETIMEDOUT;
+
+ rc = in_be32(lp->base_addr + XEL_MDIORD_OFFSET);
+
+ dev_dbg(&lp->ndev->dev,
+ "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
+ phy_id, reg, rc);
+
+ return rc;
+}
+
+/**
+ * xemaclite_mdio_write - Write to a given MII management register
+ * @bus: the mii_bus struct
+ * @phy_id: the phy address
+ * @reg: register number to write to
+ * @val: value to write to the register number specified by reg
+ *
+ * This function waits until the device is ready to accept a new MDIO
+ * request and then writes the value to the MDIO Write Data register.
+ *
+ * Return: 0 upon success, or -ETIMEDOUT if the device never becomes ready
+ */
+static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
+ u16 val)
+{
+ struct net_local *lp = bus->priv;
+ u32 ctrl_reg;
+
+ dev_dbg(&lp->ndev->dev,
+ "xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
+ phy_id, reg, val);
+
+ if (xemaclite_mdio_wait(lp))
+ return -ETIMEDOUT;
+
+ /* Write the PHY address, register number and clear the OP bit in the
+ * MDIO Address register and then write the value into the MDIO Write
+ * Data register. Finally, set the Status bit in the MDIO Control
+ * register to start a MDIO write transaction.
+ */
+ ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET,
+ ~XEL_MDIOADDR_OP_MASK &
+ ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg));
+ out_be32(lp->base_addr + XEL_MDIOWR_OFFSET, val);
+ out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
+ ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK);
+
+ return 0;
+}
+
+/**
+ * xemaclite_mdio_reset - Reset the mdio bus.
+ * @bus: Pointer to the MII bus
+ *
+ * This function is required per Documentation/networking/phy.txt.
+ * There is no reset sequence in this device, so this function always
+ * returns 0.
+ */
+static int xemaclite_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+/**
+ * xemaclite_mdio_setup - Register mii_bus for the Emaclite device
+ * @lp: Pointer to the Emaclite device private data
+ * @dev:	Pointer to the struct device of the Emaclite device
+ *
+ * This function enables MDIO bus in the Emaclite device and registers a
+ * mii_bus.
+ *
+ * Return: 0 upon success or a negative error upon failure
+ */
+static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
+{
+ struct mii_bus *bus;
+ int rc;
+ struct resource res;
+ struct device_node *np = of_get_parent(lp->phy_node);
+
+ /* Don't register the MDIO bus if the phy_node or its parent node
+ * can't be found.
+ */
+ if (!np)
+ return -ENODEV;
+
+ /* Enable the MDIO bus by asserting the enable bit in MDIO Control
+ * register.
+ */
+ out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
+ XEL_MDIOCTRL_MDIOEN_MASK);
+
+ bus = mdiobus_alloc();
+ if (!bus)
+ return -ENOMEM;
+
+ of_address_to_resource(np, 0, &res);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
+ (unsigned long long)res.start);
+ bus->priv = lp;
+ bus->name = "Xilinx Emaclite MDIO";
+ bus->read = xemaclite_mdio_read;
+ bus->write = xemaclite_mdio_write;
+ bus->reset = xemaclite_mdio_reset;
+ bus->parent = dev;
+ bus->irq = lp->mdio_irqs; /* preallocated IRQ table */
+
+ lp->mii_bus = bus;
+
+ rc = of_mdiobus_register(bus, np);
+ if (rc)
+ goto err_register;
+
+ return 0;
+
+err_register:
+ mdiobus_free(bus);
+ return rc;
+}
+
+/**
+ * xemaclite_adjust_link - Link state callback for the Emaclite device
+ * @ndev: pointer to net_device struct
+ *
+ * There's nothing in the Emaclite device to be configured when the link
+ * state changes. We just print the status.
+ */
+static void xemaclite_adjust_link(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phy = lp->phy_dev;
+ int link_state;
+
+ /* hash together the state values to decide if something has changed */
+ link_state = phy->speed | (phy->duplex << 1) | phy->link;
+
+ if (lp->last_link != link_state) {
+ lp->last_link = link_state;
+ phy_print_status(phy);
+ }
+}
+
+/**
+ * xemaclite_open - Open the network device
+ * @dev: Pointer to the network device
+ *
+ * This function sets the MAC address, requests an IRQ and enables interrupts
+ * for the Emaclite device and starts the Tx queue.
+ * It also connects to the phy device, if MDIO is included in the
+ * Emaclite device.
+ */
+static int xemaclite_open(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ int retval;
+
+ /* Just to be safe, stop the device first */
+ xemaclite_disable_interrupts(lp);
+
+ if (lp->phy_node) {
+ u32 bmcr;
+
+ lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+ xemaclite_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+ if (!lp->phy_dev) {
+ dev_err(&lp->ndev->dev, "of_phy_connect() failed\n");
+ return -ENODEV;
+ }
+
+ /* EmacLite doesn't support giga-bit speeds */
+ lp->phy_dev->supported &= (PHY_BASIC_FEATURES);
+ lp->phy_dev->advertising = lp->phy_dev->supported;
+
+ /* Don't advertise 1000BASE-T Full/Half duplex speeds */
+ phy_write(lp->phy_dev, MII_CTRL1000, 0);
+
+		/* Advertise only 10 and 100 Mbps full/half duplex speeds */
+ phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL);
+
+ /* Restart auto negotiation */
+ bmcr = phy_read(lp->phy_dev, MII_BMCR);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ phy_write(lp->phy_dev, MII_BMCR, bmcr);
+
+ phy_start(lp->phy_dev);
+ }
+
+ /* Set the MAC address each time opened */
+ xemaclite_update_address(lp, dev->dev_addr);
+
+ /* Grab the IRQ */
+ retval = request_irq(dev->irq, xemaclite_interrupt, 0, dev->name, dev);
+ if (retval) {
+ dev_err(&lp->ndev->dev, "Could not allocate interrupt %d\n",
+ dev->irq);
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ lp->phy_dev = NULL;
+
+ return retval;
+ }
+
+ /* Enable Interrupts */
+ xemaclite_enable_interrupts(lp);
+
+ /* We're ready to go */
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/**
+ * xemaclite_close - Close the network device
+ * @dev: Pointer to the network device
+ *
+ * This function stops the Tx queue, disables interrupts and frees the IRQ for
+ * the Emaclite device.
+ * It also disconnects the phy device associated with the Emaclite device.
+ */
+static int xemaclite_close(struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ xemaclite_disable_interrupts(lp);
+ free_irq(dev->irq, dev);
+
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ lp->phy_dev = NULL;
+
+ return 0;
+}
+
+/**
+ * xemaclite_send - Transmit a frame
+ * @orig_skb: Pointer to the socket buffer to be transmitted
+ * @dev: Pointer to the network device
+ *
+ * This function checks if the Tx buffer of the Emaclite device is free to send
+ * data. If so, it fills the Tx buffer with the socket buffer's data,
+ * updates the stats and frees the socket buffer. The Tx completion is signaled
+ * by an interrupt. If the Tx buffer isn't free, then the socket buffer is
+ * deferred and the Tx queue is stopped so that the deferred socket buffer can
+ * be transmitted when the Emaclite device is free to transmit data.
+ *
+ * Return: 0, always.
+ */
+static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
+{
+ struct net_local *lp = netdev_priv(dev);
+ struct sk_buff *new_skb;
+ unsigned int len;
+ unsigned long flags;
+
+ len = orig_skb->len;
+
+ new_skb = orig_skb;
+
+ spin_lock_irqsave(&lp->reset_lock, flags);
+ if (xemaclite_send_data(lp, (u8 *) new_skb->data, len) != 0) {
+ /* If the Emaclite Tx buffer is busy, stop the Tx queue and
+ * defer the skb for transmission during the ISR, after the
+ * current transmission is complete */
+ netif_stop_queue(dev);
+ lp->deferred_skb = new_skb;
+ /* Take the time stamp now, since we can't do this in an ISR. */
+ skb_tx_timestamp(new_skb);
+ spin_unlock_irqrestore(&lp->reset_lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&lp->reset_lock, flags);
+
+ skb_tx_timestamp(new_skb);
+
+ dev->stats.tx_bytes += len;
+ dev_kfree_skb(new_skb);
+
+ return 0;
+}
+
+/**
+ * xemaclite_remove_ndev - Free the network device
+ * @ndev: Pointer to the network device to be freed
+ *
+ * This function unmaps the IO region of the Emaclite device and frees the net
+ * device.
+ */
+static void xemaclite_remove_ndev(struct net_device *ndev)
+{
+ if (ndev) {
+ struct net_local *lp = netdev_priv(ndev);
+
+ if (lp->base_addr)
+ iounmap((void __iomem __force *) (lp->base_addr));
+ free_netdev(ndev);
+ }
+}
+
+/**
+ * get_bool - Get a parameter from the OF device
+ * @ofdev: Pointer to OF device structure
+ * @s: Property to be retrieved
+ *
+ * This function looks for a property in the device node and returns its
+ * value if it is found, or false if the property is not found.
+ *
+ * Return: Value of the property if it is found, or false otherwise
+ */
+static bool get_bool(struct platform_device *ofdev, const char *s)
+{
+	const u32 *p = of_get_property(ofdev->dev.of_node, s, NULL);
+
+	if (p)
+		return (bool)*p;
+
+	dev_warn(&ofdev->dev, "Parameter %s not found, defaulting to false\n",
+		 s);
+	return false;
+}
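+
+/* For reference, a device-tree node matched by this driver typically
+ * looks like the following sketch (the addresses, interrupt specifier
+ * and MAC address here are illustrative only):
+ *
+ *	ethernet@81000000 {
+ *		compatible = "xlnx,xps-ethernetlite-2.00.a";
+ *		reg = <0x81000000 0x2000>;
+ *		interrupts = <1 0>;
+ *		local-mac-address = [00 0a 35 00 00 01];
+ *		xlnx,tx-ping-pong = <1>;
+ *		xlnx,rx-ping-pong = <1>;
+ *		phy-handle = <&phy0>;
+ *	};
+ */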
+
+static struct net_device_ops xemaclite_netdev_ops;
+
+/**
+ * xemaclite_of_probe - Probe method for the Emaclite device.
+ * @ofdev: Pointer to OF device structure
+ *
+ * This function probes for the Emaclite device in the device tree.
+ * It initializes the driver data structure and the hardware, sets the MAC
+ * address and registers the network device.
+ * It also registers a mii_bus for the Emaclite device, if MDIO is included
+ * in the device.
+ *
+ * Return: 0, if the driver is bound to the Emaclite device, or
+ * a negative error code on failure.
+ */
+static int __devinit xemaclite_of_probe(struct platform_device *ofdev)
+{
+ struct resource r_irq; /* Interrupt resources */
+ struct resource r_mem; /* IO mem resources */
+ struct net_device *ndev = NULL;
+ struct net_local *lp = NULL;
+ struct device *dev = &ofdev->dev;
+ const void *mac_address;
+
+ int rc = 0;
+
+ dev_info(dev, "Device Tree Probing\n");
+
+ /* Get iospace for the device */
+ rc = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem);
+ if (rc) {
+ dev_err(dev, "invalid address\n");
+ return rc;
+ }
+
+ /* Get IRQ for the device */
+ rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq);
+ if (rc == NO_IRQ) {
+ dev_err(dev, "no IRQ found\n");
+		return -ENODEV;
+ }
+
+ /* Create an ethernet device instance */
+ ndev = alloc_etherdev(sizeof(struct net_local));
+ if (!ndev) {
+ dev_err(dev, "Could not allocate network device\n");
+ return -ENOMEM;
+ }
+
+ dev_set_drvdata(dev, ndev);
+ SET_NETDEV_DEV(ndev, &ofdev->dev);
+
+ ndev->irq = r_irq.start;
+ ndev->mem_start = r_mem.start;
+ ndev->mem_end = r_mem.end;
+
+ lp = netdev_priv(ndev);
+ lp->ndev = ndev;
+
+ if (!request_mem_region(ndev->mem_start,
+ ndev->mem_end - ndev->mem_start + 1,
+ DRIVER_NAME)) {
+ dev_err(dev, "Couldn't lock memory region at %p\n",
+ (void *)ndev->mem_start);
+ rc = -EBUSY;
+ goto error2;
+ }
+
+ /* Get the virtual base address for the device */
+ lp->base_addr = ioremap(r_mem.start, resource_size(&r_mem));
+	if (!lp->base_addr) {
+ dev_err(dev, "EmacLite: Could not allocate iomem\n");
+ rc = -EIO;
+ goto error1;
+ }
+
+ spin_lock_init(&lp->reset_lock);
+ lp->next_tx_buf_to_use = 0x0;
+ lp->next_rx_buf_to_use = 0x0;
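+	/* The ping-pong properties reflect how the core was configured in
+	 * hardware: when set, a second Tx/Rx buffer exists at
+	 * XEL_BUFFER_OFFSET and the driver alternates between the two. */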
+ lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong");
+ lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
+ mac_address = of_get_mac_address(ofdev->dev.of_node);
+
+ if (mac_address)
+ /* Set the MAC address. */
+		memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
+ else
+ dev_warn(dev, "No MAC address found\n");
+
+ /* Clear the Tx CSR's in case this is a restart */
+ out_be32(lp->base_addr + XEL_TSR_OFFSET, 0);
+ out_be32(lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET, 0);
+
+ /* Set the MAC address in the EmacLite device */
+ xemaclite_update_address(lp, ndev->dev_addr);
+
+ lp->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
+ rc = xemaclite_mdio_setup(lp, &ofdev->dev);
+ if (rc)
+ dev_warn(&ofdev->dev, "error registering MDIO bus\n");
+
+ dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
+
+ ndev->netdev_ops = &xemaclite_netdev_ops;
+ ndev->flags &= ~IFF_MULTICAST;
+ ndev->watchdog_timeo = TX_TIMEOUT;
+
+ /* Finally, register the device */
+ rc = register_netdev(ndev);
+ if (rc) {
+ dev_err(dev,
+ "Cannot register network device, aborting\n");
+ goto error1;
+ }
+
+ dev_info(dev,
+ "Xilinx EmacLite at 0x%08X mapped to 0x%08X, irq=%d\n",
+ (unsigned int __force)ndev->mem_start,
+ (unsigned int __force)lp->base_addr, ndev->irq);
+ return 0;
+
+error1:
+ release_mem_region(ndev->mem_start, resource_size(&r_mem));
+
+error2:
+ xemaclite_remove_ndev(ndev);
+ return rc;
+}
+
+/**
+ * xemaclite_of_remove - Unbind the driver from the Emaclite device.
+ * @of_dev: Pointer to OF device structure
+ *
+ * This function is called if a device is physically removed from the system or
+ * if the driver module is being unloaded. It frees any resources allocated to
+ * the device.
+ *
+ * Return: 0, always.
+ */
+static int __devexit xemaclite_of_remove(struct platform_device *of_dev)
+{
+ struct device *dev = &of_dev->dev;
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ struct net_local *lp = netdev_priv(ndev);
+
+ /* Un-register the mii_bus, if configured */
+ if (lp->has_mdio) {
+ mdiobus_unregister(lp->mii_bus);
+ kfree(lp->mii_bus->irq);
+ mdiobus_free(lp->mii_bus);
+ lp->mii_bus = NULL;
+ }
+
+ unregister_netdev(ndev);
+
+ if (lp->phy_node)
+ of_node_put(lp->phy_node);
+ lp->phy_node = NULL;
+
+	release_mem_region(ndev->mem_start, ndev->mem_end - ndev->mem_start + 1);
+
+ xemaclite_remove_ndev(ndev);
+ dev_set_drvdata(dev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void
+xemaclite_poll_controller(struct net_device *ndev)
+{
+ disable_irq(ndev->irq);
+ xemaclite_interrupt(ndev->irq, ndev);
+ enable_irq(ndev->irq);
+}
+#endif
+
+static struct net_device_ops xemaclite_netdev_ops = {
+ .ndo_open = xemaclite_open,
+ .ndo_stop = xemaclite_close,
+ .ndo_start_xmit = xemaclite_send,
+ .ndo_set_mac_address = xemaclite_set_mac_address,
+ .ndo_tx_timeout = xemaclite_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = xemaclite_poll_controller,
+#endif
+};
+
+/* Match table for OF platform binding */
+static struct of_device_id xemaclite_of_match[] __devinitdata = {
+ { .compatible = "xlnx,opb-ethernetlite-1.01.a", },
+ { .compatible = "xlnx,opb-ethernetlite-1.01.b", },
+ { .compatible = "xlnx,xps-ethernetlite-1.00.a", },
+ { .compatible = "xlnx,xps-ethernetlite-2.00.a", },
+ { .compatible = "xlnx,xps-ethernetlite-2.01.a", },
+ { .compatible = "xlnx,xps-ethernetlite-3.00.a", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xemaclite_of_match);
+
+static struct platform_driver xemaclite_of_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = xemaclite_of_match,
+ },
+ .probe = xemaclite_of_probe,
+ .remove = __devexit_p(xemaclite_of_remove),
+};
+
+/**
+ * xemaclite_init - Initial driver registration call
+ *
+ * Return: 0 upon success, or a negative error upon failure.
+ */
+static int __init xemaclite_init(void)
+{
+ /* No kernel boot options used, we just need to register the driver */
+ return platform_driver_register(&xemaclite_of_driver);
+}
+
+/**
+ * xemaclite_cleanup - Driver un-registration call
+ */
+static void __exit xemaclite_cleanup(void)
+{
+ platform_driver_unregister(&xemaclite_of_driver);
+}
+
+module_init(xemaclite_init);
+module_exit(xemaclite_cleanup);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx Ethernet MAC Lite driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/xircom/Kconfig b/drivers/net/ethernet/xircom/Kconfig
new file mode 100644
index 000000000000..69f56a6de821
--- /dev/null
+++ b/drivers/net/ethernet/xircom/Kconfig
@@ -0,0 +1,31 @@
+#
+# Xircom network device configuration
+#
+
+config NET_VENDOR_XIRCOM
+ bool "Xircom devices"
+ default y
+ depends on PCMCIA
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Xircom cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_XIRCOM
+
+config PCMCIA_XIRC2PS
+ tristate "Xircom 16-bit PCMCIA support"
+ depends on PCMCIA
+ ---help---
+ Say Y here if you intend to attach a Xircom 16-bit PCMCIA (PC-card)
+ Ethernet or Fast Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called xirc2ps_cs. If unsure, say N.
+
+endif # NET_VENDOR_XIRCOM
diff --git a/drivers/net/ethernet/xircom/Makefile b/drivers/net/ethernet/xircom/Makefile
new file mode 100644
index 000000000000..3b7aebd8b849
--- /dev/null
+++ b/drivers/net/ethernet/xircom/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Xircom network device drivers.
+#
+
+obj-$(CONFIG_PCMCIA_XIRC2PS) += xirc2ps_cs.o
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
new file mode 100644
index 000000000000..bbe8b7dbf3f3
--- /dev/null
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -0,0 +1,1813 @@
+/* [xirc2ps_cs.c wk 03.11.99] (1.40 1999/11/18 00:06:03)
+ * Xircom CreditCard Ethernet Adapter IIps driver
+ * Xircom Realport 10/100 (RE-100) driver
+ *
+ * This driver supports various Xircom CreditCard Ethernet adapters
+ * including the CE2, CE IIps, RE-10, CEM28, CEM33, CE33, CEM56,
+ * CE3-100, CE3B, RE-100, REM10BT, and REM56G-100.
+ *
+ * 2000-09-24 <psheer@icon.co.za> The Xircom CE3B-100 may not
+ * autodetect the media properly. In this case use the
+ * if_port=1 (for 10BaseT) or if_port=4 (for 100BaseT) options
+ * to force the media type.
+ *
+ * Written originally by Werner Koch based on David Hinds' skeleton of the
+ * PCMCIA driver.
+ *
+ * Copyright (c) 1997,1998 Werner Koch (dd9jn)
+ *
+ * This driver is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * It is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ *
+ * ALTERNATIVELY, this driver may be distributed under the terms of
+ * the following license, in which case the provisions of this license
+ * are required INSTEAD OF the GNU General Public License. (This clause
+ * is necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, and the entire permission notice in its entirety,
+ * including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/bitops.h>
+#include <linux/mii.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#ifndef MANFID_COMPAQ
+ #define MANFID_COMPAQ 0x0138
+ #define MANFID_COMPAQ2 0x0183 /* is this correct? */
+#endif
+
+#include <pcmcia/ds.h>
+
+/* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+/****************
+ * Some constants used to access the hardware
+ */
+
+/* Register offsets and value constants */
+#define XIRCREG_CR 0 /* Command register (wr) */
+enum xirc_cr {
+ TransmitPacket = 0x01,
+ SoftReset = 0x02,
+ EnableIntr = 0x04,
+ ForceIntr = 0x08,
+ ClearTxFIFO = 0x10,
+ ClearRxOvrun = 0x20,
+ RestartTx = 0x40
+};
+#define XIRCREG_ESR 0 /* Ethernet status register (rd) */
+enum xirc_esr {
+ FullPktRcvd = 0x01, /* full packet in receive buffer */
+ PktRejected = 0x04, /* a packet has been rejected */
+ TxPktPend = 0x08, /* TX Packet Pending */
+ IncorPolarity = 0x10,
+ MediaSelect = 0x20 /* set if TP, clear if AUI */
+};
+#define XIRCREG_PR 1 /* Page Register select */
+#define XIRCREG_EDP 4 /* Ethernet Data Port Register */
+#define XIRCREG_ISR 6 /* Ethernet Interrupt Status Register */
+enum xirc_isr {
+ TxBufOvr = 0x01, /* TX Buffer Overflow */
+ PktTxed = 0x02, /* Packet Transmitted */
+ MACIntr = 0x04, /* MAC Interrupt occurred */
+ TxResGrant = 0x08, /* Tx Reservation Granted */
+ RxFullPkt = 0x20, /* Rx Full Packet */
+ RxPktRej = 0x40, /* Rx Packet Rejected */
+ ForcedIntr= 0x80 /* Forced Interrupt */
+};
+#define XIRCREG1_IMR0 12 /* Ethernet Interrupt Mask Register (on page 1)*/
+#define XIRCREG1_IMR1 13
+#define XIRCREG0_TSO 8 /* Transmit Space Open Register (on page 0)*/
+#define XIRCREG0_TRS 10 /* Transmit reservation Size Register (page 0)*/
+#define XIRCREG0_DO 12 /* Data Offset Register (page 0) (wr) */
+#define XIRCREG0_RSR 12 /* Receive Status Register (page 0) (rd) */
+enum xirc_rsr {
+ PhyPkt = 0x01, /* set:physical packet, clear: multicast packet */
+ BrdcstPkt = 0x02, /* set if it is a broadcast packet */
+ PktTooLong = 0x04, /* set if packet length > 1518 */
+ AlignErr = 0x10, /* incorrect CRC and last octet not complete */
+ CRCErr = 0x20, /* incorrect CRC and last octet is complete */
+ PktRxOk = 0x80 /* received ok */
+};
+#define XIRCREG0_PTR 13 /* packets transmitted register (rd) */
+#define XIRCREG0_RBC 14 /* receive byte count register (rd) */
+#define XIRCREG1_ECR 14 /* ethernet configuration register */
+enum xirc_ecr {
+ FullDuplex = 0x04, /* enable full duplex mode */
+ LongTPMode = 0x08, /* adjust for longer lengths of TP cable */
+ DisablePolCor = 0x10,/* disable auto polarity correction */
+ DisableLinkPulse = 0x20, /* disable link pulse generation */
+ DisableAutoTx = 0x40, /* disable auto-transmit */
+};
+#define XIRCREG2_RBS 8 /* receive buffer start register */
+#define XIRCREG2_LED 10 /* LED Configuration register */
+/* values for the leds: Bits 2-0 for led 1
+ * 0 disabled Bits 5-3 for led 2
+ * 1 collision
+ * 2 noncollision
+ * 3 link_detected
+ * 4 incor_polarity
+ * 5 jabber
+ * 6 auto_assertion
+ * 7 rx_tx_activity
+ */
+#define XIRCREG2_MSR 12 /* Mohawk specific register */
+
+#define XIRCREG4_GPR0 8 /* General Purpose Register 0 */
+#define XIRCREG4_GPR1 9 /* General Purpose Register 1 */
+#define XIRCREG2_GPR2 13 /* General Purpose Register 2 (page2!)*/
+#define XIRCREG4_BOV 10 /* Bonding Version Register */
+#define XIRCREG4_LMA 12 /* Local Memory Address Register */
+#define XIRCREG4_LMD 14 /* Local Memory Data Port */
+/* MAC register can only be accessed with 8 bit operations */
+#define XIRCREG40_CMD0 8 /* Command Register (wr) */
+enum xirc_cmd { /* Commands */
+ Transmit = 0x01,
+ EnableRecv = 0x04,
+ DisableRecv = 0x08,
+ Abort = 0x10,
+ Online = 0x20,
+ IntrAck = 0x40,
+ Offline = 0x80
+};
+#define XIRCREG5_RHSA0 10 /* Rx Host Start Address */
+#define XIRCREG40_RXST0 9 /* Receive Status Register */
+#define XIRCREG40_TXST0 11 /* Transmit Status Register 0 */
+#define XIRCREG40_TXST1 12 /* Transmit Status Register 1 */
+#define XIRCREG40_RMASK0 13 /* Receive Mask Register */
+#define XIRCREG40_TMASK0 14 /* Transmit Mask Register 0 */
+#define XIRCREG40_TMASK1 15 /* Transmit Mask Register 1 */
+#define XIRCREG42_SWC0 8 /* Software Configuration 0 */
+#define XIRCREG42_SWC1 9 /* Software Configuration 1 */
+#define XIRCREG42_BOC 10 /* Back-Off Configuration */
+#define XIRCREG44_TDR0 8 /* Time Domain Reflectometry 0 */
+#define XIRCREG44_TDR1 9 /* Time Domain Reflectometry 1 */
+#define XIRCREG44_RXBC_LO 10 /* Rx Byte Count 0 (rd) */
+#define XIRCREG44_RXBC_HI 11 /* Rx Byte Count 1 (rd) */
+#define XIRCREG45_REV 15 /* Revision Register (rd) */
+#define XIRCREG50_IA 8 /* Individual Address (8-13) */
+
+static const char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" };
+
+/* card types */
+#define XIR_UNKNOWN 0 /* unknown: not supported */
+#define XIR_CE 1 /* (prodid 1) different hardware: not supported */
+#define XIR_CE2 2 /* (prodid 2) */
+#define XIR_CE3 3 /* (prodid 3) */
+#define XIR_CEM 4 /* (prodid 1) different hardware: not supported */
+#define XIR_CEM2 5 /* (prodid 2) */
+#define XIR_CEM3 6 /* (prodid 3) */
+#define XIR_CEM33 7 /* (prodid 4) */
+#define XIR_CEM56M 8 /* (prodid 5) */
+#define XIR_CEM56 9 /* (prodid 6) */
+#define XIR_CM28 10 /* (prodid 3) modem only: not supported here */
+#define XIR_CM33 11 /* (prodid 4) modem only: not supported here */
+#define XIR_CM56 12 /* (prodid 5) modem only: not supported here */
+#define XIR_CG 13 /* (prodid 1) GSM modem only: not supported */
+#define XIR_CBE 14 /* (prodid 1) cardbus ethernet: not supported */
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_DESCRIPTION("Xircom PCMCIA ethernet driver");
+MODULE_LICENSE("Dual MPL/GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
+
+INT_MODULE_PARM(if_port, 0);
+INT_MODULE_PARM(full_duplex, 0);
+INT_MODULE_PARM(do_sound, 1);
+INT_MODULE_PARM(lockup_hack, 0); /* anti lockup hack */
+
+/*====================================================================*/
+
+/* We do not process more than this number of bytes during one
+ * interrupt. (Of course we receive complete packets, so this is not
+ * an exact limit.)
+ * Something between 2000 and 22000; the first value gives the best
+ * interrupt latency, the second allows use of the complete on-chip
+ * buffer. We use the high value as the initial value.
+ */
+static unsigned maxrx_bytes = 22000;
+
+/* MII management prototypes */
+static void mii_idle(unsigned int ioaddr);
+static void mii_putbit(unsigned int ioaddr, unsigned data);
+static int mii_getbit(unsigned int ioaddr);
+static void mii_wbits(unsigned int ioaddr, unsigned data, int len);
+static unsigned mii_rd(unsigned int ioaddr, u_char phyaddr, u_char phyreg);
+static void mii_wr(unsigned int ioaddr, u_char phyaddr, u_char phyreg,
+ unsigned data, int len);
+
+static int has_ce2_string(struct pcmcia_device * link);
+static int xirc2ps_config(struct pcmcia_device * link);
+static void xirc2ps_release(struct pcmcia_device * link);
+static void xirc2ps_detach(struct pcmcia_device *p_dev);
+
+static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id);
+
+typedef struct local_info_t {
+ struct net_device *dev;
+ struct pcmcia_device *p_dev;
+
+ int card_type;
+ int probe_port;
+ int silicon; /* silicon revision. 0=old CE2, 1=Scipper, 4=Mohawk */
+ int mohawk; /* a CE3 type card */
+ int dingo; /* a CEM56 type card */
+ int new_mii; /* has full 10baseT/100baseT MII */
+	int modem;	/* is a multifunction card (i.e. with a modem) */
+ void __iomem *dingo_ccr; /* only used for CEM56 cards */
+ unsigned last_ptr_value; /* last packets transmitted value */
+ const char *manf_str;
+ struct work_struct tx_timeout_task;
+} local_info_t;
+
+/****************
+ * Some more prototypes
+ */
+static netdev_tx_t do_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static void xirc_tx_timeout(struct net_device *dev);
+static void xirc2ps_tx_timeout_task(struct work_struct *work);
+static void set_addresses(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static int set_card_type(struct pcmcia_device *link);
+static int do_config(struct net_device *dev, struct ifmap *map);
+static int do_open(struct net_device *dev);
+static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static const struct ethtool_ops netdev_ethtool_ops;
+static void hardreset(struct net_device *dev);
+static void do_reset(struct net_device *dev, int full);
+static int init_mii(struct net_device *dev);
+static void do_powerdown(struct net_device *dev);
+static int do_stop(struct net_device *dev);
+
+/*=============== Helper functions =========================*/
+#define SelectPage(pgnr) outb((pgnr), ioaddr + XIRCREG_PR)
+#define GetByte(reg) ((unsigned)inb(ioaddr + (reg)))
+#define GetWord(reg) ((unsigned)inw(ioaddr + (reg)))
+#define PutByte(reg,value) outb((value), ioaddr+(reg))
+#define PutWord(reg,value) outw((value), ioaddr+(reg))
+
+/*====== Functions used for debugging =================================*/
+#if 0 /* reading regs may change system status */
+static void
+PrintRegisters(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+
+ if (pc_debug > 1) {
+ int i, page;
+
+ printk(KERN_DEBUG pr_fmt("Register common: "));
+ for (i = 0; i < 8; i++)
+ pr_cont(" %2.2x", GetByte(i));
+ pr_cont("\n");
+ for (page = 0; page <= 8; page++) {
+ printk(KERN_DEBUG pr_fmt("Register page %2x: "), page);
+ SelectPage(page);
+ for (i = 8; i < 16; i++)
+ pr_cont(" %2.2x", GetByte(i));
+ pr_cont("\n");
+ }
+ for (page=0x40 ; page <= 0x5f; page++) {
+ if (page == 0x43 || (page >= 0x46 && page <= 0x4f) ||
+ (page >= 0x51 && page <=0x5e))
+ continue;
+ printk(KERN_DEBUG pr_fmt("Register page %2x: "), page);
+ SelectPage(page);
+ for (i = 8; i < 16; i++)
+ pr_cont(" %2.2x", GetByte(i));
+ pr_cont("\n");
+ }
+ }
+}
+#endif /* 0 */
+
+/*============== MII Management functions ===============*/
+
+/****************
+ * Turn around for read
+ */
+static void
+mii_idle(unsigned int ioaddr)
+{
+ PutByte(XIRCREG2_GPR2, 0x04|0); /* drive MDCK low */
+ udelay(1);
+ PutByte(XIRCREG2_GPR2, 0x04|1); /* and drive MDCK high */
+ udelay(1);
+}
+
+/****************
+ * Write a bit to MDI/O
+ */
+static void
+mii_putbit(unsigned int ioaddr, unsigned data)
+{
+ #if 1
+ if (data) {
+ PutByte(XIRCREG2_GPR2, 0x0c|2|0); /* set MDIO */
+ udelay(1);
+ PutByte(XIRCREG2_GPR2, 0x0c|2|1); /* and drive MDCK high */
+ udelay(1);
+ } else {
+ PutByte(XIRCREG2_GPR2, 0x0c|0|0); /* clear MDIO */
+ udelay(1);
+ PutByte(XIRCREG2_GPR2, 0x0c|0|1); /* and drive MDCK high */
+ udelay(1);
+ }
+ #else
+ if (data) {
+ PutWord(XIRCREG2_GPR2-1, 0x0e0e);
+ udelay(1);
+ PutWord(XIRCREG2_GPR2-1, 0x0f0f);
+ udelay(1);
+ } else {
+ PutWord(XIRCREG2_GPR2-1, 0x0c0c);
+ udelay(1);
+ PutWord(XIRCREG2_GPR2-1, 0x0d0d);
+ udelay(1);
+ }
+ #endif
+}
+
+/****************
+ * Get a bit from MDI/O
+ */
+static int
+mii_getbit(unsigned int ioaddr)
+{
+ unsigned d;
+
+ PutByte(XIRCREG2_GPR2, 4|0); /* drive MDCK low */
+ udelay(1);
+ d = GetByte(XIRCREG2_GPR2); /* read MDIO */
+ PutByte(XIRCREG2_GPR2, 4|1); /* drive MDCK high again */
+ udelay(1);
+ return d & 0x20; /* read MDIO */
+}
+
+static void
+mii_wbits(unsigned int ioaddr, unsigned data, int len)
+{
+ unsigned m = 1 << (len-1);
+ for (; m; m >>= 1)
+ mii_putbit(ioaddr, data & m);
+}
+
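+/****************
+ * Read a PHY register. The management frame is bit-banged through GPR2:
+ * 32 preamble bits, start + opcode (0110 = read), 5 PHY address bits,
+ * 5 register bits, one turnaround cycle, then 16 data bits clocked in
+ * MSB first.
+ */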
+static unsigned
+mii_rd(unsigned int ioaddr, u_char phyaddr, u_char phyreg)
+{
+ int i;
+ unsigned data=0, m;
+
+ SelectPage(2);
+ for (i=0; i < 32; i++) /* 32 bit preamble */
+ mii_putbit(ioaddr, 1);
+ mii_wbits(ioaddr, 0x06, 4); /* Start and opcode for read */
+ mii_wbits(ioaddr, phyaddr, 5); /* PHY address to be accessed */
+ mii_wbits(ioaddr, phyreg, 5); /* PHY register to read */
+ mii_idle(ioaddr); /* turn around */
+ mii_getbit(ioaddr);
+
+ for (m = 1<<15; m; m >>= 1)
+ if (mii_getbit(ioaddr))
+ data |= m;
+ mii_idle(ioaddr);
+ return data;
+}
+
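+/****************
+ * Write a PHY register: same framing as the read, but with start +
+ * opcode 0101 (write), a driven "10" turnaround, then the data bits.
+ */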
+static void
+mii_wr(unsigned int ioaddr, u_char phyaddr, u_char phyreg, unsigned data,
+ int len)
+{
+ int i;
+
+ SelectPage(2);
+ for (i=0; i < 32; i++) /* 32 bit preamble */
+ mii_putbit(ioaddr, 1);
+ mii_wbits(ioaddr, 0x05, 4); /* Start and opcode for write */
+ mii_wbits(ioaddr, phyaddr, 5); /* PHY address to be accessed */
+ mii_wbits(ioaddr, phyreg, 5); /* PHY Register to write */
+ mii_putbit(ioaddr, 1); /* turn around */
+ mii_putbit(ioaddr, 0);
+ mii_wbits(ioaddr, data, len); /* And write the data */
+ mii_idle(ioaddr);
+}
+
+/*============= Main bulk of functions =========================*/
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = do_open,
+ .ndo_stop = do_stop,
+ .ndo_start_xmit = do_start_xmit,
+ .ndo_tx_timeout = xirc_tx_timeout,
+ .ndo_set_config = do_config,
+ .ndo_do_ioctl = do_ioctl,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int
+xirc2ps_probe(struct pcmcia_device *link)
+{
+ struct net_device *dev;
+ local_info_t *local;
+
+ dev_dbg(&link->dev, "attach()\n");
+
+ /* Allocate the device structure */
+ dev = alloc_etherdev(sizeof(local_info_t));
+ if (!dev)
+ return -ENOMEM;
+ local = netdev_priv(dev);
+ local->dev = dev;
+ local->p_dev = link;
+ link->priv = dev;
+
+ /* General socket configuration */
+ link->config_index = 1;
+
+ /* Fill in card specific entries */
+ dev->netdev_ops = &netdev_ops;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task);
+
+ return xirc2ps_config(link);
+} /* xirc2ps_attach */
+
+static void
+xirc2ps_detach(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ dev_dbg(&link->dev, "detach\n");
+
+ unregister_netdev(dev);
+
+ xirc2ps_release(link);
+
+ free_netdev(dev);
+} /* xirc2ps_detach */
+
+/****************
+ * Detect the type of the card from the CISTPL_MANFID tuple.
+ * Returns: 0 := not supported
+ *          1 := supported
+ * Media-Id bits:
+ * Ethernet 0x01
+ * Tokenring 0x02
+ * Arcnet 0x04
+ * Wireless 0x08
+ * Modem 0x10
+ * GSM only 0x20
+ * Prod-Id bits:
+ * Pocket 0x10
+ * External 0x20
+ * Creditcard 0x40
+ * Cardbus 0x80
+ *
+ */
+static int
+set_card_type(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ local_info_t *local = netdev_priv(dev);
+ u8 *buf;
+ unsigned int cisrev, mediaid, prodid;
+ size_t len;
+
+ len = pcmcia_get_tuple(link, CISTPL_MANFID, &buf);
+ if (len < 5) {
+ dev_err(&link->dev, "invalid CIS -- sorry\n");
+ return 0;
+ }
+
+ cisrev = buf[2];
+ mediaid = buf[3];
+ prodid = buf[4];
+
+ dev_dbg(&link->dev, "cisrev=%02x mediaid=%02x prodid=%02x\n",
+ cisrev, mediaid, prodid);
+
+ local->mohawk = 0;
+ local->dingo = 0;
+ local->modem = 0;
+ local->card_type = XIR_UNKNOWN;
+ if (!(prodid & 0x40)) {
+ pr_notice("Oops: Not a creditcard\n");
+ return 0;
+ }
+ if (!(mediaid & 0x01)) {
+ pr_notice("Not an Ethernet card\n");
+ return 0;
+ }
+ if (mediaid & 0x10) {
+ local->modem = 1;
+ switch(prodid & 15) {
+ case 1: local->card_type = XIR_CEM ; break;
+ case 2: local->card_type = XIR_CEM2 ; break;
+ case 3: local->card_type = XIR_CEM3 ; break;
+ case 4: local->card_type = XIR_CEM33 ; break;
+ case 5: local->card_type = XIR_CEM56M;
+ local->mohawk = 1;
+ break;
+ case 6:
+ case 7: /* 7 is the RealPort 10/56 */
+ local->card_type = XIR_CEM56 ;
+ local->mohawk = 1;
+ local->dingo = 1;
+ break;
+ }
+ } else {
+ switch(prodid & 15) {
+ case 1: local->card_type = has_ce2_string(link)? XIR_CE2 : XIR_CE ;
+ break;
+ case 2: local->card_type = XIR_CE2; break;
+ case 3: local->card_type = XIR_CE3;
+ local->mohawk = 1;
+ break;
+ }
+ }
+ if (local->card_type == XIR_CE || local->card_type == XIR_CEM) {
+ pr_notice("Sorry, this is an old CE card\n");
+ return 0;
+ }
+ if (local->card_type == XIR_UNKNOWN)
+ pr_notice("unknown card (mediaid=%02x prodid=%02x)\n", mediaid, prodid);
+
+ return 1;
+}
+
+/****************
+ * There are some CE2 cards out which claim to be a CE card.
+ * This function looks for a "CE2" in the 3rd version field.
+ * Returns: true if this is a CE2
+ */
+static int
+has_ce2_string(struct pcmcia_device * p_dev)
+{
+ if (p_dev->prod_id[2] && strstr(p_dev->prod_id[2], "CE2"))
+ return 1;
+ return 0;
+}
+
+static int
+xirc2ps_config_modem(struct pcmcia_device *p_dev, void *priv_data)
+{
+ unsigned int ioaddr;
+
+ if ((p_dev->resource[0]->start & 0xf) == 8)
+ return -ENODEV;
+
+ p_dev->resource[0]->end = 16;
+ p_dev->resource[1]->end = 8;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
+ p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
+ p_dev->io_lines = 10;
+
+ p_dev->resource[1]->start = p_dev->resource[0]->start;
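+	/* Scan the classic ISA-style I/O range 0x300-0x3f0 in 16-byte
+	 * steps until the socket accepts an unused window. */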
+ for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
+ p_dev->resource[0]->start = ioaddr;
+ if (!pcmcia_request_io(p_dev))
+ return 0;
+ }
+ return -ENODEV;
+}
+
+static int
+xirc2ps_config_check(struct pcmcia_device *p_dev, void *priv_data)
+{
+ int *pass = priv_data;
+ resource_size_t tmp = p_dev->resource[1]->start;
+
+ tmp += (*pass ? (p_dev->config_index & 0x20 ? -24 : 8)
+ : (p_dev->config_index & 0x20 ? 8 : -24));
+
+ if ((p_dev->resource[0]->start & 0xf) == 8)
+ return -ENODEV;
+
+ p_dev->resource[0]->end = 18;
+ p_dev->resource[1]->end = 8;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
+ p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
+ p_dev->io_lines = 10;
+
+ p_dev->resource[1]->start = p_dev->resource[0]->start;
+ p_dev->resource[0]->start = tmp;
+ return pcmcia_request_io(p_dev);
+}
+
+
+static int pcmcia_get_mac_ce(struct pcmcia_device *p_dev,
+ tuple_t *tuple,
+ void *priv)
+{
+ struct net_device *dev = priv;
+ int i;
+
+ if (tuple->TupleDataLen != 13)
+ return -EINVAL;
+ if ((tuple->TupleData[0] != 2) || (tuple->TupleData[1] != 1) ||
+ (tuple->TupleData[2] != 6))
+ return -EINVAL;
+ /* another try (James Lehmer's CE2 version 4.1)*/
+ for (i = 2; i < 6; i++)
+ dev->dev_addr[i] = tuple->TupleData[i+2];
+ return 0;
+}
+
+
+static int
+xirc2ps_config(struct pcmcia_device * link)
+{
+ struct net_device *dev = link->priv;
+ local_info_t *local = netdev_priv(dev);
+ unsigned int ioaddr;
+ int err;
+ u8 *buf;
+ size_t len;
+
+ local->dingo_ccr = NULL;
+
+ dev_dbg(&link->dev, "config\n");
+
+ /* Is this a valid card */
+ if (link->has_manf_id == 0) {
+ pr_notice("manfid not found in CIS\n");
+ goto failure;
+ }
+
+ switch (link->manf_id) {
+ case MANFID_XIRCOM:
+ local->manf_str = "Xircom";
+ break;
+ case MANFID_ACCTON:
+ local->manf_str = "Accton";
+ break;
+ case MANFID_COMPAQ:
+ case MANFID_COMPAQ2:
+ local->manf_str = "Compaq";
+ break;
+ case MANFID_INTEL:
+ local->manf_str = "Intel";
+ break;
+ case MANFID_TOSHIBA:
+ local->manf_str = "Toshiba";
+ break;
+ default:
+ pr_notice("Unknown Card Manufacturer ID: 0x%04x\n",
+ (unsigned)link->manf_id);
+ goto failure;
+ }
+ dev_dbg(&link->dev, "found %s card\n", local->manf_str);
+
+ if (!set_card_type(link)) {
+ pr_notice("this card is not supported\n");
+ goto failure;
+ }
+
+ /* get the ethernet address from the CIS */
+ err = pcmcia_get_mac_from_cis(link, dev);
+
+ /* not found: try to get the node-id from tuple 0x89 */
+ if (err) {
+ len = pcmcia_get_tuple(link, 0x89, &buf);
+ /* data layout looks like tuple 0x22 */
+ if (buf && len == 8) {
+ if (*buf == CISTPL_FUNCE_LAN_NODE_ID) {
+ int i;
+ for (i = 2; i < 6; i++)
+ dev->dev_addr[i] = buf[i+2];
+ } else
+ err = -1;
+ }
+ kfree(buf);
+ }
+
+ if (err)
+ err = pcmcia_loop_tuple(link, CISTPL_FUNCE, pcmcia_get_mac_ce, dev);
+
+ if (err) {
+ pr_notice("node-id not found in CIS\n");
+ goto failure;
+ }
+
+ if (local->modem) {
+ int pass;
+ link->config_flags |= CONF_AUTO_SET_IO;
+
+ if (local->dingo) {
+ /* Take the Modem IO port from the CIS and scan for a free
+ * Ethernet port */
+ if (!pcmcia_loop_config(link, xirc2ps_config_modem, NULL))
+ goto port_found;
+ } else {
+ /* We do 2 passes here: The first one uses the regular mapping and
+ * the second tries again, thereby considering that the 32 ports are
+ * mirrored every 32 bytes. Actually we use a mirrored port for
+ * the Mako if (on the first pass) the COR bit 5 is set.
+ */
+ for (pass=0; pass < 2; pass++)
+ if (!pcmcia_loop_config(link, xirc2ps_config_check,
+ &pass))
+ goto port_found;
+ /* if special option:
+ * try to configure as Ethernet only.
+ * .... */
+ }
+ pr_notice("no ports available\n");
+ } else {
+ link->io_lines = 10;
+ link->resource[0]->end = 16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
+ for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
+ link->resource[0]->start = ioaddr;
+ if (!(err = pcmcia_request_io(link)))
+ goto port_found;
+ }
+ link->resource[0]->start = 0; /* let CS decide */
+ if ((err = pcmcia_request_io(link)))
+ goto config_error;
+ }
+ port_found:
+ if (err)
+ goto config_error;
+
+ /****************
+ * Now allocate an interrupt line. Note that this does not
+ * actually assign a handler to the interrupt.
+ */
+ if ((err=pcmcia_request_irq(link, xirc2ps_interrupt)))
+ goto config_error;
+
+ link->config_flags |= CONF_ENABLE_IRQ;
+ if (do_sound)
+ link->config_flags |= CONF_ENABLE_SPKR;
+
+ if ((err = pcmcia_enable_device(link)))
+ goto config_error;
+
+ if (local->dingo) {
+ /* Reset the modem's BAR to the correct value.
+ * This is necessary because in the RequestConfiguration call,
+ * the base address of the Ethernet port (BasePort1) is written
+ * to the BAR registers of the modem.
+ */
+ err = pcmcia_write_config_byte(link, CISREG_IOBASE_0, (u8)
+ link->resource[1]->start & 0xff);
+ if (err)
+ goto config_error;
+
+ err = pcmcia_write_config_byte(link, CISREG_IOBASE_1,
+ (link->resource[1]->start >> 8) & 0xff);
+ if (err)
+ goto config_error;
+
+ /* There is no config entry for the Ethernet part, which
+ * is at 0x0800, so we allocate a window into the attribute
+ * memory and write directly to the CIS registers.
+ */
+ link->resource[2]->flags = WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_AM |
+ WIN_ENABLE;
+ link->resource[2]->start = link->resource[2]->end = 0;
+ if ((err = pcmcia_request_window(link, link->resource[2], 0)))
+ goto config_error;
+
+ local->dingo_ccr = ioremap(link->resource[2]->start, 0x1000) + 0x0800;
+ if ((err = pcmcia_map_mem_page(link, link->resource[2], 0)))
+ goto config_error;
+
+ /* Set up the CCRs; there is no info in the CIS about the Ethernet
+ * part.
+ */
+ writeb(0x47, local->dingo_ccr + CISREG_COR);
+ ioaddr = link->resource[0]->start;
+ writeb(ioaddr & 0xff , local->dingo_ccr + CISREG_IOBASE_0);
+ writeb((ioaddr >> 8)&0xff , local->dingo_ccr + CISREG_IOBASE_1);
+
+ #if 0
+ {
+ u_char tmp;
+ pr_info("ECOR:");
+ for (i=0; i < 7; i++) {
+ tmp = readb(local->dingo_ccr + i*2);
+ pr_cont(" %02x", tmp);
+ }
+ pr_cont("\n");
+ pr_info("DCOR:");
+ for (i=0; i < 4; i++) {
+ tmp = readb(local->dingo_ccr + 0x20 + i*2);
+ pr_cont(" %02x", tmp);
+ }
+ pr_cont("\n");
+ pr_info("SCOR:");
+ for (i=0; i < 10; i++) {
+ tmp = readb(local->dingo_ccr + 0x40 + i*2);
+ pr_cont(" %02x", tmp);
+ }
+ pr_cont("\n");
+ }
+ #endif
+
+ writeb(0x01, local->dingo_ccr + 0x20);
+ writeb(0x0c, local->dingo_ccr + 0x22);
+ writeb(0x00, local->dingo_ccr + 0x24);
+ writeb(0x00, local->dingo_ccr + 0x26);
+ writeb(0x00, local->dingo_ccr + 0x28);
+ }
+
+ /* The if_port symbol can be set when the module is loaded */
+ local->probe_port=0;
+ if (!if_port) {
+ local->probe_port = dev->if_port = 1;
+ } else if ((if_port >= 1 && if_port <= 2) ||
+ (local->mohawk && if_port==4))
+ dev->if_port = if_port;
+ else
+ pr_notice("invalid if_port requested\n");
+
+ /* we can now register the device with the net subsystem */
+ dev->irq = link->irq;
+ dev->base_addr = link->resource[0]->start;
+
+ if (local->dingo)
+ do_reset(dev, 1); /* a kludge to make the cem56 work */
+
+ SET_NETDEV_DEV(dev, &link->dev);
+
+ if ((err=register_netdev(dev))) {
+ pr_notice("register_netdev() failed\n");
+ goto config_error;
+ }
+
+ /* print some info about the hardware */
+ netdev_info(dev, "%s: port %#3lx, irq %d, hwaddr %pM\n",
+ local->manf_str, (u_long)dev->base_addr, (int)dev->irq,
+ dev->dev_addr);
+
+ return 0;
+
+ config_error:
+ xirc2ps_release(link);
+ return -ENODEV;
+
+ failure:
+ return -ENODEV;
+} /* xirc2ps_config */
+
+static void
+xirc2ps_release(struct pcmcia_device *link)
+{
+ dev_dbg(&link->dev, "release\n");
+
+ if (link->resource[2]->end) {
+ struct net_device *dev = link->priv;
+ local_info_t *local = netdev_priv(dev);
+ if (local->dingo)
+ iounmap(local->dingo_ccr - 0x0800);
+ }
+ pcmcia_disable_device(link);
+} /* xirc2ps_release */
+
+/*====================================================================*/
+
+
+static int xirc2ps_suspend(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ if (link->open) {
+ netif_device_detach(dev);
+ do_powerdown(dev);
+ }
+
+ return 0;
+}
+
+static int xirc2ps_resume(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ if (link->open) {
+ do_reset(dev,1);
+ netif_device_attach(dev);
+ }
+
+ return 0;
+}
+
+
+/*====================================================================*/
+
+/****************
+ * This is the interrupt service routine.
+ */
+static irqreturn_t
+xirc2ps_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ local_info_t *lp = netdev_priv(dev);
+ unsigned int ioaddr;
+ u_char saved_page;
+ unsigned bytes_rcvd;
+ unsigned int_status, eth_status, rx_status, tx_status;
+ unsigned rsr, pktlen;
+ ulong start_ticks = jiffies; /* fixme: jiffies rolls over every
+ * 497 days; is this something to
+ * worry about? -- on a laptop?
+ */
+
+ if (!netif_device_present(dev))
+ return IRQ_HANDLED;
+
+ ioaddr = dev->base_addr;
+ if (lp->mohawk) { /* must disable the interrupt */
+ PutByte(XIRCREG_CR, 0);
+ }
+
+ pr_debug("%s: interrupt %d at %#x.\n", dev->name, irq, ioaddr);
+
+ saved_page = GetByte(XIRCREG_PR);
+ /* Read the ISR to see what caused the interrupt.
+ * This also clears the interrupt flags on CE2 cards.
+ */
+ int_status = GetByte(XIRCREG_ISR);
+ bytes_rcvd = 0;
+ loop_entry:
+ if (int_status == 0xff) { /* card may be ejected */
+ pr_debug("%s: interrupt %d for dead card\n", dev->name, irq);
+ goto leave;
+ }
+ eth_status = GetByte(XIRCREG_ESR);
+
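+ /* Fetch and acknowledge the MAC status registers: RXST0 is
+ * cleared by writing back the complement of what we read,
+ * TXST0/TXST1 by writing zero.
+ */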
+ SelectPage(0x40);
+ rx_status = GetByte(XIRCREG40_RXST0);
+ PutByte(XIRCREG40_RXST0, (~rx_status & 0xff));
+ tx_status = GetByte(XIRCREG40_TXST0);
+ tx_status |= GetByte(XIRCREG40_TXST1) << 8;
+ PutByte(XIRCREG40_TXST0, 0);
+ PutByte(XIRCREG40_TXST1, 0);
+
+ pr_debug("%s: ISR=%#2.2x ESR=%#2.2x RSR=%#2.2x TSR=%#4.4x\n",
+ dev->name, int_status, eth_status, rx_status, tx_status);
+
+ /***** receive section ******/
+ SelectPage(0);
+ while (eth_status & FullPktRcvd) {
+ rsr = GetByte(XIRCREG0_RSR);
+ if (bytes_rcvd > maxrx_bytes && (rsr & PktRxOk)) {
+ /* too many bytes received during this interrupt; drop the
+ * rest of the packets */
+ dev->stats.rx_dropped++;
+ pr_debug("%s: RX drop, too much done\n", dev->name);
+ } else if (rsr & PktRxOk) {
+ struct sk_buff *skb;
+
+ pktlen = GetWord(XIRCREG0_RBC);
+ bytes_rcvd += pktlen;
+
+ pr_debug("rsr=%#02x packet_length=%u\n", rsr, pktlen);
+
+ skb = dev_alloc_skb(pktlen+3); /* 2 for alignment, 1 extra so we can use insw */
+ if (!skb) {
+ pr_notice("low memory, packet dropped (size=%u)\n", pktlen);
+ dev->stats.rx_dropped++;
+ } else { /* okay get the packet */
+ skb_reserve(skb, 2);
+ if (lp->silicon == 0 ) { /* work around a hardware bug */
+ unsigned rhsa; /* receive start address */
+
+ SelectPage(5);
+ rhsa = GetWord(XIRCREG5_RHSA0);
+ SelectPage(0);
+ rhsa += 3; /* skip control infos */
+ if (rhsa >= 0x8000)
+ rhsa = 0;
+ if (rhsa + pktlen > 0x8000) {
+ unsigned i;
+ u_char *buf = skb_put(skb, pktlen);
+ for (i=0; i < pktlen ; i++, rhsa++) {
+ buf[i] = GetByte(XIRCREG_EDP);
+ if (rhsa == 0x8000) {
+ rhsa = 0;
+ i--;
+ }
+ }
+ } else {
+ insw(ioaddr+XIRCREG_EDP,
+ skb_put(skb, pktlen), (pktlen+1)>>1);
+ }
+ }
+ #if 0
+ else if (lp->mohawk) {
+ /* To use this 32-bit access we should use a manually
+ * optimized loop. Also, the words are swapped; we can get
+ * more performance by using 32-bit access and swapping the
+ * words in a register. We will need this for CardBus.
+ *
+ * Note: don't forget to change the ALLOC_SKB to .. +3
+ */
+ unsigned i;
+ u_long *p = skb_put(skb, pktlen);
+ register u_long a;
+ unsigned int edpreg = ioaddr+XIRCREG_EDP-2;
+ for (i = 0; i < pktlen; i += 4, p++) {
+ a = inl(edpreg);
+ __asm__("rorl $16,%0\n\t"
+ :"=q" (a)
+ : "0" (a));
+ *p = a;
+ }
+ }
+ #endif
+ else {
+ insw(ioaddr+XIRCREG_EDP, skb_put(skb, pktlen),
+ (pktlen+1)>>1);
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pktlen;
+ if (!(rsr & PhyPkt))
+ dev->stats.multicast++;
+ }
+ } else { /* bad packet */
+ pr_debug("rsr=%#02x\n", rsr);
+ }
+ if (rsr & PktTooLong) {
+ dev->stats.rx_frame_errors++;
+ pr_debug("%s: Packet too long\n", dev->name);
+ }
+ if (rsr & CRCErr) {
+ dev->stats.rx_crc_errors++;
+ pr_debug("%s: CRC error\n", dev->name);
+ }
+ if (rsr & AlignErr) {
+ dev->stats.rx_fifo_errors++; /* okay ? */
+ pr_debug("%s: Alignment error\n", dev->name);
+ }
+
+ /* clear the received/dropped/error packet */
+ PutWord(XIRCREG0_DO, 0x8000); /* issue cmd: skip_rx_packet */
+
+ /* get the new ethernet status */
+ eth_status = GetByte(XIRCREG_ESR);
+ }
+ if (rx_status & 0x10) { /* Receive overrun */
+ dev->stats.rx_over_errors++;
+ PutByte(XIRCREG_CR, ClearRxOvrun);
+ pr_debug("receive overrun cleared\n");
+ }
+
+ /***** transmit section ******/
+ if (int_status & PktTxed) {
+ unsigned n, nn;
+
+ n = lp->last_ptr_value;
+ nn = GetByte(XIRCREG0_PTR);
+ lp->last_ptr_value = nn;
+ if (nn < n) /* rollover */
+ dev->stats.tx_packets += 256 - n;
+ else if (n == nn) { /* happens sometimes - don't know why */
+ pr_debug("PTR not changed?\n");
+ } else
+ dev->stats.tx_packets += lp->last_ptr_value - n;
+ netif_wake_queue(dev);
+ }
+ if (tx_status & 0x0002) { /* Excessive collisions */
+ pr_debug("tx restarted due to excessive collisions\n");
+ PutByte(XIRCREG_CR, RestartTx); /* restart transmitter process */
+ }
+ if (tx_status & 0x0040)
+ dev->stats.tx_aborted_errors++;
+
+ /* recalculate our work chunk so that we limit the duration of this
+ * ISR to about 1/10 of a second.
+ * Calculate only if we received a reasonable amount of bytes.
+ */
+ if (bytes_rcvd > 1000) {
+ u_long duration = jiffies - start_ticks;
+
+ if (duration >= HZ/10) { /* if more than about 1/10 second */
+ maxrx_bytes = (bytes_rcvd * (HZ/10)) / duration;
+ if (maxrx_bytes < 2000)
+ maxrx_bytes = 2000;
+ else if (maxrx_bytes > 22000)
+ maxrx_bytes = 22000;
+ pr_debug("set maxrx=%u (rcvd=%u ticks=%lu)\n",
+ maxrx_bytes, bytes_rcvd, duration);
+ } else if (!duration && maxrx_bytes < 22000) {
+ /* now much faster */
+ maxrx_bytes += 2000;
+ if (maxrx_bytes > 22000)
+ maxrx_bytes = 22000;
+ pr_debug("set maxrx=%u\n", maxrx_bytes);
+ }
+ }
+
+ leave:
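+ /* With the lockup_hack module parameter set, keep servicing the
+ * card for as long as the ISR reports pending events (presumably
+ * to avoid a lockup from a missed interrupt edge).
+ */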
+ if (lockup_hack) {
+ if (int_status != 0xff && (int_status = GetByte(XIRCREG_ISR)) != 0)
+ goto loop_entry;
+ }
+ SelectPage(saved_page);
+ PutByte(XIRCREG_CR, EnableIntr); /* re-enable interrupts */
+ /* Instead of dropping packets during a receive, we could
+ * force an interrupt with this command:
+ * PutByte(XIRCREG_CR, EnableIntr|ForceIntr);
+ */
+ return IRQ_HANDLED;
+} /* xirc2ps_interrupt */
+
+/*====================================================================*/
+
+static void
+xirc2ps_tx_timeout_task(struct work_struct *work)
+{
+ local_info_t *local =
+ container_of(work, local_info_t, tx_timeout_task);
+ struct net_device *dev = local->dev;
+ /* reset the card */
+ do_reset(dev,1);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+static void
+xirc_tx_timeout(struct net_device *dev)
+{
+ local_info_t *lp = netdev_priv(dev);
+ dev->stats.tx_errors++;
+ netdev_notice(dev, "transmit timed out\n");
+ schedule_work(&lp->tx_timeout_task);
+}
+
+static netdev_tx_t
+do_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ local_info_t *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ int okay;
+ unsigned freespace;
+ unsigned pktlen = skb->len;
+
+ pr_debug("do_start_xmit(skb=%p, dev=%p) len=%u\n",
+ skb, dev, pktlen);
+
+ /* adjust the packet length to the minimum required
+ * and let skb_padto() fill in the padding.
+ * fixme: For Mohawk we could instead announce a larger
+ * packetlen than we actually have; the chip will pad it
+ * in its buffer with random bytes.
+ */
+ if (pktlen < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ pktlen = ETH_ZLEN;
+ }
+
+ netif_stop_queue(dev);
+ SelectPage(0);
+ PutWord(XIRCREG0_TRS, (u_short)pktlen+2);
+ freespace = GetWord(XIRCREG0_TSO);
+ okay = freespace & 0x8000;
+ freespace &= 0x7fff;
+ /* TRS doesn't work - (indeed it is eliminated with sil-rev 1) */
+ okay = pktlen + 2 < freespace;
+ pr_debug("%s: avail. tx space=%u%s\n",
+ dev->name, freespace, okay ? " (okay)":" (not enough)");
+ if (!okay) { /* not enough space */
+ return NETDEV_TX_BUSY; /* upper layer may decide to requeue this packet */
+ }
+ /* send the packet */
+ PutWord(XIRCREG_EDP, (u_short)pktlen);
+ outsw(ioaddr+XIRCREG_EDP, skb->data, pktlen>>1);
+ if (pktlen & 1)
+ PutByte(XIRCREG_EDP, skb->data[pktlen-1]);
+
+ if (lp->mohawk)
+ PutByte(XIRCREG_CR, TransmitPacket|EnableIntr);
+
+ dev_kfree_skb(skb);
+ dev->stats.tx_bytes += pktlen;
+ netif_start_queue(dev);
+ return NETDEV_TX_OK;
+}
+
+struct set_address_info {
+ int reg_nr;
+ int page_nr;
+ int mohawk;
+ unsigned int ioaddr;
+};
+
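+/****************
+ * Write one 6-byte address to consecutive IA/multicast registers,
+ * advancing reg_nr and wrapping into the next register page once we
+ * run past register 15. The Mohawk wants the bytes in reverse order.
+ */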
+static void set_address(struct set_address_info *sa_info, char *addr)
+{
+ unsigned int ioaddr = sa_info->ioaddr;
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ if (sa_info->reg_nr > 15) {
+ sa_info->reg_nr = 8;
+ sa_info->page_nr++;
+ SelectPage(sa_info->page_nr);
+ }
+ if (sa_info->mohawk)
+ PutByte(sa_info->reg_nr++, addr[5 - i]);
+ else
+ PutByte(sa_info->reg_nr++, addr[i]);
+ }
+}
+
+/****************
+ * Set all addresses: This first one is the individual address,
+ * the next 9 addresses are taken from the multicast list and
+ * the rest is filled with the individual address.
+ */
+static void set_addresses(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ local_info_t *lp = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ struct set_address_info sa_info;
+ int i;
+
+ /*
+ * Set up the info structure so that the first set_address call
+ * does a SelectPage with the right page number. Hence the
+ * off-by-one initial values here.
+ */
+ sa_info.reg_nr = 15 + 1;
+ sa_info.page_nr = 0x50 - 1;
+ sa_info.mohawk = lp->mohawk;
+ sa_info.ioaddr = ioaddr;
+
+ set_address(&sa_info, dev->dev_addr);
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ if (i++ == 9)
+ break;
+ set_address(&sa_info, ha->addr);
+ }
+ while (i++ < 9)
+ set_address(&sa_info, dev->dev_addr);
+ SelectPage(0);
+}
+
+/****************
+ * Set or clear the multicast filter for this adaptor.
+ * We can filter up to 9 addresses, if more are requested we set
+ * multicast promiscuous mode.
+ */
+
+static void
+set_multicast_list(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ unsigned value;
+
+ SelectPage(0x42);
+ value = GetByte(XIRCREG42_SWC1) & 0xC0;
+
+ if (dev->flags & IFF_PROMISC) { /* snoop */
+ PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */
+ } else if (netdev_mc_count(dev) > 9 || (dev->flags & IFF_ALLMULTI)) {
+ PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */
+ } else if (!netdev_mc_empty(dev)) {
+ /* the chip can filter 9 addresses perfectly */
+ PutByte(XIRCREG42_SWC1, value | 0x01);
+ SelectPage(0x40);
+ PutByte(XIRCREG40_CMD0, Offline);
+ set_addresses(dev);
+ SelectPage(0x40);
+ PutByte(XIRCREG40_CMD0, EnableRecv | Online);
+ } else { /* standard usage */
+ PutByte(XIRCREG42_SWC1, value | 0x00);
+ }
+ SelectPage(0);
+}
+
+static int
+do_config(struct net_device *dev, struct ifmap *map)
+{
+ local_info_t *local = netdev_priv(dev);
+
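+ /* A port value of 255 is ignored, 0 re-enables media
+ * auto-probing, 1..4 select a fixed transceiver (anything
+ * above 4 is rejected).
+ */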
+ pr_debug("do_config(%p)\n", dev);
+ if (map->port != 255 && map->port != dev->if_port) {
+ if (map->port > 4)
+ return -EINVAL;
+ if (!map->port) {
+ local->probe_port = 1;
+ dev->if_port = 1;
+ } else {
+ local->probe_port = 0;
+ dev->if_port = map->port;
+ }
+ netdev_info(dev, "switching to %s port\n", if_names[dev->if_port]);
+ do_reset(dev,1); /* not the fine way :-) */
+ }
+ return 0;
+}
+
+/****************
+ * Open the driver
+ */
+static int
+do_open(struct net_device *dev)
+{
+ local_info_t *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+
+ dev_dbg(&link->dev, "do_open(%p)\n", dev);
+
+ /* Check that the PCMCIA card is still here. */
+ /* Physical device present signature. */
+ if (!pcmcia_dev_present(link))
+ return -ENODEV;
+
+ /* okay */
+ link->open++;
+
+ netif_start_queue(dev);
+ do_reset(dev,1);
+
+ return 0;
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, "xirc2ps_cs");
+ sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+};
+
+static int
+do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ local_info_t *local = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ struct mii_ioctl_data *data = if_mii(rq);
+
+ pr_debug("%s: ioctl(%-.6s, %#04x) %04x %04x %04x %04x\n",
+ dev->name, rq->ifr_ifrn.ifrn_name, cmd,
+ data->phy_id, data->reg_num, data->val_in, data->val_out);
+
+ if (!local->mohawk)
+ return -EOPNOTSUPP;
+
+ switch(cmd) {
+ case SIOCGMIIPHY: /* Get the address of the PHY in use. */
+ data->phy_id = 0; /* we have only this address */
+ /* fall through */
+ case SIOCGMIIREG: /* Read the specified MII register. */
+ data->val_out = mii_rd(ioaddr, data->phy_id & 0x1f,
+ data->reg_num & 0x1f);
+ break;
+ case SIOCSMIIREG: /* Write the specified MII register */
+ mii_wr(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in,
+ 16);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static void
+hardreset(struct net_device *dev)
+{
+ local_info_t *local = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+
+ SelectPage(4);
+ udelay(1);
+ PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */
+ msleep(40); /* wait 40 msec */
+ if (local->mohawk)
+ PutByte(XIRCREG4_GPR1, 1); /* set bit 0: power up */
+ else
+ PutByte(XIRCREG4_GPR1, 1 | 4); /* set bit 0: power up, bit 2: AIC */
+ msleep(20); /* wait 20 msec */
+}
+
+static void
+do_reset(struct net_device *dev, int full)
+{
+ local_info_t *local = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ unsigned value;
+
+ pr_debug("%s: do_reset(%p,%d)\n", dev? dev->name:"eth?", dev, full);
+
+ hardreset(dev);
+ PutByte(XIRCREG_CR, SoftReset); /* set */
+ msleep(20); /* wait 20 msec */
+ PutByte(XIRCREG_CR, 0); /* clear */
+ msleep(40); /* wait 40 msec */
+ if (local->mohawk) {
+ SelectPage(4);
+ /* set pin GP1 and GP2 to output (0x0c)
+ * set GP1 to low to power up the ML6692 (0x00)
+ * set GP2 to high to power up the 10MHz chip (0x02)
+ */
+ PutByte(XIRCREG4_GPR0, 0x0e);
+ }
+
+ /* give the circuits some time to power up */
+ msleep(500); /* about 500ms */
+
+ local->last_ptr_value = 0;
+ local->silicon = local->mohawk ? (GetByte(XIRCREG4_BOV) & 0x70) >> 4
+ : (GetByte(XIRCREG4_BOV) & 0x30) >> 4;
+
+ if (local->probe_port) {
+ if (!local->mohawk) {
+ SelectPage(4);
+ PutByte(XIRCREG4_GPR0, 4);
+ local->probe_port = 0;
+ }
+ } else if (dev->if_port == 2) { /* enable 10Base2 */
+ SelectPage(0x42);
+ PutByte(XIRCREG42_SWC1, 0xC0);
+ } else { /* enable 10BaseT */
+ SelectPage(0x42);
+ PutByte(XIRCREG42_SWC1, 0x80);
+ }
+ msleep(40); /* wait 40 msec to let it complete */
+
+ #if 0
+ {
+ SelectPage(0);
+ value = GetByte(XIRCREG_ESR); /* read the ESR */
+ pr_debug("%s: ESR is: %#02x\n", dev->name, value);
+ }
+ #endif
+
+ /* setup the ECR */
+ SelectPage(1);
+ PutByte(XIRCREG1_IMR0, 0xff); /* allow all ints */
+ PutByte(XIRCREG1_IMR1, 1 ); /* and Set TxUnderrunDetect */
+ value = GetByte(XIRCREG1_ECR);
+ #if 0
+ if (local->mohawk)
+ value |= DisableLinkPulse;
+ PutByte(XIRCREG1_ECR, value);
+ #endif
+ pr_debug("%s: ECR is: %#02x\n", dev->name, value);
+
+ SelectPage(0x42);
+ PutByte(XIRCREG42_SWC0, 0x20); /* disable source insertion */
+
+ if (local->silicon != 1) {
+ /* set the local memory dividing line.
+ * The comments in the sample code say that this is only
+ * settable with the scipper version 2, which is revision 0,
+ * and always for CE3 cards.
+ */
+ SelectPage(2);
+ PutWord(XIRCREG2_RBS, 0x2000);
+ }
+
+ if (full)
+ set_addresses(dev);
+
+ /* Hardware workaround:
+ * The receive byte pointer after reset is off by 1 so we need
+ * to move the offset pointer back to 0.
+ */
+ SelectPage(0);
+ PutWord(XIRCREG0_DO, 0x2000); /* change offset command, off=0 */
+
+ /* setup MAC IMRs and clear status registers */
+ SelectPage(0x40); /* Bit 7 ... bit 0 */
+ PutByte(XIRCREG40_RMASK0, 0xff); /* ROK, RAB, rsv, RO, CRC, AE, PTL, MP */
+ PutByte(XIRCREG40_TMASK0, 0xff); /* TOK, TAB, SQE, LL, TU, JAB, EXC, CRS */
+ PutByte(XIRCREG40_TMASK1, 0xb0); /* rsv, rsv, PTD, EXT, rsv,rsv,rsv, rsv*/
+ PutByte(XIRCREG40_RXST0, 0x00); /* ROK, RAB, REN, RO, CRC, AE, PTL, MP */
+ PutByte(XIRCREG40_TXST0, 0x00); /* TOK, TAB, SQE, LL, TU, JAB, EXC, CRS */
+ PutByte(XIRCREG40_TXST1, 0x00); /* TEN, rsv, PTD, EXT, retry_counter:4 */
+
+ if (full && local->mohawk && init_mii(dev)) {
+ if (dev->if_port == 4 || local->dingo || local->new_mii) {
+ netdev_info(dev, "MII selected\n");
+ SelectPage(2);
+ PutByte(XIRCREG2_MSR, GetByte(XIRCREG2_MSR) | 0x08);
+ msleep(20);
+ } else {
+ netdev_info(dev, "MII detected; using 10mbs\n");
+ SelectPage(0x42);
+ if (dev->if_port == 2) /* enable 10Base2 */
+ PutByte(XIRCREG42_SWC1, 0xC0);
+ else /* enable 10BaseT */
+ PutByte(XIRCREG42_SWC1, 0x80);
+ msleep(40); /* wait 40 msec to let it complete */
+ }
+ if (full_duplex)
+ PutByte(XIRCREG1_ECR, GetByte(XIRCREG1_ECR) | FullDuplex);
+ } else { /* No MII */
+ SelectPage(0);
+ value = GetByte(XIRCREG_ESR); /* read the ESR */
+ dev->if_port = (value & MediaSelect) ? 1 : 2;
+ }
+
+ /* configure the LEDs */
+ SelectPage(2);
+ if (dev->if_port == 1 || dev->if_port == 4) /* TP: Link and Activity */
+ PutByte(XIRCREG2_LED, 0x3b);
+ else /* Coax: Not-Collision and Activity */
+ PutByte(XIRCREG2_LED, 0x3a);
+
+ if (local->dingo)
+ PutByte(0x0b, 0x04); /* 100 Mbit LED */
+
+ /* enable receiver and put the mac online */
+ if (full) {
+ set_multicast_list(dev);
+ SelectPage(0x40);
+ PutByte(XIRCREG40_CMD0, EnableRecv | Online);
+ }
+
+ /* setup Ethernet IMR and enable interrupts */
+ SelectPage(1);
+ PutByte(XIRCREG1_IMR0, 0xff);
+ udelay(1);
+ SelectPage(0);
+ PutByte(XIRCREG_CR, EnableIntr);
+ if (local->modem && !local->dingo) { /* do some magic */
+ if (!(GetByte(0x10) & 0x01))
+ PutByte(0x10, 0x11); /* unmask master-int bit */
+ }
+
+ if (full)
+ netdev_info(dev, "media %s, silicon revision %d\n",
+ if_names[dev->if_port], local->silicon);
+ /* We should switch back to page 0 to avoid a bug in revision 0
+ * where regs with offset below 8 can't be read after an access
+ * to the MAC registers */
+ SelectPage(0);
+}
+
+/****************
+ * Initialize the Media-Independent-Interface
+ * Returns: True if we have a good MII
+ */
+static int
+init_mii(struct net_device *dev)
+{
+ local_info_t *local = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ unsigned control, status, linkpartner;
+ int i;
+
+ if (if_port == 4 || if_port == 1) { /* force 100BaseT or 10BaseT */
+ dev->if_port = if_port;
+ local->probe_port = 0;
+ return 1;
+ }
+
+ status = mii_rd(ioaddr, 0, 1);
+ if ((status & 0xff00) != 0x7800)
+ return 0; /* No MII */
+
+ local->new_mii = (mii_rd(ioaddr, 0, 2) != 0xffff);
+
+ if (local->probe_port)
+ control = 0x1000; /* auto neg */
+ else if (dev->if_port == 4)
+ control = 0x2000; /* no auto neg, 100Mbps mode */
+ else
+ control = 0x0000; /* no auto neg, 10Mbps mode */
+ mii_wr(ioaddr, 0, 0, control, 16);
+ udelay(100);
+ control = mii_rd(ioaddr, 0, 0);
+
+ if (control & 0x0400) {
+ netdev_notice(dev, "can't take PHY out of isolation mode\n");
+ local->probe_port = 0;
+ return 0;
+ }
+
+ if (local->probe_port) {
+ /* according to the DP83840A specs the auto-negotiation process
+ * may take up to 3.5 sec, so we use the same limit for our ML6692.
+ * Fixme: Better to use a timer here!
+ */
+ for (i=0; i < 35; i++) {
+ msleep(100); /* wait 100 msec */
+ status = mii_rd(ioaddr, 0, 1);
+ if ((status & 0x0020) && (status & 0x0004))
+ break;
+ }
+
+ if (!(status & 0x0020)) {
+ netdev_info(dev, "autonegotiation failed; using 10mbs\n");
+ if (!local->new_mii) {
+ control = 0x0000;
+ mii_wr(ioaddr, 0, 0, control, 16);
+ udelay(100);
+ SelectPage(0);
+ dev->if_port = (GetByte(XIRCREG_ESR) & MediaSelect) ? 1 : 2;
+ }
+ } else {
+ linkpartner = mii_rd(ioaddr, 0, 5);
+ netdev_info(dev, "MII link partner: %04x\n", linkpartner);
+ if (linkpartner & 0x0080) {
+ dev->if_port = 4;
+ } else
+ dev->if_port = 1;
+ }
+ }
+
+ return 1;
+}
+
+static void
+do_powerdown(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+
+ pr_debug("do_powerdown(%p)\n", dev);
+
+ SelectPage(4);
+ PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */
+ SelectPage(0);
+}
+
+static int
+do_stop(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ local_info_t *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+
+ dev_dbg(&link->dev, "do_stop(%p)\n", dev);
+
+ if (!link)
+ return -ENODEV;
+
+ netif_stop_queue(dev);
+
+ SelectPage(0);
+ PutByte(XIRCREG_CR, 0); /* disable interrupts */
+ SelectPage(0x01);
+ PutByte(XIRCREG1_IMR0, 0x00); /* forbid all ints */
+ SelectPage(4);
+ PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */
+ SelectPage(0);
+
+ link->open--;
+ return 0;
+}
+
+static const struct pcmcia_device_id xirc2ps_ids[] = {
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0089, 0x110a),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0138, 0x110a),
+ PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "CEM28", 0x2e3ee845, 0x0ea978ea),
+ PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "CEM33", 0x2e3ee845, 0x80609023),
+ PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "CEM56", 0x2e3ee845, 0xa650c32a),
+ PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "REM10", 0x2e3ee845, 0x76df1d29),
+ PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "XEM5600", 0x2e3ee845, 0xf1403719),
+ PCMCIA_PFC_DEVICE_PROD_ID12(0, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
+ PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x010a),
+ PCMCIA_DEVICE_PROD_ID13("Toshiba Information Systems", "TPCENET", 0x1b3b94fe, 0xf381c1a2),
+ PCMCIA_DEVICE_PROD_ID13("Xircom", "CE3-10/100", 0x2e3ee845, 0x0ec0ac37),
+ PCMCIA_DEVICE_PROD_ID13("Xircom", "PS-CE2-10", 0x2e3ee845, 0x947d9073),
+ PCMCIA_DEVICE_PROD_ID13("Xircom", "R2E-100BTX", 0x2e3ee845, 0x2464a6e3),
+ PCMCIA_DEVICE_PROD_ID13("Xircom", "RE-10", 0x2e3ee845, 0x3e08d609),
+ PCMCIA_DEVICE_PROD_ID13("Xircom", "XE2000", 0x2e3ee845, 0xf7188e46),
+ PCMCIA_DEVICE_PROD_ID12("Compaq", "Ethernet LAN Card", 0x54f7c49c, 0x9fd2f0a2),
+ PCMCIA_DEVICE_PROD_ID12("Compaq", "Netelligent 10/100 PC Card", 0x54f7c49c, 0xefe96769),
+ PCMCIA_DEVICE_PROD_ID12("Intel", "EtherExpress(TM) PRO/100 PC Card Mobile Adapter16", 0x816cc815, 0x174397db),
+ PCMCIA_DEVICE_PROD_ID12("Toshiba", "10/100 Ethernet PC Card", 0x44a09d9c, 0xb44deecf),
+ /* also matches CFE-10 cards! */
+ /* PCMCIA_DEVICE_MANF_CARD(0x0105, 0x010a), */
+ PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, xirc2ps_ids);
+
+
+static struct pcmcia_driver xirc2ps_cs_driver = {
+ .owner = THIS_MODULE,
+ .name = "xirc2ps_cs",
+ .probe = xirc2ps_probe,
+ .remove = xirc2ps_detach,
+ .id_table = xirc2ps_ids,
+ .suspend = xirc2ps_suspend,
+ .resume = xirc2ps_resume,
+};
+
+static int __init
+init_xirc2ps_cs(void)
+{
+ return pcmcia_register_driver(&xirc2ps_cs_driver);
+}
+
+static void __exit
+exit_xirc2ps_cs(void)
+{
+ pcmcia_unregister_driver(&xirc2ps_cs_driver);
+}
+
+module_init(init_xirc2ps_cs);
+module_exit(exit_xirc2ps_cs);
+
+#ifndef MODULE
+static int __init setup_xirc2ps_cs(char *str)
+{
+ /* if_port, full_duplex, do_sound, lockup_hack
+ */
+ int ints[10] = { -1 };
+
+ str = get_options(str, 9, ints);
+
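+ /* get_options() stores the number of parsed integers in ints[0];
+ * only pick up an option that was actually supplied.
+ */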
+#define MAYBE_SET(X,Y) if (ints[0] >= Y && ints[Y] != -1) { X = ints[Y]; }
+ MAYBE_SET(if_port, 3);
+ MAYBE_SET(full_duplex, 4);
+ MAYBE_SET(do_sound, 5);
+ MAYBE_SET(lockup_hack, 6);
+#undef MAYBE_SET
+
+ return 1;
+}
+
+__setup("xirc2ps_cs=", setup_xirc2ps_cs);
+#endif
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
new file mode 100644
index 000000000000..cf67352cea14
--- /dev/null
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -0,0 +1,32 @@
+#
+# Intel XScale IXP device configuration
+#
+
+config NET_VENDOR_XSCALE
+ bool "Intel XScale IXP devices"
+ default y
+ depends on NET_VENDOR_INTEL && ((ARM && ARCH_IXP4XX && \
+ IXP4XX_NPE && IXP4XX_QMGR) || ARCH_ENP2611)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question does not directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about XScale IXP devices. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+if NET_VENDOR_XSCALE
+
+config IXP4XX_ETH
+ tristate "Intel IXP4xx Ethernet support"
+ depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
+ select PHYLIB
+ ---help---
+ Say Y here if you want to use the built-in Ethernet ports
+ on the IXP4xx processor.
+
+source "drivers/net/ethernet/xscale/ixp2000/Kconfig"
+
+endif # NET_VENDOR_XSCALE
diff --git a/drivers/net/ethernet/xscale/Makefile b/drivers/net/ethernet/xscale/Makefile
new file mode 100644
index 000000000000..b195b9d7fe81
--- /dev/null
+++ b/drivers/net/ethernet/xscale/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Intel XScale IXP device drivers.
+#
+
+obj-$(CONFIG_ENP2611_MSF_NET) += ixp2000/
+obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
diff --git a/drivers/net/ethernet/xscale/ixp2000/Kconfig b/drivers/net/ethernet/xscale/ixp2000/Kconfig
new file mode 100644
index 000000000000..58dbc5b876bc
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp2000/Kconfig
@@ -0,0 +1,6 @@
+config ENP2611_MSF_NET
+ tristate "Radisys ENP2611 MSF network interface support"
+ depends on ARCH_ENP2611
+ ---help---
+ This is a driver for the MSF network interface unit in
+ the IXP2400 on the Radisys ENP2611 platform.
diff --git a/drivers/net/ethernet/xscale/ixp2000/Makefile b/drivers/net/ethernet/xscale/ixp2000/Makefile
new file mode 100644
index 000000000000..fd38351ceaa7
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp2000/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ENP2611_MSF_NET) += enp2611_mod.o
+
+enp2611_mod-objs := caleb.o enp2611.o ixp2400-msf.o ixpdev.o pm3386.o
diff --git a/drivers/net/ethernet/xscale/ixp2000/caleb.c b/drivers/net/ethernet/xscale/ixp2000/caleb.c
new file mode 100644
index 000000000000..7dea5b95012c
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp2000/caleb.c
@@ -0,0 +1,136 @@
+/*
+ * Helper functions for the SPI-3 bridge FPGA on the Radisys ENP2611
+ * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Dedicated to Marija Kulikova.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include "caleb.h"
+
+#define CALEB_IDLO 0x00
+#define CALEB_IDHI 0x01
+#define CALEB_RID 0x02
+#define CALEB_RESET 0x03
+#define CALEB_INTREN0 0x04
+#define CALEB_INTREN1 0x05
+#define CALEB_INTRSTAT0 0x06
+#define CALEB_INTRSTAT1 0x07
+#define CALEB_PORTEN 0x08
+#define CALEB_BURST 0x09
+#define CALEB_PORTPAUS 0x0A
+#define CALEB_PORTPAUSD 0x0B
+#define CALEB_PHY0RX 0x10
+#define CALEB_PHY1RX 0x11
+#define CALEB_PHY0TX 0x12
+#define CALEB_PHY1TX 0x13
+#define CALEB_IXPRX_HI_CNTR 0x15
+#define CALEB_PHY0RX_HI_CNTR 0x16
+#define CALEB_PHY1RX_HI_CNTR 0x17
+#define CALEB_IXPRX_CNTR 0x18
+#define CALEB_PHY0RX_CNTR 0x19
+#define CALEB_PHY1RX_CNTR 0x1A
+#define CALEB_IXPTX_CNTR 0x1B
+#define CALEB_PHY0TX_CNTR 0x1C
+#define CALEB_PHY1TX_CNTR 0x1D
+#define CALEB_DEBUG0 0x1E
+#define CALEB_DEBUG1 0x1F
+
+
+static u8 caleb_reg_read(int reg)
+{
+ u8 value;
+
+ value = *((volatile u8 *)(ENP2611_CALEB_VIRT_BASE + reg));
+
+// printk(KERN_INFO "caleb_reg_read(%d) = %.2x\n", reg, value);
+
+ return value;
+}
+
+static void caleb_reg_write(int reg, u8 value)
+{
+ u8 dummy;
+
+// printk(KERN_INFO "caleb_reg_write(%d, %.2x)\n", reg, value);
+
+ *((volatile u8 *)(ENP2611_CALEB_VIRT_BASE + reg)) = value;
+
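+ /* Dummy read-back, kept alive by the asm below, so that the
+ * write above is posted to the FPGA before we return.
+ */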
+ dummy = *((volatile u8 *)ENP2611_CALEB_VIRT_BASE);
+ __asm__ __volatile__("mov %0, %0" : "+r" (dummy));
+}
+
+
+void caleb_reset(void)
+{
+ /*
+ * Perform a chip reset.
+ */
+ caleb_reg_write(CALEB_RESET, 0x02);
+ udelay(1);
+
+ /*
+ * Enable all interrupt sources. This is needed to get
+ * meaningful results out of the status bits (register 6
+ * and 7.)
+ */
+ caleb_reg_write(CALEB_INTREN0, 0xff);
+ caleb_reg_write(CALEB_INTREN1, 0x07);
+
+ /*
+ * Set RX and TX FIFO thresholds to 1.5kb.
+ */
+ caleb_reg_write(CALEB_PHY0RX, 0x11);
+ caleb_reg_write(CALEB_PHY1RX, 0x11);
+ caleb_reg_write(CALEB_PHY0TX, 0x11);
+ caleb_reg_write(CALEB_PHY1TX, 0x11);
+
+ /*
+ * Program SPI-3 burst size.
+ */
+ caleb_reg_write(CALEB_BURST, 0); // 64-byte RBUF mpackets
+// caleb_reg_write(CALEB_BURST, 1); // 128-byte RBUF mpackets
+// caleb_reg_write(CALEB_BURST, 2); // 256-byte RBUF mpackets
+}
+
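+/*
+ * The PORTEN register carries one RX enable bit per port in bits 0-3
+ * and the corresponding TX enable bits in bits 4-7.
+ */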
+void caleb_enable_rx(int port)
+{
+ u8 temp;
+
+ temp = caleb_reg_read(CALEB_PORTEN);
+ temp |= 1 << port;
+ caleb_reg_write(CALEB_PORTEN, temp);
+}
+
+void caleb_disable_rx(int port)
+{
+ u8 temp;
+
+ temp = caleb_reg_read(CALEB_PORTEN);
+ temp &= ~(1 << port);
+ caleb_reg_write(CALEB_PORTEN, temp);
+}
+
+void caleb_enable_tx(int port)
+{
+ u8 temp;
+
+ temp = caleb_reg_read(CALEB_PORTEN);
+ temp |= 1 << (port + 4);
+ caleb_reg_write(CALEB_PORTEN, temp);
+}
+
+void caleb_disable_tx(int port)
+{
+ u8 temp;
+
+ temp = caleb_reg_read(CALEB_PORTEN);
+ temp &= ~(1 << (port + 4));
+ caleb_reg_write(CALEB_PORTEN, temp);
+}
diff --git a/drivers/net/ethernet/xscale/ixp2000/caleb.h b/drivers/net/ethernet/xscale/ixp2000/caleb.h
new file mode 100644
index 000000000000..e93a1ef5b8a3
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp2000/caleb.h
@@ -0,0 +1,22 @@
+/*
+ * Helper functions for the SPI-3 bridge FPGA on the Radisys ENP2611
+ * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Dedicated to Marija Kulikova.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __CALEB_H
+#define __CALEB_H
+
+void caleb_reset(void);
+void caleb_enable_rx(int port);
+void caleb_disable_rx(int port);
+void caleb_enable_tx(int port);
+void caleb_disable_tx(int port);
+
+
+#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/enp2611.c b/drivers/net/ethernet/xscale/ixp2000/enp2611.c
new file mode 100644
index 000000000000..34a6cfd17930
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp2000/enp2611.c
@@ -0,0 +1,232 @@
+/*
+ * IXP2400 MSF network device driver for the Radisys ENP2611
+ * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Dedicated to Marija Kulikova.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <asm/hardware/uengine.h>
+#include <asm/mach-types.h>
+#include <asm/io.h>
+#include "ixpdev.h"
+#include "caleb.h"
+#include "ixp2400-msf.h"
+#include "pm3386.h"
+
+/***********************************************************************
+ * The Radisys ENP2611 is a PCI form factor board with three SFP GBIC
+ * slots, connected via two PMC/Sierra 3386s and an SPI-3 bridge FPGA
+ * to the IXP2400.
+ *
+ * +-------------+
+ * SFP GBIC #0 ---+ | +---------+
+ * | PM3386 #0 +-------+ |
+ * SFP GBIC #1 ---+ | | "Caleb" | +---------+
+ * +-------------+ | | | |
+ * | SPI-3 +---------+ IXP2400 |
+ * +-------------+ | bridge | | |
+ * SFP GBIC #2 ---+ | | FPGA | +---------+
+ * | PM3386 #1 +-------+ |
+ * | | +---------+
+ * +-------------+
+ * ^ ^ ^
+ * | 1.25Gbaud | 104MHz | 104MHz
+ * | SERDES ea. | SPI-3 ea. | SPI-3
+ *
+ ***********************************************************************/
+static struct ixp2400_msf_parameters enp2611_msf_parameters =
+{
+ .rx_mode = IXP2400_RX_MODE_UTOPIA_POS |
+ IXP2400_RX_MODE_1x32 |
+ IXP2400_RX_MODE_MPHY |
+ IXP2400_RX_MODE_MPHY_32 |
+ IXP2400_RX_MODE_MPHY_POLLED_STATUS |
+ IXP2400_RX_MODE_MPHY_LEVEL3 |
+ IXP2400_RX_MODE_RBUF_SIZE_64,
+
+ .rxclk01_multiplier = IXP2400_PLL_MULTIPLIER_16,
+
+ .rx_poll_ports = 3,
+
+ .rx_channel_mode = {
+ IXP2400_PORT_RX_MODE_MASTER |
+ IXP2400_PORT_RX_MODE_POS_PHY |
+ IXP2400_PORT_RX_MODE_POS_PHY_L3 |
+ IXP2400_PORT_RX_MODE_ODD_PARITY |
+ IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
+
+ IXP2400_PORT_RX_MODE_MASTER |
+ IXP2400_PORT_RX_MODE_POS_PHY |
+ IXP2400_PORT_RX_MODE_POS_PHY_L3 |
+ IXP2400_PORT_RX_MODE_ODD_PARITY |
+ IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
+
+ IXP2400_PORT_RX_MODE_MASTER |
+ IXP2400_PORT_RX_MODE_POS_PHY |
+ IXP2400_PORT_RX_MODE_POS_PHY_L3 |
+ IXP2400_PORT_RX_MODE_ODD_PARITY |
+ IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
+
+ IXP2400_PORT_RX_MODE_MASTER |
+ IXP2400_PORT_RX_MODE_POS_PHY |
+ IXP2400_PORT_RX_MODE_POS_PHY_L3 |
+ IXP2400_PORT_RX_MODE_ODD_PARITY |
+ IXP2400_PORT_RX_MODE_2_CYCLE_DECODE
+ },
+
+ .tx_mode = IXP2400_TX_MODE_UTOPIA_POS |
+ IXP2400_TX_MODE_1x32 |
+ IXP2400_TX_MODE_MPHY |
+ IXP2400_TX_MODE_MPHY_32 |
+ IXP2400_TX_MODE_MPHY_POLLED_STATUS |
+ IXP2400_TX_MODE_MPHY_LEVEL3 |
+ IXP2400_TX_MODE_TBUF_SIZE_64,
+
+ .txclk01_multiplier = IXP2400_PLL_MULTIPLIER_16,
+
+ .tx_poll_ports = 3,
+
+ .tx_channel_mode = {
+ IXP2400_PORT_TX_MODE_MASTER |
+ IXP2400_PORT_TX_MODE_POS_PHY |
+ IXP2400_PORT_TX_MODE_ODD_PARITY |
+ IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
+
+ IXP2400_PORT_TX_MODE_MASTER |
+ IXP2400_PORT_TX_MODE_POS_PHY |
+ IXP2400_PORT_TX_MODE_ODD_PARITY |
+ IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
+
+ IXP2400_PORT_TX_MODE_MASTER |
+ IXP2400_PORT_TX_MODE_POS_PHY |
+ IXP2400_PORT_TX_MODE_ODD_PARITY |
+ IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
+
+ IXP2400_PORT_TX_MODE_MASTER |
+ IXP2400_PORT_TX_MODE_POS_PHY |
+ IXP2400_PORT_TX_MODE_ODD_PARITY |
+ IXP2400_PORT_TX_MODE_2_CYCLE_DECODE
+ }
+};
+
+static struct net_device *nds[3];
+static struct timer_list link_check_timer;
+
+/* @@@ Poll the SFP moddef0 line too. */
+/* @@@ Try to use the pm3386 DOOL interrupt as well. */
+static void enp2611_check_link_status(unsigned long __dummy)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ struct net_device *dev;
+ int status;
+
+ dev = nds[i];
+ if (dev == NULL)
+ continue;
+
+ status = pm3386_is_link_up(i);
+ if (status && !netif_carrier_ok(dev)) {
+ /* @@@ Should report autonegotiation status. */
+ printk(KERN_INFO "%s: NIC Link is Up\n", dev->name);
+
+ pm3386_enable_tx(i);
+ caleb_enable_tx(i);
+ netif_carrier_on(dev);
+ } else if (!status && netif_carrier_ok(dev)) {
+ printk(KERN_INFO "%s: NIC Link is Down\n", dev->name);
+
+ netif_carrier_off(dev);
+ caleb_disable_tx(i);
+ pm3386_disable_tx(i);
+ }
+ }
+
+ link_check_timer.expires = jiffies + HZ / 10;
+ add_timer(&link_check_timer);
+}
+
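+/*
+ * Administratively enable or disable a port. On the way up we open
+ * the RX path; on the way down we stop TX first and drop carrier
+ * before closing the RX path.
+ */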
+static void enp2611_set_port_admin_status(int port, int up)
+{
+ if (up) {
+ caleb_enable_rx(port);
+
+ pm3386_set_carrier(port, 1);
+ pm3386_enable_rx(port);
+ } else {
+ caleb_disable_tx(port);
+ pm3386_disable_tx(port);
+ /* @@@ Flush out pending packets. */
+ pm3386_set_carrier(port, 0);
+
+ pm3386_disable_rx(port);
+ caleb_disable_rx(port);
+ }
+}
+
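+/*
+ * Board init: reset the SPI-3 bridge and the PM3386s, allocate one
+ * net_device per PM3386 port, program the MSF and start a link-poll
+ * timer that re-arms every HZ/10 jiffies.
+ */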
+static int __init enp2611_init_module(void)
+{
+ int ports;
+ int i;
+
+ if (!machine_is_enp2611())
+ return -ENODEV;
+
+ caleb_reset();
+ pm3386_reset();
+
+ ports = pm3386_port_count();
+ for (i = 0; i < ports; i++) {
+ nds[i] = ixpdev_alloc(i, sizeof(struct ixpdev_priv));
+ if (nds[i] == NULL) {
+ while (--i >= 0)
+ free_netdev(nds[i]);
+ return -ENOMEM;
+ }
+
+ pm3386_init_port(i);
+ pm3386_get_mac(i, nds[i]->dev_addr);
+ }
+
+ ixp2400_msf_init(&enp2611_msf_parameters);
+
+ if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) {
+ for (i = 0; i < ports; i++)
+ if (nds[i])
+ free_netdev(nds[i]);
+ return -EINVAL;
+ }
+
+ init_timer(&link_check_timer);
+ link_check_timer.function = enp2611_check_link_status;
+ link_check_timer.expires = jiffies;
+ add_timer(&link_check_timer);
+
+ return 0;
+}
+
+static void __exit enp2611_cleanup_module(void)
+{
+ int i;
+
+ del_timer_sync(&link_check_timer);
+
+ ixpdev_deinit();
+ for (i = 0; i < 3; i++)
+ free_netdev(nds[i]);
+}
+
+module_init(enp2611_init_module);
+module_exit(enp2611_cleanup_module);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c
new file mode 100644
index 000000000000..f5ffd7e05d26
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c
@@ -0,0 +1,212 @@
+/*
+ * Generic library functions for the MSF (Media and Switch Fabric) unit
+ * found on the Intel IXP2400 network processor.
+ *
+ * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Dedicated to Marija Kulikova.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of the
+ * License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <mach/hardware.h>
+#include <mach/ixp2000-regs.h>
+#include <asm/delay.h>
+#include <asm/io.h>
+#include "ixp2400-msf.h"
+
+/*
+ * This is the Intel recommended PLL init procedure as described on
+ * page 340 of the IXP2400/IXP2800 Programmer's Reference Manual.
+ */
+static void ixp2400_pll_init(struct ixp2400_msf_parameters *mp)
+{
+ int rx_dual_clock;
+ int tx_dual_clock;
+ u32 value;
+
+ /*
+ * If the RX mode is not 1x32, we have to enable both RX PLLs
+ * (#0 and #1.) The same thing for the TX direction.
+ */
+ rx_dual_clock = !!(mp->rx_mode & IXP2400_RX_MODE_WIDTH_MASK);
+ tx_dual_clock = !!(mp->tx_mode & IXP2400_TX_MODE_WIDTH_MASK);
+
+ /*
+ * Read initial value.
+ */
+ value = ixp2000_reg_read(IXP2000_MSF_CLK_CNTRL);
+
+ /*
+ * Put PLLs in powerdown and bypass mode.
+ */
+ value |= 0x0000f0f0;
+ ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
+
+ /*
+ * Set single or dual clock mode bits.
+ */
+ value &= ~0x03000000;
+ value |= (rx_dual_clock << 24) | (tx_dual_clock << 25);
+
+ /*
+ * Set multipliers.
+ */
+ value &= ~0x00ff0000;
+ value |= mp->rxclk01_multiplier << 16;
+ value |= mp->rxclk23_multiplier << 18;
+ value |= mp->txclk01_multiplier << 20;
+ value |= mp->txclk23_multiplier << 22;
+
+ /*
+ * And write value.
+ */
+ ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
+
+ /*
+ * Disable PLL bypass mode.
+ */
+ value &= ~(0x00005000 | rx_dual_clock << 13 | tx_dual_clock << 15);
+ ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
+
+ /*
+ * Turn on PLLs.
+ */
+ value &= ~(0x00000050 | rx_dual_clock << 5 | tx_dual_clock << 7);
+ ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
+
+ /*
+ * Wait for PLLs to lock. There are lock status bits, but IXP2400
+ * erratum #65 says that these lock bits should not be relied upon
+ * as they might not accurately reflect the true state of the PLLs.
+ */
+ udelay(100);
+}
+
+/*
+ * Needed according to p480 of Programmer's Reference Manual.
+ */
+static void ixp2400_msf_free_rbuf_entries(struct ixp2400_msf_parameters *mp)
+{
+ int size_bits;
+ int i;
+
+ /*
+ * Work around IXP2400 erratum #69 (silent RBUF-to-DRAM transfer
+ * corruption) in the Intel-recommended way: do not add the RBUF
+ * elements susceptible to corruption to the freelist.
+ */
+ size_bits = mp->rx_mode & IXP2400_RX_MODE_RBUF_SIZE_MASK;
+ if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_64) {
+ for (i = 1; i < 128; i++) {
+ if (i == 9 || i == 18 || i == 27)
+ continue;
+ ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
+ }
+ } else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_128) {
+ for (i = 1; i < 64; i++) {
+ if (i == 4 || i == 9 || i == 13)
+ continue;
+ ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
+ }
+ } else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_256) {
+ for (i = 1; i < 32; i++) {
+ if (i == 2 || i == 4 || i == 6)
+ continue;
+ ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
+ }
+ }
+}
+
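+/*
+ * Derive the set of active channels (as a four-bit mask) from the
+ * bus width bits of an RX or TX mode word.
+ */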
+static u32 ixp2400_msf_valid_channels(u32 reg)
+{
+ u32 channels;
+
+ channels = 0;
+ switch (reg & IXP2400_RX_MODE_WIDTH_MASK) {
+ case IXP2400_RX_MODE_1x32:
+ channels = 0x1;
+ if (reg & IXP2400_RX_MODE_MPHY &&
+ !(reg & IXP2400_RX_MODE_MPHY_32))
+ channels = 0xf;
+ break;
+
+ case IXP2400_RX_MODE_2x16:
+ channels = 0x5;
+ break;
+
+ case IXP2400_RX_MODE_4x8:
+ channels = 0xf;
+ break;
+
+ case IXP2400_RX_MODE_1x16_2x8:
+ channels = 0xd;
+ break;
+ }
+
+ return channels;
+}
+
+static void ixp2400_msf_enable_rx(struct ixp2400_msf_parameters *mp)
+{
+ u32 value;
+
+ value = ixp2000_reg_read(IXP2000_MSF_RX_CONTROL) & 0x0fffffff;
+ value |= ixp2400_msf_valid_channels(mp->rx_mode) << 28;
+ ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, value);
+}
+
+static void ixp2400_msf_enable_tx(struct ixp2400_msf_parameters *mp)
+{
+ u32 value;
+
+ value = ixp2000_reg_read(IXP2000_MSF_TX_CONTROL) & 0x0fffffff;
+ value |= ixp2400_msf_valid_channels(mp->tx_mode) << 28;
+ ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, value);
+}
+
+
+void ixp2400_msf_init(struct ixp2400_msf_parameters *mp)
+{
+ u32 value;
+ int i;
+
+ /*
+ * Init the RX/TX PLLs based on the passed parameter block.
+ */
+ ixp2400_pll_init(mp);
+
+ /*
+ * Reset MSF. Bit 7 in IXP_RESET_0 resets the MSF.
+ */
+ value = ixp2000_reg_read(IXP2000_RESET0);
+ ixp2000_reg_write(IXP2000_RESET0, value | 0x80);
+ ixp2000_reg_write(IXP2000_RESET0, value & ~0x80);
+
+ /*
+ * Initialise the RX section.
+ */
+ ixp2000_reg_write(IXP2000_MSF_RX_MPHY_POLL_LIMIT, mp->rx_poll_ports - 1);
+ ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, mp->rx_mode);
+ for (i = 0; i < 4; i++) {
+ ixp2000_reg_write(IXP2000_MSF_RX_UP_CONTROL_0 + i,
+ mp->rx_channel_mode[i]);
+ }
+ ixp2400_msf_free_rbuf_entries(mp);
+ ixp2400_msf_enable_rx(mp);
+
+ /*
+ * Initialise the TX section.
+ */
+ ixp2000_reg_write(IXP2000_MSF_TX_MPHY_POLL_LIMIT, mp->tx_poll_ports - 1);
+ ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, mp->tx_mode);
+ for (i = 0; i < 4; i++) {
+ ixp2000_reg_write(IXP2000_MSF_TX_UP_CONTROL_0 + i,
+ mp->tx_channel_mode[i]);
+ }
+ ixp2400_msf_enable_tx(mp);
+}
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h
new file mode 100644
index 000000000000..3ac1af2771da
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h
@@ -0,0 +1,115 @@
+/*
+ * Generic library functions for the MSF (Media and Switch Fabric) unit
+ * found on the Intel IXP2400 network processor.
+ *
+ * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Dedicated to Marija Kulikova.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of the
+ * License, or (at your option) any later version.
+ */
+
+#ifndef __IXP2400_MSF_H
+#define __IXP2400_MSF_H
+
+struct ixp2400_msf_parameters
+{
+ u32 rx_mode;
+ unsigned rxclk01_multiplier:2;
+ unsigned rxclk23_multiplier:2;
+ unsigned rx_poll_ports:6;
+ u32 rx_channel_mode[4];
+
+ u32 tx_mode;
+ unsigned txclk01_multiplier:2;
+ unsigned txclk23_multiplier:2;
+ unsigned tx_poll_ports:6;
+ u32 tx_channel_mode[4];
+};
+
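+/*
+ * A board support module fills in one of these parameter blocks and
+ * hands it to ixp2400_msf_init() at probe time; see enp2611.c for an
+ * example.
+ */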
+void ixp2400_msf_init(struct ixp2400_msf_parameters *mp);
+
+#define IXP2400_PLL_MULTIPLIER_48 0x00
+#define IXP2400_PLL_MULTIPLIER_24 0x01
+#define IXP2400_PLL_MULTIPLIER_16 0x02
+#define IXP2400_PLL_MULTIPLIER_12 0x03
+
+#define IXP2400_RX_MODE_CSIX 0x00400000
+#define IXP2400_RX_MODE_UTOPIA_POS 0x00000000
+#define IXP2400_RX_MODE_WIDTH_MASK 0x00300000
+#define IXP2400_RX_MODE_1x16_2x8 0x00300000
+#define IXP2400_RX_MODE_4x8 0x00200000
+#define IXP2400_RX_MODE_2x16 0x00100000
+#define IXP2400_RX_MODE_1x32 0x00000000
+#define IXP2400_RX_MODE_MPHY 0x00080000
+#define IXP2400_RX_MODE_SPHY 0x00000000
+#define IXP2400_RX_MODE_MPHY_32 0x00040000
+#define IXP2400_RX_MODE_MPHY_4 0x00000000
+#define IXP2400_RX_MODE_MPHY_POLLED_STATUS 0x00020000
+#define IXP2400_RX_MODE_MPHY_DIRECT_STATUS 0x00000000
+#define IXP2400_RX_MODE_CBUS_FULL_DUPLEX 0x00010000
+#define IXP2400_RX_MODE_CBUS_SIMPLEX 0x00000000
+#define IXP2400_RX_MODE_MPHY_LEVEL2 0x00004000
+#define IXP2400_RX_MODE_MPHY_LEVEL3 0x00000000
+#define IXP2400_RX_MODE_CBUS_8BIT 0x00002000
+#define IXP2400_RX_MODE_CBUS_4BIT 0x00000000
+#define IXP2400_RX_MODE_CSIX_SINGLE_FREELIST 0x00000200
+#define IXP2400_RX_MODE_CSIX_SPLIT_FREELISTS 0x00000000
+#define IXP2400_RX_MODE_RBUF_SIZE_MASK 0x0000000c
+#define IXP2400_RX_MODE_RBUF_SIZE_256 0x00000008
+#define IXP2400_RX_MODE_RBUF_SIZE_128 0x00000004
+#define IXP2400_RX_MODE_RBUF_SIZE_64 0x00000000
+
+#define IXP2400_PORT_RX_MODE_SLAVE 0x00000040
+#define IXP2400_PORT_RX_MODE_MASTER 0x00000000
+#define IXP2400_PORT_RX_MODE_POS_PHY_L3 0x00000020
+#define IXP2400_PORT_RX_MODE_POS_PHY_L2 0x00000000
+#define IXP2400_PORT_RX_MODE_POS_PHY 0x00000010
+#define IXP2400_PORT_RX_MODE_UTOPIA 0x00000000
+#define IXP2400_PORT_RX_MODE_EVEN_PARITY 0x0000000c
+#define IXP2400_PORT_RX_MODE_ODD_PARITY 0x00000008
+#define IXP2400_PORT_RX_MODE_NO_PARITY 0x00000000
+#define IXP2400_PORT_RX_MODE_UTOPIA_BIG_CELLS 0x00000002
+#define IXP2400_PORT_RX_MODE_UTOPIA_NORMAL_CELLS 0x00000000
+#define IXP2400_PORT_RX_MODE_2_CYCLE_DECODE 0x00000001
+#define IXP2400_PORT_RX_MODE_1_CYCLE_DECODE 0x00000000
+
+#define IXP2400_TX_MODE_CSIX 0x00400000
+#define IXP2400_TX_MODE_UTOPIA_POS 0x00000000
+#define IXP2400_TX_MODE_WIDTH_MASK 0x00300000
+#define IXP2400_TX_MODE_1x16_2x8 0x00300000
+#define IXP2400_TX_MODE_4x8 0x00200000
+#define IXP2400_TX_MODE_2x16 0x00100000
+#define IXP2400_TX_MODE_1x32 0x00000000
+#define IXP2400_TX_MODE_MPHY 0x00080000
+#define IXP2400_TX_MODE_SPHY 0x00000000
+#define IXP2400_TX_MODE_MPHY_32 0x00040000
+#define IXP2400_TX_MODE_MPHY_4 0x00000000
+#define IXP2400_TX_MODE_MPHY_POLLED_STATUS 0x00020000
+#define IXP2400_TX_MODE_MPHY_DIRECT_STATUS 0x00000000
+#define IXP2400_TX_MODE_CBUS_FULL_DUPLEX 0x00010000
+#define IXP2400_TX_MODE_CBUS_SIMPLEX 0x00000000
+#define IXP2400_TX_MODE_MPHY_LEVEL2 0x00004000
+#define IXP2400_TX_MODE_MPHY_LEVEL3 0x00000000
+#define IXP2400_TX_MODE_CBUS_8BIT 0x00002000
+#define IXP2400_TX_MODE_CBUS_4BIT 0x00000000
+#define IXP2400_TX_MODE_TBUF_SIZE_MASK 0x0000000c
+#define IXP2400_TX_MODE_TBUF_SIZE_256 0x00000008
+#define IXP2400_TX_MODE_TBUF_SIZE_128 0x00000004
+#define IXP2400_TX_MODE_TBUF_SIZE_64 0x00000000
+
+#define IXP2400_PORT_TX_MODE_SLAVE 0x00000040
+#define IXP2400_PORT_TX_MODE_MASTER 0x00000000
+#define IXP2400_PORT_TX_MODE_POS_PHY 0x00000010
+#define IXP2400_PORT_TX_MODE_UTOPIA 0x00000000
+#define IXP2400_PORT_TX_MODE_EVEN_PARITY 0x0000000c
+#define IXP2400_PORT_TX_MODE_ODD_PARITY 0x00000008
+#define IXP2400_PORT_TX_MODE_NO_PARITY 0x00000000
+#define IXP2400_PORT_TX_MODE_UTOPIA_BIG_CELLS 0x00000002
+#define IXP2400_PORT_TX_MODE_2_CYCLE_DECODE 0x00000001
+#define IXP2400_PORT_TX_MODE_1_CYCLE_DECODE 0x00000000
+
+
+#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc
new file mode 100644
index 000000000000..42a73e357afa
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc
@@ -0,0 +1,408 @@
+/*
+ * RX ucode for the Intel IXP2400 in POS-PHY mode.
+ * Copyright (C) 2004, 2005 Lennert Buytenhek
+ * Dedicated to Marija Kulikova.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Assumptions made in this code:
+ * - The IXP2400 MSF is configured for POS-PHY mode, in a mode where
+ * only one full element list is used. This includes, for example,
+ * 1x32 SPHY and 1x32 MPHY32, but not 4x8 SPHY or 1x32 MPHY4. (This
+ * is not an exhaustive list.)
+ * - The RBUF uses 64-byte mpackets.
+ * - RX descriptors reside in SRAM, and have the following format:
+ * struct rx_desc
+ * {
+ * // to uengine
+ * u32 buf_phys_addr;
+ * u32 buf_length;
+ *
+ * // from uengine
+ * u32 channel;
+ * u32 pkt_length;
+ * };
+ * - Packet data resides in DRAM.
+ * - Packet buffer addresses are 8-byte aligned.
+ * - Scratch ring 0 is rx_pending.
+ * - Scratch ring 1 is rx_done, and has status condition 'full'.
+ * - The host triggers rx_done flush and rx_pending refill on seeing INTA.
+ * - This code is run on all eight threads of the microengine it runs on.
+ *
+ * Local memory is used for per-channel RX state.
+ */
+
+#define RX_THREAD_FREELIST_0 0x0030
+#define RBUF_ELEMENT_DONE 0x0044
+
+#define CHANNEL_FLAGS *l$index0[0]
+#define CHANNEL_FLAG_RECEIVING 1
+#define PACKET_LENGTH *l$index0[1]
+#define PACKET_CHECKSUM *l$index0[2]
+#define BUFFER_HANDLE *l$index0[3]
+#define BUFFER_START *l$index0[4]
+#define BUFFER_LENGTH *l$index0[5]
+
+#define CHANNEL_STATE_SIZE 24 // in bytes
+#define CHANNEL_STATE_SHIFT 5 // ceil(log2(state size))
+
+
+ .sig volatile sig1
+ .sig volatile sig2
+ .sig volatile sig3
+
+ .sig mpacket_arrived
+ .reg add_to_rx_freelist
+ .reg read $rsw0, $rsw1
+ .xfer_order $rsw0 $rsw1
+
+ .reg zero
+
+ /*
+ * Initialise add_to_rx_freelist.
+ */
+ .begin
+ .reg temp
+ .reg temp2
+
+ immed[add_to_rx_freelist, RX_THREAD_FREELIST_0]
+ immed_w1[add_to_rx_freelist, (&$rsw0 | (&mpacket_arrived << 12))]
+
+ local_csr_rd[ACTIVE_CTX_STS]
+ immed[temp, 0]
+ alu[temp2, temp, and, 0x1f]
+ alu_shf[add_to_rx_freelist, add_to_rx_freelist, or, temp2, <<20]
+ alu[temp2, temp, and, 0x80]
+ alu_shf[add_to_rx_freelist, add_to_rx_freelist, or, temp2, <<18]
+ .end
+
+ immed[zero, 0]
+
+ /*
+ * Skip context 0 initialisation?
+ */
+ .begin
+ br!=ctx[0, mpacket_receive_loop#]
+ .end
+
+ /*
+ * Initialise local memory.
+ */
+ .begin
+ .reg addr
+ .reg temp
+
+ immed[temp, 0]
+ init_local_mem_loop#:
+ alu_shf[addr, --, b, temp, <<CHANNEL_STATE_SHIFT]
+ local_csr_wr[ACTIVE_LM_ADDR_0, addr]
+ nop
+ nop
+ nop
+
+ immed[CHANNEL_FLAGS, 0]
+
+ alu[temp, temp, +, 1]
+ alu[--, temp, and, 0x20]
+ beq[init_local_mem_loop#]
+ .end
+
+ /*
+ * Initialise signal pipeline.
+ */
+ .begin
+ local_csr_wr[SAME_ME_SIGNAL, (&sig1 << 3)]
+ .set_sig sig1
+
+ local_csr_wr[SAME_ME_SIGNAL, (&sig2 << 3)]
+ .set_sig sig2
+
+ local_csr_wr[SAME_ME_SIGNAL, (&sig3 << 3)]
+ .set_sig sig3
+ .end
+
+mpacket_receive_loop#:
+ /*
+ * Synchronise and wait for mpacket.
+ */
+ .begin
+ ctx_arb[sig1]
+ local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig1 << 3))]
+
+ msf[fast_wr, --, add_to_rx_freelist, 0]
+ .set_sig mpacket_arrived
+ ctx_arb[mpacket_arrived]
+ .set $rsw0 $rsw1
+ .end
+
+ /*
+ * We halt if we see {inbparerr,parerr,null,soperror}.
+ */
+ .begin
+ alu_shf[--, 0x1b, and, $rsw0, >>8]
+ bne[abort_rswerr#]
+ .end
+
+ /*
+ * Point local memory pointer to this channel's state area.
+ */
+ .begin
+ .reg chanaddr
+
+ alu[chanaddr, $rsw0, and, 0x1f]
+ alu_shf[chanaddr, --, b, chanaddr, <<CHANNEL_STATE_SHIFT]
+ local_csr_wr[ACTIVE_LM_ADDR_0, chanaddr]
+ nop
+ nop
+ nop
+ .end
+
+ /*
+ * Check whether we received a SOP mpacket while we were already
+ * working on a packet, or a non-SOP mpacket while there was no
+ * packet pending. (SOP == RECEIVING -> abort) If everything's
+ * okay, update the RECEIVING flag to reflect our new state.
+ */
+ .begin
+ .reg temp
+ .reg eop
+
+ #if CHANNEL_FLAG_RECEIVING != 1
+ #error CHANNEL_FLAG_RECEIVING is not 1
+ #endif
+
+ alu_shf[temp, 1, and, $rsw0, >>15]
+ alu[temp, temp, xor, CHANNEL_FLAGS]
+ alu[--, temp, and, CHANNEL_FLAG_RECEIVING]
+ beq[abort_proterr#]
+
+ alu_shf[eop, 1, and, $rsw0, >>14]
+ alu[CHANNEL_FLAGS, temp, xor, eop]
+ .end
+
+ /*
+ * Copy the mpacket into the right spot, and in case of EOP,
+ * write back the descriptor and pass the packet on.
+ */
+ .begin
+ .reg buffer_offset
+ .reg _packet_length
+ .reg _packet_checksum
+ .reg _buffer_handle
+ .reg _buffer_start
+ .reg _buffer_length
+
+ /*
+ * Determine buffer_offset, _packet_length and
+ * _packet_checksum.
+ */
+ .begin
+ .reg temp
+
+ alu[--, 1, and, $rsw0, >>15]
+ beq[not_sop#]
+
+ immed[PACKET_LENGTH, 0]
+ immed[PACKET_CHECKSUM, 0]
+
+ not_sop#:
+ alu[buffer_offset, --, b, PACKET_LENGTH]
+ alu_shf[temp, 0xff, and, $rsw0, >>16]
+ alu[_packet_length, buffer_offset, +, temp]
+ alu[PACKET_LENGTH, --, b, _packet_length]
+
+ immed[temp, 0xffff]
+ alu[temp, $rsw1, and, temp]
+ alu[_packet_checksum, PACKET_CHECKSUM, +, temp]
+ alu[PACKET_CHECKSUM, --, b, _packet_checksum]
+ .end
+
+ /*
+ * Allocate buffer in case of SOP.
+ */
+ .begin
+ .reg temp
+
+ alu[temp, 1, and, $rsw0, >>15]
+ beq[skip_buffer_alloc#]
+
+ .begin
+ .sig zzz
+ .reg read $stemp $stemp2
+ .xfer_order $stemp $stemp2
+
+ rx_nobufs#:
+ scratch[get, $stemp, zero, 0, 1], ctx_swap[zzz]
+ alu[_buffer_handle, --, b, $stemp]
+ beq[rx_nobufs#]
+
+ sram[read, $stemp, _buffer_handle, 0, 2],
+ ctx_swap[zzz]
+ alu[_buffer_start, --, b, $stemp]
+ alu[_buffer_length, --, b, $stemp2]
+ .end
+
+ skip_buffer_alloc#:
+ .end
+
+ /*
+ * Resynchronise.
+ */
+ .begin
+ ctx_arb[sig2]
+ local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig2 << 3))]
+ .end
+
+ /*
+ * Synchronise buffer state.
+ */
+ .begin
+ .reg temp
+
+ alu[temp, 1, and, $rsw0, >>15]
+ beq[copy_from_local_mem#]
+
+ alu[BUFFER_HANDLE, --, b, _buffer_handle]
+ alu[BUFFER_START, --, b, _buffer_start]
+ alu[BUFFER_LENGTH, --, b, _buffer_length]
+ br[sync_state_done#]
+
+ copy_from_local_mem#:
+ alu[_buffer_handle, --, b, BUFFER_HANDLE]
+ alu[_buffer_start, --, b, BUFFER_START]
+ alu[_buffer_length, --, b, BUFFER_LENGTH]
+
+ sync_state_done#:
+ .end
+
+#if 0
+ /*
+ * Debug buffer state management.
+ */
+ .begin
+ .reg temp
+
+ alu[temp, 1, and, $rsw0, >>14]
+ beq[no_poison#]
+ immed[BUFFER_HANDLE, 0xdead]
+ immed[BUFFER_START, 0xdead]
+ immed[BUFFER_LENGTH, 0xdead]
+ no_poison#:
+
+ immed[temp, 0xdead]
+ alu[--, _buffer_handle, -, temp]
+ beq[state_corrupted#]
+ alu[--, _buffer_start, -, temp]
+ beq[state_corrupted#]
+ alu[--, _buffer_length, -, temp]
+ beq[state_corrupted#]
+ .end
+#endif
+
+ /*
+ * Check buffer length.
+ */
+ .begin
+ alu[--, _buffer_length, -, _packet_length]
+ blo[buffer_overflow#]
+ .end
+
+ /*
+ * Copy the mpacket and give back the RBUF element.
+ */
+ .begin
+ .reg element
+ .reg xfer_size
+ .reg temp
+ .sig copy_sig
+
+ alu_shf[element, 0x7f, and, $rsw0, >>24]
+ alu_shf[xfer_size, 0xff, and, $rsw0, >>16]
+
+ alu[xfer_size, xfer_size, -, 1]
+ alu_shf[xfer_size, 0x10, or, xfer_size, >>3]
+ alu_shf[temp, 0x10, or, xfer_size, <<21]
+ alu_shf[temp, temp, or, element, <<11]
+ alu_shf[--, temp, or, 1, <<18]
+
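+			/*
+			 * The alu_shf result above feeds the dram
+			 * instruction below via indirect_ref, encoding the
+			 * burst size and the RBUF element to read from (as
+			 * we read the IXP2400 indirect-reference format).
+			 */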
+ dram[rbuf_rd, --, _buffer_start, buffer_offset, max_8],
+ indirect_ref, sig_done[copy_sig]
+ ctx_arb[copy_sig]
+
+ alu[temp, RBUF_ELEMENT_DONE, or, element, <<16]
+ msf[fast_wr, --, temp, 0]
+ .end
+
+ /*
+ * If EOP, write back the packet descriptor.
+ */
+ .begin
+ .reg write $stemp $stemp2
+ .xfer_order $stemp $stemp2
+ .sig zzz
+
+ alu_shf[--, 1, and, $rsw0, >>14]
+ beq[no_writeback#]
+
+ alu[$stemp, $rsw0, and, 0x1f]
+ alu[$stemp2, --, b, _packet_length]
+ sram[write, $stemp, _buffer_handle, 8, 2], ctx_swap[zzz]
+
+ no_writeback#:
+ .end
+
+ /*
+ * Resynchronise.
+ */
+ .begin
+ ctx_arb[sig3]
+ local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig3 << 3))]
+ .end
+
+ /*
+ * If EOP, put the buffer back onto the scratch ring.
+ */
+ .begin
+ .reg write $stemp
+ .sig zzz
+
+ br_inp_state[SCR_Ring1_Status, rx_done_ring_overflow#]
+
+ alu_shf[--, 1, and, $rsw0, >>14]
+ beq[mpacket_receive_loop#]
+
+ alu[--, 1, and, $rsw0, >>10]
+ bne[rxerr#]
+
+ alu[$stemp, --, b, _buffer_handle]
+ scratch[put, $stemp, zero, 4, 1], ctx_swap[zzz]
+ cap[fast_wr, 0, XSCALE_INT_A]
+ br[mpacket_receive_loop#]
+
+ rxerr#:
+ alu[$stemp, --, b, _buffer_handle]
+ scratch[put, $stemp, zero, 0, 1], ctx_swap[zzz]
+ br[mpacket_receive_loop#]
+ .end
+ .end
+
+
+abort_rswerr#:
+ halt
+
+abort_proterr#:
+ halt
+
+state_corrupted#:
+ halt
+
+buffer_overflow#:
+ halt
+
+rx_done_ring_overflow#:
+ halt
+
+
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode
new file mode 100644
index 000000000000..e8aee2f81aad
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode
@@ -0,0 +1,130 @@
+static struct ixp2000_uengine_code ixp2400_rx =
+{
+ .cpu_model_bitmask = 0x000003fe,
+ .cpu_min_revision = 0,
+ .cpu_max_revision = 255,
+
+ .uengine_parameters = IXP2000_UENGINE_8_CONTEXTS |
+ IXP2000_UENGINE_PRN_UPDATE_EVERY |
+ IXP2000_UENGINE_NN_FROM_PREVIOUS |
+ IXP2000_UENGINE_ASSERT_EMPTY_AT_0 |
+ IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT |
+ IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT,
+
+ .initial_reg_values = (struct ixp2000_reg_value []) {
+ { -1, -1 }
+ },
+
+ .num_insns = 109,
+ .insns = (u8 []) {
+ 0xf0, 0x00, 0x0c, 0xc0, 0x05,
+ 0xf4, 0x44, 0x0c, 0x00, 0x05,
+ 0xfc, 0x04, 0x4c, 0x00, 0x00,
+ 0xf0, 0x00, 0x00, 0x3b, 0x00,
+ 0xb4, 0x40, 0xf0, 0x3b, 0x1f,
+ 0x8a, 0xc0, 0x50, 0x3e, 0x05,
+ 0xb4, 0x40, 0xf0, 0x3b, 0x80,
+ 0x9a, 0xe0, 0x00, 0x3e, 0x05,
+ 0xf0, 0x00, 0x00, 0x07, 0x00,
+ 0xd8, 0x05, 0xc0, 0x00, 0x11,
+ 0xf0, 0x00, 0x00, 0x0f, 0x00,
+ 0x91, 0xb0, 0x20, 0x0e, 0x00,
+ 0xfc, 0x06, 0x60, 0x0b, 0x00,
+ 0xf0, 0x00, 0x0c, 0x03, 0x00,
+ 0xf0, 0x00, 0x0c, 0x03, 0x00,
+ 0xf0, 0x00, 0x0c, 0x03, 0x00,
+ 0xf0, 0x00, 0x0c, 0x02, 0x00,
+ 0xb0, 0xc0, 0x30, 0x0f, 0x01,
+ 0xa4, 0x70, 0x00, 0x0f, 0x20,
+ 0xd8, 0x02, 0xc0, 0x01, 0x00,
+ 0xfc, 0x10, 0xac, 0x23, 0x08,
+ 0xfc, 0x10, 0xac, 0x43, 0x10,
+ 0xfc, 0x10, 0xac, 0x63, 0x18,
+ 0xe0, 0x00, 0x00, 0x00, 0x02,
+ 0xfc, 0x10, 0xae, 0x23, 0x88,
+ 0x3d, 0x00, 0x04, 0x03, 0x20,
+ 0xe0, 0x00, 0x00, 0x00, 0x10,
+ 0x84, 0x82, 0x02, 0x01, 0x3b,
+ 0xd8, 0x1a, 0x00, 0x01, 0x01,
+ 0xb4, 0x00, 0x8c, 0x7d, 0x80,
+ 0x91, 0xb0, 0x80, 0x22, 0x00,
+ 0xfc, 0x06, 0x60, 0x23, 0x00,
+ 0xf0, 0x00, 0x0c, 0x03, 0x00,
+ 0xf0, 0x00, 0x0c, 0x03, 0x00,
+ 0xf0, 0x00, 0x0c, 0x03, 0x00,
+ 0x94, 0xf0, 0x92, 0x01, 0x21,
+ 0xac, 0x40, 0x60, 0x26, 0x00,
+ 0xa4, 0x30, 0x0c, 0x04, 0x06,
+ 0xd8, 0x1a, 0x40, 0x01, 0x00,
+ 0x94, 0xe0, 0xa2, 0x01, 0x21,
+ 0xac, 0x20, 0x00, 0x28, 0x06,
+ 0x84, 0xf2, 0x02, 0x01, 0x21,
+ 0xd8, 0x0b, 0x40, 0x01, 0x00,
+ 0xf0, 0x00, 0x0c, 0x02, 0x01,
+ 0xf0, 0x00, 0x0c, 0x02, 0x02,
+ 0xa0, 0x00, 0x08, 0x04, 0x00,
+ 0x95, 0x00, 0xc6, 0x01, 0xff,
+ 0xa0, 0x80, 0x10, 0x30, 0x00,
+ 0xa0, 0x60, 0x1c, 0x00, 0x01,
+ 0xf0, 0x0f, 0xf0, 0x33, 0xff,
+ 0xb4, 0x00, 0xc0, 0x31, 0x81,
+ 0xb0, 0x80, 0xb0, 0x32, 0x02,
+ 0xa0, 0x20, 0x20, 0x2c, 0x00,
+ 0x94, 0xf0, 0xd2, 0x01, 0x21,
+ 0xd8, 0x0f, 0x40, 0x01, 0x00,
+ 0x19, 0x40, 0x10, 0x04, 0x20,
+ 0xa0, 0x00, 0x26, 0x04, 0x00,
+ 0xd8, 0x0d, 0xc0, 0x01, 0x00,
+ 0x00, 0x42, 0x10, 0x80, 0x02,
+ 0xb0, 0x00, 0x46, 0x04, 0x00,
+ 0xb0, 0x00, 0x56, 0x08, 0x00,
+ 0xe0, 0x00, 0x00, 0x00, 0x04,
+ 0xfc, 0x10, 0xae, 0x43, 0x90,
+ 0x84, 0xf0, 0x32, 0x01, 0x21,
+ 0xd8, 0x11, 0x40, 0x01, 0x00,
+ 0xa0, 0x60, 0x3c, 0x00, 0x02,
+ 0xa0, 0x20, 0x40, 0x10, 0x00,
+ 0xa0, 0x20, 0x50, 0x14, 0x00,
+ 0xd8, 0x12, 0x00, 0x00, 0x18,
+ 0xa0, 0x00, 0x28, 0x0c, 0x00,
+ 0xb0, 0x00, 0x48, 0x10, 0x00,
+ 0xb0, 0x00, 0x58, 0x14, 0x00,
+ 0xaa, 0xf0, 0x00, 0x14, 0x01,
+ 0xd8, 0x1a, 0xc0, 0x01, 0x05,
+ 0x85, 0x80, 0x42, 0x01, 0xff,
+ 0x95, 0x00, 0x66, 0x01, 0xff,
+ 0xba, 0xc0, 0x60, 0x1b, 0x01,
+ 0x9a, 0x30, 0x60, 0x19, 0x30,
+ 0x9a, 0xb0, 0x70, 0x1a, 0x30,
+ 0x9b, 0x50, 0x78, 0x1e, 0x04,
+ 0x8a, 0xe2, 0x08, 0x1e, 0x21,
+ 0x6a, 0x4e, 0x00, 0x13, 0x00,
+ 0xe0, 0x00, 0x00, 0x00, 0x30,
+ 0x9b, 0x00, 0x7a, 0x92, 0x04,
+ 0x3d, 0x00, 0x04, 0x1f, 0x20,
+ 0x84, 0xe2, 0x02, 0x01, 0x21,
+ 0xd8, 0x16, 0x80, 0x01, 0x00,
+ 0xa4, 0x18, 0x0c, 0x7d, 0x80,
+ 0xa0, 0x58, 0x1c, 0x00, 0x01,
+ 0x01, 0x42, 0x00, 0xa0, 0x02,
+ 0xe0, 0x00, 0x00, 0x00, 0x08,
+ 0xfc, 0x10, 0xae, 0x63, 0x98,
+ 0xd8, 0x1b, 0x00, 0xc2, 0x14,
+ 0x84, 0xe2, 0x02, 0x01, 0x21,
+ 0xd8, 0x05, 0xc0, 0x01, 0x00,
+ 0x84, 0xa2, 0x02, 0x01, 0x21,
+ 0xd8, 0x19, 0x40, 0x01, 0x01,
+ 0xa0, 0x58, 0x0c, 0x00, 0x02,
+ 0x1a, 0x40, 0x00, 0x04, 0x24,
+ 0x33, 0x00, 0x01, 0x2f, 0x20,
+ 0xd8, 0x05, 0xc0, 0x00, 0x18,
+ 0xa0, 0x58, 0x0c, 0x00, 0x02,
+ 0x1a, 0x40, 0x00, 0x04, 0x20,
+ 0xd8, 0x05, 0xc0, 0x00, 0x18,
+ 0xe0, 0x00, 0x02, 0x00, 0x00,
+ 0xe0, 0x00, 0x02, 0x00, 0x00,
+ 0xe0, 0x00, 0x02, 0x00, 0x00,
+ 0xe0, 0x00, 0x02, 0x00, 0x00,
+ 0xe0, 0x00, 0x02, 0x00, 0x00,
+ }
+};
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc
new file mode 100644
index 000000000000..d090d1884fb7
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc
@@ -0,0 +1,272 @@
+/*
+ * TX ucode for the Intel IXP2400 in POS-PHY mode.
+ * Copyright (C) 2004, 2005 Lennert Buytenhek
+ * Dedicated to Marija Kulikova.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Assumptions made in this code:
+ * - The IXP2400 MSF is configured for POS-PHY mode, in a mode where
+ * only one TBUF partition is used. This includes, for example,
+ * 1x32 SPHY and 1x32 MPHY32, but not 4x8 SPHY or 1x32 MPHY4. (This
+ * is not an exhaustive list.)
+ * - The TBUF uses 64-byte mpackets.
+ * - TX descriptors reside in SRAM, and have the following format:
+ * struct tx_desc
+ * {
+ * // to uengine
+ * u32 buf_phys_addr;
+ * u32 pkt_length;
+ * u32 channel;
+ * };
+ * - Packet data resides in DRAM.
+ * - Packet buffer addresses are 8-byte aligned.
+ * - Scratch ring 2 is tx_pending.
+ * - Scratch ring 3 is tx_done, and has status condition 'full'.
+ * - This code is run on all eight threads of the microengine it runs on.
+ */
+
+#define TX_SEQUENCE_0 0x0060
+#define TBUF_CTRL 0x1800
+
+#define PARTITION_SIZE 128
+#define PARTITION_THRESH 96
+
+
+ .sig volatile sig1
+ .sig volatile sig2
+ .sig volatile sig3
+
+ .reg @old_tx_seq_0
+ .reg @mpkts_in_flight
+ .reg @next_tbuf_mpacket
+
+ .reg @buffer_handle
+ .reg @buffer_start
+ .reg @packet_length
+ .reg @channel
+ .reg @packet_offset
+
+ .reg zero
+
+ immed[zero, 0]
+
+ /*
+	 * Contexts other than 0 skip the one-time initialisation below.
+ */
+ .begin
+ br!=ctx[0, mpacket_tx_loop#]
+ .end
+
+ /*
+ * Wait until all pending TBUF elements have been transmitted.
+ */
+ .begin
+ .reg read $tx
+ .sig zzz
+
+ loop_empty#:
+ msf[read, $tx, zero, TX_SEQUENCE_0, 1], ctx_swap[zzz]
+ alu_shf[--, --, b, $tx, >>31]
+ beq[loop_empty#]
+
+ alu[@old_tx_seq_0, --, b, $tx]
+ .end
+
+ immed[@mpkts_in_flight, 0]
+ alu[@next_tbuf_mpacket, @old_tx_seq_0, and, (PARTITION_SIZE - 1)]
+
+ immed[@buffer_handle, 0]
+
+ /*
+ * Initialise signal pipeline.
+ */
+ .begin
+ local_csr_wr[SAME_ME_SIGNAL, (&sig1 << 3)]
+ .set_sig sig1
+
+ local_csr_wr[SAME_ME_SIGNAL, (&sig2 << 3)]
+ .set_sig sig2
+
+ local_csr_wr[SAME_ME_SIGNAL, (&sig3 << 3)]
+ .set_sig sig3
+ .end
+
+mpacket_tx_loop#:
+ .begin
+ .reg tbuf_element_index
+ .reg buffer_handle
+ .reg sop_eop
+ .reg packet_data
+ .reg channel
+ .reg mpacket_size
+
+ /*
+ * If there is no packet currently being transmitted,
+ * dequeue the next TX descriptor, and fetch the buffer
+ * address, packet length and destination channel number.
+ */
+ .begin
+ .reg read $stemp $stemp2 $stemp3
+ .xfer_order $stemp $stemp2 $stemp3
+ .sig zzz
+
+ ctx_arb[sig1]
+
+ alu[--, --, b, @buffer_handle]
+ bne[already_got_packet#]
+
+ tx_nobufs#:
+ scratch[get, $stemp, zero, 8, 1], ctx_swap[zzz]
+ alu[@buffer_handle, --, b, $stemp]
+ beq[tx_nobufs#]
+
+ sram[read, $stemp, $stemp, 0, 3], ctx_swap[zzz]
+ alu[@buffer_start, --, b, $stemp]
+ alu[@packet_length, --, b, $stemp2]
+ beq[zero_byte_packet#]
+ alu[@channel, --, b, $stemp3]
+ immed[@packet_offset, 0]
+
+ already_got_packet#:
+ local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig1 << 3))]
+ .end
+
+ /*
+	 * Determine the tbuf element index, SOP/EOP flags, mpacket
+	 * offset and mpacket size, and cache the buffer_handle and
+	 * channel number.
+ */
+ .begin
+ alu[tbuf_element_index, --, b, @next_tbuf_mpacket]
+ alu[@next_tbuf_mpacket, @next_tbuf_mpacket, +, 1]
+ alu[@next_tbuf_mpacket, @next_tbuf_mpacket, and,
+ (PARTITION_SIZE - 1)]
+
+ alu[buffer_handle, --, b, @buffer_handle]
+ immed[@buffer_handle, 0]
+
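+		/*
+		 * sop_eop encoding, inferred from the bit operations below:
+		 * bit 0 = EOP, bit 1 = SOP. Assume EOP to start with, set
+		 * SOP if this is the first mpacket of the packet, and clear
+		 * EOP again if more than 64 bytes remain.
+		 */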
+ immed[sop_eop, 1]
+
+ alu[packet_data, --, b, @packet_offset]
+ bne[no_sop#]
+ alu[sop_eop, sop_eop, or, 2]
+ no_sop#:
+ alu[packet_data, packet_data, +, @buffer_start]
+
+ alu[channel, --, b, @channel]
+
+ alu[mpacket_size, @packet_length, -, @packet_offset]
+ alu[--, 64, -, mpacket_size]
+ bhs[eop#]
+ alu[@buffer_handle, --, b, buffer_handle]
+ immed[mpacket_size, 64]
+ alu[sop_eop, sop_eop, and, 2]
+ eop#:
+
+ alu[@packet_offset, @packet_offset, +, mpacket_size]
+ .end
+
+ /*
+ * Wait until there's enough space in the TBUF.
+ */
+ .begin
+ .reg read $tx
+ .reg temp
+ .sig zzz
+
+ ctx_arb[sig2]
+
+ br[test_space#]
+
+ loop_space#:
+ msf[read, $tx, zero, TX_SEQUENCE_0, 1], ctx_swap[zzz]
+
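+		/*
+		 * TX_SEQUENCE_0 counts transmitted mpackets; however far it
+		 * advanced since the last read (mod 256) is how many of our
+		 * in-flight mpackets have left the TBUF.
+		 */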
+ alu[temp, $tx, -, @old_tx_seq_0]
+ alu[temp, temp, and, 0xff]
+ alu[@mpkts_in_flight, @mpkts_in_flight, -, temp]
+
+ alu[@old_tx_seq_0, --, b, $tx]
+
+ test_space#:
+ alu[--, PARTITION_THRESH, -, @mpkts_in_flight]
+ blo[loop_space#]
+
+ alu[@mpkts_in_flight, @mpkts_in_flight, +, 1]
+
+ local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig2 << 3))]
+ .end
+
+ /*
+ * Copy the packet data to the TBUF.
+ */
+ .begin
+ .reg temp
+ .sig copy_sig
+
+ alu[temp, mpacket_size, -, 1]
+ alu_shf[temp, 0x10, or, temp, >>3]
+ alu_shf[temp, 0x10, or, temp, <<21]
+ alu_shf[temp, temp, or, tbuf_element_index, <<11]
+ alu_shf[--, temp, or, 1, <<18]
+
+ dram[tbuf_wr, --, packet_data, 0, max_8],
+ indirect_ref, sig_done[copy_sig]
+ ctx_arb[copy_sig]
+ .end
+
+ /*
+ * Mark TBUF element as ready-to-be-transmitted.
+ */
+ .begin
+ .reg write $tsw $tsw2
+ .xfer_order $tsw $tsw2
+ .reg temp
+ .sig zzz
+
+ alu_shf[temp, channel, or, mpacket_size, <<24]
+ alu_shf[$tsw, temp, or, sop_eop, <<8]
+ immed[$tsw2, 0]
+
+ immed[temp, TBUF_CTRL]
+ alu_shf[temp, temp, or, tbuf_element_index, <<3]
+ msf[write, $tsw, temp, 0, 2], ctx_swap[zzz]
+ .end
+
+ /*
+ * Resynchronise.
+ */
+ .begin
+ ctx_arb[sig3]
+ local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig3 << 3))]
+ .end
+
+ /*
+ * If this was an EOP mpacket, recycle the TX buffer
+ * and signal the host.
+ */
+ .begin
+ .reg write $stemp
+ .sig zzz
+
+ alu[--, sop_eop, and, 1]
+ beq[mpacket_tx_loop#]
+
+ tx_done_ring_full#:
+ br_inp_state[SCR_Ring3_Status, tx_done_ring_full#]
+
+ alu[$stemp, --, b, buffer_handle]
+ scratch[put, $stemp, zero, 12, 1], ctx_swap[zzz]
+ cap[fast_wr, 0, XSCALE_INT_A]
+ br[mpacket_tx_loop#]
+ .end
+ .end
+
+
+zero_byte_packet#:
+ halt
+
+
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode
new file mode 100644
index 000000000000..a433e24b0a51
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode
@@ -0,0 +1,98 @@
+static struct ixp2000_uengine_code ixp2400_tx =
+{
+ .cpu_model_bitmask = 0x000003fe,
+ .cpu_min_revision = 0,
+ .cpu_max_revision = 255,
+
+ .uengine_parameters = IXP2000_UENGINE_8_CONTEXTS |
+ IXP2000_UENGINE_PRN_UPDATE_EVERY |
+ IXP2000_UENGINE_NN_FROM_PREVIOUS |
+ IXP2000_UENGINE_ASSERT_EMPTY_AT_0 |
+ IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT |
+ IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT,
+
+ .initial_reg_values = (struct ixp2000_reg_value []) {
+ { -1, -1 }
+ },
+
+ .num_insns = 77,
+ .insns = (u8 []) {
+ 0xf0, 0x00, 0x00, 0x07, 0x00,
+ 0xd8, 0x03, 0x00, 0x00, 0x11,
+ 0x3c, 0x40, 0x00, 0x04, 0xe0,
+ 0x81, 0xf2, 0x02, 0x01, 0x00,
+ 0xd8, 0x00, 0x80, 0x01, 0x00,
+ 0xb0, 0x08, 0x06, 0x00, 0x00,
+ 0xf0, 0x00, 0x0c, 0x00, 0x80,
+ 0xb4, 0x49, 0x02, 0x03, 0x7f,
+ 0xf0, 0x00, 0x02, 0x83, 0x00,
+ 0xfc, 0x10, 0xac, 0x23, 0x08,
+ 0xfc, 0x10, 0xac, 0x43, 0x10,
+ 0xfc, 0x10, 0xac, 0x63, 0x18,
+ 0xe0, 0x00, 0x00, 0x00, 0x02,
+ 0xa0, 0x30, 0x02, 0x80, 0x00,
+ 0xd8, 0x06, 0x00, 0x01, 0x01,
+ 0x19, 0x40, 0x00, 0x04, 0x28,
+ 0xb0, 0x0a, 0x06, 0x00, 0x00,
+ 0xd8, 0x03, 0xc0, 0x01, 0x00,
+ 0x00, 0x44, 0x00, 0x80, 0x80,
+ 0xa0, 0x09, 0x06, 0x00, 0x00,
+ 0xb0, 0x0b, 0x06, 0x04, 0x00,
+ 0xd8, 0x13, 0x00, 0x01, 0x00,
+ 0xb0, 0x0c, 0x06, 0x08, 0x00,
+ 0xf0, 0x00, 0x0c, 0x00, 0xa0,
+ 0xfc, 0x10, 0xae, 0x23, 0x88,
+ 0xa0, 0x00, 0x12, 0x40, 0x00,
+ 0xb0, 0xc9, 0x02, 0x43, 0x01,
+ 0xb4, 0x49, 0x02, 0x43, 0x7f,
+ 0xb0, 0x00, 0x22, 0x80, 0x00,
+ 0xf0, 0x00, 0x02, 0x83, 0x00,
+ 0xf0, 0x00, 0x0c, 0x04, 0x02,
+ 0xb0, 0x40, 0x6c, 0x00, 0xa0,
+ 0xd8, 0x08, 0x80, 0x01, 0x01,
+ 0xaa, 0x00, 0x2c, 0x08, 0x02,
+ 0xa0, 0xc0, 0x30, 0x18, 0x90,
+ 0xa0, 0x00, 0x43, 0x00, 0x00,
+ 0xba, 0xc0, 0x32, 0xc0, 0xa0,
+ 0xaa, 0xb0, 0x00, 0x0f, 0x40,
+ 0xd8, 0x0a, 0x80, 0x01, 0x04,
+ 0xb0, 0x0a, 0x00, 0x08, 0x00,
+ 0xf0, 0x00, 0x00, 0x0f, 0x40,
+ 0xa4, 0x00, 0x2c, 0x08, 0x02,
+ 0xa0, 0x8a, 0x00, 0x0c, 0xa0,
+ 0xe0, 0x00, 0x00, 0x00, 0x04,
+ 0xd8, 0x0c, 0x80, 0x00, 0x18,
+ 0x3c, 0x40, 0x00, 0x04, 0xe0,
+ 0xba, 0x80, 0x42, 0x01, 0x80,
+ 0xb4, 0x40, 0x40, 0x13, 0xff,
+ 0xaa, 0x88, 0x00, 0x10, 0x80,
+ 0xb0, 0x08, 0x06, 0x00, 0x00,
+ 0xaa, 0xf0, 0x0d, 0x80, 0x80,
+ 0xd8, 0x0b, 0x40, 0x01, 0x05,
+ 0xa0, 0x88, 0x0c, 0x04, 0x80,
+ 0xfc, 0x10, 0xae, 0x43, 0x90,
+ 0xba, 0xc0, 0x50, 0x0f, 0x01,
+ 0x9a, 0x30, 0x50, 0x15, 0x30,
+ 0x9a, 0xb0, 0x50, 0x16, 0x30,
+ 0x9b, 0x50, 0x58, 0x16, 0x01,
+ 0x8a, 0xe2, 0x08, 0x16, 0x21,
+ 0x6b, 0x4e, 0x00, 0x83, 0x03,
+ 0xe0, 0x00, 0x00, 0x00, 0x30,
+ 0x9a, 0x80, 0x70, 0x0e, 0x04,
+ 0x8b, 0x88, 0x08, 0x1e, 0x02,
+ 0xf0, 0x00, 0x0c, 0x01, 0x81,
+ 0xf0, 0x01, 0x80, 0x1f, 0x00,
+ 0x9b, 0xd0, 0x78, 0x1e, 0x01,
+ 0x3d, 0x42, 0x00, 0x1c, 0x20,
+ 0xe0, 0x00, 0x00, 0x00, 0x08,
+ 0xfc, 0x10, 0xae, 0x63, 0x98,
+ 0xa4, 0x30, 0x0c, 0x04, 0x02,
+ 0xd8, 0x03, 0x00, 0x01, 0x00,
+ 0xd8, 0x11, 0xc1, 0x42, 0x14,
+ 0xa0, 0x18, 0x00, 0x08, 0x00,
+ 0x1a, 0x40, 0x00, 0x04, 0x2c,
+ 0x33, 0x00, 0x01, 0x2f, 0x20,
+ 0xd8, 0x03, 0x00, 0x00, 0x18,
+ 0xe0, 0x00, 0x02, 0x00, 0x00,
+ }
+};
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev.c b/drivers/net/ethernet/xscale/ixp2000/ixpdev.c
new file mode 100644
index 000000000000..e122493ab70e
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp2000/ixpdev.c
@@ -0,0 +1,440 @@
+/*
+ * IXP2000 MSF network device driver
+ * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Dedicated to Marija Kulikova.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/gfp.h>
+#include <asm/hardware/uengine.h>
+#include <asm/io.h>
+#include "ixp2400_rx.ucode"
+#include "ixp2400_tx.ucode"
+#include "ixpdev_priv.h"
+#include "ixpdev.h"
+#include "pm3386.h"
+
+#define DRV_MODULE_VERSION "0.2"
+
+static int nds_count;
+static struct net_device **nds;
+static int nds_open;
+static void (*set_port_admin_status)(int port, int up);
+
+static struct ixpdev_rx_desc * const rx_desc =
+ (struct ixpdev_rx_desc *)(IXP2000_SRAM0_VIRT_BASE + RX_BUF_DESC_BASE);
+static struct ixpdev_tx_desc * const tx_desc =
+ (struct ixpdev_tx_desc *)(IXP2000_SRAM0_VIRT_BASE + TX_BUF_DESC_BASE);
+static int tx_pointer;
+
+
+static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ixpdev_priv *ip = netdev_priv(dev);
+ struct ixpdev_tx_desc *desc;
+ int entry;
+ unsigned long flags;
+
+ if (unlikely(skb->len > PAGE_SIZE)) {
+ /* @@@ Count drops. */
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ entry = tx_pointer;
+ tx_pointer = (tx_pointer + 1) % TX_BUF_COUNT;
+
+ desc = tx_desc + entry;
+ desc->pkt_length = skb->len;
+ desc->channel = ip->channel;
+
+ skb_copy_and_csum_dev(skb, phys_to_virt(desc->buf_addr));
+ dev_kfree_skb(skb);
+
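+	/*
+	 * Hand the descriptor to the TX microengine by pushing its SRAM
+	 * offset onto the tx_pending scratch ring (ring 2 in the ucode's
+	 * numbering, going by the header comment in ixp2400_tx.uc).
+	 */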
+ ixp2000_reg_write(RING_TX_PENDING,
+ TX_BUF_DESC_BASE + (entry * sizeof(struct ixpdev_tx_desc)));
+
+ local_irq_save(flags);
+ ip->tx_queue_entries++;
+ if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
+ netif_stop_queue(dev);
+ local_irq_restore(flags);
+
+ return NETDEV_TX_OK;
+}
+
+
+static int ixpdev_rx(struct net_device *dev, int processed, int budget)
+{
+ while (processed < budget) {
+ struct ixpdev_rx_desc *desc;
+ struct sk_buff *skb;
+ void *buf;
+ u32 _desc;
+
+ _desc = ixp2000_reg_read(RING_RX_DONE);
+ if (_desc == 0)
+ return 0;
+
+ desc = rx_desc +
+ ((_desc - RX_BUF_DESC_BASE) / sizeof(struct ixpdev_rx_desc));
+ buf = phys_to_virt(desc->buf_addr);
+
+ if (desc->pkt_length < 4 || desc->pkt_length > PAGE_SIZE) {
+ printk(KERN_ERR "ixp2000: rx err, length %d\n",
+ desc->pkt_length);
+ goto err;
+ }
+
+ if (desc->channel < 0 || desc->channel >= nds_count) {
+ printk(KERN_ERR "ixp2000: rx err, channel %d\n",
+ desc->channel);
+ goto err;
+ }
+
+ /* @@@ Make FCS stripping configurable. */
+ desc->pkt_length -= 4;
+
+ if (unlikely(!netif_running(nds[desc->channel])))
+ goto err;
+
+ skb = netdev_alloc_skb_ip_align(dev, desc->pkt_length);
+ if (likely(skb != NULL)) {
+ skb_copy_to_linear_data(skb, buf, desc->pkt_length);
+ skb_put(skb, desc->pkt_length);
+ skb->protocol = eth_type_trans(skb, nds[desc->channel]);
+
+ netif_receive_skb(skb);
+ }
+
+err:
+ ixp2000_reg_write(RING_RX_PENDING, _desc);
+ processed++;
+ }
+
+ return processed;
+}
+
+/* dev always points to nds[0]. */
+static int ixpdev_poll(struct napi_struct *napi, int budget)
+{
+ struct ixpdev_priv *ip = container_of(napi, struct ixpdev_priv, napi);
+ struct net_device *dev = ip->dev;
+ int rx;
+
+ rx = 0;
+ do {
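+		/*
+		 * Ack the eight RX thread interrupts up front; the loop
+		 * condition re-reads RAW_STATUS afterwards, so packets that
+		 * arrive while we are polling trigger another pass instead
+		 * of being lost (our reading of the clear-then-check order).
+		 */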
+ ixp2000_reg_write(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0x00ff);
+
+ rx = ixpdev_rx(dev, rx, budget);
+ if (rx >= budget)
+ break;
+ } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff);
+
+ napi_complete(napi);
+ ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff);
+
+ return rx;
+}
+
+static void ixpdev_tx_complete(void)
+{
+ int channel;
+ u32 wake;
+
+ wake = 0;
+ while (1) {
+ struct ixpdev_priv *ip;
+ u32 desc;
+ int entry;
+
+ desc = ixp2000_reg_read(RING_TX_DONE);
+ if (desc == 0)
+ break;
+
+ /* @@@ Check whether entries come back in order. */
+ entry = (desc - TX_BUF_DESC_BASE) / sizeof(struct ixpdev_tx_desc);
+ channel = tx_desc[entry].channel;
+
+ if (channel < 0 || channel >= nds_count) {
+ printk(KERN_ERR "ixp2000: txcomp channel index "
+ "out of bounds (%d, %.8i, %d)\n",
+ channel, (unsigned int)desc, entry);
+ continue;
+ }
+
+ ip = netdev_priv(nds[channel]);
+ if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
+ wake |= 1 << channel;
+ ip->tx_queue_entries--;
+ }
+
+ for (channel = 0; wake != 0; channel++) {
+ if (wake & (1 << channel)) {
+ netif_wake_queue(nds[channel]);
+ wake &= ~(1 << channel);
+ }
+ }
+}
+
+static irqreturn_t ixpdev_interrupt(int irq, void *dev_id)
+{
+ u32 status;
+
+ status = ixp2000_reg_read(IXP2000_IRQ_THD_STATUS_A_0);
+ if (status == 0)
+ return IRQ_NONE;
+
+ /*
+ * Any of the eight receive units signaled RX?
+ */
+ if (status & 0x00ff) {
+ struct net_device *dev = nds[0];
+ struct ixpdev_priv *ip = netdev_priv(dev);
+
+ ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff);
+ if (likely(napi_schedule_prep(&ip->napi))) {
+ __napi_schedule(&ip->napi);
+ } else {
+ printk(KERN_CRIT "ixp2000: irq while polling!!\n");
+ }
+ }
+
+ /*
+ * Any of the eight transmit units signaled TXdone?
+ */
+ if (status & 0xff00) {
+ ixp2000_reg_wrb(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0xff00);
+ ixpdev_tx_complete();
+ }
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ixpdev_poll_controller(struct net_device *dev)
+{
+ disable_irq(IRQ_IXP2000_THDA0);
+ ixpdev_interrupt(IRQ_IXP2000_THDA0, dev);
+ enable_irq(IRQ_IXP2000_THDA0);
+}
+#endif
+
+static int ixpdev_open(struct net_device *dev)
+{
+ struct ixpdev_priv *ip = netdev_priv(dev);
+ int err;
+
+ napi_enable(&ip->napi);
+ if (!nds_open++) {
+ err = request_irq(IRQ_IXP2000_THDA0, ixpdev_interrupt,
+ IRQF_SHARED, "ixp2000_eth", nds);
+ if (err) {
+ nds_open--;
+ napi_disable(&ip->napi);
+ return err;
+ }
+
+ ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0xffff);
+ }
+
+ set_port_admin_status(ip->channel, 1);
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int ixpdev_close(struct net_device *dev)
+{
+ struct ixpdev_priv *ip = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ napi_disable(&ip->napi);
+ set_port_admin_status(ip->channel, 0);
+
+ if (!--nds_open) {
+ ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0xffff);
+ free_irq(IRQ_IXP2000_THDA0, nds);
+ }
+
+ return 0;
+}
+
+static struct net_device_stats *ixpdev_get_stats(struct net_device *dev)
+{
+ struct ixpdev_priv *ip = netdev_priv(dev);
+
+ pm3386_get_stats(ip->channel, &(dev->stats));
+
+ return &(dev->stats);
+}
+
+static const struct net_device_ops ixpdev_netdev_ops = {
+ .ndo_open = ixpdev_open,
+ .ndo_stop = ixpdev_close,
+ .ndo_start_xmit = ixpdev_xmit,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_get_stats = ixpdev_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ixpdev_poll_controller,
+#endif
+};
+
+struct net_device *ixpdev_alloc(int channel, int sizeof_priv)
+{
+ struct net_device *dev;
+ struct ixpdev_priv *ip;
+
+ dev = alloc_etherdev(sizeof_priv);
+ if (dev == NULL)
+ return NULL;
+
+ dev->netdev_ops = &ixpdev_netdev_ops;
+
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+
+ ip = netdev_priv(dev);
+ ip->dev = dev;
+ netif_napi_add(dev, &ip->napi, ixpdev_poll, 64);
+ ip->channel = channel;
+ ip->tx_queue_entries = 0;
+
+ return dev;
+}
+
+int ixpdev_init(int __nds_count, struct net_device **__nds,
+ void (*__set_port_admin_status)(int port, int up))
+{
+ int i;
+ int err;
+
+ BUILD_BUG_ON(RX_BUF_COUNT > 192 || TX_BUF_COUNT > 192);
+
+ printk(KERN_INFO "IXP2000 MSF ethernet driver %s\n", DRV_MODULE_VERSION);
+
+ nds_count = __nds_count;
+ nds = __nds;
+ set_port_admin_status = __set_port_admin_status;
+
+ for (i = 0; i < RX_BUF_COUNT; i++) {
+ void *buf;
+
+ buf = (void *)get_zeroed_page(GFP_KERNEL);
+ if (buf == NULL) {
+ err = -ENOMEM;
+ while (--i >= 0)
+ free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
+ goto err_out;
+ }
+ rx_desc[i].buf_addr = virt_to_phys(buf);
+ rx_desc[i].buf_length = PAGE_SIZE;
+ }
+
+ /* @@@ Maybe we shouldn't be preallocating TX buffers. */
+ for (i = 0; i < TX_BUF_COUNT; i++) {
+ void *buf;
+
+ buf = (void *)get_zeroed_page(GFP_KERNEL);
+ if (buf == NULL) {
+ err = -ENOMEM;
+ while (--i >= 0)
+ free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
+ goto err_free_rx;
+ }
+ tx_desc[i].buf_addr = virt_to_phys(buf);
+ }
+
+ /* 256 entries, ring status set means 'empty', base address 0x0000. */
+ ixp2000_reg_write(RING_RX_PENDING_BASE, 0x44000000);
+ ixp2000_reg_write(RING_RX_PENDING_HEAD, 0x00000000);
+ ixp2000_reg_write(RING_RX_PENDING_TAIL, 0x00000000);
+
+ /* 256 entries, ring status set means 'full', base address 0x0400. */
+ ixp2000_reg_write(RING_RX_DONE_BASE, 0x40000400);
+ ixp2000_reg_write(RING_RX_DONE_HEAD, 0x00000000);
+ ixp2000_reg_write(RING_RX_DONE_TAIL, 0x00000000);
+
+ for (i = 0; i < RX_BUF_COUNT; i++) {
+ ixp2000_reg_write(RING_RX_PENDING,
+ RX_BUF_DESC_BASE + (i * sizeof(struct ixpdev_rx_desc)));
+ }
+
+ ixp2000_uengine_load(0, &ixp2400_rx);
+ ixp2000_uengine_start_contexts(0, 0xff);
+
+ /* 256 entries, ring status set means 'empty', base address 0x0800. */
+ ixp2000_reg_write(RING_TX_PENDING_BASE, 0x44000800);
+ ixp2000_reg_write(RING_TX_PENDING_HEAD, 0x00000000);
+ ixp2000_reg_write(RING_TX_PENDING_TAIL, 0x00000000);
+
+ /* 256 entries, ring status set means 'full', base address 0x0c00. */
+ ixp2000_reg_write(RING_TX_DONE_BASE, 0x40000c00);
+ ixp2000_reg_write(RING_TX_DONE_HEAD, 0x00000000);
+ ixp2000_reg_write(RING_TX_DONE_TAIL, 0x00000000);
+
+ ixp2000_uengine_load(1, &ixp2400_tx);
+ ixp2000_uengine_start_contexts(1, 0xff);
+
+ for (i = 0; i < nds_count; i++) {
+ err = register_netdev(nds[i]);
+ if (err) {
+ while (--i >= 0)
+ unregister_netdev(nds[i]);
+ goto err_free_tx;
+ }
+ }
+
+ for (i = 0; i < nds_count; i++) {
+ printk(KERN_INFO "%s: IXP2000 MSF ethernet (port %d), "
+ "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", nds[i]->name, i,
+ nds[i]->dev_addr[0], nds[i]->dev_addr[1],
+ nds[i]->dev_addr[2], nds[i]->dev_addr[3],
+ nds[i]->dev_addr[4], nds[i]->dev_addr[5]);
+ }
+
+ return 0;
+
+err_free_tx:
+ for (i = 0; i < TX_BUF_COUNT; i++)
+ free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
+
+err_free_rx:
+ for (i = 0; i < RX_BUF_COUNT; i++)
+ free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
+
+err_out:
+ return err;
+}
+
+void ixpdev_deinit(void)
+{
+ int i;
+
+ /* @@@ Flush out pending packets. */
+
+ for (i = 0; i < nds_count; i++)
+ unregister_netdev(nds[i]);
+
+ ixp2000_uengine_stop_contexts(1, 0xff);
+ ixp2000_uengine_stop_contexts(0, 0xff);
+ ixp2000_uengine_reset(0x3);
+
+ for (i = 0; i < TX_BUF_COUNT; i++)
+ free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
+
+ for (i = 0; i < RX_BUF_COUNT; i++)
+ free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
+}
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev.h b/drivers/net/ethernet/xscale/ixp2000/ixpdev.h
new file mode 100644
index 000000000000..391ece623243
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp2000/ixpdev.h
@@ -0,0 +1,29 @@
+/*
+ * IXP2000 MSF network device driver
+ * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Dedicated to Marija Kulikova.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __IXPDEV_H
+#define __IXPDEV_H
+
+struct ixpdev_priv
+{
+ struct net_device *dev;
+ struct napi_struct napi;
+ int channel;
+ int tx_queue_entries;
+};
+
+struct net_device *ixpdev_alloc(int channel, int sizeof_priv);
+int ixpdev_init(int num_ports, struct net_device **nds,
+ void (*set_port_admin_status)(int port, int up));
+void ixpdev_deinit(void);
+
+
+#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h b/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h
new file mode 100644
index 000000000000..86aa08ea0c33
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h
@@ -0,0 +1,57 @@
+/*
+ * IXP2000 MSF network device driver
+ * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Dedicated to Marija Kulikova.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __IXPDEV_PRIV_H
+#define __IXPDEV_PRIV_H
+
+#define RX_BUF_DESC_BASE 0x00001000
+#define RX_BUF_COUNT ((3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_rx_desc)))
+#define TX_BUF_DESC_BASE 0x00002000
+#define TX_BUF_COUNT ((3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_tx_desc)))
+#define TX_BUF_COUNT_PER_CHAN (TX_BUF_COUNT / 4)
+
+#define RING_RX_PENDING ((u32 *)IXP2000_SCRATCH_RING_VIRT_BASE)
+#define RING_RX_DONE ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 4))
+#define RING_TX_PENDING ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 8))
+#define RING_TX_DONE ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 12))
+
+#define SCRATCH_REG(x) ((u32 *)(IXP2000_GLOBAL_REG_VIRT_BASE | 0x0800 | (x)))
+#define RING_RX_PENDING_BASE SCRATCH_REG(0x00)
+#define RING_RX_PENDING_HEAD SCRATCH_REG(0x04)
+#define RING_RX_PENDING_TAIL SCRATCH_REG(0x08)
+#define RING_RX_DONE_BASE SCRATCH_REG(0x10)
+#define RING_RX_DONE_HEAD SCRATCH_REG(0x14)
+#define RING_RX_DONE_TAIL SCRATCH_REG(0x18)
+#define RING_TX_PENDING_BASE SCRATCH_REG(0x20)
+#define RING_TX_PENDING_HEAD SCRATCH_REG(0x24)
+#define RING_TX_PENDING_TAIL SCRATCH_REG(0x28)
+#define RING_TX_DONE_BASE SCRATCH_REG(0x30)
+#define RING_TX_DONE_HEAD SCRATCH_REG(0x34)
+#define RING_TX_DONE_TAIL SCRATCH_REG(0x38)
+
+struct ixpdev_rx_desc
+{
+ u32 buf_addr;
+ u32 buf_length;
+ u32 channel;
+ u32 pkt_length;
+};
+
+struct ixpdev_tx_desc
+{
+ u32 buf_addr;
+ u32 pkt_length;
+ u32 channel;
+ u32 unused;
+};
+
+
+#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/pm3386.c b/drivers/net/ethernet/xscale/ixp2000/pm3386.c
new file mode 100644
index 000000000000..e08d3f9863b8
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp2000/pm3386.c
@@ -0,0 +1,351 @@
+/*
+ * Helper functions for the PM3386s on the Radisys ENP2611
+ * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Dedicated to Marija Kulikova.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <asm/io.h>
+#include "pm3386.h"
+
+/*
+ * Read from register 'reg' of PM3386 device 'pm'.
+ */
+static u16 pm3386_reg_read(int pm, int reg)
+{
+ void *_reg;
+ u16 value;
+
+ _reg = (void *)ENP2611_PM3386_0_VIRT_BASE;
+ if (pm == 1)
+ _reg = (void *)ENP2611_PM3386_1_VIRT_BASE;
+
+ value = *((volatile u16 *)(_reg + (reg << 1)));
+
+// printk(KERN_INFO "pm3386_reg_read(%d, %.3x) = %.8x\n", pm, reg, value);
+
+ return value;
+}
+
+/*
+ * Write to register 'reg' of PM3386 device 'pm', and perform
+ * a readback from the identification register.
+ */
+static void pm3386_reg_write(int pm, int reg, u16 value)
+{
+ void *_reg;
+ u16 dummy;
+
+// printk(KERN_INFO "pm3386_reg_write(%d, %.3x, %.8x)\n", pm, reg, value);
+
+ _reg = (void *)ENP2611_PM3386_0_VIRT_BASE;
+ if (pm == 1)
+ _reg = (void *)ENP2611_PM3386_1_VIRT_BASE;
+
+ *((volatile u16 *)(_reg + (reg << 1))) = value;
+
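+	/*
+	 * The read below flushes the posted write, and the empty asm
+	 * keeps 'dummy' live so the compiler cannot discard the read
+	 * (presumably the point of the "mov %0, %0").
+	 */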
+ dummy = *((volatile u16 *)_reg);
+ __asm__ __volatile__("mov %0, %0" : "+r" (dummy));
+}
+
+/*
+ * Read from port 'port' register 'reg', where the registers
+ * for the different ports are 'spacing' registers apart.
+ */
+static u16 pm3386_port_reg_read(int port, int _reg, int spacing)
+{
+ int reg;
+
+ reg = _reg;
+ if (port & 1)
+ reg += spacing;
+
+ return pm3386_reg_read(port >> 1, reg);
+}
+
+/*
+ * Write to port 'port' register 'reg', where the registers
+ * for the different ports are 'spacing' registers apart.
+ */
+static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value)
+{
+ int reg;
+
+ reg = _reg;
+ if (port & 1)
+ reg += spacing;
+
+ pm3386_reg_write(port >> 1, reg, value);
+}
+
+int pm3386_secondary_present(void)
+{
+ return pm3386_reg_read(1, 0) == 0x3386;
+}
+
+void pm3386_reset(void)
+{
+ u8 mac[3][6];
+ int secondary;
+
+ secondary = pm3386_secondary_present();
+
+ /* Save programmed MAC addresses. */
+ pm3386_get_mac(0, mac[0]);
+ pm3386_get_mac(1, mac[1]);
+ if (secondary)
+ pm3386_get_mac(2, mac[2]);
+
+ /* Assert analog and digital reset. */
+ pm3386_reg_write(0, 0x002, 0x0060);
+ if (secondary)
+ pm3386_reg_write(1, 0x002, 0x0060);
+ mdelay(1);
+
+ /* Deassert analog reset. */
+ pm3386_reg_write(0, 0x002, 0x0062);
+ if (secondary)
+ pm3386_reg_write(1, 0x002, 0x0062);
+ mdelay(10);
+
+ /* Deassert digital reset. */
+ pm3386_reg_write(0, 0x002, 0x0063);
+ if (secondary)
+ pm3386_reg_write(1, 0x002, 0x0063);
+ mdelay(10);
+
+ /* Restore programmed MAC addresses. */
+ pm3386_set_mac(0, mac[0]);
+ pm3386_set_mac(1, mac[1]);
+ if (secondary)
+ pm3386_set_mac(2, mac[2]);
+
+ /* Disable carrier on all ports. */
+ pm3386_set_carrier(0, 0);
+ pm3386_set_carrier(1, 0);
+ if (secondary)
+ pm3386_set_carrier(2, 0);
+}
+
+static u16 swaph(u16 x)
+{
+ return ((x << 8) | (x >> 8)) & 0xffff;
+}
+
+int pm3386_port_count(void)
+{
+ return 2 + pm3386_secondary_present();
+}
+
+void pm3386_init_port(int port)
+{
+ int pm = port >> 1;
+
+ /*
+	 * Work around the ENP2611 bootloader programming the MAC
+	 * address in reverse.
+ */
+ if (pm3386_port_reg_read(port, 0x30a, 0x100) == 0x0000 &&
+ (pm3386_port_reg_read(port, 0x309, 0x100) & 0xff00) == 0x5000) {
+ u16 temp[3];
+
+ temp[0] = pm3386_port_reg_read(port, 0x308, 0x100);
+ temp[1] = pm3386_port_reg_read(port, 0x309, 0x100);
+ temp[2] = pm3386_port_reg_read(port, 0x30a, 0x100);
+ pm3386_port_reg_write(port, 0x308, 0x100, swaph(temp[2]));
+ pm3386_port_reg_write(port, 0x309, 0x100, swaph(temp[1]));
+ pm3386_port_reg_write(port, 0x30a, 0x100, swaph(temp[0]));
+ }
+
+ /*
+ * Initialise narrowbanding mode. See application note 2010486
+ * for more information. (@@@ We also need to issue a reset
+ * when ROOL or DOOL are detected.)
+ */
+ pm3386_port_reg_write(port, 0x708, 0x10, 0xd055);
+ udelay(500);
+ pm3386_port_reg_write(port, 0x708, 0x10, 0x5055);
+
+ /*
+	 * SPI-3 ingress block. Set a 64-byte SPI-3 burst size
+	 * towards the SPI-3 bridge.
+ */
+ pm3386_port_reg_write(port, 0x122, 0x20, 0x0002);
+
+ /*
+ * Enable ingress protocol checking, and soft reset the
+ * SPI-3 ingress block.
+ */
+ pm3386_reg_write(pm, 0x103, 0x0003);
+ while (!(pm3386_reg_read(pm, 0x103) & 0x80))
+ ;
+
+ /*
+ * SPI-3 egress block. Gather 12288 bytes of the current
+ * packet in the TX fifo before initiating transmit on the
+ * SERDES interface. (Prevents TX underflows.)
+ */
+ pm3386_port_reg_write(port, 0x221, 0x20, 0x0007);
+
+ /*
+ * Enforce odd parity from the SPI-3 bridge, and soft reset
+ * the SPI-3 egress block.
+ */
+ pm3386_reg_write(pm, 0x203, 0x000d & ~(4 << (port & 1)));
+ while ((pm3386_reg_read(pm, 0x203) & 0x000c) != 0x000c)
+ ;
+
+ /*
+	 * EGMAC block. Set this channel to reject long preambles,
+	 * not send PAUSE frames, enable preamble checking,
+	 * disable frame length checking, enable FCS appending and
+	 * enable TX frame padding.
+ */
+ pm3386_port_reg_write(port, 0x302, 0x100, 0x0113);
+
+ /*
+ * Soft reset the EGMAC block.
+ */
+ pm3386_port_reg_write(port, 0x301, 0x100, 0x8000);
+ pm3386_port_reg_write(port, 0x301, 0x100, 0x0000);
+
+ /*
+ * Auto-sense autonegotiation status.
+ */
+ pm3386_port_reg_write(port, 0x306, 0x100, 0x0100);
+
+ /*
+ * Allow reception of jumbo frames.
+ */
+ pm3386_port_reg_write(port, 0x310, 0x100, 9018);
+
+ /*
+ * Allow transmission of jumbo frames.
+ */
+ pm3386_port_reg_write(port, 0x336, 0x100, 9018);
+
+ /* @@@ Should set 0x337/0x437 (RX forwarding threshold.) */
+
+ /*
+ * Set autonegotiation parameters to 'no PAUSE, full duplex.'
+ */
+ pm3386_port_reg_write(port, 0x31c, 0x100, 0x0020);
+
+ /*
+ * Enable and restart autonegotiation.
+ */
+ pm3386_port_reg_write(port, 0x318, 0x100, 0x0003);
+ pm3386_port_reg_write(port, 0x318, 0x100, 0x0002);
+}
+
+void pm3386_get_mac(int port, u8 *mac)
+{
+ u16 temp;
+
+ temp = pm3386_port_reg_read(port, 0x308, 0x100);
+ mac[0] = temp & 0xff;
+ mac[1] = (temp >> 8) & 0xff;
+
+ temp = pm3386_port_reg_read(port, 0x309, 0x100);
+ mac[2] = temp & 0xff;
+ mac[3] = (temp >> 8) & 0xff;
+
+ temp = pm3386_port_reg_read(port, 0x30a, 0x100);
+ mac[4] = temp & 0xff;
+ mac[5] = (temp >> 8) & 0xff;
+}
+
+void pm3386_set_mac(int port, u8 *mac)
+{
+ pm3386_port_reg_write(port, 0x308, 0x100, (mac[1] << 8) | mac[0]);
+ pm3386_port_reg_write(port, 0x309, 0x100, (mac[3] << 8) | mac[2]);
+ pm3386_port_reg_write(port, 0x30a, 0x100, (mac[5] << 8) | mac[4]);
+}
+
+static u32 pm3386_get_stat(int port, u16 base)
+{
+ u32 value;
+
+ value = pm3386_port_reg_read(port, base, 0x100);
+ value |= pm3386_port_reg_read(port, base + 1, 0x100) << 16;
+
+ return value;
+}
+
+void pm3386_get_stats(int port, struct net_device_stats *stats)
+{
+ /*
+ * Snapshot statistics counters.
+ */
+ pm3386_port_reg_write(port, 0x500, 0x100, 0x0001);
+ while (pm3386_port_reg_read(port, 0x500, 0x100) & 0x0001)
+ ;
+
+ memset(stats, 0, sizeof(*stats));
+
+ stats->rx_packets = pm3386_get_stat(port, 0x510);
+ stats->tx_packets = pm3386_get_stat(port, 0x590);
+ stats->rx_bytes = pm3386_get_stat(port, 0x514);
+ stats->tx_bytes = pm3386_get_stat(port, 0x594);
+ /* @@@ Add other stats. */
+}
+
+void pm3386_set_carrier(int port, int state)
+{
+ pm3386_port_reg_write(port, 0x703, 0x10, state ? 0x1001 : 0x0000);
+}
+
+int pm3386_is_link_up(int port)
+{
+ u16 temp;
+
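+	/*
+	 * The status register is read twice, presumably because the link
+	 * bit is latched: the first read clears a stale latch and the
+	 * second reflects the current link state.
+	 */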
+ temp = pm3386_port_reg_read(port, 0x31a, 0x100);
+ temp = pm3386_port_reg_read(port, 0x31a, 0x100);
+
+ return !!(temp & 0x0002);
+}
+
+void pm3386_enable_rx(int port)
+{
+ u16 temp;
+
+ temp = pm3386_port_reg_read(port, 0x303, 0x100);
+ temp |= 0x1000;
+ pm3386_port_reg_write(port, 0x303, 0x100, temp);
+}
+
+void pm3386_disable_rx(int port)
+{
+ u16 temp;
+
+ temp = pm3386_port_reg_read(port, 0x303, 0x100);
+ temp &= 0xefff;
+ pm3386_port_reg_write(port, 0x303, 0x100, temp);
+}
+
+void pm3386_enable_tx(int port)
+{
+ u16 temp;
+
+ temp = pm3386_port_reg_read(port, 0x303, 0x100);
+ temp |= 0x4000;
+ pm3386_port_reg_write(port, 0x303, 0x100, temp);
+}
+
+void pm3386_disable_tx(int port)
+{
+ u16 temp;
+
+ temp = pm3386_port_reg_read(port, 0x303, 0x100);
+ temp &= 0xbfff;
+ pm3386_port_reg_write(port, 0x303, 0x100, temp);
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/xscale/ixp2000/pm3386.h b/drivers/net/ethernet/xscale/ixp2000/pm3386.h
new file mode 100644
index 000000000000..cc4183dca911
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp2000/pm3386.h
@@ -0,0 +1,29 @@
+/*
+ * Helper functions for the PM3386s on the Radisys ENP2611
+ * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Dedicated to Marija Kulikova.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __PM3386_H
+#define __PM3386_H
+
+void pm3386_reset(void);
+int pm3386_port_count(void);
+void pm3386_init_port(int port);
+void pm3386_get_mac(int port, u8 *mac);
+void pm3386_set_mac(int port, u8 *mac);
+void pm3386_get_stats(int port, struct net_device_stats *stats);
+void pm3386_set_carrier(int port, int state);
+int pm3386_is_link_up(int port);
+void pm3386_enable_rx(int port);
+void pm3386_disable_rx(int port);
+void pm3386_enable_tx(int port);
+void pm3386_disable_tx(int port);
+
+
+#endif
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
new file mode 100644
index 000000000000..ec96d910e9a3
--- /dev/null
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -0,0 +1,1489 @@
+/*
+ * Intel IXP4xx Ethernet driver for Linux
+ *
+ * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * Ethernet port config (0x00 is not present on IXP42X):
+ *
+ * logical port 0x00 0x10 0x20
+ * NPE 0 (NPE-A) 1 (NPE-B) 2 (NPE-C)
+ * physical PortId 2 0 1
+ * TX queue 23 24 25
+ * RX-free queue 26 27 28
+ * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
+ *
+ *
+ * Queue entries:
+ * bits 0 -> 1 - NPE ID (RX and TX-done)
+ * bits 0 -> 2 - priority (TX, per 802.1D)
+ * bits 3 -> 4 - port ID (user-set?)
+ * bits 5 -> 31 - physical descriptor address
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/ptp_classify.h>
+#include <linux/slab.h>
+#include <mach/ixp46x_ts.h>
+#include <mach/npe.h>
+#include <mach/qmgr.h>
+
+#define DEBUG_DESC 0
+#define DEBUG_RX 0
+#define DEBUG_TX 0
+#define DEBUG_PKT_BYTES 0
+#define DEBUG_MDIO 0
+#define DEBUG_CLOSE 0
+
+#define DRV_NAME "ixp4xx_eth"
+
+#define MAX_NPES 3
+
+#define RX_DESCS 64 /* also length of all RX queues */
+#define TX_DESCS 16 /* also length of all TX queues */
+#define TXDONE_QUEUE_LEN 64 /* dwords */
+
+#define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
+#define REGS_SIZE 0x1000
+#define MAX_MRU 1536 /* 0x600 */
+#define RX_BUFF_SIZE ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)
+
+#define NAPI_WEIGHT 16
+#define MDIO_INTERVAL (3 * HZ)
+#define MAX_MDIO_RETRIES 100 /* microseconds, typically 30 cycles */
+#define MAX_CLOSE_WAIT 1000 /* microseconds, typically 2-3 cycles */
+
+#define NPE_ID(port_id) ((port_id) >> 4)
+#define PHYSICAL_ID(port_id) ((NPE_ID(port_id) + 2) % 3)
+#define TX_QUEUE(port_id) (NPE_ID(port_id) + 23)
+#define RXFREE_QUEUE(port_id) (NPE_ID(port_id) + 26)
+#define TXDONE_QUEUE 31
+
+#define PTP_SLAVE_MODE 1
+#define PTP_MASTER_MODE 2
+#define PORT2CHANNEL(p) NPE_ID(p->id)
+
+/* TX Control Registers */
+#define TX_CNTRL0_TX_EN 0x01
+#define TX_CNTRL0_HALFDUPLEX 0x02
+#define TX_CNTRL0_RETRY 0x04
+#define TX_CNTRL0_PAD_EN 0x08
+#define TX_CNTRL0_APPEND_FCS 0x10
+#define TX_CNTRL0_2DEFER 0x20
+#define TX_CNTRL0_RMII 0x40 /* reduced MII */
+#define TX_CNTRL1_RETRIES 0x0F /* 4 bits */
+
+/* RX Control Registers */
+#define RX_CNTRL0_RX_EN 0x01
+#define RX_CNTRL0_PADSTRIP_EN 0x02
+#define RX_CNTRL0_SEND_FCS 0x04
+#define RX_CNTRL0_PAUSE_EN 0x08
+#define RX_CNTRL0_LOOP_EN 0x10
+#define RX_CNTRL0_ADDR_FLTR_EN 0x20
+#define RX_CNTRL0_RX_RUNT_EN 0x40
+#define RX_CNTRL0_BCAST_DIS 0x80
+#define RX_CNTRL1_DEFER_EN 0x01
+
+/* Core Control Register */
+#define CORE_RESET 0x01
+#define CORE_RX_FIFO_FLUSH 0x02
+#define CORE_TX_FIFO_FLUSH 0x04
+#define CORE_SEND_JAM 0x08
+#define CORE_MDC_EN 0x10 /* MDIO using NPE-B ETH-0 only */
+
+#define DEFAULT_TX_CNTRL0 (TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY | \
+ TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
+ TX_CNTRL0_2DEFER)
+#define DEFAULT_RX_CNTRL0 RX_CNTRL0_RX_EN
+#define DEFAULT_CORE_CNTRL CORE_MDC_EN
+
+
+/* NPE message codes */
+#define NPE_GETSTATUS 0x00
+#define NPE_EDB_SETPORTADDRESS 0x01
+#define NPE_EDB_GETMACADDRESSDATABASE 0x02
+#define NPE_EDB_SETMACADDRESSSDATABASE 0x03
+#define NPE_GETSTATS 0x04
+#define NPE_RESETSTATS 0x05
+#define NPE_SETMAXFRAMELENGTHS 0x06
+#define NPE_VLAN_SETRXTAGMODE 0x07
+#define NPE_VLAN_SETDEFAULTRXVID 0x08
+#define NPE_VLAN_SETPORTVLANTABLEENTRY 0x09
+#define NPE_VLAN_SETPORTVLANTABLERANGE 0x0A
+#define NPE_VLAN_SETRXQOSENTRY 0x0B
+#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
+#define NPE_STP_SETBLOCKINGSTATE 0x0D
+#define NPE_FW_SETFIREWALLMODE 0x0E
+#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
+#define NPE_PC_SETAPMACTABLE 0x11
+#define NPE_SETLOOPBACK_MODE 0x12
+#define NPE_PC_SETBSSIDTABLE 0x13
+#define NPE_ADDRESS_FILTER_CONFIG 0x14
+#define NPE_APPENDFCSCONFIG 0x15
+#define NPE_NOTIFY_MAC_RECOVERY_DONE 0x16
+#define NPE_MAC_RECOVERY_START 0x17
+
+
+#ifdef __ARMEB__
+typedef struct sk_buff buffer_t;
+#define free_buffer dev_kfree_skb
+#define free_buffer_irq dev_kfree_skb_irq
+#else
+typedef void buffer_t;
+#define free_buffer kfree
+#define free_buffer_irq kfree
+#endif
+
+struct eth_regs {
+ u32 tx_control[2], __res1[2]; /* 000 */
+ u32 rx_control[2], __res2[2]; /* 010 */
+ u32 random_seed, __res3[3]; /* 020 */
+ u32 partial_empty_threshold, __res4; /* 030 */
+ u32 partial_full_threshold, __res5; /* 038 */
+ u32 tx_start_bytes, __res6[3]; /* 040 */
+ u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
+ u32 tx_2part_deferral[2], __res8[2]; /* 060 */
+ u32 slot_time, __res9[3]; /* 070 */
+ u32 mdio_command[4]; /* 080 */
+ u32 mdio_status[4]; /* 090 */
+ u32 mcast_mask[6], __res10[2]; /* 0A0 */
+ u32 mcast_addr[6], __res11[2]; /* 0C0 */
+ u32 int_clock_threshold, __res12[3]; /* 0E0 */
+ u32 hw_addr[6], __res13[61]; /* 0F0 */
+ u32 core_control; /* 1FC */
+};
+
+struct port {
+ struct resource *mem_res;
+ struct eth_regs __iomem *regs;
+ struct npe *npe;
+ struct net_device *netdev;
+ struct napi_struct napi;
+ struct phy_device *phydev;
+ struct eth_plat_info *plat;
+ buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
+ struct desc *desc_tab; /* coherent */
+ u32 desc_tab_phys;
+ int id; /* logical port ID */
+ int speed, duplex;
+ u8 firmware[4];
+ int hwts_tx_en;
+ int hwts_rx_en;
+};
+
+/* NPE message structure */
+struct msg {
+#ifdef __ARMEB__
+ u8 cmd, eth_id, byte2, byte3;
+ u8 byte4, byte5, byte6, byte7;
+#else
+ u8 byte3, byte2, eth_id, cmd;
+ u8 byte7, byte6, byte5, byte4;
+#endif
+};
+
+/* Ethernet packet descriptor */
+struct desc {
+ u32 next; /* pointer to next buffer, unused */
+
+#ifdef __ARMEB__
+ u16 buf_len; /* buffer length */
+ u16 pkt_len; /* packet length */
+ u32 data; /* pointer to data buffer in RAM */
+ u8 dest_id;
+ u8 src_id;
+ u16 flags;
+ u8 qos;
+ u8 padlen;
+ u16 vlan_tci;
+#else
+ u16 pkt_len; /* packet length */
+ u16 buf_len; /* buffer length */
+ u32 data; /* pointer to data buffer in RAM */
+ u16 flags;
+ u8 src_id;
+ u8 dest_id;
+ u16 vlan_tci;
+ u8 padlen;
+ u8 qos;
+#endif
+
+#ifdef __ARMEB__
+ u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
+ u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
+ u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
+#else
+ u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
+ u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
+ u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
+#endif
+};
+
+
+#define rx_desc_phys(port, n) ((port)->desc_tab_phys + \
+ (n) * sizeof(struct desc))
+#define rx_desc_ptr(port, n) (&(port)->desc_tab[n])
+
+#define tx_desc_phys(port, n) ((port)->desc_tab_phys + \
+ ((n) + RX_DESCS) * sizeof(struct desc))
+#define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS])
+
+#ifndef __ARMEB__
+static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
+{
+ int i;
+ for (i = 0; i < cnt; i++)
+ dest[i] = swab32(src[i]);
+}
+#endif
+
+static spinlock_t mdio_lock;
+static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
+static struct mii_bus *mdio_bus;
+static int ports_open;
+static struct port *npe_port_tab[MAX_NPES];
+static struct dma_pool *dma_pool;
+
+static struct sock_filter ptp_filter[] = {
+ PTP_FILTER
+};
+
+static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
+{
+ u8 *data = skb->data;
+ unsigned int offset;
+ u16 *hi, *id;
+ u32 lo;
+
+ if (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)
+ return 0;
+
+ offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+
+ if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
+ return 0;
+
+ hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
+ id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+
+ memcpy(&lo, &hi[1], sizeof(lo));
+
+ return (uid_hi == ntohs(*hi) &&
+ uid_lo == ntohl(lo) &&
+ seqid == ntohs(*id));
+}
+
+static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct ixp46x_ts_regs *regs;
+ u64 ns;
+ u32 ch, hi, lo, val;
+ u16 uid, seq;
+
+ if (!port->hwts_rx_en)
+ return;
+
+ ch = PORT2CHANNEL(port);
+
+ regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;
+
+ val = __raw_readl(&regs->channel[ch].ch_event);
+
+ if (!(val & RX_SNAPSHOT_LOCKED))
+ return;
+
+ lo = __raw_readl(&regs->channel[ch].src_uuid_lo);
+ hi = __raw_readl(&regs->channel[ch].src_uuid_hi);
+
+ uid = hi & 0xffff;
+ seq = (hi >> 16) & 0xffff;
+
+ if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
+ goto out;
+
+ lo = __raw_readl(&regs->channel[ch].rx_snap_lo);
+ hi = __raw_readl(&regs->channel[ch].rx_snap_hi);
+ ns = ((u64) hi) << 32;
+ ns |= lo;
+ ns <<= TICKS_NS_SHIFT;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+out:
+ __raw_writel(RX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
+}
+
+static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct ixp46x_ts_regs *regs;
+ struct skb_shared_info *shtx;
+ u64 ns;
+ u32 ch, cnt, hi, lo, val;
+
+ shtx = skb_shinfo(skb);
+ if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en))
+ shtx->tx_flags |= SKBTX_IN_PROGRESS;
+ else
+ return;
+
+ ch = PORT2CHANNEL(port);
+
+ regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;
+
+ /*
+ * This really stinks, but we have to poll for the Tx time stamp.
+ * Usually, the time stamp is ready after 4 to 6 microseconds.
+ */
+ for (cnt = 0; cnt < 100; cnt++) {
+ val = __raw_readl(&regs->channel[ch].ch_event);
+ if (val & TX_SNAPSHOT_LOCKED)
+ break;
+ udelay(1);
+ }
+ if (!(val & TX_SNAPSHOT_LOCKED)) {
+ shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
+ return;
+ }
+
+ lo = __raw_readl(&regs->channel[ch].tx_snap_lo);
+ hi = __raw_readl(&regs->channel[ch].tx_snap_hi);
+ ns = ((u64) hi) << 32;
+ ns |= lo;
+ ns <<= TICKS_NS_SHIFT;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+
+ __raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
+}
+
+static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config cfg;
+ struct ixp46x_ts_regs *regs;
+ struct port *port = netdev_priv(netdev);
+ int ch;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ if (cfg.flags) /* reserved for future extensions */
+ return -EINVAL;
+
+ ch = PORT2CHANNEL(port);
+ regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;
+
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ port->hwts_tx_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ port->hwts_tx_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ port->hwts_rx_en = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ port->hwts_rx_en = PTP_SLAVE_MODE;
+ __raw_writel(0, &regs->channel[ch].ch_control);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ port->hwts_rx_en = PTP_MASTER_MODE;
+ __raw_writel(MASTER_MODE, &regs->channel[ch].ch_control);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Clear out any old time stamps. */
+ __raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
+ &regs->channel[ch].ch_event);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
+ int write, u16 cmd)
+{
+ int cycles = 0;
+
+ if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
+ printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
+ return -1;
+ }
+
+ if (write) {
+ __raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
+ __raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
+ }
+ __raw_writel(((phy_id << 5) | location) & 0xFF,
+ &mdio_regs->mdio_command[2]);
+ __raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
+ &mdio_regs->mdio_command[3]);
+
+ while ((cycles < MAX_MDIO_RETRIES) &&
+ (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
+ udelay(1);
+ cycles++;
+ }
+
+ if (cycles == MAX_MDIO_RETRIES) {
+ printk(KERN_ERR "%s #%i: MII write failed\n", bus->name,
+ phy_id);
+ return -1;
+ }
+
+#if DEBUG_MDIO
+ printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
+ phy_id, write ? "write" : "read", cycles);
+#endif
+
+ if (write)
+ return 0;
+
+ if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
+#if DEBUG_MDIO
+ printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
+ phy_id);
+#endif
+ return 0xFFFF; /* don't return error */
+ }
+
+ return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
+ ((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
+}
+
+static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mdio_lock, flags);
+ ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
+ spin_unlock_irqrestore(&mdio_lock, flags);
+#if DEBUG_MDIO
+ printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
+ phy_id, location, ret);
+#endif
+ return ret;
+}
+
+static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
+ u16 val)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mdio_lock, flags);
+ ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
+ spin_unlock_irqrestore(&mdio_lock, flags);
+#if DEBUG_MDIO
+ printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n",
+ bus->name, phy_id, location, val, ret);
+#endif
+ return ret;
+}
+
+static int ixp4xx_mdio_register(void)
+{
+ int err;
+
+ if (!(mdio_bus = mdiobus_alloc()))
+ return -ENOMEM;
+
+ if (cpu_is_ixp43x()) {
+ /* IXP43x lacks NPE-B and uses NPE-C for MII PHY access */
+ if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEC_ETH))
+ return -ENODEV;
+ mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
+ } else {
+ /* All MII PHY accesses use NPE-B Ethernet registers */
+ if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
+ return -ENODEV;
+ mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
+ }
+
+ __raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
+ spin_lock_init(&mdio_lock);
+ mdio_bus->name = "IXP4xx MII Bus";
+ mdio_bus->read = &ixp4xx_mdio_read;
+ mdio_bus->write = &ixp4xx_mdio_write;
+ strcpy(mdio_bus->id, "0");
+
+ if ((err = mdiobus_register(mdio_bus)))
+ mdiobus_free(mdio_bus);
+ return err;
+}
+
+static void ixp4xx_mdio_remove(void)
+{
+ mdiobus_unregister(mdio_bus);
+ mdiobus_free(mdio_bus);
+}
+
+
+static void ixp4xx_adjust_link(struct net_device *dev)
+{
+ struct port *port = netdev_priv(dev);
+ struct phy_device *phydev = port->phydev;
+
+ if (!phydev->link) {
+ if (port->speed) {
+ port->speed = 0;
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ }
+ return;
+ }
+
+ if (port->speed == phydev->speed && port->duplex == phydev->duplex)
+ return;
+
+ port->speed = phydev->speed;
+ port->duplex = phydev->duplex;
+
+ if (port->duplex)
+ __raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
+ &port->regs->tx_control[0]);
+ else
+ __raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
+ &port->regs->tx_control[0]);
+
+ printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
+ dev->name, port->speed, port->duplex ? "full" : "half");
+}
+
+
+static inline void debug_pkt(struct net_device *dev, const char *func,
+ u8 *data, int len)
+{
+#if DEBUG_PKT_BYTES
+ int i;
+
+ printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len);
+ for (i = 0; i < len; i++) {
+ if (i >= DEBUG_PKT_BYTES)
+ break;
+ printk("%s%02X",
+ ((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
+ data[i]);
+ }
+ printk("\n");
+#endif
+}
+
+
+static inline void debug_desc(u32 phys, struct desc *desc)
+{
+#if DEBUG_DESC
+ printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
+ " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
+ phys, desc->next, desc->buf_len, desc->pkt_len,
+ desc->data, desc->dest_id, desc->src_id, desc->flags,
+ desc->qos, desc->padlen, desc->vlan_tci,
+ desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
+ desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
+ desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
+ desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
+#endif
+}
+
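+/* Pop a descriptor's physical address from a hardware queue and convert
+ * it to an index into this port's RX or TX descriptor table. */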
+static inline int queue_get_desc(unsigned int queue, struct port *port,
+ int is_tx)
+{
+ u32 phys, tab_phys, n_desc;
+ struct desc *tab;
+
+ if (!(phys = qmgr_get_entry(queue)))
+ return -1;
+
+ phys &= ~0x1F; /* mask out non-address bits */
+ tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
+ tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
+ n_desc = (phys - tab_phys) / sizeof(struct desc);
+ BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
+ debug_desc(phys, &tab[n_desc]);
+ BUG_ON(tab[n_desc].next);
+ return n_desc;
+}
+
+static inline void queue_put_desc(unsigned int queue, u32 phys,
+ struct desc *desc)
+{
+ debug_desc(phys, desc);
+ BUG_ON(phys & 0x1F);
+ qmgr_put_entry(queue, phys);
+	/* Don't check for queue overflow here: we've allocated enough
+	   entries and queues >= 32 don't support this check anyway. */
+}
+
+
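+/* On big-endian ARM the TX buffer is the skb data itself; on little-endian
+ * CPUs it is a word-aligned bounce buffer, so unmap the aligned region. */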
+static inline void dma_unmap_tx(struct port *port, struct desc *desc)
+{
+#ifdef __ARMEB__
+ dma_unmap_single(&port->netdev->dev, desc->data,
+ desc->buf_len, DMA_TO_DEVICE);
+#else
+ dma_unmap_single(&port->netdev->dev, desc->data & ~3,
+ ALIGN((desc->data & 3) + desc->buf_len, 4),
+ DMA_TO_DEVICE);
+#endif
+}
+
+
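+/* RX queue interrupt: mask further IRQs and hand the work to NAPI. */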
+static void eth_rx_irq(void *pdev)
+{
+ struct net_device *dev = pdev;
+ struct port *port = netdev_priv(dev);
+
+#if DEBUG_RX
+ printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
+#endif
+ qmgr_disable_irq(port->plat->rxq);
+ napi_schedule(&port->napi);
+}
+
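+/* NAPI poll: pull received frames from the RX queue, put fresh buffers on
+ * the RX-free queue and re-enable the queue IRQ once no work is left. */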
+static int eth_poll(struct napi_struct *napi, int budget)
+{
+ struct port *port = container_of(napi, struct port, napi);
+ struct net_device *dev = port->netdev;
+ unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
+ int received = 0;
+
+#if DEBUG_RX
+ printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
+#endif
+
+ while (received < budget) {
+ struct sk_buff *skb;
+ struct desc *desc;
+ int n;
+#ifdef __ARMEB__
+ struct sk_buff *temp;
+ u32 phys;
+#endif
+
+ if ((n = queue_get_desc(rxq, port, 0)) < 0) {
+#if DEBUG_RX
+ printk(KERN_DEBUG "%s: eth_poll napi_complete\n",
+ dev->name);
+#endif
+ napi_complete(napi);
+ qmgr_enable_irq(rxq);
+ if (!qmgr_stat_below_low_watermark(rxq) &&
+ napi_reschedule(napi)) { /* not empty again */
+#if DEBUG_RX
+ printk(KERN_DEBUG "%s: eth_poll"
+				       " napi_reschedule succeeded\n",
+ dev->name);
+#endif
+ qmgr_disable_irq(rxq);
+ continue;
+ }
+#if DEBUG_RX
+ printk(KERN_DEBUG "%s: eth_poll all done\n",
+ dev->name);
+#endif
+ return received; /* all work done */
+ }
+
+ desc = rx_desc_ptr(port, n);
+
+#ifdef __ARMEB__
+ if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
+ phys = dma_map_single(&dev->dev, skb->data,
+ RX_BUFF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&dev->dev, phys)) {
+ dev_kfree_skb(skb);
+ skb = NULL;
+ }
+ }
+#else
+ skb = netdev_alloc_skb(dev,
+ ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
+#endif
+
+ if (!skb) {
+ dev->stats.rx_dropped++;
+			/* put the desc back on the RX-free queue */
+ desc->buf_len = MAX_MRU;
+ desc->pkt_len = 0;
+ queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
+ continue;
+ }
+
+ /* process received frame */
+#ifdef __ARMEB__
+ temp = skb;
+ skb = port->rx_buff_tab[n];
+ dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
+ RX_BUFF_SIZE, DMA_FROM_DEVICE);
+#else
+ dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
+ RX_BUFF_SIZE, DMA_FROM_DEVICE);
+ memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
+ ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
+#endif
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb_put(skb, desc->pkt_len);
+
+ debug_pkt(dev, "eth_poll", skb->data, skb->len);
+
+ ixp_rx_timestamp(port, skb);
+ skb->protocol = eth_type_trans(skb, dev);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+ netif_receive_skb(skb);
+
+ /* put the new buffer on RX-free queue */
+#ifdef __ARMEB__
+ port->rx_buff_tab[n] = temp;
+ desc->data = phys + NET_IP_ALIGN;
+#endif
+ desc->buf_len = MAX_MRU;
+ desc->pkt_len = 0;
+ queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
+ received++;
+ }
+
+#if DEBUG_RX
+ printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
+#endif
+ return received; /* not all work done */
+}
+
+
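+/* TX-done interrupt, shared by all ports: each entry encodes the NPE id
+ * in the low bits of the descriptor's physical address. */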
+static void eth_txdone_irq(void *unused)
+{
+ u32 phys;
+
+#if DEBUG_TX
+ printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
+#endif
+ while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
+ u32 npe_id, n_desc;
+ struct port *port;
+ struct desc *desc;
+ int start;
+
+ npe_id = phys & 3;
+ BUG_ON(npe_id >= MAX_NPES);
+ port = npe_port_tab[npe_id];
+ BUG_ON(!port);
+ phys &= ~0x1F; /* mask out non-address bits */
+ n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
+ BUG_ON(n_desc >= TX_DESCS);
+ desc = tx_desc_ptr(port, n_desc);
+ debug_desc(phys, desc);
+
+ if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
+ port->netdev->stats.tx_packets++;
+ port->netdev->stats.tx_bytes += desc->pkt_len;
+
+ dma_unmap_tx(port, desc);
+#if DEBUG_TX
+ printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
+ port->netdev->name, port->tx_buff_tab[n_desc]);
+#endif
+ free_buffer_irq(port->tx_buff_tab[n_desc]);
+ port->tx_buff_tab[n_desc] = NULL;
+ }
+
+ start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
+ queue_put_desc(port->plat->txreadyq, phys, desc);
+ if (start) { /* TX-ready queue was empty */
+#if DEBUG_TX
+ printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
+ port->netdev->name);
+#endif
+ netif_wake_queue(port->netdev);
+ }
+ }
+}
+
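+/* Transmit path: on little-endian CPUs the frame is byte-swapped into a
+ * word-aligned bounce buffer before being handed to the NPE. */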
+static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct port *port = netdev_priv(dev);
+ unsigned int txreadyq = port->plat->txreadyq;
+ int len, offset, bytes, n;
+ void *mem;
+ u32 phys;
+ struct desc *desc;
+
+#if DEBUG_TX
+ printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
+#endif
+
+ if (unlikely(skb->len > MAX_MRU)) {
+ dev_kfree_skb(skb);
+ dev->stats.tx_errors++;
+ return NETDEV_TX_OK;
+ }
+
+ debug_pkt(dev, "eth_xmit", skb->data, skb->len);
+
+ len = skb->len;
+#ifdef __ARMEB__
+ offset = 0; /* no need to keep alignment */
+ bytes = len;
+ mem = skb->data;
+#else
+ offset = (int)skb->data & 3; /* keep 32-bit alignment */
+ bytes = ALIGN(offset + len, 4);
+ if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
+#endif
+
+ phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->dev, phys)) {
+ dev_kfree_skb(skb);
+#ifndef __ARMEB__
+ kfree(mem);
+#endif
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ n = queue_get_desc(txreadyq, port, 1);
+ BUG_ON(n < 0);
+ desc = tx_desc_ptr(port, n);
+
+#ifdef __ARMEB__
+ port->tx_buff_tab[n] = skb;
+#else
+ port->tx_buff_tab[n] = mem;
+#endif
+ desc->data = phys + offset;
+ desc->buf_len = desc->pkt_len = len;
+
+ /* NPE firmware pads short frames with zeros internally */
+ wmb();
+ queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
+
+ if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
+#if DEBUG_TX
+ printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
+#endif
+ netif_stop_queue(dev);
+		/* TX-done could have refilled txreadyq meanwhile and we
+		   would miss its wake-up, so re-check if it's really empty */
+ if (!qmgr_stat_below_low_watermark(txreadyq)) {
+#if DEBUG_TX
+ printk(KERN_DEBUG "%s: eth_xmit ready again\n",
+ dev->name);
+#endif
+ netif_wake_queue(dev);
+ }
+ }
+
+#if DEBUG_TX
+ printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
+#endif
+
+ ixp_tx_timestamp(port, skb);
+ skb_tx_timestamp(skb);
+
+#ifndef __ARMEB__
+ dev_kfree_skb(skb);
+#endif
+ return NETDEV_TX_OK;
+}
+
+
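+/* Program the single address/mask multicast filter: the mask clears every
+ * bit position in which the listed addresses differ. */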
+static void eth_set_mcast_list(struct net_device *dev)
+{
+ struct port *port = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ u8 diffs[ETH_ALEN], *addr;
+ int i;
+ static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+ if (dev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < ETH_ALEN; i++) {
+ __raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
+ __raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
+ }
+ __raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
+ &port->regs->rx_control[0]);
+ return;
+ }
+
+ if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
+ __raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
+ &port->regs->rx_control[0]);
+ return;
+ }
+
+ memset(diffs, 0, ETH_ALEN);
+
+ addr = NULL;
+ netdev_for_each_mc_addr(ha, dev) {
+ if (!addr)
+ addr = ha->addr; /* first MAC address */
+ for (i = 0; i < ETH_ALEN; i++)
+ diffs[i] |= addr[i] ^ ha->addr[i];
+ }
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ __raw_writel(addr[i], &port->regs->mcast_addr[i]);
+ __raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
+ }
+
+ __raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
+ &port->regs->rx_control[0]);
+}
+
+
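+/* ioctl: the SIOCSHWTSTAMP ioctl is only handled on IXP46x; everything
+ * else is passed to the PHY. */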
+static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ struct port *port = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (cpu_is_ixp46x() && cmd == SIOCSHWTSTAMP)
+ return hwtstamp_ioctl(dev, req, cmd);
+
+ return phy_mii_ioctl(port->phydev, req, cmd);
+}
+
+/* ethtool support */
+
+static void ixp4xx_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct port *port = netdev_priv(dev);
+ strcpy(info->driver, DRV_NAME);
+ snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
+ port->firmware[0], port->firmware[1],
+ port->firmware[2], port->firmware[3]);
+ strcpy(info->bus_info, "internal");
+}
+
+static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct port *port = netdev_priv(dev);
+ return phy_ethtool_gset(port->phydev, cmd);
+}
+
+static int ixp4xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct port *port = netdev_priv(dev);
+ return phy_ethtool_sset(port->phydev, cmd);
+}
+
+static int ixp4xx_nway_reset(struct net_device *dev)
+{
+ struct port *port = netdev_priv(dev);
+ return phy_start_aneg(port->phydev);
+}
+
+static const struct ethtool_ops ixp4xx_ethtool_ops = {
+ .get_drvinfo = ixp4xx_get_drvinfo,
+ .get_settings = ixp4xx_get_settings,
+ .set_settings = ixp4xx_set_settings,
+ .nway_reset = ixp4xx_nway_reset,
+ .get_link = ethtool_op_get_link,
+};
+
+
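+/* Request the per-port hardware queues (RX-free, RX, TX, TX-ready) plus,
+ * for the first open port, the TX-done queue shared by all ports. */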
+static int request_queues(struct port *port)
+{
+ int err;
+
+ err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
+ "%s:RX-free", port->netdev->name);
+ if (err)
+ return err;
+
+ err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
+ "%s:RX", port->netdev->name);
+ if (err)
+ goto rel_rxfree;
+
+ err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
+ "%s:TX", port->netdev->name);
+ if (err)
+ goto rel_rx;
+
+ err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
+ "%s:TX-ready", port->netdev->name);
+ if (err)
+ goto rel_tx;
+
+ /* TX-done queue handles skbs sent out by the NPEs */
+ if (!ports_open) {
+ err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
+ "%s:TX-done", DRV_NAME);
+ if (err)
+ goto rel_txready;
+ }
+ return 0;
+
+rel_txready:
+ qmgr_release_queue(port->plat->txreadyq);
+rel_tx:
+ qmgr_release_queue(TX_QUEUE(port->id));
+rel_rx:
+ qmgr_release_queue(port->plat->rxq);
+rel_rxfree:
+ qmgr_release_queue(RXFREE_QUEUE(port->id));
+ printk(KERN_DEBUG "%s: unable to request hardware queues\n",
+ port->netdev->name);
+ return err;
+}
+
+static void release_queues(struct port *port)
+{
+ qmgr_release_queue(RXFREE_QUEUE(port->id));
+ qmgr_release_queue(port->plat->rxq);
+ qmgr_release_queue(TX_QUEUE(port->id));
+ qmgr_release_queue(port->plat->txreadyq);
+
+ if (!ports_open)
+ qmgr_release_queue(TXDONE_QUEUE);
+}
+
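+/* Allocate the descriptor table from the coherent DMA pool and attach a
+ * mapped RX buffer to every RX descriptor. */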
+static int init_queues(struct port *port)
+{
+ int i;
+
+ if (!ports_open)
+ if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
+ POOL_ALLOC_SIZE, 32, 0)))
+ return -ENOMEM;
+
+ if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
+ &port->desc_tab_phys)))
+ return -ENOMEM;
+ memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
+ memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
+ memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
+
+ /* Setup RX buffers */
+ for (i = 0; i < RX_DESCS; i++) {
+ struct desc *desc = rx_desc_ptr(port, i);
+ buffer_t *buff; /* skb or kmalloc()ated memory */
+ void *data;
+#ifdef __ARMEB__
+ if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
+ return -ENOMEM;
+ data = buff->data;
+#else
+ if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
+ return -ENOMEM;
+ data = buff;
+#endif
+ desc->buf_len = MAX_MRU;
+ desc->data = dma_map_single(&port->netdev->dev, data,
+ RX_BUFF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&port->netdev->dev, desc->data)) {
+ free_buffer(buff);
+ return -EIO;
+ }
+ desc->data += NET_IP_ALIGN;
+ port->rx_buff_tab[i] = buff;
+ }
+
+ return 0;
+}
+
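+/* Unmap and free all RX/TX buffers and return the descriptor table to the
+ * pool; the pool itself is destroyed when the last port closes. */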
+static void destroy_queues(struct port *port)
+{
+ int i;
+
+ if (port->desc_tab) {
+ for (i = 0; i < RX_DESCS; i++) {
+ struct desc *desc = rx_desc_ptr(port, i);
+ buffer_t *buff = port->rx_buff_tab[i];
+ if (buff) {
+ dma_unmap_single(&port->netdev->dev,
+ desc->data - NET_IP_ALIGN,
+ RX_BUFF_SIZE, DMA_FROM_DEVICE);
+ free_buffer(buff);
+ }
+ }
+ for (i = 0; i < TX_DESCS; i++) {
+ struct desc *desc = tx_desc_ptr(port, i);
+ buffer_t *buff = port->tx_buff_tab[i];
+ if (buff) {
+ dma_unmap_tx(port, desc);
+ free_buffer(buff);
+ }
+ }
+ dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
+ port->desc_tab = NULL;
+ }
+
+ if (!ports_open && dma_pool) {
+ dma_pool_destroy(dma_pool);
+ dma_pool = NULL;
+ }
+}
+
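+/* Bring a port up: load the NPE firmware if necessary, configure the NPE
+ * and MAC registers, fill the hardware queues and enable NAPI. */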
+static int eth_open(struct net_device *dev)
+{
+ struct port *port = netdev_priv(dev);
+ struct npe *npe = port->npe;
+ struct msg msg;
+ int i, err;
+
+ if (!npe_running(npe)) {
+ err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
+ if (err)
+ return err;
+
+ if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
+ printk(KERN_ERR "%s: %s not responding\n", dev->name,
+ npe_name(npe));
+ return -EIO;
+ }
+ port->firmware[0] = msg.byte4;
+ port->firmware[1] = msg.byte5;
+ port->firmware[2] = msg.byte6;
+ port->firmware[3] = msg.byte7;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+ msg.cmd = NPE_VLAN_SETRXQOSENTRY;
+ msg.eth_id = port->id;
+ msg.byte5 = port->plat->rxq | 0x80;
+ msg.byte7 = port->plat->rxq << 4;
+ for (i = 0; i < 8; i++) {
+ msg.byte3 = i;
+ if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
+ return -EIO;
+ }
+
+ msg.cmd = NPE_EDB_SETPORTADDRESS;
+ msg.eth_id = PHYSICAL_ID(port->id);
+ msg.byte2 = dev->dev_addr[0];
+ msg.byte3 = dev->dev_addr[1];
+ msg.byte4 = dev->dev_addr[2];
+ msg.byte5 = dev->dev_addr[3];
+ msg.byte6 = dev->dev_addr[4];
+ msg.byte7 = dev->dev_addr[5];
+ if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
+ return -EIO;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.cmd = NPE_FW_SETFIREWALLMODE;
+ msg.eth_id = port->id;
+ if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
+ return -EIO;
+
+ if ((err = request_queues(port)) != 0)
+ return err;
+
+ if ((err = init_queues(port)) != 0) {
+ destroy_queues(port);
+ release_queues(port);
+ return err;
+ }
+
+ port->speed = 0; /* force "link up" message */
+ phy_start(port->phydev);
+
+ for (i = 0; i < ETH_ALEN; i++)
+ __raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
+ __raw_writel(0x08, &port->regs->random_seed);
+ __raw_writel(0x12, &port->regs->partial_empty_threshold);
+ __raw_writel(0x30, &port->regs->partial_full_threshold);
+ __raw_writel(0x08, &port->regs->tx_start_bytes);
+ __raw_writel(0x15, &port->regs->tx_deferral);
+ __raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
+ __raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
+ __raw_writel(0x80, &port->regs->slot_time);
+ __raw_writel(0x01, &port->regs->int_clock_threshold);
+
+ /* Populate queues with buffers, no failure after this point */
+ for (i = 0; i < TX_DESCS; i++)
+ queue_put_desc(port->plat->txreadyq,
+ tx_desc_phys(port, i), tx_desc_ptr(port, i));
+
+ for (i = 0; i < RX_DESCS; i++)
+ queue_put_desc(RXFREE_QUEUE(port->id),
+ rx_desc_phys(port, i), rx_desc_ptr(port, i));
+
+ __raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
+ __raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
+ __raw_writel(0, &port->regs->rx_control[1]);
+ __raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);
+
+ napi_enable(&port->napi);
+ eth_set_mcast_list(dev);
+ netif_start_queue(dev);
+
+ qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
+ eth_rx_irq, dev);
+ if (!ports_open) {
+ qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
+ eth_txdone_irq, NULL);
+ qmgr_enable_irq(TXDONE_QUEUE);
+ }
+ ports_open++;
+	/* we may already have RX data; eth_poll() re-enables the IRQ when done */
+ napi_schedule(&port->napi);
+ return 0;
+}
+
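+/* Shut a port down; RX buffers still held by the NPE are drained by
+ * enabling internal loopback and injecting dummy frames. */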
+static int eth_close(struct net_device *dev)
+{
+ struct port *port = netdev_priv(dev);
+ struct msg msg;
+ int buffs = RX_DESCS; /* allocated RX buffers */
+ int i;
+
+ ports_open--;
+ qmgr_disable_irq(port->plat->rxq);
+ napi_disable(&port->napi);
+ netif_stop_queue(dev);
+
+ while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
+ buffs--;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.cmd = NPE_SETLOOPBACK_MODE;
+ msg.eth_id = port->id;
+ msg.byte3 = 1;
+ if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
+ printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);
+
+ i = 0;
+ do { /* drain RX buffers */
+ while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
+ buffs--;
+ if (!buffs)
+ break;
+ if (qmgr_stat_empty(TX_QUEUE(port->id))) {
+			/* we have to inject a packet to be looped back */
+ struct desc *desc;
+ u32 phys;
+ int n = queue_get_desc(port->plat->txreadyq, port, 1);
+ BUG_ON(n < 0);
+ desc = tx_desc_ptr(port, n);
+ phys = tx_desc_phys(port, n);
+ desc->buf_len = desc->pkt_len = 1;
+ wmb();
+ queue_put_desc(TX_QUEUE(port->id), phys, desc);
+ }
+ udelay(1);
+ } while (++i < MAX_CLOSE_WAIT);
+
+ if (buffs)
+ printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
+ " left in NPE\n", dev->name, buffs);
+#if DEBUG_CLOSE
+ if (!buffs)
+ printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
+#endif
+
+ buffs = TX_DESCS;
+ while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
+ buffs--; /* cancel TX */
+
+ i = 0;
+ do {
+ while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
+ buffs--;
+ if (!buffs)
+ break;
+ } while (++i < MAX_CLOSE_WAIT);
+
+ if (buffs)
+ printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
+ "left in NPE\n", dev->name, buffs);
+#if DEBUG_CLOSE
+ if (!buffs)
+ printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
+#endif
+
+ msg.byte3 = 0;
+ if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
+ printk(KERN_CRIT "%s: unable to disable loopback\n",
+ dev->name);
+
+ phy_stop(port->phydev);
+
+ if (!ports_open)
+ qmgr_disable_irq(TXDONE_QUEUE);
+ destroy_queues(port);
+ release_queues(port);
+ return 0;
+}
+
+static const struct net_device_ops ixp4xx_netdev_ops = {
+ .ndo_open = eth_open,
+ .ndo_stop = eth_close,
+ .ndo_start_xmit = eth_xmit,
+ .ndo_set_rx_mode = eth_set_mcast_list,
+ .ndo_do_ioctl = eth_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
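+/* Probe one Ethernet port: map its MAC registers, request its NPE, reset
+ * the MAC core and connect the PHY. */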
+static int __devinit eth_init_one(struct platform_device *pdev)
+{
+ struct port *port;
+ struct net_device *dev;
+ struct eth_plat_info *plat = pdev->dev.platform_data;
+ u32 regs_phys;
+ char phy_id[MII_BUS_ID_SIZE + 3];
+ int err;
+
+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
+ pr_err("ixp4xx_eth: bad ptp filter\n");
+ return -EINVAL;
+ }
+
+ if (!(dev = alloc_etherdev(sizeof(struct port))))
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ port = netdev_priv(dev);
+ port->netdev = dev;
+ port->id = pdev->id;
+
+ switch (port->id) {
+ case IXP4XX_ETH_NPEA:
+ port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
+ regs_phys = IXP4XX_EthA_BASE_PHYS;
+ break;
+ case IXP4XX_ETH_NPEB:
+ port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
+ regs_phys = IXP4XX_EthB_BASE_PHYS;
+ break;
+ case IXP4XX_ETH_NPEC:
+ port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
+ regs_phys = IXP4XX_EthC_BASE_PHYS;
+ break;
+ default:
+ err = -ENODEV;
+ goto err_free;
+ }
+
+ dev->netdev_ops = &ixp4xx_netdev_ops;
+ dev->ethtool_ops = &ixp4xx_ethtool_ops;
+ dev->tx_queue_len = 100;
+
+ netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);
+
+ if (!(port->npe = npe_request(NPE_ID(port->id)))) {
+ err = -EIO;
+ goto err_free;
+ }
+
+ port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
+ if (!port->mem_res) {
+ err = -EBUSY;
+ goto err_npe_rel;
+ }
+
+ port->plat = plat;
+ npe_port_tab[NPE_ID(port->id)] = port;
+ memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN);
+
+ platform_set_drvdata(pdev, dev);
+
+ __raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
+ &port->regs->core_control);
+ udelay(50);
+ __raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
+ udelay(50);
+
+ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy);
+ port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(port->phydev)) {
+ err = PTR_ERR(port->phydev);
+ goto err_free_mem;
+ }
+
+ port->phydev->irq = PHY_POLL;
+
+ if ((err = register_netdev(dev)))
+ goto err_phy_dis;
+
+ printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
+ npe_name(port->npe));
+
+ return 0;
+
+err_phy_dis:
+ phy_disconnect(port->phydev);
+err_free_mem:
+ npe_port_tab[NPE_ID(port->id)] = NULL;
+ platform_set_drvdata(pdev, NULL);
+ release_resource(port->mem_res);
+err_npe_rel:
+ npe_release(port->npe);
+err_free:
+ free_netdev(dev);
+ return err;
+}
+
+static int __devexit eth_remove_one(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct port *port = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ phy_disconnect(port->phydev);
+ npe_port_tab[NPE_ID(port->id)] = NULL;
+ platform_set_drvdata(pdev, NULL);
+ npe_release(port->npe);
+ release_resource(port->mem_res);
+ free_netdev(dev);
+ return 0;
+}
+
+static struct platform_driver ixp4xx_eth_driver = {
+ .driver.name = DRV_NAME,
+ .probe = eth_init_one,
+ .remove = eth_remove_one,
+};
+
+static int __init eth_init_module(void)
+{
+	int err;
+
+	if ((err = ixp4xx_mdio_register()))
+		return err;
+	if ((err = platform_driver_register(&ixp4xx_eth_driver)))
+		ixp4xx_mdio_remove();
+	return err;
+}
+
+static void __exit eth_cleanup_module(void)
+{
+ platform_driver_unregister(&ixp4xx_eth_driver);
+ ixp4xx_mdio_remove();
+}
+
+MODULE_AUTHOR("Krzysztof Halasa");
+MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ixp4xx_eth");
+module_init(eth_init_module);
+module_exit(eth_cleanup_module);